Merge branch 'master' of https://github.com/liquid-mirror/VEX into VEX folder.
diff --git a/VEX/HACKING.README b/VEX/HACKING.README
new file mode 100644
index 0000000..195a67e
--- /dev/null
+++ b/VEX/HACKING.README
@@ -0,0 +1,5 @@
+
+This directory and its children contain LibVEX, a library for dynamic
+binary instrumentation and translation.  See LICENSE.README for
+licensing and contribution information.
+
diff --git a/VEX/LICENSE.GPL b/VEX/LICENSE.GPL
new file mode 100644
index 0000000..3912109
--- /dev/null
+++ b/VEX/LICENSE.GPL
@@ -0,0 +1,340 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/VEX/LICENSE.README b/VEX/LICENSE.README
new file mode 100644
index 0000000..339b33d
--- /dev/null
+++ b/VEX/LICENSE.README
@@ -0,0 +1,23 @@
+
+This directory and its children contain LibVEX, a library for dynamic
+binary instrumentation and translation.
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file LICENSE.GPL.
+
+If you want to contribute code to LibVEX, please ensure it is licensed
+as "GPL v2 or later".
diff --git a/VEX/Makefile-gcc b/VEX/Makefile-gcc
new file mode 100644
index 0000000..eaeb8f5
--- /dev/null
+++ b/VEX/Makefile-gcc
@@ -0,0 +1,415 @@
+
+PUB_HEADERS = 	pub/libvex_basictypes.h 		\
+		pub/libvex_ir.h				\
+		pub/libvex.h				\
+		pub/libvex_trc_values.h			\
+		pub/libvex_emnote.h			\
+		pub/libvex_guest_x86.h			\
+		pub/libvex_guest_amd64.h		\
+		pub/libvex_guest_arm.h			\
+		pub/libvex_guest_ppc32.h		\
+		pub/libvex_guest_ppc64.h		\
+		pub/libvex_guest_s390x.h		\
+		pub/libvex_s390x_common.h		\
+		pub/libvex_guest_mips32.h		\
+		pub/libvex_guest_offsets.h
+
+PRIV_HEADERS = 	priv/host_x86_defs.h			\
+		priv/host_amd64_defs.h			\
+		priv/host_arm_defs.h			\
+		priv/host_ppc_defs.h			\
+		priv/host_s390_defs.h			\
+		priv/host_mips_defs.h			\
+		priv/host_generic_maddf.h	        \
+		priv/host_generic_regs.h	        \
+		priv/host_generic_simd64.h	        \
+		priv/host_generic_simd128.h	        \
+		priv/host_generic_simd256.h	        \
+		priv/main_globals.h			\
+		priv/main_util.h			\
+		priv/guest_generic_x87.h               	\
+		priv/guest_generic_bb_to_IR.h		\
+		priv/guest_x86_defs.h			\
+		priv/guest_amd64_defs.h	               	\
+		priv/guest_arm_defs.h			\
+		priv/guest_ppc_defs.h			\
+		priv/guest_mips_defs.h			\
+		priv/s390_disasm.h		        \
+		priv/s390_defs.h		        \
+		priv/ir_match.h			        \
+		priv/ir_opt.h
+
+LIB_OBJS = 	priv/ir_defs.o                          \
+		priv/ir_match.o			        \
+		priv/ir_opt.o				\
+		priv/ir_inject.o			\
+		priv/main_main.o			\
+		priv/main_globals.o			\
+		priv/main_util.o			\
+		priv/s390_disasm.o			\
+		priv/host_x86_defs.o			\
+		priv/host_amd64_defs.o			\
+		priv/host_arm_defs.o			\
+		priv/host_arm64_defs.o			\
+		priv/host_ppc_defs.o			\
+		priv/host_s390_defs.o			\
+		priv/host_mips_defs.o			\
+		priv/host_x86_isel.o			\
+		priv/host_amd64_isel.o			\
+		priv/host_arm_isel.o			\
+		priv/host_arm64_isel.o			\
+		priv/host_ppc_isel.o			\
+		priv/host_s390_isel.o			\
+		priv/host_mips_isel.o			\
+		priv/host_generic_maddf.o	        \
+		priv/host_generic_regs.o	        \
+		priv/host_generic_simd64.o	        \
+		priv/host_generic_simd128.o	        \
+		priv/host_generic_simd256.o	        \
+		priv/host_generic_reg_alloc2.o		\
+		priv/guest_generic_x87.o	        \
+		priv/guest_generic_bb_to_IR.o		\
+		priv/guest_x86_helpers.o		\
+		priv/guest_amd64_helpers.o		\
+		priv/guest_arm_helpers.o		\
+		priv/guest_arm64_helpers.o		\
+		priv/guest_ppc_helpers.o		\
+		priv/guest_s390_helpers.o		\
+		priv/guest_mips_helpers.o		\
+		priv/guest_x86_toIR.o			\
+		priv/guest_amd64_toIR.o			\
+		priv/guest_arm_toIR.o			\
+		priv/guest_arm64_toIR.o			\
+		priv/guest_ppc_toIR.o                   \
+		priv/guest_s390_toIR.o			\
+		priv/guest_mips_toIR.o
+
+PUB_INCLUDES = -Ipub
+
+# Do not add any priv/host-ARCH or priv/guest-ARCH directories to this
+# list, as they contain duplicate file names (each host has a hdefs.h,
+# for example).
+PRIV_INCLUDES = -Ipriv
+
+
+ifndef CC
+   CC = gcc 
+endif 
+ifndef AR
+   AR = ar 
+endif
+
+# Put -g -O2 after any flags we inherit from V.  -O2 vs -O
+# makes a significant difference, at least with gcc4.
+CCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -Wshadow \
+		-Wpointer-arith -Wbad-function-cast -Wcast-qual \
+		-Wcast-align -Wmissing-declarations \
+		-Wwrite-strings -Wformat -Wformat-security \
+		-std=gnu99 \
+		$(EXTRA_CFLAGS) -g -O2 -fstrict-aliasing
+
+#CC = icc
+#CCFLAGS = -g -Wall -wd981 -wd279 -wd1287 -wd869 -wd111 -wd188 -wd186
+# 981: operands are evaluated in unspecified order
+# 279: controlling expression is constant
+# 1287: invalid attribute for parameter
+# 869: parameter "..." was never referenced
+# 111: statement is unreachable
+# 188: enumerated type mixed with another type
+# (the above are for icc 8.0 -- 8.0.0.55 I think)
+# 186: pointless comparison of unsigned integer with zero
+
+# kludge: stops V biarch builds screwing up at -j 2 or above
+# The Right fix is to autoconf/automake-ise vex.
+.NOTPARALLEL:
+
+all: vex
+
+# Empty, needed for Valgrind
+install:
+
+scratch: clean all
+
+vex: libvex.a
+
+libvex.a: $(LIB_OBJS)
+	rm -f libvex.a
+	$(AR) crus libvex.a $(LIB_OBJS)
+
+
+# The idea with these TAG-s is to mark the flavour of libvex.a 
+# most recently built, so if the same target is re-requested, we
+# don't rebuild everything, but if a different one is requested
+# then we scrub everything and start over.
+
+libvex-x86-linux.a: TAG-x86-linux libvex.a
+	mv -f libvex.a libvex-x86-linux.a
+TAG-x86-linux:
+	if [ ! -f TAG-x86-linux ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-x86-linux
+
+libvex-amd64-linux.a: TAG-amd64-linux libvex.a
+	mv -f libvex.a libvex-amd64-linux.a
+TAG-amd64-linux:
+	if [ ! -f TAG-amd64-linux ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-amd64-linux
+
+libvex-ppc32-linux.a: TAG-ppc32-linux libvex.a
+	mv -f libvex.a libvex-ppc32-linux.a
+TAG-ppc32-linux:
+	if [ ! -f TAG-ppc32-linux ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-ppc32-linux
+
+libvex-ppc64-linux.a: TAG-ppc64-linux libvex.a
+	mv -f libvex.a libvex-ppc64-linux.a
+TAG-ppc64-linux:
+	if [ ! -f TAG-ppc64-linux ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-ppc64-linux
+
+libvex-mips32-linux.a: TAG-mips32-linux libvex.a
+	mv -f libvex.a libvex-mips32-linux.a
+TAG-mips32-linux:
+	if [ ! -f TAG-mips32-linux ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-mips32-linux
+
+libvex-ppc32-aix5.a: TAG-ppc32-aix5 libvex.a
+	mv -f libvex.a libvex-ppc32-aix5.a
+TAG-ppc32-aix5:
+	if [ ! -f TAG-ppc32-aix5 ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-ppc32-aix5
+
+libvex-ppc64-aix5.a: TAG-ppc64-aix5 libvex.a
+	mv -f libvex.a libvex-ppc64-aix5.a
+TAG-ppc64-aix5:
+	if [ ! -f TAG-ppc64-aix5 ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-ppc64-aix5
+
+libvex-x86-darwin.a: TAG-x86-darwin libvex.a
+	mv -f libvex.a libvex-x86-darwin.a
+TAG-x86-darwin:
+	if [ ! -f TAG-x86-darwin ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-x86-darwin
+
+libvex-amd64-darwin.a: TAG-amd64-darwin libvex.a
+	mv -f libvex.a libvex-amd64-darwin.a
+TAG-amd64-darwin:
+	if [ ! -f TAG-amd64-darwin ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-amd64-darwin
+
+libvex-arm64-linux.a: TAG-arm64-linux libvex.a
+	mv -f libvex.a libvex-arm64-linux.a
+TAG-arm64-linux:
+	if [ ! -f TAG-arm64-linux ] ; then rm -f $(LIB_OBJS) TAG-* libvex.a ; fi
+	touch TAG-arm64-linux
+
+
+clean:
+	rm -f $(LIB_OBJS) *.a TAG-* \
+		pub/libvex_guest_offsets.h \
+		auxprogs/genoffsets.s
+
+minidist:
+	rm -f vex--minidist-2005MMDD.tar
+	tar cf vex--minidist-2005MMDD.tar $(PUB_HEADERS) $(PRIV_HEADERS) \
+		Makefile-gcc					\
+		`echo $(LIB_OBJS) | sed "s/\.o/\.c/g"`
+	@echo 
+	@echo minidist done, size follows:
+	@ls -l vex--minidist-2005MMDD.tar
+	@echo
+
+# This is very uggerly.  Need to sed out both "xyzzyN" and
+# "xyzzy$N" since gcc on different targets emits the constants
+# differently -- with a leading $ on x86/amd64 but none on ppc32/64.
+# ICC also emits the constants differently with a leading # #define
+pub/libvex_guest_offsets.h:
+	rm -f auxprogs/genoffsets.s
+	$(CC) $(CCFLAGS) -O -S -o auxprogs/genoffsets.s \
+				auxprogs/genoffsets.c
+	grep xyzzy auxprogs/genoffsets.s | grep "^[# ]*#define" \
+	   | sed "s/# #define/#define/g" \
+	   | sed "s/xyzzy\\$$//g" \
+	   | sed "s/xyzzy#//g" \
+	   | sed "s/xyzzy//g" \
+	   > pub/libvex_guest_offsets.h
+	rm -f auxprogs/genoffsets.s
+
+
+ALL_HEADERS  = $(PUB_HEADERS) $(PRIV_HEADERS)
+ALL_INCLUDES = $(PUB_INCLUDES) $(PRIV_INCLUDES)
+
+priv/ir_defs.o: $(ALL_HEADERS) priv/ir_defs.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/ir_defs.o \
+					 -c priv/ir_defs.c
+
+priv/ir_inject.o: $(ALL_HEADERS) priv/ir_inject.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/ir_inject.o \
+					 -c priv/ir_inject.c
+
+priv/ir_match.o: $(ALL_HEADERS) priv/ir_match.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/ir_match.o \
+					 -c priv/ir_match.c
+
+priv/ir_opt.o: $(ALL_HEADERS) priv/ir_opt.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/ir_opt.o \
+					 -c priv/ir_opt.c
+
+priv/main_main.o: $(ALL_HEADERS) priv/main_main.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/main_main.o \
+					 -c priv/main_main.c
+
+priv/main_globals.o: $(ALL_HEADERS) priv/main_globals.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/main_globals.o \
+					 -c priv/main_globals.c
+
+priv/main_util.o: $(ALL_HEADERS) priv/main_util.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/main_util.o \
+					 -c priv/main_util.c
+
+priv/host_x86_defs.o: $(ALL_HEADERS) priv/host_x86_defs.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_x86_defs.o \
+					 -c priv/host_x86_defs.c
+
+priv/host_amd64_defs.o: $(ALL_HEADERS) priv/host_amd64_defs.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_amd64_defs.o \
+					 -c priv/host_amd64_defs.c
+
+priv/host_arm_defs.o: $(ALL_HEADERS) priv/host_arm_defs.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_arm_defs.o \
+					 -c priv/host_arm_defs.c
+
+priv/host_arm64_defs.o: $(ALL_HEADERS) priv/host_arm64_defs.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_arm64_defs.o \
+					 -c priv/host_arm64_defs.c
+
+priv/host_ppc_defs.o: $(ALL_HEADERS) priv/host_ppc_defs.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_ppc_defs.o \
+					 -c priv/host_ppc_defs.c
+
+priv/host_s390_defs.o: $(ALL_HEADERS) priv/host_s390_defs.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_s390_defs.o \
+					 -c priv/host_s390_defs.c
+
+priv/host_mips_defs.o: $(ALL_HEADERS) priv/host_mips_defs.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_mips_defs.o \
+					 -c priv/host_mips_defs.c
+
+priv/host_x86_isel.o: $(ALL_HEADERS) priv/host_x86_isel.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_x86_isel.o \
+					 -c priv/host_x86_isel.c
+
+priv/host_amd64_isel.o: $(ALL_HEADERS) priv/host_amd64_isel.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_amd64_isel.o \
+					 -c priv/host_amd64_isel.c
+
+priv/host_arm_isel.o: $(ALL_HEADERS) priv/host_arm_isel.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_arm_isel.o \
+					 -c priv/host_arm_isel.c
+
+priv/host_arm64_isel.o: $(ALL_HEADERS) priv/host_arm64_isel.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_arm64_isel.o \
+					 -c priv/host_arm64_isel.c
+
+priv/host_ppc_isel.o: $(ALL_HEADERS) priv/host_ppc_isel.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_ppc_isel.o \
+					 -c priv/host_ppc_isel.c
+
+priv/host_s390_isel.o: $(ALL_HEADERS) priv/host_s390_isel.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_s390_isel.o \
+					 -c priv/host_s390_isel.c
+
+priv/host_mips_isel.o: $(ALL_HEADERS) priv/host_mips_isel.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_mips_isel.o \
+					 -c priv/host_mips_isel.c
+
+priv/host_generic_maddf.o: $(ALL_HEADERS) priv/host_generic_maddf.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_generic_maddf.o \
+					 -c priv/host_generic_maddf.c
+
+priv/host_generic_regs.o: $(ALL_HEADERS) priv/host_generic_regs.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_generic_regs.o \
+					 -c priv/host_generic_regs.c
+
+priv/host_generic_simd64.o: $(ALL_HEADERS) priv/host_generic_simd64.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_generic_simd64.o \
+					 -c priv/host_generic_simd64.c
+
+priv/host_generic_simd128.o: $(ALL_HEADERS) priv/host_generic_simd128.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_generic_simd128.o \
+					 -c priv/host_generic_simd128.c
+
+priv/host_generic_simd256.o: $(ALL_HEADERS) priv/host_generic_simd256.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_generic_simd256.o \
+					 -c priv/host_generic_simd256.c
+
+priv/host_generic_reg_alloc2.o: $(ALL_HEADERS) priv/host_generic_reg_alloc2.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/host_generic_reg_alloc2.o \
+					 -c priv/host_generic_reg_alloc2.c
+
+priv/guest_x86_toIR.o: $(ALL_HEADERS) priv/guest_x86_toIR.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_x86_toIR.o \
+					 -c priv/guest_x86_toIR.c
+
+priv/guest_generic_x87.o: $(ALL_HEADERS) priv/guest_generic_x87.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_generic_x87.o \
+					 -c priv/guest_generic_x87.c
+
+priv/guest_generic_bb_to_IR.o: $(ALL_HEADERS) priv/guest_generic_bb_to_IR.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_generic_bb_to_IR.o \
+					 -c priv/guest_generic_bb_to_IR.c
+
+priv/guest_x86_helpers.o: $(ALL_HEADERS) priv/guest_x86_helpers.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_x86_helpers.o \
+					 -c priv/guest_x86_helpers.c
+
+priv/guest_amd64_helpers.o: $(ALL_HEADERS) priv/guest_amd64_helpers.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_amd64_helpers.o \
+					 -c priv/guest_amd64_helpers.c
+
+priv/guest_amd64_toIR.o: $(ALL_HEADERS) priv/guest_amd64_toIR.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_amd64_toIR.o \
+					 -c priv/guest_amd64_toIR.c
+
+priv/guest_arm_helpers.o: $(ALL_HEADERS) priv/guest_arm_helpers.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_arm_helpers.o \
+					 -c priv/guest_arm_helpers.c
+
+priv/guest_arm64_helpers.o: $(ALL_HEADERS) priv/guest_arm64_helpers.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_arm64_helpers.o \
+					 -c priv/guest_arm64_helpers.c
+
+priv/guest_arm_toIR.o: $(ALL_HEADERS) priv/guest_arm_toIR.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_arm_toIR.o \
+					 -c priv/guest_arm_toIR.c
+
+priv/guest_arm64_toIR.o: $(ALL_HEADERS) priv/guest_arm64_toIR.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_arm64_toIR.o \
+					 -c priv/guest_arm64_toIR.c
+
+priv/guest_ppc_helpers.o: $(ALL_HEADERS) priv/guest_ppc_helpers.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_ppc_helpers.o \
+					 -c priv/guest_ppc_helpers.c
+
+priv/guest_s390_helpers.o: $(ALL_HEADERS) priv/guest_s390_helpers.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_s390_helpers.o \
+					 -c priv/guest_s390_helpers.c
+
+priv/guest_ppc_toIR.o: $(ALL_HEADERS) priv/guest_ppc_toIR.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_ppc_toIR.o \
+					 -c priv/guest_ppc_toIR.c
+
+priv/guest_s390_toIR.o: $(ALL_HEADERS) priv/guest_s390_toIR.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_s390_toIR.o \
+					 -c priv/guest_s390_toIR.c
+
+priv/s390_disasm.o: $(ALL_HEADERS) priv/s390_disasm.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/s390_disasm.o \
+					 -c priv/s390_disasm.c
+
+priv/guest_mips_helpers.o: $(ALL_HEADERS) priv/guest_mips_helpers.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_mips_helpers.o \
+					 -c priv/guest_mips_helpers.c
+
+priv/guest_mips_toIR.o: $(ALL_HEADERS) priv/guest_mips_toIR.c
+	$(CC) $(CCFLAGS) $(ALL_INCLUDES) -o priv/guest_mips_toIR.o \
+					 -c priv/guest_mips_toIR.c
diff --git a/VEX/TODO.txt b/VEX/TODO.txt
new file mode 100644
index 0000000..24dc3cb
--- /dev/null
+++ b/VEX/TODO.txt
@@ -0,0 +1,55 @@
+
+Last updated 15 Nov 04
+~~~~~~~~~~~~~~~~~~~~~~
+
+Critical (correctness)
+~~~~~~~~~~~~~~~~~~~~~~
+x86 isel: should free up all fp reg tags when calling a helper.
+And save FP and SSE insns across the helper.
+
+iropt: reconsider precise exceptions
+
+x86 guest: look at FP accuracy
+
+
+Optimisation opportunities
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+Improved isel for memcheck artefacts on x86 (generate neg ; sbbl)
+
+Assess tt_fast miss rates 
+
+improve stack-update pass
+
+proper profiling machinery
+
+do not CSE exprs :: Ity_Bit
+
+x86 iselIntExpr_RMI: actually generate the M case if possible
+
+
+JIT speedups
+~~~~~~~~~~~~
+Ensure incremental flatness throughout
+
+Profile again with cachegrind/calltree
+
+change IRTemp to 16 bits?
+
+
+Integration
+~~~~~~~~~~~
+Get rid of sloppy-malloc
+
+Get rid of partial-loads-ok
+
+Optimisation after first instrumentation rather than 2nd ?
+
+disallow dirty helpers from writing SP/IP
+
+write API doc, clarify IR semantics
+
+make IR utils module
+
+generic stack pointer identification at startup?
+
+New memstack_k: old or new sp?
diff --git a/VEX/auxprogs/genoffsets.c b/VEX/auxprogs/genoffsets.c
new file mode 100644
index 0000000..b84efd3
--- /dev/null
+++ b/VEX/auxprogs/genoffsets.c
@@ -0,0 +1,333 @@
+
+/*--------------------------------------------------------------------*/
+/*--- begin                                           genoffsets.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include <stdio.h>
+
+/* A program which, when compiled to assembly, exposes various guest
+   state offsets.  The program isn't executed, since that breaks
+   cross-compilation.
+
+   It does rely on the assumption that 'my_offsetof(Ty,Field)' is
+   folded to a constant at compile time, which seems a bit dodgy
+   to me.  On gcc4 it is possible to use __builtin_offsetof, which
+   sounds safer, but that doesn't exist on older gccs.  Oh Well.
+*/
+
+#include "../pub/libvex_basictypes.h"
+#include "../pub/libvex_guest_x86.h"
+#include "../pub/libvex_guest_amd64.h"
+#include "../pub/libvex_guest_ppc32.h"
+#include "../pub/libvex_guest_ppc64.h"
+#include "../pub/libvex_guest_arm.h"
+#include "../pub/libvex_guest_arm64.h"
+#include "../pub/libvex_guest_s390x.h"
+#include "../pub/libvex_guest_mips32.h"
+#include "../pub/libvex_guest_mips64.h"
+#include "../pub/libvex_guest_tilegx.h"
+
+#define VG_STRINGIFZ(__str)  #__str
+#define VG_STRINGIFY(__str)  VG_STRINGIFZ(__str)
+
+#define my_offsetof(__type,__field) (&((__type*)0)->__field)
+
+/* This forces gcc to evaluate the my_offsetof call at compile time,
+   and then emits it in the assembly, along with the nonsense string
+   "xyzzy", for easy greppability.  Once this file is compiled to
+   assembly, the lines containing "xyzzy" are grepped out and sed-ed
+   to produce the final result.  See the Makefile rule for
+   pub/libvex_guest_offsets.h. */
+#define GENOFFSET(_structUppercase,_structLowercase,_fieldname)  \
+   __asm__ __volatile__ ( \
+      "\n#define OFFSET_" \
+      VG_STRINGIFY(_structLowercase) "_" \
+      VG_STRINGIFY(_fieldname) \
+      " xyzzy%0\n" : /*out*/ \
+                   : /*in*/ "n" \
+         (my_offsetof(VexGuest##_structUppercase##State, \
+          guest_##_fieldname)) \
+   )
+
+void foo ( void );
+__attribute__((noinline))
+void foo ( void )
+{
+   // x86
+   GENOFFSET(X86,x86,EAX);
+   GENOFFSET(X86,x86,EBX);
+   GENOFFSET(X86,x86,ECX);
+   GENOFFSET(X86,x86,EDX);
+   GENOFFSET(X86,x86,ESI);
+   GENOFFSET(X86,x86,EDI);
+   GENOFFSET(X86,x86,EBP);
+   GENOFFSET(X86,x86,ESP);
+   GENOFFSET(X86,x86,EIP);
+   GENOFFSET(X86,x86,CS);
+   GENOFFSET(X86,x86,DS);
+   GENOFFSET(X86,x86,ES);
+   GENOFFSET(X86,x86,FS);
+   GENOFFSET(X86,x86,GS);
+   GENOFFSET(X86,x86,SS);
+
+   // amd64
+   GENOFFSET(AMD64,amd64,RAX);
+   GENOFFSET(AMD64,amd64,RBX);
+   GENOFFSET(AMD64,amd64,RCX);
+   GENOFFSET(AMD64,amd64,RDX);
+   GENOFFSET(AMD64,amd64,RSI);
+   GENOFFSET(AMD64,amd64,RDI);
+   GENOFFSET(AMD64,amd64,RSP);
+   GENOFFSET(AMD64,amd64,RBP);
+   GENOFFSET(AMD64,amd64,R8);
+   GENOFFSET(AMD64,amd64,R9);
+   GENOFFSET(AMD64,amd64,R10);
+   GENOFFSET(AMD64,amd64,R11);
+   GENOFFSET(AMD64,amd64,R12);
+   GENOFFSET(AMD64,amd64,R13);
+   GENOFFSET(AMD64,amd64,R14);
+   GENOFFSET(AMD64,amd64,R15);
+   GENOFFSET(AMD64,amd64,RIP);
+
+   // ppc32
+   GENOFFSET(PPC32,ppc32,GPR0);
+   GENOFFSET(PPC32,ppc32,GPR1);
+   GENOFFSET(PPC32,ppc32,GPR2);
+   GENOFFSET(PPC32,ppc32,GPR3);
+   GENOFFSET(PPC32,ppc32,GPR4);
+   GENOFFSET(PPC32,ppc32,GPR5);
+   GENOFFSET(PPC32,ppc32,GPR6);
+   GENOFFSET(PPC32,ppc32,GPR7);
+   GENOFFSET(PPC32,ppc32,GPR8);
+   GENOFFSET(PPC32,ppc32,GPR9);
+   GENOFFSET(PPC32,ppc32,GPR10);
+   GENOFFSET(PPC32,ppc32,CIA);
+   GENOFFSET(PPC32,ppc32,CR0_0);
+
+   // ppc64
+   GENOFFSET(PPC64,ppc64,GPR0);
+   GENOFFSET(PPC64,ppc64,GPR1);
+   GENOFFSET(PPC64,ppc64,GPR2);
+   GENOFFSET(PPC64,ppc64,GPR3);
+   GENOFFSET(PPC64,ppc64,GPR4);
+   GENOFFSET(PPC64,ppc64,GPR5);
+   GENOFFSET(PPC64,ppc64,GPR6);
+   GENOFFSET(PPC64,ppc64,GPR7);
+   GENOFFSET(PPC64,ppc64,GPR8);
+   GENOFFSET(PPC64,ppc64,GPR9);
+   GENOFFSET(PPC64,ppc64,GPR10);
+   GENOFFSET(PPC64,ppc64,CIA);
+   GENOFFSET(PPC64,ppc64,CR0_0);
+
+   // arm
+   GENOFFSET(ARM,arm,R0);
+   GENOFFSET(ARM,arm,R1);
+   GENOFFSET(ARM,arm,R2);
+   GENOFFSET(ARM,arm,R3);
+   GENOFFSET(ARM,arm,R4);
+   GENOFFSET(ARM,arm,R5);
+   GENOFFSET(ARM,arm,R7);
+   GENOFFSET(ARM,arm,R13);
+   GENOFFSET(ARM,arm,R14);
+   GENOFFSET(ARM,arm,R15T);
+
+   // arm64
+   GENOFFSET(ARM64,arm64,X0);
+   GENOFFSET(ARM64,arm64,X1);
+   GENOFFSET(ARM64,arm64,X2);
+   GENOFFSET(ARM64,arm64,X3);
+   GENOFFSET(ARM64,arm64,X4);
+   GENOFFSET(ARM64,arm64,X5);
+   GENOFFSET(ARM64,arm64,X6);
+   GENOFFSET(ARM64,arm64,X7);
+   GENOFFSET(ARM64,arm64,X8);
+   GENOFFSET(ARM64,arm64,XSP);
+   GENOFFSET(ARM64,arm64,PC);
+
+   // s390x
+   GENOFFSET(S390X,s390x,r2);
+   GENOFFSET(S390X,s390x,r3);
+   GENOFFSET(S390X,s390x,r4);
+   GENOFFSET(S390X,s390x,r5);
+   GENOFFSET(S390X,s390x,r6);
+   GENOFFSET(S390X,s390x,r7);
+   GENOFFSET(S390X,s390x,r15);
+   GENOFFSET(S390X,s390x,IA);
+   GENOFFSET(S390X,s390x,SYSNO);
+   GENOFFSET(S390X,s390x,IP_AT_SYSCALL);
+   GENOFFSET(S390X,s390x,fpc);
+   GENOFFSET(S390X,s390x,CC_OP);
+   GENOFFSET(S390X,s390x,CC_DEP1);
+   GENOFFSET(S390X,s390x,CC_DEP2);
+   GENOFFSET(S390X,s390x,CC_NDEP);
+
+   // MIPS32
+   GENOFFSET(MIPS32,mips32,r0);
+   GENOFFSET(MIPS32,mips32,r1);   
+   GENOFFSET(MIPS32,mips32,r2);
+   GENOFFSET(MIPS32,mips32,r3);
+   GENOFFSET(MIPS32,mips32,r4);
+   GENOFFSET(MIPS32,mips32,r5);
+   GENOFFSET(MIPS32,mips32,r6);
+   GENOFFSET(MIPS32,mips32,r7);
+   GENOFFSET(MIPS32,mips32,r8);
+   GENOFFSET(MIPS32,mips32,r9);
+   GENOFFSET(MIPS32,mips32,r10);
+   GENOFFSET(MIPS32,mips32,r11);
+   GENOFFSET(MIPS32,mips32,r12);
+   GENOFFSET(MIPS32,mips32,r13);
+   GENOFFSET(MIPS32,mips32,r14);
+   GENOFFSET(MIPS32,mips32,r15);
+   GENOFFSET(MIPS32,mips32,r16);
+   GENOFFSET(MIPS32,mips32,r17);
+   GENOFFSET(MIPS32,mips32,r18);
+   GENOFFSET(MIPS32,mips32,r19);
+   GENOFFSET(MIPS32,mips32,r20);
+   GENOFFSET(MIPS32,mips32,r21);
+   GENOFFSET(MIPS32,mips32,r22);
+   GENOFFSET(MIPS32,mips32,r23);
+   GENOFFSET(MIPS32,mips32,r24);
+   GENOFFSET(MIPS32,mips32,r25);
+   GENOFFSET(MIPS32,mips32,r26);
+   GENOFFSET(MIPS32,mips32,r27);
+   GENOFFSET(MIPS32,mips32,r28);
+   GENOFFSET(MIPS32,mips32,r29);
+   GENOFFSET(MIPS32,mips32,r30);
+   GENOFFSET(MIPS32,mips32,r31);
+   GENOFFSET(MIPS32,mips32,PC);
+   GENOFFSET(MIPS32,mips32,HI);
+   GENOFFSET(MIPS32,mips32,LO);
+
+   // MIPS64
+   GENOFFSET(MIPS64,mips64,r0);
+   GENOFFSET(MIPS64,mips64,r1);
+   GENOFFSET(MIPS64,mips64,r2);
+   GENOFFSET(MIPS64,mips64,r3);
+   GENOFFSET(MIPS64,mips64,r4);
+   GENOFFSET(MIPS64,mips64,r5);
+   GENOFFSET(MIPS64,mips64,r6);
+   GENOFFSET(MIPS64,mips64,r7);
+   GENOFFSET(MIPS64,mips64,r8);
+   GENOFFSET(MIPS64,mips64,r9);
+   GENOFFSET(MIPS64,mips64,r10);
+   GENOFFSET(MIPS64,mips64,r11);
+   GENOFFSET(MIPS64,mips64,r12);
+   GENOFFSET(MIPS64,mips64,r13);
+   GENOFFSET(MIPS64,mips64,r14);
+   GENOFFSET(MIPS64,mips64,r15);
+   GENOFFSET(MIPS64,mips64,r16);
+   GENOFFSET(MIPS64,mips64,r17);
+   GENOFFSET(MIPS64,mips64,r18);
+   GENOFFSET(MIPS64,mips64,r19);
+   GENOFFSET(MIPS64,mips64,r20);
+   GENOFFSET(MIPS64,mips64,r21);
+   GENOFFSET(MIPS64,mips64,r22);
+   GENOFFSET(MIPS64,mips64,r23);
+   GENOFFSET(MIPS64,mips64,r24);
+   GENOFFSET(MIPS64,mips64,r25);
+   GENOFFSET(MIPS64,mips64,r26);
+   GENOFFSET(MIPS64,mips64,r27);
+   GENOFFSET(MIPS64,mips64,r28);
+   GENOFFSET(MIPS64,mips64,r29);
+   GENOFFSET(MIPS64,mips64,r30);
+   GENOFFSET(MIPS64,mips64,r31);
+   GENOFFSET(MIPS64,mips64,PC);
+   GENOFFSET(MIPS64,mips64,HI);
+   GENOFFSET(MIPS64,mips64,LO);
+
+   // Tilegx
+   GENOFFSET(TILEGX,tilegx,r0);
+   GENOFFSET(TILEGX,tilegx,r1);
+   GENOFFSET(TILEGX,tilegx,r2);
+   GENOFFSET(TILEGX,tilegx,r3);
+   GENOFFSET(TILEGX,tilegx,r4);
+   GENOFFSET(TILEGX,tilegx,r5);
+   GENOFFSET(TILEGX,tilegx,r6);
+   GENOFFSET(TILEGX,tilegx,r7);
+   GENOFFSET(TILEGX,tilegx,r8);
+   GENOFFSET(TILEGX,tilegx,r9);
+   GENOFFSET(TILEGX,tilegx,r10);
+   GENOFFSET(TILEGX,tilegx,r11);
+   GENOFFSET(TILEGX,tilegx,r12);
+   GENOFFSET(TILEGX,tilegx,r13);
+   GENOFFSET(TILEGX,tilegx,r14);
+   GENOFFSET(TILEGX,tilegx,r15);
+   GENOFFSET(TILEGX,tilegx,r16);
+   GENOFFSET(TILEGX,tilegx,r17);
+   GENOFFSET(TILEGX,tilegx,r18);
+   GENOFFSET(TILEGX,tilegx,r19);
+   GENOFFSET(TILEGX,tilegx,r20);
+   GENOFFSET(TILEGX,tilegx,r21);
+   GENOFFSET(TILEGX,tilegx,r22);
+   GENOFFSET(TILEGX,tilegx,r23);
+   GENOFFSET(TILEGX,tilegx,r24);
+   GENOFFSET(TILEGX,tilegx,r25);
+   GENOFFSET(TILEGX,tilegx,r26);
+   GENOFFSET(TILEGX,tilegx,r27);
+   GENOFFSET(TILEGX,tilegx,r28);
+   GENOFFSET(TILEGX,tilegx,r29);
+   GENOFFSET(TILEGX,tilegx,r30);
+   GENOFFSET(TILEGX,tilegx,r31);
+   GENOFFSET(TILEGX,tilegx,r32);
+   GENOFFSET(TILEGX,tilegx,r33);
+   GENOFFSET(TILEGX,tilegx,r34);
+   GENOFFSET(TILEGX,tilegx,r35);
+   GENOFFSET(TILEGX,tilegx,r36);
+   GENOFFSET(TILEGX,tilegx,r37);
+   GENOFFSET(TILEGX,tilegx,r38);
+   GENOFFSET(TILEGX,tilegx,r39);
+   GENOFFSET(TILEGX,tilegx,r40);
+   GENOFFSET(TILEGX,tilegx,r41);
+   GENOFFSET(TILEGX,tilegx,r42);
+   GENOFFSET(TILEGX,tilegx,r43);
+   GENOFFSET(TILEGX,tilegx,r44);
+   GENOFFSET(TILEGX,tilegx,r45);
+   GENOFFSET(TILEGX,tilegx,r46);
+   GENOFFSET(TILEGX,tilegx,r47);
+   GENOFFSET(TILEGX,tilegx,r48);
+   GENOFFSET(TILEGX,tilegx,r49);
+   GENOFFSET(TILEGX,tilegx,r50);
+   GENOFFSET(TILEGX,tilegx,r51);
+   GENOFFSET(TILEGX,tilegx,r52);
+   GENOFFSET(TILEGX,tilegx,r53);
+   GENOFFSET(TILEGX,tilegx,r54);
+   GENOFFSET(TILEGX,tilegx,r55);
+   GENOFFSET(TILEGX,tilegx,pc);
+   GENOFFSET(TILEGX,tilegx,EMNOTE);
+   GENOFFSET(TILEGX,tilegx,CMSTART);
+   GENOFFSET(TILEGX,tilegx,NRADDR);
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end                                             genoffsets.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/nanoarm.orig b/VEX/nanoarm.orig
new file mode 100644
index 0000000..90742df
--- /dev/null
+++ b/VEX/nanoarm.orig
@@ -0,0 +1,19 @@
+       0:       e1a0c00d        mov     ip, sp
+. 0 00008000 4
+. 0d c0 a0 e1
+
+       4:       e92dd810        stmdb   sp!, {r4, fp, ip, lr, pc}
+. 1 00008004 4
+. 10 d8 2d e9
+
+       8:       e24cb004        sub     fp, ip, #4      ; 0x4
+. 2 00008008 4
+. 04 b0 4c e2
+
+       c:       e3a00014        mov     r0, #20 ; 0x14
+. 3 0000800C 4
+. 14 00 a0 e3
+
+      10:       ebfffffe        bl      0 <newHHW>
+. 4 00008010 4
+. fe ff ff eb
diff --git a/VEX/orig_amd64/Compare.hs b/VEX/orig_amd64/Compare.hs
new file mode 100644
index 0000000..6641267
--- /dev/null
+++ b/VEX/orig_amd64/Compare.hs
@@ -0,0 +1,63 @@
+
+module Main where
+
+import Char ( isSpace )
+
+{- Compares a .sorted file with a raw printout of instructions
+   and shows differences.
+
+   First file (REF) is has lines of format
+
+      hex-digits  SPACEs  insn(possibly with spaces)
+
+   Second file (TEST) has lines of format
+
+      insn(possibly with spaces)
+
+   Purpose is to extract the insn (text), remove spaces, and compare.
+
+   How to use:
+(cd .. && make) && (../vex test1.orig | grep LALALA | cut -b 22- > out.txt)
+/home/sewardj/Tools/HugsInst/bin/runhugs Compare.hs | grep FAIL 
+-}
+
+main = mayn "test2.sorted" "out.txt"
+
+mayn :: String -> String -> IO ()
+
+mayn sorted_fn dump_fn
+   = do sorted <- readFile sorted_fn
+        dump   <- readFile dump_fn
+        let ress = zipWith check (lines (deTab sorted))
+                                 (lines (deTab dump))
+        putStrLn (unlines ress)
+
+
+check :: String -> String -> String
+check ref test 
+   = let ref_clean = dropWhile isHex ref
+         ok        = compere ref_clean test
+         summary   = grok ("REF: " ++ trim ref_clean) 
+                     ++ "   " ++ grok ("TEST: " ++ trim test)
+     in
+     if  ok
+     then "pass:    " ++ summary
+     else "FAIL:    " ++ summary
+
+trim = reverse . dropWhile isSpace . reverse . dropWhile isSpace
+
+compere s1 s2 = filter (not . isSpace) s1 == filter (not . isSpace) s2
+
+isHex c = c `elem` "ABCDEF0123456789abcdef"
+
+grok str 
+   = let n = length str
+         limit = 40
+     in
+     if   n >= limit
+     then str
+     else take limit (str ++ repeat ' ')
+
+deTab [] = []
+deTab (c:cs) = if c == '\t' then "  " ++ deTab cs
+               else c: deTab cs
diff --git a/VEX/orig_amd64/SortedToOrig.hs b/VEX/orig_amd64/SortedToOrig.hs
new file mode 100644
index 0000000..0d947b6
--- /dev/null
+++ b/VEX/orig_amd64/SortedToOrig.hs
@@ -0,0 +1,29 @@
+
+module Main where
+
+main 
+  = do x1 <- readFile "test2.sorted"
+       let x2 = lines x1
+           x3 = zip [1 ..] x2
+           x4 = concat (map qq x3)
+       --putStr x4
+       writeFile "test2.orig" x4
+
+
+qq :: (Int, String) -> String
+qq (n, s0) 
+   = let ws     = words s0
+         bytes  = head ws
+         rest   = unwords (tail ws)
+         bytes2 = foo bytes
+     in
+     unlines [
+        "",
+        rest,
+        ". " ++ show n ++ " 0x12345678 " ++ show (1 + (length bytes `div` 2)),
+        ". " ++ bytes2 ++ "C3"
+     ]
+
+
+foo [] = []
+foo (x:y:rest) = x:y:' ':foo rest
diff --git a/VEX/orig_amd64/test1.orig b/VEX/orig_amd64/test1.orig
new file mode 100644
index 0000000..68258c0
--- /dev/null
+++ b/VEX/orig_amd64/test1.orig
@@ -0,0 +1,5281 @@
+
+addl 1052(%rsp,%rdx,4), %eax
+. 1 0x12345678 8
+. 03 84 94 1C 04 00 00 C3
+
+addl 108(%rbx), %ecx
+. 2 0x12345678 4
+. 03 4B 6C C3
+
+addl $2, %eax
+. 3 0x12345678 4
+. 83 C0 02 C3
+
+addl $2, %edx
+. 4 0x12345678 4
+. 83 C2 02 C3
+
+addl $32, %ecx
+. 5 0x12345678 4
+. 83 C1 20 C3
+
+addl $32, %esi
+. 6 0x12345678 4
+. 83 C6 20 C3
+
+addl $49, %r12d
+. 7 0x12345678 5
+. 41 83 C4 31 C3
+
+addl $4, %ecx
+. 8 0x12345678 4
+. 83 C1 04 C3
+
+addl -4(%rbp,%rdx,4), %eax
+. 9 0x12345678 5
+. 03 44 95 FC C3
+
+addl 51640(%rax), %ecx
+. 10 0x12345678 7
+. 03 88 B8 C9 00 00 C3
+
+addl 51640(%rbp,%rax), %ecx
+. 11 0x12345678 8
+. 03 8C 05 B8 C9 00 00 C3
+
+addl 51640(%rdx), %ecx
+. 12 0x12345678 7
+. 03 8A B8 C9 00 00 C3
+
+addl 51644(%rax), %esi
+. 13 0x12345678 7
+. 03 B0 BC C9 00 00 C3
+
+addl 51644(%rdx), %esi
+. 14 0x12345678 7
+. 03 B2 BC C9 00 00 C3
+
+addl 51648(%rax), %edi
+. 15 0x12345678 7
+. 03 B8 C0 C9 00 00 C3
+
+addl 51648(%rdx), %edi
+. 16 0x12345678 7
+. 03 BA C0 C9 00 00 C3
+
+addl $5, %eax
+. 17 0x12345678 4
+. 83 C0 05 C3
+
+addl $5, %r14d
+. 18 0x12345678 5
+. 41 83 C6 05 C3
+
+addl 672(%rbp,%rax,4), %r13d
+. 19 0x12345678 9
+. 44 03 AC 85 A0 02 00 00 C3
+
+addl %ebx, %eax
+. 20 0x12345678 3
+. 01 D8 C3
+
+addl %ecx, %r9d
+. 21 0x12345678 4
+. 41 01 C9 C3
+
+addl %edx, %eax
+. 22 0x12345678 3
+. 01 D0 C3
+
+addl %r12d, %eax
+. 23 0x12345678 4
+. 44 01 E0 C3
+
+addl %r12d, %r13d
+. 24 0x12345678 4
+. 45 01 E5 C3
+
+addl %r13d, %eax
+. 25 0x12345678 4
+. 44 01 E8 C3
+
+addl (%rbp,%rax,4), %edi
+. 26 0x12345678 5
+. 03 7C 85 00 C3
+
+addq $104, %rsp
+. 27 0x12345678 5
+. 48 83 C4 68 C3
+
+addq $1304, %rsp
+. 28 0x12345678 8
+. 48 81 C4 18 05 00 00 C3
+
+addq $2104, %rsp
+. 29 0x12345678 8
+. 48 81 C4 38 08 00 00 C3
+
+addq $264, %rsp
+. 30 0x12345678 8
+. 48 81 C4 08 01 00 00 C3
+
+addq 32(%rbx), %rax
+. 31 0x12345678 5
+. 48 03 43 20 C3
+
+addq $3400, %rsp
+. 32 0x12345678 8
+. 48 81 C4 48 0D 00 00 C3
+
+addq $56, %rsp
+. 33 0x12345678 5
+. 48 83 C4 38 C3
+
+addq $72, %rsp
+. 34 0x12345678 5
+. 48 83 C4 48 C3
+
+addq $824, %rsp
+. 35 0x12345678 8
+. 48 81 C4 38 03 00 00 C3
+
+addq $8, %rsp
+. 36 0x12345678 5
+. 48 83 C4 08 C3
+
+addq %rax, %rax
+. 37 0x12345678 4
+. 48 01 C0 C3
+
+addq %rax, %rdi
+. 38 0x12345678 4
+. 48 01 C7 C3
+
+addq %rax, %rdx
+. 39 0x12345678 4
+. 48 01 C2 C3
+
+addq %rbp, %rax
+. 40 0x12345678 4
+. 48 01 E8 C3
+
+addq %rbp, %rdx
+. 41 0x12345678 4
+. 48 01 EA C3
+
+addq %rbp, %rsi
+. 42 0x12345678 4
+. 48 01 EE C3
+
+addq %rcx, %rax
+. 43 0x12345678 4
+. 48 01 C8 C3
+
+addq %rdx, %rax
+. 44 0x12345678 4
+. 48 01 D0 C3
+
+addq %rdx, %rdx
+. 45 0x12345678 4
+. 48 01 D2 C3
+
+addq %rsi, %rdx
+. 46 0x12345678 4
+. 48 01 F2 C3
+
+addw %dx, 80(%rsp,%rax,2)
+. 47 0x12345678 6
+. 66 01 54 44 50 C3
+
+andl $-2097153, %eax
+. 48 0x12345678 6
+. 25 FF FF DF FF C3
+
+andl $-2097153, %edx
+. 49 0x12345678 7
+. 81 E2 FF FF DF FF C3
+
+andl $-2097153, %r12d
+. 50 0x12345678 8
+. 41 81 E4 FF FF DF FF C3
+
+andl $-2097153, %r15d
+. 51 0x12345678 8
+. 41 81 E7 FF FF DF FF C3
+
+andl $-2097153, %r8d
+. 52 0x12345678 8
+. 41 81 E0 FF FF DF FF C3
+
+andl $255, %r8d
+. 53 0x12345678 8
+. 41 81 E0 FF 00 00 00 C3
+
+andl $-2, %eax
+. 54 0x12345678 4
+. 83 E0 FE C3
+
+andl $31, %ecx
+. 55 0x12345678 4
+. 83 E1 1F C3
+
+andl $32767, %eax
+. 56 0x12345678 6
+. 25 FF 7F 00 00 C3
+
+andl %edx, %eax
+. 57 0x12345678 3
+. 21 D0 C3
+
+andl %edx, (%rbx,%rax,4)
+. 58 0x12345678 4
+. 21 14 83 C3
+
+andl %esi, %edx
+. 59 0x12345678 3
+. 21 F2 C3
+
+call lalala
+. 60 0x12345678 5
+. E8 62 16 00 00
+
+cltd
+. 61 0x12345678 2
+. 99 C3
+
+cltq
+. 62 0x12345678 3
+. 48 98 C3
+
+cmova %ecx, %edx
+. 63 0x12345678 4
+. 0F 47 D1 C3
+
+cmovae %eax, %edi
+. 64 0x12345678 4
+. 0F 43 F8 C3
+
+cmovae %eax, %esi
+. 65 0x12345678 4
+. 0F 43 F0 C3
+
+cmovg %eax, %ecx
+. 66 0x12345678 4
+. 0F 4F C8 C3
+
+cmovg %eax, %edi
+. 67 0x12345678 4
+. 0F 4F F8 C3
+
+cmovg %eax, %esi
+. 68 0x12345678 4
+. 0F 4F F0 C3
+
+cmovg %eax, %r12d
+. 69 0x12345678 5
+. 44 0F 4F E0 C3
+
+cmovge %eax, %ecx
+. 70 0x12345678 4
+. 0F 4D C8 C3
+
+cmovg %r13d, %eax
+. 71 0x12345678 5
+. 41 0F 4F C5 C3
+
+cmovl %eax, %r13d
+. 72 0x12345678 5
+. 44 0F 4C E8 C3
+
+cmovle %eax, %ecx
+. 73 0x12345678 4
+. 0F 4E C8 C3
+
+cmovle %eax, %esi
+. 74 0x12345678 4
+. 0F 4E F0 C3
+
+cmovne %eax, %edx
+. 75 0x12345678 4
+. 0F 45 D0 C3
+
+cmovne %edi, %edx
+. 76 0x12345678 4
+. 0F 45 D7 C3
+
+cmpb $0, 128(%rbp,%rax)
+. 77 0x12345678 9
+. 80 BC 05 80 00 00 00 00 C3
+
+cmpb $0, 128(%rdi,%rdx)
+. 78 0x12345678 9
+. 80 BC 17 80 00 00 00 00 C3
+
+cmpb $0, 19706(%rbp,%rax)
+. 79 0x12345678 9
+. 80 BC 05 FA 4C 00 00 00 C3
+
+cmpb $0, 2112(%rsp,%rax)
+. 80 0x12345678 9
+. 80 BC 04 40 08 00 00 00 C3
+
+cmpb $0, 2112(%rsp,%rsi)
+. 81 0x12345678 9
+. 80 BC 34 40 08 00 00 00 C3
+
+cmpb $0, 32(%rsp,%rax)
+. 82 0x12345678 6
+. 80 7C 04 20 00 C3
+
+cmpb %al, %r10b
+. 83 0x12345678 4
+. 41 38 C2 C3
+
+cmpb %cl, %sil
+. 84 0x12345678 4
+. 40 38 CE C3
+
+cmpb %dl, %cl
+. 85 0x12345678 3
+. 38 D1 C3
+
+cmpb %sil, %cl
+. 86 0x12345678 4
+. 40 38 F1 C3
+
+cmpb %sil, %dl
+. 87 0x12345678 4
+. 40 38 F2 C3
+
+cmpb %sil, (%rsp)
+. 88 0x12345678 5
+. 40 38 34 24 C3
+
+cmpl $0, 1088(%rsp,%rdx,4)
+. 89 0x12345678 9
+. 83 BC 94 40 04 00 00 00 C3
+
+cmpl $0, 108(%rbx)
+. 90 0x12345678 5
+. 83 7B 6C 00 C3
+
+cmpl $0, 108(%rdi)
+. 91 0x12345678 5
+. 83 7F 6C 00 C3
+
+cmpl $0, 12(%rsp)
+. 92 0x12345678 6
+. 83 7C 24 0C 00 C3
+
+cmpl $0, 20(%rsp)
+. 93 0x12345678 6
+. 83 7C 24 14 00 C3
+
+cmpl $0, 24(%rsp)
+. 94 0x12345678 6
+. 83 7C 24 18 00 C3
+
+cmpl $0, 32(%rsp,%rax,4)
+. 95 0x12345678 6
+. 83 7C 84 20 00 C3
+
+cmpl $0, 644(%rdi)
+. 96 0x12345678 8
+. 83 BF 84 02 00 00 00 C3
+
+cmpl $0, 668(%rbp)
+. 97 0x12345678 8
+. 83 BD 9C 02 00 00 00 C3
+
+cmpl $0, (%r14,%rax,4)
+. 98 0x12345678 6
+. 41 83 3C 86 00 C3
+
+cmpl $0, (%rax)
+. 99 0x12345678 4
+. 83 38 00 C3
+
+cmpl $0, (%rbx,%rax,4)
+. 100 0x12345678 5
+. 83 3C 83 00 C3
+
+cmpl $0, (%rdi)
+. 101 0x12345678 4
+. 83 3F 00 C3
+
+cmpl $0, (%rsi)
+. 102 0x12345678 4
+. 83 3E 00 C3
+
+cmpl $101, %ecx
+. 103 0x12345678 4
+. 83 F9 65 C3
+
+cmpl 1088(%rsp,%rdi,4), %r12d
+. 104 0x12345678 9
+. 44 3B A4 BC 40 04 00 00 C3
+
+cmpl 1088(%rsp,%rdx,4), %r12d
+. 105 0x12345678 9
+. 44 3B A4 94 40 04 00 00 C3
+
+cmpl 108(%rbx), %edx
+. 106 0x12345678 4
+. 3B 53 6C C3
+
+cmpl 108(%rbx), %r9d
+. 107 0x12345678 5
+. 44 3B 4B 6C C3
+
+cmpl $1199, 668(%rbp)
+. 108 0x12345678 11
+. 81 BD 9C 02 00 00 AF 04 00 00 C3
+
+cmpl 124(%rbx), %r8d
+. 109 0x12345678 5
+. 44 3B 43 7C C3
+
+cmpl 12(%rsp), %edx
+. 110 0x12345678 5
+. 3B 54 24 0C C3
+
+cmpl $-1, 48(%rbx)
+. 111 0x12345678 5
+. 83 7B 30 FF C3
+
+cmpl $14, %r13d
+. 112 0x12345678 5
+. 41 83 FD 0E C3
+
+cmpl $15, %edi
+. 113 0x12345678 4
+. 83 FF 0F C3
+
+cmpl $15, %r8d
+. 114 0x12345678 5
+. 41 83 F8 0F C3
+
+cmpl $1, 656(%rbx)
+. 115 0x12345678 8
+. 83 BB 90 02 00 00 01 C3
+
+cmpl $1, 660(%rbx)
+. 116 0x12345678 8
+. 83 BB 94 02 00 00 01 C3
+
+cmpl $1, 660(%rdi)
+. 117 0x12345678 8
+. 83 BF 94 02 00 00 01 C3
+
+cmpl 16(%rsp), %edx
+. 118 0x12345678 5
+. 3B 54 24 10 C3
+
+cmpl $17, %r12d
+. 119 0x12345678 5
+. 41 83 FC 11 C3
+
+cmpl $18002, 24(%rsp)
+. 120 0x12345678 9
+. 81 7C 24 18 52 46 00 00 C3
+
+cmpl $199, 668(%rbp)
+. 121 0x12345678 11
+. 81 BD 9C 02 00 00 C7 00 00 00 C3
+
+cmpl $19, %eax
+. 122 0x12345678 4
+. 83 F8 13 C3
+
+cmpl $1, %ebx
+. 123 0x12345678 4
+. 83 FB 01 C3
+
+cmpl $-1, %ecx
+. 124 0x12345678 4
+. 83 F9 FF C3
+
+cmpl $1, %edi
+. 125 0x12345678 4
+. 83 FF 01 C3
+
+cmpl $1, %edx
+. 126 0x12345678 4
+. 83 FA 01 C3
+
+cmpl $-1, %esi
+. 127 0x12345678 4
+. 83 FE FF C3
+
+cmpl $1, %esi
+. 128 0x12345678 4
+. 83 FE 01 C3
+
+cmpl $-1, %r13d
+. 129 0x12345678 5
+. 41 83 FD FF C3
+
+cmpl $1, %r13d
+. 130 0x12345678 5
+. 41 83 FD 01 C3
+
+cmpl $-1, %r9d
+. 131 0x12345678 5
+. 41 83 F9 FF C3
+
+cmpl $-1, (%rbx,%rax,4)
+. 132 0x12345678 5
+. 83 3C 83 FF C3
+
+cmpl 20(%rsp), %edi
+. 133 0x12345678 5
+. 3B 7C 24 14 C3
+
+cmpl 20(%rsp), %edx
+. 134 0x12345678 5
+. 3B 54 24 14 C3
+
+cmpl $2399, 668(%rbp)
+. 135 0x12345678 11
+. 81 BD 9C 02 00 00 5F 09 00 00 C3
+
+cmpl 24(%rsp), %ebx
+. 136 0x12345678 5
+. 3B 5C 24 18 C3
+
+cmpl 24(%rsp), %edi
+. 137 0x12345678 5
+. 3B 7C 24 18 C3
+
+cmpl $254, %ebx
+. 138 0x12345678 7
+. 81 FB FE 00 00 00 C3
+
+cmpl $255, %ebx
+. 139 0x12345678 7
+. 81 FB FF 00 00 00 C3
+
+cmpl $255, %ecx
+. 140 0x12345678 7
+. 81 F9 FF 00 00 00 C3
+
+cmpl $255, %edi
+. 141 0x12345678 7
+. 81 FF FF 00 00 00 C3
+
+cmpl $255, %edx
+. 142 0x12345678 7
+. 81 FA FF 00 00 00 C3
+
+cmpl $255, %r12d
+. 143 0x12345678 8
+. 41 81 FC FF 00 00 00 C3
+
+cmpl $256, %edi
+. 144 0x12345678 7
+. 81 FF 00 01 00 00 C3
+
+cmpl $256, %esi
+. 145 0x12345678 7
+. 81 FE 00 01 00 00 C3
+
+cmpl $2, 656(%rbp)
+. 146 0x12345678 8
+. 83 BD 90 02 00 00 02 C3
+
+cmpl $2, 656(%rdi)
+. 147 0x12345678 8
+. 83 BF 90 02 00 00 02 C3
+
+cmpl 28(%rsp), %edi
+. 148 0x12345678 5
+. 3B 7C 24 1C C3
+
+cmpl 28(%rsp), %r12d
+. 149 0x12345678 6
+. 44 3B 64 24 1C C3
+
+cmpl 28(%rsp), %r13d
+. 150 0x12345678 6
+. 44 3B 6C 24 1C C3
+
+cmpl $2, %ebx
+. 151 0x12345678 4
+. 83 FB 02 C3
+
+cmpl $2, %r13d
+. 152 0x12345678 5
+. 41 83 FD 02 C3
+
+cmpl $31, %edi
+. 153 0x12345678 4
+. 83 FF 1F C3
+
+cmpl $3, 20(%rsp)
+. 154 0x12345678 6
+. 83 7C 24 14 03 C3
+
+cmpl $3, 32(%rsp)
+. 155 0x12345678 6
+. 83 7C 24 20 03 C3
+
+cmpl $33, %ebx
+. 156 0x12345678 4
+. 83 FB 21 C3
+
+cmpl $33, %r9d
+. 157 0x12345678 5
+. 41 83 F9 21 C3
+
+cmpl $3, %eax
+. 158 0x12345678 4
+. 83 F8 03 C3
+
+cmpl $3, %r13d
+. 159 0x12345678 5
+. 41 83 FD 03 C3
+
+cmpl $3, %r8d
+. 160 0x12345678 5
+. 41 83 F8 03 C3
+
+cmpl $3, %r9d
+. 161 0x12345678 5
+. 41 83 F9 03 C3
+
+cmpl $49, %eax
+. 162 0x12345678 4
+. 83 F8 31 C3
+
+cmpl $599, 668(%rbp)
+. 163 0x12345678 11
+. 81 BD 9C 02 00 00 57 02 00 00 C3
+
+cmpl $5, %ebx
+. 164 0x12345678 4
+. 83 FB 05 C3
+
+cmpl 64(%rsp,%rax,4), %r12d
+. 165 0x12345678 6
+. 44 3B 64 84 40 C3
+
+cmpl 64(%rsp,%rdi,4), %r12d
+. 166 0x12345678 6
+. 44 3B 64 BC 40 C3
+
+cmpl 64(%rsp,%rdx,4), %eax
+. 167 0x12345678 5
+. 3B 44 94 40 C3
+
+cmpl $65534, %eax
+. 168 0x12345678 6
+. 3D FE FF 00 00 C3
+
+cmpl $65534, %edx
+. 169 0x12345678 7
+. 81 FA FE FF 00 00 C3
+
+cmpl $65535, %eax
+. 170 0x12345678 6
+. 3D FF FF 00 00 C3
+
+cmpl $65536, %ebx
+. 171 0x12345678 7
+. 81 FB 00 00 01 00 C3
+
+cmpl 668(%rbp), %edx
+. 172 0x12345678 7
+. 3B 95 9C 02 00 00 C3
+
+cmpl 668(%rbp), %r12d
+. 173 0x12345678 8
+. 44 3B A5 9C 02 00 00 C3
+
+cmpl $6, %r14d
+. 174 0x12345678 5
+. 41 83 FE 06 C3
+
+cmpl $7, 644(%r11)
+. 175 0x12345678 9
+. 41 83 BB 84 02 00 00 07 C3
+
+cmpl $7, 644(%rbp)
+. 176 0x12345678 8
+. 83 BD 84 02 00 00 07 C3
+
+cmpl $7, 644(%rbx)
+. 177 0x12345678 8
+. 83 BB 84 02 00 00 07 C3
+
+cmpl $7, 644(%rdi)
+. 178 0x12345678 8
+. 83 BF 84 02 00 00 07 C3
+
+cmpl $7, 644(%rsi)
+. 179 0x12345678 8
+. 83 BE 84 02 00 00 07 C3
+
+cmpl $7, %r14d
+. 180 0x12345678 5
+. 41 83 FE 07 C3
+
+cmpl $99, 12(%rsp)
+. 181 0x12345678 6
+. 83 7C 24 0C 63 C3
+
+cmpl $99, 24(%rsp)
+. 182 0x12345678 6
+. 83 7C 24 18 63 C3
+
+cmpl $9999, %ebp
+. 183 0x12345678 7
+. 81 FD 0F 27 00 00 C3
+
+cmpl $9, %eax
+. 184 0x12345678 4
+. 83 F8 09 C3
+
+cmpl %eax, 64(%rsp,%rdx,4)
+. 185 0x12345678 5
+. 39 44 94 40 C3
+
+cmpl %eax, %ecx
+. 186 0x12345678 3
+. 39 C1 C3
+
+cmpl %eax, %edi
+. 187 0x12345678 3
+. 39 C7 C3
+
+cmpl %eax, %edx
+. 188 0x12345678 3
+. 39 C2 C3
+
+cmpl %eax, %esi
+. 189 0x12345678 3
+. 39 C6 C3
+
+cmpl %eax, %r13d
+. 190 0x12345678 4
+. 41 39 C5 C3
+
+cmpl %eax, %r8d
+. 191 0x12345678 4
+. 41 39 C0 C3
+
+cmpl %ebp, %edi
+. 192 0x12345678 3
+. 39 EF C3
+
+cmpl %ebp, %r12d
+. 193 0x12345678 4
+. 41 39 EC C3
+
+cmpl %ecx, %eax
+. 194 0x12345678 3
+. 39 C8 C3
+
+cmpl %ecx, %r13d
+. 195 0x12345678 4
+. 41 39 CD C3
+
+cmpl %edi, %eax
+. 196 0x12345678 3
+. 39 F8 C3
+
+cmpl %edi, %r10d
+. 197 0x12345678 4
+. 41 39 FA C3
+
+cmpl %edx, %ecx
+. 198 0x12345678 3
+. 39 D1 C3
+
+cmpl %edx, %edi
+. 199 0x12345678 3
+. 39 D7 C3
+
+cmpl %edx, 0x12345678(%rip)
+. 200 0x12345678 7
+. 39 15 78 56 34 12 C3
+
+cmpl %edx, %r12d
+. 201 0x12345678 4
+. 41 39 D4 C3
+
+cmpl %edx, %r9d
+. 202 0x12345678 4
+. 41 39 D1 C3
+
+cmpl %edx, (%rcx,%rax,4)
+. 203 0x12345678 4
+. 39 14 81 C3
+
+cmpl %r10d, %r9d
+. 204 0x12345678 4
+. 45 39 D1 C3
+
+cmpl (%r10,%rax,4), %r8d
+. 205 0x12345678 5
+. 45 3B 04 82 C3
+
+cmpl %r11d, %ecx
+. 206 0x12345678 4
+. 44 39 D9 C3
+
+cmpl %r12d, %eax
+. 207 0x12345678 4
+. 44 39 E0 C3
+
+cmpl %r12d, %ebp
+. 208 0x12345678 4
+. 44 39 E5 C3
+
+cmpl %r12d, %edi
+. 209 0x12345678 4
+. 44 39 E7 C3
+
+cmpl %r13d, 16(%rsp)
+. 210 0x12345678 6
+. 44 39 6C 24 10 C3
+
+cmpl %r13d, %eax
+. 211 0x12345678 4
+. 44 39 E8 C3
+
+cmpl %r13d, %ebp
+. 212 0x12345678 4
+. 44 39 ED C3
+
+cmpl %r13d, %edi
+. 213 0x12345678 4
+. 44 39 EF C3
+
+cmpl %r13d, %edx
+. 214 0x12345678 4
+. 44 39 EA C3
+
+cmpl %r13d, %r12d
+. 215 0x12345678 4
+. 45 39 EC C3
+
+cmpl %r14d, %eax
+. 216 0x12345678 4
+. 44 39 F0 C3
+
+cmpl %r14d, %ebx
+. 217 0x12345678 4
+. 44 39 F3 C3
+
+cmpl %r14d, %edi
+. 218 0x12345678 4
+. 44 39 F7 C3
+
+cmpl %r14d, %r12d
+. 219 0x12345678 4
+. 45 39 F4 C3
+
+cmpl %r14d, %r13d
+. 220 0x12345678 4
+. 45 39 F5 C3
+
+cmpl %r15d, %r14d
+. 221 0x12345678 4
+. 45 39 FE C3
+
+cmpl %r8d, %edi
+. 222 0x12345678 4
+. 44 39 C7 C3
+
+cmpl %r8d, %esi
+. 223 0x12345678 4
+. 44 39 C6 C3
+
+cmpl %r9d, %edx
+. 224 0x12345678 4
+. 44 39 CA C3
+
+cmpw %ax, %r10w
+. 225 0x12345678 5
+. 66 41 39 C2 C3
+
+decl 12(%rsp)
+. 226 0x12345678 5
+. FF 4C 24 0C C3
+
+decl 24(%rsp)
+. 227 0x12345678 5
+. FF 4C 24 18 C3
+
+decl 32(%rsp,%rax,4)
+. 228 0x12345678 5
+. FF 4C 84 20 C3
+
+decl %eax
+. 229 0x12345678 3
+. FF C8 C3
+
+decl %ebp
+. 230 0x12345678 3
+. FF CD C3
+
+decl %ebx
+. 231 0x12345678 3
+. FF CB C3
+
+decl %ecx
+. 232 0x12345678 3
+. FF C9 C3
+
+decl %edi
+. 233 0x12345678 3
+. FF CF C3
+
+decl %esi
+. 234 0x12345678 3
+. FF CE C3
+
+decl %r10d
+. 235 0x12345678 4
+. 41 FF CA C3
+
+decl %r11d
+. 236 0x12345678 4
+. 41 FF CB C3
+
+decl %r12d
+. 237 0x12345678 4
+. 41 FF CC C3
+
+decl %r8d
+. 238 0x12345678 4
+. 41 FF C8 C3
+
+decl (%r9)
+. 239 0x12345678 4
+. 41 FF 09 C3
+
+idivl %ebx
+. 240 0x12345678 3
+. F7 FB C3
+
+imull $7621, 8(%rsp), %eax
+. 241 0x12345678 9
+. 69 44 24 08 C5 1D 00 00 C3
+
+imull %eax, %r12d
+. 242 0x12345678 5
+. 44 0F AF E0 C3
+
+imulq $1431655766, %rax, %rax
+. 243 0x12345678 8
+. 48 69 C0 56 55 55 55 C3
+
+imulq %rdx, %rax
+. 244 0x12345678 5
+. 48 0F AF C2 C3
+
+incl 1056(%rsp,%rax,4)
+. 245 0x12345678 8
+. FF 84 84 20 04 00 00 C3
+
+incl 116(%r11)
+. 246 0x12345678 5
+. 41 FF 43 74 C3
+
+incl 116(%rdi)
+. 247 0x12345678 4
+. FF 47 74 C3
+
+incl 116(%rsi)
+. 248 0x12345678 4
+. FF 46 74 C3
+
+incl 124(%rdi)
+. 249 0x12345678 4
+. FF 47 7C C3
+
+incl 12(%rsp)
+. 250 0x12345678 5
+. FF 44 24 0C C3
+
+incl 24(%rsp)
+. 251 0x12345678 5
+. FF 44 24 18 C3
+
+incl 45448(%rbp,%rax,4)
+. 252 0x12345678 8
+. FF 84 85 88 B1 00 00 C3
+
+incl 45448(%rbp,%rdx,4)
+. 253 0x12345678 8
+. FF 84 95 88 B1 00 00 C3
+
+incl 48(%rsp,%rsi,4)
+. 254 0x12345678 5
+. FF 44 B4 30 C3
+
+incl 672(%rbx)
+. 255 0x12345678 7
+. FF 83 A0 02 00 00 C3
+
+incl 672(%rbx,%rax,4)
+. 256 0x12345678 8
+. FF 84 83 A0 02 00 00 C3
+
+incl 676(%rbx)
+. 257 0x12345678 7
+. FF 83 A4 02 00 00 C3
+
+incl 676(%rbx,%rax,4)
+. 258 0x12345678 8
+. FF 84 83 A4 02 00 00 C3
+
+incl %eax
+. 259 0x12345678 3
+. FF C0 C3
+
+incl %ebp
+. 260 0x12345678 3
+. FF C5 C3
+
+incl %ebx
+. 261 0x12345678 3
+. FF C3 C3
+
+incl %ecx
+. 262 0x12345678 3
+. FF C1 C3
+
+incl %edi
+. 263 0x12345678 3
+. FF C7 C3
+
+incl %edx
+. 264 0x12345678 3
+. FF C2 C3
+
+incl %esi
+. 265 0x12345678 3
+. FF C6 C3
+
+incl %r10d
+. 266 0x12345678 4
+. 41 FF C2 C3
+
+incl %r11d
+. 267 0x12345678 4
+. 41 FF C3 C3
+
+incl %r12d
+. 268 0x12345678 4
+. 41 FF C4 C3
+
+incl %r13d
+. 269 0x12345678 4
+. 41 FF C5 C3
+
+incl %r14d
+. 270 0x12345678 4
+. 41 FF C6 C3
+
+incl %r8d
+. 271 0x12345678 4
+. 41 FF C0 C3
+
+incl %r9d
+. 272 0x12345678 4
+. 41 FF C1 C3
+
+incl (%rbp,%rax,4)
+. 273 0x12345678 5
+. FF 44 85 00 C3
+
+incq %rdx
+. 274 0x12345678 4
+. 48 FF C2 C3
+
+ja lalala
+. 275 0x12345678 6
+. 0F 87 DD 12 00 00
+
+jbe lalala
+. 276 0x12345678 6
+. 0F 86 D7 12 00 00
+
+je lalala
+. 277 0x12345678 6
+. 0F 84 D1 12 00 00
+
+jge lalala
+. 278 0x12345678 6
+. 0F 8D CB 12 00 00
+
+jg lalala
+. 279 0x12345678 6
+. 0F 8F C5 12 00 00
+
+jle lalala
+. 280 0x12345678 6
+. 0F 8E BF 12 00 00
+
+jl lalala
+. 281 0x12345678 6
+. 0F 8C B9 12 00 00
+
+jmp lalala
+. 282 0x12345678 5
+. E9 B4 12 00 00
+
+jne lalala
+. 283 0x12345678 6
+. 0F 85 AE 12 00 00
+
+jns lalala
+. 284 0x12345678 6
+. 0F 89 A8 12 00 00
+
+js lalala
+. 285 0x12345678 6
+. 0F 88 A2 12 00 00
+
+leal -1(%r11), %edi
+. 286 0x12345678 5
+. 41 8D 7B FF C3
+
+leal -1(%r12,%rax), %eax
+. 287 0x12345678 6
+. 41 8D 44 04 FF C3
+
+leal 1(%r13), %eax
+. 288 0x12345678 5
+. 41 8D 45 01 C3
+
+leal 1(%r13), %edx
+. 289 0x12345678 5
+. 41 8D 55 01 C3
+
+leal 1(%r8), %edx
+. 290 0x12345678 5
+. 41 8D 50 01 C3
+
+leal 1(%rax), %esi
+. 291 0x12345678 4
+. 8D 70 01 C3
+
+leal -1(%rax), %r14d
+. 292 0x12345678 5
+. 44 8D 70 FF C3
+
+leal 1(%rax,%r8), %r8d
+. 293 0x12345678 6
+. 46 8D 44 00 01 C3
+
+leal 1(%rax,%rcx), %ecx
+. 294 0x12345678 5
+. 8D 4C 08 01 C3
+
+leal 1(%rbp), %esi
+. 295 0x12345678 4
+. 8D 75 01 C3
+
+leal -1(%rbx), %eax
+. 296 0x12345678 4
+. 8D 43 FF C3
+
+leal -1(%rcx), %eax
+. 297 0x12345678 4
+. 8D 41 FF C3
+
+leal 1(%rcx), %eax
+. 298 0x12345678 4
+. 8D 41 01 C3
+
+leal 1(%rdi), %eax
+. 299 0x12345678 4
+. 8D 47 01 C3
+
+leal 1(%rdi), %ecx
+. 300 0x12345678 4
+. 8D 4F 01 C3
+
+leal -1(%rdx), %eax
+. 301 0x12345678 4
+. 8D 42 FF C3
+
+leal 1(%rdx), %eax
+. 302 0x12345678 4
+. 8D 42 01 C3
+
+leal -1(%rsi), %ebp
+. 303 0x12345678 4
+. 8D 6E FF C3
+
+leal -1(%rsi), %r12d
+. 304 0x12345678 5
+. 44 8D 66 FF C3
+
+leal -1(%rsi), %r9d
+. 305 0x12345678 5
+. 44 8D 4E FF C3
+
+leal -2(%rbx), %eax
+. 306 0x12345678 4
+. 8D 43 FE C3
+
+leal -2(%rdi), %eax
+. 307 0x12345678 4
+. 8D 47 FE C3
+
+leal 31(%r13), %eax
+. 308 0x12345678 5
+. 41 8D 45 1F C3
+
+leal 34(%rbp), %edx
+. 309 0x12345678 4
+. 8D 55 22 C3
+
+leal 35(%rbp), %eax
+. 310 0x12345678 4
+. 8D 45 23 C3
+
+leal -3(%rbx), %eax
+. 311 0x12345678 4
+. 8D 43 FD C3
+
+leal 48(%rax), %esi
+. 312 0x12345678 4
+. 8D 70 30 C3
+
+leal -4(%r14), %edi
+. 313 0x12345678 5
+. 41 8D 7E FC C3
+
+leal 4(%rdi), %ecx
+. 314 0x12345678 4
+. 8D 4F 04 C3
+
+leal 7(%rcx), %eax
+. 315 0x12345678 4
+. 8D 41 07 C3
+
+leal 8(%r8), %r11d
+. 316 0x12345678 5
+. 45 8D 58 08 C3
+
+leal (%r10,%r13), %ecx
+. 317 0x12345678 5
+. 43 8D 0C 2A C3
+
+leal (%r12,%r8), %eax
+. 318 0x12345678 5
+. 43 8D 04 04 C3
+
+leal (%r13,%rdi,2), %edx
+. 319 0x12345678 6
+. 41 8D 54 7D 00 C3
+
+leal (%r13,%rsi), %eax
+. 320 0x12345678 6
+. 41 8D 44 35 00 C3
+
+leal (%r14,%r13), %eax
+. 321 0x12345678 5
+. 43 8D 04 2E C3
+
+leal (%r8,%r10), %eax
+. 322 0x12345678 5
+. 43 8D 04 10 C3
+
+leal (%r8,%rdx), %eax
+. 323 0x12345678 5
+. 41 8D 04 10 C3
+
+leal (%r9,%r15), %edi
+. 324 0x12345678 5
+. 43 8D 3C 39 C3
+
+leal (%rax,%r10), %eax
+. 325 0x12345678 5
+. 42 8D 04 10 C3
+
+leal (%rax,%r14), %r15d
+. 326 0x12345678 5
+. 46 8D 3C 30 C3
+
+leal (%rax,%r8), %eax
+. 327 0x12345678 5
+. 42 8D 04 00 C3
+
+leal (%rax,%r9), %eax
+. 328 0x12345678 5
+. 42 8D 04 08 C3
+
+leal (%rax,%rax,2), %eax
+. 329 0x12345678 4
+. 8D 04 40 C3
+
+leal (%rax,%rdi), %eax
+. 330 0x12345678 4
+. 8D 04 38 C3
+
+leal (%rax,%rdx), %eax
+. 331 0x12345678 4
+. 8D 04 10 C3
+
+leal (%rcx,%r9), %eax
+. 332 0x12345678 5
+. 42 8D 04 09 C3
+
+leal (%rdi,%r13), %eax
+. 333 0x12345678 5
+. 42 8D 04 2F C3
+
+leal (%rdx,%r13), %eax
+. 334 0x12345678 5
+. 42 8D 04 2A C3
+
+leal (%rdx,%r8), %edx
+. 335 0x12345678 5
+. 42 8D 14 02 C3
+
+leal (%rdx,%r9), %edx
+. 336 0x12345678 5
+. 42 8D 14 0A C3
+
+leal (%rdx,%rcx), %eax
+. 337 0x12345678 4
+. 8D 04 0A C3
+
+leal (%rdx,%rdi), %edx
+. 338 0x12345678 4
+. 8D 14 3A C3
+
+leal (%rsi,%r13), %eax
+. 339 0x12345678 5
+. 42 8D 04 2E C3
+
+leal (%rsi,%r15), %eax
+. 340 0x12345678 5
+. 42 8D 04 3E C3
+
+leal (%rsi,%rcx), %eax
+. 341 0x12345678 4
+. 8D 04 0E C3
+
+leal (%rsi,%rsi,2), %eax
+. 342 0x12345678 4
+. 8D 04 76 C3
+
+leaq 1(%r11), %rdx
+. 343 0x12345678 5
+. 49 8D 53 01 C3
+
+leaq 20(%rsp), %rax
+. 344 0x12345678 6
+. 48 8D 44 24 14 C3
+
+leaq 37696(%rcx,%rax), %r10
+. 345 0x12345678 9
+. 4C 8D 94 01 40 93 00 00 C3
+
+leaq 37696(%rcx,%rax), %r9
+. 346 0x12345678 9
+. 4C 8D 8C 01 40 93 00 00 C3
+
+leaq 37708(%rbp,%rax,2), %rdi
+. 347 0x12345678 9
+. 48 8D BC 45 4C 93 00 00 C3
+
+leaq 37708(%rbp,%rdi,2), %rdi
+. 348 0x12345678 9
+. 48 8D BC 7D 4C 93 00 00 C3
+
+leaq 37708(%rbp,%rdi,2), %rsi
+. 349 0x12345678 9
+. 48 8D B4 7D 4C 93 00 00 C3
+
+leaq 39256(%rbp,%rax,8), %r8
+. 350 0x12345678 9
+. 4C 8D 84 C5 58 99 00 00 C3
+
+leaq 39256(%rbp,%rdi,8), %rdi
+. 351 0x12345678 9
+. 48 8D BC FD 58 99 00 00 C3
+
+leaq 45448(%rbp,%rdi,8), %rsi
+. 352 0x12345678 9
+. 48 8D B4 FD 88 B1 00 00 C3
+
+leaq (%rax,%rax), %rdx
+. 353 0x12345678 5
+. 48 8D 14 00 C3
+
+leaq (%rax,%rdx), %rax
+. 354 0x12345678 5
+. 48 8D 04 10 C3
+
+leaq (%rbp,%rax,2), %r13
+. 355 0x12345678 6
+. 4C 8D 6C 45 00 C3
+
+leaq (%rbp,%rax,2), %rax
+. 356 0x12345678 6
+. 48 8D 44 45 00 C3
+
+leaq (%rbp,%rax,2), %rcx
+. 357 0x12345678 6
+. 48 8D 4C 45 00 C3
+
+leaq (%rbp,%rax,2), %rdx
+. 358 0x12345678 6
+. 48 8D 54 45 00 C3
+
+leaq (%rbp,%rax), %rdx
+. 359 0x12345678 6
+. 48 8D 54 05 00 C3
+
+leaq (%rbp,%rdx,2), %rdx
+. 360 0x12345678 6
+. 48 8D 54 55 00 C3
+
+leaq (%rdx,%rsi), %rdx
+. 361 0x12345678 5
+. 48 8D 14 32 C3
+
+movb $0, 2112(%rsp,%rax)
+. 362 0x12345678 9
+. C6 84 04 40 08 00 00 00 C3
+
+movb $0, 32(%rsp,%rax)
+. 363 0x12345678 6
+. C6 44 04 20 00 C3
+
+movb $0, 37450(%rax,%rcx)
+. 364 0x12345678 9
+. C6 84 08 4A 92 00 00 00 C3
+
+movb $1, 2112(%rsp,%rax)
+. 365 0x12345678 9
+. C6 84 04 40 08 00 00 01 C3
+
+movb $1, 32(%rsp,%rcx)
+. 366 0x12345678 6
+. C6 44 0C 20 01 C3
+
+movb $15, 37450(%rax,%rcx)
+. 367 0x12345678 9
+. C6 84 08 4A 92 00 00 0F C3
+
+movb $15, 37708(%rax,%rdx)
+. 368 0x12345678 9
+. C6 84 10 4C 93 00 00 0F C3
+
+movb %al, 1(%rsp)
+. 369 0x12345678 5
+. 88 44 24 01 C3
+
+movb %al, 384(%rdi,%rdx)
+. 370 0x12345678 8
+. 88 84 17 80 01 00 00 C3
+
+movb %al, (%rcx,%rdx)
+. 371 0x12345678 4
+. 88 04 11 C3
+
+movb %al, (%rdx)
+. 372 0x12345678 3
+. 88 02 C3
+
+movb %cl, 32(%rsp)
+. 373 0x12345678 5
+. 88 4C 24 20 C3
+
+movb %cl, (%rsp)
+. 374 0x12345678 4
+. 88 0C 24 C3
+
+movb %dil, 32(%rsp,%rax)
+. 375 0x12345678 6
+. 40 88 7C 04 20 C3
+
+movb %dl, 1704(%rbp,%rax)
+. 376 0x12345678 8
+. 88 94 05 A8 06 00 00 C3
+
+movb %dl, 32(%rsp,%rax)
+. 377 0x12345678 5
+. 88 54 04 20 C3
+
+movb %dl, (%rax,%rsi)
+. 378 0x12345678 4
+. 88 14 30 C3
+
+movb %r8b, 19706(%rbp,%rax)
+. 379 0x12345678 9
+. 44 88 84 05 FA 4C 00 00 C3
+
+movb %r9b, (%rsp,%rax)
+. 380 0x12345678 5
+. 44 88 0C 04 C3
+
+mov %eax, %eax
+. 381 0x12345678 3
+. 89 C0 C3
+
+mov %edi, %eax
+. 382 0x12345678 3
+. 89 F8 C3
+
+mov %esi, %eax
+. 383 0x12345678 3
+. 89 F0 C3
+
+movl $0, 1056(%rsp,%rax,4)
+. 384 0x12345678 12
+. C7 84 84 20 04 00 00 00 00 00 00 C3
+
+movl $0, 116(%rdi)
+. 385 0x12345678 8
+. C7 47 74 00 00 00 00 C3
+
+movl $0, 124(%rdi)
+. 386 0x12345678 8
+. C7 47 7C 00 00 00 00 C3
+
+movl $0, 12(%rsp)
+. 387 0x12345678 9
+. C7 44 24 0C 00 00 00 00 C3
+
+movl $0, 24(%rsp)
+. 388 0x12345678 9
+. C7 44 24 18 00 00 00 00 C3
+
+movl $0, 28(%rsp)
+. 389 0x12345678 9
+. C7 44 24 1C 00 00 00 00 C3
+
+movl $0, 45448(%rbp,%rax,4)
+. 390 0x12345678 12
+. C7 84 85 88 B1 00 00 00 00 00 00 C3
+
+movl $0, 48(%rsp,%rax,4)
+. 391 0x12345678 9
+. C7 44 84 30 00 00 00 00 C3
+
+movl $0, 640(%rdi)
+. 392 0x12345678 11
+. C7 87 80 02 00 00 00 00 00 00 C3
+
+movl $0, 644(%rdi)
+. 393 0x12345678 11
+. C7 87 84 02 00 00 00 00 00 00 C3
+
+movl $0, 672(%rbx,%rax,4)
+. 394 0x12345678 12
+. C7 84 83 A0 02 00 00 00 00 00 00 C3
+
+movl $0, 8(%rsp)
+. 395 0x12345678 9
+. C7 44 24 08 00 00 00 00 C3
+
+movl $0, %eax
+. 396 0x12345678 6
+. B8 00 00 00 00 C3
+
+movl $0, %ebx
+. 397 0x12345678 6
+. BB 00 00 00 00 C3
+
+movl $0, %ecx
+. 398 0x12345678 6
+. B9 00 00 00 00 C3
+
+movl $0, %edi
+. 399 0x12345678 6
+. BF 00 00 00 00 C3
+
+movl $0, %edx
+. 400 0x12345678 6
+. BA 00 00 00 00 C3
+
+movl $0, %r12d
+. 401 0x12345678 7
+. 41 BC 00 00 00 00 C3
+
+movl $0, %r13d
+. 402 0x12345678 7
+. 41 BD 00 00 00 00 C3
+
+movl $0, %r8d
+. 403 0x12345678 7
+. 41 B8 00 00 00 00 C3
+
+movl $0, %r9d
+. 404 0x12345678 7
+. 41 B9 00 00 00 00 C3
+
+movl $0, (%rbp,%rax,4)
+. 405 0x12345678 9
+. C7 44 85 00 00 00 00 00 C3
+
+movl $0, (%rbx,%rax,4)
+. 406 0x12345678 8
+. C7 04 83 00 00 00 00 C3
+
+movl $1001, %edi
+. 407 0x12345678 6
+. BF E9 03 00 00 C3
+
+movl $1002, %edi
+. 408 0x12345678 6
+. BF EA 03 00 00 C3
+
+movl $1003, %edi
+. 409 0x12345678 6
+. BF EB 03 00 00 C3
+
+movl $1004, %edi
+. 410 0x12345678 6
+. BF EC 03 00 00 C3
+
+movl $1005, %edi
+. 411 0x12345678 6
+. BF ED 03 00 00 C3
+
+movl $1006, %edi
+. 412 0x12345678 6
+. BF EE 03 00 00 C3
+
+movl $1007, %edi
+. 413 0x12345678 6
+. BF EF 03 00 00 C3
+
+movl $100, %eax
+. 414 0x12345678 6
+. B8 64 00 00 00 C3
+
+movl $104, %esi
+. 415 0x12345678 6
+. BE 68 00 00 00 C3
+
+movl 1056(%rsp,%rax,4), %ecx
+. 416 0x12345678 8
+. 8B 8C 84 20 04 00 00 C3
+
+movl 1056(%rsp,%rax,4), %esi
+. 417 0x12345678 8
+. 8B B4 84 20 04 00 00 C3
+
+movl 1056(%rsp,%rdx,4), %eax
+. 418 0x12345678 8
+. 8B 84 94 20 04 00 00 C3
+
+movl 1088(%rsp,%rdx,4), %eax
+. 419 0x12345678 8
+. 8B 84 94 40 04 00 00 C3
+
+movl 1088(%rsp,%rsi,4), %eax
+. 420 0x12345678 8
+. 8B 84 B4 40 04 00 00 C3
+
+movl 108(%rbx), %r9d
+. 421 0x12345678 5
+. 44 8B 4B 6C C3
+
+movl 108(%rdi), %ebp
+. 422 0x12345678 4
+. 8B 6F 6C C3
+
+movl 108(%rdi), %edx
+. 423 0x12345678 4
+. 8B 57 6C C3
+
+movl $10, %edi
+. 424 0x12345678 6
+. BF 0A 00 00 00 C3
+
+movl $1, 12(%rsp)
+. 425 0x12345678 9
+. C7 44 24 0C 01 00 00 00 C3
+
+movl 112(%rsp), %eax
+. 426 0x12345678 5
+. 8B 44 24 70 C3
+
+movl 112(%rsp), %edi
+. 427 0x12345678 5
+. 8B 7C 24 70 C3
+
+movl $114, %esi
+. 428 0x12345678 6
+. BE 72 00 00 00 C3
+
+movl 116(%rbp), %eax
+. 429 0x12345678 4
+. 8B 45 74 C3
+
+movl 116(%rbp), %edx
+. 430 0x12345678 4
+. 8B 55 74 C3
+
+movl $1, 16(%rsp)
+. 431 0x12345678 9
+. C7 44 24 10 01 00 00 00 C3
+
+movl 124(%rbp), %eax
+. 432 0x12345678 4
+. 8B 45 7C C3
+
+movl 124(%rbx), %edx
+. 433 0x12345678 4
+. 8B 53 7C C3
+
+movl 124(%rbx), %r10d
+. 434 0x12345678 5
+. 44 8B 53 7C C3
+
+movl 124(%rdi), %r8d
+. 435 0x12345678 5
+. 44 8B 47 7C C3
+
+movl $1, 24(%rsp)
+. 436 0x12345678 9
+. C7 44 24 18 01 00 00 00 C3
+
+movl 12(%rsp), %eax
+. 437 0x12345678 5
+. 8B 44 24 0C C3
+
+movl 12(%rsp), %edx
+. 438 0x12345678 5
+. 8B 54 24 0C C3
+
+movl 12(%rsp), %r8d
+. 439 0x12345678 6
+. 44 8B 44 24 0C C3
+
+movl 1360(%rsp), %eax
+. 440 0x12345678 8
+. 8B 84 24 50 05 00 00 C3
+
+movl $144, %esi
+. 441 0x12345678 6
+. BE 90 00 00 00 C3
+
+movl $-1, 48(%rbx)
+. 442 0x12345678 8
+. C7 43 30 FF FF FF FF C3
+
+movl $15, %r8d
+. 443 0x12345678 7
+. 41 B8 0F 00 00 00 C3
+
+movl 16(%rsp), %edx
+. 444 0x12345678 5
+. 8B 54 24 10 C3
+
+movl 16(%rsp,%rax,4), %r14d
+. 445 0x12345678 6
+. 44 8B 74 84 10 C3
+
+movl $17, %ecx
+. 446 0x12345678 6
+. B9 11 00 00 00 C3
+
+movl $1, %eax
+. 447 0x12345678 6
+. B8 01 00 00 00 C3
+
+movl $-1, %ebp
+. 448 0x12345678 6
+. BD FF FF FF FF C3
+
+movl $1, %ebx
+. 449 0x12345678 6
+. BB 01 00 00 00 C3
+
+movl $-1, %ecx
+. 450 0x12345678 6
+. B9 FF FF FF FF C3
+
+movl $1, %edi
+. 451 0x12345678 6
+. BF 01 00 00 00 C3
+
+movl $-1, %edx
+. 452 0x12345678 6
+. BA FF FF FF FF C3
+
+movl $1, %edx
+. 453 0x12345678 6
+. BA 01 00 00 00 C3
+
+movl $1, %esi
+. 454 0x12345678 6
+. BE 01 00 00 00 C3
+
+movl $1, %r14d
+. 455 0x12345678 7
+. 41 BE 01 00 00 00 C3
+
+movl $1, %r8d
+. 456 0x12345678 7
+. 41 B8 01 00 00 00 C3
+
+movl $1, %r9d
+. 457 0x12345678 7
+. 41 B9 01 00 00 00 C3
+
+movl $-2097153, %esi
+. 458 0x12345678 6
+. BE FF FF DF FF C3
+
+movl 20(%rsp), %eax
+. 459 0x12345678 5
+. 8B 44 24 14 C3
+
+movl 20(%rsp), %ecx
+. 460 0x12345678 5
+. 8B 4C 24 14 C3
+
+movl 20(%rsp), %edx
+. 461 0x12345678 5
+. 8B 54 24 14 C3
+
+movl 20(%rsp), %esi
+. 462 0x12345678 5
+. 8B 74 24 14 C3
+
+movl 20(%rsp), %r11d
+. 463 0x12345678 6
+. 44 8B 5C 24 14 C3
+
+movl 20(%rsp), %r12d
+. 464 0x12345678 6
+. 44 8B 64 24 14 C3
+
+movl 20(%rsp), %r8d
+. 465 0x12345678 6
+. 44 8B 44 24 14 C3
+
+movl 20(%rsp), %r9d
+. 466 0x12345678 6
+. 44 8B 4C 24 14 C3
+
+movl 2368(%rsp,%rax,4), %eax
+. 467 0x12345678 8
+. 8B 84 84 40 09 00 00 C3
+
+movl 2368(%rsp,%rax,4), %edx
+. 468 0x12345678 8
+. 8B 94 84 40 09 00 00 C3
+
+movl 2368(%rsp,%rax,4), %r13d
+. 469 0x12345678 9
+. 44 8B AC 84 40 09 00 00 C3
+
+movl 2368(%rsp,%rax,4), %r8d
+. 470 0x12345678 9
+. 44 8B 84 84 40 09 00 00 C3
+
+movl $23, %esi
+. 471 0x12345678 6
+. BE 17 00 00 00 C3
+
+movl $24, %r8d
+. 472 0x12345678 7
+. 41 B8 18 00 00 00 C3
+
+movl 24(%rsp), %ecx
+. 473 0x12345678 5
+. 8B 4C 24 18 C3
+
+movl 24(%rsp), %edi
+. 474 0x12345678 5
+. 8B 7C 24 18 C3
+
+movl 24(%rsp), %r8d
+. 475 0x12345678 6
+. 44 8B 44 24 18 C3
+
+movl $27, %edx
+. 476 0x12345678 6
+. BA 1B 00 00 00 C3
+
+movl $2863311531, %edx
+. 477 0x12345678 6
+. BA AB AA AA AA C3
+
+movl 28(%rsp), %ecx
+. 478 0x12345678 5
+. 8B 4C 24 1C C3
+
+movl 28(%rsp), %edi
+. 479 0x12345678 5
+. 8B 7C 24 1C C3
+
+movl 28(%rsp), %edx
+. 480 0x12345678 5
+. 8B 54 24 1C C3
+
+movl 28(%rsp), %r12d
+. 481 0x12345678 6
+. 44 8B 64 24 1C C3
+
+movl $2, %r10d
+. 482 0x12345678 7
+. 41 BA 02 00 00 00 C3
+
+movl $2, %r14d
+. 483 0x12345678 7
+. 41 BE 02 00 00 00 C3
+
+movl $2, %r9d
+. 484 0x12345678 7
+. 41 B9 02 00 00 00 C3
+
+movl $2, (%rsp)
+. 485 0x12345678 8
+. C7 04 24 02 00 00 00 C3
+
+movl $3001, %edi
+. 486 0x12345678 6
+. BF B9 0B 00 00 C3
+
+movl $3002, %edi
+. 487 0x12345678 6
+. BF BA 0B 00 00 C3
+
+movl $3003, %edi
+. 488 0x12345678 6
+. BF BB 0B 00 00 C3
+
+movl $3004, %edi
+. 489 0x12345678 6
+. BF BC 0B 00 00 C3
+
+movl $3005, %edi
+. 490 0x12345678 6
+. BF BD 0B 00 00 C3
+
+movl $3006, %edi
+. 491 0x12345678 6
+. BF BE 0B 00 00 C3
+
+movl $3007, %edi
+. 492 0x12345678 6
+. BF BF 0B 00 00 C3
+
+movl $32, %r13d
+. 493 0x12345678 7
+. 41 BD 20 00 00 00 C3
+
+movl 32(%rsp), %eax
+. 494 0x12345678 5
+. 8B 44 24 20 C3
+
+movl 32(%rsp), %r13d
+. 495 0x12345678 6
+. 44 8B 6C 24 20 C3
+
+movl $33, %edx
+. 496 0x12345678 6
+. BA 21 00 00 00 C3
+
+movl 36(%rsp), %eax
+. 497 0x12345678 5
+. 8B 44 24 24 C3
+
+movl 36(%rsp), %ebx
+. 498 0x12345678 5
+. 8B 5C 24 24 C3
+
+movl 36(%rsp), %ecx
+. 499 0x12345678 5
+. 8B 4C 24 24 C3
+
+movl 36(%rsp), %edx
+. 500 0x12345678 5
+. 8B 54 24 24 C3
+
+movl 36(%rsp), %esi
+. 501 0x12345678 5
+. 8B 74 24 24 C3
+
+movl 36(%rsp), %r8d
+. 502 0x12345678 6
+. 44 8B 44 24 24 C3
+
+movl $38, %esi
+. 503 0x12345678 6
+. BE 26 00 00 00 C3
+
+movl 39256(%rbp,%rax,4), %r9d
+. 504 0x12345678 9
+. 44 8B 8C 85 58 99 00 00 C3
+
+movl $3, %r12d
+. 505 0x12345678 7
+. 41 BC 03 00 00 00 C3
+
+movl $3, %r14d
+. 506 0x12345678 7
+. 41 BE 03 00 00 00 C3
+
+movl $3, %r8d
+. 507 0x12345678 7
+. 41 B8 03 00 00 00 C3
+
+movl 416(%rsp,%rax,4), %r13d
+. 508 0x12345678 9
+. 44 8B AC 84 A0 01 00 00 C3
+
+movl 48(%rbx), %edi
+. 509 0x12345678 4
+. 8B 7B 30 C3
+
+movl 48(%rsp), %eax
+. 510 0x12345678 5
+. 8B 44 24 30 C3
+
+movl 48(%rsp), %edx
+. 511 0x12345678 5
+. 8B 54 24 30 C3
+
+movl 48(%rsp,%rax,4), %edx
+. 512 0x12345678 5
+. 8B 54 84 30 C3
+
+movl 496(%rsp,%rax,4), %edx
+. 513 0x12345678 8
+. 8B 94 84 F0 01 00 00 C3
+
+movl $49, %esi
+. 514 0x12345678 6
+. BE 31 00 00 00 C3
+
+movl $4, %r14d
+. 515 0x12345678 7
+. 41 BE 04 00 00 00 C3
+
+movl 4(%rbp,%rax,4), %eax
+. 516 0x12345678 5
+. 8B 44 85 04 C3
+
+movl 4(%rbp,%rdx,4), %eax
+. 517 0x12345678 5
+. 8B 44 95 04 C3
+
+movl 51640(%rax), %ecx
+. 518 0x12345678 7
+. 8B 88 B8 C9 00 00 C3
+
+movl 51644(%rax), %esi
+. 519 0x12345678 7
+. 8B B0 BC C9 00 00 C3
+
+movl 51648(%rax), %edi
+. 520 0x12345678 7
+. 8B B8 C0 C9 00 00 C3
+
+movl 52(%rsp), %eax
+. 521 0x12345678 5
+. 8B 44 24 34 C3
+
+movl 52(%rsp), %edx
+. 522 0x12345678 5
+. 8B 54 24 34 C3
+
+movl $53, %edx
+. 523 0x12345678 6
+. BA 35 00 00 00 C3
+
+movl $56, %esi
+. 524 0x12345678 6
+. BE 38 00 00 00 C3
+
+movl 56(%rsp), %eax
+. 525 0x12345678 5
+. 8B 44 24 38 C3
+
+movl 644(%r11), %eax
+. 526 0x12345678 8
+. 41 8B 83 84 02 00 00 C3
+
+movl 644(%rdi), %eax
+. 527 0x12345678 7
+. 8B 87 84 02 00 00 C3
+
+movl 644(%rsi), %eax
+. 528 0x12345678 7
+. 8B 86 84 02 00 00 C3
+
+movl 644(%rsi), %edx
+. 529 0x12345678 7
+. 8B 96 84 02 00 00 C3
+
+movl 648(%rbx), %ecx
+. 530 0x12345678 7
+. 8B 8B 88 02 00 00 C3
+
+movl 648(%rbx), %esi
+. 531 0x12345678 7
+. 8B B3 88 02 00 00 C3
+
+movl 648(%rdi), %edx
+. 532 0x12345678 7
+. 8B 97 88 02 00 00 C3
+
+movl 64(%rsp), %eax
+. 533 0x12345678 5
+. 8B 44 24 40 C3
+
+movl 64(%rsp), %r8d
+. 534 0x12345678 6
+. 44 8B 44 24 40 C3
+
+movl 64(%rsp,%rsi,4), %eax
+. 535 0x12345678 5
+. 8B 44 B4 40 C3
+
+movl 652(%rbx), %edx
+. 536 0x12345678 7
+. 8B 93 8C 02 00 00 C3
+
+movl 652(%rbx), %esi
+. 537 0x12345678 7
+. 8B B3 8C 02 00 00 C3
+
+movl 652(%rbx), %r8d
+. 538 0x12345678 8
+. 44 8B 83 8C 02 00 00 C3
+
+movl 652(%rdi), %eax
+. 539 0x12345678 7
+. 8B 87 8C 02 00 00 C3
+
+movl $65536, %ebx
+. 540 0x12345678 6
+. BB 00 00 01 00 C3
+
+movl 656(%rdi), %r13d
+. 541 0x12345678 8
+. 44 8B AF 90 02 00 00 C3
+
+movl $65, %esi
+. 542 0x12345678 6
+. BE 41 00 00 00 C3
+
+movl 660(%rbx), %edx
+. 543 0x12345678 7
+. 8B 93 94 02 00 00 C3
+
+movl 668(%rbp), %edx
+. 544 0x12345678 7
+. 8B 95 9C 02 00 00 C3
+
+movl 668(%rbp), %r12d
+. 545 0x12345678 8
+. 44 8B A5 9C 02 00 00 C3
+
+movl 668(%rdi), %ecx
+. 546 0x12345678 7
+. 8B 8F 9C 02 00 00 C3
+
+movl $66, %esi
+. 547 0x12345678 6
+. BE 42 00 00 00 C3
+
+movl 68(%rsp), %eax
+. 548 0x12345678 5
+. 8B 44 24 44 C3
+
+movl 68(%rsp), %esi
+. 549 0x12345678 5
+. 8B 74 24 44 C3
+
+###movl 68(%rsp), %r8d
+###. 550 0x12345678 5
+###. 44 8B 44 24 C3
+
+movl $69, %esi
+. 551 0x12345678 6
+. BE 45 00 00 00 C3
+
+movl 72(%rsp), %eax
+. 552 0x12345678 5
+. 8B 44 24 48 C3
+
+movl 72(%rsp), %esi
+. 553 0x12345678 5
+. 8B 74 24 48 C3
+
+movl $80, %esi
+. 554 0x12345678 6
+. BE 50 00 00 00 C3
+
+movl 80(%rsp), %eax
+. 555 0x12345678 5
+. 8B 44 24 50 C3
+
+movl 80(%rsp), %edi
+. 556 0x12345678 5
+. 8B 7C 24 50 C3
+
+movl $83, %esi
+. 557 0x12345678 6
+. BE 53 00 00 00 C3
+
+movl 84(%rsp), %eax
+. 558 0x12345678 5
+. 8B 44 24 54 C3
+
+movl 84(%rsp), %ecx
+. 559 0x12345678 5
+. 8B 4C 24 54 C3
+
+movl 84(%rsp), %edi
+. 560 0x12345678 5
+. 8B 7C 24 54 C3
+
+movl 88(%rdi), %ecx
+. 561 0x12345678 4
+. 8B 4F 58 C3
+
+movl 88(%rsp), %eax
+. 562 0x12345678 5
+. 8B 44 24 58 C3
+
+movl 88(%rsp), %ecx
+. 563 0x12345678 5
+. 8B 4C 24 58 C3
+
+movl 896(%rsp,%rax,4), %r15d
+. 564 0x12345678 9
+. 44 8B BC 84 80 03 00 00 C3
+
+movl $89, %esi
+. 565 0x12345678 6
+. BE 59 00 00 00 C3
+
+movl $8, %r8d
+. 566 0x12345678 7
+. 41 B8 08 00 00 00 C3
+
+movl $8, %r9d
+. 567 0x12345678 7
+. 41 B9 08 00 00 00 C3
+
+movl 8(%rsp), %edx
+. 568 0x12345678 5
+. 8B 54 24 08 C3
+
+movl $90, %esi
+. 569 0x12345678 6
+. BE 5A 00 00 00 C3
+
+movl 96(%rsp,%rax,4), %r13d
+. 570 0x12345678 6
+. 44 8B 6C 84 60 C3
+
+movl $999999999, %ecx
+. 571 0x12345678 6
+. B9 FF C9 9A 3B C3
+
+movl %eax, 1056(%rsp,%rdx,4)
+. 572 0x12345678 8
+. 89 84 94 20 04 00 00 C3
+
+movl %eax, 1088(%rsp,%rsi,4)
+. 573 0x12345678 8
+. 89 84 B4 40 04 00 00 C3
+
+movl %eax, 12(%rsp)
+. 574 0x12345678 5
+. 89 44 24 0C C3
+
+movl %eax, -16(%rsi,%rdx,4)
+. 575 0x12345678 5
+. 89 44 96 F0 C3
+
+movl %eax, 16(%rsp)
+. 576 0x12345678 5
+. 89 44 24 10 C3
+
+movl %eax, 20(%rsp)
+. 577 0x12345678 5
+. 89 44 24 14 C3
+
+movl %eax, 2368(%rsp,%rcx,4)
+. 578 0x12345678 8
+. 89 84 8C 40 09 00 00 C3
+
+movl %eax, 28(%rsp)
+. 579 0x12345678 5
+. 89 44 24 1C C3
+
+movl %eax, 32(%rsp,%rdx,4)
+. 580 0x12345678 5
+. 89 44 94 20 C3
+
+movl %eax, 48(%rsp)
+. 581 0x12345678 5
+. 89 44 24 30 C3
+
+movl %eax, 496(%rsp,%rdx,4)
+. 582 0x12345678 8
+. 89 84 94 F0 01 00 00 C3
+
+movl %eax, -4(%rsi,%rdx,4)
+. 583 0x12345678 5
+. 89 44 96 FC C3
+
+movl %eax, 51640(%rsi)
+. 584 0x12345678 7
+. 89 86 B8 C9 00 00 C3
+
+movl %eax, 51644(%rsi)
+. 585 0x12345678 7
+. 89 86 BC C9 00 00 C3
+
+movl %eax, 51648(%rsi)
+. 586 0x12345678 7
+. 89 86 C0 C9 00 00 C3
+
+movl %eax, 52(%rsp)
+. 587 0x12345678 5
+. 89 44 24 34 C3
+
+movl %eax, 56(%rsp)
+. 588 0x12345678 5
+. 89 44 24 38 C3
+
+movl %eax, 644(%r11)
+. 589 0x12345678 8
+. 41 89 83 84 02 00 00 C3
+
+movl %eax, 644(%rdi)
+. 590 0x12345678 7
+. 89 87 84 02 00 00 C3
+
+movl %eax, 644(%rsi)
+. 591 0x12345678 7
+. 89 86 84 02 00 00 C3
+
+movl %eax, 64(%rsp,%rcx,4)
+. 592 0x12345678 5
+. 89 44 8C 40 C3
+
+movl %eax, 64(%rsp,%rsi,4)
+. 593 0x12345678 5
+. 89 44 B4 40 C3
+
+movl %eax, 652(%rdi)
+. 594 0x12345678 7
+. 89 87 8C 02 00 00 C3
+
+movl %eax, 72(%rsp)
+. 595 0x12345678 5
+. 89 44 24 48 C3
+
+movl %eax, 88(%rsp)
+. 596 0x12345678 5
+. 89 44 24 58 C3
+
+movl %eax, 896(%rsp,%rdx,4)
+. 597 0x12345678 8
+. 89 84 94 80 03 00 00 C3
+
+movl %eax, 8(%rsp)
+. 598 0x12345678 5
+. 89 44 24 08 C3
+
+movl %eax, 96(%rsp)
+. 599 0x12345678 5
+. 89 44 24 60 C3
+
+movl %eax, 96(%rsp,%rdx,4)
+. 600 0x12345678 5
+. 89 44 94 60 C3
+
+movl %eax, %ecx
+. 601 0x12345678 3
+. 89 C1 C3
+
+movl %eax, %edi
+. 602 0x12345678 3
+. 89 C7 C3
+
+movl %eax, %edx
+. 603 0x12345678 3
+. 89 C2 C3
+
+movl %eax, %esi
+. 604 0x12345678 3
+. 89 C6 C3
+
+movl %eax, %r15d
+. 605 0x12345678 4
+. 41 89 C7 C3
+
+movl %eax, (%rbp,%rcx,4)
+. 606 0x12345678 5
+. 89 44 8D 00 C3
+
+movl %eax, (%rbp,%rdx,4)
+. 607 0x12345678 5
+. 89 44 95 00 C3
+
+movl %eax, (%rbx,%rcx,4)
+. 608 0x12345678 4
+. 89 04 8B C3
+
+movl %eax, (%rbx,%rsi,4)
+. 609 0x12345678 4
+. 89 04 B3 C3
+
+movl %eax, (%rdi,%rdx,4)
+. 610 0x12345678 4
+. 89 04 97 C3
+
+movl %ebp, %eax
+. 611 0x12345678 3
+. 89 E8 C3
+
+movl %ebp, %ecx
+. 612 0x12345678 3
+. 89 E9 C3
+
+movl %ebp, %esi
+. 613 0x12345678 3
+. 89 EE C3
+
+movl %ebp, %r12d
+. 614 0x12345678 4
+. 41 89 EC C3
+
+movl %ebp, %r8d
+. 615 0x12345678 4
+. 41 89 E8 C3
+
+movl %ebx, 2368(%rsp,%rax,4)
+. 616 0x12345678 8
+. 89 9C 84 40 09 00 00 C3
+
+movl %ebx, %eax
+. 617 0x12345678 3
+. 89 D8 C3
+
+movl %ebx, %edx
+. 618 0x12345678 3
+. 89 DA C3
+
+movl %ebx, %esi
+. 619 0x12345678 3
+. 89 DE C3
+
+movl %ebx, %r12d
+. 620 0x12345678 4
+. 41 89 DC C3
+
+movl %ebx, %r8d
+. 621 0x12345678 4
+. 41 89 D8 C3
+
+movl %ebx, (%rdi,%rax,4)
+. 622 0x12345678 4
+. 89 1C 87 C3
+
+movl %ecx, 16(%rsp)
+. 623 0x12345678 5
+. 89 4C 24 10 C3
+
+movl %ecx, 16(%rsp,%rax,4)
+. 624 0x12345678 5
+. 89 4C 84 10 C3
+
+movl %ecx, 24(%rsp)
+. 625 0x12345678 5
+. 89 4C 24 18 C3
+
+movl %ecx, 28(%rsp)
+. 626 0x12345678 5
+. 89 4C 24 1C C3
+
+movl %ecx, 36(%rsp)
+. 627 0x12345678 5
+. 89 4C 24 24 C3
+
+movl %ecx, 80(%rsp)
+. 628 0x12345678 5
+. 89 4C 24 50 C3
+
+movl %ecx, 84(%rsp)
+. 629 0x12345678 5
+. 89 4C 24 54 C3
+
+movl %ecx, %eax
+. 630 0x12345678 3
+. 89 C8 C3
+
+movl %ecx, %ebx
+. 631 0x12345678 3
+. 89 CB C3
+
+movl %ecx, %edi
+. 632 0x12345678 3
+. 89 CF C3
+
+movl %ecx, %edx
+. 633 0x12345678 3
+. 89 CA C3
+
+movl %ecx, %r12d
+. 634 0x12345678 4
+. 41 89 CC C3
+
+movl %ecx, %r13d
+. 635 0x12345678 4
+. 41 89 CD C3
+
+movl %ecx, %r8d
+. 636 0x12345678 4
+. 41 89 C8 C3
+
+movl %ecx, (%r8,%rdx,4)
+. 637 0x12345678 5
+. 41 89 0C 90 C3
+
+movl %ecx, (%rbx,%rdx,4)
+. 638 0x12345678 4
+. 89 0C 93 C3
+
+movl %edi, 64(%rsp)
+. 639 0x12345678 5
+. 89 7C 24 40 C3
+
+movl %edi, 68(%rsp)
+. 640 0x12345678 5
+. 89 7C 24 44 C3
+
+movl %edi, 84(%rsp)
+. 641 0x12345678 5
+. 89 7C 24 54 C3
+
+movl %edi, 88(%rsp)
+. 642 0x12345678 5
+. 89 7C 24 58 C3
+
+movl %edi, %eax
+. 643 0x12345678 3
+. 89 F8 C3
+
+movl %edi, %ecx
+. 644 0x12345678 3
+. 89 F9 C3
+
+movl %edi, %edx
+. 645 0x12345678 3
+. 89 FA C3
+
+movl %edi, %r10d
+. 646 0x12345678 4
+. 41 89 FA C3
+
+movl %edi, %r11d
+. 647 0x12345678 4
+. 41 89 FB C3
+
+movl %edi, (%rsi,%rax,4)
+. 648 0x12345678 4
+. 89 3C 86 C3
+
+movl %edx, 1088(%rsp,%rcx,4)
+. 649 0x12345678 8
+. 89 94 8C 40 04 00 00 C3
+
+movl %edx, 12(%rsp)
+. 650 0x12345678 5
+. 89 54 24 0C C3
+
+movl %edx, 16(%rsp)
+. 651 0x12345678 5
+. 89 54 24 10 C3
+
+movl %edx, 20(%rsp)
+. 652 0x12345678 5
+. 89 54 24 14 C3
+
+movl %edx, 416(%rsp)
+. 653 0x12345678 8
+. 89 94 24 A0 01 00 00 C3
+
+movl %edx, 48(%rbx)
+. 654 0x12345678 4
+. 89 53 30 C3
+
+movl %edx, 496(%rsp,%rax,4)
+. 655 0x12345678 8
+. 89 94 84 F0 01 00 00 C3
+
+movl %edx, 52(%rsp)
+. 656 0x12345678 5
+. 89 54 24 34 C3
+
+movl %edx, 56(%rsp)
+. 657 0x12345678 5
+. 89 54 24 38 C3
+
+movl %edx, 644(%rsi)
+. 658 0x12345678 7
+. 89 96 84 02 00 00 C3
+
+movl %edx, 648(%rdi)
+. 659 0x12345678 7
+. 89 97 88 02 00 00 C3
+
+movl %edx, 96(%rsp,%rax,4)
+. 660 0x12345678 5
+. 89 54 84 60 C3
+
+movl %edx, %eax
+. 661 0x12345678 3
+. 89 D0 C3
+
+movl %edx, %ebp
+. 662 0x12345678 3
+. 89 D5 C3
+
+movl %edx, %ebx
+. 663 0x12345678 3
+. 89 D3 C3
+
+movl %edx, %ecx
+. 664 0x12345678 3
+. 89 D1 C3
+
+movl %edx, %edi
+. 665 0x12345678 3
+. 89 D7 C3
+
+movl %edx, %esi
+. 666 0x12345678 3
+. 89 D6 C3
+
+movl %edx, %r12d
+. 667 0x12345678 4
+. 41 89 D4 C3
+
+movl %edx, (%r15,%rax,4)
+. 668 0x12345678 5
+. 41 89 14 87 C3
+
+movl %esi, 1056(%rsp,%rax,4)
+. 669 0x12345678 8
+. 89 B4 84 20 04 00 00 C3
+
+movl %esi, 416(%rsp,%rax,4)
+. 670 0x12345678 8
+. 89 B4 84 A0 01 00 00 C3
+
+movl %esi, 64(%rsp)
+. 671 0x12345678 5
+. 89 74 24 40 C3
+
+movl %esi, 68(%rsp)
+. 672 0x12345678 5
+. 89 74 24 44 C3
+
+movl %esi, %eax
+. 673 0x12345678 3
+. 89 F0 C3
+
+movl %esi, %ebp
+. 674 0x12345678 3
+. 89 F5 C3
+
+movl %esi, %ebx
+. 675 0x12345678 3
+. 89 F3 C3
+
+movl %esi, %ecx
+. 676 0x12345678 3
+. 89 F1 C3
+
+movl %esi, %edx
+. 677 0x12345678 3
+. 89 F2 C3
+
+movl %esi, %r8d
+. 678 0x12345678 4
+. 41 89 F0 C3
+
+movl %esi, (%rbx,%rdx,4)
+. 679 0x12345678 4
+. 89 34 93 C3
+
+movl 12345(,%rax,4), %r12d
+. 680 0x12345678 9
+. 44 8B 24 85 39 30 00 00 C3
+
+movl $0x12345678, %esi
+. 681 0x12345678 6
+. BE 78 56 34 12 C3
+
+movl %r10d, %ecx
+. 682 0x12345678 4
+. 44 89 D1 C3
+
+movl %r10d, %r11d
+. 683 0x12345678 4
+. 45 89 D3 C3
+
+movl %r10d, %r9d
+. 684 0x12345678 4
+. 45 89 D1 C3
+
+movl (%r10,%rax,4), %r8d
+. 685 0x12345678 5
+. 45 8B 04 82 C3
+
+movl %r12d, 20(%rsp)
+. 686 0x12345678 6
+. 44 89 64 24 14 C3
+
+movl %r12d, 28(%rsp)
+. 687 0x12345678 6
+. 44 89 64 24 1C C3
+
+movl %r12d, %eax
+. 688 0x12345678 4
+. 44 89 E0 C3
+
+movl %r12d, %ecx
+. 689 0x12345678 4
+. 44 89 E1 C3
+
+movl %r12d, %edi
+. 690 0x12345678 4
+. 44 89 E7 C3
+
+movl %r12d, %edx
+. 691 0x12345678 4
+. 44 89 E2 C3
+
+movl %r12d, %esi
+. 692 0x12345678 4
+. 44 89 E6 C3
+
+movl %r12d, %r10d
+. 693 0x12345678 4
+. 45 89 E2 C3
+
+movl %r12d, %r8d
+. 694 0x12345678 4
+. 45 89 E0 C3
+
+movl %r12d, (%rbp,%rax,4)
+. 695 0x12345678 6
+. 44 89 64 85 00 C3
+
+movl (%r12,%rax,4), %ecx
+. 696 0x12345678 5
+. 41 8B 0C 84 C3
+
+movl %r13d, 416(%rsp,%rax,4)
+. 697 0x12345678 9
+. 44 89 AC 84 A0 01 00 00 C3
+
+movl %r13d, 48(%rsp)
+. 698 0x12345678 6
+. 44 89 6C 24 30 C3
+
+movl %r13d, 52(%rsp)
+. 699 0x12345678 6
+. 44 89 6C 24 34 C3
+
+movl %r13d, %eax
+. 700 0x12345678 4
+. 44 89 E8 C3
+
+movl %r13d, %ebp
+. 701 0x12345678 4
+. 44 89 ED C3
+
+movl %r13d, %ebx
+. 702 0x12345678 4
+. 44 89 EB C3
+
+movl %r13d, %edx
+. 703 0x12345678 4
+. 44 89 EA C3
+
+movl %r13d, %r10d
+. 704 0x12345678 4
+. 45 89 EA C3
+
+movl %r13d, %r11d
+. 705 0x12345678 4
+. 45 89 EB C3
+
+movl %r13d, %r12d
+. 706 0x12345678 4
+. 45 89 EC C3
+
+movl %r13d, %r8d
+. 707 0x12345678 4
+. 45 89 E8 C3
+
+movl %r13d, %r9d
+. 708 0x12345678 4
+. 45 89 E9 C3
+
+movl %r13d, (%rsp)
+. 709 0x12345678 5
+. 44 89 2C 24 C3
+
+movl %r14d, 16(%rsp,%rax,4)
+. 710 0x12345678 6
+. 44 89 74 84 10 C3
+
+movl %r14d, %eax
+. 711 0x12345678 4
+. 44 89 F0 C3
+
+movl %r14d, %ebp
+. 712 0x12345678 4
+. 44 89 F5 C3
+
+movl %r14d, %ebx
+. 713 0x12345678 4
+. 44 89 F3 C3
+
+movl %r14d, %edi
+. 714 0x12345678 4
+. 44 89 F7 C3
+
+movl %r14d, %edx
+. 715 0x12345678 4
+. 44 89 F2 C3
+
+movl %r14d, %esi
+. 716 0x12345678 4
+. 44 89 F6 C3
+
+movl %r14d, %r11d
+. 717 0x12345678 4
+. 45 89 F3 C3
+
+movl %r14d, %r8d
+. 718 0x12345678 4
+. 45 89 F0 C3
+
+movl %r14d, %r9d
+. 719 0x12345678 4
+. 45 89 F1 C3
+
+movl %r14d, (%rbp,%rax,4)
+. 720 0x12345678 6
+. 44 89 74 85 00 C3
+
+movl %r15d, 80(%rsp)
+. 721 0x12345678 6
+. 44 89 7C 24 50 C3
+
+movl %r15d, 84(%rsp)
+. 722 0x12345678 6
+. 44 89 7C 24 54 C3
+
+movl %r15d, 896(%rsp,%rax,4)
+. 723 0x12345678 9
+. 44 89 BC 84 80 03 00 00 C3
+
+movl %r15d, %ebp
+. 724 0x12345678 4
+. 44 89 FD C3
+
+movl %r15d, %esi
+. 725 0x12345678 4
+. 44 89 FE C3
+
+movl %r15d, %r14d
+. 726 0x12345678 4
+. 45 89 FE C3
+
+movl %r15d, %r8d
+. 727 0x12345678 4
+. 45 89 F8 C3
+
+movl %r15d, %r9d
+. 728 0x12345678 4
+. 45 89 F9 C3
+
+movl (%r15,%rax,4), %eax
+. 729 0x12345678 5
+. 41 8B 04 87 C3
+
+movl (%r15,%rax,4), %edx
+. 730 0x12345678 5
+. 41 8B 14 87 C3
+
+movl (%r15,%rax,4), %r8d
+. 731 0x12345678 5
+. 45 8B 04 87 C3
+
+movl %r8d, 12(%rsp)
+. 732 0x12345678 6
+. 44 89 44 24 0C C3
+
+movl %r8d, 20(%rsp)
+. 733 0x12345678 6
+. 44 89 44 24 14 C3
+
+movl %r8d, 2368(%rsp,%rax,4)
+. 734 0x12345678 9
+. 44 89 84 84 40 09 00 00 C3
+
+movl %r8d, 32(%rsp)
+. 735 0x12345678 6
+. 44 89 44 24 20 C3
+
+movl %r8d, 36(%rsp)
+. 736 0x12345678 6
+. 44 89 44 24 24 C3
+
+movl %r8d, 668(%rbx)
+. 737 0x12345678 8
+. 44 89 83 9C 02 00 00 C3
+
+movl %r8d, 68(%rsp)
+. 738 0x12345678 6
+. 44 89 44 24 44 C3
+
+movl %r8d, 72(%rsp)
+. 739 0x12345678 6
+. 44 89 44 24 48 C3
+
+movl %r8d, 896(%rsp)
+. 740 0x12345678 9
+. 44 89 84 24 80 03 00 00 C3
+
+movl %r8d, %edx
+. 741 0x12345678 4
+. 44 89 C2 C3
+
+movl %r8d, %esi
+. 742 0x12345678 4
+. 44 89 C6 C3
+
+movl (%r8,%rax,4), %ecx
+. 743 0x12345678 5
+. 41 8B 0C 80 C3
+
+movl (%r8,%rax,4), %r10d
+. 744 0x12345678 5
+. 45 8B 14 80 C3
+
+movl (%r8,%rax,4), %r8d
+. 745 0x12345678 5
+. 45 8B 04 80 C3
+
+movl %r9d, -16(%rsi,%rax,4)
+. 746 0x12345678 6
+. 44 89 4C 86 F0 C3
+
+movl %r9d, 28(%rsp)
+. 747 0x12345678 6
+. 44 89 4C 24 1C C3
+
+movl %r9d, 32(%rsp)
+. 748 0x12345678 6
+. 44 89 4C 24 20 C3
+
+movl %r9d, 496(%rsp)
+. 749 0x12345678 9
+. 44 89 8C 24 F0 01 00 00 C3
+
+movl %r9d, -4(%rsi,%rax,4)
+. 750 0x12345678 6
+. 44 89 4C 86 FC C3
+
+movl %r9d, %ebp
+. 751 0x12345678 4
+. 44 89 CD C3
+
+movl %r9d, %ecx
+. 752 0x12345678 4
+. 44 89 C9 C3
+
+movl %r9d, %edi
+. 753 0x12345678 4
+. 44 89 CF C3
+
+movl %r9d, %edx
+. 754 0x12345678 4
+. 44 89 CA C3
+
+movl %r9d, %r8d
+. 755 0x12345678 4
+. 45 89 C8 C3
+
+movl (%rbp,%rax,4), %eax
+. 756 0x12345678 5
+. 8B 44 85 00 C3
+
+movl (%rbp,%rax,4), %edx
+. 757 0x12345678 5
+. 8B 54 85 00 C3
+
+movl (%rbp,%rax,4), %r12d
+. 758 0x12345678 6
+. 44 8B 64 85 00 C3
+
+movl (%rbp,%rax,4), %r14d
+. 759 0x12345678 6
+. 44 8B 74 85 00 C3
+
+movl (%rbp,%rax), %eax
+. 760 0x12345678 5
+. 8B 44 05 00 C3
+
+movl (%rbp,%rax), %edx
+. 761 0x12345678 5
+. 8B 54 05 00 C3
+
+movl (%rbp,%rax), %r12d
+. 762 0x12345678 6
+. 44 8B 64 05 00 C3
+
+movl (%rbp,%rax), %r8d
+. 763 0x12345678 6
+. 44 8B 44 05 00 C3
+
+movl (%rbp,%rdx,4), %eax
+. 764 0x12345678 5
+. 8B 44 95 00 C3
+
+movl (%rbp,%rdx), %edx
+. 765 0x12345678 5
+. 8B 54 15 00 C3
+
+movl (%rbx,%rax,4), %edi
+. 766 0x12345678 4
+. 8B 3C 83 C3
+
+movl (%rbx,%rax,4), %edx
+. 767 0x12345678 4
+. 8B 14 83 C3
+
+movl (%rbx,%rcx,4), %esi
+. 768 0x12345678 4
+. 8B 34 8B C3
+
+movl (%rbx,%rdx,4), %eax
+. 769 0x12345678 4
+. 8B 04 93 C3
+
+movl (%rbx,%rsi,4), %ecx
+. 770 0x12345678 4
+. 8B 0C B3 C3
+
+movl (%rdx,%rax,4), %ecx
+. 771 0x12345678 4
+. 8B 0C 82 C3
+
+movl (%rsi,%rax,4), %r9d
+. 772 0x12345678 5
+. 44 8B 0C 86 C3
+
+movl (%rsi,%rdx,4), %eax
+. 773 0x12345678 4
+. 8B 04 96 C3
+
+movq 120(%rsp), %r9
+. 774 0x12345678 6
+. 4C 8B 4C 24 78 C3
+
+movq 120(%rsp), %rax
+. 775 0x12345678 6
+. 48 8B 44 24 78 C3
+
+movq 1368(%rsp), %rdi
+. 776 0x12345678 9
+. 48 8B BC 24 58 05 00 00 C3
+
+movq 1368(%rsp), %rsi
+. 777 0x12345678 9
+. 48 8B B4 24 58 05 00 00 C3
+
+movq 24(%rbx), %rdi
+. 778 0x12345678 5
+. 48 8B 7B 18 C3
+
+movq 24(%rsp), %rbp
+. 779 0x12345678 6
+. 48 8B 6C 24 18 C3
+
+movq 24(%rsp), %rbx
+. 780 0x12345678 6
+. 48 8B 5C 24 18 C3
+
+movq 24(%rsp), %rcx
+. 781 0x12345678 6
+. 48 8B 4C 24 18 C3
+
+movq 24(%rsp), %rdi
+. 782 0x12345678 6
+. 48 8B 7C 24 18 C3
+
+movq 24(%rsp), %rdx
+. 783 0x12345678 6
+. 48 8B 54 24 18 C3
+
+movq 24(%rsp), %rsi
+. 784 0x12345678 6
+. 48 8B 74 24 18 C3
+
+movq 32(%rbx), %rsi
+. 785 0x12345678 5
+. 48 8B 73 20 C3
+
+movq 32(%rsp), %rbp
+. 786 0x12345678 6
+. 48 8B 6C 24 20 C3
+
+movq 32(%rsp), %rdx
+. 787 0x12345678 6
+. 48 8B 54 24 20 C3
+
+movq 3456(%rsp), %rdx
+. 788 0x12345678 9
+. 48 8B 94 24 80 0D 00 00 C3
+
+movq 3456(%rsp), %rsi
+. 789 0x12345678 9
+. 48 8B B4 24 80 0D 00 00 C3
+
+movq 40(%rdi), %r15
+. 790 0x12345678 5
+. 4C 8B 7F 28 C3
+
+movq 40(%rsp), %r12
+. 791 0x12345678 6
+. 4C 8B 64 24 28 C3
+
+movq 40(%rsp), %r8
+. 792 0x12345678 6
+. 4C 8B 44 24 28 C3
+
+movq 40(%rsp), %rcx
+. 793 0x12345678 6
+. 48 8B 4C 24 28 C3
+
+movq 40(%rsp), %rdi
+. 794 0x12345678 6
+. 48 8B 7C 24 28 C3
+
+movq 40(%rsp), %rdx
+. 795 0x12345678 6
+. 48 8B 54 24 28 C3
+
+movq 40(%rsp), %rsi
+. 796 0x12345678 6
+. 48 8B 74 24 28 C3
+
+movq 48(%rsp), %r13
+. 797 0x12345678 6
+. 4C 8B 6C 24 30 C3
+
+movq 48(%rsp), %r8
+. 798 0x12345678 6
+. 4C 8B 44 24 30 C3
+
+movq 48(%rsp), %rax
+. 799 0x12345678 6
+. 48 8B 44 24 30 C3
+
+movq 48(%rsp), %rcx
+. 800 0x12345678 6
+. 48 8B 4C 24 30 C3
+
+movq 48(%rsp), %rdx
+. 801 0x12345678 6
+. 48 8B 54 24 30 C3
+
+movq 48(%rsp), %rsi
+. 802 0x12345678 6
+. 48 8B 74 24 30 C3
+
+movq 56(%rdi), %r12
+. 803 0x12345678 5
+. 4C 8B 67 38 C3
+
+movq 56(%rdi), %r14
+. 804 0x12345678 5
+. 4C 8B 77 38 C3
+
+movq 56(%rsp), %r14
+. 805 0x12345678 6
+. 4C 8B 74 24 38 C3
+
+movq 56(%rsp), %r8
+. 806 0x12345678 6
+. 4C 8B 44 24 38 C3
+
+movq 56(%rsp), %rdi
+. 807 0x12345678 6
+. 48 8B 7C 24 38 C3
+
+movq 56(%rsp), %rdx
+. 808 0x12345678 6
+. 48 8B 54 24 38 C3
+
+movq 56(%rsp), %rsi
+. 809 0x12345678 6
+. 48 8B 74 24 38 C3
+
+movq 64(%rdi), %r13
+. 810 0x12345678 5
+. 4C 8B 6F 40 C3
+
+movq 64(%rdi), %rsi
+. 811 0x12345678 5
+. 48 8B 77 40 C3
+
+movq 64(%rsp), %r15
+. 812 0x12345678 6
+. 4C 8B 7C 24 40 C3
+
+movq 72(%rdi), %r15
+. 813 0x12345678 5
+. 4C 8B 7F 48 C3
+
+movq 72(%rdi), %rbp
+. 814 0x12345678 5
+. 48 8B 6F 48 C3
+
+movq 80(%r11), %rdx
+. 815 0x12345678 5
+. 49 8B 53 50 C3
+
+movq 80(%rdi), %rdx
+. 816 0x12345678 5
+. 48 8B 57 50 C3
+
+movq 80(%rsi), %rdx
+. 817 0x12345678 5
+. 48 8B 56 50 C3
+
+movq %r12, -32(%rsp)
+. 818 0x12345678 6
+. 4C 89 64 24 E0 C3
+
+movq %r13, -24(%rsp)
+. 819 0x12345678 6
+. 4C 89 6C 24 E8 C3
+
+movq %r14, -16(%rsp)
+. 820 0x12345678 6
+. 4C 89 74 24 F0 C3
+
+movq %r14, %rdi
+. 821 0x12345678 4
+. 4C 89 F7 C3
+
+movq %r15, -8(%rsp)
+. 822 0x12345678 6
+. 4C 89 7C 24 F8 C3
+
+movq %r15, %r10
+. 823 0x12345678 4
+. 4D 89 FA C3
+
+movq %r15, %rcx
+. 824 0x12345678 4
+. 4C 89 F9 C3
+
+movq %r15, %rdx
+. 825 0x12345678 4
+. 4C 89 FA C3
+
+movq %r15, %rsi
+. 826 0x12345678 4
+. 4C 89 FE C3
+
+movq %rax, 80(%rbx)
+. 827 0x12345678 5
+. 48 89 43 50 C3
+
+movq %rax, %r10
+. 828 0x12345678 4
+. 49 89 C2 C3
+
+movq %rax, %rcx
+. 829 0x12345678 4
+. 48 89 C1 C3
+
+movq %rax, %rdi
+. 830 0x12345678 4
+. 48 89 C7 C3
+
+movq %rax, %rdx
+. 831 0x12345678 4
+. 48 89 C2 C3
+
+movq %rax, (%rsp)
+. 832 0x12345678 5
+. 48 89 04 24 C3
+
+movq %rbp, -40(%rsp)
+. 833 0x12345678 6
+. 48 89 6C 24 D8 C3
+
+movq %rbp, %r11
+. 834 0x12345678 4
+. 49 89 EB C3
+
+movq %rbp, %rsi
+. 835 0x12345678 4
+. 48 89 EE C3
+
+movq %rbx, -48(%rsp)
+. 836 0x12345678 6
+. 48 89 5C 24 D0 C3
+
+movq %rbx, %rdi
+. 837 0x12345678 4
+. 48 89 DF C3
+
+movq %rbx, %rsi
+. 838 0x12345678 4
+. 48 89 DE C3
+
+movq %rcx, %rbp
+. 839 0x12345678 4
+. 48 89 CD C3
+
+movq %rcx, %rsi
+. 840 0x12345678 4
+. 48 89 CE C3
+
+movq %rdi, 24(%rsp)
+. 841 0x12345678 6
+. 48 89 7C 24 18 C3
+
+movq %rdi, 56(%rsp)
+. 842 0x12345678 6
+. 48 89 7C 24 38 C3
+
+movq %rdi, %rbp
+. 843 0x12345678 4
+. 48 89 FD C3
+
+movq %rdi, %rbx
+. 844 0x12345678 4
+. 48 89 FB C3
+
+movq %rdi, %rsi
+. 845 0x12345678 4
+. 48 89 FE C3
+
+movq %rdx, 32(%rsp)
+. 846 0x12345678 6
+. 48 89 54 24 20 C3
+
+movq %rdx, 40(%rsp)
+. 847 0x12345678 6
+. 48 89 54 24 28 C3
+
+movq %rdx, 8(%rsp)
+. 848 0x12345678 6
+. 48 89 54 24 08 C3
+
+movq %rdx, %rax
+. 849 0x12345678 4
+. 48 89 D0 C3
+
+movq %rdx, %rbx
+. 850 0x12345678 4
+. 48 89 D3 C3
+
+movq %rdx, %rdi
+. 851 0x12345678 4
+. 48 89 D7 C3
+
+movq %rsi, 40(%rsp)
+. 852 0x12345678 6
+. 48 89 74 24 28 C3
+
+movq %rsi, 48(%rsp)
+. 853 0x12345678 6
+. 48 89 74 24 30 C3
+
+movq %rsi, 8(%rsp)
+. 854 0x12345678 6
+. 48 89 74 24 08 C3
+
+movq %rsi, %r15
+. 855 0x12345678 4
+. 49 89 F7 C3
+
+movq %rsi, %rdx
+. 856 0x12345678 4
+. 48 89 F2 C3
+
+movq %rsi, (%rsp)
+. 857 0x12345678 5
+. 48 89 34 24 C3
+
+movq %rsp, %r11
+. 858 0x12345678 4
+. 49 89 E3 C3
+
+movq (%rsp), %rcx
+. 859 0x12345678 5
+. 48 8B 0C 24 C3
+
+movq (%rsp), %rdx
+. 860 0x12345678 5
+. 48 8B 14 24 C3
+
+movq (%rsp), %rsi
+. 861 0x12345678 5
+. 48 8B 34 24 C3
+
+movq -999(%rip), %rcx
+. 862 0x12345678 8
+. 48 8B 0D 19 FC FF FF C3
+
+movq 1(%rip), %rdi
+. 863 0x12345678 8
+. 48 8B 3D 01 00 00 00 C3
+
+movq 2(%rip), %rsi
+. 864 0x12345678 8
+. 48 8B 35 02 00 00 00 C3
+
+mov (%rbx,%rax,4), %eax
+. 865 0x12345678 4
+. 8B 04 83 C3
+
+mov (%rbx,%rsi,4), %eax
+. 866 0x12345678 4
+. 8B 04 B3 C3
+
+mov (%rcx,%rax,4), %eax
+. 867 0x12345678 4
+. 8B 04 81 C3
+
+mov (%rdx,%rax,4), %eax
+. 868 0x12345678 4
+. 8B 04 82 C3
+
+mov (%rsi,%rax,4), %eax
+. 869 0x12345678 4
+. 8B 04 86 C3
+
+mov (%rsi,%rdx,4), %eax
+. 870 0x12345678 4
+. 8B 04 96 C3
+
+movslq 108(%rbx),%rax
+. 871 0x12345678 5
+. 48 63 43 6C C3
+
+movslq 116(%r11),%rcx
+. 872 0x12345678 5
+. 49 63 4B 74 C3
+
+movslq 116(%rdi),%rcx
+. 873 0x12345678 5
+. 48 63 4F 74 C3
+
+movslq 116(%rsi),%rcx
+. 874 0x12345678 5
+. 48 63 4E 74 C3
+
+movslq 12(%rsp),%rax
+. 875 0x12345678 6
+. 48 63 44 24 0C C3
+
+movslq 20(%rsp),%rax
+. 876 0x12345678 6
+. 48 63 44 24 14 C3
+
+movslq 24(%rsp),%rax
+. 877 0x12345678 6
+. 48 63 44 24 18 C3
+
+movslq 24(%rsp),%rdx
+. 878 0x12345678 6
+. 48 63 54 24 18 C3
+
+movslq 28(%rsp),%r8
+. 879 0x12345678 6
+. 4C 63 44 24 1C C3
+
+movslq 28(%rsp),%rax
+. 880 0x12345678 6
+. 48 63 44 24 1C C3
+
+movslq 28(%rsp),%rcx
+. 881 0x12345678 6
+. 48 63 4C 24 1C C3
+
+movslq %eax,%rdx
+. 882 0x12345678 4
+. 48 63 D0 C3
+
+movslq %ebp,%rcx
+. 883 0x12345678 4
+. 48 63 CD C3
+
+movslq %ebp,%rdx
+. 884 0x12345678 4
+. 48 63 D5 C3
+
+movslq %ebx,%r10
+. 885 0x12345678 4
+. 4C 63 D3 C3
+
+movslq %ebx,%rax
+. 886 0x12345678 4
+. 48 63 C3 C3
+
+movslq %ebx,%rcx
+. 887 0x12345678 4
+. 48 63 CB C3
+
+movslq %ebx,%rdx
+. 888 0x12345678 4
+. 48 63 D3 C3
+
+movslq %ecx,%rax
+. 889 0x12345678 4
+. 48 63 C1 C3
+
+movslq %ecx,%rdx
+. 890 0x12345678 4
+. 48 63 D1 C3
+
+movslq %edi,%rax
+. 891 0x12345678 4
+. 48 63 C7 C3
+
+movslq %edi,%rcx
+. 892 0x12345678 4
+. 48 63 CF C3
+
+movslq %edi,%rdx
+. 893 0x12345678 4
+. 48 63 D7 C3
+
+movslq %edi,%rsi
+. 894 0x12345678 4
+. 48 63 F7 C3
+
+movslq %edx,%rax
+. 895 0x12345678 4
+. 48 63 C2 C3
+
+movslq %edx,%rdx
+. 896 0x12345678 4
+. 48 63 D2 C3
+
+movslq %edx,%rsi
+. 897 0x12345678 4
+. 48 63 F2 C3
+
+movslq %esi,%rax
+. 898 0x12345678 4
+. 48 63 C6 C3
+
+movslq %r10d,%rax
+. 899 0x12345678 4
+. 49 63 C2 C3
+
+movslq %r10d,%rcx
+. 900 0x12345678 4
+. 49 63 CA C3
+
+movslq %r10d,%rdx
+. 901 0x12345678 4
+. 49 63 D2 C3
+
+movslq %r10d,%rsi
+. 902 0x12345678 4
+. 49 63 F2 C3
+
+movslq %r11d,%rcx
+. 903 0x12345678 4
+. 49 63 CB C3
+
+movslq %r11d,%rdx
+. 904 0x12345678 4
+. 49 63 D3 C3
+
+movslq %r12d,%rax
+. 905 0x12345678 4
+. 49 63 C4 C3
+
+movslq %r12d,%rcx
+. 906 0x12345678 4
+. 49 63 CC C3
+
+movslq %r12d,%rdx
+. 907 0x12345678 4
+. 49 63 D4 C3
+
+movslq %r13d,%rax
+. 908 0x12345678 4
+. 49 63 C5 C3
+
+movslq %r13d,%rdx
+. 909 0x12345678 4
+. 49 63 D5 C3
+
+movslq %r14d,%rax
+. 910 0x12345678 4
+. 49 63 C6 C3
+
+movslq %r14d,%rdx
+. 911 0x12345678 4
+. 49 63 D6 C3
+
+movslq %r15d,%rax
+. 912 0x12345678 4
+. 49 63 C7 C3
+
+movslq %r8d,%rax
+. 913 0x12345678 4
+. 49 63 C0 C3
+
+movslq %r8d,%rdx
+. 914 0x12345678 4
+. 49 63 D0 C3
+
+movslq %r9d,%rax
+. 915 0x12345678 4
+. 49 63 C1 C3
+
+movslq %r9d,%rcx
+. 916 0x12345678 4
+. 49 63 C9 C3
+
+movslq %r9d,%rdx
+. 917 0x12345678 4
+. 49 63 D1 C3
+
+movw $0, -2(%rdi,%rdx,2)
+. 918 0x12345678 8
+. 66 C7 44 57 FE 00 00 C3
+
+movw $0, -4(%rdi,%rdx,2)
+. 919 0x12345678 8
+. 66 C7 44 57 FC 00 00 C3
+
+movw $0, -6(%rdi,%rdx,2)
+. 920 0x12345678 8
+. 66 C7 44 57 FA 00 00 C3
+
+movw $0, 80(%rsp,%rax,2)
+. 921 0x12345678 8
+. 66 C7 44 44 50 00 00 C3
+
+movw $0, (%r8,%rax,2)
+. 922 0x12345678 8
+. 66 41 C7 04 40 00 00 C3
+
+movw $0, (%rbp,%rax,2)
+. 923 0x12345678 8
+. 66 C7 44 45 00 00 00 C3
+
+movw $0, (%rcx,%rdx,2)
+. 924 0x12345678 7
+. 66 C7 04 51 00 00 C3
+
+movw $0, (%rdi,%rax,2)
+. 925 0x12345678 7
+. 66 C7 04 47 00 00 C3
+
+movw $1, (%rbp,%rax,2)
+. 926 0x12345678 8
+. 66 C7 44 45 00 01 00 C3
+
+movw %ax, (%rbp,%rdx,2)
+. 927 0x12345678 6
+. 66 89 44 55 00 C3
+
+movw %cx, 80(%rsp)
+. 928 0x12345678 6
+. 66 89 4C 24 50 C3
+
+movw %cx, 82(%rsp)
+. 929 0x12345678 6
+. 66 89 4C 24 52 C3
+
+movw %di, 88(%rsp)
+. 930 0x12345678 6
+. 66 89 7C 24 58 C3
+
+movw %di, 90(%rsp)
+. 931 0x12345678 6
+. 66 89 7C 24 5A C3
+
+movw %r10w, (%rbp,%rax,2)
+. 932 0x12345678 7
+. 66 44 89 54 45 00 C3
+
+movw %si, 84(%rsp)
+. 933 0x12345678 6
+. 66 89 74 24 54 C3
+
+movw %si, 86(%rsp)
+. 934 0x12345678 6
+. 66 89 74 24 56 C3
+
+movw %si, (%rcx,%rax,2)
+. 935 0x12345678 5
+. 66 89 34 41 C3
+
+movzbl 124(%rdi), %eax
+. 936 0x12345678 5
+. 0F B6 47 7C C3
+
+movzbl 12(%r10), %eax
+. 937 0x12345678 6
+. 41 0F B6 42 0C C3
+
+movzbl 12(%r9), %eax
+. 938 0x12345678 6
+. 41 0F B6 41 0C C3
+
+movzbl 1704(%rbp,%rax), %eax
+. 939 0x12345678 9
+. 0F B6 84 05 A8 06 00 00 C3
+
+movzbl 1704(%rbp,%rax), %esi
+. 940 0x12345678 9
+. 0F B6 B4 05 A8 06 00 00 C3
+
+movzbl 19706(%rbp,%r10), %eax
+. 941 0x12345678 10
+. 42 0F B6 84 15 FA 4C 00 00 C3
+
+movzbl -1(%rcx,%r8), %eax
+. 942 0x12345678 7
+. 42 0F B6 44 01 FF C3
+
+movzbl -1(%rdx,%rsi), %eax
+. 943 0x12345678 6
+. 0F B6 44 32 FF C3
+
+movzbl 1(%rsp), %ecx
+. 944 0x12345678 6
+. 0F B6 4C 24 01 C3
+
+movzbl -2(%rcx,%r8), %eax
+. 945 0x12345678 7
+. 42 0F B6 44 01 FE C3
+
+movzbl -2(%rdx,%rsi), %eax
+. 946 0x12345678 6
+. 0F B6 44 32 FE C3
+
+movzbl 32(%rsp), %ecx
+. 947 0x12345678 6
+. 0F B6 4C 24 20 C3
+
+movzbl 32(%rsp,%rax), %ecx
+. 948 0x12345678 6
+. 0F B6 4C 04 20 C3
+
+movzbl 37708(%rax,%r13), %eax
+. 949 0x12345678 10
+. 42 0F B6 84 28 4C 93 00 00 C3
+
+movzbl 37708(%rax,%rdx), %eax
+. 950 0x12345678 9
+. 0F B6 84 10 4C 93 00 00 C3
+
+movzbl 37708(%rbp,%rax,2), %r8d
+. 951 0x12345678 10
+. 44 0F B6 84 45 4C 93 00 00 C3
+
+movzbl 37708(%rbp,%rcx), %edx
+. 952 0x12345678 9
+. 0F B6 94 0D 4C 93 00 00 C3
+
+movzbl 37708(%rcx,%rdx), %r8d
+. 953 0x12345678 10
+. 44 0F B6 84 11 4C 93 00 00 C3
+
+movzbl 37966(%rbp,%rcx), %eax
+. 954 0x12345678 9
+. 0F B6 84 0D 4E 94 00 00 C3
+
+movzbl 38224(%rbp,%rcx), %edx
+. 955 0x12345678 9
+. 0F B6 94 0D 50 95 00 00 C3
+
+movzbl 38482(%rbp,%rcx), %eax
+. 956 0x12345678 9
+. 0F B6 84 0D 52 96 00 00 C3
+
+movzbl 384(%rbx,%rax), %esi
+. 957 0x12345678 9
+. 0F B6 B4 03 80 01 00 00 C3
+
+movzbl 38740(%rbp,%rcx), %edx
+. 958 0x12345678 9
+. 0F B6 94 0D 54 97 00 00 C3
+
+movzbl 38998(%rbp,%rcx), %eax
+. 959 0x12345678 9
+. 0F B6 84 0D 56 98 00 00 C3
+
+movzbl -3(%rcx,%r8), %eax
+. 960 0x12345678 7
+. 42 0F B6 44 01 FD C3
+
+movzbl -3(%rdx,%rsi), %eax
+. 961 0x12345678 6
+. 0F B6 44 32 FD C3
+
+movzbl 643(%r11), %eax
+. 962 0x12345678 9
+. 41 0F B6 83 83 02 00 00 C3
+
+movzbl 643(%rdi), %eax
+. 963 0x12345678 8
+. 0F B6 87 83 02 00 00 C3
+
+movzbl 643(%rsi), %eax
+. 964 0x12345678 8
+. 0F B6 86 83 02 00 00 C3
+
+movzbl 664(%rbx), %eax
+. 965 0x12345678 8
+. 0F B6 83 98 02 00 00 C3
+
+movzbl %al, %eax
+. 966 0x12345678 4
+. 0F B6 C0 C3
+
+movzbl %al, %r14d
+. 967 0x12345678 5
+. 44 0F B6 F0 C3
+
+movzbl %bh, %ebp
+. 968 0x12345678 4
+. 0F B6 EF C3
+
+movzbl %bl,%esi
+. 969 0x12345678 4
+. 0F B6 F3 C3
+
+movzbl %dl, %r11d
+. 970 0x12345678 5
+. 44 0F B6 DA C3
+
+movzbl (%rax), %r12d
+. 971 0x12345678 5
+. 44 0F B6 20 C3
+
+movzbl (%rax,%rcx), %eax
+. 972 0x12345678 5
+. 0F B6 04 08 C3
+
+movzbl (%rax,%rdi), %eax
+. 973 0x12345678 5
+. 0F B6 04 38 C3
+
+movzbl (%rax,%rdi), %edi
+. 974 0x12345678 5
+. 0F B6 3C 38 C3
+
+movzbl (%rax,%rdi), %esi
+. 975 0x12345678 5
+. 0F B6 34 38 C3
+
+movzbl (%rax,%rdi), %r9d
+. 976 0x12345678 6
+. 44 0F B6 0C 38 C3
+
+movzbl (%rax,%rdx), %eax
+. 977 0x12345678 5
+. 0F B6 04 10 C3
+
+movzbl (%rax,%rdx), %r10d
+. 978 0x12345678 6
+. 44 0F B6 14 10 C3
+
+movzbl (%rax,%rsi), %ecx
+. 979 0x12345678 5
+. 0F B6 0C 30 C3
+
+movzbl (%rax,%rsi), %edx
+. 980 0x12345678 5
+. 0F B6 14 30 C3
+
+movzbl (%rcx,%rsi), %eax
+. 981 0x12345678 5
+. 0F B6 04 31 C3
+
+movzbl (%rdx), %ecx
+. 982 0x12345678 4
+. 0F B6 0A C3
+
+movzbl (%rdx,%rsi), %eax
+. 983 0x12345678 5
+. 0F B6 04 32 C3
+
+movzbl (%rdx,%rsi), %edx
+. 984 0x12345678 5
+. 0F B6 14 32 C3
+
+movzbl (%rsp), %eax
+. 985 0x12345678 5
+. 0F B6 04 24 C3
+
+movzbl %sil, %esi
+. 986 0x12345678 5
+. 40 0F B6 F6 C3
+
+movzbq 1704(%rbp,%r10), %rdx
+. 987 0x12345678 10
+. 4A 0F B6 94 15 A8 06 00 00 C3
+
+movzbq 1704(%rbp,%rax), %rdx
+. 988 0x12345678 10
+. 48 0F B6 94 05 A8 06 00 00 C3
+
+movzbq (%rax,%r13), %rax
+. 989 0x12345678 6
+. 4A 0F B6 04 28 C3
+
+movzbq (%rax,%r8), %rsi
+. 990 0x12345678 6
+. 4A 0F B6 34 00 C3
+
+movzbq (%rax,%rcx), %rax
+. 991 0x12345678 6
+. 48 0F B6 04 08 C3
+
+movzbq (%rax,%rdx), %rax
+. 992 0x12345678 6
+. 48 0F B6 04 10 C3
+
+movzbw 37708(%rcx,%rdx), %dx
+. 993 0x12345678 10
+. 66 0F B6 94 11 4C 93 00 00 C3
+
+movzbw (%r8), %si
+. 994 0x12345678 6
+. 66 41 0F B6 30 C3
+
+movzwl 80(%rsp,%rax,2), %eax
+. 995 0x12345678 6
+. 0F B7 44 44 50 C3
+
+movzwl 86(%r15,%r8,2), %edx
+. 996 0x12345678 7
+. 43 0F B7 54 47 56 C3
+
+movzwl %ax, %ecx
+. 997 0x12345678 4
+. 0F B7 C8 C3
+
+movzwl %dx, %eax
+. 998 0x12345678 4
+. 0F B7 C2 C3
+
+movzwl (%r15,%rax,2), %eax
+. 999 0x12345678 6
+. 41 0F B7 04 47 C3
+
+movzwl (%rcx,%rax,2), %eax
+. 1000 0x12345678 5
+. 0F B7 04 41 C3
+
+movzwl (%rcx,%rax,2), %r10d
+. 1001 0x12345678 6
+. 44 0F B7 14 41 C3
+
+movzwl %si, %eax
+. 1002 0x12345678 4
+. 0F B7 C6 C3
+
+movzwq 10(%r15,%r8,2), %rax
+. 1003 0x12345678 7
+. 4B 0F B7 44 47 0A C3
+
+movzwq 10(%r15,%rax,2), %rax
+. 1004 0x12345678 7
+. 49 0F B7 44 47 0A C3
+
+movzwq 10(%r15,%rcx,2), %rax
+. 1005 0x12345678 7
+. 49 0F B7 44 4F 0A C3
+
+movzwq 12(%r15,%r8,2), %rax
+. 1006 0x12345678 7
+. 4B 0F B7 44 47 0C C3
+
+movzwq 12(%r15,%rax,2), %rax
+. 1007 0x12345678 7
+. 49 0F B7 44 47 0C C3
+
+movzwq 12(%r15,%rcx,2), %rax
+. 1008 0x12345678 7
+. 49 0F B7 44 4F 0C C3
+
+movzwq 14(%r15,%r8,2), %rax
+. 1009 0x12345678 7
+. 4B 0F B7 44 47 0E C3
+
+movzwq 14(%r15,%rax,2), %rax
+. 1010 0x12345678 7
+. 49 0F B7 44 47 0E C3
+
+movzwq 14(%r15,%rcx,2), %rax
+. 1011 0x12345678 7
+. 49 0F B7 44 4F 0E C3
+
+movzwq 16(%r15,%r8,2), %rax
+. 1012 0x12345678 7
+. 4B 0F B7 44 47 10 C3
+
+movzwq 16(%r15,%rax,2), %rax
+. 1013 0x12345678 7
+. 49 0F B7 44 47 10 C3
+
+movzwq 16(%r15,%rcx,2), %rax
+. 1014 0x12345678 7
+. 49 0F B7 44 4F 10 C3
+
+movzwq 18(%r15,%r8,2), %rax
+. 1015 0x12345678 7
+. 4B 0F B7 44 47 12 C3
+
+movzwq 18(%r15,%rax,2), %rax
+. 1016 0x12345678 7
+. 49 0F B7 44 47 12 C3
+
+movzwq 18(%r15,%rcx,2), %rax
+. 1017 0x12345678 7
+. 49 0F B7 44 4F 12 C3
+
+movzwq 20(%r15,%r8,2), %rax
+. 1018 0x12345678 7
+. 4B 0F B7 44 47 14 C3
+
+movzwq 20(%r15,%rax,2), %rax
+. 1019 0x12345678 7
+. 49 0F B7 44 47 14 C3
+
+movzwq 20(%r15,%rcx,2), %rax
+. 1020 0x12345678 7
+. 49 0F B7 44 4F 14 C3
+
+movzwq 22(%r15,%r8,2), %rax
+. 1021 0x12345678 7
+. 4B 0F B7 44 47 16 C3
+
+movzwq 22(%r15,%rax,2), %rax
+. 1022 0x12345678 7
+. 49 0F B7 44 47 16 C3
+
+movzwq 22(%r15,%rcx,2), %rax
+. 1023 0x12345678 7
+. 49 0F B7 44 4F 16 C3
+
+movzwq 24(%r15,%r8,2), %rax
+. 1024 0x12345678 7
+. 4B 0F B7 44 47 18 C3
+
+movzwq 24(%r15,%rax,2), %rax
+. 1025 0x12345678 7
+. 49 0F B7 44 47 18 C3
+
+movzwq 24(%r15,%rcx,2), %rax
+. 1026 0x12345678 7
+. 49 0F B7 44 4F 18 C3
+
+movzwq 26(%r15,%r8,2), %rax
+. 1027 0x12345678 7
+. 4B 0F B7 44 47 1A C3
+
+movzwq 26(%r15,%rax,2), %rax
+. 1028 0x12345678 7
+. 49 0F B7 44 47 1A C3
+
+movzwq 26(%r15,%rcx,2), %rax
+. 1029 0x12345678 7
+. 49 0F B7 44 4F 1A C3
+
+movzwq 28(%r15,%r8,2), %rax
+. 1030 0x12345678 7
+. 4B 0F B7 44 47 1C C3
+
+movzwq 28(%r15,%rax,2), %rax
+. 1031 0x12345678 7
+. 49 0F B7 44 47 1C C3
+
+movzwq 28(%r15,%rcx,2), %rax
+. 1032 0x12345678 7
+. 49 0F B7 44 4F 1C C3
+
+movzwq 2(%r15,%r8,2), %rax
+. 1033 0x12345678 7
+. 4B 0F B7 44 47 02 C3
+
+movzwq 2(%r15,%rax,2), %rax
+. 1034 0x12345678 7
+. 49 0F B7 44 47 02 C3
+
+movzwq 2(%r15,%rcx,2), %rax
+. 1035 0x12345678 7
+. 49 0F B7 44 4F 02 C3
+
+movzwq 30(%r15,%r8,2), %rax
+. 1036 0x12345678 7
+. 4B 0F B7 44 47 1E C3
+
+movzwq 30(%r15,%rax,2), %rax
+. 1037 0x12345678 7
+. 49 0F B7 44 47 1E C3
+
+movzwq 30(%r15,%rcx,2), %rax
+. 1038 0x12345678 7
+. 49 0F B7 44 4F 1E C3
+
+movzwq 32(%r15,%r8,2), %rax
+. 1039 0x12345678 7
+. 4B 0F B7 44 47 20 C3
+
+movzwq 32(%r15,%rax,2), %rax
+. 1040 0x12345678 7
+. 49 0F B7 44 47 20 C3
+
+movzwq 32(%r15,%rcx,2), %rax
+. 1041 0x12345678 7
+. 49 0F B7 44 4F 20 C3
+
+movzwq 34(%r15,%r8,2), %rax
+. 1042 0x12345678 7
+. 4B 0F B7 44 47 22 C3
+
+movzwq 34(%r15,%rax,2), %rax
+. 1043 0x12345678 7
+. 49 0F B7 44 47 22 C3
+
+movzwq 34(%r15,%rcx,2), %rax
+. 1044 0x12345678 7
+. 49 0F B7 44 4F 22 C3
+
+movzwq 36(%r15,%r8,2), %rax
+. 1045 0x12345678 7
+. 4B 0F B7 44 47 24 C3
+
+movzwq 36(%r15,%rax,2), %rax
+. 1046 0x12345678 7
+. 49 0F B7 44 47 24 C3
+
+movzwq 36(%r15,%rcx,2), %rax
+. 1047 0x12345678 7
+. 49 0F B7 44 4F 24 C3
+
+movzwq 38(%r15,%r8,2), %rax
+. 1048 0x12345678 7
+. 4B 0F B7 44 47 26 C3
+
+movzwq 38(%r15,%rax,2), %rax
+. 1049 0x12345678 7
+. 49 0F B7 44 47 26 C3
+
+movzwq 38(%r15,%rcx,2), %rax
+. 1050 0x12345678 7
+. 49 0F B7 44 4F 26 C3
+
+movzwq 40(%r15,%r8,2), %rax
+. 1051 0x12345678 7
+. 4B 0F B7 44 47 28 C3
+
+movzwq 40(%r15,%rax,2), %rax
+. 1052 0x12345678 7
+. 49 0F B7 44 47 28 C3
+
+movzwq 40(%r15,%rcx,2), %rax
+. 1053 0x12345678 7
+. 49 0F B7 44 4F 28 C3
+
+movzwq 42(%r15,%r8,2), %rax
+. 1054 0x12345678 7
+. 4B 0F B7 44 47 2A C3
+
+movzwq 42(%r15,%rax,2), %rax
+. 1055 0x12345678 7
+. 49 0F B7 44 47 2A C3
+
+movzwq 42(%r15,%rcx,2), %rax
+. 1056 0x12345678 7
+. 49 0F B7 44 4F 2A C3
+
+movzwq 44(%r15,%r8,2), %rax
+. 1057 0x12345678 7
+. 4B 0F B7 44 47 2C C3
+
+movzwq 44(%r15,%rax,2), %rax
+. 1058 0x12345678 7
+. 49 0F B7 44 47 2C C3
+
+movzwq 44(%r15,%rcx,2), %rax
+. 1059 0x12345678 7
+. 49 0F B7 44 4F 2C C3
+
+movzwq 46(%r15,%r8,2), %rax
+. 1060 0x12345678 7
+. 4B 0F B7 44 47 2E C3
+
+movzwq 46(%r15,%rax,2), %rax
+. 1061 0x12345678 7
+. 49 0F B7 44 47 2E C3
+
+movzwq 46(%r15,%rcx,2), %rax
+. 1062 0x12345678 7
+. 49 0F B7 44 4F 2E C3
+
+movzwq 48(%r15,%r8,2), %rax
+. 1063 0x12345678 7
+. 4B 0F B7 44 47 30 C3
+
+movzwq 48(%r15,%rax,2), %rax
+. 1064 0x12345678 7
+. 49 0F B7 44 47 30 C3
+
+movzwq 48(%r15,%rcx,2), %rax
+. 1065 0x12345678 7
+. 49 0F B7 44 4F 30 C3
+
+movzwq 4(%r15,%r8,2), %rax
+. 1066 0x12345678 7
+. 4B 0F B7 44 47 04 C3
+
+movzwq 4(%r15,%rax,2), %rax
+. 1067 0x12345678 7
+. 49 0F B7 44 47 04 C3
+
+movzwq 4(%r15,%rcx,2), %rax
+. 1068 0x12345678 7
+. 49 0F B7 44 4F 04 C3
+
+movzwq 50(%r15,%r8,2), %rax
+. 1069 0x12345678 7
+. 4B 0F B7 44 47 32 C3
+
+movzwq 50(%r15,%rax,2), %rax
+. 1070 0x12345678 7
+. 49 0F B7 44 47 32 C3
+
+movzwq 50(%r15,%rcx,2), %rax
+. 1071 0x12345678 7
+. 49 0F B7 44 4F 32 C3
+
+movzwq 52(%r15,%r8,2), %rax
+. 1072 0x12345678 7
+. 4B 0F B7 44 47 34 C3
+
+movzwq 52(%r15,%rax,2), %rax
+. 1073 0x12345678 7
+. 49 0F B7 44 47 34 C3
+
+movzwq 52(%r15,%rcx,2), %rax
+. 1074 0x12345678 7
+. 49 0F B7 44 4F 34 C3
+
+movzwq 54(%r15,%r8,2), %rax
+. 1075 0x12345678 7
+. 4B 0F B7 44 47 36 C3
+
+movzwq 54(%r15,%rax,2), %rax
+. 1076 0x12345678 7
+. 49 0F B7 44 47 36 C3
+
+movzwq 54(%r15,%rcx,2), %rax
+. 1077 0x12345678 7
+. 49 0F B7 44 4F 36 C3
+
+movzwq 56(%r15,%r8,2), %rax
+. 1078 0x12345678 7
+. 4B 0F B7 44 47 38 C3
+
+movzwq 56(%r15,%rax,2), %rax
+. 1079 0x12345678 7
+. 49 0F B7 44 47 38 C3
+
+movzwq 56(%r15,%rcx,2), %rax
+. 1080 0x12345678 7
+. 49 0F B7 44 4F 38 C3
+
+movzwq 58(%r15,%r8,2), %rax
+. 1081 0x12345678 7
+. 4B 0F B7 44 47 3A C3
+
+movzwq 58(%r15,%rax,2), %rax
+. 1082 0x12345678 7
+. 49 0F B7 44 47 3A C3
+
+movzwq 58(%r15,%rcx,2), %rax
+. 1083 0x12345678 7
+. 49 0F B7 44 4F 3A C3
+
+movzwq 60(%r15,%r8,2), %rax
+. 1084 0x12345678 7
+. 4B 0F B7 44 47 3C C3
+
+movzwq 60(%r15,%rax,2), %rax
+. 1085 0x12345678 7
+. 49 0F B7 44 47 3C C3
+
+movzwq 60(%r15,%rcx,2), %rax
+. 1086 0x12345678 7
+. 49 0F B7 44 4F 3C C3
+
+movzwq 62(%r15,%r8,2), %rax
+. 1087 0x12345678 7
+. 4B 0F B7 44 47 3E C3
+
+movzwq 62(%r15,%rax,2), %rax
+. 1088 0x12345678 7
+. 49 0F B7 44 47 3E C3
+
+movzwq 62(%r15,%rcx,2), %rax
+. 1089 0x12345678 7
+. 49 0F B7 44 4F 3E C3
+
+movzwq 64(%r15,%r8,2), %rax
+. 1090 0x12345678 7
+. 4B 0F B7 44 47 40 C3
+
+movzwq 64(%r15,%rax,2), %rax
+. 1091 0x12345678 7
+. 49 0F B7 44 47 40 C3
+
+movzwq 64(%r15,%rcx,2), %rax
+. 1092 0x12345678 7
+. 49 0F B7 44 4F 40 C3
+
+movzwq 66(%r15,%r8,2), %rax
+. 1093 0x12345678 7
+. 4B 0F B7 44 47 42 C3
+
+movzwq 66(%r15,%rax,2), %rax
+. 1094 0x12345678 7
+. 49 0F B7 44 47 42 C3
+
+movzwq 66(%r15,%rcx,2), %rax
+. 1095 0x12345678 7
+. 49 0F B7 44 4F 42 C3
+
+movzwq 68(%r15,%r8,2), %rax
+. 1096 0x12345678 7
+. 4B 0F B7 44 47 44 C3
+
+movzwq 68(%r15,%rax,2), %rax
+. 1097 0x12345678 7
+. 49 0F B7 44 47 44 C3
+
+movzwq 68(%r15,%rcx,2), %rax
+. 1098 0x12345678 7
+. 49 0F B7 44 4F 44 C3
+
+movzwq 6(%r15,%r8,2), %rax
+. 1099 0x12345678 7
+. 4B 0F B7 44 47 06 C3
+
+movzwq 6(%r15,%rax,2), %rax
+. 1100 0x12345678 7
+. 49 0F B7 44 47 06 C3
+
+movzwq 6(%r15,%rcx,2), %rax
+. 1101 0x12345678 7
+. 49 0F B7 44 4F 06 C3
+
+movzwq 70(%r15,%r8,2), %rax
+. 1102 0x12345678 7
+. 4B 0F B7 44 47 46 C3
+
+movzwq 70(%r15,%rax,2), %rax
+. 1103 0x12345678 7
+. 49 0F B7 44 47 46 C3
+
+movzwq 70(%r15,%rcx,2), %rax
+. 1104 0x12345678 7
+. 49 0F B7 44 4F 46 C3
+
+movzwq 72(%r15,%r8,2), %rax
+. 1105 0x12345678 7
+. 4B 0F B7 44 47 48 C3
+
+movzwq 72(%r15,%rax,2), %rax
+. 1106 0x12345678 7
+. 49 0F B7 44 47 48 C3
+
+movzwq 72(%r15,%rcx,2), %rax
+. 1107 0x12345678 7
+. 49 0F B7 44 4F 48 C3
+
+movzwq 74(%r15,%r8,2), %rax
+. 1108 0x12345678 7
+. 4B 0F B7 44 47 4A C3
+
+movzwq 74(%r15,%rax,2), %rax
+. 1109 0x12345678 7
+. 49 0F B7 44 47 4A C3
+
+movzwq 74(%r15,%rcx,2), %rax
+. 1110 0x12345678 7
+. 49 0F B7 44 4F 4A C3
+
+movzwq 76(%r15,%r8,2), %rax
+. 1111 0x12345678 7
+. 4B 0F B7 44 47 4C C3
+
+movzwq 76(%r15,%rax,2), %rax
+. 1112 0x12345678 7
+. 49 0F B7 44 47 4C C3
+
+movzwq 76(%r15,%rcx,2), %rax
+. 1113 0x12345678 7
+. 49 0F B7 44 4F 4C C3
+
+movzwq 78(%r15,%r8,2), %rax
+. 1114 0x12345678 7
+. 4B 0F B7 44 47 4E C3
+
+movzwq 78(%r15,%rax,2), %rax
+. 1115 0x12345678 7
+. 49 0F B7 44 47 4E C3
+
+movzwq 78(%r15,%rcx,2), %rax
+. 1116 0x12345678 7
+. 49 0F B7 44 4F 4E C3
+
+movzwq 80(%r15,%r8,2), %rax
+. 1117 0x12345678 7
+. 4B 0F B7 44 47 50 C3
+
+movzwq 80(%r15,%rax,2), %rax
+. 1118 0x12345678 7
+. 49 0F B7 44 47 50 C3
+
+movzwq 80(%r15,%rcx,2), %rax
+. 1119 0x12345678 7
+. 49 0F B7 44 4F 50 C3
+
+movzwq 82(%r15,%r8,2), %rax
+. 1120 0x12345678 7
+. 4B 0F B7 44 47 52 C3
+
+movzwq 82(%r15,%rax,2), %rax
+. 1121 0x12345678 7
+. 49 0F B7 44 47 52 C3
+
+movzwq 82(%r15,%rcx,2), %rax
+. 1122 0x12345678 7
+. 49 0F B7 44 4F 52 C3
+
+movzwq 84(%r15,%r8,2), %rax
+. 1123 0x12345678 7
+. 4B 0F B7 44 47 54 C3
+
+movzwq 84(%r15,%rax,2), %rax
+. 1124 0x12345678 7
+. 49 0F B7 44 47 54 C3
+
+movzwq 84(%r15,%rcx,2), %rax
+. 1125 0x12345678 7
+. 49 0F B7 44 4F 54 C3
+
+movzwq 86(%r15,%rax,2), %rax
+. 1126 0x12345678 7
+. 49 0F B7 44 47 56 C3
+
+movzwq 86(%r15,%rcx,2), %rax
+. 1127 0x12345678 7
+. 49 0F B7 44 4F 56 C3
+
+movzwq 88(%r15,%r8,2), %rax
+. 1128 0x12345678 7
+. 4B 0F B7 44 47 58 C3
+
+movzwq 88(%r15,%rax,2), %rax
+. 1129 0x12345678 7
+. 49 0F B7 44 47 58 C3
+
+movzwq 88(%r15,%rcx,2), %rax
+. 1130 0x12345678 7
+. 49 0F B7 44 4F 58 C3
+
+movzwq 8(%r15,%r8,2), %rax
+. 1131 0x12345678 7
+. 4B 0F B7 44 47 08 C3
+
+movzwq 8(%r15,%rax,2), %rax
+. 1132 0x12345678 7
+. 49 0F B7 44 47 08 C3
+
+movzwq 8(%r15,%rcx,2), %rax
+. 1133 0x12345678 7
+. 49 0F B7 44 4F 08 C3
+
+movzwq 90(%r15,%r8,2), %rax
+. 1134 0x12345678 7
+. 4B 0F B7 44 47 5A C3
+
+movzwq 90(%r15,%rax,2), %rax
+. 1135 0x12345678 7
+. 49 0F B7 44 47 5A C3
+
+movzwq 90(%r15,%rcx,2), %rax
+. 1136 0x12345678 7
+. 49 0F B7 44 4F 5A C3
+
+movzwq 92(%r15,%r8,2), %rax
+. 1137 0x12345678 7
+. 4B 0F B7 44 47 5C C3
+
+movzwq 92(%r15,%rax,2), %rax
+. 1138 0x12345678 7
+. 49 0F B7 44 47 5C C3
+
+movzwq 92(%r15,%rcx,2), %rax
+. 1139 0x12345678 7
+. 49 0F B7 44 4F 5C C3
+
+movzwq 94(%r15,%r8,2), %rax
+. 1140 0x12345678 7
+. 4B 0F B7 44 47 5E C3
+
+movzwq 94(%r15,%rax,2), %rax
+. 1141 0x12345678 7
+. 49 0F B7 44 47 5E C3
+
+movzwq 94(%r15,%rcx,2), %rax
+. 1142 0x12345678 7
+. 49 0F B7 44 4F 5E C3
+
+movzwq 96(%r15,%r8,2), %rax
+. 1143 0x12345678 7
+. 4B 0F B7 44 47 60 C3
+
+movzwq 96(%r15,%rax,2), %rax
+. 1144 0x12345678 7
+. 49 0F B7 44 47 60 C3
+
+movzwq 96(%r15,%rcx,2), %rax
+. 1145 0x12345678 7
+. 49 0F B7 44 4F 60 C3
+
+movzwq 98(%r15,%r8,2), %rax
+. 1146 0x12345678 7
+. 4B 0F B7 44 47 62 C3
+
+movzwq 98(%r15,%rax,2), %rax
+. 1147 0x12345678 7
+. 49 0F B7 44 47 62 C3
+
+movzwq 98(%r15,%rcx,2), %rax
+. 1148 0x12345678 7
+. 49 0F B7 44 4F 62 C3
+
+movzwq (%r15,%r8,2), %rdx
+. 1149 0x12345678 6
+. 4B 0F B7 14 47 C3
+
+movzwq (%r15,%rax,2), %rax
+. 1150 0x12345678 6
+. 49 0F B7 04 47 C3
+
+movzwq (%r15,%rax,2), %rcx
+. 1151 0x12345678 6
+. 49 0F B7 0C 47 C3
+
+movzwq (%r15,%rcx,2), %rax
+. 1152 0x12345678 6
+. 49 0F B7 04 4F C3
+
+negl %ecx
+. 1153 0x12345678 3
+. F7 D9 C3
+
+notl %edx
+. 1154 0x12345678 3
+. F7 D2 C3
+
+orl $2097152, (%rbp,%rax,4)
+. 1155 0x12345678 9
+. 81 4C 85 00 00 00 20 00 C3
+
+orl %eax, 640(%rsi)
+. 1156 0x12345678 7
+. 09 86 80 02 00 00 C3
+
+orl %eax, %esi
+. 1157 0x12345678 3
+. 09 C6 C3
+
+orl %eax, %r12d
+. 1158 0x12345678 4
+. 41 09 C4 C3
+
+orl %ebp, 640(%rsi)
+. 1159 0x12345678 7
+. 09 AE 80 02 00 00 C3
+
+orl %ebp, (%rbx,%rax,4)
+. 1160 0x12345678 4
+. 09 2C 83 C3
+
+orl %edi, 640(%rsi)
+. 1161 0x12345678 7
+. 09 BE 80 02 00 00 C3
+
+orl %edx, 640(%rsi)
+. 1162 0x12345678 7
+. 09 96 80 02 00 00 C3
+
+orl %edx, %eax
+. 1163 0x12345678 3
+. 09 D0 C3
+
+orl %esi, 640(%r11)
+. 1164 0x12345678 8
+. 41 09 B3 80 02 00 00 C3
+
+orl %esi, 640(%rdi)
+. 1165 0x12345678 7
+. 09 B7 80 02 00 00 C3
+
+orl %r10d, 640(%rsi)
+. 1166 0x12345678 8
+. 44 09 96 80 02 00 00 C3
+
+orl %r8d, 640(%rsi)
+. 1167 0x12345678 8
+. 44 09 86 80 02 00 00 C3
+
+orl %r8d, (%rbx,%rax,4)
+. 1168 0x12345678 5
+. 44 09 04 83 C3
+
+orl %r9d, 640(%rsi)
+. 1169 0x12345678 8
+. 44 09 8E 80 02 00 00 C3
+
+popq %r12
+. 1170 0x12345678 3
+. 41 5C C3
+
+popq %r13
+. 1171 0x12345678 3
+. 41 5D C3
+
+popq %r14
+. 1172 0x12345678 3
+. 41 5E C3
+
+popq %r15
+. 1173 0x12345678 3
+. 41 5F C3
+
+popq %rbp
+. 1174 0x12345678 2
+. 5D C3
+
+popq %rbx
+. 1175 0x12345678 2
+. 5B C3
+
+pushq %r12
+. 1176 0x12345678 3
+. 41 54 C3
+
+pushq %r13
+. 1177 0x12345678 3
+. 41 55 C3
+
+pushq %r14
+. 1178 0x12345678 3
+. 41 56 C3
+
+pushq %r15
+. 1179 0x12345678 3
+. 41 57 C3
+
+pushq %rbp
+. 1180 0x12345678 2
+. 55 C3
+
+pushq %rbx
+. 1181 0x12345678 2
+. 53 C3
+
+rep ; ret
+. 1182 0x12345678 2
+. F3 C3
+
+ret
+. 1183 0x12345678 1
+. C3
+
+roll %eax
+. 1184 0x12345678 3
+. D1 C0 C3
+
+sall $16, %eax
+. 1185 0x12345678 4
+. C1 E0 10 C3
+
+sall 16(%rsp)
+. 1186 0x12345678 5
+. D1 64 24 10 C3
+
+sall $4, %edx
+. 1187 0x12345678 4
+. C1 E2 04 C3
+
+sall $4, %r10d
+. 1188 0x12345678 5
+. 41 C1 E2 04 C3
+
+sall $8, 640(%r11)
+. 1189 0x12345678 9
+. 41 C1 A3 80 02 00 00 08 C3
+
+sall $8, 640(%rdi)
+. 1190 0x12345678 8
+. C1 A7 80 02 00 00 08 C3
+
+sall $8, 640(%rsi)
+. 1191 0x12345678 8
+. C1 A6 80 02 00 00 08 C3
+
+sall $8, %eax
+. 1192 0x12345678 4
+. C1 E0 08 C3
+
+sall $8, %esi
+. 1193 0x12345678 4
+. C1 E6 08 C3
+
+sall $8, %r12d
+. 1194 0x12345678 5
+. 41 C1 E4 08 C3
+
+sall %cl, %eax
+. 1195 0x12345678 3
+. D3 E0 C3
+
+sall %cl, %ebp
+. 1196 0x12345678 3
+. D3 E5 C3
+
+sall %cl, %edi
+. 1197 0x12345678 3
+. D3 E7 C3
+
+sall %cl, %edx
+. 1198 0x12345678 3
+. D3 E2 C3
+
+sall %cl, %esi
+. 1199 0x12345678 3
+. D3 E6 C3
+
+sall %cl, %r10d
+. 1200 0x12345678 4
+. 41 D3 E2 C3
+
+sall %cl, %r8d
+. 1201 0x12345678 4
+. 41 D3 E0 C3
+
+sall %cl, %r9d
+. 1202 0x12345678 4
+. 41 D3 E1 C3
+
+salq $10, %rax
+. 1203 0x12345678 5
+. 48 C1 E0 0A C3
+
+salq $10, %rdx
+. 1204 0x12345678 5
+. 48 C1 E2 0A C3
+
+salq $4, %rax
+. 1205 0x12345678 5
+. 48 C1 E0 04 C3
+
+salq $4, %rdx
+. 1206 0x12345678 5
+. 48 C1 E2 04 C3
+
+salq $4, %rsi
+. 1207 0x12345678 5
+. 48 C1 E6 04 C3
+
+salq $7, %rax
+. 1208 0x12345678 5
+. 48 C1 E0 07 C3
+
+salq $7, %rdi
+. 1209 0x12345678 5
+. 48 C1 E7 07 C3
+
+salq $7, %rdx
+. 1210 0x12345678 5
+. 48 C1 E2 07 C3
+
+sarl $31, %ecx
+. 1211 0x12345678 4
+. C1 F9 1F C3
+
+sarl $3, %ecx
+. 1212 0x12345678 4
+. C1 F9 03 C3
+
+sarl $5, %eax
+. 1213 0x12345678 4
+. C1 F8 05 C3
+
+sarl $5, %edx
+. 1214 0x12345678 4
+. C1 FA 05 C3
+
+sarl $8, %ecx
+. 1215 0x12345678 4
+. C1 F9 08 C3
+
+sarl $8, %edx
+. 1216 0x12345678 4
+. C1 FA 08 C3
+
+sarl %cl, %eax
+. 1217 0x12345678 3
+. D3 F8 C3
+
+sarl %cl, %esi
+. 1218 0x12345678 3
+. D3 FE C3
+
+sarl %eax
+. 1219 0x12345678 3
+. D1 F8 C3
+
+sarl %edi
+. 1220 0x12345678 3
+. D1 FF C3
+
+seta %al
+. 1221 0x12345678 4
+. 0F 97 C0 C3
+
+sete %al
+. 1222 0x12345678 4
+. 0F 94 C0 C3
+
+setg %al
+. 1223 0x12345678 4
+. 0F 9F C0 C3
+
+setg %dl
+. 1224 0x12345678 4
+. 0F 9F C2 C3
+
+setge %dl
+. 1225 0x12345678 4
+. 0F 9D C2 C3
+
+setle %al
+. 1226 0x12345678 4
+. 0F 9E C0 C3
+
+setle %dl
+. 1227 0x12345678 4
+. 0F 9E C2 C3
+
+setne %al
+. 1228 0x12345678 4
+. 0F 95 C0 C3
+
+shrl $16, %ecx
+. 1229 0x12345678 4
+. C1 E9 10 C3
+
+shrl $16, %edi
+. 1230 0x12345678 4
+. C1 EF 10 C3
+
+shrl $16, %esi
+. 1231 0x12345678 4
+. C1 EE 10 C3
+
+shrl $16, %r8d
+. 1232 0x12345678 5
+. 41 C1 E8 10 C3
+
+shrl $24, %r8d
+. 1233 0x12345678 5
+. 41 C1 E8 18 C3
+
+shrl $31, %eax
+. 1234 0x12345678 4
+. C1 E8 1F C3
+
+shrl $31, %edx
+. 1235 0x12345678 4
+. C1 EA 1F C3
+
+shrl %eax
+. 1236 0x12345678 3
+. D1 E8 C3
+
+shrq $32, %rax
+. 1237 0x12345678 5
+. 48 C1 E8 20 C3
+
+shrq $32, %rcx
+. 1238 0x12345678 5
+. 48 C1 E9 20 C3
+
+shrw $8, %dx
+. 1239 0x12345678 5
+. 66 C1 EA 08 C3
+
+subl 16(%rsp), %edx
+. 1240 0x12345678 5
+. 2B 54 24 10 C3
+
+subl 16(%rsp), %esi
+. 1241 0x12345678 5
+. 2B 74 24 10 C3
+
+subl 20(%rsp), %eax
+. 1242 0x12345678 5
+. 2B 44 24 14 C3
+
+subl 20(%rsp), %edx
+. 1243 0x12345678 5
+. 2B 54 24 14 C3
+
+subl 24(%rsp), %r8d
+. 1244 0x12345678 6
+. 44 2B 44 24 18 C3
+
+subl 28(%rsp), %eax
+. 1245 0x12345678 5
+. 2B 44 24 1C C3
+
+subl $4, %ebx
+. 1246 0x12345678 4
+. 83 EB 04 C3
+
+subl $5, %ecx
+. 1247 0x12345678 4
+. 83 E9 05 C3
+
+subl 672(%rbp,%rax,4), %r13d
+. 1248 0x12345678 9
+. 44 2B AC 85 A0 02 00 00 C3
+
+subl $8, 644(%r11)
+. 1249 0x12345678 9
+. 41 83 AB 84 02 00 00 08 C3
+
+subl $8, 644(%rdi)
+. 1250 0x12345678 8
+. 83 AF 84 02 00 00 08 C3
+
+subl $8, 644(%rsi)
+. 1251 0x12345678 8
+. 83 AE 84 02 00 00 08 C3
+
+subl $8, %r11d
+. 1252 0x12345678 5
+. 41 83 EB 08 C3
+
+subl %eax, %ecx
+. 1253 0x12345678 3
+. 29 C1 C3
+
+subl %eax, %edx
+. 1254 0x12345678 3
+. 29 C2 C3
+
+subl %eax, %esi
+. 1255 0x12345678 3
+. 29 C6 C3
+
+subl %ebp, %eax
+. 1256 0x12345678 3
+. 29 E8 C3
+
+subl %ebx, %edx
+. 1257 0x12345678 3
+. 29 DA C3
+
+subl %ecx, %eax
+. 1258 0x12345678 3
+. 29 C8 C3
+
+subl %ecx, %r11d
+. 1259 0x12345678 4
+. 41 29 CB C3
+
+subl %ecx, %r9d
+. 1260 0x12345678 4
+. 41 29 C9 C3
+
+subl %edi, %eax
+. 1261 0x12345678 3
+. 29 F8 C3
+
+subl %edi, %ecx
+. 1262 0x12345678 3
+. 29 F9 C3
+
+subl %edi, %edx
+. 1263 0x12345678 3
+. 29 FA C3
+
+subl %edi, %esi
+. 1264 0x12345678 3
+. 29 FE C3
+
+subl %edi, %r8d
+. 1265 0x12345678 4
+. 41 29 F8 C3
+
+subl %edx, %ebx
+. 1266 0x12345678 3
+. 29 D3 C3
+
+subl %esi, %eax
+. 1267 0x12345678 3
+. 29 F0 C3
+
+subl %esi, %edx
+. 1268 0x12345678 3
+. 29 F2 C3
+
+subl %esi, %r9d
+. 1269 0x12345678 4
+. 41 29 F1 C3
+
+subl %r10d, %eax
+. 1270 0x12345678 4
+. 44 29 D0 C3
+
+subl %r10d, %ecx
+. 1271 0x12345678 4
+. 44 29 D1 C3
+
+subl %r11d, %ecx
+. 1272 0x12345678 4
+. 44 29 D9 C3
+
+subl %r11d, %edi
+. 1273 0x12345678 4
+. 44 29 DF C3
+
+subl %r12d, %eax
+. 1274 0x12345678 4
+. 44 29 E0 C3
+
+subl %r12d, %ecx
+. 1275 0x12345678 4
+. 44 29 E1 C3
+
+subl %r12d, %edx
+. 1276 0x12345678 4
+. 44 29 E2 C3
+
+subl %r13d, 12(%rsp)
+. 1277 0x12345678 6
+. 44 29 6C 24 0C C3
+
+subl %r13d, %eax
+. 1278 0x12345678 4
+. 44 29 E8 C3
+
+subl %r13d, %edx
+. 1279 0x12345678 4
+. 44 29 EA C3
+
+subl %r14d, %edi
+. 1280 0x12345678 4
+. 44 29 F7 C3
+
+subl %r15d, %eax
+. 1281 0x12345678 4
+. 44 29 F8 C3
+
+subl %r15d, %edx
+. 1282 0x12345678 4
+. 44 29 FA C3
+
+subl %r15d, %r9d
+. 1283 0x12345678 4
+. 45 29 F9 C3
+
+subl %r8d, %eax
+. 1284 0x12345678 4
+. 44 29 C0 C3
+
+subl %r8d, %ecx
+. 1285 0x12345678 4
+. 44 29 C1 C3
+
+subl %r8d, %edx
+. 1286 0x12345678 4
+. 44 29 C2 C3
+
+subl %r9d, %ecx
+. 1287 0x12345678 4
+. 44 29 C9 C3
+
+subl (%rbp,%rax), %edi
+. 1288 0x12345678 5
+. 2B 7C 05 00 C3
+
+subl (%rbp,%rdx), %eax
+. 1289 0x12345678 5
+. 2B 44 15 00 C3
+
+subq $104, %rsp
+. 1290 0x12345678 5
+. 48 83 EC 68 C3
+
+subq $1304, %rsp
+. 1291 0x12345678 8
+. 48 81 EC 18 05 00 00 C3
+
+subq $2104, %rsp
+. 1292 0x12345678 8
+. 48 81 EC 38 08 00 00 C3
+
+subq $264, %rsp
+. 1293 0x12345678 8
+. 48 81 EC 08 01 00 00 C3
+
+subq $3400, %rsp
+. 1294 0x12345678 8
+. 48 81 EC 48 0D 00 00 C3
+
+subq $56, %rsp
+. 1295 0x12345678 5
+. 48 83 EC 38 C3
+
+subq $72, %rsp
+. 1296 0x12345678 5
+. 48 83 EC 48 C3
+
+subq $824, %rsp
+. 1297 0x12345678 8
+. 48 81 EC 38 03 00 00 C3
+
+subq $8, %rsp
+. 1298 0x12345678 5
+. 48 83 EC 08 C3
+
+testb $1, %al
+. 1299 0x12345678 3
+. A8 01 C3
+
+testb $1, %dil
+. 1300 0x12345678 5
+. 40 F6 C7 01 C3
+
+testb $1, %dl
+. 1301 0x12345678 4
+. F6 C2 01 C3
+
+testb %al, %al
+. 1302 0x12345678 3
+. 84 C0 C3
+
+testb %bpl, %bpl
+. 1303 0x12345678 4
+. 40 84 ED C3
+
+testl $2097152, %eax
+. 1304 0x12345678 6
+. A9 00 00 20 00 C3
+
+testl %ebp, %ebp
+. 1305 0x12345678 3
+. 85 ED C3
+
+testl %ebp, (%rbx,%rax,4)
+. 1306 0x12345678 4
+. 85 2C 83 C3
+
+testl %ebx, %ebx
+. 1307 0x12345678 3
+. 85 DB C3
+
+testl %ecx, %ecx
+. 1308 0x12345678 3
+. 85 C9 C3
+
+testl %edi, %edi
+. 1309 0x12345678 3
+. 85 FF C3
+
+testl %edi, (%rbx,%rax,4)
+. 1310 0x12345678 4
+. 85 3C 83 C3
+
+testl %edx, (%rbx,%rax,4)
+. 1311 0x12345678 4
+. 85 14 83 C3
+
+testl %esi, %esi
+. 1312 0x12345678 3
+. 85 F6 C3
+
+testl %esi, (%rbx,%rax,4)
+. 1313 0x12345678 4
+. 85 34 83 C3
+
+testl %r11d, %r11d
+. 1314 0x12345678 4
+. 45 85 DB C3
+
+testl %r13d, %r13d
+. 1315 0x12345678 4
+. 45 85 ED C3
+
+testl %r14d, %r14d
+. 1316 0x12345678 4
+. 45 85 F6 C3
+
+testl %r8d, %r8d
+. 1317 0x12345678 4
+. 45 85 C0 C3
+
+testl %r8d, (%rbx,%rax,4)
+. 1318 0x12345678 5
+. 44 85 04 83 C3
+
+xorl %edx, %eax
+. 1319 0x12345678 3
+. 31 D0 C3
+
+nop
+. 1320 0x12345678 2
+. 90 C3
+
diff --git a/VEX/orig_amd64/test1.sorted b/VEX/orig_amd64/test1.sorted
new file mode 100644
index 0000000..993949b
--- /dev/null
+++ b/VEX/orig_amd64/test1.sorted
@@ -0,0 +1,1318 @@
+0384941C040000            		addl	1052(%rsp,%rdx,4), %eax
+034B6C                     		addl	108(%rbx), %ecx
+83C002                     		addl	$2, %eax
+83C202                     		addl	$2, %edx
+83C120                     		addl	$32, %ecx
+83C620                     		addl	$32, %esi
+4183C431                   		addl	$49, %r12d
+83C104                     		addl	$4, %ecx
+034495FC                   		addl	-4(%rbp,%rdx,4), %eax
+0388B8C90000              		addl	51640(%rax), %ecx
+038C05B8C90000            		addl	51640(%rbp,%rax), %ecx
+038AB8C90000              		addl	51640(%rdx), %ecx
+03B0BCC90000              		addl	51644(%rax), %esi
+03B2BCC90000              		addl	51644(%rdx), %esi
+03B8C0C90000              		addl	51648(%rax), %edi
+03BAC0C90000              		addl	51648(%rdx), %edi
+83C005                     		addl	$5, %eax
+4183C605                   		addl	$5, %r14d
+4403AC85A0020000          		addl	672(%rbp,%rax,4), %r13d
+01D8                       		addl	%ebx, %eax
+4101C9                     		addl	%ecx, %r9d
+01D0                       		addl	%edx, %eax
+4401E0                     		addl	%r12d, %eax
+4501E5                     		addl	%r12d, %r13d
+4401E8                     		addl	%r13d, %eax
+037C8500                   		addl	(%rbp,%rax,4), %edi
+4883C468                   		addq	$104, %rsp
+4881C418050000            		addq	$1304, %rsp
+4881C438080000            		addq	$2104, %rsp
+4881C408010000            		addq	$264, %rsp
+48034320                   		addq	32(%rbx), %rax
+4881C4480D0000            		addq	$3400, %rsp
+4883C438                   		addq	$56, %rsp
+4883C448                   		addq	$72, %rsp
+4881C438030000            		addq	$824, %rsp
+4883C408                   		addq	$8, %rsp
+4801C0                     		addq	%rax, %rax
+4801C7                     		addq	%rax, %rdi
+4801C2                     		addq	%rax, %rdx
+4801E8                     		addq	%rbp, %rax
+4801EA                     		addq	%rbp, %rdx
+4801EE                     		addq	%rbp, %rsi
+4801C8                     		addq	%rcx, %rax
+4801D0                     		addq	%rdx, %rax
+4801D2                     		addq	%rdx, %rdx
+4801F2                     		addq	%rsi, %rdx
+6601544450                		addw	%dx, 80(%rsp,%rax,2)
+25FFFFDFFF                		andl	$-2097153, %eax
+81E2FFFFDFFF              		andl	$-2097153, %edx
+4181E4FFFFDFFF            		andl	$-2097153, %r12d
+4181E7FFFFDFFF            		andl	$-2097153, %r15d
+4181E0FFFFDFFF            		andl	$-2097153, %r8d
+4181E0FF000000            		andl	$255, %r8d
+83E0FE                     		andl	$-2, %eax
+83E11F                     		andl	$31, %ecx
+25FF7F0000                		andl	$32767, %eax
+21D0                       		andl	%edx, %eax
+211483                     		andl	%edx, (%rbx,%rax,4)
+21F2                       		andl	%esi, %edx
+E862160000                		call	lalala
+99                         		cltd
+4898                       		cltq
+0F47D1                     		cmova	%ecx, %edx
+0F43F8                     		cmovae	%eax, %edi
+0F43F0                     		cmovae	%eax, %esi
+0F4FC8                     		cmovg	%eax, %ecx
+0F4FF8                     		cmovg	%eax, %edi
+0F4FF0                     		cmovg	%eax, %esi
+440F4FE0                   		cmovg	%eax, %r12d
+0F4DC8                     		cmovge	%eax, %ecx
+410F4FC5                   		cmovg	%r13d, %eax
+440F4CE8                   		cmovl	%eax, %r13d
+0F4EC8                     		cmovle	%eax, %ecx
+0F4EF0                     		cmovle	%eax, %esi
+0F45D0                     		cmovne	%eax, %edx
+0F45D7                     		cmovne	%edi, %edx
+80BC058000000000          		cmpb	$0, 128(%rbp,%rax)
+80BC178000000000          		cmpb	$0, 128(%rdi,%rdx)
+80BC05FA4C000000          		cmpb	$0, 19706(%rbp,%rax)
+80BC044008000000          		cmpb	$0, 2112(%rsp,%rax)
+80BC344008000000          		cmpb	$0, 2112(%rsp,%rsi)
+807C042000                		cmpb	$0, 32(%rsp,%rax)
+4138C2                     		cmpb	%al, %r10b
+4038CE                     		cmpb	%cl, %sil
+38D1                       		cmpb	%dl, %cl
+4038F1                     		cmpb	%sil, %cl
+4038F2                     		cmpb	%sil, %dl
+40383424                   		cmpb	%sil, (%rsp)
+83BC944004000000          		cmpl	$0, 1088(%rsp,%rdx,4)
+837B6C00                   		cmpl	$0, 108(%rbx)
+837F6C00                   		cmpl	$0, 108(%rdi)
+837C240C00                		cmpl	$0, 12(%rsp)
+837C241400                		cmpl	$0, 20(%rsp)
+837C241800                		cmpl	$0, 24(%rsp)
+837C842000                		cmpl	$0, 32(%rsp,%rax,4)
+83BF8402000000            		cmpl	$0, 644(%rdi)
+83BD9C02000000            		cmpl	$0, 668(%rbp)
+41833C8600                		cmpl	$0, (%r14,%rax,4)
+833800                     		cmpl	$0, (%rax)
+833C8300                   		cmpl	$0, (%rbx,%rax,4)
+833F00                     		cmpl	$0, (%rdi)
+833E00                     		cmpl	$0, (%rsi)
+83F965                     		cmpl	$101, %ecx
+443BA4BC40040000          		cmpl	1088(%rsp,%rdi,4), %r12d
+443BA49440040000          		cmpl	1088(%rsp,%rdx,4), %r12d
+3B536C                     		cmpl	108(%rbx), %edx
+443B4B6C                   		cmpl	108(%rbx), %r9d
+81BD9C020000AF040000     		cmpl	$1199, 668(%rbp)
+443B437C                   		cmpl	124(%rbx), %r8d
+3B54240C                   		cmpl	12(%rsp), %edx
+837B30FF                   		cmpl	$-1, 48(%rbx)
+4183FD0E                   		cmpl	$14, %r13d
+83FF0F                     		cmpl	$15, %edi
+4183F80F                   		cmpl	$15, %r8d
+83BB9002000001            		cmpl	$1, 656(%rbx)
+83BB9402000001            		cmpl	$1, 660(%rbx)
+83BF9402000001            		cmpl	$1, 660(%rdi)
+3B542410                   		cmpl	16(%rsp), %edx
+4183FC11                   		cmpl	$17, %r12d
+817C241852460000          		cmpl	$18002, 24(%rsp)
+81BD9C020000C7000000     		cmpl	$199, 668(%rbp)
+83F813                     		cmpl	$19, %eax
+83FB01                     		cmpl	$1, %ebx
+83F9FF                     		cmpl	$-1, %ecx
+83FF01                     		cmpl	$1, %edi
+83FA01                     		cmpl	$1, %edx
+83FEFF                     		cmpl	$-1, %esi
+83FE01                     		cmpl	$1, %esi
+4183FDFF                   		cmpl	$-1, %r13d
+4183FD01                   		cmpl	$1, %r13d
+4183F9FF                   		cmpl	$-1, %r9d
+833C83FF                   		cmpl	$-1, (%rbx,%rax,4)
+3B7C2414                   		cmpl	20(%rsp), %edi
+3B542414                   		cmpl	20(%rsp), %edx
+81BD9C0200005F090000     		cmpl	$2399, 668(%rbp)
+3B5C2418                   		cmpl	24(%rsp), %ebx
+3B7C2418                   		cmpl	24(%rsp), %edi
+81FBFE000000              		cmpl	$254, %ebx
+81FBFF000000              		cmpl	$255, %ebx
+81F9FF000000              		cmpl	$255, %ecx
+81FFFF000000              		cmpl	$255, %edi
+81FAFF000000              		cmpl	$255, %edx
+4181FCFF000000            		cmpl	$255, %r12d
+81FF00010000              		cmpl	$256, %edi
+81FE00010000              		cmpl	$256, %esi
+83BD9002000002            		cmpl	$2, 656(%rbp)
+83BF9002000002            		cmpl	$2, 656(%rdi)
+3B7C241C                   		cmpl	28(%rsp), %edi
+443B64241C                		cmpl	28(%rsp), %r12d
+443B6C241C                		cmpl	28(%rsp), %r13d
+83FB02                     		cmpl	$2, %ebx
+4183FD02                   		cmpl	$2, %r13d
+83FF1F                     		cmpl	$31, %edi
+837C241403                		cmpl	$3, 20(%rsp)
+837C242003                		cmpl	$3, 32(%rsp)
+83FB21                     		cmpl	$33, %ebx
+4183F921                   		cmpl	$33, %r9d
+83F803                     		cmpl	$3, %eax
+4183FD03                   		cmpl	$3, %r13d
+4183F803                   		cmpl	$3, %r8d
+4183F903                   		cmpl	$3, %r9d
+83F831                     		cmpl	$49, %eax
+81BD9C02000057020000     		cmpl	$599, 668(%rbp)
+83FB05                     		cmpl	$5, %ebx
+443B648440                		cmpl	64(%rsp,%rax,4), %r12d
+443B64BC40                		cmpl	64(%rsp,%rdi,4), %r12d
+3B449440                   		cmpl	64(%rsp,%rdx,4), %eax
+3DFEFF0000                		cmpl	$65534, %eax
+81FAFEFF0000              		cmpl	$65534, %edx
+3DFFFF0000                		cmpl	$65535, %eax
+81FB00000100              		cmpl	$65536, %ebx
+3B959C020000              		cmpl	668(%rbp), %edx
+443BA59C020000            		cmpl	668(%rbp), %r12d
+4183FE06                   		cmpl	$6, %r14d
+4183BB8402000007          		cmpl	$7, 644(%r11)
+83BD8402000007            		cmpl	$7, 644(%rbp)
+83BB8402000007            		cmpl	$7, 644(%rbx)
+83BF8402000007            		cmpl	$7, 644(%rdi)
+83BE8402000007            		cmpl	$7, 644(%rsi)
+4183FE07                   		cmpl	$7, %r14d
+837C240C63                		cmpl	$99, 12(%rsp)
+837C241863                		cmpl	$99, 24(%rsp)
+81FD0F270000              		cmpl	$9999, %ebp
+83F809                     		cmpl	$9, %eax
+39449440                   		cmpl	%eax, 64(%rsp,%rdx,4)
+39C1                       		cmpl	%eax, %ecx
+39C7                       		cmpl	%eax, %edi
+39C2                       		cmpl	%eax, %edx
+39C6                       		cmpl	%eax, %esi
+4139C5                     		cmpl	%eax, %r13d
+4139C0                     		cmpl	%eax, %r8d
+39EF                       		cmpl	%ebp, %edi
+4139EC                     		cmpl	%ebp, %r12d
+39C8                       		cmpl	%ecx, %eax
+4139CD                     		cmpl	%ecx, %r13d
+39F8                       		cmpl	%edi, %eax
+4139FA                     		cmpl	%edi, %r10d
+39D1                       		cmpl	%edx, %ecx
+39D7                       		cmpl	%edx, %edi
+391578563412              		cmpl	%edx, 0x12345678(%rip)
+4139D4                     		cmpl	%edx, %r12d
+4139D1                     		cmpl	%edx, %r9d
+391481                     		cmpl	%edx, (%rcx,%rax,4)
+4539D1                     		cmpl	%r10d, %r9d
+453B0482                   		cmpl	(%r10,%rax,4), %r8d
+4439D9                     		cmpl	%r11d, %ecx
+4439E0                     		cmpl	%r12d, %eax
+4439E5                     		cmpl	%r12d, %ebp
+4439E7                     		cmpl	%r12d, %edi
+44396C2410                		cmpl	%r13d, 16(%rsp)
+4439E8                     		cmpl	%r13d, %eax
+4439ED                     		cmpl	%r13d, %ebp
+4439EF                     		cmpl	%r13d, %edi
+4439EA                     		cmpl	%r13d, %edx
+4539EC                     		cmpl	%r13d, %r12d
+4439F0                     		cmpl	%r14d, %eax
+4439F3                     		cmpl	%r14d, %ebx
+4439F7                     		cmpl	%r14d, %edi
+4539F4                     		cmpl	%r14d, %r12d
+4539F5                     		cmpl	%r14d, %r13d
+4539FE                     		cmpl	%r15d, %r14d
+4439C7                     		cmpl	%r8d, %edi
+4439C6                     		cmpl	%r8d, %esi
+4439CA                     		cmpl	%r9d, %edx
+664139C2                   		cmpw	%ax, %r10w
+FF4C240C                   		decl	12(%rsp)
+FF4C2418                   		decl	24(%rsp)
+FF4C8420                   		decl	32(%rsp,%rax,4)
+FFC8                       		decl	%eax
+FFCD                       		decl	%ebp
+FFCB                       		decl	%ebx
+FFC9                       		decl	%ecx
+FFCF                       		decl	%edi
+FFCE                       		decl	%esi
+41FFCA                     		decl	%r10d
+41FFCB                     		decl	%r11d
+41FFCC                     		decl	%r12d
+41FFC8                     		decl	%r8d
+41FF09                     		decl	(%r9)
+F7FB                       		idivl	%ebx
+69442408C51D0000          		imull	$7621, 8(%rsp), %eax
+440FAFE0                   		imull	%eax, %r12d
+4869C056555555            		imulq	$1431655766, %rax, %rax
+480FAFC2                   		imulq	%rdx, %rax
+FF848420040000            		incl	1056(%rsp,%rax,4)
+41FF4374                   		incl	116(%r11)
+FF4774                     		incl	116(%rdi)
+FF4674                     		incl	116(%rsi)
+FF477C                     		incl	124(%rdi)
+FF44240C                   		incl	12(%rsp)
+FF442418                   		incl	24(%rsp)
+FF848588B10000            		incl	45448(%rbp,%rax,4)
+FF849588B10000            		incl	45448(%rbp,%rdx,4)
+FF44B430                   		incl	48(%rsp,%rsi,4)
+FF83A0020000              		incl	672(%rbx)
+FF8483A0020000            		incl	672(%rbx,%rax,4)
+FF83A4020000              		incl	676(%rbx)
+FF8483A4020000            		incl	676(%rbx,%rax,4)
+FFC0                       		incl	%eax
+FFC5                       		incl	%ebp
+FFC3                       		incl	%ebx
+FFC1                       		incl	%ecx
+FFC7                       		incl	%edi
+FFC2                       		incl	%edx
+FFC6                       		incl	%esi
+41FFC2                     		incl	%r10d
+41FFC3                     		incl	%r11d
+41FFC4                     		incl	%r12d
+41FFC5                     		incl	%r13d
+41FFC6                     		incl	%r14d
+41FFC0                     		incl	%r8d
+41FFC1                     		incl	%r9d
+FF448500                   		incl	(%rbp,%rax,4)
+48FFC2                     		incq	%rdx
+0F87DD120000              		ja	lalala
+0F86D7120000              		jbe	lalala
+0F84D1120000              		je	lalala
+0F8DCB120000              		jge	lalala
+0F8FC5120000              		jg	lalala
+0F8EBF120000              		jle	lalala
+0F8CB9120000              		jl	lalala
+E9B4120000                		jmp	lalala
+0F85AE120000              		jne	lalala
+0F89A8120000              		jns	lalala
+0F88A2120000              		js	lalala
+418D7BFF                   		leal	-1(%r11), %edi
+418D4404FF                		leal	-1(%r12,%rax), %eax
+418D4501                   		leal	1(%r13), %eax
+418D5501                   		leal	1(%r13), %edx
+418D5001                   		leal	1(%r8), %edx
+8D7001                     		leal	1(%rax), %esi
+448D70FF                   		leal	-1(%rax), %r14d
+468D440001                		leal	1(%rax,%r8), %r8d
+8D4C0801                   		leal	1(%rax,%rcx), %ecx
+8D7501                     		leal	1(%rbp), %esi
+8D43FF                     		leal	-1(%rbx), %eax
+8D41FF                     		leal	-1(%rcx), %eax
+8D4101                     		leal	1(%rcx), %eax
+8D4701                     		leal	1(%rdi), %eax
+8D4F01                     		leal	1(%rdi), %ecx
+8D42FF                     		leal	-1(%rdx), %eax
+8D4201                     		leal	1(%rdx), %eax
+8D6EFF                     		leal	-1(%rsi), %ebp
+448D66FF                   		leal	-1(%rsi), %r12d
+448D4EFF                   		leal	-1(%rsi), %r9d
+8D43FE                     		leal	-2(%rbx), %eax
+8D47FE                     		leal	-2(%rdi), %eax
+418D451F                   		leal	31(%r13), %eax
+8D5522                     		leal	34(%rbp), %edx
+8D4523                     		leal	35(%rbp), %eax
+8D43FD                     		leal	-3(%rbx), %eax
+8D7030                     		leal	48(%rax), %esi
+418D7EFC                   		leal	-4(%r14), %edi
+8D4F04                     		leal	4(%rdi), %ecx
+8D4107                     		leal	7(%rcx), %eax
+458D5808                   		leal	8(%r8), %r11d
+438D0C2A                   		leal	(%r10,%r13), %ecx
+438D0404                   		leal	(%r12,%r8), %eax
+418D547D00                		leal	(%r13,%rdi,2), %edx
+418D443500                		leal	(%r13,%rsi), %eax
+438D042E                   		leal	(%r14,%r13), %eax
+438D0410                   		leal	(%r8,%r10), %eax
+418D0410                   		leal	(%r8,%rdx), %eax
+438D3C39                   		leal	(%r9,%r15), %edi
+428D0410                   		leal	(%rax,%r10), %eax
+468D3C30                   		leal	(%rax,%r14), %r15d
+428D0400                   		leal	(%rax,%r8), %eax
+428D0408                   		leal	(%rax,%r9), %eax
+8D0440                     		leal	(%rax,%rax,2), %eax
+8D0438                     		leal	(%rax,%rdi), %eax
+8D0410                     		leal	(%rax,%rdx), %eax
+428D0409                   		leal	(%rcx,%r9), %eax
+428D042F                   		leal	(%rdi,%r13), %eax
+428D042A                   		leal	(%rdx,%r13), %eax
+428D1402                   		leal	(%rdx,%r8), %edx
+428D140A                   		leal	(%rdx,%r9), %edx
+8D040A                     		leal	(%rdx,%rcx), %eax
+8D143A                     		leal	(%rdx,%rdi), %edx
+428D042E                   		leal	(%rsi,%r13), %eax
+428D043E                   		leal	(%rsi,%r15), %eax
+8D040E                     		leal	(%rsi,%rcx), %eax
+8D0476                     		leal	(%rsi,%rsi,2), %eax
+498D5301                   		leaq	1(%r11), %rdx
+488D442414                		leaq	20(%rsp), %rax
+4C8D940140930000          		leaq	37696(%rcx,%rax), %r10
+4C8D8C0140930000          		leaq	37696(%rcx,%rax), %r9
+488DBC454C930000          		leaq	37708(%rbp,%rax,2), %rdi
+488DBC7D4C930000          		leaq	37708(%rbp,%rdi,2), %rdi
+488DB47D4C930000          		leaq	37708(%rbp,%rdi,2), %rsi
+4C8D84C558990000          		leaq	39256(%rbp,%rax,8), %r8
+488DBCFD58990000          		leaq	39256(%rbp,%rdi,8), %rdi
+488DB4FD88B10000          		leaq	45448(%rbp,%rdi,8), %rsi
+488D1400                   		leaq	(%rax,%rax), %rdx
+488D0410                   		leaq	(%rax,%rdx), %rax
+4C8D6C4500                		leaq	(%rbp,%rax,2), %r13
+488D444500                		leaq	(%rbp,%rax,2), %rax
+488D4C4500                		leaq	(%rbp,%rax,2), %rcx
+488D544500                		leaq	(%rbp,%rax,2), %rdx
+488D540500                		leaq	(%rbp,%rax), %rdx
+488D545500                		leaq	(%rbp,%rdx,2), %rdx
+488D1432                   		leaq	(%rdx,%rsi), %rdx
+C684044008000000          		movb	$0, 2112(%rsp,%rax)
+C644042000                		movb	$0, 32(%rsp,%rax)
+C684084A92000000          		movb	$0, 37450(%rax,%rcx)
+C684044008000001          		movb	$1, 2112(%rsp,%rax)
+C6440C2001                		movb	$1, 32(%rsp,%rcx)
+C684084A9200000F          		movb	$15, 37450(%rax,%rcx)
+C684104C9300000F          		movb	$15, 37708(%rax,%rdx)
+88442401                   		movb	%al, 1(%rsp)
+88841780010000            		movb	%al, 384(%rdi,%rdx)
+880411                     		movb	%al, (%rcx,%rdx)
+8802                       		movb	%al, (%rdx)
+884C2420                   		movb	%cl, 32(%rsp)
+880C24                     		movb	%cl, (%rsp)
+40887C0420                		movb	%dil, 32(%rsp,%rax)
+889405A8060000            		movb	%dl, 1704(%rbp,%rax)
+88540420                   		movb	%dl, 32(%rsp,%rax)
+881430                     		movb	%dl, (%rax,%rsi)
+44888405FA4C0000          		movb	%r8b, 19706(%rbp,%rax)
+44880C04                   		movb	%r9b, (%rsp,%rax)
+89C0                       		mov	%eax, %eax
+89F8                       		mov	%edi, %eax
+89F0                       		mov	%esi, %eax
+C784842004000000000000   		movl	$0, 1056(%rsp,%rax,4)
+C7477400000000            		movl	$0, 116(%rdi)
+C7477C00000000            		movl	$0, 124(%rdi)
+C744240C00000000          		movl	$0, 12(%rsp)
+C744241800000000          		movl	$0, 24(%rsp)
+C744241C00000000          		movl	$0, 28(%rsp)
+C7848588B1000000000000   		movl	$0, 45448(%rbp,%rax,4)
+C744843000000000          		movl	$0, 48(%rsp,%rax,4)
+C7878002000000000000     		movl	$0, 640(%rdi)
+C7878402000000000000     		movl	$0, 644(%rdi)
+C78483A002000000000000   		movl	$0, 672(%rbx,%rax,4)
+C744240800000000          		movl	$0, 8(%rsp)
+B800000000                		movl	$0, %eax
+BB00000000                		movl	$0, %ebx
+B900000000                		movl	$0, %ecx
+BF00000000                		movl	$0, %edi
+BA00000000                		movl	$0, %edx
+41BC00000000              		movl	$0, %r12d
+41BD00000000              		movl	$0, %r13d
+41B800000000              		movl	$0, %r8d
+41B900000000              		movl	$0, %r9d
+C744850000000000          		movl	$0, (%rbp,%rax,4)
+C7048300000000            		movl	$0, (%rbx,%rax,4)
+BFE9030000                		movl	$1001, %edi
+BFEA030000                		movl	$1002, %edi
+BFEB030000                		movl	$1003, %edi
+BFEC030000                		movl	$1004, %edi
+BFED030000                		movl	$1005, %edi
+BFEE030000                		movl	$1006, %edi
+BFEF030000                		movl	$1007, %edi
+B864000000                		movl	$100, %eax
+BE68000000                		movl	$104, %esi
+8B8C8420040000            		movl	1056(%rsp,%rax,4), %ecx
+8BB48420040000            		movl	1056(%rsp,%rax,4), %esi
+8B849420040000            		movl	1056(%rsp,%rdx,4), %eax
+8B849440040000            		movl	1088(%rsp,%rdx,4), %eax
+8B84B440040000            		movl	1088(%rsp,%rsi,4), %eax
+448B4B6C                   		movl	108(%rbx), %r9d
+8B6F6C                     		movl	108(%rdi), %ebp
+8B576C                     		movl	108(%rdi), %edx
+BF0A000000                		movl	$10, %edi
+C744240C01000000          		movl	$1, 12(%rsp)
+8B442470                   		movl	112(%rsp), %eax
+8B7C2470                   		movl	112(%rsp), %edi
+BE72000000                		movl	$114, %esi
+8B4574                     		movl	116(%rbp), %eax
+8B5574                     		movl	116(%rbp), %edx
+C744241001000000          		movl	$1, 16(%rsp)
+8B457C                     		movl	124(%rbp), %eax
+8B537C                     		movl	124(%rbx), %edx
+448B537C                   		movl	124(%rbx), %r10d
+448B477C                   		movl	124(%rdi), %r8d
+C744241801000000          		movl	$1, 24(%rsp)
+8B44240C                   		movl	12(%rsp), %eax
+8B54240C                   		movl	12(%rsp), %edx
+448B44240C                		movl	12(%rsp), %r8d
+8B842450050000            		movl	1360(%rsp), %eax
+BE90000000                		movl	$144, %esi
+C74330FFFFFFFF            		movl	$-1, 48(%rbx)
+41B80F000000              		movl	$15, %r8d
+8B542410                   		movl	16(%rsp), %edx
+448B748410                		movl	16(%rsp,%rax,4), %r14d
+B911000000                		movl	$17, %ecx
+B801000000                		movl	$1, %eax
+BDFFFFFFFF                		movl	$-1, %ebp
+BB01000000                		movl	$1, %ebx
+B9FFFFFFFF                		movl	$-1, %ecx
+BF01000000                		movl	$1, %edi
+BAFFFFFFFF                		movl	$-1, %edx
+BA01000000                		movl	$1, %edx
+BE01000000                		movl	$1, %esi
+41BE01000000              		movl	$1, %r14d
+41B801000000              		movl	$1, %r8d
+41B901000000              		movl	$1, %r9d
+BEFFFFDFFF                		movl	$-2097153, %esi
+8B442414                   		movl	20(%rsp), %eax
+8B4C2414                   		movl	20(%rsp), %ecx
+8B542414                   		movl	20(%rsp), %edx
+8B742414                   		movl	20(%rsp), %esi
+448B5C2414                		movl	20(%rsp), %r11d
+448B642414                		movl	20(%rsp), %r12d
+448B442414                		movl	20(%rsp), %r8d
+448B4C2414                		movl	20(%rsp), %r9d
+8B848440090000            		movl	2368(%rsp,%rax,4), %eax
+8B948440090000            		movl	2368(%rsp,%rax,4), %edx
+448BAC8440090000          		movl	2368(%rsp,%rax,4), %r13d
+448B848440090000          		movl	2368(%rsp,%rax,4), %r8d
+BE17000000                		movl	$23, %esi
+41B818000000              		movl	$24, %r8d
+8B4C2418                   		movl	24(%rsp), %ecx
+8B7C2418                   		movl	24(%rsp), %edi
+448B442418                		movl	24(%rsp), %r8d
+BA1B000000                		movl	$27, %edx
+BAABAAAAAA                		movl	$2863311531, %edx
+8B4C241C                   		movl	28(%rsp), %ecx
+8B7C241C                   		movl	28(%rsp), %edi
+8B54241C                   		movl	28(%rsp), %edx
+448B64241C                		movl	28(%rsp), %r12d
+41BA02000000              		movl	$2, %r10d
+41BE02000000              		movl	$2, %r14d
+41B902000000              		movl	$2, %r9d
+C7042402000000            		movl	$2, (%rsp)
+BFB90B0000                		movl	$3001, %edi
+BFBA0B0000                		movl	$3002, %edi
+BFBB0B0000                		movl	$3003, %edi
+BFBC0B0000                		movl	$3004, %edi
+BFBD0B0000                		movl	$3005, %edi
+BFBE0B0000                		movl	$3006, %edi
+BFBF0B0000                		movl	$3007, %edi
+41BD20000000              		movl	$32, %r13d
+8B442420                   		movl	32(%rsp), %eax
+448B6C2420                		movl	32(%rsp), %r13d
+BA21000000                		movl	$33, %edx
+8B442424                   		movl	36(%rsp), %eax
+8B5C2424                   		movl	36(%rsp), %ebx
+8B4C2424                   		movl	36(%rsp), %ecx
+8B542424                   		movl	36(%rsp), %edx
+8B742424                   		movl	36(%rsp), %esi
+448B442424                		movl	36(%rsp), %r8d
+BE26000000                		movl	$38, %esi
+448B8C8558990000          		movl	39256(%rbp,%rax,4), %r9d
+41BC03000000              		movl	$3, %r12d
+41BE03000000              		movl	$3, %r14d
+41B803000000              		movl	$3, %r8d
+448BAC84A0010000          		movl	416(%rsp,%rax,4), %r13d
+8B7B30                     		movl	48(%rbx), %edi
+8B442430                   		movl	48(%rsp), %eax
+8B542430                   		movl	48(%rsp), %edx
+8B548430                   		movl	48(%rsp,%rax,4), %edx
+8B9484F0010000            		movl	496(%rsp,%rax,4), %edx
+BE31000000                		movl	$49, %esi
+41BE04000000              		movl	$4, %r14d
+8B448504                   		movl	4(%rbp,%rax,4), %eax
+8B449504                   		movl	4(%rbp,%rdx,4), %eax
+8B88B8C90000              		movl	51640(%rax), %ecx
+8BB0BCC90000              		movl	51644(%rax), %esi
+8BB8C0C90000              		movl	51648(%rax), %edi
+8B442434                   		movl	52(%rsp), %eax
+8B542434                   		movl	52(%rsp), %edx
+BA35000000                		movl	$53, %edx
+BE38000000                		movl	$56, %esi
+8B442438                   		movl	56(%rsp), %eax
+418B8384020000            		movl	644(%r11), %eax
+8B8784020000              		movl	644(%rdi), %eax
+8B8684020000              		movl	644(%rsi), %eax
+8B9684020000              		movl	644(%rsi), %edx
+8B8B88020000              		movl	648(%rbx), %ecx
+8BB388020000              		movl	648(%rbx), %esi
+8B9788020000              		movl	648(%rdi), %edx
+8B442440                   		movl	64(%rsp), %eax
+448B442440                		movl	64(%rsp), %r8d
+8B44B440                   		movl	64(%rsp,%rsi,4), %eax
+8B938C020000              		movl	652(%rbx), %edx
+8BB38C020000              		movl	652(%rbx), %esi
+448B838C020000            		movl	652(%rbx), %r8d
+8B878C020000              		movl	652(%rdi), %eax
+BB00000100                		movl	$65536, %ebx
+448BAF90020000            		movl	656(%rdi), %r13d
+BE41000000                		movl	$65, %esi
+8B9394020000              		movl	660(%rbx), %edx
+8B959C020000              		movl	668(%rbp), %edx
+448BA59C020000            		movl	668(%rbp), %r12d
+8B8F9C020000              		movl	668(%rdi), %ecx
+BE42000000                		movl	$66, %esi
+8B442444                   		movl	68(%rsp), %eax
+8B742444                   		movl	68(%rsp), %esi
+BE45000000                		movl	$69, %esi
+8B442448                   		movl	72(%rsp), %eax
+8B742448                   		movl	72(%rsp), %esi
+BE50000000                		movl	$80, %esi
+8B442450                   		movl	80(%rsp), %eax
+8B7C2450                   		movl	80(%rsp), %edi
+BE53000000                		movl	$83, %esi
+8B442454                   		movl	84(%rsp), %eax
+8B4C2454                   		movl	84(%rsp), %ecx
+8B7C2454                   		movl	84(%rsp), %edi
+8B4F58                     		movl	88(%rdi), %ecx
+8B442458                   		movl	88(%rsp), %eax
+8B4C2458                   		movl	88(%rsp), %ecx
+448BBC8480030000          		movl	896(%rsp,%rax,4), %r15d
+BE59000000                		movl	$89, %esi
+41B808000000              		movl	$8, %r8d
+41B908000000              		movl	$8, %r9d
+8B542408                   		movl	8(%rsp), %edx
+BE5A000000                		movl	$90, %esi
+448B6C8460                		movl	96(%rsp,%rax,4), %r13d
+B9FFC99A3B                		movl	$999999999, %ecx
+89849420040000            		movl	%eax, 1056(%rsp,%rdx,4)
+8984B440040000            		movl	%eax, 1088(%rsp,%rsi,4)
+8944240C                   		movl	%eax, 12(%rsp)
+894496F0                   		movl	%eax, -16(%rsi,%rdx,4)
+89442410                   		movl	%eax, 16(%rsp)
+89442414                   		movl	%eax, 20(%rsp)
+89848C40090000            		movl	%eax, 2368(%rsp,%rcx,4)
+8944241C                   		movl	%eax, 28(%rsp)
+89449420                   		movl	%eax, 32(%rsp,%rdx,4)
+89442430                   		movl	%eax, 48(%rsp)
+898494F0010000            		movl	%eax, 496(%rsp,%rdx,4)
+894496FC                   		movl	%eax, -4(%rsi,%rdx,4)
+8986B8C90000              		movl	%eax, 51640(%rsi)
+8986BCC90000              		movl	%eax, 51644(%rsi)
+8986C0C90000              		movl	%eax, 51648(%rsi)
+89442434                   		movl	%eax, 52(%rsp)
+89442438                   		movl	%eax, 56(%rsp)
+41898384020000            		movl	%eax, 644(%r11)
+898784020000              		movl	%eax, 644(%rdi)
+898684020000              		movl	%eax, 644(%rsi)
+89448C40                   		movl	%eax, 64(%rsp,%rcx,4)
+8944B440                   		movl	%eax, 64(%rsp,%rsi,4)
+89878C020000              		movl	%eax, 652(%rdi)
+89442448                   		movl	%eax, 72(%rsp)
+89442458                   		movl	%eax, 88(%rsp)
+89849480030000            		movl	%eax, 896(%rsp,%rdx,4)
+89442408                   		movl	%eax, 8(%rsp)
+89442460                   		movl	%eax, 96(%rsp)
+89449460                   		movl	%eax, 96(%rsp,%rdx,4)
+89C1                       		movl	%eax, %ecx
+89C7                       		movl	%eax, %edi
+89C2                       		movl	%eax, %edx
+89C6                       		movl	%eax, %esi
+4189C7                     		movl	%eax, %r15d
+89448D00                   		movl	%eax, (%rbp,%rcx,4)
+89449500                   		movl	%eax, (%rbp,%rdx,4)
+89048B                     		movl	%eax, (%rbx,%rcx,4)
+8904B3                     		movl	%eax, (%rbx,%rsi,4)
+890497                     		movl	%eax, (%rdi,%rdx,4)
+89E8                       		movl	%ebp, %eax
+89E9                       		movl	%ebp, %ecx
+89EE                       		movl	%ebp, %esi
+4189EC                     		movl	%ebp, %r12d
+4189E8                     		movl	%ebp, %r8d
+899C8440090000            		movl	%ebx, 2368(%rsp,%rax,4)
+89D8                       		movl	%ebx, %eax
+89DA                       		movl	%ebx, %edx
+89DE                       		movl	%ebx, %esi
+4189DC                     		movl	%ebx, %r12d
+4189D8                     		movl	%ebx, %r8d
+891C87                     		movl	%ebx, (%rdi,%rax,4)
+894C2410                   		movl	%ecx, 16(%rsp)
+894C8410                   		movl	%ecx, 16(%rsp,%rax,4)
+894C2418                   		movl	%ecx, 24(%rsp)
+894C241C                   		movl	%ecx, 28(%rsp)
+894C2424                   		movl	%ecx, 36(%rsp)
+894C2450                   		movl	%ecx, 80(%rsp)
+894C2454                   		movl	%ecx, 84(%rsp)
+89C8                       		movl	%ecx, %eax
+89CB                       		movl	%ecx, %ebx
+89CF                       		movl	%ecx, %edi
+89CA                       		movl	%ecx, %edx
+4189CC                     		movl	%ecx, %r12d
+4189CD                     		movl	%ecx, %r13d
+4189C8                     		movl	%ecx, %r8d
+41890C90                   		movl	%ecx, (%r8,%rdx,4)
+890C93                     		movl	%ecx, (%rbx,%rdx,4)
+897C2440                   		movl	%edi, 64(%rsp)
+897C2444                   		movl	%edi, 68(%rsp)
+897C2454                   		movl	%edi, 84(%rsp)
+897C2458                   		movl	%edi, 88(%rsp)
+89F8                       		movl	%edi, %eax
+89F9                       		movl	%edi, %ecx
+89FA                       		movl	%edi, %edx
+4189FA                     		movl	%edi, %r10d
+4189FB                     		movl	%edi, %r11d
+893C86                     		movl	%edi, (%rsi,%rax,4)
+89948C40040000            		movl	%edx, 1088(%rsp,%rcx,4)
+8954240C                   		movl	%edx, 12(%rsp)
+89542410                   		movl	%edx, 16(%rsp)
+89542414                   		movl	%edx, 20(%rsp)
+899424A0010000            		movl	%edx, 416(%rsp)
+895330                     		movl	%edx, 48(%rbx)
+899484F0010000            		movl	%edx, 496(%rsp,%rax,4)
+89542434                   		movl	%edx, 52(%rsp)
+89542438                   		movl	%edx, 56(%rsp)
+899684020000              		movl	%edx, 644(%rsi)
+899788020000              		movl	%edx, 648(%rdi)
+89548460                   		movl	%edx, 96(%rsp,%rax,4)
+89D0                       		movl	%edx, %eax
+89D5                       		movl	%edx, %ebp
+89D3                       		movl	%edx, %ebx
+89D1                       		movl	%edx, %ecx
+89D7                       		movl	%edx, %edi
+89D6                       		movl	%edx, %esi
+4189D4                     		movl	%edx, %r12d
+41891487                   		movl	%edx, (%r15,%rax,4)
+89B48420040000            		movl	%esi, 1056(%rsp,%rax,4)
+89B484A0010000            		movl	%esi, 416(%rsp,%rax,4)
+89742440                   		movl	%esi, 64(%rsp)
+89742444                   		movl	%esi, 68(%rsp)
+89F0                       		movl	%esi, %eax
+89F5                       		movl	%esi, %ebp
+89F3                       		movl	%esi, %ebx
+89F1                       		movl	%esi, %ecx
+89F2                       		movl	%esi, %edx
+4189F0                     		movl	%esi, %r8d
+893493                     		movl	%esi, (%rbx,%rdx,4)
+448B248539300000          		movl	12345(,%rax,4), %r12d
+BE78563412                		movl	$0x12345678, %esi
+4489D1                     		movl	%r10d, %ecx
+4589D3                     		movl	%r10d, %r11d
+4589D1                     		movl	%r10d, %r9d
+458B0482                   		movl	(%r10,%rax,4), %r8d
+4489642414                		movl	%r12d, 20(%rsp)
+448964241C                		movl	%r12d, 28(%rsp)
+4489E0                     		movl	%r12d, %eax
+4489E1                     		movl	%r12d, %ecx
+4489E7                     		movl	%r12d, %edi
+4489E2                     		movl	%r12d, %edx
+4489E6                     		movl	%r12d, %esi
+4589E2                     		movl	%r12d, %r10d
+4589E0                     		movl	%r12d, %r8d
+4489648500                		movl	%r12d, (%rbp,%rax,4)
+418B0C84                   		movl	(%r12,%rax,4), %ecx
+4489AC84A0010000          		movl	%r13d, 416(%rsp,%rax,4)
+44896C2430                		movl	%r13d, 48(%rsp)
+44896C2434                		movl	%r13d, 52(%rsp)
+4489E8                     		movl	%r13d, %eax
+4489ED                     		movl	%r13d, %ebp
+4489EB                     		movl	%r13d, %ebx
+4489EA                     		movl	%r13d, %edx
+4589EA                     		movl	%r13d, %r10d
+4589EB                     		movl	%r13d, %r11d
+4589EC                     		movl	%r13d, %r12d
+4589E8                     		movl	%r13d, %r8d
+4589E9                     		movl	%r13d, %r9d
+44892C24                   		movl	%r13d, (%rsp)
+4489748410                		movl	%r14d, 16(%rsp,%rax,4)
+4489F0                     		movl	%r14d, %eax
+4489F5                     		movl	%r14d, %ebp
+4489F3                     		movl	%r14d, %ebx
+4489F7                     		movl	%r14d, %edi
+4489F2                     		movl	%r14d, %edx
+4489F6                     		movl	%r14d, %esi
+4589F3                     		movl	%r14d, %r11d
+4589F0                     		movl	%r14d, %r8d
+4589F1                     		movl	%r14d, %r9d
+4489748500                		movl	%r14d, (%rbp,%rax,4)
+44897C2450                		movl	%r15d, 80(%rsp)
+44897C2454                		movl	%r15d, 84(%rsp)
+4489BC8480030000          		movl	%r15d, 896(%rsp,%rax,4)
+4489FD                     		movl	%r15d, %ebp
+4489FE                     		movl	%r15d, %esi
+4589FE                     		movl	%r15d, %r14d
+4589F8                     		movl	%r15d, %r8d
+4589F9                     		movl	%r15d, %r9d
+418B0487                   		movl	(%r15,%rax,4), %eax
+418B1487                   		movl	(%r15,%rax,4), %edx
+458B0487                   		movl	(%r15,%rax,4), %r8d
+448944240C                		movl	%r8d, 12(%rsp)
+4489442414                		movl	%r8d, 20(%rsp)
+4489848440090000          		movl	%r8d, 2368(%rsp,%rax,4)
+4489442420                		movl	%r8d, 32(%rsp)
+4489442424                		movl	%r8d, 36(%rsp)
+4489839C020000            		movl	%r8d, 668(%rbx)
+4489442444                		movl	%r8d, 68(%rsp)
+4489442448                		movl	%r8d, 72(%rsp)
+4489842480030000          		movl	%r8d, 896(%rsp)
+4489C2                     		movl	%r8d, %edx
+4489C6                     		movl	%r8d, %esi
+418B0C80                   		movl	(%r8,%rax,4), %ecx
+458B1480                   		movl	(%r8,%rax,4), %r10d
+458B0480                   		movl	(%r8,%rax,4), %r8d
+44894C86F0                		movl	%r9d, -16(%rsi,%rax,4)
+44894C241C                		movl	%r9d, 28(%rsp)
+44894C2420                		movl	%r9d, 32(%rsp)
+44898C24F0010000          		movl	%r9d, 496(%rsp)
+44894C86FC                		movl	%r9d, -4(%rsi,%rax,4)
+4489CD                     		movl	%r9d, %ebp
+4489C9                     		movl	%r9d, %ecx
+4489CF                     		movl	%r9d, %edi
+4489CA                     		movl	%r9d, %edx
+4589C8                     		movl	%r9d, %r8d
+8B448500                   		movl	(%rbp,%rax,4), %eax
+8B548500                   		movl	(%rbp,%rax,4), %edx
+448B648500                		movl	(%rbp,%rax,4), %r12d
+448B748500                		movl	(%rbp,%rax,4), %r14d
+8B440500                   		movl	(%rbp,%rax), %eax
+8B540500                   		movl	(%rbp,%rax), %edx
+448B640500                		movl	(%rbp,%rax), %r12d
+448B440500                		movl	(%rbp,%rax), %r8d
+8B449500                   		movl	(%rbp,%rdx,4), %eax
+8B541500                   		movl	(%rbp,%rdx), %edx
+8B3C83                     		movl	(%rbx,%rax,4), %edi
+8B1483                     		movl	(%rbx,%rax,4), %edx
+8B348B                     		movl	(%rbx,%rcx,4), %esi
+8B0493                     		movl	(%rbx,%rdx,4), %eax
+8B0CB3                     		movl	(%rbx,%rsi,4), %ecx
+8B0C82                     		movl	(%rdx,%rax,4), %ecx
+448B0C86                   		movl	(%rsi,%rax,4), %r9d
+8B0496                     		movl	(%rsi,%rdx,4), %eax
+4C8B4C2478                		movq	120(%rsp), %r9
+488B442478                		movq	120(%rsp), %rax
+488BBC2458050000          		movq	1368(%rsp), %rdi
+488BB42458050000          		movq	1368(%rsp), %rsi
+488B7B18                   		movq	24(%rbx), %rdi
+488B6C2418                		movq	24(%rsp), %rbp
+488B5C2418                		movq	24(%rsp), %rbx
+488B4C2418                		movq	24(%rsp), %rcx
+488B7C2418                		movq	24(%rsp), %rdi
+488B542418                		movq	24(%rsp), %rdx
+488B742418                		movq	24(%rsp), %rsi
+488B7320                   		movq	32(%rbx), %rsi
+488B6C2420                		movq	32(%rsp), %rbp
+488B542420                		movq	32(%rsp), %rdx
+488B9424800D0000          		movq	3456(%rsp), %rdx
+488BB424800D0000          		movq	3456(%rsp), %rsi
+4C8B7F28                   		movq	40(%rdi), %r15
+4C8B642428                		movq	40(%rsp), %r12
+4C8B442428                		movq	40(%rsp), %r8
+488B4C2428                		movq	40(%rsp), %rcx
+488B7C2428                		movq	40(%rsp), %rdi
+488B542428                		movq	40(%rsp), %rdx
+488B742428                		movq	40(%rsp), %rsi
+4C8B6C2430                		movq	48(%rsp), %r13
+4C8B442430                		movq	48(%rsp), %r8
+488B442430                		movq	48(%rsp), %rax
+488B4C2430                		movq	48(%rsp), %rcx
+488B542430                		movq	48(%rsp), %rdx
+488B742430                		movq	48(%rsp), %rsi
+4C8B6738                   		movq	56(%rdi), %r12
+4C8B7738                   		movq	56(%rdi), %r14
+4C8B742438                		movq	56(%rsp), %r14
+4C8B442438                		movq	56(%rsp), %r8
+488B7C2438                		movq	56(%rsp), %rdi
+488B542438                		movq	56(%rsp), %rdx
+488B742438                		movq	56(%rsp), %rsi
+4C8B6F40                   		movq	64(%rdi), %r13
+488B7740                   		movq	64(%rdi), %rsi
+4C8B7C2440                		movq	64(%rsp), %r15
+4C8B7F48                   		movq	72(%rdi), %r15
+488B6F48                   		movq	72(%rdi), %rbp
+498B5350                   		movq	80(%r11), %rdx
+488B5750                   		movq	80(%rdi), %rdx
+488B5650                   		movq	80(%rsi), %rdx
+4C896424E0                		movq	%r12, -32(%rsp)
+4C896C24E8                		movq	%r13, -24(%rsp)
+4C897424F0                		movq	%r14, -16(%rsp)
+4C89F7                     		movq	%r14, %rdi
+4C897C24F8                		movq	%r15, -8(%rsp)
+4D89FA                     		movq	%r15, %r10
+4C89F9                     		movq	%r15, %rcx
+4C89FA                     		movq	%r15, %rdx
+4C89FE                     		movq	%r15, %rsi
+48894350                   		movq	%rax, 80(%rbx)
+4989C2                     		movq	%rax, %r10
+4889C1                     		movq	%rax, %rcx
+4889C7                     		movq	%rax, %rdi
+4889C2                     		movq	%rax, %rdx
+48890424                   		movq	%rax, (%rsp)
+48896C24D8                		movq	%rbp, -40(%rsp)
+4989EB                     		movq	%rbp, %r11
+4889EE                     		movq	%rbp, %rsi
+48895C24D0                		movq	%rbx, -48(%rsp)
+4889DF                     		movq	%rbx, %rdi
+4889DE                     		movq	%rbx, %rsi
+4889CD                     		movq	%rcx, %rbp
+4889CE                     		movq	%rcx, %rsi
+48897C2418                		movq	%rdi, 24(%rsp)
+48897C2438                		movq	%rdi, 56(%rsp)
+4889FD                     		movq	%rdi, %rbp
+4889FB                     		movq	%rdi, %rbx
+4889FE                     		movq	%rdi, %rsi
+4889542420                		movq	%rdx, 32(%rsp)
+4889542428                		movq	%rdx, 40(%rsp)
+4889542408                		movq	%rdx, 8(%rsp)
+4889D0                     		movq	%rdx, %rax
+4889D3                     		movq	%rdx, %rbx
+4889D7                     		movq	%rdx, %rdi
+4889742428                		movq	%rsi, 40(%rsp)
+4889742430                		movq	%rsi, 48(%rsp)
+4889742408                		movq	%rsi, 8(%rsp)
+4989F7                     		movq	%rsi, %r15
+4889F2                     		movq	%rsi, %rdx
+48893424                   		movq	%rsi, (%rsp)
+4989E3                     		movq	%rsp, %r11
+488B0C24                   		movq	(%rsp), %rcx
+488B1424                   		movq	(%rsp), %rdx
+488B3424                   		movq	(%rsp), %rsi
+488B0D19FCFFFF            		movq	-999(%rip), %rcx
+488B3D01000000            		movq	1(%rip), %rdi
+488B3502000000            		movq	2(%rip), %rsi
+8B0483                     		mov	(%rbx,%rax,4), %eax
+8B04B3                     		mov	(%rbx,%rsi,4), %eax
+8B0481                     		mov	(%rcx,%rax,4), %eax
+8B0482                     		mov	(%rdx,%rax,4), %eax
+8B0486                     		mov	(%rsi,%rax,4), %eax
+8B0496                     		mov	(%rsi,%rdx,4), %eax
+4863436C                   		movslq	108(%rbx),%rax
+49634B74                   		movslq	116(%r11),%rcx
+48634F74                   		movslq	116(%rdi),%rcx
+48634E74                   		movslq	116(%rsi),%rcx
+486344240C                		movslq	12(%rsp),%rax
+4863442414                		movslq	20(%rsp),%rax
+4863442418                		movslq	24(%rsp),%rax
+4863542418                		movslq	24(%rsp),%rdx
+4C6344241C                		movslq	28(%rsp),%r8
+486344241C                		movslq	28(%rsp),%rax
+48634C241C                		movslq	28(%rsp),%rcx
+4863D0                     		movslq	%eax,%rdx
+4863CD                     		movslq	%ebp,%rcx
+4863D5                     		movslq	%ebp,%rdx
+4C63D3                     		movslq	%ebx,%r10
+4863C3                     		movslq	%ebx,%rax
+4863CB                     		movslq	%ebx,%rcx
+4863D3                     		movslq	%ebx,%rdx
+4863C1                     		movslq	%ecx,%rax
+4863D1                     		movslq	%ecx,%rdx
+4863C7                     		movslq	%edi,%rax
+4863CF                     		movslq	%edi,%rcx
+4863D7                     		movslq	%edi,%rdx
+4863F7                     		movslq	%edi,%rsi
+4863C2                     		movslq	%edx,%rax
+4863D2                     		movslq	%edx,%rdx
+4863F2                     		movslq	%edx,%rsi
+4863C6                     		movslq	%esi,%rax
+4963C2                     		movslq	%r10d,%rax
+4963CA                     		movslq	%r10d,%rcx
+4963D2                     		movslq	%r10d,%rdx
+4963F2                     		movslq	%r10d,%rsi
+4963CB                     		movslq	%r11d,%rcx
+4963D3                     		movslq	%r11d,%rdx
+4963C4                     		movslq	%r12d,%rax
+4963CC                     		movslq	%r12d,%rcx
+4963D4                     		movslq	%r12d,%rdx
+4963C5                     		movslq	%r13d,%rax
+4963D5                     		movslq	%r13d,%rdx
+4963C6                     		movslq	%r14d,%rax
+4963D6                     		movslq	%r14d,%rdx
+4963C7                     		movslq	%r15d,%rax
+4963C0                     		movslq	%r8d,%rax
+4963D0                     		movslq	%r8d,%rdx
+4963C1                     		movslq	%r9d,%rax
+4963C9                     		movslq	%r9d,%rcx
+4963D1                     		movslq	%r9d,%rdx
+66C74457FE0000            		movw	$0, -2(%rdi,%rdx,2)
+66C74457FC0000            		movw	$0, -4(%rdi,%rdx,2)
+66C74457FA0000            		movw	$0, -6(%rdi,%rdx,2)
+66C74444500000            		movw	$0, 80(%rsp,%rax,2)
+6641C704400000            		movw	$0, (%r8,%rax,2)
+66C74445000000            		movw	$0, (%rbp,%rax,2)
+66C704510000              		movw	$0, (%rcx,%rdx,2)
+66C704470000              		movw	$0, (%rdi,%rax,2)
+66C74445000100            		movw	$1, (%rbp,%rax,2)
+6689445500                		movw	%ax, (%rbp,%rdx,2)
+66894C2450                		movw	%cx, 80(%rsp)
+66894C2452                		movw	%cx, 82(%rsp)
+66897C2458                		movw	%di, 88(%rsp)
+66897C245A                		movw	%di, 90(%rsp)
+664489544500              		movw	%r10w, (%rbp,%rax,2)
+6689742454                		movw	%si, 84(%rsp)
+6689742456                		movw	%si, 86(%rsp)
+66893441                   		movw	%si, (%rcx,%rax,2)
+0FB6477C                   		movzbl	124(%rdi), %eax
+410FB6420C                		movzbl	12(%r10), %eax
+410FB6410C                		movzbl	12(%r9), %eax
+0FB68405A8060000          		movzbl	1704(%rbp,%rax), %eax
+0FB6B405A8060000          		movzbl	1704(%rbp,%rax), %esi
+420FB68415FA4C0000       		movzbl	19706(%rbp,%r10), %eax
+420FB64401FF              		movzbl	-1(%rcx,%r8), %eax
+0FB64432FF                		movzbl	-1(%rdx,%rsi), %eax
+0FB64C2401                		movzbl	1(%rsp), %ecx
+420FB64401FE              		movzbl	-2(%rcx,%r8), %eax
+0FB64432FE                		movzbl	-2(%rdx,%rsi), %eax
+0FB64C2420                		movzbl	32(%rsp), %ecx
+0FB64C0420                		movzbl	32(%rsp,%rax), %ecx
+420FB684284C930000       		movzbl	37708(%rax,%r13), %eax
+0FB684104C930000          		movzbl	37708(%rax,%rdx), %eax
+440FB684454C930000       		movzbl	37708(%rbp,%rax,2), %r8d
+0FB6940D4C930000          		movzbl	37708(%rbp,%rcx), %edx
+440FB684114C930000       		movzbl	37708(%rcx,%rdx), %r8d
+0FB6840D4E940000          		movzbl	37966(%rbp,%rcx), %eax
+0FB6940D50950000          		movzbl	38224(%rbp,%rcx), %edx
+0FB6840D52960000          		movzbl	38482(%rbp,%rcx), %eax
+0FB6B40380010000          		movzbl	384(%rbx,%rax), %esi
+0FB6940D54970000          		movzbl	38740(%rbp,%rcx), %edx
+0FB6840D56980000          		movzbl	38998(%rbp,%rcx), %eax
+420FB64401FD              		movzbl	-3(%rcx,%r8), %eax
+0FB64432FD                		movzbl	-3(%rdx,%rsi), %eax
+410FB68383020000          		movzbl	643(%r11), %eax
+0FB68783020000            		movzbl	643(%rdi), %eax
+0FB68683020000            		movzbl	643(%rsi), %eax
+0FB68398020000            		movzbl	664(%rbx), %eax
+0FB6C0                     		movzbl	%al, %eax
+440FB6F0                   		movzbl	%al, %r14d
+0FB6EF                     		movzbl	%bh, %ebp
+0FB6F3                     		movzbl	%bl,%esi
+440FB6DA                   		movzbl	%dl, %r11d
+440FB620                   		movzbl	(%rax), %r12d
+0FB60408                   		movzbl	(%rax,%rcx), %eax
+0FB60438                   		movzbl	(%rax,%rdi), %eax
+0FB63C38                   		movzbl	(%rax,%rdi), %edi
+0FB63438                   		movzbl	(%rax,%rdi), %esi
+440FB60C38                		movzbl	(%rax,%rdi), %r9d
+0FB60410                   		movzbl	(%rax,%rdx), %eax
+440FB61410                		movzbl	(%rax,%rdx), %r10d
+0FB60C30                   		movzbl	(%rax,%rsi), %ecx
+0FB61430                   		movzbl	(%rax,%rsi), %edx
+0FB60431                   		movzbl	(%rcx,%rsi), %eax
+0FB60A                     		movzbl	(%rdx), %ecx
+0FB60432                   		movzbl	(%rdx,%rsi), %eax
+0FB61432                   		movzbl	(%rdx,%rsi), %edx
+0FB60424                   		movzbl	(%rsp), %eax
+400FB6F6                   		movzbl	%sil, %esi
+4A0FB69415A8060000       		movzbq	1704(%rbp,%r10), %rdx
+480FB69405A8060000       		movzbq	1704(%rbp,%rax), %rdx
+4A0FB60428                		movzbq	(%rax,%r13), %rax
+4A0FB63400                		movzbq	(%rax,%r8), %rsi
+480FB60408                		movzbq	(%rax,%rcx), %rax
+480FB60410                		movzbq	(%rax,%rdx), %rax
+660FB694114C930000       		movzbw	37708(%rcx,%rdx), %dx
+66410FB630                		movzbw	(%r8), %si
+0FB7444450                		movzwl	80(%rsp,%rax,2), %eax
+430FB7544756              		movzwl	86(%r15,%r8,2), %edx
+0FB7C8                     		movzwl	%ax, %ecx 
+0FB7C2                     		movzwl	%dx, %eax 
+410FB70447                		movzwl	(%r15,%rax,2), %eax
+0FB70441                   		movzwl	(%rcx,%rax,2), %eax
+440FB71441                		movzwl	(%rcx,%rax,2), %r10d
+0FB7C6                     		movzwl	%si, %eax 
+4B0FB744470A              		movzwq	10(%r15,%r8,2), %rax
+490FB744470A              		movzwq	10(%r15,%rax,2), %rax
+490FB7444F0A              		movzwq	10(%r15,%rcx,2), %rax
+4B0FB744470C              		movzwq	12(%r15,%r8,2), %rax
+490FB744470C              		movzwq	12(%r15,%rax,2), %rax
+490FB7444F0C              		movzwq	12(%r15,%rcx,2), %rax
+4B0FB744470E              		movzwq	14(%r15,%r8,2), %rax
+490FB744470E              		movzwq	14(%r15,%rax,2), %rax
+490FB7444F0E              		movzwq	14(%r15,%rcx,2), %rax
+4B0FB7444710              		movzwq	16(%r15,%r8,2), %rax
+490FB7444710              		movzwq	16(%r15,%rax,2), %rax
+490FB7444F10              		movzwq	16(%r15,%rcx,2), %rax
+4B0FB7444712              		movzwq	18(%r15,%r8,2), %rax
+490FB7444712              		movzwq	18(%r15,%rax,2), %rax
+490FB7444F12              		movzwq	18(%r15,%rcx,2), %rax
+4B0FB7444714              		movzwq	20(%r15,%r8,2), %rax
+490FB7444714              		movzwq	20(%r15,%rax,2), %rax
+490FB7444F14              		movzwq	20(%r15,%rcx,2), %rax
+4B0FB7444716              		movzwq	22(%r15,%r8,2), %rax
+490FB7444716              		movzwq	22(%r15,%rax,2), %rax
+490FB7444F16              		movzwq	22(%r15,%rcx,2), %rax
+4B0FB7444718              		movzwq	24(%r15,%r8,2), %rax
+490FB7444718              		movzwq	24(%r15,%rax,2), %rax
+490FB7444F18              		movzwq	24(%r15,%rcx,2), %rax
+4B0FB744471A              		movzwq	26(%r15,%r8,2), %rax
+490FB744471A              		movzwq	26(%r15,%rax,2), %rax
+490FB7444F1A              		movzwq	26(%r15,%rcx,2), %rax
+4B0FB744471C              		movzwq	28(%r15,%r8,2), %rax
+490FB744471C              		movzwq	28(%r15,%rax,2), %rax
+490FB7444F1C              		movzwq	28(%r15,%rcx,2), %rax
+4B0FB7444702              		movzwq	2(%r15,%r8,2), %rax
+490FB7444702              		movzwq	2(%r15,%rax,2), %rax
+490FB7444F02              		movzwq	2(%r15,%rcx,2), %rax
+4B0FB744471E              		movzwq	30(%r15,%r8,2), %rax
+490FB744471E              		movzwq	30(%r15,%rax,2), %rax
+490FB7444F1E              		movzwq	30(%r15,%rcx,2), %rax
+4B0FB7444720              		movzwq	32(%r15,%r8,2), %rax
+490FB7444720              		movzwq	32(%r15,%rax,2), %rax
+490FB7444F20              		movzwq	32(%r15,%rcx,2), %rax
+4B0FB7444722              		movzwq	34(%r15,%r8,2), %rax
+490FB7444722              		movzwq	34(%r15,%rax,2), %rax
+490FB7444F22              		movzwq	34(%r15,%rcx,2), %rax
+4B0FB7444724              		movzwq	36(%r15,%r8,2), %rax
+490FB7444724              		movzwq	36(%r15,%rax,2), %rax
+490FB7444F24              		movzwq	36(%r15,%rcx,2), %rax
+4B0FB7444726              		movzwq	38(%r15,%r8,2), %rax
+490FB7444726              		movzwq	38(%r15,%rax,2), %rax
+490FB7444F26              		movzwq	38(%r15,%rcx,2), %rax
+4B0FB7444728              		movzwq	40(%r15,%r8,2), %rax
+490FB7444728              		movzwq	40(%r15,%rax,2), %rax
+490FB7444F28              		movzwq	40(%r15,%rcx,2), %rax
+4B0FB744472A              		movzwq	42(%r15,%r8,2), %rax
+490FB744472A              		movzwq	42(%r15,%rax,2), %rax
+490FB7444F2A              		movzwq	42(%r15,%rcx,2), %rax
+4B0FB744472C              		movzwq	44(%r15,%r8,2), %rax
+490FB744472C              		movzwq	44(%r15,%rax,2), %rax
+490FB7444F2C              		movzwq	44(%r15,%rcx,2), %rax
+4B0FB744472E              		movzwq	46(%r15,%r8,2), %rax
+490FB744472E              		movzwq	46(%r15,%rax,2), %rax
+490FB7444F2E              		movzwq	46(%r15,%rcx,2), %rax
+4B0FB7444730              		movzwq	48(%r15,%r8,2), %rax
+490FB7444730              		movzwq	48(%r15,%rax,2), %rax
+490FB7444F30              		movzwq	48(%r15,%rcx,2), %rax
+4B0FB7444704              		movzwq	4(%r15,%r8,2), %rax
+490FB7444704              		movzwq	4(%r15,%rax,2), %rax
+490FB7444F04              		movzwq	4(%r15,%rcx,2), %rax
+4B0FB7444732              		movzwq	50(%r15,%r8,2), %rax
+490FB7444732              		movzwq	50(%r15,%rax,2), %rax
+490FB7444F32              		movzwq	50(%r15,%rcx,2), %rax
+4B0FB7444734              		movzwq	52(%r15,%r8,2), %rax
+490FB7444734              		movzwq	52(%r15,%rax,2), %rax
+490FB7444F34              		movzwq	52(%r15,%rcx,2), %rax
+4B0FB7444736              		movzwq	54(%r15,%r8,2), %rax
+490FB7444736              		movzwq	54(%r15,%rax,2), %rax
+490FB7444F36              		movzwq	54(%r15,%rcx,2), %rax
+4B0FB7444738              		movzwq	56(%r15,%r8,2), %rax
+490FB7444738              		movzwq	56(%r15,%rax,2), %rax
+490FB7444F38              		movzwq	56(%r15,%rcx,2), %rax
+4B0FB744473A              		movzwq	58(%r15,%r8,2), %rax
+490FB744473A              		movzwq	58(%r15,%rax,2), %rax
+490FB7444F3A              		movzwq	58(%r15,%rcx,2), %rax
+4B0FB744473C              		movzwq	60(%r15,%r8,2), %rax
+490FB744473C              		movzwq	60(%r15,%rax,2), %rax
+490FB7444F3C              		movzwq	60(%r15,%rcx,2), %rax
+4B0FB744473E              		movzwq	62(%r15,%r8,2), %rax
+490FB744473E              		movzwq	62(%r15,%rax,2), %rax
+490FB7444F3E              		movzwq	62(%r15,%rcx,2), %rax
+4B0FB7444740              		movzwq	64(%r15,%r8,2), %rax
+490FB7444740              		movzwq	64(%r15,%rax,2), %rax
+490FB7444F40              		movzwq	64(%r15,%rcx,2), %rax
+4B0FB7444742              		movzwq	66(%r15,%r8,2), %rax
+490FB7444742              		movzwq	66(%r15,%rax,2), %rax
+490FB7444F42              		movzwq	66(%r15,%rcx,2), %rax
+4B0FB7444744              		movzwq	68(%r15,%r8,2), %rax
+490FB7444744              		movzwq	68(%r15,%rax,2), %rax
+490FB7444F44              		movzwq	68(%r15,%rcx,2), %rax
+4B0FB7444706              		movzwq	6(%r15,%r8,2), %rax
+490FB7444706              		movzwq	6(%r15,%rax,2), %rax
+490FB7444F06              		movzwq	6(%r15,%rcx,2), %rax
+4B0FB7444746              		movzwq	70(%r15,%r8,2), %rax
+490FB7444746              		movzwq	70(%r15,%rax,2), %rax
+490FB7444F46              		movzwq	70(%r15,%rcx,2), %rax
+4B0FB7444748              		movzwq	72(%r15,%r8,2), %rax
+490FB7444748              		movzwq	72(%r15,%rax,2), %rax
+490FB7444F48              		movzwq	72(%r15,%rcx,2), %rax
+4B0FB744474A              		movzwq	74(%r15,%r8,2), %rax
+490FB744474A              		movzwq	74(%r15,%rax,2), %rax
+490FB7444F4A              		movzwq	74(%r15,%rcx,2), %rax
+4B0FB744474C              		movzwq	76(%r15,%r8,2), %rax
+490FB744474C              		movzwq	76(%r15,%rax,2), %rax
+490FB7444F4C              		movzwq	76(%r15,%rcx,2), %rax
+4B0FB744474E              		movzwq	78(%r15,%r8,2), %rax
+490FB744474E              		movzwq	78(%r15,%rax,2), %rax
+490FB7444F4E              		movzwq	78(%r15,%rcx,2), %rax
+4B0FB7444750              		movzwq	80(%r15,%r8,2), %rax
+490FB7444750              		movzwq	80(%r15,%rax,2), %rax
+490FB7444F50              		movzwq	80(%r15,%rcx,2), %rax
+4B0FB7444752              		movzwq	82(%r15,%r8,2), %rax
+490FB7444752              		movzwq	82(%r15,%rax,2), %rax
+490FB7444F52              		movzwq	82(%r15,%rcx,2), %rax
+4B0FB7444754              		movzwq	84(%r15,%r8,2), %rax
+490FB7444754              		movzwq	84(%r15,%rax,2), %rax
+490FB7444F54              		movzwq	84(%r15,%rcx,2), %rax
+490FB7444756              		movzwq	86(%r15,%rax,2), %rax
+490FB7444F56              		movzwq	86(%r15,%rcx,2), %rax
+4B0FB7444758              		movzwq	88(%r15,%r8,2), %rax
+490FB7444758              		movzwq	88(%r15,%rax,2), %rax
+490FB7444F58              		movzwq	88(%r15,%rcx,2), %rax
+4B0FB7444708              		movzwq	8(%r15,%r8,2), %rax
+490FB7444708              		movzwq	8(%r15,%rax,2), %rax
+490FB7444F08              		movzwq	8(%r15,%rcx,2), %rax
+4B0FB744475A              		movzwq	90(%r15,%r8,2), %rax
+490FB744475A              		movzwq	90(%r15,%rax,2), %rax
+490FB7444F5A              		movzwq	90(%r15,%rcx,2), %rax
+4B0FB744475C              		movzwq	92(%r15,%r8,2), %rax
+490FB744475C              		movzwq	92(%r15,%rax,2), %rax
+490FB7444F5C              		movzwq	92(%r15,%rcx,2), %rax
+4B0FB744475E              		movzwq	94(%r15,%r8,2), %rax
+490FB744475E              		movzwq	94(%r15,%rax,2), %rax
+490FB7444F5E              		movzwq	94(%r15,%rcx,2), %rax
+4B0FB7444760              		movzwq	96(%r15,%r8,2), %rax
+490FB7444760              		movzwq	96(%r15,%rax,2), %rax
+490FB7444F60              		movzwq	96(%r15,%rcx,2), %rax
+4B0FB7444762              		movzwq	98(%r15,%r8,2), %rax
+490FB7444762              		movzwq	98(%r15,%rax,2), %rax
+490FB7444F62              		movzwq	98(%r15,%rcx,2), %rax
+4B0FB71447                		movzwq	(%r15,%r8,2), %rdx
+490FB70447                		movzwq	(%r15,%rax,2), %rax
+490FB70C47                		movzwq	(%r15,%rax,2), %rcx
+490FB7044F                		movzwq	(%r15,%rcx,2), %rax
+F7D9                       		negl	%ecx
+F7D2                       		notl	%edx
+814C850000002000          		orl	$2097152, (%rbp,%rax,4)
+098680020000              		orl	%eax, 640(%rsi)
+09C6                       		orl	%eax, %esi
+4109C4                     		orl	%eax, %r12d
+09AE80020000              		orl	%ebp, 640(%rsi)
+092C83                     		orl	%ebp, (%rbx,%rax,4)
+09BE80020000              		orl	%edi, 640(%rsi)
+099680020000              		orl	%edx, 640(%rsi)
+09D0                       		orl	%edx, %eax
+4109B380020000            		orl	%esi, 640(%r11)
+09B780020000              		orl	%esi, 640(%rdi)
+44099680020000            		orl	%r10d, 640(%rsi)
+44098680020000            		orl	%r8d, 640(%rsi)
+44090483                   		orl	%r8d, (%rbx,%rax,4)
+44098E80020000            		orl	%r9d, 640(%rsi)
+415C                       		popq	%r12
+415D                       		popq	%r13
+415E                       		popq	%r14
+415F                       		popq	%r15
+5D                         		popq	%rbp
+5B                         		popq	%rbx
+4154                       		pushq	%r12
+4155                       		pushq	%r13
+4156                       		pushq	%r14
+4157                       		pushq	%r15
+55                         		pushq	%rbp
+53                         		pushq	%rbx
+F3C3                       		rep ; ret
+D1C0                       		roll	%eax
+C1E010                     		sall	$16, %eax
+D1642410                   		sall	16(%rsp)
+C1E204                     		sall	$4, %edx
+41C1E204                   		sall	$4, %r10d
+41C1A38002000008          		sall	$8, 640(%r11)
+C1A78002000008            		sall	$8, 640(%rdi)
+C1A68002000008            		sall	$8, 640(%rsi)
+C1E008                     		sall	$8, %eax
+C1E608                     		sall	$8, %esi
+41C1E408                   		sall	$8, %r12d
+D3E0                       		sall	%cl, %eax
+D3E5                       		sall	%cl, %ebp
+D3E7                       		sall	%cl, %edi
+D3E2                       		sall	%cl, %edx
+D3E6                       		sall	%cl, %esi
+41D3E2                     		sall	%cl, %r10d
+41D3E0                     		sall	%cl, %r8d
+41D3E1                     		sall	%cl, %r9d
+48C1E00A                   		salq	$10, %rax
+48C1E20A                   		salq	$10, %rdx
+48C1E004                   		salq	$4, %rax
+48C1E204                   		salq	$4, %rdx
+48C1E604                   		salq	$4, %rsi
+48C1E007                   		salq	$7, %rax
+48C1E707                   		salq	$7, %rdi
+48C1E207                   		salq	$7, %rdx
+C1F91F                     		sarl	$31, %ecx
+C1F903                     		sarl	$3, %ecx
+C1F805                     		sarl	$5, %eax
+C1FA05                     		sarl	$5, %edx
+C1F908                     		sarl	$8, %ecx
+C1FA08                     		sarl	$8, %edx
+D3F8                       		sarl	%cl, %eax
+D3FE                       		sarl	%cl, %esi
+D1F8                       		sarl	%eax
+D1FF                       		sarl	%edi
+0F97C0                     		seta	%al
+0F94C0                     		sete	%al
+0F9FC0                     		setg	%al
+0F9FC2                     		setg	%dl
+0F9DC2                     		setge	%dl
+0F9EC0                     		setle	%al
+0F9EC2                     		setle	%dl
+0F95C0                     		setne	%al
+C1E910                     		shrl	$16, %ecx
+C1EF10                     		shrl	$16, %edi
+C1EE10                     		shrl	$16, %esi
+41C1E810                   		shrl	$16, %r8d
+41C1E818                   		shrl	$24, %r8d
+C1E81F                     		shrl	$31, %eax
+C1EA1F                     		shrl	$31, %edx
+D1E8                       		shrl	%eax
+48C1E820                   		shrq	$32, %rax
+48C1E920                   		shrq	$32, %rcx
+66C1EA08                   		shrw	$8, %dx
+2B542410                   		subl	16(%rsp), %edx
+2B742410                   		subl	16(%rsp), %esi
+2B442414                   		subl	20(%rsp), %eax
+2B542414                   		subl	20(%rsp), %edx
+442B442418                		subl	24(%rsp), %r8d
+2B44241C                   		subl	28(%rsp), %eax
+83EB04                     		subl	$4, %ebx
+83E905                     		subl	$5, %ecx
+442BAC85A0020000          		subl	672(%rbp,%rax,4), %r13d
+4183AB8402000008          		subl	$8, 644(%r11)
+83AF8402000008            		subl	$8, 644(%rdi)
+83AE8402000008            		subl	$8, 644(%rsi)
+4183EB08                   		subl	$8, %r11d
+29C1                       		subl	%eax, %ecx
+29C2                       		subl	%eax, %edx
+29C6                       		subl	%eax, %esi
+29E8                       		subl	%ebp, %eax
+29DA                       		subl	%ebx, %edx
+29C8                       		subl	%ecx, %eax
+4129CB                     		subl	%ecx, %r11d
+4129C9                     		subl	%ecx, %r9d
+29F8                       		subl	%edi, %eax
+29F9                       		subl	%edi, %ecx
+29FA                       		subl	%edi, %edx
+29FE                       		subl	%edi, %esi
+4129F8                     		subl	%edi, %r8d
+29D3                       		subl	%edx, %ebx
+29F0                       		subl	%esi, %eax
+29F2                       		subl	%esi, %edx
+4129F1                     		subl	%esi, %r9d
+4429D0                     		subl	%r10d, %eax
+4429D1                     		subl	%r10d, %ecx
+4429D9                     		subl	%r11d, %ecx
+4429DF                     		subl	%r11d, %edi
+4429E0                     		subl	%r12d, %eax
+4429E1                     		subl	%r12d, %ecx
+4429E2                     		subl	%r12d, %edx
+44296C240C                		subl	%r13d, 12(%rsp)
+4429E8                     		subl	%r13d, %eax
+4429EA                     		subl	%r13d, %edx
+4429F7                     		subl	%r14d, %edi
+4429F8                     		subl	%r15d, %eax
+4429FA                     		subl	%r15d, %edx
+4529F9                     		subl	%r15d, %r9d
+4429C0                     		subl	%r8d, %eax
+4429C1                     		subl	%r8d, %ecx
+4429C2                     		subl	%r8d, %edx
+4429C9                     		subl	%r9d, %ecx
+2B7C0500                   		subl	(%rbp,%rax), %edi
+2B441500                   		subl	(%rbp,%rdx), %eax
+4883EC68                   		subq	$104, %rsp
+4881EC18050000            		subq	$1304, %rsp
+4881EC38080000            		subq	$2104, %rsp
+4881EC08010000            		subq	$264, %rsp
+4881EC480D0000            		subq	$3400, %rsp
+4883EC38                   		subq	$56, %rsp
+4883EC48                   		subq	$72, %rsp
+4881EC38030000            		subq	$824, %rsp
+4883EC08                   		subq	$8, %rsp
+A801                       		testb	$1, %al
+40F6C701                   		testb	$1, %dil
+F6C201                     		testb	$1, %dl
+84C0                       		testb	%al, %al
+4084ED                     		testb	%bpl, %bpl
+A900002000                		testl	$2097152, %eax
+85ED                       		testl	%ebp, %ebp
+852C83                     		testl	%ebp, (%rbx,%rax,4)
+85DB                       		testl	%ebx, %ebx
+85C9                       		testl	%ecx, %ecx
+85FF                       		testl	%edi, %edi
+853C83                     		testl	%edi, (%rbx,%rax,4)
+851483                     		testl	%edx, (%rbx,%rax,4)
+85F6                       		testl	%esi, %esi
+853483                     		testl	%esi, (%rbx,%rax,4)
+4585DB                     		testl	%r11d, %r11d
+4585ED                     		testl	%r13d, %r13d
+4585F6                     		testl	%r14d, %r14d
+4585C0                     		testl	%r8d, %r8d
+44850483                   		testl	%r8d, (%rbx,%rax,4)
+31D0                       		xorl	%edx, %eax
+90                         		nop
diff --git a/VEX/orig_amd64/test2.orig b/VEX/orig_amd64/test2.orig
new file mode 100644
index 0000000..841a91a
--- /dev/null
+++ b/VEX/orig_amd64/test2.orig
@@ -0,0 +1,23917 @@
+
+addb 8(%rdx), %dil
+. 1 0x12345678 5
+. 40 02 7A 08 C3
+
+addl $102, %esi
+. 2 0x12345678 4
+. 83 C6 66 C3
+
+addl $108, %edx
+. 3 0x12345678 4
+. 83 C2 6C C3
+
+addl $108, %esi
+. 4 0x12345678 4
+. 83 C6 6C C3
+
+addl $10, %r13d
+. 5 0x12345678 5
+. 41 83 C5 0A C3
+
+addl $115, %esi
+. 6 0x12345678 4
+. 83 C6 73 C3
+
+addl $-128, %ebx
+. 7 0x12345678 4
+. 83 C3 80 C3
+
+addl $-128, %edi
+. 8 0x12345678 4
+. 83 C7 80 C3
+
+addl 12(%rsp), %eax
+. 9 0x12345678 5
+. 03 44 24 0C C3
+
+addl 136(%rsp), %edi
+. 10 0x12345678 8
+. 03 BC 24 88 00 00 00 C3
+
+addl $13, %ecx
+. 11 0x12345678 4
+. 83 C1 0D C3
+
+addl $16, %ebx
+. 12 0x12345678 4
+. 83 C3 10 C3
+
+addl $17, %eax
+. 13 0x12345678 4
+. 83 C0 11 C3
+
+addl $18, %ebp
+. 14 0x12345678 4
+. 83 C5 12 C3
+
+addl 20(%rsp), %eax
+. 15 0x12345678 5
+. 03 44 24 14 C3
+
+addl 20(%rsp), %r14d
+. 16 0x12345678 6
+. 44 03 74 24 14 C3
+
+addl $21, %ebx
+. 17 0x12345678 4
+. 83 C3 15 C3
+
+addl 24(%rsp), %eax
+. 18 0x12345678 5
+. 03 44 24 18 C3
+
+addl $28, 16(%rsp)
+. 19 0x12345678 6
+. 83 44 24 10 1C C3
+
+addl 28(%rsp), %eax
+. 20 0x12345678 5
+. 03 44 24 1C C3
+
+addl 28(%rsp), %ebp
+. 21 0x12345678 5
+. 03 6C 24 1C C3
+
+addl 28(%rsp), %r14d
+. 22 0x12345678 6
+. 44 03 74 24 1C C3
+
+addl 28(%rsp), %r15d
+. 23 0x12345678 6
+. 44 03 7C 24 1C C3
+
+addl $29, %eax
+. 24 0x12345678 4
+. 83 C0 1D C3
+
+addl $2, %eax
+. 25 0x12345678 4
+. 83 C0 02 C3
+
+addl $2, %ebp
+. 26 0x12345678 4
+. 83 C5 02 C3
+
+addl $2, %ebx
+. 27 0x12345678 4
+. 83 C3 02 C3
+
+addl $2, %edi
+. 28 0x12345678 4
+. 83 C7 02 C3
+
+addl $2, %r12d
+. 29 0x12345678 5
+. 41 83 C4 02 C3
+
+addl $2, %r15d
+. 30 0x12345678 5
+. 41 83 C7 02 C3
+
+addl $31, 16(%rsp)
+. 31 0x12345678 6
+. 83 44 24 10 1F C3
+
+addl 312(%rsp), %edi
+. 32 0x12345678 8
+. 03 BC 24 38 01 00 00 C3
+
+addl 324(%rsp), %r13d
+. 33 0x12345678 9
+. 44 03 AC 24 44 01 00 00 C3
+
+addl 332(%rsp), %ebp
+. 34 0x12345678 8
+. 03 AC 24 4C 01 00 00 C3
+
+addl $33, %eax
+. 35 0x12345678 4
+. 83 C0 21 C3
+
+addl 344(%rsp), %ebp
+. 36 0x12345678 8
+. 03 AC 24 58 01 00 00 C3
+
+addl 344(%rsp), %edi
+. 37 0x12345678 8
+. 03 BC 24 58 01 00 00 C3
+
+addl 352(%rsp), %ebp
+. 38 0x12345678 8
+. 03 AC 24 60 01 00 00 C3
+
+addl 356(%rsp), %ebp
+. 39 0x12345678 8
+. 03 AC 24 64 01 00 00 C3
+
+addl 368(%rsp), %ebp
+. 40 0x12345678 8
+. 03 AC 24 70 01 00 00 C3
+
+addl 36(%rsp), %eax
+. 41 0x12345678 5
+. 03 44 24 24 C3
+
+addl 36(%rsp), %edi
+. 42 0x12345678 5
+. 03 7C 24 24 C3
+
+addl $37, 16(%rsp)
+. 43 0x12345678 6
+. 83 44 24 10 25 C3
+
+addl $37, %ecx
+. 44 0x12345678 4
+. 83 C1 25 C3
+
+addl $3, %ebp
+. 45 0x12345678 4
+. 83 C5 03 C3
+
+addl $3, %ebx
+. 46 0x12345678 4
+. 83 C3 03 C3
+
+addl $3, %edi
+. 47 0x12345678 4
+. 83 C7 03 C3
+
+addl $41, 16(%rsp)
+. 48 0x12345678 6
+. 83 44 24 10 29 C3
+
+addl 44(%rsp), %edi
+. 49 0x12345678 5
+. 03 7C 24 2C C3
+
+addl 456(%rsp), %eax
+. 50 0x12345678 8
+. 03 84 24 C8 01 00 00 C3
+
+addl $4, %ebp
+. 51 0x12345678 4
+. 83 C5 04 C3
+
+addl $4, %ebx
+. 52 0x12345678 4
+. 83 C3 04 C3
+
+addl $4, %edi
+. 53 0x12345678 4
+. 83 C7 04 C3
+
+addl 556(%rsp), %eax
+. 54 0x12345678 8
+. 03 84 24 2C 02 00 00 C3
+
+addl 556(%rsp), %ebp
+. 55 0x12345678 8
+. 03 AC 24 2C 02 00 00 C3
+
+addl 56(%rsp), %eax
+. 56 0x12345678 5
+. 03 44 24 38 C3
+
+addl $5, %eax
+. 57 0x12345678 4
+. 83 C0 05 C3
+
+addl $5, %ebp
+. 58 0x12345678 4
+. 83 C5 05 C3
+
+addl 60(%rsp), %eax
+. 59 0x12345678 5
+. 03 44 24 3C C3
+
+addl 60(%rsp), %r12d
+. 60 0x12345678 6
+. 44 03 64 24 3C C3
+
+addl $69634, %eax
+. 61 0x12345678 6
+. 05 02 10 01 00 C3
+
+addl 76(%rsp), %r12d
+. 62 0x12345678 6
+. 44 03 64 24 4C C3
+
+addl $77851, %edi
+. 63 0x12345678 7
+. 81 C7 1B 30 01 00 C3
+
+addl $77851, %r12d
+. 64 0x12345678 8
+. 41 81 C4 1B 30 01 00 C3
+
+addl $77852, %r12d
+. 65 0x12345678 8
+. 41 81 C4 1C 30 01 00 C3
+
+addl $77873, %ebx
+. 66 0x12345678 7
+. 81 C3 31 30 01 00 C3
+
+addl $77874, %ebx
+. 67 0x12345678 7
+. 81 C3 32 30 01 00 C3
+
+addl $77875, %ebx
+. 68 0x12345678 7
+. 81 C3 33 30 01 00 C3
+
+addl $77886, %ebp
+. 69 0x12345678 7
+. 81 C5 3E 30 01 00 C3
+
+addl $77891, %eax
+. 70 0x12345678 6
+. 05 43 30 01 00 C3
+
+addl $77891, %edi
+. 71 0x12345678 7
+. 81 C7 43 30 01 00 C3
+
+addl $77891, %r12d
+. 72 0x12345678 8
+. 41 81 C4 43 30 01 00 C3
+
+addl $77892, %eax
+. 73 0x12345678 6
+. 05 44 30 01 00 C3
+
+addl $77892, %edi
+. 74 0x12345678 7
+. 81 C7 44 30 01 00 C3
+
+addl $77893, %eax
+. 75 0x12345678 6
+. 05 45 30 01 00 C3
+
+addl $77893, %edi
+. 76 0x12345678 7
+. 81 C7 45 30 01 00 C3
+
+addl $77893, %r12d
+. 77 0x12345678 8
+. 41 81 C4 45 30 01 00 C3
+
+addl $77893, %r13d
+. 78 0x12345678 8
+. 41 81 C5 45 30 01 00 C3
+
+addl $77894, %ebx
+. 79 0x12345678 7
+. 81 C3 46 30 01 00 C3
+
+addl $77900, %ebp
+. 80 0x12345678 7
+. 81 C5 4C 30 01 00 C3
+
+addl $77903, %ebp
+. 81 0x12345678 7
+. 81 C5 4F 30 01 00 C3
+
+addl $7, %edi
+. 82 0x12345678 4
+. 83 C7 07 C3
+
+addl $7, %r13d
+. 83 0x12345678 5
+. 41 83 C5 07 C3
+
+addl $81921, %eax
+. 84 0x12345678 6
+. 05 01 40 01 00 C3
+
+addl $8, %eax
+. 85 0x12345678 4
+. 83 C0 08 C3
+
+addl 8(%rdx), %edi
+. 86 0x12345678 4
+. 03 7A 08 C3
+
+addl 8(%rsp), %r15d
+. 87 0x12345678 6
+. 44 03 7C 24 08 C3
+
+addl %eax, 20(%rsp)
+. 88 0x12345678 5
+. 01 44 24 14 C3
+
+addl %eax, 24(%rsp)
+. 89 0x12345678 5
+. 01 44 24 18 C3
+
+addl %eax, 36(%rsp)
+. 90 0x12345678 5
+. 01 44 24 24 C3
+
+addl %eax, 40(%rsp)
+. 91 0x12345678 5
+. 01 44 24 28 C3
+
+addl %eax, 44(%rsp)
+. 92 0x12345678 5
+. 01 44 24 2C C3
+
+addl %eax, 56(%rsp)
+. 93 0x12345678 5
+. 01 44 24 38 C3
+
+addl %eax, 8(%r13)
+. 94 0x12345678 5
+. 41 01 45 08 C3
+
+addl %eax, %eax
+. 95 0x12345678 3
+. 01 C0 C3
+
+addl %eax, %edi
+. 96 0x12345678 3
+. 01 C7 C3
+
+addl %eax, %edx
+. 97 0x12345678 3
+. 01 C2 C3
+
+addl %eax, %esi
+. 98 0x12345678 3
+. 01 C6 C3
+
+addl %eax, %r15d
+. 99 0x12345678 4
+. 41 01 C7 C3
+
+addl %ebp, 8(%rsp)
+. 100 0x12345678 5
+. 01 6C 24 08 C3
+
+addl %ebp, %eax
+. 101 0x12345678 3
+. 01 E8 C3
+
+addl %ebp, %ebx
+. 102 0x12345678 3
+. 01 EB C3
+
+addl %ebp, %edi
+. 103 0x12345678 3
+. 01 EF C3
+
+addl %ebp, %esi
+. 104 0x12345678 3
+. 01 EE C3
+
+addl %ecx, 124(%rsp)
+. 105 0x12345678 5
+. 01 4C 24 7C C3
+
+addl %ecx, 292(%rsp)
+. 106 0x12345678 8
+. 01 8C 24 24 01 00 00 C3
+
+addl %ecx, 8(%rsp)
+. 107 0x12345678 5
+. 01 4C 24 08 C3
+
+addl %ecx, %ecx
+. 108 0x12345678 3
+. 01 C9 C3
+
+addl %ecx, %edi
+. 109 0x12345678 3
+. 01 CF C3
+
+addl %ecx, %edx
+. 110 0x12345678 3
+. 01 CA C3
+
+addl %ecx, %r8d
+. 111 0x12345678 4
+. 41 01 C8 C3
+
+addl %edi, 12(%rsp)
+. 112 0x12345678 5
+. 01 7C 24 0C C3
+
+addl %edi, %eax
+. 113 0x12345678 3
+. 01 F8 C3
+
+addl %edi, %edi
+. 114 0x12345678 3
+. 01 FF C3
+
+addl %edi, %esi
+. 115 0x12345678 3
+. 01 FE C3
+
+addl %edi, %r15d
+. 116 0x12345678 4
+. 41 01 FF C3
+
+addl %edi, 5555555(%rip)
+. 117 0x12345678 7
+. 01 3D 63 C5 54 00 C3
+
+addl %edx, %eax
+. 118 0x12345678 3
+. 01 D0 C3
+
+addl %edx, %ebx
+. 119 0x12345678 3
+. 01 D3 C3
+
+addl %edx, %ecx
+. 120 0x12345678 3
+. 01 D1 C3
+
+addl %edx, %edi
+. 121 0x12345678 3
+. 01 D7 C3
+
+addl %edx, %esi
+. 122 0x12345678 3
+. 01 D6 C3
+
+addl %edx, %r8d
+. 123 0x12345678 4
+. 41 01 D0 C3
+
+addl %esi, %eax
+. 124 0x12345678 3
+. 01 F0 C3
+
+addl %esi, %edi
+. 125 0x12345678 3
+. 01 F7 C3
+
+addl %esi, %r13d
+. 126 0x12345678 4
+. 41 01 F5 C3
+
+addl 1(%rip), %edi
+. 127 0x12345678 7
+. 03 3D 01 00 00 00 C3
+
+addl 2(%rip), %esi
+. 128 0x12345678 7
+. 03 35 02 00 00 00 C3
+
+addl 3(%rip), %edi
+. 129 0x12345678 7
+. 03 3D 03 00 00 00 C3
+
+addl 4(%rip), %esi
+. 130 0x12345678 7
+. 03 35 04 00 00 00 C3
+
+addl %r12d, 56(%rsp)
+. 131 0x12345678 6
+. 44 01 64 24 38 C3
+
+addl %r12d, 8(%rbx)
+. 132 0x12345678 5
+. 44 01 63 08 C3
+
+addl %r12d, %r15d
+. 133 0x12345678 4
+. 45 01 E7 C3
+
+addl %r13d, %edi
+. 134 0x12345678 4
+. 44 01 EF C3
+
+addl %r13d, %esi
+. 135 0x12345678 4
+. 44 01 EE C3
+
+addl %r14d, %eax
+. 136 0x12345678 4
+. 44 01 F0 C3
+
+addl %r14d, %ebp
+. 137 0x12345678 4
+. 44 01 F5 C3
+
+addl %r15d, 24(%rsp)
+. 138 0x12345678 6
+. 44 01 7C 24 18 C3
+
+addl %r15d, %eax
+. 139 0x12345678 4
+. 44 01 F8 C3
+
+addl %r8d, %edi
+. 140 0x12345678 4
+. 44 01 C7 C3
+
+addq $104, %rsp
+. 141 0x12345678 5
+. 48 83 C4 68 C3
+
+addq $120, %rsp
+. 142 0x12345678 5
+. 48 83 C4 78 C3
+
+addq $136, %rsp
+. 143 0x12345678 8
+. 48 81 C4 88 00 00 00 C3
+
+addq $152, %rsp
+. 144 0x12345678 8
+. 48 81 C4 98 00 00 00 C3
+
+addq $168, %rsp
+. 145 0x12345678 8
+. 48 81 C4 A8 00 00 00 C3
+
+addq 16(%rcx), %rdx
+. 146 0x12345678 5
+. 48 03 51 10 C3
+
+addq 16(%rsi), %rdx
+. 147 0x12345678 5
+. 48 03 56 10 C3
+
+addq $16, %rsp
+. 148 0x12345678 5
+. 48 83 C4 10 C3
+
+addq 16(%rsp), %rax
+. 149 0x12345678 6
+. 48 03 44 24 10 C3
+
+addq $184, %rsp
+. 150 0x12345678 8
+. 48 81 C4 B8 00 00 00 C3
+
+addq $208, %rsp
+. 151 0x12345678 8
+. 48 81 C4 D0 00 00 00 C3
+
+addq $216, %rsp
+. 152 0x12345678 8
+. 48 81 C4 D8 00 00 00 C3
+
+addq $24, %rsp
+. 153 0x12345678 5
+. 48 83 C4 18 C3
+
+addq 24(%rsp), %rax
+. 154 0x12345678 6
+. 48 03 44 24 18 C3
+
+addq $280, %rsp
+. 155 0x12345678 8
+. 48 81 C4 18 01 00 00 C3
+
+addq $312, %rsp
+. 156 0x12345678 8
+. 48 81 C4 38 01 00 00 C3
+
+addq 32(%rsp), %rax
+. 157 0x12345678 6
+. 48 03 44 24 20 C3
+
+addq $40, %rsp
+. 158 0x12345678 5
+. 48 83 C4 28 C3
+
+addq 40(%rsp), %rax
+. 159 0x12345678 6
+. 48 03 44 24 28 C3
+
+addq $472, %rsp
+. 160 0x12345678 8
+. 48 81 C4 D8 01 00 00 C3
+
+addq $4, %rbp
+. 161 0x12345678 5
+. 48 83 C5 04 C3
+
+addq $4, %rbx
+. 162 0x12345678 5
+. 48 83 C3 04 C3
+
+addq $56, %rsp
+. 163 0x12345678 5
+. 48 83 C4 38 C3
+
+addq $728, %rsp
+. 164 0x12345678 8
+. 48 81 C4 D8 02 00 00 C3
+
+addq $72, %rsp
+. 165 0x12345678 5
+. 48 83 C4 48 C3
+
+addq $88, %rsp
+. 166 0x12345678 5
+. 48 83 C4 58 C3
+
+addq $8, %rax
+. 167 0x12345678 5
+. 48 83 C0 08 C3
+
+addq $8, %rsp
+. 168 0x12345678 5
+. 48 83 C4 08 C3
+
+addq $968, %rsp
+. 169 0x12345678 8
+. 48 81 C4 C8 03 00 00 C3
+
+addq 7(%rip), %rdx
+. 170 0x12345678 8
+. 48 03 15 07 00 00 00 C3
+
+addq $2, %rdi
+. 171 0x12345678 5
+. 48 83 C7 02 C3
+
+addq $7878, %rax
+. 172 0x12345678 7
+. 48 05 C6 1E 00 00 C3
+
+addq %r15, %r12
+. 173 0x12345678 4
+. 4D 01 FC C3
+
+addq %r15, %rax
+. 174 0x12345678 4
+. 4C 01 F8 C3
+
+addq %r15, %rdi
+. 175 0x12345678 4
+. 4C 01 FF C3
+
+addq %r15, %rsi
+. 176 0x12345678 4
+. 4C 01 FE C3
+
+addq %rax, 112(%rsp)
+. 177 0x12345678 6
+. 48 01 44 24 70 C3
+
+addq %rax, 32(%rsp)
+. 178 0x12345678 6
+. 48 01 44 24 20 C3
+
+addq %rax, 48(%rsp)
+. 179 0x12345678 6
+. 48 01 44 24 30 C3
+
+addq %rax, 96(%rsp)
+. 180 0x12345678 6
+. 48 01 44 24 60 C3
+
+addq %rax, %r13
+. 181 0x12345678 4
+. 49 01 C5 C3
+
+addq %rax, %r15
+. 182 0x12345678 4
+. 49 01 C7 C3
+
+addq %rax, %rbp
+. 183 0x12345678 4
+. 48 01 C5 C3
+
+addq %rax, %rbx
+. 184 0x12345678 4
+. 48 01 C3 C3
+
+addq %rax, 989(%rip)
+. 185 0x12345678 8
+. 48 01 05 DD 03 00 00 C3
+
+addq %rbp, %rax
+. 186 0x12345678 4
+. 48 01 E8 C3
+
+addq %rbp, %rbx
+. 187 0x12345678 4
+. 48 01 EB C3
+
+addq %rbp, %rdi
+. 188 0x12345678 4
+. 48 01 EF C3
+
+addq %rbp, %rsi
+. 189 0x12345678 4
+. 48 01 EE C3
+
+addq %rbx, %rax
+. 190 0x12345678 4
+. 48 01 D8 C3
+
+addq $12345, %rax
+. 191 0x12345678 7
+. 48 05 39 30 00 00 C3
+
+addw %ax, 22(%r14,%rdx,2)
+. 192 0x12345678 7
+. 66 41 01 44 56 16 C3
+
+addw %ax, 22(%rbx,%rdx,2)
+. 193 0x12345678 6
+. 66 01 44 53 16 C3
+
+andb 8(%rdx), %dil
+. 194 0x12345678 5
+. 40 22 7A 08 C3
+
+andl $12, %eax
+. 195 0x12345678 4
+. 83 E0 0C C3
+
+andl $12, %esi
+. 196 0x12345678 4
+. 83 E6 0C C3
+
+andl $13, %esi
+. 197 0x12345678 4
+. 83 E6 0D C3
+
+andl $15, 20(%rsp)
+. 198 0x12345678 6
+. 83 64 24 14 0F C3
+
+andl $15, %eax
+. 199 0x12345678 4
+. 83 E0 0F C3
+
+andl $15, %ecx
+. 200 0x12345678 4
+. 83 E1 0F C3
+
+andl $15, %edx
+. 201 0x12345678 4
+. 83 E2 0F C3
+
+andl $15, %esi
+. 202 0x12345678 4
+. 83 E6 0F C3
+
+andl $15, %r13d
+. 203 0x12345678 5
+. 41 83 E5 0F C3
+
+andl $16383, %ecx
+. 204 0x12345678 7
+. 81 E1 FF 3F 00 00 C3
+
+andl $16711680, %esi
+. 205 0x12345678 7
+. 81 E6 00 00 FF 00 C3
+
+andl $16711680, %r8d
+. 206 0x12345678 8
+. 41 81 E0 00 00 FF 00 C3
+
+andl $16777215, %eax
+. 207 0x12345678 6
+. 25 FF FF FF 00 C3
+
+andl $16777215, %ebp
+. 208 0x12345678 7
+. 81 E5 FF FF FF 00 C3
+
+andl $16777215, %ebx
+. 209 0x12345678 7
+. 81 E3 FF FF FF 00 C3
+
+andl $16777215, %ecx
+. 210 0x12345678 7
+. 81 E1 FF FF FF 00 C3
+
+andl $16777215, %edx
+. 211 0x12345678 7
+. 81 E2 FF FF FF 00 C3
+
+andl $16777215, %r13d
+. 212 0x12345678 8
+. 41 81 E5 FF FF FF 00 C3
+
+andl $16777215, %r9d
+. 213 0x12345678 8
+. 41 81 E1 FF FF FF 00 C3
+
+andl $-16777216, %eax
+. 214 0x12345678 6
+. 25 00 00 00 FF C3
+
+andl $16777216, %edi
+. 215 0x12345678 7
+. 81 E7 00 00 00 01 C3
+
+andl $16777216, %esi
+. 216 0x12345678 7
+. 81 E6 00 00 00 01 C3
+
+andl 176(%rsp), %r12d
+. 217 0x12345678 9
+. 44 23 A4 24 B0 00 00 00 C3
+
+andl $192, %edi
+. 218 0x12345678 7
+. 81 E7 C0 00 00 00 C3
+
+andl $1, %eax
+. 219 0x12345678 4
+. 83 E0 01 C3
+
+andl $1, %ebx
+. 220 0x12345678 4
+. 83 E3 01 C3
+
+andl $1, %ecx
+. 221 0x12345678 4
+. 83 E1 01 C3
+
+andl $1, %edi
+. 222 0x12345678 4
+. 83 E7 01 C3
+
+andl $1, %edx
+. 223 0x12345678 4
+. 83 E2 01 C3
+
+andl $1, %esi
+. 224 0x12345678 4
+. 83 E6 01 C3
+
+andl $1, %r8d
+. 225 0x12345678 5
+. 41 83 E0 01 C3
+
+andl $249, %eax
+. 226 0x12345678 6
+. 25 F9 00 00 00 C3
+
+andl $24, %eax
+. 227 0x12345678 4
+. 83 E0 18 C3
+
+andl $251, %eax
+. 228 0x12345678 6
+. 25 FB 00 00 00 C3
+
+andl $254, %edi
+. 229 0x12345678 7
+. 81 E7 FE 00 00 00 C3
+
+andl $255, %eax
+. 230 0x12345678 6
+. 25 FF 00 00 00 C3
+
+andl $27, %edi
+. 231 0x12345678 4
+. 83 E7 1B C3
+
+andl $2, %r12d
+. 232 0x12345678 5
+. 41 83 E4 02 C3
+
+andl $30, %ecx
+. 233 0x12345678 4
+. 83 E1 1E C3
+
+andl $31, %eax
+. 234 0x12345678 4
+. 83 E0 1F C3
+
+andl $31, %ebp
+. 235 0x12345678 4
+. 83 E5 1F C3
+
+andl $31, %ebx
+. 236 0x12345678 4
+. 83 E3 1F C3
+
+andl $31, %ecx
+. 237 0x12345678 4
+. 83 E1 1F C3
+
+andl $31, %edx
+. 238 0x12345678 4
+. 83 E2 1F C3
+
+andl $31, %r12d
+. 239 0x12345678 5
+. 41 83 E4 1F C3
+
+andl $3, %eax
+. 240 0x12345678 4
+. 83 E0 03 C3
+
+andl $3, %ebp
+. 241 0x12345678 4
+. 83 E5 03 C3
+
+andl $3, %ebx
+. 242 0x12345678 4
+. 83 E3 03 C3
+
+andl $-3, %ecx
+. 243 0x12345678 4
+. 83 E1 FD C3
+
+andl $3, %esi
+. 244 0x12345678 4
+. 83 E6 03 C3
+
+andl $4095, %ebp
+. 245 0x12345678 7
+. 81 E5 FF 0F 00 00 C3
+
+andl $-4, %eax
+. 246 0x12345678 4
+. 83 E0 FC C3
+
+andl $-4, %ebx
+. 247 0x12345678 4
+. 83 E3 FC C3
+
+andl $4, %ebx
+. 248 0x12345678 4
+. 83 E3 04 C3
+
+andl $-4, %edi
+. 249 0x12345678 4
+. 83 E7 FC C3
+
+andl $-4, %r12d
+. 250 0x12345678 5
+. 41 83 E4 FC C3
+
+andl $4, %r12d
+. 251 0x12345678 5
+. 41 83 E4 04 C3
+
+andl $-4, %r13d
+. 252 0x12345678 5
+. 41 83 E5 FC C3
+
+andl $4, %r13d
+. 253 0x12345678 5
+. 41 83 E5 04 C3
+
+andl $511, %r13d
+. 254 0x12345678 8
+. 41 81 E5 FF 01 00 00 C3
+
+andl $512, %r14d
+. 255 0x12345678 8
+. 41 81 E6 00 02 00 00 C3
+
+andl $-57, %eax
+. 256 0x12345678 4
+. 83 E0 C7 C3
+
+andl $-57, %edx
+. 257 0x12345678 4
+. 83 E2 C7 C3
+
+andl $-57, %r12d
+. 258 0x12345678 5
+. 41 83 E4 C7 C3
+
+andl $5, %eax
+. 259 0x12345678 4
+. 83 E0 05 C3
+
+andl $63, %eax
+. 260 0x12345678 4
+. 83 E0 3F C3
+
+andl $63, %ebx
+. 261 0x12345678 4
+. 83 E3 3F C3
+
+andl $63, %r12d
+. 262 0x12345678 5
+. 41 83 E4 3F C3
+
+andl $63, %r15d
+. 263 0x12345678 5
+. 41 83 E7 3F C3
+
+andl $-64513, %esi
+. 264 0x12345678 7
+. 81 E6 FF 03 FF FF C3
+
+andl $65280, %edi
+. 265 0x12345678 7
+. 81 E7 00 FF 00 00 C3
+
+andl $65280, %edx
+. 266 0x12345678 7
+. 81 E2 00 FF 00 00 C3
+
+andl $65280, %r10d
+. 267 0x12345678 8
+. 41 81 E2 00 FF 00 00 C3
+
+andl $65535, %eax
+. 268 0x12345678 6
+. 25 FF FF 00 00 C3
+
+andl $65535, %r14d
+. 269 0x12345678 8
+. 41 81 E6 FF FF 00 00 C3
+
+andl $6, %esi
+. 270 0x12345678 4
+. 83 E6 06 C3
+
+andl $7, %eax
+. 271 0x12345678 4
+. 83 E0 07 C3
+
+andl $7, %ebp
+. 272 0x12345678 4
+. 83 E5 07 C3
+
+andl $7, %ecx
+. 273 0x12345678 4
+. 83 E1 07 C3
+
+andl $7, %edi
+. 274 0x12345678 4
+. 83 E7 07 C3
+
+andl $7, %edx
+. 275 0x12345678 4
+. 83 E2 07 C3
+
+andl $7, %esi
+. 276 0x12345678 4
+. 83 E6 07 C3
+
+andl $7, %r12d
+. 277 0x12345678 5
+. 41 83 E4 07 C3
+
+andl $7, %r13d
+. 278 0x12345678 5
+. 41 83 E5 07 C3
+
+andl $7, %r15d
+. 279 0x12345678 5
+. 41 83 E7 07 C3
+
+andl $7, %r9d
+. 280 0x12345678 5
+. 41 83 E1 07 C3
+
+andl $-8, %eax
+. 281 0x12345678 4
+. 83 E0 F8 C3
+
+andl $-8, %edi
+. 282 0x12345678 4
+. 83 E7 F8 C3
+
+andl $8, %edi
+. 283 0x12345678 4
+. 83 E7 08 C3
+
+andl 8(%rdx), %edi
+. 284 0x12345678 4
+. 23 7A 08 C3
+
+andl $9, %eax
+. 285 0x12345678 4
+. 83 E0 09 C3
+
+andl %eax, %ebp
+. 286 0x12345678 3
+. 21 C5 C3
+
+andl %eax, %ecx
+. 287 0x12345678 3
+. 21 C1 C3
+
+andl %ebx, %eax
+. 288 0x12345678 3
+. 21 D8 C3
+
+andl %ebx, %r14d
+. 289 0x12345678 4
+. 41 21 DE C3
+
+andl %edx, %eax
+. 290 0x12345678 3
+. 21 D0 C3
+
+andl %edx, %ecx
+. 291 0x12345678 3
+. 21 D1 C3
+
+andl %esi, %ecx
+. 292 0x12345678 3
+. 21 F1 C3
+
+andl %r12d, %eax
+. 293 0x12345678 4
+. 44 21 E0 C3
+
+andq %r12, %rsi
+. 294 0x12345678 4
+. 4C 21 E6 C3
+
+andq %rax, 112(%rsp)
+. 295 0x12345678 6
+. 48 21 44 24 70 C3
+
+andq %rax, 88(%rsp)
+. 296 0x12345678 6
+. 48 21 44 24 58 C3
+
+andq %rax, %rsi
+. 297 0x12345678 4
+. 48 21 C6 C3
+
+call *128(%rsp)
+. 298 0x12345678 7
+. FF 94 24 80 00 00 00
+
+call *136(%rsp)
+. 299 0x12345678 7
+. FF 94 24 88 00 00 00
+
+call *144(%rsp)
+. 300 0x12345678 7
+. FF 94 24 90 00 00 00
+
+call *16(%rsp)
+. 301 0x12345678 4
+. FF 54 24 10
+
+call *24(%rsp)
+. 302 0x12345678 4
+. FF 54 24 18
+
+call *264(%rsp)
+. 303 0x12345678 7
+. FF 94 24 08 01 00 00
+
+call *272(%rsp)
+. 304 0x12345678 7
+. FF 94 24 10 01 00 00
+
+call *296(%rsp)
+. 305 0x12345678 7
+. FF 94 24 28 01 00 00
+
+call *784(%rsp)
+. 306 0x12345678 7
+. FF 94 24 10 03 00 00
+
+call *792(%rsp)
+. 307 0x12345678 7
+. FF 94 24 18 03 00 00
+
+call *808(%rsp)
+. 308 0x12345678 7
+. FF 94 24 28 03 00 00
+
+call *%r10
+. 309 0x12345678 3
+. 41 FF D2
+
+call *%r13
+. 310 0x12345678 3
+. 41 FF D5
+
+call *%r14
+. 311 0x12345678 3
+. 41 FF D6
+
+call *%r15
+. 312 0x12345678 3
+. 41 FF D7
+
+call *313131(%rip)
+. 313 0x12345678 6
+. FF 15 2B C7 04 00
+
+call *9909(%rip)
+. 314 0x12345678 6
+. FF 15 B5 26 00 00
+
+cltd
+. 315 0x12345678 2
+. 99 C3
+
+cltq
+. 316 0x12345678 3
+. 48 98 C3
+
+cmova %ebx, %edx
+. 317 0x12345678 4
+. 0F 47 D3 C3
+
+cmova %ecx, %esi
+. 318 0x12345678 4
+. 0F 47 F1 C3
+
+cmova %ecx, %r10d
+. 319 0x12345678 5
+. 44 0F 47 D1 C3
+
+cmova %edx, %eax
+. 320 0x12345678 4
+. 0F 47 C2 C3
+
+cmova %edx, %edi
+. 321 0x12345678 4
+. 0F 47 FA C3
+
+cmova %edx, %r11d
+. 322 0x12345678 5
+. 44 0F 47 DA C3
+
+cmova %edx, %r8d
+. 323 0x12345678 5
+. 44 0F 47 C2 C3
+
+cmova %edx, %r9d
+. 324 0x12345678 5
+. 44 0F 47 CA C3
+
+cmovae %r9d, %eax
+. 325 0x12345678 5
+. 41 0F 43 C1 C3
+
+cmovae %r9d, %ebx
+. 326 0x12345678 5
+. 41 0F 43 D9 C3
+
+cmovae %r9d, %ecx
+. 327 0x12345678 5
+. 41 0F 43 C9 C3
+
+cmovae %r9d, %edi
+. 328 0x12345678 5
+. 41 0F 43 F9 C3
+
+cmovae %r9d, %edx
+. 329 0x12345678 5
+. 41 0F 43 D1 C3
+
+cmovae %r9d, %esi
+. 330 0x12345678 5
+. 41 0F 43 F1 C3
+
+cmovae %r9d, %r8d
+. 331 0x12345678 5
+. 45 0F 43 C1 C3
+
+cmovb %eax, %r12d
+. 332 0x12345678 5
+. 44 0F 42 E0 C3
+
+cmovb %ebx, %edx
+. 333 0x12345678 4
+. 0F 42 D3 C3
+
+cmovb %ecx, %esi
+. 334 0x12345678 4
+. 0F 42 F1 C3
+
+cmovb %ecx, %r10d
+. 335 0x12345678 5
+. 44 0F 42 D1 C3
+
+cmovb %edx, %eax
+. 336 0x12345678 4
+. 0F 42 C2 C3
+
+cmovb %edx, %ebp
+. 337 0x12345678 4
+. 0F 42 EA C3
+
+cmovb %edx, %edi
+. 338 0x12345678 4
+. 0F 42 FA C3
+
+cmovb %edx, %r11d
+. 339 0x12345678 5
+. 44 0F 42 DA C3
+
+cmovb %edx, %r8d
+. 340 0x12345678 5
+. 44 0F 42 C2 C3
+
+cmovb %edx, %r9d
+. 341 0x12345678 5
+. 44 0F 42 CA C3
+
+cmovbe %eax, %edx
+. 342 0x12345678 4
+. 0F 46 D0 C3
+
+cmovbe %eax, %esi
+. 343 0x12345678 4
+. 0F 46 F0 C3
+
+cmove 104(%rsp), %eax
+. 344 0x12345678 6
+. 0F 44 44 24 68 C3
+
+cmove 380(%rsp), %ecx
+. 345 0x12345678 9
+. 0F 44 8C 24 7C 01 00 00 C3
+
+cmove 380(%rsp), %edi
+. 346 0x12345678 9
+. 0F 44 BC 24 7C 01 00 00 C3
+
+cmove 380(%rsp), %edx
+. 347 0x12345678 9
+. 0F 44 94 24 7C 01 00 00 C3
+
+cmove 380(%rsp), %esi
+. 348 0x12345678 9
+. 0F 44 B4 24 7C 01 00 00 C3
+
+cmove 396(%rsp), %ecx
+. 349 0x12345678 9
+. 0F 44 8C 24 8C 01 00 00 C3
+
+cmove 396(%rsp), %edi
+. 350 0x12345678 9
+. 0F 44 BC 24 8C 01 00 00 C3
+
+cmove 396(%rsp), %edx
+. 351 0x12345678 9
+. 0F 44 94 24 8C 01 00 00 C3
+
+cmove 396(%rsp), %esi
+. 352 0x12345678 9
+. 0F 44 B4 24 8C 01 00 00 C3
+
+cmove 412(%rsp), %ecx
+. 353 0x12345678 9
+. 0F 44 8C 24 9C 01 00 00 C3
+
+cmove 412(%rsp), %edi
+. 354 0x12345678 9
+. 0F 44 BC 24 9C 01 00 00 C3
+
+cmove 412(%rsp), %edx
+. 355 0x12345678 9
+. 0F 44 94 24 9C 01 00 00 C3
+
+cmove 412(%rsp), %esi
+. 356 0x12345678 9
+. 0F 44 B4 24 9C 01 00 00 C3
+
+cmove 496(%rsp), %edi
+. 357 0x12345678 9
+. 0F 44 BC 24 F0 01 00 00 C3
+
+cmove 496(%rsp), %esi
+. 358 0x12345678 9
+. 0F 44 B4 24 F0 01 00 00 C3
+
+cmove 512(%rsp), %ecx
+. 359 0x12345678 9
+. 0F 44 8C 24 00 02 00 00 C3
+
+cmove 512(%rsp), %edx
+. 360 0x12345678 9
+. 0F 44 94 24 00 02 00 00 C3
+
+cmove 528(%rsp), %ecx
+. 361 0x12345678 9
+. 0F 44 8C 24 10 02 00 00 C3
+
+cmove 528(%rsp), %edi
+. 362 0x12345678 9
+. 0F 44 BC 24 10 02 00 00 C3
+
+cmove 528(%rsp), %edx
+. 363 0x12345678 9
+. 0F 44 94 24 10 02 00 00 C3
+
+cmove 528(%rsp), %esi
+. 364 0x12345678 9
+. 0F 44 B4 24 10 02 00 00 C3
+
+cmove %eax, %ebp
+. 365 0x12345678 4
+. 0F 44 E8 C3
+
+cmove %eax, %edx
+. 366 0x12345678 4
+. 0F 44 D0 C3
+
+cmove %eax, %r13d
+. 367 0x12345678 5
+. 44 0F 44 E8 C3
+
+cmove %ecx, %edx
+. 368 0x12345678 4
+. 0F 44 D1 C3
+
+cmove %rax, %rdi
+. 369 0x12345678 5
+. 48 0F 44 F8 C3
+
+cmove %rax, %rdx
+. 370 0x12345678 5
+. 48 0F 44 D0 C3
+
+cmove %rax, %rsi
+. 371 0x12345678 5
+. 48 0F 44 F0 C3
+
+cmove %rbp, %rax
+. 372 0x12345678 5
+. 48 0F 44 C5 C3
+
+cmove %rdi, %rbx
+. 373 0x12345678 5
+. 48 0F 44 DF C3
+
+cmove %rdx, %rsi
+. 374 0x12345678 5
+. 48 0F 44 F2 C3
+
+cmove %rsi, %rax
+. 375 0x12345678 5
+. 48 0F 44 C6 C3
+
+cmovg %edi, %esi
+. 376 0x12345678 4
+. 0F 4F F7 C3
+
+cmovg %edx, %eax
+. 377 0x12345678 4
+. 0F 4F C2 C3
+
+cmovg %edx, %ecx
+. 378 0x12345678 4
+. 0F 4F CA C3
+
+cmovge %ecx, %eax
+. 379 0x12345678 4
+. 0F 4D C1 C3
+
+cmovge %ecx, %edi
+. 380 0x12345678 4
+. 0F 4D F9 C3
+
+cmovge %ecx, %edx
+. 381 0x12345678 4
+. 0F 4D D1 C3
+
+cmovge %ecx, %esi
+. 382 0x12345678 4
+. 0F 4D F1 C3
+
+cmovge %edx, %eax
+. 383 0x12345678 4
+. 0F 4D C2 C3
+
+cmovge %r10d, %eax
+. 384 0x12345678 5
+. 41 0F 4D C2 C3
+
+cmovge %r10d, %ebx
+. 385 0x12345678 5
+. 41 0F 4D DA C3
+
+cmovge %r10d, %ecx
+. 386 0x12345678 5
+. 41 0F 4D CA C3
+
+cmovge %r10d, %edi
+. 387 0x12345678 5
+. 41 0F 4D FA C3
+
+cmovge %r10d, %edx
+. 388 0x12345678 5
+. 41 0F 4D D2 C3
+
+cmovge %r10d, %r11d
+. 389 0x12345678 5
+. 45 0F 4D DA C3
+
+cmovge %r10d, %r13d
+. 390 0x12345678 5
+. 45 0F 4D EA C3
+
+cmovge %r10d, %r8d
+. 391 0x12345678 5
+. 45 0F 4D C2 C3
+
+cmovge %r10d, %r9d
+. 392 0x12345678 5
+. 45 0F 4D CA C3
+
+cmovge %r8d, %ecx
+. 393 0x12345678 5
+. 41 0F 4D C8 C3
+
+cmovge %r8d, %r10d
+. 394 0x12345678 5
+. 45 0F 4D D0 C3
+
+cmovge %r9d, %eax
+. 395 0x12345678 5
+. 41 0F 4D C1 C3
+
+cmovge %r9d, %ebx
+. 396 0x12345678 5
+. 41 0F 4D D9 C3
+
+cmovge %r9d, %ecx
+. 397 0x12345678 5
+. 41 0F 4D C9 C3
+
+cmovge %r9d, %edi
+. 398 0x12345678 5
+. 41 0F 4D F9 C3
+
+cmovge %r9d, %edx
+. 399 0x12345678 5
+. 41 0F 4D D1 C3
+
+cmovge %r9d, %esi
+. 400 0x12345678 5
+. 41 0F 4D F1 C3
+
+cmovge %r9d, %r8d
+. 401 0x12345678 5
+. 45 0F 4D C1 C3
+
+cmovg %esi, %ecx
+. 402 0x12345678 4
+. 0F 4F CE C3
+
+cmovg %r8d, %eax
+. 403 0x12345678 5
+. 41 0F 4F C0 C3
+
+cmovg %r8d, %edx
+. 404 0x12345678 5
+. 41 0F 4F D0 C3
+
+cmovl %eax, %r13d
+. 405 0x12345678 5
+. 44 0F 4C E8 C3
+
+cmovl %ebx, %r12d
+. 406 0x12345678 5
+. 44 0F 4C E3 C3
+
+cmovl %edi, %esi
+. 407 0x12345678 4
+. 0F 4C F7 C3
+
+cmovl %edx, %ebp
+. 408 0x12345678 4
+. 0F 4C EA C3
+
+cmovl %edx, %ecx
+. 409 0x12345678 4
+. 0F 4C CA C3
+
+cmovl %edx, %r12d
+. 410 0x12345678 5
+. 44 0F 4C E2 C3
+
+cmovle %ecx, %eax
+. 411 0x12345678 4
+. 0F 4E C1 C3
+
+cmovle %ecx, %ebx
+. 412 0x12345678 4
+. 0F 4E D9 C3
+
+cmovle %ecx, %edi
+. 413 0x12345678 4
+. 0F 4E F9 C3
+
+cmovle %ecx, %edx
+. 414 0x12345678 4
+. 0F 4E D1 C3
+
+cmovle %ecx, %r11d
+. 415 0x12345678 5
+. 44 0F 4E D9 C3
+
+cmovle %ecx, %r8d
+. 416 0x12345678 5
+. 44 0F 4E C1 C3
+
+cmovle %ecx, %r9d
+. 417 0x12345678 5
+. 44 0F 4E C9 C3
+
+cmovle %r10d, %eax
+. 418 0x12345678 5
+. 41 0F 4E C2 C3
+
+cmovle %r10d, %ebx
+. 419 0x12345678 5
+. 41 0F 4E DA C3
+
+cmovle %r10d, %ecx
+. 420 0x12345678 5
+. 41 0F 4E CA C3
+
+cmovle %r10d, %edi
+. 421 0x12345678 5
+. 41 0F 4E FA C3
+
+cmovle %r10d, %edx
+. 422 0x12345678 5
+. 41 0F 4E D2 C3
+
+cmovle %r10d, %esi
+. 423 0x12345678 5
+. 41 0F 4E F2 C3
+
+cmovle %r10d, %r11d
+. 424 0x12345678 5
+. 45 0F 4E DA C3
+
+cmovle %r10d, %r12d
+. 425 0x12345678 5
+. 45 0F 4E E2 C3
+
+cmovle %r10d, %r8d
+. 426 0x12345678 5
+. 45 0F 4E C2 C3
+
+cmovle %r8d, %eax
+. 427 0x12345678 5
+. 41 0F 4E C0 C3
+
+cmovle %r8d, %edi
+. 428 0x12345678 5
+. 41 0F 4E F8 C3
+
+cmovle %r8d, %edx
+. 429 0x12345678 5
+. 41 0F 4E D0 C3
+
+cmovle %r8d, %esi
+. 430 0x12345678 5
+. 41 0F 4E F0 C3
+
+cmovl %r8d, %eax
+. 431 0x12345678 5
+. 41 0F 4C C0 C3
+
+cmovl %r8d, %edx
+. 432 0x12345678 5
+. 41 0F 4C D0 C3
+
+cmovne 104(%rsp), %eax
+. 433 0x12345678 6
+. 0F 45 44 24 68 C3
+
+cmovne 144(%rsp), %edx
+. 434 0x12345678 9
+. 0F 45 94 24 90 00 00 00 C3
+
+cmovne 152(%rsp), %ecx
+. 435 0x12345678 9
+. 0F 45 8C 24 98 00 00 00 C3
+
+cmovne 16(%rsp), %ebx
+. 436 0x12345678 6
+. 0F 45 5C 24 10 C3
+
+cmovne 16(%rsp), %r12d
+. 437 0x12345678 7
+. 44 0F 45 64 24 10 C3
+
+cmovne 24(%rsp), %ebx
+. 438 0x12345678 6
+. 0F 45 5C 24 18 C3
+
+cmovne 32(%rsp), %ebx
+. 439 0x12345678 6
+. 0F 45 5C 24 20 C3
+
+cmovne 40(%rsp), %ebx
+. 440 0x12345678 6
+. 0F 45 5C 24 28 C3
+
+cmovne 48(%rsp), %ebx
+. 441 0x12345678 6
+. 0F 45 5C 24 30 C3
+
+cmovne %eax, %ebp
+. 442 0x12345678 4
+. 0F 45 E8 C3
+
+cmovne %eax, %ebx
+. 443 0x12345678 4
+. 0F 45 D8 C3
+
+cmovne %eax, %ecx
+. 444 0x12345678 4
+. 0F 45 C8 C3
+
+cmovne %eax, %edi
+. 445 0x12345678 4
+. 0F 45 F8 C3
+
+cmovne %eax, %esi
+. 446 0x12345678 4
+. 0F 45 F0 C3
+
+cmovne %eax, %r14d
+. 447 0x12345678 5
+. 44 0F 45 F0 C3
+
+cmovne %edx, %eax
+. 448 0x12345678 4
+. 0F 45 C2 C3
+
+cmovne %r10d, %eax
+. 449 0x12345678 5
+. 41 0F 45 C2 C3
+
+cmovne %r10d, %ebx
+. 450 0x12345678 5
+. 41 0F 45 DA C3
+
+cmovne %r10d, %ecx
+. 451 0x12345678 5
+. 41 0F 45 CA C3
+
+cmovne %r10d, %edx
+. 452 0x12345678 5
+. 41 0F 45 D2 C3
+
+cmovne %r10d, %r11d
+. 453 0x12345678 5
+. 45 0F 45 DA C3
+
+cmovne %r10d, %r12d
+. 454 0x12345678 5
+. 45 0F 45 E2 C3
+
+cmovne %r10d, %r13d
+. 455 0x12345678 5
+. 45 0F 45 EA C3
+
+cmovne %r10d, %r9d
+. 456 0x12345678 5
+. 45 0F 45 CA C3
+
+cmovne %r14d, %esi
+. 457 0x12345678 5
+. 41 0F 45 F6 C3
+
+cmovne %r8d, %eax
+. 458 0x12345678 5
+. 41 0F 45 C0 C3
+
+cmovne %r8d, %ecx
+. 459 0x12345678 5
+. 41 0F 45 C8 C3
+
+cmovne %r8d, %edx
+. 460 0x12345678 5
+. 41 0F 45 D0 C3
+
+cmovne %r8d, %r10d
+. 461 0x12345678 5
+. 45 0F 45 D0 C3
+
+cmovne %rax, %r14
+. 462 0x12345678 5
+. 4C 0F 45 F0 C3
+
+cmovne %rax, %r9
+. 463 0x12345678 5
+. 4C 0F 45 C8 C3
+
+cmovne %rax, %rbx
+. 464 0x12345678 5
+. 48 0F 45 D8 C3
+
+cmovne %rax, %rcx
+. 465 0x12345678 5
+. 48 0F 45 C8 C3
+
+cmovne %rax, %rdi
+. 466 0x12345678 5
+. 48 0F 45 F8 C3
+
+cmovne %rax, %rdx
+. 467 0x12345678 5
+. 48 0F 45 D0 C3
+
+cmovne %rax, %rsi
+. 468 0x12345678 5
+. 48 0F 45 F0 C3
+
+cmovne %rdx, %r9
+. 469 0x12345678 5
+. 4C 0F 45 CA C3
+
+cmovne %rdx, %rax
+. 470 0x12345678 5
+. 48 0F 45 C2 C3
+
+cmovns %eax, %edx
+. 471 0x12345678 4
+. 0F 49 D0 C3
+
+cmovns %esi, %ecx
+. 472 0x12345678 4
+. 0F 49 CE C3
+
+cmovns %r8d, %ebx
+. 473 0x12345678 5
+. 41 0F 49 D8 C3
+
+cmovns %rax, %rdx
+. 474 0x12345678 5
+. 48 0F 49 D0 C3
+
+cmovs %ecx, %eax
+. 475 0x12345678 4
+. 0F 48 C1 C3
+
+cmovs %ecx, %ebx
+. 476 0x12345678 4
+. 0F 48 D9 C3
+
+cmovs %ecx, %edi
+. 477 0x12345678 4
+. 0F 48 F9 C3
+
+cmovs %ecx, %edx
+. 478 0x12345678 4
+. 0F 48 D1 C3
+
+cmovs %ecx, %r11d
+. 479 0x12345678 5
+. 44 0F 48 D9 C3
+
+cmovs %ecx, %r8d
+. 480 0x12345678 5
+. 44 0F 48 C1 C3
+
+cmovs %ecx, %r9d
+. 481 0x12345678 5
+. 44 0F 48 C9 C3
+
+cmovs %r10d, %eax
+. 482 0x12345678 5
+. 41 0F 48 C2 C3
+
+cmovs %r10d, %ebx
+. 483 0x12345678 5
+. 41 0F 48 DA C3
+
+cmovs %r10d, %ecx
+. 484 0x12345678 5
+. 41 0F 48 CA C3
+
+cmovs %r10d, %edi
+. 485 0x12345678 5
+. 41 0F 48 FA C3
+
+cmovs %r10d, %edx
+. 486 0x12345678 5
+. 41 0F 48 D2 C3
+
+cmovs %r10d, %esi
+. 487 0x12345678 5
+. 41 0F 48 F2 C3
+
+cmovs %r10d, %r8d
+. 488 0x12345678 5
+. 45 0F 48 C2 C3
+
+cmpb $0, 103(%rsp)
+. 489 0x12345678 6
+. 80 7C 24 67 00 C3
+
+cmpb $0, 107(%rsp)
+. 490 0x12345678 6
+. 80 7C 24 6B 00 C3
+
+cmpb $0, 115(%rsp)
+. 491 0x12345678 6
+. 80 7C 24 73 00 C3
+
+cmpb $0, 11(%rsp)
+. 492 0x12345678 6
+. 80 7C 24 0B 00 C3
+
+cmpb $0, 123(%rsp)
+. 493 0x12345678 6
+. 80 7C 24 7B 00 C3
+
+cmpb $0, 135(%rsp)
+. 494 0x12345678 9
+. 80 BC 24 87 00 00 00 00 C3
+
+cmpb $0, 13(%rsp)
+. 495 0x12345678 6
+. 80 7C 24 0D 00 C3
+
+cmpb $0, 14(%rsp)
+. 496 0x12345678 6
+. 80 7C 24 0E 00 C3
+
+cmpb $0, 151(%rsp)
+. 497 0x12345678 9
+. 80 BC 24 97 00 00 00 00 C3
+
+cmpb $0, 15(%rsp)
+. 498 0x12345678 6
+. 80 7C 24 0F 00 C3
+
+cmpb $0, 16(%rdx,%rax)
+. 499 0x12345678 6
+. 80 7C 02 10 00 C3
+
+cmpb $0, 16(%rsp)
+. 500 0x12345678 6
+. 80 7C 24 10 00 C3
+
+cmpb $0, 171(%rsp)
+. 501 0x12345678 9
+. 80 BC 24 AB 00 00 00 00 C3
+
+cmpb $0, 17(%rbx)
+. 502 0x12345678 5
+. 80 7B 11 00 C3
+
+cmpb $0, 183(%rsp)
+. 503 0x12345678 9
+. 80 BC 24 B7 00 00 00 00 C3
+
+cmpb $0, 18(%rbx)
+. 504 0x12345678 5
+. 80 7B 12 00 C3
+
+cmpb $0, 22(%rsp)
+. 505 0x12345678 6
+. 80 7C 24 16 00 C3
+
+cmpb $0, 239(%rsp)
+. 506 0x12345678 9
+. 80 BC 24 EF 00 00 00 00 C3
+
+cmpb $0, 23(%rsp)
+. 507 0x12345678 6
+. 80 7C 24 17 00 C3
+
+cmpb $0, 270(%rsp)
+. 508 0x12345678 9
+. 80 BC 24 0E 01 00 00 00 C3
+
+cmpb $0, 27(%rsp)
+. 509 0x12345678 6
+. 80 7C 24 1B 00 C3
+
+cmpb $0, 283(%rsp)
+. 510 0x12345678 9
+. 80 BC 24 1B 01 00 00 00 C3
+
+cmpb $0, 31(%rsp)
+. 511 0x12345678 6
+. 80 7C 24 1F 00 C3
+
+cmpb $0, 331(%rsp)
+. 512 0x12345678 9
+. 80 BC 24 4B 01 00 00 00 C3
+
+cmpb $0, 375(%rsp)
+. 513 0x12345678 9
+. 80 BC 24 77 01 00 00 00 C3
+
+cmpb $0, 37(%rsp)
+. 514 0x12345678 6
+. 80 7C 24 25 00 C3
+
+cmpb $0, 38(%rsp)
+. 515 0x12345678 6
+. 80 7C 24 26 00 C3
+
+cmpb $0, 39(%rsp)
+. 516 0x12345678 6
+. 80 7C 24 27 00 C3
+
+cmpb $0, 43(%rsp)
+. 517 0x12345678 6
+. 80 7C 24 2B 00 C3
+
+cmpb $0, 44(%r12)
+. 518 0x12345678 7
+. 41 80 7C 24 2C 00 C3
+
+cmpb $0, 44(%r13)
+. 519 0x12345678 6
+. 41 80 7D 2C 00 C3
+
+cmpb $0, 44(%rax)
+. 520 0x12345678 5
+. 80 78 2C 00 C3
+
+cmpb $0, 46(%rsp)
+. 521 0x12345678 6
+. 80 7C 24 2E 00 C3
+
+cmpb $0, 47(%rsp)
+. 522 0x12345678 6
+. 80 7C 24 2F 00 C3
+
+cmpb $0, 48(%rsp)
+. 523 0x12345678 6
+. 80 7C 24 30 00 C3
+
+cmpb $0, 4(%rax)
+. 524 0x12345678 5
+. 80 78 04 00 C3
+
+cmpb $0, 4(%rax,%rcx)
+. 525 0x12345678 6
+. 80 7C 08 04 00 C3
+
+cmpb $0, 55(%rsp)
+. 526 0x12345678 6
+. 80 7C 24 37 00 C3
+
+cmpb $0, 5(%rax)
+. 527 0x12345678 5
+. 80 78 05 00 C3
+
+cmpb $0, 60(%rsp)
+. 528 0x12345678 6
+. 80 7C 24 3C 00 C3
+
+cmpb $0, 6(%rsp)
+. 529 0x12345678 6
+. 80 7C 24 06 00 C3
+
+cmpb $0, 8(%rax)
+. 530 0x12345678 5
+. 80 78 08 00 C3
+
+cmpb $0, 8(%rdi)
+. 531 0x12345678 5
+. 80 7F 08 00 C3
+
+cmpb $0, 439043(%rip)
+. 532 0x12345678 8
+. 80 3D 03 B3 06 00 00 C3
+
+cmpb $0, 9(%rip)
+. 533 0x12345678 8
+. 80 3D 09 00 00 00 00 C3
+
+cmpb $0, (%rax,%r12)
+. 534 0x12345678 6
+. 42 80 3C 20 00 C3
+
+cmpb $0, (%rax,%r13)
+. 535 0x12345678 6
+. 42 80 3C 28 00 C3
+
+cmpb $0, (%rax,%rdi)
+. 536 0x12345678 5
+. 80 3C 38 00 C3
+
+cmpb $0, (%rbx,%rax)
+. 537 0x12345678 5
+. 80 3C 03 00 C3
+
+cmpb $0, (%rcx,%r10)
+. 538 0x12345678 6
+. 42 80 3C 11 00 C3
+
+cmpb $0, (%rcx,%r9)
+. 539 0x12345678 6
+. 42 80 3C 09 00 C3
+
+cmpb $0, (%rcx,%rdi)
+. 540 0x12345678 5
+. 80 3C 39 00 C3
+
+cmpb $0, (%rdi)
+. 541 0x12345678 4
+. 80 3F 00 C3
+
+cmpb $0, (%rdi,%rax)
+. 542 0x12345678 5
+. 80 3C 07 00 C3
+
+cmpb $0, (%rdx,%rax)
+. 543 0x12345678 5
+. 80 3C 02 00 C3
+
+cmpb $0, (%rsi)
+. 544 0x12345678 4
+. 80 3E 00 C3
+
+cmpb $100, 1(%r12)
+. 545 0x12345678 7
+. 41 80 7C 24 01 64 C3
+
+cmpb $101, 1(%r12)
+. 546 0x12345678 7
+. 41 80 7C 24 01 65 C3
+
+cmpb $-10, 1(%r12)
+. 547 0x12345678 7
+. 41 80 7C 24 01 F6 C3
+
+cmpb $102, 1(%r12)
+. 548 0x12345678 7
+. 41 80 7C 24 01 66 C3
+
+cmpb $102, %al
+. 549 0x12345678 3
+. 3C 66 C3
+
+cmpb $103, 1(%r12)
+. 550 0x12345678 7
+. 41 80 7C 24 01 67 C3
+
+cmpb $104, 1(%r12)
+. 551 0x12345678 7
+. 41 80 7C 24 01 68 C3
+
+cmpb $105, 1(%r12)
+. 552 0x12345678 7
+. 41 80 7C 24 01 69 C3
+
+cmpb $106, 1(%r12)
+. 553 0x12345678 7
+. 41 80 7C 24 01 6A C3
+
+cmpb $107, 1(%r12)
+. 554 0x12345678 7
+. 41 80 7C 24 01 6B C3
+
+cmpb $108, 1(%r12)
+. 555 0x12345678 7
+. 41 80 7C 24 01 6C C3
+
+cmpb $108, (%rcx,%r12)
+. 556 0x12345678 6
+. 42 80 3C 21 6C C3
+
+cmpb $109, 1(%r12)
+. 557 0x12345678 7
+. 41 80 7C 24 01 6D C3
+
+cmpb $10, %dil
+. 558 0x12345678 5
+. 40 80 FF 0A C3
+
+cmpb $110, 1(%r12)
+. 559 0x12345678 7
+. 41 80 7C 24 01 6E C3
+
+cmpb $1, 107(%rsp)
+. 560 0x12345678 6
+. 80 7C 24 6B 01 C3
+
+cmpb $1, 111(%rsp)
+. 561 0x12345678 6
+. 80 7C 24 6F 01 C3
+
+cmpb $111, 2(%r12)
+. 562 0x12345678 7
+. 41 80 7C 24 02 6F C3
+
+cmpb $1, 112(%rsp)
+. 563 0x12345678 6
+. 80 7C 24 70 01 C3
+
+cmpb $111, %cl
+. 564 0x12345678 4
+. 80 F9 6F C3
+
+cmpb $1, 11(%rsp)
+. 565 0x12345678 6
+. 80 7C 24 0B 01 C3
+
+cmpb $112, 1(%r12)
+. 566 0x12345678 7
+. 41 80 7C 24 01 70 C3
+
+cmpb $112, 2(%r12)
+. 567 0x12345678 7
+. 41 80 7C 24 02 70 C3
+
+cmpb $113, 1(%r12)
+. 568 0x12345678 7
+. 41 80 7C 24 01 71 C3
+
+cmpb $1, 138(%rsp)
+. 569 0x12345678 9
+. 80 BC 24 8A 00 00 00 01 C3
+
+cmpb $113, %bl
+. 570 0x12345678 4
+. 80 FB 71 C3
+
+cmpb $-113, %cl
+. 571 0x12345678 4
+. 80 F9 8F C3
+
+cmpb $114, 1(%r12)
+. 572 0x12345678 7
+. 41 80 7C 24 01 72 C3
+
+cmpb $114, %bl
+. 573 0x12345678 4
+. 80 FB 72 C3
+
+cmpb $115, 1(%r12)
+. 574 0x12345678 7
+. 41 80 7C 24 01 73 C3
+
+cmpb $115, %bl
+. 575 0x12345678 4
+. 80 FB 73 C3
+
+cmpb $1, 15(%rsp)
+. 576 0x12345678 6
+. 80 7C 24 0F 01 C3
+
+cmpb $116, 1(%r12)
+. 577 0x12345678 7
+. 41 80 7C 24 01 74 C3
+
+cmpb $1, 16(%rsp)
+. 578 0x12345678 6
+. 80 7C 24 10 01 C3
+
+cmpb $117, 1(%r12)
+. 579 0x12345678 7
+. 41 80 7C 24 01 75 C3
+
+cmpb $118, 1(%r12)
+. 580 0x12345678 7
+. 41 80 7C 24 01 76 C3
+
+cmpb $11, %cl
+. 581 0x12345678 4
+. 80 F9 0B C3
+
+cmpb $-12, 1(%r12)
+. 582 0x12345678 7
+. 41 80 7C 24 01 F4 C3
+
+cmpb $1, 22(%rsp)
+. 583 0x12345678 6
+. 80 7C 24 16 01 C3
+
+cmpb $-125, 1(%rax)
+. 584 0x12345678 5
+. 80 78 01 83 C3
+
+cmpb $126, 1(%r12)
+. 585 0x12345678 7
+. 41 80 7C 24 01 7E C3
+
+cmpb $126, 2(%r12)
+. 586 0x12345678 7
+. 41 80 7C 24 02 7E C3
+
+cmpb $127, 1(%r12)
+. 587 0x12345678 7
+. 41 80 7C 24 01 7F C3
+
+cmpb $127, 2(%r12)
+. 588 0x12345678 7
+. 41 80 7C 24 02 7F C3
+
+cmpb $1, 27(%rsp)
+. 589 0x12345678 6
+. 80 7C 24 1B 01 C3
+
+cmpb $-128, %al
+. 590 0x12345678 3
+. 3C 80 C3
+
+cmpb $13, 14(%rax)
+. 591 0x12345678 5
+. 80 78 0E 0D C3
+
+cmpb $-13, 1(%r12)
+. 592 0x12345678 7
+. 41 80 7C 24 01 F3 C3
+
+cmpb $1, 38(%rsp)
+. 593 0x12345678 6
+. 80 7C 24 26 01 C3
+
+cmpb $-13, %bl
+. 594 0x12345678 4
+. 80 FB F3 C3
+
+cmpb $-14, 1(%r12)
+. 595 0x12345678 7
+. 41 80 7C 24 01 F2 C3
+
+cmpb $1, 42(%rsp)
+. 596 0x12345678 6
+. 80 7C 24 2A 01 C3
+
+cmpb $1, 47(%rsp)
+. 597 0x12345678 6
+. 80 7C 24 2F 01 C3
+
+cmpb $1, 48(%rsp)
+. 598 0x12345678 6
+. 80 7C 24 30 01 C3
+
+cmpb $-14, %bl
+. 599 0x12345678 4
+. 80 FB F2 C3
+
+cmpb $15, 106(%rsp)
+. 600 0x12345678 6
+. 80 7C 24 6A 0F C3
+
+cmpb $15, 139(%rsp)
+. 601 0x12345678 9
+. 80 BC 24 8B 00 00 00 0F C3
+
+cmpb $15, 19(%rsp)
+. 602 0x12345678 6
+. 80 7C 24 13 0F C3
+
+cmpb $-15, 1(%r12)
+. 603 0x12345678 7
+. 41 80 7C 24 01 F1 C3
+
+cmpb $15, 1(%r12)
+. 604 0x12345678 7
+. 41 80 7C 24 01 0F C3
+
+cmpb $15, 25(%rsp)
+. 605 0x12345678 6
+. 80 7C 24 19 0F C3
+
+cmpb $15, 26(%rsp)
+. 606 0x12345678 6
+. 80 7C 24 1A 0F C3
+
+cmpb $15, 27(%rsp)
+. 607 0x12345678 6
+. 80 7C 24 1B 0F C3
+
+cmpb $15, 61(%rsp)
+. 608 0x12345678 6
+. 80 7C 24 3D 0F C3
+
+cmpb $15, 62(%rsp)
+. 609 0x12345678 6
+. 80 7C 24 3E 0F C3
+
+cmpb $15, 63(%rsp)
+. 610 0x12345678 6
+. 80 7C 24 3F 0F C3
+
+cmpb $15, 79(%rsp)
+. 611 0x12345678 6
+. 80 7C 24 4F 0F C3
+
+cmpb $15, %al
+. 612 0x12345678 3
+. 3C 0F C3
+
+cmpb $15, %bl
+. 613 0x12345678 4
+. 80 FB 0F C3
+
+cmpb $15, (%r12)
+. 614 0x12345678 6
+. 41 80 3C 24 0F C3
+
+cmpb $15, %r12b
+. 615 0x12345678 5
+. 41 80 FC 0F C3
+
+cmpb $15, %r15b
+. 616 0x12345678 5
+. 41 80 FF 0F C3
+
+cmpb $16, 2(%r12)
+. 617 0x12345678 7
+. 41 80 7C 24 02 10 C3
+
+cmpb $1, 67(%rsp)
+. 618 0x12345678 6
+. 80 7C 24 43 01 C3
+
+cmpb $-16, %al
+. 619 0x12345678 3
+. 3C F0 C3
+
+cmpb $16, %al
+. 620 0x12345678 3
+. 3C 10 C3
+
+cmpb $16, %cl
+. 621 0x12345678 4
+. 80 F9 10 C3
+
+cmpb $-16, (%rax)
+. 622 0x12345678 4
+. 80 38 F0 C3
+
+cmpb $-17, 1(%r12)
+. 623 0x12345678 7
+. 41 80 7C 24 01 EF C3
+
+cmpb $17, 2(%r12)
+. 624 0x12345678 7
+. 41 80 7C 24 02 11 C3
+
+cmpb $-18, 1(%r12)
+. 625 0x12345678 7
+. 41 80 7C 24 01 EE C3
+
+cmpb $18, 1(%r12)
+. 626 0x12345678 7
+. 41 80 7C 24 01 12 C3
+
+cmpb $1, 8(%rdi)
+. 627 0x12345678 5
+. 80 7F 08 01 C3
+
+cmpb $19, 17(%rax)
+. 628 0x12345678 5
+. 80 78 11 13 C3
+
+cmpb $-19, 1(%r12)
+. 629 0x12345678 7
+. 41 80 7C 24 01 ED C3
+
+cmpb $19, 1(%r12)
+. 630 0x12345678 7
+. 41 80 7C 24 01 13 C3
+
+cmpb $1, %al
+. 631 0x12345678 3
+. 3C 01 C3
+
+cmpb $1, %bl
+. 632 0x12345678 4
+. 80 FB 01 C3
+
+cmpb $1, %cl
+. 633 0x12345678 4
+. 80 F9 01 C3
+
+cmpb $1, %dil
+. 634 0x12345678 5
+. 40 80 FF 01 C3
+
+cmpb $1, %dl
+. 635 0x12345678 4
+. 80 FA 01 C3
+
+cmpb $1, %r12b
+. 636 0x12345678 5
+. 41 80 FC 01 C3
+
+cmpb $1, %r14b
+. 637 0x12345678 5
+. 41 80 FE 01 C3
+
+cmpb $1, %r8b
+. 638 0x12345678 5
+. 41 80 F8 01 C3
+
+cmpb $1, %sil
+. 639 0x12345678 5
+. 40 80 FE 01 C3
+
+cmpb $-20, 1(%r12)
+. 640 0x12345678 7
+. 41 80 7C 24 01 EC C3
+
+cmpb $-21, 1(%r12)
+. 641 0x12345678 7
+. 41 80 7C 24 01 EB C3
+
+cmpb $21, 1(%r12)
+. 642 0x12345678 7
+. 41 80 7C 24 01 15 C3
+
+cmpb $21, %dl
+. 643 0x12345678 4
+. 80 FA 15 C3
+
+cmpb $-2, 1(%r12)
+. 644 0x12345678 7
+. 41 80 7C 24 01 FE C3
+
+cmpb $-22, 1(%r12)
+. 645 0x12345678 7
+. 41 80 7C 24 01 EA C3
+
+cmpb $22, 1(%r12)
+. 646 0x12345678 7
+. 41 80 7C 24 01 16 C3
+
+cmpb $-23, 1(%r12)
+. 647 0x12345678 7
+. 41 80 7C 24 01 E9 C3
+
+cmpb $23, 1(%r12)
+. 648 0x12345678 7
+. 41 80 7C 24 01 17 C3
+
+cmpb $-24, 1(%r12)
+. 649 0x12345678 7
+. 41 80 7C 24 01 E8 C3
+
+cmpb $24, 1(%r12)
+. 650 0x12345678 7
+. 41 80 7C 24 01 18 C3
+
+cmpb $-25, 1(%r12)
+. 651 0x12345678 7
+. 41 80 7C 24 01 E7 C3
+
+cmpb $25, %al
+. 652 0x12345678 3
+. 3C 19 C3
+
+cmpb $-26, 1(%r12)
+. 653 0x12345678 7
+. 41 80 7C 24 01 E6 C3
+
+cmpb $-26, 2(%r12)
+. 654 0x12345678 7
+. 41 80 7C 24 02 E6 C3
+
+cmpb $-27, 1(%r12)
+. 655 0x12345678 7
+. 41 80 7C 24 01 E5 C3
+
+cmpb $27, 8(%rax)
+. 656 0x12345678 5
+. 80 78 08 1B C3
+
+cmpb $-28, 1(%r12)
+. 657 0x12345678 7
+. 41 80 7C 24 01 E4 C3
+
+cmpb $-29, 1(%r12)
+. 658 0x12345678 7
+. 41 80 7C 24 01 E3 C3
+
+cmpb $29, 2(%rax)
+. 659 0x12345678 5
+. 80 78 02 1D C3
+
+cmpb $-29, %bl
+. 660 0x12345678 4
+. 80 FB E3 C3
+
+cmpb $2, %cl
+. 661 0x12345678 4
+. 80 F9 02 C3
+
+cmpb $-30, 1(%r12)
+. 662 0x12345678 7
+. 41 80 7C 24 01 E2 C3
+
+cmpb $-31, 1(%r12)
+. 663 0x12345678 7
+. 41 80 7C 24 01 E1 C3
+
+cmpb $-3, 1(%r12)
+. 664 0x12345678 7
+. 41 80 7C 24 01 FD C3
+
+cmpb $-32, 1(%r12)
+. 665 0x12345678 7
+. 41 80 7C 24 01 E0 C3
+
+cmpb $-33, 1(%r12)
+. 666 0x12345678 7
+. 41 80 7C 24 01 DF C3
+
+cmpb $3, 38(%rsp)
+. 667 0x12345678 6
+. 80 7C 24 26 03 C3
+
+cmpb $-33, %r12b
+. 668 0x12345678 5
+. 41 80 FC DF C3
+
+cmpb $-34, 1(%r12)
+. 669 0x12345678 7
+. 41 80 7C 24 01 DE C3
+
+cmpb $-34, %r12b
+. 670 0x12345678 5
+. 41 80 FC DE C3
+
+cmpb $-35, 1(%r12)
+. 671 0x12345678 7
+. 41 80 7C 24 01 DD C3
+
+cmpb $-35, %r12b
+. 672 0x12345678 5
+. 41 80 FC DD C3
+
+cmpb $3, 5(%rax)
+. 673 0x12345678 5
+. 80 78 05 03 C3
+
+cmpb $-36, 1(%r12)
+. 674 0x12345678 7
+. 41 80 7C 24 01 DC C3
+
+cmpb $36, 3(%rax)
+. 675 0x12345678 5
+. 80 78 03 24 C3
+
+cmpb $-36, %r12b
+. 676 0x12345678 5
+. 41 80 FC DC C3
+
+cmpb $-37, 1(%r12)
+. 677 0x12345678 7
+. 41 80 7C 24 01 DB C3
+
+cmpb $37, %al
+. 678 0x12345678 3
+. 3C 25 C3
+
+cmpb $-37, %r12b
+. 679 0x12345678 5
+. 41 80 FC DB C3
+
+cmpb $-38, 1(%r12)
+. 680 0x12345678 7
+. 41 80 7C 24 01 DA C3
+
+cmpb $-38, %r12b
+. 681 0x12345678 5
+. 41 80 FC DA C3
+
+cmpb $-39, 1(%r12)
+. 682 0x12345678 7
+. 41 80 7C 24 01 D9 C3
+
+cmpb $-39, %r12b
+. 683 0x12345678 5
+. 41 80 FC D9 C3
+
+cmpb $3, %bpl
+. 684 0x12345678 5
+. 40 80 FD 03 C3
+
+cmpb $3, %r14b
+. 685 0x12345678 5
+. 41 80 FE 03 C3
+
+cmpb $3, %sil
+. 686 0x12345678 5
+. 40 80 FE 03 C3
+
+cmpb $-40, 1(%r12)
+. 687 0x12345678 7
+. 41 80 7C 24 01 D8 C3
+
+cmpb $40, %al
+. 688 0x12345678 3
+. 3C 28 C3
+
+cmpb $40, %cl
+. 689 0x12345678 4
+. 80 F9 28 C3
+
+cmpb $-40, %r12b
+. 690 0x12345678 5
+. 41 80 FC D8 C3
+
+cmpb $-41, 1(%r12)
+. 691 0x12345678 7
+. 41 80 7C 24 01 D7 C3
+
+cmpb $41, 1(%r12)
+. 692 0x12345678 7
+. 41 80 7C 24 01 29 C3
+
+cmpb $-4, 1(%r12)
+. 693 0x12345678 7
+. 41 80 7C 24 01 FC C3
+
+cmpb $-42, 1(%r12)
+. 694 0x12345678 7
+. 41 80 7C 24 01 D6 C3
+
+cmpb $42, 1(%r12)
+. 695 0x12345678 7
+. 41 80 7C 24 01 2A C3
+
+cmpb $-42, 2(%r12)
+. 696 0x12345678 7
+. 41 80 7C 24 02 D6 C3
+
+cmpb $42, 2(%r12)
+. 697 0x12345678 7
+. 41 80 7C 24 02 2A C3
+
+cmpb $-43, 1(%r12)
+. 698 0x12345678 7
+. 41 80 7C 24 01 D5 C3
+
+cmpb $43, 1(%r12)
+. 699 0x12345678 7
+. 41 80 7C 24 01 2B C3
+
+cmpb $-44, 1(%r12)
+. 700 0x12345678 7
+. 41 80 7C 24 01 D4 C3
+
+cmpb $44, 1(%r12)
+. 701 0x12345678 7
+. 41 80 7C 24 01 2C C3
+
+cmpb $44, 2(%r12)
+. 702 0x12345678 7
+. 41 80 7C 24 02 2C C3
+
+cmpb $44, (%rcx,%r12)
+. 703 0x12345678 6
+. 42 80 3C 21 2C C3
+
+cmpb $-45, 1(%r12)
+. 704 0x12345678 7
+. 41 80 7C 24 01 D3 C3
+
+cmpb $45, (%rcx,%r12)
+. 705 0x12345678 6
+. 42 80 3C 21 2D C3
+
+cmpb $-46, 1(%r12)
+. 706 0x12345678 7
+. 41 80 7C 24 01 D2 C3
+
+cmpb $46, %al
+. 707 0x12345678 3
+. 3C 2E C3
+
+cmpb $-47, 1(%r12)
+. 708 0x12345678 7
+. 41 80 7C 24 01 D1 C3
+
+cmpb $48, (%rcx,%r12)
+. 709 0x12345678 6
+. 42 80 3C 21 30 C3
+
+cmpb $4, %bpl
+. 710 0x12345678 5
+. 40 80 FD 04 C3
+
+cmpb $4, %cl
+. 711 0x12345678 4
+. 80 F9 04 C3
+
+cmpb $4, %dl
+. 712 0x12345678 4
+. 80 FA 04 C3
+
+cmpb $4, %r13b
+. 713 0x12345678 5
+. 41 80 FD 04 C3
+
+cmpb $5, 11(%rax)
+. 714 0x12345678 5
+. 80 78 0B 05 C3
+
+cmpb $-5, 1(%r12)
+. 715 0x12345678 7
+. 41 80 7C 24 01 FB C3
+
+cmpb $-56, 10(%rax)
+. 716 0x12345678 5
+. 80 78 0A C8 C3
+
+cmpb $-56, 7(%rax)
+. 717 0x12345678 5
+. 80 78 07 C8 C3
+
+cmpb $-58, 1(%r12)
+. 718 0x12345678 7
+. 41 80 7C 24 01 C6 C3
+
+cmpb $-59, 1(%r12)
+. 719 0x12345678 7
+. 41 80 7C 24 01 C5 C3
+
+cmpb $5, %al
+. 720 0x12345678 3
+. 3C 05 C3
+
+cmpb $5, %r13b
+. 721 0x12345678 5
+. 41 80 FD 05 C3
+
+cmpb $5, %r15b
+. 722 0x12345678 5
+. 41 80 FF 05 C3
+
+cmpb $-60, 1(%r12)
+. 723 0x12345678 7
+. 41 80 7C 24 01 C4 C3
+
+cmpb $-61, 1(%r12)
+. 724 0x12345678 7
+. 41 80 7C 24 01 C3 C3
+
+cmpb $-6, 1(%r12)
+. 725 0x12345678 7
+. 41 80 7C 24 01 FA C3
+
+cmpb $-62, 1(%r12)
+. 726 0x12345678 7
+. 41 80 7C 24 01 C2 C3
+
+cmpb $-62, 2(%r12)
+. 727 0x12345678 7
+. 41 80 7C 24 02 C2 C3
+
+cmpb $-63, 12(%rax)
+. 728 0x12345678 5
+. 80 78 0C C1 C3
+
+cmpb $-63, 15(%rax)
+. 729 0x12345678 5
+. 80 78 0F C1 C3
+
+cmpb $-63, 3(%rax)
+. 730 0x12345678 5
+. 80 78 03 C1 C3
+
+cmpb $-63, 6(%rax)
+. 731 0x12345678 5
+. 80 78 06 C1 C3
+
+cmpb $-63, 9(%rax)
+. 732 0x12345678 5
+. 80 78 09 C1 C3
+
+cmpb $-63, (%rax)
+. 733 0x12345678 4
+. 80 38 C1 C3
+
+cmpb $-64, 13(%rax)
+. 734 0x12345678 5
+. 80 78 0D C0 C3
+
+cmpb $-64, 16(%rax)
+. 735 0x12345678 5
+. 80 78 10 C0 C3
+
+cmpb $-64, 1(%rax)
+. 736 0x12345678 5
+. 80 78 01 C0 C3
+
+cmpb $-64, 4(%rax)
+. 737 0x12345678 5
+. 80 78 04 C0 C3
+
+cmpb $-65, %al
+. 738 0x12345678 3
+. 3C BF C3
+
+cmpb $68, 2(%rax)
+. 739 0x12345678 5
+. 80 78 02 44 C3
+
+cmpb $6, %cl
+. 740 0x12345678 4
+. 80 F9 06 C3
+
+cmpb $-7, 1(%r12)
+. 741 0x12345678 7
+. 41 80 7C 24 01 F9 C3
+
+cmpb $80, 1(%r12)
+. 742 0x12345678 7
+. 41 80 7C 24 01 50 C3
+
+cmpb $81, 1(%r12)
+. 743 0x12345678 7
+. 41 80 7C 24 01 51 C3
+
+cmpb $81, 2(%r12)
+. 744 0x12345678 7
+. 41 80 7C 24 02 51 C3
+
+cmpb $-8, 1(%r12)
+. 745 0x12345678 7
+. 41 80 7C 24 01 F8 C3
+
+cmpb $-82, 1(%r12)
+. 746 0x12345678 7
+. 41 80 7C 24 01 AE C3
+
+cmpb $82, 1(%r12)
+. 747 0x12345678 7
+. 41 80 7C 24 01 52 C3
+
+cmpb $82, 2(%r12)
+. 748 0x12345678 7
+. 41 80 7C 24 02 52 C3
+
+cmpb $-82, %r13b
+. 749 0x12345678 5
+. 41 80 FD AE C3
+
+cmpb $83, 1(%r12)
+. 750 0x12345678 7
+. 41 80 7C 24 01 53 C3
+
+cmpb $83, 2(%r12)
+. 751 0x12345678 7
+. 41 80 7C 24 02 53 C3
+
+cmpb $83, (%rcx,%r12)
+. 752 0x12345678 6
+. 42 80 3C 21 53 C3
+
+cmpb $84, 1(%r12)
+. 753 0x12345678 7
+. 41 80 7C 24 01 54 C3
+
+cmpb $85, 1(%r12)
+. 754 0x12345678 7
+. 41 80 7C 24 01 55 C3
+
+cmpb $86, 1(%r12)
+. 755 0x12345678 7
+. 41 80 7C 24 01 56 C3
+
+cmpb $-86, %r13b
+. 756 0x12345678 5
+. 41 80 FD AA C3
+
+cmpb $87, 1(%r12)
+. 757 0x12345678 7
+. 41 80 7C 24 01 57 C3
+
+cmpb $87, %al
+. 758 0x12345678 3
+. 3C 57 C3
+
+cmpb $88, 1(%r12)
+. 759 0x12345678 7
+. 41 80 7C 24 01 58 C3
+
+cmpb $88, 2(%r12)
+. 760 0x12345678 7
+. 41 80 7C 24 02 58 C3
+
+cmpb $89, 1(%r12)
+. 761 0x12345678 7
+. 41 80 7C 24 01 59 C3
+
+cmpb $89, 2(%r12)
+. 762 0x12345678 7
+. 41 80 7C 24 02 59 C3
+
+cmpb $90, 1(%r12)
+. 763 0x12345678 7
+. 41 80 7C 24 01 5A C3
+
+cmpb $90, 2(%r12)
+. 764 0x12345678 7
+. 41 80 7C 24 02 5A C3
+
+cmpb $-90, %r13b
+. 765 0x12345678 5
+. 41 80 FD A6 C3
+
+cmpb $91, 1(%r12)
+. 766 0x12345678 7
+. 41 80 7C 24 01 5B C3
+
+cmpb $91, 2(%r12)
+. 767 0x12345678 7
+. 41 80 7C 24 02 5B C3
+
+cmpb $92, 1(%r12)
+. 768 0x12345678 7
+. 41 80 7C 24 01 5C C3
+
+cmpb $92, 2(%r12)
+. 769 0x12345678 7
+. 41 80 7C 24 02 5C C3
+
+cmpb $-92, %r13b
+. 770 0x12345678 5
+. 41 80 FD A4 C3
+
+cmpb $93, 1(%r12)
+. 771 0x12345678 7
+. 41 80 7C 24 01 5D C3
+
+cmpb $93, 2(%r12)
+. 772 0x12345678 7
+. 41 80 7C 24 02 5D C3
+
+cmpb $94, 1(%r12)
+. 773 0x12345678 7
+. 41 80 7C 24 01 5E C3
+
+cmpb $94, 2(%r12)
+. 774 0x12345678 7
+. 41 80 7C 24 02 5E C3
+
+cmpb $95, 1(%r12)
+. 775 0x12345678 7
+. 41 80 7C 24 01 5F C3
+
+cmpb $95, 2(%r12)
+. 776 0x12345678 7
+. 41 80 7C 24 02 5F C3
+
+cmpb $95, %al
+. 777 0x12345678 3
+. 3C 5F C3
+
+cmpb $96, 1(%r12)
+. 778 0x12345678 7
+. 41 80 7C 24 01 60 C3
+
+cmpb $97, 1(%r12)
+. 779 0x12345678 7
+. 41 80 7C 24 01 61 C3
+
+cmpb $98, 1(%r12)
+. 780 0x12345678 7
+. 41 80 7C 24 01 62 C3
+
+cmpb $99, 1(%r12)
+. 781 0x12345678 7
+. 41 80 7C 24 01 63 C3
+
+cmpb $9, %al
+. 782 0x12345678 3
+. 3C 09 C3
+
+cmpb $9, %cl
+. 783 0x12345678 4
+. 80 F9 09 C3
+
+cmpb %al, 8(%rdi)
+. 784 0x12345678 4
+. 38 47 08 C3
+
+cmpb %al, 8(%rdx)
+. 785 0x12345678 4
+. 38 42 08 C3
+
+cmpb %al, %bl
+. 786 0x12345678 3
+. 38 C3 C3
+
+cmpb %al, %dl
+. 787 0x12345678 3
+. 38 C2 C3
+
+cmpb %bl, %dl
+. 788 0x12345678 3
+. 38 DA C3
+
+cmpb %cl, %r10b
+. 789 0x12345678 4
+. 41 38 CA C3
+
+cmpb %cl, %sil
+. 790 0x12345678 4
+. 40 38 CE C3
+
+cmpb %dil, %sil
+. 791 0x12345678 4
+. 40 38 FE C3
+
+cmpb %dl, %al
+. 792 0x12345678 3
+. 38 D0 C3
+
+cmpb %dl, %bl
+. 793 0x12345678 3
+. 38 D3 C3
+
+cmpb %dl, %cl
+. 794 0x12345678 3
+. 38 D1 C3
+
+cmpb %dl, %dil
+. 795 0x12345678 4
+. 40 38 D7 C3
+
+cmpb %dl, %r11b
+. 796 0x12345678 4
+. 41 38 D3 C3
+
+cmpb %dl, %r8b
+. 797 0x12345678 4
+. 41 38 D0 C3
+
+cmpb %dl, %r9b
+. 798 0x12345678 4
+. 41 38 D1 C3
+
+cmpb %r11b, %r8b
+. 799 0x12345678 4
+. 45 38 D8 C3
+
+cmpb %r15b, 139(%rsp)
+. 800 0x12345678 9
+. 44 38 BC 24 8B 00 00 00 C3
+
+cmpb %r15b, 79(%rsp)
+. 801 0x12345678 6
+. 44 38 7C 24 4F C3
+
+cmpl $0, 8(%rax)
+. 802 0x12345678 5
+. 83 78 08 00 C3
+
+cmpl $1000000, %eax
+. 803 0x12345678 6
+. 3D 40 42 0F 00 C3
+
+cmpl $10000, 12(%rax)
+. 804 0x12345678 8
+. 81 78 0C 10 27 00 00 C3
+
+cmpl $10000, %ebx
+. 805 0x12345678 7
+. 81 FB 10 27 00 00 C3
+
+cmpl $10000, (%rdi)
+. 806 0x12345678 7
+. 81 3F 10 27 00 00 C3
+
+cmpl $1000, %eax
+. 807 0x12345678 6
+. 3D E8 03 00 00 C3
+
+cmpl $100, %eax
+. 808 0x12345678 4
+. 83 F8 64 C3
+
+cmpl $100, %edx
+. 809 0x12345678 4
+. 83 FA 64 C3
+
+cmpl $101, %eax
+. 810 0x12345678 4
+. 83 F8 65 C3
+
+cmpl $101, %edx
+. 811 0x12345678 4
+. 83 FA 65 C3
+
+cmpl $102, %eax
+. 812 0x12345678 4
+. 83 F8 66 C3
+
+cmpl $103, %eax
+. 813 0x12345678 4
+. 83 F8 67 C3
+
+cmpl $1073741824, %esi
+. 814 0x12345678 7
+. 81 FE 00 00 00 40 C3
+
+cmpl 108(%rsp), %eax
+. 815 0x12345678 5
+. 3B 44 24 6C C3
+
+cmpl 108(%rsp), %ebx
+. 816 0x12345678 5
+. 3B 5C 24 6C C3
+
+cmpl 108(%rsp), %r12d
+. 817 0x12345678 6
+. 44 3B 64 24 6C C3
+
+cmpl $10, %eax
+. 818 0x12345678 4
+. 83 F8 0A C3
+
+cmpl $10, %r13d
+. 819 0x12345678 5
+. 41 83 FD 0A C3
+
+cmpl $10, %r9d
+. 820 0x12345678 5
+. 41 83 F9 0A C3
+
+cmpl $1, 108(%rsp)
+. 821 0x12345678 6
+. 83 7C 24 6C 01 C3
+
+cmpl $11, %eax
+. 822 0x12345678 4
+. 83 F8 0B C3
+
+cmpl 120(%rsp), %eax
+. 823 0x12345678 5
+. 3B 44 24 78 C3
+
+cmpl 120(%rsp), %ebp
+. 824 0x12345678 5
+. 3B 6C 24 78 C3
+
+cmpl 120(%rsp), %ebx
+. 825 0x12345678 5
+. 3B 5C 24 78 C3
+
+cmpl 120(%rsp), %ecx
+. 826 0x12345678 5
+. 3B 4C 24 78 C3
+
+cmpl 120(%rsp), %r12d
+. 827 0x12345678 6
+. 44 3B 64 24 78 C3
+
+cmpl 120(%rsp), %r9d
+. 828 0x12345678 6
+. 44 3B 4C 24 78 C3
+
+cmpl $124, %eax
+. 829 0x12345678 4
+. 83 F8 7C C3
+
+cmpl $124, %r12d
+. 830 0x12345678 5
+. 41 83 FC 7C C3
+
+cmpl $-1, 24(%rax)
+. 831 0x12345678 5
+. 83 78 18 FF C3
+
+cmpl $1, 24(%rsp)
+. 832 0x12345678 6
+. 83 7C 24 18 01 C3
+
+cmpl 124(%rsp), %eax
+. 833 0x12345678 5
+. 3B 44 24 7C C3
+
+cmpl $128, %eax
+. 834 0x12345678 6
+. 3D 80 00 00 00 C3
+
+cmpl $128, %ebx
+. 835 0x12345678 7
+. 81 FB 80 00 00 00 C3
+
+cmpl $128, %ecx
+. 836 0x12345678 7
+. 81 F9 80 00 00 00 C3
+
+cmpl $128, %edi
+. 837 0x12345678 7
+. 81 FF 80 00 00 00 C3
+
+cmpl $128, %edx
+. 838 0x12345678 7
+. 81 FA 80 00 00 00 C3
+
+cmpl $128, %esi
+. 839 0x12345678 7
+. 81 FE 80 00 00 00 C3
+
+cmpl $128, %r8d
+. 840 0x12345678 8
+. 41 81 F8 80 00 00 00 C3
+
+cmpl $1, 28(%rsp)
+. 841 0x12345678 6
+. 83 7C 24 1C 01 C3
+
+cmpl $-129, %eax
+. 842 0x12345678 6
+. 3D 7F FF FF FF C3
+
+cmpl $-129, %ebx
+. 843 0x12345678 7
+. 81 FB 7F FF FF FF C3
+
+cmpl $-129, %ecx
+. 844 0x12345678 7
+. 81 F9 7F FF FF FF C3
+
+cmpl $-129, %edi
+. 845 0x12345678 7
+. 81 FF 7F FF FF FF C3
+
+cmpl $-129, %edx
+. 846 0x12345678 7
+. 81 FA 7F FF FF FF C3
+
+cmpl $-129, %esi
+. 847 0x12345678 7
+. 81 FE 7F FF FF FF C3
+
+cmpl $-129, %r8d
+. 848 0x12345678 8
+. 41 81 F8 7F FF FF FF C3
+
+cmpl $12, %eax
+. 849 0x12345678 4
+. 83 F8 0C C3
+
+cmpl 12(%r13), %r15d
+. 850 0x12345678 5
+. 45 3B 7D 0C C3
+
+cmpl 12(%r14), %eax
+. 851 0x12345678 5
+. 41 3B 46 0C C3
+
+cmpl 12(%r14), %r12d
+. 852 0x12345678 5
+. 45 3B 66 0C C3
+
+cmpl 12(%r14), %r13d
+. 853 0x12345678 5
+. 45 3B 6E 0C C3
+
+cmpl 12(%rax), %edx
+. 854 0x12345678 4
+. 3B 50 0C C3
+
+cmpl 12(%rbx), %esi
+. 855 0x12345678 4
+. 3B 73 0C C3
+
+cmpl 12(%rbx), %r13d
+. 856 0x12345678 5
+. 44 3B 6B 0C C3
+
+cmpl 12(%rcx), %ebx
+. 857 0x12345678 4
+. 3B 59 0C C3
+
+cmpl 12(%rdi), %eax
+. 858 0x12345678 4
+. 3B 47 0C C3
+
+cmpl 12(%rdi), %esi
+. 859 0x12345678 4
+. 3B 77 0C C3
+
+cmpl 12(%rdx), %eax
+. 860 0x12345678 4
+. 3B 42 0C C3
+
+cmpl 12(%rdx), %ebx
+. 861 0x12345678 4
+. 3B 5A 0C C3
+
+cmpl 12(%rsi), %edi
+. 862 0x12345678 4
+. 3B 7E 0C C3
+
+cmpl 12(%rsp), %eax
+. 863 0x12345678 5
+. 3B 44 24 0C C3
+
+cmpl 12(%rsp), %r15d
+. 864 0x12345678 6
+. 44 3B 7C 24 0C C3
+
+cmpl $1, 36(%rsp)
+. 865 0x12345678 6
+. 83 7C 24 24 01 C3
+
+cmpl $138, %eax
+. 866 0x12345678 6
+. 3D 8A 00 00 00 C3
+
+cmpl $138, %r13d
+. 867 0x12345678 8
+. 41 81 FD 8A 00 00 00 C3
+
+cmpl $13, %edi
+. 868 0x12345678 4
+. 83 FF 0D C3
+
+cmpl $13, %esi
+. 869 0x12345678 4
+. 83 FE 0D C3
+
+cmpl $13, (%rdi)
+. 870 0x12345678 4
+. 83 3F 0D C3
+
+cmpl $144, %ebx
+. 871 0x12345678 7
+. 81 FB 90 00 00 00 C3
+
+cmpl $14, %eax
+. 872 0x12345678 4
+. 83 F8 0E C3
+
+cmpl $14, %ebp
+. 873 0x12345678 4
+. 83 FD 0E C3
+
+cmpl $14, %edi
+. 874 0x12345678 4
+. 83 FF 0E C3
+
+cmpl $1, 528(%rsp)
+. 875 0x12345678 9
+. 83 BC 24 10 02 00 00 01 C3
+
+cmpl $158, %eax
+. 876 0x12345678 6
+. 3D 9E 00 00 00 C3
+
+cmpl $159, %ebx
+. 877 0x12345678 7
+. 81 FB 9F 00 00 00 C3
+
+cmpl $15, %eax
+. 878 0x12345678 4
+. 83 F8 0F C3
+
+cmpl $15, %ebp
+. 879 0x12345678 4
+. 83 FD 0F C3
+
+cmpl $15, %edi
+. 880 0x12345678 4
+. 83 FF 0F C3
+
+cmpl $15, %r14d
+. 881 0x12345678 5
+. 41 83 FE 0F C3
+
+cmpl $1, 688(%rsp,%rcx,4)
+. 882 0x12345678 9
+. 83 BC 8C B0 02 00 00 01 C3
+
+cmpl $1, 688(%rsp,%rsi,4)
+. 883 0x12345678 9
+. 83 BC B4 B0 02 00 00 01 C3
+
+cmpl $16, %eax
+. 884 0x12345678 4
+. 83 F8 10 C3
+
+cmpl $16, %edi
+. 885 0x12345678 4
+. 83 FF 10 C3
+
+cmpl $16, %edx
+. 886 0x12345678 4
+. 83 FA 10 C3
+
+cmpl $16, %r15d
+. 887 0x12345678 5
+. 41 83 FF 10 C3
+
+cmpl 16(%rbp), %r14d
+. 888 0x12345678 5
+. 44 3B 75 10 C3
+
+cmpl 16(%rsp), %eax
+. 889 0x12345678 5
+. 3B 44 24 10 C3
+
+cmpl 16(%rsp), %edx
+. 890 0x12345678 5
+. 3B 54 24 10 C3
+
+cmpl 16(%rsp), %r14d
+. 891 0x12345678 6
+. 44 3B 74 24 10 C3
+
+cmpl $174, %eax
+. 892 0x12345678 6
+. 3D AE 00 00 00 C3
+
+cmpl $175, %eax
+. 893 0x12345678 6
+. 3D AF 00 00 00 C3
+
+cmpl $175, %ebx
+. 894 0x12345678 7
+. 81 FB AF 00 00 00 C3
+
+cmpl $17, %eax
+. 895 0x12345678 4
+. 83 F8 11 C3
+
+cmpl $182, %ebx
+. 896 0x12345678 7
+. 81 FB B6 00 00 00 C3
+
+cmpl $183, %ebx
+. 897 0x12345678 7
+. 81 FB B7 00 00 00 C3
+
+cmpl $1, 88(%rsp)
+. 898 0x12345678 6
+. 83 7C 24 58 01 C3
+
+cmpl $18, %eax
+. 899 0x12345678 4
+. 83 F8 12 C3
+
+cmpl $18, %ecx
+. 900 0x12345678 4
+. 83 F9 12 C3
+
+cmpl $-1, 8(%rax)
+. 901 0x12345678 5
+. 83 78 08 FF C3
+
+cmpl $1, 8(%rbx,%rax)
+. 902 0x12345678 6
+. 83 7C 03 08 01 C3
+
+cmpl $1, 8(%rcx,%rdx)
+. 903 0x12345678 6
+. 83 7C 11 08 01 C3
+
+cmpl $1, 8(%rdi,%rax)
+. 904 0x12345678 6
+. 83 7C 07 08 01 C3
+
+cmpl $1, 8(%rdx,%rax)
+. 905 0x12345678 6
+. 83 7C 02 08 01 C3
+
+cmpl $1, 8(%rdx,%rax,4)
+. 906 0x12345678 6
+. 83 7C 82 08 01 C3
+
+cmpl $1, 8(%rsi,%rax)
+. 907 0x12345678 6
+. 83 7C 06 08 01 C3
+
+cmpl $1, 8(%rsi,%rdx)
+. 908 0x12345678 6
+. 83 7C 16 08 01 C3
+
+cmpl $1, 8(%rsp)
+. 909 0x12345678 6
+. 83 7C 24 08 01 C3
+
+cmpl $192, %edi
+. 910 0x12345678 7
+. 81 FF C0 00 00 00 C3
+
+cmpl $19, %eax
+. 911 0x12345678 4
+. 83 F8 13 C3
+
+cmpl $-1, %eax
+. 912 0x12345678 4
+. 83 F8 FF C3
+
+cmpl $1, %eax
+. 913 0x12345678 4
+. 83 F8 01 C3
+
+cmpl $1, %ebp
+. 914 0x12345678 4
+. 83 FD 01 C3
+
+cmpl $1, %ebx
+. 915 0x12345678 4
+. 83 FB 01 C3
+
+cmpl $-1, %ecx
+. 916 0x12345678 4
+. 83 F9 FF C3
+
+cmpl $1, %ecx
+. 917 0x12345678 4
+. 83 F9 01 C3
+
+cmpl $-1, %edi
+. 918 0x12345678 4
+. 83 FF FF C3
+
+cmpl $1, %edi
+. 919 0x12345678 4
+. 83 FF 01 C3
+
+cmpl $-1, %edx
+. 920 0x12345678 4
+. 83 FA FF C3
+
+cmpl $1, %edx
+. 921 0x12345678 4
+. 83 FA 01 C3
+
+cmpl $1, %esi
+. 922 0x12345678 4
+. 83 FE 01 C3
+
+cmpl $1, (%r12)
+. 923 0x12345678 6
+. 41 83 3C 24 01 C3
+
+cmpl $1, %r12d
+. 924 0x12345678 5
+. 41 83 FC 01 C3
+
+cmpl $1, (%r13)
+. 925 0x12345678 6
+. 41 83 7D 00 01 C3
+
+cmpl $1, %r13d
+. 926 0x12345678 5
+. 41 83 FD 01 C3
+
+cmpl $-1, (%r14)
+. 927 0x12345678 5
+. 41 83 3E FF C3
+
+cmpl $1, %r14d
+. 928 0x12345678 5
+. 41 83 FE 01 C3
+
+cmpl $1, (%r14,%rax,4)
+. 929 0x12345678 6
+. 41 83 3C 86 01 C3
+
+cmpl $-1, (%r15)
+. 930 0x12345678 5
+. 41 83 3F FF C3
+
+cmpl $-1, %r8d
+. 931 0x12345678 5
+. 41 83 F8 FF C3
+
+cmpl $1, %r9d
+. 932 0x12345678 5
+. 41 83 F9 01 C3
+
+cmpl $-1, (%rax)
+. 933 0x12345678 4
+. 83 38 FF C3
+
+cmpl $1, (%rax)
+. 934 0x12345678 4
+. 83 38 01 C3
+
+cmpl $-1, (%rbp)
+. 935 0x12345678 5
+. 83 7D 00 FF C3
+
+cmpl $1, (%rbx)
+. 936 0x12345678 4
+. 83 3B 01 C3
+
+cmpl $1, (%rdi)
+. 937 0x12345678 4
+. 83 3F 01 C3
+
+cmpl $202, %eax
+. 938 0x12345678 6
+. 3D CA 00 00 00 C3
+
+cmpl $202, %r13d
+. 939 0x12345678 8
+. 41 81 FD CA 00 00 00 C3
+
+cmpl $205, %eax
+. 940 0x12345678 6
+. 3D CD 00 00 00 C3
+
+cmpl 20(%rax), %ebx
+. 941 0x12345678 4
+. 3B 58 14 C3
+
+cmpl 20(%rbp), %r12d
+. 942 0x12345678 5
+. 44 3B 65 14 C3
+
+cmpl 20(%rsp), %r14d
+. 943 0x12345678 6
+. 44 3B 74 24 14 C3
+
+cmpl $2, 1024(%rsp)
+. 944 0x12345678 9
+. 83 BC 24 00 04 00 00 02 C3
+
+cmpl $2, 104(%rsp)
+. 945 0x12345678 6
+. 83 7C 24 68 02 C3
+
+cmpl $2, 108(%rsp)
+. 946 0x12345678 6
+. 83 7C 24 6C 02 C3
+
+cmpl $-2147483648, %edi
+. 947 0x12345678 7
+. 81 FF 00 00 00 80 C3
+
+cmpl $-2147483648, %edx
+. 948 0x12345678 7
+. 81 FA 00 00 00 80 C3
+
+cmpl $-2147483648, %esi
+. 949 0x12345678 7
+. 81 FE 00 00 00 80 C3
+
+cmpl $234, %eax
+. 950 0x12345678 6
+. 3D EA 00 00 00 C3
+
+cmpl $234, %r13d
+. 951 0x12345678 8
+. 41 81 FD EA 00 00 00 C3
+
+cmpl $2, 36(%rsp)
+. 952 0x12345678 6
+. 83 7C 24 24 02 C3
+
+cmpl $23, %eax
+. 953 0x12345678 4
+. 83 F8 17 C3
+
+cmpl $240, %eax
+. 954 0x12345678 6
+. 3D F0 00 00 00 C3
+
+cmpl $242, %eax
+. 955 0x12345678 6
+. 3D F2 00 00 00 C3
+
+cmpl $243, %eax
+. 956 0x12345678 6
+. 3D F3 00 00 00 C3
+
+cmpl 248(%rsp), %eax
+. 957 0x12345678 8
+. 3B 84 24 F8 00 00 00 C3
+
+cmpl 248(%rsp), %r12d
+. 958 0x12345678 9
+. 44 3B A4 24 F8 00 00 00 C3
+
+cmpl $24, %eax
+. 959 0x12345678 4
+. 83 F8 18 C3
+
+cmpl $255, %eax
+. 960 0x12345678 6
+. 3D FF 00 00 00 C3
+
+cmpl $255, %edi
+. 961 0x12345678 7
+. 81 FF FF 00 00 00 C3
+
+cmpl $255, %r14d
+. 962 0x12345678 8
+. 41 81 FE FF 00 00 00 C3
+
+cmpl $256, %eax
+. 963 0x12345678 6
+. 3D 00 01 00 00 C3
+
+cmpl $256, %ebx
+. 964 0x12345678 7
+. 81 FB 00 01 00 00 C3
+
+cmpl $256, %ecx
+. 965 0x12345678 7
+. 81 F9 00 01 00 00 C3
+
+cmpl $256, %edi
+. 966 0x12345678 7
+. 81 FF 00 01 00 00 C3
+
+cmpl $256, %edx
+. 967 0x12345678 7
+. 81 FA 00 01 00 00 C3
+
+cmpl $256, %esi
+. 968 0x12345678 7
+. 81 FE 00 01 00 00 C3
+
+cmpl $256, %r8d
+. 969 0x12345678 8
+. 41 81 F8 00 01 00 00 C3
+
+cmpl $266, %eax
+. 970 0x12345678 6
+. 3D 0A 01 00 00 C3
+
+cmpl $266, %r13d
+. 971 0x12345678 8
+. 41 81 FD 0A 01 00 00 C3
+
+cmpl $26, %eax
+. 972 0x12345678 4
+. 83 F8 1A C3
+
+cmpl $26, %r12d
+. 973 0x12345678 5
+. 41 83 FC 1A C3
+
+cmpl $27, %eax
+. 974 0x12345678 4
+. 83 F8 1B C3
+
+cmpl $284, %eax
+. 975 0x12345678 6
+. 3D 1C 01 00 00 C3
+
+cmpl $284, %r12d
+. 976 0x12345678 8
+. 41 81 FC 1C 01 00 00 C3
+
+cmpl $285, %eax
+. 977 0x12345678 6
+. 3D 1D 01 00 00 C3
+
+cmpl $28, %eax
+. 978 0x12345678 4
+. 83 F8 1C C3
+
+cmpl $28, %r12d
+. 979 0x12345678 5
+. 41 83 FC 1C C3
+
+cmpl 28(%r12), %ebp
+. 980 0x12345678 6
+. 41 3B 6C 24 1C C3
+
+cmpl 28(%r12), %ecx
+. 981 0x12345678 6
+. 41 3B 4C 24 1C C3
+
+cmpl 28(%r14), %ecx
+. 982 0x12345678 5
+. 41 3B 4E 1C C3
+
+cmpl $2, 8(%rax,%rdi)
+. 983 0x12345678 6
+. 83 7C 38 08 02 C3
+
+cmpl $2, 8(%rbx,%rax)
+. 984 0x12345678 6
+. 83 7C 03 08 02 C3
+
+cmpl $2, 8(%rcx,%r13)
+. 985 0x12345678 7
+. 42 83 7C 29 08 02 C3
+
+cmpl $2, 8(%rdi,%rdx)
+. 986 0x12345678 6
+. 83 7C 17 08 02 C3
+
+cmpl $2, 8(%rdx,%rax)
+. 987 0x12345678 6
+. 83 7C 02 08 02 C3
+
+cmpl $2, 8(%rsi,%rdx)
+. 988 0x12345678 6
+. 83 7C 16 08 02 C3
+
+cmpl 28(%rsp), %eax
+. 989 0x12345678 5
+. 3B 44 24 1C C3
+
+cmpl $2, %eax
+. 990 0x12345678 4
+. 83 F8 02 C3
+
+cmpl $2, %ebp
+. 991 0x12345678 4
+. 83 FD 02 C3
+
+cmpl $-2, %ebx
+. 992 0x12345678 4
+. 83 FB FE C3
+
+cmpl $2, %ebx
+. 993 0x12345678 4
+. 83 FB 02 C3
+
+cmpl $2, %ecx
+. 994 0x12345678 4
+. 83 F9 02 C3
+
+cmpl $2, %edi
+. 995 0x12345678 4
+. 83 FF 02 C3
+
+cmpl $2, %edx
+. 996 0x12345678 4
+. 83 FA 02 C3
+
+cmpl $2, %esi
+. 997 0x12345678 4
+. 83 FE 02 C3
+
+cmpl $2, (%r12)
+. 998 0x12345678 6
+. 41 83 3C 24 02 C3
+
+cmpl $2, %r12d
+. 999 0x12345678 5
+. 41 83 FC 02 C3
+
+cmpl $-2, %r13d
+. 1000 0x12345678 5
+. 41 83 FD FE C3
+
+cmpl $2, %r13d
+. 1001 0x12345678 5
+. 41 83 FD 02 C3
+
+cmpl $2, %r14d
+. 1002 0x12345678 5
+. 41 83 FE 02 C3
+
+cmpl $-2, (%r14,%rax,4)
+. 1003 0x12345678 6
+. 41 83 3C 86 FE C3
+
+cmpl $-2, (%r14,%rbx,4)
+. 1004 0x12345678 6
+. 41 83 3C 9E FE C3
+
+cmpl $2, %r9d
+. 1005 0x12345678 5
+. 41 83 F9 02 C3
+
+cmpl $2, (%rax)
+. 1006 0x12345678 4
+. 83 38 02 C3
+
+cmpl $2, (%rbp)
+. 1007 0x12345678 5
+. 83 7D 00 02 C3
+
+cmpl $2, (%rbx)
+. 1008 0x12345678 4
+. 83 3B 02 C3
+
+cmpl $2, (%rdi)
+. 1009 0x12345678 4
+. 83 3F 02 C3
+
+cmpl $-2, (%rdi,%rbx,4)
+. 1010 0x12345678 5
+. 83 3C 9F FE C3
+
+cmpl $2, (%rdx)
+. 1011 0x12345678 4
+. 83 3A 02 C3
+
+cmpl $-2, (%rsi,%rbx,4)
+. 1012 0x12345678 5
+. 83 3C 9E FE C3
+
+cmpl $3, 1024(%rsp)
+. 1013 0x12345678 9
+. 83 BC 24 00 04 00 00 03 C3
+
+cmpl $316, %eax
+. 1014 0x12345678 6
+. 3D 3C 01 00 00 C3
+
+cmpl $316, %r12d
+. 1015 0x12345678 8
+. 41 81 FC 3C 01 00 00 C3
+
+cmpl $31, %eax
+. 1016 0x12345678 4
+. 83 F8 1F C3
+
+cmpl $31, %ebx
+. 1017 0x12345678 4
+. 83 FB 1F C3
+
+cmpl $31, %ecx
+. 1018 0x12345678 4
+. 83 F9 1F C3
+
+cmpl $31, %edi
+. 1019 0x12345678 4
+. 83 FF 1F C3
+
+cmpl $31, %esi
+. 1020 0x12345678 4
+. 83 FE 1F C3
+
+cmpl $32768, %eax
+. 1021 0x12345678 6
+. 3D 00 80 00 00 C3
+
+cmpl $32768, %edi
+. 1022 0x12345678 7
+. 81 FF 00 80 00 00 C3
+
+cmpl $32768, %edx
+. 1023 0x12345678 7
+. 81 FA 00 80 00 00 C3
+
+cmpl $32768, %esi
+. 1024 0x12345678 7
+. 81 FE 00 80 00 00 C3
+
+cmpl $32768, %r8d
+. 1025 0x12345678 8
+. 41 81 F8 00 80 00 00 C3
+
+cmpl $-32769, %eax
+. 1026 0x12345678 6
+. 3D FF 7F FF FF C3
+
+cmpl $-32769, %edi
+. 1027 0x12345678 7
+. 81 FF FF 7F FF FF C3
+
+cmpl $-32769, %edx
+. 1028 0x12345678 7
+. 81 FA FF 7F FF FF C3
+
+cmpl $-32769, %esi
+. 1029 0x12345678 7
+. 81 FE FF 7F FF FF C3
+
+cmpl $-32769, %r8d
+. 1030 0x12345678 8
+. 41 81 F8 FF 7F FF FF C3
+
+cmpl $32, %eax
+. 1031 0x12345678 4
+. 83 F8 20 C3
+
+cmpl $32, %r15d
+. 1032 0x12345678 5
+. 41 83 FF 20 C3
+
+cmpl 32(%rbp), %r12d
+. 1033 0x12345678 5
+. 44 3B 65 20 C3
+
+cmpl 32(%rbx), %r12d
+. 1034 0x12345678 5
+. 44 3B 63 20 C3
+
+cmpl $3, 36(%rsp)
+. 1035 0x12345678 6
+. 83 7C 24 24 03 C3
+
+cmpl $37, %eax
+. 1036 0x12345678 4
+. 83 F8 25 C3
+
+cmpl $38, %eax
+. 1037 0x12345678 4
+. 83 F8 26 C3
+
+cmpl $38, %ebp
+. 1038 0x12345678 4
+. 83 FD 26 C3
+
+cmpl $38, %edx
+. 1039 0x12345678 4
+. 83 FA 26 C3
+
+cmpl $3, %eax
+. 1040 0x12345678 4
+. 83 F8 03 C3
+
+cmpl $3, %ebx
+. 1041 0x12345678 4
+. 83 FB 03 C3
+
+cmpl $3, %ecx
+. 1042 0x12345678 4
+. 83 F9 03 C3
+
+cmpl $3, %edi
+. 1043 0x12345678 4
+. 83 FF 03 C3
+
+cmpl $3, %esi
+. 1044 0x12345678 4
+. 83 FE 03 C3
+
+cmpl $3, %r9d
+. 1045 0x12345678 5
+. 41 83 F9 03 C3
+
+cmpl $3, (%rax)
+. 1046 0x12345678 4
+. 83 38 03 C3
+
+cmpl $3, (%rcx)
+. 1047 0x12345678 4
+. 83 39 03 C3
+
+cmpl $3, (%rdi)
+. 1048 0x12345678 4
+. 83 3F 03 C3
+
+cmpl $3, (%rdx)
+. 1049 0x12345678 4
+. 83 3A 03 C3
+
+cmpl $400, %eax
+. 1050 0x12345678 6
+. 3D 90 01 00 00 C3
+
+cmpl 40(%rsp), %ebx
+. 1051 0x12345678 5
+. 3B 5C 24 28 C3
+
+cmpl $4, 1024(%rsp)
+. 1052 0x12345678 9
+. 83 BC 24 00 04 00 00 04 C3
+
+cmpl $4, 104(%rsp)
+. 1053 0x12345678 6
+. 83 7C 24 68 04 C3
+
+cmpl $412, %eax
+. 1054 0x12345678 6
+. 3D 9C 01 00 00 C3
+
+cmpl $412, %r12d
+. 1055 0x12345678 8
+. 41 81 FC 9C 01 00 00 C3
+
+cmpl $41, %eax
+. 1056 0x12345678 4
+. 83 F8 29 C3
+
+cmpl $43, %eax
+. 1057 0x12345678 4
+. 83 F8 2B C3
+
+cmpl $4, 40(%rsp)
+. 1058 0x12345678 6
+. 83 7C 24 28 04 C3
+
+cmpl $444, %eax
+. 1059 0x12345678 6
+. 3D BC 01 00 00 C3
+
+cmpl $444, %r12d
+. 1060 0x12345678 8
+. 41 81 FC BC 01 00 00 C3
+
+cmpl $4, 48(%rsp)
+. 1061 0x12345678 6
+. 83 7C 24 30 04 C3
+
+cmpl $46, %eax
+. 1062 0x12345678 4
+. 83 F8 2E C3
+
+cmpl $474, %eax
+. 1063 0x12345678 6
+. 3D DA 01 00 00 C3
+
+cmpl $474, %r12d
+. 1064 0x12345678 8
+. 41 81 FC DA 01 00 00 C3
+
+cmpl $48, %eax
+. 1065 0x12345678 4
+. 83 F8 30 C3
+
+cmpl 48(%r12), %ebp
+. 1066 0x12345678 6
+. 41 3B 6C 24 30 C3
+
+cmpl $499, %eax
+. 1067 0x12345678 6
+. 3D F3 01 00 00 C3
+
+cmpl $4, %eax
+. 1068 0x12345678 4
+. 83 F8 04 C3
+
+cmpl $4, %ebp
+. 1069 0x12345678 4
+. 83 FD 04 C3
+
+cmpl $4, %ebx
+. 1070 0x12345678 4
+. 83 FB 04 C3
+
+cmpl $4, %edi
+. 1071 0x12345678 4
+. 83 FF 04 C3
+
+cmpl $4, %edx
+. 1072 0x12345678 4
+. 83 FA 04 C3
+
+cmpl $4, %esi
+. 1073 0x12345678 4
+. 83 FE 04 C3
+
+cmpl $4, %r13d
+. 1074 0x12345678 5
+. 41 83 FD 04 C3
+
+cmpl $4, %r14d
+. 1075 0x12345678 5
+. 41 83 FE 04 C3
+
+cmpl $4, %r15d
+. 1076 0x12345678 5
+. 41 83 FF 04 C3
+
+cmpl $4, %r8d
+. 1077 0x12345678 5
+. 41 83 F8 04 C3
+
+cmpl $4, (%rbp)
+. 1078 0x12345678 5
+. 83 7D 00 04 C3
+
+cmpl 4(%rcx), %eax
+. 1079 0x12345678 4
+. 3B 41 04 C3
+
+cmpl $4, (%rdi)
+. 1080 0x12345678 4
+. 83 3F 04 C3
+
+cmpl 4(%rdx), %eax
+. 1081 0x12345678 4
+. 3B 42 04 C3
+
+cmpl 4(%rsp), %r13d
+. 1082 0x12345678 6
+. 44 3B 6C 24 04 C3
+
+cmpl $500, %eax
+. 1083 0x12345678 6
+. 3D F4 01 00 00 C3
+
+cmpl $-509604128, (%rax)
+. 1084 0x12345678 7
+. 81 38 E0 0E A0 E1 C3
+
+cmpl $-509604384, 8(%rax)
+. 1085 0x12345678 8
+. 81 78 08 E0 0D A0 E1 C3
+
+cmpl $-509605408, 20(%rax)
+. 1086 0x12345678 8
+. 81 78 14 E0 09 A0 E1 C3
+
+cmpl $-509606176, 16(%rax)
+. 1087 0x12345678 8
+. 81 78 10 E0 06 A0 E1 C3
+
+cmpl $-509607200, 12(%rax)
+. 1088 0x12345678 8
+. 81 78 0C E0 02 A0 E1 C3
+
+cmpl $-509607456, 4(%rax)
+. 1089 0x12345678 8
+. 81 78 04 E0 01 A0 E1 C3
+
+cmpl $50, %eax
+. 1090 0x12345678 4
+. 83 F8 32 C3
+
+cmpl $528, %eax
+. 1091 0x12345678 6
+. 3D 10 02 00 00 C3
+
+cmpl $528, %edx
+. 1092 0x12345678 7
+. 81 FA 10 02 00 00 C3
+
+cmpl $5, 368(%rsp)
+. 1093 0x12345678 9
+. 83 BC 24 70 01 00 00 05 C3
+
+cmpl $54, %eax
+. 1094 0x12345678 4
+. 83 F8 36 C3
+
+cmpl $55, %eax
+. 1095 0x12345678 4
+. 83 F8 37 C3
+
+cmpl $5, %eax
+. 1096 0x12345678 4
+. 83 F8 05 C3
+
+cmpl $5, %ebp
+. 1097 0x12345678 4
+. 83 FD 05 C3
+
+cmpl $5, %ebx
+. 1098 0x12345678 4
+. 83 FB 05 C3
+
+cmpl $5, %edi
+. 1099 0x12345678 4
+. 83 FF 05 C3
+
+cmpl $5, %edx
+. 1100 0x12345678 4
+. 83 FA 05 C3
+
+cmpl $5, (%r12)
+. 1101 0x12345678 6
+. 41 83 3C 24 05 C3
+
+cmpl $5, (%rbp)
+. 1102 0x12345678 5
+. 83 7D 00 05 C3
+
+cmpl $5, (%rdi)
+. 1103 0x12345678 4
+. 83 3F 05 C3
+
+cmpl $60, %eax
+. 1104 0x12345678 4
+. 83 F8 3C C3
+
+cmpl $60, %r12d
+. 1105 0x12345678 5
+. 41 83 FC 3C C3
+
+cmpl 60(%rsp), %r14d
+. 1106 0x12345678 6
+. 44 3B 74 24 3C C3
+
+cmpl $62, %eax
+. 1107 0x12345678 4
+. 83 F8 3E C3
+
+cmpl $62, %edx
+. 1108 0x12345678 4
+. 83 FA 3E C3
+
+cmpl $63, %eax
+. 1109 0x12345678 4
+. 83 F8 3F C3
+
+cmpl $64, %ebx
+. 1110 0x12345678 4
+. 83 FB 40 C3
+
+cmpl $65535, %esi
+. 1111 0x12345678 7
+. 81 FE FF FF 00 00 C3
+
+cmpl $65536, %eax
+. 1112 0x12345678 6
+. 3D 00 00 01 00 C3
+
+cmpl $65536, %edi
+. 1113 0x12345678 7
+. 81 FF 00 00 01 00 C3
+
+cmpl $65536, %edx
+. 1114 0x12345678 7
+. 81 FA 00 00 01 00 C3
+
+cmpl $65536, %r8d
+. 1115 0x12345678 8
+. 41 81 F8 00 00 01 00 C3
+
+cmpl 672(%rsp,%rax,4), %esi
+. 1116 0x12345678 8
+. 3B B4 84 A0 02 00 00 C3
+
+cmpl $6, 80(%rsp)
+. 1117 0x12345678 6
+. 83 7C 24 50 06 C3
+
+cmpl $69631, 16(%rsp)
+. 1118 0x12345678 9
+. 81 7C 24 10 FF 0F 01 00 C3
+
+cmpl $69631, 20(%rsp)
+. 1119 0x12345678 9
+. 81 7C 24 14 FF 0F 01 00 C3
+
+cmpl $69632, 16(%rbp)
+. 1120 0x12345678 8
+. 81 7D 10 00 10 01 00 C3
+
+cmpl $69632, 4(%rdi)
+. 1121 0x12345678 8
+. 81 7F 04 00 10 01 00 C3
+
+cmpl $69632, %eax
+. 1122 0x12345678 6
+. 3D 00 10 01 00 C3
+
+cmpl $69632, %ebp
+. 1123 0x12345678 7
+. 81 FD 00 10 01 00 C3
+
+cmpl $69632, (%rax,%rdx,4)
+. 1124 0x12345678 8
+. 81 3C 90 00 10 01 00 C3
+
+cmpl $69633, %eax
+. 1125 0x12345678 6
+. 3D 01 10 01 00 C3
+
+cmpl $69633, %ebp
+. 1126 0x12345678 7
+. 81 FD 01 10 01 00 C3
+
+cmpl $69633, %ebx
+. 1127 0x12345678 7
+. 81 FB 01 10 01 00 C3
+
+cmpl $69633, %ecx
+. 1128 0x12345678 7
+. 81 F9 01 10 01 00 C3
+
+cmpl $69633, %edi
+. 1129 0x12345678 7
+. 81 FF 01 10 01 00 C3
+
+cmpl $69633, %edx
+. 1130 0x12345678 7
+. 81 FA 01 10 01 00 C3
+
+cmpl $69633, %r12d
+. 1131 0x12345678 8
+. 41 81 FC 01 10 01 00 C3
+
+cmpl $69633, %r15d
+. 1132 0x12345678 8
+. 41 81 FF 01 10 01 00 C3
+
+cmpl $69634, %eax
+. 1133 0x12345678 6
+. 3D 02 10 01 00 C3
+
+cmpl $69634, %ebp
+. 1134 0x12345678 7
+. 81 FD 02 10 01 00 C3
+
+cmpl $69634, %ebx
+. 1135 0x12345678 7
+. 81 FB 02 10 01 00 C3
+
+cmpl $69634, %ecx
+. 1136 0x12345678 7
+. 81 F9 02 10 01 00 C3
+
+cmpl $69634, %edi
+. 1137 0x12345678 7
+. 81 FF 02 10 01 00 C3
+
+cmpl $69634, %edx
+. 1138 0x12345678 7
+. 81 FA 02 10 01 00 C3
+
+cmpl $69634, %r12d
+. 1139 0x12345678 8
+. 41 81 FC 02 10 01 00 C3
+
+cmpl $69634, %r15d
+. 1140 0x12345678 8
+. 41 81 FF 02 10 01 00 C3
+
+cmpl $69635, %eax
+. 1141 0x12345678 6
+. 3D 03 10 01 00 C3
+
+cmpl $69635, %ebp
+. 1142 0x12345678 7
+. 81 FD 03 10 01 00 C3
+
+cmpl $69635, %ebx
+. 1143 0x12345678 7
+. 81 FB 03 10 01 00 C3
+
+cmpl $69635, %ecx
+. 1144 0x12345678 7
+. 81 F9 03 10 01 00 C3
+
+cmpl $69635, %edi
+. 1145 0x12345678 7
+. 81 FF 03 10 01 00 C3
+
+cmpl $69635, %edx
+. 1146 0x12345678 7
+. 81 FA 03 10 01 00 C3
+
+cmpl $69635, %r12d
+. 1147 0x12345678 8
+. 41 81 FC 03 10 01 00 C3
+
+cmpl $69635, %r15d
+. 1148 0x12345678 8
+. 41 81 FF 03 10 01 00 C3
+
+cmpl $69636, %eax
+. 1149 0x12345678 6
+. 3D 04 10 01 00 C3
+
+cmpl $69636, %ebp
+. 1150 0x12345678 7
+. 81 FD 04 10 01 00 C3
+
+cmpl $69636, %ebx
+. 1151 0x12345678 7
+. 81 FB 04 10 01 00 C3
+
+cmpl $69636, %ecx
+. 1152 0x12345678 7
+. 81 F9 04 10 01 00 C3
+
+cmpl $69636, %edi
+. 1153 0x12345678 7
+. 81 FF 04 10 01 00 C3
+
+cmpl $69636, %edx
+. 1154 0x12345678 7
+. 81 FA 04 10 01 00 C3
+
+cmpl $69637, %eax
+. 1155 0x12345678 6
+. 3D 05 10 01 00 C3
+
+cmpl $69638, %eax
+. 1156 0x12345678 6
+. 3D 06 10 01 00 C3
+
+cmpl $69639, %eax
+. 1157 0x12345678 6
+. 3D 07 10 01 00 C3
+
+cmpl $6, %eax
+. 1158 0x12345678 4
+. 83 F8 06 C3
+
+cmpl $6, %ebp
+. 1159 0x12345678 4
+. 83 FD 06 C3
+
+cmpl $6, %edi
+. 1160 0x12345678 4
+. 83 FF 06 C3
+
+cmpl $6, %edx
+. 1161 0x12345678 4
+. 83 FA 06 C3
+
+cmpl $6, (%r12)
+. 1162 0x12345678 6
+. 41 83 3C 24 06 C3
+
+cmpl $6, (%r14)
+. 1163 0x12345678 5
+. 41 83 3E 06 C3
+
+cmpl $6, (%rbp)
+. 1164 0x12345678 5
+. 83 7D 00 06 C3
+
+cmpl $6, (%rdi)
+. 1165 0x12345678 4
+. 83 3F 06 C3
+
+cmpl $73728, (%rax)
+. 1166 0x12345678 7
+. 81 38 00 20 01 00 C3
+
+cmpl $73728, (%rdi)
+. 1167 0x12345678 7
+. 81 3F 00 20 01 00 C3
+
+cmpl $73729, (%rax)
+. 1168 0x12345678 7
+. 81 38 01 20 01 00 C3
+
+cmpl $73729, (%rdx)
+. 1169 0x12345678 7
+. 81 3A 01 20 01 00 C3
+
+cmpl $73731, (%rdi)
+. 1170 0x12345678 7
+. 81 3F 03 20 01 00 C3
+
+cmpl $73732, %ecx
+. 1171 0x12345678 7
+. 81 F9 04 20 01 00 C3
+
+cmpl $73732, %edx
+. 1172 0x12345678 7
+. 81 FA 04 20 01 00 C3
+
+cmpl $73734, (%rax)
+. 1173 0x12345678 7
+. 81 38 06 20 01 00 C3
+
+cmpl $77824, %r13d
+. 1174 0x12345678 8
+. 41 81 FD 00 30 01 00 C3
+
+cmpl $77825, %ebp
+. 1175 0x12345678 7
+. 81 FD 01 30 01 00 C3
+
+cmpl $77825, %edi
+. 1176 0x12345678 7
+. 81 FF 01 30 01 00 C3
+
+cmpl $77825, %esi
+. 1177 0x12345678 7
+. 81 FE 01 30 01 00 C3
+
+cmpl $77825, %r8d
+. 1178 0x12345678 8
+. 41 81 F8 01 30 01 00 C3
+
+cmpl $77827, %eax
+. 1179 0x12345678 6
+. 3D 03 30 01 00 C3
+
+cmpl $77827, %ecx
+. 1180 0x12345678 7
+. 81 F9 03 30 01 00 C3
+
+cmpl $77828, %ecx
+. 1181 0x12345678 7
+. 81 F9 04 30 01 00 C3
+
+cmpl $77829, %ebp
+. 1182 0x12345678 7
+. 81 FD 05 30 01 00 C3
+
+cmpl $77829, %edi
+. 1183 0x12345678 7
+. 81 FF 05 30 01 00 C3
+
+cmpl $77829, %esi
+. 1184 0x12345678 7
+. 81 FE 05 30 01 00 C3
+
+cmpl $77829, %r8d
+. 1185 0x12345678 8
+. 41 81 F8 05 30 01 00 C3
+
+cmpl $77831, 8(%rdi)
+. 1186 0x12345678 8
+. 81 7F 08 07 30 01 00 C3
+
+cmpl $77831, %eax
+. 1187 0x12345678 6
+. 3D 07 30 01 00 C3
+
+cmpl $77833, %esi
+. 1188 0x12345678 7
+. 81 FE 09 30 01 00 C3
+
+cmpl $77837, %edi
+. 1189 0x12345678 7
+. 81 FF 0D 30 01 00 C3
+
+cmpl $77837, %esi
+. 1190 0x12345678 7
+. 81 FE 0D 30 01 00 C3
+
+cmpl $77837, %r13d
+. 1191 0x12345678 8
+. 41 81 FD 0D 30 01 00 C3
+
+cmpl $77839, %ecx
+. 1192 0x12345678 7
+. 81 F9 0F 30 01 00 C3
+
+cmpl $77840, %ecx
+. 1193 0x12345678 7
+. 81 F9 10 30 01 00 C3
+
+cmpl $77841, %edi
+. 1194 0x12345678 7
+. 81 FF 11 30 01 00 C3
+
+cmpl $77841, %esi
+. 1195 0x12345678 7
+. 81 FE 11 30 01 00 C3
+
+cmpl $77841, %r13d
+. 1196 0x12345678 8
+. 41 81 FD 11 30 01 00 C3
+
+cmpl $77843, %ecx
+. 1197 0x12345678 7
+. 81 F9 13 30 01 00 C3
+
+cmpl $77845, %ebp
+. 1198 0x12345678 7
+. 81 FD 15 30 01 00 C3
+
+cmpl $77845, %edi
+. 1199 0x12345678 7
+. 81 FF 15 30 01 00 C3
+
+cmpl $77845, %esi
+. 1200 0x12345678 7
+. 81 FE 15 30 01 00 C3
+
+cmpl $77845, %r13d
+. 1201 0x12345678 8
+. 41 81 FD 15 30 01 00 C3
+
+cmpl $77849, %esi
+. 1202 0x12345678 7
+. 81 FE 19 30 01 00 C3
+
+cmpl $77851, %ecx
+. 1203 0x12345678 7
+. 81 F9 1B 30 01 00 C3
+
+cmpl $77851, %edi
+. 1204 0x12345678 7
+. 81 FF 1B 30 01 00 C3
+
+cmpl $77852, 20(%rsp)
+. 1205 0x12345678 9
+. 81 7C 24 14 1C 30 01 00 C3
+
+cmpl $77852, %ebp
+. 1206 0x12345678 7
+. 81 FD 1C 30 01 00 C3
+
+cmpl $77852, %r12d
+. 1207 0x12345678 8
+. 41 81 FC 1C 30 01 00 C3
+
+cmpl $77853, %esi
+. 1208 0x12345678 7
+. 81 FE 1D 30 01 00 C3
+
+cmpl $77855, %ecx
+. 1209 0x12345678 7
+. 81 F9 1F 30 01 00 C3
+
+cmpl $77855, %edi
+. 1210 0x12345678 7
+. 81 FF 1F 30 01 00 C3
+
+cmpl $77856, 20(%rsp)
+. 1211 0x12345678 9
+. 81 7C 24 14 20 30 01 00 C3
+
+cmpl $77856, %ebp
+. 1212 0x12345678 7
+. 81 FD 20 30 01 00 C3
+
+cmpl $77856, %r12d
+. 1213 0x12345678 8
+. 41 81 FC 20 30 01 00 C3
+
+cmpl $77857, %esi
+. 1214 0x12345678 7
+. 81 FE 21 30 01 00 C3
+
+cmpl $77859, 20(%rsp)
+. 1215 0x12345678 9
+. 81 7C 24 14 23 30 01 00 C3
+
+cmpl $77859, %edi
+. 1216 0x12345678 7
+. 81 FF 23 30 01 00 C3
+
+cmpl $77860, 20(%rsp)
+. 1217 0x12345678 9
+. 81 7C 24 14 24 30 01 00 C3
+
+cmpl $77861, %esi
+. 1218 0x12345678 7
+. 81 FE 25 30 01 00 C3
+
+cmpl $77865, %esi
+. 1219 0x12345678 7
+. 81 FE 29 30 01 00 C3
+
+cmpl $77869, %esi
+. 1220 0x12345678 7
+. 81 FE 2D 30 01 00 C3
+
+cmpl $77978, %ebp
+. 1221 0x12345678 7
+. 81 FD 9A 30 01 00 C3
+
+cmpl $77978, %r12d
+. 1222 0x12345678 8
+. 41 81 FC 9A 30 01 00 C3
+
+cmpl $77979, %ebp
+. 1223 0x12345678 7
+. 81 FD 9B 30 01 00 C3
+
+cmpl $77979, %r12d
+. 1224 0x12345678 8
+. 41 81 FC 9B 30 01 00 C3
+
+cmpl $77981, %ebp
+. 1225 0x12345678 7
+. 81 FD 9D 30 01 00 C3
+
+cmpl $77981, %r12d
+. 1226 0x12345678 8
+. 41 81 FC 9D 30 01 00 C3
+
+cmpl $77982, %ebp
+. 1227 0x12345678 7
+. 81 FD 9E 30 01 00 C3
+
+cmpl $77982, %r12d
+. 1228 0x12345678 8
+. 41 81 FC 9E 30 01 00 C3
+
+cmpl $77983, %ebp
+. 1229 0x12345678 7
+. 81 FD 9F 30 01 00 C3
+
+cmpl $77983, %r12d
+. 1230 0x12345678 8
+. 41 81 FC 9F 30 01 00 C3
+
+cmpl $79, %eax
+. 1231 0x12345678 4
+. 83 F8 4F C3
+
+cmpl $79, %ebx
+. 1232 0x12345678 4
+. 83 FB 4F C3
+
+cmpl $7, %eax
+. 1233 0x12345678 4
+. 83 F8 07 C3
+
+cmpl $7, %ebp
+. 1234 0x12345678 4
+. 83 FD 07 C3
+
+cmpl $7, %ebx
+. 1235 0x12345678 4
+. 83 FB 07 C3
+
+cmpl $7, %ecx
+. 1236 0x12345678 4
+. 83 F9 07 C3
+
+cmpl $7, %edi
+. 1237 0x12345678 4
+. 83 FF 07 C3
+
+cmpl $7, %edx
+. 1238 0x12345678 4
+. 83 FA 07 C3
+
+cmpl $7, %esi
+. 1239 0x12345678 4
+. 83 FE 07 C3
+
+cmpl $7, %r12d
+. 1240 0x12345678 5
+. 41 83 FC 07 C3
+
+cmpl $7, (%rax)
+. 1241 0x12345678 4
+. 83 38 07 C3
+
+cmpl $7, (%rbp)
+. 1242 0x12345678 5
+. 83 7D 00 07 C3
+
+cmpl $7, (%rcx)
+. 1243 0x12345678 4
+. 83 39 07 C3
+
+cmpl $7, (%rdi)
+. 1244 0x12345678 4
+. 83 3F 07 C3
+
+cmpl $7, (%rdx)
+. 1245 0x12345678 4
+. 83 3A 07 C3
+
+cmpl $7, (%rsi)
+. 1246 0x12345678 4
+. 83 3E 07 C3
+
+cmpl $8, 104(%rsp)
+. 1247 0x12345678 6
+. 83 7C 24 68 08 C3
+
+cmpl $81920, 16(%r14)
+. 1248 0x12345678 9
+. 41 81 7E 10 00 40 01 00 C3
+
+cmpl $81920, 32(%rbx)
+. 1249 0x12345678 8
+. 81 7B 20 00 40 01 00 C3
+
+cmpl $826343424, %edi
+. 1250 0x12345678 7
+. 81 FF 00 00 41 31 C3
+
+cmpl $8, 40(%rsp)
+. 1251 0x12345678 6
+. 83 7C 24 28 08 C3
+
+cmpl $86016, 28(%r12)
+. 1252 0x12345678 10
+. 41 81 7C 24 1C 00 50 01 00 C3
+
+cmpl $86016, 28(%r13)
+. 1253 0x12345678 9
+. 41 81 7D 1C 00 50 01 00 C3
+
+cmpl $86016, 28(%r15)
+. 1254 0x12345678 9
+. 41 81 7F 1C 00 50 01 00 C3
+
+cmpl $86016, 28(%rbp)
+. 1255 0x12345678 8
+. 81 7D 1C 00 50 01 00 C3
+
+cmpl $86016, 28(%rdi)
+. 1256 0x12345678 8
+. 81 7F 1C 00 50 01 00 C3
+
+cmpl $86016, 4(%rax)
+. 1257 0x12345678 8
+. 81 78 04 00 50 01 00 C3
+
+cmpl $86016, %edi
+. 1258 0x12345678 7
+. 81 FF 00 50 01 00 C3
+
+cmpl $86017, %edi
+. 1259 0x12345678 7
+. 81 FF 01 50 01 00 C3
+
+cmpl $86018, %edi
+. 1260 0x12345678 7
+. 81 FF 02 50 01 00 C3
+
+cmpl $86019, %edi
+. 1261 0x12345678 7
+. 81 FF 03 50 01 00 C3
+
+cmpl $8, 8(%rax)
+. 1262 0x12345678 5
+. 83 78 08 08 C3
+
+cmpl $8, 8(%rsi,%rax)
+. 1263 0x12345678 6
+. 83 7C 06 08 08 C3
+
+cmpl $8, 8(%rsp)
+. 1264 0x12345678 6
+. 83 7C 24 08 08 C3
+
+cmpl $8, %eax
+. 1265 0x12345678 4
+. 83 F8 08 C3
+
+cmpl $8, %ebx
+. 1266 0x12345678 4
+. 83 FB 08 C3
+
+cmpl $8, %edi
+. 1267 0x12345678 4
+. 83 FF 08 C3
+
+cmpl $8, %edx
+. 1268 0x12345678 4
+. 83 FA 08 C3
+
+cmpl $8, %esi
+. 1269 0x12345678 4
+. 83 FE 08 C3
+
+cmpl $8, %r13d
+. 1270 0x12345678 5
+. 41 83 FD 08 C3
+
+cmpl 8(%r13), %edi
+. 1271 0x12345678 5
+. 41 3B 7D 08 C3
+
+cmpl $8, %r14d
+. 1272 0x12345678 5
+. 41 83 FE 08 C3
+
+cmpl $8, %r8d
+. 1273 0x12345678 5
+. 41 83 F8 08 C3
+
+cmpl 8(%rsp), %r15d
+. 1274 0x12345678 6
+. 44 3B 7C 24 08 C3
+
+cmpl $922, %eax
+. 1275 0x12345678 6
+. 3D 9A 03 00 00 C3
+
+cmpl $922, %r12d
+. 1276 0x12345678 8
+. 41 81 FC 9A 03 00 00 C3
+
+cmpl $94208, %esi
+. 1277 0x12345678 7
+. 81 FE 00 70 01 00 C3
+
+cmpl $94209, %esi
+. 1278 0x12345678 7
+. 81 FE 01 70 01 00 C3
+
+cmpl $94210, %esi
+. 1279 0x12345678 7
+. 81 FE 02 70 01 00 C3
+
+cmpl $94, %ebx
+. 1280 0x12345678 4
+. 83 FB 5E C3
+
+cmpl $954, %eax
+. 1281 0x12345678 6
+. 3D BA 03 00 00 C3
+
+cmpl $954, %r12d
+. 1282 0x12345678 8
+. 41 81 FC BA 03 00 00 C3
+
+cmpl $95, %ebx
+. 1283 0x12345678 4
+. 83 FB 5F C3
+
+cmpl $95, %r12d
+. 1284 0x12345678 5
+. 41 83 FC 5F C3
+
+cmpl $96, %ebx
+. 1285 0x12345678 4
+. 83 FB 60 C3
+
+cmpl $989, %edx
+. 1286 0x12345678 7
+. 81 FA DD 03 00 00 C3
+
+cmpl $999, %eax
+. 1287 0x12345678 6
+. 3D E7 03 00 00 C3
+
+cmpl $9, %eax
+. 1288 0x12345678 4
+. 83 F8 09 C3
+
+cmpl $9, %r14d
+. 1289 0x12345678 5
+. 41 83 FE 09 C3
+
+cmpl $9, (%rax)
+. 1290 0x12345678 4
+. 83 38 09 C3
+
+cmpl %eax, 120(%rsp)
+. 1291 0x12345678 5
+. 39 44 24 78 C3
+
+cmpl %eax, 124(%rsp)
+. 1292 0x12345678 5
+. 39 44 24 7C C3
+
+cmpl %eax, 12(%rsi,%rdx)
+. 1293 0x12345678 5
+. 39 44 16 0C C3
+
+cmpl %eax, 140(%rsp)
+. 1294 0x12345678 8
+. 39 84 24 8C 00 00 00 C3
+
+cmpl %eax, 16(%rbp)
+. 1295 0x12345678 4
+. 39 45 10 C3
+
+cmpl %eax, 16(%rsp)
+. 1296 0x12345678 5
+. 39 44 24 10 C3
+
+cmpl %eax, 20(%rsp)
+. 1297 0x12345678 5
+. 39 44 24 14 C3
+
+cmpl %eax, 288(%rsp)
+. 1298 0x12345678 8
+. 39 84 24 20 01 00 00 C3
+
+cmpl %eax, 32(%rbp)
+. 1299 0x12345678 4
+. 39 45 20 C3
+
+cmpl %eax, 4(%r14)
+. 1300 0x12345678 5
+. 41 39 46 04 C3
+
+cmpl %eax, 4(%rdi)
+. 1301 0x12345678 4
+. 39 47 04 C3
+
+cmpl %eax, 8(%rbp)
+. 1302 0x12345678 4
+. 39 45 08 C3
+
+cmpl %eax, 8(%rbx)
+. 1303 0x12345678 4
+. 39 43 08 C3
+
+cmpl %eax, 8(%rdi)
+. 1304 0x12345678 4
+. 39 47 08 C3
+
+cmpl %eax, 8(%rdx)
+. 1305 0x12345678 4
+. 39 42 08 C3
+
+cmpl %eax, 8(%rsi)
+. 1306 0x12345678 4
+. 39 46 08 C3
+
+cmpl %eax, 8(%rsp)
+. 1307 0x12345678 5
+. 39 44 24 08 C3
+
+cmpl %eax, %ebp
+. 1308 0x12345678 3
+. 39 C5 C3
+
+cmpl %eax, %ebx
+. 1309 0x12345678 3
+. 39 C3 C3
+
+cmpl %eax, %ecx
+. 1310 0x12345678 3
+. 39 C1 C3
+
+cmpl %eax, %edx
+. 1311 0x12345678 3
+. 39 C2 C3
+
+cmpl %eax, %esi
+. 1312 0x12345678 3
+. 39 C6 C3
+
+cmpl %eax, %r12d
+. 1313 0x12345678 4
+. 41 39 C4 C3
+
+cmpl %eax, %r13d
+. 1314 0x12345678 4
+. 41 39 C5 C3
+
+cmpl %eax, (%r14)
+. 1315 0x12345678 4
+. 41 39 06 C3
+
+cmpl %eax, %r15d
+. 1316 0x12345678 4
+. 41 39 C7 C3
+
+cmpl %eax, %r8d
+. 1317 0x12345678 4
+. 41 39 C0 C3
+
+cmpl %eax, (%rbx,%rdx,4)
+. 1318 0x12345678 4
+. 39 04 93 C3
+
+cmpl %eax, (%rdi)
+. 1319 0x12345678 3
+. 39 07 C3
+
+cmpl %ebp, 12(%rdx,%rax)
+. 1320 0x12345678 5
+. 39 6C 02 0C C3
+
+cmpl %ebp, 4(%rsp)
+. 1321 0x12345678 5
+. 39 6C 24 04 C3
+
+cmpl %ebp, 8(%rax)
+. 1322 0x12345678 4
+. 39 68 08 C3
+
+cmpl %ebp, %eax
+. 1323 0x12345678 3
+. 39 E8 C3
+
+cmpl %ebp, %ebx
+. 1324 0x12345678 3
+. 39 EB C3
+
+cmpl %ebp, %edx
+. 1325 0x12345678 3
+. 39 EA C3
+
+cmpl %ebp, %r12d
+. 1326 0x12345678 4
+. 41 39 EC C3
+
+cmpl %ebp, %r13d
+. 1327 0x12345678 4
+. 41 39 ED C3
+
+cmpl %ebp, (%rcx,%rax,4)
+. 1328 0x12345678 4
+. 39 2C 81 C3
+
+cmpl %ebp, (%rdx,%rax,4)
+. 1329 0x12345678 4
+. 39 2C 82 C3
+
+cmpl %ebx, 16(%rsp)
+. 1330 0x12345678 5
+. 39 5C 24 10 C3
+
+cmpl %ebx, %eax
+. 1331 0x12345678 3
+. 39 D8 C3
+
+cmpl %ebx, %ebp
+. 1332 0x12345678 3
+. 39 DD C3
+
+cmpl %ebx, %r13d
+. 1333 0x12345678 4
+. 41 39 DD C3
+
+cmpl %ebx, %r15d
+. 1334 0x12345678 4
+. 41 39 DF C3
+
+cmpl %ecx, %ebx
+. 1335 0x12345678 3
+. 39 CB C3
+
+cmpl %ecx, %edi
+. 1336 0x12345678 3
+. 39 CF C3
+
+cmpl %ecx, %edx
+. 1337 0x12345678 3
+. 39 CA C3
+
+cmpl %ecx, %esi
+. 1338 0x12345678 3
+. 39 CE C3
+
+cmpl %ecx, %r13d
+. 1339 0x12345678 4
+. 41 39 CD C3
+
+cmpl %ecx, %r14d
+. 1340 0x12345678 4
+. 41 39 CE C3
+
+cmpl %ecx, %r15d
+. 1341 0x12345678 4
+. 41 39 CF C3
+
+cmpl %ecx, %r8d
+. 1342 0x12345678 4
+. 41 39 C8 C3
+
+cmpl %ecx, (%rbx)
+. 1343 0x12345678 3
+. 39 0B C3
+
+cmpl %edi, %eax
+. 1344 0x12345678 3
+. 39 F8 C3
+
+cmpl %edi, %ecx
+. 1345 0x12345678 3
+. 39 F9 C3
+
+cmpl %edi, %edx
+. 1346 0x12345678 3
+. 39 FA C3
+
+cmpl %edi, %esi
+. 1347 0x12345678 3
+. 39 FE C3
+
+cmpl %edx, 12(%rsp)
+. 1348 0x12345678 5
+. 39 54 24 0C C3
+
+cmpl %edx, 16(%rsp)
+. 1349 0x12345678 5
+. 39 54 24 10 C3
+
+cmpl %edx, 288(%rsp,%rax,4)
+. 1350 0x12345678 8
+. 39 94 84 20 01 00 00 C3
+
+cmpl %edx, 292(%rsp,%rax,4)
+. 1351 0x12345678 8
+. 39 94 84 24 01 00 00 C3
+
+cmpl %edx, 68(%rsp)
+. 1352 0x12345678 5
+. 39 54 24 44 C3
+
+cmpl %edx, %eax
+. 1353 0x12345678 3
+. 39 D0 C3
+
+cmpl %edx, %ebp
+. 1354 0x12345678 3
+. 39 D5 C3
+
+cmpl %edx, %ebx
+. 1355 0x12345678 3
+. 39 D3 C3
+
+cmpl %edx, %ecx
+. 1356 0x12345678 3
+. 39 D1 C3
+
+cmpl %edx, %edi
+. 1357 0x12345678 3
+. 39 D7 C3
+
+cmpl %edx, %esi
+. 1358 0x12345678 3
+. 39 D6 C3
+
+cmpl %edx, %r12d
+. 1359 0x12345678 4
+. 41 39 D4 C3
+
+cmpl %edx, %r13d
+. 1360 0x12345678 4
+. 41 39 D5 C3
+
+cmpl %edx, %r14d
+. 1361 0x12345678 4
+. 41 39 D6 C3
+
+cmpl %edx, %r15d
+. 1362 0x12345678 4
+. 41 39 D7 C3
+
+cmpl %edx, (%rbx)
+. 1363 0x12345678 3
+. 39 13 C3
+
+cmpl %edx, (%rdi,%rax,4)
+. 1364 0x12345678 4
+. 39 14 87 C3
+
+cmpl %edx, (%rsi,%rax,4)
+. 1365 0x12345678 4
+. 39 14 86 C3
+
+cmpl %esi, 12(%rbx,%rax)
+. 1366 0x12345678 5
+. 39 74 03 0C C3
+
+cmpl %esi, %eax
+. 1367 0x12345678 3
+. 39 F0 C3
+
+cmpl %esi, %ecx
+. 1368 0x12345678 3
+. 39 F1 C3
+
+cmpl %esi, %edx
+. 1369 0x12345678 3
+. 39 F2 C3
+
+cmpl %esi, (%rcx,%rdx,8)
+. 1370 0x12345678 4
+. 39 34 D1 C3
+
+cmpl %esi, (%rdi,%rax,4)
+. 1371 0x12345678 4
+. 39 34 87 C3
+
+cmpl %r10d, %ecx
+. 1372 0x12345678 4
+. 44 39 D1 C3
+
+cmpl %r10d, %r12d
+. 1373 0x12345678 4
+. 45 39 D4 C3
+
+cmpl %r10d, %r8d
+. 1374 0x12345678 4
+. 45 39 D0 C3
+
+cmpl %r11d, %r12d
+. 1375 0x12345678 4
+. 45 39 DC C3
+
+cmpl %r11d, %r9d
+. 1376 0x12345678 4
+. 45 39 D9 C3
+
+cmpl %r12d, %eax
+. 1377 0x12345678 4
+. 44 39 E0 C3
+
+cmpl %r12d, %ebx
+. 1378 0x12345678 4
+. 44 39 E3 C3
+
+cmpl %r13d, 192(%rsp,%rax,4)
+. 1379 0x12345678 9
+. 44 39 AC 84 C0 00 00 00 C3
+
+cmpl %r13d, %ebp
+. 1380 0x12345678 4
+. 44 39 ED C3
+
+cmpl %r14d, 20(%rsp)
+. 1381 0x12345678 6
+. 44 39 74 24 14 C3
+
+cmpl %r14d, %ebp
+. 1382 0x12345678 4
+. 44 39 F5 C3
+
+cmpl %r15d, %ebp
+. 1383 0x12345678 4
+. 44 39 FD C3
+
+cmpl %r15d, %ebx
+. 1384 0x12345678 4
+. 44 39 FB C3
+
+cmpl %r15d, %ecx
+. 1385 0x12345678 4
+. 44 39 F9 C3
+
+cmpl %r15d, %r12d
+. 1386 0x12345678 4
+. 45 39 FC C3
+
+cmpl %r15d, %r14d
+. 1387 0x12345678 4
+. 45 39 FE C3
+
+cmpl %r15d, %r8d
+. 1388 0x12345678 4
+. 45 39 F8 C3
+
+cmpl %r8d, %ecx
+. 1389 0x12345678 4
+. 44 39 C1 C3
+
+cmpl %r8d, %edx
+. 1390 0x12345678 4
+. 44 39 C2 C3
+
+cmpl %r8d, %esi
+. 1391 0x12345678 4
+. 44 39 C6 C3
+
+cmpl %r9d, %esi
+. 1392 0x12345678 4
+. 44 39 CE C3
+
+cmpl (%rax), %edx
+. 1393 0x12345678 3
+. 3B 10 C3
+
+cmpl (%rdx), %eax
+. 1394 0x12345678 3
+. 3B 02 C3
+
+cmpl (%rsi), %eax
+. 1395 0x12345678 3
+. 3B 06 C3
+
+cmpl (%rsi), %edx
+. 1396 0x12345678 3
+. 3B 16 C3
+
+cmpl 16(%rip), %eax
+. 1397 0x12345678 7
+. 3B 05 10 00 00 00 C3
+
+cmpl 16(%rip), %r13d
+. 1398 0x12345678 8
+. 44 3B 2D 10 00 00 00 C3
+
+cmpl 16(%rip), %r15d
+. 1399 0x12345678 8
+. 44 3B 3D 10 00 00 00 C3
+
+cmpl 20(%rip), %ecx
+. 1400 0x12345678 7
+. 3B 0D 14 00 00 00 C3
+
+cmpq $0, 16(%rdi)
+. 1401 0x12345678 6
+. 48 83 7F 10 00 C3
+
+cmpq $0, 16(%rsp)
+. 1402 0x12345678 7
+. 48 83 7C 24 10 00 C3
+
+cmpq $0, 192(%rsp)
+. 1403 0x12345678 10
+. 48 83 BC 24 C0 00 00 00 00 C3
+
+cmpq $0, 200(%rsp)
+. 1404 0x12345678 10
+. 48 83 BC 24 C8 00 00 00 00 C3
+
+cmpq $0, 24(%rax)
+. 1405 0x12345678 6
+. 48 83 78 18 00 C3
+
+cmpq $0, 24(%rbx)
+. 1406 0x12345678 6
+. 48 83 7B 18 00 C3
+
+cmpq $0, 24(%rsp)
+. 1407 0x12345678 7
+. 48 83 7C 24 18 00 C3
+
+cmpq $0, 264(%rsp)
+. 1408 0x12345678 10
+. 48 83 BC 24 08 01 00 00 00 C3
+
+cmpq $0, 272(%rsp)
+. 1409 0x12345678 10
+. 48 83 BC 24 10 01 00 00 00 C3
+
+cmpq $0, 32(%r13)
+. 1410 0x12345678 6
+. 49 83 7D 20 00 C3
+
+cmpq $0, 32(%rbp)
+. 1411 0x12345678 6
+. 48 83 7D 20 00 C3
+
+cmpq $0, 560(%rsp)
+. 1412 0x12345678 10
+. 48 83 BC 24 30 02 00 00 00 C3
+
+cmpq $0, 8(%rax)
+. 1413 0x12345678 6
+. 48 83 78 08 00 C3
+
+cmpq $0, 8(%rbx)
+. 1414 0x12345678 6
+. 48 83 7B 08 00 C3
+
+cmpq $0, 8(%rdi)
+. 1415 0x12345678 6
+. 48 83 7F 08 00 C3
+
+cmpq $0, 8(%rdx,%rbx,8)
+. 1416 0x12345678 7
+. 48 83 7C DA 08 00 C3
+
+cmpq $0, (%r12,%rax,8)
+. 1417 0x12345678 6
+. 49 83 3C C4 00 C3
+
+cmpq $0, (%r13,%rax,8)
+. 1418 0x12345678 7
+. 49 83 7C C5 00 00 C3
+
+cmpq $0, (%r9,%rax,8)
+. 1419 0x12345678 6
+. 49 83 3C C1 00 C3
+
+cmpq $0, (%rax)
+. 1420 0x12345678 5
+. 48 83 38 00 C3
+
+cmpq $0, (%rax,%rbx,8)
+. 1421 0x12345678 6
+. 48 83 3C D8 00 C3
+
+cmpq $0, (%rax,%rcx,8)
+. 1422 0x12345678 6
+. 48 83 3C C8 00 C3
+
+cmpq $0, (%rax,%rdx,8)
+. 1423 0x12345678 6
+. 48 83 3C D0 00 C3
+
+cmpq $0, (%rbp,%rax,8)
+. 1424 0x12345678 7
+. 48 83 7C C5 00 00 C3
+
+cmpq $0, (%rbx)
+. 1425 0x12345678 5
+. 48 83 3B 00 C3
+
+cmpq $0, (%rbx,%rax,8)
+. 1426 0x12345678 6
+. 48 83 3C C3 00 C3
+
+cmpq $0, (%rbx,%rcx,8)
+. 1427 0x12345678 6
+. 48 83 3C CB 00 C3
+
+cmpq $0, (%rcx)
+. 1428 0x12345678 5
+. 48 83 39 00 C3
+
+cmpq $0, (%rcx,%rax,8)
+. 1429 0x12345678 6
+. 48 83 3C C1 00 C3
+
+cmpq $0, (%rdi)
+. 1430 0x12345678 5
+. 48 83 3F 00 C3
+
+cmpq $0, (%rdi,%rax,8)
+. 1431 0x12345678 6
+. 48 83 3C C7 00 C3
+
+cmpq $0, (%rdx)
+. 1432 0x12345678 5
+. 48 83 3A 00 C3
+
+cmpq $0, (%rdx,%rax,8)
+. 1433 0x12345678 6
+. 48 83 3C C2 00 C3
+
+cmpq $0, (%rdx,%rbx,8)
+. 1434 0x12345678 6
+. 48 83 3C DA 00 C3
+
+cmpq $0, (%rsi)
+. 1435 0x12345678 5
+. 48 83 3E 00 C3
+
+cmpq $0, (%rsi,%rax,8)
+. 1436 0x12345678 6
+. 48 83 3C C6 00 C3
+
+cmpq $0, (%rsp)
+. 1437 0x12345678 6
+. 48 83 3C 24 00 C3
+
+cmpq $255, %rdi
+. 1438 0x12345678 8
+. 48 81 FF FF 00 00 00 C3
+
+cmpq $65535, %rsi
+. 1439 0x12345678 8
+. 48 81 FE FF FF 00 00 C3
+
+cmpq %r13, %r8
+. 1440 0x12345678 4
+. 4D 39 E8 C3
+
+cmpq %r14, (%r12)
+. 1441 0x12345678 5
+. 4D 39 34 24 C3
+
+cmpq %rax, 8(%rbp)
+. 1442 0x12345678 5
+. 48 39 45 08 C3
+
+cmpq %rax, 8(%rdi)
+. 1443 0x12345678 5
+. 48 39 47 08 C3
+
+cmpq %rax, %rsi
+. 1444 0x12345678 4
+. 48 39 C6 C3
+
+cmpq %rbp, (%rax,%rcx,8)
+. 1445 0x12345678 5
+. 48 39 2C C8 C3
+
+cmpq %rcx, 8(%rdx)
+. 1446 0x12345678 5
+. 48 39 4A 08 C3
+
+cmpq %rdx, (%rax,%rcx,8)
+. 1447 0x12345678 5
+. 48 39 14 C8 C3
+
+cmpw $1, 142(%rsp)
+. 1448 0x12345678 10
+. 66 83 BC 24 8E 00 00 00 01 C3
+
+cmpw $128, %ax
+. 1449 0x12345678 5
+. 66 3D 80 00 C3
+
+cmpw $128, %bx
+. 1450 0x12345678 6
+. 66 81 FB 80 00 C3
+
+cmpw $128, %cx
+. 1451 0x12345678 6
+. 66 81 F9 80 00 C3
+
+cmpw $128, %di
+. 1452 0x12345678 6
+. 66 81 FF 80 00 C3
+
+cmpw $128, %dx
+. 1453 0x12345678 6
+. 66 81 FA 80 00 C3
+
+cmpw $128, %r11w
+. 1454 0x12345678 7
+. 66 41 81 FB 80 00 C3
+
+cmpw $128, %r8w
+. 1455 0x12345678 7
+. 66 41 81 F8 80 00 C3
+
+cmpw $128, %r9w
+. 1456 0x12345678 7
+. 66 41 81 F9 80 00 C3
+
+cmpw $-129, %ax
+. 1457 0x12345678 5
+. 66 3D 7F FF C3
+
+cmpw $-129, %bx
+. 1458 0x12345678 6
+. 66 81 FB 7F FF C3
+
+cmpw $-129, %di
+. 1459 0x12345678 6
+. 66 81 FF 7F FF C3
+
+cmpw $-129, %dx
+. 1460 0x12345678 6
+. 66 81 FA 7F FF C3
+
+cmpw $-129, %r11w
+. 1461 0x12345678 7
+. 66 41 81 FB 7F FF C3
+
+cmpw $-129, %r8w
+. 1462 0x12345678 7
+. 66 41 81 F8 7F FF C3
+
+cmpw $-129, %r9w
+. 1463 0x12345678 7
+. 66 41 81 F9 7F FF C3
+
+cmpw $-129, %si
+. 1464 0x12345678 6
+. 66 81 FE 7F FF C3
+
+cmpw $1, %ax
+. 1465 0x12345678 5
+. 66 83 F8 01 C3
+
+cmpw $1, %cx
+. 1466 0x12345678 5
+. 66 83 F9 01 C3
+
+cmpw $1, %di
+. 1467 0x12345678 5
+. 66 83 FF 01 C3
+
+cmpw $1, %dx
+. 1468 0x12345678 5
+. 66 83 FA 01 C3
+
+cmpw $2, 30(%rbx)
+. 1469 0x12345678 6
+. 66 83 7B 1E 02 C3
+
+cmpw $256, %ax
+. 1470 0x12345678 5
+. 66 3D 00 01 C3
+
+cmpw $256, %bx
+. 1471 0x12345678 6
+. 66 81 FB 00 01 C3
+
+cmpw $256, %cx
+. 1472 0x12345678 6
+. 66 81 F9 00 01 C3
+
+cmpw $256, %di
+. 1473 0x12345678 6
+. 66 81 FF 00 01 C3
+
+cmpw $256, %dx
+. 1474 0x12345678 6
+. 66 81 FA 00 01 C3
+
+cmpw $256, %r11w
+. 1475 0x12345678 7
+. 66 41 81 FB 00 01 C3
+
+cmpw $256, %r8w
+. 1476 0x12345678 7
+. 66 41 81 F8 00 01 C3
+
+cmpw $256, %r9w
+. 1477 0x12345678 7
+. 66 41 81 F9 00 01 C3
+
+cmpw $2, %ax
+. 1478 0x12345678 5
+. 66 83 F8 02 C3
+
+cmpw $-2, %dx
+. 1479 0x12345678 5
+. 66 83 FA FE C3
+
+cmpw $-2, (%rdx,%rax)
+. 1480 0x12345678 6
+. 66 83 3C 02 FE C3
+
+cmpw $-2, (%rsi,%rax)
+. 1481 0x12345678 6
+. 66 83 3C 06 FE C3
+
+cmpw $3, %ax
+. 1482 0x12345678 5
+. 66 83 F8 03 C3
+
+cmpw $9999, 24(%r12,%rax,2)
+. 1483 0x12345678 9
+. 66 41 81 7C 44 18 0F 27 C3
+
+cmpw %ax, 8(%rdi)
+. 1484 0x12345678 5
+. 66 39 47 08 C3
+
+cmpw %ax, %r9w
+. 1485 0x12345678 5
+. 66 41 39 C1 C3
+
+cmpw %di, %si
+. 1486 0x12345678 4
+. 66 39 FE C3
+
+cmpw %dx, %cx
+. 1487 0x12345678 4
+. 66 39 D1 C3
+
+cmpw %dx, %r9w
+. 1488 0x12345678 5
+. 66 41 39 D1 C3
+
+cmpw %r10w, %dx
+. 1489 0x12345678 5
+. 66 44 39 D2 C3
+
+cmpw %r8w, %ax
+. 1490 0x12345678 5
+. 66 44 39 C0 C3
+
+cmpw %r8w, %dx
+. 1491 0x12345678 5
+. 66 44 39 C2 C3
+
+cwtl
+. 1492 0x12345678 2
+. 98 C3
+
+decb %al
+. 1493 0x12345678 3
+. FE C8 C3
+
+decb %dl
+. 1494 0x12345678 3
+. FE CA C3
+
+decb %r14b
+. 1495 0x12345678 4
+. 41 FE CE C3
+
+decb %sil
+. 1496 0x12345678 4
+. 40 FE CE C3
+
+decl %eax
+. 1497 0x12345678 3
+. FF C8 C3
+
+decl %ebp
+. 1498 0x12345678 3
+. FF CD C3
+
+decl %ebx
+. 1499 0x12345678 3
+. FF CB C3
+
+decl %ecx
+. 1500 0x12345678 3
+. FF C9 C3
+
+decl %edi
+. 1501 0x12345678 3
+. FF CF C3
+
+decl %edx
+. 1502 0x12345678 3
+. FF CA C3
+
+decl %esi
+. 1503 0x12345678 3
+. FF CE C3
+
+decl %r13d
+. 1504 0x12345678 4
+. 41 FF CD C3
+
+decl %r14d
+. 1505 0x12345678 4
+. 41 FF CE C3
+
+decl %r15d
+. 1506 0x12345678 4
+. 41 FF CF C3
+
+divq %r8
+. 1507 0x12345678 4
+. 49 F7 F0 C3
+
+idivl %edi
+. 1508 0x12345678 3
+. F7 FF C3
+
+imull 8(%r13), %eax
+. 1509 0x12345678 6
+. 41 0F AF 45 08 C3
+
+imull 8(%r14), %eax
+. 1510 0x12345678 6
+. 41 0F AF 46 08 C3
+
+imull 8(%rbx), %eax
+. 1511 0x12345678 5
+. 0F AF 43 08 C3
+
+imull 8(%rdx), %edi
+. 1512 0x12345678 5
+. 0F AF 7A 08 C3
+
+imull %eax, %ebp
+. 1513 0x12345678 4
+. 0F AF E8 C3
+
+imull %ecx, %edx
+. 1514 0x12345678 4
+. 0F AF D1 C3
+
+imull %ecx, %r8d
+. 1515 0x12345678 5
+. 44 0F AF C1 C3
+
+imull %edx, %eax
+. 1516 0x12345678 4
+. 0F AF C2 C3
+
+imull %esi, %edi
+. 1517 0x12345678 4
+. 0F AF FE C3
+
+imulq $1431655766, %rax, %rbx
+. 1518 0x12345678 8
+. 48 69 D8 56 55 55 55 C3
+
+incl 108(%rsp)
+. 1519 0x12345678 5
+. FF 44 24 6C C3
+
+incl 120(%rsp)
+. 1520 0x12345678 5
+. FF 44 24 78 C3
+
+incl 124(%rsp)
+. 1521 0x12345678 5
+. FF 44 24 7C C3
+
+incl 12(%rsp)
+. 1522 0x12345678 5
+. FF 44 24 0C C3
+
+incl 140(%rsp)
+. 1523 0x12345678 8
+. FF 84 24 8C 00 00 00 C3
+
+incl 16(%rsp)
+. 1524 0x12345678 5
+. FF 44 24 10 C3
+
+incl 20(%rsp)
+. 1525 0x12345678 5
+. FF 44 24 14 C3
+
+incl 24(%rsp)
+. 1526 0x12345678 5
+. FF 44 24 18 C3
+
+incl 288(%rsp)
+. 1527 0x12345678 8
+. FF 84 24 20 01 00 00 C3
+
+incl 28(%rsp)
+. 1528 0x12345678 5
+. FF 44 24 1C C3
+
+incl 40(%rsp)
+. 1529 0x12345678 5
+. FF 44 24 28 C3
+
+incl 56(%rsp)
+. 1530 0x12345678 5
+. FF 44 24 38 C3
+
+incl 68(%rsp)
+. 1531 0x12345678 5
+. FF 44 24 44 C3
+
+incl %eax
+. 1532 0x12345678 3
+. FF C0 C3
+
+incl %ebp
+. 1533 0x12345678 3
+. FF C5 C3
+
+incl %ebx
+. 1534 0x12345678 3
+. FF C3 C3
+
+incl %ecx
+. 1535 0x12345678 3
+. FF C1 C3
+
+incl %edi
+. 1536 0x12345678 3
+. FF C7 C3
+
+incl %edx
+. 1537 0x12345678 3
+. FF C2 C3
+
+incl %esi
+. 1538 0x12345678 3
+. FF C6 C3
+
+incl 1(%rip)
+. 1539 0x12345678 7
+. FF 05 01 00 00 00 C3
+
+incl 8(%rip)
+. 1540 0x12345678 7
+. FF 05 08 00 00 00 C3
+
+incl 9(%rip)
+. 1541 0x12345678 7
+. FF 05 09 00 00 00 C3
+
+incl 0(%rip)
+. 1542 0x12345678 7
+. FF 05 00 00 00 00 C3
+
+incl %r10d
+. 1543 0x12345678 4
+. 41 FF C2 C3
+
+incl %r12d
+. 1544 0x12345678 4
+. 41 FF C4 C3
+
+incl %r13d
+. 1545 0x12345678 4
+. 41 FF C5 C3
+
+incl %r14d
+. 1546 0x12345678 4
+. 41 FF C6 C3
+
+incl (%r14,%rax,4)
+. 1547 0x12345678 5
+. 41 FF 04 86 C3
+
+incl %r15d
+. 1548 0x12345678 4
+. 41 FF C7 C3
+
+incl %r8d
+. 1549 0x12345678 4
+. 41 FF C0 C3
+
+incl %r9d
+. 1550 0x12345678 4
+. 41 FF C1 C3
+
+incl (%rax)
+. 1551 0x12345678 3
+. FF 00 C3
+
+incl -99(%rip)
+. 1552 0x12345678 7
+. FF 05 9D FF FF FF C3
+
+incq %r13
+. 1553 0x12345678 4
+. 49 FF C5 C3
+
+incq %rax
+. 1554 0x12345678 4
+. 48 FF C0 C3
+
+incq %rbp
+. 1555 0x12345678 4
+. 48 FF C5 C3
+
+incq %rbx
+. 1556 0x12345678 4
+. 48 FF C3 C3
+
+incq %rdi
+. 1557 0x12345678 4
+. 48 FF C7 C3
+
+incq %rsi
+. 1558 0x12345678 4
+. 48 FF C6 C3
+
+incq 33(%rip)
+. 1559 0x12345678 8
+. 48 FF 05 21 00 00 00 C3
+
+jmp *3998(,%rax,8)
+. 1560 0x12345678 7
+. FF 24 C5 9E 0F 00 00
+
+jmp *%rdx
+. 1561 0x12345678 2
+. FF E2
+
+leal 0(,%r14,8), %r12d
+. 1562 0x12345678 9
+. 46 8D 24 F5 00 00 00 00 C3
+
+leal 0(,%rdx,8), %eax
+. 1563 0x12345678 8
+. 8D 04 D5 00 00 00 00 C3
+
+leal 0(,%rdx,8), %edi
+. 1564 0x12345678 8
+. 8D 3C D5 00 00 00 00 C3
+
+leal 0(,%rsi,4), %edx
+. 1565 0x12345678 8
+. 8D 14 B5 00 00 00 00 C3
+
+leal 0(,%rsi,8), %edx
+. 1566 0x12345678 8
+. 8D 14 F5 00 00 00 00 C3
+
+leal -112(%rax), %ebx
+. 1567 0x12345678 4
+. 8D 58 90 C3
+
+leal -112(%rbx), %eax
+. 1568 0x12345678 4
+. 8D 43 90 C3
+
+leal -128(%rax), %ebx
+. 1569 0x12345678 4
+. 8D 58 80 C3
+
+leal -128(%rax), %edi
+. 1570 0x12345678 4
+. 8D 78 80 C3
+
+leal -129(%rax), %r12d
+. 1571 0x12345678 8
+. 44 8D A0 7F FF FF FF C3
+
+leal -144(%rax), %r12d
+. 1572 0x12345678 8
+. 44 8D A0 70 FF FF FF C3
+
+leal -144(%rax), %r14d
+. 1573 0x12345678 8
+. 44 8D B0 70 FF FF FF C3
+
+leal -176(%rdx), %ebx
+. 1574 0x12345678 7
+. 8D 9A 50 FF FF FF C3
+
+leal -184(%rax), %ebx
+. 1575 0x12345678 7
+. 8D 98 48 FF FF FF C3
+
+leal -184(%rdx), %ebx
+. 1576 0x12345678 7
+. 8D 9A 48 FF FF FF C3
+
+leal -192(%rax), %ebp
+. 1577 0x12345678 7
+. 8D A8 40 FF FF FF C3
+
+leal -1(%r12), %eax
+. 1578 0x12345678 6
+. 41 8D 44 24 FF C3
+
+leal 1(%r12), %eax
+. 1579 0x12345678 6
+. 41 8D 44 24 01 C3
+
+leal 1(%r12), %ebx
+. 1580 0x12345678 6
+. 41 8D 5C 24 01 C3
+
+leal 1(%r12), %edi
+. 1581 0x12345678 6
+. 41 8D 7C 24 01 C3
+
+leal -1(%r13), %eax
+. 1582 0x12345678 5
+. 41 8D 45 FF C3
+
+leal 1(%r13), %eax
+. 1583 0x12345678 5
+. 41 8D 45 01 C3
+
+leal -1(%r14), %eax
+. 1584 0x12345678 5
+. 41 8D 46 FF C3
+
+leal 1(%r14), %eax
+. 1585 0x12345678 5
+. 41 8D 46 01 C3
+
+leal -1(%r14), %edi
+. 1586 0x12345678 5
+. 41 8D 7E FF C3
+
+leal 1(%r15), %eax
+. 1587 0x12345678 5
+. 41 8D 47 01 C3
+
+leal 1(%r8), %ecx
+. 1588 0x12345678 5
+. 41 8D 48 01 C3
+
+leal -1(,%rax,8), %edi
+. 1589 0x12345678 8
+. 8D 3C C5 FF FF FF FF C3
+
+leal 1(%rax), %r12d
+. 1590 0x12345678 5
+. 44 8D 60 01 C3
+
+leal -1(%rax,%r12), %ebp
+. 1591 0x12345678 6
+. 42 8D 6C 20 FF C3
+
+leal -1(%rax,%r12), %edx
+. 1592 0x12345678 6
+. 42 8D 54 20 FF C3
+
+leal -1(%rax,%rbp), %eax
+. 1593 0x12345678 5
+. 8D 44 28 FF C3
+
+leal -1(%rax,%rbp), %ebx
+. 1594 0x12345678 5
+. 8D 5C 28 FF C3
+
+leal -1(%rax,%rbx), %eax
+. 1595 0x12345678 5
+. 8D 44 18 FF C3
+
+leal 1(%rax,%rbx), %r15d
+. 1596 0x12345678 6
+. 44 8D 7C 18 01 C3
+
+leal -1(%rbp), %eax
+. 1597 0x12345678 4
+. 8D 45 FF C3
+
+leal 1(%rbp), %eax
+. 1598 0x12345678 4
+. 8D 45 01 C3
+
+leal -1(%rbp), %edi
+. 1599 0x12345678 4
+. 8D 7D FF C3
+
+leal 1(%rbp), %edi
+. 1600 0x12345678 4
+. 8D 7D 01 C3
+
+leal -1(%rbp), %esi
+. 1601 0x12345678 4
+. 8D 75 FF C3
+
+leal 1(%rbp), %r12d
+. 1602 0x12345678 5
+. 44 8D 65 01 C3
+
+leal 1(%rbp), %r13d
+. 1603 0x12345678 5
+. 44 8D 6D 01 C3
+
+leal -1(%rbp,%rbx), %r12d
+. 1604 0x12345678 6
+. 44 8D 64 1D FF C3
+
+leal -1(%rbx), %eax
+. 1605 0x12345678 4
+. 8D 43 FF C3
+
+leal 1(%rbx), %edi
+. 1606 0x12345678 4
+. 8D 7B 01 C3
+
+leal 1(%rbx), %edx
+. 1607 0x12345678 4
+. 8D 53 01 C3
+
+leal -1(%rcx), %eax
+. 1608 0x12345678 4
+. 8D 41 FF C3
+
+leal 1(%rcx), %eax
+. 1609 0x12345678 4
+. 8D 41 01 C3
+
+leal 1(%rcx), %edi
+. 1610 0x12345678 4
+. 8D 79 01 C3
+
+leal 1(%rcx,%r8), %r8d
+. 1611 0x12345678 6
+. 46 8D 44 01 01 C3
+
+leal 1(%rcx,%rdx), %edx
+. 1612 0x12345678 5
+. 8D 54 11 01 C3
+
+leal 1(%rdi), %eax
+. 1613 0x12345678 4
+. 8D 47 01 C3
+
+leal -1(%rdi), %ebp
+. 1614 0x12345678 4
+. 8D 6F FF C3
+
+leal 1(%rdi), %edx
+. 1615 0x12345678 4
+. 8D 57 01 C3
+
+leal -1(%rdx), %eax
+. 1616 0x12345678 4
+. 8D 42 FF C3
+
+leal 1(%rdx), %eax
+. 1617 0x12345678 4
+. 8D 42 01 C3
+
+leal 1(%rdx), %edi
+. 1618 0x12345678 4
+. 8D 7A 01 C3
+
+leal 1(%rdx,%r8), %r8d
+. 1619 0x12345678 6
+. 46 8D 44 02 01 C3
+
+leal 1(%rdx,%rax), %eax
+. 1620 0x12345678 5
+. 8D 44 02 01 C3
+
+leal 1(%rdx,%rbp), %r10d
+. 1621 0x12345678 6
+. 44 8D 54 2A 01 C3
+
+leal 1(%rdx,%rbx), %ebx
+. 1622 0x12345678 5
+. 8D 5C 1A 01 C3
+
+leal 1(%rdx,%rcx), %ecx
+. 1623 0x12345678 5
+. 8D 4C 0A 01 C3
+
+leal 1(%rdx,%rdi), %edi
+. 1624 0x12345678 5
+. 8D 7C 3A 01 C3
+
+leal 1(%rdx,%rsi), %esi
+. 1625 0x12345678 5
+. 8D 74 32 01 C3
+
+leal -1(%rsi), %eax
+. 1626 0x12345678 4
+. 8D 46 FF C3
+
+leal 1(%rsi), %eax
+. 1627 0x12345678 4
+. 8D 46 01 C3
+
+leal 1(%rsi,%rdi), %edi
+. 1628 0x12345678 5
+. 8D 7C 3E 01 C3
+
+leal -200(%rax), %ebp
+. 1629 0x12345678 7
+. 8D A8 38 FF FF FF C3
+
+leal -200(%rax), %r14d
+. 1630 0x12345678 8
+. 44 8D B0 38 FF FF FF C3
+
+leal -208(%rax), %ebp
+. 1631 0x12345678 7
+. 8D A8 30 FF FF FF C3
+
+leal -208(%rax), %ebx
+. 1632 0x12345678 7
+. 8D 98 30 FF FF FF C3
+
+leal -20(%rdx), %eax
+. 1633 0x12345678 4
+. 8D 42 EC C3
+
+leal -216(%rax), %ebx
+. 1634 0x12345678 7
+. 8D 98 28 FF FF FF C3
+
+leal -224(%rax), %ebx
+. 1635 0x12345678 7
+. 8D 98 20 FF FF FF C3
+
+leal -232(%rax), %ebx
+. 1636 0x12345678 7
+. 8D 98 18 FF FF FF C3
+
+leal -24(%rbx), %eax
+. 1637 0x12345678 4
+. 8D 43 E8 C3
+
+leal -2(%r15), %eax
+. 1638 0x12345678 5
+. 41 8D 47 FE C3
+
+leal -2(%r9), %eax
+. 1639 0x12345678 5
+. 41 8D 41 FE C3
+
+leal 2(,%rax,4), %eax
+. 1640 0x12345678 8
+. 8D 04 85 02 00 00 00 C3
+
+leal 2(%rax), %ebp
+. 1641 0x12345678 4
+. 8D 68 02 C3
+
+leal -2(%rbp), %eax
+. 1642 0x12345678 4
+. 8D 45 FE C3
+
+leal 2(%rbp), %ebx
+. 1643 0x12345678 4
+. 8D 5D 02 C3
+
+leal 2(%rbp), %edi
+. 1644 0x12345678 4
+. 8D 7D 02 C3
+
+leal 2(%rbp), %edx
+. 1645 0x12345678 4
+. 8D 55 02 C3
+
+leal 2(%rbp), %esi
+. 1646 0x12345678 4
+. 8D 75 02 C3
+
+leal 2(%rbp), %r13d
+. 1647 0x12345678 5
+. 44 8D 6D 02 C3
+
+leal 2(%rbx), %eax
+. 1648 0x12345678 4
+. 8D 43 02 C3
+
+leal 2(%rcx), %edi
+. 1649 0x12345678 4
+. 8D 79 02 C3
+
+leal 2(%rdi), %edx
+. 1650 0x12345678 4
+. 8D 57 02 C3
+
+leal 2(%rsi), %eax
+. 1651 0x12345678 4
+. 8D 46 02 C3
+
+leal -32(%rdx), %eax
+. 1652 0x12345678 4
+. 8D 42 E0 C3
+
+leal 38(%r14), %eax
+. 1653 0x12345678 5
+. 41 8D 46 26 C3
+
+leal 3(%rax), %ebp
+. 1654 0x12345678 4
+. 8D 68 03 C3
+
+leal 3(%rbp), %ebx
+. 1655 0x12345678 4
+. 8D 5D 03 C3
+
+leal 3(%rbp), %edi
+. 1656 0x12345678 4
+. 8D 7D 03 C3
+
+leal 3(%rbp), %edx
+. 1657 0x12345678 4
+. 8D 55 03 C3
+
+leal 3(%rbp), %esi
+. 1658 0x12345678 4
+. 8D 75 03 C3
+
+leal 3(%rbp), %r12d
+. 1659 0x12345678 5
+. 44 8D 65 03 C3
+
+leal 3(%rbp), %r13d
+. 1660 0x12345678 5
+. 44 8D 6D 03 C3
+
+leal 3(%rdi), %eax
+. 1661 0x12345678 4
+. 8D 47 03 C3
+
+leal 3(%rdx), %eax
+. 1662 0x12345678 4
+. 8D 42 03 C3
+
+leal 3(%rdx,%rbp), %ebp
+. 1663 0x12345678 5
+. 8D 6C 2A 03 C3
+
+leal 3(%rsi), %eax
+. 1664 0x12345678 4
+. 8D 46 03 C3
+
+leal 43(%rdx,%rdx), %edx
+. 1665 0x12345678 5
+. 8D 54 12 2B C3
+
+leal 48(%r14), %eax
+. 1666 0x12345678 5
+. 41 8D 46 30 C3
+
+leal -48(%rax,%rbp), %ebp
+. 1667 0x12345678 5
+. 8D 6C 28 D0 C3
+
+leal -48(%rdx), %eax
+. 1668 0x12345678 4
+. 8D 42 D0 C3
+
+leal 4(%rax,%rbx), %r15d
+. 1669 0x12345678 6
+. 44 8D 7C 18 04 C3
+
+leal 4(%rbp), %eax
+. 1670 0x12345678 4
+. 8D 45 04 C3
+
+leal -4(%rbp), %edi
+. 1671 0x12345678 4
+. 8D 7D FC C3
+
+leal 4(%rbp), %edi
+. 1672 0x12345678 4
+. 8D 7D 04 C3
+
+leal -4(%rcx), %eax
+. 1673 0x12345678 4
+. 8D 41 FC C3
+
+leal -4(%rdi), %eax
+. 1674 0x12345678 4
+. 8D 47 FC C3
+
+leal -4(%rdx), %eax
+. 1675 0x12345678 4
+. 8D 42 FC C3
+
+leal 4(%rdx,%rbp), %ebp
+. 1676 0x12345678 5
+. 8D 6C 2A 04 C3
+
+leal -4(%rsi), %eax
+. 1677 0x12345678 4
+. 8D 46 FC C3
+
+leal -4(%rsi), %ecx
+. 1678 0x12345678 4
+. 8D 4E FC C3
+
+leal -64(%rax), %r13d
+. 1679 0x12345678 5
+. 44 8D 68 C0 C3
+
+leal 64(,%rbx,8), %edi
+. 1680 0x12345678 8
+. 8D 3C DD 40 00 00 00 C3
+
+leal 64(,%rdi,8), %edi
+. 1681 0x12345678 8
+. 8D 3C FD 40 00 00 00 C3
+
+leal -69631(%rdi), %eax
+. 1682 0x12345678 7
+. 8D 87 01 F0 FE FF C3
+
+leal -69633(%rbx), %eax
+. 1683 0x12345678 7
+. 8D 83 FF EF FE FF C3
+
+leal -69633(%rcx), %eax
+. 1684 0x12345678 7
+. 8D 81 FF EF FE FF C3
+
+leal -69633(%rdi), %eax
+. 1685 0x12345678 7
+. 8D 87 FF EF FE FF C3
+
+leal -69633(%rdx), %eax
+. 1686 0x12345678 7
+. 8D 82 FF EF FE FF C3
+
+leal 69634(%r13,%r13), %r13d
+. 1687 0x12345678 9
+. 47 8D AC 2D 02 10 01 00 C3
+
+leal -72(%rax), %r13d
+. 1688 0x12345678 5
+. 44 8D 68 B8 C3
+
+leal -73731(%rcx), %eax
+. 1689 0x12345678 7
+. 8D 81 FD DF FE FF C3
+
+leal -73731(%rdi), %eax
+. 1690 0x12345678 7
+. 8D 87 FD DF FE FF C3
+
+leal -73731(%rdx), %eax
+. 1691 0x12345678 7
+. 8D 82 FD DF FE FF C3
+
+leal -77825(%rdi), %eax
+. 1692 0x12345678 7
+. 8D 87 FF CF FE FF C3
+
+leal -77837(%rcx), %eax
+. 1693 0x12345678 7
+. 8D 81 F3 CF FE FF C3
+
+leal -78092(%r14), %eax
+. 1694 0x12345678 8
+. 41 8D 86 F4 CE FE FF C3
+
+leal -7(%rbx), %eax
+. 1695 0x12345678 4
+. 8D 43 F9 C3
+
+leal 7(%rdx), %eax
+. 1696 0x12345678 4
+. 8D 42 07 C3
+
+leal -80(%rax), %r12d
+. 1697 0x12345678 5
+. 44 8D 60 B0 C3
+
+leal -80(%rax), %r13d
+. 1698 0x12345678 5
+. 44 8D 68 B0 C3
+
+leal -81920(%rdi), %eax
+. 1699 0x12345678 7
+. 8D 87 00 C0 FE FF C3
+
+leal -88(%rax), %ebx
+. 1700 0x12345678 4
+. 8D 58 A8 C3
+
+leal -88(%rdx), %ebx
+. 1701 0x12345678 4
+. 8D 5A A8 C3
+
+leal -94208(%r13), %eax
+. 1702 0x12345678 8
+. 41 8D 85 00 90 FE FF C3
+
+leal -97(%rdx), %eax
+. 1703 0x12345678 4
+. 8D 42 9F C3
+
+leal (%r12,%r13), %eax
+. 1704 0x12345678 5
+. 43 8D 04 2C C3
+
+leal (%r9,%rbx), %ebp
+. 1705 0x12345678 5
+. 41 8D 2C 19 C3
+
+leal (%r9,%rcx), %r9d
+. 1706 0x12345678 5
+. 45 8D 0C 09 C3
+
+leal (%rax,%r12,2), %edi
+. 1707 0x12345678 5
+. 42 8D 3C 60 C3
+
+leal (%rax,%r13), %edi
+. 1708 0x12345678 5
+. 42 8D 3C 28 C3
+
+leal (%rax,%rax,2), %eax
+. 1709 0x12345678 4
+. 8D 04 40 C3
+
+leal (%rax,%rax), %edi
+. 1710 0x12345678 4
+. 8D 3C 00 C3
+
+leal (%rax,%rbp), %r15d
+. 1711 0x12345678 5
+. 44 8D 3C 28 C3
+
+leal (%rax,%rbx,4), %eax
+. 1712 0x12345678 4
+. 8D 04 98 C3
+
+leal (%rax,%rbx,8), %eax
+. 1713 0x12345678 4
+. 8D 04 D8 C3
+
+leal (%rax,%rbx), %r15d
+. 1714 0x12345678 5
+. 44 8D 3C 18 C3
+
+leal (%rax,%rdx), %eax
+. 1715 0x12345678 4
+. 8D 04 10 C3
+
+leal (%rbp,%r12), %eax
+. 1716 0x12345678 6
+. 42 8D 44 25 00 C3
+
+leal (%rbp,%r12), %ebx
+. 1717 0x12345678 6
+. 42 8D 5C 25 00 C3
+
+leal (%rbp,%r12), %esi
+. 1718 0x12345678 6
+. 42 8D 74 25 00 C3
+
+leal (%rbp,%rbx), %r8d
+. 1719 0x12345678 6
+. 44 8D 44 1D 00 C3
+
+leal (%rbx,%rbp), %edi
+. 1720 0x12345678 4
+. 8D 3C 2B C3
+
+leal (%rbx,%rbp), %esi
+. 1721 0x12345678 4
+. 8D 34 2B C3
+
+leal (%rcx,%rcx), %edi
+. 1722 0x12345678 4
+. 8D 3C 09 C3
+
+leal (%rdi,%rbx,4), %eax
+. 1723 0x12345678 4
+. 8D 04 9F C3
+
+leal (%rdi,%rcx), %edi
+. 1724 0x12345678 4
+. 8D 3C 0F C3
+
+leal (%rdx,%rbp), %r11d
+. 1725 0x12345678 5
+. 44 8D 1C 2A C3
+
+leal (%rdx,%rbx), %edx
+. 1726 0x12345678 4
+. 8D 14 1A C3
+
+leal (%rdx,%rdi), %r10d
+. 1727 0x12345678 5
+. 44 8D 14 3A C3
+
+leal (%rdx,%rdx), %eax
+. 1728 0x12345678 4
+. 8D 04 12 C3
+
+leal (%rdx,%rdx), %edi
+. 1729 0x12345678 4
+. 8D 3C 12 C3
+
+leal (%rsi,%rsi,2), %esi
+. 1730 0x12345678 4
+. 8D 34 76 C3
+
+leal (%rsi,%rsi), %eax
+. 1731 0x12345678 4
+. 8D 04 36 C3
+
+leal (%rsi,%rsi), %edx
+. 1732 0x12345678 4
+. 8D 14 36 C3
+
+leaq 0(,%rax,4), %r13
+. 1733 0x12345678 9
+. 4C 8D 2C 85 00 00 00 00 C3
+
+leaq 0(,%rax,4), %rbx
+. 1734 0x12345678 9
+. 48 8D 1C 85 00 00 00 00 C3
+
+leaq 0(,%rax,4), %rcx
+. 1735 0x12345678 9
+. 48 8D 0C 85 00 00 00 00 C3
+
+leaq 0(,%rax,4), %rdi
+. 1736 0x12345678 9
+. 48 8D 3C 85 00 00 00 00 C3
+
+leaq 0(,%rax,4), %rdx
+. 1737 0x12345678 9
+. 48 8D 14 85 00 00 00 00 C3
+
+leaq 0(,%rdx,4), %rax
+. 1738 0x12345678 9
+. 48 8D 04 95 00 00 00 00 C3
+
+leaq 100(%rsp), %rdx
+. 1739 0x12345678 6
+. 48 8D 54 24 64 C3
+
+leaq 104(%rsp), %rcx
+. 1740 0x12345678 6
+. 48 8D 4C 24 68 C3
+
+leaq 108(%rsp), %r8
+. 1741 0x12345678 6
+. 4C 8D 44 24 6C C3
+
+leaq 112(%rsp), %r15
+. 1742 0x12345678 6
+. 4C 8D 7C 24 70 C3
+
+leaq 116(%rsp), %rdi
+. 1743 0x12345678 6
+. 48 8D 7C 24 74 C3
+
+leaq 120(%rsp), %rsi
+. 1744 0x12345678 6
+. 48 8D 74 24 78 C3
+
+leaq 128(%rsp), %r15
+. 1745 0x12345678 9
+. 4C 8D BC 24 80 00 00 00 C3
+
+leaq 12(%rsp), %rdi
+. 1746 0x12345678 6
+. 48 8D 7C 24 0C C3
+
+leaq 12(%rsp), %rsi
+. 1747 0x12345678 6
+. 48 8D 74 24 0C C3
+
+leaq 144(%rsp), %r8
+. 1748 0x12345678 9
+. 4C 8D 84 24 90 00 00 00 C3
+
+leaq 144(%rsp), %rbx
+. 1749 0x12345678 9
+. 48 8D 9C 24 90 00 00 00 C3
+
+leaq 144(%rsp), %rdi
+. 1750 0x12345678 9
+. 48 8D BC 24 90 00 00 00 C3
+
+leaq 151(%rsp), %rdi
+. 1751 0x12345678 9
+. 48 8D BC 24 97 00 00 00 C3
+
+leaq 152(%rsp), %rdx
+. 1752 0x12345678 9
+. 48 8D 94 24 98 00 00 00 C3
+
+leaq 160(%rsp), %rax
+. 1753 0x12345678 9
+. 48 8D 84 24 A0 00 00 00 C3
+
+leaq 168(%rsp), %rdi
+. 1754 0x12345678 9
+. 48 8D BC 24 A8 00 00 00 C3
+
+leaq 168(%rsp), %rsi
+. 1755 0x12345678 9
+. 48 8D B4 24 A8 00 00 00 C3
+
+leaq 16(%rbp), %rdi
+. 1756 0x12345678 5
+. 48 8D 7D 10 C3
+
+leaq 16(%rbx), %rdi
+. 1757 0x12345678 5
+. 48 8D 7B 10 C3
+
+leaq 16(%rdx), %rsi
+. 1758 0x12345678 5
+. 48 8D 72 10 C3
+
+leaq 16(%rsp), %r13
+. 1759 0x12345678 6
+. 4C 8D 6C 24 10 C3
+
+leaq 16(%rsp), %r14
+. 1760 0x12345678 6
+. 4C 8D 74 24 10 C3
+
+leaq 16(%rsp), %r15
+. 1761 0x12345678 6
+. 4C 8D 7C 24 10 C3
+
+leaq 16(%rsp), %rcx
+. 1762 0x12345678 6
+. 48 8D 4C 24 10 C3
+
+leaq 16(%rsp), %rdx
+. 1763 0x12345678 6
+. 48 8D 54 24 10 C3
+
+leaq 16(%rsp), %rsi
+. 1764 0x12345678 6
+. 48 8D 74 24 10 C3
+
+leaq 172(%rsp), %rdi
+. 1765 0x12345678 9
+. 48 8D BC 24 AC 00 00 00 C3
+
+leaq 172(%rsp), %rdx
+. 1766 0x12345678 9
+. 48 8D 94 24 AC 00 00 00 C3
+
+leaq 176(%rsp), %r8
+. 1767 0x12345678 9
+. 4C 8D 84 24 B0 00 00 00 C3
+
+leaq 176(%rsp), %rdi
+. 1768 0x12345678 9
+. 48 8D BC 24 B0 00 00 00 C3
+
+leaq 17(%rbx), %rsi
+. 1769 0x12345678 5
+. 48 8D 73 11 C3
+
+leaq 180(%rsp), %rdi
+. 1770 0x12345678 9
+. 48 8D BC 24 B4 00 00 00 C3
+
+leaq 184(%rsp), %rdi
+. 1771 0x12345678 9
+. 48 8D BC 24 B8 00 00 00 C3
+
+leaq 188(%rsp), %rdi
+. 1772 0x12345678 9
+. 48 8D BC 24 BC 00 00 00 C3
+
+leaq 188(%rsp), %rsi
+. 1773 0x12345678 9
+. 48 8D B4 24 BC 00 00 00 C3
+
+leaq 18(%r15), %rbp
+. 1774 0x12345678 5
+. 49 8D 6F 12 C3
+
+leaq 192(%rsp), %r13
+. 1775 0x12345678 9
+. 4C 8D AC 24 C0 00 00 00 C3
+
+leaq 192(%rsp), %rdi
+. 1776 0x12345678 9
+. 48 8D BC 24 C0 00 00 00 C3
+
+leaq 1(%r15), %rax
+. 1777 0x12345678 5
+. 49 8D 47 01 C3
+
+leaq 1(%r15), %rdi
+. 1778 0x12345678 5
+. 49 8D 7F 01 C3
+
+leaq 1(%rbp), %rax
+. 1779 0x12345678 5
+. 48 8D 45 01 C3
+
+leaq 1(%rbx), %rdi
+. 1780 0x12345678 5
+. 48 8D 7B 01 C3
+
+leaq 1(%rcx), %rdi
+. 1781 0x12345678 5
+. 48 8D 79 01 C3
+
+leaq 207(%rsp), %rax
+. 1782 0x12345678 9
+. 48 8D 84 24 CF 00 00 00 C3
+
+leaq 208(%rsp), %r9
+. 1783 0x12345678 9
+. 4C 8D 8C 24 D0 00 00 00 C3
+
+leaq 208(%rsp), %rcx
+. 1784 0x12345678 9
+. 48 8D 8C 24 D0 00 00 00 C3
+
+leaq 208(%rsp), %rdi
+. 1785 0x12345678 9
+. 48 8D BC 24 D0 00 00 00 C3
+
+leaq 208(%rsp), %rdx
+. 1786 0x12345678 9
+. 48 8D 94 24 D0 00 00 00 C3
+
+leaq 20(%rsp), %r12
+. 1787 0x12345678 6
+. 4C 8D 64 24 14 C3
+
+leaq 20(%rsp), %rcx
+. 1788 0x12345678 6
+. 48 8D 4C 24 14 C3
+
+leaq 20(%rsp), %rdx
+. 1789 0x12345678 6
+. 48 8D 54 24 14 C3
+
+leaq 224(%rsp), %rax
+. 1790 0x12345678 9
+. 48 8D 84 24 E0 00 00 00 C3
+
+leaq 240(%rsp), %r13
+. 1791 0x12345678 9
+. 4C 8D AC 24 F0 00 00 00 C3
+
+leaq 240(%rsp), %rax
+. 1792 0x12345678 9
+. 48 8D 84 24 F0 00 00 00 C3
+
+leaq 240(%rsp), %rdi
+. 1793 0x12345678 9
+. 48 8D BC 24 F0 00 00 00 C3
+
+leaq 24(%rsp), %rsi
+. 1794 0x12345678 6
+. 48 8D 74 24 18 C3
+
+leaq 272(%rsp), %r13
+. 1795 0x12345678 9
+. 4C 8D AC 24 10 01 00 00 C3
+
+leaq 272(%rsp), %rcx
+. 1796 0x12345678 9
+. 48 8D 8C 24 10 01 00 00 C3
+
+leaq 28(%rsp), %rdi
+. 1797 0x12345678 6
+. 48 8D 7C 24 1C C3
+
+leaq 2(%r15), %rdi
+. 1798 0x12345678 5
+. 49 8D 7F 02 C3
+
+leaq 2(%rcx), %rdi
+. 1799 0x12345678 5
+. 48 8D 79 02 C3
+
+leaq 324(%rsp), %rdi
+. 1800 0x12345678 9
+. 48 8D BC 24 44 01 00 00 C3
+
+leaq 32(%rsp), %r13
+. 1801 0x12345678 6
+. 4C 8D 6C 24 20 C3
+
+leaq 32(%rsp), %r14
+. 1802 0x12345678 6
+. 4C 8D 74 24 20 C3
+
+leaq 32(%rsp), %r8
+. 1803 0x12345678 6
+. 4C 8D 44 24 20 C3
+
+leaq 32(%rsp), %rax
+. 1804 0x12345678 6
+. 48 8D 44 24 20 C3
+
+leaq 32(%rsp), %rbx
+. 1805 0x12345678 6
+. 48 8D 5C 24 20 C3
+
+leaq 32(%rsp), %rcx
+. 1806 0x12345678 6
+. 48 8D 4C 24 20 C3
+
+leaq 32(%rsp), %rdx
+. 1807 0x12345678 6
+. 48 8D 54 24 20 C3
+
+leaq 32(%rsp), %rsi
+. 1808 0x12345678 6
+. 48 8D 74 24 20 C3
+
+leaq 331(%rsp), %r15
+. 1809 0x12345678 9
+. 4C 8D BC 24 4B 01 00 00 C3
+
+leaq 332(%rsp), %rdi
+. 1810 0x12345678 9
+. 48 8D BC 24 4C 01 00 00 C3
+
+leaq 336(%rsp), %rbx
+. 1811 0x12345678 9
+. 48 8D 9C 24 50 01 00 00 C3
+
+leaq 336(%rsp), %rcx
+. 1812 0x12345678 9
+. 48 8D 8C 24 50 01 00 00 C3
+
+leaq 336(%rsp), %rdi
+. 1813 0x12345678 9
+. 48 8D BC 24 50 01 00 00 C3
+
+leaq 340(%rsp), %rdi
+. 1814 0x12345678 9
+. 48 8D BC 24 54 01 00 00 C3
+
+leaq 344(%rsp), %rdi
+. 1815 0x12345678 9
+. 48 8D BC 24 58 01 00 00 C3
+
+leaq 352(%rsp), %rdi
+. 1816 0x12345678 9
+. 48 8D BC 24 60 01 00 00 C3
+
+leaq 356(%rsp), %rdi
+. 1817 0x12345678 9
+. 48 8D BC 24 64 01 00 00 C3
+
+leaq 360(%rsp), %rdi
+. 1818 0x12345678 9
+. 48 8D BC 24 68 01 00 00 C3
+
+leaq 364(%rsp), %rdi
+. 1819 0x12345678 9
+. 48 8D BC 24 6C 01 00 00 C3
+
+leaq 368(%rsp), %rdi
+. 1820 0x12345678 9
+. 48 8D BC 24 70 01 00 00 C3
+
+leaq 36(%rsp), %rdi
+. 1821 0x12345678 6
+. 48 8D 7C 24 24 C3
+
+leaq 375(%rsp), %r15
+. 1822 0x12345678 9
+. 4C 8D BC 24 77 01 00 00 C3
+
+leaq 376(%rsp), %rsi
+. 1823 0x12345678 9
+. 48 8D B4 24 78 01 00 00 C3
+
+leaq 380(%rsp), %rdx
+. 1824 0x12345678 9
+. 48 8D 94 24 7C 01 00 00 C3
+
+leaq 384(%rsp), %rcx
+. 1825 0x12345678 9
+. 48 8D 8C 24 80 01 00 00 C3
+
+leaq 388(%rsp), %r8
+. 1826 0x12345678 9
+. 4C 8D 84 24 84 01 00 00 C3
+
+leaq 392(%rsp), %rsi
+. 1827 0x12345678 9
+. 48 8D B4 24 88 01 00 00 C3
+
+leaq 396(%rsp), %rdx
+. 1828 0x12345678 9
+. 48 8D 94 24 8C 01 00 00 C3
+
+leaq 3(%r15), %rdi
+. 1829 0x12345678 5
+. 49 8D 7F 03 C3
+
+leaq 3(%rcx), %rdi
+. 1830 0x12345678 5
+. 48 8D 79 03 C3
+
+leaq 400(%rsp), %r13
+. 1831 0x12345678 9
+. 4C 8D AC 24 90 01 00 00 C3
+
+leaq 400(%rsp), %rcx
+. 1832 0x12345678 9
+. 48 8D 8C 24 90 01 00 00 C3
+
+leaq 400(%rsp), %rdx
+. 1833 0x12345678 9
+. 48 8D 94 24 90 01 00 00 C3
+
+leaq 400(%rsp), %rsi
+. 1834 0x12345678 9
+. 48 8D B4 24 90 01 00 00 C3
+
+leaq 404(%rsp), %r8
+. 1835 0x12345678 9
+. 4C 8D 84 24 94 01 00 00 C3
+
+leaq 408(%rsp), %rsi
+. 1836 0x12345678 9
+. 48 8D B4 24 98 01 00 00 C3
+
+leaq 40(%rsp), %rcx
+. 1837 0x12345678 6
+. 48 8D 4C 24 28 C3
+
+leaq 40(%rsp), %rsi
+. 1838 0x12345678 6
+. 48 8D 74 24 28 C3
+
+leaq 412(%rsp), %rdx
+. 1839 0x12345678 9
+. 48 8D 94 24 9C 01 00 00 C3
+
+leaq 416(%rsp), %rcx
+. 1840 0x12345678 9
+. 48 8D 8C 24 A0 01 00 00 C3
+
+leaq 420(%rsp), %r8
+. 1841 0x12345678 9
+. 4C 8D 84 24 A4 01 00 00 C3
+
+leaq 424(%rsp), %rsi
+. 1842 0x12345678 9
+. 48 8D B4 24 A8 01 00 00 C3
+
+leaq 428(%rsp), %rdx
+. 1843 0x12345678 9
+. 48 8D 94 24 AC 01 00 00 C3
+
+leaq 432(%rsp), %rcx
+. 1844 0x12345678 9
+. 48 8D 8C 24 B0 01 00 00 C3
+
+leaq 436(%rsp), %r8
+. 1845 0x12345678 9
+. 4C 8D 84 24 B4 01 00 00 C3
+
+leaq 43(%rsp), %rdi
+. 1846 0x12345678 6
+. 48 8D 7C 24 2B C3
+
+leaq 440(%rsp), %rsi
+. 1847 0x12345678 9
+. 48 8D B4 24 B8 01 00 00 C3
+
+leaq 444(%rsp), %rdx
+. 1848 0x12345678 9
+. 48 8D 94 24 BC 01 00 00 C3
+
+leaq 448(%rsp), %rcx
+. 1849 0x12345678 9
+. 48 8D 8C 24 C0 01 00 00 C3
+
+leaq 44(%rsp), %r8
+. 1850 0x12345678 6
+. 4C 8D 44 24 2C C3
+
+leaq 44(%rsp), %rdi
+. 1851 0x12345678 6
+. 48 8D 7C 24 2C C3
+
+leaq 44(%rsp), %rsi
+. 1852 0x12345678 6
+. 48 8D 74 24 2C C3
+
+leaq 452(%rsp), %r8
+. 1853 0x12345678 9
+. 4C 8D 84 24 C4 01 00 00 C3
+
+leaq 456(%rsp), %rdi
+. 1854 0x12345678 9
+. 48 8D BC 24 C8 01 00 00 C3
+
+leaq 460(%rsp), %rsi
+. 1855 0x12345678 9
+. 48 8D B4 24 CC 01 00 00 C3
+
+leaq 464(%rsp), %rdx
+. 1856 0x12345678 9
+. 48 8D 94 24 D0 01 00 00 C3
+
+leaq 468(%rsp), %rcx
+. 1857 0x12345678 9
+. 48 8D 8C 24 D4 01 00 00 C3
+
+leaq 472(%rsp), %r8
+. 1858 0x12345678 9
+. 4C 8D 84 24 D8 01 00 00 C3
+
+leaq 476(%rsp), %rsi
+. 1859 0x12345678 9
+. 48 8D B4 24 DC 01 00 00 C3
+
+leaq 480(%rsp), %rdx
+. 1860 0x12345678 9
+. 48 8D 94 24 E0 01 00 00 C3
+
+leaq 484(%rsp), %rcx
+. 1861 0x12345678 9
+. 48 8D 8C 24 E4 01 00 00 C3
+
+leaq 488(%rsp), %r8
+. 1862 0x12345678 9
+. 4C 8D 84 24 E8 01 00 00 C3
+
+leaq 48(%r12,%rbx,4), %rbx
+. 1863 0x12345678 6
+. 49 8D 5C 9C 30 C3
+
+leaq 48(%r13,%rax,4), %rax
+. 1864 0x12345678 6
+. 49 8D 44 85 30 C3
+
+leaq 48(%rbp,%rax), %rax
+. 1865 0x12345678 6
+. 48 8D 44 05 30 C3
+
+leaq 48(%rbx,%rax), %rcx
+. 1866 0x12345678 6
+. 48 8D 4C 03 30 C3
+
+leaq 48(%rsp), %r8
+. 1867 0x12345678 6
+. 4C 8D 44 24 30 C3
+
+leaq 48(%rsp), %rbp
+. 1868 0x12345678 6
+. 48 8D 6C 24 30 C3
+
+leaq 48(%rsp), %rcx
+. 1869 0x12345678 6
+. 48 8D 4C 24 30 C3
+
+leaq 48(%rsp), %rsi
+. 1870 0x12345678 6
+. 48 8D 74 24 30 C3
+
+leaq 492(%rsp), %rsi
+. 1871 0x12345678 9
+. 48 8D B4 24 EC 01 00 00 C3
+
+leaq 496(%rsp), %rdx
+. 1872 0x12345678 9
+. 48 8D 94 24 F0 01 00 00 C3
+
+leaq 4(%rax,%rbx), %rax
+. 1873 0x12345678 6
+. 48 8D 44 18 04 C3
+
+leaq 500(%rsp), %rcx
+. 1874 0x12345678 9
+. 48 8D 8C 24 F4 01 00 00 C3
+
+leaq 504(%rsp), %r8
+. 1875 0x12345678 9
+. 4C 8D 84 24 F8 01 00 00 C3
+
+leaq 508(%rsp), %rsi
+. 1876 0x12345678 9
+. 48 8D B4 24 FC 01 00 00 C3
+
+leaq 512(%rsp), %rdx
+. 1877 0x12345678 9
+. 48 8D 94 24 00 02 00 00 C3
+
+leaq 516(%rsp), %rcx
+. 1878 0x12345678 9
+. 48 8D 8C 24 04 02 00 00 C3
+
+leaq 520(%rsp), %r8
+. 1879 0x12345678 9
+. 4C 8D 84 24 08 02 00 00 C3
+
+leaq 524(%rsp), %rsi
+. 1880 0x12345678 9
+. 48 8D B4 24 0C 02 00 00 C3
+
+leaq 528(%rsp), %rdx
+. 1881 0x12345678 9
+. 48 8D 94 24 10 02 00 00 C3
+
+leaq 52(%rsp), %rdx
+. 1882 0x12345678 6
+. 48 8D 54 24 34 C3
+
+leaq 532(%rsp), %rcx
+. 1883 0x12345678 9
+. 48 8D 8C 24 14 02 00 00 C3
+
+leaq 536(%rsp), %r8
+. 1884 0x12345678 9
+. 4C 8D 84 24 18 02 00 00 C3
+
+leaq 540(%rsp), %rsi
+. 1885 0x12345678 9
+. 48 8D B4 24 1C 02 00 00 C3
+
+leaq 544(%rsp), %rdx
+. 1886 0x12345678 9
+. 48 8D 94 24 20 02 00 00 C3
+
+leaq 548(%rsp), %rcx
+. 1887 0x12345678 9
+. 48 8D 8C 24 24 02 00 00 C3
+
+leaq 552(%rsp), %r8
+. 1888 0x12345678 9
+. 4C 8D 84 24 28 02 00 00 C3
+
+leaq 556(%rsp), %rdi
+. 1889 0x12345678 9
+. 48 8D BC 24 2C 02 00 00 C3
+
+leaq 576(%rsp), %r14
+. 1890 0x12345678 9
+. 4C 8D B4 24 40 02 00 00 C3
+
+leaq 576(%rsp), %rcx
+. 1891 0x12345678 9
+. 48 8D 8C 24 40 02 00 00 C3
+
+leaq 576(%rsp), %rdx
+. 1892 0x12345678 9
+. 48 8D 94 24 40 02 00 00 C3
+
+leaq 60(%rsp), %rdi
+. 1893 0x12345678 6
+. 48 8D 7C 24 3C C3
+
+leaq 640(%rsp), %rcx
+. 1894 0x12345678 9
+. 48 8D 8C 24 80 02 00 00 C3
+
+leaq 640(%rsp), %rdx
+. 1895 0x12345678 9
+. 48 8D 94 24 80 02 00 00 C3
+
+leaq 64(%rsp), %r14
+. 1896 0x12345678 6
+. 4C 8D 74 24 40 C3
+
+leaq 64(%rsp), %rcx
+. 1897 0x12345678 6
+. 48 8D 4C 24 40 C3
+
+leaq 64(%rsp), %rsi
+. 1898 0x12345678 6
+. 48 8D 74 24 40 C3
+
+leaq 672(%rsp), %rdi
+. 1899 0x12345678 9
+. 48 8D BC 24 A0 02 00 00 C3
+
+leaq 704(%rsp), %rcx
+. 1900 0x12345678 9
+. 48 8D 8C 24 C0 02 00 00 C3
+
+leaq 768(%rsp), %r13
+. 1901 0x12345678 9
+. 4C 8D AC 24 00 03 00 00 C3
+
+leaq 768(%rsp), %r14
+. 1902 0x12345678 9
+. 4C 8D B4 24 00 03 00 00 C3
+
+leaq 768(%rsp), %rcx
+. 1903 0x12345678 9
+. 48 8D 8C 24 00 03 00 00 C3
+
+leaq 768(%rsp), %rdx
+. 1904 0x12345678 9
+. 48 8D 94 24 00 03 00 00 C3
+
+leaq 76(%rsp), %rdi
+. 1905 0x12345678 6
+. 48 8D 7C 24 4C C3
+
+leaq 80(%rsp), %rcx
+. 1906 0x12345678 6
+. 48 8D 4C 24 50 C3
+
+leaq 832(%rsp), %r12
+. 1907 0x12345678 9
+. 4C 8D A4 24 40 03 00 00 C3
+
+leaq 832(%rsp), %r13
+. 1908 0x12345678 9
+. 4C 8D AC 24 40 03 00 00 C3
+
+leaq 832(%rsp), %r14
+. 1909 0x12345678 9
+. 4C 8D B4 24 40 03 00 00 C3
+
+leaq 832(%rsp), %r15
+. 1910 0x12345678 9
+. 4C 8D BC 24 40 03 00 00 C3
+
+leaq 896(%rsp), %r12
+. 1911 0x12345678 9
+. 4C 8D A4 24 80 03 00 00 C3
+
+leaq 896(%rsp), %r13
+. 1912 0x12345678 9
+. 4C 8D AC 24 80 03 00 00 C3
+
+leaq 896(%rsp), %r14
+. 1913 0x12345678 9
+. 4C 8D B4 24 80 03 00 00 C3
+
+leaq 896(%rsp), %r15
+. 1914 0x12345678 9
+. 4C 8D BC 24 80 03 00 00 C3
+
+leaq 896(%rsp), %rbx
+. 1915 0x12345678 9
+. 48 8D 9C 24 80 03 00 00 C3
+
+leaq 896(%rsp), %rcx
+. 1916 0x12345678 9
+. 48 8D 8C 24 80 03 00 00 C3
+
+leaq 896(%rsp), %rdx
+. 1917 0x12345678 9
+. 48 8D 94 24 80 03 00 00 C3
+
+leaq 896(%rsp), %rsi
+. 1918 0x12345678 9
+. 48 8D B4 24 80 03 00 00 C3
+
+leaq 8(%rdx), %rax
+. 1919 0x12345678 5
+. 48 8D 42 08 C3
+
+leaq 92(%rsp), %rax
+. 1920 0x12345678 6
+. 48 8D 44 24 5C C3
+
+leaq 96(%rsp), %rdx
+. 1921 0x12345678 6
+. 48 8D 54 24 60 C3
+
+leaq (%r12,%rbx), %rax
+. 1922 0x12345678 5
+. 49 8D 04 1C C3
+
+leaq (%rax,%rax,2), %rax
+. 1923 0x12345678 5
+. 48 8D 04 40 C3
+
+leaq (%rax,%rax,4), %rax
+. 1924 0x12345678 5
+. 48 8D 04 80 C3
+
+leaq (%rax,%rax,4), %rdx
+. 1925 0x12345678 5
+. 48 8D 14 80 C3
+
+leaq (%rax,%rbp), %rax
+. 1926 0x12345678 5
+. 48 8D 04 28 C3
+
+leaq (%rbp,%r12), %r13
+. 1927 0x12345678 6
+. 4E 8D 6C 25 00 C3
+
+leaq (%rbp,%r12), %rbx
+. 1928 0x12345678 6
+. 4A 8D 5C 25 00 C3
+
+leaq (%rbx,%rbx,2), %rbx
+. 1929 0x12345678 5
+. 48 8D 1C 5B C3
+
+leaq (%rbx,%rbx,4), %rax
+. 1930 0x12345678 5
+. 48 8D 04 9B C3
+
+leaq (%rbx,%rbx,4), %rdi
+. 1931 0x12345678 5
+. 48 8D 3C 9B C3
+
+leaq (%rcx,%rcx,2), %rax
+. 1932 0x12345678 5
+. 48 8D 04 49 C3
+
+leaq (%rcx,%rcx,2), %rdx
+. 1933 0x12345678 5
+. 48 8D 14 49 C3
+
+leaq (%rdi,%rbp), %rdi
+. 1934 0x12345678 5
+. 48 8D 3C 2F C3
+
+leaq (%rdi,%rdi,2), %rdi
+. 1935 0x12345678 5
+. 48 8D 3C 7F C3
+
+leaq (%rdx,%rdx,2), %rax
+. 1936 0x12345678 5
+. 48 8D 04 52 C3
+
+leaq (%rdx,%rdx,4), %rax
+. 1937 0x12345678 5
+. 48 8D 04 92 C3
+
+leaq (%rsi,%rbp), %rsi
+. 1938 0x12345678 5
+. 48 8D 34 2E C3
+
+leaq (%rsi,%rsi,4), %rax
+. 1939 0x12345678 5
+. 48 8D 04 B6 C3
+
+mov 104(%rsp), %r9d
+. 1940 0x12345678 6
+. 44 8B 4C 24 68 C3
+
+mov 124(%rsp), %ebx
+. 1941 0x12345678 5
+. 8B 5C 24 7C C3
+
+mov 12(%rax), %edi
+. 1942 0x12345678 4
+. 8B 78 0C C3
+
+mov -20(%rsp), %ecx
+. 1943 0x12345678 5
+. 8B 4C 24 EC C3
+
+mov 24(%rdx), %eax
+. 1944 0x12345678 4
+. 8B 42 18 C3
+
+mov 292(%rsp), %r12d
+. 1945 0x12345678 9
+. 44 8B A4 24 24 01 00 00 C3
+
+mov 48(%rsp), %eax
+. 1946 0x12345678 5
+. 8B 44 24 30 C3
+
+mov 8(%r12), %eax
+. 1947 0x12345678 6
+. 41 8B 44 24 08 C3
+
+mov 8(%rax), %eax
+. 1948 0x12345678 4
+. 8B 40 08 C3
+
+mov 8(%rax), %edi
+. 1949 0x12345678 4
+. 8B 78 08 C3
+
+mov 8(%rbp), %eax
+. 1950 0x12345678 4
+. 8B 45 08 C3
+
+mov 8(%rcx), %r8d
+. 1951 0x12345678 5
+. 44 8B 41 08 C3
+
+mov 8(%rdx), %r8d
+. 1952 0x12345678 5
+. 44 8B 42 08 C3
+
+mov -8(%rsp), %edx
+. 1953 0x12345678 5
+. 8B 54 24 F8 C3
+
+mov 8(%rsp), %edx
+. 1954 0x12345678 5
+. 8B 54 24 08 C3
+
+mov 8(%rsp), %esi
+. 1955 0x12345678 5
+. 8B 74 24 08 C3
+
+movabsq $4599094494223104511, %rdi
+. 1956 0x12345678 11
+. 48 BF FF 79 9F 50 13 44 D3 3F C3
+
+movabsq $4604418534313441775, %rdi
+. 1957 0x12345678 11
+. 48 BF EF 39 FA FE 42 2E E6 3F C3
+
+movabsq $4607182418800017408, %rdi
+. 1958 0x12345678 11
+. 48 BF 00 00 00 00 00 00 F0 3F C3
+
+movabsq $4609176140021203710, %rdi
+. 1959 0x12345678 11
+. 48 BF FE 82 2B 65 47 15 F7 3F C3
+
+movabsq $4614256656552045848, %rdi
+. 1960 0x12345678 11
+. 48 BF 18 2D 44 54 FB 21 09 40 C3
+
+movabsq $4614662735865160561, %rdi
+. 1961 0x12345678 11
+. 48 BF 71 A3 79 09 4F 93 0A 40 C3
+
+movabsq $9221120237041090560, %rdi
+. 1962 0x12345678 11
+. 48 BF 00 00 00 00 00 00 F8 7F C3
+
+movb $0, 140(%rdi)
+. 1963 0x12345678 8
+. C6 87 8C 00 00 00 00 C3
+
+movb $0, 144(%rsp)
+. 1964 0x12345678 9
+. C6 84 24 90 00 00 00 00 C3
+
+movb $0, 156(%rdi)
+. 1965 0x12345678 8
+. C6 87 9C 00 00 00 00 C3
+
+movb $0, 157(%rdi)
+. 1966 0x12345678 8
+. C6 87 9D 00 00 00 00 C3
+
+movb $0, 158(%rdi)
+. 1967 0x12345678 8
+. C6 87 9E 00 00 00 00 C3
+
+movb $0, 15(%rsp)
+. 1968 0x12345678 6
+. C6 44 24 0F 00 C3
+
+movb $0, 160(%rsp)
+. 1969 0x12345678 9
+. C6 84 24 A0 00 00 00 00 C3
+
+movb $0, 160(%rsp,%rax)
+. 1970 0x12345678 9
+. C6 84 04 A0 00 00 00 00 C3
+
+movb $0, 16(%rax)
+. 1971 0x12345678 5
+. C6 40 10 00 C3
+
+movb $0, 16(%rax,%rdx)
+. 1972 0x12345678 6
+. C6 44 10 10 00 C3
+
+movb $0, 16(%rbx)
+. 1973 0x12345678 5
+. C6 43 10 00 C3
+
+movb $0, 16(%rsi,%rdx)
+. 1974 0x12345678 6
+. C6 44 16 10 00 C3
+
+movb $0, 17(%rax)
+. 1975 0x12345678 5
+. C6 40 11 00 C3
+
+movb $0, 17(%rbx)
+. 1976 0x12345678 5
+. C6 43 11 00 C3
+
+movb $0, 18(%rax)
+. 1977 0x12345678 5
+. C6 40 12 00 C3
+
+movb $0, 18(%rbx)
+. 1978 0x12345678 5
+. C6 43 12 00 C3
+
+movb $0, 270(%rsp)
+. 1979 0x12345678 9
+. C6 84 24 0E 01 00 00 00 C3
+
+movb $0, 283(%rsp)
+. 1980 0x12345678 9
+. C6 84 24 1B 01 00 00 00 C3
+
+movb $0, 331(%rsp)
+. 1981 0x12345678 9
+. C6 84 24 4B 01 00 00 00 C3
+
+movb $0, 375(%rsp)
+. 1982 0x12345678 9
+. C6 84 24 77 01 00 00 00 C3
+
+movb $0, 37(%rsp)
+. 1983 0x12345678 6
+. C6 44 24 25 00 C3
+
+movb $0, 39(%rsp)
+. 1984 0x12345678 6
+. C6 44 24 27 00 C3
+
+movb $0, 43(%rsp)
+. 1985 0x12345678 6
+. C6 44 24 2B 00 C3
+
+movb $0, 44(%rax)
+. 1986 0x12345678 5
+. C6 40 2C 00 C3
+
+movb $0, 47(%rsp)
+. 1987 0x12345678 6
+. C6 44 24 2F 00 C3
+
+movb $0, 4(%rsi,%rdx)
+. 1988 0x12345678 6
+. C6 44 16 04 00 C3
+
+movb $0, 8(%rdi)
+. 1989 0x12345678 5
+. C6 47 08 00 C3
+
+movb $0, 8(%rip)
+. 1990 0x12345678 8
+. C6 05 08 00 00 00 00 C3
+
+movb $0, 9(%rax)
+. 1991 0x12345678 5
+. C6 40 09 00 C3
+
+movb $0, 11(%rip)
+. 1992 0x12345678 8
+. C6 05 0B 00 00 00 00 C3
+
+movb $0, (%r14)
+. 1993 0x12345678 5
+. 41 C6 06 00 C3
+
+movb $0, (%r15)
+. 1994 0x12345678 5
+. 41 C6 07 00 C3
+
+movb $0, (%rax,%r13)
+. 1995 0x12345678 6
+. 42 C6 04 28 00 C3
+
+movb $0, (%rbx,%rax)
+. 1996 0x12345678 5
+. C6 04 03 00 C3
+
+movb $0, (%rcx,%r10)
+. 1997 0x12345678 6
+. 42 C6 04 11 00 C3
+
+movb $0, (%rdi)
+. 1998 0x12345678 4
+. C6 07 00 C3
+
+movb $0, (%rdx,%rax)
+. 1999 0x12345678 5
+. C6 04 02 00 C3
+
+movb $1, 151(%rsp)
+. 2000 0x12345678 9
+. C6 84 24 97 00 00 00 01 C3
+
+movb $1, 15(%rsp)
+. 2001 0x12345678 6
+. C6 44 24 0F 01 C3
+
+movb $1, 16(%rcx,%rdx)
+. 2002 0x12345678 6
+. C6 44 11 10 01 C3
+
+movb $1, 37(%rsp)
+. 2003 0x12345678 6
+. C6 44 24 25 01 C3
+
+movb $1, 39(%rsp)
+. 2004 0x12345678 6
+. C6 44 24 27 01 C3
+
+movb $1, 44(%rax)
+. 2005 0x12345678 5
+. C6 40 2C 01 C3
+
+movb $1, 47(%rsp)
+. 2006 0x12345678 6
+. C6 44 24 2F 01 C3
+
+movb $1, 4(%rcx,%rax,4)
+. 2007 0x12345678 6
+. C6 44 81 04 01 C3
+
+movb $1, 13(%rip)
+. 2008 0x12345678 8
+. C6 05 0D 00 00 00 01 C3
+
+movb $1, (%r12)
+. 2009 0x12345678 6
+. 41 C6 04 24 01 C3
+
+movb $1, (%r13)
+. 2010 0x12345678 6
+. 41 C6 45 00 01 C3
+
+movb $1, (%r15)
+. 2011 0x12345678 5
+. 41 C6 07 01 C3
+
+movb $1, (%rax,%r12)
+. 2012 0x12345678 6
+. 42 C6 04 20 01 C3
+
+movb $1, (%rdi)
+. 2013 0x12345678 4
+. C6 07 01 C3
+
+movb $1, (%rdx,%r14)
+. 2014 0x12345678 6
+. 42 C6 04 32 01 C3
+
+movb $1, (%rdx,%rax)
+. 2015 0x12345678 5
+. C6 04 02 01 C3
+
+movb $1, 99(%rip)
+. 2016 0x12345678 8
+. C6 05 63 00 00 00 01 C3
+
+movb $44, 160(%rsp,%rax)
+. 2017 0x12345678 9
+. C6 84 04 A0 00 00 00 2C C3
+
+movb $44, (%rsp,%rax)
+. 2018 0x12345678 5
+. C6 04 04 2C C3
+
+movb $45, (%rsp,%rax)
+. 2019 0x12345678 5
+. C6 04 04 2D C3
+
+movb $48, (%rsp)
+. 2020 0x12345678 5
+. C6 04 24 30 C3
+
+movb $49, 160(%rsp,%rax)
+. 2021 0x12345678 9
+. C6 84 04 A0 00 00 00 31 C3
+
+movb $82, 160(%rsp,%rax)
+. 2022 0x12345678 9
+. C6 84 04 A0 00 00 00 52 C3
+
+movb %al, 103(%rsp)
+. 2023 0x12345678 5
+. 88 44 24 67 C3
+
+movb %al, 106(%rsp)
+. 2024 0x12345678 5
+. 88 44 24 6A C3
+
+movb %al, 107(%rsp)
+. 2025 0x12345678 5
+. 88 44 24 6B C3
+
+movb %al, 111(%rsp)
+. 2026 0x12345678 5
+. 88 44 24 6F C3
+
+movb %al, 112(%rsp)
+. 2027 0x12345678 5
+. 88 44 24 70 C3
+
+movb %al, 11(%rsp)
+. 2028 0x12345678 5
+. 88 44 24 0B C3
+
+movb %al, 127(%rsp)
+. 2029 0x12345678 5
+. 88 44 24 7F C3
+
+movb %al, 138(%rsp)
+. 2030 0x12345678 8
+. 88 84 24 8A 00 00 00 C3
+
+movb %al, 139(%rsp)
+. 2031 0x12345678 8
+. 88 84 24 8B 00 00 00 C3
+
+movb %al, 14(%rsp)
+. 2032 0x12345678 5
+. 88 44 24 0E C3
+
+movb %al, 15(%rsp)
+. 2033 0x12345678 5
+. 88 44 24 0F C3
+
+movb %al, 160(%rsp,%rdx)
+. 2034 0x12345678 8
+. 88 84 14 A0 00 00 00 C3
+
+movb %al, 16(%rsp)
+. 2035 0x12345678 5
+. 88 44 24 10 C3
+
+movb %al, 19(%rsp)
+. 2036 0x12345678 5
+. 88 44 24 13 C3
+
+movb %al, 22(%rsp)
+. 2037 0x12345678 5
+. 88 44 24 16 C3
+
+movb %al, 23(%rsp)
+. 2038 0x12345678 5
+. 88 44 24 17 C3
+
+movb %al, 25(%rsp)
+. 2039 0x12345678 5
+. 88 44 24 19 C3
+
+movb %al, 26(%rsp)
+. 2040 0x12345678 5
+. 88 44 24 1A C3
+
+movb %al, 270(%rsp)
+. 2041 0x12345678 8
+. 88 84 24 0E 01 00 00 C3
+
+movb %al, 271(%rsp)
+. 2042 0x12345678 8
+. 88 84 24 0F 01 00 00 C3
+
+movb %al, 27(%rsp)
+. 2043 0x12345678 5
+. 88 44 24 1B C3
+
+movb %al, 30(%rsp)
+. 2044 0x12345678 5
+. 88 44 24 1E C3
+
+movb %al, 31(%rsp)
+. 2045 0x12345678 5
+. 88 44 24 1F C3
+
+movb %al, 35(%rsp)
+. 2046 0x12345678 5
+. 88 44 24 23 C3
+
+movb %al, 38(%rsp)
+. 2047 0x12345678 5
+. 88 44 24 26 C3
+
+movb %al, 39(%rsp)
+. 2048 0x12345678 5
+. 88 44 24 27 C3
+
+movb %al, 43(%rsp)
+. 2049 0x12345678 5
+. 88 44 24 2B C3
+
+movb %al, 44(%rbx)
+. 2050 0x12345678 4
+. 88 43 2C C3
+
+movb %al, 47(%rsp)
+. 2051 0x12345678 5
+. 88 44 24 2F C3
+
+movb %al, 48(%rsp)
+. 2052 0x12345678 5
+. 88 44 24 30 C3
+
+movb %al, 60(%rsp)
+. 2053 0x12345678 5
+. 88 44 24 3C C3
+
+movb %al, 61(%rsp)
+. 2054 0x12345678 5
+. 88 44 24 3D C3
+
+movb %al, 62(%rsp)
+. 2055 0x12345678 5
+. 88 44 24 3E C3
+
+movb %al, 63(%rsp)
+. 2056 0x12345678 5
+. 88 44 24 3F C3
+
+movb %al, 67(%rsp)
+. 2057 0x12345678 5
+. 88 44 24 43 C3
+
+movb %al, 6(%rsp)
+. 2058 0x12345678 5
+. 88 44 24 06 C3
+
+movb %al, 79(%rsp)
+. 2059 0x12345678 5
+. 88 44 24 4F C3
+
+movb %al, 7(%rsp)
+. 2060 0x12345678 5
+. 88 44 24 07 C3
+
+movb %al, 87(%rsp)
+. 2061 0x12345678 5
+. 88 44 24 57 C3
+
+movb %al, (%rsp,%rcx)
+. 2062 0x12345678 4
+. 88 04 0C C3
+
+movb %bl, 135(%rsp)
+. 2063 0x12345678 8
+. 88 9C 24 87 00 00 00 C3
+
+movb %bl, 271(%rsp)
+. 2064 0x12345678 8
+. 88 9C 24 0F 01 00 00 C3
+
+movb %bl, 46(%rsp)
+. 2065 0x12345678 5
+. 88 5C 24 2E C3
+
+movb %bl, 8(%rax)
+. 2066 0x12345678 4
+. 88 58 08 C3
+
+movb %cl, 23(%rsp)
+. 2067 0x12345678 5
+. 88 4C 24 17 C3
+
+movb %cl, 271(%rsp)
+. 2068 0x12345678 8
+. 88 8C 24 0F 01 00 00 C3
+
+movb %cl, 27(%rsp)
+. 2069 0x12345678 5
+. 88 4C 24 1B C3
+
+movb %cl, 32(%rsp)
+. 2070 0x12345678 5
+. 88 4C 24 20 C3
+
+movb %cl, 39(%rsp)
+. 2071 0x12345678 5
+. 88 4C 24 27 C3
+
+movb %cl, 43(%rsp)
+. 2072 0x12345678 5
+. 88 4C 24 2B C3
+
+movb %cl, 48(%rsp)
+. 2073 0x12345678 5
+. 88 4C 24 30 C3
+
+movb %cl, 64(%rsp)
+. 2074 0x12345678 5
+. 88 4C 24 40 C3
+
+movb %cl, 96(%rsp)
+. 2075 0x12345678 5
+. 88 4C 24 60 C3
+
+movb %dil, 43(%rsp)
+. 2076 0x12345678 6
+. 40 88 7C 24 2B C3
+
+movb %dil, 75(%rsp)
+. 2077 0x12345678 6
+. 40 88 7C 24 4B C3
+
+movb %dil, (%rax)
+. 2078 0x12345678 4
+. 40 88 38 C3
+
+movb %dl, 13(%rsp)
+. 2079 0x12345678 5
+. 88 54 24 0D C3
+
+movb %dl, 23(%rsp)
+. 2080 0x12345678 5
+. 88 54 24 17 C3
+
+movb %dl, 271(%rsp)
+. 2081 0x12345678 8
+. 88 94 24 0F 01 00 00 C3
+
+movb %dl, 30(%rsp)
+. 2082 0x12345678 5
+. 88 54 24 1E C3
+
+movb %dl, 31(%rsp)
+. 2083 0x12345678 5
+. 88 54 24 1F C3
+
+movb %dl, 35(%rsp)
+. 2084 0x12345678 5
+. 88 54 24 23 C3
+
+movb %dl, 39(%rsp)
+. 2085 0x12345678 5
+. 88 54 24 27 C3
+
+movb %dl, 42(%rsp)
+. 2086 0x12345678 5
+. 88 54 24 2A C3
+
+movb %dl, 47(%rsp)
+. 2087 0x12345678 5
+. 88 54 24 2F C3
+
+movb %dl, 48(%rsp)
+. 2088 0x12345678 5
+. 88 54 24 30 C3
+
+movb %dl, 55(%rsp)
+. 2089 0x12345678 5
+. 88 54 24 37 C3
+
+movb %dl, 59(%rsp)
+. 2090 0x12345678 5
+. 88 54 24 3B C3
+
+movb %dl, 67(%rsp)
+. 2091 0x12345678 5
+. 88 54 24 43 C3
+
+movb %dl, 79(%rsp)
+. 2092 0x12345678 5
+. 88 54 24 4F C3
+
+movb %dl, 95(%rsp)
+. 2093 0x12345678 5
+. 88 54 24 5F C3
+
+movb %dl, (%rax,%rcx)
+. 2094 0x12345678 4
+. 88 14 08 C3
+
+movb %dl, (%rsp,%rax)
+. 2095 0x12345678 4
+. 88 14 04 C3
+
+movb %r12b, 271(%rsp)
+. 2096 0x12345678 9
+. 44 88 A4 24 0F 01 00 00 C3
+
+movb %r8b, 11(%rsp)
+. 2097 0x12345678 6
+. 44 88 44 24 0B C3
+
+movb %r8b, 15(%rsp)
+. 2098 0x12345678 6
+. 44 88 44 24 0F C3
+
+movb %r8b, 16(%rcx,%rdx)
+. 2099 0x12345678 6
+. 44 88 44 11 10 C3
+
+movb %r8b, 16(%rdi,%rdx)
+. 2100 0x12345678 6
+. 44 88 44 17 10 C3
+
+movb %r8b, 16(%rsi,%rdx)
+. 2101 0x12345678 6
+. 44 88 44 16 10 C3
+
+movb %r8b, 39(%rsp)
+. 2102 0x12345678 6
+. 44 88 44 24 27 C3
+
+movb %r9b, 39(%rsp)
+. 2103 0x12345678 6
+. 44 88 4C 24 27 C3
+
+movb %r9b, 4771(%rip)
+. 2104 0x12345678 8
+. 44 88 0D A3 12 00 00 C3
+
+movb %sil, 18(%rbx)
+. 2105 0x12345678 5
+. 40 88 73 12 C3
+
+movl %eax, %eax
+. 2106 0x12345678 3
+. 89 C0 C3
+
+movl %eax, %edx
+. 2107 0x12345678 3
+. 89 C2 C3
+
+movl %ebp, %eax
+. 2108 0x12345678 3
+. 89 E8 C3
+
+movl %ebp, %r15d
+. 2109 0x12345678 4
+. 41 89 EF C3
+
+movl %ebx, %eax
+. 2110 0x12345678 3
+. 89 D8 C3
+
+movl %ebx, %ebx
+. 2111 0x12345678 3
+. 89 DB C3
+
+movl %ebx, %edx
+. 2112 0x12345678 3
+. 89 DA C3
+
+movl %ecx, %ebx
+. 2113 0x12345678 3
+. 89 CB C3
+
+movl %ecx, %ecx
+. 2114 0x12345678 3
+. 89 C9 C3
+
+movl %edi, %eax
+. 2115 0x12345678 3
+. 89 F8 C3
+
+movl %edi, %edi
+. 2116 0x12345678 3
+. 89 FF C3
+
+movl %edx, %eax
+. 2117 0x12345678 3
+. 89 D0 C3
+
+movl %edx, %edx
+. 2118 0x12345678 3
+. 89 D2 C3
+
+movl %esi, %eax
+. 2119 0x12345678 3
+. 89 F0 C3
+
+movl %esi, %edx
+. 2120 0x12345678 3
+. 89 F2 C3
+
+movl %esi, %esi
+. 2121 0x12345678 3
+. 89 F6 C3
+
+movl $0, 100(%rdi)
+. 2122 0x12345678 8
+. C7 47 64 00 00 00 00 C3
+
+movl $0, 104(%rdi)
+. 2123 0x12345678 8
+. C7 47 68 00 00 00 00 C3
+
+movl $0, 108(%rdi)
+. 2124 0x12345678 8
+. C7 47 6C 00 00 00 00 C3
+
+movl $0, 108(%rsp)
+. 2125 0x12345678 9
+. C7 44 24 6C 00 00 00 00 C3
+
+movl $0, 112(%rdi)
+. 2126 0x12345678 8
+. C7 47 70 00 00 00 00 C3
+
+movl $0, 116(%rdi)
+. 2127 0x12345678 8
+. C7 47 74 00 00 00 00 C3
+
+movl $0, 116(%rsp)
+. 2128 0x12345678 9
+. C7 44 24 74 00 00 00 00 C3
+
+movl $0, 120(%rdi)
+. 2129 0x12345678 8
+. C7 47 78 00 00 00 00 C3
+
+movl $0, 120(%rsp)
+. 2130 0x12345678 9
+. C7 44 24 78 00 00 00 00 C3
+
+movl $0, 124(%rdi)
+. 2131 0x12345678 8
+. C7 47 7C 00 00 00 00 C3
+
+movl $0, 124(%rsp)
+. 2132 0x12345678 9
+. C7 44 24 7C 00 00 00 00 C3
+
+movl $0, 128(%rdi)
+. 2133 0x12345678 11
+. C7 87 80 00 00 00 00 00 00 00 C3
+
+movl $0, 12(%rax)
+. 2134 0x12345678 8
+. C7 40 0C 00 00 00 00 C3
+
+movl $0, 12(%rbx)
+. 2135 0x12345678 8
+. C7 43 0C 00 00 00 00 C3
+
+movl $0, 12(%rdi)
+. 2136 0x12345678 8
+. C7 47 0C 00 00 00 00 C3
+
+movl $0, 12(%rsp)
+. 2137 0x12345678 9
+. C7 44 24 0C 00 00 00 00 C3
+
+movl $0, 132(%rdi)
+. 2138 0x12345678 11
+. C7 87 84 00 00 00 00 00 00 00 C3
+
+movl $0, 136(%rdi)
+. 2139 0x12345678 11
+. C7 87 88 00 00 00 00 00 00 00 C3
+
+movl $0, 140(%rsp)
+. 2140 0x12345678 12
+. C7 84 24 8C 00 00 00 00 00 00 00 C3
+
+movl $0, 144(%rdi)
+. 2141 0x12345678 11
+. C7 87 90 00 00 00 00 00 00 00 C3
+
+movl $0, 148(%rdi)
+. 2142 0x12345678 11
+. C7 87 94 00 00 00 00 00 00 00 C3
+
+movl $0, 152(%rdi)
+. 2143 0x12345678 11
+. C7 87 98 00 00 00 00 00 00 00 C3
+
+movl $0, 156(%rsp)
+. 2144 0x12345678 12
+. C7 84 24 9C 00 00 00 00 00 00 00 C3
+
+movl $0, 160(%rdi)
+. 2145 0x12345678 11
+. C7 87 A0 00 00 00 00 00 00 00 C3
+
+movl $0, 16(%rbx)
+. 2146 0x12345678 8
+. C7 43 10 00 00 00 00 C3
+
+movl $0, 16(%rdi)
+. 2147 0x12345678 8
+. C7 47 10 00 00 00 00 C3
+
+movl $0, 16(%rsp)
+. 2148 0x12345678 9
+. C7 44 24 10 00 00 00 00 C3
+
+movl $0, 204(%rsp)
+. 2149 0x12345678 12
+. C7 84 24 CC 00 00 00 00 00 00 00 C3
+
+movl $0, 20(%rbx)
+. 2150 0x12345678 8
+. C7 43 14 00 00 00 00 C3
+
+movl $0, 20(%rdi)
+. 2151 0x12345678 8
+. C7 47 14 00 00 00 00 C3
+
+movl $0, 20(%rsp)
+. 2152 0x12345678 9
+. C7 44 24 14 00 00 00 00 C3
+
+movl $0, 24(%rax)
+. 2153 0x12345678 8
+. C7 40 18 00 00 00 00 C3
+
+movl $0, 24(%rdi)
+. 2154 0x12345678 8
+. C7 47 18 00 00 00 00 C3
+
+movl $0, 288(%rsp)
+. 2155 0x12345678 12
+. C7 84 24 20 01 00 00 00 00 00 00 C3
+
+movl $0, 288(%rsp,%rax,4)
+. 2156 0x12345678 12
+. C7 84 84 20 01 00 00 00 00 00 00 C3
+
+movl $0, 28(%rax)
+. 2157 0x12345678 8
+. C7 40 1C 00 00 00 00 C3
+
+movl $0, 28(%rdi)
+. 2158 0x12345678 8
+. C7 47 1C 00 00 00 00 C3
+
+movl $0, 28(%rsp)
+. 2159 0x12345678 9
+. C7 44 24 1C 00 00 00 00 C3
+
+movl $0, 292(%rsp)
+. 2160 0x12345678 12
+. C7 84 24 24 01 00 00 00 00 00 00 C3
+
+movl $0, 32(%rax)
+. 2161 0x12345678 8
+. C7 40 20 00 00 00 00 C3
+
+movl $0, 32(%rdi)
+. 2162 0x12345678 8
+. C7 47 20 00 00 00 00 C3
+
+movl $0, 348(%rsp)
+. 2163 0x12345678 12
+. C7 84 24 5C 01 00 00 00 00 00 00 C3
+
+movl $0, 36(%rdi)
+. 2164 0x12345678 8
+. C7 47 24 00 00 00 00 C3
+
+movl $0, 40(%rax)
+. 2165 0x12345678 8
+. C7 40 28 00 00 00 00 C3
+
+movl $0, 40(%rdi)
+. 2166 0x12345678 8
+. C7 47 28 00 00 00 00 C3
+
+movl $0, 44(%rdi)
+. 2167 0x12345678 8
+. C7 47 2C 00 00 00 00 C3
+
+movl $0, 48(%rax)
+. 2168 0x12345678 8
+. C7 40 30 00 00 00 00 C3
+
+movl $0, 48(%rdi)
+. 2169 0x12345678 8
+. C7 47 30 00 00 00 00 C3
+
+movl $0, 4(%rdi)
+. 2170 0x12345678 8
+. C7 47 04 00 00 00 00 C3
+
+movl $0, 52(%rdi)
+. 2171 0x12345678 8
+. C7 47 34 00 00 00 00 C3
+
+movl $0, 56(%rax)
+. 2172 0x12345678 8
+. C7 40 38 00 00 00 00 C3
+
+movl $0, 56(%rdi)
+. 2173 0x12345678 8
+. C7 47 38 00 00 00 00 C3
+
+movl $0, 56(%rsp)
+. 2174 0x12345678 9
+. C7 44 24 38 00 00 00 00 C3
+
+movl $0, 572(%rsp)
+. 2175 0x12345678 12
+. C7 84 24 3C 02 00 00 00 00 00 00 C3
+
+movl $0, 60(%rdi)
+. 2176 0x12345678 8
+. C7 47 3C 00 00 00 00 C3
+
+movl $0, 64(%rdi)
+. 2177 0x12345678 8
+. C7 47 40 00 00 00 00 C3
+
+movl $0, 68(%rdi)
+. 2178 0x12345678 8
+. C7 47 44 00 00 00 00 C3
+
+movl $0, 72(%rdi)
+. 2179 0x12345678 8
+. C7 47 48 00 00 00 00 C3
+
+movl $0, 76(%rdi)
+. 2180 0x12345678 8
+. C7 47 4C 00 00 00 00 C3
+
+movl $0, 80(%rdi)
+. 2181 0x12345678 8
+. C7 47 50 00 00 00 00 C3
+
+movl $0, 84(%rdi)
+. 2182 0x12345678 8
+. C7 47 54 00 00 00 00 C3
+
+movl $0, 88(%rdi)
+. 2183 0x12345678 8
+. C7 47 58 00 00 00 00 C3
+
+movl $0, 8(%rbx,%rax)
+. 2184 0x12345678 9
+. C7 44 03 08 00 00 00 00 C3
+
+movl $0, 8(%rbx,%rdx)
+. 2185 0x12345678 9
+. C7 44 13 08 00 00 00 00 C3
+
+movl $0, 8(%rdi)
+. 2186 0x12345678 8
+. C7 47 08 00 00 00 00 C3
+
+movl $0, 8(%rsi,%rdx)
+. 2187 0x12345678 9
+. C7 44 16 08 00 00 00 00 C3
+
+movl $0, 8(%rsp)
+. 2188 0x12345678 9
+. C7 44 24 08 00 00 00 00 C3
+
+movl $0, 92(%rdi)
+. 2189 0x12345678 8
+. C7 47 5C 00 00 00 00 C3
+
+movl $0, 96(%rdi)
+. 2190 0x12345678 8
+. C7 47 60 00 00 00 00 C3
+
+movl 0, %eax
+. 2191 0x12345678 8
+. 8B 04 25 00 00 00 00 C3
+
+movl $10000, %esi
+. 2192 0x12345678 6
+. BE 10 27 00 00 C3
+
+movl $1005, %edx
+. 2193 0x12345678 6
+. BA ED 03 00 00 C3
+
+movl $1006, %edx
+. 2194 0x12345678 6
+. BA EE 03 00 00 C3
+
+movl $100, %eax
+. 2195 0x12345678 6
+. B8 64 00 00 00 C3
+
+movl 100(%rsp), %edi
+. 2196 0x12345678 5
+. 8B 7C 24 64 C3
+
+movl 100(%rsp), %edx
+. 2197 0x12345678 5
+. 8B 54 24 64 C3
+
+movl 100(%rsp), %esi
+. 2198 0x12345678 5
+. 8B 74 24 64 C3
+
+movl $1013, %edx
+. 2199 0x12345678 6
+. BA F5 03 00 00 C3
+
+movl $10, 20(%rdi)
+. 2200 0x12345678 8
+. C7 47 14 0A 00 00 00 C3
+
+movl $1023, %edx
+. 2201 0x12345678 6
+. BA FF 03 00 00 C3
+
+movl $1024, %edi
+. 2202 0x12345678 6
+. BF 00 04 00 00 C3
+
+movl 1024(%rsp), %eax
+. 2203 0x12345678 8
+. 8B 84 24 00 04 00 00 C3
+
+movl $-1025, %edi
+. 2204 0x12345678 6
+. BF FF FB FF FF C3
+
+movl $1028, %edx
+. 2205 0x12345678 6
+. BA 04 04 00 00 C3
+
+movl $1029, %edx
+. 2206 0x12345678 6
+. BA 05 04 00 00 C3
+
+movl $10300, %edx
+. 2207 0x12345678 6
+. BA 3C 28 00 00 C3
+
+movl $1031, %edx
+. 2208 0x12345678 6
+. BA 07 04 00 00 C3
+
+movl $1034, %edx
+. 2209 0x12345678 6
+. BA 0A 04 00 00 C3
+
+movl $1035, %edx
+. 2210 0x12345678 6
+. BA 0B 04 00 00 C3
+
+movl $10377, %edx
+. 2211 0x12345678 6
+. BA 89 28 00 00 C3
+
+movl $1037, %edx
+. 2212 0x12345678 6
+. BA 0D 04 00 00 C3
+
+movl $10, 40(%rbx)
+. 2213 0x12345678 8
+. C7 43 28 0A 00 00 00 C3
+
+movl $1041, %edx
+. 2214 0x12345678 6
+. BA 11 04 00 00 C3
+
+movl $1042, %edx
+. 2215 0x12345678 6
+. BA 12 04 00 00 C3
+
+movl $10437, %edx
+. 2216 0x12345678 6
+. BA C5 28 00 00 C3
+
+movl $1043, %edx
+. 2217 0x12345678 6
+. BA 13 04 00 00 C3
+
+movl $10456, %edx
+. 2218 0x12345678 6
+. BA D8 28 00 00 C3
+
+movl $10496, %edx
+. 2219 0x12345678 6
+. BA 00 29 00 00 C3
+
+movl $104, %eax
+. 2220 0x12345678 6
+. B8 68 00 00 00 C3
+
+movl $104, %edx
+. 2221 0x12345678 6
+. BA 68 00 00 00 C3
+
+movl 104(%rsp), %ecx
+. 2222 0x12345678 5
+. 8B 4C 24 68 C3
+
+movl 104(%rsp), %edi
+. 2223 0x12345678 5
+. 8B 7C 24 68 C3
+
+movl 104(%rsp), %edx
+. 2224 0x12345678 5
+. 8B 54 24 68 C3
+
+movl 104(%rsp), %esi
+. 2225 0x12345678 5
+. 8B 74 24 68 C3
+
+movl 104(%rsp), %r8d
+. 2226 0x12345678 6
+. 44 8B 44 24 68 C3
+
+movl 104(%rsp), %r9d
+. 2227 0x12345678 6
+. 44 8B 4C 24 68 C3
+
+movl $10549, %edx
+. 2228 0x12345678 6
+. BA 35 29 00 00 C3
+
+movl $1054, %edx
+. 2229 0x12345678 6
+. BA 1E 04 00 00 C3
+
+movl $1055, %edx
+. 2230 0x12345678 6
+. BA 1F 04 00 00 C3
+
+movl $105, %edx
+. 2231 0x12345678 6
+. BA 69 00 00 00 C3
+
+movl $106, %edx
+. 2232 0x12345678 6
+. BA 6A 00 00 00 C3
+
+movl $10703, %edx
+. 2233 0x12345678 6
+. BA CF 29 00 00 C3
+
+movl $1073741824, %eax
+. 2234 0x12345678 6
+. B8 00 00 00 40 C3
+
+movl $1073741824, %edi
+. 2235 0x12345678 6
+. BF 00 00 00 40 C3
+
+movl $1073741824, %esi
+. 2236 0x12345678 6
+. BE 00 00 00 40 C3
+
+movl $1073741825, %eax
+. 2237 0x12345678 6
+. B8 01 00 00 40 C3
+
+movl $1073741826, %eax
+. 2238 0x12345678 6
+. B8 02 00 00 40 C3
+
+movl $1073741827, %eax
+. 2239 0x12345678 6
+. B8 03 00 00 40 C3
+
+movl $1073741828, %eax
+. 2240 0x12345678 6
+. B8 04 00 00 40 C3
+
+movl $1073741829, %eax
+. 2241 0x12345678 6
+. B8 05 00 00 40 C3
+
+movl $1073741830, %eax
+. 2242 0x12345678 6
+. B8 06 00 00 40 C3
+
+movl $1073741831, %eax
+. 2243 0x12345678 6
+. B8 07 00 00 40 C3
+
+movl $1073741832, %eax
+. 2244 0x12345678 6
+. B8 08 00 00 40 C3
+
+movl $1073741833, %eax
+. 2245 0x12345678 6
+. B8 09 00 00 40 C3
+
+movl $1073741834, %eax
+. 2246 0x12345678 6
+. B8 0A 00 00 40 C3
+
+movl $1073741835, %eax
+. 2247 0x12345678 6
+. B8 0B 00 00 40 C3
+
+movl $1073741836, %eax
+. 2248 0x12345678 6
+. B8 0C 00 00 40 C3
+
+movl $1073741837, %eax
+. 2249 0x12345678 6
+. B8 0D 00 00 40 C3
+
+movl $1073741838, %eax
+. 2250 0x12345678 6
+. B8 0E 00 00 40 C3
+
+movl $1073741839, %eax
+. 2251 0x12345678 6
+. B8 0F 00 00 40 C3
+
+movl $107, %edx
+. 2252 0x12345678 6
+. BA 6B 00 00 00 C3
+
+movl $108, 40(%rbx)
+. 2253 0x12345678 8
+. C7 43 28 6C 00 00 00 C3
+
+movl $10895, %edx
+. 2254 0x12345678 6
+. BA 8F 2A 00 00 C3
+
+movl $108, %eax
+. 2255 0x12345678 6
+. B8 6C 00 00 00 C3
+
+movl $108, %edx
+. 2256 0x12345678 6
+. BA 6C 00 00 00 C3
+
+movl 108(%rsp), %eax
+. 2257 0x12345678 5
+. 8B 44 24 6C C3
+
+movl 108(%rsp), %ebp
+. 2258 0x12345678 5
+. 8B 6C 24 6C C3
+
+movl 108(%rsp), %ecx
+. 2259 0x12345678 5
+. 8B 4C 24 6C C3
+
+movl 108(%rsp), %edi
+. 2260 0x12345678 5
+. 8B 7C 24 6C C3
+
+movl 108(%rsp), %edx
+. 2261 0x12345678 5
+. 8B 54 24 6C C3
+
+movl $10911, %edx
+. 2262 0x12345678 6
+. BA 9F 2A 00 00 C3
+
+movl $10921, %edx
+. 2263 0x12345678 6
+. BA A9 2A 00 00 C3
+
+movl $10922, %edx
+. 2264 0x12345678 6
+. BA AA 2A 00 00 C3
+
+movl $10998, %edx
+. 2265 0x12345678 6
+. BA F6 2A 00 00 C3
+
+movl $10, %eax
+. 2266 0x12345678 6
+. B8 0A 00 00 00 C3
+
+movl $10, %edi
+. 2267 0x12345678 6
+. BF 0A 00 00 00 C3
+
+movl $10, %edx
+. 2268 0x12345678 6
+. BA 0A 00 00 00 C3
+
+movl $10, %r14d
+. 2269 0x12345678 7
+. 41 BE 0A 00 00 00 C3
+
+movl $10, (%rax)
+. 2270 0x12345678 7
+. C7 00 0A 00 00 00 C3
+
+movl $11000, %edx
+. 2271 0x12345678 6
+. BA F8 2A 00 00 C3
+
+movl $1100, %edx
+. 2272 0x12345678 6
+. BA 4C 04 00 00 C3
+
+movl $1101, %edx
+. 2273 0x12345678 6
+. BA 4D 04 00 00 C3
+
+movl $11046, %edx
+. 2274 0x12345678 6
+. BA 26 2B 00 00 C3
+
+movl $1, 104(%rsp)
+. 2275 0x12345678 9
+. C7 44 24 68 01 00 00 00 C3
+
+movl $11073, %edx
+. 2276 0x12345678 6
+. BA 41 2B 00 00 C3
+
+movl $11074, %edx
+. 2277 0x12345678 6
+. BA 42 2B 00 00 C3
+
+movl $1, 108(%rsp)
+. 2278 0x12345678 9
+. C7 44 24 6C 01 00 00 00 C3
+
+movl $110, %edx
+. 2279 0x12345678 6
+. BA 6E 00 00 00 C3
+
+movl $1110, %edx
+. 2280 0x12345678 6
+. BA 56 04 00 00 C3
+
+movl $111, %edx
+. 2281 0x12345678 6
+. BA 6F 00 00 00 C3
+
+movl $11204, %edx
+. 2282 0x12345678 6
+. BA C4 2B 00 00 C3
+
+movl $11220, %edx
+. 2283 0x12345678 6
+. BA D4 2B 00 00 C3
+
+movl $11225, %edx
+. 2284 0x12345678 6
+. BA D9 2B 00 00 C3
+
+movl $1123, %edx
+. 2285 0x12345678 6
+. BA 63 04 00 00 C3
+
+movl $11257, %edx
+. 2286 0x12345678 6
+. BA F9 2B 00 00 C3
+
+movl $112, %eax
+. 2287 0x12345678 6
+. B8 70 00 00 00 C3
+
+movl $112, %edx
+. 2288 0x12345678 6
+. BA 70 00 00 00 C3
+
+movl $-1, 12(%rbx,%rax)
+. 2289 0x12345678 9
+. C7 44 03 0C FF FF FF FF C3
+
+movl $-1, 12(%rcx,%rax)
+. 2290 0x12345678 9
+. C7 44 01 0C FF FF FF FF C3
+
+movl $-1, 12(%rsi,%rdx)
+. 2291 0x12345678 9
+. C7 44 16 0C FF FF FF FF C3
+
+movl 112(%rsp), %edi
+. 2292 0x12345678 5
+. 8B 7C 24 70 C3
+
+movl $1136, %edx
+. 2293 0x12345678 6
+. BA 70 04 00 00 C3
+
+movl $113, %eax
+. 2294 0x12345678 6
+. B8 71 00 00 00 C3
+
+movl $113, %edx
+. 2295 0x12345678 6
+. BA 71 00 00 00 C3
+
+movl $11458, %edx
+. 2296 0x12345678 6
+. BA C2 2C 00 00 C3
+
+movl $114, %edx
+. 2297 0x12345678 6
+. BA 72 00 00 00 C3
+
+movl $11584, %edx
+. 2298 0x12345678 6
+. BA 40 2D 00 00 C3
+
+movl $11596, %edx
+. 2299 0x12345678 6
+. BA 4C 2D 00 00 C3
+
+movl $1159, %edx
+. 2300 0x12345678 6
+. BA 87 04 00 00 C3
+
+movl $115, %edx
+. 2301 0x12345678 6
+. BA 73 00 00 00 C3
+
+movl $11681, %edx
+. 2302 0x12345678 6
+. BA A1 2D 00 00 C3
+
+movl $116, %eax
+. 2303 0x12345678 6
+. B8 74 00 00 00 C3
+
+movl $116, %edx
+. 2304 0x12345678 6
+. BA 74 00 00 00 C3
+
+movl $-1, 16(%rsp)
+. 2305 0x12345678 9
+. C7 44 24 10 FF FF FF FF C3
+
+movl $1, 16(%rsp)
+. 2306 0x12345678 9
+. C7 44 24 10 01 00 00 00 C3
+
+movl 116(%rsp), %ebx
+. 2307 0x12345678 5
+. 8B 5C 24 74 C3
+
+movl 116(%rsp), %edi
+. 2308 0x12345678 5
+. 8B 7C 24 74 C3
+
+movl 116(%rsp), %edx
+. 2309 0x12345678 5
+. 8B 54 24 74 C3
+
+movl $1172, %edx
+. 2310 0x12345678 6
+. BA 94 04 00 00 C3
+
+movl $117, %edx
+. 2311 0x12345678 6
+. BA 75 00 00 00 C3
+
+movl $118, %edx
+. 2312 0x12345678 6
+. BA 76 00 00 00 C3
+
+movl $119, %eax
+. 2313 0x12345678 6
+. B8 77 00 00 00 C3
+
+movl $11, %edi
+. 2314 0x12345678 6
+. BF 0B 00 00 00 C3
+
+movl $11, %r14d
+. 2315 0x12345678 7
+. 41 BE 0B 00 00 00 C3
+
+movl $11, (%rax)
+. 2316 0x12345678 7
+. C7 00 0B 00 00 00 C3
+
+movl $120, 12(%rdi)
+. 2317 0x12345678 8
+. C7 47 0C 78 00 00 00 C3
+
+movl $1205, %edx
+. 2318 0x12345678 6
+. BA B5 04 00 00 C3
+
+movl $120, %eax
+. 2319 0x12345678 6
+. B8 78 00 00 00 C3
+
+movl $120, %edi
+. 2320 0x12345678 6
+. BF 78 00 00 00 C3
+
+movl $-1, 20(%rax)
+. 2321 0x12345678 8
+. C7 40 14 FF FF FF FF C3
+
+movl $-1, 20(%rsp)
+. 2322 0x12345678 9
+. C7 44 24 14 FF FF FF FF C3
+
+movl 120(%rsp), %eax
+. 2323 0x12345678 5
+. 8B 44 24 78 C3
+
+movl 120(%rsp), %ecx
+. 2324 0x12345678 5
+. 8B 4C 24 78 C3
+
+movl 120(%rsp), %edx
+. 2325 0x12345678 5
+. 8B 54 24 78 C3
+
+movl $1213, %edx
+. 2326 0x12345678 6
+. BA BD 04 00 00 C3
+
+movl $1214, %edx
+. 2327 0x12345678 6
+. BA BE 04 00 00 C3
+
+movl $121, %edx
+. 2328 0x12345678 6
+. BA 79 00 00 00 C3
+
+movl $1222, %edx
+. 2329 0x12345678 6
+. BA C6 04 00 00 C3
+
+movl $1229, %edx
+. 2330 0x12345678 6
+. BA CD 04 00 00 C3
+
+movl $1239, %edx
+. 2331 0x12345678 6
+. BA D7 04 00 00 C3
+
+movl $124, %eax
+. 2332 0x12345678 6
+. B8 7C 00 00 00 C3
+
+movl $-1, 24(%rax)
+. 2333 0x12345678 8
+. C7 40 18 FF FF FF FF C3
+
+movl $1, 24(%rdx)
+. 2334 0x12345678 8
+. C7 42 18 01 00 00 00 C3
+
+movl 124(%rsp), %eax
+. 2335 0x12345678 5
+. 8B 44 24 7C C3
+
+movl 124(%rsp), %ebx
+. 2336 0x12345678 5
+. 8B 5C 24 7C C3
+
+movl 124(%rsp), %ecx
+. 2337 0x12345678 5
+. 8B 4C 24 7C C3
+
+movl 124(%rsp), %edi
+. 2338 0x12345678 5
+. 8B 7C 24 7C C3
+
+movl 124(%rsp), %esi
+. 2339 0x12345678 5
+. 8B 74 24 7C C3
+
+movl 124(%rsp), %r13d
+. 2340 0x12345678 6
+. 44 8B 6C 24 7C C3
+
+movl $1250, %edx
+. 2341 0x12345678 6
+. BA E2 04 00 00 C3
+
+movl $1251, %edx
+. 2342 0x12345678 6
+. BA E3 04 00 00 C3
+
+movl $1252, %edx
+. 2343 0x12345678 6
+. BA E4 04 00 00 C3
+
+movl $12, 68(%rax)
+. 2344 0x12345678 8
+. C7 40 44 0C 00 00 00 C3
+
+movl $127, %r10d
+. 2345 0x12345678 7
+. 41 BA 7F 00 00 00 C3
+
+movl $127, %r9d
+. 2346 0x12345678 7
+. 41 B9 7F 00 00 00 C3
+
+movl $128, 120(%rbx)
+. 2347 0x12345678 8
+. C7 43 78 80 00 00 00 C3
+
+movl $1281, %edx
+. 2348 0x12345678 6
+. BA 01 05 00 00 C3
+
+movl $128, 68(%rbx)
+. 2349 0x12345678 8
+. C7 43 44 80 00 00 00 C3
+
+movl $128, 72(%rsp)
+. 2350 0x12345678 9
+. C7 44 24 48 80 00 00 00 C3
+
+movl $128, 80(%rax)
+. 2351 0x12345678 8
+. C7 40 50 80 00 00 00 C3
+
+movl $128, 80(%rbx)
+. 2352 0x12345678 8
+. C7 43 50 80 00 00 00 C3
+
+movl $1288, %edx
+. 2353 0x12345678 6
+. BA 08 05 00 00 C3
+
+movl $1289, %edx
+. 2354 0x12345678 6
+. BA 09 05 00 00 C3
+
+movl $-128, %ecx
+. 2355 0x12345678 6
+. B9 80 FF FF FF C3
+
+movl $128, %edi
+. 2356 0x12345678 6
+. BF 80 00 00 00 C3
+
+movl $-128, %r10d
+. 2357 0x12345678 7
+. 41 BA 80 FF FF FF C3
+
+movl $1, 28(%rsp)
+. 2358 0x12345678 9
+. C7 44 24 1C 01 00 00 00 C3
+
+movl 128(%rsp), %edi
+. 2359 0x12345678 8
+. 8B BC 24 80 00 00 00 C3
+
+movl $12, %eax
+. 2360 0x12345678 6
+. B8 0C 00 00 00 C3
+
+movl $12, %edi
+. 2361 0x12345678 6
+. BF 0C 00 00 00 C3
+
+movl $12, %r13d
+. 2362 0x12345678 7
+. 41 BD 0C 00 00 00 C3
+
+movl $12, %r14d
+. 2363 0x12345678 7
+. 41 BE 0C 00 00 00 C3
+
+movl 12(%r8), %eax
+. 2364 0x12345678 5
+. 41 8B 40 0C C3
+
+movl $12, (%rax)
+. 2365 0x12345678 7
+. C7 00 0C 00 00 00 C3
+
+movl 12(%rax), %eax
+. 2366 0x12345678 4
+. 8B 40 0C C3
+
+movl 12(%rax), %ebx
+. 2367 0x12345678 4
+. 8B 58 0C C3
+
+movl 12(%rax), %ecx
+. 2368 0x12345678 4
+. 8B 48 0C C3
+
+movl 12(%rax), %esi
+. 2369 0x12345678 4
+. 8B 70 0C C3
+
+movl 12(%rax,%rdi), %ecx
+. 2370 0x12345678 5
+. 8B 4C 38 0C C3
+
+movl 12(%rax,%rdi), %r13d
+. 2371 0x12345678 6
+. 44 8B 6C 38 0C C3
+
+movl 12(%rax,%rdx), %esi
+. 2372 0x12345678 5
+. 8B 74 10 0C C3
+
+movl 12(%rbp), %ebx
+. 2373 0x12345678 4
+. 8B 5D 0C C3
+
+movl 12(%rbp), %edi
+. 2374 0x12345678 4
+. 8B 7D 0C C3
+
+movl 12(%rbp), %edx
+. 2375 0x12345678 4
+. 8B 55 0C C3
+
+movl 12(%rbx), %eax
+. 2376 0x12345678 4
+. 8B 43 0C C3
+
+movl 12(%rbx), %ecx
+. 2377 0x12345678 4
+. 8B 4B 0C C3
+
+movl 12(%rbx), %edi
+. 2378 0x12345678 4
+. 8B 7B 0C C3
+
+movl 12(%rbx), %edx
+. 2379 0x12345678 4
+. 8B 53 0C C3
+
+movl 12(%rbx,%rdx), %esi
+. 2380 0x12345678 5
+. 8B 74 13 0C C3
+
+movl 12(%rdi), %edx
+. 2381 0x12345678 4
+. 8B 57 0C C3
+
+movl 12(%rdi), %esi
+. 2382 0x12345678 4
+. 8B 77 0C C3
+
+movl 12(%rdi,%rdx), %ebp
+. 2383 0x12345678 5
+. 8B 6C 17 0C C3
+
+movl 12(%rdx), %ecx
+. 2384 0x12345678 4
+. 8B 4A 0C C3
+
+movl 12(%rdx), %esi
+. 2385 0x12345678 4
+. 8B 72 0C C3
+
+movl 12(%rdx,%r13), %eax
+. 2386 0x12345678 6
+. 42 8B 44 2A 0C C3
+
+movl 12(%rsi), %eax
+. 2387 0x12345678 4
+. 8B 46 0C C3
+
+movl 12(%rsi,%rcx), %eax
+. 2388 0x12345678 5
+. 8B 44 0E 0C C3
+
+movl 12(%rsp), %eax
+. 2389 0x12345678 5
+. 8B 44 24 0C C3
+
+movl 12(%rsp), %ecx
+. 2390 0x12345678 5
+. 8B 4C 24 0C C3
+
+movl 12(%rsp), %edi
+. 2391 0x12345678 5
+. 8B 7C 24 0C C3
+
+movl 12(%rsp), %edx
+. 2392 0x12345678 5
+. 8B 54 24 0C C3
+
+movl 12(%rsp), %esi
+. 2393 0x12345678 5
+. 8B 74 24 0C C3
+
+movl $1306, %edx
+. 2394 0x12345678 6
+. BA 1A 05 00 00 C3
+
+movl $1307, %edx
+. 2395 0x12345678 6
+. BA 1B 05 00 00 C3
+
+movl $1313, %edx
+. 2396 0x12345678 6
+. BA 21 05 00 00 C3
+
+movl $1321, %edx
+. 2397 0x12345678 6
+. BA 29 05 00 00 C3
+
+movl $1324, %edx
+. 2398 0x12345678 6
+. BA 2C 05 00 00 C3
+
+movl $1327, %edx
+. 2399 0x12345678 6
+. BA 2F 05 00 00 C3
+
+movl $132, %edi
+. 2400 0x12345678 6
+. BF 84 00 00 00 C3
+
+movl $-1, 32(%rsp)
+. 2401 0x12345678 9
+. C7 44 24 20 FF FF FF FF C3
+
+movl 132(%rsp), %edi
+. 2402 0x12345678 8
+. 8B BC 24 84 00 00 00 C3
+
+movl $1338, %edx
+. 2403 0x12345678 6
+. BA 3A 05 00 00 C3
+
+movl $1343, %edx
+. 2404 0x12345678 6
+. BA 3F 05 00 00 C3
+
+movl $1344, %edx
+. 2405 0x12345678 6
+. BA 40 05 00 00 C3
+
+movl $1345, %edx
+. 2406 0x12345678 6
+. BA 41 05 00 00 C3
+
+movl $1347, %edx
+. 2407 0x12345678 6
+. BA 43 05 00 00 C3
+
+movl $1, 348(%rsp)
+. 2408 0x12345678 12
+. C7 84 24 5C 01 00 00 01 00 00 00 C3
+
+movl $1350, %edx
+. 2409 0x12345678 6
+. BA 46 05 00 00 C3
+
+movl $136, 80(%rbx)
+. 2410 0x12345678 8
+. C7 43 50 88 00 00 00 C3
+
+movl $136, 92(%rax)
+. 2411 0x12345678 8
+. C7 40 5C 88 00 00 00 C3
+
+movl $136, 92(%rbx)
+. 2412 0x12345678 8
+. C7 43 5C 88 00 00 00 C3
+
+movl $136, %edi
+. 2413 0x12345678 6
+. BF 88 00 00 00 C3
+
+movl $136, %edx
+. 2414 0x12345678 6
+. BA 88 00 00 00 C3
+
+movl 136(%rsp), %ebx
+. 2415 0x12345678 8
+. 8B 9C 24 88 00 00 00 C3
+
+movl 136(%rsp), %ecx
+. 2416 0x12345678 8
+. 8B 8C 24 88 00 00 00 C3
+
+movl 136(%rsp), %edi
+. 2417 0x12345678 8
+. 8B BC 24 88 00 00 00 C3
+
+movl $-1, 376(%rsp)
+. 2418 0x12345678 12
+. C7 84 24 78 01 00 00 FF FF FF FF C3
+
+movl $-1, 380(%rsp)
+. 2419 0x12345678 12
+. C7 84 24 7C 01 00 00 FF FF FF FF C3
+
+movl $-1, 384(%rsp)
+. 2420 0x12345678 12
+. C7 84 24 80 01 00 00 FF FF FF FF C3
+
+movl $1388, %edx
+. 2421 0x12345678 6
+. BA 6C 05 00 00 C3
+
+movl $-1, 388(%rsp)
+. 2422 0x12345678 12
+. C7 84 24 84 01 00 00 FF FF FF FF C3
+
+movl $-1, 392(%rsp)
+. 2423 0x12345678 12
+. C7 84 24 88 01 00 00 FF FF FF FF C3
+
+movl $-1, 396(%rsp)
+. 2424 0x12345678 12
+. C7 84 24 8C 01 00 00 FF FF FF FF C3
+
+movl $13, %eax
+. 2425 0x12345678 6
+. B8 0D 00 00 00 C3
+
+movl $13, %r14d
+. 2426 0x12345678 7
+. 41 BE 0D 00 00 00 C3
+
+movl $13, (%rax)
+. 2427 0x12345678 7
+. C7 00 0D 00 00 00 C3
+
+movl $-1, 400(%rsp)
+. 2428 0x12345678 12
+. C7 84 24 90 01 00 00 FF FF FF FF C3
+
+movl $140, 104(%rax)
+. 2429 0x12345678 8
+. C7 40 68 8C 00 00 00 C3
+
+movl $140, 104(%rbx)
+. 2430 0x12345678 8
+. C7 43 68 8C 00 00 00 C3
+
+movl $-1, 404(%rsp)
+. 2431 0x12345678 12
+. C7 84 24 94 01 00 00 FF FF FF FF C3
+
+movl $1407, %edx
+. 2432 0x12345678 6
+. BA 7F 05 00 00 C3
+
+movl $-1, 408(%rsp)
+. 2433 0x12345678 12
+. C7 84 24 98 01 00 00 FF FF FF FF C3
+
+movl $140, 92(%rbx)
+. 2434 0x12345678 8
+. C7 43 5C 8C 00 00 00 C3
+
+movl $140, %edi
+. 2435 0x12345678 6
+. BF 8C 00 00 00 C3
+
+movl 140(%rsp), %eax
+. 2436 0x12345678 8
+. 8B 84 24 8C 00 00 00 C3
+
+movl 140(%rsp), %ecx
+. 2437 0x12345678 8
+. 8B 8C 24 8C 00 00 00 C3
+
+movl 140(%rsp), %edi
+. 2438 0x12345678 8
+. 8B BC 24 8C 00 00 00 C3
+
+movl 140(%rsp), %edx
+. 2439 0x12345678 8
+. 8B 94 24 8C 00 00 00 C3
+
+movl $-1, 412(%rsp)
+. 2440 0x12345678 12
+. C7 84 24 9C 01 00 00 FF FF FF FF C3
+
+movl $-1, 416(%rsp)
+. 2441 0x12345678 12
+. C7 84 24 A0 01 00 00 FF FF FF FF C3
+
+movl $-1, 420(%rsp)
+. 2442 0x12345678 12
+. C7 84 24 A4 01 00 00 FF FF FF FF C3
+
+movl $1421, %edx
+. 2443 0x12345678 6
+. BA 8D 05 00 00 C3
+
+movl $1422, %edx
+. 2444 0x12345678 6
+. BA 8E 05 00 00 C3
+
+movl $1423, %edx
+. 2445 0x12345678 6
+. BA 8F 05 00 00 C3
+
+movl $1424, %edx
+. 2446 0x12345678 6
+. BA 90 05 00 00 C3
+
+movl $-1, 424(%rsp)
+. 2447 0x12345678 12
+. C7 84 24 A8 01 00 00 FF FF FF FF C3
+
+movl $-1, 428(%rsp)
+. 2448 0x12345678 12
+. C7 84 24 AC 01 00 00 FF FF FF FF C3
+
+movl $-1, 432(%rsp)
+. 2449 0x12345678 12
+. C7 84 24 B0 01 00 00 FF FF FF FF C3
+
+movl $-1, 436(%rsp)
+. 2450 0x12345678 12
+. C7 84 24 B4 01 00 00 FF FF FF FF C3
+
+movl $-1, 440(%rsp)
+. 2451 0x12345678 12
+. C7 84 24 B8 01 00 00 FF FF FF FF C3
+
+movl $144, 128(%rbx)
+. 2452 0x12345678 11
+. C7 83 80 00 00 00 90 00 00 00 C3
+
+movl $-1, 444(%rsp)
+. 2453 0x12345678 12
+. C7 84 24 BC 01 00 00 FF FF FF FF C3
+
+movl $-1, 448(%rsp)
+. 2454 0x12345678 12
+. C7 84 24 C0 01 00 00 FF FF FF FF C3
+
+movl $144, %edi
+. 2455 0x12345678 6
+. BF 90 00 00 00 C3
+
+movl 144(%rdi), %esi
+. 2456 0x12345678 7
+. 8B B7 90 00 00 00 C3
+
+movl 144(%rsp), %edi
+. 2457 0x12345678 8
+. 8B BC 24 90 00 00 00 C3
+
+movl $1450, %edx
+. 2458 0x12345678 6
+. BA AA 05 00 00 C3
+
+movl $-1, 452(%rsp)
+. 2459 0x12345678 12
+. C7 84 24 C4 01 00 00 FF FF FF FF C3
+
+movl $1459, %edx
+. 2460 0x12345678 6
+. BA B3 05 00 00 C3
+
+movl $145, %edx
+. 2461 0x12345678 6
+. BA 91 00 00 00 C3
+
+movl $-1, 460(%rsp)
+. 2462 0x12345678 12
+. C7 84 24 CC 01 00 00 FF FF FF FF C3
+
+movl $-1, 464(%rsp)
+. 2463 0x12345678 12
+. C7 84 24 D0 01 00 00 FF FF FF FF C3
+
+movl $-1, 468(%rsp)
+. 2464 0x12345678 12
+. C7 84 24 D4 01 00 00 FF FF FF FF C3
+
+movl $-1, 472(%rsp)
+. 2465 0x12345678 12
+. C7 84 24 D8 01 00 00 FF FF FF FF C3
+
+movl $-1, 476(%rsp)
+. 2466 0x12345678 12
+. C7 84 24 DC 01 00 00 FF FF FF FF C3
+
+movl $-1, 480(%rsp)
+. 2467 0x12345678 12
+. C7 84 24 E0 01 00 00 FF FF FF FF C3
+
+movl $148, 116(%rbx)
+. 2468 0x12345678 8
+. C7 43 74 94 00 00 00 C3
+
+movl $-1, 484(%rsp)
+. 2469 0x12345678 12
+. C7 84 24 E4 01 00 00 FF FF FF FF C3
+
+movl $-1, 488(%rsp)
+. 2470 0x12345678 12
+. C7 84 24 E8 01 00 00 FF FF FF FF C3
+
+movl $148, %eax
+. 2471 0x12345678 6
+. B8 94 00 00 00 C3
+
+movl $148, %edi
+. 2472 0x12345678 6
+. BF 94 00 00 00 C3
+
+movl 148(%rsp), %ecx
+. 2473 0x12345678 8
+. 8B 8C 24 94 00 00 00 C3
+
+movl 148(%rsp), %edi
+. 2474 0x12345678 8
+. 8B BC 24 94 00 00 00 C3
+
+movl $1490, %edx
+. 2475 0x12345678 6
+. BA D2 05 00 00 C3
+
+movl $-1, 492(%rsp)
+. 2476 0x12345678 12
+. C7 84 24 EC 01 00 00 FF FF FF FF C3
+
+movl $-1, 496(%rsp)
+. 2477 0x12345678 12
+. C7 84 24 F0 01 00 00 FF FF FF FF C3
+
+movl $14, %edi
+. 2478 0x12345678 6
+. BF 0E 00 00 00 C3
+
+movl $14, %r14d
+. 2479 0x12345678 7
+. 41 BE 0E 00 00 00 C3
+
+movl $-1, 500(%rsp)
+. 2480 0x12345678 12
+. C7 84 24 F4 01 00 00 FF FF FF FF C3
+
+movl $-1, 504(%rsp)
+. 2481 0x12345678 12
+. C7 84 24 F8 01 00 00 FF FF FF FF C3
+
+movl $-1, 508(%rsp)
+. 2482 0x12345678 12
+. C7 84 24 FC 01 00 00 FF FF FF FF C3
+
+movl $-1, 512(%rsp)
+. 2483 0x12345678 12
+. C7 84 24 00 02 00 00 FF FF FF FF C3
+
+movl $-1, 516(%rsp)
+. 2484 0x12345678 12
+. C7 84 24 04 02 00 00 FF FF FF FF C3
+
+movl $151, %edx
+. 2485 0x12345678 6
+. BA 97 00 00 00 C3
+
+movl $-1, 520(%rsp)
+. 2486 0x12345678 12
+. C7 84 24 08 02 00 00 FF FF FF FF C3
+
+movl $-1, 524(%rsp)
+. 2487 0x12345678 12
+. C7 84 24 0C 02 00 00 FF FF FF FF C3
+
+movl $-1, 528(%rsp)
+. 2488 0x12345678 12
+. C7 84 24 10 02 00 00 FF FF FF FF C3
+
+movl $152, %edi
+. 2489 0x12345678 6
+. BF 98 00 00 00 C3
+
+movl $152, %edx
+. 2490 0x12345678 6
+. BA 98 00 00 00 C3
+
+movl 152(%rsp), %edi
+. 2491 0x12345678 8
+. 8B BC 24 98 00 00 00 C3
+
+movl 152(%rsp), %edx
+. 2492 0x12345678 8
+. 8B 94 24 98 00 00 00 C3
+
+movl $1530, %edx
+. 2493 0x12345678 6
+. BA FA 05 00 00 C3
+
+movl $-1, 532(%rsp)
+. 2494 0x12345678 12
+. C7 84 24 14 02 00 00 FF FF FF FF C3
+
+movl $-1, 536(%rsp)
+. 2495 0x12345678 12
+. C7 84 24 18 02 00 00 FF FF FF FF C3
+
+movl $153, %edx
+. 2496 0x12345678 6
+. BA 99 00 00 00 C3
+
+movl $156, %edi
+. 2497 0x12345678 6
+. BF 9C 00 00 00 C3
+
+movl 156(%rsp), %eax
+. 2498 0x12345678 8
+. 8B 84 24 9C 00 00 00 C3
+
+movl 156(%rsp), %ecx
+. 2499 0x12345678 8
+. 8B 8C 24 9C 00 00 00 C3
+
+movl 156(%rsp), %edi
+. 2500 0x12345678 8
+. 8B BC 24 9C 00 00 00 C3
+
+movl $157, %edi
+. 2501 0x12345678 6
+. BF 9D 00 00 00 C3
+
+movl $158, %edi
+. 2502 0x12345678 6
+. BF 9E 00 00 00 C3
+
+movl $1590, %edx
+. 2503 0x12345678 6
+. BA 36 06 00 00 C3
+
+movl $15, %eax
+. 2504 0x12345678 6
+. B8 0F 00 00 00 C3
+
+movl $15, %edi
+. 2505 0x12345678 6
+. BF 0F 00 00 00 C3
+
+movl $15, %r14d
+. 2506 0x12345678 7
+. 41 BE 0F 00 00 00 C3
+
+movl $160, %edi
+. 2507 0x12345678 6
+. BF A0 00 00 00 C3
+
+movl $160, %edx
+. 2508 0x12345678 6
+. BA A0 00 00 00 C3
+
+movl 160(%rsp), %edi
+. 2509 0x12345678 8
+. 8B BC 24 A0 00 00 00 C3
+
+movl $161, %edx
+. 2510 0x12345678 6
+. BA A1 00 00 00 C3
+
+movl $163, %edx
+. 2511 0x12345678 6
+. BA A3 00 00 00 C3
+
+movl $164, %eax
+. 2512 0x12345678 6
+. B8 A4 00 00 00 C3
+
+movl $164, %edx
+. 2513 0x12345678 6
+. BA A4 00 00 00 C3
+
+movl 164(%rsp), %edi
+. 2514 0x12345678 8
+. 8B BC 24 A4 00 00 00 C3
+
+movl $16711680, %edi
+. 2515 0x12345678 6
+. BF 00 00 FF 00 C3
+
+movl $168, %edx
+. 2516 0x12345678 6
+. BA A8 00 00 00 C3
+
+movl $1, 68(%rsp)
+. 2517 0x12345678 9
+. C7 44 24 44 01 00 00 00 C3
+
+movl $16, 8(%rsp)
+. 2518 0x12345678 9
+. C7 44 24 08 10 00 00 00 C3
+
+movl 168(%rsp), %esi
+. 2519 0x12345678 8
+. 8B B4 24 A8 00 00 00 C3
+
+movl $16, %eax
+. 2520 0x12345678 6
+. B8 10 00 00 00 C3
+
+movl $16, %edi
+. 2521 0x12345678 6
+. BF 10 00 00 00 C3
+
+movl $16, %edx
+. 2522 0x12345678 6
+. BA 10 00 00 00 C3
+
+movl 16(%r12), %eax
+. 2523 0x12345678 6
+. 41 8B 44 24 10 C3
+
+movl 16(%r12), %esi
+. 2524 0x12345678 6
+. 41 8B 74 24 10 C3
+
+movl $16, %r14d
+. 2525 0x12345678 7
+. 41 BE 10 00 00 00 C3
+
+movl 16(%r8), %eax
+. 2526 0x12345678 5
+. 41 8B 40 10 C3
+
+movl 16(%rbp), %ebx
+. 2527 0x12345678 4
+. 8B 5D 10 C3
+
+movl 16(%rbp), %edi
+. 2528 0x12345678 4
+. 8B 7D 10 C3
+
+movl 16(%rbp), %esi
+. 2529 0x12345678 4
+. 8B 75 10 C3
+
+movl 16(%rbp), %r12d
+. 2530 0x12345678 5
+. 44 8B 65 10 C3
+
+movl 16(%rbp,%rbx,4), %edi
+. 2531 0x12345678 5
+. 8B 7C 9D 10 C3
+
+movl 16(%rbx), %eax
+. 2532 0x12345678 4
+. 8B 43 10 C3
+
+movl 16(%rbx), %edi
+. 2533 0x12345678 4
+. 8B 7B 10 C3
+
+movl 16(%rbx), %esi
+. 2534 0x12345678 4
+. 8B 73 10 C3
+
+movl 16(%rbx,%rbp,4), %eax
+. 2535 0x12345678 5
+. 8B 44 AB 10 C3
+
+movl 16(%rdi), %eax
+. 2536 0x12345678 4
+. 8B 47 10 C3
+
+movl 16(%rdi), %edi
+. 2537 0x12345678 4
+. 8B 7F 10 C3
+
+movl 16(%rdi,%rax,4), %eax
+. 2538 0x12345678 5
+. 8B 44 87 10 C3
+
+movl 16(%rdi,%rax,4), %edx
+. 2539 0x12345678 5
+. 8B 54 87 10 C3
+
+movl 16(%rdx), %eax
+. 2540 0x12345678 4
+. 8B 42 10 C3
+
+movl 16(%rsi), %eax
+. 2541 0x12345678 4
+. 8B 46 10 C3
+
+movl $16, (%rsp)
+. 2542 0x12345678 8
+. C7 04 24 10 00 00 00 C3
+
+movl 16(%rsp), %eax
+. 2543 0x12345678 5
+. 8B 44 24 10 C3
+
+movl 16(%rsp), %ebp
+. 2544 0x12345678 5
+. 8B 6C 24 10 C3
+
+movl -16(%rsp), %ecx
+. 2545 0x12345678 5
+. 8B 4C 24 F0 C3
+
+movl 16(%rsp), %ecx
+. 2546 0x12345678 5
+. 8B 4C 24 10 C3
+
+movl 16(%rsp), %edi
+. 2547 0x12345678 5
+. 8B 7C 24 10 C3
+
+movl -16(%rsp), %edx
+. 2548 0x12345678 5
+. 8B 54 24 F0 C3
+
+movl 16(%rsp), %edx
+. 2549 0x12345678 5
+. 8B 54 24 10 C3
+
+movl 16(%rsp), %esi
+. 2550 0x12345678 5
+. 8B 74 24 10 C3
+
+movl 16(%rsp), %r15d
+. 2551 0x12345678 6
+. 44 8B 7C 24 10 C3
+
+movl 16(%rsp), %r8d
+. 2552 0x12345678 6
+. 44 8B 44 24 10 C3
+
+movl $172, 68(%rsp)
+. 2553 0x12345678 9
+. C7 44 24 44 AC 00 00 00 C3
+
+movl 172(%rsp), %edi
+. 2554 0x12345678 8
+. 8B BC 24 AC 00 00 00 C3
+
+movl $1752, %edx
+. 2555 0x12345678 6
+. BA D8 06 00 00 C3
+
+movl $1758, %edx
+. 2556 0x12345678 6
+. BA DE 06 00 00 C3
+
+movl $1764, %edx
+. 2557 0x12345678 6
+. BA E4 06 00 00 C3
+
+movl $17664, %edi
+. 2558 0x12345678 6
+. BF 00 45 00 00 C3
+
+movl $176, 68(%rsp)
+. 2559 0x12345678 9
+. C7 44 24 44 B0 00 00 00 C3
+
+movl 176(%rsp), %ecx
+. 2560 0x12345678 8
+. 8B 8C 24 B0 00 00 00 C3
+
+movl 176(%rsp), %edi
+. 2561 0x12345678 8
+. 8B BC 24 B0 00 00 00 C3
+
+movl $1770, %edx
+. 2562 0x12345678 6
+. BA EA 06 00 00 C3
+
+movl $178, %edx
+. 2563 0x12345678 6
+. BA B2 00 00 00 C3
+
+movl $17, %eax
+. 2564 0x12345678 6
+. B8 11 00 00 00 C3
+
+movl $1802, %edx
+. 2565 0x12345678 6
+. BA 0A 07 00 00 C3
+
+movl $1803, %edx
+. 2566 0x12345678 6
+. BA 0B 07 00 00 C3
+
+movl $180, %eax
+. 2567 0x12345678 6
+. B8 B4 00 00 00 C3
+
+movl $18176, %edi
+. 2568 0x12345678 6
+. BF 00 47 00 00 C3
+
+movl $181, %edx
+. 2569 0x12345678 6
+. BA B5 00 00 00 C3
+
+movl $182, %edx
+. 2570 0x12345678 6
+. BA B6 00 00 00 C3
+
+movl $183, %edx
+. 2571 0x12345678 6
+. BA B7 00 00 00 C3
+
+movl $184, %edx
+. 2572 0x12345678 6
+. BA B8 00 00 00 C3
+
+movl 184(%rsp), %edi
+. 2573 0x12345678 8
+. 8B BC 24 B8 00 00 00 C3
+
+movl $186, %edx
+. 2574 0x12345678 6
+. BA BA 00 00 00 C3
+
+movl $-1879048193, %edi
+. 2575 0x12345678 6
+. BF FF FF FF 8F C3
+
+movl $1, 88(%rsp)
+. 2576 0x12345678 9
+. C7 44 24 58 01 00 00 00 C3
+
+movl 188(%rsp), %edi
+. 2577 0x12345678 8
+. 8B BC 24 BC 00 00 00 C3
+
+movl $1, 8(%rcx,%rax)
+. 2578 0x12345678 9
+. C7 44 01 08 01 00 00 00 C3
+
+movl $1, 8(%rdx,%rax)
+. 2579 0x12345678 9
+. C7 44 02 08 01 00 00 00 C3
+
+movl $1, 8(%rsp)
+. 2580 0x12345678 9
+. C7 44 24 08 01 00 00 00 C3
+
+movl $190, %edx
+. 2581 0x12345678 6
+. BA BE 00 00 00 C3
+
+movl $1910, %edx
+. 2582 0x12345678 6
+. BA 76 07 00 00 C3
+
+movl $1911, %edx
+. 2583 0x12345678 6
+. BA 77 07 00 00 C3
+
+movl $19, 24(%rax)
+. 2584 0x12345678 8
+. C7 40 18 13 00 00 00 C3
+
+movl $192, %edx
+. 2585 0x12345678 6
+. BA C0 00 00 00 C3
+
+movl 192(%rsp), %edi
+. 2586 0x12345678 8
+. 8B BC 24 C0 00 00 00 C3
+
+movl $1944, %edx
+. 2587 0x12345678 6
+. BA 98 07 00 00 C3
+
+movl $1958, %edx
+. 2588 0x12345678 6
+. BA A6 07 00 00 C3
+
+movl $196, %eax
+. 2589 0x12345678 6
+. B8 C4 00 00 00 C3
+
+movl 196(%rsp), %edi
+. 2590 0x12345678 8
+. 8B BC 24 C4 00 00 00 C3
+
+movl $197, %edx
+. 2591 0x12345678 6
+. BA C5 00 00 00 C3
+
+movl $199, %edx
+. 2592 0x12345678 6
+. BA C7 00 00 00 C3
+
+movl $-1, %eax
+. 2593 0x12345678 6
+. B8 FF FF FF FF C3
+
+movl $1, %eax
+. 2594 0x12345678 6
+. B8 01 00 00 00 C3
+
+movl $1, %ebp
+. 2595 0x12345678 6
+. BD 01 00 00 00 C3
+
+movl $1, %ecx
+. 2596 0x12345678 6
+. B9 01 00 00 00 C3
+
+movl $-1, %edi
+. 2597 0x12345678 6
+. BF FF FF FF FF C3
+
+movl $1, %edi
+. 2598 0x12345678 6
+. BF 01 00 00 00 C3
+
+movl $1, %edx
+. 2599 0x12345678 6
+. BA 01 00 00 00 C3
+
+movl $1, %esi
+. 2600 0x12345678 6
+. BE 01 00 00 00 C3
+
+movl $1, %r11d
+. 2601 0x12345678 7
+. 41 BB 01 00 00 00 C3
+
+movl $1, %r12d
+. 2602 0x12345678 7
+. 41 BC 01 00 00 00 C3
+
+movl $-1, %r13d
+. 2603 0x12345678 7
+. 41 BD FF FF FF FF C3
+
+movl $-1, %r14d
+. 2604 0x12345678 7
+. 41 BE FF FF FF FF C3
+
+movl $1, %r14d
+. 2605 0x12345678 7
+. 41 BE 01 00 00 00 C3
+
+movl $1, %r15d
+. 2606 0x12345678 7
+. 41 BF 01 00 00 00 C3
+
+movl $-1, %r8d
+. 2607 0x12345678 7
+. 41 B8 FF FF FF FF C3
+
+movl $1, %r8d
+. 2608 0x12345678 7
+. 41 B8 01 00 00 00 C3
+
+movl $1, %r9d
+. 2609 0x12345678 7
+. 41 B9 01 00 00 00 C3
+
+movl $1, (%rax)
+. 2610 0x12345678 7
+. C7 00 01 00 00 00 C3
+
+movl $-1, (%rsp)
+. 2611 0x12345678 8
+. C7 04 24 FF FF FF FF C3
+
+movl 200(%rsp), %edi
+. 2612 0x12345678 8
+. 8B BC 24 C8 00 00 00 C3
+
+movl $2048, %edi
+. 2613 0x12345678 6
+. BF 00 08 00 00 C3
+
+movl 204(%rsp), %eax
+. 2614 0x12345678 8
+. 8B 84 24 CC 00 00 00 C3
+
+movl 204(%rsp), %ecx
+. 2615 0x12345678 8
+. 8B 8C 24 CC 00 00 00 C3
+
+movl 204(%rsp), %edi
+. 2616 0x12345678 8
+. 8B BC 24 CC 00 00 00 C3
+
+movl $205, %edx
+. 2617 0x12345678 6
+. BA CD 00 00 00 C3
+
+movl $206, %edx
+. 2618 0x12345678 6
+. BA CE 00 00 00 C3
+
+movl $207, %edx
+. 2619 0x12345678 6
+. BA CF 00 00 00 C3
+
+movl $2084, %edx
+. 2620 0x12345678 6
+. BA 24 08 00 00 C3
+
+movl $208, %edx
+. 2621 0x12345678 6
+. BA D0 00 00 00 C3
+
+movl 208(%rsp), %edi
+. 2622 0x12345678 8
+. 8B BC 24 D0 00 00 00 C3
+
+movl $2092, %edx
+. 2623 0x12345678 6
+. BA 2C 08 00 00 C3
+
+movl $2093, %edx
+. 2624 0x12345678 6
+. BA 2D 08 00 00 C3
+
+movl $2095, %edx
+. 2625 0x12345678 6
+. BA 2F 08 00 00 C3
+
+movl $2097152, %edi
+. 2626 0x12345678 6
+. BF 00 00 20 00 C3
+
+movl $20, %eax
+. 2627 0x12345678 6
+. B8 14 00 00 00 C3
+
+movl $20, %edi
+. 2628 0x12345678 6
+. BF 14 00 00 00 C3
+
+movl $20, %edx
+. 2629 0x12345678 6
+. BA 14 00 00 00 C3
+
+movl 20(%r12), %ebx
+. 2630 0x12345678 6
+. 41 8B 5C 24 14 C3
+
+movl 20(%r12), %edx
+. 2631 0x12345678 6
+. 41 8B 54 24 14 C3
+
+movl 20(%r13), %eax
+. 2632 0x12345678 5
+. 41 8B 45 14 C3
+
+movl 20(%r13), %ebp
+. 2633 0x12345678 5
+. 41 8B 6D 14 C3
+
+movl 20(%r13), %ecx
+. 2634 0x12345678 5
+. 41 8B 4D 14 C3
+
+movl 20(%r13), %edx
+. 2635 0x12345678 5
+. 41 8B 55 14 C3
+
+movl 20(%r13), %r14d
+. 2636 0x12345678 5
+. 45 8B 75 14 C3
+
+movl 20(%r14), %ebp
+. 2637 0x12345678 5
+. 41 8B 6E 14 C3
+
+movl 20(%r14), %ecx
+. 2638 0x12345678 5
+. 41 8B 4E 14 C3
+
+movl 20(%r14), %edx
+. 2639 0x12345678 5
+. 41 8B 56 14 C3
+
+movl 20(%r15), %ecx
+. 2640 0x12345678 5
+. 41 8B 4F 14 C3
+
+movl 20(%r8), %esi
+. 2641 0x12345678 5
+. 41 8B 70 14 C3
+
+movl 20(%rax), %eax
+. 2642 0x12345678 4
+. 8B 40 14 C3
+
+movl 20(%rax), %edx
+. 2643 0x12345678 4
+. 8B 50 14 C3
+
+movl 20(%rax), %r10d
+. 2644 0x12345678 5
+. 44 8B 50 14 C3
+
+movl 20(%rax), %r14d
+. 2645 0x12345678 5
+. 44 8B 70 14 C3
+
+movl 20(%rbx), %eax
+. 2646 0x12345678 4
+. 8B 43 14 C3
+
+movl 20(%rbx), %ecx
+. 2647 0x12345678 4
+. 8B 4B 14 C3
+
+movl 20(%rbx), %r14d
+. 2648 0x12345678 5
+. 44 8B 73 14 C3
+
+movl $20, (%rdi)
+. 2649 0x12345678 7
+. C7 07 14 00 00 00 C3
+
+movl 20(%rdi), %ecx
+. 2650 0x12345678 4
+. 8B 4F 14 C3
+
+movl 20(%rdi), %r8d
+. 2651 0x12345678 5
+. 44 8B 47 14 C3
+
+movl 20(%rsp), %eax
+. 2652 0x12345678 5
+. 8B 44 24 14 C3
+
+movl 20(%rsp), %edi
+. 2653 0x12345678 5
+. 8B 7C 24 14 C3
+
+movl 20(%rsp), %edx
+. 2654 0x12345678 5
+. 8B 54 24 14 C3
+
+movl 20(%rsp), %esi
+. 2655 0x12345678 5
+. 8B 74 24 14 C3
+
+movl $2, 108(%rsp)
+. 2656 0x12345678 9
+. C7 44 24 6C 02 00 00 00 C3
+
+movl $210, %edx
+. 2657 0x12345678 6
+. BA D2 00 00 00 C3
+
+movl $2124, %edx
+. 2658 0x12345678 6
+. BA 4C 08 00 00 C3
+
+movl $212, %eax
+. 2659 0x12345678 6
+. B8 D4 00 00 00 C3
+
+movl 212(%rsp), %edi
+. 2660 0x12345678 8
+. 8B BC 24 D4 00 00 00 C3
+
+movl $213, %edi
+. 2661 0x12345678 6
+. BF D5 00 00 00 C3
+
+movl $-2147483648, %edi
+. 2662 0x12345678 6
+. BF 00 00 00 80 C3
+
+movl $2, 148(%rsp)
+. 2663 0x12345678 12
+. C7 84 24 94 00 00 00 02 00 00 00 C3
+
+movl $2, 16(%rdi,%rax,4)
+. 2664 0x12345678 9
+. C7 44 87 10 02 00 00 00 C3
+
+movl 216(%rsp), %edi
+. 2665 0x12345678 8
+. 8B BC 24 D8 00 00 00 C3
+
+movl $2172, %edx
+. 2666 0x12345678 6
+. BA 7C 08 00 00 C3
+
+movl $2173, %edx
+. 2667 0x12345678 6
+. BA 7D 08 00 00 C3
+
+movl $2184, %edx
+. 2668 0x12345678 6
+. BA 88 08 00 00 C3
+
+movl $2185, %edx
+. 2669 0x12345678 6
+. BA 89 08 00 00 C3
+
+movl $2186, %edx
+. 2670 0x12345678 6
+. BA 8A 08 00 00 C3
+
+movl $2187, %edx
+. 2671 0x12345678 6
+. BA 8B 08 00 00 C3
+
+movl $2188, %edx
+. 2672 0x12345678 6
+. BA 8C 08 00 00 C3
+
+movl $2195, %edx
+. 2673 0x12345678 6
+. BA 93 08 00 00 C3
+
+movl $2198, %edx
+. 2674 0x12345678 6
+. BA 96 08 00 00 C3
+
+movl $2199, %edx
+. 2675 0x12345678 6
+. BA 97 08 00 00 C3
+
+movl $21, %edi
+. 2676 0x12345678 6
+. BF 15 00 00 00 C3
+
+movl $2205, %edx
+. 2677 0x12345678 6
+. BA 9D 08 00 00 C3
+
+movl $2206, %edx
+. 2678 0x12345678 6
+. BA 9E 08 00 00 C3
+
+movl 220(%rsp), %edi
+. 2679 0x12345678 8
+. 8B BC 24 DC 00 00 00 C3
+
+movl $2211, %edx
+. 2680 0x12345678 6
+. BA A3 08 00 00 C3
+
+movl $2235, %edx
+. 2681 0x12345678 6
+. BA BB 08 00 00 C3
+
+movl $223, %edx
+. 2682 0x12345678 6
+. BA DF 00 00 00 C3
+
+movl $2240, %edx
+. 2683 0x12345678 6
+. BA C0 08 00 00 C3
+
+movl 224(%rsp), %ecx
+. 2684 0x12345678 8
+. 8B 8C 24 E0 00 00 00 C3
+
+movl 224(%rsp), %edi
+. 2685 0x12345678 8
+. 8B BC 24 E0 00 00 00 C3
+
+movl $2256, %edx
+. 2686 0x12345678 6
+. BA D0 08 00 00 C3
+
+movl $2261, %edi
+. 2687 0x12345678 6
+. BF D5 08 00 00 C3
+
+movl $2277, %edx
+. 2688 0x12345678 6
+. BA E5 08 00 00 C3
+
+movl $227, %edx
+. 2689 0x12345678 6
+. BA E3 00 00 00 C3
+
+movl $2285, %edx
+. 2690 0x12345678 6
+. BA ED 08 00 00 C3
+
+movl $228, %eax
+. 2691 0x12345678 6
+. B8 E4 00 00 00 C3
+
+movl 228(%rsp), %edi
+. 2692 0x12345678 8
+. 8B BC 24 E4 00 00 00 C3
+
+movl $2301, %edx
+. 2693 0x12345678 6
+. BA FD 08 00 00 C3
+
+movl $2302, %edx
+. 2694 0x12345678 6
+. BA FE 08 00 00 C3
+
+movl $2312, %edx
+. 2695 0x12345678 6
+. BA 08 09 00 00 C3
+
+movl $2319, %edx
+. 2696 0x12345678 6
+. BA 0F 09 00 00 C3
+
+movl $2324, %edx
+. 2697 0x12345678 6
+. BA 14 09 00 00 C3
+
+movl $232, %edx
+. 2698 0x12345678 6
+. BA E8 00 00 00 C3
+
+movl 232(%rsp), %edi
+. 2699 0x12345678 8
+. 8B BC 24 E8 00 00 00 C3
+
+movl $2336, %edx
+. 2700 0x12345678 6
+. BA 20 09 00 00 C3
+
+movl $2, 348(%rsp)
+. 2701 0x12345678 12
+. C7 84 24 5C 01 00 00 02 00 00 00 C3
+
+movl $234, %edx
+. 2702 0x12345678 6
+. BA EA 00 00 00 C3
+
+movl $2350, %edx
+. 2703 0x12345678 6
+. BA 2E 09 00 00 C3
+
+movl $2352, %edx
+. 2704 0x12345678 6
+. BA 30 09 00 00 C3
+
+movl $2355, %edx
+. 2705 0x12345678 6
+. BA 33 09 00 00 C3
+
+movl $2362, %edx
+. 2706 0x12345678 6
+. BA 3A 09 00 00 C3
+
+movl $2363, %edx
+. 2707 0x12345678 6
+. BA 3B 09 00 00 C3
+
+movl $2395, %edx
+. 2708 0x12345678 6
+. BA 5B 09 00 00 C3
+
+movl $2396, %edx
+. 2709 0x12345678 6
+. BA 5C 09 00 00 C3
+
+movl $240, %edx
+. 2710 0x12345678 6
+. BA F0 00 00 00 C3
+
+movl 240(%rsp), %edi
+. 2711 0x12345678 8
+. 8B BC 24 F0 00 00 00 C3
+
+movl $24, 156(%rsp)
+. 2712 0x12345678 12
+. C7 84 24 9C 00 00 00 18 00 00 00 C3
+
+movl $244, %eax
+. 2713 0x12345678 6
+. B8 F4 00 00 00 C3
+
+movl 244(%rsp), %edi
+. 2714 0x12345678 8
+. 8B BC 24 F4 00 00 00 C3
+
+movl $247, %edx
+. 2715 0x12345678 6
+. BA F7 00 00 00 C3
+
+movl $248, %edx
+. 2716 0x12345678 6
+. BA F8 00 00 00 C3
+
+movl 248(%rsp), %edi
+. 2717 0x12345678 8
+. 8B BC 24 F8 00 00 00 C3
+
+movl $249, %edx
+. 2718 0x12345678 6
+. BA F9 00 00 00 C3
+
+movl $24, %eax
+. 2719 0x12345678 6
+. B8 18 00 00 00 C3
+
+movl $24, %edi
+. 2720 0x12345678 6
+. BF 18 00 00 00 C3
+
+movl 24(%r12), %eax
+. 2721 0x12345678 6
+. 41 8B 44 24 18 C3
+
+movl 24(%r12), %edx
+. 2722 0x12345678 6
+. 41 8B 54 24 18 C3
+
+movl 24(%r12), %r9d
+. 2723 0x12345678 6
+. 45 8B 4C 24 18 C3
+
+movl 24(%r13), %eax
+. 2724 0x12345678 5
+. 41 8B 45 18 C3
+
+movl 24(%r13), %edx
+. 2725 0x12345678 5
+. 41 8B 55 18 C3
+
+movl 24(%r13), %r12d
+. 2726 0x12345678 5
+. 45 8B 65 18 C3
+
+movl 24(%rax), %eax
+. 2727 0x12345678 4
+. 8B 40 18 C3
+
+movl 24(%rbp), %eax
+. 2728 0x12345678 4
+. 8B 45 18 C3
+
+movl 24(%rbp), %ebx
+. 2729 0x12345678 4
+. 8B 5D 18 C3
+
+movl 24(%rbp), %edx
+. 2730 0x12345678 4
+. 8B 55 18 C3
+
+movl 24(%rbp), %esi
+. 2731 0x12345678 4
+. 8B 75 18 C3
+
+movl 24(%rbp), %r13d
+. 2732 0x12345678 5
+. 44 8B 6D 18 C3
+
+movl 24(%rbx), %ecx
+. 2733 0x12345678 4
+. 8B 4B 18 C3
+
+movl 24(%rbx), %edx
+. 2734 0x12345678 4
+. 8B 53 18 C3
+
+movl 24(%rbx), %esi
+. 2735 0x12345678 4
+. 8B 73 18 C3
+
+movl $2, 4(%rdi)
+. 2736 0x12345678 8
+. C7 47 04 02 00 00 00 C3
+
+movl 24(%rdi), %edi
+. 2737 0x12345678 4
+. 8B 7F 18 C3
+
+movl 24(%rdi), %r12d
+. 2738 0x12345678 5
+. 44 8B 67 18 C3
+
+movl 24(%rdx), %eax
+. 2739 0x12345678 4
+. 8B 42 18 C3
+
+movl 24(%rdx), %r9d
+. 2740 0x12345678 5
+. 44 8B 4A 18 C3
+
+movl 24(%rsi), %ebx
+. 2741 0x12345678 4
+. 8B 5E 18 C3
+
+movl 24(%rsp), %eax
+. 2742 0x12345678 5
+. 8B 44 24 18 C3
+
+movl 24(%rsp), %ecx
+. 2743 0x12345678 5
+. 8B 4C 24 18 C3
+
+movl 24(%rsp), %edi
+. 2744 0x12345678 5
+. 8B 7C 24 18 C3
+
+movl 24(%rsp), %edx
+. 2745 0x12345678 5
+. 8B 54 24 18 C3
+
+movl 24(%rsp), %esi
+. 2746 0x12345678 5
+. 8B 74 24 18 C3
+
+movl $250, %edx
+. 2747 0x12345678 6
+. BA FA 00 00 00 C3
+
+movl 252(%rsp), %edi
+. 2748 0x12345678 8
+. 8B BC 24 FC 00 00 00 C3
+
+movl $254, %edx
+. 2749 0x12345678 6
+. BA FE 00 00 00 C3
+
+movl $255, %eax
+. 2750 0x12345678 6
+. B8 FF 00 00 00 C3
+
+movl $255, %edi
+. 2751 0x12345678 6
+. BF FF 00 00 00 C3
+
+movl $255, %r10d
+. 2752 0x12345678 7
+. 41 BA FF 00 00 00 C3
+
+movl $255, %r12d
+. 2753 0x12345678 7
+. 41 BC FF 00 00 00 C3
+
+movl $255, %r9d
+. 2754 0x12345678 7
+. 41 B9 FF 00 00 00 C3
+
+movl $256, %edx
+. 2755 0x12345678 6
+. BA 00 01 00 00 C3
+
+movl $25, %eax
+. 2756 0x12345678 6
+. B8 19 00 00 00 C3
+
+movl $260, %eax
+. 2757 0x12345678 6
+. B8 04 01 00 00 C3
+
+movl $2611, %edx
+. 2758 0x12345678 6
+. BA 33 0A 00 00 C3
+
+movl $2617, %edx
+. 2759 0x12345678 6
+. BA 39 0A 00 00 C3
+
+movl $2621, %edx
+. 2760 0x12345678 6
+. BA 3D 0A 00 00 C3
+
+movl $2645, %edx
+. 2761 0x12345678 6
+. BA 55 0A 00 00 C3
+
+movl $2649, %edx
+. 2762 0x12345678 6
+. BA 59 0A 00 00 C3
+
+movl $265, %edx
+. 2763 0x12345678 6
+. BA 09 01 00 00 C3
+
+movl $2689, %edx
+. 2764 0x12345678 6
+. BA 81 0A 00 00 C3
+
+movl $2715, %edx
+. 2765 0x12345678 6
+. BA 9B 0A 00 00 C3
+
+movl 272(%rsp), %edi
+. 2766 0x12345678 8
+. 8B BC 24 10 01 00 00 C3
+
+movl $2733, %edx
+. 2767 0x12345678 6
+. BA AD 0A 00 00 C3
+
+movl $273, %edx
+. 2768 0x12345678 6
+. BA 11 01 00 00 C3
+
+movl $2740, %edx
+. 2769 0x12345678 6
+. BA B4 0A 00 00 C3
+
+movl $2765, %edx
+. 2770 0x12345678 6
+. BA CD 0A 00 00 C3
+
+movl $2768, %edx
+. 2771 0x12345678 6
+. BA D0 0A 00 00 C3
+
+movl $276, %eax
+. 2772 0x12345678 6
+. B8 14 01 00 00 C3
+
+movl $276, %edx
+. 2773 0x12345678 6
+. BA 14 01 00 00 C3
+
+movl 276(%rsp), %edi
+. 2774 0x12345678 8
+. 8B BC 24 14 01 00 00 C3
+
+movl 276(%rsp), %edx
+. 2775 0x12345678 8
+. 8B 94 24 14 01 00 00 C3
+
+movl $2771, %edx
+. 2776 0x12345678 6
+. BA D3 0A 00 00 C3
+
+movl $2774, %edx
+. 2777 0x12345678 6
+. BA D6 0A 00 00 C3
+
+movl $2776, %edx
+. 2778 0x12345678 6
+. BA D8 0A 00 00 C3
+
+movl $2777, %edx
+. 2779 0x12345678 6
+. BA D9 0A 00 00 C3
+
+movl $278, %eax
+. 2780 0x12345678 6
+. B8 16 01 00 00 C3
+
+movl $2799, %edx
+. 2781 0x12345678 6
+. BA EF 0A 00 00 C3
+
+movl $27, %eax
+. 2782 0x12345678 6
+. B8 1B 00 00 00 C3
+
+movl $280, %eax
+. 2783 0x12345678 6
+. B8 18 01 00 00 C3
+
+movl $281, %edx
+. 2784 0x12345678 6
+. BA 19 01 00 00 C3
+
+movl $282, %eax
+. 2785 0x12345678 6
+. B8 1A 01 00 00 C3
+
+movl $283, %edx
+. 2786 0x12345678 6
+. BA 1B 01 00 00 C3
+
+movl $28, 40(%rbx)
+. 2787 0x12345678 8
+. C7 43 28 1C 00 00 00 C3
+
+movl $284, %eax
+. 2788 0x12345678 6
+. B8 1C 01 00 00 C3
+
+movl $284, %edx
+. 2789 0x12345678 6
+. BA 1C 01 00 00 C3
+
+movl 284(%rsp), %ebx
+. 2790 0x12345678 8
+. 8B 9C 24 1C 01 00 00 C3
+
+movl $286, %eax
+. 2791 0x12345678 6
+. B8 1E 01 00 00 C3
+
+movl $286, %edx
+. 2792 0x12345678 6
+. BA 1E 01 00 00 C3
+
+movl $2875, %edx
+. 2793 0x12345678 6
+. BA 3B 0B 00 00 C3
+
+movl $287, %edx
+. 2794 0x12345678 6
+. BA 1F 01 00 00 C3
+
+movl $2884, %edx
+. 2795 0x12345678 6
+. BA 44 0B 00 00 C3
+
+movl $288, %edi
+. 2796 0x12345678 6
+. BF 20 01 00 00 C3
+
+movl $288, %edx
+. 2797 0x12345678 6
+. BA 20 01 00 00 C3
+
+movl $2, 88(%rsp)
+. 2798 0x12345678 9
+. C7 44 24 58 02 00 00 00 C3
+
+movl 288(%rsp), %eax
+. 2799 0x12345678 8
+. 8B 84 24 20 01 00 00 C3
+
+movl 288(%rsp), %ecx
+. 2800 0x12345678 8
+. 8B 8C 24 20 01 00 00 C3
+
+movl $28, %eax
+. 2801 0x12345678 6
+. B8 1C 00 00 00 C3
+
+movl $28, %edi
+. 2802 0x12345678 6
+. BF 1C 00 00 00 C3
+
+movl $28, %edx
+. 2803 0x12345678 6
+. BA 1C 00 00 00 C3
+
+movl 28(%r12), %edi
+. 2804 0x12345678 6
+. 41 8B 7C 24 1C C3
+
+movl 28(%r12), %edx
+. 2805 0x12345678 6
+. 41 8B 54 24 1C C3
+
+movl 28(%r13), %ecx
+. 2806 0x12345678 5
+. 41 8B 4D 1C C3
+
+movl 28(%rbp), %eax
+. 2807 0x12345678 4
+. 8B 45 1C C3
+
+movl 28(%rbx), %edx
+. 2808 0x12345678 4
+. 8B 53 1C C3
+
+movl 28(%rdi), %edx
+. 2809 0x12345678 4
+. 8B 57 1C C3
+
+movl 28(%rdi), %r10d
+. 2810 0x12345678 5
+. 44 8B 57 1C C3
+
+movl 28(%rdi), %r11d
+. 2811 0x12345678 5
+. 44 8B 5F 1C C3
+
+movl $2, 8(%rdx,%rbx)
+. 2812 0x12345678 9
+. C7 44 1A 08 02 00 00 00 C3
+
+movl 28(%rsp), %eax
+. 2813 0x12345678 5
+. 8B 44 24 1C C3
+
+movl 28(%rsp), %edi
+. 2814 0x12345678 5
+. 8B 7C 24 1C C3
+
+movl 28(%rsp), %esi
+. 2815 0x12345678 5
+. 8B 74 24 1C C3
+
+movl $2915, %edx
+. 2816 0x12345678 6
+. BA 63 0B 00 00 C3
+
+movl $2924, %edx
+. 2817 0x12345678 6
+. BA 6C 0B 00 00 C3
+
+movl $2929, %edx
+. 2818 0x12345678 6
+. BA 71 0B 00 00 C3
+
+movl 292(%rsp), %ebp
+. 2819 0x12345678 8
+. 8B AC 24 24 01 00 00 C3
+
+movl 292(%rsp), %edi
+. 2820 0x12345678 8
+. 8B BC 24 24 01 00 00 C3
+
+movl 292(%rsp), %esi
+. 2821 0x12345678 8
+. 8B B4 24 24 01 00 00 C3
+
+movl $294, %edx
+. 2822 0x12345678 6
+. BA 26 01 00 00 C3
+
+movl $296, %edi
+. 2823 0x12345678 6
+. BF 28 01 00 00 C3
+
+movl 296(%rsp), %eax
+. 2824 0x12345678 8
+. 8B 84 24 28 01 00 00 C3
+
+movl $2971, %edx
+. 2825 0x12345678 6
+. BA 9B 0B 00 00 C3
+
+movl $29, %edi
+. 2826 0x12345678 6
+. BF 1D 00 00 00 C3
+
+movl $2, %eax
+. 2827 0x12345678 6
+. B8 02 00 00 00 C3
+
+movl $2, %ecx
+. 2828 0x12345678 6
+. B9 02 00 00 00 C3
+
+movl $2, %edi
+. 2829 0x12345678 6
+. BF 02 00 00 00 C3
+
+movl $2, %edx
+. 2830 0x12345678 6
+. BA 02 00 00 00 C3
+
+movl $2, %esi
+. 2831 0x12345678 6
+. BE 02 00 00 00 C3
+
+movl $2, %r14d
+. 2832 0x12345678 7
+. 41 BE 02 00 00 00 C3
+
+movl $-2, (%r14,%rax,4)
+. 2833 0x12345678 9
+. 41 C7 04 86 FE FF FF FF C3
+
+movl $2, (%rax)
+. 2834 0x12345678 7
+. C7 00 02 00 00 00 C3
+
+movl $2, (%rcx)
+. 2835 0x12345678 7
+. C7 01 02 00 00 00 C3
+
+movl $-2, (%rcx,%rax,4)
+. 2836 0x12345678 8
+. C7 04 81 FE FF FF FF C3
+
+movl $2, (%rdx)
+. 2837 0x12345678 7
+. C7 02 02 00 00 00 C3
+
+movl $300, %edx
+. 2838 0x12345678 6
+. BA 2C 01 00 00 C3
+
+movl $304, %edi
+. 2839 0x12345678 6
+. BF 30 01 00 00 C3
+
+movl $304, %edx
+. 2840 0x12345678 6
+. BA 30 01 00 00 C3
+
+movl $305, %edx
+. 2841 0x12345678 6
+. BA 31 01 00 00 C3
+
+movl $309, %edx
+. 2842 0x12345678 6
+. BA 35 01 00 00 C3
+
+movl $30, %edi
+. 2843 0x12345678 6
+. BF 1E 00 00 00 C3
+
+movl $312, 68(%rsp)
+. 2844 0x12345678 9
+. C7 44 24 44 38 01 00 00 C3
+
+movl 312(%rsp), %ecx
+. 2845 0x12345678 8
+. 8B 8C 24 38 01 00 00 C3
+
+movl $314, %edx
+. 2846 0x12345678 6
+. BA 3A 01 00 00 C3
+
+movl $3181, %edx
+. 2847 0x12345678 6
+. BA 6D 0C 00 00 C3
+
+movl $3183, %edx
+. 2848 0x12345678 6
+. BA 6F 0C 00 00 C3
+
+movl $31, %eax
+. 2849 0x12345678 6
+. B8 1F 00 00 00 C3
+
+movl $31, %edi
+. 2850 0x12345678 6
+. BF 1F 00 00 00 C3
+
+movl $3203, %edx
+. 2851 0x12345678 6
+. BA 83 0C 00 00 C3
+
+movl $323, %edx
+. 2852 0x12345678 6
+. BA 43 01 00 00 C3
+
+movl $3, 24(%rax)
+. 2853 0x12345678 8
+. C7 40 18 03 00 00 00 C3
+
+movl $325, %edx
+. 2854 0x12345678 6
+. BA 45 01 00 00 C3
+
+movl $326, %edx
+. 2855 0x12345678 6
+. BA 46 01 00 00 C3
+
+movl $32767, %ecx
+. 2856 0x12345678 6
+. B9 FF 7F 00 00 C3
+
+movl $32767, %r9d
+. 2857 0x12345678 7
+. 41 B9 FF 7F 00 00 C3
+
+movl $-32768, %r10d
+. 2858 0x12345678 7
+. 41 BA 00 80 FF FF C3
+
+movl $-32768, %r8d
+. 2859 0x12345678 7
+. 41 B8 00 80 FF FF C3
+
+movl $327, %edx
+. 2860 0x12345678 6
+. BA 47 01 00 00 C3
+
+movl $32, 8(%rsp)
+. 2861 0x12345678 9
+. C7 44 24 08 20 00 00 00 C3
+
+movl $3298, %edx
+. 2862 0x12345678 6
+. BA E2 0C 00 00 C3
+
+movl $329, %edx
+. 2863 0x12345678 6
+. BA 49 01 00 00 C3
+
+movl $32, %eax
+. 2864 0x12345678 6
+. B8 20 00 00 00 C3
+
+movl $32, %edi
+. 2865 0x12345678 6
+. BF 20 00 00 00 C3
+
+movl $32, %edx
+. 2866 0x12345678 6
+. BA 20 00 00 00 C3
+
+movl $32, %esi
+. 2867 0x12345678 6
+. BE 20 00 00 00 C3
+
+movl 32(%r13), %eax
+. 2868 0x12345678 5
+. 41 8B 45 20 C3
+
+movl $32, %r14d
+. 2869 0x12345678 7
+. 41 BE 20 00 00 00 C3
+
+movl 32(%r14), %eax
+. 2870 0x12345678 5
+. 41 8B 46 20 C3
+
+movl 32(%r15), %eax
+. 2871 0x12345678 5
+. 41 8B 47 20 C3
+
+movl 32(%r15), %ebx
+. 2872 0x12345678 5
+. 41 8B 5F 20 C3
+
+movl 32(%rax), %edi
+. 2873 0x12345678 4
+. 8B 78 20 C3
+
+movl 32(%rbp), %ebx
+. 2874 0x12345678 4
+. 8B 5D 20 C3
+
+movl 32(%rbp), %edi
+. 2875 0x12345678 4
+. 8B 7D 20 C3
+
+movl 32(%rbp), %edx
+. 2876 0x12345678 4
+. 8B 55 20 C3
+
+movl 32(%rdi), %eax
+. 2877 0x12345678 4
+. 8B 47 20 C3
+
+movl 32(%rdi), %ecx
+. 2878 0x12345678 4
+. 8B 4F 20 C3
+
+movl 32(%rdi), %r8d
+. 2879 0x12345678 5
+. 44 8B 47 20 C3
+
+movl 32(%rdx), %eax
+. 2880 0x12345678 4
+. 8B 42 20 C3
+
+movl 32(%rsp), %eax
+. 2881 0x12345678 5
+. 8B 44 24 20 C3
+
+movl 32(%rsp), %ecx
+. 2882 0x12345678 5
+. 8B 4C 24 20 C3
+
+movl 32(%rsp), %edi
+. 2883 0x12345678 5
+. 8B 7C 24 20 C3
+
+movl 32(%rsp), %edx
+. 2884 0x12345678 5
+. 8B 54 24 20 C3
+
+movl 32(%rsp), %esi
+. 2885 0x12345678 5
+. 8B 74 24 20 C3
+
+movl 32(%rsp), %r8d
+. 2886 0x12345678 6
+. 44 8B 44 24 20 C3
+
+movl 32(%rsp), %r9d
+. 2887 0x12345678 6
+. 44 8B 4C 24 20 C3
+
+movl $3310, %edx
+. 2888 0x12345678 6
+. BA EE 0C 00 00 C3
+
+movl $3315, %edx
+. 2889 0x12345678 6
+. BA F3 0C 00 00 C3
+
+movl $3339, %edx
+. 2890 0x12345678 6
+. BA 0B 0D 00 00 C3
+
+movl $333, %edx
+. 2891 0x12345678 6
+. BA 4D 01 00 00 C3
+
+movl $3345, %edx
+. 2892 0x12345678 6
+. BA 11 0D 00 00 C3
+
+movl $3354, %edx
+. 2893 0x12345678 6
+. BA 1A 0D 00 00 C3
+
+movl $3357, %edx
+. 2894 0x12345678 6
+. BA 1D 0D 00 00 C3
+
+movl $3365, %edx
+. 2895 0x12345678 6
+. BA 25 0D 00 00 C3
+
+movl 336(%rsp), %eax
+. 2896 0x12345678 8
+. 8B 84 24 50 01 00 00 C3
+
+movl 340(%rsp), %eax
+. 2897 0x12345678 8
+. 8B 84 24 54 01 00 00 C3
+
+movl $342, %edx
+. 2898 0x12345678 6
+. BA 56 01 00 00 C3
+
+movl $345, %edx
+. 2899 0x12345678 6
+. BA 59 01 00 00 C3
+
+movl $3461, %edx
+. 2900 0x12345678 6
+. BA 85 0D 00 00 C3
+
+movl $3464, %edx
+. 2901 0x12345678 6
+. BA 88 0D 00 00 C3
+
+movl $3465, %edx
+. 2902 0x12345678 6
+. BA 89 0D 00 00 C3
+
+movl $3469, %edx
+. 2903 0x12345678 6
+. BA 8D 0D 00 00 C3
+
+movl $3471, %edx
+. 2904 0x12345678 6
+. BA 8F 0D 00 00 C3
+
+movl $3473, %edx
+. 2905 0x12345678 6
+. BA 91 0D 00 00 C3
+
+movl $3478, %edx
+. 2906 0x12345678 6
+. BA 96 0D 00 00 C3
+
+movl $347, %edx
+. 2907 0x12345678 6
+. BA 5B 01 00 00 C3
+
+movl 348(%rsp), %r12d
+. 2908 0x12345678 9
+. 44 8B A4 24 5C 01 00 00 C3
+
+movl $349, %edx
+. 2909 0x12345678 6
+. BA 5D 01 00 00 C3
+
+movl $350, %edx
+. 2910 0x12345678 6
+. BA 5E 01 00 00 C3
+
+movl $351, %edx
+. 2911 0x12345678 6
+. BA 5F 01 00 00 C3
+
+movl $352, %edx
+. 2912 0x12345678 6
+. BA 60 01 00 00 C3
+
+movl 360(%rsp), %eax
+. 2913 0x12345678 8
+. 8B 84 24 68 01 00 00 C3
+
+movl $3644, %edx
+. 2914 0x12345678 6
+. BA 3C 0E 00 00 C3
+
+movl $3, 64(%rsp)
+. 2915 0x12345678 9
+. C7 44 24 40 03 00 00 00 C3
+
+movl 364(%rsp), %eax
+. 2916 0x12345678 8
+. 8B 84 24 6C 01 00 00 C3
+
+movl $36, %eax
+. 2917 0x12345678 6
+. B8 24 00 00 00 C3
+
+movl $36, %edi
+. 2918 0x12345678 6
+. BF 24 00 00 00 C3
+
+movl 36(%rsp), %eax
+. 2919 0x12345678 5
+. 8B 44 24 24 C3
+
+movl 36(%rsp), %edi
+. 2920 0x12345678 5
+. 8B 7C 24 24 C3
+
+movl 36(%rsp), %edx
+. 2921 0x12345678 5
+. 8B 54 24 24 C3
+
+movl 36(%rsp), %esi
+. 2922 0x12345678 5
+. 8B 74 24 24 C3
+
+movl $375, %edx
+. 2923 0x12345678 6
+. BA 77 01 00 00 C3
+
+movl 376(%rsp), %ecx
+. 2924 0x12345678 8
+. 8B 8C 24 78 01 00 00 C3
+
+movl 376(%rsp), %edi
+. 2925 0x12345678 8
+. 8B BC 24 78 01 00 00 C3
+
+movl 376(%rsp), %edx
+. 2926 0x12345678 8
+. 8B 94 24 78 01 00 00 C3
+
+movl 376(%rsp), %esi
+. 2927 0x12345678 8
+. 8B B4 24 78 01 00 00 C3
+
+movl $37, %ecx
+. 2928 0x12345678 6
+. B9 25 00 00 00 C3
+
+movl $37, %edi
+. 2929 0x12345678 6
+. BF 25 00 00 00 C3
+
+movl 384(%rsp), %ecx
+. 2930 0x12345678 8
+. 8B 8C 24 80 01 00 00 C3
+
+movl 384(%rsp), %edi
+. 2931 0x12345678 8
+. 8B BC 24 80 01 00 00 C3
+
+movl 384(%rsp), %edx
+. 2932 0x12345678 8
+. 8B 94 24 80 01 00 00 C3
+
+movl 384(%rsp), %esi
+. 2933 0x12345678 8
+. 8B B4 24 80 01 00 00 C3
+
+movl $388, %edx
+. 2934 0x12345678 6
+. BA 84 01 00 00 C3
+
+movl $3, 88(%rsp)
+. 2935 0x12345678 9
+. C7 44 24 58 03 00 00 00 C3
+
+movl 388(%rsp), %ecx
+. 2936 0x12345678 8
+. 8B 8C 24 84 01 00 00 C3
+
+movl 388(%rsp), %edi
+. 2937 0x12345678 8
+. 8B BC 24 84 01 00 00 C3
+
+movl 388(%rsp), %edx
+. 2938 0x12345678 8
+. 8B 94 24 84 01 00 00 C3
+
+movl 388(%rsp), %esi
+. 2939 0x12345678 8
+. 8B B4 24 84 01 00 00 C3
+
+movl $390, %edx
+. 2940 0x12345678 6
+. BA 86 01 00 00 C3
+
+movl 392(%rsp), %ecx
+. 2941 0x12345678 8
+. 8B 8C 24 88 01 00 00 C3
+
+movl 392(%rsp), %edi
+. 2942 0x12345678 8
+. 8B BC 24 88 01 00 00 C3
+
+movl 392(%rsp), %edx
+. 2943 0x12345678 8
+. 8B 94 24 88 01 00 00 C3
+
+movl 392(%rsp), %esi
+. 2944 0x12345678 8
+. 8B B4 24 88 01 00 00 C3
+
+movl $3, %eax
+. 2945 0x12345678 6
+. B8 03 00 00 00 C3
+
+movl $3, %ebp
+. 2946 0x12345678 6
+. BD 03 00 00 00 C3
+
+movl $3, %ecx
+. 2947 0x12345678 6
+. B9 03 00 00 00 C3
+
+movl $3, %edi
+. 2948 0x12345678 6
+. BF 03 00 00 00 C3
+
+movl $3, %edx
+. 2949 0x12345678 6
+. BA 03 00 00 00 C3
+
+movl $3, %esi
+. 2950 0x12345678 6
+. BE 03 00 00 00 C3
+
+movl $3, (%rax)
+. 2951 0x12345678 7
+. C7 00 03 00 00 00 C3
+
+movl $3, (%rcx)
+. 2952 0x12345678 7
+. C7 01 03 00 00 00 C3
+
+movl $3, (%rdx)
+. 2953 0x12345678 7
+. C7 02 03 00 00 00 C3
+
+movl 400(%rsp), %ecx
+. 2954 0x12345678 8
+. 8B 8C 24 90 01 00 00 C3
+
+movl 400(%rsp), %edi
+. 2955 0x12345678 8
+. 8B BC 24 90 01 00 00 C3
+
+movl 400(%rsp), %edx
+. 2956 0x12345678 8
+. 8B 94 24 90 01 00 00 C3
+
+movl 400(%rsp), %esi
+. 2957 0x12345678 8
+. 8B B4 24 90 01 00 00 C3
+
+movl $402, %edx
+. 2958 0x12345678 6
+. BA 92 01 00 00 C3
+
+movl $403, %edx
+. 2959 0x12345678 6
+. BA 93 01 00 00 C3
+
+movl $404, %edx
+. 2960 0x12345678 6
+. BA 94 01 00 00 C3
+
+movl 404(%rsp), %ecx
+. 2961 0x12345678 8
+. 8B 8C 24 94 01 00 00 C3
+
+movl 404(%rsp), %edi
+. 2962 0x12345678 8
+. 8B BC 24 94 01 00 00 C3
+
+movl 404(%rsp), %edx
+. 2963 0x12345678 8
+. 8B 94 24 94 01 00 00 C3
+
+movl 404(%rsp), %esi
+. 2964 0x12345678 8
+. 8B B4 24 94 01 00 00 C3
+
+movl $405, %edx
+. 2965 0x12345678 6
+. BA 95 01 00 00 C3
+
+movl $406, %edx
+. 2966 0x12345678 6
+. BA 96 01 00 00 C3
+
+movl $408, %edx
+. 2967 0x12345678 6
+. BA 98 01 00 00 C3
+
+movl 408(%rsp), %ecx
+. 2968 0x12345678 8
+. 8B 8C 24 98 01 00 00 C3
+
+movl 408(%rsp), %edi
+. 2969 0x12345678 8
+. 8B BC 24 98 01 00 00 C3
+
+movl 408(%rsp), %edx
+. 2970 0x12345678 8
+. 8B 94 24 98 01 00 00 C3
+
+movl 408(%rsp), %esi
+. 2971 0x12345678 8
+. 8B B4 24 98 01 00 00 C3
+
+movl $40, %eax
+. 2972 0x12345678 6
+. B8 28 00 00 00 C3
+
+movl $40, %edi
+. 2973 0x12345678 6
+. BF 28 00 00 00 C3
+
+movl 40(%r12), %esi
+. 2974 0x12345678 6
+. 41 8B 74 24 28 C3
+
+movl 40(%r13), %edi
+. 2975 0x12345678 5
+. 41 8B 7D 28 C3
+
+movl 40(%r13), %esi
+. 2976 0x12345678 5
+. 41 8B 75 28 C3
+
+movl 40(%rbp), %eax
+. 2977 0x12345678 4
+. 8B 45 28 C3
+
+movl 40(%rsp), %eax
+. 2978 0x12345678 5
+. 8B 44 24 28 C3
+
+movl 40(%rsp), %edi
+. 2979 0x12345678 5
+. 8B 7C 24 28 C3
+
+movl 40(%rsp), %edx
+. 2980 0x12345678 5
+. 8B 54 24 28 C3
+
+movl 40(%rsp), %esi
+. 2981 0x12345678 5
+. 8B 74 24 28 C3
+
+movl $4, 104(%rsp)
+. 2982 0x12345678 9
+. C7 44 24 68 04 00 00 00 C3
+
+movl $4, 108(%rax)
+. 2983 0x12345678 8
+. C7 40 6C 04 00 00 00 C3
+
+movl $4, 108(%rbx)
+. 2984 0x12345678 8
+. C7 43 6C 04 00 00 00 C3
+
+movl $4, 132(%rbx)
+. 2985 0x12345678 11
+. C7 83 84 00 00 00 04 00 00 00 C3
+
+movl $4, 156(%rsp)
+. 2986 0x12345678 12
+. C7 84 24 9C 00 00 00 04 00 00 00 C3
+
+movl 416(%rsp), %ecx
+. 2987 0x12345678 8
+. 8B 8C 24 A0 01 00 00 C3
+
+movl 416(%rsp), %edi
+. 2988 0x12345678 8
+. 8B BC 24 A0 01 00 00 C3
+
+movl 416(%rsp), %edx
+. 2989 0x12345678 8
+. 8B 94 24 A0 01 00 00 C3
+
+movl 416(%rsp), %esi
+. 2990 0x12345678 8
+. 8B B4 24 A0 01 00 00 C3
+
+movl $4, 188(%rsp)
+. 2991 0x12345678 12
+. C7 84 24 BC 00 00 00 04 00 00 00 C3
+
+movl $4, 20(%rsp)
+. 2992 0x12345678 9
+. C7 44 24 14 04 00 00 00 C3
+
+movl 420(%rsp), %ecx
+. 2993 0x12345678 8
+. 8B 8C 24 A4 01 00 00 C3
+
+movl 420(%rsp), %edi
+. 2994 0x12345678 8
+. 8B BC 24 A4 01 00 00 C3
+
+movl 420(%rsp), %edx
+. 2995 0x12345678 8
+. 8B 94 24 A4 01 00 00 C3
+
+movl 420(%rsp), %esi
+. 2996 0x12345678 8
+. 8B B4 24 A4 01 00 00 C3
+
+movl $424, %edx
+. 2997 0x12345678 6
+. BA A8 01 00 00 C3
+
+movl $428, %edx
+. 2998 0x12345678 6
+. BA AC 01 00 00 C3
+
+movl 428(%rsp), %edi
+. 2999 0x12345678 8
+. 8B BC 24 AC 01 00 00 C3
+
+movl $4294967295, %eax
+. 3000 0x12345678 6
+. B8 FF FF FF FF C3
+
+movl $433, %edx
+. 3001 0x12345678 6
+. BA B1 01 00 00 C3
+
+movl 436(%rsp), %edi
+. 3002 0x12345678 8
+. 8B BC 24 B4 01 00 00 C3
+
+movl $440, %edx
+. 3003 0x12345678 6
+. BA B8 01 00 00 C3
+
+movl $442, %edx
+. 3004 0x12345678 6
+. BA BA 01 00 00 C3
+
+movl 444(%rsp), %edi
+. 3005 0x12345678 8
+. 8B BC 24 BC 01 00 00 C3
+
+movl $4, 48(%rax)
+. 3006 0x12345678 8
+. C7 40 30 04 00 00 00 C3
+
+movl $4, 48(%rbx)
+. 3007 0x12345678 8
+. C7 43 30 04 00 00 00 C3
+
+movl $44, %eax
+. 3008 0x12345678 6
+. B8 2C 00 00 00 C3
+
+movl $44, %edi
+. 3009 0x12345678 6
+. BF 2C 00 00 00 C3
+
+movl 44(%rsp), %eax
+. 3010 0x12345678 5
+. 8B 44 24 2C C3
+
+movl 44(%rsp), %ebx
+. 3011 0x12345678 5
+. 8B 5C 24 2C C3
+
+movl 44(%rsp), %edi
+. 3012 0x12345678 5
+. 8B 7C 24 2C C3
+
+movl 44(%rsp), %edx
+. 3013 0x12345678 5
+. 8B 54 24 2C C3
+
+movl 452(%rsp), %edi
+. 3014 0x12345678 8
+. 8B BC 24 C4 01 00 00 C3
+
+movl $4, 60(%rax)
+. 3015 0x12345678 8
+. C7 40 3C 04 00 00 00 C3
+
+movl $4, 60(%rbx)
+. 3016 0x12345678 8
+. C7 43 3C 04 00 00 00 C3
+
+movl 460(%rsp), %edi
+. 3017 0x12345678 8
+. 8B BC 24 CC 01 00 00 C3
+
+movl $463, %edx
+. 3018 0x12345678 6
+. BA CF 01 00 00 C3
+
+movl 464(%rsp), %edx
+. 3019 0x12345678 8
+. 8B 94 24 D0 01 00 00 C3
+
+movl $465, %edx
+. 3020 0x12345678 6
+. BA D1 01 00 00 C3
+
+movl $466, %edx
+. 3021 0x12345678 6
+. BA D2 01 00 00 C3
+
+movl $467, %edx
+. 3022 0x12345678 6
+. BA D3 01 00 00 C3
+
+movl 468(%rsp), %edi
+. 3023 0x12345678 8
+. 8B BC 24 D4 01 00 00 C3
+
+movl $469, %edx
+. 3024 0x12345678 6
+. BA D5 01 00 00 C3
+
+movl $4, 72(%rax)
+. 3025 0x12345678 8
+. C7 40 48 04 00 00 00 C3
+
+movl 472(%rsp), %edx
+. 3026 0x12345678 8
+. 8B 94 24 D8 01 00 00 C3
+
+movl $473, %edx
+. 3027 0x12345678 6
+. BA D9 01 00 00 C3
+
+movl $476, %edx
+. 3028 0x12345678 6
+. BA DC 01 00 00 C3
+
+movl 476(%rsp), %esi
+. 3029 0x12345678 8
+. 8B B4 24 DC 01 00 00 C3
+
+movl $477, %edx
+. 3030 0x12345678 6
+. BA DD 01 00 00 C3
+
+movl $478, %edx
+. 3031 0x12345678 6
+. BA DE 01 00 00 C3
+
+movl $479, %edx
+. 3032 0x12345678 6
+. BA DF 01 00 00 C3
+
+movl $4, 80(%rax)
+. 3033 0x12345678 8
+. C7 40 50 04 00 00 00 C3
+
+movl 480(%rsp), %ecx
+. 3034 0x12345678 8
+. 8B 8C 24 E0 01 00 00 C3
+
+movl $481, %edx
+. 3035 0x12345678 6
+. BA E1 01 00 00 C3
+
+movl $483, %edx
+. 3036 0x12345678 6
+. BA E3 01 00 00 C3
+
+movl $4, 84(%rax)
+. 3037 0x12345678 8
+. C7 40 54 04 00 00 00 C3
+
+movl $4, 84(%rbx)
+. 3038 0x12345678 8
+. C7 43 54 04 00 00 00 C3
+
+movl $48, 4(%rsp)
+. 3039 0x12345678 9
+. C7 44 24 04 30 00 00 00 C3
+
+movl 484(%rsp), %esi
+. 3040 0x12345678 8
+. 8B B4 24 E4 01 00 00 C3
+
+movl $487, %edx
+. 3041 0x12345678 6
+. BA E7 01 00 00 C3
+
+movl 488(%rsp), %ecx
+. 3042 0x12345678 8
+. 8B 8C 24 E8 01 00 00 C3
+
+movl $48, %eax
+. 3043 0x12345678 6
+. B8 30 00 00 00 C3
+
+movl $48, %edi
+. 3044 0x12345678 6
+. BF 30 00 00 00 C3
+
+movl $48, %edx
+. 3045 0x12345678 6
+. BA 30 00 00 00 C3
+
+movl 48(%r13), %edx
+. 3046 0x12345678 5
+. 41 8B 55 30 C3
+
+movl 48(%r8), %edi
+. 3047 0x12345678 5
+. 41 8B 78 30 C3
+
+movl $4, 8(%rax)
+. 3048 0x12345678 8
+. C7 40 08 04 00 00 00 C3
+
+movl 48(%rax), %edx
+. 3049 0x12345678 4
+. 8B 50 30 C3
+
+movl 48(%rax), %r8d
+. 3050 0x12345678 5
+. 44 8B 40 30 C3
+
+movl 48(%rax), %r9d
+. 3051 0x12345678 5
+. 44 8B 48 30 C3
+
+movl 48(%rbp), %edi
+. 3052 0x12345678 4
+. 8B 7D 30 C3
+
+movl 48(%rsp), %eax
+. 3053 0x12345678 5
+. 8B 44 24 30 C3
+
+movl 48(%rsp), %edi
+. 3054 0x12345678 5
+. 8B 7C 24 30 C3
+
+movl 48(%rsp), %edx
+. 3055 0x12345678 5
+. 8B 54 24 30 C3
+
+movl 48(%rsp), %esi
+. 3056 0x12345678 5
+. 8B 74 24 30 C3
+
+movl 492(%rsp), %edi
+. 3057 0x12345678 8
+. 8B BC 24 EC 01 00 00 C3
+
+movl 492(%rsp), %esi
+. 3058 0x12345678 8
+. 8B B4 24 EC 01 00 00 C3
+
+movl $4950, %edx
+. 3059 0x12345678 6
+. BA 56 13 00 00 C3
+
+movl $4957, %edx
+. 3060 0x12345678 6
+. BA 5D 13 00 00 C3
+
+movl $4958, %edx
+. 3061 0x12345678 6
+. BA 5E 13 00 00 C3
+
+movl $496, %edx
+. 3062 0x12345678 6
+. BA F0 01 00 00 C3
+
+movl $4, 96(%rax)
+. 3063 0x12345678 8
+. C7 40 60 04 00 00 00 C3
+
+movl $4, 96(%rbx)
+. 3064 0x12345678 8
+. C7 43 60 04 00 00 00 C3
+
+movl $499, %edx
+. 3065 0x12345678 6
+. BA F3 01 00 00 C3
+
+movl $49, %ecx
+. 3066 0x12345678 6
+. B9 31 00 00 00 C3
+
+movl $4, %eax
+. 3067 0x12345678 6
+. B8 04 00 00 00 C3
+
+movl $4, %ebp
+. 3068 0x12345678 6
+. BD 04 00 00 00 C3
+
+movl $4, %ecx
+. 3069 0x12345678 6
+. B9 04 00 00 00 C3
+
+movl $-4, %edi
+. 3070 0x12345678 6
+. BF FC FF FF FF C3
+
+movl $4, %edi
+. 3071 0x12345678 6
+. BF 04 00 00 00 C3
+
+movl 4, %edx
+. 3072 0x12345678 8
+. 8B 14 25 04 00 00 00 C3
+
+movl $4, %esi
+. 3073 0x12345678 6
+. BE 04 00 00 00 C3
+
+movl 4(%r13), %eax
+. 3074 0x12345678 5
+. 41 8B 45 04 C3
+
+movl 4(%r13), %edi
+. 3075 0x12345678 5
+. 41 8B 7D 04 C3
+
+movl $4, %r14d
+. 3076 0x12345678 7
+. 41 BE 04 00 00 00 C3
+
+movl $4, %r8d
+. 3077 0x12345678 7
+. 41 B8 04 00 00 00 C3
+
+movl 4(%r8), %eax
+. 3078 0x12345678 5
+. 41 8B 40 04 C3
+
+movl $4, (%rax)
+. 3079 0x12345678 7
+. C7 00 04 00 00 00 C3
+
+movl 4(%rax), %eax
+. 3080 0x12345678 4
+. 8B 40 04 C3
+
+movl 4(%rax), %edi
+. 3081 0x12345678 4
+. 8B 78 04 C3
+
+movl 4(%rbx), %edi
+. 3082 0x12345678 4
+. 8B 7B 04 C3
+
+movl 4(%rdi), %edi
+. 3083 0x12345678 4
+. 8B 7F 04 C3
+
+movl 4(%rdi), %esi
+. 3084 0x12345678 4
+. 8B 77 04 C3
+
+movl 4(%rsi), %eax
+. 3085 0x12345678 4
+. 8B 46 04 C3
+
+movl 4(%rsp), %ebx
+. 3086 0x12345678 5
+. 8B 5C 24 04 C3
+
+movl 4(%rsp), %ecx
+. 3087 0x12345678 5
+. 8B 4C 24 04 C3
+
+movl 4(%rsp), %edi
+. 3088 0x12345678 5
+. 8B 7C 24 04 C3
+
+movl 4(%rsp), %esi
+. 3089 0x12345678 5
+. 8B 74 24 04 C3
+
+movl $5000, 24(%rsp)
+. 3090 0x12345678 9
+. C7 44 24 18 88 13 00 00 C3
+
+movl 500(%rsp), %edi
+. 3091 0x12345678 8
+. 8B BC 24 F4 01 00 00 C3
+
+movl 500(%rsp), %esi
+. 3092 0x12345678 8
+. 8B B4 24 F4 01 00 00 C3
+
+movl $50, 16(%rdi)
+. 3093 0x12345678 8
+. C7 47 10 32 00 00 00 C3
+
+movl $501, %edx
+. 3094 0x12345678 6
+. BA F5 01 00 00 C3
+
+movl $504, %edx
+. 3095 0x12345678 6
+. BA F8 01 00 00 C3
+
+movl 504(%rsp), %edi
+. 3096 0x12345678 8
+. 8B BC 24 F8 01 00 00 C3
+
+movl 504(%rsp), %esi
+. 3097 0x12345678 8
+. 8B B4 24 F8 01 00 00 C3
+
+movl $505, %edx
+. 3098 0x12345678 6
+. BA F9 01 00 00 C3
+
+movl $507, %edx
+. 3099 0x12345678 6
+. BA FB 01 00 00 C3
+
+movl $5088, %edx
+. 3100 0x12345678 6
+. BA E0 13 00 00 C3
+
+movl $5089, %edx
+. 3101 0x12345678 6
+. BA E1 13 00 00 C3
+
+movl 508(%rsp), %ecx
+. 3102 0x12345678 8
+. 8B 8C 24 FC 01 00 00 C3
+
+movl 508(%rsp), %edx
+. 3103 0x12345678 8
+. 8B 94 24 FC 01 00 00 C3
+
+movl $5092, %edx
+. 3104 0x12345678 6
+. BA E4 13 00 00 C3
+
+movl $5093, %edx
+. 3105 0x12345678 6
+. BA E5 13 00 00 C3
+
+movl $509, %edx
+. 3106 0x12345678 6
+. BA FD 01 00 00 C3
+
+movl $511, %edx
+. 3107 0x12345678 6
+. BA FF 01 00 00 C3
+
+movl $512, 40(%rbx)
+. 3108 0x12345678 8
+. C7 43 28 00 02 00 00 C3
+
+movl $512, %edx
+. 3109 0x12345678 6
+. BA 00 02 00 00 C3
+
+movl $5158, %edx
+. 3110 0x12345678 6
+. BA 26 14 00 00 C3
+
+movl 516(%rsp), %ecx
+. 3111 0x12345678 8
+. 8B 8C 24 04 02 00 00 C3
+
+movl 516(%rsp), %edx
+. 3112 0x12345678 8
+. 8B 94 24 04 02 00 00 C3
+
+movl $5181, %edx
+. 3113 0x12345678 6
+. BA 3D 14 00 00 C3
+
+movl $519, %edx
+. 3114 0x12345678 6
+. BA 07 02 00 00 C3
+
+movl $5200, %edx
+. 3115 0x12345678 6
+. BA 50 14 00 00 C3
+
+movl $5202, %edx
+. 3116 0x12345678 6
+. BA 52 14 00 00 C3
+
+movl 520(%rsp), %ecx
+. 3117 0x12345678 8
+. 8B 8C 24 08 02 00 00 C3
+
+movl 520(%rsp), %edx
+. 3118 0x12345678 8
+. 8B 94 24 08 02 00 00 C3
+
+movl $5222, %edx
+. 3119 0x12345678 6
+. BA 66 14 00 00 C3
+
+movl $5237, %edx
+. 3120 0x12345678 6
+. BA 75 14 00 00 C3
+
+movl 524(%rsp), %ecx
+. 3121 0x12345678 8
+. 8B 8C 24 0C 02 00 00 C3
+
+movl 524(%rsp), %edi
+. 3122 0x12345678 8
+. 8B BC 24 0C 02 00 00 C3
+
+movl 524(%rsp), %edx
+. 3123 0x12345678 8
+. 8B 94 24 0C 02 00 00 C3
+
+movl 524(%rsp), %esi
+. 3124 0x12345678 8
+. 8B B4 24 0C 02 00 00 C3
+
+movl $52, %eax
+. 3125 0x12345678 6
+. B8 34 00 00 00 C3
+
+movl $52, %edi
+. 3126 0x12345678 6
+. BF 34 00 00 00 C3
+
+movl 52(%r8,%rax,4), %eax
+. 3127 0x12345678 6
+. 41 8B 44 80 34 C3
+
+movl 52(%rsp), %edi
+. 3128 0x12345678 5
+. 8B 7C 24 34 C3
+
+movl 532(%rsp), %ecx
+. 3129 0x12345678 8
+. 8B 8C 24 14 02 00 00 C3
+
+movl 532(%rsp), %edi
+. 3130 0x12345678 8
+. 8B BC 24 14 02 00 00 C3
+
+movl 532(%rsp), %edx
+. 3131 0x12345678 8
+. 8B 94 24 14 02 00 00 C3
+
+movl 532(%rsp), %esi
+. 3132 0x12345678 8
+. 8B B4 24 14 02 00 00 C3
+
+movl 536(%rsp), %ecx
+. 3133 0x12345678 8
+. 8B 8C 24 18 02 00 00 C3
+
+movl 536(%rsp), %edi
+. 3134 0x12345678 8
+. 8B BC 24 18 02 00 00 C3
+
+movl 536(%rsp), %edx
+. 3135 0x12345678 8
+. 8B 94 24 18 02 00 00 C3
+
+movl 536(%rsp), %esi
+. 3136 0x12345678 8
+. 8B B4 24 18 02 00 00 C3
+
+movl 540(%rsp), %edi
+. 3137 0x12345678 8
+. 8B BC 24 1C 02 00 00 C3
+
+movl $541, %edx
+. 3138 0x12345678 6
+. BA 1D 02 00 00 C3
+
+movl $544, %edx
+. 3139 0x12345678 6
+. BA 20 02 00 00 C3
+
+movl 544(%rsp), %edi
+. 3140 0x12345678 8
+. 8B BC 24 20 02 00 00 C3
+
+movl 544(%rsp), %esi
+. 3141 0x12345678 8
+. 8B B4 24 20 02 00 00 C3
+
+movl $5, 48(%rax)
+. 3142 0x12345678 8
+. C7 40 30 05 00 00 00 C3
+
+movl $5, 48(%rbx)
+. 3143 0x12345678 8
+. C7 43 30 05 00 00 00 C3
+
+movl 548(%rsp), %edi
+. 3144 0x12345678 8
+. 8B BC 24 24 02 00 00 C3
+
+movl 548(%rsp), %edx
+. 3145 0x12345678 8
+. 8B 94 24 24 02 00 00 C3
+
+movl 548(%rsp), %esi
+. 3146 0x12345678 8
+. 8B B4 24 24 02 00 00 C3
+
+movl 552(%rsp), %ecx
+. 3147 0x12345678 8
+. 8B 8C 24 28 02 00 00 C3
+
+movl 552(%rsp), %edi
+. 3148 0x12345678 8
+. 8B BC 24 28 02 00 00 C3
+
+movl 556(%rsp), %edx
+. 3149 0x12345678 8
+. 8B 94 24 2C 02 00 00 C3
+
+movl $5583, %edx
+. 3150 0x12345678 6
+. BA CF 15 00 00 C3
+
+movl $560, %edx
+. 3151 0x12345678 6
+. BA 30 02 00 00 C3
+
+movl 560(%rsp), %edx
+. 3152 0x12345678 8
+. 8B 94 24 30 02 00 00 C3
+
+movl $567, %edx
+. 3153 0x12345678 6
+. BA 37 02 00 00 C3
+
+movl $569, %edx
+. 3154 0x12345678 6
+. BA 39 02 00 00 C3
+
+movl $56, %eax
+. 3155 0x12345678 6
+. B8 38 00 00 00 C3
+
+movl $56, %edi
+. 3156 0x12345678 6
+. BF 38 00 00 00 C3
+
+movl 56(%rsp), %eax
+. 3157 0x12345678 5
+. 8B 44 24 38 C3
+
+movl 56(%rsp), %edi
+. 3158 0x12345678 5
+. 8B 7C 24 38 C3
+
+movl 56(%rsp), %edx
+. 3159 0x12345678 5
+. 8B 54 24 38 C3
+
+movl 56(%rsp), %esi
+. 3160 0x12345678 5
+. 8B 74 24 38 C3
+
+movl $5700, %edx
+. 3161 0x12345678 6
+. BA 44 16 00 00 C3
+
+movl 572(%rsp), %eax
+. 3162 0x12345678 8
+. 8B 84 24 3C 02 00 00 C3
+
+movl 572(%rsp), %ecx
+. 3163 0x12345678 8
+. 8B 8C 24 3C 02 00 00 C3
+
+movl $574, %edx
+. 3164 0x12345678 6
+. BA 3E 02 00 00 C3
+
+movl $575, %edx
+. 3165 0x12345678 6
+. BA 3F 02 00 00 C3
+
+movl $5835, %edx
+. 3166 0x12345678 6
+. BA CB 16 00 00 C3
+
+movl $5930, %edx
+. 3167 0x12345678 6
+. BA 2A 17 00 00 C3
+
+movl $5, %eax
+. 3168 0x12345678 6
+. B8 05 00 00 00 C3
+
+movl $5, %ebp
+. 3169 0x12345678 6
+. BD 05 00 00 00 C3
+
+movl $5, %edi
+. 3170 0x12345678 6
+. BF 05 00 00 00 C3
+
+movl $5, %edx
+. 3171 0x12345678 6
+. BA 05 00 00 00 C3
+
+movl $5, %esi
+. 3172 0x12345678 6
+. BE 05 00 00 00 C3
+
+movl $5, (%rax)
+. 3173 0x12345678 7
+. C7 00 05 00 00 00 C3
+
+movl $5, (%rcx)
+. 3174 0x12345678 7
+. C7 01 05 00 00 00 C3
+
+movl $5, (%rdx)
+. 3175 0x12345678 7
+. C7 02 05 00 00 00 C3
+
+movl $604, %edx
+. 3176 0x12345678 6
+. BA 5C 02 00 00 C3
+
+movl $60, 56(%rax)
+. 3177 0x12345678 8
+. C7 40 38 3C 00 00 00 C3
+
+movl $60, 56(%rbx)
+. 3178 0x12345678 8
+. C7 43 38 3C 00 00 00 C3
+
+movl $607, %edx
+. 3179 0x12345678 6
+. BA 5F 02 00 00 C3
+
+movl $608, %edx
+. 3180 0x12345678 6
+. BA 60 02 00 00 C3
+
+movl $60, %eax
+. 3181 0x12345678 6
+. B8 3C 00 00 00 C3
+
+movl $60, %edi
+. 3182 0x12345678 6
+. BF 3C 00 00 00 C3
+
+movl 60(%rsp), %eax
+. 3183 0x12345678 5
+. 8B 44 24 3C C3
+
+movl 60(%rsp), %edi
+. 3184 0x12345678 5
+. 8B 7C 24 3C C3
+
+movl 60(%rsp), %edx
+. 3185 0x12345678 5
+. 8B 54 24 3C C3
+
+movl 60(%rsp), %esi
+. 3186 0x12345678 5
+. 8B 74 24 3C C3
+
+movl $612, %edx
+. 3187 0x12345678 6
+. BA 64 02 00 00 C3
+
+movl $615, %edx
+. 3188 0x12345678 6
+. BA 67 02 00 00 C3
+
+movl $616, %edx
+. 3189 0x12345678 6
+. BA 68 02 00 00 C3
+
+movl $623, %edx
+. 3190 0x12345678 6
+. BA 6F 02 00 00 C3
+
+movl $624, %edx
+. 3191 0x12345678 6
+. BA 70 02 00 00 C3
+
+movl $6253, %edx
+. 3192 0x12345678 6
+. BA 6D 18 00 00 C3
+
+movl $6277, %edx
+. 3193 0x12345678 6
+. BA 85 18 00 00 C3
+
+movl $6292, %edx
+. 3194 0x12345678 6
+. BA 94 18 00 00 C3
+
+movl $630, %edx
+. 3195 0x12345678 6
+. BA 76 02 00 00 C3
+
+movl $631, %edx
+. 3196 0x12345678 6
+. BA 77 02 00 00 C3
+
+movl $63, %edi
+. 3197 0x12345678 6
+. BF 3F 00 00 00 C3
+
+movl $640, %edx
+. 3198 0x12345678 6
+. BA 80 02 00 00 C3
+
+movl $641, %edx
+. 3199 0x12345678 6
+. BA 81 02 00 00 C3
+
+movl $642, %edx
+. 3200 0x12345678 6
+. BA 82 02 00 00 C3
+
+movl $644, %edx
+. 3201 0x12345678 6
+. BA 84 02 00 00 C3
+
+movl $64, 68(%rax)
+. 3202 0x12345678 8
+. C7 40 44 40 00 00 00 C3
+
+movl $64, 68(%rbx)
+. 3203 0x12345678 8
+. C7 43 44 40 00 00 00 C3
+
+movl $64, 72(%rax)
+. 3204 0x12345678 8
+. C7 40 48 40 00 00 00 C3
+
+movl $64, 72(%rbx)
+. 3205 0x12345678 8
+. C7 43 48 40 00 00 00 C3
+
+movl $64, 8(%rsp)
+. 3206 0x12345678 9
+. C7 44 24 08 40 00 00 00 C3
+
+movl $64, %eax
+. 3207 0x12345678 6
+. B8 40 00 00 00 C3
+
+movl $64, %edi
+. 3208 0x12345678 6
+. BF 40 00 00 00 C3
+
+movl $64, %edx
+. 3209 0x12345678 6
+. BA 40 00 00 00 C3
+
+movl $64, %r14d
+. 3210 0x12345678 7
+. 41 BE 40 00 00 00 C3
+
+movl 64(%rsp), %ecx
+. 3211 0x12345678 5
+. 8B 4C 24 40 C3
+
+movl 64(%rsp), %edx
+. 3212 0x12345678 5
+. 8B 54 24 40 C3
+
+movl $65280, %edi
+. 3213 0x12345678 6
+. BF 00 FF 00 00 C3
+
+movl $653, %edx
+. 3214 0x12345678 6
+. BA 8D 02 00 00 C3
+
+movl $65535, %eax
+. 3215 0x12345678 6
+. B8 FF FF 00 00 C3
+
+movl $65535, %ecx
+. 3216 0x12345678 6
+. B9 FF FF 00 00 C3
+
+movl $65535, %r12d
+. 3217 0x12345678 7
+. 41 BC FF FF 00 00 C3
+
+movl $65535, %r9d
+. 3218 0x12345678 7
+. 41 B9 FF FF 00 00 C3
+
+movl $659, %edx
+. 3219 0x12345678 6
+. BA 93 02 00 00 C3
+
+movl $6604, %edx
+. 3220 0x12345678 6
+. BA CC 19 00 00 C3
+
+movl $664, %edx
+. 3221 0x12345678 6
+. BA 98 02 00 00 C3
+
+movl $6660, %edx
+. 3222 0x12345678 6
+. BA 04 1A 00 00 C3
+
+movl 672(%rsp,%r8,4), %ebp
+. 3223 0x12345678 9
+. 42 8B AC 84 A0 02 00 00 C3
+
+movl 672(%rsp,%rax,4), %ebp
+. 3224 0x12345678 8
+. 8B AC 84 A0 02 00 00 C3
+
+movl 672(%rsp,%rdx,4), %ebp
+. 3225 0x12345678 8
+. 8B AC 94 A0 02 00 00 C3
+
+movl $673, %edx
+. 3226 0x12345678 6
+. BA A1 02 00 00 C3
+
+movl $6745, %edx
+. 3227 0x12345678 6
+. BA 59 1A 00 00 C3
+
+movl $674, %edx
+. 3228 0x12345678 6
+. BA A2 02 00 00 C3
+
+movl $6768, %edx
+. 3229 0x12345678 6
+. BA 70 1A 00 00 C3
+
+movl $6786, %edx
+. 3230 0x12345678 6
+. BA 82 1A 00 00 C3
+
+movl $6788, %edx
+. 3231 0x12345678 6
+. BA 84 1A 00 00 C3
+
+movl $6807, %edx
+. 3232 0x12345678 6
+. BA 97 1A 00 00 C3
+
+movl $680, %edx
+. 3233 0x12345678 6
+. BA A8 02 00 00 C3
+
+movl $681, %edx
+. 3234 0x12345678 6
+. BA A9 02 00 00 C3
+
+movl $6822, %edx
+. 3235 0x12345678 6
+. BA A6 1A 00 00 C3
+
+movl $6841, %edx
+. 3236 0x12345678 6
+. BA B9 1A 00 00 C3
+
+movl $684, %edx
+. 3237 0x12345678 6
+. BA AC 02 00 00 C3
+
+movl $6857, %edx
+. 3238 0x12345678 6
+. BA C9 1A 00 00 C3
+
+movl $6858, %edx
+. 3239 0x12345678 6
+. BA CA 1A 00 00 C3
+
+movl $6859, %edx
+. 3240 0x12345678 6
+. BA CB 1A 00 00 C3
+
+movl $6860, %edx
+. 3241 0x12345678 6
+. BA CC 1A 00 00 C3
+
+movl $686, %edx
+. 3242 0x12345678 6
+. BA AE 02 00 00 C3
+
+movl $687, %edx
+. 3243 0x12345678 6
+. BA AF 02 00 00 C3
+
+movl 688(%rsp,%r8,4), %eax
+. 3244 0x12345678 9
+. 42 8B 84 84 B0 02 00 00 C3
+
+movl 688(%rsp,%rdx,4), %eax
+. 3245 0x12345678 8
+. 8B 84 94 B0 02 00 00 C3
+
+movl $6896, %edx
+. 3246 0x12345678 6
+. BA F0 1A 00 00 C3
+
+movl $6897, %edx
+. 3247 0x12345678 6
+. BA F1 1A 00 00 C3
+
+movl $6898, %edx
+. 3248 0x12345678 6
+. BA F2 1A 00 00 C3
+
+movl $6899, %edx
+. 3249 0x12345678 6
+. BA F3 1A 00 00 C3
+
+movl $68, %eax
+. 3250 0x12345678 6
+. B8 44 00 00 00 C3
+
+movl $68, %edi
+. 3251 0x12345678 6
+. BF 44 00 00 00 C3
+
+movl 68(%rsp), %eax
+. 3252 0x12345678 5
+. 8B 44 24 44 C3
+
+movl 68(%rsp), %ebp
+. 3253 0x12345678 5
+. 8B 6C 24 44 C3
+
+movl 68(%rsp), %ebx
+. 3254 0x12345678 5
+. 8B 5C 24 44 C3
+
+movl 68(%rsp), %edi
+. 3255 0x12345678 5
+. 8B 7C 24 44 C3
+
+movl 68(%rsp), %edx
+. 3256 0x12345678 5
+. 8B 54 24 44 C3
+
+movl $690, %edx
+. 3257 0x12345678 6
+. BA B2 02 00 00 C3
+
+movl $691, %edx
+. 3258 0x12345678 6
+. BA B3 02 00 00 C3
+
+movl $69631, (%rcx)
+. 3259 0x12345678 7
+. C7 01 FF 0F 01 00 C3
+
+movl $69631, (%rdx)
+. 3260 0x12345678 7
+. C7 02 FF 0F 01 00 C3
+
+movl $69631, (%rsi)
+. 3261 0x12345678 7
+. C7 06 FF 0F 01 00 C3
+
+movl $69632, %eax
+. 3262 0x12345678 6
+. B8 00 10 01 00 C3
+
+movl $69632, (%rdx)
+. 3263 0x12345678 7
+. C7 02 00 10 01 00 C3
+
+movl $69632, (%rsi)
+. 3264 0x12345678 7
+. C7 06 00 10 01 00 C3
+
+movl $69633, %eax
+. 3265 0x12345678 6
+. B8 01 10 01 00 C3
+
+movl $69633, %edi
+. 3266 0x12345678 6
+. BF 01 10 01 00 C3
+
+movl $69633, %edx
+. 3267 0x12345678 6
+. BA 01 10 01 00 C3
+
+movl $69633, %esi
+. 3268 0x12345678 6
+. BE 01 10 01 00 C3
+
+movl $69633, (%rcx)
+. 3269 0x12345678 7
+. C7 01 01 10 01 00 C3
+
+movl $69633, (%rdx)
+. 3270 0x12345678 7
+. C7 02 01 10 01 00 C3
+
+movl $69633, (%rsi)
+. 3271 0x12345678 7
+. C7 06 01 10 01 00 C3
+
+movl $69634, %eax
+. 3272 0x12345678 6
+. B8 02 10 01 00 C3
+
+movl $69634, %edi
+. 3273 0x12345678 6
+. BF 02 10 01 00 C3
+
+movl $69634, %esi
+. 3274 0x12345678 6
+. BE 02 10 01 00 C3
+
+movl $69634, (%rcx)
+. 3275 0x12345678 7
+. C7 01 02 10 01 00 C3
+
+movl $69634, (%rdx)
+. 3276 0x12345678 7
+. C7 02 02 10 01 00 C3
+
+movl $69634, (%rsi)
+. 3277 0x12345678 7
+. C7 06 02 10 01 00 C3
+
+movl $69635, 64(%rsp)
+. 3278 0x12345678 9
+. C7 44 24 40 03 10 01 00 C3
+
+movl $69635, %eax
+. 3279 0x12345678 6
+. B8 03 10 01 00 C3
+
+movl $69635, %edi
+. 3280 0x12345678 6
+. BF 03 10 01 00 C3
+
+movl $69635, %edx
+. 3281 0x12345678 6
+. BA 03 10 01 00 C3
+
+movl $69635, %esi
+. 3282 0x12345678 6
+. BE 03 10 01 00 C3
+
+movl $69635, (%rcx)
+. 3283 0x12345678 7
+. C7 01 03 10 01 00 C3
+
+movl $69635, (%rdx)
+. 3284 0x12345678 7
+. C7 02 03 10 01 00 C3
+
+movl $69635, (%rsi)
+. 3285 0x12345678 7
+. C7 06 03 10 01 00 C3
+
+movl $69636, 64(%rsp)
+. 3286 0x12345678 9
+. C7 44 24 40 04 10 01 00 C3
+
+movl $69636, %eax
+. 3287 0x12345678 6
+. B8 04 10 01 00 C3
+
+movl $69636, %edi
+. 3288 0x12345678 6
+. BF 04 10 01 00 C3
+
+movl $69636, %esi
+. 3289 0x12345678 6
+. BE 04 10 01 00 C3
+
+movl $69636, (%rcx)
+. 3290 0x12345678 7
+. C7 01 04 10 01 00 C3
+
+movl $69636, (%rdx)
+. 3291 0x12345678 7
+. C7 02 04 10 01 00 C3
+
+movl $69636, (%rsi)
+. 3292 0x12345678 7
+. C7 06 04 10 01 00 C3
+
+movl $69637, %edi
+. 3293 0x12345678 6
+. BF 05 10 01 00 C3
+
+movl $69637, %esi
+. 3294 0x12345678 6
+. BE 05 10 01 00 C3
+
+movl $69637, (%rdx)
+. 3295 0x12345678 7
+. C7 02 05 10 01 00 C3
+
+movl $69637, (%rsi)
+. 3296 0x12345678 7
+. C7 06 05 10 01 00 C3
+
+movl $69638, %eax
+. 3297 0x12345678 6
+. B8 06 10 01 00 C3
+
+movl $69638, %edi
+. 3298 0x12345678 6
+. BF 06 10 01 00 C3
+
+movl $69638, %esi
+. 3299 0x12345678 6
+. BE 06 10 01 00 C3
+
+movl $69638, (%rcx)
+. 3300 0x12345678 7
+. C7 01 06 10 01 00 C3
+
+movl $69638, (%rdx)
+. 3301 0x12345678 7
+. C7 02 06 10 01 00 C3
+
+movl $69638, (%rsi)
+. 3302 0x12345678 7
+. C7 06 06 10 01 00 C3
+
+movl $69639, %eax
+. 3303 0x12345678 6
+. B8 07 10 01 00 C3
+
+movl $69639, %edi
+. 3304 0x12345678 6
+. BF 07 10 01 00 C3
+
+movl $69639, %esi
+. 3305 0x12345678 6
+. BE 07 10 01 00 C3
+
+movl $69639, (%rcx)
+. 3306 0x12345678 7
+. C7 01 07 10 01 00 C3
+
+movl $69639, (%rdx)
+. 3307 0x12345678 7
+. C7 02 07 10 01 00 C3
+
+movl $69639, (%rsi)
+. 3308 0x12345678 7
+. C7 06 07 10 01 00 C3
+
+movl $696, %edx
+. 3309 0x12345678 6
+. BA B8 02 00 00 C3
+
+movl $69, %edi
+. 3310 0x12345678 6
+. BF 45 00 00 00 C3
+
+movl $6, %eax
+. 3311 0x12345678 6
+. B8 06 00 00 00 C3
+
+movl $6, %edi
+. 3312 0x12345678 6
+. BF 06 00 00 00 C3
+
+movl $6, %edx
+. 3313 0x12345678 6
+. BA 06 00 00 00 C3
+
+movl $6, %esi
+. 3314 0x12345678 6
+. BE 06 00 00 00 C3
+
+movl $6, (%rax)
+. 3315 0x12345678 7
+. C7 00 06 00 00 00 C3
+
+movl $6, (%rcx)
+. 3316 0x12345678 7
+. C7 01 06 00 00 00 C3
+
+movl $6, (%rdx)
+. 3317 0x12345678 7
+. C7 02 06 00 00 00 C3
+
+movl $702, %edx
+. 3318 0x12345678 6
+. BA BE 02 00 00 C3
+
+movl 704(%rsp), %r10d
+. 3319 0x12345678 9
+. 44 8B 94 24 C0 02 00 00 C3
+
+movl $708, %edx
+. 3320 0x12345678 6
+. BA C4 02 00 00 C3
+
+movl $7092, %edx
+. 3321 0x12345678 6
+. BA B4 1B 00 00 C3
+
+movl $70, %edx
+. 3322 0x12345678 6
+. BA 46 00 00 00 C3
+
+movl $714, %edx
+. 3323 0x12345678 6
+. BA CA 02 00 00 C3
+
+movl $7174, %edx
+. 3324 0x12345678 6
+. BA 06 1C 00 00 C3
+
+movl $7199, %edx
+. 3325 0x12345678 6
+. BA 1F 1C 00 00 C3
+
+movl $721, %edx
+. 3326 0x12345678 6
+. BA D1 02 00 00 C3
+
+movl $722, %edx
+. 3327 0x12345678 6
+. BA D2 02 00 00 C3
+
+movl $723, %edx
+. 3328 0x12345678 6
+. BA D3 02 00 00 C3
+
+movl $7284, %edx
+. 3329 0x12345678 6
+. BA 74 1C 00 00 C3
+
+movl $72, %eax
+. 3330 0x12345678 6
+. B8 48 00 00 00 C3
+
+movl $72, %edi
+. 3331 0x12345678 6
+. BF 48 00 00 00 C3
+
+movl 72(%rsp), %edi
+. 3332 0x12345678 5
+. 8B 7C 24 48 C3
+
+movl $736, %edx
+. 3333 0x12345678 6
+. BA E0 02 00 00 C3
+
+movl $73728, (%rax)
+. 3334 0x12345678 7
+. C7 00 00 20 01 00 C3
+
+movl $73729, (%rax)
+. 3335 0x12345678 7
+. C7 00 01 20 01 00 C3
+
+movl $73730, (%rax)
+. 3336 0x12345678 7
+. C7 00 02 20 01 00 C3
+
+movl $73731, (%rax)
+. 3337 0x12345678 7
+. C7 00 03 20 01 00 C3
+
+movl $73732, (%rax)
+. 3338 0x12345678 7
+. C7 00 04 20 01 00 C3
+
+movl $73733, (%rax)
+. 3339 0x12345678 7
+. C7 00 05 20 01 00 C3
+
+movl $73734, (%rax)
+. 3340 0x12345678 7
+. C7 00 06 20 01 00 C3
+
+movl $73735, (%rax)
+. 3341 0x12345678 7
+. C7 00 07 20 01 00 C3
+
+movl $7375, %edx
+. 3342 0x12345678 6
+. BA CF 1C 00 00 C3
+
+movl $737, %edx
+. 3343 0x12345678 6
+. BA E1 02 00 00 C3
+
+movl $738, %edx
+. 3344 0x12345678 6
+. BA E2 02 00 00 C3
+
+movl $7416, %edx
+. 3345 0x12345678 6
+. BA F8 1C 00 00 C3
+
+movl $741, %edx
+. 3346 0x12345678 6
+. BA E5 02 00 00 C3
+
+movl $7429, %edx
+. 3347 0x12345678 6
+. BA 05 1D 00 00 C3
+
+movl $7430, %edx
+. 3348 0x12345678 6
+. BA 06 1D 00 00 C3
+
+movl $745, %edx
+. 3349 0x12345678 6
+. BA E9 02 00 00 C3
+
+movl $7476, %edx
+. 3350 0x12345678 6
+. BA 34 1D 00 00 C3
+
+movl $7489, %edx
+. 3351 0x12345678 6
+. BA 41 1D 00 00 C3
+
+movl $7, 48(%rbx)
+. 3352 0x12345678 8
+. C7 43 30 07 00 00 00 C3
+
+movl $751, %edx
+. 3353 0x12345678 6
+. BA EF 02 00 00 C3
+
+movl $761, %edx
+. 3354 0x12345678 6
+. BA F9 02 00 00 C3
+
+movl $764, %edx
+. 3355 0x12345678 6
+. BA FC 02 00 00 C3
+
+movl $7679, %edx
+. 3356 0x12345678 6
+. BA FF 1D 00 00 C3
+
+movl $76, %eax
+. 3357 0x12345678 6
+. B8 4C 00 00 00 C3
+
+movl $76, %edi
+. 3358 0x12345678 6
+. BF 4C 00 00 00 C3
+
+movl 76(%rsp), %edi
+. 3359 0x12345678 5
+. 8B 7C 24 4C C3
+
+movl 76(%rsp), %edx
+. 3360 0x12345678 5
+. 8B 54 24 4C C3
+
+movl $7702, %edx
+. 3361 0x12345678 6
+. BA 16 1E 00 00 C3
+
+movl $770, %edx
+. 3362 0x12345678 6
+. BA 02 03 00 00 C3
+
+movl $7725, %edx
+. 3363 0x12345678 6
+. BA 2D 1E 00 00 C3
+
+movl $776, %edx
+. 3364 0x12345678 6
+. BA 08 03 00 00 C3
+
+movl $7770, %edx
+. 3365 0x12345678 6
+. BA 5A 1E 00 00 C3
+
+movl $77824, 12(%rsp)
+. 3366 0x12345678 9
+. C7 44 24 0C 00 30 01 00 C3
+
+movl $77824, 44(%rsp)
+. 3367 0x12345678 9
+. C7 44 24 2C 00 30 01 00 C3
+
+movl $77824, 8(%rsp)
+. 3368 0x12345678 9
+. C7 44 24 08 00 30 01 00 C3
+
+movl $77824, %r13d
+. 3369 0x12345678 7
+. 41 BD 00 30 01 00 C3
+
+movl $77824, (%rsi)
+. 3370 0x12345678 7
+. C7 06 00 30 01 00 C3
+
+movl $77825, 12(%rsp)
+. 3371 0x12345678 9
+. C7 44 24 0C 01 30 01 00 C3
+
+movl $77825, 8(%rsp)
+. 3372 0x12345678 9
+. C7 44 24 08 01 30 01 00 C3
+
+movl $77825, %eax
+. 3373 0x12345678 6
+. B8 01 30 01 00 C3
+
+movl $77825, %edi
+. 3374 0x12345678 6
+. BF 01 30 01 00 C3
+
+movl $77825, %edx
+. 3375 0x12345678 6
+. BA 01 30 01 00 C3
+
+movl $77825, %esi
+. 3376 0x12345678 6
+. BE 01 30 01 00 C3
+
+movl $77827, %edi
+. 3377 0x12345678 6
+. BF 03 30 01 00 C3
+
+movl $77828, %edi
+. 3378 0x12345678 6
+. BF 04 30 01 00 C3
+
+movl $77828, %r13d
+. 3379 0x12345678 7
+. 41 BD 04 30 01 00 C3
+
+movl $77829, 12(%rsp)
+. 3380 0x12345678 9
+. C7 44 24 0C 05 30 01 00 C3
+
+movl $77829, 8(%rsp)
+. 3381 0x12345678 9
+. C7 44 24 08 05 30 01 00 C3
+
+movl $77829, %eax
+. 3382 0x12345678 6
+. B8 05 30 01 00 C3
+
+movl $77829, %edi
+. 3383 0x12345678 6
+. BF 05 30 01 00 C3
+
+movl $77829, %edx
+. 3384 0x12345678 6
+. BA 05 30 01 00 C3
+
+movl $77829, %esi
+. 3385 0x12345678 6
+. BE 05 30 01 00 C3
+
+movl $77831, %edi
+. 3386 0x12345678 6
+. BF 07 30 01 00 C3
+
+movl $77832, %edi
+. 3387 0x12345678 6
+. BF 08 30 01 00 C3
+
+movl $77832, %r13d
+. 3388 0x12345678 7
+. 41 BD 08 30 01 00 C3
+
+movl $77833, %eax
+. 3389 0x12345678 6
+. B8 09 30 01 00 C3
+
+movl $77833, %esi
+. 3390 0x12345678 6
+. BE 09 30 01 00 C3
+
+movl $77837, 12(%rsp)
+. 3391 0x12345678 9
+. C7 44 24 0C 0D 30 01 00 C3
+
+movl $77837, 8(%rsp)
+. 3392 0x12345678 9
+. C7 44 24 08 0D 30 01 00 C3
+
+movl $77837, %eax
+. 3393 0x12345678 6
+. B8 0D 30 01 00 C3
+
+movl $77837, %edi
+. 3394 0x12345678 6
+. BF 0D 30 01 00 C3
+
+movl $77837, %edx
+. 3395 0x12345678 6
+. BA 0D 30 01 00 C3
+
+movl $77837, %esi
+. 3396 0x12345678 6
+. BE 0D 30 01 00 C3
+
+movl $77839, %edi
+. 3397 0x12345678 6
+. BF 0F 30 01 00 C3
+
+movl $77840, %edi
+. 3398 0x12345678 6
+. BF 10 30 01 00 C3
+
+movl $77840, %r13d
+. 3399 0x12345678 7
+. 41 BD 10 30 01 00 C3
+
+movl $77841, 12(%rsp)
+. 3400 0x12345678 9
+. C7 44 24 0C 11 30 01 00 C3
+
+movl $77841, 8(%rsp)
+. 3401 0x12345678 9
+. C7 44 24 08 11 30 01 00 C3
+
+movl $77841, %eax
+. 3402 0x12345678 6
+. B8 11 30 01 00 C3
+
+movl $77841, %edi
+. 3403 0x12345678 6
+. BF 11 30 01 00 C3
+
+movl $77841, %edx
+. 3404 0x12345678 6
+. BA 11 30 01 00 C3
+
+movl $77841, %esi
+. 3405 0x12345678 6
+. BE 11 30 01 00 C3
+
+movl $77843, %edi
+. 3406 0x12345678 6
+. BF 13 30 01 00 C3
+
+movl $77844, %edi
+. 3407 0x12345678 6
+. BF 14 30 01 00 C3
+
+movl $77844, %r13d
+. 3408 0x12345678 7
+. 41 BD 14 30 01 00 C3
+
+movl $77845, 12(%rsp)
+. 3409 0x12345678 9
+. C7 44 24 0C 15 30 01 00 C3
+
+movl $77845, 8(%rsp)
+. 3410 0x12345678 9
+. C7 44 24 08 15 30 01 00 C3
+
+movl $77845, %eax
+. 3411 0x12345678 6
+. B8 15 30 01 00 C3
+
+movl $77845, %edi
+. 3412 0x12345678 6
+. BF 15 30 01 00 C3
+
+movl $77845, %edx
+. 3413 0x12345678 6
+. BA 15 30 01 00 C3
+
+movl $77845, %esi
+. 3414 0x12345678 6
+. BE 15 30 01 00 C3
+
+movl $77847, %edi
+. 3415 0x12345678 6
+. BF 17 30 01 00 C3
+
+movl $77848, %r13d
+. 3416 0x12345678 7
+. 41 BD 18 30 01 00 C3
+
+movl $77849, %eax
+. 3417 0x12345678 6
+. B8 19 30 01 00 C3
+
+movl $77849, %edi
+. 3418 0x12345678 6
+. BF 19 30 01 00 C3
+
+movl $77849, %esi
+. 3419 0x12345678 6
+. BE 19 30 01 00 C3
+
+movl $77851, 20(%rsp)
+. 3420 0x12345678 9
+. C7 44 24 14 1B 30 01 00 C3
+
+movl $77851, 28(%rsp)
+. 3421 0x12345678 9
+. C7 44 24 1C 1B 30 01 00 C3
+
+movl $77851, %edi
+. 3422 0x12345678 6
+. BF 1B 30 01 00 C3
+
+movl $77852, 20(%rsp)
+. 3423 0x12345678 9
+. C7 44 24 14 1C 30 01 00 C3
+
+movl $77852, %ecx
+. 3424 0x12345678 6
+. B9 1C 30 01 00 C3
+
+movl $77852, %edi
+. 3425 0x12345678 6
+. BF 1C 30 01 00 C3
+
+movl $77852, %edx
+. 3426 0x12345678 6
+. BA 1C 30 01 00 C3
+
+movl $77853, %eax
+. 3427 0x12345678 6
+. B8 1D 30 01 00 C3
+
+movl $77853, %esi
+. 3428 0x12345678 6
+. BE 1D 30 01 00 C3
+
+movl $77855, 20(%rsp)
+. 3429 0x12345678 9
+. C7 44 24 14 1F 30 01 00 C3
+
+movl $77855, 28(%rsp)
+. 3430 0x12345678 9
+. C7 44 24 1C 1F 30 01 00 C3
+
+movl $77855, %edi
+. 3431 0x12345678 6
+. BF 1F 30 01 00 C3
+
+movl $77856, 20(%rsp)
+. 3432 0x12345678 9
+. C7 44 24 14 20 30 01 00 C3
+
+movl $77856, %ecx
+. 3433 0x12345678 6
+. B9 20 30 01 00 C3
+
+movl $77856, %edi
+. 3434 0x12345678 6
+. BF 20 30 01 00 C3
+
+movl $77856, %edx
+. 3435 0x12345678 6
+. BA 20 30 01 00 C3
+
+movl $77857, %eax
+. 3436 0x12345678 6
+. B8 21 30 01 00 C3
+
+movl $77857, %esi
+. 3437 0x12345678 6
+. BE 21 30 01 00 C3
+
+movl $77859, 20(%rsp)
+. 3438 0x12345678 9
+. C7 44 24 14 23 30 01 00 C3
+
+movl $77859, 28(%rsp)
+. 3439 0x12345678 9
+. C7 44 24 1C 23 30 01 00 C3
+
+movl $77859, %edi
+. 3440 0x12345678 6
+. BF 23 30 01 00 C3
+
+movl $77860, 20(%rsp)
+. 3441 0x12345678 9
+. C7 44 24 14 24 30 01 00 C3
+
+movl $77861, %eax
+. 3442 0x12345678 6
+. B8 25 30 01 00 C3
+
+movl $77861, %edi
+. 3443 0x12345678 6
+. BF 25 30 01 00 C3
+
+movl $77861, %esi
+. 3444 0x12345678 6
+. BE 25 30 01 00 C3
+
+movl $77863, %edi
+. 3445 0x12345678 6
+. BF 27 30 01 00 C3
+
+movl $77865, %eax
+. 3446 0x12345678 6
+. B8 29 30 01 00 C3
+
+movl $77865, %esi
+. 3447 0x12345678 6
+. BE 29 30 01 00 C3
+
+movl $77867, %edi
+. 3448 0x12345678 6
+. BF 2B 30 01 00 C3
+
+movl $77869, %eax
+. 3449 0x12345678 6
+. B8 2D 30 01 00 C3
+
+movl $77869, %edi
+. 3450 0x12345678 6
+. BF 2D 30 01 00 C3
+
+movl $77869, %esi
+. 3451 0x12345678 6
+. BE 2D 30 01 00 C3
+
+movl $77871, %edi
+. 3452 0x12345678 6
+. BF 2F 30 01 00 C3
+
+movl $77872, %edi
+. 3453 0x12345678 6
+. BF 30 30 01 00 C3
+
+movl $77878, %edi
+. 3454 0x12345678 6
+. BF 36 30 01 00 C3
+
+movl $77879, %edi
+. 3455 0x12345678 6
+. BF 37 30 01 00 C3
+
+movl $77880, %edi
+. 3456 0x12345678 6
+. BF 38 30 01 00 C3
+
+movl $77881, %edi
+. 3457 0x12345678 6
+. BF 39 30 01 00 C3
+
+movl $77883, %edi
+. 3458 0x12345678 6
+. BF 3B 30 01 00 C3
+
+movl $77886, %edi
+. 3459 0x12345678 6
+. BF 3E 30 01 00 C3
+
+movl $77888, %edi
+. 3460 0x12345678 6
+. BF 40 30 01 00 C3
+
+movl $77889, %edi
+. 3461 0x12345678 6
+. BF 41 30 01 00 C3
+
+movl $77890, %edi
+. 3462 0x12345678 6
+. BF 42 30 01 00 C3
+
+movl $77891, %edi
+. 3463 0x12345678 6
+. BF 43 30 01 00 C3
+
+movl $77892, %edi
+. 3464 0x12345678 6
+. BF 44 30 01 00 C3
+
+movl $77893, %edi
+. 3465 0x12345678 6
+. BF 45 30 01 00 C3
+
+movl $77894, %edi
+. 3466 0x12345678 6
+. BF 46 30 01 00 C3
+
+movl $77895, %edi
+. 3467 0x12345678 6
+. BF 47 30 01 00 C3
+
+movl $77896, %edi
+. 3468 0x12345678 6
+. BF 48 30 01 00 C3
+
+movl $77897, %edi
+. 3469 0x12345678 6
+. BF 49 30 01 00 C3
+
+movl $77899, %edi
+. 3470 0x12345678 6
+. BF 4B 30 01 00 C3
+
+movl $778, %edx
+. 3471 0x12345678 6
+. BA 0A 03 00 00 C3
+
+movl $77900, %edi
+. 3472 0x12345678 6
+. BF 4C 30 01 00 C3
+
+movl $77901, 4(%rsp)
+. 3473 0x12345678 9
+. C7 44 24 04 4D 30 01 00 C3
+
+movl $77901, %edi
+. 3474 0x12345678 6
+. BF 4D 30 01 00 C3
+
+movl $77902, %edi
+. 3475 0x12345678 6
+. BF 4E 30 01 00 C3
+
+movl $77903, %edi
+. 3476 0x12345678 6
+. BF 4F 30 01 00 C3
+
+movl $77904, 4(%rsp)
+. 3477 0x12345678 9
+. C7 44 24 04 50 30 01 00 C3
+
+movl $77904, %edi
+. 3478 0x12345678 6
+. BF 50 30 01 00 C3
+
+movl $77905, %edi
+. 3479 0x12345678 6
+. BF 51 30 01 00 C3
+
+movl $77906, %edi
+. 3480 0x12345678 6
+. BF 52 30 01 00 C3
+
+movl $77907, %edi
+. 3481 0x12345678 6
+. BF 53 30 01 00 C3
+
+movl $77913, %ebp
+. 3482 0x12345678 6
+. BD 59 30 01 00 C3
+
+movl $77913, %ecx
+. 3483 0x12345678 6
+. B9 59 30 01 00 C3
+
+movl $77913, %esi
+. 3484 0x12345678 6
+. BE 59 30 01 00 C3
+
+movl $77914, %ebp
+. 3485 0x12345678 6
+. BD 5A 30 01 00 C3
+
+movl $77914, %ecx
+. 3486 0x12345678 6
+. B9 5A 30 01 00 C3
+
+movl $77914, %esi
+. 3487 0x12345678 6
+. BE 5A 30 01 00 C3
+
+movl $77915, %ebp
+. 3488 0x12345678 6
+. BD 5B 30 01 00 C3
+
+movl $77915, %ecx
+. 3489 0x12345678 6
+. B9 5B 30 01 00 C3
+
+movl $77915, %esi
+. 3490 0x12345678 6
+. BE 5B 30 01 00 C3
+
+movl $77916, %ebp
+. 3491 0x12345678 6
+. BD 5C 30 01 00 C3
+
+movl $77916, %ecx
+. 3492 0x12345678 6
+. B9 5C 30 01 00 C3
+
+movl $77916, %esi
+. 3493 0x12345678 6
+. BE 5C 30 01 00 C3
+
+movl $77917, %edi
+. 3494 0x12345678 6
+. BF 5D 30 01 00 C3
+
+movl $77918, %edi
+. 3495 0x12345678 6
+. BF 5E 30 01 00 C3
+
+movl $77919, %edi
+. 3496 0x12345678 6
+. BF 5F 30 01 00 C3
+
+movl $77920, %edi
+. 3497 0x12345678 6
+. BF 60 30 01 00 C3
+
+movl $77921, %edi
+. 3498 0x12345678 6
+. BF 61 30 01 00 C3
+
+movl $77922, %edi
+. 3499 0x12345678 6
+. BF 62 30 01 00 C3
+
+movl $77923, %edi
+. 3500 0x12345678 6
+. BF 63 30 01 00 C3
+
+movl $77924, %edi
+. 3501 0x12345678 6
+. BF 64 30 01 00 C3
+
+movl $77925, %edi
+. 3502 0x12345678 6
+. BF 65 30 01 00 C3
+
+movl $77926, %edi
+. 3503 0x12345678 6
+. BF 66 30 01 00 C3
+
+movl $77927, %edi
+. 3504 0x12345678 6
+. BF 67 30 01 00 C3
+
+movl $77928, %edi
+. 3505 0x12345678 6
+. BF 68 30 01 00 C3
+
+movl $77929, %edi
+. 3506 0x12345678 6
+. BF 69 30 01 00 C3
+
+movl $77930, %edi
+. 3507 0x12345678 6
+. BF 6A 30 01 00 C3
+
+movl $77931, %edi
+. 3508 0x12345678 6
+. BF 6B 30 01 00 C3
+
+movl $77932, %edi
+. 3509 0x12345678 6
+. BF 6C 30 01 00 C3
+
+movl $77933, %edi
+. 3510 0x12345678 6
+. BF 6D 30 01 00 C3
+
+movl $77934, %edi
+. 3511 0x12345678 6
+. BF 6E 30 01 00 C3
+
+movl $77935, %edi
+. 3512 0x12345678 6
+. BF 6F 30 01 00 C3
+
+movl $77937, %edi
+. 3513 0x12345678 6
+. BF 71 30 01 00 C3
+
+movl $77938, %edi
+. 3514 0x12345678 6
+. BF 72 30 01 00 C3
+
+movl $77939, %edi
+. 3515 0x12345678 6
+. BF 73 30 01 00 C3
+
+movl $77940, %edi
+. 3516 0x12345678 6
+. BF 74 30 01 00 C3
+
+movl $77941, %edi
+. 3517 0x12345678 6
+. BF 75 30 01 00 C3
+
+movl $77942, %edi
+. 3518 0x12345678 6
+. BF 76 30 01 00 C3
+
+movl $77943, %edi
+. 3519 0x12345678 6
+. BF 77 30 01 00 C3
+
+movl $77945, %edi
+. 3520 0x12345678 6
+. BF 79 30 01 00 C3
+
+movl $77949, %r13d
+. 3521 0x12345678 7
+. 41 BD 7D 30 01 00 C3
+
+movl $77950, %r13d
+. 3522 0x12345678 7
+. 41 BD 7E 30 01 00 C3
+
+movl $77951, %r13d
+. 3523 0x12345678 7
+. 41 BD 7F 30 01 00 C3
+
+movl $77952, %r13d
+. 3524 0x12345678 7
+. 41 BD 80 30 01 00 C3
+
+movl $77953, %r13d
+. 3525 0x12345678 7
+. 41 BD 81 30 01 00 C3
+
+movl $77954, %r13d
+. 3526 0x12345678 7
+. 41 BD 82 30 01 00 C3
+
+movl $77955, %r13d
+. 3527 0x12345678 7
+. 41 BD 83 30 01 00 C3
+
+movl $77956, %r13d
+. 3528 0x12345678 7
+. 41 BD 84 30 01 00 C3
+
+movl $77957, %r13d
+. 3529 0x12345678 7
+. 41 BD 85 30 01 00 C3
+
+movl $77958, %r13d
+. 3530 0x12345678 7
+. 41 BD 86 30 01 00 C3
+
+movl $77959, %r13d
+. 3531 0x12345678 7
+. 41 BD 87 30 01 00 C3
+
+movl $77960, %r13d
+. 3532 0x12345678 7
+. 41 BD 88 30 01 00 C3
+
+movl $77961, %r13d
+. 3533 0x12345678 7
+. 41 BD 89 30 01 00 C3
+
+movl $77962, %r13d
+. 3534 0x12345678 7
+. 41 BD 8A 30 01 00 C3
+
+movl $77963, %r13d
+. 3535 0x12345678 7
+. 41 BD 8B 30 01 00 C3
+
+movl $77964, %r13d
+. 3536 0x12345678 7
+. 41 BD 8C 30 01 00 C3
+
+movl $77965, %r13d
+. 3537 0x12345678 7
+. 41 BD 8D 30 01 00 C3
+
+movl $77966, %r13d
+. 3538 0x12345678 7
+. 41 BD 8E 30 01 00 C3
+
+movl $77967, %r13d
+. 3539 0x12345678 7
+. 41 BD 8F 30 01 00 C3
+
+movl $77968, %r13d
+. 3540 0x12345678 7
+. 41 BD 90 30 01 00 C3
+
+movl $77969, %r13d
+. 3541 0x12345678 7
+. 41 BD 91 30 01 00 C3
+
+movl $77970, %r13d
+. 3542 0x12345678 7
+. 41 BD 92 30 01 00 C3
+
+movl $77971, %r13d
+. 3543 0x12345678 7
+. 41 BD 93 30 01 00 C3
+
+movl $77972, %r13d
+. 3544 0x12345678 7
+. 41 BD 94 30 01 00 C3
+
+movl $77973, %r13d
+. 3545 0x12345678 7
+. 41 BD 95 30 01 00 C3
+
+movl $77974, %r13d
+. 3546 0x12345678 7
+. 41 BD 96 30 01 00 C3
+
+movl $77975, %r13d
+. 3547 0x12345678 7
+. 41 BD 97 30 01 00 C3
+
+movl $77976, %r13d
+. 3548 0x12345678 7
+. 41 BD 98 30 01 00 C3
+
+movl $77977, %r13d
+. 3549 0x12345678 7
+. 41 BD 99 30 01 00 C3
+
+movl $77978, %ecx
+. 3550 0x12345678 6
+. B9 9A 30 01 00 C3
+
+movl $77978, %edx
+. 3551 0x12345678 6
+. BA 9A 30 01 00 C3
+
+movl $77979, %ecx
+. 3552 0x12345678 6
+. B9 9B 30 01 00 C3
+
+movl $77979, %edx
+. 3553 0x12345678 6
+. BA 9B 30 01 00 C3
+
+movl $77980, %ecx
+. 3554 0x12345678 6
+. B9 9C 30 01 00 C3
+
+movl $77980, %edx
+. 3555 0x12345678 6
+. BA 9C 30 01 00 C3
+
+movl $77981, %ecx
+. 3556 0x12345678 6
+. B9 9D 30 01 00 C3
+
+movl $77981, %edx
+. 3557 0x12345678 6
+. BA 9D 30 01 00 C3
+
+movl $77982, %ecx
+. 3558 0x12345678 6
+. B9 9E 30 01 00 C3
+
+movl $77982, %edx
+. 3559 0x12345678 6
+. BA 9E 30 01 00 C3
+
+movl $77983, %ecx
+. 3560 0x12345678 6
+. B9 9F 30 01 00 C3
+
+movl $77983, %edx
+. 3561 0x12345678 6
+. BA 9F 30 01 00 C3
+
+movl $77984, %r13d
+. 3562 0x12345678 7
+. 41 BD A0 30 01 00 C3
+
+movl $77985, %r13d
+. 3563 0x12345678 7
+. 41 BD A1 30 01 00 C3
+
+movl $77986, %r13d
+. 3564 0x12345678 7
+. 41 BD A2 30 01 00 C3
+
+movl $77987, %r13d
+. 3565 0x12345678 7
+. 41 BD A3 30 01 00 C3
+
+movl $77988, %r13d
+. 3566 0x12345678 7
+. 41 BD A4 30 01 00 C3
+
+movl $77989, %r13d
+. 3567 0x12345678 7
+. 41 BD A5 30 01 00 C3
+
+movl $77990, %r13d
+. 3568 0x12345678 7
+. 41 BD A6 30 01 00 C3
+
+movl $77991, %r13d
+. 3569 0x12345678 7
+. 41 BD A7 30 01 00 C3
+
+movl $77992, %r13d
+. 3570 0x12345678 7
+. 41 BD A8 30 01 00 C3
+
+movl $77993, %ecx
+. 3571 0x12345678 6
+. B9 A9 30 01 00 C3
+
+movl $77994, %ecx
+. 3572 0x12345678 6
+. B9 AA 30 01 00 C3
+
+movl $77995, %ecx
+. 3573 0x12345678 6
+. B9 AB 30 01 00 C3
+
+movl $77996, %ecx
+. 3574 0x12345678 6
+. B9 AC 30 01 00 C3
+
+movl $77997, %ecx
+. 3575 0x12345678 6
+. B9 AD 30 01 00 C3
+
+movl $77998, %ecx
+. 3576 0x12345678 6
+. B9 AE 30 01 00 C3
+
+movl $77999, (%rsi)
+. 3577 0x12345678 7
+. C7 06 AF 30 01 00 C3
+
+movl $779, %edx
+. 3578 0x12345678 6
+. BA 0B 03 00 00 C3
+
+movl $78000, (%rsi)
+. 3579 0x12345678 7
+. C7 06 B0 30 01 00 C3
+
+movl $78001, (%rsi)
+. 3580 0x12345678 7
+. C7 06 B1 30 01 00 C3
+
+movl $78002, (%rsi)
+. 3581 0x12345678 7
+. C7 06 B2 30 01 00 C3
+
+movl $78003, %ecx
+. 3582 0x12345678 6
+. B9 B3 30 01 00 C3
+
+movl $78004, %ecx
+. 3583 0x12345678 6
+. B9 B4 30 01 00 C3
+
+movl $78005, %ecx
+. 3584 0x12345678 6
+. B9 B5 30 01 00 C3
+
+movl $78006, %ecx
+. 3585 0x12345678 6
+. B9 B6 30 01 00 C3
+
+movl $78007, %ecx
+. 3586 0x12345678 6
+. B9 B7 30 01 00 C3
+
+movl $78008, %ecx
+. 3587 0x12345678 6
+. B9 B8 30 01 00 C3
+
+movl $78009, %ecx
+. 3588 0x12345678 6
+. B9 B9 30 01 00 C3
+
+movl $78010, %ecx
+. 3589 0x12345678 6
+. B9 BA 30 01 00 C3
+
+movl $78011, %ecx
+. 3590 0x12345678 6
+. B9 BB 30 01 00 C3
+
+movl $78012, (%rsi)
+. 3591 0x12345678 7
+. C7 06 BC 30 01 00 C3
+
+movl $78013, (%rsi)
+. 3592 0x12345678 7
+. C7 06 BD 30 01 00 C3
+
+movl $78014, (%rsi)
+. 3593 0x12345678 7
+. C7 06 BE 30 01 00 C3
+
+movl $78015, (%rsi)
+. 3594 0x12345678 7
+. C7 06 BF 30 01 00 C3
+
+movl $78016, %ecx
+. 3595 0x12345678 6
+. B9 C0 30 01 00 C3
+
+movl $78017, %ecx
+. 3596 0x12345678 6
+. B9 C1 30 01 00 C3
+
+movl $78018, %ecx
+. 3597 0x12345678 6
+. B9 C2 30 01 00 C3
+
+movl $78019, %ecx
+. 3598 0x12345678 6
+. B9 C3 30 01 00 C3
+
+movl $78020, %ecx
+. 3599 0x12345678 6
+. B9 C4 30 01 00 C3
+
+movl $78021, %ecx
+. 3600 0x12345678 6
+. B9 C5 30 01 00 C3
+
+movl $78022, %ecx
+. 3601 0x12345678 6
+. B9 C6 30 01 00 C3
+
+movl $78023, %ecx
+. 3602 0x12345678 6
+. B9 C7 30 01 00 C3
+
+movl $78024, %ecx
+. 3603 0x12345678 6
+. B9 C8 30 01 00 C3
+
+movl $78025, (%rsi)
+. 3604 0x12345678 7
+. C7 06 C9 30 01 00 C3
+
+movl $78026, (%rsi)
+. 3605 0x12345678 7
+. C7 06 CA 30 01 00 C3
+
+movl $78027, (%rsi)
+. 3606 0x12345678 7
+. C7 06 CB 30 01 00 C3
+
+movl $78028, (%rsi)
+. 3607 0x12345678 7
+. C7 06 CC 30 01 00 C3
+
+movl $78030, %ecx
+. 3608 0x12345678 6
+. B9 CE 30 01 00 C3
+
+movl $78032, %ecx
+. 3609 0x12345678 6
+. B9 D0 30 01 00 C3
+
+movl $78033, %ecx
+. 3610 0x12345678 6
+. B9 D1 30 01 00 C3
+
+movl $78034, %ecx
+. 3611 0x12345678 6
+. B9 D2 30 01 00 C3
+
+movl $78035, %ecx
+. 3612 0x12345678 6
+. B9 D3 30 01 00 C3
+
+movl $78036, %ecx
+. 3613 0x12345678 6
+. B9 D4 30 01 00 C3
+
+movl $78037, %ecx
+. 3614 0x12345678 6
+. B9 D5 30 01 00 C3
+
+movl $78038, (%rsi)
+. 3615 0x12345678 7
+. C7 06 D6 30 01 00 C3
+
+movl $78039, (%rsi)
+. 3616 0x12345678 7
+. C7 06 D7 30 01 00 C3
+
+movl $78040, (%rsi)
+. 3617 0x12345678 7
+. C7 06 D8 30 01 00 C3
+
+movl $78041, (%rsi)
+. 3618 0x12345678 7
+. C7 06 D9 30 01 00 C3
+
+movl $78043, %edi
+. 3619 0x12345678 6
+. BF DB 30 01 00 C3
+
+movl $78045, %edi
+. 3620 0x12345678 6
+. BF DD 30 01 00 C3
+
+movl $78046, %edi
+. 3621 0x12345678 6
+. BF DE 30 01 00 C3
+
+movl $78047, %edi
+. 3622 0x12345678 6
+. BF DF 30 01 00 C3
+
+movl $78048, %edi
+. 3623 0x12345678 6
+. BF E0 30 01 00 C3
+
+movl $78049, %edi
+. 3624 0x12345678 6
+. BF E1 30 01 00 C3
+
+movl $78050, %edi
+. 3625 0x12345678 6
+. BF E2 30 01 00 C3
+
+movl $78052, %edi
+. 3626 0x12345678 6
+. BF E4 30 01 00 C3
+
+movl $78053, %edi
+. 3627 0x12345678 6
+. BF E5 30 01 00 C3
+
+movl $78054, %ecx
+. 3628 0x12345678 6
+. B9 E6 30 01 00 C3
+
+movl $78055, %ecx
+. 3629 0x12345678 6
+. B9 E7 30 01 00 C3
+
+movl $78056, %ecx
+. 3630 0x12345678 6
+. B9 E8 30 01 00 C3
+
+movl $78056, %edi
+. 3631 0x12345678 6
+. BF E8 30 01 00 C3
+
+movl $78061, %ecx
+. 3632 0x12345678 6
+. B9 ED 30 01 00 C3
+
+movl $78062, %ecx
+. 3633 0x12345678 6
+. B9 EE 30 01 00 C3
+
+movl $78063, %ecx
+. 3634 0x12345678 6
+. B9 EF 30 01 00 C3
+
+movl $78064, %ecx
+. 3635 0x12345678 6
+. B9 F0 30 01 00 C3
+
+movl $78065, %ecx
+. 3636 0x12345678 6
+. B9 F1 30 01 00 C3
+
+movl $78066, %ecx
+. 3637 0x12345678 6
+. B9 F2 30 01 00 C3
+
+movl $78067, %ecx
+. 3638 0x12345678 6
+. B9 F3 30 01 00 C3
+
+movl $78068, %ecx
+. 3639 0x12345678 6
+. B9 F4 30 01 00 C3
+
+movl $78069, %ecx
+. 3640 0x12345678 6
+. B9 F5 30 01 00 C3
+
+movl $78070, %ecx
+. 3641 0x12345678 6
+. B9 F6 30 01 00 C3
+
+movl $78071, %ecx
+. 3642 0x12345678 6
+. B9 F7 30 01 00 C3
+
+movl $78072, %ecx
+. 3643 0x12345678 6
+. B9 F8 30 01 00 C3
+
+movl $78073, %ecx
+. 3644 0x12345678 6
+. B9 F9 30 01 00 C3
+
+movl $78074, %ecx
+. 3645 0x12345678 6
+. B9 FA 30 01 00 C3
+
+movl $78075, %ecx
+. 3646 0x12345678 6
+. B9 FB 30 01 00 C3
+
+movl $78076, %ecx
+. 3647 0x12345678 6
+. B9 FC 30 01 00 C3
+
+movl $78077, %ecx
+. 3648 0x12345678 6
+. B9 FD 30 01 00 C3
+
+movl $78078, %ecx
+. 3649 0x12345678 6
+. B9 FE 30 01 00 C3
+
+movl $78079, %ecx
+. 3650 0x12345678 6
+. B9 FF 30 01 00 C3
+
+movl $78080, %ecx
+. 3651 0x12345678 6
+. B9 00 31 01 00 C3
+
+movl $78081, %ecx
+. 3652 0x12345678 6
+. B9 01 31 01 00 C3
+
+movl $78082, %ecx
+. 3653 0x12345678 6
+. B9 02 31 01 00 C3
+
+movl $78083, %ecx
+. 3654 0x12345678 6
+. B9 03 31 01 00 C3
+
+movl $78084, %ecx
+. 3655 0x12345678 6
+. B9 04 31 01 00 C3
+
+movl $78085, %ecx
+. 3656 0x12345678 6
+. B9 05 31 01 00 C3
+
+movl $78086, %ecx
+. 3657 0x12345678 6
+. B9 06 31 01 00 C3
+
+movl $78087, %ecx
+. 3658 0x12345678 6
+. B9 07 31 01 00 C3
+
+movl $78088, %ecx
+. 3659 0x12345678 6
+. B9 08 31 01 00 C3
+
+movl $78089, %ecx
+. 3660 0x12345678 6
+. B9 09 31 01 00 C3
+
+movl $78090, %ecx
+. 3661 0x12345678 6
+. B9 0A 31 01 00 C3
+
+movl $78091, %ecx
+. 3662 0x12345678 6
+. B9 0B 31 01 00 C3
+
+movl $78092, %ecx
+. 3663 0x12345678 6
+. B9 0C 31 01 00 C3
+
+movl $78092, %edx
+. 3664 0x12345678 6
+. BA 0C 31 01 00 C3
+
+movl $78093, %ecx
+. 3665 0x12345678 6
+. B9 0D 31 01 00 C3
+
+movl $78093, %edx
+. 3666 0x12345678 6
+. BA 0D 31 01 00 C3
+
+movl $78094, %ecx
+. 3667 0x12345678 6
+. B9 0E 31 01 00 C3
+
+movl $78094, %edx
+. 3668 0x12345678 6
+. BA 0E 31 01 00 C3
+
+movl $78095, %ecx
+. 3669 0x12345678 6
+. B9 0F 31 01 00 C3
+
+movl $78095, %edx
+. 3670 0x12345678 6
+. BA 0F 31 01 00 C3
+
+movl $78096, %ecx
+. 3671 0x12345678 6
+. B9 10 31 01 00 C3
+
+movl $78096, %edx
+. 3672 0x12345678 6
+. BA 10 31 01 00 C3
+
+movl $78097, %ecx
+. 3673 0x12345678 6
+. B9 11 31 01 00 C3
+
+movl $78097, %edx
+. 3674 0x12345678 6
+. BA 11 31 01 00 C3
+
+movl $78098, %ecx
+. 3675 0x12345678 6
+. B9 12 31 01 00 C3
+
+movl $78098, %edx
+. 3676 0x12345678 6
+. BA 12 31 01 00 C3
+
+movl $78099, %ecx
+. 3677 0x12345678 6
+. B9 13 31 01 00 C3
+
+movl $78099, %edx
+. 3678 0x12345678 6
+. BA 13 31 01 00 C3
+
+movl $780, %edx
+. 3679 0x12345678 6
+. BA 0C 03 00 00 C3
+
+movl $78100, %ecx
+. 3680 0x12345678 6
+. B9 14 31 01 00 C3
+
+movl $78101, %ecx
+. 3681 0x12345678 6
+. B9 15 31 01 00 C3
+
+movl $78102, %ecx
+. 3682 0x12345678 6
+. B9 16 31 01 00 C3
+
+movl $78103, %ecx
+. 3683 0x12345678 6
+. B9 17 31 01 00 C3
+
+movl $78104, %ecx
+. 3684 0x12345678 6
+. B9 18 31 01 00 C3
+
+movl $78105, %ecx
+. 3685 0x12345678 6
+. B9 19 31 01 00 C3
+
+movl $78106, %ecx
+. 3686 0x12345678 6
+. B9 1A 31 01 00 C3
+
+movl $78107, %ecx
+. 3687 0x12345678 6
+. B9 1B 31 01 00 C3
+
+movl $78108, %ecx
+. 3688 0x12345678 6
+. B9 1C 31 01 00 C3
+
+movl $78109, %ecx
+. 3689 0x12345678 6
+. B9 1D 31 01 00 C3
+
+movl $78110, %ecx
+. 3690 0x12345678 6
+. B9 1E 31 01 00 C3
+
+movl $7816, %edx
+. 3691 0x12345678 6
+. BA 88 1E 00 00 C3
+
+movl $781, %edx
+. 3692 0x12345678 6
+. BA 0D 03 00 00 C3
+
+movl $782, %edx
+. 3693 0x12345678 6
+. BA 0E 03 00 00 C3
+
+movl $788, %edx
+. 3694 0x12345678 6
+. BA 14 03 00 00 C3
+
+movl $7902, %edx
+. 3695 0x12345678 6
+. BA DE 1E 00 00 C3
+
+movl $7912, %edx
+. 3696 0x12345678 6
+. BA E8 1E 00 00 C3
+
+movl $7922, %edx
+. 3697 0x12345678 6
+. BA F2 1E 00 00 C3
+
+movl $794, %edx
+. 3698 0x12345678 6
+. BA 1A 03 00 00 C3
+
+movl $7970, %edx
+. 3699 0x12345678 6
+. BA 22 1F 00 00 C3
+
+movl $7978, %edx
+. 3700 0x12345678 6
+. BA 2A 1F 00 00 C3
+
+movl $797, %edx
+. 3701 0x12345678 6
+. BA 1D 03 00 00 C3
+
+movl $7986, %edx
+. 3702 0x12345678 6
+. BA 32 1F 00 00 C3
+
+movl $7994, %edx
+. 3703 0x12345678 6
+. BA 3A 1F 00 00 C3
+
+movl $7, %eax
+. 3704 0x12345678 6
+. B8 07 00 00 00 C3
+
+movl $7, %edi
+. 3705 0x12345678 6
+. BF 07 00 00 00 C3
+
+movl $7, %esi
+. 3706 0x12345678 6
+. BE 07 00 00 00 C3
+
+movl $7, (%rax)
+. 3707 0x12345678 7
+. C7 00 07 00 00 00 C3
+
+movl $8003, %edx
+. 3708 0x12345678 6
+. BA 43 1F 00 00 C3
+
+movl $800, %edx
+. 3709 0x12345678 6
+. BA 20 03 00 00 C3
+
+movl 800(%rsp), %edi
+. 3710 0x12345678 8
+. 8B BC 24 20 03 00 00 C3
+
+movl $801, %edx
+. 3711 0x12345678 6
+. BA 21 03 00 00 C3
+
+movl $8067, %edx
+. 3712 0x12345678 6
+. BA 83 1F 00 00 C3
+
+movl $8077, %edx
+. 3713 0x12345678 6
+. BA 8D 1F 00 00 C3
+
+movl $8078, %edx
+. 3714 0x12345678 6
+. BA 8E 1F 00 00 C3
+
+movl $80, %eax
+. 3715 0x12345678 6
+. B8 50 00 00 00 C3
+
+movl $80, %edi
+. 3716 0x12345678 6
+. BF 50 00 00 00 C3
+
+movl 80(%rsp), %edi
+. 3717 0x12345678 5
+. 8B 7C 24 50 C3
+
+movl 80(%rsp), %esi
+. 3718 0x12345678 5
+. 8B 74 24 50 C3
+
+movl $8106, %edx
+. 3719 0x12345678 6
+. BA AA 1F 00 00 C3
+
+movl $8, 16(%rbx)
+. 3720 0x12345678 8
+. C7 43 10 08 00 00 00 C3
+
+movl $8180, %edx
+. 3721 0x12345678 6
+. BA F4 1F 00 00 C3
+
+movl $81920, 32(%rax)
+. 3722 0x12345678 8
+. C7 40 20 00 40 01 00 C3
+
+movl $81920, 32(%rbx)
+. 3723 0x12345678 8
+. C7 43 20 00 40 01 00 C3
+
+movl $81920, %edi
+. 3724 0x12345678 6
+. BF 00 40 01 00 C3
+
+movl $81920, %esi
+. 3725 0x12345678 6
+. BE 00 40 01 00 C3
+
+movl $81921, %edi
+. 3726 0x12345678 6
+. BF 01 40 01 00 C3
+
+movl $81922, 32(%rax)
+. 3727 0x12345678 8
+. C7 40 20 02 40 01 00 C3
+
+movl $81922, %edi
+. 3728 0x12345678 6
+. BF 02 40 01 00 C3
+
+movl $81923, 32(%rax)
+. 3729 0x12345678 8
+. C7 40 20 03 40 01 00 C3
+
+movl $81923, %edi
+. 3730 0x12345678 6
+. BF 03 40 01 00 C3
+
+movl $81924, %edi
+. 3731 0x12345678 6
+. BF 04 40 01 00 C3
+
+movl $81926, %esi
+. 3732 0x12345678 6
+. BE 06 40 01 00 C3
+
+movl $81927, %edi
+. 3733 0x12345678 6
+. BF 07 40 01 00 C3
+
+movl $81928, %esi
+. 3734 0x12345678 6
+. BE 08 40 01 00 C3
+
+movl $8205, %edx
+. 3735 0x12345678 6
+. BA 0D 20 00 00 C3
+
+movl $8246, %edx
+. 3736 0x12345678 6
+. BA 36 20 00 00 C3
+
+movl $8, 24(%rax)
+. 3737 0x12345678 8
+. C7 40 18 08 00 00 00 C3
+
+movl $826343424, %edi
+. 3738 0x12345678 6
+. BF 00 00 41 31 C3
+
+movl $826343424, %r14d
+. 3739 0x12345678 7
+. 41 BE 00 00 41 31 C3
+
+movl $8, 28(%rsp)
+. 3740 0x12345678 9
+. C7 44 24 1C 08 00 00 00 C3
+
+movl $8317, %edx
+. 3741 0x12345678 6
+. BA 7D 20 00 00 C3
+
+movl $836, %edx
+. 3742 0x12345678 6
+. BA 44 03 00 00 C3
+
+movl $837, %edx
+. 3743 0x12345678 6
+. BA 45 03 00 00 C3
+
+movl $838, %edx
+. 3744 0x12345678 6
+. BA 46 03 00 00 C3
+
+movl $839, %edx
+. 3745 0x12345678 6
+. BA 47 03 00 00 C3
+
+movl $841, %edx
+. 3746 0x12345678 6
+. BA 49 03 00 00 C3
+
+movl $842, %edx
+. 3747 0x12345678 6
+. BA 4A 03 00 00 C3
+
+movl $84, %eax
+. 3748 0x12345678 6
+. B8 54 00 00 00 C3
+
+movl 84(%rsp), %edi
+. 3749 0x12345678 5
+. 8B 7C 24 54 C3
+
+movl $8565, %edx
+. 3750 0x12345678 6
+. BA 75 21 00 00 C3
+
+movl $8600, %edx
+. 3751 0x12345678 6
+. BA 98 21 00 00 C3
+
+movl $86016, 28(%rax)
+. 3752 0x12345678 8
+. C7 40 1C 00 50 01 00 C3
+
+movl $86017, 100(%rbx)
+. 3753 0x12345678 8
+. C7 43 64 01 50 01 00 C3
+
+movl $86017, 112(%rbx)
+. 3754 0x12345678 8
+. C7 43 70 01 50 01 00 C3
+
+movl $86017, 124(%rbx)
+. 3755 0x12345678 8
+. C7 43 7C 01 50 01 00 C3
+
+movl $86017, 28(%rax)
+. 3756 0x12345678 8
+. C7 40 1C 01 50 01 00 C3
+
+movl $86017, 52(%rbx)
+. 3757 0x12345678 8
+. C7 43 34 01 50 01 00 C3
+
+movl $86017, 64(%rbx)
+. 3758 0x12345678 8
+. C7 43 40 01 50 01 00 C3
+
+movl $86017, 76(%rbx)
+. 3759 0x12345678 8
+. C7 43 4C 01 50 01 00 C3
+
+movl $86017, 88(%rbx)
+. 3760 0x12345678 8
+. C7 43 58 01 50 01 00 C3
+
+movl $86018, 100(%rax)
+. 3761 0x12345678 8
+. C7 40 64 02 50 01 00 C3
+
+movl $86018, 100(%rbx)
+. 3762 0x12345678 8
+. C7 43 64 02 50 01 00 C3
+
+movl $86018, 28(%rax)
+. 3763 0x12345678 8
+. C7 40 1C 02 50 01 00 C3
+
+movl $86018, 52(%rax)
+. 3764 0x12345678 8
+. C7 40 34 02 50 01 00 C3
+
+movl $86018, 52(%rbx)
+. 3765 0x12345678 8
+. C7 43 34 02 50 01 00 C3
+
+movl $86018, 64(%rax)
+. 3766 0x12345678 8
+. C7 40 40 02 50 01 00 C3
+
+movl $86018, 64(%rbx)
+. 3767 0x12345678 8
+. C7 43 40 02 50 01 00 C3
+
+movl $86018, 76(%rax)
+. 3768 0x12345678 8
+. C7 40 4C 02 50 01 00 C3
+
+movl $86018, 76(%rbx)
+. 3769 0x12345678 8
+. C7 43 4C 02 50 01 00 C3
+
+movl $86018, 88(%rax)
+. 3770 0x12345678 8
+. C7 40 58 02 50 01 00 C3
+
+movl $86018, 88(%rbx)
+. 3771 0x12345678 8
+. C7 43 58 02 50 01 00 C3
+
+movl $86019, 52(%rax)
+. 3772 0x12345678 8
+. C7 40 34 03 50 01 00 C3
+
+movl $8629, %edx
+. 3773 0x12345678 6
+. BA B5 21 00 00 C3
+
+movl $8656, %edx
+. 3774 0x12345678 6
+. BA D0 21 00 00 C3
+
+movl $8726, %edx
+. 3775 0x12345678 6
+. BA 16 22 00 00 C3
+
+movl $8, 72(%rbx)
+. 3776 0x12345678 8
+. C7 43 48 08 00 00 00 C3
+
+movl $8771, %edx
+. 3777 0x12345678 6
+. BA 43 22 00 00 C3
+
+movl $8781, %edx
+. 3778 0x12345678 6
+. BA 4D 22 00 00 C3
+
+movl $8798, %edx
+. 3779 0x12345678 6
+. BA 5E 22 00 00 C3
+
+movl $8811, %edx
+. 3780 0x12345678 6
+. BA 6B 22 00 00 C3
+
+movl $8, 84(%rax)
+. 3781 0x12345678 8
+. C7 40 54 08 00 00 00 C3
+
+movl $8, 84(%rbx)
+. 3782 0x12345678 8
+. C7 43 54 08 00 00 00 C3
+
+movl $88, 68(%rsp)
+. 3783 0x12345678 9
+. C7 44 24 44 58 00 00 00 C3
+
+movl $887, %edx
+. 3784 0x12345678 6
+. BA 77 03 00 00 C3
+
+movl $88, %eax
+. 3785 0x12345678 6
+. B8 58 00 00 00 C3
+
+movl $8, 8(%rbx)
+. 3786 0x12345678 8
+. C7 43 08 08 00 00 00 C3
+
+movl 88(%rsp), %edi
+. 3787 0x12345678 5
+. 8B 7C 24 58 C3
+
+movl $8920, %edx
+. 3788 0x12345678 6
+. BA D8 22 00 00 C3
+
+movl $892, %edx
+. 3789 0x12345678 6
+. BA 7C 03 00 00 C3
+
+movl $8, 92(%rax)
+. 3790 0x12345678 8
+. C7 40 5C 08 00 00 00 C3
+
+movl $8943, %edx
+. 3791 0x12345678 6
+. BA EF 22 00 00 C3
+
+movl $894, %edx
+. 3792 0x12345678 6
+. BA 7E 03 00 00 C3
+
+movl $895, %edx
+. 3793 0x12345678 6
+. BA 7F 03 00 00 C3
+
+movl $8962, %edx
+. 3794 0x12345678 6
+. BA 02 23 00 00 C3
+
+movl $896, %edx
+. 3795 0x12345678 6
+. BA 80 03 00 00 C3
+
+movl $897, %edx
+. 3796 0x12345678 6
+. BA 81 03 00 00 C3
+
+movl $8, %eax
+. 3797 0x12345678 6
+. B8 08 00 00 00 C3
+
+movl $8, %ebp
+. 3798 0x12345678 6
+. BD 08 00 00 00 C3
+
+movl $8, %edi
+. 3799 0x12345678 6
+. BF 08 00 00 00 C3
+
+movl $8, %edx
+. 3800 0x12345678 6
+. BA 08 00 00 00 C3
+
+movl $8, %esi
+. 3801 0x12345678 6
+. BE 08 00 00 00 C3
+
+movl 8(%r12), %edi
+. 3802 0x12345678 6
+. 41 8B 7C 24 08 C3
+
+movl 8(%r12), %edx
+. 3803 0x12345678 6
+. 41 8B 54 24 08 C3
+
+movl 8(%r13), %esi
+. 3804 0x12345678 5
+. 41 8B 75 08 C3
+
+movl 8(%r14), %edi
+. 3805 0x12345678 5
+. 41 8B 7E 08 C3
+
+movl $8, %r8d
+. 3806 0x12345678 7
+. 41 B8 08 00 00 00 C3
+
+movl $8, (%rax)
+. 3807 0x12345678 7
+. C7 00 08 00 00 00 C3
+
+movl 8(%rax), %eax
+. 3808 0x12345678 4
+. 8B 40 08 C3
+
+movl 8(%rax), %edi
+. 3809 0x12345678 4
+. 8B 78 08 C3
+
+movl 8(%rax), %edx
+. 3810 0x12345678 4
+. 8B 50 08 C3
+
+movl 8(%rax), %r10d
+. 3811 0x12345678 5
+. 44 8B 50 08 C3
+
+movl 8(%rax), %r11d
+. 3812 0x12345678 5
+. 44 8B 58 08 C3
+
+movl 8(%rax,%rcx), %edi
+. 3813 0x12345678 5
+. 8B 7C 08 08 C3
+
+movl 8(%rbp), %eax
+. 3814 0x12345678 4
+. 8B 45 08 C3
+
+movl 8(%rbp), %ecx
+. 3815 0x12345678 4
+. 8B 4D 08 C3
+
+movl 8(%rbp), %edi
+. 3816 0x12345678 4
+. 8B 7D 08 C3
+
+movl 8(%rbp), %esi
+. 3817 0x12345678 4
+. 8B 75 08 C3
+
+movl 8(%rbx), %eax
+. 3818 0x12345678 4
+. 8B 43 08 C3
+
+movl 8(%rbx), %edi
+. 3819 0x12345678 4
+. 8B 7B 08 C3
+
+movl 8(%rbx), %edx
+. 3820 0x12345678 4
+. 8B 53 08 C3
+
+movl 8(%rbx), %esi
+. 3821 0x12345678 4
+. 8B 73 08 C3
+
+movl 8(%rbx,%rax,4), %ecx
+. 3822 0x12345678 5
+. 8B 4C 83 08 C3
+
+movl 8(%rdi), %eax
+. 3823 0x12345678 4
+. 8B 47 08 C3
+
+movl 8(%rdi), %edi
+. 3824 0x12345678 4
+. 8B 7F 08 C3
+
+movl 8(%rdi), %edx
+. 3825 0x12345678 4
+. 8B 57 08 C3
+
+movl 8(%rdi), %esi
+. 3826 0x12345678 4
+. 8B 77 08 C3
+
+movl 8(%rdx), %eax
+. 3827 0x12345678 4
+. 8B 42 08 C3
+
+movl 8(%rdx), %edi
+. 3828 0x12345678 4
+. 8B 7A 08 C3
+
+movl 8(%rdx), %edx
+. 3829 0x12345678 4
+. 8B 52 08 C3
+
+movl 8(%rsi), %eax
+. 3830 0x12345678 4
+. 8B 46 08 C3
+
+movl 8(%rsi), %edi
+. 3831 0x12345678 4
+. 8B 7E 08 C3
+
+movl 8(%rsi,%rcx), %eax
+. 3832 0x12345678 5
+. 8B 44 0E 08 C3
+
+movl $8, (%rsp)
+. 3833 0x12345678 8
+. C7 04 24 08 00 00 00 C3
+
+movl 8(%rsp), %eax
+. 3834 0x12345678 5
+. 8B 44 24 08 C3
+
+movl 8(%rsp), %ecx
+. 3835 0x12345678 5
+. 8B 4C 24 08 C3
+
+movl 8(%rsp), %edi
+. 3836 0x12345678 5
+. 8B 7C 24 08 C3
+
+movl 8(%rsp), %edx
+. 3837 0x12345678 5
+. 8B 54 24 08 C3
+
+movl 8(%rsp), %esi
+. 3838 0x12345678 5
+. 8B 74 24 08 C3
+
+movl 8(%rsp), %r8d
+. 3839 0x12345678 6
+. 44 8B 44 24 08 C3
+
+movl $905, %edx
+. 3840 0x12345678 6
+. BA 89 03 00 00 C3
+
+movl $9088, %edx
+. 3841 0x12345678 6
+. BA 80 23 00 00 C3
+
+movl $9120, %edx
+. 3842 0x12345678 6
+. BA A0 23 00 00 C3
+
+movl $9142, %edx
+. 3843 0x12345678 6
+. BA B6 23 00 00 C3
+
+movl $9165, %edx
+. 3844 0x12345678 6
+. BA CD 23 00 00 C3
+
+movl $9188, %edx
+. 3845 0x12345678 6
+. BA E4 23 00 00 C3
+
+movl $9, 24(%rax)
+. 3846 0x12345678 8
+. C7 40 18 09 00 00 00 C3
+
+movl $9, 24(%rdx)
+. 3847 0x12345678 8
+. C7 42 18 09 00 00 00 C3
+
+movl $9257, %edx
+. 3848 0x12345678 6
+. BA 29 24 00 00 C3
+
+movl $9271, %edx
+. 3849 0x12345678 6
+. BA 37 24 00 00 C3
+
+movl $92, %eax
+. 3850 0x12345678 6
+. B8 5C 00 00 00 C3
+
+movl $92, %edx
+. 3851 0x12345678 6
+. BA 5C 00 00 00 C3
+
+movl 92(%rsp), %edi
+. 3852 0x12345678 5
+. 8B 7C 24 5C C3
+
+movl 92(%rsp), %edx
+. 3853 0x12345678 5
+. 8B 54 24 5C C3
+
+movl 92(%rsp), %esi
+. 3854 0x12345678 5
+. 8B 74 24 5C C3
+
+movl $93, %edx
+. 3855 0x12345678 6
+. BA 5D 00 00 00 C3
+
+movl $94208, %esi
+. 3856 0x12345678 6
+. BE 00 70 01 00 C3
+
+movl $94209, %esi
+. 3857 0x12345678 6
+. BE 01 70 01 00 C3
+
+movl $94210, %esi
+. 3858 0x12345678 6
+. BE 02 70 01 00 C3
+
+movl $9500, %edx
+. 3859 0x12345678 6
+. BA 1C 25 00 00 C3
+
+movl $957, %edx
+. 3860 0x12345678 6
+. BA BD 03 00 00 C3
+
+movl $96, %eax
+. 3861 0x12345678 6
+. B8 60 00 00 00 C3
+
+movl 96(%rsp), %edi
+. 3862 0x12345678 5
+. 8B 7C 24 60 C3
+
+movl 96(%rsp), %esi
+. 3863 0x12345678 5
+. 8B 74 24 60 C3
+
+movl $978, %edx
+. 3864 0x12345678 6
+. BA D2 03 00 00 C3
+
+movl $979, %edx
+. 3865 0x12345678 6
+. BA D3 03 00 00 C3
+
+movl $9836, %edx
+. 3866 0x12345678 6
+. BA 6C 26 00 00 C3
+
+movl $9847, %edx
+. 3867 0x12345678 6
+. BA 77 26 00 00 C3
+
+movl $9862, %edx
+. 3868 0x12345678 6
+. BA 86 26 00 00 C3
+
+movl $988, %edx
+. 3869 0x12345678 6
+. BA DC 03 00 00 C3
+
+movl $98, %eax
+. 3870 0x12345678 6
+. B8 62 00 00 00 C3
+
+movl $9963, %edx
+. 3871 0x12345678 6
+. BA EB 26 00 00 C3
+
+movl $9974, %edx
+. 3872 0x12345678 6
+. BA F6 26 00 00 C3
+
+movl $9989, %edx
+. 3873 0x12345678 6
+. BA 05 27 00 00 C3
+
+movl $9, %eax
+. 3874 0x12345678 6
+. B8 09 00 00 00 C3
+
+movl $9, %r13d
+. 3875 0x12345678 7
+. 41 BD 09 00 00 00 C3
+
+movl $9, %r14d
+. 3876 0x12345678 7
+. 41 BE 09 00 00 00 C3
+
+movl $9, (%rax)
+. 3877 0x12345678 7
+. C7 00 09 00 00 00 C3
+
+movl %eax, 100(%rsp)
+. 3878 0x12345678 5
+. 89 44 24 64 C3
+
+movl %eax, 104(%rsp)
+. 3879 0x12345678 5
+. 89 44 24 68 C3
+
+movl %eax, 108(%rsp)
+. 3880 0x12345678 5
+. 89 44 24 6C C3
+
+movl %eax, 112(%rsp)
+. 3881 0x12345678 5
+. 89 44 24 70 C3
+
+movl %eax, 116(%rsp)
+. 3882 0x12345678 5
+. 89 44 24 74 C3
+
+movl %eax, 124(%rsp)
+. 3883 0x12345678 5
+. 89 44 24 7C C3
+
+movl %eax, 128(%rsp)
+. 3884 0x12345678 8
+. 89 84 24 80 00 00 00 C3
+
+movl %eax, 12(%rbp)
+. 3885 0x12345678 4
+. 89 45 0C C3
+
+movl %eax, 12(%rbx)
+. 3886 0x12345678 4
+. 89 43 0C C3
+
+movl %eax, 12(%rcx)
+. 3887 0x12345678 4
+. 89 41 0C C3
+
+movl %eax, 12(%rsi)
+. 3888 0x12345678 4
+. 89 46 0C C3
+
+movl %eax, 12(%rsp)
+. 3889 0x12345678 5
+. 89 44 24 0C C3
+
+movl %eax, 132(%rsp)
+. 3890 0x12345678 8
+. 89 84 24 84 00 00 00 C3
+
+movl %eax, 136(%rsp)
+. 3891 0x12345678 8
+. 89 84 24 88 00 00 00 C3
+
+movl %eax, 140(%rsp)
+. 3892 0x12345678 8
+. 89 84 24 8C 00 00 00 C3
+
+movl %eax, 144(%rsp)
+. 3893 0x12345678 8
+. 89 84 24 90 00 00 00 C3
+
+movl %eax, 148(%rsp)
+. 3894 0x12345678 8
+. 89 84 24 94 00 00 00 C3
+
+movl %eax, 152(%rsp)
+. 3895 0x12345678 8
+. 89 84 24 98 00 00 00 C3
+
+movl %eax, 156(%rsp)
+. 3896 0x12345678 8
+. 89 84 24 9C 00 00 00 C3
+
+movl %eax, 160(%rsp)
+. 3897 0x12345678 8
+. 89 84 24 A0 00 00 00 C3
+
+movl %eax, 164(%rsp)
+. 3898 0x12345678 8
+. 89 84 24 A4 00 00 00 C3
+
+movl %eax, 16(%r12)
+. 3899 0x12345678 6
+. 41 89 44 24 10 C3
+
+movl %eax, 16(%rbx)
+. 3900 0x12345678 4
+. 89 43 10 C3
+
+movl %eax, 16(%rsi)
+. 3901 0x12345678 4
+. 89 46 10 C3
+
+movl %eax, 16(%rsp)
+. 3902 0x12345678 5
+. 89 44 24 10 C3
+
+movl %eax, 172(%rsp)
+. 3903 0x12345678 8
+. 89 84 24 AC 00 00 00 C3
+
+movl %eax, 176(%rsp)
+. 3904 0x12345678 8
+. 89 84 24 B0 00 00 00 C3
+
+movl %eax, 184(%rsp)
+. 3905 0x12345678 8
+. 89 84 24 B8 00 00 00 C3
+
+movl %eax, 188(%rsp)
+. 3906 0x12345678 8
+. 89 84 24 BC 00 00 00 C3
+
+movl %eax, 192(%rsp)
+. 3907 0x12345678 8
+. 89 84 24 C0 00 00 00 C3
+
+movl %eax, 196(%rsp)
+. 3908 0x12345678 8
+. 89 84 24 C4 00 00 00 C3
+
+movl %eax, 200(%rsp)
+. 3909 0x12345678 8
+. 89 84 24 C8 00 00 00 C3
+
+movl %eax, 204(%rsp)
+. 3910 0x12345678 8
+. 89 84 24 CC 00 00 00 C3
+
+movl %eax, 208(%rsp)
+. 3911 0x12345678 8
+. 89 84 24 D0 00 00 00 C3
+
+movl %eax, 20(%r12)
+. 3912 0x12345678 6
+. 41 89 44 24 14 C3
+
+movl %eax, 20(%r13)
+. 3913 0x12345678 5
+. 41 89 45 14 C3
+
+movl %eax, 20(%rbx)
+. 3914 0x12345678 4
+. 89 43 14 C3
+
+movl %eax, 20(%rsp)
+. 3915 0x12345678 5
+. 89 44 24 14 C3
+
+movl %eax, 216(%rsp)
+. 3916 0x12345678 8
+. 89 84 24 D8 00 00 00 C3
+
+movl %eax, 228(%rsp)
+. 3917 0x12345678 8
+. 89 84 24 E4 00 00 00 C3
+
+movl %eax, 232(%rsp)
+. 3918 0x12345678 8
+. 89 84 24 E8 00 00 00 C3
+
+movl %eax, 240(%rsp)
+. 3919 0x12345678 8
+. 89 84 24 F0 00 00 00 C3
+
+movl %eax, 244(%rsp)
+. 3920 0x12345678 8
+. 89 84 24 F4 00 00 00 C3
+
+movl %eax, 248(%rsp)
+. 3921 0x12345678 8
+. 89 84 24 F8 00 00 00 C3
+
+movl %eax, 24(%r13)
+. 3922 0x12345678 5
+. 41 89 45 18 C3
+
+movl %eax, 24(%rbx)
+. 3923 0x12345678 4
+. 89 43 18 C3
+
+movl %eax, 24(%rsp)
+. 3924 0x12345678 5
+. 89 44 24 18 C3
+
+movl %eax, 252(%rsp)
+. 3925 0x12345678 8
+. 89 84 24 FC 00 00 00 C3
+
+movl %eax, 272(%rsp)
+. 3926 0x12345678 8
+. 89 84 24 10 01 00 00 C3
+
+movl %eax, 276(%rsp)
+. 3927 0x12345678 8
+. 89 84 24 14 01 00 00 C3
+
+movl %eax, 284(%rsp)
+. 3928 0x12345678 8
+. 89 84 24 1C 01 00 00 C3
+
+movl %eax, 288(%rsp,%rdx,4)
+. 3929 0x12345678 8
+. 89 84 94 20 01 00 00 C3
+
+movl %eax, 28(%rbx)
+. 3930 0x12345678 4
+. 89 43 1C C3
+
+movl %eax, 28(%rsp)
+. 3931 0x12345678 5
+. 89 44 24 1C C3
+
+movl %eax, 32(%r12)
+. 3932 0x12345678 6
+. 41 89 44 24 20 C3
+
+movl %eax, 32(%r13)
+. 3933 0x12345678 5
+. 41 89 45 20 C3
+
+movl %eax, 32(%rbx)
+. 3934 0x12345678 4
+. 89 43 20 C3
+
+movl %eax, 32(%rdi)
+. 3935 0x12345678 4
+. 89 47 20 C3
+
+movl %eax, 32(%rdx)
+. 3936 0x12345678 4
+. 89 42 20 C3
+
+movl %eax, 32(%rsi)
+. 3937 0x12345678 4
+. 89 46 20 C3
+
+movl %eax, 32(%rsp)
+. 3938 0x12345678 5
+. 89 44 24 20 C3
+
+movl %eax, 36(%rbx)
+. 3939 0x12345678 4
+. 89 43 24 C3
+
+movl %eax, 36(%rsp)
+. 3940 0x12345678 5
+. 89 44 24 24 C3
+
+movl %eax, 40(%rbx)
+. 3941 0x12345678 4
+. 89 43 28 C3
+
+movl %eax, 40(%rsp)
+. 3942 0x12345678 5
+. 89 44 24 28 C3
+
+movl %eax, 44(%rbx)
+. 3943 0x12345678 4
+. 89 43 2C C3
+
+movl %eax, 44(%rsp)
+. 3944 0x12345678 5
+. 89 44 24 2C C3
+
+movl %eax, 48(%rbx)
+. 3945 0x12345678 4
+. 89 43 30 C3
+
+movl %eax, 48(%rsp)
+. 3946 0x12345678 5
+. 89 44 24 30 C3
+
+movl %eax, 4(%rbx)
+. 3947 0x12345678 4
+. 89 43 04 C3
+
+movl %eax, 4(%rsp)
+. 3948 0x12345678 5
+. 89 44 24 04 C3
+
+movl %eax, 52(%rbx)
+. 3949 0x12345678 4
+. 89 43 34 C3
+
+movl %eax, 52(%rsp)
+. 3950 0x12345678 5
+. 89 44 24 34 C3
+
+movl %eax, 540(%rsp)
+. 3951 0x12345678 8
+. 89 84 24 1C 02 00 00 C3
+
+movl %eax, 544(%rsp)
+. 3952 0x12345678 8
+. 89 84 24 20 02 00 00 C3
+
+movl %eax, 548(%rsp)
+. 3953 0x12345678 8
+. 89 84 24 24 02 00 00 C3
+
+movl %eax, 552(%rsp)
+. 3954 0x12345678 8
+. 89 84 24 28 02 00 00 C3
+
+movl %eax, 56(%rbx)
+. 3955 0x12345678 4
+. 89 43 38 C3
+
+movl %eax, 56(%rsp)
+. 3956 0x12345678 5
+. 89 44 24 38 C3
+
+movl %eax, 60(%rbx)
+. 3957 0x12345678 4
+. 89 43 3C C3
+
+movl %eax, 60(%rsp)
+. 3958 0x12345678 5
+. 89 44 24 3C C3
+
+movl %eax, 64(%rsp)
+. 3959 0x12345678 5
+. 89 44 24 40 C3
+
+movl %eax, 68(%rsp)
+. 3960 0x12345678 5
+. 89 44 24 44 C3
+
+movl %eax, 72(%rsp)
+. 3961 0x12345678 5
+. 89 44 24 48 C3
+
+movl %eax, 76(%rsp)
+. 3962 0x12345678 5
+. 89 44 24 4C C3
+
+movl %eax, 80(%rsp)
+. 3963 0x12345678 5
+. 89 44 24 50 C3
+
+movl %eax, 84(%rsp)
+. 3964 0x12345678 5
+. 89 44 24 54 C3
+
+movl %eax, 88(%rsp)
+. 3965 0x12345678 5
+. 89 44 24 58 C3
+
+movl %eax, 8(%rbx)
+. 3966 0x12345678 4
+. 89 43 08 C3
+
+movl %eax, 8(%rsi)
+. 3967 0x12345678 4
+. 89 46 08 C3
+
+movl %eax, 8(%rsp)
+. 3968 0x12345678 5
+. 89 44 24 08 C3
+
+movl %eax, 92(%rsp)
+. 3969 0x12345678 5
+. 89 44 24 5C C3
+
+movl %eax, 96(%rsp)
+. 3970 0x12345678 5
+. 89 44 24 60 C3
+
+movl %eax, %ebp
+. 3971 0x12345678 3
+. 89 C5 C3
+
+movl %eax, %ebx
+. 3972 0x12345678 3
+. 89 C3 C3
+
+movl %eax, %ecx
+. 3973 0x12345678 3
+. 89 C1 C3
+
+movl %eax, %edi
+. 3974 0x12345678 3
+. 89 C7 C3
+
+movl %eax, %edx
+. 3975 0x12345678 3
+. 89 C2 C3
+
+movl %eax, %esi
+. 3976 0x12345678 3
+. 89 C6 C3
+
+movl %eax, 12345(%rip)
+. 3977 0x12345678 7
+. 89 05 39 30 00 00 C3
+
+movl %eax, %r11d
+. 3978 0x12345678 4
+. 41 89 C3 C3
+
+movl %eax, %r12d
+. 3979 0x12345678 4
+. 41 89 C4 C3
+
+movl %eax, %r13d
+. 3980 0x12345678 4
+. 41 89 C5 C3
+
+movl %eax, (%r14)
+. 3981 0x12345678 4
+. 41 89 06 C3
+
+movl %eax, %r14d
+. 3982 0x12345678 4
+. 41 89 C6 C3
+
+movl %eax, (%r15)
+. 3983 0x12345678 4
+. 41 89 07 C3
+
+movl %eax, %r15d
+. 3984 0x12345678 4
+. 41 89 C7 C3
+
+movl %eax, %r8d
+. 3985 0x12345678 4
+. 41 89 C0 C3
+
+movl %eax, (%r8,%rdx,4)
+. 3986 0x12345678 5
+. 41 89 04 90 C3
+
+movl %eax, %r9d
+. 3987 0x12345678 4
+. 41 89 C1 C3
+
+movl %eax, (%rbp)
+. 3988 0x12345678 4
+. 89 45 00 C3
+
+movl %eax, (%rbx)
+. 3989 0x12345678 3
+. 89 03 C3
+
+movl %eax, (%rcx)
+. 3990 0x12345678 3
+. 89 01 C3
+
+movl %eax, (%rcx,%rdx,8)
+. 3991 0x12345678 4
+. 89 04 D1 C3
+
+movl %eax, (%rdx)
+. 3992 0x12345678 3
+. 89 02 C3
+
+movl %eax, (%rsi)
+. 3993 0x12345678 3
+. 89 06 C3
+
+movl %eax, (%rsi,%rdx)
+. 3994 0x12345678 4
+. 89 04 16 C3
+
+movl %eax, (%rsp)
+. 3995 0x12345678 4
+. 89 04 24 C3
+
+movl %ebp, 12(%rdx,%r13)
+. 3996 0x12345678 6
+. 42 89 6C 2A 0C C3
+
+movl %ebp, 12(%rdx,%rbx)
+. 3997 0x12345678 5
+. 89 6C 1A 0C C3
+
+movl %ebp, 24(%rbx)
+. 3998 0x12345678 4
+. 89 6B 18 C3
+
+movl %ebp, 4(%rax)
+. 3999 0x12345678 4
+. 89 68 04 C3
+
+movl %ebp, 572(%rsp)
+. 4000 0x12345678 8
+. 89 AC 24 3C 02 00 00 C3
+
+movl %ebp, 68(%rsp)
+. 4001 0x12345678 5
+. 89 6C 24 44 C3
+
+movl %ebp, 8(%rbx)
+. 4002 0x12345678 4
+. 89 6B 08 C3
+
+movl %ebp, 8(%rsp)
+. 4003 0x12345678 5
+. 89 6C 24 08 C3
+
+movl %ebp, %eax
+. 4004 0x12345678 3
+. 89 E8 C3
+
+movl %ebp, %ebx
+. 4005 0x12345678 3
+. 89 EB C3
+
+movl %ebp, %ecx
+. 4006 0x12345678 3
+. 89 E9 C3
+
+movl %ebp, %edi
+. 4007 0x12345678 3
+. 89 EF C3
+
+movl %ebp, %edx
+. 4008 0x12345678 3
+. 89 EA C3
+
+movl %ebp, %esi
+. 4009 0x12345678 3
+. 89 EE C3
+
+movl %ebp, %r12d
+. 4010 0x12345678 4
+. 41 89 EC C3
+
+movl %ebp, %r13d
+. 4011 0x12345678 4
+. 41 89 ED C3
+
+movl %ebp, (%r15)
+. 4012 0x12345678 4
+. 41 89 2F C3
+
+movl %ebp, %r8d
+. 4013 0x12345678 4
+. 41 89 E8 C3
+
+movl %ebp, %r9d
+. 4014 0x12345678 4
+. 41 89 E9 C3
+
+movl %ebp, (%rcx,%rax,8)
+. 4015 0x12345678 4
+. 89 2C C1 C3
+
+movl %ebp, (%rsp)
+. 4016 0x12345678 4
+. 89 2C 24 C3
+
+movl %ebx, 12(%rbp)
+. 4017 0x12345678 4
+. 89 5D 0C C3
+
+movl %ebx, 16(%rbp)
+. 4018 0x12345678 4
+. 89 5D 10 C3
+
+movl %ebx, 24(%rsp)
+. 4019 0x12345678 5
+. 89 5C 24 18 C3
+
+movl %ebx, 32(%rbp)
+. 4020 0x12345678 4
+. 89 5D 20 C3
+
+movl %ebx, 40(%rsp)
+. 4021 0x12345678 5
+. 89 5C 24 28 C3
+
+movl %ebx, 4(%rax)
+. 4022 0x12345678 4
+. 89 58 04 C3
+
+movl %ebx, 52(%rsp)
+. 4023 0x12345678 5
+. 89 5C 24 34 C3
+
+movl %ebx, 8(%rax)
+. 4024 0x12345678 4
+. 89 58 08 C3
+
+movl %ebx, 8(%rsp)
+. 4025 0x12345678 5
+. 89 5C 24 08 C3
+
+movl %ebx, %eax
+. 4026 0x12345678 3
+. 89 D8 C3
+
+movl %ebx, %ebp
+. 4027 0x12345678 3
+. 89 DD C3
+
+movl %ebx, %ecx
+. 4028 0x12345678 3
+. 89 D9 C3
+
+movl %ebx, %edi
+. 4029 0x12345678 3
+. 89 DF C3
+
+movl %ebx, %edx
+. 4030 0x12345678 3
+. 89 DA C3
+
+movl %ebx, %esi
+. 4031 0x12345678 3
+. 89 DE C3
+
+movl %ebx, %r12d
+. 4032 0x12345678 4
+. 41 89 DC C3
+
+movl %ebx, %r15d
+. 4033 0x12345678 4
+. 41 89 DF C3
+
+movl %ebx, %r8d
+. 4034 0x12345678 4
+. 41 89 D8 C3
+
+movl %ebx, (%rax)
+. 4035 0x12345678 3
+. 89 18 C3
+
+movl %ebx, (%rcx,%rax,4)
+. 4036 0x12345678 4
+. 89 1C 81 C3
+
+movl %ecx, 12(%rsp)
+. 4037 0x12345678 5
+. 89 4C 24 0C C3
+
+movl %ecx, 16(%rsp)
+. 4038 0x12345678 5
+. 89 4C 24 10 C3
+
+movl %ecx, 220(%rsp)
+. 4039 0x12345678 8
+. 89 8C 24 DC 00 00 00 C3
+
+movl %ecx, 24(%rbx)
+. 4040 0x12345678 4
+. 89 4B 18 C3
+
+movl %ecx, 24(%rsp)
+. 4041 0x12345678 5
+. 89 4C 24 18 C3
+
+movl %ecx, 28(%rsp)
+. 4042 0x12345678 5
+. 89 4C 24 1C C3
+
+movl %ecx, 32(%rdi)
+. 4043 0x12345678 4
+. 89 4F 20 C3
+
+movl %ecx, 32(%rsp)
+. 4044 0x12345678 5
+. 89 4C 24 20 C3
+
+movl %ecx, 36(%rsp)
+. 4045 0x12345678 5
+. 89 4C 24 24 C3
+
+movl %ecx, %eax
+. 4046 0x12345678 3
+. 89 C8 C3
+
+movl %ecx, %ebp
+. 4047 0x12345678 3
+. 89 CD C3
+
+movl %ecx, %ebx
+. 4048 0x12345678 3
+. 89 CB C3
+
+movl %ecx, %edi
+. 4049 0x12345678 3
+. 89 CF C3
+
+movl %ecx, %edx
+. 4050 0x12345678 3
+. 89 CA C3
+
+movl %ecx, %esi
+. 4051 0x12345678 3
+. 89 CE C3
+
+movl %ecx, %r10d
+. 4052 0x12345678 4
+. 41 89 CA C3
+
+movl %ecx, %r12d
+. 4053 0x12345678 4
+. 41 89 CC C3
+
+movl %ecx, %r13d
+. 4054 0x12345678 4
+. 41 89 CD C3
+
+movl %ecx, %r14d
+. 4055 0x12345678 4
+. 41 89 CE C3
+
+movl %ecx, %r15d
+. 4056 0x12345678 4
+. 41 89 CF C3
+
+movl %ecx, (%rsi)
+. 4057 0x12345678 3
+. 89 0E C3
+
+movl %edi, 120(%rsp)
+. 4058 0x12345678 5
+. 89 7C 24 78 C3
+
+movl %edi, 12(%rsi,%rax,4)
+. 4059 0x12345678 5
+. 89 7C 86 0C C3
+
+movl %edi, 16(%rbp)
+. 4060 0x12345678 4
+. 89 7D 10 C3
+
+movl %edi, 24(%rsp)
+. 4061 0x12345678 5
+. 89 7C 24 18 C3
+
+movl %edi, 40(%rsp)
+. 4062 0x12345678 5
+. 89 7C 24 28 C3
+
+movl %edi, 44(%rsp)
+. 4063 0x12345678 5
+. 89 7C 24 2C C3
+
+movl %edi, 48(%rbx)
+. 4064 0x12345678 4
+. 89 7B 30 C3
+
+movl %edi, 56(%rsp)
+. 4065 0x12345678 5
+. 89 7C 24 38 C3
+
+movl %edi, 8(%rax)
+. 4066 0x12345678 4
+. 89 78 08 C3
+
+movl %edi, %eax
+. 4067 0x12345678 3
+. 89 F8 C3
+
+movl %edi, %ebp
+. 4068 0x12345678 3
+. 89 FD C3
+
+movl %edi, %ebx
+. 4069 0x12345678 3
+. 89 FB C3
+
+movl %edi, %ecx
+. 4070 0x12345678 3
+. 89 F9 C3
+
+movl %edi, %edx
+. 4071 0x12345678 3
+. 89 FA C3
+
+movl %edi, %esi
+. 4072 0x12345678 3
+. 89 FE C3
+
+movl %edi, -9(%rip)
+. 4073 0x12345678 7
+. 89 3D F7 FF FF FF C3
+
+movl %edi, %r12d
+. 4074 0x12345678 4
+. 41 89 FC C3
+
+movl %edi, %r13d
+. 4075 0x12345678 4
+. 41 89 FD C3
+
+movl %edi, %r14d
+. 4076 0x12345678 4
+. 41 89 FE C3
+
+movl %edi, %r15d
+. 4077 0x12345678 4
+. 41 89 FF C3
+
+movl %edi, %r8d
+. 4078 0x12345678 4
+. 41 89 F8 C3
+
+movl %edi, %r9d
+. 4079 0x12345678 4
+. 41 89 F9 C3
+
+movl %edi, (%rsi)
+. 4080 0x12345678 3
+. 89 3E C3
+
+movl %edx, 12(%rsp)
+. 4081 0x12345678 5
+. 89 54 24 0C C3
+
+movl %edx, 16(%rax)
+. 4082 0x12345678 4
+. 89 50 10 C3
+
+movl %edx, 16(%rdi,%rax,4)
+. 4083 0x12345678 5
+. 89 54 87 10 C3
+
+movl %edx, -16(%rsp)
+. 4084 0x12345678 5
+. 89 54 24 F0 C3
+
+movl %edx, 16(%rsp)
+. 4085 0x12345678 5
+. 89 54 24 10 C3
+
+movl %edx, -20(%rsp)
+. 4086 0x12345678 5
+. 89 54 24 EC C3
+
+movl %edx, 20(%rsp)
+. 4087 0x12345678 5
+. 89 54 24 14 C3
+
+movl %edx, 212(%rsp)
+. 4088 0x12345678 8
+. 89 94 24 D4 00 00 00 C3
+
+movl %edx, 224(%rsp)
+. 4089 0x12345678 8
+. 89 94 24 E0 00 00 00 C3
+
+movl %edx, 24(%rax)
+. 4090 0x12345678 4
+. 89 50 18 C3
+
+movl %edx, 24(%rsp)
+. 4091 0x12345678 5
+. 89 54 24 18 C3
+
+movl %edx, 288(%rsp,%rax,4)
+. 4092 0x12345678 8
+. 89 94 84 20 01 00 00 C3
+
+movl %edx, 28(%rbx)
+. 4093 0x12345678 4
+. 89 53 1C C3
+
+movl %edx, 292(%rsp)
+. 4094 0x12345678 8
+. 89 94 24 24 01 00 00 C3
+
+movl %edx, 292(%rsp,%rax,4)
+. 4095 0x12345678 8
+. 89 94 84 24 01 00 00 C3
+
+movl %edx, 32(%rdi)
+. 4096 0x12345678 4
+. 89 57 20 C3
+
+movl %edx, 32(%rsp)
+. 4097 0x12345678 5
+. 89 54 24 20 C3
+
+movl %edx, 348(%rsp)
+. 4098 0x12345678 8
+. 89 94 24 5C 01 00 00 C3
+
+movl %edx, 40(%rsp)
+. 4099 0x12345678 5
+. 89 54 24 28 C3
+
+movl %edx, 4(%rsi)
+. 4100 0x12345678 4
+. 89 56 04 C3
+
+movl %edx, -8(%rsp)
+. 4101 0x12345678 5
+. 89 54 24 F8 C3
+
+movl %edx, 8(%rsp)
+. 4102 0x12345678 5
+. 89 54 24 08 C3
+
+movl %edx, %eax
+. 4103 0x12345678 3
+. 89 D0 C3
+
+movl %edx, %ebp
+. 4104 0x12345678 3
+. 89 D5 C3
+
+movl %edx, %ebx
+. 4105 0x12345678 3
+. 89 D3 C3
+
+movl %edx, %ecx
+. 4106 0x12345678 3
+. 89 D1 C3
+
+movl %edx, %edi
+. 4107 0x12345678 3
+. 89 D7 C3
+
+movl %edx, %esi
+. 4108 0x12345678 3
+. 89 D6 C3
+
+movl %edx, %r11d
+. 4109 0x12345678 4
+. 41 89 D3 C3
+
+movl %edx, (%r12)
+. 4110 0x12345678 5
+. 41 89 14 24 C3
+
+movl %edx, %r12d
+. 4111 0x12345678 4
+. 41 89 D4 C3
+
+movl %edx, %r13d
+. 4112 0x12345678 4
+. 41 89 D5 C3
+
+movl %edx, %r14d
+. 4113 0x12345678 4
+. 41 89 D6 C3
+
+movl %edx, %r15d
+. 4114 0x12345678 4
+. 41 89 D7 C3
+
+movl %edx, %r8d
+. 4115 0x12345678 4
+. 41 89 D0 C3
+
+movl %edx, %r9d
+. 4116 0x12345678 4
+. 41 89 D1 C3
+
+movl %edx, (%rdi,%rax,4)
+. 4117 0x12345678 4
+. 89 14 87 C3
+
+movl %edx, (%rdi,%rcx,4)
+. 4118 0x12345678 4
+. 89 14 8F C3
+
+movl %edx, (%rsp)
+. 4119 0x12345678 4
+. 89 14 24 C3
+
+movl %esi, 16(%rdi,%rax,4)
+. 4120 0x12345678 5
+. 89 74 87 10 C3
+
+movl %esi, 20(%rsp)
+. 4121 0x12345678 5
+. 89 74 24 14 C3
+
+movl %esi, 32(%rbp)
+. 4122 0x12345678 4
+. 89 75 20 C3
+
+movl %esi, 36(%rsp)
+. 4123 0x12345678 5
+. 89 74 24 24 C3
+
+movl %esi, 4(%rsp)
+. 4124 0x12345678 5
+. 89 74 24 04 C3
+
+movl %esi, 56(%rsp)
+. 4125 0x12345678 5
+. 89 74 24 38 C3
+
+movl %esi, 8(%rbx,%rcx)
+. 4126 0x12345678 5
+. 89 74 0B 08 C3
+
+movl %esi, %eax
+. 4127 0x12345678 3
+. 89 F0 C3
+
+movl %esi, %ebp
+. 4128 0x12345678 3
+. 89 F5 C3
+
+movl %esi, %ebx
+. 4129 0x12345678 3
+. 89 F3 C3
+
+movl %esi, %ecx
+. 4130 0x12345678 3
+. 89 F1 C3
+
+movl %esi, %edi
+. 4131 0x12345678 3
+. 89 F7 C3
+
+movl %esi, %edx
+. 4132 0x12345678 3
+. 89 F2 C3
+
+movl %esi, -545454(%rip)
+. 4133 0x12345678 7
+. 89 35 52 AD F7 FF C3
+
+movl %esi, %r10d
+. 4134 0x12345678 4
+. 41 89 F2 C3
+
+movl %esi, %r11d
+. 4135 0x12345678 4
+. 41 89 F3 C3
+
+movl %esi, %r12d
+. 4136 0x12345678 4
+. 41 89 F4 C3
+
+movl %esi, %r13d
+. 4137 0x12345678 4
+. 41 89 F5 C3
+
+movl %esi, %r14d
+. 4138 0x12345678 4
+. 41 89 F6 C3
+
+movl %esi, (%r14,%rax,4)
+. 4139 0x12345678 5
+. 41 89 34 86 C3
+
+movl %esi, %r15d
+. 4140 0x12345678 4
+. 41 89 F7 C3
+
+movl %esi, %r8d
+. 4141 0x12345678 4
+. 41 89 F0 C3
+
+movl %esi, %r9d
+. 4142 0x12345678 4
+. 41 89 F1 C3
+
+movl %esi, (%rdi,%rax,4)
+. 4143 0x12345678 4
+. 89 34 87 C3
+
+movl %r10d, %eax
+. 4144 0x12345678 4
+. 44 89 D0 C3
+
+movl %r10d, %edi
+. 4145 0x12345678 4
+. 44 89 D7 C3
+
+movl %r10d, %edx
+. 4146 0x12345678 4
+. 44 89 D2 C3
+
+movl %r10d, %r11d
+. 4147 0x12345678 4
+. 45 89 D3 C3
+
+movl %r10d, %r8d
+. 4148 0x12345678 4
+. 45 89 D0 C3
+
+movl %r10d, %r9d
+. 4149 0x12345678 4
+. 45 89 D1 C3
+
+movl %r11d, -4(%rsp)
+. 4150 0x12345678 6
+. 44 89 5C 24 FC C3
+
+movl %r11d, %eax
+. 4151 0x12345678 4
+. 44 89 D8 C3
+
+movl %r11d, %ecx
+. 4152 0x12345678 4
+. 44 89 D9 C3
+
+movl %r11d, %edx
+. 4153 0x12345678 4
+. 44 89 DA C3
+
+movl %r11d, %r8d
+. 4154 0x12345678 4
+. 45 89 D8 C3
+
+movl %r12d, 12(%rax)
+. 4155 0x12345678 5
+. 44 89 60 0C C3
+
+movl %r12d, 16(%rax)
+. 4156 0x12345678 5
+. 44 89 60 10 C3
+
+movl %r12d, 24(%rax)
+. 4157 0x12345678 5
+. 44 89 60 18 C3
+
+movl %r12d, 32(%rax)
+. 4158 0x12345678 5
+. 44 89 60 20 C3
+
+movl %r12d, 8(%rax)
+. 4159 0x12345678 5
+. 44 89 60 08 C3
+
+movl %r12d, %eax
+. 4160 0x12345678 4
+. 44 89 E0 C3
+
+movl %r12d, %ebp
+. 4161 0x12345678 4
+. 44 89 E5 C3
+
+movl %r12d, %ebx
+. 4162 0x12345678 4
+. 44 89 E3 C3
+
+movl %r12d, %ecx
+. 4163 0x12345678 4
+. 44 89 E1 C3
+
+movl %r12d, %edi
+. 4164 0x12345678 4
+. 44 89 E7 C3
+
+movl %r12d, %edx
+. 4165 0x12345678 4
+. 44 89 E2 C3
+
+movl %r12d, %esi
+. 4166 0x12345678 4
+. 44 89 E6 C3
+
+movl %r12d, %r8d
+. 4167 0x12345678 4
+. 45 89 E0 C3
+
+movl %r12d, (%rax)
+. 4168 0x12345678 4
+. 44 89 20 C3
+
+movl %r12d, (%rdx,%rax,4)
+. 4169 0x12345678 5
+. 44 89 24 82 C3
+
+movl (%r12), %eax
+. 4170 0x12345678 5
+. 41 8B 04 24 C3
+
+movl (%r12), %edx
+. 4171 0x12345678 5
+. 41 8B 14 24 C3
+
+movl %r13d, 12(%rax)
+. 4172 0x12345678 5
+. 44 89 68 0C C3
+
+movl %r13d, 16(%rax)
+. 4173 0x12345678 5
+. 44 89 68 10 C3
+
+movl %r13d, 24(%rax)
+. 4174 0x12345678 5
+. 44 89 68 18 C3
+
+movl %r13d, 540(%rsp)
+. 4175 0x12345678 9
+. 44 89 AC 24 1C 02 00 00 C3
+
+movl %r13d, 544(%rsp)
+. 4176 0x12345678 9
+. 44 89 AC 24 20 02 00 00 C3
+
+movl %r13d, 548(%rsp)
+. 4177 0x12345678 9
+. 44 89 AC 24 24 02 00 00 C3
+
+movl %r13d, 552(%rsp)
+. 4178 0x12345678 9
+. 44 89 AC 24 28 02 00 00 C3
+
+movl %r13d, %eax
+. 4179 0x12345678 4
+. 44 89 E8 C3
+
+movl %r13d, %ebp
+. 4180 0x12345678 4
+. 44 89 ED C3
+
+movl %r13d, %ecx
+. 4181 0x12345678 4
+. 44 89 E9 C3
+
+movl %r13d, %edi
+. 4182 0x12345678 4
+. 44 89 EF C3
+
+movl %r13d, %edx
+. 4183 0x12345678 4
+. 44 89 EA C3
+
+movl %r13d, %esi
+. 4184 0x12345678 4
+. 44 89 EE C3
+
+movl %r13d, %r8d
+. 4185 0x12345678 4
+. 45 89 E8 C3
+
+movl (%r13), %eax
+. 4186 0x12345678 5
+. 41 8B 45 00 C3
+
+movl (%r13), %ebx
+. 4187 0x12345678 5
+. 41 8B 5D 00 C3
+
+movl (%r13,%rax,4), %eax
+. 4188 0x12345678 6
+. 41 8B 44 85 00 C3
+
+movl %r14d, 20(%rbx)
+. 4189 0x12345678 5
+. 44 89 73 14 C3
+
+movl %r14d, %eax
+. 4190 0x12345678 4
+. 44 89 F0 C3
+
+movl %r14d, %ebp
+. 4191 0x12345678 4
+. 44 89 F5 C3
+
+movl %r14d, %ecx
+. 4192 0x12345678 4
+. 44 89 F1 C3
+
+movl %r14d, %edi
+. 4193 0x12345678 4
+. 44 89 F7 C3
+
+movl %r14d, %edx
+. 4194 0x12345678 4
+. 44 89 F2 C3
+
+movl %r14d, %esi
+. 4195 0x12345678 4
+. 44 89 F6 C3
+
+movl %r14d, %r8d
+. 4196 0x12345678 4
+. 45 89 F0 C3
+
+movl %r14d, %r9d
+. 4197 0x12345678 4
+. 45 89 F1 C3
+
+movl %r14d, (%rax)
+. 4198 0x12345678 4
+. 44 89 30 C3
+
+movl (%r14), %edi
+. 4199 0x12345678 4
+. 41 8B 3E C3
+
+movl (%r14,%rdx,4), %ebx
+. 4200 0x12345678 5
+. 41 8B 1C 96 C3
+
+movl %r15d, %eax
+. 4201 0x12345678 4
+. 44 89 F8 C3
+
+movl %r15d, %ebp
+. 4202 0x12345678 4
+. 44 89 FD C3
+
+movl %r15d, %ebx
+. 4203 0x12345678 4
+. 44 89 FB C3
+
+movl %r15d, %ecx
+. 4204 0x12345678 4
+. 44 89 F9 C3
+
+movl %r15d, %edi
+. 4205 0x12345678 4
+. 44 89 FF C3
+
+movl %r15d, %edx
+. 4206 0x12345678 4
+. 44 89 FA C3
+
+movl %r15d, %esi
+. 4207 0x12345678 4
+. 44 89 FE C3
+
+movl %r15d, %r13d
+. 4208 0x12345678 4
+. 45 89 FD C3
+
+movl %r15d, %r8d
+. 4209 0x12345678 4
+. 45 89 F8 C3
+
+movl (%r15), %edi
+. 4210 0x12345678 4
+. 41 8B 3F C3
+
+movl %r8d, 20(%rsp)
+. 4211 0x12345678 6
+. 44 89 44 24 14 C3
+
+movl %r8d, 24(%rsp)
+. 4212 0x12345678 6
+. 44 89 44 24 18 C3
+
+movl %r8d, 28(%rbx)
+. 4213 0x12345678 5
+. 44 89 43 1C C3
+
+movl %r8d, 40(%rsp)
+. 4214 0x12345678 6
+. 44 89 44 24 28 C3
+
+movl %r8d, 4(%rsi)
+. 4215 0x12345678 5
+. 44 89 46 04 C3
+
+movl %r8d, 8(%rsi)
+. 4216 0x12345678 5
+. 44 89 46 08 C3
+
+movl %r8d, 8(%rsp)
+. 4217 0x12345678 6
+. 44 89 44 24 08 C3
+
+movl %r8d, %eax
+. 4218 0x12345678 4
+. 44 89 C0 C3
+
+movl %r8d, %ebp
+. 4219 0x12345678 4
+. 44 89 C5 C3
+
+movl %r8d, %ebx
+. 4220 0x12345678 4
+. 44 89 C3 C3
+
+movl %r8d, %edi
+. 4221 0x12345678 4
+. 44 89 C7 C3
+
+movl %r8d, %edx
+. 4222 0x12345678 4
+. 44 89 C2 C3
+
+movl %r8d, %r12d
+. 4223 0x12345678 4
+. 45 89 C4 C3
+
+movl %r8d, %r13d
+. 4224 0x12345678 4
+. 45 89 C5 C3
+
+movl %r8d, %r14d
+. 4225 0x12345678 4
+. 45 89 C6 C3
+
+movl (%r8), %eax
+. 4226 0x12345678 4
+. 41 8B 00 C3
+
+movl %r9d, 20(%rsp)
+. 4227 0x12345678 6
+. 44 89 4C 24 14 C3
+
+movl %r9d, 40(%rsp)
+. 4228 0x12345678 6
+. 44 89 4C 24 28 C3
+
+movl %r9d, 48(%rsp)
+. 4229 0x12345678 6
+. 44 89 4C 24 30 C3
+
+movl %r9d, %eax
+. 4230 0x12345678 4
+. 44 89 C8 C3
+
+movl %r9d, %ebp
+. 4231 0x12345678 4
+. 44 89 CD C3
+
+movl %r9d, %ebx
+. 4232 0x12345678 4
+. 44 89 CB C3
+
+movl %r9d, %ecx
+. 4233 0x12345678 4
+. 44 89 C9 C3
+
+movl %r9d, %edi
+. 4234 0x12345678 4
+. 44 89 CF C3
+
+movl %r9d, %edx
+. 4235 0x12345678 4
+. 44 89 CA C3
+
+movl %r9d, %esi
+. 4236 0x12345678 4
+. 44 89 CE C3
+
+movl %r9d, %r11d
+. 4237 0x12345678 4
+. 45 89 CB C3
+
+movl %r9d, %r12d
+. 4238 0x12345678 4
+. 45 89 CC C3
+
+movl %r9d, %r13d
+. 4239 0x12345678 4
+. 45 89 CD C3
+
+movl %r9d, %r15d
+. 4240 0x12345678 4
+. 45 89 CF C3
+
+movl (%rax), %eax
+. 4241 0x12345678 3
+. 8B 00 C3
+
+movl (%rax), %ecx
+. 4242 0x12345678 3
+. 8B 08 C3
+
+movl (%rax), %edi
+. 4243 0x12345678 3
+. 8B 38 C3
+
+movl (%rax), %esi
+. 4244 0x12345678 3
+. 8B 30 C3
+
+movl (%rax,%rcx,8), %eax
+. 4245 0x12345678 4
+. 8B 04 C8 C3
+
+movl (%rax,%rdx,4), %eax
+. 4246 0x12345678 4
+. 8B 04 90 C3
+
+movl (%rax,%rdx,4), %ebx
+. 4247 0x12345678 4
+. 8B 1C 90 C3
+
+movl (%rax,%rdx,4), %esi
+. 4248 0x12345678 4
+. 8B 34 90 C3
+
+movl (%rax,%rdx,8), %ebp
+. 4249 0x12345678 4
+. 8B 2C D0 C3
+
+movl (%rbp), %eax
+. 4250 0x12345678 4
+. 8B 45 00 C3
+
+movl (%rbp), %edi
+. 4251 0x12345678 4
+. 8B 7D 00 C3
+
+movl (%rbp), %edx
+. 4252 0x12345678 4
+. 8B 55 00 C3
+
+movl (%rbp), %esi
+. 4253 0x12345678 4
+. 8B 75 00 C3
+
+movl (%rbp,%rbx,4), %edi
+. 4254 0x12345678 5
+. 8B 7C 9D 00 C3
+
+movl (%rbx), %eax
+. 4255 0x12345678 3
+. 8B 03 C3
+
+movl (%rbx), %ebp
+. 4256 0x12345678 3
+. 8B 2B C3
+
+movl (%rbx), %edx
+. 4257 0x12345678 3
+. 8B 13 C3
+
+movl (%rbx), %esi
+. 4258 0x12345678 3
+. 8B 33 C3
+
+movl (%rbx,%rax), %r15d
+. 4259 0x12345678 5
+. 44 8B 3C 03 C3
+
+movl (%rbx,%rbp,4), %edi
+. 4260 0x12345678 4
+. 8B 3C AB C3
+
+movl (%rcx), %eax
+. 4261 0x12345678 3
+. 8B 01 C3
+
+movl (%rcx), %edi
+. 4262 0x12345678 3
+. 8B 39 C3
+
+movl (%rcx), %edx
+. 4263 0x12345678 3
+. 8B 11 C3
+
+movl (%rcx,%rax,4), %eax
+. 4264 0x12345678 4
+. 8B 04 81 C3
+
+movl (%rcx,%rax,4), %edx
+. 4265 0x12345678 4
+. 8B 14 81 C3
+
+movl (%rcx,%rbx), %edi
+. 4266 0x12345678 4
+. 8B 3C 19 C3
+
+movl (%rcx,%rdx,8), %edx
+. 4267 0x12345678 4
+. 8B 14 D1 C3
+
+movl (%rcx,%rsi,8), %esi
+. 4268 0x12345678 4
+. 8B 34 F1 C3
+
+movl (%rdi), %eax
+. 4269 0x12345678 3
+. 8B 07 C3
+
+movl (%rdi), %ebp
+. 4270 0x12345678 3
+. 8B 2F C3
+
+movl (%rdi), %edi
+. 4271 0x12345678 3
+. 8B 3F C3
+
+movl (%rdi), %esi
+. 4272 0x12345678 3
+. 8B 37 C3
+
+movl (%rdi), %r12d
+. 4273 0x12345678 4
+. 44 8B 27 C3
+
+movl (%rdi,%rdx), %edi
+. 4274 0x12345678 4
+. 8B 3C 17 C3
+
+movl (%rdx), %eax
+. 4275 0x12345678 3
+. 8B 02 C3
+
+movl (%rdx), %ecx
+. 4276 0x12345678 3
+. 8B 0A C3
+
+movl (%rdx), %esi
+. 4277 0x12345678 3
+. 8B 32 C3
+
+movl (%rdx,%rax,4), %eax
+. 4278 0x12345678 4
+. 8B 04 82 C3
+
+movl (%rdx,%rax,4), %ebp
+. 4279 0x12345678 4
+. 8B 2C 82 C3
+
+movl (%rdx,%rax,4), %edi
+. 4280 0x12345678 4
+. 8B 3C 82 C3
+
+movl (%rdx,%rax), %r12d
+. 4281 0x12345678 5
+. 44 8B 24 02 C3
+
+movl (%rdx,%rbx), %edx
+. 4282 0x12345678 4
+. 8B 14 1A C3
+
+movl (%rdx,%rdi), %edi
+. 4283 0x12345678 4
+. 8B 3C 3A C3
+
+movl (%rsi), %eax
+. 4284 0x12345678 3
+. 8B 06 C3
+
+movl (%rsi), %edx
+. 4285 0x12345678 3
+. 8B 16 C3
+
+movl (%rsi,%r13), %edi
+. 4286 0x12345678 5
+. 42 8B 3C 2E C3
+
+movl (%rsi,%rax), %esi
+. 4287 0x12345678 4
+. 8B 34 06 C3
+
+movl (%rsi,%rbx,4), %eax
+. 4288 0x12345678 4
+. 8B 04 9E C3
+
+movl (%rsi,%rcx), %edx
+. 4289 0x12345678 4
+. 8B 14 0E C3
+
+movl (%rsi,%rdx,4), %eax
+. 4290 0x12345678 4
+. 8B 04 96 C3
+
+movl (%rsi,%rdx,4), %r13d
+. 4291 0x12345678 5
+. 44 8B 2C 96 C3
+
+movl (%rsi,%rdx), %ecx
+. 4292 0x12345678 4
+. 8B 0C 16 C3
+
+movl (%rsp), %edi
+. 4293 0x12345678 4
+. 8B 3C 24 C3
+
+movl (%rsp), %edx
+. 4294 0x12345678 4
+. 8B 14 24 C3
+
+movq $0, 112(%rsp)
+. 4295 0x12345678 10
+. 48 C7 44 24 70 00 00 00 00 C3
+
+movq $0, 120(%rsp)
+. 4296 0x12345678 10
+. 48 C7 44 24 78 00 00 00 00 C3
+
+movq $0, 16(%rax)
+. 4297 0x12345678 9
+. 48 C7 40 10 00 00 00 00 C3
+
+movq $0, 16(%rsp)
+. 4298 0x12345678 10
+. 48 C7 44 24 10 00 00 00 00 C3
+
+movq $0, 192(%rsp)
+. 4299 0x12345678 13
+. 48 C7 84 24 C0 00 00 00 00 00 00 00 C3
+
+movq $0, 24(%rax)
+. 4300 0x12345678 9
+. 48 C7 40 18 00 00 00 00 C3
+
+movq $0, 24(%rbx)
+. 4301 0x12345678 9
+. 48 C7 43 18 00 00 00 00 C3
+
+movq $0, 24(%rsp)
+. 4302 0x12345678 10
+. 48 C7 44 24 18 00 00 00 00 C3
+
+movq $0, 32(%rax)
+. 4303 0x12345678 9
+. 48 C7 40 20 00 00 00 00 C3
+
+movq $0, 40(%rax)
+. 4304 0x12345678 9
+. 48 C7 40 28 00 00 00 00 C3
+
+movq $0, 40(%rsp)
+. 4305 0x12345678 10
+. 48 C7 44 24 28 00 00 00 00 C3
+
+movq $0, 48(%rsp)
+. 4306 0x12345678 10
+. 48 C7 44 24 30 00 00 00 00 C3
+
+movq $0, 560(%rsp)
+. 4307 0x12345678 13
+. 48 C7 84 24 30 02 00 00 00 00 00 00 C3
+
+movq $0, 64(%rsp)
+. 4308 0x12345678 10
+. 48 C7 44 24 40 00 00 00 00 C3
+
+movq $0, 8(%rax)
+. 4309 0x12345678 9
+. 48 C7 40 08 00 00 00 00 C3
+
+movq $0, 8(%rbx)
+. 4310 0x12345678 9
+. 48 C7 43 08 00 00 00 00 C3
+
+movq $0, 8(%rdx)
+. 4311 0x12345678 9
+. 48 C7 42 08 00 00 00 00 C3
+
+movq $0, 8(%rsp)
+. 4312 0x12345678 10
+. 48 C7 44 24 08 00 00 00 00 C3
+
+movq $0, (%r14,%rax,8)
+. 4313 0x12345678 9
+. 49 C7 04 C6 00 00 00 00 C3
+
+movq $0, (%rax)
+. 4314 0x12345678 8
+. 48 C7 00 00 00 00 00 C3
+
+movq $0, (%rax,%rbp,8)
+. 4315 0x12345678 9
+. 48 C7 04 E8 00 00 00 00 C3
+
+movq $0, (%rax,%rcx,8)
+. 4316 0x12345678 9
+. 48 C7 04 C8 00 00 00 00 C3
+
+movq $0, (%rbp,%rax,8)
+. 4317 0x12345678 10
+. 48 C7 44 C5 00 00 00 00 00 C3
+
+movq $0, (%rcx,%rsi,8)
+. 4318 0x12345678 9
+. 48 C7 04 F1 00 00 00 00 C3
+
+movq $0, (%rsi,%rcx,8)
+. 4319 0x12345678 9
+. 48 C7 04 CE 00 00 00 00 C3
+
+movq 104(%rsp), %r12
+. 4320 0x12345678 6
+. 4C 8B 64 24 68 C3
+
+movq 104(%rsp), %r13
+. 4321 0x12345678 6
+. 4C 8B 6C 24 68 C3
+
+movq 104(%rsp), %r14
+. 4322 0x12345678 6
+. 4C 8B 74 24 68 C3
+
+movq 104(%rsp), %rbx
+. 4323 0x12345678 6
+. 48 8B 5C 24 68 C3
+
+movq 104(%rsp), %rdi
+. 4324 0x12345678 6
+. 48 8B 7C 24 68 C3
+
+movq 104(%rsp), %rdx
+. 4325 0x12345678 6
+. 48 8B 54 24 68 C3
+
+movq 112(%rsp), %r13
+. 4326 0x12345678 6
+. 4C 8B 6C 24 70 C3
+
+movq 112(%rsp), %r14
+. 4327 0x12345678 6
+. 4C 8B 74 24 70 C3
+
+movq 112(%rsp), %r15
+. 4328 0x12345678 6
+. 4C 8B 7C 24 70 C3
+
+movq 112(%rsp), %rax
+. 4329 0x12345678 6
+. 48 8B 44 24 70 C3
+
+movq 112(%rsp), %rbp
+. 4330 0x12345678 6
+. 48 8B 6C 24 70 C3
+
+movq 112(%rsp), %rbx
+. 4331 0x12345678 6
+. 48 8B 5C 24 70 C3
+
+movq 112(%rsp), %rcx
+. 4332 0x12345678 6
+. 48 8B 4C 24 70 C3
+
+movq 112(%rsp), %rdi
+. 4333 0x12345678 6
+. 48 8B 7C 24 70 C3
+
+movq 112(%rsp), %rdx
+. 4334 0x12345678 6
+. 48 8B 54 24 70 C3
+
+movq 112(%rsp), %rsi
+. 4335 0x12345678 6
+. 48 8B 74 24 70 C3
+
+movq 120(%rsp), %r12
+. 4336 0x12345678 6
+. 4C 8B 64 24 78 C3
+
+movq 120(%rsp), %r14
+. 4337 0x12345678 6
+. 4C 8B 74 24 78 C3
+
+movq 120(%rsp), %rax
+. 4338 0x12345678 6
+. 48 8B 44 24 78 C3
+
+movq 120(%rsp), %rcx
+. 4339 0x12345678 6
+. 48 8B 4C 24 78 C3
+
+movq 120(%rsp), %rsi
+. 4340 0x12345678 6
+. 48 8B 74 24 78 C3
+
+movq 128(%rsp), %r13
+. 4341 0x12345678 9
+. 4C 8B AC 24 80 00 00 00 C3
+
+movq 128(%rsp), %r15
+. 4342 0x12345678 9
+. 4C 8B BC 24 80 00 00 00 C3
+
+movq 128(%rsp), %rax
+. 4343 0x12345678 9
+. 48 8B 84 24 80 00 00 00 C3
+
+movq 128(%rsp), %rbx
+. 4344 0x12345678 9
+. 48 8B 9C 24 80 00 00 00 C3
+
+movq 136(%rsp), %r14
+. 4345 0x12345678 9
+. 4C 8B B4 24 88 00 00 00 C3
+
+movq 136(%rsp), %rax
+. 4346 0x12345678 9
+. 48 8B 84 24 88 00 00 00 C3
+
+movq 136(%rsp), %rbx
+. 4347 0x12345678 9
+. 48 8B 9C 24 88 00 00 00 C3
+
+movq 136(%rsp), %rdx
+. 4348 0x12345678 9
+. 48 8B 94 24 88 00 00 00 C3
+
+movq 144(%rsp), %r15
+. 4349 0x12345678 9
+. 4C 8B BC 24 90 00 00 00 C3
+
+movq 144(%rsp), %rbp
+. 4350 0x12345678 9
+. 48 8B AC 24 90 00 00 00 C3
+
+movq 152(%rsp), %r12
+. 4351 0x12345678 9
+. 4C 8B A4 24 98 00 00 00 C3
+
+movq 152(%rsp), %rax
+. 4352 0x12345678 9
+. 48 8B 84 24 98 00 00 00 C3
+
+movq 152(%rsp), %rbx
+. 4353 0x12345678 9
+. 48 8B 9C 24 98 00 00 00 C3
+
+movq 152(%rsp), %rcx
+. 4354 0x12345678 9
+. 48 8B 8C 24 98 00 00 00 C3
+
+movq 152(%rsp), %rsi
+. 4355 0x12345678 9
+. 48 8B B4 24 98 00 00 00 C3
+
+movq 160(%rsp), %r13
+. 4356 0x12345678 9
+. 4C 8B AC 24 A0 00 00 00 C3
+
+movq 160(%rsp), %rax
+. 4357 0x12345678 9
+. 48 8B 84 24 A0 00 00 00 C3
+
+movq 160(%rsp), %rbx
+. 4358 0x12345678 9
+. 48 8B 9C 24 A0 00 00 00 C3
+
+movq 160(%rsp), %rcx
+. 4359 0x12345678 9
+. 48 8B 8C 24 A0 00 00 00 C3
+
+movq 160(%rsp), %rdi
+. 4360 0x12345678 9
+. 48 8B BC 24 A0 00 00 00 C3
+
+movq 160(%rsp), %rdx
+. 4361 0x12345678 9
+. 48 8B 94 24 A0 00 00 00 C3
+
+movq 160(%rsp), %rsi
+. 4362 0x12345678 9
+. 48 8B B4 24 A0 00 00 00 C3
+
+movq 168(%rsp), %r14
+. 4363 0x12345678 9
+. 4C 8B B4 24 A8 00 00 00 C3
+
+movq 16(%r12), %r8
+. 4364 0x12345678 6
+. 4D 8B 44 24 10 C3
+
+movq 16(%r12), %rax
+. 4365 0x12345678 6
+. 49 8B 44 24 10 C3
+
+movq 16(%r12), %rdi
+. 4366 0x12345678 6
+. 49 8B 7C 24 10 C3
+
+movq 16(%r12), %rdx
+. 4367 0x12345678 6
+. 49 8B 54 24 10 C3
+
+movq 16(%r12), %rsi
+. 4368 0x12345678 6
+. 49 8B 74 24 10 C3
+
+movq 16(%r13), %rax
+. 4369 0x12345678 5
+. 49 8B 45 10 C3
+
+movq 16(%r13), %rbx
+. 4370 0x12345678 5
+. 49 8B 5D 10 C3
+
+movq 16(%r13), %rcx
+. 4371 0x12345678 5
+. 49 8B 4D 10 C3
+
+movq 16(%r13), %rdi
+. 4372 0x12345678 5
+. 49 8B 7D 10 C3
+
+movq 16(%r13), %rsi
+. 4373 0x12345678 5
+. 49 8B 75 10 C3
+
+movq 16(%r15), %rax
+. 4374 0x12345678 5
+. 49 8B 47 10 C3
+
+movq 16(%r15), %rdx
+. 4375 0x12345678 5
+. 49 8B 57 10 C3
+
+movq 16(%r8), %rax
+. 4376 0x12345678 5
+. 49 8B 40 10 C3
+
+movq 16(%rax), %rbx
+. 4377 0x12345678 5
+. 48 8B 58 10 C3
+
+movq 16(%rax), %rdi
+. 4378 0x12345678 5
+. 48 8B 78 10 C3
+
+movq 16(%rax), %rdx
+. 4379 0x12345678 5
+. 48 8B 50 10 C3
+
+movq 16(%rbp), %rax
+. 4380 0x12345678 5
+. 48 8B 45 10 C3
+
+movq 16(%rbp), %rbx
+. 4381 0x12345678 5
+. 48 8B 5D 10 C3
+
+movq 16(%rbp), %rdi
+. 4382 0x12345678 5
+. 48 8B 7D 10 C3
+
+movq 16(%rbp), %rdx
+. 4383 0x12345678 5
+. 48 8B 55 10 C3
+
+movq 16(%rbp), %rsi
+. 4384 0x12345678 5
+. 48 8B 75 10 C3
+
+movq 16(%rbx), %rax
+. 4385 0x12345678 5
+. 48 8B 43 10 C3
+
+movq 16(%rbx), %rbx
+. 4386 0x12345678 5
+. 48 8B 5B 10 C3
+
+movq 16(%rbx), %rcx
+. 4387 0x12345678 5
+. 48 8B 4B 10 C3
+
+movq 16(%rbx), %rdi
+. 4388 0x12345678 5
+. 48 8B 7B 10 C3
+
+movq 16(%rbx), %rdx
+. 4389 0x12345678 5
+. 48 8B 53 10 C3
+
+movq 16(%rbx), %rsi
+. 4390 0x12345678 5
+. 48 8B 73 10 C3
+
+movq 16(%rcx), %rax
+. 4391 0x12345678 5
+. 48 8B 41 10 C3
+
+movq 16(%rdi), %rax
+. 4392 0x12345678 5
+. 48 8B 47 10 C3
+
+movq 16(%rdi), %rcx
+. 4393 0x12345678 5
+. 48 8B 4F 10 C3
+
+movq 16(%rdi), %rdi
+. 4394 0x12345678 5
+. 48 8B 7F 10 C3
+
+movq 16(%rdi), %rdx
+. 4395 0x12345678 5
+. 48 8B 57 10 C3
+
+movq 16(%rdx), %r8
+. 4396 0x12345678 5
+. 4C 8B 42 10 C3
+
+movq 16(%rdx), %rcx
+. 4397 0x12345678 5
+. 48 8B 4A 10 C3
+
+movq 16(%rdx), %rdx
+. 4398 0x12345678 5
+. 48 8B 52 10 C3
+
+movq 16(%rsi), %rax
+. 4399 0x12345678 5
+. 48 8B 46 10 C3
+
+movq 16(%rsi), %rsi
+. 4400 0x12345678 5
+. 48 8B 76 10 C3
+
+movq 16(%rsp), %r12
+. 4401 0x12345678 6
+. 4C 8B 64 24 10 C3
+
+movq 16(%rsp), %r13
+. 4402 0x12345678 6
+. 4C 8B 6C 24 10 C3
+
+movq 16(%rsp), %rax
+. 4403 0x12345678 6
+. 48 8B 44 24 10 C3
+
+movq 16(%rsp), %rbp
+. 4404 0x12345678 6
+. 48 8B 6C 24 10 C3
+
+movq 16(%rsp), %rcx
+. 4405 0x12345678 6
+. 48 8B 4C 24 10 C3
+
+movq 16(%rsp), %rdx
+. 4406 0x12345678 6
+. 48 8B 54 24 10 C3
+
+movq 16(%rsp), %rsi
+. 4407 0x12345678 6
+. 48 8B 74 24 10 C3
+
+movq 176(%rsp), %r15
+. 4408 0x12345678 9
+. 4C 8B BC 24 B0 00 00 00 C3
+
+movq 176(%rsp), %rax
+. 4409 0x12345678 9
+. 48 8B 84 24 B0 00 00 00 C3
+
+movq 176(%rsp), %rcx
+. 4410 0x12345678 9
+. 48 8B 8C 24 B0 00 00 00 C3
+
+movq 176(%rsp), %rsi
+. 4411 0x12345678 9
+. 48 8B B4 24 B0 00 00 00 C3
+
+movq 192(%rsp), %rcx
+. 4412 0x12345678 9
+. 48 8B 8C 24 C0 00 00 00 C3
+
+movq 192(%rsp), %rdi
+. 4413 0x12345678 9
+. 48 8B BC 24 C0 00 00 00 C3
+
+movq 192(%rsp), %rdx
+. 4414 0x12345678 9
+. 48 8B 94 24 C0 00 00 00 C3
+
+movq 192(%rsp), %rsi
+. 4415 0x12345678 9
+. 48 8B B4 24 C0 00 00 00 C3
+
+movq $-1, %rax
+. 4416 0x12345678 8
+. 48 C7 C0 FF FF FF FF C3
+
+movq 200(%rsp), %rsi
+. 4417 0x12345678 9
+. 48 8B B4 24 C8 00 00 00 C3
+
+movq 224(%rsp), %r8
+. 4418 0x12345678 9
+. 4C 8B 84 24 E0 00 00 00 C3
+
+movq 232(%rsp), %r12
+. 4419 0x12345678 9
+. 4C 8B A4 24 E8 00 00 00 C3
+
+movq 240(%rsp), %rcx
+. 4420 0x12345678 9
+. 48 8B 8C 24 F0 00 00 00 C3
+
+movq 24(%r12), %rax
+. 4421 0x12345678 6
+. 49 8B 44 24 18 C3
+
+movq 24(%r12), %rdx
+. 4422 0x12345678 6
+. 49 8B 54 24 18 C3
+
+movq 24(%r12), %rsi
+. 4423 0x12345678 6
+. 49 8B 74 24 18 C3
+
+movq 24(%r13), %rdi
+. 4424 0x12345678 5
+. 49 8B 7D 18 C3
+
+movq 24(%r13), %rsi
+. 4425 0x12345678 5
+. 49 8B 75 18 C3
+
+movq 24(%r14), %rcx
+. 4426 0x12345678 5
+. 49 8B 4E 18 C3
+
+movq 24(%r14), %rsi
+. 4427 0x12345678 5
+. 49 8B 76 18 C3
+
+movq 24(%r15), %r12
+. 4428 0x12345678 5
+. 4D 8B 67 18 C3
+
+movq 24(%r15), %rsi
+. 4429 0x12345678 5
+. 49 8B 77 18 C3
+
+movq 24(%rax), %rdi
+. 4430 0x12345678 5
+. 48 8B 78 18 C3
+
+movq 24(%rax), %rsi
+. 4431 0x12345678 5
+. 48 8B 70 18 C3
+
+movq 24(%rbp), %rax
+. 4432 0x12345678 5
+. 48 8B 45 18 C3
+
+movq 24(%rbp), %rbx
+. 4433 0x12345678 5
+. 48 8B 5D 18 C3
+
+movq 24(%rbp), %rcx
+. 4434 0x12345678 5
+. 48 8B 4D 18 C3
+
+movq 24(%rbp), %rdi
+. 4435 0x12345678 5
+. 48 8B 7D 18 C3
+
+movq 24(%rbp), %rdx
+. 4436 0x12345678 5
+. 48 8B 55 18 C3
+
+movq 24(%rbp), %rsi
+. 4437 0x12345678 5
+. 48 8B 75 18 C3
+
+movq 24(%rbx), %rax
+. 4438 0x12345678 5
+. 48 8B 43 18 C3
+
+movq 24(%rbx), %rbx
+. 4439 0x12345678 5
+. 48 8B 5B 18 C3
+
+movq 24(%rbx), %rdi
+. 4440 0x12345678 5
+. 48 8B 7B 18 C3
+
+movq 24(%rbx), %rdx
+. 4441 0x12345678 5
+. 48 8B 53 18 C3
+
+movq 24(%rbx), %rsi
+. 4442 0x12345678 5
+. 48 8B 73 18 C3
+
+movq 24(%rcx), %rax
+. 4443 0x12345678 5
+. 48 8B 41 18 C3
+
+movq 24(%rcx), %rcx
+. 4444 0x12345678 5
+. 48 8B 49 18 C3
+
+movq 24(%rdi), %rax
+. 4445 0x12345678 5
+. 48 8B 47 18 C3
+
+movq 24(%rdi), %rdi
+. 4446 0x12345678 5
+. 48 8B 7F 18 C3
+
+movq 24(%rsi), %rax
+. 4447 0x12345678 5
+. 48 8B 46 18 C3
+
+movq 24(%rsi), %rdi
+. 4448 0x12345678 5
+. 48 8B 7E 18 C3
+
+movq 24(%rsi), %rsi
+. 4449 0x12345678 5
+. 48 8B 76 18 C3
+
+movq 24(%rsp), %r12
+. 4450 0x12345678 6
+. 4C 8B 64 24 18 C3
+
+movq 24(%rsp), %r13
+. 4451 0x12345678 6
+. 4C 8B 6C 24 18 C3
+
+movq 24(%rsp), %r14
+. 4452 0x12345678 6
+. 4C 8B 74 24 18 C3
+
+movq 24(%rsp), %rax
+. 4453 0x12345678 6
+. 48 8B 44 24 18 C3
+
+movq 24(%rsp), %rbx
+. 4454 0x12345678 6
+. 48 8B 5C 24 18 C3
+
+movq 24(%rsp), %rcx
+. 4455 0x12345678 6
+. 48 8B 4C 24 18 C3
+
+movq 24(%rsp), %rdi
+. 4456 0x12345678 6
+. 48 8B 7C 24 18 C3
+
+movq 24(%rsp), %rdx
+. 4457 0x12345678 6
+. 48 8B 54 24 18 C3
+
+movq 24(%rsp), %rsi
+. 4458 0x12345678 6
+. 48 8B 74 24 18 C3
+
+movq 256(%rsp), %r12
+. 4459 0x12345678 9
+. 4C 8B A4 24 00 01 00 00 C3
+
+movq 256(%rsp), %rax
+. 4460 0x12345678 9
+. 48 8B 84 24 00 01 00 00 C3
+
+movq 264(%rsp), %rcx
+. 4461 0x12345678 9
+. 48 8B 8C 24 08 01 00 00 C3
+
+movq 288(%rsp), %rcx
+. 4462 0x12345678 9
+. 48 8B 8C 24 20 01 00 00 C3
+
+movq 304(%rsp), %rax
+. 4463 0x12345678 9
+. 48 8B 84 24 30 01 00 00 C3
+
+movq 304(%rsp), %rbx
+. 4464 0x12345678 9
+. 48 8B 9C 24 30 01 00 00 C3
+
+movq 304(%rsp), %rcx
+. 4465 0x12345678 9
+. 48 8B 8C 24 30 01 00 00 C3
+
+movq 312(%rsp), %rax
+. 4466 0x12345678 9
+. 48 8B 84 24 38 01 00 00 C3
+
+movq 312(%rsp), %rdx
+. 4467 0x12345678 9
+. 48 8B 94 24 38 01 00 00 C3
+
+movq 32(%r12), %r12
+. 4468 0x12345678 6
+. 4D 8B 64 24 20 C3
+
+movq 32(%r12), %rax
+. 4469 0x12345678 6
+. 49 8B 44 24 20 C3
+
+movq 32(%r12), %rcx
+. 4470 0x12345678 6
+. 49 8B 4C 24 20 C3
+
+movq 32(%r12), %rdi
+. 4471 0x12345678 6
+. 49 8B 7C 24 20 C3
+
+movq 32(%r12), %rsi
+. 4472 0x12345678 6
+. 49 8B 74 24 20 C3
+
+movq 32(%r13), %rdi
+. 4473 0x12345678 5
+. 49 8B 7D 20 C3
+
+movq 32(%r13), %rdx
+. 4474 0x12345678 5
+. 49 8B 55 20 C3
+
+movq 32(%r13), %rsi
+. 4475 0x12345678 5
+. 49 8B 75 20 C3
+
+movq 32(%r15), %rsi
+. 4476 0x12345678 5
+. 49 8B 77 20 C3
+
+movq 32(%rbp), %rdi
+. 4477 0x12345678 5
+. 48 8B 7D 20 C3
+
+movq 32(%rbp), %rdx
+. 4478 0x12345678 5
+. 48 8B 55 20 C3
+
+movq 32(%rbp), %rsi
+. 4479 0x12345678 5
+. 48 8B 75 20 C3
+
+movq 32(%rbx), %rax
+. 4480 0x12345678 5
+. 48 8B 43 20 C3
+
+movq 32(%rbx), %rdi
+. 4481 0x12345678 5
+. 48 8B 7B 20 C3
+
+movq 32(%rbx), %rsi
+. 4482 0x12345678 5
+. 48 8B 73 20 C3
+
+movq 32(%rdi), %rax
+. 4483 0x12345678 5
+. 48 8B 47 20 C3
+
+movq 32(%rdi), %rdi
+. 4484 0x12345678 5
+. 48 8B 7F 20 C3
+
+movq 32(%rsp), %r13
+. 4485 0x12345678 6
+. 4C 8B 6C 24 20 C3
+
+movq 32(%rsp), %r14
+. 4486 0x12345678 6
+. 4C 8B 74 24 20 C3
+
+movq 32(%rsp), %r15
+. 4487 0x12345678 6
+. 4C 8B 7C 24 20 C3
+
+movq 32(%rsp), %rax
+. 4488 0x12345678 6
+. 48 8B 44 24 20 C3
+
+movq 32(%rsp), %rbp
+. 4489 0x12345678 6
+. 48 8B 6C 24 20 C3
+
+movq 32(%rsp), %rcx
+. 4490 0x12345678 6
+. 48 8B 4C 24 20 C3
+
+movq 32(%rsp), %rdi
+. 4491 0x12345678 6
+. 48 8B 7C 24 20 C3
+
+movq 32(%rsp), %rdx
+. 4492 0x12345678 6
+. 48 8B 54 24 20 C3
+
+movq 32(%rsp), %rsi
+. 4493 0x12345678 6
+. 48 8B 74 24 20 C3
+
+movq 40(%rsp), %r12
+. 4494 0x12345678 6
+. 4C 8B 64 24 28 C3
+
+movq 40(%rsp), %r14
+. 4495 0x12345678 6
+. 4C 8B 74 24 28 C3
+
+movq 40(%rsp), %rax
+. 4496 0x12345678 6
+. 48 8B 44 24 28 C3
+
+movq 40(%rsp), %rbx
+. 4497 0x12345678 6
+. 48 8B 5C 24 28 C3
+
+movq 40(%rsp), %rcx
+. 4498 0x12345678 6
+. 48 8B 4C 24 28 C3
+
+movq 40(%rsp), %rdi
+. 4499 0x12345678 6
+. 48 8B 7C 24 28 C3
+
+movq 40(%rsp), %rsi
+. 4500 0x12345678 6
+. 48 8B 74 24 28 C3
+
+movq 48(%rsp), %r13
+. 4501 0x12345678 6
+. 4C 8B 6C 24 30 C3
+
+movq 48(%rsp), %r15
+. 4502 0x12345678 6
+. 4C 8B 7C 24 30 C3
+
+movq 48(%rsp), %rax
+. 4503 0x12345678 6
+. 48 8B 44 24 30 C3
+
+movq 48(%rsp), %rbp
+. 4504 0x12345678 6
+. 48 8B 6C 24 30 C3
+
+movq 48(%rsp), %rcx
+. 4505 0x12345678 6
+. 48 8B 4C 24 30 C3
+
+movq 48(%rsp), %rdx
+. 4506 0x12345678 6
+. 48 8B 54 24 30 C3
+
+movq 48(%rsp), %rsi
+. 4507 0x12345678 6
+. 48 8B 74 24 30 C3
+
+movq 4(%rax), %rdx
+. 4508 0x12345678 5
+. 48 8B 50 04 C3
+
+movq 4(%rdi), %rdi
+. 4509 0x12345678 5
+. 48 8B 7F 04 C3
+
+movq 560(%rsp), %rax
+. 4510 0x12345678 9
+. 48 8B 84 24 30 02 00 00 C3
+
+movq 560(%rsp), %rdi
+. 4511 0x12345678 9
+. 48 8B BC 24 30 02 00 00 C3
+
+movq 56(%rsp), %r10
+. 4512 0x12345678 6
+. 4C 8B 54 24 38 C3
+
+movq 56(%rsp), %r12
+. 4513 0x12345678 6
+. 4C 8B 64 24 38 C3
+
+movq 56(%rsp), %r14
+. 4514 0x12345678 6
+. 4C 8B 74 24 38 C3
+
+movq 56(%rsp), %rcx
+. 4515 0x12345678 6
+. 48 8B 4C 24 38 C3
+
+movq 56(%rsp), %rdi
+. 4516 0x12345678 6
+. 48 8B 7C 24 38 C3
+
+movq 56(%rsp), %rdx
+. 4517 0x12345678 6
+. 48 8B 54 24 38 C3
+
+movq 64(%rsp), %r13
+. 4518 0x12345678 6
+. 4C 8B 6C 24 40 C3
+
+movq 64(%rsp), %r15
+. 4519 0x12345678 6
+. 4C 8B 7C 24 40 C3
+
+movq 64(%rsp), %rdi
+. 4520 0x12345678 6
+. 48 8B 7C 24 40 C3
+
+movq 72(%rsp), %r14
+. 4521 0x12345678 6
+. 4C 8B 74 24 48 C3
+
+movq 72(%rsp), %rbx
+. 4522 0x12345678 6
+. 48 8B 5C 24 48 C3
+
+movq 72(%rsp), %rsi
+. 4523 0x12345678 6
+. 48 8B 74 24 48 C3
+
+movq 80(%rsp), %r15
+. 4524 0x12345678 6
+. 4C 8B 7C 24 50 C3
+
+movq 80(%rsp), %rax
+. 4525 0x12345678 6
+. 48 8B 44 24 50 C3
+
+movq 80(%rsp), %rbp
+. 4526 0x12345678 6
+. 48 8B 6C 24 50 C3
+
+movq 80(%rsp), %rbx
+. 4527 0x12345678 6
+. 48 8B 5C 24 50 C3
+
+movq 80(%rsp), %rdi
+. 4528 0x12345678 6
+. 48 8B 7C 24 50 C3
+
+movq 80(%rsp), %rdx
+. 4529 0x12345678 6
+. 48 8B 54 24 50 C3
+
+movq 88(%rsp), %r12
+. 4530 0x12345678 6
+. 4C 8B 64 24 58 C3
+
+movq 88(%rsp), %rax
+. 4531 0x12345678 6
+. 48 8B 44 24 58 C3
+
+movq 88(%rsp), %rbp
+. 4532 0x12345678 6
+. 48 8B 6C 24 58 C3
+
+movq 88(%rsp), %rbx
+. 4533 0x12345678 6
+. 48 8B 5C 24 58 C3
+
+movq 88(%rsp), %rcx
+. 4534 0x12345678 6
+. 48 8B 4C 24 58 C3
+
+movq 88(%rsp), %rdi
+. 4535 0x12345678 6
+. 48 8B 7C 24 58 C3
+
+movq 88(%rsp), %rdx
+. 4536 0x12345678 6
+. 48 8B 54 24 58 C3
+
+movq 88(%rsp), %rsi
+. 4537 0x12345678 6
+. 48 8B 74 24 58 C3
+
+movq 8(%r12), %r12
+. 4538 0x12345678 6
+. 4D 8B 64 24 08 C3
+
+movq 8(%r12), %rax
+. 4539 0x12345678 6
+. 49 8B 44 24 08 C3
+
+movq 8(%r12), %rbx
+. 4540 0x12345678 6
+. 49 8B 5C 24 08 C3
+
+movq 8(%r12), %rcx
+. 4541 0x12345678 6
+. 49 8B 4C 24 08 C3
+
+movq 8(%r12), %rdi
+. 4542 0x12345678 6
+. 49 8B 7C 24 08 C3
+
+movq 8(%r12), %rsi
+. 4543 0x12345678 6
+. 49 8B 74 24 08 C3
+
+movq 8(%r13), %r13
+. 4544 0x12345678 5
+. 4D 8B 6D 08 C3
+
+movq 8(%r13), %rax
+. 4545 0x12345678 5
+. 49 8B 45 08 C3
+
+movq 8(%r13), %rbx
+. 4546 0x12345678 5
+. 49 8B 5D 08 C3
+
+movq 8(%r13), %rcx
+. 4547 0x12345678 5
+. 49 8B 4D 08 C3
+
+movq 8(%r13), %rdi
+. 4548 0x12345678 5
+. 49 8B 7D 08 C3
+
+movq 8(%r13), %rdx
+. 4549 0x12345678 5
+. 49 8B 55 08 C3
+
+movq 8(%r13), %rsi
+. 4550 0x12345678 5
+. 49 8B 75 08 C3
+
+movq 8(%r14), %rax
+. 4551 0x12345678 5
+. 49 8B 46 08 C3
+
+movq 8(%r14), %rcx
+. 4552 0x12345678 5
+. 49 8B 4E 08 C3
+
+movq 8(%r14), %rdi
+. 4553 0x12345678 5
+. 49 8B 7E 08 C3
+
+movq 8(%r14), %rdx
+. 4554 0x12345678 5
+. 49 8B 56 08 C3
+
+movq 8(%r15), %rax
+. 4555 0x12345678 5
+. 49 8B 47 08 C3
+
+movq 8(%r15), %rsi
+. 4556 0x12345678 5
+. 49 8B 77 08 C3
+
+movq 8(%r8), %rax
+. 4557 0x12345678 5
+. 49 8B 40 08 C3
+
+movq 8(%rax), %r9
+. 4558 0x12345678 5
+. 4C 8B 48 08 C3
+
+movq 8(%rax), %rax
+. 4559 0x12345678 5
+. 48 8B 40 08 C3
+
+movq 8(%rax), %rcx
+. 4560 0x12345678 5
+. 48 8B 48 08 C3
+
+movq 8(%rax), %rdi
+. 4561 0x12345678 5
+. 48 8B 78 08 C3
+
+movq 8(%rax), %rdx
+. 4562 0x12345678 5
+. 48 8B 50 08 C3
+
+movq 8(%rax), %rsi
+. 4563 0x12345678 5
+. 48 8B 70 08 C3
+
+movq 8(%rbp), %r13
+. 4564 0x12345678 5
+. 4C 8B 6D 08 C3
+
+movq 8(%rbp), %rax
+. 4565 0x12345678 5
+. 48 8B 45 08 C3
+
+movq 8(%rbp), %rbx
+. 4566 0x12345678 5
+. 48 8B 5D 08 C3
+
+movq 8(%rbp), %rdi
+. 4567 0x12345678 5
+. 48 8B 7D 08 C3
+
+movq 8(%rbp), %rdx
+. 4568 0x12345678 5
+. 48 8B 55 08 C3
+
+movq 8(%rbp), %rsi
+. 4569 0x12345678 5
+. 48 8B 75 08 C3
+
+movq 8(%rbx), %r12
+. 4570 0x12345678 5
+. 4C 8B 63 08 C3
+
+movq 8(%rbx), %r8
+. 4571 0x12345678 5
+. 4C 8B 43 08 C3
+
+movq 8(%rbx), %r9
+. 4572 0x12345678 5
+. 4C 8B 4B 08 C3
+
+movq 8(%rbx), %rax
+. 4573 0x12345678 5
+. 48 8B 43 08 C3
+
+movq 8(%rbx), %rbx
+. 4574 0x12345678 5
+. 48 8B 5B 08 C3
+
+movq 8(%rbx), %rdi
+. 4575 0x12345678 5
+. 48 8B 7B 08 C3
+
+movq 8(%rbx), %rdx
+. 4576 0x12345678 5
+. 48 8B 53 08 C3
+
+movq 8(%rbx), %rsi
+. 4577 0x12345678 5
+. 48 8B 73 08 C3
+
+movq 8(%rcx), %r8
+. 4578 0x12345678 5
+. 4C 8B 41 08 C3
+
+movq 8(%rcx), %rax
+. 4579 0x12345678 5
+. 48 8B 41 08 C3
+
+movq 8(%rcx), %rdx
+. 4580 0x12345678 5
+. 48 8B 51 08 C3
+
+movq 8(%rdi), %rax
+. 4581 0x12345678 5
+. 48 8B 47 08 C3
+
+movq 8(%rdi), %rcx
+. 4582 0x12345678 5
+. 48 8B 4F 08 C3
+
+movq 8(%rdi), %rdi
+. 4583 0x12345678 5
+. 48 8B 7F 08 C3
+
+movq 8(%rdi), %rdx
+. 4584 0x12345678 5
+. 48 8B 57 08 C3
+
+movq 8(%rdi), %rsi
+. 4585 0x12345678 5
+. 48 8B 77 08 C3
+
+movq 8(%rdx), %r8
+. 4586 0x12345678 5
+. 4C 8B 42 08 C3
+
+movq 8(%rdx), %rax
+. 4587 0x12345678 5
+. 48 8B 42 08 C3
+
+movq 8(%rdx), %rbx
+. 4588 0x12345678 5
+. 48 8B 5A 08 C3
+
+movq 8(%rdx), %rcx
+. 4589 0x12345678 5
+. 48 8B 4A 08 C3
+
+movq 8(%rdx), %rdi
+. 4590 0x12345678 5
+. 48 8B 7A 08 C3
+
+movq 8(%rdx), %rdx
+. 4591 0x12345678 5
+. 48 8B 52 08 C3
+
+movq 8(%rsi), %rax
+. 4592 0x12345678 5
+. 48 8B 46 08 C3
+
+movq 8(%rsi), %rdi
+. 4593 0x12345678 5
+. 48 8B 7E 08 C3
+
+movq 8(%rsi), %rdx
+. 4594 0x12345678 5
+. 48 8B 56 08 C3
+
+movq 8(%rsi), %rsi
+. 4595 0x12345678 5
+. 48 8B 76 08 C3
+
+movq 8(%rsp), %r12
+. 4596 0x12345678 6
+. 4C 8B 64 24 08 C3
+
+movq 8(%rsp), %r8
+. 4597 0x12345678 6
+. 4C 8B 44 24 08 C3
+
+movq 8(%rsp), %rax
+. 4598 0x12345678 6
+. 48 8B 44 24 08 C3
+
+movq 8(%rsp), %rbp
+. 4599 0x12345678 6
+. 48 8B 6C 24 08 C3
+
+movq 8(%rsp), %rbx
+. 4600 0x12345678 6
+. 48 8B 5C 24 08 C3
+
+movq 8(%rsp), %rcx
+. 4601 0x12345678 6
+. 48 8B 4C 24 08 C3
+
+movq 8(%rsp), %rsi
+. 4602 0x12345678 6
+. 48 8B 74 24 08 C3
+
+movq 96(%rsp), %r12
+. 4603 0x12345678 6
+. 4C 8B 64 24 60 C3
+
+movq 96(%rsp), %r13
+. 4604 0x12345678 6
+. 4C 8B 6C 24 60 C3
+
+movq 96(%rsp), %rbp
+. 4605 0x12345678 6
+. 48 8B 6C 24 60 C3
+
+movq 96(%rsp), %rcx
+. 4606 0x12345678 6
+. 48 8B 4C 24 60 C3
+
+movq 96(%rsp), %rdi
+. 4607 0x12345678 6
+. 48 8B 7C 24 60 C3
+
+movq 96(%rsp), %rsi
+. 4608 0x12345678 6
+. 48 8B 74 24 60 C3
+
+movq $999, 72(%rsp)
+. 4609 0x12345678 10
+. 48 C7 44 24 48 E7 03 00 00 C3
+
+movq $998, (%rsp)
+. 4610 0x12345678 9
+. 48 C7 04 24 E6 03 00 00 C3
+
+movq 333(,%rax,8), %rax
+. 4611 0x12345678 9
+. 48 8B 04 C5 4D 01 00 00 C3
+
+movq 5(,%rax,8), %rax
+. 4612 0x12345678 9
+. 48 8B 04 C5 05 00 00 00 C3
+
+movq 3(,%rax,8), %rax
+. 4613 0x12345678 9
+. 48 8B 04 C5 03 00 00 00 C3
+
+movq 6(,%rax,8), %rax
+. 4614 0x12345678 9
+. 48 8B 04 C5 06 00 00 00 C3
+
+movq 444(%rip), %rax
+. 4615 0x12345678 8
+. 48 8B 05 BC 01 00 00 C3
+
+movq 41(%rip), %rax
+. 4616 0x12345678 8
+. 48 8B 05 29 00 00 00 C3
+
+movq 42(%rip), %rbx
+. 4617 0x12345678 8
+. 48 8B 1D 2A 00 00 00 C3
+
+movq 43(%rip), %rdi
+. 4618 0x12345678 8
+. 48 8B 3D 2B 00 00 00 C3
+
+movq 2(,%rax,8), %rax
+. 4619 0x12345678 9
+. 48 8B 04 C5 02 00 00 00 C3
+
+movq 7(,%rax,8), %rax
+. 4620 0x12345678 9
+. 48 8B 04 C5 07 00 00 00 C3
+
+movq 0(,%rax,8), %rsi
+. 4621 0x12345678 9
+. 48 8B 34 C5 00 00 00 00 C3
+
+movq 1(,%rax,8), %rax
+. 4622 0x12345678 9
+. 48 8B 04 C5 01 00 00 00 C3
+
+movq %r10, 56(%rsp)
+. 4623 0x12345678 6
+. 4C 89 54 24 38 C3
+
+movq %r10, %rax
+. 4624 0x12345678 4
+. 4C 89 D0 C3
+
+movq %r11, %rax
+. 4625 0x12345678 4
+. 4C 89 D8 C3
+
+movq %r12, 16(%rax)
+. 4626 0x12345678 5
+. 4C 89 60 10 C3
+
+movq %r12, -16(%rsp)
+. 4627 0x12345678 6
+. 4C 89 64 24 F0 C3
+
+movq %r12, -24(%rsp)
+. 4628 0x12345678 6
+. 4C 89 64 24 E8 C3
+
+movq %r12, -32(%rsp)
+. 4629 0x12345678 6
+. 4C 89 64 24 E0 C3
+
+movq %r12, 8(%rax)
+. 4630 0x12345678 5
+. 4C 89 60 08 C3
+
+movq %r12, -8(%rsp)
+. 4631 0x12345678 6
+. 4C 89 64 24 F8 C3
+
+movq (%r12), %r13
+. 4632 0x12345678 5
+. 4D 8B 2C 24 C3
+
+movq (%r12), %r14
+. 4633 0x12345678 5
+. 4D 8B 34 24 C3
+
+movq %r12, %r8
+. 4634 0x12345678 4
+. 4D 89 E0 C3
+
+movq %r12, %r9
+. 4635 0x12345678 4
+. 4D 89 E1 C3
+
+movq (%r12), %rax
+. 4636 0x12345678 5
+. 49 8B 04 24 C3
+
+movq %r12, %rax
+. 4637 0x12345678 4
+. 4C 89 E0 C3
+
+movq %r12, (%rax,%rcx,8)
+. 4638 0x12345678 5
+. 4C 89 24 C8 C3
+
+movq (%r12,%rbx,8), %rax
+. 4639 0x12345678 5
+. 49 8B 04 DC C3
+
+movq (%r12,%rbx,8), %rsi
+. 4640 0x12345678 5
+. 49 8B 34 DC C3
+
+movq %r12, %rcx
+. 4641 0x12345678 4
+. 4C 89 E1 C3
+
+movq (%r12), %rdi
+. 4642 0x12345678 5
+. 49 8B 3C 24 C3
+
+movq %r12, %rdi
+. 4643 0x12345678 4
+. 4C 89 E7 C3
+
+movq %r12, %rdx
+. 4644 0x12345678 4
+. 4C 89 E2 C3
+
+movq %r12, %rsi
+. 4645 0x12345678 4
+. 4C 89 E6 C3
+
+movq %r13, 16(%rax)
+. 4646 0x12345678 5
+. 4C 89 68 10 C3
+
+movq %r13, -16(%rsp)
+. 4647 0x12345678 6
+. 4C 89 6C 24 F0 C3
+
+movq %r13, 24(%rax)
+. 4648 0x12345678 5
+. 4C 89 68 18 C3
+
+movq %r13, -24(%rsp)
+. 4649 0x12345678 6
+. 4C 89 6C 24 E8 C3
+
+movq %r13, 8(%rbx)
+. 4650 0x12345678 5
+. 4C 89 6B 08 C3
+
+movq %r13, -8(%rsp)
+. 4651 0x12345678 6
+. 4C 89 6C 24 F8 C3
+
+movq %r13, %r14
+. 4652 0x12345678 4
+. 4D 89 EE C3
+
+movq %r13, %r8
+. 4653 0x12345678 4
+. 4D 89 E8 C3
+
+movq %r13, %r9
+. 4654 0x12345678 4
+. 4D 89 E9 C3
+
+movq (%r13), %rax
+. 4655 0x12345678 5
+. 49 8B 45 00 C3
+
+movq %r13, %rax
+. 4656 0x12345678 4
+. 4C 89 E8 C3
+
+movq (%r13), %rbx
+. 4657 0x12345678 5
+. 49 8B 5D 00 C3
+
+movq (%r13,%rbx,8), %rsi
+. 4658 0x12345678 6
+. 49 8B 74 DD 00 C3
+
+movq %r13, %rcx
+. 4659 0x12345678 4
+. 4C 89 E9 C3
+
+movq (%r13), %rdi
+. 4660 0x12345678 5
+. 49 8B 7D 00 C3
+
+movq %r13, %rdi
+. 4661 0x12345678 4
+. 4C 89 EF C3
+
+movq (%r13), %rdx
+. 4662 0x12345678 5
+. 49 8B 55 00 C3
+
+movq %r13, %rdx
+. 4663 0x12345678 4
+. 4C 89 EA C3
+
+movq %r13, %rsi
+. 4664 0x12345678 4
+. 4C 89 EE C3
+
+movq %r13, (%rsp)
+. 4665 0x12345678 5
+. 4C 89 2C 24 C3
+
+movq %r14, -16(%rsp)
+. 4666 0x12345678 6
+. 4C 89 74 24 F0 C3
+
+movq %r14, 24(%rax)
+. 4667 0x12345678 5
+. 4C 89 70 18 C3
+
+movq %r14, 32(%rax)
+. 4668 0x12345678 5
+. 4C 89 70 20 C3
+
+movq %r14, 8(%r12)
+. 4669 0x12345678 6
+. 4D 89 74 24 08 C3
+
+movq %r14, -8(%rsp)
+. 4670 0x12345678 6
+. 4C 89 74 24 F8 C3
+
+movq %r14, %r8
+. 4671 0x12345678 4
+. 4D 89 F0 C3
+
+movq %r14, %r9
+. 4672 0x12345678 4
+. 4D 89 F1 C3
+
+movq (%r14), %rax
+. 4673 0x12345678 4
+. 49 8B 06 C3
+
+movq %r14, %rax
+. 4674 0x12345678 4
+. 4C 89 F0 C3
+
+movq %r14, (%rbx)
+. 4675 0x12345678 4
+. 4C 89 33 C3
+
+movq %r14, %rcx
+. 4676 0x12345678 4
+. 4C 89 F1 C3
+
+movq (%r14), %rdi
+. 4677 0x12345678 4
+. 49 8B 3E C3
+
+movq %r14, %rdi
+. 4678 0x12345678 4
+. 4C 89 F7 C3
+
+movq (%r14), %rdx
+. 4679 0x12345678 4
+. 49 8B 16 C3
+
+movq %r14, %rdx
+. 4680 0x12345678 4
+. 4C 89 F2 C3
+
+movq %r14, %rsi
+. 4681 0x12345678 4
+. 4C 89 F6 C3
+
+movq %r15, 16(%rbx)
+. 4682 0x12345678 5
+. 4C 89 7B 10 C3
+
+movq %r15, 32(%rax)
+. 4683 0x12345678 5
+. 4C 89 78 20 C3
+
+movq %r15, -8(%rsp)
+. 4684 0x12345678 6
+. 4C 89 7C 24 F8 C3
+
+movq %r15, 8(%rsp)
+. 4685 0x12345678 6
+. 4C 89 7C 24 08 C3
+
+movq %r15, %r8
+. 4686 0x12345678 4
+. 4D 89 F8 C3
+
+movq (%r15), %rax
+. 4687 0x12345678 4
+. 49 8B 07 C3
+
+movq %r15, (%rax,%rcx,8)
+. 4688 0x12345678 5
+. 4C 89 3C C8 C3
+
+movq %r15, %rbp
+. 4689 0x12345678 4
+. 4C 89 FD C3
+
+movq %r15, %rcx
+. 4690 0x12345678 4
+. 4C 89 F9 C3
+
+movq (%r15), %rdi
+. 4691 0x12345678 4
+. 49 8B 3F C3
+
+movq %r15, %rdi
+. 4692 0x12345678 4
+. 4C 89 FF C3
+
+movq %r15, %rdx
+. 4693 0x12345678 4
+. 4C 89 FA C3
+
+movq %r15, %rsi
+. 4694 0x12345678 4
+. 4C 89 FE C3
+
+movq %r8, 104(%rsp)
+. 4695 0x12345678 6
+. 4C 89 44 24 68 C3
+
+movq %r8, 136(%rsp)
+. 4696 0x12345678 9
+. 4C 89 84 24 88 00 00 00 C3
+
+movq %r8, 144(%rsp)
+. 4697 0x12345678 9
+. 4C 89 84 24 90 00 00 00 C3
+
+movq %r8, 24(%rsp)
+. 4698 0x12345678 6
+. 4C 89 44 24 18 C3
+
+movq %r8, 296(%rsp)
+. 4699 0x12345678 9
+. 4C 89 84 24 28 01 00 00 C3
+
+movq %r8, 32(%rsp)
+. 4700 0x12345678 6
+. 4C 89 44 24 20 C3
+
+movq %r8, 40(%rsp)
+. 4701 0x12345678 6
+. 4C 89 44 24 28 C3
+
+movq %r8, 48(%rsp)
+. 4702 0x12345678 6
+. 4C 89 44 24 30 C3
+
+movq %r8, 64(%rsp)
+. 4703 0x12345678 6
+. 4C 89 44 24 40 C3
+
+movq %r8, 8(%rbx)
+. 4704 0x12345678 5
+. 4C 89 43 08 C3
+
+movq %r8, 8(%rsi)
+. 4705 0x12345678 5
+. 4C 89 46 08 C3
+
+movq %r8, 8(%rsp)
+. 4706 0x12345678 6
+. 4C 89 44 24 08 C3
+
+movq %r8, %r15
+. 4707 0x12345678 4
+. 4D 89 C7 C3
+
+movq (%r8), %rax
+. 4708 0x12345678 4
+. 49 8B 00 C3
+
+movq %r8, %rax
+. 4709 0x12345678 4
+. 4C 89 C0 C3
+
+movq %r8, %rbp
+. 4710 0x12345678 4
+. 4C 89 C5 C3
+
+movq %r8, (%rbx)
+. 4711 0x12345678 4
+. 4C 89 03 C3
+
+movq %r8, %rbx
+. 4712 0x12345678 4
+. 4C 89 C3 C3
+
+movq %r8, %rsi
+. 4713 0x12345678 4
+. 4C 89 C6 C3
+
+movq %r8, (%rsp)
+. 4714 0x12345678 5
+. 4C 89 04 24 C3
+
+movq %r9, 128(%rsp)
+. 4715 0x12345678 9
+. 4C 89 8C 24 80 00 00 00 C3
+
+movq %r9, 16(%rbx)
+. 4716 0x12345678 5
+. 4C 89 4B 10 C3
+
+movq %r9, 24(%rsp)
+. 4717 0x12345678 6
+. 4C 89 4C 24 18 C3
+
+movq %r9, 32(%rsp)
+. 4718 0x12345678 6
+. 4C 89 4C 24 20 C3
+
+movq %r9, 72(%rsp)
+. 4719 0x12345678 6
+. 4C 89 4C 24 48 C3
+
+movq %r9, %r14
+. 4720 0x12345678 4
+. 4D 89 CE C3
+
+movq %r9, %rax
+. 4721 0x12345678 4
+. 4C 89 C8 C3
+
+movq (%r9,%rax,8), %rdi
+. 4722 0x12345678 5
+. 49 8B 3C C1 C3
+
+movq %r9, %rdx
+. 4723 0x12345678 4
+. 4C 89 CA C3
+
+movq %r9, (%rsp)
+. 4724 0x12345678 5
+. 4C 89 0C 24 C3
+
+movq %rax, 112(%rsp)
+. 4725 0x12345678 6
+. 48 89 44 24 70 C3
+
+movq %rax, 16(%r12)
+. 4726 0x12345678 6
+. 49 89 44 24 10 C3
+
+movq %rax, 16(%rbp)
+. 4727 0x12345678 5
+. 48 89 45 10 C3
+
+movq %rax, 16(%rbx)
+. 4728 0x12345678 5
+. 48 89 43 10 C3
+
+movq %rax, 16(%rsi)
+. 4729 0x12345678 5
+. 48 89 46 10 C3
+
+movq %rax, 16(%rsp)
+. 4730 0x12345678 6
+. 48 89 44 24 10 C3
+
+movq %rax, 176(%rsp)
+. 4731 0x12345678 9
+. 48 89 84 24 B0 00 00 00 C3
+
+movq %rax, 24(%r12)
+. 4732 0x12345678 6
+. 49 89 44 24 18 C3
+
+movq %rax, 24(%r13)
+. 4733 0x12345678 5
+. 49 89 45 18 C3
+
+movq %rax, 24(%rbp)
+. 4734 0x12345678 5
+. 48 89 45 18 C3
+
+movq %rax, 24(%rbx)
+. 4735 0x12345678 5
+. 48 89 43 18 C3
+
+movq %rax, 24(%rdx)
+. 4736 0x12345678 5
+. 48 89 42 18 C3
+
+movq %rax, 24(%rsi)
+. 4737 0x12345678 5
+. 48 89 46 18 C3
+
+movq %rax, 24(%rsp)
+. 4738 0x12345678 6
+. 48 89 44 24 18 C3
+
+movq %rax, 256(%rsp)
+. 4739 0x12345678 9
+. 48 89 84 24 00 01 00 00 C3
+
+movq %rax, 32(%r12)
+. 4740 0x12345678 6
+. 49 89 44 24 20 C3
+
+movq %rax, 32(%r15)
+. 4741 0x12345678 5
+. 49 89 47 20 C3
+
+movq %rax, 32(%rbp)
+. 4742 0x12345678 5
+. 48 89 45 20 C3
+
+movq %rax, 32(%rbx)
+. 4743 0x12345678 5
+. 48 89 43 20 C3
+
+movq %rax, 32(%rsp)
+. 4744 0x12345678 6
+. 48 89 44 24 20 C3
+
+movq %rax, 40(%rsp)
+. 4745 0x12345678 6
+. 48 89 44 24 28 C3
+
+movq %rax, 48(%rsp)
+. 4746 0x12345678 6
+. 48 89 44 24 30 C3
+
+movq %rax, 56(%rsp)
+. 4747 0x12345678 6
+. 48 89 44 24 38 C3
+
+movq %rax, 80(%rsp)
+. 4748 0x12345678 6
+. 48 89 44 24 50 C3
+
+movq %rax, 88(%rsp)
+. 4749 0x12345678 6
+. 48 89 44 24 58 C3
+
+movq %rax, 8(%r12)
+. 4750 0x12345678 6
+. 49 89 44 24 08 C3
+
+movq %rax, 8(%r14)
+. 4751 0x12345678 5
+. 49 89 46 08 C3
+
+movq %rax, 8(%r15)
+. 4752 0x12345678 5
+. 49 89 47 08 C3
+
+movq %rax, 8(%rbp)
+. 4753 0x12345678 5
+. 48 89 45 08 C3
+
+movq %rax, 8(%rbx)
+. 4754 0x12345678 5
+. 48 89 43 08 C3
+
+movq %rax, 8(%rcx)
+. 4755 0x12345678 5
+. 48 89 41 08 C3
+
+movq %rax, -8(%rcx,%rdx,8)
+. 4756 0x12345678 6
+. 48 89 44 D1 F8 C3
+
+movq %rax, 8(%rsi)
+. 4757 0x12345678 5
+. 48 89 46 08 C3
+
+movq %rax, 8(%rsp)
+. 4758 0x12345678 6
+. 48 89 44 24 08 C3
+
+movq %rax, 96(%rsp)
+. 4759 0x12345678 6
+. 48 89 44 24 60 C3
+
+movq %rax, 388989(%rip)
+. 4760 0x12345678 8
+. 48 89 05 7D EF 05 00 C3
+
+movq %rax, (%r12)
+. 4761 0x12345678 5
+. 49 89 04 24 C3
+
+movq %rax, %r12
+. 4762 0x12345678 4
+. 49 89 C4 C3
+
+movq (%rax,%r12,8), %rbx
+. 4763 0x12345678 5
+. 4A 8B 1C E0 C3
+
+movq %rax, (%r12,%rbx,8)
+. 4764 0x12345678 5
+. 49 89 04 DC C3
+
+movq %rax, (%r13)
+. 4765 0x12345678 5
+. 49 89 45 00 C3
+
+movq %rax, %r13
+. 4766 0x12345678 4
+. 49 89 C5 C3
+
+movq (%rax,%r13,8), %r12
+. 4767 0x12345678 5
+. 4E 8B 24 E8 C3
+
+movq %rax, (%r13,%rbx,8)
+. 4768 0x12345678 6
+. 49 89 44 DD 00 C3
+
+movq %rax, (%r13,%rdx,8)
+. 4769 0x12345678 6
+. 49 89 44 D5 00 C3
+
+movq %rax, (%r14)
+. 4770 0x12345678 4
+. 49 89 06 C3
+
+movq %rax, %r14
+. 4771 0x12345678 4
+. 49 89 C6 C3
+
+movq %rax, (%r14,%rbx,8)
+. 4772 0x12345678 5
+. 49 89 04 DE C3
+
+movq %rax, %r15
+. 4773 0x12345678 4
+. 49 89 C7 C3
+
+movq (%rax,%r15,8), %rax
+. 4774 0x12345678 5
+. 4A 8B 04 F8 C3
+
+movq %rax, %r8
+. 4775 0x12345678 4
+. 49 89 C0 C3
+
+movq %rax, (%r8,%rdx,8)
+. 4776 0x12345678 5
+. 49 89 04 D0 C3
+
+movq %rax, %r9
+. 4777 0x12345678 4
+. 49 89 C1 C3
+
+movq %rax, (%r9,%rdx,8)
+. 4778 0x12345678 5
+. 49 89 04 D1 C3
+
+movq (%rax), %rax
+. 4779 0x12345678 4
+. 48 8B 00 C3
+
+movq %rax, (%rbp)
+. 4780 0x12345678 5
+. 48 89 45 00 C3
+
+movq %rax, %rbp
+. 4781 0x12345678 4
+. 48 89 C5 C3
+
+movq (%rax,%rbp,8), %rbx
+. 4782 0x12345678 5
+. 48 8B 1C E8 C3
+
+movq %rax, (%rbp,%rbx,8)
+. 4783 0x12345678 6
+. 48 89 44 DD 00 C3
+
+movq %rax, (%rbx)
+. 4784 0x12345678 4
+. 48 89 03 C3
+
+movq %rax, %rbx
+. 4785 0x12345678 4
+. 48 89 C3 C3
+
+movq (%rax,%rbx,8), %rdi
+. 4786 0x12345678 5
+. 48 8B 3C D8 C3
+
+movq (%rax,%rbx,8), %rdx
+. 4787 0x12345678 5
+. 48 8B 14 D8 C3
+
+movq (%rax,%rbx,8), %rsi
+. 4788 0x12345678 5
+. 48 8B 34 D8 C3
+
+movq %rax, (%rbx,%r12,8)
+. 4789 0x12345678 5
+. 4A 89 04 E3 C3
+
+movq %rax, %rcx
+. 4790 0x12345678 4
+. 48 89 C1 C3
+
+movq (%rax,%rcx,8), %r13
+. 4791 0x12345678 5
+. 4C 8B 2C C8 C3
+
+movq (%rax,%rcx,8), %rax
+. 4792 0x12345678 5
+. 48 8B 04 C8 C3
+
+movq (%rax,%rcx,8), %rsi
+. 4793 0x12345678 5
+. 48 8B 34 C8 C3
+
+movq %rax, (%rcx,%rsi,8)
+. 4794 0x12345678 5
+. 48 89 04 F1 C3
+
+movq (%rax), %rdi
+. 4795 0x12345678 4
+. 48 8B 38 C3
+
+movq %rax, %rdi
+. 4796 0x12345678 4
+. 48 89 C7 C3
+
+movq (%rax,%rdi,8), %rax
+. 4797 0x12345678 5
+. 48 8B 04 F8 C3
+
+movq (%rax,%rdi,8), %rsi
+. 4798 0x12345678 5
+. 48 8B 34 F8 C3
+
+movq %rax, (%rdi,%rdx,8)
+. 4799 0x12345678 5
+. 48 89 04 D7 C3
+
+movq %rax, (%rdx)
+. 4800 0x12345678 4
+. 48 89 02 C3
+
+movq %rax, %rdx
+. 4801 0x12345678 4
+. 48 89 C2 C3
+
+movq (%rax,%rdx,8), %r12
+. 4802 0x12345678 5
+. 4C 8B 24 D0 C3
+
+movq (%rax,%rdx,8), %r13
+. 4803 0x12345678 5
+. 4C 8B 2C D0 C3
+
+movq (%rax,%rdx,8), %r14
+. 4804 0x12345678 5
+. 4C 8B 34 D0 C3
+
+movq (%rax,%rdx,8), %rax
+. 4805 0x12345678 5
+. 48 8B 04 D0 C3
+
+movq (%rax,%rdx,8), %rbp
+. 4806 0x12345678 5
+. 48 8B 2C D0 C3
+
+movq (%rax,%rdx,8), %rbx
+. 4807 0x12345678 5
+. 48 8B 1C D0 C3
+
+movq (%rax,%rdx,8), %rdi
+. 4808 0x12345678 5
+. 48 8B 3C D0 C3
+
+movq (%rax,%rdx,8), %rdx
+. 4809 0x12345678 5
+. 48 8B 14 D0 C3
+
+movq (%rax,%rdx,8), %rsi
+. 4810 0x12345678 5
+. 48 8B 34 D0 C3
+
+movq %rax, (%rdx,%r12,8)
+. 4811 0x12345678 5
+. 4A 89 04 E2 C3
+
+movq %rax, (%rdx,%r13,8)
+. 4812 0x12345678 5
+. 4A 89 04 EA C3
+
+movq %rax, (%rdx,%r15,8)
+. 4813 0x12345678 5
+. 4A 89 04 FA C3
+
+movq %rax, (%rdx,%rbp,8)
+. 4814 0x12345678 5
+. 48 89 04 EA C3
+
+movq %rax, (%rdx,%rbx,8)
+. 4815 0x12345678 5
+. 48 89 04 DA C3
+
+movq %rax, (%rsi)
+. 4816 0x12345678 4
+. 48 89 06 C3
+
+movq %rax, %rsi
+. 4817 0x12345678 4
+. 48 89 C6 C3
+
+movq (%rax,%rsi,8), %rdi
+. 4818 0x12345678 5
+. 48 8B 3C F0 C3
+
+movq %rax, (%rsi,%rcx,8)
+. 4819 0x12345678 5
+. 48 89 04 CE C3
+
+movq %rax, (%rsp)
+. 4820 0x12345678 5
+. 48 89 04 24 C3
+
+movq %rax, 16(%rip)
+. 4821 0x12345678 8
+. 48 89 05 10 00 00 00 C3
+
+movq %rax, 8(%rip)
+. 4822 0x12345678 8
+. 48 89 05 08 00 00 00 C3
+
+movq %rax, (%rip)
+. 4823 0x12345678 8
+. 48 89 05 00 00 00 00 C3
+
+movq %rbp, -16(%rsp)
+. 4824 0x12345678 6
+. 48 89 6C 24 F0 C3
+
+movq %rbp, -24(%rsp)
+. 4825 0x12345678 6
+. 48 89 6C 24 E8 C3
+
+movq %rbp, -32(%rsp)
+. 4826 0x12345678 6
+. 48 89 6C 24 E0 C3
+
+movq %rbp, -40(%rsp)
+. 4827 0x12345678 6
+. 48 89 6C 24 D8 C3
+
+movq %rbp, 8(%rax)
+. 4828 0x12345678 5
+. 48 89 68 08 C3
+
+movq %rbp, -8(%rsp)
+. 4829 0x12345678 6
+. 48 89 6C 24 F8 C3
+
+movq %rbp, 96(%rsp)
+. 4830 0x12345678 6
+. 48 89 6C 24 60 C3
+
+movq %rbp, %r8
+. 4831 0x12345678 4
+. 49 89 E8 C3
+
+movq %rbp, %r9
+. 4832 0x12345678 4
+. 49 89 E9 C3
+
+movq %rbp, %rax
+. 4833 0x12345678 4
+. 48 89 E8 C3
+
+movq (%rbp,%rax,8), %rbx
+. 4834 0x12345678 6
+. 48 8B 5C C5 00 C3
+
+movq %rbp, (%rax,%rcx,8)
+. 4835 0x12345678 5
+. 48 89 2C C8 C3
+
+movq (%rbp,%rbx,8), %rdi
+. 4836 0x12345678 6
+. 48 8B 7C DD 00 C3
+
+movq %rbp, %rcx
+. 4837 0x12345678 4
+. 48 89 E9 C3
+
+movq (%rbp), %rdi
+. 4838 0x12345678 5
+. 48 8B 7D 00 C3
+
+movq %rbp, %rdi
+. 4839 0x12345678 4
+. 48 89 EF C3
+
+movq (%rbp), %rdx
+. 4840 0x12345678 5
+. 48 8B 55 00 C3
+
+movq %rbp, %rdx
+. 4841 0x12345678 4
+. 48 89 EA C3
+
+movq %rbp, (%rdx,%rax,8)
+. 4842 0x12345678 5
+. 48 89 2C C2 C3
+
+movq (%rbp), %rsi
+. 4843 0x12345678 5
+. 48 8B 75 00 C3
+
+movq %rbp, %rsi
+. 4844 0x12345678 4
+. 48 89 EE C3
+
+movq %rbx, 112(%rsp)
+. 4845 0x12345678 6
+. 48 89 5C 24 70 C3
+
+movq %rbx, 16(%rax)
+. 4846 0x12345678 5
+. 48 89 58 10 C3
+
+movq %rbx, -16(%rsp)
+. 4847 0x12345678 6
+. 48 89 5C 24 F0 C3
+
+movq %rbx, -24(%rsp)
+. 4848 0x12345678 6
+. 48 89 5C 24 E8 C3
+
+movq %rbx, -32(%rsp)
+. 4849 0x12345678 6
+. 48 89 5C 24 E0 C3
+
+movq %rbx, -40(%rsp)
+. 4850 0x12345678 6
+. 48 89 5C 24 D8 C3
+
+movq %rbx, -48(%rsp)
+. 4851 0x12345678 6
+. 48 89 5C 24 D0 C3
+
+movq %rbx, 4(%rax)
+. 4852 0x12345678 5
+. 48 89 58 04 C3
+
+movq %rbx, 560(%rsp)
+. 4853 0x12345678 9
+. 48 89 9C 24 30 02 00 00 C3
+
+movq %rbx, 8(%rax)
+. 4854 0x12345678 5
+. 48 89 58 08 C3
+
+movq (%rbx,%r12,8), %rsi
+. 4855 0x12345678 5
+. 4A 8B 34 E3 C3
+
+movq %rbx, (%r13)
+. 4856 0x12345678 5
+. 49 89 5D 00 C3
+
+movq %rbx, %r8
+. 4857 0x12345678 4
+. 49 89 D8 C3
+
+movq (%rbx), %rax
+. 4858 0x12345678 4
+. 48 8B 03 C3
+
+movq %rbx, (%rax)
+. 4859 0x12345678 4
+. 48 89 18 C3
+
+movq %rbx, %rax
+. 4860 0x12345678 4
+. 48 89 D8 C3
+
+movq %rbx, %rcx
+. 4861 0x12345678 4
+. 48 89 D9 C3
+
+movq (%rbx,%rcx,8), %rax
+. 4862 0x12345678 5
+. 48 8B 04 CB C3
+
+movq (%rbx), %rdi
+. 4863 0x12345678 4
+. 48 8B 3B C3
+
+movq %rbx, %rdi
+. 4864 0x12345678 4
+. 48 89 DF C3
+
+movq (%rbx), %rdx
+. 4865 0x12345678 4
+. 48 8B 13 C3
+
+movq %rbx, %rdx
+. 4866 0x12345678 4
+. 48 89 DA C3
+
+movq (%rbx), %rsi
+. 4867 0x12345678 4
+. 48 8B 33 C3
+
+movq %rbx, %rsi
+. 4868 0x12345678 4
+. 48 89 DE C3
+
+movq %rbx, (%rsp)
+. 4869 0x12345678 5
+. 48 89 1C 24 C3
+
+movq %rcx, 144(%rsp)
+. 4870 0x12345678 9
+. 48 89 8C 24 90 00 00 00 C3
+
+movq %rcx, 24(%rsp)
+. 4871 0x12345678 6
+. 48 89 4C 24 18 C3
+
+movq %rcx, 32(%rsp)
+. 4872 0x12345678 6
+. 48 89 4C 24 20 C3
+
+movq %rcx, 40(%rsp)
+. 4873 0x12345678 6
+. 48 89 4C 24 28 C3
+
+movq %rcx, 56(%rsp)
+. 4874 0x12345678 6
+. 48 89 4C 24 38 C3
+
+movq %rcx, 64(%rsp)
+. 4875 0x12345678 6
+. 48 89 4C 24 40 C3
+
+movq %rcx, %r12
+. 4876 0x12345678 4
+. 49 89 CC C3
+
+movq %rcx, %r13
+. 4877 0x12345678 4
+. 49 89 CD C3
+
+movq (%rcx,%r13,8), %rbp
+. 4878 0x12345678 5
+. 4A 8B 2C E9 C3
+
+movq %rcx, %r14
+. 4879 0x12345678 4
+. 49 89 CE C3
+
+movq %rcx, (%r14,%rax,8)
+. 4880 0x12345678 5
+. 49 89 0C C6 C3
+
+movq %rcx, %r15
+. 4881 0x12345678 4
+. 49 89 CF C3
+
+movq %rcx, %r8
+. 4882 0x12345678 4
+. 49 89 C8 C3
+
+movq (%rcx), %rax
+. 4883 0x12345678 4
+. 48 8B 01 C3
+
+movq %rcx, %rax
+. 4884 0x12345678 4
+. 48 89 C8 C3
+
+movq (%rcx,%rax,8), %r12
+. 4885 0x12345678 5
+. 4C 8B 24 C1 C3
+
+movq (%rcx,%rax,8), %r13
+. 4886 0x12345678 5
+. 4C 8B 2C C1 C3
+
+movq (%rcx,%rax,8), %rax
+. 4887 0x12345678 5
+. 48 8B 04 C1 C3
+
+movq (%rcx,%rax,8), %rsi
+. 4888 0x12345678 5
+. 48 8B 34 C1 C3
+
+movq (%rcx), %rdi
+. 4889 0x12345678 4
+. 48 8B 39 C3
+
+movq %rcx, %rdi
+. 4890 0x12345678 4
+. 48 89 CF C3
+
+movq %rcx, %rdx
+. 4891 0x12345678 4
+. 48 89 CA C3
+
+movq %rcx, %rsi
+. 4892 0x12345678 4
+. 48 89 CE C3
+
+movq (%rcx,%rsi,8), %rbx
+. 4893 0x12345678 5
+. 48 8B 1C F1 C3
+
+movq %rdi, 120(%rsp)
+. 4894 0x12345678 6
+. 48 89 7C 24 78 C3
+
+movq %rdi, 160(%rsp)
+. 4895 0x12345678 9
+. 48 89 BC 24 A0 00 00 00 C3
+
+movq %rdi, 32(%rsp)
+. 4896 0x12345678 6
+. 48 89 7C 24 20 C3
+
+movq %rdi, 72(%rsp)
+. 4897 0x12345678 6
+. 48 89 7C 24 48 C3
+
+movq %rdi, 66(%rip)
+. 4898 0x12345678 8
+. 48 89 3D 42 00 00 00 C3
+
+movq (%rdi), %r10
+. 4899 0x12345678 4
+. 4C 8B 17 C3
+
+movq %rdi, %r10
+. 4900 0x12345678 4
+. 49 89 FA C3
+
+movq %rdi, %r11
+. 4901 0x12345678 4
+. 49 89 FB C3
+
+movq %rdi, %r12
+. 4902 0x12345678 4
+. 49 89 FC C3
+
+movq %rdi, %r13
+. 4903 0x12345678 4
+. 49 89 FD C3
+
+movq (%rdi), %r14
+. 4904 0x12345678 4
+. 4C 8B 37 C3
+
+movq %rdi, %r14
+. 4905 0x12345678 4
+. 49 89 FE C3
+
+movq %rdi, %r15
+. 4906 0x12345678 4
+. 49 89 FF C3
+
+movq %rdi, %r8
+. 4907 0x12345678 4
+. 49 89 F8 C3
+
+movq (%rdi), %r9
+. 4908 0x12345678 4
+. 4C 8B 0F C3
+
+movq %rdi, %r9
+. 4909 0x12345678 4
+. 49 89 F9 C3
+
+movq (%rdi), %rax
+. 4910 0x12345678 4
+. 48 8B 07 C3
+
+movq %rdi, %rax
+. 4911 0x12345678 4
+. 48 89 F8 C3
+
+movq (%rdi,%rax,8), %rax
+. 4912 0x12345678 5
+. 48 8B 04 C7 C3
+
+movq (%rdi,%rax,8), %rdx
+. 4913 0x12345678 5
+. 48 8B 14 C7 C3
+
+movq %rdi, (%rbp)
+. 4914 0x12345678 5
+. 48 89 7D 00 C3
+
+movq %rdi, %rbp
+. 4915 0x12345678 4
+. 48 89 FD C3
+
+movq %rdi, %rbx
+. 4916 0x12345678 4
+. 48 89 FB C3
+
+movq %rdi, %rcx
+. 4917 0x12345678 4
+. 48 89 F9 C3
+
+movq (%rdi), %rdi
+. 4918 0x12345678 4
+. 48 8B 3F C3
+
+movq (%rdi), %rdx
+. 4919 0x12345678 4
+. 48 8B 17 C3
+
+movq %rdi, %rdx
+. 4920 0x12345678 4
+. 48 89 FA C3
+
+movq (%rdi,%rdx,8), %rax
+. 4921 0x12345678 5
+. 48 8B 04 D7 C3
+
+movq %rdi, %rsi
+. 4922 0x12345678 4
+. 48 89 FE C3
+
+movq %rdi, 33(%rip)
+. 4923 0x12345678 8
+. 48 89 3D 21 00 00 00 C3
+
+movq %rdi, -898989(%rip)
+. 4924 0x12345678 8
+. 48 89 3D 53 48 F2 FF C3
+
+movq %rdx, 128(%rsp)
+. 4925 0x12345678 9
+. 48 89 94 24 80 00 00 00 C3
+
+movq %rdx, 152(%rsp)
+. 4926 0x12345678 9
+. 48 89 94 24 98 00 00 00 C3
+
+movq %rdx, 16(%rsp)
+. 4927 0x12345678 6
+. 48 89 54 24 10 C3
+
+movq %rdx, 24(%rsp)
+. 4928 0x12345678 6
+. 48 89 54 24 18 C3
+
+movq %rdx, 304(%rsp)
+. 4929 0x12345678 9
+. 48 89 94 24 30 01 00 00 C3
+
+movq %rdx, 32(%rsp)
+. 4930 0x12345678 6
+. 48 89 54 24 20 C3
+
+movq %rdx, 48(%rsp)
+. 4931 0x12345678 6
+. 48 89 54 24 30 C3
+
+movq %rdx, 4(%rcx)
+. 4932 0x12345678 5
+. 48 89 51 04 C3
+
+movq %rdx, -8(%rbx,%rax,8)
+. 4933 0x12345678 6
+. 48 89 54 C3 F8 C3
+
+movq %rdx, 8(%rcx)
+. 4934 0x12345678 5
+. 48 89 51 08 C3
+
+movq %rdx, 8989(%rip)
+. 4935 0x12345678 8
+. 48 89 15 1D 23 00 00 C3
+
+movq %rdx, %r12
+. 4936 0x12345678 4
+. 49 89 D4 C3
+
+movq %rdx, (%r12,%rax,8)
+. 4937 0x12345678 5
+. 49 89 14 C4 C3
+
+movq %rdx, %r13
+. 4938 0x12345678 4
+. 49 89 D5 C3
+
+movq %rdx, %r14
+. 4939 0x12345678 4
+. 49 89 D6 C3
+
+movq %rdx, %r15
+. 4940 0x12345678 4
+. 49 89 D7 C3
+
+movq (%rdx), %r8
+. 4941 0x12345678 4
+. 4C 8B 02 C3
+
+movq (%rdx), %rax
+. 4942 0x12345678 4
+. 48 8B 02 C3
+
+movq %rdx, (%rax)
+. 4943 0x12345678 4
+. 48 89 10 C3
+
+movq %rdx, %rax
+. 4944 0x12345678 4
+. 48 89 D0 C3
+
+movq (%rdx,%rax,8), %r12
+. 4945 0x12345678 5
+. 4C 8B 24 C2 C3
+
+movq (%rdx,%rax,8), %rdi
+. 4946 0x12345678 5
+. 48 8B 3C C2 C3
+
+movq (%rdx,%rax,8), %rdx
+. 4947 0x12345678 5
+. 48 8B 14 C2 C3
+
+movq (%rdx,%rax,8), %rsi
+. 4948 0x12345678 5
+. 48 8B 34 C2 C3
+
+movq %rdx, %rbp
+. 4949 0x12345678 4
+. 48 89 D5 C3
+
+movq %rdx, %rbx
+. 4950 0x12345678 4
+. 48 89 D3 C3
+
+movq (%rdx,%rbx,8), %rdi
+. 4951 0x12345678 5
+. 48 8B 3C DA C3
+
+movq %rdx, %rcx
+. 4952 0x12345678 4
+. 48 89 D1 C3
+
+movq (%rdx,%rcx,8), %rsi
+. 4953 0x12345678 5
+. 48 8B 34 CA C3
+
+movq %rdx, %rdi
+. 4954 0x12345678 4
+. 48 89 D7 C3
+
+movq (%rdx), %rdx
+. 4955 0x12345678 4
+. 48 8B 12 C3
+
+movq %rdx, %rsi
+. 4956 0x12345678 4
+. 48 89 D6 C3
+
+movq %rdx, (%rsp)
+. 4957 0x12345678 5
+. 48 89 14 24 C3
+
+movq %rsi, 136(%rsp)
+. 4958 0x12345678 9
+. 48 89 B4 24 88 00 00 00 C3
+
+movq %rsi, 152(%rsp)
+. 4959 0x12345678 9
+. 48 89 B4 24 98 00 00 00 C3
+
+movq %rsi, 160(%rsp)
+. 4960 0x12345678 9
+. 48 89 B4 24 A0 00 00 00 C3
+
+movq %rsi, 16(%rsp)
+. 4961 0x12345678 6
+. 48 89 74 24 10 C3
+
+movq %rsi, 24(%rsp)
+. 4962 0x12345678 6
+. 48 89 74 24 18 C3
+
+movq %rsi, 312(%rsp)
+. 4963 0x12345678 9
+. 48 89 B4 24 38 01 00 00 C3
+
+movq %rsi, 32(%rsp)
+. 4964 0x12345678 6
+. 48 89 74 24 20 C3
+
+movq %rsi, 40(%rsp)
+. 4965 0x12345678 6
+. 48 89 74 24 28 C3
+
+movq %rsi, 48(%rsp)
+. 4966 0x12345678 6
+. 48 89 74 24 30 C3
+
+movq %rsi, 56(%rsp)
+. 4967 0x12345678 6
+. 48 89 74 24 38 C3
+
+movq %rsi, 8(%rsp)
+. 4968 0x12345678 6
+. 48 89 74 24 08 C3
+
+movq %rsi, 77(%rip)
+. 4969 0x12345678 8
+. 48 89 35 4D 00 00 00 C3
+
+movq %rsi, %r10
+. 4970 0x12345678 4
+. 49 89 F2 C3
+
+movq %rsi, %r12
+. 4971 0x12345678 4
+. 49 89 F4 C3
+
+movq %rsi, %r13
+. 4972 0x12345678 4
+. 49 89 F5 C3
+
+movq %rsi, %r14
+. 4973 0x12345678 4
+. 49 89 F6 C3
+
+movq %rsi, %r15
+. 4974 0x12345678 4
+. 49 89 F7 C3
+
+movq %rsi, %r8
+. 4975 0x12345678 4
+. 49 89 F0 C3
+
+movq %rsi, %r9
+. 4976 0x12345678 4
+. 49 89 F1 C3
+
+movq %rsi, %rax
+. 4977 0x12345678 4
+. 48 89 F0 C3
+
+movq %rsi, %rbp
+. 4978 0x12345678 4
+. 48 89 F5 C3
+
+movq %rsi, %rbx
+. 4979 0x12345678 4
+. 48 89 F3 C3
+
+movq %rsi, %rcx
+. 4980 0x12345678 4
+. 48 89 F1 C3
+
+movq %rsi, %rdi
+. 4981 0x12345678 4
+. 48 89 F7 C3
+
+movq (%rsi), %rdx
+. 4982 0x12345678 4
+. 48 8B 16 C3
+
+movq %rsi, %rdx
+. 4983 0x12345678 4
+. 48 89 F2 C3
+
+movq (%rsi,%rdx,8), %rax
+. 4984 0x12345678 5
+. 48 8B 04 D6 C3
+
+movq %rsi, (%rsp)
+. 4985 0x12345678 5
+. 48 89 34 24 C3
+
+movq %rsi, 77(%rip)
+. 4986 0x12345678 8
+. 48 89 35 4D 00 00 00 C3
+
+movq (%rsp), %r9
+. 4987 0x12345678 5
+. 4C 8B 0C 24 C3
+
+movq (%rsp), %rax
+. 4988 0x12345678 5
+. 48 8B 04 24 C3
+
+movq (%rsp), %rbx
+. 4989 0x12345678 5
+. 48 8B 1C 24 C3
+
+movq (%rsp), %rdi
+. 4990 0x12345678 5
+. 48 8B 3C 24 C3
+
+movq (%rsp), %rdx
+. 4991 0x12345678 5
+. 48 8B 14 24 C3
+
+movq %rsp, %rdx
+. 4992 0x12345678 4
+. 48 89 E2 C3
+
+movq (%rsp), %rsi
+. 4993 0x12345678 5
+. 48 8B 34 24 C3
+
+movq 33(%rip), %rcx
+. 4994 0x12345678 8
+. 48 8B 0D 21 00 00 00 C3
+
+movq 66(%rip), %rdi
+. 4995 0x12345678 8
+. 48 8B 3D 42 00 00 00 C3
+
+movl %r12d, %eax
+. 4996 0x12345678 4
+. 44 89 E0 C3
+
+movl %r13d, %eax
+. 4997 0x12345678 4
+. 44 89 E8 C3
+
+movl %r13d, %edx
+. 4998 0x12345678 4
+. 44 89 EA C3
+
+movl %r14d, %edi
+. 4999 0x12345678 4
+. 44 89 F7 C3
+
+movl %r15d, %eax
+. 5000 0x12345678 4
+. 44 89 F8 C3
+
+movl %r15d, %ebx
+. 5001 0x12345678 4
+. 44 89 FB C3
+
+movl %r15d, %esi
+. 5002 0x12345678 4
+. 44 89 FE C3
+
+movl (%rax,%rdx,8), %ebx
+. 5003 0x12345678 4
+. 8B 1C D0 C3
+
+movl (%rdx), %r8d
+. 5004 0x12345678 4
+. 44 8B 02 C3
+
+movsbl 8(%rax),%edi
+. 5005 0x12345678 5
+. 0F BE 78 08 C3
+
+movsbl %ah, %ebx
+. 5006 0x12345678 4
+. 0F BE DC C3
+
+movsbl %ah, %edx
+. 5007 0x12345678 4
+. 0F BE D4 C3
+
+movsbl %al,%ebx
+. 5008 0x12345678 4
+. 0F BE D8 C3
+
+movsbl %al,%ecx
+. 5009 0x12345678 4
+. 0F BE C8 C3
+
+movsbl %al,%edi
+. 5010 0x12345678 4
+. 0F BE F8 C3
+
+movsbl %al,%edx
+. 5011 0x12345678 4
+. 0F BE D0 C3
+
+movsbl %al,%esi
+. 5012 0x12345678 4
+. 0F BE F0 C3
+
+movsbl %al,%r12d
+. 5013 0x12345678 5
+. 44 0F BE E0 C3
+
+movsbl %bh, %edi
+. 5014 0x12345678 4
+. 0F BE FF C3
+
+movsbl %bl,%ebx
+. 5015 0x12345678 4
+. 0F BE DB C3
+
+movsbl %ch, %edx
+. 5016 0x12345678 4
+. 0F BE D5 C3
+
+movsbl %cl,%eax
+. 5017 0x12345678 4
+. 0F BE C1 C3
+
+movsbl %cl,%ecx
+. 5018 0x12345678 4
+. 0F BE C9 C3
+
+movsbl %dil,%ecx
+. 5019 0x12345678 5
+. 40 0F BE CF C3
+
+movsbl %dl,%eax
+. 5020 0x12345678 4
+. 0F BE C2 C3
+
+movsbl %dl,%edx
+. 5021 0x12345678 4
+. 0F BE D2 C3
+
+movsbl %r11b,%edx
+. 5022 0x12345678 5
+. 41 0F BE D3 C3
+
+movsbl %r8b,%r8d
+. 5023 0x12345678 5
+. 45 0F BE C0 C3
+
+movsbl (%rax,%rcx),%edi
+. 5024 0x12345678 5
+. 0F BE 3C 08 C3
+
+movsbl (%rax,%rdx),%edi
+. 5025 0x12345678 5
+. 0F BE 3C 10 C3
+
+movsbl (%rax,%rsi),%edi
+. 5026 0x12345678 5
+. 0F BE 3C 30 C3
+
+movsbl (%rcx,%r12),%eax
+. 5027 0x12345678 6
+. 42 0F BE 04 21 C3
+
+movsbl (%rdi,%rax),%eax
+. 5028 0x12345678 5
+. 0F BE 04 07 C3
+
+movsbl (%rdx),%edi
+. 5029 0x12345678 4
+. 0F BE 3A C3
+
+movsbl (%rsp,%rax),%edi
+. 5030 0x12345678 5
+. 0F BE 3C 04 C3
+
+movsbq (%rdi,%rax),%rax
+. 5031 0x12345678 6
+. 48 0F BE 04 07 C3
+
+movslq 104(%rsp),%rdi
+. 5033 0x12345678 6
+. 48 63 7C 24 68 C3
+
+movslq 108(%rsp),%rax
+. 5034 0x12345678 6
+. 48 63 44 24 6C C3
+
+movslq 108(%rsp),%rdx
+. 5035 0x12345678 6
+. 48 63 54 24 6C C3
+
+movslq 120(%rsp),%rdi
+. 5036 0x12345678 6
+. 48 63 7C 24 78 C3
+
+movslq 124(%rsp),%rdi
+. 5037 0x12345678 6
+. 48 63 7C 24 7C C3
+
+movslq 124(%rsp),%rsi
+. 5038 0x12345678 6
+. 48 63 74 24 7C C3
+
+movslq 12(%rax),%rdi
+. 5039 0x12345678 5
+. 48 63 78 0C C3
+
+movslq 12(%rsp),%r15
+. 5040 0x12345678 6
+. 4C 63 7C 24 0C C3
+
+movslq 12(%rsp),%rax
+. 5041 0x12345678 6
+. 48 63 44 24 0C C3
+
+movslq 12(%rsp),%rdi
+. 5042 0x12345678 6
+. 48 63 7C 24 0C C3
+
+movslq 168(%rsp),%rax
+. 5043 0x12345678 9
+. 48 63 84 24 A8 00 00 00 C3
+
+movslq 16(%rsp),%rax
+. 5044 0x12345678 6
+. 48 63 44 24 10 C3
+
+movslq 16(%rsp),%rdi
+. 5045 0x12345678 6
+. 48 63 7C 24 10 C3
+
+movslq 176(%rsp),%rax
+. 5046 0x12345678 9
+. 48 63 84 24 B0 00 00 00 C3
+
+movslq 180(%rsp),%rax
+. 5047 0x12345678 9
+. 48 63 84 24 B4 00 00 00 C3
+
+movslq 184(%rsp),%rax
+. 5048 0x12345678 9
+. 48 63 84 24 B8 00 00 00 C3
+
+movslq 188(%rsp),%rax
+. 5049 0x12345678 9
+. 48 63 84 24 BC 00 00 00 C3
+
+movslq 20(%rsp),%rax
+. 5050 0x12345678 6
+. 48 63 44 24 14 C3
+
+movslq 24(%rbx),%rdi
+. 5051 0x12345678 5
+. 48 63 7B 18 C3
+
+movslq 28(%rsp),%rax
+. 5052 0x12345678 6
+. 48 63 44 24 1C C3
+
+movslq 44(%rsp),%rax
+. 5053 0x12345678 6
+. 48 63 44 24 2C C3
+
+movslq 8(%r12),%rax
+. 5054 0x12345678 6
+. 49 63 44 24 08 C3
+
+movslq 8(%rbx),%rax
+. 5055 0x12345678 5
+. 48 63 43 08 C3
+
+movslq 8(%rbx),%rbx
+. 5056 0x12345678 5
+. 48 63 5B 08 C3
+
+movslq 8(%rbx),%rdi
+. 5057 0x12345678 5
+. 48 63 7B 08 C3
+
+movslq 8(%rdx),%rax
+. 5058 0x12345678 5
+. 48 63 42 08 C3
+
+movslq 8(%rsi),%rax
+. 5059 0x12345678 5
+. 48 63 46 08 C3
+
+movslq %eax,%rdi
+. 5060 0x12345678 4
+. 48 63 F8 C3
+
+movslq %ebp,%r12
+. 5061 0x12345678 4
+. 4C 63 E5 C3
+
+movslq %ebp,%r13
+. 5062 0x12345678 4
+. 4C 63 ED C3
+
+movslq %ebp,%rax
+. 5063 0x12345678 4
+. 48 63 C5 C3
+
+movslq %ebp,%rbx
+. 5064 0x12345678 4
+. 48 63 DD C3
+
+movslq %ebp,%rcx
+. 5065 0x12345678 4
+. 48 63 CD C3
+
+movslq %ebp,%rdi
+. 5066 0x12345678 4
+. 48 63 FD C3
+
+movslq %ebp,%rdx
+. 5067 0x12345678 4
+. 48 63 D5 C3
+
+movslq %ebp,%rsi
+. 5068 0x12345678 4
+. 48 63 F5 C3
+
+movslq %ebx,%r13
+. 5069 0x12345678 4
+. 4C 63 EB C3
+
+movslq %ebx,%rax
+. 5070 0x12345678 4
+. 48 63 C3 C3
+
+movslq %ebx,%rcx
+. 5071 0x12345678 4
+. 48 63 CB C3
+
+movslq %ebx,%rdi
+. 5072 0x12345678 4
+. 48 63 FB C3
+
+movslq %ebx,%rdx
+. 5073 0x12345678 4
+. 48 63 D3 C3
+
+movslq %ecx,%rax
+. 5074 0x12345678 4
+. 48 63 C1 C3
+
+movslq %ecx,%rdi
+. 5075 0x12345678 4
+. 48 63 F9 C3
+
+movslq %ecx,%rdx
+. 5076 0x12345678 4
+. 48 63 D1 C3
+
+movslq %edi,%rax
+. 5077 0x12345678 4
+. 48 63 C7 C3
+
+movslq %edi,%rdi
+. 5078 0x12345678 4
+. 48 63 FF C3
+
+movslq %edx,%rax
+. 5079 0x12345678 4
+. 48 63 C2 C3
+
+movslq %edx,%rcx
+. 5080 0x12345678 4
+. 48 63 CA C3
+
+movslq %edx,%rdi
+. 5081 0x12345678 4
+. 48 63 FA C3
+
+movslq %edx,%rdx
+. 5082 0x12345678 4
+. 48 63 D2 C3
+
+movslq %edx,%rsi
+. 5083 0x12345678 4
+. 48 63 F2 C3
+
+movslq %esi,%rax
+. 5084 0x12345678 4
+. 48 63 C6 C3
+
+movslq %esi,%rcx
+. 5085 0x12345678 4
+. 48 63 CE C3
+
+movslq %esi,%rdi
+. 5086 0x12345678 4
+. 48 63 FE C3
+
+movslq %esi,%rdx
+. 5087 0x12345678 4
+. 48 63 D6 C3
+
+movslq %r12d,%r8
+. 5088 0x12345678 4
+. 4D 63 C4 C3
+
+movslq %r12d,%rax
+. 5089 0x12345678 4
+. 49 63 C4 C3
+
+movslq %r12d,%rbp
+. 5090 0x12345678 4
+. 49 63 EC C3
+
+movslq %r12d,%rbx
+. 5091 0x12345678 4
+. 49 63 DC C3
+
+movslq %r12d,%rcx
+. 5092 0x12345678 4
+. 49 63 CC C3
+
+movslq %r12d,%rdx
+. 5093 0x12345678 4
+. 49 63 D4 C3
+
+movslq %r12d,%rsi
+. 5094 0x12345678 4
+. 49 63 F4 C3
+
+movslq %r13d,%rax
+. 5095 0x12345678 4
+. 49 63 C5 C3
+
+movslq %r13d,%rbx
+. 5096 0x12345678 4
+. 49 63 DD C3
+
+movslq %r13d,%rdx
+. 5097 0x12345678 4
+. 49 63 D5 C3
+
+movslq %r14d,%rax
+. 5098 0x12345678 4
+. 49 63 C6 C3
+
+movslq %r14d,%rbp
+. 5099 0x12345678 4
+. 49 63 EE C3
+
+movslq %r14d,%rcx
+. 5100 0x12345678 4
+. 49 63 CE C3
+
+movslq %r14d,%rdx
+. 5101 0x12345678 4
+. 49 63 D6 C3
+
+movslq %r15d,%rax
+. 5102 0x12345678 4
+. 49 63 C7 C3
+
+movslq %r15d,%rbx
+. 5103 0x12345678 4
+. 49 63 DF C3
+
+movslq %r15d,%rdx
+. 5104 0x12345678 4
+. 49 63 D7 C3
+
+movslq %r8d,%rax
+. 5105 0x12345678 4
+. 49 63 C0 C3
+
+movslq %r8d,%rbx
+. 5106 0x12345678 4
+. 49 63 D8 C3
+
+movslq %r8d,%rcx
+. 5107 0x12345678 4
+. 49 63 C8 C3
+
+movslq %r8d,%rdx
+. 5108 0x12345678 4
+. 49 63 D0 C3
+
+movslq %r9d,%r8
+. 5109 0x12345678 4
+. 4D 63 C1 C3
+
+movslq %r9d,%rcx
+. 5110 0x12345678 4
+. 49 63 C9 C3
+
+movslq %r9d,%rdx
+. 5111 0x12345678 4
+. 49 63 D1 C3
+
+movslq (%rdx),%r8
+. 5112 0x12345678 4
+. 4C 63 02 C3
+
+movswl 2(%rcx,%rax,4),%eax
+. 5113 0x12345678 6
+. 0F BF 44 81 02 C3
+
+movswl 2(%rcx,%rdx),%eax
+. 5114 0x12345678 6
+. 0F BF 44 11 02 C3
+
+movswl 2(%rdi,%rsi),%eax
+. 5115 0x12345678 6
+. 0F BF 44 37 02 C3
+
+movswl 2(%rdx,%rax,4),%eax
+. 5116 0x12345678 6
+. 0F BF 44 82 02 C3
+
+movswl 2(%rsi,%rdx,4),%edx
+. 5117 0x12345678 6
+. 0F BF 54 96 02 C3
+
+movswl 4(%rax,%rdx),%esi
+. 5118 0x12345678 6
+. 0F BF 74 10 04 C3
+
+movswl 4(%rcx,%rdx,8),%eax
+. 5119 0x12345678 6
+. 0F BF 44 D1 04 C3
+
+movswl 4(%rcx,%rsi,8),%eax
+. 5120 0x12345678 6
+. 0F BF 44 F1 04 C3
+
+movswl 4(%rdi,%rax),%esi
+. 5121 0x12345678 6
+. 0F BF 74 07 04 C3
+
+movswl 4(%rdx,%rax),%esi
+. 5122 0x12345678 6
+. 0F BF 74 02 04 C3
+
+movswl 6(%rcx,%rdx,8),%eax
+. 5123 0x12345678 6
+. 0F BF 44 D1 06 C3
+
+movswl 6(%rcx,%rsi,8),%eax
+. 5124 0x12345678 6
+. 0F BF 44 F1 06 C3
+
+movswl %cx,%ecx
+. 5125 0x12345678 4
+. 0F BF C9 C3
+
+movswl %di,%eax
+. 5126 0x12345678 4
+. 0F BF C7 C3
+
+movswl %di,%edi
+. 5127 0x12345678 4
+. 0F BF FF C3
+
+movswl %dx,%edx
+. 5128 0x12345678 4
+. 0F BF D2 C3
+
+movswl %r8w,%r8d
+. 5129 0x12345678 5
+. 45 0F BF C0 C3
+
+movswl (%rdx,%rax,4),%eax
+. 5130 0x12345678 5
+. 0F BF 04 82 C3
+
+movswl %si,%esi
+. 5131 0x12345678 4
+. 0F BF F6 C3
+
+movswq %ax,%rax
+. 5132 0x12345678 5
+. 48 0F BF C0 C3
+
+movw $0, 22(%rbx,%rax,2)
+. 5133 0x12345678 8
+. 66 C7 44 43 16 00 00 C3
+
+movw $0, 22(%rcx,%rdx,2)
+. 5134 0x12345678 8
+. 66 C7 44 51 16 00 00 C3
+
+movw $0, 24(%r14)
+. 5135 0x12345678 8
+. 66 41 C7 46 18 00 00 C3
+
+movw $0, 24(%rax)
+. 5136 0x12345678 7
+. 66 C7 40 18 00 00 C3
+
+movw $0, 4(%rdx,%rax)
+. 5137 0x12345678 8
+. 66 C7 44 02 04 00 00 C3
+
+movw $0, 6(%rdx,%rax)
+. 5138 0x12345678 8
+. 66 C7 44 02 06 00 00 C3
+
+movw $1, 30(%r14)
+. 5139 0x12345678 8
+. 66 41 C7 46 1E 01 00 C3
+
+movw $1, 30(%rax)
+. 5140 0x12345678 7
+. 66 C7 40 1E 01 00 C3
+
+movw $-2, 2(%rdx,%rax)
+. 5141 0x12345678 8
+. 66 C7 44 02 02 FE FF C3
+
+movw $-2, (%rdx,%rax)
+. 5142 0x12345678 7
+. 66 C7 04 02 FE FF C3
+
+movw %ax, 30(%rbx)
+. 5143 0x12345678 5
+. 66 89 43 1E C3
+
+movw %ax, 30(%rcx)
+. 5144 0x12345678 5
+. 66 89 41 1E C3
+
+movw %ax, 4(%rcx,%rdx,4)
+. 5145 0x12345678 6
+. 66 89 44 91 04 C3
+
+movw %ax, 4(%rcx,%rdx,8)
+. 5146 0x12345678 6
+. 66 89 44 D1 04 C3
+
+movw %ax, 6(%rcx,%rdx,8)
+. 5147 0x12345678 6
+. 66 89 44 D1 06 C3
+
+movw %bx, 4(%rcx,%rax,8)
+. 5148 0x12345678 6
+. 66 89 5C C1 04 C3
+
+movw %bx, 8(%rax)
+. 5149 0x12345678 5
+. 66 89 58 08 C3
+
+movw %cx, (%rdx,%rax)
+. 5150 0x12345678 5
+. 66 89 0C 02 C3
+
+movw %r13w, 2(%rbx,%rax)
+. 5151 0x12345678 7
+. 66 44 89 6C 03 02 C3
+
+movw %r13w, 2(%rdi,%rax)
+. 5152 0x12345678 7
+. 66 44 89 6C 07 02 C3
+
+movw %r13w, 6(%rcx,%rax,8)
+. 5153 0x12345678 7
+. 66 44 89 6C C1 06 C3
+
+movzbl 106(%rsp), %edi
+. 5154 0x12345678 6
+. 0F B6 7C 24 6A C3
+
+movzbl 106(%rsp), %r8d
+. 5155 0x12345678 7
+. 44 0F B6 44 24 6A C3
+
+movzbl 112(%rsp), %eax
+. 5156 0x12345678 6
+. 0F B6 44 24 70 C3
+
+movzbl 112(%rsp), %edx
+. 5157 0x12345678 6
+. 0F B6 54 24 70 C3
+
+movzbl 11(%rsp), %ebx
+. 5158 0x12345678 6
+. 0F B6 5C 24 0B C3
+
+movzbl 11(%rsp), %edi
+. 5159 0x12345678 6
+. 0F B6 7C 24 0B C3
+
+movzbl 11(%rsp), %edx
+. 5160 0x12345678 6
+. 0F B6 54 24 0B C3
+
+movzbl 11(%rsp), %r12d
+. 5161 0x12345678 7
+. 44 0F B6 64 24 0B C3
+
+movzbl 127(%rsp), %eax
+. 5162 0x12345678 6
+. 0F B6 44 24 7F C3
+
+movzbl 127(%rsp), %edi
+. 5163 0x12345678 6
+. 0F B6 7C 24 7F C3
+
+movzbl 128(%rsp,%rax), %eax
+. 5164 0x12345678 9
+. 0F B6 84 04 80 00 00 00 C3
+
+movzbl 128(%rsp,%rdx), %edx
+. 5165 0x12345678 9
+. 0F B6 94 14 80 00 00 00 C3
+
+movzbl 139(%rsp), %edi
+. 5166 0x12345678 9
+. 0F B6 BC 24 8B 00 00 00 C3
+
+movzbl 139(%rsp), %r8d
+. 5167 0x12345678 10
+. 44 0F B6 84 24 8B 00 00 00 C3
+
+movzbl 140(%rdi), %eax
+. 5168 0x12345678 8
+. 0F B6 87 8C 00 00 00 C3
+
+movzbl 148(%rdi), %edx
+. 5169 0x12345678 8
+. 0F B6 97 94 00 00 00 C3
+
+movzbl 15(%rsp), %ebx
+. 5170 0x12345678 6
+. 0F B6 5C 24 0F C3
+
+movzbl 15(%rsp), %edi
+. 5171 0x12345678 6
+. 0F B6 7C 24 0F C3
+
+movzbl 15(%rsp), %edx
+. 5172 0x12345678 6
+. 0F B6 54 24 0F C3
+
+movzbl 15(%rsp), %r9d
+. 5173 0x12345678 7
+. 44 0F B6 4C 24 0F C3
+
+movzbl 16(%rbx), %eax
+. 5174 0x12345678 5
+. 0F B6 43 10 C3
+
+movzbl 16(%rsp), %eax
+. 5175 0x12345678 6
+. 0F B6 44 24 10 C3
+
+movzbl 16(%rsp), %edx
+. 5176 0x12345678 6
+. 0F B6 54 24 10 C3
+
+movzbl 19(%rsp), %ebp
+. 5177 0x12345678 6
+. 0F B6 6C 24 13 C3
+
+movzbl 19(%rsp), %ebx
+. 5178 0x12345678 6
+. 0F B6 5C 24 13 C3
+
+movzbl 19(%rsp), %edi
+. 5179 0x12345678 6
+. 0F B6 7C 24 13 C3
+
+movzbl 19(%rsp), %edx
+. 5180 0x12345678 6
+. 0F B6 54 24 13 C3
+
+movzbl 1(%r12), %eax
+. 5181 0x12345678 7
+. 41 0F B6 44 24 01 C3
+
+movzbl 1(%r12), %ecx
+. 5182 0x12345678 7
+. 41 0F B6 4C 24 01 C3
+
+movzbl 1(%r12), %edx
+. 5183 0x12345678 7
+. 41 0F B6 54 24 01 C3
+
+movzbl 1(%rdi,%rcx), %edx
+. 5184 0x12345678 6
+. 0F B6 54 0F 01 C3
+
+movzbl 1(%rdx), %edx
+. 5185 0x12345678 5
+. 0F B6 52 01 C3
+
+movzbl 1(%rsi,%rdx), %eax
+. 5186 0x12345678 6
+. 0F B6 44 16 01 C3
+
+movzbl 200(%rsp), %eax
+. 5187 0x12345678 9
+. 0F B6 84 24 C8 00 00 00 C3
+
+movzbl 22(%rsp), %edi
+. 5188 0x12345678 6
+. 0F B6 7C 24 16 C3
+
+movzbl 22(%rsp), %edx
+. 5189 0x12345678 6
+. 0F B6 54 24 16 C3
+
+movzbl 22(%rsp), %r8d
+. 5190 0x12345678 7
+. 44 0F B6 44 24 16 C3
+
+movzbl 23(%rsp), %ecx
+. 5191 0x12345678 6
+. 0F B6 4C 24 17 C3
+
+movzbl 23(%rsp), %edi
+. 5192 0x12345678 6
+. 0F B6 7C 24 17 C3
+
+movzbl 23(%rsp), %esi
+. 5193 0x12345678 6
+. 0F B6 74 24 17 C3
+
+movzbl 25(%rsp), %ecx
+. 5194 0x12345678 6
+. 0F B6 4C 24 19 C3
+
+movzbl 26(%rsp), %ebx
+. 5195 0x12345678 6
+. 0F B6 5C 24 1A C3
+
+movzbl 26(%rsp), %edi
+. 5196 0x12345678 6
+. 0F B6 7C 24 1A C3
+
+movzbl 270(%rsp), %ecx
+. 5197 0x12345678 9
+. 0F B6 8C 24 0E 01 00 00 C3
+
+movzbl 270(%rsp), %edi
+. 5198 0x12345678 9
+. 0F B6 BC 24 0E 01 00 00 C3
+
+movzbl 270(%rsp), %esi
+. 5199 0x12345678 9
+. 0F B6 B4 24 0E 01 00 00 C3
+
+movzbl 270(%rsp), %r12d
+. 5200 0x12345678 10
+. 44 0F B6 A4 24 0E 01 00 00 C3
+
+movzbl 271(%rsp), %ebx
+. 5201 0x12345678 9
+. 0F B6 9C 24 0F 01 00 00 C3
+
+movzbl 271(%rsp), %edi
+. 5202 0x12345678 9
+. 0F B6 BC 24 0F 01 00 00 C3
+
+movzbl 271(%rsp), %edx
+. 5203 0x12345678 9
+. 0F B6 94 24 0F 01 00 00 C3
+
+movzbl 271(%rsp), %r12d
+. 5204 0x12345678 10
+. 44 0F B6 A4 24 0F 01 00 00 C3
+
+movzbl 271(%rsp), %r14d
+. 5205 0x12345678 10
+. 44 0F B6 B4 24 0F 01 00 00 C3
+
+movzbl 27(%rsp), %ebx
+. 5206 0x12345678 6
+. 0F B6 5C 24 1B C3
+
+movzbl 27(%rsp), %r13d
+. 5207 0x12345678 7
+. 44 0F B6 6C 24 1B C3
+
+movzbl 280(%rsp), %eax
+. 5208 0x12345678 9
+. 0F B6 84 24 18 01 00 00 C3
+
+movzbl 2(%r12), %eax
+. 5209 0x12345678 7
+. 41 0F B6 44 24 02 C3
+
+movzbl 2(%r12), %ebx
+. 5210 0x12345678 7
+. 41 0F B6 5C 24 02 C3
+
+movzbl 2(%r12), %ecx
+. 5211 0x12345678 7
+. 41 0F B6 4C 24 02 C3
+
+movzbl 2(%r12), %edi
+. 5212 0x12345678 7
+. 41 0F B6 7C 24 02 C3
+
+movzbl 2(%r12), %edx
+. 5213 0x12345678 7
+. 41 0F B6 54 24 02 C3
+
+movzbl 2(%r12), %r12d
+. 5214 0x12345678 7
+. 45 0F B6 64 24 02 C3
+
+movzbl 2(%r12), %r13d
+. 5215 0x12345678 7
+. 45 0F B6 6C 24 02 C3
+
+movzbl 2(%r12), %r15d
+. 5216 0x12345678 7
+. 45 0F B6 7C 24 02 C3
+
+movzbl 2(%rax,%r12), %r12d
+. 5217 0x12345678 7
+. 46 0F B6 64 20 02 C3
+
+movzbl 2(%rdi,%rcx), %edx
+. 5218 0x12345678 6
+. 0F B6 54 0F 02 C3
+
+movzbl 30(%rsp), %ecx
+. 5219 0x12345678 6
+. 0F B6 4C 24 1E C3
+
+movzbl 30(%rsp), %edi
+. 5220 0x12345678 6
+. 0F B6 7C 24 1E C3
+
+movzbl 30(%rsp), %edx
+. 5221 0x12345678 6
+. 0F B6 54 24 1E C3
+
+movzbl 31(%rsp), %ebx
+. 5222 0x12345678 6
+. 0F B6 5C 24 1F C3
+
+movzbl 31(%rsp), %ecx
+. 5223 0x12345678 6
+. 0F B6 4C 24 1F C3
+
+movzbl 31(%rsp), %edi
+. 5224 0x12345678 6
+. 0F B6 7C 24 1F C3
+
+movzbl 31(%rsp), %r12d
+. 5225 0x12345678 7
+. 44 0F B6 64 24 1F C3
+
+movzbl 31(%rsp), %r15d
+. 5226 0x12345678 7
+. 44 0F B6 7C 24 1F C3
+
+movzbl 32(%rsp), %ecx
+. 5227 0x12345678 6
+. 0F B6 4C 24 20 C3
+
+movzbl 35(%rsp), %ebx
+. 5228 0x12345678 6
+. 0F B6 5C 24 23 C3
+
+movzbl 35(%rsp), %edi
+. 5229 0x12345678 6
+. 0F B6 7C 24 23 C3
+
+movzbl 35(%rsp), %r12d
+. 5230 0x12345678 7
+. 44 0F B6 64 24 23 C3
+
+movzbl 38(%rsp), %ecx
+. 5231 0x12345678 6
+. 0F B6 4C 24 26 C3
+
+movzbl 38(%rsp), %edi
+. 5232 0x12345678 6
+. 0F B6 7C 24 26 C3
+
+movzbl 38(%rsp), %edx
+. 5233 0x12345678 6
+. 0F B6 54 24 26 C3
+
+movzbl 38(%rsp), %r8d
+. 5234 0x12345678 7
+. 44 0F B6 44 24 26 C3
+
+movzbl 39(%rsp), %eax
+. 5235 0x12345678 6
+. 0F B6 44 24 27 C3
+
+movzbl 39(%rsp), %ebp
+. 5236 0x12345678 6
+. 0F B6 6C 24 27 C3
+
+movzbl 39(%rsp), %ebx
+. 5237 0x12345678 6
+. 0F B6 5C 24 27 C3
+
+movzbl 39(%rsp), %ecx
+. 5238 0x12345678 6
+. 0F B6 4C 24 27 C3
+
+movzbl 39(%rsp), %edi
+. 5239 0x12345678 6
+. 0F B6 7C 24 27 C3
+
+movzbl 39(%rsp), %edx
+. 5240 0x12345678 6
+. 0F B6 54 24 27 C3
+
+movzbl 39(%rsp), %esi
+. 5241 0x12345678 6
+. 0F B6 74 24 27 C3
+
+movzbl 39(%rsp), %r13d
+. 5242 0x12345678 7
+. 44 0F B6 6C 24 27 C3
+
+movzbl 3(%r12), %eax
+. 5243 0x12345678 7
+. 41 0F B6 44 24 03 C3
+
+movzbl 3(%r12), %ebx
+. 5244 0x12345678 7
+. 41 0F B6 5C 24 03 C3
+
+movzbl 3(%r12), %esi
+. 5245 0x12345678 7
+. 41 0F B6 74 24 03 C3
+
+movzbl 3(%r12), %r12d
+. 5246 0x12345678 7
+. 45 0F B6 64 24 03 C3
+
+movzbl 3(%r12), %r14d
+. 5247 0x12345678 7
+. 45 0F B6 74 24 03 C3
+
+movzbl 3(%rax,%r12), %r12d
+. 5248 0x12345678 7
+. 46 0F B6 64 20 03 C3
+
+movzbl 3(%rdi,%rcx), %eax
+. 5249 0x12345678 6
+. 0F B6 44 0F 03 C3
+
+movzbl 42(%rsp), %edx
+. 5250 0x12345678 6
+. 0F B6 54 24 2A C3
+
+movzbl 43(%rsp), %eax
+. 5251 0x12345678 6
+. 0F B6 44 24 2B C3
+
+movzbl 43(%rsp), %ebx
+. 5252 0x12345678 6
+. 0F B6 5C 24 2B C3
+
+movzbl 43(%rsp), %edi
+. 5253 0x12345678 6
+. 0F B6 7C 24 2B C3
+
+movzbl 43(%rsp), %esi
+. 5254 0x12345678 6
+. 0F B6 74 24 2B C3
+
+movzbl 43(%rsp), %r12d
+. 5255 0x12345678 7
+. 44 0F B6 64 24 2B C3
+
+movzbl 44(%rbp), %eax
+. 5256 0x12345678 5
+. 0F B6 45 2C C3
+
+movzbl 46(%rsp), %eax
+. 5257 0x12345678 6
+. 0F B6 44 24 2E C3
+
+movzbl 46(%rsp), %esi
+. 5258 0x12345678 6
+. 0F B6 74 24 2E C3
+
+movzbl 47(%rsp), %eax
+. 5259 0x12345678 6
+. 0F B6 44 24 2F C3
+
+movzbl 47(%rsp), %ebx
+. 5260 0x12345678 6
+. 0F B6 5C 24 2F C3
+
+movzbl 47(%rsp), %edi
+. 5261 0x12345678 6
+. 0F B6 7C 24 2F C3
+
+movzbl 47(%rsp), %r12d
+. 5262 0x12345678 7
+. 44 0F B6 64 24 2F C3
+
+movzbl 47(%rsp), %r13d
+. 5263 0x12345678 7
+. 44 0F B6 6C 24 2F C3
+
+movzbl 48(%rsp), %eax
+. 5264 0x12345678 6
+. 0F B6 44 24 30 C3
+
+movzbl 48(%rsp), %ebx
+. 5265 0x12345678 6
+. 0F B6 5C 24 30 C3
+
+movzbl 48(%rsp), %edx
+. 5266 0x12345678 6
+. 0F B6 54 24 30 C3
+
+movzbl 48(%rsp), %esi
+. 5267 0x12345678 6
+. 0F B6 74 24 30 C3
+
+movzbl 4(%r12), %r12d
+. 5268 0x12345678 7
+. 45 0F B6 64 24 04 C3
+
+movzbl 55(%rsp), %ebx
+. 5269 0x12345678 6
+. 0F B6 5C 24 37 C3
+
+movzbl 55(%rsp), %edi
+. 5270 0x12345678 6
+. 0F B6 7C 24 37 C3
+
+movzbl 55(%rsp), %r12d
+. 5271 0x12345678 7
+. 44 0F B6 64 24 37 C3
+
+movzbl 59(%rsp), %eax
+. 5272 0x12345678 6
+. 0F B6 44 24 3B C3
+
+movzbl 59(%rsp), %edi
+. 5273 0x12345678 6
+. 0F B6 7C 24 3B C3
+
+movzbl 59(%rsp), %esi
+. 5274 0x12345678 6
+. 0F B6 74 24 3B C3
+
+movzbl 60(%rsp), %ebp
+. 5275 0x12345678 6
+. 0F B6 6C 24 3C C3
+
+movzbl 61(%rsp), %edi
+. 5276 0x12345678 6
+. 0F B6 7C 24 3D C3
+
+movzbl 61(%rsp), %edx
+. 5277 0x12345678 6
+. 0F B6 54 24 3D C3
+
+movzbl 62(%rsp), %r8d
+. 5278 0x12345678 7
+. 44 0F B6 44 24 3E C3
+
+movzbl 64(%rsp), %ecx
+. 5279 0x12345678 6
+. 0F B6 4C 24 40 C3
+
+movzbl 67(%rsp), %ebx
+. 5280 0x12345678 6
+. 0F B6 5C 24 43 C3
+
+movzbl 67(%rsp), %edi
+. 5281 0x12345678 6
+. 0F B6 7C 24 43 C3
+
+movzbl 75(%rsp), %esi
+. 5282 0x12345678 6
+. 0F B6 74 24 4B C3
+
+movzbl 79(%rsp), %ecx
+. 5283 0x12345678 6
+. 0F B6 4C 24 4F C3
+
+movzbl 79(%rsp), %edi
+. 5284 0x12345678 6
+. 0F B6 7C 24 4F C3
+
+movzbl 79(%rsp), %edx
+. 5285 0x12345678 6
+. 0F B6 54 24 4F C3
+
+movzbl 79(%rsp), %r8d
+. 5286 0x12345678 7
+. 44 0F B6 44 24 4F C3
+
+movzbl 7(%rsp), %ebx
+. 5287 0x12345678 6
+. 0F B6 5C 24 07 C3
+
+movzbl 7(%rsp), %ecx
+. 5288 0x12345678 6
+. 0F B6 4C 24 07 C3
+
+movzbl 7(%rsp), %edi
+. 5289 0x12345678 6
+. 0F B6 7C 24 07 C3
+
+movzbl 7(%rsp), %edx
+. 5290 0x12345678 6
+. 0F B6 54 24 07 C3
+
+movzbl 7(%rsp), %r12d
+. 5291 0x12345678 7
+. 44 0F B6 64 24 07 C3
+
+movzbl 87(%rsp), %ebp
+. 5292 0x12345678 6
+. 0F B6 6C 24 57 C3
+
+movzbl 87(%rsp), %ebx
+. 5293 0x12345678 6
+. 0F B6 5C 24 57 C3
+
+movzbl 8(%rax), %eax
+. 5294 0x12345678 5
+. 0F B6 40 08 C3
+
+movzbl 8(%rax), %ecx
+. 5295 0x12345678 5
+. 0F B6 48 08 C3
+
+movzbl 8(%rax), %edi
+. 5296 0x12345678 5
+. 0F B6 78 08 C3
+
+movzbl 8(%rdi), %eax
+. 5297 0x12345678 5
+. 0F B6 47 08 C3
+
+movzbl 8(%rdi), %edi
+. 5298 0x12345678 5
+. 0F B6 7F 08 C3
+
+movzbl 8(%rdi), %esi
+. 5299 0x12345678 5
+. 0F B6 77 08 C3
+
+movzbl 8(%rdx), %ecx
+. 5300 0x12345678 5
+. 0F B6 4A 08 C3
+
+movzbl 8(%rsi), %eax
+. 5301 0x12345678 5
+. 0F B6 46 08 C3
+
+movzbl 8(%rsi), %edx
+. 5302 0x12345678 5
+. 0F B6 56 08 C3
+
+movzbl 95(%rsp), %edx
+. 5303 0x12345678 6
+. 0F B6 54 24 5F C3
+
+movzbl 96(%rsp), %eax
+. 5304 0x12345678 6
+. 0F B6 44 24 60 C3
+
+movzbl 96(%rsp), %ecx
+. 5305 0x12345678 6
+. 0F B6 4C 24 60 C3
+
+movzbl %ah, %ebx
+. 5306 0x12345678 4
+. 0F B6 DC C3
+
+movzbl %ah, %edx
+. 5307 0x12345678 4
+. 0F B6 D4 C3
+
+movzbl %al, %eax
+. 5308 0x12345678 4
+. 0F B6 C0 C3
+
+movzbl %al, %ebp
+. 5309 0x12345678 4
+. 0F B6 E8 C3
+
+movzbl %al, %ebx
+. 5310 0x12345678 4
+. 0F B6 D8 C3
+
+movzbl %al, %ecx
+. 5311 0x12345678 4
+. 0F B6 C8 C3
+
+movzbl %al, %edi
+. 5312 0x12345678 4
+. 0F B6 F8 C3
+
+movzbl %al, %edi
+. 5313 0x12345678 4
+. 0F B6 F8 C3
+
+movzbl %al,%edi
+. 5314 0x12345678 4
+. 0F B6 F8 C3
+
+movzbl %al, %edx
+. 5315 0x12345678 4
+. 0F B6 D0 C3
+
+movzbl %al, %edx
+. 5316 0x12345678 4
+. 0F B6 D0 C3
+
+movzbl %al, %esi
+. 5317 0x12345678 4
+. 0F B6 F0 C3
+
+movzbl %al, %r12d
+. 5318 0x12345678 5
+. 44 0F B6 E0 C3
+
+movzbl %al, %r13d
+. 5319 0x12345678 5
+. 44 0F B6 E8 C3
+
+movzbl %al, %r14d
+. 5320 0x12345678 5
+. 44 0F B6 F0 C3
+
+movzbl %al, %r15d
+. 5321 0x12345678 5
+. 44 0F B6 F8 C3
+
+movzbl %bh, %ebp
+. 5322 0x12345678 4
+. 0F B6 EF C3
+
+movzbl %bh, %ecx
+. 5323 0x12345678 4
+. 0F B6 CF C3
+
+movzbl %bh, %edi
+. 5324 0x12345678 4
+. 0F B6 FF C3
+
+movzbl %bh, %edx
+. 5325 0x12345678 4
+. 0F B6 D7 C3
+
+movzbl %bl, %eax
+. 5326 0x12345678 4
+. 0F B6 C3 C3
+
+movzbl %bl, %ebx
+. 5327 0x12345678 4
+. 0F B6 DB C3
+
+movzbl %bl, %ecx
+. 5328 0x12345678 4
+. 0F B6 CB C3
+
+movzbl %bl, %edi
+. 5329 0x12345678 4
+. 0F B6 FB C3
+
+movzbl %bl, %edx
+. 5330 0x12345678 4
+. 0F B6 D3 C3
+
+movzbl %bl, %esi
+. 5331 0x12345678 4
+. 0F B6 F3 C3
+
+movzbl %bpl, %eax
+. 5332 0x12345678 5
+. 40 0F B6 C5 C3
+
+movzbl %bpl, %ebp
+. 5333 0x12345678 5
+. 40 0F B6 ED C3
+
+movzbl %bpl, %ecx
+. 5334 0x12345678 5
+. 40 0F B6 CD C3
+
+movzbl %bpl, %edi
+. 5335 0x12345678 5
+. 40 0F B6 FD C3
+
+movzbl %bpl, %edx
+. 5336 0x12345678 5
+. 40 0F B6 D5 C3
+
+movzbl %bpl, %esi
+. 5337 0x12345678 5
+. 40 0F B6 F5 C3
+
+movzbl %bpl, %r9d
+. 5338 0x12345678 5
+. 44 0F B6 CD C3
+
+movzbl %ch, %ebx
+. 5339 0x12345678 4
+. 0F B6 DD C3
+
+movzbl %ch, %edi
+. 5340 0x12345678 4
+. 0F B6 FD C3
+
+movzbl %ch, %edx
+. 5341 0x12345678 4
+. 0F B6 D5 C3
+
+movzbl %cl, %eax
+. 5342 0x12345678 4
+. 0F B6 C1 C3
+
+movzbl %cl, %ebx
+. 5343 0x12345678 4
+. 0F B6 D9 C3
+
+movzbl %cl, %ecx
+. 5344 0x12345678 4
+. 0F B6 C9 C3
+
+movzbl %cl, %edx
+. 5345 0x12345678 4
+. 0F B6 D1 C3
+
+movzbl %cl, %esi
+. 5346 0x12345678 4
+. 0F B6 F1 C3
+
+movzbl %dh, %ebp
+. 5347 0x12345678 4
+. 0F B6 EE C3
+
+movzbl %dh, %edi
+. 5348 0x12345678 4
+. 0F B6 FE C3
+
+movzbl %dil, %eax
+. 5349 0x12345678 5
+. 40 0F B6 C7 C3
+
+movzbl %dil,%eax
+. 5350 0x12345678 5
+. 40 0F B6 C7 C3
+
+movzbl %dil, %edi
+. 5351 0x12345678 5
+. 40 0F B6 FF C3
+
+movzbl %dil, %edx
+. 5352 0x12345678 5
+. 40 0F B6 D7 C3
+
+movzbl %dl, %eax
+. 5353 0x12345678 4
+. 0F B6 C2 C3
+
+movzbl %dl, %ebx
+. 5354 0x12345678 4
+. 0F B6 DA C3
+
+movzbl %dl, %edi
+. 5355 0x12345678 4
+. 0F B6 FA C3
+
+movzbl %dl, %edx
+. 5356 0x12345678 4
+. 0F B6 D2 C3
+
+movzbl 14(%rip), %r8d
+. 5357 0x12345678 9
+. 44 0F B6 05 0E 00 00 00 C3
+
+movzbl 15(%rdx), %eax
+. 5358 0x12345678 5
+. 0F B6 42 0F C3
+
+movzbl 17(%rip), %eax
+. 5359 0x12345678 8
+. 0F B6 05 11 00 00 00 C3
+
+movzbl %r10b, %edx
+. 5360 0x12345678 5
+. 41 0F B6 D2 C3
+
+movzbl %r10b, %r10d
+. 5361 0x12345678 5
+. 45 0F B6 D2 C3
+
+movzbl %r11b, %edx
+. 5362 0x12345678 5
+. 41 0F B6 D3 C3
+
+movzbl %r11b, %r11d
+. 5363 0x12345678 5
+. 45 0F B6 DB C3
+
+movzbl %r12b, %eax
+. 5364 0x12345678 5
+. 41 0F B6 C4 C3
+
+movzbl %r12b, %ebx
+. 5365 0x12345678 5
+. 41 0F B6 DC C3
+
+movzbl %r12b, %edi
+. 5366 0x12345678 5
+. 41 0F B6 FC C3
+
+movzbl %r12b, %edx
+. 5367 0x12345678 5
+. 41 0F B6 D4 C3
+
+movzbl %r12b, %esi
+. 5368 0x12345678 5
+. 41 0F B6 F4 C3
+
+movzbl %r12b, %r13d
+. 5369 0x12345678 5
+. 45 0F B6 EC C3
+
+movzbl (%r12), %ebx
+. 5370 0x12345678 6
+. 41 0F B6 1C 24 C3
+
+movzbl %r13b, %eax
+. 5371 0x12345678 5
+. 41 0F B6 C5 C3
+
+movzbl %r13b, %ebx
+. 5372 0x12345678 5
+. 41 0F B6 DD C3
+
+movzbl %r13b, %edi
+. 5373 0x12345678 5
+. 41 0F B6 FD C3
+
+movzbl %r13b, %edx
+. 5374 0x12345678 5
+. 41 0F B6 D5 C3
+
+movzbl %r13b, %esi
+. 5375 0x12345678 5
+. 41 0F B6 F5 C3
+
+movzbl %r13b,%r12d
+. 5376 0x12345678 5
+. 45 0F B6 E5 C3
+
+movzbl %r14b, %eax
+. 5377 0x12345678 5
+. 41 0F B6 C6 C3
+
+movzbl %r14b, %ebp
+. 5378 0x12345678 5
+. 41 0F B6 EE C3
+
+movzbl %r14b, %ebx
+. 5379 0x12345678 5
+. 41 0F B6 DE C3
+
+movzbl %r14b, %edi
+. 5380 0x12345678 5
+. 41 0F B6 FE C3
+
+movzbl %r14b, %esi
+. 5381 0x12345678 5
+. 41 0F B6 F6 C3
+
+movzbl %r14b, %r14d
+. 5382 0x12345678 5
+. 45 0F B6 F6 C3
+
+movzbl %r15b, %ebx
+. 5383 0x12345678 5
+. 41 0F B6 DF C3
+
+movzbl %r15b, %ecx
+. 5384 0x12345678 5
+. 41 0F B6 CF C3
+
+movzbl %r15b, %edi
+. 5385 0x12345678 5
+. 41 0F B6 FF C3
+
+movzbl %r15b, %edx
+. 5386 0x12345678 5
+. 41 0F B6 D7 C3
+
+movzbl %r15b, %esi
+. 5387 0x12345678 5
+. 41 0F B6 F7 C3
+
+movzbl %r15b, %r12d
+. 5388 0x12345678 5
+. 45 0F B6 E7 C3
+
+movzbl %r8b, %r8d
+. 5389 0x12345678 5
+. 45 0F B6 C0 C3
+
+movzbl %r9b, %edx
+. 5390 0x12345678 5
+. 41 0F B6 D1 C3
+
+movzbl %r9b, %r9d
+. 5391 0x12345678 5
+. 45 0F B6 C9 C3
+
+movzbl (%rax,%rcx), %eax
+. 5392 0x12345678 5
+. 0F B6 04 08 C3
+
+movzbl (%rax,%rdx), %eax
+. 5393 0x12345678 5
+. 0F B6 04 10 C3
+
+movzbl (%rax,%rdx), %esi
+. 5394 0x12345678 5
+. 0F B6 34 10 C3
+
+movzbl (%rcx,%r12), %eax
+. 5395 0x12345678 6
+. 42 0F B6 04 21 C3
+
+movzbl (%rcx,%r12), %edx
+. 5396 0x12345678 6
+. 42 0F B6 14 21 C3
+
+movzbl (%rdi), %ecx
+. 5397 0x12345678 4
+. 0F B6 0F C3
+
+movzbl (%rdi,%rax), %eax
+. 5398 0x12345678 5
+. 0F B6 04 07 C3
+
+movzbl (%rdi,%rcx), %edx
+. 5399 0x12345678 5
+. 0F B6 14 0F C3
+
+movzbl (%rdi,%rdx), %edx
+. 5400 0x12345678 5
+. 0F B6 14 17 C3
+
+movzbl (%rdx), %eax
+. 5401 0x12345678 4
+. 0F B6 02 C3
+
+movzbl (%rdx,%rcx), %edx
+. 5402 0x12345678 5
+. 0F B6 14 0A C3
+
+movzbl (%rsi), %edx
+. 5403 0x12345678 4
+. 0F B6 16 C3
+
+movzbl (%rsi,%rdx), %edx
+. 5404 0x12345678 5
+. 0F B6 14 16 C3
+
+movzbl %sil, %edx
+. 5405 0x12345678 5
+. 40 0F B6 D6 C3
+
+movzbl %sil, %esi
+. 5406 0x12345678 5
+. 40 0F B6 F6 C3
+
+movzbl 6776(%rax), %eax
+. 5407 0x12345678 8
+. 0F B6 80 78 1A 00 00 C3
+
+movzbl 78(%rip), %eax
+. 5408 0x12345678 8
+. 0F B6 05 4E 00 00 00 C3
+
+movzbl 79(%rip), %ecx
+. 5409 0x12345678 8
+. 0F B6 0D 4F 00 00 00 C3
+
+movzbl 81(%rip), %esi
+. 5410 0x12345678 8
+. 0F B6 35 51 00 00 00 C3
+
+movzbl 83(%rip), %r8d
+. 5411 0x12345678 9
+. 44 0F B6 05 53 00 00 00 C3
+
+movzbq 32(%rsp), %rdi
+. 5412 0x12345678 7
+. 48 0F B6 7C 24 20 C3
+
+movzbq 64(%rsp), %rdi
+. 5413 0x12345678 7
+. 48 0F B6 7C 24 40 C3
+
+movzbq 96(%rsp), %rdi
+. 5414 0x12345678 7
+. 48 0F B6 7C 24 60 C3
+
+movzwl 24(%r12), %ebp
+. 5415 0x12345678 7
+. 41 0F B7 6C 24 18 C3
+
+movzwl 30(%r12), %ecx
+. 5416 0x12345678 7
+. 41 0F B7 4C 24 1E C3
+
+movzwl 30(%rbx), %eax
+. 5417 0x12345678 5
+. 0F B7 43 1E C3
+
+movzwl 30(%rcx), %eax
+. 5418 0x12345678 5
+. 0F B7 41 1E C3
+
+movzwl 8(%rax), %edi
+. 5419 0x12345678 5
+. 0F B7 78 08 C3
+
+movzwl 8(%rdi), %edi
+. 5420 0x12345678 5
+. 0F B7 7F 08 C3
+
+movzwl 8(%rdi), %esi
+. 5421 0x12345678 5
+. 0F B7 77 08 C3
+
+movzwl 8(%rsi), %eax
+. 5422 0x12345678 5
+. 0F B7 46 08 C3
+
+movzwl %ax, %eax
+. 5423 0x12345678 4
+. 0F B7 C0 C3
+
+movzwl %ax, %edx
+. 5424 0x12345678 4
+. 0F B7 D0 C3
+
+movzwl %ax,%edx
+. 5425 0x12345678 4
+. 0F B7 D0 C3
+
+movzwl %ax,%r14d
+. 5426 0x12345678 5
+. 44 0F B7 F0 C3
+
+movzwl %bp,%eax
+. 5427 0x12345678 4
+. 0F B7 C5 C3
+
+movzwl %cx, %eax
+. 5428 0x12345678 4
+. 0F B7 C1 C3
+
+movzwl %cx, %ecx
+. 5429 0x12345678 4
+. 0F B7 C9 C3
+
+movzwl %di, %edi
+. 5430 0x12345678 4
+. 0F B7 FF C3
+
+movzwl %dx, %edx
+. 5431 0x12345678 4
+. 0F B7 D2 C3
+
+movzwl (%r14,%rbx,4), %eax
+. 5432 0x12345678 6
+. 41 0F B7 04 9E C3
+
+movzwl %r15w,%eax
+. 5433 0x12345678 5
+. 41 0F B7 C7 C3
+
+movzwl %r8w, %ecx
+. 5434 0x12345678 5
+. 41 0F B7 C8 C3
+
+movzwl %r8w, %r8d
+. 5435 0x12345678 5
+. 45 0F B7 C0 C3
+
+movzwl (%rbx,%rax), %edx
+. 5436 0x12345678 5
+. 0F B7 14 03 C3
+
+movzwl (%rdi,%rbx,4), %eax
+. 5437 0x12345678 5
+. 0F B7 04 9F C3
+
+movzwl %si, %edi
+. 5438 0x12345678 4
+. 0F B7 FE C3
+
+movzwl %si, %esi
+. 5439 0x12345678 4
+. 0F B7 F6 C3
+
+movzwq 2(%rax,%rbx,8), %rdi
+. 5440 0x12345678 7
+. 48 0F B7 7C D8 02 C3
+
+movzwq 30(%r14), %rdx
+. 5441 0x12345678 6
+. 49 0F B7 56 1E C3
+
+movzwq 30(%rbx), %rdx
+. 5442 0x12345678 6
+. 48 0F B7 53 1E C3
+
+movzwq (%rax,%rbx,8), %rsi
+. 5443 0x12345678 6
+. 48 0F B7 34 D8 C3
+
+negl %eax
+. 5444 0x12345678 3
+. F7 D8 C3
+
+negl %edi
+. 5445 0x12345678 3
+. F7 DF C3
+
+negq %rsi
+. 5446 0x12345678 4
+. 48 F7 DE C3
+
+notl %eax
+. 5447 0x12345678 3
+. F7 D0 C3
+
+notl %ecx
+. 5448 0x12345678 3
+. F7 D1 C3
+
+notl %edi
+. 5449 0x12345678 3
+. F7 D7 C3
+
+notl %edx
+. 5450 0x12345678 3
+. F7 D2 C3
+
+notl %esi
+. 5451 0x12345678 3
+. F7 D6 C3
+
+notl %r10d
+. 5452 0x12345678 4
+. 41 F7 D2 C3
+
+notl %r8d
+. 5453 0x12345678 4
+. 41 F7 D0 C3
+
+notl %r9d
+. 5454 0x12345678 4
+. 41 F7 D1 C3
+
+notq %rdi
+. 5455 0x12345678 4
+. 48 F7 D7 C3
+
+orb $-128, %r14b
+. 5456 0x12345678 5
+. 41 80 CE 80 C3
+
+orb 46(%rsp), %al
+. 5457 0x12345678 5
+. 0A 44 24 2E C3
+
+orb 8(%rdx), %dil
+. 5458 0x12345678 5
+. 40 0A 7A 08 C3
+
+orb %cl, %al
+. 5459 0x12345678 3
+. 08 C8 C3
+
+orb %cl, %dl
+. 5460 0x12345678 3
+. 08 CA C3
+
+orb %dl, %al
+. 5461 0x12345678 3
+. 08 D0 C3
+
+orb %r12b, %al
+. 5462 0x12345678 4
+. 44 08 E0 C3
+
+orl $1024, %r14d
+. 5463 0x12345678 8
+. 41 81 CE 00 04 00 00 C3
+
+orl $1090519040, %eax
+. 5464 0x12345678 6
+. 0D 00 00 00 41 C3
+
+orl $1090519040, %edx
+. 5465 0x12345678 7
+. 81 CA 00 00 00 41 C3
+
+orl $1, 28(%rsp)
+. 5466 0x12345678 6
+. 83 4C 24 1C 01 C3
+
+orl $16, 28(%rsp)
+. 5467 0x12345678 6
+. 83 4C 24 1C 10 C3
+
+orl $16384, %r14d
+. 5468 0x12345678 8
+. 41 81 CE 00 40 00 00 C3
+
+orl $16, %eax
+. 5469 0x12345678 4
+. 83 C8 10 C3
+
+orl $1, %edx
+. 5470 0x12345678 4
+. 83 CA 01 C3
+
+orl $1, %r14d
+. 5471 0x12345678 5
+. 41 83 CE 01 C3
+
+orl $2048, %r14d
+. 5472 0x12345678 8
+. 41 81 CE 00 08 00 00 C3
+
+orl $2, 28(%rsp)
+. 5473 0x12345678 6
+. 83 4C 24 1C 02 C3
+
+orl $256, %r14d
+. 5474 0x12345678 8
+. 41 81 CE 00 01 00 00 C3
+
+orl $2, %edx
+. 5475 0x12345678 4
+. 83 CA 02 C3
+
+orl $2, %r14d
+. 5476 0x12345678 5
+. 41 83 CE 02 C3
+
+orl $32768, %r14d
+. 5477 0x12345678 8
+. 41 81 CE 00 80 00 00 C3
+
+orl $32, %eax
+. 5478 0x12345678 4
+. 83 C8 20 C3
+
+orl $4096, %r14d
+. 5479 0x12345678 8
+. 41 81 CE 00 10 00 00 C3
+
+orl $4, 28(%rsp)
+. 5480 0x12345678 6
+. 83 4C 24 1C 04 C3
+
+orl $4, %eax
+. 5481 0x12345678 4
+. 83 C8 04 C3
+
+orl $4, %r14d
+. 5482 0x12345678 5
+. 41 83 CE 04 C3
+
+orl -4(%rsp), %eax
+. 5483 0x12345678 5
+. 0B 44 24 FC C3
+
+orl $512, %r14d
+. 5484 0x12345678 8
+. 41 81 CE 00 02 00 00 C3
+
+orl $64, %eax
+. 5485 0x12345678 4
+. 83 C8 40 C3
+
+orl $8192, %r14d
+. 5486 0x12345678 8
+. 41 81 CE 00 20 00 00 C3
+
+orl $8, %eax
+. 5487 0x12345678 4
+. 83 C8 08 C3
+
+orl 8(%rdx), %edi
+. 5488 0x12345678 4
+. 0B 7A 08 C3
+
+orl %eax, %ebp
+. 5489 0x12345678 3
+. 09 C5 C3
+
+orl %eax, %ecx
+. 5490 0x12345678 3
+. 09 C1 C3
+
+orl %eax, %edi
+. 5491 0x12345678 3
+. 09 C7 C3
+
+orl %eax, %edx
+. 5492 0x12345678 3
+. 09 C2 C3
+
+orl %eax, %r12d
+. 5493 0x12345678 4
+. 41 09 C4 C3
+
+orl %ebx, %eax
+. 5494 0x12345678 3
+. 09 D8 C3
+
+orl %ebx, %ecx
+. 5495 0x12345678 3
+. 09 D9 C3
+
+orl %ebx, %edx
+. 5496 0x12345678 3
+. 09 DA C3
+
+orl %ebx, %esi
+. 5497 0x12345678 3
+. 09 DE C3
+
+orl %ecx, %eax
+. 5498 0x12345678 3
+. 09 C8 C3
+
+orl %ecx, %edx
+. 5499 0x12345678 3
+. 09 CA C3
+
+orl %ecx, %esi
+. 5500 0x12345678 3
+. 09 CE C3
+
+orl %edi, %eax
+. 5501 0x12345678 3
+. 09 F8 C3
+
+orl %edi, %ecx
+. 5502 0x12345678 3
+. 09 F9 C3
+
+orl %edi, %edx
+. 5503 0x12345678 3
+. 09 FA C3
+
+orl %edi, %esi
+. 5504 0x12345678 3
+. 09 FE C3
+
+orl %edx, -20(%rsp)
+. 5505 0x12345678 5
+. 09 54 24 EC C3
+
+orl %edx, -8(%rsp)
+. 5506 0x12345678 5
+. 09 54 24 F8 C3
+
+orl %edx, %eax
+. 5507 0x12345678 3
+. 09 D0 C3
+
+orl %edx, %ecx
+. 5508 0x12345678 3
+. 09 D1 C3
+
+orl %edx, %edi
+. 5509 0x12345678 3
+. 09 D7 C3
+
+orl %esi, %eax
+. 5510 0x12345678 3
+. 09 F0 C3
+
+orl %esi, %ecx
+. 5511 0x12345678 3
+. 09 F1 C3
+
+orl %esi, %edi
+. 5512 0x12345678 3
+. 09 F7 C3
+
+orl %esi, %edx
+. 5513 0x12345678 3
+. 09 F2 C3
+
+orl %r10d, %eax
+. 5514 0x12345678 4
+. 44 09 D0 C3
+
+orl %r11d, %eax
+. 5515 0x12345678 4
+. 44 09 D8 C3
+
+orl %r11d, %ecx
+. 5516 0x12345678 4
+. 44 09 D9 C3
+
+orl %r11d, %edx
+. 5517 0x12345678 4
+. 44 09 DA C3
+
+orl %r12d, %eax
+. 5518 0x12345678 4
+. 44 09 E0 C3
+
+orl %r12d, %edi
+. 5519 0x12345678 4
+. 44 09 E7 C3
+
+orl %r13d, %eax
+. 5520 0x12345678 4
+. 44 09 E8 C3
+
+orl %r15d, %eax
+. 5521 0x12345678 4
+. 44 09 F8 C3
+
+orl %r15d, %edx
+. 5522 0x12345678 4
+. 44 09 FA C3
+
+orl %r8d, %eax
+. 5523 0x12345678 4
+. 44 09 C0 C3
+
+orl %r8d, %edi
+. 5524 0x12345678 4
+. 44 09 C7 C3
+
+orl %r9d, %eax
+. 5525 0x12345678 4
+. 44 09 C8 C3
+
+orl %r9d, %ecx
+. 5526 0x12345678 4
+. 44 09 C9 C3
+
+orq 272(%rsp), %rcx
+. 5527 0x12345678 9
+. 48 0B 8C 24 10 01 00 00 C3
+
+orq %rax, %rdi
+. 5528 0x12345678 4
+. 48 09 C7 C3
+
+orq %rcx, %rax
+. 5529 0x12345678 4
+. 48 09 C8 C3
+
+orq %rdi, %rax
+. 5530 0x12345678 4
+. 48 09 F8 C3
+
+orq %rdx, %rax
+. 5531 0x12345678 4
+. 48 09 D0 C3
+
+orq %rsi, %rax
+. 5532 0x12345678 4
+. 48 09 F0 C3
+
+orq %rsi, %rdi
+. 5533 0x12345678 4
+. 48 09 F7 C3
+
+orw 8(%rdx), %di
+. 5534 0x12345678 5
+. 66 0B 7A 08 C3
+
+popq %r12
+. 5535 0x12345678 3
+. 41 5C C3
+
+popq %r13
+. 5536 0x12345678 3
+. 41 5D C3
+
+popq %r14
+. 5537 0x12345678 3
+. 41 5E C3
+
+popq %r15
+. 5538 0x12345678 3
+. 41 5F C3
+
+popq %rbp
+. 5539 0x12345678 2
+. 5D C3
+
+popq %rbx
+. 5540 0x12345678 2
+. 5B C3
+
+pushq %r12
+. 5541 0x12345678 3
+. 41 54 C3
+
+pushq %r13
+. 5542 0x12345678 3
+. 41 55 C3
+
+pushq %r14
+. 5543 0x12345678 3
+. 41 56 C3
+
+pushq %r15
+. 5544 0x12345678 3
+. 41 57 C3
+
+pushq %rbp
+. 5545 0x12345678 2
+. 55 C3
+
+pushq %rbx
+. 5546 0x12345678 2
+. 53 C3
+
+rep ; ret
+. 5547 0x12345678 2
+. F3 C3
+
+ret
+. 5548 0x12345678 1
+. C3
+
+rorl %cl, %r12d
+. 5549 0x12345678 4
+. 41 D3 CC C3
+
+sall $10, %edi
+. 5550 0x12345678 4
+. C1 E7 0A C3
+
+sall $15, %esi
+. 5551 0x12345678 4
+. C1 E6 0F C3
+
+sall $15, %r8d
+. 5552 0x12345678 5
+. 41 C1 E0 0F C3
+
+sall $16, %eax
+. 5553 0x12345678 4
+. C1 E0 10 C3
+
+sall $16, %ebp
+. 5554 0x12345678 4
+. C1 E5 10 C3
+
+sall $16, %ebx
+. 5555 0x12345678 4
+. C1 E3 10 C3
+
+sall $16, %ecx
+. 5556 0x12345678 4
+. C1 E1 10 C3
+
+sall $16, %edi
+. 5557 0x12345678 4
+. C1 E7 10 C3
+
+sall $16, %edx
+. 5558 0x12345678 4
+. C1 E2 10 C3
+
+sall $16, %esi
+. 5559 0x12345678 4
+. C1 E6 10 C3
+
+sall $16, %r11d
+. 5560 0x12345678 5
+. 41 C1 E3 10 C3
+
+sall $16, %r12d
+. 5561 0x12345678 5
+. 41 C1 E4 10 C3
+
+sall $16, %r8d
+. 5562 0x12345678 5
+. 41 C1 E0 10 C3
+
+sall $16, %r9d
+. 5563 0x12345678 5
+. 41 C1 E1 10 C3
+
+sall $24, %eax
+. 5564 0x12345678 4
+. C1 E0 18 C3
+
+sall $24, %ecx
+. 5565 0x12345678 4
+. C1 E1 18 C3
+
+sall $24, %edi
+. 5566 0x12345678 4
+. C1 E7 18 C3
+
+sall $24, %edx
+. 5567 0x12345678 4
+. C1 E2 18 C3
+
+sall $24, %esi
+. 5568 0x12345678 4
+. C1 E6 18 C3
+
+sall $25, %edi
+. 5569 0x12345678 4
+. C1 E7 19 C3
+
+sall $2, %edi
+. 5570 0x12345678 4
+. C1 E7 02 C3
+
+sall $3, %edi
+. 5571 0x12345678 4
+. C1 E7 03 C3
+
+sall $3, %edx
+. 5572 0x12345678 4
+. C1 E2 03 C3
+
+sall $7, %eax
+. 5573 0x12345678 4
+. C1 E0 07 C3
+
+sall $7, %edi
+. 5574 0x12345678 4
+. C1 E7 07 C3
+
+sall $7, %r10d
+. 5575 0x12345678 5
+. 41 C1 E2 07 C3
+
+sall $8, -4(%rsp)
+. 5576 0x12345678 6
+. C1 64 24 FC 08 C3
+
+sall $8, %eax
+. 5577 0x12345678 4
+. C1 E0 08 C3
+
+sall $8, %ebp
+. 5578 0x12345678 4
+. C1 E5 08 C3
+
+sall $8, %ebx
+. 5579 0x12345678 4
+. C1 E3 08 C3
+
+sall $8, %edi
+. 5580 0x12345678 4
+. C1 E7 08 C3
+
+sall $8, %edx
+. 5581 0x12345678 4
+. C1 E2 08 C3
+
+sall $8, %r8d
+. 5582 0x12345678 5
+. 41 C1 E0 08 C3
+
+sall $8, %r9d
+. 5583 0x12345678 5
+. 41 C1 E1 08 C3
+
+sall %cl, %eax
+. 5584 0x12345678 3
+. D3 E0 C3
+
+sall %cl, %edi
+. 5585 0x12345678 3
+. D3 E7 C3
+
+sall %cl, %edx
+. 5586 0x12345678 3
+. D3 E2 C3
+
+sall %cl, %esi
+. 5587 0x12345678 3
+. D3 E6 C3
+
+sall %cl, %r9d
+. 5588 0x12345678 4
+. 41 D3 E1 C3
+
+salq $2, %rax
+. 5589 0x12345678 5
+. 48 C1 E0 02 C3
+
+salq $2, %rbx
+. 5590 0x12345678 5
+. 48 C1 E3 02 C3
+
+salq $2, %rdi
+. 5591 0x12345678 5
+. 48 C1 E7 02 C3
+
+salq $2, %rdx
+. 5592 0x12345678 5
+. 48 C1 E2 02 C3
+
+salq $32, %rax
+. 5593 0x12345678 5
+. 48 C1 E0 20 C3
+
+salq $32, %rdi
+. 5594 0x12345678 5
+. 48 C1 E7 20 C3
+
+salq $3, %rdi
+. 5595 0x12345678 5
+. 48 C1 E7 03 C3
+
+salq $4, %rdi
+. 5596 0x12345678 5
+. 48 C1 E7 04 C3
+
+sarl $11, %eax
+. 5597 0x12345678 4
+. C1 F8 0B C3
+
+sarl $16, %eax
+. 5598 0x12345678 4
+. C1 F8 10 C3
+
+sarl $16, %ecx
+. 5599 0x12345678 4
+. C1 F9 10 C3
+
+sarl $16, %edx
+. 5600 0x12345678 4
+. C1 FA 10 C3
+
+sarl $21, %edi
+. 5601 0x12345678 4
+. C1 FF 15 C3
+
+sarl $24, %eax
+. 5602 0x12345678 4
+. C1 F8 18 C3
+
+sarl $24, %edx
+. 5603 0x12345678 4
+. C1 FA 18 C3
+
+sarl $24, %esi
+. 5604 0x12345678 4
+. C1 FE 18 C3
+
+sarl $2, %eax
+. 5605 0x12345678 4
+. C1 F8 02 C3
+
+sarl $3, %eax
+. 5606 0x12345678 4
+. C1 F8 03 C3
+
+sarl $4, %eax
+. 5607 0x12345678 4
+. C1 F8 04 C3
+
+sarl $6, %ebp
+. 5608 0x12345678 4
+. C1 FD 06 C3
+
+sarl $6, %edx
+. 5609 0x12345678 4
+. C1 FA 06 C3
+
+sarl $8, %edi
+. 5610 0x12345678 4
+. C1 FF 08 C3
+
+sarl $9, %edi
+. 5611 0x12345678 4
+. C1 FF 09 C3
+
+sarl %cl, %eax
+. 5612 0x12345678 3
+. D3 F8 C3
+
+sarl %cl, %edi
+. 5613 0x12345678 3
+. D3 FF C3
+
+sarl %cl, %edx
+. 5614 0x12345678 3
+. D3 FA C3
+
+sarl %cl, %r8d
+. 5615 0x12345678 4
+. 41 D3 F8 C3
+
+sarl %eax
+. 5616 0x12345678 3
+. D1 F8 C3
+
+sarl %edi
+. 5617 0x12345678 3
+. D1 FF C3
+
+sbbl %eax, %eax
+. 5618 0x12345678 3
+. 19 C0 C3
+
+sbbl %ebp, %ebp
+. 5619 0x12345678 3
+. 19 ED C3
+
+sbbl %ebx, %ebx
+. 5620 0x12345678 3
+. 19 DB C3
+
+sbbl %ecx, %ecx
+. 5621 0x12345678 3
+. 19 C9 C3
+
+sbbl %edi, %edi
+. 5622 0x12345678 3
+. 19 FF C3
+
+sbbl %edx, %edx
+. 5623 0x12345678 3
+. 19 D2 C3
+
+sbbl %esi, %esi
+. 5624 0x12345678 3
+. 19 F6 C3
+
+sbbl %r10d, %r10d
+. 5625 0x12345678 4
+. 45 19 D2 C3
+
+sbbl %r12d, %r12d
+. 5626 0x12345678 4
+. 45 19 E4 C3
+
+sbbl %r13d, %r13d
+. 5627 0x12345678 4
+. 45 19 ED C3
+
+sbbl %r8d, %r8d
+. 5628 0x12345678 4
+. 45 19 C0 C3
+
+sbbl %r9d, %r9d
+. 5629 0x12345678 4
+. 45 19 C9 C3
+
+sbbq %rdi, %rdi
+. 5630 0x12345678 4
+. 48 19 FF C3
+
+seta %dl
+. 5631 0x12345678 4
+. 0F 97 C2 C3
+
+setb %al
+. 5632 0x12345678 4
+. 0F 92 C0 C3
+
+setb %dil
+. 5633 0x12345678 5
+. 40 0F 92 C7 C3
+
+setb %dl
+. 5634 0x12345678 4
+. 0F 92 C2 C3
+
+setbe 283(%rsp)
+. 5635 0x12345678 9
+. 0F 96 84 24 1B 01 00 00 C3
+
+setbe 46(%rsp)
+. 5636 0x12345678 6
+. 0F 96 44 24 2E C3
+
+setbe %al
+. 5637 0x12345678 4
+. 0F 96 C0 C3
+
+setbe %cl
+. 5638 0x12345678 4
+. 0F 96 C1 C3
+
+setbe %dil
+. 5639 0x12345678 5
+. 40 0F 96 C7 C3
+
+setbe %dl
+. 5640 0x12345678 4
+. 0F 96 C2 C3
+
+sete 123(%rsp)
+. 5641 0x12345678 6
+. 0F 94 44 24 7B C3
+
+sete 15(%rsp)
+. 5642 0x12345678 6
+. 0F 94 44 24 0F C3
+
+sete 171(%rsp)
+. 5643 0x12345678 9
+. 0F 94 84 24 AB 00 00 00 C3
+
+sete 183(%rsp)
+. 5644 0x12345678 9
+. 0F 94 84 24 B7 00 00 00 C3
+
+sete 239(%rsp)
+. 5645 0x12345678 9
+. 0F 94 84 24 EF 00 00 00 C3
+
+sete %al
+. 5646 0x12345678 4
+. 0F 94 C0 C3
+
+sete %dil
+. 5647 0x12345678 5
+. 40 0F 94 C7 C3
+
+sete %dl
+. 5648 0x12345678 4
+. 0F 94 C2 C3
+
+sete %r12b
+. 5649 0x12345678 5
+. 41 0F 94 C4 C3
+
+sete %r13b
+. 5650 0x12345678 5
+. 41 0F 94 C5 C3
+
+sete %r15b
+. 5651 0x12345678 5
+. 41 0F 94 C7 C3
+
+sete %sil
+. 5652 0x12345678 5
+. 40 0F 94 C6 C3
+
+setge %al
+. 5653 0x12345678 4
+. 0F 9D C0 C3
+
+setge %dl
+. 5654 0x12345678 4
+. 0F 9D C2 C3
+
+setl 115(%rsp)
+. 5655 0x12345678 6
+. 0F 9C 44 24 73 C3
+
+setl %dil
+. 5656 0x12345678 5
+. 40 0F 9C C7 C3
+
+setle %al
+. 5657 0x12345678 4
+. 0F 9E C0 C3
+
+setle %dil
+. 5658 0x12345678 5
+. 40 0F 9E C7 C3
+
+setne %al
+. 5659 0x12345678 4
+. 0F 95 C0 C3
+
+setne %cl
+. 5660 0x12345678 4
+. 0F 95 C1 C3
+
+setne %dil
+. 5661 0x12345678 5
+. 40 0F 95 C7 C3
+
+setne %dl
+. 5662 0x12345678 4
+. 0F 95 C2 C3
+
+setne %r13b
+. 5663 0x12345678 5
+. 41 0F 95 C5 C3
+
+setne %sil
+. 5664 0x12345678 5
+. 40 0F 95 C6 C3
+
+setnp %dl
+. 5665 0x12345678 4
+. 0F 9B C2 C3
+
+sets %al
+. 5666 0x12345678 4
+. 0F 98 C0 C3
+
+shrb $2, %al
+. 5667 0x12345678 4
+. C0 E8 02 C3
+
+shrb $3, %al
+. 5668 0x12345678 4
+. C0 E8 03 C3
+
+shrb $3, %cl
+. 5669 0x12345678 4
+. C0 E9 03 C3
+
+shrb $3, %dil
+. 5670 0x12345678 5
+. 40 C0 EF 03 C3
+
+shrb $3, %dl
+. 5671 0x12345678 4
+. C0 EA 03 C3
+
+shrb $4, %al
+. 5672 0x12345678 4
+. C0 E8 04 C3
+
+shrb $4, %cl
+. 5673 0x12345678 4
+. C0 E9 04 C3
+
+shrb $6, %dl
+. 5674 0x12345678 4
+. C0 EA 06 C3
+
+shrb %al
+. 5675 0x12345678 3
+. D0 E8 C3
+
+shrb %bl
+. 5676 0x12345678 3
+. D0 EB C3
+
+shrb %dl
+. 5677 0x12345678 3
+. D0 EA C3
+
+shrl $10, %eax
+. 5678 0x12345678 4
+. C1 E8 0A C3
+
+shrl $10, %edi
+. 5679 0x12345678 4
+. C1 EF 0A C3
+
+shrl $10, %esi
+. 5680 0x12345678 4
+. C1 EE 0A C3
+
+shrl $11, %eax
+. 5681 0x12345678 4
+. C1 E8 0B C3
+
+shrl $11, %ebx
+. 5682 0x12345678 4
+. C1 EB 0B C3
+
+shrl $11, %ecx
+. 5683 0x12345678 4
+. C1 E9 0B C3
+
+shrl $12, %eax
+. 5684 0x12345678 4
+. C1 E8 0C C3
+
+shrl $12, %edx
+. 5685 0x12345678 4
+. C1 EA 0C C3
+
+shrl $13, %eax
+. 5686 0x12345678 4
+. C1 E8 0D C3
+
+shrl $14, %edi
+. 5687 0x12345678 4
+. C1 EF 0E C3
+
+shrl $16, %eax
+. 5688 0x12345678 4
+. C1 E8 10 C3
+
+shrl $16, %ebx
+. 5689 0x12345678 4
+. C1 EB 10 C3
+
+shrl $16, %ecx
+. 5690 0x12345678 4
+. C1 E9 10 C3
+
+shrl $16, %edi
+. 5691 0x12345678 4
+. C1 EF 10 C3
+
+shrl $16, %edx
+. 5692 0x12345678 4
+. C1 EA 10 C3
+
+shrl $16, %esi
+. 5693 0x12345678 4
+. C1 EE 10 C3
+
+shrl $16, %r11d
+. 5694 0x12345678 5
+. 41 C1 EB 10 C3
+
+shrl $16, %r8d
+. 5695 0x12345678 5
+. 41 C1 E8 10 C3
+
+shrl $16, %r9d
+. 5696 0x12345678 5
+. 41 C1 E9 10 C3
+
+shrl $20, %eax
+. 5697 0x12345678 4
+. C1 E8 14 C3
+
+shrl $20, %ebx
+. 5698 0x12345678 4
+. C1 EB 14 C3
+
+shrl $20, %edx
+. 5699 0x12345678 4
+. C1 EA 14 C3
+
+shrl $20, %r12d
+. 5700 0x12345678 5
+. 41 C1 EC 14 C3
+
+shrl $21, %eax
+. 5701 0x12345678 4
+. C1 E8 15 C3
+
+shrl $21, %ebp
+. 5702 0x12345678 4
+. C1 ED 15 C3
+
+shrl $21, %edi
+. 5703 0x12345678 4
+. C1 EF 15 C3
+
+shrl $22, %ebp
+. 5704 0x12345678 4
+. C1 ED 16 C3
+
+shrl $24, %eax
+. 5705 0x12345678 4
+. C1 E8 18 C3
+
+shrl $24, %ecx
+. 5706 0x12345678 4
+. C1 E9 18 C3
+
+shrl $24, %edi
+. 5707 0x12345678 4
+. C1 EF 18 C3
+
+shrl $24, %edx
+. 5708 0x12345678 4
+. C1 EA 18 C3
+
+shrl $24, %esi
+. 5709 0x12345678 4
+. C1 EE 18 C3
+
+shrl $25, %eax
+. 5710 0x12345678 4
+. C1 E8 19 C3
+
+shrl $25, %ebx
+. 5711 0x12345678 4
+. C1 EB 19 C3
+
+shrl $26, %eax
+. 5712 0x12345678 4
+. C1 E8 1A C3
+
+shrl $28, %ebp
+. 5713 0x12345678 4
+. C1 ED 1C C3
+
+shrl $28, %ecx
+. 5714 0x12345678 4
+. C1 E9 1C C3
+
+shrl $28, %edi
+. 5715 0x12345678 4
+. C1 EF 1C C3
+
+shrl $28, %edx
+. 5716 0x12345678 4
+. C1 EA 1C C3
+
+shrl $28, %esi
+. 5717 0x12345678 4
+. C1 EE 1C C3
+
+shrl $28, %r15d
+. 5718 0x12345678 5
+. 41 C1 EF 1C C3
+
+shrl $2, %eax
+. 5719 0x12345678 4
+. C1 E8 02 C3
+
+shrl $31, %eax
+. 5720 0x12345678 4
+. C1 E8 1F C3
+
+shrl $31, %ecx
+. 5721 0x12345678 4
+. C1 E9 1F C3
+
+shrl $31, %edi
+. 5722 0x12345678 4
+. C1 EF 1F C3
+
+shrl $31, %edx
+. 5723 0x12345678 4
+. C1 EA 1F C3
+
+shrl $3, %ebp
+. 5724 0x12345678 4
+. C1 ED 03 C3
+
+shrl $3, %edx
+. 5725 0x12345678 4
+. C1 EA 03 C3
+
+shrl $3, %r12d
+. 5726 0x12345678 5
+. 41 C1 EC 03 C3
+
+shrl $4, %eax
+. 5727 0x12345678 4
+. C1 E8 04 C3
+
+shrl $4, %edi
+. 5728 0x12345678 4
+. C1 EF 04 C3
+
+shrl $4, %edx
+. 5729 0x12345678 4
+. C1 EA 04 C3
+
+shrl $4, %r13d
+. 5730 0x12345678 5
+. 41 C1 ED 04 C3
+
+shrl $5, %eax
+. 5731 0x12345678 4
+. C1 E8 05 C3
+
+shrl $6, %eax
+. 5732 0x12345678 4
+. C1 E8 06 C3
+
+shrl $6, %ecx
+. 5733 0x12345678 4
+. C1 E9 06 C3
+
+shrl $6, %edi
+. 5734 0x12345678 4
+. C1 EF 06 C3
+
+shrl $7, %eax
+. 5735 0x12345678 4
+. C1 E8 07 C3
+
+shrl $7, %ecx
+. 5736 0x12345678 4
+. C1 E9 07 C3
+
+shrl $8, %eax
+. 5737 0x12345678 4
+. C1 E8 08 C3
+
+shrl $8, %ecx
+. 5738 0x12345678 4
+. C1 E9 08 C3
+
+shrl $8, %edx
+. 5739 0x12345678 4
+. C1 EA 08 C3
+
+shrl $8, %r13d
+. 5740 0x12345678 5
+. 41 C1 ED 08 C3
+
+shrl $8, %r8d
+. 5741 0x12345678 5
+. 41 C1 E8 08 C3
+
+shrl $8, %r9d
+. 5742 0x12345678 5
+. 41 C1 E9 08 C3
+
+shrl $9, %eax
+. 5743 0x12345678 4
+. C1 E8 09 C3
+
+shrl $9, %edi
+. 5744 0x12345678 4
+. C1 EF 09 C3
+
+shrl %cl, %eax
+. 5745 0x12345678 3
+. D3 E8 C3
+
+shrl %cl, %edi
+. 5746 0x12345678 3
+. D3 EF C3
+
+shrl %eax
+. 5747 0x12345678 3
+. D1 E8 C3
+
+shrl %ebx
+. 5748 0x12345678 3
+. D1 EB C3
+
+shrl %ecx
+. 5749 0x12345678 3
+. D1 E9 C3
+
+shrl %edi
+. 5750 0x12345678 3
+. D1 EF C3
+
+shrl %edx
+. 5751 0x12345678 3
+. D1 EA C3
+
+shrl %r11d
+. 5752 0x12345678 4
+. 41 D1 EB C3
+
+shrl %r12d
+. 5753 0x12345678 4
+. 41 D1 EC C3
+
+shrl %r8d
+. 5754 0x12345678 4
+. 41 D1 E8 C3
+
+shrq $32, %r10
+. 5755 0x12345678 5
+. 49 C1 EA 20 C3
+
+shrq $32, %r11
+. 5756 0x12345678 5
+. 49 C1 EB 20 C3
+
+shrq $32, %r8
+. 5757 0x12345678 5
+. 49 C1 E8 20 C3
+
+shrq $32, %r9
+. 5758 0x12345678 5
+. 49 C1 E9 20 C3
+
+shrq $32, %rax
+. 5759 0x12345678 5
+. 48 C1 E8 20 C3
+
+shrq $32, %rbx
+. 5760 0x12345678 5
+. 48 C1 EB 20 C3
+
+shrq $32, %rcx
+. 5761 0x12345678 5
+. 48 C1 E9 20 C3
+
+shrq $32, %rdi
+. 5762 0x12345678 5
+. 48 C1 EF 20 C3
+
+shrq $32, %rdx
+. 5763 0x12345678 5
+. 48 C1 EA 20 C3
+
+shrq $32, %rsi
+. 5764 0x12345678 5
+. 48 C1 EE 20 C3
+
+shrq $63, %rax
+. 5765 0x12345678 5
+. 48 C1 E8 3F C3
+
+subb 8(%rdx), %dil
+. 5766 0x12345678 5
+. 40 2A 7A 08 C3
+
+subb %bl, %cl
+. 5767 0x12345678 3
+. 28 D9 C3
+
+subb %cl, %bl
+. 5768 0x12345678 3
+. 28 CB C3
+
+subb %dl, %al
+. 5769 0x12345678 3
+. 28 D0 C3
+
+subb %dl, %dil
+. 5770 0x12345678 4
+. 40 28 D7 C3
+
+subb %dl, %r8b
+. 5771 0x12345678 4
+. 41 28 D0 C3
+
+subb %dl, %sil
+. 5772 0x12345678 4
+. 40 28 D6 C3
+
+subl $100, %eax
+. 5773 0x12345678 4
+. 83 E8 64 C3
+
+subl $12, %eax
+. 5774 0x12345678 4
+. 83 E8 0C C3
+
+subl $144, %eax
+. 5775 0x12345678 6
+. 2D 90 00 00 00 C3
+
+subl $144, %ebx
+. 5776 0x12345678 7
+. 81 EB 90 00 00 00 C3
+
+subl $192, %eax
+. 5777 0x12345678 6
+. 2D C0 00 00 00 C3
+
+subl $192, %ecx
+. 5778 0x12345678 7
+. 81 E9 C0 00 00 00 C3
+
+subl $192, %edx
+. 5779 0x12345678 7
+. 81 EA C0 00 00 00 C3
+
+subl $200, %ecx
+. 5780 0x12345678 7
+. 81 E9 C8 00 00 00 C3
+
+subl $200, %edx
+. 5781 0x12345678 7
+. 81 EA C8 00 00 00 C3
+
+subl $208, %eax
+. 5782 0x12345678 6
+. 2D D0 00 00 00 C3
+
+subl $20, %eax
+. 5783 0x12345678 4
+. 83 E8 14 C3
+
+subl $224, %eax
+. 5784 0x12345678 6
+. 2D E0 00 00 00 C3
+
+subl $224, %ecx
+. 5785 0x12345678 7
+. 81 E9 E0 00 00 00 C3
+
+subl $224, %edx
+. 5786 0x12345678 7
+. 81 EA E0 00 00 00 C3
+
+subl $232, %ecx
+. 5787 0x12345678 7
+. 81 E9 E8 00 00 00 C3
+
+subl $232, %edi
+. 5788 0x12345678 7
+. 81 EF E8 00 00 00 C3
+
+subl $232, %edx
+. 5789 0x12345678 7
+. 81 EA E8 00 00 00 C3
+
+subl $240, %ecx
+. 5790 0x12345678 7
+. 81 E9 F0 00 00 00 C3
+
+subl $240, %edi
+. 5791 0x12345678 7
+. 81 EF F0 00 00 00 C3
+
+subl $240, %edx
+. 5792 0x12345678 7
+. 81 EA F0 00 00 00 C3
+
+subl $248, %ecx
+. 5793 0x12345678 7
+. 81 E9 F8 00 00 00 C3
+
+subl $248, %edx
+. 5794 0x12345678 7
+. 81 EA F8 00 00 00 C3
+
+subl 292(%rsp), %ebp
+. 5795 0x12345678 8
+. 2B AC 24 24 01 00 00 C3
+
+subl $2, %eax
+. 5796 0x12345678 4
+. 83 E8 02 C3
+
+subl $3, %edx
+. 5797 0x12345678 4
+. 83 EA 03 C3
+
+subl $44, %eax
+. 5798 0x12345678 4
+. 83 E8 2C C3
+
+subl $46, %eax
+. 5799 0x12345678 4
+. 83 E8 2E C3
+
+subl $49, %eax
+. 5800 0x12345678 4
+. 83 E8 31 C3
+
+subl $4, %r9d
+. 5801 0x12345678 5
+. 41 83 E9 04 C3
+
+subl 4(%rsp), %ebp
+. 5802 0x12345678 5
+. 2B 6C 24 04 C3
+
+subl $64, %eax
+. 5803 0x12345678 4
+. 83 E8 40 C3
+
+subl $64, %ebx
+. 5804 0x12345678 4
+. 83 EB 40 C3
+
+subl $69631, %edi
+. 5805 0x12345678 7
+. 81 EF FF 0F 01 00 C3
+
+subl $69633, %eax
+. 5806 0x12345678 6
+. 2D 01 10 01 00 C3
+
+subl $69635, %eax
+. 5807 0x12345678 6
+. 2D 03 10 01 00 C3
+
+subl $73728, %eax
+. 5808 0x12345678 6
+. 2D 00 20 01 00 C3
+
+subl $77825, %eax
+. 5809 0x12345678 6
+. 2D 01 30 01 00 C3
+
+subl $77869, %eax
+. 5810 0x12345678 6
+. 2D 2D 30 01 00 C3
+
+subl $78092, %eax
+. 5811 0x12345678 6
+. 2D 0C 31 01 00 C3
+
+subl $83, %eax
+. 5812 0x12345678 4
+. 83 E8 53 C3
+
+subl $86018, %eax
+. 5813 0x12345678 6
+. 2D 02 50 01 00 C3
+
+subl 8(%rdx), %edi
+. 5814 0x12345678 4
+. 2B 7A 08 C3
+
+subl $96, %eax
+. 5815 0x12345678 4
+. 83 E8 60 C3
+
+subl %eax, %edi
+. 5816 0x12345678 3
+. 29 C7 C3
+
+subl %eax, %edx
+. 5817 0x12345678 3
+. 29 C2 C3
+
+subl %ebp, %r14d
+. 5818 0x12345678 4
+. 41 29 EE C3
+
+subl %ecx, %edx
+. 5819 0x12345678 3
+. 29 CA C3
+
+subl %ecx, %r8d
+. 5820 0x12345678 4
+. 41 29 C8 C3
+
+subl %edi, %eax
+. 5821 0x12345678 3
+. 29 F8 C3
+
+subl %edx, %eax
+. 5822 0x12345678 3
+. 29 D0 C3
+
+subl %edx, %ebx
+. 5823 0x12345678 3
+. 29 D3 C3
+
+subl %edx, %ecx
+. 5824 0x12345678 3
+. 29 D1 C3
+
+subl %edx, %edi
+. 5825 0x12345678 3
+. 29 D7 C3
+
+subl %edx, %esi
+. 5826 0x12345678 3
+. 29 D6 C3
+
+subl %edx, %r8d
+. 5827 0x12345678 4
+. 41 29 D0 C3
+
+subl %esi, %edi
+. 5828 0x12345678 3
+. 29 F7 C3
+
+subl %r10d, %ecx
+. 5829 0x12345678 4
+. 44 29 D1 C3
+
+subl %r14d, %edi
+. 5830 0x12345678 4
+. 44 29 F7 C3
+
+subl %r15d, %eax
+. 5831 0x12345678 4
+. 44 29 F8 C3
+
+subq $104, %rsp
+. 5832 0x12345678 5
+. 48 83 EC 68 C3
+
+subq $120, %rsp
+. 5833 0x12345678 5
+. 48 83 EC 78 C3
+
+subq $136, %rsp
+. 5834 0x12345678 8
+. 48 81 EC 88 00 00 00 C3
+
+subq $152, %rsp
+. 5835 0x12345678 8
+. 48 81 EC 98 00 00 00 C3
+
+subq 160(%rsp), %r15
+. 5836 0x12345678 9
+. 4C 2B BC 24 A0 00 00 00 C3
+
+subq $168, %rsp
+. 5837 0x12345678 8
+. 48 81 EC A8 00 00 00 C3
+
+subq $16, %rsp
+. 5838 0x12345678 5
+. 48 83 EC 10 C3
+
+subq $184, %rsp
+. 5839 0x12345678 8
+. 48 81 EC B8 00 00 00 C3
+
+subq $208, %rsp
+. 5840 0x12345678 8
+. 48 81 EC D0 00 00 00 C3
+
+subq $216, %rsp
+. 5841 0x12345678 8
+. 48 81 EC D8 00 00 00 C3
+
+subq $24, %rsp
+. 5842 0x12345678 5
+. 48 83 EC 18 C3
+
+subq $280, %rsp
+. 5843 0x12345678 8
+. 48 81 EC 18 01 00 00 C3
+
+subq $312, %rsp
+. 5844 0x12345678 8
+. 48 81 EC 38 01 00 00 C3
+
+subq $40, %rsp
+. 5845 0x12345678 5
+. 48 83 EC 28 C3
+
+subq $472, %rsp
+. 5846 0x12345678 8
+. 48 81 EC D8 01 00 00 C3
+
+subq $56, %rsp
+. 5847 0x12345678 5
+. 48 83 EC 38 C3
+
+subq $728, %rsp
+. 5848 0x12345678 8
+. 48 81 EC D8 02 00 00 C3
+
+subq $72, %rsp
+. 5849 0x12345678 5
+. 48 83 EC 48 C3
+
+subq $88, %rsp
+. 5850 0x12345678 5
+. 48 83 EC 58 C3
+
+subq $8, %rsp
+. 5851 0x12345678 5
+. 48 83 EC 08 C3
+
+subq $968, %rsp
+. 5852 0x12345678 8
+. 48 81 EC C8 03 00 00 C3
+
+subq %rax, %rdx
+. 5853 0x12345678 4
+. 48 29 C2 C3
+
+subw %cx, %dx
+. 5854 0x12345678 4
+. 66 29 CA C3
+
+subw %cx, %r8w
+. 5855 0x12345678 5
+. 66 41 29 C8 C3
+
+subw %dx, %ax
+. 5856 0x12345678 4
+. 66 29 D0 C3
+
+subw %si, %di
+. 5857 0x12345678 4
+. 66 29 F7 C3
+
+testb $16, 44(%rip)
+. 5858 0x12345678 8
+. F6 05 2C 00 00 00 10 C3
+
+testb $1, %al
+. 5859 0x12345678 3
+. A8 01 C3
+
+testb $1, %bl
+. 5860 0x12345678 4
+. F6 C3 01 C3
+
+testb $1, %dil
+. 5861 0x12345678 5
+. 40 F6 C7 01 C3
+
+testb $1, %r12b
+. 5862 0x12345678 5
+. 41 F6 C4 01 C3
+
+testb $1, 45(%rip)
+. 5863 0x12345678 8
+. F6 05 2D 00 00 00 01 C3
+
+testb $28, %dil
+. 5864 0x12345678 5
+. 40 F6 C7 1C C3
+
+testb $2, %al
+. 5865 0x12345678 3
+. A8 02 C3
+
+testb $2, %dl
+. 5866 0x12345678 4
+. F6 C2 02 C3
+
+testb $2, %r13b
+. 5867 0x12345678 5
+. 41 F6 C5 02 C3
+
+testb $2, %r14b
+. 5868 0x12345678 5
+. 41 F6 C6 02 C3
+
+testb $-32, %dl
+. 5869 0x12345678 4
+. F6 C2 E0 C3
+
+testb $32, 47(%rip)
+. 5870 0x12345678 8
+. F6 05 2F 00 00 00 20 C3
+
+testb $4, 28(%rsp)
+. 5871 0x12345678 6
+. F6 44 24 1C 04 C3
+
+testb $4, %al
+. 5872 0x12345678 3
+. A8 04 C3
+
+testb $4, %dil
+. 5873 0x12345678 5
+. 40 F6 C7 04 C3
+
+testb $4, %dl
+. 5874 0x12345678 4
+. F6 C2 04 C3
+
+testb $4, %r13b
+. 5875 0x12345678 5
+. 41 F6 C5 04 C3
+
+testb $4, 49(%rip)
+. 5876 0x12345678 8
+. F6 05 31 00 00 00 04 C3
+
+testb $64, 66(%rip)
+. 5877 0x12345678 8
+. F6 05 42 00 00 00 40 C3
+
+testb $7, 800(%rsp)
+. 5878 0x12345678 9
+. F6 84 24 20 03 00 00 07 C3
+
+testb $8, %al
+. 5879 0x12345678 3
+. A8 08 C3
+
+testb $8, %dl
+. 5880 0x12345678 4
+. F6 C2 08 C3
+
+testb $8, %r14b
+. 5881 0x12345678 5
+. 41 F6 C6 08 C3
+
+testb %al, %al
+. 5882 0x12345678 3
+. 84 C0 C3
+
+testb %bl, %bl
+. 5883 0x12345678 3
+. 84 DB C3
+
+testb %bpl, %bpl
+. 5884 0x12345678 4
+. 40 84 ED C3
+
+testb %cl, %cl
+. 5885 0x12345678 3
+. 84 C9 C3
+
+testb %dil, %dil
+. 5886 0x12345678 4
+. 40 84 FF C3
+
+testb %dl, %dl
+. 5887 0x12345678 3
+. 84 D2 C3
+
+testb %r11b, %r11b
+. 5888 0x12345678 4
+. 45 84 DB C3
+
+testb %r12b, %r12b
+. 5889 0x12345678 4
+. 45 84 E4 C3
+
+testb %r13b, %r13b
+. 5890 0x12345678 4
+. 45 84 ED C3
+
+testb %r14b, %r14b
+. 5891 0x12345678 4
+. 45 84 F6 C3
+
+testb %r15b, %r15b
+. 5892 0x12345678 4
+. 45 84 FF C3
+
+testb %r8b, %r8b
+. 5893 0x12345678 4
+. 45 84 C0 C3
+
+testb %sil, %sil
+. 5894 0x12345678 4
+. 40 84 F6 C3
+
+testl $1024, %edi
+. 5895 0x12345678 7
+. F7 C7 00 04 00 00 C3
+
+testl $1024, %r14d
+. 5896 0x12345678 8
+. 41 F7 C6 00 04 00 00 C3
+
+testl $16384, %edi
+. 5897 0x12345678 7
+. F7 C7 00 40 00 00 C3
+
+testl $16384, %r14d
+. 5898 0x12345678 8
+. 41 F7 C6 00 40 00 00 C3
+
+testl $16777216, %ebp
+. 5899 0x12345678 7
+. F7 C5 00 00 00 01 C3
+
+testl $16777216, %edi
+. 5900 0x12345678 7
+. F7 C7 00 00 00 01 C3
+
+testl $16777216, %edx
+. 5901 0x12345678 7
+. F7 C2 00 00 00 01 C3
+
+testl $16777216, %esi
+. 5902 0x12345678 7
+. F7 C6 00 00 00 01 C3
+
+testl $2048, %edi
+. 5903 0x12345678 7
+. F7 C7 00 08 00 00 C3
+
+testl $2048, %r14d
+. 5904 0x12345678 8
+. 41 F7 C6 00 08 00 00 C3
+
+testl $256, %r14d
+. 5905 0x12345678 8
+. 41 F7 C6 00 01 00 00 C3
+
+testl $4096, %edi
+. 5906 0x12345678 7
+. F7 C7 00 10 00 00 C3
+
+testl $4096, %r14d
+. 5907 0x12345678 8
+. 41 F7 C6 00 10 00 00 C3
+
+testl $512, %r14d
+. 5908 0x12345678 8
+. 41 F7 C6 00 02 00 00 C3
+
+testl $-65536, %eax
+. 5909 0x12345678 6
+. A9 00 00 FF FF C3
+
+testl $-65536, %ebp
+. 5910 0x12345678 7
+. F7 C5 00 00 FF FF C3
+
+testl $-65536, %ebx
+. 5911 0x12345678 7
+. F7 C3 00 00 FF FF C3
+
+testl $-65536, %r12d
+. 5912 0x12345678 8
+. 41 F7 C4 00 00 FF FF C3
+
+testl $768, %edi
+. 5913 0x12345678 7
+. F7 C7 00 03 00 00 C3
+
+testl $8192, %edi
+. 5914 0x12345678 7
+. F7 C7 00 20 00 00 C3
+
+testl $8192, %r14d
+. 5915 0x12345678 8
+. 41 F7 C6 00 20 00 00 C3
+
+testl %eax, %eax
+. 5916 0x12345678 3
+. 85 C0 C3
+
+testl %eax, %edi
+. 5917 0x12345678 3
+. 85 F8 C3
+
+testl %eax, %edx
+. 5918 0x12345678 3
+. 85 D0 C3
+
+testl %ebp, %ebp
+. 5919 0x12345678 3
+. 85 ED C3
+
+testl %ebx, %ebx
+. 5920 0x12345678 3
+. 85 DB C3
+
+testl %ecx, %ecx
+. 5921 0x12345678 3
+. 85 C9 C3
+
+testl %edi, %edi
+. 5922 0x12345678 3
+. 85 FF C3
+
+testl %edx, %edx
+. 5923 0x12345678 3
+. 85 D2 C3
+
+testl %esi, %esi
+. 5924 0x12345678 3
+. 85 F6 C3
+
+testl %r10d, %r10d
+. 5925 0x12345678 4
+. 45 85 D2 C3
+
+testl %r11d, %r11d
+. 5926 0x12345678 4
+. 45 85 DB C3
+
+testl %r12d, %r12d
+. 5927 0x12345678 4
+. 45 85 E4 C3
+
+testl %r13d, %r13d
+. 5928 0x12345678 4
+. 45 85 ED C3
+
+testl %r14d, %r14d
+. 5929 0x12345678 4
+. 45 85 F6 C3
+
+testl %r15d, %r15d
+. 5930 0x12345678 4
+. 45 85 FF C3
+
+testl %r8d, %r8d
+. 5931 0x12345678 4
+. 45 85 C0 C3
+
+testl %r9d, %r9d
+. 5932 0x12345678 4
+. 45 85 C9 C3
+
+testq %r12, %r12
+. 5933 0x12345678 4
+. 4D 85 E4 C3
+
+testq %r13, %r13
+. 5934 0x12345678 4
+. 4D 85 ED C3
+
+testq %r14, %r14
+. 5935 0x12345678 4
+. 4D 85 F6 C3
+
+testq %r15, %r15
+. 5936 0x12345678 4
+. 4D 85 FF C3
+
+testq %rax, %rax
+. 5937 0x12345678 4
+. 48 85 C0 C3
+
+testq %rbp, %rbp
+. 5938 0x12345678 4
+. 48 85 ED C3
+
+testq %rbx, %rbx
+. 5939 0x12345678 4
+. 48 85 DB C3
+
+testq %rcx, %rcx
+. 5940 0x12345678 4
+. 48 85 C9 C3
+
+testq %rdi, %rdi
+. 5941 0x12345678 4
+. 48 85 FF C3
+
+testq %rdx, %rdx
+. 5942 0x12345678 4
+. 48 85 D2 C3
+
+testq %rsi, %rsi
+. 5943 0x12345678 4
+. 48 85 F6 C3
+
+testw %ax, %ax
+. 5944 0x12345678 4
+. 66 85 C0 C3
+
+testw %bx, %bx
+. 5945 0x12345678 4
+. 66 85 DB C3
+
+testw %di, %di
+. 5946 0x12345678 4
+. 66 85 FF C3
+
+testw %dx, %dx
+. 5947 0x12345678 4
+. 66 85 D2 C3
+
+testw %r11w, %r11w
+. 5948 0x12345678 5
+. 66 45 85 DB C3
+
+testw %r14w, %r14w
+. 5949 0x12345678 5
+. 66 45 85 F6 C3
+
+testw %r8w, %r8w
+. 5950 0x12345678 5
+. 66 45 85 C0 C3
+
+testw %r9w, %r9w
+. 5951 0x12345678 5
+. 66 45 85 C9 C3
+
+testw %si, %si
+. 5952 0x12345678 4
+. 66 85 F6 C3
+
+xorb 8(%rdx), %dil
+. 5953 0x12345678 5
+. 40 32 7A 08 C3
+
+xorl $1, %eax
+. 5954 0x12345678 4
+. 83 F0 01 C3
+
+xorl $1, %edx
+. 5955 0x12345678 4
+. 83 F2 01 C3
+
+xorl 8(%rdx), %edi
+. 5956 0x12345678 4
+. 33 7A 08 C3
+
+xorl %eax, %eax
+. 5957 0x12345678 3
+. 31 C0 C3
+
+xorl %ebp, %ebp
+. 5958 0x12345678 3
+. 31 ED C3
+
+xorl %ebx, %ebx
+. 5959 0x12345678 3
+. 31 DB C3
+
+xorl %ecx, %ecx
+. 5960 0x12345678 3
+. 31 C9 C3
+
+xorl %edi, %edi
+. 5961 0x12345678 3
+. 31 FF C3
+
+xorl %edx, %ecx
+. 5962 0x12345678 3
+. 31 D1 C3
+
+xorl %edx, %edx
+. 5963 0x12345678 3
+. 31 D2 C3
+
+xorl %esi, %eax
+. 5964 0x12345678 3
+. 31 F0 C3
+
+xorl %esi, %ecx
+. 5965 0x12345678 3
+. 31 F1 C3
+
+xorl %esi, %edx
+. 5966 0x12345678 3
+. 31 F2 C3
+
+xorl %esi, %esi
+. 5967 0x12345678 3
+. 31 F6 C3
+
+xorl %r10d, %r10d
+. 5968 0x12345678 4
+. 45 31 D2 C3
+
+xorl %r11d, %r11d
+. 5969 0x12345678 4
+. 45 31 DB C3
+
+xorl %r12d, %r12d
+. 5970 0x12345678 4
+. 45 31 E4 C3
+
+xorl %r13d, %r13d
+. 5971 0x12345678 4
+. 45 31 ED C3
+
+xorl %r14d, %r14d
+. 5972 0x12345678 4
+. 45 31 F6 C3
+
+xorl %r15d, %r15d
+. 5973 0x12345678 4
+. 45 31 FF C3
+
+xorl %r8d, %r8d
+. 5974 0x12345678 4
+. 45 31 C0 C3
+
+xorl %r9d, %r9d
+. 5975 0x12345678 4
+. 45 31 C9 C3
+
+xorw %ax, %ax
+. 5976 0x12345678 4
+. 66 31 C0 C3
+
+xorw %di, %di
+. 5977 0x12345678 4
+. 66 31 FF C3
+
+xorw %dx, %dx
+. 5978 0x12345678 4
+. 66 31 D2 C3
+
+nop
+. 5979 0x12345678 2
+. 90 C3
+
+nop
+. 5980 0x12345678 2
+. 90 C3
+
diff --git a/VEX/orig_amd64/test2.sorted b/VEX/orig_amd64/test2.sorted
new file mode 100644
index 0000000..c3343be
--- /dev/null
+++ b/VEX/orig_amd64/test2.sorted
@@ -0,0 +1,5978 @@
+40027A08                                addb 8(%rdx), %dil
+83C666                                  addl $102, %esi
+83C26C                                  addl $108, %edx
+83C66C                                  addl $108, %esi
+4183C50A                                addl $10, %r13d
+83C673                                  addl $115, %esi
+83C380                                  addl $-128, %ebx
+83C780                                  addl $-128, %edi
+0344240C                                addl 12(%rsp), %eax
+03BC2488000000                          addl 136(%rsp), %edi
+83C10D                                  addl $13, %ecx
+83C310                                  addl $16, %ebx
+83C011                                  addl $17, %eax
+83C512                                  addl $18, %ebp
+03442414                                addl 20(%rsp), %eax
+4403742414                              addl 20(%rsp), %r14d
+83C315                                  addl $21, %ebx
+03442418                                addl 24(%rsp), %eax
+834424101C                              addl $28, 16(%rsp)
+0344241C                                addl 28(%rsp), %eax
+036C241C                                addl 28(%rsp), %ebp
+440374241C                              addl 28(%rsp), %r14d
+44037C241C                              addl 28(%rsp), %r15d
+83C01D                                  addl $29, %eax
+83C002                                  addl $2, %eax
+83C502                                  addl $2, %ebp
+83C302                                  addl $2, %ebx
+83C702                                  addl $2, %edi
+4183C402                                addl $2, %r12d
+4183C702                                addl $2, %r15d
+834424101F                              addl $31, 16(%rsp)
+03BC2438010000                          addl 312(%rsp), %edi
+4403AC2444010000                        addl 324(%rsp), %r13d
+03AC244C010000                          addl 332(%rsp), %ebp
+83C021                                  addl $33, %eax
+03AC2458010000                          addl 344(%rsp), %ebp
+03BC2458010000                          addl 344(%rsp), %edi
+03AC2460010000                          addl 352(%rsp), %ebp
+03AC2464010000                          addl 356(%rsp), %ebp
+03AC2470010000                          addl 368(%rsp), %ebp
+03442424                                addl 36(%rsp), %eax
+037C2424                                addl 36(%rsp), %edi
+8344241025                              addl $37, 16(%rsp)
+83C125                                  addl $37, %ecx
+83C503                                  addl $3, %ebp
+83C303                                  addl $3, %ebx
+83C703                                  addl $3, %edi
+8344241029                              addl $41, 16(%rsp)
+037C242C                                addl 44(%rsp), %edi
+038424C8010000                          addl 456(%rsp), %eax
+83C504                                  addl $4, %ebp
+83C304                                  addl $4, %ebx
+83C704                                  addl $4, %edi
+0384242C020000                          addl 556(%rsp), %eax
+03AC242C020000                          addl 556(%rsp), %ebp
+03442438                                addl 56(%rsp), %eax
+83C005                                  addl $5, %eax
+83C505                                  addl $5, %ebp
+0344243C                                addl 60(%rsp), %eax
+440364243C                              addl 60(%rsp), %r12d
+0502100100                              addl $69634, %eax
+440364244C                              addl 76(%rsp), %r12d
+81C71B300100                            addl $77851, %edi
+4181C41B300100                          addl $77851, %r12d
+4181C41C300100                          addl $77852, %r12d
+81C331300100                            addl $77873, %ebx
+81C332300100                            addl $77874, %ebx
+81C333300100                            addl $77875, %ebx
+81C53E300100                            addl $77886, %ebp
+0543300100                              addl $77891, %eax
+81C743300100                            addl $77891, %edi
+4181C443300100                          addl $77891, %r12d
+0544300100                              addl $77892, %eax
+81C744300100                            addl $77892, %edi
+0545300100                              addl $77893, %eax
+81C745300100                            addl $77893, %edi
+4181C445300100                          addl $77893, %r12d
+4181C545300100                          addl $77893, %r13d
+81C346300100                            addl $77894, %ebx
+81C54C300100                            addl $77900, %ebp
+81C54F300100                            addl $77903, %ebp
+83C707                                  addl $7, %edi
+4183C507                                addl $7, %r13d
+0501400100                              addl $81921, %eax
+83C008                                  addl $8, %eax
+037A08                                  addl 8(%rdx), %edi
+44037C2408                              addl 8(%rsp), %r15d
+01442414                                addl %eax, 20(%rsp)
+01442418                                addl %eax, 24(%rsp)
+01442424                                addl %eax, 36(%rsp)
+01442428                                addl %eax, 40(%rsp)
+0144242C                                addl %eax, 44(%rsp)
+01442438                                addl %eax, 56(%rsp)
+41014508                                addl %eax, 8(%r13)
+01C0                                    addl %eax, %eax
+01C7                                    addl %eax, %edi
+01C2                                    addl %eax, %edx
+01C6                                    addl %eax, %esi
+4101C7                                  addl %eax, %r15d
+016C2408                                addl %ebp, 8(%rsp)
+01E8                                    addl %ebp, %eax
+01EB                                    addl %ebp, %ebx
+01EF                                    addl %ebp, %edi
+01EE                                    addl %ebp, %esi
+014C247C                                addl %ecx, 124(%rsp)
+018C2424010000                          addl %ecx, 292(%rsp)
+014C2408                                addl %ecx, 8(%rsp)
+01C9                                    addl %ecx, %ecx
+01CF                                    addl %ecx, %edi
+01CA                                    addl %ecx, %edx
+4101C8                                  addl %ecx, %r8d
+017C240C                                addl %edi, 12(%rsp)
+01F8                                    addl %edi, %eax
+01FF                                    addl %edi, %edi
+01FE                                    addl %edi, %esi
+4101FF                                  addl %edi, %r15d
+013D63C55400                            addl %edi, 5555555(%rip)
+01D0                                    addl %edx, %eax
+01D3                                    addl %edx, %ebx
+01D1                                    addl %edx, %ecx
+01D7                                    addl %edx, %edi
+01D6                                    addl %edx, %esi
+4101D0                                  addl %edx, %r8d
+01F0                                    addl %esi, %eax
+01F7                                    addl %esi, %edi
+4101F5                                  addl %esi, %r13d
+033D01000000                            addl 1(%rip), %edi
+033502000000                            addl 2(%rip), %esi
+033D03000000                            addl 3(%rip), %edi
+033504000000                            addl 4(%rip), %esi
+4401642438                              addl %r12d, 56(%rsp)
+44016308                                addl %r12d, 8(%rbx)
+4501E7                                  addl %r12d, %r15d
+4401EF                                  addl %r13d, %edi
+4401EE                                  addl %r13d, %esi
+4401F0                                  addl %r14d, %eax
+4401F5                                  addl %r14d, %ebp
+44017C2418                              addl %r15d, 24(%rsp)
+4401F8                                  addl %r15d, %eax
+4401C7                                  addl %r8d, %edi
+4883C468                                addq $104, %rsp
+4883C478                                addq $120, %rsp
+4881C488000000                          addq $136, %rsp
+4881C498000000                          addq $152, %rsp
+4881C4A8000000                          addq $168, %rsp
+48035110                                addq 16(%rcx), %rdx
+48035610                                addq 16(%rsi), %rdx
+4883C410                                addq $16, %rsp
+4803442410                              addq 16(%rsp), %rax
+4881C4B8000000                          addq $184, %rsp
+4881C4D0000000                          addq $208, %rsp
+4881C4D8000000                          addq $216, %rsp
+4883C418                                addq $24, %rsp
+4803442418                              addq 24(%rsp), %rax
+4881C418010000                          addq $280, %rsp
+4881C438010000                          addq $312, %rsp
+4803442420                              addq 32(%rsp), %rax
+4883C428                                addq $40, %rsp
+4803442428                              addq 40(%rsp), %rax
+4881C4D8010000                          addq $472, %rsp
+4883C504                                addq $4, %rbp
+4883C304                                addq $4, %rbx
+4883C438                                addq $56, %rsp
+4881C4D8020000                          addq $728, %rsp
+4883C448                                addq $72, %rsp
+4883C458                                addq $88, %rsp
+4883C008                                addq $8, %rax
+4883C408                                addq $8, %rsp
+4881C4C8030000                          addq $968, %rsp
+48031507000000                          addq 7(%rip), %rdx
+4883C702                                addq $2, %rdi
+4805C61E0000                            addq $7878, %rax
+4D01FC                                  addq %r15, %r12
+4C01F8                                  addq %r15, %rax
+4C01FF                                  addq %r15, %rdi
+4C01FE                                  addq %r15, %rsi
+4801442470                              addq %rax, 112(%rsp)
+4801442420                              addq %rax, 32(%rsp)
+4801442430                              addq %rax, 48(%rsp)
+4801442460                              addq %rax, 96(%rsp)
+4901C5                                  addq %rax, %r13
+4901C7                                  addq %rax, %r15
+4801C5                                  addq %rax, %rbp
+4801C3                                  addq %rax, %rbx
+480105DD030000                          addq %rax, 989(%rip)
+4801E8                                  addq %rbp, %rax
+4801EB                                  addq %rbp, %rbx
+4801EF                                  addq %rbp, %rdi
+4801EE                                  addq %rbp, %rsi
+4801D8                                  addq %rbx, %rax
+480539300000                            addq $12345, %rax
+664101445616                            addw %ax, 22(%r14,%rdx,2)
+6601445316                              addw %ax, 22(%rbx,%rdx,2)
+40227A08                                andb 8(%rdx), %dil
+83E00C                                  andl $12, %eax
+83E60C                                  andl $12, %esi
+83E60D                                  andl $13, %esi
+836424140F                              andl $15, 20(%rsp)
+83E00F                                  andl $15, %eax
+83E10F                                  andl $15, %ecx
+83E20F                                  andl $15, %edx
+83E60F                                  andl $15, %esi
+4183E50F                                andl $15, %r13d
+81E1FF3F0000                            andl $16383, %ecx
+81E60000FF00                            andl $16711680, %esi
+4181E00000FF00                          andl $16711680, %r8d
+25FFFFFF00                              andl $16777215, %eax
+81E5FFFFFF00                            andl $16777215, %ebp
+81E3FFFFFF00                            andl $16777215, %ebx
+81E1FFFFFF00                            andl $16777215, %ecx
+81E2FFFFFF00                            andl $16777215, %edx
+4181E5FFFFFF00                          andl $16777215, %r13d
+4181E1FFFFFF00                          andl $16777215, %r9d
+25000000FF                              andl $-16777216, %eax
+81E700000001                            andl $16777216, %edi
+81E600000001                            andl $16777216, %esi
+4423A424B0000000                        andl 176(%rsp), %r12d
+81E7C0000000                            andl $192, %edi
+83E001                                  andl $1, %eax
+83E301                                  andl $1, %ebx
+83E101                                  andl $1, %ecx
+83E701                                  andl $1, %edi
+83E201                                  andl $1, %edx
+83E601                                  andl $1, %esi
+4183E001                                andl $1, %r8d
+25F9000000                              andl $249, %eax
+83E018                                  andl $24, %eax
+25FB000000                              andl $251, %eax
+81E7FE000000                            andl $254, %edi
+25FF000000                              andl $255, %eax
+83E71B                                  andl $27, %edi
+4183E402                                andl $2, %r12d
+83E11E                                  andl $30, %ecx
+83E01F                                  andl $31, %eax
+83E51F                                  andl $31, %ebp
+83E31F                                  andl $31, %ebx
+83E11F                                  andl $31, %ecx
+83E21F                                  andl $31, %edx
+4183E41F                                andl $31, %r12d
+83E003                                  andl $3, %eax
+83E503                                  andl $3, %ebp
+83E303                                  andl $3, %ebx
+83E1FD                                  andl $-3, %ecx
+83E603                                  andl $3, %esi
+81E5FF0F0000                            andl $4095, %ebp
+83E0FC                                  andl $-4, %eax
+83E3FC                                  andl $-4, %ebx
+83E304                                  andl $4, %ebx
+83E7FC                                  andl $-4, %edi
+4183E4FC                                andl $-4, %r12d
+4183E404                                andl $4, %r12d
+4183E5FC                                andl $-4, %r13d
+4183E504                                andl $4, %r13d
+4181E5FF010000                          andl $511, %r13d
+4181E600020000                          andl $512, %r14d
+83E0C7                                  andl $-57, %eax
+83E2C7                                  andl $-57, %edx
+4183E4C7                                andl $-57, %r12d
+83E005                                  andl $5, %eax
+83E03F                                  andl $63, %eax
+83E33F                                  andl $63, %ebx
+4183E43F                                andl $63, %r12d
+4183E73F                                andl $63, %r15d
+81E6FF03FFFF                            andl $-64513, %esi
+81E700FF0000                            andl $65280, %edi
+81E200FF0000                            andl $65280, %edx
+4181E200FF0000                          andl $65280, %r10d
+25FFFF0000                              andl $65535, %eax
+4181E6FFFF0000                          andl $65535, %r14d
+83E606                                  andl $6, %esi
+83E007                                  andl $7, %eax
+83E507                                  andl $7, %ebp
+83E107                                  andl $7, %ecx
+83E707                                  andl $7, %edi
+83E207                                  andl $7, %edx
+83E607                                  andl $7, %esi
+4183E407                                andl $7, %r12d
+4183E507                                andl $7, %r13d
+4183E707                                andl $7, %r15d
+4183E107                                andl $7, %r9d
+83E0F8                                  andl $-8, %eax
+83E7F8                                  andl $-8, %edi
+83E708                                  andl $8, %edi
+237A08                                  andl 8(%rdx), %edi
+83E009                                  andl $9, %eax
+21C5                                    andl %eax, %ebp
+21C1                                    andl %eax, %ecx
+21D8                                    andl %ebx, %eax
+4121DE                                  andl %ebx, %r14d
+21D0                                    andl %edx, %eax
+21D1                                    andl %edx, %ecx
+21F1                                    andl %esi, %ecx
+4421E0                                  andl %r12d, %eax
+4C21E6                                  andq %r12, %rsi
+4821442470                              andq %rax, 112(%rsp)
+4821442458                              andq %rax, 88(%rsp)
+4821C6                                  andq %rax, %rsi
+FF942480000000                          call *128(%rsp)
+FF942488000000                          call *136(%rsp)
+FF942490000000                          call *144(%rsp)
+FF542410                                call *16(%rsp)
+FF542418                                call *24(%rsp)
+FF942408010000                          call *264(%rsp)
+FF942410010000                          call *272(%rsp)
+FF942428010000                          call *296(%rsp)
+FF942410030000                          call *784(%rsp)
+FF942418030000                          call *792(%rsp)
+FF942428030000                          call *808(%rsp)
+41FFD2                                  call *%r10
+41FFD5                                  call *%r13
+41FFD6                                  call *%r14
+41FFD7                                  call *%r15
+FF152BC70400                            call *313131(%rip)
+FF15B5260000                            call *9909(%rip)
+99                                      cltd
+4898                                    cltq
+0F47D3                                  cmova %ebx, %edx
+0F47F1                                  cmova %ecx, %esi
+440F47D1                                cmova %ecx, %r10d
+0F47C2                                  cmova %edx, %eax
+0F47FA                                  cmova %edx, %edi
+440F47DA                                cmova %edx, %r11d
+440F47C2                                cmova %edx, %r8d
+440F47CA                                cmova %edx, %r9d
+410F43C1                                cmovae %r9d, %eax
+410F43D9                                cmovae %r9d, %ebx
+410F43C9                                cmovae %r9d, %ecx
+410F43F9                                cmovae %r9d, %edi
+410F43D1                                cmovae %r9d, %edx
+410F43F1                                cmovae %r9d, %esi
+450F43C1                                cmovae %r9d, %r8d
+440F42E0                                cmovb %eax, %r12d
+0F42D3                                  cmovb %ebx, %edx
+0F42F1                                  cmovb %ecx, %esi
+440F42D1                                cmovb %ecx, %r10d
+0F42C2                                  cmovb %edx, %eax
+0F42EA                                  cmovb %edx, %ebp
+0F42FA                                  cmovb %edx, %edi
+440F42DA                                cmovb %edx, %r11d
+440F42C2                                cmovb %edx, %r8d
+440F42CA                                cmovb %edx, %r9d
+0F46D0                                  cmovbe %eax, %edx
+0F46F0                                  cmovbe %eax, %esi
+0F44442468                              cmove 104(%rsp), %eax
+0F448C247C010000                        cmove 380(%rsp), %ecx
+0F44BC247C010000                        cmove 380(%rsp), %edi
+0F4494247C010000                        cmove 380(%rsp), %edx
+0F44B4247C010000                        cmove 380(%rsp), %esi
+0F448C248C010000                        cmove 396(%rsp), %ecx
+0F44BC248C010000                        cmove 396(%rsp), %edi
+0F4494248C010000                        cmove 396(%rsp), %edx
+0F44B4248C010000                        cmove 396(%rsp), %esi
+0F448C249C010000                        cmove 412(%rsp), %ecx
+0F44BC249C010000                        cmove 412(%rsp), %edi
+0F4494249C010000                        cmove 412(%rsp), %edx
+0F44B4249C010000                        cmove 412(%rsp), %esi
+0F44BC24F0010000                        cmove 496(%rsp), %edi
+0F44B424F0010000                        cmove 496(%rsp), %esi
+0F448C2400020000                        cmove 512(%rsp), %ecx
+0F44942400020000                        cmove 512(%rsp), %edx
+0F448C2410020000                        cmove 528(%rsp), %ecx
+0F44BC2410020000                        cmove 528(%rsp), %edi
+0F44942410020000                        cmove 528(%rsp), %edx
+0F44B42410020000                        cmove 528(%rsp), %esi
+0F44E8                                  cmove %eax, %ebp
+0F44D0                                  cmove %eax, %edx
+440F44E8                                cmove %eax, %r13d
+0F44D1                                  cmove %ecx, %edx
+480F44F8                                cmove %rax, %rdi
+480F44D0                                cmove %rax, %rdx
+480F44F0                                cmove %rax, %rsi
+480F44C5                                cmove %rbp, %rax
+480F44DF                                cmove %rdi, %rbx
+480F44F2                                cmove %rdx, %rsi
+480F44C6                                cmove %rsi, %rax
+0F4FF7                                  cmovg %edi, %esi
+0F4FC2                                  cmovg %edx, %eax
+0F4FCA                                  cmovg %edx, %ecx
+0F4DC1                                  cmovge %ecx, %eax
+0F4DF9                                  cmovge %ecx, %edi
+0F4DD1                                  cmovge %ecx, %edx
+0F4DF1                                  cmovge %ecx, %esi
+0F4DC2                                  cmovge %edx, %eax
+410F4DC2                                cmovge %r10d, %eax
+410F4DDA                                cmovge %r10d, %ebx
+410F4DCA                                cmovge %r10d, %ecx
+410F4DFA                                cmovge %r10d, %edi
+410F4DD2                                cmovge %r10d, %edx
+450F4DDA                                cmovge %r10d, %r11d
+450F4DEA                                cmovge %r10d, %r13d
+450F4DC2                                cmovge %r10d, %r8d
+450F4DCA                                cmovge %r10d, %r9d
+410F4DC8                                cmovge %r8d, %ecx
+450F4DD0                                cmovge %r8d, %r10d
+410F4DC1                                cmovge %r9d, %eax
+410F4DD9                                cmovge %r9d, %ebx
+410F4DC9                                cmovge %r9d, %ecx
+410F4DF9                                cmovge %r9d, %edi
+410F4DD1                                cmovge %r9d, %edx
+410F4DF1                                cmovge %r9d, %esi
+450F4DC1                                cmovge %r9d, %r8d
+0F4FCE                                  cmovg %esi, %ecx
+410F4FC0                                cmovg %r8d, %eax
+410F4FD0                                cmovg %r8d, %edx
+440F4CE8                                cmovl %eax, %r13d
+440F4CE3                                cmovl %ebx, %r12d
+0F4CF7                                  cmovl %edi, %esi
+0F4CEA                                  cmovl %edx, %ebp
+0F4CCA                                  cmovl %edx, %ecx
+440F4CE2                                cmovl %edx, %r12d
+0F4EC1                                  cmovle %ecx, %eax
+0F4ED9                                  cmovle %ecx, %ebx
+0F4EF9                                  cmovle %ecx, %edi
+0F4ED1                                  cmovle %ecx, %edx
+440F4ED9                                cmovle %ecx, %r11d
+440F4EC1                                cmovle %ecx, %r8d
+440F4EC9                                cmovle %ecx, %r9d
+410F4EC2                                cmovle %r10d, %eax
+410F4EDA                                cmovle %r10d, %ebx
+410F4ECA                                cmovle %r10d, %ecx
+410F4EFA                                cmovle %r10d, %edi
+410F4ED2                                cmovle %r10d, %edx
+410F4EF2                                cmovle %r10d, %esi
+450F4EDA                                cmovle %r10d, %r11d
+450F4EE2                                cmovle %r10d, %r12d
+450F4EC2                                cmovle %r10d, %r8d
+410F4EC0                                cmovle %r8d, %eax
+410F4EF8                                cmovle %r8d, %edi
+410F4ED0                                cmovle %r8d, %edx
+410F4EF0                                cmovle %r8d, %esi
+410F4CC0                                cmovl %r8d, %eax
+410F4CD0                                cmovl %r8d, %edx
+0F45442468                              cmovne 104(%rsp), %eax
+0F45942490000000                        cmovne 144(%rsp), %edx
+0F458C2498000000                        cmovne 152(%rsp), %ecx
+0F455C2410                              cmovne 16(%rsp), %ebx
+440F45642410                            cmovne 16(%rsp), %r12d
+0F455C2418                              cmovne 24(%rsp), %ebx
+0F455C2420                              cmovne 32(%rsp), %ebx
+0F455C2428                              cmovne 40(%rsp), %ebx
+0F455C2430                              cmovne 48(%rsp), %ebx
+0F45E8                                  cmovne %eax, %ebp
+0F45D8                                  cmovne %eax, %ebx
+0F45C8                                  cmovne %eax, %ecx
+0F45F8                                  cmovne %eax, %edi
+0F45F0                                  cmovne %eax, %esi
+440F45F0                                cmovne %eax, %r14d
+0F45C2                                  cmovne %edx, %eax
+410F45C2                                cmovne %r10d, %eax
+410F45DA                                cmovne %r10d, %ebx
+410F45CA                                cmovne %r10d, %ecx
+410F45D2                                cmovne %r10d, %edx
+450F45DA                                cmovne %r10d, %r11d
+450F45E2                                cmovne %r10d, %r12d
+450F45EA                                cmovne %r10d, %r13d
+450F45CA                                cmovne %r10d, %r9d
+410F45F6                                cmovne %r14d, %esi
+410F45C0                                cmovne %r8d, %eax
+410F45C8                                cmovne %r8d, %ecx
+410F45D0                                cmovne %r8d, %edx
+450F45D0                                cmovne %r8d, %r10d
+4C0F45F0                                cmovne %rax, %r14
+4C0F45C8                                cmovne %rax, %r9
+480F45D8                                cmovne %rax, %rbx
+480F45C8                                cmovne %rax, %rcx
+480F45F8                                cmovne %rax, %rdi
+480F45D0                                cmovne %rax, %rdx
+480F45F0                                cmovne %rax, %rsi
+4C0F45CA                                cmovne %rdx, %r9
+480F45C2                                cmovne %rdx, %rax
+0F49D0                                  cmovns %eax, %edx
+0F49CE                                  cmovns %esi, %ecx
+410F49D8                                cmovns %r8d, %ebx
+480F49D0                                cmovns %rax, %rdx
+0F48C1                                  cmovs %ecx, %eax
+0F48D9                                  cmovs %ecx, %ebx
+0F48F9                                  cmovs %ecx, %edi
+0F48D1                                  cmovs %ecx, %edx
+440F48D9                                cmovs %ecx, %r11d
+440F48C1                                cmovs %ecx, %r8d
+440F48C9                                cmovs %ecx, %r9d
+410F48C2                                cmovs %r10d, %eax
+410F48DA                                cmovs %r10d, %ebx
+410F48CA                                cmovs %r10d, %ecx
+410F48FA                                cmovs %r10d, %edi
+410F48D2                                cmovs %r10d, %edx
+410F48F2                                cmovs %r10d, %esi
+450F48C2                                cmovs %r10d, %r8d
+807C246700                              cmpb $0, 103(%rsp)
+807C246B00                              cmpb $0, 107(%rsp)
+807C247300                              cmpb $0, 115(%rsp)
+807C240B00                              cmpb $0, 11(%rsp)
+807C247B00                              cmpb $0, 123(%rsp)
+80BC248700000000                        cmpb $0, 135(%rsp)
+807C240D00                              cmpb $0, 13(%rsp)
+807C240E00                              cmpb $0, 14(%rsp)
+80BC249700000000                        cmpb $0, 151(%rsp)
+807C240F00                              cmpb $0, 15(%rsp)
+807C021000                              cmpb $0, 16(%rdx,%rax)
+807C241000                              cmpb $0, 16(%rsp)
+80BC24AB00000000                        cmpb $0, 171(%rsp)
+807B1100                                cmpb $0, 17(%rbx)
+80BC24B700000000                        cmpb $0, 183(%rsp)
+807B1200                                cmpb $0, 18(%rbx)
+807C241600                              cmpb $0, 22(%rsp)
+80BC24EF00000000                        cmpb $0, 239(%rsp)
+807C241700                              cmpb $0, 23(%rsp)
+80BC240E01000000                        cmpb $0, 270(%rsp)
+807C241B00                              cmpb $0, 27(%rsp)
+80BC241B01000000                        cmpb $0, 283(%rsp)
+807C241F00                              cmpb $0, 31(%rsp)
+80BC244B01000000                        cmpb $0, 331(%rsp)
+80BC247701000000                        cmpb $0, 375(%rsp)
+807C242500                              cmpb $0, 37(%rsp)
+807C242600                              cmpb $0, 38(%rsp)
+807C242700                              cmpb $0, 39(%rsp)
+807C242B00                              cmpb $0, 43(%rsp)
+41807C242C00                            cmpb $0, 44(%r12)
+41807D2C00                              cmpb $0, 44(%r13)
+80782C00                                cmpb $0, 44(%rax)
+807C242E00                              cmpb $0, 46(%rsp)
+807C242F00                              cmpb $0, 47(%rsp)
+807C243000                              cmpb $0, 48(%rsp)
+80780400                                cmpb $0, 4(%rax)
+807C080400                              cmpb $0, 4(%rax,%rcx)
+807C243700                              cmpb $0, 55(%rsp)
+80780500                                cmpb $0, 5(%rax)
+807C243C00                              cmpb $0, 60(%rsp)
+807C240600                              cmpb $0, 6(%rsp)
+80780800                                cmpb $0, 8(%rax)
+807F0800                                cmpb $0, 8(%rdi)
+803D03B3060000                          cmpb $0, 439043(%rip)
+803D0900000000                          cmpb $0, 9(%rip)
+42803C2000                              cmpb $0, (%rax,%r12)
+42803C2800                              cmpb $0, (%rax,%r13)
+803C3800                                cmpb $0, (%rax,%rdi)
+803C0300                                cmpb $0, (%rbx,%rax)
+42803C1100                              cmpb $0, (%rcx,%r10)
+42803C0900                              cmpb $0, (%rcx,%r9)
+803C3900                                cmpb $0, (%rcx,%rdi)
+803F00                                  cmpb $0, (%rdi)
+803C0700                                cmpb $0, (%rdi,%rax)
+803C0200                                cmpb $0, (%rdx,%rax)
+803E00                                  cmpb $0, (%rsi)
+41807C240164                            cmpb $100, 1(%r12)
+41807C240165                            cmpb $101, 1(%r12)
+41807C2401F6                            cmpb $-10, 1(%r12)
+41807C240166                            cmpb $102, 1(%r12)
+3C66                                    cmpb $102, %al
+41807C240167                            cmpb $103, 1(%r12)
+41807C240168                            cmpb $104, 1(%r12)
+41807C240169                            cmpb $105, 1(%r12)
+41807C24016A                            cmpb $106, 1(%r12)
+41807C24016B                            cmpb $107, 1(%r12)
+41807C24016C                            cmpb $108, 1(%r12)
+42803C216C                              cmpb $108, (%rcx,%r12)
+41807C24016D                            cmpb $109, 1(%r12)
+4080FF0A                                cmpb $10, %dil
+41807C24016E                            cmpb $110, 1(%r12)
+807C246B01                              cmpb $1, 107(%rsp)
+807C246F01                              cmpb $1, 111(%rsp)
+41807C24026F                            cmpb $111, 2(%r12)
+807C247001                              cmpb $1, 112(%rsp)
+80F96F                                  cmpb $111, %cl
+807C240B01                              cmpb $1, 11(%rsp)
+41807C240170                            cmpb $112, 1(%r12)
+41807C240270                            cmpb $112, 2(%r12)
+41807C240171                            cmpb $113, 1(%r12)
+80BC248A00000001                        cmpb $1, 138(%rsp)
+80FB71                                  cmpb $113, %bl
+80F98F                                  cmpb $-113, %cl
+41807C240172                            cmpb $114, 1(%r12)
+80FB72                                  cmpb $114, %bl
+41807C240173                            cmpb $115, 1(%r12)
+80FB73                                  cmpb $115, %bl
+807C240F01                              cmpb $1, 15(%rsp)
+41807C240174                            cmpb $116, 1(%r12)
+807C241001                              cmpb $1, 16(%rsp)
+41807C240175                            cmpb $117, 1(%r12)
+41807C240176                            cmpb $118, 1(%r12)
+80F90B                                  cmpb $11, %cl
+41807C2401F4                            cmpb $-12, 1(%r12)
+807C241601                              cmpb $1, 22(%rsp)
+80780183                                cmpb $-125, 1(%rax)
+41807C24017E                            cmpb $126, 1(%r12)
+41807C24027E                            cmpb $126, 2(%r12)
+41807C24017F                            cmpb $127, 1(%r12)
+41807C24027F                            cmpb $127, 2(%r12)
+807C241B01                              cmpb $1, 27(%rsp)
+3C80                                    cmpb $-128, %al
+80780E0D                                cmpb $13, 14(%rax)
+41807C2401F3                            cmpb $-13, 1(%r12)
+807C242601                              cmpb $1, 38(%rsp)
+80FBF3                                  cmpb $-13, %bl
+41807C2401F2                            cmpb $-14, 1(%r12)
+807C242A01                              cmpb $1, 42(%rsp)
+807C242F01                              cmpb $1, 47(%rsp)
+807C243001                              cmpb $1, 48(%rsp)
+80FBF2                                  cmpb $-14, %bl
+807C246A0F                              cmpb $15, 106(%rsp)
+80BC248B0000000F                        cmpb $15, 139(%rsp)
+807C24130F                              cmpb $15, 19(%rsp)
+41807C2401F1                            cmpb $-15, 1(%r12)
+41807C24010F                            cmpb $15, 1(%r12)
+807C24190F                              cmpb $15, 25(%rsp)
+807C241A0F                              cmpb $15, 26(%rsp)
+807C241B0F                              cmpb $15, 27(%rsp)
+807C243D0F                              cmpb $15, 61(%rsp)
+807C243E0F                              cmpb $15, 62(%rsp)
+807C243F0F                              cmpb $15, 63(%rsp)
+807C244F0F                              cmpb $15, 79(%rsp)
+3C0F                                    cmpb $15, %al
+80FB0F                                  cmpb $15, %bl
+41803C240F                              cmpb $15, (%r12)
+4180FC0F                                cmpb $15, %r12b
+4180FF0F                                cmpb $15, %r15b
+41807C240210                            cmpb $16, 2(%r12)
+807C244301                              cmpb $1, 67(%rsp)
+3CF0                                    cmpb $-16, %al
+3C10                                    cmpb $16, %al
+80F910                                  cmpb $16, %cl
+8038F0                                  cmpb $-16, (%rax)
+41807C2401EF                            cmpb $-17, 1(%r12)
+41807C240211                            cmpb $17, 2(%r12)
+41807C2401EE                            cmpb $-18, 1(%r12)
+41807C240112                            cmpb $18, 1(%r12)
+807F0801                                cmpb $1, 8(%rdi)
+80781113                                cmpb $19, 17(%rax)
+41807C2401ED                            cmpb $-19, 1(%r12)
+41807C240113                            cmpb $19, 1(%r12)
+3C01                                    cmpb $1, %al
+80FB01                                  cmpb $1, %bl
+80F901                                  cmpb $1, %cl
+4080FF01                                cmpb $1, %dil
+80FA01                                  cmpb $1, %dl
+4180FC01                                cmpb $1, %r12b
+4180FE01                                cmpb $1, %r14b
+4180F801                                cmpb $1, %r8b
+4080FE01                                cmpb $1, %sil
+41807C2401EC                            cmpb $-20, 1(%r12)
+41807C2401EB                            cmpb $-21, 1(%r12)
+41807C240115                            cmpb $21, 1(%r12)
+80FA15                                  cmpb $21, %dl
+41807C2401FE                            cmpb $-2, 1(%r12)
+41807C2401EA                            cmpb $-22, 1(%r12)
+41807C240116                            cmpb $22, 1(%r12)
+41807C2401E9                            cmpb $-23, 1(%r12)
+41807C240117                            cmpb $23, 1(%r12)
+41807C2401E8                            cmpb $-24, 1(%r12)
+41807C240118                            cmpb $24, 1(%r12)
+41807C2401E7                            cmpb $-25, 1(%r12)
+3C19                                    cmpb $25, %al
+41807C2401E6                            cmpb $-26, 1(%r12)
+41807C2402E6                            cmpb $-26, 2(%r12)
+41807C2401E5                            cmpb $-27, 1(%r12)
+8078081B                                cmpb $27, 8(%rax)
+41807C2401E4                            cmpb $-28, 1(%r12)
+41807C2401E3                            cmpb $-29, 1(%r12)
+8078021D                                cmpb $29, 2(%rax)
+80FBE3                                  cmpb $-29, %bl
+80F902                                  cmpb $2, %cl
+41807C2401E2                            cmpb $-30, 1(%r12)
+41807C2401E1                            cmpb $-31, 1(%r12)
+41807C2401FD                            cmpb $-3, 1(%r12)
+41807C2401E0                            cmpb $-32, 1(%r12)
+41807C2401DF                            cmpb $-33, 1(%r12)
+807C242603                              cmpb $3, 38(%rsp)
+4180FCDF                                cmpb $-33, %r12b
+41807C2401DE                            cmpb $-34, 1(%r12)
+4180FCDE                                cmpb $-34, %r12b
+41807C2401DD                            cmpb $-35, 1(%r12)
+4180FCDD                                cmpb $-35, %r12b
+80780503                                cmpb $3, 5(%rax)
+41807C2401DC                            cmpb $-36, 1(%r12)
+80780324                                cmpb $36, 3(%rax)
+4180FCDC                                cmpb $-36, %r12b
+41807C2401DB                            cmpb $-37, 1(%r12)
+3C25                                    cmpb $37, %al
+4180FCDB                                cmpb $-37, %r12b
+41807C2401DA                            cmpb $-38, 1(%r12)
+4180FCDA                                cmpb $-38, %r12b
+41807C2401D9                            cmpb $-39, 1(%r12)
+4180FCD9                                cmpb $-39, %r12b
+4080FD03                                cmpb $3, %bpl
+4180FE03                                cmpb $3, %r14b
+4080FE03                                cmpb $3, %sil
+41807C2401D8                            cmpb $-40, 1(%r12)
+3C28                                    cmpb $40, %al
+80F928                                  cmpb $40, %cl
+4180FCD8                                cmpb $-40, %r12b
+41807C2401D7                            cmpb $-41, 1(%r12)
+41807C240129                            cmpb $41, 1(%r12)
+41807C2401FC                            cmpb $-4, 1(%r12)
+41807C2401D6                            cmpb $-42, 1(%r12)
+41807C24012A                            cmpb $42, 1(%r12)
+41807C2402D6                            cmpb $-42, 2(%r12)
+41807C24022A                            cmpb $42, 2(%r12)
+41807C2401D5                            cmpb $-43, 1(%r12)
+41807C24012B                            cmpb $43, 1(%r12)
+41807C2401D4                            cmpb $-44, 1(%r12)
+41807C24012C                            cmpb $44, 1(%r12)
+41807C24022C                            cmpb $44, 2(%r12)
+42803C212C                              cmpb $44, (%rcx,%r12)
+41807C2401D3                            cmpb $-45, 1(%r12)
+42803C212D                              cmpb $45, (%rcx,%r12)
+41807C2401D2                            cmpb $-46, 1(%r12)
+3C2E                                    cmpb $46, %al
+41807C2401D1                            cmpb $-47, 1(%r12)
+42803C2130                              cmpb $48, (%rcx,%r12)
+4080FD04                                cmpb $4, %bpl
+80F904                                  cmpb $4, %cl
+80FA04                                  cmpb $4, %dl
+4180FD04                                cmpb $4, %r13b
+80780B05                                cmpb $5, 11(%rax)
+41807C2401FB                            cmpb $-5, 1(%r12)
+80780AC8                                cmpb $-56, 10(%rax)
+807807C8                                cmpb $-56, 7(%rax)
+41807C2401C6                            cmpb $-58, 1(%r12)
+41807C2401C5                            cmpb $-59, 1(%r12)
+3C05                                    cmpb $5, %al
+4180FD05                                cmpb $5, %r13b
+4180FF05                                cmpb $5, %r15b
+41807C2401C4                            cmpb $-60, 1(%r12)
+41807C2401C3                            cmpb $-61, 1(%r12)
+41807C2401FA                            cmpb $-6, 1(%r12)
+41807C2401C2                            cmpb $-62, 1(%r12)
+41807C2402C2                            cmpb $-62, 2(%r12)
+80780CC1                                cmpb $-63, 12(%rax)
+80780FC1                                cmpb $-63, 15(%rax)
+807803C1                                cmpb $-63, 3(%rax)
+807806C1                                cmpb $-63, 6(%rax)
+807809C1                                cmpb $-63, 9(%rax)
+8038C1                                  cmpb $-63, (%rax)
+80780DC0                                cmpb $-64, 13(%rax)
+807810C0                                cmpb $-64, 16(%rax)
+807801C0                                cmpb $-64, 1(%rax)
+807804C0                                cmpb $-64, 4(%rax)
+3CBF                                    cmpb $-65, %al
+80780244                                cmpb $68, 2(%rax)
+80F906                                  cmpb $6, %cl
+41807C2401F9                            cmpb $-7, 1(%r12)
+41807C240150                            cmpb $80, 1(%r12)
+41807C240151                            cmpb $81, 1(%r12)
+41807C240251                            cmpb $81, 2(%r12)
+41807C2401F8                            cmpb $-8, 1(%r12)
+41807C2401AE                            cmpb $-82, 1(%r12)
+41807C240152                            cmpb $82, 1(%r12)
+41807C240252                            cmpb $82, 2(%r12)
+4180FDAE                                cmpb $-82, %r13b
+41807C240153                            cmpb $83, 1(%r12)
+41807C240253                            cmpb $83, 2(%r12)
+42803C2153                              cmpb $83, (%rcx,%r12)
+41807C240154                            cmpb $84, 1(%r12)
+41807C240155                            cmpb $85, 1(%r12)
+41807C240156                            cmpb $86, 1(%r12)
+4180FDAA                                cmpb $-86, %r13b
+41807C240157                            cmpb $87, 1(%r12)
+3C57                                    cmpb $87, %al
+41807C240158                            cmpb $88, 1(%r12)
+41807C240258                            cmpb $88, 2(%r12)
+41807C240159                            cmpb $89, 1(%r12)
+41807C240259                            cmpb $89, 2(%r12)
+41807C24015A                            cmpb $90, 1(%r12)
+41807C24025A                            cmpb $90, 2(%r12)
+4180FDA6                                cmpb $-90, %r13b
+41807C24015B                            cmpb $91, 1(%r12)
+41807C24025B                            cmpb $91, 2(%r12)
+41807C24015C                            cmpb $92, 1(%r12)
+41807C24025C                            cmpb $92, 2(%r12)
+4180FDA4                                cmpb $-92, %r13b
+41807C24015D                            cmpb $93, 1(%r12)
+41807C24025D                            cmpb $93, 2(%r12)
+41807C24015E                            cmpb $94, 1(%r12)
+41807C24025E                            cmpb $94, 2(%r12)
+41807C24015F                            cmpb $95, 1(%r12)
+41807C24025F                            cmpb $95, 2(%r12)
+3C5F                                    cmpb $95, %al
+41807C240160                            cmpb $96, 1(%r12)
+41807C240161                            cmpb $97, 1(%r12)
+41807C240162                            cmpb $98, 1(%r12)
+41807C240163                            cmpb $99, 1(%r12)
+3C09                                    cmpb $9, %al
+80F909                                  cmpb $9, %cl
+384708                                  cmpb %al, 8(%rdi)
+384208                                  cmpb %al, 8(%rdx)
+38C3                                    cmpb %al, %bl
+38C2                                    cmpb %al, %dl
+38DA                                    cmpb %bl, %dl
+4138CA                                  cmpb %cl, %r10b
+4038CE                                  cmpb %cl, %sil
+4038FE                                  cmpb %dil, %sil
+38D0                                    cmpb %dl, %al
+38D3                                    cmpb %dl, %bl
+38D1                                    cmpb %dl, %cl
+4038D7                                  cmpb %dl, %dil
+4138D3                                  cmpb %dl, %r11b
+4138D0                                  cmpb %dl, %r8b
+4138D1                                  cmpb %dl, %r9b
+4538D8                                  cmpb %r11b, %r8b
+4438BC248B000000                        cmpb %r15b, 139(%rsp)
+44387C244F                              cmpb %r15b, 79(%rsp)
+83780800                                cmpl $0, 8(%rax)
+3D40420F00                              cmpl $1000000, %eax
+81780C10270000                          cmpl $10000, 12(%rax)
+81FB10270000                            cmpl $10000, %ebx
+813F10270000                            cmpl $10000, (%rdi)
+3DE8030000                              cmpl $1000, %eax
+83F864                                  cmpl $100, %eax
+83FA64                                  cmpl $100, %edx
+83F865                                  cmpl $101, %eax
+83FA65                                  cmpl $101, %edx
+83F866                                  cmpl $102, %eax
+83F867                                  cmpl $103, %eax
+81FE00000040                            cmpl $1073741824, %esi
+3B44246C                                cmpl 108(%rsp), %eax
+3B5C246C                                cmpl 108(%rsp), %ebx
+443B64246C                              cmpl 108(%rsp), %r12d
+83F80A                                  cmpl $10, %eax
+4183FD0A                                cmpl $10, %r13d
+4183F90A                                cmpl $10, %r9d
+837C246C01                              cmpl $1, 108(%rsp)
+83F80B                                  cmpl $11, %eax
+3B442478                                cmpl 120(%rsp), %eax
+3B6C2478                                cmpl 120(%rsp), %ebp
+3B5C2478                                cmpl 120(%rsp), %ebx
+3B4C2478                                cmpl 120(%rsp), %ecx
+443B642478                              cmpl 120(%rsp), %r12d
+443B4C2478                              cmpl 120(%rsp), %r9d
+83F87C                                  cmpl $124, %eax
+4183FC7C                                cmpl $124, %r12d
+837818FF                                cmpl $-1, 24(%rax)
+837C241801                              cmpl $1, 24(%rsp)
+3B44247C                                cmpl 124(%rsp), %eax
+3D80000000                              cmpl $128, %eax
+81FB80000000                            cmpl $128, %ebx
+81F980000000                            cmpl $128, %ecx
+81FF80000000                            cmpl $128, %edi
+81FA80000000                            cmpl $128, %edx
+81FE80000000                            cmpl $128, %esi
+4181F880000000                          cmpl $128, %r8d
+837C241C01                              cmpl $1, 28(%rsp)
+3D7FFFFFFF                              cmpl $-129, %eax
+81FB7FFFFFFF                            cmpl $-129, %ebx
+81F97FFFFFFF                            cmpl $-129, %ecx
+81FF7FFFFFFF                            cmpl $-129, %edi
+81FA7FFFFFFF                            cmpl $-129, %edx
+81FE7FFFFFFF                            cmpl $-129, %esi
+4181F87FFFFFFF                          cmpl $-129, %r8d
+83F80C                                  cmpl $12, %eax
+453B7D0C                                cmpl 12(%r13), %r15d
+413B460C                                cmpl 12(%r14), %eax
+453B660C                                cmpl 12(%r14), %r12d
+453B6E0C                                cmpl 12(%r14), %r13d
+3B500C                                  cmpl 12(%rax), %edx
+3B730C                                  cmpl 12(%rbx), %esi
+443B6B0C                                cmpl 12(%rbx), %r13d
+3B590C                                  cmpl 12(%rcx), %ebx
+3B470C                                  cmpl 12(%rdi), %eax
+3B770C                                  cmpl 12(%rdi), %esi
+3B420C                                  cmpl 12(%rdx), %eax
+3B5A0C                                  cmpl 12(%rdx), %ebx
+3B7E0C                                  cmpl 12(%rsi), %edi
+3B44240C                                cmpl 12(%rsp), %eax
+443B7C240C                              cmpl 12(%rsp), %r15d
+837C242401                              cmpl $1, 36(%rsp)
+3D8A000000                              cmpl $138, %eax
+4181FD8A000000                          cmpl $138, %r13d
+83FF0D                                  cmpl $13, %edi
+83FE0D                                  cmpl $13, %esi
+833F0D                                  cmpl $13, (%rdi)
+81FB90000000                            cmpl $144, %ebx
+83F80E                                  cmpl $14, %eax
+83FD0E                                  cmpl $14, %ebp
+83FF0E                                  cmpl $14, %edi
+83BC241002000001                        cmpl $1, 528(%rsp)
+3D9E000000                              cmpl $158, %eax
+81FB9F000000                            cmpl $159, %ebx
+83F80F                                  cmpl $15, %eax
+83FD0F                                  cmpl $15, %ebp
+83FF0F                                  cmpl $15, %edi
+4183FE0F                                cmpl $15, %r14d
+83BC8CB002000001                        cmpl $1, 688(%rsp,%rcx,4)
+83BCB4B002000001                        cmpl $1, 688(%rsp,%rsi,4)
+83F810                                  cmpl $16, %eax
+83FF10                                  cmpl $16, %edi
+83FA10                                  cmpl $16, %edx
+4183FF10                                cmpl $16, %r15d
+443B7510                                cmpl 16(%rbp), %r14d
+3B442410                                cmpl 16(%rsp), %eax
+3B542410                                cmpl 16(%rsp), %edx
+443B742410                              cmpl 16(%rsp), %r14d
+3DAE000000                              cmpl $174, %eax
+3DAF000000                              cmpl $175, %eax
+81FBAF000000                            cmpl $175, %ebx
+83F811                                  cmpl $17, %eax
+81FBB6000000                            cmpl $182, %ebx
+81FBB7000000                            cmpl $183, %ebx
+837C245801                              cmpl $1, 88(%rsp)
+83F812                                  cmpl $18, %eax
+83F912                                  cmpl $18, %ecx
+837808FF                                cmpl $-1, 8(%rax)
+837C030801                              cmpl $1, 8(%rbx,%rax)
+837C110801                              cmpl $1, 8(%rcx,%rdx)
+837C070801                              cmpl $1, 8(%rdi,%rax)
+837C020801                              cmpl $1, 8(%rdx,%rax)
+837C820801                              cmpl $1, 8(%rdx,%rax,4)
+837C060801                              cmpl $1, 8(%rsi,%rax)
+837C160801                              cmpl $1, 8(%rsi,%rdx)
+837C240801                              cmpl $1, 8(%rsp)
+81FFC0000000                            cmpl $192, %edi
+83F813                                  cmpl $19, %eax
+83F8FF                                  cmpl $-1, %eax
+83F801                                  cmpl $1, %eax
+83FD01                                  cmpl $1, %ebp
+83FB01                                  cmpl $1, %ebx
+83F9FF                                  cmpl $-1, %ecx
+83F901                                  cmpl $1, %ecx
+83FFFF                                  cmpl $-1, %edi
+83FF01                                  cmpl $1, %edi
+83FAFF                                  cmpl $-1, %edx
+83FA01                                  cmpl $1, %edx
+83FE01                                  cmpl $1, %esi
+41833C2401                              cmpl $1, (%r12)
+4183FC01                                cmpl $1, %r12d
+41837D0001                              cmpl $1, (%r13)
+4183FD01                                cmpl $1, %r13d
+41833EFF                                cmpl $-1, (%r14)
+4183FE01                                cmpl $1, %r14d
+41833C8601                              cmpl $1, (%r14,%rax,4)
+41833FFF                                cmpl $-1, (%r15)
+4183F8FF                                cmpl $-1, %r8d
+4183F901                                cmpl $1, %r9d
+8338FF                                  cmpl $-1, (%rax)
+833801                                  cmpl $1, (%rax)
+837D00FF                                cmpl $-1, (%rbp)
+833B01                                  cmpl $1, (%rbx)
+833F01                                  cmpl $1, (%rdi)
+3DCA000000                              cmpl $202, %eax
+4181FDCA000000                          cmpl $202, %r13d
+3DCD000000                              cmpl $205, %eax
+3B5814                                  cmpl 20(%rax), %ebx
+443B6514                                cmpl 20(%rbp), %r12d
+443B742414                              cmpl 20(%rsp), %r14d
+83BC240004000002                        cmpl $2, 1024(%rsp)
+837C246802                              cmpl $2, 104(%rsp)
+837C246C02                              cmpl $2, 108(%rsp)
+81FF00000080                            cmpl $-2147483648, %edi
+81FA00000080                            cmpl $-2147483648, %edx
+81FE00000080                            cmpl $-2147483648, %esi
+3DEA000000                              cmpl $234, %eax
+4181FDEA000000                          cmpl $234, %r13d
+837C242402                              cmpl $2, 36(%rsp)
+83F817                                  cmpl $23, %eax
+3DF0000000                              cmpl $240, %eax
+3DF2000000                              cmpl $242, %eax
+3DF3000000                              cmpl $243, %eax
+3B8424F8000000                          cmpl 248(%rsp), %eax
+443BA424F8000000                        cmpl 248(%rsp), %r12d
+83F818                                  cmpl $24, %eax
+3DFF000000                              cmpl $255, %eax
+81FFFF000000                            cmpl $255, %edi
+4181FEFF000000                          cmpl $255, %r14d
+3D00010000                              cmpl $256, %eax
+81FB00010000                            cmpl $256, %ebx
+81F900010000                            cmpl $256, %ecx
+81FF00010000                            cmpl $256, %edi
+81FA00010000                            cmpl $256, %edx
+81FE00010000                            cmpl $256, %esi
+4181F800010000                          cmpl $256, %r8d
+3D0A010000                              cmpl $266, %eax
+4181FD0A010000                          cmpl $266, %r13d
+83F81A                                  cmpl $26, %eax
+4183FC1A                                cmpl $26, %r12d
+83F81B                                  cmpl $27, %eax
+3D1C010000                              cmpl $284, %eax
+4181FC1C010000                          cmpl $284, %r12d
+3D1D010000                              cmpl $285, %eax
+83F81C                                  cmpl $28, %eax
+4183FC1C                                cmpl $28, %r12d
+413B6C241C                              cmpl 28(%r12), %ebp
+413B4C241C                              cmpl 28(%r12), %ecx
+413B4E1C                                cmpl 28(%r14), %ecx
+837C380802                              cmpl $2, 8(%rax,%rdi)
+837C030802                              cmpl $2, 8(%rbx,%rax)
+42837C290802                            cmpl $2, 8(%rcx,%r13)
+837C170802                              cmpl $2, 8(%rdi,%rdx)
+837C020802                              cmpl $2, 8(%rdx,%rax)
+837C160802                              cmpl $2, 8(%rsi,%rdx)
+3B44241C                                cmpl 28(%rsp), %eax
+83F802                                  cmpl $2, %eax
+83FD02                                  cmpl $2, %ebp
+83FBFE                                  cmpl $-2, %ebx
+83FB02                                  cmpl $2, %ebx
+83F902                                  cmpl $2, %ecx
+83FF02                                  cmpl $2, %edi
+83FA02                                  cmpl $2, %edx
+83FE02                                  cmpl $2, %esi
+41833C2402                              cmpl $2, (%r12)
+4183FC02                                cmpl $2, %r12d
+4183FDFE                                cmpl $-2, %r13d
+4183FD02                                cmpl $2, %r13d
+4183FE02                                cmpl $2, %r14d
+41833C86FE                              cmpl $-2, (%r14,%rax,4)
+41833C9EFE                              cmpl $-2, (%r14,%rbx,4)
+4183F902                                cmpl $2, %r9d
+833802                                  cmpl $2, (%rax)
+837D0002                                cmpl $2, (%rbp)
+833B02                                  cmpl $2, (%rbx)
+833F02                                  cmpl $2, (%rdi)
+833C9FFE                                cmpl $-2, (%rdi,%rbx,4)
+833A02                                  cmpl $2, (%rdx)
+833C9EFE                                cmpl $-2, (%rsi,%rbx,4)
+83BC240004000003                        cmpl $3, 1024(%rsp)
+3D3C010000                              cmpl $316, %eax
+4181FC3C010000                          cmpl $316, %r12d
+83F81F                                  cmpl $31, %eax
+83FB1F                                  cmpl $31, %ebx
+83F91F                                  cmpl $31, %ecx
+83FF1F                                  cmpl $31, %edi
+83FE1F                                  cmpl $31, %esi
+3D00800000                              cmpl $32768, %eax
+81FF00800000                            cmpl $32768, %edi
+81FA00800000                            cmpl $32768, %edx
+81FE00800000                            cmpl $32768, %esi
+4181F800800000                          cmpl $32768, %r8d
+3DFF7FFFFF                              cmpl $-32769, %eax
+81FFFF7FFFFF                            cmpl $-32769, %edi
+81FAFF7FFFFF                            cmpl $-32769, %edx
+81FEFF7FFFFF                            cmpl $-32769, %esi
+4181F8FF7FFFFF                          cmpl $-32769, %r8d
+83F820                                  cmpl $32, %eax
+4183FF20                                cmpl $32, %r15d
+443B6520                                cmpl 32(%rbp), %r12d
+443B6320                                cmpl 32(%rbx), %r12d
+837C242403                              cmpl $3, 36(%rsp)
+83F825                                  cmpl $37, %eax
+83F826                                  cmpl $38, %eax
+83FD26                                  cmpl $38, %ebp
+83FA26                                  cmpl $38, %edx
+83F803                                  cmpl $3, %eax
+83FB03                                  cmpl $3, %ebx
+83F903                                  cmpl $3, %ecx
+83FF03                                  cmpl $3, %edi
+83FE03                                  cmpl $3, %esi
+4183F903                                cmpl $3, %r9d
+833803                                  cmpl $3, (%rax)
+833903                                  cmpl $3, (%rcx)
+833F03                                  cmpl $3, (%rdi)
+833A03                                  cmpl $3, (%rdx)
+3D90010000                              cmpl $400, %eax
+3B5C2428                                cmpl 40(%rsp), %ebx
+83BC240004000004                        cmpl $4, 1024(%rsp)
+837C246804                              cmpl $4, 104(%rsp)
+3D9C010000                              cmpl $412, %eax
+4181FC9C010000                          cmpl $412, %r12d
+83F829                                  cmpl $41, %eax
+83F82B                                  cmpl $43, %eax
+837C242804                              cmpl $4, 40(%rsp)
+3DBC010000                              cmpl $444, %eax
+4181FCBC010000                          cmpl $444, %r12d
+837C243004                              cmpl $4, 48(%rsp)
+83F82E                                  cmpl $46, %eax
+3DDA010000                              cmpl $474, %eax
+4181FCDA010000                          cmpl $474, %r12d
+83F830                                  cmpl $48, %eax
+413B6C2430                              cmpl 48(%r12), %ebp
+3DF3010000                              cmpl $499, %eax
+83F804                                  cmpl $4, %eax
+83FD04                                  cmpl $4, %ebp
+83FB04                                  cmpl $4, %ebx
+83FF04                                  cmpl $4, %edi
+83FA04                                  cmpl $4, %edx
+83FE04                                  cmpl $4, %esi
+4183FD04                                cmpl $4, %r13d
+4183FE04                                cmpl $4, %r14d
+4183FF04                                cmpl $4, %r15d
+4183F804                                cmpl $4, %r8d
+837D0004                                cmpl $4, (%rbp)
+3B4104                                  cmpl 4(%rcx), %eax
+833F04                                  cmpl $4, (%rdi)
+3B4204                                  cmpl 4(%rdx), %eax
+443B6C2404                              cmpl 4(%rsp), %r13d
+3DF4010000                              cmpl $500, %eax
+8138E00EA0E1                            cmpl $-509604128, (%rax)
+817808E00DA0E1                          cmpl $-509604384, 8(%rax)
+817814E009A0E1                          cmpl $-509605408, 20(%rax)
+817810E006A0E1                          cmpl $-509606176, 16(%rax)
+81780CE002A0E1                          cmpl $-509607200, 12(%rax)
+817804E001A0E1                          cmpl $-509607456, 4(%rax)
+83F832                                  cmpl $50, %eax
+3D10020000                              cmpl $528, %eax
+81FA10020000                            cmpl $528, %edx
+83BC247001000005                        cmpl $5, 368(%rsp)
+83F836                                  cmpl $54, %eax
+83F837                                  cmpl $55, %eax
+83F805                                  cmpl $5, %eax
+83FD05                                  cmpl $5, %ebp
+83FB05                                  cmpl $5, %ebx
+83FF05                                  cmpl $5, %edi
+83FA05                                  cmpl $5, %edx
+41833C2405                              cmpl $5, (%r12)
+837D0005                                cmpl $5, (%rbp)
+833F05                                  cmpl $5, (%rdi)
+83F83C                                  cmpl $60, %eax
+4183FC3C                                cmpl $60, %r12d
+443B74243C                              cmpl 60(%rsp), %r14d
+83F83E                                  cmpl $62, %eax
+83FA3E                                  cmpl $62, %edx
+83F83F                                  cmpl $63, %eax
+83FB40                                  cmpl $64, %ebx
+81FEFFFF0000                            cmpl $65535, %esi
+3D00000100                              cmpl $65536, %eax
+81FF00000100                            cmpl $65536, %edi
+81FA00000100                            cmpl $65536, %edx
+4181F800000100                          cmpl $65536, %r8d
+3BB484A0020000                          cmpl 672(%rsp,%rax,4), %esi
+837C245006                              cmpl $6, 80(%rsp)
+817C2410FF0F0100                        cmpl $69631, 16(%rsp)
+817C2414FF0F0100                        cmpl $69631, 20(%rsp)
+817D1000100100                          cmpl $69632, 16(%rbp)
+817F0400100100                          cmpl $69632, 4(%rdi)
+3D00100100                              cmpl $69632, %eax
+81FD00100100                            cmpl $69632, %ebp
+813C9000100100                          cmpl $69632, (%rax,%rdx,4)
+3D01100100                              cmpl $69633, %eax
+81FD01100100                            cmpl $69633, %ebp
+81FB01100100                            cmpl $69633, %ebx
+81F901100100                            cmpl $69633, %ecx
+81FF01100100                            cmpl $69633, %edi
+81FA01100100                            cmpl $69633, %edx
+4181FC01100100                          cmpl $69633, %r12d
+4181FF01100100                          cmpl $69633, %r15d
+3D02100100                              cmpl $69634, %eax
+81FD02100100                            cmpl $69634, %ebp
+81FB02100100                            cmpl $69634, %ebx
+81F902100100                            cmpl $69634, %ecx
+81FF02100100                            cmpl $69634, %edi
+81FA02100100                            cmpl $69634, %edx
+4181FC02100100                          cmpl $69634, %r12d
+4181FF02100100                          cmpl $69634, %r15d
+3D03100100                              cmpl $69635, %eax
+81FD03100100                            cmpl $69635, %ebp
+81FB03100100                            cmpl $69635, %ebx
+81F903100100                            cmpl $69635, %ecx
+81FF03100100                            cmpl $69635, %edi
+81FA03100100                            cmpl $69635, %edx
+4181FC03100100                          cmpl $69635, %r12d
+4181FF03100100                          cmpl $69635, %r15d
+3D04100100                              cmpl $69636, %eax
+81FD04100100                            cmpl $69636, %ebp
+81FB04100100                            cmpl $69636, %ebx
+81F904100100                            cmpl $69636, %ecx
+81FF04100100                            cmpl $69636, %edi
+81FA04100100                            cmpl $69636, %edx
+3D05100100                              cmpl $69637, %eax
+3D06100100                              cmpl $69638, %eax
+3D07100100                              cmpl $69639, %eax
+83F806                                  cmpl $6, %eax
+83FD06                                  cmpl $6, %ebp
+83FF06                                  cmpl $6, %edi
+83FA06                                  cmpl $6, %edx
+41833C2406                              cmpl $6, (%r12)
+41833E06                                cmpl $6, (%r14)
+837D0006                                cmpl $6, (%rbp)
+833F06                                  cmpl $6, (%rdi)
+813800200100                            cmpl $73728, (%rax)
+813F00200100                            cmpl $73728, (%rdi)
+813801200100                            cmpl $73729, (%rax)
+813A01200100                            cmpl $73729, (%rdx)
+813F03200100                            cmpl $73731, (%rdi)
+81F904200100                            cmpl $73732, %ecx
+81FA04200100                            cmpl $73732, %edx
+813806200100                            cmpl $73734, (%rax)
+4181FD00300100                          cmpl $77824, %r13d
+81FD01300100                            cmpl $77825, %ebp
+81FF01300100                            cmpl $77825, %edi
+81FE01300100                            cmpl $77825, %esi
+4181F801300100                          cmpl $77825, %r8d
+3D03300100                              cmpl $77827, %eax
+81F903300100                            cmpl $77827, %ecx
+81F904300100                            cmpl $77828, %ecx
+81FD05300100                            cmpl $77829, %ebp
+81FF05300100                            cmpl $77829, %edi
+81FE05300100                            cmpl $77829, %esi
+4181F805300100                          cmpl $77829, %r8d
+817F0807300100                          cmpl $77831, 8(%rdi)
+3D07300100                              cmpl $77831, %eax
+81FE09300100                            cmpl $77833, %esi
+81FF0D300100                            cmpl $77837, %edi
+81FE0D300100                            cmpl $77837, %esi
+4181FD0D300100                          cmpl $77837, %r13d
+81F90F300100                            cmpl $77839, %ecx
+81F910300100                            cmpl $77840, %ecx
+81FF11300100                            cmpl $77841, %edi
+81FE11300100                            cmpl $77841, %esi
+4181FD11300100                          cmpl $77841, %r13d
+81F913300100                            cmpl $77843, %ecx
+81FD15300100                            cmpl $77845, %ebp
+81FF15300100                            cmpl $77845, %edi
+81FE15300100                            cmpl $77845, %esi
+4181FD15300100                          cmpl $77845, %r13d
+81FE19300100                            cmpl $77849, %esi
+81F91B300100                            cmpl $77851, %ecx
+81FF1B300100                            cmpl $77851, %edi
+817C24141C300100                        cmpl $77852, 20(%rsp)
+81FD1C300100                            cmpl $77852, %ebp
+4181FC1C300100                          cmpl $77852, %r12d
+81FE1D300100                            cmpl $77853, %esi
+81F91F300100                            cmpl $77855, %ecx
+81FF1F300100                            cmpl $77855, %edi
+817C241420300100                        cmpl $77856, 20(%rsp)
+81FD20300100                            cmpl $77856, %ebp
+4181FC20300100                          cmpl $77856, %r12d
+81FE21300100                            cmpl $77857, %esi
+817C241423300100                        cmpl $77859, 20(%rsp)
+81FF23300100                            cmpl $77859, %edi
+817C241424300100                        cmpl $77860, 20(%rsp)
+81FE25300100                            cmpl $77861, %esi
+81FE29300100                            cmpl $77865, %esi
+81FE2D300100                            cmpl $77869, %esi
+81FD9A300100                            cmpl $77978, %ebp
+4181FC9A300100                          cmpl $77978, %r12d
+81FD9B300100                            cmpl $77979, %ebp
+4181FC9B300100                          cmpl $77979, %r12d
+81FD9D300100                            cmpl $77981, %ebp
+4181FC9D300100                          cmpl $77981, %r12d
+81FD9E300100                            cmpl $77982, %ebp
+4181FC9E300100                          cmpl $77982, %r12d
+81FD9F300100                            cmpl $77983, %ebp
+4181FC9F300100                          cmpl $77983, %r12d
+83F84F                                  cmpl $79, %eax
+83FB4F                                  cmpl $79, %ebx
+83F807                                  cmpl $7, %eax
+83FD07                                  cmpl $7, %ebp
+83FB07                                  cmpl $7, %ebx
+83F907                                  cmpl $7, %ecx
+83FF07                                  cmpl $7, %edi
+83FA07                                  cmpl $7, %edx
+83FE07                                  cmpl $7, %esi
+4183FC07                                cmpl $7, %r12d
+833807                                  cmpl $7, (%rax)
+837D0007                                cmpl $7, (%rbp)
+833907                                  cmpl $7, (%rcx)
+833F07                                  cmpl $7, (%rdi)
+833A07                                  cmpl $7, (%rdx)
+833E07                                  cmpl $7, (%rsi)
+837C246808                              cmpl $8, 104(%rsp)
+41817E1000400100                        cmpl $81920, 16(%r14)
+817B2000400100                          cmpl $81920, 32(%rbx)
+81FF00004131                            cmpl $826343424, %edi
+837C242808                              cmpl $8, 40(%rsp)
+41817C241C00500100                      cmpl $86016, 28(%r12)
+41817D1C00500100                        cmpl $86016, 28(%r13)
+41817F1C00500100                        cmpl $86016, 28(%r15)
+817D1C00500100                          cmpl $86016, 28(%rbp)
+817F1C00500100                          cmpl $86016, 28(%rdi)
+81780400500100                          cmpl $86016, 4(%rax)
+81FF00500100                            cmpl $86016, %edi
+81FF01500100                            cmpl $86017, %edi
+81FF02500100                            cmpl $86018, %edi
+81FF03500100                            cmpl $86019, %edi
+83780808                                cmpl $8, 8(%rax)
+837C060808                              cmpl $8, 8(%rsi,%rax)
+837C240808                              cmpl $8, 8(%rsp)
+83F808                                  cmpl $8, %eax
+83FB08                                  cmpl $8, %ebx
+83FF08                                  cmpl $8, %edi
+83FA08                                  cmpl $8, %edx
+83FE08                                  cmpl $8, %esi
+4183FD08                                cmpl $8, %r13d
+413B7D08                                cmpl 8(%r13), %edi
+4183FE08                                cmpl $8, %r14d
+4183F808                                cmpl $8, %r8d
+443B7C2408                              cmpl 8(%rsp), %r15d
+3D9A030000                              cmpl $922, %eax
+4181FC9A030000                          cmpl $922, %r12d
+81FE00700100                            cmpl $94208, %esi
+81FE01700100                            cmpl $94209, %esi
+81FE02700100                            cmpl $94210, %esi
+83FB5E                                  cmpl $94, %ebx
+3DBA030000                              cmpl $954, %eax
+4181FCBA030000                          cmpl $954, %r12d
+83FB5F                                  cmpl $95, %ebx
+4183FC5F                                cmpl $95, %r12d
+83FB60                                  cmpl $96, %ebx
+81FADD030000                            cmpl $989, %edx
+3DE7030000                              cmpl $999, %eax
+83F809                                  cmpl $9, %eax
+4183FE09                                cmpl $9, %r14d
+833809                                  cmpl $9, (%rax)
+39442478                                cmpl %eax, 120(%rsp)
+3944247C                                cmpl %eax, 124(%rsp)
+3944160C                                cmpl %eax, 12(%rsi,%rdx)
+3984248C000000                          cmpl %eax, 140(%rsp)
+394510                                  cmpl %eax, 16(%rbp)
+39442410                                cmpl %eax, 16(%rsp)
+39442414                                cmpl %eax, 20(%rsp)
+39842420010000                          cmpl %eax, 288(%rsp)
+394520                                  cmpl %eax, 32(%rbp)
+41394604                                cmpl %eax, 4(%r14)
+394704                                  cmpl %eax, 4(%rdi)
+394508                                  cmpl %eax, 8(%rbp)
+394308                                  cmpl %eax, 8(%rbx)
+394708                                  cmpl %eax, 8(%rdi)
+394208                                  cmpl %eax, 8(%rdx)
+394608                                  cmpl %eax, 8(%rsi)
+39442408                                cmpl %eax, 8(%rsp)
+39C5                                    cmpl %eax, %ebp
+39C3                                    cmpl %eax, %ebx
+39C1                                    cmpl %eax, %ecx
+39C2                                    cmpl %eax, %edx
+39C6                                    cmpl %eax, %esi
+4139C4                                  cmpl %eax, %r12d
+4139C5                                  cmpl %eax, %r13d
+413906                                  cmpl %eax, (%r14)
+4139C7                                  cmpl %eax, %r15d
+4139C0                                  cmpl %eax, %r8d
+390493                                  cmpl %eax, (%rbx,%rdx,4)
+3907                                    cmpl %eax, (%rdi)
+396C020C                                cmpl %ebp, 12(%rdx,%rax)
+396C2404                                cmpl %ebp, 4(%rsp)
+396808                                  cmpl %ebp, 8(%rax)
+39E8                                    cmpl %ebp, %eax
+39EB                                    cmpl %ebp, %ebx
+39EA                                    cmpl %ebp, %edx
+4139EC                                  cmpl %ebp, %r12d
+4139ED                                  cmpl %ebp, %r13d
+392C81                                  cmpl %ebp, (%rcx,%rax,4)
+392C82                                  cmpl %ebp, (%rdx,%rax,4)
+395C2410                                cmpl %ebx, 16(%rsp)
+39D8                                    cmpl %ebx, %eax
+39DD                                    cmpl %ebx, %ebp
+4139DD                                  cmpl %ebx, %r13d
+4139DF                                  cmpl %ebx, %r15d
+39CB                                    cmpl %ecx, %ebx
+39CF                                    cmpl %ecx, %edi
+39CA                                    cmpl %ecx, %edx
+39CE                                    cmpl %ecx, %esi
+4139CD                                  cmpl %ecx, %r13d
+4139CE                                  cmpl %ecx, %r14d
+4139CF                                  cmpl %ecx, %r15d
+4139C8                                  cmpl %ecx, %r8d
+390B                                    cmpl %ecx, (%rbx)
+39F8                                    cmpl %edi, %eax
+39F9                                    cmpl %edi, %ecx
+39FA                                    cmpl %edi, %edx
+39FE                                    cmpl %edi, %esi
+3954240C                                cmpl %edx, 12(%rsp)
+39542410                                cmpl %edx, 16(%rsp)
+39948420010000                          cmpl %edx, 288(%rsp,%rax,4)
+39948424010000                          cmpl %edx, 292(%rsp,%rax,4)
+39542444                                cmpl %edx, 68(%rsp)
+39D0                                    cmpl %edx, %eax
+39D5                                    cmpl %edx, %ebp
+39D3                                    cmpl %edx, %ebx
+39D1                                    cmpl %edx, %ecx
+39D7                                    cmpl %edx, %edi
+39D6                                    cmpl %edx, %esi
+4139D4                                  cmpl %edx, %r12d
+4139D5                                  cmpl %edx, %r13d
+4139D6                                  cmpl %edx, %r14d
+4139D7                                  cmpl %edx, %r15d
+3913                                    cmpl %edx, (%rbx)
+391487                                  cmpl %edx, (%rdi,%rax,4)
+391486                                  cmpl %edx, (%rsi,%rax,4)
+3974030C                                cmpl %esi, 12(%rbx,%rax)
+39F0                                    cmpl %esi, %eax
+39F1                                    cmpl %esi, %ecx
+39F2                                    cmpl %esi, %edx
+3934D1                                  cmpl %esi, (%rcx,%rdx,8)
+393487                                  cmpl %esi, (%rdi,%rax,4)
+4439D1                                  cmpl %r10d, %ecx
+4539D4                                  cmpl %r10d, %r12d
+4539D0                                  cmpl %r10d, %r8d
+4539DC                                  cmpl %r11d, %r12d
+4539D9                                  cmpl %r11d, %r9d
+4439E0                                  cmpl %r12d, %eax
+4439E3                                  cmpl %r12d, %ebx
+4439AC84C0000000                        cmpl %r13d, 192(%rsp,%rax,4)
+4439ED                                  cmpl %r13d, %ebp
+4439742414                              cmpl %r14d, 20(%rsp)
+4439F5                                  cmpl %r14d, %ebp
+4439FD                                  cmpl %r15d, %ebp
+4439FB                                  cmpl %r15d, %ebx
+4439F9                                  cmpl %r15d, %ecx
+4539FC                                  cmpl %r15d, %r12d
+4539FE                                  cmpl %r15d, %r14d
+4539F8                                  cmpl %r15d, %r8d
+4439C1                                  cmpl %r8d, %ecx
+4439C2                                  cmpl %r8d, %edx
+4439C6                                  cmpl %r8d, %esi
+4439CE                                  cmpl %r9d, %esi
+3B10                                    cmpl (%rax), %edx
+3B02                                    cmpl (%rdx), %eax
+3B06                                    cmpl (%rsi), %eax
+3B16                                    cmpl (%rsi), %edx
+3B0510000000                            cmpl 16(%rip), %eax
+443B2D10000000                          cmpl 16(%rip), %r13d
+443B3D10000000                          cmpl 16(%rip), %r15d
+3B0D14000000                            cmpl 20(%rip), %ecx
+48837F1000                              cmpq $0, 16(%rdi)
+48837C241000                            cmpq $0, 16(%rsp)
+4883BC24C000000000                      cmpq $0, 192(%rsp)
+4883BC24C800000000                      cmpq $0, 200(%rsp)
+4883781800                              cmpq $0, 24(%rax)
+48837B1800                              cmpq $0, 24(%rbx)
+48837C241800                            cmpq $0, 24(%rsp)
+4883BC240801000000                      cmpq $0, 264(%rsp)
+4883BC241001000000                      cmpq $0, 272(%rsp)
+49837D2000                              cmpq $0, 32(%r13)
+48837D2000                              cmpq $0, 32(%rbp)
+4883BC243002000000                      cmpq $0, 560(%rsp)
+4883780800                              cmpq $0, 8(%rax)
+48837B0800                              cmpq $0, 8(%rbx)
+48837F0800                              cmpq $0, 8(%rdi)
+48837CDA0800                            cmpq $0, 8(%rdx,%rbx,8)
+49833CC400                              cmpq $0, (%r12,%rax,8)
+49837CC50000                            cmpq $0, (%r13,%rax,8)
+49833CC100                              cmpq $0, (%r9,%rax,8)
+48833800                                cmpq $0, (%rax)
+48833CD800                              cmpq $0, (%rax,%rbx,8)
+48833CC800                              cmpq $0, (%rax,%rcx,8)
+48833CD000                              cmpq $0, (%rax,%rdx,8)
+48837CC50000                            cmpq $0, (%rbp,%rax,8)
+48833B00                                cmpq $0, (%rbx)
+48833CC300                              cmpq $0, (%rbx,%rax,8)
+48833CCB00                              cmpq $0, (%rbx,%rcx,8)
+48833900                                cmpq $0, (%rcx)
+48833CC100                              cmpq $0, (%rcx,%rax,8)
+48833F00                                cmpq $0, (%rdi)
+48833CC700                              cmpq $0, (%rdi,%rax,8)
+48833A00                                cmpq $0, (%rdx)
+48833CC200                              cmpq $0, (%rdx,%rax,8)
+48833CDA00                              cmpq $0, (%rdx,%rbx,8)
+48833E00                                cmpq $0, (%rsi)
+48833CC600                              cmpq $0, (%rsi,%rax,8)
+48833C2400                              cmpq $0, (%rsp)
+4881FFFF000000                          cmpq $255, %rdi
+4881FEFFFF0000                          cmpq $65535, %rsi
+4D39E8                                  cmpq %r13, %r8
+4D393424                                cmpq %r14, (%r12)
+48394508                                cmpq %rax, 8(%rbp)
+48394708                                cmpq %rax, 8(%rdi)
+4839C6                                  cmpq %rax, %rsi
+48392CC8                                cmpq %rbp, (%rax,%rcx,8)
+48394A08                                cmpq %rcx, 8(%rdx)
+483914C8                                cmpq %rdx, (%rax,%rcx,8)
+6683BC248E00000001                      cmpw $1, 142(%rsp)
+663D8000                                cmpw $128, %ax
+6681FB8000                              cmpw $128, %bx
+6681F98000                              cmpw $128, %cx
+6681FF8000                              cmpw $128, %di
+6681FA8000                              cmpw $128, %dx
+664181FB8000                            cmpw $128, %r11w
+664181F88000                            cmpw $128, %r8w
+664181F98000                            cmpw $128, %r9w
+663D7FFF                                cmpw $-129, %ax
+6681FB7FFF                              cmpw $-129, %bx
+6681FF7FFF                              cmpw $-129, %di
+6681FA7FFF                              cmpw $-129, %dx
+664181FB7FFF                            cmpw $-129, %r11w
+664181F87FFF                            cmpw $-129, %r8w
+664181F97FFF                            cmpw $-129, %r9w
+6681FE7FFF                              cmpw $-129, %si
+6683F801                                cmpw $1, %ax
+6683F901                                cmpw $1, %cx
+6683FF01                                cmpw $1, %di
+6683FA01                                cmpw $1, %dx
+66837B1E02                              cmpw $2, 30(%rbx)
+663D0001                                cmpw $256, %ax
+6681FB0001                              cmpw $256, %bx
+6681F90001                              cmpw $256, %cx
+6681FF0001                              cmpw $256, %di
+6681FA0001                              cmpw $256, %dx
+664181FB0001                            cmpw $256, %r11w
+664181F80001                            cmpw $256, %r8w
+664181F90001                            cmpw $256, %r9w
+6683F802                                cmpw $2, %ax
+6683FAFE                                cmpw $-2, %dx
+66833C02FE                              cmpw $-2, (%rdx,%rax)
+66833C06FE                              cmpw $-2, (%rsi,%rax)
+6683F803                                cmpw $3, %ax
+6641817C44180F27                        cmpw $9999, 24(%r12,%rax,2)
+66394708                                cmpw %ax, 8(%rdi)
+664139C1                                cmpw %ax, %r9w
+6639FE                                  cmpw %di, %si
+6639D1                                  cmpw %dx, %cx
+664139D1                                cmpw %dx, %r9w
+664439D2                                cmpw %r10w, %dx
+664439C0                                cmpw %r8w, %ax
+664439C2                                cmpw %r8w, %dx
+98                                      cwtl
+FEC8                                    decb %al
+FECA                                    decb %dl
+41FECE                                  decb %r14b
+40FECE                                  decb %sil
+FFC8                                    decl %eax
+FFCD                                    decl %ebp
+FFCB                                    decl %ebx
+FFC9                                    decl %ecx
+FFCF                                    decl %edi
+FFCA                                    decl %edx
+FFCE                                    decl %esi
+41FFCD                                  decl %r13d
+41FFCE                                  decl %r14d
+41FFCF                                  decl %r15d
+49F7F0                                  divq %r8
+F7FF                                    idivl %edi
+410FAF4508                              imull 8(%r13), %eax
+410FAF4608                              imull 8(%r14), %eax
+0FAF4308                                imull 8(%rbx), %eax
+0FAF7A08                                imull 8(%rdx), %edi
+0FAFE8                                  imull %eax, %ebp
+0FAFD1                                  imull %ecx, %edx
+440FAFC1                                imull %ecx, %r8d
+0FAFC2                                  imull %edx, %eax
+0FAFFE                                  imull %esi, %edi
+4869D856555555                          imulq $1431655766, %rax, %rbx
+FF44246C                                incl 108(%rsp)
+FF442478                                incl 120(%rsp)
+FF44247C                                incl 124(%rsp)
+FF44240C                                incl 12(%rsp)
+FF84248C000000                          incl 140(%rsp)
+FF442410                                incl 16(%rsp)
+FF442414                                incl 20(%rsp)
+FF442418                                incl 24(%rsp)
+FF842420010000                          incl 288(%rsp)
+FF44241C                                incl 28(%rsp)
+FF442428                                incl 40(%rsp)
+FF442438                                incl 56(%rsp)
+FF442444                                incl 68(%rsp)
+FFC0                                    incl %eax
+FFC5                                    incl %ebp
+FFC3                                    incl %ebx
+FFC1                                    incl %ecx
+FFC7                                    incl %edi
+FFC2                                    incl %edx
+FFC6                                    incl %esi
+FF0501000000                            incl 1(%rip)
+FF0508000000                            incl 8(%rip)
+FF0509000000                            incl 9(%rip)
+FF0500000000                            incl 0(%rip)
+41FFC2                                  incl %r10d
+41FFC4                                  incl %r12d
+41FFC5                                  incl %r13d
+41FFC6                                  incl %r14d
+41FF0486                                incl (%r14,%rax,4)
+41FFC7                                  incl %r15d
+41FFC0                                  incl %r8d
+41FFC1                                  incl %r9d
+FF00                                    incl (%rax)
+FF059DFFFFFF                            incl -99(%rip)
+49FFC5                                  incq %r13
+48FFC0                                  incq %rax
+48FFC5                                  incq %rbp
+48FFC3                                  incq %rbx
+48FFC7                                  incq %rdi
+48FFC6                                  incq %rsi
+48FF0521000000                          incq 33(%rip)
+FF24C59E0F0000                          jmp *3998(,%rax,8)
+FFE2                                    jmp *%rdx
+468D24F500000000                        leal 0(,%r14,8), %r12d
+8D04D500000000                          leal 0(,%rdx,8), %eax
+8D3CD500000000                          leal 0(,%rdx,8), %edi
+8D14B500000000                          leal 0(,%rsi,4), %edx
+8D14F500000000                          leal 0(,%rsi,8), %edx
+8D5890                                  leal -112(%rax), %ebx
+8D4390                                  leal -112(%rbx), %eax
+8D5880                                  leal -128(%rax), %ebx
+8D7880                                  leal -128(%rax), %edi
+448DA07FFFFFFF                          leal -129(%rax), %r12d
+448DA070FFFFFF                          leal -144(%rax), %r12d
+448DB070FFFFFF                          leal -144(%rax), %r14d
+8D9A50FFFFFF                            leal -176(%rdx), %ebx
+8D9848FFFFFF                            leal -184(%rax), %ebx
+8D9A48FFFFFF                            leal -184(%rdx), %ebx
+8DA840FFFFFF                            leal -192(%rax), %ebp
+418D4424FF                              leal -1(%r12), %eax
+418D442401                              leal 1(%r12), %eax
+418D5C2401                              leal 1(%r12), %ebx
+418D7C2401                              leal 1(%r12), %edi
+418D45FF                                leal -1(%r13), %eax
+418D4501                                leal 1(%r13), %eax
+418D46FF                                leal -1(%r14), %eax
+418D4601                                leal 1(%r14), %eax
+418D7EFF                                leal -1(%r14), %edi
+418D4701                                leal 1(%r15), %eax
+418D4801                                leal 1(%r8), %ecx
+8D3CC5FFFFFFFF                          leal -1(,%rax,8), %edi
+448D6001                                leal 1(%rax), %r12d
+428D6C20FF                              leal -1(%rax,%r12), %ebp
+428D5420FF                              leal -1(%rax,%r12), %edx
+8D4428FF                                leal -1(%rax,%rbp), %eax
+8D5C28FF                                leal -1(%rax,%rbp), %ebx
+8D4418FF                                leal -1(%rax,%rbx), %eax
+448D7C1801                              leal 1(%rax,%rbx), %r15d
+8D45FF                                  leal -1(%rbp), %eax
+8D4501                                  leal 1(%rbp), %eax
+8D7DFF                                  leal -1(%rbp), %edi
+8D7D01                                  leal 1(%rbp), %edi
+8D75FF                                  leal -1(%rbp), %esi
+448D6501                                leal 1(%rbp), %r12d
+448D6D01                                leal 1(%rbp), %r13d
+448D641DFF                              leal -1(%rbp,%rbx), %r12d
+8D43FF                                  leal -1(%rbx), %eax
+8D7B01                                  leal 1(%rbx), %edi
+8D5301                                  leal 1(%rbx), %edx
+8D41FF                                  leal -1(%rcx), %eax
+8D4101                                  leal 1(%rcx), %eax
+8D7901                                  leal 1(%rcx), %edi
+468D440101                              leal 1(%rcx,%r8), %r8d
+8D541101                                leal 1(%rcx,%rdx), %edx
+8D4701                                  leal 1(%rdi), %eax
+8D6FFF                                  leal -1(%rdi), %ebp
+8D5701                                  leal 1(%rdi), %edx
+8D42FF                                  leal -1(%rdx), %eax
+8D4201                                  leal 1(%rdx), %eax
+8D7A01                                  leal 1(%rdx), %edi
+468D440201                              leal 1(%rdx,%r8), %r8d
+8D440201                                leal 1(%rdx,%rax), %eax
+448D542A01                              leal 1(%rdx,%rbp), %r10d
+8D5C1A01                                leal 1(%rdx,%rbx), %ebx
+8D4C0A01                                leal 1(%rdx,%rcx), %ecx
+8D7C3A01                                leal 1(%rdx,%rdi), %edi
+8D743201                                leal 1(%rdx,%rsi), %esi
+8D46FF                                  leal -1(%rsi), %eax
+8D4601                                  leal 1(%rsi), %eax
+8D7C3E01                                leal 1(%rsi,%rdi), %edi
+8DA838FFFFFF                            leal -200(%rax), %ebp
+448DB038FFFFFF                          leal -200(%rax), %r14d
+8DA830FFFFFF                            leal -208(%rax), %ebp
+8D9830FFFFFF                            leal -208(%rax), %ebx
+8D42EC                                  leal -20(%rdx), %eax
+8D9828FFFFFF                            leal -216(%rax), %ebx
+8D9820FFFFFF                            leal -224(%rax), %ebx
+8D9818FFFFFF                            leal -232(%rax), %ebx
+8D43E8                                  leal -24(%rbx), %eax
+418D47FE                                leal -2(%r15), %eax
+418D41FE                                leal -2(%r9), %eax
+8D048502000000                          leal 2(,%rax,4), %eax
+8D6802                                  leal 2(%rax), %ebp
+8D45FE                                  leal -2(%rbp), %eax
+8D5D02                                  leal 2(%rbp), %ebx
+8D7D02                                  leal 2(%rbp), %edi
+8D5502                                  leal 2(%rbp), %edx
+8D7502                                  leal 2(%rbp), %esi
+448D6D02                                leal 2(%rbp), %r13d
+8D4302                                  leal 2(%rbx), %eax
+8D7902                                  leal 2(%rcx), %edi
+8D5702                                  leal 2(%rdi), %edx
+8D4602                                  leal 2(%rsi), %eax
+8D42E0                                  leal -32(%rdx), %eax
+418D4626                                leal 38(%r14), %eax
+8D6803                                  leal 3(%rax), %ebp
+8D5D03                                  leal 3(%rbp), %ebx
+8D7D03                                  leal 3(%rbp), %edi
+8D5503                                  leal 3(%rbp), %edx
+8D7503                                  leal 3(%rbp), %esi
+448D6503                                leal 3(%rbp), %r12d
+448D6D03                                leal 3(%rbp), %r13d
+8D4703                                  leal 3(%rdi), %eax
+8D4203                                  leal 3(%rdx), %eax
+8D6C2A03                                leal 3(%rdx,%rbp), %ebp
+8D4603                                  leal 3(%rsi), %eax
+8D54122B                                leal 43(%rdx,%rdx), %edx
+418D4630                                leal 48(%r14), %eax
+8D6C28D0                                leal -48(%rax,%rbp), %ebp
+8D42D0                                  leal -48(%rdx), %eax
+448D7C1804                              leal 4(%rax,%rbx), %r15d
+8D4504                                  leal 4(%rbp), %eax
+8D7DFC                                  leal -4(%rbp), %edi
+8D7D04                                  leal 4(%rbp), %edi
+8D41FC                                  leal -4(%rcx), %eax
+8D47FC                                  leal -4(%rdi), %eax
+8D42FC                                  leal -4(%rdx), %eax
+8D6C2A04                                leal 4(%rdx,%rbp), %ebp
+8D46FC                                  leal -4(%rsi), %eax
+8D4EFC                                  leal -4(%rsi), %ecx
+448D68C0                                leal -64(%rax), %r13d
+8D3CDD40000000                          leal 64(,%rbx,8), %edi
+8D3CFD40000000                          leal 64(,%rdi,8), %edi
+8D8701F0FEFF                            leal -69631(%rdi), %eax
+8D83FFEFFEFF                            leal -69633(%rbx), %eax
+8D81FFEFFEFF                            leal -69633(%rcx), %eax
+8D87FFEFFEFF                            leal -69633(%rdi), %eax
+8D82FFEFFEFF                            leal -69633(%rdx), %eax
+478DAC2D02100100                        leal 69634(%r13,%r13), %r13d
+448D68B8                                leal -72(%rax), %r13d
+8D81FDDFFEFF                            leal -73731(%rcx), %eax
+8D87FDDFFEFF                            leal -73731(%rdi), %eax
+8D82FDDFFEFF                            leal -73731(%rdx), %eax
+8D87FFCFFEFF                            leal -77825(%rdi), %eax
+8D81F3CFFEFF                            leal -77837(%rcx), %eax
+418D86F4CEFEFF                          leal -78092(%r14), %eax
+8D43F9                                  leal -7(%rbx), %eax
+8D4207                                  leal 7(%rdx), %eax
+448D60B0                                leal -80(%rax), %r12d
+448D68B0                                leal -80(%rax), %r13d
+8D8700C0FEFF                            leal -81920(%rdi), %eax
+8D58A8                                  leal -88(%rax), %ebx
+8D5AA8                                  leal -88(%rdx), %ebx
+418D850090FEFF                          leal -94208(%r13), %eax
+8D429F                                  leal -97(%rdx), %eax
+438D042C                                leal (%r12,%r13), %eax
+418D2C19                                leal (%r9,%rbx), %ebp
+458D0C09                                leal (%r9,%rcx), %r9d
+428D3C60                                leal (%rax,%r12,2), %edi
+428D3C28                                leal (%rax,%r13), %edi
+8D0440                                  leal (%rax,%rax,2), %eax
+8D3C00                                  leal (%rax,%rax), %edi
+448D3C28                                leal (%rax,%rbp), %r15d
+8D0498                                  leal (%rax,%rbx,4), %eax
+8D04D8                                  leal (%rax,%rbx,8), %eax
+448D3C18                                leal (%rax,%rbx), %r15d
+8D0410                                  leal (%rax,%rdx), %eax
+428D442500                              leal (%rbp,%r12), %eax
+428D5C2500                              leal (%rbp,%r12), %ebx
+428D742500                              leal (%rbp,%r12), %esi
+448D441D00                              leal (%rbp,%rbx), %r8d
+8D3C2B                                  leal (%rbx,%rbp), %edi
+8D342B                                  leal (%rbx,%rbp), %esi
+8D3C09                                  leal (%rcx,%rcx), %edi
+8D049F                                  leal (%rdi,%rbx,4), %eax
+8D3C0F                                  leal (%rdi,%rcx), %edi
+448D1C2A                                leal (%rdx,%rbp), %r11d
+8D141A                                  leal (%rdx,%rbx), %edx
+448D143A                                leal (%rdx,%rdi), %r10d
+8D0412                                  leal (%rdx,%rdx), %eax
+8D3C12                                  leal (%rdx,%rdx), %edi
+8D3476                                  leal (%rsi,%rsi,2), %esi
+8D0436                                  leal (%rsi,%rsi), %eax
+8D1436                                  leal (%rsi,%rsi), %edx
+4C8D2C8500000000                        leaq 0(,%rax,4), %r13
+488D1C8500000000                        leaq 0(,%rax,4), %rbx
+488D0C8500000000                        leaq 0(,%rax,4), %rcx
+488D3C8500000000                        leaq 0(,%rax,4), %rdi
+488D148500000000                        leaq 0(,%rax,4), %rdx
+488D049500000000                        leaq 0(,%rdx,4), %rax
+488D542464                              leaq 100(%rsp), %rdx
+488D4C2468                              leaq 104(%rsp), %rcx
+4C8D44246C                              leaq 108(%rsp), %r8
+4C8D7C2470                              leaq 112(%rsp), %r15
+488D7C2474                              leaq 116(%rsp), %rdi
+488D742478                              leaq 120(%rsp), %rsi
+4C8DBC2480000000                        leaq 128(%rsp), %r15
+488D7C240C                              leaq 12(%rsp), %rdi
+488D74240C                              leaq 12(%rsp), %rsi
+4C8D842490000000                        leaq 144(%rsp), %r8
+488D9C2490000000                        leaq 144(%rsp), %rbx
+488DBC2490000000                        leaq 144(%rsp), %rdi
+488DBC2497000000                        leaq 151(%rsp), %rdi
+488D942498000000                        leaq 152(%rsp), %rdx
+488D8424A0000000                        leaq 160(%rsp), %rax
+488DBC24A8000000                        leaq 168(%rsp), %rdi
+488DB424A8000000                        leaq 168(%rsp), %rsi
+488D7D10                                leaq 16(%rbp), %rdi
+488D7B10                                leaq 16(%rbx), %rdi
+488D7210                                leaq 16(%rdx), %rsi
+4C8D6C2410                              leaq 16(%rsp), %r13
+4C8D742410                              leaq 16(%rsp), %r14
+4C8D7C2410                              leaq 16(%rsp), %r15
+488D4C2410                              leaq 16(%rsp), %rcx
+488D542410                              leaq 16(%rsp), %rdx
+488D742410                              leaq 16(%rsp), %rsi
+488DBC24AC000000                        leaq 172(%rsp), %rdi
+488D9424AC000000                        leaq 172(%rsp), %rdx
+4C8D8424B0000000                        leaq 176(%rsp), %r8
+488DBC24B0000000                        leaq 176(%rsp), %rdi
+488D7311                                leaq 17(%rbx), %rsi
+488DBC24B4000000                        leaq 180(%rsp), %rdi
+488DBC24B8000000                        leaq 184(%rsp), %rdi
+488DBC24BC000000                        leaq 188(%rsp), %rdi
+488DB424BC000000                        leaq 188(%rsp), %rsi
+498D6F12                                leaq 18(%r15), %rbp
+4C8DAC24C0000000                        leaq 192(%rsp), %r13
+488DBC24C0000000                        leaq 192(%rsp), %rdi
+498D4701                                leaq 1(%r15), %rax
+498D7F01                                leaq 1(%r15), %rdi
+488D4501                                leaq 1(%rbp), %rax
+488D7B01                                leaq 1(%rbx), %rdi
+488D7901                                leaq 1(%rcx), %rdi
+488D8424CF000000                        leaq 207(%rsp), %rax
+4C8D8C24D0000000                        leaq 208(%rsp), %r9
+488D8C24D0000000                        leaq 208(%rsp), %rcx
+488DBC24D0000000                        leaq 208(%rsp), %rdi
+488D9424D0000000                        leaq 208(%rsp), %rdx
+4C8D642414                              leaq 20(%rsp), %r12
+488D4C2414                              leaq 20(%rsp), %rcx
+488D542414                              leaq 20(%rsp), %rdx
+488D8424E0000000                        leaq 224(%rsp), %rax
+4C8DAC24F0000000                        leaq 240(%rsp), %r13
+488D8424F0000000                        leaq 240(%rsp), %rax
+488DBC24F0000000                        leaq 240(%rsp), %rdi
+488D742418                              leaq 24(%rsp), %rsi
+4C8DAC2410010000                        leaq 272(%rsp), %r13
+488D8C2410010000                        leaq 272(%rsp), %rcx
+488D7C241C                              leaq 28(%rsp), %rdi
+498D7F02                                leaq 2(%r15), %rdi
+488D7902                                leaq 2(%rcx), %rdi
+488DBC2444010000                        leaq 324(%rsp), %rdi
+4C8D6C2420                              leaq 32(%rsp), %r13
+4C8D742420                              leaq 32(%rsp), %r14
+4C8D442420                              leaq 32(%rsp), %r8
+488D442420                              leaq 32(%rsp), %rax
+488D5C2420                              leaq 32(%rsp), %rbx
+488D4C2420                              leaq 32(%rsp), %rcx
+488D542420                              leaq 32(%rsp), %rdx
+488D742420                              leaq 32(%rsp), %rsi
+4C8DBC244B010000                        leaq 331(%rsp), %r15
+488DBC244C010000                        leaq 332(%rsp), %rdi
+488D9C2450010000                        leaq 336(%rsp), %rbx
+488D8C2450010000                        leaq 336(%rsp), %rcx
+488DBC2450010000                        leaq 336(%rsp), %rdi
+488DBC2454010000                        leaq 340(%rsp), %rdi
+488DBC2458010000                        leaq 344(%rsp), %rdi
+488DBC2460010000                        leaq 352(%rsp), %rdi
+488DBC2464010000                        leaq 356(%rsp), %rdi
+488DBC2468010000                        leaq 360(%rsp), %rdi
+488DBC246C010000                        leaq 364(%rsp), %rdi
+488DBC2470010000                        leaq 368(%rsp), %rdi
+488D7C2424                              leaq 36(%rsp), %rdi
+4C8DBC2477010000                        leaq 375(%rsp), %r15
+488DB42478010000                        leaq 376(%rsp), %rsi
+488D94247C010000                        leaq 380(%rsp), %rdx
+488D8C2480010000                        leaq 384(%rsp), %rcx
+4C8D842484010000                        leaq 388(%rsp), %r8
+488DB42488010000                        leaq 392(%rsp), %rsi
+488D94248C010000                        leaq 396(%rsp), %rdx
+498D7F03                                leaq 3(%r15), %rdi
+488D7903                                leaq 3(%rcx), %rdi
+4C8DAC2490010000                        leaq 400(%rsp), %r13
+488D8C2490010000                        leaq 400(%rsp), %rcx
+488D942490010000                        leaq 400(%rsp), %rdx
+488DB42490010000                        leaq 400(%rsp), %rsi
+4C8D842494010000                        leaq 404(%rsp), %r8
+488DB42498010000                        leaq 408(%rsp), %rsi
+488D4C2428                              leaq 40(%rsp), %rcx
+488D742428                              leaq 40(%rsp), %rsi
+488D94249C010000                        leaq 412(%rsp), %rdx
+488D8C24A0010000                        leaq 416(%rsp), %rcx
+4C8D8424A4010000                        leaq 420(%rsp), %r8
+488DB424A8010000                        leaq 424(%rsp), %rsi
+488D9424AC010000                        leaq 428(%rsp), %rdx
+488D8C24B0010000                        leaq 432(%rsp), %rcx
+4C8D8424B4010000                        leaq 436(%rsp), %r8
+488D7C242B                              leaq 43(%rsp), %rdi
+488DB424B8010000                        leaq 440(%rsp), %rsi
+488D9424BC010000                        leaq 444(%rsp), %rdx
+488D8C24C0010000                        leaq 448(%rsp), %rcx
+4C8D44242C                              leaq 44(%rsp), %r8
+488D7C242C                              leaq 44(%rsp), %rdi
+488D74242C                              leaq 44(%rsp), %rsi
+4C8D8424C4010000                        leaq 452(%rsp), %r8
+488DBC24C8010000                        leaq 456(%rsp), %rdi
+488DB424CC010000                        leaq 460(%rsp), %rsi
+488D9424D0010000                        leaq 464(%rsp), %rdx
+488D8C24D4010000                        leaq 468(%rsp), %rcx
+4C8D8424D8010000                        leaq 472(%rsp), %r8
+488DB424DC010000                        leaq 476(%rsp), %rsi
+488D9424E0010000                        leaq 480(%rsp), %rdx
+488D8C24E4010000                        leaq 484(%rsp), %rcx
+4C8D8424E8010000                        leaq 488(%rsp), %r8
+498D5C9C30                              leaq 48(%r12,%rbx,4), %rbx
+498D448530                              leaq 48(%r13,%rax,4), %rax
+488D440530                              leaq 48(%rbp,%rax), %rax
+488D4C0330                              leaq 48(%rbx,%rax), %rcx
+4C8D442430                              leaq 48(%rsp), %r8
+488D6C2430                              leaq 48(%rsp), %rbp
+488D4C2430                              leaq 48(%rsp), %rcx
+488D742430                              leaq 48(%rsp), %rsi
+488DB424EC010000                        leaq 492(%rsp), %rsi
+488D9424F0010000                        leaq 496(%rsp), %rdx
+488D441804                              leaq 4(%rax,%rbx), %rax
+488D8C24F4010000                        leaq 500(%rsp), %rcx
+4C8D8424F8010000                        leaq 504(%rsp), %r8
+488DB424FC010000                        leaq 508(%rsp), %rsi
+488D942400020000                        leaq 512(%rsp), %rdx
+488D8C2404020000                        leaq 516(%rsp), %rcx
+4C8D842408020000                        leaq 520(%rsp), %r8
+488DB4240C020000                        leaq 524(%rsp), %rsi
+488D942410020000                        leaq 528(%rsp), %rdx
+488D542434                              leaq 52(%rsp), %rdx
+488D8C2414020000                        leaq 532(%rsp), %rcx
+4C8D842418020000                        leaq 536(%rsp), %r8
+488DB4241C020000                        leaq 540(%rsp), %rsi
+488D942420020000                        leaq 544(%rsp), %rdx
+488D8C2424020000                        leaq 548(%rsp), %rcx
+4C8D842428020000                        leaq 552(%rsp), %r8
+488DBC242C020000                        leaq 556(%rsp), %rdi
+4C8DB42440020000                        leaq 576(%rsp), %r14
+488D8C2440020000                        leaq 576(%rsp), %rcx
+488D942440020000                        leaq 576(%rsp), %rdx
+488D7C243C                              leaq 60(%rsp), %rdi
+488D8C2480020000                        leaq 640(%rsp), %rcx
+488D942480020000                        leaq 640(%rsp), %rdx
+4C8D742440                              leaq 64(%rsp), %r14
+488D4C2440                              leaq 64(%rsp), %rcx
+488D742440                              leaq 64(%rsp), %rsi
+488DBC24A0020000                        leaq 672(%rsp), %rdi
+488D8C24C0020000                        leaq 704(%rsp), %rcx
+4C8DAC2400030000                        leaq 768(%rsp), %r13
+4C8DB42400030000                        leaq 768(%rsp), %r14
+488D8C2400030000                        leaq 768(%rsp), %rcx
+488D942400030000                        leaq 768(%rsp), %rdx
+488D7C244C                              leaq 76(%rsp), %rdi
+488D4C2450                              leaq 80(%rsp), %rcx
+4C8DA42440030000                        leaq 832(%rsp), %r12
+4C8DAC2440030000                        leaq 832(%rsp), %r13
+4C8DB42440030000                        leaq 832(%rsp), %r14
+4C8DBC2440030000                        leaq 832(%rsp), %r15
+4C8DA42480030000                        leaq 896(%rsp), %r12
+4C8DAC2480030000                        leaq 896(%rsp), %r13
+4C8DB42480030000                        leaq 896(%rsp), %r14
+4C8DBC2480030000                        leaq 896(%rsp), %r15
+488D9C2480030000                        leaq 896(%rsp), %rbx
+488D8C2480030000                        leaq 896(%rsp), %rcx
+488D942480030000                        leaq 896(%rsp), %rdx
+488DB42480030000                        leaq 896(%rsp), %rsi
+488D4208                                leaq 8(%rdx), %rax
+488D44245C                              leaq 92(%rsp), %rax
+488D542460                              leaq 96(%rsp), %rdx
+498D041C                                leaq (%r12,%rbx), %rax
+488D0440                                leaq (%rax,%rax,2), %rax
+488D0480                                leaq (%rax,%rax,4), %rax
+488D1480                                leaq (%rax,%rax,4), %rdx
+488D0428                                leaq (%rax,%rbp), %rax
+4E8D6C2500                              leaq (%rbp,%r12), %r13
+4A8D5C2500                              leaq (%rbp,%r12), %rbx
+488D1C5B                                leaq (%rbx,%rbx,2), %rbx
+488D049B                                leaq (%rbx,%rbx,4), %rax
+488D3C9B                                leaq (%rbx,%rbx,4), %rdi
+488D0449                                leaq (%rcx,%rcx,2), %rax
+488D1449                                leaq (%rcx,%rcx,2), %rdx
+488D3C2F                                leaq (%rdi,%rbp), %rdi
+488D3C7F                                leaq (%rdi,%rdi,2), %rdi
+488D0452                                leaq (%rdx,%rdx,2), %rax
+488D0492                                leaq (%rdx,%rdx,4), %rax
+488D342E                                leaq (%rsi,%rbp), %rsi
+488D04B6                                leaq (%rsi,%rsi,4), %rax
+448B4C2468                              movl 104(%rsp), %r9d
+8B5C247C                                movl 124(%rsp), %ebx
+8B780C                                  movl 12(%rax), %edi
+8B4C24EC                                movl -20(%rsp), %ecx
+8B4218                                  movl 24(%rdx), %eax
+448BA42424010000                        movl 292(%rsp), %r12d
+8B442430                                movl 48(%rsp), %eax
+418B442408                              movl 8(%r12), %eax
+8B4008                                  movl 8(%rax), %eax
+8B7808                                  movl 8(%rax), %edi
+8B4508                                  movl 8(%rbp), %eax
+448B4108                                movl 8(%rcx), %r8d
+448B4208                                movl 8(%rdx), %r8d
+8B5424F8                                movl -8(%rsp), %edx
+8B542408                                movl 8(%rsp), %edx
+8B742408                                movl 8(%rsp), %esi
+48BFFF799F501344D33F                    movabsq $4599094494223104511, %rdi
+48BFEF39FAFE422EE63F                    movabsq $4604418534313441775, %rdi
+48BF000000000000F03F                    movabsq $4607182418800017408, %rdi
+48BFFE822B654715F73F                    movabsq $4609176140021203710, %rdi
+48BF182D4454FB210940                    movabsq $4614256656552045848, %rdi
+48BF71A379094F930A40                    movabsq $4614662735865160561, %rdi
+48BF000000000000F87F                    movabsq $9221120237041090560, %rdi
+C6878C00000000                          movb $0, 140(%rdi)
+C684249000000000                        movb $0, 144(%rsp)
+C6879C00000000                          movb $0, 156(%rdi)
+C6879D00000000                          movb $0, 157(%rdi)
+C6879E00000000                          movb $0, 158(%rdi)
+C644240F00                              movb $0, 15(%rsp)
+C68424A000000000                        movb $0, 160(%rsp)
+C68404A000000000                        movb $0, 160(%rsp,%rax)
+C6401000                                movb $0, 16(%rax)
+C644101000                              movb $0, 16(%rax,%rdx)
+C6431000                                movb $0, 16(%rbx)
+C644161000                              movb $0, 16(%rsi,%rdx)
+C6401100                                movb $0, 17(%rax)
+C6431100                                movb $0, 17(%rbx)
+C6401200                                movb $0, 18(%rax)
+C6431200                                movb $0, 18(%rbx)
+C684240E01000000                        movb $0, 270(%rsp)
+C684241B01000000                        movb $0, 283(%rsp)
+C684244B01000000                        movb $0, 331(%rsp)
+C684247701000000                        movb $0, 375(%rsp)
+C644242500                              movb $0, 37(%rsp)
+C644242700                              movb $0, 39(%rsp)
+C644242B00                              movb $0, 43(%rsp)
+C6402C00                                movb $0, 44(%rax)
+C644242F00                              movb $0, 47(%rsp)
+C644160400                              movb $0, 4(%rsi,%rdx)
+C6470800                                movb $0, 8(%rdi)
+C6050800000000                          movb $0, 8(%rip)
+C6400900                                movb $0, 9(%rax)
+C6050B00000000                          movb $0, 11(%rip)
+41C60600                                movb $0, (%r14)
+41C60700                                movb $0, (%r15)
+42C6042800                              movb $0, (%rax,%r13)
+C6040300                                movb $0, (%rbx,%rax)
+42C6041100                              movb $0, (%rcx,%r10)
+C60700                                  movb $0, (%rdi)
+C6040200                                movb $0, (%rdx,%rax)
+C684249700000001                        movb $1, 151(%rsp)
+C644240F01                              movb $1, 15(%rsp)
+C644111001                              movb $1, 16(%rcx,%rdx)
+C644242501                              movb $1, 37(%rsp)
+C644242701                              movb $1, 39(%rsp)
+C6402C01                                movb $1, 44(%rax)
+C644242F01                              movb $1, 47(%rsp)
+C644810401                              movb $1, 4(%rcx,%rax,4)
+C6050D00000001                          movb $1, 13(%rip)
+41C6042401                              movb $1, (%r12)
+41C6450001                              movb $1, (%r13)
+41C60701                                movb $1, (%r15)
+42C6042001                              movb $1, (%rax,%r12)
+C60701                                  movb $1, (%rdi)
+42C6043201                              movb $1, (%rdx,%r14)
+C6040201                                movb $1, (%rdx,%rax)
+C6056300000001                          movb $1, 99(%rip)
+C68404A00000002C                        movb $44, 160(%rsp,%rax)
+C604042C                                movb $44, (%rsp,%rax)
+C604042D                                movb $45, (%rsp,%rax)
+C6042430                                movb $48, (%rsp)
+C68404A000000031                        movb $49, 160(%rsp,%rax)
+C68404A000000052                        movb $82, 160(%rsp,%rax)
+88442467                                movb %al, 103(%rsp)
+8844246A                                movb %al, 106(%rsp)
+8844246B                                movb %al, 107(%rsp)
+8844246F                                movb %al, 111(%rsp)
+88442470                                movb %al, 112(%rsp)
+8844240B                                movb %al, 11(%rsp)
+8844247F                                movb %al, 127(%rsp)
+8884248A000000                          movb %al, 138(%rsp)
+8884248B000000                          movb %al, 139(%rsp)
+8844240E                                movb %al, 14(%rsp)
+8844240F                                movb %al, 15(%rsp)
+888414A0000000                          movb %al, 160(%rsp,%rdx)
+88442410                                movb %al, 16(%rsp)
+88442413                                movb %al, 19(%rsp)
+88442416                                movb %al, 22(%rsp)
+88442417                                movb %al, 23(%rsp)
+88442419                                movb %al, 25(%rsp)
+8844241A                                movb %al, 26(%rsp)
+8884240E010000                          movb %al, 270(%rsp)
+8884240F010000                          movb %al, 271(%rsp)
+8844241B                                movb %al, 27(%rsp)
+8844241E                                movb %al, 30(%rsp)
+8844241F                                movb %al, 31(%rsp)
+88442423                                movb %al, 35(%rsp)
+88442426                                movb %al, 38(%rsp)
+88442427                                movb %al, 39(%rsp)
+8844242B                                movb %al, 43(%rsp)
+88432C                                  movb %al, 44(%rbx)
+8844242F                                movb %al, 47(%rsp)
+88442430                                movb %al, 48(%rsp)
+8844243C                                movb %al, 60(%rsp)
+8844243D                                movb %al, 61(%rsp)
+8844243E                                movb %al, 62(%rsp)
+8844243F                                movb %al, 63(%rsp)
+88442443                                movb %al, 67(%rsp)
+88442406                                movb %al, 6(%rsp)
+8844244F                                movb %al, 79(%rsp)
+88442407                                movb %al, 7(%rsp)
+88442457                                movb %al, 87(%rsp)
+88040C                                  movb %al, (%rsp,%rcx)
+889C2487000000                          movb %bl, 135(%rsp)
+889C240F010000                          movb %bl, 271(%rsp)
+885C242E                                movb %bl, 46(%rsp)
+885808                                  movb %bl, 8(%rax)
+884C2417                                movb %cl, 23(%rsp)
+888C240F010000                          movb %cl, 271(%rsp)
+884C241B                                movb %cl, 27(%rsp)
+884C2420                                movb %cl, 32(%rsp)
+884C2427                                movb %cl, 39(%rsp)
+884C242B                                movb %cl, 43(%rsp)
+884C2430                                movb %cl, 48(%rsp)
+884C2440                                movb %cl, 64(%rsp)
+884C2460                                movb %cl, 96(%rsp)
+40887C242B                              movb %dil, 43(%rsp)
+40887C244B                              movb %dil, 75(%rsp)
+408838                                  movb %dil, (%rax)
+8854240D                                movb %dl, 13(%rsp)
+88542417                                movb %dl, 23(%rsp)
+8894240F010000                          movb %dl, 271(%rsp)
+8854241E                                movb %dl, 30(%rsp)
+8854241F                                movb %dl, 31(%rsp)
+88542423                                movb %dl, 35(%rsp)
+88542427                                movb %dl, 39(%rsp)
+8854242A                                movb %dl, 42(%rsp)
+8854242F                                movb %dl, 47(%rsp)
+88542430                                movb %dl, 48(%rsp)
+88542437                                movb %dl, 55(%rsp)
+8854243B                                movb %dl, 59(%rsp)
+88542443                                movb %dl, 67(%rsp)
+8854244F                                movb %dl, 79(%rsp)
+8854245F                                movb %dl, 95(%rsp)
+881408                                  movb %dl, (%rax,%rcx)
+881404                                  movb %dl, (%rsp,%rax)
+4488A4240F010000                        movb %r12b, 271(%rsp)
+448844240B                              movb %r8b, 11(%rsp)
+448844240F                              movb %r8b, 15(%rsp)
+4488441110                              movb %r8b, 16(%rcx,%rdx)
+4488441710                              movb %r8b, 16(%rdi,%rdx)
+4488441610                              movb %r8b, 16(%rsi,%rdx)
+4488442427                              movb %r8b, 39(%rsp)
+44884C2427                              movb %r9b, 39(%rsp)
+44880DA3120000                          movb %r9b, 4771(%rip)
+40887312                                movb %sil, 18(%rbx)
+89C0                                    movl %eax, %eax
+89C2                                    movl %eax, %edx
+89E8                                    movl %ebp, %eax
+4189EF                                  movl %ebp, %r15d
+89D8                                    movl %ebx, %eax
+89DB                                    movl %ebx, %ebx
+89DA                                    movl %ebx, %edx
+89CB                                    movl %ecx, %ebx
+89C9                                    movl %ecx, %ecx
+89F8                                    movl %edi, %eax
+89FF                                    movl %edi, %edi
+89D0                                    movl %edx, %eax
+89D2                                    movl %edx, %edx
+89F0                                    movl %esi, %eax
+89F2                                    movl %esi, %edx
+89F6                                    movl %esi, %esi
+C7476400000000                          movl $0, 100(%rdi)
+C7476800000000                          movl $0, 104(%rdi)
+C7476C00000000                          movl $0, 108(%rdi)
+C744246C00000000                        movl $0, 108(%rsp)
+C7477000000000                          movl $0, 112(%rdi)
+C7477400000000                          movl $0, 116(%rdi)
+C744247400000000                        movl $0, 116(%rsp)
+C7477800000000                          movl $0, 120(%rdi)
+C744247800000000                        movl $0, 120(%rsp)
+C7477C00000000                          movl $0, 124(%rdi)
+C744247C00000000                        movl $0, 124(%rsp)
+C7878000000000000000                    movl $0, 128(%rdi)
+C7400C00000000                          movl $0, 12(%rax)
+C7430C00000000                          movl $0, 12(%rbx)
+C7470C00000000                          movl $0, 12(%rdi)
+C744240C00000000                        movl $0, 12(%rsp)
+C7878400000000000000                    movl $0, 132(%rdi)
+C7878800000000000000                    movl $0, 136(%rdi)
+C784248C00000000000000                  movl $0, 140(%rsp)
+C7879000000000000000                    movl $0, 144(%rdi)
+C7879400000000000000                    movl $0, 148(%rdi)
+C7879800000000000000                    movl $0, 152(%rdi)
+C784249C00000000000000                  movl $0, 156(%rsp)
+C787A000000000000000                    movl $0, 160(%rdi)
+C7431000000000                          movl $0, 16(%rbx)
+C7471000000000                          movl $0, 16(%rdi)
+C744241000000000                        movl $0, 16(%rsp)
+C78424CC00000000000000                  movl $0, 204(%rsp)
+C7431400000000                          movl $0, 20(%rbx)
+C7471400000000                          movl $0, 20(%rdi)
+C744241400000000                        movl $0, 20(%rsp)
+C7401800000000                          movl $0, 24(%rax)
+C7471800000000                          movl $0, 24(%rdi)
+C784242001000000000000                  movl $0, 288(%rsp)
+C784842001000000000000                  movl $0, 288(%rsp,%rax,4)
+C7401C00000000                          movl $0, 28(%rax)
+C7471C00000000                          movl $0, 28(%rdi)
+C744241C00000000                        movl $0, 28(%rsp)
+C784242401000000000000                  movl $0, 292(%rsp)
+C7402000000000                          movl $0, 32(%rax)
+C7472000000000                          movl $0, 32(%rdi)
+C784245C01000000000000                  movl $0, 348(%rsp)
+C7472400000000                          movl $0, 36(%rdi)
+C7402800000000                          movl $0, 40(%rax)
+C7472800000000                          movl $0, 40(%rdi)
+C7472C00000000                          movl $0, 44(%rdi)
+C7403000000000                          movl $0, 48(%rax)
+C7473000000000                          movl $0, 48(%rdi)
+C7470400000000                          movl $0, 4(%rdi)
+C7473400000000                          movl $0, 52(%rdi)
+C7403800000000                          movl $0, 56(%rax)
+C7473800000000                          movl $0, 56(%rdi)
+C744243800000000                        movl $0, 56(%rsp)
+C784243C02000000000000                  movl $0, 572(%rsp)
+C7473C00000000                          movl $0, 60(%rdi)
+C7474000000000                          movl $0, 64(%rdi)
+C7474400000000                          movl $0, 68(%rdi)
+C7474800000000                          movl $0, 72(%rdi)
+C7474C00000000                          movl $0, 76(%rdi)
+C7475000000000                          movl $0, 80(%rdi)
+C7475400000000                          movl $0, 84(%rdi)
+C7475800000000                          movl $0, 88(%rdi)
+C744030800000000                        movl $0, 8(%rbx,%rax)
+C744130800000000                        movl $0, 8(%rbx,%rdx)
+C7470800000000                          movl $0, 8(%rdi)
+C744160800000000                        movl $0, 8(%rsi,%rdx)
+C744240800000000                        movl $0, 8(%rsp)
+C7475C00000000                          movl $0, 92(%rdi)
+C7476000000000                          movl $0, 96(%rdi)
+8B042500000000                          movl 0, %eax
+BE10270000                              movl $10000, %esi
+BAED030000                              movl $1005, %edx
+BAEE030000                              movl $1006, %edx
+B864000000                              movl $100, %eax
+8B7C2464                                movl 100(%rsp), %edi
+8B542464                                movl 100(%rsp), %edx
+8B742464                                movl 100(%rsp), %esi
+BAF5030000                              movl $1013, %edx
+C747140A000000                          movl $10, 20(%rdi)
+BAFF030000                              movl $1023, %edx
+BF00040000                              movl $1024, %edi
+8B842400040000                          movl 1024(%rsp), %eax
+BFFFFBFFFF                              movl $-1025, %edi
+BA04040000                              movl $1028, %edx
+BA05040000                              movl $1029, %edx
+BA3C280000                              movl $10300, %edx
+BA07040000                              movl $1031, %edx
+BA0A040000                              movl $1034, %edx
+BA0B040000                              movl $1035, %edx
+BA89280000                              movl $10377, %edx
+BA0D040000                              movl $1037, %edx
+C743280A000000                          movl $10, 40(%rbx)
+BA11040000                              movl $1041, %edx
+BA12040000                              movl $1042, %edx
+BAC5280000                              movl $10437, %edx
+BA13040000                              movl $1043, %edx
+BAD8280000                              movl $10456, %edx
+BA00290000                              movl $10496, %edx
+B868000000                              movl $104, %eax
+BA68000000                              movl $104, %edx
+8B4C2468                                movl 104(%rsp), %ecx
+8B7C2468                                movl 104(%rsp), %edi
+8B542468                                movl 104(%rsp), %edx
+8B742468                                movl 104(%rsp), %esi
+448B442468                              movl 104(%rsp), %r8d
+448B4C2468                              movl 104(%rsp), %r9d
+BA35290000                              movl $10549, %edx
+BA1E040000                              movl $1054, %edx
+BA1F040000                              movl $1055, %edx
+BA69000000                              movl $105, %edx
+BA6A000000                              movl $106, %edx
+BACF290000                              movl $10703, %edx
+B800000040                              movl $1073741824, %eax
+BF00000040                              movl $1073741824, %edi
+BE00000040                              movl $1073741824, %esi
+B801000040                              movl $1073741825, %eax
+B802000040                              movl $1073741826, %eax
+B803000040                              movl $1073741827, %eax
+B804000040                              movl $1073741828, %eax
+B805000040                              movl $1073741829, %eax
+B806000040                              movl $1073741830, %eax
+B807000040                              movl $1073741831, %eax
+B808000040                              movl $1073741832, %eax
+B809000040                              movl $1073741833, %eax
+B80A000040                              movl $1073741834, %eax
+B80B000040                              movl $1073741835, %eax
+B80C000040                              movl $1073741836, %eax
+B80D000040                              movl $1073741837, %eax
+B80E000040                              movl $1073741838, %eax
+B80F000040                              movl $1073741839, %eax
+BA6B000000                              movl $107, %edx
+C743286C000000                          movl $108, 40(%rbx)
+BA8F2A0000                              movl $10895, %edx
+B86C000000                              movl $108, %eax
+BA6C000000                              movl $108, %edx
+8B44246C                                movl 108(%rsp), %eax
+8B6C246C                                movl 108(%rsp), %ebp
+8B4C246C                                movl 108(%rsp), %ecx
+8B7C246C                                movl 108(%rsp), %edi
+8B54246C                                movl 108(%rsp), %edx
+BA9F2A0000                              movl $10911, %edx
+BAA92A0000                              movl $10921, %edx
+BAAA2A0000                              movl $10922, %edx
+BAF62A0000                              movl $10998, %edx
+B80A000000                              movl $10, %eax
+BF0A000000                              movl $10, %edi
+BA0A000000                              movl $10, %edx
+41BE0A000000                            movl $10, %r14d
+C7000A000000                            movl $10, (%rax)
+BAF82A0000                              movl $11000, %edx
+BA4C040000                              movl $1100, %edx
+BA4D040000                              movl $1101, %edx
+BA262B0000                              movl $11046, %edx
+C744246801000000                        movl $1, 104(%rsp)
+BA412B0000                              movl $11073, %edx
+BA422B0000                              movl $11074, %edx
+C744246C01000000                        movl $1, 108(%rsp)
+BA6E000000                              movl $110, %edx
+BA56040000                              movl $1110, %edx
+BA6F000000                              movl $111, %edx
+BAC42B0000                              movl $11204, %edx
+BAD42B0000                              movl $11220, %edx
+BAD92B0000                              movl $11225, %edx
+BA63040000                              movl $1123, %edx
+BAF92B0000                              movl $11257, %edx
+B870000000                              movl $112, %eax
+BA70000000                              movl $112, %edx
+C744030CFFFFFFFF                        movl $-1, 12(%rbx,%rax)
+C744010CFFFFFFFF                        movl $-1, 12(%rcx,%rax)
+C744160CFFFFFFFF                        movl $-1, 12(%rsi,%rdx)
+8B7C2470                                movl 112(%rsp), %edi
+BA70040000                              movl $1136, %edx
+B871000000                              movl $113, %eax
+BA71000000                              movl $113, %edx
+BAC22C0000                              movl $11458, %edx
+BA72000000                              movl $114, %edx
+BA402D0000                              movl $11584, %edx
+BA4C2D0000                              movl $11596, %edx
+BA87040000                              movl $1159, %edx
+BA73000000                              movl $115, %edx
+BAA12D0000                              movl $11681, %edx
+B874000000                              movl $116, %eax
+BA74000000                              movl $116, %edx
+C7442410FFFFFFFF                        movl $-1, 16(%rsp)
+C744241001000000                        movl $1, 16(%rsp)
+8B5C2474                                movl 116(%rsp), %ebx
+8B7C2474                                movl 116(%rsp), %edi
+8B542474                                movl 116(%rsp), %edx
+BA94040000                              movl $1172, %edx
+BA75000000                              movl $117, %edx
+BA76000000                              movl $118, %edx
+B877000000                              movl $119, %eax
+BF0B000000                              movl $11, %edi
+41BE0B000000                            movl $11, %r14d
+C7000B000000                            movl $11, (%rax)
+C7470C78000000                          movl $120, 12(%rdi)
+BAB5040000                              movl $1205, %edx
+B878000000                              movl $120, %eax
+BF78000000                              movl $120, %edi
+C74014FFFFFFFF                          movl $-1, 20(%rax)
+C7442414FFFFFFFF                        movl $-1, 20(%rsp)
+8B442478                                movl 120(%rsp), %eax
+8B4C2478                                movl 120(%rsp), %ecx
+8B542478                                movl 120(%rsp), %edx
+BABD040000                              movl $1213, %edx
+BABE040000                              movl $1214, %edx
+BA79000000                              movl $121, %edx
+BAC6040000                              movl $1222, %edx
+BACD040000                              movl $1229, %edx
+BAD7040000                              movl $1239, %edx
+B87C000000                              movl $124, %eax
+C74018FFFFFFFF                          movl $-1, 24(%rax)
+C7421801000000                          movl $1, 24(%rdx)
+8B44247C                                movl 124(%rsp), %eax
+8B5C247C                                movl 124(%rsp), %ebx
+8B4C247C                                movl 124(%rsp), %ecx
+8B7C247C                                movl 124(%rsp), %edi
+8B74247C                                movl 124(%rsp), %esi
+448B6C247C                              movl 124(%rsp), %r13d
+BAE2040000                              movl $1250, %edx
+BAE3040000                              movl $1251, %edx
+BAE4040000                              movl $1252, %edx
+C740440C000000                          movl $12, 68(%rax)
+41BA7F000000                            movl $127, %r10d
+41B97F000000                            movl $127, %r9d
+C7437880000000                          movl $128, 120(%rbx)
+BA01050000                              movl $1281, %edx
+C7434480000000                          movl $128, 68(%rbx)
+C744244880000000                        movl $128, 72(%rsp)
+C7405080000000                          movl $128, 80(%rax)
+C7435080000000                          movl $128, 80(%rbx)
+BA08050000                              movl $1288, %edx
+BA09050000                              movl $1289, %edx
+B980FFFFFF                              movl $-128, %ecx
+BF80000000                              movl $128, %edi
+41BA80FFFFFF                            movl $-128, %r10d
+C744241C01000000                        movl $1, 28(%rsp)
+8BBC2480000000                          movl 128(%rsp), %edi
+B80C000000                              movl $12, %eax
+BF0C000000                              movl $12, %edi
+41BD0C000000                            movl $12, %r13d
+41BE0C000000                            movl $12, %r14d
+418B400C                                movl 12(%r8), %eax
+C7000C000000                            movl $12, (%rax)
+8B400C                                  movl 12(%rax), %eax
+8B580C                                  movl 12(%rax), %ebx
+8B480C                                  movl 12(%rax), %ecx
+8B700C                                  movl 12(%rax), %esi
+8B4C380C                                movl 12(%rax,%rdi), %ecx
+448B6C380C                              movl 12(%rax,%rdi), %r13d
+8B74100C                                movl 12(%rax,%rdx), %esi
+8B5D0C                                  movl 12(%rbp), %ebx
+8B7D0C                                  movl 12(%rbp), %edi
+8B550C                                  movl 12(%rbp), %edx
+8B430C                                  movl 12(%rbx), %eax
+8B4B0C                                  movl 12(%rbx), %ecx
+8B7B0C                                  movl 12(%rbx), %edi
+8B530C                                  movl 12(%rbx), %edx
+8B74130C                                movl 12(%rbx,%rdx), %esi
+8B570C                                  movl 12(%rdi), %edx
+8B770C                                  movl 12(%rdi), %esi
+8B6C170C                                movl 12(%rdi,%rdx), %ebp
+8B4A0C                                  movl 12(%rdx), %ecx
+8B720C                                  movl 12(%rdx), %esi
+428B442A0C                              movl 12(%rdx,%r13), %eax
+8B460C                                  movl 12(%rsi), %eax
+8B440E0C                                movl 12(%rsi,%rcx), %eax
+8B44240C                                movl 12(%rsp), %eax
+8B4C240C                                movl 12(%rsp), %ecx
+8B7C240C                                movl 12(%rsp), %edi
+8B54240C                                movl 12(%rsp), %edx
+8B74240C                                movl 12(%rsp), %esi
+BA1A050000                              movl $1306, %edx
+BA1B050000                              movl $1307, %edx
+BA21050000                              movl $1313, %edx
+BA29050000                              movl $1321, %edx
+BA2C050000                              movl $1324, %edx
+BA2F050000                              movl $1327, %edx
+BF84000000                              movl $132, %edi
+C7442420FFFFFFFF                        movl $-1, 32(%rsp)
+8BBC2484000000                          movl 132(%rsp), %edi
+BA3A050000                              movl $1338, %edx
+BA3F050000                              movl $1343, %edx
+BA40050000                              movl $1344, %edx
+BA41050000                              movl $1345, %edx
+BA43050000                              movl $1347, %edx
+C784245C01000001000000                  movl $1, 348(%rsp)
+BA46050000                              movl $1350, %edx
+C7435088000000                          movl $136, 80(%rbx)
+C7405C88000000                          movl $136, 92(%rax)
+C7435C88000000                          movl $136, 92(%rbx)
+BF88000000                              movl $136, %edi
+BA88000000                              movl $136, %edx
+8B9C2488000000                          movl 136(%rsp), %ebx
+8B8C2488000000                          movl 136(%rsp), %ecx
+8BBC2488000000                          movl 136(%rsp), %edi
+C7842478010000FFFFFFFF                  movl $-1, 376(%rsp)
+C784247C010000FFFFFFFF                  movl $-1, 380(%rsp)
+C7842480010000FFFFFFFF                  movl $-1, 384(%rsp)
+BA6C050000                              movl $1388, %edx
+C7842484010000FFFFFFFF                  movl $-1, 388(%rsp)
+C7842488010000FFFFFFFF                  movl $-1, 392(%rsp)
+C784248C010000FFFFFFFF                  movl $-1, 396(%rsp)
+B80D000000                              movl $13, %eax
+41BE0D000000                            movl $13, %r14d
+C7000D000000                            movl $13, (%rax)
+C7842490010000FFFFFFFF                  movl $-1, 400(%rsp)
+C740688C000000                          movl $140, 104(%rax)
+C743688C000000                          movl $140, 104(%rbx)
+C7842494010000FFFFFFFF                  movl $-1, 404(%rsp)
+BA7F050000                              movl $1407, %edx
+C7842498010000FFFFFFFF                  movl $-1, 408(%rsp)
+C7435C8C000000                          movl $140, 92(%rbx)
+BF8C000000                              movl $140, %edi
+8B84248C000000                          movl 140(%rsp), %eax
+8B8C248C000000                          movl 140(%rsp), %ecx
+8BBC248C000000                          movl 140(%rsp), %edi
+8B94248C000000                          movl 140(%rsp), %edx
+C784249C010000FFFFFFFF                  movl $-1, 412(%rsp)
+C78424A0010000FFFFFFFF                  movl $-1, 416(%rsp)
+C78424A4010000FFFFFFFF                  movl $-1, 420(%rsp)
+BA8D050000                              movl $1421, %edx
+BA8E050000                              movl $1422, %edx
+BA8F050000                              movl $1423, %edx
+BA90050000                              movl $1424, %edx
+C78424A8010000FFFFFFFF                  movl $-1, 424(%rsp)
+C78424AC010000FFFFFFFF                  movl $-1, 428(%rsp)
+C78424B0010000FFFFFFFF                  movl $-1, 432(%rsp)
+C78424B4010000FFFFFFFF                  movl $-1, 436(%rsp)
+C78424B8010000FFFFFFFF                  movl $-1, 440(%rsp)
+C7838000000090000000                    movl $144, 128(%rbx)
+C78424BC010000FFFFFFFF                  movl $-1, 444(%rsp)
+C78424C0010000FFFFFFFF                  movl $-1, 448(%rsp)
+BF90000000                              movl $144, %edi
+8BB790000000                            movl 144(%rdi), %esi
+8BBC2490000000                          movl 144(%rsp), %edi
+BAAA050000                              movl $1450, %edx
+C78424C4010000FFFFFFFF                  movl $-1, 452(%rsp)
+BAB3050000                              movl $1459, %edx
+BA91000000                              movl $145, %edx
+C78424CC010000FFFFFFFF                  movl $-1, 460(%rsp)
+C78424D0010000FFFFFFFF                  movl $-1, 464(%rsp)
+C78424D4010000FFFFFFFF                  movl $-1, 468(%rsp)
+C78424D8010000FFFFFFFF                  movl $-1, 472(%rsp)
+C78424DC010000FFFFFFFF                  movl $-1, 476(%rsp)
+C78424E0010000FFFFFFFF                  movl $-1, 480(%rsp)
+C7437494000000                          movl $148, 116(%rbx)
+C78424E4010000FFFFFFFF                  movl $-1, 484(%rsp)
+C78424E8010000FFFFFFFF                  movl $-1, 488(%rsp)
+B894000000                              movl $148, %eax
+BF94000000                              movl $148, %edi
+8B8C2494000000                          movl 148(%rsp), %ecx
+8BBC2494000000                          movl 148(%rsp), %edi
+BAD2050000                              movl $1490, %edx
+C78424EC010000FFFFFFFF                  movl $-1, 492(%rsp)
+C78424F0010000FFFFFFFF                  movl $-1, 496(%rsp)
+BF0E000000                              movl $14, %edi
+41BE0E000000                            movl $14, %r14d
+C78424F4010000FFFFFFFF                  movl $-1, 500(%rsp)
+C78424F8010000FFFFFFFF                  movl $-1, 504(%rsp)
+C78424FC010000FFFFFFFF                  movl $-1, 508(%rsp)
+C7842400020000FFFFFFFF                  movl $-1, 512(%rsp)
+C7842404020000FFFFFFFF                  movl $-1, 516(%rsp)
+BA97000000                              movl $151, %edx
+C7842408020000FFFFFFFF                  movl $-1, 520(%rsp)
+C784240C020000FFFFFFFF                  movl $-1, 524(%rsp)
+C7842410020000FFFFFFFF                  movl $-1, 528(%rsp)
+BF98000000                              movl $152, %edi
+BA98000000                              movl $152, %edx
+8BBC2498000000                          movl 152(%rsp), %edi
+8B942498000000                          movl 152(%rsp), %edx
+BAFA050000                              movl $1530, %edx
+C7842414020000FFFFFFFF                  movl $-1, 532(%rsp)
+C7842418020000FFFFFFFF                  movl $-1, 536(%rsp)
+BA99000000                              movl $153, %edx
+BF9C000000                              movl $156, %edi
+8B84249C000000                          movl 156(%rsp), %eax
+8B8C249C000000                          movl 156(%rsp), %ecx
+8BBC249C000000                          movl 156(%rsp), %edi
+BF9D000000                              movl $157, %edi
+BF9E000000                              movl $158, %edi
+BA36060000                              movl $1590, %edx
+B80F000000                              movl $15, %eax
+BF0F000000                              movl $15, %edi
+41BE0F000000                            movl $15, %r14d
+BFA0000000                              movl $160, %edi
+BAA0000000                              movl $160, %edx
+8BBC24A0000000                          movl 160(%rsp), %edi
+BAA1000000                              movl $161, %edx
+BAA3000000                              movl $163, %edx
+B8A4000000                              movl $164, %eax
+BAA4000000                              movl $164, %edx
+8BBC24A4000000                          movl 164(%rsp), %edi
+BF0000FF00                              movl $16711680, %edi
+BAA8000000                              movl $168, %edx
+C744244401000000                        movl $1, 68(%rsp)
+C744240810000000                        movl $16, 8(%rsp)
+8BB424A8000000                          movl 168(%rsp), %esi
+B810000000                              movl $16, %eax
+BF10000000                              movl $16, %edi
+BA10000000                              movl $16, %edx
+418B442410                              movl 16(%r12), %eax
+418B742410                              movl 16(%r12), %esi
+41BE10000000                            movl $16, %r14d
+418B4010                                movl 16(%r8), %eax
+8B5D10                                  movl 16(%rbp), %ebx
+8B7D10                                  movl 16(%rbp), %edi
+8B7510                                  movl 16(%rbp), %esi
+448B6510                                movl 16(%rbp), %r12d
+8B7C9D10                                movl 16(%rbp,%rbx,4), %edi
+8B4310                                  movl 16(%rbx), %eax
+8B7B10                                  movl 16(%rbx), %edi
+8B7310                                  movl 16(%rbx), %esi
+8B44AB10                                movl 16(%rbx,%rbp,4), %eax
+8B4710                                  movl 16(%rdi), %eax
+8B7F10                                  movl 16(%rdi), %edi
+8B448710                                movl 16(%rdi,%rax,4), %eax
+8B548710                                movl 16(%rdi,%rax,4), %edx
+8B4210                                  movl 16(%rdx), %eax
+8B4610                                  movl 16(%rsi), %eax
+C7042410000000                          movl $16, (%rsp)
+8B442410                                movl 16(%rsp), %eax
+8B6C2410                                movl 16(%rsp), %ebp
+8B4C24F0                                movl -16(%rsp), %ecx
+8B4C2410                                movl 16(%rsp), %ecx
+8B7C2410                                movl 16(%rsp), %edi
+8B5424F0                                movl -16(%rsp), %edx
+8B542410                                movl 16(%rsp), %edx
+8B742410                                movl 16(%rsp), %esi
+448B7C2410                              movl 16(%rsp), %r15d
+448B442410                              movl 16(%rsp), %r8d
+C7442444AC000000                        movl $172, 68(%rsp)
+8BBC24AC000000                          movl 172(%rsp), %edi
+BAD8060000                              movl $1752, %edx
+BADE060000                              movl $1758, %edx
+BAE4060000                              movl $1764, %edx
+BF00450000                              movl $17664, %edi
+C7442444B0000000                        movl $176, 68(%rsp)
+8B8C24B0000000                          movl 176(%rsp), %ecx
+8BBC24B0000000                          movl 176(%rsp), %edi
+BAEA060000                              movl $1770, %edx
+BAB2000000                              movl $178, %edx
+B811000000                              movl $17, %eax
+BA0A070000                              movl $1802, %edx
+BA0B070000                              movl $1803, %edx
+B8B4000000                              movl $180, %eax
+BF00470000                              movl $18176, %edi
+BAB5000000                              movl $181, %edx
+BAB6000000                              movl $182, %edx
+BAB7000000                              movl $183, %edx
+BAB8000000                              movl $184, %edx
+8BBC24B8000000                          movl 184(%rsp), %edi
+BABA000000                              movl $186, %edx
+BFFFFFFF8F                              movl $-1879048193, %edi
+C744245801000000                        movl $1, 88(%rsp)
+8BBC24BC000000                          movl 188(%rsp), %edi
+C744010801000000                        movl $1, 8(%rcx,%rax)
+C744020801000000                        movl $1, 8(%rdx,%rax)
+C744240801000000                        movl $1, 8(%rsp)
+BABE000000                              movl $190, %edx
+BA76070000                              movl $1910, %edx
+BA77070000                              movl $1911, %edx
+C7401813000000                          movl $19, 24(%rax)
+BAC0000000                              movl $192, %edx
+8BBC24C0000000                          movl 192(%rsp), %edi
+BA98070000                              movl $1944, %edx
+BAA6070000                              movl $1958, %edx
+B8C4000000                              movl $196, %eax
+8BBC24C4000000                          movl 196(%rsp), %edi
+BAC5000000                              movl $197, %edx
+BAC7000000                              movl $199, %edx
+B8FFFFFFFF                              movl $-1, %eax
+B801000000                              movl $1, %eax
+BD01000000                              movl $1, %ebp
+B901000000                              movl $1, %ecx
+BFFFFFFFFF                              movl $-1, %edi
+BF01000000                              movl $1, %edi
+BA01000000                              movl $1, %edx
+BE01000000                              movl $1, %esi
+41BB01000000                            movl $1, %r11d
+41BC01000000                            movl $1, %r12d
+41BDFFFFFFFF                            movl $-1, %r13d
+41BEFFFFFFFF                            movl $-1, %r14d
+41BE01000000                            movl $1, %r14d
+41BF01000000                            movl $1, %r15d
+41B8FFFFFFFF                            movl $-1, %r8d
+41B801000000                            movl $1, %r8d
+41B901000000                            movl $1, %r9d
+C70001000000                            movl $1, (%rax)
+C70424FFFFFFFF                          movl $-1, (%rsp)
+8BBC24C8000000                          movl 200(%rsp), %edi
+BF00080000                              movl $2048, %edi
+8B8424CC000000                          movl 204(%rsp), %eax
+8B8C24CC000000                          movl 204(%rsp), %ecx
+8BBC24CC000000                          movl 204(%rsp), %edi
+BACD000000                              movl $205, %edx
+BACE000000                              movl $206, %edx
+BACF000000                              movl $207, %edx
+BA24080000                              movl $2084, %edx
+BAD0000000                              movl $208, %edx
+8BBC24D0000000                          movl 208(%rsp), %edi
+BA2C080000                              movl $2092, %edx
+BA2D080000                              movl $2093, %edx
+BA2F080000                              movl $2095, %edx
+BF00002000                              movl $2097152, %edi
+B814000000                              movl $20, %eax
+BF14000000                              movl $20, %edi
+BA14000000                              movl $20, %edx
+418B5C2414                              movl 20(%r12), %ebx
+418B542414                              movl 20(%r12), %edx
+418B4514                                movl 20(%r13), %eax
+418B6D14                                movl 20(%r13), %ebp
+418B4D14                                movl 20(%r13), %ecx
+418B5514                                movl 20(%r13), %edx
+458B7514                                movl 20(%r13), %r14d
+418B6E14                                movl 20(%r14), %ebp
+418B4E14                                movl 20(%r14), %ecx
+418B5614                                movl 20(%r14), %edx
+418B4F14                                movl 20(%r15), %ecx
+418B7014                                movl 20(%r8), %esi
+8B4014                                  movl 20(%rax), %eax
+8B5014                                  movl 20(%rax), %edx
+448B5014                                movl 20(%rax), %r10d
+448B7014                                movl 20(%rax), %r14d
+8B4314                                  movl 20(%rbx), %eax
+8B4B14                                  movl 20(%rbx), %ecx
+448B7314                                movl 20(%rbx), %r14d
+C70714000000                            movl $20, (%rdi)
+8B4F14                                  movl 20(%rdi), %ecx
+448B4714                                movl 20(%rdi), %r8d
+8B442414                                movl 20(%rsp), %eax
+8B7C2414                                movl 20(%rsp), %edi
+8B542414                                movl 20(%rsp), %edx
+8B742414                                movl 20(%rsp), %esi
+C744246C02000000                        movl $2, 108(%rsp)
+BAD2000000                              movl $210, %edx
+BA4C080000                              movl $2124, %edx
+B8D4000000                              movl $212, %eax
+8BBC24D4000000                          movl 212(%rsp), %edi
+BFD5000000                              movl $213, %edi
+BF00000080                              movl $-2147483648, %edi
+C784249400000002000000                  movl $2, 148(%rsp)
+C744871002000000                        movl $2, 16(%rdi,%rax,4)
+8BBC24D8000000                          movl 216(%rsp), %edi
+BA7C080000                              movl $2172, %edx
+BA7D080000                              movl $2173, %edx
+BA88080000                              movl $2184, %edx
+BA89080000                              movl $2185, %edx
+BA8A080000                              movl $2186, %edx
+BA8B080000                              movl $2187, %edx
+BA8C080000                              movl $2188, %edx
+BA93080000                              movl $2195, %edx
+BA96080000                              movl $2198, %edx
+BA97080000                              movl $2199, %edx
+BF15000000                              movl $21, %edi
+BA9D080000                              movl $2205, %edx
+BA9E080000                              movl $2206, %edx
+8BBC24DC000000                          movl 220(%rsp), %edi
+BAA3080000                              movl $2211, %edx
+BABB080000                              movl $2235, %edx
+BADF000000                              movl $223, %edx
+BAC0080000                              movl $2240, %edx
+8B8C24E0000000                          movl 224(%rsp), %ecx
+8BBC24E0000000                          movl 224(%rsp), %edi
+BAD0080000                              movl $2256, %edx
+BFD5080000                              movl $2261, %edi
+BAE5080000                              movl $2277, %edx
+BAE3000000                              movl $227, %edx
+BAED080000                              movl $2285, %edx
+B8E4000000                              movl $228, %eax
+8BBC24E4000000                          movl 228(%rsp), %edi
+BAFD080000                              movl $2301, %edx
+BAFE080000                              movl $2302, %edx
+BA08090000                              movl $2312, %edx
+BA0F090000                              movl $2319, %edx
+BA14090000                              movl $2324, %edx
+BAE8000000                              movl $232, %edx
+8BBC24E8000000                          movl 232(%rsp), %edi
+BA20090000                              movl $2336, %edx
+C784245C01000002000000                  movl $2, 348(%rsp)
+BAEA000000                              movl $234, %edx
+BA2E090000                              movl $2350, %edx
+BA30090000                              movl $2352, %edx
+BA33090000                              movl $2355, %edx
+BA3A090000                              movl $2362, %edx
+BA3B090000                              movl $2363, %edx
+BA5B090000                              movl $2395, %edx
+BA5C090000                              movl $2396, %edx
+BAF0000000                              movl $240, %edx
+8BBC24F0000000                          movl 240(%rsp), %edi
+C784249C00000018000000                  movl $24, 156(%rsp)
+B8F4000000                              movl $244, %eax
+8BBC24F4000000                          movl 244(%rsp), %edi
+BAF7000000                              movl $247, %edx
+BAF8000000                              movl $248, %edx
+8BBC24F8000000                          movl 248(%rsp), %edi
+BAF9000000                              movl $249, %edx
+B818000000                              movl $24, %eax
+BF18000000                              movl $24, %edi
+418B442418                              movl 24(%r12), %eax
+418B542418                              movl 24(%r12), %edx
+458B4C2418                              movl 24(%r12), %r9d
+418B4518                                movl 24(%r13), %eax
+418B5518                                movl 24(%r13), %edx
+458B6518                                movl 24(%r13), %r12d
+8B4018                                  movl 24(%rax), %eax
+8B4518                                  movl 24(%rbp), %eax
+8B5D18                                  movl 24(%rbp), %ebx
+8B5518                                  movl 24(%rbp), %edx
+8B7518                                  movl 24(%rbp), %esi
+448B6D18                                movl 24(%rbp), %r13d
+8B4B18                                  movl 24(%rbx), %ecx
+8B5318                                  movl 24(%rbx), %edx
+8B7318                                  movl 24(%rbx), %esi
+C7470402000000                          movl $2, 4(%rdi)
+8B7F18                                  movl 24(%rdi), %edi
+448B6718                                movl 24(%rdi), %r12d
+8B4218                                  movl 24(%rdx), %eax
+448B4A18                                movl 24(%rdx), %r9d
+8B5E18                                  movl 24(%rsi), %ebx
+8B442418                                movl 24(%rsp), %eax
+8B4C2418                                movl 24(%rsp), %ecx
+8B7C2418                                movl 24(%rsp), %edi
+8B542418                                movl 24(%rsp), %edx
+8B742418                                movl 24(%rsp), %esi
+BAFA000000                              movl $250, %edx
+8BBC24FC000000                          movl 252(%rsp), %edi
+BAFE000000                              movl $254, %edx
+B8FF000000                              movl $255, %eax
+BFFF000000                              movl $255, %edi
+41BAFF000000                            movl $255, %r10d
+41BCFF000000                            movl $255, %r12d
+41B9FF000000                            movl $255, %r9d
+BA00010000                              movl $256, %edx
+B819000000                              movl $25, %eax
+B804010000                              movl $260, %eax
+BA330A0000                              movl $2611, %edx
+BA390A0000                              movl $2617, %edx
+BA3D0A0000                              movl $2621, %edx
+BA550A0000                              movl $2645, %edx
+BA590A0000                              movl $2649, %edx
+BA09010000                              movl $265, %edx
+BA810A0000                              movl $2689, %edx
+BA9B0A0000                              movl $2715, %edx
+8BBC2410010000                          movl 272(%rsp), %edi
+BAAD0A0000                              movl $2733, %edx
+BA11010000                              movl $273, %edx
+BAB40A0000                              movl $2740, %edx
+BACD0A0000                              movl $2765, %edx
+BAD00A0000                              movl $2768, %edx
+B814010000                              movl $276, %eax
+BA14010000                              movl $276, %edx
+8BBC2414010000                          movl 276(%rsp), %edi
+8B942414010000                          movl 276(%rsp), %edx
+BAD30A0000                              movl $2771, %edx
+BAD60A0000                              movl $2774, %edx
+BAD80A0000                              movl $2776, %edx
+BAD90A0000                              movl $2777, %edx
+B816010000                              movl $278, %eax
+BAEF0A0000                              movl $2799, %edx
+B81B000000                              movl $27, %eax
+B818010000                              movl $280, %eax
+BA19010000                              movl $281, %edx
+B81A010000                              movl $282, %eax
+BA1B010000                              movl $283, %edx
+C743281C000000                          movl $28, 40(%rbx)
+B81C010000                              movl $284, %eax
+BA1C010000                              movl $284, %edx
+8B9C241C010000                          movl 284(%rsp), %ebx
+B81E010000                              movl $286, %eax
+BA1E010000                              movl $286, %edx
+BA3B0B0000                              movl $2875, %edx
+BA1F010000                              movl $287, %edx
+BA440B0000                              movl $2884, %edx
+BF20010000                              movl $288, %edi
+BA20010000                              movl $288, %edx
+C744245802000000                        movl $2, 88(%rsp)
+8B842420010000                          movl 288(%rsp), %eax
+8B8C2420010000                          movl 288(%rsp), %ecx
+B81C000000                              movl $28, %eax
+BF1C000000                              movl $28, %edi
+BA1C000000                              movl $28, %edx
+418B7C241C                              movl 28(%r12), %edi
+418B54241C                              movl 28(%r12), %edx
+418B4D1C                                movl 28(%r13), %ecx
+8B451C                                  movl 28(%rbp), %eax
+8B531C                                  movl 28(%rbx), %edx
+8B571C                                  movl 28(%rdi), %edx
+448B571C                                movl 28(%rdi), %r10d
+448B5F1C                                movl 28(%rdi), %r11d
+C7441A0802000000                        movl $2, 8(%rdx,%rbx)
+8B44241C                                movl 28(%rsp), %eax
+8B7C241C                                movl 28(%rsp), %edi
+8B74241C                                movl 28(%rsp), %esi
+BA630B0000                              movl $2915, %edx
+BA6C0B0000                              movl $2924, %edx
+BA710B0000                              movl $2929, %edx
+8BAC2424010000                          movl 292(%rsp), %ebp
+8BBC2424010000                          movl 292(%rsp), %edi
+8BB42424010000                          movl 292(%rsp), %esi
+BA26010000                              movl $294, %edx
+BF28010000                              movl $296, %edi
+8B842428010000                          movl 296(%rsp), %eax
+BA9B0B0000                              movl $2971, %edx
+BF1D000000                              movl $29, %edi
+B802000000                              movl $2, %eax
+B902000000                              movl $2, %ecx
+BF02000000                              movl $2, %edi
+BA02000000                              movl $2, %edx
+BE02000000                              movl $2, %esi
+41BE02000000                            movl $2, %r14d
+41C70486FEFFFFFF                        movl $-2, (%r14,%rax,4)
+C70002000000                            movl $2, (%rax)
+C70102000000                            movl $2, (%rcx)
+C70481FEFFFFFF                          movl $-2, (%rcx,%rax,4)
+C70202000000                            movl $2, (%rdx)
+BA2C010000                              movl $300, %edx
+BF30010000                              movl $304, %edi
+BA30010000                              movl $304, %edx
+BA31010000                              movl $305, %edx
+BA35010000                              movl $309, %edx
+BF1E000000                              movl $30, %edi
+C744244438010000                        movl $312, 68(%rsp)
+8B8C2438010000                          movl 312(%rsp), %ecx
+BA3A010000                              movl $314, %edx
+BA6D0C0000                              movl $3181, %edx
+BA6F0C0000                              movl $3183, %edx
+B81F000000                              movl $31, %eax
+BF1F000000                              movl $31, %edi
+BA830C0000                              movl $3203, %edx
+BA43010000                              movl $323, %edx
+C7401803000000                          movl $3, 24(%rax)
+BA45010000                              movl $325, %edx
+BA46010000                              movl $326, %edx
+B9FF7F0000                              movl $32767, %ecx
+41B9FF7F0000                            movl $32767, %r9d
+41BA0080FFFF                            movl $-32768, %r10d
+41B80080FFFF                            movl $-32768, %r8d
+BA47010000                              movl $327, %edx
+C744240820000000                        movl $32, 8(%rsp)
+BAE20C0000                              movl $3298, %edx
+BA49010000                              movl $329, %edx
+B820000000                              movl $32, %eax
+BF20000000                              movl $32, %edi
+BA20000000                              movl $32, %edx
+BE20000000                              movl $32, %esi
+418B4520                                movl 32(%r13), %eax
+41BE20000000                            movl $32, %r14d
+418B4620                                movl 32(%r14), %eax
+418B4720                                movl 32(%r15), %eax
+418B5F20                                movl 32(%r15), %ebx
+8B7820                                  movl 32(%rax), %edi
+8B5D20                                  movl 32(%rbp), %ebx
+8B7D20                                  movl 32(%rbp), %edi
+8B5520                                  movl 32(%rbp), %edx
+8B4720                                  movl 32(%rdi), %eax
+8B4F20                                  movl 32(%rdi), %ecx
+448B4720                                movl 32(%rdi), %r8d
+8B4220                                  movl 32(%rdx), %eax
+8B442420                                movl 32(%rsp), %eax
+8B4C2420                                movl 32(%rsp), %ecx
+8B7C2420                                movl 32(%rsp), %edi
+8B542420                                movl 32(%rsp), %edx
+8B742420                                movl 32(%rsp), %esi
+448B442420                              movl 32(%rsp), %r8d
+448B4C2420                              movl 32(%rsp), %r9d
+BAEE0C0000                              movl $3310, %edx
+BAF30C0000                              movl $3315, %edx
+BA0B0D0000                              movl $3339, %edx
+BA4D010000                              movl $333, %edx
+BA110D0000                              movl $3345, %edx
+BA1A0D0000                              movl $3354, %edx
+BA1D0D0000                              movl $3357, %edx
+BA250D0000                              movl $3365, %edx
+8B842450010000                          movl 336(%rsp), %eax
+8B842454010000                          movl 340(%rsp), %eax
+BA56010000                              movl $342, %edx
+BA59010000                              movl $345, %edx
+BA850D0000                              movl $3461, %edx
+BA880D0000                              movl $3464, %edx
+BA890D0000                              movl $3465, %edx
+BA8D0D0000                              movl $3469, %edx
+BA8F0D0000                              movl $3471, %edx
+BA910D0000                              movl $3473, %edx
+BA960D0000                              movl $3478, %edx
+BA5B010000                              movl $347, %edx
+448BA4245C010000                        movl 348(%rsp), %r12d
+BA5D010000                              movl $349, %edx
+BA5E010000                              movl $350, %edx
+BA5F010000                              movl $351, %edx
+BA60010000                              movl $352, %edx
+8B842468010000                          movl 360(%rsp), %eax
+BA3C0E0000                              movl $3644, %edx
+C744244003000000                        movl $3, 64(%rsp)
+8B84246C010000                          movl 364(%rsp), %eax
+B824000000                              movl $36, %eax
+BF24000000                              movl $36, %edi
+8B442424                                movl 36(%rsp), %eax
+8B7C2424                                movl 36(%rsp), %edi
+8B542424                                movl 36(%rsp), %edx
+8B742424                                movl 36(%rsp), %esi
+BA77010000                              movl $375, %edx
+8B8C2478010000                          movl 376(%rsp), %ecx
+8BBC2478010000                          movl 376(%rsp), %edi
+8B942478010000                          movl 376(%rsp), %edx
+8BB42478010000                          movl 376(%rsp), %esi
+B925000000                              movl $37, %ecx
+BF25000000                              movl $37, %edi
+8B8C2480010000                          movl 384(%rsp), %ecx
+8BBC2480010000                          movl 384(%rsp), %edi
+8B942480010000                          movl 384(%rsp), %edx
+8BB42480010000                          movl 384(%rsp), %esi
+BA84010000                              movl $388, %edx
+C744245803000000                        movl $3, 88(%rsp)
+8B8C2484010000                          movl 388(%rsp), %ecx
+8BBC2484010000                          movl 388(%rsp), %edi
+8B942484010000                          movl 388(%rsp), %edx
+8BB42484010000                          movl 388(%rsp), %esi
+BA86010000                              movl $390, %edx
+8B8C2488010000                          movl 392(%rsp), %ecx
+8BBC2488010000                          movl 392(%rsp), %edi
+8B942488010000                          movl 392(%rsp), %edx
+8BB42488010000                          movl 392(%rsp), %esi
+B803000000                              movl $3, %eax
+BD03000000                              movl $3, %ebp
+B903000000                              movl $3, %ecx
+BF03000000                              movl $3, %edi
+BA03000000                              movl $3, %edx
+BE03000000                              movl $3, %esi
+C70003000000                            movl $3, (%rax)
+C70103000000                            movl $3, (%rcx)
+C70203000000                            movl $3, (%rdx)
+8B8C2490010000                          movl 400(%rsp), %ecx
+8BBC2490010000                          movl 400(%rsp), %edi
+8B942490010000                          movl 400(%rsp), %edx
+8BB42490010000                          movl 400(%rsp), %esi
+BA92010000                              movl $402, %edx
+BA93010000                              movl $403, %edx
+BA94010000                              movl $404, %edx
+8B8C2494010000                          movl 404(%rsp), %ecx
+8BBC2494010000                          movl 404(%rsp), %edi
+8B942494010000                          movl 404(%rsp), %edx
+8BB42494010000                          movl 404(%rsp), %esi
+BA95010000                              movl $405, %edx
+BA96010000                              movl $406, %edx
+BA98010000                              movl $408, %edx
+8B8C2498010000                          movl 408(%rsp), %ecx
+8BBC2498010000                          movl 408(%rsp), %edi
+8B942498010000                          movl 408(%rsp), %edx
+8BB42498010000                          movl 408(%rsp), %esi
+B828000000                              movl $40, %eax
+BF28000000                              movl $40, %edi
+418B742428                              movl 40(%r12), %esi
+418B7D28                                movl 40(%r13), %edi
+418B7528                                movl 40(%r13), %esi
+8B4528                                  movl 40(%rbp), %eax
+8B442428                                movl 40(%rsp), %eax
+8B7C2428                                movl 40(%rsp), %edi
+8B542428                                movl 40(%rsp), %edx
+8B742428                                movl 40(%rsp), %esi
+C744246804000000                        movl $4, 104(%rsp)
+C7406C04000000                          movl $4, 108(%rax)
+C7436C04000000                          movl $4, 108(%rbx)
+C7838400000004000000                    movl $4, 132(%rbx)
+C784249C00000004000000                  movl $4, 156(%rsp)
+8B8C24A0010000                          movl 416(%rsp), %ecx
+8BBC24A0010000                          movl 416(%rsp), %edi
+8B9424A0010000                          movl 416(%rsp), %edx
+8BB424A0010000                          movl 416(%rsp), %esi
+C78424BC00000004000000                  movl $4, 188(%rsp)
+C744241404000000                        movl $4, 20(%rsp)
+8B8C24A4010000                          movl 420(%rsp), %ecx
+8BBC24A4010000                          movl 420(%rsp), %edi
+8B9424A4010000                          movl 420(%rsp), %edx
+8BB424A4010000                          movl 420(%rsp), %esi
+BAA8010000                              movl $424, %edx
+BAAC010000                              movl $428, %edx
+8BBC24AC010000                          movl 428(%rsp), %edi
+B8FFFFFFFF                              movl $4294967295, %eax
+BAB1010000                              movl $433, %edx
+8BBC24B4010000                          movl 436(%rsp), %edi
+BAB8010000                              movl $440, %edx
+BABA010000                              movl $442, %edx
+8BBC24BC010000                          movl 444(%rsp), %edi
+C7403004000000                          movl $4, 48(%rax)
+C7433004000000                          movl $4, 48(%rbx)
+B82C000000                              movl $44, %eax
+BF2C000000                              movl $44, %edi
+8B44242C                                movl 44(%rsp), %eax
+8B5C242C                                movl 44(%rsp), %ebx
+8B7C242C                                movl 44(%rsp), %edi
+8B54242C                                movl 44(%rsp), %edx
+8BBC24C4010000                          movl 452(%rsp), %edi
+C7403C04000000                          movl $4, 60(%rax)
+C7433C04000000                          movl $4, 60(%rbx)
+8BBC24CC010000                          movl 460(%rsp), %edi
+BACF010000                              movl $463, %edx
+8B9424D0010000                          movl 464(%rsp), %edx
+BAD1010000                              movl $465, %edx
+BAD2010000                              movl $466, %edx
+BAD3010000                              movl $467, %edx
+8BBC24D4010000                          movl 468(%rsp), %edi
+BAD5010000                              movl $469, %edx
+C7404804000000                          movl $4, 72(%rax)
+8B9424D8010000                          movl 472(%rsp), %edx
+BAD9010000                              movl $473, %edx
+BADC010000                              movl $476, %edx
+8BB424DC010000                          movl 476(%rsp), %esi
+BADD010000                              movl $477, %edx
+BADE010000                              movl $478, %edx
+BADF010000                              movl $479, %edx
+C7405004000000                          movl $4, 80(%rax)
+8B8C24E0010000                          movl 480(%rsp), %ecx
+BAE1010000                              movl $481, %edx
+BAE3010000                              movl $483, %edx
+C7405404000000                          movl $4, 84(%rax)
+C7435404000000                          movl $4, 84(%rbx)
+C744240430000000                        movl $48, 4(%rsp)
+8BB424E4010000                          movl 484(%rsp), %esi
+BAE7010000                              movl $487, %edx
+8B8C24E8010000                          movl 488(%rsp), %ecx
+B830000000                              movl $48, %eax
+BF30000000                              movl $48, %edi
+BA30000000                              movl $48, %edx
+418B5530                                movl 48(%r13), %edx
+418B7830                                movl 48(%r8), %edi
+C7400804000000                          movl $4, 8(%rax)
+8B5030                                  movl 48(%rax), %edx
+448B4030                                movl 48(%rax), %r8d
+448B4830                                movl 48(%rax), %r9d
+8B7D30                                  movl 48(%rbp), %edi
+8B442430                                movl 48(%rsp), %eax
+8B7C2430                                movl 48(%rsp), %edi
+8B542430                                movl 48(%rsp), %edx
+8B742430                                movl 48(%rsp), %esi
+8BBC24EC010000                          movl 492(%rsp), %edi
+8BB424EC010000                          movl 492(%rsp), %esi
+BA56130000                              movl $4950, %edx
+BA5D130000                              movl $4957, %edx
+BA5E130000                              movl $4958, %edx
+BAF0010000                              movl $496, %edx
+C7406004000000                          movl $4, 96(%rax)
+C7436004000000                          movl $4, 96(%rbx)
+BAF3010000                              movl $499, %edx
+B931000000                              movl $49, %ecx
+B804000000                              movl $4, %eax
+BD04000000                              movl $4, %ebp
+B904000000                              movl $4, %ecx
+BFFCFFFFFF                              movl $-4, %edi
+BF04000000                              movl $4, %edi
+8B142504000000                          movl 4, %edx
+BE04000000                              movl $4, %esi
+418B4504                                movl 4(%r13), %eax
+418B7D04                                movl 4(%r13), %edi
+41BE04000000                            movl $4, %r14d
+41B804000000                            movl $4, %r8d
+418B4004                                movl 4(%r8), %eax
+C70004000000                            movl $4, (%rax)
+8B4004                                  movl 4(%rax), %eax
+8B7804                                  movl 4(%rax), %edi
+8B7B04                                  movl 4(%rbx), %edi
+8B7F04                                  movl 4(%rdi), %edi
+8B7704                                  movl 4(%rdi), %esi
+8B4604                                  movl 4(%rsi), %eax
+8B5C2404                                movl 4(%rsp), %ebx
+8B4C2404                                movl 4(%rsp), %ecx
+8B7C2404                                movl 4(%rsp), %edi
+8B742404                                movl 4(%rsp), %esi
+C744241888130000                        movl $5000, 24(%rsp)
+8BBC24F4010000                          movl 500(%rsp), %edi
+8BB424F4010000                          movl 500(%rsp), %esi
+C7471032000000                          movl $50, 16(%rdi)
+BAF5010000                              movl $501, %edx
+BAF8010000                              movl $504, %edx
+8BBC24F8010000                          movl 504(%rsp), %edi
+8BB424F8010000                          movl 504(%rsp), %esi
+BAF9010000                              movl $505, %edx
+BAFB010000                              movl $507, %edx
+BAE0130000                              movl $5088, %edx
+BAE1130000                              movl $5089, %edx
+8B8C24FC010000                          movl 508(%rsp), %ecx
+8B9424FC010000                          movl 508(%rsp), %edx
+BAE4130000                              movl $5092, %edx
+BAE5130000                              movl $5093, %edx
+BAFD010000                              movl $509, %edx
+BAFF010000                              movl $511, %edx
+C7432800020000                          movl $512, 40(%rbx)
+BA00020000                              movl $512, %edx
+BA26140000                              movl $5158, %edx
+8B8C2404020000                          movl 516(%rsp), %ecx
+8B942404020000                          movl 516(%rsp), %edx
+BA3D140000                              movl $5181, %edx
+BA07020000                              movl $519, %edx
+BA50140000                              movl $5200, %edx
+BA52140000                              movl $5202, %edx
+8B8C2408020000                          movl 520(%rsp), %ecx
+8B942408020000                          movl 520(%rsp), %edx
+BA66140000                              movl $5222, %edx
+BA75140000                              movl $5237, %edx
+8B8C240C020000                          movl 524(%rsp), %ecx
+8BBC240C020000                          movl 524(%rsp), %edi
+8B94240C020000                          movl 524(%rsp), %edx
+8BB4240C020000                          movl 524(%rsp), %esi
+B834000000                              movl $52, %eax
+BF34000000                              movl $52, %edi
+418B448034                              movl 52(%r8,%rax,4), %eax
+8B7C2434                                movl 52(%rsp), %edi
+8B8C2414020000                          movl 532(%rsp), %ecx
+8BBC2414020000                          movl 532(%rsp), %edi
+8B942414020000                          movl 532(%rsp), %edx
+8BB42414020000                          movl 532(%rsp), %esi
+8B8C2418020000                          movl 536(%rsp), %ecx
+8BBC2418020000                          movl 536(%rsp), %edi
+8B942418020000                          movl 536(%rsp), %edx
+8BB42418020000                          movl 536(%rsp), %esi
+8BBC241C020000                          movl 540(%rsp), %edi
+BA1D020000                              movl $541, %edx
+BA20020000                              movl $544, %edx
+8BBC2420020000                          movl 544(%rsp), %edi
+8BB42420020000                          movl 544(%rsp), %esi
+C7403005000000                          movl $5, 48(%rax)
+C7433005000000                          movl $5, 48(%rbx)
+8BBC2424020000                          movl 548(%rsp), %edi
+8B942424020000                          movl 548(%rsp), %edx
+8BB42424020000                          movl 548(%rsp), %esi
+8B8C2428020000                          movl 552(%rsp), %ecx
+8BBC2428020000                          movl 552(%rsp), %edi
+8B94242C020000                          movl 556(%rsp), %edx
+BACF150000                              movl $5583, %edx
+BA30020000                              movl $560, %edx
+8B942430020000                          movl 560(%rsp), %edx
+BA37020000                              movl $567, %edx
+BA39020000                              movl $569, %edx
+B838000000                              movl $56, %eax
+BF38000000                              movl $56, %edi
+8B442438                                movl 56(%rsp), %eax
+8B7C2438                                movl 56(%rsp), %edi
+8B542438                                movl 56(%rsp), %edx
+8B742438                                movl 56(%rsp), %esi
+BA44160000                              movl $5700, %edx
+8B84243C020000                          movl 572(%rsp), %eax
+8B8C243C020000                          movl 572(%rsp), %ecx
+BA3E020000                              movl $574, %edx
+BA3F020000                              movl $575, %edx
+BACB160000                              movl $5835, %edx
+BA2A170000                              movl $5930, %edx
+B805000000                              movl $5, %eax
+BD05000000                              movl $5, %ebp
+BF05000000                              movl $5, %edi
+BA05000000                              movl $5, %edx
+BE05000000                              movl $5, %esi
+C70005000000                            movl $5, (%rax)
+C70105000000                            movl $5, (%rcx)
+C70205000000                            movl $5, (%rdx)
+BA5C020000                              movl $604, %edx
+C740383C000000                          movl $60, 56(%rax)
+C743383C000000                          movl $60, 56(%rbx)
+BA5F020000                              movl $607, %edx
+BA60020000                              movl $608, %edx
+B83C000000                              movl $60, %eax
+BF3C000000                              movl $60, %edi
+8B44243C                                movl 60(%rsp), %eax
+8B7C243C                                movl 60(%rsp), %edi
+8B54243C                                movl 60(%rsp), %edx
+8B74243C                                movl 60(%rsp), %esi
+BA64020000                              movl $612, %edx
+BA67020000                              movl $615, %edx
+BA68020000                              movl $616, %edx
+BA6F020000                              movl $623, %edx
+BA70020000                              movl $624, %edx
+BA6D180000                              movl $6253, %edx
+BA85180000                              movl $6277, %edx
+BA94180000                              movl $6292, %edx
+BA76020000                              movl $630, %edx
+BA77020000                              movl $631, %edx
+BF3F000000                              movl $63, %edi
+BA80020000                              movl $640, %edx
+BA81020000                              movl $641, %edx
+BA82020000                              movl $642, %edx
+BA84020000                              movl $644, %edx
+C7404440000000                          movl $64, 68(%rax)
+C7434440000000                          movl $64, 68(%rbx)
+C7404840000000                          movl $64, 72(%rax)
+C7434840000000                          movl $64, 72(%rbx)
+C744240840000000                        movl $64, 8(%rsp)
+B840000000                              movl $64, %eax
+BF40000000                              movl $64, %edi
+BA40000000                              movl $64, %edx
+41BE40000000                            movl $64, %r14d
+8B4C2440                                movl 64(%rsp), %ecx
+8B542440                                movl 64(%rsp), %edx
+BF00FF0000                              movl $65280, %edi
+BA8D020000                              movl $653, %edx
+B8FFFF0000                              movl $65535, %eax
+B9FFFF0000                              movl $65535, %ecx
+41BCFFFF0000                            movl $65535, %r12d
+41B9FFFF0000                            movl $65535, %r9d
+BA93020000                              movl $659, %edx
+BACC190000                              movl $6604, %edx
+BA98020000                              movl $664, %edx
+BA041A0000                              movl $6660, %edx
+428BAC84A0020000                        movl 672(%rsp,%r8,4), %ebp
+8BAC84A0020000                          movl 672(%rsp,%rax,4), %ebp
+8BAC94A0020000                          movl 672(%rsp,%rdx,4), %ebp
+BAA1020000                              movl $673, %edx
+BA591A0000                              movl $6745, %edx
+BAA2020000                              movl $674, %edx
+BA701A0000                              movl $6768, %edx
+BA821A0000                              movl $6786, %edx
+BA841A0000                              movl $6788, %edx
+BA971A0000                              movl $6807, %edx
+BAA8020000                              movl $680, %edx
+BAA9020000                              movl $681, %edx
+BAA61A0000                              movl $6822, %edx
+BAB91A0000                              movl $6841, %edx
+BAAC020000                              movl $684, %edx
+BAC91A0000                              movl $6857, %edx
+BACA1A0000                              movl $6858, %edx
+BACB1A0000                              movl $6859, %edx
+BACC1A0000                              movl $6860, %edx
+BAAE020000                              movl $686, %edx
+BAAF020000                              movl $687, %edx
+428B8484B0020000                        movl 688(%rsp,%r8,4), %eax
+8B8494B0020000                          movl 688(%rsp,%rdx,4), %eax
+BAF01A0000                              movl $6896, %edx
+BAF11A0000                              movl $6897, %edx
+BAF21A0000                              movl $6898, %edx
+BAF31A0000                              movl $6899, %edx
+B844000000                              movl $68, %eax
+BF44000000                              movl $68, %edi
+8B442444                                movl 68(%rsp), %eax
+8B6C2444                                movl 68(%rsp), %ebp
+8B5C2444                                movl 68(%rsp), %ebx
+8B7C2444                                movl 68(%rsp), %edi
+8B542444                                movl 68(%rsp), %edx
+BAB2020000                              movl $690, %edx
+BAB3020000                              movl $691, %edx
+C701FF0F0100                            movl $69631, (%rcx)
+C702FF0F0100                            movl $69631, (%rdx)
+C706FF0F0100                            movl $69631, (%rsi)
+B800100100                              movl $69632, %eax
+C70200100100                            movl $69632, (%rdx)
+C70600100100                            movl $69632, (%rsi)
+B801100100                              movl $69633, %eax
+BF01100100                              movl $69633, %edi
+BA01100100                              movl $69633, %edx
+BE01100100                              movl $69633, %esi
+C70101100100                            movl $69633, (%rcx)
+C70201100100                            movl $69633, (%rdx)
+C70601100100                            movl $69633, (%rsi)
+B802100100                              movl $69634, %eax
+BF02100100                              movl $69634, %edi
+BE02100100                              movl $69634, %esi
+C70102100100                            movl $69634, (%rcx)
+C70202100100                            movl $69634, (%rdx)
+C70602100100                            movl $69634, (%rsi)
+C744244003100100                        movl $69635, 64(%rsp)
+B803100100                              movl $69635, %eax
+BF03100100                              movl $69635, %edi
+BA03100100                              movl $69635, %edx
+BE03100100                              movl $69635, %esi
+C70103100100                            movl $69635, (%rcx)
+C70203100100                            movl $69635, (%rdx)
+C70603100100                            movl $69635, (%rsi)
+C744244004100100                        movl $69636, 64(%rsp)
+B804100100                              movl $69636, %eax
+BF04100100                              movl $69636, %edi
+BE04100100                              movl $69636, %esi
+C70104100100                            movl $69636, (%rcx)
+C70204100100                            movl $69636, (%rdx)
+C70604100100                            movl $69636, (%rsi)
+BF05100100                              movl $69637, %edi
+BE05100100                              movl $69637, %esi
+C70205100100                            movl $69637, (%rdx)
+C70605100100                            movl $69637, (%rsi)
+B806100100                              movl $69638, %eax
+BF06100100                              movl $69638, %edi
+BE06100100                              movl $69638, %esi
+C70106100100                            movl $69638, (%rcx)
+C70206100100                            movl $69638, (%rdx)
+C70606100100                            movl $69638, (%rsi)
+B807100100                              movl $69639, %eax
+BF07100100                              movl $69639, %edi
+BE07100100                              movl $69639, %esi
+C70107100100                            movl $69639, (%rcx)
+C70207100100                            movl $69639, (%rdx)
+C70607100100                            movl $69639, (%rsi)
+BAB8020000                              movl $696, %edx
+BF45000000                              movl $69, %edi
+B806000000                              movl $6, %eax
+BF06000000                              movl $6, %edi
+BA06000000                              movl $6, %edx
+BE06000000                              movl $6, %esi
+C70006000000                            movl $6, (%rax)
+C70106000000                            movl $6, (%rcx)
+C70206000000                            movl $6, (%rdx)
+BABE020000                              movl $702, %edx
+448B9424C0020000                        movl 704(%rsp), %r10d
+BAC4020000                              movl $708, %edx
+BAB41B0000                              movl $7092, %edx
+BA46000000                              movl $70, %edx
+BACA020000                              movl $714, %edx
+BA061C0000                              movl $7174, %edx
+BA1F1C0000                              movl $7199, %edx
+BAD1020000                              movl $721, %edx
+BAD2020000                              movl $722, %edx
+BAD3020000                              movl $723, %edx
+BA741C0000                              movl $7284, %edx
+B848000000                              movl $72, %eax
+BF48000000                              movl $72, %edi
+8B7C2448                                movl 72(%rsp), %edi
+BAE0020000                              movl $736, %edx
+C70000200100                            movl $73728, (%rax)
+C70001200100                            movl $73729, (%rax)
+C70002200100                            movl $73730, (%rax)
+C70003200100                            movl $73731, (%rax)
+C70004200100                            movl $73732, (%rax)
+C70005200100                            movl $73733, (%rax)
+C70006200100                            movl $73734, (%rax)
+C70007200100                            movl $73735, (%rax)
+BACF1C0000                              movl $7375, %edx
+BAE1020000                              movl $737, %edx
+BAE2020000                              movl $738, %edx
+BAF81C0000                              movl $7416, %edx
+BAE5020000                              movl $741, %edx
+BA051D0000                              movl $7429, %edx
+BA061D0000                              movl $7430, %edx
+BAE9020000                              movl $745, %edx
+BA341D0000                              movl $7476, %edx
+BA411D0000                              movl $7489, %edx
+C7433007000000                          movl $7, 48(%rbx)
+BAEF020000                              movl $751, %edx
+BAF9020000                              movl $761, %edx
+BAFC020000                              movl $764, %edx
+BAFF1D0000                              movl $7679, %edx
+B84C000000                              movl $76, %eax
+BF4C000000                              movl $76, %edi
+8B7C244C                                movl 76(%rsp), %edi
+8B54244C                                movl 76(%rsp), %edx
+BA161E0000                              movl $7702, %edx
+BA02030000                              movl $770, %edx
+BA2D1E0000                              movl $7725, %edx
+BA08030000                              movl $776, %edx
+BA5A1E0000                              movl $7770, %edx
+C744240C00300100                        movl $77824, 12(%rsp)
+C744242C00300100                        movl $77824, 44(%rsp)
+C744240800300100                        movl $77824, 8(%rsp)
+41BD00300100                            movl $77824, %r13d
+C70600300100                            movl $77824, (%rsi)
+C744240C01300100                        movl $77825, 12(%rsp)
+C744240801300100                        movl $77825, 8(%rsp)
+B801300100                              movl $77825, %eax
+BF01300100                              movl $77825, %edi
+BA01300100                              movl $77825, %edx
+BE01300100                              movl $77825, %esi
+BF03300100                              movl $77827, %edi
+BF04300100                              movl $77828, %edi
+41BD04300100                            movl $77828, %r13d
+C744240C05300100                        movl $77829, 12(%rsp)
+C744240805300100                        movl $77829, 8(%rsp)
+B805300100                              movl $77829, %eax
+BF05300100                              movl $77829, %edi
+BA05300100                              movl $77829, %edx
+BE05300100                              movl $77829, %esi
+BF07300100                              movl $77831, %edi
+BF08300100                              movl $77832, %edi
+41BD08300100                            movl $77832, %r13d
+B809300100                              movl $77833, %eax
+BE09300100                              movl $77833, %esi
+C744240C0D300100                        movl $77837, 12(%rsp)
+C74424080D300100                        movl $77837, 8(%rsp)
+B80D300100                              movl $77837, %eax
+BF0D300100                              movl $77837, %edi
+BA0D300100                              movl $77837, %edx
+BE0D300100                              movl $77837, %esi
+BF0F300100                              movl $77839, %edi
+BF10300100                              movl $77840, %edi
+41BD10300100                            movl $77840, %r13d
+C744240C11300100                        movl $77841, 12(%rsp)
+C744240811300100                        movl $77841, 8(%rsp)
+B811300100                              movl $77841, %eax
+BF11300100                              movl $77841, %edi
+BA11300100                              movl $77841, %edx
+BE11300100                              movl $77841, %esi
+BF13300100                              movl $77843, %edi
+BF14300100                              movl $77844, %edi
+41BD14300100                            movl $77844, %r13d
+C744240C15300100                        movl $77845, 12(%rsp)
+C744240815300100                        movl $77845, 8(%rsp)
+B815300100                              movl $77845, %eax
+BF15300100                              movl $77845, %edi
+BA15300100                              movl $77845, %edx
+BE15300100                              movl $77845, %esi
+BF17300100                              movl $77847, %edi
+41BD18300100                            movl $77848, %r13d
+B819300100                              movl $77849, %eax
+BF19300100                              movl $77849, %edi
+BE19300100                              movl $77849, %esi
+C74424141B300100                        movl $77851, 20(%rsp)
+C744241C1B300100                        movl $77851, 28(%rsp)
+BF1B300100                              movl $77851, %edi
+C74424141C300100                        movl $77852, 20(%rsp)
+B91C300100                              movl $77852, %ecx
+BF1C300100                              movl $77852, %edi
+BA1C300100                              movl $77852, %edx
+B81D300100                              movl $77853, %eax
+BE1D300100                              movl $77853, %esi
+C74424141F300100                        movl $77855, 20(%rsp)
+C744241C1F300100                        movl $77855, 28(%rsp)
+BF1F300100                              movl $77855, %edi
+C744241420300100                        movl $77856, 20(%rsp)
+B920300100                              movl $77856, %ecx
+BF20300100                              movl $77856, %edi
+BA20300100                              movl $77856, %edx
+B821300100                              movl $77857, %eax
+BE21300100                              movl $77857, %esi
+C744241423300100                        movl $77859, 20(%rsp)
+C744241C23300100                        movl $77859, 28(%rsp)
+BF23300100                              movl $77859, %edi
+C744241424300100                        movl $77860, 20(%rsp)
+B825300100                              movl $77861, %eax
+BF25300100                              movl $77861, %edi
+BE25300100                              movl $77861, %esi
+BF27300100                              movl $77863, %edi
+B829300100                              movl $77865, %eax
+BE29300100                              movl $77865, %esi
+BF2B300100                              movl $77867, %edi
+B82D300100                              movl $77869, %eax
+BF2D300100                              movl $77869, %edi
+BE2D300100                              movl $77869, %esi
+BF2F300100                              movl $77871, %edi
+BF30300100                              movl $77872, %edi
+BF36300100                              movl $77878, %edi
+BF37300100                              movl $77879, %edi
+BF38300100                              movl $77880, %edi
+BF39300100                              movl $77881, %edi
+BF3B300100                              movl $77883, %edi
+BF3E300100                              movl $77886, %edi
+BF40300100                              movl $77888, %edi
+BF41300100                              movl $77889, %edi
+BF42300100                              movl $77890, %edi
+BF43300100                              movl $77891, %edi
+BF44300100                              movl $77892, %edi
+BF45300100                              movl $77893, %edi
+BF46300100                              movl $77894, %edi
+BF47300100                              movl $77895, %edi
+BF48300100                              movl $77896, %edi
+BF49300100                              movl $77897, %edi
+BF4B300100                              movl $77899, %edi
+BA0A030000                              movl $778, %edx
+BF4C300100                              movl $77900, %edi
+C74424044D300100                        movl $77901, 4(%rsp)
+BF4D300100                              movl $77901, %edi
+BF4E300100                              movl $77902, %edi
+BF4F300100                              movl $77903, %edi
+C744240450300100                        movl $77904, 4(%rsp)
+BF50300100                              movl $77904, %edi
+BF51300100                              movl $77905, %edi
+BF52300100                              movl $77906, %edi
+BF53300100                              movl $77907, %edi
+BD59300100                              movl $77913, %ebp
+B959300100                              movl $77913, %ecx
+BE59300100                              movl $77913, %esi
+BD5A300100                              movl $77914, %ebp
+B95A300100                              movl $77914, %ecx
+BE5A300100                              movl $77914, %esi
+BD5B300100                              movl $77915, %ebp
+B95B300100                              movl $77915, %ecx
+BE5B300100                              movl $77915, %esi
+BD5C300100                              movl $77916, %ebp
+B95C300100                              movl $77916, %ecx
+BE5C300100                              movl $77916, %esi
+BF5D300100                              movl $77917, %edi
+BF5E300100                              movl $77918, %edi
+BF5F300100                              movl $77919, %edi
+BF60300100                              movl $77920, %edi
+BF61300100                              movl $77921, %edi
+BF62300100                              movl $77922, %edi
+BF63300100                              movl $77923, %edi
+BF64300100                              movl $77924, %edi
+BF65300100                              movl $77925, %edi
+BF66300100                              movl $77926, %edi
+BF67300100                              movl $77927, %edi
+BF68300100                              movl $77928, %edi
+BF69300100                              movl $77929, %edi
+BF6A300100                              movl $77930, %edi
+BF6B300100                              movl $77931, %edi
+BF6C300100                              movl $77932, %edi
+BF6D300100                              movl $77933, %edi
+BF6E300100                              movl $77934, %edi
+BF6F300100                              movl $77935, %edi
+BF71300100                              movl $77937, %edi
+BF72300100                              movl $77938, %edi
+BF73300100                              movl $77939, %edi
+BF74300100                              movl $77940, %edi
+BF75300100                              movl $77941, %edi
+BF76300100                              movl $77942, %edi
+BF77300100                              movl $77943, %edi
+BF79300100                              movl $77945, %edi
+41BD7D300100                            movl $77949, %r13d
+41BD7E300100                            movl $77950, %r13d
+41BD7F300100                            movl $77951, %r13d
+41BD80300100                            movl $77952, %r13d
+41BD81300100                            movl $77953, %r13d
+41BD82300100                            movl $77954, %r13d
+41BD83300100                            movl $77955, %r13d
+41BD84300100                            movl $77956, %r13d
+41BD85300100                            movl $77957, %r13d
+41BD86300100                            movl $77958, %r13d
+41BD87300100                            movl $77959, %r13d
+41BD88300100                            movl $77960, %r13d
+41BD89300100                            movl $77961, %r13d
+41BD8A300100                            movl $77962, %r13d
+41BD8B300100                            movl $77963, %r13d
+41BD8C300100                            movl $77964, %r13d
+41BD8D300100                            movl $77965, %r13d
+41BD8E300100                            movl $77966, %r13d
+41BD8F300100                            movl $77967, %r13d
+41BD90300100                            movl $77968, %r13d
+41BD91300100                            movl $77969, %r13d
+41BD92300100                            movl $77970, %r13d
+41BD93300100                            movl $77971, %r13d
+41BD94300100                            movl $77972, %r13d
+41BD95300100                            movl $77973, %r13d
+41BD96300100                            movl $77974, %r13d
+41BD97300100                            movl $77975, %r13d
+41BD98300100                            movl $77976, %r13d
+41BD99300100                            movl $77977, %r13d
+B99A300100                              movl $77978, %ecx
+BA9A300100                              movl $77978, %edx
+B99B300100                              movl $77979, %ecx
+BA9B300100                              movl $77979, %edx
+B99C300100                              movl $77980, %ecx
+BA9C300100                              movl $77980, %edx
+B99D300100                              movl $77981, %ecx
+BA9D300100                              movl $77981, %edx
+B99E300100                              movl $77982, %ecx
+BA9E300100                              movl $77982, %edx
+B99F300100                              movl $77983, %ecx
+BA9F300100                              movl $77983, %edx
+41BDA0300100                            movl $77984, %r13d
+41BDA1300100                            movl $77985, %r13d
+41BDA2300100                            movl $77986, %r13d
+41BDA3300100                            movl $77987, %r13d
+41BDA4300100                            movl $77988, %r13d
+41BDA5300100                            movl $77989, %r13d
+41BDA6300100                            movl $77990, %r13d
+41BDA7300100                            movl $77991, %r13d
+41BDA8300100                            movl $77992, %r13d
+B9A9300100                              movl $77993, %ecx
+B9AA300100                              movl $77994, %ecx
+B9AB300100                              movl $77995, %ecx
+B9AC300100                              movl $77996, %ecx
+B9AD300100                              movl $77997, %ecx
+B9AE300100                              movl $77998, %ecx
+C706AF300100                            movl $77999, (%rsi)
+BA0B030000                              movl $779, %edx
+C706B0300100                            movl $78000, (%rsi)
+C706B1300100                            movl $78001, (%rsi)
+C706B2300100                            movl $78002, (%rsi)
+B9B3300100                              movl $78003, %ecx
+B9B4300100                              movl $78004, %ecx
+B9B5300100                              movl $78005, %ecx
+B9B6300100                              movl $78006, %ecx
+B9B7300100                              movl $78007, %ecx
+B9B8300100                              movl $78008, %ecx
+B9B9300100                              movl $78009, %ecx
+B9BA300100                              movl $78010, %ecx
+B9BB300100                              movl $78011, %ecx
+C706BC300100                            movl $78012, (%rsi)
+C706BD300100                            movl $78013, (%rsi)
+C706BE300100                            movl $78014, (%rsi)
+C706BF300100                            movl $78015, (%rsi)
+B9C0300100                              movl $78016, %ecx
+B9C1300100                              movl $78017, %ecx
+B9C2300100                              movl $78018, %ecx
+B9C3300100                              movl $78019, %ecx
+B9C4300100                              movl $78020, %ecx
+B9C5300100                              movl $78021, %ecx
+B9C6300100                              movl $78022, %ecx
+B9C7300100                              movl $78023, %ecx
+B9C8300100                              movl $78024, %ecx
+C706C9300100                            movl $78025, (%rsi)
+C706CA300100                            movl $78026, (%rsi)
+C706CB300100                            movl $78027, (%rsi)
+C706CC300100                            movl $78028, (%rsi)
+B9CE300100                              movl $78030, %ecx
+B9D0300100                              movl $78032, %ecx
+B9D1300100                              movl $78033, %ecx
+B9D2300100                              movl $78034, %ecx
+B9D3300100                              movl $78035, %ecx
+B9D4300100                              movl $78036, %ecx
+B9D5300100                              movl $78037, %ecx
+C706D6300100                            movl $78038, (%rsi)
+C706D7300100                            movl $78039, (%rsi)
+C706D8300100                            movl $78040, (%rsi)
+C706D9300100                            movl $78041, (%rsi)
+BFDB300100                              movl $78043, %edi
+BFDD300100                              movl $78045, %edi
+BFDE300100                              movl $78046, %edi
+BFDF300100                              movl $78047, %edi
+BFE0300100                              movl $78048, %edi
+BFE1300100                              movl $78049, %edi
+BFE2300100                              movl $78050, %edi
+BFE4300100                              movl $78052, %edi
+BFE5300100                              movl $78053, %edi
+B9E6300100                              movl $78054, %ecx
+B9E7300100                              movl $78055, %ecx
+B9E8300100                              movl $78056, %ecx
+BFE8300100                              movl $78056, %edi
+B9ED300100                              movl $78061, %ecx
+B9EE300100                              movl $78062, %ecx
+B9EF300100                              movl $78063, %ecx
+B9F0300100                              movl $78064, %ecx
+B9F1300100                              movl $78065, %ecx
+B9F2300100                              movl $78066, %ecx
+B9F3300100                              movl $78067, %ecx
+B9F4300100                              movl $78068, %ecx
+B9F5300100                              movl $78069, %ecx
+B9F6300100                              movl $78070, %ecx
+B9F7300100                              movl $78071, %ecx
+B9F8300100                              movl $78072, %ecx
+B9F9300100                              movl $78073, %ecx
+B9FA300100                              movl $78074, %ecx
+B9FB300100                              movl $78075, %ecx
+B9FC300100                              movl $78076, %ecx
+B9FD300100                              movl $78077, %ecx
+B9FE300100                              movl $78078, %ecx
+B9FF300100                              movl $78079, %ecx
+B900310100                              movl $78080, %ecx
+B901310100                              movl $78081, %ecx
+B902310100                              movl $78082, %ecx
+B903310100                              movl $78083, %ecx
+B904310100                              movl $78084, %ecx
+B905310100                              movl $78085, %ecx
+B906310100                              movl $78086, %ecx
+B907310100                              movl $78087, %ecx
+B908310100                              movl $78088, %ecx
+B909310100                              movl $78089, %ecx
+B90A310100                              movl $78090, %ecx
+B90B310100                              movl $78091, %ecx
+B90C310100                              movl $78092, %ecx
+BA0C310100                              movl $78092, %edx
+B90D310100                              movl $78093, %ecx
+BA0D310100                              movl $78093, %edx
+B90E310100                              movl $78094, %ecx
+BA0E310100                              movl $78094, %edx
+B90F310100                              movl $78095, %ecx
+BA0F310100                              movl $78095, %edx
+B910310100                              movl $78096, %ecx
+BA10310100                              movl $78096, %edx
+B911310100                              movl $78097, %ecx
+BA11310100                              movl $78097, %edx
+B912310100                              movl $78098, %ecx
+BA12310100                              movl $78098, %edx
+B913310100                              movl $78099, %ecx
+BA13310100                              movl $78099, %edx
+BA0C030000                              movl $780, %edx
+B914310100                              movl $78100, %ecx
+B915310100                              movl $78101, %ecx
+B916310100                              movl $78102, %ecx
+B917310100                              movl $78103, %ecx
+B918310100                              movl $78104, %ecx
+B919310100                              movl $78105, %ecx
+B91A310100                              movl $78106, %ecx
+B91B310100                              movl $78107, %ecx
+B91C310100                              movl $78108, %ecx
+B91D310100                              movl $78109, %ecx
+B91E310100                              movl $78110, %ecx
+BA881E0000                              movl $7816, %edx
+BA0D030000                              movl $781, %edx
+BA0E030000                              movl $782, %edx
+BA14030000                              movl $788, %edx
+BADE1E0000                              movl $7902, %edx
+BAE81E0000                              movl $7912, %edx
+BAF21E0000                              movl $7922, %edx
+BA1A030000                              movl $794, %edx
+BA221F0000                              movl $7970, %edx
+BA2A1F0000                              movl $7978, %edx
+BA1D030000                              movl $797, %edx
+BA321F0000                              movl $7986, %edx
+BA3A1F0000                              movl $7994, %edx
+B807000000                              movl $7, %eax
+BF07000000                              movl $7, %edi
+BE07000000                              movl $7, %esi
+C70007000000                            movl $7, (%rax)
+BA431F0000                              movl $8003, %edx
+BA20030000                              movl $800, %edx
+8BBC2420030000                          movl 800(%rsp), %edi
+BA21030000                              movl $801, %edx
+BA831F0000                              movl $8067, %edx
+BA8D1F0000                              movl $8077, %edx
+BA8E1F0000                              movl $8078, %edx
+B850000000                              movl $80, %eax
+BF50000000                              movl $80, %edi
+8B7C2450                                movl 80(%rsp), %edi
+8B742450                                movl 80(%rsp), %esi
+BAAA1F0000                              movl $8106, %edx
+C7431008000000                          movl $8, 16(%rbx)
+BAF41F0000                              movl $8180, %edx
+C7402000400100                          movl $81920, 32(%rax)
+C7432000400100                          movl $81920, 32(%rbx)
+BF00400100                              movl $81920, %edi
+BE00400100                              movl $81920, %esi
+BF01400100                              movl $81921, %edi
+C7402002400100                          movl $81922, 32(%rax)
+BF02400100                              movl $81922, %edi
+C7402003400100                          movl $81923, 32(%rax)
+BF03400100                              movl $81923, %edi
+BF04400100                              movl $81924, %edi
+BE06400100                              movl $81926, %esi
+BF07400100                              movl $81927, %edi
+BE08400100                              movl $81928, %esi
+BA0D200000                              movl $8205, %edx
+BA36200000                              movl $8246, %edx
+C7401808000000                          movl $8, 24(%rax)
+BF00004131                              movl $826343424, %edi
+41BE00004131                            movl $826343424, %r14d
+C744241C08000000                        movl $8, 28(%rsp)
+BA7D200000                              movl $8317, %edx
+BA44030000                              movl $836, %edx
+BA45030000                              movl $837, %edx
+BA46030000                              movl $838, %edx
+BA47030000                              movl $839, %edx
+BA49030000                              movl $841, %edx
+BA4A030000                              movl $842, %edx
+B854000000                              movl $84, %eax
+8B7C2454                                movl 84(%rsp), %edi
+BA75210000                              movl $8565, %edx
+BA98210000                              movl $8600, %edx
+C7401C00500100                          movl $86016, 28(%rax)
+C7436401500100                          movl $86017, 100(%rbx)
+C7437001500100                          movl $86017, 112(%rbx)
+C7437C01500100                          movl $86017, 124(%rbx)
+C7401C01500100                          movl $86017, 28(%rax)
+C7433401500100                          movl $86017, 52(%rbx)
+C7434001500100                          movl $86017, 64(%rbx)
+C7434C01500100                          movl $86017, 76(%rbx)
+C7435801500100                          movl $86017, 88(%rbx)
+C7406402500100                          movl $86018, 100(%rax)
+C7436402500100                          movl $86018, 100(%rbx)
+C7401C02500100                          movl $86018, 28(%rax)
+C7403402500100                          movl $86018, 52(%rax)
+C7433402500100                          movl $86018, 52(%rbx)
+C7404002500100                          movl $86018, 64(%rax)
+C7434002500100                          movl $86018, 64(%rbx)
+C7404C02500100                          movl $86018, 76(%rax)
+C7434C02500100                          movl $86018, 76(%rbx)
+C7405802500100                          movl $86018, 88(%rax)
+C7435802500100                          movl $86018, 88(%rbx)
+C7403403500100                          movl $86019, 52(%rax)
+BAB5210000                              movl $8629, %edx
+BAD0210000                              movl $8656, %edx
+BA16220000                              movl $8726, %edx
+C7434808000000                          movl $8, 72(%rbx)
+BA43220000                              movl $8771, %edx
+BA4D220000                              movl $8781, %edx
+BA5E220000                              movl $8798, %edx
+BA6B220000                              movl $8811, %edx
+C7405408000000                          movl $8, 84(%rax)
+C7435408000000                          movl $8, 84(%rbx)
+C744244458000000                        movl $88, 68(%rsp)
+BA77030000                              movl $887, %edx
+B858000000                              movl $88, %eax
+C7430808000000                          movl $8, 8(%rbx)
+8B7C2458                                movl 88(%rsp), %edi
+BAD8220000                              movl $8920, %edx
+BA7C030000                              movl $892, %edx
+C7405C08000000                          movl $8, 92(%rax)
+BAEF220000                              movl $8943, %edx
+BA7E030000                              movl $894, %edx
+BA7F030000                              movl $895, %edx
+BA02230000                              movl $8962, %edx
+BA80030000                              movl $896, %edx
+BA81030000                              movl $897, %edx
+B808000000                              movl $8, %eax
+BD08000000                              movl $8, %ebp
+BF08000000                              movl $8, %edi
+BA08000000                              movl $8, %edx
+BE08000000                              movl $8, %esi
+418B7C2408                              movl 8(%r12), %edi
+418B542408                              movl 8(%r12), %edx
+418B7508                                movl 8(%r13), %esi
+418B7E08                                movl 8(%r14), %edi
+41B808000000                            movl $8, %r8d
+C70008000000                            movl $8, (%rax)
+8B4008                                  movl 8(%rax), %eax
+8B7808                                  movl 8(%rax), %edi
+8B5008                                  movl 8(%rax), %edx
+448B5008                                movl 8(%rax), %r10d
+448B5808                                movl 8(%rax), %r11d
+8B7C0808                                movl 8(%rax,%rcx), %edi
+8B4508                                  movl 8(%rbp), %eax
+8B4D08                                  movl 8(%rbp), %ecx
+8B7D08                                  movl 8(%rbp), %edi
+8B7508                                  movl 8(%rbp), %esi
+8B4308                                  movl 8(%rbx), %eax
+8B7B08                                  movl 8(%rbx), %edi
+8B5308                                  movl 8(%rbx), %edx
+8B7308                                  movl 8(%rbx), %esi
+8B4C8308                                movl 8(%rbx,%rax,4), %ecx
+8B4708                                  movl 8(%rdi), %eax
+8B7F08                                  movl 8(%rdi), %edi
+8B5708                                  movl 8(%rdi), %edx
+8B7708                                  movl 8(%rdi), %esi
+8B4208                                  movl 8(%rdx), %eax
+8B7A08                                  movl 8(%rdx), %edi
+8B5208                                  movl 8(%rdx), %edx
+8B4608                                  movl 8(%rsi), %eax
+8B7E08                                  movl 8(%rsi), %edi
+8B440E08                                movl 8(%rsi,%rcx), %eax
+C7042408000000                          movl $8, (%rsp)
+8B442408                                movl 8(%rsp), %eax
+8B4C2408                                movl 8(%rsp), %ecx
+8B7C2408                                movl 8(%rsp), %edi
+8B542408                                movl 8(%rsp), %edx
+8B742408                                movl 8(%rsp), %esi
+448B442408                              movl 8(%rsp), %r8d
+BA89030000                              movl $905, %edx
+BA80230000                              movl $9088, %edx
+BAA0230000                              movl $9120, %edx
+BAB6230000                              movl $9142, %edx
+BACD230000                              movl $9165, %edx
+BAE4230000                              movl $9188, %edx
+C7401809000000                          movl $9, 24(%rax)
+C7421809000000                          movl $9, 24(%rdx)
+BA29240000                              movl $9257, %edx
+BA37240000                              movl $9271, %edx
+B85C000000                              movl $92, %eax
+BA5C000000                              movl $92, %edx
+8B7C245C                                movl 92(%rsp), %edi
+8B54245C                                movl 92(%rsp), %edx
+8B74245C                                movl 92(%rsp), %esi
+BA5D000000                              movl $93, %edx
+BE00700100                              movl $94208, %esi
+BE01700100                              movl $94209, %esi
+BE02700100                              movl $94210, %esi
+BA1C250000                              movl $9500, %edx
+BABD030000                              movl $957, %edx
+B860000000                              movl $96, %eax
+8B7C2460                                movl 96(%rsp), %edi
+8B742460                                movl 96(%rsp), %esi
+BAD2030000                              movl $978, %edx
+BAD3030000                              movl $979, %edx
+BA6C260000                              movl $9836, %edx
+BA77260000                              movl $9847, %edx
+BA86260000                              movl $9862, %edx
+BADC030000                              movl $988, %edx
+B862000000                              movl $98, %eax
+BAEB260000                              movl $9963, %edx
+BAF6260000                              movl $9974, %edx
+BA05270000                              movl $9989, %edx
+B809000000                              movl $9, %eax
+41BD09000000                            movl $9, %r13d
+41BE09000000                            movl $9, %r14d
+C70009000000                            movl $9, (%rax)
+89442464                                movl %eax, 100(%rsp)
+89442468                                movl %eax, 104(%rsp)
+8944246C                                movl %eax, 108(%rsp)
+89442470                                movl %eax, 112(%rsp)
+89442474                                movl %eax, 116(%rsp)
+8944247C                                movl %eax, 124(%rsp)
+89842480000000                          movl %eax, 128(%rsp)
+89450C                                  movl %eax, 12(%rbp)
+89430C                                  movl %eax, 12(%rbx)
+89410C                                  movl %eax, 12(%rcx)
+89460C                                  movl %eax, 12(%rsi)
+8944240C                                movl %eax, 12(%rsp)
+89842484000000                          movl %eax, 132(%rsp)
+89842488000000                          movl %eax, 136(%rsp)
+8984248C000000                          movl %eax, 140(%rsp)
+89842490000000                          movl %eax, 144(%rsp)
+89842494000000                          movl %eax, 148(%rsp)
+89842498000000                          movl %eax, 152(%rsp)
+8984249C000000                          movl %eax, 156(%rsp)
+898424A0000000                          movl %eax, 160(%rsp)
+898424A4000000                          movl %eax, 164(%rsp)
+4189442410                              movl %eax, 16(%r12)
+894310                                  movl %eax, 16(%rbx)
+894610                                  movl %eax, 16(%rsi)
+89442410                                movl %eax, 16(%rsp)
+898424AC000000                          movl %eax, 172(%rsp)
+898424B0000000                          movl %eax, 176(%rsp)
+898424B8000000                          movl %eax, 184(%rsp)
+898424BC000000                          movl %eax, 188(%rsp)
+898424C0000000                          movl %eax, 192(%rsp)
+898424C4000000                          movl %eax, 196(%rsp)
+898424C8000000                          movl %eax, 200(%rsp)
+898424CC000000                          movl %eax, 204(%rsp)
+898424D0000000                          movl %eax, 208(%rsp)
+4189442414                              movl %eax, 20(%r12)
+41894514                                movl %eax, 20(%r13)
+894314                                  movl %eax, 20(%rbx)
+89442414                                movl %eax, 20(%rsp)
+898424D8000000                          movl %eax, 216(%rsp)
+898424E4000000                          movl %eax, 228(%rsp)
+898424E8000000                          movl %eax, 232(%rsp)
+898424F0000000                          movl %eax, 240(%rsp)
+898424F4000000                          movl %eax, 244(%rsp)
+898424F8000000                          movl %eax, 248(%rsp)
+41894518                                movl %eax, 24(%r13)
+894318                                  movl %eax, 24(%rbx)
+89442418                                movl %eax, 24(%rsp)
+898424FC000000                          movl %eax, 252(%rsp)
+89842410010000                          movl %eax, 272(%rsp)
+89842414010000                          movl %eax, 276(%rsp)
+8984241C010000                          movl %eax, 284(%rsp)
+89849420010000                          movl %eax, 288(%rsp,%rdx,4)
+89431C                                  movl %eax, 28(%rbx)
+8944241C                                movl %eax, 28(%rsp)
+4189442420                              movl %eax, 32(%r12)
+41894520                                movl %eax, 32(%r13)
+894320                                  movl %eax, 32(%rbx)
+894720                                  movl %eax, 32(%rdi)
+894220                                  movl %eax, 32(%rdx)
+894620                                  movl %eax, 32(%rsi)
+89442420                                movl %eax, 32(%rsp)
+894324                                  movl %eax, 36(%rbx)
+89442424                                movl %eax, 36(%rsp)
+894328                                  movl %eax, 40(%rbx)
+89442428                                movl %eax, 40(%rsp)
+89432C                                  movl %eax, 44(%rbx)
+8944242C                                movl %eax, 44(%rsp)
+894330                                  movl %eax, 48(%rbx)
+89442430                                movl %eax, 48(%rsp)
+894304                                  movl %eax, 4(%rbx)
+89442404                                movl %eax, 4(%rsp)
+894334                                  movl %eax, 52(%rbx)
+89442434                                movl %eax, 52(%rsp)
+8984241C020000                          movl %eax, 540(%rsp)
+89842420020000                          movl %eax, 544(%rsp)
+89842424020000                          movl %eax, 548(%rsp)
+89842428020000                          movl %eax, 552(%rsp)
+894338                                  movl %eax, 56(%rbx)
+89442438                                movl %eax, 56(%rsp)
+89433C                                  movl %eax, 60(%rbx)
+8944243C                                movl %eax, 60(%rsp)
+89442440                                movl %eax, 64(%rsp)
+89442444                                movl %eax, 68(%rsp)
+89442448                                movl %eax, 72(%rsp)
+8944244C                                movl %eax, 76(%rsp)
+89442450                                movl %eax, 80(%rsp)
+89442454                                movl %eax, 84(%rsp)
+89442458                                movl %eax, 88(%rsp)
+894308                                  movl %eax, 8(%rbx)
+894608                                  movl %eax, 8(%rsi)
+89442408                                movl %eax, 8(%rsp)
+8944245C                                movl %eax, 92(%rsp)
+89442460                                movl %eax, 96(%rsp)
+89C5                                    movl %eax, %ebp
+89C3                                    movl %eax, %ebx
+89C1                                    movl %eax, %ecx
+89C7                                    movl %eax, %edi
+89C2                                    movl %eax, %edx
+89C6                                    movl %eax, %esi
+890539300000                            movl %eax, 12345(%rip)
+4189C3                                  movl %eax, %r11d
+4189C4                                  movl %eax, %r12d
+4189C5                                  movl %eax, %r13d
+418906                                  movl %eax, (%r14)
+4189C6                                  movl %eax, %r14d
+418907                                  movl %eax, (%r15)
+4189C7                                  movl %eax, %r15d
+4189C0                                  movl %eax, %r8d
+41890490                                movl %eax, (%r8,%rdx,4)
+4189C1                                  movl %eax, %r9d
+894500                                  movl %eax, (%rbp)
+8903                                    movl %eax, (%rbx)
+8901                                    movl %eax, (%rcx)
+8904D1                                  movl %eax, (%rcx,%rdx,8)
+8902                                    movl %eax, (%rdx)
+8906                                    movl %eax, (%rsi)
+890416                                  movl %eax, (%rsi,%rdx)
+890424                                  movl %eax, (%rsp)
+42896C2A0C                              movl %ebp, 12(%rdx,%r13)
+896C1A0C                                movl %ebp, 12(%rdx,%rbx)
+896B18                                  movl %ebp, 24(%rbx)
+896804                                  movl %ebp, 4(%rax)
+89AC243C020000                          movl %ebp, 572(%rsp)
+896C2444                                movl %ebp, 68(%rsp)
+896B08                                  movl %ebp, 8(%rbx)
+896C2408                                movl %ebp, 8(%rsp)
+89E8                                    movl %ebp, %eax
+89EB                                    movl %ebp, %ebx
+89E9                                    movl %ebp, %ecx
+89EF                                    movl %ebp, %edi
+89EA                                    movl %ebp, %edx
+89EE                                    movl %ebp, %esi
+4189EC                                  movl %ebp, %r12d
+4189ED                                  movl %ebp, %r13d
+41892F                                  movl %ebp, (%r15)
+4189E8                                  movl %ebp, %r8d
+4189E9                                  movl %ebp, %r9d
+892CC1                                  movl %ebp, (%rcx,%rax,8)
+892C24                                  movl %ebp, (%rsp)
+895D0C                                  movl %ebx, 12(%rbp)
+895D10                                  movl %ebx, 16(%rbp)
+895C2418                                movl %ebx, 24(%rsp)
+895D20                                  movl %ebx, 32(%rbp)
+895C2428                                movl %ebx, 40(%rsp)
+895804                                  movl %ebx, 4(%rax)
+895C2434                                movl %ebx, 52(%rsp)
+895808                                  movl %ebx, 8(%rax)
+895C2408                                movl %ebx, 8(%rsp)
+89D8                                    movl %ebx, %eax
+89DD                                    movl %ebx, %ebp
+89D9                                    movl %ebx, %ecx
+89DF                                    movl %ebx, %edi
+89DA                                    movl %ebx, %edx
+89DE                                    movl %ebx, %esi
+4189DC                                  movl %ebx, %r12d
+4189DF                                  movl %ebx, %r15d
+4189D8                                  movl %ebx, %r8d
+8918                                    movl %ebx, (%rax)
+891C81                                  movl %ebx, (%rcx,%rax,4)
+894C240C                                movl %ecx, 12(%rsp)
+894C2410                                movl %ecx, 16(%rsp)
+898C24DC000000                          movl %ecx, 220(%rsp)
+894B18                                  movl %ecx, 24(%rbx)
+894C2418                                movl %ecx, 24(%rsp)
+894C241C                                movl %ecx, 28(%rsp)
+894F20                                  movl %ecx, 32(%rdi)
+894C2420                                movl %ecx, 32(%rsp)
+894C2424                                movl %ecx, 36(%rsp)
+89C8                                    movl %ecx, %eax
+89CD                                    movl %ecx, %ebp
+89CB                                    movl %ecx, %ebx
+89CF                                    movl %ecx, %edi
+89CA                                    movl %ecx, %edx
+89CE                                    movl %ecx, %esi
+4189CA                                  movl %ecx, %r10d
+4189CC                                  movl %ecx, %r12d
+4189CD                                  movl %ecx, %r13d
+4189CE                                  movl %ecx, %r14d
+4189CF                                  movl %ecx, %r15d
+890E                                    movl %ecx, (%rsi)
+897C2478                                movl %edi, 120(%rsp)
+897C860C                                movl %edi, 12(%rsi,%rax,4)
+897D10                                  movl %edi, 16(%rbp)
+897C2418                                movl %edi, 24(%rsp)
+897C2428                                movl %edi, 40(%rsp)
+897C242C                                movl %edi, 44(%rsp)
+897B30                                  movl %edi, 48(%rbx)
+897C2438                                movl %edi, 56(%rsp)
+897808                                  movl %edi, 8(%rax)
+89F8                                    movl %edi, %eax
+89FD                                    movl %edi, %ebp
+89FB                                    movl %edi, %ebx
+89F9                                    movl %edi, %ecx
+89FA                                    movl %edi, %edx
+89FE                                    movl %edi, %esi
+893DF7FFFFFF                            movl %edi, -9(%rip)
+4189FC                                  movl %edi, %r12d
+4189FD                                  movl %edi, %r13d
+4189FE                                  movl %edi, %r14d
+4189FF                                  movl %edi, %r15d
+4189F8                                  movl %edi, %r8d
+4189F9                                  movl %edi, %r9d
+893E                                    movl %edi, (%rsi)
+8954240C                                movl %edx, 12(%rsp)
+895010                                  movl %edx, 16(%rax)
+89548710                                movl %edx, 16(%rdi,%rax,4)
+895424F0                                movl %edx, -16(%rsp)
+89542410                                movl %edx, 16(%rsp)
+895424EC                                movl %edx, -20(%rsp)
+89542414                                movl %edx, 20(%rsp)
+899424D4000000                          movl %edx, 212(%rsp)
+899424E0000000                          movl %edx, 224(%rsp)
+895018                                  movl %edx, 24(%rax)
+89542418                                movl %edx, 24(%rsp)
+89948420010000                          movl %edx, 288(%rsp,%rax,4)
+89531C                                  movl %edx, 28(%rbx)
+89942424010000                          movl %edx, 292(%rsp)
+89948424010000                          movl %edx, 292(%rsp,%rax,4)
+895720                                  movl %edx, 32(%rdi)
+89542420                                movl %edx, 32(%rsp)
+8994245C010000                          movl %edx, 348(%rsp)
+89542428                                movl %edx, 40(%rsp)
+895604                                  movl %edx, 4(%rsi)
+895424F8                                movl %edx, -8(%rsp)
+89542408                                movl %edx, 8(%rsp)
+89D0                                    movl %edx, %eax
+89D5                                    movl %edx, %ebp
+89D3                                    movl %edx, %ebx
+89D1                                    movl %edx, %ecx
+89D7                                    movl %edx, %edi
+89D6                                    movl %edx, %esi
+4189D3                                  movl %edx, %r11d
+41891424                                movl %edx, (%r12)
+4189D4                                  movl %edx, %r12d
+4189D5                                  movl %edx, %r13d
+4189D6                                  movl %edx, %r14d
+4189D7                                  movl %edx, %r15d
+4189D0                                  movl %edx, %r8d
+4189D1                                  movl %edx, %r9d
+891487                                  movl %edx, (%rdi,%rax,4)
+89148F                                  movl %edx, (%rdi,%rcx,4)
+891424                                  movl %edx, (%rsp)
+89748710                                movl %esi, 16(%rdi,%rax,4)
+89742414                                movl %esi, 20(%rsp)
+897520                                  movl %esi, 32(%rbp)
+89742424                                movl %esi, 36(%rsp)
+89742404                                movl %esi, 4(%rsp)
+89742438                                movl %esi, 56(%rsp)
+89740B08                                movl %esi, 8(%rbx,%rcx)
+89F0                                    movl %esi, %eax
+89F5                                    movl %esi, %ebp
+89F3                                    movl %esi, %ebx
+89F1                                    movl %esi, %ecx
+89F7                                    movl %esi, %edi
+89F2                                    movl %esi, %edx
+893552ADF7FF                            movl %esi, -545454(%rip)
+4189F2                                  movl %esi, %r10d
+4189F3                                  movl %esi, %r11d
+4189F4                                  movl %esi, %r12d
+4189F5                                  movl %esi, %r13d
+4189F6                                  movl %esi, %r14d
+41893486                                movl %esi, (%r14,%rax,4)
+4189F7                                  movl %esi, %r15d
+4189F0                                  movl %esi, %r8d
+4189F1                                  movl %esi, %r9d
+893487                                  movl %esi, (%rdi,%rax,4)
+4489D0                                  movl %r10d, %eax
+4489D7                                  movl %r10d, %edi
+4489D2                                  movl %r10d, %edx
+4589D3                                  movl %r10d, %r11d
+4589D0                                  movl %r10d, %r8d
+4589D1                                  movl %r10d, %r9d
+44895C24FC                              movl %r11d, -4(%rsp)
+4489D8                                  movl %r11d, %eax
+4489D9                                  movl %r11d, %ecx
+4489DA                                  movl %r11d, %edx
+4589D8                                  movl %r11d, %r8d
+4489600C                                movl %r12d, 12(%rax)
+44896010                                movl %r12d, 16(%rax)
+44896018                                movl %r12d, 24(%rax)
+44896020                                movl %r12d, 32(%rax)
+44896008                                movl %r12d, 8(%rax)
+4489E0                                  movl %r12d, %eax
+4489E5                                  movl %r12d, %ebp
+4489E3                                  movl %r12d, %ebx
+4489E1                                  movl %r12d, %ecx
+4489E7                                  movl %r12d, %edi
+4489E2                                  movl %r12d, %edx
+4489E6                                  movl %r12d, %esi
+4589E0                                  movl %r12d, %r8d
+448920                                  movl %r12d, (%rax)
+44892482                                movl %r12d, (%rdx,%rax,4)
+418B0424                                movl (%r12), %eax
+418B1424                                movl (%r12), %edx
+4489680C                                movl %r13d, 12(%rax)
+44896810                                movl %r13d, 16(%rax)
+44896818                                movl %r13d, 24(%rax)
+4489AC241C020000                        movl %r13d, 540(%rsp)
+4489AC2420020000                        movl %r13d, 544(%rsp)
+4489AC2424020000                        movl %r13d, 548(%rsp)
+4489AC2428020000                        movl %r13d, 552(%rsp)
+4489E8                                  movl %r13d, %eax
+4489ED                                  movl %r13d, %ebp
+4489E9                                  movl %r13d, %ecx
+4489EF                                  movl %r13d, %edi
+4489EA                                  movl %r13d, %edx
+4489EE                                  movl %r13d, %esi
+4589E8                                  movl %r13d, %r8d
+418B4500                                movl (%r13), %eax
+418B5D00                                movl (%r13), %ebx
+418B448500                              movl (%r13,%rax,4), %eax
+44897314                                movl %r14d, 20(%rbx)
+4489F0                                  movl %r14d, %eax
+4489F5                                  movl %r14d, %ebp
+4489F1                                  movl %r14d, %ecx
+4489F7                                  movl %r14d, %edi
+4489F2                                  movl %r14d, %edx
+4489F6                                  movl %r14d, %esi
+4589F0                                  movl %r14d, %r8d
+4589F1                                  movl %r14d, %r9d
+448930                                  movl %r14d, (%rax)
+418B3E                                  movl (%r14), %edi
+418B1C96                                movl (%r14,%rdx,4), %ebx
+4489F8                                  movl %r15d, %eax
+4489FD                                  movl %r15d, %ebp
+4489FB                                  movl %r15d, %ebx
+4489F9                                  movl %r15d, %ecx
+4489FF                                  movl %r15d, %edi
+4489FA                                  movl %r15d, %edx
+4489FE                                  movl %r15d, %esi
+4589FD                                  movl %r15d, %r13d
+4589F8                                  movl %r15d, %r8d
+418B3F                                  movl (%r15), %edi
+4489442414                              movl %r8d, 20(%rsp)
+4489442418                              movl %r8d, 24(%rsp)
+4489431C                                movl %r8d, 28(%rbx)
+4489442428                              movl %r8d, 40(%rsp)
+44894604                                movl %r8d, 4(%rsi)
+44894608                                movl %r8d, 8(%rsi)
+4489442408                              movl %r8d, 8(%rsp)
+4489C0                                  movl %r8d, %eax
+4489C5                                  movl %r8d, %ebp
+4489C3                                  movl %r8d, %ebx
+4489C7                                  movl %r8d, %edi
+4489C2                                  movl %r8d, %edx
+4589C4                                  movl %r8d, %r12d
+4589C5                                  movl %r8d, %r13d
+4589C6                                  movl %r8d, %r14d
+418B00                                  movl (%r8), %eax
+44894C2414                              movl %r9d, 20(%rsp)
+44894C2428                              movl %r9d, 40(%rsp)
+44894C2430                              movl %r9d, 48(%rsp)
+4489C8                                  movl %r9d, %eax
+4489CD                                  movl %r9d, %ebp
+4489CB                                  movl %r9d, %ebx
+4489C9                                  movl %r9d, %ecx
+4489CF                                  movl %r9d, %edi
+4489CA                                  movl %r9d, %edx
+4489CE                                  movl %r9d, %esi
+4589CB                                  movl %r9d, %r11d
+4589CC                                  movl %r9d, %r12d
+4589CD                                  movl %r9d, %r13d
+4589CF                                  movl %r9d, %r15d
+8B00                                    movl (%rax), %eax
+8B08                                    movl (%rax), %ecx
+8B38                                    movl (%rax), %edi
+8B30                                    movl (%rax), %esi
+8B04C8                                  movl (%rax,%rcx,8), %eax
+8B0490                                  movl (%rax,%rdx,4), %eax
+8B1C90                                  movl (%rax,%rdx,4), %ebx
+8B3490                                  movl (%rax,%rdx,4), %esi
+8B2CD0                                  movl (%rax,%rdx,8), %ebp
+8B4500                                  movl (%rbp), %eax
+8B7D00                                  movl (%rbp), %edi
+8B5500                                  movl (%rbp), %edx
+8B7500                                  movl (%rbp), %esi
+8B7C9D00                                movl (%rbp,%rbx,4), %edi
+8B03                                    movl (%rbx), %eax
+8B2B                                    movl (%rbx), %ebp
+8B13                                    movl (%rbx), %edx
+8B33                                    movl (%rbx), %esi
+448B3C03                                movl (%rbx,%rax), %r15d
+8B3CAB                                  movl (%rbx,%rbp,4), %edi
+8B01                                    movl (%rcx), %eax
+8B39                                    movl (%rcx), %edi
+8B11                                    movl (%rcx), %edx
+8B0481                                  movl (%rcx,%rax,4), %eax
+8B1481                                  movl (%rcx,%rax,4), %edx
+8B3C19                                  movl (%rcx,%rbx), %edi
+8B14D1                                  movl (%rcx,%rdx,8), %edx
+8B34F1                                  movl (%rcx,%rsi,8), %esi
+8B07                                    movl (%rdi), %eax
+8B2F                                    movl (%rdi), %ebp
+8B3F                                    movl (%rdi), %edi
+8B37                                    movl (%rdi), %esi
+448B27                                  movl (%rdi), %r12d
+8B3C17                                  movl (%rdi,%rdx), %edi
+8B02                                    movl (%rdx), %eax
+8B0A                                    movl (%rdx), %ecx
+8B32                                    movl (%rdx), %esi
+8B0482                                  movl (%rdx,%rax,4), %eax
+8B2C82                                  movl (%rdx,%rax,4), %ebp
+8B3C82                                  movl (%rdx,%rax,4), %edi
+448B2402                                movl (%rdx,%rax), %r12d
+8B141A                                  movl (%rdx,%rbx), %edx
+8B3C3A                                  movl (%rdx,%rdi), %edi
+8B06                                    movl (%rsi), %eax
+8B16                                    movl (%rsi), %edx
+428B3C2E                                movl (%rsi,%r13), %edi
+8B3406                                  movl (%rsi,%rax), %esi
+8B049E                                  movl (%rsi,%rbx,4), %eax
+8B140E                                  movl (%rsi,%rcx), %edx
+8B0496                                  movl (%rsi,%rdx,4), %eax
+448B2C96                                movl (%rsi,%rdx,4), %r13d
+8B0C16                                  movl (%rsi,%rdx), %ecx
+8B3C24                                  movl (%rsp), %edi
+8B1424                                  movl (%rsp), %edx
+48C744247000000000                      movq $0, 112(%rsp)
+48C744247800000000                      movq $0, 120(%rsp)
+48C7401000000000                        movq $0, 16(%rax)
+48C744241000000000                      movq $0, 16(%rsp)
+48C78424C000000000000000                movq $0, 192(%rsp)
+48C7401800000000                        movq $0, 24(%rax)
+48C7431800000000                        movq $0, 24(%rbx)
+48C744241800000000                      movq $0, 24(%rsp)
+48C7402000000000                        movq $0, 32(%rax)
+48C7402800000000                        movq $0, 40(%rax)
+48C744242800000000                      movq $0, 40(%rsp)
+48C744243000000000                      movq $0, 48(%rsp)
+48C784243002000000000000                movq $0, 560(%rsp)
+48C744244000000000                      movq $0, 64(%rsp)
+48C7400800000000                        movq $0, 8(%rax)
+48C7430800000000                        movq $0, 8(%rbx)
+48C7420800000000                        movq $0, 8(%rdx)
+48C744240800000000                      movq $0, 8(%rsp)
+49C704C600000000                        movq $0, (%r14,%rax,8)
+48C70000000000                          movq $0, (%rax)
+48C704E800000000                        movq $0, (%rax,%rbp,8)
+48C704C800000000                        movq $0, (%rax,%rcx,8)
+48C744C50000000000                      movq $0, (%rbp,%rax,8)
+48C704F100000000                        movq $0, (%rcx,%rsi,8)
+48C704CE00000000                        movq $0, (%rsi,%rcx,8)
+4C8B642468                              movq 104(%rsp), %r12
+4C8B6C2468                              movq 104(%rsp), %r13
+4C8B742468                              movq 104(%rsp), %r14
+488B5C2468                              movq 104(%rsp), %rbx
+488B7C2468                              movq 104(%rsp), %rdi
+488B542468                              movq 104(%rsp), %rdx
+4C8B6C2470                              movq 112(%rsp), %r13
+4C8B742470                              movq 112(%rsp), %r14
+4C8B7C2470                              movq 112(%rsp), %r15
+488B442470                              movq 112(%rsp), %rax
+488B6C2470                              movq 112(%rsp), %rbp
+488B5C2470                              movq 112(%rsp), %rbx
+488B4C2470                              movq 112(%rsp), %rcx
+488B7C2470                              movq 112(%rsp), %rdi
+488B542470                              movq 112(%rsp), %rdx
+488B742470                              movq 112(%rsp), %rsi
+4C8B642478                              movq 120(%rsp), %r12
+4C8B742478                              movq 120(%rsp), %r14
+488B442478                              movq 120(%rsp), %rax
+488B4C2478                              movq 120(%rsp), %rcx
+488B742478                              movq 120(%rsp), %rsi
+4C8BAC2480000000                        movq 128(%rsp), %r13
+4C8BBC2480000000                        movq 128(%rsp), %r15
+488B842480000000                        movq 128(%rsp), %rax
+488B9C2480000000                        movq 128(%rsp), %rbx
+4C8BB42488000000                        movq 136(%rsp), %r14
+488B842488000000                        movq 136(%rsp), %rax
+488B9C2488000000                        movq 136(%rsp), %rbx
+488B942488000000                        movq 136(%rsp), %rdx
+4C8BBC2490000000                        movq 144(%rsp), %r15
+488BAC2490000000                        movq 144(%rsp), %rbp
+4C8BA42498000000                        movq 152(%rsp), %r12
+488B842498000000                        movq 152(%rsp), %rax
+488B9C2498000000                        movq 152(%rsp), %rbx
+488B8C2498000000                        movq 152(%rsp), %rcx
+488BB42498000000                        movq 152(%rsp), %rsi
+4C8BAC24A0000000                        movq 160(%rsp), %r13
+488B8424A0000000                        movq 160(%rsp), %rax
+488B9C24A0000000                        movq 160(%rsp), %rbx
+488B8C24A0000000                        movq 160(%rsp), %rcx
+488BBC24A0000000                        movq 160(%rsp), %rdi
+488B9424A0000000                        movq 160(%rsp), %rdx
+488BB424A0000000                        movq 160(%rsp), %rsi
+4C8BB424A8000000                        movq 168(%rsp), %r14
+4D8B442410                              movq 16(%r12), %r8
+498B442410                              movq 16(%r12), %rax
+498B7C2410                              movq 16(%r12), %rdi
+498B542410                              movq 16(%r12), %rdx
+498B742410                              movq 16(%r12), %rsi
+498B4510                                movq 16(%r13), %rax
+498B5D10                                movq 16(%r13), %rbx
+498B4D10                                movq 16(%r13), %rcx
+498B7D10                                movq 16(%r13), %rdi
+498B7510                                movq 16(%r13), %rsi
+498B4710                                movq 16(%r15), %rax
+498B5710                                movq 16(%r15), %rdx
+498B4010                                movq 16(%r8), %rax
+488B5810                                movq 16(%rax), %rbx
+488B7810                                movq 16(%rax), %rdi
+488B5010                                movq 16(%rax), %rdx
+488B4510                                movq 16(%rbp), %rax
+488B5D10                                movq 16(%rbp), %rbx
+488B7D10                                movq 16(%rbp), %rdi
+488B5510                                movq 16(%rbp), %rdx
+488B7510                                movq 16(%rbp), %rsi
+488B4310                                movq 16(%rbx), %rax
+488B5B10                                movq 16(%rbx), %rbx
+488B4B10                                movq 16(%rbx), %rcx
+488B7B10                                movq 16(%rbx), %rdi
+488B5310                                movq 16(%rbx), %rdx
+488B7310                                movq 16(%rbx), %rsi
+488B4110                                movq 16(%rcx), %rax
+488B4710                                movq 16(%rdi), %rax
+488B4F10                                movq 16(%rdi), %rcx
+488B7F10                                movq 16(%rdi), %rdi
+488B5710                                movq 16(%rdi), %rdx
+4C8B4210                                movq 16(%rdx), %r8
+488B4A10                                movq 16(%rdx), %rcx
+488B5210                                movq 16(%rdx), %rdx
+488B4610                                movq 16(%rsi), %rax
+488B7610                                movq 16(%rsi), %rsi
+4C8B642410                              movq 16(%rsp), %r12
+4C8B6C2410                              movq 16(%rsp), %r13
+488B442410                              movq 16(%rsp), %rax
+488B6C2410                              movq 16(%rsp), %rbp
+488B4C2410                              movq 16(%rsp), %rcx
+488B542410                              movq 16(%rsp), %rdx
+488B742410                              movq 16(%rsp), %rsi
+4C8BBC24B0000000                        movq 176(%rsp), %r15
+488B8424B0000000                        movq 176(%rsp), %rax
+488B8C24B0000000                        movq 176(%rsp), %rcx
+488BB424B0000000                        movq 176(%rsp), %rsi
+488B8C24C0000000                        movq 192(%rsp), %rcx
+488BBC24C0000000                        movq 192(%rsp), %rdi
+488B9424C0000000                        movq 192(%rsp), %rdx
+488BB424C0000000                        movq 192(%rsp), %rsi
+48C7C0FFFFFFFF                          movq $-1, %rax
+488BB424C8000000                        movq 200(%rsp), %rsi
+4C8B8424E0000000                        movq 224(%rsp), %r8
+4C8BA424E8000000                        movq 232(%rsp), %r12
+488B8C24F0000000                        movq 240(%rsp), %rcx
+498B442418                              movq 24(%r12), %rax
+498B542418                              movq 24(%r12), %rdx
+498B742418                              movq 24(%r12), %rsi
+498B7D18                                movq 24(%r13), %rdi
+498B7518                                movq 24(%r13), %rsi
+498B4E18                                movq 24(%r14), %rcx
+498B7618                                movq 24(%r14), %rsi
+4D8B6718                                movq 24(%r15), %r12
+498B7718                                movq 24(%r15), %rsi
+488B7818                                movq 24(%rax), %rdi
+488B7018                                movq 24(%rax), %rsi
+488B4518                                movq 24(%rbp), %rax
+488B5D18                                movq 24(%rbp), %rbx
+488B4D18                                movq 24(%rbp), %rcx
+488B7D18                                movq 24(%rbp), %rdi
+488B5518                                movq 24(%rbp), %rdx
+488B7518                                movq 24(%rbp), %rsi
+488B4318                                movq 24(%rbx), %rax
+488B5B18                                movq 24(%rbx), %rbx
+488B7B18                                movq 24(%rbx), %rdi
+488B5318                                movq 24(%rbx), %rdx
+488B7318                                movq 24(%rbx), %rsi
+488B4118                                movq 24(%rcx), %rax
+488B4918                                movq 24(%rcx), %rcx
+488B4718                                movq 24(%rdi), %rax
+488B7F18                                movq 24(%rdi), %rdi
+488B4618                                movq 24(%rsi), %rax
+488B7E18                                movq 24(%rsi), %rdi
+488B7618                                movq 24(%rsi), %rsi
+4C8B642418                              movq 24(%rsp), %r12
+4C8B6C2418                              movq 24(%rsp), %r13
+4C8B742418                              movq 24(%rsp), %r14
+488B442418                              movq 24(%rsp), %rax
+488B5C2418                              movq 24(%rsp), %rbx
+488B4C2418                              movq 24(%rsp), %rcx
+488B7C2418                              movq 24(%rsp), %rdi
+488B542418                              movq 24(%rsp), %rdx
+488B742418                              movq 24(%rsp), %rsi
+4C8BA42400010000                        movq 256(%rsp), %r12
+488B842400010000                        movq 256(%rsp), %rax
+488B8C2408010000                        movq 264(%rsp), %rcx
+488B8C2420010000                        movq 288(%rsp), %rcx
+488B842430010000                        movq 304(%rsp), %rax
+488B9C2430010000                        movq 304(%rsp), %rbx
+488B8C2430010000                        movq 304(%rsp), %rcx
+488B842438010000                        movq 312(%rsp), %rax
+488B942438010000                        movq 312(%rsp), %rdx
+4D8B642420                              movq 32(%r12), %r12
+498B442420                              movq 32(%r12), %rax
+498B4C2420                              movq 32(%r12), %rcx
+498B7C2420                              movq 32(%r12), %rdi
+498B742420                              movq 32(%r12), %rsi
+498B7D20                                movq 32(%r13), %rdi
+498B5520                                movq 32(%r13), %rdx
+498B7520                                movq 32(%r13), %rsi
+498B7720                                movq 32(%r15), %rsi
+488B7D20                                movq 32(%rbp), %rdi
+488B5520                                movq 32(%rbp), %rdx
+488B7520                                movq 32(%rbp), %rsi
+488B4320                                movq 32(%rbx), %rax
+488B7B20                                movq 32(%rbx), %rdi
+488B7320                                movq 32(%rbx), %rsi
+488B4720                                movq 32(%rdi), %rax
+488B7F20                                movq 32(%rdi), %rdi
+4C8B6C2420                              movq 32(%rsp), %r13
+4C8B742420                              movq 32(%rsp), %r14
+4C8B7C2420                              movq 32(%rsp), %r15
+488B442420                              movq 32(%rsp), %rax
+488B6C2420                              movq 32(%rsp), %rbp
+488B4C2420                              movq 32(%rsp), %rcx
+488B7C2420                              movq 32(%rsp), %rdi
+488B542420                              movq 32(%rsp), %rdx
+488B742420                              movq 32(%rsp), %rsi
+4C8B642428                              movq 40(%rsp), %r12
+4C8B742428                              movq 40(%rsp), %r14
+488B442428                              movq 40(%rsp), %rax
+488B5C2428                              movq 40(%rsp), %rbx
+488B4C2428                              movq 40(%rsp), %rcx
+488B7C2428                              movq 40(%rsp), %rdi
+488B742428                              movq 40(%rsp), %rsi
+4C8B6C2430                              movq 48(%rsp), %r13
+4C8B7C2430                              movq 48(%rsp), %r15
+488B442430                              movq 48(%rsp), %rax
+488B6C2430                              movq 48(%rsp), %rbp
+488B4C2430                              movq 48(%rsp), %rcx
+488B542430                              movq 48(%rsp), %rdx
+488B742430                              movq 48(%rsp), %rsi
+488B5004                                movq 4(%rax), %rdx
+488B7F04                                movq 4(%rdi), %rdi
+488B842430020000                        movq 560(%rsp), %rax
+488BBC2430020000                        movq 560(%rsp), %rdi
+4C8B542438                              movq 56(%rsp), %r10
+4C8B642438                              movq 56(%rsp), %r12
+4C8B742438                              movq 56(%rsp), %r14
+488B4C2438                              movq 56(%rsp), %rcx
+488B7C2438                              movq 56(%rsp), %rdi
+488B542438                              movq 56(%rsp), %rdx
+4C8B6C2440                              movq 64(%rsp), %r13
+4C8B7C2440                              movq 64(%rsp), %r15
+488B7C2440                              movq 64(%rsp), %rdi
+4C8B742448                              movq 72(%rsp), %r14
+488B5C2448                              movq 72(%rsp), %rbx
+488B742448                              movq 72(%rsp), %rsi
+4C8B7C2450                              movq 80(%rsp), %r15
+488B442450                              movq 80(%rsp), %rax
+488B6C2450                              movq 80(%rsp), %rbp
+488B5C2450                              movq 80(%rsp), %rbx
+488B7C2450                              movq 80(%rsp), %rdi
+488B542450                              movq 80(%rsp), %rdx
+4C8B642458                              movq 88(%rsp), %r12
+488B442458                              movq 88(%rsp), %rax
+488B6C2458                              movq 88(%rsp), %rbp
+488B5C2458                              movq 88(%rsp), %rbx
+488B4C2458                              movq 88(%rsp), %rcx
+488B7C2458                              movq 88(%rsp), %rdi
+488B542458                              movq 88(%rsp), %rdx
+488B742458                              movq 88(%rsp), %rsi
+4D8B642408                              movq 8(%r12), %r12
+498B442408                              movq 8(%r12), %rax
+498B5C2408                              movq 8(%r12), %rbx
+498B4C2408                              movq 8(%r12), %rcx
+498B7C2408                              movq 8(%r12), %rdi
+498B742408                              movq 8(%r12), %rsi
+4D8B6D08                                movq 8(%r13), %r13
+498B4508                                movq 8(%r13), %rax
+498B5D08                                movq 8(%r13), %rbx
+498B4D08                                movq 8(%r13), %rcx
+498B7D08                                movq 8(%r13), %rdi
+498B5508                                movq 8(%r13), %rdx
+498B7508                                movq 8(%r13), %rsi
+498B4608                                movq 8(%r14), %rax
+498B4E08                                movq 8(%r14), %rcx
+498B7E08                                movq 8(%r14), %rdi
+498B5608                                movq 8(%r14), %rdx
+498B4708                                movq 8(%r15), %rax
+498B7708                                movq 8(%r15), %rsi
+498B4008                                movq 8(%r8), %rax
+4C8B4808                                movq 8(%rax), %r9
+488B4008                                movq 8(%rax), %rax
+488B4808                                movq 8(%rax), %rcx
+488B7808                                movq 8(%rax), %rdi
+488B5008                                movq 8(%rax), %rdx
+488B7008                                movq 8(%rax), %rsi
+4C8B6D08                                movq 8(%rbp), %r13
+488B4508                                movq 8(%rbp), %rax
+488B5D08                                movq 8(%rbp), %rbx
+488B7D08                                movq 8(%rbp), %rdi
+488B5508                                movq 8(%rbp), %rdx
+488B7508                                movq 8(%rbp), %rsi
+4C8B6308                                movq 8(%rbx), %r12
+4C8B4308                                movq 8(%rbx), %r8
+4C8B4B08                                movq 8(%rbx), %r9
+488B4308                                movq 8(%rbx), %rax
+488B5B08                                movq 8(%rbx), %rbx
+488B7B08                                movq 8(%rbx), %rdi
+488B5308                                movq 8(%rbx), %rdx
+488B7308                                movq 8(%rbx), %rsi
+4C8B4108                                movq 8(%rcx), %r8
+488B4108                                movq 8(%rcx), %rax
+488B5108                                movq 8(%rcx), %rdx
+488B4708                                movq 8(%rdi), %rax
+488B4F08                                movq 8(%rdi), %rcx
+488B7F08                                movq 8(%rdi), %rdi
+488B5708                                movq 8(%rdi), %rdx
+488B7708                                movq 8(%rdi), %rsi
+4C8B4208                                movq 8(%rdx), %r8
+488B4208                                movq 8(%rdx), %rax
+488B5A08                                movq 8(%rdx), %rbx
+488B4A08                                movq 8(%rdx), %rcx
+488B7A08                                movq 8(%rdx), %rdi
+488B5208                                movq 8(%rdx), %rdx
+488B4608                                movq 8(%rsi), %rax
+488B7E08                                movq 8(%rsi), %rdi
+488B5608                                movq 8(%rsi), %rdx
+488B7608                                movq 8(%rsi), %rsi
+4C8B642408                              movq 8(%rsp), %r12
+4C8B442408                              movq 8(%rsp), %r8
+488B442408                              movq 8(%rsp), %rax
+488B6C2408                              movq 8(%rsp), %rbp
+488B5C2408                              movq 8(%rsp), %rbx
+488B4C2408                              movq 8(%rsp), %rcx
+488B742408                              movq 8(%rsp), %rsi
+4C8B642460                              movq 96(%rsp), %r12
+4C8B6C2460                              movq 96(%rsp), %r13
+488B6C2460                              movq 96(%rsp), %rbp
+488B4C2460                              movq 96(%rsp), %rcx
+488B7C2460                              movq 96(%rsp), %rdi
+488B742460                              movq 96(%rsp), %rsi
+48C7442448E7030000                      movq $999, 72(%rsp)
+48C70424E6030000                        movq $998, (%rsp)
+488B04C54D010000                        movq 333(,%rax,8), %rax
+488B04C505000000                        movq 5(,%rax,8), %rax
+488B04C503000000                        movq 3(,%rax,8), %rax
+488B04C506000000                        movq 6(,%rax,8), %rax
+488B05BC010000                          movq 444(%rip), %rax
+488B0529000000                          movq 41(%rip), %rax
+488B1D2A000000                          movq 42(%rip), %rbx
+488B3D2B000000                          movq 43(%rip), %rdi
+488B04C502000000                        movq 2(,%rax,8), %rax
+488B04C507000000                        movq 7(,%rax,8), %rax
+488B34C500000000                        movq 0(,%rax,8), %rsi
+488B04C501000000                        movq 1(,%rax,8), %rax
+4C89542438                              movq %r10, 56(%rsp)
+4C89D0                                  movq %r10, %rax
+4C89D8                                  movq %r11, %rax
+4C896010                                movq %r12, 16(%rax)
+4C896424F0                              movq %r12, -16(%rsp)
+4C896424E8                              movq %r12, -24(%rsp)
+4C896424E0                              movq %r12, -32(%rsp)
+4C896008                                movq %r12, 8(%rax)
+4C896424F8                              movq %r12, -8(%rsp)
+4D8B2C24                                movq (%r12), %r13
+4D8B3424                                movq (%r12), %r14
+4D89E0                                  movq %r12, %r8
+4D89E1                                  movq %r12, %r9
+498B0424                                movq (%r12), %rax
+4C89E0                                  movq %r12, %rax
+4C8924C8                                movq %r12, (%rax,%rcx,8)
+498B04DC                                movq (%r12,%rbx,8), %rax
+498B34DC                                movq (%r12,%rbx,8), %rsi
+4C89E1                                  movq %r12, %rcx
+498B3C24                                movq (%r12), %rdi
+4C89E7                                  movq %r12, %rdi
+4C89E2                                  movq %r12, %rdx
+4C89E6                                  movq %r12, %rsi
+4C896810                                movq %r13, 16(%rax)
+4C896C24F0                              movq %r13, -16(%rsp)
+4C896818                                movq %r13, 24(%rax)
+4C896C24E8                              movq %r13, -24(%rsp)
+4C896B08                                movq %r13, 8(%rbx)
+4C896C24F8                              movq %r13, -8(%rsp)
+4D89EE                                  movq %r13, %r14
+4D89E8                                  movq %r13, %r8
+4D89E9                                  movq %r13, %r9
+498B4500                                movq (%r13), %rax
+4C89E8                                  movq %r13, %rax
+498B5D00                                movq (%r13), %rbx
+498B74DD00                              movq (%r13,%rbx,8), %rsi
+4C89E9                                  movq %r13, %rcx
+498B7D00                                movq (%r13), %rdi
+4C89EF                                  movq %r13, %rdi
+498B5500                                movq (%r13), %rdx
+4C89EA                                  movq %r13, %rdx
+4C89EE                                  movq %r13, %rsi
+4C892C24                                movq %r13, (%rsp)
+4C897424F0                              movq %r14, -16(%rsp)
+4C897018                                movq %r14, 24(%rax)
+4C897020                                movq %r14, 32(%rax)
+4D89742408                              movq %r14, 8(%r12)
+4C897424F8                              movq %r14, -8(%rsp)
+4D89F0                                  movq %r14, %r8
+4D89F1                                  movq %r14, %r9
+498B06                                  movq (%r14), %rax
+4C89F0                                  movq %r14, %rax
+4C8933                                  movq %r14, (%rbx)
+4C89F1                                  movq %r14, %rcx
+498B3E                                  movq (%r14), %rdi
+4C89F7                                  movq %r14, %rdi
+498B16                                  movq (%r14), %rdx
+4C89F2                                  movq %r14, %rdx
+4C89F6                                  movq %r14, %rsi
+4C897B10                                movq %r15, 16(%rbx)
+4C897820                                movq %r15, 32(%rax)
+4C897C24F8                              movq %r15, -8(%rsp)
+4C897C2408                              movq %r15, 8(%rsp)
+4D89F8                                  movq %r15, %r8
+498B07                                  movq (%r15), %rax
+4C893CC8                                movq %r15, (%rax,%rcx,8)
+4C89FD                                  movq %r15, %rbp
+4C89F9                                  movq %r15, %rcx
+498B3F                                  movq (%r15), %rdi
+4C89FF                                  movq %r15, %rdi
+4C89FA                                  movq %r15, %rdx
+4C89FE                                  movq %r15, %rsi
+4C89442468                              movq %r8, 104(%rsp)
+4C89842488000000                        movq %r8, 136(%rsp)
+4C89842490000000                        movq %r8, 144(%rsp)
+4C89442418                              movq %r8, 24(%rsp)
+4C89842428010000                        movq %r8, 296(%rsp)
+4C89442420                              movq %r8, 32(%rsp)
+4C89442428                              movq %r8, 40(%rsp)
+4C89442430                              movq %r8, 48(%rsp)
+4C89442440                              movq %r8, 64(%rsp)
+4C894308                                movq %r8, 8(%rbx)
+4C894608                                movq %r8, 8(%rsi)
+4C89442408                              movq %r8, 8(%rsp)
+4D89C7                                  movq %r8, %r15
+498B00                                  movq (%r8), %rax
+4C89C0                                  movq %r8, %rax
+4C89C5                                  movq %r8, %rbp
+4C8903                                  movq %r8, (%rbx)
+4C89C3                                  movq %r8, %rbx
+4C89C6                                  movq %r8, %rsi
+4C890424                                movq %r8, (%rsp)
+4C898C2480000000                        movq %r9, 128(%rsp)
+4C894B10                                movq %r9, 16(%rbx)
+4C894C2418                              movq %r9, 24(%rsp)
+4C894C2420                              movq %r9, 32(%rsp)
+4C894C2448                              movq %r9, 72(%rsp)
+4D89CE                                  movq %r9, %r14
+4C89C8                                  movq %r9, %rax
+498B3CC1                                movq (%r9,%rax,8), %rdi
+4C89CA                                  movq %r9, %rdx
+4C890C24                                movq %r9, (%rsp)
+4889442470                              movq %rax, 112(%rsp)
+4989442410                              movq %rax, 16(%r12)
+48894510                                movq %rax, 16(%rbp)
+48894310                                movq %rax, 16(%rbx)
+48894610                                movq %rax, 16(%rsi)
+4889442410                              movq %rax, 16(%rsp)
+48898424B0000000                        movq %rax, 176(%rsp)
+4989442418                              movq %rax, 24(%r12)
+49894518                                movq %rax, 24(%r13)
+48894518                                movq %rax, 24(%rbp)
+48894318                                movq %rax, 24(%rbx)
+48894218                                movq %rax, 24(%rdx)
+48894618                                movq %rax, 24(%rsi)
+4889442418                              movq %rax, 24(%rsp)
+4889842400010000                        movq %rax, 256(%rsp)
+4989442420                              movq %rax, 32(%r12)
+49894720                                movq %rax, 32(%r15)
+48894520                                movq %rax, 32(%rbp)
+48894320                                movq %rax, 32(%rbx)
+4889442420                              movq %rax, 32(%rsp)
+4889442428                              movq %rax, 40(%rsp)
+4889442430                              movq %rax, 48(%rsp)
+4889442438                              movq %rax, 56(%rsp)
+4889442450                              movq %rax, 80(%rsp)
+4889442458                              movq %rax, 88(%rsp)
+4989442408                              movq %rax, 8(%r12)
+49894608                                movq %rax, 8(%r14)
+49894708                                movq %rax, 8(%r15)
+48894508                                movq %rax, 8(%rbp)
+48894308                                movq %rax, 8(%rbx)
+48894108                                movq %rax, 8(%rcx)
+488944D1F8                              movq %rax, -8(%rcx,%rdx,8)
+48894608                                movq %rax, 8(%rsi)
+4889442408                              movq %rax, 8(%rsp)
+4889442460                              movq %rax, 96(%rsp)
+4889057DEF0500                          movq %rax, 388989(%rip)
+49890424                                movq %rax, (%r12)
+4989C4                                  movq %rax, %r12
+4A8B1CE0                                movq (%rax,%r12,8), %rbx
+498904DC                                movq %rax, (%r12,%rbx,8)
+49894500                                movq %rax, (%r13)
+4989C5                                  movq %rax, %r13
+4E8B24E8                                movq (%rax,%r13,8), %r12
+498944DD00                              movq %rax, (%r13,%rbx,8)
+498944D500                              movq %rax, (%r13,%rdx,8)
+498906                                  movq %rax, (%r14)
+4989C6                                  movq %rax, %r14
+498904DE                                movq %rax, (%r14,%rbx,8)
+4989C7                                  movq %rax, %r15
+4A8B04F8                                movq (%rax,%r15,8), %rax
+4989C0                                  movq %rax, %r8
+498904D0                                movq %rax, (%r8,%rdx,8)
+4989C1                                  movq %rax, %r9
+498904D1                                movq %rax, (%r9,%rdx,8)
+488B00                                  movq (%rax), %rax
+48894500                                movq %rax, (%rbp)
+4889C5                                  movq %rax, %rbp
+488B1CE8                                movq (%rax,%rbp,8), %rbx
+488944DD00                              movq %rax, (%rbp,%rbx,8)
+488903                                  movq %rax, (%rbx)
+4889C3                                  movq %rax, %rbx
+488B3CD8                                movq (%rax,%rbx,8), %rdi
+488B14D8                                movq (%rax,%rbx,8), %rdx
+488B34D8                                movq (%rax,%rbx,8), %rsi
+4A8904E3                                movq %rax, (%rbx,%r12,8)
+4889C1                                  movq %rax, %rcx
+4C8B2CC8                                movq (%rax,%rcx,8), %r13
+488B04C8                                movq (%rax,%rcx,8), %rax
+488B34C8                                movq (%rax,%rcx,8), %rsi
+488904F1                                movq %rax, (%rcx,%rsi,8)
+488B38                                  movq (%rax), %rdi
+4889C7                                  movq %rax, %rdi
+488B04F8                                movq (%rax,%rdi,8), %rax
+488B34F8                                movq (%rax,%rdi,8), %rsi
+488904D7                                movq %rax, (%rdi,%rdx,8)
+488902                                  movq %rax, (%rdx)
+4889C2                                  movq %rax, %rdx
+4C8B24D0                                movq (%rax,%rdx,8), %r12
+4C8B2CD0                                movq (%rax,%rdx,8), %r13
+4C8B34D0                                movq (%rax,%rdx,8), %r14
+488B04D0                                movq (%rax,%rdx,8), %rax
+488B2CD0                                movq (%rax,%rdx,8), %rbp
+488B1CD0                                movq (%rax,%rdx,8), %rbx
+488B3CD0                                movq (%rax,%rdx,8), %rdi
+488B14D0                                movq (%rax,%rdx,8), %rdx
+488B34D0                                movq (%rax,%rdx,8), %rsi
+4A8904E2                                movq %rax, (%rdx,%r12,8)
+4A8904EA                                movq %rax, (%rdx,%r13,8)
+4A8904FA                                movq %rax, (%rdx,%r15,8)
+488904EA                                movq %rax, (%rdx,%rbp,8)
+488904DA                                movq %rax, (%rdx,%rbx,8)
+488906                                  movq %rax, (%rsi)
+4889C6                                  movq %rax, %rsi
+488B3CF0                                movq (%rax,%rsi,8), %rdi
+488904CE                                movq %rax, (%rsi,%rcx,8)
+48890424                                movq %rax, (%rsp)
+48890510000000                          movq %rax, 16(%rip)
+48890508000000                          movq %rax, 8(%rip)
+48890500000000                          movq %rax, (%rip)
+48896C24F0                              movq %rbp, -16(%rsp)
+48896C24E8                              movq %rbp, -24(%rsp)
+48896C24E0                              movq %rbp, -32(%rsp)
+48896C24D8                              movq %rbp, -40(%rsp)
+48896808                                movq %rbp, 8(%rax)
+48896C24F8                              movq %rbp, -8(%rsp)
+48896C2460                              movq %rbp, 96(%rsp)
+4989E8                                  movq %rbp, %r8
+4989E9                                  movq %rbp, %r9
+4889E8                                  movq %rbp, %rax
+488B5CC500                              movq (%rbp,%rax,8), %rbx
+48892CC8                                movq %rbp, (%rax,%rcx,8)
+488B7CDD00                              movq (%rbp,%rbx,8), %rdi
+4889E9                                  movq %rbp, %rcx
+488B7D00                                movq (%rbp), %rdi
+4889EF                                  movq %rbp, %rdi
+488B5500                                movq (%rbp), %rdx
+4889EA                                  movq %rbp, %rdx
+48892CC2                                movq %rbp, (%rdx,%rax,8)
+488B7500                                movq (%rbp), %rsi
+4889EE                                  movq %rbp, %rsi
+48895C2470                              movq %rbx, 112(%rsp)
+48895810                                movq %rbx, 16(%rax)
+48895C24F0                              movq %rbx, -16(%rsp)
+48895C24E8                              movq %rbx, -24(%rsp)
+48895C24E0                              movq %rbx, -32(%rsp)
+48895C24D8                              movq %rbx, -40(%rsp)
+48895C24D0                              movq %rbx, -48(%rsp)
+48895804                                movq %rbx, 4(%rax)
+48899C2430020000                        movq %rbx, 560(%rsp)
+48895808                                movq %rbx, 8(%rax)
+4A8B34E3                                movq (%rbx,%r12,8), %rsi
+49895D00                                movq %rbx, (%r13)
+4989D8                                  movq %rbx, %r8
+488B03                                  movq (%rbx), %rax
+488918                                  movq %rbx, (%rax)
+4889D8                                  movq %rbx, %rax
+4889D9                                  movq %rbx, %rcx
+488B04CB                                movq (%rbx,%rcx,8), %rax
+488B3B                                  movq (%rbx), %rdi
+4889DF                                  movq %rbx, %rdi
+488B13                                  movq (%rbx), %rdx
+4889DA                                  movq %rbx, %rdx
+488B33                                  movq (%rbx), %rsi
+4889DE                                  movq %rbx, %rsi
+48891C24                                movq %rbx, (%rsp)
+48898C2490000000                        movq %rcx, 144(%rsp)
+48894C2418                              movq %rcx, 24(%rsp)
+48894C2420                              movq %rcx, 32(%rsp)
+48894C2428                              movq %rcx, 40(%rsp)
+48894C2438                              movq %rcx, 56(%rsp)
+48894C2440                              movq %rcx, 64(%rsp)
+4989CC                                  movq %rcx, %r12
+4989CD                                  movq %rcx, %r13
+4A8B2CE9                                movq (%rcx,%r13,8), %rbp
+4989CE                                  movq %rcx, %r14
+49890CC6                                movq %rcx, (%r14,%rax,8)
+4989CF                                  movq %rcx, %r15
+4989C8                                  movq %rcx, %r8
+488B01                                  movq (%rcx), %rax
+4889C8                                  movq %rcx, %rax
+4C8B24C1                                movq (%rcx,%rax,8), %r12
+4C8B2CC1                                movq (%rcx,%rax,8), %r13
+488B04C1                                movq (%rcx,%rax,8), %rax
+488B34C1                                movq (%rcx,%rax,8), %rsi
+488B39                                  movq (%rcx), %rdi
+4889CF                                  movq %rcx, %rdi
+4889CA                                  movq %rcx, %rdx
+4889CE                                  movq %rcx, %rsi
+488B1CF1                                movq (%rcx,%rsi,8), %rbx
+48897C2478                              movq %rdi, 120(%rsp)
+4889BC24A0000000                        movq %rdi, 160(%rsp)
+48897C2420                              movq %rdi, 32(%rsp)
+48897C2448                              movq %rdi, 72(%rsp)
+48893D42000000                          movq %rdi, 66(%rip)
+4C8B17                                  movq (%rdi), %r10
+4989FA                                  movq %rdi, %r10
+4989FB                                  movq %rdi, %r11
+4989FC                                  movq %rdi, %r12
+4989FD                                  movq %rdi, %r13
+4C8B37                                  movq (%rdi), %r14
+4989FE                                  movq %rdi, %r14
+4989FF                                  movq %rdi, %r15
+4989F8                                  movq %rdi, %r8
+4C8B0F                                  movq (%rdi), %r9
+4989F9                                  movq %rdi, %r9
+488B07                                  movq (%rdi), %rax
+4889F8                                  movq %rdi, %rax
+488B04C7                                movq (%rdi,%rax,8), %rax
+488B14C7                                movq (%rdi,%rax,8), %rdx
+48897D00                                movq %rdi, (%rbp)
+4889FD                                  movq %rdi, %rbp
+4889FB                                  movq %rdi, %rbx
+4889F9                                  movq %rdi, %rcx
+488B3F                                  movq (%rdi), %rdi
+488B17                                  movq (%rdi), %rdx
+4889FA                                  movq %rdi, %rdx
+488B04D7                                movq (%rdi,%rdx,8), %rax
+4889FE                                  movq %rdi, %rsi
+48893D21000000                          movq %rdi, 33(%rip)
+48893D5348F2FF                          movq %rdi, -898989(%rip)
+4889942480000000                        movq %rdx, 128(%rsp)
+4889942498000000                        movq %rdx, 152(%rsp)
+4889542410                              movq %rdx, 16(%rsp)
+4889542418                              movq %rdx, 24(%rsp)
+4889942430010000                        movq %rdx, 304(%rsp)
+4889542420                              movq %rdx, 32(%rsp)
+4889542430                              movq %rdx, 48(%rsp)
+48895104                                movq %rdx, 4(%rcx)
+488954C3F8                              movq %rdx, -8(%rbx,%rax,8)
+48895108                                movq %rdx, 8(%rcx)
+4889151D230000                          movq %rdx, 8989(%rip)
+4989D4                                  movq %rdx, %r12
+498914C4                                movq %rdx, (%r12,%rax,8)
+4989D5                                  movq %rdx, %r13
+4989D6                                  movq %rdx, %r14
+4989D7                                  movq %rdx, %r15
+4C8B02                                  movq (%rdx), %r8
+488B02                                  movq (%rdx), %rax
+488910                                  movq %rdx, (%rax)
+4889D0                                  movq %rdx, %rax
+4C8B24C2                                movq (%rdx,%rax,8), %r12
+488B3CC2                                movq (%rdx,%rax,8), %rdi
+488B14C2                                movq (%rdx,%rax,8), %rdx
+488B34C2                                movq (%rdx,%rax,8), %rsi
+4889D5                                  movq %rdx, %rbp
+4889D3                                  movq %rdx, %rbx
+488B3CDA                                movq (%rdx,%rbx,8), %rdi
+4889D1                                  movq %rdx, %rcx
+488B34CA                                movq (%rdx,%rcx,8), %rsi
+4889D7                                  movq %rdx, %rdi
+488B12                                  movq (%rdx), %rdx
+4889D6                                  movq %rdx, %rsi
+48891424                                movq %rdx, (%rsp)
+4889B42488000000                        movq %rsi, 136(%rsp)
+4889B42498000000                        movq %rsi, 152(%rsp)
+4889B424A0000000                        movq %rsi, 160(%rsp)
+4889742410                              movq %rsi, 16(%rsp)
+4889742418                              movq %rsi, 24(%rsp)
+4889B42438010000                        movq %rsi, 312(%rsp)
+4889742420                              movq %rsi, 32(%rsp)
+4889742428                              movq %rsi, 40(%rsp)
+4889742430                              movq %rsi, 48(%rsp)
+4889742438                              movq %rsi, 56(%rsp)
+4889742408                              movq %rsi, 8(%rsp)
+4889354D000000                          movq %rsi, 77(%rip)
+4989F2                                  movq %rsi, %r10
+4989F4                                  movq %rsi, %r12
+4989F5                                  movq %rsi, %r13
+4989F6                                  movq %rsi, %r14
+4989F7                                  movq %rsi, %r15
+4989F0                                  movq %rsi, %r8
+4989F1                                  movq %rsi, %r9
+4889F0                                  movq %rsi, %rax
+4889F5                                  movq %rsi, %rbp
+4889F3                                  movq %rsi, %rbx
+4889F1                                  movq %rsi, %rcx
+4889F7                                  movq %rsi, %rdi
+488B16                                  movq (%rsi), %rdx
+4889F2                                  movq %rsi, %rdx
+488B04D6                                movq (%rsi,%rdx,8), %rax
+48893424                                movq %rsi, (%rsp)
+4889354D000000                          movq %rsi, 77(%rip)
+4C8B0C24                                movq (%rsp), %r9
+488B0424                                movq (%rsp), %rax
+488B1C24                                movq (%rsp), %rbx
+488B3C24                                movq (%rsp), %rdi
+488B1424                                movq (%rsp), %rdx
+4889E2                                  movq %rsp, %rdx
+488B3424                                movq (%rsp), %rsi
+488B0D21000000                          movq 33(%rip), %rcx
+488B3D42000000                          movq 66(%rip), %rdi
+4489E0                                  movl %r12d, %eax
+4489E8                                  movl %r13d, %eax
+4489EA                                  movl %r13d, %edx
+4489F7                                  movl %r14d, %edi
+4489F8                                  movl %r15d, %eax
+4489FB                                  movl %r15d, %ebx
+4489FE                                  movl %r15d, %esi
+8B1CD0                                  movl (%rax,%rdx,8), %ebx
+448B02                                  movl (%rdx), %r8d
+0FBE7808                                movsbl 8(%rax),%edi
+0FBEDC                                  movsbl %ah, %ebx
+0FBED4                                  movsbl %ah, %edx
+0FBED8                                  movsbl %al,%ebx
+0FBEC8                                  movsbl %al,%ecx
+0FBEF8                                  movsbl %al,%edi
+0FBED0                                  movsbl %al,%edx
+0FBEF0                                  movsbl %al,%esi
+440FBEE0                                movsbl %al,%r12d
+0FBEFF                                  movsbl %bh, %edi
+0FBEDB                                  movsbl %bl,%ebx
+0FBED5                                  movsbl %ch, %edx
+0FBEC1                                  movsbl %cl,%eax
+0FBEC9                                  movsbl %cl,%ecx
+400FBECF                                movsbl %dil,%ecx
+0FBEC2                                  movsbl %dl,%eax
+0FBED2                                  movsbl %dl,%edx
+410FBED3                                movsbl %r11b,%edx
+450FBEC0                                movsbl %r8b,%r8d
+0FBE3C08                                movsbl (%rax,%rcx),%edi
+0FBE3C10                                movsbl (%rax,%rdx),%edi
+0FBE3C30                                movsbl (%rax,%rsi),%edi
+420FBE0421                              movsbl (%rcx,%r12),%eax
+0FBE0407                                movsbl (%rdi,%rax),%eax
+0FBE3A                                  movsbl (%rdx),%edi
+0FBE3C04                                movsbl (%rsp,%rax),%edi
+480FBE0407                              movsbq (%rdi,%rax),%rax
+48637C2468                              movslq 104(%rsp),%rdi
+486344246C                              movslq 108(%rsp),%rax
+486354246C                              movslq 108(%rsp),%rdx
+48637C2478                              movslq 120(%rsp),%rdi
+48637C247C                              movslq 124(%rsp),%rdi
+486374247C                              movslq 124(%rsp),%rsi
+4863780C                                movslq 12(%rax),%rdi
+4C637C240C                              movslq 12(%rsp),%r15
+486344240C                              movslq 12(%rsp),%rax
+48637C240C                              movslq 12(%rsp),%rdi
+48638424A8000000                        movslq 168(%rsp),%rax
+4863442410                              movslq 16(%rsp),%rax
+48637C2410                              movslq 16(%rsp),%rdi
+48638424B0000000                        movslq 176(%rsp),%rax
+48638424B4000000                        movslq 180(%rsp),%rax
+48638424B8000000                        movslq 184(%rsp),%rax
+48638424BC000000                        movslq 188(%rsp),%rax
+4863442414                              movslq 20(%rsp),%rax
+48637B18                                movslq 24(%rbx),%rdi
+486344241C                              movslq 28(%rsp),%rax
+486344242C                              movslq 44(%rsp),%rax
+4963442408                              movslq 8(%r12),%rax
+48634308                                movslq 8(%rbx),%rax
+48635B08                                movslq 8(%rbx),%rbx
+48637B08                                movslq 8(%rbx),%rdi
+48634208                                movslq 8(%rdx),%rax
+48634608                                movslq 8(%rsi),%rax
+4863F8                                  movslq %eax,%rdi
+4C63E5                                  movslq %ebp,%r12
+4C63ED                                  movslq %ebp,%r13
+4863C5                                  movslq %ebp,%rax
+4863DD                                  movslq %ebp,%rbx
+4863CD                                  movslq %ebp,%rcx
+4863FD                                  movslq %ebp,%rdi
+4863D5                                  movslq %ebp,%rdx
+4863F5                                  movslq %ebp,%rsi
+4C63EB                                  movslq %ebx,%r13
+4863C3                                  movslq %ebx,%rax
+4863CB                                  movslq %ebx,%rcx
+4863FB                                  movslq %ebx,%rdi
+4863D3                                  movslq %ebx,%rdx
+4863C1                                  movslq %ecx,%rax
+4863F9                                  movslq %ecx,%rdi
+4863D1                                  movslq %ecx,%rdx
+4863C7                                  movslq %edi,%rax
+4863FF                                  movslq %edi,%rdi
+4863C2                                  movslq %edx,%rax
+4863CA                                  movslq %edx,%rcx
+4863FA                                  movslq %edx,%rdi
+4863D2                                  movslq %edx,%rdx
+4863F2                                  movslq %edx,%rsi
+4863C6                                  movslq %esi,%rax
+4863CE                                  movslq %esi,%rcx
+4863FE                                  movslq %esi,%rdi
+4863D6                                  movslq %esi,%rdx
+4D63C4                                  movslq %r12d,%r8
+4963C4                                  movslq %r12d,%rax
+4963EC                                  movslq %r12d,%rbp
+4963DC                                  movslq %r12d,%rbx
+4963CC                                  movslq %r12d,%rcx
+4963D4                                  movslq %r12d,%rdx
+4963F4                                  movslq %r12d,%rsi
+4963C5                                  movslq %r13d,%rax
+4963DD                                  movslq %r13d,%rbx
+4963D5                                  movslq %r13d,%rdx
+4963C6                                  movslq %r14d,%rax
+4963EE                                  movslq %r14d,%rbp
+4963CE                                  movslq %r14d,%rcx
+4963D6                                  movslq %r14d,%rdx
+4963C7                                  movslq %r15d,%rax
+4963DF                                  movslq %r15d,%rbx
+4963D7                                  movslq %r15d,%rdx
+4963C0                                  movslq %r8d,%rax
+4963D8                                  movslq %r8d,%rbx
+4963C8                                  movslq %r8d,%rcx
+4963D0                                  movslq %r8d,%rdx
+4D63C1                                  movslq %r9d,%r8
+4963C9                                  movslq %r9d,%rcx
+4963D1                                  movslq %r9d,%rdx
+4C6302                                  movslq (%rdx),%r8
+0FBF448102                              movswl 2(%rcx,%rax,4),%eax
+0FBF441102                              movswl 2(%rcx,%rdx),%eax
+0FBF443702                              movswl 2(%rdi,%rsi),%eax
+0FBF448202                              movswl 2(%rdx,%rax,4),%eax
+0FBF549602                              movswl 2(%rsi,%rdx,4),%edx
+0FBF741004                              movswl 4(%rax,%rdx),%esi
+0FBF44D104                              movswl 4(%rcx,%rdx,8),%eax
+0FBF44F104                              movswl 4(%rcx,%rsi,8),%eax
+0FBF740704                              movswl 4(%rdi,%rax),%esi
+0FBF740204                              movswl 4(%rdx,%rax),%esi
+0FBF44D106                              movswl 6(%rcx,%rdx,8),%eax
+0FBF44F106                              movswl 6(%rcx,%rsi,8),%eax
+0FBFC9                                  movswl %cx,%ecx
+0FBFC7                                  movswl %di,%eax
+0FBFFF                                  movswl %di,%edi
+0FBFD2                                  movswl %dx,%edx
+450FBFC0                                movswl %r8w,%r8d
+0FBF0482                                movswl (%rdx,%rax,4),%eax
+0FBFF6                                  movswl %si,%esi
+480FBFC0                                movswq %ax,%rax
+66C74443160000                          movw $0, 22(%rbx,%rax,2)
+66C74451160000                          movw $0, 22(%rcx,%rdx,2)
+6641C746180000                          movw $0, 24(%r14)
+66C740180000                            movw $0, 24(%rax)
+66C74402040000                          movw $0, 4(%rdx,%rax)
+66C74402060000                          movw $0, 6(%rdx,%rax)
+6641C7461E0100                          movw $1, 30(%r14)
+66C7401E0100                            movw $1, 30(%rax)
+66C7440202FEFF                          movw $-2, 2(%rdx,%rax)
+66C70402FEFF                            movw $-2, (%rdx,%rax)
+6689431E                                movw %ax, 30(%rbx)
+6689411E                                movw %ax, 30(%rcx)
+6689449104                              movw %ax, 4(%rcx,%rdx,4)
+668944D104                              movw %ax, 4(%rcx,%rdx,8)
+668944D106                              movw %ax, 6(%rcx,%rdx,8)
+66895CC104                              movw %bx, 4(%rcx,%rax,8)
+66895808                                movw %bx, 8(%rax)
+66890C02                                movw %cx, (%rdx,%rax)
+6644896C0302                            movw %r13w, 2(%rbx,%rax)
+6644896C0702                            movw %r13w, 2(%rdi,%rax)
+6644896CC106                            movw %r13w, 6(%rcx,%rax,8)
+0FB67C246A                              movzbl 106(%rsp), %edi
+440FB644246A                            movzbl 106(%rsp), %r8d
+0FB6442470                              movzbl 112(%rsp), %eax
+0FB6542470                              movzbl 112(%rsp), %edx
+0FB65C240B                              movzbl 11(%rsp), %ebx
+0FB67C240B                              movzbl 11(%rsp), %edi
+0FB654240B                              movzbl 11(%rsp), %edx
+440FB664240B                            movzbl 11(%rsp), %r12d
+0FB644247F                              movzbl 127(%rsp), %eax
+0FB67C247F                              movzbl 127(%rsp), %edi
+0FB6840480000000                        movzbl 128(%rsp,%rax), %eax
+0FB6941480000000                        movzbl 128(%rsp,%rdx), %edx
+0FB6BC248B000000                        movzbl 139(%rsp), %edi
+440FB684248B000000                      movzbl 139(%rsp), %r8d
+0FB6878C000000                          movzbl 140(%rdi), %eax
+0FB69794000000                          movzbl 148(%rdi), %edx
+0FB65C240F                              movzbl 15(%rsp), %ebx
+0FB67C240F                              movzbl 15(%rsp), %edi
+0FB654240F                              movzbl 15(%rsp), %edx
+440FB64C240F                            movzbl 15(%rsp), %r9d
+0FB64310                                movzbl 16(%rbx), %eax
+0FB6442410                              movzbl 16(%rsp), %eax
+0FB6542410                              movzbl 16(%rsp), %edx
+0FB66C2413                              movzbl 19(%rsp), %ebp
+0FB65C2413                              movzbl 19(%rsp), %ebx
+0FB67C2413                              movzbl 19(%rsp), %edi
+0FB6542413                              movzbl 19(%rsp), %edx
+410FB6442401                            movzbl 1(%r12), %eax
+410FB64C2401                            movzbl 1(%r12), %ecx
+410FB6542401                            movzbl 1(%r12), %edx
+0FB6540F01                              movzbl 1(%rdi,%rcx), %edx
+0FB65201                                movzbl 1(%rdx), %edx
+0FB6441601                              movzbl 1(%rsi,%rdx), %eax
+0FB68424C8000000                        movzbl 200(%rsp), %eax
+0FB67C2416                              movzbl 22(%rsp), %edi
+0FB6542416                              movzbl 22(%rsp), %edx
+440FB6442416                            movzbl 22(%rsp), %r8d
+0FB64C2417                              movzbl 23(%rsp), %ecx
+0FB67C2417                              movzbl 23(%rsp), %edi
+0FB6742417                              movzbl 23(%rsp), %esi
+0FB64C2419                              movzbl 25(%rsp), %ecx
+0FB65C241A                              movzbl 26(%rsp), %ebx
+0FB67C241A                              movzbl 26(%rsp), %edi
+0FB68C240E010000                        movzbl 270(%rsp), %ecx
+0FB6BC240E010000                        movzbl 270(%rsp), %edi
+0FB6B4240E010000                        movzbl 270(%rsp), %esi
+440FB6A4240E010000                      movzbl 270(%rsp), %r12d
+0FB69C240F010000                        movzbl 271(%rsp), %ebx
+0FB6BC240F010000                        movzbl 271(%rsp), %edi
+0FB694240F010000                        movzbl 271(%rsp), %edx
+440FB6A4240F010000                      movzbl 271(%rsp), %r12d
+440FB6B4240F010000                      movzbl 271(%rsp), %r14d
+0FB65C241B                              movzbl 27(%rsp), %ebx
+440FB66C241B                            movzbl 27(%rsp), %r13d
+0FB6842418010000                        movzbl 280(%rsp), %eax
+410FB6442402                            movzbl 2(%r12), %eax
+410FB65C2402                            movzbl 2(%r12), %ebx
+410FB64C2402                            movzbl 2(%r12), %ecx
+410FB67C2402                            movzbl 2(%r12), %edi
+410FB6542402                            movzbl 2(%r12), %edx
+450FB6642402                            movzbl 2(%r12), %r12d
+450FB66C2402                            movzbl 2(%r12), %r13d
+450FB67C2402                            movzbl 2(%r12), %r15d
+460FB6642002                            movzbl 2(%rax,%r12), %r12d
+0FB6540F02                              movzbl 2(%rdi,%rcx), %edx
+0FB64C241E                              movzbl 30(%rsp), %ecx
+0FB67C241E                              movzbl 30(%rsp), %edi
+0FB654241E                              movzbl 30(%rsp), %edx
+0FB65C241F                              movzbl 31(%rsp), %ebx
+0FB64C241F                              movzbl 31(%rsp), %ecx
+0FB67C241F                              movzbl 31(%rsp), %edi
+440FB664241F                            movzbl 31(%rsp), %r12d
+440FB67C241F                            movzbl 31(%rsp), %r15d
+0FB64C2420                              movzbl 32(%rsp), %ecx
+0FB65C2423                              movzbl 35(%rsp), %ebx
+0FB67C2423                              movzbl 35(%rsp), %edi
+440FB6642423                            movzbl 35(%rsp), %r12d
+0FB64C2426                              movzbl 38(%rsp), %ecx
+0FB67C2426                              movzbl 38(%rsp), %edi
+0FB6542426                              movzbl 38(%rsp), %edx
+440FB6442426                            movzbl 38(%rsp), %r8d
+0FB6442427                              movzbl 39(%rsp), %eax
+0FB66C2427                              movzbl 39(%rsp), %ebp
+0FB65C2427                              movzbl 39(%rsp), %ebx
+0FB64C2427                              movzbl 39(%rsp), %ecx
+0FB67C2427                              movzbl 39(%rsp), %edi
+0FB6542427                              movzbl 39(%rsp), %edx
+0FB6742427                              movzbl 39(%rsp), %esi
+440FB66C2427                            movzbl 39(%rsp), %r13d
+410FB6442403                            movzbl 3(%r12), %eax
+410FB65C2403                            movzbl 3(%r12), %ebx
+410FB6742403                            movzbl 3(%r12), %esi
+450FB6642403                            movzbl 3(%r12), %r12d
+450FB6742403                            movzbl 3(%r12), %r14d
+460FB6642003                            movzbl 3(%rax,%r12), %r12d
+0FB6440F03                              movzbl 3(%rdi,%rcx), %eax
+0FB654242A                              movzbl 42(%rsp), %edx
+0FB644242B                              movzbl 43(%rsp), %eax
+0FB65C242B                              movzbl 43(%rsp), %ebx
+0FB67C242B                              movzbl 43(%rsp), %edi
+0FB674242B                              movzbl 43(%rsp), %esi
+440FB664242B                            movzbl 43(%rsp), %r12d
+0FB6452C                                movzbl 44(%rbp), %eax
+0FB644242E                              movzbl 46(%rsp), %eax
+0FB674242E                              movzbl 46(%rsp), %esi
+0FB644242F                              movzbl 47(%rsp), %eax
+0FB65C242F                              movzbl 47(%rsp), %ebx
+0FB67C242F                              movzbl 47(%rsp), %edi
+440FB664242F                            movzbl 47(%rsp), %r12d
+440FB66C242F                            movzbl 47(%rsp), %r13d
+0FB6442430                              movzbl 48(%rsp), %eax
+0FB65C2430                              movzbl 48(%rsp), %ebx
+0FB6542430                              movzbl 48(%rsp), %edx
+0FB6742430                              movzbl 48(%rsp), %esi
+450FB6642404                            movzbl 4(%r12), %r12d
+0FB65C2437                              movzbl 55(%rsp), %ebx
+0FB67C2437                              movzbl 55(%rsp), %edi
+440FB6642437                            movzbl 55(%rsp), %r12d
+0FB644243B                              movzbl 59(%rsp), %eax
+0FB67C243B                              movzbl 59(%rsp), %edi
+0FB674243B                              movzbl 59(%rsp), %esi
+0FB66C243C                              movzbl 60(%rsp), %ebp
+0FB67C243D                              movzbl 61(%rsp), %edi
+0FB654243D                              movzbl 61(%rsp), %edx
+440FB644243E                            movzbl 62(%rsp), %r8d
+0FB64C2440                              movzbl 64(%rsp), %ecx
+0FB65C2443                              movzbl 67(%rsp), %ebx
+0FB67C2443                              movzbl 67(%rsp), %edi
+0FB674244B                              movzbl 75(%rsp), %esi
+0FB64C244F                              movzbl 79(%rsp), %ecx
+0FB67C244F                              movzbl 79(%rsp), %edi
+0FB654244F                              movzbl 79(%rsp), %edx
+440FB644244F                            movzbl 79(%rsp), %r8d
+0FB65C2407                              movzbl 7(%rsp), %ebx
+0FB64C2407                              movzbl 7(%rsp), %ecx
+0FB67C2407                              movzbl 7(%rsp), %edi
+0FB6542407                              movzbl 7(%rsp), %edx
+440FB6642407                            movzbl 7(%rsp), %r12d
+0FB66C2457                              movzbl 87(%rsp), %ebp
+0FB65C2457                              movzbl 87(%rsp), %ebx
+0FB64008                                movzbl 8(%rax), %eax
+0FB64808                                movzbl 8(%rax), %ecx
+0FB67808                                movzbl 8(%rax), %edi
+0FB64708                                movzbl 8(%rdi), %eax
+0FB67F08                                movzbl 8(%rdi), %edi
+0FB67708                                movzbl 8(%rdi), %esi
+0FB64A08                                movzbl 8(%rdx), %ecx
+0FB64608                                movzbl 8(%rsi), %eax
+0FB65608                                movzbl 8(%rsi), %edx
+0FB654245F                              movzbl 95(%rsp), %edx
+0FB6442460                              movzbl 96(%rsp), %eax
+0FB64C2460                              movzbl 96(%rsp), %ecx
+0FB6DC                                  movzbl %ah, %ebx
+0FB6D4                                  movzbl %ah, %edx
+0FB6C0                                  movzbl %al, %eax
+0FB6E8                                  movzbl %al, %ebp
+0FB6D8                                  movzbl %al, %ebx
+0FB6C8                                  movzbl %al, %ecx
+0FB6F8                                  movzbl %al, %edi
+0FB6F8                                  movzbl %al, %edi
+0FB6F8                                  movzbl %al,%edi
+0FB6D0                                  movzbl %al, %edx
+0FB6D0                                  movzbl %al, %edx
+0FB6F0                                  movzbl %al, %esi
+440FB6E0                                movzbl %al, %r12d
+440FB6E8                                movzbl %al, %r13d
+440FB6F0                                movzbl %al, %r14d
+440FB6F8                                movzbl %al, %r15d
+0FB6EF                                  movzbl %bh, %ebp
+0FB6CF                                  movzbl %bh, %ecx
+0FB6FF                                  movzbl %bh, %edi
+0FB6D7                                  movzbl %bh, %edx
+0FB6C3                                  movzbl %bl, %eax
+0FB6DB                                  movzbl %bl, %ebx
+0FB6CB                                  movzbl %bl, %ecx
+0FB6FB                                  movzbl %bl, %edi
+0FB6D3                                  movzbl %bl, %edx
+0FB6F3                                  movzbl %bl, %esi
+400FB6C5                                movzbl %bpl, %eax
+400FB6ED                                movzbl %bpl, %ebp
+400FB6CD                                movzbl %bpl, %ecx
+400FB6FD                                movzbl %bpl, %edi
+400FB6D5                                movzbl %bpl, %edx
+400FB6F5                                movzbl %bpl, %esi
+440FB6CD                                movzbl %bpl, %r9d
+0FB6DD                                  movzbl %ch, %ebx
+0FB6FD                                  movzbl %ch, %edi
+0FB6D5                                  movzbl %ch, %edx
+0FB6C1                                  movzbl %cl, %eax
+0FB6D9                                  movzbl %cl, %ebx
+0FB6C9                                  movzbl %cl, %ecx
+0FB6D1                                  movzbl %cl, %edx
+0FB6F1                                  movzbl %cl, %esi
+0FB6EE                                  movzbl %dh, %ebp
+0FB6FE                                  movzbl %dh, %edi
+400FB6C7                                movzbl %dil, %eax
+400FB6C7                                movzbl %dil,%eax
+400FB6FF                                movzbl %dil, %edi
+400FB6D7                                movzbl %dil, %edx
+0FB6C2                                  movzbl %dl, %eax
+0FB6DA                                  movzbl %dl, %ebx
+0FB6FA                                  movzbl %dl, %edi
+0FB6D2                                  movzbl %dl, %edx
+440FB6050E000000                        movzbl 14(%rip), %r8d
+0FB6420F                                movzbl 15(%rdx), %eax
+0FB60511000000                          movzbl 17(%rip), %eax
+410FB6D2                                movzbl %r10b, %edx
+450FB6D2                                movzbl %r10b, %r10d
+410FB6D3                                movzbl %r11b, %edx
+450FB6DB                                movzbl %r11b, %r11d
+410FB6C4                                movzbl %r12b, %eax
+410FB6DC                                movzbl %r12b, %ebx
+410FB6FC                                movzbl %r12b, %edi
+410FB6D4                                movzbl %r12b, %edx
+410FB6F4                                movzbl %r12b, %esi
+450FB6EC                                movzbl %r12b, %r13d
+410FB61C24                              movzbl (%r12), %ebx
+410FB6C5                                movzbl %r13b, %eax
+410FB6DD                                movzbl %r13b, %ebx
+410FB6FD                                movzbl %r13b, %edi
+410FB6D5                                movzbl %r13b, %edx
+410FB6F5                                movzbl %r13b, %esi
+450FB6E5                                movzbl %r13b,%r12d
+410FB6C6                                movzbl %r14b, %eax
+410FB6EE                                movzbl %r14b, %ebp
+410FB6DE                                movzbl %r14b, %ebx
+410FB6FE                                movzbl %r14b, %edi
+410FB6F6                                movzbl %r14b, %esi
+450FB6F6                                movzbl %r14b, %r14d
+410FB6DF                                movzbl %r15b, %ebx
+410FB6CF                                movzbl %r15b, %ecx
+410FB6FF                                movzbl %r15b, %edi
+410FB6D7                                movzbl %r15b, %edx
+410FB6F7                                movzbl %r15b, %esi
+450FB6E7                                movzbl %r15b, %r12d
+450FB6C0                                movzbl %r8b, %r8d
+410FB6D1                                movzbl %r9b, %edx
+450FB6C9                                movzbl %r9b, %r9d
+0FB60408                                movzbl (%rax,%rcx), %eax
+0FB60410                                movzbl (%rax,%rdx), %eax
+0FB63410                                movzbl (%rax,%rdx), %esi
+420FB60421                              movzbl (%rcx,%r12), %eax
+420FB61421                              movzbl (%rcx,%r12), %edx
+0FB60F                                  movzbl (%rdi), %ecx
+0FB60407                                movzbl (%rdi,%rax), %eax
+0FB6140F                                movzbl (%rdi,%rcx), %edx
+0FB61417                                movzbl (%rdi,%rdx), %edx
+0FB602                                  movzbl (%rdx), %eax
+0FB6140A                                movzbl (%rdx,%rcx), %edx
+0FB616                                  movzbl (%rsi), %edx
+0FB61416                                movzbl (%rsi,%rdx), %edx
+400FB6D6                                movzbl %sil, %edx
+400FB6F6                                movzbl %sil, %esi
+0FB680781A0000                          movzbl 6776(%rax), %eax
+0FB6054E000000                          movzbl 78(%rip), %eax
+0FB60D4F000000                          movzbl 79(%rip), %ecx
+0FB63551000000                          movzbl 81(%rip), %esi
+440FB60553000000                        movzbl 83(%rip), %r8d
+480FB67C2420                            movzbq 32(%rsp), %rdi
+480FB67C2440                            movzbq 64(%rsp), %rdi
+480FB67C2460                            movzbq 96(%rsp), %rdi
+410FB76C2418                            movzwl 24(%r12), %ebp
+410FB74C241E                            movzwl 30(%r12), %ecx
+0FB7431E                                movzwl 30(%rbx), %eax
+0FB7411E                                movzwl 30(%rcx), %eax
+0FB77808                                movzwl 8(%rax), %edi
+0FB77F08                                movzwl 8(%rdi), %edi
+0FB77708                                movzwl 8(%rdi), %esi
+0FB74608                                movzwl 8(%rsi), %eax
+0FB7C0                                  movzwl %ax, %eax
+0FB7D0                                  movzwl %ax, %edx
+0FB7D0                                  movzwl %ax,%edx
+440FB7F0                                movzwl %ax,%r14d
+0FB7C5                                  movzwl %bp,%eax
+0FB7C1                                  movzwl %cx, %eax
+0FB7C9                                  movzwl %cx, %ecx
+0FB7FF                                  movzwl %di, %edi
+0FB7D2                                  movzwl %dx, %edx
+410FB7049E                              movzwl (%r14,%rbx,4), %eax
+410FB7C7                                movzwl %r15w,%eax
+410FB7C8                                movzwl %r8w, %ecx
+450FB7C0                                movzwl %r8w, %r8d
+0FB71403                                movzwl (%rbx,%rax), %edx
+0FB7049F                                movzwl (%rdi,%rbx,4), %eax
+0FB7FE                                  movzwl %si, %edi
+0FB7F6                                  movzwl %si, %esi
+480FB77CD802                            movzwq 2(%rax,%rbx,8), %rdi
+490FB7561E                              movzwq 30(%r14), %rdx
+480FB7531E                              movzwq 30(%rbx), %rdx
+480FB734D8                              movzwq (%rax,%rbx,8), %rsi
+F7D8                                    negl %eax
+F7DF                                    negl %edi
+48F7DE                                  negq %rsi
+F7D0                                    notl %eax
+F7D1                                    notl %ecx
+F7D7                                    notl %edi
+F7D2                                    notl %edx
+F7D6                                    notl %esi
+41F7D2                                  notl %r10d
+41F7D0                                  notl %r8d
+41F7D1                                  notl %r9d
+48F7D7                                  notq %rdi
+4180CE80                                orb $-128, %r14b
+0A44242E                                orb 46(%rsp), %al
+400A7A08                                orb 8(%rdx), %dil
+08C8                                    orb %cl, %al
+08CA                                    orb %cl, %dl
+08D0                                    orb %dl, %al
+4408E0                                  orb %r12b, %al
+4181CE00040000                          orl $1024, %r14d
+0D00000041                              orl $1090519040, %eax
+81CA00000041                            orl $1090519040, %edx
+834C241C01                              orl $1, 28(%rsp)
+834C241C10                              orl $16, 28(%rsp)
+4181CE00400000                          orl $16384, %r14d
+83C810                                  orl $16, %eax
+83CA01                                  orl $1, %edx
+4183CE01                                orl $1, %r14d
+4181CE00080000                          orl $2048, %r14d
+834C241C02                              orl $2, 28(%rsp)
+4181CE00010000                          orl $256, %r14d
+83CA02                                  orl $2, %edx
+4183CE02                                orl $2, %r14d
+4181CE00800000                          orl $32768, %r14d
+83C820                                  orl $32, %eax
+4181CE00100000                          orl $4096, %r14d
+834C241C04                              orl $4, 28(%rsp)
+83C804                                  orl $4, %eax
+4183CE04                                orl $4, %r14d
+0B4424FC                                orl -4(%rsp), %eax
+4181CE00020000                          orl $512, %r14d
+83C840                                  orl $64, %eax
+4181CE00200000                          orl $8192, %r14d
+83C808                                  orl $8, %eax
+0B7A08                                  orl 8(%rdx), %edi
+09C5                                    orl %eax, %ebp
+09C1                                    orl %eax, %ecx
+09C7                                    orl %eax, %edi
+09C2                                    orl %eax, %edx
+4109C4                                  orl %eax, %r12d
+09D8                                    orl %ebx, %eax
+09D9                                    orl %ebx, %ecx
+09DA                                    orl %ebx, %edx
+09DE                                    orl %ebx, %esi
+09C8                                    orl %ecx, %eax
+09CA                                    orl %ecx, %edx
+09CE                                    orl %ecx, %esi
+09F8                                    orl %edi, %eax
+09F9                                    orl %edi, %ecx
+09FA                                    orl %edi, %edx
+09FE                                    orl %edi, %esi
+095424EC                                orl %edx, -20(%rsp)
+095424F8                                orl %edx, -8(%rsp)
+09D0                                    orl %edx, %eax
+09D1                                    orl %edx, %ecx
+09D7                                    orl %edx, %edi
+09F0                                    orl %esi, %eax
+09F1                                    orl %esi, %ecx
+09F7                                    orl %esi, %edi
+09F2                                    orl %esi, %edx
+4409D0                                  orl %r10d, %eax
+4409D8                                  orl %r11d, %eax
+4409D9                                  orl %r11d, %ecx
+4409DA                                  orl %r11d, %edx
+4409E0                                  orl %r12d, %eax
+4409E7                                  orl %r12d, %edi
+4409E8                                  orl %r13d, %eax
+4409F8                                  orl %r15d, %eax
+4409FA                                  orl %r15d, %edx
+4409C0                                  orl %r8d, %eax
+4409C7                                  orl %r8d, %edi
+4409C8                                  orl %r9d, %eax
+4409C9                                  orl %r9d, %ecx
+480B8C2410010000                        orq 272(%rsp), %rcx
+4809C7                                  orq %rax, %rdi
+4809C8                                  orq %rcx, %rax
+4809F8                                  orq %rdi, %rax
+4809D0                                  orq %rdx, %rax
+4809F0                                  orq %rsi, %rax
+4809F7                                  orq %rsi, %rdi
+660B7A08                                orw 8(%rdx), %di
+415C                                    popq %r12
+415D                                    popq %r13
+415E                                    popq %r14
+415F                                    popq %r15
+5D                                      popq %rbp
+5B                                      popq %rbx
+4154                                    pushq %r12
+4155                                    pushq %r13
+4156                                    pushq %r14
+4157                                    pushq %r15
+55                                      pushq %rbp
+53                                      pushq %rbx
+F3C3                                    rep ; ret
+41D3CC                                  rorl %cl, %r12d
+C1E70A                                  sall $10, %edi
+C1E60F                                  sall $15, %esi
+41C1E00F                                sall $15, %r8d
+C1E010                                  sall $16, %eax
+C1E510                                  sall $16, %ebp
+C1E310                                  sall $16, %ebx
+C1E110                                  sall $16, %ecx
+C1E710                                  sall $16, %edi
+C1E210                                  sall $16, %edx
+C1E610                                  sall $16, %esi
+41C1E310                                sall $16, %r11d
+41C1E410                                sall $16, %r12d
+41C1E010                                sall $16, %r8d
+41C1E110                                sall $16, %r9d
+C1E018                                  sall $24, %eax
+C1E118                                  sall $24, %ecx
+C1E718                                  sall $24, %edi
+C1E218                                  sall $24, %edx
+C1E618                                  sall $24, %esi
+C1E719                                  sall $25, %edi
+C1E702                                  sall $2, %edi
+C1E703                                  sall $3, %edi
+C1E203                                  sall $3, %edx
+C1E007                                  sall $7, %eax
+C1E707                                  sall $7, %edi
+41C1E207                                sall $7, %r10d
+C16424FC08                              sall $8, -4(%rsp)
+C1E008                                  sall $8, %eax
+C1E508                                  sall $8, %ebp
+C1E308                                  sall $8, %ebx
+C1E708                                  sall $8, %edi
+C1E208                                  sall $8, %edx
+41C1E008                                sall $8, %r8d
+41C1E108                                sall $8, %r9d
+D3E0                                    sall %cl, %eax
+D3E7                                    sall %cl, %edi
+D3E2                                    sall %cl, %edx
+D3E6                                    sall %cl, %esi
+41D3E1                                  sall %cl, %r9d
+48C1E002                                salq $2, %rax
+48C1E302                                salq $2, %rbx
+48C1E702                                salq $2, %rdi
+48C1E202                                salq $2, %rdx
+48C1E020                                salq $32, %rax
+48C1E720                                salq $32, %rdi
+48C1E703                                salq $3, %rdi
+48C1E704                                salq $4, %rdi
+C1F80B                                  sarl $11, %eax
+C1F810                                  sarl $16, %eax
+C1F910                                  sarl $16, %ecx
+C1FA10                                  sarl $16, %edx
+C1FF15                                  sarl $21, %edi
+C1F818                                  sarl $24, %eax
+C1FA18                                  sarl $24, %edx
+C1FE18                                  sarl $24, %esi
+C1F802                                  sarl $2, %eax
+C1F803                                  sarl $3, %eax
+C1F804                                  sarl $4, %eax
+C1FD06                                  sarl $6, %ebp
+C1FA06                                  sarl $6, %edx
+C1FF08                                  sarl $8, %edi
+C1FF09                                  sarl $9, %edi
+D3F8                                    sarl %cl, %eax
+D3FF                                    sarl %cl, %edi
+D3FA                                    sarl %cl, %edx
+41D3F8                                  sarl %cl, %r8d
+D1F8                                    sarl %eax
+D1FF                                    sarl %edi
+19C0                                    sbbl %eax, %eax
+19ED                                    sbbl %ebp, %ebp
+19DB                                    sbbl %ebx, %ebx
+19C9                                    sbbl %ecx, %ecx
+19FF                                    sbbl %edi, %edi
+19D2                                    sbbl %edx, %edx
+19F6                                    sbbl %esi, %esi
+4519D2                                  sbbl %r10d, %r10d
+4519E4                                  sbbl %r12d, %r12d
+4519ED                                  sbbl %r13d, %r13d
+4519C0                                  sbbl %r8d, %r8d
+4519C9                                  sbbl %r9d, %r9d
+4819FF                                  sbbq %rdi, %rdi
+0F97C2                                  seta %dl
+0F92C0                                  setb %al
+400F92C7                                setb %dil
+0F92C2                                  setb %dl
+0F9684241B010000                        setbe 283(%rsp)
+0F9644242E                              setbe 46(%rsp)
+0F96C0                                  setbe %al
+0F96C1                                  setbe %cl
+400F96C7                                setbe %dil
+0F96C2                                  setbe %dl
+0F9444247B                              sete 123(%rsp)
+0F9444240F                              sete 15(%rsp)
+0F948424AB000000                        sete 171(%rsp)
+0F948424B7000000                        sete 183(%rsp)
+0F948424EF000000                        sete 239(%rsp)
+0F94C0                                  sete %al
+400F94C7                                sete %dil
+0F94C2                                  sete %dl
+410F94C4                                sete %r12b
+410F94C5                                sete %r13b
+410F94C7                                sete %r15b
+400F94C6                                sete %sil
+0F9DC0                                  setge %al
+0F9DC2                                  setge %dl
+0F9C442473                              setl 115(%rsp)
+400F9CC7                                setl %dil
+0F9EC0                                  setle %al
+400F9EC7                                setle %dil
+0F95C0                                  setne %al
+0F95C1                                  setne %cl
+400F95C7                                setne %dil
+0F95C2                                  setne %dl
+410F95C5                                setne %r13b
+400F95C6                                setne %sil
+0F9BC2                                  setnp %dl
+0F98C0                                  sets %al
+C0E802                                  shrb $2, %al
+C0E803                                  shrb $3, %al
+C0E903                                  shrb $3, %cl
+40C0EF03                                shrb $3, %dil
+C0EA03                                  shrb $3, %dl
+C0E804                                  shrb $4, %al
+C0E904                                  shrb $4, %cl
+C0EA06                                  shrb $6, %dl
+D0E8                                    shrb %al
+D0EB                                    shrb %bl
+D0EA                                    shrb %dl
+C1E80A                                  shrl $10, %eax
+C1EF0A                                  shrl $10, %edi
+C1EE0A                                  shrl $10, %esi
+C1E80B                                  shrl $11, %eax
+C1EB0B                                  shrl $11, %ebx
+C1E90B                                  shrl $11, %ecx
+C1E80C                                  shrl $12, %eax
+C1EA0C                                  shrl $12, %edx
+C1E80D                                  shrl $13, %eax
+C1EF0E                                  shrl $14, %edi
+C1E810                                  shrl $16, %eax
+C1EB10                                  shrl $16, %ebx
+C1E910                                  shrl $16, %ecx
+C1EF10                                  shrl $16, %edi
+C1EA10                                  shrl $16, %edx
+C1EE10                                  shrl $16, %esi
+41C1EB10                                shrl $16, %r11d
+41C1E810                                shrl $16, %r8d
+41C1E910                                shrl $16, %r9d
+C1E814                                  shrl $20, %eax
+C1EB14                                  shrl $20, %ebx
+C1EA14                                  shrl $20, %edx
+41C1EC14                                shrl $20, %r12d
+C1E815                                  shrl $21, %eax
+C1ED15                                  shrl $21, %ebp
+C1EF15                                  shrl $21, %edi
+C1ED16                                  shrl $22, %ebp
+C1E818                                  shrl $24, %eax
+C1E918                                  shrl $24, %ecx
+C1EF18                                  shrl $24, %edi
+C1EA18                                  shrl $24, %edx
+C1EE18                                  shrl $24, %esi
+C1E819                                  shrl $25, %eax
+C1EB19                                  shrl $25, %ebx
+C1E81A                                  shrl $26, %eax
+C1ED1C                                  shrl $28, %ebp
+C1E91C                                  shrl $28, %ecx
+C1EF1C                                  shrl $28, %edi
+C1EA1C                                  shrl $28, %edx
+C1EE1C                                  shrl $28, %esi
+41C1EF1C                                shrl $28, %r15d
+C1E802                                  shrl $2, %eax
+C1E81F                                  shrl $31, %eax
+C1E91F                                  shrl $31, %ecx
+C1EF1F                                  shrl $31, %edi
+C1EA1F                                  shrl $31, %edx
+C1ED03                                  shrl $3, %ebp
+C1EA03                                  shrl $3, %edx
+41C1EC03                                shrl $3, %r12d
+C1E804                                  shrl $4, %eax
+C1EF04                                  shrl $4, %edi
+C1EA04                                  shrl $4, %edx
+41C1ED04                                shrl $4, %r13d
+C1E805                                  shrl $5, %eax
+C1E806                                  shrl $6, %eax
+C1E906                                  shrl $6, %ecx
+C1EF06                                  shrl $6, %edi
+C1E807                                  shrl $7, %eax
+C1E907                                  shrl $7, %ecx
+C1E808                                  shrl $8, %eax
+C1E908                                  shrl $8, %ecx
+C1EA08                                  shrl $8, %edx
+41C1ED08                                shrl $8, %r13d
+41C1E808                                shrl $8, %r8d
+41C1E908                                shrl $8, %r9d
+C1E809                                  shrl $9, %eax
+C1EF09                                  shrl $9, %edi
+D3E8                                    shrl %cl, %eax
+D3EF                                    shrl %cl, %edi
+D1E8                                    shrl %eax
+D1EB                                    shrl %ebx
+D1E9                                    shrl %ecx
+D1EF                                    shrl %edi
+D1EA                                    shrl %edx
+41D1EB                                  shrl %r11d
+41D1EC                                  shrl %r12d
+41D1E8                                  shrl %r8d
+49C1EA20                                shrq $32, %r10
+49C1EB20                                shrq $32, %r11
+49C1E820                                shrq $32, %r8
+49C1E920                                shrq $32, %r9
+48C1E820                                shrq $32, %rax
+48C1EB20                                shrq $32, %rbx
+48C1E920                                shrq $32, %rcx
+48C1EF20                                shrq $32, %rdi
+48C1EA20                                shrq $32, %rdx
+48C1EE20                                shrq $32, %rsi
+48C1E83F                                shrq $63, %rax
+402A7A08                                subb 8(%rdx), %dil
+28D9                                    subb %bl, %cl
+28CB                                    subb %cl, %bl
+28D0                                    subb %dl, %al
+4028D7                                  subb %dl, %dil
+4128D0                                  subb %dl, %r8b
+4028D6                                  subb %dl, %sil
+83E864                                  subl $100, %eax
+83E80C                                  subl $12, %eax
+2D90000000                              subl $144, %eax
+81EB90000000                            subl $144, %ebx
+2DC0000000                              subl $192, %eax
+81E9C0000000                            subl $192, %ecx
+81EAC0000000                            subl $192, %edx
+81E9C8000000                            subl $200, %ecx
+81EAC8000000                            subl $200, %edx
+2DD0000000                              subl $208, %eax
+83E814                                  subl $20, %eax
+2DE0000000                              subl $224, %eax
+81E9E0000000                            subl $224, %ecx
+81EAE0000000                            subl $224, %edx
+81E9E8000000                            subl $232, %ecx
+81EFE8000000                            subl $232, %edi
+81EAE8000000                            subl $232, %edx
+81E9F0000000                            subl $240, %ecx
+81EFF0000000                            subl $240, %edi
+81EAF0000000                            subl $240, %edx
+81E9F8000000                            subl $248, %ecx
+81EAF8000000                            subl $248, %edx
+2BAC2424010000                          subl 292(%rsp), %ebp
+83E802                                  subl $2, %eax
+83EA03                                  subl $3, %edx
+83E82C                                  subl $44, %eax
+83E82E                                  subl $46, %eax
+83E831                                  subl $49, %eax
+4183E904                                subl $4, %r9d
+2B6C2404                                subl 4(%rsp), %ebp
+83E840                                  subl $64, %eax
+83EB40                                  subl $64, %ebx
+81EFFF0F0100                            subl $69631, %edi
+2D01100100                              subl $69633, %eax
+2D03100100                              subl $69635, %eax
+2D00200100                              subl $73728, %eax
+2D01300100                              subl $77825, %eax
+2D2D300100                              subl $77869, %eax
+2D0C310100                              subl $78092, %eax
+83E853                                  subl $83, %eax
+2D02500100                              subl $86018, %eax
+2B7A08                                  subl 8(%rdx), %edi
+83E860                                  subl $96, %eax
+29C7                                    subl %eax, %edi
+29C2                                    subl %eax, %edx
+4129EE                                  subl %ebp, %r14d
+29CA                                    subl %ecx, %edx
+4129C8                                  subl %ecx, %r8d
+29F8                                    subl %edi, %eax
+29D0                                    subl %edx, %eax
+29D3                                    subl %edx, %ebx
+29D1                                    subl %edx, %ecx
+29D7                                    subl %edx, %edi
+29D6                                    subl %edx, %esi
+4129D0                                  subl %edx, %r8d
+29F7                                    subl %esi, %edi
+4429D1                                  subl %r10d, %ecx
+4429F7                                  subl %r14d, %edi
+4429F8                                  subl %r15d, %eax
+4883EC68                                subq $104, %rsp
+4883EC78                                subq $120, %rsp
+4881EC88000000                          subq $136, %rsp
+4881EC98000000                          subq $152, %rsp
+4C2BBC24A0000000                        subq 160(%rsp), %r15
+4881ECA8000000                          subq $168, %rsp
+4883EC10                                subq $16, %rsp
+4881ECB8000000                          subq $184, %rsp
+4881ECD0000000                          subq $208, %rsp
+4881ECD8000000                          subq $216, %rsp
+4883EC18                                subq $24, %rsp
+4881EC18010000                          subq $280, %rsp
+4881EC38010000                          subq $312, %rsp
+4883EC28                                subq $40, %rsp
+4881ECD8010000                          subq $472, %rsp
+4883EC38                                subq $56, %rsp
+4881ECD8020000                          subq $728, %rsp
+4883EC48                                subq $72, %rsp
+4883EC58                                subq $88, %rsp
+4883EC08                                subq $8, %rsp
+4881ECC8030000                          subq $968, %rsp
+4829C2                                  subq %rax, %rdx
+6629CA                                  subw %cx, %dx
+664129C8                                subw %cx, %r8w
+6629D0                                  subw %dx, %ax
+6629F7                                  subw %si, %di
+F6052C00000010                          testb $16, 44(%rip)
+A801                                    testb $1, %al
+F6C301                                  testb $1, %bl
+40F6C701                                testb $1, %dil
+41F6C401                                testb $1, %r12b
+F6052D00000001                          testb $1, 45(%rip)
+40F6C71C                                testb $28, %dil
+A802                                    testb $2, %al
+F6C202                                  testb $2, %dl
+41F6C502                                testb $2, %r13b
+41F6C602                                testb $2, %r14b
+F6C2E0                                  testb $-32, %dl
+F6052F00000020                          testb $32, 47(%rip)
+F644241C04                              testb $4, 28(%rsp)
+A804                                    testb $4, %al
+40F6C704                                testb $4, %dil
+F6C204                                  testb $4, %dl
+41F6C504                                testb $4, %r13b
+F6053100000004                          testb $4, 49(%rip)
+F6054200000040                          testb $64, 66(%rip)
+F684242003000007                        testb $7, 800(%rsp)
+A808                                    testb $8, %al
+F6C208                                  testb $8, %dl
+41F6C608                                testb $8, %r14b
+84C0                                    testb %al, %al
+84DB                                    testb %bl, %bl
+4084ED                                  testb %bpl, %bpl
+84C9                                    testb %cl, %cl
+4084FF                                  testb %dil, %dil
+84D2                                    testb %dl, %dl
+4584DB                                  testb %r11b, %r11b
+4584E4                                  testb %r12b, %r12b
+4584ED                                  testb %r13b, %r13b
+4584F6                                  testb %r14b, %r14b
+4584FF                                  testb %r15b, %r15b
+4584C0                                  testb %r8b, %r8b
+4084F6                                  testb %sil, %sil
+F7C700040000                            testl $1024, %edi
+41F7C600040000                          testl $1024, %r14d
+F7C700400000                            testl $16384, %edi
+41F7C600400000                          testl $16384, %r14d
+F7C500000001                            testl $16777216, %ebp
+F7C700000001                            testl $16777216, %edi
+F7C200000001                            testl $16777216, %edx
+F7C600000001                            testl $16777216, %esi
+F7C700080000                            testl $2048, %edi
+41F7C600080000                          testl $2048, %r14d
+41F7C600010000                          testl $256, %r14d
+F7C700100000                            testl $4096, %edi
+41F7C600100000                          testl $4096, %r14d
+41F7C600020000                          testl $512, %r14d
+A90000FFFF                              testl $-65536, %eax
+F7C50000FFFF                            testl $-65536, %ebp
+F7C30000FFFF                            testl $-65536, %ebx
+41F7C40000FFFF                          testl $-65536, %r12d
+F7C700030000                            testl $768, %edi
+F7C700200000                            testl $8192, %edi
+41F7C600200000                          testl $8192, %r14d
+85C0                                    testl %eax, %eax
+85F8                                    testl %eax, %edi
+85D0                                    testl %eax, %edx
+85ED                                    testl %ebp, %ebp
+85DB                                    testl %ebx, %ebx
+85C9                                    testl %ecx, %ecx
+85FF                                    testl %edi, %edi
+85D2                                    testl %edx, %edx
+85F6                                    testl %esi, %esi
+4585D2                                  testl %r10d, %r10d
+4585DB                                  testl %r11d, %r11d
+4585E4                                  testl %r12d, %r12d
+4585ED                                  testl %r13d, %r13d
+4585F6                                  testl %r14d, %r14d
+4585FF                                  testl %r15d, %r15d
+4585C0                                  testl %r8d, %r8d
+4585C9                                  testl %r9d, %r9d
+4D85E4                                  testq %r12, %r12
+4D85ED                                  testq %r13, %r13
+4D85F6                                  testq %r14, %r14
+4D85FF                                  testq %r15, %r15
+4885C0                                  testq %rax, %rax
+4885ED                                  testq %rbp, %rbp
+4885DB                                  testq %rbx, %rbx
+4885C9                                  testq %rcx, %rcx
+4885FF                                  testq %rdi, %rdi
+4885D2                                  testq %rdx, %rdx
+4885F6                                  testq %rsi, %rsi
+6685C0                                  testw %ax, %ax
+6685DB                                  testw %bx, %bx
+6685FF                                  testw %di, %di
+6685D2                                  testw %dx, %dx
+664585DB                                testw %r11w, %r11w
+664585F6                                testw %r14w, %r14w
+664585C0                                testw %r8w, %r8w
+664585C9                                testw %r9w, %r9w
+6685F6                                  testw %si, %si
+40327A08                                xorb 8(%rdx), %dil
+83F001                                  xorl $1, %eax
+83F201                                  xorl $1, %edx
+337A08                                  xorl 8(%rdx), %edi
+31C0                                    xorl %eax, %eax
+31ED                                    xorl %ebp, %ebp
+31DB                                    xorl %ebx, %ebx
+31C9                                    xorl %ecx, %ecx
+31FF                                    xorl %edi, %edi
+31D1                                    xorl %edx, %ecx
+31D2                                    xorl %edx, %edx
+31F0                                    xorl %esi, %eax
+31F1                                    xorl %esi, %ecx
+31F2                                    xorl %esi, %edx
+31F6                                    xorl %esi, %esi
+4531D2                                  xorl %r10d, %r10d
+4531DB                                  xorl %r11d, %r11d
+4531E4                                  xorl %r12d, %r12d
+4531ED                                  xorl %r13d, %r13d
+4531F6                                  xorl %r14d, %r14d
+4531FF                                  xorl %r15d, %r15d
+4531C0                                  xorl %r8d, %r8d
+4531C9                                  xorl %r9d, %r9d
+6631C0                                  xorw %ax, %ax
+6631FF                                  xorw %di, %di
+6631D2                                  xorw %dx, %dx
+90                                      nop
+90                                      nop
diff --git a/VEX/orig_arm/nanoarm b/VEX/orig_arm/nanoarm
new file mode 100644
index 0000000..747098a
--- /dev/null
+++ b/VEX/orig_arm/nanoarm
@@ -0,0 +1,7 @@
+       0:       e1a0c00d        mov     ip, sp
+       4:       e92dd810        stmdb   sp!, {r4, fp, ip, lr, pc}
+       8:       e24cb004        sub     fp, ip, #5      ; 0x4
+       c:       e3a00014        mov     r0, #20 ; 0x14
+      10:       ebfffffe        bl      0 <newHHW>
+. 0 00008000 20
+. 0d c0 a0 e1 10 d8 2d e9 05 b0 4c e2 14 00 a0 e3 fe ff ff eb
diff --git a/VEX/orig_arm/nanoarm.orig b/VEX/orig_arm/nanoarm.orig
new file mode 100644
index 0000000..90742df
--- /dev/null
+++ b/VEX/orig_arm/nanoarm.orig
@@ -0,0 +1,19 @@
+       0:       e1a0c00d        mov     ip, sp
+. 0 00008000 4
+. 0d c0 a0 e1
+
+       4:       e92dd810        stmdb   sp!, {r4, fp, ip, lr, pc}
+. 1 00008004 4
+. 10 d8 2d e9
+
+       8:       e24cb004        sub     fp, ip, #4      ; 0x4
+. 2 00008008 4
+. 04 b0 4c e2
+
+       c:       e3a00014        mov     r0, #20 ; 0x14
+. 3 0000800C 4
+. 14 00 a0 e3
+
+      10:       ebfffffe        bl      0 <newHHW>
+. 4 00008010 4
+. fe ff ff eb
diff --git a/VEX/orig_ppc32/date.orig b/VEX/orig_ppc32/date.orig
new file mode 100644
index 0000000..24d729b
--- /dev/null
+++ b/VEX/orig_ppc32/date.orig
@@ -0,0 +1,138635 @@
+==== BB 0 _start(0x254804D4) approx BBs exec'd 0 ====
+
+	0x254804D4:  7C230B78  or r3,r1,r1
+	   0: GETL       	R1, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254804D8:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x254804DC:  3821FFF0  addi r1,r1,-16
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0xFFFFFFF0, t4
+	   8: PUTL       	t4, R1
+	   9: INCEIPL       	$4
+
+	0x254804E0:  90810000  stw r4,0(r1)
+	  10: GETL       	R4, t6
+	  11: GETL       	R1, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x254804E4:  4BFF1581  bl 0x25471A64
+	  14: MOVL       	$0x254804E8, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x25471A64  ($4)
+
+
+
+. 0 254804D4 20
+. 7C 23 0B 78 38 80 00 00 38 21 FF F0 90 81 00 00 4B FF 15 81
+==== BB 1 _dl_start(0x25471A64) approx BBs exec'd 0 ====
+
+	0x25471A64:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25471A68:  3960004B  li r11,75
+	   3: MOVL       	$0x4B, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x25471A6C:  9421FD50  stwu r1,-688(r1)
+	   6: GETL       	R1, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xFFFFFD50, t6
+	   9: PUTL       	t6, R1
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x25471A70:  7D6903A6  mtctr r11
+	  12: GETL       	R11, t8
+	  13: PUTL       	t8, CTR
+	  14: INCEIPL       	$4
+
+	0x25471A74:  92E1028C  stw r23,652(r1)
+	  15: GETL       	R23, t10
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x28C, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0x25471A78:  900102B4  stw r0,692(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x2B4, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x25471A7C:  7C771B78  or r23,r3,r3
+	  25: GETL       	R3, t18
+	  26: PUTL       	t18, R23
+	  27: INCEIPL       	$4
+
+	0x25471A80:  93010290  stw r24,656(r1)
+	  28: GETL       	R24, t20
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x290, t22
+	  31: STL       	t20, (t22)
+	  32: INCEIPL       	$4
+
+	0x25471A84:  38000000  li r0,0
+	  33: MOVL       	$0x0, t24
+	  34: PUTL       	t24, R0
+	  35: INCEIPL       	$4
+
+	0x25471A88:  93210294  stw r25,660(r1)
+	  36: GETL       	R25, t26
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x294, t28
+	  39: STL       	t26, (t28)
+	  40: INCEIPL       	$4
+
+	0x25471A8C:  39210030  addi r9,r1,48
+	  41: GETL       	R1, t30
+	  42: ADDL       	$0x30, t30
+	  43: PUTL       	t30, R9
+	  44: INCEIPL       	$4
+
+	0x25471A90:  93410298  stw r26,664(r1)
+	  45: GETL       	R26, t32
+	  46: GETL       	R1, t34
+	  47: ADDL       	$0x298, t34
+	  48: STL       	t32, (t34)
+	  49: INCEIPL       	$4
+
+	0x25471A94:  9361029C  stw r27,668(r1)
+	  50: GETL       	R27, t36
+	  51: GETL       	R1, t38
+	  52: ADDL       	$0x29C, t38
+	  53: STL       	t36, (t38)
+	  54: INCEIPL       	$4
+
+	0x25471A98:  938102A0  stw r28,672(r1)
+	  55: GETL       	R28, t40
+	  56: GETL       	R1, t42
+	  57: ADDL       	$0x2A0, t42
+	  58: STL       	t40, (t42)
+	  59: INCEIPL       	$4
+
+	0x25471A9C:  93A102A4  stw r29,676(r1)
+	  60: GETL       	R29, t44
+	  61: GETL       	R1, t46
+	  62: ADDL       	$0x2A4, t46
+	  63: STL       	t44, (t46)
+	  64: INCEIPL       	$4
+
+	0x25471AA0:  93C102A8  stw r30,680(r1)
+	  65: GETL       	R30, t48
+	  66: GETL       	R1, t50
+	  67: ADDL       	$0x2A8, t50
+	  68: STL       	t48, (t50)
+	  69: INCEIPL       	$4
+
+	0x25471AA4:  93E102AC  stw r31,684(r1)
+	  70: GETL       	R31, t52
+	  71: GETL       	R1, t54
+	  72: ADDL       	$0x2AC, t54
+	  73: STL       	t52, (t54)
+	  74: INCEIPL       	$4
+
+	0x25471AA8:  90090000  stw r0,0(r9)
+	  75: GETL       	R0, t56
+	  76: GETL       	R9, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0x25471AAC:  39290004  addi r9,r9,4
+	  79: GETL       	R9, t60
+	  80: ADDL       	$0x4, t60
+	  81: PUTL       	t60, R9
+	  82: INCEIPL       	$4
+
+	0x25471AB0:  4200FFF8  bc 16,0,0x25471AA8
+	  83: GETL       	CTR, t62
+	  84: ADDL       	$0xFFFFFFFF, t62
+	  85: PUTL       	t62, CTR
+	  86: JIFZL       	t62, $0x25471AB4
+	  87: JMPo       	$0x25471AA8  ($4)
+
+
+
+. 1 25471A64 80
+. 7C 08 02 A6 39 60 00 4B 94 21 FD 50 7D 69 03 A6 92 E1 02 8C 90 01 02 B4 7C 77 1B 78 93 01 02 90 38 00 00 00 93 21 02 94 39 21 00 30 93 41 02 98 93 61 02 9C 93 81 02 A0 93 A1 02 A4 93 C1 02 A8 93 E1 02 AC 90 09 00 00 39 29 00 04 42 00 FF F8
+==== BB 2 (0x25471AA8) approx BBs exec'd 0 ====
+
+	0x25471AA8:  90090000  stw r0,0(r9)
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25471AAC:  39290004  addi r9,r9,4
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25471AB0:  4200FFF8  bc 16,0,0x25471AA8
+	   8: GETL       	CTR, t6
+	   9: ADDL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, CTR
+	  11: JIFZL       	t6, $0x25471AB4
+	  12: JMPo       	$0x25471AA8  ($4)
+
+
+
+. 2 25471AA8 12
+. 90 09 00 00 39 29 00 04 42 00 FF F8
+==== BB 3 (0x25471AB4) approx BBs exec'd 0 ====
+
+	0x25471AB4:  48000009  bl 0x25471ABC
+	   0: MOVL       	$0x25471AB8, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25471ABC  ($4)
+
+
+
+. 3 25471AB4 4
+. 48 00 00 09
+==== BB 4 (0x25471ABC) approx BBs exec'd 0 ====
+
+	0x25471ABC:  7CE802A6  mflr r7
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x25471AC0:  3B010010  addi r24,r1,16
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x10, t2
+	   5: PUTL       	t2, R24
+	   6: INCEIPL       	$4
+
+	0x25471AC4:  4802553D  bl 0x25497000
+	   7: MOVL       	$0x25471AC8, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 4 25471ABC 12
+. 7C E8 02 A6 3B 01 00 10 48 02 55 3D
+==== BB 5 (0x25497000) approx BBs exec'd 0 ====
+
+	0x25497000:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0x25497004, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+
+. 5 25497000 4
+. 4E 80 00 21
+==== BB 6 (0x25471AC8) approx BBs exec'd 0 ====
+
+	0x25471AC8:  7D4802A6  mflr r10
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x25471ACC:  81870000  lwz r12,0(r7)
+	   3: GETL       	R7, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R12
+	   6: INCEIPL       	$4
+
+	0x25471AD0:  810A0000  lwz r8,0(r10)
+	   7: GETL       	R10, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R8
+	  10: INCEIPL       	$4
+
+	0x25471AD4:  5586302E  rlwinm r6,r12,6,0,23
+	  11: GETL       	R12, t10
+	  12: ROLL       	$0x6, t10
+	  13: ANDL       	$0xFFFFFF00, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x25471AD8:  7CC43670  srawi r4,r6,6
+	  16: GETL       	R6, t12
+	  17: SARL       	$0x6, t12  (-wCa)
+	  18: PUTL       	t12, R4
+	  19: INCEIPL       	$4
+
+	0x25471ADC:  7CA83850  subf r5,r8,r7
+	  20: GETL       	R8, t14
+	  21: GETL       	R7, t16
+	  22: SUBL       	t14, t16
+	  23: PUTL       	t16, R5
+	  24: INCEIPL       	$4
+
+	0x25471AE0:  39010030  addi r8,r1,48
+	  25: GETL       	R1, t18
+	  26: ADDL       	$0x30, t18
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0x25471AE4:  7CE52214  add r7,r5,r4
+	  29: GETL       	R5, t20
+	  30: GETL       	R4, t22
+	  31: ADDL       	t20, t22
+	  32: PUTL       	t22, R7
+	  33: INCEIPL       	$4
+
+	0x25471AE8:  90E10010  stw r7,16(r1)
+	  34: GETL       	R7, t24
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x10, t26
+	  37: STL       	t24, (t26)
+	  38: INCEIPL       	$4
+
+	0x25471AEC:  806A0000  lwz r3,0(r10)
+	  39: GETL       	R10, t28
+	  40: LDL       	(t28), t30
+	  41: PUTL       	t30, R3
+	  42: INCEIPL       	$4
+
+	0x25471AF0:  7C07182E  lwzx r0,r7,r3
+	  43: GETL       	R3, t32
+	  44: GETL       	R7, t34
+	  45: ADDL       	t34, t32
+	  46: LDL       	(t32), t36
+	  47: PUTL       	t36, R0
+	  48: INCEIPL       	$4
+
+	0x25471AF4:  7D271A14  add r9,r7,r3
+	  49: GETL       	R7, t38
+	  50: GETL       	R3, t40
+	  51: ADDL       	t38, t40
+	  52: PUTL       	t40, R9
+	  53: INCEIPL       	$4
+
+	0x25471AF8:  91210018  stw r9,24(r1)
+	  54: GETL       	R9, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x18, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0x25471AFC:  7D2A4B78  or r10,r9,r9
+	  59: GETL       	R9, t46
+	  60: PUTL       	t46, R10
+	  61: INCEIPL       	$4
+
+	0x25471B00:  2F800000  cmpi cr7,r0,0
+	  62: GETL       	R0, t48
+	  63: CMP0L       	t48, t50  (-rSo)
+	  64: ICRFL       	t50, $0x7, CR
+	  65: INCEIPL       	$4
+
+	0x25471B04:  419E0068  bc 12,30,0x25471B6C
+	  66: Js30o       	$0x25471B6C
+
+
+
+. 6 25471AC8 64
+. 7D 48 02 A6 81 87 00 00 81 0A 00 00 55 86 30 2E 7C C4 36 70 7C A8 38 50 39 01 00 30 7C E5 22 14 90 E1 00 10 80 6A 00 00 7C 07 18 2E 7D 27 1A 14 91 21 00 18 7D 2A 4B 78 2F 80 00 00 41 9E 00 68
+==== BB 7 (0x25471B08) approx BBs exec'd 0 ====
+
+	0x25471B08:  3FE06FFF  lis r31,28671
+	   0: MOVL       	$0x6FFF0000, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0x25471B0C:  3F607000  lis r27,28672
+	   3: MOVL       	$0x70000000, t2
+	   4: PUTL       	t2, R27
+	   5: INCEIPL       	$4
+
+	0x25471B10:  3F406FFF  lis r26,28671
+	   6: MOVL       	$0x6FFF0000, t4
+	   7: PUTL       	t4, R26
+	   8: INCEIPL       	$4
+
+	0x25471B14:  3F206FFF  lis r25,28671
+	   9: MOVL       	$0x6FFF0000, t6
+	  10: PUTL       	t6, R25
+	  11: INCEIPL       	$4
+
+	0x25471B18:  3FA06FFF  lis r29,28671
+	  12: MOVL       	$0x6FFF0000, t8
+	  13: PUTL       	t8, R29
+	  14: INCEIPL       	$4
+
+	0x25471B1C:  3F806FFF  lis r28,28671
+	  15: MOVL       	$0x6FFF0000, t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0x25471B20:  7C0B0378  or r11,r0,r0
+	  18: GETL       	R0, t12
+	  19: PUTL       	t12, R11
+	  20: INCEIPL       	$4
+
+	0x25471B24:  63E6FFFF  ori r6,r31,0xFFFF
+	  21: MOVL       	$0x6FFFFFFF, t14
+	  22: PUTL       	t14, R6
+	  23: INCEIPL       	$4
+
+	0x25471B28:  63650021  ori r5,r27,0x21
+	  24: MOVL       	$0x70000021, t16
+	  25: PUTL       	t16, R5
+	  26: INCEIPL       	$4
+
+	0x25471B2C:  6344FDFF  ori r4,r26,0xFDFF
+	  27: MOVL       	$0x6FFFFDFF, t18
+	  28: PUTL       	t18, R4
+	  29: INCEIPL       	$4
+
+	0x25471B30:  6323FE34  ori r3,r25,0xFE34
+	  30: MOVL       	$0x6FFFFE34, t20
+	  31: PUTL       	t20, R3
+	  32: INCEIPL       	$4
+
+	0x25471B34:  63BDFEFF  ori r29,r29,0xFEFF
+	  33: MOVL       	$0x6FFFFEFF, t22
+	  34: PUTL       	t22, R29
+	  35: INCEIPL       	$4
+
+	0x25471B38:  639CFF40  ori r28,r28,0xFF40
+	  36: MOVL       	$0x6FFFFF40, t24
+	  37: PUTL       	t24, R28
+	  38: INCEIPL       	$4
+
+	0x25471B3C:  2C0B0021  cmpi cr0,r11,33
+	  39: GETL       	R11, t26
+	  40: MOVL       	$0x21, t30
+	  41: CMPL       	t26, t30, t28  (-rSo)
+	  42: ICRFL       	t28, $0x0, CR
+	  43: INCEIPL       	$4
+
+	0x25471B40:  5560103A  rlwinm r0,r11,2,0,29
+	  44: GETL       	R11, t32
+	  45: SHLL       	$0x2, t32
+	  46: PUTL       	t32, R0
+	  47: INCEIPL       	$4
+
+	0x25471B44:  40810018  bc 4,1,0x25471B5C
+	  48: Jc01o       	$0x25471B5C
+
+
+
+. 7 25471B08 64
+. 3F E0 6F FF 3F 60 70 00 3F 40 6F FF 3F 20 6F FF 3F A0 6F FF 3F 80 6F FF 7C 0B 03 78 63 E6 FF FF 63 65 00 21 63 44 FD FF 63 23 FE 34 63 BD FE FF 63 9C FF 40 2C 0B 00 21 55 60 10 3A 40 81 00 18
+==== BB 8 (0x25471B5C) approx BBs exec'd 0 ====
+
+	0x25471B5C:  7D48012E  stwx r10,r8,r0
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t2, t0
+	   3: GETL       	R10, t4
+	   4: STL       	t4, (t0)
+	   5: INCEIPL       	$4
+
+	0x25471B60:  856A0008  lwzu r11,8(r10)
+	   6: GETL       	R10, t6
+	   7: ADDL       	$0x8, t6
+	   8: PUTL       	t6, R10
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x25471B64:  2C8B0000  cmpi cr1,r11,0
+	  12: GETL       	R11, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0x25471B68:  4086FFD4  bc 4,6,0x25471B3C
+	  16: Jc06o       	$0x25471B3C
+
+
+
+. 8 25471B5C 16
+. 7D 48 01 2E 85 6A 00 08 2C 8B 00 00 40 86 FF D4
+==== BB 9 (0x25471B3C) approx BBs exec'd 0 ====
+
+	0x25471B3C:  2C0B0021  cmpi cr0,r11,33
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x21, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25471B40:  5560103A  rlwinm r0,r11,2,0,29
+	   5: GETL       	R11, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x25471B44:  40810018  bc 4,1,0x25471B5C
+	   9: Jc01o       	$0x25471B5C
+
+
+
+. 9 25471B3C 12
+. 2C 0B 00 21 55 60 10 3A 40 81 00 18
+==== BB 10 (0x25471B48) approx BBs exec'd 0 ====
+
+	0x25471B48:  7C0B3050  subf r0,r11,r6
+	   0: GETL       	R11, t0
+	   1: GETL       	R6, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471B4C:  2880000F  cmpli cr1,r0,15
+	   5: GETL       	R0, t4
+	   6: MOVL       	$0xF, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25471B50:  418502D8  bc 12,5,0x25471E28
+	  10: Js05o       	$0x25471E28
+
+
+
+. 10 25471B48 12
+. 7C 0B 30 50 28 80 00 0F 41 85 02 D8
+==== BB 11 (0x25471B54) approx BBs exec'd 0 ====
+
+	0x25471B54:  7C0B2850  subf r0,r11,r5
+	   0: GETL       	R11, t0
+	   1: GETL       	R5, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471B58:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25471B5C:  7D48012E  stwx r10,r8,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R8, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R10, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x25471B60:  856A0008  lwzu r11,8(r10)
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x8, t12
+	  17: PUTL       	t12, R10
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0x25471B64:  2C8B0000  cmpi cr1,r11,0
+	  21: GETL       	R11, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x1, CR
+	  24: INCEIPL       	$4
+
+	0x25471B68:  4086FFD4  bc 4,6,0x25471B3C
+	  25: Jc06o       	$0x25471B3C
+
+
+
+. 11 25471B54 24
+. 7C 0B 28 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2C 8B 00 00 40 86 FF D4
+==== BB 12 (0x25471E28) approx BBs exec'd 0 ====
+
+	0x25471E28:  5579083C  rlwinm r25,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0x25471E2C:  7F2C0E70  srawi r12,r25,1
+	   4: GETL       	R25, t2
+	   5: SARL       	$0x1, t2  (-wCa)
+	   6: PUTL       	t2, R12
+	   7: INCEIPL       	$4
+
+	0x25471E30:  7D8960F8  nor r9,r12,r12
+	   8: GETL       	R12, t4
+	   9: NOTL       	t4
+	  10: PUTL       	t4, R9
+	  11: INCEIPL       	$4
+
+	0x25471E34:  200C0031  subfic r0,r12,49
+	  12: GETL       	R12, t6
+	  13: MOVL       	$0x31, t8
+	  14: SBBL       	t6, t8  (-wCa)
+	  15: PUTL       	t8, R0
+	  16: INCEIPL       	$4
+
+	0x25471E38:  2B090002  cmpli cr6,r9,2
+	  17: GETL       	R9, t10
+	  18: MOVL       	$0x2, t14
+	  19: CMPUL       	t10, t14, t12  (-rSo)
+	  20: ICRFL       	t12, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x25471E3C:  40B9FD1C  bc 5,25,0x25471B58
+	  22: Jc25o       	$0x25471B58
+
+
+
+. 12 25471E28 24
+. 55 79 08 3C 7F 2C 0E 70 7D 89 60 F8 20 0C 00 31 2B 09 00 02 40 B9 FD 1C
+==== BB 13 (0x25471E40) approx BBs exec'd 0 ====
+
+	0x25471E40:  7F4B2050  subf r26,r11,r4
+	   0: GETL       	R11, t0
+	   1: GETL       	R4, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25471E44:  2B9A000B  cmpli cr7,r26,11
+	   5: GETL       	R26, t4
+	   6: MOVL       	$0xB, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x25471E48:  419D0010  bc 12,29,0x25471E58
+	  10: Js29o       	$0x25471E58
+
+
+
+. 13 25471E40 12
+. 7F 4B 20 50 2B 9A 00 0B 41 9D 00 10
+==== BB 14 (0x25471E4C) approx BBs exec'd 0 ====
+
+	0x25471E4C:  7C0B1850  subf r0,r11,r3
+	   0: GETL       	R11, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471E50:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25471E54:  4BFFFD08  b 0x25471B5C
+	   9: JMPo       	$0x25471B5C  ($4)
+
+
+
+. 14 25471E4C 12
+. 7C 0B 18 50 54 00 10 3A 4B FF FD 08
+==== BB 15 (0x25471B6C) approx BBs exec'd 0 ====
+
+	0x25471B6C:  81780000  lwz r11,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25471B70:  2F0B0000  cmpi cr6,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x25471B74:  419A00B0  bc 12,26,0x25471C24
+	   8: Js26o       	$0x25471C24
+
+
+
+. 15 25471B6C 12
+. 81 78 00 00 2F 0B 00 00 41 9A 00 B0
+==== BB 16 (0x25471B78) approx BBs exec'd 0 ====
+
+	0x25471B78:  81280010  lwz r9,16(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25471B7C:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25471B80:  419E0010  bc 12,30,0x25471B90
+	   9: Js30o       	$0x25471B90
+
+
+
+. 16 25471B78 12
+. 81 28 00 10 2F 89 00 00 41 9E 00 10
+==== BB 17 (0x25471B84) approx BBs exec'd 0 ====
+
+	0x25471B84:  81490004  lwz r10,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25471B88:  7C6A5A14  add r3,r10,r11
+	   5: GETL       	R10, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25471B8C:  90690004  stw r3,4(r9)
+	  10: GETL       	R3, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471B90:  8128000C  lwz r9,12(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0xC, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471B94:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25471B98:  41820010  bc 12,2,0x25471BA8
+	  24: Js02o       	$0x25471BA8
+
+
+
+. 17 25471B84 24
+. 81 49 00 04 7C 6A 5A 14 90 69 00 04 81 28 00 0C 2C 09 00 00 41 82 00 10
+==== BB 18 (0x25471B9C) approx BBs exec'd 0 ====
+
+	0x25471B9C:  80A90004  lwz r5,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25471BA0:  7C855A14  add r4,r5,r11
+	   5: GETL       	R5, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25471BA4:  90890004  stw r4,4(r9)
+	  10: GETL       	R4, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471BA8:  81280014  lwz r9,20(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x14, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471BAC:  2C890000  cmpi cr1,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x1, CR
+	  23: INCEIPL       	$4
+
+	0x25471BB0:  41860010  bc 12,6,0x25471BC0
+	  24: Js06o       	$0x25471BC0
+
+
+
+. 18 25471B9C 24
+. 80 A9 00 04 7C 85 5A 14 90 89 00 04 81 28 00 14 2C 89 00 00 41 86 00 10
+==== BB 19 (0x25471BB4) approx BBs exec'd 0 ====
+
+	0x25471BB4:  80E90004  lwz r7,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25471BB8:  7CC75A14  add r6,r7,r11
+	   5: GETL       	R7, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25471BBC:  90C90004  stw r6,4(r9)
+	  10: GETL       	R6, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471BC0:  81280018  lwz r9,24(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x18, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471BC4:  2F090000  cmpi cr6,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x6, CR
+	  23: INCEIPL       	$4
+
+	0x25471BC8:  419A0010  bc 12,26,0x25471BD8
+	  24: Js26o       	$0x25471BD8
+
+
+
+. 19 25471BB4 24
+. 80 E9 00 04 7C C7 5A 14 90 C9 00 04 81 28 00 18 2F 09 00 00 41 9A 00 10
+==== BB 20 (0x25471BCC) approx BBs exec'd 0 ====
+
+	0x25471BCC:  83A90004  lwz r29,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25471BD0:  7F9D5A14  add r28,r29,r11
+	   5: GETL       	R29, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25471BD4:  93890004  stw r28,4(r9)
+	  10: GETL       	R28, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471BD8:  8128001C  lwz r9,28(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471BDC:  2F890000  cmpi cr7,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0x25471BE0:  419E0010  bc 12,30,0x25471BF0
+	  24: Js30o       	$0x25471BF0
+
+
+
+. 20 25471BCC 24
+. 83 A9 00 04 7F 9D 5A 14 93 89 00 04 81 28 00 1C 2F 89 00 00 41 9E 00 10
+==== BB 21 (0x25471BE4) approx BBs exec'd 0 ====
+
+	0x25471BE4:  80090004  lwz r0,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471BE8:  7FE05A14  add r31,r0,r11
+	   5: GETL       	R0, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0x25471BEC:  93E90004  stw r31,4(r9)
+	  10: GETL       	R31, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471BF0:  8128005C  lwz r9,92(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x5C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471BF4:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25471BF8:  41820010  bc 12,2,0x25471C08
+	  24: Js02o       	$0x25471C08
+
+
+
+. 21 25471BE4 24
+. 80 09 00 04 7F E0 5A 14 93 E9 00 04 81 28 00 5C 2C 09 00 00 41 82 00 10
+==== BB 22 (0x25471BFC) approx BBs exec'd 0 ====
+
+	0x25471BFC:  83290004  lwz r25,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x25471C00:  7D995A14  add r12,r25,r11
+	   5: GETL       	R25, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25471C04:  91890004  stw r12,4(r9)
+	  10: GETL       	R12, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471C08:  812800C4  lwz r9,196(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0xC4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471C0C:  2C890000  cmpi cr1,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x1, CR
+	  23: INCEIPL       	$4
+
+	0x25471C10:  41860010  bc 12,6,0x25471C20
+	  24: Js06o       	$0x25471C20
+
+
+
+. 22 25471BFC 24
+. 83 29 00 04 7D 99 5A 14 91 89 00 04 81 28 00 C4 2C 89 00 00 41 86 00 10
+==== BB 23 (0x25471C14) approx BBs exec'd 0 ====
+
+	0x25471C14:  83490004  lwz r26,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25471C18:  7D1A5A14  add r8,r26,r11
+	   5: GETL       	R26, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25471C1C:  91090004  stw r8,4(r9)
+	  10: GETL       	R8, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471C20:  80E10010  lwz r7,16(r1)
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x10, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R7
+	  19: INCEIPL       	$4
+
+	0x25471C24:  2F070000  cmpi cr6,r7,0
+	  20: GETL       	R7, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x6, CR
+	  23: INCEIPL       	$4
+
+	0x25471C28:  3960FFFF  li r11,-1
+	  24: MOVL       	$0xFFFFFFFF, t20
+	  25: PUTL       	t20, R11
+	  26: INCEIPL       	$4
+
+	0x25471C2C:  9161023C  stw r11,572(r1)
+	  27: GETL       	R11, t22
+	  28: GETL       	R1, t24
+	  29: ADDL       	$0x23C, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0x25471C30:  409A0010  bc 4,26,0x25471C40
+	  32: Jc26o       	$0x25471C40
+
+
+
+. 23 25471C14 32
+. 83 49 00 04 7D 1A 5A 14 91 09 00 04 80 E1 00 10 2F 07 00 00 39 60 FF FF 91 61 02 3C 40 9A 00 10
+==== BB 24 (0x25471C40) approx BBs exec'd 0 ====
+
+	0x25471C40:  7F03C378  or r3,r24,r24
+	   0: GETL       	R24, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25471C44:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25471C48:  38A00000  li r5,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25471C4C:  3B600000  li r27,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R27
+	  11: INCEIPL       	$4
+
+	0x25471C50:  4800EBE1  bl 0x25480830
+	  12: MOVL       	$0x25471C54, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25480830  ($4)
+
+
+
+. 24 25471C40 20
+. 7F 03 C3 78 38 80 00 00 38 A0 00 00 3B 60 00 00 48 00 EB E1
+==== BB 25 __elf_machine_runtime_setup(0x25480830) approx BBs exec'd 0 ====
+
+	0x25480830:  7CC802A6  mflr r6
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x25480834:  9421FFD0  stwu r1,-48(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFD0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x25480838:  7D800026  mfcr r12
+	   9: GETL       	CR, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x2548083C:  480167C5  bl 0x25497000
+	  12: MOVL       	$0x25480840, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 25 25480830 16
+. 7C C8 02 A6 94 21 FF D0 7D 80 00 26 48 01 67 C5
+==== BB 26 (0x25480840) approx BBs exec'd 0 ====
+
+	0x25480840:  93210014  stw r25,20(r1)
+	   0: GETL       	R25, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25480844:  7C992378  or r25,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0x25480848:  90C10034  stw r6,52(r1)
+	   8: GETL       	R6, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x34, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2548084C:  8003007C  lwz r0,124(r3)
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0x7C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R0
+	  17: INCEIPL       	$4
+
+	0x25480850:  93A10024  stw r29,36(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x24, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25480854:  7C7D1B78  or r29,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x25480858:  2F800000  cmpi cr7,r0,0
+	  26: GETL       	R0, t20
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x7, CR
+	  29: INCEIPL       	$4
+
+	0x2548085C:  93C10028  stw r30,40(r1)
+	  30: GETL       	R30, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x28, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25480860:  92E1000C  stw r23,12(r1)
+	  35: GETL       	R23, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0xC, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25480864:  7FC802A6  mflr r30
+	  40: GETL       	LR, t32
+	  41: PUTL       	t32, R30
+	  42: INCEIPL       	$4
+
+	0x25480868:  93010010  stw r24,16(r1)
+	  43: GETL       	R24, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x10, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x2548086C:  93410018  stw r26,24(r1)
+	  48: GETL       	R26, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x18, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0x25480870:  9361001C  stw r27,28(r1)
+	  53: GETL       	R27, t42
+	  54: GETL       	R1, t44
+	  55: ADDL       	$0x1C, t44
+	  56: STL       	t42, (t44)
+	  57: INCEIPL       	$4
+
+	0x25480874:  93810020  stw r28,32(r1)
+	  58: GETL       	R28, t46
+	  59: GETL       	R1, t48
+	  60: ADDL       	$0x20, t48
+	  61: STL       	t46, (t48)
+	  62: INCEIPL       	$4
+
+	0x25480878:  93E1002C  stw r31,44(r1)
+	  63: GETL       	R31, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x2C, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x2548087C:  91810008  stw r12,8(r1)
+	  68: GETL       	R12, t54
+	  69: GETL       	R1, t56
+	  70: ADDL       	$0x8, t56
+	  71: STL       	t54, (t56)
+	  72: INCEIPL       	$4
+
+	0x25480880:  419E0294  bc 12,30,0x25480B14
+	  73: Js30o       	$0x25480B14
+
+
+
+. 26 25480840 68
+. 93 21 00 14 7C 99 23 78 90 C1 00 34 80 03 00 7C 93 A1 00 24 7C 7D 1B 78 2F 80 00 00 93 C1 00 28 92 E1 00 0C 7F C8 02 A6 93 01 00 10 93 41 00 18 93 61 00 1C 93 81 00 20 93 E1 00 2C 91 81 00 08 41 9E 02 94
+==== BB 27 (0x25480884) approx BBs exec'd 0 ====
+
+	0x25480884:  82E30028  lwz r23,40(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x28, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x25480888:  3D40AAAA  lis r10,-21846
+	   5: MOVL       	$0xAAAA0000, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0x2548088C:  6148AAAB  ori r8,r10,0xAAAB
+	   8: MOVL       	$0xAAAAAAAB, t6
+	   9: PUTL       	t6, R8
+	  10: INCEIPL       	$4
+
+	0x25480890:  8083002C  lwz r4,44(r3)
+	  11: GETL       	R3, t8
+	  12: ADDL       	$0x2C, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0x25480894:  80F70004  lwz r7,4(r23)
+	  16: GETL       	R23, t12
+	  17: ADDL       	$0x4, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R7
+	  20: INCEIPL       	$4
+
+	0x25480898:  83E40004  lwz r31,4(r4)
+	  21: GETL       	R4, t16
+	  22: ADDL       	$0x4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0x2548089C:  7D674016  mulhwu r11,r7,r8
+	  26: GETL       	R7, t20
+	  27: GETL       	R8, t22
+	  28: UMULHL       	t20, t22
+	  29: PUTL       	t22, R11
+	  30: INCEIPL       	$4
+
+	0x254808A0:  557BE8FE  rlwinm r27,r11,29,3,31
+	  31: GETL       	R11, t24
+	  32: SHRL       	$0x3, t24
+	  33: PUTL       	t24, R27
+	  34: INCEIPL       	$4
+
+	0x254808A4:  281B2000  cmpli cr0,r27,8192
+	  35: GETL       	R27, t26
+	  36: MOVL       	$0x2000, t30
+	  37: CMPUL       	t26, t30, t28  (-rSo)
+	  38: ICRFL       	t28, $0x0, CR
+	  39: INCEIPL       	$4
+
+	0x254808A8:  5763083C  rlwinm r3,r27,1,0,30
+	  40: GETL       	R27, t32
+	  41: SHLL       	$0x1, t32
+	  42: PUTL       	t32, R3
+	  43: INCEIPL       	$4
+
+	0x254808AC:  38030012  addi r0,r3,18
+	  44: GETL       	R3, t34
+	  45: ADDL       	$0x12, t34
+	  46: PUTL       	t34, R0
+	  47: INCEIPL       	$4
+
+	0x254808B0:  7D201A14  add r9,r0,r3
+	  48: GETL       	R0, t36
+	  49: GETL       	R3, t38
+	  50: ADDL       	t36, t38
+	  51: PUTL       	t38, R9
+	  52: INCEIPL       	$4
+
+	0x254808B4:  3AE9C000  addi r23,r9,-16384
+	  53: GETL       	R9, t40
+	  54: ADDL       	$0xFFFFC000, t40
+	  55: PUTL       	t40, R23
+	  56: INCEIPL       	$4
+
+	0x254808B8:  4081029C  bc 4,1,0x25480B54
+	  57: Jc01o       	$0x25480B54
+
+
+
+. 27 25480884 56
+. 82 E3 00 28 3D 40 AA AA 61 48 AA AB 80 83 00 2C 80 F7 00 04 83 E4 00 04 7D 67 40 16 55 7B E8 FE 28 1B 20 00 57 63 08 3C 38 03 00 12 7D 20 1A 14 3A E9 C0 00 40 81 02 9C
+==== BB 28 (0x25480B54) approx BBs exec'd 0 ====
+
+	0x25480B54:  7C170378  or r23,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R23
+	   2: INCEIPL       	$4
+
+	0x25480B58:  4BFFFD64  b 0x254808BC
+	   3: JMPo       	$0x254808BC  ($4)
+
+
+
+. 28 25480B54 8
+. 7C 17 03 78 4B FF FD 64
+==== BB 29 (0x254808BC) approx BBs exec'd 0 ====
+
+	0x254808BC:  56E8103A  rlwinm r8,r23,2,0,29
+	   0: GETL       	R23, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x254808C0:  2E190000  cmpi cr4,r25,0
+	   4: GETL       	R25, t2
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0x254808C4:  7F48FA14  add r26,r8,r31
+	   8: GETL       	R8, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x254808C8:  3C607D69  lis r3,32105
+	  13: MOVL       	$0x7D690000, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x254808CC:  3CFA0001  addis r7,r26,1
+	  16: GETL       	R26, t12
+	  17: ADDL       	$0x10000, t12
+	  18: PUTL       	t12, R7
+	  19: INCEIPL       	$4
+
+	0x254808D0:  574B043E  rlwinm r11,r26,0,16,31
+	  20: GETL       	R26, t14
+	  21: ANDL       	$0xFFFF, t14
+	  22: PUTL       	t14, R11
+	  23: INCEIPL       	$4
+
+	0x254808D4:  38878000  addi r4,r7,-32768
+	  24: GETL       	R7, t16
+	  25: ADDL       	$0xFFFF8000, t16
+	  26: PUTL       	t16, R4
+	  27: INCEIPL       	$4
+
+	0x254808D8:  3CC04E80  lis r6,20096
+	  28: MOVL       	$0x4E800000, t18
+	  29: PUTL       	t18, R6
+	  30: INCEIPL       	$4
+
+	0x254808DC:  5489843E  rlwinm r9,r4,16,16,31
+	  31: GETL       	R4, t20
+	  32: SHRL       	$0x10, t20
+	  33: PUTL       	t20, R9
+	  34: INCEIPL       	$4
+
+	0x254808E0:  6560816B  oris r0,r11,0x816B
+	  35: GETL       	R11, t22
+	  36: ORL       	$0x816B0000, t22
+	  37: PUTL       	t22, R0
+	  38: INCEIPL       	$4
+
+	0x254808E4:  652C3D6B  oris r12,r9,0x3D6B
+	  39: GETL       	R9, t24
+	  40: ORL       	$0x3D6B0000, t24
+	  41: PUTL       	t24, R12
+	  42: INCEIPL       	$4
+
+	0x254808E8:  607C03A6  ori r28,r3,0x3A6
+	  43: MOVL       	$0x7D6903A6, t26
+	  44: PUTL       	t26, R28
+	  45: INCEIPL       	$4
+
+	0x254808EC:  60D80420  ori r24,r6,0x420
+	  46: MOVL       	$0x4E800420, t28
+	  47: PUTL       	t28, R24
+	  48: INCEIPL       	$4
+
+	0x254808F0:  919F0000  stw r12,0(r31)
+	  49: GETL       	R12, t30
+	  50: GETL       	R31, t32
+	  51: STL       	t30, (t32)
+	  52: INCEIPL       	$4
+
+	0x254808F4:  901F0004  stw r0,4(r31)
+	  53: GETL       	R0, t34
+	  54: GETL       	R31, t36
+	  55: ADDL       	$0x4, t36
+	  56: STL       	t34, (t36)
+	  57: INCEIPL       	$4
+
+	0x254808F8:  939F0008  stw r28,8(r31)
+	  58: GETL       	R28, t38
+	  59: GETL       	R31, t40
+	  60: ADDL       	$0x8, t40
+	  61: STL       	t38, (t40)
+	  62: INCEIPL       	$4
+
+	0x254808FC:  931F000C  stw r24,12(r31)
+	  63: GETL       	R24, t42
+	  64: GETL       	R31, t44
+	  65: ADDL       	$0xC, t44
+	  66: STL       	t42, (t44)
+	  67: INCEIPL       	$4
+
+	0x25480900:  41920178  bc 12,18,0x25480A78
+	  68: Js18o       	$0x25480A78
+
+
+
+. 29 254808BC 72
+. 56 E8 10 3A 2E 19 00 00 7F 48 FA 14 3C 60 7D 69 3C FA 00 01 57 4B 04 3E 38 87 80 00 3C C0 4E 80 54 89 84 3E 65 60 81 6B 65 2C 3D 6B 60 7C 03 A6 60 D8 04 20 91 9F 00 00 90 1F 00 04 93 9F 00 08 93 1F 00 0C 41 92 01 78
+==== BB 30 (0x25480A78) approx BBs exec'd 0 ====
+
+	0x25480A78:  817E04F0  lwz r11,1264(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25480A7C:  3159FFFF  addic r10,r25,-1
+	   5: GETL       	R25, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x25480A80:  7CAAC910  subfe r5,r10,r25
+	   9: GETL       	R10, t6
+	  10: GETL       	R25, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x25480A84:  314BFFFF  addic r10,r11,-1
+	  14: GETL       	R11, t10
+	  15: ADCL       	$0xFFFFFFFF, t10  (-wCa)
+	  16: PUTL       	t10, R10
+	  17: INCEIPL       	$4
+
+	0x25480A88:  7C8A5910  subfe r4,r10,r11
+	  18: GETL       	R10, t12
+	  19: GETL       	R11, t14
+	  20: SBBL       	t12, t14  (-rCa-wCa)
+	  21: PUTL       	t14, R4
+	  22: INCEIPL       	$4
+
+	0x25480A8C:  7CAA2039  and. r10,r5,r4
+	  23: GETL       	R5, t16
+	  24: GETL       	R4, t18
+	  25: ANDL       	t16, t18
+	  26: PUTL       	t18, R10
+	  27: CMP0L       	t18, t20  (-rSo)
+	  28: ICRFL       	t20, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0x25480A90:  39400004  li r10,4
+	  30: MOVL       	$0x4, t22
+	  31: PUTL       	t22, R10
+	  32: INCEIPL       	$4
+
+	0x25480A94:  41820010  bc 12,2,0x25480AA4
+	  33: Js02o       	$0x25480AA4
+
+
+
+. 30 25480A78 32
+. 81 7E 04 F0 31 59 FF FF 7C AA C9 10 31 4B FF FF 7C 8A 59 10 7C AA 20 39 39 40 00 04 41 82 00 10
+==== BB 31 (0x25480AA4) approx BBs exec'd 0 ====
+
+	0x25480AA4:  419200B8  bc 12,18,0x25480B5C
+	   0: Js18o       	$0x25480B5C
+
+
+
+. 31 25480AA4 4
+. 41 92 00 B8
+==== BB 32 (0x25480B5C) approx BBs exec'd 0 ====
+
+	0x25480B5C:  39600006  li r11,6
+	   0: MOVL       	$0x6, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x25480B60:  4BFFFF4C  b 0x25480AAC
+	   3: JMPo       	$0x25480AAC  ($4)
+
+
+
+. 32 25480B5C 8
+. 39 60 00 06 4B FF FF 4C
+==== BB 33 (0x25480AAC) approx BBs exec'd 0 ====
+
+	0x25480AAC:  39200000  li r9,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25480AB0:  48000014  b 0x25480AC4
+	   3: JMPo       	$0x25480AC4  ($4)
+
+
+
+. 33 25480AAC 8
+. 39 20 00 00 48 00 00 14
+==== BB 34 (0x25480AC4) approx BBs exec'd 0 ====
+
+	0x25480AC4:  7E095840  cmpl cr4,r9,r11
+	   0: GETL       	R9, t0
+	   1: GETL       	R11, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x25480AC8:  4190FFEC  bc 12,16,0x25480AB4
+	   5: Js16o       	$0x25480AB4
+
+
+
+. 34 25480AC4 8
+. 7E 09 58 40 41 90 FF EC
+==== BB 35 (0x25480AB4) approx BBs exec'd 0 ====
+
+	0x25480AB4:  553A103A  rlwinm r26,r9,2,0,29
+	   0: GETL       	R9, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x25480AB8:  7EFAFA14  add r23,r26,r31
+	   4: GETL       	R26, t2
+	   5: GETL       	R31, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R23
+	   8: INCEIPL       	$4
+
+	0x25480ABC:  7C00B86C  dcbst r0,r23
+	   9: INCEIPL       	$4
+
+	0x25480AC0:  7D295214  add r9,r9,r10
+	  10: GETL       	R9, t6
+	  11: GETL       	R10, t8
+	  12: ADDL       	t6, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x25480AC4:  7E095840  cmpl cr4,r9,r11
+	  15: GETL       	R9, t10
+	  16: GETL       	R11, t12
+	  17: CMPUL       	t10, t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x4, CR
+	  19: INCEIPL       	$4
+
+	0x25480AC8:  4190FFEC  bc 12,16,0x25480AB4
+	  20: Js16o       	$0x25480AB4
+
+
+
+. 35 25480AB4 24
+. 55 3A 10 3A 7E FA FA 14 7C 00 B8 6C 7D 29 52 14 7E 09 58 40 41 90 FF EC
+==== BB 36 (0x25480ACC) approx BBs exec'd 0 ====
+
+	0x25480ACC:  5568103A  rlwinm r8,r11,2,0,29
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x25480AD0:  7CE8FA14  add r7,r8,r31
+	   4: GETL       	R8, t2
+	   5: GETL       	R31, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x25480AD4:  3927FFFC  addi r9,r7,-4
+	   9: GETL       	R7, t6
+	  10: ADDL       	$0xFFFFFFFC, t6
+	  11: PUTL       	t6, R9
+	  12: INCEIPL       	$4
+
+	0x25480AD8:  7C00486C  dcbst r0,r9
+	  13: INCEIPL       	$4
+
+	0x25480ADC:  7C0004AC  sync
+	  14: INCEIPL       	$4
+
+	0x25480AE0:  39200000  li r9,0
+	  15: MOVL       	$0x0, t8
+	  16: PUTL       	t8, R9
+	  17: INCEIPL       	$4
+
+	0x25480AE4:  48000014  b 0x25480AF8
+	  18: JMPo       	$0x25480AF8  ($4)
+
+
+
+. 36 25480ACC 28
+. 55 68 10 3A 7C E8 FA 14 39 27 FF FC 7C 00 48 6C 7C 00 04 AC 39 20 00 00 48 00 00 14
+==== BB 37 (0x25480AF8) approx BBs exec'd 0 ====
+
+	0x25480AF8:  7F895840  cmpl cr7,r9,r11
+	   0: GETL       	R9, t0
+	   1: GETL       	R11, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25480AFC:  419CFFEC  bc 12,28,0x25480AE8
+	   5: Js28o       	$0x25480AE8
+
+
+
+. 37 25480AF8 8
+. 7F 89 58 40 41 9C FF EC
+==== BB 38 (0x25480AE8) approx BBs exec'd 0 ====
+
+	0x25480AE8:  553B103A  rlwinm r27,r9,2,0,29
+	   0: GETL       	R9, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25480AEC:  7CDBFA14  add r6,r27,r31
+	   4: GETL       	R27, t2
+	   5: GETL       	R31, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x25480AF0:  7C0037AC  icbi r0,r6
+	   9: GETL       	R6, t6
+	  10: CALLM_So       	
+	  11: PUSHL       	t6
+	  12: CALLMo       	$0x68
+	  13: CALLM_Eo       	
+	  14: INCEIPL       	$4
+
+	0x25480AF4:  7D295214  add r9,r9,r10
+	  15: GETL       	R9, t8
+	  16: GETL       	R10, t10
+	  17: ADDL       	t8, t10
+	  18: PUTL       	t10, R9
+	  19: INCEIPL       	$4
+
+	0x25480AF8:  7F895840  cmpl cr7,r9,r11
+	  20: GETL       	R9, t12
+	  21: GETL       	R11, t14
+	  22: CMPUL       	t12, t14, t16  (-rSo)
+	  23: ICRFL       	t16, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x25480AFC:  419CFFEC  bc 12,28,0x25480AE8
+	  25: Js28o       	$0x25480AE8
+
+
+
+. 38 25480AE8 24
+. 55 3B 10 3A 7C DB FA 14 7C 00 37 AC 7D 29 52 14 7F 89 58 40 41 9C FF EC
+==== BB 39 (0x25480B00) approx BBs exec'd 0 ====
+
+	0x25480B00:  7C68FA14  add r3,r8,r31
+	   0: GETL       	R8, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25480B04:  3BE3FFFC  addi r31,r3,-4
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0xFFFFFFFC, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0x25480B08:  7C00FFAC  icbi r0,r31
+	   9: GETL       	R31, t6
+	  10: CALLM_So       	
+	  11: PUSHL       	t6
+	  12: CALLMo       	$0x68
+	  13: CALLM_Eo       	
+	  14: INCEIPL       	$4
+
+	0x25480B0C:  7C0004AC  sync
+	  15: INCEIPL       	$4
+
+	0x25480B10:  4C00012C  	  16: INCEIPL       	$4
+
+	0x25480B14:  7F23CB78  or r3,r25,r25
+	  17: GETL       	R25, t8
+	  18: PUTL       	t8, R3
+	  19: INCEIPL       	$4
+
+	0x25480B18:  81010008  lwz r8,8(r1)
+	  20: GETL       	R1, t10
+	  21: ADDL       	$0x8, t10
+	  22: LDL       	(t10), t12
+	  23: PUTL       	t12, R8
+	  24: INCEIPL       	$4
+
+	0x25480B1C:  83210034  lwz r25,52(r1)
+	  25: GETL       	R1, t14
+	  26: ADDL       	$0x34, t14
+	  27: LDL       	(t14), t16
+	  28: PUTL       	t16, R25
+	  29: INCEIPL       	$4
+
+	0x25480B20:  82E1000C  lwz r23,12(r1)
+	  30: GETL       	R1, t18
+	  31: ADDL       	$0xC, t18
+	  32: LDL       	(t18), t20
+	  33: PUTL       	t20, R23
+	  34: INCEIPL       	$4
+
+	0x25480B24:  7D008120  mtcrf 0x8,r8
+	  35: GETL       	R8, t22
+	  36: ICRFL       	t22, $0x4, CR
+	  37: INCEIPL       	$4
+
+	0x25480B28:  7F2803A6  mtlr r25
+	  38: GETL       	R25, t24
+	  39: PUTL       	t24, LR
+	  40: INCEIPL       	$4
+
+	0x25480B2C:  83010010  lwz r24,16(r1)
+	  41: GETL       	R1, t26
+	  42: ADDL       	$0x10, t26
+	  43: LDL       	(t26), t28
+	  44: PUTL       	t28, R24
+	  45: INCEIPL       	$4
+
+	0x25480B30:  83210014  lwz r25,20(r1)
+	  46: GETL       	R1, t30
+	  47: ADDL       	$0x14, t30
+	  48: LDL       	(t30), t32
+	  49: PUTL       	t32, R25
+	  50: INCEIPL       	$4
+
+	0x25480B34:  83410018  lwz r26,24(r1)
+	  51: GETL       	R1, t34
+	  52: ADDL       	$0x18, t34
+	  53: LDL       	(t34), t36
+	  54: PUTL       	t36, R26
+	  55: INCEIPL       	$4
+
+	0x25480B38:  8361001C  lwz r27,28(r1)
+	  56: GETL       	R1, t38
+	  57: ADDL       	$0x1C, t38
+	  58: LDL       	(t38), t40
+	  59: PUTL       	t40, R27
+	  60: INCEIPL       	$4
+
+	0x25480B3C:  83810020  lwz r28,32(r1)
+	  61: GETL       	R1, t42
+	  62: ADDL       	$0x20, t42
+	  63: LDL       	(t42), t44
+	  64: PUTL       	t44, R28
+	  65: INCEIPL       	$4
+
+	0x25480B40:  83A10024  lwz r29,36(r1)
+	  66: GETL       	R1, t46
+	  67: ADDL       	$0x24, t46
+	  68: LDL       	(t46), t48
+	  69: PUTL       	t48, R29
+	  70: INCEIPL       	$4
+
+	0x25480B44:  83C10028  lwz r30,40(r1)
+	  71: GETL       	R1, t50
+	  72: ADDL       	$0x28, t50
+	  73: LDL       	(t50), t52
+	  74: PUTL       	t52, R30
+	  75: INCEIPL       	$4
+
+	0x25480B48:  83E1002C  lwz r31,44(r1)
+	  76: GETL       	R1, t54
+	  77: ADDL       	$0x2C, t54
+	  78: LDL       	(t54), t56
+	  79: PUTL       	t56, R31
+	  80: INCEIPL       	$4
+
+	0x25480B4C:  38210030  addi r1,r1,48
+	  81: GETL       	R1, t58
+	  82: ADDL       	$0x30, t58
+	  83: PUTL       	t58, R1
+	  84: INCEIPL       	$4
+
+	0x25480B50:  4E800020  blr
+	  85: GETL       	LR, t60
+	  86: JMPo-r       	t60  ($4)
+
+
+
+. 39 25480B00 84
+. 7C 68 FA 14 3B E3 FF FC 7C 00 FF AC 7C 00 04 AC 4C 00 01 2C 7F 23 CB 78 81 01 00 08 83 21 00 34 82 E1 00 0C 7D 00 81 20 7F 28 03 A6 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 40 (0x25471C54) approx BBs exec'd 0 ====
+
+	0x25471C54:  38E00000  li r7,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x25471C58:  8161004C  lwz r11,76(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x4C, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x25471C5C:  39400001  li r10,1
+	   8: MOVL       	$0x1, t6
+	   9: PUTL       	t6, R10
+	  10: INCEIPL       	$4
+
+	0x25471C60:  91410264  stw r10,612(r1)
+	  11: GETL       	R10, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x264, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25471C64:  2C0B0000  cmpi cr0,r11,0
+	  16: GETL       	R11, t12
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0x25471C68:  93610260  stw r27,608(r1)
+	  20: GETL       	R27, t16
+	  21: GETL       	R1, t18
+	  22: ADDL       	$0x260, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25471C6C:  93610270  stw r27,624(r1)
+	  25: GETL       	R27, t20
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0x270, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0x25471C70:  93610258  stw r27,600(r1)
+	  30: GETL       	R27, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x258, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25471C74:  9361026C  stw r27,620(r1)
+	  35: GETL       	R27, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x26C, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25471C78:  90E10254  stw r7,596(r1)
+	  40: GETL       	R7, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x254, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0x25471C7C:  41820018  bc 12,2,0x25471C94
+	  45: Js02o       	$0x25471C94
+
+
+
+. 40 25471C54 44
+. 38 E0 00 00 81 61 00 4C 39 40 00 01 91 41 02 64 2C 0B 00 00 93 61 02 60 93 61 02 70 93 61 02 58 93 61 02 6C 90 E1 02 54 41 82 00 18
+==== BB 41 (0x25471C80) approx BBs exec'd 0 ====
+
+	0x25471C80:  80AB0004  lwz r5,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25471C84:  80810050  lwz r4,80(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x50, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25471C88:  90A10250  stw r5,592(r1)
+	  10: GETL       	R5, t8
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0x250, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471C8C:  80E40004  lwz r7,4(r4)
+	  15: GETL       	R4, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R7
+	  19: INCEIPL       	$4
+
+	0x25471C90:  90E10254  stw r7,596(r1)
+	  20: GETL       	R7, t16
+	  21: GETL       	R1, t18
+	  22: ADDL       	$0x254, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25471C94:  2C830000  cmpi cr1,r3,0
+	  25: GETL       	R3, t20
+	  26: CMP0L       	t20, t22  (-rSo)
+	  27: ICRFL       	t22, $0x1, CR
+	  28: INCEIPL       	$4
+
+	0x25471C98:  41860010  bc 12,6,0x25471CA8
+	  29: Js06o       	$0x25471CA8
+
+
+
+. 41 25471C80 28
+. 80 AB 00 04 80 81 00 50 90 A1 02 50 80 E4 00 04 90 E1 02 54 2C 83 00 00 41 86 00 10
+==== BB 42 (0x25471CA8) approx BBs exec'd 0 ====
+
+	0x25471CA8:  3B610008  addi r27,r1,8
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25471CAC:  3B200002  li r25,2
+	   4: MOVL       	$0x2, t2
+	   5: PUTL       	t2, R25
+	   6: INCEIPL       	$4
+
+	0x25471CB0:  814100D0  lwz r10,208(r1)
+	   7: GETL       	R1, t4
+	   8: ADDL       	$0xD0, t4
+	   9: LDL       	(t4), t6
+	  10: PUTL       	t6, R10
+	  11: INCEIPL       	$4
+
+	0x25471CB4:  81210048  lwz r9,72(r1)
+	  12: GETL       	R1, t8
+	  13: ADDL       	$0x48, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R9
+	  16: INCEIPL       	$4
+
+	0x25471CB8:  2F8A0000  cmpi cr7,r10,0
+	  17: GETL       	R10, t12
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x25471CBC:  811B0248  lwz r8,584(r27)
+	  21: GETL       	R27, t16
+	  22: ADDL       	$0x248, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R8
+	  25: INCEIPL       	$4
+
+	0x25471CC0:  817B024C  lwz r11,588(r27)
+	  26: GETL       	R27, t20
+	  27: ADDL       	$0x24C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R11
+	  30: INCEIPL       	$4
+
+	0x25471CC4:  83490004  lwz r26,4(r9)
+	  31: GETL       	R9, t24
+	  32: ADDL       	$0x4, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R26
+	  35: INCEIPL       	$4
+
+	0x25471CC8:  39200000  li r9,0
+	  36: MOVL       	$0x0, t28
+	  37: PUTL       	t28, R9
+	  38: INCEIPL       	$4
+
+	0x25471CCC:  7F885A14  add r28,r8,r11
+	  39: GETL       	R8, t30
+	  40: GETL       	R11, t32
+	  41: ADDL       	t30, t32
+	  42: PUTL       	t32, R28
+	  43: INCEIPL       	$4
+
+	0x25471CD0:  83A10010  lwz r29,16(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x10, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R29
+	  48: INCEIPL       	$4
+
+	0x25471CD4:  419E0008  bc 12,30,0x25471CDC
+	  49: Js30o       	$0x25471CDC
+
+
+
+. 42 25471CA8 48
+. 3B 61 00 08 3B 20 00 02 81 41 00 D0 81 21 00 48 2F 8A 00 00 81 1B 02 48 81 7B 02 4C 83 49 00 04 39 20 00 00 7F 88 5A 14 83 A1 00 10 41 9E 00 08
+==== BB 43 (0x25471CD8) approx BBs exec'd 0 ====
+
+	0x25471CD8:  812A0004  lwz r9,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25471CDC:  3CA0AAAA  lis r5,-21846
+	   5: MOVL       	$0xAAAA0000, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25471CE0:  60A4AAAB  ori r4,r5,0xAAAB
+	   8: MOVL       	$0xAAAAAAAB, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x25471CE4:  7D4B2016  mulhwu r10,r11,r4
+	  11: GETL       	R11, t8
+	  12: GETL       	R4, t10
+	  13: UMULHL       	t8, t10
+	  14: PUTL       	t10, R10
+	  15: INCEIPL       	$4
+
+	0x25471CE8:  7D0B4378  or r11,r8,r8
+	  16: GETL       	R8, t12
+	  17: PUTL       	t12, R11
+	  18: INCEIPL       	$4
+
+	0x25471CEC:  5540E8FE  rlwinm r0,r10,29,3,31
+	  19: GETL       	R10, t14
+	  20: SHRL       	$0x3, t14
+	  21: PUTL       	t14, R0
+	  22: INCEIPL       	$4
+
+	0x25471CF0:  7C004840  cmpl cr0,r0,r9
+	  23: GETL       	R0, t16
+	  24: GETL       	R9, t18
+	  25: CMPUL       	t16, t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x0, CR
+	  27: INCEIPL       	$4
+
+	0x25471CF4:  40810008  bc 4,1,0x25471CFC
+	  28: Jc01o       	$0x25471CFC
+
+
+
+. 43 25471CD8 32
+. 81 2A 00 04 3C A0 AA AA 60 A4 AA AB 7D 4B 20 16 7D 0B 43 78 55 40 E8 FE 7C 00 48 40 40 81 00 08
+==== BB 44 (0x25471CF8) approx BBs exec'd 0 ====
+
+	0x25471CF8:  7D204B78  or r0,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25471CFC:  1C60000C  mulli r3,r0,12
+	   3: GETL       	R0, t2
+	   4: MULL       	$0xC, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0x25471D00:  7FEB1A14  add r31,r11,r3
+	   7: GETL       	R11, t4
+	   8: GETL       	R3, t6
+	   9: ADDL       	t4, t6
+	  10: PUTL       	t6, R31
+	  11: INCEIPL       	$4
+
+	0x25471D04:  7F8BF840  cmpl cr7,r11,r31
+	  12: GETL       	R11, t8
+	  13: GETL       	R31, t10
+	  14: CMPUL       	t8, t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x25471D08:  409C0020  bc 4,28,0x25471D28
+	  17: Jc28o       	$0x25471D28
+
+
+
+. 44 25471CF8 20
+. 7D 20 4B 78 1C 60 00 0C 7F EB 1A 14 7F 8B F8 40 40 9C 00 20
+==== BB 45 (0x25471D0C) approx BBs exec'd 0 ====
+
+	0x25471D0C:  818B0008  lwz r12,8(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x25471D10:  80CB0000  lwz r6,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x25471D14:  396B000C  addi r11,r11,12
+	   9: GETL       	R11, t8
+	  10: ADDL       	$0xC, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x25471D18:  7CECEA14  add r7,r12,r29
+	  13: GETL       	R12, t10
+	  14: GETL       	R29, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R7
+	  17: INCEIPL       	$4
+
+	0x25471D1C:  7F8BF840  cmpl cr7,r11,r31
+	  18: GETL       	R11, t14
+	  19: GETL       	R31, t16
+	  20: CMPUL       	t14, t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x25471D20:  7CE6E92E  stwx r7,r6,r29
+	  23: GETL       	R29, t20
+	  24: GETL       	R6, t22
+	  25: ADDL       	t22, t20
+	  26: GETL       	R7, t24
+	  27: STL       	t24, (t20)
+	  28: INCEIPL       	$4
+
+	0x25471D24:  419CFFE8  bc 12,28,0x25471D0C
+	  29: Js28o       	$0x25471D0C
+
+
+
+. 45 25471D0C 28
+. 81 8B 00 08 80 CB 00 00 39 6B 00 0C 7C EC EA 14 7F 8B F8 40 7C E6 E9 2E 41 9C FF E8
+==== BB 46 (0x25471D28) approx BBs exec'd 0 ====
+
+	0x25471D28:  7C9FE040  cmpl cr1,r31,r28
+	   0: GETL       	R31, t0
+	   1: GETL       	R28, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25471D2C:  4084008C  bc 4,4,0x25471DB8
+	   5: Jc04o       	$0x25471DB8
+
+
+
+. 46 25471D28 8
+. 7C 9F E0 40 40 84 00 8C
+==== BB 47 (0x25471D30) approx BBs exec'd 0 ====
+
+	0x25471D30:  811F0004  lwz r8,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25471D34:  817F0000  lwz r11,0(r31)
+	   5: GETL       	R31, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0x25471D38:  550A063E  rlwinm r10,r8,0,24,31
+	   9: GETL       	R8, t8
+	  10: ANDL       	$0xFF, t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x25471D3C:  5500E136  rlwinm r0,r8,28,4,27
+	  13: GETL       	R8, t10
+	  14: ROLL       	$0x1C, t10
+	  15: ANDL       	$0xFFFFFF0, t10
+	  16: PUTL       	t10, R0
+	  17: INCEIPL       	$4
+
+	0x25471D40:  2F0A0016  cmpi cr6,r10,22
+	  18: GETL       	R10, t12
+	  19: MOVL       	$0x16, t16
+	  20: CMPL       	t12, t16, t14  (-rSo)
+	  21: ICRFL       	t14, $0x6, CR
+	  22: INCEIPL       	$4
+
+	0x25471D44:  7CC0D214  add r6,r0,r26
+	  23: GETL       	R0, t18
+	  24: GETL       	R26, t20
+	  25: ADDL       	t18, t20
+	  26: PUTL       	t20, R6
+	  27: INCEIPL       	$4
+
+	0x25471D48:  7D0BEA14  add r8,r11,r29
+	  28: GETL       	R11, t22
+	  29: GETL       	R29, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R8
+	  32: INCEIPL       	$4
+
+	0x25471D4C:  419A00C4  bc 12,26,0x25471E10
+	  33: Js26o       	$0x25471E10
+
+
+
+. 47 25471D30 32
+. 81 1F 00 04 81 7F 00 00 55 0A 06 3E 55 00 E1 36 2F 0A 00 16 7C C0 D2 14 7D 0B EA 14 41 9A 00 C4
+==== BB 48 (0x25471D50) approx BBs exec'd 0 ====
+
+	0x25471D50:  2F8A0000  cmpi cr7,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25471D54:  419E0058  bc 12,30,0x25471DAC
+	   4: Js30o       	$0x25471DAC
+
+
+
+. 48 25471D50 8
+. 2F 8A 00 00 41 9E 00 58
+==== BB 49 (0x25471D58) approx BBs exec'd 0 ====
+
+	0x25471D58:  8966000C  lbz r11,12(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25471D5C:  5569E13F  rlwinm. r9,r11,28,4,31
+	   5: GETL       	R11, t4
+	   6: SHRL       	$0x4, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25471D60:  4182014C  bc 12,2,0x25471EAC
+	  11: Js02o       	$0x25471EAC
+
+
+
+. 49 25471D58 12
+. 89 66 00 0C 55 69 E1 3F 41 82 01 4C
+==== BB 50 (0x25471D64) approx BBs exec'd 0 ====
+
+	0x25471D64:  A006000E  lhz r0,14(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0xE, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471D68:  2F000000  cmpi cr6,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25471D6C:  39200000  li r9,0
+	   9: MOVL       	$0x0, t8
+	  10: PUTL       	t8, R9
+	  11: INCEIPL       	$4
+
+	0x25471D70:  419A0008  bc 12,26,0x25471D78
+	  12: Js26o       	$0x25471D78
+
+
+
+. 50 25471D64 16
+. A0 06 00 0E 2F 00 00 00 39 20 00 00 41 9A 00 08
+==== BB 51 (0x25471D74) approx BBs exec'd 0 ====
+
+	0x25471D74:  81210010  lwz r9,16(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25471D78:  80A60004  lwz r5,4(r6)
+	   5: GETL       	R6, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0x25471D7C:  7D292A14  add r9,r9,r5
+	  10: GETL       	R9, t8
+	  11: GETL       	R5, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x25471D80:  2F8A0001  cmpi cr7,r10,1
+	  15: GETL       	R10, t12
+	  16: MOVL       	$0x1, t16
+	  17: CMPL       	t12, t16, t14  (-rSo)
+	  18: ICRFL       	t14, $0x7, CR
+	  19: INCEIPL       	$4
+
+	0x25471D84:  807F0008  lwz r3,8(r31)
+	  20: GETL       	R31, t18
+	  21: ADDL       	$0x8, t18
+	  22: LDL       	(t18), t20
+	  23: PUTL       	t20, R3
+	  24: INCEIPL       	$4
+
+	0x25471D88:  7D291A14  add r9,r9,r3
+	  25: GETL       	R9, t22
+	  26: GETL       	R3, t24
+	  27: ADDL       	t22, t24
+	  28: PUTL       	t24, R9
+	  29: INCEIPL       	$4
+
+	0x25471D8C:  419E0078  bc 12,30,0x25471E04
+	  30: Js30o       	$0x25471E04
+
+
+
+. 51 25471D74 28
+. 81 21 00 10 80 A6 00 04 7D 29 2A 14 2F 8A 00 01 80 7F 00 08 7D 29 1A 14 41 9E 00 78
+==== BB 52 (0x25471D90) approx BBs exec'd 0 ====
+
+	0x25471D90:  2C0A0014  cmpi cr0,r10,20
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x14, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25471D94:  41820070  bc 12,2,0x25471E04
+	   5: Js02o       	$0x25471E04
+
+
+
+. 52 25471D90 8
+. 2C 0A 00 14 41 82 00 70
+==== BB 53 (0x25471E04) approx BBs exec'd 0 ====
+
+	0x25471E04:  3BFF000C  addi r31,r31,12
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0x25471E08:  91280000  stw r9,0(r8)
+	   4: GETL       	R9, t2
+	   5: GETL       	R8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x25471E0C:  4BFFFFA4  b 0x25471DB0
+	   8: JMPo       	$0x25471DB0  ($4)
+
+
+
+. 53 25471E04 12
+. 3B FF 00 0C 91 28 00 00 4B FF FF A4
+==== BB 54 (0x25471DB0) approx BBs exec'd 0 ====
+
+	0x25471DB0:  7C9FE040  cmpl cr1,r31,r28
+	   0: GETL       	R31, t0
+	   1: GETL       	R28, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25471DB4:  4184FF7C  bc 12,4,0x25471D30
+	   5: Js04o       	$0x25471D30
+
+
+
+. 54 25471DB0 8
+. 7C 9F E0 40 41 84 FF 7C
+==== BB 55 (0x25471D98) approx BBs exec'd 0 ====
+
+	0x25471D98:  7F03C378  or r3,r24,r24
+	   0: GETL       	R24, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25471D9C:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25471DA0:  38A00000  li r5,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25471DA4:  7CC73378  or r7,r6,r6
+	   9: GETL       	R6, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0x25471DA8:  4800EF01  bl 0x25480CA8
+	  12: MOVL       	$0x25471DAC, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25480CA8  ($4)
+
+
+
+. 55 25471D98 20
+. 7F 03 C3 78 7F E4 FB 78 38 A0 00 00 7C C7 33 78 48 00 EF 01
+==== BB 56 __process_machine_rela(0x25480CA8) approx BBs exec'd 0 ====
+
+	0x25480CA8:  2B8A004D  cmpli cr7,r10,77
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x4D, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25480CAC:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0x25480CB0:  9421FFD0  stwu r1,-48(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFD0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25480CB4:  4801634D  bl 0x25497000
+	  14: MOVL       	$0x25480CB8, t12
+	  15: PUTL       	t12, LR
+	  16: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 56 25480CA8 16
+. 2B 8A 00 4D 7C 08 02 A6 94 21 FF D0 48 01 63 4D
+==== BB 57 (0x25480CB8) approx BBs exec'd 0 ====
+
+	0x25480CB8:  93410018  stw r26,24(r1)
+	   0: GETL       	R26, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25480CBC:  7D3A4B78  or r26,r9,r9
+	   5: GETL       	R9, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x25480CC0:  9361001C  stw r27,28(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25480CC4:  7CDB3378  or r27,r6,r6
+	  13: GETL       	R6, t10
+	  14: PUTL       	t10, R27
+	  15: INCEIPL       	$4
+
+	0x25480CC8:  93810020  stw r28,32(r1)
+	  16: GETL       	R28, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x20, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x25480CCC:  7C9C2378  or r28,r4,r4
+	  21: GETL       	R4, t16
+	  22: PUTL       	t16, R28
+	  23: INCEIPL       	$4
+
+	0x25480CD0:  93A10024  stw r29,36(r1)
+	  24: GETL       	R29, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x24, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x25480CD4:  7CBD2B78  or r29,r5,r5
+	  29: GETL       	R5, t22
+	  30: PUTL       	t22, R29
+	  31: INCEIPL       	$4
+
+	0x25480CD8:  93C10028  stw r30,40(r1)
+	  32: GETL       	R30, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x28, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x25480CDC:  7FC802A6  mflr r30
+	  37: GETL       	LR, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0x25480CE0:  93E1002C  stw r31,44(r1)
+	  40: GETL       	R31, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x2C, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x25480CE4:  7D5F5378  or r31,r10,r10
+	  45: GETL       	R10, t34
+	  46: PUTL       	t34, R31
+	  47: INCEIPL       	$4
+
+	0x25480CE8:  90010034  stw r0,52(r1)
+	  48: GETL       	R0, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x34, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25480CEC:  90610008  stw r3,8(r1)
+	  53: GETL       	R3, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x8, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x25480CF0:  90E10010  stw r7,16(r1)
+	  58: GETL       	R7, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x10, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x25480CF4:  9101000C  stw r8,12(r1)
+	  63: GETL       	R8, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0xC, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x25480CF8:  419D0140  bc 12,29,0x25480E38
+	  68: Js29o       	$0x25480E38
+
+
+
+. 57 25480CB8 68
+. 93 41 00 18 7D 3A 4B 78 93 61 00 1C 7C DB 33 78 93 81 00 20 7C 9C 23 78 93 A1 00 24 7C BD 2B 78 93 C1 00 28 7F C8 02 A6 93 E1 00 2C 7D 5F 53 78 90 01 00 34 90 61 00 08 90 E1 00 10 91 01 00 0C 41 9D 01 40
+==== BB 58 (0x25480CFC) approx BBs exec'd 0 ====
+
+	0x25480CFC:  817E0450  lwz r11,1104(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x450, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25480D00:  5544103A  rlwinm r4,r10,2,0,29
+	   5: GETL       	R10, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x25480D04:  7C64582E  lwzx r3,r4,r11
+	   9: GETL       	R11, t6
+	  10: GETL       	R4, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x25480D08:  7D235A14  add r9,r3,r11
+	  15: GETL       	R3, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25480D0C:  7D2903A6  mtctr r9
+	  20: GETL       	R9, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x25480D10:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+
+. 58 25480CFC 24
+. 81 7E 04 50 55 44 10 3A 7C 64 58 2E 7D 23 5A 14 7D 29 03 A6 4E 80 04 20
+==== BB 59 (0x25480FD8) approx BBs exec'd 0 ====
+
+	0x25480FD8:  80A1000C  lwz r5,12(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25480FDC:  7FA5D050  subf r29,r5,r26
+	   5: GETL       	R5, t4
+	   6: GETL       	R26, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x25480FE0:  57BF3032  rlwinm r31,r29,6,0,25
+	  10: GETL       	R29, t8
+	  11: SHLL       	$0x6, t8
+	  12: PUTL       	t8, R31
+	  13: INCEIPL       	$4
+
+	0x25480FE4:  7FE93670  srawi r9,r31,6
+	  14: GETL       	R31, t10
+	  15: SARL       	$0x6, t10  (-wCa)
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x25480FE8:  7F89E800  cmp cr7,r9,r29
+	  18: GETL       	R9, t12
+	  19: GETL       	R29, t14
+	  20: CMPL       	t12, t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x25480FEC:  419E0254  bc 12,30,0x25481240
+	  23: Js30o       	$0x25481240
+
+
+
+. 59 25480FD8 24
+. 80 A1 00 0C 7F A5 D0 50 57 BF 30 32 7F E9 36 70 7F 89 E8 00 41 9E 02 54
+==== BB 60 (0x25481240) approx BBs exec'd 0 ====
+
+	0x25481240:  553A01BA  rlwinm r26,r9,0,6,29
+	   0: GETL       	R9, t0
+	   1: ANDL       	$0x3FFFFFC, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x25481244:  67404800  oris r0,r26,0x4800
+	   4: GETL       	R26, t2
+	   5: ORL       	$0x48000000, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x25481248:  90050000  stw r0,0(r5)
+	   8: GETL       	R0, t4
+	   9: GETL       	R5, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x2548124C:  4BFFFBB4  b 0x25480E00
+	  12: JMPo       	$0x25480E00  ($4)
+
+
+
+. 60 25481240 16
+. 55 3A 01 BA 67 40 48 00 90 05 00 00 4B FF FB B4
+==== BB 61 (0x25480E00) approx BBs exec'd 0 ====
+
+	0x25480E00:  7C00286C  dcbst r0,r5
+	   0: INCEIPL       	$4
+
+	0x25480E04:  7C0004AC  sync
+	   1: INCEIPL       	$4
+
+	0x25480E08:  8381000C  lwz r28,12(r1)
+	   2: GETL       	R1, t0
+	   3: ADDL       	$0xC, t0
+	   4: LDL       	(t0), t2
+	   5: PUTL       	t2, R28
+	   6: INCEIPL       	$4
+
+	0x25480E0C:  7C00E7AC  icbi r0,r28
+	   7: GETL       	R28, t4
+	   8: CALLM_So       	
+	   9: PUSHL       	t4
+	  10: CALLMo       	$0x68
+	  11: CALLM_Eo       	
+	  12: INCEIPL       	$4
+
+	0x25480E10:  80610034  lwz r3,52(r1)
+	  13: GETL       	R1, t6
+	  14: ADDL       	$0x34, t6
+	  15: LDL       	(t6), t8
+	  16: PUTL       	t8, R3
+	  17: INCEIPL       	$4
+
+	0x25480E14:  83410018  lwz r26,24(r1)
+	  18: GETL       	R1, t10
+	  19: ADDL       	$0x18, t10
+	  20: LDL       	(t10), t12
+	  21: PUTL       	t12, R26
+	  22: INCEIPL       	$4
+
+	0x25480E18:  8361001C  lwz r27,28(r1)
+	  23: GETL       	R1, t14
+	  24: ADDL       	$0x1C, t14
+	  25: LDL       	(t14), t16
+	  26: PUTL       	t16, R27
+	  27: INCEIPL       	$4
+
+	0x25480E1C:  7C6803A6  mtlr r3
+	  28: GETL       	R3, t18
+	  29: PUTL       	t18, LR
+	  30: INCEIPL       	$4
+
+	0x25480E20:  83810020  lwz r28,32(r1)
+	  31: GETL       	R1, t20
+	  32: ADDL       	$0x20, t20
+	  33: LDL       	(t20), t22
+	  34: PUTL       	t22, R28
+	  35: INCEIPL       	$4
+
+	0x25480E24:  83A10024  lwz r29,36(r1)
+	  36: GETL       	R1, t24
+	  37: ADDL       	$0x24, t24
+	  38: LDL       	(t24), t26
+	  39: PUTL       	t26, R29
+	  40: INCEIPL       	$4
+
+	0x25480E28:  83C10028  lwz r30,40(r1)
+	  41: GETL       	R1, t28
+	  42: ADDL       	$0x28, t28
+	  43: LDL       	(t28), t30
+	  44: PUTL       	t30, R30
+	  45: INCEIPL       	$4
+
+	0x25480E2C:  83E1002C  lwz r31,44(r1)
+	  46: GETL       	R1, t32
+	  47: ADDL       	$0x2C, t32
+	  48: LDL       	(t32), t34
+	  49: PUTL       	t34, R31
+	  50: INCEIPL       	$4
+
+	0x25480E30:  38210030  addi r1,r1,48
+	  51: GETL       	R1, t36
+	  52: ADDL       	$0x30, t36
+	  53: PUTL       	t36, R1
+	  54: INCEIPL       	$4
+
+	0x25480E34:  4E800020  blr
+	  55: GETL       	LR, t38
+	  56: JMPo-r       	t38  ($4)
+
+
+
+. 61 25480E00 56
+. 7C 00 28 6C 7C 00 04 AC 83 81 00 0C 7C 00 E7 AC 80 61 00 34 83 41 00 18 83 61 00 1C 7C 68 03 A6 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 62 (0x25471DAC) approx BBs exec'd 0 ====
+
+	0x25471DAC:  3BFF000C  addi r31,r31,12
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0x25471DB0:  7C9FE040  cmpl cr1,r31,r28
+	   4: GETL       	R31, t2
+	   5: GETL       	R28, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25471DB4:  4184FF7C  bc 12,4,0x25471D30
+	   9: Js04o       	$0x25471D30
+
+
+
+. 62 25471DAC 12
+. 3B FF 00 0C 7C 9F E0 40 41 84 FF 7C
+==== BB 63 (0x25471DB8) approx BBs exec'd 0 ====
+
+	0x25471DB8:  3739FFFF  addic. r25,r25,-1
+	   0: GETL       	R25, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R25
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25471DBC:  3B7B000C  addi r27,r27,12
+	   6: GETL       	R27, t4
+	   7: ADDL       	$0xC, t4
+	   8: PUTL       	t4, R27
+	   9: INCEIPL       	$4
+
+	0x25471DC0:  4080FEF0  bc 4,0,0x25471CB0
+	  10: Jc00o       	$0x25471CB0
+
+
+
+. 63 25471DB8 12
+. 37 39 FF FF 3B 7B 00 0C 40 80 FE F0
+==== BB 64 (0x25471CB0) approx BBs exec'd 0 ====
+
+	0x25471CB0:  814100D0  lwz r10,208(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xD0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25471CB4:  81210048  lwz r9,72(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x48, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25471CB8:  2F8A0000  cmpi cr7,r10,0
+	  10: GETL       	R10, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25471CBC:  811B0248  lwz r8,584(r27)
+	  14: GETL       	R27, t12
+	  15: ADDL       	$0x248, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R8
+	  18: INCEIPL       	$4
+
+	0x25471CC0:  817B024C  lwz r11,588(r27)
+	  19: GETL       	R27, t16
+	  20: ADDL       	$0x24C, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R11
+	  23: INCEIPL       	$4
+
+	0x25471CC4:  83490004  lwz r26,4(r9)
+	  24: GETL       	R9, t20
+	  25: ADDL       	$0x4, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R26
+	  28: INCEIPL       	$4
+
+	0x25471CC8:  39200000  li r9,0
+	  29: MOVL       	$0x0, t24
+	  30: PUTL       	t24, R9
+	  31: INCEIPL       	$4
+
+	0x25471CCC:  7F885A14  add r28,r8,r11
+	  32: GETL       	R8, t26
+	  33: GETL       	R11, t28
+	  34: ADDL       	t26, t28
+	  35: PUTL       	t28, R28
+	  36: INCEIPL       	$4
+
+	0x25471CD0:  83A10010  lwz r29,16(r1)
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x10, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R29
+	  41: INCEIPL       	$4
+
+	0x25471CD4:  419E0008  bc 12,30,0x25471CDC
+	  42: Js30o       	$0x25471CDC
+
+
+
+. 64 25471CB0 40
+. 81 41 00 D0 81 21 00 48 2F 8A 00 00 81 1B 02 48 81 7B 02 4C 83 49 00 04 39 20 00 00 7F 88 5A 14 83 A1 00 10 41 9E 00 08
+==== BB 65 (0x25471CFC) approx BBs exec'd 0 ====
+
+	0x25471CFC:  1C60000C  mulli r3,r0,12
+	   0: GETL       	R0, t0
+	   1: MULL       	$0xC, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x25471D00:  7FEB1A14  add r31,r11,r3
+	   4: GETL       	R11, t2
+	   5: GETL       	R3, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0x25471D04:  7F8BF840  cmpl cr7,r11,r31
+	   9: GETL       	R11, t6
+	  10: GETL       	R31, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25471D08:  409C0020  bc 4,28,0x25471D28
+	  14: Jc28o       	$0x25471D28
+
+
+
+. 65 25471CFC 16
+. 1C 60 00 0C 7F EB 1A 14 7F 8B F8 40 40 9C 00 20
+==== BB 66 (0x25471DC4) approx BBs exec'd 0 ====
+
+	0x25471DC4:  7EE3BB78  or r3,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25471DC8:  7F04C378  or r4,r24,r24
+	   3: GETL       	R24, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25471DCC:  4BFFFBBD  bl 0x25471988
+	   6: MOVL       	$0x25471DD0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25471988  ($4)
+
+
+
+. 66 25471DC4 12
+. 7E E3 BB 78 7F 04 C3 78 4B FF FB BD
+==== BB 67 _dl_start_final(0x25471988) approx BBs exec'd 0 ====
+
+	0x25471988:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547198C:  7D2802A6  mflr r9
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x25471990:  48025671  bl 0x25497000
+	   9: MOVL       	$0x25471994, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 67 25471988 12
+. 94 21 FF E0 7D 28 02 A6 48 02 56 71
+==== BB 68 (0x25471994) approx BBs exec'd 0 ====
+
+	0x25471994:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25471998:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547199C:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254719A0:  91210024  stw r9,36(r1)
+	  13: GETL       	R9, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254719A4:  38A0012C  li r5,300
+	  18: MOVL       	$0x12C, t14
+	  19: PUTL       	t14, R5
+	  20: INCEIPL       	$4
+
+	0x254719A8:  80040000  lwz r0,0(r4)
+	  21: GETL       	R4, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R0
+	  24: INCEIPL       	$4
+
+	0x254719AC:  83BE04C8  lwz r29,1224(r30)
+	  25: GETL       	R30, t20
+	  26: ADDL       	$0x4C8, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R29
+	  29: INCEIPL       	$4
+
+	0x254719B0:  81840008  lwz r12,8(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0x8, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R12
+	  34: INCEIPL       	$4
+
+	0x254719B4:  38840020  addi r4,r4,32
+	  35: GETL       	R4, t28
+	  36: ADDL       	$0x20, t28
+	  37: PUTL       	t28, R4
+	  38: INCEIPL       	$4
+
+	0x254719B8:  901D01B8  stw r0,440(r29)
+	  39: GETL       	R0, t30
+	  40: GETL       	R29, t32
+	  41: ADDL       	$0x1B8, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x254719BC:  919D01C0  stw r12,448(r29)
+	  44: GETL       	R12, t34
+	  45: GETL       	R29, t36
+	  46: ADDL       	$0x1C0, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0x254719C0:  9361000C  stw r27,12(r1)
+	  49: GETL       	R27, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0xC, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0x254719C4:  7C7B1B78  or r27,r3,r3
+	  54: GETL       	R3, t42
+	  55: PUTL       	t42, R27
+	  56: INCEIPL       	$4
+
+	0x254719C8:  93810010  stw r28,16(r1)
+	  57: GETL       	R28, t44
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x10, t46
+	  60: STL       	t44, (t46)
+	  61: INCEIPL       	$4
+
+	0x254719CC:  387D01D8  addi r3,r29,472
+	  62: GETL       	R29, t48
+	  63: ADDL       	$0x1D8, t48
+	  64: PUTL       	t48, R3
+	  65: INCEIPL       	$4
+
+	0x254719D0:  3B9D01B8  addi r28,r29,440
+	  66: GETL       	R29, t50
+	  67: ADDL       	$0x1B8, t50
+	  68: PUTL       	t50, R28
+	  69: INCEIPL       	$4
+
+	0x254719D4:  4801220D  bl 0x25483BE0
+	  70: MOVL       	$0x254719D8, t52
+	  71: PUTL       	t52, LR
+	  72: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 68 25471994 68
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 91 21 00 24 38 A0 01 2C 80 04 00 00 83 BE 04 C8 81 84 00 08 38 84 00 20 90 1D 01 B8 91 9D 01 C0 93 61 00 0C 7C 7B 1B 78 93 81 00 10 38 7D 01 D8 3B 9D 01 B8 48 01 22 0D
+==== BB 69 memcpy(0x25483BE0) approx BBs exec'd 0 ====
+
+	0x25483BE0:  2B85000F  cmpli cr7,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25483BE4:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0x25483BE8:  9421FFE0  stwu r1,-32(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFE0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25483BEC:  9361000C  stw r27,12(r1)
+	  14: GETL       	R27, t12
+	  15: GETL       	R1, t14
+	  16: ADDL       	$0xC, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25483BF0:  7C7B1B78  or r27,r3,r3
+	  19: GETL       	R3, t16
+	  20: PUTL       	t16, R27
+	  21: INCEIPL       	$4
+
+	0x25483BF4:  93A10014  stw r29,20(r1)
+	  22: GETL       	R29, t18
+	  23: GETL       	R1, t20
+	  24: ADDL       	$0x14, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0x25483BF8:  7C9D2378  or r29,r4,r4
+	  27: GETL       	R4, t22
+	  28: PUTL       	t22, R29
+	  29: INCEIPL       	$4
+
+	0x25483BFC:  93E1001C  stw r31,28(r1)
+	  30: GETL       	R31, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x1C, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25483C00:  7C7F1B78  or r31,r3,r3
+	  35: GETL       	R3, t28
+	  36: PUTL       	t28, R31
+	  37: INCEIPL       	$4
+
+	0x25483C04:  93810010  stw r28,16(r1)
+	  38: GETL       	R28, t30
+	  39: GETL       	R1, t32
+	  40: ADDL       	$0x10, t32
+	  41: STL       	t30, (t32)
+	  42: INCEIPL       	$4
+
+	0x25483C08:  93C10018  stw r30,24(r1)
+	  43: GETL       	R30, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x18, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x25483C0C:  90010024  stw r0,36(r1)
+	  48: GETL       	R0, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x24, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0x25483C10:  409D0058  bc 4,29,0x25483C68
+	  53: Jc29o       	$0x25483C68
+
+
+
+. 69 25483BE0 52
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C 7B 1B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C 7F 1B 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+==== BB 70 (0x25483C14) approx BBs exec'd 0 ====
+
+	0x25483C14:  7C8300D0  neg r4,r3
+	   0: GETL       	R3, t0
+	   1: NEGL       	t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25483C18:  548307BE  rlwinm r3,r4,0,30,31
+	   4: GETL       	R4, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0x25483C1C:  7C691B79  or. r9,r3,r3
+	   8: GETL       	R3, t4
+	   9: PUTL       	t4, R9
+	  10: CMP0L       	t4, t6  (-rSo)
+	  11: ICRFL       	t6, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25483C20:  7F832850  subf r28,r3,r5
+	  13: GETL       	R3, t8
+	  14: GETL       	R5, t10
+	  15: SUBL       	t8, t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0x25483C24:  4182001C  bc 12,2,0x25483C40
+	  18: Js02o       	$0x25483C40
+
+
+
+. 70 25483C14 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+==== BB 71 (0x25483C40) approx BBs exec'd 0 ====
+
+	0x25483C40:  73A00003  andi. r0,r29,0x3
+	   0: GETL       	R29, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25483C44:  4082006C  bc 4,2,0x25483CB0
+	   6: Jc02o       	$0x25483CB0
+
+
+
+. 71 25483C40 8
+. 73 A0 00 03 40 82 00 6C
+==== BB 72 (0x25483C48) approx BBs exec'd 0 ====
+
+	0x25483C48:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25483C4C:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25483C50:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x25483C54:  48000071  bl 0x25483CC4
+	  10: MOVL       	$0x25483C58, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0x25483CC4  ($4)
+
+
+
+. 72 25483C48 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 00 71
+==== BB 73 _wordcopy_fwd_aligned(0x25483CC4) approx BBs exec'd 0 ====
+
+	0x25483CC4:  54A0077E  rlwinm r0,r5,0,29,31
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0x7, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25483CC8:  7D8802A6  mflr r12
+	   4: GETL       	LR, t2
+	   5: PUTL       	t2, R12
+	   6: INCEIPL       	$4
+
+	0x25483CCC:  2B800007  cmpli cr7,r0,7
+	   7: GETL       	R0, t4
+	   8: MOVL       	$0x7, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x25483CD0:  48013331  bl 0x25497000
+	  12: MOVL       	$0x25483CD4, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 73 25483CC4 16
+. 54 A0 07 7E 7D 88 02 A6 2B 80 00 07 48 01 33 31
+==== BB 74 (0x25483CD4) approx BBs exec'd 0 ====
+
+	0x25483CD4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25483CD8:  93C10008  stw r30,8(r1)
+	   6: GETL       	R30, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x8, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x25483CDC:  7FC802A6  mflr r30
+	  11: GETL       	LR, t8
+	  12: PUTL       	t8, R30
+	  13: INCEIPL       	$4
+
+	0x25483CE0:  7D8803A6  mtlr r12
+	  14: GETL       	R12, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0x25483CE4:  419D0028  bc 12,29,0x25483D0C
+	  17: Js29o       	$0x25483D0C
+
+
+
+. 74 25483CD4 20
+. 94 21 FF F0 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 41 9D 00 28
+==== BB 75 (0x25483CE8) approx BBs exec'd 0 ====
+
+	0x25483CE8:  817E0490  lwz r11,1168(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x490, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25483CEC:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25483CF0:  7CCB002E  lwzx r6,r11,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R11, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R6
+	  14: INCEIPL       	$4
+
+	0x25483CF4:  7D265A14  add r9,r6,r11
+	  15: GETL       	R6, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25483CF8:  7D2903A6  mtctr r9
+	  20: GETL       	R9, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x25483CFC:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+
+. 75 25483CE8 24
+. 81 7E 04 90 54 00 10 3A 7C CB 00 2E 7D 26 5A 14 7D 29 03 A6 4E 80 04 20
+==== BB 76 (0x25483D94) approx BBs exec'd 0 ====
+
+	0x25483D94:  81240000  lwz r9,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25483D98:  3863FFE8  addi r3,r3,-24
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFE8, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483D9C:  3884FFEC  addi r4,r4,-20
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0xFFFFFFEC, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0x25483DA0:  38A50005  addi r5,r5,5
+	  12: GETL       	R5, t8
+	  13: ADDL       	$0x5, t8
+	  14: PUTL       	t8, R5
+	  15: INCEIPL       	$4
+
+	0x25483DA4:  4BFFFF98  b 0x25483D3C
+	  16: JMPo       	$0x25483D3C  ($4)
+
+
+
+. 76 25483D94 20
+. 81 24 00 00 38 63 FF E8 38 84 FF EC 38 A5 00 05 4B FF FF 98
+==== BB 77 (0x25483D3C) approx BBs exec'd 0 ====
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x18, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  10: GETL       	R5, t8
+	  11: ADCL       	$0xFFFFFFF8, t8  (-wCa)
+	  12: PUTL       	t8, R5
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  16: GETL       	R4, t12
+	  17: ADDL       	$0x1C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R9
+	  20: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  21: GETL       	R0, t16
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  26: GETL       	R4, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R4
+	  29: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  30: GETL       	R3, t22
+	  31: ADDL       	$0x20, t22
+	  32: PUTL       	t22, R3
+	  33: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  34: Jc02o       	$0x25483D0C
+
+
+
+. 77 25483D3C 32
+. 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 78 (0x25483D0C) approx BBs exec'd 0 ====
+
+	0x25483D0C:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25483D10:  91230000  stw r9,0(r3)
+	   4: GETL       	R9, t4
+	   5: GETL       	R3, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0x25483D14:  81240004  lwz r9,4(r4)
+	   8: GETL       	R4, t8
+	   9: ADDL       	$0x4, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R9
+	  12: INCEIPL       	$4
+
+	0x25483D18:  90030004  stw r0,4(r3)
+	  13: GETL       	R0, t12
+	  14: GETL       	R3, t14
+	  15: ADDL       	$0x4, t14
+	  16: STL       	t12, (t14)
+	  17: INCEIPL       	$4
+
+	0x25483D1C:  80040008  lwz r0,8(r4)
+	  18: GETL       	R4, t16
+	  19: ADDL       	$0x8, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R0
+	  22: INCEIPL       	$4
+
+	0x25483D20:  91230008  stw r9,8(r3)
+	  23: GETL       	R9, t20
+	  24: GETL       	R3, t22
+	  25: ADDL       	$0x8, t22
+	  26: STL       	t20, (t22)
+	  27: INCEIPL       	$4
+
+	0x25483D24:  8124000C  lwz r9,12(r4)
+	  28: GETL       	R4, t24
+	  29: ADDL       	$0xC, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R9
+	  32: INCEIPL       	$4
+
+	0x25483D28:  9003000C  stw r0,12(r3)
+	  33: GETL       	R0, t28
+	  34: GETL       	R3, t30
+	  35: ADDL       	$0xC, t30
+	  36: STL       	t28, (t30)
+	  37: INCEIPL       	$4
+
+	0x25483D2C:  80040010  lwz r0,16(r4)
+	  38: GETL       	R4, t32
+	  39: ADDL       	$0x10, t32
+	  40: LDL       	(t32), t34
+	  41: PUTL       	t34, R0
+	  42: INCEIPL       	$4
+
+	0x25483D30:  91230010  stw r9,16(r3)
+	  43: GETL       	R9, t36
+	  44: GETL       	R3, t38
+	  45: ADDL       	$0x10, t38
+	  46: STL       	t36, (t38)
+	  47: INCEIPL       	$4
+
+	0x25483D34:  81240014  lwz r9,20(r4)
+	  48: GETL       	R4, t40
+	  49: ADDL       	$0x14, t40
+	  50: LDL       	(t40), t42
+	  51: PUTL       	t42, R9
+	  52: INCEIPL       	$4
+
+	0x25483D38:  90030014  stw r0,20(r3)
+	  53: GETL       	R0, t44
+	  54: GETL       	R3, t46
+	  55: ADDL       	$0x14, t46
+	  56: STL       	t44, (t46)
+	  57: INCEIPL       	$4
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	  58: GETL       	R4, t48
+	  59: ADDL       	$0x18, t48
+	  60: LDL       	(t48), t50
+	  61: PUTL       	t50, R0
+	  62: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	  63: GETL       	R9, t52
+	  64: GETL       	R3, t54
+	  65: ADDL       	$0x18, t54
+	  66: STL       	t52, (t54)
+	  67: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  68: GETL       	R5, t56
+	  69: ADCL       	$0xFFFFFFF8, t56  (-wCa)
+	  70: PUTL       	t56, R5
+	  71: CMP0L       	t56, t58  (-rSo)
+	  72: ICRFL       	t58, $0x0, CR
+	  73: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  74: GETL       	R4, t60
+	  75: ADDL       	$0x1C, t60
+	  76: LDL       	(t60), t62
+	  77: PUTL       	t62, R9
+	  78: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  79: GETL       	R0, t64
+	  80: GETL       	R3, t66
+	  81: ADDL       	$0x1C, t66
+	  82: STL       	t64, (t66)
+	  83: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  84: GETL       	R4, t68
+	  85: ADDL       	$0x20, t68
+	  86: PUTL       	t68, R4
+	  87: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  88: GETL       	R3, t70
+	  89: ADDL       	$0x20, t70
+	  90: PUTL       	t70, R3
+	  91: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  92: Jc02o       	$0x25483D0C
+
+
+
+. 78 25483D0C 80
+. 80 04 00 00 91 23 00 00 81 24 00 04 90 03 00 04 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 79 (0x25483D5C) approx BBs exec'd 0 ====
+
+	0x25483D5C:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0x25483D60:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x25483D64:  91230000  stw r9,0(r3)
+	   9: GETL       	R9, t6
+	  10: GETL       	R3, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25483D68:  4E800020  blr
+	  13: GETL       	LR, t10
+	  14: JMPo-r       	t10  ($4)
+
+
+
+. 79 25483D5C 16
+. 83 C1 00 08 38 21 00 10 91 23 00 00 4E 80 00 20
+==== BB 80 (0x25483C58) approx BBs exec'd 0 ====
+
+	0x25483C58:  5786003A  rlwinm r6,r28,0,0,29
+	   0: GETL       	R28, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x25483C5C:  578507BE  rlwinm r5,r28,0,30,31
+	   4: GETL       	R28, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0x25483C60:  7FFF3214  add r31,r31,r6
+	   8: GETL       	R31, t4
+	   9: GETL       	R6, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R31
+	  12: INCEIPL       	$4
+
+	0x25483C64:  7FBD3214  add r29,r29,r6
+	  13: GETL       	R29, t8
+	  14: GETL       	R6, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0x25483C68:  2C850000  cmpi cr1,r5,0
+	  18: GETL       	R5, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0x25483C6C:  4186001C  bc 12,6,0x25483C88
+	  22: Js06o       	$0x25483C88
+
+
+
+. 80 25483C58 24
+. 57 86 00 3A 57 85 07 BE 7F FF 32 14 7F BD 32 14 2C 85 00 00 41 86 00 1C
+==== BB 81 (0x25483C88) approx BBs exec'd 0 ====
+
+	0x25483C88:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25483C8C:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483C90:  83810010  lwz r28,16(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x25483C94:  8361000C  lwz r27,12(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xC, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0x25483C98:  7D0803A6  mtlr r8
+	  18: GETL       	R8, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x25483C9C:  83A10014  lwz r29,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x25483CA0:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x25483CA4:  83E1001C  lwz r31,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0x25483CA8:  38210020  addi r1,r1,32
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0x25483CAC:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+
+. 81 25483C88 40
+. 81 01 00 24 7F 63 DB 78 83 81 00 10 83 61 00 0C 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 82 (0x254719D8) approx BBs exec'd 0 ====
+
+	0x254719D8:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254719DC:  48006B91  bl 0x2547856C
+	   3: MOVL       	$0x254719E0, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547856C  ($4)
+
+
+
+. 82 254719D8 8
+. 7F 83 E3 78 48 00 6B 91
+==== BB 83 _dl_setup_hash(0x2547856C) approx BBs exec'd 0 ====
+
+	0x2547856C:  81230030  lwz r9,48(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25478570:  9421FFF0  stwu r1,-16(r1)
+	   5: GETL       	R1, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0xFFFFFFF0, t6
+	   8: PUTL       	t6, R1
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x25478574:  2F890000  cmpi cr7,r9,0
+	  11: GETL       	R9, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x25478578:  419E0024  bc 12,30,0x2547859C
+	  15: Js30o       	$0x2547859C
+
+
+
+. 83 2547856C 16
+. 81 23 00 30 94 21 FF F0 2F 89 00 00 41 9E 00 24
+==== BB 84 (0x2547857C) approx BBs exec'd 0 ====
+
+	0x2547857C:  81690004  lwz r11,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25478580:  814B0000  lwz r10,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x25478584:  388B0008  addi r4,r11,8
+	   9: GETL       	R11, t8
+	  10: ADDL       	$0x8, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25478588:  90830170  stw r4,368(r3)
+	  13: GETL       	R4, t10
+	  14: GETL       	R3, t12
+	  15: ADDL       	$0x170, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547858C:  5549103A  rlwinm r9,r10,2,0,29
+	  18: GETL       	R10, t14
+	  19: SHLL       	$0x2, t14
+	  20: PUTL       	t14, R9
+	  21: INCEIPL       	$4
+
+	0x25478590:  9143016C  stw r10,364(r3)
+	  22: GETL       	R10, t16
+	  23: GETL       	R3, t18
+	  24: ADDL       	$0x16C, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x25478594:  7C044A14  add r0,r4,r9
+	  27: GETL       	R4, t20
+	  28: GETL       	R9, t22
+	  29: ADDL       	t20, t22
+	  30: PUTL       	t22, R0
+	  31: INCEIPL       	$4
+
+	0x25478598:  90030174  stw r0,372(r3)
+	  32: GETL       	R0, t24
+	  33: GETL       	R3, t26
+	  34: ADDL       	$0x174, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x2547859C:  38210010  addi r1,r1,16
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x10, t28
+	  39: PUTL       	t28, R1
+	  40: INCEIPL       	$4
+
+	0x254785A0:  4E800020  blr
+	  41: GETL       	LR, t30
+	  42: JMPo-r       	t30  ($4)
+
+
+
+. 84 2547857C 40
+. 81 69 00 04 81 4B 00 00 38 8B 00 08 90 83 01 70 55 49 10 3A 91 43 01 6C 7C 04 4A 14 90 03 01 74 38 21 00 10 4E 80 00 20
+==== BB 85 (0x254719E0) approx BBs exec'd 0 ====
+
+	0x254719E0:  80FE04FC  lwz r7,1276(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254719E4:  80DE0500  lwz r6,1280(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x500, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x254719E8:  3940FFFF  li r10,-1
+	  10: MOVL       	$0xFFFFFFFF, t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x254719EC:  80BE04A4  lwz r5,1188(r30)
+	  13: GETL       	R30, t10
+	  14: ADDL       	$0x4A4, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0x254719F0:  39000001  li r8,1
+	  18: MOVL       	$0x1, t14
+	  19: PUTL       	t14, R8
+	  20: INCEIPL       	$4
+
+	0x254719F4:  817E04C0  lwz r11,1216(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x4C0, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R11
+	  25: INCEIPL       	$4
+
+	0x254719F8:  7F63DB78  or r3,r27,r27
+	  26: GETL       	R27, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x254719FC:  809E0024  lwz r4,36(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x24, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R4
+	  33: INCEIPL       	$4
+
+	0x25471A00:  915D03E4  stw r10,996(r29)
+	  34: GETL       	R10, t26
+	  35: GETL       	R29, t28
+	  36: ADDL       	$0x3E4, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x25471A04:  939D01CC  stw r28,460(r29)
+	  39: GETL       	R28, t30
+	  40: GETL       	R29, t32
+	  41: ADDL       	$0x1CC, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x25471A08:  911D0330  stw r8,816(r29)
+	  44: GETL       	R8, t34
+	  45: GETL       	R29, t36
+	  46: ADDL       	$0x330, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0x25471A0C:  90FD0358  stw r7,856(r29)
+	  49: GETL       	R7, t38
+	  50: GETL       	R29, t40
+	  51: ADDL       	$0x358, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0x25471A10:  90DD035C  stw r6,860(r29)
+	  54: GETL       	R6, t42
+	  55: GETL       	R29, t44
+	  56: ADDL       	$0x35C, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0x25471A14:  90BD0360  stw r5,864(r29)
+	  59: GETL       	R5, t46
+	  60: GETL       	R29, t48
+	  61: ADDL       	$0x360, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0x25471A18:  902B0000  stw r1,0(r11)
+	  64: GETL       	R1, t50
+	  65: GETL       	R11, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x25471A1C:  4800D629  bl 0x2547F044
+	  68: MOVL       	$0x25471A20, t54
+	  69: PUTL       	t54, LR
+	  70: JMPo-c       	$0x2547F044  ($4)
+
+
+
+. 85 254719E0 64
+. 80 FE 04 FC 80 DE 05 00 39 40 FF FF 80 BE 04 A4 39 00 00 01 81 7E 04 C0 7F 63 DB 78 80 9E 00 24 91 5D 03 E4 93 9D 01 CC 91 1D 03 30 90 FD 03 58 90 DD 03 5C 90 BD 03 60 90 2B 00 00 48 00 D6 29
+==== BB 86 _dl_sysdep_start(0x2547F044) approx BBs exec'd 0 ====
+
+	0x2547F044:  7D8802A6  mflr r12
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0x2547F048:  9421FDE0  stwu r1,-544(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFDE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547F04C:  38E30004  addi r7,r3,4
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x4, t6
+	  11: PUTL       	t6, R7
+	  12: INCEIPL       	$4
+
+	0x2547F050:  48017FB1  bl 0x25497000
+	  13: MOVL       	$0x2547F054, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 86 2547F044 16
+. 7D 88 02 A6 94 21 FD E0 38 E3 00 04 48 01 7F B1
+==== BB 87 (0x2547F054) approx BBs exec'd 0 ====
+
+	0x2547F054:  93C10218  stw r30,536(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x218, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547F058:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547F05C:  91810224  stw r12,548(r1)
+	   8: GETL       	R12, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x224, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547F060:  81030000  lwz r8,0(r3)
+	  13: GETL       	R3, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R8
+	  16: INCEIPL       	$4
+
+	0x2547F064:  80BE04C0  lwz r5,1216(r30)
+	  17: GETL       	R30, t14
+	  18: ADDL       	$0x4C0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R5
+	  21: INCEIPL       	$4
+
+	0x2547F068:  5506103A  rlwinm r6,r8,2,0,29
+	  22: GETL       	R8, t18
+	  23: SHLL       	$0x2, t18
+	  24: PUTL       	t18, R6
+	  25: INCEIPL       	$4
+
+	0x2547F06C:  813E04D4  lwz r9,1236(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4D4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0x2547F070:  7D463A14  add r10,r6,r7
+	  31: GETL       	R6, t24
+	  32: GETL       	R7, t26
+	  33: ADDL       	t24, t26
+	  34: PUTL       	t26, R10
+	  35: INCEIPL       	$4
+
+	0x2547F074:  90650000  stw r3,0(r5)
+	  36: GETL       	R3, t28
+	  37: GETL       	R5, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x2547F078:  800A0004  lwz r0,4(r10)
+	  40: GETL       	R10, t32
+	  41: ADDL       	$0x4, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R0
+	  44: INCEIPL       	$4
+
+	0x2547F07C:  394A0004  addi r10,r10,4
+	  45: GETL       	R10, t36
+	  46: ADDL       	$0x4, t36
+	  47: PUTL       	t36, R10
+	  48: INCEIPL       	$4
+
+	0x2547F080:  807E04D0  lwz r3,1232(r30)
+	  49: GETL       	R30, t38
+	  50: ADDL       	$0x4D0, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R3
+	  53: INCEIPL       	$4
+
+	0x2547F084:  2F800000  cmpi cr7,r0,0
+	  54: GETL       	R0, t42
+	  55: CMP0L       	t42, t44  (-rSo)
+	  56: ICRFL       	t44, $0x7, CR
+	  57: INCEIPL       	$4
+
+	0x2547F088:  817E04B8  lwz r11,1208(r30)
+	  58: GETL       	R30, t46
+	  59: ADDL       	$0x4B8, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R11
+	  62: INCEIPL       	$4
+
+	0x2547F08C:  92A101F4  stw r21,500(r1)
+	  63: GETL       	R21, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x1F4, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x2547F090:  3AA00000  li r21,0
+	  68: MOVL       	$0x0, t54
+	  69: PUTL       	t54, R21
+	  70: INCEIPL       	$4
+
+	0x2547F094:  92C101F8  stw r22,504(r1)
+	  71: GETL       	R22, t56
+	  72: GETL       	R1, t58
+	  73: ADDL       	$0x1F8, t58
+	  74: STL       	t56, (t58)
+	  75: INCEIPL       	$4
+
+	0x2547F098:  3AC00000  li r22,0
+	  76: MOVL       	$0x0, t60
+	  77: PUTL       	t60, R22
+	  78: INCEIPL       	$4
+
+	0x2547F09C:  92E101FC  stw r23,508(r1)
+	  79: GETL       	R23, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x1FC, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x2547F0A0:  7C972378  or r23,r4,r4
+	  84: GETL       	R4, t66
+	  85: PUTL       	t66, R23
+	  86: INCEIPL       	$4
+
+	0x2547F0A4:  93010200  stw r24,512(r1)
+	  87: GETL       	R24, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x200, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0x2547F0A8:  3B000000  li r24,0
+	  92: MOVL       	$0x0, t72
+	  93: PUTL       	t72, R24
+	  94: INCEIPL       	$4
+
+	0x2547F0AC:  93210204  stw r25,516(r1)
+	  95: GETL       	R25, t74
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x204, t76
+	  98: STL       	t74, (t76)
+	  99: INCEIPL       	$4
+
+	0x2547F0B0:  3B200000  li r25,0
+	 100: MOVL       	$0x0, t78
+	 101: PUTL       	t78, R25
+	 102: INCEIPL       	$4
+
+	0x2547F0B4:  93E1021C  stw r31,540(r1)
+	 103: GETL       	R31, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x21C, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x2547F0B8:  3BE00000  li r31,0
+	 108: MOVL       	$0x0, t84
+	 109: PUTL       	t84, R31
+	 110: INCEIPL       	$4
+
+	0x2547F0BC:  93410208  stw r26,520(r1)
+	 111: GETL       	R26, t86
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x208, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0x2547F0C0:  9361020C  stw r27,524(r1)
+	 116: GETL       	R27, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0x20C, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x2547F0C4:  93810210  stw r28,528(r1)
+	 121: GETL       	R28, t94
+	 122: GETL       	R1, t96
+	 123: ADDL       	$0x210, t96
+	 124: STL       	t94, (t96)
+	 125: INCEIPL       	$4
+
+	0x2547F0C8:  93A10214  stw r29,532(r1)
+	 126: GETL       	R29, t98
+	 127: GETL       	R1, t100
+	 128: ADDL       	$0x214, t100
+	 129: STL       	t98, (t100)
+	 130: INCEIPL       	$4
+
+	0x2547F0CC:  91030000  stw r8,0(r3)
+	 131: GETL       	R8, t102
+	 132: GETL       	R3, t104
+	 133: STL       	t102, (t104)
+	 134: INCEIPL       	$4
+
+	0x2547F0D0:  90E90000  stw r7,0(r9)
+	 135: GETL       	R7, t106
+	 136: GETL       	R9, t108
+	 137: STL       	t106, (t108)
+	 138: INCEIPL       	$4
+
+	0x2547F0D4:  914B0000  stw r10,0(r11)
+	 139: GETL       	R10, t110
+	 140: GETL       	R11, t112
+	 141: STL       	t110, (t112)
+	 142: INCEIPL       	$4
+
+	0x2547F0D8:  419E0010  bc 12,30,0x2547F0E8
+	 143: Js30o       	$0x2547F0E8
+
+
+
+. 87 2547F054 136
+. 93 C1 02 18 7F C8 02 A6 91 81 02 24 81 03 00 00 80 BE 04 C0 55 06 10 3A 81 3E 04 D4 7D 46 3A 14 90 65 00 00 80 0A 00 04 39 4A 00 04 80 7E 04 D0 2F 80 00 00 81 7E 04 B8 92 A1 01 F4 3A A0 00 00 92 C1 01 F8 3A C0 00 00 92 E1 01 FC 7C 97 23 78 93 01 02 00 3B 00 00 00 93 21 02 04 3B 20 00 00 93 E1 02 1C 3B E0 00 00 93 41 02 08 93 61 02 0C 93 81 02 10 93 A1 02 14 91 03 00 00 90 E9 00 00 91 4B 00 00 41 9E 00 10
+==== BB 88 (0x2547F0DC) approx BBs exec'd 0 ====
+
+	0x2547F0DC:  848A0004  lwzu r4,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R10
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x2547F0E0:  2F840000  cmpi cr7,r4,0
+	   6: GETL       	R4, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x2547F0E4:  409EFFF8  bc 4,30,0x2547F0DC
+	  10: Jc30o       	$0x2547F0DC
+
+
+
+. 88 2547F0DC 12
+. 84 8A 00 04 2F 84 00 00 40 9E FF F8
+==== BB 89 (0x2547F0E8) approx BBs exec'd 0 ====
+
+	0x2547F0E8:  3B6A0013  addi r27,r10,19
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x13, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x2547F0EC:  380A0004  addi r0,r10,4
+	   4: GETL       	R10, t2
+	   5: ADDL       	$0x4, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x2547F0F0:  576B0036  rlwinm r11,r27,0,0,27
+	   8: GETL       	R27, t4
+	   9: ANDL       	$0xFFFFFFF0, t4
+	  10: PUTL       	t4, R11
+	  11: INCEIPL       	$4
+
+	0x2547F0F4:  834B0000  lwz r26,0(r11)
+	  12: GETL       	R11, t6
+	  13: LDL       	(t6), t8
+	  14: PUTL       	t8, R26
+	  15: INCEIPL       	$4
+
+	0x2547F0F8:  2F9A0010  cmpi cr7,r26,16
+	  16: GETL       	R26, t10
+	  17: MOVL       	$0x10, t14
+	  18: CMPL       	t10, t14, t12  (-rSo)
+	  19: ICRFL       	t12, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x2547F0FC:  419D0008  bc 12,29,0x2547F104
+	  21: Js29o       	$0x2547F104
+
+
+
+. 89 2547F0E8 24
+. 3B 6A 00 13 38 0A 00 04 57 6B 00 36 83 4B 00 00 2F 9A 00 10 41 9D 00 08
+==== BB 90 (0x2547F104) approx BBs exec'd 0 ====
+
+	0x2547F104:  7C080378  or r8,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0x2547F108:  835E04F4  lwz r26,1268(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x4F4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547F10C:  81680000  lwz r11,0(r8)
+	   8: GETL       	R8, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x2547F110:  3B800000  li r28,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R28
+	  14: INCEIPL       	$4
+
+	0x2547F114:  813E03EC  lwz r9,1004(r30)
+	  15: GETL       	R30, t12
+	  16: ADDL       	$0x3EC, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x2547F118:  2C0B0000  cmpi cr0,r11,0
+	  20: GETL       	R11, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x2547F11C:  83BE04E0  lwz r29,1248(r30)
+	  24: GETL       	R30, t20
+	  25: ADDL       	$0x4E0, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R29
+	  28: INCEIPL       	$4
+
+	0x2547F120:  90090000  stw r0,0(r9)
+	  29: GETL       	R0, t24
+	  30: GETL       	R9, t26
+	  31: STL       	t24, (t26)
+	  32: INCEIPL       	$4
+
+	0x2547F124:  93A101E0  stw r29,480(r1)
+	  33: GETL       	R29, t28
+	  34: GETL       	R1, t30
+	  35: ADDL       	$0x1E0, t30
+	  36: STL       	t28, (t30)
+	  37: INCEIPL       	$4
+
+	0x2547F128:  939A000C  stw r28,12(r26)
+	  38: GETL       	R28, t32
+	  39: GETL       	R26, t34
+	  40: ADDL       	$0xC, t34
+	  41: STL       	t32, (t34)
+	  42: INCEIPL       	$4
+
+	0x2547F12C:  4182004C  bc 12,2,0x2547F178
+	  43: Js02o       	$0x2547F178
+
+
+
+. 90 2547F104 44
+. 7C 08 03 78 83 5E 04 F4 81 68 00 00 3B 80 00 00 81 3E 03 EC 2C 0B 00 00 83 BE 04 E0 90 09 00 00 93 A1 01 E0 93 9A 00 0C 41 82 00 4C
+==== BB 91 (0x2547F130) approx BBs exec'd 0 ====
+
+	0x2547F130:  7D6A5B78  or r10,r11,r11
+	   0: GETL       	R11, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x2547F134:  380AFFFD  addi r0,r10,-3
+	   3: GETL       	R10, t2
+	   4: ADDL       	$0xFFFFFFFD, t2
+	   5: PUTL       	t2, R0
+	   6: INCEIPL       	$4
+
+	0x2547F138:  28800014  cmpli cr1,r0,20
+	   7: GETL       	R0, t4
+	   8: MOVL       	$0x14, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x2547F13C:  41850024  bc 12,5,0x2547F160
+	  12: Js05o       	$0x2547F160
+
+
+
+. 91 2547F130 16
+. 7D 6A 5B 78 38 0A FF FD 28 80 00 14 41 85 00 24
+==== BB 92 (0x2547F140) approx BBs exec'd 0 ====
+
+	0x2547F140:  817E03F4  lwz r11,1012(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x3F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547F144:  5405103A  rlwinm r5,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547F148:  7C65582E  lwzx r3,r5,r11
+	   9: GETL       	R11, t6
+	  10: GETL       	R5, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x2547F14C:  7CE35A14  add r7,r3,r11
+	  15: GETL       	R3, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R7
+	  19: INCEIPL       	$4
+
+	0x2547F150:  7CE903A6  mtctr r7
+	  20: GETL       	R7, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x2547F154:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+
+. 92 2547F140 24
+. 81 7E 03 F4 54 05 10 3A 7C 65 58 2E 7C E3 5A 14 7C E9 03 A6 4E 80 04 20
+==== BB 93 (0x2547F160) approx BBs exec'd 0 ====
+
+	0x2547F160:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F164:  7CAB5030  slw r11,r5,r10
+	   3: GETL       	R5, t4
+	   4: GETL       	R10, t2
+	   5: SHLL       	t2, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547F168:  85480008  lwzu r10,8(r8)
+	   8: GETL       	R8, t6
+	   9: ADDL       	$0x8, t6
+	  10: PUTL       	t6, R8
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R10
+	  13: INCEIPL       	$4
+
+	0x2547F16C:  7FFF5B78  or r31,r31,r11
+	  14: GETL       	R31, t10
+	  15: GETL       	R11, t12
+	  16: ORL       	t12, t10
+	  17: PUTL       	t10, R31
+	  18: INCEIPL       	$4
+
+	0x2547F170:  2F8A0000  cmpi cr7,r10,0
+	  19: GETL       	R10, t14
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x2547F174:  409EFFC0  bc 4,30,0x2547F134
+	  23: Jc30o       	$0x2547F134
+
+
+
+. 93 2547F160 24
+. 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FF C0
+==== BB 94 (0x2547F134) approx BBs exec'd 0 ====
+
+	0x2547F134:  380AFFFD  addi r0,r10,-3
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0xFFFFFFFD, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x2547F138:  28800014  cmpli cr1,r0,20
+	   4: GETL       	R0, t2
+	   5: MOVL       	$0x14, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547F13C:  41850024  bc 12,5,0x2547F160
+	   9: Js05o       	$0x2547F160
+
+
+
+. 94 2547F134 12
+. 38 0A FF FD 28 80 00 14 41 85 00 24
+==== BB 95 (0x2547F3D4) approx BBs exec'd 0 ====
+
+	0x2547F3D4:  813E04F0  lwz r9,1264(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F3D8:  2F090000  cmpi cr6,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547F3DC:  41BAFD84  bc 13,26,0x2547F160
+	   9: Js26o       	$0x2547F160
+
+
+
+. 95 2547F3D4 12
+. 81 3E 04 F0 2F 09 00 00 41 BA FD 84
+==== BB 96 (0x2547F3E0) approx BBs exec'd 0 ====
+
+	0x2547F3E0:  80680004  lwz r3,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547F3E4:  90690000  stw r3,0(r9)
+	   5: GETL       	R3, t4
+	   6: GETL       	R9, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547F3E8:  81480000  lwz r10,0(r8)
+	   9: GETL       	R8, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R10
+	  12: INCEIPL       	$4
+
+	0x2547F3EC:  38A00001  li r5,1
+	  13: MOVL       	$0x1, t12
+	  14: PUTL       	t12, R5
+	  15: INCEIPL       	$4
+
+	0x2547F3F0:  7CAB5030  slw r11,r5,r10
+	  16: GETL       	R5, t16
+	  17: GETL       	R10, t14
+	  18: SHLL       	t14, t16
+	  19: PUTL       	t16, R11
+	  20: INCEIPL       	$4
+
+	0x2547F3F4:  85480008  lwzu r10,8(r8)
+	  21: GETL       	R8, t18
+	  22: ADDL       	$0x8, t18
+	  23: PUTL       	t18, R8
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R10
+	  26: INCEIPL       	$4
+
+	0x2547F3F8:  7FFF5B78  or r31,r31,r11
+	  27: GETL       	R31, t22
+	  28: GETL       	R11, t24
+	  29: ORL       	t24, t22
+	  30: PUTL       	t22, R31
+	  31: INCEIPL       	$4
+
+	0x2547F3FC:  2F8A0000  cmpi cr7,r10,0
+	  32: GETL       	R10, t26
+	  33: CMP0L       	t26, t28  (-rSo)
+	  34: ICRFL       	t28, $0x7, CR
+	  35: INCEIPL       	$4
+
+	0x2547F400:  409EFD34  bc 4,30,0x2547F134
+	  36: Jc30o       	$0x2547F134
+
+
+
+. 96 2547F3E0 36
+. 80 68 00 04 90 69 00 00 81 48 00 00 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 34
+==== BB 97 (0x2547F158) approx BBs exec'd 0 ====
+
+	0x2547F158:  83A80004  lwz r29,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547F15C:  93BA003C  stw r29,60(r26)
+	   5: GETL       	R29, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x3C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547F160:  38A00001  li r5,1
+	  10: MOVL       	$0x1, t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x2547F164:  7CAB5030  slw r11,r5,r10
+	  13: GETL       	R5, t12
+	  14: GETL       	R10, t10
+	  15: SHLL       	t10, t12
+	  16: PUTL       	t12, R11
+	  17: INCEIPL       	$4
+
+	0x2547F168:  85480008  lwzu r10,8(r8)
+	  18: GETL       	R8, t14
+	  19: ADDL       	$0x8, t14
+	  20: PUTL       	t14, R8
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R10
+	  23: INCEIPL       	$4
+
+	0x2547F16C:  7FFF5B78  or r31,r31,r11
+	  24: GETL       	R31, t18
+	  25: GETL       	R11, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R31
+	  28: INCEIPL       	$4
+
+	0x2547F170:  2F8A0000  cmpi cr7,r10,0
+	  29: GETL       	R10, t22
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x7, CR
+	  32: INCEIPL       	$4
+
+	0x2547F174:  409EFFC0  bc 4,30,0x2547F134
+	  33: Jc30o       	$0x2547F134
+
+
+
+. 97 2547F158 32
+. 83 A8 00 04 93 BA 00 3C 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FF C0
+==== BB 98 (0x2547F4BC) approx BBs exec'd 0 ====
+
+	0x2547F4BC:  81480004  lwz r10,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547F4C0:  915A0004  stw r10,4(r26)
+	   5: GETL       	R10, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x4, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547F4C4:  81480000  lwz r10,0(r8)
+	  10: GETL       	R8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0x2547F4C8:  4BFFFF24  b 0x2547F3EC
+	  14: JMPo       	$0x2547F3EC  ($4)
+
+
+
+. 98 2547F4BC 16
+. 81 48 00 04 91 5A 00 04 81 48 00 00 4B FF FF 24
+==== BB 99 (0x2547F3EC) approx BBs exec'd 0 ====
+
+	0x2547F3EC:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F3F0:  7CAB5030  slw r11,r5,r10
+	   3: GETL       	R5, t4
+	   4: GETL       	R10, t2
+	   5: SHLL       	t2, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547F3F4:  85480008  lwzu r10,8(r8)
+	   8: GETL       	R8, t6
+	   9: ADDL       	$0x8, t6
+	  10: PUTL       	t6, R8
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R10
+	  13: INCEIPL       	$4
+
+	0x2547F3F8:  7FFF5B78  or r31,r31,r11
+	  14: GETL       	R31, t10
+	  15: GETL       	R11, t12
+	  16: ORL       	t12, t10
+	  17: PUTL       	t10, R31
+	  18: INCEIPL       	$4
+
+	0x2547F3FC:  2F8A0000  cmpi cr7,r10,0
+	  19: GETL       	R10, t14
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x2547F400:  409EFD34  bc 4,30,0x2547F134
+	  23: Jc30o       	$0x2547F134
+
+
+
+. 99 2547F3EC 24
+. 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 34
+==== BB 100 (0x2547F418) approx BBs exec'd 0 ====
+
+	0x2547F418:  81280004  lwz r9,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F41C:  913A001C  stw r9,28(r26)
+	   5: GETL       	R9, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x1C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547F420:  81480000  lwz r10,0(r8)
+	  10: GETL       	R8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0x2547F424:  4BFFFFC8  b 0x2547F3EC
+	  14: JMPo       	$0x2547F3EC  ($4)
+
+
+
+. 100 2547F418 16
+. 81 28 00 04 91 3A 00 1C 81 48 00 00 4B FF FF C8
+==== BB 101 (0x2547F4EC) approx BBs exec'd 0 ====
+
+	0x2547F4EC:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F4F0:  82C80004  lwz r22,4(r8)
+	   3: GETL       	R8, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0x2547F4F4:  7CAB5030  slw r11,r5,r10
+	   8: GETL       	R5, t8
+	   9: GETL       	R10, t6
+	  10: SHLL       	t6, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547F4F8:  85480008  lwzu r10,8(r8)
+	  13: GETL       	R8, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R8
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547F4FC:  7FFF5B78  or r31,r31,r11
+	  19: GETL       	R31, t14
+	  20: GETL       	R11, t16
+	  21: ORL       	t16, t14
+	  22: PUTL       	t14, R31
+	  23: INCEIPL       	$4
+
+	0x2547F500:  2F8A0000  cmpi cr7,r10,0
+	  24: GETL       	R10, t18
+	  25: CMP0L       	t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0x2547F504:  409EFC30  bc 4,30,0x2547F134
+	  28: Jc30o       	$0x2547F134
+
+
+
+. 101 2547F4EC 28
+. 38 A0 00 01 82 C8 00 04 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 30
+==== BB 102 (0x2547F4CC) approx BBs exec'd 0 ====
+
+	0x2547F4CC:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F4D0:  82A80004  lwz r21,4(r8)
+	   3: GETL       	R8, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R21
+	   7: INCEIPL       	$4
+
+	0x2547F4D4:  7CAB5030  slw r11,r5,r10
+	   8: GETL       	R5, t8
+	   9: GETL       	R10, t6
+	  10: SHLL       	t6, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547F4D8:  85480008  lwzu r10,8(r8)
+	  13: GETL       	R8, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R8
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547F4DC:  7FFF5B78  or r31,r31,r11
+	  19: GETL       	R31, t14
+	  20: GETL       	R11, t16
+	  21: ORL       	t16, t14
+	  22: PUTL       	t14, R31
+	  23: INCEIPL       	$4
+
+	0x2547F4E0:  2F8A0000  cmpi cr7,r10,0
+	  24: GETL       	R10, t18
+	  25: CMP0L       	t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0x2547F4E4:  409EFC50  bc 4,30,0x2547F134
+	  28: Jc30o       	$0x2547F134
+
+
+
+. 102 2547F4CC 28
+. 38 A0 00 01 82 A8 00 04 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 50
+==== BB 103 (0x2547F494) approx BBs exec'd 0 ====
+
+	0x2547F494:  80080004  lwz r0,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547F498:  38A00001  li r5,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547F49C:  81480000  lwz r10,0(r8)
+	   8: GETL       	R8, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R10
+	  11: INCEIPL       	$4
+
+	0x2547F4A0:  900101E0  stw r0,480(r1)
+	  12: GETL       	R0, t10
+	  13: GETL       	R1, t12
+	  14: ADDL       	$0x1E0, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0x2547F4A4:  7CAB5030  slw r11,r5,r10
+	  17: GETL       	R5, t16
+	  18: GETL       	R10, t14
+	  19: SHLL       	t14, t16
+	  20: PUTL       	t16, R11
+	  21: INCEIPL       	$4
+
+	0x2547F4A8:  85480008  lwzu r10,8(r8)
+	  22: GETL       	R8, t18
+	  23: ADDL       	$0x8, t18
+	  24: PUTL       	t18, R8
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R10
+	  27: INCEIPL       	$4
+
+	0x2547F4AC:  7FFF5B78  or r31,r31,r11
+	  28: GETL       	R31, t22
+	  29: GETL       	R11, t24
+	  30: ORL       	t24, t22
+	  31: PUTL       	t22, R31
+	  32: INCEIPL       	$4
+
+	0x2547F4B0:  2F8A0000  cmpi cr7,r10,0
+	  33: GETL       	R10, t26
+	  34: CMP0L       	t26, t28  (-rSo)
+	  35: ICRFL       	t28, $0x7, CR
+	  36: INCEIPL       	$4
+
+	0x2547F4B4:  409EFC80  bc 4,30,0x2547F134
+	  37: Jc30o       	$0x2547F134
+
+
+
+. 103 2547F494 36
+. 80 08 00 04 38 A0 00 01 81 48 00 00 90 01 01 E0 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 80
+==== BB 104 (0x2547F470) approx BBs exec'd 0 ====
+
+	0x2547F470:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F474:  80C80004  lwz r6,4(r8)
+	   3: GETL       	R8, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0x2547F478:  7CAB5030  slw r11,r5,r10
+	   8: GETL       	R5, t8
+	   9: GETL       	R10, t6
+	  10: SHLL       	t6, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547F47C:  85480008  lwzu r10,8(r8)
+	  13: GETL       	R8, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R8
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547F480:  7F393278  xor r25,r25,r6
+	  19: GETL       	R25, t14
+	  20: GETL       	R6, t16
+	  21: XORL       	t14, t16
+	  22: PUTL       	t16, R25
+	  23: INCEIPL       	$4
+
+	0x2547F484:  7FFF5B78  or r31,r31,r11
+	  24: GETL       	R31, t18
+	  25: GETL       	R11, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R31
+	  28: INCEIPL       	$4
+
+	0x2547F488:  2F8A0000  cmpi cr7,r10,0
+	  29: GETL       	R10, t22
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x7, CR
+	  32: INCEIPL       	$4
+
+	0x2547F48C:  409EFCA8  bc 4,30,0x2547F134
+	  33: Jc30o       	$0x2547F134
+
+
+
+. 104 2547F470 32
+. 38 A0 00 01 80 C8 00 04 7C AB 50 30 85 48 00 08 7F 39 32 78 7F FF 5B 78 2F 8A 00 00 40 9E FC A8
+==== BB 105 (0x2547F44C) approx BBs exec'd 0 ====
+
+	0x2547F44C:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F450:  81880004  lwz r12,4(r8)
+	   3: GETL       	R8, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x2547F454:  7CAB5030  slw r11,r5,r10
+	   8: GETL       	R5, t8
+	   9: GETL       	R10, t6
+	  10: SHLL       	t6, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547F458:  85480008  lwzu r10,8(r8)
+	  13: GETL       	R8, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R8
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547F45C:  7F186278  xor r24,r24,r12
+	  19: GETL       	R24, t14
+	  20: GETL       	R12, t16
+	  21: XORL       	t14, t16
+	  22: PUTL       	t16, R24
+	  23: INCEIPL       	$4
+
+	0x2547F460:  7FFF5B78  or r31,r31,r11
+	  24: GETL       	R31, t18
+	  25: GETL       	R11, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R31
+	  28: INCEIPL       	$4
+
+	0x2547F464:  2F8A0000  cmpi cr7,r10,0
+	  29: GETL       	R10, t22
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x7, CR
+	  32: INCEIPL       	$4
+
+	0x2547F468:  409EFCCC  bc 4,30,0x2547F134
+	  33: Jc30o       	$0x2547F134
+
+
+
+. 105 2547F44C 32
+. 38 A0 00 01 81 88 00 04 7C AB 50 30 85 48 00 08 7F 18 62 78 7F FF 5B 78 2F 8A 00 00 40 9E FC CC
+==== BB 106 (0x2547F3A4) approx BBs exec'd 0 ====
+
+	0x2547F3A4:  83680004  lwz r27,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x2547F3A8:  38A00001  li r5,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547F3AC:  809E04B4  lwz r4,1204(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x4B4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x2547F3B0:  3BE0FFFF  li r31,-1
+	  13: MOVL       	$0xFFFFFFFF, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0x2547F3B4:  81480000  lwz r10,0(r8)
+	  16: GETL       	R8, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R10
+	  19: INCEIPL       	$4
+
+	0x2547F3B8:  93640000  stw r27,0(r4)
+	  20: GETL       	R27, t16
+	  21: GETL       	R4, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0x2547F3BC:  7CAB5030  slw r11,r5,r10
+	  24: GETL       	R5, t22
+	  25: GETL       	R10, t20
+	  26: SHLL       	t20, t22
+	  27: PUTL       	t22, R11
+	  28: INCEIPL       	$4
+
+	0x2547F3C0:  85480008  lwzu r10,8(r8)
+	  29: GETL       	R8, t24
+	  30: ADDL       	$0x8, t24
+	  31: PUTL       	t24, R8
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R10
+	  34: INCEIPL       	$4
+
+	0x2547F3C4:  7FFF5B78  or r31,r31,r11
+	  35: GETL       	R31, t28
+	  36: GETL       	R11, t30
+	  37: ORL       	t30, t28
+	  38: PUTL       	t28, R31
+	  39: INCEIPL       	$4
+
+	0x2547F3C8:  2F8A0000  cmpi cr7,r10,0
+	  40: GETL       	R10, t32
+	  41: CMP0L       	t32, t34  (-rSo)
+	  42: ICRFL       	t34, $0x7, CR
+	  43: INCEIPL       	$4
+
+	0x2547F3CC:  409EFD68  bc 4,30,0x2547F134
+	  44: Jc30o       	$0x2547F134
+
+
+
+. 106 2547F3A4 44
+. 83 68 00 04 38 A0 00 01 80 9E 04 B4 3B E0 FF FF 81 48 00 00 93 64 00 00 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 68
+==== BB 107 (0x2547F3D0) approx BBs exec'd 0 ====
+
+	0x2547F3D0:  4BFFFDA8  b 0x2547F178
+	   0: JMPo       	$0x2547F178  ($4)
+
+
+
+. 107 2547F3D0 4
+. 4B FF FD A8
+==== BB 108 (0x2547F178) approx BBs exec'd 0 ====
+
+	0x2547F178:  38610050  addi r3,r1,80
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x50, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x2547F17C:  4800364D  bl 0x254827C8
+	   4: MOVL       	$0x2547F180, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0x254827C8  ($4)
+
+
+
+. 108 2547F178 8
+. 38 61 00 50 48 00 36 4D
+==== BB 109 uname(0x254827C8) approx BBs exec'd 0 ====
+
+	0x254827C8:  3800007A  li r0,122
+	   0: MOVL       	$0x7A, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254827CC:  44000002  sc
+	   3: JMPo-sys       	$0x254827D0  ($4)
+
+
+
+. 109 254827C8 8
+. 38 00 00 7A 44 00 00 02
+==== BB 110 (0x254827D0) approx BBs exec'd 0 ====
+
+	0x254827D0:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 110 254827D0 4
+. 4C A3 00 20
+==== BB 111 (0x2547F180) approx BBs exec'd 0 ====
+
+	0x2547F180:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547F184:  3BA10010  addi r29,r1,16
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x10, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x2547F188:  408201DC  bc 4,2,0x2547F364
+	   8: Jc02o       	$0x2547F364
+
+
+
+. 111 2547F180 12
+. 2C 03 00 00 3B A1 00 10 40 82 01 DC
+==== BB 112 (0x2547F18C) approx BBs exec'd 0 ====
+
+	0x2547F18C:  3BA100D2  addi r29,r1,210
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xD2, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x2547F190:  893D0000  lbz r9,0(r29)
+	   4: GETL       	R29, t2
+	   5: LDB       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x2547F194:  7FA3EB78  or r3,r29,r29
+	   8: GETL       	R29, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x2547F198:  38E00000  li r7,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x2547F19C:  39000000  li r8,0
+	  14: MOVL       	$0x0, t10
+	  15: PUTL       	t10, R8
+	  16: INCEIPL       	$4
+
+	0x2547F1A0:  3949FFD0  addi r10,r9,-48
+	  17: GETL       	R9, t12
+	  18: ADDL       	$0xFFFFFFD0, t12
+	  19: PUTL       	t12, R10
+	  20: INCEIPL       	$4
+
+	0x2547F1A4:  2B8A0009  cmpli cr7,r10,9
+	  21: GETL       	R10, t14
+	  22: MOVL       	$0x9, t18
+	  23: CMPUL       	t14, t18, t16  (-rSo)
+	  24: ICRFL       	t16, $0x7, CR
+	  25: INCEIPL       	$4
+
+	0x2547F1A8:  419D0068  bc 12,29,0x2547F210
+	  26: Js29o       	$0x2547F210
+
+
+
+. 112 2547F18C 32
+. 3B A1 00 D2 89 3D 00 00 7F A3 EB 78 38 E0 00 00 39 00 00 00 39 49 FF D0 2B 8A 00 09 41 9D 00 68
+==== BB 113 (0x2547F1AC) approx BBs exec'd 0 ====
+
+	0x2547F1AC:  89630001  lbz r11,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547F1B0:  3929FFD0  addi r9,r9,-48
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFD0, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547F1B4:  39430001  addi r10,r3,1
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x1, t6
+	  11: PUTL       	t6, R10
+	  12: INCEIPL       	$4
+
+	0x2547F1B8:  380BFFD0  addi r0,r11,-48
+	  13: GETL       	R11, t8
+	  14: ADDL       	$0xFFFFFFD0, t8
+	  15: PUTL       	t8, R0
+	  16: INCEIPL       	$4
+
+	0x2547F1BC:  2B800009  cmpli cr7,r0,9
+	  17: GETL       	R0, t10
+	  18: MOVL       	$0x9, t14
+	  19: CMPUL       	t10, t14, t12  (-rSo)
+	  20: ICRFL       	t12, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0x2547F1C0:  419D0020  bc 12,29,0x2547F1E0
+	  22: Js29o       	$0x2547F1E0
+
+
+
+. 113 2547F1AC 24
+. 89 63 00 01 39 29 FF D0 39 43 00 01 38 0B FF D0 2B 80 00 09 41 9D 00 20
+==== BB 114 (0x2547F1E0) approx BBs exec'd 0 ====
+
+	0x2547F1E0:  2C8B002E  cmpi cr1,r11,46
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x2E, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x2547F1E4:  54FB402E  rlwinm r27,r7,8,0,23
+	   5: GETL       	R7, t6
+	   6: SHLL       	$0x8, t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0x2547F1E8:  7F674B78  or r7,r27,r9
+	   9: GETL       	R27, t8
+	  10: GETL       	R9, t10
+	  11: ORL       	t10, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x2547F1EC:  39080001  addi r8,r8,1
+	  14: GETL       	R8, t12
+	  15: ADDL       	$0x1, t12
+	  16: PUTL       	t12, R8
+	  17: INCEIPL       	$4
+
+	0x2547F1F0:  386A0001  addi r3,r10,1
+	  18: GETL       	R10, t14
+	  19: ADDL       	$0x1, t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0x2547F1F4:  40860014  bc 4,6,0x2547F208
+	  22: Jc06o       	$0x2547F208
+
+
+
+. 114 2547F1E0 24
+. 2C 8B 00 2E 54 FB 40 2E 7F 67 4B 78 39 08 00 01 38 6A 00 01 40 86 00 14
+==== BB 115 (0x2547F1F8) approx BBs exec'd 0 ====
+
+	0x2547F1F8:  892A0001  lbz r9,1(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F1FC:  3B89FFD0  addi r28,r9,-48
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFD0, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x2547F200:  281C0009  cmpli cr0,r28,9
+	   9: GETL       	R28, t6
+	  10: MOVL       	$0x9, t10
+	  11: CMPUL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x2547F204:  4081FFA8  bc 4,1,0x2547F1AC
+	  14: Jc01o       	$0x2547F1AC
+
+
+
+. 115 2547F1F8 16
+. 89 2A 00 01 3B 89 FF D0 28 1C 00 09 40 81 FF A8
+==== BB 116 (0x2547F208) approx BBs exec'd 0 ====
+
+	0x2547F208:  2F080002  cmpi cr6,r8,2
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x2547F20C:  41990010  bc 12,25,0x2547F21C
+	   5: Js25o       	$0x2547F21C
+
+
+
+. 116 2547F208 8
+. 2F 08 00 02 41 99 00 10
+==== BB 117 (0x2547F21C) approx BBs exec'd 0 ====
+
+	0x2547F21C:  3CA00002  lis r5,2
+	   0: MOVL       	$0x20000, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F220:  60AB0204  ori r11,r5,0x204
+	   3: MOVL       	$0x20204, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x2547F224:  7F875840  cmpl cr7,r7,r11
+	   6: GETL       	R7, t4
+	   7: GETL       	R11, t6
+	   8: CMPUL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x2547F228:  409D03B4  bc 4,29,0x2547F5DC
+	  11: Jc29o       	$0x2547F5DC
+
+
+
+. 117 2547F21C 16
+. 3C A0 00 02 60 AB 02 04 7F 87 58 40 40 9D 03 B4
+==== BB 118 (0x2547F22C) approx BBs exec'd 0 ====
+
+	0x2547F22C:  90FA0008  stw r7,8(r26)
+	   0: GETL       	R7, t0
+	   1: GETL       	R26, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547F230:  3880006E  li r4,110
+	   5: MOVL       	$0x6E, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x2547F234:  48003BCD  bl 0x25482E00
+	   8: MOVL       	$0x2547F238, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x25482E00  ($4)
+
+
+
+. 118 2547F22C 12
+. 90 FA 00 08 38 80 00 6E 48 00 3B CD
+==== BB 119 strchr(0x25482E00) approx BBs exec'd 0 ====
+
+	0x25482E00:  5084442E  rlwimi r4,r4,8,16,23
+	   0: GETL       	R4, t0
+	   1: GETL       	R4, t2
+	   2: ROLL       	$0x8, t2
+	   3: ANDL       	$0xFF00, t2
+	   4: ANDL       	$0xFFFF00FF, t0
+	   5: ORL       	t0, t2
+	   6: PUTL       	t2, R4
+	   7: INCEIPL       	$4
+
+	0x25482E04:  3960FFFF  li r11,-1
+	   8: MOVL       	$0xFFFFFFFF, t4
+	   9: PUTL       	t4, R11
+	  10: INCEIPL       	$4
+
+	0x25482E08:  5084801E  rlwimi r4,r4,16,0,15
+	  11: GETL       	R4, t6
+	  12: GETL       	R4, t8
+	  13: ROLL       	$0x10, t8
+	  14: ANDL       	$0xFFFF0000, t8
+	  15: ANDL       	$0xFFFF, t6
+	  16: ORL       	t6, t8
+	  17: PUTL       	t8, R4
+	  18: INCEIPL       	$4
+
+	0x25482E0C:  546A1EF8  rlwinm r10,r3,3,27,28
+	  19: GETL       	R3, t10
+	  20: ROLL       	$0x3, t10
+	  21: ANDL       	$0x18, t10
+	  22: PUTL       	t10, R10
+	  23: INCEIPL       	$4
+
+	0x25482E10:  3CC0FEFF  lis r6,-257
+	  24: MOVL       	$0xFEFF0000, t12
+	  25: PUTL       	t12, R6
+	  26: INCEIPL       	$4
+
+	0x25482E14:  3CE07F7F  lis r7,32639
+	  27: MOVL       	$0x7F7F0000, t14
+	  28: PUTL       	t14, R7
+	  29: INCEIPL       	$4
+
+	0x25482E18:  5468003A  rlwinm r8,r3,0,0,29
+	  30: GETL       	R3, t16
+	  31: ANDL       	$0xFFFFFFFC, t16
+	  32: PUTL       	t16, R8
+	  33: INCEIPL       	$4
+
+	0x25482E1C:  38C6FEFF  addi r6,r6,-257
+	  34: MOVL       	$0xFEFEFEFF, t18
+	  35: PUTL       	t18, R6
+	  36: INCEIPL       	$4
+
+	0x25482E20:  38E77F7F  addi r7,r7,32639
+	  37: MOVL       	$0x7F7F7F7F, t20
+	  38: PUTL       	t20, R7
+	  39: INCEIPL       	$4
+
+	0x25482E24:  80A80000  lwz r5,0(r8)
+	  40: GETL       	R8, t22
+	  41: LDL       	(t22), t24
+	  42: PUTL       	t24, R5
+	  43: INCEIPL       	$4
+
+	0x25482E28:  7D6B5430  srw r11,r11,r10
+	  44: GETL       	R11, t28
+	  45: GETL       	R10, t26
+	  46: SHRL       	t26, t28
+	  47: PUTL       	t28, R11
+	  48: INCEIPL       	$4
+
+	0x25482E2C:  7CA55B38  orc r5,r5,r11
+	  49: GETL       	R5, t30
+	  50: GETL       	R11, t32
+	  51: NOTL       	t32
+	  52: ORL       	t30, t32
+	  53: PUTL       	t32, R5
+	  54: INCEIPL       	$4
+
+	0x25482E30:  7C062A14  add r0,r6,r5
+	  55: GETL       	R6, t34
+	  56: GETL       	R5, t36
+	  57: ADDL       	t34, t36
+	  58: PUTL       	t36, R0
+	  59: INCEIPL       	$4
+
+	0x25482E34:  7CE928F8  nor r9,r7,r5
+	  60: GETL       	R7, t38
+	  61: GETL       	R5, t40
+	  62: ORL       	t40, t38
+	  63: NOTL       	t38
+	  64: PUTL       	t38, R9
+	  65: INCEIPL       	$4
+
+	0x25482E38:  7C004839  and. r0,r0,r9
+	  66: GETL       	R0, t42
+	  67: GETL       	R9, t44
+	  68: ANDL       	t42, t44
+	  69: PUTL       	t44, R0
+	  70: CMP0L       	t44, t46  (-rSo)
+	  71: ICRFL       	t46, $0x0, CR
+	  72: INCEIPL       	$4
+
+	0x25482E3C:  7C8C2A78  xor r12,r4,r5
+	  73: GETL       	R4, t48
+	  74: GETL       	R5, t50
+	  75: XORL       	t48, t50
+	  76: PUTL       	t50, R12
+	  77: INCEIPL       	$4
+
+	0x25482E40:  7D8C5B38  orc r12,r12,r11
+	  78: GETL       	R12, t52
+	  79: GETL       	R11, t54
+	  80: NOTL       	t54
+	  81: ORL       	t52, t54
+	  82: PUTL       	t54, R12
+	  83: INCEIPL       	$4
+
+	0x25482E44:  48000020  b 0x25482E64
+	  84: JMPo       	$0x25482E64  ($4)
+
+
+
+. 119 25482E00 72
+. 50 84 44 2E 39 60 FF FF 50 84 80 1E 54 6A 1E F8 3C C0 FE FF 3C E0 7F 7F 54 68 00 3A 38 C6 FE FF 38 E7 7F 7F 80 A8 00 00 7D 6B 54 30 7C A5 5B 38 7C 06 2A 14 7C E9 28 F8 7C 00 48 39 7C 8C 2A 78 7D 8C 5B 38 48 00 00 20
+==== BB 120 (0x25482E64) approx BBs exec'd 0 ====
+
+	0x25482E64:  7C066214  add r0,r6,r12
+	   0: GETL       	R6, t0
+	   1: GETL       	R12, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482E68:  7CE960F8  nor r9,r7,r12
+	   5: GETL       	R7, t4
+	   6: GETL       	R12, t6
+	   7: ORL       	t6, t4
+	   8: NOTL       	t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0x25482E6C:  4182FFDC  bc 12,2,0x25482E48
+	  11: Js02o       	$0x25482E48
+
+
+
+. 120 25482E64 12
+. 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+==== BB 121 (0x25482E48) approx BBs exec'd 0 ====
+
+	0x25482E48:  84A80004  lwzu r5,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R8
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25482E4C:  7C004839  and. r0,r0,r9
+	   6: GETL       	R0, t4
+	   7: GETL       	R9, t6
+	   8: ANDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25482E50:  7C062A14  add r0,r6,r5
+	  13: GETL       	R6, t10
+	  14: GETL       	R5, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R0
+	  17: INCEIPL       	$4
+
+	0x25482E54:  7CE928F8  nor r9,r7,r5
+	  18: GETL       	R7, t14
+	  19: GETL       	R5, t16
+	  20: ORL       	t16, t14
+	  21: NOTL       	t14
+	  22: PUTL       	t14, R9
+	  23: INCEIPL       	$4
+
+	0x25482E58:  4082005C  bc 4,2,0x25482EB4
+	  24: Jc02o       	$0x25482EB4
+
+
+
+. 121 25482E48 20
+. 84 A8 00 04 7C 00 48 39 7C 06 2A 14 7C E9 28 F8 40 82 00 5C
+==== BB 122 (0x25482E5C) approx BBs exec'd 0 ====
+
+	0x25482E5C:  7C004839  and. r0,r0,r9
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0x25482E60:  7C8C2A78  xor r12,r4,r5
+	   7: GETL       	R4, t6
+	   8: GETL       	R5, t8
+	   9: XORL       	t6, t8
+	  10: PUTL       	t8, R12
+	  11: INCEIPL       	$4
+
+	0x25482E64:  7C066214  add r0,r6,r12
+	  12: GETL       	R6, t10
+	  13: GETL       	R12, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0x25482E68:  7CE960F8  nor r9,r7,r12
+	  17: GETL       	R7, t14
+	  18: GETL       	R12, t16
+	  19: ORL       	t16, t14
+	  20: NOTL       	t14
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0x25482E6C:  4182FFDC  bc 12,2,0x25482E48
+	  23: Js02o       	$0x25482E48
+
+
+
+. 122 25482E5C 20
+. 7C 00 48 39 7C 8C 2A 78 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+==== BB 123 (0x25482E70) approx BBs exec'd 0 ====
+
+	0x25482E70:  7C004839  and. r0,r0,r9
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0x25482E74:  38600000  li r3,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25482E78:  4D820020  bclr 12,2
+	  10: GETL       	LR, t8
+	  11: Js02o-r       	t8
+
+
+
+. 123 25482E70 12
+. 7C 00 48 39 38 60 00 00 4D 82 00 20
+==== BB 124 (0x2547F238) approx BBs exec'd 0 ====
+
+	0x2547F238:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547F23C:  418602F4  bc 12,6,0x2547F530
+	   4: Js06o       	$0x2547F530
+
+
+
+. 124 2547F238 8
+. 2C 83 00 00 41 86 02 F4
+==== BB 125 (0x2547F530) approx BBs exec'd 0 ====
+
+	0x2547F530:  813A0008  lwz r9,8(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F534:  3C000002  lis r0,2
+	   5: MOVL       	$0x20000, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x2547F538:  60030544  ori r3,r0,0x544
+	   8: MOVL       	$0x20544, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x2547F53C:  7C891840  cmpl cr1,r9,r3
+	  11: GETL       	R9, t8
+	  12: GETL       	R3, t10
+	  13: CMPUL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0x2547F540:  41A5FD24  bc 13,5,0x2547F264
+	  16: Js05o       	$0x2547F264
+
+
+
+. 125 2547F530 20
+. 81 3A 00 08 3C 00 00 02 60 03 05 44 7C 89 18 40 41 A5 FD 24
+==== BB 126 (0x2547F264) approx BBs exec'd 0 ====
+
+	0x2547F264:  2F1FFFFF  cmpi cr6,r31,-1
+	   0: GETL       	R31, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x2547F268:  83BE04B4  lwz r29,1204(r30)
+	   5: GETL       	R30, t6
+	   6: ADDL       	$0x4B4, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R29
+	   9: INCEIPL       	$4
+
+	0x2547F26C:  419A0030  bc 12,26,0x2547F29C
+	  10: Js26o       	$0x2547F29C
+
+
+
+. 126 2547F264 12
+. 2F 1F FF FF 83 BE 04 B4 41 9A 00 30
+==== BB 127 (0x2547F29C) approx BBs exec'd 0 ====
+
+	0x2547F29C:  831A0004  lwz r24,4(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0x2547F2A0:  2F980000  cmpi cr7,r24,0
+	   5: GETL       	R24, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547F2A4:  409E000C  bc 4,30,0x2547F2B0
+	   9: Jc30o       	$0x2547F2B0
+
+
+
+. 127 2547F29C 12
+. 83 1A 00 04 2F 98 00 00 40 9E 00 0C
+==== BB 128 (0x2547F2B0) approx BBs exec'd 0 ====
+
+	0x2547F2B0:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547F2B4:  839E0500  lwz r28,1280(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x500, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2547F2B8:  48001119  bl 0x254803D0
+	   8: MOVL       	$0x2547F2BC, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x254803D0  ($4)
+
+
+
+. 128 2547F2B0 12
+. 38 60 00 00 83 9E 05 00 48 00 11 19
+==== BB 129 brk(0x254803D0) approx BBs exec'd 0 ====
+
+	0x254803D0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254803D4:  90610008  stw r3,8(r1)
+	   6: GETL       	R3, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x8, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x254803D8:  3800002D  li r0,45
+	  11: MOVL       	$0x2D, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x254803DC:  44000002  sc
+	  14: JMPo-sys       	$0x254803E0  ($4)
+
+
+
+. 129 254803D0 16
+. 94 21 FF F0 90 61 00 08 38 00 00 2D 44 00 00 02
+==== BB 130 (0x254803E0) approx BBs exec'd 0 ====
+
+	0x254803E0:  80C10008  lwz r6,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254803E4:  7C8802A6  mflr r4
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x254803E8:  48016C19  bl 0x25497000
+	   8: MOVL       	$0x254803EC, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 130 254803E0 12
+. 80 C1 00 08 7C 88 02 A6 48 01 6C 19
+==== BB 131 (0x254803EC) approx BBs exec'd 0 ====
+
+	0x254803EC:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x254803F0:  80A50504  lwz r5,1284(r5)
+	   3: GETL       	R5, t2
+	   4: ADDL       	$0x504, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x254803F4:  7C8803A6  mtlr r4
+	   8: GETL       	R4, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x254803F8:  90650000  stw r3,0(r5)
+	  11: GETL       	R3, t8
+	  12: GETL       	R5, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x254803FC:  7C061840  cmpl cr0,r6,r3
+	  15: GETL       	R6, t12
+	  16: GETL       	R3, t14
+	  17: CMPUL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0x25480400:  38210010  addi r1,r1,16
+	  20: GETL       	R1, t18
+	  21: ADDL       	$0x10, t18
+	  22: PUTL       	t18, R1
+	  23: INCEIPL       	$4
+
+	0x25480404:  38600000  li r3,0
+	  24: MOVL       	$0x0, t20
+	  25: PUTL       	t20, R3
+	  26: INCEIPL       	$4
+
+	0x25480408:  4CA10020  bclr 5,1
+	  27: GETL       	LR, t22
+	  28: Jc01o-r       	t22
+
+
+
+. 131 254803EC 32
+. 7C A8 02 A6 80 A5 05 04 7C 88 03 A6 90 65 00 00 7C 06 18 40 38 21 00 10 38 60 00 00 4C A1 00 20
+==== BB 132 (0x2547F2BC) approx BBs exec'd 0 ====
+
+	0x2547F2BC:  813E0504  lwz r9,1284(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x504, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F2C0:  809A0004  lwz r4,4(r26)
+	   5: GETL       	R26, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547F2C4:  81690000  lwz r11,0(r9)
+	  10: GETL       	R9, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0x2547F2C8:  7F2400D0  neg r25,r4
+	  14: GETL       	R4, t12
+	  15: NEGL       	t12
+	  16: PUTL       	t12, R25
+	  17: INCEIPL       	$4
+
+	0x2547F2CC:  7C8BE040  cmpl cr1,r11,r28
+	  18: GETL       	R11, t14
+	  19: GETL       	R28, t16
+	  20: CMPUL       	t14, t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0x2547F2D0:  7F645A14  add r27,r4,r11
+	  23: GETL       	R4, t20
+	  24: GETL       	R11, t22
+	  25: ADDL       	t20, t22
+	  26: PUTL       	t22, R27
+	  27: INCEIPL       	$4
+
+	0x2547F2D4:  399BFFFF  addi r12,r27,-1
+	  28: GETL       	R27, t24
+	  29: ADDL       	$0xFFFFFFFF, t24
+	  30: PUTL       	t24, R12
+	  31: INCEIPL       	$4
+
+	0x2547F2D8:  7D83C838  and r3,r12,r25
+	  32: GETL       	R12, t26
+	  33: GETL       	R25, t28
+	  34: ANDL       	t26, t28
+	  35: PUTL       	t28, R3
+	  36: INCEIPL       	$4
+
+	0x2547F2DC:  408402D8  bc 4,4,0x2547F5B4
+	  37: Jc04o       	$0x2547F5B4
+
+
+
+. 132 2547F2BC 36
+. 81 3E 05 04 80 9A 00 04 81 69 00 00 7F 24 00 D0 7C 8B E0 40 7F 64 5A 14 39 9B FF FF 7D 83 C8 38 40 84 02 D8
+==== BB 133 (0x2547F2E0) approx BBs exec'd 0 ====
+
+	0x2547F2E0:  807A000C  lwz r3,12(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547F2E4:  2C030000  cmpi cr0,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547F2E8:  4182000C  bc 12,2,0x2547F2F4
+	   9: Js02o       	$0x2547F2F4
+
+
+
+. 133 2547F2E0 12
+. 80 7A 00 0C 2C 03 00 00 41 82 00 0C
+==== BB 134 (0x2547F2F4) approx BBs exec'd 0 ====
+
+	0x2547F2F4:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547F2F8:  4800111D  bl 0x25480414
+	   3: MOVL       	$0x2547F2FC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25480414  ($4)
+
+
+
+. 134 2547F2F4 8
+. 38 60 00 00 48 00 11 1D
+==== BB 135 sbrk(0x25480414) approx BBs exec'd 0 ====
+
+	0x25480414:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25480418:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2548041C:  48016BE5  bl 0x25497000
+	   9: MOVL       	$0x25480420, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 135 25480414 12
+. 94 21 FF E0 7C 88 02 A6 48 01 6B E5
+==== BB 136 (0x25480420) approx BBs exec'd 0 ====
+
+	0x25480420:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25480424:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25480428:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2548042C:  90810024  stw r4,36(r1)
+	  13: GETL       	R4, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25480430:  93A10014  stw r29,20(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25480434:  7C7D1B78  or r29,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x25480438:  83FE0504  lwz r31,1284(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x504, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0x2548043C:  38600000  li r3,0
+	  31: MOVL       	$0x0, t24
+	  32: PUTL       	t24, R3
+	  33: INCEIPL       	$4
+
+	0x25480440:  801F0000  lwz r0,0(r31)
+	  34: GETL       	R31, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R0
+	  37: INCEIPL       	$4
+
+	0x25480444:  2F800000  cmpi cr7,r0,0
+	  38: GETL       	R0, t30
+	  39: CMP0L       	t30, t32  (-rSo)
+	  40: ICRFL       	t32, $0x7, CR
+	  41: INCEIPL       	$4
+
+	0x25480448:  419E003C  bc 12,30,0x25480484
+	  42: Js30o       	$0x25480484
+
+
+
+. 136 25480420 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 81 00 24 93 A1 00 14 7C 7D 1B 78 83 FE 05 04 38 60 00 00 80 1F 00 00 2F 80 00 00 41 9E 00 3C
+==== BB 137 (0x2548044C) approx BBs exec'd 0 ====
+
+	0x2548044C:  813E049C  lwz r9,1180(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x49C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25480450:  80A90000  lwz r5,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R5
+	   8: INCEIPL       	$4
+
+	0x25480454:  2C050000  cmpi cr0,r5,0
+	   9: GETL       	R5, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25480458:  4082002C  bc 4,2,0x25480484
+	  13: Jc02o       	$0x25480484
+
+
+
+. 137 2548044C 16
+. 81 3E 04 9C 80 A9 00 00 2C 05 00 00 40 82 00 2C
+==== BB 138 (0x2548045C) approx BBs exec'd 0 ====
+
+	0x2548045C:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25480460:  409A003C  bc 4,26,0x2548049C
+	   4: Jc26o       	$0x2548049C
+
+
+
+. 138 2548045C 8
+. 2F 1D 00 00 40 9A 00 3C
+==== BB 139 (0x25480464) approx BBs exec'd 0 ====
+
+	0x25480464:  807F0000  lwz r3,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25480468:  80C10024  lwz r6,36(r1)
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x24, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x2548046C:  83A10014  lwz r29,20(r1)
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R29
+	  13: INCEIPL       	$4
+
+	0x25480470:  83C10018  lwz r30,24(r1)
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x18, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R30
+	  18: INCEIPL       	$4
+
+	0x25480474:  7CC803A6  mtlr r6
+	  19: GETL       	R6, t16
+	  20: PUTL       	t16, LR
+	  21: INCEIPL       	$4
+
+	0x25480478:  83E1001C  lwz r31,28(r1)
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R31
+	  26: INCEIPL       	$4
+
+	0x2548047C:  38210020  addi r1,r1,32
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x20, t22
+	  29: PUTL       	t22, R1
+	  30: INCEIPL       	$4
+
+	0x25480480:  4E800020  blr
+	  31: GETL       	LR, t24
+	  32: JMPo-r       	t24  ($4)
+
+
+
+. 139 25480464 32
+. 80 7F 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 140 (0x2547F2FC) approx BBs exec'd 0 ====
+
+	0x2547F2FC:  7F83E000  cmp cr7,r3,r28
+	   0: GETL       	R3, t0
+	   1: GETL       	R28, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x2547F300:  419E02C4  bc 12,30,0x2547F5C4
+	   5: Js30o       	$0x2547F5C4
+
+
+
+. 140 2547F2FC 8
+. 7F 83 E0 00 41 9E 02 C4
+==== BB 141 (0x2547F304) approx BBs exec'd 0 ====
+
+	0x2547F304:  80BD0000  lwz r5,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x2547F308:  2C850000  cmpi cr1,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x2547F30C:  408602A0  bc 4,6,0x2547F5AC
+	   8: Jc06o       	$0x2547F5AC
+
+
+
+. 141 2547F304 12
+. 80 BD 00 00 2C 85 00 00 40 86 02 A0
+==== BB 142 (0x2547F310) approx BBs exec'd 0 ====
+
+	0x2547F310:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547F314:  7EE803A6  mtlr r23
+	   3: GETL       	R23, t2
+	   4: PUTL       	t2, LR
+	   5: INCEIPL       	$4
+
+	0x2547F318:  7EA4AB78  or r4,r21,r21
+	   6: GETL       	R21, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2547F31C:  38A101E0  addi r5,r1,480
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x1E0, t6
+	  11: PUTL       	t6, R5
+	  12: INCEIPL       	$4
+
+	0x2547F320:  4E800021  blrl
+	  13: GETL       	LR, t8
+	  14: MOVL       	$0x2547F324, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-r       	t8  ($4)
+
+
+
+. 142 2547F310 20
+. 7E C3 B3 78 7E E8 03 A6 7E A4 AB 78 38 A1 01 E0 4E 80 00 21
+==== BB 143 dl_main(0x254721F0) approx BBs exec'd 0 ====
+
+	0x254721F0:  9421FF30  stwu r1,-208(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF30, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254721F4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254721F8:  48024E09  bl 0x25497000
+	   9: MOVL       	$0x254721FC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 143 254721F0 12
+. 94 21 FF 30 7C 08 02 A6 48 02 4E 09
+==== BB 144 (0x254721FC) approx BBs exec'd 0 ====
+
+	0x254721FC:  93C100C8  stw r30,200(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xC8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472200:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25472204:  92010090  stw r16,144(r1)
+	   8: GETL       	R16, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x90, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25472208:  900100D4  stw r0,212(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xD4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547220C:  7D600026  mfcr r11
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0x25472210:  91C10088  stw r14,136(r1)
+	  21: GETL       	R14, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x88, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25472214:  821E04B4  lwz r16,1204(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4B4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R16
+	  30: INCEIPL       	$4
+
+	0x25472218:  819E04B8  lwz r12,1208(r30)
+	  31: GETL       	R30, t24
+	  32: ADDL       	$0x4B8, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R12
+	  35: INCEIPL       	$4
+
+	0x2547221C:  81500000  lwz r10,0(r16)
+	  36: GETL       	R16, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R10
+	  39: INCEIPL       	$4
+
+	0x25472220:  81DE04C8  lwz r14,1224(r30)
+	  40: GETL       	R30, t32
+	  41: ADDL       	$0x4C8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R14
+	  44: INCEIPL       	$4
+
+	0x25472224:  2F8A0000  cmpi cr7,r10,0
+	  45: GETL       	R10, t36
+	  46: CMP0L       	t36, t38  (-rSo)
+	  47: ICRFL       	t38, $0x7, CR
+	  48: INCEIPL       	$4
+
+	0x25472228:  813E0020  lwz r9,32(r30)
+	  49: GETL       	R30, t40
+	  50: ADDL       	$0x20, t40
+	  51: LDL       	(t40), t42
+	  52: PUTL       	t42, R9
+	  53: INCEIPL       	$4
+
+	0x2547222C:  91610084  stw r11,132(r1)
+	  54: GETL       	R11, t44
+	  55: GETL       	R1, t46
+	  56: ADDL       	$0x84, t46
+	  57: STL       	t44, (t46)
+	  58: INCEIPL       	$4
+
+	0x25472230:  811E04D8  lwz r8,1240(r30)
+	  59: GETL       	R30, t48
+	  60: ADDL       	$0x4D8, t48
+	  61: LDL       	(t48), t50
+	  62: PUTL       	t50, R8
+	  63: INCEIPL       	$4
+
+	0x25472234:  816C0000  lwz r11,0(r12)
+	  64: GETL       	R12, t52
+	  65: LDL       	(t52), t54
+	  66: PUTL       	t54, R11
+	  67: INCEIPL       	$4
+
+	0x25472238:  80FE04B0  lwz r7,1200(r30)
+	  68: GETL       	R30, t56
+	  69: ADDL       	$0x4B0, t56
+	  70: LDL       	(t56), t58
+	  71: PUTL       	t58, R7
+	  72: INCEIPL       	$4
+
+	0x2547223C:  80DE001C  lwz r6,28(r30)
+	  73: GETL       	R30, t60
+	  74: ADDL       	$0x1C, t60
+	  75: LDL       	(t60), t62
+	  76: PUTL       	t62, R6
+	  77: INCEIPL       	$4
+
+	0x25472240:  801E04A8  lwz r0,1192(r30)
+	  78: GETL       	R30, t64
+	  79: ADDL       	$0x4A8, t64
+	  80: LDL       	(t64), t66
+	  81: PUTL       	t66, R0
+	  82: INCEIPL       	$4
+
+	0x25472244:  93E100CC  stw r31,204(r1)
+	  83: GETL       	R31, t68
+	  84: GETL       	R1, t70
+	  85: ADDL       	$0xCC, t70
+	  86: STL       	t68, (t70)
+	  87: INCEIPL       	$4
+
+	0x25472248:  7C3F0B78  or r31,r1,r1
+	  88: GETL       	R1, t72
+	  89: PUTL       	t72, R31
+	  90: INCEIPL       	$4
+
+	0x2547224C:  91E1008C  stw r15,140(r1)
+	  91: GETL       	R15, t74
+	  92: GETL       	R1, t76
+	  93: ADDL       	$0x8C, t76
+	  94: STL       	t74, (t76)
+	  95: INCEIPL       	$4
+
+	0x25472250:  92410098  stw r18,152(r1)
+	  96: GETL       	R18, t78
+	  97: GETL       	R1, t80
+	  98: ADDL       	$0x98, t80
+	  99: STL       	t78, (t80)
+	 100: INCEIPL       	$4
+
+	0x25472254:  3A400000  li r18,0
+	 101: MOVL       	$0x0, t82
+	 102: PUTL       	t82, R18
+	 103: INCEIPL       	$4
+
+	0x25472258:  9261009C  stw r19,156(r1)
+	 104: GETL       	R19, t84
+	 105: GETL       	R1, t86
+	 106: ADDL       	$0x9C, t86
+	 107: STL       	t84, (t86)
+	 108: INCEIPL       	$4
+
+	0x2547225C:  3A600000  li r19,0
+	 109: MOVL       	$0x0, t88
+	 110: PUTL       	t88, R19
+	 111: INCEIPL       	$4
+
+	0x25472260:  928100A0  stw r20,160(r1)
+	 112: GETL       	R20, t90
+	 113: GETL       	R1, t92
+	 114: ADDL       	$0xA0, t92
+	 115: STL       	t90, (t92)
+	 116: INCEIPL       	$4
+
+	0x25472264:  3A800000  li r20,0
+	 117: MOVL       	$0x0, t94
+	 118: PUTL       	t94, R20
+	 119: INCEIPL       	$4
+
+	0x25472268:  92A100A4  stw r21,164(r1)
+	 120: GETL       	R21, t96
+	 121: GETL       	R1, t98
+	 122: ADDL       	$0xA4, t98
+	 123: STL       	t96, (t98)
+	 124: INCEIPL       	$4
+
+	0x2547226C:  3AA00000  li r21,0
+	 125: MOVL       	$0x0, t100
+	 126: PUTL       	t100, R21
+	 127: INCEIPL       	$4
+
+	0x25472270:  92C100A8  stw r22,168(r1)
+	 128: GETL       	R22, t102
+	 129: GETL       	R1, t104
+	 130: ADDL       	$0xA8, t104
+	 131: STL       	t102, (t104)
+	 132: INCEIPL       	$4
+
+	0x25472274:  7CB62B78  or r22,r5,r5
+	 133: GETL       	R5, t106
+	 134: PUTL       	t106, R22
+	 135: INCEIPL       	$4
+
+	0x25472278:  92E100AC  stw r23,172(r1)
+	 136: GETL       	R23, t108
+	 137: GETL       	R1, t110
+	 138: ADDL       	$0xAC, t110
+	 139: STL       	t108, (t110)
+	 140: INCEIPL       	$4
+
+	0x2547227C:  7C972378  or r23,r4,r4
+	 141: GETL       	R4, t112
+	 142: PUTL       	t112, R23
+	 143: INCEIPL       	$4
+
+	0x25472280:  930100B0  stw r24,176(r1)
+	 144: GETL       	R24, t114
+	 145: GETL       	R1, t116
+	 146: ADDL       	$0xB0, t116
+	 147: STL       	t114, (t116)
+	 148: INCEIPL       	$4
+
+	0x25472284:  7C781B78  or r24,r3,r3
+	 149: GETL       	R3, t118
+	 150: PUTL       	t118, R24
+	 151: INCEIPL       	$4
+
+	0x25472288:  932100B4  stw r25,180(r1)
+	 152: GETL       	R25, t120
+	 153: GETL       	R1, t122
+	 154: ADDL       	$0xB4, t122
+	 155: STL       	t120, (t122)
+	 156: INCEIPL       	$4
+
+	0x2547228C:  3B200000  li r25,0
+	 157: MOVL       	$0x0, t124
+	 158: PUTL       	t124, R25
+	 159: INCEIPL       	$4
+
+	0x25472290:  912E03FC  stw r9,1020(r14)
+	 160: GETL       	R9, t126
+	 161: GETL       	R14, t128
+	 162: ADDL       	$0x3FC, t128
+	 163: STL       	t126, (t128)
+	 164: INCEIPL       	$4
+
+	0x25472294:  92210094  stw r17,148(r1)
+	 165: GETL       	R17, t130
+	 166: GETL       	R1, t132
+	 167: ADDL       	$0x94, t132
+	 168: STL       	t130, (t132)
+	 169: INCEIPL       	$4
+
+	0x25472298:  934100B8  stw r26,184(r1)
+	 170: GETL       	R26, t134
+	 171: GETL       	R1, t136
+	 172: ADDL       	$0xB8, t136
+	 173: STL       	t134, (t136)
+	 174: INCEIPL       	$4
+
+	0x2547229C:  936100BC  stw r27,188(r1)
+	 175: GETL       	R27, t138
+	 176: GETL       	R1, t140
+	 177: ADDL       	$0xBC, t140
+	 178: STL       	t138, (t140)
+	 179: INCEIPL       	$4
+
+	0x254722A0:  938100C0  stw r28,192(r1)
+	 180: GETL       	R28, t142
+	 181: GETL       	R1, t144
+	 182: ADDL       	$0xC0, t144
+	 183: STL       	t142, (t144)
+	 184: INCEIPL       	$4
+
+	0x254722A4:  93A100C4  stw r29,196(r1)
+	 185: GETL       	R29, t146
+	 186: GETL       	R1, t148
+	 187: ADDL       	$0xC4, t148
+	 188: STL       	t146, (t148)
+	 189: INCEIPL       	$4
+
+	0x254722A8:  910E01B4  stw r8,436(r14)
+	 190: GETL       	R8, t150
+	 191: GETL       	R14, t152
+	 192: ADDL       	$0x1B4, t152
+	 193: STL       	t150, (t152)
+	 194: INCEIPL       	$4
+
+	0x254722AC:  90EE042C  stw r7,1068(r14)
+	 195: GETL       	R7, t154
+	 196: GETL       	R14, t156
+	 197: ADDL       	$0x42C, t156
+	 198: STL       	t154, (t156)
+	 199: INCEIPL       	$4
+
+	0x254722B0:  90CE03F8  stw r6,1016(r14)
+	 200: GETL       	R6, t158
+	 201: GETL       	R14, t160
+	 202: ADDL       	$0x3F8, t160
+	 203: STL       	t158, (t160)
+	 204: INCEIPL       	$4
+
+	0x254722B4:  900E0404  stw r0,1028(r14)
+	 205: GETL       	R0, t162
+	 206: GETL       	R14, t164
+	 207: ADDL       	$0x404, t164
+	 208: STL       	t162, (t164)
+	 209: INCEIPL       	$4
+
+	0x254722B8:  917F0038  stw r11,56(r31)
+	 210: GETL       	R11, t166
+	 211: GETL       	R31, t168
+	 212: ADDL       	$0x38, t168
+	 213: STL       	t166, (t168)
+	 214: INCEIPL       	$4
+
+	0x254722BC:  81FE04F4  lwz r15,1268(r30)
+	 215: GETL       	R30, t170
+	 216: ADDL       	$0x4F4, t170
+	 217: LDL       	(t170), t172
+	 218: PUTL       	t172, R15
+	 219: INCEIPL       	$4
+
+	0x254722C0:  813E007C  lwz r9,124(r30)
+	 220: GETL       	R30, t174
+	 221: ADDL       	$0x7C, t174
+	 222: LDL       	(t174), t176
+	 223: PUTL       	t176, R9
+	 224: INCEIPL       	$4
+
+	0x254722C4:  419E0008  bc 12,30,0x254722CC
+	 225: Js30o       	$0x254722CC
+
+
+
+. 144 254721FC 204
+. 93 C1 00 C8 7F C8 02 A6 92 01 00 90 90 01 00 D4 7D 60 00 26 91 C1 00 88 82 1E 04 B4 81 9E 04 B8 81 50 00 00 81 DE 04 C8 2F 8A 00 00 81 3E 00 20 91 61 00 84 81 1E 04 D8 81 6C 00 00 80 FE 04 B0 80 DE 00 1C 80 1E 04 A8 93 E1 00 CC 7C 3F 0B 78 91 E1 00 8C 92 41 00 98 3A 40 00 00 92 61 00 9C 3A 60 00 00 92 81 00 A0 3A 80 00 00 92 A1 00 A4 3A A0 00 00 92 C1 00 A8 7C B6 2B 78 92 E1 00 AC 7C 97 23 78 93 01 00 B0 7C 78 1B 78 93 21 00 B4 3B 20 00 00 91 2E 03 FC 92 21 00 94 93 41 00 B8 93 61 00 BC 93 81 00 C0 93 A1 00 C4 91 0E 01 B4 90 EE 04 2C 90 CE 03 F8 90 0E 04 04 91 7F 00 38 81 FE 04 F4 81 3E 00 7C 41 9E 00 08
+==== BB 145 (0x254722CC) approx BBs exec'd 0 ====
+
+	0x254722CC:  823E0040  lwz r17,64(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x40, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0x254722D0:  912F0054  stw r9,84(r15)
+	   5: GETL       	R9, t4
+	   6: GETL       	R15, t6
+	   7: ADDL       	$0x54, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x254722D4:  387F0038  addi r3,r31,56
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x38, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x254722D8:  4800D951  bl 0x2547FC28
+	  14: MOVL       	$0x254722DC, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x2547FC28  ($4)
+
+
+
+. 145 254722CC 16
+. 82 3E 00 40 91 2F 00 54 38 7F 00 38 48 00 D9 51
+==== BB 146 _dl_next_ld_env_entry(0x2547FC28) approx BBs exec'd 0 ====
+
+	0x2547FC28:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547FC2C:  39400000  li r10,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547FC30:  81630000  lwz r11,0(r3)
+	   9: GETL       	R3, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547FC34:  812B0000  lwz r9,0(r11)
+	  13: GETL       	R11, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R9
+	  16: INCEIPL       	$4
+
+	0x2547FC38:  2F890000  cmpi cr7,r9,0
+	  17: GETL       	R9, t14
+	  18: CMP0L       	t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x2547FC3C:  419E001C  bc 12,30,0x2547FC58
+	  21: Js30o       	$0x2547FC58
+
+
+
+. 146 2547FC28 24
+. 94 21 FF F0 39 40 00 00 81 63 00 00 81 2B 00 00 2F 89 00 00 41 9E 00 1C
+==== BB 147 (0x2547FC40) approx BBs exec'd 0 ====
+
+	0x2547FC40:  88090000  lbz r0,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547FC44:  2C00004C  cmpi cr0,r0,76
+	   4: GETL       	R0, t4
+	   5: MOVL       	$0x4C, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547FC48:  4182001C  bc 12,2,0x2547FC64
+	   9: Js02o       	$0x2547FC64
+
+
+
+. 147 2547FC40 12
+. 88 09 00 00 2C 00 00 4C 41 82 00 1C
+==== BB 148 (0x2547FC4C) approx BBs exec'd 0 ====
+
+	0x2547FC4C:  852B0004  lwzu r9,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R11
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x2547FC50:  2F890000  cmpi cr7,r9,0
+	   6: GETL       	R9, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x2547FC54:  409EFFEC  bc 4,30,0x2547FC40
+	  10: Jc30o       	$0x2547FC40
+
+
+
+. 148 2547FC4C 12
+. 85 2B 00 04 2F 89 00 00 40 9E FF EC
+==== BB 149 (0x2547FC64) approx BBs exec'd 0 ====
+
+	0x2547FC64:  88890001  lbz r4,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547FC68:  2C840044  cmpi cr1,r4,68
+	   5: GETL       	R4, t4
+	   6: MOVL       	$0x44, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x2547FC6C:  4086FFE0  bc 4,6,0x2547FC4C
+	  10: Jc06o       	$0x2547FC4C
+
+
+
+. 149 2547FC64 12
+. 88 89 00 01 2C 84 00 44 40 86 FF E0
+==== BB 150 (0x2547FC70) approx BBs exec'd 0 ====
+
+	0x2547FC70:  88A90002  lbz r5,2(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x2, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547FC74:  2F05005F  cmpi cr6,r5,95
+	   5: GETL       	R5, t4
+	   6: MOVL       	$0x5F, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x2547FC78:  409AFFD4  bc 4,26,0x2547FC4C
+	  10: Jc26o       	$0x2547FC4C
+
+
+
+. 150 2547FC70 12
+. 88 A9 00 02 2F 05 00 5F 40 9A FF D4
+==== BB 151 (0x2547FC7C) approx BBs exec'd 0 ====
+
+	0x2547FC7C:  38CB0004  addi r6,r11,4
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x2547FC80:  39490003  addi r10,r9,3
+	   4: GETL       	R9, t2
+	   5: ADDL       	$0x3, t2
+	   6: PUTL       	t2, R10
+	   7: INCEIPL       	$4
+
+	0x2547FC84:  90C30000  stw r6,0(r3)
+	   8: GETL       	R6, t4
+	   9: GETL       	R3, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x2547FC88:  4BFFFFD0  b 0x2547FC58
+	  12: JMPo       	$0x2547FC58  ($4)
+
+
+
+. 151 2547FC7C 16
+. 38 CB 00 04 39 49 00 03 90 C3 00 00 4B FF FF D0
+==== BB 152 (0x2547FC58) approx BBs exec'd 0 ====
+
+	0x2547FC58:  7D435378  or r3,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547FC5C:  38210010  addi r1,r1,16
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x10, t2
+	   5: PUTL       	t2, R1
+	   6: INCEIPL       	$4
+
+	0x2547FC60:  4E800020  blr
+	   7: GETL       	LR, t4
+	   8: JMPo-r       	t4  ($4)
+
+
+
+. 152 2547FC58 12
+. 7D 43 53 78 38 21 00 10 4E 80 00 20
+==== BB 153 (0x254722DC) approx BBs exec'd 0 ====
+
+	0x254722DC:  7C7D1B79  or. r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254722E0:  41820064  bc 12,2,0x25472344
+	   5: Js02o       	$0x25472344
+
+
+
+. 153 254722DC 8
+. 7C 7D 1B 79 41 82 00 64
+==== BB 154 (0x254722E4) approx BBs exec'd 0 ====
+
+	0x254722E4:  881D0000  lbz r0,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x254722E8:  39600000  li r11,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R11
+	   6: INCEIPL       	$4
+
+	0x254722EC:  7C090379  or. r9,r0,r0
+	   7: GETL       	R0, t6
+	   8: PUTL       	t6, R9
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x254722F0:  40A20018  bc 5,2,0x25472308
+	  12: Jc02o       	$0x25472308
+
+
+
+. 154 254722E4 16
+. 88 1D 00 00 39 60 00 00 7C 09 03 79 40 A2 00 18
+==== BB 155 (0x25472308) approx BBs exec'd 0 ====
+
+	0x25472308:  2C89003D  cmpi cr1,r9,61
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x3D, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x2547230C:  4086FFEC  bc 4,6,0x254722F8
+	   5: Jc06o       	$0x254722F8
+
+
+
+. 155 25472308 8
+. 2C 89 00 3D 40 86 FF EC
+==== BB 156 (0x254722F8) approx BBs exec'd 0 ====
+
+	0x254722F8:  396B0001  addi r11,r11,1
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0x254722FC:  7C1D58AE  lbzx r0,r29,r11
+	   4: GETL       	R11, t2
+	   5: GETL       	R29, t4
+	   6: ADDL       	t4, t2
+	   7: LDB       	(t2), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x25472300:  7C090379  or. r9,r0,r0
+	  10: GETL       	R0, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25472304:  4182000C  bc 12,2,0x25472310
+	  15: Js02o       	$0x25472310
+
+
+
+. 156 254722F8 16
+. 39 6B 00 01 7C 1D 58 AE 7C 09 03 79 41 82 00 0C
+==== BB 157 (0x25472310) approx BBs exec'd 0 ====
+
+	0x25472310:  2D80003D  cmpi cr3,r0,61
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x3D, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x3, CR
+	   4: INCEIPL       	$4
+
+	0x25472314:  408EFFC0  bc 4,14,0x254722D4
+	   5: Jc14o       	$0x254722D4
+
+
+
+. 157 25472310 8
+. 2D 80 00 3D 40 8E FF C0
+==== BB 158 (0x25472318) approx BBs exec'd 0 ====
+
+	0x25472318:  380BFFFC  addi r0,r11,-4
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x2547231C:  2A000010  cmpli cr4,r0,16
+	   4: GETL       	R0, t2
+	   5: MOVL       	$0x10, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x25472320:  41B1FFB4  bc 13,17,0x254722D4
+	   9: Js17o       	$0x254722D4
+
+
+
+. 158 25472318 12
+. 38 0B FF FC 2A 00 00 10 41 B1 FF B4
+==== BB 159 (0x25472324) approx BBs exec'd 0 ====
+
+	0x25472324:  80BE0080  lwz r5,128(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x80, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25472328:  541A103A  rlwinm r26,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R26
+	   8: INCEIPL       	$4
+
+	0x2547232C:  7C9A282E  lwzx r4,r26,r5
+	   9: GETL       	R5, t6
+	  10: GETL       	R26, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x25472330:  7C642A14  add r3,r4,r5
+	  15: GETL       	R4, t12
+	  16: GETL       	R5, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R3
+	  19: INCEIPL       	$4
+
+	0x25472334:  7C6903A6  mtctr r3
+	  20: GETL       	R3, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x25472338:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+
+. 159 25472324 24
+. 80 BE 00 80 54 1A 10 3A 7C 9A 28 2E 7C 64 2A 14 7C 69 03 A6 4E 80 04 20
+==== BB 160 (0x25473248) approx BBs exec'd 0 ====
+
+	0x25473248:  809E00C4  lwz r4,196(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xC4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547324C:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25473250:  38A0000C  li r5,12
+	   8: MOVL       	$0xC, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25473254:  48010045  bl 0x25483298
+	  11: MOVL       	$0x25473258, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+
+. 160 25473248 16
+. 80 9E 00 C4 7F A3 EB 78 38 A0 00 0C 48 01 00 45
+==== BB 161 memcmp(0x25483298) approx BBs exec'd 0 ====
+
+	0x25483298:  2B85000F  cmpli cr7,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x2548329C:  9421FFE0  stwu r1,-32(r1)
+	   5: GETL       	R1, t6
+	   6: GETL       	R1, t8
+	   7: ADDL       	$0xFFFFFFE0, t8
+	   8: PUTL       	t8, R1
+	   9: STL       	t6, (t8)
+	  10: INCEIPL       	$4
+
+	0x254832A0:  93810010  stw r28,16(r1)
+	  11: GETL       	R28, t10
+	  12: GETL       	R1, t12
+	  13: ADDL       	$0x10, t12
+	  14: STL       	t10, (t12)
+	  15: INCEIPL       	$4
+
+	0x254832A4:  7C6B1B78  or r11,r3,r3
+	  16: GETL       	R3, t14
+	  17: PUTL       	t14, R11
+	  18: INCEIPL       	$4
+
+	0x254832A8:  93A10014  stw r29,20(r1)
+	  19: GETL       	R29, t16
+	  20: GETL       	R1, t18
+	  21: ADDL       	$0x14, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0x254832AC:  93C10018  stw r30,24(r1)
+	  24: GETL       	R30, t20
+	  25: GETL       	R1, t22
+	  26: ADDL       	$0x18, t22
+	  27: STL       	t20, (t22)
+	  28: INCEIPL       	$4
+
+	0x254832B0:  93E1001C  stw r31,28(r1)
+	  29: GETL       	R31, t24
+	  30: GETL       	R1, t26
+	  31: ADDL       	$0x1C, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0x254832B4:  409D00BC  bc 4,29,0x25483370
+	  34: Jc29o       	$0x25483370
+
+
+
+. 161 25483298 32
+. 2B 85 00 0F 94 21 FF E0 93 81 00 10 7C 6B 1B 78 93 A1 00 14 93 C1 00 18 93 E1 00 1C 40 9D 00 BC
+==== BB 162 (0x25483370) approx BBs exec'd 0 ====
+
+	0x25483370:  2C050000  cmpi cr0,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25483374:  41820024  bc 12,2,0x25483398
+	   4: Js02o       	$0x25483398
+
+
+
+. 162 25483370 8
+. 2C 05 00 00 41 82 00 24
+==== BB 163 (0x25483378) approx BBs exec'd 0 ====
+
+	0x25483378:  898B0000  lbz r12,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R12
+	   3: INCEIPL       	$4
+
+	0x2548337C:  396B0001  addi r11,r11,1
+	   4: GETL       	R11, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x25483380:  8BA40000  lbz r29,0(r4)
+	   8: GETL       	R4, t6
+	   9: LDB       	(t6), t8
+	  10: PUTL       	t8, R29
+	  11: INCEIPL       	$4
+
+	0x25483384:  38840001  addi r4,r4,1
+	  12: GETL       	R4, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0x25483388:  7C7D6051  subf. r3,r29,r12
+	  16: GETL       	R29, t12
+	  17: GETL       	R12, t14
+	  18: SUBL       	t12, t14
+	  19: PUTL       	t14, R3
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x0, CR
+	  22: INCEIPL       	$4
+
+	0x2548338C:  40820010  bc 4,2,0x2548339C
+	  23: Jc02o       	$0x2548339C
+
+
+
+. 163 25483378 24
+. 89 8B 00 00 39 6B 00 01 8B A4 00 00 38 84 00 01 7C 7D 60 51 40 82 00 10
+==== BB 164 (0x25483390) approx BBs exec'd 0 ====
+
+	0x25483390:  34A5FFFF  addic. r5,r5,-1
+	   0: GETL       	R5, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R5
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25483394:  4082FFE4  bc 4,2,0x25483378
+	   6: Jc02o       	$0x25483378
+
+
+
+. 164 25483390 8
+. 34 A5 FF FF 40 82 FF E4
+==== BB 165 (0x25483398) approx BBs exec'd 0 ====
+
+	0x25483398:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2548339C:  83810010  lwz r28,16(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x10, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x254833A0:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x254833A4:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x254833A8:  83E1001C  lwz r31,28(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0x254833AC:  38210020  addi r1,r1,32
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x20, t18
+	  25: PUTL       	t18, R1
+	  26: INCEIPL       	$4
+
+	0x254833B0:  4E800020  blr
+	  27: GETL       	LR, t20
+	  28: JMPo-r       	t20  ($4)
+
+
+
+. 165 25483398 28
+. 38 60 00 00 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 166 (0x25473258) approx BBs exec'd 0 ====
+
+	0x25473258:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547325C:  409E0428  bc 4,30,0x25473684
+	   4: Jc30o       	$0x25473684
+
+
+
+. 166 25473258 8
+. 2F 83 00 00 40 9E 04 28
+==== BB 167 (0x25473260) approx BBs exec'd 0 ====
+
+	0x25473260:  813E004C  lwz r9,76(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25473264:  381D000D  addi r0,r29,13
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0xD, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25473268:  90090000  stw r0,0(r9)
+	   9: GETL       	R0, t6
+	  10: GETL       	R9, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547326C:  4BFFF068  b 0x254722D4
+	  13: JMPo       	$0x254722D4  ($4)
+
+
+
+. 167 25473260 16
+. 81 3E 00 4C 38 1D 00 0D 90 09 00 00 4B FF F0 68
+==== BB 168 (0x254722D4) approx BBs exec'd 0 ====
+
+	0x254722D4:  387F0038  addi r3,r31,56
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x38, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x254722D8:  4800D951  bl 0x2547FC28
+	   4: MOVL       	$0x254722DC, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0x2547FC28  ($4)
+
+
+
+. 168 254722D4 8
+. 38 7F 00 38 48 00 D9 51
+==== BB 169 (0x25473218) approx BBs exec'd 0 ====
+
+	0x25473218:  809E00C0  lwz r4,192(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xC0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547321C:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25473220:  38A00007  li r5,7
+	   8: MOVL       	$0x7, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25473224:  48010075  bl 0x25483298
+	  11: MOVL       	$0x25473228, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+
+. 169 25473218 16
+. 80 9E 00 C0 7F A3 EB 78 38 A0 00 07 48 01 00 75
+==== BB 170 (0x2548339C) approx BBs exec'd 0 ====
+
+	0x2548339C:  83810010  lwz r28,16(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x254833A0:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x254833A4:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0x254833A8:  83E1001C  lwz r31,28(r1)
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R31
+	  19: INCEIPL       	$4
+
+	0x254833AC:  38210020  addi r1,r1,32
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x20, t16
+	  22: PUTL       	t16, R1
+	  23: INCEIPL       	$4
+
+	0x254833B0:  4E800020  blr
+	  24: GETL       	LR, t18
+	  25: JMPo-r       	t18  ($4)
+
+
+
+. 170 2548339C 24
+. 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 171 (0x25473228) approx BBs exec'd 0 ====
+
+	0x25473228:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547322C:  4086048C  bc 4,6,0x254736B8
+	   4: Jc06o       	$0x254736B8
+
+
+
+. 171 25473228 8
+. 2C 83 00 00 40 86 04 8C
+==== BB 172 (0x254736B8) approx BBs exec'd 0 ====
+
+	0x254736B8:  809E00E0  lwz r4,224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xE0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254736BC:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254736C0:  38A00007  li r5,7
+	   8: MOVL       	$0x7, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x254736C4:  4800FBD5  bl 0x25483298
+	  11: MOVL       	$0x254736C8, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+
+. 172 254736B8 16
+. 80 9E 00 E0 7F A3 EB 78 38 A0 00 07 48 00 FB D5
+==== BB 173 (0x254736C8) approx BBs exec'd 0 ====
+
+	0x254736C8:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x254736CC:  40820088  bc 4,2,0x25473754
+	   4: Jc02o       	$0x25473754
+
+
+
+. 173 254736C8 8
+. 2C 03 00 00 40 82 00 88
+==== BB 174 (0x254736D0) approx BBs exec'd 0 ====
+
+	0x254736D0:  38FD0008  addi r7,r29,8
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R7
+	   3: INCEIPL       	$4
+
+	0x254736D4:  90F10000  stw r7,0(r17)
+	   4: GETL       	R7, t2
+	   5: GETL       	R17, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254736D8:  4BFFEBFC  b 0x254722D4
+	   8: JMPo       	$0x254722D4  ($4)
+
+
+
+. 174 254736D0 12
+. 38 FD 00 08 90 F1 00 00 4B FF EB FC
+==== BB 175 (0x25472344) approx BBs exec'd 0 ====
+
+	0x25472344:  83700000  lwz r27,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R27
+	   3: INCEIPL       	$4
+
+	0x25472348:  933F0030  stw r25,48(r31)
+	   4: GETL       	R25, t4
+	   5: GETL       	R31, t6
+	   6: ADDL       	$0x30, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547234C:  2D9B0000  cmpi cr3,r27,0
+	   9: GETL       	R27, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x3, CR
+	  12: INCEIPL       	$4
+
+	0x25472350:  408E128C  bc 4,14,0x254735DC
+	  13: Jc14o       	$0x254735DC
+
+
+
+. 175 25472344 16
+. 83 70 00 00 93 3F 00 30 2D 9B 00 00 40 8E 12 8C
+==== BB 176 (0x25472354) approx BBs exec'd 0 ====
+
+	0x25472354:  813E0034  lwz r9,52(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25472358:  3014FFFF  addic r0,r20,-1
+	   5: GETL       	R20, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547235C:  7F40A110  subfe r26,r0,r20
+	   9: GETL       	R0, t6
+	  10: GETL       	R20, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R26
+	  13: INCEIPL       	$4
+
+	0x25472360:  83A90000  lwz r29,0(r9)
+	  14: GETL       	R9, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0x25472364:  313DFFFF  addic r9,r29,-1
+	  18: GETL       	R29, t14
+	  19: ADCL       	$0xFFFFFFFF, t14  (-wCa)
+	  20: PUTL       	t14, R9
+	  21: INCEIPL       	$4
+
+	0x25472368:  7F89E910  subfe r28,r9,r29
+	  22: GETL       	R9, t16
+	  23: GETL       	R29, t18
+	  24: SBBL       	t16, t18  (-rCa-wCa)
+	  25: PUTL       	t18, R28
+	  26: INCEIPL       	$4
+
+	0x2547236C:  7F80D039  and. r0,r28,r26
+	  27: GETL       	R28, t20
+	  28: GETL       	R26, t22
+	  29: ANDL       	t20, t22
+	  30: PUTL       	t22, R0
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x0, CR
+	  33: INCEIPL       	$4
+
+	0x25472370:  40820B58  bc 4,2,0x25472EC8
+	  34: Jc02o       	$0x25472EC8
+
+
+
+. 176 25472354 32
+. 81 3E 00 34 30 14 FF FF 7F 40 A1 10 83 A9 00 00 31 3D FF FF 7F 89 E9 10 7F 80 D0 39 40 82 0B 58
+==== BB 177 (0x25472374) approx BBs exec'd 0 ====
+
+	0x25472374:  83560000  lwz r26,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R26
+	   3: INCEIPL       	$4
+
+	0x25472378:  80BE04E0  lwz r5,1248(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x4E0, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R5
+	   8: INCEIPL       	$4
+
+	0x2547237C:  7D9A2800  cmp cr3,r26,r5
+	   9: GETL       	R26, t8
+	  10: GETL       	R5, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x3, CR
+	  13: INCEIPL       	$4
+
+	0x25472380:  418E0940  bc 12,14,0x25472CC0
+	  14: Js14o       	$0x25472CC0
+
+
+
+. 177 25472374 16
+. 83 56 00 00 80 BE 04 E0 7D 9A 28 00 41 8E 09 40
+==== BB 178 (0x25472384) approx BBs exec'd 0 ====
+
+	0x25472384:  807E0084  lwz r3,132(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25472388:  38A00000  li r5,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547238C:  38C00000  li r6,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x25472390:  38E00000  li r7,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x25472394:  7C641B78  or r4,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0x25472398:  39000000  li r8,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R8
+	  19: INCEIPL       	$4
+
+	0x2547239C:  48006D59  bl 0x254790F4
+	  20: MOVL       	$0x254723A0, t14
+	  21: PUTL       	t14, LR
+	  22: JMPo-c       	$0x254790F4  ($4)
+
+
+
+. 178 25472384 28
+. 80 7E 00 84 38 A0 00 00 38 C0 00 00 38 E0 00 00 7C 64 1B 78 39 00 00 00 48 00 6D 59
+==== BB 179 _dl_new_object(0x254790F4) approx BBs exec'd 0 ====
+
+	0x254790F4:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254790F8:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254790FC:  4801DF05  bl 0x25497000
+	   9: MOVL       	$0x25479100, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 179 254790F4 12
+. 94 21 FF D0 7C 08 02 A6 48 01 DF 05
+==== BB 180 (0x25479100) approx BBs exec'd 0 ====
+
+	0x25479100:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25479104:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25479108:  92C10008  stw r22,8(r1)
+	   8: GETL       	R22, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x8, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547910C:  92E1000C  stw r23,12(r1)
+	  13: GETL       	R23, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xC, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25479110:  7C771B78  or r23,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R23
+	  20: INCEIPL       	$4
+
+	0x25479114:  93010010  stw r24,16(r1)
+	  21: GETL       	R24, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25479118:  7C832378  or r3,r4,r4
+	  26: GETL       	R4, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x2547911C:  93210014  stw r25,20(r1)
+	  29: GETL       	R25, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x14, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0x25479120:  7CB82B78  or r24,r5,r5
+	  34: GETL       	R5, t26
+	  35: PUTL       	t26, R24
+	  36: INCEIPL       	$4
+
+	0x25479124:  93410018  stw r26,24(r1)
+	  37: GETL       	R26, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x18, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x25479128:  7CF63B78  or r22,r7,r7
+	  42: GETL       	R7, t32
+	  43: PUTL       	t32, R22
+	  44: INCEIPL       	$4
+
+	0x2547912C:  9361001C  stw r27,28(r1)
+	  45: GETL       	R27, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x1C, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0x25479130:  7C9A2378  or r26,r4,r4
+	  50: GETL       	R4, t38
+	  51: PUTL       	t38, R26
+	  52: INCEIPL       	$4
+
+	0x25479134:  93810020  stw r28,32(r1)
+	  53: GETL       	R28, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x20, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x25479138:  7D194378  or r25,r8,r8
+	  58: GETL       	R8, t44
+	  59: PUTL       	t44, R25
+	  60: INCEIPL       	$4
+
+	0x2547913C:  93E1002C  stw r31,44(r1)
+	  61: GETL       	R31, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x2C, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0x25479140:  7CDC3378  or r28,r6,r6
+	  66: GETL       	R6, t50
+	  67: PUTL       	t50, R28
+	  68: INCEIPL       	$4
+
+	0x25479144:  90010034  stw r0,52(r1)
+	  69: GETL       	R0, t52
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x34, t54
+	  72: STL       	t52, (t54)
+	  73: INCEIPL       	$4
+
+	0x25479148:  93A10024  stw r29,36(r1)
+	  74: GETL       	R29, t56
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x24, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0x2547914C:  48009E85  bl 0x25482FD0
+	  79: MOVL       	$0x25479150, t60
+	  80: PUTL       	t60, LR
+	  81: JMPo-c       	$0x25482FD0  ($4)
+
+
+
+. 180 25479100 80
+. 93 C1 00 28 7F C8 02 A6 92 C1 00 08 92 E1 00 0C 7C 77 1B 78 93 01 00 10 7C 83 23 78 93 21 00 14 7C B8 2B 78 93 41 00 18 7C F6 3B 78 93 61 00 1C 7C 9A 23 78 93 81 00 20 7D 19 43 78 93 E1 00 2C 7C DC 33 78 90 01 00 34 93 A1 00 24 48 00 9E 85
+==== BB 181 strlen(0x25482FD0) approx BBs exec'd 0 ====
+
+	0x25482FD0:  5464003A  rlwinm r4,r3,0,0,29
+	   0: GETL       	R3, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25482FD4:  3CE07F7F  lis r7,32639
+	   4: MOVL       	$0x7F7F0000, t2
+	   5: PUTL       	t2, R7
+	   6: INCEIPL       	$4
+
+	0x25482FD8:  54651EF8  rlwinm r5,r3,3,27,28
+	   7: GETL       	R3, t4
+	   8: ROLL       	$0x3, t4
+	   9: ANDL       	$0x18, t4
+	  10: PUTL       	t4, R5
+	  11: INCEIPL       	$4
+
+	0x25482FDC:  81040000  lwz r8,0(r4)
+	  12: GETL       	R4, t6
+	  13: LDL       	(t6), t8
+	  14: PUTL       	t8, R8
+	  15: INCEIPL       	$4
+
+	0x25482FE0:  3920FFFF  li r9,-1
+	  16: MOVL       	$0xFFFFFFFF, t10
+	  17: PUTL       	t10, R9
+	  18: INCEIPL       	$4
+
+	0x25482FE4:  38E77F7F  addi r7,r7,32639
+	  19: MOVL       	$0x7F7F7F7F, t12
+	  20: PUTL       	t12, R7
+	  21: INCEIPL       	$4
+
+	0x25482FE8:  7D292C30  srw r9,r9,r5
+	  22: GETL       	R9, t16
+	  23: GETL       	R5, t14
+	  24: SHRL       	t14, t16
+	  25: PUTL       	t16, R9
+	  26: INCEIPL       	$4
+
+	0x25482FEC:  7CE04038  and r0,r7,r8
+	  27: GETL       	R7, t18
+	  28: GETL       	R8, t20
+	  29: ANDL       	t18, t20
+	  30: PUTL       	t20, R0
+	  31: INCEIPL       	$4
+
+	0x25482FF0:  7CEA4378  or r10,r7,r8
+	  32: GETL       	R7, t22
+	  33: GETL       	R8, t24
+	  34: ORL       	t24, t22
+	  35: PUTL       	t22, R10
+	  36: INCEIPL       	$4
+
+	0x25482FF4:  7C003A14  add r0,r0,r7
+	  37: GETL       	R0, t26
+	  38: GETL       	R7, t28
+	  39: ADDL       	t26, t28
+	  40: PUTL       	t28, R0
+	  41: INCEIPL       	$4
+
+	0x25482FF8:  7D4000F8  nor r0,r10,r0
+	  42: GETL       	R10, t30
+	  43: GETL       	R0, t32
+	  44: ORL       	t32, t30
+	  45: NOTL       	t30
+	  46: PUTL       	t30, R0
+	  47: INCEIPL       	$4
+
+	0x25482FFC:  7C084839  and. r8,r0,r9
+	  48: GETL       	R0, t34
+	  49: GETL       	R9, t36
+	  50: ANDL       	t34, t36
+	  51: PUTL       	t36, R8
+	  52: CMP0L       	t36, t38  (-rSo)
+	  53: ICRFL       	t38, $0x0, CR
+	  54: INCEIPL       	$4
+
+	0x25483000:  7C601120  mtcrf 0x1,r3
+	  55: GETL       	R3, t40
+	  56: ICRFL       	t40, $0x7, CR
+	  57: INCEIPL       	$4
+
+	0x25483004:  40820070  bc 4,2,0x25483074
+	  58: Jc02o       	$0x25483074
+
+
+
+. 181 25482FD0 56
+. 54 64 00 3A 3C E0 7F 7F 54 65 1E F8 81 04 00 00 39 20 FF FF 38 E7 7F 7F 7D 29 2C 30 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 40 00 F8 7C 08 48 39 7C 60 11 20 40 82 00 70
+==== BB 182 (0x25483074) approx BBs exec'd 0 ====
+
+	0x25483074:  7D0B0034  cntlzw r11,r8
+	   0: GETL       	R8, t0
+	   1: CNTLZL       	t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0x25483078:  7C032050  subf r0,r3,r4
+	   4: GETL       	R3, t2
+	   5: GETL       	R4, t4
+	   6: SUBL       	t2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2548307C:  556BE8FE  rlwinm r11,r11,29,3,31
+	   9: GETL       	R11, t6
+	  10: SHRL       	$0x3, t6
+	  11: PUTL       	t6, R11
+	  12: INCEIPL       	$4
+
+	0x25483080:  7C605A14  add r3,r0,r11
+	  13: GETL       	R0, t8
+	  14: GETL       	R11, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0x25483084:  4E800020  blr
+	  18: GETL       	LR, t12
+	  19: JMPo-r       	t12  ($4)
+
+
+
+. 182 25483074 20
+. 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+==== BB 183 (0x25479150) approx BBs exec'd 0 ====
+
+	0x25479150:  38800001  li r4,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25479154:  7C691B78  or r9,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x25479158:  3863024D  addi r3,r3,589
+	   6: GETL       	R3, t4
+	   7: ADDL       	$0x24D, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0x2547915C:  3B690001  addi r27,r9,1
+	  10: GETL       	R9, t6
+	  11: ADDL       	$0x1, t6
+	  12: PUTL       	t6, R27
+	  13: INCEIPL       	$4
+
+	0x25479160:  4801E8CD  bl 0x25497A2C
+	  14: MOVL       	$0x25479164, t8
+	  15: PUTL       	t8, LR
+	  16: JMPo-c       	$0x25497A2C  ($4)
+
+
+
+. 183 25479150 20
+. 38 80 00 01 7C 69 1B 78 38 63 02 4D 3B 69 00 01 48 01 E8 CD
+==== BB 184 (0x25497A2C) approx BBs exec'd 0 ====
+
+	0x25497A2C:  4BFE848C  b 0x2547FEB8
+	   0: JMPo       	$0x2547FEB8  ($4)
+
+
+
+. 184 25497A2C 4
+. 4B FE 84 8C
+==== BB 185 calloc(0x2547FEB8) approx BBs exec'd 0 ====
+
+	0x2547FEB8:  7C6321D6  mullw r3,r3,r4
+	   0: GETL       	R3, t0
+	   1: GETL       	R4, t2
+	   2: MULL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547FEBC:  7CA802A6  mflr r5
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547FEC0:  9421FFF0  stwu r1,-16(r1)
+	   8: GETL       	R1, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xFFFFFFF0, t8
+	  11: PUTL       	t8, R1
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547FEC4:  90A10014  stw r5,20(r1)
+	  14: GETL       	R5, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x2547FEC8:  48017B5D  bl 0x25497A24
+	  19: MOVL       	$0x2547FECC, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 185 2547FEB8 20
+. 7C 63 21 D6 7C A8 02 A6 94 21 FF F0 90 A1 00 14 48 01 7B 5D
+==== BB 186 (0x25497A24) approx BBs exec'd 0 ====
+
+	0x25497A24:  4BFE846C  b 0x2547FE90
+	   0: JMPo       	$0x2547FE90  ($4)
+
+
+
+. 186 25497A24 4
+. 4B FE 84 6C
+==== BB 187 malloc(0x2547FE90) approx BBs exec'd 0 ====
+
+	0x2547FE90:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547FE94:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547FE98:  90810014  stw r4,20(r1)
+	   9: GETL       	R4, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547FE9C:  7C641B78  or r4,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0x2547FEA0:  38600008  li r3,8
+	  17: MOVL       	$0x8, t12
+	  18: PUTL       	t12, R3
+	  19: INCEIPL       	$4
+
+	0x2547FEA4:  48017B79  bl 0x25497A1C
+	  20: MOVL       	$0x2547FEA8, t14
+	  21: PUTL       	t14, LR
+	  22: JMPo-c       	$0x25497A1C  ($4)
+
+
+
+. 187 2547FE90 24
+. 7C 88 02 A6 94 21 FF F0 90 81 00 14 7C 64 1B 78 38 60 00 08 48 01 7B 79
+==== BB 188 (0x25497A1C) approx BBs exec'd 0 ====
+
+	0x25497A1C:  4BFE832C  b 0x2547FD48
+	   0: JMPo       	$0x2547FD48  ($4)
+
+
+
+. 188 25497A1C 4
+. 4B FE 83 2C
+==== BB 189 __libc_memalign(0x2547FD48) approx BBs exec'd 0 ====
+
+	0x2547FD48:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547FD4C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547FD50:  480172B1  bl 0x25497000
+	   9: MOVL       	$0x2547FD54, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 189 2547FD48 12
+. 94 21 FF E0 7C 08 02 A6 48 01 72 B1
+==== BB 190 (0x2547FD54) approx BBs exec'd 0 ====
+
+	0x2547FD54:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547FD58:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547FD5C:  9361000C  stw r27,12(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547FD60:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547FD64:  7C6A1B78  or r10,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R10
+	  20: INCEIPL       	$4
+
+	0x2547FD68:  93810010  stw r28,16(r1)
+	  21: GETL       	R28, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547FD6C:  38A00003  li r5,3
+	  26: MOVL       	$0x3, t20
+	  27: PUTL       	t20, R5
+	  28: INCEIPL       	$4
+
+	0x2547FD70:  837E0418  lwz r27,1048(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x418, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R27
+	  33: INCEIPL       	$4
+
+	0x2547FD74:  38C00022  li r6,34
+	  34: MOVL       	$0x22, t26
+	  35: PUTL       	t26, R6
+	  36: INCEIPL       	$4
+
+	0x2547FD78:  93A10014  stw r29,20(r1)
+	  37: GETL       	R29, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x14, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x2547FD7C:  38E0FFFF  li r7,-1
+	  42: MOVL       	$0xFFFFFFFF, t32
+	  43: PUTL       	t32, R7
+	  44: INCEIPL       	$4
+
+	0x2547FD80:  817B0000  lwz r11,0(r27)
+	  45: GETL       	R27, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R11
+	  48: INCEIPL       	$4
+
+	0x2547FD84:  7FA300D0  neg r29,r3
+	  49: GETL       	R3, t38
+	  50: NEGL       	t38
+	  51: PUTL       	t38, R29
+	  52: INCEIPL       	$4
+
+	0x2547FD88:  93E1001C  stw r31,28(r1)
+	  53: GETL       	R31, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x1C, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x2547FD8C:  39000000  li r8,0
+	  58: MOVL       	$0x0, t44
+	  59: PUTL       	t44, R8
+	  60: INCEIPL       	$4
+
+	0x2547FD90:  2F8B0000  cmpi cr7,r11,0
+	  61: GETL       	R11, t46
+	  62: CMP0L       	t46, t48  (-rSo)
+	  63: ICRFL       	t48, $0x7, CR
+	  64: INCEIPL       	$4
+
+	0x2547FD94:  819E0500  lwz r12,1280(r30)
+	  65: GETL       	R30, t50
+	  66: ADDL       	$0x500, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R12
+	  69: INCEIPL       	$4
+
+	0x2547FD98:  38600000  li r3,0
+	  70: MOVL       	$0x0, t54
+	  71: PUTL       	t54, R3
+	  72: INCEIPL       	$4
+
+	0x2547FD9C:  7C9C2378  or r28,r4,r4
+	  73: GETL       	R4, t56
+	  74: PUTL       	t56, R28
+	  75: INCEIPL       	$4
+
+	0x2547FDA0:  83FE041C  lwz r31,1052(r30)
+	  76: GETL       	R30, t58
+	  77: ADDL       	$0x41C, t58
+	  78: LDL       	(t58), t60
+	  79: PUTL       	t60, R31
+	  80: INCEIPL       	$4
+
+	0x2547FDA4:  409E0024  bc 4,30,0x2547FDC8
+	  81: Jc30o       	$0x2547FDC8
+
+
+
+. 190 2547FD54 84
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 90 01 00 24 7C 6A 1B 78 93 81 00 10 38 A0 00 03 83 7E 04 18 38 C0 00 22 93 A1 00 14 38 E0 FF FF 81 7B 00 00 7F A3 00 D0 93 E1 00 1C 39 00 00 00 2F 8B 00 00 81 9E 05 00 38 60 00 00 7C 9C 23 78 83 FE 04 1C 40 9E 00 24
+==== BB 191 (0x2547FDA8) approx BBs exec'd 0 ====
+
+	0x2547FDA8:  813E04F4  lwz r9,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547FDAC:  919F0000  stw r12,0(r31)
+	   5: GETL       	R12, t4
+	   6: GETL       	R31, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547FDB0:  81690004  lwz r11,4(r9)
+	   9: GETL       	R9, t8
+	  10: ADDL       	$0x4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0x2547FDB4:  7C8B6214  add r4,r11,r12
+	  14: GETL       	R11, t12
+	  15: GETL       	R12, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R4
+	  18: INCEIPL       	$4
+
+	0x2547FDB8:  3924FFFF  addi r9,r4,-1
+	  19: GETL       	R4, t16
+	  20: ADDL       	$0xFFFFFFFF, t16
+	  21: PUTL       	t16, R9
+	  22: INCEIPL       	$4
+
+	0x2547FDBC:  7C8B00D0  neg r4,r11
+	  23: GETL       	R11, t18
+	  24: NEGL       	t18
+	  25: PUTL       	t18, R4
+	  26: INCEIPL       	$4
+
+	0x2547FDC0:  7D2B2038  and r11,r9,r4
+	  27: GETL       	R9, t20
+	  28: GETL       	R4, t22
+	  29: ANDL       	t20, t22
+	  30: PUTL       	t22, R11
+	  31: INCEIPL       	$4
+
+	0x2547FDC4:  917B0000  stw r11,0(r27)
+	  32: GETL       	R11, t24
+	  33: GETL       	R27, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547FDC8:  813F0000  lwz r9,0(r31)
+	  36: GETL       	R31, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R9
+	  39: INCEIPL       	$4
+
+	0x2547FDCC:  7C895214  add r4,r9,r10
+	  40: GETL       	R9, t32
+	  41: GETL       	R10, t34
+	  42: ADDL       	t32, t34
+	  43: PUTL       	t34, R4
+	  44: INCEIPL       	$4
+
+	0x2547FDD0:  3804FFFF  addi r0,r4,-1
+	  45: GETL       	R4, t36
+	  46: ADDL       	$0xFFFFFFFF, t36
+	  47: PUTL       	t36, R0
+	  48: INCEIPL       	$4
+
+	0x2547FDD4:  7C0AE838  and r10,r0,r29
+	  49: GETL       	R0, t38
+	  50: GETL       	R29, t40
+	  51: ANDL       	t38, t40
+	  52: PUTL       	t40, R10
+	  53: INCEIPL       	$4
+
+	0x2547FDD8:  7D8AE214  add r12,r10,r28
+	  54: GETL       	R10, t42
+	  55: GETL       	R28, t44
+	  56: ADDL       	t42, t44
+	  57: PUTL       	t44, R12
+	  58: INCEIPL       	$4
+
+	0x2547FDDC:  915F0000  stw r10,0(r31)
+	  59: GETL       	R10, t46
+	  60: GETL       	R31, t48
+	  61: STL       	t46, (t48)
+	  62: INCEIPL       	$4
+
+	0x2547FDE0:  7C0C5840  cmpl cr0,r12,r11
+	  63: GETL       	R12, t50
+	  64: GETL       	R11, t52
+	  65: CMPUL       	t50, t52, t54  (-rSo)
+	  66: ICRFL       	t54, $0x0, CR
+	  67: INCEIPL       	$4
+
+	0x2547FDE4:  4080003C  bc 4,0,0x2547FE20
+	  68: Jc00o       	$0x2547FE20
+
+
+
+. 191 2547FDA8 64
+. 81 3E 04 F4 91 9F 00 00 81 69 00 04 7C 8B 62 14 39 24 FF FF 7C 8B 00 D0 7D 2B 20 38 91 7B 00 00 81 3F 00 00 7C 89 52 14 38 04 FF FF 7C 0A E8 38 7D 8A E2 14 91 5F 00 00 7C 0C 58 40 40 80 00 3C
+==== BB 192 (0x2547FDE8) approx BBs exec'd 0 ====
+
+	0x2547FDE8:  807F0000  lwz r3,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x2547FDEC:  80C10024  lwz r6,36(r1)
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x24, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x2547FDF0:  7D03E214  add r8,r3,r28
+	   9: GETL       	R3, t8
+	  10: GETL       	R28, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R8
+	  13: INCEIPL       	$4
+
+	0x2547FDF4:  80FE0420  lwz r7,1056(r30)
+	  14: GETL       	R30, t12
+	  15: ADDL       	$0x420, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R7
+	  18: INCEIPL       	$4
+
+	0x2547FDF8:  911F0000  stw r8,0(r31)
+	  19: GETL       	R8, t16
+	  20: GETL       	R31, t18
+	  21: STL       	t16, (t18)
+	  22: INCEIPL       	$4
+
+	0x2547FDFC:  7CC803A6  mtlr r6
+	  23: GETL       	R6, t20
+	  24: PUTL       	t20, LR
+	  25: INCEIPL       	$4
+
+	0x2547FE00:  8361000C  lwz r27,12(r1)
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0xC, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R27
+	  30: INCEIPL       	$4
+
+	0x2547FE04:  83810010  lwz r28,16(r1)
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x10, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R28
+	  35: INCEIPL       	$4
+
+	0x2547FE08:  83A10014  lwz r29,20(r1)
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x14, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R29
+	  40: INCEIPL       	$4
+
+	0x2547FE0C:  83C10018  lwz r30,24(r1)
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x18, t34
+	  43: LDL       	(t34), t36
+	  44: PUTL       	t36, R30
+	  45: INCEIPL       	$4
+
+	0x2547FE10:  83E1001C  lwz r31,28(r1)
+	  46: GETL       	R1, t38
+	  47: ADDL       	$0x1C, t38
+	  48: LDL       	(t38), t40
+	  49: PUTL       	t40, R31
+	  50: INCEIPL       	$4
+
+	0x2547FE14:  38210020  addi r1,r1,32
+	  51: GETL       	R1, t42
+	  52: ADDL       	$0x20, t42
+	  53: PUTL       	t42, R1
+	  54: INCEIPL       	$4
+
+	0x2547FE18:  90670000  stw r3,0(r7)
+	  55: GETL       	R3, t44
+	  56: GETL       	R7, t46
+	  57: STL       	t44, (t46)
+	  58: INCEIPL       	$4
+
+	0x2547FE1C:  4E800020  blr
+	  59: GETL       	LR, t48
+	  60: JMPo-r       	t48  ($4)
+
+
+
+. 192 2547FDE8 56
+. 80 7F 00 00 80 C1 00 24 7D 03 E2 14 80 FE 04 20 91 1F 00 00 7C C8 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 90 67 00 00 4E 80 00 20
+==== BB 193 (0x2547FEA8) approx BBs exec'd 0 ====
+
+	0x2547FEA8:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547FEAC:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x2547FEB0:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x2547FEB4:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 193 2547FEA8 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 194 (0x2547FECC) approx BBs exec'd 0 ====
+
+	0x2547FECC:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547FED0:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x2547FED4:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x2547FED8:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 194 2547FECC 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 195 (0x25479164) approx BBs exec'd 0 ====
+
+	0x25479164:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25479168:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547916C:  41820174  bc 12,2,0x254792E0
+	   8: Js02o       	$0x254792E0
+
+
+
+. 195 25479164 12
+. 7C 7F 1B 79 38 60 00 00 41 82 01 74
+==== BB 196 (0x25479170) approx BBs exec'd 0 ====
+
+	0x25479170:  3BBF0240  addi r29,r31,576
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x240, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x25479174:  7F44D378  or r4,r26,r26
+	   4: GETL       	R26, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0x25479178:  7F65DB78  or r5,r27,r27
+	   7: GETL       	R27, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x2547917C:  93FF0014  stw r31,20(r31)
+	  10: GETL       	R31, t6
+	  11: GETL       	R31, t8
+	  12: ADDL       	$0x14, t8
+	  13: STL       	t6, (t8)
+	  14: INCEIPL       	$4
+
+	0x25479180:  93BF001C  stw r29,28(r31)
+	  15: GETL       	R29, t10
+	  16: GETL       	R31, t12
+	  17: ADDL       	$0x1C, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0x25479184:  387F024C  addi r3,r31,588
+	  20: GETL       	R31, t14
+	  21: ADDL       	$0x24C, t14
+	  22: PUTL       	t14, R3
+	  23: INCEIPL       	$4
+
+	0x25479188:  4800AA59  bl 0x25483BE0
+	  24: MOVL       	$0x2547918C, t16
+	  25: PUTL       	t16, LR
+	  26: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 196 25479170 28
+. 3B BF 02 40 7F 44 D3 78 7F 65 DB 78 93 FF 00 14 93 BF 00 1C 38 7F 02 4C 48 00 AA 59
+==== BB 197 (0x25483C68) approx BBs exec'd 0 ====
+
+	0x25483C68:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x25483C6C:  4186001C  bc 12,6,0x25483C88
+	   4: Js06o       	$0x25483C88
+
+
+
+. 197 25483C68 8
+. 2C 85 00 00 41 86 00 1C
+==== BB 198 (0x25483C70) approx BBs exec'd 0 ====
+
+	0x25483C70:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x25483C74:  88FD0000  lbz r7,0(r29)
+	   3: GETL       	R29, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R7
+	   6: INCEIPL       	$4
+
+	0x25483C78:  3BBD0001  addi r29,r29,1
+	   7: GETL       	R29, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R29
+	  10: INCEIPL       	$4
+
+	0x25483C7C:  98FF0000  stb r7,0(r31)
+	  11: GETL       	R7, t8
+	  12: GETL       	R31, t10
+	  13: STB       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25483C80:  3BFF0001  addi r31,r31,1
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R31
+	  18: INCEIPL       	$4
+
+	0x25483C84:  4200FFF0  bc 16,0,0x25483C74
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0x25483C88
+	  23: JMPo       	$0x25483C74  ($4)
+
+
+
+. 198 25483C70 24
+. 7C A9 03 A6 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+==== BB 199 (0x2547918C) approx BBs exec'd 0 ====
+
+	0x2547918C:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x25479190:  90BD0008  stw r5,8(r29)
+	   3: GETL       	R5, t2
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x25479194:  1D590018  mulli r10,r25,24
+	   8: GETL       	R25, t6
+	   9: MULL       	$0x18, t6
+	  10: PUTL       	t6, R10
+	  11: INCEIPL       	$4
+
+	0x25479198:  80DF0180  lwz r6,384(r31)
+	  12: GETL       	R31, t8
+	  13: ADDL       	$0x180, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R6
+	  16: INCEIPL       	$4
+
+	0x2547919C:  3880FFFF  li r4,-1
+	  17: MOVL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, R4
+	  19: INCEIPL       	$4
+
+	0x254791A0:  80BE04C8  lwz r5,1224(r30)
+	  20: GETL       	R30, t14
+	  21: ADDL       	$0x4C8, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R5
+	  24: INCEIPL       	$4
+
+	0x254791A4:  5306F002  rlwimi r6,r24,30,0,1
+	  25: GETL       	R6, t18
+	  26: GETL       	R24, t20
+	  27: ROLL       	$0x1E, t20
+	  28: ANDL       	$0xC0000000, t20
+	  29: ANDL       	$0x3FFFFFFF, t18
+	  30: ORL       	t18, t20
+	  31: PUTL       	t20, R6
+	  32: INCEIPL       	$4
+
+	0x254791A8:  939F0168  stw r28,360(r31)
+	  33: GETL       	R28, t22
+	  34: GETL       	R31, t24
+	  35: ADDL       	$0x168, t24
+	  36: STL       	t22, (t24)
+	  37: INCEIPL       	$4
+
+	0x254791AC:  90DF0180  stw r6,384(r31)
+	  38: GETL       	R6, t26
+	  39: GETL       	R31, t28
+	  40: ADDL       	$0x180, t28
+	  41: STL       	t26, (t28)
+	  42: INCEIPL       	$4
+
+	0x254791B0:  38DF01AC  addi r6,r31,428
+	  43: GETL       	R31, t30
+	  44: ADDL       	$0x1AC, t30
+	  45: PUTL       	t30, R6
+	  46: INCEIPL       	$4
+
+	0x254791B4:  7D6A282E  lwzx r11,r10,r5
+	  47: GETL       	R5, t32
+	  48: GETL       	R10, t34
+	  49: ADDL       	t34, t32
+	  50: LDL       	(t32), t36
+	  51: PUTL       	t36, R11
+	  52: INCEIPL       	$4
+
+	0x254791B8:  907F0240  stw r3,576(r31)
+	  53: GETL       	R3, t38
+	  54: GETL       	R31, t40
+	  55: ADDL       	$0x240, t40
+	  56: STL       	t38, (t40)
+	  57: INCEIPL       	$4
+
+	0x254791BC:  38600004  li r3,4
+	  58: MOVL       	$0x4, t42
+	  59: PUTL       	t42, R3
+	  60: INCEIPL       	$4
+
+	0x254791C0:  2F8B0000  cmpi cr7,r11,0
+	  61: GETL       	R11, t44
+	  62: CMP0L       	t44, t46  (-rSo)
+	  63: ICRFL       	t46, $0x7, CR
+	  64: INCEIPL       	$4
+
+	0x254791C4:  909F022C  stw r4,556(r31)
+	  65: GETL       	R4, t48
+	  66: GETL       	R31, t50
+	  67: ADDL       	$0x22C, t50
+	  68: STL       	t48, (t50)
+	  69: INCEIPL       	$4
+
+	0x254791C8:  907F01BC  stw r3,444(r31)
+	  70: GETL       	R3, t52
+	  71: GETL       	R31, t54
+	  72: ADDL       	$0x1BC, t54
+	  73: STL       	t52, (t54)
+	  74: INCEIPL       	$4
+
+	0x254791CC:  38800000  li r4,0
+	  75: MOVL       	$0x0, t56
+	  76: PUTL       	t56, R4
+	  77: INCEIPL       	$4
+
+	0x254791D0:  92FF0004  stw r23,4(r31)
+	  78: GETL       	R23, t58
+	  79: GETL       	R31, t60
+	  80: ADDL       	$0x4, t60
+	  81: STL       	t58, (t60)
+	  82: INCEIPL       	$4
+
+	0x254791D4:  933F0018  stw r25,24(r31)
+	  83: GETL       	R25, t62
+	  84: GETL       	R31, t64
+	  85: ADDL       	$0x18, t64
+	  86: STL       	t62, (t64)
+	  87: INCEIPL       	$4
+
+	0x254791D8:  90DF01C0  stw r6,448(r31)
+	  88: GETL       	R6, t66
+	  89: GETL       	R31, t68
+	  90: ADDL       	$0x1C0, t68
+	  91: STL       	t66, (t68)
+	  92: INCEIPL       	$4
+
+	0x254791DC:  409E013C  bc 4,30,0x25479318
+	  93: Jc30o       	$0x25479318
+
+
+
+. 199 2547918C 84
+. 38 A0 00 01 90 BD 00 08 1D 59 00 18 80 DF 01 80 38 80 FF FF 80 BE 04 C8 53 06 F0 02 93 9F 01 68 90 DF 01 80 38 DF 01 AC 7D 6A 28 2E 90 7F 02 40 38 60 00 04 2F 8B 00 00 90 9F 02 2C 90 7F 01 BC 38 80 00 00 92 FF 00 04 93 3F 00 18 90 DF 01 C0 40 9E 01 3C
+==== BB 200 (0x254791E0) approx BBs exec'd 0 ====
+
+	0x254791E0:  7FEA292E  stwx r31,r10,r5
+	   0: GETL       	R5, t0
+	   1: GETL       	R10, t2
+	   2: ADDL       	t2, t0
+	   3: GETL       	R31, t4
+	   4: STL       	t4, (t0)
+	   5: INCEIPL       	$4
+
+	0x254791E4:  2F1C0000  cmpi cr6,r28,0
+	   6: GETL       	R28, t6
+	   7: CMP0L       	t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x254791E8:  7D4A2A14  add r10,r10,r5
+	  10: GETL       	R10, t10
+	  11: GETL       	R5, t12
+	  12: ADDL       	t10, t12
+	  13: PUTL       	t12, R10
+	  14: INCEIPL       	$4
+
+	0x254791EC:  81650198  lwz r11,408(r5)
+	  15: GETL       	R5, t14
+	  16: ADDL       	$0x198, t14
+	  17: LDL       	(t14), t16
+	  18: PUTL       	t16, R11
+	  19: INCEIPL       	$4
+
+	0x254791F0:  832A0004  lwz r25,4(r10)
+	  20: GETL       	R10, t18
+	  21: ADDL       	$0x4, t18
+	  22: LDL       	(t18), t20
+	  23: PUTL       	t20, R25
+	  24: INCEIPL       	$4
+
+	0x254791F4:  8185019C  lwz r12,412(r5)
+	  25: GETL       	R5, t22
+	  26: ADDL       	$0x19C, t22
+	  27: LDL       	(t22), t24
+	  28: PUTL       	t24, R12
+	  29: INCEIPL       	$4
+
+	0x254791F8:  3B190001  addi r24,r25,1
+	  30: GETL       	R25, t26
+	  31: ADDL       	$0x1, t26
+	  32: PUTL       	t26, R24
+	  33: INCEIPL       	$4
+
+	0x254791FC:  310C0001  addic r8,r12,1
+	  34: GETL       	R12, t28
+	  35: ADCL       	$0x1, t28  (-wCa)
+	  36: PUTL       	t28, R8
+	  37: INCEIPL       	$4
+
+	0x25479200:  7CEB0194  addze r7,r11
+	  38: GETL       	R11, t30
+	  39: ADCL       	$0x0, t30  (-rCa-wCa)
+	  40: PUTL       	t30, R7
+	  41: INCEIPL       	$4
+
+	0x25479204:  930A0004  stw r24,4(r10)
+	  42: GETL       	R24, t32
+	  43: GETL       	R10, t34
+	  44: ADDL       	$0x4, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0x25479208:  90E50198  stw r7,408(r5)
+	  47: GETL       	R7, t36
+	  48: GETL       	R5, t38
+	  49: ADDL       	$0x198, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0x2547920C:  9105019C  stw r8,412(r5)
+	  52: GETL       	R8, t40
+	  53: GETL       	R5, t42
+	  54: ADDL       	$0x19C, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0x25479210:  40BA000C  bc 5,26,0x2547921C
+	  57: Jc26o       	$0x2547921C
+
+
+
+. 200 254791E0 52
+. 7F EA 29 2E 2F 1C 00 00 7D 4A 2A 14 81 65 01 98 83 2A 00 04 81 85 01 9C 3B 19 00 01 31 0C 00 01 7C EB 01 94 93 0A 00 04 90 E5 01 98 91 05 01 9C 40 BA 00 0C
+==== BB 201 (0x25479214) approx BBs exec'd 0 ====
+
+	0x25479214:  48000164  b 0x25479378
+	   0: JMPo       	$0x25479378  ($4)
+
+
+
+. 201 25479214 4
+. 48 00 01 64
+==== BB 202 (0x25479378) approx BBs exec'd 0 ====
+
+	0x25479378:  7FFCFB78  or r28,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0x2547937C:  4BFFFEAC  b 0x25479228
+	   3: JMPo       	$0x25479228  ($4)
+
+
+
+. 202 25479378 8
+. 7F FC FB 78 4B FF FE AC
+==== BB 203 (0x25479228) approx BBs exec'd 0 ====
+
+	0x25479228:  2F840000  cmpi cr7,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547922C:  393C0158  addi r9,r28,344
+	   4: GETL       	R28, t4
+	   5: ADDL       	$0x158, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25479230:  419E0010  bc 12,30,0x25479240
+	   8: Js30o       	$0x25479240
+
+
+
+. 203 25479228 12
+. 2F 84 00 00 39 3C 01 58 41 9E 00 10
+==== BB 204 (0x25479240) approx BBs exec'd 0 ====
+
+	0x25479240:  56CCEFFE  rlwinm r12,r22,29,31,31
+	   0: GETL       	R22, t0
+	   1: ROLL       	$0x1D, t0
+	   2: ANDL       	$0x1, t0
+	   3: PUTL       	t0, R12
+	   4: INCEIPL       	$4
+
+	0x25479244:  7D8B2039  and. r11,r12,r4
+	   5: GETL       	R12, t2
+	   6: GETL       	R4, t4
+	   7: ANDL       	t2, t4
+	   8: PUTL       	t4, R11
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25479248:  41820010  bc 12,2,0x25479258
+	  12: Js02o       	$0x25479258
+
+
+
+. 204 25479240 12
+. 56 CC EF FE 7D 8B 20 39 41 82 00 10
+==== BB 205 (0x25479258) approx BBs exec'd 0 ====
+
+	0x25479258:  549A103A  rlwinm r26,r4,2,0,29
+	   0: GETL       	R4, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547925C:  7D3A312E  stwx r9,r26,r6
+	   4: GETL       	R6, t2
+	   5: GETL       	R26, t4
+	   6: ADDL       	t4, t2
+	   7: GETL       	R9, t6
+	   8: STL       	t6, (t2)
+	   9: INCEIPL       	$4
+
+	0x25479260:  3B7F0158  addi r27,r31,344
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x158, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0x25479264:  937F01C4  stw r27,452(r31)
+	  14: GETL       	R27, t10
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x1C4, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x25479268:  8BB70000  lbz r29,0(r23)
+	  19: GETL       	R23, t14
+	  20: LDB       	(t14), t16
+	  21: PUTL       	t16, R29
+	  22: INCEIPL       	$4
+
+	0x2547926C:  2F1D0000  cmpi cr6,r29,0
+	  23: GETL       	R29, t18
+	  24: CMP0L       	t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x6, CR
+	  26: INCEIPL       	$4
+
+	0x25479270:  419A006C  bc 12,26,0x254792DC
+	  27: Js26o       	$0x254792DC
+
+
+
+. 205 25479258 28
+. 54 9A 10 3A 7D 3A 31 2E 3B 7F 01 58 93 7F 01 C4 8B B7 00 00 2F 1D 00 00 41 9A 00 6C
+==== BB 206 (0x254792DC) approx BBs exec'd 0 ====
+
+	0x254792DC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254792E0:  83810034  lwz r28,52(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x34, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x254792E4:  82C10008  lwz r22,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R22
+	  12: INCEIPL       	$4
+
+	0x254792E8:  7F8803A6  mtlr r28
+	  13: GETL       	R28, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x254792EC:  82E1000C  lwz r23,12(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0xC, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R23
+	  20: INCEIPL       	$4
+
+	0x254792F0:  83010010  lwz r24,16(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R24
+	  25: INCEIPL       	$4
+
+	0x254792F4:  83210014  lwz r25,20(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R25
+	  30: INCEIPL       	$4
+
+	0x254792F8:  83410018  lwz r26,24(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x18, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R26
+	  35: INCEIPL       	$4
+
+	0x254792FC:  8361001C  lwz r27,28(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R27
+	  40: INCEIPL       	$4
+
+	0x25479300:  83810020  lwz r28,32(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x20, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R28
+	  45: INCEIPL       	$4
+
+	0x25479304:  83A10024  lwz r29,36(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x24, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R29
+	  50: INCEIPL       	$4
+
+	0x25479308:  83C10028  lwz r30,40(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x28, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R30
+	  55: INCEIPL       	$4
+
+	0x2547930C:  83E1002C  lwz r31,44(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x2C, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R31
+	  60: INCEIPL       	$4
+
+	0x25479310:  38210030  addi r1,r1,48
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x30, t48
+	  63: PUTL       	t48, R1
+	  64: INCEIPL       	$4
+
+	0x25479314:  4E800020  blr
+	  65: GETL       	LR, t50
+	  66: JMPo-r       	t50  ($4)
+
+
+
+. 206 254792DC 60
+. 7F E3 FB 78 83 81 00 34 82 C1 00 08 7F 88 03 A6 82 E1 00 0C 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 207 (0x254723A0) approx BBs exec'd 0 ====
+
+	0x254723A0:  828E0000  lwz r20,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R20
+	   3: INCEIPL       	$4
+
+	0x254723A4:  2D940000  cmpi cr3,r20,0
+	   4: GETL       	R20, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x3, CR
+	   7: INCEIPL       	$4
+
+	0x254723A8:  418E0C90  bc 12,14,0x25473038
+	   8: Js14o       	$0x25473038
+
+
+
+. 207 254723A0 12
+. 82 8E 00 00 2D 94 00 00 41 8E 0C 90
+==== BB 208 (0x254723AC) approx BBs exec'd 0 ====
+
+	0x254723AC:  80D60000  lwz r6,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x254723B0:  9314014C  stw r24,332(r20)
+	   4: GETL       	R24, t4
+	   5: GETL       	R20, t6
+	   6: ADDL       	$0x14C, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x254723B4:  90D40150  stw r6,336(r20)
+	   9: GETL       	R6, t8
+	  10: GETL       	R20, t10
+	  11: ADDL       	$0x150, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x254723B8:  B2F40154  sth r23,340(r20)
+	  14: GETL       	R23, t12
+	  15: GETL       	R20, t14
+	  16: ADDL       	$0x154, t14
+	  17: STW       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x254723BC:  56EC2834  rlwinm r12,r23,5,0,26
+	  19: GETL       	R23, t16
+	  20: SHLL       	$0x5, t16
+	  21: PUTL       	t16, R12
+	  22: INCEIPL       	$4
+
+	0x254723C0:  81140178  lwz r8,376(r20)
+	  23: GETL       	R20, t18
+	  24: ADDL       	$0x178, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R8
+	  27: INCEIPL       	$4
+
+	0x254723C4:  7C8CC214  add r4,r12,r24
+	  28: GETL       	R12, t22
+	  29: GETL       	R24, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R4
+	  32: INCEIPL       	$4
+
+	0x254723C8:  80F4017C  lwz r7,380(r20)
+	  33: GETL       	R20, t26
+	  34: ADDL       	$0x17C, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R7
+	  37: INCEIPL       	$4
+
+	0x254723CC:  7E04C040  cmpl cr4,r4,r24
+	  38: GETL       	R4, t30
+	  39: GETL       	R24, t32
+	  40: CMPUL       	t30, t32, t34  (-rSo)
+	  41: ICRFL       	t34, $0x4, CR
+	  42: INCEIPL       	$4
+
+	0x254723D0:  39280001  addi r9,r8,1
+	  43: GETL       	R8, t36
+	  44: ADDL       	$0x1, t36
+	  45: PUTL       	t36, R9
+	  46: INCEIPL       	$4
+
+	0x254723D4:  3B270001  addi r25,r7,1
+	  47: GETL       	R7, t38
+	  48: ADDL       	$0x1, t38
+	  49: PUTL       	t38, R25
+	  50: INCEIPL       	$4
+
+	0x254723D8:  39000000  li r8,0
+	  51: MOVL       	$0x0, t40
+	  52: PUTL       	t40, R8
+	  53: INCEIPL       	$4
+
+	0x254723DC:  38E00000  li r7,0
+	  54: MOVL       	$0x0, t42
+	  55: PUTL       	t42, R7
+	  56: INCEIPL       	$4
+
+	0x254723E0:  3B80FFFF  li r28,-1
+	  57: MOVL       	$0xFFFFFFFF, t44
+	  58: PUTL       	t44, R28
+	  59: INCEIPL       	$4
+
+	0x254723E4:  7F0AC378  or r10,r24,r24
+	  60: GETL       	R24, t46
+	  61: PUTL       	t46, R10
+	  62: INCEIPL       	$4
+
+	0x254723E8:  939401A0  stw r28,416(r20)
+	  63: GETL       	R28, t48
+	  64: GETL       	R20, t50
+	  65: ADDL       	$0x1A0, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x254723EC:  91340178  stw r9,376(r20)
+	  68: GETL       	R9, t52
+	  69: GETL       	R20, t54
+	  70: ADDL       	$0x178, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x254723F0:  9334017C  stw r25,380(r20)
+	  73: GETL       	R25, t56
+	  74: GETL       	R20, t58
+	  75: ADDL       	$0x17C, t58
+	  76: STL       	t56, (t58)
+	  77: INCEIPL       	$4
+
+	0x254723F4:  911401A4  stw r8,420(r20)
+	  78: GETL       	R8, t60
+	  79: GETL       	R20, t62
+	  80: ADDL       	$0x1A4, t62
+	  81: STL       	t60, (t62)
+	  82: INCEIPL       	$4
+
+	0x254723F8:  90F401A8  stw r7,424(r20)
+	  83: GETL       	R7, t64
+	  84: GETL       	R20, t66
+	  85: ADDL       	$0x1A8, t66
+	  86: STL       	t64, (t66)
+	  87: INCEIPL       	$4
+
+	0x254723FC:  409100C8  bc 4,17,0x254724C4
+	  88: Jc17o       	$0x254724C4
+
+
+
+. 208 254723AC 84
+. 80 D6 00 00 93 14 01 4C 90 D4 01 50 B2 F4 01 54 56 EC 28 34 81 14 01 78 7C 8C C2 14 80 F4 01 7C 7E 04 C0 40 39 28 00 01 3B 27 00 01 39 00 00 00 38 E0 00 00 3B 80 FF FF 7F 0A C3 78 93 94 01 A0 91 34 01 78 93 34 01 7C 91 14 01 A4 90 F4 01 A8 40 91 00 C8
+==== BB 209 (0x25472400) approx BBs exec'd 0 ====
+
+	0x25472400:  3F606474  lis r27,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0x25472404:  83BE0044  lwz r29,68(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x44, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25472408:  80DE0048  lwz r6,72(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x48, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R6
+	  12: INCEIPL       	$4
+
+	0x2547240C:  6363E552  ori r3,r27,0xE552
+	  13: MOVL       	$0x6474E552, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x25472410:  38A00001  li r5,1
+	  16: MOVL       	$0x1, t12
+	  17: PUTL       	t12, R5
+	  18: INCEIPL       	$4
+
+	0x25472414:  48000028  b 0x2547243C
+	  19: JMPo       	$0x2547243C  ($4)
+
+
+
+. 209 25472400 24
+. 3F 60 64 74 83 BE 00 44 80 DE 00 48 63 63 E5 52 38 A0 00 01 48 00 00 28
+==== BB 210 (0x2547243C) approx BBs exec'd 0 ====
+
+	0x2547243C:  800A0000  lwz r0,0(r10)
+	   0: GETL       	R10, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25472440:  2F000006  cmpi cr6,r0,6
+	   4: GETL       	R0, t4
+	   5: MOVL       	$0x6, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25472444:  419A0798  bc 12,26,0x25472BDC
+	   9: Js26o       	$0x25472BDC
+
+
+
+. 210 2547243C 12
+. 80 0A 00 00 2F 00 00 06 41 9A 07 98
+==== BB 211 (0x25472BDC) approx BBs exec'd 0 ====
+
+	0x25472BDC:  816A0008  lwz r11,8(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25472BE0:  7F4BC050  subf r26,r11,r24
+	   5: GETL       	R11, t4
+	   6: GETL       	R24, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0x25472BE4:  93540000  stw r26,0(r20)
+	  10: GETL       	R26, t8
+	  11: GETL       	R20, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25472BE8:  4BFFF848  b 0x25472430
+	  14: JMPo       	$0x25472430  ($4)
+
+
+
+. 211 25472BDC 16
+. 81 6A 00 08 7F 4B C0 50 93 54 00 00 4B FF F8 48
+==== BB 212 (0x25472430) approx BBs exec'd 0 ====
+
+	0x25472430:  394A0020  addi r10,r10,32
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x20, t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0x25472434:  7F845040  cmpl cr7,r4,r10
+	   4: GETL       	R4, t2
+	   5: GETL       	R10, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25472438:  409D0084  bc 4,29,0x254724BC
+	   9: Jc29o       	$0x254724BC
+
+
+
+. 212 25472430 12
+. 39 4A 00 20 7F 84 50 40 40 9D 00 84
+==== BB 213 (0x25472448) approx BBs exec'd 0 ====
+
+	0x25472448:  2B800006  cmpli cr7,r0,6
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x2547244C:  40BDFFCC  bc 5,29,0x25472418
+	   5: Jc29o       	$0x25472418
+
+
+
+. 213 25472448 8
+. 2B 80 00 06 40 BD FF CC
+==== BB 214 (0x25472418) approx BBs exec'd 0 ====
+
+	0x25472418:  2C000002  cmpi cr0,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547241C:  41820854  bc 12,2,0x25472C70
+	   5: Js02o       	$0x25472C70
+
+
+
+. 214 25472418 8
+. 2C 00 00 02 41 82 08 54
+==== BB 215 (0x25472420) approx BBs exec'd 0 ====
+
+	0x25472420:  28800002  cmpli cr1,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25472424:  418507E4  bc 12,5,0x25472C08
+	   5: Js05o       	$0x25472C08
+
+
+
+. 215 25472420 8
+. 28 80 00 02 41 85 07 E4
+==== BB 216 (0x25472C08) approx BBs exec'd 0 ====
+
+	0x25472C08:  2F000003  cmpi cr6,r0,3
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x3, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25472C0C:  409AF824  bc 4,26,0x25472430
+	   5: Jc26o       	$0x25472430
+
+
+
+. 216 25472C08 8
+. 2F 00 00 03 40 9A F8 24
+==== BB 217 (0x25472C10) approx BBs exec'd 0 ====
+
+	0x25472C10:  82AE01C0  lwz r21,448(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1C0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x25472C14:  7FA7EB78  or r7,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x25472C18:  81740000  lwz r11,0(r20)
+	   8: GETL       	R20, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x25472C1C:  2F150000  cmpi cr6,r21,0
+	  12: GETL       	R21, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x6, CR
+	  15: INCEIPL       	$4
+
+	0x25472C20:  812A0008  lwz r9,8(r10)
+	  16: GETL       	R10, t14
+	  17: ADDL       	$0x8, t14
+	  18: LDL       	(t14), t16
+	  19: PUTL       	t16, R9
+	  20: INCEIPL       	$4
+
+	0x25472C24:  93AE01D4  stw r29,468(r14)
+	  21: GETL       	R29, t18
+	  22: GETL       	R14, t20
+	  23: ADDL       	$0x1D4, t20
+	  24: STL       	t18, (t20)
+	  25: INCEIPL       	$4
+
+	0x25472C28:  7C0B4A14  add r0,r11,r9
+	  26: GETL       	R11, t22
+	  27: GETL       	R9, t24
+	  28: ADDL       	t22, t24
+	  29: PUTL       	t24, R0
+	  30: INCEIPL       	$4
+
+	0x25472C2C:  901D0000  stw r0,0(r29)
+	  31: GETL       	R0, t26
+	  32: GETL       	R29, t28
+	  33: STL       	t26, (t28)
+	  34: INCEIPL       	$4
+
+	0x25472C30:  409A0444  bc 4,26,0x25473074
+	  35: Jc26o       	$0x25473074
+
+
+
+. 217 25472C10 36
+. 82 AE 01 C0 7F A7 EB 78 81 74 00 00 2F 15 00 00 81 2A 00 08 93 AE 01 D4 7C 0B 4A 14 90 1D 00 00 40 9A 04 44
+==== BB 218 (0x25473074) approx BBs exec'd 0 ====
+
+	0x25473074:  3AA00001  li r21,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R21
+	   2: INCEIPL       	$4
+
+	0x25473078:  4BFFF3B8  b 0x25472430
+	   3: JMPo       	$0x25472430  ($4)
+
+
+
+. 218 25473074 8
+. 3A A0 00 01 4B FF F3 B8
+==== BB 219 (0x25472428) approx BBs exec'd 0 ====
+
+	0x25472428:  2E000001  cmpi cr4,r0,1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x2547242C:  41920B80  bc 12,18,0x25472FAC
+	   5: Js18o       	$0x25472FAC
+
+
+
+. 219 25472428 8
+. 2E 00 00 01 41 92 0B 80
+==== BB 220 (0x25472FAC) approx BBs exec'd 0 ====
+
+	0x25472FAC:  80EA001C  lwz r7,28(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25472FB0:  810A0008  lwz r8,8(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25472FB4:  7D8700D0  neg r12,r7
+	  10: GETL       	R7, t8
+	  11: NEGL       	t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0x25472FB8:  80F40000  lwz r7,0(r20)
+	  14: GETL       	R20, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R7
+	  17: INCEIPL       	$4
+
+	0x25472FBC:  7D1C6038  and r28,r8,r12
+	  18: GETL       	R8, t14
+	  19: GETL       	R12, t16
+	  20: ANDL       	t14, t16
+	  21: PUTL       	t16, R28
+	  22: INCEIPL       	$4
+
+	0x25472FC0:  813401A0  lwz r9,416(r20)
+	  23: GETL       	R20, t18
+	  24: ADDL       	$0x1A0, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R9
+	  27: INCEIPL       	$4
+
+	0x25472FC4:  7C07E214  add r0,r7,r28
+	  28: GETL       	R7, t22
+	  29: GETL       	R28, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R0
+	  32: INCEIPL       	$4
+
+	0x25472FC8:  7F090040  cmpl cr6,r9,r0
+	  33: GETL       	R9, t26
+	  34: GETL       	R0, t28
+	  35: CMPUL       	t26, t28, t30  (-rSo)
+	  36: ICRFL       	t30, $0x6, CR
+	  37: INCEIPL       	$4
+
+	0x25472FCC:  419900B0  bc 12,25,0x2547307C
+	  38: Js25o       	$0x2547307C
+
+
+
+. 220 25472FAC 36
+. 80 EA 00 1C 81 0A 00 08 7D 87 00 D0 80 F4 00 00 7D 1C 60 38 81 34 01 A0 7C 07 E2 14 7F 09 00 40 41 99 00 B0
+==== BB 221 (0x2547307C) approx BBs exec'd 0 ====
+
+	0x2547307C:  901401A0  stw r0,416(r20)
+	   0: GETL       	R0, t0
+	   1: GETL       	R20, t2
+	   2: ADDL       	$0x1A0, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25473080:  810A0008  lwz r8,8(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25473084:  4BFFFF4C  b 0x25472FD0
+	  10: JMPo       	$0x25472FD0  ($4)
+
+
+
+. 221 2547307C 12
+. 90 14 01 A0 81 0A 00 08 4B FF FF 4C
+==== BB 222 (0x25472FD0) approx BBs exec'd 0 ====
+
+	0x25472FD0:  836A0014  lwz r27,20(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25472FD4:  7EE74214  add r23,r7,r8
+	   5: GETL       	R7, t4
+	   6: GETL       	R8, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R23
+	   9: INCEIPL       	$4
+
+	0x25472FD8:  811401A4  lwz r8,420(r20)
+	  10: GETL       	R20, t8
+	  11: ADDL       	$0x1A4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0x25472FDC:  7D77DA14  add r11,r23,r27
+	  15: GETL       	R23, t12
+	  16: GETL       	R27, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R11
+	  19: INCEIPL       	$4
+
+	0x25472FE0:  7F885840  cmpl cr7,r8,r11
+	  20: GETL       	R8, t16
+	  21: GETL       	R11, t18
+	  22: CMPUL       	t16, t18, t20  (-rSo)
+	  23: ICRFL       	t20, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x25472FE4:  409C0008  bc 4,28,0x25472FEC
+	  25: Jc28o       	$0x25472FEC
+
+
+
+. 222 25472FD0 24
+. 83 6A 00 14 7E E7 42 14 81 14 01 A4 7D 77 DA 14 7F 88 58 40 40 9C 00 08
+==== BB 223 (0x25472FE8) approx BBs exec'd 0 ====
+
+	0x25472FE8:  917401A4  stw r11,420(r20)
+	   0: GETL       	R11, t0
+	   1: GETL       	R20, t2
+	   2: ADDL       	$0x1A4, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472FEC:  834A0018  lwz r26,24(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0x25472FF0:  73490001  andi. r9,r26,0x1
+	  10: GETL       	R26, t8
+	  11: ANDL       	$0x1, t8
+	  12: PUTL       	t8, R9
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x25472FF4:  41A2F43C  bc 13,2,0x25472430
+	  16: Js02o       	$0x25472430
+
+
+
+. 223 25472FE8 16
+. 91 74 01 A4 83 4A 00 18 73 49 00 01 41 A2 F4 3C
+==== BB 224 (0x25472FF8) approx BBs exec'd 0 ====
+
+	0x25472FF8:  801401A8  lwz r0,424(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x1A8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25472FFC:  7C805840  cmpl cr1,r0,r11
+	   5: GETL       	R0, t4
+	   6: GETL       	R11, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25473000:  40A4F430  bc 5,4,0x25472430
+	  10: Jc04o       	$0x25472430
+
+
+
+. 224 25472FF8 12
+. 80 14 01 A8 7C 80 58 40 40 A4 F4 30
+==== BB 225 (0x25473004) approx BBs exec'd 0 ====
+
+	0x25473004:  917401A8  stw r11,424(r20)
+	   0: GETL       	R11, t0
+	   1: GETL       	R20, t2
+	   2: ADDL       	$0x1A8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25473008:  4BFFF428  b 0x25472430
+	   5: JMPo       	$0x25472430  ($4)
+
+
+
+. 225 25473004 8
+. 91 74 01 A8 4B FF F4 28
+==== BB 226 (0x25472C70) approx BBs exec'd 0 ====
+
+	0x25472C70:  82D40000  lwz r22,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R22
+	   3: INCEIPL       	$4
+
+	0x25472C74:  832A0008  lwz r25,8(r10)
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0x8, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R25
+	   8: INCEIPL       	$4
+
+	0x25472C78:  7C16CA14  add r0,r22,r25
+	   9: GETL       	R22, t8
+	  10: GETL       	R25, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R0
+	  13: INCEIPL       	$4
+
+	0x25472C7C:  90140008  stw r0,8(r20)
+	  14: GETL       	R0, t12
+	  15: GETL       	R20, t14
+	  16: ADDL       	$0x8, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25472C80:  4BFFF7B0  b 0x25472430
+	  19: JMPo       	$0x25472430  ($4)
+
+
+
+. 226 25472C70 20
+. 82 D4 00 00 83 2A 00 08 7C 16 CA 14 90 14 00 08 4B FF F7 B0
+==== BB 227 (0x25472450) approx BBs exec'd 0 ====
+
+	0x25472450:  3EE06474  lis r23,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R23
+	   2: INCEIPL       	$4
+
+	0x25472454:  62E9E551  ori r9,r23,0xE551
+	   3: MOVL       	$0x6474E551, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x25472458:  7F804800  cmp cr7,r0,r9
+	   6: GETL       	R0, t4
+	   7: GETL       	R9, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x2547245C:  419E0828  bc 12,30,0x25472C84
+	  11: Js30o       	$0x25472C84
+
+
+
+. 227 25472450 16
+. 3E E0 64 74 62 E9 E5 51 7F 80 48 00 41 9E 08 28
+==== BB 228 (0x25472460) approx BBs exec'd 0 ====
+
+	0x25472460:  7C004840  cmpl cr0,r0,r9
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25472464:  41810788  bc 12,1,0x25472BEC
+	   5: Js01o       	$0x25472BEC
+
+
+
+. 228 25472460 8
+. 7C 00 48 40 41 81 07 88
+==== BB 229 (0x25472468) approx BBs exec'd 0 ====
+
+	0x25472468:  2C800007  cmpi cr1,r0,7
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x7, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x2547246C:  4086FFC4  bc 4,6,0x25472430
+	   5: Jc06o       	$0x25472430
+
+
+
+. 229 25472468 8
+. 2C 80 00 07 40 86 FF C4
+==== BB 230 (0x25472C84) approx BBs exec'd 0 ====
+
+	0x25472C84:  80EA0018  lwz r7,24(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25472C88:  90EE0400  stw r7,1024(r14)
+	   5: GETL       	R7, t4
+	   6: GETL       	R14, t6
+	   7: ADDL       	$0x400, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25472C8C:  4BFFF7A4  b 0x25472430
+	  10: JMPo       	$0x25472430  ($4)
+
+
+
+. 230 25472C84 12
+. 80 EA 00 18 90 EE 04 00 4B FF F7 A4
+==== BB 231 (0x254724BC) approx BBs exec'd 0 ====
+
+	0x254724BC:  811401A4  lwz r8,420(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x1A4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254724C0:  80F401A8  lwz r7,424(r20)
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0x1A8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x254724C4:  81340218  lwz r9,536(r20)
+	  10: GETL       	R20, t8
+	  11: ADDL       	$0x218, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x254724C8:  2C090000  cmpi cr0,r9,0
+	  15: GETL       	R9, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x254724CC:  41820010  bc 12,2,0x254724DC
+	  19: Js02o       	$0x254724DC
+
+
+
+. 231 254724BC 20
+. 81 14 01 A4 80 F4 01 A8 81 34 02 18 2C 09 00 00 41 82 00 10
+==== BB 232 (0x254724DC) approx BBs exec'd 0 ====
+
+	0x254724DC:  2C880000  cmpi cr1,r8,0
+	   0: GETL       	R8, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x254724E0:  4086000C  bc 4,6,0x254724EC
+	   4: Jc06o       	$0x254724EC
+
+
+
+. 232 254724DC 8
+. 2C 88 00 00 40 86 00 0C
+==== BB 233 (0x254724EC) approx BBs exec'd 0 ====
+
+	0x254724EC:  2E070000  cmpi cr4,r7,0
+	   0: GETL       	R7, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x254724F0:  4092000C  bc 4,18,0x254724FC
+	   4: Jc18o       	$0x254724FC
+
+
+
+. 233 254724EC 8
+. 2E 07 00 00 40 92 00 0C
+==== BB 234 (0x254724FC) approx BBs exec'd 0 ====
+
+	0x254724FC:  80AE01D4  lwz r5,468(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1D4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25472500:  2F050000  cmpi cr6,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25472504:  409A001C  bc 4,26,0x25472520
+	   9: Jc26o       	$0x25472520
+
+
+
+. 234 254724FC 12
+. 80 AE 01 D4 2F 05 00 00 40 9A 00 1C
+==== BB 235 (0x25472520) approx BBs exec'd 0 ====
+
+	0x25472520:  2E130000  cmpi cr4,r19,0
+	   0: GETL       	R19, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25472524:  409201A8  bc 4,18,0x254726CC
+	   4: Jc18o       	$0x254726CC
+
+
+
+. 235 25472520 8
+. 2E 13 00 00 40 92 01 A8
+==== BB 236 (0x25472528) approx BBs exec'd 0 ====
+
+	0x25472528:  81540008  lwz r10,8(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547252C:  2C0A0000  cmpi cr0,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25472530:  41820194  bc 12,2,0x254726C4
+	   9: Js02o       	$0x254726C4
+
+
+
+. 236 25472528 12
+. 81 54 00 08 2C 0A 00 00 41 82 01 94
+==== BB 237 (0x25472534) approx BBs exec'd 0 ====
+
+	0x25472534:  816A0000  lwz r11,0(r10)
+	   0: GETL       	R10, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25472538:  39140020  addi r8,r20,32
+	   4: GETL       	R20, t4
+	   5: ADDL       	$0x20, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0x2547253C:  2C8B0000  cmpi cr1,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x25472540:  41860064  bc 12,6,0x254725A4
+	  12: Js06o       	$0x254725A4
+
+
+
+. 237 25472534 16
+. 81 6A 00 00 39 14 00 20 2C 8B 00 00 41 86 00 64
+==== BB 238 (0x25472544) approx BBs exec'd 0 ====
+
+	0x25472544:  3EC06FFF  lis r22,28671
+	   0: MOVL       	$0x6FFF0000, t0
+	   1: PUTL       	t0, R22
+	   2: INCEIPL       	$4
+
+	0x25472548:  3CC07000  lis r6,28672
+	   3: MOVL       	$0x70000000, t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0x2547254C:  3C006FFF  lis r0,28671
+	   6: MOVL       	$0x6FFF0000, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25472550:  3F406FFF  lis r26,28671
+	   9: MOVL       	$0x6FFF0000, t6
+	  10: PUTL       	t6, R26
+	  11: INCEIPL       	$4
+
+	0x25472554:  3EE06FFF  lis r23,28671
+	  12: MOVL       	$0x6FFF0000, t8
+	  13: PUTL       	t8, R23
+	  14: INCEIPL       	$4
+
+	0x25472558:  3E606FFF  lis r19,28671
+	  15: MOVL       	$0x6FFF0000, t10
+	  16: PUTL       	t10, R19
+	  17: INCEIPL       	$4
+
+	0x2547255C:  62C7FFFF  ori r7,r22,0xFFFF
+	  18: MOVL       	$0x6FFFFFFF, t12
+	  19: PUTL       	t12, R7
+	  20: INCEIPL       	$4
+
+	0x25472560:  60C60021  ori r6,r6,0x21
+	  21: MOVL       	$0x70000021, t14
+	  22: PUTL       	t14, R6
+	  23: INCEIPL       	$4
+
+	0x25472564:  6005FDFF  ori r5,r0,0xFDFF
+	  24: MOVL       	$0x6FFFFDFF, t16
+	  25: PUTL       	t16, R5
+	  26: INCEIPL       	$4
+
+	0x25472568:  6344FE34  ori r4,r26,0xFE34
+	  27: MOVL       	$0x6FFFFE34, t18
+	  28: PUTL       	t18, R4
+	  29: INCEIPL       	$4
+
+	0x2547256C:  62E3FEFF  ori r3,r23,0xFEFF
+	  30: MOVL       	$0x6FFFFEFF, t20
+	  31: PUTL       	t20, R3
+	  32: INCEIPL       	$4
+
+	0x25472570:  627DFF40  ori r29,r19,0xFF40
+	  33: MOVL       	$0x6FFFFF40, t22
+	  34: PUTL       	t22, R29
+	  35: INCEIPL       	$4
+
+	0x25472574:  2F0B0021  cmpi cr6,r11,33
+	  36: GETL       	R11, t24
+	  37: MOVL       	$0x21, t28
+	  38: CMPL       	t24, t28, t26  (-rSo)
+	  39: ICRFL       	t26, $0x6, CR
+	  40: INCEIPL       	$4
+
+	0x25472578:  5560103A  rlwinm r0,r11,2,0,29
+	  41: GETL       	R11, t30
+	  42: SHLL       	$0x2, t30
+	  43: PUTL       	t30, R0
+	  44: INCEIPL       	$4
+
+	0x2547257C:  40990018  bc 4,25,0x25472594
+	  45: Jc25o       	$0x25472594
+
+
+
+. 238 25472544 60
+. 3E C0 6F FF 3C C0 70 00 3C 00 6F FF 3F 40 6F FF 3E E0 6F FF 3E 60 6F FF 62 C7 FF FF 60 C6 00 21 60 05 FD FF 63 44 FE 34 62 E3 FE FF 62 7D FF 40 2F 0B 00 21 55 60 10 3A 40 99 00 18
+==== BB 239 (0x25472594) approx BBs exec'd 0 ====
+
+	0x25472594:  7D48012E  stwx r10,r8,r0
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t2, t0
+	   3: GETL       	R10, t4
+	   4: STL       	t4, (t0)
+	   5: INCEIPL       	$4
+
+	0x25472598:  856A0008  lwzu r11,8(r10)
+	   6: GETL       	R10, t6
+	   7: ADDL       	$0x8, t6
+	   8: PUTL       	t6, R10
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x2547259C:  2F8B0000  cmpi cr7,r11,0
+	  12: GETL       	R11, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0x254725A0:  409EFFD4  bc 4,30,0x25472574
+	  16: Jc30o       	$0x25472574
+
+
+
+. 239 25472594 16
+. 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+==== BB 240 (0x25472574) approx BBs exec'd 0 ====
+
+	0x25472574:  2F0B0021  cmpi cr6,r11,33
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x21, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25472578:  5560103A  rlwinm r0,r11,2,0,29
+	   5: GETL       	R11, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x2547257C:  40990018  bc 4,25,0x25472594
+	   9: Jc25o       	$0x25472594
+
+
+
+. 240 25472574 12
+. 2F 0B 00 21 55 60 10 3A 40 99 00 18
+==== BB 241 (0x25472580) approx BBs exec'd 0 ====
+
+	0x25472580:  7F2B3850  subf r25,r11,r7
+	   0: GETL       	R11, t0
+	   1: GETL       	R7, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x25472584:  2B99000F  cmpli cr7,r25,15
+	   5: GETL       	R25, t4
+	   6: MOVL       	$0xF, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x25472588:  419D0708  bc 12,29,0x25472C90
+	  10: Js29o       	$0x25472C90
+
+
+
+. 241 25472580 12
+. 7F 2B 38 50 2B 99 00 0F 41 9D 07 08
+==== BB 242 (0x2547258C) approx BBs exec'd 0 ====
+
+	0x2547258C:  7C0B3050  subf r0,r11,r6
+	   0: GETL       	R11, t0
+	   1: GETL       	R6, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25472590:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25472594:  7D48012E  stwx r10,r8,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R8, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R10, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x25472598:  856A0008  lwzu r11,8(r10)
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x8, t12
+	  17: PUTL       	t12, R10
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0x2547259C:  2F8B0000  cmpi cr7,r11,0
+	  21: GETL       	R11, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x254725A0:  409EFFD4  bc 4,30,0x25472574
+	  25: Jc30o       	$0x25472574
+
+
+
+. 242 2547258C 24
+. 7C 0B 30 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+==== BB 243 (0x25472C90) approx BBs exec'd 0 ====
+
+	0x25472C90:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0x25472C94:  7D9C0E70  srawi r28,r12,1
+	   4: GETL       	R12, t2
+	   5: SARL       	$0x1, t2  (-wCa)
+	   6: PUTL       	t2, R28
+	   7: INCEIPL       	$4
+
+	0x25472C98:  7F89E0F8  nor r9,r28,r28
+	   8: GETL       	R28, t4
+	   9: NOTL       	t4
+	  10: PUTL       	t4, R9
+	  11: INCEIPL       	$4
+
+	0x25472C9C:  201C0031  subfic r0,r28,49
+	  12: GETL       	R28, t6
+	  13: MOVL       	$0x31, t8
+	  14: SBBL       	t6, t8  (-wCa)
+	  15: PUTL       	t8, R0
+	  16: INCEIPL       	$4
+
+	0x25472CA0:  28090002  cmpli cr0,r9,2
+	  17: GETL       	R9, t10
+	  18: MOVL       	$0x2, t14
+	  19: CMPUL       	t10, t14, t12  (-rSo)
+	  20: ICRFL       	t12, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0x25472CA4:  40A1F8EC  bc 5,1,0x25472590
+	  22: Jc01o       	$0x25472590
+
+
+
+. 243 25472C90 24
+. 55 6C 08 3C 7D 9C 0E 70 7F 89 E0 F8 20 1C 00 31 28 09 00 02 40 A1 F8 EC
+==== BB 244 (0x25472CA8) approx BBs exec'd 0 ====
+
+	0x25472CA8:  7F6B2850  subf r27,r11,r5
+	   0: GETL       	R11, t0
+	   1: GETL       	R5, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25472CAC:  289B000B  cmpli cr1,r27,11
+	   5: GETL       	R27, t4
+	   6: MOVL       	$0xB, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25472CB0:  418502E4  bc 12,5,0x25472F94
+	  10: Js05o       	$0x25472F94
+
+
+
+. 244 25472CA8 12
+. 7F 6B 28 50 28 9B 00 0B 41 85 02 E4
+==== BB 245 (0x25472F94) approx BBs exec'd 0 ====
+
+	0x25472F94:  7F0B1850  subf r24,r11,r3
+	   0: GETL       	R11, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0x25472F98:  2B180009  cmpli cr6,r24,9
+	   5: GETL       	R24, t4
+	   6: MOVL       	$0x9, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x25472F9C:  41B9F5FC  bc 13,25,0x25472598
+	  10: Js25o       	$0x25472598
+
+
+
+. 245 25472F94 12
+. 7F 0B 18 50 2B 18 00 09 41 B9 F5 FC
+==== BB 246 (0x25472FA0) approx BBs exec'd 0 ====
+
+	0x25472FA0:  7C0BE850  subf r0,r11,r29
+	   0: GETL       	R11, t0
+	   1: GETL       	R29, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25472FA4:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25472FA8:  4BFFF5EC  b 0x25472594
+	   9: JMPo       	$0x25472594  ($4)
+
+
+
+. 246 25472FA0 12
+. 7C 0B E8 50 54 00 10 3A 4B FF F5 EC
+==== BB 247 (0x25472CB4) approx BBs exec'd 0 ====
+
+	0x25472CB4:  7C0B2050  subf r0,r11,r4
+	   0: GETL       	R11, t0
+	   1: GETL       	R4, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25472CB8:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25472CBC:  4BFFF8D8  b 0x25472594
+	   9: JMPo       	$0x25472594  ($4)
+
+
+
+. 247 25472CB4 12
+. 7C 0B 20 50 54 00 10 3A 4B FF F8 D8
+==== BB 248 (0x254725A4) approx BBs exec'd 0 ====
+
+	0x254725A4:  81740000  lwz r11,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x254725A8:  2C0B0000  cmpi cr0,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x254725AC:  418200AC  bc 12,2,0x25472658
+	   8: Js02o       	$0x25472658
+
+
+
+. 248 254725A4 12
+. 81 74 00 00 2C 0B 00 00 41 82 00 AC
+==== BB 249 (0x25472658) approx BBs exec'd 0 ====
+
+	0x25472658:  81280078  lwz r9,120(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x78, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547265C:  2C090000  cmpi cr0,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25472660:  41820030  bc 12,2,0x25472690
+	   9: Js02o       	$0x25472690
+
+
+
+. 249 25472658 12
+. 81 28 00 78 2C 09 00 00 41 82 00 30
+==== BB 250 (0x25472690) approx BBs exec'd 0 ====
+
+	0x25472690:  81280098  lwz r9,152(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x98, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25472694:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25472698:  41860018  bc 12,6,0x254726B0
+	   9: Js06o       	$0x254726B0
+
+
+
+. 250 25472690 12
+. 81 28 00 98 2C 89 00 00 41 86 00 18
+==== BB 251 (0x254726B0) approx BBs exec'd 0 ====
+
+	0x254726B0:  81680074  lwz r11,116(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254726B4:  2F0B0000  cmpi cr6,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x254726B8:  419A000C  bc 12,26,0x254726C4
+	   9: Js26o       	$0x254726C4
+
+
+
+. 251 254726B0 12
+. 81 68 00 74 2F 0B 00 00 41 9A 00 0C
+==== BB 252 (0x254726C4) approx BBs exec'd 0 ====
+
+	0x254726C4:  7E83A378  or r3,r20,r20
+	   0: GETL       	R20, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254726C8:  48005EA5  bl 0x2547856C
+	   3: MOVL       	$0x254726CC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547856C  ($4)
+
+
+
+. 252 254726C4 8
+. 7E 83 A3 78 48 00 5E A5
+==== BB 253 (0x254726CC) approx BBs exec'd 0 ====
+
+	0x254726CC:  811F0030  lwz r8,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254726D0:  2F880002  cmpi cr7,r8,2
+	   5: GETL       	R8, t4
+	   6: MOVL       	$0x2, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x254726D4:  419E0A04  bc 12,30,0x254730D8
+	  10: Js30o       	$0x254730D8
+
+
+
+. 253 254726CC 12
+. 81 1F 00 30 2F 88 00 02 41 9E 0A 04
+==== BB 254 (0x254726D8) approx BBs exec'd 0 ====
+
+	0x254726D8:  4192088C  bc 12,18,0x25472F64
+	   0: Js18o       	$0x25472F64
+
+
+
+. 254 254726D8 4
+. 41 92 08 8C
+==== BB 255 (0x25472F64) approx BBs exec'd 0 ====
+
+	0x25472F64:  82BE004C  lwz r21,76(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x25472F68:  80750000  lwz r3,0(r21)
+	   5: GETL       	R21, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0x25472F6C:  48002169  bl 0x254750D4
+	   9: MOVL       	$0x25472F70, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0x254750D4  ($4)
+
+
+
+. 255 25472F64 12
+. 82 BE 00 4C 80 75 00 00 48 00 21 69
+==== BB 256 _dl_init_paths(0x254750D4) approx BBs exec'd 0 ====
+
+	0x254750D4:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254750D8:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254750DC:  48021F25  bl 0x25497000
+	   9: MOVL       	$0x254750E0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 256 254750D4 12
+. 94 21 FF D0 7C 08 02 A6 48 02 1F 25
+==== BB 257 (0x254750E0) approx BBs exec'd 0 ====
+
+	0x254750E0:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254750E4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x254750E8:  93010010  stw r24,16(r1)
+	   8: GETL       	R24, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254750EC:  90010034  stw r0,52(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x34, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254750F0:  93210014  stw r25,20(r1)
+	  18: GETL       	R25, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x254750F4:  7C791B78  or r25,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0x254750F8:  831E04F4  lwz r24,1268(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4F4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R24
+	  30: INCEIPL       	$4
+
+	0x254750FC:  80BE0154  lwz r5,340(r30)
+	  31: GETL       	R30, t24
+	  32: ADDL       	$0x154, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R5
+	  35: INCEIPL       	$4
+
+	0x25475100:  80980010  lwz r4,16(r24)
+	  36: GETL       	R24, t28
+	  37: ADDL       	$0x10, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R4
+	  40: INCEIPL       	$4
+
+	0x25475104:  80DE015C  lwz r6,348(r30)
+	  41: GETL       	R30, t32
+	  42: ADDL       	$0x15C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R6
+	  45: INCEIPL       	$4
+
+	0x25475108:  8078000C  lwz r3,12(r24)
+	  46: GETL       	R24, t36
+	  47: ADDL       	$0xC, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R3
+	  50: INCEIPL       	$4
+
+	0x2547510C:  9361001C  stw r27,28(r1)
+	  51: GETL       	R27, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x1C, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0x25475110:  93810020  stw r28,32(r1)
+	  56: GETL       	R28, t44
+	  57: GETL       	R1, t46
+	  58: ADDL       	$0x20, t46
+	  59: STL       	t44, (t46)
+	  60: INCEIPL       	$4
+
+	0x25475114:  93E1002C  stw r31,44(r1)
+	  61: GETL       	R31, t48
+	  62: GETL       	R1, t50
+	  63: ADDL       	$0x2C, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0x25475118:  7C3F0B78  or r31,r1,r1
+	  66: GETL       	R1, t52
+	  67: PUTL       	t52, R31
+	  68: INCEIPL       	$4
+
+	0x2547511C:  92E1000C  stw r23,12(r1)
+	  69: GETL       	R23, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0xC, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0x25475120:  93410018  stw r26,24(r1)
+	  74: GETL       	R26, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x18, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0x25475124:  93A10024  stw r29,36(r1)
+	  79: GETL       	R29, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x24, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x25475128:  4800A629  bl 0x2547F750
+	  84: MOVL       	$0x2547512C, t66
+	  85: PUTL       	t66, LR
+	  86: JMPo-c       	$0x2547F750  ($4)
+
+
+
+. 257 254750E0 76
+. 93 C1 00 28 7F C8 02 A6 93 01 00 10 90 01 00 34 93 21 00 14 7C 79 1B 78 83 1E 04 F4 80 BE 01 54 80 98 00 10 80 DE 01 5C 80 78 00 0C 93 61 00 1C 93 81 00 20 93 E1 00 2C 7C 3F 0B 78 92 E1 00 0C 93 41 00 18 93 A1 00 24 48 00 A6 29
+==== BB 258 _dl_important_hwcaps(0x2547F750) approx BBs exec'd 0 ====
+
+	0x2547F750:  9421FFB0  stwu r1,-80(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFB0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547F754:  7CE802A6  mflr r7
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x2547F758:  480178A9  bl 0x25497000
+	   9: MOVL       	$0x2547F75C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 258 2547F750 12
+. 94 21 FF B0 7C E8 02 A6 48 01 78 A9
+==== BB 259 (0x2547F75C) approx BBs exec'd 0 ====
+
+	0x2547F75C:  93C10048  stw r30,72(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x48, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547F760:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547F764:  93810040  stw r28,64(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x40, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547F768:  90E10054  stw r7,84(r1)
+	  13: GETL       	R7, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x54, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547F76C:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0x2547F770:  92010010  stw r16,16(r1)
+	  21: GETL       	R16, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547F774:  7CD03378  or r16,r6,r6
+	  26: GETL       	R6, t20
+	  27: PUTL       	t20, R16
+	  28: INCEIPL       	$4
+
+	0x2547F778:  813E04F4  lwz r9,1268(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4F4, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0x2547F77C:  9261001C  stw r19,28(r1)
+	  34: GETL       	R19, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x1C, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x2547F780:  7C932378  or r19,r4,r4
+	  39: GETL       	R4, t30
+	  40: PUTL       	t30, R19
+	  41: INCEIPL       	$4
+
+	0x2547F784:  81690040  lwz r11,64(r9)
+	  42: GETL       	R9, t32
+	  43: ADDL       	$0x40, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R11
+	  46: INCEIPL       	$4
+
+	0x2547F788:  8009003C  lwz r0,60(r9)
+	  47: GETL       	R9, t36
+	  48: ADDL       	$0x3C, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R0
+	  51: INCEIPL       	$4
+
+	0x2547F78C:  92810020  stw r20,32(r1)
+	  52: GETL       	R20, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x20, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0x2547F790:  7CB42B78  or r20,r5,r5
+	  57: GETL       	R5, t44
+	  58: PUTL       	t44, R20
+	  59: INCEIPL       	$4
+
+	0x2547F794:  7C1C5838  and r28,r0,r11
+	  60: GETL       	R0, t46
+	  61: GETL       	R11, t48
+	  62: ANDL       	t46, t48
+	  63: PUTL       	t48, R28
+	  64: INCEIPL       	$4
+
+	0x2547F798:  92C10028  stw r22,40(r1)
+	  65: GETL       	R22, t50
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x28, t52
+	  68: STL       	t50, (t52)
+	  69: INCEIPL       	$4
+
+	0x2547F79C:  2F9C0000  cmpi cr7,r28,0
+	  70: GETL       	R28, t54
+	  71: CMP0L       	t54, t56  (-rSo)
+	  72: ICRFL       	t56, $0x7, CR
+	  73: INCEIPL       	$4
+
+	0x2547F7A0:  93E1004C  stw r31,76(r1)
+	  74: GETL       	R31, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x4C, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0x2547F7A4:  92210014  stw r17,20(r1)
+	  79: GETL       	R17, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x14, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x2547F7A8:  7C3F0B78  or r31,r1,r1
+	  84: GETL       	R1, t66
+	  85: PUTL       	t66, R31
+	  86: INCEIPL       	$4
+
+	0x2547F7AC:  92410018  stw r18,24(r1)
+	  87: GETL       	R18, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x18, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0x2547F7B0:  7C761B78  or r22,r3,r3
+	  92: GETL       	R3, t72
+	  93: PUTL       	t72, R22
+	  94: INCEIPL       	$4
+
+	0x2547F7B4:  92A10024  stw r21,36(r1)
+	  95: GETL       	R21, t74
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x24, t76
+	  98: STL       	t74, (t76)
+	  99: INCEIPL       	$4
+
+	0x2547F7B8:  3003FFFF  addic r0,r3,-1
+	 100: GETL       	R3, t78
+	 101: ADCL       	$0xFFFFFFFF, t78  (-wCa)
+	 102: PUTL       	t78, R0
+	 103: INCEIPL       	$4
+
+	0x2547F7BC:  7D401910  subfe r10,r0,r3
+	 104: GETL       	R0, t80
+	 105: GETL       	R3, t82
+	 106: SBBL       	t80, t82  (-rCa-wCa)
+	 107: PUTL       	t82, R10
+	 108: INCEIPL       	$4
+
+	0x2547F7C0:  92E1002C  stw r23,44(r1)
+	 109: GETL       	R23, t84
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x2C, t86
+	 112: STL       	t84, (t86)
+	 113: INCEIPL       	$4
+
+	0x2547F7C4:  39600000  li r11,0
+	 114: MOVL       	$0x0, t88
+	 115: PUTL       	t88, R11
+	 116: INCEIPL       	$4
+
+	0x2547F7C8:  93010030  stw r24,48(r1)
+	 117: GETL       	R24, t90
+	 118: GETL       	R1, t92
+	 119: ADDL       	$0x30, t92
+	 120: STL       	t90, (t92)
+	 121: INCEIPL       	$4
+
+	0x2547F7CC:  93210034  stw r25,52(r1)
+	 122: GETL       	R25, t94
+	 123: GETL       	R1, t96
+	 124: ADDL       	$0x34, t96
+	 125: STL       	t94, (t96)
+	 126: INCEIPL       	$4
+
+	0x2547F7D0:  93410038  stw r26,56(r1)
+	 127: GETL       	R26, t98
+	 128: GETL       	R1, t100
+	 129: ADDL       	$0x38, t100
+	 130: STL       	t98, (t100)
+	 131: INCEIPL       	$4
+
+	0x2547F7D4:  9361003C  stw r27,60(r1)
+	 132: GETL       	R27, t102
+	 133: GETL       	R1, t104
+	 134: ADDL       	$0x3C, t104
+	 135: STL       	t102, (t104)
+	 136: INCEIPL       	$4
+
+	0x2547F7D8:  93A10044  stw r29,68(r1)
+	 137: GETL       	R29, t106
+	 138: GETL       	R1, t108
+	 139: ADDL       	$0x44, t108
+	 140: STL       	t106, (t108)
+	 141: INCEIPL       	$4
+
+	0x2547F7DC:  9181000C  stw r12,12(r1)
+	 142: GETL       	R12, t110
+	 143: GETL       	R1, t112
+	 144: ADDL       	$0xC, t112
+	 145: STL       	t110, (t112)
+	 146: INCEIPL       	$4
+
+	0x2547F7E0:  419E0028  bc 12,30,0x2547F808
+	 147: Js30o       	$0x2547F808
+
+
+
+. 259 2547F75C 136
+. 93 C1 00 48 7F C8 02 A6 93 81 00 40 90 E1 00 54 7D 80 00 26 92 01 00 10 7C D0 33 78 81 3E 04 F4 92 61 00 1C 7C 93 23 78 81 69 00 40 80 09 00 3C 92 81 00 20 7C B4 2B 78 7C 1C 58 38 92 C1 00 28 2F 9C 00 00 93 E1 00 4C 92 21 00 14 7C 3F 0B 78 92 41 00 18 7C 76 1B 78 92 A1 00 24 30 03 FF FF 7D 40 19 10 92 E1 00 2C 39 60 00 00 93 01 00 30 93 21 00 34 93 41 00 38 93 61 00 3C 93 A1 00 44 91 81 00 0C 41 9E 00 28
+==== BB 260 (0x2547F808) approx BBs exec'd 0 ====
+
+	0x2547F808:  3B0A0001  addi r24,r10,1
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0x2547F80C:  82210000  lwz r17,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R17
+	   7: INCEIPL       	$4
+
+	0x2547F810:  57121838  rlwinm r18,r24,3,0,28
+	   8: GETL       	R24, t6
+	   9: SHLL       	$0x3, t6
+	  10: PUTL       	t6, R18
+	  11: INCEIPL       	$4
+
+	0x2547F814:  3B200000  li r25,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R25
+	  14: INCEIPL       	$4
+
+	0x2547F818:  3AF2001E  addi r23,r18,30
+	  15: GETL       	R18, t10
+	  16: ADDL       	$0x1E, t10
+	  17: PUTL       	t10, R23
+	  18: INCEIPL       	$4
+
+	0x2547F81C:  3B400000  li r26,0
+	  19: MOVL       	$0x0, t12
+	  20: PUTL       	t12, R26
+	  21: INCEIPL       	$4
+
+	0x2547F820:  56F50036  rlwinm r21,r23,0,0,27
+	  22: GETL       	R23, t14
+	  23: ANDL       	$0xFFFFFFF0, t14
+	  24: PUTL       	t14, R21
+	  25: INCEIPL       	$4
+
+	0x2547F824:  7D5500D0  neg r10,r21
+	  26: GETL       	R21, t16
+	  27: NEGL       	t16
+	  28: PUTL       	t16, R10
+	  29: INCEIPL       	$4
+
+	0x2547F828:  7E21516E  stwux r17,r1,r10
+	  30: GETL       	R10, t18
+	  31: GETL       	R1, t20
+	  32: ADDL       	t20, t18
+	  33: PUTL       	t18, R1
+	  34: GETL       	R17, t22
+	  35: STL       	t22, (t18)
+	  36: INCEIPL       	$4
+
+	0x2547F82C:  39010017  addi r8,r1,23
+	  37: GETL       	R1, t24
+	  38: ADDL       	$0x17, t24
+	  39: PUTL       	t24, R8
+	  40: INCEIPL       	$4
+
+	0x2547F830:  551B0036  rlwinm r27,r8,0,0,27
+	  41: GETL       	R8, t26
+	  42: ANDL       	$0xFFFFFFF0, t26
+	  43: PUTL       	t26, R27
+	  44: INCEIPL       	$4
+
+	0x2547F834:  419E0048  bc 12,30,0x2547F87C
+	  45: Js30o       	$0x2547F87C
+
+
+
+. 260 2547F808 48
+. 3B 0A 00 01 82 21 00 00 57 12 18 38 3B 20 00 00 3A F2 00 1E 3B 40 00 00 56 F5 00 36 7D 55 00 D0 7E 21 51 6E 39 01 00 17 55 1B 00 36 41 9E 00 48
+==== BB 261 (0x2547F87C) approx BBs exec'd 0 ====
+
+	0x2547F87C:  2F960000  cmpi cr7,r22,0
+	   0: GETL       	R22, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547F880:  419E0018  bc 12,30,0x2547F898
+	   4: Js30o       	$0x2547F898
+
+
+
+. 261 2547F87C 8
+. 2F 96 00 00 41 9E 00 18
+==== BB 262 (0x2547F898) approx BBs exec'd 0 ====
+
+	0x2547F898:  2E180001  cmpi cr4,r24,1
+	   0: GETL       	R24, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x2547F89C:  57291838  rlwinm r9,r25,3,0,28
+	   5: GETL       	R25, t6
+	   6: SHLL       	$0x3, t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x2547F8A0:  833E0410  lwz r25,1040(r30)
+	   9: GETL       	R30, t8
+	  10: ADDL       	$0x410, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R25
+	  13: INCEIPL       	$4
+
+	0x2547F8A4:  7E69DA14  add r19,r9,r27
+	  14: GETL       	R9, t12
+	  15: GETL       	R27, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R19
+	  18: INCEIPL       	$4
+
+	0x2547F8A8:  3AC00003  li r22,3
+	  19: MOVL       	$0x3, t16
+	  20: PUTL       	t16, R22
+	  21: INCEIPL       	$4
+
+	0x2547F8AC:  7F29D92E  stwx r25,r9,r27
+	  22: GETL       	R27, t18
+	  23: GETL       	R9, t20
+	  24: ADDL       	t20, t18
+	  25: GETL       	R25, t22
+	  26: STL       	t22, (t18)
+	  27: INCEIPL       	$4
+
+	0x2547F8B0:  92D30004  stw r22,4(r19)
+	  28: GETL       	R22, t24
+	  29: GETL       	R19, t26
+	  30: ADDL       	$0x4, t26
+	  31: STL       	t24, (t26)
+	  32: INCEIPL       	$4
+
+	0x2547F8B4:  41920324  bc 12,18,0x2547FBD8
+	  33: Js18o       	$0x2547FBD8
+
+
+
+. 262 2547F898 32
+. 2E 18 00 01 57 29 18 38 83 3E 04 10 7E 69 DA 14 3A C0 00 03 7F 29 D9 2E 92 D3 00 04 41 92 03 24
+==== BB 263 (0x2547FBD8) approx BBs exec'd 0 ====
+
+	0x2547FBD8:  819B0004  lwz r12,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x2547FBDC:  390C0001  addi r8,r12,1
+	   5: GETL       	R12, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x2547FBE0:  4BFFFD30  b 0x2547F910
+	   9: JMPo       	$0x2547F910  ($4)
+
+
+
+. 263 2547FBD8 12
+. 81 9B 00 04 39 0C 00 01 4B FF FD 30
+==== BB 264 (0x2547F910) approx BBs exec'd 0 ====
+
+	0x2547F910:  3BA00001  li r29,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0x2547F914:  7FAAC030  slw r10,r29,r24
+	   3: GETL       	R29, t4
+	   4: GETL       	R24, t2
+	   5: SHLL       	t2, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0x2547F918:  555A1838  rlwinm r26,r10,3,0,28
+	   8: GETL       	R10, t6
+	   9: SHLL       	$0x3, t6
+	  10: PUTL       	t6, R26
+	  11: INCEIPL       	$4
+
+	0x2547F91C:  91540000  stw r10,0(r20)
+	  12: GETL       	R10, t8
+	  13: GETL       	R20, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547F920:  7C7A4214  add r3,r26,r8
+	  16: GETL       	R26, t12
+	  17: GETL       	R8, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R3
+	  20: INCEIPL       	$4
+
+	0x2547F924:  48018101  bl 0x25497A24
+	  21: MOVL       	$0x2547F928, t16
+	  22: PUTL       	t16, LR
+	  23: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 264 2547F910 24
+. 3B A0 00 01 7F AA C0 30 55 5A 18 38 91 54 00 00 7C 7A 42 14 48 01 81 01
+==== BB 265 (0x2547FDC8) approx BBs exec'd 0 ====
+
+	0x2547FDC8:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547FDCC:  7C895214  add r4,r9,r10
+	   4: GETL       	R9, t4
+	   5: GETL       	R10, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x2547FDD0:  3804FFFF  addi r0,r4,-1
+	   9: GETL       	R4, t8
+	  10: ADDL       	$0xFFFFFFFF, t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x2547FDD4:  7C0AE838  and r10,r0,r29
+	  13: GETL       	R0, t10
+	  14: GETL       	R29, t12
+	  15: ANDL       	t10, t12
+	  16: PUTL       	t12, R10
+	  17: INCEIPL       	$4
+
+	0x2547FDD8:  7D8AE214  add r12,r10,r28
+	  18: GETL       	R10, t14
+	  19: GETL       	R28, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R12
+	  22: INCEIPL       	$4
+
+	0x2547FDDC:  915F0000  stw r10,0(r31)
+	  23: GETL       	R10, t18
+	  24: GETL       	R31, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0x2547FDE0:  7C0C5840  cmpl cr0,r12,r11
+	  27: GETL       	R12, t22
+	  28: GETL       	R11, t24
+	  29: CMPUL       	t22, t24, t26  (-rSo)
+	  30: ICRFL       	t26, $0x0, CR
+	  31: INCEIPL       	$4
+
+	0x2547FDE4:  4080003C  bc 4,0,0x2547FE20
+	  32: Jc00o       	$0x2547FE20
+
+
+
+. 265 2547FDC8 32
+. 81 3F 00 00 7C 89 52 14 38 04 FF FF 7C 0A E8 38 7D 8A E2 14 91 5F 00 00 7C 0C 58 40 40 80 00 3C
+==== BB 266 (0x2547F928) approx BBs exec'd 0 ====
+
+	0x2547F928:  7C761B79  or. r22,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R22
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547F92C:  418202E8  bc 12,2,0x2547FC14
+	   5: Js02o       	$0x2547FC14
+
+
+
+. 266 2547F928 8
+. 7C 76 1B 79 41 82 02 E8
+==== BB 267 (0x2547F930) approx BBs exec'd 0 ====
+
+	0x2547F930:  419201FC  bc 12,18,0x2547FB2C
+	   0: Js18o       	$0x2547FB2C
+
+
+
+. 267 2547F930 4
+. 41 92 01 FC
+==== BB 268 (0x2547FB2C) approx BBs exec'd 0 ====
+
+	0x2547FB2C:  83340000  lwz r25,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R25
+	   3: INCEIPL       	$4
+
+	0x2547FB30:  39000000  li r8,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R8
+	   6: INCEIPL       	$4
+
+	0x2547FB34:  827B0004  lwz r19,4(r27)
+	   7: GETL       	R27, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R19
+	  11: INCEIPL       	$4
+
+	0x2547FB38:  3B00002F  li r24,47
+	  12: MOVL       	$0x2F, t10
+	  13: PUTL       	t10, R24
+	  14: INCEIPL       	$4
+
+	0x2547FB3C:  57291838  rlwinm r9,r25,3,0,28
+	  15: GETL       	R25, t12
+	  16: SHLL       	$0x3, t12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0x2547FB40:  9116000C  stw r8,12(r22)
+	  19: GETL       	R8, t14
+	  20: GETL       	R22, t16
+	  21: ADDL       	$0xC, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547FB44:  7C69B214  add r3,r9,r22
+	  24: GETL       	R9, t18
+	  25: GETL       	R22, t20
+	  26: ADDL       	t18, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x2547FB48:  3B930001  addi r28,r19,1
+	  29: GETL       	R19, t22
+	  30: ADDL       	$0x1, t22
+	  31: PUTL       	t22, R28
+	  32: INCEIPL       	$4
+
+	0x2547FB4C:  93960004  stw r28,4(r22)
+	  33: GETL       	R28, t24
+	  34: GETL       	R22, t26
+	  35: ADDL       	$0x4, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x2547FB50:  3A400002  li r18,2
+	  38: MOVL       	$0x2, t28
+	  39: PUTL       	t28, R18
+	  40: INCEIPL       	$4
+
+	0x2547FB54:  90760000  stw r3,0(r22)
+	  41: GETL       	R3, t30
+	  42: GETL       	R22, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x2547FB58:  90760008  stw r3,8(r22)
+	  45: GETL       	R3, t34
+	  46: GETL       	R22, t36
+	  47: ADDL       	$0x8, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0x2547FB5C:  80BB0004  lwz r5,4(r27)
+	  50: GETL       	R27, t38
+	  51: ADDL       	$0x4, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R5
+	  54: INCEIPL       	$4
+
+	0x2547FB60:  809B0000  lwz r4,0(r27)
+	  55: GETL       	R27, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R4
+	  58: INCEIPL       	$4
+
+	0x2547FB64:  48003EBD  bl 0x25483A20
+	  59: MOVL       	$0x2547FB68, t46
+	  60: PUTL       	t46, LR
+	  61: JMPo-c       	$0x25483A20  ($4)
+
+
+
+. 268 2547FB2C 60
+. 83 34 00 00 39 00 00 00 82 7B 00 04 3B 00 00 2F 57 29 18 38 91 16 00 0C 7C 69 B2 14 3B 93 00 01 93 96 00 04 3A 40 00 02 90 76 00 00 90 76 00 08 80 BB 00 04 80 9B 00 00 48 00 3E BD
+==== BB 269 mempcpy(0x25483A20) approx BBs exec'd 0 ====
+
+	0x25483A20:  2B85000F  cmpli cr7,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25483A24:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0x25483A28:  9421FFE0  stwu r1,-32(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFE0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25483A2C:  93A10014  stw r29,20(r1)
+	  14: GETL       	R29, t12
+	  15: GETL       	R1, t14
+	  16: ADDL       	$0x14, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25483A30:  7C7D1B78  or r29,r3,r3
+	  19: GETL       	R3, t16
+	  20: PUTL       	t16, R29
+	  21: INCEIPL       	$4
+
+	0x25483A34:  93E1001C  stw r31,28(r1)
+	  22: GETL       	R31, t18
+	  23: GETL       	R1, t20
+	  24: ADDL       	$0x1C, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0x25483A38:  7C9F2378  or r31,r4,r4
+	  27: GETL       	R4, t22
+	  28: PUTL       	t22, R31
+	  29: INCEIPL       	$4
+
+	0x25483A3C:  93810010  stw r28,16(r1)
+	  30: GETL       	R28, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x10, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25483A40:  93C10018  stw r30,24(r1)
+	  35: GETL       	R30, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x18, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25483A44:  90010024  stw r0,36(r1)
+	  40: GETL       	R0, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x24, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0x25483A48:  409D0058  bc 4,29,0x25483AA0
+	  45: Jc29o       	$0x25483AA0
+
+
+
+. 269 25483A20 44
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 E1 00 1C 7C 9F 23 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+==== BB 270 (0x25483AA0) approx BBs exec'd 0 ====
+
+	0x25483AA0:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x25483AA4:  4186001C  bc 12,6,0x25483AC0
+	   4: Js06o       	$0x25483AC0
+
+
+
+. 270 25483AA0 8
+. 2C 85 00 00 41 86 00 1C
+==== BB 271 (0x25483AA8) approx BBs exec'd 0 ====
+
+	0x25483AA8:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x25483AAC:  88FF0000  lbz r7,0(r31)
+	   3: GETL       	R31, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R7
+	   6: INCEIPL       	$4
+
+	0x25483AB0:  3BFF0001  addi r31,r31,1
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R31
+	  10: INCEIPL       	$4
+
+	0x25483AB4:  98FD0000  stb r7,0(r29)
+	  11: GETL       	R7, t8
+	  12: GETL       	R29, t10
+	  13: STB       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25483AB8:  3BBD0001  addi r29,r29,1
+	  15: GETL       	R29, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x25483ABC:  4200FFF0  bc 16,0,0x25483AAC
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0x25483AC0
+	  23: JMPo       	$0x25483AAC  ($4)
+
+
+
+. 271 25483AA8 24
+. 7C A9 03 A6 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+==== BB 272 (0x25483AAC) approx BBs exec'd 0 ====
+
+	0x25483AAC:  88FF0000  lbz r7,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25483AB0:  3BFF0001  addi r31,r31,1
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0x25483AB4:  98FD0000  stb r7,0(r29)
+	   8: GETL       	R7, t6
+	   9: GETL       	R29, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25483AB8:  3BBD0001  addi r29,r29,1
+	  12: GETL       	R29, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0x25483ABC:  4200FFF0  bc 16,0,0x25483AAC
+	  16: GETL       	CTR, t12
+	  17: ADDL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, CTR
+	  19: JIFZL       	t12, $0x25483AC0
+	  20: JMPo       	$0x25483AAC  ($4)
+
+
+
+. 272 25483AAC 20
+. 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+==== BB 273 (0x25483AC0) approx BBs exec'd 0 ====
+
+	0x25483AC0:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25483AC4:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483AC8:  83810010  lwz r28,16(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x25483ACC:  83A10014  lwz r29,20(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x14, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0x25483AD0:  7D0803A6  mtlr r8
+	  18: GETL       	R8, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x25483AD4:  83C10018  lwz r30,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R30
+	  25: INCEIPL       	$4
+
+	0x25483AD8:  83E1001C  lwz r31,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0x25483ADC:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0x25483AE0:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+
+. 273 25483AC0 36
+. 81 01 00 24 7F A3 EB 78 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 274 (0x2547FB68) approx BBs exec'd 0 ====
+
+	0x2547FB68:  9B030000  stb r24,0(r3)
+	   0: GETL       	R24, t0
+	   1: GETL       	R3, t2
+	   2: STB       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x2547FB6C:  92540000  stw r18,0(r20)
+	   4: GETL       	R18, t4
+	   5: GETL       	R20, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0x2547FB70:  83760004  lwz r27,4(r22)
+	   8: GETL       	R22, t8
+	   9: ADDL       	$0x4, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R27
+	  12: INCEIPL       	$4
+
+	0x2547FB74:  7EC3B378  or r3,r22,r22
+	  13: GETL       	R22, t12
+	  14: PUTL       	t12, R3
+	  15: INCEIPL       	$4
+
+	0x2547FB78:  93700000  stw r27,0(r16)
+	  16: GETL       	R27, t14
+	  17: GETL       	R16, t16
+	  18: STL       	t14, (t16)
+	  19: INCEIPL       	$4
+
+	0x2547FB7C:  81010000  lwz r8,0(r1)
+	  20: GETL       	R1, t18
+	  21: LDL       	(t18), t20
+	  22: PUTL       	t20, R8
+	  23: INCEIPL       	$4
+
+	0x2547FB80:  82080004  lwz r16,4(r8)
+	  24: GETL       	R8, t22
+	  25: ADDL       	$0x4, t22
+	  26: LDL       	(t22), t24
+	  27: PUTL       	t24, R16
+	  28: INCEIPL       	$4
+
+	0x2547FB84:  80E8FFBC  lwz r7,-68(r8)
+	  29: GETL       	R8, t26
+	  30: ADDL       	$0xFFFFFFBC, t26
+	  31: LDL       	(t26), t28
+	  32: PUTL       	t28, R7
+	  33: INCEIPL       	$4
+
+	0x2547FB88:  7E0803A6  mtlr r16
+	  34: GETL       	R16, t30
+	  35: PUTL       	t30, LR
+	  36: INCEIPL       	$4
+
+	0x2547FB8C:  8228FFC4  lwz r17,-60(r8)
+	  37: GETL       	R8, t32
+	  38: ADDL       	$0xFFFFFFC4, t32
+	  39: LDL       	(t32), t34
+	  40: PUTL       	t34, R17
+	  41: INCEIPL       	$4
+
+	0x2547FB90:  8208FFC0  lwz r16,-64(r8)
+	  42: GETL       	R8, t36
+	  43: ADDL       	$0xFFFFFFC0, t36
+	  44: LDL       	(t36), t38
+	  45: PUTL       	t38, R16
+	  46: INCEIPL       	$4
+
+	0x2547FB94:  7CE08120  mtcrf 0x8,r7
+	  47: GETL       	R7, t40
+	  48: ICRFL       	t40, $0x4, CR
+	  49: INCEIPL       	$4
+
+	0x2547FB98:  8248FFC8  lwz r18,-56(r8)
+	  50: GETL       	R8, t42
+	  51: ADDL       	$0xFFFFFFC8, t42
+	  52: LDL       	(t42), t44
+	  53: PUTL       	t44, R18
+	  54: INCEIPL       	$4
+
+	0x2547FB9C:  8268FFCC  lwz r19,-52(r8)
+	  55: GETL       	R8, t46
+	  56: ADDL       	$0xFFFFFFCC, t46
+	  57: LDL       	(t46), t48
+	  58: PUTL       	t48, R19
+	  59: INCEIPL       	$4
+
+	0x2547FBA0:  8288FFD0  lwz r20,-48(r8)
+	  60: GETL       	R8, t50
+	  61: ADDL       	$0xFFFFFFD0, t50
+	  62: LDL       	(t50), t52
+	  63: PUTL       	t52, R20
+	  64: INCEIPL       	$4
+
+	0x2547FBA4:  82A8FFD4  lwz r21,-44(r8)
+	  65: GETL       	R8, t54
+	  66: ADDL       	$0xFFFFFFD4, t54
+	  67: LDL       	(t54), t56
+	  68: PUTL       	t56, R21
+	  69: INCEIPL       	$4
+
+	0x2547FBA8:  82C8FFD8  lwz r22,-40(r8)
+	  70: GETL       	R8, t58
+	  71: ADDL       	$0xFFFFFFD8, t58
+	  72: LDL       	(t58), t60
+	  73: PUTL       	t60, R22
+	  74: INCEIPL       	$4
+
+	0x2547FBAC:  82E8FFDC  lwz r23,-36(r8)
+	  75: GETL       	R8, t62
+	  76: ADDL       	$0xFFFFFFDC, t62
+	  77: LDL       	(t62), t64
+	  78: PUTL       	t64, R23
+	  79: INCEIPL       	$4
+
+	0x2547FBB0:  8308FFE0  lwz r24,-32(r8)
+	  80: GETL       	R8, t66
+	  81: ADDL       	$0xFFFFFFE0, t66
+	  82: LDL       	(t66), t68
+	  83: PUTL       	t68, R24
+	  84: INCEIPL       	$4
+
+	0x2547FBB4:  8328FFE4  lwz r25,-28(r8)
+	  85: GETL       	R8, t70
+	  86: ADDL       	$0xFFFFFFE4, t70
+	  87: LDL       	(t70), t72
+	  88: PUTL       	t72, R25
+	  89: INCEIPL       	$4
+
+	0x2547FBB8:  8348FFE8  lwz r26,-24(r8)
+	  90: GETL       	R8, t74
+	  91: ADDL       	$0xFFFFFFE8, t74
+	  92: LDL       	(t74), t76
+	  93: PUTL       	t76, R26
+	  94: INCEIPL       	$4
+
+	0x2547FBBC:  8368FFEC  lwz r27,-20(r8)
+	  95: GETL       	R8, t78
+	  96: ADDL       	$0xFFFFFFEC, t78
+	  97: LDL       	(t78), t80
+	  98: PUTL       	t80, R27
+	  99: INCEIPL       	$4
+
+	0x2547FBC0:  8388FFF0  lwz r28,-16(r8)
+	 100: GETL       	R8, t82
+	 101: ADDL       	$0xFFFFFFF0, t82
+	 102: LDL       	(t82), t84
+	 103: PUTL       	t84, R28
+	 104: INCEIPL       	$4
+
+	0x2547FBC4:  83A8FFF4  lwz r29,-12(r8)
+	 105: GETL       	R8, t86
+	 106: ADDL       	$0xFFFFFFF4, t86
+	 107: LDL       	(t86), t88
+	 108: PUTL       	t88, R29
+	 109: INCEIPL       	$4
+
+	0x2547FBC8:  83C8FFF8  lwz r30,-8(r8)
+	 110: GETL       	R8, t90
+	 111: ADDL       	$0xFFFFFFF8, t90
+	 112: LDL       	(t90), t92
+	 113: PUTL       	t92, R30
+	 114: INCEIPL       	$4
+
+	0x2547FBCC:  83E8FFFC  lwz r31,-4(r8)
+	 115: GETL       	R8, t94
+	 116: ADDL       	$0xFFFFFFFC, t94
+	 117: LDL       	(t94), t96
+	 118: PUTL       	t96, R31
+	 119: INCEIPL       	$4
+
+	0x2547FBD0:  7D014378  or r1,r8,r8
+	 120: GETL       	R8, t98
+	 121: PUTL       	t98, R1
+	 122: INCEIPL       	$4
+
+	0x2547FBD4:  4E800020  blr
+	 123: GETL       	LR, t100
+	 124: JMPo-r       	t100  ($4)
+
+
+
+. 274 2547FB68 112
+. 9B 03 00 00 92 54 00 00 83 76 00 04 7E C3 B3 78 93 70 00 00 81 01 00 00 82 08 00 04 80 E8 FF BC 7E 08 03 A6 82 28 FF C4 82 08 FF C0 7C E0 81 20 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+==== BB 275 (0x2547512C) approx BBs exec'd 0 ====
+
+	0x2547512C:  813E0160  lwz r9,352(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x160, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475130:  837E0164  lwz r27,356(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x164, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R27
+	   9: INCEIPL       	$4
+
+	0x25475134:  90690000  stw r3,0(r9)
+	  10: GETL       	R3, t8
+	  11: GETL       	R9, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25475138:  3860000C  li r3,12
+	  14: MOVL       	$0xC, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0x2547513C:  480228E9  bl 0x25497A24
+	  17: MOVL       	$0x25475140, t14
+	  18: PUTL       	t14, LR
+	  19: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 275 2547512C 20
+. 81 3E 01 60 83 7E 01 64 90 69 00 00 38 60 00 0C 48 02 28 E9
+==== BB 276 (0x25475140) approx BBs exec'd 0 ====
+
+	0x25475140:  7C7C1B79  or. r28,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R28
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25475144:  907B0000  stw r3,0(r27)
+	   5: GETL       	R3, t4
+	   6: GETL       	R27, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x25475148:  41820188  bc 12,2,0x254752D0
+	   9: Js02o       	$0x254752D0
+
+
+
+. 276 25475140 12
+. 7C 7C 1B 79 90 7B 00 00 41 82 01 88
+==== BB 277 (0x2547514C) approx BBs exec'd 0 ====
+
+	0x2547514C:  835E0154  lwz r26,340(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x154, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25475150:  3D40CCCC  lis r10,-13108
+	   5: MOVL       	$0xCCCC0000, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0x25475154:  6146CCCD  ori r6,r10,0xCCCD
+	   8: MOVL       	$0xCCCCCCCD, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x25475158:  811A0000  lwz r8,0(r26)
+	  11: GETL       	R26, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0x2547515C:  5507103A  rlwinm r7,r8,2,0,29
+	  15: GETL       	R8, t12
+	  16: SHLL       	$0x2, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x25475160:  38A70027  addi r5,r7,39
+	  19: GETL       	R7, t14
+	  20: ADDL       	$0x27, t14
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0x25475164:  7C653016  mulhwu r3,r5,r6
+	  23: GETL       	R5, t16
+	  24: GETL       	R6, t18
+	  25: UMULHL       	t16, t18
+	  26: PUTL       	t18, R3
+	  27: INCEIPL       	$4
+
+	0x25475168:  547DE13E  rlwinm r29,r3,28,4,31
+	  28: GETL       	R3, t20
+	  29: SHRL       	$0x4, t20
+	  30: PUTL       	t20, R29
+	  31: INCEIPL       	$4
+
+	0x2547516C:  1C7D0140  mulli r3,r29,320
+	  32: GETL       	R29, t22
+	  33: MULL       	$0x140, t22
+	  34: PUTL       	t22, R3
+	  35: INCEIPL       	$4
+
+	0x25475170:  480228B5  bl 0x25497A24
+	  36: MOVL       	$0x25475174, t24
+	  37: PUTL       	t24, LR
+	  38: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 277 2547514C 40
+. 83 5E 01 54 3D 40 CC CC 61 46 CC CD 81 1A 00 00 55 07 10 3A 38 A7 00 27 7C 65 30 16 54 7D E1 3E 1C 7D 01 40 48 02 28 B5
+==== BB 278 (0x25475174) approx BBs exec'd 0 ====
+
+	0x25475174:  809B0000  lwz r4,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25475178:  907C0000  stw r3,0(r28)
+	   4: GETL       	R3, t4
+	   5: GETL       	R28, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0x2547517C:  81240000  lwz r9,0(r4)
+	   8: GETL       	R4, t8
+	   9: LDL       	(t8), t10
+	  10: PUTL       	t10, R9
+	  11: INCEIPL       	$4
+
+	0x25475180:  80DE0188  lwz r6,392(r30)
+	  12: GETL       	R30, t12
+	  13: ADDL       	$0x188, t12
+	  14: LDL       	(t12), t14
+	  15: PUTL       	t14, R6
+	  16: INCEIPL       	$4
+
+	0x25475184:  2F890000  cmpi cr7,r9,0
+	  17: GETL       	R9, t16
+	  18: CMP0L       	t16, t18  (-rSo)
+	  19: ICRFL       	t18, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x25475188:  419E014C  bc 12,30,0x254752D4
+	  21: Js30o       	$0x254752D4
+
+
+
+. 278 25475174 24
+. 80 9B 00 00 90 7C 00 00 81 24 00 00 80 DE 01 88 2F 89 00 00 41 9E 01 4C
+==== BB 279 (0x2547518C) approx BBs exec'd 0 ====
+
+	0x2547518C:  82FE04C8  lwz r23,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x25475190:  39600000  li r11,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x25475194:  1C7D0014  mulli r3,r29,20
+	   8: GETL       	R29, t6
+	   9: MULL       	$0x14, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x25475198:  80FA0000  lwz r7,0(r26)
+	  12: GETL       	R26, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R7
+	  15: INCEIPL       	$4
+
+	0x2547519C:  811E014C  lwz r8,332(r30)
+	  16: GETL       	R30, t12
+	  17: ADDL       	$0x14C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R8
+	  20: INCEIPL       	$4
+
+	0x254751A0:  39400000  li r10,0
+	  21: MOVL       	$0x0, t16
+	  22: PUTL       	t16, R10
+	  23: INCEIPL       	$4
+
+	0x254751A4:  809E0194  lwz r4,404(r30)
+	  24: GETL       	R30, t18
+	  25: ADDL       	$0x194, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R4
+	  28: INCEIPL       	$4
+
+	0x254751A8:  38C00000  li r6,0
+	  29: MOVL       	$0x0, t22
+	  30: PUTL       	t22, R6
+	  31: INCEIPL       	$4
+
+	0x254751AC:  80BE0148  lwz r5,328(r30)
+	  32: GETL       	R30, t24
+	  33: ADDL       	$0x148, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R5
+	  36: INCEIPL       	$4
+
+	0x254751B0:  917B0004  stw r11,4(r27)
+	  37: GETL       	R11, t28
+	  38: GETL       	R27, t30
+	  39: ADDL       	$0x4, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x254751B4:  7D2B4B78  or r11,r9,r9
+	  42: GETL       	R9, t32
+	  43: PUTL       	t32, R11
+	  44: INCEIPL       	$4
+
+	0x254751B8:  913701B0  stw r9,432(r23)
+	  45: GETL       	R9, t34
+	  46: GETL       	R23, t36
+	  47: ADDL       	$0x1B0, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0x254751BC:  555B103A  rlwinm r27,r10,2,0,29
+	  50: GETL       	R10, t38
+	  51: SHLL       	$0x2, t38
+	  52: PUTL       	t38, R27
+	  53: INCEIPL       	$4
+
+	0x254751C0:  7C863840  cmpl cr1,r6,r7
+	  54: GETL       	R6, t40
+	  55: GETL       	R7, t42
+	  56: CMPUL       	t40, t42, t44  (-rSo)
+	  57: ICRFL       	t44, $0x1, CR
+	  58: INCEIPL       	$4
+
+	0x254751C4:  7F5B282E  lwzx r26,r27,r5
+	  59: GETL       	R5, t46
+	  60: GETL       	R27, t48
+	  61: ADDL       	t48, t46
+	  62: LDL       	(t46), t50
+	  63: PUTL       	t50, R26
+	  64: INCEIPL       	$4
+
+	0x254751C8:  917C0000  stw r11,0(r28)
+	  65: GETL       	R11, t52
+	  66: GETL       	R28, t54
+	  67: STL       	t52, (t54)
+	  68: INCEIPL       	$4
+
+	0x254751CC:  3B9C0004  addi r28,r28,4
+	  69: GETL       	R28, t56
+	  70: ADDL       	$0x4, t56
+	  71: PUTL       	t56, R28
+	  72: INCEIPL       	$4
+
+	0x254751D0:  7D9A4214  add r12,r26,r8
+	  73: GETL       	R26, t58
+	  74: GETL       	R8, t60
+	  75: ADDL       	t58, t60
+	  76: PUTL       	t60, R12
+	  77: INCEIPL       	$4
+
+	0x254751D4:  910B000C  stw r8,12(r11)
+	  78: GETL       	R8, t62
+	  79: GETL       	R11, t64
+	  80: ADDL       	$0xC, t64
+	  81: STL       	t62, (t64)
+	  82: INCEIPL       	$4
+
+	0x254751D8:  908B0004  stw r4,4(r11)
+	  83: GETL       	R4, t66
+	  84: GETL       	R11, t68
+	  85: ADDL       	$0x4, t68
+	  86: STL       	t66, (t68)
+	  87: INCEIPL       	$4
+
+	0x254751DC:  390C0001  addi r8,r12,1
+	  88: GETL       	R12, t70
+	  89: ADDL       	$0x1, t70
+	  90: PUTL       	t70, R8
+	  91: INCEIPL       	$4
+
+	0x254751E0:  90CB0008  stw r6,8(r11)
+	  92: GETL       	R6, t72
+	  93: GETL       	R11, t74
+	  94: ADDL       	$0x8, t74
+	  95: STL       	t72, (t74)
+	  96: INCEIPL       	$4
+
+	0x254751E4:  934B0010  stw r26,16(r11)
+	  97: GETL       	R26, t76
+	  98: GETL       	R11, t78
+	  99: ADDL       	$0x10, t78
+	 100: STL       	t76, (t78)
+	 101: INCEIPL       	$4
+
+	0x254751E8:  4084001C  bc 4,4,0x25475204
+	 102: Jc04o       	$0x25475204
+
+
+
+. 279 2547518C 96
+. 82 FE 04 C8 39 60 00 00 1C 7D 00 14 80 FA 00 00 81 1E 01 4C 39 40 00 00 80 9E 01 94 38 C0 00 00 80 BE 01 48 91 7B 00 04 7D 2B 4B 78 91 37 01 B0 55 5B 10 3A 7C 86 38 40 7F 5B 28 2E 91 7C 00 00 3B 9C 00 04 7D 9A 42 14 91 0B 00 0C 90 8B 00 04 39 0C 00 01 90 CB 00 08 93 4B 00 10 40 84 00 1C
+==== BB 280 (0x254751EC) approx BBs exec'd 0 ====
+
+	0x254751EC:  7CE903A6  mtctr r7
+	   0: GETL       	R7, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x254751F0:  38000000  li r0,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x254751F4:  392B0014  addi r9,r11,20
+	   6: GETL       	R11, t4
+	   7: ADDL       	$0x14, t4
+	   8: PUTL       	t4, R9
+	   9: INCEIPL       	$4
+
+	0x254751F8:  90090000  stw r0,0(r9)
+	  10: GETL       	R0, t6
+	  11: GETL       	R9, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x254751FC:  39290004  addi r9,r9,4
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x4, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x25475200:  4200FFF8  bc 16,0,0x254751F8
+	  18: GETL       	CTR, t12
+	  19: ADDL       	$0xFFFFFFFF, t12
+	  20: PUTL       	t12, CTR
+	  21: JIFZL       	t12, $0x25475204
+	  22: JMPo       	$0x254751F8  ($4)
+
+
+
+. 280 254751EC 24
+. 7C E9 03 A6 38 00 00 00 39 2B 00 14 90 09 00 00 39 29 00 04 42 00 FF F8
+==== BB 281 (0x254751F8) approx BBs exec'd 0 ====
+
+	0x254751F8:  90090000  stw r0,0(r9)
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x254751FC:  39290004  addi r9,r9,4
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25475200:  4200FFF8  bc 16,0,0x254751F8
+	   8: GETL       	CTR, t6
+	   9: ADDL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, CTR
+	  11: JIFZL       	t6, $0x25475204
+	  12: JMPo       	$0x254751F8  ($4)
+
+
+
+. 281 254751F8 12
+. 90 09 00 00 39 29 00 04 42 00 FF F8
+==== BB 282 (0x25475204) approx BBs exec'd 0 ====
+
+	0x25475204:  2F0A0001  cmpi cr6,r10,1
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25475208:  394A0001  addi r10,r10,1
+	   5: GETL       	R10, t6
+	   6: ADDL       	$0x1, t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x2547520C:  419A00BC  bc 12,26,0x254752C8
+	   9: Js26o       	$0x254752C8
+
+
+
+. 282 25475204 12
+. 2F 0A 00 01 39 4A 00 01 41 9A 00 BC
+==== BB 283 (0x25475210) approx BBs exec'd 0 ====
+
+	0x25475210:  7C035A14  add r0,r3,r11
+	   0: GETL       	R3, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475214:  280A0001  cmpli cr0,r10,1
+	   5: GETL       	R10, t4
+	   6: MOVL       	$0x1, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x25475218:  900B0000  stw r0,0(r11)
+	  10: GETL       	R0, t10
+	  11: GETL       	R11, t12
+	  12: STL       	t10, (t12)
+	  13: INCEIPL       	$4
+
+	0x2547521C:  7D6B1A14  add r11,r11,r3
+	  14: GETL       	R11, t14
+	  15: GETL       	R3, t16
+	  16: ADDL       	t14, t16
+	  17: PUTL       	t16, R11
+	  18: INCEIPL       	$4
+
+	0x25475220:  40A1FF9C  bc 5,1,0x254751BC
+	  19: Jc01o       	$0x254751BC
+
+
+
+. 283 25475210 20
+. 7C 03 5A 14 28 0A 00 01 90 0B 00 00 7D 6B 1A 14 40 A1 FF 9C
+==== BB 284 (0x254751BC) approx BBs exec'd 0 ====
+
+	0x254751BC:  555B103A  rlwinm r27,r10,2,0,29
+	   0: GETL       	R10, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x254751C0:  7C863840  cmpl cr1,r6,r7
+	   4: GETL       	R6, t2
+	   5: GETL       	R7, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254751C4:  7F5B282E  lwzx r26,r27,r5
+	   9: GETL       	R5, t8
+	  10: GETL       	R27, t10
+	  11: ADDL       	t10, t8
+	  12: LDL       	(t8), t12
+	  13: PUTL       	t12, R26
+	  14: INCEIPL       	$4
+
+	0x254751C8:  917C0000  stw r11,0(r28)
+	  15: GETL       	R11, t14
+	  16: GETL       	R28, t16
+	  17: STL       	t14, (t16)
+	  18: INCEIPL       	$4
+
+	0x254751CC:  3B9C0004  addi r28,r28,4
+	  19: GETL       	R28, t18
+	  20: ADDL       	$0x4, t18
+	  21: PUTL       	t18, R28
+	  22: INCEIPL       	$4
+
+	0x254751D0:  7D9A4214  add r12,r26,r8
+	  23: GETL       	R26, t20
+	  24: GETL       	R8, t22
+	  25: ADDL       	t20, t22
+	  26: PUTL       	t22, R12
+	  27: INCEIPL       	$4
+
+	0x254751D4:  910B000C  stw r8,12(r11)
+	  28: GETL       	R8, t24
+	  29: GETL       	R11, t26
+	  30: ADDL       	$0xC, t26
+	  31: STL       	t24, (t26)
+	  32: INCEIPL       	$4
+
+	0x254751D8:  908B0004  stw r4,4(r11)
+	  33: GETL       	R4, t28
+	  34: GETL       	R11, t30
+	  35: ADDL       	$0x4, t30
+	  36: STL       	t28, (t30)
+	  37: INCEIPL       	$4
+
+	0x254751DC:  390C0001  addi r8,r12,1
+	  38: GETL       	R12, t32
+	  39: ADDL       	$0x1, t32
+	  40: PUTL       	t32, R8
+	  41: INCEIPL       	$4
+
+	0x254751E0:  90CB0008  stw r6,8(r11)
+	  42: GETL       	R6, t34
+	  43: GETL       	R11, t36
+	  44: ADDL       	$0x8, t36
+	  45: STL       	t34, (t36)
+	  46: INCEIPL       	$4
+
+	0x254751E4:  934B0010  stw r26,16(r11)
+	  47: GETL       	R26, t38
+	  48: GETL       	R11, t40
+	  49: ADDL       	$0x10, t40
+	  50: STL       	t38, (t40)
+	  51: INCEIPL       	$4
+
+	0x254751E8:  4084001C  bc 4,4,0x25475204
+	  52: Jc04o       	$0x25475204
+
+
+
+. 284 254751BC 48
+. 55 5B 10 3A 7C 86 38 40 7F 5B 28 2E 91 7C 00 00 3B 9C 00 04 7D 9A 42 14 91 0B 00 0C 90 8B 00 04 39 0C 00 01 90 CB 00 08 93 4B 00 10 40 84 00 1C
+==== BB 285 (0x254752C8) approx BBs exec'd 0 ====
+
+	0x254752C8:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254752CC:  4BFFFF48  b 0x25475214
+	   3: JMPo       	$0x25475214  ($4)
+
+
+
+. 285 254752C8 8
+. 38 00 00 00 4B FF FF 48
+==== BB 286 (0x25475214) approx BBs exec'd 0 ====
+
+	0x25475214:  280A0001  cmpli cr0,r10,1
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25475218:  900B0000  stw r0,0(r11)
+	   5: GETL       	R0, t6
+	   6: GETL       	R11, t8
+	   7: STL       	t6, (t8)
+	   8: INCEIPL       	$4
+
+	0x2547521C:  7D6B1A14  add r11,r11,r3
+	   9: GETL       	R11, t10
+	  10: GETL       	R3, t12
+	  11: ADDL       	t10, t12
+	  12: PUTL       	t12, R11
+	  13: INCEIPL       	$4
+
+	0x25475220:  40A1FF9C  bc 5,1,0x254751BC
+	  14: Jc01o       	$0x254751BC
+
+
+
+. 286 25475214 16
+. 28 0A 00 01 90 0B 00 00 7D 6B 1A 14 40 A1 FF 9C
+==== BB 287 (0x25475224) approx BBs exec'd 0 ====
+
+	0x25475224:  83B70000  lwz r29,0(r23)
+	   0: GETL       	R23, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0x25475228:  38000009  li r0,9
+	   4: MOVL       	$0x9, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0x2547522C:  809E0158  lwz r4,344(r30)
+	   7: GETL       	R30, t6
+	   8: ADDL       	$0x158, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0x25475230:  39200000  li r9,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x25475234:  2F9D0000  cmpi cr7,r29,0
+	  15: GETL       	R29, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25475238:  913C0000  stw r9,0(r28)
+	  19: GETL       	R9, t16
+	  20: GETL       	R28, t18
+	  21: STL       	t16, (t18)
+	  22: INCEIPL       	$4
+
+	0x2547523C:  90040000  stw r0,0(r4)
+	  23: GETL       	R0, t20
+	  24: GETL       	R4, t22
+	  25: STL       	t20, (t22)
+	  26: INCEIPL       	$4
+
+	0x25475240:  419E0028  bc 12,30,0x25475268
+	  27: Js30o       	$0x25475268
+
+
+
+. 287 25475224 32
+. 83 B7 00 00 38 00 00 09 80 9E 01 58 39 20 00 00 2F 9D 00 00 91 3C 00 00 90 04 00 00 41 9E 00 28
+==== BB 288 (0x25475244) approx BBs exec'd 0 ====
+
+	0x25475244:  839D0094  lwz r28,148(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x94, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25475248:  2C9C0000  cmpi cr1,r28,0
+	   5: GETL       	R28, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547524C:  408601BC  bc 4,6,0x25475408
+	   9: Jc06o       	$0x25475408
+
+
+
+. 288 25475244 12
+. 83 9D 00 94 2C 9C 00 00 40 86 01 BC
+==== BB 289 (0x25475250) approx BBs exec'd 0 ====
+
+	0x25475250:  817D005C  lwz r11,92(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x5C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25475254:  3800FFFF  li r0,-1
+	   5: MOVL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25475258:  901D01E0  stw r0,480(r29)
+	   8: GETL       	R0, t6
+	   9: GETL       	R29, t8
+	  10: ADDL       	$0x1E0, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547525C:  2F0B0000  cmpi cr6,r11,0
+	  13: GETL       	R11, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0x25475260:  409A0084  bc 4,26,0x254752E4
+	  17: Jc26o       	$0x254752E4
+
+
+
+. 289 25475250 20
+. 81 7D 00 5C 38 00 FF FF 90 1D 01 E0 2F 0B 00 00 40 9A 00 84
+==== BB 290 (0x25475264) approx BBs exec'd 0 ====
+
+	0x25475264:  901D018C  stw r0,396(r29)
+	   0: GETL       	R0, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	$0x18C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25475268:  2C190000  cmpi cr0,r25,0
+	   5: GETL       	R25, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547526C:  41820010  bc 12,2,0x2547527C
+	   9: Js02o       	$0x2547527C
+
+
+
+. 290 25475264 12
+. 90 1D 01 8C 2C 19 00 00 41 82 00 10
+==== BB 291 (0x25475270) approx BBs exec'd 0 ====
+
+	0x25475270:  8BB90000  lbz r29,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0x25475274:  2F9D0000  cmpi cr7,r29,0
+	   4: GETL       	R29, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x25475278:  409E0094  bc 4,30,0x2547530C
+	   8: Jc30o       	$0x2547530C
+
+
+
+. 291 25475270 12
+. 8B B9 00 00 2F 9D 00 00 40 9E 00 94
+==== BB 292 (0x2547530C) approx BBs exec'd 0 ====
+
+	0x2547530C:  7F23CB78  or r3,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475310:  7F3DCB78  or r29,r25,r25
+	   3: GETL       	R25, t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0x25475314:  4800DCBD  bl 0x25482FD0
+	   6: MOVL       	$0x25475318, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25482FD0  ($4)
+
+
+
+. 292 2547530C 12
+. 7F 23 CB 78 7F 3D CB 78 48 00 DC BD
+==== BB 293 (0x25483008) approx BBs exec'd 0 ====
+
+	0x25483008:  3CC0FEFF  lis r6,-257
+	   0: MOVL       	$0xFEFF0000, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x2548300C:  38C6FEFF  addi r6,r6,-257
+	   3: MOVL       	$0xFEFEFEFF, t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0x25483010:  419D001C  bc 12,29,0x2548302C
+	   6: Js29o       	$0x2548302C
+
+
+
+. 293 25483008 12
+. 3C C0 FE FF 38 C6 FE FF 41 9D 00 1C
+==== BB 294 (0x25483014) approx BBs exec'd 0 ====
+
+	0x25483014:  85040004  lwzu r8,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0x25483018:  7CE04038  and r0,r7,r8
+	   6: GETL       	R7, t4
+	   7: GETL       	R8, t6
+	   8: ANDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x2548301C:  7CEA4378  or r10,r7,r8
+	  11: GETL       	R7, t8
+	  12: GETL       	R8, t10
+	  13: ORL       	t10, t8
+	  14: PUTL       	t8, R10
+	  15: INCEIPL       	$4
+
+	0x25483020:  7C003A14  add r0,r0,r7
+	  16: GETL       	R0, t12
+	  17: GETL       	R7, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x25483024:  7D4800F9  nor. r8,r10,r0
+	  21: GETL       	R10, t16
+	  22: GETL       	R0, t18
+	  23: ORL       	t18, t16
+	  24: NOTL       	t16
+	  25: PUTL       	t16, R8
+	  26: CMP0L       	t16, t20  (-rSo)
+	  27: ICRFL       	t20, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0x25483028:  4082004C  bc 4,2,0x25483074
+	  29: Jc02o       	$0x25483074
+
+
+
+. 294 25483014 24
+. 85 04 00 04 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 48 00 F9 40 82 00 4C
+==== BB 295 (0x2548302C) approx BBs exec'd 0 ====
+
+	0x2548302C:  81040004  lwz r8,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25483030:  85240008  lwzu r9,8(r4)
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0x25483034:  7C064214  add r0,r6,r8
+	  11: GETL       	R6, t8
+	  12: GETL       	R8, t10
+	  13: ADDL       	t8, t10
+	  14: PUTL       	t10, R0
+	  15: INCEIPL       	$4
+
+	0x25483038:  7CEA40F8  nor r10,r7,r8
+	  16: GETL       	R7, t12
+	  17: GETL       	R8, t14
+	  18: ORL       	t14, t12
+	  19: NOTL       	t12
+	  20: PUTL       	t12, R10
+	  21: INCEIPL       	$4
+
+	0x2548303C:  7C005039  and. r0,r0,r10
+	  22: GETL       	R0, t16
+	  23: GETL       	R10, t18
+	  24: ANDL       	t16, t18
+	  25: PUTL       	t18, R0
+	  26: CMP0L       	t18, t20  (-rSo)
+	  27: ICRFL       	t20, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0x25483040:  7D664A14  add r11,r6,r9
+	  29: GETL       	R6, t22
+	  30: GETL       	R9, t24
+	  31: ADDL       	t22, t24
+	  32: PUTL       	t24, R11
+	  33: INCEIPL       	$4
+
+	0x25483044:  7CEC48F8  nor r12,r7,r9
+	  34: GETL       	R7, t26
+	  35: GETL       	R9, t28
+	  36: ORL       	t28, t26
+	  37: NOTL       	t26
+	  38: PUTL       	t26, R12
+	  39: INCEIPL       	$4
+
+	0x25483048:  4082001C  bc 4,2,0x25483064
+	  40: Jc02o       	$0x25483064
+
+
+
+. 295 2548302C 32
+. 81 04 00 04 85 24 00 08 7C 06 42 14 7C EA 40 F8 7C 00 50 39 7D 66 4A 14 7C EC 48 F8 40 82 00 1C
+==== BB 296 (0x2548304C) approx BBs exec'd 0 ====
+
+	0x2548304C:  7D606039  and. r0,r11,r12
+	   0: GETL       	R11, t0
+	   1: GETL       	R12, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0x25483050:  4182FFDC  bc 12,2,0x2548302C
+	   7: Js02o       	$0x2548302C
+
+
+
+. 296 2548304C 8
+. 7D 60 60 39 41 82 FF DC
+==== BB 297 (0x25483054) approx BBs exec'd 0 ====
+
+	0x25483054:  7CE04838  and r0,r7,r9
+	   0: GETL       	R7, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483058:  7C003A14  add r0,r0,r7
+	   5: GETL       	R0, t4
+	   6: GETL       	R7, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x2548305C:  7D880078  andc r8,r12,r0
+	  10: GETL       	R12, t8
+	  11: GETL       	R0, t10
+	  12: NOTL       	t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R8
+	  15: INCEIPL       	$4
+
+	0x25483060:  48000014  b 0x25483074
+	  16: JMPo       	$0x25483074  ($4)
+
+
+
+. 297 25483054 16
+. 7C E0 48 38 7C 00 3A 14 7D 88 00 78 48 00 00 14
+==== BB 298 (0x25475318) approx BBs exec'd 0 ====
+
+	0x25475318:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547531C:  3B63001F  addi r27,r3,31
+	   3: GETL       	R3, t2
+	   4: ADDL       	$0x1F, t2
+	   5: PUTL       	t2, R27
+	   6: INCEIPL       	$4
+
+	0x25475320:  81810000  lwz r12,0(r1)
+	   7: GETL       	R1, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0x25475324:  577A0036  rlwinm r26,r27,0,0,27
+	  11: GETL       	R27, t8
+	  12: ANDL       	$0xFFFFFFF0, t8
+	  13: PUTL       	t8, R26
+	  14: INCEIPL       	$4
+
+	0x25475328:  7C791B78  or r25,r3,r3
+	  15: GETL       	R3, t10
+	  16: PUTL       	t10, R25
+	  17: INCEIPL       	$4
+
+	0x2547532C:  7D7A00D0  neg r11,r26
+	  18: GETL       	R26, t12
+	  19: NEGL       	t12
+	  20: PUTL       	t12, R11
+	  21: INCEIPL       	$4
+
+	0x25475330:  38B90001  addi r5,r25,1
+	  22: GETL       	R25, t14
+	  23: ADDL       	$0x1, t14
+	  24: PUTL       	t14, R5
+	  25: INCEIPL       	$4
+
+	0x25475334:  7D81596E  stwux r12,r1,r11
+	  26: GETL       	R11, t16
+	  27: GETL       	R1, t18
+	  28: ADDL       	t18, t16
+	  29: PUTL       	t16, R1
+	  30: GETL       	R12, t20
+	  31: STL       	t20, (t16)
+	  32: INCEIPL       	$4
+
+	0x25475338:  39410017  addi r10,r1,23
+	  33: GETL       	R1, t22
+	  34: ADDL       	$0x17, t22
+	  35: PUTL       	t22, R10
+	  36: INCEIPL       	$4
+
+	0x2547533C:  55430036  rlwinm r3,r10,0,0,27
+	  37: GETL       	R10, t24
+	  38: ANDL       	$0xFFFFFFF0, t24
+	  39: PUTL       	t24, R3
+	  40: INCEIPL       	$4
+
+	0x25475340:  4800E8A1  bl 0x25483BE0
+	  41: MOVL       	$0x25475344, t26
+	  42: PUTL       	t26, LR
+	  43: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 298 25475318 44
+. 7F A4 EB 78 3B 63 00 1F 81 81 00 00 57 7A 00 36 7C 79 1B 78 7D 7A 00 D0 38 B9 00 01 7D 81 59 6E 39 41 00 17 55 43 00 36 48 00 E8 A1
+==== BB 299 (0x25483CB0) approx BBs exec'd 0 ====
+
+	0x25483CB0:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25483CB4:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25483CB8:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x25483CBC:  4800015D  bl 0x25483E18
+	  10: MOVL       	$0x25483CC0, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0x25483E18  ($4)
+
+
+
+. 299 25483CB0 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 01 5D
+==== BB 300 _wordcopy_fwd_dest_aligned(0x25483E18) approx BBs exec'd 0 ====
+
+	0x25483E18:  54A007BE  rlwinm r0,r5,0,30,31
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25483E1C:  54881EF8  rlwinm r8,r4,3,27,28
+	   4: GETL       	R4, t2
+	   5: ROLL       	$0x3, t2
+	   6: ANDL       	$0x18, t2
+	   7: PUTL       	t2, R8
+	   8: INCEIPL       	$4
+
+	0x25483E20:  2F800001  cmpi cr7,r0,1
+	   9: GETL       	R0, t4
+	  10: MOVL       	$0x1, t8
+	  11: CMPL       	t4, t8, t6  (-rSo)
+	  12: ICRFL       	t6, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25483E24:  9421FFF0  stwu r1,-16(r1)
+	  14: GETL       	R1, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0xFFFFFFF0, t12
+	  17: PUTL       	t12, R1
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0x25483E28:  5484003A  rlwinm r4,r4,0,0,29
+	  20: GETL       	R4, t14
+	  21: ANDL       	$0xFFFFFFFC, t14
+	  22: PUTL       	t14, R4
+	  23: INCEIPL       	$4
+
+	0x25483E2C:  20E80020  subfic r7,r8,32
+	  24: GETL       	R8, t16
+	  25: MOVL       	$0x20, t18
+	  26: SBBL       	t16, t18  (-wCa)
+	  27: PUTL       	t18, R7
+	  28: INCEIPL       	$4
+
+	0x25483E30:  419E00B8  bc 12,30,0x25483EE8
+	  29: Js30o       	$0x25483EE8
+
+
+
+. 300 25483E18 28
+. 54 A0 07 BE 54 88 1E F8 2F 80 00 01 94 21 FF F0 54 84 00 3A 20 E8 00 20 41 9E 00 B8
+==== BB 301 (0x25483E34) approx BBs exec'd 0 ====
+
+	0x25483E34:  28000001  cmpli cr0,r0,1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25483E38:  4180008C  bc 12,0,0x25483EC4
+	   5: Js00o       	$0x25483EC4
+
+
+
+. 301 25483E34 8
+. 28 00 00 01 41 80 00 8C
+==== BB 302 (0x25483EC4) approx BBs exec'd 0 ====
+
+	0x25483EC4:  81440000  lwz r10,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0x25483EC8:  3863FFFC  addi r3,r3,-4
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFFC, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483ECC:  85640004  lwzu r11,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: PUTL       	t6, R4
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0x25483ED0:  4BFFFF90  b 0x25483E60
+	  14: JMPo       	$0x25483E60  ($4)
+
+
+
+. 302 25483EC4 16
+. 81 44 00 00 38 63 FF FC 85 64 00 04 4B FF FF 90
+==== BB 303 (0x25483E60) approx BBs exec'd 0 ====
+
+	0x25483E60:  7D404030  slw r0,r10,r8
+	   0: GETL       	R10, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483E64:  7D6A3C30  srw r10,r11,r7
+	   5: GETL       	R11, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25483E68:  7C0C5378  or r12,r0,r10
+	  10: GETL       	R0, t8
+	  11: GETL       	R10, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0x25483E6C:  81440004  lwz r10,4(r4)
+	  15: GETL       	R4, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R10
+	  19: INCEIPL       	$4
+
+	0x25483E70:  91830004  stw r12,4(r3)
+	  20: GETL       	R12, t16
+	  21: GETL       	R3, t18
+	  22: ADDL       	$0x4, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25483E74:  7D6C4030  slw r12,r11,r8
+	  25: GETL       	R11, t22
+	  26: GETL       	R8, t20
+	  27: SHLL       	t20, t22
+	  28: PUTL       	t22, R12
+	  29: INCEIPL       	$4
+
+	0x25483E78:  7D4B3C30  srw r11,r10,r7
+	  30: GETL       	R10, t26
+	  31: GETL       	R7, t24
+	  32: SHRL       	t24, t26
+	  33: PUTL       	t26, R11
+	  34: INCEIPL       	$4
+
+	0x25483E7C:  7D865B78  or r6,r12,r11
+	  35: GETL       	R12, t28
+	  36: GETL       	R11, t30
+	  37: ORL       	t30, t28
+	  38: PUTL       	t28, R6
+	  39: INCEIPL       	$4
+
+	0x25483E80:  81640008  lwz r11,8(r4)
+	  40: GETL       	R4, t32
+	  41: ADDL       	$0x8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R11
+	  44: INCEIPL       	$4
+
+	0x25483E84:  90C30008  stw r6,8(r3)
+	  45: GETL       	R6, t36
+	  46: GETL       	R3, t38
+	  47: ADDL       	$0x8, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0x25483E88:  34A5FFFC  addic. r5,r5,-4
+	  50: GETL       	R5, t40
+	  51: ADCL       	$0xFFFFFFFC, t40  (-wCa)
+	  52: PUTL       	t40, R5
+	  53: CMP0L       	t40, t42  (-rSo)
+	  54: ICRFL       	t42, $0x0, CR
+	  55: INCEIPL       	$4
+
+	0x25483E8C:  7D464030  slw r6,r10,r8
+	  56: GETL       	R10, t46
+	  57: GETL       	R8, t44
+	  58: SHLL       	t44, t46
+	  59: PUTL       	t46, R6
+	  60: INCEIPL       	$4
+
+	0x25483E90:  7D693C30  srw r9,r11,r7
+	  61: GETL       	R11, t50
+	  62: GETL       	R7, t48
+	  63: SHRL       	t48, t50
+	  64: PUTL       	t50, R9
+	  65: INCEIPL       	$4
+
+	0x25483E94:  8144000C  lwz r10,12(r4)
+	  66: GETL       	R4, t52
+	  67: ADDL       	$0xC, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R10
+	  70: INCEIPL       	$4
+
+	0x25483E98:  7CC04B78  or r0,r6,r9
+	  71: GETL       	R6, t56
+	  72: GETL       	R9, t58
+	  73: ORL       	t58, t56
+	  74: PUTL       	t56, R0
+	  75: INCEIPL       	$4
+
+	0x25483E9C:  38840010  addi r4,r4,16
+	  76: GETL       	R4, t60
+	  77: ADDL       	$0x10, t60
+	  78: PUTL       	t60, R4
+	  79: INCEIPL       	$4
+
+	0x25483EA0:  9003000C  stw r0,12(r3)
+	  80: GETL       	R0, t62
+	  81: GETL       	R3, t64
+	  82: ADDL       	$0xC, t64
+	  83: STL       	t62, (t64)
+	  84: INCEIPL       	$4
+
+	0x25483EA4:  38630010  addi r3,r3,16
+	  85: GETL       	R3, t66
+	  86: ADDL       	$0x10, t66
+	  87: PUTL       	t66, R3
+	  88: INCEIPL       	$4
+
+	0x25483EA8:  40A2FFA4  bc 5,2,0x25483E4C
+	  89: Jc02o       	$0x25483E4C
+
+
+
+. 303 25483E60 76
+. 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+==== BB 304 (0x25483E4C) approx BBs exec'd 0 ====
+
+	0x25483E4C:  7D664030  slw r6,r11,r8
+	   0: GETL       	R11, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25483E50:  7D493C30  srw r9,r10,r7
+	   5: GETL       	R10, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25483E54:  7CC04B78  or r0,r6,r9
+	  10: GETL       	R6, t8
+	  11: GETL       	R9, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R0
+	  14: INCEIPL       	$4
+
+	0x25483E58:  81640000  lwz r11,0(r4)
+	  15: GETL       	R4, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R11
+	  18: INCEIPL       	$4
+
+	0x25483E5C:  90030000  stw r0,0(r3)
+	  19: GETL       	R0, t16
+	  20: GETL       	R3, t18
+	  21: STL       	t16, (t18)
+	  22: INCEIPL       	$4
+
+	0x25483E60:  7D404030  slw r0,r10,r8
+	  23: GETL       	R10, t22
+	  24: GETL       	R8, t20
+	  25: SHLL       	t20, t22
+	  26: PUTL       	t22, R0
+	  27: INCEIPL       	$4
+
+	0x25483E64:  7D6A3C30  srw r10,r11,r7
+	  28: GETL       	R11, t26
+	  29: GETL       	R7, t24
+	  30: SHRL       	t24, t26
+	  31: PUTL       	t26, R10
+	  32: INCEIPL       	$4
+
+	0x25483E68:  7C0C5378  or r12,r0,r10
+	  33: GETL       	R0, t28
+	  34: GETL       	R10, t30
+	  35: ORL       	t30, t28
+	  36: PUTL       	t28, R12
+	  37: INCEIPL       	$4
+
+	0x25483E6C:  81440004  lwz r10,4(r4)
+	  38: GETL       	R4, t32
+	  39: ADDL       	$0x4, t32
+	  40: LDL       	(t32), t34
+	  41: PUTL       	t34, R10
+	  42: INCEIPL       	$4
+
+	0x25483E70:  91830004  stw r12,4(r3)
+	  43: GETL       	R12, t36
+	  44: GETL       	R3, t38
+	  45: ADDL       	$0x4, t38
+	  46: STL       	t36, (t38)
+	  47: INCEIPL       	$4
+
+	0x25483E74:  7D6C4030  slw r12,r11,r8
+	  48: GETL       	R11, t42
+	  49: GETL       	R8, t40
+	  50: SHLL       	t40, t42
+	  51: PUTL       	t42, R12
+	  52: INCEIPL       	$4
+
+	0x25483E78:  7D4B3C30  srw r11,r10,r7
+	  53: GETL       	R10, t46
+	  54: GETL       	R7, t44
+	  55: SHRL       	t44, t46
+	  56: PUTL       	t46, R11
+	  57: INCEIPL       	$4
+
+	0x25483E7C:  7D865B78  or r6,r12,r11
+	  58: GETL       	R12, t48
+	  59: GETL       	R11, t50
+	  60: ORL       	t50, t48
+	  61: PUTL       	t48, R6
+	  62: INCEIPL       	$4
+
+	0x25483E80:  81640008  lwz r11,8(r4)
+	  63: GETL       	R4, t52
+	  64: ADDL       	$0x8, t52
+	  65: LDL       	(t52), t54
+	  66: PUTL       	t54, R11
+	  67: INCEIPL       	$4
+
+	0x25483E84:  90C30008  stw r6,8(r3)
+	  68: GETL       	R6, t56
+	  69: GETL       	R3, t58
+	  70: ADDL       	$0x8, t58
+	  71: STL       	t56, (t58)
+	  72: INCEIPL       	$4
+
+	0x25483E88:  34A5FFFC  addic. r5,r5,-4
+	  73: GETL       	R5, t60
+	  74: ADCL       	$0xFFFFFFFC, t60  (-wCa)
+	  75: PUTL       	t60, R5
+	  76: CMP0L       	t60, t62  (-rSo)
+	  77: ICRFL       	t62, $0x0, CR
+	  78: INCEIPL       	$4
+
+	0x25483E8C:  7D464030  slw r6,r10,r8
+	  79: GETL       	R10, t66
+	  80: GETL       	R8, t64
+	  81: SHLL       	t64, t66
+	  82: PUTL       	t66, R6
+	  83: INCEIPL       	$4
+
+	0x25483E90:  7D693C30  srw r9,r11,r7
+	  84: GETL       	R11, t70
+	  85: GETL       	R7, t68
+	  86: SHRL       	t68, t70
+	  87: PUTL       	t70, R9
+	  88: INCEIPL       	$4
+
+	0x25483E94:  8144000C  lwz r10,12(r4)
+	  89: GETL       	R4, t72
+	  90: ADDL       	$0xC, t72
+	  91: LDL       	(t72), t74
+	  92: PUTL       	t74, R10
+	  93: INCEIPL       	$4
+
+	0x25483E98:  7CC04B78  or r0,r6,r9
+	  94: GETL       	R6, t76
+	  95: GETL       	R9, t78
+	  96: ORL       	t78, t76
+	  97: PUTL       	t76, R0
+	  98: INCEIPL       	$4
+
+	0x25483E9C:  38840010  addi r4,r4,16
+	  99: GETL       	R4, t80
+	 100: ADDL       	$0x10, t80
+	 101: PUTL       	t80, R4
+	 102: INCEIPL       	$4
+
+	0x25483EA0:  9003000C  stw r0,12(r3)
+	 103: GETL       	R0, t82
+	 104: GETL       	R3, t84
+	 105: ADDL       	$0xC, t84
+	 106: STL       	t82, (t84)
+	 107: INCEIPL       	$4
+
+	0x25483EA4:  38630010  addi r3,r3,16
+	 108: GETL       	R3, t86
+	 109: ADDL       	$0x10, t86
+	 110: PUTL       	t86, R3
+	 111: INCEIPL       	$4
+
+	0x25483EA8:  40A2FFA4  bc 5,2,0x25483E4C
+	 112: Jc02o       	$0x25483E4C
+
+
+
+. 304 25483E4C 96
+. 7D 66 40 30 7D 49 3C 30 7C C0 4B 78 81 64 00 00 90 03 00 00 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+==== BB 305 (0x25483EAC) approx BBs exec'd 0 ====
+
+	0x25483EAC:  7D654030  slw r5,r11,r8
+	   0: GETL       	R11, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25483EB0:  7D483C30  srw r8,r10,r7
+	   5: GETL       	R10, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25483EB4:  7CA44378  or r4,r5,r8
+	  10: GETL       	R5, t8
+	  11: GETL       	R8, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0x25483EB8:  38210010  addi r1,r1,16
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x10, t12
+	  17: PUTL       	t12, R1
+	  18: INCEIPL       	$4
+
+	0x25483EBC:  90830000  stw r4,0(r3)
+	  19: GETL       	R4, t14
+	  20: GETL       	R3, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25483EC0:  4E800020  blr
+	  23: GETL       	LR, t18
+	  24: JMPo-r       	t18  ($4)
+
+
+
+. 305 25483EAC 24
+. 7D 65 40 30 7D 48 3C 30 7C A4 43 78 38 21 00 10 90 83 00 00 4E 80 00 20
+==== BB 306 (0x25483CC0) approx BBs exec'd 0 ====
+
+	0x25483CC0:  4BFFFF98  b 0x25483C58
+	   0: JMPo       	$0x25483C58  ($4)
+
+
+
+. 306 25483CC0 4
+. 4B FF FF 98
+==== BB 307 (0x25483C74) approx BBs exec'd 0 ====
+
+	0x25483C74:  88FD0000  lbz r7,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25483C78:  3BBD0001  addi r29,r29,1
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25483C7C:  98FF0000  stb r7,0(r31)
+	   8: GETL       	R7, t6
+	   9: GETL       	R31, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25483C80:  3BFF0001  addi r31,r31,1
+	  12: GETL       	R31, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0x25483C84:  4200FFF0  bc 16,0,0x25483C74
+	  16: GETL       	CTR, t12
+	  17: ADDL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, CTR
+	  19: JIFZL       	t12, $0x25483C88
+	  20: JMPo       	$0x25483C74  ($4)
+
+
+
+. 307 25483C74 20
+. 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+==== BB 308 (0x25475344) approx BBs exec'd 0 ====
+
+	0x25475344:  893D0000  lbz r9,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25475348:  7C7C1B78  or r28,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R28
+	   6: INCEIPL       	$4
+
+	0x2547534C:  38600001  li r3,1
+	   7: MOVL       	$0x1, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25475350:  48000018  b 0x25475368
+	  10: JMPo       	$0x25475368  ($4)
+
+
+
+. 308 25475344 16
+. 89 3D 00 00 7C 7C 1B 78 38 60 00 01 48 00 00 18
+==== BB 309 (0x25475368) approx BBs exec'd 0 ====
+
+	0x25475368:  2C890000  cmpi cr1,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547536C:  4086FFE8  bc 4,6,0x25475354
+	   4: Jc06o       	$0x25475354
+
+
+
+. 309 25475368 8
+. 2C 89 00 00 40 86 FF E8
+==== BB 310 (0x25475354) approx BBs exec'd 0 ====
+
+	0x25475354:  3809FFC6  addi r0,r9,-58
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xFFFFFFC6, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25475358:  8D3D0001  lbzu r9,1(r29)
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x1, t2
+	   6: PUTL       	t2, R29
+	   7: LDB       	(t2), t4
+	   8: PUTL       	t4, R9
+	   9: INCEIPL       	$4
+
+	0x2547535C:  20800001  subfic r4,r0,1
+	  10: GETL       	R0, t6
+	  11: MOVL       	$0x1, t8
+	  12: SBBL       	t6, t8  (-wCa)
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0x25475360:  7C830194  addze r4,r3
+	  15: GETL       	R3, t10
+	  16: ADCL       	$0x0, t10  (-rCa-wCa)
+	  17: PUTL       	t10, R4
+	  18: INCEIPL       	$4
+
+	0x25475364:  7C832378  or r3,r4,r4
+	  19: GETL       	R4, t12
+	  20: PUTL       	t12, R3
+	  21: INCEIPL       	$4
+
+	0x25475368:  2C890000  cmpi cr1,r9,0
+	  22: GETL       	R9, t14
+	  23: CMP0L       	t14, t16  (-rSo)
+	  24: ICRFL       	t16, $0x1, CR
+	  25: INCEIPL       	$4
+
+	0x2547536C:  4086FFE8  bc 4,6,0x25475354
+	  26: Jc06o       	$0x25475354
+
+
+
+. 310 25475354 28
+. 38 09 FF C6 8D 3D 00 01 20 80 00 01 7C 83 01 94 7C 83 23 78 2C 89 00 00 40 86 FF E8
+==== BB 311 (0x25475370) approx BBs exec'd 0 ====
+
+	0x25475370:  5469103A  rlwinm r9,r3,2,0,29
+	   0: GETL       	R3, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25475374:  83BE0168  lwz r29,360(r30)
+	   4: GETL       	R30, t2
+	   5: ADDL       	$0x168, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0x25475378:  38690004  addi r3,r9,4
+	   9: GETL       	R9, t6
+	  10: ADDL       	$0x4, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x2547537C:  480226A9  bl 0x25497A24
+	  13: MOVL       	$0x25475380, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 311 25475370 16
+. 54 69 10 3A 83 BE 01 68 38 69 00 04 48 02 26 A9
+==== BB 312 (0x25475380) approx BBs exec'd 0 ====
+
+	0x25475380:  80DE0188  lwz r6,392(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x188, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25475384:  2F030000  cmpi cr6,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475388:  7C641B78  or r4,r3,r3
+	   9: GETL       	R3, t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0x2547538C:  907D0000  stw r3,0(r29)
+	  12: GETL       	R3, t10
+	  13: GETL       	R29, t12
+	  14: STL       	t10, (t12)
+	  15: INCEIPL       	$4
+
+	0x25475390:  41BAFF44  bc 13,26,0x254752D4
+	  16: Js26o       	$0x254752D4
+
+
+
+. 312 25475380 20
+. 80 DE 01 88 2F 03 00 00 7C 64 1B 78 90 7D 00 00 41 BA FF 44
+==== BB 313 (0x25475394) approx BBs exec'd 0 ====
+
+	0x25475394:  807E04B4  lwz r3,1204(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4B4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475398:  39000000  li r8,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0x2547539C:  80BE01A0  lwz r5,416(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x1A0, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x254753A0:  80C30000  lwz r6,0(r3)
+	  13: GETL       	R3, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R6
+	  16: INCEIPL       	$4
+
+	0x254753A4:  7F83E378  or r3,r28,r28
+	  17: GETL       	R28, t14
+	  18: PUTL       	t14, R3
+	  19: INCEIPL       	$4
+
+	0x254753A8:  80FE01A4  lwz r7,420(r30)
+	  20: GETL       	R30, t16
+	  21: ADDL       	$0x1A4, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R7
+	  24: INCEIPL       	$4
+
+	0x254753AC:  4BFFF79D  bl 0x25474B48
+	  25: MOVL       	$0x254753B0, t20
+	  26: PUTL       	t20, LR
+	  27: JMPo-c       	$0x25474B48  ($4)
+
+
+
+. 313 25475394 28
+. 80 7E 04 B4 39 00 00 00 80 BE 01 A0 80 C3 00 00 7F 83 E3 78 80 FE 01 A4 4B FF F7 9D
+==== BB 314 fillin_rpath(0x25474B48) approx BBs exec'd 0 ====
+
+	0x25474B48:  9421FFA0  stwu r1,-96(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFA0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25474B4C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25474B50:  480224B1  bl 0x25497000
+	   9: MOVL       	$0x25474B54, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 314 25474B48 12
+. 94 21 FF A0 7C 08 02 A6 48 02 24 B1
+==== BB 315 (0x25474B54) approx BBs exec'd 0 ====
+
+	0x25474B54:  93C10058  stw r30,88(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x58, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25474B58:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25474B5C:  7D800026  mfcr r12
+	   8: GETL       	CR, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0x25474B60:  91E1001C  stw r15,28(r1)
+	  11: GETL       	R15, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x1C, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25474B64:  2D880000  cmpi cr3,r8,0
+	  16: GETL       	R8, t12
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x3, CR
+	  19: INCEIPL       	$4
+
+	0x25474B68:  92010020  stw r16,32(r1)
+	  20: GETL       	R16, t16
+	  21: GETL       	R1, t18
+	  22: ADDL       	$0x20, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25474B6C:  92210024  stw r17,36(r1)
+	  25: GETL       	R17, t20
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0x24, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0x25474B70:  92410028  stw r18,40(r1)
+	  30: GETL       	R18, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x28, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25474B74:  81FE0148  lwz r15,328(r30)
+	  35: GETL       	R30, t28
+	  36: ADDL       	$0x148, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R15
+	  39: INCEIPL       	$4
+
+	0x25474B78:  823E04C8  lwz r17,1224(r30)
+	  40: GETL       	R30, t32
+	  41: ADDL       	$0x4C8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R17
+	  44: INCEIPL       	$4
+
+	0x25474B7C:  821E0154  lwz r16,340(r30)
+	  45: GETL       	R30, t36
+	  46: ADDL       	$0x154, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R16
+	  49: INCEIPL       	$4
+
+	0x25474B80:  825E0158  lwz r18,344(r30)
+	  50: GETL       	R30, t40
+	  51: ADDL       	$0x158, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R18
+	  54: INCEIPL       	$4
+
+	0x25474B84:  9261002C  stw r19,44(r1)
+	  55: GETL       	R19, t44
+	  56: GETL       	R1, t46
+	  57: ADDL       	$0x2C, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0x25474B88:  7CF33B78  or r19,r7,r7
+	  60: GETL       	R7, t48
+	  61: PUTL       	t48, R19
+	  62: INCEIPL       	$4
+
+	0x25474B8C:  92810030  stw r20,48(r1)
+	  63: GETL       	R20, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x30, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x25474B90:  7CD43378  or r20,r6,r6
+	  68: GETL       	R6, t54
+	  69: PUTL       	t54, R20
+	  70: INCEIPL       	$4
+
+	0x25474B94:  92A10034  stw r21,52(r1)
+	  71: GETL       	R21, t56
+	  72: GETL       	R1, t58
+	  73: ADDL       	$0x34, t58
+	  74: STL       	t56, (t58)
+	  75: INCEIPL       	$4
+
+	0x25474B98:  7CB52B78  or r21,r5,r5
+	  76: GETL       	R5, t60
+	  77: PUTL       	t60, R21
+	  78: INCEIPL       	$4
+
+	0x25474B9C:  92C10038  stw r22,56(r1)
+	  79: GETL       	R22, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x38, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x25474BA0:  7C962378  or r22,r4,r4
+	  84: GETL       	R4, t66
+	  85: PUTL       	t66, R22
+	  86: INCEIPL       	$4
+
+	0x25474BA4:  92E1003C  stw r23,60(r1)
+	  87: GETL       	R23, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x3C, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0x25474BA8:  7D174378  or r23,r8,r8
+	  92: GETL       	R8, t72
+	  93: PUTL       	t72, R23
+	  94: INCEIPL       	$4
+
+	0x25474BAC:  93010040  stw r24,64(r1)
+	  95: GETL       	R24, t74
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x40, t76
+	  98: STL       	t74, (t76)
+	  99: INCEIPL       	$4
+
+	0x25474BB0:  3B000000  li r24,0
+	 100: MOVL       	$0x0, t78
+	 101: PUTL       	t78, R24
+	 102: INCEIPL       	$4
+
+	0x25474BB4:  93210044  stw r25,68(r1)
+	 103: GETL       	R25, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x44, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x25474BB8:  93410048  stw r26,72(r1)
+	 108: GETL       	R26, t84
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0x48, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x25474BBC:  9361004C  stw r27,76(r1)
+	 113: GETL       	R27, t88
+	 114: GETL       	R1, t90
+	 115: ADDL       	$0x4C, t90
+	 116: STL       	t88, (t90)
+	 117: INCEIPL       	$4
+
+	0x25474BC0:  93810050  stw r28,80(r1)
+	 118: GETL       	R28, t92
+	 119: GETL       	R1, t94
+	 120: ADDL       	$0x50, t94
+	 121: STL       	t92, (t94)
+	 122: INCEIPL       	$4
+
+	0x25474BC4:  93A10054  stw r29,84(r1)
+	 123: GETL       	R29, t96
+	 124: GETL       	R1, t98
+	 125: ADDL       	$0x54, t98
+	 126: STL       	t96, (t98)
+	 127: INCEIPL       	$4
+
+	0x25474BC8:  93E1005C  stw r31,92(r1)
+	 128: GETL       	R31, t100
+	 129: GETL       	R1, t102
+	 130: ADDL       	$0x5C, t102
+	 131: STL       	t100, (t102)
+	 132: INCEIPL       	$4
+
+	0x25474BCC:  90010064  stw r0,100(r1)
+	 133: GETL       	R0, t104
+	 134: GETL       	R1, t106
+	 135: ADDL       	$0x64, t106
+	 136: STL       	t104, (t106)
+	 137: INCEIPL       	$4
+
+	0x25474BD0:  91810018  stw r12,24(r1)
+	 138: GETL       	R12, t108
+	 139: GETL       	R1, t110
+	 140: ADDL       	$0x18, t110
+	 141: STL       	t108, (t110)
+	 142: INCEIPL       	$4
+
+	0x25474BD4:  90610008  stw r3,8(r1)
+	 143: GETL       	R3, t112
+	 144: GETL       	R1, t114
+	 145: ADDL       	$0x8, t114
+	 146: STL       	t112, (t114)
+	 147: INCEIPL       	$4
+
+	0x25474BD8:  38610008  addi r3,r1,8
+	 148: GETL       	R1, t116
+	 149: ADDL       	$0x8, t116
+	 150: PUTL       	t116, R3
+	 151: INCEIPL       	$4
+
+	0x25474BDC:  7EA4AB78  or r4,r21,r21
+	 152: GETL       	R21, t118
+	 153: PUTL       	t118, R4
+	 154: INCEIPL       	$4
+
+	0x25474BE0:  4800B75D  bl 0x2548033C
+	 155: MOVL       	$0x25474BE4, t120
+	 156: PUTL       	t120, LR
+	 157: JMPo-c       	$0x2548033C  ($4)
+
+
+
+. 315 25474B54 144
+. 93 C1 00 58 7F C8 02 A6 7D 80 00 26 91 E1 00 1C 2D 88 00 00 92 01 00 20 92 21 00 24 92 41 00 28 81 FE 01 48 82 3E 04 C8 82 1E 01 54 82 5E 01 58 92 61 00 2C 7C F3 3B 78 92 81 00 30 7C D4 33 78 92 A1 00 34 7C B5 2B 78 92 C1 00 38 7C 96 23 78 92 E1 00 3C 7D 17 43 78 93 01 00 40 3B 00 00 00 93 21 00 44 93 41 00 48 93 61 00 4C 93 81 00 50 93 A1 00 54 93 E1 00 5C 90 01 00 64 91 81 00 18 90 61 00 08 38 61 00 08 7E A4 AB 78 48 00 B7 5D
+==== BB 316 __strsep_g(0x2548033C) approx BBs exec'd 0 ====
+
+	0x2548033C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25480340:  7C681B78  or r8,r3,r3
+	   6: GETL       	R3, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x25480344:  80630000  lwz r3,0(r3)
+	   9: GETL       	R3, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x25480348:  2F830000  cmpi cr7,r3,0
+	  13: GETL       	R3, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x2548034C:  419E0054  bc 12,30,0x254803A0
+	  17: Js30o       	$0x254803A0
+
+
+
+. 316 2548033C 20
+. 94 21 FF F0 7C 68 1B 78 80 63 00 00 2F 83 00 00 41 9E 00 54
+==== BB 317 (0x25480350) approx BBs exec'd 0 ====
+
+	0x25480350:  88030000  lbz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25480354:  7C6A1B78  or r10,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R10
+	   6: INCEIPL       	$4
+
+	0x25480358:  39200000  li r9,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x2548035C:  2C000000  cmpi cr0,r0,0
+	  10: GETL       	R0, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x25480360:  4182003C  bc 12,2,0x2548039C
+	  14: Js02o       	$0x2548039C
+
+
+
+. 317 25480350 20
+. 88 03 00 00 7C 6A 1B 78 39 20 00 00 2C 00 00 00 41 82 00 3C
+==== BB 318 (0x25480364) approx BBs exec'd 0 ====
+
+	0x25480364:  7C0B0378  or r11,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x25480368:  88040000  lbz r0,0(r4)
+	   3: GETL       	R4, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0x2548036C:  7C892378  or r9,r4,r4
+	   7: GETL       	R4, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25480370:  7C805800  cmp cr1,r0,r11
+	  10: GETL       	R0, t8
+	  11: GETL       	R11, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x25480374:  41860010  bc 12,6,0x25480384
+	  15: Js06o       	$0x25480384
+
+
+
+. 318 25480364 20
+. 7C 0B 03 78 88 04 00 00 7C 89 23 78 7C 80 58 00 41 86 00 10
+==== BB 319 (0x25480378) approx BBs exec'd 0 ====
+
+	0x25480378:  8C090001  lbzu r0,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x2548037C:  2F000000  cmpi cr6,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x25480380:  409AFFF0  bc 4,26,0x25480370
+	  10: Jc26o       	$0x25480370
+
+
+
+. 319 25480378 12
+. 8C 09 00 01 2F 00 00 00 40 9A FF F0
+==== BB 320 (0x25480370) approx BBs exec'd 0 ====
+
+	0x25480370:  7C805800  cmp cr1,r0,r11
+	   0: GETL       	R0, t0
+	   1: GETL       	R11, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25480374:  41860010  bc 12,6,0x25480384
+	   5: Js06o       	$0x25480384
+
+
+
+. 320 25480370 8
+. 7C 80 58 00 41 86 00 10
+==== BB 321 (0x25480384) approx BBs exec'd 0 ====
+
+	0x25480384:  2F800000  cmpi cr7,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25480388:  409E0020  bc 4,30,0x254803A8
+	   4: Jc30o       	$0x254803A8
+
+
+
+. 321 25480384 8
+. 2F 80 00 00 40 9E 00 20
+==== BB 322 (0x2548038C) approx BBs exec'd 0 ====
+
+	0x2548038C:  8D6A0001  lbzu r11,1(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R10
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x25480390:  2C0B0000  cmpi cr0,r11,0
+	   6: GETL       	R11, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x25480394:  4082FFD4  bc 4,2,0x25480368
+	  10: Jc02o       	$0x25480368
+
+
+
+. 322 2548038C 12
+. 8D 6A 00 01 2C 0B 00 00 40 82 FF D4
+==== BB 323 (0x25480368) approx BBs exec'd 0 ====
+
+	0x25480368:  88040000  lbz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2548036C:  7C892378  or r9,r4,r4
+	   4: GETL       	R4, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0x25480370:  7C805800  cmp cr1,r0,r11
+	   7: GETL       	R0, t6
+	   8: GETL       	R11, t8
+	   9: CMPL       	t6, t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x25480374:  41860010  bc 12,6,0x25480384
+	  12: Js06o       	$0x25480384
+
+
+
+. 323 25480368 16
+. 88 04 00 00 7C 89 23 78 7C 80 58 00 41 86 00 10
+==== BB 324 (0x25480398) approx BBs exec'd 0 ====
+
+	0x25480398:  39200000  li r9,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x2548039C:  91280000  stw r9,0(r8)
+	   3: GETL       	R9, t2
+	   4: GETL       	R8, t4
+	   5: STL       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0x254803A0:  38210010  addi r1,r1,16
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x10, t6
+	   9: PUTL       	t6, R1
+	  10: INCEIPL       	$4
+
+	0x254803A4:  4E800020  blr
+	  11: GETL       	LR, t8
+	  12: JMPo-r       	t8  ($4)
+
+
+
+. 324 25480398 16
+. 39 20 00 00 91 28 00 00 38 21 00 10 4E 80 00 20
+==== BB 325 (0x25474BE4) approx BBs exec'd 0 ====
+
+	0x25474BE4:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25474BE8:  7C7F1B78  or r31,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R31
+	   6: INCEIPL       	$4
+
+	0x25474BEC:  7C7C1B78  or r28,r3,r3
+	   7: GETL       	R3, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25474BF0:  4192014C  bc 12,18,0x25474D3C
+	  10: Js18o       	$0x25474D3C
+
+
+
+. 325 25474BE4 16
+. 2E 03 00 00 7C 7F 1B 78 7C 7C 1B 78 41 92 01 4C
+==== BB 326 (0x25474BF4) approx BBs exec'd 0 ====
+
+	0x25474BF4:  4800E3DD  bl 0x25482FD0
+	   0: MOVL       	$0x25474BF8, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25482FD0  ($4)
+
+
+
+. 326 25474BF4 4
+. 48 00 E3 DD
+==== BB 327 (0x25483064) approx BBs exec'd 0 ====
+
+	0x25483064:  7CE04038  and r0,r7,r8
+	   0: GETL       	R7, t0
+	   1: GETL       	R8, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483068:  3884FFFC  addi r4,r4,-4
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0xFFFFFFFC, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2548306C:  7C003A14  add r0,r0,r7
+	   9: GETL       	R0, t6
+	  10: GETL       	R7, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x25483070:  7D480078  andc r8,r10,r0
+	  14: GETL       	R10, t10
+	  15: GETL       	R0, t12
+	  16: NOTL       	t12
+	  17: ANDL       	t10, t12
+	  18: PUTL       	t12, R8
+	  19: INCEIPL       	$4
+
+	0x25483074:  7D0B0034  cntlzw r11,r8
+	  20: GETL       	R8, t14
+	  21: CNTLZL       	t14
+	  22: PUTL       	t14, R11
+	  23: INCEIPL       	$4
+
+	0x25483078:  7C032050  subf r0,r3,r4
+	  24: GETL       	R3, t16
+	  25: GETL       	R4, t18
+	  26: SUBL       	t16, t18
+	  27: PUTL       	t18, R0
+	  28: INCEIPL       	$4
+
+	0x2548307C:  556BE8FE  rlwinm r11,r11,29,3,31
+	  29: GETL       	R11, t20
+	  30: SHRL       	$0x3, t20
+	  31: PUTL       	t20, R11
+	  32: INCEIPL       	$4
+
+	0x25483080:  7C605A14  add r3,r0,r11
+	  33: GETL       	R0, t22
+	  34: GETL       	R11, t24
+	  35: ADDL       	t22, t24
+	  36: PUTL       	t24, R3
+	  37: INCEIPL       	$4
+
+	0x25483084:  4E800020  blr
+	  38: GETL       	LR, t26
+	  39: JMPo-r       	t26  ($4)
+
+
+
+. 327 25483064 36
+. 7C E0 40 38 38 84 FF FC 7C 00 3A 14 7D 48 00 78 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+==== BB 328 (0x25474BF8) approx BBs exec'd 0 ====
+
+	0x25474BF8:  7C7D1B79  or. r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25474BFC:  408201A8  bc 4,2,0x25474DA4
+	   5: Jc02o       	$0x25474DA4
+
+
+
+. 328 25474BF8 8
+. 7C 7D 1B 79 40 82 01 A8
+==== BB 329 (0x25474DA4) approx BBs exec'd 0 ====
+
+	0x25474DA4:  2B9D0001  cmpli cr7,r29,1
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25474DA8:  409D0024  bc 4,29,0x25474DCC
+	   5: Jc29o       	$0x25474DCC
+
+
+
+. 329 25474DA4 8
+. 2B 9D 00 01 40 9D 00 24
+==== BB 330 (0x25474DAC) approx BBs exec'd 0 ====
+
+	0x25474DAC:  7D3FEA14  add r9,r31,r29
+	   0: GETL       	R31, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25474DB0:  8869FFFF  lbz r3,-1(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: LDB       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25474DB4:  2C83002F  cmpi cr1,r3,47
+	  10: GETL       	R3, t8
+	  11: MOVL       	$0x2F, t12
+	  12: CMPL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x25474DB8:  40860014  bc 4,6,0x25474DCC
+	  15: Jc06o       	$0x25474DCC
+
+
+
+. 330 25474DAC 16
+. 7D 3F EA 14 88 69 FF FF 2C 83 00 2F 40 86 00 14
+==== BB 331 (0x25474DCC) approx BBs exec'd 0 ====
+
+	0x25474DCC:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25474DD0:  41BAFE34  bc 13,26,0x25474C04
+	   4: Js26o       	$0x25474C04
+
+
+
+. 331 25474DCC 8
+. 2F 1D 00 00 41 BA FE 34
+==== BB 332 (0x25474DD4) approx BBs exec'd 0 ====
+
+	0x25474DD4:  7D3CEA14  add r9,r28,r29
+	   0: GETL       	R28, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25474DD8:  8889FFFF  lbz r4,-1(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: LDB       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25474DDC:  2C04002F  cmpi cr0,r4,47
+	  10: GETL       	R4, t8
+	  11: MOVL       	$0x2F, t12
+	  12: CMPL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25474DE0:  41A2FE24  bc 13,2,0x25474C04
+	  15: Js02o       	$0x25474C04
+
+
+
+. 332 25474DD4 16
+. 7D 3C EA 14 88 89 FF FF 2C 04 00 2F 41 A2 FE 24
+==== BB 333 (0x25474DE4) approx BBs exec'd 0 ====
+
+	0x25474DE4:  2F940000  cmpi cr7,r20,0
+	   0: GETL       	R20, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25474DE8:  38A0002F  li r5,47
+	   4: MOVL       	$0x2F, t4
+	   5: PUTL       	t4, R5
+	   6: INCEIPL       	$4
+
+	0x25474DEC:  7CBCE9AE  stbx r5,r28,r29
+	   7: GETL       	R29, t6
+	   8: GETL       	R28, t8
+	   9: ADDL       	t8, t6
+	  10: GETL       	R5, t10
+	  11: STB       	t10, (t6)
+	  12: INCEIPL       	$4
+
+	0x25474DF0:  3BBD0001  addi r29,r29,1
+	  13: GETL       	R29, t12
+	  14: ADDL       	$0x1, t12
+	  15: PUTL       	t12, R29
+	  16: INCEIPL       	$4
+
+	0x25474DF4:  419EFE18  bc 12,30,0x25474C0C
+	  17: Js30o       	$0x25474C0C
+
+
+
+. 333 25474DE4 20
+. 2F 94 00 00 38 A0 00 2F 7C BC E9 AE 3B BD 00 01 41 9E FE 18
+==== BB 334 (0x25474C0C) approx BBs exec'd 0 ====
+
+	0x25474C0C:  83F101B0  lwz r31,432(r17)
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x1B0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x25474C10:  7E398B78  or r25,r17,r17
+	   5: GETL       	R17, t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0x25474C14:  2E1F0000  cmpi cr4,r31,0
+	   8: GETL       	R31, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x4, CR
+	  11: INCEIPL       	$4
+
+	0x25474C18:  41920024  bc 12,18,0x25474C3C
+	  12: Js18o       	$0x25474C3C
+
+
+
+. 334 25474C0C 16
+. 83 F1 01 B0 7E 39 8B 78 2E 1F 00 00 41 92 00 24
+==== BB 335 (0x25474C1C) approx BBs exec'd 0 ====
+
+	0x25474C1C:  815F0010  lwz r10,16(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25474C20:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25474C24:  7FA5EB78  or r5,r29,r29
+	   8: GETL       	R29, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25474C28:  7F8AE800  cmp cr7,r10,r29
+	  11: GETL       	R10, t8
+	  12: GETL       	R29, t10
+	  13: CMPL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0x25474C2C:  419E0240  bc 12,30,0x25474E6C
+	  16: Js30o       	$0x25474E6C
+
+
+
+. 335 25474C1C 20
+. 81 5F 00 10 7F 83 E3 78 7F A5 EB 78 7F 8A E8 00 41 9E 02 40
+==== BB 336 (0x25474C30) approx BBs exec'd 0 ====
+
+	0x25474C30:  83FF0000  lwz r31,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0x25474C34:  2E1F0000  cmpi cr4,r31,0
+	   4: GETL       	R31, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0x25474C38:  4092FFE4  bc 4,18,0x25474C1C
+	   8: Jc18o       	$0x25474C1C
+
+
+
+. 336 25474C30 12
+. 83 FF 00 00 2E 1F 00 00 40 92 FF E4
+==== BB 337 (0x25474C3C) approx BBs exec'd 0 ====
+
+	0x25474C3C:  418E0284  bc 12,14,0x25474EC0
+	   0: Js14o       	$0x25474EC0
+
+
+
+. 337 25474C3C 4
+. 41 8E 02 84
+==== BB 338 (0x25474EC0) approx BBs exec'd 0 ====
+
+	0x25474EC0:  80700000  lwz r3,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25474EC4:  3B600000  li r27,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R27
+	   6: INCEIPL       	$4
+
+	0x25474EC8:  7E1A8378  or r26,r16,r16
+	   7: GETL       	R16, t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0x25474ECC:  5460103A  rlwinm r0,r3,2,0,29
+	  10: GETL       	R3, t8
+	  11: SHLL       	$0x2, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x25474ED0:  7D80DA14  add r12,r0,r27
+	  14: GETL       	R0, t10
+	  15: GETL       	R27, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R12
+	  18: INCEIPL       	$4
+
+	0x25474ED4:  7FECEA14  add r31,r12,r29
+	  19: GETL       	R12, t14
+	  20: GETL       	R29, t16
+	  21: ADDL       	t14, t16
+	  22: PUTL       	t16, R31
+	  23: INCEIPL       	$4
+
+	0x25474ED8:  387F0015  addi r3,r31,21
+	  24: GETL       	R31, t18
+	  25: ADDL       	$0x15, t18
+	  26: PUTL       	t18, R3
+	  27: INCEIPL       	$4
+
+	0x25474EDC:  48022B49  bl 0x25497A24
+	  28: MOVL       	$0x25474EE0, t20
+	  29: PUTL       	t20, LR
+	  30: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 338 25474EC0 32
+. 80 70 00 00 3B 60 00 00 7E 1A 83 78 54 60 10 3A 7D 80 DA 14 7F EC EA 14 38 7F 00 15 48 02 2B 49
+==== BB 339 (0x25474EE0) approx BBs exec'd 0 ====
+
+	0x25474EE0:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25474EE4:  4082FD8C  bc 4,2,0x25474C70
+	   5: Jc02o       	$0x25474C70
+
+
+
+. 339 25474EE0 8
+. 7C 7F 1B 79 40 82 FD 8C
+==== BB 340 (0x25474C70) approx BBs exec'd 0 ====
+
+	0x25474C70:  80F00000  lwz r7,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25474C74:  7F84E378  or r4,r28,r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0x25474C78:  54E6103A  rlwinm r6,r7,2,0,29
+	   7: GETL       	R7, t6
+	   8: SHLL       	$0x2, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x25474C7C:  7CA6FA14  add r5,r6,r31
+	  11: GETL       	R6, t8
+	  12: GETL       	R31, t10
+	  13: ADDL       	t8, t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x25474C80:  38650014  addi r3,r5,20
+	  16: GETL       	R5, t12
+	  17: ADDL       	$0x14, t12
+	  18: PUTL       	t12, R3
+	  19: INCEIPL       	$4
+
+	0x25474C84:  7FA5EB78  or r5,r29,r29
+	  20: GETL       	R29, t14
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0x25474C88:  907F000C  stw r3,12(r31)
+	  23: GETL       	R3, t16
+	  24: GETL       	R31, t18
+	  25: ADDL       	$0xC, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0x25474C8C:  4800ED95  bl 0x25483A20
+	  28: MOVL       	$0x25474C90, t20
+	  29: PUTL       	t20, LR
+	  30: JMPo-c       	$0x25483A20  ($4)
+
+
+
+. 340 25474C70 32
+. 80 F0 00 00 7F 84 E3 78 54 E6 10 3A 7C A6 FA 14 38 65 00 14 7F A5 EB 78 90 7F 00 0C 48 00 ED 95
+==== BB 341 (0x25483A4C) approx BBs exec'd 0 ====
+
+	0x25483A4C:  7C8300D0  neg r4,r3
+	   0: GETL       	R3, t0
+	   1: NEGL       	t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25483A50:  548307BE  rlwinm r3,r4,0,30,31
+	   4: GETL       	R4, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0x25483A54:  7C691B79  or. r9,r3,r3
+	   8: GETL       	R3, t4
+	   9: PUTL       	t4, R9
+	  10: CMP0L       	t4, t6  (-rSo)
+	  11: ICRFL       	t6, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25483A58:  7F832850  subf r28,r3,r5
+	  13: GETL       	R3, t8
+	  14: GETL       	R5, t10
+	  15: SUBL       	t8, t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0x25483A5C:  4182001C  bc 12,2,0x25483A78
+	  18: Js02o       	$0x25483A78
+
+
+
+. 341 25483A4C 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+==== BB 342 (0x25483A78) approx BBs exec'd 0 ====
+
+	0x25483A78:  73E00003  andi. r0,r31,0x3
+	   0: GETL       	R31, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25483A7C:  40820068  bc 4,2,0x25483AE4
+	   6: Jc02o       	$0x25483AE4
+
+
+
+. 342 25483A78 8
+. 73 E0 00 03 40 82 00 68
+==== BB 343 (0x25483A80) approx BBs exec'd 0 ====
+
+	0x25483A80:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25483A84:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25483A88:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x25483A8C:  48000239  bl 0x25483CC4
+	  10: MOVL       	$0x25483A90, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0x25483CC4  ($4)
+
+
+
+. 343 25483A80 16
+. 7F A3 EB 78 7F E4 FB 78 57 85 F0 BE 48 00 02 39
+==== BB 344 (0x25483DA8) approx BBs exec'd 0 ====
+
+	0x25483DA8:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25483DAC:  3884FFF0  addi r4,r4,-16
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25483DB0:  81240014  lwz r9,20(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0x25483DB4:  3863FFEC  addi r3,r3,-20
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0xFFFFFFEC, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x25483DB8:  38A50004  addi r5,r5,4
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x4, t12
+	  19: PUTL       	t12, R5
+	  20: INCEIPL       	$4
+
+	0x25483DBC:  90030014  stw r0,20(r3)
+	  21: GETL       	R0, t14
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x14, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0x25483DC0:  4BFFFF7C  b 0x25483D3C
+	  26: JMPo       	$0x25483D3C  ($4)
+
+
+
+. 344 25483DA8 28
+. 80 04 00 00 38 84 FF F0 81 24 00 14 38 63 FF EC 38 A5 00 04 90 03 00 14 4B FF FF 7C
+==== BB 345 (0x25483A90) approx BBs exec'd 0 ====
+
+	0x25483A90:  5786003A  rlwinm r6,r28,0,0,29
+	   0: GETL       	R28, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x25483A94:  578507BE  rlwinm r5,r28,0,30,31
+	   4: GETL       	R28, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0x25483A98:  7FBD3214  add r29,r29,r6
+	   8: GETL       	R29, t4
+	   9: GETL       	R6, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R29
+	  12: INCEIPL       	$4
+
+	0x25483A9C:  7FFF3214  add r31,r31,r6
+	  13: GETL       	R31, t8
+	  14: GETL       	R6, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R31
+	  17: INCEIPL       	$4
+
+	0x25483AA0:  2C850000  cmpi cr1,r5,0
+	  18: GETL       	R5, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0x25483AA4:  4186001C  bc 12,6,0x25483AC0
+	  22: Js06o       	$0x25483AC0
+
+
+
+. 345 25483A90 24
+. 57 86 00 3A 57 85 07 BE 7F BD 32 14 7F FF 32 14 2C 85 00 00 41 86 00 1C
+==== BB 346 (0x25474C90) approx BBs exec'd 0 ====
+
+	0x25474C90:  39200000  li r9,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25474C94:  99230000  stb r9,0(r3)
+	   3: GETL       	R9, t2
+	   4: GETL       	R3, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0x25474C98:  80920000  lwz r4,0(r18)
+	   7: GETL       	R18, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R4
+	  10: INCEIPL       	$4
+
+	0x25474C9C:  93BF0010  stw r29,16(r31)
+	  11: GETL       	R29, t10
+	  12: GETL       	R31, t12
+	  13: ADDL       	$0x10, t12
+	  14: STL       	t10, (t12)
+	  15: INCEIPL       	$4
+
+	0x25474CA0:  7F9D2040  cmpl cr7,r29,r4
+	  16: GETL       	R29, t14
+	  17: GETL       	R4, t16
+	  18: CMPUL       	t14, t16, t18  (-rSo)
+	  19: ICRFL       	t18, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x25474CA4:  409D0008  bc 4,29,0x25474CAC
+	  21: Jc29o       	$0x25474CAC
+
+
+
+. 346 25474C90 24
+. 39 20 00 00 99 23 00 00 80 92 00 00 93 BF 00 10 7F 9D 20 40 40 9D 00 08
+==== BB 347 (0x25474CA8) approx BBs exec'd 0 ====
+
+	0x25474CA8:  93B20000  stw r29,0(r18)
+	   0: GETL       	R29, t0
+	   1: GETL       	R18, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25474CAC:  813A0000  lwz r9,0(r26)
+	   4: GETL       	R26, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R9
+	   7: INCEIPL       	$4
+
+	0x25474CB0:  895C0000  lbz r10,0(r28)
+	   8: GETL       	R28, t8
+	   9: LDB       	(t8), t10
+	  10: PUTL       	t10, R10
+	  11: INCEIPL       	$4
+
+	0x25474CB4:  2C890000  cmpi cr1,r9,0
+	  12: GETL       	R9, t12
+	  13: CMP0L       	t12, t14  (-rSo)
+	  14: ICRFL       	t14, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0x25474CB8:  6948002F  xori r8,r10,0x2F
+	  16: GETL       	R10, t16
+	  17: XORL       	$0x2F, t16
+	  18: PUTL       	t16, R8
+	  19: INCEIPL       	$4
+
+	0x25474CBC:  7F8800D0  neg r28,r8
+	  20: GETL       	R8, t18
+	  21: NEGL       	t18
+	  22: PUTL       	t18, R28
+	  23: INCEIPL       	$4
+
+	0x25474CC0:  578317BC  rlwinm r3,r28,2,30,30
+	  24: GETL       	R28, t20
+	  25: ROLL       	$0x2, t20
+	  26: ANDL       	$0x2, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x25474CC4:  41860018  bc 12,6,0x25474CDC
+	  29: Js06o       	$0x25474CDC
+
+
+
+. 347 25474CA8 32
+. 93 B2 00 00 81 3A 00 00 89 5C 00 00 2C 89 00 00 69 48 00 2F 7F 88 00 D0 57 83 17 BC 41 86 00 18
+==== BB 348 (0x25474CC8) approx BBs exec'd 0 ====
+
+	0x25474CC8:  7D2903A6  mtctr r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x25474CCC:  397F0014  addi r11,r31,20
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x14, t2
+	   5: PUTL       	t2, R11
+	   6: INCEIPL       	$4
+
+	0x25474CD0:  906B0000  stw r3,0(r11)
+	   7: GETL       	R3, t4
+	   8: GETL       	R11, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x25474CD4:  396B0004  addi r11,r11,4
+	  11: GETL       	R11, t8
+	  12: ADDL       	$0x4, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0x25474CD8:  4200FFF8  bc 16,0,0x25474CD0
+	  15: GETL       	CTR, t10
+	  16: ADDL       	$0xFFFFFFFF, t10
+	  17: PUTL       	t10, CTR
+	  18: JIFZL       	t10, $0x25474CDC
+	  19: JMPo       	$0x25474CD0  ($4)
+
+
+
+. 348 25474CC8 20
+. 7D 29 03 A6 39 7F 00 14 90 6B 00 00 39 6B 00 04 42 00 FF F8
+==== BB 349 (0x25474CD0) approx BBs exec'd 0 ====
+
+	0x25474CD0:  906B0000  stw r3,0(r11)
+	   0: GETL       	R3, t0
+	   1: GETL       	R11, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25474CD4:  396B0004  addi r11,r11,4
+	   4: GETL       	R11, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x25474CD8:  4200FFF8  bc 16,0,0x25474CD0
+	   8: GETL       	CTR, t6
+	   9: ADDL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, CTR
+	  11: JIFZL       	t6, $0x25474CDC
+	  12: JMPo       	$0x25474CD0  ($4)
+
+
+
+. 349 25474CD0 12
+. 90 6B 00 00 39 6B 00 04 42 00 FF F8
+==== BB 350 (0x25474CDC) approx BBs exec'd 0 ====
+
+	0x25474CDC:  927F0004  stw r19,4(r31)
+	   0: GETL       	R19, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x4, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25474CE0:  418E021C  bc 12,14,0x25474EFC
+	   5: Js14o       	$0x25474EFC
+
+
+
+. 350 25474CDC 8
+. 92 7F 00 04 41 8E 02 1C
+==== BB 351 (0x25474EFC) approx BBs exec'd 0 ====
+
+	0x25474EFC:  92FF0008  stw r23,8(r31)
+	   0: GETL       	R23, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25474F00:  4BFFFE08  b 0x25474D08
+	   5: JMPo       	$0x25474D08  ($4)
+
+
+
+. 351 25474EFC 8
+. 92 FF 00 08 4B FF FE 08
+==== BB 352 (0x25474D08) approx BBs exec'd 0 ====
+
+	0x25474D08:  801901B0  lwz r0,432(r25)
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1B0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25474D0C:  571D103A  rlwinm r29,r24,2,0,29
+	   5: GETL       	R24, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0x25474D10:  38610008  addi r3,r1,8
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x8, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x25474D14:  7EA4AB78  or r4,r21,r21
+	  13: GETL       	R21, t8
+	  14: PUTL       	t8, R4
+	  15: INCEIPL       	$4
+
+	0x25474D18:  901F0000  stw r0,0(r31)
+	  16: GETL       	R0, t10
+	  17: GETL       	R31, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0x25474D1C:  3B180001  addi r24,r24,1
+	  20: GETL       	R24, t14
+	  21: ADDL       	$0x1, t14
+	  22: PUTL       	t14, R24
+	  23: INCEIPL       	$4
+
+	0x25474D20:  93F901B0  stw r31,432(r25)
+	  24: GETL       	R31, t16
+	  25: GETL       	R25, t18
+	  26: ADDL       	$0x1B0, t18
+	  27: STL       	t16, (t18)
+	  28: INCEIPL       	$4
+
+	0x25474D24:  7FFDB12E  stwx r31,r29,r22
+	  29: GETL       	R22, t20
+	  30: GETL       	R29, t22
+	  31: ADDL       	t22, t20
+	  32: GETL       	R31, t24
+	  33: STL       	t24, (t20)
+	  34: INCEIPL       	$4
+
+	0x25474D28:  4800B615  bl 0x2548033C
+	  35: MOVL       	$0x25474D2C, t26
+	  36: PUTL       	t26, LR
+	  37: JMPo-c       	$0x2548033C  ($4)
+
+
+
+. 352 25474D08 36
+. 80 19 01 B0 57 1D 10 3A 38 61 00 08 7E A4 AB 78 90 1F 00 00 3B 18 00 01 93 F9 01 B0 7F FD B1 2E 48 00 B6 15
+==== BB 353 (0x254803A0) approx BBs exec'd 0 ====
+
+	0x254803A0:  38210010  addi r1,r1,16
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R1
+	   3: INCEIPL       	$4
+
+	0x254803A4:  4E800020  blr
+	   4: GETL       	LR, t2
+	   5: JMPo-r       	t2  ($4)
+
+
+
+. 353 254803A0 8
+. 38 21 00 10 4E 80 00 20
+==== BB 354 (0x25474D2C) approx BBs exec'd 0 ====
+
+	0x25474D2C:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25474D30:  7C7F1B78  or r31,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R31
+	   6: INCEIPL       	$4
+
+	0x25474D34:  7C7C1B78  or r28,r3,r3
+	   7: GETL       	R3, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25474D38:  4092FEBC  bc 4,18,0x25474BF4
+	  10: Jc18o       	$0x25474BF4
+
+
+
+. 354 25474D2C 16
+. 2E 03 00 00 7C 7F 1B 78 7C 7C 1B 78 40 92 FE BC
+==== BB 355 (0x25474D3C) approx BBs exec'd 0 ====
+
+	0x25474D3C:  81E10064  lwz r15,100(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x64, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0x25474D40:  5710103A  rlwinm r16,r24,2,0,29
+	   5: GETL       	R24, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R16
+	   8: INCEIPL       	$4
+
+	0x25474D44:  81810018  lwz r12,24(r1)
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x18, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0x25474D48:  7C70B12E  stwx r3,r16,r22
+	  14: GETL       	R22, t10
+	  15: GETL       	R16, t12
+	  16: ADDL       	t12, t10
+	  17: GETL       	R3, t14
+	  18: STL       	t14, (t10)
+	  19: INCEIPL       	$4
+
+	0x25474D4C:  7DE803A6  mtlr r15
+	  20: GETL       	R15, t16
+	  21: PUTL       	t16, LR
+	  22: INCEIPL       	$4
+
+	0x25474D50:  7EC3B378  or r3,r22,r22
+	  23: GETL       	R22, t18
+	  24: PUTL       	t18, R3
+	  25: INCEIPL       	$4
+
+	0x25474D54:  81E1001C  lwz r15,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R15
+	  30: INCEIPL       	$4
+
+	0x25474D58:  82010020  lwz r16,32(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R16
+	  35: INCEIPL       	$4
+
+	0x25474D5C:  7D818120  mtcrf 0x18,r12
+	  36: GETL       	R12, t28
+	  37: ICRFL       	t28, $0x3, CR
+	  38: ICRFL       	t28, $0x4, CR
+	  39: INCEIPL       	$4
+
+	0x25474D60:  82210024  lwz r17,36(r1)
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x24, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R17
+	  44: INCEIPL       	$4
+
+	0x25474D64:  82410028  lwz r18,40(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x28, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R18
+	  49: INCEIPL       	$4
+
+	0x25474D68:  8261002C  lwz r19,44(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x2C, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R19
+	  54: INCEIPL       	$4
+
+	0x25474D6C:  82810030  lwz r20,48(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x30, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R20
+	  59: INCEIPL       	$4
+
+	0x25474D70:  82A10034  lwz r21,52(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x34, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R21
+	  64: INCEIPL       	$4
+
+	0x25474D74:  82C10038  lwz r22,56(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x38, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R22
+	  69: INCEIPL       	$4
+
+	0x25474D78:  82E1003C  lwz r23,60(r1)
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x3C, t54
+	  72: LDL       	(t54), t56
+	  73: PUTL       	t56, R23
+	  74: INCEIPL       	$4
+
+	0x25474D7C:  83010040  lwz r24,64(r1)
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x40, t58
+	  77: LDL       	(t58), t60
+	  78: PUTL       	t60, R24
+	  79: INCEIPL       	$4
+
+	0x25474D80:  83210044  lwz r25,68(r1)
+	  80: GETL       	R1, t62
+	  81: ADDL       	$0x44, t62
+	  82: LDL       	(t62), t64
+	  83: PUTL       	t64, R25
+	  84: INCEIPL       	$4
+
+	0x25474D84:  83410048  lwz r26,72(r1)
+	  85: GETL       	R1, t66
+	  86: ADDL       	$0x48, t66
+	  87: LDL       	(t66), t68
+	  88: PUTL       	t68, R26
+	  89: INCEIPL       	$4
+
+	0x25474D88:  8361004C  lwz r27,76(r1)
+	  90: GETL       	R1, t70
+	  91: ADDL       	$0x4C, t70
+	  92: LDL       	(t70), t72
+	  93: PUTL       	t72, R27
+	  94: INCEIPL       	$4
+
+	0x25474D8C:  83810050  lwz r28,80(r1)
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0x50, t74
+	  97: LDL       	(t74), t76
+	  98: PUTL       	t76, R28
+	  99: INCEIPL       	$4
+
+	0x25474D90:  83A10054  lwz r29,84(r1)
+	 100: GETL       	R1, t78
+	 101: ADDL       	$0x54, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R29
+	 104: INCEIPL       	$4
+
+	0x25474D94:  83C10058  lwz r30,88(r1)
+	 105: GETL       	R1, t82
+	 106: ADDL       	$0x58, t82
+	 107: LDL       	(t82), t84
+	 108: PUTL       	t84, R30
+	 109: INCEIPL       	$4
+
+	0x25474D98:  83E1005C  lwz r31,92(r1)
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x5C, t86
+	 112: LDL       	(t86), t88
+	 113: PUTL       	t88, R31
+	 114: INCEIPL       	$4
+
+	0x25474D9C:  38210060  addi r1,r1,96
+	 115: GETL       	R1, t90
+	 116: ADDL       	$0x60, t90
+	 117: PUTL       	t90, R1
+	 118: INCEIPL       	$4
+
+	0x25474DA0:  4E800020  blr
+	 119: GETL       	LR, t92
+	 120: JMPo-r       	t92  ($4)
+
+
+
+. 355 25474D3C 104
+. 81 E1 00 64 57 10 10 3A 81 81 00 18 7C 70 B1 2E 7D E8 03 A6 7E C3 B3 78 81 E1 00 1C 82 01 00 20 7D 81 81 20 82 21 00 24 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+==== BB 356 (0x254753B0) approx BBs exec'd 0 ====
+
+	0x254753B0:  807D0000  lwz r3,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x254753B4:  83830000  lwz r28,0(r3)
+	   4: GETL       	R3, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R28
+	   7: INCEIPL       	$4
+
+	0x254753B8:  2C1C0000  cmpi cr0,r28,0
+	   8: GETL       	R28, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x254753BC:  41820074  bc 12,2,0x25475430
+	  12: Js02o       	$0x25475430
+
+
+
+. 356 254753B0 16
+. 80 7D 00 00 83 83 00 00 2C 1C 00 00 41 82 00 74
+==== BB 357 (0x254753C0) approx BBs exec'd 0 ====
+
+	0x254753C0:  38E00000  li r7,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x254753C4:  90FD0004  stw r7,4(r29)
+	   3: GETL       	R7, t2
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x4, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254753C8:  83B701B0  lwz r29,432(r23)
+	   8: GETL       	R23, t6
+	   9: ADDL       	$0x1B0, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x254753CC:  93B80060  stw r29,96(r24)
+	  13: GETL       	R29, t10
+	  14: GETL       	R24, t12
+	  15: ADDL       	$0x60, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254753D0:  81410000  lwz r10,0(r1)
+	  18: GETL       	R1, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R10
+	  21: INCEIPL       	$4
+
+	0x254753D4:  82EA0004  lwz r23,4(r10)
+	  22: GETL       	R10, t18
+	  23: ADDL       	$0x4, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R23
+	  26: INCEIPL       	$4
+
+	0x254753D8:  830AFFE0  lwz r24,-32(r10)
+	  27: GETL       	R10, t22
+	  28: ADDL       	$0xFFFFFFE0, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R24
+	  31: INCEIPL       	$4
+
+	0x254753DC:  7EE803A6  mtlr r23
+	  32: GETL       	R23, t26
+	  33: PUTL       	t26, LR
+	  34: INCEIPL       	$4
+
+	0x254753E0:  832AFFE4  lwz r25,-28(r10)
+	  35: GETL       	R10, t28
+	  36: ADDL       	$0xFFFFFFE4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R25
+	  39: INCEIPL       	$4
+
+	0x254753E4:  82EAFFDC  lwz r23,-36(r10)
+	  40: GETL       	R10, t32
+	  41: ADDL       	$0xFFFFFFDC, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R23
+	  44: INCEIPL       	$4
+
+	0x254753E8:  834AFFE8  lwz r26,-24(r10)
+	  45: GETL       	R10, t36
+	  46: ADDL       	$0xFFFFFFE8, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R26
+	  49: INCEIPL       	$4
+
+	0x254753EC:  836AFFEC  lwz r27,-20(r10)
+	  50: GETL       	R10, t40
+	  51: ADDL       	$0xFFFFFFEC, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R27
+	  54: INCEIPL       	$4
+
+	0x254753F0:  838AFFF0  lwz r28,-16(r10)
+	  55: GETL       	R10, t44
+	  56: ADDL       	$0xFFFFFFF0, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R28
+	  59: INCEIPL       	$4
+
+	0x254753F4:  83AAFFF4  lwz r29,-12(r10)
+	  60: GETL       	R10, t48
+	  61: ADDL       	$0xFFFFFFF4, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R29
+	  64: INCEIPL       	$4
+
+	0x254753F8:  83CAFFF8  lwz r30,-8(r10)
+	  65: GETL       	R10, t52
+	  66: ADDL       	$0xFFFFFFF8, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R30
+	  69: INCEIPL       	$4
+
+	0x254753FC:  83EAFFFC  lwz r31,-4(r10)
+	  70: GETL       	R10, t56
+	  71: ADDL       	$0xFFFFFFFC, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R31
+	  74: INCEIPL       	$4
+
+	0x25475400:  7D415378  or r1,r10,r10
+	  75: GETL       	R10, t60
+	  76: PUTL       	t60, R1
+	  77: INCEIPL       	$4
+
+	0x25475404:  4E800020  blr
+	  78: GETL       	LR, t62
+	  79: JMPo-r       	t62  ($4)
+
+
+
+. 357 254753C0 72
+. 38 E0 00 00 90 FD 00 04 83 B7 01 B0 93 B8 00 60 81 41 00 00 82 EA 00 04 83 0A FF E0 7E E8 03 A6 83 2A FF E4 82 EA FF DC 83 4A FF E8 83 6A FF EC 83 8A FF F0 83 AA FF F4 83 CA FF F8 83 EA FF FC 7D 41 53 78 4E 80 00 20
+==== BB 358 (0x25472F70) approx BBs exec'd 0 ====
+
+	0x25472F70:  4BFFF76C  b 0x254726DC
+	   0: JMPo       	$0x254726DC  ($4)
+
+
+
+. 358 25472F70 4
+. 4B FF F7 6C
+==== BB 359 (0x254726DC) approx BBs exec'd 0 ====
+
+	0x254726DC:  80EE01BC  lwz r7,444(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1BC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254726E0:  2E070000  cmpi cr4,r7,0
+	   5: GETL       	R7, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254726E4:  40920010  bc 4,18,0x254726F4
+	   9: Jc18o       	$0x254726F4
+
+
+
+. 359 254726DC 12
+. 80 EE 01 BC 2E 07 00 00 40 92 00 10
+==== BB 360 (0x254726E8) approx BBs exec'd 0 ====
+
+	0x254726E8:  83AE01D4  lwz r29,468(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1D4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254726EC:  815D0000  lwz r10,0(r29)
+	   5: GETL       	R29, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x254726F0:  914E01BC  stw r10,444(r14)
+	   9: GETL       	R10, t8
+	  10: GETL       	R14, t10
+	  11: ADDL       	$0x1BC, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x254726F4:  834F004C  lwz r26,76(r15)
+	  14: GETL       	R15, t12
+	  15: ADDL       	$0x4C, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R26
+	  18: INCEIPL       	$4
+
+	0x254726F8:  3AE00001  li r23,1
+	  19: MOVL       	$0x1, t16
+	  20: PUTL       	t16, R23
+	  21: INCEIPL       	$4
+
+	0x254726FC:  808E0338  lwz r4,824(r14)
+	  22: GETL       	R14, t18
+	  23: ADDL       	$0x338, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R4
+	  26: INCEIPL       	$4
+
+	0x25472700:  386E01B8  addi r3,r14,440
+	  27: GETL       	R14, t22
+	  28: ADDL       	$0x1B8, t22
+	  29: PUTL       	t22, R3
+	  30: INCEIPL       	$4
+
+	0x25472704:  2C1AFFFE  cmpi cr0,r26,-2
+	  31: GETL       	R26, t24
+	  32: MOVL       	$0xFFFFFFFE, t28
+	  33: CMPL       	t24, t28, t26  (-rSo)
+	  34: ICRFL       	t26, $0x0, CR
+	  35: INCEIPL       	$4
+
+	0x25472708:  826E0004  lwz r19,4(r14)
+	  36: GETL       	R14, t30
+	  37: ADDL       	$0x4, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R19
+	  40: INCEIPL       	$4
+
+	0x2547270C:  816E0198  lwz r11,408(r14)
+	  41: GETL       	R14, t34
+	  42: ADDL       	$0x198, t34
+	  43: LDL       	(t34), t36
+	  44: PUTL       	t36, R11
+	  45: INCEIPL       	$4
+
+	0x25472710:  52E4F002  rlwimi r4,r23,30,0,1
+	  46: GETL       	R4, t38
+	  47: GETL       	R23, t40
+	  48: ROLL       	$0x1E, t40
+	  49: ANDL       	$0xC0000000, t40
+	  50: ANDL       	$0x3FFFFFFF, t38
+	  51: ORL       	t38, t40
+	  52: PUTL       	t40, R4
+	  53: INCEIPL       	$4
+
+	0x25472714:  818E019C  lwz r12,412(r14)
+	  54: GETL       	R14, t42
+	  55: ADDL       	$0x19C, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R12
+	  58: INCEIPL       	$4
+
+	0x25472718:  38B30001  addi r5,r19,1
+	  59: GETL       	R19, t46
+	  60: ADDL       	$0x1, t46
+	  61: PUTL       	t46, R5
+	  62: INCEIPL       	$4
+
+	0x2547271C:  908E0338  stw r4,824(r14)
+	  63: GETL       	R4, t48
+	  64: GETL       	R14, t50
+	  65: ADDL       	$0x338, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x25472720:  310C0001  addic r8,r12,1
+	  68: GETL       	R12, t52
+	  69: ADCL       	$0x1, t52  (-wCa)
+	  70: PUTL       	t52, R8
+	  71: INCEIPL       	$4
+
+	0x25472724:  7CEB0194  addze r7,r11
+	  72: GETL       	R11, t54
+	  73: ADCL       	$0x0, t54  (-rCa-wCa)
+	  74: PUTL       	t54, R7
+	  75: INCEIPL       	$4
+
+	0x25472728:  90AE0004  stw r5,4(r14)
+	  76: GETL       	R5, t56
+	  77: GETL       	R14, t58
+	  78: ADDL       	$0x4, t58
+	  79: STL       	t56, (t58)
+	  80: INCEIPL       	$4
+
+	0x2547272C:  9074000C  stw r3,12(r20)
+	  81: GETL       	R3, t60
+	  82: GETL       	R20, t62
+	  83: ADDL       	$0xC, t62
+	  84: STL       	t60, (t62)
+	  85: INCEIPL       	$4
+
+	0x25472730:  90EE0198  stw r7,408(r14)
+	  86: GETL       	R7, t64
+	  87: GETL       	R14, t66
+	  88: ADDL       	$0x198, t66
+	  89: STL       	t64, (t66)
+	  90: INCEIPL       	$4
+
+	0x25472734:  910E019C  stw r8,412(r14)
+	  91: GETL       	R8, t68
+	  92: GETL       	R14, t70
+	  93: ADDL       	$0x19C, t70
+	  94: STL       	t68, (t70)
+	  95: INCEIPL       	$4
+
+	0x25472738:  928E01C8  stw r20,456(r14)
+	  96: GETL       	R20, t72
+	  97: GETL       	R14, t74
+	  98: ADDL       	$0x1C8, t74
+	  99: STL       	t72, (t74)
+	 100: INCEIPL       	$4
+
+	0x2547273C:  41820838  bc 12,2,0x25472F74
+	 101: Js02o       	$0x25472F74
+
+
+
+. 360 254726E8 88
+. 83 AE 01 D4 81 5D 00 00 91 4E 01 BC 83 4F 00 4C 3A E0 00 01 80 8E 03 38 38 6E 01 B8 2C 1A FF FE 82 6E 00 04 81 6E 01 98 52 E4 F0 02 81 8E 01 9C 38 B3 00 01 90 8E 03 38 31 0C 00 01 7C EB 01 94 90 AE 00 04 90 74 00 0C 90 EE 01 98 91 0E 01 9C 92 8E 01 C8 41 82 08 38
+==== BB 361 (0x25472F74) approx BBs exec'd 0 ====
+
+	0x25472F74:  80140000  lwz r0,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25472F78:  7DE97B78  or r9,r15,r15
+	   4: GETL       	R15, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0x25472F7C:  2F000000  cmpi cr6,r0,0
+	   7: GETL       	R0, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x6, CR
+	  10: INCEIPL       	$4
+
+	0x25472F80:  3800FFFF  li r0,-1
+	  11: MOVL       	$0xFFFFFFFF, t10
+	  12: PUTL       	t10, R0
+	  13: INCEIPL       	$4
+
+	0x25472F84:  419A0008  bc 12,26,0x25472F8C
+	  14: Js26o       	$0x25472F8C
+
+
+
+. 361 25472F74 20
+. 80 14 00 00 7D E9 7B 78 2F 00 00 00 38 00 FF FF 41 9A 00 08
+==== BB 362 (0x25472F8C) approx BBs exec'd 0 ====
+
+	0x25472F8C:  9009004C  stw r0,76(r9)
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0x4C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472F90:  4BFFF7B0  b 0x25472740
+	   5: JMPo       	$0x25472740  ($4)
+
+
+
+. 362 25472F8C 8
+. 90 09 00 4C 4B FF F7 B0
+==== BB 363 (0x25472740) approx BBs exec'd 0 ====
+
+	0x25472740:  818E0358  lwz r12,856(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x358, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x25472744:  A16C002C  lhz r11,44(r12)
+	   5: GETL       	R12, t4
+	   6: ADDL       	$0x2C, t4
+	   7: LDW       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25472748:  838C001C  lwz r28,28(r12)
+	  10: GETL       	R12, t8
+	  11: ADDL       	$0x1C, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R28
+	  14: INCEIPL       	$4
+
+	0x2547274C:  B16E030C  sth r11,780(r14)
+	  15: GETL       	R11, t12
+	  16: GETL       	R14, t14
+	  17: ADDL       	$0x30C, t14
+	  18: STW       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25472750:  A32C002C  lhz r25,44(r12)
+	  20: GETL       	R12, t16
+	  21: ADDL       	$0x2C, t16
+	  22: LDW       	(t16), t18
+	  23: PUTL       	t18, R25
+	  24: INCEIPL       	$4
+
+	0x25472754:  7D6CE214  add r11,r12,r28
+	  25: GETL       	R12, t20
+	  26: GETL       	R28, t22
+	  27: ADDL       	t20, t22
+	  28: PUTL       	t22, R11
+	  29: INCEIPL       	$4
+
+	0x25472758:  916E0304  stw r11,772(r14)
+	  30: GETL       	R11, t24
+	  31: GETL       	R14, t26
+	  32: ADDL       	$0x304, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x2547275C:  2F990000  cmpi cr7,r25,0
+	  35: GETL       	R25, t28
+	  36: CMP0L       	t28, t30  (-rSo)
+	  37: ICRFL       	t30, $0x7, CR
+	  38: INCEIPL       	$4
+
+	0x25472760:  3959FFFF  addi r10,r25,-1
+	  39: GETL       	R25, t32
+	  40: ADDL       	$0xFFFFFFFF, t32
+	  41: PUTL       	t32, R10
+	  42: INCEIPL       	$4
+
+	0x25472764:  419E002C  bc 12,30,0x25472790
+	  43: Js30o       	$0x25472790
+
+
+
+. 363 25472740 40
+. 81 8E 03 58 A1 6C 00 2C 83 8C 00 1C B1 6E 03 0C A3 2C 00 2C 7D 6C E2 14 91 6E 03 04 2F 99 00 00 39 59 FF FF 41 9E 00 2C
+==== BB 364 (0x25472768) approx BBs exec'd 0 ====
+
+	0x25472768:  3D206474  lis r9,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x2547276C:  6127E552  ori r7,r9,0xE552
+	   3: MOVL       	$0x6474E552, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x25472770:  55582834  rlwinm r24,r10,5,0,26
+	   6: GETL       	R10, t4
+	   7: SHLL       	$0x5, t4
+	   8: PUTL       	t4, R24
+	   9: INCEIPL       	$4
+
+	0x25472774:  7F78582E  lwzx r27,r24,r11
+	  10: GETL       	R11, t6
+	  11: GETL       	R24, t8
+	  12: ADDL       	t8, t6
+	  13: LDL       	(t6), t10
+	  14: PUTL       	t10, R27
+	  15: INCEIPL       	$4
+
+	0x25472778:  7D185A14  add r8,r24,r11
+	  16: GETL       	R24, t12
+	  17: GETL       	R11, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R8
+	  20: INCEIPL       	$4
+
+	0x2547277C:  7C9B3800  cmp cr1,r27,r7
+	  21: GETL       	R27, t16
+	  22: GETL       	R7, t18
+	  23: CMPL       	t16, t18, t20  (-rSo)
+	  24: ICRFL       	t20, $0x1, CR
+	  25: INCEIPL       	$4
+
+	0x25472780:  41860C80  bc 12,6,0x25473400
+	  26: Js06o       	$0x25473400
+
+
+
+. 364 25472768 28
+. 3D 20 64 74 61 27 E5 52 55 58 28 34 7F 78 58 2E 7D 18 5A 14 7C 9B 38 00 41 86 0C 80
+==== BB 365 (0x25473400) approx BBs exec'd 0 ====
+
+	0x25473400:  83B10000  lwz r29,0(r17)
+	   0: GETL       	R17, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0x25473404:  3B400000  li r26,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R26
+	   6: INCEIPL       	$4
+
+	0x25473408:  82C80008  lwz r22,8(r8)
+	   7: GETL       	R8, t6
+	   8: ADDL       	$0x8, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R22
+	  11: INCEIPL       	$4
+
+	0x2547340C:  3B200000  li r25,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R25
+	  14: INCEIPL       	$4
+
+	0x25473410:  2C1D0000  cmpi cr0,r29,0
+	  15: GETL       	R29, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x25473414:  92CE03EC  stw r22,1004(r14)
+	  19: GETL       	R22, t16
+	  20: GETL       	R14, t18
+	  21: ADDL       	$0x3EC, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0x25473418:  80C80014  lwz r6,20(r8)
+	  24: GETL       	R8, t20
+	  25: ADDL       	$0x14, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R6
+	  28: INCEIPL       	$4
+
+	0x2547341C:  90CE03F0  stw r6,1008(r14)
+	  29: GETL       	R6, t24
+	  30: GETL       	R14, t26
+	  31: ADDL       	$0x3F0, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0x25473420:  4182F384  bc 12,2,0x254727A4
+	  34: Js02o       	$0x254727A4
+
+
+
+. 365 25473400 36
+. 83 B1 00 00 3B 40 00 00 82 C8 00 08 3B 20 00 00 2C 1D 00 00 92 CE 03 EC 80 C8 00 14 90 CE 03 F0 41 82 F3 84
+==== BB 366 (0x25473424) approx BBs exec'd 0 ====
+
+	0x25473424:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25473428:  4800FBA9  bl 0x25482FD0
+	   3: MOVL       	$0x2547342C, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+
+. 366 25473424 8
+. 7F A3 EB 78 48 00 FB A9
+==== BB 367 (0x2547342C) approx BBs exec'd 0 ====
+
+	0x2547342C:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25473430:  7C711B78  or r17,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R17
+	   5: INCEIPL       	$4
+
+	0x25473434:  3863001F  addi r3,r3,31
+	   6: GETL       	R3, t4
+	   7: ADDL       	$0x1F, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0x25473438:  546A0036  rlwinm r10,r3,0,0,27
+	  10: GETL       	R3, t6
+	  11: ANDL       	$0xFFFFFFF0, t6
+	  12: PUTL       	t6, R10
+	  13: INCEIPL       	$4
+
+	0x2547343C:  80E10000  lwz r7,0(r1)
+	  14: GETL       	R1, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R7
+	  17: INCEIPL       	$4
+
+	0x25473440:  7EAA00D0  neg r21,r10
+	  18: GETL       	R10, t12
+	  19: NEGL       	t12
+	  20: PUTL       	t12, R21
+	  21: INCEIPL       	$4
+
+	0x25473444:  38B10001  addi r5,r17,1
+	  22: GETL       	R17, t14
+	  23: ADDL       	$0x1, t14
+	  24: PUTL       	t14, R5
+	  25: INCEIPL       	$4
+
+	0x25473448:  7CE1A96E  stwux r7,r1,r21
+	  26: GETL       	R21, t16
+	  27: GETL       	R1, t18
+	  28: ADDL       	t18, t16
+	  29: PUTL       	t16, R1
+	  30: GETL       	R7, t20
+	  31: STL       	t20, (t16)
+	  32: INCEIPL       	$4
+
+	0x2547344C:  39010017  addi r8,r1,23
+	  33: GETL       	R1, t22
+	  34: ADDL       	$0x17, t22
+	  35: PUTL       	t22, R8
+	  36: INCEIPL       	$4
+
+	0x25473450:  55030036  rlwinm r3,r8,0,0,27
+	  37: GETL       	R8, t24
+	  38: ANDL       	$0xFFFFFFF0, t24
+	  39: PUTL       	t24, R3
+	  40: INCEIPL       	$4
+
+	0x25473454:  4801078D  bl 0x25483BE0
+	  41: MOVL       	$0x25473458, t26
+	  42: PUTL       	t26, LR
+	  43: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 367 2547342C 44
+. 7F A4 EB 78 7C 71 1B 78 38 63 00 1F 54 6A 00 36 80 E1 00 00 7E AA 00 D0 38 B1 00 01 7C E1 A9 6E 39 01 00 17 55 03 00 36 48 01 07 8D
+==== BB 368 (0x25483D6C) approx BBs exec'd 0 ====
+
+	0x25483D6C:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25483D70:  3863FFFC  addi r3,r3,-4
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFFC, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483D74:  81240004  lwz r9,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0x25483D78:  90030004  stw r0,4(r3)
+	  13: GETL       	R0, t10
+	  14: GETL       	R3, t12
+	  15: ADDL       	$0x4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25483D7C:  4BFFFFA0  b 0x25483D1C
+	  18: JMPo       	$0x25483D1C  ($4)
+
+
+
+. 368 25483D6C 20
+. 80 04 00 00 38 63 FF FC 81 24 00 04 90 03 00 04 4B FF FF A0
+==== BB 369 (0x25483D1C) approx BBs exec'd 0 ====
+
+	0x25483D1C:  80040008  lwz r0,8(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483D20:  91230008  stw r9,8(r3)
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483D24:  8124000C  lwz r9,12(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x25483D28:  9003000C  stw r0,12(r3)
+	  15: GETL       	R0, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0xC, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25483D2C:  80040010  lwz r0,16(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0x10, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R0
+	  24: INCEIPL       	$4
+
+	0x25483D30:  91230010  stw r9,16(r3)
+	  25: GETL       	R9, t20
+	  26: GETL       	R3, t22
+	  27: ADDL       	$0x10, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0x25483D34:  81240014  lwz r9,20(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0x14, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R9
+	  34: INCEIPL       	$4
+
+	0x25483D38:  90030014  stw r0,20(r3)
+	  35: GETL       	R0, t28
+	  36: GETL       	R3, t30
+	  37: ADDL       	$0x14, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	  40: GETL       	R4, t32
+	  41: ADDL       	$0x18, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R0
+	  44: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	  45: GETL       	R9, t36
+	  46: GETL       	R3, t38
+	  47: ADDL       	$0x18, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  50: GETL       	R5, t40
+	  51: ADCL       	$0xFFFFFFF8, t40  (-wCa)
+	  52: PUTL       	t40, R5
+	  53: CMP0L       	t40, t42  (-rSo)
+	  54: ICRFL       	t42, $0x0, CR
+	  55: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  56: GETL       	R4, t44
+	  57: ADDL       	$0x1C, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R9
+	  60: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  61: GETL       	R0, t48
+	  62: GETL       	R3, t50
+	  63: ADDL       	$0x1C, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  66: GETL       	R4, t52
+	  67: ADDL       	$0x20, t52
+	  68: PUTL       	t52, R4
+	  69: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  70: GETL       	R3, t54
+	  71: ADDL       	$0x20, t54
+	  72: PUTL       	t54, R3
+	  73: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  74: Jc02o       	$0x25483D0C
+
+
+
+. 369 25483D1C 64
+. 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 370 (0x25473458) approx BBs exec'd 0 ====
+
+	0x25473458:  907F0044  stw r3,68(r31)
+	   0: GETL       	R3, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x44, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547345C:  809E00D0  lwz r4,208(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0xD0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25473460:  387F0044  addi r3,r31,68
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x44, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x25473464:  4800CED9  bl 0x2548033C
+	  14: MOVL       	$0x25473468, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x2548033C  ($4)
+
+
+
+. 370 25473458 16
+. 90 7F 00 44 80 9E 00 D0 38 7F 00 44 48 00 CE D9
+==== BB 371 (0x25473468) approx BBs exec'd 0 ====
+
+	0x25473468:  7C7D1B79  or. r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547346C:  41A2F338  bc 13,2,0x254727A4
+	   5: Js02o       	$0x254727A4
+
+
+
+. 371 25473468 8
+. 7C 7D 1B 79 41 A2 F3 38
+==== BB 372 (0x25473470) approx BBs exec'd 0 ====
+
+	0x25473470:  88BD0000  lbz r5,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x25473474:  2F050000  cmpi cr6,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x25473478:  41BAFFE4  bc 13,26,0x2547345C
+	   8: Js26o       	$0x2547345C
+
+
+
+. 372 25473470 12
+. 88 BD 00 00 2F 05 00 00 41 BA FF E4
+==== BB 373 (0x2547347C) approx BBs exec'd 0 ====
+
+	0x2547347C:  80900000  lwz r4,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25473480:  2F840000  cmpi cr7,r4,0
+	   4: GETL       	R4, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x25473484:  409E0774  bc 4,30,0x25473BF8
+	   8: Jc30o       	$0x25473BF8
+
+
+
+. 373 2547347C 12
+. 80 90 00 00 2F 84 00 00 40 9E 07 74
+==== BB 374 (0x25473488) approx BBs exec'd 0 ====
+
+	0x25473488:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547348C:  38A00001  li r5,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25473490:  38C00001  li r6,1
+	   6: MOVL       	$0x1, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x25473494:  38E00000  li r7,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0x25473498:  39000000  li r8,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R8
+	  14: INCEIPL       	$4
+
+	0x2547349C:  39200000  li r9,0
+	  15: MOVL       	$0x0, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x254734A0:  7E83A378  or r3,r20,r20
+	  18: GETL       	R20, t12
+	  19: PUTL       	t12, R3
+	  20: INCEIPL       	$4
+
+	0x254734A4:  48003845  bl 0x25476CE8
+	  21: MOVL       	$0x254734A8, t14
+	  22: PUTL       	t14, LR
+	  23: JMPo-c       	$0x25476CE8  ($4)
+
+
+
+. 374 25473488 32
+. 7F A4 EB 78 38 A0 00 01 38 C0 00 01 38 E0 00 00 39 00 00 00 39 20 00 00 7E 83 A3 78 48 00 38 45
+==== BB 375 _dl_map_object(0x25476CE8) approx BBs exec'd 0 ====
+
+	0x25476CE8:  9421FD70  stwu r1,-656(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFD70, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25476CEC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25476CF0:  48020311  bl 0x25497000
+	   9: MOVL       	$0x25476CF4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 375 25476CE8 12
+. 94 21 FD 70 7C 08 02 A6 48 02 03 11
+==== BB 376 (0x25476CF4) approx BBs exec'd 0 ====
+
+	0x25476CF4:  93C10288  stw r30,648(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x288, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25476CF8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25476CFC:  9261025C  stw r19,604(r1)
+	   8: GETL       	R19, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x25C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25476D00:  92A10264  stw r21,612(r1)
+	  13: GETL       	R21, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x264, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25476D04:  1E690018  mulli r19,r9,24
+	  18: GETL       	R9, t14
+	  19: MULL       	$0x18, t14
+	  20: PUTL       	t14, R19
+	  21: INCEIPL       	$4
+
+	0x25476D08:  90010294  stw r0,660(r1)
+	  22: GETL       	R0, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x294, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x25476D0C:  7D800026  mfcr r12
+	  27: GETL       	CR, t20
+	  28: PUTL       	t20, R12
+	  29: INCEIPL       	$4
+
+	0x25476D10:  82BE04C8  lwz r21,1224(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0x4C8, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R21
+	  34: INCEIPL       	$4
+
+	0x25476D14:  93A10284  stw r29,644(r1)
+	  35: GETL       	R29, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x284, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x25476D18:  7FB3A82E  lwzx r29,r19,r21
+	  40: GETL       	R21, t30
+	  41: GETL       	R19, t32
+	  42: ADDL       	t32, t30
+	  43: LDL       	(t30), t34
+	  44: PUTL       	t34, R29
+	  45: INCEIPL       	$4
+
+	0x25476D1C:  92010250  stw r16,592(r1)
+	  46: GETL       	R16, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x250, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0x25476D20:  7CD03378  or r16,r6,r6
+	  51: GETL       	R6, t40
+	  52: PUTL       	t40, R16
+	  53: INCEIPL       	$4
+
+	0x25476D24:  2F9D0000  cmpi cr7,r29,0
+	  54: GETL       	R29, t42
+	  55: CMP0L       	t42, t44  (-rSo)
+	  56: ICRFL       	t44, $0x7, CR
+	  57: INCEIPL       	$4
+
+	0x25476D28:  92210254  stw r17,596(r1)
+	  58: GETL       	R17, t46
+	  59: GETL       	R1, t48
+	  60: ADDL       	$0x254, t48
+	  61: STL       	t46, (t48)
+	  62: INCEIPL       	$4
+
+	0x25476D2C:  92E1026C  stw r23,620(r1)
+	  63: GETL       	R23, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x26C, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x25476D30:  7CF13B78  or r17,r7,r7
+	  68: GETL       	R7, t54
+	  69: PUTL       	t54, R17
+	  70: INCEIPL       	$4
+
+	0x25476D34:  93010270  stw r24,624(r1)
+	  71: GETL       	R24, t56
+	  72: GETL       	R1, t58
+	  73: ADDL       	$0x270, t58
+	  74: STL       	t56, (t58)
+	  75: INCEIPL       	$4
+
+	0x25476D38:  7CB72B78  or r23,r5,r5
+	  76: GETL       	R5, t60
+	  77: PUTL       	t60, R23
+	  78: INCEIPL       	$4
+
+	0x25476D3C:  93210274  stw r25,628(r1)
+	  79: GETL       	R25, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x274, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x25476D40:  7D184378  or r24,r8,r8
+	  84: GETL       	R8, t66
+	  85: PUTL       	t66, R24
+	  86: INCEIPL       	$4
+
+	0x25476D44:  9361027C  stw r27,636(r1)
+	  87: GETL       	R27, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x27C, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0x25476D48:  7D394B78  or r25,r9,r9
+	  92: GETL       	R9, t72
+	  93: PUTL       	t72, R25
+	  94: INCEIPL       	$4
+
+	0x25476D4C:  93E1028C  stw r31,652(r1)
+	  95: GETL       	R31, t74
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x28C, t76
+	  98: STL       	t74, (t76)
+	  99: INCEIPL       	$4
+
+	0x25476D50:  7C7B1B78  or r27,r3,r3
+	 100: GETL       	R3, t78
+	 101: PUTL       	t78, R27
+	 102: INCEIPL       	$4
+
+	0x25476D54:  91C10248  stw r14,584(r1)
+	 103: GETL       	R14, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x248, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x25476D58:  7C9F2378  or r31,r4,r4
+	 108: GETL       	R4, t84
+	 109: PUTL       	t84, R31
+	 110: INCEIPL       	$4
+
+	0x25476D5C:  91E1024C  stw r15,588(r1)
+	 111: GETL       	R15, t86
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x24C, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0x25476D60:  92410258  stw r18,600(r1)
+	 116: GETL       	R18, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0x258, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x25476D64:  92810260  stw r20,608(r1)
+	 121: GETL       	R20, t94
+	 122: GETL       	R1, t96
+	 123: ADDL       	$0x260, t96
+	 124: STL       	t94, (t96)
+	 125: INCEIPL       	$4
+
+	0x25476D68:  92C10268  stw r22,616(r1)
+	 126: GETL       	R22, t98
+	 127: GETL       	R1, t100
+	 128: ADDL       	$0x268, t100
+	 129: STL       	t98, (t100)
+	 130: INCEIPL       	$4
+
+	0x25476D6C:  93410278  stw r26,632(r1)
+	 131: GETL       	R26, t102
+	 132: GETL       	R1, t104
+	 133: ADDL       	$0x278, t104
+	 134: STL       	t102, (t104)
+	 135: INCEIPL       	$4
+
+	0x25476D70:  93810280  stw r28,640(r1)
+	 136: GETL       	R28, t106
+	 137: GETL       	R1, t108
+	 138: ADDL       	$0x280, t108
+	 139: STL       	t106, (t108)
+	 140: INCEIPL       	$4
+
+	0x25476D74:  91810244  stw r12,580(r1)
+	 141: GETL       	R12, t110
+	 142: GETL       	R1, t112
+	 143: ADDL       	$0x244, t112
+	 144: STL       	t110, (t112)
+	 145: INCEIPL       	$4
+
+	0x25476D78:  419E0040  bc 12,30,0x25476DB8
+	 146: Js30o       	$0x25476DB8
+
+
+
+. 376 25476CF4 136
+. 93 C1 02 88 7F C8 02 A6 92 61 02 5C 92 A1 02 64 1E 69 00 18 90 01 02 94 7D 80 00 26 82 BE 04 C8 93 A1 02 84 7F B3 A8 2E 92 01 02 50 7C D0 33 78 2F 9D 00 00 92 21 02 54 92 E1 02 6C 7C F1 3B 78 93 01 02 70 7C B7 2B 78 93 21 02 74 7D 18 43 78 93 61 02 7C 7D 39 4B 78 93 E1 02 8C 7C 7B 1B 78 91 C1 02 48 7C 9F 23 78 91 E1 02 4C 92 41 02 58 92 81 02 60 92 C1 02 68 93 41 02 78 93 81 02 80 91 81 02 44 41 9E 00 40
+==== BB 377 (0x25476D7C) approx BBs exec'd 0 ====
+
+	0x25476D7C:  3AC00000  li r22,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R22
+	   2: INCEIPL       	$4
+
+	0x25476D80:  80BD0180  lwz r5,384(r29)
+	   3: GETL       	R29, t2
+	   4: ADDL       	$0x180, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25476D84:  7FA4EB78  or r4,r29,r29
+	   8: GETL       	R29, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x25476D88:  7FE3FB78  or r3,r31,r31
+	  11: GETL       	R31, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x25476D8C:  74A90040  andis. r9,r5,0x40
+	  14: GETL       	R5, t10
+	  15: ANDL       	$0x400000, t10
+	  16: PUTL       	t10, R9
+	  17: CMP0L       	t10, t12  (-rSo)
+	  18: ICRFL       	t12, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0x25476D90:  4082001C  bc 4,2,0x25476DAC
+	  20: Jc02o       	$0x25476DAC
+
+
+
+. 377 25476D7C 24
+. 3A C0 00 00 80 BD 01 80 7F A4 EB 78 7F E3 FB 78 74 A9 00 40 40 82 00 1C
+==== BB 378 (0x25476D94) approx BBs exec'd 0 ====
+
+	0x25476D94:  48005941  bl 0x2547C6D4
+	   0: MOVL       	$0x25476D98, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547C6D4  ($4)
+
+
+
+. 378 25476D94 4
+. 48 00 59 41
+==== BB 379 _dl_name_match_p(0x2547C6D4) approx BBs exec'd 0 ====
+
+	0x2547C6D4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x2547C6D8:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547C6DC:  93E1001C  stw r31,28(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547C6E0:  7C9F2378  or r31,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0x2547C6E4:  93A10014  stw r29,20(r1)
+	  17: GETL       	R29, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0x2547C6E8:  7C7D1B78  or r29,r3,r3
+	  22: GETL       	R3, t16
+	  23: PUTL       	t16, R29
+	  24: INCEIPL       	$4
+
+	0x2547C6EC:  90010024  stw r0,36(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x24, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0x2547C6F0:  80840004  lwz r4,4(r4)
+	  30: GETL       	R4, t22
+	  31: ADDL       	$0x4, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R4
+	  34: INCEIPL       	$4
+
+	0x2547C6F4:  93C10018  stw r30,24(r1)
+	  35: GETL       	R30, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x18, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x2547C6F8:  480067E9  bl 0x25482EE0
+	  40: MOVL       	$0x2547C6FC, t30
+	  41: PUTL       	t30, LR
+	  42: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 379 2547C6D4 40
+. 7C 08 02 A6 94 21 FF E0 93 E1 00 1C 7C 9F 23 78 93 A1 00 14 7C 7D 1B 78 90 01 00 24 80 84 00 04 93 C1 00 18 48 00 67 E9
+==== BB 380 strcmp(0x25482EE0) approx BBs exec'd 0 ====
+
+	0x25482EE0:  7C801B78  or r0,r4,r3
+	   0: GETL       	R4, t0
+	   1: GETL       	R3, t2
+	   2: ORL       	t2, t0
+	   3: PUTL       	t0, R0
+	   4: INCEIPL       	$4
+
+	0x25482EE4:  540007BF  rlwinm. r0,r0,0,30,31
+	   5: GETL       	R0, t4
+	   6: ANDL       	$0x3, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25482EE8:  3CE0FEFF  lis r7,-257
+	  11: MOVL       	$0xFEFF0000, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x25482EEC:  40820094  bc 4,2,0x25482F80
+	  14: Jc02o       	$0x25482F80
+
+
+
+. 380 25482EE0 16
+. 7C 80 1B 78 54 00 07 BF 3C E0 FE FF 40 82 00 94
+==== BB 381 (0x25482EF0) approx BBs exec'd 0 ====
+
+	0x25482EF0:  80A30000  lwz r5,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x25482EF4:  80C40000  lwz r6,0(r4)
+	   4: GETL       	R4, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R6
+	   7: INCEIPL       	$4
+
+	0x25482EF8:  3D007F7F  lis r8,32639
+	   8: MOVL       	$0x7F7F0000, t8
+	   9: PUTL       	t8, R8
+	  10: INCEIPL       	$4
+
+	0x25482EFC:  38E7FEFF  addi r7,r7,-257
+	  11: GETL       	R7, t10
+	  12: ADDL       	$0xFFFFFEFF, t10
+	  13: PUTL       	t10, R7
+	  14: INCEIPL       	$4
+
+	0x25482F00:  39087F7F  addi r8,r8,32639
+	  15: MOVL       	$0x7F7F7F7F, t12
+	  16: PUTL       	t12, R8
+	  17: INCEIPL       	$4
+
+	0x25482F04:  48000010  b 0x25482F14
+	  18: JMPo       	$0x25482F14  ($4)
+
+
+
+. 381 25482EF0 24
+. 80 A3 00 00 80 C4 00 00 3D 00 7F 7F 38 E7 FE FF 39 08 7F 7F 48 00 00 10
+==== BB 382 (0x25482F14) approx BBs exec'd 0 ====
+
+	0x25482F14:  7C072A14  add r0,r7,r5
+	   0: GETL       	R7, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482F18:  7D0928F8  nor r9,r8,r5
+	   5: GETL       	R8, t4
+	   6: GETL       	R5, t6
+	   7: ORL       	t6, t4
+	   8: NOTL       	t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0x25482F1C:  7C004839  and. r0,r0,r9
+	  11: GETL       	R0, t8
+	  12: GETL       	R9, t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R0
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0x25482F20:  7C853000  cmp cr1,r5,r6
+	  18: GETL       	R5, t14
+	  19: GETL       	R6, t16
+	  20: CMPL       	t14, t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0x25482F24:  4182FFE4  bc 12,2,0x25482F08
+	  23: Js02o       	$0x25482F08
+
+
+
+. 382 25482F14 20
+. 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+==== BB 383 (0x25482F08) approx BBs exec'd 0 ====
+
+	0x25482F08:  84A30004  lwzu r5,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R3
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25482F0C:  40860054  bc 4,6,0x25482F60
+	   6: Jc06o       	$0x25482F60
+
+
+
+. 383 25482F08 8
+. 84 A3 00 04 40 86 00 54
+==== BB 384 (0x25482F60) approx BBs exec'd 0 ====
+
+	0x25482F60:  80A3FFFC  lwz r5,-4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25482F64:  7CAA3279  xor. r10,r5,r6
+	   5: GETL       	R5, t4
+	   6: GETL       	R6, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25482F68:  7C662850  subf r3,r6,r5
+	  12: GETL       	R6, t10
+	  13: GETL       	R5, t12
+	  14: SUBL       	t10, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0x25482F6C:  4CA00020  bclr 5,0
+	  17: GETL       	LR, t14
+	  18: Jc00o-r       	t14
+
+
+
+. 384 25482F60 16
+. 80 A3 FF FC 7C AA 32 79 7C 66 28 50 4C A0 00 20
+==== BB 385 (0x2547C6FC) approx BBs exec'd 0 ====
+
+	0x2547C6FC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547C700:  38600001  li r3,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x2547C704:  419E0030  bc 12,30,0x2547C734
+	   7: Js30o       	$0x2547C734
+
+
+
+. 385 2547C6FC 12
+. 2F 83 00 00 38 60 00 01 41 9E 00 30
+==== BB 386 (0x2547C708) approx BBs exec'd 0 ====
+
+	0x2547C708:  83FF001C  lwz r31,28(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x2547C70C:  4800001C  b 0x2547C728
+	   5: JMPo       	$0x2547C728  ($4)
+
+
+
+. 386 2547C708 8
+. 83 FF 00 1C 48 00 00 1C
+==== BB 387 (0x2547C728) approx BBs exec'd 0 ====
+
+	0x2547C728:  2C1F0000  cmpi cr0,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547C72C:  4082FFE4  bc 4,2,0x2547C710
+	   4: Jc02o       	$0x2547C710
+
+
+
+. 387 2547C728 8
+. 2C 1F 00 00 40 82 FF E4
+==== BB 388 (0x2547C710) approx BBs exec'd 0 ====
+
+	0x2547C710:  809F0000  lwz r4,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x2547C714:  7FA3EB78  or r3,r29,r29
+	   4: GETL       	R29, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x2547C718:  480067C9  bl 0x25482EE0
+	   7: MOVL       	$0x2547C71C, t6
+	   8: PUTL       	t6, LR
+	   9: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 388 2547C710 12
+. 80 9F 00 00 7F A3 EB 78 48 00 67 C9
+==== BB 389 (0x2547C71C) approx BBs exec'd 0 ====
+
+	0x2547C71C:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547C720:  41860030  bc 12,6,0x2547C750
+	   4: Js06o       	$0x2547C750
+
+
+
+. 389 2547C71C 8
+. 2C 83 00 00 41 86 00 30
+==== BB 390 (0x2547C724) approx BBs exec'd 0 ====
+
+	0x2547C724:  83FF0004  lwz r31,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x2547C728:  2C1F0000  cmpi cr0,r31,0
+	   5: GETL       	R31, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547C72C:  4082FFE4  bc 4,2,0x2547C710
+	   9: Jc02o       	$0x2547C710
+
+
+
+. 390 2547C724 12
+. 83 FF 00 04 2C 1F 00 00 40 82 FF E4
+==== BB 391 (0x2547C730) approx BBs exec'd 0 ====
+
+	0x2547C730:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547C734:  80810024  lwz r4,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x2547C738:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x2547C73C:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x2547C740:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547C744:  83E1001C  lwz r31,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0x2547C748:  38210020  addi r1,r1,32
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0x2547C74C:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+
+. 391 2547C730 32
+. 38 60 00 00 80 81 00 24 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 392 (0x25476D98) approx BBs exec'd 0 ====
+
+	0x25476D98:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x25476D9C:  408601B8  bc 4,6,0x25476F54
+	   4: Jc06o       	$0x25476F54
+
+
+
+. 392 25476D98 8
+. 2C 83 00 00 40 86 01 B8
+==== BB 393 (0x25476DA0) approx BBs exec'd 0 ====
+
+	0x25476DA0:  807D0180  lwz r3,384(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25476DA4:  74690080  andis. r9,r3,0x80
+	   5: GETL       	R3, t4
+	   6: ANDL       	$0x800000, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25476DA8:  418200F8  bc 12,2,0x25476EA0
+	  11: Js02o       	$0x25476EA0
+
+
+
+. 393 25476DA0 12
+. 80 7D 01 80 74 69 00 80 41 82 00 F8
+==== BB 394 (0x25476EA0) approx BBs exec'd 0 ====
+
+	0x25476EA0:  817D0058  lwz r11,88(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x58, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25476EA4:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25476EA8:  2D8B0000  cmpi cr3,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x3, CR
+	  11: INCEIPL       	$4
+
+	0x25476EAC:  41AEFF00  bc 13,14,0x25476DAC
+	  12: Js14o       	$0x25476DAC
+
+
+
+. 394 25476EA0 16
+. 81 7D 00 58 7F E3 FB 78 2D 8B 00 00 41 AE FF 00
+==== BB 395 (0x25476DAC) approx BBs exec'd 0 ====
+
+	0x25476DAC:  83BD000C  lwz r29,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25476DB0:  2C9D0000  cmpi cr1,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25476DB4:  4086FFCC  bc 4,6,0x25476D80
+	   9: Jc06o       	$0x25476D80
+
+
+
+. 395 25476DAC 12
+. 83 BD 00 0C 2C 9D 00 00 40 86 FF CC
+==== BB 396 (0x25476D80) approx BBs exec'd 0 ====
+
+	0x25476D80:  80BD0180  lwz r5,384(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25476D84:  7FA4EB78  or r4,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25476D88:  7FE3FB78  or r3,r31,r31
+	   8: GETL       	R31, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x25476D8C:  74A90040  andis. r9,r5,0x40
+	  11: GETL       	R5, t8
+	  12: ANDL       	$0x400000, t8
+	  13: PUTL       	t8, R9
+	  14: CMP0L       	t8, t10  (-rSo)
+	  15: ICRFL       	t10, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0x25476D90:  4082001C  bc 4,2,0x25476DAC
+	  17: Jc02o       	$0x25476DAC
+
+
+
+. 396 25476D80 20
+. 80 BD 01 80 7F A4 EB 78 7F E3 FB 78 74 A9 00 40 40 82 00 1C
+==== BB 397 (0x25476EB0) approx BBs exec'd 0 ====
+
+	0x25476EB0:  813D0034  lwz r9,52(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25476EB4:  816B0004  lwz r11,4(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25476EB8:  80890004  lwz r4,4(r9)
+	  10: GETL       	R9, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x25476EBC:  7F845A14  add r28,r4,r11
+	  15: GETL       	R4, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R28
+	  19: INCEIPL       	$4
+
+	0x25476EC0:  7F84E378  or r4,r28,r28
+	  20: GETL       	R28, t16
+	  21: PUTL       	t16, R4
+	  22: INCEIPL       	$4
+
+	0x25476EC4:  4800C01D  bl 0x25482EE0
+	  23: MOVL       	$0x25476EC8, t18
+	  24: PUTL       	t18, LR
+	  25: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 397 25476EB0 24
+. 81 3D 00 34 81 6B 00 04 80 89 00 04 7F 84 5A 14 7F 84 E3 78 48 00 C0 1D
+==== BB 398 (0x25482F80) approx BBs exec'd 0 ====
+
+	0x25482F80:  88A30000  lbz r5,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x25482F84:  88C40000  lbz r6,0(r4)
+	   4: GETL       	R4, t4
+	   5: LDB       	(t4), t6
+	   6: PUTL       	t6, R6
+	   7: INCEIPL       	$4
+
+	0x25482F88:  48000010  b 0x25482F98
+	   8: JMPo       	$0x25482F98  ($4)
+
+
+
+. 398 25482F80 12
+. 88 A3 00 00 88 C4 00 00 48 00 00 10
+==== BB 399 (0x25482F98) approx BBs exec'd 0 ====
+
+	0x25482F98:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x25482F9C:  41860020  bc 12,6,0x25482FBC
+	   4: Js06o       	$0x25482FBC
+
+
+
+. 399 25482F98 8
+. 2C 85 00 00 41 86 00 20
+==== BB 400 (0x25482FA0) approx BBs exec'd 0 ====
+
+	0x25482FA0:  7C053000  cmp cr0,r5,r6
+	   0: GETL       	R5, t0
+	   1: GETL       	R6, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25482FA4:  40820018  bc 4,2,0x25482FBC
+	   5: Jc02o       	$0x25482FBC
+
+
+
+. 400 25482FA0 8
+. 7C 05 30 00 40 82 00 18
+==== BB 401 (0x25482FBC) approx BBs exec'd 0 ====
+
+	0x25482FBC:  7C662850  subf r3,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25482FC0:  4E800020  blr
+	   5: GETL       	LR, t4
+	   6: JMPo-r       	t4  ($4)
+
+
+
+. 401 25482FBC 8
+. 7C 66 28 50 4E 80 00 20
+==== BB 402 (0x25476EC8) approx BBs exec'd 0 ====
+
+	0x25476EC8:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25476ECC:  4092FEE0  bc 4,18,0x25476DAC
+	   4: Jc18o       	$0x25476DAC
+
+
+
+. 402 25476EC8 8
+. 2E 03 00 00 40 92 FE E0
+==== BB 403 (0x25476DB8) approx BBs exec'd 0 ====
+
+	0x25476DB8:  829E04F4  lwz r20,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R20
+	   4: INCEIPL       	$4
+
+	0x25476DBC:  2D9B0000  cmpi cr3,r27,0
+	   5: GETL       	R27, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x3, CR
+	   8: INCEIPL       	$4
+
+	0x25476DC0:  81540000  lwz r10,0(r20)
+	   9: GETL       	R20, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R10
+	  12: INCEIPL       	$4
+
+	0x25476DC4:  71490040  andi. r9,r10,0x40
+	  13: GETL       	R10, t12
+	  14: ANDL       	$0x40, t12
+	  15: PUTL       	t12, R9
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x25476DC8:  40820390  bc 4,2,0x25477158
+	  19: Jc02o       	$0x25477158
+
+
+
+. 403 25476DB8 20
+. 82 9E 04 F4 2D 9B 00 00 81 54 00 00 71 49 00 40 40 82 03 90
+==== BB 404 (0x25476DCC) approx BBs exec'd 0 ====
+
+	0x25476DCC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476DD0:  3880002F  li r4,47
+	   3: MOVL       	$0x2F, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25476DD4:  4800C02D  bl 0x25482E00
+	   6: MOVL       	$0x25476DD8, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25482E00  ($4)
+
+
+
+. 404 25476DCC 12
+. 7F E3 FB 78 38 80 00 2F 48 00 C0 2D
+==== BB 405 (0x25482EB4) approx BBs exec'd 0 ====
+
+	0x25482EB4:  7CE06038  and r0,r7,r12
+	   0: GETL       	R7, t0
+	   1: GETL       	R12, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482EB8:  7CEA6378  or r10,r7,r12
+	   5: GETL       	R7, t4
+	   6: GETL       	R12, t6
+	   7: ORL       	t6, t4
+	   8: PUTL       	t4, R10
+	   9: INCEIPL       	$4
+
+	0x25482EBC:  7C003A14  add r0,r0,r7
+	  10: GETL       	R0, t8
+	  11: GETL       	R7, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x25482EC0:  7D4900F8  nor r9,r10,r0
+	  15: GETL       	R10, t12
+	  16: GETL       	R0, t14
+	  17: ORL       	t14, t12
+	  18: NOTL       	t12
+	  19: PUTL       	t12, R9
+	  20: INCEIPL       	$4
+
+	0x25482EC4:  7D240034  cntlzw r4,r9
+	  21: GETL       	R9, t16
+	  22: CNTLZL       	t16
+	  23: PUTL       	t16, R4
+	  24: INCEIPL       	$4
+
+	0x25482EC8:  3908FFFC  addi r8,r8,-4
+	  25: GETL       	R8, t18
+	  26: ADDL       	$0xFFFFFFFC, t18
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0x25482ECC:  5484E8FE  rlwinm r4,r4,29,3,31
+	  29: GETL       	R4, t20
+	  30: SHRL       	$0x3, t20
+	  31: PUTL       	t20, R4
+	  32: INCEIPL       	$4
+
+	0x25482ED0:  7C682214  add r3,r8,r4
+	  33: GETL       	R8, t22
+	  34: GETL       	R4, t24
+	  35: ADDL       	t22, t24
+	  36: PUTL       	t24, R3
+	  37: INCEIPL       	$4
+
+	0x25482ED4:  4E800020  blr
+	  38: GETL       	LR, t26
+	  39: JMPo-r       	t26  ($4)
+
+
+
+. 405 25482EB4 36
+. 7C E0 60 38 7C EA 63 78 7C 00 3A 14 7D 49 00 F8 7D 24 00 34 39 08 FF FC 54 84 E8 FE 7C 68 22 14 4E 80 00 20
+==== BB 406 (0x25476DD8) approx BBs exec'd 0 ====
+
+	0x25476DD8:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25476DDC:  419A0290  bc 12,26,0x2547706C
+	   4: Js26o       	$0x2547706C
+
+
+
+. 406 25476DD8 8
+. 2F 03 00 00 41 9A 02 90
+==== BB 407 (0x25476DE0) approx BBs exec'd 0 ====
+
+	0x25476DE0:  418E01D8  bc 12,14,0x25476FB8
+	   0: Js14o       	$0x25476FB8
+
+
+
+. 407 25476DE0 4
+. 41 8E 01 D8
+==== BB 408 (0x25476DE4) approx BBs exec'd 0 ====
+
+	0x25476DE4:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476DE8:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25476DEC:  4BFFD941  bl 0x2547472C
+	   6: MOVL       	$0x25476DF0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x2547472C  ($4)
+
+
+
+. 408 25476DE4 12
+. 7F 63 DB 78 7F E4 FB 78 4B FF D9 41
+==== BB 409 expand_dynamic_string_token(0x2547472C) approx BBs exec'd 0 ====
+
+	0x2547472C:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25474730:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25474734:  480228CD  bl 0x25497000
+	   9: MOVL       	$0x25474738, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 409 2547472C 12
+. 94 21 FF D0 7C 08 02 A6 48 02 28 CD
+==== BB 410 (0x25474738) approx BBs exec'd 0 ====
+
+	0x25474738:  7D800026  mfcr r12
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0x2547473C:  93C10028  stw r30,40(r1)
+	   3: GETL       	R30, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x28, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x25474740:  7FC802A6  mflr r30
+	   8: GETL       	LR, t6
+	   9: PUTL       	t6, R30
+	  10: INCEIPL       	$4
+
+	0x25474744:  93A10024  stw r29,36(r1)
+	  11: GETL       	R29, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x24, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25474748:  7C9D2378  or r29,r4,r4
+	  16: GETL       	R4, t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x2547474C:  93410018  stw r26,24(r1)
+	  19: GETL       	R26, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x18, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x25474750:  38800024  li r4,36
+	  24: MOVL       	$0x24, t18
+	  25: PUTL       	t18, R4
+	  26: INCEIPL       	$4
+
+	0x25474754:  93810020  stw r28,32(r1)
+	  27: GETL       	R28, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x25474758:  7C7C1B78  or r28,r3,r3
+	  32: GETL       	R3, t24
+	  33: PUTL       	t24, R28
+	  34: INCEIPL       	$4
+
+	0x2547475C:  92E1000C  stw r23,12(r1)
+	  35: GETL       	R23, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0xC, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x25474760:  7FA3EB78  or r3,r29,r29
+	  40: GETL       	R29, t30
+	  41: PUTL       	t30, R3
+	  42: INCEIPL       	$4
+
+	0x25474764:  93010010  stw r24,16(r1)
+	  43: GETL       	R24, t32
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x10, t34
+	  46: STL       	t32, (t34)
+	  47: INCEIPL       	$4
+
+	0x25474768:  93210014  stw r25,20(r1)
+	  48: GETL       	R25, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x14, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x2547476C:  9361001C  stw r27,28(r1)
+	  53: GETL       	R27, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x1C, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x25474770:  93E1002C  stw r31,44(r1)
+	  58: GETL       	R31, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x2C, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x25474774:  90010034  stw r0,52(r1)
+	  63: GETL       	R0, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x34, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x25474778:  91810008  stw r12,8(r1)
+	  68: GETL       	R12, t52
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x8, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x2547477C:  4800E685  bl 0x25482E00
+	  73: MOVL       	$0x25474780, t56
+	  74: PUTL       	t56, LR
+	  75: JMPo-c       	$0x25482E00  ($4)
+
+
+
+. 410 25474738 72
+. 7D 80 00 26 93 C1 00 28 7F C8 02 A6 93 A1 00 24 7C 9D 23 78 93 41 00 18 38 80 00 24 93 81 00 20 7C 7C 1B 78 92 E1 00 0C 7F A3 EB 78 93 01 00 10 93 21 00 14 93 61 00 1C 93 E1 00 2C 90 01 00 34 91 81 00 08 48 00 E6 85
+==== BB 411 (0x25474780) approx BBs exec'd 0 ====
+
+	0x25474780:  7C7A1B79  or. r26,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R26
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25474784:  40820074  bc 4,2,0x254747F8
+	   5: Jc02o       	$0x254747F8
+
+
+
+. 411 25474780 8
+. 7C 7A 1B 79 40 82 00 74
+==== BB 412 (0x25474788) approx BBs exec'd 0 ====
+
+	0x25474788:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547478C:  4800E845  bl 0x25482FD0
+	   3: MOVL       	$0x25474790, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+
+. 412 25474788 8
+. 7F A3 EB 78 48 00 E8 45
+==== BB 413 (0x25474790) approx BBs exec'd 0 ====
+
+	0x25474790:  3BE30001  addi r31,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0x25474794:  7FE3FB78  or r3,r31,r31
+	   4: GETL       	R31, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0x25474798:  4802328D  bl 0x25497A24
+	   7: MOVL       	$0x2547479C, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 413 25474790 12
+. 3B E3 00 01 7F E3 FB 78 48 02 32 8D
+==== BB 414 (0x2547FE20) approx BBs exec'd 0 ====
+
+	0x2547FE20:  813E04F4  lwz r9,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547FE24:  81890004  lwz r12,4(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x2547FE28:  7C8CE214  add r4,r12,r28
+	  10: GETL       	R12, t8
+	  11: GETL       	R28, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x2547FE2C:  7D4C00D0  neg r10,r12
+	  15: GETL       	R12, t12
+	  16: NEGL       	t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547FE30:  3964FFFF  addi r11,r4,-1
+	  19: GETL       	R4, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, R11
+	  22: INCEIPL       	$4
+
+	0x2547FE34:  7D7D5038  and r29,r11,r10
+	  23: GETL       	R11, t16
+	  24: GETL       	R10, t18
+	  25: ANDL       	t16, t18
+	  26: PUTL       	t18, R29
+	  27: INCEIPL       	$4
+
+	0x2547FE38:  7FA4EB78  or r4,r29,r29
+	  28: GETL       	R29, t20
+	  29: PUTL       	t20, R4
+	  30: INCEIPL       	$4
+
+	0x2547FE3C:  4800295D  bl 0x25482798
+	  31: MOVL       	$0x2547FE40, t22
+	  32: PUTL       	t22, LR
+	  33: JMPo-c       	$0x25482798  ($4)
+
+
+
+. 414 2547FE20 32
+. 81 3E 04 F4 81 89 00 04 7C 8C E2 14 7D 4C 00 D0 39 64 FF FF 7D 7D 50 38 7F A4 EB 78 48 00 29 5D
+==== BB 415 mmap(0x25482798) approx BBs exec'd 0 ====
+
+	0x25482798:  3800005A  li r0,90
+	   0: MOVL       	$0x5A, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x2548279C:  44000002  sc
+	   3: JMPo-sys       	$0x254827A0  ($4)
+
+
+
+. 415 25482798 8
+. 38 00 00 5A 44 00 00 02
+==== BB 416 (0x254827A0) approx BBs exec'd 0 ====
+
+	0x254827A0:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 416 254827A0 4
+. 4C A3 00 20
+==== BB 417 (0x2547FE40) approx BBs exec'd 0 ====
+
+	0x2547FE40:  80BB0000  lwz r5,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x2547FE44:  7D23EA14  add r9,r3,r29
+	   4: GETL       	R3, t4
+	   5: GETL       	R29, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x2547FE48:  7C832800  cmp cr1,r3,r5
+	   9: GETL       	R3, t8
+	  10: GETL       	R5, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x2547FE4C:  41860008  bc 12,6,0x2547FE54
+	  14: Js06o       	$0x2547FE54
+
+
+
+. 417 2547FE40 16
+. 80 BB 00 00 7D 23 EA 14 7C 83 28 00 41 86 00 08
+==== BB 418 (0x2547FE50) approx BBs exec'd 0 ====
+
+	0x2547FE50:  907F0000  stw r3,0(r31)
+	   0: GETL       	R3, t0
+	   1: GETL       	R31, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x2547FE54:  807F0000  lwz r3,0(r31)
+	   4: GETL       	R31, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R3
+	   7: INCEIPL       	$4
+
+	0x2547FE58:  80C10024  lwz r6,36(r1)
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0x24, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R6
+	  12: INCEIPL       	$4
+
+	0x2547FE5C:  7D03E214  add r8,r3,r28
+	  13: GETL       	R3, t12
+	  14: GETL       	R28, t14
+	  15: ADDL       	t12, t14
+	  16: PUTL       	t14, R8
+	  17: INCEIPL       	$4
+
+	0x2547FE60:  80FE0420  lwz r7,1056(r30)
+	  18: GETL       	R30, t16
+	  19: ADDL       	$0x420, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R7
+	  22: INCEIPL       	$4
+
+	0x2547FE64:  913B0000  stw r9,0(r27)
+	  23: GETL       	R9, t20
+	  24: GETL       	R27, t22
+	  25: STL       	t20, (t22)
+	  26: INCEIPL       	$4
+
+	0x2547FE68:  7CC803A6  mtlr r6
+	  27: GETL       	R6, t24
+	  28: PUTL       	t24, LR
+	  29: INCEIPL       	$4
+
+	0x2547FE6C:  911F0000  stw r8,0(r31)
+	  30: GETL       	R8, t26
+	  31: GETL       	R31, t28
+	  32: STL       	t26, (t28)
+	  33: INCEIPL       	$4
+
+	0x2547FE70:  8361000C  lwz r27,12(r1)
+	  34: GETL       	R1, t30
+	  35: ADDL       	$0xC, t30
+	  36: LDL       	(t30), t32
+	  37: PUTL       	t32, R27
+	  38: INCEIPL       	$4
+
+	0x2547FE74:  83810010  lwz r28,16(r1)
+	  39: GETL       	R1, t34
+	  40: ADDL       	$0x10, t34
+	  41: LDL       	(t34), t36
+	  42: PUTL       	t36, R28
+	  43: INCEIPL       	$4
+
+	0x2547FE78:  83A10014  lwz r29,20(r1)
+	  44: GETL       	R1, t38
+	  45: ADDL       	$0x14, t38
+	  46: LDL       	(t38), t40
+	  47: PUTL       	t40, R29
+	  48: INCEIPL       	$4
+
+	0x2547FE7C:  83C10018  lwz r30,24(r1)
+	  49: GETL       	R1, t42
+	  50: ADDL       	$0x18, t42
+	  51: LDL       	(t42), t44
+	  52: PUTL       	t44, R30
+	  53: INCEIPL       	$4
+
+	0x2547FE80:  83E1001C  lwz r31,28(r1)
+	  54: GETL       	R1, t46
+	  55: ADDL       	$0x1C, t46
+	  56: LDL       	(t46), t48
+	  57: PUTL       	t48, R31
+	  58: INCEIPL       	$4
+
+	0x2547FE84:  38210020  addi r1,r1,32
+	  59: GETL       	R1, t50
+	  60: ADDL       	$0x20, t50
+	  61: PUTL       	t50, R1
+	  62: INCEIPL       	$4
+
+	0x2547FE88:  90670000  stw r3,0(r7)
+	  63: GETL       	R3, t52
+	  64: GETL       	R7, t54
+	  65: STL       	t52, (t54)
+	  66: INCEIPL       	$4
+
+	0x2547FE8C:  4E800020  blr
+	  67: GETL       	LR, t56
+	  68: JMPo-r       	t56  ($4)
+
+
+
+. 418 2547FE50 64
+. 90 7F 00 00 80 7F 00 00 80 C1 00 24 7D 03 E2 14 80 FE 04 20 91 3B 00 00 7C C8 03 A6 91 1F 00 00 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 90 67 00 00 4E 80 00 20
+==== BB 419 (0x2547479C) approx BBs exec'd 0 ====
+
+	0x2547479C:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254747A0:  2C030000  cmpi cr0,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0x254747A4:  41820014  bc 12,2,0x254747B8
+	   7: Js02o       	$0x254747B8
+
+
+
+. 419 2547479C 12
+. 38 00 00 00 2C 03 00 00 41 82 00 14
+==== BB 420 (0x254747A8) approx BBs exec'd 0 ====
+
+	0x254747A8:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254747AC:  7FE5FB78  or r5,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x254747B0:  4800F431  bl 0x25483BE0
+	   6: MOVL       	$0x254747B4, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 420 254747A8 12
+. 7F A4 EB 78 7F E5 FB 78 48 00 F4 31
+==== BB 421 (0x254747B4) approx BBs exec'd 0 ====
+
+	0x254747B4:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254747B8:  7C030378  or r3,r0,r0
+	   3: GETL       	R0, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0x254747BC:  83810034  lwz r28,52(r1)
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0x34, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0x254747C0:  81410008  lwz r10,8(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x8, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R10
+	  15: INCEIPL       	$4
+
+	0x254747C4:  7F8803A6  mtlr r28
+	  16: GETL       	R28, t12
+	  17: PUTL       	t12, LR
+	  18: INCEIPL       	$4
+
+	0x254747C8:  82E1000C  lwz r23,12(r1)
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0xC, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R23
+	  23: INCEIPL       	$4
+
+	0x254747CC:  83010010  lwz r24,16(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x10, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R24
+	  28: INCEIPL       	$4
+
+	0x254747D0:  7D408120  mtcrf 0x8,r10
+	  29: GETL       	R10, t22
+	  30: ICRFL       	t22, $0x4, CR
+	  31: INCEIPL       	$4
+
+	0x254747D4:  83210014  lwz r25,20(r1)
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x14, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R25
+	  36: INCEIPL       	$4
+
+	0x254747D8:  83410018  lwz r26,24(r1)
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x18, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R26
+	  41: INCEIPL       	$4
+
+	0x254747DC:  8361001C  lwz r27,28(r1)
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x1C, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R27
+	  46: INCEIPL       	$4
+
+	0x254747E0:  83810020  lwz r28,32(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x20, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R28
+	  51: INCEIPL       	$4
+
+	0x254747E4:  83A10024  lwz r29,36(r1)
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x24, t40
+	  54: LDL       	(t40), t42
+	  55: PUTL       	t42, R29
+	  56: INCEIPL       	$4
+
+	0x254747E8:  83C10028  lwz r30,40(r1)
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x28, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R30
+	  61: INCEIPL       	$4
+
+	0x254747EC:  83E1002C  lwz r31,44(r1)
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x2C, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R31
+	  66: INCEIPL       	$4
+
+	0x254747F0:  38210030  addi r1,r1,48
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x30, t52
+	  69: PUTL       	t52, R1
+	  70: INCEIPL       	$4
+
+	0x254747F4:  4E800020  blr
+	  71: GETL       	LR, t54
+	  72: JMPo-r       	t54  ($4)
+
+
+
+. 421 254747B4 68
+. 7C 60 1B 78 7C 03 03 78 83 81 00 34 81 41 00 08 7F 88 03 A6 82 E1 00 0C 83 01 00 10 7D 40 81 20 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 422 (0x25476DF0) approx BBs exec'd 0 ====
+
+	0x25476DF0:  3B40FFFF  li r26,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R26
+	   2: INCEIPL       	$4
+
+	0x25476DF4:  2F030000  cmpi cr6,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x6, CR
+	   6: INCEIPL       	$4
+
+	0x25476DF8:  90610228  stw r3,552(r1)
+	   7: GETL       	R3, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0x228, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25476DFC:  2E1AFFFF  cmpi cr4,r26,-1
+	  12: GETL       	R26, t10
+	  13: MOVL       	$0xFFFFFFFF, t14
+	  14: CMPL       	t10, t14, t12  (-rSo)
+	  15: ICRFL       	t12, $0x4, CR
+	  16: INCEIPL       	$4
+
+	0x25476E00:  409A0200  bc 4,26,0x25477000
+	  17: Jc26o       	$0x25477000
+
+
+
+. 422 25476DF0 20
+. 3B 40 FF FF 2F 03 00 00 90 61 02 28 2E 1A FF FF 40 9A 02 00
+==== BB 423 (0x25477000) approx BBs exec'd 0 ====
+
+	0x25477000:  38810018  addi r4,r1,24
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x18, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25477004:  4BFFF441  bl 0x25476444
+	   4: MOVL       	$0x25477008, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0x25476444  ($4)
+
+
+
+. 423 25477000 8
+. 38 81 00 18 4B FF F4 41
+==== BB 424 open_verify(0x25476444) approx BBs exec'd 0 ====
+
+	0x25476444:  9421FFA0  stwu r1,-96(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFA0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25476448:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547644C:  48020BB5  bl 0x25497000
+	   9: MOVL       	$0x25476450, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 424 25476444 12
+. 94 21 FF A0 7C 08 02 A6 48 02 0B B5
+==== BB 425 (0x25476450) approx BBs exec'd 0 ====
+
+	0x25476450:  93C10058  stw r30,88(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x58, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25476454:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25476458:  93010040  stw r24,64(r1)
+	   8: GETL       	R24, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x40, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547645C:  93E1005C  stw r31,92(r1)
+	  13: GETL       	R31, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x5C, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25476460:  7C982378  or r24,r4,r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, R24
+	  20: INCEIPL       	$4
+
+	0x25476464:  7C3F0B78  or r31,r1,r1
+	  21: GETL       	R1, t16
+	  22: PUTL       	t16, R31
+	  23: INCEIPL       	$4
+
+	0x25476468:  38800000  li r4,0
+	  24: MOVL       	$0x0, t18
+	  25: PUTL       	t18, R4
+	  26: INCEIPL       	$4
+
+	0x2547646C:  92C10038  stw r22,56(r1)
+	  27: GETL       	R22, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x38, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x25476470:  7C761B78  or r22,r3,r3
+	  32: GETL       	R3, t24
+	  33: PUTL       	t24, R22
+	  34: INCEIPL       	$4
+
+	0x25476474:  93210044  stw r25,68(r1)
+	  35: GETL       	R25, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x44, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x25476478:  9361004C  stw r27,76(r1)
+	  40: GETL       	R27, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x4C, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x2547647C:  3B600000  li r27,0
+	  45: MOVL       	$0x0, t34
+	  46: PUTL       	t34, R27
+	  47: INCEIPL       	$4
+
+	0x25476480:  92A10034  stw r21,52(r1)
+	  48: GETL       	R21, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x34, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25476484:  92E1003C  stw r23,60(r1)
+	  53: GETL       	R23, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x3C, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x25476488:  93410048  stw r26,72(r1)
+	  58: GETL       	R26, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x48, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x2547648C:  93810050  stw r28,80(r1)
+	  63: GETL       	R28, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x50, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x25476490:  93A10054  stw r29,84(r1)
+	  68: GETL       	R29, t52
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x54, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x25476494:  90010064  stw r0,100(r1)
+	  73: GETL       	R0, t56
+	  74: GETL       	R1, t58
+	  75: ADDL       	$0x64, t58
+	  76: STL       	t56, (t58)
+	  77: INCEIPL       	$4
+
+	0x25476498:  4800BB49  bl 0x25481FE0
+	  78: MOVL       	$0x2547649C, t60
+	  79: PUTL       	t60, LR
+	  80: JMPo-c       	$0x25481FE0  ($4)
+
+
+
+. 425 25476450 76
+. 93 C1 00 58 7F C8 02 A6 93 01 00 40 93 E1 00 5C 7C 98 23 78 7C 3F 0B 78 38 80 00 00 92 C1 00 38 7C 76 1B 78 93 21 00 44 93 61 00 4C 3B 60 00 00 92 A1 00 34 92 E1 00 3C 93 41 00 48 93 81 00 50 93 A1 00 54 90 01 00 64 48 00 BB 49
+==== BB 426 open(0x25481FE0) approx BBs exec'd 0 ====
+
+	0x25481FE0:  38000005  li r0,5
+	   0: MOVL       	$0x5, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25481FE4:  44000002  sc
+	   3: JMPo-sys       	$0x25481FE8  ($4)
+
+
+
+. 426 25481FE0 8
+. 38 00 00 05 44 00 00 02
+==== BB 427 (0x25481FE8) approx BBs exec'd 0 ====
+
+	0x25481FE8:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 427 25481FE8 4
+. 4C A3 00 20
+==== BB 428 (0x2547649C) approx BBs exec'd 0 ====
+
+	0x2547649C:  2F83FFFF  cmpi cr7,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254764A0:  7C791B78  or r25,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R25
+	   7: INCEIPL       	$4
+
+	0x254764A4:  419E00CC  bc 12,30,0x25476570
+	   8: Js30o       	$0x25476570
+
+
+
+. 428 2547649C 12
+. 2F 83 FF FF 7C 79 1B 78 41 9E 00 CC
+==== BB 429 (0x254764A8) approx BBs exec'd 0 ====
+
+	0x254764A8:  82FE0514  lwz r23,1300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x514, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x254764AC:  3B980004  addi r28,r24,4
+	   5: GETL       	R24, t4
+	   6: ADDL       	$0x4, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x254764B0:  7F84E378  or r4,r28,r28
+	   9: GETL       	R28, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0x254764B4:  38A00200  li r5,512
+	  12: MOVL       	$0x200, t8
+	  13: PUTL       	t8, R5
+	  14: INCEIPL       	$4
+
+	0x254764B8:  93770000  stw r27,0(r23)
+	  15: GETL       	R27, t10
+	  16: GETL       	R23, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x254764BC:  4800BB45  bl 0x25482000
+	  19: MOVL       	$0x254764C0, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25482000  ($4)
+
+
+
+. 429 254764A8 24
+. 82 FE 05 14 3B 98 00 04 7F 84 E3 78 38 A0 02 00 93 77 00 00 48 00 BB 45
+==== BB 430 read(0x25482000) approx BBs exec'd 0 ====
+
+	0x25482000:  38000003  li r0,3
+	   0: MOVL       	$0x3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25482004:  44000002  sc
+	   3: JMPo-sys       	$0x25482008  ($4)
+
+
+
+. 430 25482000 8
+. 38 00 00 03 44 00 00 02
+==== BB 431 (0x25482008) approx BBs exec'd 0 ====
+
+	0x25482008:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 431 25482008 4
+. 4C A3 00 20
+==== BB 432 (0x254764C0) approx BBs exec'd 0 ====
+
+	0x254764C0:  2C030033  cmpi cr0,r3,51
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0x33, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254764C4:  7C7D1B78  or r29,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R29
+	   7: INCEIPL       	$4
+
+	0x254764C8:  90780000  stw r3,0(r24)
+	   8: GETL       	R3, t8
+	   9: GETL       	R24, t10
+	  10: STL       	t8, (t10)
+	  11: INCEIPL       	$4
+
+	0x254764CC:  4081014C  bc 4,1,0x25476618
+	  12: Jc01o       	$0x25476618
+
+
+
+. 432 254764C0 16
+. 2C 03 00 33 7C 7D 1B 78 90 78 00 00 40 81 01 4C
+==== BB 433 (0x254764D0) approx BBs exec'd 0 ====
+
+	0x254764D0:  809E016C  lwz r4,364(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x16C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254764D4:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254764D8:  38A00009  li r5,9
+	   8: MOVL       	$0x9, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x254764DC:  4800CDBD  bl 0x25483298
+	  11: MOVL       	$0x254764E0, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+
+. 433 254764D0 16
+. 80 9E 01 6C 7F 83 E3 78 38 A0 00 09 48 00 CD BD
+==== BB 434 (0x254764E0) approx BBs exec'd 0 ====
+
+	0x254764E0:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x254764E4:  409A0234  bc 4,26,0x25476718
+	   4: Jc26o       	$0x25476718
+
+
+
+. 434 254764E0 8
+. 2F 03 00 00 40 9A 02 34
+==== BB 435 (0x254764E8) approx BBs exec'd 0 ====
+
+	0x254764E8:  819C0014  lwz r12,20(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x254764EC:  811E0200  lwz r8,512(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x200, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x254764F0:  2C8C0001  cmpi cr1,r12,1
+	  10: GETL       	R12, t8
+	  11: MOVL       	$0x1, t12
+	  12: CMPL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x254764F4:  40860138  bc 4,6,0x2547662C
+	  15: Jc06o       	$0x2547662C
+
+
+
+. 435 254764E8 16
+. 81 9C 00 14 81 1E 02 00 2C 8C 00 01 40 86 01 38
+==== BB 436 (0x254764F8) approx BBs exec'd 0 ====
+
+	0x254764F8:  A11C0012  lhz r8,18(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x12, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254764FC:  2F080014  cmpi cr6,r8,20
+	   5: GETL       	R8, t4
+	   6: MOVL       	$0x14, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x25476500:  409A01D0  bc 4,26,0x254766D0
+	  10: Jc26o       	$0x254766D0
+
+
+
+. 436 254764F8 12
+. A1 1C 00 12 2F 08 00 14 40 9A 01 D0
+==== BB 437 (0x25476504) approx BBs exec'd 0 ====
+
+	0x25476504:  A01C0010  lhz r0,16(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25476508:  2F800003  cmpi cr7,r0,3
+	   5: GETL       	R0, t4
+	   6: MOVL       	$0x3, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x2547650C:  409E0278  bc 4,30,0x25476784
+	  10: Jc30o       	$0x25476784
+
+
+
+. 437 25476504 12
+. A0 1C 00 10 2F 80 00 03 40 9E 02 78
+==== BB 438 (0x25476510) approx BBs exec'd 0 ====
+
+	0x25476510:  A2BC002A  lhz r21,42(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x2A, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x25476514:  811E0204  lwz r8,516(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x204, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25476518:  2C950020  cmpi cr1,r21,32
+	  10: GETL       	R21, t8
+	  11: MOVL       	$0x20, t12
+	  12: CMPL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x2547651C:  40860110  bc 4,6,0x2547662C
+	  15: Jc06o       	$0x2547662C
+
+
+
+. 438 25476510 16
+. A2 BC 00 2A 81 1E 02 04 2C 95 00 20 40 86 01 10
+==== BB 439 (0x25476520) approx BBs exec'd 0 ====
+
+	0x25476520:  A17C002C  lhz r11,44(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x2C, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25476524:  809C001C  lwz r4,28(r28)
+	   5: GETL       	R28, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25476528:  557B2834  rlwinm r27,r11,5,0,26
+	  10: GETL       	R11, t8
+	  11: SHLL       	$0x5, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0x2547652C:  7F44DA14  add r26,r4,r27
+	  14: GETL       	R4, t10
+	  15: GETL       	R27, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R26
+	  18: INCEIPL       	$4
+
+	0x25476530:  7F1AE840  cmpl cr6,r26,r29
+	  19: GETL       	R26, t14
+	  20: GETL       	R29, t16
+	  21: CMPUL       	t14, t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x6, CR
+	  23: INCEIPL       	$4
+
+	0x25476534:  7FA4C214  add r29,r4,r24
+	  24: GETL       	R4, t20
+	  25: GETL       	R24, t22
+	  26: ADDL       	t20, t22
+	  27: PUTL       	t22, R29
+	  28: INCEIPL       	$4
+
+	0x25476538:  3B5D0004  addi r26,r29,4
+	  29: GETL       	R29, t24
+	  30: ADDL       	$0x4, t24
+	  31: PUTL       	t24, R26
+	  32: INCEIPL       	$4
+
+	0x2547653C:  41990108  bc 12,25,0x25476644
+	  33: Js25o       	$0x25476644
+
+
+
+. 439 25476520 32
+. A1 7C 00 2C 80 9C 00 1C 55 7B 28 34 7F 44 DA 14 7F 1A E8 40 7F A4 C2 14 3B 5D 00 04 41 99 01 08
+==== BB 440 (0x25476540) approx BBs exec'd 0 ====
+
+	0x25476540:  557B2834  rlwinm r27,r11,5,0,26
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x5, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25476544:  7F5DD378  or r29,r26,r26
+	   4: GETL       	R26, t2
+	   5: PUTL       	t2, R29
+	   6: INCEIPL       	$4
+
+	0x25476548:  7C1BD214  add r0,r27,r26
+	   7: GETL       	R27, t4
+	   8: GETL       	R26, t6
+	   9: ADDL       	t4, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0x2547654C:  4800001C  b 0x25476568
+	  12: JMPo       	$0x25476568  ($4)
+
+
+
+. 440 25476540 16
+. 55 7B 28 34 7F 5D D3 78 7C 1B D2 14 48 00 00 1C
+==== BB 441 (0x25476568) approx BBs exec'd 0 ====
+
+	0x25476568:  7C00E840  cmpl cr0,r0,r29
+	   0: GETL       	R0, t0
+	   1: GETL       	R29, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547656C:  4181FFE4  bc 12,1,0x25476550
+	   5: Js01o       	$0x25476550
+
+
+
+. 441 25476568 8
+. 7C 00 E8 40 41 81 FF E4
+==== BB 442 (0x25476550) approx BBs exec'd 0 ====
+
+	0x25476550:  809D0000  lwz r4,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25476554:  2C840004  cmpi cr1,r4,4
+	   4: GETL       	R4, t4
+	   5: MOVL       	$0x4, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25476558:  4186005C  bc 12,6,0x254765B4
+	   9: Js06o       	$0x254765B4
+
+
+
+. 442 25476550 12
+. 80 9D 00 00 2C 84 00 04 41 86 00 5C
+==== BB 443 (0x2547655C) approx BBs exec'd 0 ====
+
+	0x2547655C:  55662834  rlwinm r6,r11,5,0,26
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x5, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x25476560:  3BBD0020  addi r29,r29,32
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x20, t2
+	   6: PUTL       	t2, R29
+	   7: INCEIPL       	$4
+
+	0x25476564:  7C06D214  add r0,r6,r26
+	   8: GETL       	R6, t4
+	   9: GETL       	R26, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R0
+	  12: INCEIPL       	$4
+
+	0x25476568:  7C00E840  cmpl cr0,r0,r29
+	  13: GETL       	R0, t8
+	  14: GETL       	R29, t10
+	  15: CMPUL       	t8, t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0x2547656C:  4181FFE4  bc 12,1,0x25476550
+	  18: Js01o       	$0x25476550
+
+
+
+. 443 2547655C 20
+. 55 66 28 34 3B BD 00 20 7C 06 D2 14 7C 00 E8 40 41 81 FF E4
+==== BB 444 (0x25476570) approx BBs exec'd 0 ====
+
+	0x25476570:  80810000  lwz r4,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25476574:  7F23CB78  or r3,r25,r25
+	   4: GETL       	R25, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x25476578:  82E40004  lwz r23,4(r4)
+	   7: GETL       	R4, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R23
+	  11: INCEIPL       	$4
+
+	0x2547657C:  82A4FFD4  lwz r21,-44(r4)
+	  12: GETL       	R4, t10
+	  13: ADDL       	$0xFFFFFFD4, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R21
+	  16: INCEIPL       	$4
+
+	0x25476580:  7EE803A6  mtlr r23
+	  17: GETL       	R23, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0x25476584:  82C4FFD8  lwz r22,-40(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0xFFFFFFD8, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R22
+	  24: INCEIPL       	$4
+
+	0x25476588:  82E4FFDC  lwz r23,-36(r4)
+	  25: GETL       	R4, t20
+	  26: ADDL       	$0xFFFFFFDC, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R23
+	  29: INCEIPL       	$4
+
+	0x2547658C:  8304FFE0  lwz r24,-32(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0xFFFFFFE0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R24
+	  34: INCEIPL       	$4
+
+	0x25476590:  8324FFE4  lwz r25,-28(r4)
+	  35: GETL       	R4, t28
+	  36: ADDL       	$0xFFFFFFE4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R25
+	  39: INCEIPL       	$4
+
+	0x25476594:  8344FFE8  lwz r26,-24(r4)
+	  40: GETL       	R4, t32
+	  41: ADDL       	$0xFFFFFFE8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R26
+	  44: INCEIPL       	$4
+
+	0x25476598:  8364FFEC  lwz r27,-20(r4)
+	  45: GETL       	R4, t36
+	  46: ADDL       	$0xFFFFFFEC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R27
+	  49: INCEIPL       	$4
+
+	0x2547659C:  8384FFF0  lwz r28,-16(r4)
+	  50: GETL       	R4, t40
+	  51: ADDL       	$0xFFFFFFF0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R28
+	  54: INCEIPL       	$4
+
+	0x254765A0:  83A4FFF4  lwz r29,-12(r4)
+	  55: GETL       	R4, t44
+	  56: ADDL       	$0xFFFFFFF4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R29
+	  59: INCEIPL       	$4
+
+	0x254765A4:  83C4FFF8  lwz r30,-8(r4)
+	  60: GETL       	R4, t48
+	  61: ADDL       	$0xFFFFFFF8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R30
+	  64: INCEIPL       	$4
+
+	0x254765A8:  83E4FFFC  lwz r31,-4(r4)
+	  65: GETL       	R4, t52
+	  66: ADDL       	$0xFFFFFFFC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R31
+	  69: INCEIPL       	$4
+
+	0x254765AC:  7C812378  or r1,r4,r4
+	  70: GETL       	R4, t56
+	  71: PUTL       	t56, R1
+	  72: INCEIPL       	$4
+
+	0x254765B0:  4E800020  blr
+	  73: GETL       	LR, t58
+	  74: JMPo-r       	t58  ($4)
+
+
+
+. 444 25476570 68
+. 80 81 00 00 7F 23 CB 78 82 E4 00 04 82 A4 FF D4 7E E8 03 A6 82 C4 FF D8 82 E4 FF DC 83 04 FF E0 83 24 FF E4 83 44 FF E8 83 64 FF EC 83 84 FF F0 83 A4 FF F4 83 C4 FF F8 83 E4 FF FC 7C 81 23 78 4E 80 00 20
+==== BB 445 (0x25477008) approx BBs exec'd 0 ====
+
+	0x25477008:  2E03FFFF  cmpi cr4,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x2547700C:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R26
+	   7: INCEIPL       	$4
+
+	0x25477010:  4092FDF4  bc 4,18,0x25476E04
+	   8: Jc18o       	$0x25476E04
+
+
+
+. 445 25477008 12
+. 2E 03 FF FF 7C 7A 1B 78 40 92 FD F4
+==== BB 446 (0x25476E04) approx BBs exec'd 0 ====
+
+	0x25476E04:  571227FE  rlwinm r18,r24,4,31,31
+	   0: GETL       	R24, t0
+	   1: ROLL       	$0x4, t0
+	   2: ANDL       	$0x1, t0
+	   3: PUTL       	t0, R18
+	   4: INCEIPL       	$4
+
+	0x25476E08:  32D2FFFF  addic r22,r18,-1
+	   5: GETL       	R18, t2
+	   6: ADCL       	$0xFFFFFFFF, t2  (-wCa)
+	   7: PUTL       	t2, R22
+	   8: INCEIPL       	$4
+
+	0x25476E0C:  7ED6B110  subfe r22,r22,r22
+	   9: GETL       	R22, t4
+	  10: GETL       	R22, t6
+	  11: SBBL       	t4, t6  (-rCa-wCa)
+	  12: PUTL       	t6, R22
+	  13: INCEIPL       	$4
+
+	0x25476E10:  7F7CB038  and r28,r27,r22
+	  14: GETL       	R27, t8
+	  15: GETL       	R22, t10
+	  16: ANDL       	t8, t10
+	  17: PUTL       	t10, R28
+	  18: INCEIPL       	$4
+
+	0x25476E14:  4092020C  bc 4,18,0x25477020
+	  19: Jc18o       	$0x25477020
+
+
+
+. 446 25476E04 20
+. 57 12 27 FE 32 D2 FF FF 7E D6 B1 10 7F 7C B0 38 40 92 02 0C
+==== BB 447 (0x25477020) approx BBs exec'd 0 ====
+
+	0x25477020:  80FE04C0  lwz r7,1216(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25477024:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25477028:  80C10228  lwz r6,552(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x228, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R6
+	  12: INCEIPL       	$4
+
+	0x2547702C:  7F44D378  or r4,r26,r26
+	  13: GETL       	R26, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0x25477030:  81C70000  lwz r14,0(r7)
+	  16: GETL       	R7, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R14
+	  19: INCEIPL       	$4
+
+	0x25477034:  7E088378  or r8,r16,r16
+	  20: GETL       	R16, t16
+	  21: PUTL       	t16, R8
+	  22: INCEIPL       	$4
+
+	0x25477038:  7F87E378  or r7,r28,r28
+	  23: GETL       	R28, t18
+	  24: PUTL       	t18, R7
+	  25: INCEIPL       	$4
+
+	0x2547703C:  7F09C378  or r9,r24,r24
+	  26: GETL       	R24, t20
+	  27: PUTL       	t20, R9
+	  28: INCEIPL       	$4
+
+	0x25477040:  38A10018  addi r5,r1,24
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x18, t22
+	  31: PUTL       	t22, R5
+	  32: INCEIPL       	$4
+
+	0x25477044:  3941022C  addi r10,r1,556
+	  33: GETL       	R1, t24
+	  34: ADDL       	$0x22C, t24
+	  35: PUTL       	t24, R10
+	  36: INCEIPL       	$4
+
+	0x25477048:  91C1022C  stw r14,556(r1)
+	  37: GETL       	R14, t26
+	  38: GETL       	R1, t28
+	  39: ADDL       	$0x22C, t28
+	  40: STL       	t26, (t28)
+	  41: INCEIPL       	$4
+
+	0x2547704C:  93210008  stw r25,8(r1)
+	  42: GETL       	R25, t30
+	  43: GETL       	R1, t32
+	  44: ADDL       	$0x8, t32
+	  45: STL       	t30, (t32)
+	  46: INCEIPL       	$4
+
+	0x25477050:  4BFFE4B9  bl 0x25475508
+	  47: MOVL       	$0x25477054, t34
+	  48: PUTL       	t34, LR
+	  49: JMPo-c       	$0x25475508  ($4)
+
+
+
+. 447 25477020 52
+. 80 FE 04 C0 7F E3 FB 78 80 C1 02 28 7F 44 D3 78 81 C7 00 00 7E 08 83 78 7F 87 E3 78 7F 09 C3 78 38 A1 00 18 39 41 02 2C 91 C1 02 2C 93 21 00 08 4B FF E4 B9
+==== BB 448 _dl_map_object_from_fd(0x25475508) approx BBs exec'd 0 ====
+
+	0x25475508:  9421FF00  stwu r1,-256(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF00, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547550C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25475510:  48021AF1  bl 0x25497000
+	   9: MOVL       	$0x25475514, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 448 25475508 12
+. 94 21 FF 00 7C 08 02 A6 48 02 1A F1
+==== BB 449 (0x25475514) approx BBs exec'd 0 ====
+
+	0x25475514:  7D800026  mfcr r12
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0x25475518:  93C100F8  stw r30,248(r1)
+	   3: GETL       	R30, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xF8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x2547551C:  7FC802A6  mflr r30
+	   8: GETL       	LR, t6
+	   9: PUTL       	t6, R30
+	  10: INCEIPL       	$4
+
+	0x25475520:  93E100FC  stw r31,252(r1)
+	  11: GETL       	R31, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0xFC, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25475524:  7C3F0B78  or r31,r1,r1
+	  16: GETL       	R1, t12
+	  17: PUTL       	t12, R31
+	  18: INCEIPL       	$4
+
+	0x25475528:  90010104  stw r0,260(r1)
+	  19: GETL       	R0, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x104, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547552C:  920100C0  stw r16,192(r1)
+	  24: GETL       	R16, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0xC0, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x25475530:  7CD03378  or r16,r6,r6
+	  29: GETL       	R6, t22
+	  30: PUTL       	t22, R16
+	  31: INCEIPL       	$4
+
+	0x25475534:  926100CC  stw r19,204(r1)
+	  32: GETL       	R19, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0xCC, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x25475538:  7C932378  or r19,r4,r4
+	  37: GETL       	R4, t28
+	  38: PUTL       	t28, R19
+	  39: INCEIPL       	$4
+
+	0x2547553C:  928100D0  stw r20,208(r1)
+	  40: GETL       	R20, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0xD0, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x25475540:  7C741B78  or r20,r3,r3
+	  45: GETL       	R3, t34
+	  46: PUTL       	t34, R20
+	  47: INCEIPL       	$4
+
+	0x25475544:  92C100D8  stw r22,216(r1)
+	  48: GETL       	R22, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0xD8, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25475548:  38600003  li r3,3
+	  53: MOVL       	$0x3, t40
+	  54: PUTL       	t40, R3
+	  55: INCEIPL       	$4
+
+	0x2547554C:  92E100DC  stw r23,220(r1)
+	  56: GETL       	R23, t42
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0xDC, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x25475550:  7D364B78  or r22,r9,r9
+	  61: GETL       	R9, t46
+	  62: PUTL       	t46, R22
+	  63: INCEIPL       	$4
+
+	0x25475554:  932100E4  stw r25,228(r1)
+	  64: GETL       	R25, t48
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0xE4, t50
+	  67: STL       	t48, (t50)
+	  68: INCEIPL       	$4
+
+	0x25475558:  7CF73B78  or r23,r7,r7
+	  69: GETL       	R7, t52
+	  70: PUTL       	t52, R23
+	  71: INCEIPL       	$4
+
+	0x2547555C:  934100E8  stw r26,232(r1)
+	  72: GETL       	R26, t54
+	  73: GETL       	R1, t56
+	  74: ADDL       	$0xE8, t56
+	  75: STL       	t54, (t56)
+	  76: INCEIPL       	$4
+
+	0x25475560:  7D194378  or r25,r8,r8
+	  77: GETL       	R8, t58
+	  78: PUTL       	t58, R25
+	  79: INCEIPL       	$4
+
+	0x25475564:  936100EC  stw r27,236(r1)
+	  80: GETL       	R27, t60
+	  81: GETL       	R1, t62
+	  82: ADDL       	$0xEC, t62
+	  83: STL       	t60, (t62)
+	  84: INCEIPL       	$4
+
+	0x25475568:  3B400000  li r26,0
+	  85: MOVL       	$0x0, t64
+	  86: PUTL       	t64, R26
+	  87: INCEIPL       	$4
+
+	0x2547556C:  938100F0  stw r28,240(r1)
+	  88: GETL       	R28, t66
+	  89: GETL       	R1, t68
+	  90: ADDL       	$0xF0, t68
+	  91: STL       	t66, (t68)
+	  92: INCEIPL       	$4
+
+	0x25475570:  7CBC2B78  or r28,r5,r5
+	  93: GETL       	R5, t70
+	  94: PUTL       	t70, R28
+	  95: INCEIPL       	$4
+
+	0x25475574:  91C100B8  stw r14,184(r1)
+	  96: GETL       	R14, t72
+	  97: GETL       	R1, t74
+	  98: ADDL       	$0xB8, t74
+	  99: STL       	t72, (t74)
+	 100: INCEIPL       	$4
+
+	0x25475578:  38BF0028  addi r5,r31,40
+	 101: GETL       	R31, t76
+	 102: ADDL       	$0x28, t76
+	 103: PUTL       	t76, R5
+	 104: INCEIPL       	$4
+
+	0x2547557C:  91E100BC  stw r15,188(r1)
+	 105: GETL       	R15, t78
+	 106: GETL       	R1, t80
+	 107: ADDL       	$0xBC, t80
+	 108: STL       	t78, (t80)
+	 109: INCEIPL       	$4
+
+	0x25475580:  922100C4  stw r17,196(r1)
+	 110: GETL       	R17, t82
+	 111: GETL       	R1, t84
+	 112: ADDL       	$0xC4, t84
+	 113: STL       	t82, (t84)
+	 114: INCEIPL       	$4
+
+	0x25475584:  924100C8  stw r18,200(r1)
+	 115: GETL       	R18, t86
+	 116: GETL       	R1, t88
+	 117: ADDL       	$0xC8, t88
+	 118: STL       	t86, (t88)
+	 119: INCEIPL       	$4
+
+	0x25475588:  92A100D4  stw r21,212(r1)
+	 120: GETL       	R21, t90
+	 121: GETL       	R1, t92
+	 122: ADDL       	$0xD4, t92
+	 123: STL       	t90, (t92)
+	 124: INCEIPL       	$4
+
+	0x2547558C:  930100E0  stw r24,224(r1)
+	 125: GETL       	R24, t94
+	 126: GETL       	R1, t96
+	 127: ADDL       	$0xE0, t96
+	 128: STL       	t94, (t96)
+	 129: INCEIPL       	$4
+
+	0x25475590:  93A100F4  stw r29,244(r1)
+	 130: GETL       	R29, t98
+	 131: GETL       	R1, t100
+	 132: ADDL       	$0xF4, t100
+	 133: STL       	t98, (t100)
+	 134: INCEIPL       	$4
+
+	0x25475594:  918100B4  stw r12,180(r1)
+	 135: GETL       	R12, t102
+	 136: GETL       	R1, t104
+	 137: ADDL       	$0xB4, t104
+	 138: STL       	t102, (t104)
+	 139: INCEIPL       	$4
+
+	0x25475598:  915F0098  stw r10,152(r31)
+	 140: GETL       	R10, t106
+	 141: GETL       	R31, t108
+	 142: ADDL       	$0x98, t108
+	 143: STL       	t106, (t108)
+	 144: INCEIPL       	$4
+
+	0x2547559C:  837F0108  lwz r27,264(r31)
+	 145: GETL       	R31, t110
+	 146: ADDL       	$0x108, t110
+	 147: LDL       	(t110), t112
+	 148: PUTL       	t112, R27
+	 149: INCEIPL       	$4
+
+	0x254755A0:  4800C905  bl 0x25481EA4
+	 150: MOVL       	$0x254755A4, t114
+	 151: PUTL       	t114, LR
+	 152: JMPo-c       	$0x25481EA4  ($4)
+
+
+
+. 449 25475514 144
+. 7D 80 00 26 93 C1 00 F8 7F C8 02 A6 93 E1 00 FC 7C 3F 0B 78 90 01 01 04 92 01 00 C0 7C D0 33 78 92 61 00 CC 7C 93 23 78 92 81 00 D0 7C 74 1B 78 92 C1 00 D8 38 60 00 03 92 E1 00 DC 7D 36 4B 78 93 21 00 E4 7C F7 3B 78 93 41 00 E8 7D 19 43 78 93 61 00 EC 3B 40 00 00 93 81 00 F0 7C BC 2B 78 91 C1 00 B8 38 BF 00 28 91 E1 00 BC 92 21 00 C4 92 41 00 C8 92 A1 00 D4 93 01 00 E0 93 A1 00 F4 91 81 00 B4 91 5F 00 98 83 7F 01 08 48 00 C9 05
+==== BB 450 __GI___fxstat64(0x25481EA4) approx BBs exec'd 0 ====
+
+	0x25481EA4:  9421FF80  stwu r1,-128(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF80, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25481EA8:  7CC802A6  mflr r6
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x25481EAC:  48015155  bl 0x25497000
+	   9: MOVL       	$0x25481EB0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 450 25481EA4 12
+. 94 21 FF 80 7C C8 02 A6 48 01 51 55
+==== BB 451 (0x25481EB0) approx BBs exec'd 0 ====
+
+	0x25481EB0:  93C10078  stw r30,120(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x78, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25481EB4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25481EB8:  9361006C  stw r27,108(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x6C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25481EBC:  90C10084  stw r6,132(r1)
+	  13: GETL       	R6, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x84, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25481EC0:  93210064  stw r25,100(r1)
+	  18: GETL       	R25, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x64, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25481EC4:  7C791B78  or r25,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0x25481EC8:  837E04EC  lwz r27,1260(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4EC, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0x25481ECC:  93810070  stw r28,112(r1)
+	  31: GETL       	R28, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x70, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x25481ED0:  7CBC2B78  or r28,r5,r5
+	  36: GETL       	R5, t28
+	  37: PUTL       	t28, R28
+	  38: INCEIPL       	$4
+
+	0x25481ED4:  801B0000  lwz r0,0(r27)
+	  39: GETL       	R27, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0x25481ED8:  93E1007C  stw r31,124(r1)
+	  43: GETL       	R31, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x7C, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x25481EDC:  7C9F2378  or r31,r4,r4
+	  48: GETL       	R4, t38
+	  49: PUTL       	t38, R31
+	  50: INCEIPL       	$4
+
+	0x25481EE0:  2F800000  cmpi cr7,r0,0
+	  51: GETL       	R0, t40
+	  52: CMP0L       	t40, t42  (-rSo)
+	  53: ICRFL       	t42, $0x7, CR
+	  54: INCEIPL       	$4
+
+	0x25481EE4:  93410068  stw r26,104(r1)
+	  55: GETL       	R26, t44
+	  56: GETL       	R1, t46
+	  57: ADDL       	$0x68, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0x25481EE8:  93A10074  stw r29,116(r1)
+	  60: GETL       	R29, t48
+	  61: GETL       	R1, t50
+	  62: ADDL       	$0x74, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0x25481EEC:  409E0048  bc 4,30,0x25481F34
+	  65: Jc30o       	$0x25481F34
+
+
+
+. 451 25481EB0 64
+. 93 C1 00 78 7F C8 02 A6 93 61 00 6C 90 C1 00 84 93 21 00 64 7C 79 1B 78 83 7E 04 EC 93 81 00 70 7C BC 2B 78 80 1B 00 00 93 E1 00 7C 7C 9F 23 78 2F 80 00 00 93 41 00 68 93 A1 00 74 40 9E 00 48
+==== BB 452 (0x25481EF0) approx BBs exec'd 0 ====
+
+	0x25481EF0:  83BE0514  lwz r29,1300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x514, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25481EF4:  7C832378  or r3,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25481EF8:  380000C5  li r0,197
+	   8: MOVL       	$0xC5, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x25481EFC:  7CA42B78  or r4,r5,r5
+	  11: GETL       	R5, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x25481F00:  835D0000  lwz r26,0(r29)
+	  14: GETL       	R29, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R26
+	  17: INCEIPL       	$4
+
+	0x25481F04:  44000002  sc
+	  18: JMPo-sys       	$0x25481F08  ($4)
+
+
+
+. 452 25481EF0 24
+. 83 BE 05 14 7C 83 23 78 38 00 00 C5 7C A4 2B 78 83 5D 00 00 44 00 00 02
+==== BB 453 (0x25481F08) approx BBs exec'd 0 ====
+
+	0x25481F08:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25481F0C:  74091000  andis. r9,r0,0x1000
+	   3: GETL       	R0, t2
+	   4: ANDL       	$0x10000000, t2
+	   5: PUTL       	t2, R9
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25481F10:  408200C4  bc 4,2,0x25481FD4
+	   9: Jc02o       	$0x25481FD4
+
+
+
+. 453 25481F08 12
+. 7C 00 00 26 74 09 10 00 40 82 00 C4
+==== BB 454 (0x25481F14) approx BBs exec'd 0 ====
+
+	0x25481F14:  2C83FFFF  cmpi cr1,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25481F18:  40A60044  bc 5,6,0x25481F5C
+	   5: Jc06o       	$0x25481F5C
+
+
+
+. 454 25481F14 8
+. 2C 83 FF FF 40 A6 00 44
+==== BB 455 (0x25481F5C) approx BBs exec'd 0 ====
+
+	0x25481F5C:  80A10084  lwz r5,132(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25481F60:  83210064  lwz r25,100(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x64, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x25481F64:  83410068  lwz r26,104(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x68, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0x25481F68:  7CA803A6  mtlr r5
+	  15: GETL       	R5, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x25481F6C:  8361006C  lwz r27,108(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x6C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R27
+	  22: INCEIPL       	$4
+
+	0x25481F70:  83810070  lwz r28,112(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x70, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R28
+	  27: INCEIPL       	$4
+
+	0x25481F74:  83A10074  lwz r29,116(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x74, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R29
+	  32: INCEIPL       	$4
+
+	0x25481F78:  83C10078  lwz r30,120(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x78, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R30
+	  37: INCEIPL       	$4
+
+	0x25481F7C:  83E1007C  lwz r31,124(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x7C, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R31
+	  42: INCEIPL       	$4
+
+	0x25481F80:  38210080  addi r1,r1,128
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x80, t34
+	  45: PUTL       	t34, R1
+	  46: INCEIPL       	$4
+
+	0x25481F84:  4E800020  blr
+	  47: GETL       	LR, t36
+	  48: JMPo-r       	t36  ($4)
+
+
+
+. 455 25481F5C 44
+. 80 A1 00 84 83 21 00 64 83 41 00 68 7C A8 03 A6 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C 38 21 00 80 4E 80 00 20
+==== BB 456 (0x254755A4) approx BBs exec'd 0 ====
+
+	0x254755A4:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x254755A8:  935F00A0  stw r26,160(r31)
+	   4: GETL       	R26, t4
+	   5: GETL       	R31, t6
+	   6: ADDL       	$0xA0, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x254755AC:  419C082C  bc 12,28,0x25475DD8
+	   9: Js28o       	$0x25475DD8
+
+
+
+. 456 254755A4 12
+. 2F 83 00 00 93 5F 00 A0 41 9C 08 2C
+==== BB 457 (0x254755B0) approx BBs exec'd 0 ====
+
+	0x254755B0:  1CBB0018  mulli r5,r27,24
+	   0: GETL       	R27, t0
+	   1: MULL       	$0x18, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x254755B4:  809E04C8  lwz r4,1224(r30)
+	   4: GETL       	R30, t2
+	   5: ADDL       	$0x4C8, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x254755B8:  7FA5202E  lwzx r29,r5,r4
+	   9: GETL       	R4, t6
+	  10: GETL       	R5, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0x254755BC:  2C1D0000  cmpi cr0,r29,0
+	  15: GETL       	R29, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x254755C0:  41820148  bc 12,2,0x25475708
+	  19: Js02o       	$0x25475708
+
+
+
+. 457 254755B0 20
+. 1C BB 00 18 80 9E 04 C8 7F A5 20 2E 2C 1D 00 00 41 82 01 48
+==== BB 458 (0x254755C4) approx BBs exec'd 0 ====
+
+	0x254755C4:  817F0030  lwz r11,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254755C8:  3B400000  li r26,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x254755CC:  48000010  b 0x254755DC
+	   8: JMPo       	$0x254755DC  ($4)
+
+
+
+. 458 254755C4 12
+. 81 7F 00 30 3B 40 00 00 48 00 00 10
+==== BB 459 (0x254755DC) approx BBs exec'd 0 ====
+
+	0x254755DC:  80DD01D8  lwz r6,472(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1D8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254755E0:  7C865800  cmp cr1,r6,r11
+	   5: GETL       	R6, t4
+	   6: GETL       	R11, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x254755E4:  4086FFEC  bc 4,6,0x254755D0
+	  10: Jc06o       	$0x254755D0
+
+
+
+. 459 254755DC 12
+. 80 DD 01 D8 7C 86 58 00 40 86 FF EC
+==== BB 460 (0x254755E8) approx BBs exec'd 0 ====
+
+	0x254755E8:  811D01DC  lwz r8,476(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1DC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254755EC:  80FF0034  lwz r7,52(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x34, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x254755F0:  7E083800  cmp cr4,r8,r7
+	  10: GETL       	R8, t8
+	  11: GETL       	R7, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x4, CR
+	  14: INCEIPL       	$4
+
+	0x254755F4:  4092FFDC  bc 4,18,0x254755D0
+	  15: Jc18o       	$0x254755D0
+
+
+
+. 460 254755E8 16
+. 81 1D 01 DC 80 FF 00 34 7E 08 38 00 40 92 FF DC
+==== BB 461 (0x254755D0) approx BBs exec'd 0 ====
+
+	0x254755D0:  83BD000C  lwz r29,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254755D4:  2E1D0000  cmpi cr4,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254755D8:  41920130  bc 12,18,0x25475708
+	   9: Js18o       	$0x25475708
+
+
+
+. 461 254755D0 12
+. 83 BD 00 0C 2E 1D 00 00 41 92 01 30
+==== BB 462 (0x25475708) approx BBs exec'd 0 ====
+
+	0x25475708:  2F1B0000  cmpi cr6,r27,0
+	   0: GETL       	R27, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547570C:  409A06E0  bc 4,26,0x25475DEC
+	   4: Jc26o       	$0x25475DEC
+
+
+
+. 462 25475708 8
+. 2F 1B 00 00 40 9A 06 E0
+==== BB 463 (0x25475710) approx BBs exec'd 0 ====
+
+	0x25475710:  72C00004  andi. r0,r22,0x4
+	   0: GETL       	R22, t0
+	   1: ANDL       	$0x4, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475714:  38600000  li r3,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x25475718:  40A2FF8C  bc 5,2,0x254756A4
+	   9: Jc02o       	$0x254756A4
+
+
+
+. 463 25475710 12
+. 72 C0 00 04 38 60 00 00 40 A2 FF 8C
+==== BB 464 (0x2547571C) approx BBs exec'd 0 ====
+
+	0x2547571C:  823E04F4  lwz r17,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0x25475720:  80910000  lwz r4,0(r17)
+	   5: GETL       	R17, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x25475724:  70890040  andi. r9,r4,0x40
+	   9: GETL       	R4, t8
+	  10: ANDL       	$0x40, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25475728:  40820738  bc 4,2,0x25475E60
+	  15: Jc02o       	$0x25475E60
+
+
+
+. 464 2547571C 16
+. 82 3E 04 F4 80 91 00 00 70 89 00 40 40 82 07 38
+==== BB 465 (0x2547572C) approx BBs exec'd 0 ====
+
+	0x2547572C:  7F25CB78  or r5,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x25475730:  7F68DB78  or r8,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0x25475734:  7E038378  or r3,r16,r16
+	   6: GETL       	R16, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x25475738:  7E84A378  or r4,r20,r20
+	   9: GETL       	R20, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0x2547573C:  7EE6BB78  or r6,r23,r23
+	  12: GETL       	R23, t8
+	  13: PUTL       	t8, R6
+	  14: INCEIPL       	$4
+
+	0x25475740:  7EC7B378  or r7,r22,r22
+	  15: GETL       	R22, t10
+	  16: PUTL       	t10, R7
+	  17: INCEIPL       	$4
+
+	0x25475744:  480039B1  bl 0x254790F4
+	  18: MOVL       	$0x25475748, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0x254790F4  ($4)
+
+
+
+. 465 2547572C 28
+. 7F 25 CB 78 7F 68 DB 78 7E 03 83 78 7E 84 A3 78 7E E6 BB 78 7E C7 B3 78 48 00 39 B1
+==== BB 466 (0x25479318) approx BBs exec'd 0 ====
+
+	0x25479318:  7D695B78  or r9,r11,r11
+	   0: GETL       	R11, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x2547931C:  48000008  b 0x25479324
+	   3: JMPo       	$0x25479324  ($4)
+
+
+
+. 466 25479318 8
+. 7D 69 5B 78 48 00 00 08
+==== BB 467 (0x25479324) approx BBs exec'd 0 ====
+
+	0x25479324:  8009000C  lwz r0,12(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25479328:  2C800000  cmpi cr1,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547932C:  4086FFF4  bc 4,6,0x25479320
+	   9: Jc06o       	$0x25479320
+
+
+
+. 467 25479324 12
+. 80 09 00 0C 2C 80 00 00 40 86 FF F4
+==== BB 468 (0x25479320) approx BBs exec'd 0 ====
+
+	0x25479320:  7C090378  or r9,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25479324:  8009000C  lwz r0,12(r9)
+	   3: GETL       	R9, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25479328:  2C800000  cmpi cr1,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x2547932C:  4086FFF4  bc 4,6,0x25479320
+	  12: Jc06o       	$0x25479320
+
+
+
+. 468 25479320 16
+. 7C 09 03 78 80 09 00 0C 2C 80 00 00 40 86 FF F4
+==== BB 469 (0x25479330) approx BBs exec'd 0 ====
+
+	0x25479330:  93E9000C  stw r31,12(r9)
+	   0: GETL       	R31, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25479334:  2F1C0000  cmpi cr6,r28,0
+	   5: GETL       	R28, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25479338:  913F0010  stw r9,16(r31)
+	   9: GETL       	R9, t8
+	  10: GETL       	R31, t10
+	  11: ADDL       	$0x10, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x2547933C:  38800001  li r4,1
+	  14: MOVL       	$0x1, t12
+	  15: PUTL       	t12, R4
+	  16: INCEIPL       	$4
+
+	0x25479340:  7D0A282E  lwzx r8,r10,r5
+	  17: GETL       	R5, t14
+	  18: GETL       	R10, t16
+	  19: ADDL       	t16, t14
+	  20: LDL       	(t14), t18
+	  21: PUTL       	t18, R8
+	  22: INCEIPL       	$4
+
+	0x25479344:  7D4A2A14  add r10,r10,r5
+	  23: GETL       	R10, t20
+	  24: GETL       	R5, t22
+	  25: ADDL       	t20, t22
+	  26: PUTL       	t22, R10
+	  27: INCEIPL       	$4
+
+	0x25479348:  832A0004  lwz r25,4(r10)
+	  28: GETL       	R10, t24
+	  29: ADDL       	$0x4, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R25
+	  32: INCEIPL       	$4
+
+	0x2547934C:  81650198  lwz r11,408(r5)
+	  33: GETL       	R5, t28
+	  34: ADDL       	$0x198, t28
+	  35: LDL       	(t28), t30
+	  36: PUTL       	t30, R11
+	  37: INCEIPL       	$4
+
+	0x25479350:  38E80158  addi r7,r8,344
+	  38: GETL       	R8, t32
+	  39: ADDL       	$0x158, t32
+	  40: PUTL       	t32, R7
+	  41: INCEIPL       	$4
+
+	0x25479354:  8185019C  lwz r12,412(r5)
+	  42: GETL       	R5, t34
+	  43: ADDL       	$0x19C, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R12
+	  46: INCEIPL       	$4
+
+	0x25479358:  3B190001  addi r24,r25,1
+	  47: GETL       	R25, t38
+	  48: ADDL       	$0x1, t38
+	  49: PUTL       	t38, R24
+	  50: INCEIPL       	$4
+
+	0x2547935C:  90E60000  stw r7,0(r6)
+	  51: GETL       	R7, t40
+	  52: GETL       	R6, t42
+	  53: STL       	t40, (t42)
+	  54: INCEIPL       	$4
+
+	0x25479360:  310C0001  addic r8,r12,1
+	  55: GETL       	R12, t44
+	  56: ADCL       	$0x1, t44  (-wCa)
+	  57: PUTL       	t44, R8
+	  58: INCEIPL       	$4
+
+	0x25479364:  7CEB0194  addze r7,r11
+	  59: GETL       	R11, t46
+	  60: ADCL       	$0x0, t46  (-rCa-wCa)
+	  61: PUTL       	t46, R7
+	  62: INCEIPL       	$4
+
+	0x25479368:  930A0004  stw r24,4(r10)
+	  63: GETL       	R24, t48
+	  64: GETL       	R10, t50
+	  65: ADDL       	$0x4, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x2547936C:  90E50198  stw r7,408(r5)
+	  68: GETL       	R7, t52
+	  69: GETL       	R5, t54
+	  70: ADDL       	$0x198, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x25479370:  9105019C  stw r8,412(r5)
+	  73: GETL       	R8, t56
+	  74: GETL       	R5, t58
+	  75: ADDL       	$0x19C, t58
+	  76: STL       	t56, (t58)
+	  77: INCEIPL       	$4
+
+	0x25479374:  409AFEA8  bc 4,26,0x2547921C
+	  78: Jc26o       	$0x2547921C
+
+
+
+. 469 25479330 72
+. 93 E9 00 0C 2F 1C 00 00 91 3F 00 10 38 80 00 01 7D 0A 28 2E 7D 4A 2A 14 83 2A 00 04 81 65 01 98 38 E8 01 58 81 85 01 9C 3B 19 00 01 90 E6 00 00 31 0C 00 01 7C EB 01 94 93 0A 00 04 90 E5 01 98 91 05 01 9C 40 9A FE A8
+==== BB 470 (0x2547921C) approx BBs exec'd 0 ====
+
+	0x2547921C:  801C0168  lwz r0,360(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x168, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25479220:  2C000000  cmpi cr0,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25479224:  4082FFF4  bc 4,2,0x25479218
+	   9: Jc02o       	$0x25479218
+
+
+
+. 470 2547921C 12
+. 80 1C 01 68 2C 00 00 00 40 82 FF F4
+==== BB 471 (0x25479234) approx BBs exec'd 0 ====
+
+	0x25479234:  81660000  lwz r11,0(r6)
+	   0: GETL       	R6, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25479238:  7C895800  cmp cr1,r9,r11
+	   4: GETL       	R9, t4
+	   5: GETL       	R11, t6
+	   6: CMPL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547923C:  41860024  bc 12,6,0x25479260
+	   9: Js06o       	$0x25479260
+
+
+
+. 471 25479234 12
+. 81 66 00 00 7C 89 58 00 41 86 00 24
+==== BB 472 (0x25479260) approx BBs exec'd 0 ====
+
+	0x25479260:  3B7F0158  addi r27,r31,344
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x158, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25479264:  937F01C4  stw r27,452(r31)
+	   4: GETL       	R27, t2
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x1C4, t4
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x25479268:  8BB70000  lbz r29,0(r23)
+	   9: GETL       	R23, t6
+	  10: LDB       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x2547926C:  2F1D0000  cmpi cr6,r29,0
+	  13: GETL       	R29, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0x25479270:  419A006C  bc 12,26,0x254792DC
+	  17: Js26o       	$0x254792DC
+
+
+
+. 472 25479260 20
+. 3B 7F 01 58 93 7F 01 C4 8B B7 00 00 2F 1D 00 00 41 9A 00 6C
+==== BB 473 (0x25479274) approx BBs exec'd 0 ====
+
+	0x25479274:  7EE3BB78  or r3,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25479278:  3B800000  li r28,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R28
+	   5: INCEIPL       	$4
+
+	0x2547927C:  48009D55  bl 0x25482FD0
+	   6: MOVL       	$0x25479280, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25482FD0  ($4)
+
+
+
+. 473 25479274 12
+. 7E E3 BB 78 3B 80 00 00 48 00 9D 55
+==== BB 474 (0x25479280) approx BBs exec'd 0 ====
+
+	0x25479280:  2F9D002F  cmpi cr7,r29,47
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x2F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25479284:  3B630001  addi r27,r3,1
+	   5: GETL       	R3, t6
+	   6: ADDL       	$0x1, t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0x25479288:  835E0514  lwz r26,1300(r30)
+	   9: GETL       	R30, t8
+	  10: ADDL       	$0x514, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R26
+	  13: INCEIPL       	$4
+
+	0x2547928C:  7F7DDB78  or r29,r27,r27
+	  14: GETL       	R27, t12
+	  15: PUTL       	t12, R29
+	  16: INCEIPL       	$4
+
+	0x25479290:  419E0154  bc 12,30,0x254793E4
+	  17: Js30o       	$0x254793E4
+
+
+
+. 474 25479280 20
+. 2F 9D 00 2F 3B 63 00 01 83 5E 05 14 7F 7D DB 78 41 9E 01 54
+==== BB 475 (0x254793E4) approx BBs exec'd 0 ====
+
+	0x254793E4:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254793E8:  4801E63D  bl 0x25497A24
+	   3: MOVL       	$0x254793EC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 475 254793E4 8
+. 7F 63 DB 78 48 01 E6 3D
+==== BB 476 (0x254793EC) approx BBs exec'd 0 ====
+
+	0x254793EC:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x254793F0:  7C7C1B78  or r28,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R28
+	   6: INCEIPL       	$4
+
+	0x254793F4:  4086FFB0  bc 4,6,0x254793A4
+	   7: Jc06o       	$0x254793A4
+
+
+
+. 476 254793EC 12
+. 2C 83 00 00 7C 7C 1B 78 40 86 FF B0
+==== BB 477 (0x254793A4) approx BBs exec'd 0 ====
+
+	0x254793A4:  7EE4BB78  or r4,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254793A8:  7F65DB78  or r5,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x254793AC:  4800A675  bl 0x25483A20
+	   6: MOVL       	$0x254793B0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483A20  ($4)
+
+
+
+. 477 254793A4 12
+. 7E E4 BB 78 7F 65 DB 78 48 00 A6 75
+==== BB 478 (0x254793B0) approx BBs exec'd 0 ====
+
+	0x254793B0:  8EE3FFFF  lbzu r23,-1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R23
+	   5: INCEIPL       	$4
+
+	0x254793B4:  2C17002F  cmpi cr0,r23,47
+	   6: GETL       	R23, t4
+	   7: MOVL       	$0x2F, t8
+	   8: CMPL       	t4, t8, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x254793B8:  4082FFF8  bc 4,2,0x254793B0
+	  11: Jc02o       	$0x254793B0
+
+
+
+. 478 254793B0 12
+. 8E E3 FF FF 2C 17 00 2F 40 82 FF F8
+==== BB 479 (0x254793BC) approx BBs exec'd 0 ====
+
+	0x254793BC:  7F03E000  cmp cr6,r3,r28
+	   0: GETL       	R3, t0
+	   1: GETL       	R28, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x254793C0:  419A0014  bc 12,26,0x254793D4
+	   5: Js26o       	$0x254793D4
+
+
+
+. 479 254793BC 8
+. 7F 03 E0 00 41 9A 00 14
+==== BB 480 (0x254793C4) approx BBs exec'd 0 ====
+
+	0x254793C4:  38C00000  li r6,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x254793C8:  98C30000  stb r6,0(r3)
+	   3: GETL       	R6, t2
+	   4: GETL       	R3, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0x254793CC:  939F019C  stw r28,412(r31)
+	   7: GETL       	R28, t6
+	   8: GETL       	R31, t8
+	   9: ADDL       	$0x19C, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x254793D0:  4BFFFF0C  b 0x254792DC
+	  12: JMPo       	$0x254792DC  ($4)
+
+
+
+. 480 254793C4 16
+. 38 C0 00 00 98 C3 00 00 93 9F 01 9C 4B FF FF 0C
+==== BB 481 (0x25475748) approx BBs exec'd 0 ====
+
+	0x25475748:  3B1C0004  addi r24,r28,4
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0x2547574C:  7C7A1B79  or. r26,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R26
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25475750:  41820708  bc 12,2,0x25475E58
+	   9: Js02o       	$0x25475E58
+
+
+
+. 481 25475748 12
+. 3B 1C 00 04 7C 7A 1B 79 41 82 07 08
+==== BB 482 (0x25475754) approx BBs exec'd 0 ====
+
+	0x25475754:  A1F8002C  lhz r15,44(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x2C, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0x25475758:  81D80018  lwz r14,24(r24)
+	   5: GETL       	R24, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R14
+	   9: INCEIPL       	$4
+
+	0x2547575C:  A1580010  lhz r10,16(r24)
+	  10: GETL       	R24, t8
+	  11: ADDL       	$0x10, t8
+	  12: LDW       	(t8), t10
+	  13: PUTL       	t10, R10
+	  14: INCEIPL       	$4
+
+	0x25475760:  B1FA0154  sth r15,340(r26)
+	  15: GETL       	R15, t12
+	  16: GETL       	R26, t14
+	  17: ADDL       	$0x154, t14
+	  18: STW       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25475764:  91DA0150  stw r14,336(r26)
+	  20: GETL       	R14, t16
+	  21: GETL       	R26, t18
+	  22: ADDL       	$0x150, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25475768:  A118002C  lhz r8,44(r24)
+	  25: GETL       	R24, t20
+	  26: ADDL       	$0x2C, t20
+	  27: LDW       	(t20), t22
+	  28: PUTL       	t22, R8
+	  29: INCEIPL       	$4
+
+	0x2547576C:  8378001C  lwz r27,28(r24)
+	  30: GETL       	R24, t24
+	  31: ADDL       	$0x1C, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R27
+	  34: INCEIPL       	$4
+
+	0x25475770:  551D2834  rlwinm r29,r8,5,0,26
+	  35: GETL       	R8, t28
+	  36: SHLL       	$0x5, t28
+	  37: PUTL       	t28, R29
+	  38: INCEIPL       	$4
+
+	0x25475774:  80DC0000  lwz r6,0(r28)
+	  39: GETL       	R28, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R6
+	  42: INCEIPL       	$4
+
+	0x25475778:  7CFBEA14  add r7,r27,r29
+	  43: GETL       	R27, t34
+	  44: GETL       	R29, t36
+	  45: ADDL       	t34, t36
+	  46: PUTL       	t36, R7
+	  47: INCEIPL       	$4
+
+	0x2547577C:  7F3BE214  add r25,r27,r28
+	  48: GETL       	R27, t38
+	  49: GETL       	R28, t40
+	  50: ADDL       	t38, t40
+	  51: PUTL       	t40, R25
+	  52: INCEIPL       	$4
+
+	0x25475780:  7F873040  cmpl cr7,r7,r6
+	  53: GETL       	R7, t42
+	  54: GETL       	R6, t44
+	  55: CMPUL       	t42, t44, t46  (-rSo)
+	  56: ICRFL       	t46, $0x7, CR
+	  57: INCEIPL       	$4
+
+	0x25475784:  915F009C  stw r10,156(r31)
+	  58: GETL       	R10, t48
+	  59: GETL       	R31, t50
+	  60: ADDL       	$0x9C, t50
+	  61: STL       	t48, (t50)
+	  62: INCEIPL       	$4
+
+	0x25475788:  3B790004  addi r27,r25,4
+	  63: GETL       	R25, t52
+	  64: ADDL       	$0x4, t52
+	  65: PUTL       	t52, R27
+	  66: INCEIPL       	$4
+
+	0x2547578C:  419D0438  bc 12,29,0x25475BC4
+	  67: Js29o       	$0x25475BC4
+
+
+
+. 482 25475754 60
+. A1 F8 00 2C 81 D8 00 18 A1 58 00 10 B1 FA 01 54 91 DA 01 50 A1 18 00 2C 83 78 00 1C 55 1D 28 34 80 DC 00 00 7C FB EA 14 7F 3B E2 14 7F 87 30 40 91 5F 00 9C 3B 79 00 04 41 9D 04 38
+==== BB 483 (0x25475790) approx BBs exec'd 0 ====
+
+	0x25475790:  A11A0154  lhz r8,340(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x154, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25475794:  7C320B78  or r18,r1,r1
+	   5: GETL       	R1, t4
+	   6: PUTL       	t4, R18
+	   7: INCEIPL       	$4
+
+	0x25475798:  80810000  lwz r4,0(r1)
+	   8: GETL       	R1, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0x2547579C:  3BA00007  li r29,7
+	  12: MOVL       	$0x7, t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0x254757A0:  1CE80018  mulli r7,r8,24
+	  15: GETL       	R8, t12
+	  16: MULL       	$0x18, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x254757A4:  3B800000  li r28,0
+	  19: MOVL       	$0x0, t14
+	  20: PUTL       	t14, R28
+	  21: INCEIPL       	$4
+
+	0x254757A8:  38C7001E  addi r6,r7,30
+	  22: GETL       	R7, t16
+	  23: ADDL       	$0x1E, t16
+	  24: PUTL       	t16, R6
+	  25: INCEIPL       	$4
+
+	0x254757AC:  54D902B6  rlwinm r25,r6,0,10,27
+	  26: GETL       	R6, t18
+	  27: ANDL       	$0x3FFFF0, t18
+	  28: PUTL       	t18, R25
+	  29: INCEIPL       	$4
+
+	0x254757B0:  7CB900D0  neg r5,r25
+	  30: GETL       	R25, t20
+	  31: NEGL       	t20
+	  32: PUTL       	t20, R5
+	  33: INCEIPL       	$4
+
+	0x254757B4:  7C81296E  stwux r4,r1,r5
+	  34: GETL       	R5, t22
+	  35: GETL       	R1, t24
+	  36: ADDL       	t24, t22
+	  37: PUTL       	t22, R1
+	  38: GETL       	R4, t26
+	  39: STL       	t26, (t22)
+	  40: INCEIPL       	$4
+
+	0x254757B8:  A0FA0154  lhz r7,340(r26)
+	  41: GETL       	R26, t28
+	  42: ADDL       	$0x154, t28
+	  43: LDW       	(t28), t30
+	  44: PUTL       	t30, R7
+	  45: INCEIPL       	$4
+
+	0x254757BC:  3861002F  addi r3,r1,47
+	  46: GETL       	R1, t32
+	  47: ADDL       	$0x2F, t32
+	  48: PUTL       	t32, R3
+	  49: INCEIPL       	$4
+
+	0x254757C0:  93BF00A4  stw r29,164(r31)
+	  50: GETL       	R29, t34
+	  51: GETL       	R31, t36
+	  52: ADDL       	$0xA4, t36
+	  53: STL       	t34, (t36)
+	  54: INCEIPL       	$4
+
+	0x254757C4:  54750036  rlwinm r21,r3,0,0,27
+	  55: GETL       	R3, t38
+	  56: ANDL       	$0xFFFFFFF0, t38
+	  57: PUTL       	t38, R21
+	  58: INCEIPL       	$4
+
+	0x254757C8:  54E92834  rlwinm r9,r7,5,0,26
+	  59: GETL       	R7, t40
+	  60: SHLL       	$0x5, t40
+	  61: PUTL       	t40, R9
+	  62: INCEIPL       	$4
+
+	0x254757CC:  939F00A8  stw r28,168(r31)
+	  63: GETL       	R28, t42
+	  64: GETL       	R31, t44
+	  65: ADDL       	$0xA8, t44
+	  66: STL       	t42, (t44)
+	  67: INCEIPL       	$4
+
+	0x254757D0:  7C09DA14  add r0,r9,r27
+	  68: GETL       	R9, t46
+	  69: GETL       	R27, t48
+	  70: ADDL       	t46, t48
+	  71: PUTL       	t48, R0
+	  72: INCEIPL       	$4
+
+	0x254757D4:  7F7DDB78  or r29,r27,r27
+	  73: GETL       	R27, t50
+	  74: PUTL       	t50, R29
+	  75: INCEIPL       	$4
+
+	0x254757D8:  7E00D840  cmpl cr4,r0,r27
+	  76: GETL       	R0, t52
+	  77: GETL       	R27, t54
+	  78: CMPUL       	t52, t54, t56  (-rSo)
+	  79: ICRFL       	t56, $0x4, CR
+	  80: INCEIPL       	$4
+
+	0x254757DC:  409103A8  bc 4,17,0x25475B84
+	  81: Jc17o       	$0x25475B84
+
+
+
+. 483 25475790 80
+. A1 1A 01 54 7C 32 0B 78 80 81 00 00 3B A0 00 07 1C E8 00 18 3B 80 00 00 38 C7 00 1E 54 D9 02 B6 7C B9 00 D0 7C 81 29 6E A0 FA 01 54 38 61 00 2F 93 BF 00 A4 54 75 00 36 54 E9 28 34 93 9F 00 A8 7C 09 DA 14 7F 7D DB 78 7E 00 D8 40 40 91 03 A8
+==== BB 484 (0x254757E0) approx BBs exec'd 0 ====
+
+	0x254757E0:  3B200000  li r25,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0x254757E4:  48000028  b 0x2547580C
+	   3: JMPo       	$0x2547580C  ($4)
+
+
+
+. 484 254757E0 8
+. 3B 20 00 00 48 00 00 28
+==== BB 485 (0x2547580C) approx BBs exec'd 0 ====
+
+	0x2547580C:  813D0000  lwz r9,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25475810:  2F090006  cmpi cr6,r9,6
+	   4: GETL       	R9, t4
+	   5: MOVL       	$0x6, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475814:  419A0094  bc 12,26,0x254758A8
+	   9: Js26o       	$0x254758A8
+
+
+
+. 485 2547580C 12
+. 81 3D 00 00 2F 09 00 06 41 9A 00 94
+==== BB 486 (0x25475818) approx BBs exec'd 0 ====
+
+	0x25475818:  28090006  cmpli cr0,r9,6
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547581C:  40A1FFCC  bc 5,1,0x254757E8
+	   5: Jc01o       	$0x254757E8
+
+
+
+. 486 25475818 8
+. 28 09 00 06 40 A1 FF CC
+==== BB 487 (0x254757E8) approx BBs exec'd 0 ====
+
+	0x254757E8:  2F890001  cmpi cr7,r9,1
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254757EC:  419E02C4  bc 12,30,0x25475AB0
+	   5: Js30o       	$0x25475AB0
+
+
+
+. 487 254757E8 8
+. 2F 89 00 01 41 9E 02 C4
+==== BB 488 (0x25475AB0) approx BBs exec'd 0 ====
+
+	0x25475AB0:  81310004  lwz r9,4(r17)
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475AB4:  817D001C  lwz r11,28(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25475AB8:  3909FFFF  addi r8,r9,-1
+	  10: GETL       	R9, t8
+	  11: ADDL       	$0xFFFFFFFF, t8
+	  12: PUTL       	t8, R8
+	  13: INCEIPL       	$4
+
+	0x25475ABC:  7D694039  and. r9,r11,r8
+	  14: GETL       	R11, t10
+	  15: GETL       	R8, t12
+	  16: ANDL       	t10, t12
+	  17: PUTL       	t12, R9
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0x25475AC0:  4082062C  bc 4,2,0x254760EC
+	  21: Jc02o       	$0x254760EC
+
+
+
+. 488 25475AB0 20
+. 81 31 00 04 81 7D 00 1C 39 09 FF FF 7D 69 40 39 40 82 06 2C
+==== BB 489 (0x25475AC4) approx BBs exec'd 0 ====
+
+	0x25475AC4:  815D0008  lwz r10,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25475AC8:  38ABFFFF  addi r5,r11,-1
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25475ACC:  80DD0004  lwz r6,4(r29)
+	   9: GETL       	R29, t6
+	  10: ADDL       	$0x4, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R6
+	  13: INCEIPL       	$4
+
+	0x25475AD0:  7C865050  subf r4,r6,r10
+	  14: GETL       	R6, t10
+	  15: GETL       	R10, t12
+	  16: SUBL       	t10, t12
+	  17: PUTL       	t12, R4
+	  18: INCEIPL       	$4
+
+	0x25475AD4:  7C8B2839  and. r11,r4,r5
+	  19: GETL       	R4, t14
+	  20: GETL       	R5, t16
+	  21: ANDL       	t14, t16
+	  22: PUTL       	t16, R11
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x25475AD8:  40820650  bc 4,2,0x25476128
+	  26: Jc02o       	$0x25476128
+
+
+
+. 489 25475AC4 24
+. 81 5D 00 08 38 AB FF FF 80 DD 00 04 7C 86 50 50 7C 8B 28 39 40 82 06 50
+==== BB 490 (0x25475ADC) approx BBs exec'd 0 ====
+
+	0x25475ADC:  7D4A4078  andc r10,r10,r8
+	   0: GETL       	R10, t0
+	   1: GETL       	R8, t2
+	   2: NOTL       	t2
+	   3: ANDL       	t0, t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0x25475AE0:  3B9C0001  addi r28,r28,1
+	   6: GETL       	R28, t4
+	   7: ADDL       	$0x1, t4
+	   8: PUTL       	t4, R28
+	   9: INCEIPL       	$4
+
+	0x25475AE4:  7D55C92E  stwx r10,r21,r25
+	  10: GETL       	R25, t6
+	  11: GETL       	R21, t8
+	  12: ADDL       	t8, t6
+	  13: GETL       	R10, t10
+	  14: STL       	t10, (t6)
+	  15: INCEIPL       	$4
+
+	0x25475AE8:  289C0001  cmpli cr1,r28,1
+	  16: GETL       	R28, t12
+	  17: MOVL       	$0x1, t16
+	  18: CMPUL       	t12, t16, t14  (-rSo)
+	  19: ICRFL       	t14, $0x1, CR
+	  20: INCEIPL       	$4
+
+	0x25475AEC:  813D0008  lwz r9,8(r29)
+	  21: GETL       	R29, t18
+	  22: ADDL       	$0x8, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R9
+	  25: INCEIPL       	$4
+
+	0x25475AF0:  801D0010  lwz r0,16(r29)
+	  26: GETL       	R29, t22
+	  27: ADDL       	$0x10, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R0
+	  30: INCEIPL       	$4
+
+	0x25475AF4:  81F10004  lwz r15,4(r17)
+	  31: GETL       	R17, t26
+	  32: ADDL       	$0x4, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R15
+	  35: INCEIPL       	$4
+
+	0x25475AF8:  7C690214  add r3,r9,r0
+	  36: GETL       	R9, t30
+	  37: GETL       	R0, t32
+	  38: ADDL       	t30, t32
+	  39: PUTL       	t32, R3
+	  40: INCEIPL       	$4
+
+	0x25475AFC:  7D837A14  add r12,r3,r15
+	  41: GETL       	R3, t34
+	  42: GETL       	R15, t36
+	  43: ADDL       	t34, t36
+	  44: PUTL       	t36, R12
+	  45: INCEIPL       	$4
+
+	0x25475B00:  7DCF00D0  neg r14,r15
+	  46: GETL       	R15, t38
+	  47: NEGL       	t38
+	  48: PUTL       	t38, R14
+	  49: INCEIPL       	$4
+
+	0x25475B04:  396CFFFF  addi r11,r12,-1
+	  50: GETL       	R12, t40
+	  51: ADDL       	$0xFFFFFFFF, t40
+	  52: PUTL       	t40, R11
+	  53: INCEIPL       	$4
+
+	0x25475B08:  7D687038  and r8,r11,r14
+	  54: GETL       	R11, t42
+	  55: GETL       	R14, t44
+	  56: ANDL       	t42, t44
+	  57: PUTL       	t44, R8
+	  58: INCEIPL       	$4
+
+	0x25475B0C:  7D75CA14  add r11,r21,r25
+	  59: GETL       	R21, t46
+	  60: GETL       	R25, t48
+	  61: ADDL       	t46, t48
+	  62: PUTL       	t48, R11
+	  63: INCEIPL       	$4
+
+	0x25475B10:  910B0004  stw r8,4(r11)
+	  64: GETL       	R8, t50
+	  65: GETL       	R11, t52
+	  66: ADDL       	$0x4, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0x25475B14:  3B390018  addi r25,r25,24
+	  69: GETL       	R25, t54
+	  70: ADDL       	$0x18, t54
+	  71: PUTL       	t54, R25
+	  72: INCEIPL       	$4
+
+	0x25475B18:  809D0008  lwz r4,8(r29)
+	  73: GETL       	R29, t56
+	  74: ADDL       	$0x8, t56
+	  75: LDL       	(t56), t58
+	  76: PUTL       	t58, R4
+	  77: INCEIPL       	$4
+
+	0x25475B1C:  80DD0010  lwz r6,16(r29)
+	  78: GETL       	R29, t60
+	  79: ADDL       	$0x10, t60
+	  80: LDL       	(t60), t62
+	  81: PUTL       	t62, R6
+	  82: INCEIPL       	$4
+
+	0x25475B20:  7CA43214  add r5,r4,r6
+	  83: GETL       	R4, t64
+	  84: GETL       	R6, t66
+	  85: ADDL       	t64, t66
+	  86: PUTL       	t66, R5
+	  87: INCEIPL       	$4
+
+	0x25475B24:  90AB0008  stw r5,8(r11)
+	  88: GETL       	R5, t68
+	  89: GETL       	R11, t70
+	  90: ADDL       	$0x8, t70
+	  91: STL       	t68, (t70)
+	  92: INCEIPL       	$4
+
+	0x25475B28:  813D0008  lwz r9,8(r29)
+	  93: GETL       	R29, t72
+	  94: ADDL       	$0x8, t72
+	  95: LDL       	(t72), t74
+	  96: PUTL       	t74, R9
+	  97: INCEIPL       	$4
+
+	0x25475B2C:  801D0014  lwz r0,20(r29)
+	  98: GETL       	R29, t76
+	  99: ADDL       	$0x14, t76
+	 100: LDL       	(t76), t78
+	 101: PUTL       	t78, R0
+	 102: INCEIPL       	$4
+
+	0x25475B30:  7C690214  add r3,r9,r0
+	 103: GETL       	R9, t80
+	 104: GETL       	R0, t82
+	 105: ADDL       	t80, t82
+	 106: PUTL       	t82, R3
+	 107: INCEIPL       	$4
+
+	0x25475B34:  906B000C  stw r3,12(r11)
+	 108: GETL       	R3, t84
+	 109: GETL       	R11, t86
+	 110: ADDL       	$0xC, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x25475B38:  81910004  lwz r12,4(r17)
+	 113: GETL       	R17, t88
+	 114: ADDL       	$0x4, t88
+	 115: LDL       	(t88), t90
+	 116: PUTL       	t90, R12
+	 117: INCEIPL       	$4
+
+	0x25475B3C:  81DD0004  lwz r14,4(r29)
+	 118: GETL       	R29, t92
+	 119: ADDL       	$0x4, t92
+	 120: LDL       	(t92), t94
+	 121: PUTL       	t94, R14
+	 122: INCEIPL       	$4
+
+	0x25475B40:  7DEC00D0  neg r15,r12
+	 123: GETL       	R12, t96
+	 124: NEGL       	t96
+	 125: PUTL       	t96, R15
+	 126: INCEIPL       	$4
+
+	0x25475B44:  7DC87838  and r8,r14,r15
+	 127: GETL       	R14, t98
+	 128: GETL       	R15, t100
+	 129: ANDL       	t98, t100
+	 130: PUTL       	t100, R8
+	 131: INCEIPL       	$4
+
+	0x25475B48:  910B0010  stw r8,16(r11)
+	 132: GETL       	R8, t102
+	 133: GETL       	R11, t104
+	 134: ADDL       	$0x10, t104
+	 135: STL       	t102, (t104)
+	 136: INCEIPL       	$4
+
+	0x25475B4C:  40850018  bc 4,5,0x25475B64
+	 137: Jc05o       	$0x25475B64
+
+
+
+. 490 25475ADC 116
+. 7D 4A 40 78 3B 9C 00 01 7D 55 C9 2E 28 9C 00 01 81 3D 00 08 80 1D 00 10 81 F1 00 04 7C 69 02 14 7D 83 7A 14 7D CF 00 D0 39 6C FF FF 7D 68 70 38 7D 75 CA 14 91 0B 00 04 3B 39 00 18 80 9D 00 08 80 DD 00 10 7C A4 32 14 90 AB 00 08 81 3D 00 08 80 1D 00 14 7C 69 02 14 90 6B 00 0C 81 91 00 04 81 DD 00 04 7D EC 00 D0 7D C8 78 38 91 0B 00 10 40 85 00 18
+==== BB 491 (0x25475B64) approx BBs exec'd 0 ====
+
+	0x25475B64:  807D0018  lwz r3,24(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475B68:  3D807351  lis r12,29521
+	   5: MOVL       	$0x73510000, t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x25475B6C:  618E6240  ori r14,r12,0x6240
+	   8: MOVL       	$0x73516240, t6
+	   9: PUTL       	t6, R14
+	  10: INCEIPL       	$4
+
+	0x25475B70:  546F16FA  rlwinm r15,r3,2,27,29
+	  11: GETL       	R3, t8
+	  12: ROLL       	$0x2, t8
+	  13: ANDL       	$0x1C, t8
+	  14: PUTL       	t8, R15
+	  15: INCEIPL       	$4
+
+	0x25475B74:  7DC87E30  sraw r8,r14,r15
+	  16: GETL       	R14, t12
+	  17: GETL       	R15, t10
+	  18: SARL       	t10, t12  (-wCa)
+	  19: PUTL       	t12, R8
+	  20: INCEIPL       	$4
+
+	0x25475B78:  5506073E  rlwinm r6,r8,0,28,31
+	  21: GETL       	R8, t14
+	  22: ANDL       	$0xF, t14
+	  23: PUTL       	t14, R6
+	  24: INCEIPL       	$4
+
+	0x25475B7C:  90CB0014  stw r6,20(r11)
+	  25: GETL       	R6, t16
+	  26: GETL       	R11, t18
+	  27: ADDL       	$0x14, t18
+	  28: STL       	t16, (t18)
+	  29: INCEIPL       	$4
+
+	0x25475B80:  4BFFFC78  b 0x254757F8
+	  30: JMPo       	$0x254757F8  ($4)
+
+
+
+. 491 25475B64 32
+. 80 7D 00 18 3D 80 73 51 61 8E 62 40 54 6F 16 FA 7D C8 7E 30 55 06 07 3E 90 CB 00 14 4B FF FC 78
+==== BB 492 (0x254757F8) approx BBs exec'd 0 ====
+
+	0x254757F8:  54EF2834  rlwinm r15,r7,5,0,26
+	   0: GETL       	R7, t0
+	   1: SHLL       	$0x5, t0
+	   2: PUTL       	t0, R15
+	   3: INCEIPL       	$4
+
+	0x254757FC:  3BBD0020  addi r29,r29,32
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x20, t2
+	   6: PUTL       	t2, R29
+	   7: INCEIPL       	$4
+
+	0x25475800:  7DCFDA14  add r14,r15,r27
+	   8: GETL       	R15, t4
+	   9: GETL       	R27, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R14
+	  12: INCEIPL       	$4
+
+	0x25475804:  7C8EE840  cmpl cr1,r14,r29
+	  13: GETL       	R14, t8
+	  14: GETL       	R29, t10
+	  15: CMPUL       	t8, t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0x25475808:  408500BC  bc 4,5,0x254758C4
+	  18: Jc05o       	$0x254758C4
+
+
+
+. 492 254757F8 20
+. 54 EF 28 34 3B BD 00 20 7D CF DA 14 7C 8E E8 40 40 85 00 BC
+==== BB 493 (0x25475B50) approx BBs exec'd 0 ====
+
+	0x25475B50:  808BFFEC  lwz r4,-20(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0xFFFFFFEC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25475B54:  7E045000  cmp cr4,r4,r10
+	   5: GETL       	R4, t4
+	   6: GETL       	R10, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x4, CR
+	   9: INCEIPL       	$4
+
+	0x25475B58:  4192000C  bc 12,18,0x25475B64
+	  10: Js18o       	$0x25475B64
+
+
+
+. 493 25475B50 12
+. 80 8B FF EC 7E 04 50 00 41 92 00 0C
+==== BB 494 (0x25475B5C) approx BBs exec'd 0 ====
+
+	0x25475B5C:  39400001  li r10,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x25475B60:  915F00A8  stw r10,168(r31)
+	   3: GETL       	R10, t2
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0xA8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x25475B64:  807D0018  lwz r3,24(r29)
+	   8: GETL       	R29, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x25475B68:  3D807351  lis r12,29521
+	  13: MOVL       	$0x73510000, t10
+	  14: PUTL       	t10, R12
+	  15: INCEIPL       	$4
+
+	0x25475B6C:  618E6240  ori r14,r12,0x6240
+	  16: MOVL       	$0x73516240, t12
+	  17: PUTL       	t12, R14
+	  18: INCEIPL       	$4
+
+	0x25475B70:  546F16FA  rlwinm r15,r3,2,27,29
+	  19: GETL       	R3, t14
+	  20: ROLL       	$0x2, t14
+	  21: ANDL       	$0x1C, t14
+	  22: PUTL       	t14, R15
+	  23: INCEIPL       	$4
+
+	0x25475B74:  7DC87E30  sraw r8,r14,r15
+	  24: GETL       	R14, t18
+	  25: GETL       	R15, t16
+	  26: SARL       	t16, t18  (-wCa)
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0x25475B78:  5506073E  rlwinm r6,r8,0,28,31
+	  29: GETL       	R8, t20
+	  30: ANDL       	$0xF, t20
+	  31: PUTL       	t20, R6
+	  32: INCEIPL       	$4
+
+	0x25475B7C:  90CB0014  stw r6,20(r11)
+	  33: GETL       	R6, t22
+	  34: GETL       	R11, t24
+	  35: ADDL       	$0x14, t24
+	  36: STL       	t22, (t24)
+	  37: INCEIPL       	$4
+
+	0x25475B80:  4BFFFC78  b 0x254757F8
+	  38: JMPo       	$0x254757F8  ($4)
+
+
+
+. 494 25475B5C 40
+. 39 40 00 01 91 5F 00 A8 80 7D 00 18 3D 80 73 51 61 8E 62 40 54 6F 16 FA 7D C8 7E 30 55 06 07 3E 90 CB 00 14 4B FF FC 78
+==== BB 495 (0x254757F0) approx BBs exec'd 0 ====
+
+	0x254757F0:  2C890002  cmpi cr1,r9,2
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254757F4:  418603B8  bc 12,6,0x25475BAC
+	   5: Js06o       	$0x25475BAC
+
+
+
+. 495 254757F0 8
+. 2C 89 00 02 41 86 03 B8
+==== BB 496 (0x25475BAC) approx BBs exec'd 0 ====
+
+	0x25475BAC:  807D0014  lwz r3,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475BB0:  819D0008  lwz r12,8(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25475BB4:  546BE8FE  rlwinm r11,r3,29,3,31
+	  10: GETL       	R3, t8
+	  11: SHRL       	$0x3, t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0x25475BB8:  919A0008  stw r12,8(r26)
+	  14: GETL       	R12, t10
+	  15: GETL       	R26, t12
+	  16: ADDL       	$0x8, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x25475BBC:  B17A0156  sth r11,342(r26)
+	  19: GETL       	R11, t14
+	  20: GETL       	R26, t16
+	  21: ADDL       	$0x156, t16
+	  22: STW       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x25475BC0:  4BFFFC38  b 0x254757F8
+	  24: JMPo       	$0x254757F8  ($4)
+
+
+
+. 496 25475BAC 24
+. 80 7D 00 14 81 9D 00 08 54 6B E8 FE 91 9A 00 08 B1 7A 01 56 4B FF FC 38
+==== BB 497 (0x25475820) approx BBs exec'd 0 ====
+
+	0x25475820:  3DC06474  lis r14,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R14
+	   2: INCEIPL       	$4
+
+	0x25475824:  61C0E551  ori r0,r14,0xE551
+	   3: MOVL       	$0x6474E551, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x25475828:  7E090000  cmp cr4,r9,r0
+	   6: GETL       	R9, t4
+	   7: GETL       	R0, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x4, CR
+	  10: INCEIPL       	$4
+
+	0x2547582C:  41920278  bc 12,18,0x25475AA4
+	  11: Js18o       	$0x25475AA4
+
+
+
+. 497 25475820 16
+. 3D C0 64 74 61 C0 E5 51 7E 09 00 00 41 92 02 78
+==== BB 498 (0x25475AA4) approx BBs exec'd 0 ====
+
+	0x25475AA4:  815D0018  lwz r10,24(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25475AA8:  915F00A4  stw r10,164(r31)
+	   5: GETL       	R10, t4
+	   6: GETL       	R31, t6
+	   7: ADDL       	$0xA4, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25475AAC:  4BFFFD4C  b 0x254757F8
+	  10: JMPo       	$0x254757F8  ($4)
+
+
+
+. 498 25475AA4 12
+. 81 5D 00 18 91 5F 00 A4 4B FF FD 4C
+==== BB 499 (0x254758C4) approx BBs exec'd 0 ====
+
+	0x254758C4:  2E1C0000  cmpi cr4,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x254758C8:  419202BC  bc 12,18,0x25475B84
+	   4: Js18o       	$0x25475B84
+
+
+
+. 499 254758C4 8
+. 2E 1C 00 00 41 92 02 BC
+==== BB 500 (0x254758CC) approx BBs exec'd 0 ====
+
+	0x254758CC:  1DFC0018  mulli r15,r28,24
+	   0: GETL       	R28, t0
+	   1: MULL       	$0x18, t0
+	   2: PUTL       	t0, R15
+	   3: INCEIPL       	$4
+
+	0x254758D0:  839F009C  lwz r28,156(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x9C, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x254758D4:  81750000  lwz r11,0(r21)
+	   9: GETL       	R21, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x254758D8:  7EBDAB78  or r29,r21,r21
+	  13: GETL       	R21, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0x254758DC:  2E1C0003  cmpi cr4,r28,3
+	  16: GETL       	R28, t12
+	  17: MOVL       	$0x3, t16
+	  18: CMPL       	t12, t16, t14  (-rSo)
+	  19: ICRFL       	t14, $0x4, CR
+	  20: INCEIPL       	$4
+
+	0x254758E0:  7F8FAA14  add r28,r15,r21
+	  21: GETL       	R15, t18
+	  22: GETL       	R21, t20
+	  23: ADDL       	t18, t20
+	  24: PUTL       	t20, R28
+	  25: INCEIPL       	$4
+
+	0x254758E4:  833CFFF4  lwz r25,-12(r28)
+	  26: GETL       	R28, t22
+	  27: ADDL       	$0xFFFFFFF4, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R25
+	  30: INCEIPL       	$4
+
+	0x254758E8:  7DCBC850  subf r14,r11,r25
+	  31: GETL       	R11, t26
+	  32: GETL       	R25, t28
+	  33: SUBL       	t26, t28
+	  34: PUTL       	t28, R14
+	  35: INCEIPL       	$4
+
+	0x254758EC:  40920818  bc 4,18,0x25476104
+	  36: Jc18o       	$0x25476104
+
+
+
+. 500 254758CC 36
+. 1D FC 00 18 83 9F 00 9C 81 75 00 00 7E BD AB 78 2E 1C 00 03 7F 8F AA 14 83 3C FF F4 7D CB C8 50 40 92 08 18
+==== BB 501 (0x254758F0) approx BBs exec'd 0 ====
+
+	0x254758F0:  8191004C  lwz r12,76(r17)
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x4C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x254758F4:  7DC47378  or r4,r14,r14
+	   5: GETL       	R14, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x254758F8:  7EE3BB78  or r3,r23,r23
+	   8: GETL       	R23, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x254758FC:  7D656038  and r5,r11,r12
+	  11: GETL       	R11, t8
+	  12: GETL       	R12, t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x25475900:  4800ADE9  bl 0x254806E8
+	  16: MOVL       	$0x25475904, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0x254806E8  ($4)
+
+
+
+. 501 254758F0 20
+. 81 91 00 4C 7D C4 73 78 7E E3 BB 78 7D 65 60 38 48 00 AD E9
+==== BB 502 __elf_preferred_address(0x254806E8) approx BBs exec'd 0 ====
+
+	0x254806E8:  7CA32B79  or. r3,r5,r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, R3
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254806EC:  7D8802A6  mflr r12
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x254806F0:  9421FFE0  stwu r1,-32(r1)
+	   8: GETL       	R1, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xFFFFFFE0, t8
+	  11: PUTL       	t8, R1
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x254806F4:  4801690D  bl 0x25497000
+	  14: MOVL       	$0x254806F8, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 502 254806E8 16
+. 7C A3 2B 79 7D 88 02 A6 94 21 FF E0 48 01 69 0D
+==== BB 503 (0x254806F8) approx BBs exec'd 0 ====
+
+	0x254806F8:  93A10014  stw r29,20(r1)
+	   0: GETL       	R29, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254806FC:  7C9D2378  or r29,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25480700:  93C10018  stw r30,24(r1)
+	   8: GETL       	R30, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x18, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25480704:  7FC802A6  mflr r30
+	  13: GETL       	LR, t10
+	  14: PUTL       	t10, R30
+	  15: INCEIPL       	$4
+
+	0x25480708:  93810010  stw r28,16(r1)
+	  16: GETL       	R28, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x10, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2548070C:  93E1001C  stw r31,28(r1)
+	  21: GETL       	R31, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25480710:  7D8803A6  mtlr r12
+	  26: GETL       	R12, t20
+	  27: PUTL       	t20, LR
+	  28: INCEIPL       	$4
+
+	0x25480714:  408200FC  bc 4,2,0x25480810
+	  29: Jc02o       	$0x25480810
+
+
+
+. 503 254806F8 32
+. 93 A1 00 14 7C 9D 23 78 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 E1 00 1C 7D 88 03 A6 40 82 00 FC
+==== BB 504 (0x25480718) approx BBs exec'd 0 ====
+
+	0x25480718:  3CC00003  lis r6,3
+	   0: MOVL       	$0x30000, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x2548071C:  83FE04F4  lwz r31,1268(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x4F4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0x25480720:  809E04C8  lwz r4,1224(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x4C8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25480724:  60C6FFFF  ori r6,r6,0xFFFF
+	  13: MOVL       	$0x3FFFF, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x25480728:  3CE07000  lis r7,28672
+	  16: MOVL       	$0x70000000, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x2548072C:  3980000F  li r12,15
+	  19: MOVL       	$0xF, t14
+	  20: PUTL       	t14, R12
+	  21: INCEIPL       	$4
+
+	0x25480730:  81640000  lwz r11,0(r4)
+	  22: GETL       	R4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R11
+	  25: INCEIPL       	$4
+
+	0x25480734:  38840018  addi r4,r4,24
+	  26: GETL       	R4, t20
+	  27: ADDL       	$0x18, t20
+	  28: PUTL       	t20, R4
+	  29: INCEIPL       	$4
+
+	0x25480738:  2F8B0000  cmpi cr7,r11,0
+	  30: GETL       	R11, t22
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0x2548073C:  419E009C  bc 12,30,0x254807D8
+	  34: Js30o       	$0x254807D8
+
+
+
+. 504 25480718 40
+. 3C C0 00 03 83 FE 04 F4 80 9E 04 C8 60 C6 FF FF 3C E0 70 00 39 80 00 0F 81 64 00 00 38 84 00 18 2F 8B 00 00 41 9E 00 9C
+==== BB 505 (0x25480740) approx BBs exec'd 0 ====
+
+	0x25480740:  813F0004  lwz r9,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25480744:  38A9FFFF  addi r5,r9,-1
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25480748:  7CA328F8  nor r3,r5,r5
+	   9: GETL       	R5, t6
+	  10: NOTL       	t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x2548074C:  800B01A4  lwz r0,420(r11)
+	  13: GETL       	R11, t8
+	  14: ADDL       	$0x1A4, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R0
+	  17: INCEIPL       	$4
+
+	0x25480750:  838B01A0  lwz r28,416(r11)
+	  18: GETL       	R11, t12
+	  19: ADDL       	$0x1A0, t12
+	  20: LDL       	(t12), t14
+	  21: PUTL       	t14, R28
+	  22: INCEIPL       	$4
+
+	0x25480754:  7C082B78  or r8,r0,r5
+	  23: GETL       	R0, t16
+	  24: GETL       	R5, t18
+	  25: ORL       	t18, t16
+	  26: PUTL       	t16, R8
+	  27: INCEIPL       	$4
+
+	0x25480758:  7C883840  cmpl cr1,r8,r7
+	  28: GETL       	R8, t20
+	  29: GETL       	R7, t22
+	  30: CMPUL       	t20, t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x1, CR
+	  32: INCEIPL       	$4
+
+	0x2548075C:  7F8A1838  and r10,r28,r3
+	  33: GETL       	R28, t26
+	  34: GETL       	R3, t28
+	  35: ANDL       	t26, t28
+	  36: PUTL       	t28, R10
+	  37: INCEIPL       	$4
+
+	0x25480760:  40840010  bc 4,4,0x25480770
+	  38: Jc04o       	$0x25480770
+
+
+
+. 505 25480740 36
+. 81 3F 00 04 38 A9 FF FF 7C A3 28 F8 80 0B 01 A4 83 8B 01 A0 7C 08 2B 78 7C 88 38 40 7F 8A 18 38 40 84 00 10
+==== BB 506 (0x25480764) approx BBs exec'd 0 ====
+
+	0x25480764:  838B0180  lwz r28,384(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25480768:  7789C000  andis. r9,r28,0xC000
+	   5: GETL       	R28, t4
+	   6: ANDL       	$0xC0000000, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2548076C:  4082000C  bc 4,2,0x25480778
+	  11: Jc02o       	$0x25480778
+
+
+
+. 506 25480764 12
+. 83 8B 01 80 77 89 C0 00 40 82 00 0C
+==== BB 507 (0x25480770) approx BBs exec'd 0 ====
+
+	0x25480770:  7F075040  cmpl cr6,r7,r10
+	   0: GETL       	R7, t0
+	   1: GETL       	R10, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25480774:  40980054  bc 4,24,0x254807C8
+	   5: Jc24o       	$0x254807C8
+
+
+
+. 507 25480770 8
+. 7F 07 50 40 40 98 00 54
+==== BB 508 (0x254807C8) approx BBs exec'd 0 ====
+
+	0x254807C8:  7D475378  or r7,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x254807CC:  816B000C  lwz r11,12(r11)
+	   3: GETL       	R11, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x254807D0:  2C8B0000  cmpi cr1,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x254807D4:  4086FF78  bc 4,6,0x2548074C
+	  12: Jc06o       	$0x2548074C
+
+
+
+. 508 254807C8 16
+. 7D 47 53 78 81 6B 00 0C 2C 8B 00 00 40 86 FF 78
+==== BB 509 (0x2548074C) approx BBs exec'd 0 ====
+
+	0x2548074C:  800B01A4  lwz r0,420(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x1A4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25480750:  838B01A0  lwz r28,416(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x1A0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25480754:  7C082B78  or r8,r0,r5
+	  10: GETL       	R0, t8
+	  11: GETL       	R5, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R8
+	  14: INCEIPL       	$4
+
+	0x25480758:  7C883840  cmpl cr1,r8,r7
+	  15: GETL       	R8, t12
+	  16: GETL       	R7, t14
+	  17: CMPUL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0x2548075C:  7F8A1838  and r10,r28,r3
+	  20: GETL       	R28, t18
+	  21: GETL       	R3, t20
+	  22: ANDL       	t18, t20
+	  23: PUTL       	t20, R10
+	  24: INCEIPL       	$4
+
+	0x25480760:  40840010  bc 4,4,0x25480770
+	  25: Jc04o       	$0x25480770
+
+
+
+. 509 2548074C 24
+. 80 0B 01 A4 83 8B 01 A0 7C 08 2B 78 7C 88 38 40 7F 8A 18 38 40 84 00 10
+==== BB 510 (0x25480778) approx BBs exec'd 0 ====
+
+	0x25480778:  7C064010  subfc r0,r6,r8
+	   0: GETL       	R6, t0
+	   1: GETL       	R8, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2548077C:  38000000  li r0,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25480780:  7C000114  adde r0,r0,r0
+	   8: GETL       	R0, t6
+	   9: GETL       	R0, t8
+	  10: ADCL       	t6, t8  (-rCa-wCa)
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x25480784:  7D2A3010  subfc r9,r10,r6
+	  13: GETL       	R10, t10
+	  14: GETL       	R6, t12
+	  15: SBBL       	t10, t12  (-wCa)
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x25480788:  39200000  li r9,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R9
+	  20: INCEIPL       	$4
+
+	0x2548078C:  7D294914  adde r9,r9,r9
+	  21: GETL       	R9, t16
+	  22: GETL       	R9, t18
+	  23: ADCL       	t16, t18  (-rCa-wCa)
+	  24: PUTL       	t18, R9
+	  25: INCEIPL       	$4
+
+	0x25480790:  7C1C4839  and. r28,r0,r9
+	  26: GETL       	R0, t20
+	  27: GETL       	R9, t22
+	  28: ANDL       	t20, t22
+	  29: PUTL       	t22, R28
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0x25480794:  40820094  bc 4,2,0x25480828
+	  33: Jc02o       	$0x25480828
+
+
+
+. 510 25480778 32
+. 7C 06 40 10 38 00 00 00 7C 00 01 14 7D 2A 30 10 39 20 00 00 7D 29 49 14 7C 1C 48 39 40 82 00 94
+==== BB 511 (0x25480798) approx BBs exec'd 0 ====
+
+	0x25480798:  7C083810  subfc r0,r8,r7
+	   0: GETL       	R8, t0
+	   1: GETL       	R7, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2548079C:  38000000  li r0,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x254807A0:  7C000114  adde r0,r0,r0
+	   8: GETL       	R0, t6
+	   9: GETL       	R0, t8
+	  10: ADCL       	t6, t8  (-rCa-wCa)
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x254807A4:  7D265010  subfc r9,r6,r10
+	  13: GETL       	R6, t10
+	  14: GETL       	R10, t12
+	  15: SBBL       	t10, t12  (-wCa)
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x254807A8:  39200000  li r9,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R9
+	  20: INCEIPL       	$4
+
+	0x254807AC:  7D294914  adde r9,r9,r9
+	  21: GETL       	R9, t16
+	  22: GETL       	R9, t18
+	  23: ADCL       	t16, t18  (-rCa-wCa)
+	  24: PUTL       	t18, R9
+	  25: INCEIPL       	$4
+
+	0x254807B0:  7C1C4839  and. r28,r0,r9
+	  26: GETL       	R0, t20
+	  27: GETL       	R9, t22
+	  28: ANDL       	t20, t22
+	  29: PUTL       	t22, R28
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0x254807B4:  41820018  bc 12,2,0x254807CC
+	  33: Js02o       	$0x254807CC
+
+
+
+. 511 25480798 32
+. 7C 08 38 10 38 00 00 00 7C 00 01 14 7D 26 50 10 39 20 00 00 7D 29 49 14 7C 1C 48 39 41 82 00 18
+==== BB 512 (0x254807CC) approx BBs exec'd 0 ====
+
+	0x254807CC:  816B000C  lwz r11,12(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254807D0:  2C8B0000  cmpi cr1,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254807D4:  4086FF78  bc 4,6,0x2548074C
+	   9: Jc06o       	$0x2548074C
+
+
+
+. 512 254807CC 12
+. 81 6B 00 0C 2C 8B 00 00 40 86 FF 78
+==== BB 513 (0x254807D8) approx BBs exec'd 0 ====
+
+	0x254807D8:  358CFFFF  addic. r12,r12,-1
+	   0: GETL       	R12, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R12
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x254807DC:  4080FF54  bc 4,0,0x25480730
+	   6: Jc00o       	$0x25480730
+
+
+
+. 513 254807D8 8
+. 35 8C FF FF 40 80 FF 54
+==== BB 514 (0x25480730) approx BBs exec'd 0 ====
+
+	0x25480730:  81640000  lwz r11,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25480734:  38840018  addi r4,r4,24
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0x18, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25480738:  2F8B0000  cmpi cr7,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x2548073C:  419E009C  bc 12,30,0x254807D8
+	  12: Js30o       	$0x254807D8
+
+
+
+. 514 25480730 16
+. 81 64 00 00 38 84 00 18 2F 8B 00 00 41 9E 00 9C
+==== BB 515 (0x254807E0) approx BBs exec'd 0 ====
+
+	0x254807E0:  3C67FFFF  addis r3,r7,-1
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0xFFFF0000, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x254807E4:  80FF0004  lwz r7,4(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x254807E8:  7F033040  cmpl cr6,r3,r6
+	   9: GETL       	R3, t6
+	  10: GETL       	R6, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0x254807EC:  3887FFFF  addi r4,r7,-1
+	  14: GETL       	R7, t12
+	  15: ADDL       	$0xFFFFFFFF, t12
+	  16: PUTL       	t12, R4
+	  17: INCEIPL       	$4
+
+	0x254807F0:  7C85EB78  or r5,r4,r29
+	  18: GETL       	R4, t14
+	  19: GETL       	R29, t16
+	  20: ORL       	t16, t14
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0x254807F4:  38850001  addi r4,r5,1
+	  23: GETL       	R5, t18
+	  24: ADDL       	$0x1, t18
+	  25: PUTL       	t18, R4
+	  26: INCEIPL       	$4
+
+	0x254807F8:  40990014  bc 4,25,0x2548080C
+	  27: Jc25o       	$0x2548080C
+
+
+
+. 515 254807E0 28
+. 3C 67 FF FF 80 FF 00 04 7F 03 30 40 38 87 FF FF 7C 85 EB 78 38 85 00 01 40 99 00 14
+==== BB 516 (0x254807FC) approx BBs exec'd 0 ====
+
+	0x254807FC:  7D061850  subf r8,r6,r3
+	   0: GETL       	R6, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25480800:  7C641850  subf r3,r4,r3
+	   5: GETL       	R4, t4
+	   6: GETL       	R3, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25480804:  7F882040  cmpl cr7,r8,r4
+	  10: GETL       	R8, t8
+	  11: GETL       	R4, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x25480808:  409C0008  bc 4,28,0x25480810
+	  15: Jc28o       	$0x25480810
+
+
+
+. 516 254807FC 16
+. 7D 06 18 50 7C 64 18 50 7F 88 20 40 40 9C 00 08
+==== BB 517 (0x25480810) approx BBs exec'd 0 ====
+
+	0x25480810:  83810010  lwz r28,16(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25480814:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x25480818:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0x2548081C:  83E1001C  lwz r31,28(r1)
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R31
+	  19: INCEIPL       	$4
+
+	0x25480820:  38210020  addi r1,r1,32
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x20, t16
+	  22: PUTL       	t16, R1
+	  23: INCEIPL       	$4
+
+	0x25480824:  4E800020  blr
+	  24: GETL       	LR, t18
+	  25: JMPo-r       	t18  ($4)
+
+
+
+. 517 25480810 24
+. 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 518 (0x25475904) approx BBs exec'd 0 ====
+
+	0x25475904:  80B50014  lwz r5,20(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25475908:  81150010  lwz r8,16(r21)
+	   5: GETL       	R21, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x2547590C:  7DC47378  or r4,r14,r14
+	  10: GETL       	R14, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25475910:  38C00802  li r6,2050
+	  13: MOVL       	$0x802, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x25475914:  7E679B78  or r7,r19,r19
+	  16: GETL       	R19, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x25475918:  4800CE81  bl 0x25482798
+	  19: MOVL       	$0x2547591C, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25482798  ($4)
+
+
+
+. 518 25475904 24
+. 80 B5 00 14 81 15 00 10 7D C4 73 78 38 C0 08 02 7E 67 9B 78 48 00 CE 81
+==22130== Reading syms from /home/sewardj/valgrind-2.2.0-ppc/Inst/lib/valgrind/vg_inject.so (0xFFDD000)
+==== BB 519 (0x2547591C) approx BBs exec'd 0 ====
+
+	0x2547591C:  2F03FFFF  cmpi cr6,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25475920:  907A01A0  stw r3,416(r26)
+	   5: GETL       	R3, t6
+	   6: GETL       	R26, t8
+	   7: ADDL       	$0x1A0, t8
+	   8: STL       	t6, (t8)
+	   9: INCEIPL       	$4
+
+	0x25475924:  419A013C  bc 12,26,0x25475A60
+	  10: Js26o       	$0x25475A60
+
+
+
+. 519 2547591C 12
+. 2F 03 FF FF 90 7A 01 A0 41 9A 01 3C
+==== BB 520 (0x25475928) approx BBs exec'd 0 ====
+
+	0x25475928:  817F00A8  lwz r11,168(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xA8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547592C:  7CA37214  add r5,r3,r14
+	   5: GETL       	R3, t4
+	   6: GETL       	R14, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0x25475930:  90BA01A4  stw r5,420(r26)
+	  10: GETL       	R5, t8
+	  11: GETL       	R26, t10
+	  12: ADDL       	$0x1A4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475934:  2F8B0000  cmpi cr7,r11,0
+	  15: GETL       	R11, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25475938:  80F50000  lwz r7,0(r21)
+	  19: GETL       	R21, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R7
+	  22: INCEIPL       	$4
+
+	0x2547593C:  7C071850  subf r0,r7,r3
+	  23: GETL       	R7, t20
+	  24: GETL       	R3, t22
+	  25: SUBL       	t20, t22
+	  26: PUTL       	t22, R0
+	  27: INCEIPL       	$4
+
+	0x25475940:  901A0000  stw r0,0(r26)
+	  28: GETL       	R0, t24
+	  29: GETL       	R26, t26
+	  30: STL       	t24, (t26)
+	  31: INCEIPL       	$4
+
+	0x25475944:  409E0534  bc 4,30,0x25475E78
+	  32: Jc30o       	$0x25475E78
+
+
+
+. 520 25475928 32
+. 81 7F 00 A8 7C A3 72 14 90 BA 01 A4 2F 8B 00 00 80 F5 00 00 7C 07 18 50 90 1A 00 00 40 9E 05 34
+==== BB 521 (0x25475E78) approx BBs exec'd 0 ====
+
+	0x25475E78:  80750004  lwz r3,4(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475E7C:  38A00000  li r5,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25475E80:  809CFFF4  lwz r4,-12(r28)
+	   8: GETL       	R28, t6
+	   9: ADDL       	$0xFFFFFFF4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25475E84:  7C832050  subf r4,r3,r4
+	  13: GETL       	R3, t10
+	  14: GETL       	R4, t12
+	  15: SUBL       	t10, t12
+	  16: PUTL       	t12, R4
+	  17: INCEIPL       	$4
+
+	0x25475E88:  7C601A14  add r3,r0,r3
+	  18: GETL       	R0, t14
+	  19: GETL       	R3, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R3
+	  22: INCEIPL       	$4
+
+	0x25475E8C:  4800C92D  bl 0x254827B8
+	  23: MOVL       	$0x25475E90, t18
+	  24: PUTL       	t18, LR
+	  25: JMPo-c       	$0x254827B8  ($4)
+
+
+
+. 521 25475E78 24
+. 80 75 00 04 38 A0 00 00 80 9C FF F4 7C 83 20 50 7C 60 1A 14 48 00 C9 2D
+==== BB 522 mprotect(0x254827B8) approx BBs exec'd 0 ====
+
+	0x254827B8:  3800007D  li r0,125
+	   0: MOVL       	$0x7D, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254827BC:  44000002  sc
+	   3: JMPo-sys       	$0x254827C0  ($4)
+
+
+
+. 522 254827B8 8
+. 38 00 00 7D 44 00 00 02
+==== BB 523 (0x254827C0) approx BBs exec'd 0 ====
+
+	0x254827C0:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 523 254827C0 4
+. 4C A3 00 20
+==== BB 524 (0x25475E90) approx BBs exec'd 0 ====
+
+	0x25475E90:  4BFFFAB8  b 0x25475948
+	   0: JMPo       	$0x25475948  ($4)
+
+
+
+. 524 25475E90 4
+. 4B FF FA B8
+==== BB 525 (0x25475948) approx BBs exec'd 0 ====
+
+	0x25475948:  80DD0014  lwz r6,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547594C:  70C90004  andi. r9,r6,0x4
+	   5: GETL       	R6, t4
+	   6: ANDL       	$0x4, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475950:  41820014  bc 12,2,0x25475964
+	  11: Js02o       	$0x25475964
+
+
+
+. 525 25475948 12
+. 80 DD 00 14 70 C9 00 04 41 82 00 14
+==== BB 526 (0x25475954) approx BBs exec'd 0 ====
+
+	0x25475954:  839D0004  lwz r28,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25475958:  833A0000  lwz r25,0(r26)
+	   5: GETL       	R26, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R25
+	   8: INCEIPL       	$4
+
+	0x2547595C:  7D19E214  add r8,r25,r28
+	   9: GETL       	R25, t8
+	  10: GETL       	R28, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R8
+	  13: INCEIPL       	$4
+
+	0x25475960:  911A01A8  stw r8,424(r26)
+	  14: GETL       	R8, t12
+	  15: GETL       	R26, t14
+	  16: ADDL       	$0x1A8, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25475964:  819A014C  lwz r12,332(r26)
+	  19: GETL       	R26, t16
+	  20: ADDL       	$0x14C, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R12
+	  23: INCEIPL       	$4
+
+	0x25475968:  2F8C0000  cmpi cr7,r12,0
+	  24: GETL       	R12, t20
+	  25: CMP0L       	t20, t22  (-rSo)
+	  26: ICRFL       	t22, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0x2547596C:  409E0038  bc 4,30,0x254759A4
+	  28: Jc30o       	$0x254759A4
+
+
+
+. 526 25475954 28
+. 83 9D 00 04 83 3A 00 00 7D 19 E2 14 91 1A 01 A8 81 9A 01 4C 2F 8C 00 00 40 9E 00 38
+==== BB 527 (0x25475970) approx BBs exec'd 0 ====
+
+	0x25475970:  815D0010  lwz r10,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25475974:  8178001C  lwz r11,28(r24)
+	   5: GETL       	R24, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25475978:  7C8A5840  cmpl cr1,r10,r11
+	  10: GETL       	R10, t8
+	  11: GETL       	R11, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x2547597C:  41850028  bc 12,5,0x254759A4
+	  15: Js05o       	$0x254759A4
+
+
+
+. 527 25475970 16
+. 81 5D 00 10 81 78 00 1C 7C 8A 58 40 41 85 00 28
+==== BB 528 (0x25475980) approx BBs exec'd 0 ====
+
+	0x25475980:  801D0004  lwz r0,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475984:  811D0000  lwz r8,0(r29)
+	   5: GETL       	R29, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R8
+	   8: INCEIPL       	$4
+
+	0x25475988:  A138002C  lhz r9,44(r24)
+	   9: GETL       	R24, t8
+	  10: ADDL       	$0x2C, t8
+	  11: LDW       	(t8), t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x2547598C:  7C880050  subf r4,r8,r0
+	  14: GETL       	R8, t12
+	  15: GETL       	R0, t14
+	  16: SUBL       	t12, t14
+	  17: PUTL       	t14, R4
+	  18: INCEIPL       	$4
+
+	0x25475990:  55232834  rlwinm r3,r9,5,0,26
+	  19: GETL       	R9, t16
+	  20: SHLL       	$0x5, t16
+	  21: PUTL       	t16, R3
+	  22: INCEIPL       	$4
+
+	0x25475994:  7CA45214  add r5,r4,r10
+	  23: GETL       	R4, t18
+	  24: GETL       	R10, t20
+	  25: ADDL       	t18, t20
+	  26: PUTL       	t20, R5
+	  27: INCEIPL       	$4
+
+	0x25475998:  7CEB1A14  add r7,r11,r3
+	  28: GETL       	R11, t22
+	  29: GETL       	R3, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R7
+	  32: INCEIPL       	$4
+
+	0x2547599C:  7F053840  cmpl cr6,r5,r7
+	  33: GETL       	R5, t26
+	  34: GETL       	R7, t28
+	  35: CMPUL       	t26, t28, t30  (-rSo)
+	  36: ICRFL       	t30, $0x6, CR
+	  37: INCEIPL       	$4
+
+	0x254759A0:  4098069C  bc 4,24,0x2547603C
+	  38: Jc24o       	$0x2547603C
+
+
+
+. 528 25475980 36
+. 80 1D 00 04 81 1D 00 00 A1 38 00 2C 7C 88 00 50 55 23 28 34 7C A4 52 14 7C EB 1A 14 7F 05 38 40 40 98 06 9C
+==== BB 529 (0x2547603C) approx BBs exec'd 0 ====
+
+	0x2547603C:  7EE85A14  add r23,r8,r11
+	   0: GETL       	R8, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x25476040:  7D6AB850  subf r11,r10,r23
+	   5: GETL       	R10, t4
+	   6: GETL       	R23, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25476044:  917A014C  stw r11,332(r26)
+	  10: GETL       	R11, t8
+	  11: GETL       	R26, t10
+	  12: ADDL       	$0x14C, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25476048:  4BFFF95C  b 0x254759A4
+	  15: JMPo       	$0x254759A4  ($4)
+
+
+
+. 529 2547603C 16
+. 7E E8 5A 14 7D 6A B8 50 91 7A 01 4C 4B FF F9 5C
+==== BB 530 (0x254759A4) approx BBs exec'd 0 ====
+
+	0x254759A4:  809D000C  lwz r4,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254759A8:  807D0008  lwz r3,8(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x254759AC:  7C041840  cmpl cr0,r4,r3
+	  10: GETL       	R4, t8
+	  11: GETL       	R3, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x254759B0:  40810068  bc 4,1,0x25475A18
+	  15: Jc01o       	$0x25475A18
+
+
+
+. 530 254759A4 16
+. 80 9D 00 0C 80 7D 00 08 7C 04 18 40 40 81 00 68
+==== BB 531 (0x25475A18) approx BBs exec'd 0 ====
+
+	0x25475A18:  3BBD0018  addi r29,r29,24
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x18, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x25475A1C:  7F357A14  add r25,r21,r15
+	   4: GETL       	R21, t2
+	   5: GETL       	R15, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R25
+	   8: INCEIPL       	$4
+
+	0x25475A20:  7F99E840  cmpl cr7,r25,r29
+	   9: GETL       	R25, t6
+	  10: GETL       	R29, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25475A24:  409D01F8  bc 4,29,0x25475C1C
+	  14: Jc29o       	$0x25475C1C
+
+
+
+. 531 25475A18 16
+. 3B BD 00 18 7F 35 7A 14 7F 99 E8 40 40 9D 01 F8
+==== BB 532 (0x25475A28) approx BBs exec'd 0 ====
+
+	0x25475A28:  809D0004  lwz r4,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25475A2C:  801D0000  lwz r0,0(r29)
+	   5: GETL       	R29, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x25475A30:  7C840040  cmpl cr1,r4,r0
+	   9: GETL       	R4, t8
+	  10: GETL       	R0, t10
+	  11: CMPUL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25475A34:  4085FF14  bc 4,5,0x25475948
+	  14: Jc05o       	$0x25475948
+
+
+
+. 532 25475A28 16
+. 80 9D 00 04 80 1D 00 00 7C 84 00 40 40 85 FF 14
+==== BB 533 (0x25475A38) approx BBs exec'd 0 ====
+
+	0x25475A38:  815A0000  lwz r10,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0x25475A3C:  7C802050  subf r4,r0,r4
+	   4: GETL       	R0, t4
+	   5: GETL       	R4, t6
+	   6: SUBL       	t4, t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x25475A40:  80BD0014  lwz r5,20(r29)
+	   9: GETL       	R29, t8
+	  10: ADDL       	$0x14, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R5
+	  13: INCEIPL       	$4
+
+	0x25475A44:  38C00812  li r6,2066
+	  14: MOVL       	$0x812, t12
+	  15: PUTL       	t12, R6
+	  16: INCEIPL       	$4
+
+	0x25475A48:  811D0010  lwz r8,16(r29)
+	  17: GETL       	R29, t14
+	  18: ADDL       	$0x10, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R8
+	  21: INCEIPL       	$4
+
+	0x25475A4C:  7C6A0214  add r3,r10,r0
+	  22: GETL       	R10, t18
+	  23: GETL       	R0, t20
+	  24: ADDL       	t18, t20
+	  25: PUTL       	t20, R3
+	  26: INCEIPL       	$4
+
+	0x25475A50:  7E679B78  or r7,r19,r19
+	  27: GETL       	R19, t22
+	  28: PUTL       	t22, R7
+	  29: INCEIPL       	$4
+
+	0x25475A54:  4800CD45  bl 0x25482798
+	  30: MOVL       	$0x25475A58, t24
+	  31: PUTL       	t24, LR
+	  32: JMPo-c       	$0x25482798  ($4)
+
+
+
+. 533 25475A38 32
+. 81 5A 00 00 7C 80 20 50 80 BD 00 14 38 C0 08 12 81 1D 00 10 7C 6A 02 14 7E 67 9B 78 48 00 CD 45
+==== BB 534 (0x25475A58) approx BBs exec'd 0 ====
+
+	0x25475A58:  2F03FFFF  cmpi cr6,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25475A5C:  409AFEEC  bc 4,26,0x25475948
+	   5: Jc26o       	$0x25475948
+
+
+
+. 534 25475A58 8
+. 2F 03 FF FF 40 9A FE EC
+==== BB 535 (0x254759B4) approx BBs exec'd 0 ====
+
+	0x254759B4:  80DA0000  lwz r6,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x254759B8:  81710004  lwz r11,4(r17)
+	   4: GETL       	R17, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0x254759BC:  7F261A14  add r25,r6,r3
+	   9: GETL       	R6, t8
+	  10: GETL       	R3, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R25
+	  13: INCEIPL       	$4
+
+	0x254759C0:  7EE62214  add r23,r6,r4
+	  14: GETL       	R6, t12
+	  15: GETL       	R4, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R23
+	  18: INCEIPL       	$4
+
+	0x254759C4:  7D0BCA14  add r8,r11,r25
+	  19: GETL       	R11, t16
+	  20: GETL       	R25, t18
+	  21: ADDL       	t16, t18
+	  22: PUTL       	t18, R8
+	  23: INCEIPL       	$4
+
+	0x254759C8:  7C6B00D0  neg r3,r11
+	  24: GETL       	R11, t20
+	  25: NEGL       	t20
+	  26: PUTL       	t20, R3
+	  27: INCEIPL       	$4
+
+	0x254759CC:  3948FFFF  addi r10,r8,-1
+	  28: GETL       	R8, t22
+	  29: ADDL       	$0xFFFFFFFF, t22
+	  30: PUTL       	t22, R10
+	  31: INCEIPL       	$4
+
+	0x254759D0:  7D5C1838  and r28,r10,r3
+	  32: GETL       	R10, t24
+	  33: GETL       	R3, t26
+	  34: ANDL       	t24, t26
+	  35: PUTL       	t26, R28
+	  36: INCEIPL       	$4
+
+	0x254759D4:  7F97E040  cmpl cr7,r23,r28
+	  37: GETL       	R23, t28
+	  38: GETL       	R28, t30
+	  39: CMPUL       	t28, t30, t32  (-rSo)
+	  40: ICRFL       	t32, $0x7, CR
+	  41: INCEIPL       	$4
+
+	0x254759D8:  409C0008  bc 4,28,0x254759E0
+	  42: Jc28o       	$0x254759E0
+
+
+
+. 535 254759B4 40
+. 80 DA 00 00 81 71 00 04 7F 26 1A 14 7E E6 22 14 7D 0B CA 14 7C 6B 00 D0 39 48 FF FF 7D 5C 18 38 7F 97 E0 40 40 9C 00 08
+==== BB 536 (0x254759DC) approx BBs exec'd 0 ====
+
+	0x254759DC:  7EFCBB78  or r28,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0x254759E0:  7C9CC840  cmpl cr1,r28,r25
+	   3: GETL       	R28, t2
+	   4: GETL       	R25, t4
+	   5: CMPUL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x254759E4:  4085002C  bc 4,5,0x25475A10
+	   8: Jc05o       	$0x25475A10
+
+
+
+. 536 254759DC 12
+. 7E FC BB 78 7C 9C C8 40 40 85 00 2C
+==== BB 537 (0x254759E8) approx BBs exec'd 0 ====
+
+	0x254759E8:  80BD0014  lwz r5,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254759EC:  70AA0002  andi. r10,r5,0x2
+	   5: GETL       	R5, t4
+	   6: ANDL       	$0x2, t4
+	   7: PUTL       	t4, R10
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x254759F0:  41820838  bc 12,2,0x25476228
+	  11: Js02o       	$0x25476228
+
+
+
+. 537 254759E8 12
+. 80 BD 00 14 70 AA 00 02 41 82 08 38
+==== BB 538 (0x254759F4) approx BBs exec'd 0 ====
+
+	0x254759F4:  7CB9E050  subf r5,r25,r28
+	   0: GETL       	R25, t0
+	   1: GETL       	R28, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254759F8:  7F23CB78  or r3,r25,r25
+	   5: GETL       	R25, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254759FC:  38800000  li r4,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x25475A00:  4800DD45  bl 0x25483744
+	  11: MOVL       	$0x25475A04, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483744  ($4)
+
+
+
+. 538 254759F4 16
+. 7C B9 E0 50 7F 23 CB 78 38 80 00 00 48 00 DD 45
+==== BB 539 memset(0x25483744) approx BBs exec'd 0 ====
+
+	0x25483744:  28850004  cmpli cr1,r5,4
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x4, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25483748:  70670003  andi. r7,r3,0x3
+	   5: GETL       	R3, t6
+	   6: ANDL       	$0x3, t6
+	   7: PUTL       	t6, R7
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2548374C:  7C661B78  or r6,r3,r3
+	  11: GETL       	R3, t10
+	  12: PUTL       	t10, R6
+	  13: INCEIPL       	$4
+
+	0x25483750:  40850190  bc 4,5,0x254838E0
+	  14: Jc05o       	$0x254838E0
+
+
+
+. 539 25483744 16
+. 28 85 00 04 70 67 00 03 7C 66 1B 78 40 85 01 90
+==== BB 540 (0x25483754) approx BBs exec'd 0 ====
+
+	0x25483754:  2A85001F  cmpli cr5,r5,31
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x1F, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x5, CR
+	   4: INCEIPL       	$4
+
+	0x25483758:  5084442E  rlwimi r4,r4,8,16,23
+	   5: GETL       	R4, t6
+	   6: GETL       	R4, t8
+	   7: ROLL       	$0x8, t8
+	   8: ANDL       	$0xFF00, t8
+	   9: ANDL       	$0xFFFF00FF, t6
+	  10: ORL       	t6, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x2548375C:  41A20024  bc 13,2,0x25483780
+	  13: Js02o       	$0x25483780
+
+
+
+. 540 25483754 12
+. 2A 85 00 1F 50 84 44 2E 41 A2 00 24
+==== BB 541 (0x25483780) approx BBs exec'd 0 ====
+
+	0x25483780:  7CA01120  mtcrf 0x1,r5
+	   0: GETL       	R5, t0
+	   1: ICRFL       	t0, $0x7, CR
+	   2: INCEIPL       	$4
+
+	0x25483784:  5084801E  rlwimi r4,r4,16,0,15
+	   3: GETL       	R4, t2
+	   4: GETL       	R4, t4
+	   5: ROLL       	$0x10, t4
+	   6: ANDL       	$0xFFFF0000, t4
+	   7: ANDL       	$0xFFFF, t2
+	   8: ORL       	t2, t4
+	   9: PUTL       	t4, R4
+	  10: INCEIPL       	$4
+
+	0x25483788:  40950198  bc 4,21,0x25483920
+	  11: Jc21o       	$0x25483920
+
+
+
+. 541 25483780 12
+. 7C A0 11 20 50 84 80 1E 40 95 01 98
+==== BB 542 (0x2548378C) approx BBs exec'd 0 ====
+
+	0x2548378C:  70C7001C  andi. r7,r6,0x1C
+	   0: GETL       	R6, t0
+	   1: ANDL       	$0x1C, t0
+	   2: PUTL       	t0, R7
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25483790:  20E70020  subfic r7,r7,32
+	   6: GETL       	R7, t4
+	   7: MOVL       	$0x20, t6
+	   8: SBBL       	t4, t6  (-wCa)
+	   9: PUTL       	t6, R7
+	  10: INCEIPL       	$4
+
+	0x25483794:  41820040  bc 12,2,0x254837D4
+	  11: Js02o       	$0x254837D4
+
+
+
+. 542 2548378C 12
+. 70 C7 00 1C 20 E7 00 20 41 82 00 40
+==== BB 543 (0x25483798) approx BBs exec'd 0 ====
+
+	0x25483798:  7CE01120  mtcrf 0x1,r7
+	   0: GETL       	R7, t0
+	   1: ICRFL       	t0, $0x7, CR
+	   2: INCEIPL       	$4
+
+	0x2548379C:  7CC63A14  add r6,r6,r7
+	   3: GETL       	R6, t2
+	   4: GETL       	R7, t4
+	   5: ADDL       	t2, t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0x254837A0:  7CA72850  subf r5,r7,r5
+	   8: GETL       	R7, t6
+	   9: GETL       	R5, t8
+	  10: SUBL       	t6, t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x254837A4:  28870010  cmpli cr1,r7,16
+	  13: GETL       	R7, t10
+	  14: MOVL       	$0x10, t14
+	  15: CMPUL       	t10, t14, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0x254837A8:  7CC83378  or r8,r6,r6
+	  18: GETL       	R6, t16
+	  19: PUTL       	t16, R8
+	  20: INCEIPL       	$4
+
+	0x254837AC:  409C000C  bc 4,28,0x254837B8
+	  21: Jc28o       	$0x254837B8
+
+
+
+. 543 25483798 24
+. 7C E0 11 20 7C C6 3A 14 7C A7 28 50 28 87 00 10 7C C8 33 78 40 9C 00 0C
+==== BB 544 (0x254837B8) approx BBs exec'd 0 ====
+
+	0x254837B8:  41840014  bc 12,4,0x254837CC
+	   0: Js04o       	$0x254837CC
+
+
+
+. 544 254837B8 4
+. 41 84 00 14
+==== BB 545 (0x254837CC) approx BBs exec'd 0 ====
+
+	0x254837CC:  409D0008  bc 4,29,0x254837D4
+	   0: Jc29o       	$0x254837D4
+
+
+
+. 545 254837CC 4
+. 40 9D 00 08
+==== BB 546 (0x254837D0) approx BBs exec'd 0 ====
+
+	0x254837D0:  9088FFFC  stw r4,-4(r8)
+	   0: GETL       	R4, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254837D4:  28840000  cmpli cr1,r4,0
+	   5: GETL       	R4, t4
+	   6: MOVL       	$0x0, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x254837D8:  54A70035  rlwinm. r7,r5,0,0,26
+	  10: GETL       	R5, t10
+	  11: ANDL       	$0xFFFFFFE0, t10
+	  12: PUTL       	t10, R7
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x254837DC:  7CA01120  mtcrf 0x1,r5
+	  16: GETL       	R5, t14
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x254837E0:  418601A0  bc 12,6,0x25483980
+	  19: Js06o       	$0x25483980
+
+
+
+. 546 254837D0 20
+. 90 88 FF FC 28 84 00 00 54 A7 00 35 7C A0 11 20 41 86 01 A0
+==== BB 547 (0x25483980) approx BBs exec'd 0 ====
+
+	0x25483980:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25483984:  4182FF9C  bc 12,2,0x25483920
+	   3: Js02o       	$0x25483920
+
+
+
+. 547 25483980 8
+. 7C 08 02 A6 41 82 FF 9C
+==== BB 548 (0x25483988) approx BBs exec'd 0 ====
+
+	0x25483988:  48013679  bl 0x25497000
+	   0: MOVL       	$0x2548398C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 548 25483988 4
+. 48 01 36 79
+==== BB 549 (0x2548398C) approx BBs exec'd 0 ====
+
+	0x2548398C:  7D2802A6  mflr r9
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25483990:  812904F0  lwz r9,1264(r9)
+	   3: GETL       	R9, t2
+	   4: ADDL       	$0x4F0, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25483994:  81090000  lwz r8,0(r9)
+	   8: GETL       	R9, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R8
+	  11: INCEIPL       	$4
+
+	0x25483998:  7C0803A6  mtlr r0
+	  12: GETL       	R0, t10
+	  13: PUTL       	t10, LR
+	  14: INCEIPL       	$4
+
+	0x2548399C:  28880000  cmpli cr1,r8,0
+	  15: GETL       	R8, t12
+	  16: MOVL       	$0x0, t16
+	  17: CMPUL       	t12, t16, t14  (-rSo)
+	  18: ICRFL       	t14, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0x254839A0:  4186FE44  bc 12,6,0x254837E4
+	  20: Js06o       	$0x254837E4
+
+
+
+. 549 2548398C 24
+. 7D 28 02 A6 81 29 04 F0 81 09 00 00 7C 08 03 A6 28 88 00 00 41 86 FE 44
+==== BB 550 (0x254839A4) approx BBs exec'd 0 ====
+
+	0x254839A4:  28880020  cmpli cr1,r8,32
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x20, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254839A8:  4186FEBC  bc 12,6,0x25483864
+	   5: Js06o       	$0x25483864
+
+
+
+. 550 254839A4 8
+. 28 88 00 20 41 86 FE BC
+==== BB 551 (0x25483864) approx BBs exec'd 0 ====
+
+	0x25483864:  54A506FE  rlwinm r5,r5,0,27,31
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0x1F, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x25483868:  7CE02120  mtcrf 0x2,r7
+	   4: GETL       	R7, t2
+	   5: ICRFL       	t2, $0x6, CR
+	   6: INCEIPL       	$4
+
+	0x2548386C:  54E0C9FF  rlwinm. r0,r7,25,7,31
+	   7: GETL       	R7, t4
+	   8: SHRL       	$0x7, t4
+	   9: PUTL       	t4, R0
+	  10: CMP0L       	t4, t6  (-rSo)
+	  11: ICRFL       	t6, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25483870:  7C0903A6  mtctr r0
+	  13: GETL       	R0, t8
+	  14: PUTL       	t8, CTR
+	  15: INCEIPL       	$4
+
+	0x25483874:  38E00020  li r7,32
+	  16: MOVL       	$0x20, t10
+	  17: PUTL       	t10, R7
+	  18: INCEIPL       	$4
+
+	0x25483878:  3900FFC0  li r8,-64
+	  19: MOVL       	$0xFFFFFFC0, t12
+	  20: PUTL       	t12, R8
+	  21: INCEIPL       	$4
+
+	0x2548387C:  28850010  cmpli cr1,r5,16
+	  22: GETL       	R5, t14
+	  23: MOVL       	$0x10, t18
+	  24: CMPUL       	t14, t18, t16  (-rSo)
+	  25: ICRFL       	t16, $0x1, CR
+	  26: INCEIPL       	$4
+
+	0x25483880:  409A000C  bc 4,26,0x2548388C
+	  27: Jc26o       	$0x2548388C
+
+
+
+. 551 25483864 32
+. 54 A5 06 FE 7C E0 21 20 54 E0 C9 FF 7C 09 03 A6 38 E0 00 20 39 00 FF C0 28 85 00 10 40 9A 00 0C
+==== BB 552 (0x2548388C) approx BBs exec'd 0 ====
+
+	0x2548388C:  3920FFE0  li r9,-32
+	   0: MOVL       	$0xFFFFFFE0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25483890:  40990010  bc 4,25,0x254838A0
+	   3: Jc25o       	$0x254838A0
+
+
+
+. 552 2548388C 8
+. 39 20 FF E0 40 99 00 10
+==== BB 553 (0x254838A0) approx BBs exec'd 0 ====
+
+	0x254838A0:  2A850000  cmpli cr5,r5,0
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x0, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x5, CR
+	   4: INCEIPL       	$4
+
+	0x254838A4:  4182007C  bc 12,2,0x25483920
+	   5: Js02o       	$0x25483920
+
+
+
+. 553 254838A0 8
+. 2A 85 00 00 41 82 00 7C
+==== BB 554 (0x254838A8) approx BBs exec'd 0 ====
+
+	0x254838A8:  7C0037EC  dcbz r0,r6
+	   0: GETL       	R6, t0
+	   1: ANDL       	$0xFFFFFFE0, t0
+	   2: MOVL       	$0x0, t2
+	   3: STL       	t2, (t0)
+	   4: ADDL       	$0x4, t0
+	   5: STL       	t2, (t0)
+	   6: ADDL       	$0x4, t0
+	   7: STL       	t2, (t0)
+	   8: ADDL       	$0x4, t0
+	   9: STL       	t2, (t0)
+	  10: ADDL       	$0x4, t0
+	  11: STL       	t2, (t0)
+	  12: ADDL       	$0x4, t0
+	  13: STL       	t2, (t0)
+	  14: ADDL       	$0x4, t0
+	  15: STL       	t2, (t0)
+	  16: ADDL       	$0x4, t0
+	  17: STL       	t2, (t0)
+	  18: INCEIPL       	$4
+
+	0x254838AC:  7C0737EC  dcbz r7,r6
+	  19: GETL       	R6, t4
+	  20: GETL       	R7, t6
+	  21: ADDL       	t6, t4
+	  22: ANDL       	$0xFFFFFFE0, t4
+	  23: MOVL       	$0x0, t8
+	  24: STL       	t8, (t4)
+	  25: ADDL       	$0x4, t4
+	  26: STL       	t8, (t4)
+	  27: ADDL       	$0x4, t4
+	  28: STL       	t8, (t4)
+	  29: ADDL       	$0x4, t4
+	  30: STL       	t8, (t4)
+	  31: ADDL       	$0x4, t4
+	  32: STL       	t8, (t4)
+	  33: ADDL       	$0x4, t4
+	  34: STL       	t8, (t4)
+	  35: ADDL       	$0x4, t4
+	  36: STL       	t8, (t4)
+	  37: ADDL       	$0x4, t4
+	  38: STL       	t8, (t4)
+	  39: INCEIPL       	$4
+
+	0x254838B0:  38C60080  addi r6,r6,128
+	  40: GETL       	R6, t10
+	  41: ADDL       	$0x80, t10
+	  42: PUTL       	t10, R6
+	  43: INCEIPL       	$4
+
+	0x254838B4:  7C0837EC  dcbz r8,r6
+	  44: GETL       	R6, t12
+	  45: GETL       	R8, t14
+	  46: ADDL       	t14, t12
+	  47: ANDL       	$0xFFFFFFE0, t12
+	  48: MOVL       	$0x0, t16
+	  49: STL       	t16, (t12)
+	  50: ADDL       	$0x4, t12
+	  51: STL       	t16, (t12)
+	  52: ADDL       	$0x4, t12
+	  53: STL       	t16, (t12)
+	  54: ADDL       	$0x4, t12
+	  55: STL       	t16, (t12)
+	  56: ADDL       	$0x4, t12
+	  57: STL       	t16, (t12)
+	  58: ADDL       	$0x4, t12
+	  59: STL       	t16, (t12)
+	  60: ADDL       	$0x4, t12
+	  61: STL       	t16, (t12)
+	  62: ADDL       	$0x4, t12
+	  63: STL       	t16, (t12)
+	  64: INCEIPL       	$4
+
+	0x254838B8:  7C0937EC  dcbz r9,r6
+	  65: GETL       	R6, t18
+	  66: GETL       	R9, t20
+	  67: ADDL       	t20, t18
+	  68: ANDL       	$0xFFFFFFE0, t18
+	  69: MOVL       	$0x0, t22
+	  70: STL       	t22, (t18)
+	  71: ADDL       	$0x4, t18
+	  72: STL       	t22, (t18)
+	  73: ADDL       	$0x4, t18
+	  74: STL       	t22, (t18)
+	  75: ADDL       	$0x4, t18
+	  76: STL       	t22, (t18)
+	  77: ADDL       	$0x4, t18
+	  78: STL       	t22, (t18)
+	  79: ADDL       	$0x4, t18
+	  80: STL       	t22, (t18)
+	  81: ADDL       	$0x4, t18
+	  82: STL       	t22, (t18)
+	  83: ADDL       	$0x4, t18
+	  84: STL       	t22, (t18)
+	  85: INCEIPL       	$4
+
+	0x254838BC:  4200FFEC  bc 16,0,0x254838A8
+	  86: GETL       	CTR, t24
+	  87: ADDL       	$0xFFFFFFFF, t24
+	  88: PUTL       	t24, CTR
+	  89: JIFZL       	t24, $0x254838C0
+	  90: JMPo       	$0x254838A8  ($4)
+
+
+
+. 554 254838A8 24
+. 7C 00 37 EC 7C 07 37 EC 38 C6 00 80 7C 08 37 EC 7C 09 37 EC 42 00 FF EC
+==== BB 555 (0x254838C0) approx BBs exec'd 0 ====
+
+	0x254838C0:  4D960020  bclr 12,22
+	   0: GETL       	LR, t0
+	   1: Js22o-r       	t0
+
+
+
+. 555 254838C0 4
+. 4D 96 00 20
+==== BB 556 (0x254838C4) approx BBs exec'd 0 ====
+
+	0x254838C4:  48000060  b 0x25483924
+	   0: JMPo       	$0x25483924  ($4)
+
+
+
+. 556 254838C4 4
+. 48 00 00 60
+==== BB 557 (0x25483924) approx BBs exec'd 0 ====
+
+	0x25483924:  7CC62A14  add r6,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25483928:  419F0020  bc 12,31,0x25483948
+	   5: Js31o       	$0x25483948
+
+
+
+. 557 25483924 8
+. 7C C6 2A 14 41 9F 00 20
+==== BB 558 (0x2548392C) approx BBs exec'd 0 ====
+
+	0x2548392C:  419E0024  bc 12,30,0x25483950
+	   0: Js30o       	$0x25483950
+
+
+
+. 558 2548392C 4
+. 41 9E 00 24
+==== BB 559 (0x25483930) approx BBs exec'd 0 ====
+
+	0x25483930:  419D0028  bc 12,29,0x25483958
+	   0: Js29o       	$0x25483958
+
+
+
+. 559 25483930 4
+. 41 9D 00 28
+==== BB 560 (0x25483958) approx BBs exec'd 0 ====
+
+	0x25483958:  9486FFFC  stwu r4,-4(r6)
+	   0: GETL       	R4, t0
+	   1: GETL       	R6, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: PUTL       	t2, R6
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2548395C:  41840014  bc 12,4,0x25483970
+	   6: Js04o       	$0x25483970
+
+
+
+. 560 25483958 8
+. 94 86 FF FC 41 84 00 14
+==== BB 561 (0x25483970) approx BBs exec'd 0 ====
+
+	0x25483970:  4C9C0020  bclr 4,28
+	   0: GETL       	LR, t0
+	   1: Jc28o-r       	t0
+
+
+
+. 561 25483970 4
+. 4C 9C 00 20
+==== BB 562 (0x25475A04) approx BBs exec'd 0 ====
+
+	0x25475A04:  80BD0014  lwz r5,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25475A08:  70A00002  andi. r0,r5,0x2
+	   5: GETL       	R5, t4
+	   6: ANDL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475A0C:  41820808  bc 12,2,0x25476214
+	  11: Js02o       	$0x25476214
+
+
+
+. 562 25475A04 12
+. 80 BD 00 14 70 A0 00 02 41 82 08 08
+==== BB 563 (0x25475A10) approx BBs exec'd 0 ====
+
+	0x25475A10:  7F97E040  cmpl cr7,r23,r28
+	   0: GETL       	R23, t0
+	   1: GETL       	R28, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25475A14:  419D06AC  bc 12,29,0x254760C0
+	   5: Js29o       	$0x254760C0
+
+
+
+. 563 25475A10 8
+. 7F 97 E0 40 41 9D 06 AC
+==== BB 564 (0x25475C1C) approx BBs exec'd 0 ====
+
+	0x25475C1C:  82A10000  lwz r21,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R21
+	   3: INCEIPL       	$4
+
+	0x25475C20:  7E419378  or r1,r18,r18
+	   4: GETL       	R18, t4
+	   5: PUTL       	t4, R1
+	   6: INCEIPL       	$4
+
+	0x25475C24:  92A10000  stw r21,0(r1)
+	   7: GETL       	R21, t6
+	   8: GETL       	R1, t8
+	   9: STL       	t6, (t8)
+	  10: INCEIPL       	$4
+
+	0x25475C28:  813A0008  lwz r9,8(r26)
+	  11: GETL       	R26, t10
+	  12: ADDL       	$0x8, t10
+	  13: LDL       	(t10), t12
+	  14: PUTL       	t12, R9
+	  15: INCEIPL       	$4
+
+	0x25475C2C:  2F090000  cmpi cr6,r9,0
+	  16: GETL       	R9, t14
+	  17: CMP0L       	t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x6, CR
+	  19: INCEIPL       	$4
+
+	0x25475C30:  409A0264  bc 4,26,0x25475E94
+	  20: Jc26o       	$0x25475E94
+
+
+
+. 564 25475C1C 24
+. 82 A1 00 00 7E 41 93 78 92 A1 00 00 81 3A 00 08 2F 09 00 00 40 9A 02 64
+==== BB 565 (0x25475E94) approx BBs exec'd 0 ====
+
+	0x25475E94:  825A0000  lwz r18,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R18
+	   3: INCEIPL       	$4
+
+	0x25475E98:  7D299214  add r9,r9,r18
+	   4: GETL       	R9, t4
+	   5: GETL       	R18, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x25475E9C:  913A0008  stw r9,8(r26)
+	   9: GETL       	R9, t8
+	  10: GETL       	R26, t10
+	  11: ADDL       	$0x8, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25475EA0:  4BFFFD98  b 0x25475C38
+	  14: JMPo       	$0x25475C38  ($4)
+
+
+
+. 565 25475E94 16
+. 82 5A 00 00 7D 29 92 14 91 3A 00 08 4B FF FD 98
+==== BB 566 (0x25475C38) approx BBs exec'd 0 ====
+
+	0x25475C38:  2E090000  cmpi cr4,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25475C3C:  7D2A4B78  or r10,r9,r9
+	   4: GETL       	R9, t4
+	   5: PUTL       	t4, R10
+	   6: INCEIPL       	$4
+
+	0x25475C40:  41920264  bc 12,18,0x25475EA4
+	   7: Js18o       	$0x25475EA4
+
+
+
+. 566 25475C38 12
+. 2E 09 00 00 7D 2A 4B 78 41 92 02 64
+==== BB 567 (0x25475C44) approx BBs exec'd 0 ====
+
+	0x25475C44:  81690000  lwz r11,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25475C48:  391A0020  addi r8,r26,32
+	   4: GETL       	R26, t4
+	   5: ADDL       	$0x20, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0x25475C4C:  2C0B0000  cmpi cr0,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25475C50:  41820064  bc 12,2,0x25475CB4
+	  12: Js02o       	$0x25475CB4
+
+
+
+. 567 25475C44 16
+. 81 69 00 00 39 1A 00 20 2C 0B 00 00 41 82 00 64
+==== BB 568 (0x25475C54) approx BBs exec'd 0 ====
+
+	0x25475C54:  3C607000  lis r3,28672
+	   0: MOVL       	$0x70000000, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475C58:  3CE06FFF  lis r7,28671
+	   3: MOVL       	$0x6FFF0000, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x25475C5C:  3CA06FFF  lis r5,28671
+	   6: MOVL       	$0x6FFF0000, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25475C60:  3F806FFF  lis r28,28671
+	   9: MOVL       	$0x6FFF0000, t6
+	  10: PUTL       	t6, R28
+	  11: INCEIPL       	$4
+
+	0x25475C64:  3DE06FFF  lis r15,28671
+	  12: MOVL       	$0x6FFF0000, t8
+	  13: PUTL       	t8, R15
+	  14: INCEIPL       	$4
+
+	0x25475C68:  3FA06FFF  lis r29,28671
+	  15: MOVL       	$0x6FFF0000, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0x25475C6C:  60660021  ori r6,r3,0x21
+	  18: MOVL       	$0x70000021, t12
+	  19: PUTL       	t12, R6
+	  20: INCEIPL       	$4
+
+	0x25475C70:  60E7FFFF  ori r7,r7,0xFFFF
+	  21: MOVL       	$0x6FFFFFFF, t14
+	  22: PUTL       	t14, R7
+	  23: INCEIPL       	$4
+
+	0x25475C74:  60A5FDFF  ori r5,r5,0xFDFF
+	  24: MOVL       	$0x6FFFFDFF, t16
+	  25: PUTL       	t16, R5
+	  26: INCEIPL       	$4
+
+	0x25475C78:  6384FE34  ori r4,r28,0xFE34
+	  27: MOVL       	$0x6FFFFE34, t18
+	  28: PUTL       	t18, R4
+	  29: INCEIPL       	$4
+
+	0x25475C7C:  61E3FEFF  ori r3,r15,0xFEFF
+	  30: MOVL       	$0x6FFFFEFF, t20
+	  31: PUTL       	t20, R3
+	  32: INCEIPL       	$4
+
+	0x25475C80:  63BDFF40  ori r29,r29,0xFF40
+	  33: MOVL       	$0x6FFFFF40, t22
+	  34: PUTL       	t22, R29
+	  35: INCEIPL       	$4
+
+	0x25475C84:  2F8B0021  cmpi cr7,r11,33
+	  36: GETL       	R11, t24
+	  37: MOVL       	$0x21, t28
+	  38: CMPL       	t24, t28, t26  (-rSo)
+	  39: ICRFL       	t26, $0x7, CR
+	  40: INCEIPL       	$4
+
+	0x25475C88:  5560103A  rlwinm r0,r11,2,0,29
+	  41: GETL       	R11, t30
+	  42: SHLL       	$0x2, t30
+	  43: PUTL       	t30, R0
+	  44: INCEIPL       	$4
+
+	0x25475C8C:  409D0018  bc 4,29,0x25475CA4
+	  45: Jc29o       	$0x25475CA4
+
+
+
+. 568 25475C54 60
+. 3C 60 70 00 3C E0 6F FF 3C A0 6F FF 3F 80 6F FF 3D E0 6F FF 3F A0 6F FF 60 66 00 21 60 E7 FF FF 60 A5 FD FF 63 84 FE 34 61 E3 FE FF 63 BD FF 40 2F 8B 00 21 55 60 10 3A 40 9D 00 18
+==== BB 569 (0x25475CA4) approx BBs exec'd 0 ====
+
+	0x25475CA4:  7D48012E  stwx r10,r8,r0
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t2, t0
+	   3: GETL       	R10, t4
+	   4: STL       	t4, (t0)
+	   5: INCEIPL       	$4
+
+	0x25475CA8:  856A0008  lwzu r11,8(r10)
+	   6: GETL       	R10, t6
+	   7: ADDL       	$0x8, t6
+	   8: PUTL       	t6, R10
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x25475CAC:  2F8B0000  cmpi cr7,r11,0
+	  12: GETL       	R11, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0x25475CB0:  409EFFD4  bc 4,30,0x25475C84
+	  16: Jc30o       	$0x25475C84
+
+
+
+. 569 25475CA4 16
+. 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+==== BB 570 (0x25475C84) approx BBs exec'd 0 ====
+
+	0x25475C84:  2F8B0021  cmpi cr7,r11,33
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x21, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25475C88:  5560103A  rlwinm r0,r11,2,0,29
+	   5: GETL       	R11, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x25475C8C:  409D0018  bc 4,29,0x25475CA4
+	   9: Jc29o       	$0x25475CA4
+
+
+
+. 570 25475C84 12
+. 2F 8B 00 21 55 60 10 3A 40 9D 00 18
+==== BB 571 (0x25475C90) approx BBs exec'd 0 ====
+
+	0x25475C90:  7D2B3850  subf r9,r11,r7
+	   0: GETL       	R11, t0
+	   1: GETL       	R7, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475C94:  2889000F  cmpli cr1,r9,15
+	   5: GETL       	R9, t4
+	   6: MOVL       	$0xF, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25475C98:  418503BC  bc 12,5,0x25476054
+	  10: Js05o       	$0x25476054
+
+
+
+. 571 25475C90 12
+. 7D 2B 38 50 28 89 00 0F 41 85 03 BC
+==== BB 572 (0x25475C9C) approx BBs exec'd 0 ====
+
+	0x25475C9C:  7C0B3050  subf r0,r11,r6
+	   0: GETL       	R11, t0
+	   1: GETL       	R6, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475CA0:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25475CA4:  7D48012E  stwx r10,r8,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R8, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R10, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x25475CA8:  856A0008  lwzu r11,8(r10)
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x8, t12
+	  17: PUTL       	t12, R10
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0x25475CAC:  2F8B0000  cmpi cr7,r11,0
+	  21: GETL       	R11, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x25475CB0:  409EFFD4  bc 4,30,0x25475C84
+	  25: Jc30o       	$0x25475C84
+
+
+
+. 572 25475C9C 24
+. 7C 0B 30 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+==== BB 573 (0x25475CB4) approx BBs exec'd 0 ====
+
+	0x25475CB4:  817A0000  lwz r11,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25475CB8:  2C8B0000  cmpi cr1,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x25475CBC:  418600AC  bc 12,6,0x25475D68
+	   8: Js06o       	$0x25475D68
+
+
+
+. 573 25475CB4 12
+. 81 7A 00 00 2C 8B 00 00 41 86 00 AC
+==== BB 574 (0x25475CC0) approx BBs exec'd 0 ====
+
+	0x25475CC0:  81280010  lwz r9,16(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475CC4:  2F090000  cmpi cr6,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475CC8:  419A0010  bc 12,26,0x25475CD8
+	   9: Js26o       	$0x25475CD8
+
+
+
+. 574 25475CC0 12
+. 81 28 00 10 2F 09 00 00 41 9A 00 10
+==== BB 575 (0x25475CCC) approx BBs exec'd 0 ====
+
+	0x25475CCC:  81490004  lwz r10,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25475CD0:  7C8A5A14  add r4,r10,r11
+	   5: GETL       	R10, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25475CD4:  90890004  stw r4,4(r9)
+	  10: GETL       	R4, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475CD8:  8128000C  lwz r9,12(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0xC, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475CDC:  2E090000  cmpi cr4,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x4, CR
+	  23: INCEIPL       	$4
+
+	0x25475CE0:  41920010  bc 12,18,0x25475CF0
+	  24: Js18o       	$0x25475CF0
+
+
+
+. 575 25475CCC 24
+. 81 49 00 04 7C 8A 5A 14 90 89 00 04 81 28 00 0C 2E 09 00 00 41 92 00 10
+==== BB 576 (0x25475CE4) approx BBs exec'd 0 ====
+
+	0x25475CE4:  82490004  lwz r18,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x25475CE8:  7CD25A14  add r6,r18,r11
+	   5: GETL       	R18, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25475CEC:  90C90004  stw r6,4(r9)
+	  10: GETL       	R6, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475CF0:  81280014  lwz r9,20(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x14, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475CF4:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25475CF8:  41820010  bc 12,2,0x25475D08
+	  24: Js02o       	$0x25475D08
+
+
+
+. 576 25475CE4 24
+. 82 49 00 04 7C D2 5A 14 90 C9 00 04 81 28 00 14 2C 09 00 00 41 82 00 10
+==== BB 577 (0x25475CFC) approx BBs exec'd 0 ====
+
+	0x25475CFC:  81E90004  lwz r15,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0x25475D00:  7FAF5A14  add r29,r15,r11
+	   5: GETL       	R15, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x25475D04:  93A90004  stw r29,4(r9)
+	  10: GETL       	R29, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D08:  81280018  lwz r9,24(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x18, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D0C:  2F890000  cmpi cr7,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0x25475D10:  419E0010  bc 12,30,0x25475D20
+	  24: Js30o       	$0x25475D20
+
+
+
+. 577 25475CFC 24
+. 81 E9 00 04 7F AF 5A 14 93 A9 00 04 81 28 00 18 2F 89 00 00 41 9E 00 10
+==== BB 578 (0x25475D14) approx BBs exec'd 0 ====
+
+	0x25475D14:  80A90004  lwz r5,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25475D18:  7F855A14  add r28,r5,r11
+	   5: GETL       	R5, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25475D1C:  93890004  stw r28,4(r9)
+	  10: GETL       	R28, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D20:  8128001C  lwz r9,28(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D24:  2C890000  cmpi cr1,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x1, CR
+	  23: INCEIPL       	$4
+
+	0x25475D28:  41860010  bc 12,6,0x25475D38
+	  24: Js06o       	$0x25475D38
+
+
+
+. 578 25475D14 24
+. 80 A9 00 04 7F 85 5A 14 93 89 00 04 81 28 00 1C 2C 89 00 00 41 86 00 10
+==== BB 579 (0x25475D2C) approx BBs exec'd 0 ====
+
+	0x25475D2C:  80E90004  lwz r7,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25475D30:  7C675A14  add r3,r7,r11
+	   5: GETL       	R7, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25475D34:  90690004  stw r3,4(r9)
+	  10: GETL       	R3, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D38:  8128005C  lwz r9,92(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x5C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D3C:  2F090000  cmpi cr6,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x6, CR
+	  23: INCEIPL       	$4
+
+	0x25475D40:  419A0010  bc 12,26,0x25475D50
+	  24: Js26o       	$0x25475D50
+
+
+
+. 579 25475D2C 24
+. 80 E9 00 04 7C 67 5A 14 90 69 00 04 81 28 00 5C 2F 09 00 00 41 9A 00 10
+==== BB 580 (0x25475D44) approx BBs exec'd 0 ====
+
+	0x25475D44:  80090004  lwz r0,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475D48:  7EE05A14  add r23,r0,r11
+	   5: GETL       	R0, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R23
+	   9: INCEIPL       	$4
+
+	0x25475D4C:  92E90004  stw r23,4(r9)
+	  10: GETL       	R23, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D50:  812800C4  lwz r9,196(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0xC4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D54:  2E090000  cmpi cr4,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x4, CR
+	  23: INCEIPL       	$4
+
+	0x25475D58:  41920010  bc 12,18,0x25475D68
+	  24: Js18o       	$0x25475D68
+
+
+
+. 580 25475D44 24
+. 80 09 00 04 7E E0 5A 14 92 E9 00 04 81 28 00 C4 2E 09 00 00 41 92 00 10
+==== BB 581 (0x25475D5C) approx BBs exec'd 0 ====
+
+	0x25475D5C:  83290004  lwz r25,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x25475D60:  7D995A14  add r12,r25,r11
+	   5: GETL       	R25, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25475D64:  91890004  stw r12,4(r9)
+	  10: GETL       	R12, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D68:  81280078  lwz r9,120(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x78, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D6C:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25475D70:  41820030  bc 12,2,0x25475DA0
+	  24: Js02o       	$0x25475DA0
+
+
+
+. 581 25475D5C 24
+. 83 29 00 04 7D 99 5A 14 91 89 00 04 81 28 00 78 2C 09 00 00 41 82 00 30
+==== BB 582 (0x25475DA0) approx BBs exec'd 0 ====
+
+	0x25475DA0:  81280098  lwz r9,152(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x98, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475DA4:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25475DA8:  419E0354  bc 12,30,0x254760FC
+	   9: Js30o       	$0x254760FC
+
+
+
+. 582 25475DA0 12
+. 81 28 00 98 2F 89 00 00 41 9E 03 54
+==== BB 583 (0x25475DAC) approx BBs exec'd 0 ====
+
+	0x25475DAC:  81690004  lwz r11,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25475DB0:  71600001  andi. r0,r11,0x1
+	   5: GETL       	R11, t4
+	   6: ANDL       	$0x1, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475DB4:  917A01FC  stw r11,508(r26)
+	  11: GETL       	R11, t8
+	  12: GETL       	R26, t10
+	  13: ADDL       	$0x1FC, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25475DB8:  41820008  bc 12,2,0x25475DC0
+	  16: Js02o       	$0x25475DC0
+
+
+
+. 583 25475DAC 16
+. 81 69 00 04 71 60 00 01 91 7A 01 FC 41 82 00 08
+==== BB 584 (0x25475DC0) approx BBs exec'd 0 ====
+
+	0x25475DC0:  81280074  lwz r9,116(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475DC4:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25475DC8:  418600E0  bc 12,6,0x25475EA8
+	   9: Js06o       	$0x25475EA8
+
+
+
+. 584 25475DC0 12
+. 81 28 00 74 2C 89 00 00 41 86 00 E0
+==== BB 585 (0x25475EA8) approx BBs exec'd 0 ====
+
+	0x25475EA8:  71690040  andi. r9,r11,0x40
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x40, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475EAC:  408202A4  bc 4,2,0x25476150
+	   6: Jc02o       	$0x25476150
+
+
+
+. 585 25475EA8 8
+. 71 69 00 40 40 82 02 A4
+==== BB 586 (0x25475EB0) approx BBs exec'd 0 ====
+
+	0x25475EB0:  813A014C  lwz r9,332(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x14C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475EB4:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25475EB8:  419E01CC  bc 12,30,0x25476084
+	   9: Js30o       	$0x25476084
+
+
+
+. 586 25475EB0 12
+. 81 3A 01 4C 2F 89 00 00 41 9E 01 CC
+==== BB 587 (0x25475EBC) approx BBs exec'd 0 ====
+
+	0x25475EBC:  825A0000  lwz r18,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R18
+	   3: INCEIPL       	$4
+
+	0x25475EC0:  7CC99214  add r6,r9,r18
+	   4: GETL       	R9, t4
+	   5: GETL       	R18, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x25475EC4:  90DA014C  stw r6,332(r26)
+	   9: GETL       	R6, t8
+	  10: GETL       	R26, t10
+	  11: ADDL       	$0x14C, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25475EC8:  80BE04C8  lwz r5,1224(r30)
+	  14: GETL       	R30, t12
+	  15: ADDL       	$0x4C8, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R5
+	  18: INCEIPL       	$4
+
+	0x25475ECC:  839F00A4  lwz r28,164(r31)
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0xA4, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R28
+	  23: INCEIPL       	$4
+
+	0x25475ED0:  81E50400  lwz r15,1024(r5)
+	  24: GETL       	R5, t20
+	  25: ADDL       	$0x400, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R15
+	  28: INCEIPL       	$4
+
+	0x25475ED4:  7F9D7878  andc r29,r28,r15
+	  29: GETL       	R28, t24
+	  30: GETL       	R15, t26
+	  31: NOTL       	t26
+	  32: ANDL       	t24, t26
+	  33: PUTL       	t26, R29
+	  34: INCEIPL       	$4
+
+	0x25475ED8:  73AB0001  andi. r11,r29,0x1
+	  35: GETL       	R29, t28
+	  36: ANDL       	$0x1, t28
+	  37: PUTL       	t28, R11
+	  38: CMP0L       	t28, t30  (-rSo)
+	  39: ICRFL       	t30, $0x0, CR
+	  40: INCEIPL       	$4
+
+	0x25475EDC:  408202B0  bc 4,2,0x2547618C
+	  41: Jc02o       	$0x2547618C
+
+
+
+. 587 25475EBC 36
+. 82 5A 00 00 7C C9 92 14 90 DA 01 4C 80 BE 04 C8 83 9F 00 A4 81 E5 04 00 7F 9D 78 78 73 AB 00 01 40 82 02 B0
+==== BB 588 (0x25475EE0) approx BBs exec'd 0 ====
+
+	0x25475EE0:  813A0218  lwz r9,536(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x218, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475EE4:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25475EE8:  419E0010  bc 12,30,0x25475EF8
+	   9: Js30o       	$0x25475EF8
+
+
+
+. 588 25475EE0 12
+. 81 3A 02 18 2F 89 00 00 41 9E 00 10
+==== BB 589 (0x25475EF8) approx BBs exec'd 0 ====
+
+	0x25475EF8:  7E639B78  or r3,r19,r19
+	   0: GETL       	R19, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475EFC:  4800C0F5  bl 0x25481FF0
+	   3: MOVL       	$0x25475F00, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25481FF0  ($4)
+
+
+
+. 589 25475EF8 8
+. 7E 63 9B 78 48 00 C0 F5
+==== BB 590 close(0x25481FF0) approx BBs exec'd 0 ====
+
+	0x25481FF0:  38000006  li r0,6
+	   0: MOVL       	$0x6, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25481FF4:  44000002  sc
+	   3: JMPo-sys       	$0x25481FF8  ($4)
+
+
+
+. 590 25481FF0 8
+. 38 00 00 06 44 00 00 02
+==== BB 591 (0x25481FF8) approx BBs exec'd 0 ====
+
+	0x25481FF8:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 591 25481FF8 4
+. 4C A3 00 20
+==== BB 592 (0x25475F00) approx BBs exec'd 0 ====
+
+	0x25475F00:  811E01C8  lwz r8,456(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25475F04:  2C830000  cmpi cr1,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25475F08:  40A6FB68  bc 5,6,0x25475A70
+	   9: Jc06o       	$0x25475A70
+
+
+
+. 592 25475F00 12
+. 81 1E 01 C8 2C 83 00 00 40 A6 FB 68
+==== BB 593 (0x25475F0C) approx BBs exec'd 0 ====
+
+	0x25475F0C:  817A0180  lwz r11,384(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25475F10:  81FF009C  lwz r15,156(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x9C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R15
+	   9: INCEIPL       	$4
+
+	0x25475F14:  557D0002  rlwinm r29,r11,0,0,1
+	  10: GETL       	R11, t8
+	  11: ANDL       	$0xC0000000, t8
+	  12: PUTL       	t8, R29
+	  13: INCEIPL       	$4
+
+	0x25475F18:  69F30002  xori r19,r15,0x2
+	  14: GETL       	R15, t10
+	  15: XORL       	$0x2, t10
+	  16: PUTL       	t10, R19
+	  17: INCEIPL       	$4
+
+	0x25475F1C:  21330000  subfic r9,r19,0
+	  18: GETL       	R19, t12
+	  19: MOVL       	$0x0, t14
+	  20: SBBL       	t12, t14  (-wCa)
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0x25475F20:  7E699914  adde r19,r9,r19
+	  23: GETL       	R9, t16
+	  24: GETL       	R19, t18
+	  25: ADCL       	t16, t18  (-rCa-wCa)
+	  26: PUTL       	t18, R19
+	  27: INCEIPL       	$4
+
+	0x25475F24:  6FB24000  xoris r18,r29,0x4000
+	  28: GETL       	R29, t20
+	  29: XORL       	$0x40000000, t20
+	  30: PUTL       	t20, R18
+	  31: INCEIPL       	$4
+
+	0x25475F28:  21520000  subfic r10,r18,0
+	  32: GETL       	R18, t22
+	  33: MOVL       	$0x0, t24
+	  34: SBBL       	t22, t24  (-wCa)
+	  35: PUTL       	t24, R10
+	  36: INCEIPL       	$4
+
+	0x25475F2C:  7E4A9114  adde r18,r10,r18
+	  37: GETL       	R10, t26
+	  38: GETL       	R18, t28
+	  39: ADCL       	t26, t28  (-rCa-wCa)
+	  40: PUTL       	t28, R18
+	  41: INCEIPL       	$4
+
+	0x25475F30:  7E4A9839  and. r10,r18,r19
+	  42: GETL       	R18, t30
+	  43: GETL       	R19, t32
+	  44: ANDL       	t30, t32
+	  45: PUTL       	t32, R10
+	  46: CMP0L       	t32, t34  (-rSo)
+	  47: ICRFL       	t34, $0x0, CR
+	  48: INCEIPL       	$4
+
+	0x25475F34:  3A60FFFF  li r19,-1
+	  49: MOVL       	$0xFFFFFFFF, t36
+	  50: PUTL       	t36, R19
+	  51: INCEIPL       	$4
+
+	0x25475F38:  408200BC  bc 4,2,0x25475FF4
+	  52: Jc02o       	$0x25475FF4
+
+
+
+. 593 25475F0C 48
+. 81 7A 01 80 81 FF 00 9C 55 7D 00 02 69 F3 00 02 21 33 00 00 7E 69 99 14 6F B2 40 00 21 52 00 00 7E 4A 91 14 7E 4A 98 39 3A 60 FF FF 40 82 00 BC
+==== BB 594 (0x25475F3C) approx BBs exec'd 0 ====
+
+	0x25475F3C:  807A0150  lwz r3,336(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x150, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475F40:  80FA0000  lwz r7,0(r26)
+	   5: GETL       	R26, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R7
+	   8: INCEIPL       	$4
+
+	0x25475F44:  7D633A14  add r11,r3,r7
+	   9: GETL       	R3, t8
+	  10: GETL       	R7, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0x25475F48:  917A0150  stw r11,336(r26)
+	  14: GETL       	R11, t12
+	  15: GETL       	R26, t14
+	  16: ADDL       	$0x150, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25475F4C:  80B10000  lwz r5,0(r17)
+	  19: GETL       	R17, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R5
+	  22: INCEIPL       	$4
+
+	0x25475F50:  70A90040  andi. r9,r5,0x40
+	  23: GETL       	R5, t20
+	  24: ANDL       	$0x40, t20
+	  25: PUTL       	t20, R9
+	  26: CMP0L       	t20, t22  (-rSo)
+	  27: ICRFL       	t22, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0x25475F54:  40820278  bc 4,2,0x254761CC
+	  29: Jc02o       	$0x254761CC
+
+
+
+. 594 25475F3C 28
+. 80 7A 01 50 80 FA 00 00 7D 63 3A 14 91 7A 01 50 80 B1 00 00 70 A9 00 40 40 82 02 78
+==== BB 595 (0x25475F58) approx BBs exec'd 0 ====
+
+	0x25475F58:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475F5C:  48002611  bl 0x2547856C
+	   3: MOVL       	$0x25475F60, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547856C  ($4)
+
+
+
+. 595 25475F58 8
+. 7F 43 D3 78 48 00 26 11
+==== BB 596 (0x25475F60) approx BBs exec'd 0 ====
+
+	0x25475F60:  72C00008  andi. r0,r22,0x8
+	   0: GETL       	R22, t0
+	   1: ANDL       	$0x8, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475F64:  40820010  bc 4,2,0x25475F74
+	   6: Jc02o       	$0x25475F74
+
+
+
+. 596 25475F60 8
+. 72 C0 00 08 40 82 00 10
+==== BB 597 (0x25475F68) approx BBs exec'd 0 ====
+
+	0x25475F68:  81DA0060  lwz r14,96(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R14
+	   4: INCEIPL       	$4
+
+	0x25475F6C:  2F0E0000  cmpi cr6,r14,0
+	   5: GETL       	R14, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475F70:  409A02E0  bc 4,26,0x25476250
+	   9: Jc26o       	$0x25476250
+
+
+
+. 597 25475F68 12
+. 81 DA 00 60 2F 0E 00 00 40 9A 02 E0
+==== BB 598 (0x25475F74) approx BBs exec'd 0 ====
+
+	0x25475F74:  811A01FC  lwz r8,508(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25475F78:  71090020  andi. r9,r8,0x20
+	   5: GETL       	R8, t4
+	   6: ANDL       	$0x20, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475F7C:  4182000C  bc 12,2,0x25475F88
+	  11: Js02o       	$0x25475F88
+
+
+
+. 598 25475F74 12
+. 81 1A 01 FC 71 09 00 20 41 82 00 0C
+==== BB 599 (0x25475F80) approx BBs exec'd 0 ====
+
+	0x25475F80:  813E04C8  lwz r9,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475F84:  934901A0  stw r26,416(r9)
+	   5: GETL       	R26, t4
+	   6: GETL       	R9, t6
+	   7: ADDL       	$0x1A0, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25475F88:  80910050  lwz r4,80(r17)
+	  10: GETL       	R17, t8
+	  11: ADDL       	$0x50, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x25475F8C:  C81F0028  lfd f0,40(r31)
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x28, t12
+	  17: FPU_RQ       	(t12), 0x0:0x0
+	  18: INCEIPL       	$4
+
+	0x25475F90:  2C840000  cmpi cr1,r4,0
+	  19: GETL       	R4, t14
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0x25475F94:  D81A01D0  stfd f0,464(r26)
+	  23: GETL       	R26, t18
+	  24: ADDL       	$0x1D0, t18
+	  25: FPU_WQ       	0x0:0x0, (t18)
+	  26: INCEIPL       	$4
+
+	0x25475F98:  C9BF0030  lfd f13,48(r31)
+	  27: GETL       	R31, t20
+	  28: ADDL       	$0x30, t20
+	  29: FPU_RQ       	(t20), 0x0:0xD
+	  30: INCEIPL       	$4
+
+	0x25475F9C:  D9BA01D8  stfd f13,472(r26)
+	  31: GETL       	R26, t22
+	  32: ADDL       	$0x1D8, t22
+	  33: FPU_WQ       	0x0:0xD, (t22)
+	  34: INCEIPL       	$4
+
+	0x25475FA0:  4186FEB0  bc 12,6,0x25475E50
+	  35: Js06o       	$0x25475E50
+
+
+
+. 599 25475F80 36
+. 81 3E 04 C8 93 49 01 A0 80 91 00 50 C8 1F 00 28 2C 84 00 00 D8 1A 01 D0 C9 BF 00 30 D9 BA 01 D8 41 86 FE B0
+==== BB 600 (0x25475E50) approx BBs exec'd 0 ====
+
+	0x25475E50:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475E54:  4BFFF850  b 0x254756A4
+	   3: JMPo       	$0x254756A4  ($4)
+
+
+
+. 600 25475E50 8
+. 7F 43 D3 78 4B FF F8 50
+==== BB 601 (0x254756A4) approx BBs exec'd 0 ====
+
+	0x254756A4:  80A10000  lwz r5,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x254756A8:  83450004  lwz r26,4(r5)
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R26
+	   8: INCEIPL       	$4
+
+	0x254756AC:  8185FFB4  lwz r12,-76(r5)
+	   9: GETL       	R5, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x254756B0:  7F4803A6  mtlr r26
+	  14: GETL       	R26, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x254756B4:  81C5FFB8  lwz r14,-72(r5)
+	  17: GETL       	R5, t14
+	  18: ADDL       	$0xFFFFFFB8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R14
+	  21: INCEIPL       	$4
+
+	0x254756B8:  81E5FFBC  lwz r15,-68(r5)
+	  22: GETL       	R5, t18
+	  23: ADDL       	$0xFFFFFFBC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R15
+	  26: INCEIPL       	$4
+
+	0x254756BC:  7D808120  mtcrf 0x8,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x254756C0:  8205FFC0  lwz r16,-64(r5)
+	  30: GETL       	R5, t24
+	  31: ADDL       	$0xFFFFFFC0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R16
+	  34: INCEIPL       	$4
+
+	0x254756C4:  8225FFC4  lwz r17,-60(r5)
+	  35: GETL       	R5, t28
+	  36: ADDL       	$0xFFFFFFC4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R17
+	  39: INCEIPL       	$4
+
+	0x254756C8:  8245FFC8  lwz r18,-56(r5)
+	  40: GETL       	R5, t32
+	  41: ADDL       	$0xFFFFFFC8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R18
+	  44: INCEIPL       	$4
+
+	0x254756CC:  8265FFCC  lwz r19,-52(r5)
+	  45: GETL       	R5, t36
+	  46: ADDL       	$0xFFFFFFCC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R19
+	  49: INCEIPL       	$4
+
+	0x254756D0:  8285FFD0  lwz r20,-48(r5)
+	  50: GETL       	R5, t40
+	  51: ADDL       	$0xFFFFFFD0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R20
+	  54: INCEIPL       	$4
+
+	0x254756D4:  82A5FFD4  lwz r21,-44(r5)
+	  55: GETL       	R5, t44
+	  56: ADDL       	$0xFFFFFFD4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R21
+	  59: INCEIPL       	$4
+
+	0x254756D8:  82C5FFD8  lwz r22,-40(r5)
+	  60: GETL       	R5, t48
+	  61: ADDL       	$0xFFFFFFD8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R22
+	  64: INCEIPL       	$4
+
+	0x254756DC:  82E5FFDC  lwz r23,-36(r5)
+	  65: GETL       	R5, t52
+	  66: ADDL       	$0xFFFFFFDC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R23
+	  69: INCEIPL       	$4
+
+	0x254756E0:  8305FFE0  lwz r24,-32(r5)
+	  70: GETL       	R5, t56
+	  71: ADDL       	$0xFFFFFFE0, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R24
+	  74: INCEIPL       	$4
+
+	0x254756E4:  8325FFE4  lwz r25,-28(r5)
+	  75: GETL       	R5, t60
+	  76: ADDL       	$0xFFFFFFE4, t60
+	  77: LDL       	(t60), t62
+	  78: PUTL       	t62, R25
+	  79: INCEIPL       	$4
+
+	0x254756E8:  8345FFE8  lwz r26,-24(r5)
+	  80: GETL       	R5, t64
+	  81: ADDL       	$0xFFFFFFE8, t64
+	  82: LDL       	(t64), t66
+	  83: PUTL       	t66, R26
+	  84: INCEIPL       	$4
+
+	0x254756EC:  8365FFEC  lwz r27,-20(r5)
+	  85: GETL       	R5, t68
+	  86: ADDL       	$0xFFFFFFEC, t68
+	  87: LDL       	(t68), t70
+	  88: PUTL       	t70, R27
+	  89: INCEIPL       	$4
+
+	0x254756F0:  8385FFF0  lwz r28,-16(r5)
+	  90: GETL       	R5, t72
+	  91: ADDL       	$0xFFFFFFF0, t72
+	  92: LDL       	(t72), t74
+	  93: PUTL       	t74, R28
+	  94: INCEIPL       	$4
+
+	0x254756F4:  83A5FFF4  lwz r29,-12(r5)
+	  95: GETL       	R5, t76
+	  96: ADDL       	$0xFFFFFFF4, t76
+	  97: LDL       	(t76), t78
+	  98: PUTL       	t78, R29
+	  99: INCEIPL       	$4
+
+	0x254756F8:  83C5FFF8  lwz r30,-8(r5)
+	 100: GETL       	R5, t80
+	 101: ADDL       	$0xFFFFFFF8, t80
+	 102: LDL       	(t80), t82
+	 103: PUTL       	t82, R30
+	 104: INCEIPL       	$4
+
+	0x254756FC:  83E5FFFC  lwz r31,-4(r5)
+	 105: GETL       	R5, t84
+	 106: ADDL       	$0xFFFFFFFC, t84
+	 107: LDL       	(t84), t86
+	 108: PUTL       	t86, R31
+	 109: INCEIPL       	$4
+
+	0x25475700:  7CA12B78  or r1,r5,r5
+	 110: GETL       	R5, t88
+	 111: PUTL       	t88, R1
+	 112: INCEIPL       	$4
+
+	0x25475704:  4E800020  blr
+	 113: GETL       	LR, t90
+	 114: JMPo-r       	t90  ($4)
+
+
+
+. 601 254756A4 100
+. 80 A1 00 00 83 45 00 04 81 85 FF B4 7F 48 03 A6 81 C5 FF B8 81 E5 FF BC 7D 80 81 20 82 05 FF C0 82 25 FF C4 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+==== BB 602 (0x25477054) approx BBs exec'd 0 ====
+
+	0x25477054:  4BFFFF04  b 0x25476F58
+	   0: JMPo       	$0x25476F58  ($4)
+
+
+
+. 602 25477054 4
+. 4B FF FF 04
+==== BB 603 (0x25476F58) approx BBs exec'd 0 ====
+
+	0x25476F58:  83E10294  lwz r31,660(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x294, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x25476F5C:  81810244  lwz r12,580(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x244, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25476F60:  7FE803A6  mtlr r31
+	  10: GETL       	R31, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x25476F64:  81C10248  lwz r14,584(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x248, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R14
+	  17: INCEIPL       	$4
+
+	0x25476F68:  81E1024C  lwz r15,588(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x24C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R15
+	  22: INCEIPL       	$4
+
+	0x25476F6C:  7D818120  mtcrf 0x18,r12
+	  23: GETL       	R12, t18
+	  24: ICRFL       	t18, $0x3, CR
+	  25: ICRFL       	t18, $0x4, CR
+	  26: INCEIPL       	$4
+
+	0x25476F70:  82010250  lwz r16,592(r1)
+	  27: GETL       	R1, t20
+	  28: ADDL       	$0x250, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R16
+	  31: INCEIPL       	$4
+
+	0x25476F74:  82210254  lwz r17,596(r1)
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x254, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R17
+	  36: INCEIPL       	$4
+
+	0x25476F78:  82410258  lwz r18,600(r1)
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x258, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R18
+	  41: INCEIPL       	$4
+
+	0x25476F7C:  8261025C  lwz r19,604(r1)
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x25C, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R19
+	  46: INCEIPL       	$4
+
+	0x25476F80:  82810260  lwz r20,608(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x260, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R20
+	  51: INCEIPL       	$4
+
+	0x25476F84:  82A10264  lwz r21,612(r1)
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x264, t40
+	  54: LDL       	(t40), t42
+	  55: PUTL       	t42, R21
+	  56: INCEIPL       	$4
+
+	0x25476F88:  82C10268  lwz r22,616(r1)
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x268, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R22
+	  61: INCEIPL       	$4
+
+	0x25476F8C:  82E1026C  lwz r23,620(r1)
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x26C, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R23
+	  66: INCEIPL       	$4
+
+	0x25476F90:  83010270  lwz r24,624(r1)
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x270, t52
+	  69: LDL       	(t52), t54
+	  70: PUTL       	t54, R24
+	  71: INCEIPL       	$4
+
+	0x25476F94:  83210274  lwz r25,628(r1)
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x274, t56
+	  74: LDL       	(t56), t58
+	  75: PUTL       	t58, R25
+	  76: INCEIPL       	$4
+
+	0x25476F98:  83410278  lwz r26,632(r1)
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x278, t60
+	  79: LDL       	(t60), t62
+	  80: PUTL       	t62, R26
+	  81: INCEIPL       	$4
+
+	0x25476F9C:  8361027C  lwz r27,636(r1)
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x27C, t64
+	  84: LDL       	(t64), t66
+	  85: PUTL       	t66, R27
+	  86: INCEIPL       	$4
+
+	0x25476FA0:  83810280  lwz r28,640(r1)
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x280, t68
+	  89: LDL       	(t68), t70
+	  90: PUTL       	t70, R28
+	  91: INCEIPL       	$4
+
+	0x25476FA4:  83A10284  lwz r29,644(r1)
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x284, t72
+	  94: LDL       	(t72), t74
+	  95: PUTL       	t74, R29
+	  96: INCEIPL       	$4
+
+	0x25476FA8:  83C10288  lwz r30,648(r1)
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x288, t76
+	  99: LDL       	(t76), t78
+	 100: PUTL       	t78, R30
+	 101: INCEIPL       	$4
+
+	0x25476FAC:  83E1028C  lwz r31,652(r1)
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x28C, t80
+	 104: LDL       	(t80), t82
+	 105: PUTL       	t82, R31
+	 106: INCEIPL       	$4
+
+	0x25476FB0:  38210290  addi r1,r1,656
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x290, t84
+	 109: PUTL       	t84, R1
+	 110: INCEIPL       	$4
+
+	0x25476FB4:  4E800020  blr
+	 111: GETL       	LR, t86
+	 112: JMPo-r       	t86  ($4)
+
+
+
+. 603 25476F58 96
+. 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+==== BB 604 (0x254734A8) approx BBs exec'd 0 ====
+
+	0x254734A8:  809E00D0  lwz r4,208(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xD0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254734AC:  82E30178  lwz r23,376(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x178, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R23
+	   9: INCEIPL       	$4
+
+	0x254734B0:  3A770001  addi r19,r23,1
+	  10: GETL       	R23, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R19
+	  13: INCEIPL       	$4
+
+	0x254734B4:  92630178  stw r19,376(r3)
+	  14: GETL       	R19, t10
+	  15: GETL       	R3, t12
+	  16: ADDL       	$0x178, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x254734B8:  387F0044  addi r3,r31,68
+	  19: GETL       	R31, t14
+	  20: ADDL       	$0x44, t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0x254734BC:  6A7D0001  xori r29,r19,0x1
+	  23: GETL       	R19, t16
+	  24: XORL       	$0x1, t16
+	  25: PUTL       	t16, R29
+	  26: INCEIPL       	$4
+
+	0x254734C0:  23BD0000  subfic r29,r29,0
+	  27: GETL       	R29, t18
+	  28: MOVL       	$0x0, t20
+	  29: SBBL       	t18, t20  (-wCa)
+	  30: PUTL       	t20, R29
+	  31: INCEIPL       	$4
+
+	0x254734C4:  7FB90194  addze r29,r25
+	  32: GETL       	R25, t22
+	  33: ADCL       	$0x0, t22  (-rCa-wCa)
+	  34: PUTL       	t22, R29
+	  35: INCEIPL       	$4
+
+	0x254734C8:  4800CE75  bl 0x2548033C
+	  36: MOVL       	$0x254734CC, t24
+	  37: PUTL       	t24, LR
+	  38: JMPo-c       	$0x2548033C  ($4)
+
+
+
+. 604 254734A8 36
+. 80 9E 00 D0 82 E3 01 78 3A 77 00 01 92 63 01 78 38 7F 00 44 6A 7D 00 01 23 BD 00 00 7F B9 01 94 48 00 CE 75
+==== BB 605 (0x254734CC) approx BBs exec'd 0 ====
+
+	0x254734CC:  7FB9EB78  or r25,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0x254734D0:  7C7D1B79  or. r29,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R29
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x254734D4:  4082FF9C  bc 4,2,0x25473470
+	   8: Jc02o       	$0x25473470
+
+
+
+. 605 254734CC 12
+. 7F B9 EB 78 7C 7D 1B 79 40 82 FF 9C
+==== BB 606 (0x254734D8) approx BBs exec'd 0 ====
+
+	0x254734D8:  807E003C  lwz r3,60(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x3C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254734DC:  38800004  li r4,4
+	   5: MOVL       	$0x4, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x254734E0:  4800EB51  bl 0x25482030
+	   8: MOVL       	$0x254734E4, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x25482030  ($4)
+
+
+
+. 606 254734D8 12
+. 80 7E 00 3C 38 80 00 04 48 00 EB 51
+==== BB 607 access(0x25482030) approx BBs exec'd 0 ====
+
+	0x25482030:  38000021  li r0,33
+	   0: MOVL       	$0x21, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25482034:  44000002  sc
+	   3: JMPo-sys       	$0x25482038  ($4)
+
+
+
+. 607 25482030 8
+. 38 00 00 21 44 00 00 02
+==== BB 608 (0x25482038) approx BBs exec'd 0 ====
+
+	0x25482038:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 608 25482038 4
+. 4C A3 00 20
+==== BB 609 (0x2548203C) approx BBs exec'd 0 ====
+
+	0x2548203C:  4BFFF2F4  b 0x25481330
+	   0: JMPo       	$0x25481330  ($4)
+
+
+
+. 609 2548203C 4
+. 4B FF F2 F4
+==== BB 610 __syscall_error(0x25481330) approx BBs exec'd 0 ====
+
+	0x25481330:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25481334:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25481338:  48015CC9  bl 0x25497000
+	   9: MOVL       	$0x2548133C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 610 25481330 12
+. 94 21 FF F0 7D 88 02 A6 48 01 5C C9
+==== BB 611 (0x2548133C) approx BBs exec'd 0 ====
+
+	0x2548133C:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25481340:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25481344:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x25481348:  813E0514  lwz r9,1300(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x514, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0x2548134C:  83C10008  lwz r30,8(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x8, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0x25481350:  38210010  addi r1,r1,16
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: PUTL       	t16, R1
+	  24: INCEIPL       	$4
+
+	0x25481354:  90690000  stw r3,0(r9)
+	  25: GETL       	R3, t18
+	  26: GETL       	R9, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x25481358:  3860FFFF  li r3,-1
+	  29: MOVL       	$0xFFFFFFFF, t22
+	  30: PUTL       	t22, R3
+	  31: INCEIPL       	$4
+
+	0x2548135C:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+
+. 611 2548133C 36
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 81 3E 05 14 83 C1 00 08 38 21 00 10 90 69 00 00 38 60 FF FF 4E 80 00 20
+==== BB 612 (0x254734E4) approx BBs exec'd 0 ====
+
+	0x254734E4:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x254734E8:  4092F2D0  bc 4,18,0x254727B8
+	   4: Jc18o       	$0x254727B8
+
+
+
+. 612 254734E4 8
+. 2E 03 00 00 40 92 F2 D0
+==== BB 613 (0x254727B8) approx BBs exec'd 0 ====
+
+	0x254727B8:  2C190000  cmpi cr0,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x254727BC:  408213F0  bc 4,2,0x25473BAC
+	   4: Jc02o       	$0x25473BAC
+
+
+
+. 613 254727B8 8
+. 2C 19 00 00 40 82 13 F0
+==== BB 614 (0x25473BAC) approx BBs exec'd 0 ====
+
+	0x25473BAC:  5736103A  rlwinm r22,r25,2,0,29
+	   0: GETL       	R25, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R22
+	   3: INCEIPL       	$4
+
+	0x25473BB0:  83A10000  lwz r29,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25473BB4:  38D6001E  addi r6,r22,30
+	   8: GETL       	R22, t6
+	   9: ADDL       	$0x1E, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0x25473BB8:  39600000  li r11,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0x25473BBC:  54C00036  rlwinm r0,r6,0,0,27
+	  15: GETL       	R6, t10
+	  16: ANDL       	$0xFFFFFFF0, t10
+	  17: PUTL       	t10, R0
+	  18: INCEIPL       	$4
+
+	0x25473BC0:  7F4000D0  neg r26,r0
+	  19: GETL       	R0, t12
+	  20: NEGL       	t12
+	  21: PUTL       	t12, R26
+	  22: INCEIPL       	$4
+
+	0x25473BC4:  7FA1D16E  stwux r29,r1,r26
+	  23: GETL       	R26, t14
+	  24: GETL       	R1, t16
+	  25: ADDL       	t16, t14
+	  26: PUTL       	t14, R1
+	  27: GETL       	R29, t18
+	  28: STL       	t18, (t14)
+	  29: INCEIPL       	$4
+
+	0x25473BC8:  3B610017  addi r27,r1,23
+	  30: GETL       	R1, t20
+	  31: ADDL       	$0x17, t20
+	  32: PUTL       	t20, R27
+	  33: INCEIPL       	$4
+
+	0x25473BCC:  812E01C4  lwz r9,452(r14)
+	  34: GETL       	R14, t22
+	  35: ADDL       	$0x1C4, t22
+	  36: LDL       	(t22), t24
+	  37: PUTL       	t24, R9
+	  38: INCEIPL       	$4
+
+	0x25473BD0:  577A0036  rlwinm r26,r27,0,0,27
+	  39: GETL       	R27, t26
+	  40: ANDL       	$0xFFFFFFF0, t26
+	  41: PUTL       	t26, R26
+	  42: INCEIPL       	$4
+
+	0x25473BD4:  556C103A  rlwinm r12,r11,2,0,29
+	  43: GETL       	R11, t28
+	  44: SHLL       	$0x2, t28
+	  45: PUTL       	t28, R12
+	  46: INCEIPL       	$4
+
+	0x25473BD8:  396B0001  addi r11,r11,1
+	  47: MOVL       	$0x1, t30
+	  48: PUTL       	t30, R11
+	  49: INCEIPL       	$4
+
+	0x25473BDC:  7D2CD12E  stwx r9,r12,r26
+	  50: GETL       	R26, t32
+	  51: GETL       	R12, t34
+	  52: ADDL       	t34, t32
+	  53: GETL       	R9, t36
+	  54: STL       	t36, (t32)
+	  55: INCEIPL       	$4
+
+	0x25473BE0:  8129000C  lwz r9,12(r9)
+	  56: GETL       	R9, t38
+	  57: ADDL       	$0xC, t38
+	  58: LDL       	(t38), t40
+	  59: PUTL       	t40, R9
+	  60: INCEIPL       	$4
+
+	0x25473BE4:  2F890000  cmpi cr7,r9,0
+	  61: GETL       	R9, t42
+	  62: CMP0L       	t42, t44  (-rSo)
+	  63: ICRFL       	t44, $0x7, CR
+	  64: INCEIPL       	$4
+
+	0x25473BE8:  409EFFEC  bc 4,30,0x25473BD4
+	  65: Jc30o       	$0x25473BD4
+
+
+
+. 614 25473BAC 64
+. 57 36 10 3A 83 A1 00 00 38 D6 00 1E 39 60 00 00 54 C0 00 36 7F 40 00 D0 7F A1 D1 6E 3B 61 00 17 81 2E 01 C4 57 7A 00 36 55 6C 10 3A 39 6B 00 01 7D 2C D1 2E 81 29 00 0C 2F 89 00 00 40 9E FF EC
+==== BB 615 (0x25473BEC) approx BBs exec'd 0 ====
+
+	0x25473BEC:  4BFFEBD4  b 0x254727C0
+	   0: JMPo       	$0x254727C0  ($4)
+
+
+
+. 615 25473BEC 4
+. 4B FF EB D4
+==== BB 616 (0x254727C0) approx BBs exec'd 0 ====
+
+	0x254727C0:  817F0030  lwz r11,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254727C4:  38E00000  li r7,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x254727C8:  7F44D378  or r4,r26,r26
+	   8: GETL       	R26, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x254727CC:  7F25CB78  or r5,r25,r25
+	  11: GETL       	R25, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x254727D0:  69660003  xori r6,r11,0x3
+	  14: GETL       	R11, t10
+	  15: XORL       	$0x3, t10
+	  16: PUTL       	t10, R6
+	  17: INCEIPL       	$4
+
+	0x254727D4:  20060000  subfic r0,r6,0
+	  18: GETL       	R6, t12
+	  19: MOVL       	$0x0, t14
+	  20: SBBL       	t12, t14  (-wCa)
+	  21: PUTL       	t14, R0
+	  22: INCEIPL       	$4
+
+	0x254727D8:  7CC03114  adde r6,r0,r6
+	  23: GETL       	R0, t16
+	  24: GETL       	R6, t18
+	  25: ADCL       	t16, t18  (-rCa-wCa)
+	  26: PUTL       	t18, R6
+	  27: INCEIPL       	$4
+
+	0x254727DC:  7E83A378  or r3,r20,r20
+	  28: GETL       	R20, t20
+	  29: PUTL       	t20, R3
+	  30: INCEIPL       	$4
+
+	0x254727E0:  480079E5  bl 0x2547A1C4
+	  31: MOVL       	$0x254727E4, t22
+	  32: PUTL       	t22, LR
+	  33: JMPo-c       	$0x2547A1C4  ($4)
+
+
+
+. 616 254727C0 36
+. 81 7F 00 30 38 E0 00 00 7F 44 D3 78 7F 25 CB 78 69 66 00 03 20 06 00 00 7C C0 31 14 7E 83 A3 78 48 00 79 E5
+==== BB 617 _dl_map_object_deps(0x2547A1C4) approx BBs exec'd 0 ====
+
+	0x2547A1C4:  9421FF60  stwu r1,-160(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF60, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547A1C8:  7D2802A6  mflr r9
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547A1CC:  91E1005C  stw r15,92(r1)
+	   9: GETL       	R15, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x5C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547A1D0:  1DE5000C  mulli r15,r5,12
+	  14: GETL       	R5, t10
+	  15: MULL       	$0xC, t10
+	  16: PUTL       	t10, R15
+	  17: INCEIPL       	$4
+
+	0x2547A1D4:  91C10058  stw r14,88(r1)
+	  18: GETL       	R14, t12
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x58, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0x2547A1D8:  4801CE29  bl 0x25497000
+	  23: MOVL       	$0x2547A1DC, t16
+	  24: PUTL       	t16, LR
+	  25: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 617 2547A1C4 24
+. 94 21 FF 60 7D 28 02 A6 91 E1 00 5C 1D E5 00 0C 91 C1 00 58 48 01 CE 29
+==== BB 618 (0x2547A1DC) approx BBs exec'd 0 ====
+
+	0x2547A1DC:  39CF0036  addi r14,r15,54
+	   0: GETL       	R15, t0
+	   1: ADDL       	$0x36, t0
+	   2: PUTL       	t0, R14
+	   3: INCEIPL       	$4
+
+	0x2547A1E0:  81410000  lwz r10,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0x2547A1E4:  55CC0036  rlwinm r12,r14,0,0,27
+	   8: GETL       	R14, t6
+	   9: ANDL       	$0xFFFFFFF0, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x2547A1E8:  93A10094  stw r29,148(r1)
+	  12: GETL       	R29, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x94, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x2547A1EC:  7D0C00D0  neg r8,r12
+	  17: GETL       	R12, t12
+	  18: NEGL       	t12
+	  19: PUTL       	t12, R8
+	  20: INCEIPL       	$4
+
+	0x2547A1F0:  3BA00000  li r29,0
+	  21: MOVL       	$0x0, t14
+	  22: PUTL       	t14, R29
+	  23: INCEIPL       	$4
+
+	0x2547A1F4:  912100A4  stw r9,164(r1)
+	  24: GETL       	R9, t16
+	  25: GETL       	R1, t18
+	  26: ADDL       	$0xA4, t18
+	  27: STL       	t16, (t18)
+	  28: INCEIPL       	$4
+
+	0x2547A1F8:  7F9D2840  cmpl cr7,r29,r5
+	  29: GETL       	R29, t20
+	  30: GETL       	R5, t22
+	  31: CMPUL       	t20, t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0x2547A1FC:  92E1007C  stw r23,124(r1)
+	  34: GETL       	R23, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x7C, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x2547A200:  7C771B78  or r23,r3,r3
+	  39: GETL       	R3, t30
+	  40: PUTL       	t30, R23
+	  41: INCEIPL       	$4
+
+	0x2547A204:  93C10098  stw r30,152(r1)
+	  42: GETL       	R30, t32
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x98, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0x2547A208:  38000000  li r0,0
+	  47: MOVL       	$0x0, t36
+	  48: PUTL       	t36, R0
+	  49: INCEIPL       	$4
+
+	0x2547A20C:  93E1009C  stw r31,156(r1)
+	  50: GETL       	R31, t38
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x9C, t40
+	  53: STL       	t38, (t40)
+	  54: INCEIPL       	$4
+
+	0x2547A210:  7C3F0B78  or r31,r1,r1
+	  55: GETL       	R1, t42
+	  56: PUTL       	t42, R31
+	  57: INCEIPL       	$4
+
+	0x2547A214:  92010060  stw r16,96(r1)
+	  58: GETL       	R16, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x60, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x2547A218:  7FC802A6  mflr r30
+	  63: GETL       	LR, t48
+	  64: PUTL       	t48, R30
+	  65: INCEIPL       	$4
+
+	0x2547A21C:  92210064  stw r17,100(r1)
+	  66: GETL       	R17, t50
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x64, t52
+	  69: STL       	t50, (t52)
+	  70: INCEIPL       	$4
+
+	0x2547A220:  7CCF3378  or r15,r6,r6
+	  71: GETL       	R6, t54
+	  72: PUTL       	t54, R15
+	  73: INCEIPL       	$4
+
+	0x2547A224:  92410068  stw r18,104(r1)
+	  74: GETL       	R18, t56
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x68, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0x2547A228:  7CEE3B78  or r14,r7,r7
+	  79: GETL       	R7, t60
+	  80: PUTL       	t60, R14
+	  81: INCEIPL       	$4
+
+	0x2547A22C:  9261006C  stw r19,108(r1)
+	  82: GETL       	R19, t62
+	  83: GETL       	R1, t64
+	  84: ADDL       	$0x6C, t64
+	  85: STL       	t62, (t64)
+	  86: INCEIPL       	$4
+
+	0x2547A230:  92810070  stw r20,112(r1)
+	  87: GETL       	R20, t66
+	  88: GETL       	R1, t68
+	  89: ADDL       	$0x70, t68
+	  90: STL       	t66, (t68)
+	  91: INCEIPL       	$4
+
+	0x2547A234:  92A10074  stw r21,116(r1)
+	  92: GETL       	R21, t70
+	  93: GETL       	R1, t72
+	  94: ADDL       	$0x74, t72
+	  95: STL       	t70, (t72)
+	  96: INCEIPL       	$4
+
+	0x2547A238:  92C10078  stw r22,120(r1)
+	  97: GETL       	R22, t74
+	  98: GETL       	R1, t76
+	  99: ADDL       	$0x78, t76
+	 100: STL       	t74, (t76)
+	 101: INCEIPL       	$4
+
+	0x2547A23C:  93010080  stw r24,128(r1)
+	 102: GETL       	R24, t78
+	 103: GETL       	R1, t80
+	 104: ADDL       	$0x80, t80
+	 105: STL       	t78, (t80)
+	 106: INCEIPL       	$4
+
+	0x2547A240:  93210084  stw r25,132(r1)
+	 107: GETL       	R25, t82
+	 108: GETL       	R1, t84
+	 109: ADDL       	$0x84, t84
+	 110: STL       	t82, (t84)
+	 111: INCEIPL       	$4
+
+	0x2547A244:  93410088  stw r26,136(r1)
+	 112: GETL       	R26, t86
+	 113: GETL       	R1, t88
+	 114: ADDL       	$0x88, t88
+	 115: STL       	t86, (t88)
+	 116: INCEIPL       	$4
+
+	0x2547A248:  9361008C  stw r27,140(r1)
+	 117: GETL       	R27, t90
+	 118: GETL       	R1, t92
+	 119: ADDL       	$0x8C, t92
+	 120: STL       	t90, (t92)
+	 121: INCEIPL       	$4
+
+	0x2547A24C:  93810090  stw r28,144(r1)
+	 122: GETL       	R28, t94
+	 123: GETL       	R1, t96
+	 124: ADDL       	$0x90, t96
+	 125: STL       	t94, (t96)
+	 126: INCEIPL       	$4
+
+	0x2547A250:  7D41416E  stwux r10,r1,r8
+	 127: GETL       	R8, t98
+	 128: GETL       	R1, t100
+	 129: ADDL       	t100, t98
+	 130: PUTL       	t98, R1
+	 131: GETL       	R10, t102
+	 132: STL       	t102, (t98)
+	 133: INCEIPL       	$4
+
+	0x2547A254:  39000001  li r8,1
+	 134: MOVL       	$0x1, t104
+	 135: PUTL       	t104, R8
+	 136: INCEIPL       	$4
+
+	0x2547A258:  81230180  lwz r9,384(r3)
+	 137: GETL       	R3, t106
+	 138: ADDL       	$0x180, t106
+	 139: LDL       	(t106), t108
+	 140: PUTL       	t108, R9
+	 141: INCEIPL       	$4
+
+	0x2547A25C:  38610017  addi r3,r1,23
+	 142: GETL       	R1, t110
+	 143: ADDL       	$0x17, t110
+	 144: PUTL       	t110, R3
+	 145: INCEIPL       	$4
+
+	0x2547A260:  546A0036  rlwinm r10,r3,0,0,27
+	 146: GETL       	R3, t112
+	 147: ANDL       	$0xFFFFFFF0, t112
+	 148: PUTL       	t112, R10
+	 149: INCEIPL       	$4
+
+	0x2547A264:  5109C94C  rlwimi r9,r8,25,5,6
+	 150: GETL       	R9, t114
+	 151: GETL       	R8, t116
+	 152: ROLL       	$0x19, t116
+	 153: ANDL       	$0x6000000, t116
+	 154: ANDL       	$0xF9FFFFFF, t114
+	 155: ORL       	t114, t116
+	 156: PUTL       	t116, R9
+	 157: INCEIPL       	$4
+
+	0x2547A268:  396A000C  addi r11,r10,12
+	 158: GETL       	R10, t118
+	 159: ADDL       	$0xC, t118
+	 160: PUTL       	t118, R11
+	 161: INCEIPL       	$4
+
+	0x2547A26C:  900A0000  stw r0,0(r10)
+	 162: GETL       	R0, t120
+	 163: GETL       	R10, t122
+	 164: STL       	t120, (t122)
+	 165: INCEIPL       	$4
+
+	0x2547A270:  916A0008  stw r11,8(r10)
+	 166: GETL       	R11, t124
+	 167: GETL       	R10, t126
+	 168: ADDL       	$0x8, t126
+	 169: STL       	t124, (t126)
+	 170: INCEIPL       	$4
+
+	0x2547A274:  91370180  stw r9,384(r23)
+	 171: GETL       	R9, t128
+	 172: GETL       	R23, t130
+	 173: ADDL       	$0x180, t130
+	 174: STL       	t128, (t130)
+	 175: INCEIPL       	$4
+
+	0x2547A278:  915F0008  stw r10,8(r31)
+	 176: GETL       	R10, t132
+	 177: GETL       	R31, t134
+	 178: ADDL       	$0x8, t134
+	 179: STL       	t132, (t134)
+	 180: INCEIPL       	$4
+
+	0x2547A27C:  92EA0004  stw r23,4(r10)
+	 181: GETL       	R23, t136
+	 182: GETL       	R10, t138
+	 183: ADDL       	$0x4, t138
+	 184: STL       	t136, (t138)
+	 185: INCEIPL       	$4
+
+	0x2547A280:  911F000C  stw r8,12(r31)
+	 186: GETL       	R8, t140
+	 187: GETL       	R31, t142
+	 188: ADDL       	$0xC, t142
+	 189: STL       	t140, (t142)
+	 190: INCEIPL       	$4
+
+	0x2547A284:  409C005C  bc 4,28,0x2547A2E0
+	 191: Jc28o       	$0x2547A2E0
+
+
+
+. 618 2547A1DC 172
+. 39 CF 00 36 81 41 00 00 55 CC 00 36 93 A1 00 94 7D 0C 00 D0 3B A0 00 00 91 21 00 A4 7F 9D 28 40 92 E1 00 7C 7C 77 1B 78 93 C1 00 98 38 00 00 00 93 E1 00 9C 7C 3F 0B 78 92 01 00 60 7F C8 02 A6 92 21 00 64 7C CF 33 78 92 41 00 68 7C EE 3B 78 92 61 00 6C 92 81 00 70 92 A1 00 74 92 C1 00 78 93 01 00 80 93 21 00 84 93 41 00 88 93 61 00 8C 93 81 00 90 7D 41 41 6E 39 00 00 01 81 23 01 80 38 61 00 17 54 6A 00 36 51 09 C9 4C 39 6A 00 0C 90 0A 00 00 91 6A 00 08 91 37 01 80 91 5F 00 08 92 EA 00 04 91 1F 00 0C 40 9C 00 5C
+==== BB 619 (0x2547A288) approx BBs exec'd 0 ====
+
+	0x2547A288:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x2547A28C:  7D475378  or r7,r10,r10
+	   3: GETL       	R10, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x2547A290:  38A00000  li r5,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547A294:  38C00001  li r6,1
+	   9: MOVL       	$0x1, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0x2547A298:  48000008  b 0x2547A2A0
+	  12: JMPo       	$0x2547A2A0  ($4)
+
+
+
+. 619 2547A288 20
+. 7C A9 03 A6 7D 47 53 78 38 A0 00 00 38 C0 00 01 48 00 00 08
+==== BB 620 (0x2547A2A0) approx BBs exec'd 0 ====
+
+	0x2547A2A0:  1E68000C  mulli r19,r8,12
+	   0: GETL       	R8, t0
+	   1: MULL       	$0xC, t0
+	   2: PUTL       	t0, R19
+	   3: INCEIPL       	$4
+
+	0x2547A2A4:  57B5103A  rlwinm r21,r29,2,0,29
+	   4: GETL       	R29, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R21
+	   7: INCEIPL       	$4
+
+	0x2547A2A8:  7E15202E  lwzx r16,r21,r4
+	   8: GETL       	R4, t4
+	   9: GETL       	R21, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R16
+	  13: INCEIPL       	$4
+
+	0x2547A2AC:  39280001  addi r9,r8,1
+	  14: GETL       	R8, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x2547A2B0:  7E533A14  add r18,r19,r7
+	  18: GETL       	R19, t12
+	  19: GETL       	R7, t14
+	  20: ADDL       	t12, t14
+	  21: PUTL       	t14, R18
+	  22: INCEIPL       	$4
+
+	0x2547A2B4:  7CB3392E  stwx r5,r19,r7
+	  23: GETL       	R7, t16
+	  24: GETL       	R19, t18
+	  25: ADDL       	t18, t16
+	  26: GETL       	R5, t20
+	  27: STL       	t20, (t16)
+	  28: INCEIPL       	$4
+
+	0x2547A2B8:  3A92000C  addi r20,r18,12
+	  29: GETL       	R18, t22
+	  30: ADDL       	$0xC, t22
+	  31: PUTL       	t22, R20
+	  32: INCEIPL       	$4
+
+	0x2547A2BC:  92120004  stw r16,4(r18)
+	  33: GETL       	R16, t24
+	  34: GETL       	R18, t26
+	  35: ADDL       	$0x4, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x2547A2C0:  92920008  stw r20,8(r18)
+	  38: GETL       	R20, t28
+	  39: GETL       	R18, t30
+	  40: ADDL       	$0x8, t30
+	  41: STL       	t28, (t30)
+	  42: INCEIPL       	$4
+
+	0x2547A2C4:  3BBD0001  addi r29,r29,1
+	  43: GETL       	R29, t32
+	  44: ADDL       	$0x1, t32
+	  45: PUTL       	t32, R29
+	  46: INCEIPL       	$4
+
+	0x2547A2C8:  82300180  lwz r17,384(r16)
+	  47: GETL       	R16, t34
+	  48: ADDL       	$0x180, t34
+	  49: LDL       	(t34), t36
+	  50: PUTL       	t36, R17
+	  51: INCEIPL       	$4
+
+	0x2547A2CC:  913F000C  stw r9,12(r31)
+	  52: GETL       	R9, t38
+	  53: GETL       	R31, t40
+	  54: ADDL       	$0xC, t40
+	  55: STL       	t38, (t40)
+	  56: INCEIPL       	$4
+
+	0x2547A2D0:  50D1C94C  rlwimi r17,r6,25,5,6
+	  57: GETL       	R17, t42
+	  58: GETL       	R6, t44
+	  59: ROLL       	$0x19, t44
+	  60: ANDL       	$0x6000000, t44
+	  61: ANDL       	$0xF9FFFFFF, t42
+	  62: ORL       	t42, t44
+	  63: PUTL       	t44, R17
+	  64: INCEIPL       	$4
+
+	0x2547A2D4:  92300180  stw r17,384(r16)
+	  65: GETL       	R17, t46
+	  66: GETL       	R16, t48
+	  67: ADDL       	$0x180, t48
+	  68: STL       	t46, (t48)
+	  69: INCEIPL       	$4
+
+	0x2547A2D8:  4200FFC4  bc 16,0,0x2547A29C
+	  70: GETL       	CTR, t50
+	  71: ADDL       	$0xFFFFFFFF, t50
+	  72: PUTL       	t50, CTR
+	  73: JIFZL       	t50, $0x2547A2DC
+	  74: JMPo       	$0x2547A29C  ($4)
+
+
+
+. 620 2547A2A0 60
+. 1E 68 00 0C 57 B5 10 3A 7E 15 20 2E 39 28 00 01 7E 53 3A 14 7C B3 39 2E 3A 92 00 0C 92 12 00 04 92 92 00 08 3B BD 00 01 82 30 01 80 91 3F 00 0C 50 D1 C9 4C 92 30 01 80 42 00 FF C4
+==== BB 621 (0x2547A2DC) approx BBs exec'd 0 ====
+
+	0x2547A2DC:  7D284B78  or r8,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0x2547A2E0:  831F0008  lwz r24,8(r31)
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R24
+	   7: INCEIPL       	$4
+
+	0x2547A2E4:  1EC8000C  mulli r22,r8,12
+	   8: GETL       	R8, t6
+	   9: MULL       	$0xC, t6
+	  10: PUTL       	t6, R22
+	  11: INCEIPL       	$4
+
+	0x2547A2E8:  821E0514  lwz r16,1300(r30)
+	  12: GETL       	R30, t8
+	  13: ADDL       	$0x514, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R16
+	  16: INCEIPL       	$4
+
+	0x2547A2EC:  3A200000  li r17,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R17
+	  19: INCEIPL       	$4
+
+	0x2547A2F0:  7F1BC379  or. r27,r24,r24
+	  20: GETL       	R24, t14
+	  21: PUTL       	t14, R27
+	  22: CMP0L       	t14, t16  (-rSo)
+	  23: ICRFL       	t16, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0x2547A2F4:  7CF6C214  add r7,r22,r24
+	  25: GETL       	R22, t18
+	  26: GETL       	R24, t20
+	  27: ADDL       	t18, t20
+	  28: PUTL       	t20, R7
+	  29: INCEIPL       	$4
+
+	0x2547A2F8:  80900000  lwz r4,0(r16)
+	  30: GETL       	R16, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R4
+	  33: INCEIPL       	$4
+
+	0x2547A2FC:  38A7FFF4  addi r5,r7,-12
+	  34: GETL       	R7, t26
+	  35: ADDL       	$0xFFFFFFF4, t26
+	  36: PUTL       	t26, R5
+	  37: INCEIPL       	$4
+
+	0x2547A300:  38C00000  li r6,0
+	  38: MOVL       	$0x0, t28
+	  39: PUTL       	t28, R6
+	  40: INCEIPL       	$4
+
+	0x2547A304:  7CB32B78  or r19,r5,r5
+	  41: GETL       	R5, t30
+	  42: PUTL       	t30, R19
+	  43: INCEIPL       	$4
+
+	0x2547A308:  90C50008  stw r6,8(r5)
+	  44: GETL       	R6, t32
+	  45: GETL       	R5, t34
+	  46: ADDL       	$0x8, t34
+	  47: STL       	t32, (t34)
+	  48: INCEIPL       	$4
+
+	0x2547A30C:  923F0034  stw r17,52(r31)
+	  49: GETL       	R17, t36
+	  50: GETL       	R31, t38
+	  51: ADDL       	$0x34, t38
+	  52: STL       	t36, (t38)
+	  53: INCEIPL       	$4
+
+	0x2547A310:  909F0038  stw r4,56(r31)
+	  54: GETL       	R4, t40
+	  55: GETL       	R31, t42
+	  56: ADDL       	$0x38, t42
+	  57: STL       	t40, (t42)
+	  58: INCEIPL       	$4
+
+	0x2547A314:  92300000  stw r17,0(r16)
+	  59: GETL       	R17, t44
+	  60: GETL       	R16, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x2547A318:  418202AC  bc 12,2,0x2547A5C4
+	  63: Js02o       	$0x2547A5C4
+
+
+
+. 621 2547A2DC 64
+. 7D 28 4B 78 83 1F 00 08 1E C8 00 0C 82 1E 05 14 3A 20 00 00 7F 1B C3 79 7C F6 C2 14 80 90 00 00 38 A7 FF F4 38 C0 00 00 7C B3 2B 78 90 C5 00 08 92 3F 00 34 90 9F 00 38 92 30 00 00 41 82 02 AC
+==== BB 622 (0x2547A31C) approx BBs exec'd 0 ====
+
+	0x2547A31C:  835B0004  lwz r26,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547A320:  3B800001  li r28,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2547A324:  939B0000  stw r28,0(r27)
+	   8: GETL       	R28, t6
+	   9: GETL       	R27, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x2547A328:  3B000000  li r24,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R24
+	  14: INCEIPL       	$4
+
+	0x2547A32C:  833A0158  lwz r25,344(r26)
+	  15: GETL       	R26, t12
+	  16: ADDL       	$0x158, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R25
+	  19: INCEIPL       	$4
+
+	0x2547A330:  3AC00000  li r22,0
+	  20: MOVL       	$0x0, t16
+	  21: PUTL       	t16, R22
+	  22: INCEIPL       	$4
+
+	0x2547A334:  2C990000  cmpi cr1,r25,0
+	  23: GETL       	R25, t18
+	  24: CMP0L       	t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x1, CR
+	  26: INCEIPL       	$4
+
+	0x2547A338:  40860030  bc 4,6,0x2547A368
+	  27: Jc06o       	$0x2547A368
+
+
+
+. 622 2547A31C 32
+. 83 5B 00 04 3B 80 00 01 93 9B 00 00 3B 00 00 00 83 3A 01 58 3A C0 00 00 2C 99 00 00 40 86 00 30
+==== BB 623 (0x2547A33C) approx BBs exec'd 0 ====
+
+	0x2547A33C:  807A01E8  lwz r3,488(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547A340:  7F4BBA78  xor r11,r26,r23
+	   5: GETL       	R26, t4
+	   6: GETL       	R23, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x2547A344:  314BFFFF  addic r10,r11,-1
+	  10: GETL       	R11, t8
+	  11: ADCL       	$0xFFFFFFFF, t8  (-wCa)
+	  12: PUTL       	t8, R10
+	  13: INCEIPL       	$4
+
+	0x2547A348:  7FAA5910  subfe r29,r10,r11
+	  14: GETL       	R10, t10
+	  15: GETL       	R11, t12
+	  16: SBBL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x2547A34C:  21230000  subfic r9,r3,0
+	  19: GETL       	R3, t14
+	  20: MOVL       	$0x0, t16
+	  21: SBBL       	t14, t16  (-wCa)
+	  22: PUTL       	t16, R9
+	  23: INCEIPL       	$4
+
+	0x2547A350:  7C091914  adde r0,r9,r3
+	  24: GETL       	R9, t18
+	  25: GETL       	R3, t20
+	  26: ADCL       	t18, t20  (-rCa-wCa)
+	  27: PUTL       	t20, R0
+	  28: INCEIPL       	$4
+
+	0x2547A354:  7C0BE839  and. r11,r0,r29
+	  29: GETL       	R0, t22
+	  30: GETL       	R29, t24
+	  31: ANDL       	t22, t24
+	  32: PUTL       	t24, R11
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x0, CR
+	  35: INCEIPL       	$4
+
+	0x2547A358:  41820010  bc 12,2,0x2547A368
+	  36: Js02o       	$0x2547A368
+
+
+
+. 623 2547A33C 32
+. 80 7A 01 E8 7F 4B BA 78 31 4B FF FF 7F AA 59 10 21 23 00 00 7C 09 19 14 7C 0B E8 39 41 82 00 10
+==== BB 624 (0x2547A368) approx BBs exec'd 0 ====
+
+	0x2547A368:  82BA0024  lwz r21,36(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547A36C:  2F950000  cmpi cr7,r21,0
+	   5: GETL       	R21, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547A370:  409E001C  bc 4,30,0x2547A38C
+	   9: Jc30o       	$0x2547A38C
+
+
+
+. 624 2547A368 12
+. 82 BA 00 24 2F 95 00 00 40 9E 00 1C
+==== BB 625 (0x2547A38C) approx BBs exec'd 0 ====
+
+	0x2547A38C:  80DA0034  lwz r6,52(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547A390:  7F72DB78  or r18,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R18
+	   7: INCEIPL       	$4
+
+	0x2547A394:  839A0008  lwz r28,8(r26)
+	   8: GETL       	R26, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x2547A398:  82A60004  lwz r21,4(r6)
+	  13: GETL       	R6, t10
+	  14: ADDL       	$0x4, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R21
+	  17: INCEIPL       	$4
+
+	0x2547A39C:  91FF0014  stw r15,20(r31)
+	  18: GETL       	R15, t14
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547A3A0:  91DF0018  stw r14,24(r31)
+	  23: GETL       	R14, t18
+	  24: GETL       	R31, t20
+	  25: ADDL       	$0x18, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0x2547A3A4:  813C0000  lwz r9,0(r28)
+	  28: GETL       	R28, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R9
+	  31: INCEIPL       	$4
+
+	0x2547A3A8:  92BF001C  stw r21,28(r31)
+	  32: GETL       	R21, t26
+	  33: GETL       	R31, t28
+	  34: ADDL       	$0x1C, t28
+	  35: STL       	t26, (t28)
+	  36: INCEIPL       	$4
+
+	0x2547A3AC:  2F090000  cmpi cr6,r9,0
+	  37: GETL       	R9, t30
+	  38: CMP0L       	t30, t32  (-rSo)
+	  39: ICRFL       	t32, $0x6, CR
+	  40: INCEIPL       	$4
+
+	0x2547A3B0:  935F0010  stw r26,16(r31)
+	  41: GETL       	R26, t34
+	  42: GETL       	R31, t36
+	  43: ADDL       	$0x10, t36
+	  44: STL       	t34, (t36)
+	  45: INCEIPL       	$4
+
+	0x2547A3B4:  419A0504  bc 12,26,0x2547A8B8
+	  46: Js26o       	$0x2547A8B8
+
+
+
+. 625 2547A38C 44
+. 80 DA 00 34 7F 72 DB 78 83 9A 00 08 82 A6 00 04 91 FF 00 14 91 DF 00 18 81 3C 00 00 92 BF 00 1C 2F 09 00 00 93 5F 00 10 41 9A 05 04
+==== BB 626 (0x2547A3B8) approx BBs exec'd 0 ====
+
+	0x2547A3B8:  3F207FFF  lis r25,32767
+	   0: MOVL       	$0x7FFF0000, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0x2547A3BC:  6327FFFD  ori r7,r25,0xFFFD
+	   3: MOVL       	$0x7FFFFFFD, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x2547A3C0:  90FF0040  stw r7,64(r31)
+	   6: GETL       	R7, t4
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0x40, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x2547A3C4:  480000BC  b 0x2547A480
+	  11: JMPo       	$0x2547A480  ($4)
+
+
+
+. 626 2547A3B8 16
+. 3F 20 7F FF 63 27 FF FD 90 FF 00 40 48 00 00 BC
+==== BB 627 (0x2547A480) approx BBs exec'd 0 ====
+
+	0x2547A480:  2F890001  cmpi cr7,r9,1
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x2547A484:  419EFF44  bc 12,30,0x2547A3C8
+	   5: Js30o       	$0x2547A3C8
+
+
+
+. 627 2547A480 8
+. 2F 89 00 01 41 9E FF 44
+==== BB 628 (0x2547A3C8) approx BBs exec'd 0 ====
+
+	0x2547A3C8:  801C0004  lwz r0,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547A3CC:  38800024  li r4,36
+	   5: MOVL       	$0x24, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x2547A3D0:  7FA0AA14  add r29,r0,r21
+	   8: GETL       	R0, t6
+	   9: GETL       	R21, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x2547A3D4:  7FA3EB78  or r3,r29,r29
+	  13: GETL       	R29, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x2547A3D8:  7FB9EB78  or r25,r29,r29
+	  16: GETL       	R29, t12
+	  17: PUTL       	t12, R25
+	  18: INCEIPL       	$4
+
+	0x2547A3DC:  48008A25  bl 0x25482E00
+	  19: MOVL       	$0x2547A3E0, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25482E00  ($4)
+
+
+
+. 628 2547A3C8 24
+. 80 1C 00 04 38 80 00 24 7F A0 AA 14 7F A3 EB 78 7F B9 EB 78 48 00 8A 25
+==== BB 629 (0x2547A3E0) approx BBs exec'd 0 ====
+
+	0x2547A3E0:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547A3E4:  40860588  bc 4,6,0x2547A96C
+	   4: Jc06o       	$0x2547A96C
+
+
+
+. 629 2547A3E0 8
+. 2C 83 00 00 40 86 05 88
+==== BB 630 (0x2547A3E8) approx BBs exec'd 0 ====
+
+	0x2547A3E8:  80BE02F8  lwz r5,760(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x2F8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547A3EC:  387F0030  addi r3,r31,48
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x30, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x2547A3F0:  389F0034  addi r4,r31,52
+	   9: GETL       	R31, t6
+	  10: ADDL       	$0x34, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0x2547A3F4:  38DF0010  addi r6,r31,16
+	  13: GETL       	R31, t8
+	  14: ADDL       	$0x10, t8
+	  15: PUTL       	t8, R6
+	  16: INCEIPL       	$4
+
+	0x2547A3F8:  93BF0020  stw r29,32(r31)
+	  17: GETL       	R29, t10
+	  18: GETL       	R31, t12
+	  19: ADDL       	$0x20, t12
+	  20: STL       	t10, (t12)
+	  21: INCEIPL       	$4
+
+	0x2547A3FC:  480011A5  bl 0x2547B5A0
+	  22: MOVL       	$0x2547A400, t14
+	  23: PUTL       	t14, LR
+	  24: JMPo-c       	$0x2547B5A0  ($4)
+
+
+
+. 630 2547A3E8 24
+. 80 BE 02 F8 38 7F 00 30 38 9F 00 34 38 DF 00 10 93 BF 00 20 48 00 11 A5
+==== BB 631 _dl_catch_error(0x2547B5A0) approx BBs exec'd 0 ====
+
+	0x2547B5A0:  9421FD60  stwu r1,-672(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFD60, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547B5A4:  7D4802A6  mflr r10
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547B5A8:  4801BA59  bl 0x25497000
+	   9: MOVL       	$0x2547B5AC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 631 2547B5A0 12
+. 94 21 FD 60 7D 48 02 A6 48 01 BA 59
+==== BB 632 (0x2547B5AC) approx BBs exec'd 0 ====
+
+	0x2547B5AC:  93C10298  stw r30,664(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x298, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B5B0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547B5B4:  39000000  li r8,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R8
+	  10: INCEIPL       	$4
+
+	0x2547B5B8:  914102A4  stw r10,676(r1)
+	  11: GETL       	R10, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x2A4, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547B5BC:  93E1029C  stw r31,668(r1)
+	  16: GETL       	R31, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x29C, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2547B5C0:  80FE04C8  lwz r7,1224(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x4C8, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R7
+	  25: INCEIPL       	$4
+
+	0x2547B5C4:  91010014  stw r8,20(r1)
+	  26: GETL       	R8, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x14, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0x2547B5C8:  812701B4  lwz r9,436(r7)
+	  31: GETL       	R7, t24
+	  32: ADDL       	$0x1B4, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R9
+	  35: INCEIPL       	$4
+
+	0x2547B5CC:  90810274  stw r4,628(r1)
+	  36: GETL       	R4, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x274, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0x2547B5D0:  7D2903A6  mtctr r9
+	  41: GETL       	R9, t32
+	  42: PUTL       	t32, CTR
+	  43: INCEIPL       	$4
+
+	0x2547B5D4:  90A10278  stw r5,632(r1)
+	  44: GETL       	R5, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x278, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0x2547B5D8:  90C1027C  stw r6,636(r1)
+	  49: GETL       	R6, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x27C, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0x2547B5DC:  90610270  stw r3,624(r1)
+	  54: GETL       	R3, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x270, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0x2547B5E0:  4E800421  bctrl
+	  59: MOVL       	$0x2547B5E4, t46
+	  60: PUTL       	t46, LR
+	  61: GETL       	CTR, t48
+	  62: JMPo-c       	t48  ($4)
+
+
+
+. 632 2547B5AC 56
+. 93 C1 02 98 7F C8 02 A6 39 00 00 00 91 41 02 A4 93 E1 02 9C 80 FE 04 C8 91 01 00 14 81 27 01 B4 90 81 02 74 7D 29 03 A6 90 A1 02 78 90 C1 02 7C 90 61 02 70 4E 80 04 21
+==== BB 633 _dl_initial_error_catch_tsd(0x2547185C) approx BBs exec'd 0 ====
+
+	0x2547185C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25471860:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25471864:  4802579D  bl 0x25497000
+	   9: MOVL       	$0x25471868, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 633 2547185C 12
+. 94 21 FF F0 7D 88 02 A6 48 02 57 9D
+==== BB 634 (0x25471868) approx BBs exec'd 0 ====
+
+	0x25471868:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547186C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25471870:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x25471874:  807E0018  lwz r3,24(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x18, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x25471878:  83C10008  lwz r30,8(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x8, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0x2547187C:  38210010  addi r1,r1,16
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: PUTL       	t16, R1
+	  24: INCEIPL       	$4
+
+	0x25471880:  4E800020  blr
+	  25: GETL       	LR, t18
+	  26: JMPo-r       	t18  ($4)
+
+
+
+. 634 25471868 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 7E 00 18 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 635 (0x2547B5E4) approx BBs exec'd 0 ====
+
+	0x2547B5E4:  80030000  lwz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547B5E8:  90610280  stw r3,640(r1)
+	   4: GETL       	R3, t4
+	   5: GETL       	R1, t6
+	   6: ADDL       	$0x280, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547B5EC:  38610020  addi r3,r1,32
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x20, t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x2547B5F0:  90010008  stw r0,8(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x8, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547B5F4:  480072BD  bl 0x254828B0
+	  18: MOVL       	$0x2547B5F8, t14
+	  19: PUTL       	t14, LR
+	  20: JMPo-c       	$0x254828B0  ($4)
+
+
+
+. 635 2547B5E4 20
+. 80 03 00 00 90 61 02 80 38 61 00 20 90 01 00 08 48 00 72 BD
+==== BB 636 _setjmp(0x254828B0) approx BBs exec'd 0 ====
+
+	0x254828B0:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254828B4:  480001DC  b 0x25482A90
+	   3: JMPo       	$0x25482A90  ($4)
+
+
+
+. 636 254828B0 8
+. 38 80 00 00 48 00 01 DC
+==== BB 637 __sigsetjmp(0x25482A90) approx BBs exec'd 0 ====
+
+	0x25482A90:  90230000  stw r1,0(r3)
+	   0: GETL       	R1, t0
+	   1: GETL       	R3, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25482A94:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0x25482A98:  91C3000C  stw r14,12(r3)
+	   7: GETL       	R14, t6
+	   8: GETL       	R3, t8
+	   9: ADDL       	$0xC, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25482A9C:  D9C30058  stfd f14,88(r3)
+	  12: GETL       	R3, t10
+	  13: ADDL       	$0x58, t10
+	  14: FPU_WQ       	0x0:0xE, (t10)
+	  15: INCEIPL       	$4
+
+	0x25482AA0:  90030008  stw r0,8(r3)
+	  16: GETL       	R0, t12
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0x8, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x25482AA4:  91E30010  stw r15,16(r3)
+	  21: GETL       	R15, t16
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25482AA8:  D9E30060  stfd f15,96(r3)
+	  26: GETL       	R3, t20
+	  27: ADDL       	$0x60, t20
+	  28: FPU_WQ       	0x0:0xF, (t20)
+	  29: INCEIPL       	$4
+
+	0x25482AAC:  7C000026  mfcr r0
+	  30: GETL       	CR, t22
+	  31: PUTL       	t22, R0
+	  32: INCEIPL       	$4
+
+	0x25482AB0:  92030014  stw r16,20(r3)
+	  33: GETL       	R16, t24
+	  34: GETL       	R3, t26
+	  35: ADDL       	$0x14, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x25482AB4:  DA030068  stfd f16,104(r3)
+	  38: GETL       	R3, t28
+	  39: ADDL       	$0x68, t28
+	  40: FPU_WQ       	0x0:0x10, (t28)
+	  41: INCEIPL       	$4
+
+	0x25482AB8:  90030054  stw r0,84(r3)
+	  42: GETL       	R0, t30
+	  43: GETL       	R3, t32
+	  44: ADDL       	$0x54, t32
+	  45: STL       	t30, (t32)
+	  46: INCEIPL       	$4
+
+	0x25482ABC:  92230018  stw r17,24(r3)
+	  47: GETL       	R17, t34
+	  48: GETL       	R3, t36
+	  49: ADDL       	$0x18, t36
+	  50: STL       	t34, (t36)
+	  51: INCEIPL       	$4
+
+	0x25482AC0:  DA230070  stfd f17,112(r3)
+	  52: GETL       	R3, t38
+	  53: ADDL       	$0x70, t38
+	  54: FPU_WQ       	0x0:0x11, (t38)
+	  55: INCEIPL       	$4
+
+	0x25482AC4:  9243001C  stw r18,28(r3)
+	  56: GETL       	R18, t40
+	  57: GETL       	R3, t42
+	  58: ADDL       	$0x1C, t42
+	  59: STL       	t40, (t42)
+	  60: INCEIPL       	$4
+
+	0x25482AC8:  DA430078  stfd f18,120(r3)
+	  61: GETL       	R3, t44
+	  62: ADDL       	$0x78, t44
+	  63: FPU_WQ       	0x0:0x12, (t44)
+	  64: INCEIPL       	$4
+
+	0x25482ACC:  92630020  stw r19,32(r3)
+	  65: GETL       	R19, t46
+	  66: GETL       	R3, t48
+	  67: ADDL       	$0x20, t48
+	  68: STL       	t46, (t48)
+	  69: INCEIPL       	$4
+
+	0x25482AD0:  DA630080  stfd f19,128(r3)
+	  70: GETL       	R3, t50
+	  71: ADDL       	$0x80, t50
+	  72: FPU_WQ       	0x0:0x13, (t50)
+	  73: INCEIPL       	$4
+
+	0x25482AD4:  92830024  stw r20,36(r3)
+	  74: GETL       	R20, t52
+	  75: GETL       	R3, t54
+	  76: ADDL       	$0x24, t54
+	  77: STL       	t52, (t54)
+	  78: INCEIPL       	$4
+
+	0x25482AD8:  DA830088  stfd f20,136(r3)
+	  79: GETL       	R3, t56
+	  80: ADDL       	$0x88, t56
+	  81: FPU_WQ       	0x0:0x14, (t56)
+	  82: INCEIPL       	$4
+
+	0x25482ADC:  92A30028  stw r21,40(r3)
+	  83: GETL       	R21, t58
+	  84: GETL       	R3, t60
+	  85: ADDL       	$0x28, t60
+	  86: STL       	t58, (t60)
+	  87: INCEIPL       	$4
+
+	0x25482AE0:  DAA30090  stfd f21,144(r3)
+	  88: GETL       	R3, t62
+	  89: ADDL       	$0x90, t62
+	  90: FPU_WQ       	0x0:0x15, (t62)
+	  91: INCEIPL       	$4
+
+	0x25482AE4:  92C3002C  stw r22,44(r3)
+	  92: GETL       	R22, t64
+	  93: GETL       	R3, t66
+	  94: ADDL       	$0x2C, t66
+	  95: STL       	t64, (t66)
+	  96: INCEIPL       	$4
+
+	0x25482AE8:  DAC30098  stfd f22,152(r3)
+	  97: GETL       	R3, t68
+	  98: ADDL       	$0x98, t68
+	  99: FPU_WQ       	0x0:0x16, (t68)
+	 100: INCEIPL       	$4
+
+	0x25482AEC:  92E30030  stw r23,48(r3)
+	 101: GETL       	R23, t70
+	 102: GETL       	R3, t72
+	 103: ADDL       	$0x30, t72
+	 104: STL       	t70, (t72)
+	 105: INCEIPL       	$4
+
+	0x25482AF0:  DAE300A0  stfd f23,160(r3)
+	 106: GETL       	R3, t74
+	 107: ADDL       	$0xA0, t74
+	 108: FPU_WQ       	0x0:0x17, (t74)
+	 109: INCEIPL       	$4
+
+	0x25482AF4:  93030034  stw r24,52(r3)
+	 110: GETL       	R24, t76
+	 111: GETL       	R3, t78
+	 112: ADDL       	$0x34, t78
+	 113: STL       	t76, (t78)
+	 114: INCEIPL       	$4
+
+	0x25482AF8:  DB0300A8  stfd f24,168(r3)
+	 115: GETL       	R3, t80
+	 116: ADDL       	$0xA8, t80
+	 117: FPU_WQ       	0x0:0x18, (t80)
+	 118: INCEIPL       	$4
+
+	0x25482AFC:  93230038  stw r25,56(r3)
+	 119: GETL       	R25, t82
+	 120: GETL       	R3, t84
+	 121: ADDL       	$0x38, t84
+	 122: STL       	t82, (t84)
+	 123: INCEIPL       	$4
+
+	0x25482B00:  DB2300B0  stfd f25,176(r3)
+	 124: GETL       	R3, t86
+	 125: ADDL       	$0xB0, t86
+	 126: FPU_WQ       	0x0:0x19, (t86)
+	 127: INCEIPL       	$4
+
+	0x25482B04:  9343003C  stw r26,60(r3)
+	 128: GETL       	R26, t88
+	 129: GETL       	R3, t90
+	 130: ADDL       	$0x3C, t90
+	 131: STL       	t88, (t90)
+	 132: INCEIPL       	$4
+
+	0x25482B08:  DB4300B8  stfd f26,184(r3)
+	 133: GETL       	R3, t92
+	 134: ADDL       	$0xB8, t92
+	 135: FPU_WQ       	0x0:0x1A, (t92)
+	 136: INCEIPL       	$4
+
+	0x25482B0C:  93630040  stw r27,64(r3)
+	 137: GETL       	R27, t94
+	 138: GETL       	R3, t96
+	 139: ADDL       	$0x40, t96
+	 140: STL       	t94, (t96)
+	 141: INCEIPL       	$4
+
+	0x25482B10:  DB6300C0  stfd f27,192(r3)
+	 142: GETL       	R3, t98
+	 143: ADDL       	$0xC0, t98
+	 144: FPU_WQ       	0x0:0x1B, (t98)
+	 145: INCEIPL       	$4
+
+	0x25482B14:  93830044  stw r28,68(r3)
+	 146: GETL       	R28, t100
+	 147: GETL       	R3, t102
+	 148: ADDL       	$0x44, t102
+	 149: STL       	t100, (t102)
+	 150: INCEIPL       	$4
+
+	0x25482B18:  DB8300C8  stfd f28,200(r3)
+	 151: GETL       	R3, t104
+	 152: ADDL       	$0xC8, t104
+	 153: FPU_WQ       	0x0:0x1C, (t104)
+	 154: INCEIPL       	$4
+
+	0x25482B1C:  93A30048  stw r29,72(r3)
+	 155: GETL       	R29, t106
+	 156: GETL       	R3, t108
+	 157: ADDL       	$0x48, t108
+	 158: STL       	t106, (t108)
+	 159: INCEIPL       	$4
+
+	0x25482B20:  DBA300D0  stfd f29,208(r3)
+	 160: GETL       	R3, t110
+	 161: ADDL       	$0xD0, t110
+	 162: FPU_WQ       	0x0:0x1D, (t110)
+	 163: INCEIPL       	$4
+
+	0x25482B24:  93C3004C  stw r30,76(r3)
+	 164: GETL       	R30, t112
+	 165: GETL       	R3, t114
+	 166: ADDL       	$0x4C, t114
+	 167: STL       	t112, (t114)
+	 168: INCEIPL       	$4
+
+	0x25482B28:  DBC300D8  stfd f30,216(r3)
+	 169: GETL       	R3, t116
+	 170: ADDL       	$0xD8, t116
+	 171: FPU_WQ       	0x0:0x1E, (t116)
+	 172: INCEIPL       	$4
+
+	0x25482B2C:  93E30050  stw r31,80(r3)
+	 173: GETL       	R31, t118
+	 174: GETL       	R3, t120
+	 175: ADDL       	$0x50, t120
+	 176: STL       	t118, (t120)
+	 177: INCEIPL       	$4
+
+	0x25482B30:  DBE300E0  stfd f31,224(r3)
+	 178: GETL       	R3, t122
+	 179: ADDL       	$0xE0, t122
+	 180: FPU_WQ       	0x0:0x1F, (t122)
+	 181: INCEIPL       	$4
+
+	0x25482B34:  7CC802A6  mflr r6
+	 182: GETL       	LR, t124
+	 183: PUTL       	t124, R6
+	 184: INCEIPL       	$4
+
+	0x25482B38:  480144C9  bl 0x25497000
+	 185: MOVL       	$0x25482B3C, t126
+	 186: PUTL       	t126, LR
+	 187: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 637 25482A90 172
+. 90 23 00 00 7C 08 02 A6 91 C3 00 0C D9 C3 00 58 90 03 00 08 91 E3 00 10 D9 E3 00 60 7C 00 00 26 92 03 00 14 DA 03 00 68 90 03 00 54 92 23 00 18 DA 23 00 70 92 43 00 1C DA 43 00 78 92 63 00 20 DA 63 00 80 92 83 00 24 DA 83 00 88 92 A3 00 28 DA A3 00 90 92 C3 00 2C DA C3 00 98 92 E3 00 30 DA E3 00 A0 93 03 00 34 DB 03 00 A8 93 23 00 38 DB 23 00 B0 93 43 00 3C DB 43 00 B8 93 63 00 40 DB 63 00 C0 93 83 00 44 DB 83 00 C8 93 A3 00 48 DB A3 00 D0 93 C3 00 4C DB C3 00 D8 93 E3 00 50 DB E3 00 E0 7C C8 02 A6 48 01 44 C9
+==== BB 638 (0x25482B3C) approx BBs exec'd 0 ====
+
+	0x25482B3C:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x25482B40:  80A504E4  lwz r5,1252(r5)
+	   3: GETL       	R5, t2
+	   4: ADDL       	$0x4E4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25482B44:  7CC803A6  mtlr r6
+	   8: GETL       	R6, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x25482B48:  80A5003C  lwz r5,60(r5)
+	  11: GETL       	R5, t8
+	  12: ADDL       	$0x3C, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x25482B4C:  74A51000  andis. r5,r5,0x1000
+	  16: GETL       	R5, t12
+	  17: ANDL       	$0x10000000, t12
+	  18: PUTL       	t12, R5
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0x25482B50:  41820158  bc 12,2,0x25482CA8
+	  22: Js02o       	$0x25482CA8
+
+
+
+. 638 25482B3C 24
+. 7C A8 02 A6 80 A5 04 E4 7C C8 03 A6 80 A5 00 3C 74 A5 10 00 41 82 01 58
+==== BB 639 (0x25482B54) approx BBs exec'd 0 ====
+
+	0x25482B54:  38A30100  addi r5,r3,256
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x100, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x25482B58:  70A6000F  andi. r6,r5,0xF
+	   4: GETL       	R5, t2
+	   5: ANDL       	$0xF, t2
+	   6: PUTL       	t2, R6
+	   7: CMP0L       	t2, t4  (-rSo)
+	   8: ICRFL       	t4, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x25482B5C:  7C0042A6  	  10: VEC_TRL       	2:0x0, t6
+	  11: PUTL       	t6, R0
+	  12: INCEIPL       	$4
+
+	0x25482B60:  900300F8  stw r0,248(r3)
+	  13: GETL       	R0, t8
+	  14: GETL       	R3, t10
+	  15: ADDL       	$0xF8, t10
+	  16: STL       	t8, (t10)
+	  17: INCEIPL       	$4
+
+	0x25482B64:  38C50010  addi r6,r5,16
+	  18: GETL       	R5, t12
+	  19: ADDL       	$0x10, t12
+	  20: PUTL       	t12, R6
+	  21: INCEIPL       	$4
+
+	0x25482B68:  41A200E8  bc 13,2,0x25482C50
+	  22: Js02o       	$0x25482C50
+
+
+
+. 639 25482B54 24
+. 38 A3 01 00 70 A6 00 0F 7C 00 42 A6 90 03 00 F8 38 C5 00 10 41 A2 00 E8
+==== BB 640 (0x25482C50) approx BBs exec'd 0 ====
+
+	0x25482C50:  7E8029CE  stvx vr20,r0,r5
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0xFFFFFFF0, t0
+	   2: VEC_WQQ       	0x0:0x14, (t0)
+	   3: INCEIPL       	$4
+
+	0x25482C54:  38A50020  addi r5,r5,32
+	   4: GETL       	R5, t2
+	   5: ADDL       	$0x20, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0x25482C58:  7EA031CE  stvx vr21,r0,r6
+	   8: GETL       	R6, t4
+	   9: ANDL       	$0xFFFFFFF0, t4
+	  10: VEC_WQQ       	0x0:0x15, (t4)
+	  11: INCEIPL       	$4
+
+	0x25482C5C:  38C60020  addi r6,r6,32
+	  12: GETL       	R6, t6
+	  13: ADDL       	$0x20, t6
+	  14: PUTL       	t6, R6
+	  15: INCEIPL       	$4
+
+	0x25482C60:  7EC029CE  stvx vr22,r0,r5
+	  16: GETL       	R5, t8
+	  17: ANDL       	$0xFFFFFFF0, t8
+	  18: VEC_WQQ       	0x0:0x16, (t8)
+	  19: INCEIPL       	$4
+
+	0x25482C64:  38A50020  addi r5,r5,32
+	  20: GETL       	R5, t10
+	  21: ADDL       	$0x20, t10
+	  22: PUTL       	t10, R5
+	  23: INCEIPL       	$4
+
+	0x25482C68:  7EE031CE  stvx vr23,r0,r6
+	  24: GETL       	R6, t12
+	  25: ANDL       	$0xFFFFFFF0, t12
+	  26: VEC_WQQ       	0x0:0x17, (t12)
+	  27: INCEIPL       	$4
+
+	0x25482C6C:  38C60020  addi r6,r6,32
+	  28: GETL       	R6, t14
+	  29: ADDL       	$0x20, t14
+	  30: PUTL       	t14, R6
+	  31: INCEIPL       	$4
+
+	0x25482C70:  7F0029CE  stvx vr24,r0,r5
+	  32: GETL       	R5, t16
+	  33: ANDL       	$0xFFFFFFF0, t16
+	  34: VEC_WQQ       	0x0:0x18, (t16)
+	  35: INCEIPL       	$4
+
+	0x25482C74:  38A50020  addi r5,r5,32
+	  36: GETL       	R5, t18
+	  37: ADDL       	$0x20, t18
+	  38: PUTL       	t18, R5
+	  39: INCEIPL       	$4
+
+	0x25482C78:  7F2031CE  stvx vr25,r0,r6
+	  40: GETL       	R6, t20
+	  41: ANDL       	$0xFFFFFFF0, t20
+	  42: VEC_WQQ       	0x0:0x19, (t20)
+	  43: INCEIPL       	$4
+
+	0x25482C7C:  38C60020  addi r6,r6,32
+	  44: GETL       	R6, t22
+	  45: ADDL       	$0x20, t22
+	  46: PUTL       	t22, R6
+	  47: INCEIPL       	$4
+
+	0x25482C80:  7F4029CE  stvx vr26,r0,r5
+	  48: GETL       	R5, t24
+	  49: ANDL       	$0xFFFFFFF0, t24
+	  50: VEC_WQQ       	0x0:0x1A, (t24)
+	  51: INCEIPL       	$4
+
+	0x25482C84:  38A50020  addi r5,r5,32
+	  52: GETL       	R5, t26
+	  53: ADDL       	$0x20, t26
+	  54: PUTL       	t26, R5
+	  55: INCEIPL       	$4
+
+	0x25482C88:  7F6031CE  stvx vr27,r0,r6
+	  56: GETL       	R6, t28
+	  57: ANDL       	$0xFFFFFFF0, t28
+	  58: VEC_WQQ       	0x0:0x1B, (t28)
+	  59: INCEIPL       	$4
+
+	0x25482C8C:  38C60020  addi r6,r6,32
+	  60: GETL       	R6, t30
+	  61: ADDL       	$0x20, t30
+	  62: PUTL       	t30, R6
+	  63: INCEIPL       	$4
+
+	0x25482C90:  7F8029CE  stvx vr28,r0,r5
+	  64: GETL       	R5, t32
+	  65: ANDL       	$0xFFFFFFF0, t32
+	  66: VEC_WQQ       	0x0:0x1C, (t32)
+	  67: INCEIPL       	$4
+
+	0x25482C94:  38A50020  addi r5,r5,32
+	  68: GETL       	R5, t34
+	  69: ADDL       	$0x20, t34
+	  70: PUTL       	t34, R5
+	  71: INCEIPL       	$4
+
+	0x25482C98:  7FA031CE  stvx vr29,r0,r6
+	  72: GETL       	R6, t36
+	  73: ANDL       	$0xFFFFFFF0, t36
+	  74: VEC_WQQ       	0x0:0x1D, (t36)
+	  75: INCEIPL       	$4
+
+	0x25482C9C:  38C60020  addi r6,r6,32
+	  76: GETL       	R6, t38
+	  77: ADDL       	$0x20, t38
+	  78: PUTL       	t38, R6
+	  79: INCEIPL       	$4
+
+	0x25482CA0:  7FC029CE  stvx vr30,r0,r5
+	  80: GETL       	R5, t40
+	  81: ANDL       	$0xFFFFFFF0, t40
+	  82: VEC_WQQ       	0x0:0x1E, (t40)
+	  83: INCEIPL       	$4
+
+	0x25482CA4:  7FE031CE  stvx vr31,r0,r6
+	  84: GETL       	R6, t42
+	  85: ANDL       	$0xFFFFFFF0, t42
+	  86: VEC_WQQ       	0x0:0x1F, (t42)
+	  87: INCEIPL       	$4
+
+	0x25482CA8:  4BFFD2F0  b 0x2547FF98
+	  88: JMPo       	$0x2547FF98  ($4)
+
+
+
+. 640 25482C50 92
+. 7E 80 29 CE 38 A5 00 20 7E A0 31 CE 38 C6 00 20 7E C0 29 CE 38 A5 00 20 7E E0 31 CE 38 C6 00 20 7F 00 29 CE 38 A5 00 20 7F 20 31 CE 38 C6 00 20 7F 40 29 CE 38 A5 00 20 7F 60 31 CE 38 C6 00 20 7F 80 29 CE 38 A5 00 20 7F A0 31 CE 38 C6 00 20 7F C0 29 CE 7F E0 31 CE 4B FF D2 F0
+==== BB 641 __sigjmp_save(0x2547FF98) approx BBs exec'd 0 ====
+
+	0x2547FF98:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547FF9C:  38000000  li r0,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547FFA0:  900301C0  stw r0,448(r3)
+	   9: GETL       	R0, t6
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x1C0, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547FFA4:  38210010  addi r1,r1,16
+	  14: GETL       	R1, t10
+	  15: ADDL       	$0x10, t10
+	  16: PUTL       	t10, R1
+	  17: INCEIPL       	$4
+
+	0x2547FFA8:  38600000  li r3,0
+	  18: MOVL       	$0x0, t12
+	  19: PUTL       	t12, R3
+	  20: INCEIPL       	$4
+
+	0x2547FFAC:  4E800020  blr
+	  21: GETL       	LR, t14
+	  22: JMPo-r       	t14  ($4)
+
+
+
+. 641 2547FF98 24
+. 94 21 FF F0 38 00 00 00 90 03 01 C0 38 21 00 10 38 60 00 00 4E 80 00 20
+==== BB 642 (0x2547B5F8) approx BBs exec'd 0 ====
+
+	0x2547B5F8:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547B5FC:  8061027C  lwz r3,636(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x27C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547B600:  7FEAFB78  or r10,r31,r31
+	  10: GETL       	R31, t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x2547B604:  2F9FFFFF  cmpi cr7,r31,-1
+	  13: GETL       	R31, t10
+	  14: MOVL       	$0xFFFFFFFF, t14
+	  15: CMPL       	t10, t14, t12  (-rSo)
+	  16: ICRFL       	t12, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0x2547B608:  40820054  bc 4,2,0x2547B65C
+	  18: Jc02o       	$0x2547B65C
+
+
+
+. 642 2547B5F8 20
+. 7C 7F 1B 79 80 61 02 7C 7F EA FB 78 2F 9F FF FF 40 82 00 54
+==== BB 643 (0x2547B60C) approx BBs exec'd 0 ====
+
+	0x2547B60C:  81210280  lwz r9,640(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x280, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547B610:  38010010  addi r0,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547B614:  81810278  lwz r12,632(r1)
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x278, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0x2547B618:  90090000  stw r0,0(r9)
+	  14: GETL       	R0, t10
+	  15: GETL       	R9, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547B61C:  7D8903A6  mtctr r12
+	  18: GETL       	R12, t14
+	  19: PUTL       	t14, CTR
+	  20: INCEIPL       	$4
+
+	0x2547B620:  4E800421  bctrl
+	  21: MOVL       	$0x2547B624, t16
+	  22: PUTL       	t16, LR
+	  23: GETL       	CTR, t18
+	  24: JMPo-c       	t18  ($4)
+
+
+
+. 643 2547B60C 24
+. 81 21 02 80 38 01 00 10 81 81 02 78 90 09 00 00 7D 89 03 A6 4E 80 04 21
+==== BB 644 openaux(0x2547A0BC) approx BBs exec'd 0 ====
+
+	0x2547A0BC:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547A0C0:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547A0C4:  93E10008  stw r31,8(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547A0C8:  7C7F1B78  or r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0x2547A0CC:  38A00000  li r5,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R5
+	  19: INCEIPL       	$4
+
+	0x2547A0D0:  90810014  stw r4,20(r1)
+	  20: GETL       	R4, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x2547A0D4:  80630000  lwz r3,0(r3)
+	  25: GETL       	R3, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x2547A0D8:  809F0010  lwz r4,16(r31)
+	  29: GETL       	R31, t22
+	  30: ADDL       	$0x10, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R4
+	  33: INCEIPL       	$4
+
+	0x2547A0DC:  80030180  lwz r0,384(r3)
+	  34: GETL       	R3, t26
+	  35: ADDL       	$0x180, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R0
+	  38: INCEIPL       	$4
+
+	0x2547A0E0:  7409C000  andis. r9,r0,0xC000
+	  39: GETL       	R0, t30
+	  40: ANDL       	$0xC0000000, t30
+	  41: PUTL       	t30, R9
+	  42: CMP0L       	t30, t32  (-rSo)
+	  43: ICRFL       	t32, $0x0, CR
+	  44: INCEIPL       	$4
+
+	0x2547A0E4:  540617BE  rlwinm r6,r0,2,30,31
+	  45: GETL       	R0, t34
+	  46: SHRL       	$0x1E, t34
+	  47: PUTL       	t34, R6
+	  48: INCEIPL       	$4
+
+	0x2547A0E8:  40820008  bc 4,2,0x2547A0F0
+	  49: Jc02o       	$0x2547A0F0
+
+
+
+. 644 2547A0BC 48
+. 7C 88 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 38 A0 00 00 90 81 00 14 80 63 00 00 80 9F 00 10 80 03 01 80 74 09 C0 00 54 06 17 BE 40 82 00 08
+==== BB 645 (0x2547A0EC) approx BBs exec'd 0 ====
+
+	0x2547A0EC:  38C00001  li r6,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x2547A0F0:  80FF0004  lwz r7,4(r31)
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x2547A0F4:  811F0008  lwz r8,8(r31)
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0x2547A0F8:  81230018  lwz r9,24(r3)
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x2547A0FC:  4BFFCBED  bl 0x25476CE8
+	  18: MOVL       	$0x2547A100, t14
+	  19: PUTL       	t14, LR
+	  20: JMPo-c       	$0x25476CE8  ($4)
+
+
+
+. 645 2547A0EC 20
+. 38 C0 00 01 80 FF 00 04 81 1F 00 08 81 23 00 18 4B FF CB ED
+==== BB 646 (0x25482FA8) approx BBs exec'd 0 ====
+
+	0x25482FA8:  8CA30001  lbzu r5,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25482FAC:  8CC40001  lbzu r6,1(r4)
+	   6: GETL       	R4, t4
+	   7: ADDL       	$0x1, t4
+	   8: PUTL       	t4, R4
+	   9: LDB       	(t4), t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0x25482FB0:  2C850000  cmpi cr1,r5,0
+	  12: GETL       	R5, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0x25482FB4:  7C053000  cmp cr0,r5,r6
+	  16: GETL       	R5, t12
+	  17: GETL       	R6, t14
+	  18: CMPL       	t12, t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0x25482FB8:  4086FFD4  bc 4,6,0x25482F8C
+	  21: Jc06o       	$0x25482F8C
+
+
+
+. 646 25482FA8 20
+. 8C A3 00 01 8C C4 00 01 2C 85 00 00 7C 05 30 00 40 86 FF D4
+==== BB 647 (0x25482F8C) approx BBs exec'd 0 ====
+
+	0x25482F8C:  8CA30001  lbzu r5,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25482F90:  40820034  bc 4,2,0x25482FC4
+	   6: Jc02o       	$0x25482FC4
+
+
+
+. 647 25482F8C 8
+. 8C A3 00 01 40 82 00 34
+==== BB 648 (0x25482FC4) approx BBs exec'd 0 ====
+
+	0x25482FC4:  88A3FFFF  lbz r5,-1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25482FC8:  7C662850  subf r3,r6,r5
+	   5: GETL       	R6, t4
+	   6: GETL       	R5, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25482FCC:  4E800020  blr
+	  10: GETL       	LR, t8
+	  11: JMPo-r       	t8  ($4)
+
+
+
+. 648 25482FC4 12
+. 88 A3 FF FF 7C 66 28 50 4E 80 00 20
+==== BB 649 (0x2547706C) approx BBs exec'd 0 ====
+
+	0x2547706C:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25477070:  4800BF61  bl 0x25482FD0
+	   3: MOVL       	$0x25477074, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+
+. 649 2547706C 8
+. 7F E3 FB 78 48 00 BF 61
+==== BB 650 (0x25477074) approx BBs exec'd 0 ====
+
+	0x25477074:  83540000  lwz r26,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R26
+	   3: INCEIPL       	$4
+
+	0x25477078:  3AC30001  addi r22,r3,1
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0x2547707C:  73490001  andi. r9,r26,0x1
+	   8: GETL       	R26, t6
+	   9: ANDL       	$0x1, t6
+	  10: PUTL       	t6, R9
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x25477080:  408203C4  bc 4,2,0x25477444
+	  14: Jc02o       	$0x25477444
+
+
+
+. 650 25477074 16
+. 83 54 00 00 3A C3 00 01 73 49 00 01 40 82 03 C4
+==== BB 651 (0x25477084) approx BBs exec'd 0 ====
+
+	0x25477084:  3B40FFFF  li r26,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R26
+	   2: INCEIPL       	$4
+
+	0x25477088:  418E0020  bc 12,14,0x254770A8
+	   3: Js14o       	$0x254770A8
+
+
+
+. 651 25477084 8
+. 3B 40 FF FF 41 8E 00 20
+==== BB 652 (0x2547708C) approx BBs exec'd 0 ====
+
+	0x2547708C:  839B0094  lwz r28,148(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x94, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25477090:  7F6CFE70  srawi r12,r27,31
+	   5: GETL       	R27, t4
+	   6: SARL       	$0x1F, t4  (-wCa)
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25477094:  7D9DDA78  xor r29,r12,r27
+	   9: GETL       	R12, t6
+	  10: GETL       	R27, t8
+	  11: XORL       	t6, t8
+	  12: PUTL       	t8, R29
+	  13: INCEIPL       	$4
+
+	0x25477098:  2E1AFFFF  cmpi cr4,r26,-1
+	  14: GETL       	R26, t10
+	  15: MOVL       	$0xFFFFFFFF, t14
+	  16: CMPL       	t10, t14, t12  (-rSo)
+	  17: ICRFL       	t12, $0x4, CR
+	  18: INCEIPL       	$4
+
+	0x2547709C:  2F9C0000  cmpi cr7,r28,0
+	  19: GETL       	R28, t16
+	  20: CMP0L       	t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x254770A0:  7DCCE850  subf r14,r12,r29
+	  23: GETL       	R12, t20
+	  24: GETL       	R29, t22
+	  25: SUBL       	t20, t22
+	  26: PUTL       	t22, R14
+	  27: INCEIPL       	$4
+
+	0x254770A4:  409E016C  bc 4,30,0x25477210
+	  28: Jc30o       	$0x25477210
+
+
+
+. 652 2547708C 28
+. 83 9B 00 94 7F 6C FE 70 7D 9D DA 78 2E 1A FF FF 2F 9C 00 00 7D CC E8 50 40 9E 01 6C
+==== BB 653 (0x254770A8) approx BBs exec'd 0 ====
+
+	0x254770A8:  2C9B0000  cmpi cr1,r27,0
+	   0: GETL       	R27, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x254770AC:  7F65FE70  srawi r5,r27,31
+	   4: GETL       	R27, t4
+	   5: SARL       	$0x1F, t4  (-wCa)
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x254770B0:  7CA0DA78  xor r0,r5,r27
+	   8: GETL       	R5, t6
+	   9: GETL       	R27, t8
+	  10: XORL       	t6, t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x254770B4:  7F7DDB78  or r29,r27,r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0x254770B8:  7DC50050  subf r14,r5,r0
+	  16: GETL       	R5, t12
+	  17: GETL       	R0, t14
+	  18: SUBL       	t12, t14
+	  19: PUTL       	t14, R14
+	  20: INCEIPL       	$4
+
+	0x254770BC:  41860370  bc 12,6,0x2547742C
+	  21: Js06o       	$0x2547742C
+
+
+
+. 653 254770A8 24
+. 2C 9B 00 00 7F 65 FE 70 7C A0 DA 78 7F 7D DB 78 7D C5 00 50 41 86 03 70
+==== BB 654 (0x254770C0) approx BBs exec'd 0 ====
+
+	0x254770C0:  825E019C  lwz r18,412(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x19C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x254770C4:  39E0FFFF  li r15,-1
+	   5: MOVL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R15
+	   7: INCEIPL       	$4
+
+	0x254770C8:  48000024  b 0x254770EC
+	   8: JMPo       	$0x254770EC  ($4)
+
+
+
+. 654 254770C0 12
+. 82 5E 01 9C 39 E0 FF FF 48 00 00 24
+==== BB 655 (0x254770EC) approx BBs exec'd 0 ====
+
+	0x254770EC:  807D018C  lwz r3,396(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x18C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254770F0:  3B9D018C  addi r28,r29,396
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x18C, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x254770F4:  38000000  li r0,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0x254770F8:  2E03FFFF  cmpi cr4,r3,-1
+	  12: GETL       	R3, t8
+	  13: MOVL       	$0xFFFFFFFF, t12
+	  14: CMPL       	t8, t12, t10  (-rSo)
+	  15: ICRFL       	t10, $0x4, CR
+	  16: INCEIPL       	$4
+
+	0x254770FC:  2F030000  cmpi cr6,r3,0
+	  17: GETL       	R3, t14
+	  18: CMP0L       	t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x6, CR
+	  20: INCEIPL       	$4
+
+	0x25477100:  4192002C  bc 12,18,0x2547712C
+	  21: Js18o       	$0x2547712C
+
+
+
+. 655 254770EC 24
+. 80 7D 01 8C 3B 9D 01 8C 38 00 00 00 2E 03 FF FF 2F 03 00 00 41 92 00 2C
+==== BB 656 (0x2547712C) approx BBs exec'd 0 ====
+
+	0x2547712C:  2C000000  cmpi cr0,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25477130:  7F86E378  or r6,r28,r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0x25477134:  7FE3FB78  or r3,r31,r31
+	   7: GETL       	R31, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25477138:  7EC4B378  or r4,r22,r22
+	  10: GETL       	R22, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x2547713C:  7EE5BB78  or r5,r23,r23
+	  13: GETL       	R23, t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x25477140:  38E10228  addi r7,r1,552
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x228, t12
+	  18: PUTL       	t12, R7
+	  19: INCEIPL       	$4
+
+	0x25477144:  39010018  addi r8,r1,24
+	  20: GETL       	R1, t14
+	  21: ADDL       	$0x18, t14
+	  22: PUTL       	t14, R8
+	  23: INCEIPL       	$4
+
+	0x25477148:  4182FF84  bc 12,2,0x254770CC
+	  24: Js02o       	$0x254770CC
+
+
+
+. 656 2547712C 32
+. 2C 00 00 00 7F 86 E3 78 7F E3 FB 78 7E C4 B3 78 7E E5 BB 78 38 E1 02 28 39 01 00 18 41 82 FF 84
+==== BB 657 (0x254770CC) approx BBs exec'd 0 ====
+
+	0x254770CC:  2E1AFFFF  cmpi cr4,r26,-1
+	   0: GETL       	R26, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x254770D0:  83BD0168  lwz r29,360(r29)
+	   5: GETL       	R29, t6
+	   6: ADDL       	$0x168, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R29
+	   9: INCEIPL       	$4
+
+	0x254770D4:  313DFFFF  addic r9,r29,-1
+	  10: GETL       	R29, t10
+	  11: ADCL       	$0xFFFFFFFF, t10  (-wCa)
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x254770D8:  7CC9E910  subfe r6,r9,r29
+	  14: GETL       	R9, t12
+	  15: GETL       	R29, t14
+	  16: SBBL       	t12, t14  (-rCa-wCa)
+	  17: PUTL       	t14, R6
+	  18: INCEIPL       	$4
+
+	0x254770DC:  7D200026  mfcr r9
+	  19: GETL       	CR, t16
+	  20: PUTL       	t16, R9
+	  21: INCEIPL       	$4
+
+	0x254770E0:  55299FFE  rlwinm r9,r9,19,31,31
+	  22: GETL       	R9, t18
+	  23: ROLL       	$0x13, t18
+	  24: ANDL       	$0x1, t18
+	  25: PUTL       	t18, R9
+	  26: INCEIPL       	$4
+
+	0x254770E4:  7D2B3039  and. r11,r9,r6
+	  27: GETL       	R9, t20
+	  28: GETL       	R6, t22
+	  29: ANDL       	t20, t22
+	  30: PUTL       	t22, R11
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x0, CR
+	  33: INCEIPL       	$4
+
+	0x254770E8:  418200B0  bc 12,2,0x25477198
+	  34: Js02o       	$0x25477198
+
+
+
+. 657 254770CC 32
+. 2E 1A FF FF 83 BD 01 68 31 3D FF FF 7C C9 E9 10 7D 20 00 26 55 29 9F FE 7D 2B 30 39 41 82 00 B0
+==== BB 658 (0x25477198) approx BBs exec'd 0 ====
+
+	0x25477198:  409200A8  bc 4,18,0x25477240
+	   0: Jc18o       	$0x25477240
+
+
+
+. 658 25477198 4
+. 40 92 00 A8
+==== BB 659 (0x2547719C) approx BBs exec'd 0 ====
+
+	0x2547719C:  80B50000  lwz r5,0(r21)
+	   0: GETL       	R21, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x254771A0:  2F850000  cmpi cr7,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x254771A4:  419E0068  bc 12,30,0x2547720C
+	   8: Js30o       	$0x2547720C
+
+
+
+. 659 2547719C 12
+. 80 B5 00 00 2F 85 00 00 41 9E 00 68
+==== BB 660 (0x254771A8) approx BBs exec'd 0 ====
+
+	0x254771A8:  83A50180  lwz r29,384(r5)
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254771AC:  7CAADA78  xor r10,r5,r27
+	   5: GETL       	R5, t4
+	   6: GETL       	R27, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x254771B0:  300AFFFF  addic r0,r10,-1
+	  10: GETL       	R10, t8
+	  11: ADCL       	$0xFFFFFFFF, t8  (-wCa)
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x254771B4:  7D005110  subfe r8,r0,r10
+	  14: GETL       	R0, t10
+	  15: GETL       	R10, t12
+	  16: SBBL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R8
+	  18: INCEIPL       	$4
+
+	0x254771B8:  57BC0002  rlwinm r28,r29,0,0,1
+	  19: GETL       	R29, t14
+	  20: ANDL       	$0xC0000000, t14
+	  21: PUTL       	t14, R28
+	  22: INCEIPL       	$4
+
+	0x254771BC:  6F928000  xoris r18,r28,0x8000
+	  23: GETL       	R28, t16
+	  24: XORL       	$0x80000000, t16
+	  25: PUTL       	t16, R18
+	  26: INCEIPL       	$4
+
+	0x254771C0:  3132FFFF  addic r9,r18,-1
+	  27: GETL       	R18, t18
+	  28: ADCL       	$0xFFFFFFFF, t18  (-wCa)
+	  29: PUTL       	t18, R9
+	  30: INCEIPL       	$4
+
+	0x254771C4:  7DE99110  subfe r15,r9,r18
+	  31: GETL       	R9, t20
+	  32: GETL       	R18, t22
+	  33: SBBL       	t20, t22  (-rCa-wCa)
+	  34: PUTL       	t22, R15
+	  35: INCEIPL       	$4
+
+	0x254771C8:  7DE04039  and. r0,r15,r8
+	  36: GETL       	R15, t24
+	  37: GETL       	R8, t26
+	  38: ANDL       	t24, t26
+	  39: PUTL       	t26, R0
+	  40: CMP0L       	t26, t28  (-rSo)
+	  41: ICRFL       	t28, $0x0, CR
+	  42: INCEIPL       	$4
+
+	0x254771CC:  41820040  bc 12,2,0x2547720C
+	  43: Js02o       	$0x2547720C
+
+
+
+. 660 254771A8 40
+. 83 A5 01 80 7C AA DA 78 30 0A FF FF 7D 00 51 10 57 BC 00 02 6F 92 80 00 31 32 FF FF 7D E9 91 10 7D E0 40 39 41 82 00 40
+==== BB 661 (0x2547720C) approx BBs exec'd 0 ====
+
+	0x2547720C:  40920034  bc 4,18,0x25477240
+	   0: Jc18o       	$0x25477240
+
+
+
+. 661 2547720C 4
+. 40 92 00 34
+==== BB 662 (0x25477210) approx BBs exec'd 0 ====
+
+	0x25477210:  80DE0168  lwz r6,360(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x168, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25477214:  80A60000  lwz r5,0(r6)
+	   5: GETL       	R6, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R5
+	   8: INCEIPL       	$4
+
+	0x25477218:  2C85FFFF  cmpi cr1,r5,-1
+	   9: GETL       	R5, t8
+	  10: MOVL       	$0xFFFFFFFF, t12
+	  11: CMPL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x2547721C:  41860024  bc 12,6,0x25477240
+	  14: Js06o       	$0x25477240
+
+
+
+. 662 25477210 16
+. 80 DE 01 68 80 A6 00 00 2C 85 FF FF 41 86 00 24
+==== BB 663 (0x25477220) approx BBs exec'd 0 ====
+
+	0x25477220:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25477224:  7EC4B378  or r4,r22,r22
+	   3: GETL       	R22, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25477228:  7EE5BB78  or r5,r23,r23
+	   6: GETL       	R23, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547722C:  38E10228  addi r7,r1,552
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x228, t6
+	  11: PUTL       	t6, R7
+	  12: INCEIPL       	$4
+
+	0x25477230:  39010018  addi r8,r1,24
+	  13: GETL       	R1, t8
+	  14: ADDL       	$0x18, t8
+	  15: PUTL       	t8, R8
+	  16: INCEIPL       	$4
+
+	0x25477234:  4BFFF561  bl 0x25476794
+	  17: MOVL       	$0x25477238, t10
+	  18: PUTL       	t10, LR
+	  19: JMPo-c       	$0x25476794  ($4)
+
+
+
+. 663 25477220 24
+. 7F E3 FB 78 7E C4 B3 78 7E E5 BB 78 38 E1 02 28 39 01 00 18 4B FF F5 61
+==== BB 664 open_path(0x25476794) approx BBs exec'd 0 ====
+
+	0x25476794:  9421FF00  stwu r1,-256(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF00, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25476798:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547679C:  48020865  bl 0x25497000
+	   9: MOVL       	$0x254767A0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 664 25476794 12
+. 94 21 FF 00 7C 08 02 A6 48 02 08 65
+==== BB 665 (0x254767A0) approx BBs exec'd 0 ====
+
+	0x254767A0:  93C100F8  stw r30,248(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xF8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254767A4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x254767A8:  924100C8  stw r18,200(r1)
+	   8: GETL       	R18, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC8, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254767AC:  926100CC  stw r19,204(r1)
+	  13: GETL       	R19, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xCC, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254767B0:  7D200026  mfcr r9
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R9
+	  20: INCEIPL       	$4
+
+	0x254767B4:  90010104  stw r0,260(r1)
+	  21: GETL       	R0, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x104, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x254767B8:  38000000  li r0,0
+	  26: MOVL       	$0x0, t20
+	  27: PUTL       	t20, R0
+	  28: INCEIPL       	$4
+
+	0x254767BC:  825E015C  lwz r18,348(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x15C, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R18
+	  33: INCEIPL       	$4
+
+	0x254767C0:  827E0158  lwz r19,344(r30)
+	  34: GETL       	R30, t26
+	  35: ADDL       	$0x158, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R19
+	  38: INCEIPL       	$4
+
+	0x254767C4:  920100C0  stw r16,192(r1)
+	  39: GETL       	R16, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0xC0, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x254767C8:  922100C4  stw r17,196(r1)
+	  44: GETL       	R17, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0xC4, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0x254767CC:  82130000  lwz r16,0(r19)
+	  49: GETL       	R19, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R16
+	  52: INCEIPL       	$4
+
+	0x254767D0:  82320000  lwz r17,0(r18)
+	  53: GETL       	R18, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R17
+	  56: INCEIPL       	$4
+
+	0x254767D4:  7D124378  or r18,r8,r8
+	  57: GETL       	R8, t46
+	  58: PUTL       	t46, R18
+	  59: INCEIPL       	$4
+
+	0x254767D8:  91E100BC  stw r15,188(r1)
+	  60: GETL       	R15, t48
+	  61: GETL       	R1, t50
+	  62: ADDL       	$0xBC, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0x254767DC:  7DF08A14  add r15,r16,r17
+	  65: GETL       	R16, t52
+	  66: GETL       	R17, t54
+	  67: ADDL       	t52, t54
+	  68: PUTL       	t54, R15
+	  69: INCEIPL       	$4
+
+	0x254767E0:  91C100B8  stw r14,184(r1)
+	  70: GETL       	R14, t56
+	  71: GETL       	R1, t58
+	  72: ADDL       	$0xB8, t58
+	  73: STL       	t56, (t58)
+	  74: INCEIPL       	$4
+
+	0x254767E4:  93E100FC  stw r31,252(r1)
+	  75: GETL       	R31, t60
+	  76: GETL       	R1, t62
+	  77: ADDL       	$0xFC, t62
+	  78: STL       	t60, (t62)
+	  79: INCEIPL       	$4
+
+	0x254767E8:  7DCF2214  add r14,r15,r4
+	  80: GETL       	R15, t64
+	  81: GETL       	R4, t66
+	  82: ADDL       	t64, t66
+	  83: PUTL       	t66, R14
+	  84: INCEIPL       	$4
+
+	0x254767EC:  7C3F0B78  or r31,r1,r1
+	  85: GETL       	R1, t68
+	  86: PUTL       	t68, R31
+	  87: INCEIPL       	$4
+
+	0x254767F0:  398E001E  addi r12,r14,30
+	  88: GETL       	R14, t70
+	  89: ADDL       	$0x1E, t70
+	  90: PUTL       	t70, R12
+	  91: INCEIPL       	$4
+
+	0x254767F4:  90DF0084  stw r6,132(r31)
+	  92: GETL       	R6, t72
+	  93: GETL       	R31, t74
+	  94: ADDL       	$0x84, t74
+	  95: STL       	t72, (t74)
+	  96: INCEIPL       	$4
+
+	0x254767F8:  558B0036  rlwinm r11,r12,0,0,27
+	  97: GETL       	R12, t76
+	  98: ANDL       	$0xFFFFFFF0, t76
+	  99: PUTL       	t76, R11
+	 100: INCEIPL       	$4
+
+	0x254767FC:  81410000  lwz r10,0(r1)
+	 101: GETL       	R1, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R10
+	 104: INCEIPL       	$4
+
+	0x25476800:  7C711B78  or r17,r3,r3
+	 105: GETL       	R3, t82
+	 106: PUTL       	t82, R17
+	 107: INCEIPL       	$4
+
+	0x25476804:  92A100D4  stw r21,212(r1)
+	 108: GETL       	R21, t84
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0xD4, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x25476808:  39E00000  li r15,0
+	 113: MOVL       	$0x0, t88
+	 114: PUTL       	t88, R15
+	 115: INCEIPL       	$4
+
+	0x2547680C:  912100B4  stw r9,180(r1)
+	 116: GETL       	R9, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0xB4, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x25476810:  7D2B00D0  neg r9,r11
+	 121: GETL       	R11, t94
+	 122: NEGL       	t94
+	 123: PUTL       	t94, R9
+	 124: INCEIPL       	$4
+
+	0x25476814:  82A60000  lwz r21,0(r6)
+	 125: GETL       	R6, t96
+	 126: LDL       	(t96), t98
+	 127: PUTL       	t98, R21
+	 128: INCEIPL       	$4
+
+	0x25476818:  92C100D8  stw r22,216(r1)
+	 129: GETL       	R22, t100
+	 130: GETL       	R1, t102
+	 131: ADDL       	$0xD8, t102
+	 132: STL       	t100, (t102)
+	 133: INCEIPL       	$4
+
+	0x2547681C:  7C962378  or r22,r4,r4
+	 134: GETL       	R4, t104
+	 135: PUTL       	t104, R22
+	 136: INCEIPL       	$4
+
+	0x25476820:  934100E8  stw r26,232(r1)
+	 137: GETL       	R26, t106
+	 138: GETL       	R1, t108
+	 139: ADDL       	$0xE8, t108
+	 140: STL       	t106, (t108)
+	 141: INCEIPL       	$4
+
+	0x25476824:  3B40FFFF  li r26,-1
+	 142: MOVL       	$0xFFFFFFFF, t110
+	 143: PUTL       	t110, R26
+	 144: INCEIPL       	$4
+
+	0x25476828:  936100EC  stw r27,236(r1)
+	 145: GETL       	R27, t112
+	 146: GETL       	R1, t114
+	 147: ADDL       	$0xEC, t114
+	 148: STL       	t112, (t114)
+	 149: INCEIPL       	$4
+
+	0x2547682C:  2E1AFFFF  cmpi cr4,r26,-1
+	 150: GETL       	R26, t116
+	 151: MOVL       	$0xFFFFFFFF, t120
+	 152: CMPL       	t116, t120, t118  (-rSo)
+	 153: ICRFL       	t118, $0x4, CR
+	 154: INCEIPL       	$4
+
+	0x25476830:  93A100F4  stw r29,244(r1)
+	 155: GETL       	R29, t122
+	 156: GETL       	R1, t124
+	 157: ADDL       	$0xF4, t124
+	 158: STL       	t122, (t124)
+	 159: INCEIPL       	$4
+
+	0x25476834:  928100D0  stw r20,208(r1)
+	 160: GETL       	R20, t126
+	 161: GETL       	R1, t128
+	 162: ADDL       	$0xD0, t128
+	 163: STL       	t126, (t128)
+	 164: INCEIPL       	$4
+
+	0x25476838:  92E100DC  stw r23,220(r1)
+	 165: GETL       	R23, t130
+	 166: GETL       	R1, t132
+	 167: ADDL       	$0xDC, t132
+	 168: STL       	t130, (t132)
+	 169: INCEIPL       	$4
+
+	0x2547683C:  930100E0  stw r24,224(r1)
+	 170: GETL       	R24, t134
+	 171: GETL       	R1, t136
+	 172: ADDL       	$0xE0, t136
+	 173: STL       	t134, (t136)
+	 174: INCEIPL       	$4
+
+	0x25476840:  932100E4  stw r25,228(r1)
+	 175: GETL       	R25, t138
+	 176: GETL       	R1, t140
+	 177: ADDL       	$0xE4, t140
+	 178: STL       	t138, (t140)
+	 179: INCEIPL       	$4
+
+	0x25476844:  938100F0  stw r28,240(r1)
+	 180: GETL       	R28, t142
+	 181: GETL       	R1, t144
+	 182: ADDL       	$0xF0, t144
+	 183: STL       	t142, (t144)
+	 184: INCEIPL       	$4
+
+	0x25476848:  7D41496E  stwux r10,r1,r9
+	 185: GETL       	R9, t146
+	 186: GETL       	R1, t148
+	 187: ADDL       	t148, t146
+	 188: PUTL       	t146, R1
+	 189: GETL       	R10, t150
+	 190: STL       	t150, (t146)
+	 191: INCEIPL       	$4
+
+	0x2547684C:  38810017  addi r4,r1,23
+	 192: GETL       	R1, t152
+	 193: ADDL       	$0x17, t152
+	 194: PUTL       	t152, R4
+	 195: INCEIPL       	$4
+
+	0x25476850:  83B50000  lwz r29,0(r21)
+	 196: GETL       	R21, t154
+	 197: LDL       	(t154), t156
+	 198: PUTL       	t156, R29
+	 199: INCEIPL       	$4
+
+	0x25476854:  81DE04F4  lwz r14,1268(r30)
+	 200: GETL       	R30, t158
+	 201: ADDL       	$0x4F4, t158
+	 202: LDL       	(t158), t160
+	 203: PUTL       	t160, R14
+	 204: INCEIPL       	$4
+
+	0x25476858:  549B0036  rlwinm r27,r4,0,0,27
+	 205: GETL       	R4, t162
+	 206: ANDL       	$0xFFFFFFF0, t162
+	 207: PUTL       	t162, R27
+	 208: INCEIPL       	$4
+
+	0x2547685C:  90BF0080  stw r5,128(r31)
+	 209: GETL       	R5, t164
+	 210: GETL       	R31, t166
+	 211: ADDL       	$0x80, t166
+	 212: STL       	t164, (t166)
+	 213: INCEIPL       	$4
+
+	0x25476860:  90FF0088  stw r7,136(r31)
+	 214: GETL       	R7, t168
+	 215: GETL       	R31, t170
+	 216: ADDL       	$0x88, t170
+	 217: STL       	t168, (t170)
+	 218: INCEIPL       	$4
+
+	0x25476864:  901F008C  stw r0,140(r31)
+	 219: GETL       	R0, t172
+	 220: GETL       	R31, t174
+	 221: ADDL       	$0x8C, t174
+	 222: STL       	t172, (t174)
+	 223: INCEIPL       	$4
+
+	0x25476868:  806E0000  lwz r3,0(r14)
+	 224: GETL       	R14, t176
+	 225: LDL       	(t176), t178
+	 226: PUTL       	t178, R3
+	 227: INCEIPL       	$4
+
+	0x2547686C:  3A600000  li r19,0
+	 228: MOVL       	$0x0, t180
+	 229: PUTL       	t180, R19
+	 230: INCEIPL       	$4
+
+	0x25476870:  3AE00000  li r23,0
+	 231: MOVL       	$0x0, t182
+	 232: PUTL       	t182, R23
+	 233: INCEIPL       	$4
+
+	0x25476874:  70690001  andi. r9,r3,0x1
+	 234: GETL       	R3, t184
+	 235: ANDL       	$0x1, t184
+	 236: PUTL       	t184, R9
+	 237: CMP0L       	t184, t186  (-rSo)
+	 238: ICRFL       	t186, $0x0, CR
+	 239: INCEIPL       	$4
+
+	0x25476878:  4082025C  bc 4,2,0x25476AD4
+	 240: Jc02o       	$0x25476AD4
+
+
+
+. 665 254767A0 220
+. 93 C1 00 F8 7F C8 02 A6 92 41 00 C8 92 61 00 CC 7D 20 00 26 90 01 01 04 38 00 00 00 82 5E 01 5C 82 7E 01 58 92 01 00 C0 92 21 00 C4 82 13 00 00 82 32 00 00 7D 12 43 78 91 E1 00 BC 7D F0 8A 14 91 C1 00 B8 93 E1 00 FC 7D CF 22 14 7C 3F 0B 78 39 8E 00 1E 90 DF 00 84 55 8B 00 36 81 41 00 00 7C 71 1B 78 92 A1 00 D4 39 E0 00 00 91 21 00 B4 7D 2B 00 D0 82 A6 00 00 92 C1 00 D8 7C 96 23 78 93 41 00 E8 3B 40 FF FF 93 61 00 EC 2E 1A FF FF 93 A1 00 F4 92 81 00 D0 92 E1 00 DC 93 01 00 E0 93 21 00 E4 93 81 00 F0 7D 41 49 6E 38 81 00 17 83 B5 00 00 81 DE 04 F4 54 9B 00 36 90 BF 00 80 90 FF 00 88 90 1F 00 8C 80 6E 00 00 3A 60 00 00 3A E0 00 00 70 69 00 01 40 82 02 5C
+==== BB 666 (0x2547687C) approx BBs exec'd 0 ====
+
+	0x2547687C:  809D000C  lwz r4,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25476880:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25476884:  80BD0010  lwz r5,16(r29)
+	   8: GETL       	R29, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x25476888:  3B800000  li r28,0
+	  13: MOVL       	$0x0, t10
+	  14: PUTL       	t10, R28
+	  15: INCEIPL       	$4
+
+	0x2547688C:  4800D195  bl 0x25483A20
+	  16: MOVL       	$0x25476890, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0x25483A20  ($4)
+
+
+
+. 666 2547687C 20
+. 80 9D 00 0C 7F 63 DB 78 80 BD 00 10 3B 80 00 00 48 00 D1 95
+==== BB 667 (0x25476890) approx BBs exec'd 0 ====
+
+	0x25476890:  7C781B78  or r24,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R24
+	   2: INCEIPL       	$4
+
+	0x25476894:  409201A0  bc 4,18,0x25476A34
+	   3: Jc18o       	$0x25476A34
+
+
+
+. 667 25476890 8
+. 7C 78 1B 78 40 92 01 A0
+==== BB 668 (0x25476898) approx BBs exec'd 0 ====
+
+	0x25476898:  833E0154  lwz r25,340(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x154, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x2547689C:  81990000  lwz r12,0(r25)
+	   5: GETL       	R25, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R12
+	   8: INCEIPL       	$4
+
+	0x254768A0:  7F9C6040  cmpl cr7,r28,r12
+	   9: GETL       	R28, t8
+	  10: GETL       	R12, t10
+	  11: CMPUL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x254768A4:  409C014C  bc 4,28,0x254769F0
+	  14: Jc28o       	$0x254769F0
+
+
+
+. 668 25476898 16
+. 83 3E 01 54 81 99 00 00 7F 9C 60 40 40 9C 01 4C
+==== BB 669 (0x254768A8) approx BBs exec'd 0 ====
+
+	0x254768A8:  817E0228  lwz r11,552(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x228, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254768AC:  3A800002  li r20,2
+	   5: MOVL       	$0x2, t4
+	   6: PUTL       	t4, R20
+	   7: INCEIPL       	$4
+
+	0x254768B0:  820B0000  lwz r16,0(r11)
+	   8: GETL       	R11, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R16
+	  11: INCEIPL       	$4
+
+	0x254768B4:  4800001C  b 0x254768D0
+	  12: JMPo       	$0x254768D0  ($4)
+
+
+
+. 669 254768A8 16
+. 81 7E 02 28 3A 80 00 02 82 0B 00 00 48 00 00 1C
+==== BB 670 (0x254768D0) approx BBs exec'd 0 ====
+
+	0x254768D0:  80DD0014  lwz r6,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254768D4:  2C860001  cmpi cr1,r6,1
+	   5: GETL       	R6, t4
+	   6: MOVL       	$0x1, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x254768D8:  41A6FFE0  bc 13,6,0x254768B8
+	  10: Js06o       	$0x254768B8
+
+
+
+. 670 254768D0 12
+. 80 DD 00 14 2C 86 00 01 41 A6 FF E0
+==== BB 671 (0x254768DC) approx BBs exec'd 0 ====
+
+	0x254768DC:  813E0160  lwz r9,352(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x160, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x254768E0:  578A1838  rlwinm r10,r28,3,0,28
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x3, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x254768E4:  7F03C378  or r3,r24,r24
+	   9: GETL       	R24, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x254768E8:  80A90000  lwz r5,0(r9)
+	  12: GETL       	R9, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x254768EC:  7E6A2A14  add r19,r10,r5
+	  16: GETL       	R10, t12
+	  17: GETL       	R5, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R19
+	  20: INCEIPL       	$4
+
+	0x254768F0:  7C8A282E  lwzx r4,r10,r5
+	  21: GETL       	R5, t16
+	  22: GETL       	R10, t18
+	  23: ADDL       	t18, t16
+	  24: LDL       	(t16), t20
+	  25: PUTL       	t20, R4
+	  26: INCEIPL       	$4
+
+	0x254768F4:  80B30004  lwz r5,4(r19)
+	  27: GETL       	R19, t22
+	  28: ADDL       	$0x4, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R5
+	  31: INCEIPL       	$4
+
+	0x254768F8:  4800D129  bl 0x25483A20
+	  32: MOVL       	$0x254768FC, t26
+	  33: PUTL       	t26, LR
+	  34: JMPo-c       	$0x25483A20  ($4)
+
+
+
+. 671 254768DC 32
+. 81 3E 01 60 57 8A 18 38 7F 03 C3 78 80 A9 00 00 7E 6A 2A 14 7C 8A 28 2E 80 B3 00 04 48 00 D1 29
+==== BB 672 (0x254768FC) approx BBs exec'd 0 ====
+
+	0x254768FC:  7E248B78  or r4,r17,r17
+	   0: GETL       	R17, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25476900:  7EC5B378  or r5,r22,r22
+	   3: GETL       	R22, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25476904:  4800D11D  bl 0x25483A20
+	   6: MOVL       	$0x25476908, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483A20  ($4)
+
+
+
+. 672 254768FC 12
+. 7E 24 8B 78 7E C5 B3 78 48 00 D1 1D
+==== BB 673 (0x25476908) approx BBs exec'd 0 ====
+
+	0x25476908:  834E0000  lwz r26,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R26
+	   3: INCEIPL       	$4
+
+	0x2547690C:  7E7B1850  subf r19,r27,r3
+	   4: GETL       	R27, t4
+	   5: GETL       	R3, t6
+	   6: SUBL       	t4, t6
+	   7: PUTL       	t6, R19
+	   8: INCEIPL       	$4
+
+	0x25476910:  73490001  andi. r9,r26,0x1
+	   9: GETL       	R26, t8
+	  10: ANDL       	$0x1, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25476914:  408201AC  bc 4,2,0x25476AC0
+	  15: Jc02o       	$0x25476AC0
+
+
+
+. 673 25476908 16
+. 83 4E 00 00 7E 7B 18 50 73 49 00 01 40 82 01 AC
+==== BB 674 (0x25476918) approx BBs exec'd 0 ====
+
+	0x25476918:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547691C:  7E449378  or r4,r18,r18
+	   3: GETL       	R18, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25476920:  4BFFFB25  bl 0x25476444
+	   6: MOVL       	$0x25476924, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25476444  ($4)
+
+
+
+. 674 25476918 12
+. 7F 63 DB 78 7E 44 93 78 4B FF FB 25
+==== BB 675 (0x25481FEC) approx BBs exec'd 0 ====
+
+	0x25481FEC:  4BFFF344  b 0x25481330
+	   0: JMPo       	$0x25481330  ($4)
+
+
+
+. 675 25481FEC 4
+. 4B FF F3 44
+==== BB 676 (0x25476924) approx BBs exec'd 0 ====
+
+	0x25476924:  801D0014  lwz r0,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25476928:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547692C:  2E03FFFF  cmpi cr4,r3,-1
+	   8: GETL       	R3, t6
+	   9: MOVL       	$0xFFFFFFFF, t10
+	  10: CMPL       	t6, t10, t8  (-rSo)
+	  11: ICRFL       	t8, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0x25476930:  2F000000  cmpi cr6,r0,0
+	  13: GETL       	R0, t12
+	  14: CMP0L       	t12, t14  (-rSo)
+	  15: ICRFL       	t14, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0x25476934:  409A000C  bc 4,26,0x25476940
+	  17: Jc26o       	$0x25476940
+
+
+
+. 676 25476924 20
+. 80 1D 00 14 7C 7A 1B 78 2E 03 FF FF 2F 00 00 00 40 9A 00 0C
+==== BB 677 (0x25476938) approx BBs exec'd 0 ====
+
+	0x25476938:  41920078  bc 12,18,0x254769B0
+	   0: Js18o       	$0x254769B0
+
+
+
+. 677 25476938 4
+. 41 92 00 78
+==== BB 678 (0x254769B0) approx BBs exec'd 0 ====
+
+	0x254769B0:  7C769850  subf r3,r22,r19
+	   0: GETL       	R22, t0
+	   1: GETL       	R19, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254769B4:  7F64DB78  or r4,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x254769B8:  7CFB1A14  add r7,r27,r3
+	   8: GETL       	R27, t6
+	   9: GETL       	R3, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0x254769BC:  38BF0010  addi r5,r31,16
+	  13: GETL       	R31, t10
+	  14: ADDL       	$0x10, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0x254769C0:  9807FFFF  stb r0,-1(r7)
+	  17: GETL       	R0, t12
+	  18: GETL       	R7, t14
+	  19: ADDL       	$0xFFFFFFFF, t14
+	  20: STB       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0x254769C4:  38600003  li r3,3
+	  22: MOVL       	$0x3, t16
+	  23: PUTL       	t16, R3
+	  24: INCEIPL       	$4
+
+	0x254769C8:  4800B3A1  bl 0x25481D68
+	  25: MOVL       	$0x254769CC, t18
+	  26: PUTL       	t18, LR
+	  27: JMPo-c       	$0x25481D68  ($4)
+
+
+
+. 678 254769B0 28
+. 7C 76 98 50 7F 64 DB 78 7C FB 1A 14 38 BF 00 10 98 07 FF FF 38 60 00 03 48 00 B3 A1
+==== BB 679 __GI___xstat64(0x25481D68) approx BBs exec'd 0 ====
+
+	0x25481D68:  9421FF80  stwu r1,-128(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF80, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25481D6C:  7CC802A6  mflr r6
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x25481D70:  48015291  bl 0x25497000
+	   9: MOVL       	$0x25481D74, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 679 25481D68 12
+. 94 21 FF 80 7C C8 02 A6 48 01 52 91
+==== BB 680 (0x25481D74) approx BBs exec'd 0 ====
+
+	0x25481D74:  93C10078  stw r30,120(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x78, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25481D78:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25481D7C:  9361006C  stw r27,108(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x6C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25481D80:  90C10084  stw r6,132(r1)
+	  13: GETL       	R6, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x84, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25481D84:  93210064  stw r25,100(r1)
+	  18: GETL       	R25, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x64, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25481D88:  7C791B78  or r25,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0x25481D8C:  837E04EC  lwz r27,1260(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4EC, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0x25481D90:  93810070  stw r28,112(r1)
+	  31: GETL       	R28, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x70, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x25481D94:  7CBC2B78  or r28,r5,r5
+	  36: GETL       	R5, t28
+	  37: PUTL       	t28, R28
+	  38: INCEIPL       	$4
+
+	0x25481D98:  801B0000  lwz r0,0(r27)
+	  39: GETL       	R27, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0x25481D9C:  93E1007C  stw r31,124(r1)
+	  43: GETL       	R31, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x7C, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x25481DA0:  7C9F2378  or r31,r4,r4
+	  48: GETL       	R4, t38
+	  49: PUTL       	t38, R31
+	  50: INCEIPL       	$4
+
+	0x25481DA4:  2F800000  cmpi cr7,r0,0
+	  51: GETL       	R0, t40
+	  52: CMP0L       	t40, t42  (-rSo)
+	  53: ICRFL       	t42, $0x7, CR
+	  54: INCEIPL       	$4
+
+	0x25481DA8:  93410068  stw r26,104(r1)
+	  55: GETL       	R26, t44
+	  56: GETL       	R1, t46
+	  57: ADDL       	$0x68, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0x25481DAC:  93A10074  stw r29,116(r1)
+	  60: GETL       	R29, t48
+	  61: GETL       	R1, t50
+	  62: ADDL       	$0x74, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0x25481DB0:  409E0048  bc 4,30,0x25481DF8
+	  65: Jc30o       	$0x25481DF8
+
+
+
+. 680 25481D74 64
+. 93 C1 00 78 7F C8 02 A6 93 61 00 6C 90 C1 00 84 93 21 00 64 7C 79 1B 78 83 7E 04 EC 93 81 00 70 7C BC 2B 78 80 1B 00 00 93 E1 00 7C 7C 9F 23 78 2F 80 00 00 93 41 00 68 93 A1 00 74 40 9E 00 48
+==== BB 681 (0x25481DB4) approx BBs exec'd 0 ====
+
+	0x25481DB4:  83BE0514  lwz r29,1300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x514, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25481DB8:  7C832378  or r3,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25481DBC:  380000C3  li r0,195
+	   8: MOVL       	$0xC3, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x25481DC0:  7CA42B78  or r4,r5,r5
+	  11: GETL       	R5, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x25481DC4:  835D0000  lwz r26,0(r29)
+	  14: GETL       	R29, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R26
+	  17: INCEIPL       	$4
+
+	0x25481DC8:  44000002  sc
+	  18: JMPo-sys       	$0x25481DCC  ($4)
+
+
+
+. 681 25481DB4 24
+. 83 BE 05 14 7C 83 23 78 38 00 00 C3 7C A4 2B 78 83 5D 00 00 44 00 00 02
+==== BB 682 (0x25481DCC) approx BBs exec'd 0 ====
+
+	0x25481DCC:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25481DD0:  74091000  andis. r9,r0,0x1000
+	   3: GETL       	R0, t2
+	   4: ANDL       	$0x10000000, t2
+	   5: PUTL       	t2, R9
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25481DD4:  408200C4  bc 4,2,0x25481E98
+	   9: Jc02o       	$0x25481E98
+
+
+
+. 682 25481DCC 12
+. 7C 00 00 26 74 09 10 00 40 82 00 C4
+==== BB 683 (0x25481E98) approx BBs exec'd 0 ====
+
+	0x25481E98:  907D0000  stw r3,0(r29)
+	   0: GETL       	R3, t0
+	   1: GETL       	R29, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25481E9C:  3860FFFF  li r3,-1
+	   4: MOVL       	$0xFFFFFFFF, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x25481EA0:  4BFFFF38  b 0x25481DD8
+	   7: JMPo       	$0x25481DD8  ($4)
+
+
+
+. 683 25481E98 12
+. 90 7D 00 00 38 60 FF FF 4B FF FF 38
+==== BB 684 (0x25481DD8) approx BBs exec'd 0 ====
+
+	0x25481DD8:  2C83FFFF  cmpi cr1,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25481DDC:  40A60044  bc 5,6,0x25481E20
+	   5: Jc06o       	$0x25481E20
+
+
+
+. 684 25481DD8 8
+. 2C 83 FF FF 40 A6 00 44
+==== BB 685 (0x25481DE0) approx BBs exec'd 0 ====
+
+	0x25481DE0:  809D0000  lwz r4,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25481DE4:  2F040026  cmpi cr6,r4,38
+	   4: GETL       	R4, t4
+	   5: MOVL       	$0x26, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25481DE8:  40BA0038  bc 5,26,0x25481E20
+	   9: Jc26o       	$0x25481E20
+
+
+
+. 685 25481DE0 12
+. 80 9D 00 00 2F 04 00 26 40 BA 00 38
+==== BB 686 (0x25481E20) approx BBs exec'd 0 ====
+
+	0x25481E20:  80A10084  lwz r5,132(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25481E24:  83210064  lwz r25,100(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x64, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x25481E28:  83410068  lwz r26,104(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x68, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0x25481E2C:  7CA803A6  mtlr r5
+	  15: GETL       	R5, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x25481E30:  8361006C  lwz r27,108(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x6C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R27
+	  22: INCEIPL       	$4
+
+	0x25481E34:  83810070  lwz r28,112(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x70, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R28
+	  27: INCEIPL       	$4
+
+	0x25481E38:  83A10074  lwz r29,116(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x74, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R29
+	  32: INCEIPL       	$4
+
+	0x25481E3C:  83C10078  lwz r30,120(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x78, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R30
+	  37: INCEIPL       	$4
+
+	0x25481E40:  83E1007C  lwz r31,124(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x7C, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R31
+	  42: INCEIPL       	$4
+
+	0x25481E44:  38210080  addi r1,r1,128
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x80, t34
+	  45: PUTL       	t34, R1
+	  46: INCEIPL       	$4
+
+	0x25481E48:  4E800020  blr
+	  47: GETL       	LR, t36
+	  48: JMPo-r       	t36  ($4)
+
+
+
+. 686 25481E20 44
+. 80 A1 00 84 83 21 00 64 83 41 00 68 7C A8 03 A6 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C 38 21 00 80 4E 80 00 20
+==== BB 687 (0x254769CC) approx BBs exec'd 0 ====
+
+	0x254769CC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x254769D0:  409E0014  bc 4,30,0x254769E4
+	   4: Jc30o       	$0x254769E4
+
+
+
+. 687 254769CC 8
+. 2F 83 00 00 40 9E 00 14
+==== BB 688 (0x254769E4) approx BBs exec'd 0 ====
+
+	0x254769E4:  38800001  li r4,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254769E8:  909D0014  stw r4,20(r29)
+	   3: GETL       	R4, t2
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x14, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254769EC:  4BFFFF54  b 0x25476940
+	   8: JMPo       	$0x25476940  ($4)
+
+
+
+. 688 254769E4 12
+. 38 80 00 01 90 9D 00 14 4B FF FF 54
+==== BB 689 (0x25476940) approx BBs exec'd 0 ====
+
+	0x25476940:  817D0014  lwz r11,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25476944:  696C0002  xori r12,r11,0x2
+	   5: GETL       	R11, t4
+	   6: XORL       	$0x2, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25476948:  212C0000  subfic r9,r12,0
+	   9: GETL       	R12, t6
+	  10: MOVL       	$0x0, t8
+	  11: SBBL       	t6, t8  (-wCa)
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0x2547694C:  7D896114  adde r12,r9,r12
+	  14: GETL       	R9, t10
+	  15: GETL       	R12, t12
+	  16: ADCL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R12
+	  18: INCEIPL       	$4
+
+	0x25476950:  7EF76378  or r23,r23,r12
+	  19: GETL       	R23, t14
+	  20: GETL       	R12, t16
+	  21: ORL       	t16, t14
+	  22: PUTL       	t14, R23
+	  23: INCEIPL       	$4
+
+	0x25476954:  41B2FF64  bc 13,18,0x254768B8
+	  24: Js18o       	$0x254768B8
+
+
+
+. 689 25476940 24
+. 81 7D 00 14 69 6C 00 02 21 2C 00 00 7D 89 61 14 7E F7 63 78 41 B2 FF 64
+==== BB 690 (0x254768B8) approx BBs exec'd 0 ====
+
+	0x254768B8:  3B9C0001  addi r28,r28,1
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x254768BC:  3BBD0004  addi r29,r29,4
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x4, t2
+	   6: PUTL       	t2, R29
+	   7: INCEIPL       	$4
+
+	0x254768C0:  40920174  bc 4,18,0x25476A34
+	   8: Jc18o       	$0x25476A34
+
+
+
+. 690 254768B8 12
+. 3B 9C 00 01 3B BD 00 04 40 92 01 74
+==== BB 691 (0x254768C4) approx BBs exec'd 0 ====
+
+	0x254768C4:  81390000  lwz r9,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x254768C8:  7C9C4840  cmpl cr1,r28,r9
+	   4: GETL       	R28, t4
+	   5: GETL       	R9, t6
+	   6: CMPUL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254768CC:  40840124  bc 4,4,0x254769F0
+	   9: Jc04o       	$0x254769F0
+
+
+
+. 691 254768C4 12
+. 81 39 00 00 7C 9C 48 40 40 84 01 24
+==== BB 692 (0x254769D4) approx BBs exec'd 0 ====
+
+	0x254769D4:  811F0020  lwz r8,32(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254769D8:  55000426  rlwinm r0,r8,0,16,19
+	   5: GETL       	R8, t4
+	   6: ANDL       	$0xF000, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254769DC:  2C804000  cmpi cr1,r0,16384
+	   9: GETL       	R0, t6
+	  10: MOVL       	$0x4000, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x254769E0:  41A6FF5C  bc 13,6,0x2547693C
+	  14: Js06o       	$0x2547693C
+
+
+
+. 692 254769D4 16
+. 81 1F 00 20 55 00 04 26 2C 80 40 00 41 A6 FF 5C
+==== BB 693 (0x2547693C) approx BBs exec'd 0 ====
+
+	0x2547693C:  929D0014  stw r20,20(r29)
+	   0: GETL       	R20, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25476940:  817D0014  lwz r11,20(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25476944:  696C0002  xori r12,r11,0x2
+	  10: GETL       	R11, t8
+	  11: XORL       	$0x2, t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0x25476948:  212C0000  subfic r9,r12,0
+	  14: GETL       	R12, t10
+	  15: MOVL       	$0x0, t12
+	  16: SBBL       	t10, t12  (-wCa)
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0x2547694C:  7D896114  adde r12,r9,r12
+	  19: GETL       	R9, t14
+	  20: GETL       	R12, t16
+	  21: ADCL       	t14, t16  (-rCa-wCa)
+	  22: PUTL       	t16, R12
+	  23: INCEIPL       	$4
+
+	0x25476950:  7EF76378  or r23,r23,r12
+	  24: GETL       	R23, t18
+	  25: GETL       	R12, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R23
+	  28: INCEIPL       	$4
+
+	0x25476954:  41B2FF64  bc 13,18,0x254768B8
+	  29: Js18o       	$0x254768B8
+
+
+
+. 693 2547693C 28
+. 92 9D 00 14 81 7D 00 14 69 6C 00 02 21 2C 00 00 7D 89 61 14 7E F7 63 78 41 B2 FF 64
+==== BB 694 (0x254769F0) approx BBs exec'd 0 ====
+
+	0x254769F0:  40920044  bc 4,18,0x25476A34
+	   0: Jc18o       	$0x25476A34
+
+
+
+. 694 254769F0 4
+. 40 92 00 44
+==== BB 695 (0x254769F4) approx BBs exec'd 0 ====
+
+	0x254769F4:  2F170000  cmpi cr6,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x254769F8:  419A001C  bc 12,26,0x25476A14
+	   4: Js26o       	$0x25476A14
+
+
+
+. 695 254769F4 8
+. 2F 17 00 00 41 9A 00 1C
+==== BB 696 (0x254769FC) approx BBs exec'd 0 ====
+
+	0x254769FC:  83BE0514  lwz r29,1300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x514, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25476A00:  813D0000  lwz r9,0(r29)
+	   5: GETL       	R29, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x25476A04:  2F890002  cmpi cr7,r9,2
+	   9: GETL       	R9, t8
+	  10: MOVL       	$0x2, t12
+	  11: CMPL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25476A08:  419E000C  bc 12,30,0x25476A14
+	  14: Js30o       	$0x25476A14
+
+
+
+. 696 254769FC 16
+. 83 BE 05 14 81 3D 00 00 2F 89 00 02 41 9E 00 0C
+==== BB 697 (0x25476A14) approx BBs exec'd 0 ====
+
+	0x25476A14:  87B50004  lwzu r29,4(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R21
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0x25476A18:  7DEFBB78  or r15,r15,r23
+	   6: GETL       	R15, t4
+	   7: GETL       	R23, t6
+	   8: ORL       	t6, t4
+	   9: PUTL       	t4, R15
+	  10: INCEIPL       	$4
+
+	0x25476A1C:  2C9D0000  cmpi cr1,r29,0
+	  11: GETL       	R29, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x25476A20:  4086FE48  bc 4,6,0x25476868
+	  15: Jc06o       	$0x25476868
+
+
+
+. 697 25476A14 16
+. 87 B5 00 04 7D EF BB 78 2C 9D 00 00 40 86 FE 48
+==== BB 698 (0x25476A24) approx BBs exec'd 0 ====
+
+	0x25476A24:  2E0F0000  cmpi cr4,r15,0
+	   0: GETL       	R15, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25476A28:  4192026C  bc 12,18,0x25476C94
+	   4: Js18o       	$0x25476C94
+
+
+
+. 698 25476A24 8
+. 2E 0F 00 00 41 92 02 6C
+==== BB 699 (0x25476A2C) approx BBs exec'd 0 ====
+
+	0x25476A2C:  3860FFFF  li r3,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476A30:  4800002C  b 0x25476A5C
+	   3: JMPo       	$0x25476A5C  ($4)
+
+
+
+. 699 25476A2C 8
+. 38 60 FF FF 48 00 00 2C
+==== BB 700 (0x25476A5C) approx BBs exec'd 0 ====
+
+	0x25476A5C:  81010000  lwz r8,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0x25476A60:  81E80004  lwz r15,4(r8)
+	   4: GETL       	R8, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R15
+	   8: INCEIPL       	$4
+
+	0x25476A64:  8088FFB4  lwz r4,-76(r8)
+	   9: GETL       	R8, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R4
+	  13: INCEIPL       	$4
+
+	0x25476A68:  7DE803A6  mtlr r15
+	  14: GETL       	R15, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x25476A6C:  81C8FFB8  lwz r14,-72(r8)
+	  17: GETL       	R8, t14
+	  18: ADDL       	$0xFFFFFFB8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R14
+	  21: INCEIPL       	$4
+
+	0x25476A70:  81E8FFBC  lwz r15,-68(r8)
+	  22: GETL       	R8, t18
+	  23: ADDL       	$0xFFFFFFBC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R15
+	  26: INCEIPL       	$4
+
+	0x25476A74:  7C808120  mtcrf 0x8,r4
+	  27: GETL       	R4, t22
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x25476A78:  8208FFC0  lwz r16,-64(r8)
+	  30: GETL       	R8, t24
+	  31: ADDL       	$0xFFFFFFC0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R16
+	  34: INCEIPL       	$4
+
+	0x25476A7C:  8228FFC4  lwz r17,-60(r8)
+	  35: GETL       	R8, t28
+	  36: ADDL       	$0xFFFFFFC4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R17
+	  39: INCEIPL       	$4
+
+	0x25476A80:  8248FFC8  lwz r18,-56(r8)
+	  40: GETL       	R8, t32
+	  41: ADDL       	$0xFFFFFFC8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R18
+	  44: INCEIPL       	$4
+
+	0x25476A84:  8268FFCC  lwz r19,-52(r8)
+	  45: GETL       	R8, t36
+	  46: ADDL       	$0xFFFFFFCC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R19
+	  49: INCEIPL       	$4
+
+	0x25476A88:  8288FFD0  lwz r20,-48(r8)
+	  50: GETL       	R8, t40
+	  51: ADDL       	$0xFFFFFFD0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R20
+	  54: INCEIPL       	$4
+
+	0x25476A8C:  82A8FFD4  lwz r21,-44(r8)
+	  55: GETL       	R8, t44
+	  56: ADDL       	$0xFFFFFFD4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R21
+	  59: INCEIPL       	$4
+
+	0x25476A90:  82C8FFD8  lwz r22,-40(r8)
+	  60: GETL       	R8, t48
+	  61: ADDL       	$0xFFFFFFD8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R22
+	  64: INCEIPL       	$4
+
+	0x25476A94:  82E8FFDC  lwz r23,-36(r8)
+	  65: GETL       	R8, t52
+	  66: ADDL       	$0xFFFFFFDC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R23
+	  69: INCEIPL       	$4
+
+	0x25476A98:  8308FFE0  lwz r24,-32(r8)
+	  70: GETL       	R8, t56
+	  71: ADDL       	$0xFFFFFFE0, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R24
+	  74: INCEIPL       	$4
+
+	0x25476A9C:  8328FFE4  lwz r25,-28(r8)
+	  75: GETL       	R8, t60
+	  76: ADDL       	$0xFFFFFFE4, t60
+	  77: LDL       	(t60), t62
+	  78: PUTL       	t62, R25
+	  79: INCEIPL       	$4
+
+	0x25476AA0:  8348FFE8  lwz r26,-24(r8)
+	  80: GETL       	R8, t64
+	  81: ADDL       	$0xFFFFFFE8, t64
+	  82: LDL       	(t64), t66
+	  83: PUTL       	t66, R26
+	  84: INCEIPL       	$4
+
+	0x25476AA4:  8368FFEC  lwz r27,-20(r8)
+	  85: GETL       	R8, t68
+	  86: ADDL       	$0xFFFFFFEC, t68
+	  87: LDL       	(t68), t70
+	  88: PUTL       	t70, R27
+	  89: INCEIPL       	$4
+
+	0x25476AA8:  8388FFF0  lwz r28,-16(r8)
+	  90: GETL       	R8, t72
+	  91: ADDL       	$0xFFFFFFF0, t72
+	  92: LDL       	(t72), t74
+	  93: PUTL       	t74, R28
+	  94: INCEIPL       	$4
+
+	0x25476AAC:  83A8FFF4  lwz r29,-12(r8)
+	  95: GETL       	R8, t76
+	  96: ADDL       	$0xFFFFFFF4, t76
+	  97: LDL       	(t76), t78
+	  98: PUTL       	t78, R29
+	  99: INCEIPL       	$4
+
+	0x25476AB0:  83C8FFF8  lwz r30,-8(r8)
+	 100: GETL       	R8, t80
+	 101: ADDL       	$0xFFFFFFF8, t80
+	 102: LDL       	(t80), t82
+	 103: PUTL       	t82, R30
+	 104: INCEIPL       	$4
+
+	0x25476AB4:  83E8FFFC  lwz r31,-4(r8)
+	 105: GETL       	R8, t84
+	 106: ADDL       	$0xFFFFFFFC, t84
+	 107: LDL       	(t84), t86
+	 108: PUTL       	t86, R31
+	 109: INCEIPL       	$4
+
+	0x25476AB8:  7D014378  or r1,r8,r8
+	 110: GETL       	R8, t88
+	 111: PUTL       	t88, R1
+	 112: INCEIPL       	$4
+
+	0x25476ABC:  4E800020  blr
+	 113: GETL       	LR, t90
+	 114: JMPo-r       	t90  ($4)
+
+
+
+. 700 25476A5C 100
+. 81 01 00 00 81 E8 00 04 80 88 FF B4 7D E8 03 A6 81 C8 FF B8 81 E8 FF BC 7C 80 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+==== BB 701 (0x25477238) approx BBs exec'd 0 ====
+
+	0x25477238:  2E03FFFF  cmpi cr4,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x2547723C:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R26
+	   7: INCEIPL       	$4
+
+	0x25477240:  7D6E00D0  neg r11,r14
+	   8: GETL       	R14, t8
+	   9: NEGL       	t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x25477244:  7CE00026  mfcr r7
+	  12: GETL       	CR, t10
+	  13: PUTL       	t10, R7
+	  14: INCEIPL       	$4
+
+	0x25477248:  54E79FFE  rlwinm r7,r7,19,31,31
+	  15: GETL       	R7, t12
+	  16: ROLL       	$0x13, t12
+	  17: ANDL       	$0x1, t12
+	  18: PUTL       	t12, R7
+	  19: INCEIPL       	$4
+
+	0x2547724C:  556E0FFE  rlwinm r14,r11,1,31,31
+	  20: GETL       	R11, t14
+	  21: SHRL       	$0x1F, t14
+	  22: PUTL       	t14, R14
+	  23: INCEIPL       	$4
+
+	0x25477250:  7CEB7039  and. r11,r7,r14
+	  24: GETL       	R7, t16
+	  25: GETL       	R14, t18
+	  26: ANDL       	t16, t18
+	  27: PUTL       	t18, R11
+	  28: CMP0L       	t18, t20  (-rSo)
+	  29: ICRFL       	t20, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0x25477254:  41820044  bc 12,2,0x25477298
+	  31: Js02o       	$0x25477298
+
+
+
+. 701 25477238 32
+. 2E 03 FF FF 7C 7A 1B 78 7D 6E 00 D0 7C E0 00 26 54 E7 9F FE 55 6E 0F FE 7C EB 70 39 41 82 00 44
+==== BB 702 (0x25477258) approx BBs exec'd 0 ====
+
+	0x25477258:  801B01E0  lwz r0,480(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x1E0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547725C:  3BBB01E0  addi r29,r27,480
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x1E0, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0x25477260:  39200000  li r9,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0x25477264:  2F00FFFF  cmpi cr6,r0,-1
+	  12: GETL       	R0, t8
+	  13: MOVL       	$0xFFFFFFFF, t12
+	  14: CMPL       	t8, t12, t10  (-rSo)
+	  15: ICRFL       	t10, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0x25477268:  419A0028  bc 12,26,0x25477290
+	  17: Js26o       	$0x25477290
+
+
+
+. 702 25477258 20
+. 80 1B 01 E0 3B BB 01 E0 39 20 00 00 2F 00 FF FF 41 9A 00 28
+==== BB 703 (0x25477290) approx BBs exec'd 0 ====
+
+	0x25477290:  2C090000  cmpi cr0,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25477294:  408200DC  bc 4,2,0x25477370
+	   4: Jc02o       	$0x25477370
+
+
+
+. 703 25477290 8
+. 2C 09 00 00 40 82 00 DC
+==== BB 704 (0x25477298) approx BBs exec'd 0 ====
+
+	0x25477298:  4092008C  bc 4,18,0x25477324
+	   0: Jc18o       	$0x25477324
+
+
+
+. 704 25477298 4
+. 40 92 00 8C
+==== BB 705 (0x2547729C) approx BBs exec'd 0 ====
+
+	0x2547729C:  2F170000  cmpi cr6,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x254772A0:  409A01E4  bc 4,26,0x25477484
+	   4: Jc26o       	$0x25477484
+
+
+
+. 705 2547729C 8
+. 2F 17 00 00 40 9A 01 E4
+==== BB 706 (0x254772A4) approx BBs exec'd 0 ====
+
+	0x254772A4:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254772A8:  48000949  bl 0x25477BF0
+	   3: MOVL       	$0x254772AC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25477BF0  ($4)
+
+
+
+. 706 254772A4 8
+. 7F E3 FB 78 48 00 09 49
+==== BB 707 _dl_load_cache_lookup(0x25477BF0) approx BBs exec'd 0 ====
+
+	0x25477BF0:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25477BF4:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x25477BF8:  4801F409  bl 0x25497000
+	   9: MOVL       	$0x25477BFC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 707 25477BF0 12
+. 94 21 FF C0 7C 88 02 A6 48 01 F4 09
+==== BB 708 (0x25477BFC) approx BBs exec'd 0 ====
+
+	0x25477BFC:  93C10038  stw r30,56(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x38, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25477C00:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25477C04:  9261000C  stw r19,12(r1)
+	   8: GETL       	R19, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25477C08:  90810044  stw r4,68(r1)
+	  13: GETL       	R4, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x44, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25477C0C:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0x25477C10:  92E1001C  stw r23,28(r1)
+	  21: GETL       	R23, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25477C14:  7C771B78  or r23,r3,r3
+	  26: GETL       	R3, t20
+	  27: PUTL       	t20, R23
+	  28: INCEIPL       	$4
+
+	0x25477C18:  827E04F4  lwz r19,1268(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4F4, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R19
+	  33: INCEIPL       	$4
+
+	0x25477C1C:  92810010  stw r20,16(r1)
+	  34: GETL       	R20, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x10, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x25477C20:  80130000  lwz r0,0(r19)
+	  39: GETL       	R19, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0x25477C24:  92A10014  stw r21,20(r1)
+	  43: GETL       	R21, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x14, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x25477C28:  70090001  andi. r9,r0,0x1
+	  48: GETL       	R0, t38
+	  49: ANDL       	$0x1, t38
+	  50: PUTL       	t38, R9
+	  51: CMP0L       	t38, t40  (-rSo)
+	  52: ICRFL       	t40, $0x0, CR
+	  53: INCEIPL       	$4
+
+	0x25477C2C:  92C10018  stw r22,24(r1)
+	  54: GETL       	R22, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x18, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0x25477C30:  93010020  stw r24,32(r1)
+	  59: GETL       	R24, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x20, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0x25477C34:  93210024  stw r25,36(r1)
+	  64: GETL       	R25, t50
+	  65: GETL       	R1, t52
+	  66: ADDL       	$0x24, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0x25477C38:  93410028  stw r26,40(r1)
+	  69: GETL       	R26, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0x28, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0x25477C3C:  9361002C  stw r27,44(r1)
+	  74: GETL       	R27, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x2C, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0x25477C40:  93810030  stw r28,48(r1)
+	  79: GETL       	R28, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x30, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x25477C44:  93A10034  stw r29,52(r1)
+	  84: GETL       	R29, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x34, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0x25477C48:  93E1003C  stw r31,60(r1)
+	  89: GETL       	R31, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	$0x3C, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0x25477C4C:  91810008  stw r12,8(r1)
+	  94: GETL       	R12, t74
+	  95: GETL       	R1, t76
+	  96: ADDL       	$0x8, t76
+	  97: STL       	t74, (t76)
+	  98: INCEIPL       	$4
+
+	0x25477C50:  4082037C  bc 4,2,0x25477FCC
+	  99: Jc02o       	$0x25477FCC
+
+
+
+. 708 25477BFC 88
+. 93 C1 00 38 7F C8 02 A6 92 61 00 0C 90 81 00 44 7D 80 00 26 92 E1 00 1C 7C 77 1B 78 82 7E 04 F4 92 81 00 10 80 13 00 00 92 A1 00 14 70 09 00 01 92 C1 00 18 93 01 00 20 93 21 00 24 93 41 00 28 93 61 00 2C 93 81 00 30 93 A1 00 34 93 E1 00 3C 91 81 00 08 40 82 03 7C
+==== BB 709 (0x25477C54) approx BBs exec'd 0 ====
+
+	0x25477C54:  82DE0254  lwz r22,596(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x254, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R22
+	   4: INCEIPL       	$4
+
+	0x25477C58:  80760000  lwz r3,0(r22)
+	   5: GETL       	R22, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0x25477C5C:  2F830000  cmpi cr7,r3,0
+	   9: GETL       	R3, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x25477C60:  419E00FC  bc 12,30,0x25477D5C
+	  13: Js30o       	$0x25477D5C
+
+
+
+. 709 25477C54 16
+. 82 DE 02 54 80 76 00 00 2F 83 00 00 41 9E 00 FC
+==== BB 710 (0x25477D5C) approx BBs exec'd 0 ====
+
+	0x25477D5C:  807E0260  lwz r3,608(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x260, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25477D60:  38A00001  li r5,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25477D64:  809E025C  lwz r4,604(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x25C, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25477D68:  48004245  bl 0x2547BFAC
+	  13: MOVL       	$0x25477D6C, t10
+	  14: PUTL       	t10, LR
+	  15: JMPo-c       	$0x2547BFAC  ($4)
+
+
+
+. 710 25477D5C 16
+. 80 7E 02 60 38 A0 00 01 80 9E 02 5C 48 00 42 45
+==== BB 711 _dl_sysdep_read_whole_file(0x2547BFAC) approx BBs exec'd 0 ====
+
+	0x2547BFAC:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x2547BFB0:  9421FF60  stwu r1,-160(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF60, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547BFB4:  93810090  stw r28,144(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x90, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547BFB8:  7C9C2378  or r28,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R28
+	  16: INCEIPL       	$4
+
+	0x2547BFBC:  38800000  li r4,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R4
+	  19: INCEIPL       	$4
+
+	0x2547BFC0:  9361008C  stw r27,140(r1)
+	  20: GETL       	R27, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x8C, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x2547BFC4:  93A10094  stw r29,148(r1)
+	  25: GETL       	R29, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x94, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0x2547BFC8:  7CBD2B78  or r29,r5,r5
+	  30: GETL       	R5, t22
+	  31: PUTL       	t22, R29
+	  32: INCEIPL       	$4
+
+	0x2547BFCC:  93E1009C  stw r31,156(r1)
+	  33: GETL       	R31, t24
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x9C, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x2547BFD0:  3B60FFFF  li r27,-1
+	  38: MOVL       	$0xFFFFFFFF, t28
+	  39: PUTL       	t28, R27
+	  40: INCEIPL       	$4
+
+	0x2547BFD4:  93C10098  stw r30,152(r1)
+	  41: GETL       	R30, t30
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x98, t32
+	  44: STL       	t30, (t32)
+	  45: INCEIPL       	$4
+
+	0x2547BFD8:  900100A4  stw r0,164(r1)
+	  46: GETL       	R0, t34
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0xA4, t36
+	  49: STL       	t34, (t36)
+	  50: INCEIPL       	$4
+
+	0x2547BFDC:  48006005  bl 0x25481FE0
+	  51: MOVL       	$0x2547BFE0, t38
+	  52: PUTL       	t38, LR
+	  53: JMPo-c       	$0x25481FE0  ($4)
+
+
+
+. 711 2547BFAC 52
+. 7C 08 02 A6 94 21 FF 60 93 81 00 90 7C 9C 23 78 38 80 00 00 93 61 00 8C 93 A1 00 94 7C BD 2B 78 93 E1 00 9C 3B 60 FF FF 93 C1 00 98 90 01 00 A4 48 00 60 05
+==== BB 712 (0x2547BFE0) approx BBs exec'd 0 ====
+
+	0x2547BFE0:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547BFE4:  38A10010  addi r5,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547BFE8:  38600003  li r3,3
+	   9: MOVL       	$0x3, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x2547BFEC:  7FE4FB78  or r4,r31,r31
+	  12: GETL       	R31, t8
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0x2547BFF0:  41800040  bc 12,0,0x2547C030
+	  15: Js00o       	$0x2547C030
+
+
+
+. 712 2547BFE0 20
+. 7C 7F 1B 79 38 A1 00 10 38 60 00 03 7F E4 FB 78 41 80 00 40
+==== BB 713 (0x2547BFF4) approx BBs exec'd 0 ====
+
+	0x2547BFF4:  48005EB1  bl 0x25481EA4
+	   0: MOVL       	$0x2547BFF8, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25481EA4  ($4)
+
+
+
+. 713 2547BFF4 4
+. 48 00 5E B1
+==== BB 714 (0x2547BFF8) approx BBs exec'd 0 ====
+
+	0x2547BFF8:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547BFFC:  419C002C  bc 12,28,0x2547C028
+	   4: Js28o       	$0x2547C028
+
+
+
+. 714 2547BFF8 8
+. 2F 83 00 00 41 9C 00 2C
+==== BB 715 (0x2547C000) approx BBs exec'd 0 ====
+
+	0x2547C000:  81210044  lwz r9,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547C004:  7FA5EB78  or r5,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547C008:  38C00002  li r6,2
+	   8: MOVL       	$0x2, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x2547C00C:  7FE7FB78  or r7,r31,r31
+	  11: GETL       	R31, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x2547C010:  2C890000  cmpi cr1,r9,0
+	  14: GETL       	R9, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0x2547C014:  7D244B78  or r4,r9,r9
+	  18: GETL       	R9, t14
+	  19: PUTL       	t14, R4
+	  20: INCEIPL       	$4
+
+	0x2547C018:  39000000  li r8,0
+	  21: MOVL       	$0x0, t16
+	  22: PUTL       	t16, R8
+	  23: INCEIPL       	$4
+
+	0x2547C01C:  38600000  li r3,0
+	  24: MOVL       	$0x0, t18
+	  25: PUTL       	t18, R3
+	  26: INCEIPL       	$4
+
+	0x2547C020:  913C0000  stw r9,0(r28)
+	  27: GETL       	R9, t20
+	  28: GETL       	R28, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0x2547C024:  40860034  bc 4,6,0x2547C058
+	  31: Jc06o       	$0x2547C058
+
+
+
+. 715 2547C000 40
+. 81 21 00 44 7F A5 EB 78 38 C0 00 02 7F E7 FB 78 2C 89 00 00 7D 24 4B 78 39 00 00 00 38 60 00 00 91 3C 00 00 40 86 00 34
+==== BB 716 (0x2547C058) approx BBs exec'd 0 ====
+
+	0x2547C058:  48006741  bl 0x25482798
+	   0: MOVL       	$0x2547C05C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25482798  ($4)
+
+
+
+. 716 2547C058 4
+. 48 00 67 41
+==== BB 717 (0x2547C05C) approx BBs exec'd 0 ====
+
+	0x2547C05C:  7C7B1B78  or r27,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0x2547C060:  4BFFFFC8  b 0x2547C028
+	   3: JMPo       	$0x2547C028  ($4)
+
+
+
+. 717 2547C05C 8
+. 7C 7B 1B 78 4B FF FF C8
+==== BB 718 (0x2547C028) approx BBs exec'd 0 ====
+
+	0x2547C028:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547C02C:  48005FC5  bl 0x25481FF0
+	   3: MOVL       	$0x2547C030, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25481FF0  ($4)
+
+
+
+. 718 2547C028 8
+. 7F E3 FB 78 48 00 5F C5
+==== BB 719 (0x2547C030) approx BBs exec'd 0 ====
+
+	0x2547C030:  808100A4  lwz r4,164(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xA4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547C034:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547C038:  83810090  lwz r28,144(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x90, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x2547C03C:  8361008C  lwz r27,140(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0x2547C040:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547C044:  83A10094  lwz r29,148(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x94, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x2547C048:  83C10098  lwz r30,152(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x98, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x2547C04C:  83E1009C  lwz r31,156(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x9C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0x2547C050:  382100A0  addi r1,r1,160
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0xA0, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0x2547C054:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+
+. 719 2547C030 40
+. 80 81 00 A4 7F 63 DB 78 83 81 00 90 83 61 00 8C 7C 88 03 A6 83 A1 00 94 83 C1 00 98 83 E1 00 9C 38 21 00 A0 4E 80 00 20
+==== BB 720 (0x25477D6C) approx BBs exec'd 0 ====
+
+	0x25477D6C:  2E03FFFF  cmpi cr4,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x25477D70:  7C7F1B78  or r31,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R31
+	   7: INCEIPL       	$4
+
+	0x25477D74:  41920154  bc 12,18,0x25477EC8
+	   8: Js18o       	$0x25477EC8
+
+
+
+. 720 25477D6C 12
+. 2E 03 FF FF 7C 7F 1B 78 41 92 01 54
+==== BB 721 (0x25477D78) approx BBs exec'd 0 ====
+
+	0x25477D78:  813E025C  lwz r9,604(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x25C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25477D7C:  83A90000  lwz r29,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R29
+	   8: INCEIPL       	$4
+
+	0x25477D80:  289D0010  cmpli cr1,r29,16
+	   9: GETL       	R29, t8
+	  10: MOVL       	$0x10, t12
+	  11: CMPUL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25477D84:  408500E8  bc 4,5,0x25477E6C
+	  14: Jc05o       	$0x25477E6C
+
+
+
+. 721 25477D78 16
+. 81 3E 02 5C 83 A9 00 00 28 9D 00 10 40 85 00 E8
+==== BB 722 (0x25477D88) approx BBs exec'd 0 ====
+
+	0x25477D88:  809E0264  lwz r4,612(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x264, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25477D8C:  38A0000B  li r5,11
+	   5: MOVL       	$0xB, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25477D90:  4800B509  bl 0x25483298
+	   8: MOVL       	$0x25477D94, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x25483298  ($4)
+
+
+
+. 722 25477D88 12
+. 80 9E 02 64 38 A0 00 0B 48 00 B5 09
+==== BB 723 (0x25477D94) approx BBs exec'd 0 ====
+
+	0x25477D94:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25477D98:  409A00D4  bc 4,26,0x25477E6C
+	   4: Jc26o       	$0x25477E6C
+
+
+
+. 723 25477D94 8
+. 2F 03 00 00 40 9A 00 D4
+==== BB 724 (0x25477D9C) approx BBs exec'd 0 ====
+
+	0x25477D9C:  815F000C  lwz r10,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25477DA0:  93F60000  stw r31,0(r22)
+	   5: GETL       	R31, t4
+	   6: GETL       	R22, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x25477DA4:  1D0A000C  mulli r8,r10,12
+	   9: GETL       	R10, t8
+	  10: MULL       	$0xC, t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0x25477DA8:  839E0258  lwz r28,600(r30)
+	  13: GETL       	R30, t10
+	  14: ADDL       	$0x258, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0x25477DAC:  38E80017  addi r7,r8,23
+	  18: GETL       	R8, t14
+	  19: ADDL       	$0x17, t14
+	  20: PUTL       	t14, R7
+	  21: INCEIPL       	$4
+
+	0x25477DB0:  54E50038  rlwinm r5,r7,0,0,28
+	  22: GETL       	R7, t16
+	  23: ANDL       	$0xFFFFFFF8, t16
+	  24: PUTL       	t16, R5
+	  25: INCEIPL       	$4
+
+	0x25477DB4:  38C50030  addi r6,r5,48
+	  26: GETL       	R5, t18
+	  27: ADDL       	$0x30, t18
+	  28: PUTL       	t18, R6
+	  29: INCEIPL       	$4
+
+	0x25477DB8:  7C7F2A14  add r3,r31,r5
+	  30: GETL       	R31, t20
+	  31: GETL       	R5, t22
+	  32: ADDL       	t20, t22
+	  33: PUTL       	t22, R3
+	  34: INCEIPL       	$4
+
+	0x25477DBC:  7E06E840  cmpl cr4,r6,r29
+	  35: GETL       	R6, t24
+	  36: GETL       	R29, t26
+	  37: CMPUL       	t24, t26, t28  (-rSo)
+	  38: ICRFL       	t28, $0x4, CR
+	  39: INCEIPL       	$4
+
+	0x25477DC0:  4191001C  bc 12,17,0x25477DDC
+	  40: Js17o       	$0x25477DDC
+
+
+
+. 724 25477D9C 40
+. 81 5F 00 0C 93 F6 00 00 1D 0A 00 0C 83 9E 02 58 38 E8 00 17 54 E5 00 38 38 C5 00 30 7C 7F 2A 14 7E 06 E8 40 41 91 00 1C
+==== BB 725 (0x25477DC4) approx BBs exec'd 0 ====
+
+	0x25477DC4:  809E0268  lwz r4,616(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x268, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25477DC8:  38A00014  li r5,20
+	   5: MOVL       	$0x14, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25477DCC:  907C0000  stw r3,0(r28)
+	   8: GETL       	R3, t6
+	   9: GETL       	R28, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25477DD0:  4800B4C9  bl 0x25483298
+	  12: MOVL       	$0x25477DD4, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0x25483298  ($4)
+
+
+
+. 725 25477DC4 16
+. 80 9E 02 68 38 A0 00 14 90 7C 00 00 48 00 B4 C9
+==== BB 726 (0x254832B8) approx BBs exec'd 0 ====
+
+	0x254832B8:  548007BE  rlwinm r0,r4,0,30,31
+	   0: GETL       	R4, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x254832BC:  2C000000  cmpi cr0,r0,0
+	   4: GETL       	R0, t2
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x254832C0:  4182002C  bc 12,2,0x254832EC
+	   8: Js02o       	$0x254832EC
+
+
+
+. 726 254832B8 12
+. 54 80 07 BE 2C 00 00 00 41 82 00 2C
+==== BB 727 (0x254832EC) approx BBs exec'd 0 ====
+
+	0x254832EC:  556907BE  rlwinm r9,r11,0,30,31
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x254832F0:  2F090000  cmpi cr6,r9,0
+	   4: GETL       	R9, t2
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x254832F4:  409A00C0  bc 4,26,0x254833B4
+	   8: Jc26o       	$0x254833B4
+
+
+
+. 727 254832EC 12
+. 55 69 07 BE 2F 09 00 00 40 9A 00 C0
+==== BB 728 (0x254832F8) approx BBs exec'd 0 ====
+
+	0x254832F8:  54A8F0BE  rlwinm r8,r5,30,2,31
+	   0: GETL       	R5, t0
+	   1: SHRL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x254832FC:  7D6A5B78  or r10,r11,r11
+	   4: GETL       	R11, t2
+	   5: PUTL       	t2, R10
+	   6: INCEIPL       	$4
+
+	0x25483300:  550007BE  rlwinm r0,r8,0,30,31
+	   7: GETL       	R8, t4
+	   8: ANDL       	$0x3, t4
+	   9: PUTL       	t4, R0
+	  10: INCEIPL       	$4
+
+	0x25483304:  7C892378  or r9,r4,r4
+	  11: GETL       	R4, t6
+	  12: PUTL       	t6, R9
+	  13: INCEIPL       	$4
+
+	0x25483308:  2F800001  cmpi cr7,r0,1
+	  14: GETL       	R0, t8
+	  15: MOVL       	$0x1, t12
+	  16: CMPL       	t8, t12, t10  (-rSo)
+	  17: ICRFL       	t10, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2548330C:  419E0150  bc 12,30,0x2548345C
+	  19: Js30o       	$0x2548345C
+
+
+
+. 728 254832F8 24
+. 54 A8 F0 BE 7D 6A 5B 78 55 00 07 BE 7C 89 23 78 2F 80 00 01 41 9E 01 50
+==== BB 729 (0x2548345C) approx BBs exec'd 0 ====
+
+	0x2548345C:  806B0000  lwz r3,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25483460:  3908FFFF  addi r8,r8,-1
+	   4: GETL       	R8, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0x25483464:  80C40000  lwz r6,0(r4)
+	   8: GETL       	R4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R6
+	  11: INCEIPL       	$4
+
+	0x25483468:  394B0004  addi r10,r11,4
+	  12: GETL       	R11, t10
+	  13: ADDL       	$0x4, t10
+	  14: PUTL       	t10, R10
+	  15: INCEIPL       	$4
+
+	0x2548346C:  39240004  addi r9,r4,4
+	  16: GETL       	R4, t12
+	  17: ADDL       	$0x4, t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0x25483470:  7F033000  cmp cr6,r3,r6
+	  20: GETL       	R3, t14
+	  21: GETL       	R6, t16
+	  22: CMPL       	t14, t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x6, CR
+	  24: INCEIPL       	$4
+
+	0x25483474:  80EA0000  lwz r7,0(r10)
+	  25: GETL       	R10, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R7
+	  28: INCEIPL       	$4
+
+	0x25483478:  80090000  lwz r0,0(r9)
+	  29: GETL       	R9, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R0
+	  32: INCEIPL       	$4
+
+	0x2548347C:  419A00C8  bc 12,26,0x25483544
+	  33: Js26o       	$0x25483544
+
+
+
+. 729 2548345C 36
+. 80 6B 00 00 39 08 FF FF 80 C4 00 00 39 4B 00 04 39 24 00 04 7F 03 30 00 80 EA 00 00 80 09 00 00 41 9A 00 C8
+==== BB 730 (0x25483544) approx BBs exec'd 0 ====
+
+	0x25483544:  7F870000  cmp cr7,r7,r0
+	   0: GETL       	R7, t0
+	   1: GETL       	R0, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25483548:  806A0004  lwz r3,4(r10)
+	   5: GETL       	R10, t6
+	   6: ADDL       	$0x4, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R3
+	   9: INCEIPL       	$4
+
+	0x2548354C:  80C90004  lwz r6,4(r9)
+	  10: GETL       	R9, t10
+	  11: ADDL       	$0x4, t10
+	  12: LDL       	(t10), t12
+	  13: PUTL       	t12, R6
+	  14: INCEIPL       	$4
+
+	0x25483550:  409EFDF4  bc 4,30,0x25483344
+	  15: Jc30o       	$0x25483344
+
+
+
+. 730 25483544 16
+. 7F 87 00 00 80 6A 00 04 80 C9 00 04 40 9E FD F4
+==== BB 731 (0x25483554) approx BBs exec'd 0 ====
+
+	0x25483554:  7C033000  cmp cr0,r3,r6
+	   0: GETL       	R3, t0
+	   1: GETL       	R6, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25483558:  80EA0008  lwz r7,8(r10)
+	   5: GETL       	R10, t6
+	   6: ADDL       	$0x8, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R7
+	   9: INCEIPL       	$4
+
+	0x2548355C:  80090008  lwz r0,8(r9)
+	  10: GETL       	R9, t10
+	  11: ADDL       	$0x8, t10
+	  12: LDL       	(t10), t12
+	  13: PUTL       	t12, R0
+	  14: INCEIPL       	$4
+
+	0x25483560:  41A2FDD4  bc 13,2,0x25483334
+	  15: Js02o       	$0x25483334
+
+
+
+. 731 25483554 16
+. 7C 03 30 00 80 EA 00 08 80 09 00 08 41 A2 FD D4
+==== BB 732 (0x25483334) approx BBs exec'd 0 ====
+
+	0x25483334:  7C870000  cmp cr1,r7,r0
+	   0: GETL       	R7, t0
+	   1: GETL       	R0, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25483338:  806A000C  lwz r3,12(r10)
+	   5: GETL       	R10, t6
+	   6: ADDL       	$0xC, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R3
+	   9: INCEIPL       	$4
+
+	0x2548333C:  80C9000C  lwz r6,12(r9)
+	  10: GETL       	R9, t10
+	  11: ADDL       	$0xC, t10
+	  12: LDL       	(t10), t12
+	  13: PUTL       	t12, R6
+	  14: INCEIPL       	$4
+
+	0x25483340:  41860148  bc 12,6,0x25483488
+	  15: Js06o       	$0x25483488
+
+
+
+. 732 25483334 16
+. 7C 87 00 00 80 6A 00 0C 80 C9 00 0C 41 86 01 48
+==== BB 733 (0x25483488) approx BBs exec'd 0 ====
+
+	0x25483488:  3908FFFC  addi r8,r8,-4
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x2548348C:  394A0010  addi r10,r10,16
+	   4: GETL       	R10, t2
+	   5: ADDL       	$0x10, t2
+	   6: PUTL       	t2, R10
+	   7: INCEIPL       	$4
+
+	0x25483490:  2F080000  cmpi cr6,r8,0
+	   8: GETL       	R8, t4
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x25483494:  39290010  addi r9,r9,16
+	  12: GETL       	R9, t8
+	  13: ADDL       	$0x10, t8
+	  14: PUTL       	t8, R9
+	  15: INCEIPL       	$4
+
+	0x25483498:  40BAFFD8  bc 5,26,0x25483470
+	  16: Jc26o       	$0x25483470
+
+
+
+. 733 25483488 20
+. 39 08 FF FC 39 4A 00 10 2F 08 00 00 39 29 00 10 40 BA FF D8
+==== BB 734 (0x2548349C) approx BBs exec'd 0 ====
+
+	0x2548349C:  7F833000  cmp cr7,r3,r6
+	   0: GETL       	R3, t0
+	   1: GETL       	R6, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254834A0:  7C033010  subfc r0,r3,r6
+	   5: GETL       	R3, t6
+	   6: GETL       	R6, t8
+	   7: SBBL       	t6, t8  (-wCa)
+	   8: PUTL       	t8, R0
+	   9: INCEIPL       	$4
+
+	0x254834A4:  7C000110  subfe r0,r0,r0
+	  10: GETL       	R0, t10
+	  11: GETL       	R0, t12
+	  12: SBBL       	t10, t12  (-rCa-wCa)
+	  13: PUTL       	t12, R0
+	  14: INCEIPL       	$4
+
+	0x254834A8:  7C0003B8  nand r0,r0,r0
+	  15: GETL       	R0, t14
+	  16: GETL       	R0, t16
+	  17: ANDL       	t14, t16
+	  18: NOTL       	t16
+	  19: PUTL       	t16, R0
+	  20: INCEIPL       	$4
+
+	0x254834AC:  60000001  ori r0,r0,0x1
+	  21: GETL       	R0, t18
+	  22: ORL       	$0x1, t18
+	  23: PUTL       	t18, R0
+	  24: INCEIPL       	$4
+
+	0x254834B0:  409EFEA4  bc 4,30,0x25483354
+	  25: Jc30o       	$0x25483354
+
+
+
+. 734 2548349C 24
+. 7F 83 30 00 7C 03 30 10 7C 00 01 10 7C 00 03 B8 60 00 00 01 40 9E FE A4
+==== BB 735 (0x254834B4) approx BBs exec'd 0 ====
+
+	0x254834B4:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254834B8:  4BFFFE9C  b 0x25483354
+	   3: JMPo       	$0x25483354  ($4)
+
+
+
+. 735 254834B4 8
+. 38 00 00 00 4B FF FE 9C
+==== BB 736 (0x25483354) approx BBs exec'd 0 ====
+
+	0x25483354:  2F800000  cmpi cr7,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25483358:  7C030378  or r3,r0,r0
+	   4: GETL       	R0, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x2548335C:  409E0040  bc 4,30,0x2548339C
+	   7: Jc30o       	$0x2548339C
+
+
+
+. 736 25483354 12
+. 2F 80 00 00 7C 03 03 78 40 9E 00 40
+==== BB 737 (0x25483360) approx BBs exec'd 0 ====
+
+	0x25483360:  54BC003A  rlwinm r28,r5,0,0,29
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x25483364:  54A507BE  rlwinm r5,r5,0,30,31
+	   4: GETL       	R5, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0x25483368:  7C84E214  add r4,r4,r28
+	   8: GETL       	R4, t4
+	   9: GETL       	R28, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0x2548336C:  7D6BE214  add r11,r11,r28
+	  13: GETL       	R11, t8
+	  14: GETL       	R28, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R11
+	  17: INCEIPL       	$4
+
+	0x25483370:  2C050000  cmpi cr0,r5,0
+	  18: GETL       	R5, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0x25483374:  41820024  bc 12,2,0x25483398
+	  22: Js02o       	$0x25483398
+
+
+
+. 737 25483360 24
+. 54 BC 00 3A 54 A5 07 BE 7C 84 E2 14 7D 6B E2 14 2C 05 00 00 41 82 00 24
+==== BB 738 (0x25477DD4) approx BBs exec'd 0 ====
+
+	0x25477DD4:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25477DD8:  41A2FE8C  bc 13,2,0x25477C64
+	   4: Js02o       	$0x25477C64
+
+
+
+. 738 25477DD4 8
+. 2C 03 00 00 41 A2 FE 8C
+==== BB 739 (0x25477C64) approx BBs exec'd 0 ====
+
+	0x25477C64:  81560000  lwz r10,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0x25477C68:  38600000  li r3,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x25477C6C:  2F0AFFFF  cmpi cr6,r10,-1
+	   7: GETL       	R10, t6
+	   8: MOVL       	$0xFFFFFFFF, t10
+	   9: CMPL       	t6, t10, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x25477C70:  419A0094  bc 12,26,0x25477D04
+	  12: Js26o       	$0x25477D04
+
+
+
+. 739 25477C64 16
+. 81 56 00 00 38 60 00 00 2F 0A FF FF 41 9A 00 94
+==== BB 740 (0x25477C74) approx BBs exec'd 0 ====
+
+	0x25477C74:  831E0258  lwz r24,600(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x258, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0x25477C78:  3AA00000  li r21,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R21
+	   7: INCEIPL       	$4
+
+	0x25477C7C:  83780000  lwz r27,0(r24)
+	   8: GETL       	R24, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R27
+	  11: INCEIPL       	$4
+
+	0x25477C80:  2E1BFFFF  cmpi cr4,r27,-1
+	  12: GETL       	R27, t10
+	  13: MOVL       	$0xFFFFFFFF, t14
+	  14: CMPL       	t10, t14, t12  (-rSo)
+	  15: ICRFL       	t12, $0x4, CR
+	  16: INCEIPL       	$4
+
+	0x25477C84:  41920164  bc 12,18,0x25477DE8
+	  17: Js18o       	$0x25477DE8
+
+
+
+. 740 25477C74 20
+. 83 1E 02 58 3A A0 00 00 83 78 00 00 2E 1B FF FF 41 92 01 64
+==== BB 741 (0x25477C88) approx BBs exec'd 0 ====
+
+	0x25477C88:  83FB0014  lwz r31,20(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x25477C8C:  839E025C  lwz r28,604(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x25C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25477C90:  3B5FFFFF  addi r26,r31,-1
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0xFFFFFFFF, t8
+	  12: PUTL       	t8, R26
+	  13: INCEIPL       	$4
+
+	0x25477C94:  7F95D000  cmp cr7,r21,r26
+	  14: GETL       	R21, t10
+	  15: GETL       	R26, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25477C98:  83BC0000  lwz r29,0(r28)
+	  19: GETL       	R28, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R29
+	  22: INCEIPL       	$4
+
+	0x25477C9C:  3B800000  li r28,0
+	  23: MOVL       	$0x0, t20
+	  24: PUTL       	t20, R28
+	  25: INCEIPL       	$4
+
+	0x25477CA0:  7ECAEA14  add r22,r10,r29
+	  26: GETL       	R10, t22
+	  27: GETL       	R29, t24
+	  28: ADDL       	t22, t24
+	  29: PUTL       	t24, R22
+	  30: INCEIPL       	$4
+
+	0x25477CA4:  7F3BB050  subf r25,r27,r22
+	  31: GETL       	R27, t26
+	  32: GETL       	R22, t28
+	  33: SUBL       	t26, t28
+	  34: PUTL       	t28, R25
+	  35: INCEIPL       	$4
+
+	0x25477CA8:  419D004C  bc 12,29,0x25477CF4
+	  36: Js29o       	$0x25477CF4
+
+
+
+. 741 25477C88 36
+. 83 FB 00 14 83 9E 02 5C 3B 5F FF FF 7F 95 D0 00 83 BC 00 00 3B 80 00 00 7E CA EA 14 7F 3B B0 50 41 9D 00 4C
+==== BB 742 (0x25477CAC) approx BBs exec'd 0 ====
+
+	0x25477CAC:  7C7CD214  add r3,r28,r26
+	   0: GETL       	R28, t0
+	   1: GETL       	R26, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25477CB0:  80980000  lwz r4,0(r24)
+	   5: GETL       	R24, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x25477CB4:  7C7D0E70  srawi r29,r3,1
+	   9: GETL       	R3, t8
+	  10: SARL       	$0x1, t8  (-wCa)
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x25477CB8:  7FBD0194  addze r29,r29
+	  13: GETL       	R29, t10
+	  14: ADCL       	$0x0, t10  (-rCa-wCa)
+	  15: PUTL       	t10, R29
+	  16: INCEIPL       	$4
+
+	0x25477CBC:  7EE3BB78  or r3,r23,r23
+	  17: GETL       	R23, t12
+	  18: PUTL       	t12, R3
+	  19: INCEIPL       	$4
+
+	0x25477CC0:  1FFD0018  mulli r31,r29,24
+	  20: GETL       	R29, t14
+	  21: MULL       	$0x18, t14
+	  22: PUTL       	t14, R31
+	  23: INCEIPL       	$4
+
+	0x25477CC4:  7D9F2214  add r12,r31,r4
+	  24: GETL       	R31, t16
+	  25: GETL       	R4, t18
+	  26: ADDL       	t16, t18
+	  27: PUTL       	t18, R12
+	  28: INCEIPL       	$4
+
+	0x25477CC8:  800C0034  lwz r0,52(r12)
+	  29: GETL       	R12, t20
+	  30: ADDL       	$0x34, t20
+	  31: LDL       	(t20), t22
+	  32: PUTL       	t22, R0
+	  33: INCEIPL       	$4
+
+	0x25477CCC:  7C00C840  cmpl cr0,r0,r25
+	  34: GETL       	R0, t24
+	  35: GETL       	R25, t26
+	  36: CMPUL       	t24, t26, t28  (-rSo)
+	  37: ICRFL       	t28, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0x25477CD0:  7C9B0214  add r4,r27,r0
+	  39: GETL       	R27, t30
+	  40: GETL       	R0, t32
+	  41: ADDL       	t30, t32
+	  42: PUTL       	t32, R4
+	  43: INCEIPL       	$4
+
+	0x25477CD4:  40800020  bc 4,0,0x25477CF4
+	  44: Jc00o       	$0x25477CF4
+
+
+
+. 742 25477CAC 44
+. 7C 7C D2 14 80 98 00 00 7C 7D 0E 70 7F BD 01 94 7E E3 BB 78 1F FD 00 18 7D 9F 22 14 80 0C 00 34 7C 00 C8 40 7C 9B 02 14 40 80 00 20
+==== BB 743 (0x25477CD8) approx BBs exec'd 0 ====
+
+	0x25477CD8:  4BFFFDFD  bl 0x25477AD4
+	   0: MOVL       	$0x25477CDC, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25477AD4  ($4)
+
+
+
+. 743 25477CD8 4
+. 4B FF FD FD
+==== BB 744 _dl_cache_libcmp(0x25477AD4) approx BBs exec'd 0 ====
+
+	0x25477AD4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25477AD8:  7C671B78  or r7,r3,r3
+	   6: GETL       	R3, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x25477ADC:  89430000  lbz r10,0(r3)
+	   9: GETL       	R3, t6
+	  10: LDB       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x25477AE0:  2F8A0000  cmpi cr7,r10,0
+	  13: GETL       	R10, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x25477AE4:  419E00DC  bc 12,30,0x25477BC0
+	  17: Js30o       	$0x25477BC0
+
+
+
+. 744 25477AD4 20
+. 94 21 FF F0 7C 67 1B 78 89 43 00 00 2F 8A 00 00 41 9E 00 DC
+==== BB 745 (0x25477AE8) approx BBs exec'd 0 ====
+
+	0x25477AE8:  88640000  lbz r3,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25477AEC:  380AFFD0  addi r0,r10,-48
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0xFFFFFFD0, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25477AF0:  7D4B5378  or r11,r10,r10
+	   8: GETL       	R10, t6
+	   9: PUTL       	t6, R11
+	  10: INCEIPL       	$4
+
+	0x25477AF4:  28000009  cmpli cr0,r0,9
+	  11: GETL       	R0, t8
+	  12: MOVL       	$0x9, t12
+	  13: CMPUL       	t8, t12, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x25477AF8:  4181008C  bc 12,1,0x25477B84
+	  16: Js01o       	$0x25477B84
+
+
+
+. 745 25477AE8 20
+. 88 64 00 00 38 0A FF D0 7D 4B 53 78 28 00 00 09 41 81 00 8C
+==== BB 746 (0x25477B84) approx BBs exec'd 0 ====
+
+	0x25477B84:  3903FFD0  addi r8,r3,-48
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFD0, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x25477B88:  7C601B78  or r0,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R0
+	   6: INCEIPL       	$4
+
+	0x25477B8C:  28080009  cmpli cr0,r8,9
+	   7: GETL       	R8, t4
+	   8: MOVL       	$0x9, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25477B90:  40810048  bc 4,1,0x25477BD8
+	  12: Jc01o       	$0x25477BD8
+
+
+
+. 746 25477B84 16
+. 39 03 FF D0 7C 60 1B 78 28 08 00 09 40 81 00 48
+==== BB 747 (0x25477B94) approx BBs exec'd 0 ====
+
+	0x25477B94:  5543063E  rlwinm r3,r10,0,24,31
+	   0: GETL       	R10, t0
+	   1: ANDL       	$0xFF, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x25477B98:  5400063E  rlwinm r0,r0,0,24,31
+	   4: GETL       	R0, t2
+	   5: ANDL       	$0xFF, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x25477B9C:  7C830000  cmp cr1,r3,r0
+	   8: GETL       	R3, t4
+	   9: GETL       	R0, t6
+	  10: CMPL       	t4, t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0x25477BA0:  40860044  bc 4,6,0x25477BE4
+	  13: Jc06o       	$0x25477BE4
+
+
+
+. 747 25477B94 16
+. 55 43 06 3E 54 00 06 3E 7C 83 00 00 40 86 00 44
+==== BB 748 (0x25477BA4) approx BBs exec'd 0 ====
+
+	0x25477BA4:  8D470001  lbzu r10,1(r7)
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R7
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0x25477BA8:  8C640001  lbzu r3,1(r4)
+	   6: GETL       	R4, t4
+	   7: ADDL       	$0x1, t4
+	   8: PUTL       	t4, R4
+	   9: LDB       	(t4), t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x25477BAC:  2F0A0000  cmpi cr6,r10,0
+	  12: GETL       	R10, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x6, CR
+	  15: INCEIPL       	$4
+
+	0x25477BB0:  409AFF3C  bc 4,26,0x25477AEC
+	  16: Jc26o       	$0x25477AEC
+
+
+
+. 748 25477BA4 16
+. 8D 47 00 01 8C 64 00 01 2F 0A 00 00 40 9A FF 3C
+==== BB 749 (0x25477AEC) approx BBs exec'd 0 ====
+
+	0x25477AEC:  380AFFD0  addi r0,r10,-48
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0xFFFFFFD0, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25477AF0:  7D4B5378  or r11,r10,r10
+	   4: GETL       	R10, t2
+	   5: PUTL       	t2, R11
+	   6: INCEIPL       	$4
+
+	0x25477AF4:  28000009  cmpli cr0,r0,9
+	   7: GETL       	R0, t4
+	   8: MOVL       	$0x9, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25477AF8:  4181008C  bc 12,1,0x25477B84
+	  12: Js01o       	$0x25477B84
+
+
+
+. 749 25477AEC 16
+. 38 0A FF D0 7D 4B 53 78 28 00 00 09 41 81 00 8C
+==== BB 750 (0x25477BE4) approx BBs exec'd 0 ====
+
+	0x25477BE4:  7C601850  subf r3,r0,r3
+	   0: GETL       	R0, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25477BE8:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x25477BEC:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+
+. 750 25477BE4 12
+. 7C 60 18 50 38 21 00 10 4E 80 00 20
+==== BB 751 (0x25477CDC) approx BBs exec'd 0 ====
+
+	0x25477CDC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25477CE0:  419E0214  bc 12,30,0x25477EF4
+	   4: Js30o       	$0x25477EF4
+
+
+
+. 751 25477CDC 8
+. 2F 83 00 00 41 9E 02 14
+==== BB 752 (0x25477CE4) approx BBs exec'd 0 ====
+
+	0x25477CE4:  409C006C  bc 4,28,0x25477D50
+	   0: Jc28o       	$0x25477D50
+
+
+
+. 752 25477CE4 4
+. 40 9C 00 6C
+==== BB 753 (0x25477D50) approx BBs exec'd 0 ====
+
+	0x25477D50:  3B5DFFFF  addi r26,r29,-1
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x25477D54:  7F9CD000  cmp cr7,r28,r26
+	   4: GETL       	R28, t2
+	   5: GETL       	R26, t4
+	   6: CMPL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25477D58:  4BFFFF98  b 0x25477CF0
+	   9: JMPo       	$0x25477CF0  ($4)
+
+
+
+. 753 25477D50 12
+. 3B 5D FF FF 7F 9C D0 00 4B FF FF 98
+==== BB 754 (0x25477CF0) approx BBs exec'd 0 ====
+
+	0x25477CF0:  409DFFBC  bc 4,29,0x25477CAC
+	   0: Jc29o       	$0x25477CAC
+
+
+
+. 754 25477CF0 4
+. 40 9D FF BC
+==== BB 755 (0x25477CE8) approx BBs exec'd 0 ====
+
+	0x25477CE8:  3B9D0001  addi r28,r29,1
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x25477CEC:  7F9CD000  cmp cr7,r28,r26
+	   4: GETL       	R28, t2
+	   5: GETL       	R26, t4
+	   6: CMPL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25477CF0:  409DFFBC  bc 4,29,0x25477CAC
+	   9: Jc29o       	$0x25477CAC
+
+
+
+. 755 25477CE8 12
+. 3B 9D 00 01 7F 9C D0 00 40 9D FF BC
+==== BB 756 (0x25477AFC) approx BBs exec'd 0 ====
+
+	0x25477AFC:  38A3FFD0  addi r5,r3,-48
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFD0, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x25477B00:  7C691B78  or r9,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R9
+	   6: INCEIPL       	$4
+
+	0x25477B04:  28850009  cmpli cr1,r5,9
+	   7: GETL       	R5, t4
+	   8: MOVL       	$0x9, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x25477B08:  418500C4  bc 12,5,0x25477BCC
+	  12: Js05o       	$0x25477BCC
+
+
+
+. 756 25477AFC 16
+. 38 A3 FF D0 7C 69 1B 78 28 85 00 09 41 85 00 C4
+==== BB 757 (0x25477B0C) approx BBs exec'd 0 ====
+
+	0x25477B0C:  8D470001  lbzu r10,1(r7)
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R7
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0x25477B10:  556B063E  rlwinm r11,r11,0,24,31
+	   6: GETL       	R11, t4
+	   7: ANDL       	$0xFF, t4
+	   8: PUTL       	t4, R11
+	   9: INCEIPL       	$4
+
+	0x25477B14:  5529063E  rlwinm r9,r9,0,24,31
+	  10: GETL       	R9, t6
+	  11: ANDL       	$0xFF, t6
+	  12: PUTL       	t6, R9
+	  13: INCEIPL       	$4
+
+	0x25477B18:  390BFFD0  addi r8,r11,-48
+	  14: GETL       	R11, t8
+	  15: ADDL       	$0xFFFFFFD0, t8
+	  16: PUTL       	t8, R8
+	  17: INCEIPL       	$4
+
+	0x25477B1C:  386AFFD0  addi r3,r10,-48
+	  18: GETL       	R10, t10
+	  19: ADDL       	$0xFFFFFFD0, t10
+	  20: PUTL       	t10, R3
+	  21: INCEIPL       	$4
+
+	0x25477B20:  3969FFD0  addi r11,r9,-48
+	  22: GETL       	R9, t12
+	  23: ADDL       	$0xFFFFFFD0, t12
+	  24: PUTL       	t12, R11
+	  25: INCEIPL       	$4
+
+	0x25477B24:  2B830009  cmpli cr7,r3,9
+	  26: GETL       	R3, t14
+	  27: MOVL       	$0x9, t18
+	  28: CMPUL       	t14, t18, t16  (-rSo)
+	  29: ICRFL       	t16, $0x7, CR
+	  30: INCEIPL       	$4
+
+	0x25477B28:  38840001  addi r4,r4,1
+	  31: GETL       	R4, t20
+	  32: ADDL       	$0x1, t20
+	  33: PUTL       	t20, R4
+	  34: INCEIPL       	$4
+
+	0x25477B2C:  419D0020  bc 12,29,0x25477B4C
+	  35: Js29o       	$0x25477B4C
+
+
+
+. 757 25477B0C 36
+. 8D 47 00 01 55 6B 06 3E 55 29 06 3E 39 0B FF D0 38 6A FF D0 39 69 FF D0 2B 83 00 09 38 84 00 01 41 9D 00 20
+==== BB 758 (0x25477B4C) approx BBs exec'd 0 ====
+
+	0x25477B4C:  88640000  lbz r3,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25477B50:  48000014  b 0x25477B64
+	   4: JMPo       	$0x25477B64  ($4)
+
+
+
+. 758 25477B4C 8
+. 88 64 00 00 48 00 00 14
+==== BB 759 (0x25477B64) approx BBs exec'd 0 ====
+
+	0x25477B64:  38A3FFD0  addi r5,r3,-48
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFD0, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x25477B68:  2B050009  cmpli cr6,r5,9
+	   4: GETL       	R5, t2
+	   5: MOVL       	$0x9, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25477B6C:  4099FFE8  bc 4,25,0x25477B54
+	   9: Jc25o       	$0x25477B54
+
+
+
+. 759 25477B64 12
+. 38 A3 FF D0 2B 05 00 09 40 99 FF E8
+==== BB 760 (0x25477B70) approx BBs exec'd 0 ====
+
+	0x25477B70:  7F885800  cmp cr7,r8,r11
+	   0: GETL       	R8, t0
+	   1: GETL       	R11, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25477B74:  41BE0038  bc 13,30,0x25477BAC
+	   5: Js30o       	$0x25477BAC
+
+
+
+. 760 25477B70 8
+. 7F 88 58 00 41 BE 00 38
+==== BB 761 (0x25477BAC) approx BBs exec'd 0 ====
+
+	0x25477BAC:  2F0A0000  cmpi cr6,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25477BB0:  409AFF3C  bc 4,26,0x25477AEC
+	   4: Jc26o       	$0x25477AEC
+
+
+
+. 761 25477BAC 8
+. 2F 0A 00 00 40 9A FF 3C
+==== BB 762 (0x25477BB4) approx BBs exec'd 0 ====
+
+	0x25477BB4:  7C635050  subf r3,r3,r10
+	   0: GETL       	R3, t0
+	   1: GETL       	R10, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25477BB8:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x25477BBC:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+
+. 762 25477BB4 12
+. 7C 63 50 50 38 21 00 10 4E 80 00 20
+==== BB 763 (0x25477EF4) approx BBs exec'd 0 ====
+
+	0x25477EF4:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25477EF8:  7FB6EB78  or r22,r29,r29
+	   4: GETL       	R29, t4
+	   5: PUTL       	t4, R22
+	   6: INCEIPL       	$4
+
+	0x25477EFC:  7FFCFB78  or r28,r31,r31
+	   7: GETL       	R31, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25477F00:  41BD0020  bc 13,29,0x25477F20
+	  10: Js29o       	$0x25477F20
+
+
+
+. 763 25477EF4 16
+. 2F 9D 00 00 7F B6 EB 78 7F FC FB 78 41 BD 00 20
+==== BB 764 (0x25477F20) approx BBs exec'd 0 ====
+
+	0x25477F20:  80D80000  lwz r6,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x25477F24:  7EE3BB78  or r3,r23,r23
+	   4: GETL       	R23, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x25477F28:  3B9CFFE8  addi r28,r28,-24
+	   7: GETL       	R28, t6
+	   8: ADDL       	$0xFFFFFFE8, t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0x25477F2C:  7D3F3214  add r9,r31,r6
+	  11: GETL       	R31, t8
+	  12: GETL       	R6, t10
+	  13: ADDL       	t8, t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0x25477F30:  80A9001C  lwz r5,28(r9)
+	  16: GETL       	R9, t12
+	  17: ADDL       	$0x1C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R5
+	  20: INCEIPL       	$4
+
+	0x25477F34:  7C85C840  cmpl cr1,r5,r25
+	  21: GETL       	R5, t16
+	  22: GETL       	R25, t18
+	  23: CMPUL       	t16, t18, t20  (-rSo)
+	  24: ICRFL       	t20, $0x1, CR
+	  25: INCEIPL       	$4
+
+	0x25477F38:  7C9B2A14  add r4,r27,r5
+	  26: GETL       	R27, t22
+	  27: GETL       	R5, t24
+	  28: ADDL       	t22, t24
+	  29: PUTL       	t24, R4
+	  30: INCEIPL       	$4
+
+	0x25477F3C:  4184FFCC  bc 12,4,0x25477F08
+	  31: Js04o       	$0x25477F08
+
+
+
+. 764 25477F20 32
+. 80 D8 00 00 7E E3 BB 78 3B 9C FF E8 7D 3F 32 14 80 A9 00 1C 7C 85 C8 40 7C 9B 2A 14 41 84 FF CC
+==== BB 765 (0x25477F08) approx BBs exec'd 0 ====
+
+	0x25477F08:  4BFFFBCD  bl 0x25477AD4
+	   0: MOVL       	$0x25477F0C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25477AD4  ($4)
+
+
+
+. 765 25477F08 4
+. 4B FF FB CD
+==== BB 766 (0x25477F0C) approx BBs exec'd 0 ====
+
+	0x25477F0C:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25477F10:  409A0030  bc 4,26,0x25477F40
+	   4: Jc26o       	$0x25477F40
+
+
+
+. 766 25477F0C 8
+. 2F 03 00 00 40 9A 00 30
+==== BB 767 (0x25477F14) approx BBs exec'd 0 ====
+
+	0x25477F14:  37BDFFFF  addic. r29,r29,-1
+	   0: GETL       	R29, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R29
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25477F18:  7F9FE378  or r31,r28,r28
+	   6: GETL       	R28, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0x25477F1C:  408100A8  bc 4,1,0x25477FC4
+	   9: Jc01o       	$0x25477FC4
+
+
+
+. 767 25477F14 12
+. 37 BD FF FF 7F 9F E3 78 40 81 00 A8
+==== BB 768 (0x25477F40) approx BBs exec'd 0 ====
+
+	0x25477F40:  7E1DB000  cmp cr4,r29,r22
+	   0: GETL       	R29, t0
+	   1: GETL       	R22, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x25477F44:  80F80000  lwz r7,0(r24)
+	   5: GETL       	R24, t6
+	   6: LDL       	(t6), t8
+	   7: PUTL       	t8, R7
+	   8: INCEIPL       	$4
+
+	0x25477F48:  7FFF3A14  add r31,r31,r7
+	   9: GETL       	R31, t10
+	  10: GETL       	R7, t12
+	  11: ADDL       	t10, t12
+	  12: PUTL       	t12, R31
+	  13: INCEIPL       	$4
+
+	0x25477F4C:  3B9F0030  addi r28,r31,48
+	  14: GETL       	R31, t14
+	  15: ADDL       	$0x30, t14
+	  16: PUTL       	t14, R28
+	  17: INCEIPL       	$4
+
+	0x25477F50:  40910024  bc 4,17,0x25477F74
+	  18: Jc17o       	$0x25477F74
+
+
+
+. 768 25477F40 20
+. 7E 1D B0 00 80 F8 00 00 7F FF 3A 14 3B 9F 00 30 40 91 00 24
+==== BB 769 (0x25477F74) approx BBs exec'd 0 ====
+
+	0x25477F74:  811F0030  lwz r8,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25477F78:  691F0001  xori r31,r8,0x1
+	   5: GETL       	R8, t4
+	   6: XORL       	$0x1, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0x25477F7C:  213F0000  subfic r9,r31,0
+	   9: GETL       	R31, t6
+	  10: MOVL       	$0x0, t8
+	  11: SBBL       	t6, t8  (-wCa)
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0x25477F80:  7FE9F914  adde r31,r9,r31
+	  14: GETL       	R9, t10
+	  15: GETL       	R31, t12
+	  16: ADCL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R31
+	  18: INCEIPL       	$4
+
+	0x25477F84:  69140003  xori r20,r8,0x3
+	  19: GETL       	R8, t14
+	  20: XORL       	$0x3, t14
+	  21: PUTL       	t14, R20
+	  22: INCEIPL       	$4
+
+	0x25477F88:  21540000  subfic r10,r20,0
+	  23: GETL       	R20, t16
+	  24: MOVL       	$0x0, t18
+	  25: SBBL       	t16, t18  (-wCa)
+	  26: PUTL       	t18, R10
+	  27: INCEIPL       	$4
+
+	0x25477F8C:  7E8AA114  adde r20,r10,r20
+	  28: GETL       	R10, t20
+	  29: GETL       	R20, t22
+	  30: ADCL       	t20, t22  (-rCa-wCa)
+	  31: PUTL       	t22, R20
+	  32: INCEIPL       	$4
+
+	0x25477F90:  7FEBA379  or. r11,r31,r20
+	  33: GETL       	R31, t24
+	  34: GETL       	R20, t26
+	  35: ORL       	t26, t24
+	  36: PUTL       	t24, R11
+	  37: CMP0L       	t24, t28  (-rSo)
+	  38: ICRFL       	t28, $0x0, CR
+	  39: INCEIPL       	$4
+
+	0x25477F94:  41820024  bc 12,2,0x25477FB8
+	  40: Js02o       	$0x25477FB8
+
+
+
+. 769 25477F74 36
+. 81 1F 00 30 69 1F 00 01 21 3F 00 00 7F E9 F9 14 69 14 00 03 21 54 00 00 7E 8A A1 14 7F EB A3 79 41 82 00 24
+==== BB 770 (0x25477F98) approx BBs exec'd 0 ====
+
+	0x25477F98:  80FC0008  lwz r7,8(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25477F9C:  7F07C840  cmpl cr6,r7,r25
+	   5: GETL       	R7, t4
+	   6: GETL       	R25, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x25477FA0:  40980018  bc 4,24,0x25477FB8
+	  10: Jc24o       	$0x25477FB8
+
+
+
+. 770 25477F98 12
+. 80 FC 00 08 7F 07 C8 40 40 98 00 18
+==== BB 771 (0x25477FA4) approx BBs exec'd 0 ====
+
+	0x25477FA4:  2E150000  cmpi cr4,r21,0
+	   0: GETL       	R21, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25477FA8:  41920128  bc 12,18,0x254780D0
+	   4: Js18o       	$0x254780D0
+
+
+
+. 771 25477FA4 8
+. 2E 15 00 00 41 92 01 28
+==== BB 772 (0x254780D0) approx BBs exec'd 0 ====
+
+	0x254780D0:  81330008  lwz r9,8(r19)
+	   0: GETL       	R19, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x254780D4:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254780D8:  41860010  bc 12,6,0x254780E8
+	   9: Js06o       	$0x254780E8
+
+
+
+. 772 254780D0 12
+. 81 33 00 08 2C 89 00 00 41 86 00 10
+==== BB 773 (0x254780DC) approx BBs exec'd 0 ====
+
+	0x254780DC:  817C000C  lwz r11,12(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254780E0:  7C0B4840  cmpl cr0,r11,r9
+	   5: GETL       	R11, t4
+	   6: GETL       	R9, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x254780E4:  4181FED4  bc 12,1,0x25477FB8
+	  10: Js01o       	$0x25477FB8
+
+
+
+. 773 254780DC 12
+. 81 7C 00 0C 7C 0B 48 40 41 81 FE D4
+==== BB 774 (0x254780E8) approx BBs exec'd 0 ====
+
+	0x254780E8:  80D3003C  lwz r6,60(r19)
+	   0: GETL       	R19, t0
+	   1: ADDL       	$0x3C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254780EC:  3A800000  li r20,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R20
+	   7: INCEIPL       	$4
+
+	0x254780F0:  807C0010  lwz r3,16(r28)
+	   8: GETL       	R28, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x254780F4:  66898000  oris r9,r20,0x8000
+	  13: MOVL       	$0x80000000, t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0x254780F8:  801C0014  lwz r0,20(r28)
+	  16: GETL       	R28, t12
+	  17: ADDL       	$0x14, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x254780FC:  7D2548F8  nor r5,r9,r9
+	  21: GETL       	R9, t16
+	  22: NOTL       	t16
+	  23: PUTL       	t16, R5
+	  24: INCEIPL       	$4
+
+	0x25478100:  7CCC30F8  nor r12,r6,r6
+	  25: GETL       	R6, t18
+	  26: NOTL       	t18
+	  27: PUTL       	t18, R12
+	  28: INCEIPL       	$4
+
+	0x25478104:  7C642838  and r4,r3,r5
+	  29: GETL       	R3, t20
+	  30: GETL       	R5, t22
+	  31: ANDL       	t20, t22
+	  32: PUTL       	t22, R4
+	  33: INCEIPL       	$4
+
+	0x25478108:  7C1C6038  and r28,r0,r12
+	  34: GETL       	R0, t24
+	  35: GETL       	R12, t26
+	  36: ANDL       	t24, t26
+	  37: PUTL       	t26, R28
+	  38: INCEIPL       	$4
+
+	0x2547810C:  7C80E379  or. r0,r4,r28
+	  39: GETL       	R4, t28
+	  40: GETL       	R28, t30
+	  41: ORL       	t30, t28
+	  42: PUTL       	t28, R0
+	  43: CMP0L       	t28, t32  (-rSo)
+	  44: ICRFL       	t32, $0x0, CR
+	  45: INCEIPL       	$4
+
+	0x25478110:  4082FEA8  bc 4,2,0x25477FB8
+	  46: Jc02o       	$0x25477FB8
+
+
+
+. 774 254780E8 44
+. 80 D3 00 3C 3A 80 00 00 80 7C 00 10 66 89 80 00 80 1C 00 14 7D 25 48 F8 7C CC 30 F8 7C 64 28 38 7C 1C 60 38 7C 80 E3 79 40 82 FE A8
+==== BB 775 (0x25478114) approx BBs exec'd 0 ====
+
+	0x25478114:  83F30038  lwz r31,56(r19)
+	   0: GETL       	R19, t0
+	   1: ADDL       	$0x38, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x25478118:  7EA7DA14  add r21,r7,r27
+	   5: GETL       	R7, t4
+	   6: GETL       	R27, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R21
+	   9: INCEIPL       	$4
+
+	0x2547811C:  7F1F4000  cmp cr6,r31,r8
+	  10: GETL       	R31, t8
+	  11: GETL       	R8, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0x25478120:  409AFE98  bc 4,26,0x25477FB8
+	  15: Jc26o       	$0x25477FB8
+
+
+
+. 775 25478114 16
+. 83 F3 00 38 7E A7 DA 14 7F 1F 40 00 40 9A FE 98
+==== BB 776 (0x25478124) approx BBs exec'd 0 ====
+
+	0x25478124:  4BFFFBD0  b 0x25477CF4
+	   0: JMPo       	$0x25477CF4  ($4)
+
+
+
+. 776 25478124 4
+. 4B FF FB D0
+==== BB 777 (0x25477CF4) approx BBs exec'd 0 ====
+
+	0x25477CF4:  82F30000  lwz r23,0(r19)
+	   0: GETL       	R19, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R23
+	   3: INCEIPL       	$4
+
+	0x25477CF8:  72EB0001  andi. r11,r23,0x1
+	   4: GETL       	R23, t4
+	   5: ANDL       	$0x1, t4
+	   6: PUTL       	t4, R11
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x25477CFC:  408201AC  bc 4,2,0x25477EA8
+	  10: Jc02o       	$0x25477EA8
+
+
+
+. 777 25477CF4 12
+. 82 F3 00 00 72 EB 00 01 40 82 01 AC
+==== BB 778 (0x25477D00) approx BBs exec'd 0 ====
+
+	0x25477D00:  7EA3AB78  or r3,r21,r21
+	   0: GETL       	R21, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25477D04:  82610044  lwz r19,68(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x44, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R19
+	   7: INCEIPL       	$4
+
+	0x25477D08:  80E10008  lwz r7,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0x25477D0C:  7E6803A6  mtlr r19
+	  13: GETL       	R19, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x25477D10:  82810010  lwz r20,16(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x10, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R20
+	  20: INCEIPL       	$4
+
+	0x25477D14:  8261000C  lwz r19,12(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R19
+	  25: INCEIPL       	$4
+
+	0x25477D18:  7CE08120  mtcrf 0x8,r7
+	  26: GETL       	R7, t20
+	  27: ICRFL       	t20, $0x4, CR
+	  28: INCEIPL       	$4
+
+	0x25477D1C:  82A10014  lwz r21,20(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x14, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R21
+	  33: INCEIPL       	$4
+
+	0x25477D20:  82C10018  lwz r22,24(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x18, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R22
+	  38: INCEIPL       	$4
+
+	0x25477D24:  82E1001C  lwz r23,28(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x1C, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R23
+	  43: INCEIPL       	$4
+
+	0x25477D28:  83010020  lwz r24,32(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x20, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R24
+	  48: INCEIPL       	$4
+
+	0x25477D2C:  83210024  lwz r25,36(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x24, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R25
+	  53: INCEIPL       	$4
+
+	0x25477D30:  83410028  lwz r26,40(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x28, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R26
+	  58: INCEIPL       	$4
+
+	0x25477D34:  8361002C  lwz r27,44(r1)
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x2C, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R27
+	  63: INCEIPL       	$4
+
+	0x25477D38:  83810030  lwz r28,48(r1)
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x30, t50
+	  66: LDL       	(t50), t52
+	  67: PUTL       	t52, R28
+	  68: INCEIPL       	$4
+
+	0x25477D3C:  83A10034  lwz r29,52(r1)
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x34, t54
+	  71: LDL       	(t54), t56
+	  72: PUTL       	t56, R29
+	  73: INCEIPL       	$4
+
+	0x25477D40:  83C10038  lwz r30,56(r1)
+	  74: GETL       	R1, t58
+	  75: ADDL       	$0x38, t58
+	  76: LDL       	(t58), t60
+	  77: PUTL       	t60, R30
+	  78: INCEIPL       	$4
+
+	0x25477D44:  83E1003C  lwz r31,60(r1)
+	  79: GETL       	R1, t62
+	  80: ADDL       	$0x3C, t62
+	  81: LDL       	(t62), t64
+	  82: PUTL       	t64, R31
+	  83: INCEIPL       	$4
+
+	0x25477D48:  38210040  addi r1,r1,64
+	  84: GETL       	R1, t66
+	  85: ADDL       	$0x40, t66
+	  86: PUTL       	t66, R1
+	  87: INCEIPL       	$4
+
+	0x25477D4C:  4E800020  blr
+	  88: GETL       	LR, t68
+	  89: JMPo-r       	t68  ($4)
+
+
+
+. 778 25477D00 80
+. 7E A3 AB 78 82 61 00 44 80 E1 00 08 7E 68 03 A6 82 81 00 10 82 61 00 0C 7C E0 81 20 82 A1 00 14 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 779 (0x254772AC) approx BBs exec'd 0 ====
+
+	0x254772AC:  7C721B79  or. r18,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R18
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254772B0:  7F800026  mfcr r28
+	   5: GETL       	CR, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x254772B4:  41820020  bc 12,2,0x254772D4
+	   8: Js02o       	$0x254772D4
+
+
+
+. 779 254772AC 12
+. 7C 72 1B 79 7F 80 00 26 41 82 00 20
+==== BB 780 (0x254772B8) approx BBs exec'd 0 ====
+
+	0x254772B8:  418E017C  bc 12,14,0x25477434
+	   0: Js14o       	$0x25477434
+
+
+
+. 780 254772B8 4
+. 41 8E 01 7C
+==== BB 781 (0x254772BC) approx BBs exec'd 0 ====
+
+	0x254772BC:  7F69DB78  or r9,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x254772C0:  83A901FC  lwz r29,508(r9)
+	   3: GETL       	R9, t2
+	   4: ADDL       	$0x1FC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x254772C4:  73A90800  andi. r9,r29,0x800
+	   8: GETL       	R29, t6
+	   9: ANDL       	$0x800, t6
+	  10: PUTL       	t6, R9
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x254772C8:  408201D4  bc 4,2,0x2547749C
+	  14: Jc02o       	$0x2547749C
+
+
+
+. 781 254772BC 16
+. 7F 69 DB 78 83 A9 01 FC 73 A9 08 00 40 82 01 D4
+==== BB 782 (0x254772CC) approx BBs exec'd 0 ====
+
+	0x254772CC:  7F880120  mtcrf 0x80,r28
+	   0: GETL       	R28, t0
+	   1: ICRFL       	t0, $0x0, CR
+	   2: INCEIPL       	$4
+
+	0x254772D0:  408200C8  bc 4,2,0x25477398
+	   3: Jc02o       	$0x25477398
+
+
+
+. 782 254772CC 8
+. 7F 88 01 20 40 82 00 C8
+==== BB 783 (0x25477398) approx BBs exec'd 0 ====
+
+	0x25477398:  7E439378  or r3,r18,r18
+	   0: GETL       	R18, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547739C:  38810018  addi r4,r1,24
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x18, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0x254773A0:  4BFFF0A5  bl 0x25476444
+	   7: MOVL       	$0x254773A4, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0x25476444  ($4)
+
+
+
+. 783 25477398 12
+. 7E 43 93 78 38 81 00 18 4B FF F0 A5
+==== BB 784 (0x254765B4) approx BBs exec'd 0 ====
+
+	0x254765B4:  80FD0010  lwz r7,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254765B8:  2F070020  cmpi cr6,r7,32
+	   5: GETL       	R7, t4
+	   6: MOVL       	$0x20, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x254765BC:  409AFFA0  bc 4,26,0x2547655C
+	  10: Jc26o       	$0x2547655C
+
+
+
+. 784 254765B4 12
+. 80 FD 00 10 2F 07 00 20 40 9A FF A0
+==== BB 785 (0x254765C0) approx BBs exec'd 0 ====
+
+	0x254765C0:  815D001C  lwz r10,28(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254765C4:  2B8A0003  cmpli cr7,r10,3
+	   5: GETL       	R10, t4
+	   6: MOVL       	$0x3, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x254765C8:  40BDFF94  bc 5,29,0x2547655C
+	  10: Jc29o       	$0x2547655C
+
+
+
+. 785 254765C0 12
+. 81 5D 00 1C 2B 8A 00 03 40 BD FF 94
+==== BB 786 (0x254765CC) approx BBs exec'd 0 ====
+
+	0x254765CC:  819D0004  lwz r12,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x254765D0:  38A00000  li r5,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x254765D4:  81180000  lwz r8,0(r24)
+	   8: GETL       	R24, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R8
+	  11: INCEIPL       	$4
+
+	0x254765D8:  3ABF0010  addi r21,r31,16
+	  12: GETL       	R31, t10
+	  13: ADDL       	$0x10, t10
+	  14: PUTL       	t10, R21
+	  15: INCEIPL       	$4
+
+	0x254765DC:  380C0020  addi r0,r12,32
+	  16: GETL       	R12, t12
+	  17: ADDL       	$0x20, t12
+	  18: PUTL       	t12, R0
+	  19: INCEIPL       	$4
+
+	0x254765E0:  7D6CC214  add r11,r12,r24
+	  20: GETL       	R12, t14
+	  21: GETL       	R24, t16
+	  22: ADDL       	t14, t16
+	  23: PUTL       	t16, R11
+	  24: INCEIPL       	$4
+
+	0x254765E4:  7C004040  cmpl cr0,r0,r8
+	  25: GETL       	R0, t18
+	  26: GETL       	R8, t20
+	  27: CMPUL       	t18, t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0x254765E8:  7F23CB78  or r3,r25,r25
+	  30: GETL       	R25, t24
+	  31: PUTL       	t24, R3
+	  32: INCEIPL       	$4
+
+	0x254765EC:  7D846378  or r4,r12,r12
+	  33: GETL       	R12, t26
+	  34: PUTL       	t26, R4
+	  35: INCEIPL       	$4
+
+	0x254765F0:  3B6B0004  addi r27,r11,4
+	  36: GETL       	R11, t28
+	  37: ADDL       	$0x4, t28
+	  38: PUTL       	t28, R27
+	  39: INCEIPL       	$4
+
+	0x254765F4:  418100FC  bc 12,1,0x254766F0
+	  40: Js01o       	$0x254766F0
+
+
+
+. 786 254765CC 44
+. 81 9D 00 04 38 A0 00 00 81 18 00 00 3A BF 00 10 38 0C 00 20 7D 6C C2 14 7C 00 40 40 7F 23 CB 78 7D 84 63 78 3B 6B 00 04 41 81 00 FC
+==== BB 787 (0x254765F8) approx BBs exec'd 0 ====
+
+	0x254765F8:  809E0170  lwz r4,368(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x170, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254765FC:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25476600:  38A00010  li r5,16
+	   8: MOVL       	$0x10, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25476604:  4800CC95  bl 0x25483298
+	  11: MOVL       	$0x25476608, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+
+. 787 254765F8 16
+. 80 9E 01 70 7F 63 DB 78 38 A0 00 10 48 00 CC 95
+==== BB 788 (0x25483310) approx BBs exec'd 0 ====
+
+	0x25483310:  28000001  cmpli cr0,r0,1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25483314:  41800228  bc 12,0,0x2548353C
+	   5: Js00o       	$0x2548353C
+
+
+
+. 788 25483310 8
+. 28 00 00 01 41 80 02 28
+==== BB 789 (0x2548353C) approx BBs exec'd 0 ====
+
+	0x2548353C:  80EB0000  lwz r7,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25483540:  80040000  lwz r0,0(r4)
+	   4: GETL       	R4, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0x25483544:  7F870000  cmp cr7,r7,r0
+	   8: GETL       	R7, t8
+	   9: GETL       	R0, t10
+	  10: CMPL       	t8, t10, t12  (-rSo)
+	  11: ICRFL       	t12, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x25483548:  806A0004  lwz r3,4(r10)
+	  13: GETL       	R10, t14
+	  14: ADDL       	$0x4, t14
+	  15: LDL       	(t14), t16
+	  16: PUTL       	t16, R3
+	  17: INCEIPL       	$4
+
+	0x2548354C:  80C90004  lwz r6,4(r9)
+	  18: GETL       	R9, t18
+	  19: ADDL       	$0x4, t18
+	  20: LDL       	(t18), t20
+	  21: PUTL       	t20, R6
+	  22: INCEIPL       	$4
+
+	0x25483550:  409EFDF4  bc 4,30,0x25483344
+	  23: Jc30o       	$0x25483344
+
+
+
+. 789 2548353C 24
+. 80 EB 00 00 80 04 00 00 7F 87 00 00 80 6A 00 04 80 C9 00 04 40 9E FD F4
+==== BB 790 (0x25476608) approx BBs exec'd 0 ====
+
+	0x25476608:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547660C:  419A0084  bc 12,26,0x25476690
+	   4: Js26o       	$0x25476690
+
+
+
+. 790 25476608 8
+. 2F 03 00 00 41 9A 00 84
+==== BB 791 (0x25476690) approx BBs exec'd 0 ====
+
+	0x25476690:  813B0010  lwz r9,16(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25476694:  8B5B0017  lbz r26,23(r27)
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x17, t4
+	   7: LDB       	(t4), t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0x25476698:  8BBB001B  lbz r29,27(r27)
+	  10: GETL       	R27, t8
+	  11: ADDL       	$0x1B, t8
+	  12: LDB       	(t8), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0x2547669C:  2F890000  cmpi cr7,r9,0
+	  15: GETL       	R9, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x254766A0:  5755402E  rlwinm r21,r26,8,0,23
+	  19: GETL       	R26, t16
+	  20: SHLL       	$0x8, t16
+	  21: PUTL       	t16, R21
+	  22: INCEIPL       	$4
+
+	0x254766A4:  8B1B001F  lbz r24,31(r27)
+	  23: GETL       	R27, t18
+	  24: ADDL       	$0x1F, t18
+	  25: LDB       	(t18), t20
+	  26: PUTL       	t20, R24
+	  27: INCEIPL       	$4
+
+	0x254766A8:  7F95EA14  add r28,r21,r29
+	  28: GETL       	R21, t22
+	  29: GETL       	R29, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R28
+	  32: INCEIPL       	$4
+
+	0x254766AC:  5796402E  rlwinm r22,r28,8,0,23
+	  33: GETL       	R28, t26
+	  34: SHLL       	$0x8, t26
+	  35: PUTL       	t26, R22
+	  36: INCEIPL       	$4
+
+	0x254766B0:  7C16C214  add r0,r22,r24
+	  37: GETL       	R22, t28
+	  38: GETL       	R24, t30
+	  39: ADDL       	t28, t30
+	  40: PUTL       	t30, R0
+	  41: INCEIPL       	$4
+
+	0x254766B4:  409E001C  bc 4,30,0x254766D0
+	  42: Jc30o       	$0x254766D0
+
+
+
+. 791 25476690 40
+. 81 3B 00 10 8B 5B 00 17 8B BB 00 1B 2F 89 00 00 57 55 40 2E 8B 1B 00 1F 7F 95 EA 14 57 96 40 2E 7C 16 C2 14 40 9E 00 1C
+==== BB 792 (0x254766B8) approx BBs exec'd 0 ====
+
+	0x254766B8:  807E04F4  lwz r3,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254766BC:  81230008  lwz r9,8(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x254766C0:  2C090000  cmpi cr0,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x254766C4:  41A2FEAC  bc 13,2,0x25476570
+	  14: Js02o       	$0x25476570
+
+
+
+. 792 254766B8 16
+. 80 7E 04 F4 81 23 00 08 2C 09 00 00 41 A2 FE AC
+==== BB 793 (0x254766C8) approx BBs exec'd 0 ====
+
+	0x254766C8:  7C890040  cmpl cr1,r9,r0
+	   0: GETL       	R9, t0
+	   1: GETL       	R0, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254766CC:  40A4FEA4  bc 5,4,0x25476570
+	   5: Jc04o       	$0x25476570
+
+
+
+. 793 254766C8 8
+. 7C 89 00 40 40 A4 FE A4
+==== BB 794 (0x254773A4) approx BBs exec'd 0 ====
+
+	0x254773A4:  2E03FFFF  cmpi cr4,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x254773A8:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R26
+	   7: INCEIPL       	$4
+
+	0x254773AC:  41B2FF2C  bc 13,18,0x254772D8
+	   8: Js18o       	$0x254772D8
+
+
+
+. 794 254773A4 12
+. 2E 03 FF FF 7C 7A 1B 78 41 B2 FF 2C
+==== BB 795 (0x254773B0) approx BBs exec'd 0 ====
+
+	0x254773B0:  7E439378  or r3,r18,r18
+	   0: GETL       	R18, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254773B4:  4800BC1D  bl 0x25482FD0
+	   3: MOVL       	$0x254773B8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+
+. 795 254773B0 8
+. 7E 43 93 78 48 00 BC 1D
+==== BB 796 (0x254773B8) approx BBs exec'd 0 ====
+
+	0x254773B8:  3BA30001  addi r29,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x254773BC:  7FA3EB78  or r3,r29,r29
+	   4: GETL       	R29, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0x254773C0:  48020665  bl 0x25497A24
+	   7: MOVL       	$0x254773C4, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 796 254773B8 12
+. 3B A3 00 01 7F A3 EB 78 48 02 06 65
+==== BB 797 (0x254773C4) approx BBs exec'd 0 ====
+
+	0x254773C4:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254773C8:  2F030000  cmpi cr6,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x6, CR
+	   6: INCEIPL       	$4
+
+	0x254773CC:  419A0014  bc 12,26,0x254773E0
+	   7: Js26o       	$0x254773E0
+
+
+
+. 797 254773C4 12
+. 38 00 00 00 2F 03 00 00 41 9A 00 14
+==== BB 798 (0x254773D0) approx BBs exec'd 0 ====
+
+	0x254773D0:  7E449378  or r4,r18,r18
+	   0: GETL       	R18, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254773D4:  7FA5EB78  or r5,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x254773D8:  4800C809  bl 0x25483BE0
+	   6: MOVL       	$0x254773DC, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 798 254773D0 12
+. 7E 44 93 78 7F A5 EB 78 48 00 C8 09
+==== BB 799 (0x25483EE8) approx BBs exec'd 0 ====
+
+	0x25483EE8:  81640000  lwz r11,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25483EEC:  38A5FFFF  addi r5,r5,-1
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25483EF0:  81440004  lwz r10,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x25483EF4:  38840008  addi r4,r4,8
+	  13: GETL       	R4, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0x25483EF8:  4BFFFF54  b 0x25483E4C
+	  17: JMPo       	$0x25483E4C  ($4)
+
+
+
+. 799 25483EE8 20
+. 81 64 00 00 38 A5 FF FF 81 44 00 04 38 84 00 08 4B FF FF 54
+==== BB 800 (0x254773DC) approx BBs exec'd 0 ====
+
+	0x254773DC:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254773E0:  2F800000  cmpi cr7,r0,0
+	   3: GETL       	R0, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x7, CR
+	   6: INCEIPL       	$4
+
+	0x254773E4:  90010228  stw r0,552(r1)
+	   7: GETL       	R0, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0x228, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x254773E8:  409EFEEC  bc 4,30,0x254772D4
+	  12: Jc30o       	$0x254772D4
+
+
+
+. 800 254773DC 16
+. 7C 60 1B 78 2F 80 00 00 90 01 02 28 40 9E FE EC
+==== BB 801 (0x254772D4) approx BBs exec'd 0 ====
+
+	0x254772D4:  40920050  bc 4,18,0x25477324
+	   0: Jc18o       	$0x25477324
+
+
+
+. 801 254772D4 4
+. 40 92 00 50
+==== BB 802 (0x25477324) approx BBs exec'd 0 ====
+
+	0x25477324:  82F40000  lwz r23,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R23
+	   3: INCEIPL       	$4
+
+	0x25477328:  72E90001  andi. r9,r23,0x1
+	   4: GETL       	R23, t4
+	   5: ANDL       	$0x1, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547732C:  4182FAD8  bc 12,2,0x25476E04
+	  10: Js02o       	$0x25476E04
+
+
+
+. 802 25477324 12
+. 82 F4 00 00 72 E9 00 01 41 82 FA D8
+==== BB 803 (0x25483DC4) approx BBs exec'd 0 ====
+
+	0x25483DC4:  81240000  lwz r9,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25483DC8:  3884FFF4  addi r4,r4,-12
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0xFFFFFFF4, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25483DCC:  80040010  lwz r0,16(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x25483DD0:  3863FFF0  addi r3,r3,-16
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0xFFFFFFF0, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x25483DD4:  38A50003  addi r5,r5,3
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x3, t12
+	  19: PUTL       	t12, R5
+	  20: INCEIPL       	$4
+
+	0x25483DD8:  91230010  stw r9,16(r3)
+	  21: GETL       	R9, t14
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x10, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0x25483DDC:  4BFFFF58  b 0x25483D34
+	  26: JMPo       	$0x25483D34  ($4)
+
+
+
+. 803 25483DC4 28
+. 81 24 00 00 38 84 FF F4 80 04 00 10 38 63 FF F0 38 A5 00 03 91 23 00 10 4B FF FF 58
+==== BB 804 (0x25483D34) approx BBs exec'd 0 ====
+
+	0x25483D34:  81240014  lwz r9,20(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25483D38:  90030014  stw r0,20(r3)
+	   5: GETL       	R0, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x14, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	  15: GETL       	R9, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0x18, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  20: GETL       	R5, t16
+	  21: ADCL       	$0xFFFFFFF8, t16  (-wCa)
+	  22: PUTL       	t16, R5
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  26: GETL       	R4, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  31: GETL       	R0, t24
+	  32: GETL       	R3, t26
+	  33: ADDL       	$0x1C, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  36: GETL       	R4, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R4
+	  39: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  40: GETL       	R3, t30
+	  41: ADDL       	$0x20, t30
+	  42: PUTL       	t30, R3
+	  43: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  44: Jc02o       	$0x25483D0C
+
+
+
+. 804 25483D34 40
+. 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 805 (0x254758A8) approx BBs exec'd 0 ====
+
+	0x254758A8:  54EF2834  rlwinm r15,r7,5,0,26
+	   0: GETL       	R7, t0
+	   1: SHLL       	$0x5, t0
+	   2: PUTL       	t0, R15
+	   3: INCEIPL       	$4
+
+	0x254758AC:  801D0008  lwz r0,8(r29)
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x8, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254758B0:  7DCFDA14  add r14,r15,r27
+	   9: GETL       	R15, t6
+	  10: GETL       	R27, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R14
+	  13: INCEIPL       	$4
+
+	0x254758B4:  3BBD0020  addi r29,r29,32
+	  14: GETL       	R29, t10
+	  15: ADDL       	$0x20, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0x254758B8:  7C8EE840  cmpl cr1,r14,r29
+	  18: GETL       	R14, t12
+	  19: GETL       	R29, t14
+	  20: CMPUL       	t12, t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0x254758BC:  901A014C  stw r0,332(r26)
+	  23: GETL       	R0, t18
+	  24: GETL       	R26, t20
+	  25: ADDL       	$0x14C, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0x254758C0:  4185FF4C  bc 12,5,0x2547580C
+	  28: Js05o       	$0x2547580C
+
+
+
+. 805 254758A8 28
+. 54 EF 28 34 80 1D 00 08 7D CF DA 14 3B BD 00 20 7C 8E E8 40 90 1A 01 4C 41 85 FF 4C
+==== BB 806 (0x25475830) approx BBs exec'd 0 ====
+
+	0x25475830:  7F090040  cmpl cr6,r9,r0
+	   0: GETL       	R9, t0
+	   1: GETL       	R0, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25475834:  4199024C  bc 12,25,0x25475A80
+	   5: Js25o       	$0x25475A80
+
+
+
+. 806 25475830 8
+. 7F 09 00 40 41 99 02 4C
+==== BB 807 (0x25475838) approx BBs exec'd 0 ====
+
+	0x25475838:  2C090007  cmpi cr0,r9,7
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x7, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547583C:  4082FFBC  bc 4,2,0x254757F8
+	   5: Jc02o       	$0x254757F8
+
+
+
+. 807 25475838 8
+. 2C 09 00 07 40 82 FF BC
+==== BB 808 (0x25475A80) approx BBs exec'd 0 ====
+
+	0x25475A80:  3D406474  lis r10,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x25475A84:  614FE552  ori r15,r10,0xE552
+	   3: MOVL       	$0x6474E552, t2
+	   4: PUTL       	t2, R15
+	   5: INCEIPL       	$4
+
+	0x25475A88:  7F897800  cmp cr7,r9,r15
+	   6: GETL       	R9, t4
+	   7: GETL       	R15, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x25475A8C:  409EFD6C  bc 4,30,0x254757F8
+	  11: Jc30o       	$0x254757F8
+
+
+
+. 808 25475A80 16
+. 3D 40 64 74 61 4F E5 52 7F 89 78 00 40 9E FD 6C
+==== BB 809 (0x25475A90) approx BBs exec'd 0 ====
+
+	0x25475A90:  811D0008  lwz r8,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25475A94:  911A0234  stw r8,564(r26)
+	   5: GETL       	R8, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x234, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25475A98:  80DD0014  lwz r6,20(r29)
+	  10: GETL       	R29, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R6
+	  14: INCEIPL       	$4
+
+	0x25475A9C:  90DA0238  stw r6,568(r26)
+	  15: GETL       	R6, t12
+	  16: GETL       	R26, t14
+	  17: ADDL       	$0x238, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25475AA0:  4BFFFD58  b 0x254757F8
+	  20: JMPo       	$0x254757F8  ($4)
+
+
+
+. 809 25475A90 20
+. 81 1D 00 08 91 1A 02 34 80 DD 00 14 90 DA 02 38 4B FF FD 58
+==22130== Reading syms from /lib/tls/librt-2.3.4.so (0xEE80000)
+==22130==    object doesn't have any debug info
+==== BB 810 (0x254759E0) approx BBs exec'd 0 ====
+
+	0x254759E0:  7C9CC840  cmpl cr1,r28,r25
+	   0: GETL       	R28, t0
+	   1: GETL       	R25, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254759E4:  4085002C  bc 4,5,0x25475A10
+	   5: Jc05o       	$0x25475A10
+
+
+
+. 810 254759E0 8
+. 7C 9C C8 40 40 85 00 2C
+==== BB 811 (0x25483884) approx BBs exec'd 0 ====
+
+	0x25483884:  7C0037EC  dcbz r0,r6
+	   0: GETL       	R6, t0
+	   1: ANDL       	$0xFFFFFFE0, t0
+	   2: MOVL       	$0x0, t2
+	   3: STL       	t2, (t0)
+	   4: ADDL       	$0x4, t0
+	   5: STL       	t2, (t0)
+	   6: ADDL       	$0x4, t0
+	   7: STL       	t2, (t0)
+	   8: ADDL       	$0x4, t0
+	   9: STL       	t2, (t0)
+	  10: ADDL       	$0x4, t0
+	  11: STL       	t2, (t0)
+	  12: ADDL       	$0x4, t0
+	  13: STL       	t2, (t0)
+	  14: ADDL       	$0x4, t0
+	  15: STL       	t2, (t0)
+	  16: ADDL       	$0x4, t0
+	  17: STL       	t2, (t0)
+	  18: INCEIPL       	$4
+
+	0x25483888:  38C60020  addi r6,r6,32
+	  19: GETL       	R6, t4
+	  20: ADDL       	$0x20, t4
+	  21: PUTL       	t4, R6
+	  22: INCEIPL       	$4
+
+	0x2548388C:  3920FFE0  li r9,-32
+	  23: MOVL       	$0xFFFFFFE0, t6
+	  24: PUTL       	t6, R9
+	  25: INCEIPL       	$4
+
+	0x25483890:  40990010  bc 4,25,0x254838A0
+	  26: Jc25o       	$0x254838A0
+
+
+
+. 811 25483884 16
+. 7C 00 37 EC 38 C6 00 20 39 20 FF E0 40 99 00 10
+==== BB 812 (0x25483894) approx BBs exec'd 0 ====
+
+	0x25483894:  7C0037EC  dcbz r0,r6
+	   0: GETL       	R6, t0
+	   1: ANDL       	$0xFFFFFFE0, t0
+	   2: MOVL       	$0x0, t2
+	   3: STL       	t2, (t0)
+	   4: ADDL       	$0x4, t0
+	   5: STL       	t2, (t0)
+	   6: ADDL       	$0x4, t0
+	   7: STL       	t2, (t0)
+	   8: ADDL       	$0x4, t0
+	   9: STL       	t2, (t0)
+	  10: ADDL       	$0x4, t0
+	  11: STL       	t2, (t0)
+	  12: ADDL       	$0x4, t0
+	  13: STL       	t2, (t0)
+	  14: ADDL       	$0x4, t0
+	  15: STL       	t2, (t0)
+	  16: ADDL       	$0x4, t0
+	  17: STL       	t2, (t0)
+	  18: INCEIPL       	$4
+
+	0x25483898:  7C0737EC  dcbz r7,r6
+	  19: GETL       	R6, t4
+	  20: GETL       	R7, t6
+	  21: ADDL       	t6, t4
+	  22: ANDL       	$0xFFFFFFE0, t4
+	  23: MOVL       	$0x0, t8
+	  24: STL       	t8, (t4)
+	  25: ADDL       	$0x4, t4
+	  26: STL       	t8, (t4)
+	  27: ADDL       	$0x4, t4
+	  28: STL       	t8, (t4)
+	  29: ADDL       	$0x4, t4
+	  30: STL       	t8, (t4)
+	  31: ADDL       	$0x4, t4
+	  32: STL       	t8, (t4)
+	  33: ADDL       	$0x4, t4
+	  34: STL       	t8, (t4)
+	  35: ADDL       	$0x4, t4
+	  36: STL       	t8, (t4)
+	  37: ADDL       	$0x4, t4
+	  38: STL       	t8, (t4)
+	  39: INCEIPL       	$4
+
+	0x2548389C:  38C60040  addi r6,r6,64
+	  40: GETL       	R6, t10
+	  41: ADDL       	$0x40, t10
+	  42: PUTL       	t10, R6
+	  43: INCEIPL       	$4
+
+	0x254838A0:  2A850000  cmpli cr5,r5,0
+	  44: GETL       	R5, t12
+	  45: MOVL       	$0x0, t16
+	  46: CMPUL       	t12, t16, t14  (-rSo)
+	  47: ICRFL       	t14, $0x5, CR
+	  48: INCEIPL       	$4
+
+	0x254838A4:  4182007C  bc 12,2,0x25483920
+	  49: Js02o       	$0x25483920
+
+
+
+. 812 25483894 20
+. 7C 00 37 EC 7C 07 37 EC 38 C6 00 40 2A 85 00 00 41 82 00 7C
+==== BB 813 (0x254760C0) approx BBs exec'd 0 ====
+
+	0x254760C0:  80BD0014  lwz r5,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254760C4:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254760C8:  7C9CB850  subf r4,r28,r23
+	   8: GETL       	R28, t6
+	   9: GETL       	R23, t8
+	  10: SUBL       	t6, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x254760CC:  38C00032  li r6,50
+	  13: MOVL       	$0x32, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x254760D0:  38E0FFFF  li r7,-1
+	  16: MOVL       	$0xFFFFFFFF, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x254760D4:  39000000  li r8,0
+	  19: MOVL       	$0x0, t14
+	  20: PUTL       	t14, R8
+	  21: INCEIPL       	$4
+
+	0x254760D8:  4800C6C1  bl 0x25482798
+	  22: MOVL       	$0x254760DC, t16
+	  23: PUTL       	t16, LR
+	  24: JMPo-c       	$0x25482798  ($4)
+
+
+
+. 813 254760C0 28
+. 80 BD 00 14 7F 83 E3 78 7C 9C B8 50 38 C0 00 32 38 E0 FF FF 39 00 00 00 48 00 C6 C1
+==== BB 814 (0x254760DC) approx BBs exec'd 0 ====
+
+	0x254760DC:  2C83FFFF  cmpi cr1,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254760E0:  4086F938  bc 4,6,0x25475A18
+	   5: Jc06o       	$0x25475A18
+
+
+
+. 814 254760DC 8
+. 2C 83 FF FF 40 86 F9 38
+==== BB 815 (0x25476054) approx BBs exec'd 0 ====
+
+	0x25476054:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0x25476058:  7D800E70  srawi r0,r12,1
+	   4: GETL       	R12, t2
+	   5: SARL       	$0x1, t2  (-wCa)
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x2547605C:  7C1700F8  nor r23,r0,r0
+	   8: GETL       	R0, t4
+	   9: NOTL       	t4
+	  10: PUTL       	t4, R23
+	  11: INCEIPL       	$4
+
+	0x25476060:  20000031  subfic r0,r0,49
+	  12: GETL       	R0, t6
+	  13: MOVL       	$0x31, t8
+	  14: SBBL       	t6, t8  (-wCa)
+	  15: PUTL       	t8, R0
+	  16: INCEIPL       	$4
+
+	0x25476064:  2B170002  cmpli cr6,r23,2
+	  17: GETL       	R23, t10
+	  18: MOVL       	$0x2, t14
+	  19: CMPUL       	t10, t14, t12  (-rSo)
+	  20: ICRFL       	t12, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x25476068:  40B9FC38  bc 5,25,0x25475CA0
+	  22: Jc25o       	$0x25475CA0
+
+
+
+. 815 25476054 24
+. 55 6C 08 3C 7D 80 0E 70 7C 17 00 F8 20 00 00 31 2B 17 00 02 40 B9 FC 38
+==== BB 816 (0x2547606C) approx BBs exec'd 0 ====
+
+	0x2547606C:  7F2B2850  subf r25,r11,r5
+	   0: GETL       	R11, t0
+	   1: GETL       	R5, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x25476070:  2A19000B  cmpli cr4,r25,11
+	   5: GETL       	R25, t4
+	   6: MOVL       	$0xB, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x4, CR
+	   9: INCEIPL       	$4
+
+	0x25476074:  419100C4  bc 12,17,0x25476138
+	  10: Js17o       	$0x25476138
+
+
+
+. 816 2547606C 12
+. 7F 2B 28 50 2A 19 00 0B 41 91 00 C4
+==== BB 817 (0x25476078) approx BBs exec'd 0 ====
+
+	0x25476078:  7C0B2050  subf r0,r11,r4
+	   0: GETL       	R11, t0
+	   1: GETL       	R4, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547607C:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25476080:  4BFFFC24  b 0x25475CA4
+	   9: JMPo       	$0x25475CA4  ($4)
+
+
+
+. 817 25476078 12
+. 7C 0B 20 50 54 00 10 3A 4B FF FC 24
+==== BB 818 (0x25475D68) approx BBs exec'd 0 ====
+
+	0x25475D68:  81280078  lwz r9,120(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x78, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475D6C:  2C090000  cmpi cr0,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25475D70:  41820030  bc 12,2,0x25475DA0
+	   9: Js02o       	$0x25475DA0
+
+
+
+. 818 25475D68 12
+. 81 28 00 78 2C 09 00 00 41 82 00 30
+==== BB 819 (0x25475F88) approx BBs exec'd 0 ====
+
+	0x25475F88:  80910050  lwz r4,80(r17)
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25475F8C:  C81F0028  lfd f0,40(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x28, t4
+	   7: FPU_RQ       	(t4), 0x0:0x0
+	   8: INCEIPL       	$4
+
+	0x25475F90:  2C840000  cmpi cr1,r4,0
+	   9: GETL       	R4, t6
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0x25475F94:  D81A01D0  stfd f0,464(r26)
+	  13: GETL       	R26, t10
+	  14: ADDL       	$0x1D0, t10
+	  15: FPU_WQ       	0x0:0x0, (t10)
+	  16: INCEIPL       	$4
+
+	0x25475F98:  C9BF0030  lfd f13,48(r31)
+	  17: GETL       	R31, t12
+	  18: ADDL       	$0x30, t12
+	  19: FPU_RQ       	(t12), 0x0:0xD
+	  20: INCEIPL       	$4
+
+	0x25475F9C:  D9BA01D8  stfd f13,472(r26)
+	  21: GETL       	R26, t14
+	  22: ADDL       	$0x1D8, t14
+	  23: FPU_WQ       	0x0:0xD, (t14)
+	  24: INCEIPL       	$4
+
+	0x25475FA0:  4186FEB0  bc 12,6,0x25475E50
+	  25: Js06o       	$0x25475E50
+
+
+
+. 819 25475F88 28
+. 80 91 00 50 C8 1F 00 28 2C 84 00 00 D8 1A 01 D0 C9 BF 00 30 D9 BA 01 D8 41 86 FE B0
+==== BB 820 (0x2547A100) approx BBs exec'd 0 ====
+
+	0x2547A100:  907F0014  stw r3,20(r31)
+	   0: GETL       	R3, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547A104:  80610014  lwz r3,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547A108:  83E10008  lwz r31,8(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0x2547A10C:  38210010  addi r1,r1,16
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x10, t12
+	  17: PUTL       	t12, R1
+	  18: INCEIPL       	$4
+
+	0x2547A110:  7C6803A6  mtlr r3
+	  19: GETL       	R3, t14
+	  20: PUTL       	t14, LR
+	  21: INCEIPL       	$4
+
+	0x2547A114:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 820 2547A100 24
+. 90 7F 00 14 80 61 00 14 83 E1 00 08 38 21 00 10 7C 68 03 A6 4E 80 00 20
+==== BB 821 (0x2547B624) approx BBs exec'd 0 ====
+
+	0x2547B624:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547B628:  81610008  lwz r11,8(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547B62C:  80C10280  lwz r6,640(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x280, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R6
+	  12: INCEIPL       	$4
+
+	0x2547B630:  80A10270  lwz r5,624(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x270, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0x2547B634:  80810274  lwz r4,628(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x274, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R4
+	  22: INCEIPL       	$4
+
+	0x2547B638:  93E50000  stw r31,0(r5)
+	  23: GETL       	R31, t18
+	  24: GETL       	R5, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0x2547B63C:  91660000  stw r11,0(r6)
+	  27: GETL       	R11, t22
+	  28: GETL       	R6, t24
+	  29: STL       	t22, (t24)
+	  30: INCEIPL       	$4
+
+	0x2547B640:  93E40000  stw r31,0(r4)
+	  31: GETL       	R31, t26
+	  32: GETL       	R4, t28
+	  33: STL       	t26, (t28)
+	  34: INCEIPL       	$4
+
+	0x2547B644:  814102A4  lwz r10,676(r1)
+	  35: GETL       	R1, t30
+	  36: ADDL       	$0x2A4, t30
+	  37: LDL       	(t30), t32
+	  38: PUTL       	t32, R10
+	  39: INCEIPL       	$4
+
+	0x2547B648:  83C10298  lwz r30,664(r1)
+	  40: GETL       	R1, t34
+	  41: ADDL       	$0x298, t34
+	  42: LDL       	(t34), t36
+	  43: PUTL       	t36, R30
+	  44: INCEIPL       	$4
+
+	0x2547B64C:  83E1029C  lwz r31,668(r1)
+	  45: GETL       	R1, t38
+	  46: ADDL       	$0x29C, t38
+	  47: LDL       	(t38), t40
+	  48: PUTL       	t40, R31
+	  49: INCEIPL       	$4
+
+	0x2547B650:  7D4803A6  mtlr r10
+	  50: GETL       	R10, t42
+	  51: PUTL       	t42, LR
+	  52: INCEIPL       	$4
+
+	0x2547B654:  382102A0  addi r1,r1,672
+	  53: GETL       	R1, t44
+	  54: ADDL       	$0x2A0, t44
+	  55: PUTL       	t44, R1
+	  56: INCEIPL       	$4
+
+	0x2547B658:  4E800020  blr
+	  57: GETL       	LR, t46
+	  58: JMPo-r       	t46  ($4)
+
+
+
+. 821 2547B624 56
+. 38 60 00 00 81 61 00 08 80 C1 02 80 80 A1 02 70 80 81 02 74 93 E5 00 00 91 66 00 00 93 E4 00 00 81 41 02 A4 83 C1 02 98 83 E1 02 9C 7D 48 03 A6 38 21 02 A0 4E 80 00 20
+==== BB 822 (0x2547A400) approx BBs exec'd 0 ====
+
+	0x2547A400:  833F0034  lwz r25,52(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x2547A404:  2F190000  cmpi cr6,r25,0
+	   5: GETL       	R25, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547A408:  409A01AC  bc 4,26,0x2547A5B4
+	   9: Jc26o       	$0x2547A5B4
+
+
+
+. 822 2547A400 12
+. 83 3F 00 34 2F 19 00 00 40 9A 01 AC
+==== BB 823 (0x2547A40C) approx BBs exec'd 0 ====
+
+	0x2547A40C:  815F0024  lwz r10,36(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547A410:  808A0180  lwz r4,384(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x180, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547A414:  54893FBE  rlwinm r9,r4,7,30,31
+	  10: GETL       	R4, t8
+	  11: ROLL       	$0x7, t8
+	  12: ANDL       	$0x3, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x2547A418:  2F890000  cmpi cr7,r9,0
+	  15: GETL       	R9, t10
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547A41C:  409E0044  bc 4,30,0x2547A460
+	  19: Jc30o       	$0x2547A460
+
+
+
+. 823 2547A40C 20
+. 81 5F 00 24 80 8A 01 80 54 89 3F BE 2F 89 00 00 40 9E 00 44
+==== BB 824 (0x2547A420) approx BBs exec'd 0 ====
+
+	0x2547A420:  80010000  lwz r0,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547A424:  38C00001  li r6,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0x2547A428:  80FF000C  lwz r7,12(r31)
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0xC, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R7
+	  11: INCEIPL       	$4
+
+	0x2547A42C:  9401FFE0  stwu r0,-32(r1)
+	  12: GETL       	R0, t10
+	  13: GETL       	R1, t12
+	  14: ADDL       	$0xFFFFFFE0, t12
+	  15: PUTL       	t12, R1
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547A430:  3A810017  addi r20,r1,23
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x17, t14
+	  20: PUTL       	t14, R20
+	  21: INCEIPL       	$4
+
+	0x2547A434:  56850036  rlwinm r5,r20,0,0,27
+	  22: GETL       	R20, t16
+	  23: ANDL       	$0xFFFFFFF0, t16
+	  24: PUTL       	t16, R5
+	  25: INCEIPL       	$4
+
+	0x2547A438:  91250008  stw r9,8(r5)
+	  26: GETL       	R9, t18
+	  27: GETL       	R5, t20
+	  28: ADDL       	$0x8, t20
+	  29: STL       	t18, (t20)
+	  30: INCEIPL       	$4
+
+	0x2547A43C:  91250000  stw r9,0(r5)
+	  31: GETL       	R9, t22
+	  32: GETL       	R5, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0x2547A440:  39270001  addi r9,r7,1
+	  35: GETL       	R7, t26
+	  36: ADDL       	$0x1, t26
+	  37: PUTL       	t26, R9
+	  38: INCEIPL       	$4
+
+	0x2547A444:  90B30008  stw r5,8(r19)
+	  39: GETL       	R5, t28
+	  40: GETL       	R19, t30
+	  41: ADDL       	$0x8, t30
+	  42: STL       	t28, (t30)
+	  43: INCEIPL       	$4
+
+	0x2547A448:  91450004  stw r10,4(r5)
+	  44: GETL       	R10, t32
+	  45: GETL       	R5, t34
+	  46: ADDL       	$0x4, t34
+	  47: STL       	t32, (t34)
+	  48: INCEIPL       	$4
+
+	0x2547A44C:  826A0180  lwz r19,384(r10)
+	  49: GETL       	R10, t36
+	  50: ADDL       	$0x180, t36
+	  51: LDL       	(t36), t38
+	  52: PUTL       	t38, R19
+	  53: INCEIPL       	$4
+
+	0x2547A450:  913F000C  stw r9,12(r31)
+	  54: GETL       	R9, t40
+	  55: GETL       	R31, t42
+	  56: ADDL       	$0xC, t42
+	  57: STL       	t40, (t42)
+	  58: INCEIPL       	$4
+
+	0x2547A454:  50D3C94C  rlwimi r19,r6,25,5,6
+	  59: GETL       	R19, t44
+	  60: GETL       	R6, t46
+	  61: ROLL       	$0x19, t46
+	  62: ANDL       	$0x6000000, t46
+	  63: ANDL       	$0xF9FFFFFF, t44
+	  64: ORL       	t44, t46
+	  65: PUTL       	t46, R19
+	  66: INCEIPL       	$4
+
+	0x2547A458:  926A0180  stw r19,384(r10)
+	  67: GETL       	R19, t48
+	  68: GETL       	R10, t50
+	  69: ADDL       	$0x180, t50
+	  70: STL       	t48, (t50)
+	  71: INCEIPL       	$4
+
+	0x2547A45C:  7CB32B78  or r19,r5,r5
+	  72: GETL       	R5, t52
+	  73: PUTL       	t52, R19
+	  74: INCEIPL       	$4
+
+	0x2547A460:  2C980000  cmpi cr1,r24,0
+	  75: GETL       	R24, t54
+	  76: CMP0L       	t54, t56  (-rSo)
+	  77: ICRFL       	t56, $0x1, CR
+	  78: INCEIPL       	$4
+
+	0x2547A464:  41860010  bc 12,6,0x2547A474
+	  79: Js06o       	$0x2547A474
+
+
+
+. 824 2547A420 72
+. 80 01 00 00 38 C0 00 01 80 FF 00 0C 94 01 FF E0 3A 81 00 17 56 85 00 36 91 25 00 08 91 25 00 00 39 27 00 01 90 B3 00 08 91 45 00 04 82 6A 01 80 91 3F 00 0C 50 D3 C9 4C 92 6A 01 80 7C B3 2B 78 2C 98 00 00 41 86 00 10
+==== BB 825 (0x2547A474) approx BBs exec'd 0 ====
+
+	0x2547A474:  853C0008  lwzu r9,8(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R28
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x2547A478:  2C090000  cmpi cr0,r9,0
+	   6: GETL       	R9, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547A47C:  4182043C  bc 12,2,0x2547A8B8
+	  10: Js02o       	$0x2547A8B8
+
+
+
+. 825 2547A474 12
+. 85 3C 00 08 2C 09 00 00 41 82 04 3C
+==== BB 826 (0x25482F94) approx BBs exec'd 0 ====
+
+	0x25482F94:  8CC40001  lbzu r6,1(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R4
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0x25482F98:  2C850000  cmpi cr1,r5,0
+	   6: GETL       	R5, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25482F9C:  41860020  bc 12,6,0x25482FBC
+	  10: Js06o       	$0x25482FBC
+
+
+
+. 826 25482F94 12
+. 8C C4 00 01 2C 85 00 00 41 86 00 20
+==== BB 827 (0x25475840) approx BBs exec'd 0 ====
+
+	0x25475840:  801D0014  lwz r0,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475844:  2F000000  cmpi cr6,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475848:  41BAFFB0  bc 13,26,0x254757F8
+	   9: Js26o       	$0x254757F8
+
+
+
+. 827 25475840 12
+. 80 1D 00 14 2F 00 00 00 41 BA FF B0
+==== BB 828 (0x2547584C) approx BBs exec'd 0 ====
+
+	0x2547584C:  901A0220  stw r0,544(r26)
+	   0: GETL       	R0, t0
+	   1: GETL       	R26, t2
+	   2: ADDL       	$0x220, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25475850:  817D001C  lwz r11,28(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25475854:  917A0224  stw r11,548(r26)
+	  10: GETL       	R11, t8
+	  11: GETL       	R26, t10
+	  12: ADDL       	$0x224, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475858:  813D001C  lwz r9,28(r29)
+	  15: GETL       	R29, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x2547585C:  2F890000  cmpi cr7,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0x25475860:  419E07EC  bc 12,30,0x2547604C
+	  24: Js30o       	$0x2547604C
+
+
+
+. 828 2547584C 24
+. 90 1A 02 20 81 7D 00 1C 91 7A 02 24 81 3D 00 1C 2F 89 00 00 41 9E 07 EC
+==== BB 829 (0x25475864) approx BBs exec'd 0 ====
+
+	0x25475864:  80BD0008  lwz r5,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25475868:  3929FFFF  addi r9,r9,-1
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547586C:  7CA04838  and r0,r5,r9
+	   9: GETL       	R5, t6
+	  10: GETL       	R9, t8
+	  11: ANDL       	t6, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x25475870:  901A0228  stw r0,552(r26)
+	  14: GETL       	R0, t10
+	  15: GETL       	R26, t12
+	  16: ADDL       	$0x228, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x25475874:  81DA0180  lwz r14,384(r26)
+	  19: GETL       	R26, t14
+	  20: ADDL       	$0x180, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R14
+	  23: INCEIPL       	$4
+
+	0x25475878:  3CC04000  lis r6,16384
+	  24: MOVL       	$0x40000000, t18
+	  25: PUTL       	t18, R6
+	  26: INCEIPL       	$4
+
+	0x2547587C:  815D0010  lwz r10,16(r29)
+	  27: GETL       	R29, t20
+	  28: ADDL       	$0x10, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R10
+	  31: INCEIPL       	$4
+
+	0x25475880:  55C80002  rlwinm r8,r14,0,0,1
+	  32: GETL       	R14, t24
+	  33: ANDL       	$0xC0000000, t24
+	  34: PUTL       	t24, R8
+	  35: INCEIPL       	$4
+
+	0x25475884:  7C083000  cmp cr0,r8,r6
+	  36: GETL       	R8, t26
+	  37: GETL       	R6, t28
+	  38: CMPL       	t26, t28, t30  (-rSo)
+	  39: ICRFL       	t30, $0x0, CR
+	  40: INCEIPL       	$4
+
+	0x25475888:  915A021C  stw r10,540(r26)
+	  41: GETL       	R10, t32
+	  42: GETL       	R26, t34
+	  43: ADDL       	$0x21C, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0x2547588C:  809D0008  lwz r4,8(r29)
+	  46: GETL       	R29, t36
+	  47: ADDL       	$0x8, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R4
+	  50: INCEIPL       	$4
+
+	0x25475890:  909A0218  stw r4,536(r26)
+	  51: GETL       	R4, t40
+	  52: GETL       	R26, t42
+	  53: ADDL       	$0x218, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0x25475894:  40820A24  bc 4,2,0x254762B8
+	  56: Jc02o       	$0x254762B8
+
+
+
+. 829 25475864 52
+. 80 BD 00 08 39 29 FF FF 7C A0 48 38 90 1A 02 28 81 DA 01 80 3C C0 40 00 81 5D 00 10 55 C8 00 02 7C 08 30 00 91 5A 02 1C 80 9D 00 08 90 9A 02 18 40 82 0A 24
+==== BB 830 (0x25475898) approx BBs exec'd 0 ====
+
+	0x25475898:  4800837D  bl 0x2547DC14
+	   0: MOVL       	$0x2547589C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547DC14  ($4)
+
+
+
+. 830 25475898 4
+. 48 00 83 7D
+==== BB 831 _dl_next_tls_modid(0x2547DC14) approx BBs exec'd 0 ====
+
+	0x2547DC14:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547DC18:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x2547DC1C:  480193E5  bl 0x25497000
+	   9: MOVL       	$0x2547DC20, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 831 2547DC14 12
+. 94 21 FF F0 7D 88 02 A6 48 01 93 E5
+==== BB 832 (0x2547DC20) approx BBs exec'd 0 ====
+
+	0x2547DC20:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547DC24:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547DC28:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x2547DC2C:  80DE04C8  lwz r6,1224(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x4C8, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x2547DC30:  8806040C  lbz r0,1036(r6)
+	  16: GETL       	R6, t12
+	  17: ADDL       	$0x40C, t12
+	  18: LDB       	(t12), t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x2547DC34:  2F800000  cmpi cr7,r0,0
+	  21: GETL       	R0, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x2547DC38:  409E0020  bc 4,30,0x2547DC58
+	  25: Jc30o       	$0x2547DC58
+
+
+
+. 832 2547DC20 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 DE 04 C8 88 06 04 0C 2F 80 00 00 40 9E 00 20
+==== BB 833 (0x2547DC3C) approx BBs exec'd 0 ====
+
+	0x2547DC3C:  81460408  lwz r10,1032(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547DC40:  390A0001  addi r8,r10,1
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x2547DC44:  91060408  stw r8,1032(r6)
+	   9: GETL       	R8, t6
+	  10: GETL       	R6, t8
+	  11: ADDL       	$0x408, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547DC48:  7D034378  or r3,r8,r8
+	  14: GETL       	R8, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x2547DC4C:  83C10008  lwz r30,8(r1)
+	  17: GETL       	R1, t12
+	  18: ADDL       	$0x8, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R30
+	  21: INCEIPL       	$4
+
+	0x2547DC50:  38210010  addi r1,r1,16
+	  22: GETL       	R1, t16
+	  23: ADDL       	$0x10, t16
+	  24: PUTL       	t16, R1
+	  25: INCEIPL       	$4
+
+	0x2547DC54:  4E800020  blr
+	  26: GETL       	LR, t18
+	  27: JMPo-r       	t18  ($4)
+
+
+
+. 833 2547DC3C 28
+. 81 46 04 08 39 0A 00 01 91 06 04 08 7D 03 43 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 834 (0x2547589C) approx BBs exec'd 0 ====
+
+	0x2547589C:  A0FA0154  lhz r7,340(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x154, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254758A0:  907A0230  stw r3,560(r26)
+	   5: GETL       	R3, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x230, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x254758A4:  4BFFFF54  b 0x254757F8
+	  10: JMPo       	$0x254757F8  ($4)
+
+
+
+. 834 2547589C 12
+. A0 FA 01 54 90 7A 02 30 4B FF FF 54
+==22130== Reading syms from /lib/tls/libc-2.3.4.so (0xFE60000)
+==22130==    object doesn't have any debug info
+==== BB 835 (0x254837B0) approx BBs exec'd 0 ====
+
+	0x254837B0:  9088FFFC  stw r4,-4(r8)
+	   0: GETL       	R4, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254837B4:  9488FFF8  stwu r4,-8(r8)
+	   5: GETL       	R4, t4
+	   6: GETL       	R8, t6
+	   7: ADDL       	$0xFFFFFFF8, t6
+	   8: PUTL       	t6, R8
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x254837B8:  41840014  bc 12,4,0x254837CC
+	  11: Js04o       	$0x254837CC
+
+
+
+. 835 254837B0 12
+. 90 88 FF FC 94 88 FF F8 41 84 00 14
+==== BB 836 (0x254837D4) approx BBs exec'd 0 ====
+
+	0x254837D4:  28840000  cmpli cr1,r4,0
+	   0: GETL       	R4, t0
+	   1: MOVL       	$0x0, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254837D8:  54A70035  rlwinm. r7,r5,0,0,26
+	   5: GETL       	R5, t6
+	   6: ANDL       	$0xFFFFFFE0, t6
+	   7: PUTL       	t6, R7
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x254837DC:  7CA01120  mtcrf 0x1,r5
+	  11: GETL       	R5, t10
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x254837E0:  418601A0  bc 12,6,0x25483980
+	  14: Js06o       	$0x25483980
+
+
+
+. 836 254837D4 16
+. 28 84 00 00 54 A7 00 35 7C A0 11 20 41 86 01 A0
+==== BB 837 (0x25475D74) approx BBs exec'd 0 ====
+
+	0x25475D74:  80090004  lwz r0,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475D78:  700A0002  andi. r10,r0,0x2
+	   5: GETL       	R0, t4
+	   6: ANDL       	$0x2, t4
+	   7: PUTL       	t4, R10
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475D7C:  901A0200  stw r0,512(r26)
+	  11: GETL       	R0, t8
+	  12: GETL       	R26, t10
+	  13: ADDL       	$0x200, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25475D80:  41820008  bc 12,2,0x25475D88
+	  16: Js02o       	$0x25475D88
+
+
+
+. 837 25475D74 16
+. 80 09 00 04 70 0A 00 02 90 1A 02 00 41 82 00 08
+==== BB 838 (0x25475D88) approx BBs exec'd 0 ====
+
+	0x25475D88:  700B0004  andi. r11,r0,0x4
+	   0: GETL       	R0, t0
+	   1: ANDL       	$0x4, t0
+	   2: PUTL       	t0, R11
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475D8C:  41820008  bc 12,2,0x25475D94
+	   6: Js02o       	$0x25475D94
+
+
+
+. 838 25475D88 8
+. 70 0B 00 04 41 82 00 08
+==== BB 839 (0x25475D94) approx BBs exec'd 0 ====
+
+	0x25475D94:  700A0008  andi. r10,r0,0x8
+	   0: GETL       	R0, t0
+	   1: ANDL       	$0x8, t0
+	   2: PUTL       	t0, R10
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475D98:  41820008  bc 12,2,0x25475DA0
+	   6: Js02o       	$0x25475DA0
+
+
+
+. 839 25475D94 8
+. 70 0A 00 08 41 82 00 08
+==== BB 840 (0x25475D9C) approx BBs exec'd 0 ====
+
+	0x25475D9C:  91280060  stw r9,96(r8)
+	   0: GETL       	R9, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0x60, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25475DA0:  81280098  lwz r9,152(r8)
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0x98, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25475DA4:  2F890000  cmpi cr7,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25475DA8:  419E0354  bc 12,30,0x254760FC
+	  14: Js30o       	$0x254760FC
+
+
+
+. 840 25475D9C 16
+. 91 28 00 60 81 28 00 98 2F 89 00 00 41 9E 03 54
+==== BB 841 (0x25475DBC) approx BBs exec'd 0 ====
+
+	0x25475DBC:  91280060  stw r9,96(r8)
+	   0: GETL       	R9, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0x60, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25475DC0:  81280074  lwz r9,116(r8)
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0x74, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25475DC4:  2C890000  cmpi cr1,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25475DC8:  418600E0  bc 12,6,0x25475EA8
+	  14: Js06o       	$0x25475EA8
+
+
+
+. 841 25475DBC 16
+. 91 28 00 60 81 28 00 74 2C 89 00 00 41 86 00 E0
+==== BB 842 (0x25475EEC) approx BBs exec'd 0 ====
+
+	0x25475EEC:  80DA0000  lwz r6,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x25475EF0:  7D493214  add r10,r9,r6
+	   4: GETL       	R9, t4
+	   5: GETL       	R6, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x25475EF4:  915A0218  stw r10,536(r26)
+	   9: GETL       	R10, t8
+	  10: GETL       	R26, t10
+	  11: ADDL       	$0x218, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25475EF8:  7E639B78  or r3,r19,r19
+	  14: GETL       	R19, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0x25475EFC:  4800C0F5  bl 0x25481FF0
+	  17: MOVL       	$0x25475F00, t14
+	  18: PUTL       	t14, LR
+	  19: JMPo-c       	$0x25481FF0  ($4)
+
+
+
+. 842 25475EEC 20
+. 80 DA 00 00 7D 49 32 14 91 5A 02 18 7E 63 9B 78 48 00 C0 F5
+==== BB 843 (0x2547A488) approx BBs exec'd 0 ====
+
+	0x2547A488:  807F0040  lwz r3,64(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x40, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547A48C:  7C091800  cmp cr0,r9,r3
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547A490:  41820014  bc 12,2,0x2547A4A4
+	  10: Js02o       	$0x2547A4A4
+
+
+
+. 843 2547A488 12
+. 80 7F 00 40 7C 09 18 00 41 82 00 14
+==== BB 844 (0x2547A494) approx BBs exec'd 0 ====
+
+	0x2547A494:  3D407FFF  lis r10,32767
+	   0: MOVL       	$0x7FFF0000, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x2547A498:  6148FFFF  ori r8,r10,0xFFFF
+	   3: MOVL       	$0x7FFFFFFF, t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0x2547A49C:  7F094000  cmp cr6,r9,r8
+	   6: GETL       	R9, t4
+	   7: GETL       	R8, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x6, CR
+	  10: INCEIPL       	$4
+
+	0x2547A4A0:  409AFFD4  bc 4,26,0x2547A474
+	  11: Jc26o       	$0x2547A474
+
+
+
+. 844 2547A494 16
+. 3D 40 7F FF 61 48 FF FF 7F 09 40 00 40 9A FF D4
+==== BB 845 (0x2547A8B8) approx BBs exec'd 0 ====
+
+	0x2547A8B8:  2F980000  cmpi cr7,r24,0
+	   0: GETL       	R24, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547A8BC:  419E005C  bc 12,30,0x2547A918
+	   4: Js30o       	$0x2547A918
+
+
+
+. 845 2547A8B8 8
+. 2F 98 00 00 41 9E 00 5C
+==== BB 846 (0x2547A918) approx BBs exec'd 0 ====
+
+	0x2547A918:  801B0000  lwz r0,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547A91C:  2F1B0000  cmpi cr6,r27,0
+	   4: GETL       	R27, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x2547A920:  2C000000  cmpi cr0,r0,0
+	   8: GETL       	R0, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x2547A924:  4182001C  bc 12,2,0x2547A940
+	  12: Js02o       	$0x2547A940
+
+
+
+. 846 2547A918 16
+. 80 1B 00 00 2F 1B 00 00 2C 00 00 00 41 82 00 1C
+==== BB 847 (0x2547A928) approx BBs exec'd 0 ====
+
+	0x2547A928:  837B0008  lwz r27,8(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x2547A92C:  2F1B0000  cmpi cr6,r27,0
+	   5: GETL       	R27, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547A930:  41BAFC94  bc 13,26,0x2547A5C4
+	   9: Js26o       	$0x2547A5C4
+
+
+
+. 847 2547A928 12
+. 83 7B 00 08 2F 1B 00 00 41 BA FC 94
+==== BB 848 (0x2547A934) approx BBs exec'd 0 ====
+
+	0x2547A934:  801B0000  lwz r0,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547A938:  2C000000  cmpi cr0,r0,0
+	   4: GETL       	R0, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x2547A93C:  4082FFEC  bc 4,2,0x2547A928
+	   8: Jc02o       	$0x2547A928
+
+
+
+. 848 2547A934 12
+. 80 1B 00 00 2C 00 00 00 40 82 FF EC
+==== BB 849 (0x2547A940) approx BBs exec'd 0 ====
+
+	0x2547A940:  409AF9DC  bc 4,26,0x2547A31C
+	   0: Jc26o       	$0x2547A31C
+
+
+
+. 849 2547A940 4
+. 40 9A F9 DC
+==== BB 850 (0x2547A35C) approx BBs exec'd 0 ====
+
+	0x2547A35C:  A13A0156  lhz r9,342(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x156, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547A360:  2F090000  cmpi cr6,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547A364:  409A05E4  bc 4,26,0x2547A948
+	   9: Jc26o       	$0x2547A948
+
+
+
+. 850 2547A35C 12
+. A1 3A 01 56 2F 09 00 00 40 9A 05 E4
+==== BB 851 (0x2547A948) approx BBs exec'd 0 ====
+
+	0x2547A948:  5534103A  rlwinm r20,r9,2,0,29
+	   0: GETL       	R9, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R20
+	   3: INCEIPL       	$4
+
+	0x2547A94C:  81810000  lwz r12,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x2547A950:  3A54001E  addi r18,r20,30
+	   8: GETL       	R20, t6
+	   9: ADDL       	$0x1E, t6
+	  10: PUTL       	t6, R18
+	  11: INCEIPL       	$4
+
+	0x2547A954:  56490376  rlwinm r9,r18,0,13,27
+	  12: GETL       	R18, t8
+	  13: ANDL       	$0x7FFF0, t8
+	  14: PUTL       	t8, R9
+	  15: INCEIPL       	$4
+
+	0x2547A958:  7D4900D0  neg r10,r9
+	  16: GETL       	R9, t10
+	  17: NEGL       	t10
+	  18: PUTL       	t10, R10
+	  19: INCEIPL       	$4
+
+	0x2547A95C:  7D81516E  stwux r12,r1,r10
+	  20: GETL       	R10, t12
+	  21: GETL       	R1, t14
+	  22: ADDL       	t14, t12
+	  23: PUTL       	t12, R1
+	  24: GETL       	R12, t16
+	  25: STL       	t16, (t12)
+	  26: INCEIPL       	$4
+
+	0x2547A960:  39010017  addi r8,r1,23
+	  27: GETL       	R1, t18
+	  28: ADDL       	$0x17, t18
+	  29: PUTL       	t18, R8
+	  30: INCEIPL       	$4
+
+	0x2547A964:  55180036  rlwinm r24,r8,0,0,27
+	  31: GETL       	R8, t20
+	  32: ANDL       	$0xFFFFFFF0, t20
+	  33: PUTL       	t20, R24
+	  34: INCEIPL       	$4
+
+	0x2547A968:  4BFFFA00  b 0x2547A368
+	  35: JMPo       	$0x2547A368  ($4)
+
+
+
+. 851 2547A948 36
+. 55 34 10 3A 81 81 00 00 3A 54 00 1E 56 49 03 76 7D 49 00 D0 7D 81 51 6E 39 01 00 17 55 18 00 36 4B FF FA 00
+==== BB 852 (0x2547A0F0) approx BBs exec'd 0 ====
+
+	0x2547A0F0:  80FF0004  lwz r7,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547A0F4:  811F0008  lwz r8,8(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x2547A0F8:  81230018  lwz r9,24(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x2547A0FC:  4BFFCBED  bl 0x25476CE8
+	  15: MOVL       	$0x2547A100, t12
+	  16: PUTL       	t12, LR
+	  17: JMPo-c       	$0x25476CE8  ($4)
+
+
+
+. 852 2547A0F0 16
+. 80 FF 00 04 81 1F 00 08 81 23 00 18 4B FF CB ED
+==== BB 853 (0x25482F10) approx BBs exec'd 0 ====
+
+	0x25482F10:  84C40004  lwzu r6,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0x25482F14:  7C072A14  add r0,r7,r5
+	   6: GETL       	R7, t4
+	   7: GETL       	R5, t6
+	   8: ADDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x25482F18:  7D0928F8  nor r9,r8,r5
+	  11: GETL       	R8, t8
+	  12: GETL       	R5, t10
+	  13: ORL       	t10, t8
+	  14: NOTL       	t8
+	  15: PUTL       	t8, R9
+	  16: INCEIPL       	$4
+
+	0x25482F1C:  7C004839  and. r0,r0,r9
+	  17: GETL       	R0, t12
+	  18: GETL       	R9, t14
+	  19: ANDL       	t12, t14
+	  20: PUTL       	t14, R0
+	  21: CMP0L       	t14, t16  (-rSo)
+	  22: ICRFL       	t16, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25482F20:  7C853000  cmp cr1,r5,r6
+	  24: GETL       	R5, t18
+	  25: GETL       	R6, t20
+	  26: CMPL       	t18, t20, t22  (-rSo)
+	  27: ICRFL       	t22, $0x1, CR
+	  28: INCEIPL       	$4
+
+	0x25482F24:  4182FFE4  bc 12,2,0x25482F08
+	  29: Js02o       	$0x25482F08
+
+
+
+. 853 25482F10 24
+. 84 C4 00 04 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+==== BB 854 (0x25482F28) approx BBs exec'd 0 ====
+
+	0x25482F28:  7D002838  and r0,r8,r5
+	   0: GETL       	R8, t0
+	   1: GETL       	R5, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482F2C:  4186002C  bc 12,6,0x25482F58
+	   5: Js06o       	$0x25482F58
+
+
+
+. 854 25482F28 8
+. 7D 00 28 38 41 86 00 2C
+==== BB 855 (0x25482F30) approx BBs exec'd 0 ====
+
+	0x25482F30:  7C004214  add r0,r0,r8
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482F34:  7CAA3279  xor. r10,r5,r6
+	   5: GETL       	R5, t4
+	   6: GETL       	R6, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25482F38:  7D290078  andc r9,r9,r0
+	  12: GETL       	R9, t10
+	  13: GETL       	R0, t12
+	  14: NOTL       	t12
+	  15: ANDL       	t10, t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x25482F3C:  41800034  bc 12,0,0x25482F70
+	  18: Js00o       	$0x25482F70
+
+
+
+. 855 25482F30 16
+. 7C 00 42 14 7C AA 32 79 7D 29 00 78 41 80 00 34
+==== BB 856 (0x25482F40) approx BBs exec'd 0 ====
+
+	0x25482F40:  7D4A0034  cntlzw r10,r10
+	   0: GETL       	R10, t0
+	   1: CNTLZL       	t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0x25482F44:  7D290034  cntlzw r9,r9
+	   4: GETL       	R9, t2
+	   5: CNTLZL       	t2
+	   6: PUTL       	t2, R9
+	   7: INCEIPL       	$4
+
+	0x25482F48:  39290007  addi r9,r9,7
+	   8: GETL       	R9, t4
+	   9: ADDL       	$0x7, t4
+	  10: PUTL       	t4, R9
+	  11: INCEIPL       	$4
+
+	0x25482F4C:  7C895000  cmp cr1,r9,r10
+	  12: GETL       	R9, t6
+	  13: GETL       	R10, t8
+	  14: CMPL       	t6, t8, t10  (-rSo)
+	  15: ICRFL       	t10, $0x1, CR
+	  16: INCEIPL       	$4
+
+	0x25482F50:  7C662850  subf r3,r6,r5
+	  17: GETL       	R6, t12
+	  18: GETL       	R5, t14
+	  19: SUBL       	t12, t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0x25482F54:  4CA40020  bclr 5,4
+	  22: GETL       	LR, t16
+	  23: Jc04o-r       	t16
+
+
+
+. 856 25482F40 24
+. 7D 4A 00 34 7D 29 00 34 39 29 00 07 7C 89 50 00 7C 66 28 50 4C A4 00 20
+==== BB 857 (0x25482F58) approx BBs exec'd 0 ====
+
+	0x25482F58:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25482F5C:  4E800020  blr
+	   3: GETL       	LR, t2
+	   4: JMPo-r       	t2  ($4)
+
+
+
+. 857 25482F58 8
+. 38 60 00 00 4E 80 00 20
+==== BB 858 (0x2547C750) approx BBs exec'd 0 ====
+
+	0x2547C750:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547C754:  38600001  li r3,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547C758:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x2547C75C:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x2547C760:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547C764:  83E1001C  lwz r31,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0x2547C768:  38210020  addi r1,r1,32
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0x2547C76C:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+
+. 858 2547C750 32
+. 80 81 00 24 38 60 00 01 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 859 (0x25476F54) approx BBs exec'd 0 ====
+
+	0x25476F54:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476F58:  83E10294  lwz r31,660(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x294, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0x25476F5C:  81810244  lwz r12,580(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x244, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R12
+	  12: INCEIPL       	$4
+
+	0x25476F60:  7FE803A6  mtlr r31
+	  13: GETL       	R31, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x25476F64:  81C10248  lwz r14,584(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x248, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R14
+	  20: INCEIPL       	$4
+
+	0x25476F68:  81E1024C  lwz r15,588(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x24C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R15
+	  25: INCEIPL       	$4
+
+	0x25476F6C:  7D818120  mtcrf 0x18,r12
+	  26: GETL       	R12, t20
+	  27: ICRFL       	t20, $0x3, CR
+	  28: ICRFL       	t20, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x25476F70:  82010250  lwz r16,592(r1)
+	  30: GETL       	R1, t22
+	  31: ADDL       	$0x250, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R16
+	  34: INCEIPL       	$4
+
+	0x25476F74:  82210254  lwz r17,596(r1)
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x254, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R17
+	  39: INCEIPL       	$4
+
+	0x25476F78:  82410258  lwz r18,600(r1)
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x258, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R18
+	  44: INCEIPL       	$4
+
+	0x25476F7C:  8261025C  lwz r19,604(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x25C, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R19
+	  49: INCEIPL       	$4
+
+	0x25476F80:  82810260  lwz r20,608(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x260, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R20
+	  54: INCEIPL       	$4
+
+	0x25476F84:  82A10264  lwz r21,612(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x264, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R21
+	  59: INCEIPL       	$4
+
+	0x25476F88:  82C10268  lwz r22,616(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x268, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R22
+	  64: INCEIPL       	$4
+
+	0x25476F8C:  82E1026C  lwz r23,620(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x26C, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R23
+	  69: INCEIPL       	$4
+
+	0x25476F90:  83010270  lwz r24,624(r1)
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x270, t54
+	  72: LDL       	(t54), t56
+	  73: PUTL       	t56, R24
+	  74: INCEIPL       	$4
+
+	0x25476F94:  83210274  lwz r25,628(r1)
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x274, t58
+	  77: LDL       	(t58), t60
+	  78: PUTL       	t60, R25
+	  79: INCEIPL       	$4
+
+	0x25476F98:  83410278  lwz r26,632(r1)
+	  80: GETL       	R1, t62
+	  81: ADDL       	$0x278, t62
+	  82: LDL       	(t62), t64
+	  83: PUTL       	t64, R26
+	  84: INCEIPL       	$4
+
+	0x25476F9C:  8361027C  lwz r27,636(r1)
+	  85: GETL       	R1, t66
+	  86: ADDL       	$0x27C, t66
+	  87: LDL       	(t66), t68
+	  88: PUTL       	t68, R27
+	  89: INCEIPL       	$4
+
+	0x25476FA0:  83810280  lwz r28,640(r1)
+	  90: GETL       	R1, t70
+	  91: ADDL       	$0x280, t70
+	  92: LDL       	(t70), t72
+	  93: PUTL       	t72, R28
+	  94: INCEIPL       	$4
+
+	0x25476FA4:  83A10284  lwz r29,644(r1)
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0x284, t74
+	  97: LDL       	(t74), t76
+	  98: PUTL       	t76, R29
+	  99: INCEIPL       	$4
+
+	0x25476FA8:  83C10288  lwz r30,648(r1)
+	 100: GETL       	R1, t78
+	 101: ADDL       	$0x288, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R30
+	 104: INCEIPL       	$4
+
+	0x25476FAC:  83E1028C  lwz r31,652(r1)
+	 105: GETL       	R1, t82
+	 106: ADDL       	$0x28C, t82
+	 107: LDL       	(t82), t84
+	 108: PUTL       	t84, R31
+	 109: INCEIPL       	$4
+
+	0x25476FB0:  38210290  addi r1,r1,656
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x290, t86
+	 112: PUTL       	t86, R1
+	 113: INCEIPL       	$4
+
+	0x25476FB4:  4E800020  blr
+	 114: GETL       	LR, t88
+	 115: JMPo-r       	t88  ($4)
+
+
+
+. 859 25476F54 100
+. 7F A3 EB 78 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+==== BB 860 (0x2547A460) approx BBs exec'd 0 ====
+
+	0x2547A460:  2C980000  cmpi cr1,r24,0
+	   0: GETL       	R24, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547A464:  41860010  bc 12,6,0x2547A474
+	   4: Js06o       	$0x2547A474
+
+
+
+. 860 2547A460 8
+. 2C 98 00 00 41 86 00 10
+==== BB 861 (0x2547A468) approx BBs exec'd 0 ====
+
+	0x2547A468:  56CB103A  rlwinm r11,r22,2,0,29
+	   0: GETL       	R22, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0x2547A46C:  3AD60001  addi r22,r22,1
+	   4: GETL       	R22, t2
+	   5: ADDL       	$0x1, t2
+	   6: PUTL       	t2, R22
+	   7: INCEIPL       	$4
+
+	0x2547A470:  7D4BC12E  stwx r10,r11,r24
+	   8: GETL       	R24, t4
+	   9: GETL       	R11, t6
+	  10: ADDL       	t6, t4
+	  11: GETL       	R10, t8
+	  12: STL       	t8, (t4)
+	  13: INCEIPL       	$4
+
+	0x2547A474:  853C0008  lwzu r9,8(r28)
+	  14: GETL       	R28, t10
+	  15: ADDL       	$0x8, t10
+	  16: PUTL       	t10, R28
+	  17: LDL       	(t10), t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0x2547A478:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t14
+	  21: CMP0L       	t14, t16  (-rSo)
+	  22: ICRFL       	t16, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x2547A47C:  4182043C  bc 12,2,0x2547A8B8
+	  24: Js02o       	$0x2547A8B8
+
+
+
+. 861 2547A468 24
+. 56 CB 10 3A 3A D6 00 01 7D 4B C1 2E 85 3C 00 08 2C 09 00 00 41 82 04 3C
+==== BB 862 (0x2547A8C0) approx BBs exec'd 0 ====
+
+	0x2547A8C0:  3BB60001  addi r29,r22,1
+	   0: GETL       	R22, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x2547A8C4:  56DC103A  rlwinm r28,r22,2,0,29
+	   4: GETL       	R22, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R28
+	   7: INCEIPL       	$4
+
+	0x2547A8C8:  3A400000  li r18,0
+	   8: MOVL       	$0x0, t4
+	   9: PUTL       	t4, R18
+	  10: INCEIPL       	$4
+
+	0x2547A8CC:  57B61838  rlwinm r22,r29,3,0,28
+	  11: GETL       	R29, t6
+	  12: SHLL       	$0x3, t6
+	  13: PUTL       	t6, R22
+	  14: INCEIPL       	$4
+
+	0x2547A8D0:  7E5CC12E  stwx r18,r28,r24
+	  15: GETL       	R24, t8
+	  16: GETL       	R28, t10
+	  17: ADDL       	t10, t8
+	  18: GETL       	R18, t12
+	  19: STL       	t12, (t8)
+	  20: INCEIPL       	$4
+
+	0x2547A8D4:  38760004  addi r3,r22,4
+	  21: GETL       	R22, t14
+	  22: ADDL       	$0x4, t14
+	  23: PUTL       	t14, R3
+	  24: INCEIPL       	$4
+
+	0x2547A8D8:  4801D14D  bl 0x25497A24
+	  25: MOVL       	$0x2547A8DC, t16
+	  26: PUTL       	t16, LR
+	  27: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 862 2547A8C0 28
+. 3B B6 00 01 56 DC 10 3A 3A 40 00 00 57 B6 18 38 7E 5C C1 2E 38 76 00 04 48 01 D1 4D
+==== BB 863 (0x2547A8DC) approx BBs exec'd 0 ====
+
+	0x2547A8DC:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547A8E0:  7C691B78  or r9,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0x2547A8E4:  907A01E8  stw r3,488(r26)
+	   7: GETL       	R3, t6
+	   8: GETL       	R26, t8
+	   9: ADDL       	$0x1E8, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x2547A8E8:  418605C4  bc 12,6,0x2547AEAC
+	  12: Js06o       	$0x2547AEAC
+
+
+
+. 863 2547A8DC 16
+. 2C 83 00 00 7C 69 1B 78 90 7A 01 E8 41 86 05 C4
+==== BB 864 (0x2547A8EC) approx BBs exec'd 0 ====
+
+	0x2547A8EC:  57B5103A  rlwinm r21,r29,2,0,29
+	   0: GETL       	R29, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R21
+	   3: INCEIPL       	$4
+
+	0x2547A8F0:  7F04C378  or r4,r24,r24
+	   4: GETL       	R24, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0x2547A8F4:  7EA5AB78  or r5,r21,r21
+	   7: GETL       	R21, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x2547A8F8:  93490000  stw r26,0(r9)
+	  10: GETL       	R26, t6
+	  11: GETL       	R9, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547A8FC:  38630004  addi r3,r3,4
+	  14: GETL       	R3, t10
+	  15: ADDL       	$0x4, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0x2547A900:  480092E1  bl 0x25483BE0
+	  18: MOVL       	$0x2547A904, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 864 2547A8EC 24
+. 57 B5 10 3A 7F 04 C3 78 7E A5 AB 78 93 49 00 00 38 63 00 04 48 00 92 E1
+==== BB 865 (0x2547A904) approx BBs exec'd 0 ====
+
+	0x2547A904:  809A01E8  lwz r4,488(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547A908:  7EA5AB78  or r5,r21,r21
+	   5: GETL       	R21, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547A90C:  7F552214  add r26,r21,r4
+	   8: GETL       	R21, t6
+	   9: GETL       	R4, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x2547A910:  387A0004  addi r3,r26,4
+	  13: GETL       	R26, t10
+	  14: ADDL       	$0x4, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x2547A914:  480092CD  bl 0x25483BE0
+	  17: MOVL       	$0x2547A918, t12
+	  18: PUTL       	t12, LR
+	  19: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 865 2547A904 20
+. 80 9A 01 E8 7E A5 AB 78 7F 55 22 14 38 7A 00 04 48 00 92 CD
+==== BB 866 (0x25477104) approx BBs exec'd 0 ====
+
+	0x25477104:  38000001  li r0,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25477108:  409A0024  bc 4,26,0x2547712C
+	   3: Jc26o       	$0x2547712C
+
+
+
+. 866 25477104 8
+. 38 00 00 01 40 9A 00 24
+==== BB 867 (0x2547710C) approx BBs exec'd 0 ====
+
+	0x2547710C:  817D005C  lwz r11,92(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x5C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25477110:  7FA5EB78  or r5,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25477114:  7F83E378  or r3,r28,r28
+	   8: GETL       	R28, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x25477118:  7E469378  or r6,r18,r18
+	  11: GETL       	R18, t8
+	  12: PUTL       	t8, R6
+	  13: INCEIPL       	$4
+
+	0x2547711C:  2F0B0000  cmpi cr6,r11,0
+	  14: GETL       	R11, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x6, CR
+	  17: INCEIPL       	$4
+
+	0x25477120:  38000000  li r0,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x25477124:  409A0218  bc 4,26,0x2547733C
+	  21: Jc26o       	$0x2547733C
+
+
+
+. 867 2547710C 28
+. 81 7D 00 5C 7F A5 EB 78 7F 83 E3 78 7E 46 93 78 2F 0B 00 00 38 00 00 00 40 9A 02 18
+==== BB 868 (0x25477128) approx BBs exec'd 0 ====
+
+	0x25477128:  91FD018C  stw r15,396(r29)
+	   0: GETL       	R15, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	$0x18C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547712C:  2C000000  cmpi cr0,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25477130:  7F86E378  or r6,r28,r28
+	   9: GETL       	R28, t8
+	  10: PUTL       	t8, R6
+	  11: INCEIPL       	$4
+
+	0x25477134:  7FE3FB78  or r3,r31,r31
+	  12: GETL       	R31, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x25477138:  7EC4B378  or r4,r22,r22
+	  15: GETL       	R22, t12
+	  16: PUTL       	t12, R4
+	  17: INCEIPL       	$4
+
+	0x2547713C:  7EE5BB78  or r5,r23,r23
+	  18: GETL       	R23, t14
+	  19: PUTL       	t14, R5
+	  20: INCEIPL       	$4
+
+	0x25477140:  38E10228  addi r7,r1,552
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x228, t16
+	  23: PUTL       	t16, R7
+	  24: INCEIPL       	$4
+
+	0x25477144:  39010018  addi r8,r1,24
+	  25: GETL       	R1, t18
+	  26: ADDL       	$0x18, t18
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0x25477148:  4182FF84  bc 12,2,0x254770CC
+	  29: Js02o       	$0x254770CC
+
+
+
+. 868 25477128 36
+. 91 FD 01 8C 2C 00 00 00 7F 86 E3 78 7F E3 FB 78 7E C4 B3 78 7E E5 BB 78 38 E1 02 28 39 01 00 18 41 82 FF 84
+==== BB 869 (0x254771D0) approx BBs exec'd 0 ====
+
+	0x254771D0:  8005018C  lwz r0,396(r5)
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0x18C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x254771D4:  3BA5018C  addi r29,r5,396
+	   5: GETL       	R5, t4
+	   6: ADDL       	$0x18C, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0x254771D8:  39200000  li r9,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0x254771DC:  2C80FFFF  cmpi cr1,r0,-1
+	  12: GETL       	R0, t8
+	  13: MOVL       	$0xFFFFFFFF, t12
+	  14: CMPL       	t8, t12, t10  (-rSo)
+	  15: ICRFL       	t10, $0x1, CR
+	  16: INCEIPL       	$4
+
+	0x254771E0:  41860024  bc 12,6,0x25477204
+	  17: Js06o       	$0x25477204
+
+
+
+. 869 254771D0 20
+. 80 05 01 8C 3B A5 01 8C 39 20 00 00 2C 80 FF FF 41 86 00 24
+==== BB 870 (0x25477204) approx BBs exec'd 0 ====
+
+	0x25477204:  2C090000  cmpi cr0,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25477208:  408201F8  bc 4,2,0x25477400
+	   4: Jc02o       	$0x25477400
+
+
+
+. 870 25477204 8
+. 2C 09 00 00 40 82 01 F8
+==== BB 871 (0x25483A60) approx BBs exec'd 0 ====
+
+	0x25483A60:  7D2903A6  mtctr r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x25483A64:  88BF0000  lbz r5,0(r31)
+	   3: GETL       	R31, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R5
+	   6: INCEIPL       	$4
+
+	0x25483A68:  3BFF0001  addi r31,r31,1
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R31
+	  10: INCEIPL       	$4
+
+	0x25483A6C:  98BD0000  stb r5,0(r29)
+	  11: GETL       	R5, t8
+	  12: GETL       	R29, t10
+	  13: STB       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25483A70:  3BBD0001  addi r29,r29,1
+	  15: GETL       	R29, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x25483A74:  4200FFF0  bc 16,0,0x25483A64
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0x25483A78
+	  23: JMPo       	$0x25483A64  ($4)
+
+
+
+. 871 25483A60 24
+. 7D 29 03 A6 88 BF 00 00 3B FF 00 01 98 BD 00 00 3B BD 00 01 42 00 FF F0
+==== BB 872 (0x25476958) approx BBs exec'd 0 ====
+
+	0x25476958:  80DF0080  lwz r6,128(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x80, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547695C:  2C060000  cmpi cr0,r6,0
+	   5: GETL       	R6, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25476960:  4182FF58  bc 12,2,0x254768B8
+	   9: Js02o       	$0x254768B8
+
+
+
+. 872 25476958 12
+. 80 DF 00 80 2C 06 00 00 41 82 FF 58
+==== BB 873 (0x25476A34) approx BBs exec'd 0 ====
+
+	0x25476A34:  7E639B78  or r3,r19,r19
+	   0: GETL       	R19, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476A38:  48020FED  bl 0x25497A24
+	   3: MOVL       	$0x25476A3C, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 873 25476A34 8
+. 7E 63 9B 78 48 02 0F ED
+==== BB 874 (0x25476A3C) approx BBs exec'd 0 ====
+
+	0x25476A3C:  82BF0088  lwz r21,136(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x88, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x25476A40:  2E030000  cmpi cr4,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x25476A44:  90750000  stw r3,0(r21)
+	   9: GETL       	R3, t8
+	  10: GETL       	R21, t10
+	  11: STL       	t8, (t10)
+	  12: INCEIPL       	$4
+
+	0x25476A48:  41920284  bc 12,18,0x25476CCC
+	  13: Js18o       	$0x25476CCC
+
+
+
+. 874 25476A3C 16
+. 82 BF 00 88 2E 03 00 00 90 75 00 00 41 92 02 84
+==== BB 875 (0x25476A4C) approx BBs exec'd 0 ====
+
+	0x25476A4C:  7F64DB78  or r4,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25476A50:  7E659B78  or r5,r19,r19
+	   3: GETL       	R19, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25476A54:  4800D18D  bl 0x25483BE0
+	   6: MOVL       	$0x25476A58, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 875 25476A4C 12
+. 7F 64 DB 78 7E 65 9B 78 48 00 D1 8D
+==== BB 876 (0x25476A58) approx BBs exec'd 0 ====
+
+	0x25476A58:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476A5C:  81010000  lwz r8,0(r1)
+	   3: GETL       	R1, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R8
+	   6: INCEIPL       	$4
+
+	0x25476A60:  81E80004  lwz r15,4(r8)
+	   7: GETL       	R8, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R15
+	  11: INCEIPL       	$4
+
+	0x25476A64:  8088FFB4  lwz r4,-76(r8)
+	  12: GETL       	R8, t10
+	  13: ADDL       	$0xFFFFFFB4, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R4
+	  16: INCEIPL       	$4
+
+	0x25476A68:  7DE803A6  mtlr r15
+	  17: GETL       	R15, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0x25476A6C:  81C8FFB8  lwz r14,-72(r8)
+	  20: GETL       	R8, t16
+	  21: ADDL       	$0xFFFFFFB8, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R14
+	  24: INCEIPL       	$4
+
+	0x25476A70:  81E8FFBC  lwz r15,-68(r8)
+	  25: GETL       	R8, t20
+	  26: ADDL       	$0xFFFFFFBC, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R15
+	  29: INCEIPL       	$4
+
+	0x25476A74:  7C808120  mtcrf 0x8,r4
+	  30: GETL       	R4, t24
+	  31: ICRFL       	t24, $0x4, CR
+	  32: INCEIPL       	$4
+
+	0x25476A78:  8208FFC0  lwz r16,-64(r8)
+	  33: GETL       	R8, t26
+	  34: ADDL       	$0xFFFFFFC0, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R16
+	  37: INCEIPL       	$4
+
+	0x25476A7C:  8228FFC4  lwz r17,-60(r8)
+	  38: GETL       	R8, t30
+	  39: ADDL       	$0xFFFFFFC4, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R17
+	  42: INCEIPL       	$4
+
+	0x25476A80:  8248FFC8  lwz r18,-56(r8)
+	  43: GETL       	R8, t34
+	  44: ADDL       	$0xFFFFFFC8, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R18
+	  47: INCEIPL       	$4
+
+	0x25476A84:  8268FFCC  lwz r19,-52(r8)
+	  48: GETL       	R8, t38
+	  49: ADDL       	$0xFFFFFFCC, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R19
+	  52: INCEIPL       	$4
+
+	0x25476A88:  8288FFD0  lwz r20,-48(r8)
+	  53: GETL       	R8, t42
+	  54: ADDL       	$0xFFFFFFD0, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R20
+	  57: INCEIPL       	$4
+
+	0x25476A8C:  82A8FFD4  lwz r21,-44(r8)
+	  58: GETL       	R8, t46
+	  59: ADDL       	$0xFFFFFFD4, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R21
+	  62: INCEIPL       	$4
+
+	0x25476A90:  82C8FFD8  lwz r22,-40(r8)
+	  63: GETL       	R8, t50
+	  64: ADDL       	$0xFFFFFFD8, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R22
+	  67: INCEIPL       	$4
+
+	0x25476A94:  82E8FFDC  lwz r23,-36(r8)
+	  68: GETL       	R8, t54
+	  69: ADDL       	$0xFFFFFFDC, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R23
+	  72: INCEIPL       	$4
+
+	0x25476A98:  8308FFE0  lwz r24,-32(r8)
+	  73: GETL       	R8, t58
+	  74: ADDL       	$0xFFFFFFE0, t58
+	  75: LDL       	(t58), t60
+	  76: PUTL       	t60, R24
+	  77: INCEIPL       	$4
+
+	0x25476A9C:  8328FFE4  lwz r25,-28(r8)
+	  78: GETL       	R8, t62
+	  79: ADDL       	$0xFFFFFFE4, t62
+	  80: LDL       	(t62), t64
+	  81: PUTL       	t64, R25
+	  82: INCEIPL       	$4
+
+	0x25476AA0:  8348FFE8  lwz r26,-24(r8)
+	  83: GETL       	R8, t66
+	  84: ADDL       	$0xFFFFFFE8, t66
+	  85: LDL       	(t66), t68
+	  86: PUTL       	t68, R26
+	  87: INCEIPL       	$4
+
+	0x25476AA4:  8368FFEC  lwz r27,-20(r8)
+	  88: GETL       	R8, t70
+	  89: ADDL       	$0xFFFFFFEC, t70
+	  90: LDL       	(t70), t72
+	  91: PUTL       	t72, R27
+	  92: INCEIPL       	$4
+
+	0x25476AA8:  8388FFF0  lwz r28,-16(r8)
+	  93: GETL       	R8, t74
+	  94: ADDL       	$0xFFFFFFF0, t74
+	  95: LDL       	(t74), t76
+	  96: PUTL       	t76, R28
+	  97: INCEIPL       	$4
+
+	0x25476AAC:  83A8FFF4  lwz r29,-12(r8)
+	  98: GETL       	R8, t78
+	  99: ADDL       	$0xFFFFFFF4, t78
+	 100: LDL       	(t78), t80
+	 101: PUTL       	t80, R29
+	 102: INCEIPL       	$4
+
+	0x25476AB0:  83C8FFF8  lwz r30,-8(r8)
+	 103: GETL       	R8, t82
+	 104: ADDL       	$0xFFFFFFF8, t82
+	 105: LDL       	(t82), t84
+	 106: PUTL       	t84, R30
+	 107: INCEIPL       	$4
+
+	0x25476AB4:  83E8FFFC  lwz r31,-4(r8)
+	 108: GETL       	R8, t86
+	 109: ADDL       	$0xFFFFFFFC, t86
+	 110: LDL       	(t86), t88
+	 111: PUTL       	t88, R31
+	 112: INCEIPL       	$4
+
+	0x25476AB8:  7D014378  or r1,r8,r8
+	 113: GETL       	R8, t90
+	 114: PUTL       	t90, R1
+	 115: INCEIPL       	$4
+
+	0x25476ABC:  4E800020  blr
+	 116: GETL       	LR, t92
+	 117: JMPo-r       	t92  ($4)
+
+
+
+. 876 25476A58 104
+. 7F 43 D3 78 81 01 00 00 81 E8 00 04 80 88 FF B4 7D E8 03 A6 81 C8 FF B8 81 E8 FF BC 7C 80 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+==== BB 877 (0x25479218) approx BBs exec'd 0 ====
+
+	0x25479218:  7C1C0378  or r28,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0x2547921C:  801C0168  lwz r0,360(r28)
+	   3: GETL       	R28, t2
+	   4: ADDL       	$0x168, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25479220:  2C000000  cmpi cr0,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25479224:  4082FFF4  bc 4,2,0x25479218
+	  12: Jc02o       	$0x25479218
+
+
+
+. 877 25479218 16
+. 7C 1C 03 78 80 1C 01 68 2C 00 00 00 40 82 FF F4
+==== BB 878 (0x254807B8) approx BBs exec'd 0 ====
+
+	0x254807B8:  7D283850  subf r9,r8,r7
+	   0: GETL       	R8, t0
+	   1: GETL       	R7, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x254807BC:  7F865050  subf r28,r6,r10
+	   5: GETL       	R6, t4
+	   6: GETL       	R10, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x254807C0:  7F89E040  cmpl cr7,r9,r28
+	  10: GETL       	R9, t8
+	  11: GETL       	R28, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x254807C4:  409C0064  bc 4,28,0x25480828
+	  15: Jc28o       	$0x25480828
+
+
+
+. 878 254807B8 16
+. 7D 28 38 50 7F 86 50 50 7F 89 E0 40 40 9C 00 64
+==22130== Reading syms from /home/sewardj/valgrind-2.2.0-ppc/Inst/lib/valgrind/libpthread.so (0xEE2F000)
+==== BB 879 (0x2547618C) approx BBs exec'd 0 ====
+
+	0x2547618C:  2F160000  cmpi cr6,r22,0
+	   0: GETL       	R22, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25476190:  419801EC  bc 12,24,0x2547637C
+	   4: Js24o       	$0x2547637C
+
+
+
+. 879 2547618C 8
+. 2F 16 00 00 41 98 01 EC
+==== BB 880 (0x25476194) approx BBs exec'd 0 ====
+
+	0x25476194:  82BE050C  lwz r21,1292(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x50C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x25476198:  83750000  lwz r27,0(r21)
+	   5: GETL       	R21, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0x2547619C:  63780004  ori r24,r27,0x4
+	   9: GETL       	R27, t8
+	  10: ORL       	$0x4, t8
+	  11: PUTL       	t8, R24
+	  12: INCEIPL       	$4
+
+	0x254761A0:  93150000  stw r24,0(r21)
+	  13: GETL       	R24, t10
+	  14: GETL       	R21, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0x254761A4:  809E04C8  lwz r4,1224(r30)
+	  17: GETL       	R30, t14
+	  18: ADDL       	$0x4C8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R4
+	  21: INCEIPL       	$4
+
+	0x254761A8:  807F0098  lwz r3,152(r31)
+	  22: GETL       	R31, t18
+	  23: ADDL       	$0x98, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R3
+	  26: INCEIPL       	$4
+
+	0x254761AC:  81040404  lwz r8,1028(r4)
+	  27: GETL       	R4, t22
+	  28: ADDL       	$0x404, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R8
+	  31: INCEIPL       	$4
+
+	0x254761B0:  7D0903A6  mtctr r8
+	  32: GETL       	R8, t26
+	  33: PUTL       	t26, CTR
+	  34: INCEIPL       	$4
+
+	0x254761B4:  4E800421  bctrl
+	  35: MOVL       	$0x254761B8, t28
+	  36: PUTL       	t28, LR
+	  37: GETL       	CTR, t30
+	  38: JMPo-c       	t30  ($4)
+
+
+
+. 880 25476194 36
+. 82 BE 05 0C 83 75 00 00 63 78 00 04 93 15 00 00 80 9E 04 C8 80 7F 00 98 81 04 04 04 7D 09 03 A6 4E 80 04 21
+==== BB 881 __GI__dl_make_stack_executable(0x2547E8C0) approx BBs exec'd 0 ====
+
+	0x2547E8C0:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547E8C4:  7CE802A6  mflr r7
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x2547E8C8:  48018739  bl 0x25497000
+	   9: MOVL       	$0x2547E8CC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 881 2547E8C0 12
+. 94 21 FF D0 7C E8 02 A6 48 01 87 39
+==== BB 882 (0x2547E8CC) approx BBs exec'd 0 ====
+
+	0x2547E8CC:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547E8D0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547E8D4:  93410018  stw r26,24(r1)
+	   8: GETL       	R26, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x18, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547E8D8:  90E10034  stw r7,52(r1)
+	  13: GETL       	R7, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x34, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547E8DC:  93210014  stw r25,20(r1)
+	  18: GETL       	R25, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547E8E0:  7C791B78  or r25,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0x2547E8E4:  835E04F4  lwz r26,1268(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4F4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R26
+	  30: INCEIPL       	$4
+
+	0x2547E8E8:  80C10000  lwz r6,0(r1)
+	  31: GETL       	R1, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R6
+	  34: INCEIPL       	$4
+
+	0x2547E8EC:  80BA0004  lwz r5,4(r26)
+	  35: GETL       	R26, t28
+	  36: ADDL       	$0x4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R5
+	  39: INCEIPL       	$4
+
+	0x2547E8F0:  809A00A4  lwz r4,164(r26)
+	  40: GETL       	R26, t32
+	  41: ADDL       	$0xA4, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R4
+	  44: INCEIPL       	$4
+
+	0x2547E8F4:  81390000  lwz r9,0(r25)
+	  45: GETL       	R25, t36
+	  46: LDL       	(t36), t38
+	  47: PUTL       	t38, R9
+	  48: INCEIPL       	$4
+
+	0x2547E8F8:  7C0500D0  neg r0,r5
+	  49: GETL       	R5, t40
+	  50: NEGL       	t40
+	  51: PUTL       	t40, R0
+	  52: INCEIPL       	$4
+
+	0x2547E8FC:  93E1002C  stw r31,44(r1)
+	  53: GETL       	R31, t42
+	  54: GETL       	R1, t44
+	  55: ADDL       	$0x2C, t44
+	  56: STL       	t42, (t44)
+	  57: INCEIPL       	$4
+
+	0x2547E900:  7C8903A6  mtctr r4
+	  58: GETL       	R4, t46
+	  59: PUTL       	t46, CTR
+	  60: INCEIPL       	$4
+
+	0x2547E904:  9361001C  stw r27,28(r1)
+	  61: GETL       	R27, t48
+	  62: GETL       	R1, t50
+	  63: ADDL       	$0x1C, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0x2547E908:  3880000C  li r4,12
+	  66: MOVL       	$0xC, t52
+	  67: PUTL       	t52, R4
+	  68: INCEIPL       	$4
+
+	0x2547E90C:  93810020  stw r28,32(r1)
+	  69: GETL       	R28, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0x20, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0x2547E910:  7D3F0038  and r31,r9,r0
+	  74: GETL       	R9, t58
+	  75: GETL       	R0, t60
+	  76: ANDL       	t58, t60
+	  77: PUTL       	t60, R31
+	  78: INCEIPL       	$4
+
+	0x2547E914:  93A10024  stw r29,36(r1)
+	  79: GETL       	R29, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x24, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x2547E918:  80660004  lwz r3,4(r6)
+	  84: GETL       	R6, t66
+	  85: ADDL       	$0x4, t66
+	  86: LDL       	(t66), t68
+	  87: PUTL       	t68, R3
+	  88: INCEIPL       	$4
+
+	0x2547E91C:  4E800421  bctrl
+	  89: MOVL       	$0x2547E920, t70
+	  90: PUTL       	t70, LR
+	  91: GETL       	CTR, t72
+	  92: JMPo-c       	t72  ($4)
+
+
+
+. 882 2547E8CC 84
+. 93 C1 00 28 7F C8 02 A6 93 41 00 18 90 E1 00 34 93 21 00 14 7C 79 1B 78 83 5E 04 F4 80 C1 00 00 80 BA 00 04 80 9A 00 A4 81 39 00 00 7C 05 00 D0 93 E1 00 2C 7C 89 03 A6 93 61 00 1C 38 80 00 0C 93 81 00 20 7D 3F 00 38 93 A1 00 24 80 66 00 04 4E 80 04 21
+==== BB 883 _dl_check_caller(0x2547EA74) approx BBs exec'd 0 ====
+
+	0x2547EA74:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547EA78:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547EA7C:  48018585  bl 0x25497000
+	   9: MOVL       	$0x2547EA80, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 883 2547EA74 12
+. 94 21 FF C0 7C 08 02 A6 48 01 85 85
+==== BB 884 (0x2547EA80) approx BBs exec'd 0 ====
+
+	0x2547EA80:  93C10038  stw r30,56(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x38, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547EA84:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547EA88:  93010020  stw r24,32(r1)
+	   8: GETL       	R24, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x20, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547EA8C:  92810010  stw r20,16(r1)
+	  13: GETL       	R20, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x10, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547EA90:  5494E8FE  rlwinm r20,r4,29,3,31
+	  18: GETL       	R4, t14
+	  19: SHRL       	$0x3, t14
+	  20: PUTL       	t14, R20
+	  21: INCEIPL       	$4
+
+	0x2547EA94:  92A10014  stw r21,20(r1)
+	  22: GETL       	R21, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x14, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x2547EA98:  5495F0BE  rlwinm r21,r4,30,2,31
+	  27: GETL       	R4, t20
+	  28: SHRL       	$0x2, t20
+	  29: PUTL       	t20, R21
+	  30: INCEIPL       	$4
+
+	0x2547EA9C:  831E04C8  lwz r24,1224(r30)
+	  31: GETL       	R30, t22
+	  32: ADDL       	$0x4C8, t22
+	  33: LDL       	(t22), t24
+	  34: PUTL       	t24, R24
+	  35: INCEIPL       	$4
+
+	0x2547EAA0:  92C10018  stw r22,24(r1)
+	  36: GETL       	R22, t26
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x18, t28
+	  39: STL       	t26, (t28)
+	  40: INCEIPL       	$4
+
+	0x2547EAA4:  5496F87E  rlwinm r22,r4,31,1,31
+	  41: GETL       	R4, t30
+	  42: SHRL       	$0x1, t30
+	  43: PUTL       	t30, R22
+	  44: INCEIPL       	$4
+
+	0x2547EAA8:  92E1001C  stw r23,28(r1)
+	  45: GETL       	R23, t32
+	  46: GETL       	R1, t34
+	  47: ADDL       	$0x1C, t34
+	  48: STL       	t32, (t34)
+	  49: INCEIPL       	$4
+
+	0x2547EAAC:  549707FE  rlwinm r23,r4,0,31,31
+	  50: GETL       	R4, t36
+	  51: ANDL       	$0x1, t36
+	  52: PUTL       	t36, R23
+	  53: INCEIPL       	$4
+
+	0x2547EAB0:  93210024  stw r25,36(r1)
+	  54: GETL       	R25, t38
+	  55: GETL       	R1, t40
+	  56: ADDL       	$0x24, t40
+	  57: STL       	t38, (t40)
+	  58: INCEIPL       	$4
+
+	0x2547EAB4:  7C992378  or r25,r4,r4
+	  59: GETL       	R4, t42
+	  60: PUTL       	t42, R25
+	  61: INCEIPL       	$4
+
+	0x2547EAB8:  93410028  stw r26,40(r1)
+	  62: GETL       	R26, t44
+	  63: GETL       	R1, t46
+	  64: ADDL       	$0x28, t46
+	  65: STL       	t44, (t46)
+	  66: INCEIPL       	$4
+
+	0x2547EABC:  3B400000  li r26,0
+	  67: MOVL       	$0x0, t48
+	  68: PUTL       	t48, R26
+	  69: INCEIPL       	$4
+
+	0x2547EAC0:  9361002C  stw r27,44(r1)
+	  70: GETL       	R27, t50
+	  71: GETL       	R1, t52
+	  72: ADDL       	$0x2C, t52
+	  73: STL       	t50, (t52)
+	  74: INCEIPL       	$4
+
+	0x2547EAC4:  7F1BC378  or r27,r24,r24
+	  75: GETL       	R24, t54
+	  76: PUTL       	t54, R27
+	  77: INCEIPL       	$4
+
+	0x2547EAC8:  93810030  stw r28,48(r1)
+	  78: GETL       	R28, t56
+	  79: GETL       	R1, t58
+	  80: ADDL       	$0x30, t58
+	  81: STL       	t56, (t58)
+	  82: INCEIPL       	$4
+
+	0x2547EACC:  7C7C1B78  or r28,r3,r3
+	  83: GETL       	R3, t60
+	  84: PUTL       	t60, R28
+	  85: INCEIPL       	$4
+
+	0x2547EAD0:  93A10034  stw r29,52(r1)
+	  86: GETL       	R29, t62
+	  87: GETL       	R1, t64
+	  88: ADDL       	$0x34, t64
+	  89: STL       	t62, (t64)
+	  90: INCEIPL       	$4
+
+	0x2547EAD4:  93E1003C  stw r31,60(r1)
+	  91: GETL       	R31, t66
+	  92: GETL       	R1, t68
+	  93: ADDL       	$0x3C, t68
+	  94: STL       	t66, (t68)
+	  95: INCEIPL       	$4
+
+	0x2547EAD8:  90010044  stw r0,68(r1)
+	  96: GETL       	R0, t70
+	  97: GETL       	R1, t72
+	  98: ADDL       	$0x44, t72
+	  99: STL       	t70, (t72)
+	 100: INCEIPL       	$4
+
+	0x2547EADC:  83BB0000  lwz r29,0(r27)
+	 101: GETL       	R27, t74
+	 102: LDL       	(t74), t76
+	 103: PUTL       	t76, R29
+	 104: INCEIPL       	$4
+
+	0x2547EAE0:  3B7B0018  addi r27,r27,24
+	 105: GETL       	R27, t78
+	 106: ADDL       	$0x18, t78
+	 107: PUTL       	t78, R27
+	 108: INCEIPL       	$4
+
+	0x2547EAE4:  2F9D0000  cmpi cr7,r29,0
+	 109: GETL       	R29, t80
+	 110: CMP0L       	t80, t82  (-rSo)
+	 111: ICRFL       	t82, $0x7, CR
+	 112: INCEIPL       	$4
+
+	0x2547EAE8:  419E0028  bc 12,30,0x2547EB10
+	 113: Js30o       	$0x2547EB10
+
+
+
+. 884 2547EA80 108
+. 93 C1 00 38 7F C8 02 A6 93 01 00 20 92 81 00 10 54 94 E8 FE 92 A1 00 14 54 95 F0 BE 83 1E 04 C8 92 C1 00 18 54 96 F8 7E 92 E1 00 1C 54 97 07 FE 93 21 00 24 7C 99 23 78 93 41 00 28 3B 40 00 00 93 61 00 2C 7F 1B C3 78 93 81 00 30 7C 7C 1B 78 93 A1 00 34 93 E1 00 3C 90 01 00 44 83 BB 00 00 3B 7B 00 18 2F 9D 00 00 41 9E 00 28
+==== BB 885 (0x2547EAEC) approx BBs exec'd 0 ====
+
+	0x2547EAEC:  807D01A0  lwz r3,416(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1A0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547EAF0:  7C03E040  cmpl cr0,r3,r28
+	   5: GETL       	R3, t4
+	   6: GETL       	R28, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547EAF4:  41810010  bc 12,1,0x2547EB04
+	  10: Js01o       	$0x2547EB04
+
+
+
+. 885 2547EAEC 12
+. 80 7D 01 A0 7C 03 E0 40 41 81 00 10
+==== BB 886 (0x2547EAF8) approx BBs exec'd 0 ====
+
+	0x2547EAF8:  809D01A8  lwz r4,424(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1A8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547EAFC:  7C84E040  cmpl cr1,r4,r28
+	   5: GETL       	R4, t4
+	   6: GETL       	R28, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x2547EB00:  41850084  bc 12,5,0x2547EB84
+	  10: Js05o       	$0x2547EB84
+
+
+
+. 886 2547EAF8 12
+. 80 9D 01 A8 7C 84 E0 40 41 85 00 84
+==== BB 887 (0x2547EB04) approx BBs exec'd 0 ====
+
+	0x2547EB04:  83BD000C  lwz r29,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547EB08:  2F9D0000  cmpi cr7,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547EB0C:  409EFFE0  bc 4,30,0x2547EAEC
+	   9: Jc30o       	$0x2547EAEC
+
+
+
+. 887 2547EB04 12
+. 83 BD 00 0C 2F 9D 00 00 40 9E FF E0
+==== BB 888 (0x2547EB84) approx BBs exec'd 0 ====
+
+	0x2547EB84:  2F170000  cmpi cr6,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547EB88:  419A0018  bc 12,26,0x2547EBA0
+	   4: Js26o       	$0x2547EBA0
+
+
+
+. 888 2547EB84 8
+. 2F 17 00 00 41 9A 00 18
+==== BB 889 (0x2547EBA0) approx BBs exec'd 0 ====
+
+	0x2547EBA0:  72C00001  andi. r0,r22,0x1
+	   0: GETL       	R22, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547EBA4:  41820018  bc 12,2,0x2547EBBC
+	   6: Js02o       	$0x2547EBBC
+
+
+
+. 889 2547EBA0 8
+. 72 C0 00 01 41 82 00 18
+==== BB 890 (0x2547EBBC) approx BBs exec'd 0 ====
+
+	0x2547EBBC:  72A00001  andi. r0,r21,0x1
+	   0: GETL       	R21, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547EBC0:  41820018  bc 12,2,0x2547EBD8
+	   6: Js02o       	$0x2547EBD8
+
+
+
+. 890 2547EBBC 8
+. 72 A0 00 01 41 82 00 18
+==== BB 891 (0x2547EBC4) approx BBs exec'd 0 ====
+
+	0x2547EBC4:  807E03E4  lwz r3,996(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x3E4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547EBC8:  809D0004  lwz r4,4(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547EBCC:  48004315  bl 0x25482EE0
+	  10: MOVL       	$0x2547EBD0, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 891 2547EBC4 12
+. 80 7E 03 E4 80 9D 00 04 48 00 43 15
+==== BB 892 (0x2547EBD0) approx BBs exec'd 0 ====
+
+	0x2547EBD0:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547EBD4:  419A042C  bc 12,26,0x2547F000
+	   4: Js26o       	$0x2547F000
+
+
+
+. 892 2547EBD0 8
+. 2F 03 00 00 41 9A 04 2C
+==== BB 893 (0x2547EBD8) approx BBs exec'd 0 ====
+
+	0x2547EBD8:  72800001  andi. r0,r20,0x1
+	   0: GETL       	R20, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547EBDC:  41820018  bc 12,2,0x2547EBF4
+	   6: Js02o       	$0x2547EBF4
+
+
+
+. 893 2547EBD8 8
+. 72 80 00 01 41 82 00 18
+==== BB 894 (0x2547EBE0) approx BBs exec'd 0 ====
+
+	0x2547EBE0:  807E03E8  lwz r3,1000(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x3E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547EBE4:  809D0004  lwz r4,4(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547EBE8:  480042F9  bl 0x25482EE0
+	  10: MOVL       	$0x2547EBEC, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 894 2547EBE0 12
+. 80 7E 03 E8 80 9D 00 04 48 00 42 F9
+==== BB 895 (0x2547EBEC) approx BBs exec'd 0 ====
+
+	0x2547EBEC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547EBF0:  419E0410  bc 12,30,0x2547F000
+	   4: Js30o       	$0x2547F000
+
+
+
+. 895 2547EBEC 8
+. 2F 83 00 00 41 9E 04 10
+==== BB 896 (0x2547EBF4) approx BBs exec'd 0 ====
+
+	0x2547EBF4:  83FD001C  lwz r31,28(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x2547EBF8:  2C9F0000  cmpi cr1,r31,0
+	   5: GETL       	R31, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547EBFC:  41A6FF14  bc 13,6,0x2547EB10
+	   9: Js06o       	$0x2547EB10
+
+
+
+. 896 2547EBF4 12
+. 83 FD 00 1C 2C 9F 00 00 41 A6 FF 14
+==== BB 897 (0x2547EC00) approx BBs exec'd 0 ====
+
+	0x2547EC00:  2F970000  cmpi cr7,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547EC04:  56C807FE  rlwinm r8,r22,0,31,31
+	   4: GETL       	R22, t4
+	   5: ANDL       	$0x1, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0x2547EC08:  56A707FE  rlwinm r7,r21,0,31,31
+	   8: GETL       	R21, t6
+	   9: ANDL       	$0x1, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0x2547EC0C:  2C880000  cmpi cr1,r8,0
+	  12: GETL       	R8, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0x2547EC10:  2F070000  cmpi cr6,r7,0
+	  16: GETL       	R7, t12
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x6, CR
+	  19: INCEIPL       	$4
+
+	0x2547EC14:  72800001  andi. r0,r20,0x1
+	  20: GETL       	R20, t16
+	  21: ANDL       	$0x1, t16
+	  22: PUTL       	t16, R0
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x2547EC18:  409E004C  bc 4,30,0x2547EC64
+	  26: Jc30o       	$0x2547EC64
+
+
+
+. 897 2547EC00 28
+. 2F 97 00 00 56 C8 07 FE 56 A7 07 FE 2C 88 00 00 2F 07 00 00 72 80 00 01 40 9E 00 4C
+==== BB 898 (0x2547EC1C) approx BBs exec'd 0 ====
+
+	0x2547EC1C:  408200B8  bc 4,2,0x2547ECD4
+	   0: Jc02o       	$0x2547ECD4
+
+
+
+. 898 2547EC1C 4
+. 40 82 00 B8
+==== BB 899 (0x2547ECD4) approx BBs exec'd 0 ====
+
+	0x2547ECD4:  419A01AC  bc 12,26,0x2547EE80
+	   0: Js26o       	$0x2547EE80
+
+
+
+. 899 2547ECD4 4
+. 41 9A 01 AC
+==== BB 900 (0x2547ECD8) approx BBs exec'd 0 ====
+
+	0x2547ECD8:  4186020C  bc 12,6,0x2547EEE4
+	   0: Js06o       	$0x2547EEE4
+
+
+
+. 900 2547ECD8 4
+. 41 86 02 0C
+==== BB 901 (0x2547EEE4) approx BBs exec'd 0 ====
+
+	0x2547EEE4:  83BF0000  lwz r29,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0x2547EEE8:  807E03E4  lwz r3,996(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x3E4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0x2547EEEC:  7FA4EB78  or r4,r29,r29
+	   9: GETL       	R29, t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0x2547EEF0:  48003FF1  bl 0x25482EE0
+	  12: MOVL       	$0x2547EEF4, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 901 2547EEE4 16
+. 83 BF 00 00 80 7E 03 E4 7F A4 EB 78 48 00 3F F1
+==== BB 902 (0x2547EEF4) approx BBs exec'd 0 ====
+
+	0x2547EEF4:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547EEF8:  2F030000  cmpi cr6,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x6, CR
+	   6: INCEIPL       	$4
+
+	0x2547EEFC:  807E03E8  lwz r3,1000(r30)
+	   7: GETL       	R30, t6
+	   8: ADDL       	$0x3E8, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R3
+	  11: INCEIPL       	$4
+
+	0x2547EF00:  419A0100  bc 12,26,0x2547F000
+	  12: Js26o       	$0x2547F000
+
+
+
+. 902 2547EEF4 16
+. 7F A4 EB 78 2F 03 00 00 80 7E 03 E8 41 9A 01 00
+==== BB 903 (0x2547EF04) approx BBs exec'd 0 ====
+
+	0x2547EF04:  48003FDD  bl 0x25482EE0
+	   0: MOVL       	$0x2547EF08, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 903 2547EF04 4
+. 48 00 3F DD
+==== BB 904 (0x2547EF08) approx BBs exec'd 0 ====
+
+	0x2547EF08:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547EF0C:  418600F4  bc 12,6,0x2547F000
+	   4: Js06o       	$0x2547F000
+
+
+
+. 904 2547EF08 8
+. 2C 83 00 00 41 86 00 F4
+==== BB 905 (0x2547EF10) approx BBs exec'd 0 ====
+
+	0x2547EF10:  83FF0004  lwz r31,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x2547EF14:  2C1F0000  cmpi cr0,r31,0
+	   5: GETL       	R31, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547EF18:  4082FFCC  bc 4,2,0x2547EEE4
+	   9: Jc02o       	$0x2547EEE4
+
+
+
+. 905 2547EF10 12
+. 83 FF 00 04 2C 1F 00 00 40 82 FF CC
+==== BB 906 (0x2547EF1C) approx BBs exec'd 0 ====
+
+	0x2547EF1C:  4BFFFBF4  b 0x2547EB10
+	   0: JMPo       	$0x2547EB10  ($4)
+
+
+
+. 906 2547EF1C 4
+. 4B FF FB F4
+==== BB 907 (0x2547EB10) approx BBs exec'd 0 ====
+
+	0x2547EB10:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547EB14:  2C1A000F  cmpi cr0,r26,15
+	   4: GETL       	R26, t2
+	   5: MOVL       	$0xF, t6
+	   6: CMPL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547EB18:  4081FFC4  bc 4,1,0x2547EADC
+	   9: Jc01o       	$0x2547EADC
+
+
+
+. 907 2547EB10 12
+. 3B 5A 00 01 2C 1A 00 0F 40 81 FF C4
+==== BB 908 (0x2547EADC) approx BBs exec'd 0 ====
+
+	0x2547EADC:  83BB0000  lwz r29,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0x2547EAE0:  3B7B0018  addi r27,r27,24
+	   4: GETL       	R27, t4
+	   5: ADDL       	$0x18, t4
+	   6: PUTL       	t4, R27
+	   7: INCEIPL       	$4
+
+	0x2547EAE4:  2F9D0000  cmpi cr7,r29,0
+	   8: GETL       	R29, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x2547EAE8:  419E0028  bc 12,30,0x2547EB10
+	  12: Js30o       	$0x2547EB10
+
+
+
+. 908 2547EADC 16
+. 83 BB 00 00 3B 7B 00 18 2F 9D 00 00 41 9E 00 28
+==== BB 909 (0x2547EB1C) approx BBs exec'd 0 ====
+
+	0x2547EB1C:  73200008  andi. r0,r25,0x8
+	   0: GETL       	R25, t0
+	   1: ANDL       	$0x8, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547EB20:  41820020  bc 12,2,0x2547EB40
+	   6: Js02o       	$0x2547EB40
+
+
+
+. 909 2547EB1C 8
+. 73 20 00 08 41 82 00 20
+==== BB 910 (0x2547EB24) approx BBs exec'd 0 ====
+
+	0x2547EB24:  80B80358  lwz r5,856(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x358, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547EB28:  7F85E040  cmpl cr7,r5,r28
+	   5: GETL       	R5, t4
+	   6: GETL       	R28, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x2547EB2C:  419D0014  bc 12,29,0x2547EB40
+	  10: Js29o       	$0x2547EB40
+
+
+
+. 910 2547EB24 12
+. 80 B8 03 58 7F 85 E0 40 41 9D 00 14
+==== BB 911 (0x2547EB30) approx BBs exec'd 0 ====
+
+	0x2547EB30:  80D80360  lwz r6,864(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x360, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547EB34:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547EB38:  7C86E040  cmpl cr1,r6,r28
+	   8: GETL       	R6, t6
+	   9: GETL       	R28, t8
+	  10: CMPUL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0x2547EB3C:  41850008  bc 12,5,0x2547EB44
+	  13: Js05o       	$0x2547EB44
+
+
+
+. 911 2547EB30 16
+. 80 D8 03 60 38 60 00 00 7C 86 E0 40 41 85 00 08
+==== BB 912 (0x2547EB44) approx BBs exec'd 0 ====
+
+	0x2547EB44:  81210044  lwz r9,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547EB48:  82810010  lwz r20,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R20
+	   9: INCEIPL       	$4
+
+	0x2547EB4C:  82A10014  lwz r21,20(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R21
+	  14: INCEIPL       	$4
+
+	0x2547EB50:  7D2803A6  mtlr r9
+	  15: GETL       	R9, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x2547EB54:  82C10018  lwz r22,24(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R22
+	  22: INCEIPL       	$4
+
+	0x2547EB58:  82E1001C  lwz r23,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R23
+	  27: INCEIPL       	$4
+
+	0x2547EB5C:  83010020  lwz r24,32(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R24
+	  32: INCEIPL       	$4
+
+	0x2547EB60:  83210024  lwz r25,36(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x24, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R25
+	  37: INCEIPL       	$4
+
+	0x2547EB64:  83410028  lwz r26,40(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x28, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R26
+	  42: INCEIPL       	$4
+
+	0x2547EB68:  8361002C  lwz r27,44(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x2C, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R27
+	  47: INCEIPL       	$4
+
+	0x2547EB6C:  83810030  lwz r28,48(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x30, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R28
+	  52: INCEIPL       	$4
+
+	0x2547EB70:  83A10034  lwz r29,52(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x34, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R29
+	  57: INCEIPL       	$4
+
+	0x2547EB74:  83C10038  lwz r30,56(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x38, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R30
+	  62: INCEIPL       	$4
+
+	0x2547EB78:  83E1003C  lwz r31,60(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x3C, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R31
+	  67: INCEIPL       	$4
+
+	0x2547EB7C:  38210040  addi r1,r1,64
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x40, t54
+	  70: PUTL       	t54, R1
+	  71: INCEIPL       	$4
+
+	0x2547EB80:  4E800020  blr
+	  72: GETL       	LR, t56
+	  73: JMPo-r       	t56  ($4)
+
+
+
+. 912 2547EB44 64
+. 81 21 00 44 82 81 00 10 82 A1 00 14 7D 28 03 A6 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 913 (0x2547E920) approx BBs exec'd 0 ====
+
+	0x2547E920:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547E924:  409E011C  bc 4,30,0x2547EA40
+	   4: Jc30o       	$0x2547EA40
+
+
+
+. 913 2547E920 8
+. 2F 83 00 00 40 9E 01 1C
+==== BB 914 (0x2547E928) approx BBs exec'd 0 ====
+
+	0x2547E928:  811E04C0  lwz r8,1216(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547E92C:  81790000  lwz r11,0(r25)
+	   5: GETL       	R25, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0x2547E930:  80680000  lwz r3,0(r8)
+	   9: GETL       	R8, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R3
+	  12: INCEIPL       	$4
+
+	0x2547E934:  7C0B1800  cmp cr0,r11,r3
+	  13: GETL       	R11, t12
+	  14: GETL       	R3, t14
+	  15: CMPL       	t12, t14, t16  (-rSo)
+	  16: ICRFL       	t16, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0x2547E938:  40820108  bc 4,2,0x2547EA40
+	  18: Jc02o       	$0x2547EA40
+
+
+
+. 914 2547E928 20
+. 81 1E 04 C0 81 79 00 00 80 68 00 00 7C 0B 18 00 40 82 01 08
+==== BB 915 (0x2547E93C) approx BBs exec'd 0 ====
+
+	0x2547E93C:  83BE03D8  lwz r29,984(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x3D8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547E940:  839E050C  lwz r28,1292(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x50C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x2547E944:  895D0000  lbz r10,0(r29)
+	  10: GETL       	R29, t8
+	  11: LDB       	(t8), t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0x2547E948:  837E0514  lwz r27,1300(r30)
+	  14: GETL       	R30, t12
+	  15: ADDL       	$0x514, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R27
+	  18: INCEIPL       	$4
+
+	0x2547E94C:  2C8A0000  cmpi cr1,r10,0
+	  19: GETL       	R10, t16
+	  20: CMP0L       	t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0x2547E950:  418600BC  bc 12,6,0x2547EA0C
+	  23: Js06o       	$0x2547EA0C
+
+
+
+. 915 2547E93C 24
+. 83 BE 03 D8 83 9E 05 0C 89 5D 00 00 83 7E 05 14 2C 8A 00 00 41 86 00 BC
+==== BB 916 (0x2547EA0C) approx BBs exec'd 0 ====
+
+	0x2547EA0C:  809A0004  lwz r4,4(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547EA10:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547EA14:  80BC0000  lwz r5,0(r28)
+	   8: GETL       	R28, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x2547EA18:  48003DA1  bl 0x254827B8
+	  12: MOVL       	$0x2547EA1C, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0x254827B8  ($4)
+
+
+
+. 916 2547EA0C 16
+. 80 9A 00 04 7F E3 FB 78 80 BC 00 00 48 00 3D A1
+==== BB 917 (0x254827C4) approx BBs exec'd 0 ====
+
+	0x254827C4:  4BFFEB6C  b 0x25481330
+	   0: JMPo       	$0x25481330  ($4)
+
+
+
+. 917 254827C4 4
+. 4B FF EB 6C
+==== BB 918 (0x2547EA1C) approx BBs exec'd 0 ====
+
+	0x2547EA1C:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547EA20:  419AFFA0  bc 12,26,0x2547E9C0
+	   4: Js26o       	$0x2547E9C0
+
+
+
+. 918 2547EA1C 8
+. 2F 03 00 00 41 9A FF A0
+==== BB 919 (0x2547EA24) approx BBs exec'd 0 ====
+
+	0x2547EA24:  837E0514  lwz r27,1300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x514, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x2547EA28:  801B0000  lwz r0,0(r27)
+	   5: GETL       	R27, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x2547EA2C:  2F800016  cmpi cr7,r0,22
+	   9: GETL       	R0, t8
+	  10: MOVL       	$0x16, t12
+	  11: CMPL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x2547EA30:  409EFFAC  bc 4,30,0x2547E9DC
+	  14: Jc30o       	$0x2547E9DC
+
+
+
+. 919 2547EA24 16
+. 83 7E 05 14 80 1B 00 00 2F 80 00 16 40 9E FF AC
+==== BB 920 (0x2547EA34) approx BBs exec'd 0 ====
+
+	0x2547EA34:  39800001  li r12,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0x2547EA38:  999D0000  stb r12,0(r29)
+	   3: GETL       	R12, t2
+	   4: GETL       	R29, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0x2547EA3C:  4BFFFF18  b 0x2547E954
+	   7: JMPo       	$0x2547E954  ($4)
+
+
+
+. 920 2547EA34 12
+. 39 80 00 01 99 9D 00 00 4B FF FF 18
+==== BB 921 (0x2547E954) approx BBs exec'd 0 ====
+
+	0x2547E954:  83BA0004  lwz r29,4(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547E958:  7D3DFA14  add r9,r29,r31
+	   5: GETL       	R29, t4
+	   6: GETL       	R31, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x2547E95C:  57BF1838  rlwinm r31,r29,3,0,28
+	  10: GETL       	R29, t8
+	  11: SHLL       	$0x3, t8
+	  12: PUTL       	t8, R31
+	  13: INCEIPL       	$4
+
+	0x2547E960:  7FBF4850  subf r29,r31,r9
+	  14: GETL       	R31, t10
+	  15: GETL       	R9, t12
+	  16: SUBL       	t10, t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x2547E964:  801C0000  lwz r0,0(r28)
+	  19: GETL       	R28, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R0
+	  22: INCEIPL       	$4
+
+	0x2547E968:  7FE4FB78  or r4,r31,r31
+	  23: GETL       	R31, t18
+	  24: PUTL       	t18, R4
+	  25: INCEIPL       	$4
+
+	0x2547E96C:  7FA3EB78  or r3,r29,r29
+	  26: GETL       	R29, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x2547E970:  5405020C  rlwinm r5,r0,0,8,6
+	  29: GETL       	R0, t22
+	  30: ANDL       	$0xFEFFFFFF, t22
+	  31: PUTL       	t22, R5
+	  32: INCEIPL       	$4
+
+	0x2547E974:  48003E45  bl 0x254827B8
+	  33: MOVL       	$0x2547E978, t24
+	  34: PUTL       	t24, LR
+	  35: JMPo-c       	$0x254827B8  ($4)
+
+
+
+. 921 2547E954 36
+. 83 BA 00 04 7D 3D FA 14 57 BF 18 38 7F BF 48 50 80 1C 00 00 7F E4 FB 78 7F A3 EB 78 54 05 02 0C 48 00 3E 45
+==== BB 922 (0x2547E978) approx BBs exec'd 0 ====
+
+	0x2547E978:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547E97C:  40820024  bc 4,2,0x2547E9A0
+	   4: Jc02o       	$0x2547E9A0
+
+
+
+. 922 2547E978 8
+. 2C 03 00 00 40 82 00 24
+==== BB 923 (0x2547E9A0) approx BBs exec'd 0 ====
+
+	0x2547E9A0:  801B0000  lwz r0,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547E9A4:  2C80000C  cmpi cr1,r0,12
+	   4: GETL       	R0, t4
+	   5: MOVL       	$0xC, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547E9A8:  40860034  bc 4,6,0x2547E9DC
+	   9: Jc06o       	$0x2547E9DC
+
+
+
+. 923 2547E9A0 12
+. 80 1B 00 00 2C 80 00 0C 40 86 00 34
+==== BB 924 (0x2547E9AC) approx BBs exec'd 0 ====
+
+	0x2547E9AC:  80BA0004  lwz r5,4(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547E9B0:  7F05F800  cmp cr6,r5,r31
+	   5: GETL       	R5, t4
+	   6: GETL       	R31, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x2547E9B4:  57FFF87E  rlwinm r31,r31,31,1,31
+	  10: GETL       	R31, t10
+	  11: SHRL       	$0x1, t10
+	  12: PUTL       	t10, R31
+	  13: INCEIPL       	$4
+
+	0x2547E9B8:  7FBDFA14  add r29,r29,r31
+	  14: GETL       	R29, t12
+	  15: GETL       	R31, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R29
+	  18: INCEIPL       	$4
+
+	0x2547E9BC:  409AFFA8  bc 4,26,0x2547E964
+	  19: Jc26o       	$0x2547E964
+
+
+
+. 924 2547E9AC 20
+. 80 BA 00 04 7F 05 F8 00 57 FF F8 7E 7F BD FA 14 40 9A FF A8
+==== BB 925 (0x2547E964) approx BBs exec'd 0 ====
+
+	0x2547E964:  801C0000  lwz r0,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547E968:  7FE4FB78  or r4,r31,r31
+	   4: GETL       	R31, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0x2547E96C:  7FA3EB78  or r3,r29,r29
+	   7: GETL       	R29, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547E970:  5405020C  rlwinm r5,r0,0,8,6
+	  10: GETL       	R0, t8
+	  11: ANDL       	$0xFEFFFFFF, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x2547E974:  48003E45  bl 0x254827B8
+	  14: MOVL       	$0x2547E978, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x254827B8  ($4)
+
+
+
+. 925 2547E964 20
+. 80 1C 00 00 7F E4 FB 78 7F A3 EB 78 54 05 02 0C 48 00 3E 45
+==== BB 926 (0x2547E980) approx BBs exec'd 0 ====
+
+	0x2547E980:  801C0000  lwz r0,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547E984:  7FBFE850  subf r29,r31,r29
+	   4: GETL       	R31, t4
+	   5: GETL       	R29, t6
+	   6: SUBL       	t4, t6
+	   7: PUTL       	t6, R29
+	   8: INCEIPL       	$4
+
+	0x2547E988:  7FE4FB78  or r4,r31,r31
+	   9: GETL       	R31, t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0x2547E98C:  7FA3EB78  or r3,r29,r29
+	  12: GETL       	R29, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x2547E990:  5405020C  rlwinm r5,r0,0,8,6
+	  15: GETL       	R0, t12
+	  16: ANDL       	$0xFEFFFFFF, t12
+	  17: PUTL       	t12, R5
+	  18: INCEIPL       	$4
+
+	0x2547E994:  48003E25  bl 0x254827B8
+	  19: MOVL       	$0x2547E998, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x254827B8  ($4)
+
+
+
+. 926 2547E980 24
+. 80 1C 00 00 7F BF E8 50 7F E4 FB 78 7F A3 EB 78 54 05 02 0C 48 00 3E 25
+==== BB 927 (0x2547E998) approx BBs exec'd 0 ====
+
+	0x2547E998:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547E99C:  41A2FFE4  bc 13,2,0x2547E980
+	   4: Js02o       	$0x2547E980
+
+
+
+. 927 2547E998 8
+. 2C 03 00 00 41 A2 FF E4
+==== BB 928 (0x2547E9C0) approx BBs exec'd 0 ====
+
+	0x2547E9C0:  835E04C8  lwz r26,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547E9C4:  3B800000  li r28,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2547E9C8:  93990000  stw r28,0(r25)
+	   8: GETL       	R28, t6
+	   9: GETL       	R25, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x2547E9CC:  38000000  li r0,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x2547E9D0:  837A0400  lwz r27,1024(r26)
+	  15: GETL       	R26, t12
+	  16: ADDL       	$0x400, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R27
+	  19: INCEIPL       	$4
+
+	0x2547E9D4:  63790001  ori r25,r27,0x1
+	  20: GETL       	R27, t16
+	  21: ORL       	$0x1, t16
+	  22: PUTL       	t16, R25
+	  23: INCEIPL       	$4
+
+	0x2547E9D8:  933A0400  stw r25,1024(r26)
+	  24: GETL       	R25, t18
+	  25: GETL       	R26, t20
+	  26: ADDL       	$0x400, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x2547E9DC:  83E10034  lwz r31,52(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x34, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R31
+	  33: INCEIPL       	$4
+
+	0x2547E9E0:  7C030378  or r3,r0,r0
+	  34: GETL       	R0, t26
+	  35: PUTL       	t26, R3
+	  36: INCEIPL       	$4
+
+	0x2547E9E4:  83210014  lwz r25,20(r1)
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x14, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R25
+	  41: INCEIPL       	$4
+
+	0x2547E9E8:  7FE803A6  mtlr r31
+	  42: GETL       	R31, t32
+	  43: PUTL       	t32, LR
+	  44: INCEIPL       	$4
+
+	0x2547E9EC:  83410018  lwz r26,24(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x18, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R26
+	  49: INCEIPL       	$4
+
+	0x2547E9F0:  8361001C  lwz r27,28(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x1C, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R27
+	  54: INCEIPL       	$4
+
+	0x2547E9F4:  83810020  lwz r28,32(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x20, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R28
+	  59: INCEIPL       	$4
+
+	0x2547E9F8:  83A10024  lwz r29,36(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x24, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R29
+	  64: INCEIPL       	$4
+
+	0x2547E9FC:  83C10028  lwz r30,40(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x28, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R30
+	  69: INCEIPL       	$4
+
+	0x2547EA00:  83E1002C  lwz r31,44(r1)
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x2C, t54
+	  72: LDL       	(t54), t56
+	  73: PUTL       	t56, R31
+	  74: INCEIPL       	$4
+
+	0x2547EA04:  38210030  addi r1,r1,48
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x30, t58
+	  77: PUTL       	t58, R1
+	  78: INCEIPL       	$4
+
+	0x2547EA08:  4E800020  blr
+	  79: GETL       	LR, t60
+	  80: JMPo-r       	t60  ($4)
+
+
+
+. 928 2547E9C0 76
+. 83 5E 04 C8 3B 80 00 00 93 99 00 00 38 00 00 00 83 7A 04 00 63 79 00 01 93 3A 04 00 83 E1 00 34 7C 03 03 78 83 21 00 14 7F E8 03 A6 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 929 (0x254761B8) approx BBs exec'd 0 ====
+
+	0x254761B8:  811E01E0  lwz r8,480(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1E0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254761BC:  7C6B1B79  or. r11,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R11
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x254761C0:  917F00A0  stw r11,160(r31)
+	  10: GETL       	R11, t8
+	  11: GETL       	R31, t10
+	  12: ADDL       	$0xA0, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x254761C4:  4182FD1C  bc 12,2,0x25475EE0
+	  15: Js02o       	$0x25475EE0
+
+
+
+. 929 254761B8 16
+. 81 1E 01 E0 7C 6B 1B 79 91 7F 00 A0 41 82 FD 1C
+==== BB 930 (0x25476ED0) approx BBs exec'd 0 ====
+
+	0x25476ED0:  809D001C  lwz r4,28(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25476ED4:  3B600000  li r27,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R27
+	   7: INCEIPL       	$4
+
+	0x25476ED8:  48000020  b 0x25476EF8
+	   8: JMPo       	$0x25476EF8  ($4)
+
+
+
+. 930 25476ED0 12
+. 80 9D 00 1C 3B 60 00 00 48 00 00 20
+==== BB 931 (0x25476EF8) approx BBs exec'd 0 ====
+
+	0x25476EF8:  2F040000  cmpi cr6,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25476EFC:  409AFFE0  bc 4,26,0x25476EDC
+	   4: Jc26o       	$0x25476EDC
+
+
+
+. 931 25476EF8 8
+. 2F 04 00 00 40 9A FF E0
+==== BB 932 (0x25476EDC) approx BBs exec'd 0 ====
+
+	0x25476EDC:  7C9B2378  or r27,r4,r4
+	   0: GETL       	R4, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0x25476EE0:  80840000  lwz r4,0(r4)
+	   3: GETL       	R4, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0x25476EE4:  7F83E378  or r3,r28,r28
+	   7: GETL       	R28, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25476EE8:  4800BFF9  bl 0x25482EE0
+	  10: MOVL       	$0x25476EEC, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 932 25476EDC 16
+. 7C 9B 23 78 80 84 00 00 7F 83 E3 78 48 00 BF F9
+==== BB 933 (0x25476EEC) approx BBs exec'd 0 ====
+
+	0x25476EEC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25476EF0:  419E0058  bc 12,30,0x25476F48
+	   4: Js30o       	$0x25476F48
+
+
+
+. 933 25476EEC 8
+. 2F 83 00 00 41 9E 00 58
+==== BB 934 (0x25476EF4) approx BBs exec'd 0 ====
+
+	0x25476EF4:  809B0004  lwz r4,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25476EF8:  2F040000  cmpi cr6,r4,0
+	   5: GETL       	R4, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25476EFC:  409AFFE0  bc 4,26,0x25476EDC
+	   9: Jc26o       	$0x25476EDC
+
+
+
+. 934 25476EF4 12
+. 80 9B 00 04 2F 04 00 00 40 9A FF E0
+==== BB 935 (0x25476F00) approx BBs exec'd 0 ====
+
+	0x25476F00:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476F04:  4800C0CD  bl 0x25482FD0
+	   3: MOVL       	$0x25476F08, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+
+. 935 25476F00 8
+. 7F 83 E3 78 48 00 C0 CD
+==== BB 936 (0x25476F08) approx BBs exec'd 0 ====
+
+	0x25476F08:  7C661B78  or r6,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x25476F0C:  3863000D  addi r3,r3,13
+	   3: GETL       	R3, t2
+	   4: ADDL       	$0xD, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0x25476F10:  3B460001  addi r26,r6,1
+	   7: GETL       	R6, t4
+	   8: ADDL       	$0x1, t4
+	   9: PUTL       	t4, R26
+	  10: INCEIPL       	$4
+
+	0x25476F14:  48020B11  bl 0x25497A24
+	  11: MOVL       	$0x25476F18, t6
+	  12: PUTL       	t6, LR
+	  13: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 936 25476F08 16
+. 7C 66 1B 78 38 63 00 0D 3B 46 00 01 48 02 0B 11
+==== BB 937 (0x25476F18) approx BBs exec'd 0 ====
+
+	0x25476F18:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25476F1C:  80DE01AC  lwz r6,428(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1AC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25476F20:  7F84E378  or r4,r28,r28
+	  10: GETL       	R28, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25476F24:  4182013C  bc 12,2,0x25477060
+	  13: Js02o       	$0x25477060
+
+
+
+. 937 25476F18 16
+. 7C 7F 1B 79 80 DE 01 AC 7F 84 E3 78 41 82 01 3C
+==== BB 938 (0x25476F28) approx BBs exec'd 0 ====
+
+	0x25476F28:  7F84E378  or r4,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25476F2C:  7F45D378  or r5,r26,r26
+	   3: GETL       	R26, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25476F30:  387F000C  addi r3,r31,12
+	   6: GETL       	R31, t4
+	   7: ADDL       	$0xC, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0x25476F34:  4800CCAD  bl 0x25483BE0
+	  10: MOVL       	$0x25476F38, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 938 25476F28 16
+. 7F 84 E3 78 7F 45 D3 78 38 7F 00 0C 48 00 CC AD
+==== BB 939 (0x25476F38) approx BBs exec'd 0 ====
+
+	0x25476F38:  92DF0004  stw r22,4(r31)
+	   0: GETL       	R22, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x4, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25476F3C:  907F0000  stw r3,0(r31)
+	   5: GETL       	R3, t4
+	   6: GETL       	R31, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x25476F40:  92DF0008  stw r22,8(r31)
+	   9: GETL       	R22, t8
+	  10: GETL       	R31, t10
+	  11: ADDL       	$0x8, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25476F44:  93FB0004  stw r31,4(r27)
+	  14: GETL       	R31, t12
+	  15: GETL       	R27, t14
+	  16: ADDL       	$0x4, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25476F48:  811D0180  lwz r8,384(r29)
+	  19: GETL       	R29, t16
+	  20: ADDL       	$0x180, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R8
+	  23: INCEIPL       	$4
+
+	0x25476F4C:  65070080  oris r7,r8,0x80
+	  24: GETL       	R8, t20
+	  25: ORL       	$0x800000, t20
+	  26: PUTL       	t20, R7
+	  27: INCEIPL       	$4
+
+	0x25476F50:  90FD0180  stw r7,384(r29)
+	  28: GETL       	R7, t22
+	  29: GETL       	R29, t24
+	  30: ADDL       	$0x180, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0x25476F54:  7FA3EB78  or r3,r29,r29
+	  33: GETL       	R29, t26
+	  34: PUTL       	t26, R3
+	  35: INCEIPL       	$4
+
+	0x25476F58:  83E10294  lwz r31,660(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x294, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0x25476F5C:  81810244  lwz r12,580(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x244, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R12
+	  45: INCEIPL       	$4
+
+	0x25476F60:  7FE803A6  mtlr r31
+	  46: GETL       	R31, t36
+	  47: PUTL       	t36, LR
+	  48: INCEIPL       	$4
+
+	0x25476F64:  81C10248  lwz r14,584(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x248, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R14
+	  53: INCEIPL       	$4
+
+	0x25476F68:  81E1024C  lwz r15,588(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x24C, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R15
+	  58: INCEIPL       	$4
+
+	0x25476F6C:  7D818120  mtcrf 0x18,r12
+	  59: GETL       	R12, t46
+	  60: ICRFL       	t46, $0x3, CR
+	  61: ICRFL       	t46, $0x4, CR
+	  62: INCEIPL       	$4
+
+	0x25476F70:  82010250  lwz r16,592(r1)
+	  63: GETL       	R1, t48
+	  64: ADDL       	$0x250, t48
+	  65: LDL       	(t48), t50
+	  66: PUTL       	t50, R16
+	  67: INCEIPL       	$4
+
+	0x25476F74:  82210254  lwz r17,596(r1)
+	  68: GETL       	R1, t52
+	  69: ADDL       	$0x254, t52
+	  70: LDL       	(t52), t54
+	  71: PUTL       	t54, R17
+	  72: INCEIPL       	$4
+
+	0x25476F78:  82410258  lwz r18,600(r1)
+	  73: GETL       	R1, t56
+	  74: ADDL       	$0x258, t56
+	  75: LDL       	(t56), t58
+	  76: PUTL       	t58, R18
+	  77: INCEIPL       	$4
+
+	0x25476F7C:  8261025C  lwz r19,604(r1)
+	  78: GETL       	R1, t60
+	  79: ADDL       	$0x25C, t60
+	  80: LDL       	(t60), t62
+	  81: PUTL       	t62, R19
+	  82: INCEIPL       	$4
+
+	0x25476F80:  82810260  lwz r20,608(r1)
+	  83: GETL       	R1, t64
+	  84: ADDL       	$0x260, t64
+	  85: LDL       	(t64), t66
+	  86: PUTL       	t66, R20
+	  87: INCEIPL       	$4
+
+	0x25476F84:  82A10264  lwz r21,612(r1)
+	  88: GETL       	R1, t68
+	  89: ADDL       	$0x264, t68
+	  90: LDL       	(t68), t70
+	  91: PUTL       	t70, R21
+	  92: INCEIPL       	$4
+
+	0x25476F88:  82C10268  lwz r22,616(r1)
+	  93: GETL       	R1, t72
+	  94: ADDL       	$0x268, t72
+	  95: LDL       	(t72), t74
+	  96: PUTL       	t74, R22
+	  97: INCEIPL       	$4
+
+	0x25476F8C:  82E1026C  lwz r23,620(r1)
+	  98: GETL       	R1, t76
+	  99: ADDL       	$0x26C, t76
+	 100: LDL       	(t76), t78
+	 101: PUTL       	t78, R23
+	 102: INCEIPL       	$4
+
+	0x25476F90:  83010270  lwz r24,624(r1)
+	 103: GETL       	R1, t80
+	 104: ADDL       	$0x270, t80
+	 105: LDL       	(t80), t82
+	 106: PUTL       	t82, R24
+	 107: INCEIPL       	$4
+
+	0x25476F94:  83210274  lwz r25,628(r1)
+	 108: GETL       	R1, t84
+	 109: ADDL       	$0x274, t84
+	 110: LDL       	(t84), t86
+	 111: PUTL       	t86, R25
+	 112: INCEIPL       	$4
+
+	0x25476F98:  83410278  lwz r26,632(r1)
+	 113: GETL       	R1, t88
+	 114: ADDL       	$0x278, t88
+	 115: LDL       	(t88), t90
+	 116: PUTL       	t90, R26
+	 117: INCEIPL       	$4
+
+	0x25476F9C:  8361027C  lwz r27,636(r1)
+	 118: GETL       	R1, t92
+	 119: ADDL       	$0x27C, t92
+	 120: LDL       	(t92), t94
+	 121: PUTL       	t94, R27
+	 122: INCEIPL       	$4
+
+	0x25476FA0:  83810280  lwz r28,640(r1)
+	 123: GETL       	R1, t96
+	 124: ADDL       	$0x280, t96
+	 125: LDL       	(t96), t98
+	 126: PUTL       	t98, R28
+	 127: INCEIPL       	$4
+
+	0x25476FA4:  83A10284  lwz r29,644(r1)
+	 128: GETL       	R1, t100
+	 129: ADDL       	$0x284, t100
+	 130: LDL       	(t100), t102
+	 131: PUTL       	t102, R29
+	 132: INCEIPL       	$4
+
+	0x25476FA8:  83C10288  lwz r30,648(r1)
+	 133: GETL       	R1, t104
+	 134: ADDL       	$0x288, t104
+	 135: LDL       	(t104), t106
+	 136: PUTL       	t106, R30
+	 137: INCEIPL       	$4
+
+	0x25476FAC:  83E1028C  lwz r31,652(r1)
+	 138: GETL       	R1, t108
+	 139: ADDL       	$0x28C, t108
+	 140: LDL       	(t108), t110
+	 141: PUTL       	t110, R31
+	 142: INCEIPL       	$4
+
+	0x25476FB0:  38210290  addi r1,r1,656
+	 143: GETL       	R1, t112
+	 144: ADDL       	$0x290, t112
+	 145: PUTL       	t112, R1
+	 146: INCEIPL       	$4
+
+	0x25476FB4:  4E800020  blr
+	 147: GETL       	LR, t114
+	 148: JMPo-r       	t114  ($4)
+
+
+
+. 939 25476F38 128
+. 92 DF 00 04 90 7F 00 00 92 DF 00 08 93 FB 00 04 81 1D 01 80 65 07 00 80 90 FD 01 80 7F A3 EB 78 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+==== BB 940 (0x2547726C) approx BBs exec'd 0 ====
+
+	0x2547726C:  2F800000  cmpi cr7,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25477270:  39200001  li r9,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0x25477274:  409E001C  bc 4,30,0x25477290
+	   7: Jc30o       	$0x25477290
+
+
+
+. 940 2547726C 12
+. 2F 80 00 00 39 20 00 01 40 9E 00 1C
+==== BB 941 (0x25477278) approx BBs exec'd 0 ====
+
+	0x25477278:  817B0094  lwz r11,148(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x94, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547727C:  2C8B0000  cmpi cr1,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25477280:  408601DC  bc 4,6,0x2547745C
+	   9: Jc06o       	$0x2547745C
+
+
+
+. 941 25477278 12
+. 81 7B 00 94 2C 8B 00 00 40 86 01 DC
+==== BB 942 (0x25477284) approx BBs exec'd 0 ====
+
+	0x25477284:  3880FFFF  li r4,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25477288:  39200000  li r9,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x2547728C:  909B01E0  stw r4,480(r27)
+	   6: GETL       	R4, t4
+	   7: GETL       	R27, t6
+	   8: ADDL       	$0x1E0, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x25477290:  2C090000  cmpi cr0,r9,0
+	  11: GETL       	R9, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25477294:  408200DC  bc 4,2,0x25477370
+	  15: Jc02o       	$0x25477370
+
+
+
+. 942 25477284 20
+. 38 80 FF FF 39 20 00 00 90 9B 01 E0 2C 09 00 00 40 82 00 DC
+==22130== Reading syms from /lib/libdl-2.3.4.so (0xFFB0000)
+==22130==    object doesn't have any debug info
+==== BB 943 (0x25483920) approx BBs exec'd 0 ====
+
+	0x25483920:  28850010  cmpli cr1,r5,16
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x10, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25483924:  7CC62A14  add r6,r6,r5
+	   5: GETL       	R6, t6
+	   6: GETL       	R5, t8
+	   7: ADDL       	t6, t8
+	   8: PUTL       	t8, R6
+	   9: INCEIPL       	$4
+
+	0x25483928:  419F0020  bc 12,31,0x25483948
+	  10: Js31o       	$0x25483948
+
+
+
+. 943 25483920 12
+. 28 85 00 10 7C C6 2A 14 41 9F 00 20
+==== BB 944 (0x25483960) approx BBs exec'd 0 ====
+
+	0x25483960:  9086FFFC  stw r4,-4(r6)
+	   0: GETL       	R4, t0
+	   1: GETL       	R6, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25483964:  9086FFF8  stw r4,-8(r6)
+	   5: GETL       	R4, t4
+	   6: GETL       	R6, t6
+	   7: ADDL       	$0xFFFFFFF8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483968:  9086FFF4  stw r4,-12(r6)
+	  10: GETL       	R4, t8
+	  11: GETL       	R6, t10
+	  12: ADDL       	$0xFFFFFFF4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x2548396C:  9486FFF0  stwu r4,-16(r6)
+	  15: GETL       	R4, t12
+	  16: GETL       	R6, t14
+	  17: ADDL       	$0xFFFFFFF0, t14
+	  18: PUTL       	t14, R6
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x25483970:  4C9C0020  bclr 4,28
+	  21: GETL       	LR, t16
+	  22: Jc28o-r       	t16
+
+
+
+. 944 25483960 20
+. 90 86 FF FC 90 86 FF F8 90 86 FF F4 94 86 FF F0 4C 9C 00 20
+==== BB 945 (0x254760FC) approx BBs exec'd 0 ====
+
+	0x254760FC:  817A01FC  lwz r11,508(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25476100:  4BFFFCC0  b 0x25475DC0
+	   5: JMPo       	$0x25475DC0  ($4)
+
+
+
+. 945 254760FC 8
+. 81 7A 01 FC 4B FF FC C0
+==== BB 946 (0x2547A374) approx BBs exec'd 0 ====
+
+	0x2547A374:  809A00F0  lwz r4,240(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0xF0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547A378:  2C840000  cmpi cr1,r4,0
+	   5: GETL       	R4, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547A37C:  40860010  bc 4,6,0x2547A38C
+	   9: Jc06o       	$0x2547A38C
+
+
+
+. 946 2547A374 12
+. 80 9A 00 F0 2C 84 00 00 40 86 00 10
+==== BB 947 (0x2547A380) approx BBs exec'd 0 ====
+
+	0x2547A380:  80BA00E8  lwz r5,232(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0xE8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547A384:  2C050000  cmpi cr0,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547A388:  41820530  bc 12,2,0x2547A8B8
+	   9: Js02o       	$0x2547A8B8
+
+
+
+. 947 2547A380 12
+. 80 BA 00 E8 2C 05 00 00 41 82 05 30
+==== BB 948 (0x2547A5C4) approx BBs exec'd 0 ====
+
+	0x2547A5C4:  83100000  lwz r24,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R24
+	   3: INCEIPL       	$4
+
+	0x2547A5C8:  827F0038  lwz r19,56(r31)
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x38, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R19
+	   8: INCEIPL       	$4
+
+	0x2547A5CC:  21780000  subfic r11,r24,0
+	   9: GETL       	R24, t8
+	  10: MOVL       	$0x0, t10
+	  11: SBBL       	t8, t10  (-wCa)
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0x2547A5D0:  7DCBC114  adde r14,r11,r24
+	  14: GETL       	R11, t12
+	  15: GETL       	R24, t14
+	  16: ADCL       	t12, t14  (-rCa-wCa)
+	  17: PUTL       	t14, R14
+	  18: INCEIPL       	$4
+
+	0x2547A5D4:  3153FFFF  addic r10,r19,-1
+	  19: GETL       	R19, t16
+	  20: ADCL       	$0xFFFFFFFF, t16  (-wCa)
+	  21: PUTL       	t16, R10
+	  22: INCEIPL       	$4
+
+	0x2547A5D8:  7F6A9910  subfe r27,r10,r19
+	  23: GETL       	R10, t18
+	  24: GETL       	R19, t20
+	  25: SBBL       	t18, t20  (-rCa-wCa)
+	  26: PUTL       	t20, R27
+	  27: INCEIPL       	$4
+
+	0x2547A5DC:  7DC8D839  and. r8,r14,r27
+	  28: GETL       	R14, t22
+	  29: GETL       	R27, t24
+	  30: ANDL       	t22, t24
+	  31: PUTL       	t24, R8
+	  32: CMP0L       	t24, t26  (-rSo)
+	  33: ICRFL       	t26, $0x0, CR
+	  34: INCEIPL       	$4
+
+	0x2547A5E0:  4182000C  bc 12,2,0x2547A5EC
+	  35: Js02o       	$0x2547A5EC
+
+
+
+. 948 2547A5C4 32
+. 83 10 00 00 82 7F 00 38 21 78 00 00 7D CB C1 14 31 53 FF FF 7F 6A 99 10 7D C8 D8 39 41 82 00 0C
+==== BB 949 (0x2547A5E4) approx BBs exec'd 0 ====
+
+	0x2547A5E4:  811F0038  lwz r8,56(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x38, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547A5E8:  91100000  stw r8,0(r16)
+	   5: GETL       	R8, t4
+	   6: GETL       	R16, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547A5EC:  807701E8  lwz r3,488(r23)
+	   9: GETL       	R23, t8
+	  10: ADDL       	$0x1E8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R3
+	  13: INCEIPL       	$4
+
+	0x2547A5F0:  2F030000  cmpi cr6,r3,0
+	  14: GETL       	R3, t12
+	  15: CMP0L       	t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x6, CR
+	  17: INCEIPL       	$4
+
+	0x2547A5F4:  419A0018  bc 12,26,0x2547A60C
+	  18: Js26o       	$0x2547A60C
+
+
+
+. 949 2547A5E4 20
+. 81 1F 00 38 91 10 00 00 80 77 01 E8 2F 03 00 00 41 9A 00 18
+==== BB 950 (0x2547A60C) approx BBs exec'd 0 ====
+
+	0x2547A60C:  80DF000C  lwz r6,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547A610:  54C51838  rlwinm r5,r6,3,0,28
+	   5: GETL       	R6, t4
+	   6: SHLL       	$0x3, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547A614:  38650004  addi r3,r5,4
+	   9: GETL       	R5, t6
+	  10: ADDL       	$0x4, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x2547A618:  4801D40D  bl 0x25497A24
+	  13: MOVL       	$0x2547A61C, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 950 2547A60C 16
+. 80 DF 00 0C 54 C5 18 38 38 65 00 04 48 01 D4 0D
+==== BB 951 (0x2547A61C) approx BBs exec'd 0 ====
+
+	0x2547A61C:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547A620:  907701E8  stw r3,488(r23)
+	   4: GETL       	R3, t4
+	   5: GETL       	R23, t6
+	   6: ADDL       	$0x1E8, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547A624:  4186089C  bc 12,6,0x2547AEC0
+	   9: Js06o       	$0x2547AEC0
+
+
+
+. 951 2547A61C 12
+. 2C 83 00 00 90 77 01 E8 41 86 08 9C
+==== BB 952 (0x2547A628) approx BBs exec'd 0 ====
+
+	0x2547A628:  80FF0008  lwz r7,8(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547A62C:  38A00000  li r5,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547A630:  829F000C  lwz r20,12(r31)
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R20
+	  12: INCEIPL       	$4
+
+	0x2547A634:  2C070000  cmpi cr0,r7,0
+	  13: GETL       	R7, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0x2547A638:  569D103A  rlwinm r29,r20,2,0,29
+	  17: GETL       	R20, t14
+	  18: SHLL       	$0x2, t14
+	  19: PUTL       	t14, R29
+	  20: INCEIPL       	$4
+
+	0x2547A63C:  9297015C  stw r20,348(r23)
+	  21: GETL       	R20, t16
+	  22: GETL       	R23, t18
+	  23: ADDL       	$0x15C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547A640:  7D23EA14  add r9,r3,r29
+	  26: GETL       	R3, t20
+	  27: GETL       	R29, t22
+	  28: ADDL       	t20, t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0x2547A644:  90BF000C  stw r5,12(r31)
+	  31: GETL       	R5, t24
+	  32: GETL       	R31, t26
+	  33: ADDL       	$0xC, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547A648:  38890004  addi r4,r9,4
+	  36: GETL       	R9, t28
+	  37: ADDL       	$0x4, t28
+	  38: PUTL       	t28, R4
+	  39: INCEIPL       	$4
+
+	0x2547A64C:  90970158  stw r4,344(r23)
+	  40: GETL       	R4, t30
+	  41: GETL       	R23, t32
+	  42: ADDL       	$0x158, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x2547A650:  41820050  bc 12,2,0x2547A6A0
+	  45: Js02o       	$0x2547A6A0
+
+
+
+. 952 2547A628 44
+. 80 FF 00 08 38 A0 00 00 82 9F 00 0C 2C 07 00 00 56 9D 10 3A 92 97 01 5C 7D 23 EA 14 90 BF 00 0C 38 89 00 04 90 97 01 58 41 82 00 50
+==== BB 953 (0x2547A654) approx BBs exec'd 0 ====
+
+	0x2547A654:  2F0F0000  cmpi cr6,r15,0
+	   0: GETL       	R15, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547A658:  38C00000  li r6,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0x2547A65C:  409A01F8  bc 4,26,0x2547A854
+	   7: Jc26o       	$0x2547A854
+
+
+
+. 953 2547A654 12
+. 2F 0F 00 00 38 C0 00 00 40 9A 01 F8
+==== BB 954 (0x2547A660) approx BBs exec'd 0 ====
+
+	0x2547A660:  817F000C  lwz r11,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547A664:  80070004  lwz r0,4(r7)
+	   5: GETL       	R7, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x2547A668:  83370158  lwz r25,344(r23)
+	  10: GETL       	R23, t8
+	  11: ADDL       	$0x158, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R25
+	  14: INCEIPL       	$4
+
+	0x2547A66C:  5576103A  rlwinm r22,r11,2,0,29
+	  15: GETL       	R11, t12
+	  16: SHLL       	$0x2, t12
+	  17: PUTL       	t12, R22
+	  18: INCEIPL       	$4
+
+	0x2547A670:  398B0001  addi r12,r11,1
+	  19: GETL       	R11, t14
+	  20: ADDL       	$0x1, t14
+	  21: PUTL       	t14, R12
+	  22: INCEIPL       	$4
+
+	0x2547A674:  7C16C92E  stwx r0,r22,r25
+	  23: GETL       	R25, t16
+	  24: GETL       	R22, t18
+	  25: ADDL       	t18, t16
+	  26: GETL       	R0, t20
+	  27: STL       	t20, (t16)
+	  28: INCEIPL       	$4
+
+	0x2547A678:  81E70004  lwz r15,4(r7)
+	  29: GETL       	R7, t22
+	  30: ADDL       	$0x4, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R15
+	  33: INCEIPL       	$4
+
+	0x2547A67C:  806F0180  lwz r3,384(r15)
+	  34: GETL       	R15, t26
+	  35: ADDL       	$0x180, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R3
+	  38: INCEIPL       	$4
+
+	0x2547A680:  919F000C  stw r12,12(r31)
+	  39: GETL       	R12, t30
+	  40: GETL       	R31, t32
+	  41: ADDL       	$0xC, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x2547A684:  50C3C94C  rlwimi r3,r6,25,5,6
+	  44: GETL       	R3, t34
+	  45: GETL       	R6, t36
+	  46: ROLL       	$0x19, t36
+	  47: ANDL       	$0x6000000, t36
+	  48: ANDL       	$0xF9FFFFFF, t34
+	  49: ORL       	t34, t36
+	  50: PUTL       	t36, R3
+	  51: INCEIPL       	$4
+
+	0x2547A688:  906F0180  stw r3,384(r15)
+	  52: GETL       	R3, t38
+	  53: GETL       	R15, t40
+	  54: ADDL       	$0x180, t40
+	  55: STL       	t38, (t40)
+	  56: INCEIPL       	$4
+
+	0x2547A68C:  80E70008  lwz r7,8(r7)
+	  57: GETL       	R7, t42
+	  58: ADDL       	$0x8, t42
+	  59: LDL       	(t42), t44
+	  60: PUTL       	t44, R7
+	  61: INCEIPL       	$4
+
+	0x2547A690:  2F070000  cmpi cr6,r7,0
+	  62: GETL       	R7, t46
+	  63: CMP0L       	t46, t48  (-rSo)
+	  64: ICRFL       	t48, $0x6, CR
+	  65: INCEIPL       	$4
+
+	0x2547A694:  409AFFCC  bc 4,26,0x2547A660
+	  66: Jc26o       	$0x2547A660
+
+
+
+. 954 2547A660 56
+. 81 7F 00 0C 80 07 00 04 83 37 01 58 55 76 10 3A 39 8B 00 01 7C 16 C9 2E 81 E7 00 04 80 6F 01 80 91 9F 00 0C 50 C3 C9 4C 90 6F 01 80 80 E7 00 08 2F 07 00 00 40 9A FF CC
+==== BB 955 (0x2547A698) approx BBs exec'd 0 ====
+
+	0x2547A698:  80BF000C  lwz r5,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547A69C:  80970158  lwz r4,344(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x158, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547A6A0:  839E04F4  lwz r28,1268(r30)
+	  10: GETL       	R30, t8
+	  11: ADDL       	$0x4F4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R28
+	  14: INCEIPL       	$4
+
+	0x2547A6A4:  80FC0000  lwz r7,0(r28)
+	  15: GETL       	R28, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R7
+	  18: INCEIPL       	$4
+
+	0x2547A6A8:  70E80400  andi. r8,r7,0x400
+	  19: GETL       	R7, t16
+	  20: ANDL       	$0x400, t16
+	  21: PUTL       	t16, R8
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0x2547A6AC:  408204E0  bc 4,2,0x2547AB8C
+	  25: Jc02o       	$0x2547AB8C
+
+
+
+. 955 2547A698 24
+. 80 BF 00 0C 80 97 01 58 83 9E 04 F4 80 FC 00 00 70 E8 04 00 40 82 04 E0
+==== BB 956 (0x2547A6B0) approx BBs exec'd 0 ====
+
+	0x2547A6B0:  80D701F0  lwz r6,496(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x1F0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547A6B4:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547A6B8:  2B860000  cmpli cr7,r6,0
+	   8: GETL       	R6, t6
+	   9: MOVL       	$0x0, t10
+	  10: CMPUL       	t6, t10, t8  (-rSo)
+	  11: ICRFL       	t8, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x2547A6BC:  409D0048  bc 4,29,0x2547A704
+	  13: Jc29o       	$0x2547A704
+
+
+
+. 956 2547A6B0 16
+. 80 D7 01 F0 38 60 00 00 2B 86 00 00 40 9D 00 48
+==== BB 957 (0x2547A704) approx BBs exec'd 0 ====
+
+	0x2547A704:  807701E8  lwz r3,488(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547A708:  54A5103A  rlwinm r5,r5,2,0,29
+	   5: GETL       	R5, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547A70C:  3B000001  li r24,1
+	   9: MOVL       	$0x1, t6
+	  10: PUTL       	t6, R24
+	  11: INCEIPL       	$4
+
+	0x2547A710:  480094D1  bl 0x25483BE0
+	  12: MOVL       	$0x2547A714, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25483BE0  ($4)
+
+
+
+. 957 2547A704 16
+. 80 77 01 E8 54 A5 10 3A 3B 00 00 01 48 00 94 D1
+==== BB 958 (0x25483DFC) approx BBs exec'd 0 ====
+
+	0x25483DFC:  81240000  lwz r9,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25483E00:  3884FFFC  addi r4,r4,-4
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0xFFFFFFFC, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25483E04:  80040008  lwz r0,8(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x25483E08:  3863FFF8  addi r3,r3,-8
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0xFFFFFFF8, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x25483E0C:  38A50001  addi r5,r5,1
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x1, t12
+	  19: PUTL       	t12, R5
+	  20: INCEIPL       	$4
+
+	0x25483E10:  91230008  stw r9,8(r3)
+	  21: GETL       	R9, t14
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x8, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0x25483E14:  4BFFFF10  b 0x25483D24
+	  26: JMPo       	$0x25483D24  ($4)
+
+
+
+. 958 25483DFC 28
+. 81 24 00 00 38 84 FF FC 80 04 00 08 38 63 FF F8 38 A5 00 01 91 23 00 08 4B FF FF 10
+==== BB 959 (0x25483D24) approx BBs exec'd 0 ====
+
+	0x25483D24:  8124000C  lwz r9,12(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25483D28:  9003000C  stw r0,12(r3)
+	   5: GETL       	R0, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0xC, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483D2C:  80040010  lwz r0,16(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x10, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x25483D30:  91230010  stw r9,16(r3)
+	  15: GETL       	R9, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0x10, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25483D34:  81240014  lwz r9,20(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0x14, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R9
+	  24: INCEIPL       	$4
+
+	0x25483D38:  90030014  stw r0,20(r3)
+	  25: GETL       	R0, t20
+	  26: GETL       	R3, t22
+	  27: ADDL       	$0x14, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0x18, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R0
+	  34: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	  35: GETL       	R9, t28
+	  36: GETL       	R3, t30
+	  37: ADDL       	$0x18, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  40: GETL       	R5, t32
+	  41: ADCL       	$0xFFFFFFF8, t32  (-wCa)
+	  42: PUTL       	t32, R5
+	  43: CMP0L       	t32, t34  (-rSo)
+	  44: ICRFL       	t34, $0x0, CR
+	  45: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  46: GETL       	R4, t36
+	  47: ADDL       	$0x1C, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R9
+	  50: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  51: GETL       	R0, t40
+	  52: GETL       	R3, t42
+	  53: ADDL       	$0x1C, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  56: GETL       	R4, t44
+	  57: ADDL       	$0x20, t44
+	  58: PUTL       	t44, R4
+	  59: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  60: GETL       	R3, t46
+	  61: ADDL       	$0x20, t46
+	  62: PUTL       	t46, R3
+	  63: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  64: Jc02o       	$0x25483D0C
+
+
+
+. 959 25483D24 56
+. 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 960 (0x2547A714) approx BBs exec'd 0 ====
+
+	0x2547A714:  80FF000C  lwz r7,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547A718:  7C183840  cmpl cr0,r24,r7
+	   5: GETL       	R24, t4
+	   6: GETL       	R7, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547A71C:  4080032C  bc 4,0,0x2547AA48
+	  10: Jc00o       	$0x2547AA48
+
+
+
+. 960 2547A714 12
+. 80 FF 00 0C 7C 18 38 40 40 80 03 2C
+==== BB 961 (0x2547A720) approx BBs exec'd 0 ====
+
+	0x2547A720:  815701E8  lwz r10,488(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547A724:  82570158  lwz r18,344(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x158, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R18
+	   9: INCEIPL       	$4
+
+	0x2547A728:  5715103A  rlwinm r21,r24,2,0,29
+	  10: GETL       	R24, t8
+	  11: SHLL       	$0x2, t8
+	  12: PUTL       	t8, R21
+	  13: INCEIPL       	$4
+
+	0x2547A72C:  806A0004  lwz r3,4(r10)
+	  14: GETL       	R10, t10
+	  15: ADDL       	$0x4, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R3
+	  18: INCEIPL       	$4
+
+	0x2547A730:  3B200001  li r25,1
+	  19: MOVL       	$0x1, t14
+	  20: PUTL       	t14, R25
+	  21: INCEIPL       	$4
+
+	0x2547A734:  7F55902E  lwzx r26,r21,r18
+	  22: GETL       	R18, t16
+	  23: GETL       	R21, t18
+	  24: ADDL       	t18, t16
+	  25: LDL       	(t16), t20
+	  26: PUTL       	t20, R26
+	  27: INCEIPL       	$4
+
+	0x2547A738:  7F83D000  cmp cr7,r3,r26
+	  28: GETL       	R3, t22
+	  29: GETL       	R26, t24
+	  30: CMPL       	t22, t24, t26  (-rSo)
+	  31: ICRFL       	t26, $0x7, CR
+	  32: INCEIPL       	$4
+
+	0x2547A73C:  419E0018  bc 12,30,0x2547A754
+	  33: Js30o       	$0x2547A754
+
+
+
+. 961 2547A720 32
+. 81 57 01 E8 82 57 01 58 57 15 10 3A 80 6A 00 04 3B 20 00 01 7F 55 90 2E 7F 83 D0 00 41 9E 00 18
+==== BB 962 (0x2547A754) approx BBs exec'd 0 ====
+
+	0x2547A754:  3B790001  addi r27,r25,1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x2547A758:  7C9B3840  cmpl cr1,r27,r7
+	   4: GETL       	R27, t2
+	   5: GETL       	R7, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547A75C:  4084007C  bc 4,4,0x2547A7D8
+	   9: Jc04o       	$0x2547A7D8
+
+
+
+. 962 2547A754 12
+. 3B 79 00 01 7C 9B 38 40 40 84 00 7C
+==== BB 963 (0x2547A760) approx BBs exec'd 0 ====
+
+	0x2547A760:  5768103A  rlwinm r8,r27,2,0,29
+	   0: GETL       	R27, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x2547A764:  7E68502E  lwzx r19,r8,r10
+	   4: GETL       	R10, t2
+	   5: GETL       	R8, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R19
+	   9: INCEIPL       	$4
+
+	0x2547A768:  817301E8  lwz r11,488(r19)
+	  10: GETL       	R19, t8
+	  11: ADDL       	$0x1E8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547A76C:  2F0B0000  cmpi cr6,r11,0
+	  15: GETL       	R11, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0x2547A770:  419A005C  bc 12,26,0x2547A7CC
+	  19: Js26o       	$0x2547A7CC
+
+
+
+. 963 2547A760 20
+. 57 68 10 3A 7E 68 50 2E 81 73 01 E8 2F 0B 00 00 41 9A 00 5C
+==== BB 964 (0x2547A774) approx BBs exec'd 0 ====
+
+	0x2547A774:  812B0000  lwz r9,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547A778:  2F890000  cmpi cr7,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x2547A77C:  419E0050  bc 12,30,0x2547A7CC
+	   8: Js30o       	$0x2547A7CC
+
+
+
+. 964 2547A774 12
+. 81 2B 00 00 2F 89 00 00 41 9E 00 50
+==== BB 965 (0x2547A780) approx BBs exec'd 0 ====
+
+	0x2547A780:  7E19D850  subf r16,r25,r27
+	   0: GETL       	R25, t0
+	   1: GETL       	R27, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R16
+	   4: INCEIPL       	$4
+
+	0x2547A784:  5605103A  rlwinm r5,r16,2,0,29
+	   5: GETL       	R16, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547A788:  48000010  b 0x2547A798
+	   9: JMPo       	$0x2547A798  ($4)
+
+
+
+. 965 2547A780 12
+. 7E 19 D8 50 56 05 10 3A 48 00 00 10
+==== BB 966 (0x2547A798) approx BBs exec'd 0 ====
+
+	0x2547A798:  7C09D000  cmp cr0,r9,r26
+	   0: GETL       	R9, t0
+	   1: GETL       	R26, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547A79C:  396B0004  addi r11,r11,4
+	   5: GETL       	R11, t6
+	   6: ADDL       	$0x4, t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0x2547A7A0:  4082FFEC  bc 4,2,0x2547A78C
+	   9: Jc02o       	$0x2547A78C
+
+
+
+. 966 2547A798 12
+. 7C 09 D0 00 39 6B 00 04 40 82 FF EC
+==== BB 967 (0x2547A78C) approx BBs exec'd 0 ====
+
+	0x2547A78C:  812B0000  lwz r9,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547A790:  2C890000  cmpi cr1,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x2547A794:  41860038  bc 12,6,0x2547A7CC
+	   8: Js06o       	$0x2547A7CC
+
+
+
+. 967 2547A78C 12
+. 81 2B 00 00 2C 89 00 00 41 86 00 38
+==== BB 968 (0x2547A7CC) approx BBs exec'd 0 ====
+
+	0x2547A7CC:  3B7B0001  addi r27,r27,1
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x2547A7D0:  7C9B3840  cmpl cr1,r27,r7
+	   4: GETL       	R27, t2
+	   5: GETL       	R7, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547A7D4:  4184FF8C  bc 12,4,0x2547A760
+	   9: Js04o       	$0x2547A760
+
+
+
+. 968 2547A7CC 12
+. 3B 7B 00 01 7C 9B 38 40 41 84 FF 8C
+==== BB 969 (0x2547A7D8) approx BBs exec'd 0 ====
+
+	0x2547A7D8:  3B180001  addi r24,r24,1
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0x2547A7DC:  7F183840  cmpl cr6,r24,r7
+	   4: GETL       	R24, t2
+	   5: GETL       	R7, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547A7E0:  4198FF44  bc 12,24,0x2547A724
+	   9: Js24o       	$0x2547A724
+
+
+
+. 969 2547A7D8 12
+. 3B 18 00 01 7F 18 38 40 41 98 FF 44
+==== BB 970 (0x2547A724) approx BBs exec'd 0 ====
+
+	0x2547A724:  82570158  lwz r18,344(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x158, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x2547A728:  5715103A  rlwinm r21,r24,2,0,29
+	   5: GETL       	R24, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R21
+	   8: INCEIPL       	$4
+
+	0x2547A72C:  806A0004  lwz r3,4(r10)
+	   9: GETL       	R10, t6
+	  10: ADDL       	$0x4, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x2547A730:  3B200001  li r25,1
+	  14: MOVL       	$0x1, t10
+	  15: PUTL       	t10, R25
+	  16: INCEIPL       	$4
+
+	0x2547A734:  7F55902E  lwzx r26,r21,r18
+	  17: GETL       	R18, t12
+	  18: GETL       	R21, t14
+	  19: ADDL       	t14, t12
+	  20: LDL       	(t12), t16
+	  21: PUTL       	t16, R26
+	  22: INCEIPL       	$4
+
+	0x2547A738:  7F83D000  cmp cr7,r3,r26
+	  23: GETL       	R3, t18
+	  24: GETL       	R26, t20
+	  25: CMPL       	t18, t20, t22  (-rSo)
+	  26: ICRFL       	t22, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0x2547A73C:  419E0018  bc 12,30,0x2547A754
+	  28: Js30o       	$0x2547A754
+
+
+
+. 970 2547A724 28
+. 82 57 01 58 57 15 10 3A 80 6A 00 04 3B 20 00 01 7F 55 90 2E 7F 83 D0 00 41 9E 00 18
+==== BB 971 (0x2547A740) approx BBs exec'd 0 ====
+
+	0x2547A740:  3B390001  addi r25,r25,1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0x2547A744:  572E103A  rlwinm r14,r25,2,0,29
+	   4: GETL       	R25, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R14
+	   7: INCEIPL       	$4
+
+	0x2547A748:  7F6E502E  lwzx r27,r14,r10
+	   8: GETL       	R10, t4
+	   9: GETL       	R14, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0x2547A74C:  7F9BD000  cmp cr7,r27,r26
+	  14: GETL       	R27, t10
+	  15: GETL       	R26, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547A750:  409EFFF0  bc 4,30,0x2547A740
+	  19: Jc30o       	$0x2547A740
+
+
+
+. 971 2547A740 20
+. 3B 39 00 01 57 2E 10 3A 7F 6E 50 2E 7F 9B D0 00 40 9E FF F0
+==== BB 972 (0x2547A7A4) approx BBs exec'd 0 ====
+
+	0x2547A7A4:  5734103A  rlwinm r20,r25,2,0,29
+	   0: GETL       	R25, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R20
+	   3: INCEIPL       	$4
+
+	0x2547A7A8:  7FA8502E  lwzx r29,r8,r10
+	   4: GETL       	R10, t2
+	   5: GETL       	R8, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547A7AC:  7CCAA214  add r6,r10,r20
+	  10: GETL       	R10, t8
+	  11: GETL       	R20, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R6
+	  14: INCEIPL       	$4
+
+	0x2547A7B0:  3B390001  addi r25,r25,1
+	  15: GETL       	R25, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R25
+	  18: INCEIPL       	$4
+
+	0x2547A7B4:  7CC43378  or r4,r6,r6
+	  19: GETL       	R6, t14
+	  20: PUTL       	t14, R4
+	  21: INCEIPL       	$4
+
+	0x2547A7B8:  38660004  addi r3,r6,4
+	  22: GETL       	R6, t16
+	  23: ADDL       	$0x4, t16
+	  24: PUTL       	t16, R3
+	  25: INCEIPL       	$4
+
+	0x2547A7BC:  48008DE1  bl 0x2548359C
+	  26: MOVL       	$0x2547A7C0, t18
+	  27: PUTL       	t18, LR
+	  28: JMPo-c       	$0x2548359C  ($4)
+
+
+
+. 972 2547A7A4 28
+. 57 34 10 3A 7F A8 50 2E 7C CA A2 14 3B 39 00 01 7C C4 33 78 38 66 00 04 48 00 8D E1
+==== BB 973 memmove(0x2548359C) approx BBs exec'd 0 ====
+
+	0x2548359C:  7C041850  subf r0,r4,r3
+	   0: GETL       	R4, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x254835A0:  7CC802A6  mflr r6
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0x254835A4:  7F802840  cmpl cr7,r0,r5
+	   8: GETL       	R0, t6
+	   9: GETL       	R5, t8
+	  10: CMPUL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x254835A8:  9421FFE0  stwu r1,-32(r1)
+	  13: GETL       	R1, t12
+	  14: GETL       	R1, t14
+	  15: ADDL       	$0xFFFFFFE0, t14
+	  16: PUTL       	t14, R1
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x254835AC:  93810010  stw r28,16(r1)
+	  19: GETL       	R28, t16
+	  20: GETL       	R1, t18
+	  21: ADDL       	$0x10, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0x254835B0:  7C7C1B78  or r28,r3,r3
+	  24: GETL       	R3, t20
+	  25: PUTL       	t20, R28
+	  26: INCEIPL       	$4
+
+	0x254835B4:  93A10014  stw r29,20(r1)
+	  27: GETL       	R29, t22
+	  28: GETL       	R1, t24
+	  29: ADDL       	$0x14, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0x254835B8:  7C7D1B78  or r29,r3,r3
+	  32: GETL       	R3, t26
+	  33: PUTL       	t26, R29
+	  34: INCEIPL       	$4
+
+	0x254835BC:  93E1001C  stw r31,28(r1)
+	  35: GETL       	R31, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x1C, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x254835C0:  7C9F2378  or r31,r4,r4
+	  40: GETL       	R4, t32
+	  41: PUTL       	t32, R31
+	  42: INCEIPL       	$4
+
+	0x254835C4:  9361000C  stw r27,12(r1)
+	  43: GETL       	R27, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0xC, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x254835C8:  93C10018  stw r30,24(r1)
+	  48: GETL       	R30, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x18, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0x254835CC:  90C10024  stw r6,36(r1)
+	  53: GETL       	R6, t42
+	  54: GETL       	R1, t44
+	  55: ADDL       	$0x24, t44
+	  56: STL       	t42, (t44)
+	  57: INCEIPL       	$4
+
+	0x254835D0:  419C0054  bc 12,28,0x25483624
+	  58: Js28o       	$0x25483624
+
+
+
+. 973 2548359C 56
+. 7C 04 18 50 7C C8 02 A6 7F 80 28 40 94 21 FF E0 93 81 00 10 7C 7C 1B 78 93 A1 00 14 7C 7D 1B 78 93 E1 00 1C 7C 9F 23 78 93 61 00 0C 93 C1 00 18 90 C1 00 24 41 9C 00 54
+==== BB 974 (0x254835D4) approx BBs exec'd 0 ====
+
+	0x254835D4:  2805000F  cmpli cr0,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254835D8:  418100E8  bc 12,1,0x254836C0
+	   5: Js01o       	$0x254836C0
+
+
+
+. 974 254835D4 8
+. 28 05 00 0F 41 81 00 E8
+==== BB 975 (0x254835DC) approx BBs exec'd 0 ====
+
+	0x254835DC:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x254835E0:  4186001C  bc 12,6,0x254835FC
+	   4: Js06o       	$0x254835FC
+
+
+
+. 975 254835DC 8
+. 2C 85 00 00 41 86 00 1C
+==== BB 976 (0x254835E4) approx BBs exec'd 0 ====
+
+	0x254835E4:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x254835E8:  891F0000  lbz r8,0(r31)
+	   3: GETL       	R31, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R8
+	   6: INCEIPL       	$4
+
+	0x254835EC:  3BFF0001  addi r31,r31,1
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R31
+	  10: INCEIPL       	$4
+
+	0x254835F0:  991D0000  stb r8,0(r29)
+	  11: GETL       	R8, t8
+	  12: GETL       	R29, t10
+	  13: STB       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x254835F4:  3BBD0001  addi r29,r29,1
+	  15: GETL       	R29, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x254835F8:  4200FFF0  bc 16,0,0x254835E8
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0x254835FC
+	  23: JMPo       	$0x254835E8  ($4)
+
+
+
+. 976 254835E4 24
+. 7C A9 03 A6 89 1F 00 00 3B FF 00 01 99 1D 00 00 3B BD 00 01 42 00 FF F0
+==== BB 977 (0x254835E8) approx BBs exec'd 0 ====
+
+	0x254835E8:  891F0000  lbz r8,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0x254835EC:  3BFF0001  addi r31,r31,1
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0x254835F0:  991D0000  stb r8,0(r29)
+	   8: GETL       	R8, t6
+	   9: GETL       	R29, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x254835F4:  3BBD0001  addi r29,r29,1
+	  12: GETL       	R29, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0x254835F8:  4200FFF0  bc 16,0,0x254835E8
+	  16: GETL       	CTR, t12
+	  17: ADDL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, CTR
+	  19: JIFZL       	t12, $0x254835FC
+	  20: JMPo       	$0x254835E8  ($4)
+
+
+
+. 977 254835E8 20
+. 89 1F 00 00 3B FF 00 01 99 1D 00 00 3B BD 00 01 42 00 FF F0
+==== BB 978 (0x254835FC) approx BBs exec'd 0 ====
+
+	0x254835FC:  83610024  lwz r27,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25483600:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483604:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x25483608:  7F6803A6  mtlr r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x2548360C:  83810010  lwz r28,16(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x10, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0x25483610:  8361000C  lwz r27,12(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R27
+	  25: INCEIPL       	$4
+
+	0x25483614:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x25483618:  83E1001C  lwz r31,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0x2548361C:  38210020  addi r1,r1,32
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0x25483620:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+
+. 978 254835FC 40
+. 83 61 00 24 7F 83 E3 78 83 A1 00 14 7F 68 03 A6 83 81 00 10 83 61 00 0C 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 979 (0x2547A7C0) approx BBs exec'd 0 ====
+
+	0x2547A7C0:  815701E8  lwz r10,488(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547A7C4:  80FF000C  lwz r7,12(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x2547A7C8:  7FB4512E  stwx r29,r20,r10
+	  10: GETL       	R10, t8
+	  11: GETL       	R20, t10
+	  12: ADDL       	t10, t8
+	  13: GETL       	R29, t12
+	  14: STL       	t12, (t8)
+	  15: INCEIPL       	$4
+
+	0x2547A7CC:  3B7B0001  addi r27,r27,1
+	  16: GETL       	R27, t14
+	  17: ADDL       	$0x1, t14
+	  18: PUTL       	t14, R27
+	  19: INCEIPL       	$4
+
+	0x2547A7D0:  7C9B3840  cmpl cr1,r27,r7
+	  20: GETL       	R27, t16
+	  21: GETL       	R7, t18
+	  22: CMPUL       	t16, t18, t20  (-rSo)
+	  23: ICRFL       	t20, $0x1, CR
+	  24: INCEIPL       	$4
+
+	0x2547A7D4:  4184FF8C  bc 12,4,0x2547A760
+	  25: Js04o       	$0x2547A760
+
+
+
+. 979 2547A7C0 24
+. 81 57 01 E8 80 FF 00 0C 7F B4 51 2E 3B 7B 00 01 7C 9B 38 40 41 84 FF 8C
+==== BB 980 (0x25483624) approx BBs exec'd 0 ====
+
+	0x25483624:  2B05000F  cmpli cr6,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25483628:  7FA42A14  add r29,r4,r5
+	   5: GETL       	R4, t6
+	   6: GETL       	R5, t8
+	   7: ADDL       	t6, t8
+	   8: PUTL       	t8, R29
+	   9: INCEIPL       	$4
+
+	0x2548362C:  7FE32A14  add r31,r3,r5
+	  10: GETL       	R3, t10
+	  11: GETL       	R5, t12
+	  12: ADDL       	t10, t12
+	  13: PUTL       	t12, R31
+	  14: INCEIPL       	$4
+
+	0x25483630:  41990044  bc 12,25,0x25483674
+	  15: Js25o       	$0x25483674
+
+
+
+. 980 25483624 16
+. 2B 05 00 0F 7F A4 2A 14 7F E3 2A 14 41 99 00 44
+==== BB 981 (0x25483634) approx BBs exec'd 0 ====
+
+	0x25483634:  2F850000  cmpi cr7,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25483638:  41BEFFC4  bc 13,30,0x254835FC
+	   4: Js30o       	$0x254835FC
+
+
+
+. 981 25483634 8
+. 2F 85 00 00 41 BE FF C4
+==== BB 982 (0x2548363C) approx BBs exec'd 0 ====
+
+	0x2548363C:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x25483640:  8D9DFFFF  lbzu r12,-1(r29)
+	   3: GETL       	R29, t2
+	   4: ADDL       	$0xFFFFFFFF, t2
+	   5: PUTL       	t2, R29
+	   6: LDB       	(t2), t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25483644:  9D9FFFFF  stbu r12,-1(r31)
+	   9: GETL       	R12, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0xFFFFFFFF, t8
+	  12: PUTL       	t8, R31
+	  13: STB       	t6, (t8)
+	  14: INCEIPL       	$4
+
+	0x25483648:  4200FFF8  bc 16,0,0x25483640
+	  15: GETL       	CTR, t10
+	  16: ADDL       	$0xFFFFFFFF, t10
+	  17: PUTL       	t10, CTR
+	  18: JIFZL       	t10, $0x2548364C
+	  19: JMPo       	$0x25483640  ($4)
+
+
+
+. 982 2548363C 16
+. 7C A9 03 A6 8D 9D FF FF 9D 9F FF FF 42 00 FF F8
+==== BB 983 (0x25483640) approx BBs exec'd 0 ====
+
+	0x25483640:  8D9DFFFF  lbzu r12,-1(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R29
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0x25483644:  9D9FFFFF  stbu r12,-1(r31)
+	   6: GETL       	R12, t4
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0xFFFFFFFF, t6
+	   9: PUTL       	t6, R31
+	  10: STB       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x25483648:  4200FFF8  bc 16,0,0x25483640
+	  12: GETL       	CTR, t8
+	  13: ADDL       	$0xFFFFFFFF, t8
+	  14: PUTL       	t8, CTR
+	  15: JIFZL       	t8, $0x2548364C
+	  16: JMPo       	$0x25483640  ($4)
+
+
+
+. 983 25483640 12
+. 8D 9D FF FF 9D 9F FF FF 42 00 FF F8
+==== BB 984 (0x2548364C) approx BBs exec'd 0 ====
+
+	0x2548364C:  83610024  lwz r27,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25483650:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483654:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x25483658:  7F6803A6  mtlr r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x2548365C:  83810010  lwz r28,16(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x10, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0x25483660:  8361000C  lwz r27,12(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R27
+	  25: INCEIPL       	$4
+
+	0x25483664:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x25483668:  83E1001C  lwz r31,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0x2548366C:  38210020  addi r1,r1,32
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0x25483670:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+
+. 984 2548364C 40
+. 83 61 00 24 7F 83 E3 78 83 A1 00 14 7F 68 03 A6 83 81 00 10 83 61 00 0C 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 985 (0x2547A7E4) approx BBs exec'd 0 ====
+
+	0x2547A7E4:  2F910000  cmpi cr7,r17,0
+	   0: GETL       	R17, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547A7E8:  54F7103A  rlwinm r23,r7,2,0,29
+	   4: GETL       	R7, t4
+	   5: SHLL       	$0x2, t4
+	   6: PUTL       	t4, R23
+	   7: INCEIPL       	$4
+
+	0x2547A7EC:  3B400000  li r26,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R26
+	  10: INCEIPL       	$4
+
+	0x2547A7F0:  7F57512E  stwx r26,r23,r10
+	  11: GETL       	R10, t8
+	  12: GETL       	R23, t10
+	  13: ADDL       	t10, t8
+	  14: GETL       	R26, t12
+	  15: STL       	t12, (t8)
+	  16: INCEIPL       	$4
+
+	0x2547A7F4:  409E06DC  bc 4,30,0x2547AED0
+	  17: Jc30o       	$0x2547AED0
+
+
+
+. 985 2547A7E4 20
+. 2F 91 00 00 54 F7 10 3A 3B 40 00 00 7F 57 51 2E 40 9E 06 DC
+==== BB 986 (0x2547A7F8) approx BBs exec'd 0 ====
+
+	0x2547A7F8:  81010000  lwz r8,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0x2547A7FC:  82280004  lwz r17,4(r8)
+	   4: GETL       	R8, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R17
+	   8: INCEIPL       	$4
+
+	0x2547A800:  81C8FFB8  lwz r14,-72(r8)
+	   9: GETL       	R8, t8
+	  10: ADDL       	$0xFFFFFFB8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R14
+	  13: INCEIPL       	$4
+
+	0x2547A804:  7E2803A6  mtlr r17
+	  14: GETL       	R17, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x2547A808:  81E8FFBC  lwz r15,-68(r8)
+	  17: GETL       	R8, t14
+	  18: ADDL       	$0xFFFFFFBC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R15
+	  21: INCEIPL       	$4
+
+	0x2547A80C:  8208FFC0  lwz r16,-64(r8)
+	  22: GETL       	R8, t18
+	  23: ADDL       	$0xFFFFFFC0, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R16
+	  26: INCEIPL       	$4
+
+	0x2547A810:  8228FFC4  lwz r17,-60(r8)
+	  27: GETL       	R8, t22
+	  28: ADDL       	$0xFFFFFFC4, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R17
+	  31: INCEIPL       	$4
+
+	0x2547A814:  8248FFC8  lwz r18,-56(r8)
+	  32: GETL       	R8, t26
+	  33: ADDL       	$0xFFFFFFC8, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R18
+	  36: INCEIPL       	$4
+
+	0x2547A818:  8268FFCC  lwz r19,-52(r8)
+	  37: GETL       	R8, t30
+	  38: ADDL       	$0xFFFFFFCC, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R19
+	  41: INCEIPL       	$4
+
+	0x2547A81C:  8288FFD0  lwz r20,-48(r8)
+	  42: GETL       	R8, t34
+	  43: ADDL       	$0xFFFFFFD0, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R20
+	  46: INCEIPL       	$4
+
+	0x2547A820:  82A8FFD4  lwz r21,-44(r8)
+	  47: GETL       	R8, t38
+	  48: ADDL       	$0xFFFFFFD4, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R21
+	  51: INCEIPL       	$4
+
+	0x2547A824:  82C8FFD8  lwz r22,-40(r8)
+	  52: GETL       	R8, t42
+	  53: ADDL       	$0xFFFFFFD8, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R22
+	  56: INCEIPL       	$4
+
+	0x2547A828:  82E8FFDC  lwz r23,-36(r8)
+	  57: GETL       	R8, t46
+	  58: ADDL       	$0xFFFFFFDC, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R23
+	  61: INCEIPL       	$4
+
+	0x2547A82C:  8308FFE0  lwz r24,-32(r8)
+	  62: GETL       	R8, t50
+	  63: ADDL       	$0xFFFFFFE0, t50
+	  64: LDL       	(t50), t52
+	  65: PUTL       	t52, R24
+	  66: INCEIPL       	$4
+
+	0x2547A830:  8328FFE4  lwz r25,-28(r8)
+	  67: GETL       	R8, t54
+	  68: ADDL       	$0xFFFFFFE4, t54
+	  69: LDL       	(t54), t56
+	  70: PUTL       	t56, R25
+	  71: INCEIPL       	$4
+
+	0x2547A834:  8348FFE8  lwz r26,-24(r8)
+	  72: GETL       	R8, t58
+	  73: ADDL       	$0xFFFFFFE8, t58
+	  74: LDL       	(t58), t60
+	  75: PUTL       	t60, R26
+	  76: INCEIPL       	$4
+
+	0x2547A838:  8368FFEC  lwz r27,-20(r8)
+	  77: GETL       	R8, t62
+	  78: ADDL       	$0xFFFFFFEC, t62
+	  79: LDL       	(t62), t64
+	  80: PUTL       	t64, R27
+	  81: INCEIPL       	$4
+
+	0x2547A83C:  8388FFF0  lwz r28,-16(r8)
+	  82: GETL       	R8, t66
+	  83: ADDL       	$0xFFFFFFF0, t66
+	  84: LDL       	(t66), t68
+	  85: PUTL       	t68, R28
+	  86: INCEIPL       	$4
+
+	0x2547A840:  83A8FFF4  lwz r29,-12(r8)
+	  87: GETL       	R8, t70
+	  88: ADDL       	$0xFFFFFFF4, t70
+	  89: LDL       	(t70), t72
+	  90: PUTL       	t72, R29
+	  91: INCEIPL       	$4
+
+	0x2547A844:  83C8FFF8  lwz r30,-8(r8)
+	  92: GETL       	R8, t74
+	  93: ADDL       	$0xFFFFFFF8, t74
+	  94: LDL       	(t74), t76
+	  95: PUTL       	t76, R30
+	  96: INCEIPL       	$4
+
+	0x2547A848:  83E8FFFC  lwz r31,-4(r8)
+	  97: GETL       	R8, t78
+	  98: ADDL       	$0xFFFFFFFC, t78
+	  99: LDL       	(t78), t80
+	 100: PUTL       	t80, R31
+	 101: INCEIPL       	$4
+
+	0x2547A84C:  7D014378  or r1,r8,r8
+	 102: GETL       	R8, t82
+	 103: PUTL       	t82, R1
+	 104: INCEIPL       	$4
+
+	0x2547A850:  4E800020  blr
+	 105: GETL       	LR, t84
+	 106: JMPo-r       	t84  ($4)
+
+
+
+. 986 2547A7F8 92
+. 81 01 00 00 82 28 00 04 81 C8 FF B8 7E 28 03 A6 81 E8 FF BC 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+==== BB 987 (0x254727E4) approx BBs exec'd 0 ====
+
+	0x254727E4:  80F4015C  lwz r7,348(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x15C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254727E8:  2C870000  cmpi cr1,r7,0
+	   5: GETL       	R7, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254727EC:  4186003C  bc 12,6,0x25472828
+	   9: Js06o       	$0x25472828
+
+
+
+. 987 254727E4 12
+. 80 F4 01 5C 2C 87 00 00 41 86 00 3C
+==== BB 988 (0x254727F0) approx BBs exec'd 0 ====
+
+	0x254727F0:  81140158  lwz r8,344(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x158, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254727F4:  7CE903A6  mtctr r7
+	   5: GETL       	R7, t4
+	   6: PUTL       	t4, CTR
+	   7: INCEIPL       	$4
+
+	0x254727F8:  38E7FFFF  addi r7,r7,-1
+	   8: GETL       	R7, t6
+	   9: ADDL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0x254727FC:  54F1103A  rlwinm r17,r7,2,0,29
+	  12: GETL       	R7, t8
+	  13: SHLL       	$0x2, t8
+	  14: PUTL       	t8, R17
+	  15: INCEIPL       	$4
+
+	0x25472800:  7F11402E  lwzx r24,r17,r8
+	  16: GETL       	R8, t10
+	  17: GETL       	R17, t12
+	  18: ADDL       	t12, t10
+	  19: LDL       	(t10), t14
+	  20: PUTL       	t14, R24
+	  21: INCEIPL       	$4
+
+	0x25472804:  82B80180  lwz r21,384(r24)
+	  22: GETL       	R24, t16
+	  23: ADDL       	$0x180, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R21
+	  26: INCEIPL       	$4
+
+	0x25472808:  66A80800  oris r8,r21,0x800
+	  27: GETL       	R21, t20
+	  28: ORL       	$0x8000000, t20
+	  29: PUTL       	t20, R8
+	  30: INCEIPL       	$4
+
+	0x2547280C:  91180180  stw r8,384(r24)
+	  31: GETL       	R8, t22
+	  32: GETL       	R24, t24
+	  33: ADDL       	$0x180, t24
+	  34: STL       	t22, (t24)
+	  35: INCEIPL       	$4
+
+	0x25472810:  81140158  lwz r8,344(r20)
+	  36: GETL       	R20, t26
+	  37: ADDL       	$0x158, t26
+	  38: LDL       	(t26), t28
+	  39: PUTL       	t28, R8
+	  40: INCEIPL       	$4
+
+	0x25472814:  7F31402E  lwzx r25,r17,r8
+	  41: GETL       	R8, t30
+	  42: GETL       	R17, t32
+	  43: ADDL       	t32, t30
+	  44: LDL       	(t30), t34
+	  45: PUTL       	t34, R25
+	  46: INCEIPL       	$4
+
+	0x25472818:  81390178  lwz r9,376(r25)
+	  47: GETL       	R25, t36
+	  48: ADDL       	$0x178, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R9
+	  51: INCEIPL       	$4
+
+	0x2547281C:  3B890001  addi r28,r9,1
+	  52: GETL       	R9, t40
+	  53: ADDL       	$0x1, t40
+	  54: PUTL       	t40, R28
+	  55: INCEIPL       	$4
+
+	0x25472820:  93990178  stw r28,376(r25)
+	  56: GETL       	R28, t42
+	  57: GETL       	R25, t44
+	  58: ADDL       	$0x178, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x25472824:  4200FFD4  bc 16,0,0x254727F8
+	  61: GETL       	CTR, t46
+	  62: ADDL       	$0xFFFFFFFF, t46
+	  63: PUTL       	t46, CTR
+	  64: JIFZL       	t46, $0x25472828
+	  65: JMPo       	$0x254727F8  ($4)
+
+
+
+. 988 254727F0 56
+. 81 14 01 58 7C E9 03 A6 38 E7 FF FF 54 F1 10 3A 7F 11 40 2E 82 B8 01 80 66 A8 08 00 91 18 01 80 81 14 01 58 7F 31 40 2E 81 39 01 78 3B 89 00 01 93 99 01 78 42 00 FF D4
+==== BB 989 (0x254727F8) approx BBs exec'd 0 ====
+
+	0x254727F8:  38E7FFFF  addi r7,r7,-1
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R7
+	   3: INCEIPL       	$4
+
+	0x254727FC:  54F1103A  rlwinm r17,r7,2,0,29
+	   4: GETL       	R7, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R17
+	   7: INCEIPL       	$4
+
+	0x25472800:  7F11402E  lwzx r24,r17,r8
+	   8: GETL       	R8, t4
+	   9: GETL       	R17, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R24
+	  13: INCEIPL       	$4
+
+	0x25472804:  82B80180  lwz r21,384(r24)
+	  14: GETL       	R24, t10
+	  15: ADDL       	$0x180, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R21
+	  18: INCEIPL       	$4
+
+	0x25472808:  66A80800  oris r8,r21,0x800
+	  19: GETL       	R21, t14
+	  20: ORL       	$0x8000000, t14
+	  21: PUTL       	t14, R8
+	  22: INCEIPL       	$4
+
+	0x2547280C:  91180180  stw r8,384(r24)
+	  23: GETL       	R8, t16
+	  24: GETL       	R24, t18
+	  25: ADDL       	$0x180, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0x25472810:  81140158  lwz r8,344(r20)
+	  28: GETL       	R20, t20
+	  29: ADDL       	$0x158, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R8
+	  32: INCEIPL       	$4
+
+	0x25472814:  7F31402E  lwzx r25,r17,r8
+	  33: GETL       	R8, t24
+	  34: GETL       	R17, t26
+	  35: ADDL       	t26, t24
+	  36: LDL       	(t24), t28
+	  37: PUTL       	t28, R25
+	  38: INCEIPL       	$4
+
+	0x25472818:  81390178  lwz r9,376(r25)
+	  39: GETL       	R25, t30
+	  40: ADDL       	$0x178, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R9
+	  43: INCEIPL       	$4
+
+	0x2547281C:  3B890001  addi r28,r9,1
+	  44: GETL       	R9, t34
+	  45: ADDL       	$0x1, t34
+	  46: PUTL       	t34, R28
+	  47: INCEIPL       	$4
+
+	0x25472820:  93990178  stw r28,376(r25)
+	  48: GETL       	R28, t36
+	  49: GETL       	R25, t38
+	  50: ADDL       	$0x178, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25472824:  4200FFD4  bc 16,0,0x254727F8
+	  53: GETL       	CTR, t40
+	  54: ADDL       	$0xFFFFFFFF, t40
+	  55: PUTL       	t40, CTR
+	  56: JIFZL       	t40, $0x25472828
+	  57: JMPo       	$0x254727F8  ($4)
+
+
+
+. 989 254727F8 48
+. 38 E7 FF FF 54 F1 10 3A 7F 11 40 2E 82 B8 01 80 66 A8 08 00 91 18 01 80 81 14 01 58 7F 31 40 2E 81 39 01 78 3B 89 00 01 93 99 01 78 42 00 FF D4
+==== BB 990 (0x25472828) approx BBs exec'd 0 ====
+
+	0x25472828:  816E01C8  lwz r11,456(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547282C:  80EE01C4  lwz r7,452(r14)
+	   5: GETL       	R14, t4
+	   6: ADDL       	$0x1C4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x25472830:  90EB000C  stw r7,12(r11)
+	  10: GETL       	R7, t8
+	  11: GETL       	R11, t10
+	  12: ADDL       	$0xC, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25472834:  812E01C4  lwz r9,452(r14)
+	  15: GETL       	R14, t12
+	  16: ADDL       	$0x1C4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25472838:  2E090000  cmpi cr4,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x4, CR
+	  23: INCEIPL       	$4
+
+	0x2547283C:  41920008  bc 12,18,0x25472844
+	  24: Js18o       	$0x25472844
+
+
+
+. 990 25472828 24
+. 81 6E 01 C8 80 EE 01 C4 90 EB 00 0C 81 2E 01 C4 2E 09 00 00 41 92 00 08
+==== BB 991 (0x25472840) approx BBs exec'd 0 ====
+
+	0x25472840:  91690010  stw r11,16(r9)
+	   0: GETL       	R11, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0x10, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472844:  814E0330  lwz r10,816(r14)
+	   5: GETL       	R14, t4
+	   6: ADDL       	$0x330, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25472848:  2B0A0001  cmpli cr6,r10,1
+	  10: GETL       	R10, t8
+	  11: MOVL       	$0x1, t12
+	  12: CMPUL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0x2547284C:  40991358  bc 4,25,0x25473BA4
+	  15: Jc25o       	$0x25473BA4
+
+
+
+. 991 25472840 16
+. 91 69 00 10 81 4E 03 30 2B 0A 00 01 40 99 13 58
+==== BB 992 (0x25472850) approx BBs exec'd 0 ====
+
+	0x25472850:  81540158  lwz r10,344(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x158, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25472854:  390E01B8  addi r8,r14,440
+	   5: GETL       	R14, t4
+	   6: ADDL       	$0x1B8, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x25472858:  39200001  li r9,1
+	   9: MOVL       	$0x1, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0x2547285C:  39600004  li r11,4
+	  12: MOVL       	$0x4, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0x25472860:  806A0004  lwz r3,4(r10)
+	  15: GETL       	R10, t10
+	  16: ADDL       	$0x4, t10
+	  17: LDL       	(t10), t12
+	  18: PUTL       	t12, R3
+	  19: INCEIPL       	$4
+
+	0x25472864:  7F834000  cmp cr7,r3,r8
+	  20: GETL       	R3, t14
+	  21: GETL       	R8, t16
+	  22: CMPL       	t14, t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x25472868:  419E0018  bc 12,30,0x25472880
+	  25: Js30o       	$0x25472880
+
+
+
+. 992 25472850 28
+. 81 54 01 58 39 0E 01 B8 39 20 00 01 39 60 00 04 80 6A 00 04 7F 83 40 00 41 9E 00 18
+==== BB 993 (0x2547286C) approx BBs exec'd 0 ====
+
+	0x2547286C:  39290001  addi r9,r9,1
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25472870:  552B103A  rlwinm r11,r9,2,0,29
+	   4: GETL       	R9, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R11
+	   7: INCEIPL       	$4
+
+	0x25472874:  7C8B502E  lwzx r4,r11,r10
+	   8: GETL       	R10, t4
+	   9: GETL       	R11, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x25472878:  7F844000  cmp cr7,r4,r8
+	  14: GETL       	R4, t10
+	  15: GETL       	R8, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547287C:  409EFFF0  bc 4,30,0x2547286C
+	  19: Jc30o       	$0x2547286C
+
+
+
+. 993 2547286C 20
+. 39 29 00 01 55 2B 10 3A 7C 8B 50 2E 7F 84 40 00 40 9E FF F0
+==== BB 994 (0x25472880) approx BBs exec'd 0 ====
+
+	0x25472880:  811F0030  lwz r8,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25472884:  7D4B5214  add r10,r11,r10
+	   5: GETL       	R11, t4
+	   6: GETL       	R10, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25472888:  816AFFFC  lwz r11,-4(r10)
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0xFFFFFFFC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547288C:  2C080000  cmpi cr0,r8,0
+	  15: GETL       	R8, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x25472890:  916E01C8  stw r11,456(r14)
+	  19: GETL       	R11, t16
+	  20: GETL       	R14, t18
+	  21: ADDL       	$0x1C8, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0x25472894:  40820F34  bc 4,2,0x254737C8
+	  24: Jc02o       	$0x254737C8
+
+
+
+. 994 25472880 24
+. 81 1F 00 30 7D 4B 52 14 81 6A FF FC 2C 08 00 00 91 6E 01 C8 40 82 0F 34
+==== BB 995 (0x25472898) approx BBs exec'd 0 ====
+
+	0x25472898:  8274015C  lwz r19,348(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x15C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0x2547289C:  3AE90001  addi r23,r9,1
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R23
+	   8: INCEIPL       	$4
+
+	0x254728A0:  38000000  li r0,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0x254728A4:  7DC97378  or r9,r14,r14
+	  12: GETL       	R14, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x254728A8:  7F979840  cmpl cr7,r23,r19
+	  15: GETL       	R23, t10
+	  16: GETL       	R19, t12
+	  17: CMPUL       	t10, t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x7, CR
+	  19: INCEIPL       	$4
+
+	0x254728AC:  409C0008  bc 4,28,0x254728B4
+	  20: Jc28o       	$0x254728B4
+
+
+
+. 995 25472898 24
+. 82 74 01 5C 3A E9 00 01 38 00 00 00 7D C9 73 78 7F 97 98 40 40 9C 00 08
+==== BB 996 (0x254728B0) approx BBs exec'd 0 ====
+
+	0x254728B0:  800A0004  lwz r0,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x254728B4:  900901C4  stw r0,452(r9)
+	   5: GETL       	R0, t4
+	   6: GETL       	R9, t6
+	   7: ADDL       	$0x1C4, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x254728B8:  820E01C8  lwz r16,456(r14)
+	  10: GETL       	R14, t8
+	  11: ADDL       	$0x1C8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R16
+	  14: INCEIPL       	$4
+
+	0x254728BC:  380E01B8  addi r0,r14,440
+	  15: GETL       	R14, t12
+	  16: ADDL       	$0x1B8, t12
+	  17: PUTL       	t12, R0
+	  18: INCEIPL       	$4
+
+	0x254728C0:  9010000C  stw r0,12(r16)
+	  19: GETL       	R0, t14
+	  20: GETL       	R16, t16
+	  21: ADDL       	$0xC, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x254728C4:  812E01C4  lwz r9,452(r14)
+	  24: GETL       	R14, t18
+	  25: ADDL       	$0x1C4, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R9
+	  28: INCEIPL       	$4
+
+	0x254728C8:  2C890000  cmpi cr1,r9,0
+	  29: GETL       	R9, t22
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x1, CR
+	  32: INCEIPL       	$4
+
+	0x254728CC:  41860008  bc 12,6,0x254728D4
+	  33: Js06o       	$0x254728D4
+
+
+
+. 996 254728B0 32
+. 80 0A 00 04 90 09 01 C4 82 0E 01 C8 38 0E 01 B8 90 10 00 0C 81 2E 01 C4 2C 89 00 00 41 86 00 08
+==== BB 997 (0x254728D0) approx BBs exec'd 0 ====
+
+	0x254728D0:  90090010  stw r0,16(r9)
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0x10, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254728D4:  807E002C  lwz r3,44(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x2C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x254728D8:  691A0003  xori r26,r8,0x3
+	  10: GETL       	R8, t8
+	  11: XORL       	$0x3, t8
+	  12: PUTL       	t8, R26
+	  13: INCEIPL       	$4
+
+	0x254728DC:  201A0000  subfic r0,r26,0
+	  14: GETL       	R26, t10
+	  15: MOVL       	$0x0, t12
+	  16: SBBL       	t10, t12  (-wCa)
+	  17: PUTL       	t12, R0
+	  18: INCEIPL       	$4
+
+	0x254728E0:  7F40D114  adde r26,r0,r26
+	  19: GETL       	R0, t14
+	  20: GETL       	R26, t16
+	  21: ADCL       	t14, t16  (-rCa-wCa)
+	  22: PUTL       	t16, R26
+	  23: INCEIPL       	$4
+
+	0x254728E4:  809E0014  lwz r4,20(r30)
+	  24: GETL       	R30, t18
+	  25: ADDL       	$0x14, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R4
+	  28: INCEIPL       	$4
+
+	0x254728E8:  21480000  subfic r10,r8,0
+	  29: GETL       	R8, t22
+	  30: MOVL       	$0x0, t24
+	  31: SBBL       	t22, t24  (-wCa)
+	  32: PUTL       	t24, R10
+	  33: INCEIPL       	$4
+
+	0x254728EC:  7FAA4114  adde r29,r10,r8
+	  34: GETL       	R10, t26
+	  35: GETL       	R8, t28
+	  36: ADCL       	t26, t28  (-rCa-wCa)
+	  37: PUTL       	t28, R29
+	  38: INCEIPL       	$4
+
+	0x254728F0:  38BF0058  addi r5,r31,88
+	  39: GETL       	R31, t30
+	  40: ADDL       	$0x58, t30
+	  41: PUTL       	t30, R5
+	  42: INCEIPL       	$4
+
+	0x254728F4:  93BF0058  stw r29,88(r31)
+	  43: GETL       	R29, t32
+	  44: GETL       	R31, t34
+	  45: ADDL       	$0x58, t34
+	  46: STL       	t32, (t34)
+	  47: INCEIPL       	$4
+
+	0x254728F8:  935F005C  stw r26,92(r31)
+	  48: GETL       	R26, t36
+	  49: GETL       	R31, t38
+	  50: ADDL       	$0x5C, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x254728FC:  48008D9D  bl 0x2547B698
+	  53: MOVL       	$0x25472900, t40
+	  54: PUTL       	t40, LR
+	  55: JMPo-c       	$0x2547B698  ($4)
+
+
+
+. 997 254728D0 48
+. 90 09 00 10 80 7E 00 2C 69 1A 00 03 20 1A 00 00 7F 40 D1 14 80 9E 00 14 21 48 00 00 7F AA 41 14 38 BF 00 58 93 BF 00 58 93 5F 00 5C 48 00 8D 9D
+==== BB 998 _dl_receive_error(0x2547B698) approx BBs exec'd 0 ====
+
+	0x2547B698:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547B69C:  7CC802A6  mflr r6
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x2547B6A0:  4801B961  bl 0x25497000
+	   9: MOVL       	$0x2547B6A4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 998 2547B698 12
+. 94 21 FF D0 7C C8 02 A6 48 01 B9 61
+==== BB 999 (0x2547B6A4) approx BBs exec'd 0 ====
+
+	0x2547B6A4:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B6A8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547B6AC:  9361001C  stw r27,28(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547B6B0:  7CBB2B78  or r27,r5,r5
+	  13: GETL       	R5, t10
+	  14: PUTL       	t10, R27
+	  15: INCEIPL       	$4
+
+	0x2547B6B4:  90C10034  stw r6,52(r1)
+	  16: GETL       	R6, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x34, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2547B6B8:  92E1000C  stw r23,12(r1)
+	  21: GETL       	R23, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547B6BC:  7C972378  or r23,r4,r4
+	  26: GETL       	R4, t20
+	  27: PUTL       	t20, R23
+	  28: INCEIPL       	$4
+
+	0x2547B6C0:  80BE04C8  lwz r5,1224(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4C8, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R5
+	  33: INCEIPL       	$4
+
+	0x2547B6C4:  93210014  stw r25,20(r1)
+	  34: GETL       	R25, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x14, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x2547B6C8:  7C791B78  or r25,r3,r3
+	  39: GETL       	R3, t30
+	  40: PUTL       	t30, R25
+	  41: INCEIPL       	$4
+
+	0x2547B6CC:  812501B4  lwz r9,436(r5)
+	  42: GETL       	R5, t32
+	  43: ADDL       	$0x1B4, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R9
+	  46: INCEIPL       	$4
+
+	0x2547B6D0:  93410018  stw r26,24(r1)
+	  47: GETL       	R26, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x18, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0x2547B6D4:  93810020  stw r28,32(r1)
+	  52: GETL       	R28, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x20, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0x2547B6D8:  7D2903A6  mtctr r9
+	  57: GETL       	R9, t44
+	  58: PUTL       	t44, CTR
+	  59: INCEIPL       	$4
+
+	0x2547B6DC:  93A10024  stw r29,36(r1)
+	  60: GETL       	R29, t46
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x24, t48
+	  63: STL       	t46, (t48)
+	  64: INCEIPL       	$4
+
+	0x2547B6E0:  839E031C  lwz r28,796(r30)
+	  65: GETL       	R30, t50
+	  66: ADDL       	$0x31C, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R28
+	  69: INCEIPL       	$4
+
+	0x2547B6E4:  4E800421  bctrl
+	  70: MOVL       	$0x2547B6E8, t54
+	  71: PUTL       	t54, LR
+	  72: GETL       	CTR, t56
+	  73: JMPo-c       	t56  ($4)
+
+
+
+. 999 2547B6A4 68
+. 93 C1 00 28 7F C8 02 A6 93 61 00 1C 7C BB 2B 78 90 C1 00 34 92 E1 00 0C 7C 97 23 78 80 BE 04 C8 93 21 00 14 7C 79 1B 78 81 25 01 B4 93 41 00 18 93 81 00 20 7D 29 03 A6 93 A1 00 24 83 9E 03 1C 4E 80 04 21
+==== BB 1000 (0x2547B6E8) approx BBs exec'd 0 ====
+
+	0x2547B6E8:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547B6EC:  7C7D1B78  or r29,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0x2547B6F0:  7EE803A6  mtlr r23
+	   6: GETL       	R23, t4
+	   7: PUTL       	t4, LR
+	   8: INCEIPL       	$4
+
+	0x2547B6F4:  835D0000  lwz r26,0(r29)
+	   9: GETL       	R29, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x2547B6F8:  7F63DB78  or r3,r27,r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x2547B6FC:  909D0000  stw r4,0(r29)
+	  16: GETL       	R4, t12
+	  17: GETL       	R29, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x2547B700:  837C0000  lwz r27,0(r28)
+	  20: GETL       	R28, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R27
+	  23: INCEIPL       	$4
+
+	0x2547B704:  933C0000  stw r25,0(r28)
+	  24: GETL       	R25, t20
+	  25: GETL       	R28, t22
+	  26: STL       	t20, (t22)
+	  27: INCEIPL       	$4
+
+	0x2547B708:  4E800021  blrl
+	  28: GETL       	LR, t24
+	  29: MOVL       	$0x2547B70C, t26
+	  30: PUTL       	t26, LR
+	  31: JMPo-r       	t24  ($4)
+
+
+
+. 1000 2547B6E8 36
+. 38 80 00 00 7C 7D 1B 78 7E E8 03 A6 83 5D 00 00 7F 63 DB 78 90 9D 00 00 83 7C 00 00 93 3C 00 00 4E 80 00 21
+==== BB 1001 version_check_doit(0x254717D4) approx BBs exec'd 0 ====
+
+	0x254717D4:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254717D8:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254717DC:  48025825  bl 0x25497000
+	   9: MOVL       	$0x254717E0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1001 254717D4 12
+. 94 21 FF E0 7C 08 02 A6 48 02 58 25
+==== BB 1002 (0x254717E0) approx BBs exec'd 0 ====
+
+	0x254717E0:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254717E4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x254717E8:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254717EC:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254717F0:  7C7F1B78  or r31,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0x254717F4:  80BF0004  lwz r5,4(r31)
+	  21: GETL       	R31, t16
+	  22: ADDL       	$0x4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R5
+	  25: INCEIPL       	$4
+
+	0x254717F8:  38800001  li r4,1
+	  26: MOVL       	$0x1, t20
+	  27: PUTL       	t20, R4
+	  28: INCEIPL       	$4
+
+	0x254717FC:  813E04C8  lwz r9,1224(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4C8, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0x25471800:  93A10014  stw r29,20(r1)
+	  34: GETL       	R29, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x14, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x25471804:  80690000  lwz r3,0(r9)
+	  39: GETL       	R9, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R3
+	  42: INCEIPL       	$4
+
+	0x25471808:  4800B7C1  bl 0x2547CFC8
+	  43: MOVL       	$0x2547180C, t34
+	  44: PUTL       	t34, LR
+	  45: JMPo-c       	$0x2547CFC8  ($4)
+
+
+
+. 1002 254717E0 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 01 00 24 7C 7F 1B 78 80 BF 00 04 38 80 00 01 81 3E 04 C8 93 A1 00 14 80 69 00 00 48 00 B7 C1
+==== BB 1003 _dl_check_all_versions(0x2547CFC8) approx BBs exec'd 0 ====
+
+	0x2547CFC8:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547CFCC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547CFD0:  93E1001C  stw r31,28(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547CFD4:  7C7F1B79  or. r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x2547CFD8:  93410008  stw r26,8(r1)
+	  19: GETL       	R26, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x8, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547CFDC:  7C9A2378  or r26,r4,r4
+	  24: GETL       	R4, t18
+	  25: PUTL       	t18, R26
+	  26: INCEIPL       	$4
+
+	0x2547CFE0:  9361000C  stw r27,12(r1)
+	  27: GETL       	R27, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0xC, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x2547CFE4:  7CBB2B78  or r27,r5,r5
+	  32: GETL       	R5, t24
+	  33: PUTL       	t24, R27
+	  34: INCEIPL       	$4
+
+	0x2547CFE8:  93810010  stw r28,16(r1)
+	  35: GETL       	R28, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x10, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x2547CFEC:  3B800000  li r28,0
+	  40: MOVL       	$0x0, t30
+	  41: PUTL       	t30, R28
+	  42: INCEIPL       	$4
+
+	0x2547CFF0:  93A10014  stw r29,20(r1)
+	  43: GETL       	R29, t32
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x14, t34
+	  46: STL       	t32, (t34)
+	  47: INCEIPL       	$4
+
+	0x2547CFF4:  93C10018  stw r30,24(r1)
+	  48: GETL       	R30, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x18, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x2547CFF8:  90010024  stw r0,36(r1)
+	  53: GETL       	R0, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x24, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x2547CFFC:  40A20018  bc 5,2,0x2547D014
+	  58: Jc02o       	$0x2547D014
+
+
+
+. 1003 2547CFC8 56
+. 94 21 FF E0 7C 08 02 A6 93 E1 00 1C 7C 7F 1B 79 93 41 00 08 7C 9A 23 78 93 61 00 0C 7C BB 2B 78 93 81 00 10 3B 80 00 00 93 A1 00 14 93 C1 00 18 90 01 00 24 40 A2 00 18
+==== BB 1004 (0x2547D014) approx BBs exec'd 0 ====
+
+	0x2547D014:  80DF0180  lwz r6,384(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547D018:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547D01C:  7F44D378  or r4,r26,r26
+	   8: GETL       	R26, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x2547D020:  7F65DB78  or r5,r27,r27
+	  11: GETL       	R27, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x2547D024:  74C90040  andis. r9,r6,0x40
+	  14: GETL       	R6, t10
+	  15: ANDL       	$0x400000, t10
+	  16: PUTL       	t10, R9
+	  17: CMP0L       	t10, t12  (-rSo)
+	  18: ICRFL       	t12, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0x2547D028:  3BA00000  li r29,0
+	  20: MOVL       	$0x0, t14
+	  21: PUTL       	t14, R29
+	  22: INCEIPL       	$4
+
+	0x2547D02C:  4082FFD8  bc 4,2,0x2547D004
+	  23: Jc02o       	$0x2547D004
+
+
+
+. 1004 2547D014 28
+. 80 DF 01 80 7F E3 FB 78 7F 44 D3 78 7F 65 DB 78 74 C9 00 40 3B A0 00 00 40 82 FF D8
+==== BB 1005 (0x2547D030) approx BBs exec'd 0 ====
+
+	0x2547D030:  4BFFFB41  bl 0x2547CB70
+	   0: MOVL       	$0x2547D034, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547CB70  ($4)
+
+
+
+. 1005 2547D030 4
+. 4B FF FB 41
+==== BB 1006 _dl_check_map_versions(0x2547CB70) approx BBs exec'd 0 ====
+
+	0x2547CB70:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x2547CB74:  9421FF60  stwu r1,-160(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF60, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547CB78:  7D800026  mfcr r12
+	   9: GETL       	CR, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x2547CB7C:  4801A485  bl 0x25497000
+	  12: MOVL       	$0x2547CB80, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1006 2547CB70 16
+. 7C 08 02 A6 94 21 FF 60 7D 80 00 26 48 01 A4 85
+==== BB 1007 (0x2547CB80) approx BBs exec'd 0 ====
+
+	0x2547CB80:  92010060  stw r16,96(r1)
+	   0: GETL       	R16, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x60, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547CB84:  3A000000  li r16,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R16
+	   7: INCEIPL       	$4
+
+	0x2547CB88:  900100A4  stw r0,164(r1)
+	   8: GETL       	R0, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xA4, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547CB8C:  81230034  lwz r9,52(r3)
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0x34, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x2547CB90:  92A10074  stw r21,116(r1)
+	  18: GETL       	R21, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x74, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547CB94:  7C952378  or r21,r4,r4
+	  23: GETL       	R4, t18
+	  24: PUTL       	t18, R21
+	  25: INCEIPL       	$4
+
+	0x2547CB98:  2F890000  cmpi cr7,r9,0
+	  26: GETL       	R9, t20
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x7, CR
+	  29: INCEIPL       	$4
+
+	0x2547CB9C:  92C10078  stw r22,120(r1)
+	  30: GETL       	R22, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x78, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x2547CBA0:  93010080  stw r24,128(r1)
+	  35: GETL       	R24, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x80, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x2547CBA4:  3AC00000  li r22,0
+	  40: MOVL       	$0x0, t32
+	  41: PUTL       	t32, R22
+	  42: INCEIPL       	$4
+
+	0x2547CBA8:  9361008C  stw r27,140(r1)
+	  43: GETL       	R27, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x8C, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x2547CBAC:  3B000000  li r24,0
+	  48: MOVL       	$0x0, t38
+	  49: PUTL       	t38, R24
+	  50: INCEIPL       	$4
+
+	0x2547CBB0:  93C10098  stw r30,152(r1)
+	  51: GETL       	R30, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x98, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0x2547CBB4:  7C7B1B78  or r27,r3,r3
+	  56: GETL       	R3, t44
+	  57: PUTL       	t44, R27
+	  58: INCEIPL       	$4
+
+	0x2547CBB8:  93E1009C  stw r31,156(r1)
+	  59: GETL       	R31, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x9C, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0x2547CBBC:  7FC802A6  mflr r30
+	  64: GETL       	LR, t50
+	  65: PUTL       	t50, R30
+	  66: INCEIPL       	$4
+
+	0x2547CBC0:  92210064  stw r17,100(r1)
+	  67: GETL       	R17, t52
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x64, t54
+	  70: STL       	t52, (t54)
+	  71: INCEIPL       	$4
+
+	0x2547CBC4:  7C3F0B78  or r31,r1,r1
+	  72: GETL       	R1, t56
+	  73: PUTL       	t56, R31
+	  74: INCEIPL       	$4
+
+	0x2547CBC8:  92410068  stw r18,104(r1)
+	  75: GETL       	R18, t58
+	  76: GETL       	R1, t60
+	  77: ADDL       	$0x68, t60
+	  78: STL       	t58, (t60)
+	  79: INCEIPL       	$4
+
+	0x2547CBCC:  38600000  li r3,0
+	  80: MOVL       	$0x0, t62
+	  81: PUTL       	t62, R3
+	  82: INCEIPL       	$4
+
+	0x2547CBD0:  9261006C  stw r19,108(r1)
+	  83: GETL       	R19, t64
+	  84: GETL       	R1, t66
+	  85: ADDL       	$0x6C, t66
+	  86: STL       	t64, (t66)
+	  87: INCEIPL       	$4
+
+	0x2547CBD4:  92810070  stw r20,112(r1)
+	  88: GETL       	R20, t68
+	  89: GETL       	R1, t70
+	  90: ADDL       	$0x70, t70
+	  91: STL       	t68, (t70)
+	  92: INCEIPL       	$4
+
+	0x2547CBD8:  92E1007C  stw r23,124(r1)
+	  93: GETL       	R23, t72
+	  94: GETL       	R1, t74
+	  95: ADDL       	$0x7C, t74
+	  96: STL       	t72, (t74)
+	  97: INCEIPL       	$4
+
+	0x2547CBDC:  93210084  stw r25,132(r1)
+	  98: GETL       	R25, t76
+	  99: GETL       	R1, t78
+	 100: ADDL       	$0x84, t78
+	 101: STL       	t76, (t78)
+	 102: INCEIPL       	$4
+
+	0x2547CBE0:  93410088  stw r26,136(r1)
+	 103: GETL       	R26, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x88, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x2547CBE4:  93810090  stw r28,144(r1)
+	 108: GETL       	R28, t84
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0x90, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x2547CBE8:  93A10094  stw r29,148(r1)
+	 113: GETL       	R29, t88
+	 114: GETL       	R1, t90
+	 115: ADDL       	$0x94, t90
+	 116: STL       	t88, (t90)
+	 117: INCEIPL       	$4
+
+	0x2547CBEC:  9181005C  stw r12,92(r1)
+	 118: GETL       	R12, t92
+	 119: GETL       	R1, t94
+	 120: ADDL       	$0x5C, t94
+	 121: STL       	t92, (t94)
+	 122: INCEIPL       	$4
+
+	0x2547CBF0:  419E0180  bc 12,30,0x2547CD70
+	 123: Js30o       	$0x2547CD70
+
+
+
+. 1007 2547CB80 116
+. 92 01 00 60 3A 00 00 00 90 01 00 A4 81 23 00 34 92 A1 00 74 7C 95 23 78 2F 89 00 00 92 C1 00 78 93 01 00 80 3A C0 00 00 93 61 00 8C 3B 00 00 00 93 C1 00 98 7C 7B 1B 78 93 E1 00 9C 7F C8 02 A6 92 21 00 64 7C 3F 0B 78 92 41 00 68 38 60 00 00 92 61 00 6C 92 81 00 70 92 E1 00 7C 93 21 00 84 93 41 00 88 93 81 00 90 93 A1 00 94 91 81 00 5C 41 9E 01 80
+==== BB 1008 (0x2547CBF4) approx BBs exec'd 0 ====
+
+	0x2547CBF4:  825B00AC  lwz r18,172(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xAC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x2547CBF8:  83290004  lwz r25,4(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x2547CBFC:  2D920000  cmpi cr3,r18,0
+	  10: GETL       	R18, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x3, CR
+	  13: INCEIPL       	$4
+
+	0x2547CC00:  823B00B4  lwz r17,180(r27)
+	  14: GETL       	R27, t12
+	  15: ADDL       	$0xB4, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R17
+	  18: INCEIPL       	$4
+
+	0x2547CC04:  418E0128  bc 12,14,0x2547CD2C
+	  19: Js14o       	$0x2547CD2C
+
+
+
+. 1008 2547CBF4 20
+. 82 5B 00 AC 83 29 00 04 2D 92 00 00 82 3B 00 B4 41 8E 01 28
+==== BB 1009 (0x2547CC08) approx BBs exec'd 0 ====
+
+	0x2547CC08:  817B0000  lwz r11,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x2547CC0C:  2E050000  cmpi cr4,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0x2547CC10:  81320004  lwz r9,4(r18)
+	   8: GETL       	R18, t8
+	   9: ADDL       	$0x4, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R9
+	  12: INCEIPL       	$4
+
+	0x2547CC14:  829E04C8  lwz r20,1224(r30)
+	  13: GETL       	R30, t12
+	  14: ADDL       	$0x4C8, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R20
+	  17: INCEIPL       	$4
+
+	0x2547CC18:  7C8B4A2E  lhzx r4,r11,r9
+	  18: GETL       	R9, t16
+	  19: GETL       	R11, t18
+	  20: ADDL       	t18, t16
+	  21: LDW       	(t16), t20
+	  22: PUTL       	t20, R4
+	  23: INCEIPL       	$4
+
+	0x2547CC1C:  7EEB4A14  add r23,r11,r9
+	  24: GETL       	R11, t22
+	  25: GETL       	R9, t24
+	  26: ADDL       	t22, t24
+	  27: PUTL       	t24, R23
+	  28: INCEIPL       	$4
+
+	0x2547CC20:  827E04D4  lwz r19,1236(r30)
+	  29: GETL       	R30, t26
+	  30: ADDL       	$0x4D4, t26
+	  31: LDL       	(t26), t28
+	  32: PUTL       	t28, R19
+	  33: INCEIPL       	$4
+
+	0x2547CC24:  2C040001  cmpi cr0,r4,1
+	  34: GETL       	R4, t30
+	  35: MOVL       	$0x1, t34
+	  36: CMPL       	t30, t34, t32  (-rSo)
+	  37: ICRFL       	t32, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0x2547CC28:  408202D0  bc 4,2,0x2547CEF8
+	  39: Jc02o       	$0x2547CEF8
+
+
+
+. 1009 2547CC08 36
+. 81 7B 00 00 2E 05 00 00 81 32 00 04 82 9E 04 C8 7C 8B 4A 2E 7E EB 4A 14 82 7E 04 D4 2C 04 00 01 40 82 02 D0
+==== BB 1010 (0x2547CC2C) approx BBs exec'd 0 ====
+
+	0x2547CC2C:  835B0018  lwz r26,24(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547CC30:  83970004  lwz r28,4(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x2547CC34:  1E1A0018  mulli r16,r26,24
+	  10: GETL       	R26, t8
+	  11: MULL       	$0x18, t8
+	  12: PUTL       	t8, R16
+	  13: INCEIPL       	$4
+
+	0x2547CC38:  7F5CCA14  add r26,r28,r25
+	  14: GETL       	R28, t10
+	  15: GETL       	R25, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R26
+	  18: INCEIPL       	$4
+
+	0x2547CC3C:  7FB0A02E  lwzx r29,r16,r20
+	  19: GETL       	R20, t14
+	  20: GETL       	R16, t16
+	  21: ADDL       	t16, t14
+	  22: LDL       	(t14), t18
+	  23: PUTL       	t18, R29
+	  24: INCEIPL       	$4
+
+	0x2547CC40:  4800001C  b 0x2547CC5C
+	  25: JMPo       	$0x2547CC5C  ($4)
+
+
+
+. 1010 2547CC2C 24
+. 83 5B 00 18 83 97 00 04 1E 1A 00 18 7F 5C CA 14 7F B0 A0 2E 48 00 00 1C
+==== BB 1011 (0x2547CC5C) approx BBs exec'd 0 ====
+
+	0x2547CC5C:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547CC60:  409AFFE4  bc 4,26,0x2547CC44
+	   4: Jc26o       	$0x2547CC44
+
+
+
+. 1011 2547CC5C 8
+. 2F 1D 00 00 40 9A FF E4
+==== BB 1012 (0x2547CC44) approx BBs exec'd 0 ====
+
+	0x2547CC44:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547CC48:  7F43D378  or r3,r26,r26
+	   3: GETL       	R26, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0x2547CC4C:  4BFFFA89  bl 0x2547C6D4
+	   6: MOVL       	$0x2547CC50, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x2547C6D4  ($4)
+
+
+
+. 1012 2547CC44 12
+. 7F A4 EB 78 7F 43 D3 78 4B FF FA 89
+==== BB 1013 (0x2547CC50) approx BBs exec'd 0 ====
+
+	0x2547CC50:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547CC54:  409E0220  bc 4,30,0x2547CE74
+	   4: Jc30o       	$0x2547CE74
+
+
+
+. 1013 2547CC50 8
+. 2F 83 00 00 40 9E 02 20
+==== BB 1014 (0x2547CC58) approx BBs exec'd 0 ====
+
+	0x2547CC58:  83BD000C  lwz r29,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547CC5C:  2F1D0000  cmpi cr6,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547CC60:  409AFFE4  bc 4,26,0x2547CC44
+	   9: Jc26o       	$0x2547CC44
+
+
+
+. 1014 2547CC58 12
+. 83 BD 00 0C 2F 1D 00 00 40 9A FF E4
+==== BB 1015 (0x2547CE74) approx BBs exec'd 0 ====
+
+	0x2547CE74:  7FBCEB78  or r28,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0x2547CE78:  4192FE30  bc 12,18,0x2547CCA8
+	   3: Js18o       	$0x2547CCA8
+
+
+
+. 1015 2547CE74 8
+. 7F BC EB 78 41 92 FE 30
+==== BB 1016 (0x2547CCA8) approx BBs exec'd 0 ====
+
+	0x2547CCA8:  80970008  lwz r4,8(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547CCAC:  7FA4BA14  add r29,r4,r23
+	   5: GETL       	R4, t4
+	   6: GETL       	R23, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547CCB0:  7E7A9B78  or r26,r19,r19
+	  10: GETL       	R19, t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x2547CCB4:  811B0004  lwz r8,4(r27)
+	  13: GETL       	R27, t10
+	  14: ADDL       	$0x4, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R8
+	  17: INCEIPL       	$4
+
+	0x2547CCB8:  7EA7AB78  or r7,r21,r21
+	  18: GETL       	R21, t14
+	  19: PUTL       	t14, R7
+	  20: INCEIPL       	$4
+
+	0x2547CCBC:  88A80000  lbz r5,0(r8)
+	  21: GETL       	R8, t16
+	  22: LDB       	(t16), t18
+	  23: PUTL       	t18, R5
+	  24: INCEIPL       	$4
+
+	0x2547CCC0:  7D034378  or r3,r8,r8
+	  25: GETL       	R8, t20
+	  26: PUTL       	t20, R3
+	  27: INCEIPL       	$4
+
+	0x2547CCC4:  2C850000  cmpi cr1,r5,0
+	  28: GETL       	R5, t22
+	  29: CMP0L       	t22, t24  (-rSo)
+	  30: ICRFL       	t24, $0x1, CR
+	  31: INCEIPL       	$4
+
+	0x2547CCC8:  4086000C  bc 4,6,0x2547CCD4
+	  32: Jc06o       	$0x2547CCD4
+
+
+
+. 1016 2547CCA8 36
+. 80 97 00 08 7F A4 BA 14 7E 7A 9B 78 81 1B 00 04 7E A7 AB 78 88 A8 00 00 7D 03 43 78 2C 85 00 00 40 86 00 0C
+==== BB 1017 (0x2547CCCC) approx BBs exec'd 0 ====
+
+	0x2547CCCC:  80DA0000  lwz r6,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x2547CCD0:  80660000  lwz r3,0(r6)
+	   4: GETL       	R6, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R3
+	   7: INCEIPL       	$4
+
+	0x2547CCD4:  815D0008  lwz r10,8(r29)
+	   8: GETL       	R29, t8
+	   9: ADDL       	$0x8, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R10
+	  12: INCEIPL       	$4
+
+	0x2547CCD8:  A21D0004  lhz r16,4(r29)
+	  13: GETL       	R29, t12
+	  14: ADDL       	$0x4, t12
+	  15: LDW       	(t12), t14
+	  16: PUTL       	t14, R16
+	  17: INCEIPL       	$4
+
+	0x2547CCDC:  809D0000  lwz r4,0(r29)
+	  18: GETL       	R29, t16
+	  19: LDL       	(t16), t18
+	  20: PUTL       	t18, R4
+	  21: INCEIPL       	$4
+
+	0x2547CCE0:  7CAACA14  add r5,r10,r25
+	  22: GETL       	R10, t20
+	  23: GETL       	R25, t22
+	  24: ADDL       	t20, t22
+	  25: PUTL       	t22, R5
+	  26: INCEIPL       	$4
+
+	0x2547CCE4:  80DC0014  lwz r6,20(r28)
+	  27: GETL       	R28, t24
+	  28: ADDL       	$0x14, t24
+	  29: LDL       	(t24), t26
+	  30: PUTL       	t26, R6
+	  31: INCEIPL       	$4
+
+	0x2547CCE8:  560807BC  rlwinm r8,r16,0,30,30
+	  32: GETL       	R16, t28
+	  33: ANDL       	$0x2, t28
+	  34: PUTL       	t28, R8
+	  35: INCEIPL       	$4
+
+	0x2547CCEC:  4BFFFA85  bl 0x2547C770
+	  36: MOVL       	$0x2547CCF0, t30
+	  37: PUTL       	t30, LR
+	  38: JMPo-c       	$0x2547C770  ($4)
+
+
+
+. 1017 2547CCCC 36
+. 80 DA 00 00 80 66 00 00 81 5D 00 08 A2 1D 00 04 80 9D 00 00 7C AA CA 14 80 DC 00 14 56 08 07 BC 4B FF FA 85
+==== BB 1018 match_symbol(0x2547C770) approx BBs exec'd 0 ====
+
+	0x2547C770:  9421FF20  stwu r1,-224(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF20, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547C774:  7D4802A6  mflr r10
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547C778:  4801A889  bl 0x25497000
+	   9: MOVL       	$0x2547C77C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1018 2547C770 12
+. 94 21 FF 20 7D 48 02 A6 48 01 A8 89
+==== BB 1019 (0x2547C77C) approx BBs exec'd 0 ====
+
+	0x2547C77C:  93C100D8  stw r30,216(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xD8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547C780:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547C784:  926100AC  stw r19,172(r1)
+	   8: GETL       	R19, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xAC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547C788:  914100E4  stw r10,228(r1)
+	  13: GETL       	R10, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xE4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547C78C:  3A600000  li r19,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R19
+	  20: INCEIPL       	$4
+
+	0x2547C790:  81660034  lwz r11,52(r6)
+	  21: GETL       	R6, t16
+	  22: ADDL       	$0x34, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R11
+	  25: INCEIPL       	$4
+
+	0x2547C794:  813E04F4  lwz r9,1268(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4F4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0x2547C798:  92C100B8  stw r22,184(r1)
+	  31: GETL       	R22, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0xB8, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547C79C:  7CF63B78  or r22,r7,r7
+	  36: GETL       	R7, t28
+	  37: PUTL       	t28, R22
+	  38: INCEIPL       	$4
+
+	0x2547C7A0:  80090000  lwz r0,0(r9)
+	  39: GETL       	R9, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0x2547C7A4:  92E100BC  stw r23,188(r1)
+	  43: GETL       	R23, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0xBC, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x2547C7A8:  7D174378  or r23,r8,r8
+	  48: GETL       	R8, t38
+	  49: PUTL       	t38, R23
+	  50: INCEIPL       	$4
+
+	0x2547C7AC:  70090010  andi. r9,r0,0x10
+	  51: GETL       	R0, t40
+	  52: ANDL       	$0x10, t40
+	  53: PUTL       	t40, R9
+	  54: CMP0L       	t40, t42  (-rSo)
+	  55: ICRFL       	t42, $0x0, CR
+	  56: INCEIPL       	$4
+
+	0x2547C7B0:  930100C0  stw r24,192(r1)
+	  57: GETL       	R24, t44
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0xC0, t46
+	  60: STL       	t44, (t46)
+	  61: INCEIPL       	$4
+
+	0x2547C7B4:  932100C4  stw r25,196(r1)
+	  62: GETL       	R25, t48
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0xC4, t50
+	  65: STL       	t48, (t50)
+	  66: INCEIPL       	$4
+
+	0x2547C7B8:  7C781B78  or r24,r3,r3
+	  67: GETL       	R3, t52
+	  68: PUTL       	t52, R24
+	  69: INCEIPL       	$4
+
+	0x2547C7BC:  936100CC  stw r27,204(r1)
+	  70: GETL       	R27, t54
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0xCC, t56
+	  73: STL       	t54, (t56)
+	  74: INCEIPL       	$4
+
+	0x2547C7C0:  7CD93378  or r25,r6,r6
+	  75: GETL       	R6, t58
+	  76: PUTL       	t58, R25
+	  77: INCEIPL       	$4
+
+	0x2547C7C4:  938100D0  stw r28,208(r1)
+	  78: GETL       	R28, t60
+	  79: GETL       	R1, t62
+	  80: ADDL       	$0xD0, t62
+	  81: STL       	t60, (t62)
+	  82: INCEIPL       	$4
+
+	0x2547C7C8:  7C9B2378  or r27,r4,r4
+	  83: GETL       	R4, t64
+	  84: PUTL       	t64, R27
+	  85: INCEIPL       	$4
+
+	0x2547C7CC:  93E100DC  stw r31,220(r1)
+	  86: GETL       	R31, t66
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0xDC, t68
+	  89: STL       	t66, (t68)
+	  90: INCEIPL       	$4
+
+	0x2547C7D0:  7CBC2B78  or r28,r5,r5
+	  91: GETL       	R5, t70
+	  92: PUTL       	t70, R28
+	  93: INCEIPL       	$4
+
+	0x2547C7D4:  928100B0  stw r20,176(r1)
+	  94: GETL       	R20, t72
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0xB0, t74
+	  97: STL       	t72, (t74)
+	  98: INCEIPL       	$4
+
+	0x2547C7D8:  7C3F0B78  or r31,r1,r1
+	  99: GETL       	R1, t76
+	 100: PUTL       	t76, R31
+	 101: INCEIPL       	$4
+
+	0x2547C7DC:  92A100B4  stw r21,180(r1)
+	 102: GETL       	R21, t78
+	 103: GETL       	R1, t80
+	 104: ADDL       	$0xB4, t80
+	 105: STL       	t78, (t80)
+	 106: INCEIPL       	$4
+
+	0x2547C7E0:  934100C8  stw r26,200(r1)
+	 107: GETL       	R26, t82
+	 108: GETL       	R1, t84
+	 109: ADDL       	$0xC8, t84
+	 110: STL       	t82, (t84)
+	 111: INCEIPL       	$4
+
+	0x2547C7E4:  93A100D4  stw r29,212(r1)
+	 112: GETL       	R29, t86
+	 113: GETL       	R1, t88
+	 114: ADDL       	$0xD4, t88
+	 115: STL       	t86, (t88)
+	 116: INCEIPL       	$4
+
+	0x2547C7E8:  834B0004  lwz r26,4(r11)
+	 117: GETL       	R11, t90
+	 118: ADDL       	$0x4, t90
+	 119: LDL       	(t90), t92
+	 120: PUTL       	t92, R26
+	 121: INCEIPL       	$4
+
+	0x2547C7EC:  40820230  bc 4,2,0x2547CA1C
+	 122: Jc02o       	$0x2547CA1C
+
+
+
+. 1019 2547C77C 116
+. 93 C1 00 D8 7F C8 02 A6 92 61 00 AC 91 41 00 E4 3A 60 00 00 81 66 00 34 81 3E 04 F4 92 C1 00 B8 7C F6 3B 78 80 09 00 00 92 E1 00 BC 7D 17 43 78 70 09 00 10 93 01 00 C0 93 21 00 C4 7C 78 1B 78 93 61 00 CC 7C D9 33 78 93 81 00 D0 7C 9B 23 78 93 E1 00 DC 7C BC 2B 78 92 81 00 B0 7C 3F 0B 78 92 A1 00 B4 93 41 00 C8 93 A1 00 D4 83 4B 00 04 40 82 02 30
+==== BB 1020 (0x2547C7F0) approx BBs exec'd 0 ====
+
+	0x2547C7F0:  813900B4  lwz r9,180(r25)
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0xB4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547C7F4:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547C7F8:  41860264  bc 12,6,0x2547CA5C
+	   9: Js06o       	$0x2547CA5C
+
+
+
+. 1020 2547C7F0 12
+. 81 39 00 B4 2C 89 00 00 41 86 02 64
+==== BB 1021 (0x2547C7FC) approx BBs exec'd 0 ====
+
+	0x2547C7FC:  81690004  lwz r11,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547C800:  3A9F0008  addi r20,r31,8
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R20
+	   8: INCEIPL       	$4
+
+	0x2547C804:  80190000  lwz r0,0(r25)
+	   9: GETL       	R25, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x2547C808:  82BE0358  lwz r21,856(r30)
+	  13: GETL       	R30, t10
+	  14: ADDL       	$0x358, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R21
+	  17: INCEIPL       	$4
+
+	0x2547C80C:  7FA05A14  add r29,r0,r11
+	  18: GETL       	R0, t14
+	  19: GETL       	R11, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R29
+	  22: INCEIPL       	$4
+
+	0x2547C810:  A13D0000  lhz r9,0(r29)
+	  23: GETL       	R29, t18
+	  24: LDW       	(t18), t20
+	  25: PUTL       	t20, R9
+	  26: INCEIPL       	$4
+
+	0x2547C814:  7F83E378  or r3,r28,r28
+	  27: GETL       	R28, t22
+	  28: PUTL       	t22, R3
+	  29: INCEIPL       	$4
+
+	0x2547C818:  2F890001  cmpi cr7,r9,1
+	  30: GETL       	R9, t24
+	  31: MOVL       	$0x1, t28
+	  32: CMPL       	t24, t28, t26  (-rSo)
+	  33: ICRFL       	t26, $0x7, CR
+	  34: INCEIPL       	$4
+
+	0x2547C81C:  409E0124  bc 4,30,0x2547C940
+	  35: Jc30o       	$0x2547C940
+
+
+
+. 1021 2547C7FC 36
+. 81 69 00 04 3A 9F 00 08 80 19 00 00 82 BE 03 58 7F A0 5A 14 A1 3D 00 00 7F 83 E3 78 2F 89 00 01 40 9E 01 24
+==== BB 1022 (0x2547C820) approx BBs exec'd 0 ====
+
+	0x2547C820:  809D0008  lwz r4,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547C824:  7C84D800  cmp cr1,r4,r27
+	   5: GETL       	R4, t4
+	   6: GETL       	R27, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x2547C828:  418600B4  bc 12,6,0x2547C8DC
+	  10: Js06o       	$0x2547C8DC
+
+
+
+. 1022 2547C820 12
+. 80 9D 00 08 7C 84 D8 00 41 86 00 B4
+==== BB 1023 (0x2547C82C) approx BBs exec'd 0 ====
+
+	0x2547C82C:  80FD0010  lwz r7,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547C830:  2F870000  cmpi cr7,r7,0
+	   5: GETL       	R7, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547C834:  7FBD3A14  add r29,r29,r7
+	   9: GETL       	R29, t8
+	  10: GETL       	R7, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R29
+	  13: INCEIPL       	$4
+
+	0x2547C838:  409EFFD8  bc 4,30,0x2547C810
+	  14: Jc30o       	$0x2547C810
+
+
+
+. 1023 2547C82C 16
+. 80 FD 00 10 2F 87 00 00 7F BD 3A 14 40 9E FF D8
+==== BB 1024 (0x2547C810) approx BBs exec'd 0 ====
+
+	0x2547C810:  A13D0000  lhz r9,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDW       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547C814:  7F83E378  or r3,r28,r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x2547C818:  2F890001  cmpi cr7,r9,1
+	   7: GETL       	R9, t6
+	   8: MOVL       	$0x1, t10
+	   9: CMPL       	t6, t10, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x2547C81C:  409E0124  bc 4,30,0x2547C940
+	  12: Jc30o       	$0x2547C940
+
+
+
+. 1024 2547C810 16
+. A1 3D 00 00 7F 83 E3 78 2F 89 00 01 40 9E 01 24
+==== BB 1025 (0x2547C8DC) approx BBs exec'd 0 ====
+
+	0x2547C8DC:  80BD000C  lwz r5,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547C8E0:  7CC5E82E  lwzx r6,r5,r29
+	   5: GETL       	R29, t4
+	   6: GETL       	R5, t6
+	   7: ADDL       	t6, t4
+	   8: LDL       	(t4), t8
+	   9: PUTL       	t8, R6
+	  10: INCEIPL       	$4
+
+	0x2547C8E4:  7C86D214  add r4,r6,r26
+	  11: GETL       	R6, t10
+	  12: GETL       	R26, t12
+	  13: ADDL       	t10, t12
+	  14: PUTL       	t12, R4
+	  15: INCEIPL       	$4
+
+	0x2547C8E8:  480065F9  bl 0x25482EE0
+	  16: MOVL       	$0x2547C8EC, t14
+	  17: PUTL       	t14, LR
+	  18: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 1025 2547C8DC 16
+. 80 BD 00 0C 7C C5 E8 2E 7C 86 D2 14 48 00 65 F9
+==== BB 1026 (0x2547C8EC) approx BBs exec'd 0 ====
+
+	0x2547C8EC:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547C8F0:  40BAFF3C  bc 5,26,0x2547C82C
+	   4: Jc26o       	$0x2547C82C
+
+
+
+. 1026 2547C8EC 8
+. 2F 03 00 00 40 BA FF 3C
+==== BB 1027 (0x2547C8F4) approx BBs exec'd 0 ====
+
+	0x2547C8F4:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547C8F8:  80C10000  lwz r6,0(r1)
+	   3: GETL       	R1, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0x2547C8FC:  80E60004  lwz r7,4(r6)
+	   7: GETL       	R6, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R7
+	  11: INCEIPL       	$4
+
+	0x2547C900:  8266FFCC  lwz r19,-52(r6)
+	  12: GETL       	R6, t10
+	  13: ADDL       	$0xFFFFFFCC, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R19
+	  16: INCEIPL       	$4
+
+	0x2547C904:  8286FFD0  lwz r20,-48(r6)
+	  17: GETL       	R6, t14
+	  18: ADDL       	$0xFFFFFFD0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R20
+	  21: INCEIPL       	$4
+
+	0x2547C908:  7CE803A6  mtlr r7
+	  22: GETL       	R7, t18
+	  23: PUTL       	t18, LR
+	  24: INCEIPL       	$4
+
+	0x2547C90C:  82A6FFD4  lwz r21,-44(r6)
+	  25: GETL       	R6, t20
+	  26: ADDL       	$0xFFFFFFD4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R21
+	  29: INCEIPL       	$4
+
+	0x2547C910:  82C6FFD8  lwz r22,-40(r6)
+	  30: GETL       	R6, t24
+	  31: ADDL       	$0xFFFFFFD8, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R22
+	  34: INCEIPL       	$4
+
+	0x2547C914:  82E6FFDC  lwz r23,-36(r6)
+	  35: GETL       	R6, t28
+	  36: ADDL       	$0xFFFFFFDC, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R23
+	  39: INCEIPL       	$4
+
+	0x2547C918:  8306FFE0  lwz r24,-32(r6)
+	  40: GETL       	R6, t32
+	  41: ADDL       	$0xFFFFFFE0, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R24
+	  44: INCEIPL       	$4
+
+	0x2547C91C:  8326FFE4  lwz r25,-28(r6)
+	  45: GETL       	R6, t36
+	  46: ADDL       	$0xFFFFFFE4, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R25
+	  49: INCEIPL       	$4
+
+	0x2547C920:  8346FFE8  lwz r26,-24(r6)
+	  50: GETL       	R6, t40
+	  51: ADDL       	$0xFFFFFFE8, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R26
+	  54: INCEIPL       	$4
+
+	0x2547C924:  8366FFEC  lwz r27,-20(r6)
+	  55: GETL       	R6, t44
+	  56: ADDL       	$0xFFFFFFEC, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R27
+	  59: INCEIPL       	$4
+
+	0x2547C928:  8386FFF0  lwz r28,-16(r6)
+	  60: GETL       	R6, t48
+	  61: ADDL       	$0xFFFFFFF0, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R28
+	  64: INCEIPL       	$4
+
+	0x2547C92C:  83A6FFF4  lwz r29,-12(r6)
+	  65: GETL       	R6, t52
+	  66: ADDL       	$0xFFFFFFF4, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R29
+	  69: INCEIPL       	$4
+
+	0x2547C930:  83C6FFF8  lwz r30,-8(r6)
+	  70: GETL       	R6, t56
+	  71: ADDL       	$0xFFFFFFF8, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R30
+	  74: INCEIPL       	$4
+
+	0x2547C934:  83E6FFFC  lwz r31,-4(r6)
+	  75: GETL       	R6, t60
+	  76: ADDL       	$0xFFFFFFFC, t60
+	  77: LDL       	(t60), t62
+	  78: PUTL       	t62, R31
+	  79: INCEIPL       	$4
+
+	0x2547C938:  7CC13378  or r1,r6,r6
+	  80: GETL       	R6, t64
+	  81: PUTL       	t64, R1
+	  82: INCEIPL       	$4
+
+	0x2547C93C:  4E800020  blr
+	  83: GETL       	LR, t66
+	  84: JMPo-r       	t66  ($4)
+
+
+
+. 1027 2547C8F4 76
+. 38 60 00 00 80 C1 00 00 80 E6 00 04 82 66 FF CC 82 86 FF D0 7C E8 03 A6 82 A6 FF D4 82 C6 FF D8 82 E6 FF DC 83 06 FF E0 83 26 FF E4 83 46 FF E8 83 66 FF EC 83 86 FF F0 83 A6 FF F4 83 C6 FF F8 83 E6 FF FC 7C C1 33 78 4E 80 00 20
+==== BB 1028 (0x2547CCF0) approx BBs exec'd 0 ====
+
+	0x2547CCF0:  A17D0006  lhz r11,6(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x6, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547CCF4:  7ED61B78  or r22,r22,r3
+	   5: GETL       	R22, t4
+	   6: GETL       	R3, t6
+	   7: ORL       	t6, t4
+	   8: PUTL       	t4, R22
+	   9: INCEIPL       	$4
+
+	0x2547CCF8:  5560047E  rlwinm r0,r11,0,17,31
+	  10: GETL       	R11, t8
+	  11: ANDL       	$0x7FFF, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x2547CCFC:  7F00C040  cmpl cr6,r0,r24
+	  14: GETL       	R0, t10
+	  15: GETL       	R24, t12
+	  16: CMPUL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0x2547CD00:  40990008  bc 4,25,0x2547CD08
+	  19: Jc25o       	$0x2547CD08
+
+
+
+. 1028 2547CCF0 20
+. A1 7D 00 06 7E D6 1B 78 55 60 04 7E 7F 00 C0 40 40 99 00 08
+==== BB 1029 (0x2547CD04) approx BBs exec'd 0 ====
+
+	0x2547CD04:  7C180378  or r24,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R24
+	   2: INCEIPL       	$4
+
+	0x2547CD08:  807D000C  lwz r3,12(r29)
+	   3: GETL       	R29, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547CD0C:  2F830000  cmpi cr7,r3,0
+	   8: GETL       	R3, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x2547CD10:  7FBD1A14  add r29,r29,r3
+	  12: GETL       	R29, t10
+	  13: GETL       	R3, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R29
+	  16: INCEIPL       	$4
+
+	0x2547CD14:  409EFFA0  bc 4,30,0x2547CCB4
+	  17: Jc30o       	$0x2547CCB4
+
+
+
+. 1029 2547CD04 20
+. 7C 18 03 78 80 7D 00 0C 2F 83 00 00 7F BD 1A 14 40 9E FF A0
+==== BB 1030 (0x2547CD18) approx BBs exec'd 0 ====
+
+	0x2547CD18:  8017000C  lwz r0,12(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547CD1C:  2C000000  cmpi cr0,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547CD20:  4182000C  bc 12,2,0x2547CD2C
+	   9: Js02o       	$0x2547CD2C
+
+
+
+. 1030 2547CD18 12
+. 80 17 00 0C 2C 00 00 00 41 82 00 0C
+==== BB 1031 (0x2547CD24) approx BBs exec'd 0 ====
+
+	0x2547CD24:  7EF70214  add r23,r23,r0
+	   0: GETL       	R23, t0
+	   1: GETL       	R0, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x2547CD28:  4BFFFF04  b 0x2547CC2C
+	   5: JMPo       	$0x2547CC2C  ($4)
+
+
+
+. 1031 2547CD24 8
+. 7E F7 02 14 4B FF FF 04
+==== BB 1032 (0x2547CD08) approx BBs exec'd 0 ====
+
+	0x2547CD08:  807D000C  lwz r3,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547CD0C:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547CD10:  7FBD1A14  add r29,r29,r3
+	   9: GETL       	R29, t8
+	  10: GETL       	R3, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R29
+	  13: INCEIPL       	$4
+
+	0x2547CD14:  409EFFA0  bc 4,30,0x2547CCB4
+	  14: Jc30o       	$0x2547CCB4
+
+
+
+. 1032 2547CD08 16
+. 80 7D 00 0C 2F 83 00 00 7F BD 1A 14 40 9E FF A0
+==== BB 1033 (0x2547CCB4) approx BBs exec'd 0 ====
+
+	0x2547CCB4:  811B0004  lwz r8,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547CCB8:  7EA7AB78  or r7,r21,r21
+	   5: GETL       	R21, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x2547CCBC:  88A80000  lbz r5,0(r8)
+	   8: GETL       	R8, t6
+	   9: LDB       	(t6), t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x2547CCC0:  7D034378  or r3,r8,r8
+	  12: GETL       	R8, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x2547CCC4:  2C850000  cmpi cr1,r5,0
+	  15: GETL       	R5, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x1, CR
+	  18: INCEIPL       	$4
+
+	0x2547CCC8:  4086000C  bc 4,6,0x2547CCD4
+	  19: Jc06o       	$0x2547CCD4
+
+
+
+. 1033 2547CCB4 24
+. 81 1B 00 04 7E A7 AB 78 88 A8 00 00 7D 03 43 78 2C 85 00 00 40 86 00 0C
+==== BB 1034 (0x2547CD2C) approx BBs exec'd 0 ====
+
+	0x2547CD2C:  2E110000  cmpi cr4,r17,0
+	   0: GETL       	R17, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x2547CD30:  41920034  bc 12,18,0x2547CD64
+	   4: Js18o       	$0x2547CD64
+
+
+
+. 1034 2547CD2C 8
+. 2E 11 00 00 41 92 00 34
+==== BB 1035 (0x2547CD64) approx BBs exec'd 0 ====
+
+	0x2547CD64:  2F980000  cmpi cr7,r24,0
+	   0: GETL       	R24, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547CD68:  409E0064  bc 4,30,0x2547CDCC
+	   4: Jc30o       	$0x2547CDCC
+
+
+
+. 1035 2547CD64 8
+. 2F 98 00 00 40 9E 00 64
+==== BB 1036 (0x2547CDCC) approx BBs exec'd 0 ====
+
+	0x2547CDCC:  3BB80001  addi r29,r24,1
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x2547CDD0:  38800010  li r4,16
+	   4: MOVL       	$0x10, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0x2547CDD4:  7FA3EB78  or r3,r29,r29
+	   7: GETL       	R29, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0x2547CDD8:  3A00000C  li r16,12
+	  10: MOVL       	$0xC, t6
+	  11: PUTL       	t6, R16
+	  12: INCEIPL       	$4
+
+	0x2547CDDC:  4801AC51  bl 0x25497A2C
+	  13: MOVL       	$0x2547CDE0, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0x25497A2C  ($4)
+
+
+
+. 1036 2547CDCC 20
+. 3B B8 00 01 38 80 00 10 7F A3 EB 78 3A 00 00 0C 48 01 AC 51
+==== BB 1037 (0x2547CDE0) approx BBs exec'd 0 ====
+
+	0x2547CDE0:  80DE0378  lwz r6,888(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x378, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547CDE4:  2C030000  cmpi cr0,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547CDE8:  7C651B78  or r5,r3,r3
+	   9: GETL       	R3, t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x2547CDEC:  907B0188  stw r3,392(r27)
+	  12: GETL       	R3, t10
+	  13: GETL       	R27, t12
+	  14: ADDL       	$0x188, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0x2547CDF0:  418201AC  bc 12,2,0x2547CF9C
+	  17: Js02o       	$0x2547CF9C
+
+
+
+. 1037 2547CDE0 20
+. 80 DE 03 78 2C 03 00 00 7C 65 1B 78 90 7B 01 88 41 82 01 AC
+==== BB 1038 (0x2547CDF4) approx BBs exec'd 0 ====
+
+	0x2547CDF4:  839B00E4  lwz r28,228(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xE4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x2547CDF8:  93BB0184  stw r29,388(r27)
+	   5: GETL       	R29, t4
+	   6: GETL       	R27, t6
+	   7: ADDL       	$0x184, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547CDFC:  831C0004  lwz r24,4(r28)
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R24
+	  14: INCEIPL       	$4
+
+	0x2547CE00:  931B0198  stw r24,408(r27)
+	  15: GETL       	R24, t12
+	  16: GETL       	R27, t14
+	  17: ADDL       	$0x198, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x2547CE04:  418E0088  bc 12,14,0x2547CE8C
+	  20: Js14o       	$0x2547CE8C
+
+
+
+. 1038 2547CDF4 20
+. 83 9B 00 E4 93 BB 01 84 83 1C 00 04 93 1B 01 98 41 8E 00 88
+==== BB 1039 (0x2547CE08) approx BBs exec'd 0 ====
+
+	0x2547CE08:  83520004  lwz r26,4(r18)
+	   0: GETL       	R18, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547CE0C:  825B0000  lwz r18,0(r27)
+	   5: GETL       	R27, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R18
+	   8: INCEIPL       	$4
+
+	0x2547CE10:  7CD2D214  add r6,r18,r26
+	   9: GETL       	R18, t8
+	  10: GETL       	R26, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R6
+	  13: INCEIPL       	$4
+
+	0x2547CE14:  81860008  lwz r12,8(r6)
+	  14: GETL       	R6, t12
+	  15: ADDL       	$0x8, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R12
+	  18: INCEIPL       	$4
+
+	0x2547CE18:  7CEC3214  add r7,r12,r6
+	  19: GETL       	R12, t16
+	  20: GETL       	R6, t18
+	  21: ADDL       	t16, t18
+	  22: PUTL       	t18, R7
+	  23: INCEIPL       	$4
+
+	0x2547CE1C:  A2A70006  lhz r21,6(r7)
+	  24: GETL       	R7, t20
+	  25: ADDL       	$0x6, t20
+	  26: LDW       	(t20), t22
+	  27: PUTL       	t22, R21
+	  28: INCEIPL       	$4
+
+	0x2547CE20:  80670000  lwz r3,0(r7)
+	  29: GETL       	R7, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R3
+	  32: INCEIPL       	$4
+
+	0x2547CE24:  56A42376  rlwinm r4,r21,4,13,27
+	  33: GETL       	R21, t28
+	  34: ROLL       	$0x4, t28
+	  35: ANDL       	$0x7FFF0, t28
+	  36: PUTL       	t28, R4
+	  37: INCEIPL       	$4
+
+	0x2547CE28:  56B00420  rlwinm r16,r21,0,16,16
+	  38: GETL       	R21, t30
+	  39: ANDL       	$0x8000, t30
+	  40: PUTL       	t30, R16
+	  41: INCEIPL       	$4
+
+	0x2547CE2C:  7FA42A14  add r29,r4,r5
+	  42: GETL       	R4, t32
+	  43: GETL       	R5, t34
+	  44: ADDL       	t32, t34
+	  45: PUTL       	t34, R29
+	  46: INCEIPL       	$4
+
+	0x2547CE30:  907D0004  stw r3,4(r29)
+	  47: GETL       	R3, t36
+	  48: GETL       	R29, t38
+	  49: ADDL       	$0x4, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0x2547CE34:  921D0008  stw r16,8(r29)
+	  52: GETL       	R16, t40
+	  53: GETL       	R29, t42
+	  54: ADDL       	$0x8, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0x2547CE38:  8167000C  lwz r11,12(r7)
+	  57: GETL       	R7, t44
+	  58: ADDL       	$0xC, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R11
+	  61: INCEIPL       	$4
+
+	0x2547CE3C:  81470008  lwz r10,8(r7)
+	  62: GETL       	R7, t48
+	  63: ADDL       	$0x8, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R10
+	  66: INCEIPL       	$4
+
+	0x2547CE40:  2D8B0000  cmpi cr3,r11,0
+	  67: GETL       	R11, t52
+	  68: CMP0L       	t52, t54  (-rSo)
+	  69: ICRFL       	t54, $0x3, CR
+	  70: INCEIPL       	$4
+
+	0x2547CE44:  81060004  lwz r8,4(r6)
+	  71: GETL       	R6, t56
+	  72: ADDL       	$0x4, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R8
+	  75: INCEIPL       	$4
+
+	0x2547CE48:  7D2ACA14  add r9,r10,r25
+	  76: GETL       	R10, t60
+	  77: GETL       	R25, t62
+	  78: ADDL       	t60, t62
+	  79: PUTL       	t62, R9
+	  80: INCEIPL       	$4
+
+	0x2547CE4C:  7CE75A14  add r7,r7,r11
+	  81: GETL       	R7, t64
+	  82: GETL       	R11, t66
+	  83: ADDL       	t64, t66
+	  84: PUTL       	t66, R7
+	  85: INCEIPL       	$4
+
+	0x2547CE50:  7C08CA14  add r0,r8,r25
+	  86: GETL       	R8, t68
+	  87: GETL       	R25, t70
+	  88: ADDL       	t68, t70
+	  89: PUTL       	t70, R0
+	  90: INCEIPL       	$4
+
+	0x2547CE54:  7D24292E  stwx r9,r4,r5
+	  91: GETL       	R5, t72
+	  92: GETL       	R4, t74
+	  93: ADDL       	t74, t72
+	  94: GETL       	R9, t76
+	  95: STL       	t76, (t72)
+	  96: INCEIPL       	$4
+
+	0x2547CE58:  901D000C  stw r0,12(r29)
+	  97: GETL       	R0, t78
+	  98: GETL       	R29, t80
+	  99: ADDL       	$0xC, t80
+	 100: STL       	t78, (t80)
+	 101: INCEIPL       	$4
+
+	0x2547CE5C:  408EFFC0  bc 4,14,0x2547CE1C
+	 102: Jc14o       	$0x2547CE1C
+
+
+
+. 1039 2547CE08 88
+. 83 52 00 04 82 5B 00 00 7C D2 D2 14 81 86 00 08 7C EC 32 14 A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+==== BB 1040 (0x2547CE60) approx BBs exec'd 0 ====
+
+	0x2547CE60:  8006000C  lwz r0,12(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547CE64:  2C800000  cmpi cr1,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547CE68:  41860024  bc 12,6,0x2547CE8C
+	   9: Js06o       	$0x2547CE8C
+
+
+
+. 1040 2547CE60 12
+. 80 06 00 0C 2C 80 00 00 41 86 00 24
+==== BB 1041 (0x2547CE6C) approx BBs exec'd 0 ====
+
+	0x2547CE6C:  7CC60214  add r6,r6,r0
+	   0: GETL       	R6, t0
+	   1: GETL       	R0, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547CE70:  4BFFFFA4  b 0x2547CE14
+	   5: JMPo       	$0x2547CE14  ($4)
+
+
+
+. 1041 2547CE6C 8
+. 7C C6 02 14 4B FF FF A4
+==== BB 1042 (0x2547CE14) approx BBs exec'd 0 ====
+
+	0x2547CE14:  81860008  lwz r12,8(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x2547CE18:  7CEC3214  add r7,r12,r6
+	   5: GETL       	R12, t4
+	   6: GETL       	R6, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x2547CE1C:  A2A70006  lhz r21,6(r7)
+	  10: GETL       	R7, t8
+	  11: ADDL       	$0x6, t8
+	  12: LDW       	(t8), t10
+	  13: PUTL       	t10, R21
+	  14: INCEIPL       	$4
+
+	0x2547CE20:  80670000  lwz r3,0(r7)
+	  15: GETL       	R7, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R3
+	  18: INCEIPL       	$4
+
+	0x2547CE24:  56A42376  rlwinm r4,r21,4,13,27
+	  19: GETL       	R21, t16
+	  20: ROLL       	$0x4, t16
+	  21: ANDL       	$0x7FFF0, t16
+	  22: PUTL       	t16, R4
+	  23: INCEIPL       	$4
+
+	0x2547CE28:  56B00420  rlwinm r16,r21,0,16,16
+	  24: GETL       	R21, t18
+	  25: ANDL       	$0x8000, t18
+	  26: PUTL       	t18, R16
+	  27: INCEIPL       	$4
+
+	0x2547CE2C:  7FA42A14  add r29,r4,r5
+	  28: GETL       	R4, t20
+	  29: GETL       	R5, t22
+	  30: ADDL       	t20, t22
+	  31: PUTL       	t22, R29
+	  32: INCEIPL       	$4
+
+	0x2547CE30:  907D0004  stw r3,4(r29)
+	  33: GETL       	R3, t24
+	  34: GETL       	R29, t26
+	  35: ADDL       	$0x4, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x2547CE34:  921D0008  stw r16,8(r29)
+	  38: GETL       	R16, t28
+	  39: GETL       	R29, t30
+	  40: ADDL       	$0x8, t30
+	  41: STL       	t28, (t30)
+	  42: INCEIPL       	$4
+
+	0x2547CE38:  8167000C  lwz r11,12(r7)
+	  43: GETL       	R7, t32
+	  44: ADDL       	$0xC, t32
+	  45: LDL       	(t32), t34
+	  46: PUTL       	t34, R11
+	  47: INCEIPL       	$4
+
+	0x2547CE3C:  81470008  lwz r10,8(r7)
+	  48: GETL       	R7, t36
+	  49: ADDL       	$0x8, t36
+	  50: LDL       	(t36), t38
+	  51: PUTL       	t38, R10
+	  52: INCEIPL       	$4
+
+	0x2547CE40:  2D8B0000  cmpi cr3,r11,0
+	  53: GETL       	R11, t40
+	  54: CMP0L       	t40, t42  (-rSo)
+	  55: ICRFL       	t42, $0x3, CR
+	  56: INCEIPL       	$4
+
+	0x2547CE44:  81060004  lwz r8,4(r6)
+	  57: GETL       	R6, t44
+	  58: ADDL       	$0x4, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R8
+	  61: INCEIPL       	$4
+
+	0x2547CE48:  7D2ACA14  add r9,r10,r25
+	  62: GETL       	R10, t48
+	  63: GETL       	R25, t50
+	  64: ADDL       	t48, t50
+	  65: PUTL       	t50, R9
+	  66: INCEIPL       	$4
+
+	0x2547CE4C:  7CE75A14  add r7,r7,r11
+	  67: GETL       	R7, t52
+	  68: GETL       	R11, t54
+	  69: ADDL       	t52, t54
+	  70: PUTL       	t54, R7
+	  71: INCEIPL       	$4
+
+	0x2547CE50:  7C08CA14  add r0,r8,r25
+	  72: GETL       	R8, t56
+	  73: GETL       	R25, t58
+	  74: ADDL       	t56, t58
+	  75: PUTL       	t58, R0
+	  76: INCEIPL       	$4
+
+	0x2547CE54:  7D24292E  stwx r9,r4,r5
+	  77: GETL       	R5, t60
+	  78: GETL       	R4, t62
+	  79: ADDL       	t62, t60
+	  80: GETL       	R9, t64
+	  81: STL       	t64, (t60)
+	  82: INCEIPL       	$4
+
+	0x2547CE58:  901D000C  stw r0,12(r29)
+	  83: GETL       	R0, t66
+	  84: GETL       	R29, t68
+	  85: ADDL       	$0xC, t68
+	  86: STL       	t66, (t68)
+	  87: INCEIPL       	$4
+
+	0x2547CE5C:  408EFFC0  bc 4,14,0x2547CE1C
+	  88: Jc14o       	$0x2547CE1C
+
+
+
+. 1042 2547CE14 76
+. 81 86 00 08 7C EC 32 14 A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+==== BB 1043 (0x2547CE1C) approx BBs exec'd 0 ====
+
+	0x2547CE1C:  A2A70006  lhz r21,6(r7)
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0x6, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547CE20:  80670000  lwz r3,0(r7)
+	   5: GETL       	R7, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0x2547CE24:  56A42376  rlwinm r4,r21,4,13,27
+	   9: GETL       	R21, t8
+	  10: ROLL       	$0x4, t8
+	  11: ANDL       	$0x7FFF0, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x2547CE28:  56B00420  rlwinm r16,r21,0,16,16
+	  14: GETL       	R21, t10
+	  15: ANDL       	$0x8000, t10
+	  16: PUTL       	t10, R16
+	  17: INCEIPL       	$4
+
+	0x2547CE2C:  7FA42A14  add r29,r4,r5
+	  18: GETL       	R4, t12
+	  19: GETL       	R5, t14
+	  20: ADDL       	t12, t14
+	  21: PUTL       	t14, R29
+	  22: INCEIPL       	$4
+
+	0x2547CE30:  907D0004  stw r3,4(r29)
+	  23: GETL       	R3, t16
+	  24: GETL       	R29, t18
+	  25: ADDL       	$0x4, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0x2547CE34:  921D0008  stw r16,8(r29)
+	  28: GETL       	R16, t20
+	  29: GETL       	R29, t22
+	  30: ADDL       	$0x8, t22
+	  31: STL       	t20, (t22)
+	  32: INCEIPL       	$4
+
+	0x2547CE38:  8167000C  lwz r11,12(r7)
+	  33: GETL       	R7, t24
+	  34: ADDL       	$0xC, t24
+	  35: LDL       	(t24), t26
+	  36: PUTL       	t26, R11
+	  37: INCEIPL       	$4
+
+	0x2547CE3C:  81470008  lwz r10,8(r7)
+	  38: GETL       	R7, t28
+	  39: ADDL       	$0x8, t28
+	  40: LDL       	(t28), t30
+	  41: PUTL       	t30, R10
+	  42: INCEIPL       	$4
+
+	0x2547CE40:  2D8B0000  cmpi cr3,r11,0
+	  43: GETL       	R11, t32
+	  44: CMP0L       	t32, t34  (-rSo)
+	  45: ICRFL       	t34, $0x3, CR
+	  46: INCEIPL       	$4
+
+	0x2547CE44:  81060004  lwz r8,4(r6)
+	  47: GETL       	R6, t36
+	  48: ADDL       	$0x4, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R8
+	  51: INCEIPL       	$4
+
+	0x2547CE48:  7D2ACA14  add r9,r10,r25
+	  52: GETL       	R10, t40
+	  53: GETL       	R25, t42
+	  54: ADDL       	t40, t42
+	  55: PUTL       	t42, R9
+	  56: INCEIPL       	$4
+
+	0x2547CE4C:  7CE75A14  add r7,r7,r11
+	  57: GETL       	R7, t44
+	  58: GETL       	R11, t46
+	  59: ADDL       	t44, t46
+	  60: PUTL       	t46, R7
+	  61: INCEIPL       	$4
+
+	0x2547CE50:  7C08CA14  add r0,r8,r25
+	  62: GETL       	R8, t48
+	  63: GETL       	R25, t50
+	  64: ADDL       	t48, t50
+	  65: PUTL       	t50, R0
+	  66: INCEIPL       	$4
+
+	0x2547CE54:  7D24292E  stwx r9,r4,r5
+	  67: GETL       	R5, t52
+	  68: GETL       	R4, t54
+	  69: ADDL       	t54, t52
+	  70: GETL       	R9, t56
+	  71: STL       	t56, (t52)
+	  72: INCEIPL       	$4
+
+	0x2547CE58:  901D000C  stw r0,12(r29)
+	  73: GETL       	R0, t58
+	  74: GETL       	R29, t60
+	  75: ADDL       	$0xC, t60
+	  76: STL       	t58, (t60)
+	  77: INCEIPL       	$4
+
+	0x2547CE5C:  408EFFC0  bc 4,14,0x2547CE1C
+	  78: Jc14o       	$0x2547CE1C
+
+
+
+. 1043 2547CE1C 68
+. A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+==== BB 1044 (0x2547CE8C) approx BBs exec'd 0 ====
+
+	0x2547CE8C:  41B2FEE0  bc 13,18,0x2547CD6C
+	   0: Js18o       	$0x2547CD6C
+
+
+
+. 1044 2547CE8C 4
+. 41 B2 FE E0
+==== BB 1045 (0x2547CD6C) approx BBs exec'd 0 ====
+
+	0x2547CD6C:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547CD70:  80A10000  lwz r5,0(r1)
+	   3: GETL       	R1, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R5
+	   6: INCEIPL       	$4
+
+	0x2547CD74:  82C50004  lwz r22,4(r5)
+	   7: GETL       	R5, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R22
+	  11: INCEIPL       	$4
+
+	0x2547CD78:  8185FFBC  lwz r12,-68(r5)
+	  12: GETL       	R5, t10
+	  13: ADDL       	$0xFFFFFFBC, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R12
+	  16: INCEIPL       	$4
+
+	0x2547CD7C:  7EC803A6  mtlr r22
+	  17: GETL       	R22, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0x2547CD80:  8205FFC0  lwz r16,-64(r5)
+	  20: GETL       	R5, t16
+	  21: ADDL       	$0xFFFFFFC0, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R16
+	  24: INCEIPL       	$4
+
+	0x2547CD84:  8225FFC4  lwz r17,-60(r5)
+	  25: GETL       	R5, t20
+	  26: ADDL       	$0xFFFFFFC4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R17
+	  29: INCEIPL       	$4
+
+	0x2547CD88:  7D818120  mtcrf 0x18,r12
+	  30: GETL       	R12, t24
+	  31: ICRFL       	t24, $0x3, CR
+	  32: ICRFL       	t24, $0x4, CR
+	  33: INCEIPL       	$4
+
+	0x2547CD8C:  8245FFC8  lwz r18,-56(r5)
+	  34: GETL       	R5, t26
+	  35: ADDL       	$0xFFFFFFC8, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R18
+	  38: INCEIPL       	$4
+
+	0x2547CD90:  8265FFCC  lwz r19,-52(r5)
+	  39: GETL       	R5, t30
+	  40: ADDL       	$0xFFFFFFCC, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R19
+	  43: INCEIPL       	$4
+
+	0x2547CD94:  8285FFD0  lwz r20,-48(r5)
+	  44: GETL       	R5, t34
+	  45: ADDL       	$0xFFFFFFD0, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R20
+	  48: INCEIPL       	$4
+
+	0x2547CD98:  82A5FFD4  lwz r21,-44(r5)
+	  49: GETL       	R5, t38
+	  50: ADDL       	$0xFFFFFFD4, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R21
+	  53: INCEIPL       	$4
+
+	0x2547CD9C:  82C5FFD8  lwz r22,-40(r5)
+	  54: GETL       	R5, t42
+	  55: ADDL       	$0xFFFFFFD8, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R22
+	  58: INCEIPL       	$4
+
+	0x2547CDA0:  82E5FFDC  lwz r23,-36(r5)
+	  59: GETL       	R5, t46
+	  60: ADDL       	$0xFFFFFFDC, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R23
+	  63: INCEIPL       	$4
+
+	0x2547CDA4:  8305FFE0  lwz r24,-32(r5)
+	  64: GETL       	R5, t50
+	  65: ADDL       	$0xFFFFFFE0, t50
+	  66: LDL       	(t50), t52
+	  67: PUTL       	t52, R24
+	  68: INCEIPL       	$4
+
+	0x2547CDA8:  8325FFE4  lwz r25,-28(r5)
+	  69: GETL       	R5, t54
+	  70: ADDL       	$0xFFFFFFE4, t54
+	  71: LDL       	(t54), t56
+	  72: PUTL       	t56, R25
+	  73: INCEIPL       	$4
+
+	0x2547CDAC:  8345FFE8  lwz r26,-24(r5)
+	  74: GETL       	R5, t58
+	  75: ADDL       	$0xFFFFFFE8, t58
+	  76: LDL       	(t58), t60
+	  77: PUTL       	t60, R26
+	  78: INCEIPL       	$4
+
+	0x2547CDB0:  8365FFEC  lwz r27,-20(r5)
+	  79: GETL       	R5, t62
+	  80: ADDL       	$0xFFFFFFEC, t62
+	  81: LDL       	(t62), t64
+	  82: PUTL       	t64, R27
+	  83: INCEIPL       	$4
+
+	0x2547CDB4:  8385FFF0  lwz r28,-16(r5)
+	  84: GETL       	R5, t66
+	  85: ADDL       	$0xFFFFFFF0, t66
+	  86: LDL       	(t66), t68
+	  87: PUTL       	t68, R28
+	  88: INCEIPL       	$4
+
+	0x2547CDB8:  83A5FFF4  lwz r29,-12(r5)
+	  89: GETL       	R5, t70
+	  90: ADDL       	$0xFFFFFFF4, t70
+	  91: LDL       	(t70), t72
+	  92: PUTL       	t72, R29
+	  93: INCEIPL       	$4
+
+	0x2547CDBC:  83C5FFF8  lwz r30,-8(r5)
+	  94: GETL       	R5, t74
+	  95: ADDL       	$0xFFFFFFF8, t74
+	  96: LDL       	(t74), t76
+	  97: PUTL       	t76, R30
+	  98: INCEIPL       	$4
+
+	0x2547CDC0:  83E5FFFC  lwz r31,-4(r5)
+	  99: GETL       	R5, t78
+	 100: ADDL       	$0xFFFFFFFC, t78
+	 101: LDL       	(t78), t80
+	 102: PUTL       	t80, R31
+	 103: INCEIPL       	$4
+
+	0x2547CDC4:  7CA12B78  or r1,r5,r5
+	 104: GETL       	R5, t82
+	 105: PUTL       	t82, R1
+	 106: INCEIPL       	$4
+
+	0x2547CDC8:  4E800020  blr
+	 107: GETL       	LR, t84
+	 108: JMPo-r       	t84  ($4)
+
+
+
+. 1045 2547CD6C 96
+. 7E C3 B3 78 80 A1 00 00 82 C5 00 04 81 85 FF BC 7E C8 03 A6 82 05 FF C0 82 25 FF C4 7D 81 81 20 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+==== BB 1046 (0x2547D034) approx BBs exec'd 0 ====
+
+	0x2547D034:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547D038:  41BEFFCC  bc 13,30,0x2547D004
+	   4: Js30o       	$0x2547D004
+
+
+
+. 1046 2547D034 8
+. 2F 83 00 00 41 BE FF CC
+==== BB 1047 (0x2547D004) approx BBs exec'd 0 ====
+
+	0x2547D004:  83FF000C  lwz r31,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x2547D008:  7F9CEB78  or r28,r28,r29
+	   5: GETL       	R28, t4
+	   6: GETL       	R29, t6
+	   7: ORL       	t6, t4
+	   8: PUTL       	t4, R28
+	   9: INCEIPL       	$4
+
+	0x2547D00C:  2C9F0000  cmpi cr1,r31,0
+	  10: GETL       	R31, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x2547D010:  41860040  bc 12,6,0x2547D050
+	  14: Js06o       	$0x2547D050
+
+
+
+. 1047 2547D004 16
+. 83 FF 00 0C 7F 9C EB 78 2C 9F 00 00 41 86 00 40
+==== BB 1048 (0x2547CCD4) approx BBs exec'd 0 ====
+
+	0x2547CCD4:  815D0008  lwz r10,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547CCD8:  A21D0004  lhz r16,4(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDW       	(t4), t6
+	   8: PUTL       	t6, R16
+	   9: INCEIPL       	$4
+
+	0x2547CCDC:  809D0000  lwz r4,0(r29)
+	  10: GETL       	R29, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R4
+	  13: INCEIPL       	$4
+
+	0x2547CCE0:  7CAACA14  add r5,r10,r25
+	  14: GETL       	R10, t12
+	  15: GETL       	R25, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R5
+	  18: INCEIPL       	$4
+
+	0x2547CCE4:  80DC0014  lwz r6,20(r28)
+	  19: GETL       	R28, t16
+	  20: ADDL       	$0x14, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R6
+	  23: INCEIPL       	$4
+
+	0x2547CCE8:  560807BC  rlwinm r8,r16,0,30,30
+	  24: GETL       	R16, t20
+	  25: ANDL       	$0x2, t20
+	  26: PUTL       	t20, R8
+	  27: INCEIPL       	$4
+
+	0x2547CCEC:  4BFFFA85  bl 0x2547C770
+	  28: MOVL       	$0x2547CCF0, t22
+	  29: PUTL       	t22, LR
+	  30: JMPo-c       	$0x2547C770  ($4)
+
+
+
+. 1048 2547CCD4 28
+. 81 5D 00 08 A2 1D 00 04 80 9D 00 00 7C AA CA 14 80 DC 00 14 56 08 07 BC 4B FF FA 85
+==== BB 1049 (0x2547CD34) approx BBs exec'd 0 ====
+
+	0x2547CD34:  82BB0000  lwz r21,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R21
+	   3: INCEIPL       	$4
+
+	0x2547CD38:  82F10004  lwz r23,4(r17)
+	   4: GETL       	R17, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R23
+	   8: INCEIPL       	$4
+
+	0x2547CD3C:  7D35BA14  add r9,r21,r23
+	   9: GETL       	R21, t8
+	  10: GETL       	R23, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x2547CD40:  A2690004  lhz r19,4(r9)
+	  14: GETL       	R9, t12
+	  15: ADDL       	$0x4, t12
+	  16: LDW       	(t12), t14
+	  17: PUTL       	t14, R19
+	  18: INCEIPL       	$4
+
+	0x2547CD44:  5660047E  rlwinm r0,r19,0,17,31
+	  19: GETL       	R19, t16
+	  20: ANDL       	$0x7FFF, t16
+	  21: PUTL       	t16, R0
+	  22: INCEIPL       	$4
+
+	0x2547CD48:  7C80C040  cmpl cr1,r0,r24
+	  23: GETL       	R0, t18
+	  24: GETL       	R24, t20
+	  25: CMPUL       	t18, t20, t22  (-rSo)
+	  26: ICRFL       	t22, $0x1, CR
+	  27: INCEIPL       	$4
+
+	0x2547CD4C:  40850008  bc 4,5,0x2547CD54
+	  28: Jc05o       	$0x2547CD54
+
+
+
+. 1049 2547CD34 28
+. 82 BB 00 00 82 F1 00 04 7D 35 BA 14 A2 69 00 04 56 60 04 7E 7C 80 C0 40 40 85 00 08
+==== BB 1050 (0x2547CD54) approx BBs exec'd 0 ====
+
+	0x2547CD54:  82890010  lwz r20,16(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R20
+	   4: INCEIPL       	$4
+
+	0x2547CD58:  2F140000  cmpi cr6,r20,0
+	   5: GETL       	R20, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547CD5C:  7D29A214  add r9,r9,r20
+	   9: GETL       	R9, t8
+	  10: GETL       	R20, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x2547CD60:  409AFFE0  bc 4,26,0x2547CD40
+	  14: Jc26o       	$0x2547CD40
+
+
+
+. 1050 2547CD54 16
+. 82 89 00 10 2F 14 00 00 7D 29 A2 14 40 9A FF E0
+==== BB 1051 (0x2547CD40) approx BBs exec'd 0 ====
+
+	0x2547CD40:  A2690004  lhz r19,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0x2547CD44:  5660047E  rlwinm r0,r19,0,17,31
+	   5: GETL       	R19, t4
+	   6: ANDL       	$0x7FFF, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547CD48:  7C80C040  cmpl cr1,r0,r24
+	   9: GETL       	R0, t6
+	  10: GETL       	R24, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x2547CD4C:  40850008  bc 4,5,0x2547CD54
+	  14: Jc05o       	$0x2547CD54
+
+
+
+. 1051 2547CD40 16
+. A2 69 00 04 56 60 04 7E 7C 80 C0 40 40 85 00 08
+==== BB 1052 (0x2547CE90) approx BBs exec'd 0 ====
+
+	0x2547CE90:  80DB0000  lwz r6,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x2547CE94:  38E00000  li r7,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R7
+	   6: INCEIPL       	$4
+
+	0x2547CE98:  83710004  lwz r27,4(r17)
+	   7: GETL       	R17, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R27
+	  11: INCEIPL       	$4
+
+	0x2547CE9C:  7C66DA14  add r3,r6,r27
+	  12: GETL       	R6, t10
+	  13: GETL       	R27, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0x2547CEA0:  A2230002  lhz r17,2(r3)
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0x2, t14
+	  19: LDW       	(t14), t16
+	  20: PUTL       	t16, R17
+	  21: INCEIPL       	$4
+
+	0x2547CEA4:  8103000C  lwz r8,12(r3)
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0xC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R8
+	  26: INCEIPL       	$4
+
+	0x2547CEA8:  72290001  andi. r9,r17,0x1
+	  27: GETL       	R17, t22
+	  28: ANDL       	$0x1, t22
+	  29: PUTL       	t22, R9
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0x2547CEAC:  40820028  bc 4,2,0x2547CED4
+	  33: Jc02o       	$0x2547CED4
+
+
+
+. 1052 2547CE90 32
+. 80 DB 00 00 38 E0 00 00 83 71 00 04 7C 66 DA 14 A2 23 00 02 81 03 00 0C 72 29 00 01 40 82 00 28
+==== BB 1053 (0x2547CED4) approx BBs exec'd 0 ====
+
+	0x2547CED4:  82430010  lwz r18,16(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x2547CED8:  2E120000  cmpi cr4,r18,0
+	   5: GETL       	R18, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x2547CEDC:  7C639214  add r3,r3,r18
+	   9: GETL       	R3, t8
+	  10: GETL       	R18, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R3
+	  13: INCEIPL       	$4
+
+	0x2547CEE0:  4092FFC0  bc 4,18,0x2547CEA0
+	  14: Jc18o       	$0x2547CEA0
+
+
+
+. 1053 2547CED4 16
+. 82 43 00 10 2E 12 00 00 7C 63 92 14 40 92 FF C0
+==== BB 1054 (0x2547CEA0) approx BBs exec'd 0 ====
+
+	0x2547CEA0:  A2230002  lhz r17,2(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x2, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0x2547CEA4:  8103000C  lwz r8,12(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x2547CEA8:  72290001  andi. r9,r17,0x1
+	  10: GETL       	R17, t8
+	  11: ANDL       	$0x1, t8
+	  12: PUTL       	t8, R9
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x2547CEAC:  40820028  bc 4,2,0x2547CED4
+	  16: Jc02o       	$0x2547CED4
+
+
+
+. 1054 2547CEA0 16
+. A2 23 00 02 81 03 00 0C 72 29 00 01 40 82 00 28
+==== BB 1055 (0x2547CEB0) approx BBs exec'd 0 ====
+
+	0x2547CEB0:  A3430004  lhz r26,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547CEB4:  83830008  lwz r28,8(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x2547CEB8:  57572376  rlwinm r23,r26,4,13,27
+	  10: GETL       	R26, t8
+	  11: ROLL       	$0x4, t8
+	  12: ANDL       	$0x7FFF0, t8
+	  13: PUTL       	t8, R23
+	  14: INCEIPL       	$4
+
+	0x2547CEBC:  7F172A14  add r24,r23,r5
+	  15: GETL       	R23, t10
+	  16: GETL       	R5, t12
+	  17: ADDL       	t10, t12
+	  18: PUTL       	t12, R24
+	  19: INCEIPL       	$4
+
+	0x2547CEC0:  93980004  stw r28,4(r24)
+	  20: GETL       	R28, t14
+	  21: GETL       	R24, t16
+	  22: ADDL       	$0x4, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x2547CEC4:  7E88182E  lwzx r20,r8,r3
+	  25: GETL       	R3, t18
+	  26: GETL       	R8, t20
+	  27: ADDL       	t20, t18
+	  28: LDL       	(t18), t22
+	  29: PUTL       	t22, R20
+	  30: INCEIPL       	$4
+
+	0x2547CEC8:  90F8000C  stw r7,12(r24)
+	  31: GETL       	R7, t24
+	  32: GETL       	R24, t26
+	  33: ADDL       	$0xC, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547CECC:  7E74CA14  add r19,r20,r25
+	  36: GETL       	R20, t28
+	  37: GETL       	R25, t30
+	  38: ADDL       	t28, t30
+	  39: PUTL       	t30, R19
+	  40: INCEIPL       	$4
+
+	0x2547CED0:  7E77292E  stwx r19,r23,r5
+	  41: GETL       	R5, t32
+	  42: GETL       	R23, t34
+	  43: ADDL       	t34, t32
+	  44: GETL       	R19, t36
+	  45: STL       	t36, (t32)
+	  46: INCEIPL       	$4
+
+	0x2547CED4:  82430010  lwz r18,16(r3)
+	  47: GETL       	R3, t38
+	  48: ADDL       	$0x10, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R18
+	  51: INCEIPL       	$4
+
+	0x2547CED8:  2E120000  cmpi cr4,r18,0
+	  52: GETL       	R18, t42
+	  53: CMP0L       	t42, t44  (-rSo)
+	  54: ICRFL       	t44, $0x4, CR
+	  55: INCEIPL       	$4
+
+	0x2547CEDC:  7C639214  add r3,r3,r18
+	  56: GETL       	R3, t46
+	  57: GETL       	R18, t48
+	  58: ADDL       	t46, t48
+	  59: PUTL       	t48, R3
+	  60: INCEIPL       	$4
+
+	0x2547CEE0:  4092FFC0  bc 4,18,0x2547CEA0
+	  61: Jc18o       	$0x2547CEA0
+
+
+
+. 1055 2547CEB0 52
+. A3 43 00 04 83 83 00 08 57 57 23 76 7F 17 2A 14 93 98 00 04 7E 88 18 2E 90 F8 00 0C 7E 74 CA 14 7E 77 29 2E 82 43 00 10 2E 12 00 00 7C 63 92 14 40 92 FF C0
+==== BB 1056 (0x2547CEE4) approx BBs exec'd 0 ====
+
+	0x2547CEE4:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547CEE8:  4BFFFE88  b 0x2547CD70
+	   3: JMPo       	$0x2547CD70  ($4)
+
+
+
+. 1056 2547CEE4 8
+. 7E C3 B3 78 4B FF FE 88
+==== BB 1057 (0x2547CD70) approx BBs exec'd 0 ====
+
+	0x2547CD70:  80A10000  lwz r5,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x2547CD74:  82C50004  lwz r22,4(r5)
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R22
+	   8: INCEIPL       	$4
+
+	0x2547CD78:  8185FFBC  lwz r12,-68(r5)
+	   9: GETL       	R5, t8
+	  10: ADDL       	$0xFFFFFFBC, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x2547CD7C:  7EC803A6  mtlr r22
+	  14: GETL       	R22, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x2547CD80:  8205FFC0  lwz r16,-64(r5)
+	  17: GETL       	R5, t14
+	  18: ADDL       	$0xFFFFFFC0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R16
+	  21: INCEIPL       	$4
+
+	0x2547CD84:  8225FFC4  lwz r17,-60(r5)
+	  22: GETL       	R5, t18
+	  23: ADDL       	$0xFFFFFFC4, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R17
+	  26: INCEIPL       	$4
+
+	0x2547CD88:  7D818120  mtcrf 0x18,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x3, CR
+	  29: ICRFL       	t22, $0x4, CR
+	  30: INCEIPL       	$4
+
+	0x2547CD8C:  8245FFC8  lwz r18,-56(r5)
+	  31: GETL       	R5, t24
+	  32: ADDL       	$0xFFFFFFC8, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R18
+	  35: INCEIPL       	$4
+
+	0x2547CD90:  8265FFCC  lwz r19,-52(r5)
+	  36: GETL       	R5, t28
+	  37: ADDL       	$0xFFFFFFCC, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R19
+	  40: INCEIPL       	$4
+
+	0x2547CD94:  8285FFD0  lwz r20,-48(r5)
+	  41: GETL       	R5, t32
+	  42: ADDL       	$0xFFFFFFD0, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R20
+	  45: INCEIPL       	$4
+
+	0x2547CD98:  82A5FFD4  lwz r21,-44(r5)
+	  46: GETL       	R5, t36
+	  47: ADDL       	$0xFFFFFFD4, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R21
+	  50: INCEIPL       	$4
+
+	0x2547CD9C:  82C5FFD8  lwz r22,-40(r5)
+	  51: GETL       	R5, t40
+	  52: ADDL       	$0xFFFFFFD8, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R22
+	  55: INCEIPL       	$4
+
+	0x2547CDA0:  82E5FFDC  lwz r23,-36(r5)
+	  56: GETL       	R5, t44
+	  57: ADDL       	$0xFFFFFFDC, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R23
+	  60: INCEIPL       	$4
+
+	0x2547CDA4:  8305FFE0  lwz r24,-32(r5)
+	  61: GETL       	R5, t48
+	  62: ADDL       	$0xFFFFFFE0, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R24
+	  65: INCEIPL       	$4
+
+	0x2547CDA8:  8325FFE4  lwz r25,-28(r5)
+	  66: GETL       	R5, t52
+	  67: ADDL       	$0xFFFFFFE4, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R25
+	  70: INCEIPL       	$4
+
+	0x2547CDAC:  8345FFE8  lwz r26,-24(r5)
+	  71: GETL       	R5, t56
+	  72: ADDL       	$0xFFFFFFE8, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R26
+	  75: INCEIPL       	$4
+
+	0x2547CDB0:  8365FFEC  lwz r27,-20(r5)
+	  76: GETL       	R5, t60
+	  77: ADDL       	$0xFFFFFFEC, t60
+	  78: LDL       	(t60), t62
+	  79: PUTL       	t62, R27
+	  80: INCEIPL       	$4
+
+	0x2547CDB4:  8385FFF0  lwz r28,-16(r5)
+	  81: GETL       	R5, t64
+	  82: ADDL       	$0xFFFFFFF0, t64
+	  83: LDL       	(t64), t66
+	  84: PUTL       	t66, R28
+	  85: INCEIPL       	$4
+
+	0x2547CDB8:  83A5FFF4  lwz r29,-12(r5)
+	  86: GETL       	R5, t68
+	  87: ADDL       	$0xFFFFFFF4, t68
+	  88: LDL       	(t68), t70
+	  89: PUTL       	t70, R29
+	  90: INCEIPL       	$4
+
+	0x2547CDBC:  83C5FFF8  lwz r30,-8(r5)
+	  91: GETL       	R5, t72
+	  92: ADDL       	$0xFFFFFFF8, t72
+	  93: LDL       	(t72), t74
+	  94: PUTL       	t74, R30
+	  95: INCEIPL       	$4
+
+	0x2547CDC0:  83E5FFFC  lwz r31,-4(r5)
+	  96: GETL       	R5, t76
+	  97: ADDL       	$0xFFFFFFFC, t76
+	  98: LDL       	(t76), t78
+	  99: PUTL       	t78, R31
+	 100: INCEIPL       	$4
+
+	0x2547CDC4:  7CA12B78  or r1,r5,r5
+	 101: GETL       	R5, t80
+	 102: PUTL       	t80, R1
+	 103: INCEIPL       	$4
+
+	0x2547CDC8:  4E800020  blr
+	 104: GETL       	LR, t82
+	 105: JMPo-r       	t82  ($4)
+
+
+
+. 1057 2547CD70 92
+. 80 A1 00 00 82 C5 00 04 81 85 FF BC 7E C8 03 A6 82 05 FF C0 82 25 FF C4 7D 81 81 20 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+==== BB 1058 (0x2547CD50) approx BBs exec'd 0 ====
+
+	0x2547CD50:  7C180378  or r24,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R24
+	   2: INCEIPL       	$4
+
+	0x2547CD54:  82890010  lwz r20,16(r9)
+	   3: GETL       	R9, t2
+	   4: ADDL       	$0x10, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R20
+	   7: INCEIPL       	$4
+
+	0x2547CD58:  2F140000  cmpi cr6,r20,0
+	   8: GETL       	R20, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x2547CD5C:  7D29A214  add r9,r9,r20
+	  12: GETL       	R9, t10
+	  13: GETL       	R20, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R9
+	  16: INCEIPL       	$4
+
+	0x2547CD60:  409AFFE0  bc 4,26,0x2547CD40
+	  17: Jc26o       	$0x2547CD40
+
+
+
+. 1058 2547CD50 20
+. 7C 18 03 78 82 89 00 10 2F 14 00 00 7D 29 A2 14 40 9A FF E0
+==== BB 1059 (0x2547D050) approx BBs exec'd 0 ====
+
+	0x2547D050:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547D054:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547D058:  83410008  lwz r26,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x2547D05C:  8361000C  lwz r27,12(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xC, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0x2547D060:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547D064:  83810010  lwz r28,16(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R28
+	  25: INCEIPL       	$4
+
+	0x2547D068:  83A10014  lwz r29,20(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R29
+	  30: INCEIPL       	$4
+
+	0x2547D06C:  83C10018  lwz r30,24(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x18, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R30
+	  35: INCEIPL       	$4
+
+	0x2547D070:  83E1001C  lwz r31,28(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0x2547D074:  38210020  addi r1,r1,32
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x20, t32
+	  43: PUTL       	t32, R1
+	  44: INCEIPL       	$4
+
+	0x2547D078:  4E800020  blr
+	  45: GETL       	LR, t34
+	  46: JMPo-r       	t34  ($4)
+
+
+
+. 1059 2547D050 44
+. 80 81 00 24 7F 83 E3 78 83 41 00 08 83 61 00 0C 7C 88 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1060 (0x2547180C) approx BBs exec'd 0 ====
+
+	0x2547180C:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25471810:  419E0014  bc 12,30,0x25471824
+	   4: Js30o       	$0x25471824
+
+
+
+. 1060 2547180C 8
+. 2F 83 00 00 41 9E 00 14
+==== BB 1061 (0x25471824) approx BBs exec'd 0 ====
+
+	0x25471824:  80610024  lwz r3,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25471828:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547182C:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0x25471830:  7C6803A6  mtlr r3
+	  15: GETL       	R3, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x25471834:  83E1001C  lwz r31,28(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0x25471838:  38210020  addi r1,r1,32
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x20, t18
+	  25: PUTL       	t18, R1
+	  26: INCEIPL       	$4
+
+	0x2547183C:  4E800020  blr
+	  27: GETL       	LR, t20
+	  28: JMPo-r       	t20  ($4)
+
+
+
+. 1061 25471824 28
+. 80 61 00 24 83 A1 00 14 83 C1 00 18 7C 68 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1062 (0x2547B70C) approx BBs exec'd 0 ====
+
+	0x2547B70C:  935D0000  stw r26,0(r29)
+	   0: GETL       	R26, t0
+	   1: GETL       	R29, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x2547B710:  80010034  lwz r0,52(r1)
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x34, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x2547B714:  937C0000  stw r27,0(r28)
+	   9: GETL       	R27, t8
+	  10: GETL       	R28, t10
+	  11: STL       	t8, (t10)
+	  12: INCEIPL       	$4
+
+	0x2547B718:  82E1000C  lwz r23,12(r1)
+	  13: GETL       	R1, t12
+	  14: ADDL       	$0xC, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R23
+	  17: INCEIPL       	$4
+
+	0x2547B71C:  7C0803A6  mtlr r0
+	  18: GETL       	R0, t16
+	  19: PUTL       	t16, LR
+	  20: INCEIPL       	$4
+
+	0x2547B720:  83210014  lwz r25,20(r1)
+	  21: GETL       	R1, t18
+	  22: ADDL       	$0x14, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R25
+	  25: INCEIPL       	$4
+
+	0x2547B724:  83410018  lwz r26,24(r1)
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0x18, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R26
+	  30: INCEIPL       	$4
+
+	0x2547B728:  8361001C  lwz r27,28(r1)
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x1C, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R27
+	  35: INCEIPL       	$4
+
+	0x2547B72C:  83810020  lwz r28,32(r1)
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x20, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R28
+	  40: INCEIPL       	$4
+
+	0x2547B730:  83A10024  lwz r29,36(r1)
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x24, t34
+	  43: LDL       	(t34), t36
+	  44: PUTL       	t36, R29
+	  45: INCEIPL       	$4
+
+	0x2547B734:  83C10028  lwz r30,40(r1)
+	  46: GETL       	R1, t38
+	  47: ADDL       	$0x28, t38
+	  48: LDL       	(t38), t40
+	  49: PUTL       	t40, R30
+	  50: INCEIPL       	$4
+
+	0x2547B738:  38210030  addi r1,r1,48
+	  51: GETL       	R1, t42
+	  52: ADDL       	$0x30, t42
+	  53: PUTL       	t42, R1
+	  54: INCEIPL       	$4
+
+	0x2547B73C:  4E800020  blr
+	  55: GETL       	LR, t44
+	  56: JMPo-r       	t44  ($4)
+
+
+
+. 1062 2547B70C 52
+. 93 5D 00 00 80 01 00 34 93 7C 00 00 82 E1 00 0C 7C 08 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 38 21 00 30 4E 80 00 20
+==== BB 1063 (0x25472900) approx BBs exec'd 0 ====
+
+	0x25472900:  836E03D8  lwz r27,984(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x3D8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25472904:  2E1B0000  cmpi cr4,r27,0
+	   5: GETL       	R27, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x25472908:  40920AEC  bc 4,18,0x254733F4
+	   9: Jc18o       	$0x254733F4
+
+
+
+. 1063 25472900 12
+. 83 6E 03 D8 2E 1B 00 00 40 92 0A EC
+==== BB 1064 (0x2547290C) approx BBs exec'd 0 ====
+
+	0x2547290C:  816E0408  lwz r11,1032(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25472910:  3ACB003F  addi r22,r11,63
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x3F, t4
+	   7: PUTL       	t4, R22
+	   8: INCEIPL       	$4
+
+	0x25472914:  916E0414  stw r11,1044(r14)
+	   9: GETL       	R11, t6
+	  10: GETL       	R14, t8
+	  11: ADDL       	$0x414, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x25472918:  56D91838  rlwinm r25,r22,3,0,28
+	  14: GETL       	R22, t10
+	  15: SHLL       	$0x3, t10
+	  16: PUTL       	t10, R25
+	  17: INCEIPL       	$4
+
+	0x2547291C:  38790008  addi r3,r25,8
+	  18: GETL       	R25, t12
+	  19: ADDL       	$0x8, t12
+	  20: PUTL       	t12, R3
+	  21: INCEIPL       	$4
+
+	0x25472920:  48025105  bl 0x25497A24
+	  22: MOVL       	$0x25472924, t14
+	  23: PUTL       	t14, LR
+	  24: JMPo-c       	$0x25497A24  ($4)
+
+
+
+. 1064 2547290C 24
+. 81 6E 04 08 3A CB 00 3F 91 6E 04 14 56 D9 18 38 38 79 00 08 48 02 51 05
+==== BB 1065 (0x25472924) approx BBs exec'd 0 ====
+
+	0x25472924:  7F25CB78  or r5,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x25472928:  7C6C1B78  or r12,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0x2547292C:  38800000  li r4,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x25472930:  38630008  addi r3,r3,8
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x8, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x25472934:  918E0410  stw r12,1040(r14)
+	  13: GETL       	R12, t8
+	  14: GETL       	R14, t10
+	  15: ADDL       	$0x410, t10
+	  16: STL       	t8, (t10)
+	  17: INCEIPL       	$4
+
+	0x25472938:  48010E0D  bl 0x25483744
+	  18: MOVL       	$0x2547293C, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0x25483744  ($4)
+
+
+
+. 1065 25472924 24
+. 7F 25 CB 78 7C 6C 1B 78 38 80 00 00 38 63 00 08 91 8E 04 10 48 01 0E 0D
+==== BB 1066 (0x254837BC) approx BBs exec'd 0 ====
+
+	0x254837BC:  9088FFFC  stw r4,-4(r8)
+	   0: GETL       	R4, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254837C0:  9088FFF8  stw r4,-8(r8)
+	   5: GETL       	R4, t4
+	   6: GETL       	R8, t6
+	   7: ADDL       	$0xFFFFFFF8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x254837C4:  9088FFF4  stw r4,-12(r8)
+	  10: GETL       	R4, t8
+	  11: GETL       	R8, t10
+	  12: ADDL       	$0xFFFFFFF4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x254837C8:  9488FFF0  stwu r4,-16(r8)
+	  15: GETL       	R4, t12
+	  16: GETL       	R8, t14
+	  17: ADDL       	$0xFFFFFFF0, t14
+	  18: PUTL       	t14, R8
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x254837CC:  409D0008  bc 4,29,0x254837D4
+	  21: Jc29o       	$0x254837D4
+
+
+
+. 1066 254837BC 20
+. 90 88 FF FC 90 88 FF F8 90 88 FF F4 94 88 FF F0 40 9D 00 08
+==== BB 1067 (0x25483934) approx BBs exec'd 0 ====
+
+	0x25483934:  4084002C  bc 4,4,0x25483960
+	   0: Jc04o       	$0x25483960
+
+
+
+. 1067 25483934 4
+. 40 84 00 2C
+==== BB 1068 (0x25483938) approx BBs exec'd 0 ====
+
+	0x25483938:  4C9C0020  bclr 4,28
+	   0: GETL       	LR, t0
+	   1: Jc28o-r       	t0
+
+
+
+. 1068 25483938 4
+. 4C 9C 00 20
+==== BB 1069 (0x2548393C) approx BBs exec'd 0 ====
+
+	0x2548393C:  9086FFFC  stw r4,-4(r6)
+	   0: GETL       	R4, t0
+	   1: GETL       	R6, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25483940:  9086FFF8  stw r4,-8(r6)
+	   5: GETL       	R4, t4
+	   6: GETL       	R6, t6
+	   7: ADDL       	$0xFFFFFFF8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483944:  4E800020  blr
+	  10: GETL       	LR, t8
+	  11: JMPo-r       	t8  ($4)
+
+
+
+. 1069 2548393C 12
+. 90 86 FF FC 90 86 FF F8 4E 80 00 20
+==== BB 1070 (0x2547293C) approx BBs exec'd 0 ====
+
+	0x2547293C:  80CE0410  lwz r6,1040(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x410, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25472940:  38000000  li r0,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25472944:  7E89A378  or r9,r20,r20
+	   8: GETL       	R20, t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0x25472948:  90060004  stw r0,4(r6)
+	  11: GETL       	R0, t8
+	  12: GETL       	R6, t10
+	  13: ADDL       	$0x4, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547294C:  92C60000  stw r22,0(r6)
+	  16: GETL       	R22, t12
+	  17: GETL       	R6, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25472950:  418E0024  bc 12,14,0x25472974
+	  20: Js14o       	$0x25472974
+
+
+
+. 1070 2547293C 24
+. 80 CE 04 10 38 00 00 00 7E 89 A3 78 90 06 00 04 92 C6 00 00 41 8E 00 24
+==== BB 1071 (0x25472954) approx BBs exec'd 0 ====
+
+	0x25472954:  38630004  addi r3,r3,4
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x25472958:  83890220  lwz r28,544(r9)
+	   4: GETL       	R9, t2
+	   5: ADDL       	$0x220, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x2547295C:  2F1C0000  cmpi cr6,r28,0
+	   9: GETL       	R28, t6
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0x25472960:  419A0008  bc 12,26,0x25472968
+	  13: Js26o       	$0x25472968
+
+
+
+. 1071 25472954 16
+. 38 63 00 04 83 89 02 20 2F 1C 00 00 41 9A 00 08
+==== BB 1072 (0x25472968) approx BBs exec'd 0 ====
+
+	0x25472968:  8129000C  lwz r9,12(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547296C:  2C090000  cmpi cr0,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25472970:  4082FFE8  bc 4,2,0x25472958
+	   9: Jc02o       	$0x25472958
+
+
+
+. 1072 25472968 12
+. 81 29 00 0C 2C 09 00 00 40 82 FF E8
+==== BB 1073 (0x25472958) approx BBs exec'd 0 ====
+
+	0x25472958:  83890220  lwz r28,544(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x220, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x2547295C:  2F1C0000  cmpi cr6,r28,0
+	   5: GETL       	R28, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25472960:  419A0008  bc 12,26,0x25472968
+	   9: Js26o       	$0x25472968
+
+
+
+. 1073 25472958 12
+. 83 89 02 20 2F 1C 00 00 41 9A 00 08
+==== BB 1074 (0x25472964) approx BBs exec'd 0 ====
+
+	0x25472964:  95230008  stwu r9,8(r3)
+	   0: GETL       	R9, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0x8, t2
+	   3: PUTL       	t2, R3
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25472968:  8129000C  lwz r9,12(r9)
+	   6: GETL       	R9, t4
+	   7: ADDL       	$0xC, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0x2547296C:  2C090000  cmpi cr0,r9,0
+	  11: GETL       	R9, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25472970:  4082FFE8  bc 4,2,0x25472958
+	  15: Jc02o       	$0x25472958
+
+
+
+. 1074 25472964 16
+. 95 23 00 08 81 29 00 0C 2C 09 00 00 40 82 FF E8
+==== BB 1075 (0x25472974) approx BBs exec'd 0 ====
+
+	0x25472974:  4800B359  bl 0x2547DCCC
+	   0: MOVL       	$0x25472978, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547DCCC  ($4)
+
+
+
+. 1075 25472974 4
+. 48 00 B3 59
+==== BB 1076 _dl_determine_tlsoffset(0x2547DCCC) approx BBs exec'd 0 ====
+
+	0x2547DCCC:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547DCD0:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x2547DCD4:  4801932D  bl 0x25497000
+	   9: MOVL       	$0x2547DCD8, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1076 2547DCCC 12
+. 94 21 FF E0 7D 88 02 A6 48 01 93 2D
+==== BB 1077 (0x2547DCD8) approx BBs exec'd 0 ====
+
+	0x2547DCD8:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547DCDC:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547DCE0:  9361000C  stw r27,12(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547DCE4:  93810010  stw r28,16(r1)
+	  13: GETL       	R28, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x10, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547DCE8:  38600000  li r3,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R3
+	  20: INCEIPL       	$4
+
+	0x2547DCEC:  93A10014  stw r29,20(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547DCF0:  7D8803A6  mtlr r12
+	  26: GETL       	R12, t20
+	  27: PUTL       	t20, LR
+	  28: INCEIPL       	$4
+
+	0x2547DCF4:  837E04C8  lwz r27,1224(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4C8, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R27
+	  33: INCEIPL       	$4
+
+	0x2547DCF8:  3B800020  li r28,32
+	  34: MOVL       	$0x20, t26
+	  35: PUTL       	t26, R28
+	  36: INCEIPL       	$4
+
+	0x2547DCFC:  93E1001C  stw r31,28(r1)
+	  37: GETL       	R31, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x1C, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x2547DD00:  3BA00000  li r29,0
+	  42: MOVL       	$0x0, t32
+	  43: PUTL       	t32, R29
+	  44: INCEIPL       	$4
+
+	0x2547DD04:  813B0410  lwz r9,1040(r27)
+	  45: GETL       	R27, t34
+	  46: ADDL       	$0x410, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R9
+	  49: INCEIPL       	$4
+
+	0x2547DD08:  3BE00000  li r31,0
+	  50: MOVL       	$0x0, t38
+	  51: PUTL       	t38, R31
+	  52: INCEIPL       	$4
+
+	0x2547DD0C:  39800001  li r12,1
+	  53: MOVL       	$0x1, t40
+	  54: PUTL       	t40, R12
+	  55: INCEIPL       	$4
+
+	0x2547DD10:  38A90008  addi r5,r9,8
+	  56: GETL       	R9, t42
+	  57: ADDL       	$0x8, t42
+	  58: PUTL       	t42, R5
+	  59: INCEIPL       	$4
+
+	0x2547DD14:  8005000C  lwz r0,12(r5)
+	  60: GETL       	R5, t44
+	  61: ADDL       	$0xC, t44
+	  62: LDL       	(t44), t46
+	  63: PUTL       	t46, R0
+	  64: INCEIPL       	$4
+
+	0x2547DD18:  2F800000  cmpi cr7,r0,0
+	  65: GETL       	R0, t48
+	  66: CMP0L       	t48, t50  (-rSo)
+	  67: ICRFL       	t50, $0x7, CR
+	  68: INCEIPL       	$4
+
+	0x2547DD1C:  419E0104  bc 12,30,0x2547DE20
+	  69: Js30o       	$0x2547DE20
+
+
+
+. 1077 2547DCD8 72
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 93 81 00 10 38 60 00 00 93 A1 00 14 7D 88 03 A6 83 7E 04 C8 3B 80 00 20 93 E1 00 1C 3B A0 00 00 81 3B 04 10 3B E0 00 00 39 80 00 01 38 A9 00 08 80 05 00 0C 2F 80 00 00 41 9E 01 04
+==== BB 1078 (0x2547DD20) approx BBs exec'd 0 ====
+
+	0x2547DD20:  38C00008  li r6,8
+	   0: MOVL       	$0x8, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x2547DD24:  48000058  b 0x2547DD7C
+	   3: JMPo       	$0x2547DD7C  ($4)
+
+
+
+. 1078 2547DD20 8
+. 38 C0 00 08 48 00 00 58
+==== BB 1079 (0x2547DD7C) approx BBs exec'd 0 ====
+
+	0x2547DD7C:  7D662A14  add r11,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547DD80:  814B0004  lwz r10,4(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x2547DD84:  816A0224  lwz r11,548(r10)
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0x224, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547DD88:  810A0228  lwz r8,552(r10)
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x228, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R8
+	  19: INCEIPL       	$4
+
+	0x2547DD8C:  7C1C5840  cmpl cr0,r28,r11
+	  20: GETL       	R28, t16
+	  21: GETL       	R11, t18
+	  22: CMPUL       	t16, t18, t20  (-rSo)
+	  23: ICRFL       	t20, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0x2547DD90:  388BFFFF  addi r4,r11,-1
+	  25: GETL       	R11, t22
+	  26: ADDL       	$0xFFFFFFFF, t22
+	  27: PUTL       	t22, R4
+	  28: INCEIPL       	$4
+
+	0x2547DD94:  7CE800D0  neg r7,r8
+	  29: GETL       	R8, t24
+	  30: NEGL       	t24
+	  31: PUTL       	t24, R7
+	  32: INCEIPL       	$4
+
+	0x2547DD98:  7CE82038  and r8,r7,r4
+	  33: GETL       	R7, t26
+	  34: GETL       	R4, t28
+	  35: ANDL       	t26, t28
+	  36: PUTL       	t28, R8
+	  37: INCEIPL       	$4
+
+	0x2547DD9C:  40800008  bc 4,0,0x2547DDA4
+	  38: Jc00o       	$0x2547DDA4
+
+
+
+. 1079 2547DD7C 36
+. 7D 66 2A 14 81 4B 00 04 81 6A 02 24 81 0A 02 28 7C 1C 58 40 38 8B FF FF 7C E8 00 D0 7C E8 20 38 40 80 00 08
+==== BB 1080 (0x2547DDA4) approx BBs exec'd 0 ====
+
+	0x2547DDA4:  80EA0220  lwz r7,544(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x220, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547DDA8:  7C9FE850  subf r4,r31,r29
+	   5: GETL       	R31, t4
+	   6: GETL       	R29, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547DDAC:  7C872040  cmpl cr1,r7,r4
+	  10: GETL       	R7, t8
+	  11: GETL       	R4, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x2547DDB0:  40A5FF78  bc 5,5,0x2547DD28
+	  15: Jc05o       	$0x2547DD28
+
+
+
+. 1080 2547DDA4 16
+. 80 EA 02 20 7C 9F E8 50 7C 87 20 40 40 A5 FF 78
+==== BB 1081 (0x2547DDB4) approx BBs exec'd 0 ====
+
+	0x2547DDB4:  7D662A14  add r11,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547DDB8:  814B0004  lwz r10,4(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x2547DDBC:  816A0224  lwz r11,548(r10)
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0x224, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547DDC0:  7CCB1A14  add r6,r11,r3
+	  15: GETL       	R11, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R6
+	  19: INCEIPL       	$4
+
+	0x2547DDC4:  3806FFFF  addi r0,r6,-1
+	  20: GETL       	R6, t16
+	  21: ADDL       	$0xFFFFFFFF, t16
+	  22: PUTL       	t16, R0
+	  23: INCEIPL       	$4
+
+	0x2547DDC8:  7D205B96  divwu r9, r0, r11
+	  24: GETL       	R0, t20
+	  25: GETL       	R11, t18
+	  26: UDIVL       	t18, t20
+	  27: PUTL       	t20, R9
+	  28: INCEIPL       	$4
+
+	0x2547DDCC:  7D2959D6  mullw r9,r9,r11
+	  29: GETL       	R9, t22
+	  30: GETL       	R11, t24
+	  31: MULL       	t22, t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0x2547DDD0:  7CE34850  subf r7,r3,r9
+	  34: GETL       	R3, t26
+	  35: GETL       	R9, t28
+	  36: SUBL       	t26, t28
+	  37: PUTL       	t28, R7
+	  38: INCEIPL       	$4
+
+	0x2547DDD4:  7C074040  cmpl cr0,r7,r8
+	  39: GETL       	R7, t30
+	  40: GETL       	R8, t32
+	  41: CMPUL       	t30, t32, t34  (-rSo)
+	  42: ICRFL       	t34, $0x0, CR
+	  43: INCEIPL       	$4
+
+	0x2547DDD8:  40800008  bc 4,0,0x2547DDE0
+	  44: Jc00o       	$0x2547DDE0
+
+
+
+. 1081 2547DDB4 40
+. 7D 66 2A 14 81 4B 00 04 81 6A 02 24 7C CB 1A 14 38 06 FF FF 7D 20 5B 96 7D 29 59 D6 7C E3 48 50 7C 07 40 40 40 80 00 08
+==== BB 1082 (0x2547DDE0) approx BBs exec'd 0 ====
+
+	0x2547DDE0:  7D684850  subf r11,r8,r9
+	   0: GETL       	R8, t0
+	   1: GETL       	R9, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547DDE4:  7CE35850  subf r7,r3,r11
+	   5: GETL       	R3, t4
+	   6: GETL       	R11, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x2547DDE8:  916A022C  stw r11,556(r10)
+	  10: GETL       	R11, t8
+	  11: GETL       	R10, t10
+	  12: ADDL       	$0x22C, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x2547DDEC:  7C872040  cmpl cr1,r7,r4
+	  15: GETL       	R7, t12
+	  16: GETL       	R4, t14
+	  17: CMPUL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0x2547DDF0:  4085000C  bc 4,5,0x2547DDFC
+	  20: Jc05o       	$0x2547DDFC
+
+
+
+. 1082 2547DDE0 20
+. 7D 68 48 50 7C E3 58 50 91 6A 02 2C 7C 87 20 40 40 85 00 0C
+==== BB 1083 (0x2547DDFC) approx BBs exec'd 0 ====
+
+	0x2547DDFC:  806A0220  lwz r3,544(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x220, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547DE00:  398C0001  addi r12,r12,1
+	   5: GETL       	R12, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x2547DE04:  55861838  rlwinm r6,r12,3,0,28
+	   9: GETL       	R12, t6
+	  10: SHLL       	$0x3, t6
+	  11: PUTL       	t6, R6
+	  12: INCEIPL       	$4
+
+	0x2547DE08:  7D434A14  add r10,r3,r9
+	  13: GETL       	R3, t8
+	  14: GETL       	R9, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R10
+	  17: INCEIPL       	$4
+
+	0x2547DE0C:  7C685050  subf r3,r8,r10
+	  18: GETL       	R8, t12
+	  19: GETL       	R10, t14
+	  20: SUBL       	t12, t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0x2547DE10:  7D062A14  add r8,r6,r5
+	  23: GETL       	R6, t16
+	  24: GETL       	R5, t18
+	  25: ADDL       	t16, t18
+	  26: PUTL       	t18, R8
+	  27: INCEIPL       	$4
+
+	0x2547DE14:  80880004  lwz r4,4(r8)
+	  28: GETL       	R8, t20
+	  29: ADDL       	$0x4, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R4
+	  32: INCEIPL       	$4
+
+	0x2547DE18:  2F040000  cmpi cr6,r4,0
+	  33: GETL       	R4, t24
+	  34: CMP0L       	t24, t26  (-rSo)
+	  35: ICRFL       	t26, $0x6, CR
+	  36: INCEIPL       	$4
+
+	0x2547DE1C:  409AFF60  bc 4,26,0x2547DD7C
+	  37: Jc26o       	$0x2547DD7C
+
+
+
+. 1083 2547DDFC 36
+. 80 6A 02 20 39 8C 00 01 55 86 18 38 7D 43 4A 14 7C 68 50 50 7D 06 2A 14 80 88 00 04 2F 04 00 00 40 9A FF 60
+==== BB 1084 (0x2547DE20) approx BBs exec'd 0 ====
+
+	0x2547DE20:  3BA3069F  addi r29,r3,1695
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x69F, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x2547DE24:  939B0420  stw r28,1056(r27)
+	   4: GETL       	R28, t2
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x420, t4
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547DE28:  57A50034  rlwinm r5,r29,0,0,26
+	   9: GETL       	R29, t6
+	  10: ANDL       	$0xFFFFFFE0, t6
+	  11: PUTL       	t6, R5
+	  12: INCEIPL       	$4
+
+	0x2547DE2C:  907B041C  stw r3,1052(r27)
+	  13: GETL       	R3, t8
+	  14: GETL       	R27, t10
+	  15: ADDL       	$0x41C, t10
+	  16: STL       	t8, (t10)
+	  17: INCEIPL       	$4
+
+	0x2547DE30:  90BB0418  stw r5,1048(r27)
+	  18: GETL       	R5, t12
+	  19: GETL       	R27, t14
+	  20: ADDL       	$0x418, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0x2547DE34:  83810010  lwz r28,16(r1)
+	  23: GETL       	R1, t16
+	  24: ADDL       	$0x10, t16
+	  25: LDL       	(t16), t18
+	  26: PUTL       	t18, R28
+	  27: INCEIPL       	$4
+
+	0x2547DE38:  8361000C  lwz r27,12(r1)
+	  28: GETL       	R1, t20
+	  29: ADDL       	$0xC, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R27
+	  32: INCEIPL       	$4
+
+	0x2547DE3C:  83A10014  lwz r29,20(r1)
+	  33: GETL       	R1, t24
+	  34: ADDL       	$0x14, t24
+	  35: LDL       	(t24), t26
+	  36: PUTL       	t26, R29
+	  37: INCEIPL       	$4
+
+	0x2547DE40:  83C10018  lwz r30,24(r1)
+	  38: GETL       	R1, t28
+	  39: ADDL       	$0x18, t28
+	  40: LDL       	(t28), t30
+	  41: PUTL       	t30, R30
+	  42: INCEIPL       	$4
+
+	0x2547DE44:  83E1001C  lwz r31,28(r1)
+	  43: GETL       	R1, t32
+	  44: ADDL       	$0x1C, t32
+	  45: LDL       	(t32), t34
+	  46: PUTL       	t34, R31
+	  47: INCEIPL       	$4
+
+	0x2547DE48:  38210020  addi r1,r1,32
+	  48: GETL       	R1, t36
+	  49: ADDL       	$0x20, t36
+	  50: PUTL       	t36, R1
+	  51: INCEIPL       	$4
+
+	0x2547DE4C:  4E800020  blr
+	  52: GETL       	LR, t38
+	  53: JMPo-r       	t38  ($4)
+
+
+
+. 1084 2547DE20 48
+. 3B A3 06 9F 93 9B 04 20 57 A5 00 34 90 7B 04 1C 90 BB 04 18 83 81 00 10 83 61 00 0C 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1085 (0x25472978) approx BBs exec'd 0 ====
+
+	0x25472978:  4800B57D  bl 0x2547DEF4
+	   0: MOVL       	$0x2547297C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547DEF4  ($4)
+
+
+
+. 1085 25472978 4
+. 48 00 B5 7D
+==== BB 1086 _dl_allocate_tls_storage(0x2547DEF4) approx BBs exec'd 0 ====
+
+	0x2547DEF4:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547DEF8:  7C6802A6  mflr r3
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x2547DEFC:  48019105  bl 0x25497000
+	   9: MOVL       	$0x2547DF00, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1086 2547DEF4 12
+. 94 21 FF E0 7C 68 02 A6 48 01 91 05
+==== BB 1087 (0x2547DF00) approx BBs exec'd 0 ====
+
+	0x2547DF00:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547DF04:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547DF08:  93810010  stw r28,16(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547DF0C:  90610024  stw r3,36(r1)
+	  13: GETL       	R3, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547DF10:  93A10014  stw r29,20(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547DF14:  839E04C8  lwz r28,1224(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x4C8, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R28
+	  27: INCEIPL       	$4
+
+	0x2547DF18:  9361000C  stw r27,12(r1)
+	  28: GETL       	R27, t22
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0xC, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0x2547DF1C:  807C0420  lwz r3,1056(r28)
+	  33: GETL       	R28, t26
+	  34: ADDL       	$0x420, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R3
+	  37: INCEIPL       	$4
+
+	0x2547DF20:  809C0418  lwz r4,1048(r28)
+	  38: GETL       	R28, t30
+	  39: ADDL       	$0x418, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R4
+	  42: INCEIPL       	$4
+
+	0x2547DF24:  38A3046F  addi r5,r3,1135
+	  43: GETL       	R3, t34
+	  44: ADDL       	$0x46F, t34
+	  45: PUTL       	t34, R5
+	  46: INCEIPL       	$4
+
+	0x2547DF28:  7D2300D0  neg r9,r3
+	  47: GETL       	R3, t36
+	  48: NEGL       	t36
+	  49: PUTL       	t36, R9
+	  50: INCEIPL       	$4
+
+	0x2547DF2C:  7CA04838  and r0,r5,r9
+	  51: GETL       	R5, t38
+	  52: GETL       	R9, t40
+	  53: ANDL       	t38, t40
+	  54: PUTL       	t40, R0
+	  55: INCEIPL       	$4
+
+	0x2547DF30:  93E1001C  stw r31,28(r1)
+	  56: GETL       	R31, t42
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x1C, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x2547DF34:  7FA40214  add r29,r4,r0
+	  61: GETL       	R4, t46
+	  62: GETL       	R0, t48
+	  63: ADDL       	t46, t48
+	  64: PUTL       	t48, R29
+	  65: INCEIPL       	$4
+
+	0x2547DF38:  7FA4EB78  or r4,r29,r29
+	  66: GETL       	R29, t50
+	  67: PUTL       	t50, R4
+	  68: INCEIPL       	$4
+
+	0x2547DF3C:  48019AE1  bl 0x25497A1C
+	  69: MOVL       	$0x2547DF40, t52
+	  70: PUTL       	t52, LR
+	  71: JMPo-c       	$0x25497A1C  ($4)
+
+
+
+. 1087 2547DF00 64
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 90 61 00 24 93 A1 00 14 83 9E 04 C8 93 61 00 0C 80 7C 04 20 80 9C 04 18 38 A3 04 6F 7D 23 00 D0 7C A0 48 38 93 E1 00 1C 7F A4 02 14 7F A4 EB 78 48 01 9A E1
+==== BB 1088 (0x2547DF40) approx BBs exec'd 0 ====
+
+	0x2547DF40:  38A00470  li r5,1136
+	   0: MOVL       	$0x470, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547DF44:  7C7F1B79  or. r31,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R31
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x2547DF48:  38800000  li r4,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x2547DF4C:  7FFBFB78  or r27,r31,r31
+	  11: GETL       	R31, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0x2547DF50:  7C1FEA14  add r0,r31,r29
+	  14: GETL       	R31, t10
+	  15: GETL       	R29, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R0
+	  18: INCEIPL       	$4
+
+	0x2547DF54:  4182004C  bc 12,2,0x2547DFA0
+	  19: Js02o       	$0x2547DFA0
+
+
+
+. 1088 2547DF40 24
+. 38 A0 04 70 7C 7F 1B 79 38 80 00 00 7F FB FB 78 7C 1F EA 14 41 82 00 4C
+==== BB 1089 (0x2547DF58) approx BBs exec'd 0 ====
+
+	0x2547DF58:  80FC0418  lwz r7,1048(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x418, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547DF5C:  7FA70050  subf r29,r7,r0
+	   5: GETL       	R7, t4
+	   6: GETL       	R0, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547DF60:  387DFB90  addi r3,r29,-1136
+	  10: GETL       	R29, t8
+	  11: ADDL       	$0xFFFFFB90, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x2547DF64:  480057E1  bl 0x25483744
+	  14: MOVL       	$0x2547DF68, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x25483744  ($4)
+
+
+
+. 1089 2547DF58 16
+. 80 FC 04 18 7F A7 00 50 38 7D FB 90 48 00 57 E1
+==== BB 1090 (0x2547DF68) approx BBs exec'd 0 ====
+
+	0x2547DF68:  80DC0408  lwz r6,1032(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547DF6C:  38800004  li r4,4
+	   5: MOVL       	$0x4, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x2547DF70:  38660010  addi r3,r6,16
+	   8: GETL       	R6, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x2547DF74:  3BE6000E  addi r31,r6,14
+	  12: GETL       	R6, t8
+	  13: ADDL       	$0xE, t8
+	  14: PUTL       	t8, R31
+	  15: INCEIPL       	$4
+
+	0x2547DF78:  48019AB5  bl 0x25497A2C
+	  16: MOVL       	$0x2547DF7C, t10
+	  17: PUTL       	t10, LR
+	  18: JMPo-c       	$0x25497A2C  ($4)
+
+
+
+. 1090 2547DF68 20
+. 80 DC 04 08 38 80 00 04 38 66 00 10 3B E6 00 0E 48 01 9A B5
+==== BB 1091 (0x2547DF7C) approx BBs exec'd 0 ====
+
+	0x2547DF7C:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547DF80:  38030004  addi r0,r3,4
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x2547DF84:  41860044  bc 12,6,0x2547DFC8
+	   8: Js06o       	$0x2547DFC8
+
+
+
+. 1091 2547DF7C 12
+. 2C 83 00 00 38 03 00 04 41 86 00 44
+==== BB 1092 (0x2547DF88) approx BBs exec'd 0 ====
+
+	0x2547DF88:  93E30000  stw r31,0(r3)
+	   0: GETL       	R31, t0
+	   1: GETL       	R3, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x2547DF8C:  901DFFFC  stw r0,-4(r29)
+	   4: GETL       	R0, t4
+	   5: GETL       	R29, t6
+	   6: ADDL       	$0xFFFFFFFC, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547DF90:  2F9D0000  cmpi cr7,r29,0
+	   9: GETL       	R29, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x2547DF94:  7FBFEB78  or r31,r29,r29
+	  13: GETL       	R29, t12
+	  14: PUTL       	t12, R31
+	  15: INCEIPL       	$4
+
+	0x2547DF98:  7F63DB78  or r3,r27,r27
+	  16: GETL       	R27, t14
+	  17: PUTL       	t14, R3
+	  18: INCEIPL       	$4
+
+	0x2547DF9C:  419E0034  bc 12,30,0x2547DFD0
+	  19: Js30o       	$0x2547DFD0
+
+
+
+. 1092 2547DF88 24
+. 93 E3 00 00 90 1D FF FC 2F 9D 00 00 7F BF EB 78 7F 63 DB 78 41 9E 00 34
+==== BB 1093 (0x2547DFA0) approx BBs exec'd 0 ====
+
+	0x2547DFA0:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547DFA4:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547DFA8:  8361000C  lwz r27,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R27
+	  12: INCEIPL       	$4
+
+	0x2547DFAC:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0x2547DFB0:  7D0803A6  mtlr r8
+	  18: GETL       	R8, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547DFB4:  83A10014  lwz r29,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x2547DFB8:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x2547DFBC:  83E1001C  lwz r31,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0x2547DFC0:  38210020  addi r1,r1,32
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0x2547DFC4:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+
+. 1093 2547DFA0 40
+. 81 01 00 24 7F E3 FB 78 83 61 00 0C 83 81 00 10 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1094 (0x2547297C) approx BBs exec'd 0 ====
+
+	0x2547297C:  7C711B79  or. r17,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R17
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25472980:  41820E04  bc 12,2,0x25473784
+	   5: Js02o       	$0x25473784
+
+
+
+. 1094 2547297C 8
+. 7C 71 1B 79 41 82 0E 04
+==== BB 1095 (0x25472984) approx BBs exec'd 0 ====
+
+	0x25472984:  813F0030  lwz r9,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25472988:  39540158  addi r10,r20,344
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0x158, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547298C:  8311FFFC  lwz r24,-4(r17)
+	   9: GETL       	R17, t6
+	  10: ADDL       	$0xFFFFFFFC, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R24
+	  13: INCEIPL       	$4
+
+	0x25472990:  2F890000  cmpi cr7,r9,0
+	  14: GETL       	R9, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0x25472994:  915F0078  stw r10,120(r31)
+	  18: GETL       	R10, t14
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0x78, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25472998:  930E0424  stw r24,1060(r14)
+	  23: GETL       	R24, t18
+	  24: GETL       	R14, t20
+	  25: ADDL       	$0x424, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0x2547299C:  409E0E38  bc 4,30,0x254737D4
+	  28: Jc30o       	$0x254737D4
+
+
+
+. 1095 25472984 28
+. 81 3F 00 30 39 54 01 58 83 11 FF FC 2F 89 00 00 91 5F 00 78 93 0E 04 24 40 9E 0E 38
+==== BB 1096 (0x254729A0) approx BBs exec'd 0 ====
+
+	0x254729A0:  8074013C  lwz r3,316(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x13C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254729A4:  2E120000  cmpi cr4,r18,0
+	   5: GETL       	R18, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254729A8:  2F830000  cmpi cr7,r3,0
+	   9: GETL       	R3, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x254729AC:  419E0118  bc 12,30,0x25472AC4
+	  13: Js30o       	$0x25472AC4
+
+
+
+. 1096 254729A0 16
+. 80 74 01 3C 2E 12 00 00 2F 83 00 00 41 9E 01 18
+==== BB 1097 (0x254729B0) approx BBs exec'd 0 ====
+
+	0x254729B0:  826F0050  lwz r19,80(r15)
+	   0: GETL       	R15, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0x254729B4:  2C130000  cmpi cr0,r19,0
+	   5: GETL       	R19, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x254729B8:  4082010C  bc 4,2,0x25472AC4
+	   9: Jc02o       	$0x25472AC4
+
+
+
+. 1097 254729B0 12
+. 82 6F 00 50 2C 13 00 00 40 82 01 0C
+==== BB 1098 (0x254729BC) approx BBs exec'd 0 ====
+
+	0x254729BC:  83940114  lwz r28,276(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x114, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x254729C0:  8314015C  lwz r24,348(r20)
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0x15C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R24
+	   9: INCEIPL       	$4
+
+	0x254729C4:  83A30004  lwz r29,4(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0x254729C8:  817C0004  lwz r11,4(r28)
+	  15: GETL       	R28, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R11
+	  19: INCEIPL       	$4
+
+	0x254729CC:  5706103A  rlwinm r6,r24,2,0,29
+	  20: GETL       	R24, t16
+	  21: SHLL       	$0x2, t16
+	  22: PUTL       	t16, R6
+	  23: INCEIPL       	$4
+
+	0x254729D0:  83940158  lwz r28,344(r20)
+	  24: GETL       	R20, t18
+	  25: ADDL       	$0x158, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R28
+	  28: INCEIPL       	$4
+
+	0x254729D4:  7F4BEA14  add r26,r11,r29
+	  29: GETL       	R11, t22
+	  30: GETL       	R29, t24
+	  31: ADDL       	t22, t24
+	  32: PUTL       	t24, R26
+	  33: INCEIPL       	$4
+
+	0x254729D8:  82F40034  lwz r23,52(r20)
+	  34: GETL       	R20, t26
+	  35: ADDL       	$0x34, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R23
+	  38: INCEIPL       	$4
+
+	0x254729DC:  7F66E214  add r27,r6,r28
+	  39: GETL       	R6, t30
+	  40: GETL       	R28, t32
+	  41: ADDL       	t30, t32
+	  42: PUTL       	t32, R27
+	  43: INCEIPL       	$4
+
+	0x254729E0:  7E1AE810  subfc r16,r26,r29
+	  44: GETL       	R26, t34
+	  45: GETL       	R29, t36
+	  46: SBBL       	t34, t36  (-wCa)
+	  47: PUTL       	t36, R16
+	  48: INCEIPL       	$4
+
+	0x254729E4:  7E108110  subfe r16,r16,r16
+	  49: GETL       	R16, t38
+	  50: GETL       	R16, t40
+	  51: SBBL       	t38, t40  (-rCa-wCa)
+	  52: PUTL       	t40, R16
+	  53: INCEIPL       	$4
+
+	0x254729E8:  7E1000D0  neg r16,r16
+	  54: GETL       	R16, t42
+	  55: NEGL       	t42
+	  56: PUTL       	t42, R16
+	  57: INCEIPL       	$4
+
+	0x254729EC:  7EDBE010  subfc r22,r27,r28
+	  58: GETL       	R27, t44
+	  59: GETL       	R28, t46
+	  60: SBBL       	t44, t46  (-wCa)
+	  61: PUTL       	t46, R22
+	  62: INCEIPL       	$4
+
+	0x254729F0:  7ED6B110  subfe r22,r22,r22
+	  63: GETL       	R22, t48
+	  64: GETL       	R22, t50
+	  65: SBBL       	t48, t50  (-rCa-wCa)
+	  66: PUTL       	t50, R22
+	  67: INCEIPL       	$4
+
+	0x254729F4:  7ED600D0  neg r22,r22
+	  68: GETL       	R22, t52
+	  69: NEGL       	t52
+	  70: PUTL       	t52, R22
+	  71: INCEIPL       	$4
+
+	0x254729F8:  83370004  lwz r25,4(r23)
+	  72: GETL       	R23, t54
+	  73: ADDL       	$0x4, t54
+	  74: LDL       	(t54), t56
+	  75: PUTL       	t56, R25
+	  76: INCEIPL       	$4
+
+	0x254729FC:  7EC98039  and. r9,r22,r16
+	  77: GETL       	R22, t58
+	  78: GETL       	R16, t60
+	  79: ANDL       	t58, t60
+	  80: PUTL       	t60, R9
+	  81: CMP0L       	t60, t62  (-rSo)
+	  82: ICRFL       	t62, $0x0, CR
+	  83: INCEIPL       	$4
+
+	0x25472A00:  41820090  bc 12,2,0x25472A90
+	  84: Js02o       	$0x25472A90
+
+
+
+. 1098 254729BC 72
+. 83 94 01 14 83 14 01 5C 83 A3 00 04 81 7C 00 04 57 06 10 3A 83 94 01 58 7F 4B EA 14 82 F4 00 34 7F 66 E2 14 7E 1A E8 10 7E 10 81 10 7E 10 00 D0 7E DB E0 10 7E D6 B1 10 7E D6 00 D0 83 37 00 04 7E C9 80 39 41 82 00 90
+==== BB 1099 (0x25472A04) approx BBs exec'd 0 ====
+
+	0x25472A04:  809C0000  lwz r4,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25472A08:  7C84A000  cmp cr1,r4,r20
+	   4: GETL       	R4, t4
+	   5: GETL       	R20, t6
+	   6: CMPL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25472A0C:  41860060  bc 12,6,0x25472A6C
+	   9: Js06o       	$0x25472A6C
+
+
+
+. 1099 25472A04 12
+. 80 9C 00 00 7C 84 A0 00 41 86 00 60
+==== BB 1100 (0x25472A6C) approx BBs exec'd 0 ====
+
+	0x25472A6C:  3B9C0004  addi r28,r28,4
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x25472A70:  7C9AE810  subfc r4,r26,r29
+	   4: GETL       	R26, t2
+	   5: GETL       	R29, t4
+	   6: SBBL       	t2, t4  (-wCa)
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x25472A74:  7C842110  subfe r4,r4,r4
+	   9: GETL       	R4, t6
+	  10: GETL       	R4, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x25472A78:  7C8400D0  neg r4,r4
+	  14: GETL       	R4, t10
+	  15: NEGL       	t10
+	  16: PUTL       	t10, R4
+	  17: INCEIPL       	$4
+
+	0x25472A7C:  7CBBE010  subfc r5,r27,r28
+	  18: GETL       	R27, t12
+	  19: GETL       	R28, t14
+	  20: SBBL       	t12, t14  (-wCa)
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0x25472A80:  7CA52910  subfe r5,r5,r5
+	  23: GETL       	R5, t16
+	  24: GETL       	R5, t18
+	  25: SBBL       	t16, t18  (-rCa-wCa)
+	  26: PUTL       	t18, R5
+	  27: INCEIPL       	$4
+
+	0x25472A84:  7CA500D0  neg r5,r5
+	  28: GETL       	R5, t20
+	  29: NEGL       	t20
+	  30: PUTL       	t20, R5
+	  31: INCEIPL       	$4
+
+	0x25472A88:  7CAA2039  and. r10,r5,r4
+	  32: GETL       	R5, t22
+	  33: GETL       	R4, t24
+	  34: ANDL       	t22, t24
+	  35: PUTL       	t24, R10
+	  36: CMP0L       	t24, t26  (-rSo)
+	  37: ICRFL       	t26, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0x25472A8C:  4082FF78  bc 4,2,0x25472A04
+	  39: Jc02o       	$0x25472A04
+
+
+
+. 1100 25472A6C 36
+. 3B 9C 00 04 7C 9A E8 10 7C 84 21 10 7C 84 00 D0 7C BB E0 10 7C A5 29 10 7C A5 00 D0 7C AA 20 39 40 82 FF 78
+==== BB 1101 (0x25472A10) approx BBs exec'd 0 ====
+
+	0x25472A10:  81040000  lwz r8,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0x25472A14:  2E080000  cmpi cr4,r8,0
+	   4: GETL       	R8, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0x25472A18:  40920078  bc 4,18,0x25472A90
+	   8: Jc18o       	$0x25472A90
+
+
+
+. 1101 25472A10 12
+. 81 04 00 00 2E 08 00 00 40 92 00 78
+==== BB 1102 (0x25472A90) approx BBs exec'd 0 ====
+
+	0x25472A90:  7F80DA78  xor r0,r28,r27
+	   0: GETL       	R28, t0
+	   1: GETL       	R27, t2
+	   2: XORL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25472A94:  21600000  subfic r11,r0,0
+	   5: GETL       	R0, t4
+	   6: MOVL       	$0x0, t6
+	   7: SBBL       	t4, t6  (-wCa)
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25472A98:  7C0B0114  adde r0,r11,r0
+	  10: GETL       	R11, t8
+	  11: GETL       	R0, t10
+	  12: ADCL       	t8, t10  (-rCa-wCa)
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x25472A9C:  7FACD278  xor r12,r29,r26
+	  15: GETL       	R29, t12
+	  16: GETL       	R26, t14
+	  17: XORL       	t12, t14
+	  18: PUTL       	t14, R12
+	  19: INCEIPL       	$4
+
+	0x25472AA0:  214C0000  subfic r10,r12,0
+	  20: GETL       	R12, t16
+	  21: MOVL       	$0x0, t18
+	  22: SBBL       	t16, t18  (-wCa)
+	  23: PUTL       	t18, R10
+	  24: INCEIPL       	$4
+
+	0x25472AA4:  7D8A6114  adde r12,r10,r12
+	  25: GETL       	R10, t20
+	  26: GETL       	R12, t22
+	  27: ADCL       	t20, t22  (-rCa-wCa)
+	  28: PUTL       	t22, R12
+	  29: INCEIPL       	$4
+
+	0x25472AA8:  7C0B6039  and. r11,r0,r12
+	  30: GETL       	R0, t24
+	  31: GETL       	R12, t26
+	  32: ANDL       	t24, t26
+	  33: PUTL       	t26, R11
+	  34: CMP0L       	t26, t28  (-rSo)
+	  35: ICRFL       	t28, $0x0, CR
+	  36: INCEIPL       	$4
+
+	0x25472AAC:  41820008  bc 12,2,0x25472AB4
+	  37: Js02o       	$0x25472AB4
+
+
+
+. 1102 25472A90 32
+. 7F 80 DA 78 21 60 00 00 7C 0B 01 14 7F AC D2 78 21 4C 00 00 7D 8A 61 14 7C 0B 60 39 41 82 00 08
+==== BB 1103 (0x25472AB4) approx BBs exec'd 0 ====
+
+	0x25472AB4:  83AF0000  lwz r29,0(r15)
+	   0: GETL       	R15, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0x25472AB8:  2E120000  cmpi cr4,r18,0
+	   4: GETL       	R18, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0x25472ABC:  73A90001  andi. r9,r29,0x1
+	   8: GETL       	R29, t8
+	   9: ANDL       	$0x1, t8
+	  10: PUTL       	t8, R9
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x25472AC0:  40821240  bc 4,2,0x25473D00
+	  14: Jc02o       	$0x25473D00
+
+
+
+. 1103 25472AB4 16
+. 83 AF 00 00 2E 12 00 00 73 A9 00 01 40 82 12 40
+==== BB 1104 (0x25472AC4) approx BBs exec'd 0 ====
+
+	0x25472AC4:  806E01B8  lwz r3,440(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1B8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25472AC8:  4800947D  bl 0x2547BF44
+	   5: MOVL       	$0x25472ACC, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0x2547BF44  ($4)
+
+
+
+. 1104 25472AC4 8
+. 80 6E 01 B8 48 00 94 7D
+==== BB 1105 _dl_debug_initialize(0x2547BF44) approx BBs exec'd 0 ====
+
+	0x2547BF44:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547BF48:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x2547BF4C:  4801B0B5  bl 0x25497000
+	   9: MOVL       	$0x2547BF50, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1105 2547BF44 12
+. 94 21 FF F0 7D 88 02 A6 48 01 B0 B5
+==== BB 1106 (0x2547BF50) approx BBs exec'd 0 ====
+
+	0x2547BF50:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547BF54:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547BF58:  7C681B78  or r8,r3,r3
+	   8: GETL       	R3, t6
+	   9: PUTL       	t6, R8
+	  10: INCEIPL       	$4
+
+	0x2547BF5C:  7D8803A6  mtlr r12
+	  11: GETL       	R12, t8
+	  12: PUTL       	t8, LR
+	  13: INCEIPL       	$4
+
+	0x2547BF60:  815E0518  lwz r10,1304(r30)
+	  14: GETL       	R30, t10
+	  15: ADDL       	$0x518, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547BF64:  800A0008  lwz r0,8(r10)
+	  19: GETL       	R10, t14
+	  20: ADDL       	$0x8, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R0
+	  23: INCEIPL       	$4
+
+	0x2547BF68:  7D435378  or r3,r10,r10
+	  24: GETL       	R10, t18
+	  25: PUTL       	t18, R3
+	  26: INCEIPL       	$4
+
+	0x2547BF6C:  2F800000  cmpi cr7,r0,0
+	  27: GETL       	R0, t20
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x7, CR
+	  30: INCEIPL       	$4
+
+	0x2547BF70:  409E0024  bc 4,30,0x2547BF94
+	  31: Jc30o       	$0x2547BF94
+
+
+
+. 1106 2547BF50 36
+. 93 C1 00 08 7F C8 02 A6 7C 68 1B 78 7D 88 03 A6 81 5E 05 18 80 0A 00 08 7D 43 53 78 2F 80 00 00 40 9E 00 24
+==== BB 1107 (0x2547BF74) approx BBs exec'd 0 ====
+
+	0x2547BF74:  813E04C8  lwz r9,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547BF78:  38A00001  li r5,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547BF7C:  809E0508  lwz r4,1288(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x508, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x2547BF80:  81690000  lwz r11,0(r9)
+	  13: GETL       	R9, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R11
+	  16: INCEIPL       	$4
+
+	0x2547BF84:  90AA0000  stw r5,0(r10)
+	  17: GETL       	R5, t14
+	  18: GETL       	R10, t16
+	  19: STL       	t14, (t16)
+	  20: INCEIPL       	$4
+
+	0x2547BF88:  910A0010  stw r8,16(r10)
+	  21: GETL       	R8, t18
+	  22: GETL       	R10, t20
+	  23: ADDL       	$0x10, t20
+	  24: STL       	t18, (t20)
+	  25: INCEIPL       	$4
+
+	0x2547BF8C:  916A0004  stw r11,4(r10)
+	  26: GETL       	R11, t22
+	  27: GETL       	R10, t24
+	  28: ADDL       	$0x4, t24
+	  29: STL       	t22, (t24)
+	  30: INCEIPL       	$4
+
+	0x2547BF90:  908A0008  stw r4,8(r10)
+	  31: GETL       	R4, t26
+	  32: GETL       	R10, t28
+	  33: ADDL       	$0x8, t28
+	  34: STL       	t26, (t28)
+	  35: INCEIPL       	$4
+
+	0x2547BF94:  83C10008  lwz r30,8(r1)
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x8, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R30
+	  40: INCEIPL       	$4
+
+	0x2547BF98:  38210010  addi r1,r1,16
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x10, t34
+	  43: PUTL       	t34, R1
+	  44: INCEIPL       	$4
+
+	0x2547BF9C:  4E800020  blr
+	  45: GETL       	LR, t36
+	  46: JMPo-r       	t36  ($4)
+
+
+
+. 1107 2547BF74 44
+. 81 3E 04 C8 38 A0 00 01 80 9E 05 08 81 69 00 00 90 AA 00 00 91 0A 00 10 91 6A 00 04 90 8A 00 08 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 1108 (0x25472ACC) approx BBs exec'd 0 ====
+
+	0x25472ACC:  81340074  lwz r9,116(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25472AD0:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x25472AD4:  2F090000  cmpi cr6,r9,0
+	   8: GETL       	R9, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x25472AD8:  419A0008  bc 12,26,0x25472AE0
+	  12: Js26o       	$0x25472AE0
+
+
+
+. 1108 25472ACC 16
+. 81 34 00 74 7C 7A 1B 78 2F 09 00 00 41 9A 00 08
+==== BB 1109 (0x25472ADC) approx BBs exec'd 0 ====
+
+	0x25472ADC:  90690004  stw r3,4(r9)
+	   0: GETL       	R3, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0x4, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472AE0:  812E022C  lwz r9,556(r14)
+	   5: GETL       	R14, t4
+	   6: ADDL       	$0x22C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25472AE4:  2F890000  cmpi cr7,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25472AE8:  419E0008  bc 12,30,0x25472AF0
+	  14: Js30o       	$0x25472AF0
+
+
+
+. 1109 25472ADC 16
+. 90 69 00 04 81 2E 02 2C 2F 89 00 00 41 9E 00 08
+==== BB 1110 (0x25472AF0) approx BBs exec'd 0 ====
+
+	0x25472AF0:  825F0078  lwz r18,120(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x78, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x25472AF4:  C8140158  lfd f0,344(r20)
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0x158, t4
+	   7: FPU_RQ       	(t4), 0x0:0x0
+	   8: INCEIPL       	$4
+
+	0x25472AF8:  924E0008  stw r18,8(r14)
+	   9: GETL       	R18, t6
+	  10: GETL       	R14, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x25472AFC:  D80F0014  stfd f0,20(r15)
+	  14: GETL       	R15, t10
+	  15: ADDL       	$0x14, t10
+	  16: FPU_WQ       	0x0:0x0, (t10)
+	  17: INCEIPL       	$4
+
+	0x25472B00:  924E0010  stw r18,16(r14)
+	  18: GETL       	R18, t12
+	  19: GETL       	R14, t14
+	  20: ADDL       	$0x10, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0x25472B04:  41920844  bc 12,18,0x25473348
+	  23: Js18o       	$0x25473348
+
+
+
+. 1110 25472AF0 24
+. 82 5F 00 78 C8 14 01 58 92 4E 00 08 D8 0F 00 14 92 4E 00 10 41 92 08 44
+==== BB 1111 (0x25473348) approx BBs exec'd 0 ====
+
+	0x25473348:  8174000C  lwz r11,12(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547334C:  7E9DA378  or r29,r20,r20
+	   5: GETL       	R20, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25473350:  820F0050  lwz r16,80(r15)
+	   8: GETL       	R15, t6
+	   9: ADDL       	$0x50, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R16
+	  12: INCEIPL       	$4
+
+	0x25473354:  2C0B0000  cmpi cr0,r11,0
+	  13: GETL       	R11, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0x25473358:  82EF0028  lwz r23,40(r15)
+	  17: GETL       	R15, t14
+	  18: ADDL       	$0x28, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R23
+	  21: INCEIPL       	$4
+
+	0x2547335C:  3150FFFF  addic r10,r16,-1
+	  22: GETL       	R16, t18
+	  23: ADCL       	$0xFFFFFFFF, t18  (-wCa)
+	  24: PUTL       	t18, R10
+	  25: INCEIPL       	$4
+
+	0x25473360:  7F6A8110  subfe r27,r10,r16
+	  26: GETL       	R10, t20
+	  27: GETL       	R16, t22
+	  28: SBBL       	t20, t22  (-rCa-wCa)
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0x25473364:  7EF3DB78  or r19,r23,r27
+	  31: GETL       	R23, t24
+	  32: GETL       	R27, t26
+	  33: ORL       	t26, t24
+	  34: PUTL       	t24, R19
+	  35: INCEIPL       	$4
+
+	0x25473368:  926F0028  stw r19,40(r15)
+	  36: GETL       	R19, t28
+	  37: GETL       	R15, t30
+	  38: ADDL       	$0x28, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0x2547336C:  41820018  bc 12,2,0x25473384
+	  41: Js02o       	$0x25473384
+
+
+
+. 1111 25473348 40
+. 81 74 00 0C 7E 9D A3 78 82 0F 00 50 2C 0B 00 00 82 EF 00 28 31 50 FF FF 7F 6A 81 10 7E F3 DB 78 92 6F 00 28 41 82 00 18
+==== BB 1112 (0x25473370) approx BBs exec'd 0 ====
+
+	0x25473370:  7D605B78  or r0,r11,r11
+	   0: GETL       	R11, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25473374:  7C1D0378  or r29,r0,r0
+	   3: GETL       	R0, t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0x25473378:  801D000C  lwz r0,12(r29)
+	   6: GETL       	R29, t4
+	   7: ADDL       	$0xC, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x2547337C:  2F000000  cmpi cr6,r0,0
+	  11: GETL       	R0, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0x25473380:  409AFFF4  bc 4,26,0x25473374
+	  15: Jc26o       	$0x25473374
+
+
+
+. 1112 25473370 20
+. 7D 60 5B 78 7C 1D 03 78 80 1D 00 0C 2F 00 00 00 40 9A FF F4
+==== BB 1113 (0x25473374) approx BBs exec'd 0 ====
+
+	0x25473374:  7C1D0378  or r29,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0x25473378:  801D000C  lwz r0,12(r29)
+	   3: GETL       	R29, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x2547337C:  2F000000  cmpi cr6,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x25473380:  409AFFF4  bc 4,26,0x25473374
+	  12: Jc26o       	$0x25473374
+
+
+
+. 1113 25473374 16
+. 7C 1D 03 78 80 1D 00 0C 2F 00 00 00 40 9A FF F4
+==== BB 1114 (0x25473384) approx BBs exec'd 0 ====
+
+	0x25473384:  82DD001C  lwz r22,28(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R22
+	   4: INCEIPL       	$4
+
+	0x25473388:  81360004  lwz r9,4(r22)
+	   5: GETL       	R22, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x2547338C:  2F890000  cmpi cr7,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25473390:  409E0420  bc 4,30,0x254737B0
+	  14: Jc30o       	$0x254737B0
+
+
+
+. 1114 25473384 16
+. 82 DD 00 1C 81 36 00 04 2F 89 00 00 40 9E 04 20
+==== BB 1115 (0x25473394) approx BBs exec'd 0 ====
+
+	0x25473394:  3B8E01B8  addi r28,r14,440
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1B8, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x25473398:  7D9CE800  cmp cr3,r28,r29
+	   4: GETL       	R28, t2
+	   5: GETL       	R29, t4
+	   6: CMPL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x3, CR
+	   8: INCEIPL       	$4
+
+	0x2547339C:  418E0018  bc 12,14,0x254733B4
+	   9: Js14o       	$0x254733B4
+
+
+
+. 1115 25473394 12
+. 3B 8E 01 B8 7D 9C E8 00 41 8E 00 18
+==== BB 1116 (0x254733A0) approx BBs exec'd 0 ====
+
+	0x254733A0:  809D01C0  lwz r4,448(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254733A4:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254733A8:  80AF0028  lwz r5,40(r15)
+	   8: GETL       	R15, t6
+	   9: ADDL       	$0x28, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x254733AC:  7F66DB78  or r6,r27,r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x254733B0:  48006211  bl 0x254795C0
+	  16: MOVL       	$0x254733B4, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0x254795C0  ($4)
+
+
+
+. 1116 254733A0 20
+. 80 9D 01 C0 7F A3 EB 78 80 AF 00 28 7F 66 DB 78 48 00 62 11
+==== BB 1117 _dl_relocate_object(0x254795C0) approx BBs exec'd 0 ====
+
+	0x254795C0:  7CE802A6  mflr r7
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x254795C4:  9421FF50  stwu r1,-176(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF50, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x254795C8:  7D800026  mfcr r12
+	   9: GETL       	CR, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x254795CC:  93E100AC  stw r31,172(r1)
+	  12: GETL       	R31, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xAC, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x254795D0:  4801DA31  bl 0x25497000
+	  17: MOVL       	$0x254795D4, t12
+	  18: PUTL       	t12, LR
+	  19: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1117 254795C0 20
+. 7C E8 02 A6 94 21 FF 50 7D 80 00 26 93 E1 00 AC 48 01 DA 31
+==== BB 1118 (0x254795D4) approx BBs exec'd 0 ====
+
+	0x254795D4:  7C3F0B78  or r31,r1,r1
+	   0: GETL       	R1, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0x254795D8:  90E100B4  stw r7,180(r1)
+	   3: GETL       	R7, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xB4, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254795DC:  80030180  lwz r0,384(r3)
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x180, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x254795E0:  92210074  stw r17,116(r1)
+	  13: GETL       	R17, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x74, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254795E4:  3A200000  li r17,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R17
+	  20: INCEIPL       	$4
+
+	0x254795E8:  74082000  andis. r8,r0,0x2000
+	  21: GETL       	R0, t16
+	  22: ANDL       	$0x20000000, t16
+	  23: PUTL       	t16, R8
+	  24: CMP0L       	t16, t18  (-rSo)
+	  25: ICRFL       	t18, $0x0, CR
+	  26: INCEIPL       	$4
+
+	0x254795EC:  9361009C  stw r27,156(r1)
+	  27: GETL       	R27, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x9C, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x254795F0:  938100A0  stw r28,160(r1)
+	  32: GETL       	R28, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0xA0, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x254795F4:  7CDB3378  or r27,r6,r6
+	  37: GETL       	R6, t28
+	  38: PUTL       	t28, R27
+	  39: INCEIPL       	$4
+
+	0x254795F8:  93C100A8  stw r30,168(r1)
+	  40: GETL       	R30, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0xA8, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x254795FC:  7CBC2B78  or r28,r5,r5
+	  45: GETL       	R5, t34
+	  46: PUTL       	t34, R28
+	  47: INCEIPL       	$4
+
+	0x25479600:  92010070  stw r16,112(r1)
+	  48: GETL       	R16, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x70, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25479604:  7FC802A6  mflr r30
+	  53: GETL       	LR, t40
+	  54: PUTL       	t40, R30
+	  55: INCEIPL       	$4
+
+	0x25479608:  92410078  stw r18,120(r1)
+	  56: GETL       	R18, t42
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x78, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x2547960C:  9261007C  stw r19,124(r1)
+	  61: GETL       	R19, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x7C, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0x25479610:  92810080  stw r20,128(r1)
+	  66: GETL       	R20, t50
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x80, t52
+	  69: STL       	t50, (t52)
+	  70: INCEIPL       	$4
+
+	0x25479614:  92A10084  stw r21,132(r1)
+	  71: GETL       	R21, t54
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x84, t56
+	  74: STL       	t54, (t56)
+	  75: INCEIPL       	$4
+
+	0x25479618:  92C10088  stw r22,136(r1)
+	  76: GETL       	R22, t58
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x88, t60
+	  79: STL       	t58, (t60)
+	  80: INCEIPL       	$4
+
+	0x2547961C:  92E1008C  stw r23,140(r1)
+	  81: GETL       	R23, t62
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x8C, t64
+	  84: STL       	t62, (t64)
+	  85: INCEIPL       	$4
+
+	0x25479620:  93010090  stw r24,144(r1)
+	  86: GETL       	R24, t66
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x90, t68
+	  89: STL       	t66, (t68)
+	  90: INCEIPL       	$4
+
+	0x25479624:  93210094  stw r25,148(r1)
+	  91: GETL       	R25, t70
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x94, t72
+	  94: STL       	t70, (t72)
+	  95: INCEIPL       	$4
+
+	0x25479628:  93410098  stw r26,152(r1)
+	  96: GETL       	R26, t74
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x98, t76
+	  99: STL       	t74, (t76)
+	 100: INCEIPL       	$4
+
+	0x2547962C:  93A100A4  stw r29,164(r1)
+	 101: GETL       	R29, t78
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0xA4, t80
+	 104: STL       	t78, (t80)
+	 105: INCEIPL       	$4
+
+	0x25479630:  9181006C  stw r12,108(r1)
+	 106: GETL       	R12, t82
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x6C, t84
+	 109: STL       	t82, (t84)
+	 110: INCEIPL       	$4
+
+	0x25479634:  909F0010  stw r4,16(r31)
+	 111: GETL       	R4, t86
+	 112: GETL       	R31, t88
+	 113: ADDL       	$0x10, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0x25479638:  907F000C  stw r3,12(r31)
+	 116: GETL       	R3, t90
+	 117: GETL       	R31, t92
+	 118: ADDL       	$0xC, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x2547963C:  40820118  bc 4,2,0x25479754
+	 121: Jc02o       	$0x25479754
+
+
+
+. 1118 254795D4 108
+. 7C 3F 0B 78 90 E1 00 B4 80 03 01 80 92 21 00 74 3A 20 00 00 74 08 20 00 93 61 00 9C 93 81 00 A0 7C DB 33 78 93 C1 00 A8 7C BC 2B 78 92 01 00 70 7F C8 02 A6 92 41 00 78 92 61 00 7C 92 81 00 80 92 A1 00 84 92 C1 00 88 92 E1 00 8C 93 01 00 90 93 21 00 94 93 41 00 98 93 A1 00 A4 91 81 00 6C 90 9F 00 10 90 7F 00 0C 40 82 01 18
+==== BB 1119 (0x25479640) approx BBs exec'd 0 ====
+
+	0x25479640:  2D860000  cmpi cr3,r6,0
+	   0: GETL       	R6, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x3, CR
+	   3: INCEIPL       	$4
+
+	0x25479644:  418E016C  bc 12,14,0x254797B0
+	   4: Js14o       	$0x254797B0
+
+
+
+. 1119 25479640 8
+. 2D 86 00 00 41 8E 01 6C
+==== BB 1120 (0x254797B0) approx BBs exec'd 0 ====
+
+	0x254797B0:  80A30080  lwz r5,128(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x80, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254797B4:  3085FFFF  addic r4,r5,-1
+	   5: GETL       	R5, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x254797B8:  7C842110  subfe r4,r4,r4
+	   9: GETL       	R4, t6
+	  10: GETL       	R4, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x254797BC:  7F9C2038  and r28,r28,r4
+	  14: GETL       	R28, t10
+	  15: GETL       	R4, t12
+	  16: ANDL       	t10, t12
+	  17: PUTL       	t12, R28
+	  18: INCEIPL       	$4
+
+	0x254797C0:  4BFFFE88  b 0x25479648
+	  19: JMPo       	$0x25479648  ($4)
+
+
+
+. 1120 254797B0 20
+. 80 A3 00 80 30 85 FF FF 7C 84 21 10 7F 9C 20 38 4B FF FE 88
+==== BB 1121 (0x25479648) approx BBs exec'd 0 ====
+
+	0x25479648:  835E04F4  lwz r26,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547964C:  80DA0000  lwz r6,0(r26)
+	   5: GETL       	R26, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x25479650:  70C90020  andi. r9,r6,0x20
+	   9: GETL       	R6, t8
+	  10: ANDL       	$0x20, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25479654:  408207B4  bc 4,2,0x25479E08
+	  15: Jc02o       	$0x25479E08
+
+
+
+. 1121 25479648 16
+. 83 5E 04 F4 80 DA 00 00 70 C9 00 20 40 82 07 B4
+==== BB 1122 (0x25479658) approx BBs exec'd 0 ====
+
+	0x25479658:  807F000C  lwz r3,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547965C:  81430078  lwz r10,120(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x78, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25479660:  2E0A0000  cmpi cr4,r10,0
+	  10: GETL       	R10, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x4, CR
+	  13: INCEIPL       	$4
+
+	0x25479664:  409206C0  bc 4,18,0x25479D24
+	  14: Jc18o       	$0x25479D24
+
+
+
+. 1122 25479658 16
+. 80 7F 00 0C 81 43 00 78 2E 0A 00 00 40 92 06 C0
+==== BB 1123 (0x25479668) approx BBs exec'd 0 ====
+
+	0x25479668:  80C30034  lwz r6,52(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547966C:  7F84E378  or r4,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25479670:  7F65DB78  or r5,r27,r27
+	   8: GETL       	R27, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25479674:  3B800001  li r28,1
+	  11: MOVL       	$0x1, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x25479678:  83A60004  lwz r29,4(r6)
+	  14: GETL       	R6, t10
+	  15: ADDL       	$0x4, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x2547967C:  3B600000  li r27,0
+	  19: MOVL       	$0x0, t14
+	  20: PUTL       	t14, R27
+	  21: INCEIPL       	$4
+
+	0x25479680:  93BF0008  stw r29,8(r31)
+	  22: GETL       	R29, t16
+	  23: GETL       	R31, t18
+	  24: ADDL       	$0x8, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x25479684:  480071AD  bl 0x25480830
+	  27: MOVL       	$0x25479688, t20
+	  28: PUTL       	t20, LR
+	  29: JMPo-c       	$0x25480830  ($4)
+
+
+
+. 1123 25479668 32
+. 80 C3 00 34 7F 84 E3 78 7F 65 DB 78 3B 80 00 01 83 A6 00 04 3B 60 00 00 93 BF 00 08 48 00 71 AD
+==== BB 1124 (0x25480904) approx BBs exec'd 0 ====
+
+	0x25480904:  2F850000  cmpi cr7,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25480908:  3B9F0018  addi r28,r31,24
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x18, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2548090C:  419E0264  bc 12,30,0x25480B70
+	   8: Js30o       	$0x25480B70
+
+
+
+. 1124 25480904 12
+. 2F 85 00 00 3B 9F 00 18 41 9E 02 64
+==== BB 1125 (0x25480B70) approx BBs exec'd 0 ====
+
+	0x25480B70:  831E04CC  lwz r24,1228(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4CC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0x25480B74:  4BFFFDA0  b 0x25480914
+	   5: JMPo       	$0x25480914  ($4)
+
+
+
+. 1125 25480B70 8
+. 83 1E 04 CC 4B FF FD A0
+==== BB 1126 (0x25480914) approx BBs exec'd 0 ====
+
+	0x25480914:  409E02C0  bc 4,30,0x25480BD4
+	   0: Jc30o       	$0x25480BD4
+
+
+
+. 1126 25480914 4
+. 40 9E 02 C0
+==== BB 1127 (0x25480918) approx BBs exec'd 0 ====
+
+	0x25480918:  3C98FE00  addis r4,r24,-512
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0xFE000000, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x2548091C:  3C00FC00  lis r0,-1024
+	   4: MOVL       	$0xFC000000, t2
+	   5: PUTL       	t2, R0
+	   6: INCEIPL       	$4
+
+	0x25480920:  38640003  addi r3,r4,3
+	   7: GETL       	R4, t4
+	   8: ADDL       	$0x3, t4
+	   9: PUTL       	t4, R3
+	  10: INCEIPL       	$4
+
+	0x25480924:  60060002  ori r6,r0,0x2
+	  11: MOVL       	$0xFC000002, t6
+	  12: PUTL       	t6, R6
+	  13: INCEIPL       	$4
+
+	0x25480928:  39800000  li r12,0
+	  14: MOVL       	$0x0, t8
+	  15: PUTL       	t8, R12
+	  16: INCEIPL       	$4
+
+	0x2548092C:  7F033040  cmpl cr6,r3,r6
+	  17: GETL       	R3, t10
+	  18: GETL       	R6, t12
+	  19: CMPUL       	t10, t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x25480930:  618A8000  ori r10,r12,0x8000
+	  22: MOVL       	$0x8000, t16
+	  23: PUTL       	t16, R10
+	  24: INCEIPL       	$4
+
+	0x25480934:  7CBA00D0  neg r5,r26
+	  25: GETL       	R26, t18
+	  26: NEGL       	t18
+	  27: PUTL       	t18, R5
+	  28: INCEIPL       	$4
+
+	0x25480938:  7D1A5050  subf r8,r26,r10
+	  29: GETL       	R26, t20
+	  30: GETL       	R10, t22
+	  31: SUBL       	t20, t22
+	  32: PUTL       	t22, R8
+	  33: INCEIPL       	$4
+
+	0x2548093C:  54A9043E  rlwinm r9,r5,0,16,31
+	  34: GETL       	R5, t24
+	  35: ANDL       	$0xFFFF, t24
+	  36: PUTL       	t24, R9
+	  37: INCEIPL       	$4
+
+	0x25480940:  5507843E  rlwinm r7,r8,16,16,31
+	  38: GETL       	R8, t26
+	  39: SHRL       	$0x10, t26
+	  40: PUTL       	t26, R7
+	  41: INCEIPL       	$4
+
+	0x25480944:  3D60556C  lis r11,21868
+	  42: MOVL       	$0x556C0000, t28
+	  43: PUTL       	t28, R11
+	  44: INCEIPL       	$4
+
+	0x25480948:  3C807D6C  lis r4,32108
+	  45: MOVL       	$0x7D6C0000, t30
+	  46: PUTL       	t30, R4
+	  47: INCEIPL       	$4
+
+	0x2548094C:  64E03D6B  oris r0,r7,0x3D6B
+	  48: GETL       	R7, t32
+	  49: ORL       	$0x3D6B0000, t32
+	  50: PUTL       	t32, R0
+	  51: INCEIPL       	$4
+
+	0x25480950:  6523396B  oris r3,r9,0x396B
+	  52: GETL       	R9, t34
+	  53: ORL       	$0x396B0000, t34
+	  54: PUTL       	t34, R3
+	  55: INCEIPL       	$4
+
+	0x25480954:  6166083C  ori r6,r11,0x83C
+	  56: MOVL       	$0x556C083C, t36
+	  57: PUTL       	t36, R6
+	  58: INCEIPL       	$4
+
+	0x25480958:  608C5A14  ori r12,r4,0x5A14
+	  59: MOVL       	$0x7D6C5A14, t38
+	  60: PUTL       	t38, R12
+	  61: INCEIPL       	$4
+
+	0x2548095C:  901C0000  stw r0,0(r28)
+	  62: GETL       	R0, t40
+	  63: GETL       	R28, t42
+	  64: STL       	t40, (t42)
+	  65: INCEIPL       	$4
+
+	0x25480960:  907C0004  stw r3,4(r28)
+	  66: GETL       	R3, t44
+	  67: GETL       	R28, t46
+	  68: ADDL       	$0x4, t46
+	  69: STL       	t44, (t46)
+	  70: INCEIPL       	$4
+
+	0x25480964:  90DC0008  stw r6,8(r28)
+	  71: GETL       	R6, t48
+	  72: GETL       	R28, t50
+	  73: ADDL       	$0x8, t50
+	  74: STL       	t48, (t50)
+	  75: INCEIPL       	$4
+
+	0x25480968:  919C000C  stw r12,12(r28)
+	  76: GETL       	R12, t52
+	  77: GETL       	R28, t54
+	  78: ADDL       	$0xC, t54
+	  79: STL       	t52, (t54)
+	  80: INCEIPL       	$4
+
+	0x2548096C:  4099020C  bc 4,25,0x25480B78
+	  81: Jc25o       	$0x25480B78
+
+
+
+. 1127 25480918 88
+. 3C 98 FE 00 3C 00 FC 00 38 64 00 03 60 06 00 02 39 80 00 00 7F 03 30 40 61 8A 80 00 7C BA 00 D0 7D 1A 50 50 54 A9 04 3E 55 07 84 3E 3D 60 55 6C 3C 80 7D 6C 64 E0 3D 6B 65 23 39 6B 61 66 08 3C 60 8C 5A 14 90 1C 00 00 90 7C 00 04 90 DC 00 08 91 9C 00 0C 40 99 02 0C
+==== BB 1128 (0x25480B78) approx BBs exec'd 0 ====
+
+	0x25480B78:  3C980001  addis r4,r24,1
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x10000, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25480B7C:  3C7D0001  addis r3,r29,1
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x10000, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0x25480B80:  38C48000  addi r6,r4,-32768
+	   8: GETL       	R4, t4
+	   9: ADDL       	$0xFFFF8000, t4
+	  10: PUTL       	t4, R6
+	  11: INCEIPL       	$4
+
+	0x25480B84:  39838000  addi r12,r3,-32768
+	  12: GETL       	R3, t6
+	  13: ADDL       	$0xFFFF8000, t6
+	  14: PUTL       	t6, R12
+	  15: INCEIPL       	$4
+
+	0x25480B88:  54C9843E  rlwinm r9,r6,16,16,31
+	  16: GETL       	R6, t8
+	  17: SHRL       	$0x10, t8
+	  18: PUTL       	t8, R9
+	  19: INCEIPL       	$4
+
+	0x25480B8C:  57A7043E  rlwinm r7,r29,0,16,31
+	  20: GETL       	R29, t10
+	  21: ANDL       	$0xFFFF, t10
+	  22: PUTL       	t10, R7
+	  23: INCEIPL       	$4
+
+	0x25480B90:  5708043E  rlwinm r8,r24,0,16,31
+	  24: GETL       	R24, t12
+	  25: ANDL       	$0xFFFF, t12
+	  26: PUTL       	t12, R8
+	  27: INCEIPL       	$4
+
+	0x25480B94:  558B843E  rlwinm r11,r12,16,16,31
+	  28: GETL       	R12, t14
+	  29: SHRL       	$0x10, t14
+	  30: PUTL       	t14, R11
+	  31: INCEIPL       	$4
+
+	0x25480B98:  3D407D89  lis r10,32137
+	  32: MOVL       	$0x7D890000, t16
+	  33: PUTL       	t16, R10
+	  34: INCEIPL       	$4
+
+	0x25480B9C:  3CA04E80  lis r5,20096
+	  35: MOVL       	$0x4E800000, t18
+	  36: PUTL       	t18, R5
+	  37: INCEIPL       	$4
+
+	0x25480BA0:  65183980  oris r24,r8,0x3980
+	  38: GETL       	R8, t20
+	  39: ORL       	$0x39800000, t20
+	  40: PUTL       	t20, R24
+	  41: INCEIPL       	$4
+
+	0x25480BA4:  653D3D8C  oris r29,r9,0x3D8C
+	  42: GETL       	R9, t22
+	  43: ORL       	$0x3D8C0000, t22
+	  44: PUTL       	t22, R29
+	  45: INCEIPL       	$4
+
+	0x25480BA8:  614403A6  ori r4,r10,0x3A6
+	  46: MOVL       	$0x7D8903A6, t24
+	  47: PUTL       	t24, R4
+	  48: INCEIPL       	$4
+
+	0x25480BAC:  64E33980  oris r3,r7,0x3980
+	  49: GETL       	R7, t26
+	  50: ORL       	$0x39800000, t26
+	  51: PUTL       	t26, R3
+	  52: INCEIPL       	$4
+
+	0x25480BB0:  65663D8C  oris r6,r11,0x3D8C
+	  53: GETL       	R11, t28
+	  54: ORL       	$0x3D8C0000, t28
+	  55: PUTL       	t28, R6
+	  56: INCEIPL       	$4
+
+	0x25480BB4:  60A00420  ori r0,r5,0x420
+	  57: MOVL       	$0x4E800420, t30
+	  58: PUTL       	t30, R0
+	  59: INCEIPL       	$4
+
+	0x25480BB8:  901C0024  stw r0,36(r28)
+	  60: GETL       	R0, t32
+	  61: GETL       	R28, t34
+	  62: ADDL       	$0x24, t34
+	  63: STL       	t32, (t34)
+	  64: INCEIPL       	$4
+
+	0x25480BBC:  931C0010  stw r24,16(r28)
+	  65: GETL       	R24, t36
+	  66: GETL       	R28, t38
+	  67: ADDL       	$0x10, t38
+	  68: STL       	t36, (t38)
+	  69: INCEIPL       	$4
+
+	0x25480BC0:  93BC0014  stw r29,20(r28)
+	  70: GETL       	R29, t40
+	  71: GETL       	R28, t42
+	  72: ADDL       	$0x14, t42
+	  73: STL       	t40, (t42)
+	  74: INCEIPL       	$4
+
+	0x25480BC4:  909C0018  stw r4,24(r28)
+	  75: GETL       	R4, t44
+	  76: GETL       	R28, t46
+	  77: ADDL       	$0x18, t46
+	  78: STL       	t44, (t46)
+	  79: INCEIPL       	$4
+
+	0x25480BC8:  907C001C  stw r3,28(r28)
+	  80: GETL       	R3, t48
+	  81: GETL       	R28, t50
+	  82: ADDL       	$0x1C, t50
+	  83: STL       	t48, (t50)
+	  84: INCEIPL       	$4
+
+	0x25480BCC:  90DC0020  stw r6,32(r28)
+	  85: GETL       	R6, t52
+	  86: GETL       	R28, t54
+	  87: ADDL       	$0x20, t54
+	  88: STL       	t52, (t54)
+	  89: INCEIPL       	$4
+
+	0x25480BD0:  4BFFFDD0  b 0x254809A0
+	  90: JMPo       	$0x254809A0  ($4)
+
+
+
+. 1128 25480B78 92
+. 3C 98 00 01 3C 7D 00 01 38 C4 80 00 39 83 80 00 54 C9 84 3E 57 A7 04 3E 57 08 04 3E 55 8B 84 3E 3D 40 7D 89 3C A0 4E 80 65 18 39 80 65 3D 3D 8C 61 44 03 A6 64 E3 39 80 65 66 3D 8C 60 A0 04 20 90 1C 00 24 93 1C 00 10 93 BC 00 14 90 9C 00 18 90 7C 00 1C 90 DC 00 20 4B FF FD D0
+==== BB 1129 (0x254809A0) approx BBs exec'd 0 ====
+
+	0x254809A0:  39000000  li r8,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0x254809A4:  38800012  li r4,18
+	   3: MOVL       	$0x12, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x254809A8:  7F88D840  cmpl cr7,r8,r27
+	   6: GETL       	R8, t4
+	   7: GETL       	R27, t6
+	   8: CMPUL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x254809AC:  409C0054  bc 4,28,0x25480A00
+	  11: Jc28o       	$0x25480A00
+
+
+
+. 1129 254809A0 16
+. 39 00 00 00 38 80 00 12 7F 88 D8 40 40 9C 00 54
+==== BB 1130 (0x254809B0) approx BBs exec'd 0 ====
+
+	0x254809B0:  38E0FFD4  li r7,-44
+	   0: MOVL       	$0xFFFFFFD4, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x254809B4:  550A143A  rlwinm r10,r8,2,16,29
+	   3: GETL       	R8, t2
+	   4: ROLL       	$0x2, t2
+	   5: ANDL       	$0xFFFC, t2
+	   6: PUTL       	t2, R10
+	   7: INCEIPL       	$4
+
+	0x254809B8:  39080001  addi r8,r8,1
+	   8: GETL       	R8, t4
+	   9: ADDL       	$0x1, t4
+	  10: PUTL       	t4, R8
+	  11: INCEIPL       	$4
+
+	0x254809BC:  7D7B4010  subfc r11,r27,r8
+	  12: GETL       	R27, t6
+	  13: GETL       	R8, t8
+	  14: SBBL       	t6, t8  (-wCa)
+	  15: PUTL       	t8, R11
+	  16: INCEIPL       	$4
+
+	0x254809C0:  7D6B5910  subfe r11,r11,r11
+	  17: GETL       	R11, t10
+	  18: GETL       	R11, t12
+	  19: SBBL       	t10, t12  (-rCa-wCa)
+	  20: PUTL       	t12, R11
+	  21: INCEIPL       	$4
+
+	0x254809C4:  7D6B00D0  neg r11,r11
+	  22: GETL       	R11, t14
+	  23: NEGL       	t14
+	  24: PUTL       	t14, R11
+	  25: INCEIPL       	$4
+
+	0x254809C8:  21881FFF  subfic r12,r8,8191
+	  26: GETL       	R8, t16
+	  27: MOVL       	$0x1FFF, t18
+	  28: SBBL       	t16, t18  (-wCa)
+	  29: PUTL       	t18, R12
+	  30: INCEIPL       	$4
+
+	0x254809CC:  39800000  li r12,0
+	  31: MOVL       	$0x0, t20
+	  32: PUTL       	t20, R12
+	  33: INCEIPL       	$4
+
+	0x254809D0:  7D8C6114  adde r12,r12,r12
+	  34: GETL       	R12, t22
+	  35: GETL       	R12, t24
+	  36: ADCL       	t22, t24  (-rCa-wCa)
+	  37: PUTL       	t24, R12
+	  38: INCEIPL       	$4
+
+	0x254809D4:  7D696039  and. r9,r11,r12
+	  39: GETL       	R11, t26
+	  40: GETL       	R12, t28
+	  41: ANDL       	t26, t28
+	  42: PUTL       	t28, R9
+	  43: CMP0L       	t28, t30  (-rSo)
+	  44: ICRFL       	t30, $0x0, CR
+	  45: INCEIPL       	$4
+
+	0x254809D8:  5498103A  rlwinm r24,r4,2,0,29
+	  46: GETL       	R4, t32
+	  47: SHLL       	$0x2, t32
+	  48: PUTL       	t32, R24
+	  49: INCEIPL       	$4
+
+	0x254809DC:  54E501BA  rlwinm r5,r7,0,6,29
+	  50: GETL       	R7, t34
+	  51: ANDL       	$0x3FFFFFC, t34
+	  52: PUTL       	t34, R5
+	  53: INCEIPL       	$4
+
+	0x254809E0:  65403960  oris r0,r10,0x3960
+	  54: GETL       	R10, t36
+	  55: ORL       	$0x39600000, t36
+	  56: PUTL       	t36, R0
+	  57: INCEIPL       	$4
+
+	0x254809E4:  64BD4800  oris r29,r5,0x4800
+	  58: GETL       	R5, t38
+	  59: ORL       	$0x48000000, t38
+	  60: PUTL       	t38, R29
+	  61: INCEIPL       	$4
+
+	0x254809E8:  7F98FA14  add r28,r24,r31
+	  62: GETL       	R24, t40
+	  63: GETL       	R31, t42
+	  64: ADDL       	t40, t42
+	  65: PUTL       	t42, R28
+	  66: INCEIPL       	$4
+
+	0x254809EC:  7C18F92E  stwx r0,r24,r31
+	  67: GETL       	R31, t44
+	  68: GETL       	R24, t46
+	  69: ADDL       	t46, t44
+	  70: GETL       	R0, t48
+	  71: STL       	t48, (t44)
+	  72: INCEIPL       	$4
+
+	0x254809F0:  38840002  addi r4,r4,2
+	  73: GETL       	R4, t50
+	  74: ADDL       	$0x2, t50
+	  75: PUTL       	t50, R4
+	  76: INCEIPL       	$4
+
+	0x254809F4:  93BC0004  stw r29,4(r28)
+	  77: GETL       	R29, t52
+	  78: GETL       	R28, t54
+	  79: ADDL       	$0x4, t54
+	  80: STL       	t52, (t54)
+	  81: INCEIPL       	$4
+
+	0x254809F8:  38E7FFF8  addi r7,r7,-8
+	  82: MOVL       	$0xFFFFFFCC, t56
+	  83: PUTL       	t56, R7
+	  84: INCEIPL       	$4
+
+	0x254809FC:  4082FFB8  bc 4,2,0x254809B4
+	  85: Jc02o       	$0x254809B4
+
+
+
+. 1130 254809B0 80
+. 38 E0 FF D4 55 0A 14 3A 39 08 00 01 7D 7B 40 10 7D 6B 59 10 7D 6B 00 D0 21 88 1F FF 39 80 00 00 7D 8C 61 14 7D 69 60 39 54 98 10 3A 54 E5 01 BA 65 40 39 60 64 BD 48 00 7F 98 FA 14 7C 18 F9 2E 38 84 00 02 93 BC 00 04 38 E7 FF F8 40 82 FF B8
+==== BB 1131 (0x254809B4) approx BBs exec'd 0 ====
+
+	0x254809B4:  550A143A  rlwinm r10,r8,2,16,29
+	   0: GETL       	R8, t0
+	   1: ROLL       	$0x2, t0
+	   2: ANDL       	$0xFFFC, t0
+	   3: PUTL       	t0, R10
+	   4: INCEIPL       	$4
+
+	0x254809B8:  39080001  addi r8,r8,1
+	   5: GETL       	R8, t2
+	   6: ADDL       	$0x1, t2
+	   7: PUTL       	t2, R8
+	   8: INCEIPL       	$4
+
+	0x254809BC:  7D7B4010  subfc r11,r27,r8
+	   9: GETL       	R27, t4
+	  10: GETL       	R8, t6
+	  11: SBBL       	t4, t6  (-wCa)
+	  12: PUTL       	t6, R11
+	  13: INCEIPL       	$4
+
+	0x254809C0:  7D6B5910  subfe r11,r11,r11
+	  14: GETL       	R11, t8
+	  15: GETL       	R11, t10
+	  16: SBBL       	t8, t10  (-rCa-wCa)
+	  17: PUTL       	t10, R11
+	  18: INCEIPL       	$4
+
+	0x254809C4:  7D6B00D0  neg r11,r11
+	  19: GETL       	R11, t12
+	  20: NEGL       	t12
+	  21: PUTL       	t12, R11
+	  22: INCEIPL       	$4
+
+	0x254809C8:  21881FFF  subfic r12,r8,8191
+	  23: GETL       	R8, t14
+	  24: MOVL       	$0x1FFF, t16
+	  25: SBBL       	t14, t16  (-wCa)
+	  26: PUTL       	t16, R12
+	  27: INCEIPL       	$4
+
+	0x254809CC:  39800000  li r12,0
+	  28: MOVL       	$0x0, t18
+	  29: PUTL       	t18, R12
+	  30: INCEIPL       	$4
+
+	0x254809D0:  7D8C6114  adde r12,r12,r12
+	  31: GETL       	R12, t20
+	  32: GETL       	R12, t22
+	  33: ADCL       	t20, t22  (-rCa-wCa)
+	  34: PUTL       	t22, R12
+	  35: INCEIPL       	$4
+
+	0x254809D4:  7D696039  and. r9,r11,r12
+	  36: GETL       	R11, t24
+	  37: GETL       	R12, t26
+	  38: ANDL       	t24, t26
+	  39: PUTL       	t26, R9
+	  40: CMP0L       	t26, t28  (-rSo)
+	  41: ICRFL       	t28, $0x0, CR
+	  42: INCEIPL       	$4
+
+	0x254809D8:  5498103A  rlwinm r24,r4,2,0,29
+	  43: GETL       	R4, t30
+	  44: SHLL       	$0x2, t30
+	  45: PUTL       	t30, R24
+	  46: INCEIPL       	$4
+
+	0x254809DC:  54E501BA  rlwinm r5,r7,0,6,29
+	  47: GETL       	R7, t32
+	  48: ANDL       	$0x3FFFFFC, t32
+	  49: PUTL       	t32, R5
+	  50: INCEIPL       	$4
+
+	0x254809E0:  65403960  oris r0,r10,0x3960
+	  51: GETL       	R10, t34
+	  52: ORL       	$0x39600000, t34
+	  53: PUTL       	t34, R0
+	  54: INCEIPL       	$4
+
+	0x254809E4:  64BD4800  oris r29,r5,0x4800
+	  55: GETL       	R5, t36
+	  56: ORL       	$0x48000000, t36
+	  57: PUTL       	t36, R29
+	  58: INCEIPL       	$4
+
+	0x254809E8:  7F98FA14  add r28,r24,r31
+	  59: GETL       	R24, t38
+	  60: GETL       	R31, t40
+	  61: ADDL       	t38, t40
+	  62: PUTL       	t40, R28
+	  63: INCEIPL       	$4
+
+	0x254809EC:  7C18F92E  stwx r0,r24,r31
+	  64: GETL       	R31, t42
+	  65: GETL       	R24, t44
+	  66: ADDL       	t44, t42
+	  67: GETL       	R0, t46
+	  68: STL       	t46, (t42)
+	  69: INCEIPL       	$4
+
+	0x254809F0:  38840002  addi r4,r4,2
+	  70: GETL       	R4, t48
+	  71: ADDL       	$0x2, t48
+	  72: PUTL       	t48, R4
+	  73: INCEIPL       	$4
+
+	0x254809F4:  93BC0004  stw r29,4(r28)
+	  74: GETL       	R29, t50
+	  75: GETL       	R28, t52
+	  76: ADDL       	$0x4, t52
+	  77: STL       	t50, (t52)
+	  78: INCEIPL       	$4
+
+	0x254809F8:  38E7FFF8  addi r7,r7,-8
+	  79: GETL       	R7, t54
+	  80: ADDL       	$0xFFFFFFF8, t54
+	  81: PUTL       	t54, R7
+	  82: INCEIPL       	$4
+
+	0x254809FC:  4082FFB8  bc 4,2,0x254809B4
+	  83: Jc02o       	$0x254809B4
+
+
+
+. 1131 254809B4 76
+. 55 0A 14 3A 39 08 00 01 7D 7B 40 10 7D 6B 59 10 7D 6B 00 D0 21 88 1F FF 39 80 00 00 7D 8C 61 14 7D 69 60 39 54 98 10 3A 54 E5 01 BA 65 40 39 60 64 BD 48 00 7F 98 FA 14 7C 18 F9 2E 38 84 00 02 93 BC 00 04 38 E7 FF F8 40 82 FF B8
+==== BB 1132 (0x25480A00) approx BBs exec'd 0 ====
+
+	0x25480A00:  7C88D840  cmpl cr1,r8,r27
+	   0: GETL       	R8, t0
+	   1: GETL       	R27, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25480A04:  40840074  bc 4,4,0x25480A78
+	   5: Jc04o       	$0x25480A78
+
+
+
+. 1132 25480A00 8
+. 7C 88 D8 40 40 84 00 74
+==== BB 1133 (0x25480A98) approx BBs exec'd 0 ====
+
+	0x25480A98:  800B0000  lwz r0,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25480A9C:  2F000000  cmpi cr6,r0,0
+	   4: GETL       	R0, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x25480AA0:  409A00C4  bc 4,26,0x25480B64
+	   8: Jc26o       	$0x25480B64
+
+
+
+. 1133 25480A98 12
+. 80 0B 00 00 2F 00 00 00 40 9A 00 C4
+==== BB 1134 (0x25480B64) approx BBs exec'd 0 ====
+
+	0x25480B64:  7C0A1670  srawi r10,r0,2
+	   0: GETL       	R0, t0
+	   1: SARL       	$0x2, t0  (-wCa)
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0x25480B68:  7D4A0194  addze r10,r10
+	   4: GETL       	R10, t2
+	   5: ADCL       	$0x0, t2  (-rCa-wCa)
+	   6: PUTL       	t2, R10
+	   7: INCEIPL       	$4
+
+	0x25480B6C:  4BFFFF38  b 0x25480AA4
+	   8: JMPo       	$0x25480AA4  ($4)
+
+
+
+. 1134 25480B64 12
+. 7C 0A 16 70 7D 4A 01 94 4B FF FF 38
+==== BB 1135 (0x25480AA8) approx BBs exec'd 0 ====
+
+	0x25480AA8:  7EEBBB78  or r11,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x25480AAC:  39200000  li r9,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x25480AB0:  48000014  b 0x25480AC4
+	   6: JMPo       	$0x25480AC4  ($4)
+
+
+
+. 1135 25480AA8 12
+. 7E EB BB 78 39 20 00 00 48 00 00 14
+==== BB 1136 (0x25479688) approx BBs exec'd 0 ====
+
+	0x25479688:  83BF000C  lwz r29,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547968C:  38C00000  li r6,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0x25479690:  939F0034  stw r28,52(r31)
+	   8: GETL       	R28, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0x34, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25479694:  817D003C  lwz r11,60(r29)
+	  13: GETL       	R29, t10
+	  14: ADDL       	$0x3C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R11
+	  17: INCEIPL       	$4
+
+	0x25479698:  937F0030  stw r27,48(r31)
+	  18: GETL       	R27, t14
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0x30, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547969C:  2C8B0000  cmpi cr1,r11,0
+	  23: GETL       	R11, t18
+	  24: CMP0L       	t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x1, CR
+	  26: INCEIPL       	$4
+
+	0x254796A0:  937F0040  stw r27,64(r31)
+	  27: GETL       	R27, t22
+	  28: GETL       	R31, t24
+	  29: ADDL       	$0x40, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0x254796A4:  937F0028  stw r27,40(r31)
+	  32: GETL       	R27, t26
+	  33: GETL       	R31, t28
+	  34: ADDL       	$0x28, t28
+	  35: STL       	t26, (t28)
+	  36: INCEIPL       	$4
+
+	0x254796A8:  937F003C  stw r27,60(r31)
+	  37: GETL       	R27, t30
+	  38: GETL       	R31, t32
+	  39: ADDL       	$0x3C, t32
+	  40: STL       	t30, (t32)
+	  41: INCEIPL       	$4
+
+	0x254796AC:  90DF0024  stw r6,36(r31)
+	  42: GETL       	R6, t34
+	  43: GETL       	R31, t36
+	  44: ADDL       	$0x24, t36
+	  45: STL       	t34, (t36)
+	  46: INCEIPL       	$4
+
+	0x254796B0:  41860018  bc 12,6,0x254796C8
+	  47: Js06o       	$0x254796C8
+
+
+
+. 1136 25479688 44
+. 83 BF 00 0C 38 C0 00 00 93 9F 00 34 81 7D 00 3C 93 7F 00 30 2C 8B 00 00 93 7F 00 40 93 7F 00 28 93 7F 00 3C 90 DF 00 24 41 86 00 18
+==== BB 1137 (0x254796B4) approx BBs exec'd 0 ====
+
+	0x254796B4:  810B0004  lwz r8,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254796B8:  809D0040  lwz r4,64(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x40, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x254796BC:  911F0020  stw r8,32(r31)
+	  10: GETL       	R8, t8
+	  11: GETL       	R31, t10
+	  12: ADDL       	$0x20, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x254796C0:  80C40004  lwz r6,4(r4)
+	  15: GETL       	R4, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R6
+	  19: INCEIPL       	$4
+
+	0x254796C4:  90DF0024  stw r6,36(r31)
+	  20: GETL       	R6, t16
+	  21: GETL       	R31, t18
+	  22: ADDL       	$0x24, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x254796C8:  2E030000  cmpi cr4,r3,0
+	  25: GETL       	R3, t20
+	  26: CMP0L       	t20, t22  (-rSo)
+	  27: ICRFL       	t22, $0x4, CR
+	  28: INCEIPL       	$4
+
+	0x254796CC:  41920010  bc 12,18,0x254796DC
+	  29: Js18o       	$0x254796DC
+
+
+
+. 1137 254796B4 28
+. 81 0B 00 04 80 9D 00 40 91 1F 00 20 80 C4 00 04 90 DF 00 24 2E 03 00 00 41 92 00 10
+==== BB 1138 (0x254796D0) approx BBs exec'd 0 ====
+
+	0x254796D0:  817D0070  lwz r11,112(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x70, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254796D4:  2F0B0000  cmpi cr6,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x254796D8:  409A033C  bc 4,26,0x25479A14
+	   9: Jc26o       	$0x25479A14
+
+
+
+. 1138 254796D0 12
+. 81 7D 00 70 2F 0B 00 00 40 9A 03 3C
+==== BB 1139 (0x25479A14) approx BBs exec'd 0 ====
+
+	0x25479A14:  835D007C  lwz r26,124(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x7C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479A18:  833D0028  lwz r25,40(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x28, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x25479A1C:  829A0004  lwz r20,4(r26)
+	  10: GETL       	R26, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R20
+	  14: INCEIPL       	$4
+
+	0x25479A20:  82BF0020  lwz r21,32(r31)
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x20, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R21
+	  19: INCEIPL       	$4
+
+	0x25479A24:  929F002C  stw r20,44(r31)
+	  20: GETL       	R20, t16
+	  21: GETL       	R31, t18
+	  22: ADDL       	$0x2C, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25479A28:  82590004  lwz r18,4(r25)
+	  25: GETL       	R25, t20
+	  26: ADDL       	$0x4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R18
+	  29: INCEIPL       	$4
+
+	0x25479A2C:  7D553214  add r10,r21,r6
+	  30: GETL       	R21, t24
+	  31: GETL       	R6, t26
+	  32: ADDL       	t24, t26
+	  33: PUTL       	t26, R10
+	  34: INCEIPL       	$4
+
+	0x25479A30:  7D35A050  subf r9,r21,r20
+	  35: GETL       	R21, t28
+	  36: GETL       	R20, t30
+	  37: SUBL       	t28, t30
+	  38: PUTL       	t30, R9
+	  39: INCEIPL       	$4
+
+	0x25479A34:  7E149214  add r16,r20,r18
+	  40: GETL       	R20, t32
+	  41: GETL       	R18, t34
+	  42: ADDL       	t32, t34
+	  43: PUTL       	t34, R16
+	  44: INCEIPL       	$4
+
+	0x25479A38:  913F0024  stw r9,36(r31)
+	  45: GETL       	R9, t36
+	  46: GETL       	R31, t38
+	  47: ADDL       	$0x24, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0x25479A3C:  7E705050  subf r19,r16,r10
+	  50: GETL       	R16, t40
+	  51: GETL       	R10, t42
+	  52: SUBL       	t40, t42
+	  53: PUTL       	t42, R19
+	  54: INCEIPL       	$4
+
+	0x25479A40:  925F0030  stw r18,48(r31)
+	  55: GETL       	R18, t44
+	  56: GETL       	R31, t46
+	  57: ADDL       	$0x30, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0x25479A44:  927F003C  stw r19,60(r31)
+	  60: GETL       	R19, t48
+	  61: GETL       	R31, t50
+	  62: ADDL       	$0x3C, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0x25479A48:  921F0038  stw r16,56(r31)
+	  65: GETL       	R16, t52
+	  66: GETL       	R31, t54
+	  67: ADDL       	$0x38, t54
+	  68: STL       	t52, (t54)
+	  69: INCEIPL       	$4
+
+	0x25479A4C:  4BFFFC90  b 0x254796DC
+	  70: JMPo       	$0x254796DC  ($4)
+
+
+
+. 1139 25479A14 60
+. 83 5D 00 7C 83 3D 00 28 82 9A 00 04 82 BF 00 20 92 9F 00 2C 82 59 00 04 7D 55 32 14 7D 35 A0 50 7E 14 92 14 91 3F 00 24 7E 70 50 50 92 5F 00 30 92 7F 00 3C 92 1F 00 38 4B FF FC 90
+==== BB 1140 (0x254796DC) approx BBs exec'd 0 ====
+
+	0x254796DC:  821E04C8  lwz r16,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R16
+	   4: INCEIPL       	$4
+
+	0x254796E0:  3ABF0008  addi r21,r31,8
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R21
+	   8: INCEIPL       	$4
+
+	0x254796E4:  3A400002  li r18,2
+	   9: MOVL       	$0x2, t6
+	  10: PUTL       	t6, R18
+	  11: INCEIPL       	$4
+
+	0x254796E8:  80B50020  lwz r5,32(r21)
+	  12: GETL       	R21, t8
+	  13: ADDL       	$0x20, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0x254796EC:  7FB6EB78  or r22,r29,r29
+	  17: GETL       	R29, t12
+	  18: PUTL       	t12, R22
+	  19: INCEIPL       	$4
+
+	0x254796F0:  81750018  lwz r11,24(r21)
+	  20: GETL       	R21, t14
+	  21: ADDL       	$0x18, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R11
+	  24: INCEIPL       	$4
+
+	0x254796F4:  2C050000  cmpi cr0,r5,0
+	  25: GETL       	R5, t18
+	  26: CMP0L       	t18, t20  (-rSo)
+	  27: ICRFL       	t20, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0x254796F8:  8115001C  lwz r8,28(r21)
+	  29: GETL       	R21, t22
+	  30: ADDL       	$0x1C, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R8
+	  33: INCEIPL       	$4
+
+	0x254796FC:  7D695B78  or r9,r11,r11
+	  34: GETL       	R11, t26
+	  35: PUTL       	t26, R9
+	  36: INCEIPL       	$4
+
+	0x25479700:  833D0000  lwz r25,0(r29)
+	  37: GETL       	R29, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R25
+	  40: INCEIPL       	$4
+
+	0x25479704:  7F0B4214  add r24,r11,r8
+	  41: GETL       	R11, t32
+	  42: GETL       	R8, t34
+	  43: ADDL       	t32, t34
+	  44: PUTL       	t34, R24
+	  45: INCEIPL       	$4
+
+	0x25479708:  418200BC  bc 12,2,0x254797C4
+	  46: Js02o       	$0x254797C4
+
+
+
+. 1140 254796DC 48
+. 82 1E 04 C8 3A BF 00 08 3A 40 00 02 80 B5 00 20 7F B6 EB 78 81 75 00 18 2C 05 00 00 81 15 00 1C 7D 69 5B 78 83 3D 00 00 7F 0B 42 14 41 82 00 BC
+==== BB 1141 (0x254797C4) approx BBs exec'd 0 ====
+
+	0x254797C4:  815D00C0  lwz r10,192(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254797C8:  38E00000  li r7,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x254797CC:  807D0038  lwz r3,56(r29)
+	   8: GETL       	R29, t6
+	   9: ADDL       	$0x38, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x254797D0:  2F8A0000  cmpi cr7,r10,0
+	  13: GETL       	R10, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x254797D4:  82630004  lwz r19,4(r3)
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0x4, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R19
+	  21: INCEIPL       	$4
+
+	0x254797D8:  419E0008  bc 12,30,0x254797E0
+	  22: Js30o       	$0x254797E0
+
+
+
+. 1141 254797C4 24
+. 81 5D 00 C0 38 E0 00 00 80 7D 00 38 2F 8A 00 00 82 63 00 04 41 9E 00 08
+==== BB 1142 (0x254797DC) approx BBs exec'd 0 ====
+
+	0x254797DC:  80EA0004  lwz r7,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254797E0:  3C00AAAA  lis r0,-21846
+	   5: MOVL       	$0xAAAA0000, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x254797E4:  600CAAAB  ori r12,r0,0xAAAB
+	   8: MOVL       	$0xAAAAAAAB, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0x254797E8:  7EE86016  mulhwu r23,r8,r12
+	  11: GETL       	R8, t8
+	  12: GETL       	R12, t10
+	  13: UMULHL       	t8, t10
+	  14: PUTL       	t10, R23
+	  15: INCEIPL       	$4
+
+	0x254797EC:  56E9E8FE  rlwinm r9,r23,29,3,31
+	  16: GETL       	R23, t12
+	  17: SHRL       	$0x3, t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0x254797F0:  7C893840  cmpl cr1,r9,r7
+	  20: GETL       	R9, t14
+	  21: GETL       	R7, t16
+	  22: CMPUL       	t14, t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x1, CR
+	  24: INCEIPL       	$4
+
+	0x254797F4:  40850008  bc 4,5,0x254797FC
+	  25: Jc05o       	$0x254797FC
+
+
+
+. 1142 254797DC 28
+. 80 EA 00 04 3C 00 AA AA 60 0C AA AB 7E E8 60 16 56 E9 E8 FE 7C 89 38 40 40 85 00 08
+==== BB 1143 (0x254797F8) approx BBs exec'd 0 ====
+
+	0x254797F8:  7CE93B78  or r9,r7,r7
+	   0: GETL       	R7, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x254797FC:  3B7001B8  addi r27,r16,440
+	   3: GETL       	R16, t2
+	   4: ADDL       	$0x1B8, t2
+	   5: PUTL       	t2, R27
+	   6: INCEIPL       	$4
+
+	0x25479800:  1CE9000C  mulli r7,r9,12
+	   7: GETL       	R9, t4
+	   8: MULL       	$0xC, t4
+	   9: PUTL       	t4, R7
+	  10: INCEIPL       	$4
+
+	0x25479804:  7E1DD800  cmp cr4,r29,r27
+	  11: GETL       	R29, t6
+	  12: GETL       	R27, t8
+	  13: CMPL       	t6, t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x4, CR
+	  15: INCEIPL       	$4
+
+	0x25479808:  7F8B3A14  add r28,r11,r7
+	  16: GETL       	R11, t12
+	  17: GETL       	R7, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0x2547980C:  41920018  bc 12,18,0x25479824
+	  21: Js18o       	$0x25479824
+
+
+
+. 1143 254797F8 24
+. 7C E9 3B 78 3B 70 01 B8 1C E9 00 0C 7E 1D D8 00 7F 8B 3A 14 41 92 00 18
+==== BB 1144 (0x25479810) approx BBs exec'd 0 ====
+
+	0x25479810:  2F190000  cmpi cr6,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25479814:  409A01CC  bc 4,26,0x254799E0
+	   4: Jc26o       	$0x254799E0
+
+
+
+. 1144 25479810 8
+. 2F 19 00 00 40 9A 01 CC
+==== BB 1145 (0x25479818) approx BBs exec'd 0 ====
+
+	0x25479818:  80DD011C  lwz r6,284(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x11C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547981C:  2C060000  cmpi cr0,r6,0
+	   5: GETL       	R6, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25479820:  418201C0  bc 12,2,0x254799E0
+	   9: Js02o       	$0x254799E0
+
+
+
+. 1145 25479818 12
+. 80 DD 01 1C 2C 06 00 00 41 82 01 C0
+==== BB 1146 (0x25479824) approx BBs exec'd 0 ====
+
+	0x25479824:  813D00E4  lwz r9,228(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xE4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25479828:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547982C:  419E0280  bc 12,30,0x25479AAC
+	   9: Js30o       	$0x25479AAC
+
+
+
+. 1146 25479824 12
+. 81 3D 00 E4 2F 89 00 00 41 9E 02 80
+==== BB 1147 (0x25479830) approx BBs exec'd 0 ====
+
+	0x25479830:  7C9CC040  cmpl cr1,r28,r24
+	   0: GETL       	R28, t0
+	   1: GETL       	R24, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25479834:  82890004  lwz r20,4(r9)
+	   5: GETL       	R9, t6
+	   6: ADDL       	$0x4, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R20
+	   9: INCEIPL       	$4
+
+	0x25479838:  40A4FEE8  bc 5,4,0x25479720
+	  10: Jc04o       	$0x25479720
+
+
+
+. 1147 25479830 12
+. 7C 9C C0 40 82 89 00 04 40 A4 FE E8
+==== BB 1148 (0x2547983C) approx BBs exec'd 0 ====
+
+	0x2547983C:  819C0004  lwz r12,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x25479840:  81560188  lwz r10,392(r22)
+	   5: GETL       	R22, t4
+	   6: ADDL       	$0x188, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25479844:  5597C23E  rlwinm r23,r12,24,8,31
+	  10: GETL       	R12, t8
+	  11: SHRL       	$0x8, t8
+	  12: PUTL       	t8, R23
+	  13: INCEIPL       	$4
+
+	0x25479848:  559D063E  rlwinm r29,r12,0,24,31
+	  14: GETL       	R12, t10
+	  15: ANDL       	$0xFF, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0x2547984C:  56EB083C  rlwinm r11,r23,1,0,30
+	  18: GETL       	R23, t12
+	  19: SHLL       	$0x1, t12
+	  20: PUTL       	t12, R11
+	  21: INCEIPL       	$4
+
+	0x25479850:  2E1D0016  cmpi cr4,r29,22
+	  22: GETL       	R29, t14
+	  23: MOVL       	$0x16, t18
+	  24: CMPL       	t14, t18, t16  (-rSo)
+	  25: ICRFL       	t16, $0x4, CR
+	  26: INCEIPL       	$4
+
+	0x25479854:  7CABA22E  lhzx r5,r11,r20
+	  27: GETL       	R20, t20
+	  28: GETL       	R11, t22
+	  29: ADDL       	t22, t20
+	  30: LDW       	(t20), t24
+	  31: PUTL       	t24, R5
+	  32: INCEIPL       	$4
+
+	0x25479858:  56E32036  rlwinm r3,r23,4,0,27
+	  33: GETL       	R23, t26
+	  34: SHLL       	$0x4, t26
+	  35: PUTL       	t26, R3
+	  36: INCEIPL       	$4
+
+	0x2547985C:  817C0000  lwz r11,0(r28)
+	  37: GETL       	R28, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R11
+	  40: INCEIPL       	$4
+
+	0x25479860:  7F639A14  add r27,r3,r19
+	  41: GETL       	R3, t32
+	  42: GETL       	R19, t34
+	  43: ADDL       	t32, t34
+	  44: PUTL       	t34, R27
+	  45: INCEIPL       	$4
+
+	0x25479864:  54BA2376  rlwinm r26,r5,4,13,27
+	  46: GETL       	R5, t36
+	  47: ROLL       	$0x4, t36
+	  48: ANDL       	$0x7FFF0, t36
+	  49: PUTL       	t36, R26
+	  50: INCEIPL       	$4
+
+	0x25479868:  937F0050  stw r27,80(r31)
+	  51: GETL       	R27, t38
+	  52: GETL       	R31, t40
+	  53: ADDL       	$0x50, t40
+	  54: STL       	t38, (t40)
+	  55: INCEIPL       	$4
+
+	0x2547986C:  7D4AD214  add r10,r10,r26
+	  56: GETL       	R10, t42
+	  57: GETL       	R26, t44
+	  58: ADDL       	t42, t44
+	  59: PUTL       	t44, R10
+	  60: INCEIPL       	$4
+
+	0x25479870:  7EEBCA14  add r23,r11,r25
+	  61: GETL       	R11, t46
+	  62: GETL       	R25, t48
+	  63: ADDL       	t46, t48
+	  64: PUTL       	t48, R23
+	  65: INCEIPL       	$4
+
+	0x25479874:  7F66DB78  or r6,r27,r27
+	  66: GETL       	R27, t50
+	  67: PUTL       	t50, R6
+	  68: INCEIPL       	$4
+
+	0x25479878:  3B400000  li r26,0
+	  69: MOVL       	$0x0, t52
+	  70: PUTL       	t52, R26
+	  71: INCEIPL       	$4
+
+	0x2547987C:  419201DC  bc 12,18,0x25479A58
+	  72: Js18o       	$0x25479A58
+
+
+
+. 1148 2547983C 68
+. 81 9C 00 04 81 56 01 88 55 97 C2 3E 55 9D 06 3E 56 EB 08 3C 2E 1D 00 16 7C AB A2 2E 56 E3 20 36 81 7C 00 00 7F 63 9A 14 54 BA 23 76 93 7F 00 50 7D 4A D2 14 7E EB CA 14 7F 66 DB 78 3B 40 00 00 41 92 01 DC
+==== BB 1149 (0x25479880) approx BBs exec'd 0 ====
+
+	0x25479880:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25479884:  419A0148  bc 12,26,0x254799CC
+	   4: Js26o       	$0x254799CC
+
+
+
+. 1149 25479880 8
+. 2F 1D 00 00 41 9A 01 48
+==== BB 1150 (0x25479888) approx BBs exec'd 0 ====
+
+	0x25479888:  889B000C  lbz r4,12(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547988C:  5488E13F  rlwinm. r8,r4,28,4,31
+	   5: GETL       	R4, t4
+	   6: SHRL       	$0x4, t4
+	   7: PUTL       	t4, R8
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25479890:  4182062C  bc 12,2,0x25479EBC
+	  11: Js02o       	$0x25479EBC
+
+
+
+. 1150 25479888 12
+. 88 9B 00 0C 54 88 E1 3F 41 82 06 2C
+==== BB 1151 (0x25479894) approx BBs exec'd 0 ====
+
+	0x25479894:  8B5B000C  lbz r26,12(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479898:  5749E13F  rlwinm. r9,r26,28,4,31
+	   5: GETL       	R26, t4
+	   6: SHRL       	$0x4, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2547989C:  418201B4  bc 12,2,0x25479A50
+	  11: Js02o       	$0x25479A50
+
+
+
+. 1151 25479894 12
+. 8B 5B 00 0C 57 49 E1 3F 41 82 01 B4
+==== BB 1152 (0x254798A0) approx BBs exec'd 0 ====
+
+	0x254798A0:  809F000C  lwz r4,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254798A4:  2E1D0015  cmpi cr4,r29,21
+	   5: GETL       	R29, t4
+	   6: MOVL       	$0x15, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x4, CR
+	   9: INCEIPL       	$4
+
+	0x254798A8:  2C9D000A  cmpi cr1,r29,10
+	  10: GETL       	R29, t10
+	  11: MOVL       	$0xA, t14
+	  12: CMPL       	t10, t14, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x254798AC:  2F1D0013  cmpi cr6,r29,19
+	  15: GETL       	R29, t16
+	  16: MOVL       	$0x13, t20
+	  17: CMPL       	t16, t20, t18  (-rSo)
+	  18: ICRFL       	t18, $0x6, CR
+	  19: INCEIPL       	$4
+
+	0x254798B0:  80C40208  lwz r6,520(r4)
+	  20: GETL       	R4, t22
+	  21: ADDL       	$0x208, t22
+	  22: LDL       	(t22), t24
+	  23: PUTL       	t24, R6
+	  24: INCEIPL       	$4
+
+	0x254798B4:  7F86D800  cmp cr7,r6,r27
+	  25: GETL       	R6, t26
+	  26: GETL       	R27, t28
+	  27: CMPL       	t26, t28, t30  (-rSo)
+	  28: ICRFL       	t30, $0x7, CR
+	  29: INCEIPL       	$4
+
+	0x254798B8:  419E062C  bc 12,30,0x25479EE4
+	  30: Js30o       	$0x25479EE4
+
+
+
+. 1152 254798A0 28
+. 80 9F 00 0C 2E 1D 00 15 2C 9D 00 0A 2F 1D 00 13 80 C4 02 08 7F 86 D8 00 41 9E 06 2C
+==== BB 1153 (0x254798BC) approx BBs exec'd 0 ====
+
+	0x254798BC:  7CC00026  mfcr r6
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x254798C0:  54C69FFE  rlwinm r6,r6,19,31,31
+	   3: GETL       	R6, t2
+	   4: ROLL       	$0x13, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R6
+	   7: INCEIPL       	$4
+
+	0x254798C4:  39000000  li r8,0
+	   8: MOVL       	$0x0, t4
+	   9: PUTL       	t4, R8
+	  10: INCEIPL       	$4
+
+	0x254798C8:  7F400026  mfcr r26
+	  11: GETL       	CR, t6
+	  12: PUTL       	t6, R26
+	  13: INCEIPL       	$4
+
+	0x254798CC:  575A3FFE  rlwinm r26,r26,7,31,31
+	  14: GETL       	R26, t8
+	  15: ROLL       	$0x7, t8
+	  16: ANDL       	$0x1, t8
+	  17: PUTL       	t8, R26
+	  18: INCEIPL       	$4
+
+	0x254798D0:  7CCBD379  or. r11,r6,r26
+	  19: GETL       	R6, t10
+	  20: GETL       	R26, t12
+	  21: ORL       	t12, t10
+	  22: PUTL       	t10, R11
+	  23: CMP0L       	t10, t14  (-rSo)
+	  24: ICRFL       	t14, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x254798D4:  40820130  bc 4,2,0x25479A04
+	  26: Jc02o       	$0x25479A04
+
+
+
+. 1153 254798BC 28
+. 7C C0 00 26 54 C6 9F FE 39 00 00 00 7F 40 00 26 57 5A 3F FE 7C CB D3 79 40 82 01 30
+==== BB 1154 (0x254798D8) approx BBs exec'd 0 ====
+
+	0x254798D8:  387DFFBC  addi r3,r29,-68
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xFFFFFFBC, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x254798DC:  6BA50002  xori r5,r29,0x2
+	   4: GETL       	R29, t2
+	   5: XORL       	$0x2, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0x254798E0:  21650000  subfic r11,r5,0
+	   8: GETL       	R5, t4
+	   9: MOVL       	$0x0, t6
+	  10: SBBL       	t4, t6  (-wCa)
+	  11: PUTL       	t6, R11
+	  12: INCEIPL       	$4
+
+	0x254798E4:  7CAB2914  adde r5,r11,r5
+	  13: GETL       	R11, t8
+	  14: GETL       	R5, t10
+	  15: ADCL       	t8, t10  (-rCa-wCa)
+	  16: PUTL       	t10, R5
+	  17: INCEIPL       	$4
+
+	0x254798E8:  2123000A  subfic r9,r3,10
+	  18: GETL       	R3, t12
+	  19: MOVL       	$0xA, t14
+	  20: SBBL       	t12, t14  (-wCa)
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0x254798EC:  39200000  li r9,0
+	  23: MOVL       	$0x0, t16
+	  24: PUTL       	t16, R9
+	  25: INCEIPL       	$4
+
+	0x254798F0:  7D294914  adde r9,r9,r9
+	  26: GETL       	R9, t18
+	  27: GETL       	R9, t20
+	  28: ADCL       	t18, t20  (-rCa-wCa)
+	  29: PUTL       	t20, R9
+	  30: INCEIPL       	$4
+
+	0x254798F4:  7D2B2B79  or. r11,r9,r5
+	  31: GETL       	R9, t22
+	  32: GETL       	R5, t24
+	  33: ORL       	t24, t22
+	  34: PUTL       	t22, R11
+	  35: CMP0L       	t22, t26  (-rSo)
+	  36: ICRFL       	t26, $0x0, CR
+	  37: INCEIPL       	$4
+
+	0x254798F8:  4082010C  bc 4,2,0x25479A04
+	  38: Jc02o       	$0x25479A04
+
+
+
+. 1154 254798D8 36
+. 38 7D FF BC 6B A5 00 02 21 65 00 00 7C AB 29 14 21 23 00 0A 39 20 00 00 7D 29 49 14 7D 2B 2B 79 40 82 01 0C
+==== BB 1155 (0x254798FC) approx BBs exec'd 0 ====
+
+	0x254798FC:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25479900:  5400DFFE  rlwinm r0,r0,27,31,31
+	   3: GETL       	R0, t2
+	   4: ROLL       	$0x1B, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x25479904:  2C8A0000  cmpi cr1,r10,0
+	   8: GETL       	R10, t4
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x25479908:  93640208  stw r27,520(r4)
+	  12: GETL       	R27, t8
+	  13: GETL       	R4, t10
+	  14: ADDL       	$0x208, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x2547990C:  38E00000  li r7,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R7
+	  19: INCEIPL       	$4
+
+	0x25479910:  540C083C  rlwinm r12,r0,1,0,30
+	  20: GETL       	R0, t14
+	  21: SHLL       	$0x1, t14
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0x25479914:  39600001  li r11,1
+	  24: MOVL       	$0x1, t16
+	  25: PUTL       	t16, R11
+	  26: INCEIPL       	$4
+
+	0x25479918:  7D884378  or r8,r12,r8
+	  27: GETL       	R12, t18
+	  28: GETL       	R8, t20
+	  29: ORL       	t20, t18
+	  30: PUTL       	t18, R8
+	  31: INCEIPL       	$4
+
+	0x2547991C:  9104020C  stw r8,524(r4)
+	  32: GETL       	R8, t22
+	  33: GETL       	R4, t24
+	  34: ADDL       	$0x20C, t24
+	  35: STL       	t22, (t24)
+	  36: INCEIPL       	$4
+
+	0x25479920:  41860018  bc 12,6,0x25479938
+	  37: Js06o       	$0x25479938
+
+
+
+. 1155 254798FC 40
+. 7C 00 00 26 54 00 DF FE 2C 8A 00 00 93 64 02 08 38 E0 00 00 54 0C 08 3C 39 60 00 01 7D 88 43 78 91 04 02 0C 41 86 00 18
+==== BB 1156 (0x25479924) approx BBs exec'd 0 ====
+
+	0x25479924:  834A0004  lwz r26,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479928:  2E1A0000  cmpi cr4,r26,0
+	   5: GETL       	R26, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x2547992C:  4192000C  bc 12,18,0x25479938
+	   9: Js18o       	$0x25479938
+
+
+
+. 1156 25479924 12
+. 83 4A 00 04 2E 1A 00 00 41 92 00 0C
+==== BB 1157 (0x25479930) approx BBs exec'd 0 ====
+
+	0x25479930:  7D475378  or r7,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x25479934:  39600000  li r11,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x25479938:  80DF0050  lwz r6,80(r31)
+	   6: GETL       	R31, t4
+	   7: ADDL       	$0x50, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x2547993C:  7D695B78  or r9,r11,r11
+	  11: GETL       	R11, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0x25479940:  815F0008  lwz r10,8(r31)
+	  14: GETL       	R31, t10
+	  15: ADDL       	$0x8, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x25479944:  80A60000  lwz r5,0(r6)
+	  19: GETL       	R6, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R5
+	  22: INCEIPL       	$4
+
+	0x25479948:  80DF0010  lwz r6,16(r31)
+	  23: GETL       	R31, t18
+	  24: ADDL       	$0x10, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R6
+	  27: INCEIPL       	$4
+
+	0x2547994C:  7C655214  add r3,r5,r10
+	  28: GETL       	R5, t22
+	  29: GETL       	R10, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R3
+	  32: INCEIPL       	$4
+
+	0x25479950:  38BF0050  addi r5,r31,80
+	  33: GETL       	R31, t26
+	  34: ADDL       	$0x50, t26
+	  35: PUTL       	t26, R5
+	  36: INCEIPL       	$4
+
+	0x25479954:  39400000  li r10,0
+	  37: MOVL       	$0x0, t28
+	  38: PUTL       	t28, R10
+	  39: INCEIPL       	$4
+
+	0x25479958:  4BFFEC4D  bl 0x254785A4
+	  40: MOVL       	$0x2547995C, t30
+	  41: PUTL       	t30, LR
+	  42: JMPo-c       	$0x254785A4  ($4)
+
+
+
+. 1157 25479930 44
+. 7D 47 53 78 39 60 00 00 80 DF 00 50 7D 69 5B 78 81 5F 00 08 80 A6 00 00 80 DF 00 10 7C 65 52 14 38 BF 00 50 39 40 00 00 4B FF EC 4D
+==== BB 1158 _dl_lookup_symbol_x(0x254785A4) approx BBs exec'd 0 ====
+
+	0x254785A4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254785A8:  7D800026  mfcr r12
+	   3: GETL       	CR, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0x254785AC:  9421FF60  stwu r1,-160(r1)
+	   6: GETL       	R1, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xFFFFFF60, t6
+	   9: PUTL       	t6, R1
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x254785B0:  4801EA51  bl 0x25497000
+	  12: MOVL       	$0x254785B4, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1158 254785A4 16
+. 7C 08 02 A6 7D 80 00 26 94 21 FF 60 48 01 EA 51
+==== BB 1159 (0x254785B4) approx BBs exec'd 0 ====
+
+	0x254785B4:  9261006C  stw r19,108(r1)
+	   0: GETL       	R19, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x6C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254785B8:  7C932378  or r19,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R19
+	   7: INCEIPL       	$4
+
+	0x254785BC:  92810070  stw r20,112(r1)
+	   8: GETL       	R20, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x70, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254785C0:  7D144378  or r20,r8,r8
+	  13: GETL       	R8, t10
+	  14: PUTL       	t10, R20
+	  15: INCEIPL       	$4
+
+	0x254785C4:  92A10074  stw r21,116(r1)
+	  16: GETL       	R21, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x74, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x254785C8:  7CD53378  or r21,r6,r6
+	  21: GETL       	R6, t16
+	  22: PUTL       	t16, R21
+	  23: INCEIPL       	$4
+
+	0x254785CC:  92C10078  stw r22,120(r1)
+	  24: GETL       	R22, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x78, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x254785D0:  7CB62B78  or r22,r5,r5
+	  29: GETL       	R5, t22
+	  30: PUTL       	t22, R22
+	  31: INCEIPL       	$4
+
+	0x254785D4:  92E1007C  stw r23,124(r1)
+	  32: GETL       	R23, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x7C, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x254785D8:  7D374B78  or r23,r9,r9
+	  37: GETL       	R9, t28
+	  38: PUTL       	t28, R23
+	  39: INCEIPL       	$4
+
+	0x254785DC:  93010080  stw r24,128(r1)
+	  40: GETL       	R24, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x80, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x254785E0:  7CF83B78  or r24,r7,r7
+	  45: GETL       	R7, t34
+	  46: PUTL       	t34, R24
+	  47: INCEIPL       	$4
+
+	0x254785E4:  93210084  stw r25,132(r1)
+	  48: GETL       	R25, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x84, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x254785E8:  7C791B78  or r25,r3,r3
+	  53: GETL       	R3, t40
+	  54: PUTL       	t40, R25
+	  55: INCEIPL       	$4
+
+	0x254785EC:  93410088  stw r26,136(r1)
+	  56: GETL       	R26, t42
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x88, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x254785F0:  7D5A5378  or r26,r10,r10
+	  61: GETL       	R10, t46
+	  62: PUTL       	t46, R26
+	  63: INCEIPL       	$4
+
+	0x254785F4:  93810090  stw r28,144(r1)
+	  64: GETL       	R28, t48
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x90, t50
+	  67: STL       	t48, (t50)
+	  68: INCEIPL       	$4
+
+	0x254785F8:  3B800000  li r28,0
+	  69: MOVL       	$0x0, t52
+	  70: PUTL       	t52, R28
+	  71: INCEIPL       	$4
+
+	0x254785FC:  93C10098  stw r30,152(r1)
+	  72: GETL       	R30, t54
+	  73: GETL       	R1, t56
+	  74: ADDL       	$0x98, t56
+	  75: STL       	t54, (t56)
+	  76: INCEIPL       	$4
+
+	0x25478600:  7FC802A6  mflr r30
+	  77: GETL       	LR, t58
+	  78: PUTL       	t58, R30
+	  79: INCEIPL       	$4
+
+	0x25478604:  93E1009C  stw r31,156(r1)
+	  80: GETL       	R31, t60
+	  81: GETL       	R1, t62
+	  82: ADDL       	$0x9C, t62
+	  83: STL       	t60, (t62)
+	  84: INCEIPL       	$4
+
+	0x25478608:  7C3F0B78  or r31,r1,r1
+	  85: GETL       	R1, t64
+	  86: PUTL       	t64, R31
+	  87: INCEIPL       	$4
+
+	0x2547860C:  92210064  stw r17,100(r1)
+	  88: GETL       	R17, t66
+	  89: GETL       	R1, t68
+	  90: ADDL       	$0x64, t68
+	  91: STL       	t66, (t68)
+	  92: INCEIPL       	$4
+
+	0x25478610:  92410068  stw r18,104(r1)
+	  93: GETL       	R18, t70
+	  94: GETL       	R1, t72
+	  95: ADDL       	$0x68, t72
+	  96: STL       	t70, (t72)
+	  97: INCEIPL       	$4
+
+	0x25478614:  9361008C  stw r27,140(r1)
+	  98: GETL       	R27, t74
+	  99: GETL       	R1, t76
+	 100: ADDL       	$0x8C, t76
+	 101: STL       	t74, (t76)
+	 102: INCEIPL       	$4
+
+	0x25478618:  93A10094  stw r29,148(r1)
+	 103: GETL       	R29, t78
+	 104: GETL       	R1, t80
+	 105: ADDL       	$0x94, t80
+	 106: STL       	t78, (t80)
+	 107: INCEIPL       	$4
+
+	0x2547861C:  900100A4  stw r0,164(r1)
+	 108: GETL       	R0, t82
+	 109: GETL       	R1, t84
+	 110: ADDL       	$0xA4, t84
+	 111: STL       	t82, (t84)
+	 112: INCEIPL       	$4
+
+	0x25478620:  91810060  stw r12,96(r1)
+	 113: GETL       	R12, t86
+	 114: GETL       	R1, t88
+	 115: ADDL       	$0x60, t88
+	 116: STL       	t86, (t88)
+	 117: INCEIPL       	$4
+
+	0x25478624:  88030000  lbz r0,0(r3)
+	 118: GETL       	R3, t90
+	 119: LDB       	(t90), t92
+	 120: PUTL       	t92, R0
+	 121: INCEIPL       	$4
+
+	0x25478628:  2F800000  cmpi cr7,r0,0
+	 122: GETL       	R0, t94
+	 123: CMP0L       	t94, t96  (-rSo)
+	 124: ICRFL       	t96, $0x7, CR
+	 125: INCEIPL       	$4
+
+	0x2547862C:  419E0018  bc 12,30,0x25478644
+	 126: Js30o       	$0x25478644
+
+
+
+. 1159 254785B4 124
+. 92 61 00 6C 7C 93 23 78 92 81 00 70 7D 14 43 78 92 A1 00 74 7C D5 33 78 92 C1 00 78 7C B6 2B 78 92 E1 00 7C 7D 37 4B 78 93 01 00 80 7C F8 3B 78 93 21 00 84 7C 79 1B 78 93 41 00 88 7D 5A 53 78 93 81 00 90 3B 80 00 00 93 C1 00 98 7F C8 02 A6 93 E1 00 9C 7C 3F 0B 78 92 21 00 64 92 41 00 68 93 61 00 8C 93 A1 00 94 90 01 00 A4 91 81 00 60 88 03 00 00 2F 80 00 00 41 9E 00 18
+==== BB 1160 (0x25478630) approx BBs exec'd 0 ====
+
+	0x25478630:  89630001  lbz r11,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25478634:  7C1C0378  or r28,r0,r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x25478638:  39230001  addi r9,r3,1
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x1, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0x2547863C:  2C0B0000  cmpi cr0,r11,0
+	  12: GETL       	R11, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x25478640:  4082015C  bc 4,2,0x2547879C
+	  16: Jc02o       	$0x2547879C
+
+
+
+. 1160 25478630 20
+. 89 63 00 01 7C 1C 03 78 39 23 00 01 2C 0B 00 00 40 82 01 5C
+==== BB 1161 (0x2547879C) approx BBs exec'd 0 ====
+
+	0x2547879C:  89490001  lbz r10,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254787A0:  54032036  rlwinm r3,r0,4,0,27
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x4, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x254787A4:  7F835A14  add r28,r3,r11
+	   9: GETL       	R3, t6
+	  10: GETL       	R11, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x254787A8:  39290001  addi r9,r9,1
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x254787AC:  2C8A0000  cmpi cr1,r10,0
+	  18: GETL       	R10, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0x254787B0:  41A6FE94  bc 13,6,0x25478644
+	  22: Js06o       	$0x25478644
+
+
+
+. 1161 2547879C 24
+. 89 49 00 01 54 03 20 36 7F 83 5A 14 39 29 00 01 2C 8A 00 00 41 A6 FE 94
+==== BB 1162 (0x254787B4) approx BBs exec'd 0 ====
+
+	0x254787B4:  89690001  lbz r11,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254787B8:  57842036  rlwinm r4,r28,4,0,27
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x4, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x254787BC:  7F845214  add r28,r4,r10
+	   9: GETL       	R4, t6
+	  10: GETL       	R10, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x254787C0:  39290001  addi r9,r9,1
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x254787C4:  2E0B0000  cmpi cr4,r11,0
+	  18: GETL       	R11, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x4, CR
+	  21: INCEIPL       	$4
+
+	0x254787C8:  41B2FE7C  bc 13,18,0x25478644
+	  22: Js18o       	$0x25478644
+
+
+
+. 1162 254787B4 24
+. 89 69 00 01 57 84 20 36 7F 84 52 14 39 29 00 01 2E 0B 00 00 41 B2 FE 7C
+==== BB 1163 (0x254787CC) approx BBs exec'd 0 ====
+
+	0x254787CC:  89490001  lbz r10,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254787D0:  57852036  rlwinm r5,r28,4,0,27
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x4, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x254787D4:  7F855A14  add r28,r5,r11
+	   9: GETL       	R5, t6
+	  10: GETL       	R11, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x254787D8:  39290001  addi r9,r9,1
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x254787DC:  2F0A0000  cmpi cr6,r10,0
+	  18: GETL       	R10, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x254787E0:  41BAFE64  bc 13,26,0x25478644
+	  22: Js26o       	$0x25478644
+
+
+
+. 1163 254787CC 24
+. 89 49 00 01 57 85 20 36 7F 85 5A 14 39 29 00 01 2F 0A 00 00 41 BA FE 64
+==== BB 1164 (0x254787E4) approx BBs exec'd 0 ====
+
+	0x254787E4:  89690001  lbz r11,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254787E8:  57862036  rlwinm r6,r28,4,0,27
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x4, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x254787EC:  7F865214  add r28,r6,r10
+	   9: GETL       	R6, t6
+	  10: GETL       	R10, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x254787F0:  39490001  addi r10,r9,1
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R10
+	  17: INCEIPL       	$4
+
+	0x254787F4:  2F8B0000  cmpi cr7,r11,0
+	  18: GETL       	R11, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0x254787F8:  41BEFE4C  bc 13,30,0x25478644
+	  22: Js30o       	$0x25478644
+
+
+
+. 1164 254787E4 24
+. 89 69 00 01 57 86 20 36 7F 86 52 14 39 49 00 01 2F 8B 00 00 41 BE FE 4C
+==== BB 1165 (0x254787FC) approx BBs exec'd 0 ====
+
+	0x254787FC:  57922036  rlwinm r18,r28,4,0,27
+	   0: GETL       	R28, t0
+	   1: SHLL       	$0x4, t0
+	   2: PUTL       	t0, R18
+	   3: INCEIPL       	$4
+
+	0x25478800:  7E325A14  add r17,r18,r11
+	   4: GETL       	R18, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R17
+	   8: INCEIPL       	$4
+
+	0x25478804:  8D6A0001  lbzu r11,1(r10)
+	   9: GETL       	R10, t6
+	  10: ADDL       	$0x1, t6
+	  11: PUTL       	t6, R10
+	  12: LDB       	(t6), t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0x25478808:  56280006  rlwinm r8,r17,0,0,3
+	  15: GETL       	R17, t10
+	  16: ANDL       	$0xF0000000, t10
+	  17: PUTL       	t10, R8
+	  18: INCEIPL       	$4
+
+	0x2547880C:  2F8B0000  cmpi cr7,r11,0
+	  19: GETL       	R11, t12
+	  20: CMP0L       	t12, t14  (-rSo)
+	  21: ICRFL       	t14, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x25478810:  7E274278  xor r7,r17,r8
+	  23: GETL       	R17, t16
+	  24: GETL       	R8, t18
+	  25: XORL       	t16, t18
+	  26: PUTL       	t18, R7
+	  27: INCEIPL       	$4
+
+	0x25478814:  5509463E  rlwinm r9,r8,8,24,31
+	  28: GETL       	R8, t20
+	  29: SHRL       	$0x18, t20
+	  30: PUTL       	t20, R9
+	  31: INCEIPL       	$4
+
+	0x25478818:  7CFC4A78  xor r28,r7,r9
+	  32: GETL       	R7, t22
+	  33: GETL       	R9, t24
+	  34: XORL       	t22, t24
+	  35: PUTL       	t24, R28
+	  36: INCEIPL       	$4
+
+	0x2547881C:  4BFFFFDC  b 0x254787F8
+	  37: JMPo       	$0x254787F8  ($4)
+
+
+
+. 1165 254787FC 36
+. 57 92 20 36 7E 32 5A 14 8D 6A 00 01 56 28 00 06 2F 8B 00 00 7E 27 42 78 55 09 46 3E 7C FC 4A 78 4B FF FF DC
+==== BB 1166 (0x254787F8) approx BBs exec'd 0 ====
+
+	0x254787F8:  41BEFE4C  bc 13,30,0x25478644
+	   0: Js30o       	$0x25478644
+
+
+
+. 1166 254787F8 4
+. 41 BE FE 4C
+==== BB 1167 (0x25478644) approx BBs exec'd 0 ====
+
+	0x25478644:  825E04C8  lwz r18,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x25478648:  2E1A0000  cmpi cr4,r26,0
+	   5: GETL       	R26, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x2547864C:  3B600000  li r27,0
+	   9: MOVL       	$0x0, t8
+	  10: PUTL       	t8, R27
+	  11: INCEIPL       	$4
+
+	0x25478650:  39400000  li r10,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R10
+	  14: INCEIPL       	$4
+
+	0x25478654:  83B201A8  lwz r29,424(r18)
+	  15: GETL       	R18, t12
+	  16: ADDL       	$0x1A8, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R29
+	  19: INCEIPL       	$4
+
+	0x25478658:  937F0048  stw r27,72(r31)
+	  20: GETL       	R27, t16
+	  21: GETL       	R31, t18
+	  22: ADDL       	$0x48, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x2547865C:  3B600000  li r27,0
+	  25: MOVL       	$0x0, t20
+	  26: PUTL       	t20, R27
+	  27: INCEIPL       	$4
+
+	0x25478660:  397D0001  addi r11,r29,1
+	  28: GETL       	R29, t22
+	  29: ADDL       	$0x1, t22
+	  30: PUTL       	t22, R11
+	  31: INCEIPL       	$4
+
+	0x25478664:  915F004C  stw r10,76(r31)
+	  32: GETL       	R10, t24
+	  33: GETL       	R31, t26
+	  34: ADDL       	$0x4C, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x25478668:  917201A8  stw r11,424(r18)
+	  37: GETL       	R11, t28
+	  38: GETL       	R18, t30
+	  39: ADDL       	$0x1A8, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x2547866C:  7EBDAB78  or r29,r21,r21
+	  42: GETL       	R21, t32
+	  43: PUTL       	t32, R29
+	  44: INCEIPL       	$4
+
+	0x25478670:  40920538  bc 4,18,0x25478BA8
+	  45: Jc18o       	$0x25478BA8
+
+
+
+. 1167 25478644 48
+. 82 5E 04 C8 2E 1A 00 00 3B 60 00 00 39 40 00 00 83 B2 01 A8 93 7F 00 48 3B 60 00 00 39 7D 00 01 91 5F 00 4C 91 72 01 A8 7E BD AB 78 40 92 05 38
+==== BB 1168 (0x25478674) approx BBs exec'd 0 ====
+
+	0x25478674:  81750000  lwz r11,0(r21)
+	   0: GETL       	R21, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25478678:  2C8B0000  cmpi cr1,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x2547867C:  7F68DB78  or r8,r27,r27
+	   8: GETL       	R27, t8
+	   9: PUTL       	t8, R8
+	  10: INCEIPL       	$4
+
+	0x25478680:  4186004C  bc 12,6,0x254786CC
+	  11: Js06o       	$0x254786CC
+
+
+
+. 1168 25478674 16
+. 81 75 00 00 2C 8B 00 00 7F 68 DB 78 41 86 00 4C
+==== BB 1169 (0x25478684) approx BBs exec'd 0 ====
+
+	0x25478684:  80F50000  lwz r7,0(r21)
+	   0: GETL       	R21, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25478688:  3A3F0018  addi r17,r31,24
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x18, t4
+	   6: PUTL       	t4, R17
+	   7: INCEIPL       	$4
+
+	0x2547868C:  80B60000  lwz r5,0(r22)
+	   8: GETL       	R22, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x25478690:  7F23CB78  or r3,r25,r25
+	  12: GETL       	R25, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x25478694:  7F84E378  or r4,r28,r28
+	  15: GETL       	R28, t12
+	  16: PUTL       	t12, R4
+	  17: INCEIPL       	$4
+
+	0x25478698:  38DF0048  addi r6,r31,72
+	  18: GETL       	R31, t14
+	  19: ADDL       	$0x48, t14
+	  20: PUTL       	t14, R6
+	  21: INCEIPL       	$4
+
+	0x2547869C:  7F09C378  or r9,r24,r24
+	  22: GETL       	R24, t16
+	  23: PUTL       	t16, R9
+	  24: INCEIPL       	$4
+
+	0x254786A0:  7EEABB78  or r10,r23,r23
+	  25: GETL       	R23, t18
+	  26: PUTL       	t18, R10
+	  27: INCEIPL       	$4
+
+	0x254786A4:  93410008  stw r26,8(r1)
+	  28: GETL       	R26, t20
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x8, t22
+	  31: STL       	t20, (t22)
+	  32: INCEIPL       	$4
+
+	0x254786A8:  9281000C  stw r20,12(r1)
+	  33: GETL       	R20, t24
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0xC, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x254786AC:  4BFFFAFD  bl 0x254781A8
+	  38: MOVL       	$0x254786B0, t28
+	  39: PUTL       	t28, LR
+	  40: JMPo-c       	$0x254781A8  ($4)
+
+
+
+. 1169 25478684 44
+. 80 F5 00 00 3A 3F 00 18 80 B6 00 00 7F 23 CB 78 7F 84 E3 78 38 DF 00 48 7F 09 C3 78 7E EA BB 78 93 41 00 08 92 81 00 0C 4B FF FA FD
+==== BB 1170 do_lookup_x(0x254781A8) approx BBs exec'd 0 ====
+
+	0x254781A8:  7D6802A6  mflr r11
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x254781AC:  9421FF90  stwu r1,-112(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF90, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x254781B0:  4801EE51  bl 0x25497000
+	   9: MOVL       	$0x254781B4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1170 254781A8 12
+. 7D 68 02 A6 94 21 FF 90 48 01 EE 51
+==== BB 1171 (0x254781B4) approx BBs exec'd 0 ====
+
+	0x254781B4:  93C10068  stw r30,104(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x68, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254781B8:  80010078  lwz r0,120(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x78, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x254781BC:  7FC802A6  mflr r30
+	  10: GETL       	LR, t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0x254781C0:  91610074  stw r11,116(r1)
+	  13: GETL       	R11, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x74, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254781C4:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0x254781C8:  91E1002C  stw r15,44(r1)
+	  21: GETL       	R15, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x2C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x254781CC:  92010030  stw r16,48(r1)
+	  26: GETL       	R16, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x30, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0x254781D0:  81E70004  lwz r15,4(r7)
+	  31: GETL       	R7, t24
+	  32: ADDL       	$0x4, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R15
+	  35: INCEIPL       	$4
+
+	0x254781D4:  82070000  lwz r16,0(r7)
+	  36: GETL       	R7, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R16
+	  39: INCEIPL       	$4
+
+	0x254781D8:  91C10028  stw r14,40(r1)
+	  40: GETL       	R14, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x28, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0x254781DC:  7C8E2378  or r14,r4,r4
+	  45: GETL       	R4, t36
+	  46: PUTL       	t36, R14
+	  47: INCEIPL       	$4
+
+	0x254781E0:  92210034  stw r17,52(r1)
+	  48: GETL       	R17, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x34, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0x254781E4:  92A10044  stw r21,68(r1)
+	  53: GETL       	R21, t42
+	  54: GETL       	R1, t44
+	  55: ADDL       	$0x44, t44
+	  56: STL       	t42, (t44)
+	  57: INCEIPL       	$4
+
+	0x254781E8:  7D354B78  or r21,r9,r9
+	  58: GETL       	R9, t46
+	  59: PUTL       	t46, R21
+	  60: INCEIPL       	$4
+
+	0x254781EC:  93410058  stw r26,88(r1)
+	  61: GETL       	R26, t48
+	  62: GETL       	R1, t50
+	  63: ADDL       	$0x58, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0x254781F0:  3120FFFF  addic r9,r0,-1
+	  66: GETL       	R0, t52
+	  67: ADCL       	$0xFFFFFFFF, t52  (-wCa)
+	  68: PUTL       	t52, R9
+	  69: INCEIPL       	$4
+
+	0x254781F4:  7E290110  subfe r17,r9,r0
+	  70: GETL       	R9, t54
+	  71: GETL       	R0, t56
+	  72: SBBL       	t54, t56  (-rCa-wCa)
+	  73: PUTL       	t56, R17
+	  74: INCEIPL       	$4
+
+	0x254781F8:  7D1A4378  or r26,r8,r8
+	  75: GETL       	R8, t58
+	  76: PUTL       	t58, R26
+	  77: INCEIPL       	$4
+
+	0x254781FC:  92410038  stw r18,56(r1)
+	  78: GETL       	R18, t60
+	  79: GETL       	R1, t62
+	  80: ADDL       	$0x38, t62
+	  81: STL       	t60, (t62)
+	  82: INCEIPL       	$4
+
+	0x25478200:  9261003C  stw r19,60(r1)
+	  83: GETL       	R19, t64
+	  84: GETL       	R1, t66
+	  85: ADDL       	$0x3C, t66
+	  86: STL       	t64, (t66)
+	  87: INCEIPL       	$4
+
+	0x25478204:  92810040  stw r20,64(r1)
+	  88: GETL       	R20, t68
+	  89: GETL       	R1, t70
+	  90: ADDL       	$0x40, t70
+	  91: STL       	t68, (t70)
+	  92: INCEIPL       	$4
+
+	0x25478208:  92C10048  stw r22,72(r1)
+	  93: GETL       	R22, t72
+	  94: GETL       	R1, t74
+	  95: ADDL       	$0x48, t74
+	  96: STL       	t72, (t74)
+	  97: INCEIPL       	$4
+
+	0x2547820C:  92E1004C  stw r23,76(r1)
+	  98: GETL       	R23, t76
+	  99: GETL       	R1, t78
+	 100: ADDL       	$0x4C, t78
+	 101: STL       	t76, (t78)
+	 102: INCEIPL       	$4
+
+	0x25478210:  93010050  stw r24,80(r1)
+	 103: GETL       	R24, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x50, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x25478214:  93210054  stw r25,84(r1)
+	 108: GETL       	R25, t84
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0x54, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x25478218:  9361005C  stw r27,92(r1)
+	 113: GETL       	R27, t88
+	 114: GETL       	R1, t90
+	 115: ADDL       	$0x5C, t90
+	 116: STL       	t88, (t90)
+	 117: INCEIPL       	$4
+
+	0x2547821C:  93810060  stw r28,96(r1)
+	 118: GETL       	R28, t92
+	 119: GETL       	R1, t94
+	 120: ADDL       	$0x60, t94
+	 121: STL       	t92, (t94)
+	 122: INCEIPL       	$4
+
+	0x25478220:  93A10064  stw r29,100(r1)
+	 123: GETL       	R29, t96
+	 124: GETL       	R1, t98
+	 125: ADDL       	$0x64, t98
+	 126: STL       	t96, (t98)
+	 127: INCEIPL       	$4
+
+	0x25478224:  93E1006C  stw r31,108(r1)
+	 128: GETL       	R31, t100
+	 129: GETL       	R1, t102
+	 130: ADDL       	$0x6C, t102
+	 131: STL       	t100, (t102)
+	 132: INCEIPL       	$4
+
+	0x25478228:  91810024  stw r12,36(r1)
+	 133: GETL       	R12, t104
+	 134: GETL       	R1, t106
+	 135: ADDL       	$0x24, t106
+	 136: STL       	t104, (t106)
+	 137: INCEIPL       	$4
+
+	0x2547822C:  90610008  stw r3,8(r1)
+	 138: GETL       	R3, t108
+	 139: GETL       	R1, t110
+	 140: ADDL       	$0x8, t110
+	 141: STL       	t108, (t110)
+	 142: INCEIPL       	$4
+
+	0x25478230:  90A1000C  stw r5,12(r1)
+	 143: GETL       	R5, t112
+	 144: GETL       	R1, t114
+	 145: ADDL       	$0xC, t114
+	 146: STL       	t112, (t114)
+	 147: INCEIPL       	$4
+
+	0x25478234:  90C10010  stw r6,16(r1)
+	 148: GETL       	R6, t116
+	 149: GETL       	R1, t118
+	 150: ADDL       	$0x10, t118
+	 151: STL       	t116, (t118)
+	 152: INCEIPL       	$4
+
+	0x25478238:  91410014  stw r10,20(r1)
+	 153: GETL       	R10, t120
+	 154: GETL       	R1, t122
+	 155: ADDL       	$0x14, t122
+	 156: STL       	t120, (t122)
+	 157: INCEIPL       	$4
+
+	0x2547823C:  48000010  b 0x2547824C
+	 158: JMPo       	$0x2547824C  ($4)
+
+
+
+. 1171 254781B4 140
+. 93 C1 00 68 80 01 00 78 7F C8 02 A6 91 61 00 74 7D 80 00 26 91 E1 00 2C 92 01 00 30 81 E7 00 04 82 07 00 00 91 C1 00 28 7C 8E 23 78 92 21 00 34 92 A1 00 44 7D 35 4B 78 93 41 00 58 31 20 FF FF 7E 29 01 10 7D 1A 43 78 92 41 00 38 92 61 00 3C 92 81 00 40 92 C1 00 48 92 E1 00 4C 93 01 00 50 93 21 00 54 93 61 00 5C 93 81 00 60 93 A1 00 64 93 E1 00 6C 91 81 00 24 90 61 00 08 90 A1 00 0C 90 C1 00 10 91 41 00 14 48 00 00 10
+==== BB 1172 (0x2547824C) approx BBs exec'd 0 ====
+
+	0x2547824C:  5745103A  rlwinm r5,r26,2,0,29
+	   0: GETL       	R26, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x25478250:  80810078  lwz r4,120(r1)
+	   4: GETL       	R1, t2
+	   5: ADDL       	$0x78, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x25478254:  7D25802E  lwzx r9,r5,r16
+	   9: GETL       	R16, t6
+	  10: GETL       	R5, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x25478258:  3A800000  li r20,0
+	  15: MOVL       	$0x0, t12
+	  16: PUTL       	t12, R20
+	  17: INCEIPL       	$4
+
+	0x2547825C:  3A600000  li r19,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R19
+	  20: INCEIPL       	$4
+
+	0x25478260:  83890014  lwz r28,20(r9)
+	  21: GETL       	R9, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R28
+	  25: INCEIPL       	$4
+
+	0x25478264:  7F832278  xor r3,r28,r4
+	  26: GETL       	R28, t20
+	  27: GETL       	R4, t22
+	  28: XORL       	t20, t22
+	  29: PUTL       	t22, R3
+	  30: INCEIPL       	$4
+
+	0x25478268:  21630000  subfic r11,r3,0
+	  31: GETL       	R3, t24
+	  32: MOVL       	$0x0, t26
+	  33: SBBL       	t24, t26  (-wCa)
+	  34: PUTL       	t26, R11
+	  35: INCEIPL       	$4
+
+	0x2547826C:  7C6B1914  adde r3,r11,r3
+	  36: GETL       	R11, t28
+	  37: GETL       	R3, t30
+	  38: ADCL       	t28, t30  (-rCa-wCa)
+	  39: PUTL       	t30, R3
+	  40: INCEIPL       	$4
+
+	0x25478270:  7E291839  and. r9,r17,r3
+	  41: GETL       	R17, t32
+	  42: GETL       	R3, t34
+	  43: ANDL       	t32, t34
+	  44: PUTL       	t34, R9
+	  45: CMP0L       	t34, t36  (-rSo)
+	  46: ICRFL       	t36, $0x0, CR
+	  47: INCEIPL       	$4
+
+	0x25478274:  4082FFCC  bc 4,2,0x25478240
+	  48: Jc02o       	$0x25478240
+
+
+
+. 1172 2547824C 44
+. 57 45 10 3A 80 81 00 78 7D 25 80 2E 3A 80 00 00 3A 60 00 00 83 89 00 14 7F 83 22 78 21 63 00 00 7C 6B 19 14 7E 29 18 39 40 82 FF CC
+==== BB 1173 (0x25478278) approx BBs exec'd 0 ====
+
+	0x25478278:  80C1007C  lwz r6,124(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x7C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547827C:  54CBFFFF  rlwinm. r11,r6,31,31,31
+	   5: GETL       	R6, t4
+	   6: ROLL       	$0x1F, t4
+	   7: ANDL       	$0x1, t4
+	   8: PUTL       	t4, R11
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25478280:  41820010  bc 12,2,0x25478290
+	  12: Js02o       	$0x25478290
+
+
+
+. 1173 25478278 12
+. 80 C1 00 7C 54 CB FF FF 41 82 00 10
+==== BB 1174 (0x25478290) approx BBs exec'd 0 ====
+
+	0x25478290:  815E04F4  lwz r10,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25478294:  810A0000  lwz r8,0(r10)
+	   5: GETL       	R10, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R8
+	   8: INCEIPL       	$4
+
+	0x25478298:  71090008  andi. r9,r8,0x8
+	   9: GETL       	R8, t8
+	  10: ANDL       	$0x8, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x2547829C:  40820248  bc 4,2,0x254784E4
+	  15: Jc02o       	$0x254784E4
+
+
+
+. 1174 25478290 16
+. 81 5E 04 F4 81 0A 00 00 71 09 00 08 40 82 02 48
+==== BB 1175 (0x254782A0) approx BBs exec'd 0 ====
+
+	0x254782A0:  801C016C  lwz r0,364(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x16C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x254782A4:  833C0170  lwz r25,368(r28)
+	   5: GETL       	R28, t4
+	   6: ADDL       	$0x170, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x254782A8:  7D8E0396  divwu r12, r14, r0
+	  10: GETL       	R14, t10
+	  11: GETL       	R0, t8
+	  12: UDIVL       	t8, t10
+	  13: PUTL       	t10, R12
+	  14: INCEIPL       	$4
+
+	0x254782AC:  817C0038  lwz r11,56(r28)
+	  15: GETL       	R28, t12
+	  16: ADDL       	$0x38, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R11
+	  19: INCEIPL       	$4
+
+	0x254782B0:  807C0034  lwz r3,52(r28)
+	  20: GETL       	R28, t16
+	  21: ADDL       	$0x34, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R3
+	  24: INCEIPL       	$4
+
+	0x254782B4:  830B0004  lwz r24,4(r11)
+	  25: GETL       	R11, t20
+	  26: ADDL       	$0x4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R24
+	  29: INCEIPL       	$4
+
+	0x254782B8:  82430004  lwz r18,4(r3)
+	  30: GETL       	R3, t24
+	  31: ADDL       	$0x4, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R18
+	  34: INCEIPL       	$4
+
+	0x254782BC:  82DC0198  lwz r22,408(r28)
+	  35: GETL       	R28, t28
+	  36: ADDL       	$0x198, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R22
+	  39: INCEIPL       	$4
+
+	0x254782C0:  7FEC01D6  mullw r31,r12,r0
+	  40: GETL       	R12, t32
+	  41: GETL       	R0, t34
+	  42: MULL       	t32, t34
+	  43: PUTL       	t34, R31
+	  44: INCEIPL       	$4
+
+	0x254782C4:  7FBF7050  subf r29,r31,r14
+	  45: GETL       	R31, t36
+	  46: GETL       	R14, t38
+	  47: SUBL       	t36, t38
+	  48: PUTL       	t38, R29
+	  49: INCEIPL       	$4
+
+	0x254782C8:  57BB103A  rlwinm r27,r29,2,0,29
+	  50: GETL       	R29, t40
+	  51: SHLL       	$0x2, t40
+	  52: PUTL       	t40, R27
+	  53: INCEIPL       	$4
+
+	0x254782CC:  7FFBC82E  lwzx r31,r27,r25
+	  54: GETL       	R25, t42
+	  55: GETL       	R27, t44
+	  56: ADDL       	t44, t42
+	  57: LDL       	(t42), t46
+	  58: PUTL       	t46, R31
+	  59: INCEIPL       	$4
+
+	0x254782D0:  2E1F0000  cmpi cr4,r31,0
+	  60: GETL       	R31, t48
+	  61: CMP0L       	t48, t50  (-rSo)
+	  62: ICRFL       	t50, $0x4, CR
+	  63: INCEIPL       	$4
+
+	0x254782D4:  419200F8  bc 12,18,0x254783CC
+	  64: Js18o       	$0x254783CC
+
+
+
+. 1175 254782A0 56
+. 80 1C 01 6C 83 3C 01 70 7D 8E 03 96 81 7C 00 38 80 7C 00 34 83 0B 00 04 82 43 00 04 82 DC 01 98 7F EC 01 D6 7F BF 70 50 57 BB 10 3A 7F FB C8 2E 2E 1F 00 00 41 92 00 F8
+==== BB 1176 (0x254783CC) approx BBs exec'd 0 ====
+
+	0x254783CC:  7F000026  mfcr r24
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R24
+	   2: INCEIPL       	$4
+
+	0x254783D0:  57189FFE  rlwinm r24,r24,19,31,31
+	   3: GETL       	R24, t2
+	   4: ROLL       	$0x13, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R24
+	   7: INCEIPL       	$4
+
+	0x254783D4:  3175FFFF  addic r11,r21,-1
+	   8: GETL       	R21, t4
+	   9: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	  10: PUTL       	t4, R11
+	  11: INCEIPL       	$4
+
+	0x254783D8:  7ECBA910  subfe r22,r11,r21
+	  12: GETL       	R11, t6
+	  13: GETL       	R21, t8
+	  14: SBBL       	t6, t8  (-rCa-wCa)
+	  15: PUTL       	t8, R22
+	  16: INCEIPL       	$4
+
+	0x254783DC:  7F0BB039  and. r11,r24,r22
+	  17: GETL       	R24, t10
+	  18: GETL       	R22, t12
+	  19: ANDL       	t10, t12
+	  20: PUTL       	t12, R11
+	  21: CMP0L       	t12, t14  (-rSo)
+	  22: ICRFL       	t14, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x254783E0:  41A2FE60  bc 13,2,0x25478240
+	  24: Js02o       	$0x25478240
+
+
+
+. 1176 254783CC 24
+. 7F 00 00 26 57 18 9F FE 31 75 FF FF 7E CB A9 10 7F 0B B0 39 41 A2 FE 60
+==== BB 1177 (0x254783E4) approx BBs exec'd 0 ====
+
+	0x254783E4:  8075000C  lwz r3,12(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254783E8:  2E030000  cmpi cr4,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254783EC:  4192FE54  bc 12,18,0x25478240
+	   9: Js18o       	$0x25478240
+
+
+
+. 1177 254783E4 12
+. 80 75 00 0C 2E 03 00 00 41 92 FE 54
+==== BB 1178 (0x254783F0) approx BBs exec'd 0 ====
+
+	0x254783F0:  7F84E378  or r4,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254783F4:  480042E1  bl 0x2547C6D4
+	   3: MOVL       	$0x254783F8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547C6D4  ($4)
+
+
+
+. 1178 254783F0 8
+. 7F 84 E3 78 48 00 42 E1
+==== BB 1179 (0x254783F8) approx BBs exec'd 0 ====
+
+	0x254783F8:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x254783FC:  419AFE44  bc 12,26,0x25478240
+	   4: Js26o       	$0x25478240
+
+
+
+. 1179 254783F8 8
+. 2F 03 00 00 41 9A FE 44
+==== BB 1180 (0x25478240) approx BBs exec'd 0 ====
+
+	0x25478240:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x25478244:  7F9A7840  cmpl cr7,r26,r15
+	   4: GETL       	R26, t2
+	   5: GETL       	R15, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25478248:  409C01DC  bc 4,28,0x25478424
+	   9: Jc28o       	$0x25478424
+
+
+
+. 1180 25478240 12
+. 3B 5A 00 01 7F 9A 78 40 40 9C 01 DC
+==== BB 1181 (0x254782D8) approx BBs exec'd 0 ====
+
+	0x254782D8:  2D960000  cmpi cr3,r22,0
+	   0: GETL       	R22, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x3, CR
+	   3: INCEIPL       	$4
+
+	0x254782DC:  48000028  b 0x25478304
+	   4: JMPo       	$0x25478304  ($4)
+
+
+
+. 1181 254782D8 8
+. 2D 96 00 00 48 00 00 28
+==== BB 1182 (0x25478304) approx BBs exec'd 0 ====
+
+	0x25478304:  57E92036  rlwinm r9,r31,4,0,27
+	   0: GETL       	R31, t0
+	   1: SHLL       	$0x4, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25478308:  7FA9C214  add r29,r9,r24
+	   4: GETL       	R9, t2
+	   5: GETL       	R24, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0x2547830C:  809D0004  lwz r4,4(r29)
+	   9: GETL       	R29, t6
+	  10: ADDL       	$0x4, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x25478310:  2C840000  cmpi cr1,r4,0
+	  14: GETL       	R4, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0x25478314:  41A6FFCC  bc 13,6,0x254782E0
+	  18: Js06o       	$0x254782E0
+
+
+
+. 1182 25478304 20
+. 57 E9 20 36 7F A9 C2 14 80 9D 00 04 2C 84 00 00 41 A6 FF CC
+==== BB 1183 (0x254782E0) approx BBs exec'd 0 ====
+
+	0x254782E0:  88DD000C  lbz r6,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254782E4:  54C5073E  rlwinm r5,r6,0,28,31
+	   5: GETL       	R6, t4
+	   6: ANDL       	$0xF, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x254782E8:  2F050006  cmpi cr6,r5,6
+	   9: GETL       	R5, t6
+	  10: MOVL       	$0x6, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0x254782EC:  419A002C  bc 12,26,0x25478318
+	  14: Js26o       	$0x25478318
+
+
+
+. 1183 254782E0 16
+. 88 DD 00 0C 54 C5 07 3E 2F 05 00 06 41 9A 00 2C
+==== BB 1184 (0x254782F0) approx BBs exec'd 0 ====
+
+	0x254782F0:  83BC0174  lwz r29,372(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x174, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254782F4:  57E7103A  rlwinm r7,r31,2,0,29
+	   5: GETL       	R31, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x254782F8:  7FE7E82E  lwzx r31,r7,r29
+	   9: GETL       	R29, t6
+	  10: GETL       	R7, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0x254782FC:  2E1F0000  cmpi cr4,r31,0
+	  15: GETL       	R31, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x4, CR
+	  18: INCEIPL       	$4
+
+	0x25478300:  41920108  bc 12,18,0x25478408
+	  19: Js18o       	$0x25478408
+
+
+
+. 1184 254782F0 20
+. 83 BC 01 74 57 E7 10 3A 7F E7 E8 2E 2E 1F 00 00 41 92 01 08
+==== BB 1185 (0x25478408) approx BBs exec'd 0 ====
+
+	0x25478408:  2D940001  cmpi cr3,r20,1
+	   0: GETL       	R20, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x3, CR
+	   4: INCEIPL       	$4
+
+	0x2547840C:  408EFFC0  bc 4,14,0x254783CC
+	   5: Jc14o       	$0x254783CC
+
+
+
+. 1185 25478408 8
+. 2D 94 00 01 40 8E FF C0
+==== BB 1186 (0x25478318) approx BBs exec'd 0 ====
+
+	0x25478318:  A0FD000E  lhz r7,14(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xE, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547831C:  2C070000  cmpi cr0,r7,0
+	   5: GETL       	R7, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25478320:  40820010  bc 4,2,0x25478330
+	   9: Jc02o       	$0x25478330
+
+
+
+. 1186 25478318 12
+. A0 FD 00 0E 2C 07 00 00 40 82 00 10
+==== BB 1187 (0x25478330) approx BBs exec'd 0 ====
+
+	0x25478330:  8B7D000C  lbz r27,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25478334:  5760073E  rlwinm r0,r27,0,28,31
+	   5: GETL       	R27, t4
+	   6: ANDL       	$0xF, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25478338:  2F800002  cmpi cr7,r0,2
+	   9: GETL       	R0, t6
+	  10: MOVL       	$0x2, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x2547833C:  409D000C  bc 4,29,0x25478348
+	  14: Jc29o       	$0x25478348
+
+
+
+. 1187 25478330 16
+. 8B 7D 00 0C 57 60 07 3E 2F 80 00 02 40 9D 00 0C
+==== BB 1188 (0x25478348) approx BBs exec'd 0 ====
+
+	0x25478348:  8141000C  lwz r10,12(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547834C:  7F1D5000  cmp cr6,r29,r10
+	   5: GETL       	R29, t4
+	   6: GETL       	R10, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x25478350:  419A001C  bc 12,26,0x2547836C
+	  10: Js26o       	$0x2547836C
+
+
+
+. 1188 25478348 12
+. 81 41 00 0C 7F 1D 50 00 41 9A 00 1C
+==== BB 1189 (0x25478354) approx BBs exec'd 0 ====
+
+	0x25478354:  7EE9C02E  lwzx r23,r9,r24
+	   0: GETL       	R24, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R23
+	   5: INCEIPL       	$4
+
+	0x25478358:  80810008  lwz r4,8(r1)
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x8, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R4
+	  10: INCEIPL       	$4
+
+	0x2547835C:  7C779214  add r3,r23,r18
+	  11: GETL       	R23, t10
+	  12: GETL       	R18, t12
+	  13: ADDL       	t10, t12
+	  14: PUTL       	t12, R3
+	  15: INCEIPL       	$4
+
+	0x25478360:  4800AB81  bl 0x25482EE0
+	  16: MOVL       	$0x25478364, t14
+	  17: PUTL       	t14, LR
+	  18: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 1189 25478354 16
+. 7E E9 C0 2E 80 81 00 08 7C 77 92 14 48 00 AB 81
+==== BB 1190 (0x25478364) approx BBs exec'd 0 ====
+
+	0x25478364:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25478368:  4082FF88  bc 4,2,0x254782F0
+	   4: Jc02o       	$0x254782F0
+
+
+
+. 1190 25478364 8
+. 2C 03 00 00 40 82 FF 88
+==== BB 1191 (0x2547836C) approx BBs exec'd 0 ====
+
+	0x2547836C:  2F950000  cmpi cr7,r21,0
+	   0: GETL       	R21, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25478370:  419E0130  bc 12,30,0x254784A0
+	   4: Js30o       	$0x254784A0
+
+
+
+. 1191 2547836C 8
+. 2F 95 00 00 41 9E 01 30
+==== BB 1192 (0x25478374) approx BBs exec'd 0 ====
+
+	0x25478374:  418E0044  bc 12,14,0x254783B8
+	   0: Js14o       	$0x254783B8
+
+
+
+. 1192 25478374 4
+. 41 8E 00 44
+==== BB 1193 (0x25478378) approx BBs exec'd 0 ====
+
+	0x25478378:  57F9083C  rlwinm r25,r31,1,0,30
+	   0: GETL       	R31, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0x2547837C:  815C0188  lwz r10,392(r28)
+	   4: GETL       	R28, t2
+	   5: ADDL       	$0x188, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x25478380:  7EF9B22E  lhzx r23,r25,r22
+	   9: GETL       	R22, t6
+	  10: GETL       	R25, t8
+	  11: ADDL       	t8, t6
+	  12: LDW       	(t6), t10
+	  13: PUTL       	t10, R23
+	  14: INCEIPL       	$4
+
+	0x25478384:  81350004  lwz r9,4(r21)
+	  15: GETL       	R21, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25478388:  56EB2376  rlwinm r11,r23,4,13,27
+	  20: GETL       	R23, t16
+	  21: ROLL       	$0x4, t16
+	  22: ANDL       	$0x7FFF0, t16
+	  23: PUTL       	t16, R11
+	  24: INCEIPL       	$4
+
+	0x2547838C:  7D8B5214  add r12,r11,r10
+	  25: GETL       	R11, t18
+	  26: GETL       	R10, t20
+	  27: ADDL       	t18, t20
+	  28: PUTL       	t20, R12
+	  29: INCEIPL       	$4
+
+	0x25478390:  832C0004  lwz r25,4(r12)
+	  30: GETL       	R12, t22
+	  31: ADDL       	$0x4, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R25
+	  34: INCEIPL       	$4
+
+	0x25478394:  7C994800  cmp cr1,r25,r9
+	  35: GETL       	R25, t26
+	  36: GETL       	R9, t28
+	  37: CMPL       	t26, t28, t30  (-rSo)
+	  38: ICRFL       	t30, $0x1, CR
+	  39: INCEIPL       	$4
+
+	0x25478398:  418600F0  bc 12,6,0x25478488
+	  40: Js06o       	$0x25478488
+
+
+
+. 1193 25478378 36
+. 57 F9 08 3C 81 5C 01 88 7E F9 B2 2E 81 35 00 04 56 EB 23 76 7D 8B 52 14 83 2C 00 04 7C 99 48 00 41 86 00 F0
+==== BB 1194 (0x25478488) approx BBs exec'd 0 ====
+
+	0x25478488:  7C6B502E  lwzx r3,r11,r10
+	   0: GETL       	R10, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R3
+	   5: INCEIPL       	$4
+
+	0x2547848C:  80950000  lwz r4,0(r21)
+	   6: GETL       	R21, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R4
+	   9: INCEIPL       	$4
+
+	0x25478490:  4800AA51  bl 0x25482EE0
+	  10: MOVL       	$0x25478494, t10
+	  11: PUTL       	t10, LR
+	  12: JMPo-c       	$0x25482EE0  ($4)
+
+
+
+. 1194 25478488 12
+. 7C 6B 50 2E 80 95 00 00 48 00 AA 51
+==== BB 1195 (0x25478494) approx BBs exec'd 0 ====
+
+	0x25478494:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25478498:  409AFF04  bc 4,26,0x2547839C
+	   4: Jc26o       	$0x2547839C
+
+
+
+. 1195 25478494 8
+. 2F 03 00 00 40 9A FF 04
+==== BB 1196 (0x2547849C) approx BBs exec'd 0 ====
+
+	0x2547849C:  4BFFFF1C  b 0x254783B8
+	   0: JMPo       	$0x254783B8  ($4)
+
+
+
+. 1196 2547849C 4
+. 4B FF FF 1C
+==== BB 1197 (0x254783B8) approx BBs exec'd 0 ====
+
+	0x254783B8:  5760E13E  rlwinm r0,r27,28,4,31
+	   0: GETL       	R27, t0
+	   1: SHRL       	$0x4, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x254783BC:  2F800001  cmpi cr7,r0,1
+	   4: GETL       	R0, t2
+	   5: MOVL       	$0x1, t6
+	   6: CMPL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x254783C0:  419E017C  bc 12,30,0x2547853C
+	   9: Js30o       	$0x2547853C
+
+
+
+. 1197 254783B8 12
+. 57 60 E1 3E 2F 80 00 01 41 9E 01 7C
+==== BB 1198 (0x2547853C) approx BBs exec'd 0 ====
+
+	0x2547853C:  81C10010  lwz r14,16(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R14
+	   4: INCEIPL       	$4
+
+	0x25478540:  38600001  li r3,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25478544:  938E0004  stw r28,4(r14)
+	   8: GETL       	R28, t6
+	   9: GETL       	R14, t8
+	  10: ADDL       	$0x4, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25478548:  93AE0000  stw r29,0(r14)
+	  13: GETL       	R29, t10
+	  14: GETL       	R14, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0x2547854C:  4BFFFEDC  b 0x25478428
+	  17: JMPo       	$0x25478428  ($4)
+
+
+
+. 1198 2547853C 20
+. 81 C1 00 10 38 60 00 01 93 8E 00 04 93 AE 00 00 4B FF FE DC
+==== BB 1199 (0x25478428) approx BBs exec'd 0 ====
+
+	0x25478428:  81E10074  lwz r15,116(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0x2547842C:  81810024  lwz r12,36(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x24, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25478430:  7DE803A6  mtlr r15
+	  10: GETL       	R15, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x25478434:  81C10028  lwz r14,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R14
+	  17: INCEIPL       	$4
+
+	0x25478438:  81E1002C  lwz r15,44(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x2C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R15
+	  22: INCEIPL       	$4
+
+	0x2547843C:  7D818120  mtcrf 0x18,r12
+	  23: GETL       	R12, t18
+	  24: ICRFL       	t18, $0x3, CR
+	  25: ICRFL       	t18, $0x4, CR
+	  26: INCEIPL       	$4
+
+	0x25478440:  82010030  lwz r16,48(r1)
+	  27: GETL       	R1, t20
+	  28: ADDL       	$0x30, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R16
+	  31: INCEIPL       	$4
+
+	0x25478444:  82210034  lwz r17,52(r1)
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x34, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R17
+	  36: INCEIPL       	$4
+
+	0x25478448:  82410038  lwz r18,56(r1)
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x38, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R18
+	  41: INCEIPL       	$4
+
+	0x2547844C:  8261003C  lwz r19,60(r1)
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x3C, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R19
+	  46: INCEIPL       	$4
+
+	0x25478450:  82810040  lwz r20,64(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x40, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R20
+	  51: INCEIPL       	$4
+
+	0x25478454:  82A10044  lwz r21,68(r1)
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x44, t40
+	  54: LDL       	(t40), t42
+	  55: PUTL       	t42, R21
+	  56: INCEIPL       	$4
+
+	0x25478458:  82C10048  lwz r22,72(r1)
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x48, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R22
+	  61: INCEIPL       	$4
+
+	0x2547845C:  82E1004C  lwz r23,76(r1)
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x4C, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R23
+	  66: INCEIPL       	$4
+
+	0x25478460:  83010050  lwz r24,80(r1)
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x50, t52
+	  69: LDL       	(t52), t54
+	  70: PUTL       	t54, R24
+	  71: INCEIPL       	$4
+
+	0x25478464:  83210054  lwz r25,84(r1)
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x54, t56
+	  74: LDL       	(t56), t58
+	  75: PUTL       	t58, R25
+	  76: INCEIPL       	$4
+
+	0x25478468:  83410058  lwz r26,88(r1)
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x58, t60
+	  79: LDL       	(t60), t62
+	  80: PUTL       	t62, R26
+	  81: INCEIPL       	$4
+
+	0x2547846C:  8361005C  lwz r27,92(r1)
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x5C, t64
+	  84: LDL       	(t64), t66
+	  85: PUTL       	t66, R27
+	  86: INCEIPL       	$4
+
+	0x25478470:  83810060  lwz r28,96(r1)
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x60, t68
+	  89: LDL       	(t68), t70
+	  90: PUTL       	t70, R28
+	  91: INCEIPL       	$4
+
+	0x25478474:  83A10064  lwz r29,100(r1)
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x64, t72
+	  94: LDL       	(t72), t74
+	  95: PUTL       	t74, R29
+	  96: INCEIPL       	$4
+
+	0x25478478:  83C10068  lwz r30,104(r1)
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x68, t76
+	  99: LDL       	(t76), t78
+	 100: PUTL       	t78, R30
+	 101: INCEIPL       	$4
+
+	0x2547847C:  83E1006C  lwz r31,108(r1)
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x6C, t80
+	 104: LDL       	(t80), t82
+	 105: PUTL       	t82, R31
+	 106: INCEIPL       	$4
+
+	0x25478480:  38210070  addi r1,r1,112
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x70, t84
+	 109: PUTL       	t84, R1
+	 110: INCEIPL       	$4
+
+	0x25478484:  4E800020  blr
+	 111: GETL       	LR, t86
+	 112: JMPo-r       	t86  ($4)
+
+
+
+. 1199 25478428 96
+. 81 E1 00 74 81 81 00 24 7D E8 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+==== BB 1200 (0x254786B0) approx BBs exec'd 0 ====
+
+	0x254786B0:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x254786B4:  39000000  li r8,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R8
+	   6: INCEIPL       	$4
+
+	0x254786B8:  41810014  bc 12,1,0x254786CC
+	   7: Js01o       	$0x254786CC
+
+
+
+. 1200 254786B0 12
+. 2C 03 00 00 39 00 00 00 41 81 00 14
+==== BB 1201 (0x254786CC) approx BBs exec'd 0 ====
+
+	0x254786CC:  815F0048  lwz r10,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254786D0:  2F8A0000  cmpi cr7,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x254786D4:  419E0504  bc 12,30,0x25478BD8
+	   9: Js30o       	$0x25478BD8
+
+
+
+. 1201 254786CC 12
+. 81 5F 00 48 2F 8A 00 00 41 9E 05 04
+==== BB 1202 (0x254786D8) approx BBs exec'd 0 ====
+
+	0x254786D8:  80B60000  lwz r5,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x254786DC:  2F850000  cmpi cr7,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x254786E0:  419E0140  bc 12,30,0x25478820
+	   8: Js30o       	$0x25478820
+
+
+
+. 1202 254786D8 12
+. 80 B6 00 00 2F 85 00 00 41 9E 01 40
+==== BB 1203 (0x254786E4) approx BBs exec'd 0 ====
+
+	0x254786E4:  88E5000D  lbz r7,13(r5)
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0xD, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254786E8:  39200000  li r9,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x254786EC:  2E090000  cmpi cr4,r9,0
+	   8: GETL       	R9, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x4, CR
+	  11: INCEIPL       	$4
+
+	0x254786F0:  54E607BE  rlwinm r6,r7,0,30,31
+	  12: GETL       	R7, t10
+	  13: ANDL       	$0x3, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x254786F4:  2F060003  cmpi cr6,r6,3
+	  16: GETL       	R6, t12
+	  17: MOVL       	$0x3, t16
+	  18: CMPL       	t12, t16, t14  (-rSo)
+	  19: ICRFL       	t14, $0x6, CR
+	  20: INCEIPL       	$4
+
+	0x254786F8:  419A0234  bc 12,26,0x2547892C
+	  21: Js26o       	$0x2547892C
+
+
+
+. 1203 254786E4 24
+. 88 E5 00 0D 39 20 00 00 2E 09 00 00 54 E6 07 BE 2F 06 00 03 41 9A 02 34
+==== BB 1204 (0x254786FC) approx BBs exec'd 0 ====
+
+	0x254786FC:  3B7F0048  addi r27,r31,72
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25478700:  839B0004  lwz r28,4(r27)
+	   4: GETL       	R27, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x25478704:  3D808000  lis r12,-32768
+	   9: MOVL       	$0x80000000, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x25478708:  801C0180  lwz r0,384(r28)
+	  12: GETL       	R28, t8
+	  13: ADDL       	$0x180, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0x2547870C:  541D0002  rlwinm r29,r0,0,0,1
+	  17: GETL       	R0, t12
+	  18: ANDL       	$0xC0000000, t12
+	  19: PUTL       	t12, R29
+	  20: INCEIPL       	$4
+
+	0x25478710:  7F1D6000  cmp cr6,r29,r12
+	  21: GETL       	R29, t14
+	  22: GETL       	R12, t16
+	  23: CMPL       	t14, t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x6, CR
+	  25: INCEIPL       	$4
+
+	0x25478714:  419A05BC  bc 12,26,0x25478CD0
+	  26: Js26o       	$0x25478CD0
+
+
+
+. 1204 254786FC 28
+. 3B 7F 00 48 83 9B 00 04 3D 80 80 00 80 1C 01 80 54 1D 00 02 7F 1D 60 00 41 9A 05 BC
+==== BB 1205 (0x25478718) approx BBs exec'd 0 ====
+
+	0x25478718:  82BB0004  lwz r21,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547871C:  83BE04F4  lwz r29,1268(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x4F4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x25478720:  83550180  lwz r26,384(r21)
+	  10: GETL       	R21, t8
+	  11: ADDL       	$0x180, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0x25478724:  67570010  oris r23,r26,0x10
+	  15: GETL       	R26, t12
+	  16: ORL       	$0x100000, t12
+	  17: PUTL       	t12, R23
+	  18: INCEIPL       	$4
+
+	0x25478728:  92F50180  stw r23,384(r21)
+	  19: GETL       	R23, t14
+	  20: GETL       	R21, t16
+	  21: ADDL       	$0x180, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547872C:  801D0000  lwz r0,0(r29)
+	  24: GETL       	R29, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R0
+	  27: INCEIPL       	$4
+
+	0x25478730:  70090404  andi. r9,r0,0x404
+	  28: GETL       	R0, t22
+	  29: ANDL       	$0x404, t22
+	  30: PUTL       	t22, R9
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x0, CR
+	  33: INCEIPL       	$4
+
+	0x25478734:  408202A0  bc 4,2,0x254789D4
+	  34: Jc02o       	$0x254789D4
+
+
+
+. 1205 25478718 32
+. 82 BB 00 04 83 BE 04 F4 83 55 01 80 67 57 00 10 92 F5 01 80 80 1D 00 00 70 09 04 04 40 82 02 A0
+==== BB 1206 (0x25478738) approx BBs exec'd 0 ====
+
+	0x25478738:  807B0004  lwz r3,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547873C:  801F0048  lwz r0,72(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x48, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x25478740:  90160000  stw r0,0(r22)
+	  10: GETL       	R0, t8
+	  11: GETL       	R22, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25478744:  80E10000  lwz r7,0(r1)
+	  14: GETL       	R1, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R7
+	  17: INCEIPL       	$4
+
+	0x25478748:  82C70004  lwz r22,4(r7)
+	  18: GETL       	R7, t16
+	  19: ADDL       	$0x4, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R22
+	  22: INCEIPL       	$4
+
+	0x2547874C:  8107FFC0  lwz r8,-64(r7)
+	  23: GETL       	R7, t20
+	  24: ADDL       	$0xFFFFFFC0, t20
+	  25: LDL       	(t20), t22
+	  26: PUTL       	t22, R8
+	  27: INCEIPL       	$4
+
+	0x25478750:  7EC803A6  mtlr r22
+	  28: GETL       	R22, t24
+	  29: PUTL       	t24, LR
+	  30: INCEIPL       	$4
+
+	0x25478754:  8227FFC4  lwz r17,-60(r7)
+	  31: GETL       	R7, t26
+	  32: ADDL       	$0xFFFFFFC4, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R17
+	  35: INCEIPL       	$4
+
+	0x25478758:  8247FFC8  lwz r18,-56(r7)
+	  36: GETL       	R7, t30
+	  37: ADDL       	$0xFFFFFFC8, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R18
+	  40: INCEIPL       	$4
+
+	0x2547875C:  7D008120  mtcrf 0x8,r8
+	  41: GETL       	R8, t34
+	  42: ICRFL       	t34, $0x4, CR
+	  43: INCEIPL       	$4
+
+	0x25478760:  8267FFCC  lwz r19,-52(r7)
+	  44: GETL       	R7, t36
+	  45: ADDL       	$0xFFFFFFCC, t36
+	  46: LDL       	(t36), t38
+	  47: PUTL       	t38, R19
+	  48: INCEIPL       	$4
+
+	0x25478764:  8287FFD0  lwz r20,-48(r7)
+	  49: GETL       	R7, t40
+	  50: ADDL       	$0xFFFFFFD0, t40
+	  51: LDL       	(t40), t42
+	  52: PUTL       	t42, R20
+	  53: INCEIPL       	$4
+
+	0x25478768:  82A7FFD4  lwz r21,-44(r7)
+	  54: GETL       	R7, t44
+	  55: ADDL       	$0xFFFFFFD4, t44
+	  56: LDL       	(t44), t46
+	  57: PUTL       	t46, R21
+	  58: INCEIPL       	$4
+
+	0x2547876C:  82C7FFD8  lwz r22,-40(r7)
+	  59: GETL       	R7, t48
+	  60: ADDL       	$0xFFFFFFD8, t48
+	  61: LDL       	(t48), t50
+	  62: PUTL       	t50, R22
+	  63: INCEIPL       	$4
+
+	0x25478770:  82E7FFDC  lwz r23,-36(r7)
+	  64: GETL       	R7, t52
+	  65: ADDL       	$0xFFFFFFDC, t52
+	  66: LDL       	(t52), t54
+	  67: PUTL       	t54, R23
+	  68: INCEIPL       	$4
+
+	0x25478774:  8307FFE0  lwz r24,-32(r7)
+	  69: GETL       	R7, t56
+	  70: ADDL       	$0xFFFFFFE0, t56
+	  71: LDL       	(t56), t58
+	  72: PUTL       	t58, R24
+	  73: INCEIPL       	$4
+
+	0x25478778:  8327FFE4  lwz r25,-28(r7)
+	  74: GETL       	R7, t60
+	  75: ADDL       	$0xFFFFFFE4, t60
+	  76: LDL       	(t60), t62
+	  77: PUTL       	t62, R25
+	  78: INCEIPL       	$4
+
+	0x2547877C:  8347FFE8  lwz r26,-24(r7)
+	  79: GETL       	R7, t64
+	  80: ADDL       	$0xFFFFFFE8, t64
+	  81: LDL       	(t64), t66
+	  82: PUTL       	t66, R26
+	  83: INCEIPL       	$4
+
+	0x25478780:  8367FFEC  lwz r27,-20(r7)
+	  84: GETL       	R7, t68
+	  85: ADDL       	$0xFFFFFFEC, t68
+	  86: LDL       	(t68), t70
+	  87: PUTL       	t70, R27
+	  88: INCEIPL       	$4
+
+	0x25478784:  8387FFF0  lwz r28,-16(r7)
+	  89: GETL       	R7, t72
+	  90: ADDL       	$0xFFFFFFF0, t72
+	  91: LDL       	(t72), t74
+	  92: PUTL       	t74, R28
+	  93: INCEIPL       	$4
+
+	0x25478788:  83A7FFF4  lwz r29,-12(r7)
+	  94: GETL       	R7, t76
+	  95: ADDL       	$0xFFFFFFF4, t76
+	  96: LDL       	(t76), t78
+	  97: PUTL       	t78, R29
+	  98: INCEIPL       	$4
+
+	0x2547878C:  83C7FFF8  lwz r30,-8(r7)
+	  99: GETL       	R7, t80
+	 100: ADDL       	$0xFFFFFFF8, t80
+	 101: LDL       	(t80), t82
+	 102: PUTL       	t82, R30
+	 103: INCEIPL       	$4
+
+	0x25478790:  83E7FFFC  lwz r31,-4(r7)
+	 104: GETL       	R7, t84
+	 105: ADDL       	$0xFFFFFFFC, t84
+	 106: LDL       	(t84), t86
+	 107: PUTL       	t86, R31
+	 108: INCEIPL       	$4
+
+	0x25478794:  7CE13B78  or r1,r7,r7
+	 109: GETL       	R7, t88
+	 110: PUTL       	t88, R1
+	 111: INCEIPL       	$4
+
+	0x25478798:  4E800020  blr
+	 112: GETL       	LR, t90
+	 113: JMPo-r       	t90  ($4)
+
+
+
+. 1206 25478738 100
+. 80 7B 00 04 80 1F 00 48 90 16 00 00 80 E1 00 00 82 C7 00 04 81 07 FF C0 7E C8 03 A6 82 27 FF C4 82 47 FF C8 7D 00 81 20 82 67 FF CC 82 87 FF D0 82 A7 FF D4 82 C7 FF D8 82 E7 FF DC 83 07 FF E0 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+==== BB 1207 (0x2547995C) approx BBs exec'd 0 ====
+
+	0x2547995C:  811F0050  lwz r8,80(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25479960:  809F000C  lwz r4,12(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25479964:  7C7A1B78  or r26,r3,r3
+	  10: GETL       	R3, t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x25479968:  7D064378  or r6,r8,r8
+	  13: GETL       	R8, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x2547996C:  90640210  stw r3,528(r4)
+	  16: GETL       	R3, t12
+	  17: GETL       	R4, t14
+	  18: ADDL       	$0x210, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x25479970:  91040214  stw r8,532(r4)
+	  21: GETL       	R8, t16
+	  22: GETL       	R4, t18
+	  23: ADDL       	$0x214, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25479974:  2F060000  cmpi cr6,r6,0
+	  26: GETL       	R6, t20
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x6, CR
+	  29: INCEIPL       	$4
+
+	0x25479978:  39200000  li r9,0
+	  30: MOVL       	$0x0, t24
+	  31: PUTL       	t24, R9
+	  32: INCEIPL       	$4
+
+	0x2547997C:  419A0010  bc 12,26,0x2547998C
+	  33: Js26o       	$0x2547998C
+
+
+
+. 1207 2547995C 36
+. 81 1F 00 50 80 9F 00 0C 7C 7A 1B 78 7D 06 43 78 90 64 02 10 91 04 02 14 2F 06 00 00 39 20 00 00 41 9A 00 10
+==== BB 1208 (0x25479980) approx BBs exec'd 0 ====
+
+	0x25479980:  80FA0000  lwz r7,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25479984:  81660004  lwz r11,4(r6)
+	   4: GETL       	R6, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0x25479988:  7D275A14  add r9,r7,r11
+	   9: GETL       	R7, t8
+	  10: GETL       	R11, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x2547998C:  2F9D0044  cmpi cr7,r29,68
+	  14: GETL       	R29, t12
+	  15: MOVL       	$0x44, t16
+	  16: CMPL       	t12, t16, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25479990:  801C0008  lwz r0,8(r28)
+	  19: GETL       	R28, t18
+	  20: ADDL       	$0x8, t18
+	  21: LDL       	(t18), t20
+	  22: PUTL       	t20, R0
+	  23: INCEIPL       	$4
+
+	0x25479994:  7D290214  add r9,r9,r0
+	  24: GETL       	R9, t22
+	  25: GETL       	R0, t24
+	  26: ADDL       	t22, t24
+	  27: PUTL       	t24, R9
+	  28: INCEIPL       	$4
+
+	0x25479998:  419E0100  bc 12,30,0x25479A98
+	  29: Js30o       	$0x25479A98
+
+
+
+. 1208 25479980 28
+. 80 FA 00 00 81 66 00 04 7D 27 5A 14 2F 9D 00 44 80 1C 00 08 7D 29 02 14 41 9E 01 00
+==== BB 1209 (0x2547999C) approx BBs exec'd 0 ====
+
+	0x2547999C:  419D00D0  bc 12,29,0x25479A6C
+	   0: Js29o       	$0x25479A6C
+
+
+
+. 1209 2547999C 4
+. 41 9D 00 D0
+==== BB 1210 (0x254799A0) approx BBs exec'd 0 ====
+
+	0x254799A0:  2F9D0001  cmpi cr7,r29,1
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254799A4:  419E00EC  bc 12,30,0x25479A90
+	   5: Js30o       	$0x25479A90
+
+
+
+. 1210 254799A0 8
+. 2F 9D 00 01 41 9E 00 EC
+==== BB 1211 (0x25479A90) approx BBs exec'd 0 ====
+
+	0x25479A90:  91370000  stw r9,0(r23)
+	   0: GETL       	R9, t0
+	   1: GETL       	R23, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25479A94:  4BFFFF38  b 0x254799CC
+	   4: JMPo       	$0x254799CC  ($4)
+
+
+
+. 1211 25479A90 8
+. 91 37 00 00 4B FF FF 38
+==== BB 1212 (0x254799CC) approx BBs exec'd 0 ====
+
+	0x254799CC:  3B9C000C  addi r28,r28,12
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0xC, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x254799D0:  7E1CC040  cmpl cr4,r28,r24
+	   4: GETL       	R28, t2
+	   5: GETL       	R24, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254799D4:  4190FE68  bc 12,16,0x2547983C
+	   9: Js16o       	$0x2547983C
+
+
+
+. 1212 254799CC 12
+. 3B 9C 00 0C 7E 1C C0 40 41 90 FE 68
+==== BB 1213 (0x25479938) approx BBs exec'd 0 ====
+
+	0x25479938:  80DF0050  lwz r6,80(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547993C:  7D695B78  or r9,r11,r11
+	   5: GETL       	R11, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25479940:  815F0008  lwz r10,8(r31)
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x25479944:  80A60000  lwz r5,0(r6)
+	  13: GETL       	R6, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R5
+	  16: INCEIPL       	$4
+
+	0x25479948:  80DF0010  lwz r6,16(r31)
+	  17: GETL       	R31, t14
+	  18: ADDL       	$0x10, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R6
+	  21: INCEIPL       	$4
+
+	0x2547994C:  7C655214  add r3,r5,r10
+	  22: GETL       	R5, t18
+	  23: GETL       	R10, t20
+	  24: ADDL       	t18, t20
+	  25: PUTL       	t20, R3
+	  26: INCEIPL       	$4
+
+	0x25479950:  38BF0050  addi r5,r31,80
+	  27: GETL       	R31, t22
+	  28: ADDL       	$0x50, t22
+	  29: PUTL       	t22, R5
+	  30: INCEIPL       	$4
+
+	0x25479954:  39400000  li r10,0
+	  31: MOVL       	$0x0, t24
+	  32: PUTL       	t24, R10
+	  33: INCEIPL       	$4
+
+	0x25479958:  4BFFEC4D  bl 0x254785A4
+	  34: MOVL       	$0x2547995C, t26
+	  35: PUTL       	t26, LR
+	  36: JMPo-c       	$0x254785A4  ($4)
+
+
+
+. 1213 25479938 36
+. 80 DF 00 50 7D 69 5B 78 81 5F 00 08 80 A6 00 00 80 DF 00 10 7C 65 52 14 38 BF 00 50 39 40 00 00 4B FF EC 4D
+==== BB 1214 (0x25478324) approx BBs exec'd 0 ====
+
+	0x25478324:  8101007C  lwz r8,124(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x7C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25478328:  71000001  andi. r0,r8,0x1
+	   5: GETL       	R8, t4
+	   6: ANDL       	$0x1, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2547832C:  40A2FFC4  bc 5,2,0x254782F0
+	  11: Jc02o       	$0x254782F0
+
+
+
+. 1214 25478324 12
+. 81 01 00 7C 71 00 00 01 40 A2 FF C4
+==== BB 1215 (0x25478424) approx BBs exec'd 0 ====
+
+	0x25478424:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25478428:  81E10074  lwz r15,116(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x74, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R15
+	   7: INCEIPL       	$4
+
+	0x2547842C:  81810024  lwz r12,36(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x24, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R12
+	  12: INCEIPL       	$4
+
+	0x25478430:  7DE803A6  mtlr r15
+	  13: GETL       	R15, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x25478434:  81C10028  lwz r14,40(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x28, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R14
+	  20: INCEIPL       	$4
+
+	0x25478438:  81E1002C  lwz r15,44(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x2C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R15
+	  25: INCEIPL       	$4
+
+	0x2547843C:  7D818120  mtcrf 0x18,r12
+	  26: GETL       	R12, t20
+	  27: ICRFL       	t20, $0x3, CR
+	  28: ICRFL       	t20, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x25478440:  82010030  lwz r16,48(r1)
+	  30: GETL       	R1, t22
+	  31: ADDL       	$0x30, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R16
+	  34: INCEIPL       	$4
+
+	0x25478444:  82210034  lwz r17,52(r1)
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x34, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R17
+	  39: INCEIPL       	$4
+
+	0x25478448:  82410038  lwz r18,56(r1)
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x38, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R18
+	  44: INCEIPL       	$4
+
+	0x2547844C:  8261003C  lwz r19,60(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x3C, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R19
+	  49: INCEIPL       	$4
+
+	0x25478450:  82810040  lwz r20,64(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x40, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R20
+	  54: INCEIPL       	$4
+
+	0x25478454:  82A10044  lwz r21,68(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x44, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R21
+	  59: INCEIPL       	$4
+
+	0x25478458:  82C10048  lwz r22,72(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x48, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R22
+	  64: INCEIPL       	$4
+
+	0x2547845C:  82E1004C  lwz r23,76(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x4C, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R23
+	  69: INCEIPL       	$4
+
+	0x25478460:  83010050  lwz r24,80(r1)
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x50, t54
+	  72: LDL       	(t54), t56
+	  73: PUTL       	t56, R24
+	  74: INCEIPL       	$4
+
+	0x25478464:  83210054  lwz r25,84(r1)
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x54, t58
+	  77: LDL       	(t58), t60
+	  78: PUTL       	t60, R25
+	  79: INCEIPL       	$4
+
+	0x25478468:  83410058  lwz r26,88(r1)
+	  80: GETL       	R1, t62
+	  81: ADDL       	$0x58, t62
+	  82: LDL       	(t62), t64
+	  83: PUTL       	t64, R26
+	  84: INCEIPL       	$4
+
+	0x2547846C:  8361005C  lwz r27,92(r1)
+	  85: GETL       	R1, t66
+	  86: ADDL       	$0x5C, t66
+	  87: LDL       	(t66), t68
+	  88: PUTL       	t68, R27
+	  89: INCEIPL       	$4
+
+	0x25478470:  83810060  lwz r28,96(r1)
+	  90: GETL       	R1, t70
+	  91: ADDL       	$0x60, t70
+	  92: LDL       	(t70), t72
+	  93: PUTL       	t72, R28
+	  94: INCEIPL       	$4
+
+	0x25478474:  83A10064  lwz r29,100(r1)
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0x64, t74
+	  97: LDL       	(t74), t76
+	  98: PUTL       	t76, R29
+	  99: INCEIPL       	$4
+
+	0x25478478:  83C10068  lwz r30,104(r1)
+	 100: GETL       	R1, t78
+	 101: ADDL       	$0x68, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R30
+	 104: INCEIPL       	$4
+
+	0x2547847C:  83E1006C  lwz r31,108(r1)
+	 105: GETL       	R1, t82
+	 106: ADDL       	$0x6C, t82
+	 107: LDL       	(t82), t84
+	 108: PUTL       	t84, R31
+	 109: INCEIPL       	$4
+
+	0x25478480:  38210070  addi r1,r1,112
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x70, t86
+	 112: PUTL       	t86, R1
+	 113: INCEIPL       	$4
+
+	0x25478484:  4E800020  blr
+	 114: GETL       	LR, t88
+	 115: JMPo-r       	t88  ($4)
+
+
+
+. 1215 25478424 100
+. 38 60 00 00 81 E1 00 74 81 81 00 24 7D E8 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+==== BB 1216 (0x254786BC) approx BBs exec'd 0 ====
+
+	0x254786BC:  4180016C  bc 12,0,0x25478828
+	   0: Js00o       	$0x25478828
+
+
+
+. 1216 254786BC 4
+. 41 80 01 6C
+==== BB 1217 (0x254786C0) approx BBs exec'd 0 ====
+
+	0x254786C0:  84FD0004  lwzu r7,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R29
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x254786C4:  2F070000  cmpi cr6,r7,0
+	   6: GETL       	R7, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x254786C8:  409AFFC4  bc 4,26,0x2547868C
+	  10: Jc26o       	$0x2547868C
+
+
+
+. 1217 254786C0 12
+. 84 FD 00 04 2F 07 00 00 40 9A FF C4
+==== BB 1218 (0x25478BD8) approx BBs exec'd 0 ====
+
+	0x25478BD8:  81360000  lwz r9,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25478BDC:  2C090000  cmpi cr0,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x25478BE0:  408201B8  bc 4,2,0x25478D98
+	   8: Jc02o       	$0x25478D98
+
+
+
+. 1218 25478BD8 12
+. 81 36 00 00 2C 09 00 00 40 82 01 B8
+==== BB 1219 (0x25478D98) approx BBs exec'd 0 ====
+
+	0x25478D98:  8BA9000C  lbz r29,12(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25478D9C:  57BBE13E  rlwinm r27,r29,28,4,31
+	   5: GETL       	R29, t4
+	   6: SHRL       	$0x4, t4
+	   7: PUTL       	t4, R27
+	   8: INCEIPL       	$4
+
+	0x25478DA0:  2C9B0002  cmpi cr1,r27,2
+	   9: GETL       	R27, t6
+	  10: MOVL       	$0x2, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25478DA4:  4086FE40  bc 4,6,0x25478BE4
+	  14: Jc06o       	$0x25478BE4
+
+
+
+. 1219 25478D98 16
+. 8B A9 00 0C 57 BB E1 3E 2C 9B 00 02 40 86 FE 40
+==== BB 1220 (0x25478DA8) approx BBs exec'd 0 ====
+
+	0x25478DA8:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25478DAC:  38000000  li r0,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x25478DB0:  4BFFF990  b 0x25478740
+	   6: JMPo       	$0x25478740  ($4)
+
+
+
+. 1220 25478DA8 12
+. 38 60 00 00 38 00 00 00 4B FF F9 90
+==== BB 1221 (0x25478740) approx BBs exec'd 0 ====
+
+	0x25478740:  90160000  stw r0,0(r22)
+	   0: GETL       	R0, t0
+	   1: GETL       	R22, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25478744:  80E10000  lwz r7,0(r1)
+	   4: GETL       	R1, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R7
+	   7: INCEIPL       	$4
+
+	0x25478748:  82C70004  lwz r22,4(r7)
+	   8: GETL       	R7, t8
+	   9: ADDL       	$0x4, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R22
+	  12: INCEIPL       	$4
+
+	0x2547874C:  8107FFC0  lwz r8,-64(r7)
+	  13: GETL       	R7, t12
+	  14: ADDL       	$0xFFFFFFC0, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R8
+	  17: INCEIPL       	$4
+
+	0x25478750:  7EC803A6  mtlr r22
+	  18: GETL       	R22, t16
+	  19: PUTL       	t16, LR
+	  20: INCEIPL       	$4
+
+	0x25478754:  8227FFC4  lwz r17,-60(r7)
+	  21: GETL       	R7, t18
+	  22: ADDL       	$0xFFFFFFC4, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R17
+	  25: INCEIPL       	$4
+
+	0x25478758:  8247FFC8  lwz r18,-56(r7)
+	  26: GETL       	R7, t22
+	  27: ADDL       	$0xFFFFFFC8, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R18
+	  30: INCEIPL       	$4
+
+	0x2547875C:  7D008120  mtcrf 0x8,r8
+	  31: GETL       	R8, t26
+	  32: ICRFL       	t26, $0x4, CR
+	  33: INCEIPL       	$4
+
+	0x25478760:  8267FFCC  lwz r19,-52(r7)
+	  34: GETL       	R7, t28
+	  35: ADDL       	$0xFFFFFFCC, t28
+	  36: LDL       	(t28), t30
+	  37: PUTL       	t30, R19
+	  38: INCEIPL       	$4
+
+	0x25478764:  8287FFD0  lwz r20,-48(r7)
+	  39: GETL       	R7, t32
+	  40: ADDL       	$0xFFFFFFD0, t32
+	  41: LDL       	(t32), t34
+	  42: PUTL       	t34, R20
+	  43: INCEIPL       	$4
+
+	0x25478768:  82A7FFD4  lwz r21,-44(r7)
+	  44: GETL       	R7, t36
+	  45: ADDL       	$0xFFFFFFD4, t36
+	  46: LDL       	(t36), t38
+	  47: PUTL       	t38, R21
+	  48: INCEIPL       	$4
+
+	0x2547876C:  82C7FFD8  lwz r22,-40(r7)
+	  49: GETL       	R7, t40
+	  50: ADDL       	$0xFFFFFFD8, t40
+	  51: LDL       	(t40), t42
+	  52: PUTL       	t42, R22
+	  53: INCEIPL       	$4
+
+	0x25478770:  82E7FFDC  lwz r23,-36(r7)
+	  54: GETL       	R7, t44
+	  55: ADDL       	$0xFFFFFFDC, t44
+	  56: LDL       	(t44), t46
+	  57: PUTL       	t46, R23
+	  58: INCEIPL       	$4
+
+	0x25478774:  8307FFE0  lwz r24,-32(r7)
+	  59: GETL       	R7, t48
+	  60: ADDL       	$0xFFFFFFE0, t48
+	  61: LDL       	(t48), t50
+	  62: PUTL       	t50, R24
+	  63: INCEIPL       	$4
+
+	0x25478778:  8327FFE4  lwz r25,-28(r7)
+	  64: GETL       	R7, t52
+	  65: ADDL       	$0xFFFFFFE4, t52
+	  66: LDL       	(t52), t54
+	  67: PUTL       	t54, R25
+	  68: INCEIPL       	$4
+
+	0x2547877C:  8347FFE8  lwz r26,-24(r7)
+	  69: GETL       	R7, t56
+	  70: ADDL       	$0xFFFFFFE8, t56
+	  71: LDL       	(t56), t58
+	  72: PUTL       	t58, R26
+	  73: INCEIPL       	$4
+
+	0x25478780:  8367FFEC  lwz r27,-20(r7)
+	  74: GETL       	R7, t60
+	  75: ADDL       	$0xFFFFFFEC, t60
+	  76: LDL       	(t60), t62
+	  77: PUTL       	t62, R27
+	  78: INCEIPL       	$4
+
+	0x25478784:  8387FFF0  lwz r28,-16(r7)
+	  79: GETL       	R7, t64
+	  80: ADDL       	$0xFFFFFFF0, t64
+	  81: LDL       	(t64), t66
+	  82: PUTL       	t66, R28
+	  83: INCEIPL       	$4
+
+	0x25478788:  83A7FFF4  lwz r29,-12(r7)
+	  84: GETL       	R7, t68
+	  85: ADDL       	$0xFFFFFFF4, t68
+	  86: LDL       	(t68), t70
+	  87: PUTL       	t70, R29
+	  88: INCEIPL       	$4
+
+	0x2547878C:  83C7FFF8  lwz r30,-8(r7)
+	  89: GETL       	R7, t72
+	  90: ADDL       	$0xFFFFFFF8, t72
+	  91: LDL       	(t72), t74
+	  92: PUTL       	t74, R30
+	  93: INCEIPL       	$4
+
+	0x25478790:  83E7FFFC  lwz r31,-4(r7)
+	  94: GETL       	R7, t76
+	  95: ADDL       	$0xFFFFFFFC, t76
+	  96: LDL       	(t76), t78
+	  97: PUTL       	t78, R31
+	  98: INCEIPL       	$4
+
+	0x25478794:  7CE13B78  or r1,r7,r7
+	  99: GETL       	R7, t80
+	 100: PUTL       	t80, R1
+	 101: INCEIPL       	$4
+
+	0x25478798:  4E800020  blr
+	 102: GETL       	LR, t82
+	 103: JMPo-r       	t82  ($4)
+
+
+
+. 1221 25478740 92
+. 90 16 00 00 80 E1 00 00 82 C7 00 04 81 07 FF C0 7E C8 03 A6 82 27 FF C4 82 47 FF C8 7D 00 81 20 82 67 FF CC 82 87 FF D0 82 A7 FF D4 82 C7 FF D8 82 E7 FF DC 83 07 FF E0 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+==== BB 1222 (0x2547998C) approx BBs exec'd 0 ====
+
+	0x2547998C:  2F9D0044  cmpi cr7,r29,68
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x44, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25479990:  801C0008  lwz r0,8(r28)
+	   5: GETL       	R28, t6
+	   6: ADDL       	$0x8, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R0
+	   9: INCEIPL       	$4
+
+	0x25479994:  7D290214  add r9,r9,r0
+	  10: GETL       	R9, t10
+	  11: GETL       	R0, t12
+	  12: ADDL       	t10, t12
+	  13: PUTL       	t12, R9
+	  14: INCEIPL       	$4
+
+	0x25479998:  419E0100  bc 12,30,0x25479A98
+	  15: Js30o       	$0x25479A98
+
+
+
+. 1222 2547998C 16
+. 2F 9D 00 44 80 1C 00 08 7D 29 02 14 41 9E 01 00
+==== BB 1223 (0x254784A0) approx BBs exec'd 0 ====
+
+	0x254784A0:  41AEFF18  bc 13,14,0x254783B8
+	   0: Js14o       	$0x254783B8
+
+
+
+. 1223 254784A0 4
+. 41 AE FF 18
+==== BB 1224 (0x254784A4) approx BBs exec'd 0 ====
+
+	0x254784A4:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254784A8:  5469FFFF  rlwinm. r9,r3,31,31,31
+	   5: GETL       	R3, t4
+	   6: ROLL       	$0x1F, t4
+	   7: ANDL       	$0x1, t4
+	   8: PUTL       	t4, R9
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x254784AC:  4182006C  bc 12,2,0x25478518
+	  12: Js02o       	$0x25478518
+
+
+
+. 1224 254784A4 12
+. 80 61 00 14 54 69 FF FF 41 82 00 6C
+==== BB 1225 (0x25478518) approx BBs exec'd 0 ====
+
+	0x25478518:  57E9083C  rlwinm r9,r31,1,0,30
+	   0: GETL       	R31, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x2547851C:  7CC9B22E  lhzx r6,r9,r22
+	   4: GETL       	R22, t2
+	   5: GETL       	R9, t4
+	   6: ADDL       	t4, t2
+	   7: LDW       	(t2), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25478520:  54C5047E  rlwinm r5,r6,0,17,31
+	  10: GETL       	R6, t8
+	  11: ANDL       	$0x7FFF, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x25478524:  2F850002  cmpi cr7,r5,2
+	  14: GETL       	R5, t10
+	  15: MOVL       	$0x2, t14
+	  16: CMPL       	t10, t14, t12  (-rSo)
+	  17: ICRFL       	t12, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25478528:  4BFFFF98  b 0x254784C0
+	  19: JMPo       	$0x254784C0  ($4)
+
+
+
+. 1225 25478518 20
+. 57 E9 08 3C 7C C9 B2 2E 54 C5 04 7E 2F 85 00 02 4B FF FF 98
+==== BB 1226 (0x254784C0) approx BBs exec'd 0 ====
+
+	0x254784C0:  40BDFEF8  bc 5,29,0x254783B8
+	   0: Jc29o       	$0x254783B8
+
+
+
+. 1226 254784C0 4
+. 40 BD FE F8
+==== BB 1227 (0x254799A8) approx BBs exec'd 0 ====
+
+	0x254799A8:  2C1D0014  cmpi cr0,r29,20
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x14, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254799AC:  418200E4  bc 12,2,0x25479A90
+	   5: Js02o       	$0x25479A90
+
+
+
+. 1227 254799A8 8
+. 2C 1D 00 14 41 82 00 E4
+==== BB 1228 (0x254799D8) approx BBs exec'd 0 ====
+
+	0x254799D8:  83BF000C  lwz r29,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254799DC:  4BFFFD44  b 0x25479720
+	   5: JMPo       	$0x25479720  ($4)
+
+
+
+. 1228 254799D8 8
+. 83 BF 00 0C 4B FF FD 44
+==== BB 1229 (0x25479720) approx BBs exec'd 0 ====
+
+	0x25479720:  3652FFFF  addic. r18,r18,-1
+	   0: GETL       	R18, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R18
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25479724:  3AB5000C  addi r21,r21,12
+	   6: GETL       	R21, t4
+	   7: ADDL       	$0xC, t4
+	   8: PUTL       	t4, R21
+	   9: INCEIPL       	$4
+
+	0x25479728:  4080FFC0  bc 4,0,0x254796E8
+	  10: Jc00o       	$0x254796E8
+
+
+
+. 1229 25479720 12
+. 36 52 FF FF 3A B5 00 0C 40 80 FF C0
+==== BB 1230 (0x254796E8) approx BBs exec'd 0 ====
+
+	0x254796E8:  80B50020  lwz r5,32(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254796EC:  7FB6EB78  or r22,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0x254796F0:  81750018  lwz r11,24(r21)
+	   8: GETL       	R21, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x254796F4:  2C050000  cmpi cr0,r5,0
+	  13: GETL       	R5, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0x254796F8:  8115001C  lwz r8,28(r21)
+	  17: GETL       	R21, t14
+	  18: ADDL       	$0x1C, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R8
+	  21: INCEIPL       	$4
+
+	0x254796FC:  7D695B78  or r9,r11,r11
+	  22: GETL       	R11, t18
+	  23: PUTL       	t18, R9
+	  24: INCEIPL       	$4
+
+	0x25479700:  833D0000  lwz r25,0(r29)
+	  25: GETL       	R29, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R25
+	  28: INCEIPL       	$4
+
+	0x25479704:  7F0B4214  add r24,r11,r8
+	  29: GETL       	R11, t24
+	  30: GETL       	R8, t26
+	  31: ADDL       	t24, t26
+	  32: PUTL       	t26, R24
+	  33: INCEIPL       	$4
+
+	0x25479708:  418200BC  bc 12,2,0x254797C4
+	  34: Js02o       	$0x254797C4
+
+
+
+. 1230 254796E8 36
+. 80 B5 00 20 7F B6 EB 78 81 75 00 18 2C 05 00 00 81 15 00 1C 7D 69 5B 78 83 3D 00 00 7F 0B 42 14 41 82 00 BC
+==== BB 1231 (0x2547970C) approx BBs exec'd 0 ====
+
+	0x2547970C:  7F8BC040  cmpl cr7,r11,r24
+	   0: GETL       	R11, t0
+	   1: GETL       	R24, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25479710:  409C0010  bc 4,28,0x25479720
+	   5: Jc28o       	$0x25479720
+
+
+
+. 1231 2547970C 8
+. 7F 8B C0 40 40 9C 00 10
+==== BB 1232 (0x25479714) approx BBs exec'd 0 ====
+
+	0x25479714:  3929000C  addi r9,r9,12
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xC, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25479718:  7F89C040  cmpl cr7,r9,r24
+	   4: GETL       	R9, t2
+	   5: GETL       	R24, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547971C:  419CFFF8  bc 12,28,0x25479714
+	   9: Js28o       	$0x25479714
+
+
+
+. 1232 25479714 12
+. 39 29 00 0C 7F 89 C0 40 41 9C FF F8
+==== BB 1233 (0x254797FC) approx BBs exec'd 0 ====
+
+	0x254797FC:  3B7001B8  addi r27,r16,440
+	   0: GETL       	R16, t0
+	   1: ADDL       	$0x1B8, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25479800:  1CE9000C  mulli r7,r9,12
+	   4: GETL       	R9, t2
+	   5: MULL       	$0xC, t2
+	   6: PUTL       	t2, R7
+	   7: INCEIPL       	$4
+
+	0x25479804:  7E1DD800  cmp cr4,r29,r27
+	   8: GETL       	R29, t4
+	   9: GETL       	R27, t6
+	  10: CMPL       	t4, t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0x25479808:  7F8B3A14  add r28,r11,r7
+	  13: GETL       	R11, t10
+	  14: GETL       	R7, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0x2547980C:  41920018  bc 12,18,0x25479824
+	  18: Js18o       	$0x25479824
+
+
+
+. 1233 254797FC 20
+. 3B 70 01 B8 1C E9 00 0C 7E 1D D8 00 7F 8B 3A 14 41 92 00 18
+==== BB 1234 (0x2547972C) approx BBs exec'd 0 ====
+
+	0x2547972C:  408E0710  bc 4,14,0x25479E3C
+	   0: Jc14o       	$0x25479E3C
+
+
+
+. 1234 2547972C 4
+. 40 8E 07 10
+==== BB 1235 (0x25479730) approx BBs exec'd 0 ====
+
+	0x25479730:  827D0180  lwz r19,384(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0x25479734:  2F910000  cmpi cr7,r17,0
+	   5: GETL       	R17, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25479738:  66722000  oris r18,r19,0x2000
+	   9: GETL       	R19, t8
+	  10: ORL       	$0x20000000, t8
+	  11: PUTL       	t8, R18
+	  12: INCEIPL       	$4
+
+	0x2547973C:  925D0180  stw r18,384(r29)
+	  13: GETL       	R18, t10
+	  14: GETL       	R29, t12
+	  15: ADDL       	$0x180, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25479740:  409E05C0  bc 4,30,0x25479D00
+	  18: Jc30o       	$0x25479D00
+
+
+
+. 1235 25479730 20
+. 82 7D 01 80 2F 91 00 00 66 72 20 00 92 5D 01 80 40 9E 05 C0
+==== BB 1236 (0x25479744) approx BBs exec'd 0 ====
+
+	0x25479744:  807F000C  lwz r3,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25479748:  82A30238  lwz r21,568(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x238, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R21
+	   9: INCEIPL       	$4
+
+	0x2547974C:  2F950000  cmpi cr7,r21,0
+	  10: GETL       	R21, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25479750:  409E02BC  bc 4,30,0x25479A0C
+	  14: Jc30o       	$0x25479A0C
+
+
+
+. 1236 25479744 16
+. 80 7F 00 0C 82 A3 02 38 2F 95 00 00 40 9E 02 BC
+==== BB 1237 (0x25479A0C) approx BBs exec'd 0 ====
+
+	0x25479A0C:  4BFFFB1D  bl 0x25479528
+	   0: MOVL       	$0x25479A10, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25479528  ($4)
+
+
+
+. 1237 25479A0C 4
+. 4B FF FB 1D
+==== BB 1238 _dl_protect_relro(0x25479528) approx BBs exec'd 0 ====
+
+	0x25479528:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547952C:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25479530:  4801DAD1  bl 0x25497000
+	   9: MOVL       	$0x25479534, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1238 25479528 12
+. 94 21 FF F0 7D 88 02 A6 48 01 DA D1
+==== BB 1239 (0x25479534) approx BBs exec'd 0 ====
+
+	0x25479534:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25479538:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547953C:  93E1000C  stw r31,12(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25479540:  91810014  stw r12,20(r1)
+	  13: GETL       	R12, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25479544:  7C7F1B78  or r31,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0x25479548:  80A30000  lwz r5,0(r3)
+	  21: GETL       	R3, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R5
+	  24: INCEIPL       	$4
+
+	0x2547954C:  813E04F4  lwz r9,1268(r30)
+	  25: GETL       	R30, t20
+	  26: ADDL       	$0x4F4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R9
+	  29: INCEIPL       	$4
+
+	0x25479550:  81030234  lwz r8,564(r3)
+	  30: GETL       	R3, t24
+	  31: ADDL       	$0x234, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R8
+	  34: INCEIPL       	$4
+
+	0x25479554:  80E30238  lwz r7,568(r3)
+	  35: GETL       	R3, t28
+	  36: ADDL       	$0x238, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R7
+	  39: INCEIPL       	$4
+
+	0x25479558:  80C90004  lwz r6,4(r9)
+	  40: GETL       	R9, t32
+	  41: ADDL       	$0x4, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R6
+	  44: INCEIPL       	$4
+
+	0x2547955C:  7C654214  add r3,r5,r8
+	  45: GETL       	R5, t36
+	  46: GETL       	R8, t38
+	  47: ADDL       	t36, t38
+	  48: PUTL       	t38, R3
+	  49: INCEIPL       	$4
+
+	0x25479560:  7C833A14  add r4,r3,r7
+	  50: GETL       	R3, t40
+	  51: GETL       	R7, t42
+	  52: ADDL       	t40, t42
+	  53: PUTL       	t42, R4
+	  54: INCEIPL       	$4
+
+	0x25479564:  38A00001  li r5,1
+	  55: MOVL       	$0x1, t44
+	  56: PUTL       	t44, R5
+	  57: INCEIPL       	$4
+
+	0x25479568:  7D4600D0  neg r10,r6
+	  58: GETL       	R6, t46
+	  59: NEGL       	t46
+	  60: PUTL       	t46, R10
+	  61: INCEIPL       	$4
+
+	0x2547956C:  7C605038  and r0,r3,r10
+	  62: GETL       	R3, t48
+	  63: GETL       	R10, t50
+	  64: ANDL       	t48, t50
+	  65: PUTL       	t50, R0
+	  66: INCEIPL       	$4
+
+	0x25479570:  7C8B5038  and r11,r4,r10
+	  67: GETL       	R4, t52
+	  68: GETL       	R10, t54
+	  69: ANDL       	t52, t54
+	  70: PUTL       	t54, R11
+	  71: INCEIPL       	$4
+
+	0x25479574:  7F805800  cmp cr7,r0,r11
+	  72: GETL       	R0, t56
+	  73: GETL       	R11, t58
+	  74: CMPL       	t56, t58, t60  (-rSo)
+	  75: ICRFL       	t60, $0x7, CR
+	  76: INCEIPL       	$4
+
+	0x25479578:  7C030378  or r3,r0,r0
+	  77: GETL       	R0, t62
+	  78: PUTL       	t62, R3
+	  79: INCEIPL       	$4
+
+	0x2547957C:  7C805850  subf r4,r0,r11
+	  80: GETL       	R0, t64
+	  81: GETL       	R11, t66
+	  82: SUBL       	t64, t66
+	  83: PUTL       	t66, R4
+	  84: INCEIPL       	$4
+
+	0x25479580:  419E0010  bc 12,30,0x25479590
+	  85: Js30o       	$0x25479590
+
+
+
+. 1239 25479534 80
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 91 81 00 14 7C 7F 1B 78 80 A3 00 00 81 3E 04 F4 81 03 02 34 80 E3 02 38 80 C9 00 04 7C 65 42 14 7C 83 3A 14 38 A0 00 01 7D 46 00 D0 7C 60 50 38 7C 8B 50 38 7F 80 58 00 7C 03 03 78 7C 80 58 50 41 9E 00 10
+==== BB 1240 (0x25479584) approx BBs exec'd 0 ====
+
+	0x25479584:  48009235  bl 0x254827B8
+	   0: MOVL       	$0x25479588, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x254827B8  ($4)
+
+
+
+. 1240 25479584 4
+. 48 00 92 35
+==== BB 1241 (0x25479588) approx BBs exec'd 0 ====
+
+	0x25479588:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547958C:  4180001C  bc 12,0,0x254795A8
+	   4: Js00o       	$0x254795A8
+
+
+
+. 1241 25479588 8
+. 2C 03 00 00 41 80 00 1C
+==== BB 1242 (0x25479590) approx BBs exec'd 0 ====
+
+	0x25479590:  81610014  lwz r11,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25479594:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0x25479598:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0x2547959C:  7D6803A6  mtlr r11
+	  15: GETL       	R11, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x254795A0:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0x254795A4:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 1242 25479590 24
+. 81 61 00 14 83 C1 00 08 83 E1 00 0C 7D 68 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1243 (0x25479A10) approx BBs exec'd 0 ====
+
+	0x25479A10:  4BFFFD44  b 0x25479754
+	   0: JMPo       	$0x25479754  ($4)
+
+
+
+. 1243 25479A10 4
+. 4B FF FD 44
+==== BB 1244 (0x25479754) approx BBs exec'd 0 ====
+
+	0x25479754:  80810000  lwz r4,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25479758:  83240004  lwz r25,4(r4)
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R25
+	   8: INCEIPL       	$4
+
+	0x2547975C:  8184FFBC  lwz r12,-68(r4)
+	   9: GETL       	R4, t8
+	  10: ADDL       	$0xFFFFFFBC, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x25479760:  7F2803A6  mtlr r25
+	  14: GETL       	R25, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x25479764:  8204FFC0  lwz r16,-64(r4)
+	  17: GETL       	R4, t14
+	  18: ADDL       	$0xFFFFFFC0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R16
+	  21: INCEIPL       	$4
+
+	0x25479768:  8224FFC4  lwz r17,-60(r4)
+	  22: GETL       	R4, t18
+	  23: ADDL       	$0xFFFFFFC4, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R17
+	  26: INCEIPL       	$4
+
+	0x2547976C:  7D818120  mtcrf 0x18,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x3, CR
+	  29: ICRFL       	t22, $0x4, CR
+	  30: INCEIPL       	$4
+
+	0x25479770:  8244FFC8  lwz r18,-56(r4)
+	  31: GETL       	R4, t24
+	  32: ADDL       	$0xFFFFFFC8, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R18
+	  35: INCEIPL       	$4
+
+	0x25479774:  8264FFCC  lwz r19,-52(r4)
+	  36: GETL       	R4, t28
+	  37: ADDL       	$0xFFFFFFCC, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R19
+	  40: INCEIPL       	$4
+
+	0x25479778:  8284FFD0  lwz r20,-48(r4)
+	  41: GETL       	R4, t32
+	  42: ADDL       	$0xFFFFFFD0, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R20
+	  45: INCEIPL       	$4
+
+	0x2547977C:  82A4FFD4  lwz r21,-44(r4)
+	  46: GETL       	R4, t36
+	  47: ADDL       	$0xFFFFFFD4, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R21
+	  50: INCEIPL       	$4
+
+	0x25479780:  82C4FFD8  lwz r22,-40(r4)
+	  51: GETL       	R4, t40
+	  52: ADDL       	$0xFFFFFFD8, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R22
+	  55: INCEIPL       	$4
+
+	0x25479784:  82E4FFDC  lwz r23,-36(r4)
+	  56: GETL       	R4, t44
+	  57: ADDL       	$0xFFFFFFDC, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R23
+	  60: INCEIPL       	$4
+
+	0x25479788:  8304FFE0  lwz r24,-32(r4)
+	  61: GETL       	R4, t48
+	  62: ADDL       	$0xFFFFFFE0, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R24
+	  65: INCEIPL       	$4
+
+	0x2547978C:  8324FFE4  lwz r25,-28(r4)
+	  66: GETL       	R4, t52
+	  67: ADDL       	$0xFFFFFFE4, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R25
+	  70: INCEIPL       	$4
+
+	0x25479790:  8344FFE8  lwz r26,-24(r4)
+	  71: GETL       	R4, t56
+	  72: ADDL       	$0xFFFFFFE8, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R26
+	  75: INCEIPL       	$4
+
+	0x25479794:  8364FFEC  lwz r27,-20(r4)
+	  76: GETL       	R4, t60
+	  77: ADDL       	$0xFFFFFFEC, t60
+	  78: LDL       	(t60), t62
+	  79: PUTL       	t62, R27
+	  80: INCEIPL       	$4
+
+	0x25479798:  8384FFF0  lwz r28,-16(r4)
+	  81: GETL       	R4, t64
+	  82: ADDL       	$0xFFFFFFF0, t64
+	  83: LDL       	(t64), t66
+	  84: PUTL       	t66, R28
+	  85: INCEIPL       	$4
+
+	0x2547979C:  83A4FFF4  lwz r29,-12(r4)
+	  86: GETL       	R4, t68
+	  87: ADDL       	$0xFFFFFFF4, t68
+	  88: LDL       	(t68), t70
+	  89: PUTL       	t70, R29
+	  90: INCEIPL       	$4
+
+	0x254797A0:  83C4FFF8  lwz r30,-8(r4)
+	  91: GETL       	R4, t72
+	  92: ADDL       	$0xFFFFFFF8, t72
+	  93: LDL       	(t72), t74
+	  94: PUTL       	t74, R30
+	  95: INCEIPL       	$4
+
+	0x254797A4:  83E4FFFC  lwz r31,-4(r4)
+	  96: GETL       	R4, t76
+	  97: ADDL       	$0xFFFFFFFC, t76
+	  98: LDL       	(t76), t78
+	  99: PUTL       	t78, R31
+	 100: INCEIPL       	$4
+
+	0x254797A8:  7C812378  or r1,r4,r4
+	 101: GETL       	R4, t80
+	 102: PUTL       	t80, R1
+	 103: INCEIPL       	$4
+
+	0x254797AC:  4E800020  blr
+	 104: GETL       	LR, t82
+	 105: JMPo-r       	t82  ($4)
+
+
+
+. 1244 25479754 92
+. 80 81 00 00 83 24 00 04 81 84 FF BC 7F 28 03 A6 82 04 FF C0 82 24 FF C4 7D 81 81 20 82 44 FF C8 82 64 FF CC 82 84 FF D0 82 A4 FF D4 82 C4 FF D8 82 E4 FF DC 83 04 FF E0 83 24 FF E4 83 44 FF E8 83 64 FF EC 83 84 FF F0 83 A4 FF F4 83 C4 FF F8 83 E4 FF FC 7C 81 23 78 4E 80 00 20
+==== BB 1245 (0x254733B4) approx BBs exec'd 0 ====
+
+	0x254733B4:  83BD0010  lwz r29,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254733B8:  2E1D0000  cmpi cr4,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254733BC:  4092FFC8  bc 4,18,0x25473384
+	   9: Jc18o       	$0x25473384
+
+
+
+. 1245 254733B4 12
+. 83 BD 00 10 2E 1D 00 00 40 92 FF C8
+==== BB 1246 (0x254737B0) approx BBs exec'd 0 ====
+
+	0x254737B0:  38000001  li r0,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254737B4:  90090008  stw r0,8(r9)
+	   3: GETL       	R0, t2
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254737B8:  81290004  lwz r9,4(r9)
+	   8: GETL       	R9, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0x254737BC:  2C890000  cmpi cr1,r9,0
+	  13: GETL       	R9, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x1, CR
+	  16: INCEIPL       	$4
+
+	0x254737C0:  4186FBD4  bc 12,6,0x25473394
+	  17: Js06o       	$0x25473394
+
+
+
+. 1246 254737B0 20
+. 38 00 00 01 90 09 00 08 81 29 00 04 2C 89 00 00 41 86 FB D4
+==== BB 1247 (0x254799E0) approx BBs exec'd 0 ====
+
+	0x254799E0:  7F8BE040  cmpl cr7,r11,r28
+	   0: GETL       	R11, t0
+	   1: GETL       	R28, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254799E4:  40BCFE40  bc 5,28,0x25479824
+	   5: Jc28o       	$0x25479824
+
+
+
+. 1247 254799E0 8
+. 7F 8B E0 40 40 BC FE 40
+==== BB 1248 (0x254799E8) approx BBs exec'd 0 ====
+
+	0x254799E8:  812B0008  lwz r9,8(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x254799EC:  808B0000  lwz r4,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x254799F0:  396B000C  addi r11,r11,12
+	   9: GETL       	R11, t8
+	  10: ADDL       	$0xC, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x254799F4:  7F8BE040  cmpl cr7,r11,r28
+	  13: GETL       	R11, t10
+	  14: GETL       	R28, t12
+	  15: CMPUL       	t10, t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0x254799F8:  7D09CA14  add r8,r9,r25
+	  18: GETL       	R9, t16
+	  19: GETL       	R25, t18
+	  20: ADDL       	t16, t18
+	  21: PUTL       	t18, R8
+	  22: INCEIPL       	$4
+
+	0x254799FC:  7D04C92E  stwx r8,r4,r25
+	  23: GETL       	R25, t20
+	  24: GETL       	R4, t22
+	  25: ADDL       	t22, t20
+	  26: GETL       	R8, t24
+	  27: STL       	t24, (t20)
+	  28: INCEIPL       	$4
+
+	0x25479A00:  4BFFFFE4  b 0x254799E4
+	  29: JMPo       	$0x254799E4  ($4)
+
+
+
+. 1248 254799E8 28
+. 81 2B 00 08 80 8B 00 00 39 6B 00 0C 7F 8B E0 40 7D 09 CA 14 7D 04 C9 2E 4B FF FF E4
+==== BB 1249 (0x254799E4) approx BBs exec'd 0 ====
+
+	0x254799E4:  40BCFE40  bc 5,28,0x25479824
+	   0: Jc28o       	$0x25479824
+
+
+
+. 1249 254799E4 4
+. 40 BC FE 40
+==== BB 1250 (0x254783C4) approx BBs exec'd 0 ====
+
+	0x254783C4:  2C000002  cmpi cr0,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254783C8:  41820164  bc 12,2,0x2547852C
+	   5: Js02o       	$0x2547852C
+
+
+
+. 1250 254783C4 8
+. 2C 00 00 02 41 82 01 64
+==== BB 1251 (0x2547852C) approx BBs exec'd 0 ====
+
+	0x2547852C:  829E04F4  lwz r20,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R20
+	   4: INCEIPL       	$4
+
+	0x25478530:  82740030  lwz r19,48(r20)
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0x30, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R19
+	   9: INCEIPL       	$4
+
+	0x25478534:  2C930000  cmpi cr1,r19,0
+	  10: GETL       	R19, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25478538:  40860018  bc 4,6,0x25478550
+	  14: Jc06o       	$0x25478550
+
+
+
+. 1251 2547852C 16
+. 82 9E 04 F4 82 74 00 30 2C 93 00 00 40 86 00 18
+==== BB 1252 (0x2547839C) approx BBs exec'd 0 ====
+
+	0x2547839C:  80150008  lwz r0,8(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x254783A0:  2C000000  cmpi cr0,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x254783A4:  4082FF4C  bc 4,2,0x254782F0
+	   9: Jc02o       	$0x254782F0
+
+
+
+. 1252 2547839C 12
+. 80 15 00 08 2C 00 00 00 40 82 FF 4C
+==== BB 1253 (0x254783A8) approx BBs exec'd 0 ====
+
+	0x254783A8:  2F990000  cmpi cr7,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x254783AC:  40BEFF44  bc 5,30,0x254782F0
+	   4: Jc30o       	$0x254782F0
+
+
+
+. 1253 254783A8 8
+. 2F 99 00 00 40 BE FF 44
+==== BB 1254 (0x25479EE4) approx BBs exec'd 0 ====
+
+	0x25479EE4:  7D200026  mfcr r9
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25479EE8:  55299FFE  rlwinm r9,r9,19,31,31
+	   3: GETL       	R9, t2
+	   4: ROLL       	$0x13, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R9
+	   7: INCEIPL       	$4
+
+	0x25479EEC:  39600000  li r11,0
+	   8: MOVL       	$0x0, t4
+	   9: PUTL       	t4, R11
+	  10: INCEIPL       	$4
+
+	0x25479EF0:  7CA00026  mfcr r5
+	  11: GETL       	CR, t6
+	  12: PUTL       	t6, R5
+	  13: INCEIPL       	$4
+
+	0x25479EF4:  54A53FFE  rlwinm r5,r5,7,31,31
+	  14: GETL       	R5, t8
+	  15: ROLL       	$0x7, t8
+	  16: ANDL       	$0x1, t8
+	  17: PUTL       	t8, R5
+	  18: INCEIPL       	$4
+
+	0x25479EF8:  7D282B79  or. r8,r9,r5
+	  19: GETL       	R9, t10
+	  20: GETL       	R5, t12
+	  21: ORL       	t12, t10
+	  22: PUTL       	t10, R8
+	  23: CMP0L       	t10, t14  (-rSo)
+	  24: ICRFL       	t14, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x25479EFC:  40820058  bc 4,2,0x25479F54
+	  26: Jc02o       	$0x25479F54
+
+
+
+. 1254 25479EE4 28
+. 7D 20 00 26 55 29 9F FE 39 60 00 00 7C A0 00 26 54 A5 3F FE 7D 28 2B 79 40 82 00 58
+==== BB 1255 (0x25479F00) approx BBs exec'd 0 ====
+
+	0x25479F00:  381DFFBC  addi r0,r29,-68
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xFFFFFFBC, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25479F04:  6BA30002  xori r3,r29,0x2
+	   4: GETL       	R29, t2
+	   5: XORL       	$0x2, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0x25479F08:  21030000  subfic r8,r3,0
+	   8: GETL       	R3, t4
+	   9: MOVL       	$0x0, t6
+	  10: SBBL       	t4, t6  (-wCa)
+	  11: PUTL       	t6, R8
+	  12: INCEIPL       	$4
+
+	0x25479F0C:  7C681914  adde r3,r8,r3
+	  13: GETL       	R8, t8
+	  14: GETL       	R3, t10
+	  15: ADCL       	t8, t10  (-rCa-wCa)
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0x25479F10:  2180000A  subfic r12,r0,10
+	  18: GETL       	R0, t12
+	  19: MOVL       	$0xA, t14
+	  20: SBBL       	t12, t14  (-wCa)
+	  21: PUTL       	t14, R12
+	  22: INCEIPL       	$4
+
+	0x25479F14:  39800000  li r12,0
+	  23: MOVL       	$0x0, t16
+	  24: PUTL       	t16, R12
+	  25: INCEIPL       	$4
+
+	0x25479F18:  7D8C6114  adde r12,r12,r12
+	  26: GETL       	R12, t18
+	  27: GETL       	R12, t20
+	  28: ADCL       	t18, t20  (-rCa-wCa)
+	  29: PUTL       	t20, R12
+	  30: INCEIPL       	$4
+
+	0x25479F1C:  7D881B79  or. r8,r12,r3
+	  31: GETL       	R12, t22
+	  32: GETL       	R3, t24
+	  33: ORL       	t24, t22
+	  34: PUTL       	t22, R8
+	  35: CMP0L       	t22, t26  (-rSo)
+	  36: ICRFL       	t26, $0x0, CR
+	  37: INCEIPL       	$4
+
+	0x25479F20:  40820034  bc 4,2,0x25479F54
+	  38: Jc02o       	$0x25479F54
+
+
+
+. 1255 25479F00 36
+. 38 1D FF BC 6B A3 00 02 21 03 00 00 7C 68 19 14 21 80 00 0A 39 80 00 00 7D 8C 61 14 7D 88 1B 79 40 82 00 34
+==== BB 1256 (0x25479F24) approx BBs exec'd 0 ====
+
+	0x25479F24:  2F1D0013  cmpi cr6,r29,19
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x13, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25479F28:  8124020C  lwz r9,524(r4)
+	   5: GETL       	R4, t6
+	   6: ADDL       	$0x20C, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R9
+	   9: INCEIPL       	$4
+
+	0x25479F2C:  419A00B4  bc 12,26,0x25479FE0
+	  10: Js26o       	$0x25479FE0
+
+
+
+. 1256 25479F24 12
+. 2F 1D 00 13 81 24 02 0C 41 9A 00 B4
+==== BB 1257 (0x25479F30) approx BBs exec'd 0 ====
+
+	0x25479F30:  7F8B4800  cmp cr7,r11,r9
+	   0: GETL       	R11, t0
+	   1: GETL       	R9, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25479F34:  409EF988  bc 4,30,0x254798BC
+	   5: Jc30o       	$0x254798BC
+
+
+
+. 1257 25479F30 8
+. 7F 8B 48 00 40 9E F9 88
+==== BB 1258 (0x25479F38) approx BBs exec'd 0 ====
+
+	0x25479F38:  817001AC  lwz r11,428(r16)
+	   0: GETL       	R16, t0
+	   1: ADDL       	$0x1AC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25479F3C:  80C40214  lwz r6,532(r4)
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0x214, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25479F40:  83440210  lwz r26,528(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x210, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0x25479F44:  394B0001  addi r10,r11,1
+	  15: GETL       	R11, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x25479F48:  915001AC  stw r10,428(r16)
+	  19: GETL       	R10, t14
+	  20: GETL       	R16, t16
+	  21: ADDL       	$0x1AC, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x25479F4C:  90DF0050  stw r6,80(r31)
+	  24: GETL       	R6, t18
+	  25: GETL       	R31, t20
+	  26: ADDL       	$0x50, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x25479F50:  4BFFFA24  b 0x25479974
+	  29: JMPo       	$0x25479974  ($4)
+
+
+
+. 1258 25479F38 28
+. 81 70 01 AC 80 C4 02 14 83 44 02 10 39 4B 00 01 91 50 01 AC 90 DF 00 50 4B FF FA 24
+==== BB 1259 (0x25479974) approx BBs exec'd 0 ====
+
+	0x25479974:  2F060000  cmpi cr6,r6,0
+	   0: GETL       	R6, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25479978:  39200000  li r9,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0x2547997C:  419A0010  bc 12,26,0x2547998C
+	   7: Js26o       	$0x2547998C
+
+
+
+. 1259 25479974 12
+. 2F 06 00 00 39 20 00 00 41 9A 00 10
+==== BB 1260 (0x25479EBC) approx BBs exec'd 0 ====
+
+	0x25479EBC:  A11B000E  lhz r8,14(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xE, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25479EC0:  2F880000  cmpi cr7,r8,0
+	   5: GETL       	R8, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25479EC4:  41BEF9D0  bc 13,30,0x25479894
+	   9: Js30o       	$0x25479894
+
+
+
+. 1260 25479EBC 12
+. A1 1B 00 0E 2F 88 00 00 41 BE F9 D0
+==== BB 1261 (0x25479A50) approx BBs exec'd 0 ====
+
+	0x25479A50:  835F000C  lwz r26,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479A54:  4BFFFF20  b 0x25479974
+	   5: JMPo       	$0x25479974  ($4)
+
+
+
+. 1261 25479A50 8
+. 83 5F 00 0C 4B FF FF 20
+==== BB 1262 (0x25479A98) approx BBs exec'd 0 ====
+
+	0x25479A98:  2F1A0000  cmpi cr6,r26,0
+	   0: GETL       	R26, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25479A9C:  41BAFF30  bc 13,26,0x254799CC
+	   4: Js26o       	$0x254799CC
+
+
+
+. 1262 25479A98 8
+. 2F 1A 00 00 41 BA FF 30
+==== BB 1263 (0x25479AA0) approx BBs exec'd 0 ====
+
+	0x25479AA0:  837A0230  lwz r27,560(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x230, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25479AA4:  93770000  stw r27,0(r23)
+	   5: GETL       	R27, t4
+	   6: GETL       	R23, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x25479AA8:  4BFFFF24  b 0x254799CC
+	   9: JMPo       	$0x254799CC  ($4)
+
+
+
+. 1263 25479AA0 12
+. 83 7A 02 30 93 77 00 00 4B FF FF 24
+==== BB 1264 (0x25479A6C) approx BBs exec'd 0 ====
+
+	0x25479A6C:  2C9D0049  cmpi cr1,r29,73
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x49, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25479A70:  41860238  bc 12,6,0x25479CA8
+	   5: Js06o       	$0x25479CA8
+
+
+
+. 1264 25479A6C 8
+. 2C 9D 00 49 41 86 02 38
+==== BB 1265 (0x25479CA8) approx BBs exec'd 0 ====
+
+	0x25479CA8:  2C1A0000  cmpi cr0,r26,0
+	   0: GETL       	R26, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25479CAC:  41A2FD20  bc 13,2,0x254799CC
+	   4: Js02o       	$0x254799CC
+
+
+
+. 1265 25479CA8 8
+. 2C 1A 00 00 41 A2 FD 20
+==== BB 1266 (0x25479CB0) approx BBs exec'd 0 ====
+
+	0x25479CB0:  817A022C  lwz r11,556(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x22C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25479CB4:  2C8BFFFF  cmpi cr1,r11,-1
+	   5: GETL       	R11, t4
+	   6: MOVL       	$0xFFFFFFFF, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25479CB8:  4186034C  bc 12,6,0x2547A004
+	  10: Js06o       	$0x2547A004
+
+
+
+. 1266 25479CB0 12
+. 81 7A 02 2C 2C 8B FF FF 41 86 03 4C
+==== BB 1267 (0x25479CBC) approx BBs exec'd 0 ====
+
+	0x25479CBC:  83460004  lwz r26,4(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479CC0:  7D8BD214  add r12,r11,r26
+	   5: GETL       	R11, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25479CC4:  7C6C0214  add r3,r12,r0
+	  10: GETL       	R12, t8
+	  11: GETL       	R0, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x25479CC8:  39239000  addi r9,r3,-28672
+	  15: GETL       	R3, t12
+	  16: ADDL       	$0xFFFF9000, t12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0x25479CCC:  91370000  stw r9,0(r23)
+	  19: GETL       	R9, t14
+	  20: GETL       	R23, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25479CD0:  4BFFFCFC  b 0x254799CC
+	  23: JMPo       	$0x254799CC  ($4)
+
+
+
+. 1267 25479CBC 24
+. 83 46 00 04 7D 8B D2 14 7C 6C 02 14 39 23 90 00 91 37 00 00 4B FF FC FC
+==== BB 1268 (0x25479A04) approx BBs exec'd 0 ====
+
+	0x25479A04:  39000001  li r8,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0x25479A08:  4BFFFEF4  b 0x254798FC
+	   3: JMPo       	$0x254798FC  ($4)
+
+
+
+. 1268 25479A04 8
+. 39 00 00 01 4B FF FE F4
+==== BB 1269 (0x254799B0) approx BBs exec'd 0 ====
+
+	0x254799B0:  7F45D378  or r5,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x254799B4:  7F67DB78  or r7,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x254799B8:  7EE8BB78  or r8,r23,r23
+	   6: GETL       	R23, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x254799BC:  7FAAEB78  or r10,r29,r29
+	   9: GETL       	R29, t6
+	  10: PUTL       	t6, R10
+	  11: INCEIPL       	$4
+
+	0x254799C0:  7EC3B378  or r3,r22,r22
+	  12: GETL       	R22, t8
+	  13: PUTL       	t8, R3
+	  14: INCEIPL       	$4
+
+	0x254799C4:  7F84E378  or r4,r28,r28
+	  15: GETL       	R28, t10
+	  16: PUTL       	t10, R4
+	  17: INCEIPL       	$4
+
+	0x254799C8:  480072E1  bl 0x25480CA8
+	  18: MOVL       	$0x254799CC, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0x25480CA8  ($4)
+
+
+
+. 1269 254799B0 28
+. 7F 45 D3 78 7F 67 DB 78 7E E8 BB 78 7F AA EB 78 7E C3 B3 78 7F 84 E3 78 48 00 72 E1
+==== BB 1270 (0x25480FF0) approx BBs exec'd 0 ====
+
+	0x25480FF0:  3D3AFE00  addis r9,r26,-512
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0xFE000000, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25480FF4:  3D60FC00  lis r11,-1024
+	   4: MOVL       	$0xFC000000, t2
+	   5: PUTL       	t2, R11
+	   6: INCEIPL       	$4
+
+	0x25480FF8:  38690003  addi r3,r9,3
+	   7: GETL       	R9, t4
+	   8: ADDL       	$0x3, t4
+	   9: PUTL       	t4, R3
+	  10: INCEIPL       	$4
+
+	0x25480FFC:  61600002  ori r0,r11,0x2
+	  11: MOVL       	$0xFC000002, t6
+	  12: PUTL       	t6, R0
+	  13: INCEIPL       	$4
+
+	0x25481000:  7C030040  cmpl cr0,r3,r0
+	  14: GETL       	R3, t8
+	  15: GETL       	R0, t10
+	  16: CMPUL       	t8, t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x25481004:  40810194  bc 4,1,0x25481198
+	  19: Jc01o       	$0x25481198
+
+
+
+. 1270 25480FF0 24
+. 3D 3A FE 00 3D 60 FC 00 38 69 00 03 61 60 00 02 7C 03 00 40 40 81 01 94
+==== BB 1271 (0x25481198) approx BBs exec'd 0 ====
+
+	0x25481198:  81610008  lwz r11,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2548119C:  814B002C  lwz r10,44(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x2C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x254811A0:  810A0004  lwz r8,4(r10)
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0x254811A4:  7CE82850  subf r7,r8,r5
+	  15: GETL       	R8, t12
+	  16: GETL       	R5, t14
+	  17: SUBL       	t12, t14
+	  18: PUTL       	t14, R7
+	  19: INCEIPL       	$4
+
+	0x254811A8:  7CEA1670  srawi r10,r7,2
+	  20: GETL       	R7, t16
+	  21: SARL       	$0x2, t16  (-wCa)
+	  22: PUTL       	t16, R10
+	  23: INCEIPL       	$4
+
+	0x254811AC:  288A4011  cmpli cr1,r10,16401
+	  24: GETL       	R10, t18
+	  25: MOVL       	$0x4011, t22
+	  26: CMPUL       	t18, t22, t20  (-rSo)
+	  27: ICRFL       	t20, $0x1, CR
+	  28: INCEIPL       	$4
+
+	0x254811B0:  418500A0  bc 12,5,0x25481250
+	  29: Js05o       	$0x25481250
+
+
+
+. 1271 25481198 28
+. 81 61 00 08 81 4B 00 2C 81 0A 00 04 7C E8 28 50 7C EA 16 70 28 8A 40 11 41 85 00 A0
+==== BB 1272 (0x254811B4) approx BBs exec'd 0 ====
+
+	0x254811B4:  816B0028  lwz r11,40(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x28, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254811B8:  3C60AAAA  lis r3,-21846
+	   5: MOVL       	$0xAAAA0000, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254811BC:  6060AAAB  ori r0,r3,0xAAAB
+	   8: MOVL       	$0xAAAAAAAB, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x254811C0:  3BAAFFEE  addi r29,r10,-18
+	  11: GETL       	R10, t8
+	  12: ADDL       	$0xFFFFFFEE, t8
+	  13: PUTL       	t8, R29
+	  14: INCEIPL       	$4
+
+	0x254811C4:  83EB0004  lwz r31,4(r11)
+	  15: GETL       	R11, t10
+	  16: ADDL       	$0x4, t10
+	  17: LDL       	(t10), t12
+	  18: PUTL       	t12, R31
+	  19: INCEIPL       	$4
+
+	0x254811C8:  57ABF87E  rlwinm r11,r29,31,1,31
+	  20: GETL       	R29, t14
+	  21: SHRL       	$0x1, t14
+	  22: PUTL       	t14, R11
+	  23: INCEIPL       	$4
+
+	0x254811CC:  7F7F0016  mulhwu r27,r31,r0
+	  24: GETL       	R31, t16
+	  25: GETL       	R0, t18
+	  26: UMULHL       	t16, t18
+	  27: PUTL       	t18, R27
+	  28: INCEIPL       	$4
+
+	0x254811D0:  577CE8FE  rlwinm r28,r27,29,3,31
+	  29: GETL       	R27, t20
+	  30: SHRL       	$0x3, t20
+	  31: PUTL       	t20, R28
+	  32: INCEIPL       	$4
+
+	0x254811D4:  2B1C2000  cmpli cr6,r28,8192
+	  33: GETL       	R28, t22
+	  34: MOVL       	$0x2000, t26
+	  35: CMPUL       	t22, t26, t24  (-rSo)
+	  36: ICRFL       	t24, $0x6, CR
+	  37: INCEIPL       	$4
+
+	0x254811D8:  5787083C  rlwinm r7,r28,1,0,30
+	  38: GETL       	R28, t28
+	  39: SHLL       	$0x1, t28
+	  40: PUTL       	t28, R7
+	  41: INCEIPL       	$4
+
+	0x254811DC:  39270012  addi r9,r7,18
+	  42: GETL       	R7, t30
+	  43: ADDL       	$0x12, t30
+	  44: PUTL       	t30, R9
+	  45: INCEIPL       	$4
+
+	0x254811E0:  552C103A  rlwinm r12,r9,2,0,29
+	  46: GETL       	R9, t32
+	  47: SHLL       	$0x2, t32
+	  48: PUTL       	t32, R12
+	  49: INCEIPL       	$4
+
+	0x254811E4:  7CCC4214  add r6,r12,r8
+	  50: GETL       	R12, t34
+	  51: GETL       	R8, t36
+	  52: ADDL       	t34, t36
+	  53: PUTL       	t36, R6
+	  54: INCEIPL       	$4
+
+	0x254811E8:  40990014  bc 4,25,0x254811FC
+	  55: Jc25o       	$0x254811FC
+
+
+
+. 1272 254811B4 56
+. 81 6B 00 28 3C 60 AA AA 60 60 AA AB 3B AA FF EE 83 EB 00 04 57 AB F8 7E 7F 7F 00 16 57 7C E8 FE 2B 1C 20 00 57 87 08 3C 39 27 00 12 55 2C 10 3A 7C CC 42 14 40 99 00 14
+==== BB 1273 (0x254811FC) approx BBs exec'd 0 ====
+
+	0x254811FC:  1FEAFFFC  mulli r31,r10,-4
+	   0: GETL       	R10, t0
+	   1: MULL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0x25481200:  556C103A  rlwinm r12,r11,2,0,29
+	   4: GETL       	R11, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R12
+	   7: INCEIPL       	$4
+
+	0x25481204:  559B043E  rlwinm r27,r12,0,16,31
+	   8: GETL       	R12, t4
+	   9: ANDL       	$0xFFFF, t4
+	  10: PUTL       	t4, R27
+	  11: INCEIPL       	$4
+
+	0x25481208:  7F4C312E  stwx r26,r12,r6
+	  12: GETL       	R6, t6
+	  13: GETL       	R12, t8
+	  14: ADDL       	t8, t6
+	  15: GETL       	R26, t10
+	  16: STL       	t10, (t6)
+	  17: INCEIPL       	$4
+
+	0x2548120C:  3BBFFFFC  addi r29,r31,-4
+	  18: GETL       	R31, t12
+	  19: ADDL       	$0xFFFFFFFC, t12
+	  20: PUTL       	t12, R29
+	  21: INCEIPL       	$4
+
+	0x25481210:  676A3960  oris r10,r27,0x3960
+	  22: GETL       	R27, t14
+	  23: ORL       	$0x39600000, t14
+	  24: PUTL       	t14, R10
+	  25: INCEIPL       	$4
+
+	0x25481214:  57BC01BA  rlwinm r28,r29,0,6,29
+	  26: GETL       	R29, t16
+	  27: ANDL       	$0x3FFFFFC, t16
+	  28: PUTL       	t16, R28
+	  29: INCEIPL       	$4
+
+	0x25481218:  91450000  stw r10,0(r5)
+	  30: GETL       	R10, t18
+	  31: GETL       	R5, t20
+	  32: STL       	t18, (t20)
+	  33: INCEIPL       	$4
+
+	0x2548121C:  67874800  oris r7,r28,0x4800
+	  34: GETL       	R28, t22
+	  35: ORL       	$0x48000000, t22
+	  36: PUTL       	t22, R7
+	  37: INCEIPL       	$4
+
+	0x25481220:  94E50004  stwu r7,4(r5)
+	  38: GETL       	R7, t24
+	  39: GETL       	R5, t26
+	  40: ADDL       	$0x4, t26
+	  41: PUTL       	t26, R5
+	  42: STL       	t24, (t26)
+	  43: INCEIPL       	$4
+
+	0x25481224:  7C00286C  dcbst r0,r5
+	  44: INCEIPL       	$4
+
+	0x25481228:  7C0004AC  sync
+	  45: INCEIPL       	$4
+
+	0x2548122C:  80A1000C  lwz r5,12(r1)
+	  46: GETL       	R1, t28
+	  47: ADDL       	$0xC, t28
+	  48: LDL       	(t28), t30
+	  49: PUTL       	t30, R5
+	  50: INCEIPL       	$4
+
+	0x25481230:  39050004  addi r8,r5,4
+	  51: GETL       	R5, t32
+	  52: ADDL       	$0x4, t32
+	  53: PUTL       	t32, R8
+	  54: INCEIPL       	$4
+
+	0x25481234:  7C0047AC  icbi r0,r8
+	  55: GETL       	R8, t34
+	  56: CALLM_So       	
+	  57: PUSHL       	t34
+	  58: CALLMo       	$0x68
+	  59: CALLM_Eo       	
+	  60: INCEIPL       	$4
+
+	0x25481238:  80A1000C  lwz r5,12(r1)
+	  61: GETL       	R1, t36
+	  62: ADDL       	$0xC, t36
+	  63: LDL       	(t36), t38
+	  64: PUTL       	t38, R5
+	  65: INCEIPL       	$4
+
+	0x2548123C:  4BFFFBC4  b 0x25480E00
+	  66: JMPo       	$0x25480E00  ($4)
+
+
+
+. 1273 254811FC 68
+. 1F EA FF FC 55 6C 10 3A 55 9B 04 3E 7F 4C 31 2E 3B BF FF FC 67 6A 39 60 57 BC 01 BA 91 45 00 00 67 87 48 00 94 E5 00 04 7C 00 28 6C 7C 00 04 AC 80 A1 00 0C 39 05 00 04 7C 00 47 AC 80 A1 00 0C 4B FF FB C4
+==== BB 1274 (0x254797E0) approx BBs exec'd 0 ====
+
+	0x254797E0:  3C00AAAA  lis r0,-21846
+	   0: MOVL       	$0xAAAA0000, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254797E4:  600CAAAB  ori r12,r0,0xAAAB
+	   3: MOVL       	$0xAAAAAAAB, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0x254797E8:  7EE86016  mulhwu r23,r8,r12
+	   6: GETL       	R8, t4
+	   7: GETL       	R12, t6
+	   8: UMULHL       	t4, t6
+	   9: PUTL       	t6, R23
+	  10: INCEIPL       	$4
+
+	0x254797EC:  56E9E8FE  rlwinm r9,r23,29,3,31
+	  11: GETL       	R23, t8
+	  12: SHRL       	$0x3, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x254797F0:  7C893840  cmpl cr1,r9,r7
+	  15: GETL       	R9, t10
+	  16: GETL       	R7, t12
+	  17: CMPUL       	t10, t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0x254797F4:  40850008  bc 4,5,0x254797FC
+	  20: Jc05o       	$0x254797FC
+
+
+
+. 1274 254797E0 24
+. 3C 00 AA AA 60 0C AA AB 7E E8 60 16 56 E9 E8 FE 7C 89 38 40 40 85 00 08
+==== BB 1275 (0x254733C0) approx BBs exec'd 0 ====
+
+	0x254733C0:  4800C225  bl 0x2547F5E4
+	   0: MOVL       	$0x254733C4, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547F5E4  ($4)
+
+
+
+. 1275 254733C0 4
+. 48 00 C2 25
+==== BB 1276 _dl_sysdep_start_cleanup(0x2547F5E4) approx BBs exec'd 0 ====
+
+	0x2547F5E4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547F5E8:  38210010  addi r1,r1,16
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0x10, t4
+	   8: PUTL       	t4, R1
+	   9: INCEIPL       	$4
+
+	0x2547F5EC:  4E800020  blr
+	  10: GETL       	LR, t6
+	  11: JMPo-r       	t6  ($4)
+
+
+
+. 1276 2547F5E4 12
+. 94 21 FF F0 38 21 00 10 4E 80 00 20
+==== BB 1277 (0x254733C4) approx BBs exec'd 0 ====
+
+	0x254733C4:  80CE01A4  lwz r6,420(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1A4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254733C8:  2C060000  cmpi cr0,r6,0
+	   5: GETL       	R6, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x254733CC:  4082094C  bc 4,2,0x25473D18
+	   9: Jc02o       	$0x25473D18
+
+
+
+. 1277 254733C4 12
+. 80 CE 01 A4 2C 06 00 00 40 82 09 4C
+==== BB 1278 (0x254733D0) approx BBs exec'd 0 ====
+
+	0x254733D0:  816E0330  lwz r11,816(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x330, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254733D4:  2B0B0001  cmpli cr6,r11,1
+	   5: GETL       	R11, t4
+	   6: MOVL       	$0x1, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x254733D8:  4099F784  bc 4,25,0x25472B5C
+	  10: Jc25o       	$0x25472B5C
+
+
+
+. 1278 254733D0 12
+. 81 6E 03 30 2B 0B 00 01 40 99 F7 84
+==== BB 1279 (0x254733DC) approx BBs exec'd 0 ====
+
+	0x254733DC:  809401C0  lwz r4,448(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x1C0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254733E0:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254733E4:  38A00000  li r5,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x254733E8:  38C00000  li r6,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R6
+	  13: INCEIPL       	$4
+
+	0x254733EC:  480061D5  bl 0x254795C0
+	  14: MOVL       	$0x254733F0, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x254795C0  ($4)
+
+
+
+. 1279 254733DC 20
+. 80 94 01 C0 7F 83 E3 78 38 A0 00 00 38 C0 00 00 48 00 61 D5
+==== BB 1280 (0x254733F0) approx BBs exec'd 0 ====
+
+	0x254733F0:  4BFFF76C  b 0x25472B5C
+	   0: JMPo       	$0x25472B5C  ($4)
+
+
+
+. 1280 254733F0 4
+. 4B FF F7 6C
+==== BB 1281 (0x25472B5C) approx BBs exec'd 0 ====
+
+	0x25472B5C:  7E238B78  or r3,r17,r17
+	   0: GETL       	R17, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25472B60:  4800B479  bl 0x2547DFD8
+	   3: MOVL       	$0x25472B64, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547DFD8  ($4)
+
+
+
+. 1281 25472B5C 8
+. 7E 23 8B 78 48 00 B4 79
+==== BB 1282 __GI__dl_allocate_tls_init(0x2547DFD8) approx BBs exec'd 0 ====
+
+	0x2547DFD8:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547DFDC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547DFE0:  93210014  stw r25,20(r1)
+	   9: GETL       	R25, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547DFE4:  7C791B79  or. r25,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R25
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x2547DFE8:  48019019  bl 0x25497000
+	  19: MOVL       	$0x2547DFEC, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1282 2547DFD8 20
+. 94 21 FF D0 7C 08 02 A6 93 21 00 14 7C 79 1B 79 48 01 90 19
+==== BB 1283 (0x2547DFEC) approx BBs exec'd 0 ====
+
+	0x2547DFEC:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547DFF0:  92E1000C  stw r23,12(r1)
+	   5: GETL       	R23, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0xC, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547DFF4:  7FC802A6  mflr r30
+	  10: GETL       	LR, t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0x2547DFF8:  93010010  stw r24,16(r1)
+	  13: GETL       	R24, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x10, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547DFFC:  38600000  li r3,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R3
+	  20: INCEIPL       	$4
+
+	0x2547E000:  93410018  stw r26,24(r1)
+	  21: GETL       	R26, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x18, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547E004:  9361001C  stw r27,28(r1)
+	  26: GETL       	R27, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x1C, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0x2547E008:  93810020  stw r28,32(r1)
+	  31: GETL       	R28, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x20, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547E00C:  93A10024  stw r29,36(r1)
+	  36: GETL       	R29, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x24, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0x2547E010:  93E1002C  stw r31,44(r1)
+	  41: GETL       	R31, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x2C, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0x2547E014:  90010034  stw r0,52(r1)
+	  46: GETL       	R0, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x34, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0x2547E018:  418200D8  bc 12,2,0x2547E0F0
+	  51: Js02o       	$0x2547E0F0
+
+
+
+. 1283 2547DFEC 48
+. 93 C1 00 28 92 E1 00 0C 7F C8 02 A6 93 01 00 10 38 60 00 00 93 41 00 18 93 61 00 1C 93 81 00 20 93 A1 00 24 93 E1 00 2C 90 01 00 34 41 82 00 D8
+==== BB 1284 (0x2547E01C) approx BBs exec'd 0 ====
+
+	0x2547E01C:  831E04C8  lwz r24,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0x2547E020:  3B400000  li r26,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547E024:  82F9FFFC  lwz r23,-4(r25)
+	   8: GETL       	R25, t6
+	   9: ADDL       	$0xFFFFFFFC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0x2547E028:  83780410  lwz r27,1040(r24)
+	  13: GETL       	R24, t10
+	  14: ADDL       	$0x410, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0x2547E02C:  817B0000  lwz r11,0(r27)
+	  18: GETL       	R27, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R11
+	  21: INCEIPL       	$4
+
+	0x2547E030:  201A0000  subfic r0,r26,0
+	  22: GETL       	R26, t18
+	  23: MOVL       	$0x0, t20
+	  24: SBBL       	t18, t20  (-wCa)
+	  25: PUTL       	t20, R0
+	  26: INCEIPL       	$4
+
+	0x2547E034:  7FE0D114  adde r31,r0,r26
+	  27: GETL       	R0, t22
+	  28: GETL       	R26, t24
+	  29: ADCL       	t22, t24  (-rCa-wCa)
+	  30: PUTL       	t24, R31
+	  31: INCEIPL       	$4
+
+	0x2547E038:  7D3AFA14  add r9,r26,r31
+	  32: GETL       	R26, t26
+	  33: GETL       	R31, t28
+	  34: ADDL       	t26, t28
+	  35: PUTL       	t28, R9
+	  36: INCEIPL       	$4
+
+	0x2547E03C:  7F8BF840  cmpl cr7,r11,r31
+	  37: GETL       	R11, t30
+	  38: GETL       	R31, t32
+	  39: CMPUL       	t30, t32, t34  (-rSo)
+	  40: ICRFL       	t34, $0x7, CR
+	  41: INCEIPL       	$4
+
+	0x2547E040:  409D0064  bc 4,29,0x2547E0A4
+	  42: Jc29o       	$0x2547E0A4
+
+
+
+. 1284 2547E01C 40
+. 83 1E 04 C8 3B 40 00 00 82 F9 FF FC 83 78 04 10 81 7B 00 00 20 1A 00 00 7F E0 D1 14 7D 3A FA 14 7F 8B F8 40 40 9D 00 64
+==== BB 1285 (0x2547E044) approx BBs exec'd 0 ====
+
+	0x2547E044:  57E31838  rlwinm r3,r31,3,0,28
+	   0: GETL       	R31, t0
+	   1: SHLL       	$0x3, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x2547E048:  7D23DA14  add r9,r3,r27
+	   4: GETL       	R3, t2
+	   5: GETL       	R27, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547E04C:  3B89000C  addi r28,r9,12
+	   9: GETL       	R9, t6
+	  10: ADDL       	$0xC, t6
+	  11: PUTL       	t6, R28
+	  12: INCEIPL       	$4
+
+	0x2547E050:  80980408  lwz r4,1032(r24)
+	  13: GETL       	R24, t8
+	  14: ADDL       	$0x408, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R4
+	  17: INCEIPL       	$4
+
+	0x2547E054:  7D3AFA14  add r9,r26,r31
+	  18: GETL       	R26, t12
+	  19: GETL       	R31, t14
+	  20: ADDL       	t12, t14
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0x2547E058:  7C892040  cmpl cr1,r9,r4
+	  23: GETL       	R9, t16
+	  24: GETL       	R4, t18
+	  25: CMPUL       	t16, t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x1, CR
+	  27: INCEIPL       	$4
+
+	0x2547E05C:  41850048  bc 12,5,0x2547E0A4
+	  28: Js05o       	$0x2547E0A4
+
+
+
+. 1285 2547E044 28
+. 57 E3 18 38 7D 23 DA 14 3B 89 00 0C 80 98 04 08 7D 3A FA 14 7C 89 20 40 41 85 00 48
+==== BB 1286 (0x2547E060) approx BBs exec'd 0 ====
+
+	0x2547E060:  813C0000  lwz r9,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547E064:  2F090000  cmpi cr6,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x2547E068:  419A0028  bc 12,26,0x2547E090
+	   8: Js26o       	$0x2547E090
+
+
+
+. 1286 2547E060 12
+. 81 3C 00 00 2F 09 00 00 41 9A 00 28
+==== BB 1287 (0x2547E06C) approx BBs exec'd 0 ====
+
+	0x2547E06C:  8169022C  lwz r11,556(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x22C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547E070:  2C0BFFFF  cmpi cr0,r11,-1
+	   5: GETL       	R11, t4
+	   6: MOVL       	$0xFFFFFFFF, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547E074:  7D4BCA14  add r10,r11,r25
+	  10: GETL       	R11, t10
+	  11: GETL       	R25, t12
+	  12: ADDL       	t10, t12
+	  13: PUTL       	t12, R10
+	  14: INCEIPL       	$4
+
+	0x2547E078:  7D435378  or r3,r10,r10
+	  15: GETL       	R10, t14
+	  16: PUTL       	t14, R3
+	  17: INCEIPL       	$4
+
+	0x2547E07C:  40820040  bc 4,2,0x2547E0BC
+	  18: Jc02o       	$0x2547E0BC
+
+
+
+. 1287 2547E06C 20
+. 81 69 02 2C 2C 0B FF FF 7D 4B CA 14 7D 43 53 78 40 82 00 40
+==== BB 1288 (0x2547E0BC) approx BBs exec'd 0 ====
+
+	0x2547E0BC:  81690230  lwz r11,560(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x230, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547E0C0:  5568103A  rlwinm r8,r11,2,0,29
+	   5: GETL       	R11, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x2547E0C4:  7D48B92E  stwx r10,r8,r23
+	   9: GETL       	R23, t6
+	  10: GETL       	R8, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R10, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x2547E0C8:  80A9021C  lwz r5,540(r9)
+	  15: GETL       	R9, t12
+	  16: ADDL       	$0x21C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R5
+	  19: INCEIPL       	$4
+
+	0x2547E0CC:  80E90220  lwz r7,544(r9)
+	  20: GETL       	R9, t16
+	  21: ADDL       	$0x220, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R7
+	  24: INCEIPL       	$4
+
+	0x2547E0D0:  80890218  lwz r4,536(r9)
+	  25: GETL       	R9, t20
+	  26: ADDL       	$0x218, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R4
+	  29: INCEIPL       	$4
+
+	0x2547E0D4:  7FA53850  subf r29,r5,r7
+	  30: GETL       	R5, t24
+	  31: GETL       	R7, t26
+	  32: SUBL       	t24, t26
+	  33: PUTL       	t26, R29
+	  34: INCEIPL       	$4
+
+	0x2547E0D8:  48005949  bl 0x25483A20
+	  35: MOVL       	$0x2547E0DC, t28
+	  36: PUTL       	t28, LR
+	  37: JMPo-c       	$0x25483A20  ($4)
+
+
+
+. 1288 2547E0BC 32
+. 81 69 02 30 55 68 10 3A 7D 48 B9 2E 80 A9 02 1C 80 E9 02 20 80 89 02 18 7F A5 38 50 48 00 59 49
+==== BB 1289 (0x2547E0DC) approx BBs exec'd 0 ====
+
+	0x2547E0DC:  7FA5EB78  or r5,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547E0E0:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x2547E0E4:  48005661  bl 0x25483744
+	   6: MOVL       	$0x2547E0E8, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483744  ($4)
+
+
+
+. 1289 2547E0DC 12
+. 7F A5 EB 78 38 80 00 00 48 00 56 61
+==== BB 1290 (0x2547E0E8) approx BBs exec'd 0 ====
+
+	0x2547E0E8:  4BFFFFA4  b 0x2547E08C
+	   0: JMPo       	$0x2547E08C  ($4)
+
+
+
+. 1290 2547E0E8 4
+. 4B FF FF A4
+==== BB 1291 (0x2547E08C) approx BBs exec'd 0 ====
+
+	0x2547E08C:  817B0000  lwz r11,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x2547E090:  3BFF0001  addi r31,r31,1
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0x2547E094:  3B9C0008  addi r28,r28,8
+	   8: GETL       	R28, t6
+	   9: ADDL       	$0x8, t6
+	  10: PUTL       	t6, R28
+	  11: INCEIPL       	$4
+
+	0x2547E098:  7F8BF840  cmpl cr7,r11,r31
+	  12: GETL       	R11, t8
+	  13: GETL       	R31, t10
+	  14: CMPUL       	t8, t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x2547E09C:  419DFFB4  bc 12,29,0x2547E050
+	  17: Js29o       	$0x2547E050
+
+
+
+. 1291 2547E08C 20
+. 81 7B 00 00 3B FF 00 01 3B 9C 00 08 7F 8B F8 40 41 9D FF B4
+==== BB 1292 (0x2547E050) approx BBs exec'd 0 ====
+
+	0x2547E050:  80980408  lwz r4,1032(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547E054:  7D3AFA14  add r9,r26,r31
+	   5: GETL       	R26, t4
+	   6: GETL       	R31, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x2547E058:  7C892040  cmpl cr1,r9,r4
+	  10: GETL       	R9, t8
+	  11: GETL       	R4, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x2547E05C:  41850048  bc 12,5,0x2547E0A4
+	  15: Js05o       	$0x2547E0A4
+
+
+
+. 1292 2547E050 16
+. 80 98 04 08 7D 3A FA 14 7C 89 20 40 41 85 00 48
+==== BB 1293 (0x2547E0A4) approx BBs exec'd 0 ====
+
+	0x2547E0A4:  81580408  lwz r10,1032(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547E0A8:  7D3A4B78  or r26,r9,r9
+	   5: GETL       	R9, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547E0AC:  7C8A4840  cmpl cr1,r10,r9
+	   8: GETL       	R10, t6
+	   9: GETL       	R9, t8
+	  10: CMPUL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0x2547E0B0:  4085003C  bc 4,5,0x2547E0EC
+	  13: Jc05o       	$0x2547E0EC
+
+
+
+. 1293 2547E0A4 16
+. 81 58 04 08 7D 3A 4B 78 7C 8A 48 40 40 85 00 3C
+==== BB 1294 (0x2547E0EC) approx BBs exec'd 0 ====
+
+	0x2547E0EC:  7F23CB78  or r3,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547E0F0:  81810034  lwz r12,52(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x34, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x2547E0F4:  82E1000C  lwz r23,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0x2547E0F8:  83010010  lwz r24,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R24
+	  17: INCEIPL       	$4
+
+	0x2547E0FC:  7D8803A6  mtlr r12
+	  18: GETL       	R12, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547E100:  83210014  lwz r25,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0x2547E104:  83410018  lwz r26,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R26
+	  30: INCEIPL       	$4
+
+	0x2547E108:  8361001C  lwz r27,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R27
+	  35: INCEIPL       	$4
+
+	0x2547E10C:  83810020  lwz r28,32(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R28
+	  40: INCEIPL       	$4
+
+	0x2547E110:  83A10024  lwz r29,36(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x24, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R29
+	  45: INCEIPL       	$4
+
+	0x2547E114:  83C10028  lwz r30,40(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x28, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R30
+	  50: INCEIPL       	$4
+
+	0x2547E118:  83E1002C  lwz r31,44(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x2C, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R31
+	  55: INCEIPL       	$4
+
+	0x2547E11C:  38210030  addi r1,r1,48
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x30, t44
+	  58: PUTL       	t44, R1
+	  59: INCEIPL       	$4
+
+	0x2547E120:  4E800020  blr
+	  60: GETL       	LR, t46
+	  61: JMPo-r       	t46  ($4)
+
+
+
+. 1294 2547E0EC 56
+. 7F 23 CB 78 81 81 00 34 82 E1 00 0C 83 01 00 10 7D 88 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 1295 (0x25472B64) approx BBs exec'd 0 ====
+
+	0x25472B64:  38517000  addi r2,r17,28672
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x7000, t0
+	   2: PUTL       	t0, R2
+	   3: INCEIPL       	$4
+
+	0x25472B68:  3B000001  li r24,1
+	   4: MOVL       	$0x1, t2
+	   5: PUTL       	t2, R24
+	   6: INCEIPL       	$4
+
+	0x25472B6C:  931A000C  stw r24,12(r26)
+	   7: GETL       	R24, t4
+	   8: GETL       	R26, t6
+	   9: ADDL       	$0xC, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x25472B70:  48009431  bl 0x2547BFA0
+	  12: MOVL       	$0x25472B74, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x2547BFA0  ($4)
+
+
+
+. 1295 25472B64 16
+. 38 51 70 00 3B 00 00 01 93 1A 00 0C 48 00 94 31
+==== BB 1296 __GI__dl_debug_state(0x2547BFA0) approx BBs exec'd 0 ====
+
+	0x2547BFA0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547BFA4:  38210010  addi r1,r1,16
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0x10, t4
+	   8: PUTL       	t4, R1
+	   9: INCEIPL       	$4
+
+	0x2547BFA8:  4E800020  blr
+	  10: GETL       	LR, t6
+	  11: JMPo-r       	t6  ($4)
+
+
+
+. 1296 2547BFA0 12
+. 94 21 FF F0 38 21 00 10 4E 80 00 20
+==== BB 1297 (0x25472B74) approx BBs exec'd 0 ====
+
+	0x25472B74:  480055BD  bl 0x25478130
+	   0: MOVL       	$0x25472B78, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25478130  ($4)
+
+
+
+. 1297 25472B74 4
+. 48 00 55 BD
+==== BB 1298 _dl_unload_cache(0x25478130) approx BBs exec'd 0 ====
+
+	0x25478130:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25478134:  7C6802A6  mflr r3
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x25478138:  4801EEC9  bl 0x25497000
+	   9: MOVL       	$0x2547813C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1298 25478130 12
+. 94 21 FF F0 7C 68 02 A6 48 01 EE C9
+==== BB 1299 (0x2547813C) approx BBs exec'd 0 ====
+
+	0x2547813C:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25478140:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25478144:  93E1000C  stw r31,12(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25478148:  90610014  stw r3,20(r1)
+	  13: GETL       	R3, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547814C:  3800FFFD  li r0,-3
+	  18: MOVL       	$0xFFFFFFFD, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x25478150:  83FE0254  lwz r31,596(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x254, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0x25478154:  807F0000  lwz r3,0(r31)
+	  26: GETL       	R31, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R3
+	  29: INCEIPL       	$4
+
+	0x25478158:  3923FFFF  addi r9,r3,-1
+	  30: GETL       	R3, t24
+	  31: ADDL       	$0xFFFFFFFF, t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0x2547815C:  7F890040  cmpl cr7,r9,r0
+	  34: GETL       	R9, t26
+	  35: GETL       	R0, t28
+	  36: CMPUL       	t26, t28, t30  (-rSo)
+	  37: ICRFL       	t30, $0x7, CR
+	  38: INCEIPL       	$4
+
+	0x25478160:  409D001C  bc 4,29,0x2547817C
+	  39: Jc29o       	$0x2547817C
+
+
+
+. 1299 2547813C 40
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 61 00 14 38 00 FF FD 83 FE 02 54 80 7F 00 00 39 23 FF FF 7F 89 00 40 40 9D 00 1C
+==== BB 1300 (0x2547817C) approx BBs exec'd 0 ====
+
+	0x2547817C:  80BE025C  lwz r5,604(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x25C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25478180:  80850000  lwz r4,0(r5)
+	   5: GETL       	R5, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x25478184:  4800A625  bl 0x254827A8
+	   9: MOVL       	$0x25478188, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0x254827A8  ($4)
+
+
+
+. 1300 2547817C 12
+. 80 BE 02 5C 80 85 00 00 48 00 A6 25
+==== BB 1301 munmap(0x254827A8) approx BBs exec'd 0 ====
+
+	0x254827A8:  3800005B  li r0,91
+	   0: MOVL       	$0x5B, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254827AC:  44000002  sc
+	   3: JMPo-sys       	$0x254827B0  ($4)
+
+
+
+. 1301 254827A8 8
+. 38 00 00 5B 44 00 00 02
+==== BB 1302 (0x254827B0) approx BBs exec'd 0 ====
+
+	0x254827B0:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 1302 254827B0 4
+. 4C A3 00 20
+==== BB 1303 (0x25478188) approx BBs exec'd 0 ====
+
+	0x25478188:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547818C:  80C10014  lwz r6,20(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x14, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0x25478190:  909F0000  stw r4,0(r31)
+	   8: GETL       	R4, t6
+	   9: GETL       	R31, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25478194:  83C10008  lwz r30,8(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x8, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R30
+	  16: INCEIPL       	$4
+
+	0x25478198:  7CC803A6  mtlr r6
+	  17: GETL       	R6, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0x2547819C:  83E1000C  lwz r31,12(r1)
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0xC, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R31
+	  24: INCEIPL       	$4
+
+	0x254781A0:  38210010  addi r1,r1,16
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x10, t20
+	  27: PUTL       	t20, R1
+	  28: INCEIPL       	$4
+
+	0x254781A4:  4E800020  blr
+	  29: GETL       	LR, t22
+	  30: JMPo-r       	t22  ($4)
+
+
+
+. 1303 25478188 32
+. 38 80 00 00 80 C1 00 14 90 9F 00 00 83 C1 00 08 7C C8 03 A6 83 E1 00 0C 38 21 00 10 4E 80 00 20
+==== BB 1304 (0x25472B78) approx BBs exec'd 0 ====
+
+	0x25472B78:  81010000  lwz r8,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0x25472B7C:  82280004  lwz r17,4(r8)
+	   4: GETL       	R8, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R17
+	   8: INCEIPL       	$4
+
+	0x25472B80:  8188FFB4  lwz r12,-76(r8)
+	   9: GETL       	R8, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x25472B84:  7E2803A6  mtlr r17
+	  14: GETL       	R17, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x25472B88:  81C8FFB8  lwz r14,-72(r8)
+	  17: GETL       	R8, t14
+	  18: ADDL       	$0xFFFFFFB8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R14
+	  21: INCEIPL       	$4
+
+	0x25472B8C:  81E8FFBC  lwz r15,-68(r8)
+	  22: GETL       	R8, t18
+	  23: ADDL       	$0xFFFFFFBC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R15
+	  26: INCEIPL       	$4
+
+	0x25472B90:  7D818120  mtcrf 0x18,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x3, CR
+	  29: ICRFL       	t22, $0x4, CR
+	  30: INCEIPL       	$4
+
+	0x25472B94:  8208FFC0  lwz r16,-64(r8)
+	  31: GETL       	R8, t24
+	  32: ADDL       	$0xFFFFFFC0, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R16
+	  35: INCEIPL       	$4
+
+	0x25472B98:  8228FFC4  lwz r17,-60(r8)
+	  36: GETL       	R8, t28
+	  37: ADDL       	$0xFFFFFFC4, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R17
+	  40: INCEIPL       	$4
+
+	0x25472B9C:  8248FFC8  lwz r18,-56(r8)
+	  41: GETL       	R8, t32
+	  42: ADDL       	$0xFFFFFFC8, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R18
+	  45: INCEIPL       	$4
+
+	0x25472BA0:  8268FFCC  lwz r19,-52(r8)
+	  46: GETL       	R8, t36
+	  47: ADDL       	$0xFFFFFFCC, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R19
+	  50: INCEIPL       	$4
+
+	0x25472BA4:  8288FFD0  lwz r20,-48(r8)
+	  51: GETL       	R8, t40
+	  52: ADDL       	$0xFFFFFFD0, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R20
+	  55: INCEIPL       	$4
+
+	0x25472BA8:  82A8FFD4  lwz r21,-44(r8)
+	  56: GETL       	R8, t44
+	  57: ADDL       	$0xFFFFFFD4, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R21
+	  60: INCEIPL       	$4
+
+	0x25472BAC:  82C8FFD8  lwz r22,-40(r8)
+	  61: GETL       	R8, t48
+	  62: ADDL       	$0xFFFFFFD8, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R22
+	  65: INCEIPL       	$4
+
+	0x25472BB0:  82E8FFDC  lwz r23,-36(r8)
+	  66: GETL       	R8, t52
+	  67: ADDL       	$0xFFFFFFDC, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R23
+	  70: INCEIPL       	$4
+
+	0x25472BB4:  8308FFE0  lwz r24,-32(r8)
+	  71: GETL       	R8, t56
+	  72: ADDL       	$0xFFFFFFE0, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R24
+	  75: INCEIPL       	$4
+
+	0x25472BB8:  8328FFE4  lwz r25,-28(r8)
+	  76: GETL       	R8, t60
+	  77: ADDL       	$0xFFFFFFE4, t60
+	  78: LDL       	(t60), t62
+	  79: PUTL       	t62, R25
+	  80: INCEIPL       	$4
+
+	0x25472BBC:  8348FFE8  lwz r26,-24(r8)
+	  81: GETL       	R8, t64
+	  82: ADDL       	$0xFFFFFFE8, t64
+	  83: LDL       	(t64), t66
+	  84: PUTL       	t66, R26
+	  85: INCEIPL       	$4
+
+	0x25472BC0:  8368FFEC  lwz r27,-20(r8)
+	  86: GETL       	R8, t68
+	  87: ADDL       	$0xFFFFFFEC, t68
+	  88: LDL       	(t68), t70
+	  89: PUTL       	t70, R27
+	  90: INCEIPL       	$4
+
+	0x25472BC4:  8388FFF0  lwz r28,-16(r8)
+	  91: GETL       	R8, t72
+	  92: ADDL       	$0xFFFFFFF0, t72
+	  93: LDL       	(t72), t74
+	  94: PUTL       	t74, R28
+	  95: INCEIPL       	$4
+
+	0x25472BC8:  83A8FFF4  lwz r29,-12(r8)
+	  96: GETL       	R8, t76
+	  97: ADDL       	$0xFFFFFFF4, t76
+	  98: LDL       	(t76), t78
+	  99: PUTL       	t78, R29
+	 100: INCEIPL       	$4
+
+	0x25472BCC:  83C8FFF8  lwz r30,-8(r8)
+	 101: GETL       	R8, t80
+	 102: ADDL       	$0xFFFFFFF8, t80
+	 103: LDL       	(t80), t82
+	 104: PUTL       	t82, R30
+	 105: INCEIPL       	$4
+
+	0x25472BD0:  83E8FFFC  lwz r31,-4(r8)
+	 106: GETL       	R8, t84
+	 107: ADDL       	$0xFFFFFFFC, t84
+	 108: LDL       	(t84), t86
+	 109: PUTL       	t86, R31
+	 110: INCEIPL       	$4
+
+	0x25472BD4:  7D014378  or r1,r8,r8
+	 111: GETL       	R8, t88
+	 112: PUTL       	t88, R1
+	 113: INCEIPL       	$4
+
+	0x25472BD8:  4E800020  blr
+	 114: GETL       	LR, t90
+	 115: JMPo-r       	t90  ($4)
+
+
+
+. 1304 25472B78 100
+. 81 01 00 00 82 28 00 04 81 88 FF B4 7E 28 03 A6 81 C8 FF B8 81 E8 FF BC 7D 81 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+==== BB 1305 (0x2547F324) approx BBs exec'd 0 ====
+
+	0x2547F324:  82A10224  lwz r21,548(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x224, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547F328:  806101E0  lwz r3,480(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x1E0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547F32C:  7EA803A6  mtlr r21
+	  10: GETL       	R21, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x2547F330:  82C101F8  lwz r22,504(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x1F8, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R22
+	  17: INCEIPL       	$4
+
+	0x2547F334:  82A101F4  lwz r21,500(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1F4, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R21
+	  22: INCEIPL       	$4
+
+	0x2547F338:  82E101FC  lwz r23,508(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1FC, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R23
+	  27: INCEIPL       	$4
+
+	0x2547F33C:  83010200  lwz r24,512(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x200, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R24
+	  32: INCEIPL       	$4
+
+	0x2547F340:  83210204  lwz r25,516(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x204, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R25
+	  37: INCEIPL       	$4
+
+	0x2547F344:  83410208  lwz r26,520(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x208, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R26
+	  42: INCEIPL       	$4
+
+	0x2547F348:  8361020C  lwz r27,524(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x20C, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R27
+	  47: INCEIPL       	$4
+
+	0x2547F34C:  83810210  lwz r28,528(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x210, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R28
+	  52: INCEIPL       	$4
+
+	0x2547F350:  83A10214  lwz r29,532(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x214, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R29
+	  57: INCEIPL       	$4
+
+	0x2547F354:  83C10218  lwz r30,536(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x218, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R30
+	  62: INCEIPL       	$4
+
+	0x2547F358:  83E1021C  lwz r31,540(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x21C, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R31
+	  67: INCEIPL       	$4
+
+	0x2547F35C:  38210220  addi r1,r1,544
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x220, t54
+	  70: PUTL       	t54, R1
+	  71: INCEIPL       	$4
+
+	0x2547F360:  4E800020  blr
+	  72: GETL       	LR, t56
+	  73: JMPo-r       	t56  ($4)
+
+
+
+. 1305 2547F324 64
+. 82 A1 02 24 80 61 01 E0 7E A8 03 A6 82 C1 01 F8 82 A1 01 F4 82 E1 01 FC 83 01 02 00 83 21 02 04 83 41 02 08 83 61 02 0C 83 81 02 10 83 A1 02 14 83 C1 02 18 83 E1 02 1C 38 21 02 20 4E 80 00 20
+==== BB 1306 (0x25471A20) approx BBs exec'd 0 ====
+
+	0x25471A20:  813E04F4  lwz r9,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25471A24:  7C7D1B78  or r29,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25471A28:  38600000  li r3,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x25471A2C:  80090000  lwz r0,0(r9)
+	  11: GETL       	R9, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x25471A30:  70090080  andi. r9,r0,0x80
+	  15: GETL       	R0, t12
+	  16: ANDL       	$0x80, t12
+	  17: PUTL       	t12, R9
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0x25471A34:  40820028  bc 4,2,0x25471A5C
+	  21: Jc02o       	$0x25471A5C
+
+
+
+. 1306 25471A20 24
+. 81 3E 04 F4 7C 7D 1B 78 38 60 00 00 80 09 00 00 70 09 00 80 40 82 00 28
+==== BB 1307 (0x25471A38) approx BBs exec'd 0 ====
+
+	0x25471A38:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25471A3C:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25471A40:  8361000C  lwz r27,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R27
+	  12: INCEIPL       	$4
+
+	0x25471A44:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0x25471A48:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x25471A4C:  83A10014  lwz r29,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x25471A50:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x25471A54:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0x25471A58:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+
+. 1307 25471A38 36
+. 80 81 00 24 7F A3 EB 78 83 61 00 0C 83 81 00 10 7C 88 03 A6 83 A1 00 14 83 C1 00 18 38 21 00 20 4E 80 00 20
+==== BB 1308 (0x25471DD0) approx BBs exec'd 0 ====
+
+	0x25471DD0:  82E102B4  lwz r23,692(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x2B4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x25471DD4:  83010290  lwz r24,656(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x290, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R24
+	   9: INCEIPL       	$4
+
+	0x25471DD8:  7EE803A6  mtlr r23
+	  10: GETL       	R23, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x25471DDC:  83210294  lwz r25,660(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x294, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R25
+	  17: INCEIPL       	$4
+
+	0x25471DE0:  82E1028C  lwz r23,652(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x28C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R23
+	  22: INCEIPL       	$4
+
+	0x25471DE4:  83410298  lwz r26,664(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x298, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R26
+	  27: INCEIPL       	$4
+
+	0x25471DE8:  8361029C  lwz r27,668(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x29C, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R27
+	  32: INCEIPL       	$4
+
+	0x25471DEC:  838102A0  lwz r28,672(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x2A0, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R28
+	  37: INCEIPL       	$4
+
+	0x25471DF0:  83A102A4  lwz r29,676(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x2A4, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R29
+	  42: INCEIPL       	$4
+
+	0x25471DF4:  83C102A8  lwz r30,680(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x2A8, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R30
+	  47: INCEIPL       	$4
+
+	0x25471DF8:  83E102AC  lwz r31,684(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x2AC, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R31
+	  52: INCEIPL       	$4
+
+	0x25471DFC:  382102B0  addi r1,r1,688
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x2B0, t42
+	  55: PUTL       	t42, R1
+	  56: INCEIPL       	$4
+
+	0x25471E00:  4E800020  blr
+	  57: GETL       	LR, t44
+	  58: JMPo-r       	t44  ($4)
+
+
+
+. 1308 25471DD0 52
+. 82 E1 02 B4 83 01 02 90 7E E8 03 A6 83 21 02 94 82 E1 02 8C 83 41 02 98 83 61 02 9C 83 81 02 A0 83 A1 02 A4 83 C1 02 A8 83 E1 02 AC 38 21 02 B0 4E 80 00 20
+==== BB 1309 (0x254804E8) approx BBs exec'd 0 ====
+
+	0x254804E8:  48016B19  bl 0x25497000
+	   0: MOVL       	$0x254804EC, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1309 254804E8 4
+. 48 01 6B 19
+==== BB 1310 (0x254804EC) approx BBs exec'd 0 ====
+
+	0x254804EC:  7FE802A6  mflr r31
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0x254804F0:  7C7E1B78  or r30,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R30
+	   5: INCEIPL       	$4
+
+	0x254804F4:  839F0498  lwz r28,1176(r31)
+	   6: GETL       	R31, t4
+	   7: ADDL       	$0x498, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0x254804F8:  83BF04D0  lwz r29,1232(r31)
+	  11: GETL       	R31, t8
+	  12: ADDL       	$0x4D0, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0x254804FC:  837F04DC  lwz r27,1244(r31)
+	  16: GETL       	R31, t12
+	  17: ADDL       	$0x4DC, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R27
+	  20: INCEIPL       	$4
+
+	0x25480500:  807C0000  lwz r3,0(r28)
+	  21: GETL       	R28, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R3
+	  24: INCEIPL       	$4
+
+	0x25480504:  809D0000  lwz r4,0(r29)
+	  25: GETL       	R29, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R4
+	  28: INCEIPL       	$4
+
+	0x25480508:  80BB0000  lwz r5,0(r27)
+	  29: GETL       	R27, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R5
+	  32: INCEIPL       	$4
+
+	0x2548050C:  5486103A  rlwinm r6,r4,2,0,29
+	  33: GETL       	R4, t28
+	  34: SHLL       	$0x2, t28
+	  35: PUTL       	t28, R6
+	  36: INCEIPL       	$4
+
+	0x25480510:  7CC53214  add r6,r5,r6
+	  37: GETL       	R5, t30
+	  38: GETL       	R6, t32
+	  39: ADDL       	t30, t32
+	  40: PUTL       	t32, R6
+	  41: INCEIPL       	$4
+
+	0x25480514:  38C60004  addi r6,r6,4
+	  42: GETL       	R6, t34
+	  43: ADDL       	$0x4, t34
+	  44: PUTL       	t34, R6
+	  45: INCEIPL       	$4
+
+	0x25480518:  4BFFB229  bl 0x2547B740
+	  46: MOVL       	$0x2548051C, t36
+	  47: PUTL       	t36, LR
+	  48: JMPo-c       	$0x2547B740  ($4)
+
+
+
+. 1310 254804EC 48
+. 7F E8 02 A6 7C 7E 1B 78 83 9F 04 98 83 BF 04 D0 83 7F 04 DC 80 7C 00 00 80 9D 00 00 80 BB 00 00 54 86 10 3A 7C C5 32 14 38 C6 00 04 4B FF B2 29
+==== BB 1311 _dl_init_internal(0x2547B740) approx BBs exec'd 0 ====
+
+	0x2547B740:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547B744:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547B748:  4801B8B9  bl 0x25497000
+	   9: MOVL       	$0x2547B74C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1311 2547B740 12
+. 94 21 FF C0 7C 08 02 A6 48 01 B8 B9
+==== BB 1312 (0x2547B74C) approx BBs exec'd 0 ====
+
+	0x2547B74C:  93C10038  stw r30,56(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x38, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B750:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547B754:  92C10018  stw r22,24(r1)
+	   8: GETL       	R22, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x18, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547B758:  90010044  stw r0,68(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x44, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547B75C:  93E1003C  stw r31,60(r1)
+	  18: GETL       	R31, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x3C, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547B760:  82DE04C8  lwz r22,1224(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x4C8, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R22
+	  27: INCEIPL       	$4
+
+	0x2547B764:  92E1001C  stw r23,28(r1)
+	  28: GETL       	R23, t22
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0x1C, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0x2547B768:  7C972378  or r23,r4,r4
+	  33: GETL       	R4, t26
+	  34: PUTL       	t26, R23
+	  35: INCEIPL       	$4
+
+	0x2547B76C:  83F601A0  lwz r31,416(r22)
+	  36: GETL       	R22, t28
+	  37: ADDL       	$0x1A0, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0x2547B770:  93010020  stw r24,32(r1)
+	  41: GETL       	R24, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x20, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0x2547B774:  7CB82B78  or r24,r5,r5
+	  46: GETL       	R5, t36
+	  47: PUTL       	t36, R24
+	  48: INCEIPL       	$4
+
+	0x2547B778:  2F9F0000  cmpi cr7,r31,0
+	  49: GETL       	R31, t38
+	  50: CMP0L       	t38, t40  (-rSo)
+	  51: ICRFL       	t40, $0x7, CR
+	  52: INCEIPL       	$4
+
+	0x2547B77C:  93210024  stw r25,36(r1)
+	  53: GETL       	R25, t42
+	  54: GETL       	R1, t44
+	  55: ADDL       	$0x24, t44
+	  56: STL       	t42, (t44)
+	  57: INCEIPL       	$4
+
+	0x2547B780:  93410028  stw r26,40(r1)
+	  58: GETL       	R26, t46
+	  59: GETL       	R1, t48
+	  60: ADDL       	$0x28, t48
+	  61: STL       	t46, (t48)
+	  62: INCEIPL       	$4
+
+	0x2547B784:  7CD93378  or r25,r6,r6
+	  63: GETL       	R6, t50
+	  64: PUTL       	t50, R25
+	  65: INCEIPL       	$4
+
+	0x2547B788:  9361002C  stw r27,44(r1)
+	  66: GETL       	R27, t52
+	  67: GETL       	R1, t54
+	  68: ADDL       	$0x2C, t54
+	  69: STL       	t52, (t54)
+	  70: INCEIPL       	$4
+
+	0x2547B78C:  7C7A1B78  or r26,r3,r3
+	  71: GETL       	R3, t56
+	  72: PUTL       	t56, R26
+	  73: INCEIPL       	$4
+
+	0x2547B790:  93A10034  stw r29,52(r1)
+	  74: GETL       	R29, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x34, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0x2547B794:  92810010  stw r20,16(r1)
+	  79: GETL       	R20, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x10, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x2547B798:  92A10014  stw r21,20(r1)
+	  84: GETL       	R21, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x14, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0x2547B79C:  93810030  stw r28,48(r1)
+	  89: GETL       	R28, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	$0x30, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0x2547B7A0:  83A300A0  lwz r29,160(r3)
+	  94: GETL       	R3, t74
+	  95: ADDL       	$0xA0, t74
+	  96: LDL       	(t74), t76
+	  97: PUTL       	t76, R29
+	  98: INCEIPL       	$4
+
+	0x2547B7A4:  836300A4  lwz r27,164(r3)
+	  99: GETL       	R3, t78
+	 100: ADDL       	$0xA4, t78
+	 101: LDL       	(t78), t80
+	 102: PUTL       	t80, R27
+	 103: INCEIPL       	$4
+
+	0x2547B7A8:  409E019C  bc 4,30,0x2547B944
+	 104: Jc30o       	$0x2547B944
+
+
+
+. 1312 2547B74C 96
+. 93 C1 00 38 7F C8 02 A6 92 C1 00 18 90 01 00 44 93 E1 00 3C 82 DE 04 C8 92 E1 00 1C 7C 97 23 78 83 F6 01 A0 93 01 00 20 7C B8 2B 78 2F 9F 00 00 93 21 00 24 93 41 00 28 7C D9 33 78 93 61 00 2C 7C 7A 1B 78 93 A1 00 34 92 81 00 10 92 A1 00 14 93 81 00 30 83 A3 00 A0 83 63 00 A4 40 9E 01 9C
+==== BB 1313 (0x2547B944) approx BBs exec'd 0 ====
+
+	0x2547B944:  801F0180  lwz r0,384(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547B948:  74091000  andis. r9,r0,0x1000
+	   5: GETL       	R0, t4
+	   6: ANDL       	$0x10000000, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2547B94C:  40820110  bc 4,2,0x2547BA5C
+	  11: Jc02o       	$0x2547BA5C
+
+
+
+. 1313 2547B944 12
+. 80 1F 01 80 74 09 10 00 40 82 01 10
+==== BB 1314 (0x2547B950) approx BBs exec'd 0 ====
+
+	0x2547B950:  64091000  oris r9,r0,0x1000
+	   0: GETL       	R0, t0
+	   1: ORL       	$0x10000000, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x2547B954:  809F0004  lwz r4,4(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2547B958:  913F0180  stw r9,384(r31)
+	   9: GETL       	R9, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x180, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547B95C:  88640000  lbz r3,0(r4)
+	  14: GETL       	R4, t10
+	  15: LDB       	(t10), t12
+	  16: PUTL       	t12, R3
+	  17: INCEIPL       	$4
+
+	0x2547B960:  2F030000  cmpi cr6,r3,0
+	  18: GETL       	R3, t14
+	  19: CMP0L       	t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x2547B964:  419A00F0  bc 12,26,0x2547BA54
+	  22: Js26o       	$0x2547BA54
+
+
+
+. 1314 2547B950 24
+. 64 09 10 00 80 9F 00 04 91 3F 01 80 88 64 00 00 2F 03 00 00 41 9A 00 F0
+==== BB 1315 (0x2547B968) approx BBs exec'd 0 ====
+
+	0x2547B968:  815F0050  lwz r10,80(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547B96C:  2C8A0000  cmpi cr1,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547B970:  40860010  bc 4,6,0x2547B980
+	   9: Jc06o       	$0x2547B980
+
+
+
+. 1315 2547B968 12
+. 81 5F 00 50 2C 8A 00 00 40 86 00 10
+==== BB 1316 (0x2547B980) approx BBs exec'd 0 ====
+
+	0x2547B980:  813E04F4  lwz r9,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547B984:  80C90000  lwz r6,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x2547B988:  70C90002  andi. r9,r6,0x2
+	   9: GETL       	R6, t8
+	  10: ANDL       	$0x2, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x2547B98C:  40820130  bc 4,2,0x2547BABC
+	  15: Jc02o       	$0x2547BABC
+
+
+
+. 1316 2547B980 16
+. 81 3E 04 F4 80 C9 00 00 70 C9 00 02 40 82 01 30
+==== BB 1317 (0x2547B990) approx BBs exec'd 0 ====
+
+	0x2547B990:  2F0A0000  cmpi cr6,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547B994:  409A00D4  bc 4,26,0x2547BA68
+	   4: Jc26o       	$0x2547BA68
+
+
+
+. 1317 2547B990 8
+. 2F 0A 00 00 40 9A 00 D4
+==== BB 1318 (0x2547BA68) approx BBs exec'd 0 ====
+
+	0x2547BA68:  818A0004  lwz r12,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x2547BA6C:  7EE3BB78  or r3,r23,r23
+	   5: GETL       	R23, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547BA70:  811F0000  lwz r8,0(r31)
+	   8: GETL       	R31, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R8
+	  11: INCEIPL       	$4
+
+	0x2547BA74:  7F04C378  or r4,r24,r24
+	  12: GETL       	R24, t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x2547BA78:  7F25CB78  or r5,r25,r25
+	  15: GETL       	R25, t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0x2547BA7C:  7CE86214  add r7,r8,r12
+	  18: GETL       	R8, t14
+	  19: GETL       	R12, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R7
+	  22: INCEIPL       	$4
+
+	0x2547BA80:  7CE903A6  mtctr r7
+	  23: GETL       	R7, t18
+	  24: PUTL       	t18, CTR
+	  25: INCEIPL       	$4
+
+	0x2547BA84:  4E800421  bctrl
+	  26: MOVL       	$0x2547BA88, t20
+	  27: PUTL       	t20, LR
+	  28: GETL       	CTR, t22
+	  29: JMPo-c       	t22  ($4)
+
+
+
+. 1318 2547BA68 32
+. 81 8A 00 04 7E E3 BB 78 81 1F 00 00 7F 04 C3 78 7F 25 CB 78 7C E8 62 14 7C E9 03 A6 4E 80 04 21
+==== BB 1319 (0xFFDE898) approx BBs exec'd 0 ====
+
+	0xFFDE898:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDE89C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDE8A0:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFFDE8A4:  4800001D  bl 0xFFDE8C0
+	  14: MOVL       	$0xFFDE8A8, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFDE8C0  ($4)
+
+
+
+. 1319 FFDE898 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 1D
+==== BB 1320 (0xFFDE8C0) approx BBs exec'd 0 ====
+
+	0xFFDE8C0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDE8C4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDE8C8:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFFDE8CC:  90010014  stw r0,20(r1)
+	  14: GETL       	R0, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFFDE8D0:  48010839  bl 0xFFEF108
+	  19: MOVL       	$0xFFDE8D4, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xFFEF108  ($4)
+
+
+
+. 1320 FFDE8C0 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 01 08 39
+==== BB 1321 (0xFFEF108) approx BBs exec'd 0 ====
+
+	0xFFEF108:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0xFFEF10C, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+
+. 1321 FFEF108 4
+. 4E 80 00 21
+==== BB 1322 (0xFFDE8D4) approx BBs exec'd 0 ====
+
+	0xFFDE8D4:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0xFFDE8D8:  801E000C  lwz r0,12(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFFDE8DC:  2F800000  cmpi cr7,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFFDE8E0:  419E000C  bc 12,30,0xFFDE8EC
+	  12: Js30o       	$0xFFDE8EC
+
+
+
+. 1322 FFDE8D4 16
+. 7F C8 02 A6 80 1E 00 0C 2F 80 00 00 41 9E 00 0C
+==== BB 1323 (0xFFDE8EC) approx BBs exec'd 0 ====
+
+	0xFFDE8EC:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDE8F0:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFFDE8F4:  83C10008  lwz r30,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xFFDE8F8:  38210010  addi r1,r1,16
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: PUTL       	t10, R1
+	  16: INCEIPL       	$4
+
+	0xFFDE8FC:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1323 FFDE8EC 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 1324 (0xFFDE8A8) approx BBs exec'd 0 ====
+
+	0xFFDE8A8:  48000115  bl 0xFFDE9BC
+	   0: MOVL       	$0xFFDE8AC, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFFDE9BC  ($4)
+
+
+
+. 1324 FFDE8A8 4
+. 48 00 01 15
+==== BB 1325 (0xFFDE9BC) approx BBs exec'd 0 ====
+
+	0xFFDE9BC:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDE9C0:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDE9C4:  429F0005  bcl 20,31,0xFFDE9C8
+	   9: MOVL       	$0xFFDE9C8, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFFDE9C8:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFFDE9CC:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xFFDE9D0:  90010014  stw r0,20(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFFDE9D4:  801EFFF0  lwz r0,-16(r30)
+	  25: GETL       	R30, t18
+	  26: ADDL       	$0xFFFFFFF0, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R0
+	  29: INCEIPL       	$4
+
+	0xFFDE9D8:  7FC0F214  add r30,r0,r30
+	  30: GETL       	R0, t22
+	  31: GETL       	R30, t24
+	  32: ADDL       	t22, t24
+	  33: PUTL       	t24, R30
+	  34: INCEIPL       	$4
+
+	0xFFDE9DC:  807E8010  lwz r3,-32752(r30)
+	  35: GETL       	R30, t26
+	  36: ADDL       	$0xFFFF8010, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R3
+	  39: INCEIPL       	$4
+
+	0xFFDE9E0:  80030000  lwz r0,0(r3)
+	  40: GETL       	R3, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R0
+	  43: INCEIPL       	$4
+
+	0xFFDE9E4:  2F800000  cmpi cr7,r0,0
+	  44: GETL       	R0, t34
+	  45: CMP0L       	t34, t36  (-rSo)
+	  46: ICRFL       	t36, $0x7, CR
+	  47: INCEIPL       	$4
+
+	0xFFDE9E8:  419E0018  bc 12,30,0xFFDEA00
+	  48: Js30o       	$0xFFDEA00
+
+
+
+. 1325 FFDE9BC 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 80 7E 80 10 80 03 00 00 2F 80 00 00 41 9E 00 18
+==== BB 1326 (0xFFDEA00) approx BBs exec'd 0 ====
+
+	0xFFDEA00:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDEA04:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFFDEA08:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFFDEA0C:  7C0803A6  mtlr r0
+	  14: GETL       	R0, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFFDEA10:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1326 FFDEA00 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 1327 (0xFFDE8AC) approx BBs exec'd 0 ====
+
+	0xFFDE8AC:  48000659  bl 0xFFDEF04
+	   0: MOVL       	$0xFFDE8B0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFFDEF04  ($4)
+
+
+
+. 1327 FFDE8AC 4
+. 48 00 06 59
+==== BB 1328 (0xFFDEF04) approx BBs exec'd 0 ====
+
+	0xFFDEF04:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDEF08:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDEF0C:  429F0005  bcl 20,31,0xFFDEF10
+	   9: MOVL       	$0xFFDEF10, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFFDEF10:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFFDEF14:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xFFDEF18:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFFDEF1C:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFFDEF20:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFFDEF24:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xFFDEF28:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xFFDEF2C:  8009FFFC  lwz r0,-4(r9)
+	  45: GETL       	R9, t34
+	  46: ADDL       	$0xFFFFFFFC, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R0
+	  49: INCEIPL       	$4
+
+	0xFFDEF30:  3BE9FFFC  addi r31,r9,-4
+	  50: GETL       	R9, t38
+	  51: ADDL       	$0xFFFFFFFC, t38
+	  52: PUTL       	t38, R31
+	  53: INCEIPL       	$4
+
+	0xFFDEF34:  48000010  b 0xFFDEF44
+	  54: JMPo       	$0xFFDEF44  ($4)
+
+
+
+. 1328 FFDEF04 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 80 09 FF FC 3B E9 FF FC 48 00 00 10
+==== BB 1329 (0xFFDEF44) approx BBs exec'd 0 ====
+
+	0xFFDEF44:  2F80FFFF  cmpi cr7,r0,-1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFFDEF48:  409EFFF0  bc 4,30,0xFFDEF38
+	   5: Jc30o       	$0xFFDEF38
+
+
+
+. 1329 FFDEF44 8
+. 2F 80 FF FF 40 9E FF F0
+==== BB 1330 (0xFFDEF4C) approx BBs exec'd 0 ====
+
+	0xFFDEF4C:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDEF50:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFFDEF54:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFFDEF58:  7C0803A6  mtlr r0
+	  15: GETL       	R0, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFFDEF5C:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xFFDEF60:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 1330 FFDEF4C 24
+. 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1331 (0xFFDE8B0) approx BBs exec'd 0 ====
+
+	0xFFDE8B0:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDE8B4:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFFDE8B8:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xFFDE8BC:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 1331 FFDE8B0 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1332 (0x2547BA88) approx BBs exec'd 0 ====
+
+	0x2547BA88:  4BFFFF10  b 0x2547B998
+	   0: JMPo       	$0x2547B998  ($4)
+
+
+
+. 1332 2547BA88 4
+. 4B FF FF 10
+==== BB 1333 (0x2547B998) approx BBs exec'd 0 ====
+
+	0x2547B998:  817F0084  lwz r11,132(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547B99C:  2C8B0000  cmpi cr1,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547B9A0:  418600BC  bc 12,6,0x2547BA5C
+	   9: Js06o       	$0x2547BA5C
+
+
+
+. 1333 2547B998 12
+. 81 7F 00 84 2C 8B 00 00 41 86 00 BC
+==== BB 1334 (0x2547BA5C) approx BBs exec'd 0 ====
+
+	0x2547BA5C:  3B800000  li r28,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0x2547BA60:  939601A0  stw r28,416(r22)
+	   3: GETL       	R28, t2
+	   4: GETL       	R22, t4
+	   5: ADDL       	$0x1A0, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x2547BA64:  4BFFFD48  b 0x2547B7AC
+	   8: JMPo       	$0x2547B7AC  ($4)
+
+
+
+. 1334 2547BA5C 12
+. 3B 80 00 00 93 96 01 A0 4B FF FD 48
+==== BB 1335 (0x2547B7AC) approx BBs exec'd 0 ====
+
+	0x2547B7AC:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547B7B0:  409E0240  bc 4,30,0x2547B9F0
+	   4: Jc30o       	$0x2547B9F0
+
+
+
+. 1335 2547B7AC 8
+. 2F 9D 00 00 40 9E 02 40
+==== BB 1336 (0x2547B7B4) approx BBs exec'd 0 ====
+
+	0x2547B7B4:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547B7B8:  3A800001  li r20,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R20
+	   5: INCEIPL       	$4
+
+	0x2547B7BC:  48000789  bl 0x2547BF44
+	   6: MOVL       	$0x2547B7C0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x2547BF44  ($4)
+
+
+
+. 1336 2547B7B4 12
+. 38 60 00 00 3A 80 00 01 48 00 07 89
+==== BB 1337 (0x2547BF94) approx BBs exec'd 0 ====
+
+	0x2547BF94:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0x2547BF98:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x2547BF9C:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+
+. 1337 2547BF94 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 1338 (0x2547B7C0) approx BBs exec'd 0 ====
+
+	0x2547B7C0:  9283000C  stw r20,12(r3)
+	   0: GETL       	R20, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B7C4:  7C761B78  or r22,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0x2547B7C8:  480007D9  bl 0x2547BFA0
+	   8: MOVL       	$0x2547B7CC, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x2547BFA0  ($4)
+
+
+
+. 1338 2547B7C0 12
+. 92 83 00 0C 7C 76 1B 78 48 00 07 D9
+==== BB 1339 (0x2547B7CC) approx BBs exec'd 0 ====
+
+	0x2547B7CC:  83BA015C  lwz r29,348(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x15C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547B7D0:  2F9D0000  cmpi cr7,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547B7D4:  3B9DFFFF  addi r28,r29,-1
+	   9: GETL       	R29, t8
+	  10: ADDL       	$0xFFFFFFFF, t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x2547B7D8:  419E0058  bc 12,30,0x2547B830
+	  13: Js30o       	$0x2547B830
+
+
+
+. 1339 2547B7CC 16
+. 83 BA 01 5C 2F 9D 00 00 3B 9D FF FF 41 9E 00 58
+==== BB 1340 (0x2547B7DC) approx BBs exec'd 0 ====
+
+	0x2547B7DC:  82BA01E8  lwz r21,488(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547B7E0:  5780103A  rlwinm r0,r28,2,0,29
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547B7E4:  7FF5002E  lwzx r31,r21,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R21, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0x2547B7E8:  801F0180  lwz r0,384(r31)
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x180, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R0
+	  19: INCEIPL       	$4
+
+	0x2547B7EC:  74091000  andis. r9,r0,0x1000
+	  20: GETL       	R0, t16
+	  21: ANDL       	$0x10000000, t16
+	  22: PUTL       	t16, R9
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x2547B7F0:  40820034  bc 4,2,0x2547B824
+	  26: Jc02o       	$0x2547B824
+
+
+
+. 1340 2547B7DC 24
+. 82 BA 01 E8 57 80 10 3A 7F F5 00 2E 80 1F 01 80 74 09 10 00 40 82 00 34
+==== BB 1341 (0x2547B7F4) approx BBs exec'd 0 ====
+
+	0x2547B7F4:  64091000  oris r9,r0,0x1000
+	   0: GETL       	R0, t0
+	   1: ORL       	$0x10000000, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x2547B7F8:  809F0004  lwz r4,4(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2547B7FC:  913F0180  stw r9,384(r31)
+	   9: GETL       	R9, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x180, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547B800:  88640000  lbz r3,0(r4)
+	  14: GETL       	R4, t10
+	  15: LDB       	(t10), t12
+	  16: PUTL       	t12, R3
+	  17: INCEIPL       	$4
+
+	0x2547B804:  2F030000  cmpi cr6,r3,0
+	  18: GETL       	R3, t14
+	  19: CMP0L       	t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x2547B808:  419A0104  bc 12,26,0x2547B90C
+	  22: Js26o       	$0x2547B90C
+
+
+
+. 1341 2547B7F4 24
+. 64 09 10 00 80 9F 00 04 91 3F 01 80 88 64 00 00 2F 03 00 00 41 9A 01 04
+==== BB 1342 (0x2547B80C) approx BBs exec'd 0 ====
+
+	0x2547B80C:  815F0050  lwz r10,80(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547B810:  2C8A0000  cmpi cr1,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547B814:  40860064  bc 4,6,0x2547B878
+	   9: Jc06o       	$0x2547B878
+
+
+
+. 1342 2547B80C 12
+. 81 5F 00 50 2C 8A 00 00 40 86 00 64
+==== BB 1343 (0x2547B818) approx BBs exec'd 0 ====
+
+	0x2547B818:  80BF0084  lwz r5,132(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547B81C:  2F850000  cmpi cr7,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547B820:  409E0058  bc 4,30,0x2547B878
+	   9: Jc30o       	$0x2547B878
+
+
+
+. 1343 2547B818 12
+. 80 BF 00 84 2F 85 00 00 40 9E 00 58
+==== BB 1344 (0x2547B824) approx BBs exec'd 0 ====
+
+	0x2547B824:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547B828:  3B9CFFFF  addi r28,r28,-1
+	   4: GETL       	R28, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2547B82C:  409EFFB0  bc 4,30,0x2547B7DC
+	   8: Jc30o       	$0x2547B7DC
+
+
+
+. 1344 2547B824 12
+. 2F 9C 00 00 3B 9C FF FF 40 9E FF B0
+==== BB 1345 (0x2547B878) approx BBs exec'd 0 ====
+
+	0x2547B878:  80DE04F4  lwz r6,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547B87C:  83660000  lwz r27,0(r6)
+	   5: GETL       	R6, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0x2547B880:  73690002  andi. r9,r27,0x2
+	   9: GETL       	R27, t8
+	  10: ANDL       	$0x2, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x2547B884:  4082009C  bc 4,2,0x2547B920
+	  15: Jc02o       	$0x2547B920
+
+
+
+. 1345 2547B878 16
+. 80 DE 04 F4 83 66 00 00 73 69 00 02 40 82 00 9C
+==== BB 1346 (0x2547B888) approx BBs exec'd 0 ====
+
+	0x2547B888:  2F0A0000  cmpi cr6,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547B88C:  409A005C  bc 4,26,0x2547B8E8
+	   4: Jc26o       	$0x2547B8E8
+
+
+
+. 1346 2547B888 8
+. 2F 0A 00 00 40 9A 00 5C
+==== BB 1347 (0x2547B8E8) approx BBs exec'd 0 ====
+
+	0x2547B8E8:  810A0004  lwz r8,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547B8EC:  7EE3BB78  or r3,r23,r23
+	   5: GETL       	R23, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547B8F0:  80FF0000  lwz r7,0(r31)
+	   8: GETL       	R31, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R7
+	  11: INCEIPL       	$4
+
+	0x2547B8F4:  7F25CB78  or r5,r25,r25
+	  12: GETL       	R25, t10
+	  13: PUTL       	t10, R5
+	  14: INCEIPL       	$4
+
+	0x2547B8F8:  7C874214  add r4,r7,r8
+	  15: GETL       	R7, t12
+	  16: GETL       	R8, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R4
+	  19: INCEIPL       	$4
+
+	0x2547B8FC:  7C8903A6  mtctr r4
+	  20: GETL       	R4, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x2547B900:  7F04C378  or r4,r24,r24
+	  23: GETL       	R24, t18
+	  24: PUTL       	t18, R4
+	  25: INCEIPL       	$4
+
+	0x2547B904:  4E800421  bctrl
+	  26: MOVL       	$0x2547B908, t20
+	  27: PUTL       	t20, LR
+	  28: GETL       	CTR, t22
+	  29: JMPo-c       	t22  ($4)
+
+
+
+. 1347 2547B8E8 32
+. 81 0A 00 04 7E E3 BB 78 80 FF 00 00 7F 25 CB 78 7C 87 42 14 7C 89 03 A6 7F 04 C3 78 4E 80 04 21
+==== BB 1348 _init(0xFE7B620) approx BBs exec'd 0 ====
+
+	0xFE7B620:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE7B624:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE7B628:  4812C829  bl 0xFFA7E50
+	   9: MOVL       	$0xFE7B62C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1348 FE7B620 12
+. 94 21 FF E0 7C 08 02 A6 48 12 C8 29
+==== BB 1349 (0xFFA7E50) approx BBs exec'd 0 ====
+
+	0xFFA7E50:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0xFFA7E54, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+
+. 1349 FFA7E50 4
+. 4E 80 00 21
+==== BB 1350 (0xFE7B62C) approx BBs exec'd 0 ====
+
+	0xFE7B62C:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE7B630:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE7B634:  93810010  stw r28,16(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE7B638:  93A10014  stw r29,20(r1)
+	  13: GETL       	R29, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE7B63C:  7CBC2B78  or r28,r5,r5
+	  18: GETL       	R5, t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFE7B640:  93E1001C  stw r31,28(r1)
+	  21: GETL       	R31, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE7B644:  7C9D2378  or r29,r4,r4
+	  26: GETL       	R4, t20
+	  27: PUTL       	t20, R29
+	  28: INCEIPL       	$4
+
+	0xFE7B648:  817E1CE4  lwz r11,7396(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x1CE4, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R11
+	  33: INCEIPL       	$4
+
+	0xFE7B64C:  7C7F1B78  or r31,r3,r3
+	  34: GETL       	R3, t26
+	  35: PUTL       	t26, R31
+	  36: INCEIPL       	$4
+
+	0xFE7B650:  811E1AA4  lwz r8,6820(r30)
+	  37: GETL       	R30, t28
+	  38: ADDL       	$0x1AA4, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R8
+	  41: INCEIPL       	$4
+
+	0xFE7B654:  39200000  li r9,0
+	  42: MOVL       	$0x0, t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xFE7B658:  2F8B0000  cmpi cr7,r11,0
+	  45: GETL       	R11, t34
+	  46: CMP0L       	t34, t36  (-rSo)
+	  47: ICRFL       	t36, $0x7, CR
+	  48: INCEIPL       	$4
+
+	0xFE7B65C:  90010024  stw r0,36(r1)
+	  49: GETL       	R0, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x24, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFE7B660:  7D0A4378  or r10,r8,r8
+	  54: GETL       	R8, t42
+	  55: PUTL       	t42, R10
+	  56: INCEIPL       	$4
+
+	0xFE7B664:  419E0010  bc 12,30,0xFE7B674
+	  57: Js30o       	$0xFE7B674
+
+
+
+. 1350 FE7B62C 60
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 A1 00 14 7C BC 2B 78 93 E1 00 1C 7C 9D 23 78 81 7E 1C E4 7C 7F 1B 78 81 1E 1A A4 39 20 00 00 2F 8B 00 00 90 01 00 24 7D 0A 43 78 41 9E 00 10
+==== BB 1351 (0xFE7B674) approx BBs exec'd 0 ====
+
+	0xFE7B674:  912A0000  stw r9,0(r10)
+	   0: GETL       	R9, t0
+	   1: GETL       	R10, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE7B678:  80880000  lwz r4,0(r8)
+	   4: GETL       	R8, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R4
+	   7: INCEIPL       	$4
+
+	0xFE7B67C:  2C840000  cmpi cr1,r4,0
+	   8: GETL       	R4, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0xFE7B680:  40860024  bc 4,6,0xFE7B6A4
+	  12: Jc06o       	$0xFE7B6A4
+
+
+
+. 1351 FE7B674 16
+. 91 2A 00 00 80 88 00 00 2C 84 00 00 40 86 00 24
+==== BB 1352 (0xFE7B684) approx BBs exec'd 0 ====
+
+	0xFE7B684:  813E1BF4  lwz r9,7156(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BF4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE7B688:  817E1BC8  lwz r11,7112(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1BC8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0xFE7B68C:  81490000  lwz r10,0(r9)
+	  10: GETL       	R9, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0xFE7B690:  80AB0034  lwz r5,52(r11)
+	  14: GETL       	R11, t12
+	  15: ADDL       	$0x34, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R5
+	  18: INCEIPL       	$4
+
+	0xFE7B694:  7D435378  or r3,r10,r10
+	  19: GETL       	R10, t16
+	  20: PUTL       	t16, R3
+	  21: INCEIPL       	$4
+
+	0xFE7B698:  7F055000  cmp cr6,r5,r10
+	  22: GETL       	R5, t18
+	  23: GETL       	R10, t20
+	  24: CMPL       	t18, t20, t22  (-rSo)
+	  25: ICRFL       	t22, $0x6, CR
+	  26: INCEIPL       	$4
+
+	0xFE7B69C:  419A0008  bc 12,26,0xFE7B6A4
+	  27: Js26o       	$0xFE7B6A4
+
+
+
+. 1352 FE7B684 28
+. 81 3E 1B F4 81 7E 1B C8 81 49 00 00 80 AB 00 34 7D 43 53 78 7F 05 50 00 41 9A 00 08
+==== BB 1353 (0xFE7B6A4) approx BBs exec'd 0 ====
+
+	0xFE7B6A4:  819E1A8C  lwz r12,6796(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1A8C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFE7B6A8:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE7B6AC:  811E1DD4  lwz r8,7636(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x1DD4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0xFE7B6B0:  7FA4EB78  or r4,r29,r29
+	  13: GETL       	R29, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFE7B6B4:  80FE1B84  lwz r7,7044(r30)
+	  16: GETL       	R30, t12
+	  17: ADDL       	$0x1B84, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R7
+	  20: INCEIPL       	$4
+
+	0xFE7B6B8:  7F85E378  or r5,r28,r28
+	  21: GETL       	R28, t16
+	  22: PUTL       	t16, R5
+	  23: INCEIPL       	$4
+
+	0xFE7B6BC:  93EC0000  stw r31,0(r12)
+	  24: GETL       	R31, t18
+	  25: GETL       	R12, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0xFE7B6C0:  93A80000  stw r29,0(r8)
+	  28: GETL       	R29, t22
+	  29: GETL       	R8, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0xFE7B6C4:  93870000  stw r28,0(r7)
+	  32: GETL       	R28, t26
+	  33: GETL       	R7, t28
+	  34: STL       	t26, (t28)
+	  35: INCEIPL       	$4
+
+	0xFE7B6C8:  480B6421  bl 0xFF31AE8
+	  36: MOVL       	$0xFE7B6CC, t30
+	  37: PUTL       	t30, LR
+	  38: JMPo-c       	$0xFF31AE8  ($4)
+
+
+
+. 1353 FE7B6A4 40
+. 81 9E 1A 8C 7F E3 FB 78 81 1E 1D D4 7F A4 EB 78 80 FE 1B 84 7F 85 E3 78 93 EC 00 00 93 A8 00 00 93 87 00 00 48 0B 64 21
+==== BB 1354 __init_misc(0xFF31AE8) approx BBs exec'd 0 ====
+
+	0xFF31AE8:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF31AEC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFF31AF0:  93A10014  stw r29,20(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFF31AF4:  7C9D2379  or. r29,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R29
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFF31AF8:  48076359  bl 0xFFA7E50
+	  19: MOVL       	$0xFF31AFC, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1354 FF31AE8 20
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 7C 9D 23 79 48 07 63 59
+==== BB 1355 (0xFF31AFC) approx BBs exec'd 0 ====
+
+	0xFF31AFC:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF31B00:  93E1001C  stw r31,28(r1)
+	   5: GETL       	R31, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x1C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFF31B04:  7FC802A6  mflr r30
+	  10: GETL       	LR, t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xFF31B08:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF31B0C:  4182003C  bc 12,2,0xFF31B48
+	  18: Js02o       	$0xFF31B48
+
+
+
+. 1355 FF31AFC 20
+. 93 C1 00 18 93 E1 00 1C 7F C8 02 A6 90 01 00 24 41 82 00 3C
+==== BB 1356 (0xFF31B10) approx BBs exec'd 0 ====
+
+	0xFF31B10:  83FD0000  lwz r31,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0xFF31B14:  3880002F  li r4,47
+	   4: MOVL       	$0x2F, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0xFF31B18:  2F9F0000  cmpi cr7,r31,0
+	   7: GETL       	R31, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFF31B1C:  7FE3FB78  or r3,r31,r31
+	  11: GETL       	R31, t10
+	  12: PUTL       	t10, R3
+	  13: INCEIPL       	$4
+
+	0xFF31B20:  419E0028  bc 12,30,0xFF31B48
+	  14: Js30o       	$0xFF31B48
+
+
+
+. 1356 FF31B10 20
+. 83 FD 00 00 38 80 00 2F 2F 9F 00 00 7F E3 FB 78 41 9E 00 28
+==== BB 1357 (0xFF31B24) approx BBs exec'd 0 ====
+
+	0xFF31B24:  4BFA5459  bl 0xFED6F7C
+	   0: MOVL       	$0xFF31B28, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFED6F7C  ($4)
+
+
+
+. 1357 FF31B24 4
+. 4B FA 54 59
+==== BB 1358 strrchr(0xFED6F7C) approx BBs exec'd 0 ====
+
+	0xFED6F7C:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED6F80:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFED6F84:  93A10014  stw r29,20(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFED6F88:  549D063E  rlwinm r29,r4,0,24,31
+	  14: GETL       	R4, t10
+	  15: ANDL       	$0xFF, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0xFED6F8C:  2F9D0000  cmpi cr7,r29,0
+	  18: GETL       	R29, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0xFED6F90:  93C10018  stw r30,24(r1)
+	  22: GETL       	R30, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xFED6F94:  90010024  stw r0,36(r1)
+	  27: GETL       	R0, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x24, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFED6F98:  7C601B78  or r0,r3,r3
+	  32: GETL       	R3, t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFED6F9C:  93E1001C  stw r31,28(r1)
+	  35: GETL       	R31, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFED6FA0:  409E0024  bc 4,30,0xFED6FC4
+	  40: Jc30o       	$0xFED6FC4
+
+
+
+. 1358 FED6F7C 40
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 54 9D 06 3E 2F 9D 00 00 93 C1 00 18 90 01 00 24 7C 60 1B 78 93 E1 00 1C 40 9E 00 24
+==== BB 1359 (0xFED6FC4) approx BBs exec'd 0 ====
+
+	0xFED6FC4:  3BE00000  li r31,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFED6FC8:  4800000C  b 0xFED6FD4
+	   3: JMPo       	$0xFED6FD4  ($4)
+
+
+
+. 1359 FED6FC4 8
+. 3B E0 00 00 48 00 00 0C
+==== BB 1360 (0xFED6FD4) approx BBs exec'd 0 ====
+
+	0xFED6FD4:  7C030378  or r3,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED6FD8:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFED6FDC:  4BFFF2F1  bl 0xFED62CC
+	   6: MOVL       	$0xFED6FE0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED62CC  ($4)
+
+
+
+. 1360 FED6FD4 12
+. 7C 03 03 78 7F A4 EB 78 4B FF F2 F1
+==== BB 1361 strchr(0xFED62CC) approx BBs exec'd 0 ====
+
+	0xFED62CC:  5084442E  rlwimi r4,r4,8,16,23
+	   0: GETL       	R4, t0
+	   1: GETL       	R4, t2
+	   2: ROLL       	$0x8, t2
+	   3: ANDL       	$0xFF00, t2
+	   4: ANDL       	$0xFFFF00FF, t0
+	   5: ORL       	t0, t2
+	   6: PUTL       	t2, R4
+	   7: INCEIPL       	$4
+
+	0xFED62D0:  3960FFFF  li r11,-1
+	   8: MOVL       	$0xFFFFFFFF, t4
+	   9: PUTL       	t4, R11
+	  10: INCEIPL       	$4
+
+	0xFED62D4:  5084801E  rlwimi r4,r4,16,0,15
+	  11: GETL       	R4, t6
+	  12: GETL       	R4, t8
+	  13: ROLL       	$0x10, t8
+	  14: ANDL       	$0xFFFF0000, t8
+	  15: ANDL       	$0xFFFF, t6
+	  16: ORL       	t6, t8
+	  17: PUTL       	t8, R4
+	  18: INCEIPL       	$4
+
+	0xFED62D8:  546A1EF8  rlwinm r10,r3,3,27,28
+	  19: GETL       	R3, t10
+	  20: ROLL       	$0x3, t10
+	  21: ANDL       	$0x18, t10
+	  22: PUTL       	t10, R10
+	  23: INCEIPL       	$4
+
+	0xFED62DC:  3CC0FEFF  lis r6,-257
+	  24: MOVL       	$0xFEFF0000, t12
+	  25: PUTL       	t12, R6
+	  26: INCEIPL       	$4
+
+	0xFED62E0:  3CE07F7F  lis r7,32639
+	  27: MOVL       	$0x7F7F0000, t14
+	  28: PUTL       	t14, R7
+	  29: INCEIPL       	$4
+
+	0xFED62E4:  5468003A  rlwinm r8,r3,0,0,29
+	  30: GETL       	R3, t16
+	  31: ANDL       	$0xFFFFFFFC, t16
+	  32: PUTL       	t16, R8
+	  33: INCEIPL       	$4
+
+	0xFED62E8:  38C6FEFF  addi r6,r6,-257
+	  34: MOVL       	$0xFEFEFEFF, t18
+	  35: PUTL       	t18, R6
+	  36: INCEIPL       	$4
+
+	0xFED62EC:  38E77F7F  addi r7,r7,32639
+	  37: MOVL       	$0x7F7F7F7F, t20
+	  38: PUTL       	t20, R7
+	  39: INCEIPL       	$4
+
+	0xFED62F0:  80A80000  lwz r5,0(r8)
+	  40: GETL       	R8, t22
+	  41: LDL       	(t22), t24
+	  42: PUTL       	t24, R5
+	  43: INCEIPL       	$4
+
+	0xFED62F4:  7D6B5430  srw r11,r11,r10
+	  44: GETL       	R11, t28
+	  45: GETL       	R10, t26
+	  46: SHRL       	t26, t28
+	  47: PUTL       	t28, R11
+	  48: INCEIPL       	$4
+
+	0xFED62F8:  7CA55B38  orc r5,r5,r11
+	  49: GETL       	R5, t30
+	  50: GETL       	R11, t32
+	  51: NOTL       	t32
+	  52: ORL       	t30, t32
+	  53: PUTL       	t32, R5
+	  54: INCEIPL       	$4
+
+	0xFED62FC:  7C062A14  add r0,r6,r5
+	  55: GETL       	R6, t34
+	  56: GETL       	R5, t36
+	  57: ADDL       	t34, t36
+	  58: PUTL       	t36, R0
+	  59: INCEIPL       	$4
+
+	0xFED6300:  7CE928F8  nor r9,r7,r5
+	  60: GETL       	R7, t38
+	  61: GETL       	R5, t40
+	  62: ORL       	t40, t38
+	  63: NOTL       	t38
+	  64: PUTL       	t38, R9
+	  65: INCEIPL       	$4
+
+	0xFED6304:  7C004839  and. r0,r0,r9
+	  66: GETL       	R0, t42
+	  67: GETL       	R9, t44
+	  68: ANDL       	t42, t44
+	  69: PUTL       	t44, R0
+	  70: CMP0L       	t44, t46  (-rSo)
+	  71: ICRFL       	t46, $0x0, CR
+	  72: INCEIPL       	$4
+
+	0xFED6308:  7C8C2A78  xor r12,r4,r5
+	  73: GETL       	R4, t48
+	  74: GETL       	R5, t50
+	  75: XORL       	t48, t50
+	  76: PUTL       	t50, R12
+	  77: INCEIPL       	$4
+
+	0xFED630C:  7D8C5B38  orc r12,r12,r11
+	  78: GETL       	R12, t52
+	  79: GETL       	R11, t54
+	  80: NOTL       	t54
+	  81: ORL       	t52, t54
+	  82: PUTL       	t54, R12
+	  83: INCEIPL       	$4
+
+	0xFED6310:  48000020  b 0xFED6330
+	  84: JMPo       	$0xFED6330  ($4)
+
+
+
+. 1361 FED62CC 72
+. 50 84 44 2E 39 60 FF FF 50 84 80 1E 54 6A 1E F8 3C C0 FE FF 3C E0 7F 7F 54 68 00 3A 38 C6 FE FF 38 E7 7F 7F 80 A8 00 00 7D 6B 54 30 7C A5 5B 38 7C 06 2A 14 7C E9 28 F8 7C 00 48 39 7C 8C 2A 78 7D 8C 5B 38 48 00 00 20
+==== BB 1362 (0xFED6330) approx BBs exec'd 0 ====
+
+	0xFED6330:  7C066214  add r0,r6,r12
+	   0: GETL       	R6, t0
+	   1: GETL       	R12, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED6334:  7CE960F8  nor r9,r7,r12
+	   5: GETL       	R7, t4
+	   6: GETL       	R12, t6
+	   7: ORL       	t6, t4
+	   8: NOTL       	t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0xFED6338:  4182FFDC  bc 12,2,0xFED6314
+	  11: Js02o       	$0xFED6314
+
+
+
+. 1362 FED6330 12
+. 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+==== BB 1363 (0xFED6314) approx BBs exec'd 0 ====
+
+	0xFED6314:  84A80004  lwzu r5,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R8
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFED6318:  7C004839  and. r0,r0,r9
+	   6: GETL       	R0, t4
+	   7: GETL       	R9, t6
+	   8: ANDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFED631C:  7C062A14  add r0,r6,r5
+	  13: GETL       	R6, t10
+	  14: GETL       	R5, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R0
+	  17: INCEIPL       	$4
+
+	0xFED6320:  7CE928F8  nor r9,r7,r5
+	  18: GETL       	R7, t14
+	  19: GETL       	R5, t16
+	  20: ORL       	t16, t14
+	  21: NOTL       	t14
+	  22: PUTL       	t14, R9
+	  23: INCEIPL       	$4
+
+	0xFED6324:  4082005C  bc 4,2,0xFED6380
+	  24: Jc02o       	$0xFED6380
+
+
+
+. 1363 FED6314 20
+. 84 A8 00 04 7C 00 48 39 7C 06 2A 14 7C E9 28 F8 40 82 00 5C
+==== BB 1364 (0xFED6328) approx BBs exec'd 0 ====
+
+	0xFED6328:  7C004839  and. r0,r0,r9
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFED632C:  7C8C2A78  xor r12,r4,r5
+	   7: GETL       	R4, t6
+	   8: GETL       	R5, t8
+	   9: XORL       	t6, t8
+	  10: PUTL       	t8, R12
+	  11: INCEIPL       	$4
+
+	0xFED6330:  7C066214  add r0,r6,r12
+	  12: GETL       	R6, t10
+	  13: GETL       	R12, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0xFED6334:  7CE960F8  nor r9,r7,r12
+	  17: GETL       	R7, t14
+	  18: GETL       	R12, t16
+	  19: ORL       	t16, t14
+	  20: NOTL       	t14
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0xFED6338:  4182FFDC  bc 12,2,0xFED6314
+	  23: Js02o       	$0xFED6314
+
+
+
+. 1364 FED6328 20
+. 7C 00 48 39 7C 8C 2A 78 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+==== BB 1365 (0xFED633C) approx BBs exec'd 0 ====
+
+	0xFED633C:  7C004839  and. r0,r0,r9
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFED6340:  38600000  li r3,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFED6344:  4D820020  bclr 12,2
+	  10: GETL       	LR, t8
+	  11: Js02o-r       	t8
+
+
+
+. 1365 FED633C 12
+. 7C 00 48 39 38 60 00 00 4D 82 00 20
+==== BB 1366 (0xFED6FE0) approx BBs exec'd 0 ====
+
+	0xFED6FE0:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFED6FE4:  4082FFE8  bc 4,2,0xFED6FCC
+	   4: Jc02o       	$0xFED6FCC
+
+
+
+. 1366 FED6FE0 8
+. 2C 03 00 00 40 82 FF E8
+==== BB 1367 (0xFED6FE8) approx BBs exec'd 0 ====
+
+	0xFED6FE8:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFED6FEC:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED6FF0:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xFED6FF4:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0xFED6FF8:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFED6FFC:  83E1001C  lwz r31,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0xFED7000:  38210020  addi r1,r1,32
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0xFED7004:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+
+. 1367 FED6FE8 32
+. 80 81 00 24 7F E3 FB 78 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1368 (0xFF31B28) approx BBs exec'd 0 ====
+
+	0xFF31B28:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFF31B2C:  38630001  addi r3,r3,1
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFF31B30:  41860034  bc 12,6,0xFF31B64
+	   8: Js06o       	$0xFF31B64
+
+
+
+. 1368 FF31B28 12
+. 2C 83 00 00 38 63 00 01 41 86 00 34
+==== BB 1369 (0xFF31B64) approx BBs exec'd 0 ====
+
+	0xFF31B64:  813E1C34  lwz r9,7220(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1C34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFF31B68:  93E90000  stw r31,0(r9)
+	   5: GETL       	R31, t4
+	   6: GETL       	R9, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFF31B6C:  4BFFFFD0  b 0xFF31B3C
+	   9: JMPo       	$0xFF31B3C  ($4)
+
+
+
+. 1369 FF31B64 12
+. 81 3E 1C 34 93 E9 00 00 4B FF FF D0
+==== BB 1370 (0xFF31B3C) approx BBs exec'd 0 ====
+
+	0xFF31B3C:  80BD0000  lwz r5,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFF31B40:  807E1E24  lwz r3,7716(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1E24, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFF31B44:  90A30000  stw r5,0(r3)
+	   9: GETL       	R5, t8
+	  10: GETL       	R3, t10
+	  11: STL       	t8, (t10)
+	  12: INCEIPL       	$4
+
+	0xFF31B48:  80C10024  lwz r6,36(r1)
+	  13: GETL       	R1, t12
+	  14: ADDL       	$0x24, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R6
+	  17: INCEIPL       	$4
+
+	0xFF31B4C:  83A10014  lwz r29,20(r1)
+	  18: GETL       	R1, t16
+	  19: ADDL       	$0x14, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R29
+	  22: INCEIPL       	$4
+
+	0xFF31B50:  83C10018  lwz r30,24(r1)
+	  23: GETL       	R1, t20
+	  24: ADDL       	$0x18, t20
+	  25: LDL       	(t20), t22
+	  26: PUTL       	t22, R30
+	  27: INCEIPL       	$4
+
+	0xFF31B54:  7CC803A6  mtlr r6
+	  28: GETL       	R6, t24
+	  29: PUTL       	t24, LR
+	  30: INCEIPL       	$4
+
+	0xFF31B58:  83E1001C  lwz r31,28(r1)
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x1C, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R31
+	  35: INCEIPL       	$4
+
+	0xFF31B5C:  38210020  addi r1,r1,32
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x20, t30
+	  38: PUTL       	t30, R1
+	  39: INCEIPL       	$4
+
+	0xFF31B60:  4E800020  blr
+	  40: GETL       	LR, t32
+	  41: JMPo-r       	t32  ($4)
+
+
+
+. 1370 FF31B3C 40
+. 80 BD 00 00 80 7E 1E 24 90 A3 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1371 (0xFE7B6CC) approx BBs exec'd 0 ====
+
+	0xFE7B6CC:  4BFFFE95  bl 0xFE7B560
+	   0: MOVL       	$0xFE7B6D0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFE7B560  ($4)
+
+
+
+. 1371 FE7B6CC 4
+. 4B FF FE 95
+==== BB 1372 __libc_global_ctors(0xFE7B560) approx BBs exec'd 0 ====
+
+	0xFE7B560:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE7B564:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE7B568:  4812C8E9  bl 0xFFA7E50
+	   9: MOVL       	$0xFE7B56C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1372 FE7B560 12
+. 94 21 FF F0 7C 08 02 A6 48 12 C8 E9
+==== BB 1373 (0xFE7B56C) approx BBs exec'd 0 ====
+
+	0xFE7B56C:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE7B570:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE7B574:  93E1000C  stw r31,12(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE7B578:  90010014  stw r0,20(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE7B57C:  813E0018  lwz r9,24(r30)
+	  18: GETL       	R30, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R9
+	  22: INCEIPL       	$4
+
+	0xFE7B580:  80090004  lwz r0,4(r9)
+	  23: GETL       	R9, t18
+	  24: ADDL       	$0x4, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R0
+	  27: INCEIPL       	$4
+
+	0xFE7B584:  3BE90004  addi r31,r9,4
+	  28: GETL       	R9, t22
+	  29: ADDL       	$0x4, t22
+	  30: PUTL       	t22, R31
+	  31: INCEIPL       	$4
+
+	0xFE7B588:  2F800000  cmpi cr7,r0,0
+	  32: GETL       	R0, t24
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x7, CR
+	  35: INCEIPL       	$4
+
+	0xFE7B58C:  419E0018  bc 12,30,0xFE7B5A4
+	  36: Js30o       	$0xFE7B5A4
+
+
+
+. 1373 FE7B56C 36
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 81 3E 00 18 80 09 00 04 3B E9 00 04 2F 80 00 00 41 9E 00 18
+==== BB 1374 (0xFE7B590) approx BBs exec'd 0 ====
+
+	0xFE7B590:  7C0903A6  mtctr r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0xFE7B594:  4E800421  bctrl
+	   3: MOVL       	$0xFE7B598, t2
+	   4: PUTL       	t2, LR
+	   5: GETL       	CTR, t4
+	   6: JMPo-c       	t4  ($4)
+
+
+
+. 1374 FE7B590 8
+. 7C 09 03 A6 4E 80 04 21
+==== BB 1375 _IO_check_libio(0xFECD850) approx BBs exec'd 0 ====
+
+	0xFECD850:  7D8802A6  mflr r12
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0xFECD854:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECD858:  480DA5F9  bl 0xFFA7E50
+	   9: MOVL       	$0xFECD85C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1375 FECD850 12
+. 7D 88 02 A6 94 21 FF F0 48 0D A5 F9
+==== BB 1376 (0xFECD85C) approx BBs exec'd 0 ====
+
+	0xFECD85C:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECD860:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFECD864:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0xFECD868:  801E1DC8  lwz r0,7624(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x1DC8, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R0
+	  15: INCEIPL       	$4
+
+	0xFECD86C:  811E1B00  lwz r8,6912(r30)
+	  16: GETL       	R30, t12
+	  17: ADDL       	$0x1B00, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R8
+	  20: INCEIPL       	$4
+
+	0xFECD870:  2F800000  cmpi cr7,r0,0
+	  21: GETL       	R0, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0xFECD874:  815E1BCC  lwz r10,7116(r30)
+	  25: GETL       	R30, t20
+	  26: ADDL       	$0x1BCC, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R10
+	  29: INCEIPL       	$4
+
+	0xFECD878:  80FE1AE8  lwz r7,6888(r30)
+	  30: GETL       	R30, t24
+	  31: ADDL       	$0x1AE8, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R7
+	  34: INCEIPL       	$4
+
+	0xFECD87C:  3800FFB4  li r0,-76
+	  35: MOVL       	$0xFFFFFFB4, t28
+	  36: PUTL       	t28, R0
+	  37: INCEIPL       	$4
+
+	0xFECD880:  419E0010  bc 12,30,0xFECD890
+	  38: Js30o       	$0xFECD890
+
+
+
+. 1376 FECD85C 40
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 1E 1D C8 81 1E 1B 00 2F 80 00 00 81 5E 1B CC 80 FE 1A E8 38 00 FF B4 41 9E 00 10
+==== BB 1377 (0xFECD884) approx BBs exec'd 0 ====
+
+	0xFECD884:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0xFECD888:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFECD88C:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+
+. 1377 FECD884 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 1378 (0xFE7B598) approx BBs exec'd 0 ====
+
+	0xFE7B598:  841F0004  lwzu r0,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R31
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFE7B59C:  2F800000  cmpi cr7,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFE7B5A0:  409EFFF0  bc 4,30,0xFE7B590
+	  10: Jc30o       	$0xFE7B590
+
+
+
+. 1378 FE7B598 12
+. 84 1F 00 04 2F 80 00 00 40 9E FF F0
+==== BB 1379 (0xFE7B5A4) approx BBs exec'd 0 ====
+
+	0xFE7B5A4:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE7B5A8:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFE7B5AC:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFE7B5B0:  7C6803A6  mtlr r3
+	  15: GETL       	R3, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFE7B5B4:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xFE7B5B8:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 1379 FE7B5A4 24
+. 80 61 00 14 83 C1 00 08 83 E1 00 0C 7C 68 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1380 (0xFE7B6D0) approx BBs exec'd 0 ====
+
+	0xFE7B6D0:  80C10024  lwz r6,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFE7B6D4:  83810010  lwz r28,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFE7B6D8:  83A10014  lwz r29,20(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0xFE7B6DC:  7CC803A6  mtlr r6
+	  15: GETL       	R6, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFE7B6E0:  83C10018  lwz r30,24(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R30
+	  22: INCEIPL       	$4
+
+	0xFE7B6E4:  83E1001C  lwz r31,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0xFE7B6E8:  38210020  addi r1,r1,32
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0xFE7B6EC:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+
+. 1380 FE7B6D0 32
+. 80 C1 00 24 83 81 00 10 83 A1 00 14 7C C8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1381 (0x2547B908) approx BBs exec'd 0 ====
+
+	0x2547B908:  4BFFFF88  b 0x2547B890
+	   0: JMPo       	$0x2547B890  ($4)
+
+
+
+. 1381 2547B908 4
+. 4B FF FF 88
+==== BB 1382 (0x2547B890) approx BBs exec'd 0 ====
+
+	0x2547B890:  817F0084  lwz r11,132(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547B894:  2C8B0000  cmpi cr1,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547B898:  41A6FF8C  bc 13,6,0x2547B824
+	   9: Js06o       	$0x2547B824
+
+
+
+. 1382 2547B890 12
+. 81 7F 00 84 2C 8B 00 00 41 A6 FF 8C
+==== BB 1383 (0xFFB0D34) approx BBs exec'd 0 ====
+
+	0xFFB0D34:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFB0D38:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFB0D3C:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFFB0D40:  4800001D  bl 0xFFB0D5C
+	  14: MOVL       	$0xFFB0D44, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFB0D5C  ($4)
+
+
+
+. 1383 FFB0D34 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 1D
+==== BB 1384 (0xFFB0D5C) approx BBs exec'd 0 ====
+
+	0xFFB0D5C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFB0D60:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFB0D64:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFFB0D68:  90010014  stw r0,20(r1)
+	  14: GETL       	R0, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFFB0D6C:  4801229D  bl 0xFFC3008
+	  19: MOVL       	$0xFFB0D70, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xFFC3008  ($4)
+
+
+
+. 1384 FFB0D5C 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 01 22 9D
+==== BB 1385 (0xFFC3008) approx BBs exec'd 0 ====
+
+	0xFFC3008:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0xFFC300C, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+
+. 1385 FFC3008 4
+. 4E 80 00 21
+==== BB 1386 (0xFFB0D70) approx BBs exec'd 0 ====
+
+	0xFFB0D70:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0xFFB0D74:  801E008C  lwz r0,140(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x8C, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFFB0D78:  2F800000  cmpi cr7,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFFB0D7C:  419E000C  bc 12,30,0xFFB0D88
+	  12: Js30o       	$0xFFB0D88
+
+
+
+. 1386 FFB0D70 16
+. 7F C8 02 A6 80 1E 00 8C 2F 80 00 00 41 9E 00 0C
+==== BB 1387 (0xFFB0D88) approx BBs exec'd 0 ====
+
+	0xFFB0D88:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFB0D8C:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFFB0D90:  83C10008  lwz r30,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xFFB0D94:  38210010  addi r1,r1,16
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: PUTL       	t10, R1
+	  16: INCEIPL       	$4
+
+	0xFFB0D98:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1387 FFB0D88 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 1388 (0xFFB0D44) approx BBs exec'd 0 ====
+
+	0xFFB0D44:  48000115  bl 0xFFB0E58
+	   0: MOVL       	$0xFFB0D48, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFFB0E58  ($4)
+
+
+
+. 1388 FFB0D44 4
+. 48 00 01 15
+==== BB 1389 (0xFFB0E58) approx BBs exec'd 0 ====
+
+	0xFFB0E58:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFB0E5C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFB0E60:  429F0005  bcl 20,31,0xFFB0E64
+	   9: MOVL       	$0xFFB0E64, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFFB0E64:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFFB0E68:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xFFB0E6C:  90010014  stw r0,20(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFFB0E70:  801EFFF0  lwz r0,-16(r30)
+	  25: GETL       	R30, t18
+	  26: ADDL       	$0xFFFFFFF0, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R0
+	  29: INCEIPL       	$4
+
+	0xFFB0E74:  7FC0F214  add r30,r0,r30
+	  30: GETL       	R0, t22
+	  31: GETL       	R30, t24
+	  32: ADDL       	t22, t24
+	  33: PUTL       	t24, R30
+	  34: INCEIPL       	$4
+
+	0xFFB0E78:  807E8010  lwz r3,-32752(r30)
+	  35: GETL       	R30, t26
+	  36: ADDL       	$0xFFFF8010, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R3
+	  39: INCEIPL       	$4
+
+	0xFFB0E7C:  80030000  lwz r0,0(r3)
+	  40: GETL       	R3, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R0
+	  43: INCEIPL       	$4
+
+	0xFFB0E80:  2F800000  cmpi cr7,r0,0
+	  44: GETL       	R0, t34
+	  45: CMP0L       	t34, t36  (-rSo)
+	  46: ICRFL       	t36, $0x7, CR
+	  47: INCEIPL       	$4
+
+	0xFFB0E84:  419E0018  bc 12,30,0xFFB0E9C
+	  48: Js30o       	$0xFFB0E9C
+
+
+
+. 1389 FFB0E58 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 80 7E 80 10 80 03 00 00 2F 80 00 00 41 9E 00 18
+==== BB 1390 (0xFFB0E9C) approx BBs exec'd 0 ====
+
+	0xFFB0E9C:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFB0EA0:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFFB0EA4:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFFB0EA8:  7C0803A6  mtlr r0
+	  14: GETL       	R0, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFFB0EAC:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1390 FFB0E9C 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 1391 (0xFFB0D48) approx BBs exec'd 0 ====
+
+	0xFFB0D48:  480010FD  bl 0xFFB1E44
+	   0: MOVL       	$0xFFB0D4C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFFB1E44  ($4)
+
+
+
+. 1391 FFB0D48 4
+. 48 00 10 FD
+==== BB 1392 (0xFFB1E44) approx BBs exec'd 0 ====
+
+	0xFFB1E44:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFB1E48:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFB1E4C:  429F0005  bcl 20,31,0xFFB1E50
+	   9: MOVL       	$0xFFB1E50, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFFB1E50:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFFB1E54:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xFFB1E58:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFFB1E5C:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFFB1E60:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFFB1E64:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xFFB1E68:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xFFB1E6C:  8009FFFC  lwz r0,-4(r9)
+	  45: GETL       	R9, t34
+	  46: ADDL       	$0xFFFFFFFC, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R0
+	  49: INCEIPL       	$4
+
+	0xFFB1E70:  3BE9FFFC  addi r31,r9,-4
+	  50: GETL       	R9, t38
+	  51: ADDL       	$0xFFFFFFFC, t38
+	  52: PUTL       	t38, R31
+	  53: INCEIPL       	$4
+
+	0xFFB1E74:  48000010  b 0xFFB1E84
+	  54: JMPo       	$0xFFB1E84  ($4)
+
+
+
+. 1392 FFB1E44 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 80 09 FF FC 3B E9 FF FC 48 00 00 10
+==== BB 1393 (0xFFB1E84) approx BBs exec'd 0 ====
+
+	0xFFB1E84:  2F80FFFF  cmpi cr7,r0,-1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFFB1E88:  409EFFF0  bc 4,30,0xFFB1E78
+	   5: Jc30o       	$0xFFB1E78
+
+
+
+. 1393 FFB1E84 8
+. 2F 80 FF FF 40 9E FF F0
+==== BB 1394 (0xFFB1E8C) approx BBs exec'd 0 ====
+
+	0xFFB1E8C:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFB1E90:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFFB1E94:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFFB1E98:  7C0803A6  mtlr r0
+	  15: GETL       	R0, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFFB1E9C:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xFFB1EA0:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 1394 FFB1E8C 24
+. 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1395 (0xFFB0D4C) approx BBs exec'd 0 ====
+
+	0xFFB0D4C:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFB0D50:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFFB0D54:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xFFB0D58:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 1395 FFB0D4C 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1396 (0xEE34224) approx BBs exec'd 0 ====
+
+	0xEE34224:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE34228:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE3422C:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xEE34230:  4800001D  bl 0xEE3424C
+	  14: MOVL       	$0xEE34234, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xEE3424C  ($4)
+
+
+
+. 1396 EE34224 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 1D
+==== BB 1397 (0xEE3424C) approx BBs exec'd 0 ====
+
+	0xEE3424C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE34250:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE34254:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xEE34258:  90010014  stw r0,20(r1)
+	  14: GETL       	R0, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xEE3425C:  4801B075  bl 0xEE4F2D0
+	  19: MOVL       	$0xEE34260, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xEE4F2D0  ($4)
+
+
+
+. 1397 EE3424C 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 01 B0 75
+==== BB 1398 (0xEE4F2D0) approx BBs exec'd 0 ====
+
+	0xEE4F2D0:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0xEE4F2D4, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+
+. 1398 EE4F2D0 4
+. 4E 80 00 21
+==== BB 1399 (0xEE34260) approx BBs exec'd 0 ====
+
+	0xEE34260:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0xEE34264:  801E0638  lwz r0,1592(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x638, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xEE34268:  2F800000  cmpi cr7,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xEE3426C:  419E000C  bc 12,30,0xEE34278
+	  12: Js30o       	$0xEE34278
+
+
+
+. 1399 EE34260 16
+. 7F C8 02 A6 80 1E 06 38 2F 80 00 00 41 9E 00 0C
+==== BB 1400 (0xEE34278) approx BBs exec'd 0 ====
+
+	0xEE34278:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE3427C:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xEE34280:  83C10008  lwz r30,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xEE34284:  38210010  addi r1,r1,16
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: PUTL       	t10, R1
+	  16: INCEIPL       	$4
+
+	0xEE34288:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1400 EE34278 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 1401 (0xEE34234) approx BBs exec'd 0 ====
+
+	0xEE34234:  48000115  bl 0xEE34348
+	   0: MOVL       	$0xEE34238, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xEE34348  ($4)
+
+
+
+. 1401 EE34234 4
+. 48 00 01 15
+==== BB 1402 (0xEE34348) approx BBs exec'd 0 ====
+
+	0xEE34348:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE3434C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE34350:  429F0005  bcl 20,31,0xEE34354
+	   9: MOVL       	$0xEE34354, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xEE34354:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xEE34358:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xEE3435C:  90010014  stw r0,20(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xEE34360:  801EFFF0  lwz r0,-16(r30)
+	  25: GETL       	R30, t18
+	  26: ADDL       	$0xFFFFFFF0, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R0
+	  29: INCEIPL       	$4
+
+	0xEE34364:  7FC0F214  add r30,r0,r30
+	  30: GETL       	R0, t22
+	  31: GETL       	R30, t24
+	  32: ADDL       	t22, t24
+	  33: PUTL       	t24, R30
+	  34: INCEIPL       	$4
+
+	0xEE34368:  807E8010  lwz r3,-32752(r30)
+	  35: GETL       	R30, t26
+	  36: ADDL       	$0xFFFF8010, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R3
+	  39: INCEIPL       	$4
+
+	0xEE3436C:  80030000  lwz r0,0(r3)
+	  40: GETL       	R3, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R0
+	  43: INCEIPL       	$4
+
+	0xEE34370:  2F800000  cmpi cr7,r0,0
+	  44: GETL       	R0, t34
+	  45: CMP0L       	t34, t36  (-rSo)
+	  46: ICRFL       	t36, $0x7, CR
+	  47: INCEIPL       	$4
+
+	0xEE34374:  419E0018  bc 12,30,0xEE3438C
+	  48: Js30o       	$0xEE3438C
+
+
+
+. 1402 EE34348 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 80 7E 80 10 80 03 00 00 2F 80 00 00 41 9E 00 18
+==== BB 1403 (0xEE3438C) approx BBs exec'd 0 ====
+
+	0xEE3438C:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE34390:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xEE34394:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xEE34398:  7C0803A6  mtlr r0
+	  14: GETL       	R0, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xEE3439C:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1403 EE3438C 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 1404 (0xEE34238) approx BBs exec'd 0 ====
+
+	0xEE34238:  48008E71  bl 0xEE3D0A8
+	   0: MOVL       	$0xEE3423C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xEE3D0A8  ($4)
+
+
+
+. 1404 EE34238 4
+. 48 00 8E 71
+==== BB 1405 (0xEE3D0A8) approx BBs exec'd 0 ====
+
+	0xEE3D0A8:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE3D0AC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE3D0B0:  429F0005  bcl 20,31,0xEE3D0B4
+	   9: MOVL       	$0xEE3D0B4, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xEE3D0B4:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xEE3D0B8:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xEE3D0BC:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xEE3D0C0:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xEE3D0C4:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xEE3D0C8:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xEE3D0CC:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xEE3D0D0:  8009FFFC  lwz r0,-4(r9)
+	  45: GETL       	R9, t34
+	  46: ADDL       	$0xFFFFFFFC, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R0
+	  49: INCEIPL       	$4
+
+	0xEE3D0D4:  3BE9FFFC  addi r31,r9,-4
+	  50: GETL       	R9, t38
+	  51: ADDL       	$0xFFFFFFFC, t38
+	  52: PUTL       	t38, R31
+	  53: INCEIPL       	$4
+
+	0xEE3D0D8:  48000010  b 0xEE3D0E8
+	  54: JMPo       	$0xEE3D0E8  ($4)
+
+
+
+. 1405 EE3D0A8 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 80 09 FF FC 3B E9 FF FC 48 00 00 10
+==== BB 1406 (0xEE3D0E8) approx BBs exec'd 0 ====
+
+	0xEE3D0E8:  2F80FFFF  cmpi cr7,r0,-1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xEE3D0EC:  409EFFF0  bc 4,30,0xEE3D0DC
+	   5: Jc30o       	$0xEE3D0DC
+
+
+
+. 1406 EE3D0E8 8
+. 2F 80 FF FF 40 9E FF F0
+==== BB 1407 (0xEE3D0DC) approx BBs exec'd 0 ====
+
+	0xEE3D0DC:  7C0803A6  mtlr r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, LR
+	   2: INCEIPL       	$4
+
+	0xEE3D0E0:  4E800021  blrl
+	   3: GETL       	LR, t2
+	   4: MOVL       	$0xEE3D0E4, t4
+	   5: PUTL       	t4, LR
+	   6: JMPo-r       	t2  ($4)
+
+
+
+. 1407 EE3D0DC 8
+. 7C 08 03 A6 4E 80 00 21
+==== BB 1408 init_global_thread_specific_state(0xEE383D0) approx BBs exec'd 0 ====
+
+	0xEE383D0:  9421FF80  stwu r1,-128(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF80, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE383D4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE383D8:  93A10074  stw r29,116(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x74, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xEE383DC:  93C10078  stw r30,120(r1)
+	  14: GETL       	R30, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x78, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xEE383E0:  90010084  stw r0,132(r1)
+	  19: GETL       	R0, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x84, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xEE383E4:  48016EED  bl 0xEE4F2D0
+	  24: MOVL       	$0xEE383E8, t18
+	  25: PUTL       	t18, LR
+	  26: JMPo-c       	$0xEE4F2D0  ($4)
+
+
+
+. 1408 EE383D0 24
+. 94 21 FF 80 7C 08 02 A6 93 A1 00 74 93 C1 00 78 90 01 00 84 48 01 6E ED
+==== BB 1409 (0xEE383E8) approx BBs exec'd 0 ====
+
+	0xEE383E8:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0xEE383EC:  813E00A8  lwz r9,168(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0xA8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xEE383F0:  80090000  lwz r0,0(r9)
+	   8: GETL       	R9, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R0
+	  11: INCEIPL       	$4
+
+	0xEE383F4:  2F800000  cmpi cr7,r0,0
+	  12: GETL       	R0, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0xEE383F8:  40BE0138  bc 5,30,0xEE38530
+	  16: Jc30o       	$0xEE38530
+
+
+
+. 1409 EE383E8 20
+. 7F C8 02 A6 81 3E 00 A8 80 09 00 00 2F 80 00 00 40 BE 01 38
+==== BB 1410 (0xEE383FC) approx BBs exec'd 0 ====
+
+	0xEE383FC:  807E00AC  lwz r3,172(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xAC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xEE38400:  4801760D  bl 0xEE4FA0C
+	   5: MOVL       	$0xEE38404, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xEE4FA0C  ($4)
+
+
+
+. 1410 EE383FC 8
+. 80 7E 00 AC 48 01 76 0D
+==== BB 1411 (0xEE4FA0C) approx BBs exec'd 0 ====
+
+	0xEE4FA0C:  39600008  li r11,8
+	   0: MOVL       	$0x8, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xEE4FA10:  4BFFFFC4  b 0xEE4F9D4
+	   3: JMPo       	$0xEE4F9D4  ($4)
+
+
+
+. 1411 EE4FA0C 8
+. 39 60 00 08 4B FF FF C4
+==== BB 1412 (0xEE4F9D4) approx BBs exec'd 0 ====
+
+	0xEE4F9D4:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0xEE4F9D8:  7D6C5A14  add r11,r12,r11
+	   4: GETL       	R12, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xEE4F9DC:  3980AF5C  li r12,-20644
+	   9: MOVL       	$0xFFFFAF5C, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0xEE4F9E0:  3D8C2548  addis r12,r12,9544
+	  12: MOVL       	$0x2547AF5C, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0xEE4F9E4:  7D8903A6  mtctr r12
+	  15: GETL       	R12, t10
+	  16: PUTL       	t10, CTR
+	  17: INCEIPL       	$4
+
+	0xEE4F9E8:  39808880  li r12,-30592
+	  18: MOVL       	$0xFFFF8880, t12
+	  19: PUTL       	t12, R12
+	  20: INCEIPL       	$4
+
+	0xEE4F9EC:  3D8C2547  addis r12,r12,9543
+	  21: MOVL       	$0x25468880, t14
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0xEE4F9F0:  4E800420  bctr
+	  24: GETL       	CTR, t16
+	  25: JMPo       	t16  ($4)
+
+
+
+. 1412 EE4F9D4 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 88 80 3D 8C 25 47 4E 80 04 20
+==== BB 1413 _dl_runtime_resolve(0x2547AF5C) approx BBs exec'd 0 ====
+
+	0x2547AF5C:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547AF60:  9001000C  stw r0,12(r1)
+	   6: GETL       	R0, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xC, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x2547AF64:  90610010  stw r3,16(r1)
+	  11: GETL       	R3, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x10, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547AF68:  90810014  stw r4,20(r1)
+	  16: GETL       	R4, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x14, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2547AF6C:  7D836378  or r3,r12,r12
+	  21: GETL       	R12, t16
+	  22: PUTL       	t16, R3
+	  23: INCEIPL       	$4
+
+	0x2547AF70:  90A10018  stw r5,24(r1)
+	  24: GETL       	R5, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x18, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x2547AF74:  7D645B78  or r4,r11,r11
+	  29: GETL       	R11, t22
+	  30: PUTL       	t22, R4
+	  31: INCEIPL       	$4
+
+	0x2547AF78:  90C1001C  stw r6,28(r1)
+	  32: GETL       	R6, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x1C, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x2547AF7C:  7C0802A6  mflr r0
+	  37: GETL       	LR, t28
+	  38: PUTL       	t28, R0
+	  39: INCEIPL       	$4
+
+	0x2547AF80:  90E10020  stw r7,32(r1)
+	  40: GETL       	R7, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x20, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x2547AF84:  90010030  stw r0,48(r1)
+	  45: GETL       	R0, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x30, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0x2547AF88:  91010024  stw r8,36(r1)
+	  50: GETL       	R8, t38
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x24, t40
+	  53: STL       	t38, (t40)
+	  54: INCEIPL       	$4
+
+	0x2547AF8C:  7C000026  mfcr r0
+	  55: GETL       	CR, t42
+	  56: PUTL       	t42, R0
+	  57: INCEIPL       	$4
+
+	0x2547AF90:  91210028  stw r9,40(r1)
+	  58: GETL       	R9, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x28, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x2547AF94:  9141002C  stw r10,44(r1)
+	  63: GETL       	R10, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x2C, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x2547AF98:  90010008  stw r0,8(r1)
+	  68: GETL       	R0, t52
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x8, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x2547AF9C:  480000C9  bl 0x2547B064
+	  73: MOVL       	$0x2547AFA0, t56
+	  74: PUTL       	t56, LR
+	  75: JMPo-c       	$0x2547B064  ($4)
+
+
+
+. 1413 2547AF5C 68
+. 94 21 FF C0 90 01 00 0C 90 61 00 10 90 81 00 14 7D 83 63 78 90 A1 00 18 7D 64 5B 78 90 C1 00 1C 7C 08 02 A6 90 E1 00 20 90 01 00 30 91 01 00 24 7C 00 00 26 91 21 00 28 91 41 00 2C 90 01 00 08 48 00 00 C9
+==== BB 1414 fixup(0x2547B064) approx BBs exec'd 0 ====
+
+	0x2547B064:  7CE802A6  mflr r7
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x2547B068:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547B06C:  93810010  stw r28,16(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547B070:  4801BF91  bl 0x25497000
+	  14: MOVL       	$0x2547B074, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 1414 2547B064 16
+. 7C E8 02 A6 94 21 FF E0 93 81 00 10 48 01 BF 91
+==== BB 1415 (0x2547B074) approx BBs exec'd 0 ====
+
+	0x2547B074:  93A10014  stw r29,20(r1)
+	   0: GETL       	R29, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B078:  90E10024  stw r7,36(r1)
+	   5: GETL       	R7, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x24, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547B07C:  80C3007C  lwz r6,124(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x7C, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R6
+	  14: INCEIPL       	$4
+
+	0x2547B080:  93C10018  stw r30,24(r1)
+	  15: GETL       	R30, t12
+	  16: GETL       	R1, t14
+	  17: ADDL       	$0x18, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x2547B084:  7FC802A6  mflr r30
+	  20: GETL       	LR, t16
+	  21: PUTL       	t16, R30
+	  22: INCEIPL       	$4
+
+	0x2547B088:  93E1001C  stw r31,28(r1)
+	  23: GETL       	R31, t18
+	  24: GETL       	R1, t20
+	  25: ADDL       	$0x1C, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0x2547B08C:  7C7F1B78  or r31,r3,r3
+	  28: GETL       	R3, t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0x2547B090:  81460004  lwz r10,4(r6)
+	  31: GETL       	R6, t24
+	  32: ADDL       	$0x4, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R10
+	  35: INCEIPL       	$4
+
+	0x2547B094:  80A30038  lwz r5,56(r3)
+	  36: GETL       	R3, t28
+	  37: ADDL       	$0x38, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R5
+	  40: INCEIPL       	$4
+
+	0x2547B098:  7FAA2214  add r29,r10,r4
+	  41: GETL       	R10, t32
+	  42: GETL       	R4, t34
+	  43: ADDL       	t32, t34
+	  44: PUTL       	t34, R29
+	  45: INCEIPL       	$4
+
+	0x2547B09C:  7D6A202E  lwzx r11,r10,r4
+	  46: GETL       	R4, t36
+	  47: GETL       	R10, t38
+	  48: ADDL       	t38, t36
+	  49: LDL       	(t36), t40
+	  50: PUTL       	t40, R11
+	  51: INCEIPL       	$4
+
+	0x2547B0A0:  807D0004  lwz r3,4(r29)
+	  52: GETL       	R29, t42
+	  53: ADDL       	$0x4, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R3
+	  56: INCEIPL       	$4
+
+	0x2547B0A4:  81850004  lwz r12,4(r5)
+	  57: GETL       	R5, t46
+	  58: ADDL       	$0x4, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R12
+	  61: INCEIPL       	$4
+
+	0x2547B0A8:  5466C23E  rlwinm r6,r3,24,8,31
+	  62: GETL       	R3, t50
+	  63: SHRL       	$0x8, t50
+	  64: PUTL       	t50, R6
+	  65: INCEIPL       	$4
+
+	0x2547B0AC:  807F0000  lwz r3,0(r31)
+	  66: GETL       	R31, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R3
+	  69: INCEIPL       	$4
+
+	0x2547B0B0:  54C52036  rlwinm r5,r6,4,0,27
+	  70: GETL       	R6, t56
+	  71: SHLL       	$0x4, t56
+	  72: PUTL       	t56, R5
+	  73: INCEIPL       	$4
+
+	0x2547B0B4:  7D056214  add r8,r5,r12
+	  74: GETL       	R5, t58
+	  75: GETL       	R12, t60
+	  76: ADDL       	t58, t60
+	  77: PUTL       	t60, R8
+	  78: INCEIPL       	$4
+
+	0x2547B0B8:  7F835A14  add r28,r3,r11
+	  79: GETL       	R3, t62
+	  80: GETL       	R11, t64
+	  81: ADDL       	t62, t64
+	  82: PUTL       	t64, R28
+	  83: INCEIPL       	$4
+
+	0x2547B0BC:  8808000D  lbz r0,13(r8)
+	  84: GETL       	R8, t66
+	  85: ADDL       	$0xD, t66
+	  86: LDB       	(t66), t68
+	  87: PUTL       	t68, R0
+	  88: INCEIPL       	$4
+
+	0x2547B0C0:  70090003  andi. r9,r0,0x3
+	  89: GETL       	R0, t70
+	  90: ANDL       	$0x3, t70
+	  91: PUTL       	t70, R9
+	  92: CMP0L       	t70, t72  (-rSo)
+	  93: ICRFL       	t72, $0x0, CR
+	  94: INCEIPL       	$4
+
+	0x2547B0C4:  813F0034  lwz r9,52(r31)
+	  95: GETL       	R31, t74
+	  96: ADDL       	$0x34, t74
+	  97: LDL       	(t74), t76
+	  98: PUTL       	t76, R9
+	  99: INCEIPL       	$4
+
+	0x2547B0C8:  80890004  lwz r4,4(r9)
+	 100: GETL       	R9, t78
+	 101: ADDL       	$0x4, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R4
+	 104: INCEIPL       	$4
+
+	0x2547B0CC:  91010008  stw r8,8(r1)
+	 105: GETL       	R8, t82
+	 106: GETL       	R1, t84
+	 107: ADDL       	$0x8, t84
+	 108: STL       	t82, (t84)
+	 109: INCEIPL       	$4
+
+	0x2547B0D0:  408200E0  bc 4,2,0x2547B1B0
+	 110: Jc02o       	$0x2547B1B0
+
+
+
+. 1415 2547B074 96
+. 93 A1 00 14 90 E1 00 24 80 C3 00 7C 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 7C 7F 1B 78 81 46 00 04 80 A3 00 38 7F AA 22 14 7D 6A 20 2E 80 7D 00 04 81 85 00 04 54 66 C2 3E 80 7F 00 00 54 C5 20 36 7D 05 62 14 7F 83 5A 14 88 08 00 0D 70 09 00 03 81 3F 00 34 80 89 00 04 91 01 00 08 40 82 00 E0
+==== BB 1416 (0x2547B0D4) approx BBs exec'd 0 ====
+
+	0x2547B0D4:  815F00E4  lwz r10,228(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xE4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547B0D8:  38E00000  li r7,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x2547B0DC:  2F8A0000  cmpi cr7,r10,0
+	   8: GETL       	R10, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x2547B0E0:  409E009C  bc 4,30,0x2547B17C
+	  12: Jc30o       	$0x2547B17C
+
+
+
+. 1416 2547B0D4 16
+. 81 5F 00 E4 38 E0 00 00 2F 8A 00 00 40 9E 00 9C
+==== BB 1417 (0x2547B17C) approx BBs exec'd 0 ====
+
+	0x2547B17C:  816A0004  lwz r11,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547B180:  54C9083C  rlwinm r9,r6,1,0,30
+	   5: GETL       	R6, t4
+	   6: SHLL       	$0x1, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547B184:  80DF0188  lwz r6,392(r31)
+	   9: GETL       	R31, t6
+	  10: ADDL       	$0x188, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R6
+	  13: INCEIPL       	$4
+
+	0x2547B188:  7D095A2E  lhzx r8,r9,r11
+	  14: GETL       	R11, t10
+	  15: GETL       	R9, t12
+	  16: ADDL       	t12, t10
+	  17: LDW       	(t10), t14
+	  18: PUTL       	t14, R8
+	  19: INCEIPL       	$4
+
+	0x2547B18C:  55072376  rlwinm r7,r8,4,13,27
+	  20: GETL       	R8, t16
+	  21: ROLL       	$0x4, t16
+	  22: ANDL       	$0x7FFF0, t16
+	  23: PUTL       	t16, R7
+	  24: INCEIPL       	$4
+
+	0x2547B190:  7D663A14  add r11,r6,r7
+	  25: GETL       	R6, t18
+	  26: GETL       	R7, t20
+	  27: ADDL       	t18, t20
+	  28: PUTL       	t20, R11
+	  29: INCEIPL       	$4
+
+	0x2547B194:  806B0004  lwz r3,4(r11)
+	  30: GETL       	R11, t22
+	  31: ADDL       	$0x4, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R3
+	  34: INCEIPL       	$4
+
+	0x2547B198:  7C6AFE70  srawi r10,r3,31
+	  35: GETL       	R3, t26
+	  36: SARL       	$0x1F, t26  (-wCa)
+	  37: PUTL       	t26, R10
+	  38: INCEIPL       	$4
+
+	0x2547B19C:  7D401A78  xor r0,r10,r3
+	  39: GETL       	R10, t28
+	  40: GETL       	R3, t30
+	  41: XORL       	t28, t30
+	  42: PUTL       	t30, R0
+	  43: INCEIPL       	$4
+
+	0x2547B1A0:  7D205050  subf r9,r0,r10
+	  44: GETL       	R0, t32
+	  45: GETL       	R10, t34
+	  46: SUBL       	t32, t34
+	  47: PUTL       	t34, R9
+	  48: INCEIPL       	$4
+
+	0x2547B1A4:  7D28FE70  srawi r8,r9,31
+	  49: GETL       	R9, t36
+	  50: SARL       	$0x1F, t36  (-wCa)
+	  51: PUTL       	t36, R8
+	  52: INCEIPL       	$4
+
+	0x2547B1A8:  7D674038  and r7,r11,r8
+	  53: GETL       	R11, t38
+	  54: GETL       	R8, t40
+	  55: ANDL       	t38, t40
+	  56: PUTL       	t40, R7
+	  57: INCEIPL       	$4
+
+	0x2547B1AC:  4BFFFF38  b 0x2547B0E4
+	  58: JMPo       	$0x2547B0E4  ($4)
+
+
+
+. 1417 2547B17C 52
+. 81 6A 00 04 54 C9 08 3C 80 DF 01 88 7D 09 5A 2E 55 07 23 76 7D 66 3A 14 80 6B 00 04 7C 6A FE 70 7D 40 1A 78 7D 20 50 50 7D 28 FE 70 7D 67 40 38 4B FF FF 38
+==== BB 1418 (0x2547B0E4) approx BBs exec'd 0 ====
+
+	0x2547B0E4:  7C05602E  lwzx r0,r5,r12
+	   0: GETL       	R12, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R0
+	   5: INCEIPL       	$4
+
+	0x2547B0E8:  39400000  li r10,0
+	   6: MOVL       	$0x0, t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x2547B0EC:  80DF01C0  lwz r6,448(r31)
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0x1C0, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R6
+	  13: INCEIPL       	$4
+
+	0x2547B0F0:  38A10008  addi r5,r1,8
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x8, t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0x2547B0F4:  7C602214  add r3,r0,r4
+	  18: GETL       	R0, t14
+	  19: GETL       	R4, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R3
+	  22: INCEIPL       	$4
+
+	0x2547B0F8:  39000001  li r8,1
+	  23: MOVL       	$0x1, t18
+	  24: PUTL       	t18, R8
+	  25: INCEIPL       	$4
+
+	0x2547B0FC:  7FE4FB78  or r4,r31,r31
+	  26: GETL       	R31, t20
+	  27: PUTL       	t20, R4
+	  28: INCEIPL       	$4
+
+	0x2547B100:  39200001  li r9,1
+	  29: MOVL       	$0x1, t22
+	  30: PUTL       	t22, R9
+	  31: INCEIPL       	$4
+
+	0x2547B104:  4BFFD4A1  bl 0x254785A4
+	  32: MOVL       	$0x2547B108, t24
+	  33: PUTL       	t24, LR
+	  34: JMPo-c       	$0x254785A4  ($4)
+
+
+
+. 1418 2547B0E4 36
+. 7C 05 60 2E 39 40 00 00 80 DF 01 C0 38 A1 00 08 7C 60 22 14 39 00 00 01 7F E4 FB 78 39 20 00 01 4B FF D4 A1
+==== BB 1419 (0x2547B108) approx BBs exec'd 0 ====
+
+	0x2547B108:  39400000  li r10,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x2547B10C:  81610008  lwz r11,8(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547B110:  2C8B0000  cmpi cr1,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x2547B114:  41860018  bc 12,6,0x2547B12C
+	  12: Js06o       	$0x2547B12C
+
+
+
+. 1419 2547B108 16
+. 39 40 00 00 81 61 00 08 2C 8B 00 00 41 86 00 18
+==== BB 1420 (0x2547B118) approx BBs exec'd 0 ====
+
+	0x2547B118:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547B11C:  419A00A0  bc 12,26,0x2547B1BC
+	   4: Js26o       	$0x2547B1BC
+
+
+
+. 1420 2547B118 8
+. 2F 03 00 00 41 9A 00 A0
+==== BB 1421 (0x2547B120) approx BBs exec'd 0 ====
+
+	0x2547B120:  81830000  lwz r12,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R12
+	   3: INCEIPL       	$4
+
+	0x2547B124:  808B0004  lwz r4,4(r11)
+	   4: GETL       	R11, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x2547B128:  7D4C2214  add r10,r12,r4
+	   9: GETL       	R12, t8
+	  10: GETL       	R4, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0x2547B12C:  80FE04F4  lwz r7,1268(r30)
+	  14: GETL       	R30, t12
+	  15: ADDL       	$0x4F4, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R7
+	  18: INCEIPL       	$4
+
+	0x2547B130:  80DD0008  lwz r6,8(r29)
+	  19: GETL       	R29, t16
+	  20: ADDL       	$0x8, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R6
+	  23: INCEIPL       	$4
+
+	0x2547B134:  8067002C  lwz r3,44(r7)
+	  24: GETL       	R7, t20
+	  25: ADDL       	$0x2C, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R3
+	  28: INCEIPL       	$4
+
+	0x2547B138:  7D665214  add r11,r6,r10
+	  29: GETL       	R6, t24
+	  30: GETL       	R10, t26
+	  31: ADDL       	t24, t26
+	  32: PUTL       	t26, R11
+	  33: INCEIPL       	$4
+
+	0x2547B13C:  2C030000  cmpi cr0,r3,0
+	  34: GETL       	R3, t28
+	  35: CMP0L       	t28, t30  (-rSo)
+	  36: ICRFL       	t30, $0x0, CR
+	  37: INCEIPL       	$4
+
+	0x2547B140:  7D635B78  or r3,r11,r11
+	  38: GETL       	R11, t32
+	  39: PUTL       	t32, R3
+	  40: INCEIPL       	$4
+
+	0x2547B144:  40820018  bc 4,2,0x2547B15C
+	  41: Jc02o       	$0x2547B15C
+
+
+
+. 1421 2547B120 40
+. 81 83 00 00 80 8B 00 04 7D 4C 22 14 80 FE 04 F4 80 DD 00 08 80 67 00 2C 7D 66 52 14 2C 03 00 00 7D 63 5B 78 40 82 00 18
+==== BB 1422 (0x2547B148) approx BBs exec'd 0 ====
+
+	0x2547B148:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547B14C:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x2547B150:  7F85E378  or r5,r28,r28
+	   6: GETL       	R28, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547B154:  7D665B78  or r6,r11,r11
+	   9: GETL       	R11, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0x2547B158:  4800540D  bl 0x25480564
+	  12: MOVL       	$0x2547B15C, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25480564  ($4)
+
+
+
+. 1422 2547B148 20
+. 7F E3 FB 78 7F A4 EB 78 7F 85 E3 78 7D 66 5B 78 48 00 54 0D
+==== BB 1423 __elf_machine_fixup_plt(0x25480564) approx BBs exec'd 0 ====
+
+	0x25480564:  7D453050  subf r10,r5,r6
+	   0: GETL       	R5, t0
+	   1: GETL       	R6, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25480568:  3D06FE00  addis r8,r6,-512
+	   5: GETL       	R6, t4
+	   6: ADDL       	$0xFE000000, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x2548056C:  554C3032  rlwinm r12,r10,6,0,25
+	   9: GETL       	R10, t6
+	  10: SHLL       	$0x6, t6
+	  11: PUTL       	t6, R12
+	  12: INCEIPL       	$4
+
+	0x25480570:  3CE0FC00  lis r7,-1024
+	  13: MOVL       	$0xFC000000, t8
+	  14: PUTL       	t8, R7
+	  15: INCEIPL       	$4
+
+	0x25480574:  7D843670  srawi r4,r12,6
+	  16: GETL       	R12, t10
+	  17: SARL       	$0x6, t10  (-wCa)
+	  18: PUTL       	t10, R4
+	  19: INCEIPL       	$4
+
+	0x25480578:  39280003  addi r9,r8,3
+	  20: GETL       	R8, t12
+	  21: ADDL       	$0x3, t12
+	  22: PUTL       	t12, R9
+	  23: INCEIPL       	$4
+
+	0x2548057C:  7F845000  cmp cr7,r4,r10
+	  24: GETL       	R4, t14
+	  25: GETL       	R10, t16
+	  26: CMPL       	t14, t16, t18  (-rSo)
+	  27: ICRFL       	t18, $0x7, CR
+	  28: INCEIPL       	$4
+
+	0x25480580:  60E00002  ori r0,r7,0x2
+	  29: MOVL       	$0xFC000002, t20
+	  30: PUTL       	t20, R0
+	  31: INCEIPL       	$4
+
+	0x25480584:  548B01BA  rlwinm r11,r4,0,6,29
+	  32: GETL       	R4, t22
+	  33: ANDL       	$0x3FFFFFC, t22
+	  34: PUTL       	t22, R11
+	  35: INCEIPL       	$4
+
+	0x25480588:  9421FFF0  stwu r1,-16(r1)
+	  36: GETL       	R1, t24
+	  37: GETL       	R1, t26
+	  38: ADDL       	$0xFFFFFFF0, t26
+	  39: PUTL       	t26, R1
+	  40: STL       	t24, (t26)
+	  41: INCEIPL       	$4
+
+	0x2548058C:  7F090040  cmpl cr6,r9,r0
+	  42: GETL       	R9, t28
+	  43: GETL       	R0, t30
+	  44: CMPUL       	t28, t30, t32  (-rSo)
+	  45: ICRFL       	t32, $0x6, CR
+	  46: INCEIPL       	$4
+
+	0x25480590:  656B4800  oris r11,r11,0x4800
+	  47: GETL       	R11, t34
+	  48: ORL       	$0x48000000, t34
+	  49: PUTL       	t34, R11
+	  50: INCEIPL       	$4
+
+	0x25480594:  419E00EC  bc 12,30,0x25480680
+	  51: Js30o       	$0x25480680
+
+
+
+. 1423 25480564 52
+. 7D 45 30 50 3D 06 FE 00 55 4C 30 32 3C E0 FC 00 7D 84 36 70 39 28 00 03 7F 84 50 00 60 E0 00 02 54 8B 01 BA 94 21 FF F0 7F 09 00 40 65 6B 48 00 41 9E 00 EC
+==== BB 1424 (0x25480680) approx BBs exec'd 0 ====
+
+	0x25480680:  91650000  stw r11,0(r5)
+	   0: GETL       	R11, t0
+	   1: GETL       	R5, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25480684:  7C00286C  dcbst r0,r5
+	   4: INCEIPL       	$4
+
+	0x25480688:  7C0004AC  sync
+	   5: INCEIPL       	$4
+
+	0x2548068C:  7C002FAC  icbi r0,r5
+	   6: GETL       	R5, t4
+	   7: CALLM_So       	
+	   8: PUSHL       	t4
+	   9: CALLMo       	$0x68
+	  10: CALLM_Eo       	
+	  11: INCEIPL       	$4
+
+	0x25480690:  7C0004AC  sync
+	  12: INCEIPL       	$4
+
+	0x25480694:  4C00012C  	  13: INCEIPL       	$4
+
+	0x25480698:  7CC33378  or r3,r6,r6
+	  14: GETL       	R6, t6
+	  15: PUTL       	t6, R3
+	  16: INCEIPL       	$4
+
+	0x2548069C:  38210010  addi r1,r1,16
+	  17: GETL       	R1, t8
+	  18: ADDL       	$0x10, t8
+	  19: PUTL       	t8, R1
+	  20: INCEIPL       	$4
+
+	0x254806A0:  4E800020  blr
+	  21: GETL       	LR, t10
+	  22: JMPo-r       	t10  ($4)
+
+
+
+. 1424 25480680 36
+. 91 65 00 00 7C 00 28 6C 7C 00 04 AC 7C 00 2F AC 7C 00 04 AC 4C 00 01 2C 7C C3 33 78 38 21 00 10 4E 80 00 20
+==== BB 1425 (0x2547B15C) approx BBs exec'd 0 ====
+
+	0x2547B15C:  83810024  lwz r28,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x2547B160:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547B164:  7F8803A6  mtlr r28
+	  10: GETL       	R28, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x2547B168:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x2547B16C:  83810010  lwz r28,16(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R28
+	  22: INCEIPL       	$4
+
+	0x2547B170:  83E1001C  lwz r31,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0x2547B174:  38210020  addi r1,r1,32
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0x2547B178:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+
+. 1425 2547B15C 32
+. 83 81 00 24 83 A1 00 14 7F 88 03 A6 83 C1 00 18 83 81 00 10 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1426 (0x2547AFA0) approx BBs exec'd 0 ====
+
+	0x2547AFA0:  7C6903A6  mtctr r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x2547AFA4:  80010030  lwz r0,48(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x30, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x2547AFA8:  8141002C  lwz r10,44(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x2C, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x2547AFAC:  81210028  lwz r9,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x2547AFB0:  7C0803A6  mtlr r0
+	  18: GETL       	R0, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547AFB4:  81010024  lwz r8,36(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x24, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R8
+	  25: INCEIPL       	$4
+
+	0x2547AFB8:  80010008  lwz r0,8(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x8, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R0
+	  30: INCEIPL       	$4
+
+	0x2547AFBC:  80E10020  lwz r7,32(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R7
+	  35: INCEIPL       	$4
+
+	0x2547AFC0:  80C1001C  lwz r6,28(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R6
+	  40: INCEIPL       	$4
+
+	0x2547AFC4:  7C0FF120  mtcr r0
+	  41: GETL       	R0, t32
+	  42: PUTL       	t32, CR
+	  43: INCEIPL       	$4
+
+	0x2547AFC8:  80A10018  lwz r5,24(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x18, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R5
+	  48: INCEIPL       	$4
+
+	0x2547AFCC:  80810014  lwz r4,20(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x14, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R4
+	  53: INCEIPL       	$4
+
+	0x2547AFD0:  80610010  lwz r3,16(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x10, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R3
+	  58: INCEIPL       	$4
+
+	0x2547AFD4:  8001000C  lwz r0,12(r1)
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0xC, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R0
+	  63: INCEIPL       	$4
+
+	0x2547AFD8:  38210040  addi r1,r1,64
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x40, t50
+	  66: PUTL       	t50, R1
+	  67: INCEIPL       	$4
+
+	0x2547AFDC:  4E800420  bctr
+	  68: GETL       	CTR, t52
+	  69: JMPo       	t52  ($4)
+
+
+
+. 1426 2547AFA0 64
+. 7C 69 03 A6 80 01 00 30 81 41 00 2C 81 21 00 28 7C 08 03 A6 81 01 00 24 80 01 00 08 80 E1 00 20 80 C1 00 1C 7C 0F F1 20 80 A1 00 18 80 81 00 14 80 61 00 10 80 01 00 0C 38 21 00 40 4E 80 04 20
+==== BB 1427 pthread_mutex_lock(0xEE367D4) approx BBs exec'd 0 ====
+
+	0xEE367D4:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE367D8:  39200000  li r9,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xEE367DC:  3800300C  li r0,12300
+	   9: MOVL       	$0x300C, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0xEE367E0:  90010010  stw r0,16(r1)
+	  12: GETL       	R0, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xEE367E4:  90610014  stw r3,20(r1)
+	  17: GETL       	R3, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xEE367E8:  91210018  stw r9,24(r1)
+	  22: GETL       	R9, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xEE367EC:  9121001C  stw r9,28(r1)
+	  27: GETL       	R9, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xEE367F0:  91210020  stw r9,32(r1)
+	  32: GETL       	R9, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x20, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xEE367F4:  38610010  addi r3,r1,16
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x10, t28
+	  39: PUTL       	t28, R3
+	  40: INCEIPL       	$4
+
+	0xEE367F8:  7C03D808  client_request
+	  41: JMPo-cli       	$0xEE367FC  ($4)
+
+
+
+. 1427 EE367D4 40
+. 94 21 FF C0 39 20 00 00 38 00 30 0C 90 01 00 10 90 61 00 14 91 21 00 18 91 21 00 1C 91 21 00 20 38 61 00 10 7C 03 D8 08
+==== BB 1428 (0xEE367FC) approx BBs exec'd 0 ====
+
+	0xEE367FC:  38210040  addi r1,r1,64
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x40, t0
+	   2: PUTL       	t0, R1
+	   3: INCEIPL       	$4
+
+	0xEE36800:  4E800020  blr
+	   4: GETL       	LR, t2
+	   5: JMPo-r       	t2  ($4)
+
+
+
+. 1428 EE367FC 8
+. 38 21 00 40 4E 80 00 20
+==== BB 1429 (0xEE38404) approx BBs exec'd 0 ====
+
+	0xEE38404:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xEE38408:  41BE000C  bc 13,30,0xEE38414
+	   4: Js30o       	$0xEE38414
+
+
+
+. 1429 EE38404 8
+. 2F 83 00 00 41 BE 00 0C
+==== BB 1430 (0xEE38414) approx BBs exec'd 0 ====
+
+	0xEE38414:  813E00A8  lwz r9,168(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xA8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xEE38418:  80090000  lwz r0,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0xEE3841C:  2F800000  cmpi cr7,r0,0
+	   9: GETL       	R0, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xEE38420:  41BE001C  bc 13,30,0xEE3843C
+	  13: Js30o       	$0xEE3843C
+
+
+
+. 1430 EE38414 16
+. 81 3E 00 A8 80 09 00 00 2F 80 00 00 41 BE 00 1C
+==== BB 1431 (0xEE3843C) approx BBs exec'd 0 ====
+
+	0xEE3843C:  38001001  li r0,4097
+	   0: MOVL       	$0x1001, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xEE38440:  90010010  stw r0,16(r1)
+	   3: GETL       	R0, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x10, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xEE38444:  38000000  li r0,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0xEE38448:  90010014  stw r0,20(r1)
+	  11: GETL       	R0, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xEE3844C:  90010018  stw r0,24(r1)
+	  16: GETL       	R0, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x18, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xEE38450:  9001001C  stw r0,28(r1)
+	  21: GETL       	R0, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xEE38454:  90010020  stw r0,32(r1)
+	  26: GETL       	R0, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x20, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xEE38458:  38610010  addi r3,r1,16
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x10, t24
+	  33: PUTL       	t24, R3
+	  34: INCEIPL       	$4
+
+	0xEE3845C:  7C03D808  client_request
+	  35: JMPo-cli       	$0xEE38460  ($4)
+
+
+
+. 1431 EE3843C 36
+. 38 00 10 01 90 01 00 10 38 00 00 00 90 01 00 14 90 01 00 18 90 01 00 1C 90 01 00 20 38 61 00 10 7C 03 D8 08
+==== BB 1432 (0xEE38460) approx BBs exec'd 0 ====
+
+	0xEE38460:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xEE38464:  40BE0040  bc 5,30,0xEE384A4
+	   4: Jc30o       	$0xEE384A4
+
+
+
+. 1432 EE38460 8
+. 2F 83 00 00 40 BE 00 40
+==== BB 1433 (0xEE384A4) approx BBs exec'd 0 ====
+
+	0xEE384A4:  3800300B  li r0,12299
+	   0: MOVL       	$0x300B, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xEE384A8:  90010010  stw r0,16(r1)
+	   3: GETL       	R0, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x10, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xEE384AC:  38000000  li r0,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0xEE384B0:  90010014  stw r0,20(r1)
+	  11: GETL       	R0, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xEE384B4:  90010018  stw r0,24(r1)
+	  16: GETL       	R0, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x18, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xEE384B8:  9001001C  stw r0,28(r1)
+	  21: GETL       	R0, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xEE384BC:  90010020  stw r0,32(r1)
+	  26: GETL       	R0, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x20, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xEE384C0:  38610010  addi r3,r1,16
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x10, t24
+	  33: PUTL       	t24, R3
+	  34: INCEIPL       	$4
+
+	0xEE384C4:  7C03D808  client_request
+	  35: JMPo-cli       	$0xEE384C8  ($4)
+
+
+
+. 1433 EE384A4 36
+. 38 00 30 0B 90 01 00 10 38 00 00 00 90 01 00 14 90 01 00 18 90 01 00 1C 90 01 00 20 38 61 00 10 7C 03 D8 08
+==== BB 1434 (0xEE384C8) approx BBs exec'd 0 ====
+
+	0xEE384C8:  3803FFFF  addi r0,r3,-1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xEE384CC:  2B800062  cmpli cr7,r0,98
+	   4: GETL       	R0, t2
+	   5: MOVL       	$0x62, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xEE384D0:  40BD000C  bc 5,29,0xEE384DC
+	   9: Jc29o       	$0xEE384DC
+
+
+
+. 1434 EE384C8 12
+. 38 03 FF FF 2B 80 00 62 40 BD 00 0C
+==== BB 1435 (0xEE384DC) approx BBs exec'd 0 ====
+
+	0xEE384DC:  2F830001  cmpi cr7,r3,1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xEE384E0:  419E0018  bc 12,30,0xEE384F8
+	   5: Js30o       	$0xEE384F8
+
+
+
+. 1435 EE384DC 8
+. 2F 83 00 01 41 9E 00 18
+==== BB 1436 (0xEE384F8) approx BBs exec'd 0 ====
+
+	0xEE384F8:  38000001  li r0,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xEE384FC:  813E00A8  lwz r9,168(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0xA8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xEE38500:  90090000  stw r0,0(r9)
+	   8: GETL       	R0, t6
+	   9: GETL       	R9, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xEE38504:  48000045  bl 0xEE38548
+	  12: MOVL       	$0xEE38508, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0xEE38548  ($4)
+
+
+
+. 1436 EE384F8 16
+. 38 00 00 01 81 3E 00 A8 90 09 00 00 48 00 00 45
+==== BB 1437 init_thread_specific_state(0xEE38548) approx BBs exec'd 0 ====
+
+	0xEE38548:  9421FF80  stwu r1,-128(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF80, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE3854C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE38550:  93A10074  stw r29,116(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x74, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xEE38554:  93C10078  stw r30,120(r1)
+	  14: GETL       	R30, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x78, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xEE38558:  90010084  stw r0,132(r1)
+	  19: GETL       	R0, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x84, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xEE3855C:  48016D75  bl 0xEE4F2D0
+	  24: MOVL       	$0xEE38560, t18
+	  25: PUTL       	t18, LR
+	  26: JMPo-c       	$0xEE4F2D0  ($4)
+
+
+
+. 1437 EE38548 24
+. 94 21 FF 80 7C 08 02 A6 93 A1 00 74 93 C1 00 78 90 01 00 84 48 01 6D 75
+==== BB 1438 (0xEE38560) approx BBs exec'd 0 ====
+
+	0xEE38560:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0xEE38564:  38001001  li r0,4097
+	   3: MOVL       	$0x1001, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xEE38568:  90010010  stw r0,16(r1)
+	   6: GETL       	R0, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x10, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xEE3856C:  38000000  li r0,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0xEE38570:  90010014  stw r0,20(r1)
+	  14: GETL       	R0, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xEE38574:  90010018  stw r0,24(r1)
+	  19: GETL       	R0, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x18, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xEE38578:  9001001C  stw r0,28(r1)
+	  24: GETL       	R0, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x1C, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xEE3857C:  90010020  stw r0,32(r1)
+	  29: GETL       	R0, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x20, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xEE38580:  38610010  addi r3,r1,16
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x10, t26
+	  36: PUTL       	t26, R3
+	  37: INCEIPL       	$4
+
+	0xEE38584:  7C03D808  client_request
+	  38: JMPo-cli       	$0xEE38588  ($4)
+
+
+
+. 1438 EE38560 40
+. 7F C8 02 A6 38 00 10 01 90 01 00 10 38 00 00 00 90 01 00 14 90 01 00 18 90 01 00 1C 90 01 00 20 38 61 00 10 7C 03 D8 08
+==== BB 1439 (0xEE38588) approx BBs exec'd 0 ====
+
+	0xEE38588:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xEE3858C:  40BE0040  bc 5,30,0xEE385CC
+	   4: Jc30o       	$0xEE385CC
+
+
+
+. 1439 EE38588 8
+. 2F 83 00 00 40 BE 00 40
+==== BB 1440 (0xEE385CC) approx BBs exec'd 0 ====
+
+	0xEE385CC:  3800300B  li r0,12299
+	   0: MOVL       	$0x300B, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xEE385D0:  90010010  stw r0,16(r1)
+	   3: GETL       	R0, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x10, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xEE385D4:  38000000  li r0,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0xEE385D8:  90010014  stw r0,20(r1)
+	  11: GETL       	R0, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xEE385DC:  90010018  stw r0,24(r1)
+	  16: GETL       	R0, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x18, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xEE385E0:  9001001C  stw r0,28(r1)
+	  21: GETL       	R0, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xEE385E4:  90010020  stw r0,32(r1)
+	  26: GETL       	R0, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x20, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xEE385E8:  38610010  addi r3,r1,16
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x10, t24
+	  33: PUTL       	t24, R3
+	  34: INCEIPL       	$4
+
+	0xEE385EC:  7C03D808  client_request
+	  35: JMPo-cli       	$0xEE385F0  ($4)
+
+
+
+. 1440 EE385CC 36
+. 38 00 30 0B 90 01 00 10 38 00 00 00 90 01 00 14 90 01 00 18 90 01 00 1C 90 01 00 20 38 61 00 10 7C 03 D8 08
+==== BB 1441 (0xEE385F0) approx BBs exec'd 0 ====
+
+	0xEE385F0:  3803FFFF  addi r0,r3,-1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xEE385F4:  2B800062  cmpli cr7,r0,98
+	   4: GETL       	R0, t2
+	   5: MOVL       	$0x62, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xEE385F8:  40BD000C  bc 5,29,0xEE38604
+	   9: Jc29o       	$0xEE38604
+
+
+
+. 1441 EE385F0 12
+. 38 03 FF FF 2B 80 00 62 40 BD 00 0C
+==== BB 1442 (0xEE38604) approx BBs exec'd 0 ====
+
+	0xEE38604:  815E00B8  lwz r10,184(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xB8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xEE38608:  1D630234  mulli r11,r3,564
+	   5: GETL       	R3, t4
+	   6: MULL       	$0x234, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xEE3860C:  7D2B5214  add r9,r11,r10
+	   9: GETL       	R11, t6
+	  10: GETL       	R10, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xEE38610:  38000000  li r0,0
+	  14: MOVL       	$0x0, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xEE38614:  7C0B512E  stwx r0,r11,r10
+	  17: GETL       	R10, t12
+	  18: GETL       	R11, t14
+	  19: ADDL       	t14, t12
+	  20: GETL       	R0, t16
+	  21: STL       	t16, (t12)
+	  22: INCEIPL       	$4
+
+	0xEE38618:  90090004  stw r0,4(r9)
+	  23: GETL       	R0, t18
+	  24: GETL       	R9, t20
+	  25: ADDL       	$0x4, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0xEE3861C:  90090008  stw r0,8(r9)
+	  28: GETL       	R0, t22
+	  29: GETL       	R9, t24
+	  30: ADDL       	$0x8, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0xEE38620:  9009000C  stw r0,12(r9)
+	  33: GETL       	R0, t26
+	  34: GETL       	R9, t28
+	  35: ADDL       	$0xC, t28
+	  36: STL       	t26, (t28)
+	  37: INCEIPL       	$4
+
+	0xEE38624:  39600000  li r11,0
+	  38: MOVL       	$0x0, t30
+	  39: PUTL       	t30, R11
+	  40: INCEIPL       	$4
+
+	0xEE38628:  7D475378  or r7,r10,r10
+	  41: GETL       	R10, t32
+	  42: PUTL       	t32, R7
+	  43: INCEIPL       	$4
+
+	0xEE3862C:  1C03008D  mulli r0,r3,141
+	  44: GETL       	R3, t34
+	  45: MULL       	$0x8D, t34
+	  46: PUTL       	t34, R0
+	  47: INCEIPL       	$4
+
+	0xEE38630:  39400000  li r10,0
+	  48: MOVL       	$0x0, t36
+	  49: PUTL       	t36, R10
+	  50: INCEIPL       	$4
+
+	0xEE38634:  7D205A14  add r9,r0,r11
+	  51: GETL       	R0, t38
+	  52: GETL       	R11, t40
+	  53: ADDL       	t38, t40
+	  54: PUTL       	t40, R9
+	  55: INCEIPL       	$4
+
+	0xEE38638:  5529103A  rlwinm r9,r9,2,0,29
+	  56: GETL       	R9, t42
+	  57: SHLL       	$0x2, t42
+	  58: PUTL       	t42, R9
+	  59: INCEIPL       	$4
+
+	0xEE3863C:  7D293A14  add r9,r9,r7
+	  60: GETL       	R9, t44
+	  61: GETL       	R7, t46
+	  62: ADDL       	t44, t46
+	  63: PUTL       	t46, R9
+	  64: INCEIPL       	$4
+
+	0xEE38640:  91490218  stw r10,536(r9)
+	  65: GETL       	R10, t48
+	  66: GETL       	R9, t50
+	  67: ADDL       	$0x218, t50
+	  68: STL       	t48, (t50)
+	  69: INCEIPL       	$4
+
+	0xEE38644:  396B0001  addi r11,r11,1
+	  70: MOVL       	$0x1, t52
+	  71: PUTL       	t52, R11
+	  72: INCEIPL       	$4
+
+	0xEE38648:  2F8B0006  cmpi cr7,r11,6
+	  73: GETL       	R11, t54
+	  74: MOVL       	$0x6, t58
+	  75: CMPL       	t54, t58, t56  (-rSo)
+	  76: ICRFL       	t56, $0x7, CR
+	  77: INCEIPL       	$4
+
+	0xEE3864C:  409DFFE8  bc 4,29,0xEE38634
+	  78: Jc29o       	$0xEE38634
+
+
+
+. 1442 EE38604 76
+. 81 5E 00 B8 1D 63 02 34 7D 2B 52 14 38 00 00 00 7C 0B 51 2E 90 09 00 04 90 09 00 08 90 09 00 0C 39 60 00 00 7D 47 53 78 1C 03 00 8D 39 40 00 00 7D 20 5A 14 55 29 10 3A 7D 29 3A 14 91 49 02 18 39 6B 00 01 2F 8B 00 06 40 9D FF E8
+==== BB 1443 (0xEE38634) approx BBs exec'd 0 ====
+
+	0xEE38634:  7D205A14  add r9,r0,r11
+	   0: GETL       	R0, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xEE38638:  5529103A  rlwinm r9,r9,2,0,29
+	   5: GETL       	R9, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xEE3863C:  7D293A14  add r9,r9,r7
+	   9: GETL       	R9, t6
+	  10: GETL       	R7, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xEE38640:  91490218  stw r10,536(r9)
+	  14: GETL       	R10, t10
+	  15: GETL       	R9, t12
+	  16: ADDL       	$0x218, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xEE38644:  396B0001  addi r11,r11,1
+	  19: GETL       	R11, t14
+	  20: ADDL       	$0x1, t14
+	  21: PUTL       	t14, R11
+	  22: INCEIPL       	$4
+
+	0xEE38648:  2F8B0006  cmpi cr7,r11,6
+	  23: GETL       	R11, t16
+	  24: MOVL       	$0x6, t20
+	  25: CMPL       	t16, t20, t18  (-rSo)
+	  26: ICRFL       	t18, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0xEE3864C:  409DFFE8  bc 4,29,0xEE38634
+	  28: Jc29o       	$0xEE38634
+
+
+
+. 1443 EE38634 28
+. 7D 20 5A 14 55 29 10 3A 7D 29 3A 14 91 49 02 18 39 6B 00 01 2F 8B 00 06 40 9D FF E8
+==== BB 1444 (0xEE38650) approx BBs exec'd 0 ====
+
+	0xEE38650:  3860FFFF  li r3,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xEE38654:  48017459  bl 0xEE4FAAC
+	   3: MOVL       	$0xEE38658, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xEE4FAAC  ($4)
+
+
+
+. 1444 EE38650 8
+. 38 60 FF FF 48 01 74 59
+==== BB 1445 (0xEE4FAAC) approx BBs exec'd 0 ====
+
+	0xEE4FAAC:  39600058  li r11,88
+	   0: MOVL       	$0x58, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xEE4FAB0:  4BFFFF24  b 0xEE4F9D4
+	   3: JMPo       	$0xEE4F9D4  ($4)
+
+
+
+. 1445 EE4FAAC 8
+. 39 60 00 58 4B FF FF 24
+==== BB 1446 uselocale(0xFE8919C) approx BBs exec'd 0 ====
+
+	0xFE8919C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE891A0:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFE891A4:  4811ECAD  bl 0xFFA7E50
+	   9: MOVL       	$0xFE891A8, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1446 FE8919C 12
+. 94 21 FF F0 7D 88 02 A6 48 11 EC AD
+==== BB 1447 (0xFE891A8) approx BBs exec'd 0 ====
+
+	0xFE891A8:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE891AC:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE891B0:  2C030000  cmpi cr0,r3,0
+	   8: GETL       	R3, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFE891B4:  7D8803A6  mtlr r12
+	  12: GETL       	R12, t10
+	  13: PUTL       	t10, LR
+	  14: INCEIPL       	$4
+
+	0xFE891B8:  2F83FFFF  cmpi cr7,r3,-1
+	  15: GETL       	R3, t12
+	  16: MOVL       	$0xFFFFFFFF, t16
+	  17: CMPL       	t12, t16, t14  (-rSo)
+	  18: ICRFL       	t14, $0x7, CR
+	  19: INCEIPL       	$4
+
+	0xFE891BC:  80BE1D50  lwz r5,7504(r30)
+	  20: GETL       	R30, t18
+	  21: ADDL       	$0x1D50, t18
+	  22: LDL       	(t18), t20
+	  23: PUTL       	t20, R5
+	  24: INCEIPL       	$4
+
+	0xFE891C0:  809E1CF8  lwz r4,7416(r30)
+	  25: GETL       	R30, t22
+	  26: ADDL       	$0x1CF8, t22
+	  27: LDL       	(t22), t24
+	  28: PUTL       	t24, R4
+	  29: INCEIPL       	$4
+
+	0xFE891C4:  7D051214  add r8,r5,r2
+	  30: GETL       	R5, t26
+	  31: GETL       	R2, t28
+	  32: ADDL       	t26, t28
+	  33: PUTL       	t28, R8
+	  34: INCEIPL       	$4
+
+	0xFE891C8:  817E1B80  lwz r11,7040(r30)
+	  35: GETL       	R30, t30
+	  36: ADDL       	$0x1B80, t30
+	  37: LDL       	(t30), t32
+	  38: PUTL       	t32, R11
+	  39: INCEIPL       	$4
+
+	0xFE891CC:  80080000  lwz r0,0(r8)
+	  40: GETL       	R8, t34
+	  41: LDL       	(t34), t36
+	  42: PUTL       	t36, R0
+	  43: INCEIPL       	$4
+
+	0xFE891D0:  813E1D8C  lwz r9,7564(r30)
+	  44: GETL       	R30, t38
+	  45: ADDL       	$0x1D8C, t38
+	  46: LDL       	(t38), t40
+	  47: PUTL       	t40, R9
+	  48: INCEIPL       	$4
+
+	0xFE891D4:  7CE41214  add r7,r4,r2
+	  49: GETL       	R4, t42
+	  50: GETL       	R2, t44
+	  51: ADDL       	t42, t44
+	  52: PUTL       	t44, R7
+	  53: INCEIPL       	$4
+
+	0xFE891D8:  7CCB1214  add r6,r11,r2
+	  54: GETL       	R11, t46
+	  55: GETL       	R2, t48
+	  56: ADDL       	t46, t48
+	  57: PUTL       	t48, R6
+	  58: INCEIPL       	$4
+
+	0xFE891DC:  7CA91214  add r5,r9,r2
+	  59: GETL       	R9, t50
+	  60: GETL       	R2, t52
+	  61: ADDL       	t50, t52
+	  62: PUTL       	t52, R5
+	  63: INCEIPL       	$4
+
+	0xFE891E0:  809E1D78  lwz r4,7544(r30)
+	  64: GETL       	R30, t54
+	  65: ADDL       	$0x1D78, t54
+	  66: LDL       	(t54), t56
+	  67: PUTL       	t56, R4
+	  68: INCEIPL       	$4
+
+	0xFE891E4:  7C0A0378  or r10,r0,r0
+	  69: GETL       	R0, t58
+	  70: PUTL       	t58, R10
+	  71: INCEIPL       	$4
+
+	0xFE891E8:  41820024  bc 12,2,0xFE8920C
+	  72: Js02o       	$0xFE8920C
+
+
+
+. 1447 FE891A8 68
+. 93 C1 00 08 7F C8 02 A6 2C 03 00 00 7D 88 03 A6 2F 83 FF FF 80 BE 1D 50 80 9E 1C F8 7D 05 12 14 81 7E 1B 80 80 08 00 00 81 3E 1D 8C 7C E4 12 14 7C CB 12 14 7C A9 12 14 80 9E 1D 78 7C 0A 03 78 41 82 00 24
+==== BB 1448 (0xFE891EC) approx BBs exec'd 0 ====
+
+	0xFE891EC:  419E004C  bc 12,30,0xFE89238
+	   0: Js30o       	$0xFE89238
+
+
+
+. 1448 FE891EC 4
+. 41 9E 00 4C
+==== BB 1449 (0xFE89238) approx BBs exec'd 0 ====
+
+	0xFE89238:  7C832378  or r3,r4,r4
+	   0: GETL       	R4, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8923C:  4BFFFFB4  b 0xFE891F0
+	   3: JMPo       	$0xFE891F0  ($4)
+
+
+
+. 1449 FE89238 8
+. 7C 83 23 78 4B FF FF B4
+==== BB 1450 (0xFE891F0) approx BBs exec'd 0 ====
+
+	0xFE891F0:  8003003C  lwz r0,60(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x3C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE891F4:  81230034  lwz r9,52(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x34, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFE891F8:  81630038  lwz r11,56(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x38, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0xFE891FC:  90680000  stw r3,0(r8)
+	  15: GETL       	R3, t12
+	  16: GETL       	R8, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0xFE89200:  91270000  stw r9,0(r7)
+	  19: GETL       	R9, t16
+	  20: GETL       	R7, t18
+	  21: STL       	t16, (t18)
+	  22: INCEIPL       	$4
+
+	0xFE89204:  91660000  stw r11,0(r6)
+	  23: GETL       	R11, t20
+	  24: GETL       	R6, t22
+	  25: STL       	t20, (t22)
+	  26: INCEIPL       	$4
+
+	0xFE89208:  90050000  stw r0,0(r5)
+	  27: GETL       	R0, t24
+	  28: GETL       	R5, t26
+	  29: STL       	t24, (t26)
+	  30: INCEIPL       	$4
+
+	0xFE8920C:  7F8A2000  cmp cr7,r10,r4
+	  31: GETL       	R10, t28
+	  32: GETL       	R4, t30
+	  33: CMPL       	t28, t30, t32  (-rSo)
+	  34: ICRFL       	t32, $0x7, CR
+	  35: INCEIPL       	$4
+
+	0xFE89210:  419E0014  bc 12,30,0xFE89224
+	  36: Js30o       	$0xFE89224
+
+
+
+. 1450 FE891F0 36
+. 80 03 00 3C 81 23 00 34 81 63 00 38 90 68 00 00 91 27 00 00 91 66 00 00 90 05 00 00 7F 8A 20 00 41 9E 00 14
+==== BB 1451 (0xFE89224) approx BBs exec'd 0 ====
+
+	0xFE89224:  3940FFFF  li r10,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFE89228:  83C10008  lwz r30,8(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE8922C:  7D435378  or r3,r10,r10
+	   8: GETL       	R10, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFE89230:  38210010  addi r1,r1,16
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x10, t8
+	  13: PUTL       	t8, R1
+	  14: INCEIPL       	$4
+
+	0xFE89234:  4E800020  blr
+	  15: GETL       	LR, t10
+	  16: JMPo-r       	t10  ($4)
+
+
+
+. 1451 FE89224 20
+. 39 40 FF FF 83 C1 00 08 7D 43 53 78 38 21 00 10 4E 80 00 20
+==== BB 1452 (0xEE38658) approx BBs exec'd 0 ====
+
+	0xEE38658:  80010084  lwz r0,132(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE3865C:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xEE38660:  83A10074  lwz r29,116(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x74, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xEE38664:  83C10078  lwz r30,120(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x78, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0xEE38668:  38210080  addi r1,r1,128
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x80, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xEE3866C:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 1452 EE38658 24
+. 80 01 00 84 7C 08 03 A6 83 A1 00 74 83 C1 00 78 38 21 00 80 4E 80 00 20
+==== BB 1453 (0xEE38508) approx BBs exec'd 0 ====
+
+	0xEE38508:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xEE3850C:  809E00B0  lwz r4,176(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0xB0, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xEE38510:  38A00000  li r5,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0xEE38514:  480175D9  bl 0xEE4FAEC
+	  11: MOVL       	$0xEE38518, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0xEE4FAEC  ($4)
+
+
+
+. 1453 EE38508 16
+. 38 60 00 00 80 9E 00 B0 38 A0 00 00 48 01 75 D9
+==== BB 1454 (0xEE4FAEC) approx BBs exec'd 0 ====
+
+	0xEE4FAEC:  39600078  li r11,120
+	   0: MOVL       	$0x78, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xEE4FAF0:  4BFFFEE4  b 0xEE4F9D4
+	   3: JMPo       	$0xEE4F9D4  ($4)
+
+
+
+. 1454 EE4FAEC 8
+. 39 60 00 78 4B FF FE E4
+==== BB 1455 _pthread_cleanup_push(0xEE357AC) approx BBs exec'd 0 ====
+
+	0xEE357AC:  9421FF70  stwu r1,-144(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF70, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE357B0:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE357B4:  93810080  stw r28,128(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x80, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xEE357B8:  93A10084  stw r29,132(r1)
+	  14: GETL       	R29, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x84, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xEE357BC:  93C10088  stw r30,136(r1)
+	  19: GETL       	R30, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x88, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xEE357C0:  93E1008C  stw r31,140(r1)
+	  24: GETL       	R31, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x8C, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xEE357C4:  90010094  stw r0,148(r1)
+	  29: GETL       	R0, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x94, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xEE357C8:  48019B09  bl 0xEE4F2D0
+	  34: MOVL       	$0xEE357CC, t26
+	  35: PUTL       	t26, LR
+	  36: JMPo-c       	$0xEE4F2D0  ($4)
+
+
+
+. 1455 EE357AC 32
+. 94 21 FF 70 7C 08 02 A6 93 81 00 80 93 A1 00 84 93 C1 00 88 93 E1 00 8C 90 01 00 94 48 01 9B 09
+==== BB 1456 (0xEE357CC) approx BBs exec'd 0 ====
+
+	0xEE357CC:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0xEE357D0:  7C9F2378  or r31,r4,r4
+	   3: GETL       	R4, t2
+	   4: PUTL       	t2, R31
+	   5: INCEIPL       	$4
+
+	0xEE357D4:  7CBC2B78  or r28,r5,r5
+	   6: GETL       	R5, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0xEE357D8:  38001001  li r0,4097
+	   9: MOVL       	$0x1001, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0xEE357DC:  90010020  stw r0,32(r1)
+	  12: GETL       	R0, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x20, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xEE357E0:  38000000  li r0,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R0
+	  19: INCEIPL       	$4
+
+	0xEE357E4:  90010024  stw r0,36(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x24, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xEE357E8:  90010028  stw r0,40(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x28, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xEE357EC:  9001002C  stw r0,44(r1)
+	  30: GETL       	R0, t22
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x2C, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0xEE357F0:  90010030  stw r0,48(r1)
+	  35: GETL       	R0, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x30, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xEE357F4:  38610020  addi r3,r1,32
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x20, t30
+	  42: PUTL       	t30, R3
+	  43: INCEIPL       	$4
+
+	0xEE357F8:  7C03D808  client_request
+	  44: JMPo-cli       	$0xEE357FC  ($4)
+
+
+
+. 1456 EE357CC 48
+. 7F C8 02 A6 7C 9F 23 78 7C BC 2B 78 38 00 10 01 90 01 00 20 38 00 00 00 90 01 00 24 90 01 00 28 90 01 00 2C 90 01 00 30 38 61 00 20 7C 03 D8 08
+==== BB 1457 (0xEE357FC) approx BBs exec'd 0 ====
+
+	0xEE357FC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xEE35800:  40BE0040  bc 5,30,0xEE35840
+	   4: Jc30o       	$0xEE35840
+
+
+
+. 1457 EE357FC 8
+. 2F 83 00 00 40 BE 00 40
+==== BB 1458 (0xEE35840) approx BBs exec'd 0 ====
+
+	0xEE35840:  38000001  li r0,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xEE35844:  90010010  stw r0,16(r1)
+	   3: GETL       	R0, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x10, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xEE35848:  93E10014  stw r31,20(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xEE3584C:  93810018  stw r28,24(r1)
+	  13: GETL       	R28, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x18, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xEE35850:  38003020  li r0,12320
+	  18: MOVL       	$0x3020, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0xEE35854:  90010020  stw r0,32(r1)
+	  21: GETL       	R0, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x20, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xEE35858:  38010010  addi r0,r1,16
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x10, t20
+	  28: PUTL       	t20, R0
+	  29: INCEIPL       	$4
+
+	0xEE3585C:  90010024  stw r0,36(r1)
+	  30: GETL       	R0, t22
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x24, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0xEE35860:  38000000  li r0,0
+	  35: MOVL       	$0x0, t26
+	  36: PUTL       	t26, R0
+	  37: INCEIPL       	$4
+
+	0xEE35864:  90010028  stw r0,40(r1)
+	  38: GETL       	R0, t28
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x28, t30
+	  41: STL       	t28, (t30)
+	  42: INCEIPL       	$4
+
+	0xEE35868:  9001002C  stw r0,44(r1)
+	  43: GETL       	R0, t32
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x2C, t34
+	  46: STL       	t32, (t34)
+	  47: INCEIPL       	$4
+
+	0xEE3586C:  90010030  stw r0,48(r1)
+	  48: GETL       	R0, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x30, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0xEE35870:  38610020  addi r3,r1,32
+	  53: GETL       	R1, t40
+	  54: ADDL       	$0x20, t40
+	  55: PUTL       	t40, R3
+	  56: INCEIPL       	$4
+
+	0xEE35874:  7C03D808  client_request
+	  57: JMPo-cli       	$0xEE35878  ($4)
+
+
+
+. 1458 EE35840 56
+. 38 00 00 01 90 01 00 10 93 E1 00 14 93 81 00 18 38 00 30 20 90 01 00 20 38 01 00 10 90 01 00 24 38 00 00 00 90 01 00 28 90 01 00 2C 90 01 00 30 38 61 00 20 7C 03 D8 08
+==== BB 1459 (0xEE35878) approx BBs exec'd 0 ====
+
+	0xEE35878:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xEE3587C:  41BE0018  bc 13,30,0xEE35894
+	   4: Js30o       	$0xEE35894
+
+
+
+. 1459 EE35878 8
+. 2F 83 00 00 41 BE 00 18
+==== BB 1460 (0xEE35894) approx BBs exec'd 0 ====
+
+	0xEE35894:  80010094  lwz r0,148(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x94, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE35898:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xEE3589C:  83810080  lwz r28,128(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x80, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0xEE358A0:  83A10084  lwz r29,132(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x84, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0xEE358A4:  83C10088  lwz r30,136(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x88, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R30
+	  22: INCEIPL       	$4
+
+	0xEE358A8:  83E1008C  lwz r31,140(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x8C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0xEE358AC:  38210090  addi r1,r1,144
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x90, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0xEE358B0:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+
+. 1460 EE35894 32
+. 80 01 00 94 7C 08 03 A6 83 81 00 80 83 A1 00 84 83 C1 00 88 83 E1 00 8C 38 21 00 90 4E 80 00 20
+==== BB 1461 (0xEE38518) approx BBs exec'd 0 ====
+
+	0xEE38518:  807E00AC  lwz r3,172(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xAC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xEE3851C:  48017559  bl 0xEE4FA74
+	   5: MOVL       	$0xEE38520, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xEE4FA74  ($4)
+
+
+
+. 1461 EE38518 8
+. 80 7E 00 AC 48 01 75 59
+==== BB 1462 (0xEE4FA74) approx BBs exec'd 0 ====
+
+	0xEE4FA74:  3960003C  li r11,60
+	   0: MOVL       	$0x3C, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xEE4FA78:  4BFFFF5C  b 0xEE4F9D4
+	   3: JMPo       	$0xEE4F9D4  ($4)
+
+
+
+. 1462 EE4FA74 8
+. 39 60 00 3C 4B FF FF 5C
+==== BB 1463 pthread_mutex_unlock(0xEE36834) approx BBs exec'd 0 ====
+
+	0xEE36834:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE36838:  39200000  li r9,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xEE3683C:  3800300E  li r0,12302
+	   9: MOVL       	$0x300E, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0xEE36840:  90010010  stw r0,16(r1)
+	  12: GETL       	R0, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xEE36844:  90610014  stw r3,20(r1)
+	  17: GETL       	R3, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xEE36848:  91210018  stw r9,24(r1)
+	  22: GETL       	R9, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xEE3684C:  9121001C  stw r9,28(r1)
+	  27: GETL       	R9, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xEE36850:  91210020  stw r9,32(r1)
+	  32: GETL       	R9, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x20, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xEE36854:  38610010  addi r3,r1,16
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x10, t28
+	  39: PUTL       	t28, R3
+	  40: INCEIPL       	$4
+
+	0xEE36858:  7C03D808  client_request
+	  41: JMPo-cli       	$0xEE3685C  ($4)
+
+
+
+. 1463 EE36834 40
+. 94 21 FF C0 39 20 00 00 38 00 30 0E 90 01 00 10 90 61 00 14 91 21 00 18 91 21 00 1C 91 21 00 20 38 61 00 10 7C 03 D8 08
+==== BB 1464 (0xEE3685C) approx BBs exec'd 0 ====
+
+	0xEE3685C:  38210040  addi r1,r1,64
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x40, t0
+	   2: PUTL       	t0, R1
+	   3: INCEIPL       	$4
+
+	0xEE36860:  4E800020  blr
+	   4: GETL       	LR, t2
+	   5: JMPo-r       	t2  ($4)
+
+
+
+. 1464 EE3685C 8
+. 38 21 00 40 4E 80 00 20
+==== BB 1465 (0xEE38520) approx BBs exec'd 0 ====
+
+	0xEE38520:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xEE38524:  41BE000C  bc 13,30,0xEE38530
+	   4: Js30o       	$0xEE38530
+
+
+
+. 1465 EE38520 8
+. 2F 83 00 00 41 BE 00 0C
+==== BB 1466 (0xEE38530) approx BBs exec'd 0 ====
+
+	0xEE38530:  80010084  lwz r0,132(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE38534:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xEE38538:  83A10074  lwz r29,116(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x74, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xEE3853C:  83C10078  lwz r30,120(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x78, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0xEE38540:  38210080  addi r1,r1,128
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x80, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xEE38544:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 1466 EE38530 24
+. 80 01 00 84 7C 08 03 A6 83 A1 00 74 83 C1 00 78 38 21 00 80 4E 80 00 20
+==== BB 1467 (0xEE3D0E4) approx BBs exec'd 0 ====
+
+	0xEE3D0E4:  841FFFFC  lwzu r0,-4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R31
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xEE3D0E8:  2F80FFFF  cmpi cr7,r0,-1
+	   6: GETL       	R0, t4
+	   7: MOVL       	$0xFFFFFFFF, t8
+	   8: CMPL       	t4, t8, t6  (-rSo)
+	   9: ICRFL       	t6, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xEE3D0EC:  409EFFF0  bc 4,30,0xEE3D0DC
+	  11: Jc30o       	$0xEE3D0DC
+
+
+
+. 1467 EE3D0E4 12
+. 84 1F FF FC 2F 80 FF FF 40 9E FF F0
+==== BB 1468 (0xEE3D0F0) approx BBs exec'd 0 ====
+
+	0xEE3D0F0:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE3D0F4:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xEE3D0F8:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xEE3D0FC:  7C0803A6  mtlr r0
+	  15: GETL       	R0, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xEE3D100:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xEE3D104:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 1468 EE3D0F0 24
+. 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1469 (0xEE3423C) approx BBs exec'd 0 ====
+
+	0xEE3423C:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE34240:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xEE34244:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xEE34248:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 1469 EE3423C 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1470 (0xEE81FCC) approx BBs exec'd 0 ====
+
+	0xEE81FCC:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE81FD0:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE81FD4:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xEE81FD8:  4800001D  bl 0xEE81FF4
+	  14: MOVL       	$0xEE81FDC, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xEE81FF4  ($4)
+
+
+
+. 1470 EE81FCC 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 1D
+==== BB 1471 (0xEE81FF4) approx BBs exec'd 0 ====
+
+	0xEE81FF4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE81FF8:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE81FFC:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xEE82000:  90010014  stw r0,20(r1)
+	  14: GETL       	R0, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xEE82004:  48016041  bl 0xEE98044
+	  19: MOVL       	$0xEE82008, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xEE98044  ($4)
+
+
+
+. 1471 EE81FF4 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 01 60 41
+==== BB 1472 (0xEE98044) approx BBs exec'd 0 ====
+
+	0xEE98044:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0xEE98048, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+
+. 1472 EE98044 4
+. 4E 80 00 21
+==== BB 1473 (0xEE82008) approx BBs exec'd 0 ====
+
+	0xEE82008:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0xEE8200C:  801E00FC  lwz r0,252(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0xFC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xEE82010:  2F800000  cmpi cr7,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xEE82014:  419E000C  bc 12,30,0xEE82020
+	  12: Js30o       	$0xEE82020
+
+
+
+. 1473 EE82008 16
+. 7F C8 02 A6 80 1E 00 FC 2F 80 00 00 41 9E 00 0C
+==== BB 1474 (0xEE82020) approx BBs exec'd 0 ====
+
+	0xEE82020:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE82024:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xEE82028:  83C10008  lwz r30,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xEE8202C:  38210010  addi r1,r1,16
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: PUTL       	t10, R1
+	  16: INCEIPL       	$4
+
+	0xEE82030:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1474 EE82020 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 1475 (0xEE81FDC) approx BBs exec'd 0 ====
+
+	0xEE81FDC:  48000115  bl 0xEE820F0
+	   0: MOVL       	$0xEE81FE0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xEE820F0  ($4)
+
+
+
+. 1475 EE81FDC 4
+. 48 00 01 15
+==== BB 1476 (0xEE820F0) approx BBs exec'd 0 ====
+
+	0xEE820F0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE820F4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE820F8:  429F0005  bcl 20,31,0xEE820FC
+	   9: MOVL       	$0xEE820FC, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xEE820FC:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xEE82100:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xEE82104:  90010014  stw r0,20(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xEE82108:  801EFFF0  lwz r0,-16(r30)
+	  25: GETL       	R30, t18
+	  26: ADDL       	$0xFFFFFFF0, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R0
+	  29: INCEIPL       	$4
+
+	0xEE8210C:  7FC0F214  add r30,r0,r30
+	  30: GETL       	R0, t22
+	  31: GETL       	R30, t24
+	  32: ADDL       	t22, t24
+	  33: PUTL       	t24, R30
+	  34: INCEIPL       	$4
+
+	0xEE82110:  807E8010  lwz r3,-32752(r30)
+	  35: GETL       	R30, t26
+	  36: ADDL       	$0xFFFF8010, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R3
+	  39: INCEIPL       	$4
+
+	0xEE82114:  80030000  lwz r0,0(r3)
+	  40: GETL       	R3, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R0
+	  43: INCEIPL       	$4
+
+	0xEE82118:  2F800000  cmpi cr7,r0,0
+	  44: GETL       	R0, t34
+	  45: CMP0L       	t34, t36  (-rSo)
+	  46: ICRFL       	t36, $0x7, CR
+	  47: INCEIPL       	$4
+
+	0xEE8211C:  419E0018  bc 12,30,0xEE82134
+	  48: Js30o       	$0xEE82134
+
+
+
+. 1476 EE820F0 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 80 7E 80 10 80 03 00 00 2F 80 00 00 41 9E 00 18
+==== BB 1477 (0xEE82134) approx BBs exec'd 0 ====
+
+	0xEE82134:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE82138:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xEE8213C:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xEE82140:  7C0803A6  mtlr r0
+	  14: GETL       	R0, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xEE82144:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1477 EE82134 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 1478 (0xEE81FE0) approx BBs exec'd 0 ====
+
+	0xEE81FE0:  48005759  bl 0xEE87738
+	   0: MOVL       	$0xEE81FE4, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xEE87738  ($4)
+
+
+
+. 1478 EE81FE0 4
+. 48 00 57 59
+==== BB 1479 (0xEE87738) approx BBs exec'd 0 ====
+
+	0xEE87738:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE8773C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE87740:  429F0005  bcl 20,31,0xEE87744
+	   9: MOVL       	$0xEE87744, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xEE87744:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xEE87748:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xEE8774C:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xEE87750:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xEE87754:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xEE87758:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xEE8775C:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xEE87760:  8009FFFC  lwz r0,-4(r9)
+	  45: GETL       	R9, t34
+	  46: ADDL       	$0xFFFFFFFC, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R0
+	  49: INCEIPL       	$4
+
+	0xEE87764:  3BE9FFFC  addi r31,r9,-4
+	  50: GETL       	R9, t38
+	  51: ADDL       	$0xFFFFFFFC, t38
+	  52: PUTL       	t38, R31
+	  53: INCEIPL       	$4
+
+	0xEE87768:  48000010  b 0xEE87778
+	  54: JMPo       	$0xEE87778  ($4)
+
+
+
+. 1479 EE87738 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 80 09 FF FC 3B E9 FF FC 48 00 00 10
+==== BB 1480 (0xEE87778) approx BBs exec'd 0 ====
+
+	0xEE87778:  2F80FFFF  cmpi cr7,r0,-1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xEE8777C:  409EFFF0  bc 4,30,0xEE8776C
+	   5: Jc30o       	$0xEE8776C
+
+
+
+. 1480 EE87778 8
+. 2F 80 FF FF 40 9E FF F0
+==== BB 1481 (0xEE87780) approx BBs exec'd 0 ====
+
+	0xEE87780:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE87784:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xEE87788:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xEE8778C:  7C0803A6  mtlr r0
+	  15: GETL       	R0, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xEE87790:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xEE87794:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 1481 EE87780 24
+. 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1482 (0xEE81FE4) approx BBs exec'd 0 ====
+
+	0xEE81FE4:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE81FE8:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xEE81FEC:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xEE81FF0:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 1482 EE81FE4 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 1483 (0x2547B90C) approx BBs exec'd 0 ====
+
+	0x2547B90C:  7520C000  andis. r0,r9,0xC000
+	   0: GETL       	R9, t0
+	   1: ANDL       	$0xC0000000, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547B910:  40A2FEFC  bc 5,2,0x2547B80C
+	   6: Jc02o       	$0x2547B80C
+
+
+
+. 1483 2547B90C 8
+. 75 20 C0 00 40 A2 FE FC
+==== BB 1484 (0x2547B914) approx BBs exec'd 0 ====
+
+	0x2547B914:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547B918:  3B9CFFFF  addi r28,r28,-1
+	   4: GETL       	R28, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2547B91C:  4BFFFF10  b 0x2547B82C
+	   8: JMPo       	$0x2547B82C  ($4)
+
+
+
+. 1484 2547B914 12
+. 2F 9C 00 00 3B 9C FF FF 4B FF FF 10
+==== BB 1485 (0x2547B82C) approx BBs exec'd 0 ====
+
+	0x2547B82C:  409EFFB0  bc 4,30,0x2547B7DC
+	   0: Jc30o       	$0x2547B7DC
+
+
+
+. 1485 2547B82C 4
+. 40 9E FF B0
+==== BB 1486 (0x2547B830) approx BBs exec'd 0 ====
+
+	0x2547B830:  82E10044  lwz r23,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x2547B834:  3B000000  li r24,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R24
+	   7: INCEIPL       	$4
+
+	0x2547B838:  83C10038  lwz r30,56(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x38, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0x2547B83C:  7EE803A6  mtlr r23
+	  13: GETL       	R23, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x2547B840:  83E1003C  lwz r31,60(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x3C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0x2547B844:  9316000C  stw r24,12(r22)
+	  21: GETL       	R24, t16
+	  22: GETL       	R22, t18
+	  23: ADDL       	$0xC, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547B848:  82810010  lwz r20,16(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x10, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R20
+	  30: INCEIPL       	$4
+
+	0x2547B84C:  82A10014  lwz r21,20(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x14, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R21
+	  35: INCEIPL       	$4
+
+	0x2547B850:  82C10018  lwz r22,24(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x18, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R22
+	  40: INCEIPL       	$4
+
+	0x2547B854:  82E1001C  lwz r23,28(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x1C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R23
+	  45: INCEIPL       	$4
+
+	0x2547B858:  83010020  lwz r24,32(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x20, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R24
+	  50: INCEIPL       	$4
+
+	0x2547B85C:  83210024  lwz r25,36(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x24, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R25
+	  55: INCEIPL       	$4
+
+	0x2547B860:  83410028  lwz r26,40(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x28, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R26
+	  60: INCEIPL       	$4
+
+	0x2547B864:  8361002C  lwz r27,44(r1)
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x2C, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R27
+	  65: INCEIPL       	$4
+
+	0x2547B868:  83810030  lwz r28,48(r1)
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x30, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R28
+	  70: INCEIPL       	$4
+
+	0x2547B86C:  83A10034  lwz r29,52(r1)
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0x34, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R29
+	  75: INCEIPL       	$4
+
+	0x2547B870:  38210040  addi r1,r1,64
+	  76: GETL       	R1, t60
+	  77: ADDL       	$0x40, t60
+	  78: PUTL       	t60, R1
+	  79: INCEIPL       	$4
+
+	0x2547B874:  4800072C  b 0x2547BFA0
+	  80: JMPo       	$0x2547BFA0  ($4)
+
+
+
+. 1486 2547B830 72
+. 82 E1 00 44 3B 00 00 00 83 C1 00 38 7E E8 03 A6 83 E1 00 3C 93 16 00 0C 82 81 00 10 82 A1 00 14 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 38 21 00 40 48 00 07 2C
+==== BB 1487 (0x2548051C) approx BBs exec'd 0 ====
+
+	0x2548051C:  807D0000  lwz r3,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25480520:  809B0000  lwz r4,0(r27)
+	   4: GETL       	R27, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R4
+	   7: INCEIPL       	$4
+
+	0x25480524:  5465103A  rlwinm r5,r3,2,0,29
+	   8: GETL       	R3, t8
+	   9: SHLL       	$0x2, t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x25480528:  7CC42A14  add r6,r4,r5
+	  12: GETL       	R4, t10
+	  13: GETL       	R5, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R6
+	  16: INCEIPL       	$4
+
+	0x2548052C:  38A60004  addi r5,r6,4
+	  17: GETL       	R6, t14
+	  18: ADDL       	$0x4, t14
+	  19: PUTL       	t14, R5
+	  20: INCEIPL       	$4
+
+	0x25480530:  84060004  lwzu r0,4(r6)
+	  21: GETL       	R6, t16
+	  22: ADDL       	$0x4, t16
+	  23: PUTL       	t16, R6
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R0
+	  26: INCEIPL       	$4
+
+	0x25480534:  2C000000  cmpi cr0,r0,0
+	  27: GETL       	R0, t20
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0x25480538:  4082FFF8  bc 4,2,0x25480530
+	  31: Jc02o       	$0x25480530
+
+
+
+. 1487 2548051C 32
+. 80 7D 00 00 80 9B 00 00 54 65 10 3A 7C C4 2A 14 38 A6 00 04 84 06 00 04 2C 00 00 00 40 82 FF F8
+==== BB 1488 (0x25480530) approx BBs exec'd 0 ====
+
+	0x25480530:  84060004  lwzu r0,4(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R6
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x25480534:  2C000000  cmpi cr0,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x25480538:  4082FFF8  bc 4,2,0x25480530
+	  10: Jc02o       	$0x25480530
+
+
+
+. 1488 25480530 12
+. 84 06 00 04 2C 00 00 00 40 82 FF F8
+==== BB 1489 (0x2548053C) approx BBs exec'd 0 ====
+
+	0x2548053C:  38C60004  addi r6,r6,4
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x25480540:  80FF04AC  lwz r7,1196(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x4AC, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x25480544:  7FC903A6  mtctr r30
+	   9: GETL       	R30, t6
+	  10: PUTL       	t6, CTR
+	  11: INCEIPL       	$4
+
+	0x25480548:  3BE00000  li r31,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R31
+	  14: INCEIPL       	$4
+
+	0x2548054C:  93E10000  stw r31,0(r1)
+	  15: GETL       	R31, t10
+	  16: GETL       	R1, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x25480550:  7FE803A6  mtlr r31
+	  19: GETL       	R31, t14
+	  20: PUTL       	t14, LR
+	  21: INCEIPL       	$4
+
+	0x25480554:  93E10004  stw r31,4(r1)
+	  22: GETL       	R31, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x4, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x25480558:  93E10008  stw r31,8(r1)
+	  27: GETL       	R31, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x8, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x2548055C:  93E1000C  stw r31,12(r1)
+	  32: GETL       	R31, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0xC, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x25480560:  4E800420  bctr
+	  37: GETL       	CTR, t28
+	  38: JMPo       	t28  ($4)
+
+
+
+. 1489 2548053C 40
+. 38 C6 00 04 80 FF 04 AC 7F C9 03 A6 3B E0 00 00 93 E1 00 00 7F E8 03 A6 93 E1 00 04 93 E1 00 08 93 E1 00 0C 4E 80 04 20
+==== BB 1490 (0x10000FF4) approx BBs exec'd 0 ====
+
+	0x10000FF4:  7C290B78  or r9,r1,r1
+	   0: GETL       	R1, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x10000FF8:  54210036  rlwinm r1,r1,0,0,27
+	   3: GETL       	R1, t2
+	   4: ANDL       	$0xFFFFFFF0, t2
+	   5: PUTL       	t2, R1
+	   6: INCEIPL       	$4
+
+	0x10000FFC:  38000000  li r0,0
+	   7: MOVL       	$0x0, t4
+	   8: PUTL       	t4, R0
+	   9: INCEIPL       	$4
+
+	0x10001000:  9421FFF0  stwu r1,-16(r1)
+	  10: GETL       	R1, t6
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0xFFFFFFF0, t8
+	  13: PUTL       	t8, R1
+	  14: STL       	t6, (t8)
+	  15: INCEIPL       	$4
+
+	0x10001004:  7C0803A6  mtlr r0
+	  16: GETL       	R0, t10
+	  17: PUTL       	t10, LR
+	  18: INCEIPL       	$4
+
+	0x10001008:  90010000  stw r0,0(r1)
+	  19: GETL       	R0, t12
+	  20: GETL       	R1, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0x1000100C:  3D001000  lis r8,4096
+	  23: MOVL       	$0x10000000, t16
+	  24: PUTL       	t16, R8
+	  25: INCEIPL       	$4
+
+	0x10001010:  85A87DF8  lwzu r13,32248(r8)
+	  26: MOVL       	$0x10007DF8, t18
+	  27: PUTL       	t18, R8
+	  28: LDL       	(t18), t20
+	  29: PUTL       	t20, R13
+	  30: INCEIPL       	$4
+
+	0x10001014:  48019C20  b 0x1001AC34
+	  31: JMPo       	$0x1001AC34  ($4)
+
+
+
+. 1490 10000FF4 36
+. 7C 29 0B 78 54 21 00 36 38 00 00 00 94 21 FF F0 7C 08 03 A6 90 01 00 00 3D 00 10 00 85 A8 7D F8 48 01 9C 20
+==== BB 1491 (0x1001AC34) approx BBs exec'd 0 ====
+
+	0x1001AC34:  39600080  li r11,128
+	   0: MOVL       	$0x80, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AC38:  4BFFFED4  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 1491 1001AC34 8
+. 39 60 00 80 4B FF FE D4
+==== BB 1492 (0x1001AB0C) approx BBs exec'd 0 ====
+
+	0x1001AB0C:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0x1001AB10:  7D6C5A14  add r11,r12,r11
+	   4: GETL       	R12, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0x1001AB14:  3980AF5C  li r12,-20644
+	   9: MOVL       	$0xFFFFAF5C, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x1001AB18:  3D8C2548  addis r12,r12,9544
+	  12: MOVL       	$0x2547AF5C, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0x1001AB1C:  7D8903A6  mtctr r12
+	  15: GETL       	R12, t10
+	  16: PUTL       	t10, CTR
+	  17: INCEIPL       	$4
+
+	0x1001AB20:  39807A88  li r12,31368
+	  18: MOVL       	$0x7A88, t12
+	  19: PUTL       	t12, R12
+	  20: INCEIPL       	$4
+
+	0x1001AB24:  3D8C2549  addis r12,r12,9545
+	  21: MOVL       	$0x25497A88, t14
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0x1001AB28:  4E800420  bctr
+	  24: GETL       	CTR, t16
+	  25: JMPo       	t16  ($4)
+
+
+
+. 1492 1001AB0C 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 7A 88 3D 8C 25 49 4E 80 04 20
+==== BB 1493 __libc_start_main(0xFE7B8C4) approx BBs exec'd 0 ====
+
+	0xFE7B8C4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE7B8C8:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFE7B8CC:  4812C585  bl 0xFFA7E50
+	   9: MOVL       	$0xFE7B8D0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1493 FE7B8C4 12
+. 7C 08 02 A6 94 21 FF F0 48 12 C5 85
+==== BB 1494 (0xFE7B8D0) approx BBs exec'd 0 ====
+
+	0xFE7B8D0:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE7B8D4:  7D2A4B78  or r10,r9,r9
+	   5: GETL       	R9, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFE7B8D8:  7FC802A6  mflr r30
+	   8: GETL       	LR, t6
+	   9: PUTL       	t6, R30
+	  10: INCEIPL       	$4
+
+	0xFE7B8DC:  90010014  stw r0,20(r1)
+	  11: GETL       	R0, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFE7B8E0:  7C6C1B78  or r12,r3,r3
+	  16: GETL       	R3, t12
+	  17: PUTL       	t12, R12
+	  18: INCEIPL       	$4
+
+	0xFE7B8E4:  80090000  lwz r0,0(r9)
+	  19: GETL       	R9, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R0
+	  22: INCEIPL       	$4
+
+	0xFE7B8E8:  7C852378  or r5,r4,r4
+	  23: GETL       	R4, t18
+	  24: PUTL       	t18, R5
+	  25: INCEIPL       	$4
+
+	0xFE7B8EC:  7CE93B78  or r9,r7,r7
+	  26: GETL       	R7, t20
+	  27: PUTL       	t20, R9
+	  28: INCEIPL       	$4
+
+	0xFE7B8F0:  7D074378  or r7,r8,r8
+	  29: GETL       	R8, t22
+	  30: PUTL       	t22, R7
+	  31: INCEIPL       	$4
+
+	0xFE7B8F4:  2F800000  cmpi cr7,r0,0
+	  32: GETL       	R0, t24
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x7, CR
+	  35: INCEIPL       	$4
+
+	0xFE7B8F8:  419E0034  bc 12,30,0xFE7B92C
+	  36: Js30o       	$0xFE7B92C
+
+
+
+. 1494 FE7B8D0 44
+. 93 C1 00 08 7D 2A 4B 78 7F C8 02 A6 90 01 00 14 7C 6C 1B 78 80 09 00 00 7C 85 23 78 7C E9 3B 78 7D 07 43 78 2F 80 00 00 41 9E 00 34
+==== BB 1495 (0xFE7B92C) approx BBs exec'd 0 ====
+
+	0xFE7B92C:  80060000  lwz r0,0(r6)
+	   0: GETL       	R6, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFE7B930:  7CCB3378  or r11,r6,r6
+	   4: GETL       	R6, t4
+	   5: PUTL       	t4, R11
+	   6: INCEIPL       	$4
+
+	0xFE7B934:  2C800000  cmpi cr1,r0,0
+	   7: GETL       	R0, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x1, CR
+	  10: INCEIPL       	$4
+
+	0xFE7B938:  4186003C  bc 12,6,0xFE7B974
+	  11: Js06o       	$0xFE7B974
+
+
+
+. 1495 FE7B92C 16
+. 80 06 00 00 7C CB 33 78 2C 80 00 00 41 86 00 3C
+==== BB 1496 (0xFE7B93C) approx BBs exec'd 0 ====
+
+	0xFE7B93C:  811E1CB0  lwz r8,7344(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CB0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFE7B940:  48000010  b 0xFE7B950
+	   5: JMPo       	$0xFE7B950  ($4)
+
+
+
+. 1496 FE7B93C 8
+. 81 1E 1C B0 48 00 00 10
+==== BB 1497 (0xFE7B950) approx BBs exec'd 0 ====
+
+	0xFE7B950:  2F800013  cmpi cr7,r0,19
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x13, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE7B954:  2F080000  cmpi cr6,r8,0
+	   5: GETL       	R8, t6
+	   6: CMP0L       	t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFE7B958:  409EFFEC  bc 4,30,0xFE7B944
+	   9: Jc30o       	$0xFE7B944
+
+
+
+. 1497 FE7B950 12
+. 2F 80 00 13 2F 08 00 00 40 9E FF EC
+==== BB 1498 (0xFE7B944) approx BBs exec'd 0 ====
+
+	0xFE7B944:  840B0008  lwzu r0,8(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R11
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFE7B948:  2F000000  cmpi cr6,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFE7B94C:  419A0028  bc 12,26,0xFE7B974
+	  10: Js26o       	$0xFE7B974
+
+
+
+. 1498 FE7B944 12
+. 84 0B 00 08 2F 00 00 00 41 9A 00 28
+==== BB 1499 (0xFE7B95C) approx BBs exec'd 0 ====
+
+	0xFE7B95C:  41BAFFE8  bc 13,26,0xFE7B944
+	   0: Js26o       	$0xFE7B944
+
+
+
+. 1499 FE7B95C 4
+. 41 BA FF E8
+==== BB 1500 (0xFE7B960) approx BBs exec'd 0 ====
+
+	0xFE7B960:  808B0004  lwz r4,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE7B964:  90880000  stw r4,0(r8)
+	   5: GETL       	R4, t4
+	   6: GETL       	R8, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFE7B968:  840B0008  lwzu r0,8(r11)
+	   9: GETL       	R11, t8
+	  10: ADDL       	$0x8, t8
+	  11: PUTL       	t8, R11
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFE7B96C:  2F000000  cmpi cr6,r0,0
+	  15: GETL       	R0, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0xFE7B970:  409AFFE0  bc 4,26,0xFE7B950
+	  19: Jc26o       	$0xFE7B950
+
+
+
+. 1500 FE7B960 20
+. 80 8B 00 04 90 88 00 00 84 0B 00 08 2F 00 00 00 40 9A FF E0
+==== BB 1501 (0xFE7B974) approx BBs exec'd 0 ====
+
+	0xFE7B974:  8107000C  lwz r8,12(r7)
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFE7B978:  7D846378  or r4,r12,r12
+	   5: GETL       	R12, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE7B97C:  80670004  lwz r3,4(r7)
+	   8: GETL       	R7, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFE7B980:  80E70008  lwz r7,8(r7)
+	  13: GETL       	R7, t10
+	  14: ADDL       	$0x8, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R7
+	  17: INCEIPL       	$4
+
+	0xFE7B984:  4BFFFD91  bl 0xFE7B714
+	  18: MOVL       	$0xFE7B988, t14
+	  19: PUTL       	t14, LR
+	  20: JMPo-c       	$0xFE7B714  ($4)
+
+
+
+. 1501 FE7B974 20
+. 81 07 00 0C 7D 84 63 78 80 67 00 04 80 E7 00 08 4B FF FD 91
+==== BB 1502 generic_start_main(0xFE7B714) approx BBs exec'd 0 ====
+
+	0xFE7B714:  9421FDE0  stwu r1,-544(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFDE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE7B718:  7D4802A6  mflr r10
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0xFE7B71C:  4812C735  bl 0xFFA7E50
+	   9: MOVL       	$0xFE7B720, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1502 FE7B714 12
+. 94 21 FD E0 7D 48 02 A6 48 12 C7 35
+==== BB 1503 (0xFE7B720) approx BBs exec'd 0 ====
+
+	0xFE7B720:  93C10218  stw r30,536(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x218, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE7B724:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE7B728:  93A10214  stw r29,532(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x214, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE7B72C:  5480103A  rlwinm r0,r4,2,0,29
+	  13: GETL       	R4, t10
+	  14: SHLL       	$0x2, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFE7B730:  93810210  stw r28,528(r1)
+	  17: GETL       	R28, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x210, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFE7B734:  7D602A14  add r11,r0,r5
+	  22: GETL       	R0, t16
+	  23: GETL       	R5, t18
+	  24: ADDL       	t16, t18
+	  25: PUTL       	t18, R11
+	  26: INCEIPL       	$4
+
+	0xFE7B738:  93E1021C  stw r31,540(r1)
+	  27: GETL       	R31, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x21C, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFE7B73C:  83BE1CE4  lwz r29,7396(r30)
+	  32: GETL       	R30, t24
+	  33: ADDL       	$0x1CE4, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R29
+	  36: INCEIPL       	$4
+
+	0xFE7B740:  7CFC3B78  or r28,r7,r7
+	  37: GETL       	R7, t28
+	  38: PUTL       	t28, R28
+	  39: INCEIPL       	$4
+
+	0xFE7B744:  91410224  stw r10,548(r1)
+	  40: GETL       	R10, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x224, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFE7B748:  7D1F4378  or r31,r8,r8
+	  45: GETL       	R8, t34
+	  46: PUTL       	t34, R31
+	  47: INCEIPL       	$4
+
+	0xFE7B74C:  2F9D0000  cmpi cr7,r29,0
+	  48: GETL       	R29, t36
+	  49: CMP0L       	t36, t38  (-rSo)
+	  50: ICRFL       	t38, $0x7, CR
+	  51: INCEIPL       	$4
+
+	0xFE7B750:  906101F0  stw r3,496(r1)
+	  52: GETL       	R3, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x1F0, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0xFE7B754:  9361020C  stw r27,524(r1)
+	  57: GETL       	R27, t44
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x20C, t46
+	  60: STL       	t44, (t46)
+	  61: INCEIPL       	$4
+
+	0xFE7B758:  7D234B78  or r3,r9,r9
+	  62: GETL       	R9, t48
+	  63: PUTL       	t48, R3
+	  64: INCEIPL       	$4
+
+	0xFE7B75C:  908101F4  stw r4,500(r1)
+	  65: GETL       	R4, t50
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x1F4, t52
+	  68: STL       	t50, (t52)
+	  69: INCEIPL       	$4
+
+	0xFE7B760:  396B0004  addi r11,r11,4
+	  70: GETL       	R11, t54
+	  71: ADDL       	$0x4, t54
+	  72: PUTL       	t54, R11
+	  73: INCEIPL       	$4
+
+	0xFE7B764:  90A101F8  stw r5,504(r1)
+	  74: GETL       	R5, t56
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x1F8, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0xFE7B768:  39400000  li r10,0
+	  79: MOVL       	$0x0, t60
+	  80: PUTL       	t60, R10
+	  81: INCEIPL       	$4
+
+	0xFE7B76C:  90C101FC  stw r6,508(r1)
+	  82: GETL       	R6, t62
+	  83: GETL       	R1, t64
+	  84: ADDL       	$0x1FC, t64
+	  85: STL       	t62, (t64)
+	  86: INCEIPL       	$4
+
+	0xFE7B770:  419E0010  bc 12,30,0xFE7B780
+	  87: Js30o       	$0xFE7B780
+
+
+
+. 1503 FE7B720 84
+. 93 C1 02 18 7F C8 02 A6 93 A1 02 14 54 80 10 3A 93 81 02 10 7D 60 2A 14 93 E1 02 1C 83 BE 1C E4 7C FC 3B 78 91 41 02 24 7D 1F 43 78 2F 9D 00 00 90 61 01 F0 93 61 02 0C 7D 23 4B 78 90 81 01 F4 39 6B 00 04 90 A1 01 F8 39 40 00 00 90 C1 01 FC 41 9E 00 10
+==== BB 1504 (0xFE7B780) approx BBs exec'd 0 ====
+
+	0xFE7B780:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE7B784:  837E1B84  lwz r27,7044(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1B84, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFE7B788:  813E1AA4  lwz r9,6820(r30)
+	   9: GETL       	R30, t8
+	  10: ADDL       	$0x1AA4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0xFE7B78C:  917B0000  stw r11,0(r27)
+	  14: GETL       	R11, t12
+	  15: GETL       	R27, t14
+	  16: STL       	t12, (t14)
+	  17: INCEIPL       	$4
+
+	0xFE7B790:  91490000  stw r10,0(r9)
+	  18: GETL       	R10, t16
+	  19: GETL       	R9, t18
+	  20: STL       	t16, (t18)
+	  21: INCEIPL       	$4
+
+	0xFE7B794:  41860010  bc 12,6,0xFE7B7A4
+	  22: Js06o       	$0xFE7B7A4
+
+
+
+. 1504 FE7B780 24
+. 2C 83 00 00 83 7E 1B 84 81 3E 1A A4 91 7B 00 00 91 49 00 00 41 86 00 10
+==== BB 1505 (0xFE7B798) approx BBs exec'd 0 ====
+
+	0xFE7B798:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE7B79C:  38A00000  li r5,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFE7B7A0:  48018731  bl 0xFE93ED0
+	   6: MOVL       	$0xFE7B7A4, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFE93ED0  ($4)
+
+
+
+. 1505 FE7B798 12
+. 38 80 00 00 38 A0 00 00 48 01 87 31
+==== BB 1506 __cxa_atexit_internal(0xFE93ED0) approx BBs exec'd 0 ====
+
+	0xFE93ED0:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE93ED4:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFE93ED8:  93E1001C  stw r31,28(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE93EDC:  7CBF2B78  or r31,r5,r5
+	  14: GETL       	R5, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0xFE93EE0:  93810010  stw r28,16(r1)
+	  17: GETL       	R28, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFE93EE4:  7C9C2378  or r28,r4,r4
+	  22: GETL       	R4, t16
+	  23: PUTL       	t16, R28
+	  24: INCEIPL       	$4
+
+	0xFE93EE8:  93A10014  stw r29,20(r1)
+	  25: GETL       	R29, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFE93EEC:  7C7D1B78  or r29,r3,r3
+	  30: GETL       	R3, t22
+	  31: PUTL       	t22, R29
+	  32: INCEIPL       	$4
+
+	0xFE93EF0:  90010024  stw r0,36(r1)
+	  33: GETL       	R0, t24
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x24, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0xFE93EF4:  93C10018  stw r30,24(r1)
+	  38: GETL       	R30, t28
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x18, t30
+	  41: STL       	t28, (t30)
+	  42: INCEIPL       	$4
+
+	0xFE93EF8:  4BFFFE35  bl 0xFE93D2C
+	  43: MOVL       	$0xFE93EFC, t32
+	  44: PUTL       	t32, LR
+	  45: JMPo-c       	$0xFE93D2C  ($4)
+
+
+
+. 1506 FE93ED0 44
+. 7C 08 02 A6 94 21 FF E0 93 E1 00 1C 7C BF 2B 78 93 81 00 10 7C 9C 23 78 93 A1 00 14 7C 7D 1B 78 90 01 00 24 93 C1 00 18 4B FF FE 35
+==== BB 1507 __new_exitfn(0xFE93D2C) approx BBs exec'd 0 ====
+
+	0xFE93D2C:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE93D30:  7C6802A6  mflr r3
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFE93D34:  4811411D  bl 0xFFA7E50
+	   9: MOVL       	$0xFE93D38, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1507 FE93D2C 12
+. 94 21 FF E0 7C 68 02 A6 48 11 41 1D
+==== BB 1508 (0xFE93D38) approx BBs exec'd 0 ====
+
+	0xFE93D38:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE93D3C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE93D40:  93810010  stw r28,16(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE93D44:  93A10014  stw r29,20(r1)
+	  13: GETL       	R29, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE93D48:  38000001  li r0,1
+	  18: MOVL       	$0x1, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0xFE93D4C:  9361000C  stw r27,12(r1)
+	  21: GETL       	R27, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE93D50:  3BA00000  li r29,0
+	  26: MOVL       	$0x0, t20
+	  27: PUTL       	t20, R29
+	  28: INCEIPL       	$4
+
+	0xFE93D54:  93E1001C  stw r31,28(r1)
+	  29: GETL       	R31, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x1C, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE93D58:  90610024  stw r3,36(r1)
+	  34: GETL       	R3, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x24, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFE93D5C:  839E02B4  lwz r28,692(r30)
+	  39: GETL       	R30, t30
+	  40: ADDL       	$0x2B4, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R28
+	  43: INCEIPL       	$4
+
+	0xFE93D60:  7D20E028  lwarx r9,r0,r28
+	  44: GETL       	R28, t34
+	  45: LOCKo       	
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R9
+	  48: INCEIPL       	$4
+
+	0xFE93D64:  7C09E800  cmp cr0,r9,r29
+	  49: GETL       	R9, t38
+	  50: GETL       	R29, t40
+	  51: CMPL       	t38, t40, t42  (-rSo)
+	  52: ICRFL       	t42, $0x0, CR
+	  53: INCEIPL       	$4
+
+	0xFE93D68:  4082000C  bc 4,2,0xFE93D74
+	  54: Jc02o       	$0xFE93D74
+
+
+
+. 1508 FE93D38 52
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 A1 00 14 38 00 00 01 93 61 00 0C 3B A0 00 00 93 E1 00 1C 90 61 00 24 83 9E 02 B4 7D 20 E0 28 7C 09 E8 00 40 82 00 0C
+==== BB 1509 (0xFE93D6C) approx BBs exec'd 0 ====
+
+	0xFE93D6C:  7C00E12D  stwcx. r0,r0,r28
+	   0: GETL       	R28, t0
+	   1: GETL       	R0, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE93D70:  40A2FFF0  bc 5,2,0xFE93D60
+	   6: Jc02o       	$0xFE93D60
+
+
+
+. 1509 FE93D6C 8
+. 7C 00 E1 2D 40 A2 FF F0
+==== BB 1510 (0xFE93D60) approx BBs exec'd 0 ====
+
+	0xFE93D60:  7D20E028  lwarx r9,r0,r28
+	   0: GETL       	R28, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE93D64:  7C09E800  cmp cr0,r9,r29
+	   5: GETL       	R9, t4
+	   6: GETL       	R29, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE93D68:  4082000C  bc 4,2,0xFE93D74
+	  10: Jc02o       	$0xFE93D74
+
+
+
+. 1510 FE93D60 12
+. 7D 20 E0 28 7C 09 E8 00 40 82 00 0C
+==== BB 1511 (0xFE93D74) approx BBs exec'd 0 ====
+
+	0xFE93D74:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFE93D78:  2F890000  cmpi cr7,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE93D7C:  409E0134  bc 4,30,0xFE93EB0
+	   5: Jc30o       	$0xFE93EB0
+
+
+
+. 1511 FE93D74 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 34
+==== BB 1512 (0xFE93D80) approx BBs exec'd 0 ====
+
+	0xFE93D80:  837E1AFC  lwz r27,6908(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1AFC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFE93D84:  83FB0000  lwz r31,0(r27)
+	   5: GETL       	R27, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R31
+	   8: INCEIPL       	$4
+
+	0xFE93D88:  2F1F0000  cmpi cr6,r31,0
+	   9: GETL       	R31, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0xFE93D8C:  419A0050  bc 12,26,0xFE93DDC
+	  13: Js26o       	$0xFE93DDC
+
+
+
+. 1512 FE93D80 16
+. 83 7E 1A FC 83 FB 00 00 2F 1F 00 00 41 9A 00 50
+==== BB 1513 (0xFE93D90) approx BBs exec'd 0 ====
+
+	0xFE93D90:  817F0004  lwz r11,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFE93D94:  3BA00000  li r29,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFE93D98:  280B0000  cmpli cr0,r11,0
+	   8: GETL       	R11, t6
+	   9: MOVL       	$0x0, t10
+	  10: CMPUL       	t6, t10, t8  (-rSo)
+	  11: ICRFL       	t8, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFE93D9C:  40810024  bc 4,1,0xFE93DC0
+	  13: Jc01o       	$0xFE93DC0
+
+
+
+. 1513 FE93D90 16
+. 81 7F 00 04 3B A0 00 00 28 0B 00 00 40 81 00 24
+==== BB 1514 (0xFE93DC0) approx BBs exec'd 0 ====
+
+	0xFE93DC0:  7C0BE840  cmpl cr0,r11,r29
+	   0: GETL       	R11, t0
+	   1: GETL       	R29, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE93DC4:  41810104  bc 12,1,0xFE93EC8
+	   5: Js01o       	$0xFE93EC8
+
+
+
+. 1514 FE93DC0 8
+. 7C 0B E8 40 41 81 01 04
+==== BB 1515 (0xFE93DC8) approx BBs exec'd 0 ====
+
+	0xFE93DC8:  288B001F  cmpli cr1,r11,31
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x1F, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFE93DCC:  408500F0  bc 4,5,0xFE93EBC
+	   5: Jc05o       	$0xFE93EBC
+
+
+
+. 1515 FE93DC8 8
+. 28 8B 00 1F 40 85 00 F0
+==== BB 1516 (0xFE93EBC) approx BBs exec'd 0 ====
+
+	0xFE93EBC:  392B0001  addi r9,r11,1
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFE93EC0:  7D7D5B78  or r29,r11,r11
+	   4: GETL       	R11, t2
+	   5: PUTL       	t2, R29
+	   6: INCEIPL       	$4
+
+	0xFE93EC4:  913F0004  stw r9,4(r31)
+	   7: GETL       	R9, t4
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0x4, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFE93EC8:  40BAFF40  bc 5,26,0xFE93E08
+	  12: Jc26o       	$0xFE93E08
+
+
+
+. 1516 FE93EBC 16
+. 39 2B 00 01 7D 7D 5B 78 91 3F 00 04 40 BA FF 40
+==== BB 1517 (0xFE93E08) approx BBs exec'd 0 ====
+
+	0xFE93E08:  57AA2036  rlwinm r10,r29,4,0,27
+	   0: GETL       	R29, t0
+	   1: SHLL       	$0x4, t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0xFE93E0C:  39000001  li r8,1
+	   4: MOVL       	$0x1, t2
+	   5: PUTL       	t2, R8
+	   6: INCEIPL       	$4
+
+	0xFE93E10:  7CEAFA14  add r7,r10,r31
+	   7: GETL       	R10, t4
+	   8: GETL       	R31, t6
+	   9: ADDL       	t4, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0xFE93E14:  91070008  stw r8,8(r7)
+	  12: GETL       	R8, t8
+	  13: GETL       	R7, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFE93E18:  39800000  li r12,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R12
+	  19: INCEIPL       	$4
+
+	0xFE93E1C:  7C0004AC  sync
+	  20: INCEIPL       	$4
+
+	0xFE93E20:  7D60E028  lwarx r11,r0,r28
+	  21: GETL       	R28, t14
+	  22: LOCKo       	
+	  23: LDL       	(t14), t16
+	  24: PUTL       	t16, R11
+	  25: INCEIPL       	$4
+
+	0xFE93E24:  7D80E12D  stwcx. r12,r0,r28
+	  26: GETL       	R28, t18
+	  27: GETL       	R12, t20
+	  28: LOCKo       	
+	  29: STL       	t20, (t18)  (-rSo)
+	  30: ICRFL       	cr, $0x0, CR
+	  31: INCEIPL       	$4
+
+	0xFE93E28:  40A2FFF8  bc 5,2,0xFE93E20
+	  32: Jc02o       	$0xFE93E20
+
+
+
+. 1517 FE93E08 36
+. 57 AA 20 36 39 00 00 01 7C EA FA 14 91 07 00 08 39 80 00 00 7C 00 04 AC 7D 60 E0 28 7D 80 E1 2D 40 A2 FF F8
+==== BB 1518 (0xFE93E2C) approx BBs exec'd 0 ====
+
+	0xFE93E2C:  2F8B0001  cmpi cr7,r11,1
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE93E30:  419D0060  bc 12,29,0xFE93E90
+	   5: Js29o       	$0xFE93E90
+
+
+
+. 1518 FE93E2C 8
+. 2F 8B 00 01 41 9D 00 60
+==== BB 1519 (0xFE93E34) approx BBs exec'd 0 ====
+
+	0xFE93E34:  419A0034  bc 12,26,0xFE93E68
+	   0: Js26o       	$0xFE93E68
+
+
+
+. 1519 FE93E34 4
+. 41 9A 00 34
+==== BB 1520 (0xFE93E38) approx BBs exec'd 0 ====
+
+	0xFE93E38:  57BC2036  rlwinm r28,r29,4,0,27
+	   0: GETL       	R29, t0
+	   1: SHLL       	$0x4, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0xFE93E3C:  83A10024  lwz r29,36(r1)
+	   4: GETL       	R1, t2
+	   5: ADDL       	$0x24, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0xFE93E40:  7F7CFA14  add r27,r28,r31
+	   9: GETL       	R28, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0xFE93E44:  83C10018  lwz r30,24(r1)
+	  14: GETL       	R1, t10
+	  15: ADDL       	$0x18, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R30
+	  18: INCEIPL       	$4
+
+	0xFE93E48:  387B0008  addi r3,r27,8
+	  19: GETL       	R27, t14
+	  20: ADDL       	$0x8, t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0xFE93E4C:  7FA803A6  mtlr r29
+	  23: GETL       	R29, t16
+	  24: PUTL       	t16, LR
+	  25: INCEIPL       	$4
+
+	0xFE93E50:  8361000C  lwz r27,12(r1)
+	  26: GETL       	R1, t18
+	  27: ADDL       	$0xC, t18
+	  28: LDL       	(t18), t20
+	  29: PUTL       	t20, R27
+	  30: INCEIPL       	$4
+
+	0xFE93E54:  83810010  lwz r28,16(r1)
+	  31: GETL       	R1, t22
+	  32: ADDL       	$0x10, t22
+	  33: LDL       	(t22), t24
+	  34: PUTL       	t24, R28
+	  35: INCEIPL       	$4
+
+	0xFE93E58:  83A10014  lwz r29,20(r1)
+	  36: GETL       	R1, t26
+	  37: ADDL       	$0x14, t26
+	  38: LDL       	(t26), t28
+	  39: PUTL       	t28, R29
+	  40: INCEIPL       	$4
+
+	0xFE93E5C:  83E1001C  lwz r31,28(r1)
+	  41: GETL       	R1, t30
+	  42: ADDL       	$0x1C, t30
+	  43: LDL       	(t30), t32
+	  44: PUTL       	t32, R31
+	  45: INCEIPL       	$4
+
+	0xFE93E60:  38210020  addi r1,r1,32
+	  46: GETL       	R1, t34
+	  47: ADDL       	$0x20, t34
+	  48: PUTL       	t34, R1
+	  49: INCEIPL       	$4
+
+	0xFE93E64:  4E800020  blr
+	  50: GETL       	LR, t36
+	  51: JMPo-r       	t36  ($4)
+
+
+
+. 1520 FE93E38 48
+. 57 BC 20 36 83 A1 00 24 7F 7C FA 14 83 C1 00 18 38 7B 00 08 7F A8 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1521 (0xFE93EFC) approx BBs exec'd 0 ====
+
+	0xFE93EFC:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE93F00:  3800FFFF  li r0,-1
+	   4: MOVL       	$0xFFFFFFFF, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFE93F04:  4182001C  bc 12,2,0xFE93F20
+	   7: Js02o       	$0xFE93F20
+
+
+
+. 1521 FE93EFC 12
+. 2C 03 00 00 38 00 FF FF 41 82 00 1C
+==== BB 1522 (0xFE93F08) approx BBs exec'd 0 ====
+
+	0xFE93F08:  38800004  li r4,4
+	   0: MOVL       	$0x4, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE93F0C:  93E3000C  stw r31,12(r3)
+	   3: GETL       	R31, t2
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xC, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFE93F10:  90830000  stw r4,0(r3)
+	   8: GETL       	R4, t6
+	   9: GETL       	R3, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE93F14:  38000000  li r0,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFE93F18:  93A30004  stw r29,4(r3)
+	  15: GETL       	R29, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0x4, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFE93F1C:  93830008  stw r28,8(r3)
+	  20: GETL       	R28, t16
+	  21: GETL       	R3, t18
+	  22: ADDL       	$0x8, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0xFE93F20:  80A10024  lwz r5,36(r1)
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x24, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R5
+	  29: INCEIPL       	$4
+
+	0xFE93F24:  7C030378  or r3,r0,r0
+	  30: GETL       	R0, t24
+	  31: PUTL       	t24, R3
+	  32: INCEIPL       	$4
+
+	0xFE93F28:  83810010  lwz r28,16(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x10, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R28
+	  37: INCEIPL       	$4
+
+	0xFE93F2C:  83A10014  lwz r29,20(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x14, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R29
+	  42: INCEIPL       	$4
+
+	0xFE93F30:  7CA803A6  mtlr r5
+	  43: GETL       	R5, t34
+	  44: PUTL       	t34, LR
+	  45: INCEIPL       	$4
+
+	0xFE93F34:  83C10018  lwz r30,24(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x18, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R30
+	  50: INCEIPL       	$4
+
+	0xFE93F38:  83E1001C  lwz r31,28(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x1C, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R31
+	  55: INCEIPL       	$4
+
+	0xFE93F3C:  38210020  addi r1,r1,32
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x20, t44
+	  58: PUTL       	t44, R1
+	  59: INCEIPL       	$4
+
+	0xFE93F40:  4E800020  blr
+	  60: GETL       	LR, t46
+	  61: JMPo-r       	t46  ($4)
+
+
+
+. 1522 FE93F08 60
+. 38 80 00 04 93 E3 00 0C 90 83 00 00 38 00 00 00 93 A3 00 04 93 83 00 08 80 A1 00 24 7C 03 03 78 83 81 00 10 83 A1 00 14 7C A8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1523 (0xFE7B7A4) approx BBs exec'd 0 ====
+
+	0xFE7B7A4:  2F1F0000  cmpi cr6,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE7B7A8:  419A0014  bc 12,26,0xFE7B7BC
+	   4: Js26o       	$0xFE7B7BC
+
+
+
+. 1523 FE7B7A4 8
+. 2F 1F 00 00 41 9A 00 14
+==== BB 1524 (0xFE7B7AC) approx BBs exec'd 0 ====
+
+	0xFE7B7AC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE7B7B0:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE7B7B4:  38A00000  li r5,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFE7B7B8:  48018719  bl 0xFE93ED0
+	   9: MOVL       	$0xFE7B7BC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFE93ED0  ($4)
+
+
+
+. 1524 FE7B7AC 16
+. 7F E3 FB 78 38 80 00 00 38 A0 00 00 48 01 87 19
+==== BB 1525 (0xFE93DA0) approx BBs exec'd 0 ====
+
+	0xFE93DA0:  393F0008  addi r9,r31,8
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFE93DA4:  80890000  lwz r4,0(r9)
+	   4: GETL       	R9, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE93DA8:  39290010  addi r9,r9,16
+	   8: GETL       	R9, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0xFE93DAC:  2C840000  cmpi cr1,r4,0
+	  12: GETL       	R4, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0xFE93DB0:  41860010  bc 12,6,0xFE93DC0
+	  16: Js06o       	$0xFE93DC0
+
+
+
+. 1525 FE93DA0 20
+. 39 3F 00 08 80 89 00 00 39 29 00 10 2C 84 00 00 41 86 00 10
+==== BB 1526 (0xFE93DB4) approx BBs exec'd 0 ====
+
+	0xFE93DB4:  3BBD0001  addi r29,r29,1
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0xFE93DB8:  7F8BE840  cmpl cr7,r11,r29
+	   4: GETL       	R11, t2
+	   5: GETL       	R29, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFE93DBC:  419DFFE8  bc 12,29,0xFE93DA4
+	   9: Js29o       	$0xFE93DA4
+
+
+
+. 1526 FE93DB4 12
+. 3B BD 00 01 7F 8B E8 40 41 9D FF E8
+==== BB 1527 (0xFE7B7BC) approx BBs exec'd 0 ====
+
+	0xFE7B7BC:  83FE1BC8  lwz r31,7112(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFE7B7C0:  807F0000  lwz r3,0(r31)
+	   5: GETL       	R31, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFE7B7C4:  70690002  andi. r9,r3,0x2
+	   9: GETL       	R3, t8
+	  10: ANDL       	$0x2, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFE7B7C8:  7FA00026  mfcr r29
+	  15: GETL       	CR, t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0xFE7B7CC:  40820080  bc 4,2,0xFE7B84C
+	  18: Jc02o       	$0xFE7B84C
+
+
+
+. 1527 FE7B7BC 20
+. 83 FE 1B C8 80 7F 00 00 70 69 00 02 7F A0 00 26 40 82 00 80
+==== BB 1528 (0xFE7B7D0) approx BBs exec'd 0 ====
+
+	0xFE7B7D0:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE7B7D4:  419E001C  bc 12,30,0xFE7B7F0
+	   4: Js30o       	$0xFE7B7F0
+
+
+
+. 1528 FE7B7D0 8
+. 2F 9C 00 00 41 9E 00 1C
+==== BB 1529 (0xFE7B7D8) approx BBs exec'd 0 ====
+
+	0xFE7B7D8:  80BB0000  lwz r5,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFE7B7DC:  7F8803A6  mtlr r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, LR
+	   6: INCEIPL       	$4
+
+	0xFE7B7E0:  806101F4  lwz r3,500(r1)
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x1F4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R3
+	  11: INCEIPL       	$4
+
+	0xFE7B7E4:  808101F8  lwz r4,504(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x1F8, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R4
+	  16: INCEIPL       	$4
+
+	0xFE7B7E8:  80C101FC  lwz r6,508(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x1FC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R6
+	  21: INCEIPL       	$4
+
+	0xFE7B7EC:  4E800021  blrl
+	  22: GETL       	LR, t18
+	  23: MOVL       	$0xFE7B7F0, t20
+	  24: PUTL       	t20, LR
+	  25: JMPo-r       	t18  ($4)
+
+
+
+. 1529 FE7B7D8 24
+. 80 BB 00 00 7F 88 03 A6 80 61 01 F4 80 81 01 F8 80 C1 01 FC 4E 80 00 21
+==== BB 1530 (0x10007A24) approx BBs exec'd 0 ====
+
+	0x10007A24:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x10007A28:  7CA802A6  mflr r5
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x10007A2C:  429F0005  bcl 20,31,0x10007A30
+	   9: MOVL       	$0x10007A30, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x10007A30:  93C10018  stw r30,24(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x10007A34:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0x10007A38:  93810010  stw r28,16(r1)
+	  20: GETL       	R28, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x10007A3C:  93E1001C  stw r31,28(r1)
+	  25: GETL       	R31, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0x10007A40:  93A10014  stw r29,20(r1)
+	  30: GETL       	R29, t22
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x14, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0x10007A44:  809EFFF0  lwz r4,-16(r30)
+	  35: GETL       	R30, t26
+	  36: ADDL       	$0xFFFFFFF0, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R4
+	  39: INCEIPL       	$4
+
+	0x10007A48:  90A10024  stw r5,36(r1)
+	  40: GETL       	R5, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x24, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x10007A4C:  7FC4F214  add r30,r4,r30
+	  45: GETL       	R4, t34
+	  46: GETL       	R30, t36
+	  47: ADDL       	t34, t36
+	  48: PUTL       	t36, R30
+	  49: INCEIPL       	$4
+
+	0x10007A50:  4BFF957D  bl 0x10000FCC
+	  50: MOVL       	$0x10007A54, t38
+	  51: PUTL       	t38, LR
+	  52: JMPo-c       	$0x10000FCC  ($4)
+
+
+
+. 1530 10007A24 48
+. 94 21 FF E0 7C A8 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 E1 00 1C 93 A1 00 14 80 9E FF F0 90 A1 00 24 7F C4 F2 14 4B FF 95 7D
+==== BB 1531 (0x10000FCC) approx BBs exec'd 0 ====
+
+	0x10000FCC:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x10000FD0:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10000FD4:  90010024  stw r0,36(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x24, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x10000FD8:  48000041  bl 0x10001018
+	  14: MOVL       	$0x10000FDC, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x10001018  ($4)
+
+
+
+. 1531 10000FCC 16
+. 94 21 FF E0 7C 08 02 A6 90 01 00 24 48 00 00 41
+==== BB 1532 (0x10001018) approx BBs exec'd 0 ====
+
+	0x10001018:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x1000101C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10001020:  93C10018  stw r30,24(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x10001024:  90010024  stw r0,36(r1)
+	  14: GETL       	R0, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x24, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x10001028:  48019885  bl 0x1001A8AC
+	  19: MOVL       	$0x1000102C, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x1001A8AC  ($4)
+
+
+
+. 1532 10001018 20
+. 94 21 FF E0 7C 08 02 A6 93 C1 00 18 90 01 00 24 48 01 98 85
+==== BB 1533 (0x1001A8AC) approx BBs exec'd 0 ====
+
+	0x1001A8AC:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0x1001A8B0, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+
+. 1533 1001A8AC 4
+. 4E 80 00 21
+==== BB 1534 (0x1000102C) approx BBs exec'd 0 ====
+
+	0x1000102C:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0x10001030:  801E020C  lwz r0,524(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x20C, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x10001034:  2F800000  cmpi cr7,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x10001038:  419E000C  bc 12,30,0x10001044
+	  12: Js30o       	$0x10001044
+
+
+
+. 1534 1000102C 16
+. 7F C8 02 A6 80 1E 02 0C 2F 80 00 00 41 9E 00 0C
+==== BB 1535 (0x10001044) approx BBs exec'd 0 ====
+
+	0x10001044:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10001048:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0x1000104C:  83C10018  lwz r30,24(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0x10001050:  38210020  addi r1,r1,32
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x20, t10
+	  15: PUTL       	t10, R1
+	  16: INCEIPL       	$4
+
+	0x10001054:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1535 10001044 20
+. 80 01 00 24 7C 08 03 A6 83 C1 00 18 38 21 00 20 4E 80 00 20
+==== BB 1536 (0x10000FDC) approx BBs exec'd 0 ====
+
+	0x10000FDC:  48000105  bl 0x100010E0
+	   0: MOVL       	$0x10000FE0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x100010E0  ($4)
+
+
+
+. 1536 10000FDC 4
+. 48 00 01 05
+==== BB 1537 (0x100010E0) approx BBs exec'd 0 ====
+
+	0x100010E0:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x100010E4:  3D601002  lis r11,4098
+	   3: MOVL       	$0x10020000, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x100010E8:  9421FFF0  stwu r1,-16(r1)
+	   6: GETL       	R1, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xFFFFFFF0, t6
+	   9: PUTL       	t6, R1
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x100010EC:  3D200000  lis r9,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x100010F0:  39290000  addi r9,r9,0
+	  15: MOVL       	$0x0, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x100010F4:  386BA010  addi r3,r11,-24560
+	  18: MOVL       	$0x1001A010, t12
+	  19: PUTL       	t12, R3
+	  20: INCEIPL       	$4
+
+	0x100010F8:  90010014  stw r0,20(r1)
+	  21: GETL       	R0, t14
+	  22: GETL       	R1, t16
+	  23: ADDL       	$0x14, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0x100010FC:  800BA010  lwz r0,-24560(r11)
+	  26: MOVL       	$0x1001A010, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R0
+	  29: INCEIPL       	$4
+
+	0x10001100:  2F800000  cmpi cr7,r0,0
+	  30: GETL       	R0, t22
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0x10001104:  419E0014  bc 12,30,0x10001118
+	  34: Js30o       	$0x10001118
+
+
+
+. 1537 100010E0 40
+. 7C 08 02 A6 3D 60 10 02 94 21 FF F0 3D 20 00 00 39 29 00 00 38 6B A0 10 90 01 00 14 80 0B A0 10 2F 80 00 00 41 9E 00 14
+==== BB 1538 (0x10001118) approx BBs exec'd 0 ====
+
+	0x10001118:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x1000111C:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x10001120:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x10001124:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 1538 10001118 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 1539 (0x10000FE0) approx BBs exec'd 0 ====
+
+	0x10000FE0:  48006D91  bl 0x10007D70
+	   0: MOVL       	$0x10000FE4, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x10007D70  ($4)
+
+
+
+. 1539 10000FE0 4
+. 48 00 6D 91
+==== BB 1540 (0x10007D70) approx BBs exec'd 0 ====
+
+	0x10007D70:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10007D74:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x10007D78:  3D201002  lis r9,4098
+	   9: MOVL       	$0x10020000, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0x10007D7C:  93E1000C  stw r31,12(r1)
+	  12: GETL       	R31, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xC, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x10007D80:  3929A004  addi r9,r9,-24572
+	  17: MOVL       	$0x1001A004, t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0x10007D84:  90010014  stw r0,20(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x10007D88:  3BE9FFFC  addi r31,r9,-4
+	  25: MOVL       	$0x1001A000, t18
+	  26: PUTL       	t18, R31
+	  27: INCEIPL       	$4
+
+	0x10007D8C:  8009FFFC  lwz r0,-4(r9)
+	  28: MOVL       	$0x1001A000, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R0
+	  31: INCEIPL       	$4
+
+	0x10007D90:  48000010  b 0x10007DA0
+	  32: JMPo       	$0x10007DA0  ($4)
+
+
+
+. 1540 10007D70 36
+. 7C 08 02 A6 94 21 FF F0 3D 20 10 02 93 E1 00 0C 39 29 A0 04 90 01 00 14 3B E9 FF FC 80 09 FF FC 48 00 00 10
+==== BB 1541 (0x10007DA0) approx BBs exec'd 0 ====
+
+	0x10007DA0:  2F80FFFF  cmpi cr7,r0,-1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10007DA4:  409EFFF0  bc 4,30,0x10007D94
+	   5: Jc30o       	$0x10007D94
+
+
+
+. 1541 10007DA0 8
+. 2F 80 FF FF 40 9E FF F0
+==== BB 1542 (0x10007DA8) approx BBs exec'd 0 ====
+
+	0x10007DA8:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10007DAC:  83E1000C  lwz r31,12(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0x10007DB0:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0x10007DB4:  7C0803A6  mtlr r0
+	  14: GETL       	R0, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0x10007DB8:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 1542 10007DA8 20
+. 80 01 00 14 83 E1 00 0C 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 1543 (0x10000FE4) approx BBs exec'd 0 ====
+
+	0x10000FE4:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10000FE8:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0x10000FEC:  38210020  addi r1,r1,32
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x20, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0x10000FF0:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 1543 10000FE4 16
+. 80 01 00 24 7C 08 03 A6 38 21 00 20 4E 80 00 20
+==== BB 1544 (0x10007A54) approx BBs exec'd 0 ====
+
+	0x10007A54:  839E8004  lwz r28,-32764(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8004, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x10007A58:  807E8000  lwz r3,-32768(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0xFFFF8000, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x10007A5C:  3BE00000  li r31,0
+	  10: MOVL       	$0x0, t8
+	  11: PUTL       	t8, R31
+	  12: INCEIPL       	$4
+
+	0x10007A60:  7C1C1850  subf r0,r28,r3
+	  13: GETL       	R28, t10
+	  14: GETL       	R3, t12
+	  15: SUBL       	t10, t12
+	  16: PUTL       	t12, R0
+	  17: INCEIPL       	$4
+
+	0x10007A64:  7C001670  srawi r0,r0,2
+	  18: GETL       	R0, t14
+	  19: SARL       	$0x2, t14  (-wCa)
+	  20: PUTL       	t14, R0
+	  21: INCEIPL       	$4
+
+	0x10007A68:  7F9F0040  cmpl cr7,r31,r0
+	  22: GETL       	R31, t16
+	  23: GETL       	R0, t18
+	  24: CMPUL       	t16, t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x7, CR
+	  26: INCEIPL       	$4
+
+	0x10007A6C:  409C0024  bc 4,28,0x10007A90
+	  27: Jc28o       	$0x10007A90
+
+
+
+. 1544 10007A54 28
+. 83 9E 80 04 80 7E 80 00 3B E0 00 00 7C 1C 18 50 7C 00 16 70 7F 9F 00 40 40 9C 00 24
+==== BB 1545 (0x10007A90) approx BBs exec'd 0 ====
+
+	0x10007A90:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x10007A94:  83810010  lwz r28,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x10007A98:  83A10014  lwz r29,20(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0x10007A9C:  7D0803A6  mtlr r8
+	  15: GETL       	R8, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x10007AA0:  83C10018  lwz r30,24(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R30
+	  22: INCEIPL       	$4
+
+	0x10007AA4:  83E1001C  lwz r31,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0x10007AA8:  38210020  addi r1,r1,32
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0x10007AAC:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+
+. 1545 10007A90 32
+. 81 01 00 24 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1546 (0xFE7B7F0) approx BBs exec'd 0 ====
+
+	0xFE7B7F0:  7FA80120  mtcrf 0x80,r29
+	   0: GETL       	R29, t0
+	   1: ICRFL       	t0, $0x0, CR
+	   2: INCEIPL       	$4
+
+	0xFE7B7F4:  408200B0  bc 4,2,0xFE7B8A4
+	   3: Jc02o       	$0xFE7B8A4
+
+
+
+. 1546 FE7B7F0 8
+. 7F A8 01 20 40 82 00 B0
+==== BB 1547 (0xFE7B7F8) approx BBs exec'd 0 ====
+
+	0xFE7B7F8:  38610010  addi r3,r1,16
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE7B7FC:  48014F9D  bl 0xFE90798
+	   4: MOVL       	$0xFE7B800, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0xFE90798  ($4)
+
+
+
+. 1547 FE7B7F8 8
+. 38 61 00 10 48 01 4F 9D
+==== BB 1548 __GI__setjmp(0xFE90798) approx BBs exec'd 0 ====
+
+	0xFE90798:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE9079C:  4BFFFC94  b 0xFE90430
+	   3: JMPo       	$0xFE90430  ($4)
+
+
+
+. 1548 FE90798 8
+. 38 80 00 00 4B FF FC 94
+==== BB 1549 __sigsetjmp@@GLIBC_2.3.4(0xFE90430) approx BBs exec'd 0 ====
+
+	0xFE90430:  90230000  stw r1,0(r3)
+	   0: GETL       	R1, t0
+	   1: GETL       	R3, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE90434:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFE90438:  91C3000C  stw r14,12(r3)
+	   7: GETL       	R14, t6
+	   8: GETL       	R3, t8
+	   9: ADDL       	$0xC, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE9043C:  D9C30058  stfd f14,88(r3)
+	  12: GETL       	R3, t10
+	  13: ADDL       	$0x58, t10
+	  14: FPU_WQ       	0x0:0xE, (t10)
+	  15: INCEIPL       	$4
+
+	0xFE90440:  90030008  stw r0,8(r3)
+	  16: GETL       	R0, t12
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0x8, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFE90444:  91E30010  stw r15,16(r3)
+	  21: GETL       	R15, t16
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE90448:  D9E30060  stfd f15,96(r3)
+	  26: GETL       	R3, t20
+	  27: ADDL       	$0x60, t20
+	  28: FPU_WQ       	0x0:0xF, (t20)
+	  29: INCEIPL       	$4
+
+	0xFE9044C:  7C000026  mfcr r0
+	  30: GETL       	CR, t22
+	  31: PUTL       	t22, R0
+	  32: INCEIPL       	$4
+
+	0xFE90450:  92030014  stw r16,20(r3)
+	  33: GETL       	R16, t24
+	  34: GETL       	R3, t26
+	  35: ADDL       	$0x14, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0xFE90454:  DA030068  stfd f16,104(r3)
+	  38: GETL       	R3, t28
+	  39: ADDL       	$0x68, t28
+	  40: FPU_WQ       	0x0:0x10, (t28)
+	  41: INCEIPL       	$4
+
+	0xFE90458:  90030054  stw r0,84(r3)
+	  42: GETL       	R0, t30
+	  43: GETL       	R3, t32
+	  44: ADDL       	$0x54, t32
+	  45: STL       	t30, (t32)
+	  46: INCEIPL       	$4
+
+	0xFE9045C:  92230018  stw r17,24(r3)
+	  47: GETL       	R17, t34
+	  48: GETL       	R3, t36
+	  49: ADDL       	$0x18, t36
+	  50: STL       	t34, (t36)
+	  51: INCEIPL       	$4
+
+	0xFE90460:  DA230070  stfd f17,112(r3)
+	  52: GETL       	R3, t38
+	  53: ADDL       	$0x70, t38
+	  54: FPU_WQ       	0x0:0x11, (t38)
+	  55: INCEIPL       	$4
+
+	0xFE90464:  9243001C  stw r18,28(r3)
+	  56: GETL       	R18, t40
+	  57: GETL       	R3, t42
+	  58: ADDL       	$0x1C, t42
+	  59: STL       	t40, (t42)
+	  60: INCEIPL       	$4
+
+	0xFE90468:  DA430078  stfd f18,120(r3)
+	  61: GETL       	R3, t44
+	  62: ADDL       	$0x78, t44
+	  63: FPU_WQ       	0x0:0x12, (t44)
+	  64: INCEIPL       	$4
+
+	0xFE9046C:  92630020  stw r19,32(r3)
+	  65: GETL       	R19, t46
+	  66: GETL       	R3, t48
+	  67: ADDL       	$0x20, t48
+	  68: STL       	t46, (t48)
+	  69: INCEIPL       	$4
+
+	0xFE90470:  DA630080  stfd f19,128(r3)
+	  70: GETL       	R3, t50
+	  71: ADDL       	$0x80, t50
+	  72: FPU_WQ       	0x0:0x13, (t50)
+	  73: INCEIPL       	$4
+
+	0xFE90474:  92830024  stw r20,36(r3)
+	  74: GETL       	R20, t52
+	  75: GETL       	R3, t54
+	  76: ADDL       	$0x24, t54
+	  77: STL       	t52, (t54)
+	  78: INCEIPL       	$4
+
+	0xFE90478:  DA830088  stfd f20,136(r3)
+	  79: GETL       	R3, t56
+	  80: ADDL       	$0x88, t56
+	  81: FPU_WQ       	0x0:0x14, (t56)
+	  82: INCEIPL       	$4
+
+	0xFE9047C:  92A30028  stw r21,40(r3)
+	  83: GETL       	R21, t58
+	  84: GETL       	R3, t60
+	  85: ADDL       	$0x28, t60
+	  86: STL       	t58, (t60)
+	  87: INCEIPL       	$4
+
+	0xFE90480:  DAA30090  stfd f21,144(r3)
+	  88: GETL       	R3, t62
+	  89: ADDL       	$0x90, t62
+	  90: FPU_WQ       	0x0:0x15, (t62)
+	  91: INCEIPL       	$4
+
+	0xFE90484:  92C3002C  stw r22,44(r3)
+	  92: GETL       	R22, t64
+	  93: GETL       	R3, t66
+	  94: ADDL       	$0x2C, t66
+	  95: STL       	t64, (t66)
+	  96: INCEIPL       	$4
+
+	0xFE90488:  DAC30098  stfd f22,152(r3)
+	  97: GETL       	R3, t68
+	  98: ADDL       	$0x98, t68
+	  99: FPU_WQ       	0x0:0x16, (t68)
+	 100: INCEIPL       	$4
+
+	0xFE9048C:  92E30030  stw r23,48(r3)
+	 101: GETL       	R23, t70
+	 102: GETL       	R3, t72
+	 103: ADDL       	$0x30, t72
+	 104: STL       	t70, (t72)
+	 105: INCEIPL       	$4
+
+	0xFE90490:  DAE300A0  stfd f23,160(r3)
+	 106: GETL       	R3, t74
+	 107: ADDL       	$0xA0, t74
+	 108: FPU_WQ       	0x0:0x17, (t74)
+	 109: INCEIPL       	$4
+
+	0xFE90494:  93030034  stw r24,52(r3)
+	 110: GETL       	R24, t76
+	 111: GETL       	R3, t78
+	 112: ADDL       	$0x34, t78
+	 113: STL       	t76, (t78)
+	 114: INCEIPL       	$4
+
+	0xFE90498:  DB0300A8  stfd f24,168(r3)
+	 115: GETL       	R3, t80
+	 116: ADDL       	$0xA8, t80
+	 117: FPU_WQ       	0x0:0x18, (t80)
+	 118: INCEIPL       	$4
+
+	0xFE9049C:  93230038  stw r25,56(r3)
+	 119: GETL       	R25, t82
+	 120: GETL       	R3, t84
+	 121: ADDL       	$0x38, t84
+	 122: STL       	t82, (t84)
+	 123: INCEIPL       	$4
+
+	0xFE904A0:  DB2300B0  stfd f25,176(r3)
+	 124: GETL       	R3, t86
+	 125: ADDL       	$0xB0, t86
+	 126: FPU_WQ       	0x0:0x19, (t86)
+	 127: INCEIPL       	$4
+
+	0xFE904A4:  9343003C  stw r26,60(r3)
+	 128: GETL       	R26, t88
+	 129: GETL       	R3, t90
+	 130: ADDL       	$0x3C, t90
+	 131: STL       	t88, (t90)
+	 132: INCEIPL       	$4
+
+	0xFE904A8:  DB4300B8  stfd f26,184(r3)
+	 133: GETL       	R3, t92
+	 134: ADDL       	$0xB8, t92
+	 135: FPU_WQ       	0x0:0x1A, (t92)
+	 136: INCEIPL       	$4
+
+	0xFE904AC:  93630040  stw r27,64(r3)
+	 137: GETL       	R27, t94
+	 138: GETL       	R3, t96
+	 139: ADDL       	$0x40, t96
+	 140: STL       	t94, (t96)
+	 141: INCEIPL       	$4
+
+	0xFE904B0:  DB6300C0  stfd f27,192(r3)
+	 142: GETL       	R3, t98
+	 143: ADDL       	$0xC0, t98
+	 144: FPU_WQ       	0x0:0x1B, (t98)
+	 145: INCEIPL       	$4
+
+	0xFE904B4:  93830044  stw r28,68(r3)
+	 146: GETL       	R28, t100
+	 147: GETL       	R3, t102
+	 148: ADDL       	$0x44, t102
+	 149: STL       	t100, (t102)
+	 150: INCEIPL       	$4
+
+	0xFE904B8:  DB8300C8  stfd f28,200(r3)
+	 151: GETL       	R3, t104
+	 152: ADDL       	$0xC8, t104
+	 153: FPU_WQ       	0x0:0x1C, (t104)
+	 154: INCEIPL       	$4
+
+	0xFE904BC:  93A30048  stw r29,72(r3)
+	 155: GETL       	R29, t106
+	 156: GETL       	R3, t108
+	 157: ADDL       	$0x48, t108
+	 158: STL       	t106, (t108)
+	 159: INCEIPL       	$4
+
+	0xFE904C0:  DBA300D0  stfd f29,208(r3)
+	 160: GETL       	R3, t110
+	 161: ADDL       	$0xD0, t110
+	 162: FPU_WQ       	0x0:0x1D, (t110)
+	 163: INCEIPL       	$4
+
+	0xFE904C4:  93C3004C  stw r30,76(r3)
+	 164: GETL       	R30, t112
+	 165: GETL       	R3, t114
+	 166: ADDL       	$0x4C, t114
+	 167: STL       	t112, (t114)
+	 168: INCEIPL       	$4
+
+	0xFE904C8:  DBC300D8  stfd f30,216(r3)
+	 169: GETL       	R3, t116
+	 170: ADDL       	$0xD8, t116
+	 171: FPU_WQ       	0x0:0x1E, (t116)
+	 172: INCEIPL       	$4
+
+	0xFE904CC:  93E30050  stw r31,80(r3)
+	 173: GETL       	R31, t118
+	 174: GETL       	R3, t120
+	 175: ADDL       	$0x50, t120
+	 176: STL       	t118, (t120)
+	 177: INCEIPL       	$4
+
+	0xFE904D0:  DBE300E0  stfd f31,224(r3)
+	 178: GETL       	R3, t122
+	 179: ADDL       	$0xE0, t122
+	 180: FPU_WQ       	0x0:0x1F, (t122)
+	 181: INCEIPL       	$4
+
+	0xFE904D4:  7CC802A6  mflr r6
+	 182: GETL       	LR, t124
+	 183: PUTL       	t124, R6
+	 184: INCEIPL       	$4
+
+	0xFE904D8:  48117979  bl 0xFFA7E50
+	 185: MOVL       	$0xFE904DC, t126
+	 186: PUTL       	t126, LR
+	 187: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1549 FE90430 172
+. 90 23 00 00 7C 08 02 A6 91 C3 00 0C D9 C3 00 58 90 03 00 08 91 E3 00 10 D9 E3 00 60 7C 00 00 26 92 03 00 14 DA 03 00 68 90 03 00 54 92 23 00 18 DA 23 00 70 92 43 00 1C DA 43 00 78 92 63 00 20 DA 63 00 80 92 83 00 24 DA 83 00 88 92 A3 00 28 DA A3 00 90 92 C3 00 2C DA C3 00 98 92 E3 00 30 DA E3 00 A0 93 03 00 34 DB 03 00 A8 93 23 00 38 DB 23 00 B0 93 43 00 3C DB 43 00 B8 93 63 00 40 DB 63 00 C0 93 83 00 44 DB 83 00 C8 93 A3 00 48 DB A3 00 D0 93 C3 00 4C DB C3 00 D8 93 E3 00 50 DB E3 00 E0 7C C8 02 A6 48 11 79 79
+==== BB 1550 (0xFE904DC) approx BBs exec'd 0 ====
+
+	0xFE904DC:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFE904E0:  80A51BC8  lwz r5,7112(r5)
+	   3: GETL       	R5, t2
+	   4: ADDL       	$0x1BC8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFE904E4:  7CC803A6  mtlr r6
+	   8: GETL       	R6, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0xFE904E8:  80A5003C  lwz r5,60(r5)
+	  11: GETL       	R5, t8
+	  12: ADDL       	$0x3C, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0xFE904EC:  74A51000  andis. r5,r5,0x1000
+	  16: GETL       	R5, t12
+	  17: ANDL       	$0x10000000, t12
+	  18: PUTL       	t12, R5
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0xFE904F0:  41820158  bc 12,2,0xFE90648
+	  22: Js02o       	$0xFE90648
+
+
+
+. 1550 FE904DC 24
+. 7C A8 02 A6 80 A5 1B C8 7C C8 03 A6 80 A5 00 3C 74 A5 10 00 41 82 01 58
+==== BB 1551 (0xFE904F4) approx BBs exec'd 0 ====
+
+	0xFE904F4:  38A30100  addi r5,r3,256
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x100, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0xFE904F8:  70A6000F  andi. r6,r5,0xF
+	   4: GETL       	R5, t2
+	   5: ANDL       	$0xF, t2
+	   6: PUTL       	t2, R6
+	   7: CMP0L       	t2, t4  (-rSo)
+	   8: ICRFL       	t4, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE904FC:  7C0042A6  	  10: VEC_TRL       	2:0x0, t6
+	  11: PUTL       	t6, R0
+	  12: INCEIPL       	$4
+
+	0xFE90500:  900300F8  stw r0,248(r3)
+	  13: GETL       	R0, t8
+	  14: GETL       	R3, t10
+	  15: ADDL       	$0xF8, t10
+	  16: STL       	t8, (t10)
+	  17: INCEIPL       	$4
+
+	0xFE90504:  38C50010  addi r6,r5,16
+	  18: GETL       	R5, t12
+	  19: ADDL       	$0x10, t12
+	  20: PUTL       	t12, R6
+	  21: INCEIPL       	$4
+
+	0xFE90508:  41A200E8  bc 13,2,0xFE905F0
+	  22: Js02o       	$0xFE905F0
+
+
+
+. 1551 FE904F4 24
+. 38 A3 01 00 70 A6 00 0F 7C 00 42 A6 90 03 00 F8 38 C5 00 10 41 A2 00 E8
+==== BB 1552 (0xFE905F0) approx BBs exec'd 0 ====
+
+	0xFE905F0:  7E8029CE  stvx vr20,r0,r5
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0xFFFFFFF0, t0
+	   2: VEC_WQQ       	0x0:0x14, (t0)
+	   3: INCEIPL       	$4
+
+	0xFE905F4:  38A50020  addi r5,r5,32
+	   4: GETL       	R5, t2
+	   5: ADDL       	$0x20, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0xFE905F8:  7EA031CE  stvx vr21,r0,r6
+	   8: GETL       	R6, t4
+	   9: ANDL       	$0xFFFFFFF0, t4
+	  10: VEC_WQQ       	0x0:0x15, (t4)
+	  11: INCEIPL       	$4
+
+	0xFE905FC:  38C60020  addi r6,r6,32
+	  12: GETL       	R6, t6
+	  13: ADDL       	$0x20, t6
+	  14: PUTL       	t6, R6
+	  15: INCEIPL       	$4
+
+	0xFE90600:  7EC029CE  stvx vr22,r0,r5
+	  16: GETL       	R5, t8
+	  17: ANDL       	$0xFFFFFFF0, t8
+	  18: VEC_WQQ       	0x0:0x16, (t8)
+	  19: INCEIPL       	$4
+
+	0xFE90604:  38A50020  addi r5,r5,32
+	  20: GETL       	R5, t10
+	  21: ADDL       	$0x20, t10
+	  22: PUTL       	t10, R5
+	  23: INCEIPL       	$4
+
+	0xFE90608:  7EE031CE  stvx vr23,r0,r6
+	  24: GETL       	R6, t12
+	  25: ANDL       	$0xFFFFFFF0, t12
+	  26: VEC_WQQ       	0x0:0x17, (t12)
+	  27: INCEIPL       	$4
+
+	0xFE9060C:  38C60020  addi r6,r6,32
+	  28: GETL       	R6, t14
+	  29: ADDL       	$0x20, t14
+	  30: PUTL       	t14, R6
+	  31: INCEIPL       	$4
+
+	0xFE90610:  7F0029CE  stvx vr24,r0,r5
+	  32: GETL       	R5, t16
+	  33: ANDL       	$0xFFFFFFF0, t16
+	  34: VEC_WQQ       	0x0:0x18, (t16)
+	  35: INCEIPL       	$4
+
+	0xFE90614:  38A50020  addi r5,r5,32
+	  36: GETL       	R5, t18
+	  37: ADDL       	$0x20, t18
+	  38: PUTL       	t18, R5
+	  39: INCEIPL       	$4
+
+	0xFE90618:  7F2031CE  stvx vr25,r0,r6
+	  40: GETL       	R6, t20
+	  41: ANDL       	$0xFFFFFFF0, t20
+	  42: VEC_WQQ       	0x0:0x19, (t20)
+	  43: INCEIPL       	$4
+
+	0xFE9061C:  38C60020  addi r6,r6,32
+	  44: GETL       	R6, t22
+	  45: ADDL       	$0x20, t22
+	  46: PUTL       	t22, R6
+	  47: INCEIPL       	$4
+
+	0xFE90620:  7F4029CE  stvx vr26,r0,r5
+	  48: GETL       	R5, t24
+	  49: ANDL       	$0xFFFFFFF0, t24
+	  50: VEC_WQQ       	0x0:0x1A, (t24)
+	  51: INCEIPL       	$4
+
+	0xFE90624:  38A50020  addi r5,r5,32
+	  52: GETL       	R5, t26
+	  53: ADDL       	$0x20, t26
+	  54: PUTL       	t26, R5
+	  55: INCEIPL       	$4
+
+	0xFE90628:  7F6031CE  stvx vr27,r0,r6
+	  56: GETL       	R6, t28
+	  57: ANDL       	$0xFFFFFFF0, t28
+	  58: VEC_WQQ       	0x0:0x1B, (t28)
+	  59: INCEIPL       	$4
+
+	0xFE9062C:  38C60020  addi r6,r6,32
+	  60: GETL       	R6, t30
+	  61: ADDL       	$0x20, t30
+	  62: PUTL       	t30, R6
+	  63: INCEIPL       	$4
+
+	0xFE90630:  7F8029CE  stvx vr28,r0,r5
+	  64: GETL       	R5, t32
+	  65: ANDL       	$0xFFFFFFF0, t32
+	  66: VEC_WQQ       	0x0:0x1C, (t32)
+	  67: INCEIPL       	$4
+
+	0xFE90634:  38A50020  addi r5,r5,32
+	  68: GETL       	R5, t34
+	  69: ADDL       	$0x20, t34
+	  70: PUTL       	t34, R5
+	  71: INCEIPL       	$4
+
+	0xFE90638:  7FA031CE  stvx vr29,r0,r6
+	  72: GETL       	R6, t36
+	  73: ANDL       	$0xFFFFFFF0, t36
+	  74: VEC_WQQ       	0x0:0x1D, (t36)
+	  75: INCEIPL       	$4
+
+	0xFE9063C:  38C60020  addi r6,r6,32
+	  76: GETL       	R6, t38
+	  77: ADDL       	$0x20, t38
+	  78: PUTL       	t38, R6
+	  79: INCEIPL       	$4
+
+	0xFE90640:  7FC029CE  stvx vr30,r0,r5
+	  80: GETL       	R5, t40
+	  81: ANDL       	$0xFFFFFFF0, t40
+	  82: VEC_WQQ       	0x0:0x1E, (t40)
+	  83: INCEIPL       	$4
+
+	0xFE90644:  7FE031CE  stvx vr31,r0,r6
+	  84: GETL       	R6, t42
+	  85: ANDL       	$0xFFFFFFF0, t42
+	  86: VEC_WQQ       	0x0:0x1F, (t42)
+	  87: INCEIPL       	$4
+
+	0xFE90648:  480000AC  b 0xFE906F4
+	  88: JMPo       	$0xFE906F4  ($4)
+
+
+
+. 1552 FE905F0 92
+. 7E 80 29 CE 38 A5 00 20 7E A0 31 CE 38 C6 00 20 7E C0 29 CE 38 A5 00 20 7E E0 31 CE 38 C6 00 20 7F 00 29 CE 38 A5 00 20 7F 20 31 CE 38 C6 00 20 7F 40 29 CE 38 A5 00 20 7F 60 31 CE 38 C6 00 20 7F 80 29 CE 38 A5 00 20 7F A0 31 CE 38 C6 00 20 7F C0 29 CE 7F E0 31 CE 48 00 00 AC
+==== BB 1553 __vmx__sigjmp_save(0xFE906F4) approx BBs exec'd 0 ====
+
+	0xFE906F4:  2F840000  cmpi cr7,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE906F8:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFE906FC:  9421FFE0  stwu r1,-32(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFE0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE90700:  38A301C4  addi r5,r3,452
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0x1C4, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0xFE90704:  93A10014  stw r29,20(r1)
+	  17: GETL       	R29, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFE90708:  38800000  li r4,0
+	  22: MOVL       	$0x0, t16
+	  23: PUTL       	t16, R4
+	  24: INCEIPL       	$4
+
+	0xFE9070C:  93E1001C  stw r31,28(r1)
+	  25: GETL       	R31, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFE90710:  3BA00000  li r29,0
+	  30: MOVL       	$0x0, t22
+	  31: PUTL       	t22, R29
+	  32: INCEIPL       	$4
+
+	0xFE90714:  7C7F1B78  or r31,r3,r3
+	  33: GETL       	R3, t24
+	  34: PUTL       	t24, R31
+	  35: INCEIPL       	$4
+
+	0xFE90718:  93C10018  stw r30,24(r1)
+	  36: GETL       	R30, t26
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x18, t28
+	  39: STL       	t26, (t28)
+	  40: INCEIPL       	$4
+
+	0xFE9071C:  90010024  stw r0,36(r1)
+	  41: GETL       	R0, t30
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x24, t32
+	  44: STL       	t30, (t32)
+	  45: INCEIPL       	$4
+
+	0xFE90720:  38600000  li r3,0
+	  46: MOVL       	$0x0, t34
+	  47: PUTL       	t34, R3
+	  48: INCEIPL       	$4
+
+	0xFE90724:  409E0028  bc 4,30,0xFE9074C
+	  49: Jc30o       	$0xFE9074C
+
+
+
+. 1553 FE906F4 52
+. 2F 84 00 00 7C 08 02 A6 94 21 FF E0 38 A3 01 C4 93 A1 00 14 38 80 00 00 93 E1 00 1C 3B A0 00 00 7C 7F 1B 78 93 C1 00 18 90 01 00 24 38 60 00 00 40 9E 00 28
+==== BB 1554 (0xFE90728) approx BBs exec'd 0 ====
+
+	0xFE90728:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE9072C:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE90730:  93BF01C0  stw r29,448(r31)
+	   8: GETL       	R29, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0x1C0, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE90734:  83A10014  lwz r29,20(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x14, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0xFE90738:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFE9073C:  83C10018  lwz r30,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R30
+	  25: INCEIPL       	$4
+
+	0xFE90740:  83E1001C  lwz r31,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFE90744:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0xFE90748:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+
+. 1554 FE90728 36
+. 80 81 00 24 38 60 00 00 93 BF 01 C0 83 A1 00 14 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1555 (0xFE7B800) approx BBs exec'd 0 ====
+
+	0xFE7B800:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE7B804:  40860068  bc 4,6,0xFE7B86C
+	   4: Jc06o       	$0xFE7B86C
+
+
+
+. 1555 FE7B800 8
+. 2C 83 00 00 40 86 00 68
+==== BB 1556 (0xFE7B808) approx BBs exec'd 0 ====
+
+	0xFE7B808:  83A28BF4  lwz r29,-29708(r2)
+	   0: GETL       	R2, t0
+	   1: ADDL       	$0xFFFF8BF4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFE7B80C:  3B610010  addi r27,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R27
+	   8: INCEIPL       	$4
+
+	0xFE7B810:  819E1B84  lwz r12,7044(r30)
+	   9: GETL       	R30, t6
+	  10: ADDL       	$0x1B84, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0xFE7B814:  93A101E0  stw r29,480(r1)
+	  14: GETL       	R29, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x1E0, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFE7B818:  83828BF0  lwz r28,-29712(r2)
+	  19: GETL       	R2, t14
+	  20: ADDL       	$0xFFFF8BF0, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R28
+	  23: INCEIPL       	$4
+
+	0xFE7B81C:  810101F0  lwz r8,496(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x1F0, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R8
+	  28: INCEIPL       	$4
+
+	0xFE7B820:  938101E4  stw r28,484(r1)
+	  29: GETL       	R28, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x1E4, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE7B824:  806101F4  lwz r3,500(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x1F4, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R3
+	  38: INCEIPL       	$4
+
+	0xFE7B828:  7D0803A6  mtlr r8
+	  39: GETL       	R8, t30
+	  40: PUTL       	t30, LR
+	  41: INCEIPL       	$4
+
+	0xFE7B82C:  93628BF4  stw r27,-29708(r2)
+	  42: GETL       	R27, t32
+	  43: GETL       	R2, t34
+	  44: ADDL       	$0xFFFF8BF4, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0xFE7B830:  808101F8  lwz r4,504(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x1F8, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R4
+	  51: INCEIPL       	$4
+
+	0xFE7B834:  80AC0000  lwz r5,0(r12)
+	  52: GETL       	R12, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R5
+	  55: INCEIPL       	$4
+
+	0xFE7B838:  80C101FC  lwz r6,508(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x1FC, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R6
+	  60: INCEIPL       	$4
+
+	0xFE7B83C:  4E800021  blrl
+	  61: GETL       	LR, t48
+	  62: MOVL       	$0xFE7B840, t50
+	  63: PUTL       	t50, LR
+	  64: JMPo-r       	t48  ($4)
+
+
+
+. 1556 FE7B808 56
+. 83 A2 8B F4 3B 61 00 10 81 9E 1B 84 93 A1 01 E0 83 82 8B F0 81 01 01 F0 93 81 01 E4 80 61 01 F4 7D 08 03 A6 93 62 8B F4 80 81 01 F8 80 AC 00 00 80 C1 01 FC 4E 80 00 21
+==== BB 1557 (0x1000156C) approx BBs exec'd 0 ====
+
+	0x1000156C:  9421FF10  stwu r1,-240(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF10, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x10001570:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10001574:  48019339  bl 0x1001A8AC
+	   9: MOVL       	$0x10001578, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x1001A8AC  ($4)
+
+
+
+. 1557 1000156C 12
+. 94 21 FF 10 7C 08 02 A6 48 01 93 39
+==== BB 1558 (0x10001578) approx BBs exec'd 0 ====
+
+	0x10001578:  93C100E8  stw r30,232(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xE8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x1000157C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x10001580:  7D800026  mfcr r12
+	   8: GETL       	CR, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0x10001584:  93E100EC  stw r31,236(r1)
+	  11: GETL       	R31, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0xEC, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x10001588:  7C9F2378  or r31,r4,r4
+	  16: GETL       	R4, t12
+	  17: PUTL       	t12, R31
+	  18: INCEIPL       	$4
+
+	0x1000158C:  924100B8  stw r18,184(r1)
+	  19: GETL       	R18, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0xB8, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x10001590:  3A400000  li r18,0
+	  24: MOVL       	$0x0, t18
+	  25: PUTL       	t18, R18
+	  26: INCEIPL       	$4
+
+	0x10001594:  926100BC  stw r19,188(r1)
+	  27: GETL       	R19, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0xBC, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x10001598:  3A600000  li r19,0
+	  32: MOVL       	$0x0, t24
+	  33: PUTL       	t24, R19
+	  34: INCEIPL       	$4
+
+	0x1000159C:  928100C0  stw r20,192(r1)
+	  35: GETL       	R20, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0xC0, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x100015A0:  3A800000  li r20,0
+	  40: MOVL       	$0x0, t30
+	  41: PUTL       	t30, R20
+	  42: INCEIPL       	$4
+
+	0x100015A4:  92A100C4  stw r21,196(r1)
+	  43: GETL       	R21, t32
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0xC4, t34
+	  46: STL       	t32, (t34)
+	  47: INCEIPL       	$4
+
+	0x100015A8:  3AA00000  li r21,0
+	  48: MOVL       	$0x0, t36
+	  49: PUTL       	t36, R21
+	  50: INCEIPL       	$4
+
+	0x100015AC:  92C100C8  stw r22,200(r1)
+	  51: GETL       	R22, t38
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0xC8, t40
+	  54: STL       	t38, (t40)
+	  55: INCEIPL       	$4
+
+	0x100015B0:  3AC00000  li r22,0
+	  56: MOVL       	$0x0, t42
+	  57: PUTL       	t42, R22
+	  58: INCEIPL       	$4
+
+	0x100015B4:  93A100E4  stw r29,228(r1)
+	  59: GETL       	R29, t44
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0xE4, t46
+	  62: STL       	t44, (t46)
+	  63: INCEIPL       	$4
+
+	0x100015B8:  7C7D1B78  or r29,r3,r3
+	  64: GETL       	R3, t48
+	  65: PUTL       	t48, R29
+	  66: INCEIPL       	$4
+
+	0x100015BC:  900100F4  stw r0,244(r1)
+	  67: GETL       	R0, t50
+	  68: GETL       	R1, t52
+	  69: ADDL       	$0xF4, t52
+	  70: STL       	t50, (t52)
+	  71: INCEIPL       	$4
+
+	0x100015C0:  920100B0  stw r16,176(r1)
+	  72: GETL       	R16, t54
+	  73: GETL       	R1, t56
+	  74: ADDL       	$0xB0, t56
+	  75: STL       	t54, (t56)
+	  76: INCEIPL       	$4
+
+	0x100015C4:  922100B4  stw r17,180(r1)
+	  77: GETL       	R17, t58
+	  78: GETL       	R1, t60
+	  79: ADDL       	$0xB4, t60
+	  80: STL       	t58, (t60)
+	  81: INCEIPL       	$4
+
+	0x100015C8:  92E100CC  stw r23,204(r1)
+	  82: GETL       	R23, t62
+	  83: GETL       	R1, t64
+	  84: ADDL       	$0xCC, t64
+	  85: STL       	t62, (t64)
+	  86: INCEIPL       	$4
+
+	0x100015CC:  930100D0  stw r24,208(r1)
+	  87: GETL       	R24, t66
+	  88: GETL       	R1, t68
+	  89: ADDL       	$0xD0, t68
+	  90: STL       	t66, (t68)
+	  91: INCEIPL       	$4
+
+	0x100015D0:  932100D4  stw r25,212(r1)
+	  92: GETL       	R25, t70
+	  93: GETL       	R1, t72
+	  94: ADDL       	$0xD4, t72
+	  95: STL       	t70, (t72)
+	  96: INCEIPL       	$4
+
+	0x100015D4:  934100D8  stw r26,216(r1)
+	  97: GETL       	R26, t74
+	  98: GETL       	R1, t76
+	  99: ADDL       	$0xD8, t76
+	 100: STL       	t74, (t76)
+	 101: INCEIPL       	$4
+
+	0x100015D8:  936100DC  stw r27,220(r1)
+	 102: GETL       	R27, t78
+	 103: GETL       	R1, t80
+	 104: ADDL       	$0xDC, t80
+	 105: STL       	t78, (t80)
+	 106: INCEIPL       	$4
+
+	0x100015DC:  938100E0  stw r28,224(r1)
+	 107: GETL       	R28, t82
+	 108: GETL       	R1, t84
+	 109: ADDL       	$0xE0, t84
+	 110: STL       	t82, (t84)
+	 111: INCEIPL       	$4
+
+	0x100015E0:  918100AC  stw r12,172(r1)
+	 112: GETL       	R12, t86
+	 113: GETL       	R1, t88
+	 114: ADDL       	$0xAC, t88
+	 115: STL       	t86, (t88)
+	 116: INCEIPL       	$4
+
+	0x100015E4:  48001FF5  bl 0x100035D8
+	 117: MOVL       	$0x100015E8, t90
+	 118: PUTL       	t90, LR
+	 119: JMPo-c       	$0x100035D8  ($4)
+
+
+
+. 1558 10001578 112
+. 93 C1 00 E8 7F C8 02 A6 7D 80 00 26 93 E1 00 EC 7C 9F 23 78 92 41 00 B8 3A 40 00 00 92 61 00 BC 3A 60 00 00 92 81 00 C0 3A 80 00 00 92 A1 00 C4 3A A0 00 00 92 C1 00 C8 3A C0 00 00 93 A1 00 E4 7C 7D 1B 78 90 01 00 F4 92 01 00 B0 92 21 00 B4 92 E1 00 CC 93 01 00 D0 93 21 00 D4 93 41 00 D8 93 61 00 DC 93 81 00 E0 91 81 00 AC 48 00 1F F5
+==== BB 1559 (0x100035D8) approx BBs exec'd 0 ====
+
+	0x100035D8:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x100035DC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x100035E0:  480172CD  bl 0x1001A8AC
+	   9: MOVL       	$0x100035E4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x1001A8AC  ($4)
+
+
+
+. 1559 100035D8 12
+. 94 21 FF E0 7C 08 02 A6 48 01 72 CD
+==== BB 1560 (0x100035E4) approx BBs exec'd 0 ====
+
+	0x100035E4:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x100035E8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x100035EC:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x100035F0:  3FE00003  lis r31,3
+	  13: MOVL       	$0x30000, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0x100035F4:  90010024  stw r0,36(r1)
+	  16: GETL       	R0, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x24, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x100035F8:  807E0128  lwz r3,296(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x128, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R3
+	  25: INCEIPL       	$4
+
+	0x100035FC:  48017591  bl 0x1001AB8C
+	  26: MOVL       	$0x10003600, t20
+	  27: PUTL       	t20, LR
+	  28: JMPo-c       	$0x1001AB8C  ($4)
+
+
+
+. 1560 100035E4 28
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 3F E0 00 03 90 01 00 24 80 7E 01 28 48 01 75 91
+==== BB 1561 (0x1001AB8C) approx BBs exec'd 0 ====
+
+	0x1001AB8C:  3960002C  li r11,44
+	   0: MOVL       	$0x2C, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AB90:  4BFFFF7C  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 1561 1001AB8C 8
+. 39 60 00 2C 4B FF FF 7C
+==== BB 1562 getenv(0xFE93198) approx BBs exec'd 0 ====
+
+	0xFE93198:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE9319C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE931A0:  48114CB1  bl 0xFFA7E50
+	   9: MOVL       	$0xFE931A4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1562 FE93198 12
+. 94 21 FF E0 7C 08 02 A6 48 11 4C B1
+==== BB 1563 (0xFE931A4) approx BBs exec'd 0 ====
+
+	0xFE931A4:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE931A8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE931AC:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE931B0:  93410008  stw r26,8(r1)
+	  13: GETL       	R26, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x8, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE931B4:  7C7F1B78  or r31,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0xFE931B8:  9361000C  stw r27,12(r1)
+	  21: GETL       	R27, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE931BC:  93810010  stw r28,16(r1)
+	  26: GETL       	R28, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x10, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFE931C0:  93A10014  stw r29,20(r1)
+	  31: GETL       	R29, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x14, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFE931C4:  90010024  stw r0,36(r1)
+	  36: GETL       	R0, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x24, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0xFE931C8:  48043911  bl 0xFED6AD8
+	  41: MOVL       	$0xFE931CC, t32
+	  42: PUTL       	t32, LR
+	  43: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 1563 FE931A4 40
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 41 00 08 7C 7F 1B 78 93 61 00 0C 93 81 00 10 93 A1 00 14 90 01 00 24 48 04 39 11
+==== BB 1564 strlen(0xFED6AD8) approx BBs exec'd 0 ====
+
+	0xFED6AD8:  5464003A  rlwinm r4,r3,0,0,29
+	   0: GETL       	R3, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0xFED6ADC:  3CE07F7F  lis r7,32639
+	   4: MOVL       	$0x7F7F0000, t2
+	   5: PUTL       	t2, R7
+	   6: INCEIPL       	$4
+
+	0xFED6AE0:  54651EF8  rlwinm r5,r3,3,27,28
+	   7: GETL       	R3, t4
+	   8: ROLL       	$0x3, t4
+	   9: ANDL       	$0x18, t4
+	  10: PUTL       	t4, R5
+	  11: INCEIPL       	$4
+
+	0xFED6AE4:  81040000  lwz r8,0(r4)
+	  12: GETL       	R4, t6
+	  13: LDL       	(t6), t8
+	  14: PUTL       	t8, R8
+	  15: INCEIPL       	$4
+
+	0xFED6AE8:  3920FFFF  li r9,-1
+	  16: MOVL       	$0xFFFFFFFF, t10
+	  17: PUTL       	t10, R9
+	  18: INCEIPL       	$4
+
+	0xFED6AEC:  38E77F7F  addi r7,r7,32639
+	  19: MOVL       	$0x7F7F7F7F, t12
+	  20: PUTL       	t12, R7
+	  21: INCEIPL       	$4
+
+	0xFED6AF0:  7D292C30  srw r9,r9,r5
+	  22: GETL       	R9, t16
+	  23: GETL       	R5, t14
+	  24: SHRL       	t14, t16
+	  25: PUTL       	t16, R9
+	  26: INCEIPL       	$4
+
+	0xFED6AF4:  7CE04038  and r0,r7,r8
+	  27: GETL       	R7, t18
+	  28: GETL       	R8, t20
+	  29: ANDL       	t18, t20
+	  30: PUTL       	t20, R0
+	  31: INCEIPL       	$4
+
+	0xFED6AF8:  7CEA4378  or r10,r7,r8
+	  32: GETL       	R7, t22
+	  33: GETL       	R8, t24
+	  34: ORL       	t24, t22
+	  35: PUTL       	t22, R10
+	  36: INCEIPL       	$4
+
+	0xFED6AFC:  7C003A14  add r0,r0,r7
+	  37: GETL       	R0, t26
+	  38: GETL       	R7, t28
+	  39: ADDL       	t26, t28
+	  40: PUTL       	t28, R0
+	  41: INCEIPL       	$4
+
+	0xFED6B00:  7D4000F8  nor r0,r10,r0
+	  42: GETL       	R10, t30
+	  43: GETL       	R0, t32
+	  44: ORL       	t32, t30
+	  45: NOTL       	t30
+	  46: PUTL       	t30, R0
+	  47: INCEIPL       	$4
+
+	0xFED6B04:  7C084839  and. r8,r0,r9
+	  48: GETL       	R0, t34
+	  49: GETL       	R9, t36
+	  50: ANDL       	t34, t36
+	  51: PUTL       	t36, R8
+	  52: CMP0L       	t36, t38  (-rSo)
+	  53: ICRFL       	t38, $0x0, CR
+	  54: INCEIPL       	$4
+
+	0xFED6B08:  7C601120  mtcrf 0x1,r3
+	  55: GETL       	R3, t40
+	  56: ICRFL       	t40, $0x7, CR
+	  57: INCEIPL       	$4
+
+	0xFED6B0C:  40820070  bc 4,2,0xFED6B7C
+	  58: Jc02o       	$0xFED6B7C
+
+
+
+. 1564 FED6AD8 56
+. 54 64 00 3A 3C E0 7F 7F 54 65 1E F8 81 04 00 00 39 20 FF FF 38 E7 7F 7F 7D 29 2C 30 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 40 00 F8 7C 08 48 39 7C 60 11 20 40 82 00 70
+==== BB 1565 (0xFED6B10) approx BBs exec'd 0 ====
+
+	0xFED6B10:  3CC0FEFF  lis r6,-257
+	   0: MOVL       	$0xFEFF0000, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFED6B14:  38C6FEFF  addi r6,r6,-257
+	   3: MOVL       	$0xFEFEFEFF, t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFED6B18:  419D001C  bc 12,29,0xFED6B34
+	   6: Js29o       	$0xFED6B34
+
+
+
+. 1565 FED6B10 12
+. 3C C0 FE FF 38 C6 FE FF 41 9D 00 1C
+==== BB 1566 (0xFED6B34) approx BBs exec'd 0 ====
+
+	0xFED6B34:  81040004  lwz r8,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFED6B38:  85240008  lwzu r9,8(r4)
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0xFED6B3C:  7C064214  add r0,r6,r8
+	  11: GETL       	R6, t8
+	  12: GETL       	R8, t10
+	  13: ADDL       	t8, t10
+	  14: PUTL       	t10, R0
+	  15: INCEIPL       	$4
+
+	0xFED6B40:  7CEA40F8  nor r10,r7,r8
+	  16: GETL       	R7, t12
+	  17: GETL       	R8, t14
+	  18: ORL       	t14, t12
+	  19: NOTL       	t12
+	  20: PUTL       	t12, R10
+	  21: INCEIPL       	$4
+
+	0xFED6B44:  7C005039  and. r0,r0,r10
+	  22: GETL       	R0, t16
+	  23: GETL       	R10, t18
+	  24: ANDL       	t16, t18
+	  25: PUTL       	t18, R0
+	  26: CMP0L       	t18, t20  (-rSo)
+	  27: ICRFL       	t20, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0xFED6B48:  7D664A14  add r11,r6,r9
+	  29: GETL       	R6, t22
+	  30: GETL       	R9, t24
+	  31: ADDL       	t22, t24
+	  32: PUTL       	t24, R11
+	  33: INCEIPL       	$4
+
+	0xFED6B4C:  7CEC48F8  nor r12,r7,r9
+	  34: GETL       	R7, t26
+	  35: GETL       	R9, t28
+	  36: ORL       	t28, t26
+	  37: NOTL       	t26
+	  38: PUTL       	t26, R12
+	  39: INCEIPL       	$4
+
+	0xFED6B50:  4082001C  bc 4,2,0xFED6B6C
+	  40: Jc02o       	$0xFED6B6C
+
+
+
+. 1566 FED6B34 32
+. 81 04 00 04 85 24 00 08 7C 06 42 14 7C EA 40 F8 7C 00 50 39 7D 66 4A 14 7C EC 48 F8 40 82 00 1C
+==== BB 1567 (0xFED6B54) approx BBs exec'd 0 ====
+
+	0xFED6B54:  7D606039  and. r0,r11,r12
+	   0: GETL       	R11, t0
+	   1: GETL       	R12, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFED6B58:  4182FFDC  bc 12,2,0xFED6B34
+	   7: Js02o       	$0xFED6B34
+
+
+
+. 1567 FED6B54 8
+. 7D 60 60 39 41 82 FF DC
+==== BB 1568 (0xFED6B6C) approx BBs exec'd 0 ====
+
+	0xFED6B6C:  7CE04038  and r0,r7,r8
+	   0: GETL       	R7, t0
+	   1: GETL       	R8, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED6B70:  3884FFFC  addi r4,r4,-4
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0xFFFFFFFC, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFED6B74:  7C003A14  add r0,r0,r7
+	   9: GETL       	R0, t6
+	  10: GETL       	R7, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0xFED6B78:  7D480078  andc r8,r10,r0
+	  14: GETL       	R10, t10
+	  15: GETL       	R0, t12
+	  16: NOTL       	t12
+	  17: ANDL       	t10, t12
+	  18: PUTL       	t12, R8
+	  19: INCEIPL       	$4
+
+	0xFED6B7C:  7D0B0034  cntlzw r11,r8
+	  20: GETL       	R8, t14
+	  21: CNTLZL       	t14
+	  22: PUTL       	t14, R11
+	  23: INCEIPL       	$4
+
+	0xFED6B80:  7C032050  subf r0,r3,r4
+	  24: GETL       	R3, t16
+	  25: GETL       	R4, t18
+	  26: SUBL       	t16, t18
+	  27: PUTL       	t18, R0
+	  28: INCEIPL       	$4
+
+	0xFED6B84:  556BE8FE  rlwinm r11,r11,29,3,31
+	  29: GETL       	R11, t20
+	  30: SHRL       	$0x3, t20
+	  31: PUTL       	t20, R11
+	  32: INCEIPL       	$4
+
+	0xFED6B88:  7C605A14  add r3,r0,r11
+	  33: GETL       	R0, t22
+	  34: GETL       	R11, t24
+	  35: ADDL       	t22, t24
+	  36: PUTL       	t24, R3
+	  37: INCEIPL       	$4
+
+	0xFED6B8C:  4E800020  blr
+	  38: GETL       	LR, t26
+	  39: JMPo-r       	t26  ($4)
+
+
+
+. 1568 FED6B6C 36
+. 7C E0 40 38 38 84 FF FC 7C 00 3A 14 7D 48 00 78 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+==== BB 1569 (0xFE931CC) approx BBs exec'd 0 ====
+
+	0xFE931CC:  813E1B84  lwz r9,7044(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE931D0:  81290000  lwz r9,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFE931D4:  2F890000  cmpi cr7,r9,0
+	   9: GETL       	R9, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFE931D8:  419E0010  bc 12,30,0xFE931E8
+	  13: Js30o       	$0xFE931E8
+
+
+
+. 1569 FE931CC 16
+. 81 3E 1B 84 81 29 00 00 2F 89 00 00 41 9E 00 10
+==== BB 1570 (0xFE931DC) approx BBs exec'd 0 ====
+
+	0xFE931DC:  897F0000  lbz r11,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFE931E0:  2C0B0000  cmpi cr0,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE931E4:  40820030  bc 4,2,0xFE93214
+	   8: Jc02o       	$0xFE93214
+
+
+
+. 1570 FE931DC 12
+. 89 7F 00 00 2C 0B 00 00 40 82 00 30
+==== BB 1571 (0xFE93214) approx BBs exec'd 0 ====
+
+	0xFE93214:  881F0001  lbz r0,1(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE93218:  2C800000  cmpi cr1,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFE9321C:  40860048  bc 4,6,0xFE93264
+	   9: Jc06o       	$0xFE93264
+
+
+
+. 1571 FE93214 12
+. 88 1F 00 01 2C 80 00 00 40 86 00 48
+==== BB 1572 (0xFE93264) approx BBs exec'd 0 ====
+
+	0xFE93264:  81490000  lwz r10,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE93268:  5408402E  rlwinm r8,r0,8,0,23
+	   4: GETL       	R0, t4
+	   5: SHLL       	$0x8, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0xFE9326C:  556C063E  rlwinm r12,r11,0,24,31
+	   8: GETL       	R11, t6
+	   9: ANDL       	$0xFF, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0xFE93270:  7D3D4B78  or r29,r9,r9
+	  12: GETL       	R9, t8
+	  13: PUTL       	t8, R29
+	  14: INCEIPL       	$4
+
+	0xFE93274:  2C8A0000  cmpi cr1,r10,0
+	  15: GETL       	R10, t10
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x1, CR
+	  18: INCEIPL       	$4
+
+	0xFE93278:  7D9A4378  or r26,r12,r8
+	  19: GETL       	R12, t14
+	  20: GETL       	R8, t16
+	  21: ORL       	t16, t14
+	  22: PUTL       	t14, R26
+	  23: INCEIPL       	$4
+
+	0xFE9327C:  3B83FFFE  addi r28,r3,-2
+	  24: GETL       	R3, t18
+	  25: ADDL       	$0xFFFFFFFE, t18
+	  26: PUTL       	t18, R28
+	  27: INCEIPL       	$4
+
+	0xFE93280:  3B7F0002  addi r27,r31,2
+	  28: GETL       	R31, t20
+	  29: ADDL       	$0x2, t20
+	  30: PUTL       	t20, R27
+	  31: INCEIPL       	$4
+
+	0xFE93284:  41A6FF64  bc 13,6,0xFE931E8
+	  32: Js06o       	$0xFE931E8
+
+
+
+. 1572 FE93264 36
+. 81 49 00 00 54 08 40 2E 55 6C 06 3E 7D 3D 4B 78 2C 8A 00 00 7D 9A 43 78 3B 83 FF FE 3B 7F 00 02 41 A6 FF 64
+==== BB 1573 (0xFE93288) approx BBs exec'd 0 ====
+
+	0xFE93288:  7D5F5378  or r31,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFE9328C:  48000010  b 0xFE9329C
+	   3: JMPo       	$0xFE9329C  ($4)
+
+
+
+. 1573 FE93288 8
+. 7D 5F 53 78 48 00 00 10
+==== BB 1574 (0xFE9329C) approx BBs exec'd 0 ====
+
+	0xFE9329C:  881F0001  lbz r0,1(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE932A0:  387F0002  addi r3,r31,2
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x2, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFE932A4:  893F0000  lbz r9,0(r31)
+	   9: GETL       	R31, t6
+	  10: LDB       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0xFE932A8:  7F64DB78  or r4,r27,r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFE932AC:  540B402E  rlwinm r11,r0,8,0,23
+	  16: GETL       	R0, t12
+	  17: SHLL       	$0x8, t12
+	  18: PUTL       	t12, R11
+	  19: INCEIPL       	$4
+
+	0xFE932B0:  7F85E378  or r5,r28,r28
+	  20: GETL       	R28, t14
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0xFE932B4:  7D6A4B78  or r10,r11,r9
+	  23: GETL       	R11, t16
+	  24: GETL       	R9, t18
+	  25: ORL       	t18, t16
+	  26: PUTL       	t16, R10
+	  27: INCEIPL       	$4
+
+	0xFE932B8:  7F1A5000  cmp cr6,r26,r10
+	  28: GETL       	R26, t20
+	  29: GETL       	R10, t22
+	  30: CMPL       	t20, t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x6, CR
+	  32: INCEIPL       	$4
+
+	0xFE932BC:  409AFFD4  bc 4,26,0xFE93290
+	  33: Jc26o       	$0xFE93290
+
+
+
+. 1574 FE9329C 36
+. 88 1F 00 01 38 7F 00 02 89 3F 00 00 7F 64 DB 78 54 0B 40 2E 7F 85 E3 78 7D 6A 4B 78 7F 1A 50 00 40 9A FF D4
+==== BB 1575 (0xFE93290) approx BBs exec'd 0 ====
+
+	0xFE93290:  87FD0004  lwzu r31,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R29
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R31
+	   5: INCEIPL       	$4
+
+	0xFE93294:  2C9F0000  cmpi cr1,r31,0
+	   6: GETL       	R31, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFE93298:  41A6FF50  bc 13,6,0xFE931E8
+	  10: Js06o       	$0xFE931E8
+
+
+
+. 1575 FE93290 12
+. 87 FD 00 04 2C 9F 00 00 41 A6 FF 50
+==== BB 1576 (0xFE931E8) approx BBs exec'd 0 ====
+
+	0xFE931E8:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE931EC:  83410024  lwz r26,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xFE931F0:  8361000C  lwz r27,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R27
+	  12: INCEIPL       	$4
+
+	0xFE931F4:  7F4803A6  mtlr r26
+	  13: GETL       	R26, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFE931F8:  83810010  lwz r28,16(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x10, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFE931FC:  83410008  lwz r26,8(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x8, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R26
+	  25: INCEIPL       	$4
+
+	0xFE93200:  83A10014  lwz r29,20(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R29
+	  30: INCEIPL       	$4
+
+	0xFE93204:  83C10018  lwz r30,24(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x18, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R30
+	  35: INCEIPL       	$4
+
+	0xFE93208:  83E1001C  lwz r31,28(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0xFE9320C:  38210020  addi r1,r1,32
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x20, t32
+	  43: PUTL       	t32, R1
+	  44: INCEIPL       	$4
+
+	0xFE93210:  4E800020  blr
+	  45: GETL       	LR, t34
+	  46: JMPo-r       	t34  ($4)
+
+
+
+. 1576 FE931E8 44
+. 38 60 00 00 83 41 00 24 83 61 00 0C 7F 48 03 A6 83 81 00 10 83 41 00 08 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1577 (0x10003600) approx BBs exec'd 0 ====
+
+	0x10003600:  63FF0DB0  ori r31,r31,0xDB0
+	   0: GETL       	R31, t0
+	   1: ORL       	$0xDB0, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0x10003604:  7C691B79  or. r9,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R9
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x10003608:  4182001C  bc 12,2,0x10003624
+	   9: Js02o       	$0x10003624
+
+
+
+. 1577 10003600 12
+. 63 FF 0D B0 7C 69 1B 79 41 82 00 1C
+==== BB 1578 (0x10003624) approx BBs exec'd 0 ====
+
+	0x10003624:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10003628:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x1000362C:  83C10018  lwz r30,24(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0x10003630:  83E1001C  lwz r31,28(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x1C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R31
+	  17: INCEIPL       	$4
+
+	0x10003634:  7C0803A6  mtlr r0
+	  18: GETL       	R0, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x10003638:  38210020  addi r1,r1,32
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x20, t16
+	  23: PUTL       	t16, R1
+	  24: INCEIPL       	$4
+
+	0x1000363C:  4E800020  blr
+	  25: GETL       	LR, t18
+	  26: JMPo-r       	t18  ($4)
+
+
+
+. 1578 10003624 28
+. 80 01 00 24 7F E3 FB 78 83 C1 00 18 83 E1 00 1C 7C 08 03 A6 38 21 00 20 4E 80 00 20
+==== BB 1579 (0x100015E8) approx BBs exec'd 0 ====
+
+	0x100015E8:  3C000003  lis r0,3
+	   0: MOVL       	$0x30000, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x100015EC:  60000DAF  ori r0,r0,0xDAF
+	   3: MOVL       	$0x30DAF, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x100015F0:  7F830000  cmp cr7,r3,r0
+	   6: GETL       	R3, t4
+	   7: GETL       	R0, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x100015F4:  419D038C  bc 12,29,0x10001980
+	  11: Js29o       	$0x10001980
+
+
+
+. 1579 100015E8 16
+. 3C 00 00 03 60 00 0D AF 7F 83 00 00 41 9D 03 8C
+==== BB 1580 (0x10001980) approx BBs exec'd 0 ====
+
+	0x10001980:  807E00A8  lwz r3,168(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xA8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x10001984:  839E00AC  lwz r28,172(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0xAC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x10001988:  48019205  bl 0x1001AB8C
+	  10: MOVL       	$0x1000198C, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0x1001AB8C  ($4)
+
+
+
+. 1580 10001980 12
+. 80 7E 00 A8 83 9E 00 AC 48 01 92 05
+==== BB 1581 (0x1001AB8C) approx BBs exec'd 0 ====
+
+	0x1001AB8C:  4BE7860C  b 0xFE93198
+	   0: JMPo       	$0xFE93198  ($4)
+
+
+
+. 1581 1001AB8C 4
+. 4B E7 86 0C
+==== BB 1582 (0xFED6B1C) approx BBs exec'd 0 ====
+
+	0xFED6B1C:  85040004  lwzu r8,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0xFED6B20:  7CE04038  and r0,r7,r8
+	   6: GETL       	R7, t4
+	   7: GETL       	R8, t6
+	   8: ANDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0xFED6B24:  7CEA4378  or r10,r7,r8
+	  11: GETL       	R7, t8
+	  12: GETL       	R8, t10
+	  13: ORL       	t10, t8
+	  14: PUTL       	t8, R10
+	  15: INCEIPL       	$4
+
+	0xFED6B28:  7C003A14  add r0,r0,r7
+	  16: GETL       	R0, t12
+	  17: GETL       	R7, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0xFED6B2C:  7D4800F9  nor. r8,r10,r0
+	  21: GETL       	R10, t16
+	  22: GETL       	R0, t18
+	  23: ORL       	t18, t16
+	  24: NOTL       	t16
+	  25: PUTL       	t16, R8
+	  26: CMP0L       	t16, t20  (-rSo)
+	  27: ICRFL       	t20, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0xFED6B30:  4082004C  bc 4,2,0xFED6B7C
+	  29: Jc02o       	$0xFED6B7C
+
+
+
+. 1582 FED6B1C 24
+. 85 04 00 04 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 48 00 F9 40 82 00 4C
+==== BB 1583 (0xFED6B5C) approx BBs exec'd 0 ====
+
+	0xFED6B5C:  7CE04838  and r0,r7,r9
+	   0: GETL       	R7, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED6B60:  7C003A14  add r0,r0,r7
+	   5: GETL       	R0, t4
+	   6: GETL       	R7, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFED6B64:  7D880078  andc r8,r12,r0
+	  10: GETL       	R12, t8
+	  11: GETL       	R0, t10
+	  12: NOTL       	t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R8
+	  15: INCEIPL       	$4
+
+	0xFED6B68:  48000014  b 0xFED6B7C
+	  16: JMPo       	$0xFED6B7C  ($4)
+
+
+
+. 1583 FED6B5C 16
+. 7C E0 48 38 7C 00 3A 14 7D 88 00 78 48 00 00 14
+==== BB 1584 (0xFED6B7C) approx BBs exec'd 0 ====
+
+	0xFED6B7C:  7D0B0034  cntlzw r11,r8
+	   0: GETL       	R8, t0
+	   1: CNTLZL       	t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFED6B80:  7C032050  subf r0,r3,r4
+	   4: GETL       	R3, t2
+	   5: GETL       	R4, t4
+	   6: SUBL       	t2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFED6B84:  556BE8FE  rlwinm r11,r11,29,3,31
+	   9: GETL       	R11, t6
+	  10: SHRL       	$0x3, t6
+	  11: PUTL       	t6, R11
+	  12: INCEIPL       	$4
+
+	0xFED6B88:  7C605A14  add r3,r0,r11
+	  13: GETL       	R0, t8
+	  14: GETL       	R11, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0xFED6B8C:  4E800020  blr
+	  18: GETL       	LR, t12
+	  19: JMPo-r       	t12  ($4)
+
+
+
+. 1584 FED6B7C 20
+. 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+==== BB 1585 (0x1000198C) approx BBs exec'd 0 ====
+
+	0x1000198C:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10001990:  40BEFC6C  bc 5,30,0x100015FC
+	   4: Jc30o       	$0x100015FC
+
+
+
+. 1585 1000198C 8
+. 2F 83 00 00 40 BE FC 6C
+==== BB 1586 (0x10001994) approx BBs exec'd 0 ====
+
+	0x10001994:  4BFFFC64  b 0x100015F8
+	   0: JMPo       	$0x100015F8  ($4)
+
+
+
+. 1586 10001994 4
+. 4B FF FC 64
+==== BB 1587 (0x100015F8) approx BBs exec'd 0 ====
+
+	0x100015F8:  839E0084  lwz r28,132(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x100015FC:  801F0000  lwz r0,0(r31)
+	   5: GETL       	R31, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x10001600:  38600006  li r3,6
+	   9: MOVL       	$0x6, t8
+	  10: PUTL       	t8, R3
+	  11: INCEIPL       	$4
+
+	0x10001604:  813E01E4  lwz r9,484(r30)
+	  12: GETL       	R30, t10
+	  13: ADDL       	$0x1E4, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R9
+	  16: INCEIPL       	$4
+
+	0x10001608:  809E0074  lwz r4,116(r30)
+	  17: GETL       	R30, t14
+	  18: ADDL       	$0x74, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R4
+	  21: INCEIPL       	$4
+
+	0x1000160C:  90090000  stw r0,0(r9)
+	  22: GETL       	R0, t18
+	  23: GETL       	R9, t20
+	  24: STL       	t18, (t20)
+	  25: INCEIPL       	$4
+
+	0x10001610:  48019595  bl 0x1001ABA4
+	  26: MOVL       	$0x10001614, t22
+	  27: PUTL       	t22, LR
+	  28: JMPo-c       	$0x1001ABA4  ($4)
+
+
+
+. 1587 100015F8 28
+. 83 9E 00 84 80 1F 00 00 38 60 00 06 81 3E 01 E4 80 9E 00 74 90 09 00 00 48 01 95 95
+==== BB 1588 (0x1001ABA4) approx BBs exec'd 0 ====
+
+	0x1001ABA4:  39600038  li r11,56
+	   0: MOVL       	$0x38, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001ABA8:  4BFFFF64  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 1588 1001ABA4 8
+. 39 60 00 38 4B FF FF 64
+==== BB 1589 setlocale(0xFE86870) approx BBs exec'd 0 ====
+
+	0xFE86870:  9421FF20  stwu r1,-224(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF20, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE86874:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE86878:  938100D0  stw r28,208(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xD0, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE8687C:  7C7C1B79  or. r28,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R28
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFE86880:  481215D1  bl 0xFFA7E50
+	  19: MOVL       	$0xFE86884, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1589 FE86870 20
+. 94 21 FF 20 7C 08 02 A6 93 81 00 D0 7C 7C 1B 79 48 12 15 D1
+==== BB 1590 (0xFE86884) approx BBs exec'd 0 ====
+
+	0xFE86884:  936100CC  stw r27,204(r1)
+	   0: GETL       	R27, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xCC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE86888:  93C100D8  stw r30,216(r1)
+	   5: GETL       	R30, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0xD8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE8688C:  7C9B2378  or r27,r4,r4
+	  10: GETL       	R4, t8
+	  11: PUTL       	t8, R27
+	  12: INCEIPL       	$4
+
+	0xFE86890:  93E100DC  stw r31,220(r1)
+	  13: GETL       	R31, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xDC, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE86894:  7FC802A6  mflr r30
+	  18: GETL       	LR, t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0xFE86898:  924100A8  stw r18,168(r1)
+	  21: GETL       	R18, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xA8, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE8689C:  7C3F0B78  or r31,r1,r1
+	  26: GETL       	R1, t20
+	  27: PUTL       	t20, R31
+	  28: INCEIPL       	$4
+
+	0xFE868A0:  926100AC  stw r19,172(r1)
+	  29: GETL       	R19, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0xAC, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE868A4:  928100B0  stw r20,176(r1)
+	  34: GETL       	R20, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0xB0, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFE868A8:  92A100B4  stw r21,180(r1)
+	  39: GETL       	R21, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0xB4, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFE868AC:  92C100B8  stw r22,184(r1)
+	  44: GETL       	R22, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0xB8, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFE868B0:  92E100BC  stw r23,188(r1)
+	  49: GETL       	R23, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0xBC, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFE868B4:  930100C0  stw r24,192(r1)
+	  54: GETL       	R24, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0xC0, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0xFE868B8:  932100C4  stw r25,196(r1)
+	  59: GETL       	R25, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0xC4, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFE868BC:  934100C8  stw r26,200(r1)
+	  64: GETL       	R26, t50
+	  65: GETL       	R1, t52
+	  66: ADDL       	$0xC8, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0xFE868C0:  93A100D4  stw r29,212(r1)
+	  69: GETL       	R29, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0xD4, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0xFE868C4:  900100E4  stw r0,228(r1)
+	  74: GETL       	R0, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0xE4, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0xFE868C8:  4180020C  bc 12,0,0xFE86AD4
+	  79: Js00o       	$0xFE86AD4
+
+
+
+. 1590 FE86884 72
+. 93 61 00 CC 93 C1 00 D8 7C 9B 23 78 93 E1 00 DC 7F C8 02 A6 92 41 00 A8 7C 3F 0B 78 92 61 00 AC 92 81 00 B0 92 A1 00 B4 92 C1 00 B8 92 E1 00 BC 93 01 00 C0 93 21 00 C4 93 41 00 C8 93 A1 00 D4 90 01 00 E4 41 80 02 0C
+==== BB 1591 (0xFE868CC) approx BBs exec'd 0 ====
+
+	0xFE868CC:  2F9C000C  cmpi cr7,r28,12
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0xC, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE868D0:  419D0204  bc 12,29,0xFE86AD4
+	   5: Js29o       	$0xFE86AD4
+
+
+
+. 1591 FE868CC 8
+. 2F 9C 00 0C 41 9D 02 04
+==== BB 1592 (0xFE868D4) approx BBs exec'd 0 ====
+
+	0xFE868D4:  2C840000  cmpi cr1,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE868D8:  41860214  bc 12,6,0xFE86AEC
+	   4: Js06o       	$0xFE86AEC
+
+
+
+. 1592 FE868D4 8
+. 2C 84 00 00 41 86 02 14
+==== BB 1593 (0xFE868DC) approx BBs exec'd 0 ====
+
+	0xFE868DC:  831E1D78  lwz r24,7544(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1D78, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0xFE868E0:  579A103A  rlwinm r26,r28,2,0,29
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R26
+	   8: INCEIPL       	$4
+
+	0xFE868E4:  7C832378  or r3,r4,r4
+	   9: GETL       	R4, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0xFE868E8:  7CBAC214  add r5,r26,r24
+	  12: GETL       	R26, t8
+	  13: GETL       	R24, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0xFE868EC:  83A50040  lwz r29,64(r5)
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x40, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R29
+	  21: INCEIPL       	$4
+
+	0xFE868F0:  7FA4EB78  or r4,r29,r29
+	  22: GETL       	R29, t16
+	  23: PUTL       	t16, R4
+	  24: INCEIPL       	$4
+
+	0xFE868F4:  4804FABD  bl 0xFED63B0
+	  25: MOVL       	$0xFE868F8, t18
+	  26: PUTL       	t18, LR
+	  27: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 1593 FE868DC 28
+. 83 1E 1D 78 57 9A 10 3A 7C 83 23 78 7C BA C2 14 83 A5 00 40 7F A4 EB 78 48 04 FA BD
+==== BB 1594 strcmp(0xFED63B0) approx BBs exec'd 0 ====
+
+	0xFED63B0:  7C801B78  or r0,r4,r3
+	   0: GETL       	R4, t0
+	   1: GETL       	R3, t2
+	   2: ORL       	t2, t0
+	   3: PUTL       	t0, R0
+	   4: INCEIPL       	$4
+
+	0xFED63B4:  540007BF  rlwinm. r0,r0,0,30,31
+	   5: GETL       	R0, t4
+	   6: ANDL       	$0x3, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0xFED63B8:  3CE0FEFF  lis r7,-257
+	  11: MOVL       	$0xFEFF0000, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0xFED63BC:  40820094  bc 4,2,0xFED6450
+	  14: Jc02o       	$0xFED6450
+
+
+
+. 1594 FED63B0 16
+. 7C 80 1B 78 54 00 07 BF 3C E0 FE FF 40 82 00 94
+==== BB 1595 (0xFED63C0) approx BBs exec'd 0 ====
+
+	0xFED63C0:  80A30000  lwz r5,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFED63C4:  80C40000  lwz r6,0(r4)
+	   4: GETL       	R4, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R6
+	   7: INCEIPL       	$4
+
+	0xFED63C8:  3D007F7F  lis r8,32639
+	   8: MOVL       	$0x7F7F0000, t8
+	   9: PUTL       	t8, R8
+	  10: INCEIPL       	$4
+
+	0xFED63CC:  38E7FEFF  addi r7,r7,-257
+	  11: GETL       	R7, t10
+	  12: ADDL       	$0xFFFFFEFF, t10
+	  13: PUTL       	t10, R7
+	  14: INCEIPL       	$4
+
+	0xFED63D0:  39087F7F  addi r8,r8,32639
+	  15: MOVL       	$0x7F7F7F7F, t12
+	  16: PUTL       	t12, R8
+	  17: INCEIPL       	$4
+
+	0xFED63D4:  48000010  b 0xFED63E4
+	  18: JMPo       	$0xFED63E4  ($4)
+
+
+
+. 1595 FED63C0 24
+. 80 A3 00 00 80 C4 00 00 3D 00 7F 7F 38 E7 FE FF 39 08 7F 7F 48 00 00 10
+==== BB 1596 (0xFED63E4) approx BBs exec'd 0 ====
+
+	0xFED63E4:  7C072A14  add r0,r7,r5
+	   0: GETL       	R7, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED63E8:  7D0928F8  nor r9,r8,r5
+	   5: GETL       	R8, t4
+	   6: GETL       	R5, t6
+	   7: ORL       	t6, t4
+	   8: NOTL       	t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0xFED63EC:  7C004839  and. r0,r0,r9
+	  11: GETL       	R0, t8
+	  12: GETL       	R9, t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R0
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0xFED63F0:  7C853000  cmp cr1,r5,r6
+	  18: GETL       	R5, t14
+	  19: GETL       	R6, t16
+	  20: CMPL       	t14, t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0xFED63F4:  4182FFE4  bc 12,2,0xFED63D8
+	  23: Js02o       	$0xFED63D8
+
+
+
+. 1596 FED63E4 20
+. 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+==== BB 1597 (0xFED63F8) approx BBs exec'd 0 ====
+
+	0xFED63F8:  7D002838  and r0,r8,r5
+	   0: GETL       	R8, t0
+	   1: GETL       	R5, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED63FC:  4186002C  bc 12,6,0xFED6428
+	   5: Js06o       	$0xFED6428
+
+
+
+. 1597 FED63F8 8
+. 7D 00 28 38 41 86 00 2C
+==== BB 1598 (0xFED6400) approx BBs exec'd 0 ====
+
+	0xFED6400:  7C004214  add r0,r0,r8
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED6404:  7CAA3279  xor. r10,r5,r6
+	   5: GETL       	R5, t4
+	   6: GETL       	R6, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFED6408:  7D290078  andc r9,r9,r0
+	  12: GETL       	R9, t10
+	  13: GETL       	R0, t12
+	  14: NOTL       	t12
+	  15: ANDL       	t10, t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0xFED640C:  41800034  bc 12,0,0xFED6440
+	  18: Js00o       	$0xFED6440
+
+
+
+. 1598 FED6400 16
+. 7C 00 42 14 7C AA 32 79 7D 29 00 78 41 80 00 34
+==== BB 1599 (0xFED6410) approx BBs exec'd 0 ====
+
+	0xFED6410:  7D4A0034  cntlzw r10,r10
+	   0: GETL       	R10, t0
+	   1: CNTLZL       	t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0xFED6414:  7D290034  cntlzw r9,r9
+	   4: GETL       	R9, t2
+	   5: CNTLZL       	t2
+	   6: PUTL       	t2, R9
+	   7: INCEIPL       	$4
+
+	0xFED6418:  39290007  addi r9,r9,7
+	   8: GETL       	R9, t4
+	   9: ADDL       	$0x7, t4
+	  10: PUTL       	t4, R9
+	  11: INCEIPL       	$4
+
+	0xFED641C:  7C895000  cmp cr1,r9,r10
+	  12: GETL       	R9, t6
+	  13: GETL       	R10, t8
+	  14: CMPL       	t6, t8, t10  (-rSo)
+	  15: ICRFL       	t10, $0x1, CR
+	  16: INCEIPL       	$4
+
+	0xFED6420:  7C662850  subf r3,r6,r5
+	  17: GETL       	R6, t12
+	  18: GETL       	R5, t14
+	  19: SUBL       	t12, t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0xFED6424:  4CA40020  bclr 5,4
+	  22: GETL       	LR, t16
+	  23: Jc04o-r       	t16
+
+
+
+. 1599 FED6410 24
+. 7D 4A 00 34 7D 29 00 34 39 29 00 07 7C 89 50 00 7C 66 28 50 4C A4 00 20
+==== BB 1600 (0xFE868F8) approx BBs exec'd 0 ====
+
+	0xFE868F8:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE868FC:  7FA3EB78  or r3,r29,r29
+	   4: GETL       	R29, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFE86900:  409A0050  bc 4,26,0xFE86950
+	   7: Jc26o       	$0xFE86950
+
+
+
+. 1600 FE868F8 12
+. 2F 03 00 00 7F A3 EB 78 40 9A 00 50
+==== BB 1601 (0xFE86950) approx BBs exec'd 0 ====
+
+	0xFE86950:  807E0100  lwz r3,256(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x100, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE86954:  38C00000  li r6,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0xFE86958:  90DF0094  stw r6,148(r31)
+	   8: GETL       	R6, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0x94, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE8695C:  90DF0090  stw r6,144(r31)
+	  13: GETL       	R6, t10
+	  14: GETL       	R31, t12
+	  15: ADDL       	$0x90, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE86960:  4800C839  bl 0xFE93198
+	  18: MOVL       	$0xFE86964, t14
+	  19: PUTL       	t14, LR
+	  20: JMPo-c       	$0xFE93198  ($4)
+
+
+
+. 1601 FE86950 20
+. 80 7E 01 00 38 C0 00 00 90 DF 00 94 90 DF 00 90 48 00 C8 39
+==== BB 1602 (0xFE932C0) approx BBs exec'd 0 ====
+
+	0xFE932C0:  48043AA1  bl 0xFED6D60
+	   0: MOVL       	$0xFE932C4, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFED6D60  ($4)
+
+
+
+. 1602 FE932C0 4
+. 48 04 3A A1
+==== BB 1603 strncmp(0xFED6D60) approx BBs exec'd 0 ====
+
+	0xFED6D60:  7C001A2C  dcbt r0,r3
+	   0: INCEIPL       	$4
+
+	0xFED6D64:  7C801B78  or r0,r4,r3
+	   1: GETL       	R4, t0
+	   2: GETL       	R3, t2
+	   3: ORL       	t2, t0
+	   4: PUTL       	t0, R0
+	   5: INCEIPL       	$4
+
+	0xFED6D68:  3D207F7F  lis r9,32639
+	   6: MOVL       	$0x7F7F0000, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xFED6D6C:  7C00222C  dcbt r0,r4
+	   9: INCEIPL       	$4
+
+	0xFED6D70:  540007BF  rlwinm. r0,r0,0,30,31
+	  10: GETL       	R0, t6
+	  11: ANDL       	$0x3, t6
+	  12: PUTL       	t6, R0
+	  13: CMP0L       	t6, t8  (-rSo)
+	  14: ICRFL       	t8, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFED6D74:  3D00FEFF  lis r8,-257
+	  16: MOVL       	$0xFEFF0000, t10
+	  17: PUTL       	t10, R8
+	  18: INCEIPL       	$4
+
+	0xFED6D78:  408200E0  bc 4,2,0xFED6E58
+	  19: Jc02o       	$0xFED6E58
+
+
+
+. 1603 FED6D60 28
+. 7C 00 1A 2C 7C 80 1B 78 3D 20 7F 7F 7C 00 22 2C 54 00 07 BF 3D 00 FE FF 40 82 00 E0
+==== BB 1604 (0xFED6E58) approx BBs exec'd 0 ====
+
+	0xFED6E58:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0xFED6E5C:  2C050000  cmpi cr0,r5,0
+	   3: GETL       	R5, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFED6E60:  88C30000  lbz r6,0(r3)
+	   7: GETL       	R3, t6
+	   8: LDB       	(t6), t8
+	   9: PUTL       	t8, R6
+	  10: INCEIPL       	$4
+
+	0xFED6E64:  88E40000  lbz r7,0(r4)
+	  11: GETL       	R4, t10
+	  12: LDB       	(t10), t12
+	  13: PUTL       	t12, R7
+	  14: INCEIPL       	$4
+
+	0xFED6E68:  41810010  bc 12,1,0xFED6E78
+	  15: Js01o       	$0xFED6E78
+
+
+
+. 1604 FED6E58 20
+. 7C A9 03 A6 2C 05 00 00 88 C3 00 00 88 E4 00 00 41 81 00 10
+==== BB 1605 (0xFED6E78) approx BBs exec'd 0 ====
+
+	0xFED6E78:  42400038  bc 18,0,0xFED6EB0
+	   0: GETL       	CTR, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, CTR
+	   3: SETZL       	t0, NoValue
+	   4: JIFZL       	t0, $0xFED6E7C
+	   5: JMPo       	$0xFED6EB0  ($4)
+
+
+
+. 1605 FED6E78 4
+. 42 40 00 38
+==== BB 1606 (0xFED6E7C) approx BBs exec'd 0 ====
+
+	0xFED6E7C:  2C860000  cmpi cr1,r6,0
+	   0: GETL       	R6, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFED6E80:  7C063800  cmp cr0,r6,r7
+	   4: GETL       	R6, t4
+	   5: GETL       	R7, t6
+	   6: CMPL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFED6E84:  4186002C  bc 12,6,0xFED6EB0
+	   9: Js06o       	$0xFED6EB0
+
+
+
+. 1606 FED6E7C 12
+. 2C 86 00 00 7C 06 38 00 41 86 00 2C
+==== BB 1607 (0xFED6E88) approx BBs exec'd 0 ====
+
+	0xFED6E88:  8CC30001  lbzu r6,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFED6E8C:  40820020  bc 4,2,0xFED6EAC
+	   6: Jc02o       	$0xFED6EAC
+
+
+
+. 1607 FED6E88 8
+. 8C C3 00 01 40 82 00 20
+==== BB 1608 (0xFED6EAC) approx BBs exec'd 0 ====
+
+	0xFED6EAC:  8CC3FFFF  lbzu r6,-1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFED6EB0:  7C673050  subf r3,r7,r6
+	   6: GETL       	R7, t4
+	   7: GETL       	R6, t6
+	   8: SUBL       	t4, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFED6EB4:  4E800020  blr
+	  11: GETL       	LR, t8
+	  12: JMPo-r       	t8  ($4)
+
+
+
+. 1608 FED6EAC 12
+. 8C C3 FF FF 7C 67 30 50 4E 80 00 20
+==== BB 1609 (0xFE932C4) approx BBs exec'd 0 ====
+
+	0xFE932C4:  7D3FE214  add r9,r31,r28
+	   0: GETL       	R31, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE932C8:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFE932CC:  40BEFFC4  bc 5,30,0xFE93290
+	   9: Jc30o       	$0xFE93290
+
+
+
+. 1609 FE932C4 12
+. 7D 3F E2 14 2F 83 00 00 40 BE FF C4
+==== BB 1610 (0xFE86964) approx BBs exec'd 0 ====
+
+	0xFE86964:  7C691B79  or. r9,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R9
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE86968:  41820010  bc 12,2,0xFE86978
+	   5: Js02o       	$0xFE86978
+
+
+
+. 1610 FE86964 8
+. 7C 69 1B 79 41 82 00 10
+==== BB 1611 (0xFE86978) approx BBs exec'd 0 ====
+
+	0xFE86978:  2C1C0006  cmpi cr0,r28,6
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8697C:  41820184  bc 12,2,0xFE86B00
+	   5: Js02o       	$0xFE86B00
+
+
+
+. 1611 FE86978 8
+. 2C 1C 00 06 41 82 01 84
+==== BB 1612 (0xFE86B00) approx BBs exec'd 0 ====
+
+	0xFE86B00:  3900000D  li r8,13
+	   0: MOVL       	$0xD, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0xFE86B04:  39600000  li r11,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0xFE86B08:  7D0903A6  mtctr r8
+	   6: GETL       	R8, t4
+	   7: PUTL       	t4, CTR
+	   8: INCEIPL       	$4
+
+	0xFE86B0C:  393F0010  addi r9,r31,16
+	   9: GETL       	R31, t6
+	  10: ADDL       	$0x10, t6
+	  11: PUTL       	t6, R9
+	  12: INCEIPL       	$4
+
+	0xFE86B10:  2F8B0006  cmpi cr7,r11,6
+	  13: GETL       	R11, t8
+	  14: MOVL       	$0x6, t12
+	  15: CMPL       	t8, t12, t10  (-rSo)
+	  16: ICRFL       	t10, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0xFE86B14:  396B0001  addi r11,r11,1
+	  18: MOVL       	$0x1, t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0xFE86B18:  419E0008  bc 12,30,0xFE86B20
+	  21: Js30o       	$0xFE86B20
+
+
+
+. 1612 FE86B00 28
+. 39 00 00 0D 39 60 00 00 7D 09 03 A6 39 3F 00 10 2F 8B 00 06 39 6B 00 01 41 9E 00 08
+==== BB 1613 (0xFE86B1C) approx BBs exec'd 0 ====
+
+	0xFE86B1C:  93690000  stw r27,0(r9)
+	   0: GETL       	R27, t0
+	   1: GETL       	R9, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE86B20:  39290004  addi r9,r9,4
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFE86B24:  4200FFEC  bc 16,0,0xFE86B10
+	   8: GETL       	CTR, t6
+	   9: ADDL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, CTR
+	  11: JIFZL       	t6, $0xFE86B28
+	  12: JMPo       	$0xFE86B10  ($4)
+
+
+
+. 1613 FE86B1C 12
+. 93 69 00 00 39 29 00 04 42 00 FF EC
+==== BB 1614 (0xFE86B10) approx BBs exec'd 0 ====
+
+	0xFE86B10:  2F8B0006  cmpi cr7,r11,6
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE86B14:  396B0001  addi r11,r11,1
+	   5: GETL       	R11, t6
+	   6: ADDL       	$0x1, t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0xFE86B18:  419E0008  bc 12,30,0xFE86B20
+	   9: Js30o       	$0xFE86B20
+
+
+
+. 1614 FE86B10 12
+. 2F 8B 00 06 39 6B 00 01 41 9E 00 08
+==== BB 1615 (0xFE86B20) approx BBs exec'd 0 ====
+
+	0xFE86B20:  39290004  addi r9,r9,4
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFE86B24:  4200FFEC  bc 16,0,0xFE86B10
+	   4: GETL       	CTR, t2
+	   5: ADDL       	$0xFFFFFFFF, t2
+	   6: PUTL       	t2, CTR
+	   7: JIFZL       	t2, $0xFE86B28
+	   8: JMPo       	$0xFE86B10  ($4)
+
+
+
+. 1615 FE86B20 8
+. 39 29 00 04 42 00 FF EC
+==== BB 1616 (0xFE86B28) approx BBs exec'd 0 ====
+
+	0xFE86B28:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE86B2C:  3880003B  li r4,59
+	   3: MOVL       	$0x3B, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE86B30:  4804F79D  bl 0xFED62CC
+	   6: MOVL       	$0xFE86B34, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED62CC  ($4)
+
+
+
+. 1616 FE86B28 12
+. 7F 63 DB 78 38 80 00 3B 48 04 F7 9D
+==== BB 1617 (0xFE86B34) approx BBs exec'd 0 ====
+
+	0xFE86B34:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE86B38:  408601F8  bc 4,6,0xFE86D30
+	   4: Jc06o       	$0xFE86D30
+
+
+
+. 1617 FE86B34 8
+. 2C 83 00 00 40 86 01 F8
+==== BB 1618 (0xFE86B3C) approx BBs exec'd 0 ====
+
+	0xFE86B3C:  833E1B94  lwz r25,7060(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B94, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0xFE86B40:  3B400000  li r26,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xFE86B44:  3B600001  li r27,1
+	   8: MOVL       	$0x1, t6
+	   9: PUTL       	t6, R27
+	  10: INCEIPL       	$4
+
+	0xFE86B48:  7EE0C828  lwarx r23,r0,r25
+	  11: GETL       	R25, t8
+	  12: LOCKo       	
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R23
+	  15: INCEIPL       	$4
+
+	0xFE86B4C:  7C17D000  cmp cr0,r23,r26
+	  16: GETL       	R23, t12
+	  17: GETL       	R26, t14
+	  18: CMPL       	t12, t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0xFE86B50:  4082000C  bc 4,2,0xFE86B5C
+	  21: Jc02o       	$0xFE86B5C
+
+
+
+. 1618 FE86B3C 24
+. 83 3E 1B 94 3B 40 00 00 3B 60 00 01 7E E0 C8 28 7C 17 D0 00 40 82 00 0C
+==== BB 1619 (0xFE86B54) approx BBs exec'd 0 ====
+
+	0xFE86B54:  7F60C92D  stwcx. r27,r0,r25
+	   0: GETL       	R25, t0
+	   1: GETL       	R27, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE86B58:  40A2FFF0  bc 5,2,0xFE86B48
+	   6: Jc02o       	$0xFE86B48
+
+
+
+. 1619 FE86B54 8
+. 7F 60 C9 2D 40 A2 FF F0
+==== BB 1620 (0xFE86B48) approx BBs exec'd 0 ====
+
+	0xFE86B48:  7EE0C828  lwarx r23,r0,r25
+	   0: GETL       	R25, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFE86B4C:  7C17D000  cmp cr0,r23,r26
+	   5: GETL       	R23, t4
+	   6: GETL       	R26, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE86B50:  4082000C  bc 4,2,0xFE86B5C
+	  10: Jc02o       	$0xFE86B5C
+
+
+
+. 1620 FE86B48 12
+. 7E E0 C8 28 7C 17 D0 00 40 82 00 0C
+==== BB 1621 (0xFE86B5C) approx BBs exec'd 0 ====
+
+	0xFE86B5C:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFE86B60:  2F170000  cmpi cr6,r23,0
+	   1: GETL       	R23, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFE86B64:  409A0384  bc 4,26,0xFE86EE8
+	   5: Jc26o       	$0xFE86EE8
+
+
+
+. 1621 FE86B5C 12
+. 4C 00 01 2C 2F 17 00 00 40 9A 03 84
+==== BB 1622 (0xFE86B68) approx BBs exec'd 0 ====
+
+	0xFE86B68:  82BE1CB8  lwz r21,7352(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CB8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0xFE86B6C:  3B80000C  li r28,12
+	   5: MOVL       	$0xC, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFE86B70:  3ADF0008  addi r22,r31,8
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0x8, t6
+	  10: PUTL       	t6, R22
+	  11: INCEIPL       	$4
+
+	0xFE86B74:  3AFF0010  addi r23,r31,16
+	  12: GETL       	R31, t8
+	  13: ADDL       	$0x10, t8
+	  14: PUTL       	t8, R23
+	  15: INCEIPL       	$4
+
+	0xFE86B78:  48000018  b 0xFE86B90
+	  16: JMPo       	$0xFE86B90  ($4)
+
+
+
+. 1622 FE86B68 20
+. 82 BE 1C B8 3B 80 00 0C 3A DF 00 08 3A FF 00 10 48 00 00 18
+==== BB 1623 (0xFE86B90) approx BBs exec'd 0 ====
+
+	0xFE86B90:  2C1C0006  cmpi cr0,r28,6
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE86B94:  579D103A  rlwinm r29,r28,2,0,29
+	   5: GETL       	R28, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R29
+	   8: INCEIPL       	$4
+
+	0xFE86B98:  7F85E378  or r5,r28,r28
+	   9: GETL       	R28, t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0xFE86B9C:  7CD7EA14  add r6,r23,r29
+	  12: GETL       	R23, t10
+	  13: GETL       	R29, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R6
+	  16: INCEIPL       	$4
+
+	0xFE86BA0:  7F7DB214  add r27,r29,r22
+	  17: GETL       	R29, t14
+	  18: GETL       	R22, t16
+	  19: ADDL       	t14, t16
+	  20: PUTL       	t16, R27
+	  21: INCEIPL       	$4
+
+	0xFE86BA4:  41A2FFDC  bc 13,2,0xFE86B80
+	  22: Js02o       	$0xFE86B80
+
+
+
+. 1623 FE86B90 24
+. 2C 1C 00 06 57 9D 10 3A 7F 85 E3 78 7C D7 EA 14 7F 7D B2 14 41 A2 FF DC
+==== BB 1624 (0xFE86BA8) approx BBs exec'd 0 ====
+
+	0xFE86BA8:  809F0094  lwz r4,148(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x94, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE86BAC:  807F0090  lwz r3,144(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x90, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFE86BB0:  48000345  bl 0xFE86EF4
+	  10: MOVL       	$0xFE86BB4, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0xFE86EF4  ($4)
+
+
+
+. 1624 FE86BA8 12
+. 80 9F 00 94 80 7F 00 90 48 00 03 45
+==== BB 1625 _nl_find_locale(0xFE86EF4) approx BBs exec'd 0 ====
+
+	0xFE86EF4:  7CE802A6  mflr r7
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFE86EF8:  9421FFA0  stwu r1,-96(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFA0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFE86EFC:  93A10054  stw r29,84(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x54, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE86F00:  48120F51  bl 0xFFA7E50
+	  14: MOVL       	$0xFE86F04, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1625 FE86EF4 16
+. 7C E8 02 A6 94 21 FF A0 93 A1 00 54 48 12 0F 51
+==== BB 1626 (0xFE86F04) approx BBs exec'd 0 ====
+
+	0xFE86F04:  93010040  stw r24,64(r1)
+	   0: GETL       	R24, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x40, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE86F08:  7C982378  or r24,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R24
+	   7: INCEIPL       	$4
+
+	0xFE86F0C:  90E10064  stw r7,100(r1)
+	   8: GETL       	R7, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x64, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE86F10:  83A60000  lwz r29,0(r6)
+	  13: GETL       	R6, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R29
+	  16: INCEIPL       	$4
+
+	0xFE86F14:  93210044  stw r25,68(r1)
+	  17: GETL       	R25, t14
+	  18: GETL       	R1, t16
+	  19: ADDL       	$0x44, t16
+	  20: STL       	t14, (t16)
+	  21: INCEIPL       	$4
+
+	0xFE86F18:  7CB92B78  or r25,r5,r5
+	  22: GETL       	R5, t18
+	  23: PUTL       	t18, R25
+	  24: INCEIPL       	$4
+
+	0xFE86F1C:  9361004C  stw r27,76(r1)
+	  25: GETL       	R27, t20
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0x4C, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0xFE86F20:  7C7B1B78  or r27,r3,r3
+	  30: GETL       	R3, t24
+	  31: PUTL       	t24, R27
+	  32: INCEIPL       	$4
+
+	0xFE86F24:  93810050  stw r28,80(r1)
+	  33: GETL       	R28, t26
+	  34: GETL       	R1, t28
+	  35: ADDL       	$0x50, t28
+	  36: STL       	t26, (t28)
+	  37: INCEIPL       	$4
+
+	0xFE86F28:  7CDC3378  or r28,r6,r6
+	  38: GETL       	R6, t30
+	  39: PUTL       	t30, R28
+	  40: INCEIPL       	$4
+
+	0xFE86F2C:  93C10058  stw r30,88(r1)
+	  41: GETL       	R30, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x58, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0xFE86F30:  7FC802A6  mflr r30
+	  46: GETL       	LR, t36
+	  47: PUTL       	t36, R30
+	  48: INCEIPL       	$4
+
+	0xFE86F34:  93E1005C  stw r31,92(r1)
+	  49: GETL       	R31, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x5C, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFE86F38:  7C3F0B78  or r31,r1,r1
+	  54: GETL       	R1, t42
+	  55: PUTL       	t42, R31
+	  56: INCEIPL       	$4
+
+	0xFE86F3C:  92C10038  stw r22,56(r1)
+	  57: GETL       	R22, t44
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x38, t46
+	  60: STL       	t44, (t46)
+	  61: INCEIPL       	$4
+
+	0xFE86F40:  92E1003C  stw r23,60(r1)
+	  62: GETL       	R23, t48
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x3C, t50
+	  65: STL       	t48, (t50)
+	  66: INCEIPL       	$4
+
+	0xFE86F44:  93410048  stw r26,72(r1)
+	  67: GETL       	R26, t52
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x48, t54
+	  70: STL       	t52, (t54)
+	  71: INCEIPL       	$4
+
+	0xFE86F48:  881D0000  lbz r0,0(r29)
+	  72: GETL       	R29, t56
+	  73: LDB       	(t56), t58
+	  74: PUTL       	t58, R0
+	  75: INCEIPL       	$4
+
+	0xFE86F4C:  2F800000  cmpi cr7,r0,0
+	  76: GETL       	R0, t60
+	  77: CMP0L       	t60, t62  (-rSo)
+	  78: ICRFL       	t62, $0x7, CR
+	  79: INCEIPL       	$4
+
+	0xFE86F50:  419E00C8  bc 12,30,0xFE87018
+	  80: Js30o       	$0xFE87018
+
+
+
+. 1626 FE86F04 80
+. 93 01 00 40 7C 98 23 78 90 E1 00 64 83 A6 00 00 93 21 00 44 7C B9 2B 78 93 61 00 4C 7C 7B 1B 78 93 81 00 50 7C DC 33 78 93 C1 00 58 7F C8 02 A6 93 E1 00 5C 7C 3F 0B 78 92 C1 00 38 92 E1 00 3C 93 41 00 48 88 1D 00 00 2F 80 00 00 41 9E 00 C8
+==== BB 1627 (0xFE87018) approx BBs exec'd 0 ====
+
+	0xFE87018:  807E0108  lwz r3,264(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x108, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8701C:  4800C17D  bl 0xFE93198
+	   5: MOVL       	$0xFE87020, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xFE93198  ($4)
+
+
+
+. 1627 FE87018 8
+. 80 7E 01 08 48 00 C1 7D
+==== BB 1628 (0xFE87020) approx BBs exec'd 0 ====
+
+	0xFE87020:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE87024:  7C7D1B78  or r29,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R29
+	   6: INCEIPL       	$4
+
+	0xFE87028:  907C0000  stw r3,0(r28)
+	   7: GETL       	R3, t6
+	   8: GETL       	R28, t8
+	   9: STL       	t6, (t8)
+	  10: INCEIPL       	$4
+
+	0xFE8702C:  41820038  bc 12,2,0xFE87064
+	  11: Js02o       	$0xFE87064
+
+
+
+. 1628 FE87020 16
+. 2C 03 00 00 7C 7D 1B 78 90 7C 00 00 41 82 00 38
+==== BB 1629 (0xFE87064) approx BBs exec'd 0 ====
+
+	0xFE87064:  813E1DC4  lwz r9,7620(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DC4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE87068:  5724103A  rlwinm r4,r25,2,0,29
+	   5: GETL       	R25, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFE8706C:  7C64482E  lwzx r3,r4,r9
+	   9: GETL       	R9, t6
+	  10: GETL       	R4, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0xFE87070:  4800C129  bl 0xFE93198
+	  15: MOVL       	$0xFE87074, t12
+	  16: PUTL       	t12, LR
+	  17: JMPo-c       	$0xFE93198  ($4)
+
+
+
+. 1629 FE87064 16
+. 81 3E 1D C4 57 24 10 3A 7C 64 48 2E 48 00 C1 29
+==== BB 1630 (0xFE87074) approx BBs exec'd 0 ====
+
+	0xFE87074:  7C7D1B78  or r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFE87078:  907C0000  stw r3,0(r28)
+	   3: GETL       	R3, t2
+	   4: GETL       	R28, t4
+	   5: STL       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFE8707C:  4BFFFFC0  b 0xFE8703C
+	   7: JMPo       	$0xFE8703C  ($4)
+
+
+
+. 1630 FE87074 12
+. 7C 7D 1B 78 90 7C 00 00 4B FF FF C0
+==== BB 1631 (0xFE8703C) approx BBs exec'd 0 ====
+
+	0xFE8703C:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE87040:  419A0010  bc 12,26,0xFE87050
+	   4: Js26o       	$0xFE87050
+
+
+
+. 1631 FE8703C 8
+. 2F 1D 00 00 41 9A 00 10
+==== BB 1632 (0xFE87050) approx BBs exec'd 0 ====
+
+	0xFE87050:  807E010C  lwz r3,268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x10C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE87054:  4800C145  bl 0xFE93198
+	   5: MOVL       	$0xFE87058, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xFE93198  ($4)
+
+
+
+. 1632 FE87050 8
+. 80 7E 01 0C 48 00 C1 45
+==== BB 1633 (0xFED6E90) approx BBs exec'd 0 ====
+
+	0xFED6E90:  8CE40001  lbzu r7,1(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R4
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0xFED6E94:  4240001C  bc 18,0,0xFED6EB0
+	   6: GETL       	CTR, t4
+	   7: ADDL       	$0xFFFFFFFF, t4
+	   8: PUTL       	t4, CTR
+	   9: SETZL       	t4, NoValue
+	  10: JIFZL       	t4, $0xFED6E98
+	  11: JMPo       	$0xFED6EB0  ($4)
+
+
+
+. 1633 FED6E90 8
+. 8C E4 00 01 42 40 00 1C
+==== BB 1634 (0xFED6EB0) approx BBs exec'd 0 ====
+
+	0xFED6EB0:  7C673050  subf r3,r7,r6
+	   0: GETL       	R7, t0
+	   1: GETL       	R6, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFED6EB4:  4E800020  blr
+	   5: GETL       	LR, t4
+	   6: JMPo-r       	t4  ($4)
+
+
+
+. 1634 FED6EB0 8
+. 7C 67 30 50 4E 80 00 20
+==== BB 1635 (0xFE932D0) approx BBs exec'd 0 ====
+
+	0xFE932D0:  8BE90002  lbz r31,2(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x2, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFE932D4:  2C1F003D  cmpi cr0,r31,61
+	   5: GETL       	R31, t4
+	   6: MOVL       	$0x3D, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE932D8:  4082FFB8  bc 4,2,0xFE93290
+	  10: Jc02o       	$0xFE93290
+
+
+
+. 1635 FE932D0 12
+. 8B E9 00 02 2C 1F 00 3D 40 82 FF B8
+==== BB 1636 (0xFE932DC) approx BBs exec'd 0 ====
+
+	0xFE932DC:  38690003  addi r3,r9,3
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x3, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE932E0:  4BFFFF0C  b 0xFE931EC
+	   4: JMPo       	$0xFE931EC  ($4)
+
+
+
+. 1636 FE932DC 8
+. 38 69 00 03 4B FF FF 0C
+==== BB 1637 (0xFE931EC) approx BBs exec'd 0 ====
+
+	0xFE931EC:  83410024  lwz r26,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFE931F0:  8361000C  lwz r27,12(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R27
+	   9: INCEIPL       	$4
+
+	0xFE931F4:  7F4803A6  mtlr r26
+	  10: GETL       	R26, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFE931F8:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0xFE931FC:  83410008  lwz r26,8(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x8, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R26
+	  22: INCEIPL       	$4
+
+	0xFE93200:  83A10014  lwz r29,20(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x14, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R29
+	  27: INCEIPL       	$4
+
+	0xFE93204:  83C10018  lwz r30,24(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x18, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R30
+	  32: INCEIPL       	$4
+
+	0xFE93208:  83E1001C  lwz r31,28(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x1C, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R31
+	  37: INCEIPL       	$4
+
+	0xFE9320C:  38210020  addi r1,r1,32
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x20, t30
+	  40: PUTL       	t30, R1
+	  41: INCEIPL       	$4
+
+	0xFE93210:  4E800020  blr
+	  42: GETL       	LR, t32
+	  43: JMPo-r       	t32  ($4)
+
+
+
+. 1637 FE931EC 40
+. 83 41 00 24 83 61 00 0C 7F 48 03 A6 83 81 00 10 83 41 00 08 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1638 (0xFE87058) approx BBs exec'd 0 ====
+
+	0xFE87058:  7C7D1B78  or r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFE8705C:  907C0000  stw r3,0(r28)
+	   3: GETL       	R3, t2
+	   4: GETL       	R28, t4
+	   5: STL       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFE87060:  4BFFFEF4  b 0xFE86F54
+	   7: JMPo       	$0xFE86F54  ($4)
+
+
+
+. 1638 FE87058 12
+. 7C 7D 1B 78 90 7C 00 00 4B FF FE F4
+==== BB 1639 (0xFE86F54) approx BBs exec'd 0 ====
+
+	0xFE86F54:  2C1D0000  cmpi cr0,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE86F58:  41820010  bc 12,2,0xFE86F68
+	   4: Js02o       	$0xFE86F68
+
+
+
+. 1639 FE86F54 8
+. 2C 1D 00 00 41 82 00 10
+==== BB 1640 (0xFE86F5C) approx BBs exec'd 0 ====
+
+	0xFE86F5C:  88DD0000  lbz r6,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFE86F60:  2C860000  cmpi cr1,r6,0
+	   4: GETL       	R6, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFE86F64:  4086008C  bc 4,6,0xFE86FF0
+	   8: Jc06o       	$0xFE86FF0
+
+
+
+. 1640 FE86F5C 12
+. 88 DD 00 00 2C 86 00 00 40 86 00 8C
+==== BB 1641 (0xFE86FF0) approx BBs exec'd 0 ====
+
+	0xFE86FF0:  815E1D70  lwz r10,7536(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1D70, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFE86FF4:  810A0000  lwz r8,0(r10)
+	   5: GETL       	R10, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R8
+	   8: INCEIPL       	$4
+
+	0xFE86FF8:  2F080000  cmpi cr6,r8,0
+	   9: GETL       	R8, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0xFE86FFC:  419AFF74  bc 12,26,0xFE86F70
+	  13: Js26o       	$0xFE86F70
+
+
+
+. 1641 FE86FF0 16
+. 81 5E 1D 70 81 0A 00 00 2F 08 00 00 41 9A FF 74
+==== BB 1642 (0xFE86F70) approx BBs exec'd 0 ====
+
+	0xFE86F70:  809E1CB8  lwz r4,7352(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CB8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE86F74:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE86F78:  4804F439  bl 0xFED63B0
+	   8: MOVL       	$0xFE86F7C, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 1642 FE86F70 12
+. 80 9E 1C B8 7F A3 EB 78 48 04 F4 39
+==== BB 1643 (0xFED6450) approx BBs exec'd 0 ====
+
+	0xFED6450:  88A30000  lbz r5,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFED6454:  88C40000  lbz r6,0(r4)
+	   4: GETL       	R4, t4
+	   5: LDB       	(t4), t6
+	   6: PUTL       	t6, R6
+	   7: INCEIPL       	$4
+
+	0xFED6458:  48000010  b 0xFED6468
+	   8: JMPo       	$0xFED6468  ($4)
+
+
+
+. 1643 FED6450 12
+. 88 A3 00 00 88 C4 00 00 48 00 00 10
+==== BB 1644 (0xFED6468) approx BBs exec'd 0 ====
+
+	0xFED6468:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFED646C:  41860020  bc 12,6,0xFED648C
+	   4: Js06o       	$0xFED648C
+
+
+
+. 1644 FED6468 8
+. 2C 85 00 00 41 86 00 20
+==== BB 1645 (0xFED6470) approx BBs exec'd 0 ====
+
+	0xFED6470:  7C053000  cmp cr0,r5,r6
+	   0: GETL       	R5, t0
+	   1: GETL       	R6, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFED6474:  40820018  bc 4,2,0xFED648C
+	   5: Jc02o       	$0xFED648C
+
+
+
+. 1645 FED6470 8
+. 7C 05 30 00 40 82 00 18
+==== BB 1646 (0xFED648C) approx BBs exec'd 0 ====
+
+	0xFED648C:  7C662850  subf r3,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFED6490:  4E800020  blr
+	   5: GETL       	LR, t4
+	   6: JMPo-r       	t4  ($4)
+
+
+
+. 1646 FED648C 8
+. 7C 66 28 50 4E 80 00 20
+==== BB 1647 (0xFE86F7C) approx BBs exec'd 0 ====
+
+	0xFE86F7C:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE86F80:  41820100  bc 12,2,0xFE87080
+	   4: Js02o       	$0xFE87080
+
+
+
+. 1647 FE86F7C 8
+. 2C 03 00 00 41 82 01 00
+==== BB 1648 (0xFE86F84) approx BBs exec'd 0 ====
+
+	0xFE86F84:  809E1CC8  lwz r4,7368(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE86F88:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE86F8C:  4804F425  bl 0xFED63B0
+	   8: MOVL       	$0xFE86F90, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 1648 FE86F84 12
+. 80 9E 1C C8 7F A3 EB 78 48 04 F4 25
+==== BB 1649 (0xFE86F90) approx BBs exec'd 0 ====
+
+	0xFE86F90:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE86F94:  418600EC  bc 12,6,0xFE87080
+	   4: Js06o       	$0xFE87080
+
+
+
+. 1649 FE86F90 8
+. 2C 83 00 00 41 86 00 EC
+==== BB 1650 (0xFE86F98) approx BBs exec'd 0 ====
+
+	0xFE86F98:  2F1B0000  cmpi cr6,r27,0
+	   0: GETL       	R27, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE86F9C:  409A0140  bc 4,26,0xFE870DC
+	   4: Jc26o       	$0xFE870DC
+
+
+
+. 1650 FE86F98 8
+. 2F 1B 00 00 40 9A 01 40
+==== BB 1651 (0xFE86FA0) approx BBs exec'd 0 ====
+
+	0xFE86FA0:  7F23CB78  or r3,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE86FA4:  7F84E378  or r4,r28,r28
+	   3: GETL       	R28, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE86FA8:  48000C49  bl 0xFE87BF0
+	   6: MOVL       	$0xFE86FAC, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFE87BF0  ($4)
+
+
+
+. 1651 FE86FA0 12
+. 7F 23 CB 78 7F 84 E3 78 48 00 0C 49
+==== BB 1652 _nl_load_locale_from_archive(0xFE87BF0) approx BBs exec'd 0 ====
+
+	0xFE87BF0:  9421FDF0  stwu r1,-528(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFDF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE87BF4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE87BF8:  48120259  bl 0xFFA7E50
+	   9: MOVL       	$0xFE87BFC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1652 FE87BF0 12
+. 94 21 FD F0 7C 08 02 A6 48 12 02 59
+==== BB 1653 (0xFE87BFC) approx BBs exec'd 0 ====
+
+	0xFE87BFC:  93C10208  stw r30,520(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x208, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE87C00:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE87C04:  7D800026  mfcr r12
+	   8: GETL       	CR, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0xFE87C08:  91E101CC  stw r15,460(r1)
+	  11: GETL       	R15, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x1CC, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFE87C0C:  91C101C8  stw r14,456(r1)
+	  16: GETL       	R14, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x1C8, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFE87C10:  7C6E1B78  or r14,r3,r3
+	  21: GETL       	R3, t16
+	  22: PUTL       	t16, R14
+	  23: INCEIPL       	$4
+
+	0xFE87C14:  81FE0128  lwz r15,296(r30)
+	  24: GETL       	R30, t18
+	  25: ADDL       	$0x128, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R15
+	  28: INCEIPL       	$4
+
+	0xFE87C18:  3860001E  li r3,30
+	  29: MOVL       	$0x1E, t22
+	  30: PUTL       	t22, R3
+	  31: INCEIPL       	$4
+
+	0xFE87C1C:  93E1020C  stw r31,524(r1)
+	  32: GETL       	R31, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x20C, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xFE87C20:  7C3F0B78  or r31,r1,r1
+	  37: GETL       	R1, t28
+	  38: PUTL       	t28, R31
+	  39: INCEIPL       	$4
+
+	0xFE87C24:  90010214  stw r0,532(r1)
+	  40: GETL       	R0, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x214, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFE87C28:  924101D8  stw r18,472(r1)
+	  45: GETL       	R18, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x1D8, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFE87C2C:  7C922378  or r18,r4,r4
+	  50: GETL       	R4, t38
+	  51: PUTL       	t38, R18
+	  52: INCEIPL       	$4
+
+	0xFE87C30:  926101DC  stw r19,476(r1)
+	  53: GETL       	R19, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x1DC, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0xFE87C34:  3A60FFFF  li r19,-1
+	  58: MOVL       	$0xFFFFFFFF, t44
+	  59: PUTL       	t44, R19
+	  60: INCEIPL       	$4
+
+	0xFE87C38:  928101E0  stw r20,480(r1)
+	  61: GETL       	R20, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x1E0, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0xFE87C3C:  92E101EC  stw r23,492(r1)
+	  66: GETL       	R23, t50
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x1EC, t52
+	  69: STL       	t50, (t52)
+	  70: INCEIPL       	$4
+
+	0xFE87C40:  93810200  stw r28,512(r1)
+	  71: GETL       	R28, t54
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x200, t56
+	  74: STL       	t54, (t56)
+	  75: INCEIPL       	$4
+
+	0xFE87C44:  920101D0  stw r16,464(r1)
+	  76: GETL       	R16, t58
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x1D0, t60
+	  79: STL       	t58, (t60)
+	  80: INCEIPL       	$4
+
+	0xFE87C48:  922101D4  stw r17,468(r1)
+	  81: GETL       	R17, t62
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x1D4, t64
+	  84: STL       	t62, (t64)
+	  85: INCEIPL       	$4
+
+	0xFE87C4C:  92A101E4  stw r21,484(r1)
+	  86: GETL       	R21, t66
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x1E4, t68
+	  89: STL       	t66, (t68)
+	  90: INCEIPL       	$4
+
+	0xFE87C50:  92C101E8  stw r22,488(r1)
+	  91: GETL       	R22, t70
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x1E8, t72
+	  94: STL       	t70, (t72)
+	  95: INCEIPL       	$4
+
+	0xFE87C54:  930101F0  stw r24,496(r1)
+	  96: GETL       	R24, t74
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x1F0, t76
+	  99: STL       	t74, (t76)
+	 100: INCEIPL       	$4
+
+	0xFE87C58:  932101F4  stw r25,500(r1)
+	 101: GETL       	R25, t78
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x1F4, t80
+	 104: STL       	t78, (t80)
+	 105: INCEIPL       	$4
+
+	0xFE87C5C:  934101F8  stw r26,504(r1)
+	 106: GETL       	R26, t82
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x1F8, t84
+	 109: STL       	t82, (t84)
+	 110: INCEIPL       	$4
+
+	0xFE87C60:  936101FC  stw r27,508(r1)
+	 111: GETL       	R27, t86
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x1FC, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0xFE87C64:  93A10204  stw r29,516(r1)
+	 116: GETL       	R29, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0x204, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0xFE87C68:  918101C4  stw r12,452(r1)
+	 121: GETL       	R12, t94
+	 122: GETL       	R1, t96
+	 123: ADDL       	$0x1C4, t96
+	 124: STL       	t94, (t96)
+	 125: INCEIPL       	$4
+
+	0xFE87C6C:  82E40000  lwz r23,0(r4)
+	 126: GETL       	R4, t98
+	 127: LDL       	(t98), t100
+	 128: PUTL       	t100, R23
+	 129: INCEIPL       	$4
+
+	0xFE87C70:  4806F9B1  bl 0xFEF7620
+	 130: MOVL       	$0xFE87C74, t102
+	 131: PUTL       	t102, LR
+	 132: JMPo-c       	$0xFEF7620  ($4)
+
+
+
+. 1653 FE87BFC 120
+. 93 C1 02 08 7F C8 02 A6 7D 80 00 26 91 E1 01 CC 91 C1 01 C8 7C 6E 1B 78 81 FE 01 28 38 60 00 1E 93 E1 02 0C 7C 3F 0B 78 90 01 02 14 92 41 01 D8 7C 92 23 78 92 61 01 DC 3A 60 FF FF 92 81 01 E0 92 E1 01 EC 93 81 02 00 92 01 01 D0 92 21 01 D4 92 A1 01 E4 92 C1 01 E8 93 01 01 F0 93 21 01 F4 93 41 01 F8 93 61 01 FC 93 A1 02 04 91 81 01 C4 82 E4 00 00 48 06 F9 B1
+==== BB 1654 sysconf(0xFEF7620) approx BBs exec'd 0 ====
+
+	0xFEF7620:  2F830022  cmpi cr7,r3,34
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0x22, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEF7624:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFEF7628:  9421FDD0  stwu r1,-560(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFDD0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFEF762C:  480B0825  bl 0xFFA7E50
+	  14: MOVL       	$0xFEF7630, t12
+	  15: PUTL       	t12, LR
+	  16: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1654 FEF7620 16
+. 2F 83 00 22 7C 08 02 A6 94 21 FD D0 48 0B 08 25
+==== BB 1655 (0xFEF7630) approx BBs exec'd 0 ====
+
+	0xFEF7630:  93410218  stw r26,536(r1)
+	   0: GETL       	R26, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x218, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEF7634:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xFEF7638:  93C10228  stw r30,552(r1)
+	   8: GETL       	R30, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x228, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEF763C:  7FC802A6  mflr r30
+	  13: GETL       	LR, t10
+	  14: PUTL       	t10, R30
+	  15: INCEIPL       	$4
+
+	0xFEF7640:  93E1022C  stw r31,556(r1)
+	  16: GETL       	R31, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x22C, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFEF7644:  7C3F0B78  or r31,r1,r1
+	  21: GETL       	R1, t16
+	  22: PUTL       	t16, R31
+	  23: INCEIPL       	$4
+
+	0xFEF7648:  92C10208  stw r22,520(r1)
+	  24: GETL       	R22, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x208, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFEF764C:  92E1020C  stw r23,524(r1)
+	  29: GETL       	R23, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x20C, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFEF7650:  93010210  stw r24,528(r1)
+	  34: GETL       	R24, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x210, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFEF7654:  93210214  stw r25,532(r1)
+	  39: GETL       	R25, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x214, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFEF7658:  9361021C  stw r27,540(r1)
+	  44: GETL       	R27, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x21C, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFEF765C:  93810220  stw r28,544(r1)
+	  49: GETL       	R28, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x220, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFEF7660:  93A10224  stw r29,548(r1)
+	  54: GETL       	R29, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x224, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0xFEF7664:  90010234  stw r0,564(r1)
+	  59: GETL       	R0, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x234, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFEF7668:  419E0090  bc 12,30,0xFEF76F8
+	  64: Js30o       	$0xFEF76F8
+
+
+
+. 1655 FEF7630 60
+. 93 41 02 18 7C 7A 1B 78 93 C1 02 28 7F C8 02 A6 93 E1 02 2C 7C 3F 0B 78 92 C1 02 08 92 E1 02 0C 93 01 02 10 93 21 02 14 93 61 02 1C 93 81 02 20 93 A1 02 24 90 01 02 34 41 9E 00 90
+==== BB 1656 (0xFEF766C) approx BBs exec'd 0 ====
+
+	0xFEF766C:  409D002C  bc 4,29,0xFEF7698
+	   0: Jc29o       	$0xFEF7698
+
+
+
+. 1656 FEF766C 4
+. 40 9D 00 2C
+==== BB 1657 (0xFEF7698) approx BBs exec'd 0 ====
+
+	0xFEF7698:  2F830003  cmpi cr7,r3,3
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0x3, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEF769C:  409EFFDC  bc 4,30,0xFEF7678
+	   5: Jc30o       	$0xFEF7678
+
+
+
+. 1657 FEF7698 8
+. 2F 83 00 03 40 9E FF DC
+==== BB 1658 (0xFEF7678) approx BBs exec'd 0 ====
+
+	0xFEF7678:  281A00EC  cmpli cr0,r26,236
+	   0: GETL       	R26, t0
+	   1: MOVL       	$0xEC, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEF767C:  4181002C  bc 12,1,0xFEF76A8
+	   5: Js01o       	$0xFEF76A8
+
+
+
+. 1658 FEF7678 8
+. 28 1A 00 EC 41 81 00 2C
+==== BB 1659 (0xFEF7680) approx BBs exec'd 0 ====
+
+	0xFEF7680:  817E0AB0  lwz r11,2736(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xAB0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFEF7684:  5757103A  rlwinm r23,r26,2,0,29
+	   5: GETL       	R26, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R23
+	   8: INCEIPL       	$4
+
+	0xFEF7688:  7ED7582E  lwzx r22,r23,r11
+	   9: GETL       	R11, t6
+	  10: GETL       	R23, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R22
+	  14: INCEIPL       	$4
+
+	0xFEF768C:  7D965A14  add r12,r22,r11
+	  15: GETL       	R22, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R12
+	  19: INCEIPL       	$4
+
+	0xFEF7690:  7D8903A6  mtctr r12
+	  20: GETL       	R12, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0xFEF7694:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+
+. 1659 FEF7680 24
+. 81 7E 0A B0 57 57 10 3A 7E D7 58 2E 7D 96 5A 14 7D 89 03 A6 4E 80 04 20
+==== BB 1660 (0xFEF7954) approx BBs exec'd 0 ====
+
+	0xFEF7954:  48031FA9  bl 0xFF298FC
+	   0: MOVL       	$0xFEF7958, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFF298FC  ($4)
+
+
+
+. 1660 FEF7954 4
+. 48 03 1F A9
+==== BB 1661 getpagesize(0xFF298FC) approx BBs exec'd 0 ====
+
+	0xFF298FC:  7D8802A6  mflr r12
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0xFF29900:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFF29904:  4807E54D  bl 0xFFA7E50
+	   9: MOVL       	$0xFF29908, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1661 FF298FC 12
+. 7D 88 02 A6 94 21 FF F0 48 07 E5 4D
+==== BB 1662 (0xFF29908) approx BBs exec'd 0 ====
+
+	0xFF29908:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF2990C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFF29910:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0xFF29914:  813E1BC8  lwz r9,7112(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x1BC8, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0xFF29918:  80090004  lwz r0,4(r9)
+	  16: GETL       	R9, t12
+	  17: ADDL       	$0x4, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0xFF2991C:  2F800000  cmpi cr7,r0,0
+	  21: GETL       	R0, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0xFF29920:  419E0014  bc 12,30,0xFF29934
+	  25: Js30o       	$0xFF29934
+
+
+
+. 1662 FF29908 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 81 3E 1B C8 80 09 00 04 2F 80 00 00 41 9E 00 14
+==== BB 1663 (0xFF29924) approx BBs exec'd 0 ====
+
+	0xFF29924:  7C030378  or r3,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFF29928:  83C10008  lwz r30,8(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFF2992C:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xFF29930:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 1663 FF29924 16
+. 7C 03 03 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 1664 (0xFEF7958) approx BBs exec'd 0 ====
+
+	0xFEF7958:  4BFFFD64  b 0xFEF76BC
+	   0: JMPo       	$0xFEF76BC  ($4)
+
+
+
+. 1664 FEF7958 4
+. 4B FF FD 64
+==== BB 1665 (0xFEF76BC) approx BBs exec'd 0 ====
+
+	0xFEF76BC:  80A10000  lwz r5,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFEF76C0:  83A50004  lwz r29,4(r5)
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R29
+	   8: INCEIPL       	$4
+
+	0xFEF76C4:  82C5FFD8  lwz r22,-40(r5)
+	   9: GETL       	R5, t8
+	  10: ADDL       	$0xFFFFFFD8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R22
+	  13: INCEIPL       	$4
+
+	0xFEF76C8:  7FA803A6  mtlr r29
+	  14: GETL       	R29, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFEF76CC:  82E5FFDC  lwz r23,-36(r5)
+	  17: GETL       	R5, t14
+	  18: ADDL       	$0xFFFFFFDC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R23
+	  21: INCEIPL       	$4
+
+	0xFEF76D0:  8305FFE0  lwz r24,-32(r5)
+	  22: GETL       	R5, t18
+	  23: ADDL       	$0xFFFFFFE0, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R24
+	  26: INCEIPL       	$4
+
+	0xFEF76D4:  8325FFE4  lwz r25,-28(r5)
+	  27: GETL       	R5, t22
+	  28: ADDL       	$0xFFFFFFE4, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R25
+	  31: INCEIPL       	$4
+
+	0xFEF76D8:  8345FFE8  lwz r26,-24(r5)
+	  32: GETL       	R5, t26
+	  33: ADDL       	$0xFFFFFFE8, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R26
+	  36: INCEIPL       	$4
+
+	0xFEF76DC:  8365FFEC  lwz r27,-20(r5)
+	  37: GETL       	R5, t30
+	  38: ADDL       	$0xFFFFFFEC, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R27
+	  41: INCEIPL       	$4
+
+	0xFEF76E0:  8385FFF0  lwz r28,-16(r5)
+	  42: GETL       	R5, t34
+	  43: ADDL       	$0xFFFFFFF0, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R28
+	  46: INCEIPL       	$4
+
+	0xFEF76E4:  83A5FFF4  lwz r29,-12(r5)
+	  47: GETL       	R5, t38
+	  48: ADDL       	$0xFFFFFFF4, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R29
+	  51: INCEIPL       	$4
+
+	0xFEF76E8:  83C5FFF8  lwz r30,-8(r5)
+	  52: GETL       	R5, t42
+	  53: ADDL       	$0xFFFFFFF8, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R30
+	  56: INCEIPL       	$4
+
+	0xFEF76EC:  83E5FFFC  lwz r31,-4(r5)
+	  57: GETL       	R5, t46
+	  58: ADDL       	$0xFFFFFFFC, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R31
+	  61: INCEIPL       	$4
+
+	0xFEF76F0:  7CA12B78  or r1,r5,r5
+	  62: GETL       	R5, t50
+	  63: PUTL       	t50, R1
+	  64: INCEIPL       	$4
+
+	0xFEF76F4:  4E800020  blr
+	  65: GETL       	LR, t52
+	  66: JMPo-r       	t52  ($4)
+
+
+
+. 1665 FEF76BC 60
+. 80 A1 00 00 83 A5 00 04 82 C5 FF D8 7F A8 03 A6 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+==== BB 1666 (0xFE87C74) approx BBs exec'd 0 ====
+
+	0xFE87C74:  838F0000  lwz r28,0(r15)
+	   0: GETL       	R15, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R28
+	   3: INCEIPL       	$4
+
+	0xFE87C78:  7C741B78  or r20,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R20
+	   6: INCEIPL       	$4
+
+	0xFE87C7C:  2F9C0000  cmpi cr7,r28,0
+	   7: GETL       	R28, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFE87C80:  419E0034  bc 12,30,0xFE87CB4
+	  11: Js30o       	$0xFE87CB4
+
+
+
+. 1666 FE87C74 16
+. 83 8F 00 00 7C 74 1B 78 2F 9C 00 00 41 9E 00 34
+==== BB 1667 (0xFE87CB4) approx BBs exec'd 0 ====
+
+	0xFE87CB4:  7EE3BB78  or r3,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE87CB8:  3880002E  li r4,46
+	   3: MOVL       	$0x2E, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE87CBC:  4804E611  bl 0xFED62CC
+	   6: MOVL       	$0xFE87CC0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED62CC  ($4)
+
+
+
+. 1667 FE87CB4 12
+. 7E E3 BB 78 38 80 00 2E 48 04 E6 11
+==== BB 1668 (0xFED6380) approx BBs exec'd 0 ====
+
+	0xFED6380:  7CE06038  and r0,r7,r12
+	   0: GETL       	R7, t0
+	   1: GETL       	R12, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED6384:  7CEA6378  or r10,r7,r12
+	   5: GETL       	R7, t4
+	   6: GETL       	R12, t6
+	   7: ORL       	t6, t4
+	   8: PUTL       	t4, R10
+	   9: INCEIPL       	$4
+
+	0xFED6388:  7C003A14  add r0,r0,r7
+	  10: GETL       	R0, t8
+	  11: GETL       	R7, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFED638C:  7D4900F8  nor r9,r10,r0
+	  15: GETL       	R10, t12
+	  16: GETL       	R0, t14
+	  17: ORL       	t14, t12
+	  18: NOTL       	t12
+	  19: PUTL       	t12, R9
+	  20: INCEIPL       	$4
+
+	0xFED6390:  7D240034  cntlzw r4,r9
+	  21: GETL       	R9, t16
+	  22: CNTLZL       	t16
+	  23: PUTL       	t16, R4
+	  24: INCEIPL       	$4
+
+	0xFED6394:  3908FFFC  addi r8,r8,-4
+	  25: GETL       	R8, t18
+	  26: ADDL       	$0xFFFFFFFC, t18
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0xFED6398:  5484E8FE  rlwinm r4,r4,29,3,31
+	  29: GETL       	R4, t20
+	  30: SHRL       	$0x3, t20
+	  31: PUTL       	t20, R4
+	  32: INCEIPL       	$4
+
+	0xFED639C:  7C682214  add r3,r8,r4
+	  33: GETL       	R8, t22
+	  34: GETL       	R4, t24
+	  35: ADDL       	t22, t24
+	  36: PUTL       	t24, R3
+	  37: INCEIPL       	$4
+
+	0xFED63A0:  4E800020  blr
+	  38: GETL       	LR, t26
+	  39: JMPo-r       	t26  ($4)
+
+
+
+. 1668 FED6380 36
+. 7C E0 60 38 7C EA 63 78 7C 00 3A 14 7D 49 00 F8 7D 24 00 34 39 08 FF FC 54 84 E8 FE 7C 68 22 14 4E 80 00 20
+==== BB 1669 (0xFE87CC0) approx BBs exec'd 0 ====
+
+	0xFE87CC0:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE87CC4:  419A0018  bc 12,26,0xFE87CDC
+	   4: Js26o       	$0xFE87CDC
+
+
+
+. 1669 FE87CC0 8
+. 2F 03 00 00 41 9A 00 18
+==== BB 1670 (0xFE87CC8) approx BBs exec'd 0 ====
+
+	0xFE87CC8:  88030001  lbz r0,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE87CCC:  2F800040  cmpi cr7,r0,64
+	   5: GETL       	R0, t4
+	   6: MOVL       	$0x40, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFE87CD0:  419E000C  bc 12,30,0xFE87CDC
+	  10: Js30o       	$0xFE87CDC
+
+
+
+. 1670 FE87CC8 12
+. 88 03 00 01 2F 80 00 40 41 9E 00 0C
+==== BB 1671 (0xFE87CD4) approx BBs exec'd 0 ====
+
+	0xFE87CD4:  2C000000  cmpi cr0,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE87CD8:  40820584  bc 4,2,0xFE8825C
+	   4: Jc02o       	$0xFE8825C
+
+
+
+. 1671 FE87CD4 8
+. 2C 00 00 00 40 82 05 84
+==== BB 1672 (0xFE8825C) approx BBs exec'd 0 ====
+
+	0xFE8825C:  3B630001  addi r27,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0xFE88260:  38800040  li r4,64
+	   4: MOVL       	$0x40, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0xFE88264:  7F63DB78  or r3,r27,r27
+	   7: GETL       	R27, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0xFE88268:  48050F81  bl 0xFED91E8
+	  10: MOVL       	$0xFE8826C, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0xFED91E8  ($4)
+
+
+
+. 1672 FE8825C 16
+. 3B 63 00 01 38 80 00 40 7F 63 DB 78 48 05 0F 81
+==== BB 1673 strchrnul(0xFED91E8) approx BBs exec'd 0 ====
+
+	0xFED91E8:  546007BE  rlwinm r0,r3,0,30,31
+	   0: GETL       	R3, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xFED91EC:  9421FFE0  stwu r1,-32(r1)
+	   4: GETL       	R1, t2
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0xFFFFFFE0, t4
+	   7: PUTL       	t4, R1
+	   8: STL       	t2, (t4)
+	   9: INCEIPL       	$4
+
+	0xFED91F0:  2F800000  cmpi cr7,r0,0
+	  10: GETL       	R0, t6
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0xFED91F4:  5484063E  rlwinm r4,r4,0,24,31
+	  14: GETL       	R4, t10
+	  15: ANDL       	$0xFF, t10
+	  16: PUTL       	t10, R4
+	  17: INCEIPL       	$4
+
+	0xFED91F8:  419E0024  bc 12,30,0xFED921C
+	  18: Js30o       	$0xFED921C
+
+
+
+. 1673 FED91E8 20
+. 54 60 07 BE 94 21 FF E0 2F 80 00 00 54 84 06 3E 41 9E 00 24
+==== BB 1674 (0xFED91FC) approx BBs exec'd 0 ====
+
+	0xFED91FC:  88A30000  lbz r5,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFED9200:  7C052000  cmp cr0,r5,r4
+	   4: GETL       	R5, t4
+	   5: GETL       	R4, t6
+	   6: CMPL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFED9204:  2F050000  cmpi cr6,r5,0
+	   9: GETL       	R5, t10
+	  10: CMP0L       	t10, t12  (-rSo)
+	  11: ICRFL       	t12, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0xFED9208:  418200DC  bc 12,2,0xFED92E4
+	  13: Js02o       	$0xFED92E4
+
+
+
+. 1674 FED91FC 16
+. 88 A3 00 00 7C 05 20 00 2F 05 00 00 41 82 00 DC
+==== BB 1675 (0xFED920C) approx BBs exec'd 0 ====
+
+	0xFED920C:  419A00D8  bc 12,26,0xFED92E4
+	   0: Js26o       	$0xFED92E4
+
+
+
+. 1675 FED920C 4
+. 41 9A 00 D8
+==== BB 1676 (0xFED9210) approx BBs exec'd 0 ====
+
+	0xFED9210:  38630001  addi r3,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFED9214:  70600003  andi. r0,r3,0x3
+	   4: GETL       	R3, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R0
+	   7: CMP0L       	t2, t4  (-rSo)
+	   8: ICRFL       	t4, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFED9218:  4082FFE4  bc 4,2,0xFED91FC
+	  10: Jc02o       	$0xFED91FC
+
+
+
+. 1676 FED9210 12
+. 38 63 00 01 70 60 00 03 40 82 FF E4
+==== BB 1677 (0xFED921C) approx BBs exec'd 0 ====
+
+	0xFED921C:  548A402E  rlwinm r10,r4,8,0,23
+	   0: GETL       	R4, t0
+	   1: SHLL       	$0x8, t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0xFED9220:  3CE07EFE  lis r7,32510
+	   4: MOVL       	$0x7EFE0000, t2
+	   5: PUTL       	t2, R7
+	   6: INCEIPL       	$4
+
+	0xFED9224:  7C865378  or r6,r4,r10
+	   7: GETL       	R4, t4
+	   8: GETL       	R10, t6
+	   9: ORL       	t6, t4
+	  10: PUTL       	t4, R6
+	  11: INCEIPL       	$4
+
+	0xFED9228:  3D008101  lis r8,-32511
+	  12: MOVL       	$0x81010000, t8
+	  13: PUTL       	t8, R8
+	  14: INCEIPL       	$4
+
+	0xFED922C:  54C9801E  rlwinm r9,r6,16,0,15
+	  15: GETL       	R6, t10
+	  16: SHLL       	$0x10, t10
+	  17: PUTL       	t10, R9
+	  18: INCEIPL       	$4
+
+	0xFED9230:  60E7FEFF  ori r7,r7,0xFEFF
+	  19: MOVL       	$0x7EFEFEFF, t12
+	  20: PUTL       	t12, R7
+	  21: INCEIPL       	$4
+
+	0xFED9234:  7CC54B78  or r5,r6,r9
+	  22: GETL       	R6, t14
+	  23: GETL       	R9, t16
+	  24: ORL       	t16, t14
+	  25: PUTL       	t14, R5
+	  26: INCEIPL       	$4
+
+	0xFED9238:  61080100  ori r8,r8,0x100
+	  27: MOVL       	$0x81010100, t18
+	  28: PUTL       	t18, R8
+	  29: INCEIPL       	$4
+
+	0xFED923C:  80C30000  lwz r6,0(r3)
+	  30: GETL       	R3, t20
+	  31: LDL       	(t20), t22
+	  32: PUTL       	t22, R6
+	  33: INCEIPL       	$4
+
+	0xFED9240:  38630004  addi r3,r3,4
+	  34: GETL       	R3, t24
+	  35: ADDL       	$0x4, t24
+	  36: PUTL       	t24, R3
+	  37: INCEIPL       	$4
+
+	0xFED9244:  7CCC2A78  xor r12,r6,r5
+	  38: GETL       	R6, t26
+	  39: GETL       	R5, t28
+	  40: XORL       	t26, t28
+	  41: PUTL       	t28, R12
+	  42: INCEIPL       	$4
+
+	0xFED9248:  7D463A14  add r10,r6,r7
+	  43: GETL       	R6, t30
+	  44: GETL       	R7, t32
+	  45: ADDL       	t30, t32
+	  46: PUTL       	t32, R10
+	  47: INCEIPL       	$4
+
+	0xFED924C:  7C0C3A14  add r0,r12,r7
+	  48: GETL       	R12, t34
+	  49: GETL       	R7, t36
+	  50: ADDL       	t34, t36
+	  51: PUTL       	t36, R0
+	  52: INCEIPL       	$4
+
+	0xFED9250:  7CC95278  xor r9,r6,r10
+	  53: GETL       	R6, t38
+	  54: GETL       	R10, t40
+	  55: XORL       	t38, t40
+	  56: PUTL       	t40, R9
+	  57: INCEIPL       	$4
+
+	0xFED9254:  7D8B0278  xor r11,r12,r0
+	  58: GETL       	R12, t42
+	  59: GETL       	R0, t44
+	  60: XORL       	t42, t44
+	  61: PUTL       	t44, R11
+	  62: INCEIPL       	$4
+
+	0xFED9258:  7D004879  andc. r0,r8,r9
+	  63: GETL       	R8, t46
+	  64: GETL       	R9, t48
+	  65: NOTL       	t48
+	  66: ANDL       	t46, t48
+	  67: PUTL       	t48, R0
+	  68: CMP0L       	t48, t50  (-rSo)
+	  69: ICRFL       	t50, $0x0, CR
+	  70: INCEIPL       	$4
+
+	0xFED925C:  3923FFFC  addi r9,r3,-4
+	  71: GETL       	R3, t52
+	  72: ADDL       	$0xFFFFFFFC, t52
+	  73: PUTL       	t52, R9
+	  74: INCEIPL       	$4
+
+	0xFED9260:  7C000026  mfcr r0
+	  75: GETL       	CR, t54
+	  76: PUTL       	t54, R0
+	  77: INCEIPL       	$4
+
+	0xFED9264:  9001000C  stw r0,12(r1)
+	  78: GETL       	R0, t56
+	  79: GETL       	R1, t58
+	  80: ADDL       	$0xC, t58
+	  81: STL       	t56, (t58)
+	  82: INCEIPL       	$4
+
+	0xFED9268:  7D005879  andc. r0,r8,r11
+	  83: GETL       	R8, t60
+	  84: GETL       	R11, t62
+	  85: NOTL       	t62
+	  86: ANDL       	t60, t62
+	  87: PUTL       	t62, R0
+	  88: CMP0L       	t62, t64  (-rSo)
+	  89: ICRFL       	t64, $0x0, CR
+	  90: INCEIPL       	$4
+
+	0xFED926C:  8181000C  lwz r12,12(r1)
+	  91: GETL       	R1, t66
+	  92: ADDL       	$0xC, t66
+	  93: LDL       	(t66), t68
+	  94: PUTL       	t68, R12
+	  95: INCEIPL       	$4
+
+	0xFED9270:  3963FFFD  addi r11,r3,-3
+	  96: GETL       	R3, t70
+	  97: ADDL       	$0xFFFFFFFD, t70
+	  98: PUTL       	t70, R11
+	  99: INCEIPL       	$4
+
+	0xFED9274:  558CE03E  rlwinm r12,r12,28,0,31
+	 100: GETL       	R12, t72
+	 101: ROLL       	$0x1C, t72
+	 102: PUTL       	t72, R12
+	 103: INCEIPL       	$4
+
+	0xFED9278:  7D840120  mtcrf 0x40,r12
+	 104: GETL       	R12, t74
+	 105: ICRFL       	t74, $0x1, CR
+	 106: INCEIPL       	$4
+
+	0xFED927C:  558C203E  rlwinm r12,r12,4,0,31
+	 107: GETL       	R12, t76
+	 108: ROLL       	$0x4, t76
+	 109: PUTL       	t76, R12
+	 110: INCEIPL       	$4
+
+	0xFED9280:  40860008  bc 4,6,0xFED9288
+	 111: Jc06o       	$0xFED9288
+
+
+
+. 1677 FED921C 104
+. 54 8A 40 2E 3C E0 7E FE 7C 86 53 78 3D 00 81 01 54 C9 80 1E 60 E7 FE FF 7C C5 4B 78 61 08 01 00 80 C3 00 00 38 63 00 04 7C CC 2A 78 7D 46 3A 14 7C 0C 3A 14 7C C9 52 78 7D 8B 02 78 7D 00 48 79 39 23 FF FC 7C 00 00 26 90 01 00 0C 7D 00 58 79 81 81 00 0C 39 63 FF FD 55 8C E0 3E 7D 84 01 20 55 8C 20 3E 40 86 00 08
+==== BB 1678 (0xFED9288) approx BBs exec'd 0 ====
+
+	0xFED9288:  88C3FFFC  lbz r6,-4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFED928C:  3943FFFE  addi r10,r3,-2
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0xFFFFFFFE, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0xFED9290:  7F862000  cmp cr7,r6,r4
+	   9: GETL       	R6, t6
+	  10: GETL       	R4, t8
+	  11: CMPL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0xFED9294:  2F060000  cmpi cr6,r6,0
+	  14: GETL       	R6, t12
+	  15: CMP0L       	t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x6, CR
+	  17: INCEIPL       	$4
+
+	0xFED9298:  38C3FFFF  addi r6,r3,-1
+	  18: GETL       	R3, t16
+	  19: ADDL       	$0xFFFFFFFF, t16
+	  20: PUTL       	t16, R6
+	  21: INCEIPL       	$4
+
+	0xFED929C:  419E0050  bc 12,30,0xFED92EC
+	  22: Js30o       	$0xFED92EC
+
+
+
+. 1678 FED9288 24
+. 88 C3 FF FC 39 43 FF FE 7F 86 20 00 2F 06 00 00 38 C3 FF FF 41 9E 00 50
+==== BB 1679 (0xFED92A0) approx BBs exec'd 0 ====
+
+	0xFED92A0:  419A004C  bc 12,26,0xFED92EC
+	   0: Js26o       	$0xFED92EC
+
+
+
+. 1679 FED92A0 4
+. 41 9A 00 4C
+==== BB 1680 (0xFED92A4) approx BBs exec'd 0 ====
+
+	0xFED92A4:  8923FFFD  lbz r9,-3(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFD, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFED92A8:  7C892000  cmp cr1,r9,r4
+	   5: GETL       	R9, t4
+	   6: GETL       	R4, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFED92AC:  2F090000  cmpi cr6,r9,0
+	  10: GETL       	R9, t10
+	  11: CMP0L       	t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFED92B0:  41860048  bc 12,6,0xFED92F8
+	  14: Js06o       	$0xFED92F8
+
+
+
+. 1680 FED92A4 16
+. 89 23 FF FD 7C 89 20 00 2F 09 00 00 41 86 00 48
+==== BB 1681 (0xFED92B4) approx BBs exec'd 0 ====
+
+	0xFED92B4:  419A0044  bc 12,26,0xFED92F8
+	   0: Js26o       	$0xFED92F8
+
+
+
+. 1681 FED92B4 4
+. 41 9A 00 44
+==== BB 1682 (0xFED92B8) approx BBs exec'd 0 ====
+
+	0xFED92B8:  8963FFFE  lbz r11,-2(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFE, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFED92BC:  7C0B2000  cmp cr0,r11,r4
+	   5: GETL       	R11, t4
+	   6: GETL       	R4, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFED92C0:  2F0B0000  cmpi cr6,r11,0
+	  10: GETL       	R11, t10
+	  11: CMP0L       	t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFED92C4:  41820040  bc 12,2,0xFED9304
+	  14: Js02o       	$0xFED9304
+
+
+
+. 1682 FED92B8 16
+. 89 63 FF FE 7C 0B 20 00 2F 0B 00 00 41 82 00 40
+==== BB 1683 (0xFED92C8) approx BBs exec'd 0 ====
+
+	0xFED92C8:  419A003C  bc 12,26,0xFED9304
+	   0: Js26o       	$0xFED9304
+
+
+
+. 1683 FED92C8 4
+. 41 9A 00 3C
+==== BB 1684 (0xFED9304) approx BBs exec'd 0 ====
+
+	0xFED9304:  7D435378  or r3,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED9308:  38210020  addi r1,r1,32
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x20, t2
+	   5: PUTL       	t2, R1
+	   6: INCEIPL       	$4
+
+	0xFED930C:  4E800020  blr
+	   7: GETL       	LR, t4
+	   8: JMPo-r       	t4  ($4)
+
+
+
+. 1684 FED9304 12
+. 7D 43 53 78 38 21 00 20 4E 80 00 20
+==== BB 1685 (0xFE8826C) approx BBs exec'd 0 ====
+
+	0xFE8826C:  7FBB1850  subf r29,r27,r3
+	   0: GETL       	R27, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFE88270:  7C791B78  or r25,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0xFE88274:  7FA4EB78  or r4,r29,r29
+	   8: GETL       	R29, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFE88278:  7F63DB78  or r3,r27,r27
+	  11: GETL       	R27, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFE8827C:  48005B29  bl 0xFE8DDA4
+	  14: MOVL       	$0xFE88280, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFE8DDA4  ($4)
+
+
+
+. 1685 FE8826C 20
+. 7F BB 18 50 7C 79 1B 78 7F A4 EB 78 7F 63 DB 78 48 00 5B 29
+==== BB 1686 _nl_normalize_codeset(0xFE8DDA4) approx BBs exec'd 0 ====
+
+	0xFE8DDA4:  38A00000  li r5,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFE8DDA8:  7D800026  mfcr r12
+	   3: GETL       	CR, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0xFE8DDAC:  7F852040  cmpl cr7,r5,r4
+	   6: GETL       	R5, t4
+	   7: GETL       	R4, t6
+	   8: CMPUL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFE8DDB0:  7C0802A6  mflr r0
+	  11: GETL       	LR, t10
+	  12: PUTL       	t10, R0
+	  13: INCEIPL       	$4
+
+	0xFE8DDB4:  9421FFE0  stwu r1,-32(r1)
+	  14: GETL       	R1, t12
+	  15: GETL       	R1, t14
+	  16: ADDL       	$0xFFFFFFE0, t14
+	  17: PUTL       	t14, R1
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFE8DDB8:  4811A099  bl 0xFFA7E50
+	  20: MOVL       	$0xFE8DDBC, t16
+	  21: PUTL       	t16, LR
+	  22: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1686 FE8DDA4 24
+. 38 A0 00 00 7D 80 00 26 7F 85 20 40 7C 08 02 A6 94 21 FF E0 48 11 A0 99
+==== BB 1687 (0xFE8DDBC) approx BBs exec'd 0 ====
+
+	0xFE8DDBC:  93A10014  stw r29,20(r1)
+	   0: GETL       	R29, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8DDC0:  38C00001  li r6,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0xFE8DDC4:  93C10018  stw r30,24(r1)
+	   8: GETL       	R30, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x18, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE8DDC8:  7C9D2378  or r29,r4,r4
+	  13: GETL       	R4, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0xFE8DDCC:  93E1001C  stw r31,28(r1)
+	  16: GETL       	R31, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x1C, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFE8DDD0:  7FC802A6  mflr r30
+	  21: GETL       	LR, t16
+	  22: PUTL       	t16, R30
+	  23: INCEIPL       	$4
+
+	0xFE8DDD4:  93810010  stw r28,16(r1)
+	  24: GETL       	R28, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x10, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFE8DDD8:  7C7F1B78  or r31,r3,r3
+	  29: GETL       	R3, t22
+	  30: PUTL       	t22, R31
+	  31: INCEIPL       	$4
+
+	0xFE8DDDC:  90010024  stw r0,36(r1)
+	  32: GETL       	R0, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x24, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xFE8DDE0:  38E00000  li r7,0
+	  37: MOVL       	$0x0, t28
+	  38: PUTL       	t28, R7
+	  39: INCEIPL       	$4
+
+	0xFE8DDE4:  9181000C  stw r12,12(r1)
+	  40: GETL       	R12, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0xC, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFE8DDE8:  409C0180  bc 4,28,0xFE8DF68
+	  45: Jc28o       	$0xFE8DF68
+
+
+
+. 1687 FE8DDBC 48
+. 93 A1 00 14 38 C0 00 01 93 C1 00 18 7C 9D 23 78 93 E1 00 1C 7F C8 02 A6 93 81 00 10 7C 7F 1B 78 90 01 00 24 38 E0 00 00 91 81 00 0C 40 9C 01 80
+==== BB 1688 (0xFE8DDEC) approx BBs exec'd 0 ====
+
+	0xFE8DDEC:  813E1CF8  lwz r9,7416(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CF8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE8DDF0:  817E1D50  lwz r11,7504(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1D50, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0xFE8DDF4:  7FA903A6  mtctr r29
+	  10: GETL       	R29, t8
+	  11: PUTL       	t8, CTR
+	  12: INCEIPL       	$4
+
+	0xFE8DDF8:  7D091214  add r8,r9,r2
+	  13: GETL       	R9, t10
+	  14: GETL       	R2, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R8
+	  17: INCEIPL       	$4
+
+	0xFE8DDFC:  7C8B1214  add r4,r11,r2
+	  18: GETL       	R11, t14
+	  19: GETL       	R2, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R4
+	  22: INCEIPL       	$4
+
+	0xFE8DE00:  80680000  lwz r3,0(r8)
+	  23: GETL       	R8, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R3
+	  26: INCEIPL       	$4
+
+	0xFE8DE04:  2C030000  cmpi cr0,r3,0
+	  27: GETL       	R3, t22
+	  28: CMP0L       	t22, t24  (-rSo)
+	  29: ICRFL       	t24, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0xFE8DE08:  41820140  bc 12,2,0xFE8DF48
+	  31: Js02o       	$0xFE8DF48
+
+
+
+. 1688 FE8DDEC 32
+. 81 3E 1C F8 81 7E 1D 50 7F A9 03 A6 7D 09 12 14 7C 8B 12 14 80 68 00 00 2C 03 00 00 41 82 01 40
+==== BB 1689 (0xFE8DE0C) approx BBs exec'd 0 ====
+
+	0xFE8DE0C:  7D7F38AE  lbzx r11,r31,r7
+	   0: GETL       	R7, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	t2, t0
+	   3: LDB       	(t0), t4
+	   4: PUTL       	t4, R11
+	   5: INCEIPL       	$4
+
+	0xFE8DE10:  81480000  lwz r10,0(r8)
+	   6: GETL       	R8, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R10
+	   9: INCEIPL       	$4
+
+	0xFE8DE14:  5569083C  rlwinm r9,r11,1,0,30
+	  10: GETL       	R11, t10
+	  11: SHLL       	$0x1, t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0xFE8DE18:  7C09522E  lhzx r0,r9,r10
+	  14: GETL       	R10, t12
+	  15: GETL       	R9, t14
+	  16: ADDL       	t14, t12
+	  17: LDW       	(t12), t16
+	  18: PUTL       	t16, R0
+	  19: INCEIPL       	$4
+
+	0xFE8DE1C:  2F8A0000  cmpi cr7,r10,0
+	  20: GETL       	R10, t18
+	  21: CMP0L       	t18, t20  (-rSo)
+	  22: ICRFL       	t20, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0xFE8DE20:  70090800  andi. r9,r0,0x800
+	  24: GETL       	R0, t22
+	  25: ANDL       	$0x800, t22
+	  26: PUTL       	t22, R9
+	  27: CMP0L       	t22, t24  (-rSo)
+	  28: ICRFL       	t24, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0xFE8DE24:  41820024  bc 12,2,0xFE8DE48
+	  30: Js02o       	$0xFE8DE48
+
+
+
+. 1689 FE8DE0C 28
+. 7D 7F 38 AE 81 48 00 00 55 69 08 3C 7C 09 52 2E 2F 8A 00 00 70 09 08 00 41 82 00 24
+==== BB 1690 (0xFE8DE28) approx BBs exec'd 0 ====
+
+	0xFE8DE28:  38A50001  addi r5,r5,1
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0xFE8DE2C:  419E0144  bc 12,30,0xFE8DF70
+	   4: Js30o       	$0xFE8DF70
+
+
+
+. 1690 FE8DE28 8
+. 38 A5 00 01 41 9E 01 44
+==== BB 1691 (0xFE8DE30) approx BBs exec'd 0 ====
+
+	0xFE8DE30:  5569083C  rlwinm r9,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFE8DE34:  7C09522E  lhzx r0,r9,r10
+	   4: GETL       	R10, t2
+	   5: GETL       	R9, t4
+	   6: ADDL       	t4, t2
+	   7: LDW       	(t2), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFE8DE38:  540BF7FE  rlwinm r11,r0,30,31,31
+	  10: GETL       	R0, t8
+	  11: ROLL       	$0x1E, t8
+	  12: ANDL       	$0x1, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0xFE8DE3C:  318BFFFF  addic r12,r11,-1
+	  15: GETL       	R11, t10
+	  16: ADCL       	$0xFFFFFFFF, t10  (-wCa)
+	  17: PUTL       	t10, R12
+	  18: INCEIPL       	$4
+
+	0xFE8DE40:  7D8C6110  subfe r12,r12,r12
+	  19: GETL       	R12, t12
+	  20: GETL       	R12, t14
+	  21: SBBL       	t12, t14  (-rCa-wCa)
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0xFE8DE44:  7CC66038  and r6,r6,r12
+	  24: GETL       	R6, t16
+	  25: GETL       	R12, t18
+	  26: ANDL       	t16, t18
+	  27: PUTL       	t18, R6
+	  28: INCEIPL       	$4
+
+	0xFE8DE48:  38E70001  addi r7,r7,1
+	  29: GETL       	R7, t20
+	  30: ADDL       	$0x1, t20
+	  31: PUTL       	t20, R7
+	  32: INCEIPL       	$4
+
+	0xFE8DE4C:  4200FFB4  bc 16,0,0xFE8DE00
+	  33: GETL       	CTR, t22
+	  34: ADDL       	$0xFFFFFFFF, t22
+	  35: PUTL       	t22, CTR
+	  36: JIFZL       	t22, $0xFE8DE50
+	  37: JMPo       	$0xFE8DE00  ($4)
+
+
+
+. 1691 FE8DE30 32
+. 55 69 08 3C 7C 09 52 2E 54 0B F7 FE 31 8B FF FF 7D 8C 61 10 7C C6 60 38 38 E7 00 01 42 00 FF B4
+==== BB 1692 (0xFE8DE00) approx BBs exec'd 0 ====
+
+	0xFE8DE00:  80680000  lwz r3,0(r8)
+	   0: GETL       	R8, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8DE04:  2C030000  cmpi cr0,r3,0
+	   4: GETL       	R3, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE8DE08:  41820140  bc 12,2,0xFE8DF48
+	   8: Js02o       	$0xFE8DF48
+
+
+
+. 1692 FE8DE00 12
+. 80 68 00 00 2C 03 00 00 41 82 01 40
+==== BB 1693 (0xFE8DE48) approx BBs exec'd 0 ====
+
+	0xFE8DE48:  38E70001  addi r7,r7,1
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R7
+	   3: INCEIPL       	$4
+
+	0xFE8DE4C:  4200FFB4  bc 16,0,0xFE8DE00
+	   4: GETL       	CTR, t2
+	   5: ADDL       	$0xFFFFFFFF, t2
+	   6: PUTL       	t2, CTR
+	   7: JIFZL       	t2, $0xFE8DE50
+	   8: JMPo       	$0xFE8DE00  ($4)
+
+
+
+. 1693 FE8DE48 8
+. 38 E7 00 01 42 00 FF B4
+==== BB 1694 (0xFE8DE50) approx BBs exec'd 0 ====
+
+	0xFE8DE50:  2E060000  cmpi cr4,r6,0
+	   0: GETL       	R6, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DE54:  419200EC  bc 12,18,0xFE8DF40
+	   4: Js18o       	$0xFE8DF40
+
+
+
+. 1694 FE8DE50 8
+. 2E 06 00 00 41 92 00 EC
+==== BB 1695 (0xFE8DF40) approx BBs exec'd 0 ====
+
+	0xFE8DF40:  38650001  addi r3,r5,1
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE8DF44:  4BFFFF18  b 0xFE8DE5C
+	   4: JMPo       	$0xFE8DE5C  ($4)
+
+
+
+. 1695 FE8DF40 8
+. 38 65 00 01 4B FF FF 18
+==== BB 1696 (0xFE8DE5C) approx BBs exec'd 0 ====
+
+	0xFE8DE5C:  4811C4AD  bl 0xFFAA308
+	   0: MOVL       	$0xFE8DE60, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 1696 FE8DE5C 4
+. 48 11 C4 AD
+==== BB 1697 (0xFFAA308) approx BBs exec'd 0 ====
+
+	0xFFAA308:  4BF275D8  b 0xFED18E0
+	   0: JMPo       	$0xFED18E0  ($4)
+
+
+
+. 1697 FFAA308 4
+. 4B F2 75 D8
+==== BB 1698 malloc(0xFED18E0) approx BBs exec'd 0 ====
+
+	0xFED18E0:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED18E4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFED18E8:  480D6569  bl 0xFFA7E50
+	   9: MOVL       	$0xFED18EC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1698 FED18E0 12
+. 94 21 FF E0 7C 08 02 A6 48 0D 65 69
+==== BB 1699 (0xFED18EC) approx BBs exec'd 0 ====
+
+	0xFED18EC:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED18F0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFED18F4:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFED18F8:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFED18FC:  7C7D1B78  or r29,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R29
+	  20: INCEIPL       	$4
+
+	0xFED1900:  93410008  stw r26,8(r1)
+	  21: GETL       	R26, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x8, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFED1904:  813E1A7C  lwz r9,6780(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x1A7C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0xFED1908:  9361000C  stw r27,12(r1)
+	  31: GETL       	R27, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0xC, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFED190C:  80090000  lwz r0,0(r9)
+	  36: GETL       	R9, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R0
+	  39: INCEIPL       	$4
+
+	0xFED1910:  93810010  stw r28,16(r1)
+	  40: GETL       	R28, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x10, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0xFED1914:  2F800000  cmpi cr7,r0,0
+	  45: GETL       	R0, t36
+	  46: CMP0L       	t36, t38  (-rSo)
+	  47: ICRFL       	t38, $0x7, CR
+	  48: INCEIPL       	$4
+
+	0xFED1918:  93E1001C  stw r31,28(r1)
+	  49: GETL       	R31, t40
+	  50: GETL       	R1, t42
+	  51: ADDL       	$0x1C, t42
+	  52: STL       	t40, (t42)
+	  53: INCEIPL       	$4
+
+	0xFED191C:  419E003C  bc 12,30,0xFED1958
+	  54: Js30o       	$0xFED1958
+
+
+
+. 1699 FED18EC 52
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 90 01 00 24 7C 7D 1B 78 93 41 00 08 81 3E 1A 7C 93 61 00 0C 80 09 00 00 93 81 00 10 2F 80 00 00 93 E1 00 1C 41 9E 00 3C
+==== BB 1700 (0xFED1920) approx BBs exec'd 0 ====
+
+	0xFED1920:  80A10000  lwz r5,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFED1924:  7C0903A6  mtctr r0
+	   4: GETL       	R0, t4
+	   5: PUTL       	t4, CTR
+	   6: INCEIPL       	$4
+
+	0xFED1928:  80850004  lwz r4,4(r5)
+	   7: GETL       	R5, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0xFED192C:  4E800421  bctrl
+	  12: MOVL       	$0xFED1930, t10
+	  13: PUTL       	t10, LR
+	  14: GETL       	CTR, t12
+	  15: JMPo-c       	t12  ($4)
+
+
+
+. 1700 FED1920 16
+. 80 A1 00 00 7C 09 03 A6 80 85 00 04 4E 80 04 21
+==== BB 1701 malloc_hook_ini(0xFED2A68) approx BBs exec'd 0 ====
+
+	0xFED2A68:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED2A6C:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFED2A70:  480D53E1  bl 0xFFA7E50
+	   9: MOVL       	$0xFED2A74, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1701 FED2A68 12
+. 94 21 FF E0 7C 88 02 A6 48 0D 53 E1
+==== BB 1702 (0xFED2A74) approx BBs exec'd 0 ====
+
+	0xFED2A74:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED2A78:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFED2A7C:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFED2A80:  7C7D1B78  or r29,r3,r3
+	  13: GETL       	R3, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0xFED2A84:  38600000  li r3,0
+	  16: MOVL       	$0x0, t12
+	  17: PUTL       	t12, R3
+	  18: INCEIPL       	$4
+
+	0xFED2A88:  90810024  stw r4,36(r1)
+	  19: GETL       	R4, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x24, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFED2A8C:  813E1A7C  lwz r9,6780(r30)
+	  24: GETL       	R30, t18
+	  25: ADDL       	$0x1A7C, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R9
+	  28: INCEIPL       	$4
+
+	0xFED2A90:  93E1001C  stw r31,28(r1)
+	  29: GETL       	R31, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x1C, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFED2A94:  90690000  stw r3,0(r9)
+	  34: GETL       	R3, t26
+	  35: GETL       	R9, t28
+	  36: STL       	t26, (t28)
+	  37: INCEIPL       	$4
+
+	0xFED2A98:  4BFFFBDD  bl 0xFED2674
+	  38: MOVL       	$0xFED2A9C, t30
+	  39: PUTL       	t30, LR
+	  40: JMPo-c       	$0xFED2674  ($4)
+
+
+
+. 1702 FED2A74 40
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 7C 7D 1B 78 38 60 00 00 90 81 00 24 81 3E 1A 7C 93 E1 00 1C 90 69 00 00 4B FF FB DD
+==== BB 1703 ptmalloc_init(0xFED2674) approx BBs exec'd 0 ====
+
+	0xFED2674:  9421FFB0  stwu r1,-80(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFB0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED2678:  7C6802A6  mflr r3
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFED267C:  480D57D5  bl 0xFFA7E50
+	   9: MOVL       	$0xFED2680, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1703 FED2674 12
+. 94 21 FF B0 7C 68 02 A6 48 0D 57 D5
+==== BB 1704 (0xFED2680) approx BBs exec'd 0 ====
+
+	0xFED2680:  93C10048  stw r30,72(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x48, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED2684:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFED2688:  9361003C  stw r27,60(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x3C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFED268C:  90610054  stw r3,84(r1)
+	  13: GETL       	R3, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x54, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFED2690:  93E1004C  stw r31,76(r1)
+	  18: GETL       	R31, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x4C, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFED2694:  3BE00000  li r31,0
+	  23: MOVL       	$0x0, t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0xFED2698:  837E1C84  lwz r27,7300(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x1C84, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0xFED269C:  93410038  stw r26,56(r1)
+	  31: GETL       	R26, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x38, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFED26A0:  801B0000  lwz r0,0(r27)
+	  36: GETL       	R27, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R0
+	  39: INCEIPL       	$4
+
+	0xFED26A4:  93810040  stw r28,64(r1)
+	  40: GETL       	R28, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x40, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0xFED26A8:  2F800000  cmpi cr7,r0,0
+	  45: GETL       	R0, t36
+	  46: CMP0L       	t36, t38  (-rSo)
+	  47: ICRFL       	t38, $0x7, CR
+	  48: INCEIPL       	$4
+
+	0xFED26AC:  93A10044  stw r29,68(r1)
+	  49: GETL       	R29, t40
+	  50: GETL       	R1, t42
+	  51: ADDL       	$0x44, t42
+	  52: STL       	t40, (t42)
+	  53: INCEIPL       	$4
+
+	0xFED26B0:  419C002C  bc 12,28,0xFED26DC
+	  54: Js28o       	$0xFED26DC
+
+
+
+. 1704 FED2680 52
+. 93 C1 00 48 7F C8 02 A6 93 61 00 3C 90 61 00 54 93 E1 00 4C 3B E0 00 00 83 7E 1C 84 93 41 00 38 80 1B 00 00 93 81 00 40 2F 80 00 00 93 A1 00 44 41 9C 00 2C
+==== BB 1705 (0xFED26DC) approx BBs exec'd 0 ====
+
+	0xFED26DC:  83BE0628  lwz r29,1576(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x628, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFED26E0:  3CA00002  lis r5,2
+	   5: MOVL       	$0x20000, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFED26E4:  3CC00001  lis r6,1
+	   8: MOVL       	$0x10000, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0xFED26E8:  93FB0000  stw r31,0(r27)
+	  11: GETL       	R31, t8
+	  12: GETL       	R27, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFED26EC:  90DD0010  stw r6,16(r29)
+	  15: GETL       	R6, t12
+	  16: GETL       	R29, t14
+	  17: ADDL       	$0x10, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFED26F0:  90BD0000  stw r5,0(r29)
+	  20: GETL       	R5, t16
+	  21: GETL       	R29, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0xFED26F4:  90BD0004  stw r5,4(r29)
+	  24: GETL       	R5, t20
+	  25: GETL       	R29, t22
+	  26: ADDL       	$0x4, t22
+	  27: STL       	t20, (t22)
+	  28: INCEIPL       	$4
+
+	0xFED26F8:  90BD0008  stw r5,8(r29)
+	  29: GETL       	R5, t24
+	  30: GETL       	R29, t26
+	  31: ADDL       	$0x8, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0xFED26FC:  48057201  bl 0xFF298FC
+	  34: MOVL       	$0xFED2700, t28
+	  35: PUTL       	t28, LR
+	  36: JMPo-c       	$0xFF298FC  ($4)
+
+
+
+. 1705 FED26DC 36
+. 83 BE 06 28 3C A0 00 02 3C C0 00 01 93 FB 00 00 90 DD 00 10 90 BD 00 00 90 BD 00 04 90 BD 00 08 48 05 72 01
+==== BB 1706 (0xFED2700) approx BBs exec'd 0 ====
+
+	0xFED2700:  813E1BF8  lwz r9,7160(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BF8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFED2704:  839E05FC  lwz r28,1532(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x5FC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFED2708:  80890000  lwz r4,0(r9)
+	  10: GETL       	R9, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R4
+	  13: INCEIPL       	$4
+
+	0xFED270C:  907D0018  stw r3,24(r29)
+	  14: GETL       	R3, t12
+	  15: GETL       	R29, t14
+	  16: ADDL       	$0x18, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0xFED2710:  2C040000  cmpi cr0,r4,0
+	  19: GETL       	R4, t16
+	  20: CMP0L       	t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x0, CR
+	  22: INCEIPL       	$4
+
+	0xFED2714:  93FC0000  stw r31,0(r28)
+	  23: GETL       	R31, t20
+	  24: GETL       	R28, t22
+	  25: STL       	t20, (t22)
+	  26: INCEIPL       	$4
+
+	0xFED2718:  939C0448  stw r28,1096(r28)
+	  27: GETL       	R28, t24
+	  28: GETL       	R28, t26
+	  29: ADDL       	$0x448, t26
+	  30: STL       	t24, (t26)
+	  31: INCEIPL       	$4
+
+	0xFED271C:  41820100  bc 12,2,0xFED281C
+	  32: Js02o       	$0xFED281C
+
+
+
+. 1706 FED2700 32
+. 81 3E 1B F8 83 9E 05 FC 80 89 00 00 90 7D 00 18 2C 04 00 00 93 FC 00 00 93 9C 04 48 41 82 01 00
+==== BB 1707 (0xFED281C) approx BBs exec'd 0 ====
+
+	0xFED281C:  807E0648  lwz r3,1608(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x648, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFED2820:  38810010  addi r4,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFED2824:  38A10020  addi r5,r1,32
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x20, t6
+	  11: PUTL       	t6, R5
+	  12: INCEIPL       	$4
+
+	0xFED2828:  38C00000  li r6,0
+	  13: MOVL       	$0x0, t8
+	  14: PUTL       	t8, R6
+	  15: INCEIPL       	$4
+
+	0xFED282C:  4809A5E1  bl 0xFF6CE0C
+	  16: MOVL       	$0xFED2830, t10
+	  17: PUTL       	t10, LR
+	  18: JMPo-c       	$0xFF6CE0C  ($4)
+
+
+
+. 1707 FED281C 20
+. 80 7E 06 48 38 81 00 10 38 A1 00 20 38 C0 00 00 48 09 A5 E1
+==== BB 1708 __GI__dl_addr(0xFF6CE0C) approx BBs exec'd 0 ====
+
+	0xFF6CE0C:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF6CE10:  7CE802A6  mflr r7
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0xFF6CE14:  4803B03D  bl 0xFFA7E50
+	   9: MOVL       	$0xFF6CE18, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1708 FF6CE0C 12
+. 94 21 FF D0 7C E8 02 A6 48 03 B0 3D
+==== BB 1709 (0xFF6CE18) approx BBs exec'd 0 ====
+
+	0xFF6CE18:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF6CE1C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFF6CE20:  9361001C  stw r27,28(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFF6CE24:  90E10034  stw r7,52(r1)
+	  13: GETL       	R7, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x34, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF6CE28:  93210014  stw r25,20(r1)
+	  18: GETL       	R25, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFF6CE2C:  7CD93378  or r25,r6,r6
+	  23: GETL       	R6, t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0xFF6CE30:  837E1B98  lwz r27,7064(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x1B98, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0xFF6CE34:  93410018  stw r26,24(r1)
+	  31: GETL       	R26, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x18, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFF6CE38:  7CBA2B78  or r26,r5,r5
+	  36: GETL       	R5, t28
+	  37: PUTL       	t28, R26
+	  38: INCEIPL       	$4
+
+	0xFF6CE3C:  801B03F8  lwz r0,1016(r27)
+	  39: GETL       	R27, t30
+	  40: ADDL       	$0x3F8, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R0
+	  43: INCEIPL       	$4
+
+	0xFF6CE40:  93810020  stw r28,32(r1)
+	  44: GETL       	R28, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x20, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFF6CE44:  7C9C2378  or r28,r4,r4
+	  49: GETL       	R4, t38
+	  50: PUTL       	t38, R28
+	  51: INCEIPL       	$4
+
+	0xFF6CE48:  93E1002C  stw r31,44(r1)
+	  52: GETL       	R31, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x2C, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0xFF6CE4C:  7C0903A6  mtctr r0
+	  57: GETL       	R0, t44
+	  58: PUTL       	t44, CTR
+	  59: INCEIPL       	$4
+
+	0xFF6CE50:  7C7F1B78  or r31,r3,r3
+	  60: GETL       	R3, t46
+	  61: PUTL       	t46, R31
+	  62: INCEIPL       	$4
+
+	0xFF6CE54:  93A10024  stw r29,36(r1)
+	  63: GETL       	R29, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x24, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0xFF6CE58:  387B0180  addi r3,r27,384
+	  68: GETL       	R27, t52
+	  69: ADDL       	$0x180, t52
+	  70: PUTL       	t52, R3
+	  71: INCEIPL       	$4
+
+	0xFF6CE5C:  4E800421  bctrl
+	  72: MOVL       	$0xFF6CE60, t54
+	  73: PUTL       	t54, LR
+	  74: GETL       	CTR, t56
+	  75: JMPo-c       	t56  ($4)
+
+
+
+. 1709 FF6CE18 72
+. 93 C1 00 28 7F C8 02 A6 93 61 00 1C 90 E1 00 34 93 21 00 14 7C D9 33 78 83 7E 1B 98 93 41 00 18 7C BA 2B 78 80 1B 03 F8 93 81 00 20 7C 9C 23 78 93 E1 00 2C 7C 09 03 A6 7C 7F 1B 78 93 A1 00 24 38 7B 01 80 4E 80 04 21
+==== BB 1710 rtld_lock_default_lock_recursive(0x25471884) approx BBs exec'd 0 ====
+
+	0x25471884:  80830004  lwz r4,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25471888:  9421FFF0  stwu r1,-16(r1)
+	   5: GETL       	R1, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0xFFFFFFF0, t6
+	   8: PUTL       	t6, R1
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x2547188C:  39240001  addi r9,r4,1
+	  11: GETL       	R4, t8
+	  12: ADDL       	$0x1, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x25471890:  38210010  addi r1,r1,16
+	  15: GETL       	R1, t10
+	  16: ADDL       	$0x10, t10
+	  17: PUTL       	t10, R1
+	  18: INCEIPL       	$4
+
+	0x25471894:  91230004  stw r9,4(r3)
+	  19: GETL       	R9, t12
+	  20: GETL       	R3, t14
+	  21: ADDL       	$0x4, t14
+	  22: STL       	t12, (t14)
+	  23: INCEIPL       	$4
+
+	0x25471898:  4E800020  blr
+	  24: GETL       	LR, t16
+	  25: JMPo-r       	t16  ($4)
+
+
+
+. 1710 25471884 24
+. 80 83 00 04 94 21 FF F0 39 24 00 01 38 21 00 10 91 23 00 04 4E 80 00 20
+==== BB 1711 (0xFF6CE60) approx BBs exec'd 0 ====
+
+	0xFF6CE60:  38A00000  li r5,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFF6CE64:  7F67DB78  or r7,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0xFF6CE68:  38DB0168  addi r6,r27,360
+	   6: GETL       	R27, t4
+	   7: ADDL       	$0x168, t4
+	   8: PUTL       	t4, R6
+	   9: INCEIPL       	$4
+
+	0xFF6CE6C:  81070000  lwz r8,0(r7)
+	  10: GETL       	R7, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R8
+	  13: INCEIPL       	$4
+
+	0xFF6CE70:  38E70018  addi r7,r7,24
+	  14: GETL       	R7, t10
+	  15: ADDL       	$0x18, t10
+	  16: PUTL       	t10, R7
+	  17: INCEIPL       	$4
+
+	0xFF6CE74:  2F880000  cmpi cr7,r8,0
+	  18: GETL       	R8, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0xFF6CE78:  419E006C  bc 12,30,0xFF6CEE4
+	  22: Js30o       	$0xFF6CEE4
+
+
+
+. 1711 FF6CE60 28
+. 38 A0 00 00 7F 67 DB 78 38 DB 01 68 81 07 00 00 38 E7 00 18 2F 88 00 00 41 9E 00 6C
+==== BB 1712 (0xFF6CE7C) approx BBs exec'd 0 ====
+
+	0xFF6CE7C:  806801A0  lwz r3,416(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x1A0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFF6CE80:  7C03F840  cmpl cr0,r3,r31
+	   5: GETL       	R3, t4
+	   6: GETL       	R31, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFF6CE84:  41810054  bc 12,1,0xFF6CED8
+	  10: Js01o       	$0xFF6CED8
+
+
+
+. 1712 FF6CE7C 12
+. 80 68 01 A0 7C 03 F8 40 41 81 00 54
+==== BB 1713 (0xFF6CED8) approx BBs exec'd 0 ====
+
+	0xFF6CED8:  8108000C  lwz r8,12(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFF6CEDC:  4BFFFF98  b 0xFF6CE74
+	   5: JMPo       	$0xFF6CE74  ($4)
+
+
+
+. 1713 FF6CED8 8
+. 81 08 00 0C 4B FF FF 98
+==== BB 1714 (0xFF6CE74) approx BBs exec'd 0 ====
+
+	0xFF6CE74:  2F880000  cmpi cr7,r8,0
+	   0: GETL       	R8, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFF6CE78:  419E006C  bc 12,30,0xFF6CEE4
+	   4: Js30o       	$0xFF6CEE4
+
+
+
+. 1714 FF6CE74 8
+. 2F 88 00 00 41 9E 00 6C
+==== BB 1715 (0xFF6CE88) approx BBs exec'd 0 ====
+
+	0xFF6CE88:  808801A4  lwz r4,420(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x1A4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFF6CE8C:  7C84F840  cmpl cr1,r4,r31
+	   5: GETL       	R4, t4
+	   6: GETL       	R31, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFF6CE90:  40850048  bc 4,5,0xFF6CED8
+	  10: Jc05o       	$0xFF6CED8
+
+
+
+. 1715 FF6CE88 12
+. 80 88 01 A4 7C 84 F8 40 40 85 00 48
+==== BB 1716 (0xFF6CE94) approx BBs exec'd 0 ====
+
+	0xFF6CE94:  A1680154  lhz r11,340(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x154, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFF6CE98:  2F0B0000  cmpi cr6,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFF6CE9C:  419A0044  bc 12,26,0xFF6CEE0
+	   9: Js26o       	$0xFF6CEE0
+
+
+
+. 1716 FF6CE94 12
+. A1 68 01 54 2F 0B 00 00 41 9A 00 44
+==== BB 1717 (0xFF6CEA0) approx BBs exec'd 0 ====
+
+	0xFF6CEA0:  8148014C  lwz r10,332(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x14C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFF6CEA4:  396BFFFF  addi r11,r11,-1
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xFF6CEA8:  55692834  rlwinm r9,r11,5,0,26
+	   9: GETL       	R11, t6
+	  10: SHLL       	$0x5, t6
+	  11: PUTL       	t6, R9
+	  12: INCEIPL       	$4
+
+	0xFF6CEAC:  7D89502E  lwzx r12,r9,r10
+	  13: GETL       	R10, t8
+	  14: GETL       	R9, t10
+	  15: ADDL       	t10, t8
+	  16: LDL       	(t8), t12
+	  17: PUTL       	t12, R12
+	  18: INCEIPL       	$4
+
+	0xFF6CEB0:  2F8C0001  cmpi cr7,r12,1
+	  19: GETL       	R12, t14
+	  20: MOVL       	$0x1, t18
+	  21: CMPL       	t14, t18, t16  (-rSo)
+	  22: ICRFL       	t16, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0xFF6CEB4:  409EFFF0  bc 4,30,0xFF6CEA4
+	  24: Jc30o       	$0xFF6CEA4
+
+
+
+. 1717 FF6CEA0 24
+. 81 48 01 4C 39 6B FF FF 55 69 28 34 7D 89 50 2E 2F 8C 00 01 40 9E FF F0
+==== BB 1718 (0xFF6CEA4) approx BBs exec'd 0 ====
+
+	0xFF6CEA4:  396BFFFF  addi r11,r11,-1
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFF6CEA8:  55692834  rlwinm r9,r11,5,0,26
+	   4: GETL       	R11, t2
+	   5: SHLL       	$0x5, t2
+	   6: PUTL       	t2, R9
+	   7: INCEIPL       	$4
+
+	0xFF6CEAC:  7D89502E  lwzx r12,r9,r10
+	   8: GETL       	R10, t4
+	   9: GETL       	R9, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0xFF6CEB0:  2F8C0001  cmpi cr7,r12,1
+	  14: GETL       	R12, t10
+	  15: MOVL       	$0x1, t14
+	  16: CMPL       	t10, t14, t12  (-rSo)
+	  17: ICRFL       	t12, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFF6CEB4:  409EFFF0  bc 4,30,0xFF6CEA4
+	  19: Jc30o       	$0xFF6CEA4
+
+
+
+. 1718 FF6CEA4 20
+. 39 6B FF FF 55 69 28 34 7D 89 50 2E 2F 8C 00 01 40 9E FF F0
+==== BB 1719 (0xFF6CEB8) approx BBs exec'd 0 ====
+
+	0xFF6CEB8:  7C695214  add r3,r9,r10
+	   0: GETL       	R9, t0
+	   1: GETL       	R10, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFF6CEBC:  80080000  lwz r0,0(r8)
+	   5: GETL       	R8, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0xFF6CEC0:  81630008  lwz r11,8(r3)
+	   9: GETL       	R3, t8
+	  10: ADDL       	$0x8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0xFF6CEC4:  81430014  lwz r10,20(r3)
+	  14: GETL       	R3, t12
+	  15: ADDL       	$0x14, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R10
+	  18: INCEIPL       	$4
+
+	0xFF6CEC8:  7FA05A14  add r29,r0,r11
+	  19: GETL       	R0, t16
+	  20: GETL       	R11, t18
+	  21: ADDL       	t16, t18
+	  22: PUTL       	t18, R29
+	  23: INCEIPL       	$4
+
+	0xFF6CECC:  7D3D5214  add r9,r29,r10
+	  24: GETL       	R29, t20
+	  25: GETL       	R10, t22
+	  26: ADDL       	t20, t22
+	  27: PUTL       	t22, R9
+	  28: INCEIPL       	$4
+
+	0xFF6CED0:  7C09F840  cmpl cr0,r9,r31
+	  29: GETL       	R9, t24
+	  30: GETL       	R31, t26
+	  31: CMPUL       	t24, t26, t28  (-rSo)
+	  32: ICRFL       	t28, $0x0, CR
+	  33: INCEIPL       	$4
+
+	0xFF6CED4:  4181000C  bc 12,1,0xFF6CEE0
+	  34: Js01o       	$0xFF6CEE0
+
+
+
+. 1719 FF6CEB8 32
+. 7C 69 52 14 80 08 00 00 81 63 00 08 81 43 00 14 7F A0 5A 14 7D 3D 52 14 7C 09 F8 40 41 81 00 0C
+==== BB 1720 (0xFF6CEE0) approx BBs exec'd 0 ====
+
+	0xFF6CEE0:  7D054378  or r5,r8,r8
+	   0: GETL       	R8, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFF6CEE4:  7C873000  cmp cr1,r7,r6
+	   3: GETL       	R7, t2
+	   4: GETL       	R6, t4
+	   5: CMPL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFF6CEE8:  4085FF84  bc 4,5,0xFF6CE6C
+	   8: Jc05o       	$0xFF6CE6C
+
+
+
+. 1720 FF6CEE0 12
+. 7D 05 43 78 7C 87 30 00 40 85 FF 84
+==== BB 1721 (0xFF6CE6C) approx BBs exec'd 0 ====
+
+	0xFF6CE6C:  81070000  lwz r8,0(r7)
+	   0: GETL       	R7, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0xFF6CE70:  38E70018  addi r7,r7,24
+	   4: GETL       	R7, t4
+	   5: ADDL       	$0x18, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0xFF6CE74:  2F880000  cmpi cr7,r8,0
+	   8: GETL       	R8, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFF6CE78:  419E006C  bc 12,30,0xFF6CEE4
+	  12: Js30o       	$0xFF6CEE4
+
+
+
+. 1721 FF6CE6C 16
+. 81 07 00 00 38 E7 00 18 2F 88 00 00 41 9E 00 6C
+==== BB 1722 (0xFF6CEE4) approx BBs exec'd 0 ====
+
+	0xFF6CEE4:  7C873000  cmp cr1,r7,r6
+	   0: GETL       	R7, t0
+	   1: GETL       	R6, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFF6CEE8:  4085FF84  bc 4,5,0xFF6CE6C
+	   5: Jc05o       	$0xFF6CE6C
+
+
+
+. 1722 FF6CEE4 8
+. 7C 87 30 00 40 85 FF 84
+==== BB 1723 (0xFF6CEEC) approx BBs exec'd 0 ====
+
+	0xFF6CEEC:  2F050000  cmpi cr6,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFF6CEF0:  3BA00000  li r29,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R29
+	   6: INCEIPL       	$4
+
+	0xFF6CEF4:  419A012C  bc 12,26,0xFF6D020
+	   7: Js26o       	$0xFF6D020
+
+
+
+. 1723 FF6CEEC 12
+. 2F 05 00 00 3B A0 00 00 41 9A 01 2C
+==== BB 1724 (0xFF6CEF8) approx BBs exec'd 0 ====
+
+	0xFF6CEF8:  80850004  lwz r4,4(r5)
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFF6CEFC:  80E501A0  lwz r7,416(r5)
+	   5: GETL       	R5, t4
+	   6: ADDL       	$0x1A0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0xFF6CF00:  909C0000  stw r4,0(r28)
+	  10: GETL       	R4, t8
+	  11: GETL       	R28, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFF6CF04:  81050004  lwz r8,4(r5)
+	  14: GETL       	R5, t12
+	  15: ADDL       	$0x4, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R8
+	  18: INCEIPL       	$4
+
+	0xFF6CF08:  90FC0004  stw r7,4(r28)
+	  19: GETL       	R7, t16
+	  20: GETL       	R28, t18
+	  21: ADDL       	$0x4, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0xFF6CF0C:  88C80000  lbz r6,0(r8)
+	  24: GETL       	R8, t20
+	  25: LDB       	(t20), t22
+	  26: PUTL       	t22, R6
+	  27: INCEIPL       	$4
+
+	0xFF6CF10:  2F860000  cmpi cr7,r6,0
+	  28: GETL       	R6, t24
+	  29: CMP0L       	t24, t26  (-rSo)
+	  30: ICRFL       	t26, $0x7, CR
+	  31: INCEIPL       	$4
+
+	0xFF6CF14:  419E0164  bc 12,30,0xFF6D078
+	  32: Js30o       	$0xFF6D078
+
+
+
+. 1724 FF6CEF8 32
+. 80 85 00 04 80 E5 01 A0 90 9C 00 00 81 05 00 04 90 FC 00 04 88 C8 00 00 2F 86 00 00 41 9E 01 64
+==== BB 1725 (0xFF6CF18) approx BBs exec'd 0 ====
+
+	0xFF6CF18:  81050030  lwz r8,48(r5)
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFF6CF1C:  80E50034  lwz r7,52(r5)
+	   5: GETL       	R5, t4
+	   6: ADDL       	$0x34, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0xFF6CF20:  2C880000  cmpi cr1,r8,0
+	  10: GETL       	R8, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0xFF6CF24:  81650038  lwz r11,56(r5)
+	  14: GETL       	R5, t12
+	  15: ADDL       	$0x38, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R11
+	  18: INCEIPL       	$4
+
+	0xFF6CF28:  80670004  lwz r3,4(r7)
+	  19: GETL       	R7, t16
+	  20: ADDL       	$0x4, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R3
+	  23: INCEIPL       	$4
+
+	0xFF6CF2C:  80C50048  lwz r6,72(r5)
+	  24: GETL       	R5, t20
+	  25: ADDL       	$0x48, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R6
+	  28: INCEIPL       	$4
+
+	0xFF6CF30:  814B0004  lwz r10,4(r11)
+	  29: GETL       	R11, t24
+	  30: ADDL       	$0x4, t24
+	  31: LDL       	(t24), t26
+	  32: PUTL       	t26, R10
+	  33: INCEIPL       	$4
+
+	0xFF6CF34:  7C671B78  or r7,r3,r3
+	  34: GETL       	R3, t28
+	  35: PUTL       	t28, R7
+	  36: INCEIPL       	$4
+
+	0xFF6CF38:  83A60004  lwz r29,4(r6)
+	  37: GETL       	R6, t30
+	  38: ADDL       	$0x4, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R29
+	  41: INCEIPL       	$4
+
+	0xFF6CF3C:  41860014  bc 12,6,0xFF6CF50
+	  42: Js06o       	$0xFF6CF50
+
+
+
+. 1725 FF6CF18 40
+. 81 05 00 30 80 E5 00 34 2C 88 00 00 81 65 00 38 80 67 00 04 80 C5 00 48 81 4B 00 04 7C 67 1B 78 83 A6 00 04 41 86 00 14
+==== BB 1726 (0xFF6CF40) approx BBs exec'd 0 ====
+
+	0xFF6CF40:  80880004  lwz r4,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFF6CF44:  81040004  lwz r8,4(r4)
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0xFF6CF48:  55002036  rlwinm r0,r8,4,0,27
+	  10: GETL       	R8, t8
+	  11: SHLL       	$0x4, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0xFF6CF4C:  7CE05214  add r7,r0,r10
+	  14: GETL       	R0, t10
+	  15: GETL       	R10, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0xFF6CF50:  7F8A3840  cmpl cr7,r10,r7
+	  19: GETL       	R10, t14
+	  20: GETL       	R7, t16
+	  21: CMPUL       	t14, t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0xFF6CF54:  38800000  li r4,0
+	  24: MOVL       	$0x0, t20
+	  25: PUTL       	t20, R4
+	  26: INCEIPL       	$4
+
+	0xFF6CF58:  2C040000  cmpi cr0,r4,0
+	  27: GETL       	R4, t22
+	  28: CMP0L       	t22, t24  (-rSo)
+	  29: ICRFL       	t24, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0xFF6CF5C:  409C0084  bc 4,28,0xFF6CFE0
+	  31: Jc28o       	$0xFF6CFE0
+
+
+
+. 1726 FF6CF40 32
+. 80 88 00 04 81 04 00 04 55 00 20 36 7C E0 52 14 7F 8A 38 40 38 80 00 00 2C 04 00 00 40 9C 00 84
+==== BB 1727 (0xFF6CF60) approx BBs exec'd 0 ====
+
+	0xFF6CF60:  80C50000  lwz r6,0(r5)
+	   0: GETL       	R5, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFF6CF64:  810A0004  lwz r8,4(r10)
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R8
+	   8: INCEIPL       	$4
+
+	0xFF6CF68:  7D664214  add r11,r6,r8
+	   9: GETL       	R6, t8
+	  10: GETL       	R8, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0xFF6CF6C:  7C8BF840  cmpl cr1,r11,r31
+	  14: GETL       	R11, t12
+	  15: GETL       	R31, t14
+	  16: CMPUL       	t12, t14, t16  (-rSo)
+	  17: ICRFL       	t16, $0x1, CR
+	  18: INCEIPL       	$4
+
+	0xFF6CF70:  41850064  bc 12,5,0xFF6CFD4
+	  19: Js05o       	$0xFF6CFD4
+
+
+
+. 1727 FF6CF60 20
+. 80 C5 00 00 81 0A 00 04 7D 66 42 14 7C 8B F8 40 41 85 00 64
+==== BB 1728 (0xFF6CF74) approx BBs exec'd 0 ====
+
+	0xFF6CF74:  892A000C  lbz r9,12(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFF6CF78:  552C073E  rlwinm r12,r9,0,28,31
+	   5: GETL       	R9, t4
+	   6: ANDL       	$0xF, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFF6CF7C:  2F8C0006  cmpi cr7,r12,6
+	   9: GETL       	R12, t6
+	  10: MOVL       	$0x6, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0xFF6CF80:  419E0054  bc 12,30,0xFF6CFD4
+	  14: Js30o       	$0xFF6CFD4
+
+
+
+. 1728 FF6CF74 16
+. 89 2A 00 0C 55 2C 07 3E 2F 8C 00 06 41 9E 00 54
+==== BB 1729 (0xFF6CF84) approx BBs exec'd 0 ====
+
+	0xFF6CF84:  800A0008  lwz r0,8(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFF6CF88:  2C800000  cmpi cr1,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFF6CF8C:  4086000C  bc 4,6,0xFF6CF98
+	   9: Jc06o       	$0xFF6CF98
+
+
+
+. 1729 FF6CF84 12
+. 80 0A 00 08 2C 80 00 00 40 86 00 0C
+==== BB 1730 (0xFF6CF90) approx BBs exec'd 0 ====
+
+	0xFF6CF90:  7F8BF800  cmp cr7,r11,r31
+	   0: GETL       	R11, t0
+	   1: GETL       	R31, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFF6CF94:  419E0010  bc 12,30,0xFF6CFA4
+	   5: Js30o       	$0xFF6CFA4
+
+
+
+. 1730 FF6CF90 8
+. 7F 8B F8 00 41 9E 00 10
+==== BB 1731 (0xFF6CF98) approx BBs exec'd 0 ====
+
+	0xFF6CF98:  7C0B0214  add r0,r11,r0
+	   0: GETL       	R11, t0
+	   1: GETL       	R0, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFF6CF9C:  7C80F840  cmpl cr1,r0,r31
+	   5: GETL       	R0, t4
+	   6: GETL       	R31, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFF6CFA0:  40850034  bc 4,5,0xFF6CFD4
+	  10: Jc05o       	$0xFF6CFD4
+
+
+
+. 1731 FF6CF98 12
+. 7C 0B 02 14 7C 80 F8 40 40 85 00 34
+==== BB 1732 (0xFF6CFD4) approx BBs exec'd 0 ====
+
+	0xFF6CFD4:  394A0010  addi r10,r10,16
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0xFF6CFD8:  7C8A3840  cmpl cr1,r10,r7
+	   4: GETL       	R10, t2
+	   5: GETL       	R7, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFF6CFDC:  4184FF88  bc 12,4,0xFF6CF64
+	   9: Js04o       	$0xFF6CF64
+
+
+
+. 1732 FF6CFD4 12
+. 39 4A 00 10 7C 8A 38 40 41 84 FF 88
+==== BB 1733 (0xFF6CF64) approx BBs exec'd 0 ====
+
+	0xFF6CF64:  810A0004  lwz r8,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFF6CF68:  7D664214  add r11,r6,r8
+	   5: GETL       	R6, t4
+	   6: GETL       	R8, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0xFF6CF6C:  7C8BF840  cmpl cr1,r11,r31
+	  10: GETL       	R11, t8
+	  11: GETL       	R31, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0xFF6CF70:  41850064  bc 12,5,0xFF6CFD4
+	  15: Js05o       	$0xFF6CFD4
+
+
+
+. 1733 FF6CF64 16
+. 81 0A 00 04 7D 66 42 14 7C 8B F8 40 41 85 00 64
+==== BB 1734 (0xFF6CFE0) approx BBs exec'd 0 ====
+
+	0xFF6CFE0:  2F9A0000  cmpi cr7,r26,0
+	   0: GETL       	R26, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFF6CFE4:  419E0008  bc 12,30,0xFF6CFEC
+	   4: Js30o       	$0xFF6CFEC
+
+
+
+. 1734 FF6CFE0 8
+. 2F 9A 00 00 41 9E 00 08
+==== BB 1735 (0xFF6CFE8) approx BBs exec'd 0 ====
+
+	0xFF6CFE8:  90BA0000  stw r5,0(r26)
+	   0: GETL       	R5, t0
+	   1: GETL       	R26, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFF6CFEC:  2C990000  cmpi cr1,r25,0
+	   4: GETL       	R25, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFF6CFF0:  41860008  bc 12,6,0xFF6CFF8
+	   8: Js06o       	$0xFF6CFF8
+
+
+
+. 1735 FF6CFE8 12
+. 90 BA 00 00 2C 99 00 00 41 86 00 08
+==== BB 1736 (0xFF6CFF8) approx BBs exec'd 0 ====
+
+	0xFF6CFF8:  41820068  bc 12,2,0xFF6D060
+	   0: Js02o       	$0xFF6D060
+
+
+
+. 1736 FF6CFF8 4
+. 41 82 00 68
+==== BB 1737 (0xFF6D060) approx BBs exec'd 0 ====
+
+	0xFF6D060:  909C000C  stw r4,12(r28)
+	   0: GETL       	R4, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF6D064:  909C0008  stw r4,8(r28)
+	   5: GETL       	R4, t4
+	   6: GETL       	R28, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFF6D068:  4BFFFFB4  b 0xFF6D01C
+	  10: JMPo       	$0xFF6D01C  ($4)
+
+
+
+. 1737 FF6D060 12
+. 90 9C 00 0C 90 9C 00 08 4B FF FF B4
+==== BB 1738 (0xFF6D01C) approx BBs exec'd 0 ====
+
+	0xFF6D01C:  3BA00001  li r29,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFF6D020:  839B03FC  lwz r28,1020(r27)
+	   3: GETL       	R27, t2
+	   4: ADDL       	$0x3FC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFF6D024:  387B0180  addi r3,r27,384
+	   8: GETL       	R27, t6
+	   9: ADDL       	$0x180, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0xFF6D028:  7F8903A6  mtctr r28
+	  12: GETL       	R28, t8
+	  13: PUTL       	t8, CTR
+	  14: INCEIPL       	$4
+
+	0xFF6D02C:  4E800421  bctrl
+	  15: MOVL       	$0xFF6D030, t10
+	  16: PUTL       	t10, LR
+	  17: GETL       	CTR, t12
+	  18: JMPo-c       	t12  ($4)
+
+
+
+. 1738 FF6D01C 20
+. 3B A0 00 01 83 9B 03 FC 38 7B 01 80 7F 89 03 A6 4E 80 04 21
+==== BB 1739 rtld_lock_default_unlock_recursive(0x2547189C) approx BBs exec'd 0 ====
+
+	0x2547189C:  80830004  lwz r4,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254718A0:  9421FFF0  stwu r1,-16(r1)
+	   5: GETL       	R1, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0xFFFFFFF0, t6
+	   8: PUTL       	t6, R1
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x254718A4:  3924FFFF  addi r9,r4,-1
+	  11: GETL       	R4, t8
+	  12: ADDL       	$0xFFFFFFFF, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x254718A8:  38210010  addi r1,r1,16
+	  15: GETL       	R1, t10
+	  16: ADDL       	$0x10, t10
+	  17: PUTL       	t10, R1
+	  18: INCEIPL       	$4
+
+	0x254718AC:  91230004  stw r9,4(r3)
+	  19: GETL       	R9, t12
+	  20: GETL       	R3, t14
+	  21: ADDL       	$0x4, t14
+	  22: STL       	t12, (t14)
+	  23: INCEIPL       	$4
+
+	0x254718B0:  4E800020  blr
+	  24: GETL       	LR, t16
+	  25: JMPo-r       	t16  ($4)
+
+
+
+. 1739 2547189C 24
+. 80 83 00 04 94 21 FF F0 39 24 FF FF 38 21 00 10 91 23 00 04 4E 80 00 20
+==== BB 1740 (0xFF6D030) approx BBs exec'd 0 ====
+
+	0xFF6D030:  83610034  lwz r27,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFF6D034:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFF6D038:  83210014  lwz r25,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R25
+	  12: INCEIPL       	$4
+
+	0xFF6D03C:  7F6803A6  mtlr r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFF6D040:  83410018  lwz r26,24(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x18, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R26
+	  20: INCEIPL       	$4
+
+	0xFF6D044:  8361001C  lwz r27,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R27
+	  25: INCEIPL       	$4
+
+	0xFF6D048:  83810020  lwz r28,32(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R28
+	  30: INCEIPL       	$4
+
+	0xFF6D04C:  83A10024  lwz r29,36(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x24, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R29
+	  35: INCEIPL       	$4
+
+	0xFF6D050:  83C10028  lwz r30,40(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x28, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R30
+	  40: INCEIPL       	$4
+
+	0xFF6D054:  83E1002C  lwz r31,44(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x2C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R31
+	  45: INCEIPL       	$4
+
+	0xFF6D058:  38210030  addi r1,r1,48
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x30, t36
+	  48: PUTL       	t36, R1
+	  49: INCEIPL       	$4
+
+	0xFF6D05C:  4E800020  blr
+	  50: GETL       	LR, t38
+	  51: JMPo-r       	t38  ($4)
+
+
+
+. 1740 FF6D030 48
+. 83 61 00 34 7F A3 EB 78 83 21 00 14 7F 68 03 A6 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 1741 (0xFED2830) approx BBs exec'd 0 ====
+
+	0xFED2830:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFED2834:  41A6FEF8  bc 13,6,0xFED272C
+	   4: Js06o       	$0xFED272C
+
+
+
+. 1741 FED2830 8
+. 2C 83 00 00 41 A6 FE F8
+==== BB 1742 (0xFED2838) approx BBs exec'd 0 ====
+
+	0xFED2838:  81010020  lwz r8,32(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFED283C:  80E80018  lwz r7,24(r8)
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0xFED2840:  2F070000  cmpi cr6,r7,0
+	  10: GETL       	R7, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFED2844:  41BAFEE8  bc 13,26,0xFED272C
+	  14: Js26o       	$0xFED272C
+
+
+
+. 1742 FED2838 16
+. 81 01 00 20 80 E8 00 18 2F 07 00 00 41 BA FE E8
+==== BB 1743 (0xFED272C) approx BBs exec'd 0 ====
+
+	0xFED272C:  809E0600  lwz r4,1536(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x600, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFED2730:  807E1B4C  lwz r3,6988(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1B4C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFED2734:  7FA41214  add r29,r4,r2
+	  10: GETL       	R4, t8
+	  11: GETL       	R2, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0xFED2738:  3BE00000  li r31,0
+	  15: MOVL       	$0x0, t12
+	  16: PUTL       	t12, R31
+	  17: INCEIPL       	$4
+
+	0xFED273C:  939D0000  stw r28,0(r29)
+	  18: GETL       	R28, t14
+	  19: GETL       	R29, t16
+	  20: STL       	t14, (t16)
+	  21: INCEIPL       	$4
+
+	0xFED2740:  839E05F8  lwz r28,1528(r30)
+	  22: GETL       	R30, t18
+	  23: ADDL       	$0x5F8, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R28
+	  26: INCEIPL       	$4
+
+	0xFED2744:  80C30000  lwz r6,0(r3)
+	  27: GETL       	R3, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R6
+	  30: INCEIPL       	$4
+
+	0xFED2748:  809E0618  lwz r4,1560(r30)
+	  31: GETL       	R30, t26
+	  32: ADDL       	$0x618, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R4
+	  35: INCEIPL       	$4
+
+	0xFED274C:  807E05F4  lwz r3,1524(r30)
+	  36: GETL       	R30, t30
+	  37: ADDL       	$0x5F4, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R3
+	  40: INCEIPL       	$4
+
+	0xFED2750:  80BE061C  lwz r5,1564(r30)
+	  41: GETL       	R30, t34
+	  42: ADDL       	$0x61C, t34
+	  43: LDL       	(t34), t36
+	  44: PUTL       	t36, R5
+	  45: INCEIPL       	$4
+
+	0xFED2754:  835E1B6C  lwz r26,7020(r30)
+	  46: GETL       	R30, t38
+	  47: ADDL       	$0x1B6C, t38
+	  48: LDL       	(t38), t40
+	  49: PUTL       	t40, R26
+	  50: INCEIPL       	$4
+
+	0xFED2758:  93FC0000  stw r31,0(r28)
+	  51: GETL       	R31, t42
+	  52: GETL       	R28, t44
+	  53: STL       	t42, (t44)
+	  54: INCEIPL       	$4
+
+	0xFED275C:  3B800000  li r28,0
+	  55: MOVL       	$0x0, t46
+	  56: PUTL       	t46, R28
+	  57: INCEIPL       	$4
+
+	0xFED2760:  4806D4E1  bl 0xFF3FC40
+	  58: MOVL       	$0xFED2764, t48
+	  59: PUTL       	t48, LR
+	  60: JMPo-c       	$0xFF3FC40  ($4)
+
+
+
+. 1743 FED272C 56
+. 80 9E 06 00 80 7E 1B 4C 7F A4 12 14 3B E0 00 00 93 9D 00 00 83 9E 05 F8 80 C3 00 00 80 9E 06 18 80 7E 05 F4 80 BE 06 1C 83 5E 1B 6C 93 FC 00 00 3B 80 00 00 48 06 D4 E1
+==== BB 1744 __GI___register_atfork(0xFF3FC40) approx BBs exec'd 0 ====
+
+	0xFF3FC40:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF3FC44:  7CE802A6  mflr r7
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0xFF3FC48:  48068209  bl 0xFFA7E50
+	   9: MOVL       	$0xFF3FC4C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1744 FF3FC40 12
+. 94 21 FF D0 7C E8 02 A6 48 06 82 09
+==== BB 1745 (0xFF3FC4C) approx BBs exec'd 0 ====
+
+	0xFF3FC4C:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF3FC50:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFF3FC54:  93410018  stw r26,24(r1)
+	   8: GETL       	R26, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x18, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFF3FC58:  9361001C  stw r27,28(r1)
+	  13: GETL       	R27, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x1C, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF3FC5C:  7CDA3378  or r26,r6,r6
+	  18: GETL       	R6, t14
+	  19: PUTL       	t14, R26
+	  20: INCEIPL       	$4
+
+	0xFF3FC60:  93810020  stw r28,32(r1)
+	  21: GETL       	R28, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x20, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFF3FC64:  7CBB2B78  or r27,r5,r5
+	  26: GETL       	R5, t20
+	  27: PUTL       	t20, R27
+	  28: INCEIPL       	$4
+
+	0xFF3FC68:  93A10024  stw r29,36(r1)
+	  29: GETL       	R29, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x24, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFF3FC6C:  7C9C2378  or r28,r4,r4
+	  34: GETL       	R4, t26
+	  35: PUTL       	t26, R28
+	  36: INCEIPL       	$4
+
+	0xFF3FC70:  93E1002C  stw r31,44(r1)
+	  37: GETL       	R31, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x2C, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFF3FC74:  7C7D1B78  or r29,r3,r3
+	  42: GETL       	R3, t32
+	  43: PUTL       	t32, R29
+	  44: INCEIPL       	$4
+
+	0xFF3FC78:  93210014  stw r25,20(r1)
+	  45: GETL       	R25, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x14, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFF3FC7C:  39600000  li r11,0
+	  50: MOVL       	$0x0, t38
+	  51: PUTL       	t38, R11
+	  52: INCEIPL       	$4
+
+	0xFF3FC80:  90E10034  stw r7,52(r1)
+	  53: GETL       	R7, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x34, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0xFF3FC84:  38000001  li r0,1
+	  58: MOVL       	$0x1, t44
+	  59: PUTL       	t44, R0
+	  60: INCEIPL       	$4
+
+	0xFF3FC88:  83FE1D14  lwz r31,7444(r30)
+	  61: GETL       	R30, t46
+	  62: ADDL       	$0x1D14, t46
+	  63: LDL       	(t46), t48
+	  64: PUTL       	t48, R31
+	  65: INCEIPL       	$4
+
+	0xFF3FC8C:  7D20F828  lwarx r9,r0,r31
+	  66: GETL       	R31, t50
+	  67: LOCKo       	
+	  68: LDL       	(t50), t52
+	  69: PUTL       	t52, R9
+	  70: INCEIPL       	$4
+
+	0xFF3FC90:  7C095800  cmp cr0,r9,r11
+	  71: GETL       	R9, t54
+	  72: GETL       	R11, t56
+	  73: CMPL       	t54, t56, t58  (-rSo)
+	  74: ICRFL       	t58, $0x0, CR
+	  75: INCEIPL       	$4
+
+	0xFF3FC94:  4082000C  bc 4,2,0xFF3FCA0
+	  76: Jc02o       	$0xFF3FCA0
+
+
+
+. 1745 FF3FC4C 76
+. 93 C1 00 28 7F C8 02 A6 93 41 00 18 93 61 00 1C 7C DA 33 78 93 81 00 20 7C BB 2B 78 93 A1 00 24 7C 9C 23 78 93 E1 00 2C 7C 7D 1B 78 93 21 00 14 39 60 00 00 90 E1 00 34 38 00 00 01 83 FE 1D 14 7D 20 F8 28 7C 09 58 00 40 82 00 0C
+==== BB 1746 (0xFF3FC98) approx BBs exec'd 0 ====
+
+	0xFF3FC98:  7C00F92D  stwcx. r0,r0,r31
+	   0: GETL       	R31, t0
+	   1: GETL       	R0, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFF3FC9C:  40A2FFF0  bc 5,2,0xFF3FC8C
+	   6: Jc02o       	$0xFF3FC8C
+
+
+
+. 1746 FF3FC98 8
+. 7C 00 F9 2D 40 A2 FF F0
+==== BB 1747 (0xFF3FC8C) approx BBs exec'd 0 ====
+
+	0xFF3FC8C:  7D20F828  lwarx r9,r0,r31
+	   0: GETL       	R31, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFF3FC90:  7C095800  cmp cr0,r9,r11
+	   5: GETL       	R9, t4
+	   6: GETL       	R11, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFF3FC94:  4082000C  bc 4,2,0xFF3FCA0
+	  10: Jc02o       	$0xFF3FCA0
+
+
+
+. 1747 FF3FC8C 12
+. 7D 20 F8 28 7C 09 58 00 40 82 00 0C
+==== BB 1748 (0xFF3FCA0) approx BBs exec'd 0 ====
+
+	0xFF3FCA0:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFF3FCA4:  2F890000  cmpi cr7,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFF3FCA8:  409E015C  bc 4,30,0xFF3FE04
+	   5: Jc30o       	$0xFF3FE04
+
+
+
+. 1748 FF3FCA0 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 5C
+==== BB 1749 (0xFF3FCAC) approx BBs exec'd 0 ====
+
+	0xFF3FCAC:  807E10F8  lwz r3,4344(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x10F8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFF3FCB0:  7C791B78  or r25,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0xFF3FCB4:  39400000  li r10,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R10
+	  10: INCEIPL       	$4
+
+	0xFF3FCB8:  39630018  addi r11,r3,24
+	  11: GETL       	R3, t8
+	  12: ADDL       	$0x18, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0xFF3FCBC:  39000000  li r8,0
+	  15: MOVL       	$0x0, t10
+	  16: PUTL       	t10, R8
+	  17: INCEIPL       	$4
+
+	0xFF3FCC0:  48000008  b 0xFF3FCC8
+	  18: JMPo       	$0xFF3FCC8  ($4)
+
+
+
+. 1749 FF3FCAC 24
+. 80 7E 10 F8 7C 79 1B 78 39 40 00 00 39 63 00 18 39 00 00 00 48 00 00 08
+==== BB 1750 (0xFF3FCC8) approx BBs exec'd 0 ====
+
+	0xFF3FCC8:  808B0000  lwz r4,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFF3FCCC:  394A0001  addi r10,r10,1
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFF3FCD0:  7D094378  or r9,r8,r8
+	   8: GETL       	R8, t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0xFF3FCD4:  2B0A002F  cmpli cr6,r10,47
+	  11: GETL       	R10, t8
+	  12: MOVL       	$0x2F, t12
+	  13: CMPUL       	t8, t12, t10  (-rSo)
+	  14: ICRFL       	t10, $0x6, CR
+	  15: INCEIPL       	$4
+
+	0xFF3FCD8:  2C040000  cmpi cr0,r4,0
+	  16: GETL       	R4, t14
+	  17: CMP0L       	t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0xFF3FCDC:  3908001C  addi r8,r8,28
+	  20: GETL       	R8, t18
+	  21: ADDL       	$0x1C, t18
+	  22: PUTL       	t18, R8
+	  23: INCEIPL       	$4
+
+	0xFF3FCE0:  396B001C  addi r11,r11,28
+	  24: GETL       	R11, t20
+	  25: ADDL       	$0x1C, t20
+	  26: PUTL       	t20, R11
+	  27: INCEIPL       	$4
+
+	0xFF3FCE4:  4082FFE0  bc 4,2,0xFF3FCC4
+	  28: Jc02o       	$0xFF3FCC4
+
+
+
+. 1750 FF3FCC8 32
+. 80 8B 00 00 39 4A 00 01 7D 09 43 78 2B 0A 00 2F 2C 04 00 00 39 08 00 1C 39 6B 00 1C 40 82 FF E0
+==== BB 1751 (0xFF3FCE8) approx BBs exec'd 0 ====
+
+	0xFF3FCE8:  7C691A14  add r3,r9,r3
+	   0: GETL       	R9, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFF3FCEC:  39400001  li r10,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFF3FCF0:  39630004  addi r11,r3,4
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x4, t6
+	  10: PUTL       	t6, R11
+	  11: INCEIPL       	$4
+
+	0xFF3FCF4:  39000000  li r8,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R8
+	  14: INCEIPL       	$4
+
+	0xFF3FCF8:  2F0B0000  cmpi cr6,r11,0
+	  15: GETL       	R11, t10
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0xFF3FCFC:  914B0014  stw r10,20(r11)
+	  19: GETL       	R10, t14
+	  20: GETL       	R11, t16
+	  21: ADDL       	$0x14, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFF3FD00:  910B0018  stw r8,24(r11)
+	  24: GETL       	R8, t18
+	  25: GETL       	R11, t20
+	  26: ADDL       	$0x18, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFF3FD04:  419A0024  bc 12,26,0xFF3FD28
+	  29: Js26o       	$0xFF3FD28
+
+
+
+. 1751 FF3FCE8 32
+. 7C 69 1A 14 39 40 00 01 39 63 00 04 39 00 00 00 2F 0B 00 00 91 4B 00 14 91 0B 00 18 41 9A 00 24
+==== BB 1752 (0xFF3FD08) approx BBs exec'd 0 ====
+
+	0xFF3FD08:  819E1CD4  lwz r12,7380(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CD4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFF3FD0C:  93AB0004  stw r29,4(r11)
+	   5: GETL       	R29, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	$0x4, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFF3FD10:  832C0000  lwz r25,0(r12)
+	  10: GETL       	R12, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R25
+	  13: INCEIPL       	$4
+
+	0xFF3FD14:  938B0008  stw r28,8(r11)
+	  14: GETL       	R28, t12
+	  15: GETL       	R11, t14
+	  16: ADDL       	$0x8, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0xFF3FD18:  93230004  stw r25,4(r3)
+	  19: GETL       	R25, t16
+	  20: GETL       	R3, t18
+	  21: ADDL       	$0x4, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0xFF3FD1C:  936B000C  stw r27,12(r11)
+	  24: GETL       	R27, t20
+	  25: GETL       	R11, t22
+	  26: ADDL       	$0xC, t22
+	  27: STL       	t20, (t22)
+	  28: INCEIPL       	$4
+
+	0xFF3FD20:  916C0000  stw r11,0(r12)
+	  29: GETL       	R11, t24
+	  30: GETL       	R12, t26
+	  31: STL       	t24, (t26)
+	  32: INCEIPL       	$4
+
+	0xFF3FD24:  934B0010  stw r26,16(r11)
+	  33: GETL       	R26, t28
+	  34: GETL       	R11, t30
+	  35: ADDL       	$0x10, t30
+	  36: STL       	t28, (t30)
+	  37: INCEIPL       	$4
+
+	0xFF3FD28:  3B400000  li r26,0
+	  38: MOVL       	$0x0, t32
+	  39: PUTL       	t32, R26
+	  40: INCEIPL       	$4
+
+	0xFF3FD2C:  7C0004AC  sync
+	  41: INCEIPL       	$4
+
+	0xFF3FD30:  7C60F828  lwarx r3,r0,r31
+	  42: GETL       	R31, t34
+	  43: LOCKo       	
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R3
+	  46: INCEIPL       	$4
+
+	0xFF3FD34:  7F40F92D  stwcx. r26,r0,r31
+	  47: GETL       	R31, t38
+	  48: GETL       	R26, t40
+	  49: LOCKo       	
+	  50: STL       	t40, (t38)  (-rSo)
+	  51: ICRFL       	cr, $0x0, CR
+	  52: INCEIPL       	$4
+
+	0xFF3FD38:  40A2FFF8  bc 5,2,0xFF3FD30
+	  53: Jc02o       	$0xFF3FD30
+
+
+
+. 1752 FF3FD08 52
+. 81 9E 1C D4 93 AB 00 04 83 2C 00 00 93 8B 00 08 93 23 00 04 93 6B 00 0C 91 6C 00 00 93 4B 00 10 3B 40 00 00 7C 00 04 AC 7C 60 F8 28 7F 40 F9 2D 40 A2 FF F8
+==== BB 1753 (0xFF3FD3C) approx BBs exec'd 0 ====
+
+	0xFF3FD3C:  2C030001  cmpi cr0,r3,1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFF3FD40:  418100A4  bc 12,1,0xFF3FDE4
+	   5: Js01o       	$0xFF3FDE4
+
+
+
+. 1753 FF3FD3C 8
+. 2C 03 00 01 41 81 00 A4
+==== BB 1754 (0xFF3FD44) approx BBs exec'd 0 ====
+
+	0xFF3FD44:  409A0034  bc 4,26,0xFF3FD78
+	   0: Jc26o       	$0xFF3FD78
+
+
+
+. 1754 FF3FD44 4
+. 40 9A 00 34
+==== BB 1755 (0xFF3FD78) approx BBs exec'd 0 ====
+
+	0xFF3FD78:  83610034  lwz r27,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFF3FD7C:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFF3FD80:  83210014  lwz r25,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R25
+	  12: INCEIPL       	$4
+
+	0xFF3FD84:  7F6803A6  mtlr r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFF3FD88:  83410018  lwz r26,24(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x18, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R26
+	  20: INCEIPL       	$4
+
+	0xFF3FD8C:  8361001C  lwz r27,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R27
+	  25: INCEIPL       	$4
+
+	0xFF3FD90:  83810020  lwz r28,32(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R28
+	  30: INCEIPL       	$4
+
+	0xFF3FD94:  83A10024  lwz r29,36(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x24, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R29
+	  35: INCEIPL       	$4
+
+	0xFF3FD98:  83C10028  lwz r30,40(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x28, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R30
+	  40: INCEIPL       	$4
+
+	0xFF3FD9C:  83E1002C  lwz r31,44(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x2C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R31
+	  45: INCEIPL       	$4
+
+	0xFF3FDA0:  38210030  addi r1,r1,48
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x30, t36
+	  48: PUTL       	t36, R1
+	  49: INCEIPL       	$4
+
+	0xFF3FDA4:  4E800020  blr
+	  50: GETL       	LR, t38
+	  51: JMPo-r       	t38  ($4)
+
+
+
+. 1755 FF3FD78 48
+. 83 61 00 34 38 60 00 00 83 21 00 14 7F 68 03 A6 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 1756 (0xFED2764) approx BBs exec'd 0 ====
+
+	0xFED2764:  813A0000  lwz r9,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFED2768:  819E1D70  lwz r12,7536(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1D70, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R12
+	   8: INCEIPL       	$4
+
+	0xFED276C:  2F890000  cmpi cr7,r9,0
+	   9: GETL       	R9, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFED2770:  83EC0000  lwz r31,0(r12)
+	  13: GETL       	R12, t12
+	  14: LDL       	(t12), t14
+	  15: PUTL       	t14, R31
+	  16: INCEIPL       	$4
+
+	0xFED2774:  419E0068  bc 12,30,0xFED27DC
+	  17: Js30o       	$0xFED27DC
+
+
+
+. 1756 FED2764 20
+. 81 3A 00 00 81 9E 1D 70 2F 89 00 00 83 EC 00 00 41 9E 00 68
+==== BB 1757 (0xFED2778) approx BBs exec'd 0 ====
+
+	0xFED2778:  7D204B78  or r0,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFED277C:  3B410024  addi r26,r1,36
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: PUTL       	t2, R26
+	   6: INCEIPL       	$4
+
+	0xFED2780:  91210024  stw r9,36(r1)
+	   7: GETL       	R9, t4
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x24, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED2784:  7C090378  or r9,r0,r0
+	  12: GETL       	R0, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0xFED2788:  3BA00000  li r29,0
+	  15: MOVL       	$0x0, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0xFED278C:  81690000  lwz r11,0(r9)
+	  18: GETL       	R9, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R11
+	  21: INCEIPL       	$4
+
+	0xFED2790:  2C0B0000  cmpi cr0,r11,0
+	  22: GETL       	R11, t16
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0xFED2794:  4182001C  bc 12,2,0xFED27B0
+	  26: Js02o       	$0xFED27B0
+
+
+
+. 1757 FED2778 32
+. 7D 20 4B 78 3B 41 00 24 91 21 00 24 7C 09 03 78 3B A0 00 00 81 69 00 00 2C 0B 00 00 41 82 00 1C
+==== BB 1758 (0xFED2798) approx BBs exec'd 0 ====
+
+	0xFED2798:  88AB0000  lbz r5,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFED279C:  2C85004D  cmpi cr1,r5,77
+	   4: GETL       	R5, t4
+	   5: MOVL       	$0x4D, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFED27A0:  418600C0  bc 12,6,0xFED2860
+	   9: Js06o       	$0xFED2860
+
+
+
+. 1758 FED2798 12
+. 88 AB 00 00 2C 85 00 4D 41 86 00 C0
+==== BB 1759 (0xFED27A4) approx BBs exec'd 0 ====
+
+	0xFED27A4:  85690004  lwzu r11,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R9
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0xFED27A8:  2C0B0000  cmpi cr0,r11,0
+	   6: GETL       	R11, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFED27AC:  4082FFEC  bc 4,2,0xFED2798
+	  10: Jc02o       	$0xFED2798
+
+
+
+. 1759 FED27A4 12
+. 85 69 00 04 2C 0B 00 00 40 82 FF EC
+==== BB 1760 (0xFED2860) approx BBs exec'd 0 ====
+
+	0xFED2860:  88CB0001  lbz r6,1(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFED2864:  2F060041  cmpi cr6,r6,65
+	   5: GETL       	R6, t4
+	   6: MOVL       	$0x41, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFED2868:  409AFF3C  bc 4,26,0xFED27A4
+	  10: Jc26o       	$0xFED27A4
+
+
+
+. 1760 FED2860 12
+. 88 CB 00 01 2F 06 00 41 40 9A FF 3C
+==== BB 1761 (0xFED286C) approx BBs exec'd 0 ====
+
+	0xFED286C:  88EB0002  lbz r7,2(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x2, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFED2870:  2F87004C  cmpi cr7,r7,76
+	   5: GETL       	R7, t4
+	   6: MOVL       	$0x4C, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFED2874:  409EFF30  bc 4,30,0xFED27A4
+	  10: Jc30o       	$0xFED27A4
+
+
+
+. 1761 FED286C 12
+. 88 EB 00 02 2F 87 00 4C 40 9E FF 30
+==== BB 1762 (0xFED27B0) approx BBs exec'd 0 ====
+
+	0xFED27B0:  2C1D0000  cmpi cr0,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFED27B4:  4082011C  bc 4,2,0xFED28D0
+	   4: Jc02o       	$0xFED28D0
+
+
+
+. 1762 FED27B0 8
+. 2C 1D 00 00 40 82 01 1C
+==== BB 1763 (0xFED27B8) approx BBs exec'd 0 ====
+
+	0xFED27B8:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFED27BC:  419E0020  bc 12,30,0xFED27DC
+	   4: Js30o       	$0xFED27DC
+
+
+
+. 1763 FED27B8 8
+. 2F 9C 00 00 41 9E 00 20
+==== BB 1764 (0xFED27DC) approx BBs exec'd 0 ====
+
+	0xFED27DC:  835E1A88  lwz r26,6792(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1A88, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFED27E0:  813A0000  lwz r9,0(r26)
+	   5: GETL       	R26, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFED27E4:  2F090000  cmpi cr6,r9,0
+	   9: GETL       	R9, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0xFED27E8:  409A0064  bc 4,26,0xFED284C
+	  13: Jc26o       	$0xFED284C
+
+
+
+. 1764 FED27DC 16
+. 83 5E 1A 88 81 3A 00 00 2F 09 00 00 40 9A 00 64
+==== BB 1765 (0xFED27EC) approx BBs exec'd 0 ====
+
+	0xFED27EC:  3B800001  li r28,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0xFED27F0:  939B0000  stw r28,0(r27)
+	   3: GETL       	R28, t2
+	   4: GETL       	R27, t4
+	   5: STL       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFED27F4:  83610054  lwz r27,84(r1)
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x54, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R27
+	  11: INCEIPL       	$4
+
+	0xFED27F8:  83410038  lwz r26,56(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x38, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R26
+	  16: INCEIPL       	$4
+
+	0xFED27FC:  7F6803A6  mtlr r27
+	  17: GETL       	R27, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0xFED2800:  83810040  lwz r28,64(r1)
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x40, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R28
+	  24: INCEIPL       	$4
+
+	0xFED2804:  8361003C  lwz r27,60(r1)
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x3C, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R27
+	  29: INCEIPL       	$4
+
+	0xFED2808:  83A10044  lwz r29,68(r1)
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x44, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R29
+	  34: INCEIPL       	$4
+
+	0xFED280C:  83C10048  lwz r30,72(r1)
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x48, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R30
+	  39: INCEIPL       	$4
+
+	0xFED2810:  83E1004C  lwz r31,76(r1)
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x4C, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R31
+	  44: INCEIPL       	$4
+
+	0xFED2814:  38210050  addi r1,r1,80
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x50, t36
+	  47: PUTL       	t36, R1
+	  48: INCEIPL       	$4
+
+	0xFED2818:  4E800020  blr
+	  49: GETL       	LR, t38
+	  50: JMPo-r       	t38  ($4)
+
+
+
+. 1765 FED27EC 48
+. 3B 80 00 01 93 9B 00 00 83 61 00 54 83 41 00 38 7F 68 03 A6 83 81 00 40 83 61 00 3C 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+==== BB 1766 (0xFED2A9C) approx BBs exec'd 0 ====
+
+	0xFED2A9C:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED2AA0:  83C10018  lwz r30,24(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFED2AA4:  7FA3EB78  or r3,r29,r29
+	  10: GETL       	R29, t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFED2AA8:  83E1001C  lwz r31,28(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x1C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R31
+	  17: INCEIPL       	$4
+
+	0xFED2AAC:  7C0803A6  mtlr r0
+	  18: GETL       	R0, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFED2AB0:  83A10014  lwz r29,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0xFED2AB4:  38210020  addi r1,r1,32
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0xFED2AB8:  4BFFEE28  b 0xFED18E0
+	  30: JMPo       	$0xFED18E0  ($4)
+
+
+
+. 1766 FED2A9C 32
+. 80 01 00 24 83 C1 00 18 7F A3 EB 78 83 E1 00 1C 7C 08 03 A6 83 A1 00 14 38 21 00 20 4B FF EE 28
+==== BB 1767 (0xFED1958) approx BBs exec'd 0 ====
+
+	0xFED1958:  809E0600  lwz r4,1536(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x600, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFED195C:  7C641214  add r3,r4,r2
+	   5: GETL       	R4, t4
+	   6: GETL       	R2, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFED1960:  83E30000  lwz r31,0(r3)
+	  10: GETL       	R3, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R31
+	  13: INCEIPL       	$4
+
+	0xFED1964:  2C1F0000  cmpi cr0,r31,0
+	  14: GETL       	R31, t12
+	  15: CMP0L       	t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0xFED1968:  41820090  bc 12,2,0xFED19F8
+	  18: Js02o       	$0xFED19F8
+
+
+
+. 1767 FED1958 20
+. 80 9E 06 00 7C 64 12 14 83 E3 00 00 2C 1F 00 00 41 82 00 90
+==== BB 1768 (0xFED196C) approx BBs exec'd 0 ====
+
+	0xFED196C:  38E00001  li r7,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFED1970:  7CC0F828  lwarx r6,r0,r31
+	   3: GETL       	R31, t2
+	   4: LOCKo       	
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0xFED1974:  2C060000  cmpi cr0,r6,0
+	   8: GETL       	R6, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFED1978:  4082000C  bc 4,2,0xFED1984
+	  12: Jc02o       	$0xFED1984
+
+
+
+. 1768 FED196C 16
+. 38 E0 00 01 7C C0 F8 28 2C 06 00 00 40 82 00 0C
+==== BB 1769 (0xFED197C) approx BBs exec'd 0 ====
+
+	0xFED197C:  7CE0F92D  stwcx. r7,r0,r31
+	   0: GETL       	R31, t0
+	   1: GETL       	R7, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED1980:  40A2FFF0  bc 5,2,0xFED1970
+	   6: Jc02o       	$0xFED1970
+
+
+
+. 1769 FED197C 8
+. 7C E0 F9 2D 40 A2 FF F0
+==== BB 1770 (0xFED1970) approx BBs exec'd 0 ====
+
+	0xFED1970:  7CC0F828  lwarx r6,r0,r31
+	   0: GETL       	R31, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFED1974:  2C060000  cmpi cr0,r6,0
+	   5: GETL       	R6, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFED1978:  4082000C  bc 4,2,0xFED1984
+	   9: Jc02o       	$0xFED1984
+
+
+
+. 1770 FED1970 12
+. 7C C0 F8 28 2C 06 00 00 40 82 00 0C
+==== BB 1771 (0xFED1984) approx BBs exec'd 0 ====
+
+	0xFED1984:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFED1988:  2C860000  cmpi cr1,r6,0
+	   1: GETL       	R6, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFED198C:  4086006C  bc 4,6,0xFED19F8
+	   5: Jc06o       	$0xFED19F8
+
+
+
+. 1771 FED1984 12
+. 4C 00 01 2C 2C 86 00 00 40 86 00 6C
+==== BB 1772 (0xFED1990) approx BBs exec'd 0 ====
+
+	0xFED1990:  2F1F0000  cmpi cr6,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFED1994:  38600000  li r3,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFED1998:  41BAFF98  bc 13,26,0xFED1930
+	   7: Js26o       	$0xFED1930
+
+
+
+. 1772 FED1990 12
+. 2F 1F 00 00 38 60 00 00 41 BA FF 98
+==== BB 1773 (0xFED199C) approx BBs exec'd 0 ====
+
+	0xFED199C:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED19A0:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFED19A4:  4BFFDC39  bl 0xFECF5DC
+	   6: MOVL       	$0xFED19A8, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFECF5DC  ($4)
+
+
+
+. 1773 FED199C 12
+. 7F E3 FB 78 7F A4 EB 78 4B FF DC 39
+==== BB 1774 _int_malloc(0xFECF5DC) approx BBs exec'd 0 ====
+
+	0xFECF5DC:  3800FFDF  li r0,-33
+	   0: MOVL       	$0xFFFFFFDF, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFECF5E0:  7D800026  mfcr r12
+	   3: GETL       	CR, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0xFECF5E4:  7F840040  cmpl cr7,r4,r0
+	   6: GETL       	R4, t4
+	   7: GETL       	R0, t6
+	   8: CMPUL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFECF5E8:  7CA802A6  mflr r5
+	  11: GETL       	LR, t10
+	  12: PUTL       	t10, R5
+	  13: INCEIPL       	$4
+
+	0xFECF5EC:  9421FF90  stwu r1,-112(r1)
+	  14: GETL       	R1, t12
+	  15: GETL       	R1, t14
+	  16: ADDL       	$0xFFFFFF90, t14
+	  17: PUTL       	t14, R1
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFECF5F0:  480D8861  bl 0xFFA7E50
+	  20: MOVL       	$0xFECF5F4, t16
+	  21: PUTL       	t16, LR
+	  22: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1774 FECF5DC 24
+. 38 00 FF DF 7D 80 00 26 7F 84 00 40 7C A8 02 A6 94 21 FF 90 48 0D 88 61
+==== BB 1775 (0xFECF5F4) approx BBs exec'd 0 ====
+
+	0xFECF5F4:  93810060  stw r28,96(r1)
+	   0: GETL       	R28, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x60, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECF5F8:  7C7C1B78  or r28,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFECF5FC:  93C10068  stw r30,104(r1)
+	   8: GETL       	R30, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x68, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECF600:  7FC802A6  mflr r30
+	  13: GETL       	LR, t10
+	  14: PUTL       	t10, R30
+	  15: INCEIPL       	$4
+
+	0xFECF604:  91C10028  stw r14,40(r1)
+	  16: GETL       	R14, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x28, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFECF608:  91E1002C  stw r15,44(r1)
+	  21: GETL       	R15, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x2C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFECF60C:  92010030  stw r16,48(r1)
+	  26: GETL       	R16, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x30, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFECF610:  92210034  stw r17,52(r1)
+	  31: GETL       	R17, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x34, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFECF614:  92410038  stw r18,56(r1)
+	  36: GETL       	R18, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x38, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0xFECF618:  9261003C  stw r19,60(r1)
+	  41: GETL       	R19, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x3C, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0xFECF61C:  92810040  stw r20,64(r1)
+	  46: GETL       	R20, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x40, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0xFECF620:  92A10044  stw r21,68(r1)
+	  51: GETL       	R21, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x44, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0xFECF624:  92C10048  stw r22,72(r1)
+	  56: GETL       	R22, t44
+	  57: GETL       	R1, t46
+	  58: ADDL       	$0x48, t46
+	  59: STL       	t44, (t46)
+	  60: INCEIPL       	$4
+
+	0xFECF628:  92E1004C  stw r23,76(r1)
+	  61: GETL       	R23, t48
+	  62: GETL       	R1, t50
+	  63: ADDL       	$0x4C, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0xFECF62C:  93010050  stw r24,80(r1)
+	  66: GETL       	R24, t52
+	  67: GETL       	R1, t54
+	  68: ADDL       	$0x50, t54
+	  69: STL       	t52, (t54)
+	  70: INCEIPL       	$4
+
+	0xFECF630:  93210054  stw r25,84(r1)
+	  71: GETL       	R25, t56
+	  72: GETL       	R1, t58
+	  73: ADDL       	$0x54, t58
+	  74: STL       	t56, (t58)
+	  75: INCEIPL       	$4
+
+	0xFECF634:  93410058  stw r26,88(r1)
+	  76: GETL       	R26, t60
+	  77: GETL       	R1, t62
+	  78: ADDL       	$0x58, t62
+	  79: STL       	t60, (t62)
+	  80: INCEIPL       	$4
+
+	0xFECF638:  9361005C  stw r27,92(r1)
+	  81: GETL       	R27, t64
+	  82: GETL       	R1, t66
+	  83: ADDL       	$0x5C, t66
+	  84: STL       	t64, (t66)
+	  85: INCEIPL       	$4
+
+	0xFECF63C:  93A10064  stw r29,100(r1)
+	  86: GETL       	R29, t68
+	  87: GETL       	R1, t70
+	  88: ADDL       	$0x64, t70
+	  89: STL       	t68, (t70)
+	  90: INCEIPL       	$4
+
+	0xFECF640:  93E1006C  stw r31,108(r1)
+	  91: GETL       	R31, t72
+	  92: GETL       	R1, t74
+	  93: ADDL       	$0x6C, t74
+	  94: STL       	t72, (t74)
+	  95: INCEIPL       	$4
+
+	0xFECF644:  90A10074  stw r5,116(r1)
+	  96: GETL       	R5, t76
+	  97: GETL       	R1, t78
+	  98: ADDL       	$0x74, t78
+	  99: STL       	t76, (t78)
+	 100: INCEIPL       	$4
+
+	0xFECF648:  91810024  stw r12,36(r1)
+	 101: GETL       	R12, t80
+	 102: GETL       	R1, t82
+	 103: ADDL       	$0x24, t82
+	 104: STL       	t80, (t82)
+	 105: INCEIPL       	$4
+
+	0xFECF64C:  419D0490  bc 12,29,0xFECFADC
+	 106: Js29o       	$0xFECFADC
+
+
+
+. 1775 FECF5F4 92
+. 93 81 00 60 7C 7C 1B 78 93 C1 00 68 7F C8 02 A6 91 C1 00 28 91 E1 00 2C 92 01 00 30 92 21 00 34 92 41 00 38 92 61 00 3C 92 81 00 40 92 A1 00 44 92 C1 00 48 92 E1 00 4C 93 01 00 50 93 21 00 54 93 41 00 58 93 61 00 5C 93 A1 00 64 93 E1 00 6C 90 A1 00 74 91 81 00 24 41 9D 04 90
+==== BB 1776 (0xFECF650) approx BBs exec'd 0 ====
+
+	0xFECF650:  3864000B  addi r3,r4,11
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0xB, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFECF654:  2803000F  cmpli cr0,r3,15
+	   4: GETL       	R3, t2
+	   5: MOVL       	$0xF, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFECF658:  54790038  rlwinm r25,r3,0,0,28
+	   9: GETL       	R3, t8
+	  10: ANDL       	$0xFFFFFFF8, t8
+	  11: PUTL       	t8, R25
+	  12: INCEIPL       	$4
+
+	0xFECF65C:  408100B8  bc 4,1,0xFECF714
+	  13: Jc01o       	$0xFECF714
+
+
+
+. 1776 FECF650 16
+. 38 64 00 0B 28 03 00 0F 54 79 00 38 40 81 00 B8
+==== BB 1777 (0xFECF660) approx BBs exec'd 0 ====
+
+	0xFECF660:  815C0004  lwz r10,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFECF664:  7C8AC840  cmpl cr1,r10,r25
+	   5: GETL       	R10, t4
+	   6: GETL       	R25, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFECF668:  418400BC  bc 12,4,0xFECF724
+	  10: Js04o       	$0xFECF724
+
+
+
+. 1777 FECF660 12
+. 81 5C 00 04 7C 8A C8 40 41 84 00 BC
+==== BB 1778 (0xFECF724) approx BBs exec'd 0 ====
+
+	0xFECF724:  2B9901FF  cmpli cr7,r25,511
+	   0: GETL       	R25, t0
+	   1: MOVL       	$0x1FF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFECF728:  419D0060  bc 12,29,0xFECF788
+	   5: Js29o       	$0xFECF788
+
+
+
+. 1778 FECF724 8
+. 2B 99 01 FF 41 9D 00 60
+==== BB 1779 (0xFECF72C) approx BBs exec'd 0 ====
+
+	0xFECF72C:  7E59E214  add r18,r25,r28
+	   0: GETL       	R25, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0xFECF730:  5738E8FE  rlwinm r24,r25,29,3,31
+	   5: GETL       	R25, t4
+	   6: SHRL       	$0x3, t4
+	   7: PUTL       	t4, R24
+	   8: INCEIPL       	$4
+
+	0xFECF734:  39520030  addi r10,r18,48
+	   9: GETL       	R18, t6
+	  10: ADDL       	$0x30, t6
+	  11: PUTL       	t6, R10
+	  12: INCEIPL       	$4
+
+	0xFECF738:  806A000C  lwz r3,12(r10)
+	  13: GETL       	R10, t8
+	  14: ADDL       	$0xC, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0xFECF73C:  7C835000  cmp cr1,r3,r10
+	  18: GETL       	R3, t12
+	  19: GETL       	R10, t14
+	  20: CMPL       	t12, t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0xFECF740:  41860060  bc 12,6,0xFECF7A0
+	  23: Js06o       	$0xFECF7A0
+
+
+
+. 1779 FECF72C 24
+. 7E 59 E2 14 57 38 E8 FE 39 52 00 30 80 6A 00 0C 7C 83 50 00 41 86 00 60
+==== BB 1780 (0xFECF744) approx BBs exec'd 0 ====
+
+	0xFECF744:  2D830000  cmpi cr3,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x3, CR
+	   3: INCEIPL       	$4
+
+	0xFECF748:  418E0220  bc 12,14,0xFECF968
+	   4: Js14o       	$0xFECF968
+
+
+
+. 1780 FECF744 8
+. 2D 83 00 00 41 8E 02 20
+==== BB 1781 (0xFECF968) approx BBs exec'd 0 ====
+
+	0xFECF968:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFECF96C:  4BFFEB59  bl 0xFECE4C4
+	   3: MOVL       	$0xFECF970, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFECE4C4  ($4)
+
+
+
+. 1781 FECF968 8
+. 7F 83 E3 78 4B FF EB 59
+==== BB 1782 malloc_consolidate(0xFECE4C4) approx BBs exec'd 0 ====
+
+	0xFECE4C4:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFECE4C8:  9421FF90  stwu r1,-112(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF90, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECE4CC:  480D9985  bl 0xFFA7E50
+	   9: MOVL       	$0xFECE4D0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1782 FECE4C4 12
+. 7C 88 02 A6 94 21 FF 90 48 0D 99 85
+==== BB 1783 (0xFECE4D0) approx BBs exec'd 0 ====
+
+	0xFECE4D0:  3800007F  li r0,127
+	   0: MOVL       	$0x7F, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFECE4D4:  93010050  stw r24,80(r1)
+	   3: GETL       	R24, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x50, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFECE4D8:  39230038  addi r9,r3,56
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x38, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0xFECE4DC:  90810074  stw r4,116(r1)
+	  12: GETL       	R4, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x74, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFECE4E0:  7C781B78  or r24,r3,r3
+	  17: GETL       	R3, t12
+	  18: PUTL       	t12, R24
+	  19: INCEIPL       	$4
+
+	0xFECE4E4:  81630004  lwz r11,4(r3)
+	  20: GETL       	R3, t14
+	  21: ADDL       	$0x4, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R11
+	  24: INCEIPL       	$4
+
+	0xFECE4E8:  7C0903A6  mtctr r0
+	  25: GETL       	R0, t18
+	  26: PUTL       	t18, CTR
+	  27: INCEIPL       	$4
+
+	0xFECE4EC:  93C10068  stw r30,104(r1)
+	  28: GETL       	R30, t20
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x68, t22
+	  31: STL       	t20, (t22)
+	  32: INCEIPL       	$4
+
+	0xFECE4F0:  2F8B0000  cmpi cr7,r11,0
+	  33: GETL       	R11, t24
+	  34: CMP0L       	t24, t26  (-rSo)
+	  35: ICRFL       	t26, $0x7, CR
+	  36: INCEIPL       	$4
+
+	0xFECE4F4:  91C10028  stw r14,40(r1)
+	  37: GETL       	R14, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x28, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFECE4F8:  91E1002C  stw r15,44(r1)
+	  42: GETL       	R15, t32
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x2C, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0xFECE4FC:  7FC802A6  mflr r30
+	  47: GETL       	LR, t36
+	  48: PUTL       	t36, R30
+	  49: INCEIPL       	$4
+
+	0xFECE500:  92010030  stw r16,48(r1)
+	  50: GETL       	R16, t38
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x30, t40
+	  53: STL       	t38, (t40)
+	  54: INCEIPL       	$4
+
+	0xFECE504:  92210034  stw r17,52(r1)
+	  55: GETL       	R17, t42
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x34, t44
+	  58: STL       	t42, (t44)
+	  59: INCEIPL       	$4
+
+	0xFECE508:  92410038  stw r18,56(r1)
+	  60: GETL       	R18, t46
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x38, t48
+	  63: STL       	t46, (t48)
+	  64: INCEIPL       	$4
+
+	0xFECE50C:  9261003C  stw r19,60(r1)
+	  65: GETL       	R19, t50
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x3C, t52
+	  68: STL       	t50, (t52)
+	  69: INCEIPL       	$4
+
+	0xFECE510:  92810040  stw r20,64(r1)
+	  70: GETL       	R20, t54
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0x40, t56
+	  73: STL       	t54, (t56)
+	  74: INCEIPL       	$4
+
+	0xFECE514:  92A10044  stw r21,68(r1)
+	  75: GETL       	R21, t58
+	  76: GETL       	R1, t60
+	  77: ADDL       	$0x44, t60
+	  78: STL       	t58, (t60)
+	  79: INCEIPL       	$4
+
+	0xFECE518:  92C10048  stw r22,72(r1)
+	  80: GETL       	R22, t62
+	  81: GETL       	R1, t64
+	  82: ADDL       	$0x48, t64
+	  83: STL       	t62, (t64)
+	  84: INCEIPL       	$4
+
+	0xFECE51C:  92E1004C  stw r23,76(r1)
+	  85: GETL       	R23, t66
+	  86: GETL       	R1, t68
+	  87: ADDL       	$0x4C, t68
+	  88: STL       	t66, (t68)
+	  89: INCEIPL       	$4
+
+	0xFECE520:  93210054  stw r25,84(r1)
+	  90: GETL       	R25, t70
+	  91: GETL       	R1, t72
+	  92: ADDL       	$0x54, t72
+	  93: STL       	t70, (t72)
+	  94: INCEIPL       	$4
+
+	0xFECE524:  93410058  stw r26,88(r1)
+	  95: GETL       	R26, t74
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x58, t76
+	  98: STL       	t74, (t76)
+	  99: INCEIPL       	$4
+
+	0xFECE528:  9361005C  stw r27,92(r1)
+	 100: GETL       	R27, t78
+	 101: GETL       	R1, t80
+	 102: ADDL       	$0x5C, t80
+	 103: STL       	t78, (t80)
+	 104: INCEIPL       	$4
+
+	0xFECE52C:  93810060  stw r28,96(r1)
+	 105: GETL       	R28, t82
+	 106: GETL       	R1, t84
+	 107: ADDL       	$0x60, t84
+	 108: STL       	t82, (t84)
+	 109: INCEIPL       	$4
+
+	0xFECE530:  93A10064  stw r29,100(r1)
+	 110: GETL       	R29, t86
+	 111: GETL       	R1, t88
+	 112: ADDL       	$0x64, t88
+	 113: STL       	t86, (t88)
+	 114: INCEIPL       	$4
+
+	0xFECE534:  93E1006C  stw r31,108(r1)
+	 115: GETL       	R31, t90
+	 116: GETL       	R1, t92
+	 117: ADDL       	$0x6C, t92
+	 118: STL       	t90, (t92)
+	 119: INCEIPL       	$4
+
+	0xFECE538:  419E0128  bc 12,30,0xFECE660
+	 120: Js30o       	$0xFECE660
+
+
+
+. 1783 FECE4D0 108
+. 38 00 00 7F 93 01 00 50 39 23 00 38 90 81 00 74 7C 78 1B 78 81 63 00 04 7C 09 03 A6 93 C1 00 68 2F 8B 00 00 91 C1 00 28 91 E1 00 2C 7F C8 02 A6 92 01 00 30 92 21 00 34 92 41 00 38 92 61 00 3C 92 81 00 40 92 A1 00 44 92 C1 00 48 92 E1 00 4C 93 21 00 54 93 41 00 58 93 61 00 5C 93 81 00 60 93 A1 00 64 93 E1 00 6C 41 9E 01 28
+==== BB 1784 (0xFECE660) approx BBs exec'd 0 ====
+
+	0xFECE660:  9129000C  stw r9,12(r9)
+	   0: GETL       	R9, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECE664:  91290008  stw r9,8(r9)
+	   5: GETL       	R9, t4
+	   6: GETL       	R9, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECE668:  39290008  addi r9,r9,8
+	  10: GETL       	R9, t8
+	  11: ADDL       	$0x8, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFECE66C:  4200FFF4  bc 16,0,0xFECE660
+	  14: GETL       	CTR, t10
+	  15: ADDL       	$0xFFFFFFFF, t10
+	  16: PUTL       	t10, CTR
+	  17: JIFZL       	t10, $0xFECE670
+	  18: JMPo       	$0xFECE660  ($4)
+
+
+
+. 1784 FECE660 16
+. 91 29 00 0C 91 29 00 08 39 29 00 08 42 00 FF F4
+==== BB 1785 (0xFECE670) approx BBs exec'd 0 ====
+
+	0xFECE670:  81DE05FC  lwz r14,1532(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R14
+	   4: INCEIPL       	$4
+
+	0xFECE674:  7C987000  cmp cr1,r24,r14
+	   5: GETL       	R24, t4
+	   6: GETL       	R14, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFECE678:  41860008  bc 12,6,0xFECE680
+	  10: Js06o       	$0xFECE680
+
+
+
+. 1785 FECE670 12
+. 81 DE 05 FC 7C 98 70 00 41 86 00 08
+==== BB 1786 (0xFECE680) approx BBs exec'd 0 ====
+
+	0xFECE680:  557207BC  rlwinm r18,r11,0,30,30
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x2, t0
+	   2: PUTL       	t0, R18
+	   3: INCEIPL       	$4
+
+	0xFECE684:  3A180038  addi r16,r24,56
+	   4: GETL       	R24, t2
+	   5: ADDL       	$0x38, t2
+	   6: PUTL       	t2, R16
+	   7: INCEIPL       	$4
+
+	0xFECE688:  624F0049  ori r15,r18,0x49
+	   8: GETL       	R18, t4
+	   9: ORL       	$0x49, t4
+	  10: PUTL       	t4, R15
+	  11: INCEIPL       	$4
+
+	0xFECE68C:  92180030  stw r16,48(r24)
+	  12: GETL       	R16, t6
+	  13: GETL       	R24, t8
+	  14: ADDL       	$0x30, t8
+	  15: STL       	t6, (t8)
+	  16: INCEIPL       	$4
+
+	0xFECE690:  91F80004  stw r15,4(r24)
+	  17: GETL       	R15, t10
+	  18: GETL       	R24, t12
+	  19: ADDL       	$0x4, t12
+	  20: STL       	t10, (t12)
+	  21: INCEIPL       	$4
+
+	0xFECE694:  82610074  lwz r19,116(r1)
+	  22: GETL       	R1, t14
+	  23: ADDL       	$0x74, t14
+	  24: LDL       	(t14), t16
+	  25: PUTL       	t16, R19
+	  26: INCEIPL       	$4
+
+	0xFECE698:  81C10028  lwz r14,40(r1)
+	  27: GETL       	R1, t18
+	  28: ADDL       	$0x28, t18
+	  29: LDL       	(t18), t20
+	  30: PUTL       	t20, R14
+	  31: INCEIPL       	$4
+
+	0xFECE69C:  7E6803A6  mtlr r19
+	  32: GETL       	R19, t22
+	  33: PUTL       	t22, LR
+	  34: INCEIPL       	$4
+
+	0xFECE6A0:  81E1002C  lwz r15,44(r1)
+	  35: GETL       	R1, t24
+	  36: ADDL       	$0x2C, t24
+	  37: LDL       	(t24), t26
+	  38: PUTL       	t26, R15
+	  39: INCEIPL       	$4
+
+	0xFECE6A4:  82010030  lwz r16,48(r1)
+	  40: GETL       	R1, t28
+	  41: ADDL       	$0x30, t28
+	  42: LDL       	(t28), t30
+	  43: PUTL       	t30, R16
+	  44: INCEIPL       	$4
+
+	0xFECE6A8:  82210034  lwz r17,52(r1)
+	  45: GETL       	R1, t32
+	  46: ADDL       	$0x34, t32
+	  47: LDL       	(t32), t34
+	  48: PUTL       	t34, R17
+	  49: INCEIPL       	$4
+
+	0xFECE6AC:  82410038  lwz r18,56(r1)
+	  50: GETL       	R1, t36
+	  51: ADDL       	$0x38, t36
+	  52: LDL       	(t36), t38
+	  53: PUTL       	t38, R18
+	  54: INCEIPL       	$4
+
+	0xFECE6B0:  8261003C  lwz r19,60(r1)
+	  55: GETL       	R1, t40
+	  56: ADDL       	$0x3C, t40
+	  57: LDL       	(t40), t42
+	  58: PUTL       	t42, R19
+	  59: INCEIPL       	$4
+
+	0xFECE6B4:  82810040  lwz r20,64(r1)
+	  60: GETL       	R1, t44
+	  61: ADDL       	$0x40, t44
+	  62: LDL       	(t44), t46
+	  63: PUTL       	t46, R20
+	  64: INCEIPL       	$4
+
+	0xFECE6B8:  82A10044  lwz r21,68(r1)
+	  65: GETL       	R1, t48
+	  66: ADDL       	$0x44, t48
+	  67: LDL       	(t48), t50
+	  68: PUTL       	t50, R21
+	  69: INCEIPL       	$4
+
+	0xFECE6BC:  82C10048  lwz r22,72(r1)
+	  70: GETL       	R1, t52
+	  71: ADDL       	$0x48, t52
+	  72: LDL       	(t52), t54
+	  73: PUTL       	t54, R22
+	  74: INCEIPL       	$4
+
+	0xFECE6C0:  82E1004C  lwz r23,76(r1)
+	  75: GETL       	R1, t56
+	  76: ADDL       	$0x4C, t56
+	  77: LDL       	(t56), t58
+	  78: PUTL       	t58, R23
+	  79: INCEIPL       	$4
+
+	0xFECE6C4:  83010050  lwz r24,80(r1)
+	  80: GETL       	R1, t60
+	  81: ADDL       	$0x50, t60
+	  82: LDL       	(t60), t62
+	  83: PUTL       	t62, R24
+	  84: INCEIPL       	$4
+
+	0xFECE6C8:  83210054  lwz r25,84(r1)
+	  85: GETL       	R1, t64
+	  86: ADDL       	$0x54, t64
+	  87: LDL       	(t64), t66
+	  88: PUTL       	t66, R25
+	  89: INCEIPL       	$4
+
+	0xFECE6CC:  83410058  lwz r26,88(r1)
+	  90: GETL       	R1, t68
+	  91: ADDL       	$0x58, t68
+	  92: LDL       	(t68), t70
+	  93: PUTL       	t70, R26
+	  94: INCEIPL       	$4
+
+	0xFECE6D0:  8361005C  lwz r27,92(r1)
+	  95: GETL       	R1, t72
+	  96: ADDL       	$0x5C, t72
+	  97: LDL       	(t72), t74
+	  98: PUTL       	t74, R27
+	  99: INCEIPL       	$4
+
+	0xFECE6D4:  83810060  lwz r28,96(r1)
+	 100: GETL       	R1, t76
+	 101: ADDL       	$0x60, t76
+	 102: LDL       	(t76), t78
+	 103: PUTL       	t78, R28
+	 104: INCEIPL       	$4
+
+	0xFECE6D8:  83A10064  lwz r29,100(r1)
+	 105: GETL       	R1, t80
+	 106: ADDL       	$0x64, t80
+	 107: LDL       	(t80), t82
+	 108: PUTL       	t82, R29
+	 109: INCEIPL       	$4
+
+	0xFECE6DC:  83C10068  lwz r30,104(r1)
+	 110: GETL       	R1, t84
+	 111: ADDL       	$0x68, t84
+	 112: LDL       	(t84), t86
+	 113: PUTL       	t86, R30
+	 114: INCEIPL       	$4
+
+	0xFECE6E0:  83E1006C  lwz r31,108(r1)
+	 115: GETL       	R1, t88
+	 116: ADDL       	$0x6C, t88
+	 117: LDL       	(t88), t90
+	 118: PUTL       	t90, R31
+	 119: INCEIPL       	$4
+
+	0xFECE6E4:  38210070  addi r1,r1,112
+	 120: GETL       	R1, t92
+	 121: ADDL       	$0x70, t92
+	 122: PUTL       	t92, R1
+	 123: INCEIPL       	$4
+
+	0xFECE6E8:  4E800020  blr
+	 124: GETL       	LR, t94
+	 125: JMPo-r       	t94  ($4)
+
+
+
+. 1786 FECE680 108
+. 55 72 07 BC 3A 18 00 38 62 4F 00 49 92 18 00 30 91 F8 00 04 82 61 00 74 81 C1 00 28 7E 68 03 A6 81 E1 00 2C 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+==== BB 1787 (0xFECF970) approx BBs exec'd 0 ====
+
+	0xFECF970:  4BFFFE30  b 0xFECF7A0
+	   0: JMPo       	$0xFECF7A0  ($4)
+
+
+
+. 1787 FECF970 4
+. 4B FF FE 30
+==== BB 1788 (0xFECF7A0) approx BBs exec'd 0 ====
+
+	0xFECF7A0:  82BE05FC  lwz r21,1532(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0xFECF7A4:  63340001  ori r20,r25,0x1
+	   5: GETL       	R25, t4
+	   6: ORL       	$0x1, t4
+	   7: PUTL       	t4, R20
+	   8: INCEIPL       	$4
+
+	0xFECF7A8:  825E0678  lwz r18,1656(r30)
+	   9: GETL       	R30, t6
+	  10: ADDL       	$0x678, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R18
+	  13: INCEIPL       	$4
+
+	0xFECF7AC:  3B5C0038  addi r26,r28,56
+	  14: GETL       	R28, t10
+	  15: ADDL       	$0x38, t10
+	  16: PUTL       	t10, R26
+	  17: INCEIPL       	$4
+
+	0xFECF7B0:  7E1CA800  cmp cr4,r28,r21
+	  18: GETL       	R28, t12
+	  19: GETL       	R21, t14
+	  20: CMPL       	t12, t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x4, CR
+	  22: INCEIPL       	$4
+
+	0xFECF7B4:  81DE062C  lwz r14,1580(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x62C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R14
+	  27: INCEIPL       	$4
+
+	0xFECF7B8:  7EB6AB78  or r22,r21,r21
+	  28: GETL       	R21, t22
+	  29: PUTL       	t22, R22
+	  30: INCEIPL       	$4
+
+	0xFECF7BC:  7F8FAA78  xor r15,r28,r21
+	  31: GETL       	R28, t24
+	  32: GETL       	R21, t26
+	  33: XORL       	t24, t26
+	  34: PUTL       	t26, R15
+	  35: INCEIPL       	$4
+
+	0xFECF7C0:  4D900000  mcrf cr3,cr4
+	  36: ICRFL       	CR, $0x3, CR
+	  37: INCEIPL       	$4
+
+	0xFECF7C4:  7E93A378  or r19,r20,r20
+	  38: GETL       	R20, t28
+	  39: PUTL       	t28, R19
+	  40: INCEIPL       	$4
+
+	0xFECF7C8:  3A390010  addi r17,r25,16
+	  41: GETL       	R25, t30
+	  42: ADDL       	$0x10, t30
+	  43: PUTL       	t30, R17
+	  44: INCEIPL       	$4
+
+	0xFECF7CC:  3A010010  addi r16,r1,16
+	  45: GETL       	R1, t32
+	  46: ADDL       	$0x10, t32
+	  47: PUTL       	t32, R16
+	  48: INCEIPL       	$4
+
+	0xFECF7D0:  83FC0044  lwz r31,68(r28)
+	  49: GETL       	R28, t34
+	  50: ADDL       	$0x44, t34
+	  51: LDL       	(t34), t36
+	  52: PUTL       	t36, R31
+	  53: INCEIPL       	$4
+
+	0xFECF7D4:  7F1FD000  cmp cr6,r31,r26
+	  54: GETL       	R31, t38
+	  55: GETL       	R26, t40
+	  56: CMPL       	t38, t40, t42  (-rSo)
+	  57: ICRFL       	t42, $0x6, CR
+	  58: INCEIPL       	$4
+
+	0xFECF7D8:  419A019C  bc 12,26,0xFECF974
+	  59: Js26o       	$0xFECF974
+
+
+
+. 1788 FECF7A0 60
+. 82 BE 05 FC 63 34 00 01 82 5E 06 78 3B 5C 00 38 7E 1C A8 00 81 DE 06 2C 7E B6 AB 78 7F 8F AA 78 4D 90 00 00 7E 93 A3 78 3A 39 00 10 3A 01 00 10 83 FC 00 44 7F 1F D0 00 41 9A 01 9C
+==== BB 1789 (0xFECF974) approx BBs exec'd 0 ====
+
+	0xFECF974:  281901FF  cmpli cr0,r25,511
+	   0: GETL       	R25, t0
+	   1: MOVL       	$0x1FF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFECF978:  4081002C  bc 4,1,0xFECF9A4
+	   5: Jc01o       	$0xFECF9A4
+
+
+
+. 1789 FECF974 8
+. 28 19 01 FF 40 81 00 2C
+==== BB 1790 (0xFECF9A4) approx BBs exec'd 0 ====
+
+	0xFECF9A4:  39780001  addi r11,r24,1
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFECF9A8:  7F8AB278  xor r10,r28,r22
+	   4: GETL       	R28, t2
+	   5: GETL       	R22, t4
+	   6: XORL       	t2, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0xFECF9AC:  5564D97E  rlwinm r4,r11,27,5,31
+	   9: GETL       	R11, t6
+	  10: SHRL       	$0x5, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0xFECF9B0:  55661838  rlwinm r6,r11,3,0,28
+	  13: GETL       	R11, t8
+	  14: SHLL       	$0x3, t8
+	  15: PUTL       	t8, R6
+	  16: INCEIPL       	$4
+
+	0xFECF9B4:  5483103A  rlwinm r3,r4,2,0,29
+	  17: GETL       	R4, t10
+	  18: SHLL       	$0x2, t10
+	  19: PUTL       	t10, R3
+	  20: INCEIPL       	$4
+
+	0xFECF9B8:  557D06FE  rlwinm r29,r11,0,27,31
+	  21: GETL       	R11, t12
+	  22: ANDL       	$0x1F, t12
+	  23: PUTL       	t12, R29
+	  24: INCEIPL       	$4
+
+	0xFECF9BC:  7CE3E214  add r7,r3,r28
+	  25: GETL       	R3, t14
+	  26: GETL       	R28, t16
+	  27: ADDL       	t14, t16
+	  28: PUTL       	t16, R7
+	  29: INCEIPL       	$4
+
+	0xFECF9C0:  7D06E214  add r8,r6,r28
+	  30: GETL       	R6, t18
+	  31: GETL       	R28, t20
+	  32: ADDL       	t18, t20
+	  33: PUTL       	t20, R8
+	  34: INCEIPL       	$4
+
+	0xFECF9C4:  549B402E  rlwinm r27,r4,8,0,23
+	  35: GETL       	R4, t22
+	  36: SHLL       	$0x8, t22
+	  37: PUTL       	t22, R27
+	  38: INCEIPL       	$4
+
+	0xFECF9C8:  3BE00001  li r31,1
+	  39: MOVL       	$0x1, t24
+	  40: PUTL       	t24, R31
+	  41: INCEIPL       	$4
+
+	0xFECF9CC:  80A70438  lwz r5,1080(r7)
+	  42: GETL       	R7, t26
+	  43: ADDL       	$0x438, t26
+	  44: LDL       	(t26), t28
+	  45: PUTL       	t28, R5
+	  46: INCEIPL       	$4
+
+	0xFECF9D0:  7F1CB000  cmp cr6,r28,r22
+	  47: GETL       	R28, t30
+	  48: GETL       	R22, t32
+	  49: CMPL       	t30, t32, t34  (-rSo)
+	  50: ICRFL       	t34, $0x6, CR
+	  51: INCEIPL       	$4
+
+	0xFECF9D4:  289901FF  cmpli cr1,r25,511
+	  52: GETL       	R25, t36
+	  53: MOVL       	$0x1FF, t40
+	  54: CMPUL       	t36, t40, t38  (-rSo)
+	  55: ICRFL       	t38, $0x1, CR
+	  56: INCEIPL       	$4
+
+	0xFECF9D8:  7FE6E830  slw r6,r31,r29
+	  57: GETL       	R31, t44
+	  58: GETL       	R29, t42
+	  59: SHLL       	t42, t44
+	  60: PUTL       	t44, R6
+	  61: INCEIPL       	$4
+
+	0xFECF9DC:  39680030  addi r11,r8,48
+	  62: GETL       	R8, t46
+	  63: ADDL       	$0x30, t46
+	  64: PUTL       	t46, R11
+	  65: INCEIPL       	$4
+
+	0xFECF9E0:  300AFFFF  addic r0,r10,-1
+	  66: GETL       	R10, t48
+	  67: ADCL       	$0xFFFFFFFF, t48  (-wCa)
+	  68: PUTL       	t48, R0
+	  69: INCEIPL       	$4
+
+	0xFECF9E4:  7FA05110  subfe r29,r0,r10
+	  70: GETL       	R0, t50
+	  71: GETL       	R10, t52
+	  72: SBBL       	t50, t52  (-rCa-wCa)
+	  73: PUTL       	t52, R29
+	  74: INCEIPL       	$4
+
+	0xFECF9E8:  7D1BE214  add r8,r27,r28
+	  75: GETL       	R27, t54
+	  76: GETL       	R28, t56
+	  77: ADDL       	t54, t56
+	  78: PUTL       	t56, R8
+	  79: INCEIPL       	$4
+
+	0xFECF9EC:  21260000  subfic r9,r6,0
+	  80: GETL       	R6, t58
+	  81: MOVL       	$0x0, t60
+	  82: SBBL       	t58, t60  (-wCa)
+	  83: PUTL       	t60, R9
+	  84: INCEIPL       	$4
+
+	0xFECF9F0:  7EA93114  adde r21,r9,r6
+	  85: GETL       	R9, t62
+	  86: GETL       	R6, t64
+	  87: ADCL       	t62, t64  (-rCa-wCa)
+	  88: PUTL       	t64, R21
+	  89: INCEIPL       	$4
+
+	0xFECF9F4:  7D262810  subfc r9,r6,r5
+	  90: GETL       	R6, t66
+	  91: GETL       	R5, t68
+	  92: SBBL       	t66, t68  (-wCa)
+	  93: PUTL       	t68, R9
+	  94: INCEIPL       	$4
+
+	0xFECF9F8:  7D294910  subfe r9,r9,r9
+	  95: GETL       	R9, t70
+	  96: GETL       	R9, t72
+	  97: SBBL       	t70, t72  (-rCa-wCa)
+	  98: PUTL       	t72, R9
+	  99: INCEIPL       	$4
+
+	0xFECF9FC:  7D2900D0  neg r9,r9
+	 100: GETL       	R9, t74
+	 101: NEGL       	t74
+	 102: PUTL       	t74, R9
+	 103: INCEIPL       	$4
+
+	0xFECFA00:  7D2AAB79  or. r10,r9,r21
+	 104: GETL       	R9, t76
+	 105: GETL       	R21, t78
+	 106: ORL       	t78, t76
+	 107: PUTL       	t76, R10
+	 108: CMP0L       	t76, t80  (-rSo)
+	 109: ICRFL       	t80, $0x0, CR
+	 110: INCEIPL       	$4
+
+	0xFECFA04:  41820034  bc 12,2,0xFECFA38
+	 111: Js02o       	$0xFECFA38
+
+
+
+. 1790 FECF9A4 100
+. 39 78 00 01 7F 8A B2 78 55 64 D9 7E 55 66 18 38 54 83 10 3A 55 7D 06 FE 7C E3 E2 14 7D 06 E2 14 54 9B 40 2E 3B E0 00 01 80 A7 04 38 7F 1C B0 00 28 99 01 FF 7F E6 E8 30 39 68 00 30 30 0A FF FF 7F A0 51 10 7D 1B E2 14 21 26 00 00 7E A9 31 14 7D 26 28 10 7D 29 49 10 7D 29 00 D0 7D 2A AB 79 41 82 00 34
+==== BB 1791 (0xFECFA08) approx BBs exec'd 0 ====
+
+	0xFECFA08:  39270438  addi r9,r7,1080
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0x438, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFECFA0C:  38840001  addi r4,r4,1
+	   4: GETL       	R4, t2
+	   5: ADDL       	$0x1, t2
+	   6: PUTL       	t2, R4
+	   7: INCEIPL       	$4
+
+	0xFECFA10:  38E70004  addi r7,r7,4
+	   8: GETL       	R7, t4
+	   9: ADDL       	$0x4, t4
+	  10: PUTL       	t4, R7
+	  11: INCEIPL       	$4
+
+	0xFECFA14:  2B840003  cmpli cr7,r4,3
+	  12: GETL       	R4, t6
+	  13: MOVL       	$0x3, t10
+	  14: CMPUL       	t6, t10, t8  (-rSo)
+	  15: ICRFL       	t8, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0xFECFA18:  5483103A  rlwinm r3,r4,2,0,29
+	  17: GETL       	R4, t12
+	  18: SHLL       	$0x2, t12
+	  19: PUTL       	t12, R3
+	  20: INCEIPL       	$4
+
+	0xFECFA1C:  39080100  addi r8,r8,256
+	  21: GETL       	R8, t14
+	  22: ADDL       	$0x100, t14
+	  23: PUTL       	t14, R8
+	  24: INCEIPL       	$4
+
+	0xFECFA20:  419D0164  bc 12,29,0xFECFB84
+	  25: Js29o       	$0xFECFB84
+
+
+
+. 1791 FECFA08 28
+. 39 27 04 38 38 84 00 01 38 E7 00 04 2B 84 00 03 54 83 10 3A 39 08 01 00 41 9D 01 64
+==== BB 1792 (0xFECFA24) approx BBs exec'd 0 ====
+
+	0xFECFA24:  84A90004  lwzu r5,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R9
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFECFA28:  2C050000  cmpi cr0,r5,0
+	   6: GETL       	R5, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECFA2C:  4182FFE0  bc 12,2,0xFECFA0C
+	  10: Js02o       	$0xFECFA0C
+
+
+
+. 1792 FECFA24 12
+. 84 A9 00 04 2C 05 00 00 41 82 FF E0
+==== BB 1793 (0xFECFA0C) approx BBs exec'd 0 ====
+
+	0xFECFA0C:  38840001  addi r4,r4,1
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0xFECFA10:  38E70004  addi r7,r7,4
+	   4: GETL       	R7, t2
+	   5: ADDL       	$0x4, t2
+	   6: PUTL       	t2, R7
+	   7: INCEIPL       	$4
+
+	0xFECFA14:  2B840003  cmpli cr7,r4,3
+	   8: GETL       	R4, t4
+	   9: MOVL       	$0x3, t8
+	  10: CMPUL       	t4, t8, t6  (-rSo)
+	  11: ICRFL       	t6, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFECFA18:  5483103A  rlwinm r3,r4,2,0,29
+	  13: GETL       	R4, t10
+	  14: SHLL       	$0x2, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFECFA1C:  39080100  addi r8,r8,256
+	  17: GETL       	R8, t12
+	  18: ADDL       	$0x100, t12
+	  19: PUTL       	t12, R8
+	  20: INCEIPL       	$4
+
+	0xFECFA20:  419D0164  bc 12,29,0xFECFB84
+	  21: Js29o       	$0xFECFB84
+
+
+
+. 1793 FECFA0C 24
+. 38 84 00 01 38 E7 00 04 2B 84 00 03 54 83 10 3A 39 08 01 00 41 9D 01 64
+==== BB 1794 (0xFECFB84) approx BBs exec'd 0 ====
+
+	0xFECFB84:  83BC0030  lwz r29,48(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFECFB88:  82FD0004  lwz r23,4(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R23
+	   9: INCEIPL       	$4
+
+	0xFECFB8C:  56E00038  rlwinm r0,r23,0,0,28
+	  10: GETL       	R23, t8
+	  11: ANDL       	$0xFFFFFFF8, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0xFECFB90:  7F110040  cmpl cr6,r17,r0
+	  14: GETL       	R17, t10
+	  15: GETL       	R0, t12
+	  16: CMPUL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0xFECFB94:  4099031C  bc 4,25,0xFECFEB0
+	  19: Jc25o       	$0xFECFEB0
+
+
+
+. 1794 FECFB84 20
+. 83 BC 00 30 82 FD 00 04 56 E0 00 38 7F 11 00 40 40 99 03 1C
+==== BB 1795 (0xFECFB98) approx BBs exec'd 0 ====
+
+	0xFECFB98:  819C0004  lwz r12,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFECFB9C:  718A0001  andi. r10,r12,0x1
+	   5: GETL       	R12, t4
+	   6: ANDL       	$0x1, t4
+	   7: PUTL       	t4, R10
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0xFECFBA0:  408201A0  bc 4,2,0xFECFD40
+	  11: Jc02o       	$0xFECFD40
+
+
+
+. 1795 FECFB98 12
+. 81 9C 00 04 71 8A 00 01 40 82 01 A0
+==== BB 1796 (0xFECFD40) approx BBs exec'd 0 ====
+
+	0xFECFD40:  82FE0628  lwz r23,1576(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x628, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFECFD44:  82770008  lwz r19,8(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R19
+	   9: INCEIPL       	$4
+
+	0xFECFD48:  80B70018  lwz r5,24(r23)
+	  10: GETL       	R23, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R5
+	  14: INCEIPL       	$4
+
+	0xFECFD4C:  7D93C840  cmpl cr3,r19,r25
+	  15: GETL       	R19, t12
+	  16: GETL       	R25, t14
+	  17: CMPUL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x3, CR
+	  19: INCEIPL       	$4
+
+	0xFECFD50:  3A65FFFF  addi r19,r5,-1
+	  20: GETL       	R5, t18
+	  21: ADDL       	$0xFFFFFFFF, t18
+	  22: PUTL       	t18, R19
+	  23: INCEIPL       	$4
+
+	0xFECFD54:  418D0028  bc 12,13,0xFECFD7C
+	  24: Js13o       	$0xFECFD7C
+
+
+
+. 1796 FECFD40 24
+. 82 FE 06 28 82 77 00 08 80 B7 00 18 7D 93 C8 40 3A 65 FF FF 41 8D 00 28
+==== BB 1797 (0xFECFD7C) approx BBs exec'd 0 ====
+
+	0xFECFD7C:  817E0680  lwz r11,1664(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x680, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFECFD80:  7FB6EB78  or r22,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0xFECFD84:  83FD0004  lwz r31,4(r29)
+	   8: GETL       	R29, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R31
+	  12: INCEIPL       	$4
+
+	0xFECFD88:  3B000000  li r24,0
+	  13: MOVL       	$0x0, t10
+	  14: PUTL       	t10, R24
+	  15: INCEIPL       	$4
+
+	0xFECFD8C:  81EB0000  lwz r15,0(r11)
+	  16: GETL       	R11, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R15
+	  19: INCEIPL       	$4
+
+	0xFECFD90:  3B600000  li r27,0
+	  20: MOVL       	$0x0, t16
+	  21: PUTL       	t16, R27
+	  22: INCEIPL       	$4
+
+	0xFECFD94:  57F50038  rlwinm r21,r31,0,0,28
+	  23: GETL       	R31, t18
+	  24: ANDL       	$0xFFFFFFF8, t18
+	  25: PUTL       	t18, R21
+	  26: INCEIPL       	$4
+
+	0xFECFD98:  7C9C7800  cmp cr1,r28,r15
+	  27: GETL       	R28, t20
+	  28: GETL       	R15, t22
+	  29: CMPL       	t20, t22, t24  (-rSo)
+	  30: ICRFL       	t24, $0x1, CR
+	  31: INCEIPL       	$4
+
+	0xFECFD9C:  7E1DAA14  add r16,r29,r21
+	  32: GETL       	R29, t26
+	  33: GETL       	R21, t28
+	  34: ADDL       	t26, t28
+	  35: PUTL       	t28, R16
+	  36: INCEIPL       	$4
+
+	0xFECFDA0:  41860164  bc 12,6,0xFECFF04
+	  37: Js06o       	$0xFECFF04
+
+
+
+. 1797 FECFD7C 40
+. 81 7E 06 80 7F B6 EB 78 83 FD 00 04 3B 00 00 00 81 EB 00 00 3B 60 00 00 57 F5 00 38 7C 9C 78 00 7E 1D AA 14 41 86 01 64
+==== BB 1798 (0xFECFF04) approx BBs exec'd 0 ====
+
+	0xFECFF04:  80BC0004  lwz r5,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFECFF08:  81970004  lwz r12,4(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0xFECFF0C:  70AA0002  andi. r10,r5,0x2
+	  10: GETL       	R5, t8
+	  11: ANDL       	$0x2, t8
+	  12: PUTL       	t8, R10
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFECFF10:  7E4CCA14  add r18,r12,r25
+	  16: GETL       	R12, t12
+	  17: GETL       	R25, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R18
+	  20: INCEIPL       	$4
+
+	0xFECFF14:  38120010  addi r0,r18,16
+	  21: GETL       	R18, t16
+	  22: ADDL       	$0x10, t16
+	  23: PUTL       	t16, R0
+	  24: INCEIPL       	$4
+
+	0xFECFF18:  40820008  bc 4,2,0xFECFF20
+	  25: Jc02o       	$0xFECFF20
+
+
+
+. 1798 FECFF04 24
+. 80 BC 00 04 81 97 00 04 70 AA 00 02 7E 4C CA 14 38 12 00 10 40 82 00 08
+==== BB 1799 (0xFECFF1C) approx BBs exec'd 0 ====
+
+	0xFECFF1C:  7C150050  subf r0,r21,r0
+	   0: GETL       	R21, t0
+	   1: GETL       	R0, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFECFF20:  7C009A14  add r0,r0,r19
+	   5: GETL       	R0, t4
+	   6: GETL       	R19, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFECFF24:  7E7298F8  nor r18,r19,r19
+	  10: GETL       	R19, t8
+	  11: NOTL       	t8
+	  12: PUTL       	t8, R18
+	  13: INCEIPL       	$4
+
+	0xFECFF28:  7C1F9039  and. r31,r0,r18
+	  14: GETL       	R0, t10
+	  15: GETL       	R18, t12
+	  16: ANDL       	t10, t12
+	  17: PUTL       	t12, R31
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0xFECFF2C:  408101B4  bc 4,1,0xFED00E0
+	  21: Jc01o       	$0xFED00E0
+
+
+
+. 1799 FECFF1C 20
+. 7C 15 00 50 7C 00 9A 14 7E 72 98 F8 7C 1F 90 39 40 81 01 B4
+==== BB 1800 (0xFECFF30) approx BBs exec'd 0 ====
+
+	0xFECFF30:  81DE1A70  lwz r14,6768(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1A70, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R14
+	   4: INCEIPL       	$4
+
+	0xFECFF34:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFECFF38:  810E0000  lwz r8,0(r14)
+	   8: GETL       	R14, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R8
+	  11: INCEIPL       	$4
+
+	0xFECFF3C:  7D0903A6  mtctr r8
+	  12: GETL       	R8, t10
+	  13: PUTL       	t10, CTR
+	  14: INCEIPL       	$4
+
+	0xFECFF40:  4E800421  bctrl
+	  15: MOVL       	$0xFECFF44, t12
+	  16: PUTL       	t12, LR
+	  17: GETL       	CTR, t14
+	  18: JMPo-c       	t14  ($4)
+
+
+
+. 1800 FECFF30 20
+. 81 DE 1A 70 7F E3 FB 78 81 0E 00 00 7D 09 03 A6 4E 80 04 21
+==== BB 1801 __GI___default_morecore(0xFED4188) approx BBs exec'd 0 ====
+
+	0xFED4188:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFED418C:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFED4190:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFED4194:  48054BDD  bl 0xFF28D70
+	  14: MOVL       	$0xFED4198, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFF28D70  ($4)
+
+
+
+. 1801 FED4188 16
+. 7C 08 02 A6 94 21 FF F0 90 01 00 14 48 05 4B DD
+==== BB 1802 sbrk(0xFF28D70) approx BBs exec'd 0 ====
+
+	0xFF28D70:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF28D74:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFF28D78:  4807F0D9  bl 0xFFA7E50
+	   9: MOVL       	$0xFF28D7C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1802 FF28D70 12
+. 94 21 FF E0 7C 88 02 A6 48 07 F0 D9
+==== BB 1803 (0xFF28D7C) approx BBs exec'd 0 ====
+
+	0xFF28D7C:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF28D80:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFF28D84:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFF28D88:  90810024  stw r4,36(r1)
+	  13: GETL       	R4, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF28D8C:  93A10014  stw r29,20(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFF28D90:  7C7D1B78  or r29,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0xFF28D94:  83FE1D6C  lwz r31,7532(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x1D6C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFF28D98:  38600000  li r3,0
+	  31: MOVL       	$0x0, t24
+	  32: PUTL       	t24, R3
+	  33: INCEIPL       	$4
+
+	0xFF28D9C:  801F0000  lwz r0,0(r31)
+	  34: GETL       	R31, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R0
+	  37: INCEIPL       	$4
+
+	0xFF28DA0:  2F800000  cmpi cr7,r0,0
+	  38: GETL       	R0, t30
+	  39: CMP0L       	t30, t32  (-rSo)
+	  40: ICRFL       	t32, $0x7, CR
+	  41: INCEIPL       	$4
+
+	0xFF28DA4:  419E003C  bc 12,30,0xFF28DE0
+	  42: Js30o       	$0xFF28DE0
+
+
+
+. 1803 FF28D7C 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 81 00 24 93 A1 00 14 7C 7D 1B 78 83 FE 1D 6C 38 60 00 00 80 1F 00 00 2F 80 00 00 41 9E 00 3C
+==== BB 1804 (0xFF28DE0) approx BBs exec'd 0 ====
+
+	0xFF28DE0:  4BFFFF4D  bl 0xFF28D2C
+	   0: MOVL       	$0xFF28DE4, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFF28D2C  ($4)
+
+
+
+. 1804 FF28DE0 4
+. 4B FF FF 4D
+==== BB 1805 brk(0xFF28D2C) approx BBs exec'd 0 ====
+
+	0xFF28D2C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF28D30:  90610008  stw r3,8(r1)
+	   6: GETL       	R3, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x8, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xFF28D34:  3800002D  li r0,45
+	  11: MOVL       	$0x2D, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0xFF28D38:  44000002  sc
+	  14: JMPo-sys       	$0xFF28D3C  ($4)
+
+
+
+. 1805 FF28D2C 16
+. 94 21 FF F0 90 61 00 08 38 00 00 2D 44 00 00 02
+==== BB 1806 (0xFF28D3C) approx BBs exec'd 0 ====
+
+	0xFF28D3C:  80C10008  lwz r6,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFF28D40:  7C8802A6  mflr r4
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFF28D44:  4807F10D  bl 0xFFA7E50
+	   8: MOVL       	$0xFF28D48, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1806 FF28D3C 12
+. 80 C1 00 08 7C 88 02 A6 48 07 F1 0D
+==== BB 1807 (0xFF28D48) approx BBs exec'd 0 ====
+
+	0xFF28D48:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFF28D4C:  80A51D6C  lwz r5,7532(r5)
+	   3: GETL       	R5, t2
+	   4: ADDL       	$0x1D6C, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFF28D50:  7C8803A6  mtlr r4
+	   8: GETL       	R4, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0xFF28D54:  90650000  stw r3,0(r5)
+	  11: GETL       	R3, t8
+	  12: GETL       	R5, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFF28D58:  7C061840  cmpl cr0,r6,r3
+	  15: GETL       	R6, t12
+	  16: GETL       	R3, t14
+	  17: CMPUL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0xFF28D5C:  38210010  addi r1,r1,16
+	  20: GETL       	R1, t18
+	  21: ADDL       	$0x10, t18
+	  22: PUTL       	t18, R1
+	  23: INCEIPL       	$4
+
+	0xFF28D60:  38600000  li r3,0
+	  24: MOVL       	$0x0, t20
+	  25: PUTL       	t20, R3
+	  26: INCEIPL       	$4
+
+	0xFF28D64:  4CA10020  bclr 5,1
+	  27: GETL       	LR, t22
+	  28: Jc01o-r       	t22
+
+
+
+. 1807 FF28D48 32
+. 7C A8 02 A6 80 A5 1D 6C 7C 88 03 A6 90 65 00 00 7C 06 18 40 38 21 00 10 38 60 00 00 4C A1 00 20
+==== BB 1808 (0xFF28DE4) approx BBs exec'd 0 ====
+
+	0xFF28DE4:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFF28DE8:  3860FFFF  li r3,-1
+	   4: MOVL       	$0xFFFFFFFF, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFF28DEC:  41A4FFD8  bc 13,4,0xFF28DC4
+	   7: Js04o       	$0xFF28DC4
+
+
+
+. 1808 FF28DE4 12
+. 2C 83 00 00 38 60 FF FF 41 A4 FF D8
+==== BB 1809 (0xFF28DF0) approx BBs exec'd 0 ====
+
+	0xFF28DF0:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFF28DF4:  419AFFCC  bc 12,26,0xFF28DC0
+	   4: Js26o       	$0xFF28DC0
+
+
+
+. 1809 FF28DF0 8
+. 2F 1D 00 00 41 9A FF CC
+==== BB 1810 (0xFF28DF8) approx BBs exec'd 0 ====
+
+	0xFF28DF8:  83FF0000  lwz r31,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0xFF28DFC:  7C7FEA14  add r3,r31,r29
+	   4: GETL       	R31, t4
+	   5: GETL       	R29, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFF28E00:  4BFFFF2D  bl 0xFF28D2C
+	   9: MOVL       	$0xFF28E04, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0xFF28D2C  ($4)
+
+
+
+. 1810 FF28DF8 12
+. 83 FF 00 00 7C 7F EA 14 4B FF FF 2D
+==== BB 1811 (0xFF28E04) approx BBs exec'd 0 ====
+
+	0xFF28E04:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFF28E08:  3860FFFF  li r3,-1
+	   4: MOVL       	$0xFFFFFFFF, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFF28E0C:  41BCFFB8  bc 13,28,0xFF28DC4
+	   7: Js28o       	$0xFF28DC4
+
+
+
+. 1811 FF28E04 12
+. 2F 83 00 00 38 60 FF FF 41 BC FF B8
+==== BB 1812 (0xFF28E10) approx BBs exec'd 0 ====
+
+	0xFF28E10:  80C10024  lwz r6,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFF28E14:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFF28E18:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xFF28E1C:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0xFF28E20:  7CC803A6  mtlr r6
+	  18: GETL       	R6, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFF28E24:  83E1001C  lwz r31,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0xFF28E28:  38210020  addi r1,r1,32
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0xFF28E2C:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+
+. 1812 FF28E10 32
+. 80 C1 00 24 7F E3 FB 78 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1813 (0xFED4198) approx BBs exec'd 0 ====
+
+	0xFED4198:  2F83FFFF  cmpi cr7,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFED419C:  38000000  li r0,0
+	   5: MOVL       	$0x0, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFED41A0:  419E0008  bc 12,30,0xFED41A8
+	   8: Js30o       	$0xFED41A8
+
+
+
+. 1813 FED4198 12
+. 2F 83 FF FF 38 00 00 00 41 9E 00 08
+==== BB 1814 (0xFED41A4) approx BBs exec'd 0 ====
+
+	0xFED41A4:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFED41A8:  80810014  lwz r4,20(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x14, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFED41AC:  7C030378  or r3,r0,r0
+	   8: GETL       	R0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFED41B0:  38210010  addi r1,r1,16
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x10, t8
+	  13: PUTL       	t8, R1
+	  14: INCEIPL       	$4
+
+	0xFED41B4:  7C8803A6  mtlr r4
+	  15: GETL       	R4, t10
+	  16: PUTL       	t10, LR
+	  17: INCEIPL       	$4
+
+	0xFED41B8:  4E800020  blr
+	  18: GETL       	LR, t12
+	  19: JMPo-r       	t12  ($4)
+
+
+
+. 1814 FED41A4 24
+. 7C 60 1B 78 80 81 00 14 7C 03 03 78 38 21 00 10 7C 88 03 A6 4E 80 00 20
+==== BB 1815 (0xFECFF44) approx BBs exec'd 0 ====
+
+	0xFECFF44:  7C7B1B79  or. r27,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R27
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFECFF48:  7F400026  mfcr r26
+	   5: GETL       	CR, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xFECFF4C:  4182019C  bc 12,2,0xFED00E8
+	   8: Js02o       	$0xFED00E8
+
+
+
+. 1815 FECFF44 12
+. 7C 7B 1B 79 7F 40 00 26 41 82 01 9C
+==== BB 1816 (0xFECFF50) approx BBs exec'd 0 ====
+
+	0xFECFF50:  815E1BB8  lwz r10,7096(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BB8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFECFF54:  812A0000  lwz r9,0(r10)
+	   5: GETL       	R10, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFECFF58:  2E090000  cmpi cr4,r9,0
+	   9: GETL       	R9, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0xFECFF5C:  41B2000C  bc 13,18,0xFECFF68
+	  13: Js18o       	$0xFECFF68
+
+
+
+. 1816 FECFF50 16
+. 81 5E 1B B8 81 2A 00 00 2E 09 00 00 41 B2 00 0C
+==== BB 1817 (0xFECFF68) approx BBs exec'd 0 ====
+
+	0xFECFF68:  575A203E  rlwinm r26,r26,4,0,31
+	   0: GETL       	R26, t0
+	   1: ROLL       	$0x4, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0xFECFF6C:  7F401120  mtcrf 0x1,r26
+	   4: GETL       	R26, t2
+	   5: ICRFL       	t2, $0x7, CR
+	   6: INCEIPL       	$4
+
+	0xFECFF70:  575AE03E  rlwinm r26,r26,28,0,31
+	   7: GETL       	R26, t4
+	   8: ROLL       	$0x1C, t4
+	   9: PUTL       	t4, R26
+	  10: INCEIPL       	$4
+
+	0xFECFF74:  419E004C  bc 12,30,0xFECFFC0
+	  11: Js30o       	$0xFECFFC0
+
+
+
+. 1817 FECFF68 16
+. 57 5A 20 3E 7F 40 11 20 57 5A E0 3E 41 9E 00 4C
+==== BB 1818 (0xFECFF78) approx BBs exec'd 0 ====
+
+	0xFECFF78:  83570028  lwz r26,40(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x28, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFECFF7C:  2E1A0000  cmpi cr4,r26,0
+	   5: GETL       	R26, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0xFECFF80:  40920008  bc 4,18,0xFECFF88
+	   9: Jc18o       	$0xFECFF88
+
+
+
+. 1818 FECFF78 12
+. 83 57 00 28 2E 1A 00 00 40 92 00 08
+==== BB 1819 (0xFECFF84) approx BBs exec'd 0 ====
+
+	0xFECFF84:  93770028  stw r27,40(r23)
+	   0: GETL       	R27, t0
+	   1: GETL       	R23, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECFF88:  2F180000  cmpi cr6,r24,0
+	   5: GETL       	R24, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFECFF8C:  7F658278  xor r5,r27,r16
+	   9: GETL       	R27, t8
+	  10: GETL       	R16, t10
+	  11: XORL       	t8, t10
+	  12: PUTL       	t10, R5
+	  13: INCEIPL       	$4
+
+	0xFECFF90:  20050000  subfic r0,r5,0
+	  14: GETL       	R5, t12
+	  15: MOVL       	$0x0, t14
+	  16: SBBL       	t12, t14  (-wCa)
+	  17: PUTL       	t14, R0
+	  18: INCEIPL       	$4
+
+	0xFECFF94:  7CA02914  adde r5,r0,r5
+	  19: GETL       	R0, t16
+	  20: GETL       	R5, t18
+	  21: ADCL       	t16, t18  (-rCa-wCa)
+	  22: PUTL       	t18, R5
+	  23: INCEIPL       	$4
+
+	0xFECFF98:  82FC044C  lwz r23,1100(r28)
+	  24: GETL       	R28, t20
+	  25: ADDL       	$0x44C, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R23
+	  28: INCEIPL       	$4
+
+	0xFECFF9C:  7D800026  mfcr r12
+	  29: GETL       	CR, t24
+	  30: PUTL       	t24, R12
+	  31: INCEIPL       	$4
+
+	0xFECFFA0:  558CDFFE  rlwinm r12,r12,27,31,31
+	  32: GETL       	R12, t26
+	  33: ROLL       	$0x1B, t26
+	  34: ANDL       	$0x1, t26
+	  35: PUTL       	t26, R12
+	  36: INCEIPL       	$4
+
+	0xFECFFA4:  7CAA6039  and. r10,r5,r12
+	  37: GETL       	R5, t28
+	  38: GETL       	R12, t30
+	  39: ANDL       	t28, t30
+	  40: PUTL       	t30, R10
+	  41: CMP0L       	t30, t32  (-rSo)
+	  42: ICRFL       	t32, $0x0, CR
+	  43: INCEIPL       	$4
+
+	0xFECFFA8:  7D57FA14  add r10,r23,r31
+	  44: GETL       	R23, t34
+	  45: GETL       	R31, t36
+	  46: ADDL       	t34, t36
+	  47: PUTL       	t36, R10
+	  48: INCEIPL       	$4
+
+	0xFECFFAC:  41820254  bc 12,2,0xFED0200
+	  49: Js02o       	$0xFED0200
+
+
+
+. 1819 FECFF84 44
+. 93 77 00 28 2F 18 00 00 7F 65 82 78 20 05 00 00 7C A0 29 14 82 FC 04 4C 7D 80 00 26 55 8C DF FE 7C AA 60 39 7D 57 FA 14 41 82 02 54
+==== BB 1820 (0xFED0200) approx BBs exec'd 0 ====
+
+	0xFED0200:  811C0004  lwz r8,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFED0204:  3175FFFF  addic r11,r21,-1
+	   5: GETL       	R21, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xFED0208:  7FABA910  subfe r29,r11,r21
+	   9: GETL       	R11, t6
+	  10: GETL       	R21, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R29
+	  13: INCEIPL       	$4
+
+	0xFED020C:  69000002  xori r0,r8,0x2
+	  14: GETL       	R8, t10
+	  15: XORL       	$0x2, t10
+	  16: PUTL       	t10, R0
+	  17: INCEIPL       	$4
+
+	0xFED0210:  5400FFFE  rlwinm r0,r0,31,31,31
+	  18: GETL       	R0, t12
+	  19: ROLL       	$0x1F, t12
+	  20: ANDL       	$0x1, t12
+	  21: PUTL       	t12, R0
+	  22: INCEIPL       	$4
+
+	0xFED0214:  7C0BE839  and. r11,r0,r29
+	  23: GETL       	R0, t14
+	  24: GETL       	R29, t16
+	  25: ANDL       	t14, t16
+	  26: PUTL       	t16, R11
+	  27: CMP0L       	t16, t18  (-rSo)
+	  28: ICRFL       	t18, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0xFED0218:  4182000C  bc 12,2,0xFED0224
+	  30: Js02o       	$0xFED0224
+
+
+
+. 1820 FED0200 28
+. 81 1C 00 04 31 75 FF FF 7F AB A9 10 69 00 00 02 54 00 FF FE 7C 0B E8 39 41 82 00 0C
+==== BB 1821 (0xFED0224) approx BBs exec'd 0 ====
+
+	0xFED0224:  2C800000  cmpi cr1,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFED0228:  3BA00000  li r29,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R29
+	   6: INCEIPL       	$4
+
+	0xFED022C:  7F7ADB78  or r26,r27,r27
+	   7: GETL       	R27, t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0xFED0230:  41860104  bc 12,6,0xFED0334
+	  10: Js06o       	$0xFED0334
+
+
+
+. 1821 FED0224 16
+. 2C 80 00 00 3B A0 00 00 7F 7A DB 78 41 86 01 04
+==== BB 1822 (0xFED0234) approx BBs exec'd 0 ====
+
+	0xFED0234:  2F150000  cmpi cr6,r21,0
+	   0: GETL       	R21, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFED0238:  419A00F4  bc 12,26,0xFED032C
+	   4: Js26o       	$0xFED032C
+
+
+
+. 1822 FED0234 8
+. 2F 15 00 00 41 9A 00 F4
+==== BB 1823 (0xFED032C) approx BBs exec'd 0 ====
+
+	0xFED032C:  915C044C  stw r10,1100(r28)
+	   0: GETL       	R10, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	$0x44C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED0330:  4BFFFF18  b 0xFED0248
+	   5: JMPo       	$0xFED0248  ($4)
+
+
+
+. 1823 FED032C 8
+. 91 5C 04 4C 4B FF FF 18
+==== BB 1824 (0xFED0248) approx BBs exec'd 0 ====
+
+	0xFED0248:  73600007  andi. r0,r27,0x7
+	   0: GETL       	R27, t0
+	   1: ANDL       	$0x7, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED024C:  4182000C  bc 12,2,0xFED0258
+	   6: Js02o       	$0xFED0258
+
+
+
+. 1824 FED0248 8
+. 73 60 00 07 41 82 00 0C
+==== BB 1825 (0xFED0250) approx BBs exec'd 0 ====
+
+	0xFED0250:  23A00008  subfic r29,r0,8
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x8, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFED0254:  7F5BEA14  add r26,r27,r29
+	   5: GETL       	R27, t4
+	   6: GETL       	R29, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0xFED0258:  7C9BFA14  add r4,r27,r31
+	  10: GETL       	R27, t8
+	  11: GETL       	R31, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0xFED025C:  7D5DAA14  add r10,r29,r21
+	  15: GETL       	R29, t12
+	  16: GETL       	R21, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R10
+	  19: INCEIPL       	$4
+
+	0xFED0260:  7D245214  add r9,r4,r10
+	  20: GETL       	R4, t16
+	  21: GETL       	R10, t18
+	  22: ADDL       	t16, t18
+	  23: PUTL       	t18, R9
+	  24: INCEIPL       	$4
+
+	0xFED0264:  7D699A14  add r11,r9,r19
+	  25: GETL       	R9, t20
+	  26: GETL       	R19, t22
+	  27: ADDL       	t20, t22
+	  28: PUTL       	t22, R11
+	  29: INCEIPL       	$4
+
+	0xFED0268:  7D7B9038  and r27,r11,r18
+	  30: GETL       	R11, t24
+	  31: GETL       	R18, t26
+	  32: ANDL       	t24, t26
+	  33: PUTL       	t26, R27
+	  34: INCEIPL       	$4
+
+	0xFED026C:  7FE9D850  subf r31,r9,r27
+	  35: GETL       	R9, t28
+	  36: GETL       	R27, t30
+	  37: SUBL       	t28, t30
+	  38: PUTL       	t30, R31
+	  39: INCEIPL       	$4
+
+	0xFED0270:  837E1A70  lwz r27,6768(r30)
+	  40: GETL       	R30, t32
+	  41: ADDL       	$0x1A70, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R27
+	  44: INCEIPL       	$4
+
+	0xFED0274:  7FAAFA14  add r29,r10,r31
+	  45: GETL       	R10, t36
+	  46: GETL       	R31, t38
+	  47: ADDL       	t36, t38
+	  48: PUTL       	t38, R29
+	  49: INCEIPL       	$4
+
+	0xFED0278:  827B0000  lwz r19,0(r27)
+	  50: GETL       	R27, t40
+	  51: LDL       	(t40), t42
+	  52: PUTL       	t42, R19
+	  53: INCEIPL       	$4
+
+	0xFED027C:  7FA3EB78  or r3,r29,r29
+	  54: GETL       	R29, t44
+	  55: PUTL       	t44, R3
+	  56: INCEIPL       	$4
+
+	0xFED0280:  7E6903A6  mtctr r19
+	  57: GETL       	R19, t46
+	  58: PUTL       	t46, CTR
+	  59: INCEIPL       	$4
+
+	0xFED0284:  4E800421  bctrl
+	  60: MOVL       	$0xFED0288, t48
+	  61: PUTL       	t48, LR
+	  62: GETL       	CTR, t50
+	  63: JMPo-c       	t50  ($4)
+
+
+
+. 1825 FED0250 56
+. 23 A0 00 08 7F 5B EA 14 7C 9B FA 14 7D 5D AA 14 7D 24 52 14 7D 69 9A 14 7D 7B 90 38 7F E9 D8 50 83 7E 1A 70 7F AA FA 14 82 7B 00 00 7F A3 EB 78 7E 69 03 A6 4E 80 04 21
+==== BB 1826 (0xFF28DA8) approx BBs exec'd 0 ====
+
+	0xFF28DA8:  813E1AA4  lwz r9,6820(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1AA4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFF28DAC:  80A90000  lwz r5,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R5
+	   8: INCEIPL       	$4
+
+	0xFF28DB0:  2C050000  cmpi cr0,r5,0
+	   9: GETL       	R5, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFF28DB4:  4082002C  bc 4,2,0xFF28DE0
+	  13: Jc02o       	$0xFF28DE0
+
+
+
+. 1826 FF28DA8 16
+. 81 3E 1A A4 80 A9 00 00 2C 05 00 00 40 82 00 2C
+==== BB 1827 (0xFF28DB8) approx BBs exec'd 0 ====
+
+	0xFF28DB8:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFF28DBC:  409A003C  bc 4,26,0xFF28DF8
+	   4: Jc26o       	$0xFF28DF8
+
+
+
+. 1827 FF28DB8 8
+. 2F 1D 00 00 40 9A 00 3C
+==== BB 1828 (0xFED0288) approx BBs exec'd 0 ====
+
+	0xFED0288:  7C781B79  or. r24,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R24
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFED028C:  7FE00026  mfcr r31
+	   5: GETL       	CR, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0xFED0290:  41820080  bc 12,2,0xFED0310
+	   8: Js02o       	$0xFED0310
+
+
+
+. 1828 FED0288 12
+. 7C 78 1B 79 7F E0 00 26 41 82 00 80
+==== BB 1829 (0xFED0294) approx BBs exec'd 0 ====
+
+	0xFED0294:  807E1BB8  lwz r3,7096(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BB8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFED0298:  81230000  lwz r9,0(r3)
+	   5: GETL       	R3, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFED029C:  2F890000  cmpi cr7,r9,0
+	   9: GETL       	R9, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFED02A0:  41BE000C  bc 13,30,0xFED02AC
+	  13: Js30o       	$0xFED02AC
+
+
+
+. 1829 FED0294 16
+. 80 7E 1B B8 81 23 00 00 2F 89 00 00 41 BE 00 0C
+==== BB 1830 (0xFED02AC) approx BBs exec'd 0 ====
+
+	0xFED02AC:  57FF803E  rlwinm r31,r31,16,0,31
+	   0: GETL       	R31, t0
+	   1: ROLL       	$0x10, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0xFED02B0:  7FE08120  mtcrf 0x8,r31
+	   4: GETL       	R31, t2
+	   5: ICRFL       	t2, $0x4, CR
+	   6: INCEIPL       	$4
+
+	0xFED02B4:  57FF803E  rlwinm r31,r31,16,0,31
+	   7: GETL       	R31, t4
+	   8: ROLL       	$0x10, t4
+	   9: PUTL       	t4, R31
+	  10: INCEIPL       	$4
+
+	0xFED02B8:  41B2FD08  bc 13,18,0xFECFFC0
+	  11: Js18o       	$0xFECFFC0
+
+
+
+. 1830 FED02AC 16
+. 57 FF 80 3E 7F E0 81 20 57 FF 80 3E 41 B2 FD 08
+==== BB 1831 (0xFED02BC) approx BBs exec'd 0 ====
+
+	0xFED02BC:  7CBAC050  subf r5,r26,r24
+	   0: GETL       	R26, t0
+	   1: GETL       	R24, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFED02C0:  2D950000  cmpi cr3,r21,0
+	   5: GETL       	R21, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x3, CR
+	   8: INCEIPL       	$4
+
+	0xFED02C4:  7D85EA14  add r12,r5,r29
+	   9: GETL       	R5, t8
+	  10: GETL       	R29, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0xFED02C8:  7F48D378  or r8,r26,r26
+	  14: GETL       	R26, t12
+	  15: PUTL       	t12, R8
+	  16: INCEIPL       	$4
+
+	0xFED02CC:  61970001  ori r23,r12,0x1
+	  17: GETL       	R12, t14
+	  18: ORL       	$0x1, t14
+	  19: PUTL       	t14, R23
+	  20: INCEIPL       	$4
+
+	0xFED02D0:  911C0030  stw r8,48(r28)
+	  21: GETL       	R8, t16
+	  22: GETL       	R28, t18
+	  23: ADDL       	$0x30, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFED02D4:  92FA0004  stw r23,4(r26)
+	  26: GETL       	R23, t20
+	  27: GETL       	R26, t22
+	  28: ADDL       	$0x4, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFED02D8:  835C044C  lwz r26,1100(r28)
+	  31: GETL       	R28, t24
+	  32: ADDL       	$0x44C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R26
+	  35: INCEIPL       	$4
+
+	0xFED02DC:  7D5AEA14  add r10,r26,r29
+	  36: GETL       	R26, t28
+	  37: GETL       	R29, t30
+	  38: ADDL       	t28, t30
+	  39: PUTL       	t30, R10
+	  40: INCEIPL       	$4
+
+	0xFED02E0:  915C044C  stw r10,1100(r28)
+	  41: GETL       	R10, t32
+	  42: GETL       	R28, t34
+	  43: ADDL       	$0x44C, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0xFED02E4:  41AEFB74  bc 13,14,0xFECFE58
+	  46: Js14o       	$0xFECFE58
+
+
+
+. 1831 FED02BC 44
+. 7C BA C0 50 2D 95 00 00 7D 85 EA 14 7F 48 D3 78 61 97 00 01 91 1C 00 30 92 FA 00 04 83 5C 04 4C 7D 5A EA 14 91 5C 04 4C 41 AE FB 74
+==== BB 1832 (0xFECFE58) approx BBs exec'd 0 ====
+
+	0xFECFE58:  82DC0450  lwz r22,1104(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x450, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R22
+	   4: INCEIPL       	$4
+
+	0xFECFE5C:  7F0AB040  cmpl cr6,r10,r22
+	   5: GETL       	R10, t4
+	   6: GETL       	R22, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFECFE60:  40990008  bc 4,25,0xFECFE68
+	  10: Jc25o       	$0xFECFE68
+
+
+
+. 1832 FECFE58 12
+. 82 DC 04 50 7F 0A B0 40 40 99 00 08
+==== BB 1833 (0xFECFE64) approx BBs exec'd 0 ====
+
+	0xFECFE64:  915C0450  stw r10,1104(r28)
+	   0: GETL       	R10, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	$0x450, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECFE68:  81C80004  lwz r14,4(r8)
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R14
+	   9: INCEIPL       	$4
+
+	0xFECFE6C:  55CB0038  rlwinm r11,r14,0,0,28
+	  10: GETL       	R14, t8
+	  11: ANDL       	$0xFFFFFFF8, t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0xFECFE70:  7F8B8840  cmpl cr7,r11,r17
+	  14: GETL       	R11, t10
+	  15: GETL       	R17, t12
+	  16: CMPUL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFECFE74:  419C0078  bc 12,28,0xFECFEEC
+	  19: Js28o       	$0xFECFEEC
+
+
+
+. 1833 FECFE64 20
+. 91 5C 04 50 81 C8 00 04 55 CB 00 38 7F 8B 88 40 41 9C 00 78
+==== BB 1834 (0xFECFE78) approx BBs exec'd 0 ====
+
+	0xFECFE78:  7F897A78  xor r9,r28,r15
+	   0: GETL       	R28, t0
+	   1: GETL       	R15, t2
+	   2: XORL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECFE7C:  7E795850  subf r19,r25,r11
+	   5: GETL       	R25, t4
+	   6: GETL       	R11, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R19
+	   9: INCEIPL       	$4
+
+	0xFECFE80:  3149FFFF  addic r10,r9,-1
+	  10: GETL       	R9, t8
+	  11: ADCL       	$0xFFFFFFFF, t8  (-wCa)
+	  12: PUTL       	t8, R10
+	  13: INCEIPL       	$4
+
+	0xFECFE84:  7F6A4910  subfe r27,r10,r9
+	  14: GETL       	R10, t10
+	  15: GETL       	R9, t12
+	  16: SBBL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R27
+	  18: INCEIPL       	$4
+
+	0xFECFE88:  38680008  addi r3,r8,8
+	  19: GETL       	R8, t14
+	  20: ADDL       	$0x8, t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0xFECFE8C:  577F103A  rlwinm r31,r27,2,0,29
+	  23: GETL       	R27, t16
+	  24: SHLL       	$0x2, t16
+	  25: PUTL       	t16, R31
+	  26: INCEIPL       	$4
+
+	0xFECFE90:  7E28CA14  add r17,r8,r25
+	  27: GETL       	R8, t18
+	  28: GETL       	R25, t20
+	  29: ADDL       	t18, t20
+	  30: PUTL       	t20, R17
+	  31: INCEIPL       	$4
+
+	0xFECFE94:  7FE6A378  or r6,r31,r20
+	  32: GETL       	R31, t22
+	  33: GETL       	R20, t24
+	  34: ORL       	t24, t22
+	  35: PUTL       	t22, R6
+	  36: INCEIPL       	$4
+
+	0xFECFE98:  626F0001  ori r15,r19,0x1
+	  37: GETL       	R19, t26
+	  38: ORL       	$0x1, t26
+	  39: PUTL       	t26, R15
+	  40: INCEIPL       	$4
+
+	0xFECFE9C:  90C80004  stw r6,4(r8)
+	  41: GETL       	R6, t28
+	  42: GETL       	R8, t30
+	  43: ADDL       	$0x4, t30
+	  44: STL       	t28, (t30)
+	  45: INCEIPL       	$4
+
+	0xFECFEA0:  7C681B78  or r8,r3,r3
+	  46: GETL       	R3, t32
+	  47: PUTL       	t32, R8
+	  48: INCEIPL       	$4
+
+	0xFECFEA4:  91F10004  stw r15,4(r17)
+	  49: GETL       	R15, t34
+	  50: GETL       	R17, t36
+	  51: ADDL       	$0x4, t36
+	  52: STL       	t34, (t36)
+	  53: INCEIPL       	$4
+
+	0xFECFEA8:  923C0030  stw r17,48(r28)
+	  54: GETL       	R17, t38
+	  55: GETL       	R28, t40
+	  56: ADDL       	$0x30, t40
+	  57: STL       	t38, (t40)
+	  58: INCEIPL       	$4
+
+	0xFECFEAC:  4BFFF804  b 0xFECF6B0
+	  59: JMPo       	$0xFECF6B0  ($4)
+
+
+
+. 1834 FECFE78 56
+. 7F 89 7A 78 7E 79 58 50 31 49 FF FF 7F 6A 49 10 38 68 00 08 57 7F 10 3A 7E 28 CA 14 7F E6 A3 78 62 6F 00 01 90 C8 00 04 7C 68 1B 78 91 F1 00 04 92 3C 00 30 4B FF F8 04
+==== BB 1835 (0xFECF6B0) approx BBs exec'd 0 ====
+
+	0xFECF6B0:  83810074  lwz r28,116(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFECF6B4:  7D034378  or r3,r8,r8
+	   5: GETL       	R8, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFECF6B8:  81810024  lwz r12,36(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x24, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R12
+	  12: INCEIPL       	$4
+
+	0xFECF6BC:  7F8803A6  mtlr r28
+	  13: GETL       	R28, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFECF6C0:  81C10028  lwz r14,40(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x28, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R14
+	  20: INCEIPL       	$4
+
+	0xFECF6C4:  81E1002C  lwz r15,44(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x2C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R15
+	  25: INCEIPL       	$4
+
+	0xFECF6C8:  7D818120  mtcrf 0x18,r12
+	  26: GETL       	R12, t20
+	  27: ICRFL       	t20, $0x3, CR
+	  28: ICRFL       	t20, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0xFECF6CC:  82010030  lwz r16,48(r1)
+	  30: GETL       	R1, t22
+	  31: ADDL       	$0x30, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R16
+	  34: INCEIPL       	$4
+
+	0xFECF6D0:  82210034  lwz r17,52(r1)
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x34, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R17
+	  39: INCEIPL       	$4
+
+	0xFECF6D4:  82410038  lwz r18,56(r1)
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x38, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R18
+	  44: INCEIPL       	$4
+
+	0xFECF6D8:  8261003C  lwz r19,60(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x3C, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R19
+	  49: INCEIPL       	$4
+
+	0xFECF6DC:  82810040  lwz r20,64(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x40, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R20
+	  54: INCEIPL       	$4
+
+	0xFECF6E0:  82A10044  lwz r21,68(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x44, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R21
+	  59: INCEIPL       	$4
+
+	0xFECF6E4:  82C10048  lwz r22,72(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x48, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R22
+	  64: INCEIPL       	$4
+
+	0xFECF6E8:  82E1004C  lwz r23,76(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x4C, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R23
+	  69: INCEIPL       	$4
+
+	0xFECF6EC:  83010050  lwz r24,80(r1)
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x50, t54
+	  72: LDL       	(t54), t56
+	  73: PUTL       	t56, R24
+	  74: INCEIPL       	$4
+
+	0xFECF6F0:  83210054  lwz r25,84(r1)
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x54, t58
+	  77: LDL       	(t58), t60
+	  78: PUTL       	t60, R25
+	  79: INCEIPL       	$4
+
+	0xFECF6F4:  83410058  lwz r26,88(r1)
+	  80: GETL       	R1, t62
+	  81: ADDL       	$0x58, t62
+	  82: LDL       	(t62), t64
+	  83: PUTL       	t64, R26
+	  84: INCEIPL       	$4
+
+	0xFECF6F8:  8361005C  lwz r27,92(r1)
+	  85: GETL       	R1, t66
+	  86: ADDL       	$0x5C, t66
+	  87: LDL       	(t66), t68
+	  88: PUTL       	t68, R27
+	  89: INCEIPL       	$4
+
+	0xFECF6FC:  83810060  lwz r28,96(r1)
+	  90: GETL       	R1, t70
+	  91: ADDL       	$0x60, t70
+	  92: LDL       	(t70), t72
+	  93: PUTL       	t72, R28
+	  94: INCEIPL       	$4
+
+	0xFECF700:  83A10064  lwz r29,100(r1)
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0x64, t74
+	  97: LDL       	(t74), t76
+	  98: PUTL       	t76, R29
+	  99: INCEIPL       	$4
+
+	0xFECF704:  83C10068  lwz r30,104(r1)
+	 100: GETL       	R1, t78
+	 101: ADDL       	$0x68, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R30
+	 104: INCEIPL       	$4
+
+	0xFECF708:  83E1006C  lwz r31,108(r1)
+	 105: GETL       	R1, t82
+	 106: ADDL       	$0x6C, t82
+	 107: LDL       	(t82), t84
+	 108: PUTL       	t84, R31
+	 109: INCEIPL       	$4
+
+	0xFECF70C:  38210070  addi r1,r1,112
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x70, t86
+	 112: PUTL       	t86, R1
+	 113: INCEIPL       	$4
+
+	0xFECF710:  4E800020  blr
+	 114: GETL       	LR, t88
+	 115: JMPo-r       	t88  ($4)
+
+
+
+. 1835 FECF6B0 100
+. 83 81 00 74 7D 03 43 78 81 81 00 24 7F 88 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+==== BB 1836 (0xFED19A8) approx BBs exec'd 0 ====
+
+	0xFED19A8:  7C7C1B79  or. r28,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R28
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFED19AC:  41820060  bc 12,2,0xFED1A0C
+	   5: Js02o       	$0xFED1A0C
+
+
+
+. 1836 FED19A8 8
+. 7C 7C 1B 79 41 82 00 60
+==== BB 1837 (0xFED19B0) approx BBs exec'd 0 ====
+
+	0xFED19B0:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFED19B4:  7C0004AC  sync
+	   3: INCEIPL       	$4
+
+	0xFED19B8:  7FA0F828  lwarx r29,r0,r31
+	   4: GETL       	R31, t2
+	   5: LOCKo       	
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0xFED19BC:  7C00F92D  stwcx. r0,r0,r31
+	   9: GETL       	R31, t6
+	  10: GETL       	R0, t8
+	  11: LOCKo       	
+	  12: STL       	t8, (t6)  (-rSo)
+	  13: ICRFL       	cr, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFED19C0:  40A2FFF8  bc 5,2,0xFED19B8
+	  15: Jc02o       	$0xFED19B8
+
+
+
+. 1837 FED19B0 20
+. 38 00 00 00 7C 00 04 AC 7F A0 F8 28 7C 00 F9 2D 40 A2 FF F8
+==== BB 1838 (0xFED19C4) approx BBs exec'd 0 ====
+
+	0xFED19C4:  2F9D0001  cmpi cr7,r29,1
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFED19C8:  419D00DC  bc 12,29,0xFED1AA4
+	   5: Js29o       	$0xFED1AA4
+
+
+
+. 1838 FED19C4 8
+. 2F 9D 00 01 41 9D 00 DC
+==== BB 1839 (0xFED19CC) approx BBs exec'd 0 ====
+
+	0xFED19CC:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED19D0:  83810024  lwz r28,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFED19D4:  83410008  lwz r26,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0xFED19D8:  7F8803A6  mtlr r28
+	  13: GETL       	R28, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFED19DC:  8361000C  lwz r27,12(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0xC, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R27
+	  20: INCEIPL       	$4
+
+	0xFED19E0:  83810010  lwz r28,16(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R28
+	  25: INCEIPL       	$4
+
+	0xFED19E4:  83A10014  lwz r29,20(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R29
+	  30: INCEIPL       	$4
+
+	0xFED19E8:  83C10018  lwz r30,24(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x18, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R30
+	  35: INCEIPL       	$4
+
+	0xFED19EC:  83E1001C  lwz r31,28(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0xFED19F0:  38210020  addi r1,r1,32
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x20, t32
+	  43: PUTL       	t32, R1
+	  44: INCEIPL       	$4
+
+	0xFED19F4:  4E800020  blr
+	  45: GETL       	LR, t34
+	  46: JMPo-r       	t34  ($4)
+
+
+
+. 1839 FED19CC 44
+. 7F 83 E3 78 83 81 00 24 83 41 00 08 7F 88 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1840 (0xFED1930) approx BBs exec'd 0 ====
+
+	0xFED1930:  83810024  lwz r28,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFED1934:  83410008  lwz r26,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0xFED1938:  7F8803A6  mtlr r28
+	  10: GETL       	R28, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFED193C:  8361000C  lwz r27,12(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xC, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0xFED1940:  83810010  lwz r28,16(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R28
+	  22: INCEIPL       	$4
+
+	0xFED1944:  83A10014  lwz r29,20(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x14, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R29
+	  27: INCEIPL       	$4
+
+	0xFED1948:  83C10018  lwz r30,24(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x18, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R30
+	  32: INCEIPL       	$4
+
+	0xFED194C:  83E1001C  lwz r31,28(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x1C, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R31
+	  37: INCEIPL       	$4
+
+	0xFED1950:  38210020  addi r1,r1,32
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x20, t30
+	  40: PUTL       	t30, R1
+	  41: INCEIPL       	$4
+
+	0xFED1954:  4E800020  blr
+	  42: GETL       	LR, t32
+	  43: JMPo-r       	t32  ($4)
+
+
+
+. 1840 FED1930 40
+. 83 81 00 24 83 41 00 08 7F 88 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1841 (0xFE8DE60) approx BBs exec'd 0 ====
+
+	0xFE8DE60:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DE64:  418600B4  bc 12,6,0xFE8DF18
+	   4: Js06o       	$0xFE8DF18
+
+
+
+. 1841 FE8DE60 8
+. 2C 83 00 00 41 86 00 B4
+==== BB 1842 (0xFE8DE68) approx BBs exec'd 0 ====
+
+	0xFE8DE68:  419200F8  bc 12,18,0xFE8DF60
+	   0: Js18o       	$0xFE8DF60
+
+
+
+. 1842 FE8DE68 4
+. 41 92 00 F8
+==== BB 1843 (0xFE8DF60) approx BBs exec'd 0 ====
+
+	0xFE8DF60:  7C671B78  or r7,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFE8DF64:  4BFFFF18  b 0xFE8DE7C
+	   3: JMPo       	$0xFE8DE7C  ($4)
+
+
+
+. 1843 FE8DF60 8
+. 7C 67 1B 78 4B FF FF 18
+==== BB 1844 (0xFE8DE7C) approx BBs exec'd 0 ====
+
+	0xFE8DE7C:  38A00000  li r5,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFE8DE80:  7E05E840  cmpl cr4,r5,r29
+	   3: GETL       	R5, t2
+	   4: GETL       	R29, t4
+	   5: CMPUL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0xFE8DE84:  4090008C  bc 4,16,0xFE8DF10
+	   8: Jc16o       	$0xFE8DF10
+
+
+
+. 1844 FE8DE7C 12
+. 38 A0 00 00 7E 05 E8 40 40 90 00 8C
+==== BB 1845 (0xFE8DE88) approx BBs exec'd 0 ====
+
+	0xFE8DE88:  839E1CF8  lwz r28,7416(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CF8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFE8DE8C:  815E1D50  lwz r10,7504(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1D50, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0xFE8DE90:  811E1B80  lwz r8,7040(r30)
+	  10: GETL       	R30, t8
+	  11: ADDL       	$0x1B80, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0xFE8DE94:  7FA903A6  mtctr r29
+	  15: GETL       	R29, t12
+	  16: PUTL       	t12, CTR
+	  17: INCEIPL       	$4
+
+	0xFE8DE98:  7CDC1214  add r6,r28,r2
+	  18: GETL       	R28, t14
+	  19: GETL       	R2, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R6
+	  22: INCEIPL       	$4
+
+	0xFE8DE9C:  7D881214  add r12,r8,r2
+	  23: GETL       	R8, t18
+	  24: GETL       	R2, t20
+	  25: ADDL       	t18, t20
+	  26: PUTL       	t20, R12
+	  27: INCEIPL       	$4
+
+	0xFE8DEA0:  7F8A1214  add r28,r10,r2
+	  28: GETL       	R10, t22
+	  29: GETL       	R2, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R28
+	  32: INCEIPL       	$4
+
+	0xFE8DEA4:  48000028  b 0xFE8DECC
+	  33: JMPo       	$0xFE8DECC  ($4)
+
+
+
+. 1845 FE8DE88 32
+. 83 9E 1C F8 81 5E 1D 50 81 1E 1B 80 7F A9 03 A6 7C DC 12 14 7D 88 12 14 7F 8A 12 14 48 00 00 28
+==== BB 1846 (0xFE8DECC) approx BBs exec'd 0 ====
+
+	0xFE8DECC:  83A60000  lwz r29,0(r6)
+	   0: GETL       	R6, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0xFE8DED0:  7CE43B78  or r4,r7,r7
+	   4: GETL       	R7, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0xFE8DED4:  2F1D0000  cmpi cr6,r29,0
+	   7: GETL       	R29, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x6, CR
+	  10: INCEIPL       	$4
+
+	0xFE8DED8:  419A00B4  bc 12,26,0xFE8DF8C
+	  11: Js26o       	$0xFE8DF8C
+
+
+
+. 1846 FE8DECC 16
+. 83 A6 00 00 7C E4 3B 78 2F 1D 00 00 41 9A 00 B4
+==== BB 1847 (0xFE8DEDC) approx BBs exec'd 0 ====
+
+	0xFE8DEDC:  7D1F28AE  lbzx r8,r31,r5
+	   0: GETL       	R5, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	t2, t0
+	   3: LDB       	(t0), t4
+	   4: PUTL       	t4, R8
+	   5: INCEIPL       	$4
+
+	0xFE8DEE0:  81460000  lwz r10,0(r6)
+	   6: GETL       	R6, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R10
+	   9: INCEIPL       	$4
+
+	0xFE8DEE4:  550B083C  rlwinm r11,r8,1,0,30
+	  10: GETL       	R8, t10
+	  11: SHLL       	$0x1, t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0xFE8DEE8:  3BA8FFD0  addi r29,r8,-48
+	  14: GETL       	R8, t12
+	  15: ADDL       	$0xFFFFFFD0, t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0xFE8DEEC:  7C0B522E  lhzx r0,r11,r10
+	  18: GETL       	R10, t14
+	  19: GETL       	R11, t16
+	  20: ADDL       	t16, t14
+	  21: LDW       	(t14), t18
+	  22: PUTL       	t18, R0
+	  23: INCEIPL       	$4
+
+	0xFE8DEF0:  2B9D0009  cmpli cr7,r29,9
+	  24: GETL       	R29, t20
+	  25: MOVL       	$0x9, t24
+	  26: CMPUL       	t20, t24, t22  (-rSo)
+	  27: ICRFL       	t22, $0x7, CR
+	  28: INCEIPL       	$4
+
+	0xFE8DEF4:  700B0004  andi. r11,r0,0x4
+	  29: GETL       	R0, t26
+	  30: ANDL       	$0x4, t26
+	  31: PUTL       	t26, R11
+	  32: CMP0L       	t26, t28  (-rSo)
+	  33: ICRFL       	t28, $0x0, CR
+	  34: INCEIPL       	$4
+
+	0xFE8DEF8:  40A2FFB0  bc 5,2,0xFE8DEA8
+	  35: Jc02o       	$0xFE8DEA8
+
+
+
+. 1847 FE8DEDC 32
+. 7D 1F 28 AE 81 46 00 00 55 0B 08 3C 3B A8 FF D0 7C 0B 52 2E 2B 9D 00 09 70 0B 00 04 40 A2 FF B0
+==== BB 1848 (0xFE8DEA8) approx BBs exec'd 0 ====
+
+	0xFE8DEA8:  814C0000  lwz r10,0(r12)
+	   0: GETL       	R12, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE8DEAC:  38E70001  addi r7,r7,1
+	   4: GETL       	R7, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0xFE8DEB0:  2F8A0000  cmpi cr7,r10,0
+	   8: GETL       	R10, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFE8DEB4:  419E00F0  bc 12,30,0xFE8DFA4
+	  12: Js30o       	$0xFE8DFA4
+
+
+
+. 1848 FE8DEA8 16
+. 81 4C 00 00 38 E7 00 01 2F 8A 00 00 41 9E 00 F0
+==== BB 1849 (0xFE8DEB8) approx BBs exec'd 0 ====
+
+	0xFE8DEB8:  550B103A  rlwinm r11,r8,2,0,29
+	   0: GETL       	R8, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFE8DEBC:  7C0B502E  lwzx r0,r11,r10
+	   4: GETL       	R10, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFE8DEC0:  98040000  stb r0,0(r4)
+	  10: GETL       	R0, t8
+	  11: GETL       	R4, t10
+	  12: STB       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFE8DEC4:  38A50001  addi r5,r5,1
+	  14: GETL       	R5, t12
+	  15: ADDL       	$0x1, t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0xFE8DEC8:  42400048  bc 18,0,0xFE8DF10
+	  18: GETL       	CTR, t14
+	  19: ADDL       	$0xFFFFFFFF, t14
+	  20: PUTL       	t14, CTR
+	  21: SETZL       	t14, NoValue
+	  22: JIFZL       	t14, $0xFE8DECC
+	  23: JMPo       	$0xFE8DF10  ($4)
+
+
+
+. 1849 FE8DEB8 20
+. 55 0B 10 3A 7C 0B 50 2E 98 04 00 00 38 A5 00 01 42 40 00 48
+==== BB 1850 (0xFE8DEFC) approx BBs exec'd 0 ====
+
+	0xFE8DEFC:  41BDFFC8  bc 13,29,0xFE8DEC4
+	   0: Js29o       	$0xFE8DEC4
+
+
+
+. 1850 FE8DEFC 4
+. 41 BD FF C8
+==== BB 1851 (0xFE8DEC4) approx BBs exec'd 0 ====
+
+	0xFE8DEC4:  38A50001  addi r5,r5,1
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0xFE8DEC8:  42400048  bc 18,0,0xFE8DF10
+	   4: GETL       	CTR, t2
+	   5: ADDL       	$0xFFFFFFFF, t2
+	   6: PUTL       	t2, CTR
+	   7: SETZL       	t2, NoValue
+	   8: JIFZL       	t2, $0xFE8DECC
+	   9: JMPo       	$0xFE8DF10  ($4)
+
+
+
+. 1851 FE8DEC4 8
+. 38 A5 00 01 42 40 00 48
+==== BB 1852 (0xFE8DF00) approx BBs exec'd 0 ====
+
+	0xFE8DF00:  99070000  stb r8,0(r7)
+	   0: GETL       	R8, t0
+	   1: GETL       	R7, t2
+	   2: STB       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE8DF04:  38A50001  addi r5,r5,1
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFE8DF08:  38E70001  addi r7,r7,1
+	   8: GETL       	R7, t6
+	   9: ADDL       	$0x1, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0xFE8DF0C:  4200FFC0  bc 16,0,0xFE8DECC
+	  12: GETL       	CTR, t8
+	  13: ADDL       	$0xFFFFFFFF, t8
+	  14: PUTL       	t8, CTR
+	  15: JIFZL       	t8, $0xFE8DF10
+	  16: JMPo       	$0xFE8DECC  ($4)
+
+
+
+. 1852 FE8DF00 16
+. 99 07 00 00 38 A5 00 01 38 E7 00 01 42 00 FF C0
+==== BB 1853 (0xFE8DF10) approx BBs exec'd 0 ====
+
+	0xFE8DF10:  38C00000  li r6,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFE8DF14:  98C70000  stb r6,0(r7)
+	   3: GETL       	R6, t2
+	   4: GETL       	R7, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFE8DF18:  83E10024  lwz r31,36(r1)
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x24, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R31
+	  11: INCEIPL       	$4
+
+	0xFE8DF1C:  80E1000C  lwz r7,12(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0xC, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R7
+	  16: INCEIPL       	$4
+
+	0xFE8DF20:  7FE803A6  mtlr r31
+	  17: GETL       	R31, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0xFE8DF24:  83810010  lwz r28,16(r1)
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x10, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R28
+	  24: INCEIPL       	$4
+
+	0xFE8DF28:  83A10014  lwz r29,20(r1)
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x14, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R29
+	  29: INCEIPL       	$4
+
+	0xFE8DF2C:  7CE08120  mtcrf 0x8,r7
+	  30: GETL       	R7, t24
+	  31: ICRFL       	t24, $0x4, CR
+	  32: INCEIPL       	$4
+
+	0xFE8DF30:  83C10018  lwz r30,24(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x18, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R30
+	  37: INCEIPL       	$4
+
+	0xFE8DF34:  83E1001C  lwz r31,28(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x1C, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R31
+	  42: INCEIPL       	$4
+
+	0xFE8DF38:  38210020  addi r1,r1,32
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x20, t34
+	  45: PUTL       	t34, R1
+	  46: INCEIPL       	$4
+
+	0xFE8DF3C:  4E800020  blr
+	  47: GETL       	LR, t36
+	  48: JMPo-r       	t36  ($4)
+
+
+
+. 1853 FE8DF10 48
+. 38 C0 00 00 98 C7 00 00 83 E1 00 24 80 E1 00 0C 7F E8 03 A6 83 81 00 10 83 A1 00 14 7C E0 81 20 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1854 (0xFE88280) approx BBs exec'd 0 ====
+
+	0xFE88280:  39200000  li r9,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0xFE88284:  7C7A1B79  or. r26,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R26
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE88288:  41A2FB70  bc 13,2,0xFE87DF8
+	   8: Js02o       	$0xFE87DF8
+
+
+
+. 1854 FE88280 12
+. 39 20 00 00 7C 7A 1B 79 41 A2 FB 70
+==== BB 1855 (0xFE8828C) approx BBs exec'd 0 ====
+
+	0xFE8828C:  7F64DB78  or r4,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE88290:  7FA5EB78  or r5,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFE88294:  4804EACD  bl 0xFED6D60
+	   6: MOVL       	$0xFE88298, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED6D60  ($4)
+
+
+
+. 1855 FE8828C 12
+. 7F 64 DB 78 7F A5 EB 78 48 04 EA CD
+==== BB 1856 (0xFE88298) approx BBs exec'd 0 ====
+
+	0xFE88298:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE8829C:  40860010  bc 4,6,0xFE882AC
+	   4: Jc06o       	$0xFE882AC
+
+
+
+. 1856 FE88298 8
+. 2C 83 00 00 40 86 00 10
+==== BB 1857 (0xFE882AC) approx BBs exec'd 0 ====
+
+	0xFE882AC:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE882B0:  4804E829  bl 0xFED6AD8
+	   3: MOVL       	$0xFE882B4, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 1857 FE882AC 8
+. 7F 43 D3 78 48 04 E8 29
+==== BB 1858 (0xFE882B4) approx BBs exec'd 0 ====
+
+	0xFE882B4:  7C7C1B78  or r28,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0xFE882B8:  7F23CB78  or r3,r25,r25
+	   3: GETL       	R25, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFE882BC:  4804E81D  bl 0xFED6AD8
+	   6: MOVL       	$0xFE882C0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 1858 FE882B4 12
+. 7C 7C 1B 78 7F 23 CB 78 48 04 E8 1D
+==== BB 1859 (0xFE882C0) approx BBs exec'd 0 ====
+
+	0xFE882C0:  7CB7D850  subf r5,r23,r27
+	   0: GETL       	R23, t0
+	   1: GETL       	R27, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFE882C4:  7E05E214  add r16,r5,r28
+	   5: GETL       	R5, t4
+	   6: GETL       	R28, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R16
+	   9: INCEIPL       	$4
+
+	0xFE882C8:  3B630001  addi r27,r3,1
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0xFE882CC:  7D70DA14  add r11,r16,r27
+	  14: GETL       	R16, t10
+	  15: GETL       	R27, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R11
+	  18: INCEIPL       	$4
+
+	0xFE882D0:  80E10000  lwz r7,0(r1)
+	  19: GETL       	R1, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R7
+	  22: INCEIPL       	$4
+
+	0xFE882D4:  394B001E  addi r10,r11,30
+	  23: GETL       	R11, t18
+	  24: ADDL       	$0x1E, t18
+	  25: PUTL       	t18, R10
+	  26: INCEIPL       	$4
+
+	0xFE882D8:  55480036  rlwinm r8,r10,0,0,27
+	  27: GETL       	R10, t20
+	  28: ANDL       	$0xFFFFFFF0, t20
+	  29: PUTL       	t20, R8
+	  30: INCEIPL       	$4
+
+	0xFE882DC:  7CC800D0  neg r6,r8
+	  31: GETL       	R8, t22
+	  32: NEGL       	t22
+	  33: PUTL       	t22, R6
+	  34: INCEIPL       	$4
+
+	0xFE882E0:  7CE1316E  stwux r7,r1,r6
+	  35: GETL       	R6, t24
+	  36: GETL       	R1, t26
+	  37: ADDL       	t26, t24
+	  38: PUTL       	t24, R1
+	  39: GETL       	R7, t28
+	  40: STL       	t28, (t24)
+	  41: INCEIPL       	$4
+
+	0xFE882E4:  38810017  addi r4,r1,23
+	  42: GETL       	R1, t30
+	  43: ADDL       	$0x17, t30
+	  44: PUTL       	t30, R4
+	  45: INCEIPL       	$4
+
+	0xFE882E8:  549D0036  rlwinm r29,r4,0,0,27
+	  46: GETL       	R4, t32
+	  47: ANDL       	$0xFFFFFFF0, t32
+	  48: PUTL       	t32, R29
+	  49: INCEIPL       	$4
+
+	0xFE882EC:  7EE4BB78  or r4,r23,r23
+	  50: GETL       	R23, t34
+	  51: PUTL       	t34, R4
+	  52: INCEIPL       	$4
+
+	0xFE882F0:  7FA3EB78  or r3,r29,r29
+	  53: GETL       	R29, t36
+	  54: PUTL       	t36, R3
+	  55: INCEIPL       	$4
+
+	0xFE882F4:  7FB7EB78  or r23,r29,r29
+	  56: GETL       	R29, t38
+	  57: PUTL       	t38, R23
+	  58: INCEIPL       	$4
+
+	0xFE882F8:  4804FD09  bl 0xFED8000
+	  59: MOVL       	$0xFE882FC, t40
+	  60: PUTL       	t40, LR
+	  61: JMPo-c       	$0xFED8000  ($4)
+
+
+
+. 1859 FE882C0 60
+. 7C B7 D8 50 7E 05 E2 14 3B 63 00 01 7D 70 DA 14 80 E1 00 00 39 4B 00 1E 55 48 00 36 7C C8 00 D0 7C E1 31 6E 38 81 00 17 54 9D 00 36 7E E4 BB 78 7F A3 EB 78 7F B7 EB 78 48 04 FD 09
+==== BB 1860 mempcpy(0xFED8000) approx BBs exec'd 0 ====
+
+	0xFED8000:  2B85000F  cmpli cr7,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFED8004:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFED8008:  9421FFE0  stwu r1,-32(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFE0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFED800C:  93A10014  stw r29,20(r1)
+	  14: GETL       	R29, t12
+	  15: GETL       	R1, t14
+	  16: ADDL       	$0x14, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0xFED8010:  7C7D1B78  or r29,r3,r3
+	  19: GETL       	R3, t16
+	  20: PUTL       	t16, R29
+	  21: INCEIPL       	$4
+
+	0xFED8014:  93E1001C  stw r31,28(r1)
+	  22: GETL       	R31, t18
+	  23: GETL       	R1, t20
+	  24: ADDL       	$0x1C, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0xFED8018:  7C9F2378  or r31,r4,r4
+	  27: GETL       	R4, t22
+	  28: PUTL       	t22, R31
+	  29: INCEIPL       	$4
+
+	0xFED801C:  93810010  stw r28,16(r1)
+	  30: GETL       	R28, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x10, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0xFED8020:  93C10018  stw r30,24(r1)
+	  35: GETL       	R30, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x18, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0xFED8024:  90010024  stw r0,36(r1)
+	  40: GETL       	R0, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x24, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0xFED8028:  409D0058  bc 4,29,0xFED8080
+	  45: Jc29o       	$0xFED8080
+
+
+
+. 1860 FED8000 44
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 E1 00 1C 7C 9F 23 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+==== BB 1861 (0xFED8080) approx BBs exec'd 0 ====
+
+	0xFED8080:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFED8084:  4186001C  bc 12,6,0xFED80A0
+	   4: Js06o       	$0xFED80A0
+
+
+
+. 1861 FED8080 8
+. 2C 85 00 00 41 86 00 1C
+==== BB 1862 (0xFED8088) approx BBs exec'd 0 ====
+
+	0xFED8088:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0xFED808C:  88FF0000  lbz r7,0(r31)
+	   3: GETL       	R31, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R7
+	   6: INCEIPL       	$4
+
+	0xFED8090:  3BFF0001  addi r31,r31,1
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R31
+	  10: INCEIPL       	$4
+
+	0xFED8094:  98FD0000  stb r7,0(r29)
+	  11: GETL       	R7, t8
+	  12: GETL       	R29, t10
+	  13: STB       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFED8098:  3BBD0001  addi r29,r29,1
+	  15: GETL       	R29, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0xFED809C:  4200FFF0  bc 16,0,0xFED808C
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0xFED80A0
+	  23: JMPo       	$0xFED808C  ($4)
+
+
+
+. 1862 FED8088 24
+. 7C A9 03 A6 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+==== BB 1863 (0xFED808C) approx BBs exec'd 0 ====
+
+	0xFED808C:  88FF0000  lbz r7,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0xFED8090:  3BFF0001  addi r31,r31,1
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0xFED8094:  98FD0000  stb r7,0(r29)
+	   8: GETL       	R7, t6
+	   9: GETL       	R29, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFED8098:  3BBD0001  addi r29,r29,1
+	  12: GETL       	R29, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0xFED809C:  4200FFF0  bc 16,0,0xFED808C
+	  16: GETL       	CTR, t12
+	  17: ADDL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, CTR
+	  19: JIFZL       	t12, $0xFED80A0
+	  20: JMPo       	$0xFED808C  ($4)
+
+
+
+. 1863 FED808C 20
+. 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+==== BB 1864 (0xFED80A0) approx BBs exec'd 0 ====
+
+	0xFED80A0:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFED80A4:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED80A8:  83810010  lwz r28,16(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0xFED80AC:  83A10014  lwz r29,20(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x14, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0xFED80B0:  7D0803A6  mtlr r8
+	  18: GETL       	R8, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFED80B4:  83C10018  lwz r30,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R30
+	  25: INCEIPL       	$4
+
+	0xFED80B8:  83E1001C  lwz r31,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFED80BC:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0xFED80C0:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+
+. 1864 FED80A0 36
+. 81 01 00 24 7F A3 EB 78 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1865 (0xFE882FC) approx BBs exec'd 0 ====
+
+	0xFE882FC:  7F44D378  or r4,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE88300:  7F85E378  or r5,r28,r28
+	   3: GETL       	R28, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFE88304:  4804FCFD  bl 0xFED8000
+	   6: MOVL       	$0xFE88308, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED8000  ($4)
+
+
+
+. 1865 FE882FC 12
+. 7F 44 D3 78 7F 85 E3 78 48 04 FC FD
+==== BB 1866 (0xFE88308) approx BBs exec'd 0 ====
+
+	0xFE88308:  7F24CB78  or r4,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE8830C:  7F65DB78  or r5,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFE88310:  480503C9  bl 0xFED86D8
+	   6: MOVL       	$0xFE88314, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 1866 FE88308 12
+. 7F 24 CB 78 7F 65 DB 78 48 05 03 C9
+==== BB 1867 memcpy(0xFED86D8) approx BBs exec'd 0 ====
+
+	0xFED86D8:  2B85000F  cmpli cr7,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFED86DC:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFED86E0:  9421FFE0  stwu r1,-32(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFE0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFED86E4:  9361000C  stw r27,12(r1)
+	  14: GETL       	R27, t12
+	  15: GETL       	R1, t14
+	  16: ADDL       	$0xC, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0xFED86E8:  7C7B1B78  or r27,r3,r3
+	  19: GETL       	R3, t16
+	  20: PUTL       	t16, R27
+	  21: INCEIPL       	$4
+
+	0xFED86EC:  93A10014  stw r29,20(r1)
+	  22: GETL       	R29, t18
+	  23: GETL       	R1, t20
+	  24: ADDL       	$0x14, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0xFED86F0:  7C9D2378  or r29,r4,r4
+	  27: GETL       	R4, t22
+	  28: PUTL       	t22, R29
+	  29: INCEIPL       	$4
+
+	0xFED86F4:  93E1001C  stw r31,28(r1)
+	  30: GETL       	R31, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x1C, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0xFED86F8:  7C7F1B78  or r31,r3,r3
+	  35: GETL       	R3, t28
+	  36: PUTL       	t28, R31
+	  37: INCEIPL       	$4
+
+	0xFED86FC:  93810010  stw r28,16(r1)
+	  38: GETL       	R28, t30
+	  39: GETL       	R1, t32
+	  40: ADDL       	$0x10, t32
+	  41: STL       	t30, (t32)
+	  42: INCEIPL       	$4
+
+	0xFED8700:  93C10018  stw r30,24(r1)
+	  43: GETL       	R30, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x18, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0xFED8704:  90010024  stw r0,36(r1)
+	  48: GETL       	R0, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x24, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0xFED8708:  409D0058  bc 4,29,0xFED8760
+	  53: Jc29o       	$0xFED8760
+
+
+
+. 1867 FED86D8 52
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C 7B 1B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C 7F 1B 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+==== BB 1868 (0xFED8760) approx BBs exec'd 0 ====
+
+	0xFED8760:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFED8764:  4186001C  bc 12,6,0xFED8780
+	   4: Js06o       	$0xFED8780
+
+
+
+. 1868 FED8760 8
+. 2C 85 00 00 41 86 00 1C
+==== BB 1869 (0xFED8768) approx BBs exec'd 0 ====
+
+	0xFED8768:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0xFED876C:  88FD0000  lbz r7,0(r29)
+	   3: GETL       	R29, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R7
+	   6: INCEIPL       	$4
+
+	0xFED8770:  3BBD0001  addi r29,r29,1
+	   7: GETL       	R29, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R29
+	  10: INCEIPL       	$4
+
+	0xFED8774:  98FF0000  stb r7,0(r31)
+	  11: GETL       	R7, t8
+	  12: GETL       	R31, t10
+	  13: STB       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFED8778:  3BFF0001  addi r31,r31,1
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R31
+	  18: INCEIPL       	$4
+
+	0xFED877C:  4200FFF0  bc 16,0,0xFED876C
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0xFED8780
+	  23: JMPo       	$0xFED876C  ($4)
+
+
+
+. 1869 FED8768 24
+. 7C A9 03 A6 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+==== BB 1870 (0xFED8780) approx BBs exec'd 0 ====
+
+	0xFED8780:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFED8784:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED8788:  83810010  lwz r28,16(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0xFED878C:  8361000C  lwz r27,12(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xC, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0xFED8790:  7D0803A6  mtlr r8
+	  18: GETL       	R8, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFED8794:  83A10014  lwz r29,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0xFED8798:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0xFED879C:  83E1001C  lwz r31,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0xFED87A0:  38210020  addi r1,r1,32
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0xFED87A4:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+
+. 1870 FED8780 40
+. 81 01 00 24 7F 63 DB 78 83 81 00 10 83 61 00 0C 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1871 (0xFE88314) approx BBs exec'd 0 ====
+
+	0xFE88314:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE88318:  48122019  bl 0xFFAA330
+	   3: MOVL       	$0xFE8831C, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFFAA330  ($4)
+
+
+
+. 1871 FE88314 8
+. 7F 43 D3 78 48 12 20 19
+==== BB 1872 (0xFFAA330) approx BBs exec'd 0 ====
+
+	0xFFAA330:  4BF25160  b 0xFECF490
+	   0: JMPo       	$0xFECF490  ($4)
+
+
+
+. 1872 FFAA330 4
+. 4B F2 51 60
+==== BB 1873 free(0xFECF490) approx BBs exec'd 0 ====
+
+	0xFECF490:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFECF494:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFECF498:  480D89B9  bl 0xFFA7E50
+	   9: MOVL       	$0xFECF49C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1873 FECF490 12
+. 94 21 FF E0 7C 08 02 A6 48 0D 89 B9
+==== BB 1874 (0xFECF49C) approx BBs exec'd 0 ====
+
+	0xFECF49C:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECF4A0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFECF4A4:  93810010  stw r28,16(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECF4A8:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFECF4AC:  7C7C1B78  or r28,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFECF4B0:  93A10014  stw r29,20(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFECF4B4:  813E1D88  lwz r9,7560(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x1D88, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0xFECF4B8:  93E1001C  stw r31,28(r1)
+	  31: GETL       	R31, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x1C, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFECF4BC:  80090000  lwz r0,0(r9)
+	  36: GETL       	R9, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R0
+	  39: INCEIPL       	$4
+
+	0xFECF4C0:  2F800000  cmpi cr7,r0,0
+	  40: GETL       	R0, t32
+	  41: CMP0L       	t32, t34  (-rSo)
+	  42: ICRFL       	t34, $0x7, CR
+	  43: INCEIPL       	$4
+
+	0xFECF4C4:  419E0034  bc 12,30,0xFECF4F8
+	  44: Js30o       	$0xFECF4F8
+
+
+
+. 1874 FECF49C 44
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 90 01 00 24 7C 7C 1B 78 93 A1 00 14 81 3E 1D 88 93 E1 00 1C 80 09 00 00 2F 80 00 00 41 9E 00 34
+==== BB 1875 (0xFECF4F8) approx BBs exec'd 0 ====
+
+	0xFECF4F8:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFECF4FC:  41A2FFDC  bc 13,2,0xFECF4D8
+	   4: Js02o       	$0xFECF4D8
+
+
+
+. 1875 FECF4F8 8
+. 2C 03 00 00 41 A2 FF DC
+==== BB 1876 (0xFECF500) approx BBs exec'd 0 ====
+
+	0xFECF500:  3903FFF8  addi r8,r3,-8
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFF8, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0xFECF504:  80880004  lwz r4,4(r8)
+	   4: GETL       	R8, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFECF508:  70800002  andi. r0,r4,0x2
+	   9: GETL       	R4, t6
+	  10: ANDL       	$0x2, t6
+	  11: PUTL       	t6, R0
+	  12: CMP0L       	t6, t8  (-rSo)
+	  13: ICRFL       	t8, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFECF50C:  40820088  bc 4,2,0xFECF594
+	  15: Jc02o       	$0xFECF594
+
+
+
+. 1876 FECF500 16
+. 39 03 FF F8 80 88 00 04 70 80 00 02 40 82 00 88
+==== BB 1877 (0xFECF510) approx BBs exec'd 0 ====
+
+	0xFECF510:  70800004  andi. r0,r4,0x4
+	   0: GETL       	R4, t0
+	   1: ANDL       	$0x4, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECF514:  83FE05FC  lwz r31,1532(r30)
+	   6: GETL       	R30, t4
+	   7: ADDL       	$0x5FC, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R31
+	  10: INCEIPL       	$4
+
+	0xFECF518:  40820070  bc 4,2,0xFECF588
+	  11: Jc02o       	$0xFECF588
+
+
+
+. 1877 FECF510 12
+. 70 80 00 04 83 FE 05 FC 40 82 00 70
+==== BB 1878 (0xFECF51C) approx BBs exec'd 0 ====
+
+	0xFECF51C:  3BA00000  li r29,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFECF520:  38000001  li r0,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFECF524:  7D00F828  lwarx r8,r0,r31
+	   6: GETL       	R31, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R8
+	  10: INCEIPL       	$4
+
+	0xFECF528:  7C08E800  cmp cr0,r8,r29
+	  11: GETL       	R8, t8
+	  12: GETL       	R29, t10
+	  13: CMPL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFECF52C:  4082000C  bc 4,2,0xFECF538
+	  16: Jc02o       	$0xFECF538
+
+
+
+. 1878 FECF51C 20
+. 3B A0 00 00 38 00 00 01 7D 00 F8 28 7C 08 E8 00 40 82 00 0C
+==== BB 1879 (0xFECF530) approx BBs exec'd 0 ====
+
+	0xFECF530:  7C00F92D  stwcx. r0,r0,r31
+	   0: GETL       	R31, t0
+	   1: GETL       	R0, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECF534:  40A2FFF0  bc 5,2,0xFECF524
+	   6: Jc02o       	$0xFECF524
+
+
+
+. 1879 FECF530 8
+. 7C 00 F9 2D 40 A2 FF F0
+==== BB 1880 (0xFECF524) approx BBs exec'd 0 ====
+
+	0xFECF524:  7D00F828  lwarx r8,r0,r31
+	   0: GETL       	R31, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFECF528:  7C08E800  cmp cr0,r8,r29
+	   5: GETL       	R8, t4
+	   6: GETL       	R29, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECF52C:  4082000C  bc 4,2,0xFECF538
+	  10: Jc02o       	$0xFECF538
+
+
+
+. 1880 FECF524 12
+. 7D 00 F8 28 7C 08 E8 00 40 82 00 0C
+==== BB 1881 (0xFECF538) approx BBs exec'd 0 ====
+
+	0xFECF538:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFECF53C:  2C880000  cmpi cr1,r8,0
+	   1: GETL       	R8, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFECF540:  40860090  bc 4,6,0xFECF5D0
+	   5: Jc06o       	$0xFECF5D0
+
+
+
+. 1881 FECF538 12
+. 4C 00 01 2C 2C 88 00 00 40 86 00 90
+==== BB 1882 (0xFECF544) approx BBs exec'd 0 ====
+
+	0xFECF544:  7F84E378  or r4,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFECF548:  7FE3FB78  or r3,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFECF54C:  4BFFF64D  bl 0xFECEB98
+	   6: MOVL       	$0xFECF550, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFECEB98  ($4)
+
+
+
+. 1882 FECF544 12
+. 7F 84 E3 78 7F E3 FB 78 4B FF F6 4D
+==== BB 1883 _int_free(0xFECEB98) approx BBs exec'd 0 ====
+
+	0xFECEB98:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFECEB9C:  9421FFA0  stwu r1,-96(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFA0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECEBA0:  93E1005C  stw r31,92(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x5C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECEBA4:  3BE4FFF8  addi r31,r4,-8
+	  14: GETL       	R4, t10
+	  15: ADDL       	$0xFFFFFFF8, t10
+	  16: PUTL       	t10, R31
+	  17: INCEIPL       	$4
+
+	0xFECEBA8:  93A10054  stw r29,84(r1)
+	  18: GETL       	R29, t12
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x54, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0xFECEBAC:  480D92A5  bl 0xFFA7E50
+	  23: MOVL       	$0xFECEBB0, t16
+	  24: PUTL       	t16, LR
+	  25: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1883 FECEB98 24
+. 7C A8 02 A6 94 21 FF A0 93 E1 00 5C 3B E4 FF F8 93 A1 00 54 48 0D 92 A5
+==== BB 1884 (0xFECEBB0) approx BBs exec'd 0 ====
+
+	0xFECEBB0:  90A10064  stw r5,100(r1)
+	   0: GETL       	R5, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x64, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECEBB4:  813F0004  lwz r9,4(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFECEBB8:  93810050  stw r28,80(r1)
+	  10: GETL       	R28, t8
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0x50, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFECEBBC:  7C7C1B78  or r28,r3,r3
+	  15: GETL       	R3, t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0xFECEBC0:  553D0038  rlwinm r29,r9,0,0,28
+	  18: GETL       	R9, t14
+	  19: ANDL       	$0xFFFFFFF8, t14
+	  20: PUTL       	t14, R29
+	  21: INCEIPL       	$4
+
+	0xFECEBC4:  93C10058  stw r30,88(r1)
+	  22: GETL       	R30, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x58, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xFECEBC8:  7C1D00D0  neg r0,r29
+	  27: GETL       	R29, t20
+	  28: NEGL       	t20
+	  29: PUTL       	t20, R0
+	  30: INCEIPL       	$4
+
+	0xFECEBCC:  92210024  stw r17,36(r1)
+	  31: GETL       	R17, t22
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x24, t24
+	  34: STL       	t22, (t24)
+	  35: INCEIPL       	$4
+
+	0xFECEBD0:  7F9F0040  cmpl cr7,r31,r0
+	  36: GETL       	R31, t26
+	  37: GETL       	R0, t28
+	  38: CMPUL       	t26, t28, t30  (-rSo)
+	  39: ICRFL       	t30, $0x7, CR
+	  40: INCEIPL       	$4
+
+	0xFECEBD4:  92410028  stw r18,40(r1)
+	  41: GETL       	R18, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x28, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0xFECEBD8:  9261002C  stw r19,44(r1)
+	  46: GETL       	R19, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x2C, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0xFECEBDC:  7FC802A6  mflr r30
+	  51: GETL       	LR, t40
+	  52: PUTL       	t40, R30
+	  53: INCEIPL       	$4
+
+	0xFECEBE0:  92810030  stw r20,48(r1)
+	  54: GETL       	R20, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x30, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0xFECEBE4:  92A10034  stw r21,52(r1)
+	  59: GETL       	R21, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x34, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFECEBE8:  92C10038  stw r22,56(r1)
+	  64: GETL       	R22, t50
+	  65: GETL       	R1, t52
+	  66: ADDL       	$0x38, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0xFECEBEC:  92E1003C  stw r23,60(r1)
+	  69: GETL       	R23, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0x3C, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0xFECEBF0:  93010040  stw r24,64(r1)
+	  74: GETL       	R24, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x40, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0xFECEBF4:  93210044  stw r25,68(r1)
+	  79: GETL       	R25, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x44, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0xFECEBF8:  93410048  stw r26,72(r1)
+	  84: GETL       	R26, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x48, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0xFECEBFC:  9361004C  stw r27,76(r1)
+	  89: GETL       	R27, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	$0x4C, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0xFECEC00:  419D02A0  bc 12,29,0xFECEEA0
+	  94: Js29o       	$0xFECEEA0
+
+
+
+. 1884 FECEBB0 84
+. 90 A1 00 64 81 3F 00 04 93 81 00 50 7C 7C 1B 78 55 3D 00 38 93 C1 00 58 7C 1D 00 D0 92 21 00 24 7F 9F 00 40 92 41 00 28 92 61 00 2C 7F C8 02 A6 92 81 00 30 92 A1 00 34 92 C1 00 38 92 E1 00 3C 93 01 00 40 93 21 00 44 93 41 00 48 93 61 00 4C 41 9D 02 A0
+==== BB 1885 (0xFECEC04) approx BBs exec'd 0 ====
+
+	0xFECEC04:  73E00007  andi. r0,r31,0x7
+	   0: GETL       	R31, t0
+	   1: ANDL       	$0x7, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECEC08:  40820298  bc 4,2,0xFECEEA0
+	   6: Jc02o       	$0xFECEEA0
+
+
+
+. 1885 FECEC04 8
+. 73 E0 00 07 40 82 02 98
+==== BB 1886 (0xFECEC0C) approx BBs exec'd 0 ====
+
+	0xFECEC0C:  81630004  lwz r11,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFECEC10:  7F0BE840  cmpl cr6,r11,r29
+	   5: GETL       	R11, t4
+	   6: GETL       	R29, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFECEC14:  409801DC  bc 4,24,0xFECEDF0
+	  10: Jc24o       	$0xFECEDF0
+
+
+
+. 1886 FECEC0C 12
+. 81 63 00 04 7F 0B E8 40 40 98 01 DC
+==== BB 1887 (0xFECEDF0) approx BBs exec'd 0 ====
+
+	0xFECEDF0:  7CDFEA14  add r6,r31,r29
+	   0: GETL       	R31, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFECEDF4:  81260004  lwz r9,4(r6)
+	   5: GETL       	R6, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFECEDF8:  2B890008  cmpli cr7,r9,8
+	  10: GETL       	R9, t8
+	  11: MOVL       	$0x8, t12
+	  12: CMPUL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFECEDFC:  409D0154  bc 4,29,0xFECEF50
+	  15: Jc29o       	$0xFECEF50
+
+
+
+. 1887 FECEDF0 16
+. 7C DF EA 14 81 26 00 04 2B 89 00 08 40 9D 01 54
+==== BB 1888 (0xFECEE00) approx BBs exec'd 0 ====
+
+	0xFECEE00:  8103044C  lwz r8,1100(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x44C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFECEE04:  552A0038  rlwinm r10,r9,0,0,28
+	   5: GETL       	R9, t4
+	   6: ANDL       	$0xFFFFFFF8, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0xFECEE08:  7C8A4040  cmpl cr1,r10,r8
+	   9: GETL       	R10, t6
+	  10: GETL       	R8, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0xFECEE0C:  40840144  bc 4,4,0xFECEF50
+	  14: Jc04o       	$0xFECEF50
+
+
+
+. 1888 FECEE00 16
+. 81 03 04 4C 55 2A 00 38 7C 8A 40 40 40 84 01 44
+==== BB 1889 (0xFECEE10) approx BBs exec'd 0 ====
+
+	0xFECEE10:  57AAF87A  rlwinm r10,r29,31,1,29
+	   0: GETL       	R29, t0
+	   1: ROLL       	$0x1F, t0
+	   2: ANDL       	$0x7FFFFFFC, t0
+	   3: PUTL       	t0, R10
+	   4: INCEIPL       	$4
+
+	0xFECEE14:  556C003C  rlwinm r12,r11,0,0,30
+	   5: GETL       	R11, t2
+	   6: ANDL       	$0xFFFFFFFE, t2
+	   7: PUTL       	t2, R12
+	   8: INCEIPL       	$4
+
+	0xFECEE18:  7D2A182E  lwzx r9,r10,r3
+	   9: GETL       	R3, t4
+	  10: GETL       	R10, t6
+	  11: ADDL       	t6, t4
+	  12: LDL       	(t4), t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0xFECEE1C:  91830004  stw r12,4(r3)
+	  15: GETL       	R12, t10
+	  16: GETL       	R3, t12
+	  17: ADDL       	$0x4, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0xFECEE20:  7C09F800  cmp cr0,r9,r31
+	  20: GETL       	R9, t14
+	  21: GETL       	R31, t16
+	  22: CMPL       	t14, t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0xFECEE24:  837E0664  lwz r27,1636(r30)
+	  25: GETL       	R30, t20
+	  26: ADDL       	$0x664, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R27
+	  29: INCEIPL       	$4
+
+	0xFECEE28:  4182005C  bc 12,2,0xFECEE84
+	  30: Js02o       	$0xFECEE84
+
+
+
+. 1889 FECEE10 28
+. 57 AA F8 7A 55 6C 00 3C 7D 2A 18 2E 91 83 00 04 7C 09 F8 00 83 7E 06 64 41 82 00 5C
+==== BB 1890 (0xFECEE2C) approx BBs exec'd 0 ====
+
+	0xFECEE2C:  913F0008  stw r9,8(r31)
+	   0: GETL       	R9, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECEE30:  7FEA192E  stwx r31,r10,r3
+	   5: GETL       	R3, t4
+	   6: GETL       	R10, t6
+	   7: ADDL       	t6, t4
+	   8: GETL       	R31, t8
+	   9: STL       	t8, (t4)
+	  10: INCEIPL       	$4
+
+	0xFECEE34:  83A10064  lwz r29,100(r1)
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0x64, t10
+	  13: LDL       	(t10), t12
+	  14: PUTL       	t12, R29
+	  15: INCEIPL       	$4
+
+	0xFECEE38:  82210024  lwz r17,36(r1)
+	  16: GETL       	R1, t14
+	  17: ADDL       	$0x24, t14
+	  18: LDL       	(t14), t16
+	  19: PUTL       	t16, R17
+	  20: INCEIPL       	$4
+
+	0xFECEE3C:  7FA803A6  mtlr r29
+	  21: GETL       	R29, t18
+	  22: PUTL       	t18, LR
+	  23: INCEIPL       	$4
+
+	0xFECEE40:  82410028  lwz r18,40(r1)
+	  24: GETL       	R1, t20
+	  25: ADDL       	$0x28, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R18
+	  28: INCEIPL       	$4
+
+	0xFECEE44:  8261002C  lwz r19,44(r1)
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0x2C, t24
+	  31: LDL       	(t24), t26
+	  32: PUTL       	t26, R19
+	  33: INCEIPL       	$4
+
+	0xFECEE48:  82810030  lwz r20,48(r1)
+	  34: GETL       	R1, t28
+	  35: ADDL       	$0x30, t28
+	  36: LDL       	(t28), t30
+	  37: PUTL       	t30, R20
+	  38: INCEIPL       	$4
+
+	0xFECEE4C:  82A10034  lwz r21,52(r1)
+	  39: GETL       	R1, t32
+	  40: ADDL       	$0x34, t32
+	  41: LDL       	(t32), t34
+	  42: PUTL       	t34, R21
+	  43: INCEIPL       	$4
+
+	0xFECEE50:  82C10038  lwz r22,56(r1)
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x38, t36
+	  46: LDL       	(t36), t38
+	  47: PUTL       	t38, R22
+	  48: INCEIPL       	$4
+
+	0xFECEE54:  82E1003C  lwz r23,60(r1)
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x3C, t40
+	  51: LDL       	(t40), t42
+	  52: PUTL       	t42, R23
+	  53: INCEIPL       	$4
+
+	0xFECEE58:  83010040  lwz r24,64(r1)
+	  54: GETL       	R1, t44
+	  55: ADDL       	$0x40, t44
+	  56: LDL       	(t44), t46
+	  57: PUTL       	t46, R24
+	  58: INCEIPL       	$4
+
+	0xFECEE5C:  83210044  lwz r25,68(r1)
+	  59: GETL       	R1, t48
+	  60: ADDL       	$0x44, t48
+	  61: LDL       	(t48), t50
+	  62: PUTL       	t50, R25
+	  63: INCEIPL       	$4
+
+	0xFECEE60:  83410048  lwz r26,72(r1)
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x48, t52
+	  66: LDL       	(t52), t54
+	  67: PUTL       	t54, R26
+	  68: INCEIPL       	$4
+
+	0xFECEE64:  8361004C  lwz r27,76(r1)
+	  69: GETL       	R1, t56
+	  70: ADDL       	$0x4C, t56
+	  71: LDL       	(t56), t58
+	  72: PUTL       	t58, R27
+	  73: INCEIPL       	$4
+
+	0xFECEE68:  83810050  lwz r28,80(r1)
+	  74: GETL       	R1, t60
+	  75: ADDL       	$0x50, t60
+	  76: LDL       	(t60), t62
+	  77: PUTL       	t62, R28
+	  78: INCEIPL       	$4
+
+	0xFECEE6C:  83A10054  lwz r29,84(r1)
+	  79: GETL       	R1, t64
+	  80: ADDL       	$0x54, t64
+	  81: LDL       	(t64), t66
+	  82: PUTL       	t66, R29
+	  83: INCEIPL       	$4
+
+	0xFECEE70:  83C10058  lwz r30,88(r1)
+	  84: GETL       	R1, t68
+	  85: ADDL       	$0x58, t68
+	  86: LDL       	(t68), t70
+	  87: PUTL       	t70, R30
+	  88: INCEIPL       	$4
+
+	0xFECEE74:  83E1005C  lwz r31,92(r1)
+	  89: GETL       	R1, t72
+	  90: ADDL       	$0x5C, t72
+	  91: LDL       	(t72), t74
+	  92: PUTL       	t74, R31
+	  93: INCEIPL       	$4
+
+	0xFECEE78:  38210060  addi r1,r1,96
+	  94: GETL       	R1, t76
+	  95: ADDL       	$0x60, t76
+	  96: PUTL       	t76, R1
+	  97: INCEIPL       	$4
+
+	0xFECEE7C:  4E800020  blr
+	  98: GETL       	LR, t78
+	  99: JMPo-r       	t78  ($4)
+
+
+
+. 1890 FECEE2C 84
+. 91 3F 00 08 7F EA 19 2E 83 A1 00 64 82 21 00 24 7F A8 03 A6 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+==== BB 1891 (0xFECF550) approx BBs exec'd 0 ====
+
+	0xFECF550:  7C0004AC  sync
+	   0: INCEIPL       	$4
+
+	0xFECF554:  7F80F828  lwarx r28,r0,r31
+	   1: GETL       	R31, t0
+	   2: LOCKo       	
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R28
+	   5: INCEIPL       	$4
+
+	0xFECF558:  7FA0F92D  stwcx. r29,r0,r31
+	   6: GETL       	R31, t4
+	   7: GETL       	R29, t6
+	   8: LOCKo       	
+	   9: STL       	t6, (t4)  (-rSo)
+	  10: ICRFL       	cr, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFECF55C:  40A2FFF8  bc 5,2,0xFECF554
+	  12: Jc02o       	$0xFECF554
+
+
+
+. 1891 FECF550 16
+. 7C 00 04 AC 7F 80 F8 28 7F A0 F9 2D 40 A2 FF F8
+==== BB 1892 (0xFECF560) approx BBs exec'd 0 ====
+
+	0xFECF560:  2F1C0001  cmpi cr6,r28,1
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFECF564:  4099FF74  bc 4,25,0xFECF4D8
+	   5: Jc25o       	$0xFECF4D8
+
+
+
+. 1892 FECF560 8
+. 2F 1C 00 01 40 99 FF 74
+==== BB 1893 (0xFECF4D8) approx BBs exec'd 0 ====
+
+	0xFECF4D8:  80A10024  lwz r5,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFECF4DC:  83810010  lwz r28,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFECF4E0:  83A10014  lwz r29,20(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0xFECF4E4:  7CA803A6  mtlr r5
+	  15: GETL       	R5, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFECF4E8:  83C10018  lwz r30,24(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R30
+	  22: INCEIPL       	$4
+
+	0xFECF4EC:  83E1001C  lwz r31,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0xFECF4F0:  38210020  addi r1,r1,32
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0xFECF4F4:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+
+. 1893 FECF4D8 32
+. 80 A1 00 24 83 81 00 10 83 A1 00 14 7C A8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 1894 (0xFE8831C) approx BBs exec'd 0 ====
+
+	0xFE8831C:  4BFFF9C0  b 0xFE87CDC
+	   0: JMPo       	$0xFE87CDC  ($4)
+
+
+
+. 1894 FE8831C 4
+. 4B FF F9 C0
+==== BB 1895 (0xFE87CDC) approx BBs exec'd 0 ====
+
+	0xFE87CDC:  823E012C  lwz r17,300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x12C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0xFE87CE0:  82DE0130  lwz r22,304(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x130, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R22
+	   9: INCEIPL       	$4
+
+	0xFE87CE4:  80B10000  lwz r5,0(r17)
+	  10: GETL       	R17, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R5
+	  13: INCEIPL       	$4
+
+	0xFE87CE8:  2F050000  cmpi cr6,r5,0
+	  14: GETL       	R5, t12
+	  15: CMP0L       	t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x6, CR
+	  17: INCEIPL       	$4
+
+	0xFE87CEC:  419A0454  bc 12,26,0xFE88140
+	  18: Js26o       	$0xFE88140
+
+
+
+. 1895 FE87CDC 20
+. 82 3E 01 2C 82 DE 01 30 80 B1 00 00 2F 05 00 00 41 9A 04 54
+==== BB 1896 (0xFE88140) approx BBs exec'd 0 ====
+
+	0xFE88140:  807E0120  lwz r3,288(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x120, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE88144:  3C800001  lis r4,1
+	   5: MOVL       	$0x10000, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE88148:  92D10000  stw r22,0(r17)
+	   8: GETL       	R22, t6
+	   9: GETL       	R17, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE8814C:  48099BD5  bl 0xFF21D20
+	  12: MOVL       	$0xFE88150, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0xFF21D20  ($4)
+
+
+
+. 1896 FE88140 16
+. 80 7E 01 20 3C 80 00 01 92 D1 00 00 48 09 9B D5
+==== BB 1897 __open_nocancel(0xFF21D20) approx BBs exec'd 0 ====
+
+	0xFF21D20:  38000005  li r0,5
+	   0: MOVL       	$0x5, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF21D24:  44000002  sc
+	   3: JMPo-sys       	$0xFF21D28  ($4)
+
+
+
+. 1897 FF21D20 8
+. 38 00 00 05 44 00 00 02
+==== BB 1898 (0xFF21D28) approx BBs exec'd 0 ====
+
+	0xFF21D28:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 1898 FF21D28 4
+. 4C A3 00 20
+==== BB 1899 (0xFE88150) approx BBs exec'd 0 ====
+
+	0xFE88150:  39200000  li r9,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0xFE88154:  7C731B79  or. r19,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R19
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE88158:  7F200026  mfcr r25
+	   8: GETL       	CR, t6
+	   9: PUTL       	t6, R25
+	  10: INCEIPL       	$4
+
+	0xFE8815C:  41A0FC9C  bc 13,0,0xFE87DF8
+	  11: Js00o       	$0xFE87DF8
+
+
+
+. 1899 FE88150 16
+. 39 20 00 00 7C 73 1B 79 7F 20 00 26 41 A0 FC 9C
+==== BB 1900 (0xFE88160) approx BBs exec'd 0 ====
+
+	0xFE88160:  80BE0134  lwz r5,308(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x134, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFE88164:  38600003  li r3,3
+	   5: MOVL       	$0x3, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE88168:  7E649B78  or r4,r19,r19
+	   8: GETL       	R19, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFE8816C:  48099155  bl 0xFF212C0
+	  11: MOVL       	$0xFE88170, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0xFF212C0  ($4)
+
+
+
+. 1900 FE88160 16
+. 80 BE 01 34 38 60 00 03 7E 64 9B 78 48 09 91 55
+==== BB 1901 __fxstat64@@GLIBC_2.2(0xFF212C0) approx BBs exec'd 0 ====
+
+	0xFF212C0:  7D8802A6  mflr r12
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0xFF212C4:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFF212C8:  48086B89  bl 0xFFA7E50
+	   9: MOVL       	$0xFF212CC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1901 FF212C0 12
+. 7D 88 02 A6 94 21 FF F0 48 08 6B 89
+==== BB 1902 (0xFF212CC) approx BBs exec'd 0 ====
+
+	0xFF212CC:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF212D0:  7C832378  or r3,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFF212D4:  7FC802A6  mflr r30
+	   8: GETL       	LR, t6
+	   9: PUTL       	t6, R30
+	  10: INCEIPL       	$4
+
+	0xFF212D8:  7CA42B78  or r4,r5,r5
+	  11: GETL       	R5, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0xFF212DC:  380000C5  li r0,197
+	  14: MOVL       	$0xC5, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFF212E0:  7D8803A6  mtlr r12
+	  17: GETL       	R12, t12
+	  18: PUTL       	t12, LR
+	  19: INCEIPL       	$4
+
+	0xFF212E4:  44000002  sc
+	  20: JMPo-sys       	$0xFF212E8  ($4)
+
+
+
+. 1902 FF212CC 28
+. 93 C1 00 08 7C 83 23 78 7F C8 02 A6 7C A4 2B 78 38 00 00 C5 7D 88 03 A6 44 00 00 02
+==== BB 1903 (0xFF212E8) approx BBs exec'd 0 ====
+
+	0xFF212E8:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF212EC:  74091000  andis. r9,r0,0x1000
+	   3: GETL       	R0, t2
+	   4: ANDL       	$0x10000000, t2
+	   5: PUTL       	t2, R9
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFF212F0:  813E1C4C  lwz r9,7244(r30)
+	   9: GETL       	R30, t6
+	  10: ADDL       	$0x1C4C, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFF212F4:  7D291214  add r9,r9,r2
+	  14: GETL       	R9, t10
+	  15: GETL       	R2, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0xFF212F8:  40820010  bc 4,2,0xFF21308
+	  19: Jc02o       	$0xFF21308
+
+
+
+. 1903 FF212E8 20
+. 7C 00 00 26 74 09 10 00 81 3E 1C 4C 7D 29 12 14 40 82 00 10
+==== BB 1904 (0xFF212FC) approx BBs exec'd 0 ====
+
+	0xFF212FC:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0xFF21300:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFF21304:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+
+. 1904 FF212FC 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 1905 (0xFE88170) approx BBs exec'd 0 ====
+
+	0xFE88170:  2F83FFFF  cmpi cr7,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE88174:  41BEFC50  bc 13,30,0xFE87DC4
+	   5: Js30o       	$0xFE87DC4
+
+
+
+. 1905 FE88170 8
+. 2F 83 FF FF 41 BE FC 50
+==== BB 1906 (0xFE88178) approx BBs exec'd 0 ====
+
+	0xFE88178:  82BE0134  lwz r21,308(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x134, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0xFE8817C:  83550030  lwz r26,48(r21)
+	   5: GETL       	R21, t4
+	   6: ADDL       	$0x30, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0xFE88180:  83750034  lwz r27,52(r21)
+	  10: GETL       	R21, t8
+	  11: ADDL       	$0x34, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R27
+	  14: INCEIPL       	$4
+
+	0xFE88184:  2F9A0000  cmpi cr7,r26,0
+	  15: GETL       	R26, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFE88188:  419C0018  bc 12,28,0xFE881A0
+	  19: Js28o       	$0xFE881A0
+
+
+
+. 1906 FE88178 20
+. 82 BE 01 34 83 55 00 30 83 75 00 34 2F 9A 00 00 41 9C 00 18
+==== BB 1907 (0xFE8818C) approx BBs exec'd 0 ====
+
+	0xFE8818C:  409E0010  bc 4,30,0xFE8819C
+	   0: Jc30o       	$0xFE8819C
+
+
+
+. 1907 FE8818C 4
+. 40 9E 00 10
+==== BB 1908 (0xFE88190) approx BBs exec'd 0 ====
+
+	0xFE88190:  3F000020  lis r24,32
+	   0: MOVL       	$0x200000, t0
+	   1: PUTL       	t0, R24
+	   2: INCEIPL       	$4
+
+	0xFE88194:  7F1BC040  cmpl cr6,r27,r24
+	   3: GETL       	R27, t2
+	   4: GETL       	R24, t4
+	   5: CMPUL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0xFE88198:  41980008  bc 12,24,0xFE881A0
+	   8: Js24o       	$0xFE881A0
+
+
+
+. 1908 FE88190 12
+. 3F 00 00 20 7F 1B C0 40 41 98 00 08
+==== BB 1909 (0xFE8819C) approx BBs exec'd 0 ====
+
+	0xFE8819C:  3F600020  lis r27,32
+	   0: MOVL       	$0x200000, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0xFE881A0:  38600000  li r3,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFE881A4:  7F64DB78  or r4,r27,r27
+	   6: GETL       	R27, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFE881A8:  38A00001  li r5,1
+	   9: MOVL       	$0x1, t6
+	  10: PUTL       	t6, R5
+	  11: INCEIPL       	$4
+
+	0xFE881AC:  38C00002  li r6,2
+	  12: MOVL       	$0x2, t8
+	  13: PUTL       	t8, R6
+	  14: INCEIPL       	$4
+
+	0xFE881B0:  7E679B78  or r7,r19,r19
+	  15: GETL       	R19, t10
+	  16: PUTL       	t10, R7
+	  17: INCEIPL       	$4
+
+	0xFE881B4:  39200000  li r9,0
+	  18: MOVL       	$0x0, t12
+	  19: PUTL       	t12, R9
+	  20: INCEIPL       	$4
+
+	0xFE881B8:  39400000  li r10,0
+	  21: MOVL       	$0x0, t14
+	  22: PUTL       	t14, R10
+	  23: INCEIPL       	$4
+
+	0xFE881BC:  7F78DB78  or r24,r27,r27
+	  24: GETL       	R27, t16
+	  25: PUTL       	t16, R24
+	  26: INCEIPL       	$4
+
+	0xFE881C0:  480A6009  bl 0xFF2E1C8
+	  27: MOVL       	$0xFE881C4, t18
+	  28: PUTL       	t18, LR
+	  29: JMPo-c       	$0xFF2E1C8  ($4)
+
+
+
+. 1909 FE8819C 40
+. 3F 60 00 20 38 60 00 00 7F 64 DB 78 38 A0 00 01 38 C0 00 02 7E 67 9B 78 39 20 00 00 39 40 00 00 7F 78 DB 78 48 0A 60 09
+==== BB 1910 mmap64(0xFF2E1C8) approx BBs exec'd 0 ====
+
+	0xFF2E1C8:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF2E1CC:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFF2E1D0:  48079C81  bl 0xFFA7E50
+	   9: MOVL       	$0xFF2E1D4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1910 FF2E1C8 12
+. 7C 08 02 A6 94 21 FF E0 48 07 9C 81
+==== BB 1911 (0xFF2E1D4) approx BBs exec'd 0 ====
+
+	0xFF2E1D4:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFF2E1D8:  554C053E  rlwinm r12,r10,0,20,31
+	   3: GETL       	R10, t2
+	   4: ANDL       	$0xFFF, t2
+	   5: PUTL       	t2, R12
+	   6: INCEIPL       	$4
+
+	0xFF2E1DC:  93C10018  stw r30,24(r1)
+	   7: GETL       	R30, t4
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x18, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFF2E1E0:  7FC802A6  mflr r30
+	  12: GETL       	LR, t8
+	  13: PUTL       	t8, R30
+	  14: INCEIPL       	$4
+
+	0xFF2E1E4:  93810010  stw r28,16(r1)
+	  15: GETL       	R28, t10
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x10, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0xFF2E1E8:  93A10014  stw r29,20(r1)
+	  20: GETL       	R29, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFF2E1EC:  555DA33E  rlwinm r29,r10,20,12,31
+	  25: GETL       	R10, t18
+	  26: SHRL       	$0xC, t18
+	  27: PUTL       	t18, R29
+	  28: INCEIPL       	$4
+
+	0xFF2E1F0:  513DA016  rlwimi r29,r9,20,0,11
+	  29: GETL       	R29, t20
+	  30: GETL       	R9, t22
+	  31: ROLL       	$0x14, t22
+	  32: ANDL       	$0xFFF00000, t22
+	  33: ANDL       	$0xFFFFF, t20
+	  34: ORL       	t20, t22
+	  35: PUTL       	t22, R29
+	  36: INCEIPL       	$4
+
+	0xFF2E1F4:  7D3C6670  srawi r28,r9,12
+	  37: GETL       	R9, t24
+	  38: SARL       	$0xC, t24  (-wCa)
+	  39: PUTL       	t24, R28
+	  40: INCEIPL       	$4
+
+	0xFF2E1F8:  7C0803A6  mtlr r0
+	  41: GETL       	R0, t26
+	  42: PUTL       	t26, LR
+	  43: INCEIPL       	$4
+
+	0xFF2E1FC:  7D606379  or. r0,r11,r12
+	  44: GETL       	R11, t28
+	  45: GETL       	R12, t30
+	  46: ORL       	t30, t28
+	  47: PUTL       	t28, R0
+	  48: CMP0L       	t28, t32  (-rSo)
+	  49: ICRFL       	t32, $0x0, CR
+	  50: INCEIPL       	$4
+
+	0xFF2E200:  813E1C4C  lwz r9,7244(r30)
+	  51: GETL       	R30, t34
+	  52: ADDL       	$0x1C4C, t34
+	  53: LDL       	(t34), t36
+	  54: PUTL       	t36, R9
+	  55: INCEIPL       	$4
+
+	0xFF2E204:  7FA8EB78  or r8,r29,r29
+	  56: GETL       	R29, t38
+	  57: PUTL       	t38, R8
+	  58: INCEIPL       	$4
+
+	0xFF2E208:  380000C0  li r0,192
+	  59: MOVL       	$0xC0, t40
+	  60: PUTL       	t40, R0
+	  61: INCEIPL       	$4
+
+	0xFF2E20C:  7D291214  add r9,r9,r2
+	  62: GETL       	R9, t42
+	  63: GETL       	R2, t44
+	  64: ADDL       	t42, t44
+	  65: PUTL       	t44, R9
+	  66: INCEIPL       	$4
+
+	0xFF2E210:  3960FFFF  li r11,-1
+	  67: MOVL       	$0xFFFFFFFF, t46
+	  68: PUTL       	t46, R11
+	  69: INCEIPL       	$4
+
+	0xFF2E214:  41820024  bc 12,2,0xFF2E238
+	  70: Js02o       	$0xFF2E238
+
+
+
+. 1911 FF2E1D4 68
+. 39 60 00 00 55 4C 05 3E 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 A1 00 14 55 5D A3 3E 51 3D A0 16 7D 3C 66 70 7C 08 03 A6 7D 60 63 79 81 3E 1C 4C 7F A8 EB 78 38 00 00 C0 7D 29 12 14 39 60 FF FF 41 82 00 24
+==== BB 1912 (0xFF2E238) approx BBs exec'd 0 ====
+
+	0xFF2E238:  44000002  sc
+	   0: JMPo-sys       	$0xFF2E23C  ($4)
+
+
+
+. 1912 FF2E238 4
+. 44 00 00 02
+==== BB 1913 (0xFF2E23C) approx BBs exec'd 0 ====
+
+	0xFF2E23C:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF2E240:  74091000  andis. r9,r0,0x1000
+	   3: GETL       	R0, t2
+	   4: ANDL       	$0x10000000, t2
+	   5: PUTL       	t2, R9
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFF2E244:  809E1C4C  lwz r4,7244(r30)
+	   9: GETL       	R30, t6
+	  10: ADDL       	$0x1C4C, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0xFF2E248:  7D241214  add r9,r4,r2
+	  14: GETL       	R4, t10
+	  15: GETL       	R2, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0xFF2E24C:  40820020  bc 4,2,0xFF2E26C
+	  19: Jc02o       	$0xFF2E26C
+
+
+
+. 1913 FF2E23C 20
+. 7C 00 00 26 74 09 10 00 80 9E 1C 4C 7D 24 12 14 40 82 00 20
+==== BB 1914 (0xFF2E250) approx BBs exec'd 0 ====
+
+	0xFF2E250:  7C6B1B78  or r11,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFF2E254:  7D635B78  or r3,r11,r11
+	   3: GETL       	R11, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFF2E258:  83810010  lwz r28,16(r1)
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0x10, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0xFF2E25C:  83A10014  lwz r29,20(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x14, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0xFF2E260:  83C10018  lwz r30,24(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x18, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0xFF2E264:  38210020  addi r1,r1,32
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x20, t16
+	  23: PUTL       	t16, R1
+	  24: INCEIPL       	$4
+
+	0xFF2E268:  4E800020  blr
+	  25: GETL       	LR, t18
+	  26: JMPo-r       	t18  ($4)
+
+
+
+. 1914 FF2E250 28
+. 7C 6B 1B 78 7D 63 5B 78 83 81 00 10 83 A1 00 14 83 C1 00 18 38 21 00 20 4E 80 00 20
+==== BB 1915 (0xFE881C4) approx BBs exec'd 0 ====
+
+	0xFE881C4:  2F83FFFF  cmpi cr7,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE881C8:  7C7D1B78  or r29,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R29
+	   7: INCEIPL       	$4
+
+	0xFE881CC:  41BEFBF8  bc 13,30,0xFE87DC4
+	   8: Js30o       	$0xFE87DC4
+
+
+
+. 1915 FE881C4 12
+. 2F 83 FF FF 7C 7D 1B 78 41 BE FB F8
+==== BB 1916 (0xFE881D0) approx BBs exec'd 0 ====
+
+	0xFE881D0:  81030024  lwz r8,36(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFE881D4:  83830014  lwz r28,20(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFE881D8:  1C88006C  mulli r4,r8,108
+	  10: GETL       	R8, t8
+	  11: MULL       	$0x6C, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0xFE881DC:  80E30018  lwz r7,24(r3)
+	  14: GETL       	R3, t10
+	  15: ADDL       	$0x18, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0xFE881E0:  80C30020  lwz r6,32(r3)
+	  19: GETL       	R3, t14
+	  20: ADDL       	$0x20, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R6
+	  23: INCEIPL       	$4
+
+	0xFE881E4:  7D5C3A14  add r10,r28,r7
+	  24: GETL       	R28, t18
+	  25: GETL       	R7, t20
+	  26: ADDL       	t18, t20
+	  27: PUTL       	t20, R10
+	  28: INCEIPL       	$4
+
+	0xFE881E8:  81230010  lwz r9,16(r3)
+	  29: GETL       	R3, t22
+	  30: ADDL       	$0x10, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0xFE881EC:  7F862214  add r28,r6,r4
+	  34: GETL       	R6, t26
+	  35: GETL       	R4, t28
+	  36: ADDL       	t26, t28
+	  37: PUTL       	t28, R28
+	  38: INCEIPL       	$4
+
+	0xFE881F0:  81830008  lwz r12,8(r3)
+	  39: GETL       	R3, t30
+	  40: ADDL       	$0x8, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R12
+	  43: INCEIPL       	$4
+
+	0xFE881F4:  7C9C5000  cmp cr1,r28,r10
+	  44: GETL       	R28, t34
+	  45: GETL       	R10, t36
+	  46: CMPL       	t34, t36, t38  (-rSo)
+	  47: ICRFL       	t38, $0x1, CR
+	  48: INCEIPL       	$4
+
+	0xFE881F8:  1C09000C  mulli r0,r9,12
+	  49: GETL       	R9, t40
+	  50: MULL       	$0xC, t40
+	  51: PUTL       	t40, R0
+	  52: INCEIPL       	$4
+
+	0xFE881FC:  7D2C0214  add r9,r12,r0
+	  53: GETL       	R12, t42
+	  54: GETL       	R0, t44
+	  55: ADDL       	t42, t44
+	  56: PUTL       	t44, R9
+	  57: INCEIPL       	$4
+
+	0xFE88200:  40840008  bc 4,4,0xFE88208
+	  58: Jc04o       	$0xFE88208
+
+
+
+. 1916 FE881D0 52
+. 81 03 00 24 83 83 00 14 1C 88 00 6C 80 E3 00 18 80 C3 00 20 7D 5C 3A 14 81 23 00 10 7F 86 22 14 81 83 00 08 7C 9C 50 00 1C 09 00 0C 7D 2C 02 14 40 84 00 08
+==== BB 1917 (0xFE88208) approx BBs exec'd 0 ====
+
+	0xFE88208:  7C1C4800  cmp cr0,r28,r9
+	   0: GETL       	R28, t0
+	   1: GETL       	R9, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8820C:  40800008  bc 4,0,0xFE88214
+	   5: Jc00o       	$0xFE88214
+
+
+
+. 1917 FE88208 8
+. 7C 1C 48 00 40 80 00 08
+==== BB 1918 (0xFE88214) approx BBs exec'd 0 ====
+
+	0xFE88214:  7E1CC040  cmpl cr4,r28,r24
+	   0: GETL       	R28, t0
+	   1: GETL       	R24, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0xFE88218:  41910288  bc 12,17,0xFE884A0
+	   5: Js17o       	$0xFE884A0
+
+
+
+. 1918 FE88214 8
+. 7E 1C C0 40 41 91 02 88
+==== BB 1919 (0xFE8821C) approx BBs exec'd 0 ====
+
+	0xFE8821C:  80B50030  lwz r5,48(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFE88220:  3B200000  li r25,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0xFE88224:  7F85C800  cmp cr7,r5,r25
+	   8: GETL       	R5, t6
+	   9: GETL       	R25, t8
+	  10: CMPL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFE88228:  419D0028  bc 12,29,0xFE88250
+	  13: Js29o       	$0xFE88250
+
+
+
+. 1919 FE8821C 16
+. 80 B5 00 30 3B 20 00 00 7F 85 C8 00 41 9D 00 28
+==== BB 1920 (0xFE8822C) approx BBs exec'd 0 ====
+
+	0xFE8822C:  409E0010  bc 4,30,0xFE8823C
+	   0: Jc30o       	$0xFE8823C
+
+
+
+. 1920 FE8822C 4
+. 40 9E 00 10
+==== BB 1921 (0xFE88230) approx BBs exec'd 0 ====
+
+	0xFE88230:  83550034  lwz r26,52(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFE88234:  7C9AC040  cmpl cr1,r26,r24
+	   5: GETL       	R26, t4
+	   6: GETL       	R24, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFE88238:  41850018  bc 12,5,0xFE88250
+	  10: Js05o       	$0xFE88250
+
+
+
+. 1921 FE88230 12
+. 83 55 00 34 7C 9A C0 40 41 85 00 18
+==== BB 1922 (0xFE88250) approx BBs exec'd 0 ====
+
+	0xFE88250:  93B60000  stw r29,0(r22)
+	   0: GETL       	R29, t0
+	   1: GETL       	R22, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE88254:  93160008  stw r24,8(r22)
+	   4: GETL       	R24, t4
+	   5: GETL       	R22, t6
+	   6: ADDL       	$0x8, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFE88258:  4BFFFA98  b 0xFE87CF0
+	   9: JMPo       	$0xFE87CF0  ($4)
+
+
+
+. 1922 FE88250 12
+. 93 B6 00 00 93 16 00 08 4B FF FA 98
+==== BB 1923 (0xFE87CF0) approx BBs exec'd 0 ====
+
+	0xFE87CF0:  82B60000  lwz r21,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R21
+	   3: INCEIPL       	$4
+
+	0xFE87CF4:  2C150000  cmpi cr0,r21,0
+	   4: GETL       	R21, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE87CF8:  41820628  bc 12,2,0xFE88320
+	   8: Js02o       	$0xFE88320
+
+
+
+. 1923 FE87CF0 12
+. 82 B6 00 00 2C 15 00 00 41 82 06 28
+==== BB 1924 (0xFE87CFC) approx BBs exec'd 0 ====
+
+	0xFE87CFC:  7EE3BB78  or r3,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE87D00:  4804EDD9  bl 0xFED6AD8
+	   3: MOVL       	$0xFE87D04, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 1924 FE87CFC 8
+. 7E E3 BB 78 48 04 ED D9
+==== BB 1925 (0xFE87D04) approx BBs exec'd 0 ====
+
+	0xFE87D04:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFE87D08:  7E0B1840  cmpl cr4,r11,r3
+	   3: GETL       	R11, t2
+	   4: GETL       	R3, t4
+	   5: CMPUL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0xFE87D0C:  4090001C  bc 4,16,0xFE87D28
+	   8: Jc16o       	$0xFE87D28
+
+
+
+. 1925 FE87D04 12
+. 39 60 00 00 7E 0B 18 40 40 90 00 1C
+==== BB 1926 (0xFE87D10) approx BBs exec'd 0 ====
+
+	0xFE87D10:  7C6903A6  mtctr r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0xFE87D14:  7F0BB8AE  lbzx r24,r11,r23
+	   3: GETL       	R23, t2
+	   4: GETL       	R11, t4
+	   5: ADDL       	t4, t2
+	   6: LDB       	(t2), t6
+	   7: PUTL       	t6, R24
+	   8: INCEIPL       	$4
+
+	0xFE87D18:  546C483E  rlwinm r12,r3,9,0,31
+	   9: GETL       	R3, t8
+	  10: ROLL       	$0x9, t8
+	  11: PUTL       	t8, R12
+	  12: INCEIPL       	$4
+
+	0xFE87D1C:  396B0001  addi r11,r11,1
+	  13: GETL       	R11, t10
+	  14: ADDL       	$0x1, t10
+	  15: PUTL       	t10, R11
+	  16: INCEIPL       	$4
+
+	0xFE87D20:  7C6CC214  add r3,r12,r24
+	  17: GETL       	R12, t12
+	  18: GETL       	R24, t14
+	  19: ADDL       	t12, t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0xFE87D24:  4200FFF0  bc 16,0,0xFE87D14
+	  22: GETL       	CTR, t16
+	  23: ADDL       	$0xFFFFFFFF, t16
+	  24: PUTL       	t16, CTR
+	  25: JIFZL       	t16, $0xFE87D28
+	  26: JMPo       	$0xFE87D14  ($4)
+
+
+
+. 1926 FE87D10 24
+. 7C 69 03 A6 7F 0B B8 AE 54 6C 48 3E 39 6B 00 01 7C 6C C2 14 42 00 FF F0
+==== BB 1927 (0xFE87D14) approx BBs exec'd 0 ====
+
+	0xFE87D14:  7F0BB8AE  lbzx r24,r11,r23
+	   0: GETL       	R23, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	t2, t0
+	   3: LDB       	(t0), t4
+	   4: PUTL       	t4, R24
+	   5: INCEIPL       	$4
+
+	0xFE87D18:  546C483E  rlwinm r12,r3,9,0,31
+	   6: GETL       	R3, t6
+	   7: ROLL       	$0x9, t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0xFE87D1C:  396B0001  addi r11,r11,1
+	  10: GETL       	R11, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0xFE87D20:  7C6CC214  add r3,r12,r24
+	  14: GETL       	R12, t10
+	  15: GETL       	R24, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R3
+	  18: INCEIPL       	$4
+
+	0xFE87D24:  4200FFF0  bc 16,0,0xFE87D14
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0xFE87D28
+	  23: JMPo       	$0xFE87D14  ($4)
+
+
+
+. 1927 FE87D14 20
+. 7F 0B B8 AE 54 6C 48 3E 39 6B 00 01 7C 6C C2 14 42 00 FF F0
+==== BB 1928 (0xFE87D28) approx BBs exec'd 0 ====
+
+	0xFE87D28:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE87D2C:  7C791B78  or r25,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R25
+	   6: INCEIPL       	$4
+
+	0xFE87D30:  409A0008  bc 4,26,0xFE87D38
+	   7: Jc26o       	$0xFE87D38
+
+
+
+. 1928 FE87D28 12
+. 2F 03 00 00 7C 79 1B 78 40 9A 00 08
+==== BB 1929 (0xFE87D38) approx BBs exec'd 0 ====
+
+	0xFE87D38:  80760000  lwz r3,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE87D3C:  83430010  lwz r26,16(r3)
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x10, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R26
+	   8: INCEIPL       	$4
+
+	0xFE87D40:  81030008  lwz r8,8(r3)
+	   9: GETL       	R3, t8
+	  10: ADDL       	$0x8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R8
+	  13: INCEIPL       	$4
+
+	0xFE87D44:  38FAFFFE  addi r7,r26,-2
+	  14: GETL       	R26, t12
+	  15: ADDL       	$0xFFFFFFFE, t12
+	  16: PUTL       	t12, R7
+	  17: INCEIPL       	$4
+
+	0xFE87D48:  7CD9D396  divwu r6, r25, r26
+	  18: GETL       	R25, t16
+	  19: GETL       	R26, t14
+	  20: UDIVL       	t14, t16
+	  21: PUTL       	t16, R6
+	  22: INCEIPL       	$4
+
+	0xFE87D4C:  7F681A14  add r27,r8,r3
+	  23: GETL       	R8, t18
+	  24: GETL       	R3, t20
+	  25: ADDL       	t18, t20
+	  26: PUTL       	t20, R27
+	  27: INCEIPL       	$4
+
+	0xFE87D50:  7F993B96  divwu r28, r25, r7
+	  28: GETL       	R25, t24
+	  29: GETL       	R7, t22
+	  30: UDIVL       	t22, t24
+	  31: PUTL       	t24, R28
+	  32: INCEIPL       	$4
+
+	0xFE87D54:  7C9C39D6  mullw r4,r28,r7
+	  33: GETL       	R28, t26
+	  34: GETL       	R7, t28
+	  35: MULL       	t26, t28
+	  36: PUTL       	t28, R4
+	  37: INCEIPL       	$4
+
+	0xFE87D58:  7C06D1D6  mullw r0,r6,r26
+	  38: GETL       	R6, t30
+	  39: GETL       	R26, t32
+	  40: MULL       	t30, t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0xFE87D5C:  7D24C850  subf r9,r4,r25
+	  43: GETL       	R4, t34
+	  44: GETL       	R25, t36
+	  45: SUBL       	t34, t36
+	  46: PUTL       	t36, R9
+	  47: INCEIPL       	$4
+
+	0xFE87D60:  3B090001  addi r24,r9,1
+	  48: GETL       	R9, t38
+	  49: ADDL       	$0x1, t38
+	  50: PUTL       	t38, R24
+	  51: INCEIPL       	$4
+
+	0xFE87D64:  7FA0C850  subf r29,r0,r25
+	  52: GETL       	R0, t40
+	  53: GETL       	R25, t42
+	  54: SUBL       	t40, t42
+	  55: PUTL       	t42, R29
+	  56: INCEIPL       	$4
+
+	0xFE87D68:  1F9D000C  mulli r28,r29,12
+	  57: GETL       	R29, t44
+	  58: MULL       	$0xC, t44
+	  59: PUTL       	t44, R28
+	  60: INCEIPL       	$4
+
+	0xFE87D6C:  7EE3BB78  or r3,r23,r23
+	  61: GETL       	R23, t46
+	  62: PUTL       	t46, R3
+	  63: INCEIPL       	$4
+
+	0xFE87D70:  7FBDC214  add r29,r29,r24
+	  64: GETL       	R29, t48
+	  65: GETL       	R24, t50
+	  66: ADDL       	t48, t50
+	  67: PUTL       	t50, R29
+	  68: INCEIPL       	$4
+
+	0xFE87D74:  7D5CDA14  add r10,r28,r27
+	  69: GETL       	R28, t52
+	  70: GETL       	R27, t54
+	  71: ADDL       	t52, t54
+	  72: PUTL       	t54, R10
+	  73: INCEIPL       	$4
+
+	0xFE87D78:  812A0004  lwz r9,4(r10)
+	  74: GETL       	R10, t56
+	  75: ADDL       	$0x4, t56
+	  76: LDL       	(t56), t58
+	  77: PUTL       	t58, R9
+	  78: INCEIPL       	$4
+
+	0xFE87D7C:  2F890000  cmpi cr7,r9,0
+	  79: GETL       	R9, t60
+	  80: CMP0L       	t60, t62  (-rSo)
+	  81: ICRFL       	t62, $0x7, CR
+	  82: INCEIPL       	$4
+
+	0xFE87D80:  419E0038  bc 12,30,0xFE87DB8
+	  83: Js30o       	$0xFE87DB8
+
+
+
+. 1929 FE87D38 76
+. 80 76 00 00 83 43 00 10 81 03 00 08 38 FA FF FE 7C D9 D3 96 7F 68 1A 14 7F 99 3B 96 7C 9C 39 D6 7C 06 D1 D6 7D 24 C8 50 3B 09 00 01 7F A0 C8 50 1F 9D 00 0C 7E E3 BB 78 7F BD C2 14 7D 5C DA 14 81 2A 00 04 2F 89 00 00 41 9E 00 38
+==== BB 1930 (0xFE87D84) approx BBs exec'd 0 ====
+
+	0xFE87D84:  7D7CD82E  lwzx r11,r28,r27
+	   0: GETL       	R27, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R11
+	   5: INCEIPL       	$4
+
+	0xFE87D88:  7C8BC800  cmp cr1,r11,r25
+	   6: GETL       	R11, t6
+	   7: GETL       	R25, t8
+	   8: CMPL       	t6, t8, t10  (-rSo)
+	   9: ICRFL       	t10, $0x1, CR
+	  10: INCEIPL       	$4
+
+	0xFE87D8C:  418600D4  bc 12,6,0xFE87E60
+	  11: Js06o       	$0xFE87E60
+
+
+
+. 1930 FE87D84 12
+. 7D 7C D8 2E 7C 8B C8 00 41 86 00 D4
+==== BB 1931 (0xFE87E60) approx BBs exec'd 0 ====
+
+	0xFE87E60:  82160000  lwz r16,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R16
+	   3: INCEIPL       	$4
+
+	0xFE87E64:  7C904A14  add r4,r16,r9
+	   4: GETL       	R16, t4
+	   5: GETL       	R9, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0xFE87E68:  4804E549  bl 0xFED63B0
+	   9: MOVL       	$0xFE87E6C, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 1931 FE87E60 12
+. 82 16 00 00 7C 90 4A 14 48 04 E5 49
+==== BB 1932 (0xFED63D8) approx BBs exec'd 0 ====
+
+	0xFED63D8:  84A30004  lwzu r5,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R3
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFED63DC:  40860054  bc 4,6,0xFED6430
+	   6: Jc06o       	$0xFED6430
+
+
+
+. 1932 FED63D8 8
+. 84 A3 00 04 40 86 00 54
+==== BB 1933 (0xFED63E0) approx BBs exec'd 0 ====
+
+	0xFED63E0:  84C40004  lwzu r6,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFED63E4:  7C072A14  add r0,r7,r5
+	   6: GETL       	R7, t4
+	   7: GETL       	R5, t6
+	   8: ADDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0xFED63E8:  7D0928F8  nor r9,r8,r5
+	  11: GETL       	R8, t8
+	  12: GETL       	R5, t10
+	  13: ORL       	t10, t8
+	  14: NOTL       	t8
+	  15: PUTL       	t8, R9
+	  16: INCEIPL       	$4
+
+	0xFED63EC:  7C004839  and. r0,r0,r9
+	  17: GETL       	R0, t12
+	  18: GETL       	R9, t14
+	  19: ANDL       	t12, t14
+	  20: PUTL       	t14, R0
+	  21: CMP0L       	t14, t16  (-rSo)
+	  22: ICRFL       	t16, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0xFED63F0:  7C853000  cmp cr1,r5,r6
+	  24: GETL       	R5, t18
+	  25: GETL       	R6, t20
+	  26: CMPL       	t18, t20, t22  (-rSo)
+	  27: ICRFL       	t22, $0x1, CR
+	  28: INCEIPL       	$4
+
+	0xFED63F4:  4182FFE4  bc 12,2,0xFED63D8
+	  29: Js02o       	$0xFED63D8
+
+
+
+. 1933 FED63E0 24
+. 84 C4 00 04 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+==== BB 1934 (0xFED6428) approx BBs exec'd 0 ====
+
+	0xFED6428:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED642C:  4E800020  blr
+	   3: GETL       	LR, t2
+	   4: JMPo-r       	t2  ($4)
+
+
+
+. 1934 FED6428 8
+. 38 60 00 00 4E 80 00 20
+==== BB 1935 (0xFE87E6C) approx BBs exec'd 0 ====
+
+	0xFE87E6C:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE87E70:  4082FF20  bc 4,2,0xFE87D90
+	   4: Jc02o       	$0xFE87D90
+
+
+
+. 1935 FE87E6C 8
+. 2C 03 00 00 40 82 FF 20
+==== BB 1936 (0xFE87E74) approx BBs exec'd 0 ====
+
+	0xFE87E74:  7EFCDA14  add r23,r28,r27
+	   0: GETL       	R28, t0
+	   1: GETL       	R27, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFE87E78:  80970008  lwz r4,8(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFE87E7C:  2F040000  cmpi cr6,r4,0
+	  10: GETL       	R4, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFE87E80:  419A04A0  bc 12,26,0xFE88320
+	  14: Js26o       	$0xFE88320
+
+
+
+. 1936 FE87E74 16
+. 7E FC DA 14 80 97 00 08 2F 04 00 00 41 9A 04 A0
+==== BB 1937 (0xFE87E84) approx BBs exec'd 0 ====
+
+	0xFE87E84:  83B60000  lwz r29,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0xFE87E88:  3AA00000  li r21,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R21
+	   6: INCEIPL       	$4
+
+	0xFE87E8C:  3AC0000D  li r22,13
+	   7: MOVL       	$0xD, t6
+	   8: PUTL       	t6, R22
+	   9: INCEIPL       	$4
+
+	0xFE87E90:  7C9D2214  add r4,r29,r4
+	  10: GETL       	R29, t8
+	  11: GETL       	R4, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0xFE87E94:  7EC903A6  mtctr r22
+	  15: GETL       	R22, t12
+	  16: PUTL       	t12, CTR
+	  17: INCEIPL       	$4
+
+	0xFE87E98:  39600000  li r11,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0xFE87E9C:  391F0008  addi r8,r31,8
+	  21: GETL       	R31, t16
+	  22: ADDL       	$0x8, t16
+	  23: PUTL       	t16, R8
+	  24: INCEIPL       	$4
+
+	0xFE87EA0:  38C00000  li r6,0
+	  25: MOVL       	$0x0, t18
+	  26: PUTL       	t18, R6
+	  27: INCEIPL       	$4
+
+	0xFE87EA4:  2F8B0006  cmpi cr7,r11,6
+	  28: GETL       	R11, t20
+	  29: MOVL       	$0x6, t24
+	  30: CMPL       	t20, t24, t22  (-rSo)
+	  31: ICRFL       	t22, $0x7, CR
+	  32: INCEIPL       	$4
+
+	0xFE87EA8:  56BB2036  rlwinm r27,r21,4,0,27
+	  33: GETL       	R21, t26
+	  34: SHLL       	$0x4, t26
+	  35: PUTL       	t26, R27
+	  36: INCEIPL       	$4
+
+	0xFE87EAC:  7CFB4214  add r7,r27,r8
+	  37: GETL       	R27, t28
+	  38: GETL       	R8, t30
+	  39: ADDL       	t28, t30
+	  40: PUTL       	t30, R7
+	  41: INCEIPL       	$4
+
+	0xFE87EB0:  39470078  addi r10,r7,120
+	  42: GETL       	R7, t32
+	  43: ADDL       	$0x78, t32
+	  44: PUTL       	t32, R10
+	  45: INCEIPL       	$4
+
+	0xFE87EB4:  419E0020  bc 12,30,0xFE87ED4
+	  46: Js30o       	$0xFE87ED4
+
+
+
+. 1937 FE87E84 52
+. 83 B6 00 00 3A A0 00 00 3A C0 00 0D 7C 9D 22 14 7E C9 03 A6 39 60 00 00 39 1F 00 08 38 C0 00 00 2F 8B 00 06 56 BB 20 36 7C FB 42 14 39 47 00 78 41 9E 00 20
+==== BB 1938 (0xFE87EB8) approx BBs exec'd 0 ====
+
+	0xFE87EB8:  80A40004  lwz r5,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFE87EBC:  3AB50001  addi r21,r21,1
+	   5: GETL       	R21, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R21
+	   8: INCEIPL       	$4
+
+	0xFE87EC0:  90CA000C  stw r6,12(r10)
+	   9: GETL       	R6, t6
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0xC, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE87EC4:  90A70078  stw r5,120(r7)
+	  14: GETL       	R5, t10
+	  15: GETL       	R7, t12
+	  16: ADDL       	$0x78, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFE87EC8:  83240008  lwz r25,8(r4)
+	  19: GETL       	R4, t14
+	  20: ADDL       	$0x8, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R25
+	  23: INCEIPL       	$4
+
+	0xFE87ECC:  916A0008  stw r11,8(r10)
+	  24: GETL       	R11, t18
+	  25: GETL       	R10, t20
+	  26: ADDL       	$0x8, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFE87ED0:  932A0004  stw r25,4(r10)
+	  29: GETL       	R25, t22
+	  30: GETL       	R10, t24
+	  31: ADDL       	$0x4, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE87ED4:  396B0001  addi r11,r11,1
+	  34: GETL       	R11, t26
+	  35: ADDL       	$0x1, t26
+	  36: PUTL       	t26, R11
+	  37: INCEIPL       	$4
+
+	0xFE87ED8:  38840008  addi r4,r4,8
+	  38: GETL       	R4, t28
+	  39: ADDL       	$0x8, t28
+	  40: PUTL       	t28, R4
+	  41: INCEIPL       	$4
+
+	0xFE87EDC:  4200FFC8  bc 16,0,0xFE87EA4
+	  42: GETL       	CTR, t30
+	  43: ADDL       	$0xFFFFFFFF, t30
+	  44: PUTL       	t30, CTR
+	  45: JIFZL       	t30, $0xFE87EE0
+	  46: JMPo       	$0xFE87EA4  ($4)
+
+
+
+. 1938 FE87EB8 40
+. 80 A4 00 04 3A B5 00 01 90 CA 00 0C 90 A7 00 78 83 24 00 08 91 6A 00 08 93 2A 00 04 39 6B 00 01 38 84 00 08 42 00 FF C8
+==== BB 1939 (0xFE87EA4) approx BBs exec'd 0 ====
+
+	0xFE87EA4:  2F8B0006  cmpi cr7,r11,6
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE87EA8:  56BB2036  rlwinm r27,r21,4,0,27
+	   5: GETL       	R21, t6
+	   6: SHLL       	$0x4, t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFE87EAC:  7CFB4214  add r7,r27,r8
+	   9: GETL       	R27, t8
+	  10: GETL       	R8, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R7
+	  13: INCEIPL       	$4
+
+	0xFE87EB0:  39470078  addi r10,r7,120
+	  14: GETL       	R7, t12
+	  15: ADDL       	$0x78, t12
+	  16: PUTL       	t12, R10
+	  17: INCEIPL       	$4
+
+	0xFE87EB4:  419E0020  bc 12,30,0xFE87ED4
+	  18: Js30o       	$0xFE87ED4
+
+
+
+. 1939 FE87EA4 20
+. 2F 8B 00 06 56 BB 20 36 7C FB 42 14 39 47 00 78 41 9E 00 20
+==== BB 1940 (0xFE87ED4) approx BBs exec'd 0 ====
+
+	0xFE87ED4:  396B0001  addi r11,r11,1
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFE87ED8:  38840008  addi r4,r4,8
+	   4: GETL       	R4, t2
+	   5: ADDL       	$0x8, t2
+	   6: PUTL       	t2, R4
+	   7: INCEIPL       	$4
+
+	0xFE87EDC:  4200FFC8  bc 16,0,0xFE87EA4
+	   8: GETL       	CTR, t4
+	   9: ADDL       	$0xFFFFFFFF, t4
+	  10: PUTL       	t4, CTR
+	  11: JIFZL       	t4, $0xFE87EE0
+	  12: JMPo       	$0xFE87EA4  ($4)
+
+
+
+. 1940 FE87ED4 12
+. 39 6B 00 01 38 84 00 08 42 00 FF C8
+==== BB 1941 (0xFE87EE0) approx BBs exec'd 0 ====
+
+	0xFE87EE0:  80DE0124  lwz r6,292(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x124, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFE87EE4:  387F0080  addi r3,r31,128
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x80, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFE87EE8:  7EA4AB78  or r4,r21,r21
+	   9: GETL       	R21, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0xFE87EEC:  38A00010  li r5,16
+	  12: MOVL       	$0x10, t8
+	  13: PUTL       	t8, R5
+	  14: INCEIPL       	$4
+
+	0xFE87EF0:  3B400000  li r26,0
+	  15: MOVL       	$0x0, t10
+	  16: PUTL       	t10, R26
+	  17: INCEIPL       	$4
+
+	0xFE87EF4:  4800B09D  bl 0xFE92F90
+	  18: MOVL       	$0xFE87EF8, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0xFE92F90  ($4)
+
+
+
+. 1941 FE87EE0 24
+. 80 DE 01 24 38 7F 00 80 7E A4 AB 78 38 A0 00 10 3B 40 00 00 48 00 B0 9D
+==== BB 1942 qsort(0xFE92F90) approx BBs exec'd 0 ====
+
+	0xFE92F90:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE92F94:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE92F98:  93810020  stw r28,32(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x20, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE92F9C:  7F8429D6  mullw r28,r4,r5
+	  14: GETL       	R4, t10
+	  15: GETL       	R5, t12
+	  16: MULL       	t10, t12
+	  17: PUTL       	t12, R28
+	  18: INCEIPL       	$4
+
+	0xFE92FA0:  48114EB1  bl 0xFFA7E50
+	  19: MOVL       	$0xFE92FA4, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1942 FE92F90 20
+. 94 21 FF D0 7C 08 02 A6 93 81 00 20 7F 84 29 D6 48 11 4E B1
+==== BB 1943 (0xFE92FA4) approx BBs exec'd 0 ====
+
+	0xFE92FA4:  92E1000C  stw r23,12(r1)
+	   0: GETL       	R23, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE92FA8:  93010010  stw r24,16(r1)
+	   5: GETL       	R24, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x10, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE92FAC:  7CD73378  or r23,r6,r6
+	  10: GETL       	R6, t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0xFE92FB0:  93210014  stw r25,20(r1)
+	  13: GETL       	R25, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE92FB4:  7C781B78  or r24,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R24
+	  20: INCEIPL       	$4
+
+	0xFE92FB8:  2B9C03FF  cmpli cr7,r28,1023
+	  21: GETL       	R28, t16
+	  22: MOVL       	$0x3FF, t20
+	  23: CMPUL       	t16, t20, t18  (-rSo)
+	  24: ICRFL       	t18, $0x7, CR
+	  25: INCEIPL       	$4
+
+	0xFE92FBC:  93410018  stw r26,24(r1)
+	  26: GETL       	R26, t22
+	  27: GETL       	R1, t24
+	  28: ADDL       	$0x18, t24
+	  29: STL       	t22, (t24)
+	  30: INCEIPL       	$4
+
+	0xFE92FC0:  93C10028  stw r30,40(r1)
+	  31: GETL       	R30, t26
+	  32: GETL       	R1, t28
+	  33: ADDL       	$0x28, t28
+	  34: STL       	t26, (t28)
+	  35: INCEIPL       	$4
+
+	0xFE92FC4:  7C9A2378  or r26,r4,r4
+	  36: GETL       	R4, t30
+	  37: PUTL       	t30, R26
+	  38: INCEIPL       	$4
+
+	0xFE92FC8:  93E1002C  stw r31,44(r1)
+	  39: GETL       	R31, t32
+	  40: GETL       	R1, t34
+	  41: ADDL       	$0x2C, t34
+	  42: STL       	t32, (t34)
+	  43: INCEIPL       	$4
+
+	0xFE92FCC:  7FC802A6  mflr r30
+	  44: GETL       	LR, t36
+	  45: PUTL       	t36, R30
+	  46: INCEIPL       	$4
+
+	0xFE92FD0:  92C10008  stw r22,8(r1)
+	  47: GETL       	R22, t38
+	  48: GETL       	R1, t40
+	  49: ADDL       	$0x8, t40
+	  50: STL       	t38, (t40)
+	  51: INCEIPL       	$4
+
+	0xFE92FD4:  7C3F0B78  or r31,r1,r1
+	  52: GETL       	R1, t42
+	  53: PUTL       	t42, R31
+	  54: INCEIPL       	$4
+
+	0xFE92FD8:  9361001C  stw r27,28(r1)
+	  55: GETL       	R27, t44
+	  56: GETL       	R1, t46
+	  57: ADDL       	$0x1C, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0xFE92FDC:  7CB92B78  or r25,r5,r5
+	  60: GETL       	R5, t48
+	  61: PUTL       	t48, R25
+	  62: INCEIPL       	$4
+
+	0xFE92FE0:  93A10024  stw r29,36(r1)
+	  63: GETL       	R29, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x24, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0xFE92FE4:  90010034  stw r0,52(r1)
+	  68: GETL       	R0, t54
+	  69: GETL       	R1, t56
+	  70: ADDL       	$0x34, t56
+	  71: STL       	t54, (t56)
+	  72: INCEIPL       	$4
+
+	0xFE92FE8:  409D0140  bc 4,29,0xFE93128
+	  73: Jc29o       	$0xFE93128
+
+
+
+. 1943 FE92FA4 72
+. 92 E1 00 0C 93 01 00 10 7C D7 33 78 93 21 00 14 7C 78 1B 78 2B 9C 03 FF 93 41 00 18 93 C1 00 28 7C 9A 23 78 93 E1 00 2C 7F C8 02 A6 92 C1 00 08 7C 3F 0B 78 93 61 00 1C 7C B9 2B 78 93 A1 00 24 90 01 00 34 40 9D 01 40
+==== BB 1944 (0xFE93128) approx BBs exec'd 0 ====
+
+	0xFE93128:  397C001E  addi r11,r28,30
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x1E, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFE9312C:  81210000  lwz r9,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFE93130:  556A0036  rlwinm r10,r11,0,0,27
+	   8: GETL       	R11, t6
+	   9: ANDL       	$0xFFFFFFF0, t6
+	  10: PUTL       	t6, R10
+	  11: INCEIPL       	$4
+
+	0xFE93134:  7D0A00D0  neg r8,r10
+	  12: GETL       	R10, t8
+	  13: NEGL       	t8
+	  14: PUTL       	t8, R8
+	  15: INCEIPL       	$4
+
+	0xFE93138:  7D21416E  stwux r9,r1,r8
+	  16: GETL       	R8, t10
+	  17: GETL       	R1, t12
+	  18: ADDL       	t12, t10
+	  19: PUTL       	t10, R1
+	  20: GETL       	R9, t14
+	  21: STL       	t14, (t10)
+	  22: INCEIPL       	$4
+
+	0xFE9313C:  38E10017  addi r7,r1,23
+	  23: GETL       	R1, t16
+	  24: ADDL       	$0x17, t16
+	  25: PUTL       	t16, R7
+	  26: INCEIPL       	$4
+
+	0xFE93140:  54E70036  rlwinm r7,r7,0,0,27
+	  27: GETL       	R7, t18
+	  28: ANDL       	$0xFFFFFFF0, t18
+	  29: PUTL       	t18, R7
+	  30: INCEIPL       	$4
+
+	0xFE93144:  4BFFFBE5  bl 0xFE92D28
+	  31: MOVL       	$0xFE93148, t20
+	  32: PUTL       	t20, LR
+	  33: JMPo-c       	$0xFE92D28  ($4)
+
+
+
+. 1944 FE93128 32
+. 39 7C 00 1E 81 21 00 00 55 6A 00 36 7D 0A 00 D0 7D 21 41 6E 38 E1 00 17 54 E7 00 36 4B FF FB E5
+==== BB 1945 msort_with_tmp(0xFE92D28) approx BBs exec'd 0 ====
+
+	0xFE92D28:  2B840001  cmpli cr7,r4,1
+	   0: GETL       	R4, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE92D2C:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFE92D30:  9421FFC0  stwu r1,-64(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFC0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFE92D34:  9261000C  stw r19,12(r1)
+	  14: GETL       	R19, t12
+	  15: GETL       	R1, t14
+	  16: ADDL       	$0xC, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0xFE92D38:  7CF33B78  or r19,r7,r7
+	  19: GETL       	R7, t16
+	  20: PUTL       	t16, R19
+	  21: INCEIPL       	$4
+
+	0xFE92D3C:  92810010  stw r20,16(r1)
+	  22: GETL       	R20, t18
+	  23: GETL       	R1, t20
+	  24: ADDL       	$0x10, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0xFE92D40:  7C741B78  or r20,r3,r3
+	  27: GETL       	R3, t22
+	  28: PUTL       	t22, R20
+	  29: INCEIPL       	$4
+
+	0xFE92D44:  92C10018  stw r22,24(r1)
+	  30: GETL       	R22, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x18, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0xFE92D48:  7CD63378  or r22,r6,r6
+	  35: GETL       	R6, t28
+	  36: PUTL       	t28, R22
+	  37: INCEIPL       	$4
+
+	0xFE92D4C:  92E1001C  stw r23,28(r1)
+	  38: GETL       	R23, t30
+	  39: GETL       	R1, t32
+	  40: ADDL       	$0x1C, t32
+	  41: STL       	t30, (t32)
+	  42: INCEIPL       	$4
+
+	0xFE92D50:  7C972378  or r23,r4,r4
+	  43: GETL       	R4, t34
+	  44: PUTL       	t34, R23
+	  45: INCEIPL       	$4
+
+	0xFE92D54:  93A10034  stw r29,52(r1)
+	  46: GETL       	R29, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x34, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0xFE92D58:  7CBD2B78  or r29,r5,r5
+	  51: GETL       	R5, t40
+	  52: PUTL       	t40, R29
+	  53: INCEIPL       	$4
+
+	0xFE92D5C:  92A10014  stw r21,20(r1)
+	  54: GETL       	R21, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x14, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0xFE92D60:  93010020  stw r24,32(r1)
+	  59: GETL       	R24, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x20, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFE92D64:  93210024  stw r25,36(r1)
+	  64: GETL       	R25, t50
+	  65: GETL       	R1, t52
+	  66: ADDL       	$0x24, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0xFE92D68:  93410028  stw r26,40(r1)
+	  69: GETL       	R26, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0x28, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0xFE92D6C:  9361002C  stw r27,44(r1)
+	  74: GETL       	R27, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x2C, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0xFE92D70:  93810030  stw r28,48(r1)
+	  79: GETL       	R28, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x30, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0xFE92D74:  93C10038  stw r30,56(r1)
+	  84: GETL       	R30, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x38, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0xFE92D78:  93E1003C  stw r31,60(r1)
+	  89: GETL       	R31, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	$0x3C, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0xFE92D7C:  90010044  stw r0,68(r1)
+	  94: GETL       	R0, t74
+	  95: GETL       	R1, t76
+	  96: ADDL       	$0x44, t76
+	  97: STL       	t74, (t76)
+	  98: INCEIPL       	$4
+
+	0xFE92D80:  419D0048  bc 12,29,0xFE92DC8
+	  99: Js29o       	$0xFE92DC8
+
+
+
+. 1945 FE92D28 92
+. 2B 84 00 01 7C 08 02 A6 94 21 FF C0 92 61 00 0C 7C F3 3B 78 92 81 00 10 7C 74 1B 78 92 C1 00 18 7C D6 33 78 92 E1 00 1C 7C 97 23 78 93 A1 00 34 7C BD 2B 78 92 A1 00 14 93 01 00 20 93 21 00 24 93 41 00 28 93 61 00 2C 93 81 00 30 93 C1 00 38 93 E1 00 3C 90 01 00 44 41 9D 00 48
+==== BB 1946 (0xFE92DC8) approx BBs exec'd 0 ====
+
+	0xFE92DC8:  5499F87E  rlwinm r25,r4,31,1,31
+	   0: GETL       	R4, t0
+	   1: SHRL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0xFE92DCC:  7C7A1B78  or r26,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R26
+	   6: INCEIPL       	$4
+
+	0xFE92DD0:  7D1929D6  mullw r8,r25,r5
+	   7: GETL       	R25, t4
+	   8: GETL       	R5, t6
+	   9: MULL       	t4, t6
+	  10: PUTL       	t6, R8
+	  11: INCEIPL       	$4
+
+	0xFE92DD4:  7F24CB78  or r4,r25,r25
+	  12: GETL       	R25, t8
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0xFE92DD8:  7F79B850  subf r27,r25,r23
+	  15: GETL       	R25, t10
+	  16: GETL       	R23, t12
+	  17: SUBL       	t10, t12
+	  18: PUTL       	t12, R27
+	  19: INCEIPL       	$4
+
+	0xFE92DDC:  7E7C9B78  or r28,r19,r19
+	  20: GETL       	R19, t14
+	  21: PUTL       	t14, R28
+	  22: INCEIPL       	$4
+
+	0xFE92DE0:  7FE81A14  add r31,r8,r3
+	  23: GETL       	R8, t16
+	  24: GETL       	R3, t18
+	  25: ADDL       	t16, t18
+	  26: PUTL       	t18, R31
+	  27: INCEIPL       	$4
+
+	0xFE92DE4:  4BFFFF45  bl 0xFE92D28
+	  28: MOVL       	$0xFE92DE8, t20
+	  29: PUTL       	t20, LR
+	  30: JMPo-c       	$0xFE92D28  ($4)
+
+
+
+. 1946 FE92DC8 32
+. 54 99 F8 7E 7C 7A 1B 78 7D 19 29 D6 7F 24 CB 78 7F 79 B8 50 7E 7C 9B 78 7F E8 1A 14 4B FF FF 45
+==== BB 1947 (0xFE92D84) approx BBs exec'd 0 ====
+
+	0xFE92D84:  82610044  lwz r19,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0xFE92D88:  82810010  lwz r20,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R20
+	   9: INCEIPL       	$4
+
+	0xFE92D8C:  7E6803A6  mtlr r19
+	  10: GETL       	R19, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFE92D90:  82A10014  lwz r21,20(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x14, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R21
+	  17: INCEIPL       	$4
+
+	0xFE92D94:  8261000C  lwz r19,12(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0xC, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R19
+	  22: INCEIPL       	$4
+
+	0xFE92D98:  82C10018  lwz r22,24(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R22
+	  27: INCEIPL       	$4
+
+	0xFE92D9C:  82E1001C  lwz r23,28(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R23
+	  32: INCEIPL       	$4
+
+	0xFE92DA0:  83010020  lwz r24,32(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x20, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R24
+	  37: INCEIPL       	$4
+
+	0xFE92DA4:  83210024  lwz r25,36(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x24, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R25
+	  42: INCEIPL       	$4
+
+	0xFE92DA8:  83410028  lwz r26,40(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x28, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R26
+	  47: INCEIPL       	$4
+
+	0xFE92DAC:  8361002C  lwz r27,44(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x2C, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R27
+	  52: INCEIPL       	$4
+
+	0xFE92DB0:  83810030  lwz r28,48(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x30, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R28
+	  57: INCEIPL       	$4
+
+	0xFE92DB4:  83A10034  lwz r29,52(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x34, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R29
+	  62: INCEIPL       	$4
+
+	0xFE92DB8:  83C10038  lwz r30,56(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x38, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R30
+	  67: INCEIPL       	$4
+
+	0xFE92DBC:  83E1003C  lwz r31,60(r1)
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x3C, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R31
+	  72: INCEIPL       	$4
+
+	0xFE92DC0:  38210040  addi r1,r1,64
+	  73: GETL       	R1, t58
+	  74: ADDL       	$0x40, t58
+	  75: PUTL       	t58, R1
+	  76: INCEIPL       	$4
+
+	0xFE92DC4:  4E800020  blr
+	  77: GETL       	LR, t60
+	  78: JMPo-r       	t60  ($4)
+
+
+
+. 1947 FE92D84 68
+. 82 61 00 44 82 81 00 10 7E 68 03 A6 82 A1 00 14 82 61 00 0C 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 1948 (0xFE92DE8) approx BBs exec'd 0 ====
+
+	0xFE92DE8:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE92DEC:  7F64DB78  or r4,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE92DF0:  7FA5EB78  or r5,r29,r29
+	   6: GETL       	R29, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFE92DF4:  7EC6B378  or r6,r22,r22
+	   9: GETL       	R22, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0xFE92DF8:  7E679B78  or r7,r19,r19
+	  12: GETL       	R19, t8
+	  13: PUTL       	t8, R7
+	  14: INCEIPL       	$4
+
+	0xFE92DFC:  4BFFFF2D  bl 0xFE92D28
+	  15: MOVL       	$0xFE92E00, t10
+	  16: PUTL       	t10, LR
+	  17: JMPo-c       	$0xFE92D28  ($4)
+
+
+
+. 1948 FE92DE8 24
+. 7F E3 FB 78 7F 64 DB 78 7F A5 EB 78 7E C6 B3 78 7E 67 9B 78 4B FF FF 2D
+==== BB 1949 (0xFE92E00) approx BBs exec'd 0 ====
+
+	0xFE92E00:  2C1D0004  cmpi cr0,r29,4
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x4, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE92E04:  418200E4  bc 12,2,0xFE92EE8
+	   5: Js02o       	$0xFE92EE8
+
+
+
+. 1949 FE92E00 8
+. 2C 1D 00 04 41 82 00 E4
+==== BB 1950 (0xFE92E08) approx BBs exec'd 0 ====
+
+	0xFE92E08:  3139FFFF  addic r9,r25,-1
+	   0: GETL       	R25, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFE92E0C:  7CE9C910  subfe r7,r9,r25
+	   4: GETL       	R9, t2
+	   5: GETL       	R25, t4
+	   6: SBBL       	t2, t4  (-rCa-wCa)
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0xFE92E10:  317BFFFF  addic r11,r27,-1
+	   9: GETL       	R27, t6
+	  10: ADCL       	$0xFFFFFFFF, t6  (-wCa)
+	  11: PUTL       	t6, R11
+	  12: INCEIPL       	$4
+
+	0xFE92E14:  7CCBD910  subfe r6,r11,r27
+	  13: GETL       	R11, t8
+	  14: GETL       	R27, t10
+	  15: SBBL       	t8, t10  (-rCa-wCa)
+	  16: PUTL       	t10, R6
+	  17: INCEIPL       	$4
+
+	0xFE92E18:  7CEB3039  and. r11,r7,r6
+	  18: GETL       	R7, t12
+	  19: GETL       	R6, t14
+	  20: ANDL       	t12, t14
+	  21: PUTL       	t14, R11
+	  22: CMP0L       	t14, t16  (-rSo)
+	  23: ICRFL       	t16, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0xFE92E1C:  7F38FE70  srawi r24,r25,31
+	  25: GETL       	R25, t18
+	  26: SARL       	$0x1F, t18  (-wCa)
+	  27: PUTL       	t18, R24
+	  28: INCEIPL       	$4
+
+	0xFE92E20:  7F65FE70  srawi r5,r27,31
+	  29: GETL       	R27, t20
+	  30: SARL       	$0x1F, t20  (-wCa)
+	  31: PUTL       	t20, R5
+	  32: INCEIPL       	$4
+
+	0xFE92E24:  7F15CA78  xor r21,r24,r25
+	  33: GETL       	R24, t22
+	  34: GETL       	R25, t24
+	  35: XORL       	t22, t24
+	  36: PUTL       	t24, R21
+	  37: INCEIPL       	$4
+
+	0xFE92E28:  7CA4DA78  xor r4,r5,r27
+	  38: GETL       	R5, t26
+	  39: GETL       	R27, t28
+	  40: XORL       	t26, t28
+	  41: PUTL       	t28, R4
+	  42: INCEIPL       	$4
+
+	0xFE92E2C:  7EB8A850  subf r21,r24,r21
+	  43: GETL       	R24, t30
+	  44: GETL       	R21, t32
+	  45: SUBL       	t30, t32
+	  46: PUTL       	t32, R21
+	  47: INCEIPL       	$4
+
+	0xFE92E30:  7F052050  subf r24,r5,r4
+	  48: GETL       	R5, t34
+	  49: GETL       	R4, t36
+	  50: SUBL       	t34, t36
+	  51: PUTL       	t36, R24
+	  52: INCEIPL       	$4
+
+	0xFE92E34:  4082005C  bc 4,2,0xFE92E90
+	  53: Jc02o       	$0xFE92E90
+
+
+
+. 1950 FE92E08 48
+. 31 39 FF FF 7C E9 C9 10 31 7B FF FF 7C CB D9 10 7C EB 30 39 7F 38 FE 70 7F 65 FE 70 7F 15 CA 78 7C A4 DA 78 7E B8 A8 50 7F 05 20 50 40 82 00 5C
+==== BB 1951 (0xFE92E90) approx BBs exec'd 0 ====
+
+	0xFE92E90:  7FE4FB78  or r4,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE92E94:  7F43D378  or r3,r26,r26
+	   3: GETL       	R26, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFE92E98:  7EC903A6  mtctr r22
+	   6: GETL       	R22, t4
+	   7: PUTL       	t4, CTR
+	   8: INCEIPL       	$4
+
+	0xFE92E9C:  4E800421  bctrl
+	   9: MOVL       	$0xFE92EA0, t6
+	  10: PUTL       	t6, LR
+	  11: GETL       	CTR, t8
+	  12: JMPo-c       	t8  ($4)
+
+
+
+. 1951 FE92E90 16
+. 7F E4 FB 78 7F 43 D3 78 7E C9 03 A6 4E 80 04 21
+==== BB 1952 rangecmp(0xFE87BD8) approx BBs exec'd 0 ====
+
+	0xFE87BD8:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE87BDC:  80030000  lwz r0,0(r3)
+	   6: GETL       	R3, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFE87BE0:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFE87BE4:  80640000  lwz r3,0(r4)
+	  14: GETL       	R4, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R3
+	  17: INCEIPL       	$4
+
+	0xFE87BE8:  7C630050  subf r3,r3,r0
+	  18: GETL       	R3, t14
+	  19: GETL       	R0, t16
+	  20: SUBL       	t14, t16
+	  21: PUTL       	t16, R3
+	  22: INCEIPL       	$4
+
+	0xFE87BEC:  4E800020  blr
+	  23: GETL       	LR, t18
+	  24: JMPo-r       	t18  ($4)
+
+
+
+. 1952 FE87BD8 24
+. 94 21 FF F0 80 03 00 00 38 21 00 10 80 64 00 00 7C 63 00 50 4E 80 00 20
+==== BB 1953 (0xFE92EA0) approx BBs exec'd 0 ====
+
+	0xFE92EA0:  7F44D378  or r4,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE92EA4:  2F030000  cmpi cr6,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x6, CR
+	   6: INCEIPL       	$4
+
+	0xFE92EA8:  7FA5EB78  or r5,r29,r29
+	   7: GETL       	R29, t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFE92EAC:  7F83E378  or r3,r28,r28
+	  10: GETL       	R28, t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFE92EB0:  4199FFA8  bc 12,25,0xFE92E58
+	  13: Js25o       	$0xFE92E58
+
+
+
+. 1953 FE92EA0 20
+. 7F 44 D3 78 2F 03 00 00 7F A5 EB 78 7F 83 E3 78 41 99 FF A8
+==== BB 1954 (0xFE92EB4) approx BBs exec'd 0 ====
+
+	0xFE92EB4:  3B39FFFF  addi r25,r25,-1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0xFE92EB8:  48045149  bl 0xFED8000
+	   4: MOVL       	$0xFE92EBC, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0xFED8000  ($4)
+
+
+
+. 1954 FE92EB4 8
+. 3B 39 FF FF 48 04 51 49
+==== BB 1955 (0xFED802C) approx BBs exec'd 0 ====
+
+	0xFED802C:  7C8300D0  neg r4,r3
+	   0: GETL       	R3, t0
+	   1: NEGL       	t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0xFED8030:  548307BE  rlwinm r3,r4,0,30,31
+	   4: GETL       	R4, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0xFED8034:  7C691B79  or. r9,r3,r3
+	   8: GETL       	R3, t4
+	   9: PUTL       	t4, R9
+	  10: CMP0L       	t4, t6  (-rSo)
+	  11: ICRFL       	t6, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFED8038:  7F832850  subf r28,r3,r5
+	  13: GETL       	R3, t8
+	  14: GETL       	R5, t10
+	  15: SUBL       	t8, t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0xFED803C:  4182001C  bc 12,2,0xFED8058
+	  18: Js02o       	$0xFED8058
+
+
+
+. 1955 FED802C 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+==== BB 1956 (0xFED8058) approx BBs exec'd 0 ====
+
+	0xFED8058:  73E00003  andi. r0,r31,0x3
+	   0: GETL       	R31, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED805C:  40820068  bc 4,2,0xFED80C4
+	   6: Jc02o       	$0xFED80C4
+
+
+
+. 1956 FED8058 8
+. 73 E0 00 03 40 82 00 68
+==== BB 1957 (0xFED8060) approx BBs exec'd 0 ====
+
+	0xFED8060:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED8064:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFED8068:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0xFED806C:  48000751  bl 0xFED87BC
+	  10: MOVL       	$0xFED8070, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0xFED87BC  ($4)
+
+
+
+. 1957 FED8060 16
+. 7F A3 EB 78 7F E4 FB 78 57 85 F0 BE 48 00 07 51
+==== BB 1958 _wordcopy_fwd_aligned(0xFED87BC) approx BBs exec'd 0 ====
+
+	0xFED87BC:  54A0077E  rlwinm r0,r5,0,29,31
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0x7, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xFED87C0:  7D8802A6  mflr r12
+	   4: GETL       	LR, t2
+	   5: PUTL       	t2, R12
+	   6: INCEIPL       	$4
+
+	0xFED87C4:  2B800007  cmpli cr7,r0,7
+	   7: GETL       	R0, t4
+	   8: MOVL       	$0x7, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFED87C8:  480CF689  bl 0xFFA7E50
+	  12: MOVL       	$0xFED87CC, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 1958 FED87BC 16
+. 54 A0 07 7E 7D 88 02 A6 2B 80 00 07 48 0C F6 89
+==== BB 1959 (0xFED87CC) approx BBs exec'd 0 ====
+
+	0xFED87CC:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED87D0:  93C10008  stw r30,8(r1)
+	   6: GETL       	R30, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x8, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xFED87D4:  7FC802A6  mflr r30
+	  11: GETL       	LR, t8
+	  12: PUTL       	t8, R30
+	  13: INCEIPL       	$4
+
+	0xFED87D8:  7D8803A6  mtlr r12
+	  14: GETL       	R12, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFED87DC:  419D0028  bc 12,29,0xFED8804
+	  17: Js29o       	$0xFED8804
+
+
+
+. 1959 FED87CC 20
+. 94 21 FF F0 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 41 9D 00 28
+==== BB 1960 (0xFED87E0) approx BBs exec'd 0 ====
+
+	0xFED87E0:  817E07C8  lwz r11,1992(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x7C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFED87E4:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFED87E8:  7CCB002E  lwzx r6,r11,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R11, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R6
+	  14: INCEIPL       	$4
+
+	0xFED87EC:  7D265A14  add r9,r6,r11
+	  15: GETL       	R6, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0xFED87F0:  7D2903A6  mtctr r9
+	  20: GETL       	R9, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0xFED87F4:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+
+. 1960 FED87E0 24
+. 81 7E 07 C8 54 00 10 3A 7C CB 00 2E 7D 26 5A 14 7D 29 03 A6 4E 80 04 20
+==== BB 1961 (0xFED88A0) approx BBs exec'd 0 ====
+
+	0xFED88A0:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFED88A4:  3884FFF0  addi r4,r4,-16
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFED88A8:  81240014  lwz r9,20(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0xFED88AC:  3863FFEC  addi r3,r3,-20
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0xFFFFFFEC, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFED88B0:  38A50004  addi r5,r5,4
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x4, t12
+	  19: PUTL       	t12, R5
+	  20: INCEIPL       	$4
+
+	0xFED88B4:  90030014  stw r0,20(r3)
+	  21: GETL       	R0, t14
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x14, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0xFED88B8:  4BFFFF7C  b 0xFED8834
+	  26: JMPo       	$0xFED8834  ($4)
+
+
+
+. 1961 FED88A0 28
+. 80 04 00 00 38 84 FF F0 81 24 00 14 38 63 FF EC 38 A5 00 04 90 03 00 14 4B FF FF 7C
+==== BB 1962 (0xFED8834) approx BBs exec'd 0 ====
+
+	0xFED8834:  80040018  lwz r0,24(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED8838:  91230018  stw r9,24(r3)
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x18, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFED883C:  34A5FFF8  addic. r5,r5,-8
+	  10: GETL       	R5, t8
+	  11: ADCL       	$0xFFFFFFF8, t8  (-wCa)
+	  12: PUTL       	t8, R5
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFED8840:  8124001C  lwz r9,28(r4)
+	  16: GETL       	R4, t12
+	  17: ADDL       	$0x1C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R9
+	  20: INCEIPL       	$4
+
+	0xFED8844:  9003001C  stw r0,28(r3)
+	  21: GETL       	R0, t16
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFED8848:  38840020  addi r4,r4,32
+	  26: GETL       	R4, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R4
+	  29: INCEIPL       	$4
+
+	0xFED884C:  38630020  addi r3,r3,32
+	  30: GETL       	R3, t22
+	  31: ADDL       	$0x20, t22
+	  32: PUTL       	t22, R3
+	  33: INCEIPL       	$4
+
+	0xFED8850:  40A2FFB4  bc 5,2,0xFED8804
+	  34: Jc02o       	$0xFED8804
+
+
+
+. 1962 FED8834 32
+. 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 1963 (0xFED8854) approx BBs exec'd 0 ====
+
+	0xFED8854:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0xFED8858:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFED885C:  91230000  stw r9,0(r3)
+	   9: GETL       	R9, t6
+	  10: GETL       	R3, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFED8860:  4E800020  blr
+	  13: GETL       	LR, t10
+	  14: JMPo-r       	t10  ($4)
+
+
+
+. 1963 FED8854 16
+. 83 C1 00 08 38 21 00 10 91 23 00 00 4E 80 00 20
+==== BB 1964 (0xFED8070) approx BBs exec'd 0 ====
+
+	0xFED8070:  5786003A  rlwinm r6,r28,0,0,29
+	   0: GETL       	R28, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0xFED8074:  578507BE  rlwinm r5,r28,0,30,31
+	   4: GETL       	R28, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0xFED8078:  7FBD3214  add r29,r29,r6
+	   8: GETL       	R29, t4
+	   9: GETL       	R6, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R29
+	  12: INCEIPL       	$4
+
+	0xFED807C:  7FFF3214  add r31,r31,r6
+	  13: GETL       	R31, t8
+	  14: GETL       	R6, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R31
+	  17: INCEIPL       	$4
+
+	0xFED8080:  2C850000  cmpi cr1,r5,0
+	  18: GETL       	R5, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0xFED8084:  4186001C  bc 12,6,0xFED80A0
+	  22: Js06o       	$0xFED80A0
+
+
+
+. 1964 FED8070 24
+. 57 86 00 3A 57 85 07 BE 7F BD 32 14 7F FF 32 14 2C 85 00 00 41 86 00 1C
+==== BB 1965 (0xFE92EBC) approx BBs exec'd 0 ====
+
+	0xFE92EBC:  7F2BFE70  srawi r11,r25,31
+	   0: GETL       	R25, t0
+	   1: SARL       	$0x1F, t0  (-wCa)
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFE92EC0:  7C7C1B78  or r28,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R28
+	   6: INCEIPL       	$4
+
+	0xFE92EC4:  7D6ACA78  xor r10,r11,r25
+	   7: GETL       	R11, t4
+	   8: GETL       	R25, t6
+	   9: XORL       	t4, t6
+	  10: PUTL       	t6, R10
+	  11: INCEIPL       	$4
+
+	0xFE92EC8:  7F5AEA14  add r26,r26,r29
+	  12: GETL       	R26, t8
+	  13: GETL       	R29, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R26
+	  16: INCEIPL       	$4
+
+	0xFE92ECC:  7EAB5050  subf r21,r11,r10
+	  17: GETL       	R11, t12
+	  18: GETL       	R10, t14
+	  19: SUBL       	t12, t14
+	  20: PUTL       	t14, R21
+	  21: INCEIPL       	$4
+
+	0xFE92ED0:  4BFFFFB0  b 0xFE92E80
+	  22: JMPo       	$0xFE92E80  ($4)
+
+
+
+. 1965 FE92EBC 24
+. 7F 2B FE 70 7C 7C 1B 78 7D 6A CA 78 7F 5A EA 14 7E AB 50 50 4B FF FF B0
+==== BB 1966 (0xFE92E80) approx BBs exec'd 0 ====
+
+	0xFE92E80:  7D1500D0  neg r8,r21
+	   0: GETL       	R21, t0
+	   1: NEGL       	t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0xFE92E84:  7C7800D0  neg r3,r24
+	   4: GETL       	R24, t2
+	   5: NEGL       	t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0xFE92E88:  7C6B4039  and. r11,r3,r8
+	   8: GETL       	R3, t4
+	   9: GETL       	R8, t6
+	  10: ANDL       	t4, t6
+	  11: PUTL       	t6, R11
+	  12: CMP0L       	t6, t8  (-rSo)
+	  13: ICRFL       	t8, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFE92E8C:  40A0FFAC  bc 5,0,0xFE92E38
+	  15: Jc00o       	$0xFE92E38
+
+
+
+. 1966 FE92E80 16
+. 7D 15 00 D0 7C 78 00 D0 7C 6B 40 39 40 A0 FF AC
+==== BB 1967 (0xFE92E38) approx BBs exec'd 0 ====
+
+	0xFE92E38:  2F990000  cmpi cr7,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE92E3C:  409E0098  bc 4,30,0xFE92ED4
+	   4: Jc30o       	$0xFE92ED4
+
+
+
+. 1967 FE92E38 8
+. 2F 99 00 00 40 9E 00 98
+==== BB 1968 (0xFE92E40) approx BBs exec'd 0 ====
+
+	0xFE92E40:  7EDBB850  subf r22,r27,r23
+	   0: GETL       	R27, t0
+	   1: GETL       	R23, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R22
+	   4: INCEIPL       	$4
+
+	0xFE92E44:  7E83A378  or r3,r20,r20
+	   5: GETL       	R20, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE92E48:  7CB6E9D6  mullw r5,r22,r29
+	   8: GETL       	R22, t6
+	   9: GETL       	R29, t8
+	  10: MULL       	t6, t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0xFE92E4C:  7E649B78  or r4,r19,r19
+	  13: GETL       	R19, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFE92E50:  48045889  bl 0xFED86D8
+	  16: MOVL       	$0xFE92E54, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 1968 FE92E40 20
+. 7E DB B8 50 7E 83 A3 78 7C B6 E9 D6 7E 64 9B 78 48 04 58 89
+==== BB 1969 (0xFED870C) approx BBs exec'd 0 ====
+
+	0xFED870C:  7C8300D0  neg r4,r3
+	   0: GETL       	R3, t0
+	   1: NEGL       	t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0xFED8710:  548307BE  rlwinm r3,r4,0,30,31
+	   4: GETL       	R4, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0xFED8714:  7C691B79  or. r9,r3,r3
+	   8: GETL       	R3, t4
+	   9: PUTL       	t4, R9
+	  10: CMP0L       	t4, t6  (-rSo)
+	  11: ICRFL       	t6, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFED8718:  7F832850  subf r28,r3,r5
+	  13: GETL       	R3, t8
+	  14: GETL       	R5, t10
+	  15: SUBL       	t8, t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0xFED871C:  4182001C  bc 12,2,0xFED8738
+	  18: Js02o       	$0xFED8738
+
+
+
+. 1969 FED870C 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+==== BB 1970 (0xFED8738) approx BBs exec'd 0 ====
+
+	0xFED8738:  73A00003  andi. r0,r29,0x3
+	   0: GETL       	R29, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED873C:  4082006C  bc 4,2,0xFED87A8
+	   6: Jc02o       	$0xFED87A8
+
+
+
+. 1970 FED8738 8
+. 73 A0 00 03 40 82 00 6C
+==== BB 1971 (0xFED8740) approx BBs exec'd 0 ====
+
+	0xFED8740:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED8744:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFED8748:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0xFED874C:  48000071  bl 0xFED87BC
+	  10: MOVL       	$0xFED8750, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0xFED87BC  ($4)
+
+
+
+. 1971 FED8740 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 00 71
+==== BB 1972 (0xFED8750) approx BBs exec'd 0 ====
+
+	0xFED8750:  5786003A  rlwinm r6,r28,0,0,29
+	   0: GETL       	R28, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0xFED8754:  578507BE  rlwinm r5,r28,0,30,31
+	   4: GETL       	R28, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0xFED8758:  7FFF3214  add r31,r31,r6
+	   8: GETL       	R31, t4
+	   9: GETL       	R6, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R31
+	  12: INCEIPL       	$4
+
+	0xFED875C:  7FBD3214  add r29,r29,r6
+	  13: GETL       	R29, t8
+	  14: GETL       	R6, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0xFED8760:  2C850000  cmpi cr1,r5,0
+	  18: GETL       	R5, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0xFED8764:  4186001C  bc 12,6,0xFED8780
+	  22: Js06o       	$0xFED8780
+
+
+
+. 1972 FED8750 24
+. 57 86 00 3A 57 85 07 BE 7F FF 32 14 7F BD 32 14 2C 85 00 00 41 86 00 1C
+==== BB 1973 (0xFE92E54) approx BBs exec'd 0 ====
+
+	0xFE92E54:  4BFFFF30  b 0xFE92D84
+	   0: JMPo       	$0xFE92D84  ($4)
+
+
+
+. 1973 FE92E54 4
+. 4B FF FF 30
+==== BB 1974 (0xFE92E58) approx BBs exec'd 0 ====
+
+	0xFE92E58:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE92E5C:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE92E60:  3B7BFFFF  addi r27,r27,-1
+	   6: GETL       	R27, t4
+	   7: ADDL       	$0xFFFFFFFF, t4
+	   8: PUTL       	t4, R27
+	   9: INCEIPL       	$4
+
+	0xFE92E64:  7FA5EB78  or r5,r29,r29
+	  10: GETL       	R29, t6
+	  11: PUTL       	t6, R5
+	  12: INCEIPL       	$4
+
+	0xFE92E68:  48045199  bl 0xFED8000
+	  13: MOVL       	$0xFE92E6C, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0xFED8000  ($4)
+
+
+
+. 1974 FE92E58 20
+. 7F 83 E3 78 7F E4 FB 78 3B 7B FF FF 7F A5 EB 78 48 04 51 99
+==== BB 1975 (0xFE92E6C) approx BBs exec'd 0 ====
+
+	0xFE92E6C:  7F60FE70  srawi r0,r27,31
+	   0: GETL       	R27, t0
+	   1: SARL       	$0x1F, t0  (-wCa)
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xFE92E70:  7C0CDA78  xor r12,r0,r27
+	   4: GETL       	R0, t2
+	   5: GETL       	R27, t4
+	   6: XORL       	t2, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFE92E74:  7C7C1B78  or r28,r3,r3
+	   9: GETL       	R3, t6
+	  10: PUTL       	t6, R28
+	  11: INCEIPL       	$4
+
+	0xFE92E78:  7FFFEA14  add r31,r31,r29
+	  12: GETL       	R31, t8
+	  13: GETL       	R29, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0xFE92E7C:  7F006050  subf r24,r0,r12
+	  17: GETL       	R0, t12
+	  18: GETL       	R12, t14
+	  19: SUBL       	t12, t14
+	  20: PUTL       	t14, R24
+	  21: INCEIPL       	$4
+
+	0xFE92E80:  7D1500D0  neg r8,r21
+	  22: GETL       	R21, t16
+	  23: NEGL       	t16
+	  24: PUTL       	t16, R8
+	  25: INCEIPL       	$4
+
+	0xFE92E84:  7C7800D0  neg r3,r24
+	  26: GETL       	R24, t18
+	  27: NEGL       	t18
+	  28: PUTL       	t18, R3
+	  29: INCEIPL       	$4
+
+	0xFE92E88:  7C6B4039  and. r11,r3,r8
+	  30: GETL       	R3, t20
+	  31: GETL       	R8, t22
+	  32: ANDL       	t20, t22
+	  33: PUTL       	t22, R11
+	  34: CMP0L       	t22, t24  (-rSo)
+	  35: ICRFL       	t24, $0x0, CR
+	  36: INCEIPL       	$4
+
+	0xFE92E8C:  40A0FFAC  bc 5,0,0xFE92E38
+	  37: Jc00o       	$0xFE92E38
+
+
+
+. 1975 FE92E6C 36
+. 7F 60 FE 70 7C 0C DA 78 7C 7C 1B 78 7F FF EA 14 7F 00 60 50 7D 15 00 D0 7C 78 00 D0 7C 6B 40 39 40 A0 FF AC
+==== BB 1976 (0xFED8864) approx BBs exec'd 0 ====
+
+	0xFED8864:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFED8868:  3863FFFC  addi r3,r3,-4
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFFC, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED886C:  81240004  lwz r9,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0xFED8870:  90030004  stw r0,4(r3)
+	  13: GETL       	R0, t10
+	  14: GETL       	R3, t12
+	  15: ADDL       	$0x4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFED8874:  4BFFFFA0  b 0xFED8814
+	  18: JMPo       	$0xFED8814  ($4)
+
+
+
+. 1976 FED8864 20
+. 80 04 00 00 38 63 FF FC 81 24 00 04 90 03 00 04 4B FF FF A0
+==== BB 1977 (0xFED8814) approx BBs exec'd 0 ====
+
+	0xFED8814:  80040008  lwz r0,8(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED8818:  91230008  stw r9,8(r3)
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFED881C:  8124000C  lwz r9,12(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0xFED8820:  9003000C  stw r0,12(r3)
+	  15: GETL       	R0, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0xC, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFED8824:  80040010  lwz r0,16(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0x10, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R0
+	  24: INCEIPL       	$4
+
+	0xFED8828:  91230010  stw r9,16(r3)
+	  25: GETL       	R9, t20
+	  26: GETL       	R3, t22
+	  27: ADDL       	$0x10, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0xFED882C:  81240014  lwz r9,20(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0x14, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R9
+	  34: INCEIPL       	$4
+
+	0xFED8830:  90030014  stw r0,20(r3)
+	  35: GETL       	R0, t28
+	  36: GETL       	R3, t30
+	  37: ADDL       	$0x14, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0xFED8834:  80040018  lwz r0,24(r4)
+	  40: GETL       	R4, t32
+	  41: ADDL       	$0x18, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R0
+	  44: INCEIPL       	$4
+
+	0xFED8838:  91230018  stw r9,24(r3)
+	  45: GETL       	R9, t36
+	  46: GETL       	R3, t38
+	  47: ADDL       	$0x18, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0xFED883C:  34A5FFF8  addic. r5,r5,-8
+	  50: GETL       	R5, t40
+	  51: ADCL       	$0xFFFFFFF8, t40  (-wCa)
+	  52: PUTL       	t40, R5
+	  53: CMP0L       	t40, t42  (-rSo)
+	  54: ICRFL       	t42, $0x0, CR
+	  55: INCEIPL       	$4
+
+	0xFED8840:  8124001C  lwz r9,28(r4)
+	  56: GETL       	R4, t44
+	  57: ADDL       	$0x1C, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R9
+	  60: INCEIPL       	$4
+
+	0xFED8844:  9003001C  stw r0,28(r3)
+	  61: GETL       	R0, t48
+	  62: GETL       	R3, t50
+	  63: ADDL       	$0x1C, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0xFED8848:  38840020  addi r4,r4,32
+	  66: GETL       	R4, t52
+	  67: ADDL       	$0x20, t52
+	  68: PUTL       	t52, R4
+	  69: INCEIPL       	$4
+
+	0xFED884C:  38630020  addi r3,r3,32
+	  70: GETL       	R3, t54
+	  71: ADDL       	$0x20, t54
+	  72: PUTL       	t54, R3
+	  73: INCEIPL       	$4
+
+	0xFED8850:  40A2FFB4  bc 5,2,0xFED8804
+	  74: Jc02o       	$0xFED8804
+
+
+
+. 1977 FED8814 64
+. 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 1978 (0xFED8804) approx BBs exec'd 0 ====
+
+	0xFED8804:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFED8808:  91230000  stw r9,0(r3)
+	   4: GETL       	R9, t4
+	   5: GETL       	R3, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0xFED880C:  81240004  lwz r9,4(r4)
+	   8: GETL       	R4, t8
+	   9: ADDL       	$0x4, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R9
+	  12: INCEIPL       	$4
+
+	0xFED8810:  90030004  stw r0,4(r3)
+	  13: GETL       	R0, t12
+	  14: GETL       	R3, t14
+	  15: ADDL       	$0x4, t14
+	  16: STL       	t12, (t14)
+	  17: INCEIPL       	$4
+
+	0xFED8814:  80040008  lwz r0,8(r4)
+	  18: GETL       	R4, t16
+	  19: ADDL       	$0x8, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R0
+	  22: INCEIPL       	$4
+
+	0xFED8818:  91230008  stw r9,8(r3)
+	  23: GETL       	R9, t20
+	  24: GETL       	R3, t22
+	  25: ADDL       	$0x8, t22
+	  26: STL       	t20, (t22)
+	  27: INCEIPL       	$4
+
+	0xFED881C:  8124000C  lwz r9,12(r4)
+	  28: GETL       	R4, t24
+	  29: ADDL       	$0xC, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R9
+	  32: INCEIPL       	$4
+
+	0xFED8820:  9003000C  stw r0,12(r3)
+	  33: GETL       	R0, t28
+	  34: GETL       	R3, t30
+	  35: ADDL       	$0xC, t30
+	  36: STL       	t28, (t30)
+	  37: INCEIPL       	$4
+
+	0xFED8824:  80040010  lwz r0,16(r4)
+	  38: GETL       	R4, t32
+	  39: ADDL       	$0x10, t32
+	  40: LDL       	(t32), t34
+	  41: PUTL       	t34, R0
+	  42: INCEIPL       	$4
+
+	0xFED8828:  91230010  stw r9,16(r3)
+	  43: GETL       	R9, t36
+	  44: GETL       	R3, t38
+	  45: ADDL       	$0x10, t38
+	  46: STL       	t36, (t38)
+	  47: INCEIPL       	$4
+
+	0xFED882C:  81240014  lwz r9,20(r4)
+	  48: GETL       	R4, t40
+	  49: ADDL       	$0x14, t40
+	  50: LDL       	(t40), t42
+	  51: PUTL       	t42, R9
+	  52: INCEIPL       	$4
+
+	0xFED8830:  90030014  stw r0,20(r3)
+	  53: GETL       	R0, t44
+	  54: GETL       	R3, t46
+	  55: ADDL       	$0x14, t46
+	  56: STL       	t44, (t46)
+	  57: INCEIPL       	$4
+
+	0xFED8834:  80040018  lwz r0,24(r4)
+	  58: GETL       	R4, t48
+	  59: ADDL       	$0x18, t48
+	  60: LDL       	(t48), t50
+	  61: PUTL       	t50, R0
+	  62: INCEIPL       	$4
+
+	0xFED8838:  91230018  stw r9,24(r3)
+	  63: GETL       	R9, t52
+	  64: GETL       	R3, t54
+	  65: ADDL       	$0x18, t54
+	  66: STL       	t52, (t54)
+	  67: INCEIPL       	$4
+
+	0xFED883C:  34A5FFF8  addic. r5,r5,-8
+	  68: GETL       	R5, t56
+	  69: ADCL       	$0xFFFFFFF8, t56  (-wCa)
+	  70: PUTL       	t56, R5
+	  71: CMP0L       	t56, t58  (-rSo)
+	  72: ICRFL       	t58, $0x0, CR
+	  73: INCEIPL       	$4
+
+	0xFED8840:  8124001C  lwz r9,28(r4)
+	  74: GETL       	R4, t60
+	  75: ADDL       	$0x1C, t60
+	  76: LDL       	(t60), t62
+	  77: PUTL       	t62, R9
+	  78: INCEIPL       	$4
+
+	0xFED8844:  9003001C  stw r0,28(r3)
+	  79: GETL       	R0, t64
+	  80: GETL       	R3, t66
+	  81: ADDL       	$0x1C, t66
+	  82: STL       	t64, (t66)
+	  83: INCEIPL       	$4
+
+	0xFED8848:  38840020  addi r4,r4,32
+	  84: GETL       	R4, t68
+	  85: ADDL       	$0x20, t68
+	  86: PUTL       	t68, R4
+	  87: INCEIPL       	$4
+
+	0xFED884C:  38630020  addi r3,r3,32
+	  88: GETL       	R3, t70
+	  89: ADDL       	$0x20, t70
+	  90: PUTL       	t70, R3
+	  91: INCEIPL       	$4
+
+	0xFED8850:  40A2FFB4  bc 5,2,0xFED8804
+	  92: Jc02o       	$0xFED8804
+
+
+
+. 1978 FED8804 80
+. 80 04 00 00 91 23 00 00 81 24 00 04 90 03 00 04 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 1979 (0xFE93148) approx BBs exec'd 0 ====
+
+	0xFE93148:  80E10000  lwz r7,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0xFE9314C:  82E70004  lwz r23,4(r7)
+	   4: GETL       	R7, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R23
+	   8: INCEIPL       	$4
+
+	0xFE93150:  82C7FFD8  lwz r22,-40(r7)
+	   9: GETL       	R7, t8
+	  10: ADDL       	$0xFFFFFFD8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R22
+	  13: INCEIPL       	$4
+
+	0xFE93154:  7EE803A6  mtlr r23
+	  14: GETL       	R23, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFE93158:  8307FFE0  lwz r24,-32(r7)
+	  17: GETL       	R7, t14
+	  18: ADDL       	$0xFFFFFFE0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R24
+	  21: INCEIPL       	$4
+
+	0xFE9315C:  82E7FFDC  lwz r23,-36(r7)
+	  22: GETL       	R7, t18
+	  23: ADDL       	$0xFFFFFFDC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R23
+	  26: INCEIPL       	$4
+
+	0xFE93160:  8327FFE4  lwz r25,-28(r7)
+	  27: GETL       	R7, t22
+	  28: ADDL       	$0xFFFFFFE4, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R25
+	  31: INCEIPL       	$4
+
+	0xFE93164:  8347FFE8  lwz r26,-24(r7)
+	  32: GETL       	R7, t26
+	  33: ADDL       	$0xFFFFFFE8, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R26
+	  36: INCEIPL       	$4
+
+	0xFE93168:  8367FFEC  lwz r27,-20(r7)
+	  37: GETL       	R7, t30
+	  38: ADDL       	$0xFFFFFFEC, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R27
+	  41: INCEIPL       	$4
+
+	0xFE9316C:  8387FFF0  lwz r28,-16(r7)
+	  42: GETL       	R7, t34
+	  43: ADDL       	$0xFFFFFFF0, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R28
+	  46: INCEIPL       	$4
+
+	0xFE93170:  83A7FFF4  lwz r29,-12(r7)
+	  47: GETL       	R7, t38
+	  48: ADDL       	$0xFFFFFFF4, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R29
+	  51: INCEIPL       	$4
+
+	0xFE93174:  83C7FFF8  lwz r30,-8(r7)
+	  52: GETL       	R7, t42
+	  53: ADDL       	$0xFFFFFFF8, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R30
+	  56: INCEIPL       	$4
+
+	0xFE93178:  83E7FFFC  lwz r31,-4(r7)
+	  57: GETL       	R7, t46
+	  58: ADDL       	$0xFFFFFFFC, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R31
+	  61: INCEIPL       	$4
+
+	0xFE9317C:  7CE13B78  or r1,r7,r7
+	  62: GETL       	R7, t50
+	  63: PUTL       	t50, R1
+	  64: INCEIPL       	$4
+
+	0xFE93180:  4E800020  blr
+	  65: GETL       	LR, t52
+	  66: JMPo-r       	t52  ($4)
+
+
+
+. 1979 FE93148 60
+. 80 E1 00 00 82 E7 00 04 82 C7 FF D8 7E E8 03 A6 83 07 FF E0 82 E7 FF DC 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+==== BB 1980 (0xFE87EF8) approx BBs exec'd 0 ====
+
+	0xFE87EF8:  7C9AA800  cmp cr1,r26,r21
+	   0: GETL       	R26, t0
+	   1: GETL       	R21, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFE87EFC:  83710000  lwz r27,0(r17)
+	   5: GETL       	R17, t6
+	   6: LDL       	(t6), t8
+	   7: PUTL       	t8, R27
+	   8: INCEIPL       	$4
+
+	0xFE87F00:  7F71DB78  or r17,r27,r27
+	   9: GETL       	R27, t10
+	  10: PUTL       	t10, R17
+	  11: INCEIPL       	$4
+
+	0xFE87F04:  4084017C  bc 4,4,0xFE88080
+	  12: Jc04o       	$0xFE88080
+
+
+
+. 1980 FE87EF8 16
+. 7C 9A A8 00 83 71 00 00 7F 71 DB 78 40 84 01 7C
+==== BB 1981 (0xFE87F08) approx BBs exec'd 0 ====
+
+	0xFE87F08:  819E0134  lwz r12,308(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x134, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFE87F0C:  3B14FFFF  addi r24,r20,-1
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R24
+	   8: INCEIPL       	$4
+
+	0xFE87F10:  2E1B0000  cmpi cr4,r27,0
+	   9: GETL       	R27, t6
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0xFE87F14:  931F01B0  stw r24,432(r31)
+	  13: GETL       	R24, t10
+	  14: GETL       	R31, t12
+	  15: ADDL       	$0x1B0, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE87F18:  919F01B4  stw r12,436(r31)
+	  18: GETL       	R12, t14
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0x1B4, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFE87F1C:  3B3F0008  addi r25,r31,8
+	  23: GETL       	R31, t18
+	  24: ADDL       	$0x8, t18
+	  25: PUTL       	t18, R25
+	  26: INCEIPL       	$4
+
+	0xFE87F20:  7F10C0F8  nor r16,r24,r24
+	  27: GETL       	R24, t20
+	  28: NOTL       	t20
+	  29: PUTL       	t20, R16
+	  30: INCEIPL       	$4
+
+	0xFE87F24:  41920460  bc 12,18,0xFE88384
+	  31: Js18o       	$0xFE88384
+
+
+
+. 1981 FE87F08 32
+. 81 9E 01 34 3B 14 FF FF 2E 1B 00 00 93 1F 01 B0 91 9F 01 B4 3B 3F 00 08 7F 10 C0 F8 41 92 04 60
+==== BB 1982 (0xFE87F28) approx BBs exec'd 0 ====
+
+	0xFE87F28:  57582036  rlwinm r24,r26,4,0,27
+	   0: GETL       	R26, t0
+	   1: SHLL       	$0x4, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0xFE87F2C:  7D18CA14  add r8,r24,r25
+	   4: GETL       	R24, t2
+	   5: GETL       	R25, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0xFE87F30:  38E80078  addi r7,r8,120
+	   9: GETL       	R8, t6
+	  10: ADDL       	$0x78, t6
+	  11: PUTL       	t6, R7
+	  12: INCEIPL       	$4
+
+	0xFE87F34:  839B0004  lwz r28,4(r27)
+	  13: GETL       	R27, t8
+	  14: ADDL       	$0x4, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0xFE87F38:  807B0008  lwz r3,8(r27)
+	  18: GETL       	R27, t12
+	  19: ADDL       	$0x8, t12
+	  20: LDL       	(t12), t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0xFE87F3C:  80880078  lwz r4,120(r8)
+	  23: GETL       	R8, t16
+	  24: ADDL       	$0x78, t16
+	  25: LDL       	(t16), t18
+	  26: PUTL       	t18, R4
+	  27: INCEIPL       	$4
+
+	0xFE87F40:  80C70004  lwz r6,4(r7)
+	  28: GETL       	R7, t20
+	  29: ADDL       	$0x4, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R6
+	  32: INCEIPL       	$4
+
+	0xFE87F44:  7D3C1A14  add r9,r28,r3
+	  33: GETL       	R28, t24
+	  34: GETL       	R3, t26
+	  35: ADDL       	t24, t26
+	  36: PUTL       	t26, R9
+	  37: INCEIPL       	$4
+
+	0xFE87F48:  7C043214  add r0,r4,r6
+	  38: GETL       	R4, t28
+	  39: GETL       	R6, t30
+	  40: ADDL       	t28, t30
+	  41: PUTL       	t30, R0
+	  42: INCEIPL       	$4
+
+	0xFE87F4C:  7C090040  cmpl cr0,r9,r0
+	  43: GETL       	R9, t32
+	  44: GETL       	R0, t34
+	  45: CMPUL       	t32, t34, t36  (-rSo)
+	  46: ICRFL       	t36, $0x0, CR
+	  47: INCEIPL       	$4
+
+	0xFE87F50:  41810438  bc 12,1,0xFE88388
+	  48: Js01o       	$0xFE88388
+
+
+
+. 1982 FE87F28 44
+. 57 58 20 36 7D 18 CA 14 38 E8 00 78 83 9B 00 04 80 7B 00 08 80 88 00 78 80 C7 00 04 7D 3C 1A 14 7C 04 32 14 7C 09 00 40 41 81 04 38
+==== BB 1983 (0xFE88388) approx BBs exec'd 0 ====
+
+	0xFE88388:  41B2FBDC  bc 13,18,0xFE87F64
+	   0: Js18o       	$0xFE87F64
+
+
+
+. 1983 FE88388 4
+. 41 B2 FB DC
+==== BB 1984 (0xFE8838C) approx BBs exec'd 0 ====
+
+	0xFE8838C:  7CF8CA14  add r7,r24,r25
+	   0: GETL       	R24, t0
+	   1: GETL       	R25, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFE88390:  817B0004  lwz r11,4(r27)
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0xFE88394:  81470078  lwz r10,120(r7)
+	  10: GETL       	R7, t8
+	  11: ADDL       	$0x78, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R10
+	  14: INCEIPL       	$4
+
+	0xFE88398:  39070078  addi r8,r7,120
+	  15: GETL       	R7, t12
+	  16: ADDL       	$0x78, t12
+	  17: PUTL       	t12, R8
+	  18: INCEIPL       	$4
+
+	0xFE8839C:  7F0B5040  cmpl cr6,r11,r10
+	  19: GETL       	R11, t14
+	  20: GETL       	R10, t16
+	  21: CMPUL       	t14, t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x6, CR
+	  23: INCEIPL       	$4
+
+	0xFE883A0:  41B9FBC4  bc 13,25,0xFE87F64
+	  24: Js25o       	$0xFE87F64
+
+
+
+. 1984 FE8838C 24
+. 7C F8 CA 14 81 7B 00 04 81 47 00 78 39 07 00 78 7F 0B 50 40 41 B9 FB C4
+==== BB 1985 (0xFE883A4) approx BBs exec'd 0 ====
+
+	0xFE883A4:  80E80004  lwz r7,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFE883A8:  83BB0008  lwz r29,8(r27)
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFE883AC:  7ECA3A14  add r22,r10,r7
+	  10: GETL       	R10, t8
+	  11: GETL       	R7, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R22
+	  14: INCEIPL       	$4
+
+	0xFE883B0:  7EEBEA14  add r23,r11,r29
+	  15: GETL       	R11, t12
+	  16: GETL       	R29, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R23
+	  19: INCEIPL       	$4
+
+	0xFE883B4:  7F96B840  cmpl cr7,r22,r23
+	  20: GETL       	R22, t16
+	  21: GETL       	R23, t18
+	  22: CMPUL       	t16, t18, t20  (-rSo)
+	  23: ICRFL       	t20, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0xFE883B8:  41BDFBAC  bc 13,29,0xFE87F64
+	  25: Js29o       	$0xFE87F64
+
+
+
+. 1985 FE883A4 24
+. 80 E8 00 04 83 BB 00 08 7E CA 3A 14 7E EB EA 14 7F 96 B8 40 41 BD FB AC
+==== BB 1986 (0xFE883BC) approx BBs exec'd 0 ====
+
+	0xFE883BC:  80080008  lwz r0,8(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE883C0:  819B0000  lwz r12,0(r27)
+	   5: GETL       	R27, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R12
+	   8: INCEIPL       	$4
+
+	0xFE883C4:  54051838  rlwinm r5,r0,3,0,28
+	   9: GETL       	R0, t8
+	  10: SHLL       	$0x3, t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0xFE883C8:  7F0C5214  add r24,r12,r10
+	  13: GETL       	R12, t10
+	  14: GETL       	R10, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R24
+	  17: INCEIPL       	$4
+
+	0xFE883CC:  7D05CA14  add r8,r5,r25
+	  18: GETL       	R5, t14
+	  19: GETL       	R25, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R8
+	  22: INCEIPL       	$4
+
+	0xFE883D0:  7D4BC050  subf r10,r11,r24
+	  23: GETL       	R11, t18
+	  24: GETL       	R24, t20
+	  25: SUBL       	t18, t20
+	  26: PUTL       	t20, R10
+	  27: INCEIPL       	$4
+
+	0xFE883D4:  90E8000C  stw r7,12(r8)
+	  28: GETL       	R7, t22
+	  29: GETL       	R8, t24
+	  30: ADDL       	$0xC, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0xFE883D8:  91480008  stw r10,8(r8)
+	  33: GETL       	R10, t26
+	  34: GETL       	R8, t28
+	  35: ADDL       	$0x8, t28
+	  36: STL       	t26, (t28)
+	  37: INCEIPL       	$4
+
+	0xFE883DC:  4BFFFC98  b 0xFE88074
+	  38: JMPo       	$0xFE88074  ($4)
+
+
+
+. 1986 FE883BC 36
+. 80 08 00 08 81 9B 00 00 54 05 18 38 7F 0C 52 14 7D 05 CA 14 7D 4B C0 50 90 E8 00 0C 91 48 00 08 4B FF FC 98
+==== BB 1987 (0xFE88074) approx BBs exec'd 0 ====
+
+	0xFE88074:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0xFE88078:  7F9AA800  cmp cr7,r26,r21
+	   4: GETL       	R26, t2
+	   5: GETL       	R21, t4
+	   6: CMPL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFE8807C:  419CFEA8  bc 12,28,0xFE87F24
+	   9: Js28o       	$0xFE87F24
+
+
+
+. 1987 FE88074 12
+. 3B 5A 00 01 7F 9A A8 00 41 9C FE A8
+==== BB 1988 (0xFE87F24) approx BBs exec'd 0 ====
+
+	0xFE87F24:  41920460  bc 12,18,0xFE88384
+	   0: Js18o       	$0xFE88384
+
+
+
+. 1988 FE87F24 4
+. 41 92 04 60
+==== BB 1989 (0xFE88080) approx BBs exec'd 0 ====
+
+	0xFE88080:  2E130000  cmpi cr4,r19,0
+	   0: GETL       	R19, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFE88084:  41900014  bc 12,16,0xFE88098
+	   4: Js16o       	$0xFE88098
+
+
+
+. 1989 FE88080 8
+. 2E 13 00 00 41 90 00 14
+==== BB 1990 (0xFE88088) approx BBs exec'd 0 ====
+
+	0xFE88088:  7E639B78  or r3,r19,r19
+	   0: GETL       	R19, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8808C:  38000006  li r0,6
+	   3: MOVL       	$0x6, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFE88090:  44000002  sc
+	   6: JMPo-sys       	$0xFE88094  ($4)
+
+
+
+. 1990 FE88088 12
+. 7E 63 9B 78 38 00 00 06 44 00 00 02
+==== BB 1991 (0xFE88094) approx BBs exec'd 0 ====
+
+	0xFE88094:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE88098:  3860003C  li r3,60
+	   3: MOVL       	$0x3C, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFE8809C:  4812226D  bl 0xFFAA308
+	   6: MOVL       	$0xFE880A0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 1991 FE88094 12
+. 7C 00 00 26 38 60 00 3C 48 12 22 6D
+==== BB 1992 (0xFECF66C) approx BBs exec'd 0 ====
+
+	0xFECF66C:  5729E8FE  rlwinm r9,r25,29,3,31
+	   0: GETL       	R25, t0
+	   1: SHRL       	$0x3, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFECF670:  3809FFFE  addi r0,r9,-2
+	   4: GETL       	R9, t2
+	   5: ADDL       	$0xFFFFFFFE, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0xFECF674:  5406103A  rlwinm r6,r0,2,0,29
+	   8: GETL       	R0, t4
+	   9: SHLL       	$0x2, t4
+	  10: PUTL       	t4, R6
+	  11: INCEIPL       	$4
+
+	0xFECF678:  7D66E214  add r11,r6,r28
+	  12: GETL       	R6, t6
+	  13: GETL       	R28, t8
+	  14: ADDL       	t6, t8
+	  15: PUTL       	t8, R11
+	  16: INCEIPL       	$4
+
+	0xFECF67C:  83AB0008  lwz r29,8(r11)
+	  17: GETL       	R11, t10
+	  18: ADDL       	$0x8, t10
+	  19: LDL       	(t10), t12
+	  20: PUTL       	t12, R29
+	  21: INCEIPL       	$4
+
+	0xFECF680:  3B6B0008  addi r27,r11,8
+	  22: GETL       	R11, t14
+	  23: ADDL       	$0x8, t14
+	  24: PUTL       	t14, R27
+	  25: INCEIPL       	$4
+
+	0xFECF684:  2D9D0000  cmpi cr3,r29,0
+	  26: GETL       	R29, t16
+	  27: CMP0L       	t16, t18  (-rSo)
+	  28: ICRFL       	t18, $0x3, CR
+	  29: INCEIPL       	$4
+
+	0xFECF688:  418E009C  bc 12,14,0xFECF724
+	  30: Js14o       	$0xFECF724
+
+
+
+. 1992 FECF66C 32
+. 57 29 E8 FE 38 09 FF FE 54 06 10 3A 7D 66 E2 14 83 AB 00 08 3B 6B 00 08 2D 9D 00 00 41 8E 00 9C
+==== BB 1993 (0xFECFEB0) approx BBs exec'd 0 ====
+
+	0xFECFEB0:  829E05FC  lwz r20,1532(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R20
+	   4: INCEIPL       	$4
+
+	0xFECFEB4:  7D190050  subf r8,r25,r0
+	   5: GETL       	R25, t4
+	   6: GETL       	R0, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0xFECFEB8:  63310001  ori r17,r25,0x1
+	  10: GETL       	R25, t8
+	  11: ORL       	$0x1, t8
+	  12: PUTL       	t8, R17
+	  13: INCEIPL       	$4
+
+	0xFECFEBC:  610A0001  ori r10,r8,0x1
+	  14: GETL       	R8, t10
+	  15: ORL       	$0x1, t10
+	  16: PUTL       	t10, R10
+	  17: INCEIPL       	$4
+
+	0xFECFEC0:  7F93A278  xor r19,r28,r20
+	  18: GETL       	R28, t12
+	  19: GETL       	R20, t14
+	  20: XORL       	t12, t14
+	  21: PUTL       	t14, R19
+	  22: INCEIPL       	$4
+
+	0xFECFEC4:  7DDDCA14  add r14,r29,r25
+	  23: GETL       	R29, t16
+	  24: GETL       	R25, t18
+	  25: ADDL       	t16, t18
+	  26: PUTL       	t18, R14
+	  27: INCEIPL       	$4
+
+	0xFECFEC8:  3173FFFF  addic r11,r19,-1
+	  28: GETL       	R19, t20
+	  29: ADCL       	$0xFFFFFFFF, t20  (-wCa)
+	  30: PUTL       	t20, R11
+	  31: INCEIPL       	$4
+
+	0xFECFECC:  7E4B9910  subfe r18,r11,r19
+	  32: GETL       	R11, t22
+	  33: GETL       	R19, t24
+	  34: SBBL       	t22, t24  (-rCa-wCa)
+	  35: PUTL       	t24, R18
+	  36: INCEIPL       	$4
+
+	0xFECFED0:  391D0008  addi r8,r29,8
+	  37: GETL       	R29, t26
+	  38: ADDL       	$0x8, t26
+	  39: PUTL       	t26, R8
+	  40: INCEIPL       	$4
+
+	0xFECFED4:  5650103A  rlwinm r16,r18,2,0,29
+	  41: GETL       	R18, t28
+	  42: SHLL       	$0x2, t28
+	  43: PUTL       	t28, R16
+	  44: INCEIPL       	$4
+
+	0xFECFED8:  91DC0030  stw r14,48(r28)
+	  45: GETL       	R14, t30
+	  46: GETL       	R28, t32
+	  47: ADDL       	$0x30, t32
+	  48: STL       	t30, (t32)
+	  49: INCEIPL       	$4
+
+	0xFECFEDC:  7E0F8B78  or r15,r16,r17
+	  50: GETL       	R16, t34
+	  51: GETL       	R17, t36
+	  52: ORL       	t36, t34
+	  53: PUTL       	t34, R15
+	  54: INCEIPL       	$4
+
+	0xFECFEE0:  91FD0004  stw r15,4(r29)
+	  55: GETL       	R15, t38
+	  56: GETL       	R29, t40
+	  57: ADDL       	$0x4, t40
+	  58: STL       	t38, (t40)
+	  59: INCEIPL       	$4
+
+	0xFECFEE4:  914E0004  stw r10,4(r14)
+	  60: GETL       	R10, t42
+	  61: GETL       	R14, t44
+	  62: ADDL       	$0x4, t44
+	  63: STL       	t42, (t44)
+	  64: INCEIPL       	$4
+
+	0xFECFEE8:  4BFFF7C8  b 0xFECF6B0
+	  65: JMPo       	$0xFECF6B0  ($4)
+
+
+
+. 1993 FECFEB0 60
+. 82 9E 05 FC 7D 19 00 50 63 31 00 01 61 0A 00 01 7F 93 A2 78 7D DD CA 14 31 73 FF FF 7E 4B 99 10 39 1D 00 08 56 50 10 3A 91 DC 00 30 7E 0F 8B 78 91 FD 00 04 91 4E 00 04 4B FF F7 C8
+==== BB 1994 (0xFE880A0) approx BBs exec'd 0 ====
+
+	0xFE880A0:  39200000  li r9,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0xFE880A4:  7C7A1B79  or. r26,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R26
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE880A8:  41A2FD50  bc 13,2,0xFE87DF8
+	   8: Js02o       	$0xFE87DF8
+
+
+
+. 1994 FE880A0 12
+. 39 20 00 00 7C 7A 1B 79 41 A2 FD 50
+==== BB 1995 (0xFE880AC) approx BBs exec'd 0 ====
+
+	0xFE880AC:  80720000  lwz r3,0(r18)
+	   0: GETL       	R18, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE880B0:  4804E6ED  bl 0xFED679C
+	   4: MOVL       	$0xFE880B4, t4
+	   5: PUTL       	t4, LR
+	   6: JMPo-c       	$0xFED679C  ($4)
+
+
+
+. 1995 FE880AC 8
+. 80 72 00 00 48 04 E6 ED
+==== BB 1996 strdup(0xFED679C) approx BBs exec'd 0 ====
+
+	0xFED679C:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFED67A0:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFED67A4:  93810010  stw r28,16(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFED67A8:  93A10014  stw r29,20(r1)
+	  14: GETL       	R29, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFED67AC:  7C7D1B78  or r29,r3,r3
+	  19: GETL       	R3, t14
+	  20: PUTL       	t14, R29
+	  21: INCEIPL       	$4
+
+	0xFED67B0:  90010024  stw r0,36(r1)
+	  22: GETL       	R0, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x24, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xFED67B4:  93C10018  stw r30,24(r1)
+	  27: GETL       	R30, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x18, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFED67B8:  48000321  bl 0xFED6AD8
+	  32: MOVL       	$0xFED67BC, t24
+	  33: PUTL       	t24, LR
+	  34: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 1996 FED679C 32
+. 7C 08 02 A6 94 21 FF E0 93 81 00 10 93 A1 00 14 7C 7D 1B 78 90 01 00 24 93 C1 00 18 48 00 03 21
+==== BB 1997 (0xFED67BC) approx BBs exec'd 0 ====
+
+	0xFED67BC:  3B830001  addi r28,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0xFED67C0:  7F83E378  or r3,r28,r28
+	   4: GETL       	R28, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0xFED67C4:  480D3B45  bl 0xFFAA308
+	   7: MOVL       	$0xFED67C8, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 1997 FED67BC 12
+. 3B 83 00 01 7F 83 E3 78 48 0D 3B 45
+==== BB 1998 (0xFECF68C) approx BBs exec'd 0 ====
+
+	0xFECF68C:  815D0004  lwz r10,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFECF690:  3B9D0008  addi r28,r29,8
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0xFECF694:  5548E8FE  rlwinm r8,r10,29,3,31
+	   9: GETL       	R10, t6
+	  10: SHRL       	$0x3, t6
+	  11: PUTL       	t6, R8
+	  12: INCEIPL       	$4
+
+	0xFECF698:  38E8FFFE  addi r7,r8,-2
+	  13: GETL       	R8, t8
+	  14: ADDL       	$0xFFFFFFFE, t8
+	  15: PUTL       	t8, R7
+	  16: INCEIPL       	$4
+
+	0xFECF69C:  7E070000  cmp cr4,r7,r0
+	  17: GETL       	R7, t10
+	  18: GETL       	R0, t12
+	  19: CMPL       	t10, t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x4, CR
+	  21: INCEIPL       	$4
+
+	0xFECF6A0:  40920620  bc 4,18,0xFECFCC0
+	  22: Jc18o       	$0xFECFCC0
+
+
+
+. 1998 FECF68C 24
+. 81 5D 00 04 3B 9D 00 08 55 48 E8 FE 38 E8 FF FE 7E 07 00 00 40 92 06 20
+==== BB 1999 (0xFECF6A4) approx BBs exec'd 0 ====
+
+	0xFECF6A4:  823D0008  lwz r17,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0xFECF6A8:  7F88E378  or r8,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0xFECF6AC:  923B0000  stw r17,0(r27)
+	   8: GETL       	R17, t6
+	   9: GETL       	R27, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFECF6B0:  83810074  lwz r28,116(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x74, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R28
+	  16: INCEIPL       	$4
+
+	0xFECF6B4:  7D034378  or r3,r8,r8
+	  17: GETL       	R8, t14
+	  18: PUTL       	t14, R3
+	  19: INCEIPL       	$4
+
+	0xFECF6B8:  81810024  lwz r12,36(r1)
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x24, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R12
+	  24: INCEIPL       	$4
+
+	0xFECF6BC:  7F8803A6  mtlr r28
+	  25: GETL       	R28, t20
+	  26: PUTL       	t20, LR
+	  27: INCEIPL       	$4
+
+	0xFECF6C0:  81C10028  lwz r14,40(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x28, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R14
+	  32: INCEIPL       	$4
+
+	0xFECF6C4:  81E1002C  lwz r15,44(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x2C, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R15
+	  37: INCEIPL       	$4
+
+	0xFECF6C8:  7D818120  mtcrf 0x18,r12
+	  38: GETL       	R12, t30
+	  39: ICRFL       	t30, $0x3, CR
+	  40: ICRFL       	t30, $0x4, CR
+	  41: INCEIPL       	$4
+
+	0xFECF6CC:  82010030  lwz r16,48(r1)
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x30, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R16
+	  46: INCEIPL       	$4
+
+	0xFECF6D0:  82210034  lwz r17,52(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x34, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R17
+	  51: INCEIPL       	$4
+
+	0xFECF6D4:  82410038  lwz r18,56(r1)
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x38, t40
+	  54: LDL       	(t40), t42
+	  55: PUTL       	t42, R18
+	  56: INCEIPL       	$4
+
+	0xFECF6D8:  8261003C  lwz r19,60(r1)
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x3C, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R19
+	  61: INCEIPL       	$4
+
+	0xFECF6DC:  82810040  lwz r20,64(r1)
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x40, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R20
+	  66: INCEIPL       	$4
+
+	0xFECF6E0:  82A10044  lwz r21,68(r1)
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x44, t52
+	  69: LDL       	(t52), t54
+	  70: PUTL       	t54, R21
+	  71: INCEIPL       	$4
+
+	0xFECF6E4:  82C10048  lwz r22,72(r1)
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x48, t56
+	  74: LDL       	(t56), t58
+	  75: PUTL       	t58, R22
+	  76: INCEIPL       	$4
+
+	0xFECF6E8:  82E1004C  lwz r23,76(r1)
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x4C, t60
+	  79: LDL       	(t60), t62
+	  80: PUTL       	t62, R23
+	  81: INCEIPL       	$4
+
+	0xFECF6EC:  83010050  lwz r24,80(r1)
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x50, t64
+	  84: LDL       	(t64), t66
+	  85: PUTL       	t66, R24
+	  86: INCEIPL       	$4
+
+	0xFECF6F0:  83210054  lwz r25,84(r1)
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x54, t68
+	  89: LDL       	(t68), t70
+	  90: PUTL       	t70, R25
+	  91: INCEIPL       	$4
+
+	0xFECF6F4:  83410058  lwz r26,88(r1)
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x58, t72
+	  94: LDL       	(t72), t74
+	  95: PUTL       	t74, R26
+	  96: INCEIPL       	$4
+
+	0xFECF6F8:  8361005C  lwz r27,92(r1)
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x5C, t76
+	  99: LDL       	(t76), t78
+	 100: PUTL       	t78, R27
+	 101: INCEIPL       	$4
+
+	0xFECF6FC:  83810060  lwz r28,96(r1)
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x60, t80
+	 104: LDL       	(t80), t82
+	 105: PUTL       	t82, R28
+	 106: INCEIPL       	$4
+
+	0xFECF700:  83A10064  lwz r29,100(r1)
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x64, t84
+	 109: LDL       	(t84), t86
+	 110: PUTL       	t86, R29
+	 111: INCEIPL       	$4
+
+	0xFECF704:  83C10068  lwz r30,104(r1)
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x68, t88
+	 114: LDL       	(t88), t90
+	 115: PUTL       	t90, R30
+	 116: INCEIPL       	$4
+
+	0xFECF708:  83E1006C  lwz r31,108(r1)
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0x6C, t92
+	 119: LDL       	(t92), t94
+	 120: PUTL       	t94, R31
+	 121: INCEIPL       	$4
+
+	0xFECF70C:  38210070  addi r1,r1,112
+	 122: GETL       	R1, t96
+	 123: ADDL       	$0x70, t96
+	 124: PUTL       	t96, R1
+	 125: INCEIPL       	$4
+
+	0xFECF710:  4E800020  blr
+	 126: GETL       	LR, t98
+	 127: JMPo-r       	t98  ($4)
+
+
+
+. 1999 FECF6A4 112
+. 82 3D 00 08 7F 88 E3 78 92 3B 00 00 83 81 00 74 7D 03 43 78 81 81 00 24 7F 88 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+==== BB 2000 (0xFED67C8) approx BBs exec'd 0 ====
+
+	0xFED67C8:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFED67CC:  2F830000  cmpi cr7,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x7, CR
+	   6: INCEIPL       	$4
+
+	0xFED67D0:  7F85E378  or r5,r28,r28
+	   7: GETL       	R28, t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFED67D4:  38000000  li r0,0
+	  10: MOVL       	$0x0, t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0xFED67D8:  419E000C  bc 12,30,0xFED67E4
+	  13: Js30o       	$0xFED67E4
+
+
+
+. 2000 FED67C8 20
+. 7F A4 EB 78 2F 83 00 00 7F 85 E3 78 38 00 00 00 41 9E 00 0C
+==== BB 2001 (0xFED67DC) approx BBs exec'd 0 ====
+
+	0xFED67DC:  48001EFD  bl 0xFED86D8
+	   0: MOVL       	$0xFED67E0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2001 FED67DC 4
+. 48 00 1E FD
+==== BB 2002 (0xFED876C) approx BBs exec'd 0 ====
+
+	0xFED876C:  88FD0000  lbz r7,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0xFED8770:  3BBD0001  addi r29,r29,1
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFED8774:  98FF0000  stb r7,0(r31)
+	   8: GETL       	R7, t6
+	   9: GETL       	R31, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFED8778:  3BFF0001  addi r31,r31,1
+	  12: GETL       	R31, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0xFED877C:  4200FFF0  bc 16,0,0xFED876C
+	  16: GETL       	CTR, t12
+	  17: ADDL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, CTR
+	  19: JIFZL       	t12, $0xFED8780
+	  20: JMPo       	$0xFED876C  ($4)
+
+
+
+. 2002 FED876C 20
+. 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+==== BB 2003 (0xFED67E0) approx BBs exec'd 0 ====
+
+	0xFED67E0:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFED67E4:  80810024  lwz r4,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFED67E8:  7C030378  or r3,r0,r0
+	   8: GETL       	R0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFED67EC:  83810010  lwz r28,16(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x10, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R28
+	  15: INCEIPL       	$4
+
+	0xFED67F0:  83A10014  lwz r29,20(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x14, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R29
+	  20: INCEIPL       	$4
+
+	0xFED67F4:  7C8803A6  mtlr r4
+	  21: GETL       	R4, t16
+	  22: PUTL       	t16, LR
+	  23: INCEIPL       	$4
+
+	0xFED67F8:  83C10018  lwz r30,24(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x18, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R30
+	  28: INCEIPL       	$4
+
+	0xFED67FC:  38210020  addi r1,r1,32
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x20, t22
+	  31: PUTL       	t22, R1
+	  32: INCEIPL       	$4
+
+	0xFED6800:  4E800020  blr
+	  33: GETL       	LR, t24
+	  34: JMPo-r       	t24  ($4)
+
+
+
+. 2003 FED67E0 36
+. 7C 60 1B 78 80 81 00 24 7C 03 03 78 83 81 00 10 83 A1 00 14 7C 88 03 A6 83 C1 00 18 38 21 00 20 4E 80 00 20
+==== BB 2004 (0xFE880B4) approx BBs exec'd 0 ====
+
+	0xFE880B4:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE880B8:  907A0004  stw r3,4(r26)
+	   4: GETL       	R3, t4
+	   5: GETL       	R26, t6
+	   6: ADDL       	$0x4, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFE880BC:  4186044C  bc 12,6,0xFE88508
+	   9: Js06o       	$0xFE88508
+
+
+
+. 2004 FE880B4 12
+. 2C 83 00 00 90 7A 00 04 41 86 04 4C
+==== BB 2005 (0xFE880C0) approx BBs exec'd 0 ====
+
+	0xFE880C0:  822F0000  lwz r17,0(r15)
+	   0: GETL       	R15, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R17
+	   3: INCEIPL       	$4
+
+	0xFE880C4:  3B800000  li r28,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R28
+	   6: INCEIPL       	$4
+
+	0xFE880C8:  3B000002  li r24,2
+	   7: MOVL       	$0x2, t6
+	   8: PUTL       	t6, R24
+	   9: INCEIPL       	$4
+
+	0xFE880CC:  3B20FFFF  li r25,-1
+	  10: MOVL       	$0xFFFFFFFF, t8
+	  11: PUTL       	t8, R25
+	  12: INCEIPL       	$4
+
+	0xFE880D0:  923A0000  stw r17,0(r26)
+	  13: GETL       	R17, t10
+	  14: GETL       	R26, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0xFE880D4:  3BBA0008  addi r29,r26,8
+	  17: GETL       	R26, t14
+	  18: ADDL       	$0x8, t14
+	  19: PUTL       	t14, R29
+	  20: INCEIPL       	$4
+
+	0xFE880D8:  934F0000  stw r26,0(r15)
+	  21: GETL       	R26, t16
+	  22: GETL       	R15, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0xFE880DC:  3B7F0008  addi r27,r31,8
+	  25: GETL       	R31, t20
+	  26: ADDL       	$0x8, t20
+	  27: PUTL       	t20, R27
+	  28: INCEIPL       	$4
+
+	0xFE880E0:  2F1C0006  cmpi cr6,r28,6
+	  29: GETL       	R28, t22
+	  30: MOVL       	$0x6, t26
+	  31: CMPL       	t22, t26, t24  (-rSo)
+	  32: ICRFL       	t24, $0x6, CR
+	  33: INCEIPL       	$4
+
+	0xFE880E4:  7F83E378  or r3,r28,r28
+	  34: GETL       	R28, t28
+	  35: PUTL       	t28, R3
+	  36: INCEIPL       	$4
+
+	0xFE880E8:  3B9C0001  addi r28,r28,1
+	  37: MOVL       	$0x1, t30
+	  38: PUTL       	t30, R28
+	  39: INCEIPL       	$4
+
+	0xFE880EC:  419A002C  bc 12,26,0xFE88118
+	  40: Js26o       	$0xFE88118
+
+
+
+. 2005 FE880C0 48
+. 82 2F 00 00 3B 80 00 00 3B 00 00 02 3B 20 FF FF 92 3A 00 00 3B BA 00 08 93 4F 00 00 3B 7F 00 08 2F 1C 00 06 7F 83 E3 78 3B 9C 00 01 41 9A 00 2C
+==== BB 2006 (0xFE880F0) approx BBs exec'd 0 ====
+
+	0xFE880F0:  809B0008  lwz r4,8(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE880F4:  80BB000C  lwz r5,12(r27)
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFE880F8:  4BFFF579  bl 0xFE87670
+	  10: MOVL       	$0xFE880FC, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0xFE87670  ($4)
+
+
+
+. 2006 FE880F0 12
+. 80 9B 00 08 80 BB 00 0C 4B FF F5 79
+==== BB 2007 _nl_intern_locale_data(0xFE87670) approx BBs exec'd 0 ====
+
+	0xFE87670:  2B850007  cmpli cr7,r5,7
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x7, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE87674:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFE87678:  9421FFE0  stwu r1,-32(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFE0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFE8767C:  481207D5  bl 0xFFA7E50
+	  14: MOVL       	$0xFE87680, t12
+	  15: PUTL       	t12, LR
+	  16: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2007 FE87670 16
+. 2B 85 00 07 7C 08 02 A6 94 21 FF E0 48 12 07 D5
+==== BB 2008 (0xFE87680) approx BBs exec'd 0 ====
+
+	0xFE87680:  93810010  stw r28,16(r1)
+	   0: GETL       	R28, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x10, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE87684:  7C7C1B78  or r28,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFE87688:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE8768C:  7C9D2378  or r29,r4,r4
+	  13: GETL       	R4, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0xFE87690:  93C10018  stw r30,24(r1)
+	  16: GETL       	R30, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x18, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFE87694:  7FC802A6  mflr r30
+	  21: GETL       	LR, t16
+	  22: PUTL       	t16, R30
+	  23: INCEIPL       	$4
+
+	0xFE87698:  93E1001C  stw r31,28(r1)
+	  24: GETL       	R31, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x1C, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFE8769C:  7CBF2B78  or r31,r5,r5
+	  29: GETL       	R5, t22
+	  30: PUTL       	t22, R31
+	  31: INCEIPL       	$4
+
+	0xFE876A0:  9361000C  stw r27,12(r1)
+	  32: GETL       	R27, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0xC, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xFE876A4:  90010024  stw r0,36(r1)
+	  37: GETL       	R0, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x24, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFE876A8:  409D0188  bc 4,29,0xFE87830
+	  42: Jc29o       	$0xFE87830
+
+
+
+. 2008 FE87680 44
+. 93 81 00 10 7C 7C 1B 78 93 A1 00 14 7C 9D 23 78 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 7C BF 2B 78 93 61 00 0C 90 01 00 24 40 9D 01 88
+==== BB 2009 (0xFE876AC) approx BBs exec'd 0 ====
+
+	0xFE876AC:  6C642003  xoris r4,r3,0x2003
+	   0: GETL       	R3, t0
+	   1: XORL       	$0x20030000, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0xFE876B0:  813D0000  lwz r9,0(r29)
+	   4: GETL       	R29, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFE876B4:  68831115  xori r3,r4,0x1115
+	   8: GETL       	R4, t6
+	   9: XORL       	$0x1115, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0xFE876B8:  7C091800  cmp cr0,r9,r3
+	  12: GETL       	R9, t8
+	  13: GETL       	R3, t10
+	  14: CMPL       	t8, t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFE876BC:  40820174  bc 4,2,0xFE87830
+	  17: Jc02o       	$0xFE87830
+
+
+
+. 2009 FE876AC 20
+. 6C 64 20 03 81 3D 00 00 68 83 11 15 7C 09 18 00 40 82 01 74
+==== BB 2010 (0xFE876C0) approx BBs exec'd 0 ====
+
+	0xFE876C0:  80DE0114  lwz r6,276(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x114, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFE876C4:  579B103A  rlwinm r27,r28,2,0,29
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R27
+	   8: INCEIPL       	$4
+
+	0xFE876C8:  807D0004  lwz r3,4(r29)
+	   9: GETL       	R29, t6
+	  10: ADDL       	$0x4, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFE876CC:  7CBB302E  lwzx r5,r27,r6
+	  14: GETL       	R6, t10
+	  15: GETL       	R27, t12
+	  16: ADDL       	t12, t10
+	  17: LDL       	(t10), t14
+	  18: PUTL       	t14, R5
+	  19: INCEIPL       	$4
+
+	0xFE876D0:  7C832840  cmpl cr1,r3,r5
+	  20: GETL       	R3, t16
+	  21: GETL       	R5, t18
+	  22: CMPUL       	t16, t18, t20  (-rSo)
+	  23: ICRFL       	t20, $0x1, CR
+	  24: INCEIPL       	$4
+
+	0xFE876D4:  4184015C  bc 12,4,0xFE87830
+	  25: Js04o       	$0xFE87830
+
+
+
+. 2010 FE876C0 24
+. 80 DE 01 14 57 9B 10 3A 80 7D 00 04 7C BB 30 2E 7C 83 28 40 41 84 01 5C
+==== BB 2011 (0xFE876D8) approx BBs exec'd 0 ====
+
+	0xFE876D8:  5463103A  rlwinm r3,r3,2,0,29
+	   0: GETL       	R3, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE876DC:  38E30008  addi r7,r3,8
+	   4: GETL       	R3, t2
+	   5: ADDL       	$0x8, t2
+	   6: PUTL       	t2, R7
+	   7: INCEIPL       	$4
+
+	0xFE876E0:  7F07F840  cmpl cr6,r7,r31
+	   8: GETL       	R7, t4
+	   9: GETL       	R31, t6
+	  10: CMPUL       	t4, t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0xFE876E4:  4098014C  bc 4,24,0xFE87830
+	  13: Jc24o       	$0xFE87830
+
+
+
+. 2011 FE876D8 16
+. 54 63 10 3A 38 E3 00 08 7F 07 F8 40 40 98 01 4C
+==== BB 2012 (0xFE876E8) approx BBs exec'd 0 ====
+
+	0xFE876E8:  38630024  addi r3,r3,36
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x24, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE876EC:  48122C1D  bl 0xFFAA308
+	   4: MOVL       	$0xFE876F0, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 2012 FE876E8 8
+. 38 63 00 24 48 12 2C 1D
+==== BB 2013 (0xFE876F0) approx BBs exec'd 0 ====
+
+	0xFE876F0:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFE876F4:  2F830000  cmpi cr7,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x7, CR
+	   6: INCEIPL       	$4
+
+	0xFE876F8:  419E0110  bc 12,30,0xFE87808
+	   7: Js30o       	$0xFE87808
+
+
+
+. 2013 FE876F0 12
+. 39 60 00 00 2F 83 00 00 41 9E 01 10
+==== BB 2014 (0xFE876FC) approx BBs exec'd 0 ====
+
+	0xFE876FC:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE87700:  7FE5FB78  or r5,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFE87704:  9003001C  stw r0,28(r3)
+	   6: GETL       	R0, t4
+	   7: GETL       	R3, t6
+	   8: ADDL       	$0x1C, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xFE87708:  38E00000  li r7,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0xFE8770C:  90030014  stw r0,20(r3)
+	  14: GETL       	R0, t10
+	  15: GETL       	R3, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFE87710:  90030018  stw r0,24(r3)
+	  19: GETL       	R0, t14
+	  20: GETL       	R3, t16
+	  21: ADDL       	$0x18, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFE87714:  819D0004  lwz r12,4(r29)
+	  24: GETL       	R29, t18
+	  25: ADDL       	$0x4, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R12
+	  28: INCEIPL       	$4
+
+	0xFE87718:  93A30004  stw r29,4(r3)
+	  29: GETL       	R29, t22
+	  30: GETL       	R3, t24
+	  31: ADDL       	$0x4, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE8771C:  280C0000  cmpli cr0,r12,0
+	  34: GETL       	R12, t26
+	  35: MOVL       	$0x0, t30
+	  36: CMPUL       	t26, t30, t28  (-rSo)
+	  37: ICRFL       	t28, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0xFE87720:  93E30008  stw r31,8(r3)
+	  39: GETL       	R31, t32
+	  40: GETL       	R3, t34
+	  41: ADDL       	$0x8, t34
+	  42: STL       	t32, (t34)
+	  43: INCEIPL       	$4
+
+	0xFE87724:  90030010  stw r0,16(r3)
+	  44: GETL       	R0, t36
+	  45: GETL       	R3, t38
+	  46: ADDL       	$0x10, t38
+	  47: STL       	t36, (t38)
+	  48: INCEIPL       	$4
+
+	0xFE87728:  91830020  stw r12,32(r3)
+	  49: GETL       	R12, t40
+	  50: GETL       	R3, t42
+	  51: ADDL       	$0x20, t42
+	  52: STL       	t40, (t42)
+	  53: INCEIPL       	$4
+
+	0xFE8772C:  408100D8  bc 4,1,0xFE87804
+	  54: Jc01o       	$0xFE87804
+
+
+
+. 2014 FE876FC 52
+. 38 00 00 00 7F E5 FB 78 90 03 00 1C 38 E0 00 00 90 03 00 14 90 03 00 18 81 9D 00 04 93 A3 00 04 28 0C 00 00 93 E3 00 08 90 03 00 10 91 83 00 20 40 81 00 D8
+==== BB 2015 (0xFE87730) approx BBs exec'd 0 ====
+
+	0xFE87730:  201C0000  subfic r0,r28,0
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x0, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE87734:  7CC0E114  adde r6,r0,r28
+	   5: GETL       	R0, t4
+	   6: GETL       	R28, t6
+	   7: ADCL       	t4, t6  (-rCa-wCa)
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0xFE87738:  839E1C4C  lwz r28,7244(r30)
+	  10: GETL       	R30, t8
+	  11: ADDL       	$0x1C4C, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R28
+	  14: INCEIPL       	$4
+
+	0xFE8773C:  7FFC1214  add r31,r28,r2
+	  15: GETL       	R28, t12
+	  16: GETL       	R2, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R31
+	  19: INCEIPL       	$4
+
+	0xFE87740:  809E0118  lwz r4,280(r30)
+	  20: GETL       	R30, t16
+	  21: ADDL       	$0x118, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R4
+	  24: INCEIPL       	$4
+
+	0xFE87744:  39030024  addi r8,r3,36
+	  25: GETL       	R3, t20
+	  26: ADDL       	$0x24, t20
+	  27: PUTL       	t20, R8
+	  28: INCEIPL       	$4
+
+	0xFE87748:  54EA103A  rlwinm r10,r7,2,0,29
+	  29: GETL       	R7, t22
+	  30: SHLL       	$0x2, t22
+	  31: PUTL       	t22, R10
+	  32: INCEIPL       	$4
+
+	0xFE8774C:  7D2AEA14  add r9,r10,r29
+	  33: GETL       	R10, t24
+	  34: GETL       	R29, t26
+	  35: ADDL       	t24, t26
+	  36: PUTL       	t26, R9
+	  37: INCEIPL       	$4
+
+	0xFE87750:  21870046  subfic r12,r7,70
+	  38: GETL       	R7, t28
+	  39: MOVL       	$0x46, t30
+	  40: SBBL       	t28, t30  (-wCa)
+	  41: PUTL       	t30, R12
+	  42: INCEIPL       	$4
+
+	0xFE87754:  7D8C6110  subfe r12,r12,r12
+	  43: GETL       	R12, t32
+	  44: GETL       	R12, t34
+	  45: SBBL       	t32, t34  (-rCa-wCa)
+	  46: PUTL       	t34, R12
+	  47: INCEIPL       	$4
+
+	0xFE87758:  7D8C00D0  neg r12,r12
+	  48: GETL       	R12, t36
+	  49: NEGL       	t36
+	  50: PUTL       	t36, R12
+	  51: INCEIPL       	$4
+
+	0xFE8775C:  81690008  lwz r11,8(r9)
+	  52: GETL       	R9, t38
+	  53: ADDL       	$0x8, t38
+	  54: LDL       	(t38), t40
+	  55: PUTL       	t40, R11
+	  56: INCEIPL       	$4
+
+	0xFE87760:  7CC96039  and. r9,r6,r12
+	  57: GETL       	R6, t42
+	  58: GETL       	R12, t44
+	  59: ANDL       	t42, t44
+	  60: PUTL       	t44, R9
+	  61: CMP0L       	t44, t46  (-rSo)
+	  62: ICRFL       	t46, $0x0, CR
+	  63: INCEIPL       	$4
+
+	0xFE87764:  7C855840  cmpl cr1,r5,r11
+	  64: GETL       	R5, t48
+	  65: GETL       	R11, t50
+	  66: CMPUL       	t48, t50, t52  (-rSo)
+	  67: ICRFL       	t52, $0x1, CR
+	  68: INCEIPL       	$4
+
+	0xFE87768:  41840064  bc 12,4,0xFE877CC
+	  69: Js04o       	$0xFE877CC
+
+
+
+. 2015 FE87730 60
+. 20 1C 00 00 7C C0 E1 14 83 9E 1C 4C 7F FC 12 14 80 9E 01 18 39 03 00 24 54 EA 10 3A 7D 2A EA 14 21 87 00 46 7D 8C 61 10 7D 8C 00 D0 81 69 00 08 7C C9 60 39 7C 85 58 40 41 84 00 64
+==== BB 2016 (0xFE8776C) approx BBs exec'd 0 ====
+
+	0xFE8776C:  40820018  bc 4,2,0xFE87784
+	   0: Jc02o       	$0xFE87784
+
+
+
+. 2016 FE8776C 4
+. 40 82 00 18
+==== BB 2017 (0xFE87770) approx BBs exec'd 0 ====
+
+	0xFE87770:  7F9B202E  lwzx r28,r27,r4
+	   0: GETL       	R4, t0
+	   1: GETL       	R27, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R28
+	   5: INCEIPL       	$4
+
+	0xFE87774:  71600003  andi. r0,r11,0x3
+	   6: GETL       	R11, t6
+	   7: ANDL       	$0x3, t6
+	   8: PUTL       	t6, R0
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFE87778:  7CAAE02E  lwzx r5,r10,r28
+	  12: GETL       	R28, t10
+	  13: GETL       	R10, t12
+	  14: ADDL       	t12, t10
+	  15: LDL       	(t10), t14
+	  16: PUTL       	t14, R5
+	  17: INCEIPL       	$4
+
+	0xFE8777C:  2F050005  cmpi cr6,r5,5
+	  18: GETL       	R5, t16
+	  19: MOVL       	$0x5, t20
+	  20: CMPL       	t16, t20, t18  (-rSo)
+	  21: ICRFL       	t18, $0x6, CR
+	  22: INCEIPL       	$4
+
+	0xFE87780:  419A00EC  bc 12,26,0xFE8786C
+	  23: Js26o       	$0xFE8786C
+
+
+
+. 2017 FE87770 20
+. 7F 9B 20 2E 71 60 00 03 7C AA E0 2E 2F 05 00 05 41 9A 00 EC
+==== BB 2018 (0xFE87784) approx BBs exec'd 0 ====
+
+	0xFE87784:  81430004  lwz r10,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFE87788:  7C0A5A14  add r0,r10,r11
+	   5: GETL       	R10, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFE8778C:  90080000  stw r0,0(r8)
+	  10: GETL       	R0, t8
+	  11: GETL       	R8, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFE87790:  38E70001  addi r7,r7,1
+	  14: GETL       	R7, t12
+	  15: ADDL       	$0x1, t12
+	  16: PUTL       	t12, R7
+	  17: INCEIPL       	$4
+
+	0xFE87794:  81630020  lwz r11,32(r3)
+	  18: GETL       	R3, t14
+	  19: ADDL       	$0x20, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R11
+	  22: INCEIPL       	$4
+
+	0xFE87798:  39080004  addi r8,r8,4
+	  23: GETL       	R8, t18
+	  24: ADDL       	$0x4, t18
+	  25: PUTL       	t18, R8
+	  26: INCEIPL       	$4
+
+	0xFE8779C:  7F8B3840  cmpl cr7,r11,r7
+	  27: GETL       	R11, t20
+	  28: GETL       	R7, t22
+	  29: CMPUL       	t20, t22, t24  (-rSo)
+	  30: ICRFL       	t24, $0x7, CR
+	  31: INCEIPL       	$4
+
+	0xFE877A0:  409D0064  bc 4,29,0xFE87804
+	  32: Jc29o       	$0xFE87804
+
+
+
+. 2018 FE87784 32
+. 81 43 00 04 7C 0A 5A 14 90 08 00 00 38 E7 00 01 81 63 00 20 39 08 00 04 7F 8B 38 40 40 9D 00 64
+==== BB 2019 (0xFE877A4) approx BBs exec'd 0 ====
+
+	0xFE877A4:  54EA103A  rlwinm r10,r7,2,0,29
+	   0: GETL       	R7, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0xFE877A8:  80A30008  lwz r5,8(r3)
+	   4: GETL       	R3, t2
+	   5: ADDL       	$0x8, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFE877AC:  7D2AEA14  add r9,r10,r29
+	   9: GETL       	R10, t6
+	  10: GETL       	R29, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFE877B0:  21870046  subfic r12,r7,70
+	  14: GETL       	R7, t10
+	  15: MOVL       	$0x46, t12
+	  16: SBBL       	t10, t12  (-wCa)
+	  17: PUTL       	t12, R12
+	  18: INCEIPL       	$4
+
+	0xFE877B4:  7D8C6110  subfe r12,r12,r12
+	  19: GETL       	R12, t14
+	  20: GETL       	R12, t16
+	  21: SBBL       	t14, t16  (-rCa-wCa)
+	  22: PUTL       	t16, R12
+	  23: INCEIPL       	$4
+
+	0xFE877B8:  7D8C00D0  neg r12,r12
+	  24: GETL       	R12, t18
+	  25: NEGL       	t18
+	  26: PUTL       	t18, R12
+	  27: INCEIPL       	$4
+
+	0xFE877BC:  81690008  lwz r11,8(r9)
+	  28: GETL       	R9, t20
+	  29: ADDL       	$0x8, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R11
+	  32: INCEIPL       	$4
+
+	0xFE877C0:  7CC96039  and. r9,r6,r12
+	  33: GETL       	R6, t24
+	  34: GETL       	R12, t26
+	  35: ANDL       	t24, t26
+	  36: PUTL       	t26, R9
+	  37: CMP0L       	t26, t28  (-rSo)
+	  38: ICRFL       	t28, $0x0, CR
+	  39: INCEIPL       	$4
+
+	0xFE877C4:  7C855840  cmpl cr1,r5,r11
+	  40: GETL       	R5, t30
+	  41: GETL       	R11, t32
+	  42: CMPUL       	t30, t32, t34  (-rSo)
+	  43: ICRFL       	t34, $0x1, CR
+	  44: INCEIPL       	$4
+
+	0xFE877C8:  4084FFA4  bc 4,4,0xFE8776C
+	  45: Jc04o       	$0xFE8776C
+
+
+
+. 2019 FE877A4 40
+. 54 EA 10 3A 80 A3 00 08 7D 2A EA 14 21 87 00 46 7D 8C 61 10 7D 8C 00 D0 81 69 00 08 7C C9 60 39 7C 85 58 40 40 84 FF A4
+==== BB 2020 (0xFE8786C) approx BBs exec'd 0 ====
+
+	0xFE8786C:  40A2FF60  bc 5,2,0xFE877CC
+	   0: Jc02o       	$0xFE877CC
+
+
+
+. 2020 FE8786C 4
+. 40 A2 FF 60
+==== BB 2021 (0xFE87870) approx BBs exec'd 0 ====
+
+	0xFE87870:  81830004  lwz r12,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFE87874:  7C0C582E  lwzx r0,r12,r11
+	   5: GETL       	R11, t4
+	   6: GETL       	R12, t6
+	   7: ADDL       	t6, t4
+	   8: LDL       	(t4), t8
+	   9: PUTL       	t8, R0
+	  10: INCEIPL       	$4
+
+	0xFE87878:  4BFFFF14  b 0xFE8778C
+	  11: JMPo       	$0xFE8778C  ($4)
+
+
+
+. 2021 FE87870 12
+. 81 83 00 04 7C 0C 58 2E 4B FF FF 14
+==== BB 2022 (0xFE8778C) approx BBs exec'd 0 ====
+
+	0xFE8778C:  90080000  stw r0,0(r8)
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE87790:  38E70001  addi r7,r7,1
+	   4: GETL       	R7, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0xFE87794:  81630020  lwz r11,32(r3)
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x20, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0xFE87798:  39080004  addi r8,r8,4
+	  13: GETL       	R8, t10
+	  14: ADDL       	$0x4, t10
+	  15: PUTL       	t10, R8
+	  16: INCEIPL       	$4
+
+	0xFE8779C:  7F8B3840  cmpl cr7,r11,r7
+	  17: GETL       	R11, t12
+	  18: GETL       	R7, t14
+	  19: CMPUL       	t12, t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0xFE877A0:  409D0064  bc 4,29,0xFE87804
+	  22: Jc29o       	$0xFE87804
+
+
+
+. 2022 FE8778C 24
+. 90 08 00 00 38 E7 00 01 81 63 00 20 39 08 00 04 7F 8B 38 40 40 9D 00 64
+==== BB 2023 (0xFE87804) approx BBs exec'd 0 ====
+
+	0xFE87804:  7C6B1B78  or r11,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFE87808:  83A10024  lwz r29,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFE8780C:  7D635B78  or r3,r11,r11
+	   8: GETL       	R11, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFE87810:  8361000C  lwz r27,12(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0xC, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R27
+	  15: INCEIPL       	$4
+
+	0xFE87814:  7FA803A6  mtlr r29
+	  16: GETL       	R29, t12
+	  17: PUTL       	t12, LR
+	  18: INCEIPL       	$4
+
+	0xFE87818:  83810010  lwz r28,16(r1)
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x10, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R28
+	  23: INCEIPL       	$4
+
+	0xFE8781C:  83A10014  lwz r29,20(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x14, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R29
+	  28: INCEIPL       	$4
+
+	0xFE87820:  83C10018  lwz r30,24(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x18, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R30
+	  33: INCEIPL       	$4
+
+	0xFE87824:  83E1001C  lwz r31,28(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x1C, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R31
+	  38: INCEIPL       	$4
+
+	0xFE87828:  38210020  addi r1,r1,32
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x20, t30
+	  41: PUTL       	t30, R1
+	  42: INCEIPL       	$4
+
+	0xFE8782C:  4E800020  blr
+	  43: GETL       	LR, t32
+	  44: JMPo-r       	t32  ($4)
+
+
+
+. 2023 FE87804 44
+. 7C 6B 1B 78 83 A1 00 24 7D 63 5B 78 83 61 00 0C 7F A8 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2024 (0xFE880FC) approx BBs exec'd 0 ====
+
+	0xFE880FC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE88100:  907D0000  stw r3,0(r29)
+	   4: GETL       	R3, t4
+	   5: GETL       	R29, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0xFE88104:  419E0014  bc 12,30,0xFE88118
+	   8: Js30o       	$0xFE88118
+
+
+
+. 2024 FE880FC 12
+. 2F 83 00 00 90 7D 00 00 41 9E 00 14
+==== BB 2025 (0xFE88108) approx BBs exec'd 0 ====
+
+	0xFE88108:  81FA0004  lwz r15,4(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0xFE8810C:  93230018  stw r25,24(r3)
+	   5: GETL       	R25, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x18, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE88110:  91E30000  stw r15,0(r3)
+	  10: GETL       	R15, t8
+	  11: GETL       	R3, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFE88114:  9303000C  stw r24,12(r3)
+	  14: GETL       	R24, t12
+	  15: GETL       	R3, t14
+	  16: ADDL       	$0xC, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0xFE88118:  2E1C000C  cmpi cr4,r28,12
+	  19: GETL       	R28, t16
+	  20: MOVL       	$0xC, t20
+	  21: CMPL       	t16, t20, t18  (-rSo)
+	  22: ICRFL       	t18, $0x4, CR
+	  23: INCEIPL       	$4
+
+	0xFE8811C:  3B7B0008  addi r27,r27,8
+	  24: GETL       	R27, t22
+	  25: ADDL       	$0x8, t22
+	  26: PUTL       	t22, R27
+	  27: INCEIPL       	$4
+
+	0xFE88120:  3BBD0004  addi r29,r29,4
+	  28: GETL       	R29, t24
+	  29: ADDL       	$0x4, t24
+	  30: PUTL       	t24, R29
+	  31: INCEIPL       	$4
+
+	0xFE88124:  4091FFBC  bc 4,17,0xFE880E0
+	  32: Jc17o       	$0xFE880E0
+
+
+
+. 2025 FE88108 32
+. 81 FA 00 04 93 23 00 18 91 E3 00 00 93 03 00 0C 2E 1C 00 0C 3B 7B 00 08 3B BD 00 04 40 91 FF BC
+==== BB 2026 (0xFE880E0) approx BBs exec'd 0 ====
+
+	0xFE880E0:  2F1C0006  cmpi cr6,r28,6
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFE880E4:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t6
+	   6: PUTL       	t6, R3
+	   7: INCEIPL       	$4
+
+	0xFE880E8:  3B9C0001  addi r28,r28,1
+	   8: GETL       	R28, t8
+	   9: ADDL       	$0x1, t8
+	  10: PUTL       	t8, R28
+	  11: INCEIPL       	$4
+
+	0xFE880EC:  419A002C  bc 12,26,0xFE88118
+	  12: Js26o       	$0xFE88118
+
+
+
+. 2026 FE880E0 16
+. 2F 1C 00 06 7F 83 E3 78 3B 9C 00 01 41 9A 00 2C
+==== BB 2027 (0xFE88118) approx BBs exec'd 0 ====
+
+	0xFE88118:  2E1C000C  cmpi cr4,r28,12
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0xC, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0xFE8811C:  3B7B0008  addi r27,r27,8
+	   5: GETL       	R27, t6
+	   6: ADDL       	$0x8, t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFE88120:  3BBD0004  addi r29,r29,4
+	   9: GETL       	R29, t8
+	  10: ADDL       	$0x4, t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xFE88124:  4091FFBC  bc 4,17,0xFE880E0
+	  13: Jc17o       	$0xFE880E0
+
+
+
+. 2027 FE88118 16
+. 2E 1C 00 0C 3B 7B 00 08 3B BD 00 04 40 91 FF BC
+==== BB 2028 (0xFE88128) approx BBs exec'd 0 ====
+
+	0xFE88128:  55D4103A  rlwinm r20,r14,2,0,29
+	   0: GETL       	R14, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R20
+	   3: INCEIPL       	$4
+
+	0xFE8812C:  81DA0004  lwz r14,4(r26)
+	   4: GETL       	R26, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R14
+	   8: INCEIPL       	$4
+
+	0xFE88130:  7E74D214  add r19,r20,r26
+	   9: GETL       	R20, t6
+	  10: GETL       	R26, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R19
+	  13: INCEIPL       	$4
+
+	0xFE88134:  81330008  lwz r9,8(r19)
+	  14: GETL       	R19, t10
+	  15: ADDL       	$0x8, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0xFE88138:  91D20000  stw r14,0(r18)
+	  19: GETL       	R14, t14
+	  20: GETL       	R18, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFE8813C:  4BFFFCBC  b 0xFE87DF8
+	  23: JMPo       	$0xFE87DF8  ($4)
+
+
+
+. 2028 FE88128 24
+. 55 D4 10 3A 81 DA 00 04 7E 74 D2 14 81 33 00 08 91 D2 00 00 4B FF FC BC
+==== BB 2029 (0xFE87DF8) approx BBs exec'd 0 ====
+
+	0xFE87DF8:  81010000  lwz r8,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0xFE87DFC:  7D234B78  or r3,r9,r9
+	   4: GETL       	R9, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFE87E00:  82480004  lwz r18,4(r8)
+	   7: GETL       	R8, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R18
+	  11: INCEIPL       	$4
+
+	0xFE87E04:  8148FFB4  lwz r10,-76(r8)
+	  12: GETL       	R8, t10
+	  13: ADDL       	$0xFFFFFFB4, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R10
+	  16: INCEIPL       	$4
+
+	0xFE87E08:  7E4803A6  mtlr r18
+	  17: GETL       	R18, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0xFE87E0C:  81C8FFB8  lwz r14,-72(r8)
+	  20: GETL       	R8, t16
+	  21: ADDL       	$0xFFFFFFB8, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R14
+	  24: INCEIPL       	$4
+
+	0xFE87E10:  81E8FFBC  lwz r15,-68(r8)
+	  25: GETL       	R8, t20
+	  26: ADDL       	$0xFFFFFFBC, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R15
+	  29: INCEIPL       	$4
+
+	0xFE87E14:  7D408120  mtcrf 0x8,r10
+	  30: GETL       	R10, t24
+	  31: ICRFL       	t24, $0x4, CR
+	  32: INCEIPL       	$4
+
+	0xFE87E18:  8208FFC0  lwz r16,-64(r8)
+	  33: GETL       	R8, t26
+	  34: ADDL       	$0xFFFFFFC0, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R16
+	  37: INCEIPL       	$4
+
+	0xFE87E1C:  8228FFC4  lwz r17,-60(r8)
+	  38: GETL       	R8, t30
+	  39: ADDL       	$0xFFFFFFC4, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R17
+	  42: INCEIPL       	$4
+
+	0xFE87E20:  8248FFC8  lwz r18,-56(r8)
+	  43: GETL       	R8, t34
+	  44: ADDL       	$0xFFFFFFC8, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R18
+	  47: INCEIPL       	$4
+
+	0xFE87E24:  8268FFCC  lwz r19,-52(r8)
+	  48: GETL       	R8, t38
+	  49: ADDL       	$0xFFFFFFCC, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R19
+	  52: INCEIPL       	$4
+
+	0xFE87E28:  8288FFD0  lwz r20,-48(r8)
+	  53: GETL       	R8, t42
+	  54: ADDL       	$0xFFFFFFD0, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R20
+	  57: INCEIPL       	$4
+
+	0xFE87E2C:  82A8FFD4  lwz r21,-44(r8)
+	  58: GETL       	R8, t46
+	  59: ADDL       	$0xFFFFFFD4, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R21
+	  62: INCEIPL       	$4
+
+	0xFE87E30:  82C8FFD8  lwz r22,-40(r8)
+	  63: GETL       	R8, t50
+	  64: ADDL       	$0xFFFFFFD8, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R22
+	  67: INCEIPL       	$4
+
+	0xFE87E34:  82E8FFDC  lwz r23,-36(r8)
+	  68: GETL       	R8, t54
+	  69: ADDL       	$0xFFFFFFDC, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R23
+	  72: INCEIPL       	$4
+
+	0xFE87E38:  8308FFE0  lwz r24,-32(r8)
+	  73: GETL       	R8, t58
+	  74: ADDL       	$0xFFFFFFE0, t58
+	  75: LDL       	(t58), t60
+	  76: PUTL       	t60, R24
+	  77: INCEIPL       	$4
+
+	0xFE87E3C:  8328FFE4  lwz r25,-28(r8)
+	  78: GETL       	R8, t62
+	  79: ADDL       	$0xFFFFFFE4, t62
+	  80: LDL       	(t62), t64
+	  81: PUTL       	t64, R25
+	  82: INCEIPL       	$4
+
+	0xFE87E40:  8348FFE8  lwz r26,-24(r8)
+	  83: GETL       	R8, t66
+	  84: ADDL       	$0xFFFFFFE8, t66
+	  85: LDL       	(t66), t68
+	  86: PUTL       	t68, R26
+	  87: INCEIPL       	$4
+
+	0xFE87E44:  8368FFEC  lwz r27,-20(r8)
+	  88: GETL       	R8, t70
+	  89: ADDL       	$0xFFFFFFEC, t70
+	  90: LDL       	(t70), t72
+	  91: PUTL       	t72, R27
+	  92: INCEIPL       	$4
+
+	0xFE87E48:  8388FFF0  lwz r28,-16(r8)
+	  93: GETL       	R8, t74
+	  94: ADDL       	$0xFFFFFFF0, t74
+	  95: LDL       	(t74), t76
+	  96: PUTL       	t76, R28
+	  97: INCEIPL       	$4
+
+	0xFE87E4C:  83A8FFF4  lwz r29,-12(r8)
+	  98: GETL       	R8, t78
+	  99: ADDL       	$0xFFFFFFF4, t78
+	 100: LDL       	(t78), t80
+	 101: PUTL       	t80, R29
+	 102: INCEIPL       	$4
+
+	0xFE87E50:  83C8FFF8  lwz r30,-8(r8)
+	 103: GETL       	R8, t82
+	 104: ADDL       	$0xFFFFFFF8, t82
+	 105: LDL       	(t82), t84
+	 106: PUTL       	t84, R30
+	 107: INCEIPL       	$4
+
+	0xFE87E54:  83E8FFFC  lwz r31,-4(r8)
+	 108: GETL       	R8, t86
+	 109: ADDL       	$0xFFFFFFFC, t86
+	 110: LDL       	(t86), t88
+	 111: PUTL       	t88, R31
+	 112: INCEIPL       	$4
+
+	0xFE87E58:  7D014378  or r1,r8,r8
+	 113: GETL       	R8, t90
+	 114: PUTL       	t90, R1
+	 115: INCEIPL       	$4
+
+	0xFE87E5C:  4E800020  blr
+	 116: GETL       	LR, t92
+	 117: JMPo-r       	t92  ($4)
+
+
+
+. 2029 FE87DF8 104
+. 81 01 00 00 7D 23 4B 78 82 48 00 04 81 48 FF B4 7E 48 03 A6 81 C8 FF B8 81 E8 FF BC 7D 40 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+==== BB 2030 (0xFE86FAC) approx BBs exec'd 0 ====
+
+	0xFE86FAC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE86FB0:  419E0120  bc 12,30,0xFE870D0
+	   4: Js30o       	$0xFE870D0
+
+
+
+. 2030 FE86FAC 8
+. 2F 83 00 00 41 9E 01 20
+==== BB 2031 (0xFE86FB4) approx BBs exec'd 0 ====
+
+	0xFE86FB4:  80E10000  lwz r7,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0xFE86FB8:  83A70004  lwz r29,4(r7)
+	   4: GETL       	R7, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R29
+	   8: INCEIPL       	$4
+
+	0xFE86FBC:  82C7FFD8  lwz r22,-40(r7)
+	   9: GETL       	R7, t8
+	  10: ADDL       	$0xFFFFFFD8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R22
+	  13: INCEIPL       	$4
+
+	0xFE86FC0:  7FA803A6  mtlr r29
+	  14: GETL       	R29, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFE86FC4:  82E7FFDC  lwz r23,-36(r7)
+	  17: GETL       	R7, t14
+	  18: ADDL       	$0xFFFFFFDC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R23
+	  21: INCEIPL       	$4
+
+	0xFE86FC8:  8307FFE0  lwz r24,-32(r7)
+	  22: GETL       	R7, t18
+	  23: ADDL       	$0xFFFFFFE0, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R24
+	  26: INCEIPL       	$4
+
+	0xFE86FCC:  8327FFE4  lwz r25,-28(r7)
+	  27: GETL       	R7, t22
+	  28: ADDL       	$0xFFFFFFE4, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R25
+	  31: INCEIPL       	$4
+
+	0xFE86FD0:  8347FFE8  lwz r26,-24(r7)
+	  32: GETL       	R7, t26
+	  33: ADDL       	$0xFFFFFFE8, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R26
+	  36: INCEIPL       	$4
+
+	0xFE86FD4:  8367FFEC  lwz r27,-20(r7)
+	  37: GETL       	R7, t30
+	  38: ADDL       	$0xFFFFFFEC, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R27
+	  41: INCEIPL       	$4
+
+	0xFE86FD8:  8387FFF0  lwz r28,-16(r7)
+	  42: GETL       	R7, t34
+	  43: ADDL       	$0xFFFFFFF0, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R28
+	  46: INCEIPL       	$4
+
+	0xFE86FDC:  83A7FFF4  lwz r29,-12(r7)
+	  47: GETL       	R7, t38
+	  48: ADDL       	$0xFFFFFFF4, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R29
+	  51: INCEIPL       	$4
+
+	0xFE86FE0:  83C7FFF8  lwz r30,-8(r7)
+	  52: GETL       	R7, t42
+	  53: ADDL       	$0xFFFFFFF8, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R30
+	  56: INCEIPL       	$4
+
+	0xFE86FE4:  83E7FFFC  lwz r31,-4(r7)
+	  57: GETL       	R7, t46
+	  58: ADDL       	$0xFFFFFFFC, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R31
+	  61: INCEIPL       	$4
+
+	0xFE86FE8:  7CE13B78  or r1,r7,r7
+	  62: GETL       	R7, t50
+	  63: PUTL       	t50, R1
+	  64: INCEIPL       	$4
+
+	0xFE86FEC:  4E800020  blr
+	  65: GETL       	LR, t52
+	  66: JMPo-r       	t52  ($4)
+
+
+
+. 2031 FE86FB4 60
+. 80 E1 00 00 83 A7 00 04 82 C7 FF D8 7F A8 03 A6 82 E7 FF DC 83 07 FF E0 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+==== BB 2032 (0xFE86BB4) approx BBs exec'd 0 ====
+
+	0xFE86BB4:  7C9DC214  add r4,r29,r24
+	   0: GETL       	R29, t0
+	   1: GETL       	R24, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE86BB8:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFE86BBC:  907B0048  stw r3,72(r27)
+	   9: GETL       	R3, t8
+	  10: GETL       	R27, t10
+	  11: ADDL       	$0x48, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFE86BC0:  419E0050  bc 12,30,0xFE86C10
+	  14: Js30o       	$0xFE86C10
+
+
+
+. 2032 FE86BB4 16
+. 7C 9D C2 14 2F 83 00 00 90 7B 00 48 41 9E 00 50
+==== BB 2033 (0xFE86BC4) approx BBs exec'd 0 ====
+
+	0xFE86BC4:  81230018  lwz r9,24(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE86BC8:  2C89FFFF  cmpi cr1,r9,-1
+	   5: GETL       	R9, t4
+	   6: MOVL       	$0xFFFFFFFF, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFE86BCC:  4186000C  bc 12,6,0xFE86BD8
+	  10: Js06o       	$0xFE86BD8
+
+
+
+. 2033 FE86BC4 12
+. 81 23 00 18 2C 89 FF FF 41 86 00 0C
+==== BB 2034 (0xFE86BD8) approx BBs exec'd 0 ====
+
+	0xFE86BD8:  83BB0008  lwz r29,8(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFE86BDC:  7F1DA800  cmp cr6,r29,r21
+	   5: GETL       	R29, t4
+	   6: GETL       	R21, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFE86BE0:  7FA3EB78  or r3,r29,r29
+	  10: GETL       	R29, t10
+	  11: PUTL       	t10, R3
+	  12: INCEIPL       	$4
+
+	0xFE86BE4:  41BAFF9C  bc 13,26,0xFE86B80
+	  13: Js26o       	$0xFE86B80
+
+
+
+. 2034 FE86BD8 16
+. 83 BB 00 08 7F 1D A8 00 7F A3 EB 78 41 BA FF 9C
+==== BB 2035 (0xFE86BE8) approx BBs exec'd 0 ====
+
+	0xFE86BE8:  83440040  lwz r26,64(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x40, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFE86BEC:  7F44D378  or r4,r26,r26
+	   5: GETL       	R26, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE86BF0:  4804F7C1  bl 0xFED63B0
+	   8: MOVL       	$0xFE86BF4, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2035 FE86BE8 12
+. 83 44 00 40 7F 44 D3 78 48 04 F7 C1
+==== BB 2036 (0xFED6430) approx BBs exec'd 0 ====
+
+	0xFED6430:  80A3FFFC  lwz r5,-4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFED6434:  7CAA3279  xor. r10,r5,r6
+	   5: GETL       	R5, t4
+	   6: GETL       	R6, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFED6438:  7C662850  subf r3,r6,r5
+	  12: GETL       	R6, t10
+	  13: GETL       	R5, t12
+	  14: SUBL       	t10, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0xFED643C:  4CA00020  bclr 5,0
+	  17: GETL       	LR, t14
+	  18: Jc00o-r       	t14
+
+
+
+. 2036 FED6430 16
+. 80 A3 FF FC 7C AA 32 79 7C 66 28 50 4C A0 00 20
+==== BB 2037 (0xFE86BF4) approx BBs exec'd 0 ====
+
+	0xFE86BF4:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE86BF8:  7FA3EB78  or r3,r29,r29
+	   4: GETL       	R29, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFE86BFC:  4182FF80  bc 12,2,0xFE86B7C
+	   7: Js02o       	$0xFE86B7C
+
+
+
+. 2037 FE86BF4 12
+. 2C 03 00 00 7F A3 EB 78 41 82 FF 80
+==== BB 2038 (0xFE86C00) approx BBs exec'd 0 ====
+
+	0xFE86C00:  4804FB9D  bl 0xFED679C
+	   0: MOVL       	$0xFE86C04, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFED679C  ($4)
+
+
+
+. 2038 FE86C00 4
+. 48 04 FB 9D
+==== BB 2039 (0xFE86C04) approx BBs exec'd 0 ====
+
+	0xFE86C04:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE86C08:  907B0008  stw r3,8(r27)
+	   4: GETL       	R3, t4
+	   5: GETL       	R27, t6
+	   6: ADDL       	$0x8, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFE86C0C:  409EFF74  bc 4,30,0xFE86B80
+	   9: Jc30o       	$0xFE86B80
+
+
+
+. 2039 FE86C04 12
+. 2F 83 00 00 90 7B 00 08 40 9E FF 74
+==== BB 2040 (0xFE86B80) approx BBs exec'd 0 ====
+
+	0xFE86B80:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE86B84:  3B9CFFFF  addi r28,r28,-1
+	   3: GETL       	R28, t2
+	   4: ADDL       	$0xFFFFFFFF, t2
+	   5: PUTL       	t2, R28
+	   6: INCEIPL       	$4
+
+	0xFE86B88:  2C830000  cmpi cr1,r3,0
+	   7: GETL       	R3, t4
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x1, CR
+	  10: INCEIPL       	$4
+
+	0xFE86B8C:  40850084  bc 4,5,0xFE86C10
+	  11: Jc05o       	$0xFE86C10
+
+
+
+. 2040 FE86B80 16
+. 7F 83 E3 78 3B 9C FF FF 2C 83 00 00 40 85 00 84
+==== BB 2041 (0xFE87C84) approx BBs exec'd 0 ====
+
+	0xFE87C84:  55DB103A  rlwinm r27,r14,2,0,29
+	   0: GETL       	R14, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0xFE87C88:  83BC0004  lwz r29,4(r28)
+	   4: GETL       	R28, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0xFE87C8C:  7EE3BB78  or r3,r23,r23
+	   9: GETL       	R23, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0xFE87C90:  7C1DB800  cmp cr0,r29,r23
+	  12: GETL       	R29, t8
+	  13: GETL       	R23, t10
+	  14: CMPL       	t8, t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFE87C94:  7FA4EB78  or r4,r29,r29
+	  17: GETL       	R29, t14
+	  18: PUTL       	t14, R4
+	  19: INCEIPL       	$4
+
+	0xFE87C98:  41820154  bc 12,2,0xFE87DEC
+	  20: Js02o       	$0xFE87DEC
+
+
+
+. 2041 FE87C84 24
+. 55 DB 10 3A 83 BC 00 04 7E E3 BB 78 7C 1D B8 00 7F A4 EB 78 41 82 01 54
+==== BB 2042 (0xFE87C9C) approx BBs exec'd 0 ====
+
+	0xFE87C9C:  4804E715  bl 0xFED63B0
+	   0: MOVL       	$0xFE87CA0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2042 FE87C9C 4
+. 48 04 E7 15
+==== BB 2043 (0xFED6478) approx BBs exec'd 0 ====
+
+	0xFED6478:  8CA30001  lbzu r5,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFED647C:  8CC40001  lbzu r6,1(r4)
+	   6: GETL       	R4, t4
+	   7: ADDL       	$0x1, t4
+	   8: PUTL       	t4, R4
+	   9: LDB       	(t4), t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0xFED6480:  2C850000  cmpi cr1,r5,0
+	  12: GETL       	R5, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0xFED6484:  7C053000  cmp cr0,r5,r6
+	  16: GETL       	R5, t12
+	  17: GETL       	R6, t14
+	  18: CMPL       	t12, t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0xFED6488:  4086FFD4  bc 4,6,0xFED645C
+	  21: Jc06o       	$0xFED645C
+
+
+
+. 2043 FED6478 20
+. 8C A3 00 01 8C C4 00 01 2C 85 00 00 7C 05 30 00 40 86 FF D4
+==== BB 2044 (0xFED645C) approx BBs exec'd 0 ====
+
+	0xFED645C:  8CA30001  lbzu r5,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFED6460:  40820034  bc 4,2,0xFED6494
+	   6: Jc02o       	$0xFED6494
+
+
+
+. 2044 FED645C 8
+. 8C A3 00 01 40 82 00 34
+==== BB 2045 (0xFED6464) approx BBs exec'd 0 ====
+
+	0xFED6464:  8CC40001  lbzu r6,1(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R4
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFED6468:  2C850000  cmpi cr1,r5,0
+	   6: GETL       	R5, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFED646C:  41860020  bc 12,6,0xFED648C
+	  10: Js06o       	$0xFED648C
+
+
+
+. 2045 FED6464 12
+. 8C C4 00 01 2C 85 00 00 41 86 00 20
+==== BB 2046 (0xFE87CA0) approx BBs exec'd 0 ====
+
+	0xFE87CA0:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE87CA4:  41860148  bc 12,6,0xFE87DEC
+	   4: Js06o       	$0xFE87DEC
+
+
+
+. 2046 FE87CA0 8
+. 2C 83 00 00 41 86 01 48
+==== BB 2047 (0xFE87DEC) approx BBs exec'd 0 ====
+
+	0xFE87DEC:  7D3BE214  add r9,r27,r28
+	   0: GETL       	R27, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE87DF0:  93B20000  stw r29,0(r18)
+	   5: GETL       	R29, t4
+	   6: GETL       	R18, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFE87DF4:  81290008  lwz r9,8(r9)
+	   9: GETL       	R9, t8
+	  10: ADDL       	$0x8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0xFE87DF8:  81010000  lwz r8,0(r1)
+	  14: GETL       	R1, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R8
+	  17: INCEIPL       	$4
+
+	0xFE87DFC:  7D234B78  or r3,r9,r9
+	  18: GETL       	R9, t16
+	  19: PUTL       	t16, R3
+	  20: INCEIPL       	$4
+
+	0xFE87E00:  82480004  lwz r18,4(r8)
+	  21: GETL       	R8, t18
+	  22: ADDL       	$0x4, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R18
+	  25: INCEIPL       	$4
+
+	0xFE87E04:  8148FFB4  lwz r10,-76(r8)
+	  26: GETL       	R8, t22
+	  27: ADDL       	$0xFFFFFFB4, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R10
+	  30: INCEIPL       	$4
+
+	0xFE87E08:  7E4803A6  mtlr r18
+	  31: GETL       	R18, t26
+	  32: PUTL       	t26, LR
+	  33: INCEIPL       	$4
+
+	0xFE87E0C:  81C8FFB8  lwz r14,-72(r8)
+	  34: GETL       	R8, t28
+	  35: ADDL       	$0xFFFFFFB8, t28
+	  36: LDL       	(t28), t30
+	  37: PUTL       	t30, R14
+	  38: INCEIPL       	$4
+
+	0xFE87E10:  81E8FFBC  lwz r15,-68(r8)
+	  39: GETL       	R8, t32
+	  40: ADDL       	$0xFFFFFFBC, t32
+	  41: LDL       	(t32), t34
+	  42: PUTL       	t34, R15
+	  43: INCEIPL       	$4
+
+	0xFE87E14:  7D408120  mtcrf 0x8,r10
+	  44: GETL       	R10, t36
+	  45: ICRFL       	t36, $0x4, CR
+	  46: INCEIPL       	$4
+
+	0xFE87E18:  8208FFC0  lwz r16,-64(r8)
+	  47: GETL       	R8, t38
+	  48: ADDL       	$0xFFFFFFC0, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R16
+	  51: INCEIPL       	$4
+
+	0xFE87E1C:  8228FFC4  lwz r17,-60(r8)
+	  52: GETL       	R8, t42
+	  53: ADDL       	$0xFFFFFFC4, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R17
+	  56: INCEIPL       	$4
+
+	0xFE87E20:  8248FFC8  lwz r18,-56(r8)
+	  57: GETL       	R8, t46
+	  58: ADDL       	$0xFFFFFFC8, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R18
+	  61: INCEIPL       	$4
+
+	0xFE87E24:  8268FFCC  lwz r19,-52(r8)
+	  62: GETL       	R8, t50
+	  63: ADDL       	$0xFFFFFFCC, t50
+	  64: LDL       	(t50), t52
+	  65: PUTL       	t52, R19
+	  66: INCEIPL       	$4
+
+	0xFE87E28:  8288FFD0  lwz r20,-48(r8)
+	  67: GETL       	R8, t54
+	  68: ADDL       	$0xFFFFFFD0, t54
+	  69: LDL       	(t54), t56
+	  70: PUTL       	t56, R20
+	  71: INCEIPL       	$4
+
+	0xFE87E2C:  82A8FFD4  lwz r21,-44(r8)
+	  72: GETL       	R8, t58
+	  73: ADDL       	$0xFFFFFFD4, t58
+	  74: LDL       	(t58), t60
+	  75: PUTL       	t60, R21
+	  76: INCEIPL       	$4
+
+	0xFE87E30:  82C8FFD8  lwz r22,-40(r8)
+	  77: GETL       	R8, t62
+	  78: ADDL       	$0xFFFFFFD8, t62
+	  79: LDL       	(t62), t64
+	  80: PUTL       	t64, R22
+	  81: INCEIPL       	$4
+
+	0xFE87E34:  82E8FFDC  lwz r23,-36(r8)
+	  82: GETL       	R8, t66
+	  83: ADDL       	$0xFFFFFFDC, t66
+	  84: LDL       	(t66), t68
+	  85: PUTL       	t68, R23
+	  86: INCEIPL       	$4
+
+	0xFE87E38:  8308FFE0  lwz r24,-32(r8)
+	  87: GETL       	R8, t70
+	  88: ADDL       	$0xFFFFFFE0, t70
+	  89: LDL       	(t70), t72
+	  90: PUTL       	t72, R24
+	  91: INCEIPL       	$4
+
+	0xFE87E3C:  8328FFE4  lwz r25,-28(r8)
+	  92: GETL       	R8, t74
+	  93: ADDL       	$0xFFFFFFE4, t74
+	  94: LDL       	(t74), t76
+	  95: PUTL       	t76, R25
+	  96: INCEIPL       	$4
+
+	0xFE87E40:  8348FFE8  lwz r26,-24(r8)
+	  97: GETL       	R8, t78
+	  98: ADDL       	$0xFFFFFFE8, t78
+	  99: LDL       	(t78), t80
+	 100: PUTL       	t80, R26
+	 101: INCEIPL       	$4
+
+	0xFE87E44:  8368FFEC  lwz r27,-20(r8)
+	 102: GETL       	R8, t82
+	 103: ADDL       	$0xFFFFFFEC, t82
+	 104: LDL       	(t82), t84
+	 105: PUTL       	t84, R27
+	 106: INCEIPL       	$4
+
+	0xFE87E48:  8388FFF0  lwz r28,-16(r8)
+	 107: GETL       	R8, t86
+	 108: ADDL       	$0xFFFFFFF0, t86
+	 109: LDL       	(t86), t88
+	 110: PUTL       	t88, R28
+	 111: INCEIPL       	$4
+
+	0xFE87E4C:  83A8FFF4  lwz r29,-12(r8)
+	 112: GETL       	R8, t90
+	 113: ADDL       	$0xFFFFFFF4, t90
+	 114: LDL       	(t90), t92
+	 115: PUTL       	t92, R29
+	 116: INCEIPL       	$4
+
+	0xFE87E50:  83C8FFF8  lwz r30,-8(r8)
+	 117: GETL       	R8, t94
+	 118: ADDL       	$0xFFFFFFF8, t94
+	 119: LDL       	(t94), t96
+	 120: PUTL       	t96, R30
+	 121: INCEIPL       	$4
+
+	0xFE87E54:  83E8FFFC  lwz r31,-4(r8)
+	 122: GETL       	R8, t98
+	 123: ADDL       	$0xFFFFFFFC, t98
+	 124: LDL       	(t98), t100
+	 125: PUTL       	t100, R31
+	 126: INCEIPL       	$4
+
+	0xFE87E58:  7D014378  or r1,r8,r8
+	 127: GETL       	R8, t102
+	 128: PUTL       	t102, R1
+	 129: INCEIPL       	$4
+
+	0xFE87E5C:  4E800020  blr
+	 130: GETL       	LR, t104
+	 131: JMPo-r       	t104  ($4)
+
+
+
+. 2047 FE87DEC 116
+. 7D 3B E2 14 93 B2 00 00 81 29 00 08 81 01 00 00 7D 23 4B 78 82 48 00 04 81 48 FF B4 7E 48 03 A6 81 C8 FF B8 81 E8 FF BC 7D 40 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+==== BB 2048 (0xFE86C10) approx BBs exec'd 0 ====
+
+	0xFE86C10:  2F1C0000  cmpi cr6,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE86C14:  3AE00000  li r23,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R23
+	   6: INCEIPL       	$4
+
+	0xFE86C18:  419801F8  bc 12,24,0xFE86E10
+	   7: Js24o       	$0xFE86E10
+
+
+
+. 2048 FE86C10 12
+. 2F 1C 00 00 3A E0 00 00 41 98 01 F8
+==== BB 2049 (0xFE86E10) approx BBs exec'd 0 ====
+
+	0xFE86E10:  38600006  li r3,6
+	   0: MOVL       	$0x6, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE86E14:  389F0010  addi r4,r31,16
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x10, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0xFE86E18:  4BFFF801  bl 0xFE86618
+	   7: MOVL       	$0xFE86E1C, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0xFE86618  ($4)
+
+
+
+. 2049 FE86E10 12
+. 38 60 00 06 38 9F 00 10 4B FF F8 01
+==== BB 2050 new_composite_name(0xFE86618) approx BBs exec'd 0 ====
+
+	0xFE86618:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE8661C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE86620:  48121831  bl 0xFFA7E50
+	   9: MOVL       	$0xFE86624, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2050 FE86618 12
+. 94 21 FF C0 7C 08 02 A6 48 12 18 31
+==== BB 2051 (0xFE86624) approx BBs exec'd 0 ====
+
+	0xFE86624:  93C10038  stw r30,56(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x38, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE86628:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE8662C:  7D800026  mfcr r12
+	   8: GETL       	CR, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0xFE86630:  2E030006  cmpi cr4,r3,6
+	  11: GETL       	R3, t8
+	  12: MOVL       	$0x6, t12
+	  13: CMPL       	t8, t12, t10  (-rSo)
+	  14: ICRFL       	t10, $0x4, CR
+	  15: INCEIPL       	$4
+
+	0xFE86634:  92A10014  stw r21,20(r1)
+	  16: GETL       	R21, t14
+	  17: GETL       	R1, t16
+	  18: ADDL       	$0x14, t16
+	  19: STL       	t14, (t16)
+	  20: INCEIPL       	$4
+
+	0xFE86638:  92C10018  stw r22,24(r1)
+	  21: GETL       	R22, t18
+	  22: GETL       	R1, t20
+	  23: ADDL       	$0x18, t20
+	  24: STL       	t18, (t20)
+	  25: INCEIPL       	$4
+
+	0xFE8663C:  82BE1D78  lwz r21,7544(r30)
+	  26: GETL       	R30, t22
+	  27: ADDL       	$0x1D78, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R21
+	  30: INCEIPL       	$4
+
+	0xFE86640:  82DE1D1C  lwz r22,7452(r30)
+	  31: GETL       	R30, t26
+	  32: ADDL       	$0x1D1C, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R22
+	  35: INCEIPL       	$4
+
+	0xFE86644:  92E1001C  stw r23,28(r1)
+	  36: GETL       	R23, t30
+	  37: GETL       	R1, t32
+	  38: ADDL       	$0x1C, t32
+	  39: STL       	t30, (t32)
+	  40: INCEIPL       	$4
+
+	0xFE86648:  7C771B78  or r23,r3,r3
+	  41: GETL       	R3, t34
+	  42: PUTL       	t34, R23
+	  43: INCEIPL       	$4
+
+	0xFE8664C:  93010020  stw r24,32(r1)
+	  44: GETL       	R24, t36
+	  45: GETL       	R1, t38
+	  46: ADDL       	$0x20, t38
+	  47: STL       	t36, (t38)
+	  48: INCEIPL       	$4
+
+	0xFE86650:  7C982378  or r24,r4,r4
+	  49: GETL       	R4, t40
+	  50: PUTL       	t40, R24
+	  51: INCEIPL       	$4
+
+	0xFE86654:  93210024  stw r25,36(r1)
+	  52: GETL       	R25, t42
+	  53: GETL       	R1, t44
+	  54: ADDL       	$0x24, t44
+	  55: STL       	t42, (t44)
+	  56: INCEIPL       	$4
+
+	0xFE86658:  3B200000  li r25,0
+	  57: MOVL       	$0x0, t46
+	  58: PUTL       	t46, R25
+	  59: INCEIPL       	$4
+
+	0xFE8665C:  93410028  stw r26,40(r1)
+	  60: GETL       	R26, t48
+	  61: GETL       	R1, t50
+	  62: ADDL       	$0x28, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0xFE86660:  3B400000  li r26,0
+	  65: MOVL       	$0x0, t52
+	  66: PUTL       	t52, R26
+	  67: INCEIPL       	$4
+
+	0xFE86664:  9361002C  stw r27,44(r1)
+	  68: GETL       	R27, t54
+	  69: GETL       	R1, t56
+	  70: ADDL       	$0x2C, t56
+	  71: STL       	t54, (t56)
+	  72: INCEIPL       	$4
+
+	0xFE86668:  3B600001  li r27,1
+	  73: MOVL       	$0x1, t58
+	  74: PUTL       	t58, R27
+	  75: INCEIPL       	$4
+
+	0xFE8666C:  93810030  stw r28,48(r1)
+	  76: GETL       	R28, t60
+	  77: GETL       	R1, t62
+	  78: ADDL       	$0x30, t62
+	  79: STL       	t60, (t62)
+	  80: INCEIPL       	$4
+
+	0xFE86670:  3B800000  li r28,0
+	  81: MOVL       	$0x0, t64
+	  82: PUTL       	t64, R28
+	  83: INCEIPL       	$4
+
+	0xFE86674:  93A10034  stw r29,52(r1)
+	  84: GETL       	R29, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x34, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0xFE86678:  93E1003C  stw r31,60(r1)
+	  89: GETL       	R31, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	$0x3C, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0xFE8667C:  90010044  stw r0,68(r1)
+	  94: GETL       	R0, t74
+	  95: GETL       	R1, t76
+	  96: ADDL       	$0x44, t76
+	  97: STL       	t74, (t76)
+	  98: INCEIPL       	$4
+
+	0xFE86680:  91810010  stw r12,16(r1)
+	  99: GETL       	R12, t78
+	 100: GETL       	R1, t80
+	 101: ADDL       	$0x10, t80
+	 102: STL       	t78, (t80)
+	 103: INCEIPL       	$4
+
+	0xFE86684:  48000064  b 0xFE866E8
+	 104: JMPo       	$0xFE866E8  ($4)
+
+
+
+. 2051 FE86624 100
+. 93 C1 00 38 7F C8 02 A6 7D 80 00 26 2E 03 00 06 92 A1 00 14 92 C1 00 18 82 BE 1D 78 82 DE 1D 1C 92 E1 00 1C 7C 77 1B 78 93 01 00 20 7C 98 23 78 93 21 00 24 3B 20 00 00 93 41 00 28 3B 40 00 00 93 61 00 2C 3B 60 00 01 93 81 00 30 3B 80 00 00 93 A1 00 34 93 E1 00 3C 90 01 00 44 91 81 00 10 48 00 00 64
+==== BB 2052 (0xFE866E8) approx BBs exec'd 0 ====
+
+	0xFE866E8:  2F9C0006  cmpi cr7,r28,6
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE866EC:  41BEFFF0  bc 13,30,0xFE866DC
+	   5: Js30o       	$0xFE866DC
+
+
+
+. 2052 FE866E8 8
+. 2F 9C 00 06 41 BE FF F0
+==== BB 2053 (0xFE866F0) approx BBs exec'd 0 ====
+
+	0xFE866F0:  7F97E000  cmp cr7,r23,r28
+	   0: GETL       	R23, t0
+	   1: GETL       	R28, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE866F4:  579F103A  rlwinm r31,r28,2,0,29
+	   5: GETL       	R28, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R31
+	   8: INCEIPL       	$4
+
+	0xFE866F8:  41B2FF90  bc 13,18,0xFE86688
+	   9: Js18o       	$0xFE86688
+
+
+
+. 2053 FE866F0 12
+. 7F 97 E0 00 57 9F 10 3A 41 B2 FF 90
+==== BB 2054 (0xFE86688) approx BBs exec'd 0 ====
+
+	0xFE86688:  7FBFC02E  lwzx r29,r31,r24
+	   0: GETL       	R24, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R29
+	   5: INCEIPL       	$4
+
+	0xFE8668C:  7FA3EB78  or r3,r29,r29
+	   6: GETL       	R29, t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFE86690:  48050449  bl 0xFED6AD8
+	   9: MOVL       	$0xFE86694, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2054 FE86688 12
+. 7F BF C0 2E 7F A3 EB 78 48 05 04 49
+==== BB 2055 (0xFE86694) approx BBs exec'd 0 ====
+
+	0xFE86694:  301BFFFF  addic r0,r27,-1
+	   0: GETL       	R27, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xFE86698:  7D60D910  subfe r11,r0,r27
+	   4: GETL       	R0, t2
+	   5: GETL       	R27, t4
+	   6: SBBL       	t2, t4  (-rCa-wCa)
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xFE8669C:  7C7A1B78  or r26,r3,r3
+	   9: GETL       	R3, t6
+	  10: PUTL       	t6, R26
+	  11: INCEIPL       	$4
+
+	0xFE866A0:  207C0000  subfic r3,r28,0
+	  12: GETL       	R28, t8
+	  13: MOVL       	$0x0, t10
+	  14: SBBL       	t8, t10  (-wCa)
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFE866A4:  7C6301D4  addme r3,r3
+	  17: GETL       	R3, t12
+	  18: ADCL       	$0xFFFFFFFF, t12  (-rCa-wCa)
+	  19: PUTL       	t12, R3
+	  20: INCEIPL       	$4
+
+	0xFE866A8:  54630FFE  rlwinm r3,r3,1,31,31
+	  21: GETL       	R3, t14
+	  22: SHRL       	$0x1F, t14
+	  23: PUTL       	t14, R3
+	  24: INCEIPL       	$4
+
+	0xFE866AC:  7C695839  and. r9,r3,r11
+	  25: GETL       	R3, t16
+	  26: GETL       	R11, t18
+	  27: ANDL       	t16, t18
+	  28: PUTL       	t18, R9
+	  29: CMP0L       	t18, t20  (-rSo)
+	  30: ICRFL       	t20, $0x0, CR
+	  31: INCEIPL       	$4
+
+	0xFE866B0:  7CBFB02E  lwzx r5,r31,r22
+	  32: GETL       	R22, t22
+	  33: GETL       	R31, t24
+	  34: ADDL       	t24, t22
+	  35: LDL       	(t22), t26
+	  36: PUTL       	t26, R5
+	  37: INCEIPL       	$4
+
+	0xFE866B4:  7FA3EB78  or r3,r29,r29
+	  38: GETL       	R29, t28
+	  39: PUTL       	t28, R3
+	  40: INCEIPL       	$4
+
+	0xFE866B8:  7C85D214  add r4,r5,r26
+	  41: GETL       	R5, t30
+	  42: GETL       	R26, t32
+	  43: ADDL       	t30, t32
+	  44: PUTL       	t32, R4
+	  45: INCEIPL       	$4
+
+	0xFE866BC:  7D24CA14  add r9,r4,r25
+	  46: GETL       	R4, t34
+	  47: GETL       	R25, t36
+	  48: ADDL       	t34, t36
+	  49: PUTL       	t36, R9
+	  50: INCEIPL       	$4
+
+	0xFE866C0:  3B290002  addi r25,r9,2
+	  51: GETL       	R9, t38
+	  52: ADDL       	$0x2, t38
+	  53: PUTL       	t38, R25
+	  54: INCEIPL       	$4
+
+	0xFE866C4:  41820018  bc 12,2,0xFE866DC
+	  55: Js02o       	$0xFE866DC
+
+
+
+. 2055 FE86694 52
+. 30 1B FF FF 7D 60 D9 10 7C 7A 1B 78 20 7C 00 00 7C 63 01 D4 54 63 0F FE 7C 69 58 39 7C BF B0 2E 7F A3 EB 78 7C 85 D2 14 7D 24 CA 14 3B 29 00 02 41 82 00 18
+==== BB 2056 (0xFE866DC) approx BBs exec'd 0 ====
+
+	0xFE866DC:  3B9C0001  addi r28,r28,1
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0xFE866E0:  2C9C000C  cmpi cr1,r28,12
+	   4: GETL       	R28, t2
+	   5: MOVL       	$0xC, t6
+	   6: CMPL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFE866E4:  41850038  bc 12,5,0xFE8671C
+	   9: Js05o       	$0xFE8671C
+
+
+
+. 2056 FE866DC 12
+. 3B 9C 00 01 2C 9C 00 0C 41 85 00 38
+==== BB 2057 (0xFE866C8) approx BBs exec'd 0 ====
+
+	0xFE866C8:  80980000  lwz r4,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFE866CC:  4804FCE5  bl 0xFED63B0
+	   4: MOVL       	$0xFE866D0, t4
+	   5: PUTL       	t4, LR
+	   6: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2057 FE866C8 8
+. 80 98 00 00 48 04 FC E5
+==== BB 2058 (0xFE866D0) approx BBs exec'd 0 ====
+
+	0xFE866D0:  30C3FFFF  addic r6,r3,-1
+	   0: GETL       	R3, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0xFE866D4:  7CC63110  subfe r6,r6,r6
+	   4: GETL       	R6, t2
+	   5: GETL       	R6, t4
+	   6: SBBL       	t2, t4  (-rCa-wCa)
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0xFE866D8:  7F7B3038  and r27,r27,r6
+	   9: GETL       	R27, t6
+	  10: GETL       	R6, t8
+	  11: ANDL       	t6, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0xFE866DC:  3B9C0001  addi r28,r28,1
+	  14: GETL       	R28, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0xFE866E0:  2C9C000C  cmpi cr1,r28,12
+	  18: GETL       	R28, t12
+	  19: MOVL       	$0xC, t16
+	  20: CMPL       	t12, t16, t14  (-rSo)
+	  21: ICRFL       	t14, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0xFE866E4:  41850038  bc 12,5,0xFE8671C
+	  23: Js05o       	$0xFE8671C
+
+
+
+. 2058 FE866D0 24
+. 30 C3 FF FF 7C C6 31 10 7F 7B 30 38 3B 9C 00 01 2C 9C 00 0C 41 85 00 38
+==== BB 2059 (0xFE8671C) approx BBs exec'd 0 ====
+
+	0xFE8671C:  2E1B0000  cmpi cr4,r27,0
+	   0: GETL       	R27, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFE86720:  41920078  bc 12,18,0xFE86798
+	   4: Js18o       	$0xFE86798
+
+
+
+. 2059 FE8671C 8
+. 2E 1B 00 00 41 92 00 78
+==== BB 2060 (0xFE86724) approx BBs exec'd 0 ====
+
+	0xFE86724:  83F80000  lwz r31,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0xFE86728:  809E1CB8  lwz r4,7352(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1CB8, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0xFE8672C:  7FE3FB78  or r3,r31,r31
+	   9: GETL       	R31, t8
+	  10: PUTL       	t8, R3
+	  11: INCEIPL       	$4
+
+	0xFE86730:  4804FC81  bl 0xFED63B0
+	  12: MOVL       	$0xFE86734, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2060 FE86724 16
+. 83 F8 00 00 80 9E 1C B8 7F E3 FB 78 48 04 FC 81
+==== BB 2061 (0xFE86734) approx BBs exec'd 0 ====
+
+	0xFE86734:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE86738:  419A0018  bc 12,26,0xFE86750
+	   4: Js26o       	$0xFE86750
+
+
+
+. 2061 FE86734 8
+. 2F 03 00 00 41 9A 00 18
+==== BB 2062 (0xFE8673C) approx BBs exec'd 0 ====
+
+	0xFE8673C:  809E1CC8  lwz r4,7368(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE86740:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE86744:  4804FC6D  bl 0xFED63B0
+	   8: MOVL       	$0xFE86748, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2062 FE8673C 12
+. 80 9E 1C C8 7F E3 FB 78 48 04 FC 6D
+==== BB 2063 (0xFE86748) approx BBs exec'd 0 ====
+
+	0xFE86748:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE8674C:  409E00E4  bc 4,30,0xFE86830
+	   4: Jc30o       	$0xFE86830
+
+
+
+. 2063 FE86748 8
+. 2F 83 00 00 40 9E 00 E4
+==== BB 2064 (0xFE86830) approx BBs exec'd 0 ====
+
+	0xFE86830:  3BFA0001  addi r31,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0xFE86834:  7FE3FB78  or r3,r31,r31
+	   4: GETL       	R31, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0xFE86838:  48123AD1  bl 0xFFAA308
+	   7: MOVL       	$0xFE8683C, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 2064 FE86830 12
+. 3B FA 00 01 7F E3 FB 78 48 12 3A D1
+==== BB 2065 (0xFE8683C) approx BBs exec'd 0 ====
+
+	0xFE8683C:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE86840:  2C030000  cmpi cr0,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFE86844:  41820014  bc 12,2,0xFE86858
+	   7: Js02o       	$0xFE86858
+
+
+
+. 2065 FE8683C 12
+. 38 00 00 00 2C 03 00 00 41 82 00 14
+==== BB 2066 (0xFE86848) approx BBs exec'd 0 ====
+
+	0xFE86848:  80980000  lwz r4,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFE8684C:  7FE5FB78  or r5,r31,r31
+	   4: GETL       	R31, t4
+	   5: PUTL       	t4, R5
+	   6: INCEIPL       	$4
+
+	0xFE86850:  48051E89  bl 0xFED86D8
+	   7: MOVL       	$0xFE86854, t6
+	   8: PUTL       	t6, LR
+	   9: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2066 FE86848 12
+. 80 98 00 00 7F E5 FB 78 48 05 1E 89
+==== BB 2067 (0xFE86854) approx BBs exec'd 0 ====
+
+	0xFE86854:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE86858:  7C030378  or r3,r0,r0
+	   3: GETL       	R0, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFE8685C:  4BFFFEF8  b 0xFE86754
+	   6: JMPo       	$0xFE86754  ($4)
+
+
+
+. 2067 FE86854 12
+. 7C 60 1B 78 7C 03 03 78 4B FF FE F8
+==== BB 2068 (0xFE86754) approx BBs exec'd 0 ====
+
+	0xFE86754:  81410044  lwz r10,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFE86758:  81010010  lwz r8,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0xFE8675C:  82A10014  lwz r21,20(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R21
+	  14: INCEIPL       	$4
+
+	0xFE86760:  7D4803A6  mtlr r10
+	  15: GETL       	R10, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFE86764:  82C10018  lwz r22,24(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R22
+	  22: INCEIPL       	$4
+
+	0xFE86768:  7D008120  mtcrf 0x8,r8
+	  23: GETL       	R8, t18
+	  24: ICRFL       	t18, $0x4, CR
+	  25: INCEIPL       	$4
+
+	0xFE8676C:  82E1001C  lwz r23,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R23
+	  30: INCEIPL       	$4
+
+	0xFE86770:  83010020  lwz r24,32(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R24
+	  35: INCEIPL       	$4
+
+	0xFE86774:  83210024  lwz r25,36(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x24, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R25
+	  40: INCEIPL       	$4
+
+	0xFE86778:  83410028  lwz r26,40(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x28, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R26
+	  45: INCEIPL       	$4
+
+	0xFE8677C:  8361002C  lwz r27,44(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x2C, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R27
+	  50: INCEIPL       	$4
+
+	0xFE86780:  83810030  lwz r28,48(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x30, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R28
+	  55: INCEIPL       	$4
+
+	0xFE86784:  83A10034  lwz r29,52(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x34, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R29
+	  60: INCEIPL       	$4
+
+	0xFE86788:  83C10038  lwz r30,56(r1)
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x38, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R30
+	  65: INCEIPL       	$4
+
+	0xFE8678C:  83E1003C  lwz r31,60(r1)
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x3C, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R31
+	  70: INCEIPL       	$4
+
+	0xFE86790:  38210040  addi r1,r1,64
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0x40, t56
+	  73: PUTL       	t56, R1
+	  74: INCEIPL       	$4
+
+	0xFE86794:  4E800020  blr
+	  75: GETL       	LR, t58
+	  76: JMPo-r       	t58  ($4)
+
+
+
+. 2068 FE86754 68
+. 81 41 00 44 81 01 00 10 82 A1 00 14 7D 48 03 A6 82 C1 00 18 7D 00 81 20 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 2069 (0xFE86E1C) approx BBs exec'd 0 ====
+
+	0xFE86E1C:  7C771B79  or. r23,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R23
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE86E20:  41A2FDFC  bc 13,2,0xFE86C1C
+	   5: Js02o       	$0xFE86C1C
+
+
+
+. 2069 FE86E1C 8
+. 7C 77 1B 79 41 A2 FD FC
+==== BB 2070 (0xFE86E24) approx BBs exec'd 0 ====
+
+	0xFE86E24:  82BE00FC  lwz r21,252(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0xFE86E28:  3B600000  li r27,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R27
+	   7: INCEIPL       	$4
+
+	0xFE86E2C:  82DE1CB8  lwz r22,7352(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x1CB8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R22
+	  12: INCEIPL       	$4
+
+	0xFE86E30:  3BBF0008  addi r29,r31,8
+	  13: GETL       	R31, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R29
+	  16: INCEIPL       	$4
+
+	0xFE86E34:  2F9B0006  cmpi cr7,r27,6
+	  17: GETL       	R27, t12
+	  18: MOVL       	$0x6, t16
+	  19: CMPL       	t12, t16, t14  (-rSo)
+	  20: ICRFL       	t14, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0xFE86E38:  5769103A  rlwinm r9,r27,2,0,29
+	  22: GETL       	R27, t18
+	  23: SHLL       	$0x2, t18
+	  24: PUTL       	t18, R9
+	  25: INCEIPL       	$4
+
+	0xFE86E3C:  7F89EA14  add r28,r9,r29
+	  26: GETL       	R9, t20
+	  27: GETL       	R29, t22
+	  28: ADDL       	t20, t22
+	  29: PUTL       	t22, R28
+	  30: INCEIPL       	$4
+
+	0xFE86E40:  3B7B0001  addi r27,r27,1
+	  31: MOVL       	$0x1, t24
+	  32: PUTL       	t24, R27
+	  33: INCEIPL       	$4
+
+	0xFE86E44:  7F49C214  add r26,r9,r24
+	  34: GETL       	R9, t26
+	  35: GETL       	R24, t28
+	  36: ADDL       	t26, t28
+	  37: PUTL       	t28, R26
+	  38: INCEIPL       	$4
+
+	0xFE86E48:  419E0038  bc 12,30,0xFE86E80
+	  39: Js30o       	$0xFE86E80
+
+
+
+. 2070 FE86E24 40
+. 82 BE 00 FC 3B 60 00 00 82 DE 1C B8 3B BF 00 08 2F 9B 00 06 57 69 10 3A 7F 89 EA 14 3B 7B 00 01 7F 49 C2 14 41 9E 00 38
+==== BB 2071 (0xFE86E4C) approx BBs exec'd 0 ====
+
+	0xFE86E4C:  7D69A82E  lwzx r11,r9,r21
+	   0: GETL       	R21, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R11
+	   5: INCEIPL       	$4
+
+	0xFE86E50:  809C0048  lwz r4,72(r28)
+	   6: GETL       	R28, t6
+	   7: ADDL       	$0x48, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R4
+	  10: INCEIPL       	$4
+
+	0xFE86E54:  2C8B0000  cmpi cr1,r11,0
+	  11: GETL       	R11, t10
+	  12: CMP0L       	t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0xFE86E58:  7C89C12E  stwx r4,r9,r24
+	  15: GETL       	R24, t14
+	  16: GETL       	R9, t16
+	  17: ADDL       	t16, t14
+	  18: GETL       	R4, t18
+	  19: STL       	t18, (t14)
+	  20: INCEIPL       	$4
+
+	0xFE86E5C:  40860060  bc 4,6,0xFE86EBC
+	  21: Jc06o       	$0xFE86EBC
+
+
+
+. 2071 FE86E4C 20
+. 7D 69 A8 2E 80 9C 00 48 2C 8B 00 00 7C 89 C1 2E 40 86 00 60
+==== BB 2072 (0xFE86EBC) approx BBs exec'd 0 ====
+
+	0xFE86EBC:  7D6903A6  mtctr r11
+	   0: GETL       	R11, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0xFE86EC0:  4E800421  bctrl
+	   3: MOVL       	$0xFE86EC4, t2
+	   4: PUTL       	t2, LR
+	   5: GETL       	CTR, t4
+	   6: JMPo-c       	t4  ($4)
+
+
+
+. 2072 FE86EBC 8
+. 7D 69 03 A6 4E 80 04 21
+==== BB 2073 _nl_postload_ctype(0xFE89240) approx BBs exec'd 0 ====
+
+	0xFE89240:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE89244:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE89248:  4811EC09  bl 0xFFA7E50
+	   9: MOVL       	$0xFE8924C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2073 FE89240 12
+. 94 21 FF F0 7C 08 02 A6 48 11 EC 09
+==== BB 2074 (0xFE8924C) approx BBs exec'd 0 ====
+
+	0xFE8924C:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE89250:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE89254:  7C0803A6  mtlr r0
+	   8: GETL       	R0, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0xFE89258:  811E1D78  lwz r8,7544(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x1D78, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R8
+	  15: INCEIPL       	$4
+
+	0xFE8925C:  817E1D50  lwz r11,7504(r30)
+	  16: GETL       	R30, t12
+	  17: ADDL       	$0x1D50, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0xFE89260:  7D8B1214  add r12,r11,r2
+	  21: GETL       	R11, t16
+	  22: GETL       	R2, t18
+	  23: ADDL       	t16, t18
+	  24: PUTL       	t18, R12
+	  25: INCEIPL       	$4
+
+	0xFE89264:  807E1CF8  lwz r3,7416(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x1CF8, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R3
+	  30: INCEIPL       	$4
+
+	0xFE89268:  80A80000  lwz r5,0(r8)
+	  31: GETL       	R8, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R5
+	  34: INCEIPL       	$4
+
+	0xFE8926C:  813E1D8C  lwz r9,7564(r30)
+	  35: GETL       	R30, t28
+	  36: ADDL       	$0x1D8C, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R9
+	  39: INCEIPL       	$4
+
+	0xFE89270:  817E1B80  lwz r11,7040(r30)
+	  40: GETL       	R30, t32
+	  41: ADDL       	$0x1B80, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R11
+	  44: INCEIPL       	$4
+
+	0xFE89274:  7D291214  add r9,r9,r2
+	  45: GETL       	R9, t36
+	  46: GETL       	R2, t38
+	  47: ADDL       	t36, t38
+	  48: PUTL       	t38, R9
+	  49: INCEIPL       	$4
+
+	0xFE89278:  80C50024  lwz r6,36(r5)
+	  50: GETL       	R5, t40
+	  51: ADDL       	$0x24, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R6
+	  54: INCEIPL       	$4
+
+	0xFE8927C:  38E50024  addi r7,r5,36
+	  55: GETL       	R5, t44
+	  56: ADDL       	$0x24, t44
+	  57: PUTL       	t44, R7
+	  58: INCEIPL       	$4
+
+	0xFE89280:  7D6B1214  add r11,r11,r2
+	  59: GETL       	R11, t46
+	  60: GETL       	R2, t48
+	  61: ADDL       	t46, t48
+	  62: PUTL       	t48, R11
+	  63: INCEIPL       	$4
+
+	0xFE89284:  38A60100  addi r5,r6,256
+	  64: GETL       	R6, t50
+	  65: ADDL       	$0x100, t50
+	  66: PUTL       	t50, R5
+	  67: INCEIPL       	$4
+
+	0xFE89288:  80CC0000  lwz r6,0(r12)
+	  68: GETL       	R12, t52
+	  69: LDL       	(t52), t54
+	  70: PUTL       	t54, R6
+	  71: INCEIPL       	$4
+
+	0xFE8928C:  90A80034  stw r5,52(r8)
+	  72: GETL       	R5, t56
+	  73: GETL       	R8, t58
+	  74: ADDL       	$0x34, t58
+	  75: STL       	t56, (t58)
+	  76: INCEIPL       	$4
+
+	0xFE89290:  8147000C  lwz r10,12(r7)
+	  77: GETL       	R7, t60
+	  78: ADDL       	$0xC, t60
+	  79: LDL       	(t60), t62
+	  80: PUTL       	t62, R10
+	  81: INCEIPL       	$4
+
+	0xFE89294:  7F864000  cmp cr7,r6,r8
+	  82: GETL       	R6, t64
+	  83: GETL       	R8, t66
+	  84: CMPL       	t64, t66, t68  (-rSo)
+	  85: ICRFL       	t68, $0x7, CR
+	  86: INCEIPL       	$4
+
+	0xFE89298:  394A0200  addi r10,r10,512
+	  87: GETL       	R10, t70
+	  88: ADDL       	$0x200, t70
+	  89: PUTL       	t70, R10
+	  90: INCEIPL       	$4
+
+	0xFE8929C:  91480038  stw r10,56(r8)
+	  91: GETL       	R10, t72
+	  92: GETL       	R8, t74
+	  93: ADDL       	$0x38, t74
+	  94: STL       	t72, (t74)
+	  95: INCEIPL       	$4
+
+	0xFE892A0:  80870004  lwz r4,4(r7)
+	  96: GETL       	R7, t76
+	  97: ADDL       	$0x4, t76
+	  98: LDL       	(t76), t78
+	  99: PUTL       	t78, R4
+	 100: INCEIPL       	$4
+
+	0xFE892A4:  7CE31214  add r7,r3,r2
+	 101: GETL       	R3, t80
+	 102: GETL       	R2, t82
+	 103: ADDL       	t80, t82
+	 104: PUTL       	t82, R7
+	 105: INCEIPL       	$4
+
+	0xFE892A8:  38040200  addi r0,r4,512
+	 106: GETL       	R4, t84
+	 107: ADDL       	$0x200, t84
+	 108: PUTL       	t84, R0
+	 109: INCEIPL       	$4
+
+	0xFE892AC:  9008003C  stw r0,60(r8)
+	 110: GETL       	R0, t86
+	 111: GETL       	R8, t88
+	 112: ADDL       	$0x3C, t88
+	 113: STL       	t86, (t88)
+	 114: INCEIPL       	$4
+
+	0xFE892B0:  419E0068  bc 12,30,0xFE89318
+	 115: Js30o       	$0xFE89318
+
+
+
+. 2074 FE8924C 104
+. 93 C1 00 08 7F C8 02 A6 7C 08 03 A6 81 1E 1D 78 81 7E 1D 50 7D 8B 12 14 80 7E 1C F8 80 A8 00 00 81 3E 1D 8C 81 7E 1B 80 7D 29 12 14 80 C5 00 24 38 E5 00 24 7D 6B 12 14 38 A6 01 00 80 CC 00 00 90 A8 00 34 81 47 00 0C 7F 86 40 00 39 4A 02 00 91 48 00 38 80 87 00 04 7C E3 12 14 38 04 02 00 90 08 00 3C 41 9E 00 68
+==== BB 2075 (0xFE89318) approx BBs exec'd 0 ====
+
+	0xFE89318:  90A70000  stw r5,0(r7)
+	   0: GETL       	R5, t0
+	   1: GETL       	R7, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE8931C:  90090000  stw r0,0(r9)
+	   4: GETL       	R0, t4
+	   5: GETL       	R9, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0xFE89320:  914B0000  stw r10,0(r11)
+	   8: GETL       	R10, t8
+	   9: GETL       	R11, t10
+	  10: STL       	t8, (t10)
+	  11: INCEIPL       	$4
+
+	0xFE89324:  4BFFFF90  b 0xFE892B4
+	  12: JMPo       	$0xFE892B4  ($4)
+
+
+
+. 2075 FE89318 16
+. 90 A7 00 00 90 09 00 00 91 4B 00 00 4B FF FF 90
+==== BB 2076 (0xFE892B4) approx BBs exec'd 0 ====
+
+	0xFE892B4:  80A60000  lwz r5,0(r6)
+	   0: GETL       	R6, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFE892B8:  813E1DE0  lwz r9,7648(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1DE0, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFE892BC:  81450024  lwz r10,36(r5)
+	   9: GETL       	R5, t8
+	  10: ADDL       	$0x24, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0xFE892C0:  81050028  lwz r8,40(r5)
+	  14: GETL       	R5, t12
+	  15: ADDL       	$0x28, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R8
+	  18: INCEIPL       	$4
+
+	0xFE892C4:  80E50030  lwz r7,48(r5)
+	  19: GETL       	R5, t16
+	  20: ADDL       	$0x30, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R7
+	  23: INCEIPL       	$4
+
+	0xFE892C8:  380A0100  addi r0,r10,256
+	  24: GETL       	R10, t20
+	  25: ADDL       	$0x100, t20
+	  26: PUTL       	t20, R0
+	  27: INCEIPL       	$4
+
+	0xFE892CC:  80650064  lwz r3,100(r5)
+	  28: GETL       	R5, t22
+	  29: ADDL       	$0x64, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R3
+	  32: INCEIPL       	$4
+
+	0xFE892D0:  39680200  addi r11,r8,512
+	  33: GETL       	R8, t26
+	  34: ADDL       	$0x200, t26
+	  35: PUTL       	t26, R11
+	  36: INCEIPL       	$4
+
+	0xFE892D4:  81850038  lwz r12,56(r5)
+	  37: GETL       	R5, t28
+	  38: ADDL       	$0x38, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R12
+	  41: INCEIPL       	$4
+
+	0xFE892D8:  38C70200  addi r6,r7,512
+	  42: GETL       	R7, t32
+	  43: ADDL       	$0x200, t32
+	  44: PUTL       	t32, R6
+	  45: INCEIPL       	$4
+
+	0xFE892DC:  80850060  lwz r4,96(r5)
+	  46: GETL       	R5, t34
+	  47: ADDL       	$0x60, t34
+	  48: LDL       	(t34), t36
+	  49: PUTL       	t36, R4
+	  50: INCEIPL       	$4
+
+	0xFE892E0:  815E1C7C  lwz r10,7292(r30)
+	  51: GETL       	R30, t38
+	  52: ADDL       	$0x1C7C, t38
+	  53: LDL       	(t38), t40
+	  54: PUTL       	t40, R10
+	  55: INCEIPL       	$4
+
+	0xFE892E4:  80BE1BDC  lwz r5,7132(r30)
+	  56: GETL       	R30, t42
+	  57: ADDL       	$0x1BDC, t42
+	  58: LDL       	(t42), t44
+	  59: PUTL       	t44, R5
+	  60: INCEIPL       	$4
+
+	0xFE892E8:  80FE1E18  lwz r7,7704(r30)
+	  61: GETL       	R30, t46
+	  62: ADDL       	$0x1E18, t46
+	  63: LDL       	(t46), t48
+	  64: PUTL       	t48, R7
+	  65: INCEIPL       	$4
+
+	0xFE892EC:  811E1D84  lwz r8,7556(r30)
+	  66: GETL       	R30, t50
+	  67: ADDL       	$0x1D84, t50
+	  68: LDL       	(t50), t52
+	  69: PUTL       	t52, R8
+	  70: INCEIPL       	$4
+
+	0xFE892F0:  90090000  stw r0,0(r9)
+	  71: GETL       	R0, t54
+	  72: GETL       	R9, t56
+	  73: STL       	t54, (t56)
+	  74: INCEIPL       	$4
+
+	0xFE892F4:  813E1BC0  lwz r9,7104(r30)
+	  75: GETL       	R30, t58
+	  76: ADDL       	$0x1BC0, t58
+	  77: LDL       	(t58), t60
+	  78: PUTL       	t60, R9
+	  79: INCEIPL       	$4
+
+	0xFE892F8:  83C10008  lwz r30,8(r1)
+	  80: GETL       	R1, t62
+	  81: ADDL       	$0x8, t62
+	  82: LDL       	(t62), t64
+	  83: PUTL       	t64, R30
+	  84: INCEIPL       	$4
+
+	0xFE892FC:  38210010  addi r1,r1,16
+	  85: GETL       	R1, t66
+	  86: ADDL       	$0x10, t66
+	  87: PUTL       	t66, R1
+	  88: INCEIPL       	$4
+
+	0xFE89300:  91650000  stw r11,0(r5)
+	  89: GETL       	R11, t68
+	  90: GETL       	R5, t70
+	  91: STL       	t68, (t70)
+	  92: INCEIPL       	$4
+
+	0xFE89304:  90CA0000  stw r6,0(r10)
+	  93: GETL       	R6, t72
+	  94: GETL       	R10, t74
+	  95: STL       	t72, (t74)
+	  96: INCEIPL       	$4
+
+	0xFE89308:  91870000  stw r12,0(r7)
+	  97: GETL       	R12, t76
+	  98: GETL       	R7, t78
+	  99: STL       	t76, (t78)
+	 100: INCEIPL       	$4
+
+	0xFE8930C:  90890000  stw r4,0(r9)
+	 101: GETL       	R4, t80
+	 102: GETL       	R9, t82
+	 103: STL       	t80, (t82)
+	 104: INCEIPL       	$4
+
+	0xFE89310:  90680000  stw r3,0(r8)
+	 105: GETL       	R3, t84
+	 106: GETL       	R8, t86
+	 107: STL       	t84, (t86)
+	 108: INCEIPL       	$4
+
+	0xFE89314:  4E800020  blr
+	 109: GETL       	LR, t88
+	 110: JMPo-r       	t88  ($4)
+
+
+
+. 2076 FE892B4 100
+. 80 A6 00 00 81 3E 1D E0 81 45 00 24 81 05 00 28 80 E5 00 30 38 0A 01 00 80 65 00 64 39 68 02 00 81 85 00 38 38 C7 02 00 80 85 00 60 81 5E 1C 7C 80 BE 1B DC 80 FE 1E 18 81 1E 1D 84 90 09 00 00 81 3E 1B C0 83 C1 00 08 38 21 00 10 91 65 00 00 90 CA 00 00 91 87 00 00 90 89 00 00 90 68 00 00 4E 80 00 20
+==== BB 2077 (0xFE86EC4) approx BBs exec'd 0 ====
+
+	0xFE86EC4:  4BFFFF9C  b 0xFE86E60
+	   0: JMPo       	$0xFE86E60  ($4)
+
+
+
+. 2077 FE86EC4 4
+. 4B FF FF 9C
+==== BB 2078 (0xFE86E60) approx BBs exec'd 0 ====
+
+	0xFE86E60:  807A0040  lwz r3,64(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x40, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE86E64:  839C0008  lwz r28,8(r28)
+	   5: GETL       	R28, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFE86E68:  7F03B000  cmp cr6,r3,r22
+	  10: GETL       	R3, t8
+	  11: GETL       	R22, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0xFE86E6C:  7C03E000  cmp cr0,r3,r28
+	  15: GETL       	R3, t14
+	  16: GETL       	R28, t16
+	  17: CMPL       	t14, t16, t18  (-rSo)
+	  18: ICRFL       	t18, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0xFE86E70:  41820010  bc 12,2,0xFE86E80
+	  20: Js02o       	$0xFE86E80
+
+
+
+. 2078 FE86E60 20
+. 80 7A 00 40 83 9C 00 08 7F 03 B0 00 7C 03 E0 00 41 82 00 10
+==== BB 2079 (0xFE86E74) approx BBs exec'd 0 ====
+
+	0xFE86E74:  419A0008  bc 12,26,0xFE86E7C
+	   0: Js26o       	$0xFE86E7C
+
+
+
+. 2079 FE86E74 4
+. 41 9A 00 08
+==== BB 2080 (0xFE86E7C) approx BBs exec'd 0 ====
+
+	0xFE86E7C:  939A0040  stw r28,64(r26)
+	   0: GETL       	R28, t0
+	   1: GETL       	R26, t2
+	   2: ADDL       	$0x40, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE86E80:  2F1B000C  cmpi cr6,r27,12
+	   5: GETL       	R27, t4
+	   6: MOVL       	$0xC, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFE86E84:  4099FFB0  bc 4,25,0xFE86E34
+	  10: Jc25o       	$0xFE86E34
+
+
+
+. 2080 FE86E7C 12
+. 93 9A 00 40 2F 1B 00 0C 40 99 FF B0
+==== BB 2081 (0xFE86E34) approx BBs exec'd 0 ====
+
+	0xFE86E34:  2F9B0006  cmpi cr7,r27,6
+	   0: GETL       	R27, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE86E38:  5769103A  rlwinm r9,r27,2,0,29
+	   5: GETL       	R27, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFE86E3C:  7F89EA14  add r28,r9,r29
+	   9: GETL       	R9, t8
+	  10: GETL       	R29, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R28
+	  13: INCEIPL       	$4
+
+	0xFE86E40:  3B7B0001  addi r27,r27,1
+	  14: GETL       	R27, t12
+	  15: ADDL       	$0x1, t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0xFE86E44:  7F49C214  add r26,r9,r24
+	  18: GETL       	R9, t14
+	  19: GETL       	R24, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R26
+	  22: INCEIPL       	$4
+
+	0xFE86E48:  419E0038  bc 12,30,0xFE86E80
+	  23: Js30o       	$0xFE86E80
+
+
+
+. 2081 FE86E34 24
+. 2F 9B 00 06 57 69 10 3A 7F 89 EA 14 3B 7B 00 01 7F 49 C2 14 41 9E 00 38
+==== BB 2082 (0xFE86E80) approx BBs exec'd 0 ====
+
+	0xFE86E80:  2F1B000C  cmpi cr6,r27,12
+	   0: GETL       	R27, t0
+	   1: MOVL       	$0xC, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFE86E84:  4099FFB0  bc 4,25,0xFE86E34
+	   5: Jc25o       	$0xFE86E34
+
+
+
+. 2082 FE86E80 8
+. 2F 1B 00 0C 40 99 FF B0
+==== BB 2083 (0xFE86E88) approx BBs exec'd 0 ====
+
+	0xFE86E88:  80780058  lwz r3,88(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x58, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE86E8C:  7F83B800  cmp cr7,r3,r23
+	   5: GETL       	R3, t4
+	   6: GETL       	R23, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFE86E90:  419E0018  bc 12,30,0xFE86EA8
+	  10: Js30o       	$0xFE86EA8
+
+
+
+. 2083 FE86E88 12
+. 80 78 00 58 7F 83 B8 00 41 9E 00 18
+==== BB 2084 (0xFE86E94) approx BBs exec'd 0 ====
+
+	0xFE86E94:  839E1CB8  lwz r28,7352(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CB8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFE86E98:  7C83E000  cmp cr1,r3,r28
+	   5: GETL       	R3, t4
+	   6: GETL       	R28, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFE86E9C:  41860008  bc 12,6,0xFE86EA4
+	  10: Js06o       	$0xFE86EA4
+
+
+
+. 2084 FE86E94 12
+. 83 9E 1C B8 7C 83 E0 00 41 86 00 08
+==== BB 2085 (0xFE86EA4) approx BBs exec'd 0 ====
+
+	0xFE86EA4:  92F80058  stw r23,88(r24)
+	   0: GETL       	R23, t0
+	   1: GETL       	R24, t2
+	   2: ADDL       	$0x58, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE86EA8:  831E1B1C  lwz r24,6940(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1B1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R24
+	   9: INCEIPL       	$4
+
+	0xFE86EAC:  80D80000  lwz r6,0(r24)
+	  10: GETL       	R24, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R6
+	  13: INCEIPL       	$4
+
+	0xFE86EB0:  38A60001  addi r5,r6,1
+	  14: GETL       	R6, t12
+	  15: ADDL       	$0x1, t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0xFE86EB4:  90B80000  stw r5,0(r24)
+	  18: GETL       	R5, t14
+	  19: GETL       	R24, t16
+	  20: STL       	t14, (t16)
+	  21: INCEIPL       	$4
+
+	0xFE86EB8:  4BFFFDB8  b 0xFE86C70
+	  22: JMPo       	$0xFE86C70  ($4)
+
+
+
+. 2085 FE86EA4 24
+. 92 F8 00 58 83 1E 1B 1C 80 D8 00 00 38 A6 00 01 90 B8 00 00 4B FF FD B8
+==== BB 2086 (0xFE86C70) approx BBs exec'd 0 ====
+
+	0xFE86C70:  39400000  li r10,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFE86C74:  7C0004AC  sync
+	   3: INCEIPL       	$4
+
+	0xFE86C78:  7D00C828  lwarx r8,r0,r25
+	   4: GETL       	R25, t2
+	   5: LOCKo       	
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0xFE86C7C:  7D40C92D  stwcx. r10,r0,r25
+	   9: GETL       	R25, t6
+	  10: GETL       	R10, t8
+	  11: LOCKo       	
+	  12: STL       	t8, (t6)  (-rSo)
+	  13: ICRFL       	cr, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFE86C80:  40A2FFF8  bc 5,2,0xFE86C78
+	  15: Jc02o       	$0xFE86C78
+
+
+
+. 2086 FE86C70 20
+. 39 40 00 00 7C 00 04 AC 7D 00 C8 28 7D 40 C9 2D 40 A2 FF F8
+==== BB 2087 (0xFE86C84) approx BBs exec'd 0 ====
+
+	0xFE86C84:  2F080001  cmpi cr6,r8,1
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFE86C88:  41990240  bc 12,25,0xFE86EC8
+	   5: Js25o       	$0xFE86EC8
+
+
+
+. 2087 FE86C84 8
+. 2F 08 00 01 41 99 02 40
+==== BB 2088 (0xFE86C8C) approx BBs exec'd 0 ====
+
+	0xFE86C8C:  807F0090  lwz r3,144(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x90, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE86C90:  481236A1  bl 0xFFAA330
+	   5: MOVL       	$0xFE86C94, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xFFAA330  ($4)
+
+
+
+. 2088 FE86C8C 8
+. 80 7F 00 90 48 12 36 A1
+==== BB 2089 (0xFE86C94) approx BBs exec'd 0 ====
+
+	0xFE86C94:  7EE3BB78  or r3,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE86C98:  4BFFFC6C  b 0xFE86904
+	   3: JMPo       	$0xFE86904  ($4)
+
+
+
+. 2089 FE86C94 8
+. 7E E3 BB 78 4B FF FC 6C
+==== BB 2090 (0xFE86904) approx BBs exec'd 0 ====
+
+	0xFE86904:  80A10000  lwz r5,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFE86908:  83650004  lwz r27,4(r5)
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFE8690C:  8245FFC8  lwz r18,-56(r5)
+	   9: GETL       	R5, t8
+	  10: ADDL       	$0xFFFFFFC8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R18
+	  13: INCEIPL       	$4
+
+	0xFE86910:  7F6803A6  mtlr r27
+	  14: GETL       	R27, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFE86914:  8265FFCC  lwz r19,-52(r5)
+	  17: GETL       	R5, t14
+	  18: ADDL       	$0xFFFFFFCC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R19
+	  21: INCEIPL       	$4
+
+	0xFE86918:  8285FFD0  lwz r20,-48(r5)
+	  22: GETL       	R5, t18
+	  23: ADDL       	$0xFFFFFFD0, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R20
+	  26: INCEIPL       	$4
+
+	0xFE8691C:  82A5FFD4  lwz r21,-44(r5)
+	  27: GETL       	R5, t22
+	  28: ADDL       	$0xFFFFFFD4, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R21
+	  31: INCEIPL       	$4
+
+	0xFE86920:  82C5FFD8  lwz r22,-40(r5)
+	  32: GETL       	R5, t26
+	  33: ADDL       	$0xFFFFFFD8, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R22
+	  36: INCEIPL       	$4
+
+	0xFE86924:  82E5FFDC  lwz r23,-36(r5)
+	  37: GETL       	R5, t30
+	  38: ADDL       	$0xFFFFFFDC, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R23
+	  41: INCEIPL       	$4
+
+	0xFE86928:  8305FFE0  lwz r24,-32(r5)
+	  42: GETL       	R5, t34
+	  43: ADDL       	$0xFFFFFFE0, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R24
+	  46: INCEIPL       	$4
+
+	0xFE8692C:  8325FFE4  lwz r25,-28(r5)
+	  47: GETL       	R5, t38
+	  48: ADDL       	$0xFFFFFFE4, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R25
+	  51: INCEIPL       	$4
+
+	0xFE86930:  8345FFE8  lwz r26,-24(r5)
+	  52: GETL       	R5, t42
+	  53: ADDL       	$0xFFFFFFE8, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R26
+	  56: INCEIPL       	$4
+
+	0xFE86934:  8365FFEC  lwz r27,-20(r5)
+	  57: GETL       	R5, t46
+	  58: ADDL       	$0xFFFFFFEC, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R27
+	  61: INCEIPL       	$4
+
+	0xFE86938:  8385FFF0  lwz r28,-16(r5)
+	  62: GETL       	R5, t50
+	  63: ADDL       	$0xFFFFFFF0, t50
+	  64: LDL       	(t50), t52
+	  65: PUTL       	t52, R28
+	  66: INCEIPL       	$4
+
+	0xFE8693C:  83A5FFF4  lwz r29,-12(r5)
+	  67: GETL       	R5, t54
+	  68: ADDL       	$0xFFFFFFF4, t54
+	  69: LDL       	(t54), t56
+	  70: PUTL       	t56, R29
+	  71: INCEIPL       	$4
+
+	0xFE86940:  83C5FFF8  lwz r30,-8(r5)
+	  72: GETL       	R5, t58
+	  73: ADDL       	$0xFFFFFFF8, t58
+	  74: LDL       	(t58), t60
+	  75: PUTL       	t60, R30
+	  76: INCEIPL       	$4
+
+	0xFE86944:  83E5FFFC  lwz r31,-4(r5)
+	  77: GETL       	R5, t62
+	  78: ADDL       	$0xFFFFFFFC, t62
+	  79: LDL       	(t62), t64
+	  80: PUTL       	t64, R31
+	  81: INCEIPL       	$4
+
+	0xFE86948:  7CA12B78  or r1,r5,r5
+	  82: GETL       	R5, t66
+	  83: PUTL       	t66, R1
+	  84: INCEIPL       	$4
+
+	0xFE8694C:  4E800020  blr
+	  85: GETL       	LR, t68
+	  86: JMPo-r       	t68  ($4)
+
+
+
+. 2090 FE86904 76
+. 80 A1 00 00 83 65 00 04 82 45 FF C8 7F 68 03 A6 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+==== BB 2091 (0x10001614) approx BBs exec'd 0 ====
+
+	0x10001614:  809E0088  lwz r4,136(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x88, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x10001618:  807E008C  lwz r3,140(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x8C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x1000161C:  837E0204  lwz r27,516(r30)
+	  10: GETL       	R30, t8
+	  11: ADDL       	$0x204, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R27
+	  14: INCEIPL       	$4
+
+	0x10001620:  480195CD  bl 0x1001ABEC
+	  15: MOVL       	$0x10001624, t12
+	  16: PUTL       	t12, LR
+	  17: JMPo-c       	$0x1001ABEC  ($4)
+
+
+
+. 2091 10001614 16
+. 80 9E 00 88 80 7E 00 8C 83 7E 02 04 48 01 95 CD
+==== BB 2092 (0x1001ABEC) approx BBs exec'd 0 ====
+
+	0x1001ABEC:  3960005C  li r11,92
+	   0: MOVL       	$0x5C, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001ABF0:  4BFFFF1C  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 2092 1001ABEC 8
+. 39 60 00 5C 4B FF FF 1C
+==== BB 2093 bindtextdomain(0xFE8A420) approx BBs exec'd 0 ====
+
+	0xFE8A420:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFE8A424:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFE8A428:  90810008  stw r4,8(r1)
+	   9: GETL       	R4, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE8A42C:  38810008  addi r4,r1,8
+	  14: GETL       	R1, t10
+	  15: ADDL       	$0x8, t10
+	  16: PUTL       	t10, R4
+	  17: INCEIPL       	$4
+
+	0xFE8A430:  90A10024  stw r5,36(r1)
+	  18: GETL       	R5, t12
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x24, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0xFE8A434:  38A00000  li r5,0
+	  23: MOVL       	$0x0, t16
+	  24: PUTL       	t16, R5
+	  25: INCEIPL       	$4
+
+	0xFE8A438:  4BFFFBB1  bl 0xFE89FE8
+	  26: MOVL       	$0xFE8A43C, t18
+	  27: PUTL       	t18, LR
+	  28: JMPo-c       	$0xFE89FE8  ($4)
+
+
+
+. 2093 FE8A420 28
+. 7C A8 02 A6 94 21 FF E0 90 81 00 08 38 81 00 08 90 A1 00 24 38 A0 00 00 4B FF FB B1
+==== BB 2094 set_binding_values(0xFE89FE8) approx BBs exec'd 0 ====
+
+	0xFE89FE8:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE89FEC:  7D800026  mfcr r12
+	   6: GETL       	CR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFE89FF0:  93810020  stw r28,32(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x20, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE89FF4:  7C7C1B79  or. r28,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R28
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFE89FF8:  7C0802A6  mflr r0
+	  19: GETL       	LR, t14
+	  20: PUTL       	t14, R0
+	  21: INCEIPL       	$4
+
+	0xFE89FFC:  4811DE55  bl 0xFFA7E50
+	  22: MOVL       	$0xFE8A000, t16
+	  23: PUTL       	t16, LR
+	  24: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2094 FE89FE8 24
+. 94 21 FF D0 7D 80 00 26 93 81 00 20 7C 7C 1B 79 7C 08 02 A6 48 11 DE 55
+==== BB 2095 (0xFE8A000) approx BBs exec'd 0 ====
+
+	0xFE8A000:  93410018  stw r26,24(r1)
+	   0: GETL       	R26, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8A004:  7CBA2B78  or r26,r5,r5
+	   5: GETL       	R5, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xFE8A008:  93A10024  stw r29,36(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x24, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE8A00C:  7C9D2378  or r29,r4,r4
+	  13: GETL       	R4, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0xFE8A010:  93C10028  stw r30,40(r1)
+	  16: GETL       	R30, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x28, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFE8A014:  7FC802A6  mflr r30
+	  21: GETL       	LR, t16
+	  22: PUTL       	t16, R30
+	  23: INCEIPL       	$4
+
+	0xFE8A018:  92E1000C  stw r23,12(r1)
+	  24: GETL       	R23, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0xC, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFE8A01C:  93010010  stw r24,16(r1)
+	  29: GETL       	R24, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x10, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE8A020:  93210014  stw r25,20(r1)
+	  34: GETL       	R25, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x14, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFE8A024:  9361001C  stw r27,28(r1)
+	  39: GETL       	R27, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x1C, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFE8A028:  93E1002C  stw r31,44(r1)
+	  44: GETL       	R31, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x2C, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFE8A02C:  90010034  stw r0,52(r1)
+	  49: GETL       	R0, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x34, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFE8A030:  91810008  stw r12,8(r1)
+	  54: GETL       	R12, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x8, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0xFE8A034:  4182014C  bc 12,2,0xFE8A180
+	  59: Js02o       	$0xFE8A180
+
+
+
+. 2095 FE8A000 56
+. 93 41 00 18 7C BA 2B 78 93 A1 00 24 7C 9D 23 78 93 C1 00 28 7F C8 02 A6 92 E1 00 0C 93 01 00 10 93 21 00 14 93 61 00 1C 93 E1 00 2C 90 01 00 34 91 81 00 08 41 82 01 4C
+==== BB 2096 (0xFE8A038) approx BBs exec'd 0 ====
+
+	0xFE8A038:  887C0000  lbz r3,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8A03C:  2F830000  cmpi cr7,r3,0
+	   4: GETL       	R3, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFE8A040:  419E0140  bc 12,30,0xFE8A180
+	   8: Js30o       	$0xFE8A180
+
+
+
+. 2096 FE8A038 12
+. 88 7C 00 00 2F 83 00 00 41 9E 01 40
+==== BB 2097 (0xFE8A044) approx BBs exec'd 0 ====
+
+	0xFE8A044:  82FE1B48  lwz r23,6984(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFE8A048:  801700A4  lwz r0,164(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0xA4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFE8A04C:  2E000000  cmpi cr4,r0,0
+	  10: GETL       	R0, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x4, CR
+	  13: INCEIPL       	$4
+
+	0xFE8A050:  4092018C  bc 4,18,0xFE8A1DC
+	  14: Jc18o       	$0xFE8A1DC
+
+
+
+. 2097 FE8A044 16
+. 82 FE 1B 48 80 17 00 A4 2E 00 00 00 40 92 01 8C
+==== BB 2098 (0xFE8A054) approx BBs exec'd 0 ====
+
+	0xFE8A054:  833E1AC8  lwz r25,6856(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1AC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0xFE8A058:  3B000000  li r24,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R24
+	   7: INCEIPL       	$4
+
+	0xFE8A05C:  83F90000  lwz r31,0(r25)
+	   8: GETL       	R25, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R31
+	  11: INCEIPL       	$4
+
+	0xFE8A060:  48000020  b 0xFE8A080
+	  12: JMPo       	$0xFE8A080  ($4)
+
+
+
+. 2098 FE8A054 16
+. 83 3E 1A C8 3B 00 00 00 83 F9 00 00 48 00 00 20
+==== BB 2099 (0xFE8A080) approx BBs exec'd 0 ====
+
+	0xFE8A080:  2E1F0000  cmpi cr4,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFE8A084:  4092FFE0  bc 4,18,0xFE8A064
+	   4: Jc18o       	$0xFE8A064
+
+
+
+. 2099 FE8A080 8
+. 2E 1F 00 00 40 92 FF E0
+==== BB 2100 (0xFE8A088) approx BBs exec'd 0 ====
+
+	0xFE8A088:  2E1D0000  cmpi cr4,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFE8A08C:  41920014  bc 12,18,0xFE8A0A0
+	   4: Js18o       	$0xFE8A0A0
+
+
+
+. 2100 FE8A088 8
+. 2E 1D 00 00 41 92 00 14
+==== BB 2101 (0xFE8A090) approx BBs exec'd 0 ====
+
+	0xFE8A090:  817D0000  lwz r11,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFE8A094:  2D9A0000  cmpi cr3,r26,0
+	   4: GETL       	R26, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x3, CR
+	   7: INCEIPL       	$4
+
+	0xFE8A098:  2F0B0000  cmpi cr6,r11,0
+	   8: GETL       	R11, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFE8A09C:  409A0018  bc 4,26,0xFE8A0B4
+	  12: Jc26o       	$0xFE8A0B4
+
+
+
+. 2101 FE8A090 16
+. 81 7D 00 00 2D 9A 00 00 2F 0B 00 00 40 9A 00 18
+==== BB 2102 (0xFE8A0B4) approx BBs exec'd 0 ====
+
+	0xFE8A0B4:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8A0B8:  4804CA21  bl 0xFED6AD8
+	   3: MOVL       	$0xFE8A0BC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2102 FE8A0B4 8
+. 7F 83 E3 78 48 04 CA 21
+==== BB 2103 (0xFE8A0BC) approx BBs exec'd 0 ====
+
+	0xFE8A0BC:  7C7B1B78  or r27,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0xFE8A0C0:  38630011  addi r3,r3,17
+	   3: GETL       	R3, t2
+	   4: ADDL       	$0x11, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0xFE8A0C4:  3BFB0001  addi r31,r27,1
+	   7: GETL       	R27, t4
+	   8: ADDL       	$0x1, t4
+	   9: PUTL       	t4, R31
+	  10: INCEIPL       	$4
+
+	0xFE8A0C8:  48120241  bl 0xFFAA308
+	  11: MOVL       	$0xFE8A0CC, t6
+	  12: PUTL       	t6, LR
+	  13: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 2103 FE8A0BC 16
+. 7C 7B 1B 78 38 63 00 11 3B FB 00 01 48 12 02 41
+==== BB 2104 (0xFE8A0CC) approx BBs exec'd 0 ====
+
+	0xFE8A0CC:  7C7B1B79  or. r27,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R27
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8A0D0:  418202E0  bc 12,2,0xFE8A3B0
+	   5: Js02o       	$0xFE8A3B0
+
+
+
+. 2104 FE8A0CC 8
+. 7C 7B 1B 79 41 82 02 E0
+==== BB 2105 (0xFE8A0D4) approx BBs exec'd 0 ====
+
+	0xFE8A0D4:  7FE5FB78  or r5,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFE8A0D8:  387B0010  addi r3,r27,16
+	   3: GETL       	R27, t2
+	   4: ADDL       	$0x10, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0xFE8A0DC:  7F84E378  or r4,r28,r28
+	   7: GETL       	R28, t4
+	   8: PUTL       	t4, R4
+	   9: INCEIPL       	$4
+
+	0xFE8A0E0:  4804E5F9  bl 0xFED86D8
+	  10: MOVL       	$0xFE8A0E4, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2105 FE8A0D4 16
+. 7F E5 FB 78 38 7B 00 10 7F 84 E3 78 48 04 E5 F9
+==== BB 2106 (0xFE8A0E4) approx BBs exec'd 0 ====
+
+	0xFE8A0E4:  41920108  bc 12,18,0xFE8A1EC
+	   0: Js18o       	$0xFE8A1EC
+
+
+
+. 2106 FE8A0E4 4
+. 41 92 01 08
+==== BB 2107 (0xFE8A0E8) approx BBs exec'd 0 ====
+
+	0xFE8A0E8:  83FD0000  lwz r31,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0xFE8A0EC:  807E1C48  lwz r3,7240(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1C48, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFE8A0F0:  2C9F0000  cmpi cr1,r31,0
+	   9: GETL       	R31, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0xFE8A0F4:  4186001C  bc 12,6,0xFE8A110
+	  13: Js06o       	$0xFE8A110
+
+
+
+. 2107 FE8A0E8 16
+. 83 FD 00 00 80 7E 1C 48 2C 9F 00 00 41 86 00 1C
+==== BB 2108 (0xFE8A0F8) approx BBs exec'd 0 ====
+
+	0xFE8A0F8:  7C641B78  or r4,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE8A0FC:  7FE3FB78  or r3,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFE8A100:  4804C2B1  bl 0xFED63B0
+	   6: MOVL       	$0xFE8A104, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2108 FE8A0F8 12
+. 7C 64 1B 78 7F E3 FB 78 48 04 C2 B1
+==== BB 2109 (0xFE8A104) approx BBs exec'd 0 ====
+
+	0xFE8A104:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE8A108:  807E1C48  lwz r3,7240(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1C48, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFE8A10C:  409A028C  bc 4,26,0xFE8A398
+	   9: Jc26o       	$0xFE8A398
+
+
+
+. 2109 FE8A104 12
+. 2F 03 00 00 80 7E 1C 48 40 9A 02 8C
+==== BB 2110 (0xFE8A110) approx BBs exec'd 0 ====
+
+	0xFE8A110:  39800000  li r12,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0xFE8A114:  907D0000  stw r3,0(r29)
+	   3: GETL       	R3, t2
+	   4: GETL       	R29, t4
+	   5: STL       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFE8A118:  919B0008  stw r12,8(r27)
+	   7: GETL       	R12, t6
+	   8: GETL       	R27, t8
+	   9: ADDL       	$0x8, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE8A11C:  907B0004  stw r3,4(r27)
+	  12: GETL       	R3, t10
+	  13: GETL       	R27, t12
+	  14: ADDL       	$0x4, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0xFE8A120:  408E00E0  bc 4,14,0xFE8A200
+	  17: Jc14o       	$0xFE8A200
+
+
+
+. 2110 FE8A110 20
+. 39 80 00 00 90 7D 00 00 91 9B 00 08 90 7B 00 04 40 8E 00 E0
+==== BB 2111 (0xFE8A124) approx BBs exec'd 0 ====
+
+	0xFE8A124:  935B000C  stw r26,12(r27)
+	   0: GETL       	R26, t0
+	   1: GETL       	R27, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8A128:  83F90000  lwz r31,0(r25)
+	   5: GETL       	R25, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R31
+	   8: INCEIPL       	$4
+
+	0xFE8A12C:  2E1F0000  cmpi cr4,r31,0
+	   9: GETL       	R31, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0xFE8A130:  41920100  bc 12,18,0xFE8A230
+	  13: Js18o       	$0xFE8A230
+
+
+
+. 2111 FE8A124 16
+. 93 5B 00 0C 83 F9 00 00 2E 1F 00 00 41 92 01 00
+==== BB 2112 (0xFE8A230) approx BBs exec'd 0 ====
+
+	0xFE8A230:  93FB0000  stw r31,0(r27)
+	   0: GETL       	R31, t0
+	   1: GETL       	R27, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE8A234:  93790000  stw r27,0(r25)
+	   4: GETL       	R27, t4
+	   5: GETL       	R25, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0xFE8A238:  835E1B1C  lwz r26,6940(r30)
+	   8: GETL       	R30, t8
+	   9: ADDL       	$0x1B1C, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R26
+	  12: INCEIPL       	$4
+
+	0xFE8A23C:  80DA0000  lwz r6,0(r26)
+	  13: GETL       	R26, t12
+	  14: LDL       	(t12), t14
+	  15: PUTL       	t14, R6
+	  16: INCEIPL       	$4
+
+	0xFE8A240:  38A60001  addi r5,r6,1
+	  17: GETL       	R6, t16
+	  18: ADDL       	$0x1, t16
+	  19: PUTL       	t16, R5
+	  20: INCEIPL       	$4
+
+	0xFE8A244:  90BA0000  stw r5,0(r26)
+	  21: GETL       	R5, t18
+	  22: GETL       	R26, t20
+	  23: STL       	t18, (t20)
+	  24: INCEIPL       	$4
+
+	0xFE8A248:  801700A8  lwz r0,168(r23)
+	  25: GETL       	R23, t22
+	  26: ADDL       	$0xA8, t22
+	  27: LDL       	(t22), t24
+	  28: PUTL       	t24, R0
+	  29: INCEIPL       	$4
+
+	0xFE8A24C:  2E000000  cmpi cr4,r0,0
+	  30: GETL       	R0, t26
+	  31: CMP0L       	t26, t28  (-rSo)
+	  32: ICRFL       	t28, $0x4, CR
+	  33: INCEIPL       	$4
+
+	0xFE8A250:  4192FF50  bc 12,18,0xFE8A1A0
+	  34: Js18o       	$0xFE8A1A0
+
+
+
+. 2112 FE8A230 36
+. 93 FB 00 00 93 79 00 00 83 5E 1B 1C 80 DA 00 00 38 A6 00 01 90 BA 00 00 80 17 00 A8 2E 00 00 00 41 92 FF 50
+==== BB 2113 (0xFE8A1A0) approx BBs exec'd 0 ====
+
+	0xFE8A1A0:  82E10034  lwz r23,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFE8A1A4:  81810008  lwz r12,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0xFE8A1A8:  7EE803A6  mtlr r23
+	  10: GETL       	R23, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFE8A1AC:  83010010  lwz r24,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R24
+	  17: INCEIPL       	$4
+
+	0xFE8A1B0:  82E1000C  lwz r23,12(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0xC, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R23
+	  22: INCEIPL       	$4
+
+	0xFE8A1B4:  7D818120  mtcrf 0x18,r12
+	  23: GETL       	R12, t18
+	  24: ICRFL       	t18, $0x3, CR
+	  25: ICRFL       	t18, $0x4, CR
+	  26: INCEIPL       	$4
+
+	0xFE8A1B8:  83210014  lwz r25,20(r1)
+	  27: GETL       	R1, t20
+	  28: ADDL       	$0x14, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R25
+	  31: INCEIPL       	$4
+
+	0xFE8A1BC:  83410018  lwz r26,24(r1)
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x18, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R26
+	  36: INCEIPL       	$4
+
+	0xFE8A1C0:  8361001C  lwz r27,28(r1)
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x1C, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R27
+	  41: INCEIPL       	$4
+
+	0xFE8A1C4:  83810020  lwz r28,32(r1)
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x20, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R28
+	  46: INCEIPL       	$4
+
+	0xFE8A1C8:  83A10024  lwz r29,36(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x24, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R29
+	  51: INCEIPL       	$4
+
+	0xFE8A1CC:  83C10028  lwz r30,40(r1)
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x28, t40
+	  54: LDL       	(t40), t42
+	  55: PUTL       	t42, R30
+	  56: INCEIPL       	$4
+
+	0xFE8A1D0:  83E1002C  lwz r31,44(r1)
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x2C, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R31
+	  61: INCEIPL       	$4
+
+	0xFE8A1D4:  38210030  addi r1,r1,48
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x30, t48
+	  64: PUTL       	t48, R1
+	  65: INCEIPL       	$4
+
+	0xFE8A1D8:  4E800020  blr
+	  66: GETL       	LR, t50
+	  67: JMPo-r       	t50  ($4)
+
+
+
+. 2113 FE8A1A0 60
+. 82 E1 00 34 81 81 00 08 7E E8 03 A6 83 01 00 10 82 E1 00 0C 7D 81 81 20 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 2114 (0xFE8A43C) approx BBs exec'd 0 ====
+
+	0xFE8A43C:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8A440:  80610008  lwz r3,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFE8A444:  38210020  addi r1,r1,32
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x20, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFE8A448:  7C0803A6  mtlr r0
+	  14: GETL       	R0, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFE8A44C:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 2114 FE8A43C 20
+. 80 01 00 24 80 61 00 08 38 21 00 20 7C 08 03 A6 4E 80 00 20
+==== BB 2115 (0x10001624) approx BBs exec'd 0 ====
+
+	0x10001624:  807E008C  lwz r3,140(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x10001628:  821E0018  lwz r16,24(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R16
+	   9: INCEIPL       	$4
+
+	0x1000162C:  48019621  bl 0x1001AC4C
+	  10: MOVL       	$0x10001630, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0x1001AC4C  ($4)
+
+
+
+. 2115 10001624 12
+. 80 7E 00 8C 82 1E 00 18 48 01 96 21
+==== BB 2116 (0x1001AC4C) approx BBs exec'd 0 ====
+
+	0x1001AC4C:  3960008C  li r11,140
+	   0: MOVL       	$0x8C, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AC50:  4BFFFEBC  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 2116 1001AC4C 8
+. 39 60 00 8C 4B FF FE BC
+==== BB 2117 textdomain(0xFE8D790) approx BBs exec'd 0 ====
+
+	0xFE8D790:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE8D794:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE8D798:  93E1001C  stw r31,28(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE8D79C:  7C7F1B79  or. r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFE8D7A0:  4811A6B1  bl 0xFFA7E50
+	  19: MOVL       	$0xFE8D7A4, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2117 FE8D790 20
+. 94 21 FF E0 7C 08 02 A6 93 E1 00 1C 7C 7F 1B 79 48 11 A6 B1
+==== BB 2118 (0xFE8D7A4) approx BBs exec'd 0 ====
+
+	0xFE8D7A4:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8D7A8:  93410008  stw r26,8(r1)
+	   5: GETL       	R26, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE8D7AC:  7FC802A6  mflr r30
+	  10: GETL       	LR, t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xFE8D7B0:  9361000C  stw r27,12(r1)
+	  13: GETL       	R27, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xC, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE8D7B4:  93810010  stw r28,16(r1)
+	  18: GETL       	R28, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x10, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFE8D7B8:  93A10014  stw r29,20(r1)
+	  23: GETL       	R29, t18
+	  24: GETL       	R1, t20
+	  25: ADDL       	$0x14, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0xFE8D7BC:  90010024  stw r0,36(r1)
+	  28: GETL       	R0, t22
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0x24, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0xFE8D7C0:  41820108  bc 12,2,0xFE8D8C8
+	  33: Js02o       	$0xFE8D8C8
+
+
+
+. 2118 FE8D7A4 32
+. 93 C1 00 18 93 41 00 08 7F C8 02 A6 93 61 00 0C 93 81 00 10 93 A1 00 14 90 01 00 24 41 82 01 08
+==== BB 2119 (0xFE8D7C4) approx BBs exec'd 0 ====
+
+	0xFE8D7C4:  835E1B48  lwz r26,6984(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFE8D7C8:  801A00A4  lwz r0,164(r26)
+	   5: GETL       	R26, t4
+	   6: ADDL       	$0xA4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFE8D7CC:  2F800000  cmpi cr7,r0,0
+	  10: GETL       	R0, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0xFE8D7D0:  409E00D0  bc 4,30,0xFE8D8A0
+	  14: Jc30o       	$0xFE8D8A0
+
+
+
+. 2119 FE8D7C4 16
+. 83 5E 1B 48 80 1A 00 A4 2F 80 00 00 40 9E 00 D0
+==== BB 2120 (0xFE8D7D4) approx BBs exec'd 0 ====
+
+	0xFE8D7D4:  887F0000  lbz r3,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8D7D8:  837E1B14  lwz r27,6932(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1B14, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFE8D7DC:  2C830000  cmpi cr1,r3,0
+	   9: GETL       	R3, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0xFE8D7E0:  839B0000  lwz r28,0(r27)
+	  13: GETL       	R27, t12
+	  14: LDL       	(t12), t14
+	  15: PUTL       	t14, R28
+	  16: INCEIPL       	$4
+
+	0xFE8D7E4:  4186009C  bc 12,6,0xFE8D880
+	  17: Js06o       	$0xFE8D880
+
+
+
+. 2120 FE8D7D4 20
+. 88 7F 00 00 83 7E 1B 14 2C 83 00 00 83 9B 00 00 41 86 00 9C
+==== BB 2121 (0xFE8D7E8) approx BBs exec'd 0 ====
+
+	0xFE8D7E8:  809E1DD0  lwz r4,7632(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DD0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8D7EC:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE8D7F0:  48048BC1  bl 0xFED63B0
+	   8: MOVL       	$0xFE8D7F4, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2121 FE8D7E8 12
+. 80 9E 1D D0 7F E3 FB 78 48 04 8B C1
+==== BB 2122 (0xFE8D7F4) approx BBs exec'd 0 ====
+
+	0xFE8D7F4:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D7F8:  419A0088  bc 12,26,0xFE8D880
+	   4: Js26o       	$0xFE8D880
+
+
+
+. 2122 FE8D7F4 8
+. 2F 03 00 00 41 9A 00 88
+==== BB 2123 (0xFE8D7FC) approx BBs exec'd 0 ====
+
+	0xFE8D7FC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D800:  7F84E378  or r4,r28,r28
+	   3: GETL       	R28, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8D804:  48048BAD  bl 0xFED63B0
+	   6: MOVL       	$0xFE8D808, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2123 FE8D7FC 12
+. 7F E3 FB 78 7F 84 E3 78 48 04 8B AD
+==== BB 2124 (0xFE8D808) approx BBs exec'd 0 ====
+
+	0xFE8D808:  7F9DE379  or. r29,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8D80C:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFE8D810:  409E00A0  bc 4,30,0xFE8D8B0
+	   9: Jc30o       	$0xFE8D8B0
+
+
+
+. 2124 FE8D808 12
+. 7F 9D E3 79 2F 83 00 00 40 9E 00 A0
+==== BB 2125 (0xFE8D8B0) approx BBs exec'd 0 ====
+
+	0xFE8D8B0:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D8B4:  48048EE9  bl 0xFED679C
+	   3: MOVL       	$0xFE8D8B8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED679C  ($4)
+
+
+
+. 2125 FE8D8B0 8
+. 7F E3 FB 78 48 04 8E E9
+==== BB 2126 (0xFE8D8B8) approx BBs exec'd 0 ====
+
+	0xFE8D8B8:  7C7D1B79  or. r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8D8BC:  41A2FF88  bc 13,2,0xFE8D844
+	   5: Js02o       	$0xFE8D844
+
+
+
+. 2126 FE8D8B8 8
+. 7C 7D 1B 79 41 A2 FF 88
+==== BB 2127 (0xFE8D8C0) approx BBs exec'd 0 ====
+
+	0xFE8D8C0:  93BB0000  stw r29,0(r27)
+	   0: GETL       	R29, t0
+	   1: GETL       	R27, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE8D8C4:  4BFFFF50  b 0xFE8D814
+	   4: JMPo       	$0xFE8D814  ($4)
+
+
+
+. 2127 FE8D8C0 8
+. 93 BB 00 00 4B FF FF 50
+==== BB 2128 (0xFE8D814) approx BBs exec'd 0 ====
+
+	0xFE8D814:  41820030  bc 12,2,0xFE8D844
+	   0: Js02o       	$0xFE8D844
+
+
+
+. 2128 FE8D814 4
+. 41 82 00 30
+==== BB 2129 (0xFE8D818) approx BBs exec'd 0 ====
+
+	0xFE8D818:  80BE1B1C  lwz r5,6940(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFE8D81C:  7C1CE800  cmp cr0,r28,r29
+	   5: GETL       	R28, t4
+	   6: GETL       	R29, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE8D820:  80C50000  lwz r6,0(r5)
+	  10: GETL       	R5, t10
+	  11: LDL       	(t10), t12
+	  12: PUTL       	t12, R6
+	  13: INCEIPL       	$4
+
+	0xFE8D824:  39660001  addi r11,r6,1
+	  14: GETL       	R6, t14
+	  15: ADDL       	$0x1, t14
+	  16: PUTL       	t14, R11
+	  17: INCEIPL       	$4
+
+	0xFE8D828:  91650000  stw r11,0(r5)
+	  18: GETL       	R11, t16
+	  19: GETL       	R5, t18
+	  20: STL       	t16, (t18)
+	  21: INCEIPL       	$4
+
+	0xFE8D82C:  41820018  bc 12,2,0xFE8D844
+	  22: Js02o       	$0xFE8D844
+
+
+
+. 2129 FE8D818 24
+. 80 BE 1B 1C 7C 1C E8 00 80 C5 00 00 39 66 00 01 91 65 00 00 41 82 00 18
+==== BB 2130 (0xFE8D830) approx BBs exec'd 0 ====
+
+	0xFE8D830:  80FE1DD0  lwz r7,7632(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DD0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFE8D834:  7C9C3800  cmp cr1,r28,r7
+	   5: GETL       	R28, t4
+	   6: GETL       	R7, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFE8D838:  4186000C  bc 12,6,0xFE8D844
+	  10: Js06o       	$0xFE8D844
+
+
+
+. 2130 FE8D830 12
+. 80 FE 1D D0 7C 9C 38 00 41 86 00 0C
+==== BB 2131 (0xFE8D844) approx BBs exec'd 0 ====
+
+	0xFE8D844:  801A00A8  lwz r0,168(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0xA8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8D848:  2F000000  cmpi cr6,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFE8D84C:  409A0044  bc 4,26,0xFE8D890
+	   9: Jc26o       	$0xFE8D890
+
+
+
+. 2131 FE8D844 12
+. 80 1A 00 A8 2F 00 00 00 40 9A 00 44
+==== BB 2132 (0xFE8D850) approx BBs exec'd 0 ====
+
+	0xFE8D850:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFE8D854:  7FA0EB78  or r0,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFE8D858:  7C030378  or r3,r0,r0
+	   8: GETL       	R0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFE8D85C:  83410008  lwz r26,8(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x8, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R26
+	  15: INCEIPL       	$4
+
+	0xFE8D860:  8361000C  lwz r27,12(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0xC, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R27
+	  20: INCEIPL       	$4
+
+	0xFE8D864:  7D0803A6  mtlr r8
+	  21: GETL       	R8, t16
+	  22: PUTL       	t16, LR
+	  23: INCEIPL       	$4
+
+	0xFE8D868:  83810010  lwz r28,16(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x10, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R28
+	  28: INCEIPL       	$4
+
+	0xFE8D86C:  83A10014  lwz r29,20(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x14, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R29
+	  33: INCEIPL       	$4
+
+	0xFE8D870:  83C10018  lwz r30,24(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x18, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R30
+	  38: INCEIPL       	$4
+
+	0xFE8D874:  83E1001C  lwz r31,28(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x1C, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R31
+	  43: INCEIPL       	$4
+
+	0xFE8D878:  38210020  addi r1,r1,32
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x20, t34
+	  46: PUTL       	t34, R1
+	  47: INCEIPL       	$4
+
+	0xFE8D87C:  4E800020  blr
+	  48: GETL       	LR, t36
+	  49: JMPo-r       	t36  ($4)
+
+
+
+. 2132 FE8D850 48
+. 81 01 00 24 7F A0 EB 78 7C 03 03 78 83 41 00 08 83 61 00 0C 7D 08 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2133 (0x10001630) approx BBs exec'd 0 ====
+
+	0x10001630:  807E01DC  lwz r3,476(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x10001634:  823E0014  lwz r17,20(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R17
+	   9: INCEIPL       	$4
+
+	0x10001638:  831E000C  lwz r24,12(r30)
+	  10: GETL       	R30, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R24
+	  14: INCEIPL       	$4
+
+	0x1000163C:  48006505  bl 0x10007B40
+	  15: MOVL       	$0x10001640, t12
+	  16: PUTL       	t12, LR
+	  17: JMPo-c       	$0x10007B40  ($4)
+
+
+
+. 2133 10001630 16
+. 80 7E 01 DC 82 3E 00 14 83 1E 00 0C 48 00 65 05
+==== BB 2134 (0x10007B40) approx BBs exec'd 0 ====
+
+	0x10007B40:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x10007B44:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x10007B48:  429F0005  bcl 20,31,0x10007B4C
+	   9: MOVL       	$0x10007B4C, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x10007B4C:  93C10018  stw r30,24(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x10007B50:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0x10007B54:  38A00000  li r5,0
+	  20: MOVL       	$0x0, t14
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0x10007B58:  90810024  stw r4,36(r1)
+	  23: GETL       	R4, t16
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x24, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0x10007B5C:  38800000  li r4,0
+	  28: MOVL       	$0x0, t20
+	  29: PUTL       	t20, R4
+	  30: INCEIPL       	$4
+
+	0x10007B60:  801EFFF0  lwz r0,-16(r30)
+	  31: GETL       	R30, t22
+	  32: ADDL       	$0xFFFFFFF0, t22
+	  33: LDL       	(t22), t24
+	  34: PUTL       	t24, R0
+	  35: INCEIPL       	$4
+
+	0x10007B64:  7FC0F214  add r30,r0,r30
+	  36: GETL       	R0, t26
+	  37: GETL       	R30, t28
+	  38: ADDL       	t26, t28
+	  39: PUTL       	t28, R30
+	  40: INCEIPL       	$4
+
+	0x10007B68:  813E8000  lwz r9,-32768(r30)
+	  41: GETL       	R30, t30
+	  42: ADDL       	$0xFFFF8000, t30
+	  43: LDL       	(t30), t32
+	  44: PUTL       	t32, R9
+	  45: INCEIPL       	$4
+
+	0x10007B6C:  2F890000  cmpi cr7,r9,0
+	  46: GETL       	R9, t34
+	  47: CMP0L       	t34, t36  (-rSo)
+	  48: ICRFL       	t36, $0x7, CR
+	  49: INCEIPL       	$4
+
+	0x10007B70:  419E0008  bc 12,30,0x10007B78
+	  50: Js30o       	$0x10007B78
+
+
+
+. 2134 10007B40 52
+. 7C 88 02 A6 94 21 FF E0 42 9F 00 05 93 C1 00 18 7F C8 02 A6 38 A0 00 00 90 81 00 24 38 80 00 00 80 1E FF F0 7F C0 F2 14 81 3E 80 00 2F 89 00 00 41 9E 00 08
+==== BB 2135 (0x10007B74) approx BBs exec'd 0 ====
+
+	0x10007B74:  80A90000  lwz r5,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x10007B78:  48013035  bl 0x1001ABAC
+	   4: MOVL       	$0x10007B7C, t4
+	   5: PUTL       	t4, LR
+	   6: JMPo-c       	$0x1001ABAC  ($4)
+
+
+
+. 2135 10007B74 8
+. 80 A9 00 00 48 01 30 35
+==== BB 2136 (0x1001ABAC) approx BBs exec'd 0 ====
+
+	0x1001ABAC:  3960003C  li r11,60
+	   0: MOVL       	$0x3C, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001ABB0:  4BFFFF5C  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 2136 1001ABAC 8
+. 39 60 00 3C 4B FF FF 5C
+==== BB 2137 (0xFE93DA4) approx BBs exec'd 0 ====
+
+	0xFE93DA4:  80890000  lwz r4,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFE93DA8:  39290010  addi r9,r9,16
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x10, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFE93DAC:  2C840000  cmpi cr1,r4,0
+	   8: GETL       	R4, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0xFE93DB0:  41860010  bc 12,6,0xFE93DC0
+	  12: Js06o       	$0xFE93DC0
+
+
+
+. 2137 FE93DA4 16
+. 80 89 00 00 39 29 00 10 2C 84 00 00 41 86 00 10
+==== BB 2138 (0x10007B7C) approx BBs exec'd 0 ====
+
+	0x10007B7C:  80A10024  lwz r5,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x10007B80:  83C10018  lwz r30,24(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0x10007B84:  38210020  addi r1,r1,32
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x20, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0x10007B88:  7CA803A6  mtlr r5
+	  14: GETL       	R5, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0x10007B8C:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 2138 10007B7C 20
+. 80 A1 00 24 83 C1 00 18 38 21 00 20 7C A8 03 A6 4E 80 00 20
+==== BB 2139 (0x10001640) approx BBs exec'd 0 ====
+
+	0x10001640:  80DE001C  lwz r6,28(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x10001644:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x10001648:  7FE4FB78  or r4,r31,r31
+	   8: GETL       	R31, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x1000164C:  7F85E378  or r5,r28,r28
+	  11: GETL       	R28, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x10001650:  38E00000  li r7,0
+	  14: MOVL       	$0x0, t10
+	  15: PUTL       	t10, R7
+	  16: INCEIPL       	$4
+
+	0x10001654:  48019629  bl 0x1001AC7C
+	  17: MOVL       	$0x10001658, t12
+	  18: PUTL       	t12, LR
+	  19: JMPo-c       	$0x1001AC7C  ($4)
+
+
+
+. 2139 10001640 24
+. 80 DE 00 1C 7F A3 EB 78 7F E4 FB 78 7F 85 E3 78 38 E0 00 00 48 01 96 29
+==== BB 2140 (0x1001AC7C) approx BBs exec'd 0 ====
+
+	0x1001AC7C:  396000A4  li r11,164
+	   0: MOVL       	$0xA4, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AC80:  4BFFFE8C  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 2140 1001AC7C 8
+. 39 60 00 A4 4B FF FE 8C
+==== BB 2141 getopt_long(0xFF17FB4) approx BBs exec'd 0 ====
+
+	0xFF17FB4:  7D0802A6  mflr r8
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0xFF17FB8:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFF17FBC:  91010014  stw r8,20(r1)
+	   9: GETL       	R8, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFF17FC0:  39000000  li r8,0
+	  14: MOVL       	$0x0, t10
+	  15: PUTL       	t10, R8
+	  16: INCEIPL       	$4
+
+	0xFF17FC4:  4BFFFF45  bl 0xFF17F08
+	  17: MOVL       	$0xFF17FC8, t12
+	  18: PUTL       	t12, LR
+	  19: JMPo-c       	$0xFF17F08  ($4)
+
+
+
+. 2141 FF17FB4 20
+. 7D 08 02 A6 94 21 FF F0 91 01 00 14 39 00 00 00 4B FF FF 45
+==== BB 2142 _getopt_internal(0xFF17F08) approx BBs exec'd 0 ====
+
+	0xFF17F08:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF17F0C:  7D2802A6  mflr r9
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xFF17F10:  4808FF41  bl 0xFFA7E50
+	   9: MOVL       	$0xFF17F14, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2142 FF17F08 12
+. 94 21 FF E0 7D 28 02 A6 48 08 FF 41
+==== BB 2143 (0xFF17F14) approx BBs exec'd 0 ====
+
+	0xFF17F14:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF17F18:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFF17F1C:  93810010  stw r28,16(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFF17F20:  91210024  stw r9,36(r1)
+	  13: GETL       	R9, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF17F24:  93A10014  stw r29,20(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFF17F28:  839E1C14  lwz r28,7188(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x1C14, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R28
+	  27: INCEIPL       	$4
+
+	0xFF17F2C:  817E1B58  lwz r11,7000(r30)
+	  28: GETL       	R30, t22
+	  29: ADDL       	$0x1B58, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R11
+	  32: INCEIPL       	$4
+
+	0xFF17F30:  83BE0BAC  lwz r29,2988(r30)
+	  33: GETL       	R30, t26
+	  34: ADDL       	$0xBAC, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R29
+	  37: INCEIPL       	$4
+
+	0xFF17F34:  801C0000  lwz r0,0(r28)
+	  38: GETL       	R28, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R0
+	  41: INCEIPL       	$4
+
+	0xFF17F38:  818B0000  lwz r12,0(r11)
+	  42: GETL       	R11, t34
+	  43: LDL       	(t34), t36
+	  44: PUTL       	t36, R12
+	  45: INCEIPL       	$4
+
+	0xFF17F3C:  7FA9EB78  or r9,r29,r29
+	  46: GETL       	R29, t38
+	  47: PUTL       	t38, R9
+	  48: INCEIPL       	$4
+
+	0xFF17F40:  901D0000  stw r0,0(r29)
+	  49: GETL       	R0, t40
+	  50: GETL       	R29, t42
+	  51: STL       	t40, (t42)
+	  52: INCEIPL       	$4
+
+	0xFF17F44:  919D0004  stw r12,4(r29)
+	  53: GETL       	R12, t44
+	  54: GETL       	R29, t46
+	  55: ADDL       	$0x4, t46
+	  56: STL       	t44, (t46)
+	  57: INCEIPL       	$4
+
+	0xFF17F48:  4BFFEDE1  bl 0xFF16D28
+	  58: MOVL       	$0xFF17F4C, t48
+	  59: PUTL       	t48, LR
+	  60: JMPo-c       	$0xFF16D28  ($4)
+
+
+
+. 2143 FF17F14 56
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 91 21 00 24 93 A1 00 14 83 9E 1C 14 81 7E 1B 58 83 BE 0B AC 80 1C 00 00 81 8B 00 00 7F A9 EB 78 90 1D 00 00 91 9D 00 04 4B FF ED E1
+==== BB 2144 _getopt_internal_r(0xFF16D28) approx BBs exec'd 0 ====
+
+	0xFF16D28:  7D6802A6  mflr r11
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFF16D2C:  7D800026  mfcr r12
+	   3: GETL       	CR, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0xFF16D30:  9421FF60  stwu r1,-160(r1)
+	   6: GETL       	R1, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xFFFFFF60, t6
+	   9: PUTL       	t6, R1
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFF16D34:  4809111D  bl 0xFFA7E50
+	  12: MOVL       	$0xFF16D38, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2144 FF16D28 16
+. 7D 68 02 A6 7D 80 00 26 94 21 FF 60 48 09 11 1D
+==== BB 2145 (0xFF16D38) approx BBs exec'd 0 ====
+
+	0xFF16D38:  91E1005C  stw r15,92(r1)
+	   0: GETL       	R15, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x5C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF16D3C:  7CEF3B78  or r15,r7,r7
+	   5: GETL       	R7, t4
+	   6: PUTL       	t4, R15
+	   7: INCEIPL       	$4
+
+	0xFF16D40:  92010060  stw r16,96(r1)
+	   8: GETL       	R16, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x60, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFF16D44:  92210064  stw r17,100(r1)
+	  13: GETL       	R17, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x64, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF16D48:  7D114378  or r17,r8,r8
+	  18: GETL       	R8, t14
+	  19: PUTL       	t14, R17
+	  20: INCEIPL       	$4
+
+	0xFF16D4C:  92410068  stw r18,104(r1)
+	  21: GETL       	R18, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x68, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFF16D50:  7CD23378  or r18,r6,r6
+	  26: GETL       	R6, t20
+	  27: PUTL       	t20, R18
+	  28: INCEIPL       	$4
+
+	0xFF16D54:  92C10078  stw r22,120(r1)
+	  29: GETL       	R22, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x78, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFF16D58:  7CB62B78  or r22,r5,r5
+	  34: GETL       	R5, t26
+	  35: PUTL       	t26, R22
+	  36: INCEIPL       	$4
+
+	0xFF16D5C:  93210084  stw r25,132(r1)
+	  37: GETL       	R25, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x84, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFF16D60:  7C791B79  or. r25,r3,r3
+	  42: GETL       	R3, t32
+	  43: PUTL       	t32, R25
+	  44: CMP0L       	t32, t34  (-rSo)
+	  45: ICRFL       	t34, $0x0, CR
+	  46: INCEIPL       	$4
+
+	0xFF16D64:  93810090  stw r28,144(r1)
+	  47: GETL       	R28, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x90, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0xFF16D68:  7D3C4B78  or r28,r9,r9
+	  52: GETL       	R9, t40
+	  53: PUTL       	t40, R28
+	  54: INCEIPL       	$4
+
+	0xFF16D6C:  93C10098  stw r30,152(r1)
+	  55: GETL       	R30, t42
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x98, t44
+	  58: STL       	t42, (t44)
+	  59: INCEIPL       	$4
+
+	0xFF16D70:  7FC802A6  mflr r30
+	  60: GETL       	LR, t46
+	  61: PUTL       	t46, R30
+	  62: INCEIPL       	$4
+
+	0xFF16D74:  93E1009C  stw r31,156(r1)
+	  63: GETL       	R31, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x9C, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0xFF16D78:  7C9F2378  or r31,r4,r4
+	  68: GETL       	R4, t52
+	  69: PUTL       	t52, R31
+	  70: INCEIPL       	$4
+
+	0xFF16D7C:  916100A4  stw r11,164(r1)
+	  71: GETL       	R11, t54
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0xA4, t56
+	  74: STL       	t54, (t56)
+	  75: INCEIPL       	$4
+
+	0xFF16D80:  91C10058  stw r14,88(r1)
+	  76: GETL       	R14, t58
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x58, t60
+	  79: STL       	t58, (t60)
+	  80: INCEIPL       	$4
+
+	0xFF16D84:  9261006C  stw r19,108(r1)
+	  81: GETL       	R19, t62
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x6C, t64
+	  84: STL       	t62, (t64)
+	  85: INCEIPL       	$4
+
+	0xFF16D88:  92810070  stw r20,112(r1)
+	  86: GETL       	R20, t66
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x70, t68
+	  89: STL       	t66, (t68)
+	  90: INCEIPL       	$4
+
+	0xFF16D8C:  92A10074  stw r21,116(r1)
+	  91: GETL       	R21, t70
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x74, t72
+	  94: STL       	t70, (t72)
+	  95: INCEIPL       	$4
+
+	0xFF16D90:  92E1007C  stw r23,124(r1)
+	  96: GETL       	R23, t74
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x7C, t76
+	  99: STL       	t74, (t76)
+	 100: INCEIPL       	$4
+
+	0xFF16D94:  93010080  stw r24,128(r1)
+	 101: GETL       	R24, t78
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x80, t80
+	 104: STL       	t78, (t80)
+	 105: INCEIPL       	$4
+
+	0xFF16D98:  93410088  stw r26,136(r1)
+	 106: GETL       	R26, t82
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x88, t84
+	 109: STL       	t82, (t84)
+	 110: INCEIPL       	$4
+
+	0xFF16D9C:  9361008C  stw r27,140(r1)
+	 111: GETL       	R27, t86
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x8C, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0xFF16DA0:  93A10094  stw r29,148(r1)
+	 116: GETL       	R29, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0x94, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0xFF16DA4:  91810054  stw r12,84(r1)
+	 121: GETL       	R12, t94
+	 122: GETL       	R1, t96
+	 123: ADDL       	$0x54, t96
+	 124: STL       	t94, (t96)
+	 125: INCEIPL       	$4
+
+	0xFF16DA8:  89450000  lbz r10,0(r5)
+	 126: GETL       	R5, t98
+	 127: LDB       	(t98), t100
+	 128: PUTL       	t100, R10
+	 129: INCEIPL       	$4
+
+	0xFF16DAC:  81290004  lwz r9,4(r9)
+	 130: GETL       	R9, t102
+	 131: ADDL       	$0x4, t102
+	 132: LDL       	(t102), t104
+	 133: PUTL       	t104, R9
+	 134: INCEIPL       	$4
+
+	0xFF16DB0:  6945003A  xori r5,r10,0x3A
+	 135: GETL       	R10, t106
+	 136: XORL       	$0x3A, t106
+	 137: PUTL       	t106, R5
+	 138: INCEIPL       	$4
+
+	0xFF16DB4:  7C6500D0  neg r3,r5
+	 139: GETL       	R5, t108
+	 140: NEGL       	t108
+	 141: PUTL       	t108, R3
+	 142: INCEIPL       	$4
+
+	0xFF16DB8:  38A0FFFF  li r5,-1
+	 143: MOVL       	$0xFFFFFFFF, t110
+	 144: PUTL       	t110, R5
+	 145: INCEIPL       	$4
+
+	0xFF16DBC:  7C60FE70  srawi r0,r3,31
+	 146: GETL       	R3, t112
+	 147: SARL       	$0x1F, t112  (-wCa)
+	 148: PUTL       	t112, R0
+	 149: INCEIPL       	$4
+
+	0xFF16DC0:  7D300038  and r16,r9,r0
+	 150: GETL       	R9, t114
+	 151: GETL       	R0, t116
+	 152: ANDL       	t114, t116
+	 153: PUTL       	t116, R16
+	 154: INCEIPL       	$4
+
+	0xFF16DC4:  408101DC  bc 4,1,0xFF16FA0
+	 155: Jc01o       	$0xFF16FA0
+
+
+
+. 2145 FF16D38 144
+. 91 E1 00 5C 7C EF 3B 78 92 01 00 60 92 21 00 64 7D 11 43 78 92 41 00 68 7C D2 33 78 92 C1 00 78 7C B6 2B 78 93 21 00 84 7C 79 1B 79 93 81 00 90 7D 3C 4B 78 93 C1 00 98 7F C8 02 A6 93 E1 00 9C 7C 9F 23 78 91 61 00 A4 91 C1 00 58 92 61 00 6C 92 81 00 70 92 A1 00 74 92 E1 00 7C 93 01 00 80 93 41 00 88 93 61 00 8C 93 A1 00 94 91 81 00 54 89 45 00 00 81 29 00 04 69 45 00 3A 7C 65 00 D0 38 A0 FF FF 7C 60 FE 70 7D 30 00 38 40 81 01 DC
+==== BB 2146 (0xFF16DC8) approx BBs exec'd 0 ====
+
+	0xFF16DC8:  813C0000  lwz r9,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFF16DCC:  3BA00000  li r29,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R29
+	   6: INCEIPL       	$4
+
+	0xFF16DD0:  93BC000C  stw r29,12(r28)
+	   7: GETL       	R29, t6
+	   8: GETL       	R28, t8
+	   9: ADDL       	$0xC, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFF16DD4:  2F890000  cmpi cr7,r9,0
+	  12: GETL       	R9, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0xFF16DD8:  419E0130  bc 12,30,0xFF16F08
+	  16: Js30o       	$0xFF16F08
+
+
+
+. 2146 FF16DC8 20
+. 81 3C 00 00 3B A0 00 00 93 BC 00 0C 2F 89 00 00 41 9E 01 30
+==== BB 2147 (0xFF16DDC) approx BBs exec'd 0 ====
+
+	0xFF16DDC:  809C0010  lwz r4,16(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFF16DE0:  2C840000  cmpi cr1,r4,0
+	   5: GETL       	R4, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFF16DE4:  4186012C  bc 12,6,0xFF16F10
+	   9: Js06o       	$0xFF16F10
+
+
+
+. 2147 FF16DDC 12
+. 80 9C 00 10 2C 84 00 00 41 86 01 2C
+==== BB 2148 (0xFF16F10) approx BBs exec'd 0 ====
+
+	0xFF16F10:  807E0BB0  lwz r3,2992(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xBB0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFF16F14:  913C0020  stw r9,32(r28)
+	   5: GETL       	R9, t4
+	   6: GETL       	R28, t6
+	   7: ADDL       	$0x20, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFF16F18:  913C0024  stw r9,36(r28)
+	  10: GETL       	R9, t8
+	  11: GETL       	R28, t10
+	  12: ADDL       	$0x24, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFF16F1C:  93BC0014  stw r29,20(r28)
+	  15: GETL       	R29, t12
+	  16: GETL       	R28, t14
+	  17: ADDL       	$0x14, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFF16F20:  4BF7C279  bl 0xFE93198
+	  20: MOVL       	$0xFF16F24, t16
+	  21: PUTL       	t16, LR
+	  22: JMPo-c       	$0xFE93198  ($4)
+
+
+
+. 2148 FF16F10 20
+. 80 7E 0B B0 91 3C 00 20 91 3C 00 24 93 BC 00 14 4B F7 C2 79
+==== BB 2149 (0xFF16F24) approx BBs exec'd 0 ====
+
+	0xFF16F24:  7ECBB378  or r11,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFF16F28:  3003FFFF  addic r0,r3,-1
+	   3: GETL       	R3, t2
+	   4: ADCL       	$0xFFFFFFFF, t2  (-wCa)
+	   5: PUTL       	t2, R0
+	   6: INCEIPL       	$4
+
+	0xFF16F2C:  7D201910  subfe r9,r0,r3
+	   7: GETL       	R0, t4
+	   8: GETL       	R3, t6
+	   9: SBBL       	t4, t6  (-rCa-wCa)
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0xFF16F30:  913C001C  stw r9,28(r28)
+	  12: GETL       	R9, t8
+	  13: GETL       	R28, t10
+	  14: ADDL       	$0x1C, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFF16F34:  88160000  lbz r0,0(r22)
+	  17: GETL       	R22, t12
+	  18: LDB       	(t12), t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0xFF16F38:  2E00002D  cmpi cr4,r0,45
+	  21: GETL       	R0, t16
+	  22: MOVL       	$0x2D, t20
+	  23: CMPL       	t16, t20, t18  (-rSo)
+	  24: ICRFL       	t18, $0x4, CR
+	  25: INCEIPL       	$4
+
+	0xFF16F3C:  41920254  bc 12,18,0xFF17190
+	  26: Js18o       	$0xFF17190
+
+
+
+. 2149 FF16F24 28
+. 7E CB B3 78 30 03 FF FF 7D 20 19 10 91 3C 00 1C 88 16 00 00 2E 00 00 2D 41 92 02 54
+==== BB 2150 (0xFF16F40) approx BBs exec'd 0 ====
+
+	0xFF16F40:  2F00002B  cmpi cr6,r0,43
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2B, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFF16F44:  419A0258  bc 12,26,0xFF1719C
+	   5: Js26o       	$0xFF1719C
+
+
+
+. 2150 FF16F40 8
+. 2F 00 00 2B 41 9A 02 58
+==== BB 2151 (0xFF16F48) approx BBs exec'd 0 ====
+
+	0xFF16F48:  2C090000  cmpi cr0,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFF16F4C:  38000001  li r0,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFF16F50:  40820250  bc 4,2,0xFF171A0
+	   7: Jc02o       	$0xFF171A0
+
+
+
+. 2151 FF16F48 12
+. 2C 09 00 00 38 00 00 01 40 82 02 50
+==== BB 2152 (0xFF16F54) approx BBs exec'd 0 ====
+
+	0xFF16F54:  901C0018  stw r0,24(r28)
+	   0: GETL       	R0, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF16F58:  38C00001  li r6,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0xFF16F5C:  7D765B78  or r22,r11,r11
+	   8: GETL       	R11, t6
+	   9: PUTL       	t6, R22
+	  10: INCEIPL       	$4
+
+	0xFF16F60:  90DC0010  stw r6,16(r28)
+	  11: GETL       	R6, t8
+	  12: GETL       	R28, t10
+	  13: ADDL       	$0x10, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFF16F64:  4BFFFE84  b 0xFF16DE8
+	  16: JMPo       	$0xFF16DE8  ($4)
+
+
+
+. 2152 FF16F54 20
+. 90 1C 00 18 38 C0 00 01 7D 76 5B 78 90 DC 00 10 4B FF FE 84
+==== BB 2153 (0xFF16DE8) approx BBs exec'd 0 ====
+
+	0xFF16DE8:  835C0014  lwz r26,20(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFF16DEC:  2F9A0000  cmpi cr7,r26,0
+	   5: GETL       	R26, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFF16DF0:  419E0014  bc 12,30,0xFF16E04
+	   9: Js30o       	$0xFF16E04
+
+
+
+. 2153 FF16DE8 12
+. 83 5C 00 14 2F 9A 00 00 41 9E 00 14
+==== BB 2154 (0xFF16E04) approx BBs exec'd 0 ====
+
+	0xFF16E04:  80BC0024  lwz r5,36(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFF16E08:  811C0000  lwz r8,0(r28)
+	   5: GETL       	R28, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R8
+	   8: INCEIPL       	$4
+
+	0xFF16E0C:  7E054000  cmp cr4,r5,r8
+	   9: GETL       	R5, t8
+	  10: GETL       	R8, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x4, CR
+	  13: INCEIPL       	$4
+
+	0xFF16E10:  4091000C  bc 4,17,0xFF16E1C
+	  14: Jc17o       	$0xFF16E1C
+
+
+
+. 2154 FF16E04 16
+. 80 BC 00 24 81 1C 00 00 7E 05 40 00 40 91 00 0C
+==== BB 2155 (0xFF16E1C) approx BBs exec'd 0 ====
+
+	0xFF16E1C:  83BC0020  lwz r29,32(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFF16E20:  7F1D4000  cmp cr6,r29,r8
+	   5: GETL       	R29, t4
+	   6: GETL       	R8, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFF16E24:  4099000C  bc 4,25,0xFF16E30
+	  10: Jc25o       	$0xFF16E30
+
+
+
+. 2155 FF16E1C 12
+. 83 BC 00 20 7F 1D 40 00 40 99 00 0C
+==== BB 2156 (0xFF16E30) approx BBs exec'd 0 ====
+
+	0xFF16E30:  837C0018  lwz r27,24(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFF16E34:  2C1B0001  cmpi cr0,r27,1
+	   5: GETL       	R27, t4
+	   6: MOVL       	$0x1, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFF16E38:  418202C4  bc 12,2,0xFF170FC
+	  10: Js02o       	$0xFF170FC
+
+
+
+. 2156 FF16E30 12
+. 83 7C 00 18 2C 1B 00 01 41 82 02 C4
+==== BB 2157 (0xFF170FC) approx BBs exec'd 0 ====
+
+	0xFF170FC:  7F9D2800  cmp cr7,r29,r5
+	   0: GETL       	R29, t0
+	   1: GETL       	R5, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFF17100:  419E0248  bc 12,30,0xFF17348
+	   5: Js30o       	$0xFF17348
+
+
+
+. 2157 FF170FC 8
+. 7F 9D 28 00 41 9E 02 48
+==== BB 2158 (0xFF17348) approx BBs exec'd 0 ====
+
+	0xFF17348:  7F854000  cmp cr7,r5,r8
+	   0: GETL       	R5, t0
+	   1: GETL       	R8, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFF1734C:  419E000C  bc 12,30,0xFF17358
+	   5: Js30o       	$0xFF17358
+
+
+
+. 2158 FF17348 8
+. 7F 85 40 00 41 9E 00 0C
+==== BB 2159 (0xFF17358) approx BBs exec'd 0 ====
+
+	0xFF17358:  7C88C800  cmp cr1,r8,r25
+	   0: GETL       	R8, t0
+	   1: GETL       	R25, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFF1735C:  5500103A  rlwinm r0,r8,2,0,29
+	   5: GETL       	R8, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0xFF17360:  40840028  bc 4,4,0xFF17388
+	   9: Jc04o       	$0xFF17388
+
+
+
+. 2159 FF17358 12
+. 7C 88 C8 00 55 00 10 3A 40 84 00 28
+==== BB 2160 (0xFF17388) approx BBs exec'd 0 ====
+
+	0xFF17388:  7D054378  or r5,r8,r8
+	   0: GETL       	R8, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFF1738C:  911C0024  stw r8,36(r28)
+	   3: GETL       	R8, t2
+	   4: GETL       	R28, t4
+	   5: ADDL       	$0x24, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFF17390:  4BFFFAAC  b 0xFF16E3C
+	   8: JMPo       	$0xFF16E3C  ($4)
+
+
+
+. 2160 FF17388 12
+. 7D 05 43 78 91 1C 00 24 4B FF FA AC
+==== BB 2161 (0xFF16E3C) approx BBs exec'd 0 ====
+
+	0xFF16E3C:  7C08C800  cmp cr0,r8,r25
+	   0: GETL       	R8, t0
+	   1: GETL       	R25, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFF16E40:  418201DC  bc 12,2,0xFF1701C
+	   5: Js02o       	$0xFF1701C
+
+
+
+. 2161 FF16E3C 8
+. 7C 08 C8 00 41 82 01 DC
+==== BB 2162 (0xFF1701C) approx BBs exec'd 0 ====
+
+	0xFF1701C:  7F08C800  cmp cr6,r8,r25
+	   0: GETL       	R8, t0
+	   1: GETL       	R25, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFF17020:  419A07D8  bc 12,26,0xFF177F8
+	   5: Js26o       	$0xFF177F8
+
+
+
+. 2162 FF1701C 8
+. 7F 08 C8 00 41 9A 07 D8
+==== BB 2163 (0xFF177F8) approx BBs exec'd 0 ====
+
+	0xFF177F8:  7F9D2800  cmp cr7,r29,r5
+	   0: GETL       	R29, t0
+	   1: GETL       	R5, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFF177FC:  419E0008  bc 12,30,0xFF17804
+	   5: Js30o       	$0xFF17804
+
+
+
+. 2163 FF177F8 8
+. 7F 9D 28 00 41 9E 00 08
+==== BB 2164 (0xFF17804) approx BBs exec'd 0 ====
+
+	0xFF17804:  38A0FFFF  li r5,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFF17808:  4BFFF798  b 0xFF16FA0
+	   3: JMPo       	$0xFF16FA0  ($4)
+
+
+
+. 2164 FF17804 8
+. 38 A0 FF FF 4B FF F7 98
+==== BB 2165 (0xFF16FA0) approx BBs exec'd 0 ====
+
+	0xFF16FA0:  800100A4  lwz r0,164(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xA4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFF16FA4:  7CA32B78  or r3,r5,r5
+	   5: GETL       	R5, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFF16FA8:  81410054  lwz r10,84(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x54, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0xFF16FAC:  81C10058  lwz r14,88(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x58, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R14
+	  17: INCEIPL       	$4
+
+	0xFF16FB0:  7C0803A6  mtlr r0
+	  18: GETL       	R0, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFF16FB4:  81E1005C  lwz r15,92(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x5C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R15
+	  25: INCEIPL       	$4
+
+	0xFF16FB8:  7D408120  mtcrf 0x8,r10
+	  26: GETL       	R10, t20
+	  27: ICRFL       	t20, $0x4, CR
+	  28: INCEIPL       	$4
+
+	0xFF16FBC:  82010060  lwz r16,96(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x60, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R16
+	  33: INCEIPL       	$4
+
+	0xFF16FC0:  82210064  lwz r17,100(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x64, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R17
+	  38: INCEIPL       	$4
+
+	0xFF16FC4:  82410068  lwz r18,104(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x68, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R18
+	  43: INCEIPL       	$4
+
+	0xFF16FC8:  8261006C  lwz r19,108(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x6C, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R19
+	  48: INCEIPL       	$4
+
+	0xFF16FCC:  82810070  lwz r20,112(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x70, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R20
+	  53: INCEIPL       	$4
+
+	0xFF16FD0:  82A10074  lwz r21,116(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x74, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R21
+	  58: INCEIPL       	$4
+
+	0xFF16FD4:  82C10078  lwz r22,120(r1)
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x78, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R22
+	  63: INCEIPL       	$4
+
+	0xFF16FD8:  82E1007C  lwz r23,124(r1)
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x7C, t50
+	  66: LDL       	(t50), t52
+	  67: PUTL       	t52, R23
+	  68: INCEIPL       	$4
+
+	0xFF16FDC:  83010080  lwz r24,128(r1)
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x80, t54
+	  71: LDL       	(t54), t56
+	  72: PUTL       	t56, R24
+	  73: INCEIPL       	$4
+
+	0xFF16FE0:  83210084  lwz r25,132(r1)
+	  74: GETL       	R1, t58
+	  75: ADDL       	$0x84, t58
+	  76: LDL       	(t58), t60
+	  77: PUTL       	t60, R25
+	  78: INCEIPL       	$4
+
+	0xFF16FE4:  83410088  lwz r26,136(r1)
+	  79: GETL       	R1, t62
+	  80: ADDL       	$0x88, t62
+	  81: LDL       	(t62), t64
+	  82: PUTL       	t64, R26
+	  83: INCEIPL       	$4
+
+	0xFF16FE8:  8361008C  lwz r27,140(r1)
+	  84: GETL       	R1, t66
+	  85: ADDL       	$0x8C, t66
+	  86: LDL       	(t66), t68
+	  87: PUTL       	t68, R27
+	  88: INCEIPL       	$4
+
+	0xFF16FEC:  83810090  lwz r28,144(r1)
+	  89: GETL       	R1, t70
+	  90: ADDL       	$0x90, t70
+	  91: LDL       	(t70), t72
+	  92: PUTL       	t72, R28
+	  93: INCEIPL       	$4
+
+	0xFF16FF0:  83A10094  lwz r29,148(r1)
+	  94: GETL       	R1, t74
+	  95: ADDL       	$0x94, t74
+	  96: LDL       	(t74), t76
+	  97: PUTL       	t76, R29
+	  98: INCEIPL       	$4
+
+	0xFF16FF4:  83C10098  lwz r30,152(r1)
+	  99: GETL       	R1, t78
+	 100: ADDL       	$0x98, t78
+	 101: LDL       	(t78), t80
+	 102: PUTL       	t80, R30
+	 103: INCEIPL       	$4
+
+	0xFF16FF8:  83E1009C  lwz r31,156(r1)
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x9C, t82
+	 106: LDL       	(t82), t84
+	 107: PUTL       	t84, R31
+	 108: INCEIPL       	$4
+
+	0xFF16FFC:  382100A0  addi r1,r1,160
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0xA0, t86
+	 111: PUTL       	t86, R1
+	 112: INCEIPL       	$4
+
+	0xFF17000:  4E800020  blr
+	 113: GETL       	LR, t88
+	 114: JMPo-r       	t88  ($4)
+
+
+
+. 2165 FF16FA0 100
+. 80 01 00 A4 7C A3 2B 78 81 41 00 54 81 C1 00 58 7C 08 03 A6 81 E1 00 5C 7D 40 81 20 82 01 00 60 82 21 00 64 82 41 00 68 82 61 00 6C 82 81 00 70 82 A1 00 74 82 C1 00 78 82 E1 00 7C 83 01 00 80 83 21 00 84 83 41 00 88 83 61 00 8C 83 81 00 90 83 A1 00 94 83 C1 00 98 83 E1 00 9C 38 21 00 A0 4E 80 00 20
+==== BB 2166 (0xFF17F4C) approx BBs exec'd 0 ====
+
+	0xFF17F4C:  809D0000  lwz r4,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFF17F50:  80010024  lwz r0,36(r1)
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x24, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0xFF17F54:  811D0008  lwz r8,8(r29)
+	   9: GETL       	R29, t8
+	  10: ADDL       	$0x8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R8
+	  13: INCEIPL       	$4
+
+	0xFF17F58:  815D000C  lwz r10,12(r29)
+	  14: GETL       	R29, t12
+	  15: ADDL       	$0xC, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R10
+	  18: INCEIPL       	$4
+
+	0xFF17F5C:  7C0803A6  mtlr r0
+	  19: GETL       	R0, t16
+	  20: PUTL       	t16, LR
+	  21: INCEIPL       	$4
+
+	0xFF17F60:  813E1B34  lwz r9,6964(r30)
+	  22: GETL       	R30, t18
+	  23: ADDL       	$0x1B34, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R9
+	  26: INCEIPL       	$4
+
+	0xFF17F64:  817E1D28  lwz r11,7464(r30)
+	  27: GETL       	R30, t22
+	  28: ADDL       	$0x1D28, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R11
+	  31: INCEIPL       	$4
+
+	0xFF17F68:  909C0000  stw r4,0(r28)
+	  32: GETL       	R4, t26
+	  33: GETL       	R28, t28
+	  34: STL       	t26, (t28)
+	  35: INCEIPL       	$4
+
+	0xFF17F6C:  83A10014  lwz r29,20(r1)
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x14, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R29
+	  40: INCEIPL       	$4
+
+	0xFF17F70:  83810010  lwz r28,16(r1)
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x10, t34
+	  43: LDL       	(t34), t36
+	  44: PUTL       	t36, R28
+	  45: INCEIPL       	$4
+
+	0xFF17F74:  83C10018  lwz r30,24(r1)
+	  46: GETL       	R1, t38
+	  47: ADDL       	$0x18, t38
+	  48: LDL       	(t38), t40
+	  49: PUTL       	t40, R30
+	  50: INCEIPL       	$4
+
+	0xFF17F78:  38210020  addi r1,r1,32
+	  51: GETL       	R1, t42
+	  52: ADDL       	$0x20, t42
+	  53: PUTL       	t42, R1
+	  54: INCEIPL       	$4
+
+	0xFF17F7C:  91490000  stw r10,0(r9)
+	  55: GETL       	R10, t44
+	  56: GETL       	R9, t46
+	  57: STL       	t44, (t46)
+	  58: INCEIPL       	$4
+
+	0xFF17F80:  910B0000  stw r8,0(r11)
+	  59: GETL       	R8, t48
+	  60: GETL       	R11, t50
+	  61: STL       	t48, (t50)
+	  62: INCEIPL       	$4
+
+	0xFF17F84:  4E800020  blr
+	  63: GETL       	LR, t52
+	  64: JMPo-r       	t52  ($4)
+
+
+
+. 2166 FF17F4C 60
+. 80 9D 00 00 80 01 00 24 81 1D 00 08 81 5D 00 0C 7C 08 03 A6 81 3E 1B 34 81 7E 1D 28 90 9C 00 00 83 A1 00 14 83 81 00 10 83 C1 00 18 38 21 00 20 91 49 00 00 91 0B 00 00 4E 80 00 20
+==== BB 2167 (0xFF17FC8) approx BBs exec'd 0 ====
+
+	0xFF17FC8:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFF17FCC:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFF17FD0:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFF17FD4:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 2167 FF17FC8 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 2168 (0x10001658) approx BBs exec'd 0 ====
+
+	0x10001658:  2F83FFFF  cmpi cr7,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x1000165C:  419E004C  bc 12,30,0x100016A8
+	   5: Js30o       	$0x100016A8
+
+
+
+. 2168 10001658 8
+. 2F 83 FF FF 41 9E 00 4C
+==== BB 2169 (0x100016A8) approx BBs exec'd 0 ====
+
+	0x100016A8:  3135FFFF  addic r9,r21,-1
+	   0: GETL       	R21, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x100016AC:  7C09A910  subfe r0,r9,r21
+	   4: GETL       	R9, t2
+	   5: GETL       	R21, t4
+	   6: SBBL       	t2, t4  (-rCa-wCa)
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x100016B0:  837E0208  lwz r27,520(r30)
+	   9: GETL       	R30, t6
+	  10: ADDL       	$0x208, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0x100016B4:  3173FFFF  addic r11,r19,-1
+	  14: GETL       	R19, t10
+	  15: ADCL       	$0xFFFFFFFF, t10  (-wCa)
+	  16: PUTL       	t10, R11
+	  17: INCEIPL       	$4
+
+	0x100016B8:  7C000194  addze r0,r0
+	  18: GETL       	R0, t12
+	  19: ADCL       	$0x0, t12  (-rCa-wCa)
+	  20: PUTL       	t12, R0
+	  21: INCEIPL       	$4
+
+	0x100016BC:  3134FFFF  addic r9,r20,-1
+	  22: GETL       	R20, t14
+	  23: ADCL       	$0xFFFFFFFF, t14  (-wCa)
+	  24: PUTL       	t14, R9
+	  25: INCEIPL       	$4
+
+	0x100016C0:  7F800194  addze r28,r0
+	  26: GETL       	R0, t16
+	  27: ADCL       	$0x0, t16  (-rCa-wCa)
+	  28: PUTL       	t16, R28
+	  29: INCEIPL       	$4
+
+	0x100016C4:  801B0000  lwz r0,0(r27)
+	  30: GETL       	R27, t18
+	  31: LDL       	(t18), t20
+	  32: PUTL       	t20, R0
+	  33: INCEIPL       	$4
+
+	0x100016C8:  2F9C0001  cmpi cr7,r28,1
+	  34: GETL       	R28, t22
+	  35: MOVL       	$0x1, t26
+	  36: CMPL       	t22, t26, t24  (-rSo)
+	  37: ICRFL       	t24, $0x7, CR
+	  38: INCEIPL       	$4
+
+	0x100016CC:  7FA0E850  subf r29,r0,r29
+	  39: GETL       	R0, t28
+	  40: GETL       	R29, t30
+	  41: SUBL       	t28, t30
+	  42: PUTL       	t30, R29
+	  43: INCEIPL       	$4
+
+	0x100016D0:  419D03E8  bc 12,29,0x10001AB8
+	  44: Js29o       	$0x10001AB8
+
+
+
+. 2169 100016A8 44
+. 31 35 FF FF 7C 09 A9 10 83 7E 02 08 31 73 FF FF 7C 00 01 94 31 34 FF FF 7F 80 01 94 80 1B 00 00 2F 9C 00 01 7F A0 E8 50 41 9D 03 E8
+==== BB 2170 (0x100016D4) approx BBs exec'd 0 ====
+
+	0x100016D4:  313CFFFF  addic r9,r28,-1
+	   0: GETL       	R28, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x100016D8:  7C09E110  subfe r0,r9,r28
+	   4: GETL       	R9, t2
+	   5: GETL       	R28, t4
+	   6: SBBL       	t2, t4  (-rCa-wCa)
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x100016DC:  7ECB0039  and. r11,r22,r0
+	   9: GETL       	R22, t6
+	  10: GETL       	R0, t8
+	  11: ANDL       	t6, t8
+	  12: PUTL       	t8, R11
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x100016E0:  408203AC  bc 4,2,0x10001A8C
+	  16: Jc02o       	$0x10001A8C
+
+
+
+. 2170 100016D4 16
+. 31 3C FF FF 7C 09 E1 10 7E CB 00 39 40 82 03 AC
+==== BB 2171 (0x100016E4) approx BBs exec'd 0 ====
+
+	0x100016E4:  2E1D0001  cmpi cr4,r29,1
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x100016E8:  41910354  bc 12,17,0x10001A3C
+	   5: Js17o       	$0x10001A3C
+
+
+
+. 2171 100016E4 8
+. 2E 1D 00 01 41 91 03 54
+==== BB 2172 (0x100016EC) approx BBs exec'd 0 ====
+
+	0x100016EC:  7EDCE378  or r28,r22,r28
+	   0: GETL       	R22, t0
+	   1: GETL       	R28, t2
+	   2: ORL       	t2, t0
+	   3: PUTL       	t0, R28
+	   4: INCEIPL       	$4
+
+	0x100016F0:  7C000026  mfcr r0
+	   5: GETL       	CR, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x100016F4:  54009FFE  rlwinm r0,r0,19,31,31
+	   8: GETL       	R0, t6
+	   9: ROLL       	$0x13, t6
+	  10: ANDL       	$0x1, t6
+	  11: PUTL       	t6, R0
+	  12: INCEIPL       	$4
+
+	0x100016F8:  317CFFFF  addic r11,r28,-1
+	  13: GETL       	R28, t8
+	  14: ADCL       	$0xFFFFFFFF, t8  (-wCa)
+	  15: PUTL       	t8, R11
+	  16: INCEIPL       	$4
+
+	0x100016FC:  7D2BE110  subfe r9,r11,r28
+	  17: GETL       	R11, t10
+	  18: GETL       	R28, t12
+	  19: SBBL       	t10, t12  (-rCa-wCa)
+	  20: PUTL       	t12, R9
+	  21: INCEIPL       	$4
+
+	0x10001700:  7D2B0039  and. r11,r9,r0
+	  22: GETL       	R9, t14
+	  23: GETL       	R0, t16
+	  24: ANDL       	t14, t16
+	  25: PUTL       	t16, R11
+	  26: CMP0L       	t16, t18  (-rSo)
+	  27: ICRFL       	t18, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0x10001704:  41820054  bc 12,2,0x10001758
+	  29: Js02o       	$0x10001758
+
+
+
+. 2172 100016EC 28
+. 7E DC E3 78 7C 00 00 26 54 00 9F FE 31 7C FF FF 7D 2B E1 10 7D 2B 00 39 41 82 00 54
+==== BB 2173 (0x10001758) approx BBs exec'd 0 ====
+
+	0x10001758:  813E000C  lwz r9,12(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x1000175C:  2D960000  cmpi cr3,r22,0
+	   5: GETL       	R22, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x3, CR
+	   8: INCEIPL       	$4
+
+	0x10001760:  80090000  lwz r0,0(r9)
+	   9: GETL       	R9, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R0
+	  12: INCEIPL       	$4
+
+	0x10001764:  3120FFFF  addic r9,r0,-1
+	  13: GETL       	R0, t12
+	  14: ADCL       	$0xFFFFFFFF, t12  (-wCa)
+	  15: PUTL       	t12, R9
+	  16: INCEIPL       	$4
+
+	0x10001768:  7D690110  subfe r11,r9,r0
+	  17: GETL       	R9, t14
+	  18: GETL       	R0, t16
+	  19: SBBL       	t14, t16  (-rCa-wCa)
+	  20: PUTL       	t16, R11
+	  21: INCEIPL       	$4
+
+	0x1000176C:  7D60B079  andc. r0,r11,r22
+	  22: GETL       	R11, t18
+	  23: GETL       	R22, t20
+	  24: NOTL       	t20
+	  25: ANDL       	t18, t20
+	  26: PUTL       	t20, R0
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0x10001770:  41820034  bc 12,2,0x100017A4
+	  30: Js02o       	$0x100017A4
+
+
+
+. 2173 10001758 28
+. 81 3E 00 0C 2D 96 00 00 80 09 00 00 31 20 FF FF 7D 69 01 10 7D 60 B0 79 41 82 00 34
+==== BB 2174 (0x100017A4) approx BBs exec'd 0 ====
+
+	0x100017A4:  418E0008  bc 12,14,0x100017AC
+	   0: Js14o       	$0x100017AC
+
+
+
+. 2174 100017A4 4
+. 41 8E 00 08
+==== BB 2175 (0x100017AC) approx BBs exec'd 0 ====
+
+	0x100017AC:  2F950000  cmpi cr7,r21,0
+	   0: GETL       	R21, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x100017B0:  419E036C  bc 12,30,0x10001B1C
+	   4: Js30o       	$0x10001B1C
+
+
+
+. 2175 100017AC 8
+. 2F 95 00 00 41 9E 03 6C
+==== BB 2176 (0x10001B1C) approx BBs exec'd 0 ====
+
+	0x10001B1C:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10001B20:  3BA00001  li r29,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R29
+	   6: INCEIPL       	$4
+
+	0x10001B24:  3B800000  li r28,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x10001B28:  409E0078  bc 4,30,0x10001BA0
+	  10: Jc30o       	$0x10001BA0
+
+
+
+. 2176 10001B1C 16
+. 2F 9C 00 00 3B A0 00 01 3B 80 00 00 40 9E 00 78
+==== BB 2177 (0x10001B2C) approx BBs exec'd 0 ====
+
+	0x10001B2C:  4092015C  bc 4,18,0x10001C88
+	   0: Jc18o       	$0x10001C88
+
+
+
+. 2177 10001B2C 4
+. 40 92 01 5C
+==== BB 2178 (0x10001C88) approx BBs exec'd 0 ====
+
+	0x10001C88:  809E00D8  lwz r4,216(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xD8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x10001C8C:  38A00005  li r5,5
+	   5: MOVL       	$0x5, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x10001C90:  38600000  li r3,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x10001C94:  48018F01  bl 0x1001AB94
+	  11: MOVL       	$0x10001C98, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x1001AB94  ($4)
+
+
+
+. 2178 10001C88 16
+. 80 9E 00 D8 38 A0 00 05 38 60 00 00 48 01 8F 01
+==== BB 2179 (0x1001AB94) approx BBs exec'd 0 ====
+
+	0x1001AB94:  39600030  li r11,48
+	   0: MOVL       	$0x30, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AB98:  4BFFFF74  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 2179 1001AB94 8
+. 39 60 00 30 4B FF FF 74
+==== BB 2180 __dcgettext_internal(0xFE8A480) approx BBs exec'd 0 ====
+
+	0xFE8A480:  7CC802A6  mflr r6
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFE8A484:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFE8A488:  7CA82B78  or r8,r5,r5
+	   9: GETL       	R5, t6
+	  10: PUTL       	t6, R8
+	  11: INCEIPL       	$4
+
+	0xFE8A48C:  38E00000  li r7,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R7
+	  14: INCEIPL       	$4
+
+	0xFE8A490:  38A00000  li r5,0
+	  15: MOVL       	$0x0, t10
+	  16: PUTL       	t10, R5
+	  17: INCEIPL       	$4
+
+	0xFE8A494:  90C10014  stw r6,20(r1)
+	  18: GETL       	R6, t12
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x14, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0xFE8A498:  38C00000  li r6,0
+	  23: MOVL       	$0x0, t16
+	  24: PUTL       	t16, R6
+	  25: INCEIPL       	$4
+
+	0xFE8A49C:  480009E1  bl 0xFE8AE7C
+	  26: MOVL       	$0xFE8A4A0, t18
+	  27: PUTL       	t18, LR
+	  28: JMPo-c       	$0xFE8AE7C  ($4)
+
+
+
+. 2180 FE8A480 32
+. 7C C8 02 A6 94 21 FF F0 7C A8 2B 78 38 E0 00 00 38 A0 00 00 90 C1 00 14 38 C0 00 00 48 00 09 E1
+==== BB 2181 __dcigettext(0xFE8AE7C) approx BBs exec'd 0 ====
+
+	0xFE8AE7C:  9421FF90  stwu r1,-112(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF90, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE8AE80:  7D800026  mfcr r12
+	   6: GETL       	CR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFE8AE84:  92C10048  stw r22,72(r1)
+	   9: GETL       	R22, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x48, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE8AE88:  7C962379  or. r22,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R22
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFE8AE8C:  7C0802A6  mflr r0
+	  19: GETL       	LR, t14
+	  20: PUTL       	t14, R0
+	  21: INCEIPL       	$4
+
+	0xFE8AE90:  93E1006C  stw r31,108(r1)
+	  22: GETL       	R31, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x6C, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xFE8AE94:  4811CFBD  bl 0xFFA7E50
+	  27: MOVL       	$0xFE8AE98, t20
+	  28: PUTL       	t20, LR
+	  29: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2181 FE8AE7C 28
+. 94 21 FF 90 7D 80 00 26 92 C1 00 48 7C 96 23 79 7C 08 02 A6 93 E1 00 6C 48 11 CF BD
+==== BB 2182 (0xFE8AE98) approx BBs exec'd 0 ====
+
+	0xFE8AE98:  7C3F0B78  or r31,r1,r1
+	   0: GETL       	R1, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFE8AE9C:  91C10028  stw r14,40(r1)
+	   3: GETL       	R14, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x28, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFE8AEA0:  7CAE2B78  or r14,r5,r5
+	   8: GETL       	R5, t6
+	   9: PUTL       	t6, R14
+	  10: INCEIPL       	$4
+
+	0xFE8AEA4:  92410038  stw r18,56(r1)
+	  11: GETL       	R18, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x38, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFE8AEA8:  7D124378  or r18,r8,r8
+	  16: GETL       	R8, t12
+	  17: PUTL       	t12, R18
+	  18: INCEIPL       	$4
+
+	0xFE8AEAC:  92A10044  stw r21,68(r1)
+	  19: GETL       	R21, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x44, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFE8AEB0:  7C751B78  or r21,r3,r3
+	  24: GETL       	R3, t18
+	  25: PUTL       	t18, R21
+	  26: INCEIPL       	$4
+
+	0xFE8AEB4:  93410058  stw r26,88(r1)
+	  27: GETL       	R26, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x58, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFE8AEB8:  38600000  li r3,0
+	  32: MOVL       	$0x0, t24
+	  33: PUTL       	t24, R3
+	  34: INCEIPL       	$4
+
+	0xFE8AEBC:  93C10068  stw r30,104(r1)
+	  35: GETL       	R30, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x68, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFE8AEC0:  7CDA3378  or r26,r6,r6
+	  40: GETL       	R6, t30
+	  41: PUTL       	t30, R26
+	  42: INCEIPL       	$4
+
+	0xFE8AEC4:  91E1002C  stw r15,44(r1)
+	  43: GETL       	R15, t32
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x2C, t34
+	  46: STL       	t32, (t34)
+	  47: INCEIPL       	$4
+
+	0xFE8AEC8:  7FC802A6  mflr r30
+	  48: GETL       	LR, t36
+	  49: PUTL       	t36, R30
+	  50: INCEIPL       	$4
+
+	0xFE8AECC:  92010030  stw r16,48(r1)
+	  51: GETL       	R16, t38
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x30, t40
+	  54: STL       	t38, (t40)
+	  55: INCEIPL       	$4
+
+	0xFE8AED0:  92210034  stw r17,52(r1)
+	  56: GETL       	R17, t42
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x34, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0xFE8AED4:  9261003C  stw r19,60(r1)
+	  61: GETL       	R19, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x3C, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0xFE8AED8:  92810040  stw r20,64(r1)
+	  66: GETL       	R20, t50
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x40, t52
+	  69: STL       	t50, (t52)
+	  70: INCEIPL       	$4
+
+	0xFE8AEDC:  92E1004C  stw r23,76(r1)
+	  71: GETL       	R23, t54
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x4C, t56
+	  74: STL       	t54, (t56)
+	  75: INCEIPL       	$4
+
+	0xFE8AEE0:  93010050  stw r24,80(r1)
+	  76: GETL       	R24, t58
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x50, t60
+	  79: STL       	t58, (t60)
+	  80: INCEIPL       	$4
+
+	0xFE8AEE4:  93210054  stw r25,84(r1)
+	  81: GETL       	R25, t62
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x54, t64
+	  84: STL       	t62, (t64)
+	  85: INCEIPL       	$4
+
+	0xFE8AEE8:  9361005C  stw r27,92(r1)
+	  86: GETL       	R27, t66
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x5C, t68
+	  89: STL       	t66, (t68)
+	  90: INCEIPL       	$4
+
+	0xFE8AEEC:  93810060  stw r28,96(r1)
+	  91: GETL       	R28, t70
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x60, t72
+	  94: STL       	t70, (t72)
+	  95: INCEIPL       	$4
+
+	0xFE8AEF0:  93A10064  stw r29,100(r1)
+	  96: GETL       	R29, t74
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x64, t76
+	  99: STL       	t74, (t76)
+	 100: INCEIPL       	$4
+
+	0xFE8AEF4:  90010074  stw r0,116(r1)
+	 101: GETL       	R0, t78
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x74, t80
+	 104: STL       	t78, (t80)
+	 105: INCEIPL       	$4
+
+	0xFE8AEF8:  91810024  stw r12,36(r1)
+	 106: GETL       	R12, t82
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x24, t84
+	 109: STL       	t82, (t84)
+	 110: INCEIPL       	$4
+
+	0xFE8AEFC:  90FF000C  stw r7,12(r31)
+	 111: GETL       	R7, t86
+	 112: GETL       	R31, t88
+	 113: ADDL       	$0xC, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0xFE8AF00:  4182034C  bc 12,2,0xFE8B24C
+	 116: Js02o       	$0xFE8B24C
+
+
+
+. 2182 FE8AE98 108
+. 7C 3F 0B 78 91 C1 00 28 7C AE 2B 78 92 41 00 38 7D 12 43 78 92 A1 00 44 7C 75 1B 78 93 41 00 58 38 60 00 00 93 C1 00 68 7C DA 33 78 91 E1 00 2C 7F C8 02 A6 92 01 00 30 92 21 00 34 92 61 00 3C 92 81 00 40 92 E1 00 4C 93 01 00 50 93 21 00 54 93 61 00 5C 93 81 00 60 93 A1 00 64 90 01 00 74 91 81 00 24 90 FF 00 0C 41 82 03 4C
+==== BB 2183 (0xFE8AF04) approx BBs exec'd 0 ====
+
+	0xFE8AF04:  69030006  xori r3,r8,0x6
+	   0: GETL       	R8, t0
+	   1: XORL       	$0x6, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE8AF08:  21230000  subfic r9,r3,0
+	   4: GETL       	R3, t2
+	   5: MOVL       	$0x0, t4
+	   6: SBBL       	t2, t4  (-wCa)
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xFE8AF0C:  7C691914  adde r3,r9,r3
+	   9: GETL       	R9, t6
+	  10: GETL       	R3, t8
+	  11: ADCL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFE8AF10:  2128000C  subfic r9,r8,12
+	  14: GETL       	R8, t10
+	  15: MOVL       	$0xC, t12
+	  16: SBBL       	t10, t12  (-wCa)
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0xFE8AF14:  7D294910  subfe r9,r9,r9
+	  19: GETL       	R9, t14
+	  20: GETL       	R9, t16
+	  21: SBBL       	t14, t16  (-rCa-wCa)
+	  22: PUTL       	t16, R9
+	  23: INCEIPL       	$4
+
+	0xFE8AF18:  7D2900D0  neg r9,r9
+	  24: GETL       	R9, t18
+	  25: NEGL       	t18
+	  26: PUTL       	t18, R9
+	  27: INCEIPL       	$4
+
+	0xFE8AF1C:  7D2B1B79  or. r11,r9,r3
+	  28: GETL       	R9, t20
+	  29: GETL       	R3, t22
+	  30: ORL       	t22, t20
+	  31: PUTL       	t20, R11
+	  32: CMP0L       	t20, t24  (-rSo)
+	  33: ICRFL       	t24, $0x0, CR
+	  34: INCEIPL       	$4
+
+	0xFE8AF20:  40820314  bc 4,2,0xFE8B234
+	  35: Jc02o       	$0xFE8B234
+
+
+
+. 2183 FE8AF04 32
+. 69 03 00 06 21 23 00 00 7C 69 19 14 21 28 00 0C 7D 29 49 10 7D 29 00 D0 7D 2B 1B 79 40 82 03 14
+==== BB 2184 (0xFE8AF24) approx BBs exec'd 0 ====
+
+	0xFE8AF24:  81FE1B48  lwz r15,6984(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0xFE8AF28:  800F00A0  lwz r0,160(r15)
+	   5: GETL       	R15, t4
+	   6: ADDL       	$0xA0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFE8AF2C:  2C800000  cmpi cr1,r0,0
+	  10: GETL       	R0, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0xFE8AF30:  408603FC  bc 4,6,0xFE8B32C
+	  14: Jc06o       	$0xFE8B32C
+
+
+
+. 2184 FE8AF24 16
+. 81 FE 1B 48 80 0F 00 A0 2C 80 00 00 40 86 03 FC
+==== BB 2185 (0xFE8AF34) approx BBs exec'd 0 ====
+
+	0xFE8AF34:  2D950000  cmpi cr3,r21,0
+	   0: GETL       	R21, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x3, CR
+	   3: INCEIPL       	$4
+
+	0xFE8AF38:  418E0424  bc 12,14,0xFE8B35C
+	   4: Js14o       	$0xFE8B35C
+
+
+
+. 2185 FE8AF34 8
+. 2D 95 00 00 41 8E 04 24
+==== BB 2186 (0xFE8B35C) approx BBs exec'd 0 ====
+
+	0xFE8B35C:  809E1B14  lwz r4,6932(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8B360:  82A40000  lwz r21,0(r4)
+	   5: GETL       	R4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R21
+	   8: INCEIPL       	$4
+
+	0xFE8B364:  4BFFFBD8  b 0xFE8AF3C
+	   9: JMPo       	$0xFE8AF3C  ($4)
+
+
+
+. 2186 FE8B35C 12
+. 80 9E 1B 14 82 A4 00 00 4B FF FB D8
+==== BB 2187 (0xFE8AF3C) approx BBs exec'd 0 ====
+
+	0xFE8AF3C:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8AF40:  4804BB99  bl 0xFED6AD8
+	   3: MOVL       	$0xFE8AF44, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2187 FE8AF3C 8
+. 7E C3 B3 78 48 04 BB 99
+==== BB 2188 (0xFE8AF44) approx BBs exec'd 0 ====
+
+	0xFE8AF44:  81610000  lwz r11,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFE8AF48:  39430037  addi r10,r3,55
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x37, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFE8AF4C:  38A30001  addi r5,r3,1
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x1, t6
+	  10: PUTL       	t6, R5
+	  11: INCEIPL       	$4
+
+	0xFE8AF50:  55480036  rlwinm r8,r10,0,0,27
+	  12: GETL       	R10, t8
+	  13: ANDL       	$0xFFFFFFF0, t8
+	  14: PUTL       	t8, R8
+	  15: INCEIPL       	$4
+
+	0xFE8AF54:  90BF0014  stw r5,20(r31)
+	  16: GETL       	R5, t10
+	  17: GETL       	R31, t12
+	  18: ADDL       	$0x14, t12
+	  19: STL       	t10, (t12)
+	  20: INCEIPL       	$4
+
+	0xFE8AF58:  7CE800D0  neg r7,r8
+	  21: GETL       	R8, t14
+	  22: NEGL       	t14
+	  23: PUTL       	t14, R7
+	  24: INCEIPL       	$4
+
+	0xFE8AF5C:  80BF0014  lwz r5,20(r31)
+	  25: GETL       	R31, t16
+	  26: ADDL       	$0x14, t16
+	  27: LDL       	(t16), t18
+	  28: PUTL       	t18, R5
+	  29: INCEIPL       	$4
+
+	0xFE8AF60:  7D61396E  stwux r11,r1,r7
+	  30: GETL       	R7, t20
+	  31: GETL       	R1, t22
+	  32: ADDL       	t22, t20
+	  33: PUTL       	t20, R1
+	  34: GETL       	R11, t24
+	  35: STL       	t24, (t20)
+	  36: INCEIPL       	$4
+
+	0xFE8AF64:  7EC4B378  or r4,r22,r22
+	  37: GETL       	R22, t26
+	  38: PUTL       	t26, R4
+	  39: INCEIPL       	$4
+
+	0xFE8AF68:  38C10017  addi r6,r1,23
+	  40: GETL       	R1, t28
+	  41: ADDL       	$0x17, t28
+	  42: PUTL       	t28, R6
+	  43: INCEIPL       	$4
+
+	0xFE8AF6C:  54DD0036  rlwinm r29,r6,0,0,27
+	  44: GETL       	R6, t30
+	  45: ANDL       	$0xFFFFFFF0, t30
+	  46: PUTL       	t30, R29
+	  47: INCEIPL       	$4
+
+	0xFE8AF70:  387D0018  addi r3,r29,24
+	  48: GETL       	R29, t32
+	  49: ADDL       	$0x18, t32
+	  50: PUTL       	t32, R3
+	  51: INCEIPL       	$4
+
+	0xFE8AF74:  4804D765  bl 0xFED86D8
+	  52: MOVL       	$0xFE8AF78, t34
+	  53: PUTL       	t34, LR
+	  54: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2188 FE8AF44 52
+. 81 61 00 00 39 43 00 37 38 A3 00 01 55 48 00 36 90 BF 00 14 7C E8 00 D0 80 BF 00 14 7D 61 39 6E 7E C4 B3 78 38 C1 00 17 54 DD 00 36 38 7D 00 18 48 04 D7 65
+==== BB 2189 (0xFE8AF78) approx BBs exec'd 0 ====
+
+	0xFE8AF78:  800F00A0  lwz r0,160(r15)
+	   0: GETL       	R15, t0
+	   1: ADDL       	$0xA0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8AF7C:  92BD0000  stw r21,0(r29)
+	   5: GETL       	R21, t4
+	   6: GETL       	R29, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFE8AF80:  2E000000  cmpi cr4,r0,0
+	   9: GETL       	R0, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0xFE8AF84:  925D0004  stw r18,4(r29)
+	  13: GETL       	R18, t12
+	  14: GETL       	R29, t14
+	  15: ADDL       	$0x4, t14
+	  16: STL       	t12, (t14)
+	  17: INCEIPL       	$4
+
+	0xFE8AF88:  40920394  bc 4,18,0xFE8B31C
+	  18: Jc18o       	$0xFE8B31C
+
+
+
+. 2189 FE8AF78 20
+. 80 0F 00 A0 92 BD 00 00 2E 00 00 00 92 5D 00 04 40 92 03 94
+==== BB 2190 (0xFE8AF8C) approx BBs exec'd 0 ====
+
+	0xFE8AF8C:  809E018C  lwz r4,396(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x18C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8AF90:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE8AF94:  80BE0174  lwz r5,372(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x174, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0xFE8AF98:  480A4831  bl 0xFF2F7C8
+	  13: MOVL       	$0xFE8AF9C, t10
+	  14: PUTL       	t10, LR
+	  15: JMPo-c       	$0xFF2F7C8  ($4)
+
+
+
+. 2190 FE8AF8C 16
+. 80 9E 01 8C 7F A3 EB 78 80 BE 01 74 48 0A 48 31
+==== BB 2191 tfind(0xFF2F7C8) approx BBs exec'd 0 ====
+
+	0xFF2F7C8:  2C040000  cmpi cr0,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFF2F7CC:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFF2F7D0:  9421FFE0  stwu r1,-32(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFE0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFF2F7D4:  93810010  stw r28,16(r1)
+	  13: GETL       	R28, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x10, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF2F7D8:  7C7C1B78  or r28,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFF2F7DC:  93A10014  stw r29,20(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFF2F7E0:  38600000  li r3,0
+	  26: MOVL       	$0x0, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0xFF2F7E4:  93C10018  stw r30,24(r1)
+	  29: GETL       	R30, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x18, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFF2F7E8:  7CBD2B78  or r29,r5,r5
+	  34: GETL       	R5, t26
+	  35: PUTL       	t26, R29
+	  36: INCEIPL       	$4
+
+	0xFF2F7EC:  93E1001C  stw r31,28(r1)
+	  37: GETL       	R31, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x1C, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFF2F7F0:  90010024  stw r0,36(r1)
+	  42: GETL       	R0, t32
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x24, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0xFF2F7F4:  41820048  bc 12,2,0xFF2F83C
+	  47: Js02o       	$0xFF2F83C
+
+
+
+. 2191 FF2F7C8 48
+. 2C 04 00 00 7C 08 02 A6 94 21 FF E0 93 81 00 10 7C 7C 1B 78 93 A1 00 14 38 60 00 00 93 C1 00 18 7C BD 2B 78 93 E1 00 1C 90 01 00 24 41 82 00 48
+==== BB 2192 (0xFF2F7F8) approx BBs exec'd 0 ====
+
+	0xFF2F7F8:  80640000  lwz r3,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFF2F7FC:  2F830000  cmpi cr7,r3,0
+	   4: GETL       	R3, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFF2F800:  419E0038  bc 12,30,0xFF2F838
+	   8: Js30o       	$0xFF2F838
+
+
+
+. 2192 FF2F7F8 12
+. 80 64 00 00 2F 83 00 00 41 9E 00 38
+==== BB 2193 (0xFF2F838) approx BBs exec'd 0 ====
+
+	0xFF2F838:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFF2F83C:  80810024  lwz r4,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFF2F840:  83810010  lwz r28,16(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0xFF2F844:  83A10014  lwz r29,20(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x14, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0xFF2F848:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFF2F84C:  83C10018  lwz r30,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R30
+	  25: INCEIPL       	$4
+
+	0xFF2F850:  83E1001C  lwz r31,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFF2F854:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0xFF2F858:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+
+. 2193 FF2F838 36
+. 38 60 00 00 80 81 00 24 83 81 00 10 83 A1 00 14 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2194 (0xFE8AF9C) approx BBs exec'd 0 ====
+
+	0xFE8AF9C:  800F00A8  lwz r0,168(r15)
+	   0: GETL       	R15, t0
+	   1: ADDL       	$0xA8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8AFA0:  7C701B78  or r16,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R16
+	   7: INCEIPL       	$4
+
+	0xFE8AFA4:  2F000000  cmpi cr6,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFE8AFA8:  409A0364  bc 4,26,0xFE8B30C
+	  12: Jc26o       	$0xFE8B30C
+
+
+
+. 2194 FE8AF9C 16
+. 80 0F 00 A8 7C 70 1B 78 2F 00 00 00 40 9A 03 64
+==== BB 2195 (0xFE8AFAC) approx BBs exec'd 0 ====
+
+	0xFE8AFAC:  2D900000  cmpi cr3,r16,0
+	   0: GETL       	R16, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x3, CR
+	   3: INCEIPL       	$4
+
+	0xFE8AFB0:  418E001C  bc 12,14,0xFE8AFCC
+	   4: Js14o       	$0xFE8AFCC
+
+
+
+. 2195 FE8AFAC 8
+. 2D 90 00 00 41 8E 00 1C
+==== BB 2196 (0xFE8AFCC) approx BBs exec'd 0 ====
+
+	0xFE8AFCC:  839E1AC8  lwz r28,6856(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1AC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFE8AFD0:  831E1C4C  lwz r24,7244(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1C4C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R24
+	   9: INCEIPL       	$4
+
+	0xFE8AFD4:  7F781214  add r27,r24,r2
+	  10: GETL       	R24, t8
+	  11: GETL       	R2, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R27
+	  14: INCEIPL       	$4
+
+	0xFE8AFD8:  82FC0000  lwz r23,0(r28)
+	  15: GETL       	R28, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R23
+	  18: INCEIPL       	$4
+
+	0xFE8AFDC:  833B0000  lwz r25,0(r27)
+	  19: GETL       	R27, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R25
+	  22: INCEIPL       	$4
+
+	0xFE8AFE0:  2E170000  cmpi cr4,r23,0
+	  23: GETL       	R23, t20
+	  24: CMP0L       	t20, t22  (-rSo)
+	  25: ICRFL       	t22, $0x4, CR
+	  26: INCEIPL       	$4
+
+	0xFE8AFE4:  933F0010  stw r25,16(r31)
+	  27: GETL       	R25, t24
+	  28: GETL       	R31, t26
+	  29: ADDL       	$0x10, t26
+	  30: STL       	t24, (t26)
+	  31: INCEIPL       	$4
+
+	0xFE8AFE8:  41920028  bc 12,18,0xFE8B010
+	  32: Js18o       	$0xFE8B010
+
+
+
+. 2196 FE8AFCC 32
+. 83 9E 1A C8 83 1E 1C 4C 7F 78 12 14 82 FC 00 00 83 3B 00 00 2E 17 00 00 93 3F 00 10 41 92 00 28
+==== BB 2197 (0xFE8AFEC) approx BBs exec'd 0 ====
+
+	0xFE8AFEC:  38970010  addi r4,r23,16
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0xFE8AFF0:  7EA3AB78  or r3,r21,r21
+	   4: GETL       	R21, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0xFE8AFF4:  4804B3BD  bl 0xFED63B0
+	   7: MOVL       	$0xFE8AFF8, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2197 FE8AFEC 12
+. 38 97 00 10 7E A3 AB 78 48 04 B3 BD
+==== BB 2198 (0xFE8AFF8) approx BBs exec'd 0 ====
+
+	0xFE8AFF8:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE8AFFC:  419E036C  bc 12,30,0xFE8B368
+	   4: Js30o       	$0xFE8B368
+
+
+
+. 2198 FE8AFF8 8
+. 2F 83 00 00 41 9E 03 6C
+==== BB 2199 (0xFE8B368) approx BBs exec'd 0 ====
+
+	0xFE8B368:  41B2FCA8  bc 13,18,0xFE8B010
+	   0: Js18o       	$0xFE8B010
+
+
+
+. 2199 FE8B368 4
+. 41 B2 FC A8
+==== BB 2200 (0xFE8B36C) approx BBs exec'd 0 ====
+
+	0xFE8B36C:  80770004  lwz r3,4(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8B370:  89830000  lbz r12,0(r3)
+	   5: GETL       	R3, t4
+	   6: LDB       	(t4), t6
+	   7: PUTL       	t6, R12
+	   8: INCEIPL       	$4
+
+	0xFE8B374:  7C731B78  or r19,r3,r3
+	   9: GETL       	R3, t8
+	  10: PUTL       	t8, R19
+	  11: INCEIPL       	$4
+
+	0xFE8B378:  2F0C002F  cmpi cr6,r12,47
+	  12: GETL       	R12, t10
+	  13: MOVL       	$0x2F, t14
+	  14: CMPL       	t10, t14, t12  (-rSo)
+	  15: ICRFL       	t12, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0xFE8B37C:  41BAFC98  bc 13,26,0xFE8B014
+	  17: Js26o       	$0xFE8B014
+
+
+
+. 2200 FE8B36C 20
+. 80 77 00 04 89 83 00 00 7C 73 1B 78 2F 0C 00 2F 41 BA FC 98
+==== BB 2201 (0xFE8B014) approx BBs exec'd 0 ====
+
+	0xFE8B014:  807E0194  lwz r3,404(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x194, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8B018:  5654103A  rlwinm r20,r18,2,0,29
+	   5: GETL       	R18, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R20
+	   8: INCEIPL       	$4
+
+	0xFE8B01C:  823E1DC4  lwz r17,7620(r30)
+	   9: GETL       	R30, t6
+	  10: ADDL       	$0x1DC4, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R17
+	  13: INCEIPL       	$4
+
+	0xFE8B020:  7F94882E  lwzx r28,r20,r17
+	  14: GETL       	R17, t10
+	  15: GETL       	R20, t12
+	  16: ADDL       	t12, t10
+	  17: LDL       	(t10), t14
+	  18: PUTL       	t14, R28
+	  19: INCEIPL       	$4
+
+	0xFE8B024:  48008175  bl 0xFE93198
+	  20: MOVL       	$0xFE8B028, t16
+	  21: PUTL       	t16, LR
+	  22: JMPo-c       	$0xFE93198  ($4)
+
+
+
+. 2201 FE8B014 20
+. 80 7E 01 94 56 54 10 3A 82 3E 1D C4 7F 94 88 2E 48 00 81 75
+==== BB 2202 (0xFED6E98) approx BBs exec'd 0 ====
+
+	0xFED6E98:  2C860000  cmpi cr1,r6,0
+	   0: GETL       	R6, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFED6E9C:  7C063800  cmp cr0,r6,r7
+	   4: GETL       	R6, t4
+	   5: GETL       	R7, t6
+	   6: CMPL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFED6EA0:  40820010  bc 4,2,0xFED6EB0
+	   9: Jc02o       	$0xFED6EB0
+
+
+
+. 2202 FED6E98 12
+. 2C 86 00 00 7C 06 38 00 40 82 00 10
+==== BB 2203 (0xFED6EA4) approx BBs exec'd 0 ====
+
+	0xFED6EA4:  8CC30001  lbzu r6,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFED6EA8:  4086FFCC  bc 4,6,0xFED6E74
+	   6: Jc06o       	$0xFED6E74
+
+
+
+. 2203 FED6EA4 8
+. 8C C3 00 01 40 86 FF CC
+==== BB 2204 (0xFED6E74) approx BBs exec'd 0 ====
+
+	0xFED6E74:  8CE40001  lbzu r7,1(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R4
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0xFED6E78:  42400038  bc 18,0,0xFED6EB0
+	   6: GETL       	CTR, t4
+	   7: ADDL       	$0xFFFFFFFF, t4
+	   8: PUTL       	t4, CTR
+	   9: SETZL       	t4, NoValue
+	  10: JIFZL       	t4, $0xFED6E7C
+	  11: JMPo       	$0xFED6EB0  ($4)
+
+
+
+. 2204 FED6E74 8
+. 8C E4 00 01 42 40 00 38
+==== BB 2205 (0xFE8B028) approx BBs exec'd 0 ====
+
+	0xFE8B028:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFE8B02C:  7C7D1B78  or r29,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R29
+	   6: INCEIPL       	$4
+
+	0xFE8B030:  41920018  bc 12,18,0xFE8B048
+	   7: Js18o       	$0xFE8B048
+
+
+
+. 2205 FE8B028 12
+. 2E 03 00 00 7C 7D 1B 78 41 92 00 18
+==== BB 2206 (0xFE8B048) approx BBs exec'd 0 ====
+
+	0xFE8B048:  7E439378  or r3,r18,r18
+	   0: GETL       	R18, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8B04C:  4BFFE2DD  bl 0xFE89328
+	   3: MOVL       	$0xFE8B050, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFE89328  ($4)
+
+
+
+. 2206 FE8B048 8
+. 7E 43 93 78 4B FF E2 DD
+==== BB 2207 __current_locale_name(0xFE89328) approx BBs exec'd 0 ====
+
+	0xFE89328:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE8932C:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFE89330:  4811EB21  bl 0xFFA7E50
+	   9: MOVL       	$0xFE89334, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2207 FE89328 12
+. 94 21 FF F0 7D 88 02 A6 48 11 EB 21
+==== BB 2208 (0xFE89334) approx BBs exec'd 0 ====
+
+	0xFE89334:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE89338:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE8933C:  5464103A  rlwinm r4,r3,2,0,29
+	   8: GETL       	R3, t6
+	   9: SHLL       	$0x2, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0xFE89340:  7D8803A6  mtlr r12
+	  12: GETL       	R12, t8
+	  13: PUTL       	t8, LR
+	  14: INCEIPL       	$4
+
+	0xFE89344:  80BE1D50  lwz r5,7504(r30)
+	  15: GETL       	R30, t10
+	  16: ADDL       	$0x1D50, t10
+	  17: LDL       	(t10), t12
+	  18: PUTL       	t12, R5
+	  19: INCEIPL       	$4
+
+	0xFE89348:  83C10008  lwz r30,8(r1)
+	  20: GETL       	R1, t14
+	  21: ADDL       	$0x8, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R30
+	  24: INCEIPL       	$4
+
+	0xFE8934C:  7D251214  add r9,r5,r2
+	  25: GETL       	R5, t18
+	  26: GETL       	R2, t20
+	  27: ADDL       	t18, t20
+	  28: PUTL       	t20, R9
+	  29: INCEIPL       	$4
+
+	0xFE89350:  38210010  addi r1,r1,16
+	  30: GETL       	R1, t22
+	  31: ADDL       	$0x10, t22
+	  32: PUTL       	t22, R1
+	  33: INCEIPL       	$4
+
+	0xFE89354:  80090000  lwz r0,0(r9)
+	  34: GETL       	R9, t24
+	  35: LDL       	(t24), t26
+	  36: PUTL       	t26, R0
+	  37: INCEIPL       	$4
+
+	0xFE89358:  7C640214  add r3,r4,r0
+	  38: GETL       	R4, t28
+	  39: GETL       	R0, t30
+	  40: ADDL       	t28, t30
+	  41: PUTL       	t30, R3
+	  42: INCEIPL       	$4
+
+	0xFE8935C:  80630040  lwz r3,64(r3)
+	  43: GETL       	R3, t32
+	  44: ADDL       	$0x40, t32
+	  45: LDL       	(t32), t34
+	  46: PUTL       	t34, R3
+	  47: INCEIPL       	$4
+
+	0xFE89360:  4E800020  blr
+	  48: GETL       	LR, t36
+	  49: JMPo-r       	t36  ($4)
+
+
+
+. 2208 FE89334 48
+. 93 C1 00 08 7F C8 02 A6 54 64 10 3A 7D 88 03 A6 80 BE 1D 50 83 C1 00 08 7D 25 12 14 38 21 00 10 80 09 00 00 7C 64 02 14 80 63 00 40 4E 80 00 20
+==== BB 2209 (0xFE8B050) approx BBs exec'd 0 ====
+
+	0xFE8B050:  419202B4  bc 12,18,0xFE8B304
+	   0: Js18o       	$0xFE8B304
+
+
+
+. 2209 FE8B050 4
+. 41 92 02 B4
+==== BB 2210 (0xFE8B304) approx BBs exec'd 0 ====
+
+	0xFE8B304:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE8B308:  4BFFFD6C  b 0xFE8B074
+	   3: JMPo       	$0xFE8B074  ($4)
+
+
+
+. 2210 FE8B304 8
+. 7C 60 1B 78 4B FF FD 6C
+==== BB 2211 (0xFE8B074) approx BBs exec'd 0 ====
+
+	0xFE8B074:  7EA3AB78  or r3,r21,r21
+	   0: GETL       	R21, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8B078:  7C1B0378  or r27,r0,r0
+	   3: GETL       	R0, t2
+	   4: PUTL       	t2, R27
+	   5: INCEIPL       	$4
+
+	0xFE8B07C:  4804BA5D  bl 0xFED6AD8
+	   6: MOVL       	$0xFE8B080, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2211 FE8B074 12
+. 7E A3 AB 78 7C 1B 03 78 48 04 BA 5D
+==== BB 2212 (0xFE8B080) approx BBs exec'd 0 ====
+
+	0xFE8B080:  2E1A0000  cmpi cr4,r26,0
+	   0: GETL       	R26, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFE8B084:  7C711B78  or r17,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R17
+	   6: INCEIPL       	$4
+
+	0xFE8B088:  7F83E378  or r3,r28,r28
+	   7: GETL       	R28, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFE8B08C:  4804BA4D  bl 0xFED6AD8
+	  10: MOVL       	$0xFE8B090, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2212 FE8B080 16
+. 2E 1A 00 00 7C 71 1B 78 7F 83 E3 78 48 04 BA 4D
+==== BB 2213 (0xFE8B090) approx BBs exec'd 0 ====
+
+	0xFE8B090:  81610000  lwz r11,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFE8B094:  7E838A14  add r20,r3,r17
+	   4: GETL       	R3, t4
+	   5: GETL       	R17, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R20
+	   8: INCEIPL       	$4
+
+	0xFE8B098:  7F84E378  or r4,r28,r28
+	   9: GETL       	R28, t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0xFE8B09C:  39540023  addi r10,r20,35
+	  12: GETL       	R20, t10
+	  13: ADDL       	$0x23, t10
+	  14: PUTL       	t10, R10
+	  15: INCEIPL       	$4
+
+	0xFE8B0A0:  7F181214  add r24,r24,r2
+	  16: GETL       	R24, t12
+	  17: GETL       	R2, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R24
+	  20: INCEIPL       	$4
+
+	0xFE8B0A4:  55480036  rlwinm r8,r10,0,0,27
+	  21: GETL       	R10, t16
+	  22: ANDL       	$0xFFFFFFF0, t16
+	  23: PUTL       	t16, R8
+	  24: INCEIPL       	$4
+
+	0xFE8B0A8:  7CE800D0  neg r7,r8
+	  25: GETL       	R8, t18
+	  26: NEGL       	t18
+	  27: PUTL       	t18, R7
+	  28: INCEIPL       	$4
+
+	0xFE8B0AC:  7D61396E  stwux r11,r1,r7
+	  29: GETL       	R7, t20
+	  30: GETL       	R1, t22
+	  31: ADDL       	t22, t20
+	  32: PUTL       	t20, R1
+	  33: GETL       	R11, t24
+	  34: STL       	t24, (t20)
+	  35: INCEIPL       	$4
+
+	0xFE8B0B0:  38C10017  addi r6,r1,23
+	  36: GETL       	R1, t26
+	  37: ADDL       	$0x17, t26
+	  38: PUTL       	t26, R6
+	  39: INCEIPL       	$4
+
+	0xFE8B0B4:  54D40036  rlwinm r20,r6,0,0,27
+	  40: GETL       	R6, t28
+	  41: ANDL       	$0xFFFFFFF0, t28
+	  42: PUTL       	t28, R20
+	  43: INCEIPL       	$4
+
+	0xFE8B0B8:  7E83A378  or r3,r20,r20
+	  44: GETL       	R20, t30
+	  45: PUTL       	t30, R3
+	  46: INCEIPL       	$4
+
+	0xFE8B0BC:  4804D245  bl 0xFED8300
+	  47: MOVL       	$0xFE8B0C0, t32
+	  48: PUTL       	t32, LR
+	  49: JMPo-c       	$0xFED8300  ($4)
+
+
+
+. 2213 FE8B090 48
+. 81 61 00 00 7E 83 8A 14 7F 84 E3 78 39 54 00 23 7F 18 12 14 55 48 00 36 7C E8 00 D0 7D 61 39 6E 38 C1 00 17 54 D4 00 36 7E 83 A3 78 48 04 D2 45
+==== BB 2214 stpcpy(0xFED8300) approx BBs exec'd 0 ====
+
+	0xFED8300:  7C801B78  or r0,r4,r3
+	   0: GETL       	R4, t0
+	   1: GETL       	R3, t2
+	   2: ORL       	t2, t0
+	   3: PUTL       	t0, R0
+	   4: INCEIPL       	$4
+
+	0xFED8304:  540007BF  rlwinm. r0,r0,0,30,31
+	   5: GETL       	R0, t4
+	   6: ANDL       	$0x3, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0xFED8308:  3863FFFC  addi r3,r3,-4
+	  11: GETL       	R3, t8
+	  12: ADDL       	$0xFFFFFFFC, t8
+	  13: PUTL       	t8, R3
+	  14: INCEIPL       	$4
+
+	0xFED830C:  40820088  bc 4,2,0xFED8394
+	  15: Jc02o       	$0xFED8394
+
+
+
+. 2214 FED8300 16
+. 7C 80 1B 78 54 00 07 BF 38 63 FF FC 40 82 00 88
+==== BB 2215 (0xFED8310) approx BBs exec'd 0 ====
+
+	0xFED8310:  3CE0FEFF  lis r7,-257
+	   0: MOVL       	$0xFEFF0000, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFED8314:  3D007F7F  lis r8,32639
+	   3: MOVL       	$0x7F7F0000, t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0xFED8318:  80C40000  lwz r6,0(r4)
+	   6: GETL       	R4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0xFED831C:  38E7FEFF  addi r7,r7,-257
+	  10: MOVL       	$0xFEFEFEFF, t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0xFED8320:  39087F7F  addi r8,r8,32639
+	  13: MOVL       	$0x7F7F7F7F, t10
+	  14: PUTL       	t10, R8
+	  15: INCEIPL       	$4
+
+	0xFED8324:  48000024  b 0xFED8348
+	  16: JMPo       	$0xFED8348  ($4)
+
+
+
+. 2215 FED8310 24
+. 3C E0 FE FF 3D 00 7F 7F 80 C4 00 00 38 E7 FE FF 39 08 7F 7F 48 00 00 24
+==== BB 2216 (0xFED8348) approx BBs exec'd 0 ====
+
+	0xFED8348:  7C073214  add r0,r7,r6
+	   0: GETL       	R7, t0
+	   1: GETL       	R6, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED834C:  7D0930F8  nor r9,r8,r6
+	   5: GETL       	R8, t4
+	   6: GETL       	R6, t6
+	   7: ORL       	t6, t4
+	   8: NOTL       	t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0xFED8350:  7C004839  and. r0,r0,r9
+	  11: GETL       	R0, t8
+	  12: GETL       	R9, t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R0
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0xFED8354:  4182FFD4  bc 12,2,0xFED8328
+	  18: Js02o       	$0xFED8328
+
+
+
+. 2216 FED8348 16
+. 7C 07 32 14 7D 09 30 F8 7C 00 48 39 41 82 FF D4
+==== BB 2217 (0xFED8328) approx BBs exec'd 0 ====
+
+	0xFED8328:  85440004  lwzu r10,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0xFED832C:  94C30004  stwu r6,4(r3)
+	   6: GETL       	R6, t4
+	   7: GETL       	R3, t6
+	   8: ADDL       	$0x4, t6
+	   9: PUTL       	t6, R3
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED8330:  7C075214  add r0,r7,r10
+	  12: GETL       	R7, t8
+	  13: GETL       	R10, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFED8334:  7D0950F8  nor r9,r8,r10
+	  17: GETL       	R8, t12
+	  18: GETL       	R10, t14
+	  19: ORL       	t14, t12
+	  20: NOTL       	t12
+	  21: PUTL       	t12, R9
+	  22: INCEIPL       	$4
+
+	0xFED8338:  7C004839  and. r0,r0,r9
+	  23: GETL       	R0, t16
+	  24: GETL       	R9, t18
+	  25: ANDL       	t16, t18
+	  26: PUTL       	t18, R0
+	  27: CMP0L       	t18, t20  (-rSo)
+	  28: ICRFL       	t20, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0xFED833C:  40820020  bc 4,2,0xFED835C
+	  30: Jc02o       	$0xFED835C
+
+
+
+. 2217 FED8328 24
+. 85 44 00 04 94 C3 00 04 7C 07 52 14 7D 09 50 F8 7C 00 48 39 40 82 00 20
+==== BB 2218 (0xFED8340) approx BBs exec'd 0 ====
+
+	0xFED8340:  84C40004  lwzu r6,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFED8344:  95430004  stwu r10,4(r3)
+	   6: GETL       	R10, t4
+	   7: GETL       	R3, t6
+	   8: ADDL       	$0x4, t6
+	   9: PUTL       	t6, R3
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED8348:  7C073214  add r0,r7,r6
+	  12: GETL       	R7, t8
+	  13: GETL       	R6, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFED834C:  7D0930F8  nor r9,r8,r6
+	  17: GETL       	R8, t12
+	  18: GETL       	R6, t14
+	  19: ORL       	t14, t12
+	  20: NOTL       	t12
+	  21: PUTL       	t12, R9
+	  22: INCEIPL       	$4
+
+	0xFED8350:  7C004839  and. r0,r0,r9
+	  23: GETL       	R0, t16
+	  24: GETL       	R9, t18
+	  25: ANDL       	t16, t18
+	  26: PUTL       	t18, R0
+	  27: CMP0L       	t18, t20  (-rSo)
+	  28: ICRFL       	t20, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0xFED8354:  4182FFD4  bc 12,2,0xFED8328
+	  30: Js02o       	$0xFED8328
+
+
+
+. 2218 FED8340 24
+. 84 C4 00 04 95 43 00 04 7C 07 32 14 7D 09 30 F8 7C 00 48 39 41 82 FF D4
+==== BB 2219 (0xFED8358) approx BBs exec'd 0 ====
+
+	0xFED8358:  7CCA3378  or r10,r6,r6
+	   0: GETL       	R6, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFED835C:  5540463F  rlwinm. r0,r10,8,24,31
+	   3: GETL       	R10, t2
+	   4: SHRL       	$0x18, t2
+	   5: PUTL       	t2, R0
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFED8360:  9C030004  stbu r0,4(r3)
+	   9: GETL       	R0, t6
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x4, t8
+	  12: PUTL       	t8, R3
+	  13: STB       	t6, (t8)
+	  14: INCEIPL       	$4
+
+	0xFED8364:  4D820020  bclr 12,2
+	  15: GETL       	LR, t10
+	  16: Js02o-r       	t10
+
+
+
+. 2219 FED8358 16
+. 7C CA 33 78 55 40 46 3F 9C 03 00 04 4D 82 00 20
+==== BB 2220 (0xFED8368) approx BBs exec'd 0 ====
+
+	0xFED8368:  5540863F  rlwinm. r0,r10,16,24,31
+	   0: GETL       	R10, t0
+	   1: ROLL       	$0x10, t0
+	   2: ANDL       	$0xFF, t0
+	   3: PUTL       	t0, R0
+	   4: CMP0L       	t0, t2  (-rSo)
+	   5: ICRFL       	t2, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFED836C:  9C030001  stbu r0,1(r3)
+	   7: GETL       	R0, t4
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x1, t6
+	  10: PUTL       	t6, R3
+	  11: STB       	t4, (t6)
+	  12: INCEIPL       	$4
+
+	0xFED8370:  4D820020  bclr 12,2
+	  13: GETL       	LR, t8
+	  14: Js02o-r       	t8
+
+
+
+. 2220 FED8368 12
+. 55 40 86 3F 9C 03 00 01 4D 82 00 20
+==== BB 2221 (0xFED8374) approx BBs exec'd 0 ====
+
+	0xFED8374:  5540C63F  rlwinm. r0,r10,24,24,31
+	   0: GETL       	R10, t0
+	   1: ROLL       	$0x18, t0
+	   2: ANDL       	$0xFF, t0
+	   3: PUTL       	t0, R0
+	   4: CMP0L       	t0, t2  (-rSo)
+	   5: ICRFL       	t2, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFED8378:  9C030001  stbu r0,1(r3)
+	   7: GETL       	R0, t4
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x1, t6
+	  10: PUTL       	t6, R3
+	  11: STB       	t4, (t6)
+	  12: INCEIPL       	$4
+
+	0xFED837C:  4D820020  bclr 12,2
+	  13: GETL       	LR, t8
+	  14: Js02o-r       	t8
+
+
+
+. 2221 FED8374 12
+. 55 40 C6 3F 9C 03 00 01 4D 82 00 20
+==== BB 2222 (0xFED8380) approx BBs exec'd 0 ====
+
+	0xFED8380:  9D430001  stbu r10,1(r3)
+	   0: GETL       	R10, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0x1, t2
+	   3: PUTL       	t2, R3
+	   4: STB       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED8384:  4E800020  blr
+	   6: GETL       	LR, t4
+	   7: JMPo-r       	t4  ($4)
+
+
+
+. 2222 FED8380 8
+. 9D 43 00 01 4E 80 00 20
+==== BB 2223 (0xFE8B0C0) approx BBs exec'd 0 ====
+
+	0xFE8B0C0:  38A02F00  li r5,12032
+	   0: MOVL       	$0x2F00, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFE8B0C4:  B0A30000  sth r5,0(r3)
+	   3: GETL       	R5, t2
+	   4: GETL       	R3, t4
+	   5: STW       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFE8B0C8:  7EA4AB78  or r4,r21,r21
+	   7: GETL       	R21, t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFE8B0CC:  7E258B78  or r5,r17,r17
+	  10: GETL       	R17, t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0xFE8B0D0:  38630001  addi r3,r3,1
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0x1, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFE8B0D4:  4804CF2D  bl 0xFED8000
+	  17: MOVL       	$0xFE8B0D8, t12
+	  18: PUTL       	t12, LR
+	  19: JMPo-c       	$0xFED8000  ($4)
+
+
+
+. 2223 FE8B0C0 24
+. 38 A0 2F 00 B0 A3 00 00 7E A4 AB 78 7E 25 8B 78 38 63 00 01 48 04 CF 2D
+==== BB 2224 (0xFE8B0D8) approx BBs exec'd 0 ====
+
+	0xFE8B0D8:  3C802E6D  lis r4,11885
+	   0: MOVL       	$0x2E6D0000, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE8B0DC:  60896F00  ori r9,r4,0x6F00
+	   3: MOVL       	$0x2E6D6F00, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0xFE8B0E0:  91230000  stw r9,0(r3)
+	   6: GETL       	R9, t4
+	   7: GETL       	R3, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE8B0E4:  7F63DB78  or r3,r27,r27
+	  10: GETL       	R27, t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFE8B0E8:  4804B9F1  bl 0xFED6AD8
+	  13: MOVL       	$0xFE8B0EC, t10
+	  14: PUTL       	t10, LR
+	  15: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2224 FE8B0D8 20
+. 3C 80 2E 6D 60 89 6F 00 91 23 00 00 7F 63 DB 78 48 04 B9 F1
+==== BB 2225 (0xFE8B0EC) approx BBs exec'd 0 ====
+
+	0xFE8B0EC:  83810000  lwz r28,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R28
+	   3: INCEIPL       	$4
+
+	0xFE8B0F0:  3803001F  addi r0,r3,31
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x1F, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFE8B0F4:  931F0018  stw r24,24(r31)
+	   8: GETL       	R24, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0x18, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE8B0F8:  54030036  rlwinm r3,r0,0,0,27
+	  13: GETL       	R0, t10
+	  14: ANDL       	$0xFFFFFFF0, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFE8B0FC:  7FA300D0  neg r29,r3
+	  17: GETL       	R3, t12
+	  18: NEGL       	t12
+	  19: PUTL       	t12, R29
+	  20: INCEIPL       	$4
+
+	0xFE8B100:  7F81E96E  stwux r28,r1,r29
+	  21: GETL       	R29, t14
+	  22: GETL       	R1, t16
+	  23: ADDL       	t16, t14
+	  24: PUTL       	t14, R1
+	  25: GETL       	R28, t18
+	  26: STL       	t18, (t14)
+	  27: INCEIPL       	$4
+
+	0xFE8B104:  3B410017  addi r26,r1,23
+	  28: GETL       	R1, t20
+	  29: ADDL       	$0x17, t20
+	  30: PUTL       	t20, R26
+	  31: INCEIPL       	$4
+
+	0xFE8B108:  57590036  rlwinm r25,r26,0,0,27
+	  32: GETL       	R26, t22
+	  33: ANDL       	$0xFFFFFFF0, t22
+	  34: PUTL       	t22, R25
+	  35: INCEIPL       	$4
+
+	0xFE8B10C:  893B0000  lbz r9,0(r27)
+	  36: GETL       	R27, t24
+	  37: LDB       	(t24), t26
+	  38: PUTL       	t26, R9
+	  39: INCEIPL       	$4
+
+	0xFE8B110:  2F09003A  cmpi cr6,r9,58
+	  40: GETL       	R9, t28
+	  41: MOVL       	$0x3A, t32
+	  42: CMPL       	t28, t32, t30  (-rSo)
+	  43: ICRFL       	t30, $0x6, CR
+	  44: INCEIPL       	$4
+
+	0xFE8B114:  409A0010  bc 4,26,0xFE8B124
+	  45: Jc26o       	$0xFE8B124
+
+
+
+. 2225 FE8B0EC 44
+. 83 81 00 00 38 03 00 1F 93 1F 00 18 54 03 00 36 7F A3 00 D0 7F 81 E9 6E 3B 41 00 17 57 59 00 36 89 3B 00 00 2F 09 00 3A 40 9A 00 10
+==== BB 2226 (0xFE8B124) approx BBs exec'd 0 ====
+
+	0xFE8B124:  7D2B4B79  or. r11,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R11
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8B128:  40820188  bc 4,2,0xFE8B2B0
+	   5: Jc02o       	$0xFE8B2B0
+
+
+
+. 2226 FE8B124 8
+. 7D 2B 4B 79 40 82 01 88
+==== BB 2227 (0xFE8B2B0) approx BBs exec'd 0 ====
+
+	0xFE8B2B0:  2F8B003A  cmpi cr7,r11,58
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x3A, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE8B2B4:  7F2ACB78  or r10,r25,r25
+	   5: GETL       	R25, t6
+	   6: PUTL       	t6, R10
+	   7: INCEIPL       	$4
+
+	0xFE8B2B8:  419E001C  bc 12,30,0xFE8B2D4
+	   8: Js30o       	$0xFE8B2D4
+
+
+
+. 2227 FE8B2B0 12
+. 2F 8B 00 3A 7F 2A CB 78 41 9E 00 1C
+==== BB 2228 (0xFE8B2BC) approx BBs exec'd 0 ====
+
+	0xFE8B2BC:  992A0000  stb r9,0(r10)
+	   0: GETL       	R9, t0
+	   1: GETL       	R10, t2
+	   2: STB       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE8B2C0:  394A0001  addi r10,r10,1
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFE8B2C4:  8D3B0001  lbzu r9,1(r27)
+	   8: GETL       	R27, t6
+	   9: ADDL       	$0x1, t6
+	  10: PUTL       	t6, R27
+	  11: LDB       	(t6), t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFE8B2C8:  7D384B79  or. r24,r9,r9
+	  14: GETL       	R9, t10
+	  15: PUTL       	t10, R24
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFE8B2CC:  2F98003A  cmpi cr7,r24,58
+	  19: GETL       	R24, t14
+	  20: MOVL       	$0x3A, t18
+	  21: CMPL       	t14, t18, t16  (-rSo)
+	  22: ICRFL       	t16, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0xFE8B2D0:  4082FFE8  bc 4,2,0xFE8B2B8
+	  24: Jc02o       	$0xFE8B2B8
+
+
+
+. 2228 FE8B2BC 24
+. 99 2A 00 00 39 4A 00 01 8D 3B 00 01 7D 38 4B 79 2F 98 00 3A 40 82 FF E8
+==== BB 2229 (0xFE8B2B8) approx BBs exec'd 0 ====
+
+	0xFE8B2B8:  419E001C  bc 12,30,0xFE8B2D4
+	   0: Js30o       	$0xFE8B2D4
+
+
+
+. 2229 FE8B2B8 4
+. 41 9E 00 1C
+==== BB 2230 (0xFE8B2D4) approx BBs exec'd 0 ====
+
+	0xFE8B2D4:  839E1D70  lwz r28,7536(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1D70, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFE8B2D8:  3BA00000  li r29,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFE8B2DC:  835C0000  lwz r26,0(r28)
+	   8: GETL       	R28, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R26
+	  11: INCEIPL       	$4
+
+	0xFE8B2E0:  9BAA0000  stb r29,0(r10)
+	  12: GETL       	R29, t10
+	  13: GETL       	R10, t12
+	  14: STB       	t10, (t12)
+	  15: INCEIPL       	$4
+
+	0xFE8B2E4:  2F9A0000  cmpi cr7,r26,0
+	  16: GETL       	R26, t14
+	  17: CMP0L       	t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x7, CR
+	  19: INCEIPL       	$4
+
+	0xFE8B2E8:  41BEFE50  bc 13,30,0xFE8B138
+	  20: Js30o       	$0xFE8B138
+
+
+
+. 2230 FE8B2D4 24
+. 83 9E 1D 70 3B A0 00 00 83 5C 00 00 9B AA 00 00 2F 9A 00 00 41 BE FE 50
+==== BB 2231 (0xFE8B138) approx BBs exec'd 0 ====
+
+	0xFE8B138:  88790000  lbz r3,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8B13C:  3523FFBD  addic. r9,r3,-67
+	   4: GETL       	R3, t4
+	   5: ADCL       	$0xFFFFFFBD, t4  (-wCa)
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE8B140:  4F421042  crnor 26,2,2
+	  10: GETL       	CR, t8
+	  11: XBITB       	t8, $0x2, t10
+	  12: XBITB       	t8, $0x2, t12
+	  13: NOTB       	t10
+	  14: NOTB       	t12
+	  15: ANDB       	t12, t10
+	  16: IBITL       	t10, $0x1A, t8
+	  17: PUTL       	t8, CR
+	  18: INCEIPL       	$4
+
+	0xFE8B144:  419A000C  bc 12,26,0xFE8B150
+	  19: Js26o       	$0xFE8B150
+
+
+
+. 2231 FE8B138 16
+. 88 79 00 00 35 23 FF BD 4F 42 10 42 41 9A 00 0C
+==== BB 2232 (0xFE8B150) approx BBs exec'd 0 ====
+
+	0xFE8B150:  418202F8  bc 12,2,0xFE8B448
+	   0: Js02o       	$0xFE8B448
+
+
+
+. 2232 FE8B150 4
+. 41 82 02 F8
+==== BB 2233 (0xFE8B154) approx BBs exec'd 0 ====
+
+	0xFE8B154:  809E0198  lwz r4,408(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x198, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8B158:  7F23CB78  or r3,r25,r25
+	   5: GETL       	R25, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE8B15C:  4804B255  bl 0xFED63B0
+	   8: MOVL       	$0xFE8B160, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2233 FE8B154 12
+. 80 9E 01 98 7F 23 CB 78 48 04 B2 55
+==== BB 2234 (0xFE8B160) approx BBs exec'd 0 ====
+
+	0xFE8B160:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE8B164:  418202E4  bc 12,2,0xFE8B448
+	   4: Js02o       	$0xFE8B448
+
+
+
+. 2234 FE8B160 8
+. 2C 03 00 00 41 82 02 E4
+==== BB 2235 (0xFE8B168) approx BBs exec'd 0 ====
+
+	0xFE8B168:  7E639B78  or r3,r19,r19
+	   0: GETL       	R19, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8B16C:  7F24CB78  or r4,r25,r25
+	   3: GETL       	R25, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8B170:  7E85A378  or r5,r20,r20
+	   6: GETL       	R20, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFE8B174:  7EE6BB78  or r6,r23,r23
+	   9: GETL       	R23, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0xFE8B178:  48000585  bl 0xFE8B6FC
+	  12: MOVL       	$0xFE8B17C, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0xFE8B6FC  ($4)
+
+
+
+. 2235 FE8B168 20
+. 7E 63 9B 78 7F 24 CB 78 7E 85 A3 78 7E E6 BB 78 48 00 05 85
+==== BB 2236 _nl_find_domain(0xFE8B6FC) approx BBs exec'd 0 ====
+
+	0xFE8B6FC:  9421FFA0  stwu r1,-96(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFA0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE8B700:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE8B704:  4811C74D  bl 0xFFA7E50
+	   9: MOVL       	$0xFE8B708, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2236 FE8B6FC 12
+. 94 21 FF A0 7C 08 02 A6 48 11 C7 4D
+==== BB 2237 (0xFE8B708) approx BBs exec'd 0 ====
+
+	0xFE8B708:  93C10058  stw r30,88(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x58, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8B70C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE8B710:  93810050  stw r28,80(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x50, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE8B714:  90010064  stw r0,100(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x64, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE8B718:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0xFE8B71C:  93210044  stw r25,68(r1)
+	  21: GETL       	R25, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x44, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE8B720:  7CB92B78  or r25,r5,r5
+	  26: GETL       	R5, t20
+	  27: PUTL       	t20, R25
+	  28: INCEIPL       	$4
+
+	0xFE8B724:  839E1B48  lwz r28,6984(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x1B48, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R28
+	  33: INCEIPL       	$4
+
+	0xFE8B728:  93410048  stw r26,72(r1)
+	  34: GETL       	R26, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x48, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFE8B72C:  7C9A2378  or r26,r4,r4
+	  39: GETL       	R4, t30
+	  40: PUTL       	t30, R26
+	  41: INCEIPL       	$4
+
+	0xFE8B730:  801C00A0  lwz r0,160(r28)
+	  42: GETL       	R28, t32
+	  43: ADDL       	$0xA0, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R0
+	  46: INCEIPL       	$4
+
+	0xFE8B734:  9361004C  stw r27,76(r1)
+	  47: GETL       	R27, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x4C, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0xFE8B738:  7CDB3378  or r27,r6,r6
+	  52: GETL       	R6, t40
+	  53: PUTL       	t40, R27
+	  54: INCEIPL       	$4
+
+	0xFE8B73C:  2F800000  cmpi cr7,r0,0
+	  55: GETL       	R0, t42
+	  56: CMP0L       	t42, t44  (-rSo)
+	  57: ICRFL       	t44, $0x7, CR
+	  58: INCEIPL       	$4
+
+	0xFE8B740:  93A10054  stw r29,84(r1)
+	  59: GETL       	R29, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x54, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFE8B744:  93010040  stw r24,64(r1)
+	  64: GETL       	R24, t50
+	  65: GETL       	R1, t52
+	  66: ADDL       	$0x40, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0xFE8B748:  7C7D1B78  or r29,r3,r3
+	  69: GETL       	R3, t54
+	  70: PUTL       	t54, R29
+	  71: INCEIPL       	$4
+
+	0xFE8B74C:  93E1005C  stw r31,92(r1)
+	  72: GETL       	R31, t56
+	  73: GETL       	R1, t58
+	  74: ADDL       	$0x5C, t58
+	  75: STL       	t56, (t58)
+	  76: INCEIPL       	$4
+
+	0xFE8B750:  9181003C  stw r12,60(r1)
+	  77: GETL       	R12, t60
+	  78: GETL       	R1, t62
+	  79: ADDL       	$0x3C, t62
+	  80: STL       	t60, (t62)
+	  81: INCEIPL       	$4
+
+	0xFE8B754:  409E0120  bc 4,30,0xFE8B874
+	  82: Jc30o       	$0xFE8B874
+
+
+
+. 2237 FE8B708 80
+. 93 C1 00 58 7F C8 02 A6 93 81 00 50 90 01 00 64 7D 80 00 26 93 21 00 44 7C B9 2B 78 83 9E 1B 48 93 41 00 48 7C 9A 23 78 80 1C 00 A0 93 61 00 4C 7C DB 33 78 2F 80 00 00 93 A1 00 54 93 01 00 40 7C 7D 1B 78 93 E1 00 5C 91 81 00 3C 40 9E 01 20
+==== BB 2238 (0xFE8B758) approx BBs exec'd 0 ====
+
+	0xFE8B758:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8B75C:  4804B37D  bl 0xFED6AD8
+	   3: MOVL       	$0xFE8B760, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2238 FE8B758 8
+. 7F A3 EB 78 48 04 B3 7D
+==== BB 2239 (0xFE8B760) approx BBs exec'd 0 ====
+
+	0xFE8B760:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFE8B764:  38A30001  addi r5,r3,1
+	   3: GETL       	R3, t2
+	   4: ADDL       	$0x1, t2
+	   5: PUTL       	t2, R5
+	   6: INCEIPL       	$4
+
+	0xFE8B768:  807E01A0  lwz r3,416(r30)
+	   7: GETL       	R30, t4
+	   8: ADDL       	$0x1A0, t4
+	   9: LDL       	(t4), t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0xFE8B76C:  7FA4EB78  or r4,r29,r29
+	  12: GETL       	R29, t8
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0xFE8B770:  38C00000  li r6,0
+	  15: MOVL       	$0x0, t10
+	  16: PUTL       	t10, R6
+	  17: INCEIPL       	$4
+
+	0xFE8B774:  7F47D378  or r7,r26,r26
+	  18: GETL       	R26, t12
+	  19: PUTL       	t12, R7
+	  20: INCEIPL       	$4
+
+	0xFE8B778:  39000000  li r8,0
+	  21: MOVL       	$0x0, t14
+	  22: PUTL       	t14, R8
+	  23: INCEIPL       	$4
+
+	0xFE8B77C:  39200000  li r9,0
+	  24: MOVL       	$0x0, t16
+	  25: PUTL       	t16, R9
+	  26: INCEIPL       	$4
+
+	0xFE8B780:  39400000  li r10,0
+	  27: MOVL       	$0x0, t18
+	  28: PUTL       	t18, R10
+	  29: INCEIPL       	$4
+
+	0xFE8B784:  91610010  stw r11,16(r1)
+	  30: GETL       	R11, t20
+	  31: GETL       	R1, t22
+	  32: ADDL       	$0x10, t22
+	  33: STL       	t20, (t22)
+	  34: INCEIPL       	$4
+
+	0xFE8B788:  91610008  stw r11,8(r1)
+	  35: GETL       	R11, t24
+	  36: GETL       	R1, t26
+	  37: ADDL       	$0x8, t26
+	  38: STL       	t24, (t26)
+	  39: INCEIPL       	$4
+
+	0xFE8B78C:  9321000C  stw r25,12(r1)
+	  40: GETL       	R25, t28
+	  41: GETL       	R1, t30
+	  42: ADDL       	$0xC, t30
+	  43: STL       	t28, (t30)
+	  44: INCEIPL       	$4
+
+	0xFE8B790:  4800216D  bl 0xFE8D8FC
+	  45: MOVL       	$0xFE8B794, t32
+	  46: PUTL       	t32, LR
+	  47: JMPo-c       	$0xFE8D8FC  ($4)
+
+
+
+. 2239 FE8B760 52
+. 39 60 00 00 38 A3 00 01 80 7E 01 A0 7F A4 EB 78 38 C0 00 00 7F 47 D3 78 39 00 00 00 39 20 00 00 39 40 00 00 91 61 00 10 91 61 00 08 93 21 00 0C 48 00 21 6D
+==== BB 2240 _nl_make_l10nflist(0xFE8D8FC) approx BBs exec'd 0 ====
+
+	0xFE8D8FC:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE8D900:  7D800026  mfcr r12
+	   3: GETL       	CR, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0xFE8D904:  9421FF90  stwu r1,-112(r1)
+	   6: GETL       	R1, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xFFFFFF90, t6
+	   9: PUTL       	t6, R1
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFE8D908:  91E1002C  stw r15,44(r1)
+	  12: GETL       	R15, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x2C, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFE8D90C:  92010030  stw r16,48(r1)
+	  17: GETL       	R16, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x30, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFE8D910:  92410038  stw r18,56(r1)
+	  22: GETL       	R18, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x38, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xFE8D914:  7C721B78  or r18,r3,r3
+	  27: GETL       	R3, t20
+	  28: PUTL       	t20, R18
+	  29: INCEIPL       	$4
+
+	0xFE8D918:  9261003C  stw r19,60(r1)
+	  30: GETL       	R19, t22
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x3C, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0xFE8D91C:  7CE33B78  or r3,r7,r7
+	  35: GETL       	R7, t26
+	  36: PUTL       	t26, R3
+	  37: INCEIPL       	$4
+
+	0xFE8D920:  92810040  stw r20,64(r1)
+	  38: GETL       	R20, t28
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x40, t30
+	  41: STL       	t28, (t30)
+	  42: INCEIPL       	$4
+
+	0xFE8D924:  7D334B78  or r19,r9,r9
+	  43: GETL       	R9, t32
+	  44: PUTL       	t32, R19
+	  45: INCEIPL       	$4
+
+	0xFE8D928:  92A10044  stw r21,68(r1)
+	  46: GETL       	R21, t34
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x44, t36
+	  49: STL       	t34, (t36)
+	  50: INCEIPL       	$4
+
+	0xFE8D92C:  7D144378  or r20,r8,r8
+	  51: GETL       	R8, t38
+	  52: PUTL       	t38, R20
+	  53: INCEIPL       	$4
+
+	0xFE8D930:  92C10048  stw r22,72(r1)
+	  54: GETL       	R22, t40
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x48, t42
+	  57: STL       	t40, (t42)
+	  58: INCEIPL       	$4
+
+	0xFE8D934:  7D555378  or r21,r10,r10
+	  59: GETL       	R10, t44
+	  60: PUTL       	t44, R21
+	  61: INCEIPL       	$4
+
+	0xFE8D938:  93010050  stw r24,80(r1)
+	  62: GETL       	R24, t46
+	  63: GETL       	R1, t48
+	  64: ADDL       	$0x50, t48
+	  65: STL       	t46, (t48)
+	  66: INCEIPL       	$4
+
+	0xFE8D93C:  54D8F0BE  rlwinm r24,r6,30,2,31
+	  67: GETL       	R6, t50
+	  68: SHRL       	$0x2, t50
+	  69: PUTL       	t50, R24
+	  70: INCEIPL       	$4
+
+	0xFE8D940:  93410058  stw r26,88(r1)
+	  71: GETL       	R26, t52
+	  72: GETL       	R1, t54
+	  73: ADDL       	$0x58, t54
+	  74: STL       	t52, (t54)
+	  75: INCEIPL       	$4
+
+	0xFE8D944:  7CF63B78  or r22,r7,r7
+	  76: GETL       	R7, t56
+	  77: PUTL       	t56, R22
+	  78: INCEIPL       	$4
+
+	0xFE8D948:  93810060  stw r28,96(r1)
+	  79: GETL       	R28, t58
+	  80: GETL       	R1, t60
+	  81: ADDL       	$0x60, t60
+	  82: STL       	t58, (t60)
+	  83: INCEIPL       	$4
+
+	0xFE8D94C:  7CBA2B78  or r26,r5,r5
+	  84: GETL       	R5, t62
+	  85: PUTL       	t62, R26
+	  86: INCEIPL       	$4
+
+	0xFE8D950:  93A10064  stw r29,100(r1)
+	  87: GETL       	R29, t64
+	  88: GETL       	R1, t66
+	  89: ADDL       	$0x64, t66
+	  90: STL       	t64, (t66)
+	  91: INCEIPL       	$4
+
+	0xFE8D954:  7CDC3378  or r28,r6,r6
+	  92: GETL       	R6, t68
+	  93: PUTL       	t68, R28
+	  94: INCEIPL       	$4
+
+	0xFE8D958:  93E1006C  stw r31,108(r1)
+	  95: GETL       	R31, t70
+	  96: GETL       	R1, t72
+	  97: ADDL       	$0x6C, t72
+	  98: STL       	t70, (t72)
+	  99: INCEIPL       	$4
+
+	0xFE8D95C:  90010074  stw r0,116(r1)
+	 100: GETL       	R0, t74
+	 101: GETL       	R1, t76
+	 102: ADDL       	$0x74, t76
+	 103: STL       	t74, (t76)
+	 104: INCEIPL       	$4
+
+	0xFE8D960:  91C10028  stw r14,40(r1)
+	 105: GETL       	R14, t78
+	 106: GETL       	R1, t80
+	 107: ADDL       	$0x28, t80
+	 108: STL       	t78, (t80)
+	 109: INCEIPL       	$4
+
+	0xFE8D964:  92210034  stw r17,52(r1)
+	 110: GETL       	R17, t82
+	 111: GETL       	R1, t84
+	 112: ADDL       	$0x34, t84
+	 113: STL       	t82, (t84)
+	 114: INCEIPL       	$4
+
+	0xFE8D968:  92E1004C  stw r23,76(r1)
+	 115: GETL       	R23, t86
+	 116: GETL       	R1, t88
+	 117: ADDL       	$0x4C, t88
+	 118: STL       	t86, (t88)
+	 119: INCEIPL       	$4
+
+	0xFE8D96C:  93210054  stw r25,84(r1)
+	 120: GETL       	R25, t90
+	 121: GETL       	R1, t92
+	 122: ADDL       	$0x54, t92
+	 123: STL       	t90, (t92)
+	 124: INCEIPL       	$4
+
+	0xFE8D970:  9361005C  stw r27,92(r1)
+	 125: GETL       	R27, t94
+	 126: GETL       	R1, t96
+	 127: ADDL       	$0x5C, t96
+	 128: STL       	t94, (t96)
+	 129: INCEIPL       	$4
+
+	0xFE8D974:  93C10068  stw r30,104(r1)
+	 130: GETL       	R30, t98
+	 131: GETL       	R1, t100
+	 132: ADDL       	$0x68, t100
+	 133: STL       	t98, (t100)
+	 134: INCEIPL       	$4
+
+	0xFE8D978:  91810024  stw r12,36(r1)
+	 135: GETL       	R12, t102
+	 136: GETL       	R1, t104
+	 137: ADDL       	$0x24, t104
+	 138: STL       	t102, (t104)
+	 139: INCEIPL       	$4
+
+	0xFE8D97C:  90810018  stw r4,24(r1)
+	 140: GETL       	R4, t106
+	 141: GETL       	R1, t108
+	 142: ADDL       	$0x18, t108
+	 143: STL       	t106, (t108)
+	 144: INCEIPL       	$4
+
+	0xFE8D980:  48049159  bl 0xFED6AD8
+	 145: MOVL       	$0xFE8D984, t110
+	 146: PUTL       	t110, LR
+	 147: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2240 FE8D8FC 136
+. 7C 08 02 A6 7D 80 00 26 94 21 FF 90 91 E1 00 2C 92 01 00 30 92 41 00 38 7C 72 1B 78 92 61 00 3C 7C E3 3B 78 92 81 00 40 7D 33 4B 78 92 A1 00 44 7D 14 43 78 92 C1 00 48 7D 55 53 78 93 01 00 50 54 D8 F0 BE 93 41 00 58 7C F6 3B 78 93 81 00 60 7C BA 2B 78 93 A1 00 64 7C DC 33 78 93 E1 00 6C 90 01 00 74 91 C1 00 28 92 21 00 34 92 E1 00 4C 93 21 00 54 93 61 00 5C 93 C1 00 68 91 81 00 24 90 81 00 18 48 04 91 59
+==== BB 2241 (0xFE8D984) approx BBs exec'd 0 ====
+
+	0xFE8D984:  73000001  andi. r0,r24,0x1
+	   0: GETL       	R24, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8D988:  7FA3D214  add r29,r3,r26
+	   6: GETL       	R3, t4
+	   7: GETL       	R26, t6
+	   8: ADDL       	t4, t6
+	   9: PUTL       	t6, R29
+	  10: INCEIPL       	$4
+
+	0xFE8D98C:  81E10078  lwz r15,120(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x78, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R15
+	  15: INCEIPL       	$4
+
+	0xFE8D990:  8201007C  lwz r16,124(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x7C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R16
+	  20: INCEIPL       	$4
+
+	0xFE8D994:  7FBFEB78  or r31,r29,r29
+	  21: GETL       	R29, t16
+	  22: PUTL       	t16, R31
+	  23: INCEIPL       	$4
+
+	0xFE8D998:  41820014  bc 12,2,0xFE8D9AC
+	  24: Js02o       	$0xFE8D9AC
+
+
+
+. 2241 FE8D984 24
+. 73 00 00 01 7F A3 D2 14 81 E1 00 78 82 01 00 7C 7F BF EB 78 41 82 00 14
+==== BB 2242 (0xFE8D9AC) approx BBs exec'd 0 ====
+
+	0xFE8D9AC:  5799F87E  rlwinm r25,r28,31,1,31
+	   0: GETL       	R28, t0
+	   1: SHRL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0xFE8D9B0:  7FFDFB78  or r29,r31,r31
+	   4: GETL       	R31, t2
+	   5: PUTL       	t2, R29
+	   6: INCEIPL       	$4
+
+	0xFE8D9B4:  73200001  andi. r0,r25,0x1
+	   7: GETL       	R25, t4
+	   8: ANDL       	$0x1, t4
+	   9: PUTL       	t4, R0
+	  10: CMP0L       	t4, t6  (-rSo)
+	  11: ICRFL       	t6, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFE8D9B8:  41820014  bc 12,2,0xFE8D9CC
+	  13: Js02o       	$0xFE8D9CC
+
+
+
+. 2242 FE8D9AC 16
+. 57 99 F8 7E 7F FD FB 78 73 20 00 01 41 82 00 14
+==== BB 2243 (0xFE8D9CC) approx BBs exec'd 0 ====
+
+	0xFE8D9CC:  73970001  andi. r23,r28,0x1
+	   0: GETL       	R28, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R23
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8D9D0:  41820188  bc 12,2,0xFE8DB58
+	   6: Js02o       	$0xFE8DB58
+
+
+
+. 2243 FE8D9CC 8
+. 73 97 00 01 41 82 01 88
+==== BB 2244 (0xFE8DB58) approx BBs exec'd 0 ====
+
+	0xFE8DB58:  7FBFEB78  or r31,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFE8DB5C:  4BFFFE88  b 0xFE8D9E4
+	   3: JMPo       	$0xFE8D9E4  ($4)
+
+
+
+. 2244 FE8DB58 8
+. 7F BF EB 78 4B FF FE 88
+==== BB 2245 (0xFE8D9E4) approx BBs exec'd 0 ====
+
+	0xFE8D9E4:  7E038378  or r3,r16,r16
+	   0: GETL       	R16, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D9E8:  579BE8FE  rlwinm r27,r28,29,3,31
+	   3: GETL       	R28, t2
+	   4: SHRL       	$0x3, t2
+	   5: PUTL       	t2, R27
+	   6: INCEIPL       	$4
+
+	0xFE8D9EC:  480490ED  bl 0xFED6AD8
+	   7: MOVL       	$0xFE8D9F0, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2245 FE8D9E4 12
+. 7E 03 83 78 57 9B E8 FE 48 04 90 ED
+==== BB 2246 (0xFE8D9F0) approx BBs exec'd 0 ====
+
+	0xFE8D9F0:  73600001  andi. r0,r27,0x1
+	   0: GETL       	R27, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8D9F4:  7D3F1A14  add r9,r31,r3
+	   6: GETL       	R31, t4
+	   7: GETL       	R3, t6
+	   8: ADDL       	t4, t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0xFE8D9F8:  7C7D1B78  or r29,r3,r3
+	  11: GETL       	R3, t8
+	  12: PUTL       	t8, R29
+	  13: INCEIPL       	$4
+
+	0xFE8D9FC:  38690002  addi r3,r9,2
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x2, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0xFE8DA00:  41820018  bc 12,2,0xFE8DA18
+	  18: Js02o       	$0xFE8DA18
+
+
+
+. 2246 FE8D9F0 20
+. 73 60 00 01 7D 3F 1A 14 7C 7D 1B 78 38 69 00 02 41 82 00 18
+==== BB 2247 (0xFE8DA18) approx BBs exec'd 0 ====
+
+	0xFE8DA18:  4811C8F1  bl 0xFFAA308
+	   0: MOVL       	$0xFE8DA1C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 2247 FE8DA18 4
+. 48 11 C8 F1
+==== BB 2248 (0xFE8DA1C) approx BBs exec'd 0 ====
+
+	0xFE8DA1C:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE8DA20:  7C7D1B79  or. r29,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R29
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE8DA24:  418200D0  bc 12,2,0xFE8DAF4
+	   8: Js02o       	$0xFE8DAF4
+
+
+
+. 2248 FE8DA1C 12
+. 38 00 00 00 7C 7D 1B 79 41 82 00 D0
+==== BB 2249 (0xFE8DA28) approx BBs exec'd 0 ====
+
+	0xFE8DA28:  80810018  lwz r4,24(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8DA2C:  7F45D378  or r5,r26,r26
+	   5: GETL       	R26, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFE8DA30:  4804ACA9  bl 0xFED86D8
+	   8: MOVL       	$0xFE8DA34, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2249 FE8DA28 12
+. 80 81 00 18 7F 45 D3 78 48 04 AC A9
+==== BB 2250 (0xFE8DA34) approx BBs exec'd 0 ====
+
+	0xFE8DA34:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8DA38:  7F44D378  or r4,r26,r26
+	   3: GETL       	R26, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8DA3C:  38A0003A  li r5,58
+	   6: MOVL       	$0x3A, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFE8DA40:  4804BF49  bl 0xFED9988
+	   9: MOVL       	$0xFE8DA44, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFED9988  ($4)
+
+
+
+. 2250 FE8DA34 16
+. 7F A3 EB 78 7F 44 D3 78 38 A0 00 3A 48 04 BF 49
+==== BB 2251 __argz_stringify_internal(0xFED9988) approx BBs exec'd 0 ====
+
+	0xFED9988:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED998C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFED9990:  93A10014  stw r29,20(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFED9994:  7C9D2379  or. r29,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R29
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFED9998:  93810010  stw r28,16(r1)
+	  19: GETL       	R28, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x10, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFED999C:  7CBC2B78  or r28,r5,r5
+	  24: GETL       	R5, t18
+	  25: PUTL       	t18, R28
+	  26: INCEIPL       	$4
+
+	0xFED99A0:  93E1001C  stw r31,28(r1)
+	  27: GETL       	R31, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFED99A4:  7C7F1B78  or r31,r3,r3
+	  32: GETL       	R3, t24
+	  33: PUTL       	t24, R31
+	  34: INCEIPL       	$4
+
+	0xFED99A8:  93C10018  stw r30,24(r1)
+	  35: GETL       	R30, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x18, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFED99AC:  90010024  stw r0,36(r1)
+	  40: GETL       	R0, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x24, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFED99B0:  41820030  bc 12,2,0xFED99E0
+	  45: Js02o       	$0xFED99E0
+
+
+
+. 2251 FED9988 44
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 7C 9D 23 79 93 81 00 10 7C BC 2B 78 93 E1 00 1C 7C 7F 1B 78 93 C1 00 18 90 01 00 24 41 82 00 30
+==== BB 2252 (0xFED99B4) approx BBs exec'd 0 ====
+
+	0xFED99B4:  4800000C  b 0xFED99C0
+	   0: JMPo       	$0xFED99C0  ($4)
+
+
+
+. 2252 FED99B4 4
+. 48 00 00 0C
+==== BB 2253 (0xFED99C0) approx BBs exec'd 0 ====
+
+	0xFED99C0:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFED99C4:  7FE3FB78  or r3,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFED99C8:  4BFFD1C9  bl 0xFED6B90
+	   6: MOVL       	$0xFED99CC, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED6B90  ($4)
+
+
+
+. 2253 FED99C0 12
+. 7F A4 EB 78 7F E3 FB 78 4B FF D1 C9
+==== BB 2254 strnlen(0xFED6B90) approx BBs exec'd 0 ====
+
+	0xFED6B90:  2F840000  cmpi cr7,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFED6B94:  7C681B78  or r8,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R8
+	   6: INCEIPL       	$4
+
+	0xFED6B98:  7D432214  add r10,r3,r4
+	   7: GETL       	R3, t6
+	   8: GETL       	R4, t8
+	   9: ADDL       	t6, t8
+	  10: PUTL       	t8, R10
+	  11: INCEIPL       	$4
+
+	0xFED6B9C:  9421FFF0  stwu r1,-16(r1)
+	  12: GETL       	R1, t10
+	  13: GETL       	R1, t12
+	  14: ADDL       	$0xFFFFFFF0, t12
+	  15: PUTL       	t12, R1
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFED6BA0:  38600000  li r3,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R3
+	  20: INCEIPL       	$4
+
+	0xFED6BA4:  419E00C0  bc 12,30,0xFED6C64
+	  21: Js30o       	$0xFED6C64
+
+
+
+. 2254 FED6B90 24
+. 2F 84 00 00 7C 68 1B 78 7D 43 22 14 94 21 FF F0 38 60 00 00 41 9E 00 C0
+==== BB 2255 (0xFED6BA8) approx BBs exec'd 0 ====
+
+	0xFED6BA8:  7C0A4040  cmpl cr0,r10,r8
+	   0: GETL       	R10, t0
+	   1: GETL       	R8, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFED6BAC:  418000C0  bc 12,0,0xFED6C6C
+	   5: Js00o       	$0xFED6C6C
+
+
+
+. 2255 FED6BA8 8
+. 7C 0A 40 40 41 80 00 C0
+==== BB 2256 (0xFED6BB0) approx BBs exec'd 0 ====
+
+	0xFED6BB0:  71000003  andi. r0,r8,0x3
+	   0: GETL       	R8, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED6BB4:  7D034378  or r3,r8,r8
+	   6: GETL       	R8, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFED6BB8:  4182001C  bc 12,2,0xFED6BD4
+	   9: Js02o       	$0xFED6BD4
+
+
+
+. 2256 FED6BB0 12
+. 71 00 00 03 7D 03 43 78 41 82 00 1C
+==== BB 2257 (0xFED6BD4) approx BBs exec'd 0 ====
+
+	0xFED6BD4:  7F835040  cmpl cr7,r3,r10
+	   0: GETL       	R3, t0
+	   1: GETL       	R10, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFED6BD8:  3CC08080  lis r6,-32640
+	   5: MOVL       	$0x80800000, t6
+	   6: PUTL       	t6, R6
+	   7: INCEIPL       	$4
+
+	0xFED6BDC:  3CE00101  lis r7,257
+	   8: MOVL       	$0x1010000, t8
+	   9: PUTL       	t8, R7
+	  10: INCEIPL       	$4
+
+	0xFED6BE0:  60C68080  ori r6,r6,0x8080
+	  11: MOVL       	$0x80808080, t10
+	  12: PUTL       	t10, R6
+	  13: INCEIPL       	$4
+
+	0xFED6BE4:  60E70101  ori r7,r7,0x101
+	  14: MOVL       	$0x1010101, t12
+	  15: PUTL       	t12, R7
+	  16: INCEIPL       	$4
+
+	0xFED6BE8:  7C691B78  or r9,r3,r3
+	  17: GETL       	R3, t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0xFED6BEC:  409C0068  bc 4,28,0xFED6C54
+	  20: Jc28o       	$0xFED6C54
+
+
+
+. 2257 FED6BD4 28
+. 7F 83 50 40 3C C0 80 80 3C E0 01 01 60 C6 80 80 60 E7 01 01 7C 69 1B 78 40 9C 00 68
+==== BB 2258 (0xFED6BF0) approx BBs exec'd 0 ====
+
+	0xFED6BF0:  80890000  lwz r4,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFED6BF4:  39290004  addi r9,r9,4
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFED6BF8:  7C672050  subf r3,r7,r4
+	   8: GETL       	R7, t6
+	   9: GETL       	R4, t8
+	  10: SUBL       	t6, t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFED6BFC:  7C6B3039  and. r11,r3,r6
+	  13: GETL       	R3, t10
+	  14: GETL       	R6, t12
+	  15: ANDL       	t10, t12
+	  16: PUTL       	t12, R11
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0xFED6C00:  3869FFFC  addi r3,r9,-4
+	  20: GETL       	R9, t16
+	  21: ADDL       	$0xFFFFFFFC, t16
+	  22: PUTL       	t16, R3
+	  23: INCEIPL       	$4
+
+	0xFED6C04:  7C6B1B78  or r11,r3,r3
+	  24: GETL       	R3, t18
+	  25: PUTL       	t18, R11
+	  26: INCEIPL       	$4
+
+	0xFED6C08:  41820040  bc 12,2,0xFED6C48
+	  27: Js02o       	$0xFED6C48
+
+
+
+. 2258 FED6BF0 28
+. 80 89 00 00 39 29 00 04 7C 67 20 50 7C 6B 30 39 38 69 FF FC 7C 6B 1B 78 41 82 00 40
+==== BB 2259 (0xFED6C48) approx BBs exec'd 0 ====
+
+	0xFED6C48:  7D435378  or r3,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED6C4C:  7F895040  cmpl cr7,r9,r10
+	   3: GETL       	R9, t2
+	   4: GETL       	R10, t4
+	   5: CMPUL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFED6C50:  419CFFA0  bc 12,28,0xFED6BF0
+	   8: Js28o       	$0xFED6BF0
+
+
+
+. 2259 FED6C48 12
+. 7D 43 53 78 7F 89 50 40 41 9C FF A0
+==== BB 2260 (0xFED6C0C) approx BBs exec'd 0 ====
+
+	0xFED6C0C:  88A9FFFC  lbz r5,-4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFED6C10:  2F050000  cmpi cr6,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFED6C14:  419A0040  bc 12,26,0xFED6C54
+	   9: Js26o       	$0xFED6C54
+
+
+
+. 2260 FED6C0C 12
+. 88 A9 FF FC 2F 05 00 00 41 9A 00 40
+==== BB 2261 (0xFED6C18) approx BBs exec'd 0 ====
+
+	0xFED6C18:  89830001  lbz r12,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFED6C1C:  3869FFFD  addi r3,r9,-3
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFFD, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFED6C20:  2F8C0000  cmpi cr7,r12,0
+	   9: GETL       	R12, t6
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFED6C24:  419E0030  bc 12,30,0xFED6C54
+	  13: Js30o       	$0xFED6C54
+
+
+
+. 2261 FED6C18 16
+. 89 83 00 01 38 69 FF FD 2F 8C 00 00 41 9E 00 30
+==== BB 2262 (0xFED6C54) approx BBs exec'd 0 ====
+
+	0xFED6C54:  7F035040  cmpl cr6,r3,r10
+	   0: GETL       	R3, t0
+	   1: GETL       	R10, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFED6C58:  40990008  bc 4,25,0xFED6C60
+	   5: Jc25o       	$0xFED6C60
+
+
+
+. 2262 FED6C54 8
+. 7F 03 50 40 40 99 00 08
+==== BB 2263 (0xFED6C60) approx BBs exec'd 0 ====
+
+	0xFED6C60:  7C681850  subf r3,r8,r3
+	   0: GETL       	R8, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFED6C64:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFED6C68:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+
+. 2263 FED6C60 12
+. 7C 68 18 50 38 21 00 10 4E 80 00 20
+==== BB 2264 (0xFED99CC) approx BBs exec'd 0 ====
+
+	0xFED99CC:  7D23E850  subf r9,r3,r29
+	   0: GETL       	R3, t0
+	   1: GETL       	R29, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFED99D0:  7D7F1A14  add r11,r31,r3
+	   5: GETL       	R31, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0xFED99D4:  2B890001  cmpli cr7,r9,1
+	  10: GETL       	R9, t8
+	  11: MOVL       	$0x1, t12
+	  12: CMPUL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFED99D8:  3BA9FFFF  addi r29,r9,-1
+	  15: GETL       	R9, t14
+	  16: ADDL       	$0xFFFFFFFF, t14
+	  17: PUTL       	t14, R29
+	  18: INCEIPL       	$4
+
+	0xFED99DC:  419DFFDC  bc 12,29,0xFED99B8
+	  19: Js29o       	$0xFED99B8
+
+
+
+. 2264 FED99CC 20
+. 7D 23 E8 50 7D 7F 1A 14 2B 89 00 01 3B A9 FF FF 41 9D FF DC
+==== BB 2265 (0xFED99E0) approx BBs exec'd 0 ====
+
+	0xFED99E0:  80610024  lwz r3,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFED99E4:  83810010  lwz r28,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFED99E8:  83A10014  lwz r29,20(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0xFED99EC:  7C6803A6  mtlr r3
+	  15: GETL       	R3, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFED99F0:  83C10018  lwz r30,24(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R30
+	  22: INCEIPL       	$4
+
+	0xFED99F4:  83E1001C  lwz r31,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0xFED99F8:  38210020  addi r1,r1,32
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0xFED99FC:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+
+. 2265 FED99E0 32
+. 80 61 00 24 83 81 00 10 83 A1 00 14 7C 68 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2266 (0xFE8DA44) approx BBs exec'd 0 ====
+
+	0xFE8DA44:  3BE0002F  li r31,47
+	   0: MOVL       	$0x2F, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFE8DA48:  7C7DD214  add r3,r29,r26
+	   3: GETL       	R29, t2
+	   4: GETL       	R26, t4
+	   5: ADDL       	t2, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE8DA4C:  7EC4B378  or r4,r22,r22
+	   8: GETL       	R22, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFE8DA50:  9BE3FFFF  stb r31,-1(r3)
+	  11: GETL       	R31, t8
+	  12: GETL       	R3, t10
+	  13: ADDL       	$0xFFFFFFFF, t10
+	  14: STB       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFE8DA54:  4804A8AD  bl 0xFED8300
+	  16: MOVL       	$0xFE8DA58, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0xFED8300  ($4)
+
+
+
+. 2266 FE8DA44 20
+. 3B E0 00 2F 7C 7D D2 14 7E C4 B3 78 9B E3 FF FF 48 04 A8 AD
+==== BB 2267 (0xFED8394) approx BBs exec'd 0 ====
+
+	0xFED8394:  88C40000  lbz r6,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFED8398:  38630003  addi r3,r3,3
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x3, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED839C:  2C060000  cmpi cr0,r6,0
+	   8: GETL       	R6, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFED83A0:  41820028  bc 12,2,0xFED83C8
+	  12: Js02o       	$0xFED83C8
+
+
+
+. 2267 FED8394 16
+. 88 C4 00 00 38 63 00 03 2C 06 00 00 41 82 00 28
+==== BB 2268 (0xFED83A4) approx BBs exec'd 0 ====
+
+	0xFED83A4:  8D440001  lbzu r10,1(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R4
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0xFED83A8:  9CC30001  stbu r6,1(r3)
+	   6: GETL       	R6, t4
+	   7: GETL       	R3, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R3
+	  10: STB       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED83AC:  2C0A0000  cmpi cr0,r10,0
+	  12: GETL       	R10, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFED83B0:  41820020  bc 12,2,0xFED83D0
+	  16: Js02o       	$0xFED83D0
+
+
+
+. 2268 FED83A4 16
+. 8D 44 00 01 9C C3 00 01 2C 0A 00 00 41 82 00 20
+==== BB 2269 (0xFED83B4) approx BBs exec'd 0 ====
+
+	0xFED83B4:  60000000  nop
+	   0: NOPo       	
+	   1: INCEIPL       	$4
+
+	0xFED83B8:  8CC40001  lbzu r6,1(r4)
+	   2: GETL       	R4, t0
+	   3: ADDL       	$0x1, t0
+	   4: PUTL       	t0, R4
+	   5: LDB       	(t0), t2
+	   6: PUTL       	t2, R6
+	   7: INCEIPL       	$4
+
+	0xFED83BC:  9D430001  stbu r10,1(r3)
+	   8: GETL       	R10, t4
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x1, t6
+	  11: PUTL       	t6, R3
+	  12: STB       	t4, (t6)
+	  13: INCEIPL       	$4
+
+	0xFED83C0:  2C060000  cmpi cr0,r6,0
+	  14: GETL       	R6, t8
+	  15: CMP0L       	t8, t10  (-rSo)
+	  16: ICRFL       	t10, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0xFED83C4:  4082FFE0  bc 4,2,0xFED83A4
+	  18: Jc02o       	$0xFED83A4
+
+
+
+. 2269 FED83B4 20
+. 60 00 00 00 8C C4 00 01 9D 43 00 01 2C 06 00 00 40 82 FF E0
+==== BB 2270 (0xFED83D0) approx BBs exec'd 0 ====
+
+	0xFED83D0:  9D430001  stbu r10,1(r3)
+	   0: GETL       	R10, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0x1, t2
+	   3: PUTL       	t2, R3
+	   4: STB       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED83D4:  4E800020  blr
+	   6: GETL       	LR, t4
+	   7: JMPo-r       	t4  ($4)
+
+
+
+. 2270 FED83D0 8
+. 9D 43 00 01 4E 80 00 20
+==== BB 2271 (0xFE8DA58) approx BBs exec'd 0 ====
+
+	0xFE8DA58:  73000001  andi. r0,r24,0x1
+	   0: GETL       	R24, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8DA5C:  408202EC  bc 4,2,0xFE8DD48
+	   6: Jc02o       	$0xFE8DD48
+
+
+
+. 2271 FE8DA58 8
+. 73 00 00 01 40 82 02 EC
+==== BB 2272 (0xFE8DA60) approx BBs exec'd 0 ====
+
+	0xFE8DA60:  73200001  andi. r0,r25,0x1
+	   0: GETL       	R25, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8DA64:  408202CC  bc 4,2,0xFE8DD30
+	   6: Jc02o       	$0xFE8DD30
+
+
+
+. 2272 FE8DA60 8
+. 73 20 00 01 40 82 02 CC
+==== BB 2273 (0xFE8DA68) approx BBs exec'd 0 ====
+
+	0xFE8DA68:  2F970000  cmpi cr7,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DA6C:  409E02AC  bc 4,30,0xFE8DD18
+	   4: Jc30o       	$0xFE8DD18
+
+
+
+. 2273 FE8DA68 8
+. 2F 97 00 00 40 9E 02 AC
+==== BB 2274 (0xFE8DA70) approx BBs exec'd 0 ====
+
+	0xFE8DA70:  73600001  andi. r0,r27,0x1
+	   0: GETL       	R27, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8DA74:  4082028C  bc 4,2,0xFE8DD00
+	   6: Jc02o       	$0xFE8DD00
+
+
+
+. 2274 FE8DA70 8
+. 73 60 00 01 40 82 02 8C
+==== BB 2275 (0xFE8DA78) approx BBs exec'd 0 ====
+
+	0xFE8DA78:  9BE30000  stb r31,0(r3)
+	   0: GETL       	R31, t0
+	   1: GETL       	R3, t2
+	   2: STB       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE8DA7C:  7E048378  or r4,r16,r16
+	   4: GETL       	R16, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0xFE8DA80:  38630001  addi r3,r3,1
+	   7: GETL       	R3, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFE8DA84:  3B600000  li r27,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0xFE8DA88:  48048A49  bl 0xFED64D0
+	  14: MOVL       	$0xFE8DA8C, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFED64D0  ($4)
+
+
+
+. 2275 FE8DA78 20
+. 9B E3 00 00 7E 04 83 78 38 63 00 01 3B 60 00 00 48 04 8A 49
+==== BB 2276 strcpy(0xFED64D0) approx BBs exec'd 0 ====
+
+	0xFED64D0:  7C801B78  or r0,r4,r3
+	   0: GETL       	R4, t0
+	   1: GETL       	R3, t2
+	   2: ORL       	t2, t0
+	   3: PUTL       	t0, R0
+	   4: INCEIPL       	$4
+
+	0xFED64D4:  540007BF  rlwinm. r0,r0,0,30,31
+	   5: GETL       	R0, t4
+	   6: ANDL       	$0x3, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0xFED64D8:  38A3FFFC  addi r5,r3,-4
+	  11: GETL       	R3, t8
+	  12: ADDL       	$0xFFFFFFFC, t8
+	  13: PUTL       	t8, R5
+	  14: INCEIPL       	$4
+
+	0xFED64DC:  40820088  bc 4,2,0xFED6564
+	  15: Jc02o       	$0xFED6564
+
+
+
+. 2276 FED64D0 16
+. 7C 80 1B 78 54 00 07 BF 38 A3 FF FC 40 82 00 88
+==== BB 2277 (0xFED6564) approx BBs exec'd 0 ====
+
+	0xFED6564:  88C40000  lbz r6,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFED6568:  38A3FFFF  addi r5,r3,-1
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFED656C:  2C060000  cmpi cr0,r6,0
+	   8: GETL       	R6, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFED6570:  41820028  bc 12,2,0xFED6598
+	  12: Js02o       	$0xFED6598
+
+
+
+. 2277 FED6564 16
+. 88 C4 00 00 38 A3 FF FF 2C 06 00 00 41 82 00 28
+==== BB 2278 (0xFED6574) approx BBs exec'd 0 ====
+
+	0xFED6574:  8D440001  lbzu r10,1(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R4
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0xFED6578:  9CC50001  stbu r6,1(r5)
+	   6: GETL       	R6, t4
+	   7: GETL       	R5, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R5
+	  10: STB       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED657C:  2C0A0000  cmpi cr0,r10,0
+	  12: GETL       	R10, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFED6580:  41820020  bc 12,2,0xFED65A0
+	  16: Js02o       	$0xFED65A0
+
+
+
+. 2278 FED6574 16
+. 8D 44 00 01 9C C5 00 01 2C 0A 00 00 41 82 00 20
+==== BB 2279 (0xFED6584) approx BBs exec'd 0 ====
+
+	0xFED6584:  60000000  nop
+	   0: NOPo       	
+	   1: INCEIPL       	$4
+
+	0xFED6588:  8CC40001  lbzu r6,1(r4)
+	   2: GETL       	R4, t0
+	   3: ADDL       	$0x1, t0
+	   4: PUTL       	t0, R4
+	   5: LDB       	(t0), t2
+	   6: PUTL       	t2, R6
+	   7: INCEIPL       	$4
+
+	0xFED658C:  9D450001  stbu r10,1(r5)
+	   8: GETL       	R10, t4
+	   9: GETL       	R5, t6
+	  10: ADDL       	$0x1, t6
+	  11: PUTL       	t6, R5
+	  12: STB       	t4, (t6)
+	  13: INCEIPL       	$4
+
+	0xFED6590:  2C060000  cmpi cr0,r6,0
+	  14: GETL       	R6, t8
+	  15: CMP0L       	t8, t10  (-rSo)
+	  16: ICRFL       	t10, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0xFED6594:  4082FFE0  bc 4,2,0xFED6574
+	  18: Jc02o       	$0xFED6574
+
+
+
+. 2279 FED6584 20
+. 60 00 00 00 8C C4 00 01 9D 45 00 01 2C 06 00 00 40 82 FF E0
+==== BB 2280 (0xFED6598) approx BBs exec'd 0 ====
+
+	0xFED6598:  98C50001  stb r6,1(r5)
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	$0x1, t2
+	   3: STB       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED659C:  4E800020  blr
+	   5: GETL       	LR, t4
+	   6: JMPo-r       	t4  ($4)
+
+
+
+. 2280 FED6598 8
+. 98 C5 00 01 4E 80 00 20
+==== BB 2281 (0xFE8DA8C) approx BBs exec'd 0 ====
+
+	0xFE8DA8C:  83F20000  lwz r31,0(r18)
+	   0: GETL       	R18, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0xFE8DA90:  2C9F0000  cmpi cr1,r31,0
+	   4: GETL       	R31, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFE8DA94:  41860038  bc 12,6,0xFE8DACC
+	   8: Js06o       	$0xFE8DACC
+
+
+
+. 2281 FE8DA8C 12
+. 83 F2 00 00 2C 9F 00 00 41 86 00 38
+==== BB 2282 (0xFE8DACC) approx BBs exec'd 0 ====
+
+	0xFE8DACC:  80010080  lwz r0,128(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x80, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8DAD0:  317FFFFF  addic r11,r31,-1
+	   5: GETL       	R31, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xFE8DAD4:  7D8BF910  subfe r12,r11,r31
+	   9: GETL       	R11, t6
+	  10: GETL       	R31, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0xFE8DAD8:  21200000  subfic r9,r0,0
+	  14: GETL       	R0, t10
+	  15: MOVL       	$0x0, t12
+	  16: SBBL       	t10, t12  (-wCa)
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0xFE8DADC:  7F090114  adde r24,r9,r0
+	  19: GETL       	R9, t14
+	  20: GETL       	R0, t16
+	  21: ADCL       	t14, t16  (-rCa-wCa)
+	  22: PUTL       	t16, R24
+	  23: INCEIPL       	$4
+
+	0xFE8DAE0:  7D8BC379  or. r11,r12,r24
+	  24: GETL       	R12, t18
+	  25: GETL       	R24, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R11
+	  28: CMP0L       	t18, t22  (-rSo)
+	  29: ICRFL       	t22, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0xFE8DAE4:  4182007C  bc 12,2,0xFE8DB60
+	  31: Js02o       	$0xFE8DB60
+
+
+
+. 2282 FE8DACC 28
+. 80 01 00 80 31 7F FF FF 7D 8B F9 10 21 20 00 00 7F 09 01 14 7D 8B C3 79 41 82 00 7C
+==== BB 2283 (0xFE8DAE8) approx BBs exec'd 0 ====
+
+	0xFE8DAE8:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8DAEC:  4811C845  bl 0xFFAA330
+	   3: MOVL       	$0xFE8DAF0, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFFAA330  ($4)
+
+
+
+. 2283 FE8DAE8 8
+. 7F A3 EB 78 48 11 C8 45
+==== BB 2284 (0xFE8DAF0) approx BBs exec'd 0 ====
+
+	0xFE8DAF0:  7FE0FB78  or r0,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE8DAF4:  82610074  lwz r19,116(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x74, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R19
+	   7: INCEIPL       	$4
+
+	0xFE8DAF8:  7C030378  or r3,r0,r0
+	   8: GETL       	R0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFE8DAFC:  81210024  lwz r9,36(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x24, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0xFE8DB00:  7E6803A6  mtlr r19
+	  16: GETL       	R19, t12
+	  17: PUTL       	t12, LR
+	  18: INCEIPL       	$4
+
+	0xFE8DB04:  81C10028  lwz r14,40(r1)
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x28, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R14
+	  23: INCEIPL       	$4
+
+	0xFE8DB08:  81E1002C  lwz r15,44(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x2C, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R15
+	  28: INCEIPL       	$4
+
+	0xFE8DB0C:  7D208120  mtcrf 0x8,r9
+	  29: GETL       	R9, t22
+	  30: ICRFL       	t22, $0x4, CR
+	  31: INCEIPL       	$4
+
+	0xFE8DB10:  82010030  lwz r16,48(r1)
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x30, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R16
+	  36: INCEIPL       	$4
+
+	0xFE8DB14:  82210034  lwz r17,52(r1)
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x34, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R17
+	  41: INCEIPL       	$4
+
+	0xFE8DB18:  82410038  lwz r18,56(r1)
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x38, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R18
+	  46: INCEIPL       	$4
+
+	0xFE8DB1C:  8261003C  lwz r19,60(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x3C, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R19
+	  51: INCEIPL       	$4
+
+	0xFE8DB20:  82810040  lwz r20,64(r1)
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x40, t40
+	  54: LDL       	(t40), t42
+	  55: PUTL       	t42, R20
+	  56: INCEIPL       	$4
+
+	0xFE8DB24:  82A10044  lwz r21,68(r1)
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x44, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R21
+	  61: INCEIPL       	$4
+
+	0xFE8DB28:  82C10048  lwz r22,72(r1)
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x48, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R22
+	  66: INCEIPL       	$4
+
+	0xFE8DB2C:  82E1004C  lwz r23,76(r1)
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x4C, t52
+	  69: LDL       	(t52), t54
+	  70: PUTL       	t54, R23
+	  71: INCEIPL       	$4
+
+	0xFE8DB30:  83010050  lwz r24,80(r1)
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x50, t56
+	  74: LDL       	(t56), t58
+	  75: PUTL       	t58, R24
+	  76: INCEIPL       	$4
+
+	0xFE8DB34:  83210054  lwz r25,84(r1)
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x54, t60
+	  79: LDL       	(t60), t62
+	  80: PUTL       	t62, R25
+	  81: INCEIPL       	$4
+
+	0xFE8DB38:  83410058  lwz r26,88(r1)
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x58, t64
+	  84: LDL       	(t64), t66
+	  85: PUTL       	t66, R26
+	  86: INCEIPL       	$4
+
+	0xFE8DB3C:  8361005C  lwz r27,92(r1)
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x5C, t68
+	  89: LDL       	(t68), t70
+	  90: PUTL       	t70, R27
+	  91: INCEIPL       	$4
+
+	0xFE8DB40:  83810060  lwz r28,96(r1)
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x60, t72
+	  94: LDL       	(t72), t74
+	  95: PUTL       	t74, R28
+	  96: INCEIPL       	$4
+
+	0xFE8DB44:  83A10064  lwz r29,100(r1)
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x64, t76
+	  99: LDL       	(t76), t78
+	 100: PUTL       	t78, R29
+	 101: INCEIPL       	$4
+
+	0xFE8DB48:  83C10068  lwz r30,104(r1)
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x68, t80
+	 104: LDL       	(t80), t82
+	 105: PUTL       	t82, R30
+	 106: INCEIPL       	$4
+
+	0xFE8DB4C:  83E1006C  lwz r31,108(r1)
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x6C, t84
+	 109: LDL       	(t84), t86
+	 110: PUTL       	t86, R31
+	 111: INCEIPL       	$4
+
+	0xFE8DB50:  38210070  addi r1,r1,112
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x70, t88
+	 114: PUTL       	t88, R1
+	 115: INCEIPL       	$4
+
+	0xFE8DB54:  4E800020  blr
+	 116: GETL       	LR, t90
+	 117: JMPo-r       	t90  ($4)
+
+
+
+. 2284 FE8DAF0 104
+. 7F E0 FB 78 82 61 00 74 7C 03 03 78 81 21 00 24 7E 68 03 A6 81 C1 00 28 81 E1 00 2C 7D 20 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+==== BB 2285 (0xFE8B794) approx BBs exec'd 0 ====
+
+	0xFE8B794:  801C00A8  lwz r0,168(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0xA8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8B798:  7C7F1B78  or r31,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0xFE8B79C:  2C000000  cmpi cr0,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFE8B7A0:  408200C4  bc 4,2,0xFE8B864
+	  12: Jc02o       	$0xFE8B864
+
+
+
+. 2285 FE8B794 16
+. 80 1C 00 A8 7C 7F 1B 78 2C 00 00 00 40 82 00 C4
+==== BB 2286 (0xFE8B7A4) approx BBs exec'd 0 ====
+
+	0xFE8B7A4:  2C9F0000  cmpi cr1,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE8B7A8:  418600F8  bc 12,6,0xFE8B8A0
+	   4: Js06o       	$0xFE8B8A0
+
+
+
+. 2286 FE8B7A4 8
+. 2C 9F 00 00 41 86 00 F8
+==== BB 2287 (0xFE8B8A0) approx BBs exec'd 0 ====
+
+	0xFE8B8A0:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8B8A4:  48001C8D  bl 0xFE8D530
+	   3: MOVL       	$0xFE8B8A8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFE8D530  ($4)
+
+
+
+. 2287 FE8B8A0 8
+. 7F 43 D3 78 48 00 1C 8D
+==== BB 2288 _nl_expand_alias(0xFE8D530) approx BBs exec'd 0 ====
+
+	0xFE8D530:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE8D534:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFE8D538:  4811A919  bl 0xFFA7E50
+	   9: MOVL       	$0xFE8D53C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2288 FE8D530 12
+. 94 21 FF C0 7C 88 02 A6 48 11 A9 19
+==== BB 2289 (0xFE8D53C) approx BBs exec'd 0 ====
+
+	0xFE8D53C:  93C10038  stw r30,56(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x38, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8D540:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE8D544:  92C10018  stw r22,24(r1)
+	   8: GETL       	R22, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x18, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE8D548:  93410028  stw r26,40(r1)
+	  13: GETL       	R26, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x28, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE8D54C:  3AC00000  li r22,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R22
+	  20: INCEIPL       	$4
+
+	0xFE8D550:  9361002C  stw r27,44(r1)
+	  21: GETL       	R27, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x2C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE8D554:  7C7A1B78  or r26,r3,r3
+	  26: GETL       	R3, t20
+	  27: PUTL       	t20, R26
+	  28: INCEIPL       	$4
+
+	0xFE8D558:  92E1001C  stw r23,28(r1)
+	  29: GETL       	R23, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x1C, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE8D55C:  38000001  li r0,1
+	  34: MOVL       	$0x1, t26
+	  35: PUTL       	t26, R0
+	  36: INCEIPL       	$4
+
+	0xFE8D560:  93010020  stw r24,32(r1)
+	  37: GETL       	R24, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x20, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFE8D564:  93210024  stw r25,36(r1)
+	  42: GETL       	R25, t32
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x24, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0xFE8D568:  93810030  stw r28,48(r1)
+	  47: GETL       	R28, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x30, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0xFE8D56C:  93A10034  stw r29,52(r1)
+	  52: GETL       	R29, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x34, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0xFE8D570:  93E1003C  stw r31,60(r1)
+	  57: GETL       	R31, t44
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x3C, t46
+	  60: STL       	t44, (t46)
+	  61: INCEIPL       	$4
+
+	0xFE8D574:  90810044  stw r4,68(r1)
+	  62: GETL       	R4, t48
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x44, t50
+	  65: STL       	t48, (t50)
+	  66: INCEIPL       	$4
+
+	0xFE8D578:  837E0204  lwz r27,516(r30)
+	  67: GETL       	R30, t52
+	  68: ADDL       	$0x204, t52
+	  69: LDL       	(t52), t54
+	  70: PUTL       	t54, R27
+	  71: INCEIPL       	$4
+
+	0xFE8D57C:  7D20D828  lwarx r9,r0,r27
+	  72: GETL       	R27, t56
+	  73: LOCKo       	
+	  74: LDL       	(t56), t58
+	  75: PUTL       	t58, R9
+	  76: INCEIPL       	$4
+
+	0xFE8D580:  7C09B000  cmp cr0,r9,r22
+	  77: GETL       	R9, t60
+	  78: GETL       	R22, t62
+	  79: CMPL       	t60, t62, t64  (-rSo)
+	  80: ICRFL       	t64, $0x0, CR
+	  81: INCEIPL       	$4
+
+	0xFE8D584:  4082000C  bc 4,2,0xFE8D590
+	  82: Jc02o       	$0xFE8D590
+
+
+
+. 2289 FE8D53C 76
+. 93 C1 00 38 7F C8 02 A6 92 C1 00 18 93 41 00 28 3A C0 00 00 93 61 00 2C 7C 7A 1B 78 92 E1 00 1C 38 00 00 01 93 01 00 20 93 21 00 24 93 81 00 30 93 A1 00 34 93 E1 00 3C 90 81 00 44 83 7E 02 04 7D 20 D8 28 7C 09 B0 00 40 82 00 0C
+==== BB 2290 (0xFE8D588) approx BBs exec'd 0 ====
+
+	0xFE8D588:  7C00D92D  stwcx. r0,r0,r27
+	   0: GETL       	R27, t0
+	   1: GETL       	R0, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8D58C:  40A2FFF0  bc 5,2,0xFE8D57C
+	   6: Jc02o       	$0xFE8D57C
+
+
+
+. 2290 FE8D588 8
+. 7C 00 D9 2D 40 A2 FF F0
+==== BB 2291 (0xFE8D57C) approx BBs exec'd 0 ====
+
+	0xFE8D57C:  7D20D828  lwarx r9,r0,r27
+	   0: GETL       	R27, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE8D580:  7C09B000  cmp cr0,r9,r22
+	   5: GETL       	R9, t4
+	   6: GETL       	R22, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE8D584:  4082000C  bc 4,2,0xFE8D590
+	  10: Jc02o       	$0xFE8D590
+
+
+
+. 2291 FE8D57C 12
+. 7D 20 D8 28 7C 09 B0 00 40 82 00 0C
+==== BB 2292 (0xFE8D590) approx BBs exec'd 0 ====
+
+	0xFE8D590:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFE8D594:  2F890000  cmpi cr7,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE8D598:  409E01D8  bc 4,30,0xFE8D770
+	   5: Jc30o       	$0xFE8D770
+
+
+
+. 2292 FE8D590 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 D8
+==== BB 2293 (0xFE8D59C) approx BBs exec'd 0 ====
+
+	0xFE8D59C:  82FE0200  lwz r23,512(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x200, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFE8D5A0:  833E01F0  lwz r25,496(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1F0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0xFE8D5A4:  7EF8BB78  or r24,r23,r23
+	  10: GETL       	R23, t8
+	  11: PUTL       	t8, R24
+	  12: INCEIPL       	$4
+
+	0xFE8D5A8:  80B90000  lwz r5,0(r25)
+	  13: GETL       	R25, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R5
+	  16: INCEIPL       	$4
+
+	0xFE8D5AC:  93410008  stw r26,8(r1)
+	  17: GETL       	R26, t14
+	  18: GETL       	R1, t16
+	  19: ADDL       	$0x8, t16
+	  20: STL       	t14, (t16)
+	  21: INCEIPL       	$4
+
+	0xFE8D5B0:  2C050000  cmpi cr0,r5,0
+	  22: GETL       	R5, t18
+	  23: CMP0L       	t18, t20  (-rSo)
+	  24: ICRFL       	t20, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0xFE8D5B4:  40820124  bc 4,2,0xFE8D6D8
+	  26: Jc02o       	$0xFE8D6D8
+
+
+
+. 2293 FE8D59C 28
+. 82 FE 02 00 83 3E 01 F0 7E F8 BB 78 80 B9 00 00 93 41 00 08 2C 05 00 00 40 82 01 24
+==== BB 2294 (0xFE8D5B8) approx BBs exec'd 0 ====
+
+	0xFE8D5B8:  80D80000  lwz r6,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFE8D5BC:  7F1CC378  or r28,r24,r24
+	   4: GETL       	R24, t4
+	   5: PUTL       	t4, R28
+	   6: INCEIPL       	$4
+
+	0xFE8D5C0:  39000000  li r8,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0xFE8D5C4:  88A60000  lbz r5,0(r6)
+	  10: GETL       	R6, t8
+	  11: LDB       	(t8), t10
+	  12: PUTL       	t10, R5
+	  13: INCEIPL       	$4
+
+	0xFE8D5C8:  2F050000  cmpi cr6,r5,0
+	  14: GETL       	R5, t12
+	  15: CMP0L       	t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x6, CR
+	  17: INCEIPL       	$4
+
+	0xFE8D5CC:  419A0094  bc 12,26,0xFE8D660
+	  18: Js26o       	$0xFE8D660
+
+
+
+. 2294 FE8D5B8 24
+. 80 D8 00 00 7F 1C C3 78 39 00 00 00 88 A6 00 00 2F 05 00 00 41 9A 00 94
+==== BB 2295 (0xFE8D5D0) approx BBs exec'd 0 ====
+
+	0xFE8D5D0:  7EFDBB78  or r29,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFE8D5D4:  815C0000  lwz r10,0(r28)
+	   3: GETL       	R28, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R10
+	   6: INCEIPL       	$4
+
+	0xFE8D5D8:  88EA0000  lbz r7,0(r10)
+	   7: GETL       	R10, t6
+	   8: LDB       	(t6), t8
+	   9: PUTL       	t8, R7
+	  10: INCEIPL       	$4
+
+	0xFE8D5DC:  2F87003A  cmpi cr7,r7,58
+	  11: GETL       	R7, t10
+	  12: MOVL       	$0x3A, t14
+	  13: CMPL       	t10, t14, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0xFE8D5E0:  409E001C  bc 4,30,0xFE8D5FC
+	  16: Jc30o       	$0xFE8D5FC
+
+
+
+. 2295 FE8D5D0 20
+. 7E FD BB 78 81 5C 00 00 88 EA 00 00 2F 87 00 3A 40 9E 00 1C
+==== BB 2296 (0xFE8D5FC) approx BBs exec'd 0 ====
+
+	0xFE8D5FC:  807C0000  lwz r3,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8D600:  7F8AE378  or r10,r28,r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, R10
+	   6: INCEIPL       	$4
+
+	0xFE8D604:  88030000  lbz r0,0(r3)
+	   7: GETL       	R3, t6
+	   8: LDB       	(t6), t8
+	   9: PUTL       	t8, R0
+	  10: INCEIPL       	$4
+
+	0xFE8D608:  2F800000  cmpi cr7,r0,0
+	  11: GETL       	R0, t10
+	  12: CMP0L       	t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFE8D60C:  419E002C  bc 12,30,0xFE8D638
+	  15: Js30o       	$0xFE8D638
+
+
+
+. 2296 FE8D5FC 20
+. 80 7C 00 00 7F 8A E3 78 88 03 00 00 2F 80 00 00 41 9E 00 2C
+==== BB 2297 (0xFE8D610) approx BBs exec'd 0 ====
+
+	0xFE8D610:  2C00003A  cmpi cr0,r0,58
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x3A, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8D614:  41820024  bc 12,2,0xFE8D638
+	   5: Js02o       	$0xFE8D638
+
+
+
+. 2297 FE8D610 8
+. 2C 00 00 3A 41 82 00 24
+==== BB 2298 (0xFE8D618) approx BBs exec'd 0 ====
+
+	0xFE8D618:  808A0000  lwz r4,0(r10)
+	   0: GETL       	R10, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFE8D61C:  38040001  addi r0,r4,1
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFE8D620:  900A0000  stw r0,0(r10)
+	   8: GETL       	R0, t6
+	   9: GETL       	R10, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE8D624:  89240001  lbz r9,1(r4)
+	  12: GETL       	R4, t10
+	  13: ADDL       	$0x1, t10
+	  14: LDB       	(t10), t12
+	  15: PUTL       	t12, R9
+	  16: INCEIPL       	$4
+
+	0xFE8D628:  2C890000  cmpi cr1,r9,0
+	  17: GETL       	R9, t14
+	  18: CMP0L       	t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x1, CR
+	  20: INCEIPL       	$4
+
+	0xFE8D62C:  2F09003A  cmpi cr6,r9,58
+	  21: GETL       	R9, t18
+	  22: MOVL       	$0x3A, t22
+	  23: CMPL       	t18, t22, t20  (-rSo)
+	  24: ICRFL       	t20, $0x6, CR
+	  25: INCEIPL       	$4
+
+	0xFE8D630:  41860008  bc 12,6,0xFE8D638
+	  26: Js06o       	$0xFE8D638
+
+
+
+. 2298 FE8D618 28
+. 80 8A 00 00 38 04 00 01 90 0A 00 00 89 24 00 01 2C 89 00 00 2F 09 00 3A 41 86 00 08
+==== BB 2299 (0xFE8D634) approx BBs exec'd 0 ====
+
+	0xFE8D634:  409AFFE4  bc 4,26,0xFE8D618
+	   0: Jc26o       	$0xFE8D618
+
+
+
+. 2299 FE8D634 4
+. 40 9A FF E4
+==== BB 2300 (0xFE8D638) approx BBs exec'd 0 ====
+
+	0xFE8D638:  809C0000  lwz r4,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFE8D63C:  7F9FE378  or r31,r28,r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, R31
+	   6: INCEIPL       	$4
+
+	0xFE8D640:  2C080000  cmpi cr0,r8,0
+	   7: GETL       	R8, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0xFE8D644:  7F032040  cmpl cr6,r3,r4
+	  11: GETL       	R3, t10
+	  12: GETL       	R4, t12
+	  13: CMPUL       	t10, t12, t14  (-rSo)
+	  14: ICRFL       	t14, $0x6, CR
+	  15: INCEIPL       	$4
+
+	0xFE8D648:  41980070  bc 12,24,0xFE8D6B8
+	  16: Js24o       	$0xFE8D6B8
+
+
+
+. 2300 FE8D638 20
+. 80 9C 00 00 7F 9F E3 78 2C 08 00 00 7F 03 20 40 41 98 00 70
+==== BB 2301 (0xFE8D6B8) approx BBs exec'd 0 ====
+
+	0xFE8D6B8:  7C832050  subf r4,r3,r4
+	   0: GETL       	R3, t0
+	   1: GETL       	R4, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8D6BC:  4BFFF8F1  bl 0xFE8CFAC
+	   5: MOVL       	$0xFE8D6C0, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xFE8CFAC  ($4)
+
+
+
+. 2301 FE8D6B8 8
+. 7C 83 20 50 4B FF F8 F1
+==== BB 2302 read_alias_file(0xFE8CFAC) approx BBs exec'd 0 ====
+
+	0xFE8CFAC:  9421FDF0  stwu r1,-528(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFDF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE8CFB0:  7CA802A6  mflr r5
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFE8CFB4:  91C101C8  stw r14,456(r1)
+	   9: GETL       	R14, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE8CFB8:  39C4002C  addi r14,r4,44
+	  14: GETL       	R4, t10
+	  15: ADDL       	$0x2C, t10
+	  16: PUTL       	t10, R14
+	  17: INCEIPL       	$4
+
+	0xFE8CFBC:  55CC0036  rlwinm r12,r14,0,0,27
+	  18: GETL       	R14, t12
+	  19: ANDL       	$0xFFFFFFF0, t12
+	  20: PUTL       	t12, R12
+	  21: INCEIPL       	$4
+
+	0xFE8CFC0:  80E10000  lwz r7,0(r1)
+	  22: GETL       	R1, t14
+	  23: LDL       	(t14), t16
+	  24: PUTL       	t16, R7
+	  25: INCEIPL       	$4
+
+	0xFE8CFC4:  7CCC00D0  neg r6,r12
+	  26: GETL       	R12, t18
+	  27: NEGL       	t18
+	  28: PUTL       	t18, R6
+	  29: INCEIPL       	$4
+
+	0xFE8CFC8:  936101FC  stw r27,508(r1)
+	  30: GETL       	R27, t20
+	  31: GETL       	R1, t22
+	  32: ADDL       	$0x1FC, t22
+	  33: STL       	t20, (t22)
+	  34: INCEIPL       	$4
+
+	0xFE8CFCC:  93A10204  stw r29,516(r1)
+	  35: GETL       	R29, t24
+	  36: GETL       	R1, t26
+	  37: ADDL       	$0x204, t26
+	  38: STL       	t24, (t26)
+	  39: INCEIPL       	$4
+
+	0xFE8CFD0:  4811AE81  bl 0xFFA7E50
+	  40: MOVL       	$0xFE8CFD4, t28
+	  41: PUTL       	t28, LR
+	  42: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2302 FE8CFAC 40
+. 94 21 FD F0 7C A8 02 A6 91 C1 01 C8 39 C4 00 2C 55 CC 00 36 80 E1 00 00 7C CC 00 D0 93 61 01 FC 93 A1 02 04 48 11 AE 81
+==== BB 2303 (0xFE8CFD4) approx BBs exec'd 0 ====
+
+	0xFE8CFD4:  93C10208  stw r30,520(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x208, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8CFD8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE8CFDC:  93E1020C  stw r31,524(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x20C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE8CFE0:  7C3F0B78  or r31,r1,r1
+	  13: GETL       	R1, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0xFE8CFE4:  90A10214  stw r5,532(r1)
+	  16: GETL       	R5, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x214, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFE8CFE8:  7C852378  or r5,r4,r4
+	  21: GETL       	R4, t16
+	  22: PUTL       	t16, R5
+	  23: INCEIPL       	$4
+
+	0xFE8CFEC:  91E101CC  stw r15,460(r1)
+	  24: GETL       	R15, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x1CC, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFE8CFF0:  920101D0  stw r16,464(r1)
+	  29: GETL       	R16, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x1D0, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE8CFF4:  922101D4  stw r17,468(r1)
+	  34: GETL       	R17, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x1D4, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFE8CFF8:  924101D8  stw r18,472(r1)
+	  39: GETL       	R18, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x1D8, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFE8CFFC:  926101DC  stw r19,476(r1)
+	  44: GETL       	R19, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x1DC, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFE8D000:  928101E0  stw r20,480(r1)
+	  49: GETL       	R20, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x1E0, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFE8D004:  92A101E4  stw r21,484(r1)
+	  54: GETL       	R21, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x1E4, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0xFE8D008:  92C101E8  stw r22,488(r1)
+	  59: GETL       	R22, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x1E8, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFE8D00C:  92E101EC  stw r23,492(r1)
+	  64: GETL       	R23, t50
+	  65: GETL       	R1, t52
+	  66: ADDL       	$0x1EC, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0xFE8D010:  930101F0  stw r24,496(r1)
+	  69: GETL       	R24, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0x1F0, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0xFE8D014:  932101F4  stw r25,500(r1)
+	  74: GETL       	R25, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x1F4, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0xFE8D018:  934101F8  stw r26,504(r1)
+	  79: GETL       	R26, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x1F8, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0xFE8D01C:  93810200  stw r28,512(r1)
+	  84: GETL       	R28, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x200, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0xFE8D020:  7CE1316E  stwux r7,r1,r6
+	  89: GETL       	R6, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	t72, t70
+	  92: PUTL       	t70, R1
+	  93: GETL       	R7, t74
+	  94: STL       	t74, (t70)
+	  95: INCEIPL       	$4
+
+	0xFE8D024:  38810017  addi r4,r1,23
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x17, t76
+	  98: PUTL       	t76, R4
+	  99: INCEIPL       	$4
+
+	0xFE8D028:  549D0036  rlwinm r29,r4,0,0,27
+	 100: GETL       	R4, t78
+	 101: ANDL       	$0xFFFFFFF0, t78
+	 102: PUTL       	t78, R29
+	 103: INCEIPL       	$4
+
+	0xFE8D02C:  7C641B78  or r4,r3,r3
+	 104: GETL       	R3, t80
+	 105: PUTL       	t80, R4
+	 106: INCEIPL       	$4
+
+	0xFE8D030:  7FA3EB78  or r3,r29,r29
+	 107: GETL       	R29, t82
+	 108: PUTL       	t82, R3
+	 109: INCEIPL       	$4
+
+	0xFE8D034:  4804AFCD  bl 0xFED8000
+	 110: MOVL       	$0xFE8D038, t84
+	 111: PUTL       	t84, LR
+	 112: JMPo-c       	$0xFED8000  ($4)
+
+
+
+. 2303 FE8CFD4 100
+. 93 C1 02 08 7F C8 02 A6 93 E1 02 0C 7C 3F 0B 78 90 A1 02 14 7C 85 23 78 91 E1 01 CC 92 01 01 D0 92 21 01 D4 92 41 01 D8 92 61 01 DC 92 81 01 E0 92 A1 01 E4 92 C1 01 E8 92 E1 01 EC 93 01 01 F0 93 21 01 F4 93 41 01 F8 93 81 02 00 7C E1 31 6E 38 81 00 17 54 9D 00 36 7C 64 1B 78 7F A3 EB 78 48 04 AF CD
+==== BB 2304 (0xFE8D038) approx BBs exec'd 0 ====
+
+	0xFE8D038:  813E01EC  lwz r9,492(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1EC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE8D03C:  809E0208  lwz r4,520(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x208, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFE8D040:  A009000C  lhz r0,12(r9)
+	  10: GETL       	R9, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDW       	(t8), t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFE8D044:  81690000  lwz r11,0(r9)
+	  15: GETL       	R9, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R11
+	  18: INCEIPL       	$4
+
+	0xFE8D048:  81490004  lwz r10,4(r9)
+	  19: GETL       	R9, t16
+	  20: ADDL       	$0x4, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R10
+	  23: INCEIPL       	$4
+
+	0xFE8D04C:  81090008  lwz r8,8(r9)
+	  24: GETL       	R9, t20
+	  25: ADDL       	$0x8, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R8
+	  28: INCEIPL       	$4
+
+	0xFE8D050:  B003000C  sth r0,12(r3)
+	  29: GETL       	R0, t24
+	  30: GETL       	R3, t26
+	  31: ADDL       	$0xC, t26
+	  32: STW       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0xFE8D054:  91630000  stw r11,0(r3)
+	  34: GETL       	R11, t28
+	  35: GETL       	R3, t30
+	  36: STL       	t28, (t30)
+	  37: INCEIPL       	$4
+
+	0xFE8D058:  91430004  stw r10,4(r3)
+	  38: GETL       	R10, t32
+	  39: GETL       	R3, t34
+	  40: ADDL       	$0x4, t34
+	  41: STL       	t32, (t34)
+	  42: INCEIPL       	$4
+
+	0xFE8D05C:  91030008  stw r8,8(r3)
+	  43: GETL       	R8, t36
+	  44: GETL       	R3, t38
+	  45: ADDL       	$0x8, t38
+	  46: STL       	t36, (t38)
+	  47: INCEIPL       	$4
+
+	0xFE8D060:  7FA3EB78  or r3,r29,r29
+	  48: GETL       	R29, t40
+	  49: PUTL       	t40, R3
+	  50: INCEIPL       	$4
+
+	0xFE8D064:  4802F531  bl 0xFEBC594
+	  51: MOVL       	$0xFE8D068, t42
+	  52: PUTL       	t42, LR
+	  53: JMPo-c       	$0xFEBC594  ($4)
+
+
+
+. 2304 FE8D038 48
+. 81 3E 01 EC 80 9E 02 08 A0 09 00 0C 81 69 00 00 81 49 00 04 81 09 00 08 B0 03 00 0C 91 63 00 00 91 43 00 04 91 03 00 08 7F A3 EB 78 48 02 F5 31
+==== BB 2305 _IO_fopen@@GLIBC_2.1(0xFEBC594) approx BBs exec'd 0 ====
+
+	0xFEBC594:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFEBC598:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEBC59C:  90A10014  stw r5,20(r1)
+	   9: GETL       	R5, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEBC5A0:  38A00001  li r5,1
+	  14: MOVL       	$0x1, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0xFEBC5A4:  4BFFFF15  bl 0xFEBC4B8
+	  17: MOVL       	$0xFEBC5A8, t12
+	  18: PUTL       	t12, LR
+	  19: JMPo-c       	$0xFEBC4B8  ($4)
+
+
+
+. 2305 FEBC594 20
+. 7C A8 02 A6 94 21 FF F0 90 A1 00 14 38 A0 00 01 4B FF FF 15
+==== BB 2306 __fopen_internal(0xFEBC4B8) approx BBs exec'd 0 ====
+
+	0xFEBC4B8:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEBC4BC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEBC4C0:  480EB991  bl 0xFFA7E50
+	   9: MOVL       	$0xFEBC4C4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2306 FEBC4B8 12
+. 94 21 FF E0 7C 08 02 A6 48 0E B9 91
+==== BB 2307 (0xFEBC4C4) approx BBs exec'd 0 ====
+
+	0xFEBC4C4:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEBC4C8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEBC4CC:  9361000C  stw r27,12(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEBC4D0:  7C7B1B78  or r27,r3,r3
+	  13: GETL       	R3, t10
+	  14: PUTL       	t10, R27
+	  15: INCEIPL       	$4
+
+	0xFEBC4D4:  38600168  li r3,360
+	  16: MOVL       	$0x168, t12
+	  17: PUTL       	t12, R3
+	  18: INCEIPL       	$4
+
+	0xFEBC4D8:  93810010  stw r28,16(r1)
+	  19: GETL       	R28, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x10, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFEBC4DC:  7CBC2B78  or r28,r5,r5
+	  24: GETL       	R5, t18
+	  25: PUTL       	t18, R28
+	  26: INCEIPL       	$4
+
+	0xFEBC4E0:  93A10014  stw r29,20(r1)
+	  27: GETL       	R29, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x14, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFEBC4E4:  7C9D2378  or r29,r4,r4
+	  32: GETL       	R4, t24
+	  33: PUTL       	t24, R29
+	  34: INCEIPL       	$4
+
+	0xFEBC4E8:  93E1001C  stw r31,28(r1)
+	  35: GETL       	R31, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFEBC4EC:  90010024  stw r0,36(r1)
+	  40: GETL       	R0, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x24, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFEBC4F0:  480EDE19  bl 0xFFAA308
+	  45: MOVL       	$0xFEBC4F4, t34
+	  46: PUTL       	t34, LR
+	  47: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 2307 FEBC4C4 48
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 7C 7B 1B 78 38 60 01 68 93 81 00 10 7C BC 2B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 90 01 00 24 48 0E DE 19
+==== BB 2308 (0xFEBC4F4) approx BBs exec'd 0 ====
+
+	0xFEBC4F4:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEBC4F8:  80FE1B78  lwz r7,7032(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1B78, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0xFEBC4FC:  38800000  li r4,0
+	  10: MOVL       	$0x0, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0xFEBC500:  38A00000  li r5,0
+	  13: MOVL       	$0x0, t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0xFEBC504:  39200000  li r9,0
+	  16: MOVL       	$0x0, t12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0xFEBC508:  381F00A0  addi r0,r31,160
+	  19: GETL       	R31, t14
+	  20: ADDL       	$0xA0, t14
+	  21: PUTL       	t14, R0
+	  22: INCEIPL       	$4
+
+	0xFEBC50C:  38DF00AC  addi r6,r31,172
+	  23: GETL       	R31, t16
+	  24: ADDL       	$0xAC, t16
+	  25: PUTL       	t16, R6
+	  26: INCEIPL       	$4
+
+	0xFEBC510:  41820044  bc 12,2,0xFEBC554
+	  27: Js02o       	$0xFEBC554
+
+
+
+. 2308 FEBC4F4 32
+. 7C 7F 1B 79 80 FE 1B 78 38 80 00 00 38 A0 00 00 39 20 00 00 38 1F 00 A0 38 DF 00 AC 41 82 00 44
+==== BB 2309 (0xFEBC514) approx BBs exec'd 0 ====
+
+	0xFEBC514:  901F0048  stw r0,72(r31)
+	   0: GETL       	R0, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x48, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEBC518:  4800FC9D  bl 0xFECC1B4
+	   5: MOVL       	$0xFEBC51C, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xFECC1B4  ($4)
+
+
+
+. 2309 FEBC514 8
+. 90 1F 00 48 48 00 FC 9D
+==== BB 2310 _IO_no_init(0xFECC1B4) approx BBs exec'd 0 ====
+
+	0xFECC1B4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFECC1B8:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECC1BC:  93A10014  stw r29,20(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECC1C0:  7CBD2B78  or r29,r5,r5
+	  14: GETL       	R5, t10
+	  15: PUTL       	t10, R29
+	  16: INCEIPL       	$4
+
+	0xFECC1C4:  93E1001C  stw r31,28(r1)
+	  17: GETL       	R31, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFECC1C8:  7CDF3378  or r31,r6,r6
+	  22: GETL       	R6, t16
+	  23: PUTL       	t16, R31
+	  24: INCEIPL       	$4
+
+	0xFECC1CC:  9361000C  stw r27,12(r1)
+	  25: GETL       	R27, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0xC, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFECC1D0:  7CFB3B78  or r27,r7,r7
+	  30: GETL       	R7, t22
+	  31: PUTL       	t22, R27
+	  32: INCEIPL       	$4
+
+	0xFECC1D4:  93810010  stw r28,16(r1)
+	  33: GETL       	R28, t24
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x10, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0xFECC1D8:  7C7C1B78  or r28,r3,r3
+	  38: GETL       	R3, t28
+	  39: PUTL       	t28, R28
+	  40: INCEIPL       	$4
+
+	0xFECC1DC:  90010024  stw r0,36(r1)
+	  41: GETL       	R0, t30
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x24, t32
+	  44: STL       	t30, (t32)
+	  45: INCEIPL       	$4
+
+	0xFECC1E0:  93C10018  stw r30,24(r1)
+	  46: GETL       	R30, t34
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x18, t36
+	  49: STL       	t34, (t36)
+	  50: INCEIPL       	$4
+
+	0xFECC1E4:  4BFFFF51  bl 0xFECC134
+	  51: MOVL       	$0xFECC1E8, t38
+	  52: PUTL       	t38, LR
+	  53: JMPo-c       	$0xFECC134  ($4)
+
+
+
+. 2310 FECC1B4 52
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C BD 2B 78 93 E1 00 1C 7C DF 33 78 93 61 00 0C 7C FB 3B 78 93 81 00 10 7C 7C 1B 78 90 01 00 24 93 C1 00 18 4B FF FF 51
+==== BB 2311 _IO_old_init(0xFECC134) approx BBs exec'd 0 ====
+
+	0xFECC134:  81630048  lwz r11,72(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFECC138:  39200000  li r9,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFECC13C:  6484FBAD  oris r4,r4,0xFBAD
+	   8: GETL       	R4, t6
+	   9: ORL       	$0xFBAD0000, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0xFECC140:  38000000  li r0,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R0
+	  14: INCEIPL       	$4
+
+	0xFECC144:  2F8B0000  cmpi cr7,r11,0
+	  15: GETL       	R11, t10
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFECC148:  9421FFD0  stwu r1,-48(r1)
+	  19: GETL       	R1, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0xFFFFFFD0, t16
+	  22: PUTL       	t16, R1
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFECC14C:  90830000  stw r4,0(r3)
+	  25: GETL       	R4, t18
+	  26: GETL       	R3, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFECC150:  98030046  stb r0,70(r3)
+	  29: GETL       	R0, t22
+	  30: GETL       	R3, t24
+	  31: ADDL       	$0x46, t24
+	  32: STB       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFECC154:  9123003C  stw r9,60(r3)
+	  34: GETL       	R9, t26
+	  35: GETL       	R3, t28
+	  36: ADDL       	$0x3C, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFECC158:  9123001C  stw r9,28(r3)
+	  39: GETL       	R9, t30
+	  40: GETL       	R3, t32
+	  41: ADDL       	$0x1C, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFECC15C:  91230020  stw r9,32(r3)
+	  44: GETL       	R9, t34
+	  45: GETL       	R3, t36
+	  46: ADDL       	$0x20, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFECC160:  9123000C  stw r9,12(r3)
+	  49: GETL       	R9, t38
+	  50: GETL       	R3, t40
+	  51: ADDL       	$0xC, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFECC164:  91230004  stw r9,4(r3)
+	  54: GETL       	R9, t42
+	  55: GETL       	R3, t44
+	  56: ADDL       	$0x4, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0xFECC168:  91230008  stw r9,8(r3)
+	  59: GETL       	R9, t46
+	  60: GETL       	R3, t48
+	  61: ADDL       	$0x8, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFECC16C:  91230010  stw r9,16(r3)
+	  64: GETL       	R9, t50
+	  65: GETL       	R3, t52
+	  66: ADDL       	$0x10, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0xFECC170:  91230014  stw r9,20(r3)
+	  69: GETL       	R9, t54
+	  70: GETL       	R3, t56
+	  71: ADDL       	$0x14, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0xFECC174:  91230018  stw r9,24(r3)
+	  74: GETL       	R9, t58
+	  75: GETL       	R3, t60
+	  76: ADDL       	$0x18, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0xFECC178:  91230034  stw r9,52(r3)
+	  79: GETL       	R9, t62
+	  80: GETL       	R3, t64
+	  81: ADDL       	$0x34, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0xFECC17C:  91230024  stw r9,36(r3)
+	  84: GETL       	R9, t66
+	  85: GETL       	R3, t68
+	  86: ADDL       	$0x24, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0xFECC180:  91230028  stw r9,40(r3)
+	  89: GETL       	R9, t70
+	  90: GETL       	R3, t72
+	  91: ADDL       	$0x28, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0xFECC184:  9123002C  stw r9,44(r3)
+	  94: GETL       	R9, t74
+	  95: GETL       	R3, t76
+	  96: ADDL       	$0x2C, t76
+	  97: STL       	t74, (t76)
+	  98: INCEIPL       	$4
+
+	0xFECC188:  91230030  stw r9,48(r3)
+	  99: GETL       	R9, t78
+	 100: GETL       	R3, t80
+	 101: ADDL       	$0x30, t80
+	 102: STL       	t78, (t80)
+	 103: INCEIPL       	$4
+
+	0xFECC18C:  B1230044  sth r9,68(r3)
+	 104: GETL       	R9, t82
+	 105: GETL       	R3, t84
+	 106: ADDL       	$0x44, t84
+	 107: STW       	t82, (t84)
+	 108: INCEIPL       	$4
+
+	0xFECC190:  419E001C  bc 12,30,0xFECC1AC
+	 109: Js30o       	$0xFECC1AC
+
+
+
+. 2311 FECC134 96
+. 81 63 00 48 39 20 00 00 64 84 FB AD 38 00 00 00 2F 8B 00 00 94 21 FF D0 90 83 00 00 98 03 00 46 91 23 00 3C 91 23 00 1C 91 23 00 20 91 23 00 0C 91 23 00 04 91 23 00 08 91 23 00 10 91 23 00 14 91 23 00 18 91 23 00 34 91 23 00 24 91 23 00 28 91 23 00 2C 91 23 00 30 B1 23 00 44 41 9E 00 1C
+==== BB 2312 (0xFECC194) approx BBs exec'd 0 ====
+
+	0xFECC194:  91210010  stw r9,16(r1)
+	   0: GETL       	R9, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x10, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECC198:  91210014  stw r9,20(r1)
+	   5: GETL       	R9, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x14, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECC19C:  91210018  stw r9,24(r1)
+	  10: GETL       	R9, t8
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0x18, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFECC1A0:  912B0008  stw r9,8(r11)
+	  15: GETL       	R9, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	$0x8, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFECC1A4:  912B0000  stw r9,0(r11)
+	  20: GETL       	R9, t16
+	  21: GETL       	R11, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0xFECC1A8:  912B0004  stw r9,4(r11)
+	  24: GETL       	R9, t20
+	  25: GETL       	R11, t22
+	  26: ADDL       	$0x4, t22
+	  27: STL       	t20, (t22)
+	  28: INCEIPL       	$4
+
+	0xFECC1AC:  38210030  addi r1,r1,48
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0x30, t24
+	  31: PUTL       	t24, R1
+	  32: INCEIPL       	$4
+
+	0xFECC1B0:  4E800020  blr
+	  33: GETL       	LR, t26
+	  34: JMPo-r       	t26  ($4)
+
+
+
+. 2312 FECC194 32
+. 91 21 00 10 91 21 00 14 91 21 00 18 91 2B 00 08 91 2B 00 00 91 2B 00 04 38 21 00 30 4E 80 00 20
+==== BB 2313 (0xFECC1E8) approx BBs exec'd 0 ====
+
+	0xFECC1E8:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFECC1EC:  38000000  li r0,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFECC1F0:  93BC0060  stw r29,96(r28)
+	   7: GETL       	R29, t6
+	   8: GETL       	R28, t8
+	   9: ADDL       	$0x60, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFECC1F4:  419C0038  bc 12,28,0xFECC22C
+	  12: Js28o       	$0xFECC22C
+
+
+
+. 2313 FECC1E8 16
+. 2F 9D 00 00 38 00 00 00 93 BC 00 60 41 9C 00 38
+==== BB 2314 (0xFECC1F8) approx BBs exec'd 0 ====
+
+	0xFECC1F8:  937F00B8  stw r27,184(r31)
+	   0: GETL       	R27, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0xB8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECC1FC:  93FC005C  stw r31,92(r28)
+	   5: GETL       	R31, t4
+	   6: GETL       	R28, t6
+	   7: ADDL       	$0x5C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECC200:  901F0028  stw r0,40(r31)
+	  10: GETL       	R0, t8
+	  11: GETL       	R31, t10
+	  12: ADDL       	$0x28, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFECC204:  901F0018  stw r0,24(r31)
+	  15: GETL       	R0, t12
+	  16: GETL       	R31, t14
+	  17: ADDL       	$0x18, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFECC208:  901F001C  stw r0,28(r31)
+	  20: GETL       	R0, t16
+	  21: GETL       	R31, t18
+	  22: ADDL       	$0x1C, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0xFECC20C:  901F0008  stw r0,8(r31)
+	  25: GETL       	R0, t20
+	  26: GETL       	R31, t22
+	  27: ADDL       	$0x8, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0xFECC210:  901F0000  stw r0,0(r31)
+	  30: GETL       	R0, t24
+	  31: GETL       	R31, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0xFECC214:  901F0004  stw r0,4(r31)
+	  34: GETL       	R0, t28
+	  35: GETL       	R31, t30
+	  36: ADDL       	$0x4, t30
+	  37: STL       	t28, (t30)
+	  38: INCEIPL       	$4
+
+	0xFECC218:  901F000C  stw r0,12(r31)
+	  39: GETL       	R0, t32
+	  40: GETL       	R31, t34
+	  41: ADDL       	$0xC, t34
+	  42: STL       	t32, (t34)
+	  43: INCEIPL       	$4
+
+	0xFECC21C:  901F0010  stw r0,16(r31)
+	  44: GETL       	R0, t36
+	  45: GETL       	R31, t38
+	  46: ADDL       	$0x10, t38
+	  47: STL       	t36, (t38)
+	  48: INCEIPL       	$4
+
+	0xFECC220:  901F0014  stw r0,20(r31)
+	  49: GETL       	R0, t40
+	  50: GETL       	R31, t42
+	  51: ADDL       	$0x14, t42
+	  52: STL       	t40, (t42)
+	  53: INCEIPL       	$4
+
+	0xFECC224:  901F0020  stw r0,32(r31)
+	  54: GETL       	R0, t44
+	  55: GETL       	R31, t46
+	  56: ADDL       	$0x20, t46
+	  57: STL       	t44, (t46)
+	  58: INCEIPL       	$4
+
+	0xFECC228:  901F0024  stw r0,36(r31)
+	  59: GETL       	R0, t48
+	  60: GETL       	R31, t50
+	  61: ADDL       	$0x24, t50
+	  62: STL       	t48, (t50)
+	  63: INCEIPL       	$4
+
+	0xFECC22C:  80610024  lwz r3,36(r1)
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x24, t52
+	  66: LDL       	(t52), t54
+	  67: PUTL       	t54, R3
+	  68: INCEIPL       	$4
+
+	0xFECC230:  8361000C  lwz r27,12(r1)
+	  69: GETL       	R1, t56
+	  70: ADDL       	$0xC, t56
+	  71: LDL       	(t56), t58
+	  72: PUTL       	t58, R27
+	  73: INCEIPL       	$4
+
+	0xFECC234:  83810010  lwz r28,16(r1)
+	  74: GETL       	R1, t60
+	  75: ADDL       	$0x10, t60
+	  76: LDL       	(t60), t62
+	  77: PUTL       	t62, R28
+	  78: INCEIPL       	$4
+
+	0xFECC238:  7C6803A6  mtlr r3
+	  79: GETL       	R3, t64
+	  80: PUTL       	t64, LR
+	  81: INCEIPL       	$4
+
+	0xFECC23C:  83A10014  lwz r29,20(r1)
+	  82: GETL       	R1, t66
+	  83: ADDL       	$0x14, t66
+	  84: LDL       	(t66), t68
+	  85: PUTL       	t68, R29
+	  86: INCEIPL       	$4
+
+	0xFECC240:  83C10018  lwz r30,24(r1)
+	  87: GETL       	R1, t70
+	  88: ADDL       	$0x18, t70
+	  89: LDL       	(t70), t72
+	  90: PUTL       	t72, R30
+	  91: INCEIPL       	$4
+
+	0xFECC244:  83E1001C  lwz r31,28(r1)
+	  92: GETL       	R1, t74
+	  93: ADDL       	$0x1C, t74
+	  94: LDL       	(t74), t76
+	  95: PUTL       	t76, R31
+	  96: INCEIPL       	$4
+
+	0xFECC248:  38210020  addi r1,r1,32
+	  97: GETL       	R1, t78
+	  98: ADDL       	$0x20, t78
+	  99: PUTL       	t78, R1
+	 100: INCEIPL       	$4
+
+	0xFECC24C:  4E800020  blr
+	 101: GETL       	LR, t80
+	 102: JMPo-r       	t80  ($4)
+
+
+
+. 2314 FECC1F8 88
+. 93 7F 00 B8 93 FC 00 5C 90 1F 00 28 90 1F 00 18 90 1F 00 1C 90 1F 00 08 90 1F 00 00 90 1F 00 04 90 1F 00 0C 90 1F 00 10 90 1F 00 14 90 1F 00 20 90 1F 00 24 80 61 00 24 83 61 00 0C 83 81 00 10 7C 68 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2315 (0xFEBC51C) approx BBs exec'd 0 ====
+
+	0xFEBC51C:  809E1B50  lwz r4,6992(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEBC520:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEBC524:  909F0098  stw r4,152(r31)
+	   8: GETL       	R4, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0x98, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEBC528:  4800BD11  bl 0xFEC8238
+	  13: MOVL       	$0xFEBC52C, t10
+	  14: PUTL       	t10, LR
+	  15: JMPo-c       	$0xFEC8238  ($4)
+
+
+
+. 2315 FEBC51C 16
+. 80 9E 1B 50 7F E3 FB 78 90 9F 00 98 48 00 BD 11
+==== BB 2316 _IO_file_init@@GLIBC_2.1(0xFEC8238) approx BBs exec'd 0 ====
+
+	0xFEC8238:  7CC802A6  mflr r6
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFEC823C:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEC8240:  3920FFFF  li r9,-1
+	   9: MOVL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0xFEC8244:  3940FFFF  li r10,-1
+	  12: MOVL       	$0xFFFFFFFF, t8
+	  13: PUTL       	t8, R10
+	  14: INCEIPL       	$4
+
+	0xFEC8248:  93A10014  stw r29,20(r1)
+	  15: GETL       	R29, t10
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x14, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0xFEC824C:  7C7D1B78  or r29,r3,r3
+	  20: GETL       	R3, t14
+	  21: PUTL       	t14, R29
+	  22: INCEIPL       	$4
+
+	0xFEC8250:  90C10024  stw r6,36(r1)
+	  23: GETL       	R6, t16
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x24, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0xFEC8254:  80A30000  lwz r5,0(r3)
+	  28: GETL       	R3, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R5
+	  31: INCEIPL       	$4
+
+	0xFEC8258:  91230050  stw r9,80(r3)
+	  32: GETL       	R9, t24
+	  33: GETL       	R3, t26
+	  34: ADDL       	$0x50, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xFEC825C:  60A4240C  ori r4,r5,0x240C
+	  37: GETL       	R5, t28
+	  38: ORL       	$0x240C, t28
+	  39: PUTL       	t28, R4
+	  40: INCEIPL       	$4
+
+	0xFEC8260:  91430054  stw r10,84(r3)
+	  41: GETL       	R10, t30
+	  42: GETL       	R3, t32
+	  43: ADDL       	$0x54, t32
+	  44: STL       	t30, (t32)
+	  45: INCEIPL       	$4
+
+	0xFEC8264:  90830000  stw r4,0(r3)
+	  46: GETL       	R4, t34
+	  47: GETL       	R3, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFEC8268:  93C10018  stw r30,24(r1)
+	  50: GETL       	R30, t38
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x18, t40
+	  53: STL       	t38, (t40)
+	  54: INCEIPL       	$4
+
+	0xFEC826C:  48002EA9  bl 0xFECB114
+	  55: MOVL       	$0xFEC8270, t42
+	  56: PUTL       	t42, LR
+	  57: JMPo-c       	$0xFECB114  ($4)
+
+
+
+. 2316 FEC8238 56
+. 7C C8 02 A6 94 21 FF E0 39 20 FF FF 39 40 FF FF 93 A1 00 14 7C 7D 1B 78 90 C1 00 24 80 A3 00 00 91 23 00 50 60 A4 24 0C 91 43 00 54 90 83 00 00 93 C1 00 18 48 00 2E A9
+==== BB 2317 _IO_link_in_internal(0xFECB114) approx BBs exec'd 0 ====
+
+	0xFECB114:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFECB118:  9421FFC0  stwu r1,-64(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFC0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECB11C:  7D800026  mfcr r12
+	   9: GETL       	CR, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0xFECB120:  480DCD31  bl 0xFFA7E50
+	  12: MOVL       	$0xFECB124, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2317 FECB114 16
+. 7C 08 02 A6 94 21 FF C0 7D 80 00 26 48 0D CD 31
+==== BB 2318 (0xFECB124) approx BBs exec'd 0 ====
+
+	0xFECB124:  93C10038  stw r30,56(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x38, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECB128:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFECB12C:  90010044  stw r0,68(r1)
+	   8: GETL       	R0, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x44, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECB130:  80030000  lwz r0,0(r3)
+	  13: GETL       	R3, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0xFECB134:  93E1003C  stw r31,60(r1)
+	  17: GETL       	R31, t14
+	  18: GETL       	R1, t16
+	  19: ADDL       	$0x3C, t16
+	  20: STL       	t14, (t16)
+	  21: INCEIPL       	$4
+
+	0xFECB138:  7C7F1B78  or r31,r3,r3
+	  22: GETL       	R3, t18
+	  23: PUTL       	t18, R31
+	  24: INCEIPL       	$4
+
+	0xFECB13C:  70090080  andi. r9,r0,0x80
+	  25: GETL       	R0, t20
+	  26: ANDL       	$0x80, t20
+	  27: PUTL       	t20, R9
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0xFECB140:  93410028  stw r26,40(r1)
+	  31: GETL       	R26, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x28, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFECB144:  9361002C  stw r27,44(r1)
+	  36: GETL       	R27, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x2C, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0xFECB148:  93810030  stw r28,48(r1)
+	  41: GETL       	R28, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x30, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0xFECB14C:  93A10034  stw r29,52(r1)
+	  46: GETL       	R29, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x34, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0xFECB150:  91810024  stw r12,36(r1)
+	  51: GETL       	R12, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x24, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0xFECB154:  40820160  bc 4,2,0xFECB2B4
+	  56: Jc02o       	$0xFECB2B4
+
+
+
+. 2318 FECB124 52
+. 93 C1 00 38 7F C8 02 A6 90 01 00 44 80 03 00 00 93 E1 00 3C 7C 7F 1B 78 70 09 00 80 93 41 00 28 93 61 00 2C 93 81 00 30 93 A1 00 34 91 81 00 24 40 82 01 60
+==== BB 2319 (0xFECB158) approx BBs exec'd 0 ====
+
+	0xFECB158:  835E1B48  lwz r26,6984(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFECB15C:  60030080  ori r3,r0,0x80
+	   5: GETL       	R0, t4
+	   6: ORL       	$0x80, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFECB160:  907F0000  stw r3,0(r31)
+	   9: GETL       	R3, t6
+	  10: GETL       	R31, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECB164:  813A00B8  lwz r9,184(r26)
+	  13: GETL       	R26, t10
+	  14: ADDL       	$0xB8, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0xFECB168:  3169FFFF  addic r11,r9,-1
+	  18: GETL       	R9, t14
+	  19: ADCL       	$0xFFFFFFFF, t14  (-wCa)
+	  20: PUTL       	t14, R11
+	  21: INCEIPL       	$4
+
+	0xFECB16C:  7C0B4910  subfe r0,r11,r9
+	  22: GETL       	R11, t16
+	  23: GETL       	R9, t18
+	  24: SBBL       	t16, t18  (-rCa-wCa)
+	  25: PUTL       	t18, R0
+	  26: INCEIPL       	$4
+
+	0xFECB170:  2E000000  cmpi cr4,r0,0
+	  27: GETL       	R0, t20
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x4, CR
+	  30: INCEIPL       	$4
+
+	0xFECB174:  409201B4  bc 4,18,0xFECB328
+	  31: Jc18o       	$0xFECB328
+
+
+
+. 2319 FECB158 32
+. 83 5E 1B 48 60 03 00 80 90 7F 00 00 81 3A 00 B8 31 69 FF FF 7C 0B 49 10 2E 00 00 00 40 92 01 B4
+==== BB 2320 (0xFECB178) approx BBs exec'd 0 ====
+
+	0xFECB178:  809E05E4  lwz r4,1508(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5E4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFECB17C:  90010014  stw r0,20(r1)
+	   5: GETL       	R0, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x14, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECB180:  90810010  stw r4,16(r1)
+	  10: GETL       	R4, t8
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0x10, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFECB184:  83BE05E0  lwz r29,1504(r30)
+	  15: GETL       	R30, t12
+	  16: ADDL       	$0x5E0, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R29
+	  19: INCEIPL       	$4
+
+	0xFECB188:  3B828BA0  addi r28,r2,-29792
+	  20: GETL       	R2, t16
+	  21: ADDL       	$0xFFFF8BA0, t16
+	  22: PUTL       	t16, R28
+	  23: INCEIPL       	$4
+
+	0xFECB18C:  80BD0008  lwz r5,8(r29)
+	  24: GETL       	R29, t18
+	  25: ADDL       	$0x8, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R5
+	  28: INCEIPL       	$4
+
+	0xFECB190:  7F85E000  cmp cr7,r5,r28
+	  29: GETL       	R5, t22
+	  30: GETL       	R28, t24
+	  31: CMPL       	t22, t24, t26  (-rSo)
+	  32: ICRFL       	t26, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0xFECB194:  419E0030  bc 12,30,0xFECB1C4
+	  34: Js30o       	$0xFECB1C4
+
+
+
+. 2320 FECB178 32
+. 80 9E 05 E4 90 01 00 14 90 81 00 10 83 BE 05 E0 3B 82 8B A0 80 BD 00 08 7F 85 E0 00 41 9E 00 30
+==== BB 2321 (0xFECB198) approx BBs exec'd 0 ====
+
+	0xFECB198:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFECB19C:  38C00001  li r6,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFECB1A0:  7D20E828  lwarx r9,r0,r29
+	   6: GETL       	R29, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0xFECB1A4:  7C095800  cmp cr0,r9,r11
+	  11: GETL       	R9, t8
+	  12: GETL       	R11, t10
+	  13: CMPL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFECB1A8:  4082000C  bc 4,2,0xFECB1B4
+	  16: Jc02o       	$0xFECB1B4
+
+
+
+. 2321 FECB198 20
+. 39 60 00 00 38 C0 00 01 7D 20 E8 28 7C 09 58 00 40 82 00 0C
+==== BB 2322 (0xFECB1AC) approx BBs exec'd 0 ====
+
+	0xFECB1AC:  7CC0E92D  stwcx. r6,r0,r29
+	   0: GETL       	R29, t0
+	   1: GETL       	R6, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECB1B0:  40A2FFF0  bc 5,2,0xFECB1A0
+	   6: Jc02o       	$0xFECB1A0
+
+
+
+. 2322 FECB1AC 8
+. 7C C0 E9 2D 40 A2 FF F0
+==== BB 2323 (0xFECB1A0) approx BBs exec'd 0 ====
+
+	0xFECB1A0:  7D20E828  lwarx r9,r0,r29
+	   0: GETL       	R29, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECB1A4:  7C095800  cmp cr0,r9,r11
+	   5: GETL       	R9, t4
+	   6: GETL       	R11, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECB1A8:  4082000C  bc 4,2,0xFECB1B4
+	  10: Jc02o       	$0xFECB1B4
+
+
+
+. 2323 FECB1A0 12
+. 7D 20 E8 28 7C 09 58 00 40 82 00 0C
+==== BB 2324 (0xFECB1B4) approx BBs exec'd 0 ====
+
+	0xFECB1B4:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFECB1B8:  2C890000  cmpi cr1,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFECB1BC:  408601BC  bc 4,6,0xFECB378
+	   5: Jc06o       	$0xFECB378
+
+
+
+. 2324 FECB1B4 12
+. 4C 00 01 2C 2C 89 00 00 40 86 01 BC
+==== BB 2325 (0xFECB1C0) approx BBs exec'd 0 ====
+
+	0xFECB1C0:  939D0008  stw r28,8(r29)
+	   0: GETL       	R28, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECB1C4:  815D0004  lwz r10,4(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0xFECB1C8:  837E05E8  lwz r27,1512(r30)
+	  10: GETL       	R30, t8
+	  11: ADDL       	$0x5E8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R27
+	  14: INCEIPL       	$4
+
+	0xFECB1CC:  390A0001  addi r8,r10,1
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R8
+	  18: INCEIPL       	$4
+
+	0xFECB1D0:  911D0004  stw r8,4(r29)
+	  19: GETL       	R8, t14
+	  20: GETL       	R29, t16
+	  21: ADDL       	$0x4, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFECB1D4:  80FF0000  lwz r7,0(r31)
+	  24: GETL       	R31, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R7
+	  27: INCEIPL       	$4
+
+	0xFECB1D8:  93FB0000  stw r31,0(r27)
+	  28: GETL       	R31, t22
+	  29: GETL       	R27, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0xFECB1DC:  70E98000  andi. r9,r7,0x8000
+	  32: GETL       	R7, t26
+	  33: ANDL       	$0x8000, t26
+	  34: PUTL       	t26, R9
+	  35: CMP0L       	t26, t28  (-rSo)
+	  36: ICRFL       	t28, $0x0, CR
+	  37: INCEIPL       	$4
+
+	0xFECB1E0:  40820054  bc 4,2,0xFECB234
+	  38: Jc02o       	$0xFECB234
+
+
+
+. 2325 FECB1C0 36
+. 93 9D 00 08 81 5D 00 04 83 7E 05 E8 39 0A 00 01 91 1D 00 04 80 FF 00 00 93 FB 00 00 70 E9 80 00 40 82 00 54
+==== BB 2326 (0xFECB1E4) approx BBs exec'd 0 ====
+
+	0xFECB1E4:  807F0048  lwz r3,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECB1E8:  3B828BA0  addi r28,r2,-29792
+	   5: GETL       	R2, t4
+	   6: ADDL       	$0xFFFF8BA0, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0xFECB1EC:  81830008  lwz r12,8(r3)
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x8, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0xFECB1F0:  7F0CE000  cmp cr6,r12,r28
+	  14: GETL       	R12, t10
+	  15: GETL       	R28, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0xFECB1F4:  419A0034  bc 12,26,0xFECB228
+	  19: Js26o       	$0xFECB228
+
+
+
+. 2326 FECB1E4 20
+. 80 7F 00 48 3B 82 8B A0 81 83 00 08 7F 0C E0 00 41 9A 00 34
+==== BB 2327 (0xFECB1F8) approx BBs exec'd 0 ====
+
+	0xFECB1F8:  38A00000  li r5,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFECB1FC:  38000001  li r0,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFECB200:  7C801828  lwarx r4,r0,r3
+	   6: GETL       	R3, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFECB204:  7C042800  cmp cr0,r4,r5
+	  11: GETL       	R4, t8
+	  12: GETL       	R5, t10
+	  13: CMPL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFECB208:  4082000C  bc 4,2,0xFECB214
+	  16: Jc02o       	$0xFECB214
+
+
+
+. 2327 FECB1F8 20
+. 38 A0 00 00 38 00 00 01 7C 80 18 28 7C 04 28 00 40 82 00 0C
+==== BB 2328 (0xFECB20C) approx BBs exec'd 0 ====
+
+	0xFECB20C:  7C00192D  stwcx. r0,r0,r3
+	   0: GETL       	R3, t0
+	   1: GETL       	R0, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECB210:  40A2FFF0  bc 5,2,0xFECB200
+	   6: Jc02o       	$0xFECB200
+
+
+
+. 2328 FECB20C 8
+. 7C 00 19 2D 40 A2 FF F0
+==== BB 2329 (0xFECB200) approx BBs exec'd 0 ====
+
+	0xFECB200:  7C801828  lwarx r4,r0,r3
+	   0: GETL       	R3, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFECB204:  7C042800  cmp cr0,r4,r5
+	   5: GETL       	R4, t4
+	   6: GETL       	R5, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECB208:  4082000C  bc 4,2,0xFECB214
+	  10: Jc02o       	$0xFECB214
+
+
+
+. 2329 FECB200 12
+. 7C 80 18 28 7C 04 28 00 40 82 00 0C
+==== BB 2330 (0xFECB214) approx BBs exec'd 0 ====
+
+	0xFECB214:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFECB218:  2F840000  cmpi cr7,r4,0
+	   1: GETL       	R4, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFECB21C:  409E018C  bc 4,30,0xFECB3A8
+	   5: Jc30o       	$0xFECB3A8
+
+
+
+. 2330 FECB214 12
+. 4C 00 01 2C 2F 84 00 00 40 9E 01 8C
+==== BB 2331 (0xFECB220) approx BBs exec'd 0 ====
+
+	0xFECB220:  807F0048  lwz r3,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECB224:  93830008  stw r28,8(r3)
+	   5: GETL       	R28, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECB228:  81230004  lwz r9,4(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0xFECB22C:  3B890001  addi r28,r9,1
+	  15: GETL       	R9, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R28
+	  18: INCEIPL       	$4
+
+	0xFECB230:  93830004  stw r28,4(r3)
+	  19: GETL       	R28, t14
+	  20: GETL       	R3, t16
+	  21: ADDL       	$0x4, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFECB234:  815F0000  lwz r10,0(r31)
+	  24: GETL       	R31, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R10
+	  27: INCEIPL       	$4
+
+	0xFECB238:  817E05EC  lwz r11,1516(r30)
+	  28: GETL       	R30, t22
+	  29: ADDL       	$0x5EC, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R11
+	  32: INCEIPL       	$4
+
+	0xFECB23C:  807E1BEC  lwz r3,7148(r30)
+	  33: GETL       	R30, t26
+	  34: ADDL       	$0x1BEC, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R3
+	  37: INCEIPL       	$4
+
+	0xFECB240:  71498000  andi. r9,r10,0x8000
+	  38: GETL       	R10, t30
+	  39: ANDL       	$0x8000, t30
+	  40: PUTL       	t30, R9
+	  41: CMP0L       	t30, t32  (-rSo)
+	  42: ICRFL       	t32, $0x0, CR
+	  43: INCEIPL       	$4
+
+	0xFECB244:  810B0000  lwz r8,0(r11)
+	  44: GETL       	R11, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R8
+	  47: INCEIPL       	$4
+
+	0xFECB248:  80C30000  lwz r6,0(r3)
+	  48: GETL       	R3, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R6
+	  51: INCEIPL       	$4
+
+	0xFECB24C:  38E80001  addi r7,r8,1
+	  52: GETL       	R8, t42
+	  53: ADDL       	$0x1, t42
+	  54: PUTL       	t42, R7
+	  55: INCEIPL       	$4
+
+	0xFECB250:  93E30000  stw r31,0(r3)
+	  56: GETL       	R31, t44
+	  57: GETL       	R3, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0xFECB254:  90EB0000  stw r7,0(r11)
+	  60: GETL       	R7, t48
+	  61: GETL       	R11, t50
+	  62: STL       	t48, (t50)
+	  63: INCEIPL       	$4
+
+	0xFECB258:  90DF0034  stw r6,52(r31)
+	  64: GETL       	R6, t52
+	  65: GETL       	R31, t54
+	  66: ADDL       	$0x34, t54
+	  67: STL       	t52, (t54)
+	  68: INCEIPL       	$4
+
+	0xFECB25C:  4082001C  bc 4,2,0xFECB278
+	  69: Jc02o       	$0xFECB278
+
+
+
+. 2331 FECB220 64
+. 80 7F 00 48 93 83 00 08 81 23 00 04 3B 89 00 01 93 83 00 04 81 5F 00 00 81 7E 05 EC 80 7E 1B EC 71 49 80 00 81 0B 00 00 80 C3 00 00 38 E8 00 01 93 E3 00 00 90 EB 00 00 90 DF 00 34 40 82 00 1C
+==== BB 2332 (0xFECB260) approx BBs exec'd 0 ====
+
+	0xFECB260:  807F0048  lwz r3,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECB264:  83E30004  lwz r31,4(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0xFECB268:  393FFFFF  addi r9,r31,-1
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0xFFFFFFFF, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFECB26C:  2C890000  cmpi cr1,r9,0
+	  14: GETL       	R9, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0xFECB270:  91230004  stw r9,4(r3)
+	  18: GETL       	R9, t14
+	  19: GETL       	R3, t16
+	  20: ADDL       	$0x4, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFECB274:  418600CC  bc 12,6,0xFECB340
+	  23: Js06o       	$0xFECB340
+
+
+
+. 2332 FECB260 24
+. 80 7F 00 48 83 E3 00 04 39 3F FF FF 2C 89 00 00 91 23 00 04 41 86 00 CC
+==== BB 2333 (0xFECB340) approx BBs exec'd 0 ====
+
+	0xFECB340:  91230008  stw r9,8(r3)
+	   0: GETL       	R9, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECB344:  7C0004AC  sync
+	   5: INCEIPL       	$4
+
+	0xFECB348:  7D801828  lwarx r12,r0,r3
+	   6: GETL       	R3, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0xFECB34C:  7D20192D  stwcx. r9,r0,r3
+	  11: GETL       	R3, t8
+	  12: GETL       	R9, t10
+	  13: LOCKo       	
+	  14: STL       	t10, (t8)  (-rSo)
+	  15: ICRFL       	cr, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFECB350:  40A2FFF8  bc 5,2,0xFECB348
+	  17: Jc02o       	$0xFECB348
+
+
+
+. 2333 FECB340 20
+. 91 23 00 08 7C 00 04 AC 7D 80 18 28 7D 20 19 2D 40 A2 FF F8
+==== BB 2334 (0xFECB354) approx BBs exec'd 0 ====
+
+	0xFECB354:  2F0C0001  cmpi cr6,r12,1
+	   0: GETL       	R12, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFECB358:  4099FF20  bc 4,25,0xFECB278
+	   5: Jc25o       	$0xFECB278
+
+
+
+. 2334 FECB354 8
+. 2F 0C 00 01 40 99 FF 20
+==== BB 2335 (0xFECB278) approx BBs exec'd 0 ====
+
+	0xFECB278:  80BD0004  lwz r5,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFECB27C:  38800000  li r4,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFECB280:  909B0000  stw r4,0(r27)
+	   8: GETL       	R4, t6
+	   9: GETL       	R27, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFECB284:  3925FFFF  addi r9,r5,-1
+	  12: GETL       	R5, t10
+	  13: ADDL       	$0xFFFFFFFF, t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0xFECB288:  2F890000  cmpi cr7,r9,0
+	  16: GETL       	R9, t12
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x7, CR
+	  19: INCEIPL       	$4
+
+	0xFECB28C:  913D0004  stw r9,4(r29)
+	  20: GETL       	R9, t16
+	  21: GETL       	R29, t18
+	  22: ADDL       	$0x4, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0xFECB290:  409E0020  bc 4,30,0xFECB2B0
+	  25: Jc30o       	$0xFECB2B0
+
+
+
+. 2335 FECB278 28
+. 80 BD 00 04 38 80 00 00 90 9B 00 00 39 25 FF FF 2F 89 00 00 91 3D 00 04 40 9E 00 20
+==== BB 2336 (0xFECB294) approx BBs exec'd 0 ====
+
+	0xFECB294:  913D0008  stw r9,8(r29)
+	   0: GETL       	R9, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECB298:  7C0004AC  sync
+	   5: INCEIPL       	$4
+
+	0xFECB29C:  7F60E828  lwarx r27,r0,r29
+	   6: GETL       	R29, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R27
+	  10: INCEIPL       	$4
+
+	0xFECB2A0:  7D20E92D  stwcx. r9,r0,r29
+	  11: GETL       	R29, t8
+	  12: GETL       	R9, t10
+	  13: LOCKo       	
+	  14: STL       	t10, (t8)  (-rSo)
+	  15: ICRFL       	cr, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFECB2A4:  40A2FFF8  bc 5,2,0xFECB29C
+	  17: Jc02o       	$0xFECB29C
+
+
+
+. 2336 FECB294 20
+. 91 3D 00 08 7C 00 04 AC 7F 60 E8 28 7D 20 E9 2D 40 A2 FF F8
+==== BB 2337 (0xFECB2A8) approx BBs exec'd 0 ====
+
+	0xFECB2A8:  2C1B0001  cmpi cr0,r27,1
+	   0: GETL       	R27, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFECB2AC:  418100D8  bc 12,1,0xFECB384
+	   5: Js01o       	$0xFECB384
+
+
+
+. 2337 FECB2A8 8
+. 2C 1B 00 01 41 81 00 D8
+==== BB 2338 (0xFECB2B0) approx BBs exec'd 0 ====
+
+	0xFECB2B0:  40920034  bc 4,18,0xFECB2E4
+	   0: Jc18o       	$0xFECB2E4
+
+
+
+. 2338 FECB2B0 4
+. 40 92 00 34
+==== BB 2339 (0xFECB2B4) approx BBs exec'd 0 ====
+
+	0xFECB2B4:  83A10044  lwz r29,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFECB2B8:  81210024  lwz r9,36(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x24, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFECB2BC:  7FA803A6  mtlr r29
+	  10: GETL       	R29, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFECB2C0:  83410028  lwz r26,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R26
+	  17: INCEIPL       	$4
+
+	0xFECB2C4:  8361002C  lwz r27,44(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x2C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R27
+	  22: INCEIPL       	$4
+
+	0xFECB2C8:  7D208120  mtcrf 0x8,r9
+	  23: GETL       	R9, t18
+	  24: ICRFL       	t18, $0x4, CR
+	  25: INCEIPL       	$4
+
+	0xFECB2CC:  83810030  lwz r28,48(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x30, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R28
+	  30: INCEIPL       	$4
+
+	0xFECB2D0:  83A10034  lwz r29,52(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x34, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R29
+	  35: INCEIPL       	$4
+
+	0xFECB2D4:  83C10038  lwz r30,56(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x38, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R30
+	  40: INCEIPL       	$4
+
+	0xFECB2D8:  83E1003C  lwz r31,60(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x3C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R31
+	  45: INCEIPL       	$4
+
+	0xFECB2DC:  38210040  addi r1,r1,64
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x40, t36
+	  48: PUTL       	t36, R1
+	  49: INCEIPL       	$4
+
+	0xFECB2E0:  4E800020  blr
+	  50: GETL       	LR, t38
+	  51: JMPo-r       	t38  ($4)
+
+
+
+. 2339 FECB2B4 48
+. 83 A1 00 44 81 21 00 24 7F A8 03 A6 83 41 00 28 83 61 00 2C 7D 20 81 20 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 2340 (0xFEC8270) approx BBs exec'd 0 ====
+
+	0xFEC8270:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEC8274:  3860FFFF  li r3,-1
+	   5: MOVL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEC8278:  907D0038  stw r3,56(r29)
+	   8: GETL       	R3, t6
+	   9: GETL       	R29, t8
+	  10: ADDL       	$0x38, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEC827C:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0xFEC8280:  7C0803A6  mtlr r0
+	  18: GETL       	R0, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFEC8284:  83A10014  lwz r29,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0xFEC8288:  38210020  addi r1,r1,32
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0xFEC828C:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+
+. 2340 FEC8270 32
+. 80 01 00 24 38 60 FF FF 90 7D 00 38 83 C1 00 18 7C 08 03 A6 83 A1 00 14 38 21 00 20 4E 80 00 20
+==== BB 2341 (0xFEBC52C) approx BBs exec'd 0 ====
+
+	0xFEBC52C:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEBC530:  7F64DB78  or r4,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFEBC534:  7FA5EB78  or r5,r29,r29
+	   6: GETL       	R29, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFEBC538:  7F86E378  or r6,r28,r28
+	   9: GETL       	R28, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0xFEBC53C:  4800C141  bl 0xFEC867C
+	  12: MOVL       	$0xFEBC540, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0xFEC867C  ($4)
+
+
+
+. 2341 FEBC52C 20
+. 7F E3 FB 78 7F 64 DB 78 7F A5 EB 78 7F 86 E3 78 48 00 C1 41
+==== BB 2342 _IO_file_fopen@@GLIBC_2.1(0xFEC867C) approx BBs exec'd 0 ====
+
+	0xFEC867C:  7CE802A6  mflr r7
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFEC8680:  9421FFC0  stwu r1,-64(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFC0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEC8684:  480DF7CD  bl 0xFFA7E50
+	   9: MOVL       	$0xFEC8688, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2342 FEC867C 12
+. 7C E8 02 A6 94 21 FF C0 48 0D F7 CD
+==== BB 2343 (0xFEC8688) approx BBs exec'd 0 ====
+
+	0xFEC8688:  9361002C  stw r27,44(r1)
+	   0: GETL       	R27, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x2C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEC868C:  93C10038  stw r30,56(r1)
+	   5: GETL       	R30, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x38, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEC8690:  7C7B1B78  or r27,r3,r3
+	  10: GETL       	R3, t8
+	  11: PUTL       	t8, R27
+	  12: INCEIPL       	$4
+
+	0xFEC8694:  90E10044  stw r7,68(r1)
+	  13: GETL       	R7, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x44, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEC8698:  7FC802A6  mflr r30
+	  18: GETL       	LR, t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0xFEC869C:  80030038  lwz r0,56(r3)
+	  21: GETL       	R3, t16
+	  22: ADDL       	$0x38, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R0
+	  25: INCEIPL       	$4
+
+	0xFEC86A0:  7C872378  or r7,r4,r4
+	  26: GETL       	R4, t20
+	  27: PUTL       	t20, R7
+	  28: INCEIPL       	$4
+
+	0xFEC86A4:  93E1003C  stw r31,60(r1)
+	  29: GETL       	R31, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x3C, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFEC86A8:  39400000  li r10,0
+	  34: MOVL       	$0x0, t26
+	  35: PUTL       	t26, R10
+	  36: INCEIPL       	$4
+
+	0xFEC86AC:  2F80FFFF  cmpi cr7,r0,-1
+	  37: GETL       	R0, t28
+	  38: MOVL       	$0xFFFFFFFF, t32
+	  39: CMPL       	t28, t32, t30  (-rSo)
+	  40: ICRFL       	t30, $0x7, CR
+	  41: INCEIPL       	$4
+
+	0xFEC86B0:  93210024  stw r25,36(r1)
+	  42: GETL       	R25, t34
+	  43: GETL       	R1, t36
+	  44: ADDL       	$0x24, t36
+	  45: STL       	t34, (t36)
+	  46: INCEIPL       	$4
+
+	0xFEC86B4:  93410028  stw r26,40(r1)
+	  47: GETL       	R26, t38
+	  48: GETL       	R1, t40
+	  49: ADDL       	$0x28, t40
+	  50: STL       	t38, (t40)
+	  51: INCEIPL       	$4
+
+	0xFEC86B8:  7C3F0B78  or r31,r1,r1
+	  52: GETL       	R1, t42
+	  53: PUTL       	t42, R31
+	  54: INCEIPL       	$4
+
+	0xFEC86BC:  93810030  stw r28,48(r1)
+	  55: GETL       	R28, t44
+	  56: GETL       	R1, t46
+	  57: ADDL       	$0x30, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0xFEC86C0:  38600000  li r3,0
+	  60: MOVL       	$0x0, t48
+	  61: PUTL       	t48, R3
+	  62: INCEIPL       	$4
+
+	0xFEC86C4:  93A10034  stw r29,52(r1)
+	  63: GETL       	R29, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x34, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0xFEC86C8:  409E0030  bc 4,30,0xFEC86F8
+	  68: Jc30o       	$0xFEC86F8
+
+
+
+. 2343 FEC8688 68
+. 93 61 00 2C 93 C1 00 38 7C 7B 1B 78 90 E1 00 44 7F C8 02 A6 80 03 00 38 7C 87 23 78 93 E1 00 3C 39 40 00 00 2F 80 FF FF 93 21 00 24 93 41 00 28 7C 3F 0B 78 93 81 00 30 38 60 00 00 93 A1 00 34 40 9E 00 30
+==== BB 2344 (0xFEC86CC) approx BBs exec'd 0 ====
+
+	0xFEC86CC:  88050000  lbz r0,0(r5)
+	   0: GETL       	R5, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEC86D0:  2F800072  cmpi cr7,r0,114
+	   4: GETL       	R0, t4
+	   5: MOVL       	$0x72, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFEC86D4:  419E006C  bc 12,30,0xFEC8740
+	   9: Js30o       	$0xFEC8740
+
+
+
+. 2344 FEC86CC 12
+. 88 05 00 00 2F 80 00 72 41 9E 00 6C
+==== BB 2345 (0xFEC8740) approx BBs exec'd 0 ====
+
+	0xFEC8740:  39000000  li r8,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0xFEC8744:  3B800008  li r28,8
+	   3: MOVL       	$0x8, t2
+	   4: PUTL       	t2, R28
+	   5: INCEIPL       	$4
+
+	0xFEC8748:  817B003C  lwz r11,60(r27)
+	   6: GETL       	R27, t4
+	   7: ADDL       	$0x3C, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R11
+	  10: INCEIPL       	$4
+
+	0xFEC874C:  7CBA2B78  or r26,r5,r5
+	  11: GETL       	R5, t8
+	  12: PUTL       	t8, R26
+	  13: INCEIPL       	$4
+
+	0xFEC8750:  39200001  li r9,1
+	  14: MOVL       	$0x1, t10
+	  15: PUTL       	t10, R9
+	  16: INCEIPL       	$4
+
+	0xFEC8754:  8C050001  lbzu r0,1(r5)
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x1, t12
+	  19: PUTL       	t12, R5
+	  20: LDB       	(t12), t14
+	  21: PUTL       	t14, R0
+	  22: INCEIPL       	$4
+
+	0xFEC8758:  2F800063  cmpi cr7,r0,99
+	  23: GETL       	R0, t16
+	  24: MOVL       	$0x63, t20
+	  25: CMPL       	t16, t20, t18  (-rSo)
+	  26: ICRFL       	t18, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0xFEC875C:  419E0394  bc 12,30,0xFEC8AF0
+	  28: Js30o       	$0xFEC8AF0
+
+
+
+. 2345 FEC8740 32
+. 39 00 00 00 3B 80 00 08 81 7B 00 3C 7C BA 2B 78 39 20 00 01 8C 05 00 01 2F 80 00 63 41 9E 03 94
+==== BB 2346 (0xFEC8AF0) approx BBs exec'd 0 ====
+
+	0xFEC8AF0:  616B0002  ori r11,r11,0x2
+	   0: GETL       	R11, t0
+	   1: ORL       	$0x2, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFEC8AF4:  917B003C  stw r11,60(r27)
+	   4: GETL       	R11, t2
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x3C, t4
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEC8AF8:  4BFFFC90  b 0xFEC8788
+	   9: JMPo       	$0xFEC8788  ($4)
+
+
+
+. 2346 FEC8AF0 12
+. 61 6B 00 02 91 7B 00 3C 4B FF FC 90
+==== BB 2347 (0xFEC8788) approx BBs exec'd 0 ====
+
+	0xFEC8788:  71600002  andi. r0,r11,0x2
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x2, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEC878C:  7D005378  or r0,r8,r10
+	   6: GETL       	R8, t4
+	   7: GETL       	R10, t6
+	   8: ORL       	t6, t4
+	   9: PUTL       	t4, R0
+	  10: INCEIPL       	$4
+
+	0xFEC8790:  40820388  bc 4,2,0xFEC8B18
+	  11: Jc02o       	$0xFEC8B18
+
+
+
+. 2347 FEC8788 12
+. 71 60 00 02 7D 00 53 78 40 82 03 88
+==== BB 2348 (0xFEC8B18) approx BBs exec'd 0 ====
+
+	0xFEC8B18:  21260000  subfic r9,r6,0
+	   0: GETL       	R6, t0
+	   1: MOVL       	$0x0, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEC8B1C:  7D093114  adde r8,r9,r6
+	   5: GETL       	R9, t4
+	   6: GETL       	R6, t6
+	   7: ADCL       	t4, t6  (-rCa-wCa)
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0xFEC8B20:  7CE33B78  or r3,r7,r7
+	  10: GETL       	R7, t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFEC8B24:  5505801E  rlwinm r5,r8,16,0,15
+	  13: GETL       	R8, t10
+	  14: SHLL       	$0x10, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0xFEC8B28:  7CA40378  or r4,r5,r0
+	  17: GETL       	R5, t12
+	  18: GETL       	R0, t14
+	  19: ORL       	t14, t12
+	  20: PUTL       	t12, R4
+	  21: INCEIPL       	$4
+
+	0xFEC8B2C:  38A001B6  li r5,438
+	  22: MOVL       	$0x1B6, t16
+	  23: PUTL       	t16, R5
+	  24: INCEIPL       	$4
+
+	0xFEC8B30:  4CC63182  crxor 6,6,6
+	  25: GETL       	CR, t18
+	  26: XBITB       	t18, $0x6, t20
+	  27: XBITB       	t18, $0x6, t22
+	  28: XORB       	t22, t20
+	  29: IBITL       	t20, $0x6, t18
+	  30: PUTL       	t18, CR
+	  31: INCEIPL       	$4
+
+	0xFEC8B34:  480591ED  bl 0xFF21D20
+	  32: MOVL       	$0xFEC8B38, t24
+	  33: PUTL       	t24, LR
+	  34: JMPo-c       	$0xFF21D20  ($4)
+
+
+
+. 2348 FEC8B18 32
+. 21 26 00 00 7D 09 31 14 7C E3 3B 78 55 05 80 1E 7C A4 03 78 38 A0 01 B6 4C C6 31 82 48 05 91 ED
+==== BB 2349 (0xFEC8B38) approx BBs exec'd 0 ====
+
+	0xFEC8B38:  4BFFFC7C  b 0xFEC87B4
+	   0: JMPo       	$0xFEC87B4  ($4)
+
+
+
+. 2349 FEC8B38 4
+. 4B FF FC 7C
+==== BB 2350 (0xFEC87B4) approx BBs exec'd 0 ====
+
+	0xFEC87B4:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEC87B8:  7C7D1B78  or r29,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R29
+	   6: INCEIPL       	$4
+
+	0xFEC87BC:  3B200000  li r25,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0xFEC87C0:  41840250  bc 12,4,0xFEC8A10
+	  10: Js04o       	$0xFEC8A10
+
+
+
+. 2350 FEC87B4 16
+. 2C 83 00 00 7C 7D 1B 78 3B 20 00 00 41 84 02 50
+==== BB 2351 (0xFEC87C4) approx BBs exec'd 0 ====
+
+	0xFEC87C4:  5780A33F  rlwinm. r0,r28,20,12,31
+	   0: GETL       	R28, t0
+	   1: SHRL       	$0xC, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEC87C8:  801B0000  lwz r0,0(r27)
+	   6: GETL       	R27, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFEC87CC:  38E0EFF3  li r7,-4109
+	  10: MOVL       	$0xFFFFEFF3, t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0xFEC87D0:  907B0038  stw r3,56(r27)
+	  13: GETL       	R3, t10
+	  14: GETL       	R27, t12
+	  15: ADDL       	$0x38, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEC87D4:  7C0C3838  and r12,r0,r7
+	  18: GETL       	R0, t14
+	  19: GETL       	R7, t16
+	  20: ANDL       	t14, t16
+	  21: PUTL       	t16, R12
+	  22: INCEIPL       	$4
+
+	0xFEC87D8:  7D8BE378  or r11,r12,r28
+	  23: GETL       	R12, t18
+	  24: GETL       	R28, t20
+	  25: ORL       	t20, t18
+	  26: PUTL       	t18, R11
+	  27: INCEIPL       	$4
+
+	0xFEC87DC:  917B0000  stw r11,0(r27)
+	  28: GETL       	R11, t22
+	  29: GETL       	R27, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0xFEC87E0:  4182000C  bc 12,2,0xFEC87EC
+	  32: Js02o       	$0xFEC87EC
+
+
+
+. 2351 FEC87C4 32
+. 57 80 A3 3F 80 1B 00 00 38 E0 EF F3 90 7B 00 38 7C 0C 38 38 7D 8B E3 78 91 7B 00 00 41 82 00 0C
+==== BB 2352 (0xFEC87EC) approx BBs exec'd 0 ====
+
+	0xFEC87EC:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC87F0:  48002925  bl 0xFECB114
+	   3: MOVL       	$0xFEC87F4, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFECB114  ($4)
+
+
+
+. 2352 FEC87EC 8
+. 7F 63 DB 78 48 00 29 25
+==== BB 2353 (0xFEC87F4) approx BBs exec'd 0 ====
+
+	0xFEC87F4:  7F79DB79  or. r25,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R25
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEC87F8:  41820218  bc 12,2,0xFEC8A10
+	   5: Js02o       	$0xFEC8A10
+
+
+
+. 2353 FEC87F4 8
+. 7F 79 DB 79 41 82 02 18
+==== BB 2354 (0xFEC87FC) approx BBs exec'd 0 ====
+
+	0xFEC87FC:  809E05DC  lwz r4,1500(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5DC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEC8800:  387A0001  addi r3,r26,1
+	   5: GETL       	R26, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFEC8804:  4800EC0D  bl 0xFED7410
+	   9: MOVL       	$0xFEC8808, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFED7410  ($4)
+
+
+
+. 2354 FEC87FC 12
+. 80 9E 05 DC 38 7A 00 01 48 00 EC 0D
+==== BB 2355 strstr(0xFED7410) approx BBs exec'd 0 ====
+
+	0xFED7410:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED7414:  89640000  lbz r11,0(r4)
+	   6: GETL       	R4, t4
+	   7: LDB       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0xFED7418:  2F8B0000  cmpi cr7,r11,0
+	  10: GETL       	R11, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0xFED741C:  419E0078  bc 12,30,0xFED7494
+	  14: Js30o       	$0xFED7494
+
+
+
+. 2355 FED7410 16
+. 94 21 FF F0 89 64 00 00 2F 8B 00 00 41 9E 00 78
+==== BB 2356 (0xFED7420) approx BBs exec'd 0 ====
+
+	0xFED7420:  3863FFFF  addi r3,r3,-1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFED7424:  8C030001  lbzu r0,1(r3)
+	   4: GETL       	R3, t2
+	   5: ADDL       	$0x1, t2
+	   6: PUTL       	t2, R3
+	   7: LDB       	(t2), t4
+	   8: PUTL       	t4, R0
+	   9: INCEIPL       	$4
+
+	0xFED7428:  2C000000  cmpi cr0,r0,0
+	  10: GETL       	R0, t6
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0xFED742C:  7F005800  cmp cr6,r0,r11
+	  14: GETL       	R0, t10
+	  15: GETL       	R11, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0xFED7430:  41820060  bc 12,2,0xFED7490
+	  19: Js02o       	$0xFED7490
+
+
+
+. 2356 FED7420 20
+. 38 63 FF FF 8C 03 00 01 2C 00 00 00 7F 00 58 00 41 82 00 60
+==== BB 2357 (0xFED7434) approx BBs exec'd 0 ====
+
+	0xFED7434:  409AFFF0  bc 4,26,0xFED7424
+	   0: Jc26o       	$0xFED7424
+
+
+
+. 2357 FED7434 4
+. 40 9A FF F0
+==== BB 2358 (0xFED7424) approx BBs exec'd 0 ====
+
+	0xFED7424:  8C030001  lbzu r0,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFED7428:  2C000000  cmpi cr0,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFED742C:  7F005800  cmp cr6,r0,r11
+	  10: GETL       	R0, t8
+	  11: GETL       	R11, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0xFED7430:  41820060  bc 12,2,0xFED7490
+	  15: Js02o       	$0xFED7490
+
+
+
+. 2358 FED7424 16
+. 8C 03 00 01 2C 00 00 00 7F 00 58 00 41 82 00 60
+==== BB 2359 (0xFED7490) approx BBs exec'd 0 ====
+
+	0xFED7490:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED7494:  38210010  addi r1,r1,16
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x10, t2
+	   5: PUTL       	t2, R1
+	   6: INCEIPL       	$4
+
+	0xFED7498:  4E800020  blr
+	   7: GETL       	LR, t4
+	   8: JMPo-r       	t4  ($4)
+
+
+
+. 2359 FED7490 12
+. 38 60 00 00 38 21 00 10 4E 80 00 20
+==== BB 2360 (0xFEC8808) approx BBs exec'd 0 ====
+
+	0xFEC8808:  7C7D1B79  or. r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEC880C:  41820204  bc 12,2,0xFEC8A10
+	   5: Js02o       	$0xFEC8A10
+
+
+
+. 2360 FEC8808 8
+. 7C 7D 1B 79 41 82 02 04
+==== BB 2361 (0xFEC8A10) approx BBs exec'd 0 ====
+
+	0xFEC8A10:  7F23CB78  or r3,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC8A14:  80C10000  lwz r6,0(r1)
+	   3: GETL       	R1, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0xFEC8A18:  83460004  lwz r26,4(r6)
+	   7: GETL       	R6, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R26
+	  11: INCEIPL       	$4
+
+	0xFEC8A1C:  8326FFE4  lwz r25,-28(r6)
+	  12: GETL       	R6, t10
+	  13: ADDL       	$0xFFFFFFE4, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R25
+	  16: INCEIPL       	$4
+
+	0xFEC8A20:  7F4803A6  mtlr r26
+	  17: GETL       	R26, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0xFEC8A24:  8366FFEC  lwz r27,-20(r6)
+	  20: GETL       	R6, t16
+	  21: ADDL       	$0xFFFFFFEC, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R27
+	  24: INCEIPL       	$4
+
+	0xFEC8A28:  8346FFE8  lwz r26,-24(r6)
+	  25: GETL       	R6, t20
+	  26: ADDL       	$0xFFFFFFE8, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R26
+	  29: INCEIPL       	$4
+
+	0xFEC8A2C:  8386FFF0  lwz r28,-16(r6)
+	  30: GETL       	R6, t24
+	  31: ADDL       	$0xFFFFFFF0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R28
+	  34: INCEIPL       	$4
+
+	0xFEC8A30:  83A6FFF4  lwz r29,-12(r6)
+	  35: GETL       	R6, t28
+	  36: ADDL       	$0xFFFFFFF4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R29
+	  39: INCEIPL       	$4
+
+	0xFEC8A34:  83C6FFF8  lwz r30,-8(r6)
+	  40: GETL       	R6, t32
+	  41: ADDL       	$0xFFFFFFF8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R30
+	  44: INCEIPL       	$4
+
+	0xFEC8A38:  83E6FFFC  lwz r31,-4(r6)
+	  45: GETL       	R6, t36
+	  46: ADDL       	$0xFFFFFFFC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R31
+	  49: INCEIPL       	$4
+
+	0xFEC8A3C:  7CC13378  or r1,r6,r6
+	  50: GETL       	R6, t40
+	  51: PUTL       	t40, R1
+	  52: INCEIPL       	$4
+
+	0xFEC8A40:  4E800020  blr
+	  53: GETL       	LR, t42
+	  54: JMPo-r       	t42  ($4)
+
+
+
+. 2361 FEC8A10 52
+. 7F 23 CB 78 80 C1 00 00 83 46 00 04 83 26 FF E4 7F 48 03 A6 83 66 FF EC 83 46 FF E8 83 86 FF F0 83 A6 FF F4 83 C6 FF F8 83 E6 FF FC 7C C1 33 78 4E 80 00 20
+==== BB 2362 (0xFEBC540) approx BBs exec'd 0 ====
+
+	0xFEBC540:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFEBC544:  7FE3FB78  or r3,r31,r31
+	   4: GETL       	R31, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFEBC548:  419E0034  bc 12,30,0xFEBC57C
+	   7: Js30o       	$0xFEBC57C
+
+
+
+. 2362 FEBC540 12
+. 2F 83 00 00 7F E3 FB 78 41 9E 00 34
+==== BB 2363 (0xFEBC54C) approx BBs exec'd 0 ====
+
+	0xFEBC54C:  4BFFFF01  bl 0xFEBC44C
+	   0: MOVL       	$0xFEBC550, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFEBC44C  ($4)
+
+
+
+. 2363 FEBC54C 4
+. 4B FF FF 01
+==== BB 2364 __fopen_maybe_mmap(0xFEBC44C) approx BBs exec'd 0 ====
+
+	0xFEBC44C:  8003003C  lwz r0,60(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x3C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEBC450:  7D8802A6  mflr r12
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0xFEBC454:  9421FFF0  stwu r1,-16(r1)
+	   8: GETL       	R1, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xFFFFFFF0, t8
+	  11: PUTL       	t8, R1
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEBC458:  480EB9F9  bl 0xFFA7E50
+	  14: MOVL       	$0xFEBC45C, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2364 FEBC44C 16
+. 80 03 00 3C 7D 88 02 A6 94 21 FF F0 48 0E B9 F9
+==== BB 2365 (0xFEBC45C) approx BBs exec'd 0 ====
+
+	0xFEBC45C:  70090001  andi. r9,r0,0x1
+	   0: GETL       	R0, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEBC460:  93C10008  stw r30,8(r1)
+	   6: GETL       	R30, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x8, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xFEBC464:  7FC802A6  mflr r30
+	  11: GETL       	LR, t8
+	  12: PUTL       	t8, R30
+	  13: INCEIPL       	$4
+
+	0xFEBC468:  7D8803A6  mtlr r12
+	  14: GETL       	R12, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFEBC46C:  4182002C  bc 12,2,0xFEBC498
+	  17: Js02o       	$0xFEBC498
+
+
+
+. 2365 FEBC45C 20
+. 70 09 00 01 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 41 82 00 2C
+==== BB 2366 (0xFEBC498) approx BBs exec'd 0 ====
+
+	0xFEBC498:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0xFEBC49C:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFEBC4A0:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+
+. 2366 FEBC498 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 2367 (0xFEBC550) approx BBs exec'd 0 ====
+
+	0xFEBC550:  7C691B78  or r9,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0xFEBC554:  80A10024  lwz r5,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFEBC558:  7D234B78  or r3,r9,r9
+	   8: GETL       	R9, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFEBC55C:  8361000C  lwz r27,12(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0xC, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R27
+	  15: INCEIPL       	$4
+
+	0xFEBC560:  83810010  lwz r28,16(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x10, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFEBC564:  7CA803A6  mtlr r5
+	  21: GETL       	R5, t16
+	  22: PUTL       	t16, LR
+	  23: INCEIPL       	$4
+
+	0xFEBC568:  83A10014  lwz r29,20(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x14, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R29
+	  28: INCEIPL       	$4
+
+	0xFEBC56C:  83C10018  lwz r30,24(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x18, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R30
+	  33: INCEIPL       	$4
+
+	0xFEBC570:  83E1001C  lwz r31,28(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x1C, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R31
+	  38: INCEIPL       	$4
+
+	0xFEBC574:  38210020  addi r1,r1,32
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x20, t30
+	  41: PUTL       	t30, R1
+	  42: INCEIPL       	$4
+
+	0xFEBC578:  4E800020  blr
+	  43: GETL       	LR, t32
+	  44: JMPo-r       	t32  ($4)
+
+
+
+. 2367 FEBC550 44
+. 7C 69 1B 78 80 A1 00 24 7D 23 4B 78 83 61 00 0C 83 81 00 10 7C A8 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2368 (0xFEBC5A8) approx BBs exec'd 0 ====
+
+	0xFEBC5A8:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEBC5AC:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFEBC5B0:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFEBC5B4:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 2368 FEBC5A8 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 2369 (0xFE8D068) approx BBs exec'd 0 ====
+
+	0xFE8D068:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE8D06C:  7C7B1B79  or. r27,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R27
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE8D070:  4182039C  bc 12,2,0xFE8D40C
+	   8: Js02o       	$0xFE8D40C
+
+
+
+. 2369 FE8D068 12
+. 38 00 00 00 7C 7B 1B 79 41 82 03 9C
+==== BB 2370 (0xFE8D074) approx BBs exec'd 0 ====
+
+	0xFE8D074:  38800002  li r4,2
+	   0: MOVL       	$0x2, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE8D078:  39C00000  li r14,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R14
+	   5: INCEIPL       	$4
+
+	0xFE8D07C:  4803A2E1  bl 0xFEC735C
+	   6: MOVL       	$0xFE8D080, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFEC735C  ($4)
+
+
+
+. 2370 FE8D074 12
+. 38 80 00 02 39 C0 00 00 48 03 A2 E1
+==== BB 2371 __fsetlocking_internal(0xFEC735C) approx BBs exec'd 0 ====
+
+	0xFEC735C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEC7360:  2F040000  cmpi cr6,r4,0
+	   6: GETL       	R4, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFEC7364:  81230000  lwz r9,0(r3)
+	  10: GETL       	R3, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0xFEC7368:  2F840002  cmpi cr7,r4,2
+	  14: GETL       	R4, t12
+	  15: MOVL       	$0x2, t16
+	  16: CMPL       	t12, t16, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFEC736C:  71208000  andi. r0,r9,0x8000
+	  19: GETL       	R9, t18
+	  20: ANDL       	$0x8000, t18
+	  21: PUTL       	t18, R0
+	  22: CMP0L       	t18, t20  (-rSo)
+	  23: ICRFL       	t20, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0xFEC7370:  5529045E  rlwinm r9,r9,0,17,15
+	  25: GETL       	R9, t22
+	  26: ANDL       	$0xFFFF7FFF, t22
+	  27: PUTL       	t22, R9
+	  28: INCEIPL       	$4
+
+	0xFEC7374:  7C000026  mfcr r0
+	  29: GETL       	CR, t24
+	  30: PUTL       	t24, R0
+	  31: INCEIPL       	$4
+
+	0xFEC7378:  54001FFE  rlwinm r0,r0,3,31,31
+	  32: GETL       	R0, t26
+	  33: ROLL       	$0x3, t26
+	  34: ANDL       	$0x1, t26
+	  35: PUTL       	t26, R0
+	  36: INCEIPL       	$4
+
+	0xFEC737C:  20000002  subfic r0,r0,2
+	  37: GETL       	R0, t28
+	  38: MOVL       	$0x2, t30
+	  39: SBBL       	t28, t30  (-wCa)
+	  40: PUTL       	t30, R0
+	  41: INCEIPL       	$4
+
+	0xFEC7380:  419A0010  bc 12,26,0xFEC7390
+	  42: Js26o       	$0xFEC7390
+
+
+
+. 2371 FEC735C 40
+. 94 21 FF F0 2F 04 00 00 81 23 00 00 2F 84 00 02 71 20 80 00 55 29 04 5E 7C 00 00 26 54 00 1F FE 20 00 00 02 41 9A 00 10
+==== BB 2372 (0xFEC7384) approx BBs exec'd 0 ====
+
+	0xFEC7384:  612B8000  ori r11,r9,0x8000
+	   0: GETL       	R9, t0
+	   1: ORL       	$0x8000, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFEC7388:  419E0014  bc 12,30,0xFEC739C
+	   4: Js30o       	$0xFEC739C
+
+
+
+. 2372 FEC7384 8
+. 61 2B 80 00 41 9E 00 14
+==== BB 2373 (0xFEC739C) approx BBs exec'd 0 ====
+
+	0xFEC739C:  91630000  stw r11,0(r3)
+	   0: GETL       	R11, t0
+	   1: GETL       	R3, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFEC73A0:  38210010  addi r1,r1,16
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x10, t4
+	   6: PUTL       	t4, R1
+	   7: INCEIPL       	$4
+
+	0xFEC73A4:  7C030378  or r3,r0,r0
+	   8: GETL       	R0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFEC73A8:  4E800020  blr
+	  11: GETL       	LR, t8
+	  12: JMPo-r       	t8  ($4)
+
+
+
+. 2373 FEC739C 16
+. 91 63 00 00 38 21 00 10 7C 03 03 78 4E 80 00 20
+==== BB 2374 (0xFE8D080) approx BBs exec'd 0 ====
+
+	0xFE8D080:  807B0000  lwz r3,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8D084:  70690010  andi. r9,r3,0x10
+	   4: GETL       	R3, t4
+	   5: ANDL       	$0x10, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE8D088:  40820370  bc 4,2,0xFE8D3F8
+	  10: Jc02o       	$0xFE8D3F8
+
+
+
+. 2374 FE8D080 12
+. 80 7B 00 00 70 69 00 10 40 82 03 70
+==== BB 2375 (0xFE8D08C) approx BBs exec'd 0 ====
+
+	0xFE8D08C:  81FE1D50  lwz r15,7504(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1D50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0xFE8D090:  3B5F0010  addi r26,r31,16
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R26
+	   8: INCEIPL       	$4
+
+	0xFE8D094:  7E0F1214  add r16,r15,r2
+	   9: GETL       	R15, t6
+	  10: GETL       	R2, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R16
+	  13: INCEIPL       	$4
+
+	0xFE8D098:  825E1CF8  lwz r18,7416(r30)
+	  14: GETL       	R30, t10
+	  15: ADDL       	$0x1CF8, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R18
+	  18: INCEIPL       	$4
+
+	0xFE8D09C:  921F01A4  stw r16,420(r31)
+	  19: GETL       	R16, t14
+	  20: GETL       	R31, t16
+	  21: ADDL       	$0x1A4, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFE8D0A0:  7E0F8378  or r15,r16,r16
+	  24: GETL       	R16, t18
+	  25: PUTL       	t18, R15
+	  26: INCEIPL       	$4
+
+	0xFE8D0A4:  921F01A0  stw r16,416(r31)
+	  27: GETL       	R16, t20
+	  28: GETL       	R31, t22
+	  29: ADDL       	$0x1A0, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFE8D0A8:  7F43D378  or r3,r26,r26
+	  32: GETL       	R26, t24
+	  33: PUTL       	t24, R3
+	  34: INCEIPL       	$4
+
+	0xFE8D0AC:  38800190  li r4,400
+	  35: MOVL       	$0x190, t26
+	  36: PUTL       	t26, R4
+	  37: INCEIPL       	$4
+
+	0xFE8D0B0:  7F65DB78  or r5,r27,r27
+	  38: GETL       	R27, t28
+	  39: PUTL       	t28, R5
+	  40: INCEIPL       	$4
+
+	0xFE8D0B4:  4803AFC5  bl 0xFEC8078
+	  41: MOVL       	$0xFE8D0B8, t30
+	  42: PUTL       	t30, LR
+	  43: JMPo-c       	$0xFEC8078  ($4)
+
+
+
+. 2375 FE8D08C 44
+. 81 FE 1D 50 3B 5F 00 10 7E 0F 12 14 82 5E 1C F8 92 1F 01 A4 7E 0F 83 78 92 1F 01 A0 7F 43 D3 78 38 80 01 90 7F 65 DB 78 48 03 AF C5
+==== BB 2376 fgets_unlocked(0xFEC8078) approx BBs exec'd 0 ====
+
+	0xFEC8078:  2C040000  cmpi cr0,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEC807C:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFEC8080:  9421FFE0  stwu r1,-32(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFE0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEC8084:  480DFDCD  bl 0xFFA7E50
+	  13: MOVL       	$0xFEC8088, t10
+	  14: PUTL       	t10, LR
+	  15: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2376 FEC8078 16
+. 2C 04 00 00 7C 08 02 A6 94 21 FF E0 48 0D FD CD
+==== BB 2377 (0xFEC8088) approx BBs exec'd 0 ====
+
+	0xFEC8088:  93E1001C  stw r31,28(r1)
+	   0: GETL       	R31, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x1C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEC808C:  7CBF2B78  or r31,r5,r5
+	   5: GETL       	R5, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0xFEC8090:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEC8094:  38A4FFFF  addi r5,r4,-1
+	  13: GETL       	R4, t10
+	  14: ADDL       	$0xFFFFFFFF, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0xFEC8098:  93C10018  stw r30,24(r1)
+	  17: GETL       	R30, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x18, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFEC809C:  7C7D1B78  or r29,r3,r3
+	  22: GETL       	R3, t16
+	  23: PUTL       	t16, R29
+	  24: INCEIPL       	$4
+
+	0xFEC80A0:  7C641B78  or r4,r3,r3
+	  25: GETL       	R3, t18
+	  26: PUTL       	t18, R4
+	  27: INCEIPL       	$4
+
+	0xFEC80A4:  93810010  stw r28,16(r1)
+	  28: GETL       	R28, t20
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x10, t22
+	  31: STL       	t20, (t22)
+	  32: INCEIPL       	$4
+
+	0xFEC80A8:  90010024  stw r0,36(r1)
+	  33: GETL       	R0, t24
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x24, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0xFEC80AC:  7FC802A6  mflr r30
+	  38: GETL       	LR, t28
+	  39: PUTL       	t28, R30
+	  40: INCEIPL       	$4
+
+	0xFEC80B0:  38C0000A  li r6,10
+	  41: MOVL       	$0xA, t30
+	  42: PUTL       	t30, R6
+	  43: INCEIPL       	$4
+
+	0xFEC80B4:  38E00001  li r7,1
+	  44: MOVL       	$0x1, t32
+	  45: PUTL       	t32, R7
+	  46: INCEIPL       	$4
+
+	0xFEC80B8:  7FE3FB78  or r3,r31,r31
+	  47: GETL       	R31, t34
+	  48: PUTL       	t34, R3
+	  49: INCEIPL       	$4
+
+	0xFEC80BC:  39200000  li r9,0
+	  50: MOVL       	$0x0, t36
+	  51: PUTL       	t36, R9
+	  52: INCEIPL       	$4
+
+	0xFEC80C0:  40810034  bc 4,1,0xFEC80F4
+	  53: Jc01o       	$0xFEC80F4
+
+
+
+. 2377 FEC8088 60
+. 93 E1 00 1C 7C BF 2B 78 93 A1 00 14 38 A4 FF FF 93 C1 00 18 7C 7D 1B 78 7C 64 1B 78 93 81 00 10 90 01 00 24 7F C8 02 A6 38 C0 00 0A 38 E0 00 01 7F E3 FB 78 39 20 00 00 40 81 00 34
+==== BB 2378 (0xFEC80C4) approx BBs exec'd 0 ====
+
+	0xFEC80C4:  811F0000  lwz r8,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0xFEC80C8:  550906F2  rlwinm r9,r8,0,27,25
+	   4: GETL       	R8, t4
+	   5: ANDL       	$0xFFFFFFDF, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFEC80CC:  551C06B4  rlwinm r28,r8,0,26,26
+	   8: GETL       	R8, t6
+	   9: ANDL       	$0x20, t6
+	  10: PUTL       	t6, R28
+	  11: INCEIPL       	$4
+
+	0xFEC80D0:  913F0000  stw r9,0(r31)
+	  12: GETL       	R9, t8
+	  13: GETL       	R31, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFEC80D4:  4BFF56F9  bl 0xFEBD7CC
+	  16: MOVL       	$0xFEC80D8, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0xFEBD7CC  ($4)
+
+
+
+. 2378 FEC80C4 20
+. 81 1F 00 00 55 09 06 F2 55 1C 06 B4 91 3F 00 00 4B FF 56 F9
+==== BB 2379 _IO_getline_internal(0xFEBD7CC) approx BBs exec'd 0 ====
+
+	0xFEBD7CC:  7D0802A6  mflr r8
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0xFEBD7D0:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEBD7D4:  91010014  stw r8,20(r1)
+	   9: GETL       	R8, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEBD7D8:  39000000  li r8,0
+	  14: MOVL       	$0x0, t10
+	  15: PUTL       	t10, R8
+	  16: INCEIPL       	$4
+
+	0xFEBD7DC:  48000015  bl 0xFEBD7F0
+	  17: MOVL       	$0xFEBD7E0, t12
+	  18: PUTL       	t12, LR
+	  19: JMPo-c       	$0xFEBD7F0  ($4)
+
+
+
+. 2379 FEBD7CC 20
+. 7D 08 02 A6 94 21 FF F0 91 01 00 14 39 00 00 00 48 00 00 15
+==== BB 2380 _IO_getline_info_internal(0xFEBD7F0) approx BBs exec'd 0 ====
+
+	0xFEBD7F0:  7D800026  mfcr r12
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0xFEBD7F4:  2D880000  cmpi cr3,r8,0
+	   3: GETL       	R8, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x3, CR
+	   6: INCEIPL       	$4
+
+	0xFEBD7F8:  7C0802A6  mflr r0
+	   7: GETL       	LR, t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFEBD7FC:  9421FFC0  stwu r1,-64(r1)
+	  10: GETL       	R1, t8
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0xFFFFFFC0, t10
+	  13: PUTL       	t10, R1
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFEBD800:  480EA651  bl 0xFFA7E50
+	  16: MOVL       	$0xFEBD804, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2380 FEBD7F0 20
+. 7D 80 00 26 2D 88 00 00 7C 08 02 A6 94 21 FF C0 48 0E A6 51
+==== BB 2381 (0xFEBD804) approx BBs exec'd 0 ====
+
+	0xFEBD804:  92C10018  stw r22,24(r1)
+	   0: GETL       	R22, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEBD808:  92E1001C  stw r23,28(r1)
+	   5: GETL       	R23, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x1C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEBD80C:  7D164378  or r22,r8,r8
+	  10: GETL       	R8, t8
+	  11: PUTL       	t8, R22
+	  12: INCEIPL       	$4
+
+	0xFEBD810:  93010020  stw r24,32(r1)
+	  13: GETL       	R24, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x20, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEBD814:  7C972378  or r23,r4,r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, R23
+	  20: INCEIPL       	$4
+
+	0xFEBD818:  93410028  stw r26,40(r1)
+	  21: GETL       	R26, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x28, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEBD81C:  7CD83378  or r24,r6,r6
+	  26: GETL       	R6, t20
+	  27: PUTL       	t20, R24
+	  28: INCEIPL       	$4
+
+	0xFEBD820:  9361002C  stw r27,44(r1)
+	  29: GETL       	R27, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x2C, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFEBD824:  7C9A2378  or r26,r4,r4
+	  34: GETL       	R4, t26
+	  35: PUTL       	t26, R26
+	  36: INCEIPL       	$4
+
+	0xFEBD828:  93810030  stw r28,48(r1)
+	  37: GETL       	R28, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x30, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFEBD82C:  7C7B1B78  or r27,r3,r3
+	  42: GETL       	R3, t32
+	  43: PUTL       	t32, R27
+	  44: INCEIPL       	$4
+
+	0xFEBD830:  93C10038  stw r30,56(r1)
+	  45: GETL       	R30, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x38, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFEBD834:  7CBC2B78  or r28,r5,r5
+	  50: GETL       	R5, t38
+	  51: PUTL       	t38, R28
+	  52: INCEIPL       	$4
+
+	0xFEBD838:  93E1003C  stw r31,60(r1)
+	  53: GETL       	R31, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x3C, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0xFEBD83C:  7FC802A6  mflr r30
+	  58: GETL       	LR, t44
+	  59: PUTL       	t44, R30
+	  60: INCEIPL       	$4
+
+	0xFEBD840:  93210024  stw r25,36(r1)
+	  61: GETL       	R25, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x24, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0xFEBD844:  7CFF3B78  or r31,r7,r7
+	  66: GETL       	R7, t50
+	  67: PUTL       	t50, R31
+	  68: INCEIPL       	$4
+
+	0xFEBD848:  93A10034  stw r29,52(r1)
+	  69: GETL       	R29, t52
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x34, t54
+	  72: STL       	t52, (t54)
+	  73: INCEIPL       	$4
+
+	0xFEBD84C:  90010044  stw r0,68(r1)
+	  74: GETL       	R0, t56
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x44, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0xFEBD850:  91810014  stw r12,20(r1)
+	  79: GETL       	R12, t60
+	  80: GETL       	R1, t62
+	  81: ADDL       	$0x14, t62
+	  82: STL       	t60, (t62)
+	  83: INCEIPL       	$4
+
+	0xFEBD854:  418E000C  bc 12,14,0xFEBD860
+	  84: Js14o       	$0xFEBD860
+
+
+
+. 2381 FEBD804 84
+. 92 C1 00 18 92 E1 00 1C 7D 16 43 78 93 01 00 20 7C 97 23 78 93 41 00 28 7C D8 33 78 93 61 00 2C 7C 9A 23 78 93 81 00 30 7C 7B 1B 78 93 C1 00 38 7C BC 2B 78 93 E1 00 3C 7F C8 02 A6 93 21 00 24 7C FF 3B 78 93 A1 00 34 90 01 00 44 91 81 00 14 41 8E 00 0C
+==== BB 2382 (0xFEBD860) approx BBs exec'd 0 ====
+
+	0xFEBD860:  80BB0060  lwz r5,96(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFEBD864:  2F850000  cmpi cr7,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFEBD868:  419E0150  bc 12,30,0xFEBD9B8
+	   9: Js30o       	$0xFEBD9B8
+
+
+
+. 2382 FEBD860 12
+. 80 BB 00 60 2F 85 00 00 41 9E 01 50
+==== BB 2383 (0xFEBD9B8) approx BBs exec'd 0 ====
+
+	0xFEBD9B8:  80DE1DC8  lwz r6,7624(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFEBD9BC:  2C060000  cmpi cr0,r6,0
+	   5: GETL       	R6, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFEBD9C0:  4182002C  bc 12,2,0xFEBD9EC
+	   9: Js02o       	$0xFEBD9EC
+
+
+
+. 2383 FEBD9B8 12
+. 80 DE 1D C8 2C 06 00 00 41 82 00 2C
+==== BB 2384 (0xFEBD9C4) approx BBs exec'd 0 ====
+
+	0xFEBD9C4:  3860FFFF  li r3,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEBD9C8:  907B0060  stw r3,96(r27)
+	   3: GETL       	R3, t2
+	   4: GETL       	R27, t4
+	   5: ADDL       	$0x60, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFEBD9CC:  4BFFFEA0  b 0xFEBD86C
+	   8: JMPo       	$0xFEBD86C  ($4)
+
+
+
+. 2384 FEBD9C4 12
+. 38 60 FF FF 90 7B 00 60 4B FF FE A0
+==== BB 2385 (0xFEBD86C) approx BBs exec'd 0 ====
+
+	0xFEBD86C:  2C9C0000  cmpi cr1,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEBD870:  41860070  bc 12,6,0xFEBD8E0
+	   4: Js06o       	$0xFEBD8E0
+
+
+
+. 2385 FEBD86C 8
+. 2C 9C 00 00 41 86 00 70
+==== BB 2386 (0xFEBD874) approx BBs exec'd 0 ====
+
+	0xFEBD874:  2E1F0000  cmpi cr4,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFEBD878:  4D100000  mcrf cr2,cr4
+	   4: ICRFL       	CR, $0x2, CR
+	   5: INCEIPL       	$4
+
+	0xFEBD87C:  83BB0004  lwz r29,4(r27)
+	   6: GETL       	R27, t4
+	   7: ADDL       	$0x4, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R29
+	  10: INCEIPL       	$4
+
+	0xFEBD880:  7F63DB78  or r3,r27,r27
+	  11: GETL       	R27, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFEBD884:  80FB0008  lwz r7,8(r27)
+	  14: GETL       	R27, t10
+	  15: ADDL       	$0x8, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0xFEBD888:  7F04C378  or r4,r24,r24
+	  19: GETL       	R24, t14
+	  20: PUTL       	t14, R4
+	  21: INCEIPL       	$4
+
+	0xFEBD88C:  7FFD3851  subf. r31,r29,r7
+	  22: GETL       	R29, t16
+	  23: GETL       	R7, t18
+	  24: SUBL       	t16, t18
+	  25: PUTL       	t18, R31
+	  26: CMP0L       	t18, t20  (-rSo)
+	  27: ICRFL       	t20, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0xFEBD890:  7F9FE040  cmpl cr7,r31,r28
+	  29: GETL       	R31, t22
+	  30: GETL       	R28, t24
+	  31: CMPUL       	t22, t24, t26  (-rSo)
+	  32: ICRFL       	t26, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0xFEBD894:  40810090  bc 4,1,0xFEBD924
+	  34: Jc01o       	$0xFEBD924
+
+
+
+. 2386 FEBD874 36
+. 2E 1F 00 00 4D 10 00 00 83 BB 00 04 7F 63 DB 78 80 FB 00 08 7F 04 C3 78 7F FD 38 51 7F 9F E0 40 40 81 00 90
+==== BB 2387 (0xFEBD924) approx BBs exec'd 0 ====
+
+	0xFEBD924:  4800E089  bl 0xFECB9AC
+	   0: MOVL       	$0xFEBD928, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFECB9AC  ($4)
+
+
+
+. 2387 FEBD924 4
+. 48 00 E0 89
+==== BB 2388 __GI___uflow(0xFECB9AC) approx BBs exec'd 0 ====
+
+	0xFECB9AC:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFECB9B0:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECB9B4:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECB9B8:  480DC499  bl 0xFFA7E50
+	  14: MOVL       	$0xFECB9BC, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2388 FECB9AC 16
+. 7C 88 02 A6 94 21 FF F0 93 C1 00 08 48 0D C4 99
+==== BB 2389 (0xFECB9BC) approx BBs exec'd 0 ====
+
+	0xFECB9BC:  93E1000C  stw r31,12(r1)
+	   0: GETL       	R31, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECB9C0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFECB9C4:  90810014  stw r4,20(r1)
+	   8: GETL       	R4, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECB9C8:  7C7F1B78  or r31,r3,r3
+	  13: GETL       	R3, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0xFECB9CC:  88030046  lbz r0,70(r3)
+	  16: GETL       	R3, t12
+	  17: ADDL       	$0x46, t12
+	  18: LDB       	(t12), t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0xFECB9D0:  2F800000  cmpi cr7,r0,0
+	  21: GETL       	R0, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0xFECB9D4:  409E0030  bc 4,30,0xFECBA04
+	  25: Jc30o       	$0xFECBA04
+
+
+
+. 2389 FECB9BC 28
+. 93 E1 00 0C 7F C8 02 A6 90 81 00 14 7C 7F 1B 78 88 03 00 46 2F 80 00 00 40 9E 00 30
+==== BB 2390 (0xFECB9D8) approx BBs exec'd 0 ====
+
+	0xFECB9D8:  80BE1DC8  lwz r5,7624(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFECB9DC:  2C050000  cmpi cr0,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFECB9E0:  41820180  bc 12,2,0xFECBB60
+	   9: Js02o       	$0xFECBB60
+
+
+
+. 2390 FECB9D8 12
+. 80 BE 1D C8 2C 05 00 00 41 82 01 80
+==== BB 2391 (0xFECB9E4) approx BBs exec'd 0 ====
+
+	0xFECB9E4:  80630060  lwz r3,96(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECB9E8:  2C830000  cmpi cr1,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFECB9EC:  4086000C  bc 4,6,0xFECB9F8
+	   9: Jc06o       	$0xFECB9F8
+
+
+
+. 2391 FECB9E4 12
+. 80 63 00 60 2C 83 00 00 40 86 00 0C
+==== BB 2392 (0xFECB9F8) approx BBs exec'd 0 ====
+
+	0xFECB9F8:  2F03FFFF  cmpi cr6,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFECB9FC:  3800FFFF  li r0,-1
+	   5: MOVL       	$0xFFFFFFFF, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFECBA00:  409A00E0  bc 4,26,0xFECBAE0
+	   8: Jc26o       	$0xFECBAE0
+
+
+
+. 2392 FECB9F8 12
+. 2F 03 FF FF 38 00 FF FF 40 9A 00 E0
+==== BB 2393 (0xFECBA04) approx BBs exec'd 0 ====
+
+	0xFECBA04:  807F0060  lwz r3,96(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECBA08:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFECBA0C:  419E0084  bc 12,30,0xFECBA90
+	   9: Js30o       	$0xFECBA90
+
+
+
+. 2393 FECBA04 12
+. 80 7F 00 60 2F 83 00 00 41 9E 00 84
+==== BB 2394 (0xFECBA10) approx BBs exec'd 0 ====
+
+	0xFECBA10:  811F0000  lwz r8,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0xFECBA14:  71090800  andi. r9,r8,0x800
+	   4: GETL       	R8, t4
+	   5: ANDL       	$0x800, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECBA18:  40820098  bc 4,2,0xFECBAB0
+	  10: Jc02o       	$0xFECBAB0
+
+
+
+. 2394 FECBA10 12
+. 81 1F 00 00 71 09 08 00 40 82 00 98
+==== BB 2395 (0xFECBA1C) approx BBs exec'd 0 ====
+
+	0xFECBA1C:  813F0004  lwz r9,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECBA20:  809F0008  lwz r4,8(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFECBA24:  7F092040  cmpl cr6,r9,r4
+	  10: GETL       	R9, t8
+	  11: GETL       	R4, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0xFECBA28:  419800AC  bc 12,24,0xFECBAD4
+	  15: Js24o       	$0xFECBAD4
+
+
+
+. 2395 FECBA1C 16
+. 81 3F 00 04 80 9F 00 08 7F 09 20 40 41 98 00 AC
+==== BB 2396 (0xFECBA2C) approx BBs exec'd 0 ====
+
+	0xFECBA2C:  815F0000  lwz r10,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFECBA30:  71490100  andi. r9,r10,0x100
+	   4: GETL       	R10, t4
+	   5: ANDL       	$0x100, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECBA34:  408200D4  bc 4,2,0xFECBB08
+	  10: Jc02o       	$0xFECBB08
+
+
+
+. 2396 FECBA2C 12
+. 81 5F 00 00 71 49 01 00 40 82 00 D4
+==== BB 2397 (0xFECBA38) approx BBs exec'd 0 ====
+
+	0xFECBA38:  817F0030  lwz r11,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFECBA3C:  2C8B0000  cmpi cr1,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFECBA40:  408600E4  bc 4,6,0xFECBB24
+	   9: Jc06o       	$0xFECBB24
+
+
+
+. 2397 FECBA38 12
+. 81 7F 00 30 2C 8B 00 00 40 86 00 E4
+==== BB 2398 (0xFECBA44) approx BBs exec'd 0 ====
+
+	0xFECBA44:  819F0024  lwz r12,36(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFECBA48:  2C0C0000  cmpi cr0,r12,0
+	   5: GETL       	R12, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFECBA4C:  408200B0  bc 4,2,0xFECBAFC
+	   9: Jc02o       	$0xFECBAFC
+
+
+
+. 2398 FECBA44 12
+. 81 9F 00 24 2C 0C 00 00 40 82 00 B0
+==== BB 2399 (0xFECBA50) approx BBs exec'd 0 ====
+
+	0xFECBA50:  88DF0046  lbz r6,70(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFECBA54:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFECBA58:  7CC00774  extsb r0,r6
+	   8: GETB       	R6, t6
+	   9: WIDENL_Bs       	_st6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0xFECBA5C:  7CA0FA14  add r5,r0,r31
+	  12: GETL       	R0, t8
+	  13: GETL       	R31, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0xFECBA60:  80850098  lwz r4,152(r5)
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x98, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R4
+	  21: INCEIPL       	$4
+
+	0xFECBA64:  83E40014  lwz r31,20(r4)
+	  22: GETL       	R4, t16
+	  23: ADDL       	$0x14, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R31
+	  26: INCEIPL       	$4
+
+	0xFECBA68:  7FE903A6  mtctr r31
+	  27: GETL       	R31, t20
+	  28: PUTL       	t20, CTR
+	  29: INCEIPL       	$4
+
+	0xFECBA6C:  4E800421  bctrl
+	  30: MOVL       	$0xFECBA70, t22
+	  31: PUTL       	t22, LR
+	  32: GETL       	CTR, t24
+	  33: JMPo-c       	t24  ($4)
+
+
+
+. 2399 FECBA50 32
+. 88 DF 00 46 7F E3 FB 78 7C C0 07 74 7C A0 FA 14 80 85 00 98 83 E4 00 14 7F E9 03 A6 4E 80 04 21
+==== BB 2400 _IO_default_uflow_internal(0xFECBCF0) approx BBs exec'd 0 ====
+
+	0xFECBCF0:  7CC802A6  mflr r6
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFECBCF4:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECBCF8:  93E10008  stw r31,8(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECBCFC:  7C7F1B78  or r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0xFECBD00:  90C10014  stw r6,20(r1)
+	  17: GETL       	R6, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFECBD04:  88A30046  lbz r5,70(r3)
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x46, t16
+	  24: LDB       	(t16), t18
+	  25: PUTL       	t18, R5
+	  26: INCEIPL       	$4
+
+	0xFECBD08:  7CA00774  extsb r0,r5
+	  27: GETB       	R5, t20
+	  28: WIDENL_Bs       	_st20
+	  29: PUTL       	t20, R0
+	  30: INCEIPL       	$4
+
+	0xFECBD0C:  7D201A14  add r9,r0,r3
+	  31: GETL       	R0, t22
+	  32: GETL       	R3, t24
+	  33: ADDL       	t22, t24
+	  34: PUTL       	t24, R9
+	  35: INCEIPL       	$4
+
+	0xFECBD10:  80890098  lwz r4,152(r9)
+	  36: GETL       	R9, t26
+	  37: ADDL       	$0x98, t26
+	  38: LDL       	(t26), t28
+	  39: PUTL       	t28, R4
+	  40: INCEIPL       	$4
+
+	0xFECBD14:  81640010  lwz r11,16(r4)
+	  41: GETL       	R4, t30
+	  42: ADDL       	$0x10, t30
+	  43: LDL       	(t30), t32
+	  44: PUTL       	t32, R11
+	  45: INCEIPL       	$4
+
+	0xFECBD18:  7D6903A6  mtctr r11
+	  46: GETL       	R11, t34
+	  47: PUTL       	t34, CTR
+	  48: INCEIPL       	$4
+
+	0xFECBD1C:  4E800421  bctrl
+	  49: MOVL       	$0xFECBD20, t36
+	  50: PUTL       	t36, LR
+	  51: GETL       	CTR, t38
+	  52: JMPo-c       	t38  ($4)
+
+
+
+. 2400 FECBCF0 48
+. 7C C8 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 90 C1 00 14 88 A3 00 46 7C A0 07 74 7D 20 1A 14 80 89 00 98 81 64 00 10 7D 69 03 A6 4E 80 04 21
+==== BB 2401 _IO_file_underflow@@GLIBC_2.1(0xFEC8EB8) approx BBs exec'd 0 ====
+
+	0xFEC8EB8:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEC8EBC:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEC8EC0:  480DEF91  bl 0xFFA7E50
+	   9: MOVL       	$0xFEC8EC4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2401 FEC8EB8 12
+. 7C 08 02 A6 94 21 FF E0 48 0D EF 91
+==== BB 2402 (0xFEC8EC4) approx BBs exec'd 0 ====
+
+	0xFEC8EC4:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEC8EC8:  93E1001C  stw r31,28(r1)
+	   5: GETL       	R31, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x1C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEC8ECC:  7FC802A6  mflr r30
+	  10: GETL       	LR, t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xFEC8ED0:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEC8ED4:  7C7F1B78  or r31,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0xFEC8ED8:  81630000  lwz r11,0(r3)
+	  21: GETL       	R3, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R11
+	  24: INCEIPL       	$4
+
+	0xFEC8EDC:  9361000C  stw r27,12(r1)
+	  25: GETL       	R27, t20
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0xC, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0xFEC8EE0:  71600004  andi. r0,r11,0x4
+	  30: GETL       	R11, t24
+	  31: ANDL       	$0x4, t24
+	  32: PUTL       	t24, R0
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x0, CR
+	  35: INCEIPL       	$4
+
+	0xFEC8EE4:  93810010  stw r28,16(r1)
+	  36: GETL       	R28, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x10, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0xFEC8EE8:  93A10014  stw r29,20(r1)
+	  41: GETL       	R29, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x14, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0xFEC8EEC:  40820284  bc 4,2,0xFEC9170
+	  46: Jc02o       	$0xFEC9170
+
+
+
+. 2402 FEC8EC4 44
+. 93 C1 00 18 93 E1 00 1C 7F C8 02 A6 90 01 00 24 7C 7F 1B 78 81 63 00 00 93 61 00 0C 71 60 00 04 93 81 00 10 93 A1 00 14 40 82 02 84
+==== BB 2403 (0xFEC8EF0) approx BBs exec'd 0 ====
+
+	0xFEC8EF0:  80630004  lwz r3,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEC8EF4:  80BF0008  lwz r5,8(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFEC8EF8:  7F832840  cmpl cr7,r3,r5
+	  10: GETL       	R3, t8
+	  11: GETL       	R5, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFEC8EFC:  409C002C  bc 4,28,0xFEC8F28
+	  15: Jc28o       	$0xFEC8F28
+
+
+
+. 2403 FEC8EF0 16
+. 80 63 00 04 80 BF 00 08 7F 83 28 40 40 9C 00 2C
+==== BB 2404 (0xFEC8F28) approx BBs exec'd 0 ====
+
+	0xFEC8F28:  80DF001C  lwz r6,28(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFEC8F2C:  2C860000  cmpi cr1,r6,0
+	   5: GETL       	R6, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFEC8F30:  4086001C  bc 4,6,0xFEC8F4C
+	   9: Jc06o       	$0xFEC8F4C
+
+
+
+. 2404 FEC8F28 12
+. 80 DF 00 1C 2C 86 00 00 40 86 00 1C
+==== BB 2405 (0xFEC8F34) approx BBs exec'd 0 ====
+
+	0xFEC8F34:  807F0024  lwz r3,36(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEC8F38:  2F030000  cmpi cr6,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFEC8F3C:  409A0194  bc 4,26,0xFEC90D0
+	   9: Jc26o       	$0xFEC90D0
+
+
+
+. 2405 FEC8F34 12
+. 80 7F 00 24 2F 03 00 00 40 9A 01 94
+==== BB 2406 (0xFEC8F40) approx BBs exec'd 0 ====
+
+	0xFEC8F40:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC8F44:  48002D05  bl 0xFECBC48
+	   3: MOVL       	$0xFEC8F48, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFECBC48  ($4)
+
+
+
+. 2406 FEC8F40 8
+. 7F E3 FB 78 48 00 2D 05
+==== BB 2407 _IO_doallocbuf_internal(0xFECBC48) approx BBs exec'd 0 ====
+
+	0xFECBC48:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFECBC4C:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECBC50:  93E10008  stw r31,8(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECBC54:  7C7F1B78  or r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0xFECBC58:  90810014  stw r4,20(r1)
+	  17: GETL       	R4, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFECBC5C:  8003001C  lwz r0,28(r3)
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x1C, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R0
+	  26: INCEIPL       	$4
+
+	0xFECBC60:  2F800000  cmpi cr7,r0,0
+	  27: GETL       	R0, t20
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x7, CR
+	  30: INCEIPL       	$4
+
+	0xFECBC64:  409E0040  bc 4,30,0xFECBCA4
+	  31: Jc30o       	$0xFECBCA4
+
+
+
+. 2407 FECBC48 32
+. 7C 88 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 90 81 00 14 80 03 00 1C 2F 80 00 00 40 9E 00 40
+==== BB 2408 (0xFECBC68) approx BBs exec'd 0 ====
+
+	0xFECBC68:  80A30000  lwz r5,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFECBC6C:  70A90002  andi. r9,r5,0x2
+	   4: GETL       	R5, t4
+	   5: ANDL       	$0x2, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECBC70:  41820010  bc 12,2,0xFECBC80
+	  10: Js02o       	$0xFECBC80
+
+
+
+. 2408 FECBC68 12
+. 80 A3 00 00 70 A9 00 02 41 82 00 10
+==== BB 2409 (0xFECBC80) approx BBs exec'd 0 ====
+
+	0xFECBC80:  895F0046  lbz r10,70(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFECBC84:  7D480774  extsb r8,r10
+	   5: GETB       	R10, t4
+	   6: WIDENL_Bs       	_st4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0xFECBC88:  7D28FA14  add r9,r8,r31
+	   9: GETL       	R8, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFECBC8C:  80E90098  lwz r7,152(r9)
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x98, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0xFECBC90:  81670034  lwz r11,52(r7)
+	  19: GETL       	R7, t14
+	  20: ADDL       	$0x34, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R11
+	  23: INCEIPL       	$4
+
+	0xFECBC94:  7D6903A6  mtctr r11
+	  24: GETL       	R11, t18
+	  25: PUTL       	t18, CTR
+	  26: INCEIPL       	$4
+
+	0xFECBC98:  4E800421  bctrl
+	  27: MOVL       	$0xFECBC9C, t20
+	  28: PUTL       	t20, LR
+	  29: GETL       	CTR, t22
+	  30: JMPo-c       	t22  ($4)
+
+
+
+. 2409 FECBC80 28
+. 89 5F 00 46 7D 48 07 74 7D 28 FA 14 80 E9 00 98 81 67 00 34 7D 69 03 A6 4E 80 04 21
+==== BB 2410 _IO_file_doallocate_internal(0xFEBB788) approx BBs exec'd 0 ====
+
+	0xFEBB788:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEBB78C:  9421FF70  stwu r1,-144(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF70, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEBB790:  93A10084  stw r29,132(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x84, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEBB794:  3BA02000  li r29,8192
+	  14: MOVL       	$0x2000, t10
+	  15: PUTL       	t10, R29
+	  16: INCEIPL       	$4
+
+	0xFEBB798:  93E1008C  stw r31,140(r1)
+	  17: GETL       	R31, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x8C, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFEBB79C:  7C7F1B78  or r31,r3,r3
+	  22: GETL       	R3, t16
+	  23: PUTL       	t16, R31
+	  24: INCEIPL       	$4
+
+	0xFEBB7A0:  90810094  stw r4,148(r1)
+	  25: GETL       	R4, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x94, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFEBB7A4:  80030038  lwz r0,56(r3)
+	  30: GETL       	R3, t22
+	  31: ADDL       	$0x38, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFEBB7A8:  93C10088  stw r30,136(r1)
+	  35: GETL       	R30, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x88, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFEBB7AC:  2F800000  cmpi cr7,r0,0
+	  40: GETL       	R0, t30
+	  41: CMP0L       	t30, t32  (-rSo)
+	  42: ICRFL       	t32, $0x7, CR
+	  43: INCEIPL       	$4
+
+	0xFEBB7B0:  419C004C  bc 12,28,0xFEBB7FC
+	  44: Js28o       	$0xFEBB7FC
+
+
+
+. 2410 FEBB788 44
+. 7C 88 02 A6 94 21 FF 70 93 A1 00 84 3B A0 20 00 93 E1 00 8C 7C 7F 1B 78 90 81 00 94 80 03 00 38 93 C1 00 88 2F 80 00 00 41 9C 00 4C
+==== BB 2411 (0xFEBB7B4) approx BBs exec'd 0 ====
+
+	0xFEBB7B4:  88E30046  lbz r7,70(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFEBB7B8:  38810010  addi r4,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFEBB7BC:  7CE60774  extsb r6,r7
+	   9: GETB       	R7, t6
+	  10: WIDENL_Bs       	_st6
+	  11: PUTL       	t6, R6
+	  12: INCEIPL       	$4
+
+	0xFEBB7C0:  7D261A14  add r9,r6,r3
+	  13: GETL       	R6, t8
+	  14: GETL       	R3, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0xFEBB7C4:  80A90098  lwz r5,152(r9)
+	  18: GETL       	R9, t12
+	  19: ADDL       	$0x98, t12
+	  20: LDL       	(t12), t14
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0xFEBB7C8:  81650048  lwz r11,72(r5)
+	  23: GETL       	R5, t16
+	  24: ADDL       	$0x48, t16
+	  25: LDL       	(t16), t18
+	  26: PUTL       	t18, R11
+	  27: INCEIPL       	$4
+
+	0xFEBB7CC:  7D6903A6  mtctr r11
+	  28: GETL       	R11, t20
+	  29: PUTL       	t20, CTR
+	  30: INCEIPL       	$4
+
+	0xFEBB7D0:  4E800421  bctrl
+	  31: MOVL       	$0xFEBB7D4, t22
+	  32: PUTL       	t22, LR
+	  33: GETL       	CTR, t24
+	  34: JMPo-c       	t24  ($4)
+
+
+
+. 2411 FEBB7B4 32
+. 88 E3 00 46 38 81 00 10 7C E6 07 74 7D 26 1A 14 80 A9 00 98 81 65 00 48 7D 69 03 A6 4E 80 04 21
+==== BB 2412 _IO_file_stat_internal(0xFECA4EC) approx BBs exec'd 0 ====
+
+	0xFECA4EC:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFECA4F0:  7C852378  or r5,r4,r4
+	   6: GETL       	R4, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFECA4F4:  80030038  lwz r0,56(r3)
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x38, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0xFECA4F8:  38210010  addi r1,r1,16
+	  14: GETL       	R1, t10
+	  15: ADDL       	$0x10, t10
+	  16: PUTL       	t10, R1
+	  17: INCEIPL       	$4
+
+	0xFECA4FC:  38600003  li r3,3
+	  18: MOVL       	$0x3, t12
+	  19: PUTL       	t12, R3
+	  20: INCEIPL       	$4
+
+	0xFECA500:  7C040378  or r4,r0,r0
+	  21: GETL       	R0, t14
+	  22: PUTL       	t14, R4
+	  23: INCEIPL       	$4
+
+	0xFECA504:  48056DBC  b 0xFF212C0
+	  24: JMPo       	$0xFF212C0  ($4)
+
+
+
+. 2412 FECA4EC 28
+. 94 21 FF F0 7C 85 23 78 80 03 00 38 38 21 00 10 38 60 00 03 7C 04 03 78 48 05 6D BC
+==== BB 2413 (0xFEBB7D4) approx BBs exec'd 0 ====
+
+	0xFEBB7D4:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEBB7D8:  41800024  bc 12,0,0xFEBB7FC
+	   4: Js00o       	$0xFEBB7FC
+
+
+
+. 2413 FEBB7D4 8
+. 2C 03 00 00 41 80 00 24
+==== BB 2414 (0xFEBB7DC) approx BBs exec'd 0 ====
+
+	0xFEBB7DC:  81010020  lwz r8,32(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFEBB7E0:  55030426  rlwinm r3,r8,0,16,19
+	   5: GETL       	R8, t4
+	   6: ANDL       	$0xF000, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFEBB7E4:  2C832000  cmpi cr1,r3,8192
+	   9: GETL       	R3, t6
+	  10: MOVL       	$0x2000, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0xFEBB7E8:  41860078  bc 12,6,0xFEBB860
+	  14: Js06o       	$0xFEBB860
+
+
+
+. 2414 FEBB7DC 16
+. 81 01 00 20 55 03 04 26 2C 83 20 00 41 86 00 78
+==== BB 2415 (0xFEBB7EC) approx BBs exec'd 0 ====
+
+	0xFEBB7EC:  80010048  lwz r0,72(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEBB7F0:  2C000000  cmpi cr0,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFEBB7F4:  40810008  bc 4,1,0xFEBB7FC
+	   9: Jc01o       	$0xFEBB7FC
+
+
+
+. 2415 FEBB7EC 12
+. 80 01 00 48 2C 00 00 00 40 81 00 08
+==== BB 2416 (0xFEBB7F8) approx BBs exec'd 0 ====
+
+	0xFEBB7F8:  7C1D0378  or r29,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFEBB7FC:  399D0FFF  addi r12,r29,4095
+	   3: GETL       	R29, t2
+	   4: ADDL       	$0xFFF, t2
+	   5: PUTL       	t2, R12
+	   6: INCEIPL       	$4
+
+	0xFEBB800:  38600000  li r3,0
+	   7: MOVL       	$0x0, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0xFEBB804:  55840026  rlwinm r4,r12,0,0,19
+	  10: GETL       	R12, t6
+	  11: ANDL       	$0xFFFFF000, t6
+	  12: PUTL       	t6, R4
+	  13: INCEIPL       	$4
+
+	0xFEBB808:  38A00003  li r5,3
+	  14: MOVL       	$0x3, t8
+	  15: PUTL       	t8, R5
+	  16: INCEIPL       	$4
+
+	0xFEBB80C:  38C00022  li r6,34
+	  17: MOVL       	$0x22, t10
+	  18: PUTL       	t10, R6
+	  19: INCEIPL       	$4
+
+	0xFEBB810:  38E0FFFF  li r7,-1
+	  20: MOVL       	$0xFFFFFFFF, t12
+	  21: PUTL       	t12, R7
+	  22: INCEIPL       	$4
+
+	0xFEBB814:  39000000  li r8,0
+	  23: MOVL       	$0x0, t14
+	  24: PUTL       	t14, R8
+	  25: INCEIPL       	$4
+
+	0xFEBB818:  480729A1  bl 0xFF2E1B8
+	  26: MOVL       	$0xFEBB81C, t16
+	  27: PUTL       	t16, LR
+	  28: JMPo-c       	$0xFF2E1B8  ($4)
+
+
+
+. 2416 FEBB7F8 36
+. 7C 1D 03 78 39 9D 0F FF 38 60 00 00 55 84 00 26 38 A0 00 03 38 C0 00 22 38 E0 FF FF 39 00 00 00 48 07 29 A1
+==== BB 2417 mmap(0xFF2E1B8) approx BBs exec'd 0 ====
+
+	0xFF2E1B8:  3800005A  li r0,90
+	   0: MOVL       	$0x5A, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF2E1BC:  44000002  sc
+	   3: JMPo-sys       	$0xFF2E1C0  ($4)
+
+
+
+. 2417 FF2E1B8 8
+. 38 00 00 5A 44 00 00 02
+==== BB 2418 (0xFF2E1C0) approx BBs exec'd 0 ====
+
+	0xFF2E1C0:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 2418 FF2E1C0 4
+. 4C A3 00 20
+==== BB 2419 (0xFEBB81C) approx BBs exec'd 0 ====
+
+	0xFEBB81C:  3800FFFF  li r0,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEBB820:  2C83FFFF  cmpi cr1,r3,-1
+	   3: GETL       	R3, t2
+	   4: MOVL       	$0xFFFFFFFF, t6
+	   5: CMPL       	t2, t6, t4  (-rSo)
+	   6: ICRFL       	t4, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFEBB824:  4186001C  bc 12,6,0xFEBB840
+	   8: Js06o       	$0xFEBB840
+
+
+
+. 2419 FEBB81C 12
+. 38 00 FF FF 2C 83 FF FF 41 86 00 1C
+==== BB 2420 (0xFEBB828) approx BBs exec'd 0 ====
+
+	0xFEBB828:  7C641B78  or r4,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEBB82C:  38C00001  li r6,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFEBB830:  7FE3FB78  or r3,r31,r31
+	   6: GETL       	R31, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFEBB834:  7CA4EA14  add r5,r4,r29
+	   9: GETL       	R4, t6
+	  10: GETL       	R29, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0xFEBB838:  48010345  bl 0xFECBB7C
+	  14: MOVL       	$0xFEBB83C, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFECBB7C  ($4)
+
+
+
+. 2420 FEBB828 20
+. 7C 64 1B 78 38 C0 00 01 7F E3 FB 78 7C A4 EA 14 48 01 03 45
+==== BB 2421 _IO_setb_internal(0xFECBB7C) approx BBs exec'd 0 ====
+
+	0xFECBB7C:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFECBB80:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECBB84:  9361000C  stw r27,12(r1)
+	   9: GETL       	R27, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECBB88:  7C9B2378  or r27,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R27
+	  16: INCEIPL       	$4
+
+	0xFECBB8C:  93810010  stw r28,16(r1)
+	  17: GETL       	R28, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFECBB90:  7CBC2B78  or r28,r5,r5
+	  22: GETL       	R5, t16
+	  23: PUTL       	t16, R28
+	  24: INCEIPL       	$4
+
+	0xFECBB94:  90010024  stw r0,36(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x24, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFECBB98:  8123001C  lwz r9,28(r3)
+	  30: GETL       	R3, t22
+	  31: ADDL       	$0x1C, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R9
+	  34: INCEIPL       	$4
+
+	0xFECBB9C:  93A10014  stw r29,20(r1)
+	  35: GETL       	R29, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x14, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFECBBA0:  7CDD3378  or r29,r6,r6
+	  40: GETL       	R6, t30
+	  41: PUTL       	t30, R29
+	  42: INCEIPL       	$4
+
+	0xFECBBA4:  2F890000  cmpi cr7,r9,0
+	  43: GETL       	R9, t32
+	  44: CMP0L       	t32, t34  (-rSo)
+	  45: ICRFL       	t34, $0x7, CR
+	  46: INCEIPL       	$4
+
+	0xFECBBA8:  93E1001C  stw r31,28(r1)
+	  47: GETL       	R31, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x1C, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0xFECBBAC:  93C10018  stw r30,24(r1)
+	  52: GETL       	R30, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x18, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0xFECBBB0:  7C7F1B78  or r31,r3,r3
+	  57: GETL       	R3, t44
+	  58: PUTL       	t44, R31
+	  59: INCEIPL       	$4
+
+	0xFECBBB4:  419E0028  bc 12,30,0xFECBBDC
+	  60: Js30o       	$0xFECBBDC
+
+
+
+. 2421 FECBB7C 60
+. 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C 9B 23 78 93 81 00 10 7C BC 2B 78 90 01 00 24 81 23 00 1C 93 A1 00 14 7C DD 33 78 2F 89 00 00 93 E1 00 1C 93 C1 00 18 7C 7F 1B 78 41 9E 00 28
+==== BB 2422 (0xFECBBDC) approx BBs exec'd 0 ====
+
+	0xFECBBDC:  801F0000  lwz r0,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFECBBE0:  2C9D0000  cmpi cr1,r29,0
+	   4: GETL       	R29, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFECBBE4:  5409003C  rlwinm r9,r0,0,0,30
+	   8: GETL       	R0, t8
+	   9: ANDL       	$0xFFFFFFFE, t8
+	  10: PUTL       	t8, R9
+	  11: INCEIPL       	$4
+
+	0xFECBBE8:  937F001C  stw r27,28(r31)
+	  12: GETL       	R27, t10
+	  13: GETL       	R31, t12
+	  14: ADDL       	$0x1C, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0xFECBBEC:  60000001  ori r0,r0,0x1
+	  17: GETL       	R0, t14
+	  18: ORL       	$0x1, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0xFECBBF0:  939F0020  stw r28,32(r31)
+	  21: GETL       	R28, t16
+	  22: GETL       	R31, t18
+	  23: ADDL       	$0x20, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFECBBF4:  4186002C  bc 12,6,0xFECBC20
+	  26: Js06o       	$0xFECBC20
+
+
+
+. 2422 FECBBDC 28
+. 80 1F 00 00 2C 9D 00 00 54 09 00 3C 93 7F 00 1C 60 00 00 01 93 9F 00 20 41 86 00 2C
+==== BB 2423 (0xFECBBF8) approx BBs exec'd 0 ====
+
+	0xFECBBF8:  80610024  lwz r3,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECBBFC:  913F0000  stw r9,0(r31)
+	   5: GETL       	R9, t4
+	   6: GETL       	R31, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFECBC00:  8361000C  lwz r27,12(r1)
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R27
+	  13: INCEIPL       	$4
+
+	0xFECBC04:  7C6803A6  mtlr r3
+	  14: GETL       	R3, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFECBC08:  83810010  lwz r28,16(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x10, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R28
+	  21: INCEIPL       	$4
+
+	0xFECBC0C:  83A10014  lwz r29,20(r1)
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R29
+	  26: INCEIPL       	$4
+
+	0xFECBC10:  83C10018  lwz r30,24(r1)
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x18, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R30
+	  31: INCEIPL       	$4
+
+	0xFECBC14:  83E1001C  lwz r31,28(r1)
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x1C, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R31
+	  36: INCEIPL       	$4
+
+	0xFECBC18:  38210020  addi r1,r1,32
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x20, t30
+	  39: PUTL       	t30, R1
+	  40: INCEIPL       	$4
+
+	0xFECBC1C:  4E800020  blr
+	  41: GETL       	LR, t32
+	  42: JMPo-r       	t32  ($4)
+
+
+
+. 2423 FECBBF8 40
+. 80 61 00 24 91 3F 00 00 83 61 00 0C 7C 68 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2424 (0xFEBB83C) approx BBs exec'd 0 ====
+
+	0xFEBB83C:  38000001  li r0,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEBB840:  83A10094  lwz r29,148(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x94, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFEBB844:  7C030378  or r3,r0,r0
+	   8: GETL       	R0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFEBB848:  83C10088  lwz r30,136(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x88, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R30
+	  15: INCEIPL       	$4
+
+	0xFEBB84C:  7FA803A6  mtlr r29
+	  16: GETL       	R29, t12
+	  17: PUTL       	t12, LR
+	  18: INCEIPL       	$4
+
+	0xFEBB850:  83E1008C  lwz r31,140(r1)
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x8C, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R31
+	  23: INCEIPL       	$4
+
+	0xFEBB854:  83A10084  lwz r29,132(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x84, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R29
+	  28: INCEIPL       	$4
+
+	0xFEBB858:  38210090  addi r1,r1,144
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x90, t22
+	  31: PUTL       	t22, R1
+	  32: INCEIPL       	$4
+
+	0xFEBB85C:  4E800020  blr
+	  33: GETL       	LR, t24
+	  34: JMPo-r       	t24  ($4)
+
+
+
+. 2424 FEBB83C 36
+. 38 00 00 01 83 A1 00 94 7C 03 03 78 83 C1 00 88 7F A8 03 A6 83 E1 00 8C 83 A1 00 84 38 21 00 90 4E 80 00 20
+==== BB 2425 (0xFECBC9C) approx BBs exec'd 0 ====
+
+	0xFECBC9C:  2F03FFFF  cmpi cr6,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFECBCA0:  419A0018  bc 12,26,0xFECBCB8
+	   5: Js26o       	$0xFECBCB8
+
+
+
+. 2425 FECBC9C 8
+. 2F 03 FF FF 41 9A 00 18
+==== BB 2426 (0xFECBCA4) approx BBs exec'd 0 ====
+
+	0xFECBCA4:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECBCA8:  83E10008  lwz r31,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0xFECBCAC:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFECBCB0:  7C6803A6  mtlr r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFECBCB4:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 2426 FECBCA4 20
+. 80 61 00 14 83 E1 00 08 38 21 00 10 7C 68 03 A6 4E 80 00 20
+==== BB 2427 (0xFEC8F48) approx BBs exec'd 0 ====
+
+	0xFEC8F48:  817F0000  lwz r11,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFEC8F4C:  71600202  andi. r0,r11,0x202
+	   4: GETL       	R11, t4
+	   5: ANDL       	$0x202, t4
+	   6: PUTL       	t4, R0
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEC8F50:  4182006C  bc 12,2,0xFEC8FBC
+	  10: Js02o       	$0xFEC8FBC
+
+
+
+. 2427 FEC8F48 12
+. 81 7F 00 00 71 60 02 02 41 82 00 6C
+==== BB 2428 (0xFEC8FBC) approx BBs exec'd 0 ====
+
+	0xFEC8FBC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC8FC0:  480024A5  bl 0xFECB464
+	   3: MOVL       	$0xFEC8FC4, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFECB464  ($4)
+
+
+
+. 2428 FEC8FBC 8
+. 7F E3 FB 78 48 00 24 A5
+==== BB 2429 _IO_switch_to_get_mode_internal(0xFECB464) approx BBs exec'd 0 ====
+
+	0xFECB464:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFECB468:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECB46C:  93E10008  stw r31,8(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECB470:  7C7F1B78  or r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0xFECB474:  90810014  stw r4,20(r1)
+	  17: GETL       	R4, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFECB478:  3880FFFF  li r4,-1
+	  22: MOVL       	$0xFFFFFFFF, t16
+	  23: PUTL       	t16, R4
+	  24: INCEIPL       	$4
+
+	0xFECB47C:  81630014  lwz r11,20(r3)
+	  25: GETL       	R3, t18
+	  26: ADDL       	$0x14, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R11
+	  29: INCEIPL       	$4
+
+	0xFECB480:  80030010  lwz r0,16(r3)
+	  30: GETL       	R3, t22
+	  31: ADDL       	$0x10, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFECB484:  7F8B0040  cmpl cr7,r11,r0
+	  35: GETL       	R11, t26
+	  36: GETL       	R0, t28
+	  37: CMPUL       	t26, t28, t30  (-rSo)
+	  38: ICRFL       	t30, $0x7, CR
+	  39: INCEIPL       	$4
+
+	0xFECB488:  419D0064  bc 12,29,0xFECB4EC
+	  40: Js29o       	$0xFECB4EC
+
+
+
+. 2429 FECB464 40
+. 7C 88 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 90 81 00 14 38 80 FF FF 81 63 00 14 80 03 00 10 7F 8B 00 40 41 9D 00 64
+==== BB 2430 (0xFECB48C) approx BBs exec'd 0 ====
+
+	0xFECB48C:  815F0000  lwz r10,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFECB490:  71400100  andi. r0,r10,0x100
+	   4: GETL       	R10, t4
+	   5: ANDL       	$0x100, t4
+	   6: PUTL       	t4, R0
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECB494:  4082004C  bc 4,2,0xFECB4E0
+	  10: Jc02o       	$0xFECB4E0
+
+
+
+. 2430 FECB48C 12
+. 81 5F 00 00 71 40 01 00 40 82 00 4C
+==== BB 2431 (0xFECB498) approx BBs exec'd 0 ====
+
+	0xFECB498:  819F0008  lwz r12,8(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFECB49C:  811F001C  lwz r8,28(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0xFECB4A0:  7C8B6040  cmpl cr1,r11,r12
+	  10: GETL       	R11, t8
+	  11: GETL       	R12, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0xFECB4A4:  911F000C  stw r8,12(r31)
+	  15: GETL       	R8, t14
+	  16: GETL       	R31, t16
+	  17: ADDL       	$0xC, t16
+	  18: STL       	t14, (t16)
+	  19: INCEIPL       	$4
+
+	0xFECB4A8:  40850008  bc 4,5,0xFECB4B0
+	  20: Jc05o       	$0xFECB4B0
+
+
+
+. 2431 FECB498 20
+. 81 9F 00 08 81 1F 00 1C 7C 8B 60 40 91 1F 00 0C 40 85 00 08
+==== BB 2432 (0xFECB4B0) approx BBs exec'd 0 ====
+
+	0xFECB4B0:  55400566  rlwinm r0,r10,0,21,19
+	   0: GETL       	R10, t0
+	   1: ANDL       	$0xFFFFF7FF, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xFECB4B4:  917F0010  stw r11,16(r31)
+	   4: GETL       	R11, t2
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x10, t4
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECB4B8:  917F0004  stw r11,4(r31)
+	   9: GETL       	R11, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x4, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECB4BC:  38600000  li r3,0
+	  14: MOVL       	$0x0, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFECB4C0:  917F0018  stw r11,24(r31)
+	  17: GETL       	R11, t12
+	  18: GETL       	R31, t14
+	  19: ADDL       	$0x18, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFECB4C4:  917F0014  stw r11,20(r31)
+	  22: GETL       	R11, t16
+	  23: GETL       	R31, t18
+	  24: ADDL       	$0x14, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xFECB4C8:  901F0000  stw r0,0(r31)
+	  27: GETL       	R0, t20
+	  28: GETL       	R31, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFECB4CC:  81410014  lwz r10,20(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x14, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R10
+	  35: INCEIPL       	$4
+
+	0xFECB4D0:  83E10008  lwz r31,8(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x8, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0xFECB4D4:  38210010  addi r1,r1,16
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x10, t32
+	  43: PUTL       	t32, R1
+	  44: INCEIPL       	$4
+
+	0xFECB4D8:  7D4803A6  mtlr r10
+	  45: GETL       	R10, t34
+	  46: PUTL       	t34, LR
+	  47: INCEIPL       	$4
+
+	0xFECB4DC:  4E800020  blr
+	  48: GETL       	LR, t36
+	  49: JMPo-r       	t36  ($4)
+
+
+
+. 2432 FECB4B0 48
+. 55 40 05 66 91 7F 00 10 91 7F 00 04 38 60 00 00 91 7F 00 18 91 7F 00 14 90 1F 00 00 81 41 00 14 83 E1 00 08 38 21 00 10 7D 48 03 A6 4E 80 00 20
+==== BB 2433 (0xFEC8FC4) approx BBs exec'd 0 ====
+
+	0xFEC8FC4:  891F0046  lbz r8,70(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFEC8FC8:  80BF001C  lwz r5,28(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFEC8FCC:  7D070774  extsb r7,r8
+	  10: GETB       	R8, t8
+	  11: WIDENL_Bs       	_st8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0xFEC8FD0:  837F0020  lwz r27,32(r31)
+	  14: GETL       	R31, t10
+	  15: ADDL       	$0x20, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R27
+	  18: INCEIPL       	$4
+
+	0xFEC8FD4:  7CC7FA14  add r6,r7,r31
+	  19: GETL       	R7, t14
+	  20: GETL       	R31, t16
+	  21: ADDL       	t14, t16
+	  22: PUTL       	t16, R6
+	  23: INCEIPL       	$4
+
+	0xFEC8FD8:  7CA42B78  or r4,r5,r5
+	  24: GETL       	R5, t18
+	  25: PUTL       	t18, R4
+	  26: INCEIPL       	$4
+
+	0xFEC8FDC:  80660098  lwz r3,152(r6)
+	  27: GETL       	R6, t20
+	  28: ADDL       	$0x98, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R3
+	  31: INCEIPL       	$4
+
+	0xFEC8FE0:  90BF0004  stw r5,4(r31)
+	  32: GETL       	R5, t24
+	  33: GETL       	R31, t26
+	  34: ADDL       	$0x4, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xFEC8FE4:  81630038  lwz r11,56(r3)
+	  37: GETL       	R3, t28
+	  38: ADDL       	$0x38, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R11
+	  41: INCEIPL       	$4
+
+	0xFEC8FE8:  7FE3FB78  or r3,r31,r31
+	  42: GETL       	R31, t32
+	  43: PUTL       	t32, R3
+	  44: INCEIPL       	$4
+
+	0xFEC8FEC:  90BF000C  stw r5,12(r31)
+	  45: GETL       	R5, t34
+	  46: GETL       	R31, t36
+	  47: ADDL       	$0xC, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFEC8FF0:  90BF0008  stw r5,8(r31)
+	  50: GETL       	R5, t38
+	  51: GETL       	R31, t40
+	  52: ADDL       	$0x8, t40
+	  53: STL       	t38, (t40)
+	  54: INCEIPL       	$4
+
+	0xFEC8FF4:  7D6903A6  mtctr r11
+	  55: GETL       	R11, t42
+	  56: PUTL       	t42, CTR
+	  57: INCEIPL       	$4
+
+	0xFEC8FF8:  90BF0018  stw r5,24(r31)
+	  58: GETL       	R5, t44
+	  59: GETL       	R31, t46
+	  60: ADDL       	$0x18, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0xFEC8FFC:  90BF0014  stw r5,20(r31)
+	  63: GETL       	R5, t48
+	  64: GETL       	R31, t50
+	  65: ADDL       	$0x14, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0xFEC9000:  90BF0010  stw r5,16(r31)
+	  68: GETL       	R5, t52
+	  69: GETL       	R31, t54
+	  70: ADDL       	$0x10, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0xFEC9004:  7CA5D850  subf r5,r5,r27
+	  73: GETL       	R5, t56
+	  74: GETL       	R27, t58
+	  75: SUBL       	t56, t58
+	  76: PUTL       	t58, R5
+	  77: INCEIPL       	$4
+
+	0xFEC9008:  4E800421  bctrl
+	  78: MOVL       	$0xFEC900C, t60
+	  79: PUTL       	t60, LR
+	  80: GETL       	CTR, t62
+	  81: JMPo-c       	t62  ($4)
+
+
+
+. 2433 FEC8FC4 72
+. 89 1F 00 46 80 BF 00 1C 7D 07 07 74 83 7F 00 20 7C C7 FA 14 7C A4 2B 78 80 66 00 98 90 BF 00 04 81 63 00 38 7F E3 FB 78 90 BF 00 0C 90 BF 00 08 7D 69 03 A6 90 BF 00 18 90 BF 00 14 90 BF 00 10 7C A5 D8 50 4E 80 04 21
+==== BB 2434 _IO_file_read_internal(0xFECA48C) approx BBs exec'd 0 ====
+
+	0xFECA48C:  7CC802A6  mflr r6
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFECA490:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECA494:  90C10014  stw r6,20(r1)
+	   9: GETL       	R6, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECA498:  8003003C  lwz r0,60(r3)
+	  14: GETL       	R3, t10
+	  15: ADDL       	$0x3C, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R0
+	  18: INCEIPL       	$4
+
+	0xFECA49C:  70090002  andi. r9,r0,0x2
+	  19: GETL       	R0, t14
+	  20: ANDL       	$0x2, t14
+	  21: PUTL       	t14, R9
+	  22: CMP0L       	t14, t16  (-rSo)
+	  23: ICRFL       	t16, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0xFECA4A0:  4082001C  bc 4,2,0xFECA4BC
+	  25: Jc02o       	$0xFECA4BC
+
+
+
+. 2434 FECA48C 24
+. 7C C8 02 A6 94 21 FF F0 90 C1 00 14 80 03 00 3C 70 09 00 02 40 82 00 1C
+==== BB 2435 (0xFECA4BC) approx BBs exec'd 0 ====
+
+	0xFECA4BC:  80630038  lwz r3,56(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x38, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECA4C0:  48057AA1  bl 0xFF21F60
+	   5: MOVL       	$0xFECA4C4, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xFF21F60  ($4)
+
+
+
+. 2435 FECA4BC 8
+. 80 63 00 38 48 05 7A A1
+==== BB 2436 __read_nocancel(0xFF21F60) approx BBs exec'd 0 ====
+
+	0xFF21F60:  38000003  li r0,3
+	   0: MOVL       	$0x3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF21F64:  44000002  sc
+	   3: JMPo-sys       	$0xFF21F68  ($4)
+
+
+
+. 2436 FF21F60 8
+. 38 00 00 03 44 00 00 02
+==== BB 2437 (0xFF21F68) approx BBs exec'd 0 ====
+
+	0xFF21F68:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 2437 FF21F68 4
+. 4C A3 00 20
+==== BB 2438 (0xFECA4C4) approx BBs exec'd 0 ====
+
+	0xFECA4C4:  4BFFFFE8  b 0xFECA4AC
+	   0: JMPo       	$0xFECA4AC  ($4)
+
+
+
+. 2438 FECA4C4 4
+. 4B FF FF E8
+==== BB 2439 (0xFECA4AC) approx BBs exec'd 0 ====
+
+	0xFECA4AC:  80810014  lwz r4,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFECA4B0:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFECA4B4:  7C8803A6  mtlr r4
+	   9: GETL       	R4, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFECA4B8:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 2439 FECA4AC 16
+. 80 81 00 14 38 21 00 10 7C 88 03 A6 4E 80 00 20
+==== BB 2440 (0xFEC900C) approx BBs exec'd 0 ====
+
+	0xFEC900C:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEC9010:  408100EC  bc 4,1,0xFEC90FC
+	   4: Jc01o       	$0xFEC90FC
+
+
+
+. 2440 FEC900C 8
+. 2C 03 00 00 40 81 00 EC
+==== BB 2441 (0xFEC9014) approx BBs exec'd 0 ====
+
+	0xFEC9014:  809F0008  lwz r4,8(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEC9018:  7FA41A14  add r29,r4,r3
+	   5: GETL       	R4, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFEC901C:  93BF0008  stw r29,8(r31)
+	  10: GETL       	R29, t8
+	  11: GETL       	R31, t10
+	  12: ADDL       	$0x8, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFEC9020:  41820170  bc 12,2,0xFEC9190
+	  15: Js02o       	$0xFEC9190
+
+
+
+. 2441 FEC9014 16
+. 80 9F 00 08 7F A4 1A 14 93 BF 00 08 41 82 01 70
+==== BB 2442 (0xFEC9024) approx BBs exec'd 0 ====
+
+	0xFEC9024:  801F0050  lwz r0,80(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEC9028:  2C80FFFF  cmpi cr1,r0,-1
+	   5: GETL       	R0, t4
+	   6: MOVL       	$0xFFFFFFFF, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFEC902C:  418600B8  bc 12,6,0xFEC90E4
+	  10: Js06o       	$0xFEC90E4
+
+
+
+. 2442 FEC9024 12
+. 80 1F 00 50 2C 80 FF FF 41 86 00 B8
+==== BB 2443 (0xFEC90E4) approx BBs exec'd 0 ====
+
+	0xFEC90E4:  813F0054  lwz r9,84(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x54, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEC90E8:  2C09FFFF  cmpi cr0,r9,-1
+	   5: GETL       	R9, t4
+	   6: MOVL       	$0xFFFFFFFF, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEC90EC:  4082FF44  bc 4,2,0xFEC9030
+	  10: Jc02o       	$0xFEC9030
+
+
+
+. 2443 FEC90E4 12
+. 81 3F 00 54 2C 09 FF FF 40 82 FF 44
+==== BB 2444 (0xFEC90F0) approx BBs exec'd 0 ====
+
+	0xFEC90F0:  839F0004  lwz r28,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFEC90F4:  887C0000  lbz r3,0(r28)
+	   5: GETL       	R28, t4
+	   6: LDB       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFEC90F8:  4BFFFF60  b 0xFEC9058
+	   9: JMPo       	$0xFEC9058  ($4)
+
+
+
+. 2444 FEC90F0 12
+. 83 9F 00 04 88 7C 00 00 4B FF FF 60
+==== BB 2445 (0xFEC9058) approx BBs exec'd 0 ====
+
+	0xFEC9058:  83E10024  lwz r31,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFEC905C:  8361000C  lwz r27,12(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R27
+	   9: INCEIPL       	$4
+
+	0xFEC9060:  7FE803A6  mtlr r31
+	  10: GETL       	R31, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFEC9064:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0xFEC9068:  83A10014  lwz r29,20(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R29
+	  22: INCEIPL       	$4
+
+	0xFEC906C:  83C10018  lwz r30,24(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R30
+	  27: INCEIPL       	$4
+
+	0xFEC9070:  83E1001C  lwz r31,28(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R31
+	  32: INCEIPL       	$4
+
+	0xFEC9074:  38210020  addi r1,r1,32
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x20, t26
+	  35: PUTL       	t26, R1
+	  36: INCEIPL       	$4
+
+	0xFEC9078:  4E800020  blr
+	  37: GETL       	LR, t28
+	  38: JMPo-r       	t28  ($4)
+
+
+
+. 2445 FEC9058 36
+. 83 E1 00 24 83 61 00 0C 7F E8 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2446 (0xFECBD20) approx BBs exec'd 0 ====
+
+	0xFECBD20:  2F83FFFF  cmpi cr7,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFECBD24:  3860FFFF  li r3,-1
+	   5: MOVL       	$0xFFFFFFFF, t6
+	   6: PUTL       	t6, R3
+	   7: INCEIPL       	$4
+
+	0xFECBD28:  419E0018  bc 12,30,0xFECBD40
+	   8: Js30o       	$0xFECBD40
+
+
+
+. 2446 FECBD20 12
+. 2F 83 FF FF 38 60 FF FF 41 9E 00 18
+==== BB 2447 (0xFECBD2C) approx BBs exec'd 0 ====
+
+	0xFECBD2C:  811F0004  lwz r8,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFECBD30:  88E80000  lbz r7,0(r8)
+	   5: GETL       	R8, t4
+	   6: LDB       	(t4), t6
+	   7: PUTL       	t6, R7
+	   8: INCEIPL       	$4
+
+	0xFECBD34:  38680001  addi r3,r8,1
+	   9: GETL       	R8, t8
+	  10: ADDL       	$0x1, t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFECBD38:  907F0004  stw r3,4(r31)
+	  13: GETL       	R3, t10
+	  14: GETL       	R31, t12
+	  15: ADDL       	$0x4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFECBD3C:  7CE33B78  or r3,r7,r7
+	  18: GETL       	R7, t14
+	  19: PUTL       	t14, R3
+	  20: INCEIPL       	$4
+
+	0xFECBD40:  81410014  lwz r10,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R10
+	  25: INCEIPL       	$4
+
+	0xFECBD44:  83E10008  lwz r31,8(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x8, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFECBD48:  38210010  addi r1,r1,16
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x10, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0xFECBD4C:  7D4803A6  mtlr r10
+	  35: GETL       	R10, t26
+	  36: PUTL       	t26, LR
+	  37: INCEIPL       	$4
+
+	0xFECBD50:  4E800020  blr
+	  38: GETL       	LR, t28
+	  39: JMPo-r       	t28  ($4)
+
+
+
+. 2447 FECBD2C 40
+. 81 1F 00 04 88 E8 00 00 38 68 00 01 90 7F 00 04 7C E3 3B 78 81 41 00 14 83 E1 00 08 38 21 00 10 7D 48 03 A6 4E 80 00 20
+==== BB 2448 (0xFECBA70) approx BBs exec'd 0 ====
+
+	0xFECBA70:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFECBA74:  80E10014  lwz r7,20(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x14, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0xFECBA78:  7C030378  or r3,r0,r0
+	   8: GETL       	R0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFECBA7C:  83C10008  lwz r30,8(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x8, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R30
+	  15: INCEIPL       	$4
+
+	0xFECBA80:  83E1000C  lwz r31,12(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0xC, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0xFECBA84:  7CE803A6  mtlr r7
+	  21: GETL       	R7, t16
+	  22: PUTL       	t16, LR
+	  23: INCEIPL       	$4
+
+	0xFECBA88:  38210010  addi r1,r1,16
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x10, t18
+	  26: PUTL       	t18, R1
+	  27: INCEIPL       	$4
+
+	0xFECBA8C:  4E800020  blr
+	  28: GETL       	LR, t20
+	  29: JMPo-r       	t20  ($4)
+
+
+
+. 2448 FECBA70 32
+. 7C 60 1B 78 80 E1 00 14 7C 03 03 78 83 C1 00 08 83 E1 00 0C 7C E8 03 A6 38 21 00 10 4E 80 00 20
+==== BB 2449 (0xFEBD928) approx BBs exec'd 0 ====
+
+	0xFEBD928:  3B9CFFFF  addi r28,r28,-1
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0xFEBD92C:  2F83FFFF  cmpi cr7,r3,-1
+	   4: GETL       	R3, t2
+	   5: MOVL       	$0xFFFFFFFF, t6
+	   6: CMPL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFEBD930:  7F03C000  cmp cr6,r3,r24
+	   9: GETL       	R3, t8
+	  10: GETL       	R24, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFEBD934:  419E009C  bc 12,30,0xFEBD9D0
+	  14: Js30o       	$0xFEBD9D0
+
+
+
+. 2449 FEBD928 16
+. 3B 9C FF FF 2F 83 FF FF 7F 03 C0 00 41 9E 00 9C
+==== BB 2450 (0xFEBD938) approx BBs exec'd 0 ====
+
+	0xFEBD938:  419A00A4  bc 12,26,0xFEBD9DC
+	   0: Js26o       	$0xFEBD9DC
+
+
+
+. 2450 FEBD938 4
+. 41 9A 00 A4
+==== BB 2451 (0xFEBD93C) approx BBs exec'd 0 ====
+
+	0xFEBD93C:  987A0000  stb r3,0(r26)
+	   0: GETL       	R3, t0
+	   1: GETL       	R26, t2
+	   2: STB       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFEBD940:  3B5A0001  addi r26,r26,1
+	   4: GETL       	R26, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xFEBD944:  4BFFFF94  b 0xFEBD8D8
+	   8: JMPo       	$0xFEBD8D8  ($4)
+
+
+
+. 2451 FEBD93C 12
+. 98 7A 00 00 3B 5A 00 01 4B FF FF 94
+==== BB 2452 (0xFEBD8D8) approx BBs exec'd 0 ====
+
+	0xFEBD8D8:  2F1C0000  cmpi cr6,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFEBD8DC:  409AFFA0  bc 4,26,0xFEBD87C
+	   4: Jc26o       	$0xFEBD87C
+
+
+
+. 2452 FEBD8D8 8
+. 2F 1C 00 00 40 9A FF A0
+==== BB 2453 (0xFEBD87C) approx BBs exec'd 0 ====
+
+	0xFEBD87C:  83BB0004  lwz r29,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFEBD880:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEBD884:  80FB0008  lwz r7,8(r27)
+	   8: GETL       	R27, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0xFEBD888:  7F04C378  or r4,r24,r24
+	  13: GETL       	R24, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFEBD88C:  7FFD3851  subf. r31,r29,r7
+	  16: GETL       	R29, t12
+	  17: GETL       	R7, t14
+	  18: SUBL       	t12, t14
+	  19: PUTL       	t14, R31
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x0, CR
+	  22: INCEIPL       	$4
+
+	0xFEBD890:  7F9FE040  cmpl cr7,r31,r28
+	  23: GETL       	R31, t18
+	  24: GETL       	R28, t20
+	  25: CMPUL       	t18, t20, t22  (-rSo)
+	  26: ICRFL       	t22, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0xFEBD894:  40810090  bc 4,1,0xFEBD924
+	  28: Jc01o       	$0xFEBD924
+
+
+
+. 2453 FEBD87C 28
+. 83 BB 00 04 7F 63 DB 78 80 FB 00 08 7F 04 C3 78 7F FD 38 51 7F 9F E0 40 40 81 00 90
+==== BB 2454 (0xFEBD898) approx BBs exec'd 0 ====
+
+	0xFEBD898:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEBD89C:  419C0008  bc 12,28,0xFEBD8A4
+	   3: Js28o       	$0xFEBD8A4
+
+
+
+. 2454 FEBD898 8
+. 7F A3 EB 78 41 9C 00 08
+==== BB 2455 (0xFEBD8A0) approx BBs exec'd 0 ====
+
+	0xFEBD8A0:  7F9FE378  or r31,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFEBD8A4:  7FE5FB78  or r5,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFEBD8A8:  7F9FE050  subf r28,r31,r28
+	   6: GETL       	R31, t4
+	   7: GETL       	R28, t6
+	   8: SUBL       	t4, t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0xFEBD8AC:  48019EA9  bl 0xFED7754
+	  11: MOVL       	$0xFEBD8B0, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0xFED7754  ($4)
+
+
+
+. 2455 FEBD8A0 16
+. 7F 9F E3 78 7F E5 FB 78 7F 9F E0 50 48 01 9E A9
+==== BB 2456 memchr(0xFED7754) approx BBs exec'd 0 ====
+
+	0xFED7754:  2C050000  cmpi cr0,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFED7758:  9421FFF0  stwu r1,-16(r1)
+	   4: GETL       	R1, t4
+	   5: GETL       	R1, t6
+	   6: ADDL       	$0xFFFFFFF0, t6
+	   7: PUTL       	t6, R1
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFED775C:  5484063E  rlwinm r4,r4,0,24,31
+	  10: GETL       	R4, t8
+	  11: ANDL       	$0xFF, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0xFED7760:  41820030  bc 12,2,0xFED7790
+	  14: Js02o       	$0xFED7790
+
+
+
+. 2456 FED7754 16
+. 2C 05 00 00 94 21 FF F0 54 84 06 3E 41 82 00 30
+==== BB 2457 (0xFED7764) approx BBs exec'd 0 ====
+
+	0xFED7764:  70600003  andi. r0,r3,0x3
+	   0: GETL       	R3, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED7768:  41820028  bc 12,2,0xFED7790
+	   6: Js02o       	$0xFED7790
+
+
+
+. 2457 FED7764 8
+. 70 60 00 03 41 82 00 28
+==== BB 2458 (0xFED776C) approx BBs exec'd 0 ====
+
+	0xFED776C:  88030000  lbz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFED7770:  7F802000  cmp cr7,r0,r4
+	   4: GETL       	R0, t4
+	   5: GETL       	R4, t6
+	   6: CMPL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFED7774:  419E00CC  bc 12,30,0xFED7840
+	   9: Js30o       	$0xFED7840
+
+
+
+. 2458 FED776C 12
+. 88 03 00 00 7F 80 20 00 41 9E 00 CC
+==== BB 2459 (0xFED7778) approx BBs exec'd 0 ====
+
+	0xFED7778:  34A5FFFF  addic. r5,r5,-1
+	   0: GETL       	R5, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R5
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED777C:  38630001  addi r3,r3,1
+	   6: GETL       	R3, t4
+	   7: ADDL       	$0x1, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0xFED7780:  546907BE  rlwinm r9,r3,0,30,31
+	  10: GETL       	R3, t6
+	  11: ANDL       	$0x3, t6
+	  12: PUTL       	t6, R9
+	  13: INCEIPL       	$4
+
+	0xFED7784:  2F890000  cmpi cr7,r9,0
+	  14: GETL       	R9, t8
+	  15: CMP0L       	t8, t10  (-rSo)
+	  16: ICRFL       	t10, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0xFED7788:  41820008  bc 12,2,0xFED7790
+	  18: Js02o       	$0xFED7790
+
+
+
+. 2459 FED7778 20
+. 34 A5 FF FF 38 63 00 01 54 69 07 BE 2F 89 00 00 41 82 00 08
+==== BB 2460 (0xFED778C) approx BBs exec'd 0 ====
+
+	0xFED778C:  409EFFE0  bc 4,30,0xFED776C
+	   0: Jc30o       	$0xFED776C
+
+
+
+. 2460 FED778C 4
+. 40 9E FF E0
+==== BB 2461 (0xFED7790) approx BBs exec'd 0 ====
+
+	0xFED7790:  28850003  cmpli cr1,r5,3
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x3, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFED7794:  5488402E  rlwinm r8,r4,8,0,23
+	   5: GETL       	R4, t6
+	   6: SHLL       	$0x8, t6
+	   7: PUTL       	t6, R8
+	   8: INCEIPL       	$4
+
+	0xFED7798:  7C874378  or r7,r4,r8
+	   9: GETL       	R4, t8
+	  10: GETL       	R8, t10
+	  11: ORL       	t10, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0xFED779C:  3D407EFE  lis r10,32510
+	  14: MOVL       	$0x7EFE0000, t12
+	  15: PUTL       	t12, R10
+	  16: INCEIPL       	$4
+
+	0xFED77A0:  54E6801E  rlwinm r6,r7,16,0,15
+	  17: GETL       	R7, t14
+	  18: SHLL       	$0x10, t14
+	  19: PUTL       	t14, R6
+	  20: INCEIPL       	$4
+
+	0xFED77A4:  614AFEFF  ori r10,r10,0xFEFF
+	  21: MOVL       	$0x7EFEFEFF, t16
+	  22: PUTL       	t16, R10
+	  23: INCEIPL       	$4
+
+	0xFED77A8:  7CE83378  or r8,r7,r6
+	  24: GETL       	R7, t18
+	  25: GETL       	R6, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0xFED77AC:  40850068  bc 4,5,0xFED7814
+	  29: Jc05o       	$0xFED7814
+
+
+
+. 2461 FED7790 32
+. 28 85 00 03 54 88 40 2E 7C 87 43 78 3D 40 7E FE 54 E6 80 1E 61 4A FE FF 7C E8 33 78 40 85 00 68
+==== BB 2462 (0xFED77B0) approx BBs exec'd 0 ====
+
+	0xFED77B0:  3D608101  lis r11,-32511
+	   0: MOVL       	$0x81010000, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFED77B4:  616B0100  ori r11,r11,0x100
+	   3: MOVL       	$0x81010100, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0xFED77B8:  80C30000  lwz r6,0(r3)
+	   6: GETL       	R3, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0xFED77BC:  38A5FFFC  addi r5,r5,-4
+	  10: GETL       	R5, t8
+	  11: ADDL       	$0xFFFFFFFC, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0xFED77C0:  38630004  addi r3,r3,4
+	  14: GETL       	R3, t10
+	  15: ADDL       	$0x4, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0xFED77C4:  2B050003  cmpli cr6,r5,3
+	  18: GETL       	R5, t12
+	  19: MOVL       	$0x3, t16
+	  20: CMPUL       	t12, t16, t14  (-rSo)
+	  21: ICRFL       	t14, $0x6, CR
+	  22: INCEIPL       	$4
+
+	0xFED77C8:  7CC04278  xor r0,r6,r8
+	  23: GETL       	R6, t18
+	  24: GETL       	R8, t20
+	  25: XORL       	t18, t20
+	  26: PUTL       	t20, R0
+	  27: INCEIPL       	$4
+
+	0xFED77CC:  7D205214  add r9,r0,r10
+	  28: GETL       	R0, t22
+	  29: GETL       	R10, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R9
+	  32: INCEIPL       	$4
+
+	0xFED77D0:  7C0C4A78  xor r12,r0,r9
+	  33: GETL       	R0, t26
+	  34: GETL       	R9, t28
+	  35: XORL       	t26, t28
+	  36: PUTL       	t28, R12
+	  37: INCEIPL       	$4
+
+	0xFED77D4:  7D696079  andc. r9,r11,r12
+	  38: GETL       	R11, t30
+	  39: GETL       	R12, t32
+	  40: NOTL       	t32
+	  41: ANDL       	t30, t32
+	  42: PUTL       	t32, R9
+	  43: CMP0L       	t32, t34  (-rSo)
+	  44: ICRFL       	t34, $0x0, CR
+	  45: INCEIPL       	$4
+
+	0xFED77D8:  3923FFFC  addi r9,r3,-4
+	  46: GETL       	R3, t36
+	  47: ADDL       	$0xFFFFFFFC, t36
+	  48: PUTL       	t36, R9
+	  49: INCEIPL       	$4
+
+	0xFED77DC:  41820034  bc 12,2,0xFED7810
+	  50: Js02o       	$0xFED7810
+
+
+
+. 2462 FED77B0 48
+. 3D 60 81 01 61 6B 01 00 80 C3 00 00 38 A5 FF FC 38 63 00 04 2B 05 00 03 7C C0 42 78 7D 20 52 14 7C 0C 4A 78 7D 69 60 79 39 23 FF FC 41 82 00 34
+==== BB 2463 (0xFED7810) approx BBs exec'd 0 ====
+
+	0xFED7810:  4199FFA8  bc 12,25,0xFED77B8
+	   0: Js25o       	$0xFED77B8
+
+
+
+. 2463 FED7810 4
+. 41 99 FF A8
+==== BB 2464 (0xFED77B8) approx BBs exec'd 0 ====
+
+	0xFED77B8:  80C30000  lwz r6,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFED77BC:  38A5FFFC  addi r5,r5,-4
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0xFFFFFFFC, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFED77C0:  38630004  addi r3,r3,4
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x4, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0xFED77C4:  2B050003  cmpli cr6,r5,3
+	  12: GETL       	R5, t8
+	  13: MOVL       	$0x3, t12
+	  14: CMPUL       	t8, t12, t10  (-rSo)
+	  15: ICRFL       	t10, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0xFED77C8:  7CC04278  xor r0,r6,r8
+	  17: GETL       	R6, t14
+	  18: GETL       	R8, t16
+	  19: XORL       	t14, t16
+	  20: PUTL       	t16, R0
+	  21: INCEIPL       	$4
+
+	0xFED77CC:  7D205214  add r9,r0,r10
+	  22: GETL       	R0, t18
+	  23: GETL       	R10, t20
+	  24: ADDL       	t18, t20
+	  25: PUTL       	t20, R9
+	  26: INCEIPL       	$4
+
+	0xFED77D0:  7C0C4A78  xor r12,r0,r9
+	  27: GETL       	R0, t22
+	  28: GETL       	R9, t24
+	  29: XORL       	t22, t24
+	  30: PUTL       	t24, R12
+	  31: INCEIPL       	$4
+
+	0xFED77D4:  7D696079  andc. r9,r11,r12
+	  32: GETL       	R11, t26
+	  33: GETL       	R12, t28
+	  34: NOTL       	t28
+	  35: ANDL       	t26, t28
+	  36: PUTL       	t28, R9
+	  37: CMP0L       	t28, t30  (-rSo)
+	  38: ICRFL       	t30, $0x0, CR
+	  39: INCEIPL       	$4
+
+	0xFED77D8:  3923FFFC  addi r9,r3,-4
+	  40: GETL       	R3, t32
+	  41: ADDL       	$0xFFFFFFFC, t32
+	  42: PUTL       	t32, R9
+	  43: INCEIPL       	$4
+
+	0xFED77DC:  41820034  bc 12,2,0xFED7810
+	  44: Js02o       	$0xFED7810
+
+
+
+. 2464 FED77B8 40
+. 80 C3 00 00 38 A5 FF FC 38 63 00 04 2B 05 00 03 7C C0 42 78 7D 20 52 14 7C 0C 4A 78 7D 69 60 79 39 23 FF FC 41 82 00 34
+==== BB 2465 (0xFED77E0) approx BBs exec'd 0 ====
+
+	0xFED77E0:  88E3FFFC  lbz r7,-4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFED77E4:  7F872000  cmp cr7,r7,r4
+	   5: GETL       	R7, t4
+	   6: GETL       	R4, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFED77E8:  419E006C  bc 12,30,0xFED7854
+	  10: Js30o       	$0xFED7854
+
+
+
+. 2465 FED77E0 12
+. 88 E3 FF FC 7F 87 20 00 41 9E 00 6C
+==== BB 2466 (0xFED77EC) approx BBs exec'd 0 ====
+
+	0xFED77EC:  89890001  lbz r12,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFED77F0:  7C8C2000  cmp cr1,r12,r4
+	   5: GETL       	R12, t4
+	   6: GETL       	R4, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFED77F4:  4186006C  bc 12,6,0xFED7860
+	  10: Js06o       	$0xFED7860
+
+
+
+. 2466 FED77EC 12
+. 89 89 00 01 7C 8C 20 00 41 86 00 6C
+==== BB 2467 (0xFED77F8) approx BBs exec'd 0 ====
+
+	0xFED77F8:  88090002  lbz r0,2(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x2, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED77FC:  7C002000  cmp cr0,r0,r4
+	   5: GETL       	R0, t4
+	   6: GETL       	R4, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFED7800:  4182006C  bc 12,2,0xFED786C
+	  10: Js02o       	$0xFED786C
+
+
+
+. 2467 FED77F8 12
+. 88 09 00 02 7C 00 20 00 41 82 00 6C
+==== BB 2468 (0xFED786C) approx BBs exec'd 0 ====
+
+	0xFED786C:  3863FFFE  addi r3,r3,-2
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFE, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFED7870:  38210010  addi r1,r1,16
+	   4: GETL       	R1, t2
+	   5: ADDL       	$0x10, t2
+	   6: PUTL       	t2, R1
+	   7: INCEIPL       	$4
+
+	0xFED7874:  4E800020  blr
+	   8: GETL       	LR, t4
+	   9: JMPo-r       	t4  ($4)
+
+
+
+. 2468 FED786C 12
+. 38 63 FF FE 38 21 00 10 4E 80 00 20
+==== BB 2469 (0xFEBD8B0) approx BBs exec'd 0 ====
+
+	0xFEBD8B0:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEBD8B4:  7C791B79  or. r25,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R25
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFEBD8B8:  7FE5FB78  or r5,r31,r31
+	   8: GETL       	R31, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0xFEBD8BC:  7F43D378  or r3,r26,r26
+	  11: GETL       	R26, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFEBD8C0:  40820088  bc 4,2,0xFEBD948
+	  14: Jc02o       	$0xFEBD948
+
+
+
+. 2469 FEBD8B0 20
+. 7F A4 EB 78 7C 79 1B 79 7F E5 FB 78 7F 43 D3 78 40 82 00 88
+==== BB 2470 (0xFEBD948) approx BBs exec'd 0 ====
+
+	0xFEBD948:  7F97D050  subf r28,r23,r26
+	   0: GETL       	R23, t0
+	   1: GETL       	R26, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFEBD94C:  7FFDC850  subf r31,r29,r25
+	   5: GETL       	R29, t4
+	   6: GETL       	R25, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0xFEBD950:  41900010  bc 12,16,0xFEBD960
+	  10: Js16o       	$0xFEBD960
+
+
+
+. 2470 FEBD948 12
+. 7F 97 D0 50 7F FD C8 50 41 90 00 10
+==== BB 2471 (0xFEBD954) approx BBs exec'd 0 ====
+
+	0xFEBD954:  3B390001  addi r25,r25,1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0xFEBD958:  40910008  bc 4,17,0xFEBD960
+	   4: Jc17o       	$0xFEBD960
+
+
+
+. 2471 FEBD954 8
+. 3B 39 00 01 40 91 00 08
+==== BB 2472 (0xFEBD95C) approx BBs exec'd 0 ====
+
+	0xFEBD95C:  3BFF0001  addi r31,r31,1
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0xFEBD960:  7F43D378  or r3,r26,r26
+	   4: GETL       	R26, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0xFEBD964:  7FA4EB78  or r4,r29,r29
+	   7: GETL       	R29, t4
+	   8: PUTL       	t4, R4
+	   9: INCEIPL       	$4
+
+	0xFEBD968:  7FE5FB78  or r5,r31,r31
+	  10: GETL       	R31, t6
+	  11: PUTL       	t6, R5
+	  12: INCEIPL       	$4
+
+	0xFEBD96C:  4801AD6D  bl 0xFED86D8
+	  13: MOVL       	$0xFEBD970, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2472 FEBD95C 20
+. 3B FF 00 01 7F 43 D3 78 7F A4 EB 78 7F E5 FB 78 48 01 AD 6D
+==== BB 2473 (0xFED8720) approx BBs exec'd 0 ====
+
+	0xFED8720:  7D2903A6  mtctr r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0xFED8724:  88BD0000  lbz r5,0(r29)
+	   3: GETL       	R29, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R5
+	   6: INCEIPL       	$4
+
+	0xFED8728:  3BBD0001  addi r29,r29,1
+	   7: GETL       	R29, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R29
+	  10: INCEIPL       	$4
+
+	0xFED872C:  98BF0000  stb r5,0(r31)
+	  11: GETL       	R5, t8
+	  12: GETL       	R31, t10
+	  13: STB       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFED8730:  3BFF0001  addi r31,r31,1
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R31
+	  18: INCEIPL       	$4
+
+	0xFED8734:  4200FFF0  bc 16,0,0xFED8724
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0xFED8738
+	  23: JMPo       	$0xFED8724  ($4)
+
+
+
+. 2473 FED8720 24
+. 7D 29 03 A6 88 BD 00 00 3B BD 00 01 98 BF 00 00 3B FF 00 01 42 00 FF F0
+==== BB 2474 (0xFED8724) approx BBs exec'd 0 ====
+
+	0xFED8724:  88BD0000  lbz r5,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFED8728:  3BBD0001  addi r29,r29,1
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFED872C:  98BF0000  stb r5,0(r31)
+	   8: GETL       	R5, t6
+	   9: GETL       	R31, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFED8730:  3BFF0001  addi r31,r31,1
+	  12: GETL       	R31, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0xFED8734:  4200FFF0  bc 16,0,0xFED8724
+	  16: GETL       	CTR, t12
+	  17: ADDL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, CTR
+	  19: JIFZL       	t12, $0xFED8738
+	  20: JMPo       	$0xFED8724  ($4)
+
+
+
+. 2474 FED8724 20
+. 88 BD 00 00 3B BD 00 01 98 BF 00 00 3B FF 00 01 42 00 FF F0
+==== BB 2475 (0xFED88D8) approx BBs exec'd 0 ====
+
+	0xFED88D8:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFED88DC:  3884FFF8  addi r4,r4,-8
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0xFFFFFFF8, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFED88E0:  8124000C  lwz r9,12(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0xFED88E4:  3863FFF4  addi r3,r3,-12
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0xFFFFFFF4, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFED88E8:  38A50002  addi r5,r5,2
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x2, t12
+	  19: PUTL       	t12, R5
+	  20: INCEIPL       	$4
+
+	0xFED88EC:  9003000C  stw r0,12(r3)
+	  21: GETL       	R0, t14
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0xC, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0xFED88F0:  4BFFFF34  b 0xFED8824
+	  26: JMPo       	$0xFED8824  ($4)
+
+
+
+. 2475 FED88D8 28
+. 80 04 00 00 38 84 FF F8 81 24 00 0C 38 63 FF F4 38 A5 00 02 90 03 00 0C 4B FF FF 34
+==== BB 2476 (0xFED8824) approx BBs exec'd 0 ====
+
+	0xFED8824:  80040010  lwz r0,16(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED8828:  91230010  stw r9,16(r3)
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x10, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFED882C:  81240014  lwz r9,20(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0xFED8830:  90030014  stw r0,20(r3)
+	  15: GETL       	R0, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0x14, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFED8834:  80040018  lwz r0,24(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0x18, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R0
+	  24: INCEIPL       	$4
+
+	0xFED8838:  91230018  stw r9,24(r3)
+	  25: GETL       	R9, t20
+	  26: GETL       	R3, t22
+	  27: ADDL       	$0x18, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0xFED883C:  34A5FFF8  addic. r5,r5,-8
+	  30: GETL       	R5, t24
+	  31: ADCL       	$0xFFFFFFF8, t24  (-wCa)
+	  32: PUTL       	t24, R5
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x0, CR
+	  35: INCEIPL       	$4
+
+	0xFED8840:  8124001C  lwz r9,28(r4)
+	  36: GETL       	R4, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R9
+	  40: INCEIPL       	$4
+
+	0xFED8844:  9003001C  stw r0,28(r3)
+	  41: GETL       	R0, t32
+	  42: GETL       	R3, t34
+	  43: ADDL       	$0x1C, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0xFED8848:  38840020  addi r4,r4,32
+	  46: GETL       	R4, t36
+	  47: ADDL       	$0x20, t36
+	  48: PUTL       	t36, R4
+	  49: INCEIPL       	$4
+
+	0xFED884C:  38630020  addi r3,r3,32
+	  50: GETL       	R3, t38
+	  51: ADDL       	$0x20, t38
+	  52: PUTL       	t38, R3
+	  53: INCEIPL       	$4
+
+	0xFED8850:  40A2FFB4  bc 5,2,0xFED8804
+	  54: Jc02o       	$0xFED8804
+
+
+
+. 2476 FED8824 48
+. 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 2477 (0xFEBD970) approx BBs exec'd 0 ====
+
+	0xFEBD970:  81410044  lwz r10,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFEBD974:  81810014  lwz r12,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0xFEBD978:  7C7CFA14  add r3,r28,r31
+	  10: GETL       	R28, t8
+	  11: GETL       	R31, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0xFEBD97C:  933B0004  stw r25,4(r27)
+	  15: GETL       	R25, t12
+	  16: GETL       	R27, t14
+	  17: ADDL       	$0x4, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFEBD980:  7D4803A6  mtlr r10
+	  20: GETL       	R10, t16
+	  21: PUTL       	t16, LR
+	  22: INCEIPL       	$4
+
+	0xFEBD984:  82C10018  lwz r22,24(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R22
+	  27: INCEIPL       	$4
+
+	0xFEBD988:  7D838120  mtcrf 0x38,r12
+	  28: GETL       	R12, t22
+	  29: ICRFL       	t22, $0x2, CR
+	  30: ICRFL       	t22, $0x3, CR
+	  31: ICRFL       	t22, $0x4, CR
+	  32: INCEIPL       	$4
+
+	0xFEBD98C:  82E1001C  lwz r23,28(r1)
+	  33: GETL       	R1, t24
+	  34: ADDL       	$0x1C, t24
+	  35: LDL       	(t24), t26
+	  36: PUTL       	t26, R23
+	  37: INCEIPL       	$4
+
+	0xFEBD990:  83010020  lwz r24,32(r1)
+	  38: GETL       	R1, t28
+	  39: ADDL       	$0x20, t28
+	  40: LDL       	(t28), t30
+	  41: PUTL       	t30, R24
+	  42: INCEIPL       	$4
+
+	0xFEBD994:  83210024  lwz r25,36(r1)
+	  43: GETL       	R1, t32
+	  44: ADDL       	$0x24, t32
+	  45: LDL       	(t32), t34
+	  46: PUTL       	t34, R25
+	  47: INCEIPL       	$4
+
+	0xFEBD998:  83410028  lwz r26,40(r1)
+	  48: GETL       	R1, t36
+	  49: ADDL       	$0x28, t36
+	  50: LDL       	(t36), t38
+	  51: PUTL       	t38, R26
+	  52: INCEIPL       	$4
+
+	0xFEBD99C:  8361002C  lwz r27,44(r1)
+	  53: GETL       	R1, t40
+	  54: ADDL       	$0x2C, t40
+	  55: LDL       	(t40), t42
+	  56: PUTL       	t42, R27
+	  57: INCEIPL       	$4
+
+	0xFEBD9A0:  83810030  lwz r28,48(r1)
+	  58: GETL       	R1, t44
+	  59: ADDL       	$0x30, t44
+	  60: LDL       	(t44), t46
+	  61: PUTL       	t46, R28
+	  62: INCEIPL       	$4
+
+	0xFEBD9A4:  83A10034  lwz r29,52(r1)
+	  63: GETL       	R1, t48
+	  64: ADDL       	$0x34, t48
+	  65: LDL       	(t48), t50
+	  66: PUTL       	t50, R29
+	  67: INCEIPL       	$4
+
+	0xFEBD9A8:  83C10038  lwz r30,56(r1)
+	  68: GETL       	R1, t52
+	  69: ADDL       	$0x38, t52
+	  70: LDL       	(t52), t54
+	  71: PUTL       	t54, R30
+	  72: INCEIPL       	$4
+
+	0xFEBD9AC:  83E1003C  lwz r31,60(r1)
+	  73: GETL       	R1, t56
+	  74: ADDL       	$0x3C, t56
+	  75: LDL       	(t56), t58
+	  76: PUTL       	t58, R31
+	  77: INCEIPL       	$4
+
+	0xFEBD9B0:  38210040  addi r1,r1,64
+	  78: GETL       	R1, t60
+	  79: ADDL       	$0x40, t60
+	  80: PUTL       	t60, R1
+	  81: INCEIPL       	$4
+
+	0xFEBD9B4:  4E800020  blr
+	  82: GETL       	LR, t62
+	  83: JMPo-r       	t62  ($4)
+
+
+
+. 2477 FEBD970 72
+. 81 41 00 44 81 81 00 14 7C 7C FA 14 93 3B 00 04 7D 48 03 A6 82 C1 00 18 7D 83 81 20 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 2478 (0xFEBD7E0) approx BBs exec'd 0 ====
+
+	0xFEBD7E0:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEBD7E4:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFEBD7E8:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFEBD7EC:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 2478 FEBD7E0 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 2479 (0xFEC80D8) approx BBs exec'd 0 ====
+
+	0xFEC80D8:  39400000  li r10,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFEC80DC:  2C830000  cmpi cr1,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x1, CR
+	   6: INCEIPL       	$4
+
+	0xFEC80E0:  40860038  bc 4,6,0xFEC8118
+	   7: Jc06o       	$0xFEC8118
+
+
+
+. 2479 FEC80D8 12
+. 39 40 00 00 2C 83 00 00 40 86 00 38
+==== BB 2480 (0xFEC8118) approx BBs exec'd 0 ====
+
+	0xFEC8118:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFEC811C:  809E1C4C  lwz r4,7244(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1C4C, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0xFEC8120:  7D641214  add r11,r4,r2
+	   9: GETL       	R4, t8
+	  10: GETL       	R2, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0xFEC8124:  71200020  andi. r0,r9,0x20
+	  14: GETL       	R9, t12
+	  15: ANDL       	$0x20, t12
+	  16: PUTL       	t12, R0
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0xFEC8128:  41820010  bc 12,2,0xFEC8138
+	  20: Js02o       	$0xFEC8138
+
+
+
+. 2480 FEC8118 20
+. 81 3F 00 00 80 9E 1C 4C 7D 64 12 14 71 20 00 20 41 82 00 10
+==== BB 2481 (0xFEC8138) approx BBs exec'd 0 ====
+
+	0xFEC8138:  38C00000  li r6,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFEC813C:  7FAAEB78  or r10,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0xFEC8140:  7CDD19AE  stbx r6,r29,r3
+	   6: GETL       	R3, t4
+	   7: GETL       	R29, t6
+	   8: ADDL       	t6, t4
+	   9: GETL       	R6, t8
+	  10: STB       	t8, (t4)
+	  11: INCEIPL       	$4
+
+	0xFEC8144:  4BFFFFA0  b 0xFEC80E4
+	  12: JMPo       	$0xFEC80E4  ($4)
+
+
+
+. 2481 FEC8138 16
+. 38 C0 00 00 7F AA EB 78 7C DD 19 AE 4B FF FF A0
+==== BB 2482 (0xFEC80E4) approx BBs exec'd 0 ====
+
+	0xFEC80E4:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFEC80E8:  7D23E378  or r3,r9,r28
+	   4: GETL       	R9, t4
+	   5: GETL       	R28, t6
+	   6: ORL       	t6, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFEC80EC:  7D495378  or r9,r10,r10
+	   9: GETL       	R10, t8
+	  10: PUTL       	t8, R9
+	  11: INCEIPL       	$4
+
+	0xFEC80F0:  907F0000  stw r3,0(r31)
+	  12: GETL       	R3, t10
+	  13: GETL       	R31, t12
+	  14: STL       	t10, (t12)
+	  15: INCEIPL       	$4
+
+	0xFEC80F4:  80E10024  lwz r7,36(r1)
+	  16: GETL       	R1, t14
+	  17: ADDL       	$0x24, t14
+	  18: LDL       	(t14), t16
+	  19: PUTL       	t16, R7
+	  20: INCEIPL       	$4
+
+	0xFEC80F8:  7D234B78  or r3,r9,r9
+	  21: GETL       	R9, t18
+	  22: PUTL       	t18, R3
+	  23: INCEIPL       	$4
+
+	0xFEC80FC:  83810010  lwz r28,16(r1)
+	  24: GETL       	R1, t20
+	  25: ADDL       	$0x10, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R28
+	  28: INCEIPL       	$4
+
+	0xFEC8100:  83A10014  lwz r29,20(r1)
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0x14, t24
+	  31: LDL       	(t24), t26
+	  32: PUTL       	t26, R29
+	  33: INCEIPL       	$4
+
+	0xFEC8104:  7CE803A6  mtlr r7
+	  34: GETL       	R7, t28
+	  35: PUTL       	t28, LR
+	  36: INCEIPL       	$4
+
+	0xFEC8108:  83C10018  lwz r30,24(r1)
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x18, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R30
+	  41: INCEIPL       	$4
+
+	0xFEC810C:  83E1001C  lwz r31,28(r1)
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x1C, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R31
+	  46: INCEIPL       	$4
+
+	0xFEC8110:  38210020  addi r1,r1,32
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x20, t38
+	  49: PUTL       	t38, R1
+	  50: INCEIPL       	$4
+
+	0xFEC8114:  4E800020  blr
+	  51: GETL       	LR, t40
+	  52: JMPo-r       	t40  ($4)
+
+
+
+. 2482 FEC80E4 52
+. 81 3F 00 00 7D 23 E3 78 7D 49 53 78 90 7F 00 00 80 E1 00 24 7D 23 4B 78 83 81 00 10 83 A1 00 14 7C E8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2483 (0xFE8D0B8) approx BBs exec'd 0 ====
+
+	0xFE8D0B8:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D0BC:  419E033C  bc 12,30,0xFE8D3F8
+	   4: Js30o       	$0xFE8D3F8
+
+
+
+. 2483 FE8D0B8 8
+. 2F 83 00 00 41 9E 03 3C
+==== BB 2484 (0xFE8D0C0) approx BBs exec'd 0 ====
+
+	0xFE8D0C0:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D0C4:  3880000A  li r4,10
+	   3: MOVL       	$0xA, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8D0C8:  48049205  bl 0xFED62CC
+	   6: MOVL       	$0xFE8D0CC, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED62CC  ($4)
+
+
+
+. 2484 FE8D0C0 12
+. 7F 43 D3 78 38 80 00 0A 48 04 92 05
+==== BB 2485 (0xFED6348) approx BBs exec'd 0 ====
+
+	0xFED6348:  7CE62838  and r6,r7,r5
+	   0: GETL       	R7, t0
+	   1: GETL       	R5, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFED634C:  7CEB2B78  or r11,r7,r5
+	   5: GETL       	R7, t4
+	   6: GETL       	R5, t6
+	   7: ORL       	t6, t4
+	   8: PUTL       	t4, R11
+	   9: INCEIPL       	$4
+
+	0xFED6350:  7CE06038  and r0,r7,r12
+	  10: GETL       	R7, t8
+	  11: GETL       	R12, t10
+	  12: ANDL       	t8, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFED6354:  7CEA6378  or r10,r7,r12
+	  15: GETL       	R7, t12
+	  16: GETL       	R12, t14
+	  17: ORL       	t14, t12
+	  18: PUTL       	t12, R10
+	  19: INCEIPL       	$4
+
+	0xFED6358:  7CC63A14  add r6,r6,r7
+	  20: GETL       	R6, t16
+	  21: GETL       	R7, t18
+	  22: ADDL       	t16, t18
+	  23: PUTL       	t18, R6
+	  24: INCEIPL       	$4
+
+	0xFED635C:  7C003A14  add r0,r0,r7
+	  25: GETL       	R0, t20
+	  26: GETL       	R7, t22
+	  27: ADDL       	t20, t22
+	  28: PUTL       	t22, R0
+	  29: INCEIPL       	$4
+
+	0xFED6360:  7D6530F8  nor r5,r11,r6
+	  30: GETL       	R11, t24
+	  31: GETL       	R6, t26
+	  32: ORL       	t26, t24
+	  33: NOTL       	t24
+	  34: PUTL       	t24, R5
+	  35: INCEIPL       	$4
+
+	0xFED6364:  7D4900F8  nor r9,r10,r0
+	  36: GETL       	R10, t28
+	  37: GETL       	R0, t30
+	  38: ORL       	t30, t28
+	  39: NOTL       	t28
+	  40: PUTL       	t28, R9
+	  41: INCEIPL       	$4
+
+	0xFED6368:  7C054840  cmpl cr0,r5,r9
+	  42: GETL       	R5, t32
+	  43: GETL       	R9, t34
+	  44: CMPUL       	t32, t34, t36  (-rSo)
+	  45: ICRFL       	t36, $0x0, CR
+	  46: INCEIPL       	$4
+
+	0xFED636C:  4D810020  bclr 12,1
+	  47: GETL       	LR, t38
+	  48: Js01o-r       	t38
+
+
+
+. 2485 FED6348 40
+. 7C E6 28 38 7C EB 2B 78 7C E0 60 38 7C EA 63 78 7C C6 3A 14 7C 00 3A 14 7D 65 30 F8 7D 49 00 F8 7C 05 48 40 4D 81 00 20
+==== BB 2486 (0xFED6370) approx BBs exec'd 0 ====
+
+	0xFED6370:  7D240034  cntlzw r4,r9
+	   0: GETL       	R9, t0
+	   1: CNTLZL       	t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0xFED6374:  5484E8FE  rlwinm r4,r4,29,3,31
+	   4: GETL       	R4, t2
+	   5: SHRL       	$0x3, t2
+	   6: PUTL       	t2, R4
+	   7: INCEIPL       	$4
+
+	0xFED6378:  7C682214  add r3,r8,r4
+	   8: GETL       	R8, t4
+	   9: GETL       	R4, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0xFED637C:  4E800020  blr
+	  13: GETL       	LR, t8
+	  14: JMPo-r       	t8  ($4)
+
+
+
+. 2486 FED6370 16
+. 7D 24 00 34 54 84 E8 FE 7C 68 22 14 4E 80 00 20
+==== BB 2487 (0xFE8D0CC) approx BBs exec'd 0 ====
+
+	0xFE8D0CC:  7D121214  add r8,r18,r2
+	   0: GETL       	R18, t0
+	   1: GETL       	R2, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFE8D0D0:  3003FFFF  addic r0,r3,-1
+	   5: GETL       	R3, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE8D0D4:  7E201910  subfe r17,r0,r3
+	   9: GETL       	R0, t6
+	  10: GETL       	R3, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R17
+	  13: INCEIPL       	$4
+
+	0xFE8D0D8:  7F43D378  or r3,r26,r26
+	  14: GETL       	R26, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFE8D0DC:  48000008  b 0xFE8D0E4
+	  17: JMPo       	$0xFE8D0E4  ($4)
+
+
+
+. 2487 FE8D0CC 20
+. 7D 12 12 14 30 03 FF FF 7E 20 19 10 7F 43 D3 78 48 00 00 08
+==== BB 2488 (0xFE8D0E4) approx BBs exec'd 0 ====
+
+	0xFE8D0E4:  82680000  lwz r19,0(r8)
+	   0: GETL       	R8, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R19
+	   3: INCEIPL       	$4
+
+	0xFE8D0E8:  2C930000  cmpi cr1,r19,0
+	   4: GETL       	R19, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFE8D0EC:  41860380  bc 12,6,0xFE8D46C
+	   8: Js06o       	$0xFE8D46C
+
+
+
+. 2488 FE8D0E4 12
+. 82 68 00 00 2C 93 00 00 41 86 03 80
+==== BB 2489 (0xFE8D0F0) approx BBs exec'd 0 ====
+
+	0xFE8D0F0:  89430000  lbz r10,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE8D0F4:  83280000  lwz r25,0(r8)
+	   4: GETL       	R8, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R25
+	   7: INCEIPL       	$4
+
+	0xFE8D0F8:  555C083C  rlwinm r28,r10,1,0,30
+	   8: GETL       	R10, t8
+	   9: SHLL       	$0x1, t8
+	  10: PUTL       	t8, R28
+	  11: INCEIPL       	$4
+
+	0xFE8D0FC:  7F1CCA2E  lhzx r24,r28,r25
+	  12: GETL       	R25, t10
+	  13: GETL       	R28, t12
+	  14: ADDL       	t12, t10
+	  15: LDW       	(t10), t14
+	  16: PUTL       	t14, R24
+	  17: INCEIPL       	$4
+
+	0xFE8D100:  73090020  andi. r9,r24,0x20
+	  18: GETL       	R24, t16
+	  19: ANDL       	$0x20, t16
+	  20: PUTL       	t16, R9
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0xFE8D104:  4082FFDC  bc 4,2,0xFE8D0E0
+	  24: Jc02o       	$0xFE8D0E0
+
+
+
+. 2489 FE8D0F0 24
+. 89 43 00 00 83 28 00 00 55 5C 08 3C 7F 1C CA 2E 73 09 00 20 40 82 FF DC
+==== BB 2490 (0xFE8D108) approx BBs exec'd 0 ====
+
+	0xFE8D108:  2F0A0000  cmpi cr6,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D10C:  419A02D8  bc 12,26,0xFE8D3E4
+	   4: Js26o       	$0xFE8D3E4
+
+
+
+. 2490 FE8D108 8
+. 2F 0A 00 00 41 9A 02 D8
+==== BB 2491 (0xFE8D110) approx BBs exec'd 0 ====
+
+	0xFE8D110:  2F8A0023  cmpi cr7,r10,35
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x23, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE8D114:  7C761B78  or r22,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R22
+	   7: INCEIPL       	$4
+
+	0xFE8D118:  7D121214  add r8,r18,r2
+	   8: GETL       	R18, t8
+	   9: GETL       	R2, t10
+	  10: ADDL       	t8, t10
+	  11: PUTL       	t10, R8
+	  12: INCEIPL       	$4
+
+	0xFE8D11C:  38630001  addi r3,r3,1
+	  13: GETL       	R3, t12
+	  14: ADDL       	$0x1, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0xFE8D120:  40BE0020  bc 5,30,0xFE8D140
+	  17: Jc30o       	$0xFE8D140
+
+
+
+. 2491 FE8D110 20
+. 2F 8A 00 23 7C 76 1B 78 7D 12 12 14 38 63 00 01 40 BE 00 20
+==== BB 2492 (0xFE8D124) approx BBs exec'd 0 ====
+
+	0xFE8D124:  480002C0  b 0xFE8D3E4
+	   0: JMPo       	$0xFE8D3E4  ($4)
+
+
+
+. 2492 FE8D124 4
+. 48 00 02 C0
+==== BB 2493 (0xFE8D3E4) approx BBs exec'd 0 ====
+
+	0xFE8D3E4:  2C110000  cmpi cr0,r17,0
+	   0: GETL       	R17, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D3E8:  4182009C  bc 12,2,0xFE8D484
+	   4: Js02o       	$0xFE8D484
+
+
+
+. 2493 FE8D3E4 8
+. 2C 11 00 00 41 82 00 9C
+==== BB 2494 (0xFE8D3EC) approx BBs exec'd 0 ====
+
+	0xFE8D3EC:  823B0000  lwz r17,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R17
+	   3: INCEIPL       	$4
+
+	0xFE8D3F0:  72290010  andi. r9,r17,0x10
+	   4: GETL       	R17, t4
+	   5: ANDL       	$0x10, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE8D3F4:  4182FCB4  bc 12,2,0xFE8D0A8
+	  10: Js02o       	$0xFE8D0A8
+
+
+
+. 2494 FE8D3EC 12
+. 82 3B 00 00 72 29 00 10 41 82 FC B4
+==== BB 2495 (0xFE8D0A8) approx BBs exec'd 0 ====
+
+	0xFE8D0A8:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D0AC:  38800190  li r4,400
+	   3: MOVL       	$0x190, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8D0B0:  7F65DB78  or r5,r27,r27
+	   6: GETL       	R27, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFE8D0B4:  4803AFC5  bl 0xFEC8078
+	   9: MOVL       	$0xFE8D0B8, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFEC8078  ($4)
+
+
+
+. 2495 FE8D0A8 16
+. 7F 43 D3 78 38 80 01 90 7F 65 DB 78 48 03 AF C5
+==== BB 2496 (0xFED7854) approx BBs exec'd 0 ====
+
+	0xFED7854:  7D234B78  or r3,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED7858:  38210010  addi r1,r1,16
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x10, t2
+	   5: PUTL       	t2, R1
+	   6: INCEIPL       	$4
+
+	0xFED785C:  4E800020  blr
+	   7: GETL       	LR, t4
+	   8: JMPo-r       	t4  ($4)
+
+
+
+. 2496 FED7854 12
+. 7D 23 4B 78 38 21 00 10 4E 80 00 20
+==== BB 2497 (0xFED87A8) approx BBs exec'd 0 ====
+
+	0xFED87A8:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED87AC:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFED87B0:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0xFED87B4:  4800015D  bl 0xFED8910
+	  10: MOVL       	$0xFED87B8, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0xFED8910  ($4)
+
+
+
+. 2497 FED87A8 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 01 5D
+==== BB 2498 _wordcopy_fwd_dest_aligned(0xFED8910) approx BBs exec'd 0 ====
+
+	0xFED8910:  54A007BE  rlwinm r0,r5,0,30,31
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xFED8914:  54881EF8  rlwinm r8,r4,3,27,28
+	   4: GETL       	R4, t2
+	   5: ROLL       	$0x3, t2
+	   6: ANDL       	$0x18, t2
+	   7: PUTL       	t2, R8
+	   8: INCEIPL       	$4
+
+	0xFED8918:  2F800001  cmpi cr7,r0,1
+	   9: GETL       	R0, t4
+	  10: MOVL       	$0x1, t8
+	  11: CMPL       	t4, t8, t6  (-rSo)
+	  12: ICRFL       	t6, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0xFED891C:  9421FFF0  stwu r1,-16(r1)
+	  14: GETL       	R1, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0xFFFFFFF0, t12
+	  17: PUTL       	t12, R1
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0xFED8920:  5484003A  rlwinm r4,r4,0,0,29
+	  20: GETL       	R4, t14
+	  21: ANDL       	$0xFFFFFFFC, t14
+	  22: PUTL       	t14, R4
+	  23: INCEIPL       	$4
+
+	0xFED8924:  20E80020  subfic r7,r8,32
+	  24: GETL       	R8, t16
+	  25: MOVL       	$0x20, t18
+	  26: SBBL       	t16, t18  (-wCa)
+	  27: PUTL       	t18, R7
+	  28: INCEIPL       	$4
+
+	0xFED8928:  419E00B8  bc 12,30,0xFED89E0
+	  29: Js30o       	$0xFED89E0
+
+
+
+. 2498 FED8910 28
+. 54 A0 07 BE 54 88 1E F8 2F 80 00 01 94 21 FF F0 54 84 00 3A 20 E8 00 20 41 9E 00 B8
+==== BB 2499 (0xFED892C) approx BBs exec'd 0 ====
+
+	0xFED892C:  28000001  cmpli cr0,r0,1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFED8930:  4180008C  bc 12,0,0xFED89BC
+	   5: Js00o       	$0xFED89BC
+
+
+
+. 2499 FED892C 8
+. 28 00 00 01 41 80 00 8C
+==== BB 2500 (0xFED8934) approx BBs exec'd 0 ====
+
+	0xFED8934:  2C800002  cmpi cr1,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFED8938:  418600BC  bc 12,6,0xFED89F4
+	   5: Js06o       	$0xFED89F4
+
+
+
+. 2500 FED8934 8
+. 2C 80 00 02 41 86 00 BC
+==== BB 2501 (0xFED893C) approx BBs exec'd 0 ====
+
+	0xFED893C:  2F000003  cmpi cr6,r0,3
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x3, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFED8940:  419A008C  bc 12,26,0xFED89CC
+	   5: Js26o       	$0xFED89CC
+
+
+
+. 2501 FED893C 8
+. 2F 00 00 03 41 9A 00 8C
+==== BB 2502 (0xFED89CC) approx BBs exec'd 0 ====
+
+	0xFED89CC:  81640000  lwz r11,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFED89D0:  3863FFF8  addi r3,r3,-8
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFF8, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED89D4:  81440004  lwz r10,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0xFED89D8:  38A50001  addi r5,r5,1
+	  13: GETL       	R5, t10
+	  14: ADDL       	$0x1, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0xFED89DC:  4BFFFF90  b 0xFED896C
+	  17: JMPo       	$0xFED896C  ($4)
+
+
+
+. 2502 FED89CC 20
+. 81 64 00 00 38 63 FF F8 81 44 00 04 38 A5 00 01 4B FF FF 90
+==== BB 2503 (0xFED896C) approx BBs exec'd 0 ====
+
+	0xFED896C:  7D6C4030  slw r12,r11,r8
+	   0: GETL       	R11, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFED8970:  7D4B3C30  srw r11,r10,r7
+	   5: GETL       	R10, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0xFED8974:  7D865B78  or r6,r12,r11
+	  10: GETL       	R12, t8
+	  11: GETL       	R11, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R6
+	  14: INCEIPL       	$4
+
+	0xFED8978:  81640008  lwz r11,8(r4)
+	  15: GETL       	R4, t12
+	  16: ADDL       	$0x8, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R11
+	  19: INCEIPL       	$4
+
+	0xFED897C:  90C30008  stw r6,8(r3)
+	  20: GETL       	R6, t16
+	  21: GETL       	R3, t18
+	  22: ADDL       	$0x8, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0xFED8980:  34A5FFFC  addic. r5,r5,-4
+	  25: GETL       	R5, t20
+	  26: ADCL       	$0xFFFFFFFC, t20  (-wCa)
+	  27: PUTL       	t20, R5
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0xFED8984:  7D464030  slw r6,r10,r8
+	  31: GETL       	R10, t26
+	  32: GETL       	R8, t24
+	  33: SHLL       	t24, t26
+	  34: PUTL       	t26, R6
+	  35: INCEIPL       	$4
+
+	0xFED8988:  7D693C30  srw r9,r11,r7
+	  36: GETL       	R11, t30
+	  37: GETL       	R7, t28
+	  38: SHRL       	t28, t30
+	  39: PUTL       	t30, R9
+	  40: INCEIPL       	$4
+
+	0xFED898C:  8144000C  lwz r10,12(r4)
+	  41: GETL       	R4, t32
+	  42: ADDL       	$0xC, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R10
+	  45: INCEIPL       	$4
+
+	0xFED8990:  7CC04B78  or r0,r6,r9
+	  46: GETL       	R6, t36
+	  47: GETL       	R9, t38
+	  48: ORL       	t38, t36
+	  49: PUTL       	t36, R0
+	  50: INCEIPL       	$4
+
+	0xFED8994:  38840010  addi r4,r4,16
+	  51: GETL       	R4, t40
+	  52: ADDL       	$0x10, t40
+	  53: PUTL       	t40, R4
+	  54: INCEIPL       	$4
+
+	0xFED8998:  9003000C  stw r0,12(r3)
+	  55: GETL       	R0, t42
+	  56: GETL       	R3, t44
+	  57: ADDL       	$0xC, t44
+	  58: STL       	t42, (t44)
+	  59: INCEIPL       	$4
+
+	0xFED899C:  38630010  addi r3,r3,16
+	  60: GETL       	R3, t46
+	  61: ADDL       	$0x10, t46
+	  62: PUTL       	t46, R3
+	  63: INCEIPL       	$4
+
+	0xFED89A0:  40A2FFA4  bc 5,2,0xFED8944
+	  64: Jc02o       	$0xFED8944
+
+
+
+. 2503 FED896C 56
+. 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+==== BB 2504 (0xFED8944) approx BBs exec'd 0 ====
+
+	0xFED8944:  7D664030  slw r6,r11,r8
+	   0: GETL       	R11, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFED8948:  7D493C30  srw r9,r10,r7
+	   5: GETL       	R10, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFED894C:  7CC04B78  or r0,r6,r9
+	  10: GETL       	R6, t8
+	  11: GETL       	R9, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R0
+	  14: INCEIPL       	$4
+
+	0xFED8950:  81640000  lwz r11,0(r4)
+	  15: GETL       	R4, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R11
+	  18: INCEIPL       	$4
+
+	0xFED8954:  90030000  stw r0,0(r3)
+	  19: GETL       	R0, t16
+	  20: GETL       	R3, t18
+	  21: STL       	t16, (t18)
+	  22: INCEIPL       	$4
+
+	0xFED8958:  7D404030  slw r0,r10,r8
+	  23: GETL       	R10, t22
+	  24: GETL       	R8, t20
+	  25: SHLL       	t20, t22
+	  26: PUTL       	t22, R0
+	  27: INCEIPL       	$4
+
+	0xFED895C:  7D6A3C30  srw r10,r11,r7
+	  28: GETL       	R11, t26
+	  29: GETL       	R7, t24
+	  30: SHRL       	t24, t26
+	  31: PUTL       	t26, R10
+	  32: INCEIPL       	$4
+
+	0xFED8960:  7C0C5378  or r12,r0,r10
+	  33: GETL       	R0, t28
+	  34: GETL       	R10, t30
+	  35: ORL       	t30, t28
+	  36: PUTL       	t28, R12
+	  37: INCEIPL       	$4
+
+	0xFED8964:  81440004  lwz r10,4(r4)
+	  38: GETL       	R4, t32
+	  39: ADDL       	$0x4, t32
+	  40: LDL       	(t32), t34
+	  41: PUTL       	t34, R10
+	  42: INCEIPL       	$4
+
+	0xFED8968:  91830004  stw r12,4(r3)
+	  43: GETL       	R12, t36
+	  44: GETL       	R3, t38
+	  45: ADDL       	$0x4, t38
+	  46: STL       	t36, (t38)
+	  47: INCEIPL       	$4
+
+	0xFED896C:  7D6C4030  slw r12,r11,r8
+	  48: GETL       	R11, t42
+	  49: GETL       	R8, t40
+	  50: SHLL       	t40, t42
+	  51: PUTL       	t42, R12
+	  52: INCEIPL       	$4
+
+	0xFED8970:  7D4B3C30  srw r11,r10,r7
+	  53: GETL       	R10, t46
+	  54: GETL       	R7, t44
+	  55: SHRL       	t44, t46
+	  56: PUTL       	t46, R11
+	  57: INCEIPL       	$4
+
+	0xFED8974:  7D865B78  or r6,r12,r11
+	  58: GETL       	R12, t48
+	  59: GETL       	R11, t50
+	  60: ORL       	t50, t48
+	  61: PUTL       	t48, R6
+	  62: INCEIPL       	$4
+
+	0xFED8978:  81640008  lwz r11,8(r4)
+	  63: GETL       	R4, t52
+	  64: ADDL       	$0x8, t52
+	  65: LDL       	(t52), t54
+	  66: PUTL       	t54, R11
+	  67: INCEIPL       	$4
+
+	0xFED897C:  90C30008  stw r6,8(r3)
+	  68: GETL       	R6, t56
+	  69: GETL       	R3, t58
+	  70: ADDL       	$0x8, t58
+	  71: STL       	t56, (t58)
+	  72: INCEIPL       	$4
+
+	0xFED8980:  34A5FFFC  addic. r5,r5,-4
+	  73: GETL       	R5, t60
+	  74: ADCL       	$0xFFFFFFFC, t60  (-wCa)
+	  75: PUTL       	t60, R5
+	  76: CMP0L       	t60, t62  (-rSo)
+	  77: ICRFL       	t62, $0x0, CR
+	  78: INCEIPL       	$4
+
+	0xFED8984:  7D464030  slw r6,r10,r8
+	  79: GETL       	R10, t66
+	  80: GETL       	R8, t64
+	  81: SHLL       	t64, t66
+	  82: PUTL       	t66, R6
+	  83: INCEIPL       	$4
+
+	0xFED8988:  7D693C30  srw r9,r11,r7
+	  84: GETL       	R11, t70
+	  85: GETL       	R7, t68
+	  86: SHRL       	t68, t70
+	  87: PUTL       	t70, R9
+	  88: INCEIPL       	$4
+
+	0xFED898C:  8144000C  lwz r10,12(r4)
+	  89: GETL       	R4, t72
+	  90: ADDL       	$0xC, t72
+	  91: LDL       	(t72), t74
+	  92: PUTL       	t74, R10
+	  93: INCEIPL       	$4
+
+	0xFED8990:  7CC04B78  or r0,r6,r9
+	  94: GETL       	R6, t76
+	  95: GETL       	R9, t78
+	  96: ORL       	t78, t76
+	  97: PUTL       	t76, R0
+	  98: INCEIPL       	$4
+
+	0xFED8994:  38840010  addi r4,r4,16
+	  99: GETL       	R4, t80
+	 100: ADDL       	$0x10, t80
+	 101: PUTL       	t80, R4
+	 102: INCEIPL       	$4
+
+	0xFED8998:  9003000C  stw r0,12(r3)
+	 103: GETL       	R0, t82
+	 104: GETL       	R3, t84
+	 105: ADDL       	$0xC, t84
+	 106: STL       	t82, (t84)
+	 107: INCEIPL       	$4
+
+	0xFED899C:  38630010  addi r3,r3,16
+	 108: GETL       	R3, t86
+	 109: ADDL       	$0x10, t86
+	 110: PUTL       	t86, R3
+	 111: INCEIPL       	$4
+
+	0xFED89A0:  40A2FFA4  bc 5,2,0xFED8944
+	 112: Jc02o       	$0xFED8944
+
+
+
+. 2504 FED8944 96
+. 7D 66 40 30 7D 49 3C 30 7C C0 4B 78 81 64 00 00 90 03 00 00 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+==== BB 2505 (0xFED89A4) approx BBs exec'd 0 ====
+
+	0xFED89A4:  7D654030  slw r5,r11,r8
+	   0: GETL       	R11, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFED89A8:  7D483C30  srw r8,r10,r7
+	   5: GETL       	R10, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0xFED89AC:  7CA44378  or r4,r5,r8
+	  10: GETL       	R5, t8
+	  11: GETL       	R8, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0xFED89B0:  38210010  addi r1,r1,16
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x10, t12
+	  17: PUTL       	t12, R1
+	  18: INCEIPL       	$4
+
+	0xFED89B4:  90830000  stw r4,0(r3)
+	  19: GETL       	R4, t14
+	  20: GETL       	R3, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFED89B8:  4E800020  blr
+	  23: GETL       	LR, t18
+	  24: JMPo-r       	t18  ($4)
+
+
+
+. 2505 FED89A4 24
+. 7D 65 40 30 7D 48 3C 30 7C A4 43 78 38 21 00 10 90 83 00 00 4E 80 00 20
+==== BB 2506 (0xFED87B8) approx BBs exec'd 0 ====
+
+	0xFED87B8:  4BFFFF98  b 0xFED8750
+	   0: JMPo       	$0xFED8750  ($4)
+
+
+
+. 2506 FED87B8 4
+. 4B FF FF 98
+==== BB 2507 (0xFED7840) approx BBs exec'd 0 ====
+
+	0xFED7840:  38210010  addi r1,r1,16
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R1
+	   3: INCEIPL       	$4
+
+	0xFED7844:  4E800020  blr
+	   4: GETL       	LR, t2
+	   5: JMPo-r       	t2  ($4)
+
+
+
+. 2507 FED7840 8
+. 38 21 00 10 4E 80 00 20
+==== BB 2508 (0xFED7860) approx BBs exec'd 0 ====
+
+	0xFED7860:  3863FFFD  addi r3,r3,-3
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFD, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFED7864:  38210010  addi r1,r1,16
+	   4: GETL       	R1, t2
+	   5: ADDL       	$0x10, t2
+	   6: PUTL       	t2, R1
+	   7: INCEIPL       	$4
+
+	0xFED7868:  4E800020  blr
+	   8: GETL       	LR, t4
+	   9: JMPo-r       	t4  ($4)
+
+
+
+. 2508 FED7860 12
+. 38 63 FF FD 38 21 00 10 4E 80 00 20
+==== BB 2509 (0xFED89E0) approx BBs exec'd 0 ====
+
+	0xFED89E0:  81640000  lwz r11,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFED89E4:  38A5FFFF  addi r5,r5,-1
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFED89E8:  81440004  lwz r10,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0xFED89EC:  38840008  addi r4,r4,8
+	  13: GETL       	R4, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0xFED89F0:  4BFFFF54  b 0xFED8944
+	  17: JMPo       	$0xFED8944  ($4)
+
+
+
+. 2509 FED89E0 20
+. 81 64 00 00 38 A5 FF FF 81 44 00 04 38 84 00 08 4B FF FF 54
+==== BB 2510 (0xFED7804) approx BBs exec'd 0 ====
+
+	0xFED7804:  88C90003  lbz r6,3(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x3, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFED7808:  7F862000  cmp cr7,r6,r4
+	   5: GETL       	R6, t4
+	   6: GETL       	R4, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFED780C:  419E003C  bc 12,30,0xFED7848
+	  10: Js30o       	$0xFED7848
+
+
+
+. 2510 FED7804 12
+. 88 C9 00 03 7F 86 20 00 41 9E 00 3C
+==== BB 2511 (0xFED7848) approx BBs exec'd 0 ====
+
+	0xFED7848:  3863FFFF  addi r3,r3,-1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFED784C:  38210010  addi r1,r1,16
+	   4: GETL       	R1, t2
+	   5: ADDL       	$0x10, t2
+	   6: PUTL       	t2, R1
+	   7: INCEIPL       	$4
+
+	0xFED7850:  4E800020  blr
+	   8: GETL       	LR, t4
+	   9: JMPo-r       	t4  ($4)
+
+
+
+. 2511 FED7848 12
+. 38 63 FF FF 38 21 00 10 4E 80 00 20
+==== BB 2512 (0xFED89BC) approx BBs exec'd 0 ====
+
+	0xFED89BC:  81440000  lwz r10,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFED89C0:  3863FFFC  addi r3,r3,-4
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFFC, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED89C4:  85640004  lwzu r11,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: PUTL       	t6, R4
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0xFED89C8:  4BFFFF90  b 0xFED8958
+	  14: JMPo       	$0xFED8958  ($4)
+
+
+
+. 2512 FED89BC 16
+. 81 44 00 00 38 63 FF FC 85 64 00 04 4B FF FF 90
+==== BB 2513 (0xFED8958) approx BBs exec'd 0 ====
+
+	0xFED8958:  7D404030  slw r0,r10,r8
+	   0: GETL       	R10, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED895C:  7D6A3C30  srw r10,r11,r7
+	   5: GETL       	R11, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0xFED8960:  7C0C5378  or r12,r0,r10
+	  10: GETL       	R0, t8
+	  11: GETL       	R10, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0xFED8964:  81440004  lwz r10,4(r4)
+	  15: GETL       	R4, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R10
+	  19: INCEIPL       	$4
+
+	0xFED8968:  91830004  stw r12,4(r3)
+	  20: GETL       	R12, t16
+	  21: GETL       	R3, t18
+	  22: ADDL       	$0x4, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0xFED896C:  7D6C4030  slw r12,r11,r8
+	  25: GETL       	R11, t22
+	  26: GETL       	R8, t20
+	  27: SHLL       	t20, t22
+	  28: PUTL       	t22, R12
+	  29: INCEIPL       	$4
+
+	0xFED8970:  7D4B3C30  srw r11,r10,r7
+	  30: GETL       	R10, t26
+	  31: GETL       	R7, t24
+	  32: SHRL       	t24, t26
+	  33: PUTL       	t26, R11
+	  34: INCEIPL       	$4
+
+	0xFED8974:  7D865B78  or r6,r12,r11
+	  35: GETL       	R12, t28
+	  36: GETL       	R11, t30
+	  37: ORL       	t30, t28
+	  38: PUTL       	t28, R6
+	  39: INCEIPL       	$4
+
+	0xFED8978:  81640008  lwz r11,8(r4)
+	  40: GETL       	R4, t32
+	  41: ADDL       	$0x8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R11
+	  44: INCEIPL       	$4
+
+	0xFED897C:  90C30008  stw r6,8(r3)
+	  45: GETL       	R6, t36
+	  46: GETL       	R3, t38
+	  47: ADDL       	$0x8, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0xFED8980:  34A5FFFC  addic. r5,r5,-4
+	  50: GETL       	R5, t40
+	  51: ADCL       	$0xFFFFFFFC, t40  (-wCa)
+	  52: PUTL       	t40, R5
+	  53: CMP0L       	t40, t42  (-rSo)
+	  54: ICRFL       	t42, $0x0, CR
+	  55: INCEIPL       	$4
+
+	0xFED8984:  7D464030  slw r6,r10,r8
+	  56: GETL       	R10, t46
+	  57: GETL       	R8, t44
+	  58: SHLL       	t44, t46
+	  59: PUTL       	t46, R6
+	  60: INCEIPL       	$4
+
+	0xFED8988:  7D693C30  srw r9,r11,r7
+	  61: GETL       	R11, t50
+	  62: GETL       	R7, t48
+	  63: SHRL       	t48, t50
+	  64: PUTL       	t50, R9
+	  65: INCEIPL       	$4
+
+	0xFED898C:  8144000C  lwz r10,12(r4)
+	  66: GETL       	R4, t52
+	  67: ADDL       	$0xC, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R10
+	  70: INCEIPL       	$4
+
+	0xFED8990:  7CC04B78  or r0,r6,r9
+	  71: GETL       	R6, t56
+	  72: GETL       	R9, t58
+	  73: ORL       	t58, t56
+	  74: PUTL       	t56, R0
+	  75: INCEIPL       	$4
+
+	0xFED8994:  38840010  addi r4,r4,16
+	  76: GETL       	R4, t60
+	  77: ADDL       	$0x10, t60
+	  78: PUTL       	t60, R4
+	  79: INCEIPL       	$4
+
+	0xFED8998:  9003000C  stw r0,12(r3)
+	  80: GETL       	R0, t62
+	  81: GETL       	R3, t64
+	  82: ADDL       	$0xC, t64
+	  83: STL       	t62, (t64)
+	  84: INCEIPL       	$4
+
+	0xFED899C:  38630010  addi r3,r3,16
+	  85: GETL       	R3, t66
+	  86: ADDL       	$0x10, t66
+	  87: PUTL       	t66, R3
+	  88: INCEIPL       	$4
+
+	0xFED89A0:  40A2FFA4  bc 5,2,0xFED8944
+	  89: Jc02o       	$0xFED8944
+
+
+
+. 2513 FED8958 76
+. 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+==== BB 2514 (0xFED888C) approx BBs exec'd 0 ====
+
+	0xFED888C:  81240000  lwz r9,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFED8890:  3863FFE8  addi r3,r3,-24
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFE8, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED8894:  3884FFEC  addi r4,r4,-20
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0xFFFFFFEC, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0xFED8898:  38A50005  addi r5,r5,5
+	  12: GETL       	R5, t8
+	  13: ADDL       	$0x5, t8
+	  14: PUTL       	t8, R5
+	  15: INCEIPL       	$4
+
+	0xFED889C:  4BFFFF98  b 0xFED8834
+	  16: JMPo       	$0xFED8834  ($4)
+
+
+
+. 2514 FED888C 20
+. 81 24 00 00 38 63 FF E8 38 84 FF EC 38 A5 00 05 4B FF FF 98
+==== BB 2515 (0xFE8D0E0) approx BBs exec'd 0 ====
+
+	0xFE8D0E0:  38630001  addi r3,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE8D0E4:  82680000  lwz r19,0(r8)
+	   4: GETL       	R8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R19
+	   7: INCEIPL       	$4
+
+	0xFE8D0E8:  2C930000  cmpi cr1,r19,0
+	   8: GETL       	R19, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0xFE8D0EC:  41860380  bc 12,6,0xFE8D46C
+	  12: Js06o       	$0xFE8D46C
+
+
+
+. 2515 FE8D0E0 16
+. 38 63 00 01 82 68 00 00 2C 93 00 00 41 86 03 80
+==== BB 2516 (0xFED89F4) approx BBs exec'd 0 ====
+
+	0xFED89F4:  81440000  lwz r10,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFED89F8:  3863FFF4  addi r3,r3,-12
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFF4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED89FC:  81640004  lwz r11,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0xFED8A00:  38A50002  addi r5,r5,2
+	  13: GETL       	R5, t10
+	  14: ADDL       	$0x2, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0xFED8A04:  3884FFFC  addi r4,r4,-4
+	  17: GETL       	R4, t12
+	  18: ADDL       	$0xFFFFFFFC, t12
+	  19: PUTL       	t12, R4
+	  20: INCEIPL       	$4
+
+	0xFED8A08:  4BFFFF78  b 0xFED8980
+	  21: JMPo       	$0xFED8980  ($4)
+
+
+
+. 2516 FED89F4 24
+. 81 44 00 00 38 63 FF F4 81 64 00 04 38 A5 00 02 38 84 FF FC 4B FF FF 78
+==== BB 2517 (0xFED8980) approx BBs exec'd 0 ====
+
+	0xFED8980:  34A5FFFC  addic. r5,r5,-4
+	   0: GETL       	R5, t0
+	   1: ADCL       	$0xFFFFFFFC, t0  (-wCa)
+	   2: PUTL       	t0, R5
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED8984:  7D464030  slw r6,r10,r8
+	   6: GETL       	R10, t6
+	   7: GETL       	R8, t4
+	   8: SHLL       	t4, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0xFED8988:  7D693C30  srw r9,r11,r7
+	  11: GETL       	R11, t10
+	  12: GETL       	R7, t8
+	  13: SHRL       	t8, t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0xFED898C:  8144000C  lwz r10,12(r4)
+	  16: GETL       	R4, t12
+	  17: ADDL       	$0xC, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R10
+	  20: INCEIPL       	$4
+
+	0xFED8990:  7CC04B78  or r0,r6,r9
+	  21: GETL       	R6, t16
+	  22: GETL       	R9, t18
+	  23: ORL       	t18, t16
+	  24: PUTL       	t16, R0
+	  25: INCEIPL       	$4
+
+	0xFED8994:  38840010  addi r4,r4,16
+	  26: GETL       	R4, t20
+	  27: ADDL       	$0x10, t20
+	  28: PUTL       	t20, R4
+	  29: INCEIPL       	$4
+
+	0xFED8998:  9003000C  stw r0,12(r3)
+	  30: GETL       	R0, t22
+	  31: GETL       	R3, t24
+	  32: ADDL       	$0xC, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0xFED899C:  38630010  addi r3,r3,16
+	  35: GETL       	R3, t26
+	  36: ADDL       	$0x10, t26
+	  37: PUTL       	t26, R3
+	  38: INCEIPL       	$4
+
+	0xFED89A0:  40A2FFA4  bc 5,2,0xFED8944
+	  39: Jc02o       	$0xFED8944
+
+
+
+. 2517 FED8980 36
+. 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+==== BB 2518 (0xFE8D140) approx BBs exec'd 0 ====
+
+	0xFE8D140:  89430000  lbz r10,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE8D144:  2C8A0000  cmpi cr1,r10,0
+	   4: GETL       	R10, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFE8D148:  4186003C  bc 12,6,0xFE8D184
+	   8: Js06o       	$0xFE8D184
+
+
+
+. 2518 FE8D140 12
+. 89 43 00 00 2C 8A 00 00 41 86 00 3C
+==== BB 2519 (0xFE8D14C) approx BBs exec'd 0 ====
+
+	0xFE8D14C:  83A80000  lwz r29,0(r8)
+	   0: GETL       	R8, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0xFE8D150:  2C1D0000  cmpi cr0,r29,0
+	   4: GETL       	R29, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE8D154:  4082FFD4  bc 4,2,0xFE8D128
+	   8: Jc02o       	$0xFE8D128
+
+
+
+. 2519 FE8D14C 12
+. 83 A8 00 00 2C 1D 00 00 40 82 FF D4
+==== BB 2520 (0xFE8D128) approx BBs exec'd 0 ====
+
+	0xFE8D128:  80C80000  lwz r6,0(r8)
+	   0: GETL       	R8, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFE8D12C:  5547083C  rlwinm r7,r10,1,0,30
+	   4: GETL       	R10, t4
+	   5: SHLL       	$0x1, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0xFE8D130:  7C07322E  lhzx r0,r7,r6
+	   8: GETL       	R6, t6
+	   9: GETL       	R7, t8
+	  10: ADDL       	t8, t6
+	  11: LDW       	(t6), t10
+	  12: PUTL       	t10, R0
+	  13: INCEIPL       	$4
+
+	0xFE8D134:  700B0020  andi. r11,r0,0x20
+	  14: GETL       	R0, t12
+	  15: ANDL       	$0x20, t12
+	  16: PUTL       	t12, R11
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0xFE8D138:  4082004C  bc 4,2,0xFE8D184
+	  20: Jc02o       	$0xFE8D184
+
+
+
+. 2520 FE8D128 20
+. 80 C8 00 00 55 47 08 3C 7C 07 32 2E 70 0B 00 20 40 82 00 4C
+==== BB 2521 (0xFE8D13C) approx BBs exec'd 0 ====
+
+	0xFE8D13C:  38630001  addi r3,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE8D140:  89430000  lbz r10,0(r3)
+	   4: GETL       	R3, t2
+	   5: LDB       	(t2), t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFE8D144:  2C8A0000  cmpi cr1,r10,0
+	   8: GETL       	R10, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0xFE8D148:  4186003C  bc 12,6,0xFE8D184
+	  12: Js06o       	$0xFE8D184
+
+
+
+. 2521 FE8D13C 16
+. 38 63 00 01 89 43 00 00 2C 8A 00 00 41 86 00 3C
+==== BB 2522 (0xFE8D184) approx BBs exec'd 0 ====
+
+	0xFE8D184:  2F0A0000  cmpi cr6,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D188:  419A0010  bc 12,26,0xFE8D198
+	   4: Js26o       	$0xFE8D198
+
+
+
+. 2522 FE8D184 8
+. 2F 0A 00 00 41 9A 00 10
+==== BB 2523 (0xFE8D18C) approx BBs exec'd 0 ====
+
+	0xFE8D18C:  39000000  li r8,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0xFE8D190:  99030000  stb r8,0(r3)
+	   3: GETL       	R8, t2
+	   4: GETL       	R3, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFE8D194:  38630001  addi r3,r3,1
+	   7: GETL       	R3, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFE8D198:  7D121214  add r8,r18,r2
+	  11: GETL       	R18, t8
+	  12: GETL       	R2, t10
+	  13: ADDL       	t8, t10
+	  14: PUTL       	t10, R8
+	  15: INCEIPL       	$4
+
+	0xFE8D19C:  48000020  b 0xFE8D1BC
+	  16: JMPo       	$0xFE8D1BC  ($4)
+
+
+
+. 2523 FE8D18C 20
+. 39 00 00 00 99 03 00 00 38 63 00 01 7D 12 12 14 48 00 00 20
+==== BB 2524 (0xFE8D1BC) approx BBs exec'd 0 ====
+
+	0xFE8D1BC:  81880000  lwz r12,0(r8)
+	   0: GETL       	R8, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R12
+	   3: INCEIPL       	$4
+
+	0xFE8D1C0:  2F8C0000  cmpi cr7,r12,0
+	   4: GETL       	R12, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFE8D1C4:  409EFFDC  bc 4,30,0xFE8D1A0
+	   8: Jc30o       	$0xFE8D1A0
+
+
+
+. 2524 FE8D1BC 12
+. 81 88 00 00 2F 8C 00 00 40 9E FF DC
+==== BB 2525 (0xFE8D1A0) approx BBs exec'd 0 ====
+
+	0xFE8D1A0:  89430000  lbz r10,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE8D1A4:  83280000  lwz r25,0(r8)
+	   4: GETL       	R8, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R25
+	   7: INCEIPL       	$4
+
+	0xFE8D1A8:  555C083C  rlwinm r28,r10,1,0,30
+	   8: GETL       	R10, t8
+	   9: SHLL       	$0x1, t8
+	  10: PUTL       	t8, R28
+	  11: INCEIPL       	$4
+
+	0xFE8D1AC:  7F1CCA2E  lhzx r24,r28,r25
+	  12: GETL       	R25, t10
+	  13: GETL       	R28, t12
+	  14: ADDL       	t12, t10
+	  15: LDW       	(t10), t14
+	  16: PUTL       	t14, R24
+	  17: INCEIPL       	$4
+
+	0xFE8D1B0:  73090020  andi. r9,r24,0x20
+	  18: GETL       	R24, t16
+	  19: ANDL       	$0x20, t16
+	  20: PUTL       	t16, R9
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0xFE8D1B4:  41820044  bc 12,2,0xFE8D1F8
+	  24: Js02o       	$0xFE8D1F8
+
+
+
+. 2525 FE8D1A0 24
+. 89 43 00 00 83 28 00 00 55 5C 08 3C 7F 1C CA 2E 73 09 00 20 41 82 00 44
+==== BB 2526 (0xFE8D1B8) approx BBs exec'd 0 ====
+
+	0xFE8D1B8:  38630001  addi r3,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE8D1BC:  81880000  lwz r12,0(r8)
+	   4: GETL       	R8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0xFE8D1C0:  2F8C0000  cmpi cr7,r12,0
+	   8: GETL       	R12, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFE8D1C4:  409EFFDC  bc 4,30,0xFE8D1A0
+	  12: Jc30o       	$0xFE8D1A0
+
+
+
+. 2526 FE8D1B8 16
+. 38 63 00 01 81 88 00 00 2F 8C 00 00 40 9E FF DC
+==== BB 2527 (0xFE8D1F8) approx BBs exec'd 0 ====
+
+	0xFE8D1F8:  2C8A0000  cmpi cr1,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D1FC:  7C751B78  or r21,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R21
+	   6: INCEIPL       	$4
+
+	0xFE8D200:  39030001  addi r8,r3,1
+	   7: GETL       	R3, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R8
+	  10: INCEIPL       	$4
+
+	0xFE8D204:  7CF21214  add r7,r18,r2
+	  11: GETL       	R18, t8
+	  12: GETL       	R2, t10
+	  13: ADDL       	t8, t10
+	  14: PUTL       	t10, R7
+	  15: INCEIPL       	$4
+
+	0xFE8D208:  40860020  bc 4,6,0xFE8D228
+	  16: Jc06o       	$0xFE8D228
+
+
+
+. 2527 FE8D1F8 20
+. 2C 8A 00 00 7C 75 1B 78 39 03 00 01 7C F2 12 14 40 86 00 20
+==== BB 2528 (0xFE8D228) approx BBs exec'd 0 ====
+
+	0xFE8D228:  89480000  lbz r10,0(r8)
+	   0: GETL       	R8, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE8D22C:  2F0A0000  cmpi cr6,r10,0
+	   4: GETL       	R10, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0xFE8D230:  419A0040  bc 12,26,0xFE8D270
+	   8: Js26o       	$0xFE8D270
+
+
+
+. 2528 FE8D228 12
+. 89 48 00 00 2F 0A 00 00 41 9A 00 40
+==== BB 2529 (0xFE8D234) approx BBs exec'd 0 ====
+
+	0xFE8D234:  80670000  lwz r3,0(r7)
+	   0: GETL       	R7, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8D238:  2F830000  cmpi cr7,r3,0
+	   4: GETL       	R3, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFE8D23C:  409EFFD4  bc 4,30,0xFE8D210
+	   8: Jc30o       	$0xFE8D210
+
+
+
+. 2529 FE8D234 12
+. 80 67 00 00 2F 83 00 00 40 9E FF D4
+==== BB 2530 (0xFE8D210) approx BBs exec'd 0 ====
+
+	0xFE8D210:  80C70000  lwz r6,0(r7)
+	   0: GETL       	R7, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFE8D214:  554C083C  rlwinm r12,r10,1,0,30
+	   4: GETL       	R10, t4
+	   5: SHLL       	$0x1, t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0xFE8D218:  7C0C322E  lhzx r0,r12,r6
+	   8: GETL       	R6, t6
+	   9: GETL       	R12, t8
+	  10: ADDL       	t8, t6
+	  11: LDW       	(t6), t10
+	  12: PUTL       	t10, R0
+	  13: INCEIPL       	$4
+
+	0xFE8D21C:  70090020  andi. r9,r0,0x20
+	  14: GETL       	R0, t12
+	  15: ANDL       	$0x20, t12
+	  16: PUTL       	t12, R9
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0xFE8D220:  40820050  bc 4,2,0xFE8D270
+	  20: Jc02o       	$0xFE8D270
+
+
+
+. 2530 FE8D210 20
+. 80 C7 00 00 55 4C 08 3C 7C 0C 32 2E 70 09 00 20 40 82 00 50
+==== BB 2531 (0xFE8D224) approx BBs exec'd 0 ====
+
+	0xFE8D224:  39080001  addi r8,r8,1
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0xFE8D228:  89480000  lbz r10,0(r8)
+	   4: GETL       	R8, t2
+	   5: LDB       	(t2), t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFE8D22C:  2F0A0000  cmpi cr6,r10,0
+	   8: GETL       	R10, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFE8D230:  419A0040  bc 12,26,0xFE8D270
+	  12: Js26o       	$0xFE8D270
+
+
+
+. 2531 FE8D224 16
+. 39 08 00 01 89 48 00 00 2F 0A 00 00 41 9A 00 40
+==== BB 2532 (0xFE8D270) approx BBs exec'd 0 ====
+
+	0xFE8D270:  2C8A000A  cmpi cr1,r10,10
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0xA, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFE8D274:  418602AC  bc 12,6,0xFE8D520
+	   5: Js06o       	$0xFE8D520
+
+
+
+. 2532 FE8D270 8
+. 2C 8A 00 0A 41 86 02 AC
+==== BB 2533 (0xFE8D520) approx BBs exec'd 0 ====
+
+	0xFE8D520:  38E00000  li r7,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFE8D524:  99480001  stb r10,1(r8)
+	   3: GETL       	R10, t2
+	   4: GETL       	R8, t4
+	   5: ADDL       	$0x1, t4
+	   6: STB       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFE8D528:  98E80000  stb r7,0(r8)
+	   8: GETL       	R7, t6
+	   9: GETL       	R8, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE8D52C:  4BFFFD5C  b 0xFE8D288
+	  12: JMPo       	$0xFE8D288  ($4)
+
+
+
+. 2533 FE8D520 16
+. 38 E0 00 00 99 48 00 01 98 E8 00 00 4B FF FD 5C
+==== BB 2534 (0xFE8D288) approx BBs exec'd 0 ====
+
+	0xFE8D288:  82FE020C  lwz r23,524(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x20C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFE8D28C:  829E0210  lwz r20,528(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x210, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R20
+	   9: INCEIPL       	$4
+
+	0xFE8D290:  82770000  lwz r19,0(r23)
+	  10: GETL       	R23, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R19
+	  13: INCEIPL       	$4
+
+	0xFE8D294:  83340000  lwz r25,0(r20)
+	  14: GETL       	R20, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R25
+	  17: INCEIPL       	$4
+
+	0xFE8D298:  81130000  lwz r8,0(r19)
+	  18: GETL       	R19, t16
+	  19: LDL       	(t16), t18
+	  20: PUTL       	t18, R8
+	  21: INCEIPL       	$4
+
+	0xFE8D29C:  80990000  lwz r4,0(r25)
+	  22: GETL       	R25, t20
+	  23: LDL       	(t20), t22
+	  24: PUTL       	t22, R4
+	  25: INCEIPL       	$4
+
+	0xFE8D2A0:  7F882040  cmpl cr7,r8,r4
+	  26: GETL       	R8, t24
+	  27: GETL       	R4, t26
+	  28: CMPUL       	t24, t26, t28  (-rSo)
+	  29: ICRFL       	t28, $0x7, CR
+	  30: INCEIPL       	$4
+
+	0xFE8D2A4:  419C0034  bc 12,28,0xFE8D2D8
+	  31: Js28o       	$0xFE8D2D8
+
+
+
+. 2534 FE8D288 32
+. 82 FE 02 0C 82 9E 02 10 82 77 00 00 83 34 00 00 81 13 00 00 80 99 00 00 7F 88 20 40 41 9C 00 34
+==== BB 2535 (0xFE8D2A8) approx BBs exec'd 0 ====
+
+	0xFE8D2A8:  2C040000  cmpi cr0,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D2AC:  549D083C  rlwinm r29,r4,1,0,30
+	   4: GETL       	R4, t4
+	   5: SHLL       	$0x1, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFE8D2B0:  40820008  bc 4,2,0xFE8D2B8
+	   8: Jc02o       	$0xFE8D2B8
+
+
+
+. 2535 FE8D2A8 12
+. 2C 04 00 00 54 9D 08 3C 40 82 00 08
+==== BB 2536 (0xFE8D2B4) approx BBs exec'd 0 ====
+
+	0xFE8D2B4:  3BA00064  li r29,100
+	   0: MOVL       	$0x64, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFE8D2B8:  839E01F4  lwz r28,500(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x1F4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFE8D2BC:  57A41838  rlwinm r4,r29,3,0,28
+	   8: GETL       	R29, t6
+	   9: SHLL       	$0x3, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0xFE8D2C0:  807C0000  lwz r3,0(r28)
+	  12: GETL       	R28, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0xFE8D2C4:  4811D055  bl 0xFFAA318
+	  16: MOVL       	$0xFE8D2C8, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0xFFAA318  ($4)
+
+
+
+. 2536 FE8D2B4 20
+. 3B A0 00 64 83 9E 01 F4 57 A4 18 38 80 7C 00 00 48 11 D0 55
+==== BB 2537 (0xFFAA318) approx BBs exec'd 0 ====
+
+	0xFFAA318:  4BF27EC8  b 0xFED21E0
+	   0: JMPo       	$0xFED21E0  ($4)
+
+
+
+. 2537 FFAA318 4
+. 4B F2 7E C8
+==== BB 2538 realloc(0xFED21E0) approx BBs exec'd 0 ====
+
+	0xFED21E0:  9421FFB0  stwu r1,-80(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFB0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED21E4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFED21E8:  480D5C69  bl 0xFFA7E50
+	   9: MOVL       	$0xFED21EC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2538 FED21E0 12
+. 94 21 FF B0 7C 08 02 A6 48 0D 5C 69
+==== BB 2539 (0xFED21EC) approx BBs exec'd 0 ====
+
+	0xFED21EC:  93C10048  stw r30,72(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x48, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED21F0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFED21F4:  9361003C  stw r27,60(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x3C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFED21F8:  90010054  stw r0,84(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x54, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFED21FC:  7C9B2378  or r27,r4,r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, R27
+	  20: INCEIPL       	$4
+
+	0xFED2200:  93810040  stw r28,64(r1)
+	  21: GETL       	R28, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x40, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFED2204:  7C7C1B78  or r28,r3,r3
+	  26: GETL       	R3, t20
+	  27: PUTL       	t20, R28
+	  28: INCEIPL       	$4
+
+	0xFED2208:  813E1AF8  lwz r9,6904(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x1AF8, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0xFED220C:  92C10028  stw r22,40(r1)
+	  34: GETL       	R22, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x28, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFED2210:  80090000  lwz r0,0(r9)
+	  39: GETL       	R9, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0xFED2214:  92E1002C  stw r23,44(r1)
+	  43: GETL       	R23, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x2C, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0xFED2218:  2F800000  cmpi cr7,r0,0
+	  48: GETL       	R0, t38
+	  49: CMP0L       	t38, t40  (-rSo)
+	  50: ICRFL       	t40, $0x7, CR
+	  51: INCEIPL       	$4
+
+	0xFED221C:  93010030  stw r24,48(r1)
+	  52: GETL       	R24, t42
+	  53: GETL       	R1, t44
+	  54: ADDL       	$0x30, t44
+	  55: STL       	t42, (t44)
+	  56: INCEIPL       	$4
+
+	0xFED2220:  93210034  stw r25,52(r1)
+	  57: GETL       	R25, t46
+	  58: GETL       	R1, t48
+	  59: ADDL       	$0x34, t48
+	  60: STL       	t46, (t48)
+	  61: INCEIPL       	$4
+
+	0xFED2224:  93410038  stw r26,56(r1)
+	  62: GETL       	R26, t50
+	  63: GETL       	R1, t52
+	  64: ADDL       	$0x38, t52
+	  65: STL       	t50, (t52)
+	  66: INCEIPL       	$4
+
+	0xFED2228:  93A10044  stw r29,68(r1)
+	  67: GETL       	R29, t54
+	  68: GETL       	R1, t56
+	  69: ADDL       	$0x44, t56
+	  70: STL       	t54, (t56)
+	  71: INCEIPL       	$4
+
+	0xFED222C:  93E1004C  stw r31,76(r1)
+	  72: GETL       	R31, t58
+	  73: GETL       	R1, t60
+	  74: ADDL       	$0x4C, t60
+	  75: STL       	t58, (t60)
+	  76: INCEIPL       	$4
+
+	0xFED2230:  419E0054  bc 12,30,0xFED2284
+	  77: Js30o       	$0xFED2284
+
+
+
+. 2539 FED21EC 72
+. 93 C1 00 48 7F C8 02 A6 93 61 00 3C 90 01 00 54 7C 9B 23 78 93 81 00 40 7C 7C 1B 78 81 3E 1A F8 92 C1 00 28 80 09 00 00 92 E1 00 2C 2F 80 00 00 93 01 00 30 93 21 00 34 93 41 00 38 93 A1 00 44 93 E1 00 4C 41 9E 00 54
+==== BB 2540 (0xFED2234) approx BBs exec'd 0 ====
+
+	0xFED2234:  80C10000  lwz r6,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFED2238:  7C0903A6  mtctr r0
+	   4: GETL       	R0, t4
+	   5: PUTL       	t4, CTR
+	   6: INCEIPL       	$4
+
+	0xFED223C:  80A60004  lwz r5,4(r6)
+	   7: GETL       	R6, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0xFED2240:  4E800421  bctrl
+	  12: MOVL       	$0xFED2244, t10
+	  13: PUTL       	t10, LR
+	  14: GETL       	CTR, t12
+	  15: JMPo-c       	t12  ($4)
+
+
+
+. 2540 FED2234 16
+. 80 C1 00 00 7C 09 03 A6 80 A6 00 04 4E 80 04 21
+==== BB 2541 realloc_hook_ini(0xFED2CC0) approx BBs exec'd 0 ====
+
+	0xFED2CC0:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED2CC4:  7CA802A6  mflr r5
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFED2CC8:  480D5189  bl 0xFFA7E50
+	   9: MOVL       	$0xFED2CCC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2541 FED2CC0 12
+. 94 21 FF E0 7C A8 02 A6 48 0D 51 89
+==== BB 2542 (0xFED2CCC) approx BBs exec'd 0 ====
+
+	0xFED2CCC:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED2CD0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFED2CD4:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFED2CD8:  7C7D1B78  or r29,r3,r3
+	  13: GETL       	R3, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0xFED2CDC:  38600000  li r3,0
+	  16: MOVL       	$0x0, t12
+	  17: PUTL       	t12, R3
+	  18: INCEIPL       	$4
+
+	0xFED2CE0:  90A10024  stw r5,36(r1)
+	  19: GETL       	R5, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x24, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFED2CE4:  813E1AF8  lwz r9,6904(r30)
+	  24: GETL       	R30, t18
+	  25: ADDL       	$0x1AF8, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R9
+	  28: INCEIPL       	$4
+
+	0xFED2CE8:  817E1A7C  lwz r11,6780(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x1A7C, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R11
+	  33: INCEIPL       	$4
+
+	0xFED2CEC:  90690000  stw r3,0(r9)
+	  34: GETL       	R3, t26
+	  35: GETL       	R9, t28
+	  36: STL       	t26, (t28)
+	  37: INCEIPL       	$4
+
+	0xFED2CF0:  906B0000  stw r3,0(r11)
+	  38: GETL       	R3, t30
+	  39: GETL       	R11, t32
+	  40: STL       	t30, (t32)
+	  41: INCEIPL       	$4
+
+	0xFED2CF4:  93810010  stw r28,16(r1)
+	  42: GETL       	R28, t34
+	  43: GETL       	R1, t36
+	  44: ADDL       	$0x10, t36
+	  45: STL       	t34, (t36)
+	  46: INCEIPL       	$4
+
+	0xFED2CF8:  7C9C2378  or r28,r4,r4
+	  47: GETL       	R4, t38
+	  48: PUTL       	t38, R28
+	  49: INCEIPL       	$4
+
+	0xFED2CFC:  93E1001C  stw r31,28(r1)
+	  50: GETL       	R31, t40
+	  51: GETL       	R1, t42
+	  52: ADDL       	$0x1C, t42
+	  53: STL       	t40, (t42)
+	  54: INCEIPL       	$4
+
+	0xFED2D00:  4BFFF975  bl 0xFED2674
+	  55: MOVL       	$0xFED2D04, t44
+	  56: PUTL       	t44, LR
+	  57: JMPo-c       	$0xFED2674  ($4)
+
+
+
+. 2542 FED2CCC 56
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 7C 7D 1B 78 38 60 00 00 90 A1 00 24 81 3E 1A F8 81 7E 1A 7C 90 69 00 00 90 6B 00 00 93 81 00 10 7C 9C 23 78 93 E1 00 1C 4B FF F9 75
+==== BB 2543 (0xFED26B4) approx BBs exec'd 0 ====
+
+	0xFED26B4:  83610054  lwz r27,84(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x54, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFED26B8:  83410038  lwz r26,56(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x38, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0xFED26BC:  7F6803A6  mtlr r27
+	  10: GETL       	R27, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFED26C0:  83810040  lwz r28,64(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x40, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0xFED26C4:  8361003C  lwz r27,60(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x3C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R27
+	  22: INCEIPL       	$4
+
+	0xFED26C8:  83A10044  lwz r29,68(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x44, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R29
+	  27: INCEIPL       	$4
+
+	0xFED26CC:  83C10048  lwz r30,72(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x48, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R30
+	  32: INCEIPL       	$4
+
+	0xFED26D0:  83E1004C  lwz r31,76(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x4C, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R31
+	  37: INCEIPL       	$4
+
+	0xFED26D4:  38210050  addi r1,r1,80
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x50, t30
+	  40: PUTL       	t30, R1
+	  41: INCEIPL       	$4
+
+	0xFED26D8:  4E800020  blr
+	  42: GETL       	LR, t32
+	  43: JMPo-r       	t32  ($4)
+
+
+
+. 2543 FED26B4 40
+. 83 61 00 54 83 41 00 38 7F 68 03 A6 83 81 00 40 83 61 00 3C 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+==== BB 2544 (0xFED2D04) approx BBs exec'd 0 ====
+
+	0xFED2D04:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED2D08:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED2D0C:  83C10018  lwz r30,24(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xFED2D10:  7F84E378  or r4,r28,r28
+	  13: GETL       	R28, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFED2D14:  83E1001C  lwz r31,28(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x1C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0xFED2D18:  7C0803A6  mtlr r0
+	  21: GETL       	R0, t16
+	  22: PUTL       	t16, LR
+	  23: INCEIPL       	$4
+
+	0xFED2D1C:  83810010  lwz r28,16(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x10, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R28
+	  28: INCEIPL       	$4
+
+	0xFED2D20:  83A10014  lwz r29,20(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x14, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R29
+	  33: INCEIPL       	$4
+
+	0xFED2D24:  38210020  addi r1,r1,32
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x20, t26
+	  36: PUTL       	t26, R1
+	  37: INCEIPL       	$4
+
+	0xFED2D28:  4BFFF4B8  b 0xFED21E0
+	  38: JMPo       	$0xFED21E0  ($4)
+
+
+
+. 2544 FED2D04 40
+. 80 01 00 24 7F A3 EB 78 83 C1 00 18 7F 84 E3 78 83 E1 00 1C 7C 08 03 A6 83 81 00 10 83 A1 00 14 38 21 00 20 4B FF F4 B8
+==== BB 2545 (0xFED2284) approx BBs exec'd 0 ====
+
+	0xFED2284:  21240000  subfic r9,r4,0
+	   0: GETL       	R4, t0
+	   1: MOVL       	$0x0, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFED2288:  7CA92114  adde r5,r9,r4
+	   5: GETL       	R9, t4
+	   6: GETL       	R4, t6
+	   7: ADCL       	t4, t6  (-rCa-wCa)
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFED228C:  3163FFFF  addic r11,r3,-1
+	  10: GETL       	R3, t8
+	  11: ADCL       	$0xFFFFFFFF, t8  (-wCa)
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0xFED2290:  7C8B1910  subfe r4,r11,r3
+	  14: GETL       	R11, t10
+	  15: GETL       	R3, t12
+	  16: SBBL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R4
+	  18: INCEIPL       	$4
+
+	0xFED2294:  7CAB2039  and. r11,r5,r4
+	  19: GETL       	R5, t14
+	  20: GETL       	R4, t16
+	  21: ANDL       	t14, t16
+	  22: PUTL       	t16, R11
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0xFED2298:  40820270  bc 4,2,0xFED2508
+	  26: Jc02o       	$0xFED2508
+
+
+
+. 2545 FED2284 24
+. 21 24 00 00 7C A9 21 14 31 63 FF FF 7C 8B 19 10 7C AB 20 39 40 82 02 70
+==== BB 2546 (0xFED229C) approx BBs exec'd 0 ====
+
+	0xFED229C:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFED22A0:  41860248  bc 12,6,0xFED24E8
+	   4: Js06o       	$0xFED24E8
+
+
+
+. 2546 FED229C 8
+. 2C 83 00 00 41 86 02 48
+==== BB 2547 (0xFED24E8) approx BBs exec'd 0 ====
+
+	0xFED24E8:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED24EC:  4BFFF3F5  bl 0xFED18E0
+	   3: MOVL       	$0xFED24F0, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED18E0  ($4)
+
+
+
+. 2547 FED24E8 8
+. 7F 63 DB 78 4B FF F3 F5
+==== BB 2548 (0xFECF788) approx BBs exec'd 0 ====
+
+	0xFECF788:  573AD1BE  rlwinm r26,r25,26,6,31
+	   0: GETL       	R25, t0
+	   1: SHRL       	$0x6, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0xFECF78C:  2B1A0020  cmpli cr6,r26,32
+	   4: GETL       	R26, t2
+	   5: MOVL       	$0x20, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFECF790:  3B1A0038  addi r24,r26,56
+	   9: GETL       	R26, t8
+	  10: ADDL       	$0x38, t8
+	  11: PUTL       	t8, R24
+	  12: INCEIPL       	$4
+
+	0xFECF794:  4199018C  bc 12,25,0xFECF920
+	  13: Js25o       	$0xFECF920
+
+
+
+. 2548 FECF788 16
+. 57 3A D1 BE 2B 1A 00 20 3B 1A 00 38 41 99 01 8C
+==== BB 2549 (0xFECF798) approx BBs exec'd 0 ====
+
+	0xFECF798:  71400001  andi. r0,r10,0x1
+	   0: GETL       	R10, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECF79C:  418201CC  bc 12,2,0xFECF968
+	   6: Js02o       	$0xFECF968
+
+
+
+. 2549 FECF798 8
+. 71 40 00 01 41 82 01 CC
+==== BB 2550 (0xFECE53C) approx BBs exec'd 0 ====
+
+	0xFECE53C:  61630001  ori r3,r11,0x1
+	   0: GETL       	R11, t0
+	   1: ORL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFECE540:  81DE064C  lwz r14,1612(r30)
+	   4: GETL       	R30, t2
+	   5: ADDL       	$0x64C, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R14
+	   8: INCEIPL       	$4
+
+	0xFECE544:  5469F87A  rlwinm r9,r3,31,1,29
+	   9: GETL       	R3, t6
+	  10: ROLL       	$0x1F, t6
+	  11: ANDL       	$0x7FFFFFFC, t6
+	  12: PUTL       	t6, R9
+	  13: INCEIPL       	$4
+
+	0xFECE548:  821E05F0  lwz r16,1520(r30)
+	  14: GETL       	R30, t8
+	  15: ADDL       	$0x5F0, t8
+	  16: LDL       	(t8), t10
+	  17: PUTL       	t10, R16
+	  18: INCEIPL       	$4
+
+	0xFECE54C:  90780004  stw r3,4(r24)
+	  19: GETL       	R3, t12
+	  20: GETL       	R24, t14
+	  21: ADDL       	$0x4, t14
+	  22: STL       	t12, (t14)
+	  23: INCEIPL       	$4
+
+	0xFECE550:  7E89C214  add r20,r9,r24
+	  24: GETL       	R9, t16
+	  25: GETL       	R24, t18
+	  26: ADDL       	t16, t18
+	  27: PUTL       	t18, R20
+	  28: INCEIPL       	$4
+
+	0xFECE554:  3B580038  addi r26,r24,56
+	  29: GETL       	R24, t20
+	  30: ADDL       	$0x38, t20
+	  31: PUTL       	t20, R26
+	  32: INCEIPL       	$4
+
+	0xFECE558:  3AD80008  addi r22,r24,8
+	  33: GETL       	R24, t22
+	  34: ADDL       	$0x8, t22
+	  35: PUTL       	t22, R22
+	  36: INCEIPL       	$4
+
+	0xFECE55C:  7DCF7378  or r15,r14,r14
+	  37: GETL       	R14, t24
+	  38: PUTL       	t24, R15
+	  39: INCEIPL       	$4
+
+	0xFECE560:  83F60000  lwz r31,0(r22)
+	  40: GETL       	R22, t26
+	  41: LDL       	(t26), t28
+	  42: PUTL       	t28, R31
+	  43: INCEIPL       	$4
+
+	0xFECE564:  2C1F0000  cmpi cr0,r31,0
+	  44: GETL       	R31, t30
+	  45: CMP0L       	t30, t32  (-rSo)
+	  46: ICRFL       	t32, $0x0, CR
+	  47: INCEIPL       	$4
+
+	0xFECE568:  418201A0  bc 12,2,0xFECE708
+	  48: Js02o       	$0xFECE708
+
+
+
+. 2550 FECE53C 48
+. 61 63 00 01 81 DE 06 4C 54 69 F8 7A 82 1E 05 F0 90 78 00 04 7E 89 C2 14 3B 58 00 38 3A D8 00 08 7D CF 73 78 83 F6 00 00 2C 1F 00 00 41 82 01 A0
+==== BB 2551 (0xFECE708) approx BBs exec'd 0 ====
+
+	0xFECE708:  7F96A000  cmp cr7,r22,r20
+	   0: GETL       	R22, t0
+	   1: GETL       	R20, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFECE70C:  3AD60004  addi r22,r22,4
+	   5: GETL       	R22, t6
+	   6: ADDL       	$0x4, t6
+	   7: PUTL       	t6, R22
+	   8: INCEIPL       	$4
+
+	0xFECE710:  409EFE50  bc 4,30,0xFECE560
+	   9: Jc30o       	$0xFECE560
+
+
+
+. 2551 FECE708 12
+. 7F 96 A0 00 3A D6 00 04 40 9E FE 50
+==== BB 2552 (0xFECE560) approx BBs exec'd 0 ====
+
+	0xFECE560:  83F60000  lwz r31,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0xFECE564:  2C1F0000  cmpi cr0,r31,0
+	   4: GETL       	R31, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFECE568:  418201A0  bc 12,2,0xFECE708
+	   8: Js02o       	$0xFECE708
+
+
+
+. 2552 FECE560 12
+. 83 F6 00 00 2C 1F 00 00 41 82 01 A0
+==== BB 2553 (0xFECE56C) approx BBs exec'd 0 ====
+
+	0xFECE56C:  38A00000  li r5,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFECE570:  7E138378  or r19,r16,r16
+	   3: GETL       	R16, t2
+	   4: PUTL       	t2, R19
+	   5: INCEIPL       	$4
+
+	0xFECE574:  90B60000  stw r5,0(r22)
+	   6: GETL       	R5, t4
+	   7: GETL       	R22, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECE578:  7DF27B78  or r18,r15,r15
+	  10: GETL       	R15, t8
+	  11: PUTL       	t8, R18
+	  12: INCEIPL       	$4
+
+	0xFECE57C:  7DD17378  or r17,r14,r14
+	  13: GETL       	R14, t10
+	  14: PUTL       	t10, R17
+	  15: INCEIPL       	$4
+
+	0xFECE580:  4800005C  b 0xFECE5DC
+	  16: JMPo       	$0xFECE5DC  ($4)
+
+
+
+. 2553 FECE56C 24
+. 38 A0 00 00 7E 13 83 78 90 B6 00 00 7D F2 7B 78 7D D1 73 78 48 00 00 5C
+==== BB 2554 (0xFECE5DC) approx BBs exec'd 0 ====
+
+	0xFECE5DC:  80FF0004  lwz r7,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFECE5E0:  3900FFFA  li r8,-6
+	   5: MOVL       	$0xFFFFFFFA, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0xFECE5E4:  82FF0008  lwz r23,8(r31)
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0xFECE5E8:  7CFC4038  and r28,r7,r8
+	  13: GETL       	R7, t10
+	  14: GETL       	R8, t12
+	  15: ANDL       	t10, t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0xFECE5EC:  70E80001  andi. r8,r7,0x1
+	  18: GETL       	R7, t14
+	  19: ANDL       	$0x1, t14
+	  20: PUTL       	t14, R8
+	  21: CMP0L       	t14, t16  (-rSo)
+	  22: ICRFL       	t16, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0xFECE5F0:  7FBFE214  add r29,r31,r28
+	  24: GETL       	R31, t18
+	  25: GETL       	R28, t20
+	  26: ADDL       	t18, t20
+	  27: PUTL       	t20, R29
+	  28: INCEIPL       	$4
+
+	0xFECE5F4:  80DD0004  lwz r6,4(r29)
+	  29: GETL       	R29, t22
+	  30: ADDL       	$0x4, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R6
+	  33: INCEIPL       	$4
+
+	0xFECE5F8:  54DB0038  rlwinm r27,r6,0,0,28
+	  34: GETL       	R6, t26
+	  35: ANDL       	$0xFFFFFFF8, t26
+	  36: PUTL       	t26, R27
+	  37: INCEIPL       	$4
+
+	0xFECE5FC:  40820038  bc 4,2,0xFECE634
+	  38: Jc02o       	$0xFECE634
+
+
+
+. 2554 FECE5DC 36
+. 80 FF 00 04 39 00 FF FA 82 FF 00 08 7C FC 40 38 70 E8 00 01 7F BF E2 14 80 DD 00 04 54 DB 00 38 40 82 00 38
+==== BB 2555 (0xFECE634) approx BBs exec'd 0 ====
+
+	0xFECE634:  83380030  lwz r25,48(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0xFECE638:  7C99E800  cmp cr1,r25,r29
+	   5: GETL       	R25, t4
+	   6: GETL       	R29, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFECE63C:  418600B0  bc 12,6,0xFECE6EC
+	  10: Js06o       	$0xFECE6EC
+
+
+
+. 2555 FECE634 12
+. 83 38 00 30 7C 99 E8 00 41 86 00 B0
+==== BB 2556 (0xFECE640) approx BBs exec'd 0 ====
+
+	0xFECE640:  7C9DDA14  add r4,r29,r27
+	   0: GETL       	R29, t0
+	   1: GETL       	R27, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFECE644:  80040004  lwz r0,4(r4)
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFECE648:  700A0001  andi. r10,r0,0x1
+	  10: GETL       	R0, t8
+	  11: ANDL       	$0x1, t8
+	  12: PUTL       	t8, R10
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFECE64C:  41A2FF38  bc 13,2,0xFECE584
+	  16: Js02o       	$0xFECE584
+
+
+
+. 2556 FECE640 16
+. 7C 9D DA 14 80 04 00 04 70 0A 00 01 41 A2 FF 38
+==== BB 2557 (0xFECE650) approx BBs exec'd 0 ====
+
+	0xFECE650:  80BD0004  lwz r5,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFECE654:  54A9003C  rlwinm r9,r5,0,0,30
+	   5: GETL       	R5, t4
+	   6: ANDL       	$0xFFFFFFFE, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xFECE658:  913D0004  stw r9,4(r29)
+	   9: GETL       	R9, t6
+	  10: GETL       	R29, t8
+	  11: ADDL       	$0x4, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECE65C:  4BFFFF54  b 0xFECE5B0
+	  14: JMPo       	$0xFECE5B0  ($4)
+
+
+
+. 2557 FECE650 16
+. 80 BD 00 04 54 A9 00 3C 91 3D 00 04 4B FF FF 54
+==== BB 2558 (0xFECE5B0) approx BBs exec'd 0 ====
+
+	0xFECE5B0:  2F170000  cmpi cr6,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFECE5B4:  80DA0008  lwz r6,8(r26)
+	   4: GETL       	R26, t4
+	   5: ADDL       	$0x8, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0xFECE5B8:  63870001  ori r7,r28,0x1
+	   9: GETL       	R28, t8
+	  10: ORL       	$0x1, t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0xFECE5BC:  93FA0008  stw r31,8(r26)
+	  13: GETL       	R31, t10
+	  14: GETL       	R26, t12
+	  15: ADDL       	$0x8, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFECE5C0:  93E6000C  stw r31,12(r6)
+	  18: GETL       	R31, t14
+	  19: GETL       	R6, t16
+	  20: ADDL       	$0xC, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFECE5C4:  7F9FE12E  stwx r28,r31,r28
+	  23: GETL       	R28, t18
+	  24: GETL       	R31, t20
+	  25: ADDL       	t20, t18
+	  26: GETL       	R28, t22
+	  27: STL       	t22, (t18)
+	  28: INCEIPL       	$4
+
+	0xFECE5C8:  90FF0004  stw r7,4(r31)
+	  29: GETL       	R7, t24
+	  30: GETL       	R31, t26
+	  31: ADDL       	$0x4, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0xFECE5CC:  90DF0008  stw r6,8(r31)
+	  34: GETL       	R6, t28
+	  35: GETL       	R31, t30
+	  36: ADDL       	$0x8, t30
+	  37: STL       	t28, (t30)
+	  38: INCEIPL       	$4
+
+	0xFECE5D0:  935F000C  stw r26,12(r31)
+	  39: GETL       	R26, t32
+	  40: GETL       	R31, t34
+	  41: ADDL       	$0xC, t34
+	  42: STL       	t32, (t34)
+	  43: INCEIPL       	$4
+
+	0xFECE5D4:  7EFFBB78  or r31,r23,r23
+	  44: GETL       	R23, t36
+	  45: PUTL       	t36, R31
+	  46: INCEIPL       	$4
+
+	0xFECE5D8:  419A0130  bc 12,26,0xFECE708
+	  47: Js26o       	$0xFECE708
+
+
+
+. 2558 FECE5B0 44
+. 2F 17 00 00 80 DA 00 08 63 87 00 01 93 FA 00 08 93 E6 00 0C 7F 9F E1 2E 90 FF 00 04 90 DF 00 08 93 5F 00 0C 7E FF BB 78 41 9A 01 30
+==== BB 2559 (0xFECE714) approx BBs exec'd 0 ====
+
+	0xFECE714:  4BFFFF80  b 0xFECE694
+	   0: JMPo       	$0xFECE694  ($4)
+
+
+
+. 2559 FECE714 4
+. 4B FF FF 80
+==== BB 2560 (0xFECE694) approx BBs exec'd 0 ====
+
+	0xFECE694:  82610074  lwz r19,116(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0xFECE698:  81C10028  lwz r14,40(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x28, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R14
+	   9: INCEIPL       	$4
+
+	0xFECE69C:  7E6803A6  mtlr r19
+	  10: GETL       	R19, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFECE6A0:  81E1002C  lwz r15,44(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x2C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R15
+	  17: INCEIPL       	$4
+
+	0xFECE6A4:  82010030  lwz r16,48(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x30, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R16
+	  22: INCEIPL       	$4
+
+	0xFECE6A8:  82210034  lwz r17,52(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x34, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R17
+	  27: INCEIPL       	$4
+
+	0xFECE6AC:  82410038  lwz r18,56(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x38, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R18
+	  32: INCEIPL       	$4
+
+	0xFECE6B0:  8261003C  lwz r19,60(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x3C, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R19
+	  37: INCEIPL       	$4
+
+	0xFECE6B4:  82810040  lwz r20,64(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x40, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R20
+	  42: INCEIPL       	$4
+
+	0xFECE6B8:  82A10044  lwz r21,68(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x44, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R21
+	  47: INCEIPL       	$4
+
+	0xFECE6BC:  82C10048  lwz r22,72(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x48, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R22
+	  52: INCEIPL       	$4
+
+	0xFECE6C0:  82E1004C  lwz r23,76(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x4C, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R23
+	  57: INCEIPL       	$4
+
+	0xFECE6C4:  83010050  lwz r24,80(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x50, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R24
+	  62: INCEIPL       	$4
+
+	0xFECE6C8:  83210054  lwz r25,84(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x54, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R25
+	  67: INCEIPL       	$4
+
+	0xFECE6CC:  83410058  lwz r26,88(r1)
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x58, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R26
+	  72: INCEIPL       	$4
+
+	0xFECE6D0:  8361005C  lwz r27,92(r1)
+	  73: GETL       	R1, t58
+	  74: ADDL       	$0x5C, t58
+	  75: LDL       	(t58), t60
+	  76: PUTL       	t60, R27
+	  77: INCEIPL       	$4
+
+	0xFECE6D4:  83810060  lwz r28,96(r1)
+	  78: GETL       	R1, t62
+	  79: ADDL       	$0x60, t62
+	  80: LDL       	(t62), t64
+	  81: PUTL       	t64, R28
+	  82: INCEIPL       	$4
+
+	0xFECE6D8:  83A10064  lwz r29,100(r1)
+	  83: GETL       	R1, t66
+	  84: ADDL       	$0x64, t66
+	  85: LDL       	(t66), t68
+	  86: PUTL       	t68, R29
+	  87: INCEIPL       	$4
+
+	0xFECE6DC:  83C10068  lwz r30,104(r1)
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x68, t70
+	  90: LDL       	(t70), t72
+	  91: PUTL       	t72, R30
+	  92: INCEIPL       	$4
+
+	0xFECE6E0:  83E1006C  lwz r31,108(r1)
+	  93: GETL       	R1, t74
+	  94: ADDL       	$0x6C, t74
+	  95: LDL       	(t74), t76
+	  96: PUTL       	t76, R31
+	  97: INCEIPL       	$4
+
+	0xFECE6E4:  38210070  addi r1,r1,112
+	  98: GETL       	R1, t78
+	  99: ADDL       	$0x70, t78
+	 100: PUTL       	t78, R1
+	 101: INCEIPL       	$4
+
+	0xFECE6E8:  4E800020  blr
+	 102: GETL       	LR, t80
+	 103: JMPo-r       	t80  ($4)
+
+
+
+. 2560 FECE694 88
+. 82 61 00 74 81 C1 00 28 7E 68 03 A6 81 E1 00 2C 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+==== BB 2561 (0xFECF7DC) approx BBs exec'd 0 ====
+
+	0xFECF7DC:  7E559378  or r21,r18,r18
+	   0: GETL       	R18, t0
+	   1: PUTL       	t0, R21
+	   2: INCEIPL       	$4
+
+	0xFECF7E0:  48000054  b 0xFECF834
+	   3: JMPo       	$0xFECF834  ($4)
+
+
+
+. 2561 FECF7DC 8
+. 7E 55 93 78 48 00 00 54
+==== BB 2562 (0xFECF834) approx BBs exec'd 0 ====
+
+	0xFECF834:  815F0004  lwz r10,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFECF838:  83BF000C  lwz r29,12(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFECF83C:  2B8A0008  cmpli cr7,r10,8
+	  10: GETL       	R10, t8
+	  11: MOVL       	$0x8, t12
+	  12: CMPUL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFECF840:  409D0320  bc 4,29,0xFECFB60
+	  15: Jc29o       	$0xFECFB60
+
+
+
+. 2562 FECF834 16
+. 81 5F 00 04 83 BF 00 0C 2B 8A 00 08 40 9D 03 20
+==== BB 2563 (0xFECF844) approx BBs exec'd 0 ====
+
+	0xFECF844:  82FC044C  lwz r23,1100(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x44C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFECF848:  7C8AB840  cmpl cr1,r10,r23
+	   5: GETL       	R10, t4
+	   6: GETL       	R23, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFECF84C:  41850314  bc 12,5,0xFECFB60
+	  10: Js05o       	$0xFECFB60
+
+
+
+. 2563 FECF844 12
+. 82 FC 04 4C 7C 8A B8 40 41 85 03 14
+==== BB 2564 (0xFECF850) approx BBs exec'd 0 ====
+
+	0xFECF850:  2B9901FF  cmpli cr7,r25,511
+	   0: GETL       	R25, t0
+	   1: MOVL       	$0x1FF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFECF854:  554A0038  rlwinm r10,r10,0,0,28
+	   5: GETL       	R10, t6
+	   6: ANDL       	$0xFFFFFFF8, t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0xFECF858:  419D000C  bc 12,29,0xFECF864
+	   9: Js29o       	$0xFECF864
+
+
+
+. 2564 FECF850 12
+. 2B 99 01 FF 55 4A 00 38 41 9D 00 0C
+==== BB 2565 (0xFECF864) approx BBs exec'd 0 ====
+
+	0xFECF864:  7F8AC800  cmp cr7,r10,r25
+	   0: GETL       	R10, t0
+	   1: GETL       	R25, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFECF868:  935D0008  stw r26,8(r29)
+	   5: GETL       	R26, t6
+	   6: GETL       	R29, t8
+	   7: ADDL       	$0x8, t8
+	   8: STL       	t6, (t8)
+	   9: INCEIPL       	$4
+
+	0xFECF86C:  93BC0044  stw r29,68(r28)
+	  10: GETL       	R29, t10
+	  11: GETL       	R28, t12
+	  12: ADDL       	$0x44, t12
+	  13: STL       	t10, (t12)
+	  14: INCEIPL       	$4
+
+	0xFECF870:  419E03EC  bc 12,30,0xFECFC5C
+	  15: Js30o       	$0xFECFC5C
+
+
+
+. 2565 FECF864 16
+. 7F 8A C8 00 93 5D 00 08 93 BC 00 44 41 9E 03 EC
+==== BB 2566 (0xFECF874) approx BBs exec'd 0 ====
+
+	0xFECF874:  288A01FF  cmpli cr1,r10,511
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x1FF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFECF878:  40A5FF6C  bc 5,5,0xFECF7E4
+	   5: Jc05o       	$0xFECF7E4
+
+
+
+. 2566 FECF874 8
+. 28 8A 01 FF 40 A5 FF 6C
+==== BB 2567 (0xFECF7E4) approx BBs exec'd 0 ====
+
+	0xFECF7E4:  7FAAE214  add r29,r10,r28
+	   0: GETL       	R10, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFECF7E8:  5548E8FE  rlwinm r8,r10,29,3,31
+	   5: GETL       	R10, t4
+	   6: SHRL       	$0x3, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0xFECF7EC:  393D0030  addi r9,r29,48
+	   9: GETL       	R29, t6
+	  10: ADDL       	$0x30, t6
+	  11: PUTL       	t6, R9
+	  12: INCEIPL       	$4
+
+	0xFECF7F0:  81690008  lwz r11,8(r9)
+	  13: GETL       	R9, t8
+	  14: ADDL       	$0x8, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R11
+	  17: INCEIPL       	$4
+
+	0xFECF7F4:  913F000C  stw r9,12(r31)
+	  18: GETL       	R9, t12
+	  19: GETL       	R31, t14
+	  20: ADDL       	$0xC, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0xFECF7F8:  550306FE  rlwinm r3,r8,0,27,31
+	  23: GETL       	R8, t16
+	  24: ANDL       	$0x1F, t16
+	  25: PUTL       	t16, R3
+	  26: INCEIPL       	$4
+
+	0xFECF7FC:  93EB000C  stw r31,12(r11)
+	  27: GETL       	R31, t18
+	  28: GETL       	R11, t20
+	  29: ADDL       	$0xC, t20
+	  30: STL       	t18, (t20)
+	  31: INCEIPL       	$4
+
+	0xFECF800:  3BA00001  li r29,1
+	  32: MOVL       	$0x1, t22
+	  33: PUTL       	t22, R29
+	  34: INCEIPL       	$4
+
+	0xFECF804:  917F0008  stw r11,8(r31)
+	  35: GETL       	R11, t24
+	  36: GETL       	R31, t26
+	  37: ADDL       	$0x8, t26
+	  38: STL       	t24, (t26)
+	  39: INCEIPL       	$4
+
+	0xFECF808:  7D0B2E70  srawi r11,r8,5
+	  40: GETL       	R8, t28
+	  41: SARL       	$0x5, t28  (-wCa)
+	  42: PUTL       	t28, R11
+	  43: INCEIPL       	$4
+
+	0xFECF80C:  93E90008  stw r31,8(r9)
+	  44: GETL       	R31, t30
+	  45: GETL       	R9, t32
+	  46: ADDL       	$0x8, t32
+	  47: STL       	t30, (t32)
+	  48: INCEIPL       	$4
+
+	0xFECF810:  5566103A  rlwinm r6,r11,2,0,29
+	  49: GETL       	R11, t34
+	  50: SHLL       	$0x2, t34
+	  51: PUTL       	t34, R6
+	  52: INCEIPL       	$4
+
+	0xFECF814:  83FC0044  lwz r31,68(r28)
+	  53: GETL       	R28, t36
+	  54: ADDL       	$0x44, t36
+	  55: LDL       	(t36), t38
+	  56: PUTL       	t38, R31
+	  57: INCEIPL       	$4
+
+	0xFECF818:  7F66E214  add r27,r6,r28
+	  58: GETL       	R6, t40
+	  59: GETL       	R28, t42
+	  60: ADDL       	t40, t42
+	  61: PUTL       	t42, R27
+	  62: INCEIPL       	$4
+
+	0xFECF81C:  811B0438  lwz r8,1080(r27)
+	  63: GETL       	R27, t44
+	  64: ADDL       	$0x438, t44
+	  65: LDL       	(t44), t46
+	  66: PUTL       	t46, R8
+	  67: INCEIPL       	$4
+
+	0xFECF820:  7FA41830  slw r4,r29,r3
+	  68: GETL       	R29, t50
+	  69: GETL       	R3, t48
+	  70: SHLL       	t48, t50
+	  71: PUTL       	t50, R4
+	  72: INCEIPL       	$4
+
+	0xFECF824:  7F1FD000  cmp cr6,r31,r26
+	  73: GETL       	R31, t52
+	  74: GETL       	R26, t54
+	  75: CMPL       	t52, t54, t56  (-rSo)
+	  76: ICRFL       	t56, $0x6, CR
+	  77: INCEIPL       	$4
+
+	0xFECF828:  7D0A2378  or r10,r8,r4
+	  78: GETL       	R8, t58
+	  79: GETL       	R4, t60
+	  80: ORL       	t60, t58
+	  81: PUTL       	t58, R10
+	  82: INCEIPL       	$4
+
+	0xFECF82C:  915B0438  stw r10,1080(r27)
+	  83: GETL       	R10, t62
+	  84: GETL       	R27, t64
+	  85: ADDL       	$0x438, t64
+	  86: STL       	t62, (t64)
+	  87: INCEIPL       	$4
+
+	0xFECF830:  419A0144  bc 12,26,0xFECF974
+	  88: Js26o       	$0xFECF974
+
+
+
+. 2567 FECF7E4 80
+. 7F AA E2 14 55 48 E8 FE 39 3D 00 30 81 69 00 08 91 3F 00 0C 55 03 06 FE 93 EB 00 0C 3B A0 00 01 91 7F 00 08 7D 0B 2E 70 93 E9 00 08 55 66 10 3A 83 FC 00 44 7F 66 E2 14 81 1B 04 38 7F A4 18 30 7F 1F D0 00 7D 0A 23 78 91 5B 04 38 41 9A 01 44
+==== BB 2568 (0xFECF97C) approx BBs exec'd 0 ====
+
+	0xFECF97C:  57151838  rlwinm r21,r24,3,0,28
+	   0: GETL       	R24, t0
+	   1: SHLL       	$0x3, t0
+	   2: PUTL       	t0, R21
+	   3: INCEIPL       	$4
+
+	0xFECF980:  7FF5E214  add r31,r21,r28
+	   4: GETL       	R21, t2
+	   5: GETL       	R28, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0xFECF984:  393F0030  addi r9,r31,48
+	   9: GETL       	R31, t6
+	  10: ADDL       	$0x30, t6
+	  11: PUTL       	t6, R9
+	  12: INCEIPL       	$4
+
+	0xFECF988:  83A9000C  lwz r29,12(r9)
+	  13: GETL       	R9, t8
+	  14: ADDL       	$0xC, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0xFECF98C:  7F9D4800  cmp cr7,r29,r9
+	  18: GETL       	R29, t12
+	  19: GETL       	R9, t14
+	  20: CMPL       	t12, t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0xFECF990:  419E0014  bc 12,30,0xFECF9A4
+	  23: Js30o       	$0xFECF9A4
+
+
+
+. 2568 FECF97C 24
+. 57 15 18 38 7F F5 E2 14 39 3F 00 30 83 A9 00 0C 7F 9D 48 00 41 9E 00 14
+==== BB 2569 (0xFED24F0) approx BBs exec'd 0 ====
+
+	0xFED24F0:  4BFFFD54  b 0xFED2244
+	   0: JMPo       	$0xFED2244  ($4)
+
+
+
+. 2569 FED24F0 4
+. 4B FF FD 54
+==== BB 2570 (0xFED2244) approx BBs exec'd 0 ====
+
+	0xFED2244:  7C691B78  or r9,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0xFED2248:  83A10054  lwz r29,84(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x54, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFED224C:  7D234B78  or r3,r9,r9
+	   8: GETL       	R9, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFED2250:  82C10028  lwz r22,40(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x28, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R22
+	  15: INCEIPL       	$4
+
+	0xFED2254:  7FA803A6  mtlr r29
+	  16: GETL       	R29, t12
+	  17: PUTL       	t12, LR
+	  18: INCEIPL       	$4
+
+	0xFED2258:  82E1002C  lwz r23,44(r1)
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x2C, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R23
+	  23: INCEIPL       	$4
+
+	0xFED225C:  83010030  lwz r24,48(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x30, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R24
+	  28: INCEIPL       	$4
+
+	0xFED2260:  83210034  lwz r25,52(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x34, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R25
+	  33: INCEIPL       	$4
+
+	0xFED2264:  83410038  lwz r26,56(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x38, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R26
+	  38: INCEIPL       	$4
+
+	0xFED2268:  8361003C  lwz r27,60(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x3C, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R27
+	  43: INCEIPL       	$4
+
+	0xFED226C:  83810040  lwz r28,64(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x40, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R28
+	  48: INCEIPL       	$4
+
+	0xFED2270:  83A10044  lwz r29,68(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x44, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R29
+	  53: INCEIPL       	$4
+
+	0xFED2274:  83C10048  lwz r30,72(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x48, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R30
+	  58: INCEIPL       	$4
+
+	0xFED2278:  83E1004C  lwz r31,76(r1)
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x4C, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R31
+	  63: INCEIPL       	$4
+
+	0xFED227C:  38210050  addi r1,r1,80
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x50, t50
+	  66: PUTL       	t50, R1
+	  67: INCEIPL       	$4
+
+	0xFED2280:  4E800020  blr
+	  68: GETL       	LR, t52
+	  69: JMPo-r       	t52  ($4)
+
+
+
+. 2570 FED2244 64
+. 7C 69 1B 78 83 A1 00 54 7D 23 4B 78 82 C1 00 28 7F A8 03 A6 82 E1 00 2C 83 01 00 30 83 21 00 34 83 41 00 38 83 61 00 3C 83 81 00 40 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+==== BB 2571 (0xFE8D2C8) approx BBs exec'd 0 ====
+
+	0xFE8D2C8:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D2CC:  4186013C  bc 12,6,0xFE8D408
+	   4: Js06o       	$0xFE8D408
+
+
+
+. 2571 FE8D2C8 8
+. 2C 83 00 00 41 86 01 3C
+==== BB 2572 (0xFE8D2D0) approx BBs exec'd 0 ====
+
+	0xFE8D2D0:  907C0000  stw r3,0(r28)
+	   0: GETL       	R3, t0
+	   1: GETL       	R28, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE8D2D4:  93B90000  stw r29,0(r25)
+	   4: GETL       	R29, t4
+	   5: GETL       	R25, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0xFE8D2D8:  7EC3B378  or r3,r22,r22
+	   8: GETL       	R22, t8
+	   9: PUTL       	t8, R3
+	  10: INCEIPL       	$4
+
+	0xFE8D2DC:  839E0214  lwz r28,532(r30)
+	  11: GETL       	R30, t10
+	  12: ADDL       	$0x214, t10
+	  13: LDL       	(t10), t12
+	  14: PUTL       	t12, R28
+	  15: INCEIPL       	$4
+
+	0xFE8D2E0:  480497F9  bl 0xFED6AD8
+	  16: MOVL       	$0xFE8D2E4, t14
+	  17: PUTL       	t14, LR
+	  18: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2572 FE8D2D0 20
+. 90 7C 00 00 93 B9 00 00 7E C3 B3 78 83 9E 02 14 48 04 97 F9
+==== BB 2573 (0xFE8D2E4) approx BBs exec'd 0 ====
+
+	0xFE8D2E4:  3B030001  addi r24,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0xFE8D2E8:  7EA3AB78  or r3,r21,r21
+	   4: GETL       	R21, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0xFE8D2EC:  480497ED  bl 0xFED6AD8
+	   7: MOVL       	$0xFE8D2F0, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2573 FE8D2E4 12
+. 3B 03 00 01 7E A3 AB 78 48 04 97 ED
+==== BB 2574 (0xFE8D2F0) approx BBs exec'd 0 ====
+
+	0xFE8D2F0:  829C0000  lwz r20,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R20
+	   3: INCEIPL       	$4
+
+	0xFE8D2F4:  817E0218  lwz r11,536(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x218, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0xFE8D2F8:  3AE30001  addi r23,r3,1
+	   9: GETL       	R3, t8
+	  10: ADDL       	$0x1, t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0xFE8D2FC:  83B40000  lwz r29,0(r20)
+	  13: GETL       	R20, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R29
+	  16: INCEIPL       	$4
+
+	0xFE8D300:  838B0000  lwz r28,0(r11)
+	  17: GETL       	R11, t14
+	  18: LDL       	(t14), t16
+	  19: PUTL       	t16, R28
+	  20: INCEIPL       	$4
+
+	0xFE8D304:  7C7DC214  add r3,r29,r24
+	  21: GETL       	R29, t18
+	  22: GETL       	R24, t20
+	  23: ADDL       	t18, t20
+	  24: PUTL       	t20, R3
+	  25: INCEIPL       	$4
+
+	0xFE8D308:  819E01F4  lwz r12,500(r30)
+	  26: GETL       	R30, t22
+	  27: ADDL       	$0x1F4, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R12
+	  30: INCEIPL       	$4
+
+	0xFE8D30C:  813C0000  lwz r9,0(r28)
+	  31: GETL       	R28, t26
+	  32: LDL       	(t26), t28
+	  33: PUTL       	t28, R9
+	  34: INCEIPL       	$4
+
+	0xFE8D310:  7F23BA14  add r25,r3,r23
+	  35: GETL       	R3, t30
+	  36: GETL       	R23, t32
+	  37: ADDL       	t30, t32
+	  38: PUTL       	t32, R25
+	  39: INCEIPL       	$4
+
+	0xFE8D314:  7F194840  cmpl cr6,r25,r9
+	  40: GETL       	R25, t34
+	  41: GETL       	R9, t36
+	  42: CMPUL       	t34, t36, t38  (-rSo)
+	  43: ICRFL       	t38, $0x6, CR
+	  44: INCEIPL       	$4
+
+	0xFE8D318:  833E01F8  lwz r25,504(r30)
+	  45: GETL       	R30, t40
+	  46: ADDL       	$0x1F8, t40
+	  47: LDL       	(t40), t42
+	  48: PUTL       	t42, R25
+	  49: INCEIPL       	$4
+
+	0xFE8D31C:  40990048  bc 4,25,0xFE8D364
+	  50: Jc25o       	$0xFE8D364
+
+
+
+. 2574 FE8D2F0 48
+. 82 9C 00 00 81 7E 02 18 3A E3 00 01 83 B4 00 00 83 8B 00 00 7C 7D C2 14 81 9E 01 F4 81 3C 00 00 7F 23 BA 14 7F 19 48 40 83 3E 01 F8 40 99 00 48
+==== BB 2575 (0xFE8D320) approx BBs exec'd 0 ====
+
+	0xFE8D320:  7C98BA14  add r4,r24,r23
+	   0: GETL       	R24, t0
+	   1: GETL       	R23, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8D324:  2B840400  cmpli cr7,r4,1024
+	   5: GETL       	R4, t4
+	   6: MOVL       	$0x400, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFE8D328:  409C0008  bc 4,28,0xFE8D330
+	  10: Jc28o       	$0xFE8D330
+
+
+
+. 2575 FE8D320 12
+. 7C 98 BA 14 2B 84 04 00 40 9C 00 08
+==== BB 2576 (0xFE8D32C) approx BBs exec'd 0 ====
+
+	0xFE8D32C:  38800400  li r4,1024
+	   0: MOVL       	$0x400, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE8D330:  833E01F8  lwz r25,504(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x1F8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0xFE8D334:  7FA44A14  add r29,r4,r9
+	   8: GETL       	R4, t6
+	   9: GETL       	R9, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xFE8D338:  7FA4EB78  or r4,r29,r29
+	  13: GETL       	R29, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFE8D33C:  80790000  lwz r3,0(r25)
+	  16: GETL       	R25, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R3
+	  19: INCEIPL       	$4
+
+	0xFE8D340:  4811CFD9  bl 0xFFAA318
+	  20: MOVL       	$0xFE8D344, t16
+	  21: PUTL       	t16, LR
+	  22: JMPo-c       	$0xFFAA318  ($4)
+
+
+
+. 2576 FE8D32C 24
+. 38 80 04 00 83 3E 01 F8 7F A4 4A 14 7F A4 EB 78 80 79 00 00 48 11 CF D9
+==== BB 2577 (0xFE8D344) approx BBs exec'd 0 ====
+
+	0xFE8D344:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D348:  418200C0  bc 12,2,0xFE8D408
+	   4: Js02o       	$0xFE8D408
+
+
+
+. 2577 FE8D344 8
+. 2C 03 00 00 41 82 00 C0
+==== BB 2578 (0xFE8D34C) approx BBs exec'd 0 ====
+
+	0xFE8D34C:  81590000  lwz r10,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE8D350:  819E01F4  lwz r12,500(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1F4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R12
+	   8: INCEIPL       	$4
+
+	0xFE8D354:  7C8A1800  cmp cr1,r10,r3
+	   9: GETL       	R10, t8
+	  10: GETL       	R3, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0xFE8D358:  4086017C  bc 4,6,0xFE8D4D4
+	  14: Jc06o       	$0xFE8D4D4
+
+
+
+. 2578 FE8D34C 16
+. 81 59 00 00 81 9E 01 F4 7C 8A 18 00 40 86 01 7C
+==== BB 2579 (0xFE8D4D4) approx BBs exec'd 0 ====
+
+	0xFE8D4D4:  80B30000  lwz r5,0(r19)
+	   0: GETL       	R19, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFE8D4D8:  38C00000  li r6,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0xFE8D4DC:  7F062840  cmpl cr6,r6,r5
+	   7: GETL       	R6, t6
+	   8: GETL       	R5, t8
+	   9: CMPUL       	t6, t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFE8D4E0:  40B8FE7C  bc 5,24,0xFE8D35C
+	  12: Jc24o       	$0xFE8D35C
+
+
+
+. 2579 FE8D4D4 16
+. 80 B3 00 00 38 C0 00 00 7F 06 28 40 40 B8 FE 7C
+==== BB 2580 (0xFE8D35C) approx BBs exec'd 0 ====
+
+	0xFE8D35C:  93BC0000  stw r29,0(r28)
+	   0: GETL       	R29, t0
+	   1: GETL       	R28, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE8D360:  90790000  stw r3,0(r25)
+	   4: GETL       	R3, t4
+	   5: GETL       	R25, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0xFE8D364:  81790000  lwz r11,0(r25)
+	   8: GETL       	R25, t8
+	   9: LDL       	(t8), t10
+	  10: PUTL       	t10, R11
+	  11: INCEIPL       	$4
+
+	0xFE8D368:  7EC4B378  or r4,r22,r22
+	  12: GETL       	R22, t12
+	  13: PUTL       	t12, R4
+	  14: INCEIPL       	$4
+
+	0xFE8D36C:  81540000  lwz r10,0(r20)
+	  15: GETL       	R20, t14
+	  16: LDL       	(t14), t16
+	  17: PUTL       	t16, R10
+	  18: INCEIPL       	$4
+
+	0xFE8D370:  7F05C378  or r5,r24,r24
+	  19: GETL       	R24, t18
+	  20: PUTL       	t18, R5
+	  21: INCEIPL       	$4
+
+	0xFE8D374:  81130000  lwz r8,0(r19)
+	  22: GETL       	R19, t20
+	  23: LDL       	(t20), t22
+	  24: PUTL       	t22, R8
+	  25: INCEIPL       	$4
+
+	0xFE8D378:  39CE0001  addi r14,r14,1
+	  26: GETL       	R14, t24
+	  27: ADDL       	$0x1, t24
+	  28: PUTL       	t24, R14
+	  29: INCEIPL       	$4
+
+	0xFE8D37C:  82CC0000  lwz r22,0(r12)
+	  30: GETL       	R12, t26
+	  31: LDL       	(t26), t28
+	  32: PUTL       	t28, R22
+	  33: INCEIPL       	$4
+
+	0xFE8D380:  7C6B5214  add r3,r11,r10
+	  34: GETL       	R11, t30
+	  35: GETL       	R10, t32
+	  36: ADDL       	t30, t32
+	  37: PUTL       	t32, R3
+	  38: INCEIPL       	$4
+
+	0xFE8D384:  551C1838  rlwinm r28,r8,3,0,28
+	  39: GETL       	R8, t34
+	  40: SHLL       	$0x3, t34
+	  41: PUTL       	t34, R28
+	  42: INCEIPL       	$4
+
+	0xFE8D388:  919F01B0  stw r12,432(r31)
+	  43: GETL       	R12, t36
+	  44: GETL       	R31, t38
+	  45: ADDL       	$0x1B0, t38
+	  46: STL       	t36, (t38)
+	  47: INCEIPL       	$4
+
+	0xFE8D38C:  4804B34D  bl 0xFED86D8
+	  48: MOVL       	$0xFE8D390, t40
+	  49: PUTL       	t40, LR
+	  50: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2580 FE8D35C 52
+. 93 BC 00 00 90 79 00 00 81 79 00 00 7E C4 B3 78 81 54 00 00 7F 05 C3 78 81 13 00 00 39 CE 00 01 82 CC 00 00 7C 6B 52 14 55 1C 18 38 91 9F 01 B0 48 04 B3 4D
+==== BB 2581 (0xFE8D390) approx BBs exec'd 0 ====
+
+	0xFE8D390:  80940000  lwz r4,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFE8D394:  7C7CB12E  stwx r3,r28,r22
+	   4: GETL       	R22, t4
+	   5: GETL       	R28, t6
+	   6: ADDL       	t6, t4
+	   7: GETL       	R3, t8
+	   8: STL       	t8, (t4)
+	   9: INCEIPL       	$4
+
+	0xFE8D398:  80790000  lwz r3,0(r25)
+	  10: GETL       	R25, t10
+	  11: LDL       	(t10), t12
+	  12: PUTL       	t12, R3
+	  13: INCEIPL       	$4
+
+	0xFE8D39C:  7CE4C214  add r7,r4,r24
+	  14: GETL       	R4, t14
+	  15: GETL       	R24, t16
+	  16: ADDL       	t14, t16
+	  17: PUTL       	t16, R7
+	  18: INCEIPL       	$4
+
+	0xFE8D3A0:  80B30000  lwz r5,0(r19)
+	  19: GETL       	R19, t18
+	  20: LDL       	(t18), t20
+	  21: PUTL       	t20, R5
+	  22: INCEIPL       	$4
+
+	0xFE8D3A4:  7EA4AB78  or r4,r21,r21
+	  23: GETL       	R21, t22
+	  24: PUTL       	t22, R4
+	  25: INCEIPL       	$4
+
+	0xFE8D3A8:  83BF01B0  lwz r29,432(r31)
+	  26: GETL       	R31, t24
+	  27: ADDL       	$0x1B0, t24
+	  28: LDL       	(t24), t26
+	  29: PUTL       	t26, R29
+	  30: INCEIPL       	$4
+
+	0xFE8D3AC:  7C633A14  add r3,r3,r7
+	  31: GETL       	R3, t28
+	  32: GETL       	R7, t30
+	  33: ADDL       	t28, t30
+	  34: PUTL       	t30, R3
+	  35: INCEIPL       	$4
+
+	0xFE8D3B0:  54B91838  rlwinm r25,r5,3,0,28
+	  36: GETL       	R5, t32
+	  37: SHLL       	$0x3, t32
+	  38: PUTL       	t32, R25
+	  39: INCEIPL       	$4
+
+	0xFE8D3B4:  90F40000  stw r7,0(r20)
+	  40: GETL       	R7, t34
+	  41: GETL       	R20, t36
+	  42: STL       	t34, (t36)
+	  43: INCEIPL       	$4
+
+	0xFE8D3B8:  7EE5BB78  or r5,r23,r23
+	  44: GETL       	R23, t38
+	  45: PUTL       	t38, R5
+	  46: INCEIPL       	$4
+
+	0xFE8D3BC:  839D0000  lwz r28,0(r29)
+	  47: GETL       	R29, t40
+	  48: LDL       	(t40), t42
+	  49: PUTL       	t42, R28
+	  50: INCEIPL       	$4
+
+	0xFE8D3C0:  4804B319  bl 0xFED86D8
+	  51: MOVL       	$0xFE8D3C4, t44
+	  52: PUTL       	t44, LR
+	  53: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2581 FE8D390 52
+. 80 94 00 00 7C 7C B1 2E 80 79 00 00 7C E4 C2 14 80 B3 00 00 7E A4 AB 78 83 BF 01 B0 7C 63 3A 14 54 B9 18 38 90 F4 00 00 7E E5 BB 78 83 9D 00 00 48 04 B3 19
+==== BB 2582 (0xFE8D3C4) approx BBs exec'd 0 ====
+
+	0xFE8D3C4:  81940000  lwz r12,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R12
+	   3: INCEIPL       	$4
+
+	0xFE8D3C8:  80D30000  lwz r6,0(r19)
+	   4: GETL       	R19, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R6
+	   7: INCEIPL       	$4
+
+	0xFE8D3CC:  7F19E214  add r24,r25,r28
+	   8: GETL       	R25, t8
+	   9: GETL       	R28, t10
+	  10: ADDL       	t8, t10
+	  11: PUTL       	t10, R24
+	  12: INCEIPL       	$4
+
+	0xFE8D3D0:  7EACBA14  add r21,r12,r23
+	  13: GETL       	R12, t12
+	  14: GETL       	R23, t14
+	  15: ADDL       	t12, t14
+	  16: PUTL       	t14, R21
+	  17: INCEIPL       	$4
+
+	0xFE8D3D4:  90780004  stw r3,4(r24)
+	  18: GETL       	R3, t16
+	  19: GETL       	R24, t18
+	  20: ADDL       	$0x4, t18
+	  21: STL       	t16, (t18)
+	  22: INCEIPL       	$4
+
+	0xFE8D3D8:  3AC60001  addi r22,r6,1
+	  23: GETL       	R6, t20
+	  24: ADDL       	$0x1, t20
+	  25: PUTL       	t20, R22
+	  26: INCEIPL       	$4
+
+	0xFE8D3DC:  92B40000  stw r21,0(r20)
+	  27: GETL       	R21, t22
+	  28: GETL       	R20, t24
+	  29: STL       	t22, (t24)
+	  30: INCEIPL       	$4
+
+	0xFE8D3E0:  92D30000  stw r22,0(r19)
+	  31: GETL       	R22, t26
+	  32: GETL       	R19, t28
+	  33: STL       	t26, (t28)
+	  34: INCEIPL       	$4
+
+	0xFE8D3E4:  2C110000  cmpi cr0,r17,0
+	  35: GETL       	R17, t30
+	  36: CMP0L       	t30, t32  (-rSo)
+	  37: ICRFL       	t32, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0xFE8D3E8:  4182009C  bc 12,2,0xFE8D484
+	  39: Js02o       	$0xFE8D484
+
+
+
+. 2582 FE8D3C4 40
+. 81 94 00 00 80 D3 00 00 7F 19 E2 14 7E AC BA 14 90 78 00 04 3A C6 00 01 92 B4 00 00 92 D3 00 00 2C 11 00 00 41 82 00 9C
+==== BB 2583 (0xFE8D2D8) approx BBs exec'd 0 ====
+
+	0xFE8D2D8:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D2DC:  839E0214  lwz r28,532(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x214, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFE8D2E0:  480497F9  bl 0xFED6AD8
+	   8: MOVL       	$0xFE8D2E4, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2583 FE8D2D8 12
+. 7E C3 B3 78 83 9E 02 14 48 04 97 F9
+==== BB 2584 (0xFE8D364) approx BBs exec'd 0 ====
+
+	0xFE8D364:  81790000  lwz r11,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFE8D368:  7EC4B378  or r4,r22,r22
+	   4: GETL       	R22, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0xFE8D36C:  81540000  lwz r10,0(r20)
+	   7: GETL       	R20, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R10
+	  10: INCEIPL       	$4
+
+	0xFE8D370:  7F05C378  or r5,r24,r24
+	  11: GETL       	R24, t10
+	  12: PUTL       	t10, R5
+	  13: INCEIPL       	$4
+
+	0xFE8D374:  81130000  lwz r8,0(r19)
+	  14: GETL       	R19, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R8
+	  17: INCEIPL       	$4
+
+	0xFE8D378:  39CE0001  addi r14,r14,1
+	  18: GETL       	R14, t16
+	  19: ADDL       	$0x1, t16
+	  20: PUTL       	t16, R14
+	  21: INCEIPL       	$4
+
+	0xFE8D37C:  82CC0000  lwz r22,0(r12)
+	  22: GETL       	R12, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R22
+	  25: INCEIPL       	$4
+
+	0xFE8D380:  7C6B5214  add r3,r11,r10
+	  26: GETL       	R11, t22
+	  27: GETL       	R10, t24
+	  28: ADDL       	t22, t24
+	  29: PUTL       	t24, R3
+	  30: INCEIPL       	$4
+
+	0xFE8D384:  551C1838  rlwinm r28,r8,3,0,28
+	  31: GETL       	R8, t26
+	  32: SHLL       	$0x3, t26
+	  33: PUTL       	t26, R28
+	  34: INCEIPL       	$4
+
+	0xFE8D388:  919F01B0  stw r12,432(r31)
+	  35: GETL       	R12, t28
+	  36: GETL       	R31, t30
+	  37: ADDL       	$0x1B0, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0xFE8D38C:  4804B34D  bl 0xFED86D8
+	  40: MOVL       	$0xFE8D390, t32
+	  41: PUTL       	t32, LR
+	  42: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2584 FE8D364 44
+. 81 79 00 00 7E C4 B3 78 81 54 00 00 7F 05 C3 78 81 13 00 00 39 CE 00 01 82 CC 00 00 7C 6B 52 14 55 1C 18 38 91 9F 01 B0 48 04 B3 4D
+==== BB 2585 (0xFEBD8A4) approx BBs exec'd 0 ====
+
+	0xFEBD8A4:  7FE5FB78  or r5,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFEBD8A8:  7F9FE050  subf r28,r31,r28
+	   3: GETL       	R31, t2
+	   4: GETL       	R28, t4
+	   5: SUBL       	t2, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFEBD8AC:  48019EA9  bl 0xFED7754
+	   8: MOVL       	$0xFEBD8B0, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED7754  ($4)
+
+
+
+. 2585 FEBD8A4 12
+. 7F E5 FB 78 7F 9F E0 50 48 01 9E A9
+==== BB 2586 (0xFED22A4) approx BBs exec'd 0 ====
+
+	0xFED22A4:  3BA3FFF8  addi r29,r3,-8
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFF8, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0xFED22A8:  815D0004  lwz r10,4(r29)
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0xFED22AC:  55590038  rlwinm r25,r10,0,0,28
+	   9: GETL       	R10, t6
+	  10: ANDL       	$0xFFFFFFF8, t6
+	  11: PUTL       	t6, R25
+	  12: INCEIPL       	$4
+
+	0xFED22B0:  7CF900D0  neg r7,r25
+	  13: GETL       	R25, t8
+	  14: NEGL       	t8
+	  15: PUTL       	t8, R7
+	  16: INCEIPL       	$4
+
+	0xFED22B4:  7F1D3840  cmpl cr6,r29,r7
+	  17: GETL       	R29, t10
+	  18: GETL       	R7, t12
+	  19: CMPUL       	t10, t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0xFED22B8:  419901E0  bc 12,25,0xFED2498
+	  22: Js25o       	$0xFED2498
+
+
+
+. 2586 FED22A4 24
+. 3B A3 FF F8 81 5D 00 04 55 59 00 38 7C F9 00 D0 7F 1D 38 40 41 99 01 E0
+==== BB 2587 (0xFED22BC) approx BBs exec'd 0 ====
+
+	0xFED22BC:  73A00007  andi. r0,r29,0x7
+	   0: GETL       	R29, t0
+	   1: ANDL       	$0x7, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED22C0:  408201D8  bc 4,2,0xFED2498
+	   6: Jc02o       	$0xFED2498
+
+
+
+. 2587 FED22BC 8
+. 73 A0 00 07 40 82 01 D8
+==== BB 2588 (0xFED22C4) approx BBs exec'd 0 ====
+
+	0xFED22C4:  3960FFDF  li r11,-33
+	   0: MOVL       	$0xFFFFFFDF, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFED22C8:  7C9B5840  cmpl cr1,r27,r11
+	   3: GETL       	R27, t2
+	   4: GETL       	R11, t4
+	   5: CMPUL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFED22CC:  41850228  bc 12,5,0xFED24F4
+	   8: Js05o       	$0xFED24F4
+
+
+
+. 2588 FED22C4 12
+. 39 60 FF DF 7C 9B 58 40 41 85 02 28
+==== BB 2589 (0xFED22D0) approx BBs exec'd 0 ====
+
+	0xFED22D0:  399B000B  addi r12,r27,11
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xB, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0xFED22D4:  2B0C000F  cmpli cr6,r12,15
+	   4: GETL       	R12, t2
+	   5: MOVL       	$0xF, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFED22D8:  55980038  rlwinm r24,r12,0,0,28
+	   9: GETL       	R12, t8
+	  10: ANDL       	$0xFFFFFFF8, t8
+	  11: PUTL       	t8, R24
+	  12: INCEIPL       	$4
+
+	0xFED22DC:  409900C0  bc 4,25,0xFED239C
+	  13: Jc25o       	$0xFED239C
+
+
+
+. 2589 FED22D0 16
+. 39 9B 00 0B 2B 0C 00 0F 55 98 00 38 40 99 00 C0
+==== BB 2590 (0xFED22E0) approx BBs exec'd 0 ====
+
+	0xFED22E0:  71400002  andi. r0,r10,0x2
+	   0: GETL       	R10, t0
+	   1: ANDL       	$0x2, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED22E4:  408200C0  bc 4,2,0xFED23A4
+	   6: Jc02o       	$0xFED23A4
+
+
+
+. 2590 FED22E0 8
+. 71 40 00 02 40 82 00 C0
+==== BB 2591 (0xFED22E8) approx BBs exec'd 0 ====
+
+	0xFED22E8:  71400004  andi. r0,r10,0x4
+	   0: GETL       	R10, t0
+	   1: ANDL       	$0x4, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED22EC:  83FE05FC  lwz r31,1532(r30)
+	   6: GETL       	R30, t4
+	   7: ADDL       	$0x5FC, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R31
+	  10: INCEIPL       	$4
+
+	0xFED22F0:  4182000C  bc 12,2,0xFED22FC
+	  11: Js02o       	$0xFED22FC
+
+
+
+. 2591 FED22E8 12
+. 71 40 00 04 83 FE 05 FC 41 82 00 0C
+==== BB 2592 (0xFED22FC) approx BBs exec'd 0 ====
+
+	0xFED22FC:  3BA00000  li r29,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFED2300:  39200001  li r9,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0xFED2304:  7F40F828  lwarx r26,r0,r31
+	   6: GETL       	R31, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R26
+	  10: INCEIPL       	$4
+
+	0xFED2308:  7C1AE800  cmp cr0,r26,r29
+	  11: GETL       	R26, t8
+	  12: GETL       	R29, t10
+	  13: CMPL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFED230C:  4082000C  bc 4,2,0xFED2318
+	  16: Jc02o       	$0xFED2318
+
+
+
+. 2592 FED22FC 20
+. 3B A0 00 00 39 20 00 01 7F 40 F8 28 7C 1A E8 00 40 82 00 0C
+==== BB 2593 (0xFED2310) approx BBs exec'd 0 ====
+
+	0xFED2310:  7D20F92D  stwcx. r9,r0,r31
+	   0: GETL       	R31, t0
+	   1: GETL       	R9, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED2314:  40A2FFF0  bc 5,2,0xFED2304
+	   6: Jc02o       	$0xFED2304
+
+
+
+. 2593 FED2310 8
+. 7D 20 F9 2D 40 A2 FF F0
+==== BB 2594 (0xFED2304) approx BBs exec'd 0 ====
+
+	0xFED2304:  7F40F828  lwarx r26,r0,r31
+	   0: GETL       	R31, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFED2308:  7C1AE800  cmp cr0,r26,r29
+	   5: GETL       	R26, t4
+	   6: GETL       	R29, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFED230C:  4082000C  bc 4,2,0xFED2318
+	  10: Jc02o       	$0xFED2318
+
+
+
+. 2594 FED2304 12
+. 7F 40 F8 28 7C 1A E8 00 40 82 00 0C
+==== BB 2595 (0xFED2318) approx BBs exec'd 0 ====
+
+	0xFED2318:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFED231C:  2F9A0000  cmpi cr7,r26,0
+	   1: GETL       	R26, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFED2320:  409E01F4  bc 4,30,0xFED2514
+	   5: Jc30o       	$0xFED2514
+
+
+
+. 2595 FED2318 12
+. 4C 00 01 2C 2F 9A 00 00 40 9E 01 F4
+==== BB 2596 (0xFED2324) approx BBs exec'd 0 ====
+
+	0xFED2324:  80FE0600  lwz r7,1536(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x600, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFED2328:  7F84E378  or r4,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFED232C:  7CA71214  add r5,r7,r2
+	   8: GETL       	R7, t6
+	   9: GETL       	R2, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0xFED2330:  7FE3FB78  or r3,r31,r31
+	  13: GETL       	R31, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0xFED2334:  93E50000  stw r31,0(r5)
+	  16: GETL       	R31, t12
+	  17: GETL       	R5, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFED2338:  7F65DB78  or r5,r27,r27
+	  20: GETL       	R27, t16
+	  21: PUTL       	t16, R5
+	  22: INCEIPL       	$4
+
+	0xFED233C:  4BFFE6B1  bl 0xFED09EC
+	  23: MOVL       	$0xFED2340, t18
+	  24: PUTL       	t18, LR
+	  25: JMPo-c       	$0xFED09EC  ($4)
+
+
+
+. 2596 FED2324 28
+. 80 FE 06 00 7F 84 E3 78 7C A7 12 14 7F E3 FB 78 93 E5 00 00 7F 65 DB 78 4B FF E6 B1
+==== BB 2597 _int_realloc(0xFED09EC) approx BBs exec'd 0 ====
+
+	0xFED09EC:  3800FFDF  li r0,-33
+	   0: MOVL       	$0xFFFFFFDF, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFED09F0:  7CC802A6  mflr r6
+	   3: GETL       	LR, t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFED09F4:  7F850040  cmpl cr7,r5,r0
+	   6: GETL       	R5, t4
+	   7: GETL       	R0, t6
+	   8: CMPUL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFED09F8:  9421FFB0  stwu r1,-80(r1)
+	  11: GETL       	R1, t10
+	  12: GETL       	R1, t12
+	  13: ADDL       	$0xFFFFFFB0, t12
+	  14: PUTL       	t12, R1
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0xFED09FC:  480D7455  bl 0xFFA7E50
+	  17: MOVL       	$0xFED0A00, t14
+	  18: PUTL       	t14, LR
+	  19: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2597 FED09EC 20
+. 38 00 FF DF 7C C8 02 A6 7F 85 00 40 94 21 FF B0 48 0D 74 55
+==== BB 2598 (0xFED0A00) approx BBs exec'd 0 ====
+
+	0xFED0A00:  92C10028  stw r22,40(r1)
+	   0: GETL       	R22, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED0A04:  9361003C  stw r27,60(r1)
+	   5: GETL       	R27, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x3C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFED0A08:  7C761B78  or r22,r3,r3
+	  10: GETL       	R3, t8
+	  11: PUTL       	t8, R22
+	  12: INCEIPL       	$4
+
+	0xFED0A0C:  93C10048  stw r30,72(r1)
+	  13: GETL       	R30, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x48, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFED0A10:  7C9B2378  or r27,r4,r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, R27
+	  20: INCEIPL       	$4
+
+	0xFED0A14:  92E1002C  stw r23,44(r1)
+	  21: GETL       	R23, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x2C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFED0A18:  7FC802A6  mflr r30
+	  26: GETL       	LR, t20
+	  27: PUTL       	t20, R30
+	  28: INCEIPL       	$4
+
+	0xFED0A1C:  93010030  stw r24,48(r1)
+	  29: GETL       	R24, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x30, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFED0A20:  93210034  stw r25,52(r1)
+	  34: GETL       	R25, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x34, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFED0A24:  93410038  stw r26,56(r1)
+	  39: GETL       	R26, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x38, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFED0A28:  93810040  stw r28,64(r1)
+	  44: GETL       	R28, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x40, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFED0A2C:  93A10044  stw r29,68(r1)
+	  49: GETL       	R29, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x44, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFED0A30:  93E1004C  stw r31,76(r1)
+	  54: GETL       	R31, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x4C, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0xFED0A34:  90C10054  stw r6,84(r1)
+	  59: GETL       	R6, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x54, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFED0A38:  419D034C  bc 12,29,0xFED0D84
+	  64: Js29o       	$0xFED0D84
+
+
+
+. 2598 FED0A00 60
+. 92 C1 00 28 93 61 00 3C 7C 76 1B 78 93 C1 00 48 7C 9B 23 78 92 E1 00 2C 7F C8 02 A6 93 01 00 30 93 21 00 34 93 41 00 38 93 81 00 40 93 A1 00 44 93 E1 00 4C 90 C1 00 54 41 9D 03 4C
+==== BB 2599 (0xFED0A3C) approx BBs exec'd 0 ====
+
+	0xFED0A3C:  3865000B  addi r3,r5,11
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0xB, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFED0A40:  2803000F  cmpli cr0,r3,15
+	   4: GETL       	R3, t2
+	   5: MOVL       	$0xF, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFED0A44:  54780038  rlwinm r24,r3,0,0,28
+	   9: GETL       	R3, t8
+	  10: ANDL       	$0xFFFFFFF8, t8
+	  11: PUTL       	t8, R24
+	  12: INCEIPL       	$4
+
+	0xFED0A48:  40810178  bc 4,1,0xFED0BC0
+	  13: Jc01o       	$0xFED0BC0
+
+
+
+. 2599 FED0A3C 16
+. 38 65 00 0B 28 03 00 0F 54 78 00 38 40 81 01 78
+==== BB 2600 (0xFED0A4C) approx BBs exec'd 0 ====
+
+	0xFED0A4C:  3B5BFFF8  addi r26,r27,-8
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xFFFFFFF8, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0xFED0A50:  73400007  andi. r0,r26,0x7
+	   4: GETL       	R26, t2
+	   5: ANDL       	$0x7, t2
+	   6: PUTL       	t2, R0
+	   7: CMP0L       	t2, t4  (-rSo)
+	   8: ICRFL       	t4, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFED0A54:  817A0004  lwz r11,4(r26)
+	  10: GETL       	R26, t6
+	  11: ADDL       	$0x4, t6
+	  12: LDL       	(t6), t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0xFED0A58:  557D0038  rlwinm r29,r11,0,0,28
+	  15: GETL       	R11, t10
+	  16: ANDL       	$0xFFFFFFF8, t10
+	  17: PUTL       	t10, R29
+	  18: INCEIPL       	$4
+
+	0xFED0A5C:  40820374  bc 4,2,0xFED0DD0
+	  19: Jc02o       	$0xFED0DD0
+
+
+
+. 2600 FED0A4C 20
+. 3B 5B FF F8 73 40 00 07 81 7A 00 04 55 7D 00 38 40 82 03 74
+==== BB 2601 (0xFED0A60) approx BBs exec'd 0 ====
+
+	0xFED0A60:  2B0B0008  cmpli cr6,r11,8
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x8, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFED0A64:  409902C4  bc 4,25,0xFED0D28
+	   5: Jc25o       	$0xFED0D28
+
+
+
+. 2601 FED0A60 8
+. 2B 0B 00 08 40 99 02 C4
+==== BB 2602 (0xFED0A68) approx BBs exec'd 0 ====
+
+	0xFED0A68:  8016044C  lwz r0,1100(r22)
+	   0: GETL       	R22, t0
+	   1: ADDL       	$0x44C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED0A6C:  7F80E840  cmpl cr7,r0,r29
+	   5: GETL       	R0, t4
+	   6: GETL       	R29, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFED0A70:  409D02B8  bc 4,29,0xFED0D28
+	  10: Jc29o       	$0xFED0D28
+
+
+
+. 2602 FED0A68 12
+. 80 16 04 4C 7F 80 E8 40 40 9D 02 B8
+==== BB 2603 (0xFED0A74) approx BBs exec'd 0 ====
+
+	0xFED0A74:  71680002  andi. r8,r11,0x2
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED0A78:  40820150  bc 4,2,0xFED0BC8
+	   6: Jc02o       	$0xFED0BC8
+
+
+
+. 2603 FED0A74 8
+. 71 68 00 02 40 82 01 50
+==== BB 2604 (0xFED0A7C) approx BBs exec'd 0 ====
+
+	0xFED0A7C:  7FFAEA14  add r31,r26,r29
+	   0: GETL       	R26, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFED0A80:  813F0004  lwz r9,4(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFED0A84:  28890008  cmpli cr1,r9,8
+	  10: GETL       	R9, t8
+	  11: MOVL       	$0x8, t12
+	  12: CMPUL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0xFED0A88:  55280038  rlwinm r8,r9,0,0,28
+	  15: GETL       	R9, t14
+	  16: ANDL       	$0xFFFFFFF8, t14
+	  17: PUTL       	t14, R8
+	  18: INCEIPL       	$4
+
+	0xFED0A8C:  4085034C  bc 4,5,0xFED0DD8
+	  19: Jc05o       	$0xFED0DD8
+
+
+
+. 2604 FED0A7C 20
+. 7F FA EA 14 81 3F 00 04 28 89 00 08 55 28 00 38 40 85 03 4C
+==== BB 2605 (0xFED0A90) approx BBs exec'd 0 ====
+
+	0xFED0A90:  7F004040  cmpl cr6,r0,r8
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFED0A94:  40990344  bc 4,25,0xFED0DD8
+	   5: Jc25o       	$0xFED0DD8
+
+
+
+. 2605 FED0A90 8
+. 7F 00 40 40 40 99 03 44
+==== BB 2606 (0xFED0A98) approx BBs exec'd 0 ====
+
+	0xFED0A98:  7F9DC040  cmpl cr7,r29,r24
+	   0: GETL       	R29, t0
+	   1: GETL       	R24, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFED0A9C:  7FBCEB78  or r28,r29,r29
+	   5: GETL       	R29, t6
+	   6: PUTL       	t6, R28
+	   7: INCEIPL       	$4
+
+	0xFED0AA0:  409C0208  bc 4,28,0xFED0CA8
+	   8: Jc28o       	$0xFED0CA8
+
+
+
+. 2606 FED0A98 12
+. 7F 9D C0 40 7F BC EB 78 40 9C 02 08
+==== BB 2607 (0xFED0AA4) approx BBs exec'd 0 ====
+
+	0xFED0AA4:  80F60030  lwz r7,48(r22)
+	   0: GETL       	R22, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFED0AA8:  7C07F800  cmp cr0,r7,r31
+	   5: GETL       	R7, t4
+	   6: GETL       	R31, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFED0AAC:  418203F4  bc 12,2,0xFED0EA0
+	  10: Js02o       	$0xFED0EA0
+
+
+
+. 2607 FED0AA4 12
+. 80 F6 00 30 7C 07 F8 00 41 82 03 F4
+==== BB 2608 (0xFED0EA0) approx BBs exec'd 0 ====
+
+	0xFED0EA0:  7D5D4214  add r10,r29,r8
+	   0: GETL       	R29, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFED0EA4:  39980010  addi r12,r24,16
+	   5: GETL       	R24, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFED0EA8:  7C8A6040  cmpl cr1,r10,r12
+	   9: GETL       	R10, t6
+	  10: GETL       	R12, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0xFED0EAC:  41A4FC04  bc 13,4,0xFED0AB0
+	  14: Js04o       	$0xFED0AB0
+
+
+
+. 2608 FED0EA0 16
+. 7D 5D 42 14 39 98 00 10 7C 8A 60 40 41 A4 FC 04
+==== BB 2609 (0xFED0EB0) approx BBs exec'd 0 ====
+
+	0xFED0EB0:  80DE05FC  lwz r6,1532(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFED0EB4:  557D077E  rlwinm r29,r11,0,29,31
+	   5: GETL       	R11, t4
+	   6: ANDL       	$0x7, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0xFED0EB8:  7F385050  subf r25,r24,r10
+	   9: GETL       	R24, t6
+	  10: GETL       	R10, t8
+	  11: SUBL       	t6, t8
+	  12: PUTL       	t8, R25
+	  13: INCEIPL       	$4
+
+	0xFED0EBC:  7FABC378  or r11,r29,r24
+	  14: GETL       	R29, t10
+	  15: GETL       	R24, t12
+	  16: ORL       	t12, t10
+	  17: PUTL       	t10, R11
+	  18: INCEIPL       	$4
+
+	0xFED0EC0:  7EC03278  xor r0,r22,r6
+	  19: GETL       	R22, t14
+	  20: GETL       	R6, t16
+	  21: XORL       	t14, t16
+	  22: PUTL       	t16, R0
+	  23: INCEIPL       	$4
+
+	0xFED0EC4:  632A0001  ori r10,r25,0x1
+	  24: GETL       	R25, t18
+	  25: ORL       	$0x1, t18
+	  26: PUTL       	t18, R10
+	  27: INCEIPL       	$4
+
+	0xFED0EC8:  3100FFFF  addic r8,r0,-1
+	  28: GETL       	R0, t20
+	  29: ADCL       	$0xFFFFFFFF, t20  (-wCa)
+	  30: PUTL       	t20, R8
+	  31: INCEIPL       	$4
+
+	0xFED0ECC:  7FE80110  subfe r31,r8,r0
+	  32: GETL       	R8, t22
+	  33: GETL       	R0, t24
+	  34: SBBL       	t22, t24  (-rCa-wCa)
+	  35: PUTL       	t24, R31
+	  36: INCEIPL       	$4
+
+	0xFED0ED0:  7F67DB78  or r7,r27,r27
+	  37: GETL       	R27, t26
+	  38: PUTL       	t26, R7
+	  39: INCEIPL       	$4
+
+	0xFED0ED4:  57FC103A  rlwinm r28,r31,2,0,29
+	  40: GETL       	R31, t28
+	  41: SHLL       	$0x2, t28
+	  42: PUTL       	t28, R28
+	  43: INCEIPL       	$4
+
+	0xFED0ED8:  7D1AC214  add r8,r26,r24
+	  44: GETL       	R26, t30
+	  45: GETL       	R24, t32
+	  46: ADDL       	t30, t32
+	  47: PUTL       	t32, R8
+	  48: INCEIPL       	$4
+
+	0xFED0EDC:  7F975B78  or r23,r28,r11
+	  49: GETL       	R28, t34
+	  50: GETL       	R11, t36
+	  51: ORL       	t36, t34
+	  52: PUTL       	t34, R23
+	  53: INCEIPL       	$4
+
+	0xFED0EE0:  91160030  stw r8,48(r22)
+	  54: GETL       	R8, t38
+	  55: GETL       	R22, t40
+	  56: ADDL       	$0x30, t40
+	  57: STL       	t38, (t40)
+	  58: INCEIPL       	$4
+
+	0xFED0EE4:  92FA0004  stw r23,4(r26)
+	  59: GETL       	R23, t42
+	  60: GETL       	R26, t44
+	  61: ADDL       	$0x4, t44
+	  62: STL       	t42, (t44)
+	  63: INCEIPL       	$4
+
+	0xFED0EE8:  91480004  stw r10,4(r8)
+	  64: GETL       	R10, t46
+	  65: GETL       	R8, t48
+	  66: ADDL       	$0x4, t48
+	  67: STL       	t46, (t48)
+	  68: INCEIPL       	$4
+
+	0xFED0EEC:  4BFFFC98  b 0xFED0B84
+	  69: JMPo       	$0xFED0B84  ($4)
+
+
+
+. 2609 FED0EB0 64
+. 80 DE 05 FC 55 7D 07 7E 7F 38 50 50 7F AB C3 78 7E C0 32 78 63 2A 00 01 31 00 FF FF 7F E8 01 10 7F 67 DB 78 57 FC 10 3A 7D 1A C2 14 7F 97 5B 78 91 16 00 30 92 FA 00 04 91 48 00 04 4B FF FC 98
+==== BB 2610 (0xFED0B84) approx BBs exec'd 0 ====
+
+	0xFED0B84:  83210054  lwz r25,84(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x54, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0xFED0B88:  7CE33B78  or r3,r7,r7
+	   5: GETL       	R7, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED0B8C:  82C10028  lwz r22,40(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x28, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R22
+	  12: INCEIPL       	$4
+
+	0xFED0B90:  7F2803A6  mtlr r25
+	  13: GETL       	R25, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFED0B94:  82E1002C  lwz r23,44(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x2C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R23
+	  20: INCEIPL       	$4
+
+	0xFED0B98:  83010030  lwz r24,48(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x30, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R24
+	  25: INCEIPL       	$4
+
+	0xFED0B9C:  83210034  lwz r25,52(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x34, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R25
+	  30: INCEIPL       	$4
+
+	0xFED0BA0:  83410038  lwz r26,56(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x38, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R26
+	  35: INCEIPL       	$4
+
+	0xFED0BA4:  8361003C  lwz r27,60(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x3C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R27
+	  40: INCEIPL       	$4
+
+	0xFED0BA8:  83810040  lwz r28,64(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x40, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R28
+	  45: INCEIPL       	$4
+
+	0xFED0BAC:  83A10044  lwz r29,68(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x44, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R29
+	  50: INCEIPL       	$4
+
+	0xFED0BB0:  83C10048  lwz r30,72(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x48, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R30
+	  55: INCEIPL       	$4
+
+	0xFED0BB4:  83E1004C  lwz r31,76(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x4C, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R31
+	  60: INCEIPL       	$4
+
+	0xFED0BB8:  38210050  addi r1,r1,80
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x50, t48
+	  63: PUTL       	t48, R1
+	  64: INCEIPL       	$4
+
+	0xFED0BBC:  4E800020  blr
+	  65: GETL       	LR, t50
+	  66: JMPo-r       	t50  ($4)
+
+
+
+. 2610 FED0B84 60
+. 83 21 00 54 7C E3 3B 78 82 C1 00 28 7F 28 03 A6 82 E1 00 2C 83 01 00 30 83 21 00 34 83 41 00 38 83 61 00 3C 83 81 00 40 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+==== BB 2611 (0xFED2340) approx BBs exec'd 0 ====
+
+	0xFED2340:  7C7C1B78  or r28,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0xFED2344:  7C0004AC  sync
+	   3: INCEIPL       	$4
+
+	0xFED2348:  7C80F828  lwarx r4,r0,r31
+	   4: GETL       	R31, t2
+	   5: LOCKo       	
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFED234C:  7FA0F92D  stwcx. r29,r0,r31
+	   9: GETL       	R31, t6
+	  10: GETL       	R29, t8
+	  11: LOCKo       	
+	  12: STL       	t8, (t6)  (-rSo)
+	  13: ICRFL       	cr, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFED2350:  40A2FFF8  bc 5,2,0xFED2348
+	  15: Jc02o       	$0xFED2348
+
+
+
+. 2611 FED2340 20
+. 7C 7C 1B 78 7C 00 04 AC 7C 80 F8 28 7F A0 F9 2D 40 A2 FF F8
+==== BB 2612 (0xFED2354) approx BBs exec'd 0 ====
+
+	0xFED2354:  2C840001  cmpi cr1,r4,1
+	   0: GETL       	R4, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFED2358:  41850168  bc 12,5,0xFED24C0
+	   5: Js05o       	$0xFED24C0
+
+
+
+. 2612 FED2354 8
+. 2C 84 00 01 41 85 01 68
+==== BB 2613 (0xFED235C) approx BBs exec'd 0 ====
+
+	0xFED235C:  7F89E378  or r9,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0xFED2360:  83A10054  lwz r29,84(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x54, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFED2364:  7D234B78  or r3,r9,r9
+	   8: GETL       	R9, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFED2368:  82C10028  lwz r22,40(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x28, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R22
+	  15: INCEIPL       	$4
+
+	0xFED236C:  7FA803A6  mtlr r29
+	  16: GETL       	R29, t12
+	  17: PUTL       	t12, LR
+	  18: INCEIPL       	$4
+
+	0xFED2370:  82E1002C  lwz r23,44(r1)
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x2C, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R23
+	  23: INCEIPL       	$4
+
+	0xFED2374:  83010030  lwz r24,48(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x30, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R24
+	  28: INCEIPL       	$4
+
+	0xFED2378:  83210034  lwz r25,52(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x34, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R25
+	  33: INCEIPL       	$4
+
+	0xFED237C:  83410038  lwz r26,56(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x38, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R26
+	  38: INCEIPL       	$4
+
+	0xFED2380:  8361003C  lwz r27,60(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x3C, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R27
+	  43: INCEIPL       	$4
+
+	0xFED2384:  83810040  lwz r28,64(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x40, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R28
+	  48: INCEIPL       	$4
+
+	0xFED2388:  83A10044  lwz r29,68(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x44, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R29
+	  53: INCEIPL       	$4
+
+	0xFED238C:  83C10048  lwz r30,72(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x48, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R30
+	  58: INCEIPL       	$4
+
+	0xFED2390:  83E1004C  lwz r31,76(r1)
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x4C, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R31
+	  63: INCEIPL       	$4
+
+	0xFED2394:  38210050  addi r1,r1,80
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x50, t50
+	  66: PUTL       	t50, R1
+	  67: INCEIPL       	$4
+
+	0xFED2398:  4E800020  blr
+	  68: GETL       	LR, t52
+	  69: JMPo-r       	t52  ($4)
+
+
+
+. 2613 FED235C 64
+. 7F 89 E3 78 83 A1 00 54 7D 23 4B 78 82 C1 00 28 7F A8 03 A6 82 E1 00 2C 83 01 00 30 83 21 00 34 83 41 00 38 83 61 00 3C 83 81 00 40 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+==== BB 2614 (0xFEC8F4C) approx BBs exec'd 0 ====
+
+	0xFEC8F4C:  71600202  andi. r0,r11,0x202
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x202, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEC8F50:  4182006C  bc 12,2,0xFEC8FBC
+	   6: Js02o       	$0xFEC8FBC
+
+
+
+. 2614 FEC8F4C 8
+. 71 60 02 02 41 82 00 6C
+==== BB 2615 (0xFEC90FC) approx BBs exec'd 0 ====
+
+	0xFEC90FC:  40820014  bc 4,2,0xFEC9110
+	   0: Jc02o       	$0xFEC9110
+
+
+
+. 2615 FEC90FC 4
+. 40 82 00 14
+==== BB 2616 (0xFEC9100) approx BBs exec'd 0 ====
+
+	0xFEC9100:  815F0000  lwz r10,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFEC9104:  61400010  ori r0,r10,0x10
+	   4: GETL       	R10, t4
+	   5: ORL       	$0x10, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFEC9108:  901F0000  stw r0,0(r31)
+	   8: GETL       	R0, t6
+	   9: GETL       	R31, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFEC910C:  4BFFFF08  b 0xFEC9014
+	  12: JMPo       	$0xFEC9014  ($4)
+
+
+
+. 2616 FEC9100 16
+. 81 5F 00 00 61 40 00 10 90 1F 00 00 4B FF FF 08
+==== BB 2617 (0xFEC9190) approx BBs exec'd 0 ====
+
+	0xFEC9190:  3860FFFF  li r3,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC9194:  4BFFFD70  b 0xFEC8F04
+	   3: JMPo       	$0xFEC8F04  ($4)
+
+
+
+. 2617 FEC9190 8
+. 38 60 FF FF 4B FF FD 70
+==== BB 2618 (0xFEC8F04) approx BBs exec'd 0 ====
+
+	0xFEC8F04:  83E10024  lwz r31,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFEC8F08:  8361000C  lwz r27,12(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R27
+	   9: INCEIPL       	$4
+
+	0xFEC8F0C:  7FE803A6  mtlr r31
+	  10: GETL       	R31, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFEC8F10:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0xFEC8F14:  83A10014  lwz r29,20(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R29
+	  22: INCEIPL       	$4
+
+	0xFEC8F18:  83C10018  lwz r30,24(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R30
+	  27: INCEIPL       	$4
+
+	0xFEC8F1C:  83E1001C  lwz r31,28(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R31
+	  32: INCEIPL       	$4
+
+	0xFEC8F20:  38210020  addi r1,r1,32
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x20, t26
+	  35: PUTL       	t26, R1
+	  36: INCEIPL       	$4
+
+	0xFEC8F24:  4E800020  blr
+	  37: GETL       	LR, t28
+	  38: JMPo-r       	t28  ($4)
+
+
+
+. 2618 FEC8F04 36
+. 83 E1 00 24 83 61 00 0C 7F E8 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2619 (0xFECBD40) approx BBs exec'd 0 ====
+
+	0xFECBD40:  81410014  lwz r10,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFECBD44:  83E10008  lwz r31,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0xFECBD48:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFECBD4C:  7D4803A6  mtlr r10
+	  14: GETL       	R10, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFECBD50:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 2619 FECBD40 20
+. 81 41 00 14 83 E1 00 08 38 21 00 10 7D 48 03 A6 4E 80 00 20
+==== BB 2620 (0xFEBD9D0) approx BBs exec'd 0 ====
+
+	0xFEBD9D0:  41AEFF10  bc 13,14,0xFEBD8E0
+	   0: Js14o       	$0xFEBD8E0
+
+
+
+. 2620 FEBD9D0 4
+. 41 AE FF 10
+==== BB 2621 (0xFEBD8E0) approx BBs exec'd 0 ====
+
+	0xFEBD8E0:  81410044  lwz r10,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFEBD8E4:  7C77D050  subf r3,r23,r26
+	   5: GETL       	R23, t4
+	   6: GETL       	R26, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFEBD8E8:  81810014  lwz r12,20(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R12
+	  14: INCEIPL       	$4
+
+	0xFEBD8EC:  82C10018  lwz r22,24(r1)
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x18, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R22
+	  19: INCEIPL       	$4
+
+	0xFEBD8F0:  7D4803A6  mtlr r10
+	  20: GETL       	R10, t16
+	  21: PUTL       	t16, LR
+	  22: INCEIPL       	$4
+
+	0xFEBD8F4:  82E1001C  lwz r23,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R23
+	  27: INCEIPL       	$4
+
+	0xFEBD8F8:  7D838120  mtcrf 0x38,r12
+	  28: GETL       	R12, t22
+	  29: ICRFL       	t22, $0x2, CR
+	  30: ICRFL       	t22, $0x3, CR
+	  31: ICRFL       	t22, $0x4, CR
+	  32: INCEIPL       	$4
+
+	0xFEBD8FC:  83010020  lwz r24,32(r1)
+	  33: GETL       	R1, t24
+	  34: ADDL       	$0x20, t24
+	  35: LDL       	(t24), t26
+	  36: PUTL       	t26, R24
+	  37: INCEIPL       	$4
+
+	0xFEBD900:  83210024  lwz r25,36(r1)
+	  38: GETL       	R1, t28
+	  39: ADDL       	$0x24, t28
+	  40: LDL       	(t28), t30
+	  41: PUTL       	t30, R25
+	  42: INCEIPL       	$4
+
+	0xFEBD904:  83410028  lwz r26,40(r1)
+	  43: GETL       	R1, t32
+	  44: ADDL       	$0x28, t32
+	  45: LDL       	(t32), t34
+	  46: PUTL       	t34, R26
+	  47: INCEIPL       	$4
+
+	0xFEBD908:  8361002C  lwz r27,44(r1)
+	  48: GETL       	R1, t36
+	  49: ADDL       	$0x2C, t36
+	  50: LDL       	(t36), t38
+	  51: PUTL       	t38, R27
+	  52: INCEIPL       	$4
+
+	0xFEBD90C:  83810030  lwz r28,48(r1)
+	  53: GETL       	R1, t40
+	  54: ADDL       	$0x30, t40
+	  55: LDL       	(t40), t42
+	  56: PUTL       	t42, R28
+	  57: INCEIPL       	$4
+
+	0xFEBD910:  83A10034  lwz r29,52(r1)
+	  58: GETL       	R1, t44
+	  59: ADDL       	$0x34, t44
+	  60: LDL       	(t44), t46
+	  61: PUTL       	t46, R29
+	  62: INCEIPL       	$4
+
+	0xFEBD914:  83C10038  lwz r30,56(r1)
+	  63: GETL       	R1, t48
+	  64: ADDL       	$0x38, t48
+	  65: LDL       	(t48), t50
+	  66: PUTL       	t50, R30
+	  67: INCEIPL       	$4
+
+	0xFEBD918:  83E1003C  lwz r31,60(r1)
+	  68: GETL       	R1, t52
+	  69: ADDL       	$0x3C, t52
+	  70: LDL       	(t52), t54
+	  71: PUTL       	t54, R31
+	  72: INCEIPL       	$4
+
+	0xFEBD91C:  38210040  addi r1,r1,64
+	  73: GETL       	R1, t56
+	  74: ADDL       	$0x40, t56
+	  75: PUTL       	t56, R1
+	  76: INCEIPL       	$4
+
+	0xFEBD920:  4E800020  blr
+	  77: GETL       	LR, t58
+	  78: JMPo-r       	t58  ($4)
+
+
+
+. 2621 FEBD8E0 68
+. 81 41 00 44 7C 77 D0 50 81 81 00 14 82 C1 00 18 7D 48 03 A6 82 E1 00 1C 7D 83 81 20 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 2622 (0xFE8D3F8) approx BBs exec'd 0 ====
+
+	0xFE8D3F8:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D3FC:  4802E4B5  bl 0xFEBB8B0
+	   3: MOVL       	$0xFE8D400, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFEBB8B0  ($4)
+
+
+
+. 2622 FE8D3F8 8
+. 7F 63 DB 78 48 02 E4 B5
+==== BB 2623 _IO_fclose@@GLIBC_2.1(0xFEBB8B0) approx BBs exec'd 0 ====
+
+	0xFEBB8B0:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEBB8B4:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEBB8B8:  93A10014  stw r29,20(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEBB8BC:  480EC595  bl 0xFFA7E50
+	  14: MOVL       	$0xFEBB8C0, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2623 FEBB8B0 16
+. 7C 88 02 A6 94 21 FF E0 93 A1 00 14 48 0E C5 95
+==== BB 2624 (0xFEBB8C0) approx BBs exec'd 0 ====
+
+	0xFEBB8C0:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEBB8C4:  7C7D1B78  or r29,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFEBB8C8:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEBB8CC:  7FC802A6  mflr r30
+	  13: GETL       	LR, t10
+	  14: PUTL       	t10, R30
+	  15: INCEIPL       	$4
+
+	0xFEBB8D0:  93410008  stw r26,8(r1)
+	  16: GETL       	R26, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x8, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFEBB8D4:  9361000C  stw r27,12(r1)
+	  21: GETL       	R27, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEBB8D8:  93810010  stw r28,16(r1)
+	  26: GETL       	R28, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x10, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFEBB8DC:  90810024  stw r4,36(r1)
+	  31: GETL       	R4, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x24, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFEBB8E0:  88030046  lbz r0,70(r3)
+	  36: GETL       	R3, t28
+	  37: ADDL       	$0x46, t28
+	  38: LDB       	(t28), t30
+	  39: PUTL       	t30, R0
+	  40: INCEIPL       	$4
+
+	0xFEBB8E4:  7C1F0774  extsb r31,r0
+	  41: GETB       	R0, t32
+	  42: WIDENL_Bs       	_st32
+	  43: PUTL       	t32, R31
+	  44: INCEIPL       	$4
+
+	0xFEBB8E8:  2F9F0000  cmpi cr7,r31,0
+	  45: GETL       	R31, t34
+	  46: CMP0L       	t34, t36  (-rSo)
+	  47: ICRFL       	t36, $0x7, CR
+	  48: INCEIPL       	$4
+
+	0xFEBB8EC:  409E0190  bc 4,30,0xFEBBA7C
+	  49: Jc30o       	$0xFEBBA7C
+
+
+
+. 2624 FEBB8C0 48
+. 93 C1 00 18 7C 7D 1B 78 93 E1 00 1C 7F C8 02 A6 93 41 00 08 93 61 00 0C 93 81 00 10 90 81 00 24 88 03 00 46 7C 1F 07 74 2F 9F 00 00 40 9E 01 90
+==== BB 2625 (0xFEBB8F0) approx BBs exec'd 0 ====
+
+	0xFEBB8F0:  80030000  lwz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEBB8F4:  70092000  andi. r9,r0,0x2000
+	   4: GETL       	R0, t4
+	   5: ANDL       	$0x2000, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEBB8F8:  40820178  bc 4,2,0xFEBBA70
+	  10: Jc02o       	$0xFEBBA70
+
+
+
+. 2625 FEBB8F0 12
+. 80 03 00 00 70 09 20 00 40 82 01 78
+==== BB 2626 (0xFEBBA70) approx BBs exec'd 0 ====
+
+	0xFEBBA70:  4800F3E1  bl 0xFECAE50
+	   0: MOVL       	$0xFEBBA74, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFECAE50  ($4)
+
+
+
+. 2626 FEBBA70 4
+. 48 00 F3 E1
+==== BB 2627 _IO_un_link_internal(0xFECAE50) approx BBs exec'd 0 ====
+
+	0xFECAE50:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFECAE54:  9421FFC0  stwu r1,-64(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFC0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECAE58:  7D800026  mfcr r12
+	   9: GETL       	CR, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0xFECAE5C:  480DCFF5  bl 0xFFA7E50
+	  12: MOVL       	$0xFECAE60, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2627 FECAE50 16
+. 7C 88 02 A6 94 21 FF C0 7D 80 00 26 48 0D CF F5
+==== BB 2628 (0xFECAE60) approx BBs exec'd 0 ====
+
+	0xFECAE60:  93C10038  stw r30,56(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x38, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECAE64:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFECAE68:  90810044  stw r4,68(r1)
+	   8: GETL       	R4, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x44, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECAE6C:  80030000  lwz r0,0(r3)
+	  13: GETL       	R3, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0xFECAE70:  93E1003C  stw r31,60(r1)
+	  17: GETL       	R31, t14
+	  18: GETL       	R1, t16
+	  19: ADDL       	$0x3C, t16
+	  20: STL       	t14, (t16)
+	  21: INCEIPL       	$4
+
+	0xFECAE74:  7C7F1B78  or r31,r3,r3
+	  22: GETL       	R3, t18
+	  23: PUTL       	t18, R31
+	  24: INCEIPL       	$4
+
+	0xFECAE78:  70090080  andi. r9,r0,0x80
+	  25: GETL       	R0, t20
+	  26: ANDL       	$0x80, t20
+	  27: PUTL       	t20, R9
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0xFECAE7C:  93410028  stw r26,40(r1)
+	  31: GETL       	R26, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x28, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFECAE80:  9361002C  stw r27,44(r1)
+	  36: GETL       	R27, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x2C, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0xFECAE84:  93810030  stw r28,48(r1)
+	  41: GETL       	R28, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x30, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0xFECAE88:  93A10034  stw r29,52(r1)
+	  46: GETL       	R29, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x34, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0xFECAE8C:  91810024  stw r12,36(r1)
+	  51: GETL       	R12, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x24, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0xFECAE90:  4182016C  bc 12,2,0xFECAFFC
+	  56: Js02o       	$0xFECAFFC
+
+
+
+. 2628 FECAE60 52
+. 93 C1 00 38 7F C8 02 A6 90 81 00 44 80 03 00 00 93 E1 00 3C 7C 7F 1B 78 70 09 00 80 93 41 00 28 93 61 00 2C 93 81 00 30 93 A1 00 34 91 81 00 24 41 82 01 6C
+==== BB 2629 (0xFECAE94) approx BBs exec'd 0 ====
+
+	0xFECAE94:  835E1B48  lwz r26,6984(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1B48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFECAE98:  813A00B8  lwz r9,184(r26)
+	   5: GETL       	R26, t4
+	   6: ADDL       	$0xB8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFECAE9C:  3169FFFF  addic r11,r9,-1
+	  10: GETL       	R9, t8
+	  11: ADCL       	$0xFFFFFFFF, t8  (-wCa)
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0xFECAEA0:  7C0B4910  subfe r0,r11,r9
+	  14: GETL       	R11, t10
+	  15: GETL       	R9, t12
+	  16: SBBL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R0
+	  18: INCEIPL       	$4
+
+	0xFECAEA4:  2E000000  cmpi cr4,r0,0
+	  19: GETL       	R0, t14
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x4, CR
+	  22: INCEIPL       	$4
+
+	0xFECAEA8:  409201C8  bc 4,18,0xFECB070
+	  23: Jc18o       	$0xFECB070
+
+
+
+. 2629 FECAE94 24
+. 83 5E 1B 48 81 3A 00 B8 31 69 FF FF 7C 0B 49 10 2E 00 00 00 40 92 01 C8
+==== BB 2630 (0xFECAEAC) approx BBs exec'd 0 ====
+
+	0xFECAEAC:  80DE05E4  lwz r6,1508(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5E4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFECAEB0:  90010014  stw r0,20(r1)
+	   5: GETL       	R0, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x14, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECAEB4:  90C10010  stw r6,16(r1)
+	  10: GETL       	R6, t8
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0x10, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFECAEB8:  83BE05E0  lwz r29,1504(r30)
+	  15: GETL       	R30, t12
+	  16: ADDL       	$0x5E0, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R29
+	  19: INCEIPL       	$4
+
+	0xFECAEBC:  3B828BA0  addi r28,r2,-29792
+	  20: GETL       	R2, t16
+	  21: ADDL       	$0xFFFF8BA0, t16
+	  22: PUTL       	t16, R28
+	  23: INCEIPL       	$4
+
+	0xFECAEC0:  80FD0008  lwz r7,8(r29)
+	  24: GETL       	R29, t18
+	  25: ADDL       	$0x8, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R7
+	  28: INCEIPL       	$4
+
+	0xFECAEC4:  7F87E000  cmp cr7,r7,r28
+	  29: GETL       	R7, t22
+	  30: GETL       	R28, t24
+	  31: CMPL       	t22, t24, t26  (-rSo)
+	  32: ICRFL       	t26, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0xFECAEC8:  419E0030  bc 12,30,0xFECAEF8
+	  34: Js30o       	$0xFECAEF8
+
+
+
+. 2630 FECAEAC 32
+. 80 DE 05 E4 90 01 00 14 90 C1 00 10 83 BE 05 E0 3B 82 8B A0 80 FD 00 08 7F 87 E0 00 41 9E 00 30
+==== BB 2631 (0xFECAECC) approx BBs exec'd 0 ====
+
+	0xFECAECC:  39400000  li r10,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFECAED0:  3B600001  li r27,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R27
+	   5: INCEIPL       	$4
+
+	0xFECAED4:  7D00E828  lwarx r8,r0,r29
+	   6: GETL       	R29, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R8
+	  10: INCEIPL       	$4
+
+	0xFECAED8:  7C085000  cmp cr0,r8,r10
+	  11: GETL       	R8, t8
+	  12: GETL       	R10, t10
+	  13: CMPL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFECAEDC:  4082000C  bc 4,2,0xFECAEE8
+	  16: Jc02o       	$0xFECAEE8
+
+
+
+. 2631 FECAECC 20
+. 39 40 00 00 3B 60 00 01 7D 00 E8 28 7C 08 50 00 40 82 00 0C
+==== BB 2632 (0xFECAEE0) approx BBs exec'd 0 ====
+
+	0xFECAEE0:  7F60E92D  stwcx. r27,r0,r29
+	   0: GETL       	R29, t0
+	   1: GETL       	R27, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECAEE4:  40A2FFF0  bc 5,2,0xFECAED4
+	   6: Jc02o       	$0xFECAED4
+
+
+
+. 2632 FECAEE0 8
+. 7F 60 E9 2D 40 A2 FF F0
+==== BB 2633 (0xFECAED4) approx BBs exec'd 0 ====
+
+	0xFECAED4:  7D00E828  lwarx r8,r0,r29
+	   0: GETL       	R29, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFECAED8:  7C085000  cmp cr0,r8,r10
+	   5: GETL       	R8, t4
+	   6: GETL       	R10, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECAEDC:  4082000C  bc 4,2,0xFECAEE8
+	  10: Jc02o       	$0xFECAEE8
+
+
+
+. 2633 FECAED4 12
+. 7D 00 E8 28 7C 08 50 00 40 82 00 0C
+==== BB 2634 (0xFECAEE8) approx BBs exec'd 0 ====
+
+	0xFECAEE8:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFECAEEC:  2C880000  cmpi cr1,r8,0
+	   1: GETL       	R8, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFECAEF0:  408601EC  bc 4,6,0xFECB0DC
+	   5: Jc06o       	$0xFECB0DC
+
+
+
+. 2634 FECAEE8 12
+. 4C 00 01 2C 2C 88 00 00 40 86 01 EC
+==== BB 2635 (0xFECAEF4) approx BBs exec'd 0 ====
+
+	0xFECAEF4:  939D0008  stw r28,8(r29)
+	   0: GETL       	R28, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECAEF8:  819D0004  lwz r12,4(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0xFECAEFC:  837E05E8  lwz r27,1512(r30)
+	  10: GETL       	R30, t8
+	  11: ADDL       	$0x5E8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R27
+	  14: INCEIPL       	$4
+
+	0xFECAF00:  380C0001  addi r0,r12,1
+	  15: GETL       	R12, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R0
+	  18: INCEIPL       	$4
+
+	0xFECAF04:  901D0004  stw r0,4(r29)
+	  19: GETL       	R0, t14
+	  20: GETL       	R29, t16
+	  21: ADDL       	$0x4, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFECAF08:  839F0000  lwz r28,0(r31)
+	  24: GETL       	R31, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R28
+	  27: INCEIPL       	$4
+
+	0xFECAF0C:  93FB0000  stw r31,0(r27)
+	  28: GETL       	R31, t22
+	  29: GETL       	R27, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0xFECAF10:  73898000  andi. r9,r28,0x8000
+	  32: GETL       	R28, t26
+	  33: ANDL       	$0x8000, t26
+	  34: PUTL       	t26, R9
+	  35: CMP0L       	t26, t28  (-rSo)
+	  36: ICRFL       	t28, $0x0, CR
+	  37: INCEIPL       	$4
+
+	0xFECAF14:  40820054  bc 4,2,0xFECAF68
+	  38: Jc02o       	$0xFECAF68
+
+
+
+. 2635 FECAEF4 36
+. 93 9D 00 08 81 9D 00 04 83 7E 05 E8 38 0C 00 01 90 1D 00 04 83 9F 00 00 93 FB 00 00 73 89 80 00 40 82 00 54
+==== BB 2636 (0xFECAF68) approx BBs exec'd 0 ====
+
+	0xFECAF68:  815E1BEC  lwz r10,7148(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BEC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFECAF6C:  812A0000  lwz r9,0(r10)
+	   5: GETL       	R10, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFECAF70:  2F890000  cmpi cr7,r9,0
+	   9: GETL       	R9, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFECAF74:  419E0020  bc 12,30,0xFECAF94
+	  13: Js30o       	$0xFECAF94
+
+
+
+. 2636 FECAF68 16
+. 81 5E 1B EC 81 2A 00 00 2F 89 00 00 41 9E 00 20
+==== BB 2637 (0xFECAF78) approx BBs exec'd 0 ====
+
+	0xFECAF78:  7C89F800  cmp cr1,r9,r31
+	   0: GETL       	R9, t0
+	   1: GETL       	R31, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFECAF7C:  41860144  bc 12,6,0xFECB0C0
+	   5: Js06o       	$0xFECB0C0
+
+
+
+. 2637 FECAF78 8
+. 7C 89 F8 00 41 86 01 44
+==== BB 2638 (0xFECB0C0) approx BBs exec'd 0 ====
+
+	0xFECB0C0:  817E05EC  lwz r11,1516(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5EC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFECB0C4:  80BF0034  lwz r5,52(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x34, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFECB0C8:  806B0000  lwz r3,0(r11)
+	  10: GETL       	R11, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R3
+	  13: INCEIPL       	$4
+
+	0xFECB0CC:  90AA0000  stw r5,0(r10)
+	  14: GETL       	R5, t12
+	  15: GETL       	R10, t14
+	  16: STL       	t12, (t14)
+	  17: INCEIPL       	$4
+
+	0xFECB0D0:  39230001  addi r9,r3,1
+	  18: GETL       	R3, t16
+	  19: ADDL       	$0x1, t16
+	  20: PUTL       	t16, R9
+	  21: INCEIPL       	$4
+
+	0xFECB0D4:  912B0000  stw r9,0(r11)
+	  22: GETL       	R9, t18
+	  23: GETL       	R11, t20
+	  24: STL       	t18, (t20)
+	  25: INCEIPL       	$4
+
+	0xFECB0D8:  4BFFFEBC  b 0xFECAF94
+	  26: JMPo       	$0xFECAF94  ($4)
+
+
+
+. 2638 FECB0C0 28
+. 81 7E 05 EC 80 BF 00 34 80 6B 00 00 90 AA 00 00 39 23 00 01 91 2B 00 00 4B FF FE BC
+==== BB 2639 (0xFECAF94) approx BBs exec'd 0 ====
+
+	0xFECAF94:  815F0000  lwz r10,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFECAF98:  5548066E  rlwinm r8,r10,0,25,23
+	   4: GETL       	R10, t4
+	   5: ANDL       	$0xFFFFFF7F, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0xFECAF9C:  71098000  andi. r9,r8,0x8000
+	   8: GETL       	R8, t6
+	   9: ANDL       	$0x8000, t6
+	  10: PUTL       	t6, R9
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0xFECAFA0:  911F0000  stw r8,0(r31)
+	  14: GETL       	R8, t10
+	  15: GETL       	R31, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFECAFA4:  4082001C  bc 4,2,0xFECAFC0
+	  18: Jc02o       	$0xFECAFC0
+
+
+
+. 2639 FECAF94 20
+. 81 5F 00 00 55 48 06 6E 71 09 80 00 91 1F 00 00 40 82 00 1C
+==== BB 2640 (0xFECAFC0) approx BBs exec'd 0 ====
+
+	0xFECAFC0:  819D0004  lwz r12,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFECAFC4:  38000000  li r0,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFECAFC8:  901B0000  stw r0,0(r27)
+	   8: GETL       	R0, t6
+	   9: GETL       	R27, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFECAFCC:  392CFFFF  addi r9,r12,-1
+	  12: GETL       	R12, t10
+	  13: ADDL       	$0xFFFFFFFF, t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0xFECAFD0:  2C890000  cmpi cr1,r9,0
+	  16: GETL       	R9, t12
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0xFECAFD4:  913D0004  stw r9,4(r29)
+	  20: GETL       	R9, t16
+	  21: GETL       	R29, t18
+	  22: ADDL       	$0x4, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0xFECAFD8:  40860020  bc 4,6,0xFECAFF8
+	  25: Jc06o       	$0xFECAFF8
+
+
+
+. 2640 FECAFC0 28
+. 81 9D 00 04 38 00 00 00 90 1B 00 00 39 2C FF FF 2C 89 00 00 91 3D 00 04 40 86 00 20
+==== BB 2641 (0xFECAFDC) approx BBs exec'd 0 ====
+
+	0xFECAFDC:  913D0008  stw r9,8(r29)
+	   0: GETL       	R9, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECAFE0:  7C0004AC  sync
+	   5: INCEIPL       	$4
+
+	0xFECAFE4:  7F60E828  lwarx r27,r0,r29
+	   6: GETL       	R29, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R27
+	  10: INCEIPL       	$4
+
+	0xFECAFE8:  7D20E92D  stwcx. r9,r0,r29
+	  11: GETL       	R29, t8
+	  12: GETL       	R9, t10
+	  13: LOCKo       	
+	  14: STL       	t10, (t8)  (-rSo)
+	  15: ICRFL       	cr, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFECAFEC:  40A2FFF8  bc 5,2,0xFECAFE4
+	  17: Jc02o       	$0xFECAFE4
+
+
+
+. 2641 FECAFDC 20
+. 91 3D 00 08 7C 00 04 AC 7F 60 E8 28 7D 20 E9 2D 40 A2 FF F8
+==== BB 2642 (0xFECAFF0) approx BBs exec'd 0 ====
+
+	0xFECAFF0:  2C1B0001  cmpi cr0,r27,1
+	   0: GETL       	R27, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFECAFF4:  418100F4  bc 12,1,0xFECB0E8
+	   5: Js01o       	$0xFECB0E8
+
+
+
+. 2642 FECAFF0 8
+. 2C 1B 00 01 41 81 00 F4
+==== BB 2643 (0xFECAFF8) approx BBs exec'd 0 ====
+
+	0xFECAFF8:  40920034  bc 4,18,0xFECB02C
+	   0: Jc18o       	$0xFECB02C
+
+
+
+. 2643 FECAFF8 4
+. 40 92 00 34
+==== BB 2644 (0xFECAFFC) approx BBs exec'd 0 ====
+
+	0xFECAFFC:  83A10044  lwz r29,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFECB000:  80810024  lwz r4,36(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x24, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFECB004:  7FA803A6  mtlr r29
+	  10: GETL       	R29, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFECB008:  83410028  lwz r26,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R26
+	  17: INCEIPL       	$4
+
+	0xFECB00C:  8361002C  lwz r27,44(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x2C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R27
+	  22: INCEIPL       	$4
+
+	0xFECB010:  7C808120  mtcrf 0x8,r4
+	  23: GETL       	R4, t18
+	  24: ICRFL       	t18, $0x4, CR
+	  25: INCEIPL       	$4
+
+	0xFECB014:  83810030  lwz r28,48(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x30, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R28
+	  30: INCEIPL       	$4
+
+	0xFECB018:  83A10034  lwz r29,52(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x34, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R29
+	  35: INCEIPL       	$4
+
+	0xFECB01C:  83C10038  lwz r30,56(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x38, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R30
+	  40: INCEIPL       	$4
+
+	0xFECB020:  83E1003C  lwz r31,60(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x3C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R31
+	  45: INCEIPL       	$4
+
+	0xFECB024:  38210040  addi r1,r1,64
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x40, t36
+	  48: PUTL       	t36, R1
+	  49: INCEIPL       	$4
+
+	0xFECB028:  4E800020  blr
+	  50: GETL       	LR, t38
+	  51: JMPo-r       	t38  ($4)
+
+
+
+. 2644 FECAFFC 48
+. 83 A1 00 44 80 81 00 24 7F A8 03 A6 83 41 00 28 83 61 00 2C 7C 80 81 20 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 2645 (0xFEBBA74) approx BBs exec'd 0 ====
+
+	0xFEBBA74:  801D0000  lwz r0,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEBBA78:  4BFFFE84  b 0xFEBB8FC
+	   4: JMPo       	$0xFEBB8FC  ($4)
+
+
+
+. 2645 FEBBA74 8
+. 80 1D 00 00 4B FF FE 84
+==== BB 2646 (0xFEBB8FC) approx BBs exec'd 0 ====
+
+	0xFEBB8FC:  70098000  andi. r9,r0,0x8000
+	   0: GETL       	R0, t0
+	   1: ANDL       	$0x8000, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEBB900:  40820054  bc 4,2,0xFEBB954
+	   6: Jc02o       	$0xFEBB954
+
+
+
+. 2646 FEBB8FC 8
+. 70 09 80 00 40 82 00 54
+==== BB 2647 (0xFEBB954) approx BBs exec'd 0 ====
+
+	0xFEBB954:  540ADFFE  rlwinm r10,r0,27,31,31
+	   0: GETL       	R0, t0
+	   1: ROLL       	$0x1B, t0
+	   2: ANDL       	$0x1, t0
+	   3: PUTL       	t0, R10
+	   4: INCEIPL       	$4
+
+	0xFEBB958:  70092000  andi. r9,r0,0x2000
+	   5: GETL       	R0, t2
+	   6: ANDL       	$0x2000, t2
+	   7: PUTL       	t2, R9
+	   8: CMP0L       	t2, t4  (-rSo)
+	   9: ICRFL       	t4, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0xFEBB95C:  2F8A0000  cmpi cr7,r10,0
+	  11: GETL       	R10, t6
+	  12: CMP0L       	t6, t8  (-rSo)
+	  13: ICRFL       	t8, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFEBB960:  7C600026  mfcr r3
+	  15: GETL       	CR, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0xFEBB964:  5463FFFE  rlwinm r3,r3,31,31,31
+	  18: GETL       	R3, t12
+	  19: ROLL       	$0x1F, t12
+	  20: ANDL       	$0x1, t12
+	  21: PUTL       	t12, R3
+	  22: INCEIPL       	$4
+
+	0xFEBB968:  3B43FFFF  addi r26,r3,-1
+	  23: GETL       	R3, t14
+	  24: ADDL       	$0xFFFFFFFF, t14
+	  25: PUTL       	t14, R26
+	  26: INCEIPL       	$4
+
+	0xFEBB96C:  4082013C  bc 4,2,0xFEBBAA8
+	  27: Jc02o       	$0xFEBBAA8
+
+
+
+. 2647 FEBB954 28
+. 54 0A DF FE 70 09 20 00 2F 8A 00 00 7C 60 00 26 54 63 FF FE 3B 43 FF FF 40 82 01 3C
+==== BB 2648 (0xFEBBAA8) approx BBs exec'd 0 ====
+
+	0xFEBBAA8:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEBBAAC:  4800C7E5  bl 0xFEC8290
+	   3: MOVL       	$0xFEBBAB0, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFEC8290  ($4)
+
+
+
+. 2648 FEBBAA8 8
+. 7F A3 EB 78 48 00 C7 E5
+==== BB 2649 _IO_file_close_it@@GLIBC_2.1(0xFEC8290) approx BBs exec'd 0 ====
+
+	0xFEC8290:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEC8294:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEC8298:  93E1001C  stw r31,28(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEC829C:  3920FFFF  li r9,-1
+	  14: MOVL       	$0xFFFFFFFF, t10
+	  15: PUTL       	t10, R9
+	  16: INCEIPL       	$4
+
+	0xFEC82A0:  93810010  stw r28,16(r1)
+	  17: GETL       	R28, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFEC82A4:  7C7F1B78  or r31,r3,r3
+	  22: GETL       	R3, t16
+	  23: PUTL       	t16, R31
+	  24: INCEIPL       	$4
+
+	0xFEC82A8:  90810024  stw r4,36(r1)
+	  25: GETL       	R4, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x24, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFEC82AC:  80030038  lwz r0,56(r3)
+	  30: GETL       	R3, t22
+	  31: ADDL       	$0x38, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFEC82B0:  93A10014  stw r29,20(r1)
+	  35: GETL       	R29, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x14, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFEC82B4:  2F80FFFF  cmpi cr7,r0,-1
+	  40: GETL       	R0, t30
+	  41: MOVL       	$0xFFFFFFFF, t34
+	  42: CMPL       	t30, t34, t32  (-rSo)
+	  43: ICRFL       	t32, $0x7, CR
+	  44: INCEIPL       	$4
+
+	0xFEC82B8:  93C10018  stw r30,24(r1)
+	  45: GETL       	R30, t36
+	  46: GETL       	R1, t38
+	  47: ADDL       	$0x18, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0xFEC82BC:  419E00D0  bc 12,30,0xFEC838C
+	  50: Js30o       	$0xFEC838C
+
+
+
+. 2649 FEC8290 48
+. 7C 88 02 A6 94 21 FF E0 93 E1 00 1C 39 20 FF FF 93 81 00 10 7C 7F 1B 78 90 81 00 24 80 03 00 38 93 A1 00 14 2F 80 FF FF 93 C1 00 18 41 9E 00 D0
+==== BB 2650 (0xFEC82C0) approx BBs exec'd 0 ====
+
+	0xFEC82C0:  80030000  lwz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEC82C4:  70090008  andi. r9,r0,0x8
+	   4: GETL       	R0, t4
+	   5: ANDL       	$0x8, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEC82C8:  418200E8  bc 12,2,0xFEC83B0
+	  10: Js02o       	$0xFEC83B0
+
+
+
+. 2650 FEC82C0 12
+. 80 03 00 00 70 09 00 08 41 82 00 E8
+==== BB 2651 (0xFEC82CC) approx BBs exec'd 0 ====
+
+	0xFEC82CC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC82D0:  3B800000  li r28,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R28
+	   5: INCEIPL       	$4
+
+	0xFEC82D4:  48004B15  bl 0xFECCDE8
+	   6: MOVL       	$0xFEC82D8, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFECCDE8  ($4)
+
+
+
+. 2651 FEC82CC 12
+. 7F E3 FB 78 3B 80 00 00 48 00 4B 15
+==== BB 2652 _IO_unsave_markers_internal(0xFECCDE8) approx BBs exec'd 0 ====
+
+	0xFECCDE8:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFECCDEC:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECCDF0:  7C691B78  or r9,r3,r3
+	   9: GETL       	R3, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0xFECCDF4:  90810014  stw r4,20(r1)
+	  12: GETL       	R4, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x14, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFECCDF8:  80030030  lwz r0,48(r3)
+	  17: GETL       	R3, t12
+	  18: ADDL       	$0x30, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R0
+	  21: INCEIPL       	$4
+
+	0xFECCDFC:  2F800000  cmpi cr7,r0,0
+	  22: GETL       	R0, t16
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x7, CR
+	  25: INCEIPL       	$4
+
+	0xFECCE00:  419E000C  bc 12,30,0xFECCE0C
+	  26: Js30o       	$0xFECCE0C
+
+
+
+. 2652 FECCDE8 28
+. 7C 88 02 A6 94 21 FF F0 7C 69 1B 78 90 81 00 14 80 03 00 30 2F 80 00 00 41 9E 00 0C
+==== BB 2653 (0xFECCE0C) approx BBs exec'd 0 ====
+
+	0xFECCE0C:  80C90024  lwz r6,36(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFECCE10:  2C060000  cmpi cr0,r6,0
+	   5: GETL       	R6, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFECCE14:  40820014  bc 4,2,0xFECCE28
+	   9: Jc02o       	$0xFECCE28
+
+
+
+. 2653 FECCE0C 12
+. 80 C9 00 24 2C 06 00 00 40 82 00 14
+==== BB 2654 (0xFECCE18) approx BBs exec'd 0 ====
+
+	0xFECCE18:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECCE1C:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFECCE20:  7C6803A6  mtlr r3
+	   9: GETL       	R3, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFECCE24:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 2654 FECCE18 16
+. 80 61 00 14 38 21 00 10 7C 68 03 A6 4E 80 00 20
+==== BB 2655 (0xFEC82D8) approx BBs exec'd 0 ====
+
+	0xFEC82D8:  881F0046  lbz r0,70(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEC82DC:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEC82E0:  7C1D0774  extsb r29,r0
+	   8: GETB       	R0, t6
+	   9: WIDENL_Bs       	_st6
+	  10: PUTL       	t6, R29
+	  11: INCEIPL       	$4
+
+	0xFEC82E4:  7D9DFA14  add r12,r29,r31
+	  12: GETL       	R29, t8
+	  13: GETL       	R31, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R12
+	  16: INCEIPL       	$4
+
+	0xFEC82E8:  814C0098  lwz r10,152(r12)
+	  17: GETL       	R12, t12
+	  18: ADDL       	$0x98, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R10
+	  21: INCEIPL       	$4
+
+	0xFEC82EC:  816A0044  lwz r11,68(r10)
+	  22: GETL       	R10, t16
+	  23: ADDL       	$0x44, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R11
+	  26: INCEIPL       	$4
+
+	0xFEC82F0:  7D6903A6  mtctr r11
+	  27: GETL       	R11, t20
+	  28: PUTL       	t20, CTR
+	  29: INCEIPL       	$4
+
+	0xFEC82F4:  4E800421  bctrl
+	  30: MOVL       	$0xFEC82F8, t22
+	  31: PUTL       	t22, LR
+	  32: GETL       	CTR, t24
+	  33: JMPo-c       	t24  ($4)
+
+
+
+. 2655 FEC82D8 32
+. 88 1F 00 46 7F E3 FB 78 7C 1D 07 74 7D 9D FA 14 81 4C 00 98 81 6A 00 44 7D 69 03 A6 4E 80 04 21
+==== BB 2656 _IO_file_close_internal(0xFECA560) approx BBs exec'd 0 ====
+
+	0xFECA560:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFECA564:  80630038  lwz r3,56(r3)
+	   6: GETL       	R3, t4
+	   7: ADDL       	$0x38, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFECA568:  38210010  addi r1,r1,16
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x10, t8
+	  13: PUTL       	t8, R1
+	  14: INCEIPL       	$4
+
+	0xFECA56C:  48057980  b 0xFF21EEC
+	  15: JMPo       	$0xFF21EEC  ($4)
+
+
+
+. 2656 FECA560 16
+. 94 21 FF F0 80 63 00 38 38 21 00 10 48 05 79 80
+==== BB 2657 __close_nocancel(0xFF21EEC) approx BBs exec'd 0 ====
+
+	0xFF21EEC:  38000006  li r0,6
+	   0: MOVL       	$0x6, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF21EF0:  44000002  sc
+	   3: JMPo-sys       	$0xFF21EF4  ($4)
+
+
+
+. 2657 FF21EEC 8
+. 38 00 00 06 44 00 00 02
+==== BB 2658 (0xFF21EF4) approx BBs exec'd 0 ====
+
+	0xFF21EF4:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 2658 FF21EF4 4
+. 4C A3 00 20
+==== BB 2659 (0xFEC82F8) approx BBs exec'd 0 ====
+
+	0xFEC82F8:  7C7D1B78  or r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFEC82FC:  807F0060  lwz r3,96(r31)
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x60, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEC8300:  2F030000  cmpi cr6,r3,0
+	   8: GETL       	R3, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFEC8304:  40990114  bc 4,25,0xFEC8418
+	  12: Jc25o       	$0xFEC8418
+
+
+
+. 2659 FEC82F8 16
+. 7C 7D 1B 78 80 7F 00 60 2F 03 00 00 40 99 01 14
+==== BB 2660 (0xFEC8418) approx BBs exec'd 0 ====
+
+	0xFEC8418:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEC841C:  7FE3FB78  or r3,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFEC8420:  38A00000  li r5,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFEC8424:  38C00000  li r6,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0xFEC8428:  48003755  bl 0xFECBB7C
+	  12: MOVL       	$0xFEC842C, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0xFECBB7C  ($4)
+
+
+
+. 2660 FEC8418 20
+. 38 80 00 00 7F E3 FB 78 38 A0 00 00 38 C0 00 00 48 00 37 55
+==== BB 2661 (0xFECBBB8) approx BBs exec'd 0 ====
+
+	0xFECBBB8:  80030000  lwz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFECBBBC:  7D234B78  or r3,r9,r9
+	   4: GETL       	R9, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFECBBC0:  700B0001  andi. r11,r0,0x1
+	   7: GETL       	R0, t6
+	   8: ANDL       	$0x1, t6
+	   9: PUTL       	t6, R11
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFECBBC4:  40A2001C  bc 5,2,0xFECBBE0
+	  13: Jc02o       	$0xFECBBE0
+
+
+
+. 2661 FECBBB8 16
+. 80 03 00 00 7D 23 4B 78 70 0B 00 01 40 A2 00 1C
+==== BB 2662 (0xFECBBC8) approx BBs exec'd 0 ====
+
+	0xFECBBC8:  80DF0020  lwz r6,32(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFECBBCC:  7CA93050  subf r5,r9,r6
+	   5: GETL       	R9, t4
+	   6: GETL       	R6, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFECBBD0:  38850FFF  addi r4,r5,4095
+	  10: GETL       	R5, t8
+	  11: ADDL       	$0xFFF, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0xFECBBD4:  54840026  rlwinm r4,r4,0,0,19
+	  14: GETL       	R4, t10
+	  15: ANDL       	$0xFFFFF000, t10
+	  16: PUTL       	t10, R4
+	  17: INCEIPL       	$4
+
+	0xFECBBD8:  480626A5  bl 0xFF2E27C
+	  18: MOVL       	$0xFECBBDC, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0xFF2E27C  ($4)
+
+
+
+. 2662 FECBBC8 20
+. 80 DF 00 20 7C A9 30 50 38 85 0F FF 54 84 00 26 48 06 26 A5
+==== BB 2663 munmap(0xFF2E27C) approx BBs exec'd 0 ====
+
+	0xFF2E27C:  3800005B  li r0,91
+	   0: MOVL       	$0x5B, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF2E280:  44000002  sc
+	   3: JMPo-sys       	$0xFF2E284  ($4)
+
+
+
+. 2663 FF2E27C 8
+. 38 00 00 5B 44 00 00 02
+==== BB 2664 (0xFF2E284) approx BBs exec'd 0 ====
+
+	0xFF2E284:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 2664 FF2E284 4
+. 4C A3 00 20
+==== BB 2665 (0xFECBC20) approx BBs exec'd 0 ====
+
+	0xFECBC20:  80610024  lwz r3,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECBC24:  901F0000  stw r0,0(r31)
+	   5: GETL       	R0, t4
+	   6: GETL       	R31, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFECBC28:  8361000C  lwz r27,12(r1)
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R27
+	  13: INCEIPL       	$4
+
+	0xFECBC2C:  7C6803A6  mtlr r3
+	  14: GETL       	R3, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFECBC30:  83810010  lwz r28,16(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x10, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R28
+	  21: INCEIPL       	$4
+
+	0xFECBC34:  83A10014  lwz r29,20(r1)
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R29
+	  26: INCEIPL       	$4
+
+	0xFECBC38:  83C10018  lwz r30,24(r1)
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x18, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R30
+	  31: INCEIPL       	$4
+
+	0xFECBC3C:  83E1001C  lwz r31,28(r1)
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x1C, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R31
+	  36: INCEIPL       	$4
+
+	0xFECBC40:  38210020  addi r1,r1,32
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x20, t30
+	  39: PUTL       	t30, R1
+	  40: INCEIPL       	$4
+
+	0xFECBC44:  4E800020  blr
+	  41: GETL       	LR, t32
+	  42: JMPo-r       	t32  ($4)
+
+
+
+. 2665 FECBC20 40
+. 80 61 00 24 90 1F 00 00 83 61 00 0C 7C 68 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2666 (0xFEC842C) approx BBs exec'd 0 ====
+
+	0xFEC842C:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEC8430:  909F0018  stw r4,24(r31)
+	   3: GETL       	R4, t2
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x18, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFEC8434:  909F000C  stw r4,12(r31)
+	   8: GETL       	R4, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEC8438:  909F0004  stw r4,4(r31)
+	  13: GETL       	R4, t10
+	  14: GETL       	R31, t12
+	  15: ADDL       	$0x4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEC843C:  909F0008  stw r4,8(r31)
+	  18: GETL       	R4, t14
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0x8, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFEC8440:  909F0014  stw r4,20(r31)
+	  23: GETL       	R4, t18
+	  24: GETL       	R31, t20
+	  25: ADDL       	$0x14, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0xFEC8444:  909F0010  stw r4,16(r31)
+	  28: GETL       	R4, t22
+	  29: GETL       	R31, t24
+	  30: ADDL       	$0x10, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0xFEC8448:  4BFFFF04  b 0xFEC834C
+	  33: JMPo       	$0xFEC834C  ($4)
+
+
+
+. 2666 FEC842C 32
+. 38 80 00 00 90 9F 00 18 90 9F 00 0C 90 9F 00 04 90 9F 00 08 90 9F 00 14 90 9F 00 10 4B FF FF 04
+==== BB 2667 (0xFEC834C) approx BBs exec'd 0 ====
+
+	0xFEC834C:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC8350:  48002B01  bl 0xFECAE50
+	   3: MOVL       	$0xFEC8354, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFECAE50  ($4)
+
+
+
+. 2667 FEC834C 8
+. 7F E3 FB 78 48 00 2B 01
+==== BB 2668 (0xFEC8354) approx BBs exec'd 0 ====
+
+	0xFEC8354:  2C1D0000  cmpi cr0,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEC8358:  7FA3EB78  or r3,r29,r29
+	   4: GETL       	R29, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFEC835C:  3FA0FBAD  lis r29,-1107
+	   7: MOVL       	$0xFBAD0000, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFEC8360:  63AB240C  ori r11,r29,0x240C
+	  10: MOVL       	$0xFBAD240C, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0xFEC8364:  3980FFFF  li r12,-1
+	  13: MOVL       	$0xFFFFFFFF, t10
+	  14: PUTL       	t10, R12
+	  15: INCEIPL       	$4
+
+	0xFEC8368:  3940FFFF  li r10,-1
+	  16: MOVL       	$0xFFFFFFFF, t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0xFEC836C:  3920FFFF  li r9,-1
+	  19: MOVL       	$0xFFFFFFFF, t14
+	  20: PUTL       	t14, R9
+	  21: INCEIPL       	$4
+
+	0xFEC8370:  919F0050  stw r12,80(r31)
+	  22: GETL       	R12, t16
+	  23: GETL       	R31, t18
+	  24: ADDL       	$0x50, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xFEC8374:  915F0054  stw r10,84(r31)
+	  27: GETL       	R10, t20
+	  28: GETL       	R31, t22
+	  29: ADDL       	$0x54, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFEC8378:  917F0000  stw r11,0(r31)
+	  32: GETL       	R11, t24
+	  33: GETL       	R31, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFEC837C:  913F0038  stw r9,56(r31)
+	  36: GETL       	R9, t28
+	  37: GETL       	R31, t30
+	  38: ADDL       	$0x38, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0xFEC8380:  40820008  bc 4,2,0xFEC8388
+	  41: Jc02o       	$0xFEC8388
+
+
+
+. 2668 FEC8354 48
+. 2C 1D 00 00 7F A3 EB 78 3F A0 FB AD 63 AB 24 0C 39 80 FF FF 39 40 FF FF 39 20 FF FF 91 9F 00 50 91 5F 00 54 91 7F 00 00 91 3F 00 38 40 82 00 08
+==== BB 2669 (0xFEC8384) approx BBs exec'd 0 ====
+
+	0xFEC8384:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC8388:  7C691B78  or r9,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0xFEC838C:  83810024  lwz r28,36(r1)
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0x24, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0xFEC8390:  7D234B78  or r3,r9,r9
+	  11: GETL       	R9, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFEC8394:  83A10014  lwz r29,20(r1)
+	  14: GETL       	R1, t10
+	  15: ADDL       	$0x14, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0xFEC8398:  7F8803A6  mtlr r28
+	  19: GETL       	R28, t14
+	  20: PUTL       	t14, LR
+	  21: INCEIPL       	$4
+
+	0xFEC839C:  83C10018  lwz r30,24(r1)
+	  22: GETL       	R1, t16
+	  23: ADDL       	$0x18, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R30
+	  26: INCEIPL       	$4
+
+	0xFEC83A0:  83810010  lwz r28,16(r1)
+	  27: GETL       	R1, t20
+	  28: ADDL       	$0x10, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R28
+	  31: INCEIPL       	$4
+
+	0xFEC83A4:  83E1001C  lwz r31,28(r1)
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x1C, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R31
+	  36: INCEIPL       	$4
+
+	0xFEC83A8:  38210020  addi r1,r1,32
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x20, t28
+	  39: PUTL       	t28, R1
+	  40: INCEIPL       	$4
+
+	0xFEC83AC:  4E800020  blr
+	  41: GETL       	LR, t30
+	  42: JMPo-r       	t30  ($4)
+
+
+
+. 2669 FEC8384 44
+. 7F 83 E3 78 7C 69 1B 78 83 81 00 24 7D 23 4B 78 83 A1 00 14 7F 88 03 A6 83 C1 00 18 83 81 00 10 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2670 (0xFEBBAB0) approx BBs exec'd 0 ====
+
+	0xFEBBAB0:  7C7A1B78  or r26,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R26
+	   2: INCEIPL       	$4
+
+	0xFEBBAB4:  4BFFFEBC  b 0xFEBB970
+	   3: JMPo       	$0xFEBB970  ($4)
+
+
+
+. 2670 FEBBAB0 8
+. 7C 7A 1B 78 4B FF FE BC
+==== BB 2671 (0xFEBB970) approx BBs exec'd 0 ====
+
+	0xFEBB970:  8BFD0046  lbz r31,70(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFEBB974:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEBB978:  38800000  li r4,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFEBB97C:  7FFC0774  extsb r28,r31
+	  11: GETB       	R31, t8
+	  12: WIDENL_Bs       	_st8
+	  13: PUTL       	t8, R28
+	  14: INCEIPL       	$4
+
+	0xFEBB980:  7F7CEA14  add r27,r28,r29
+	  15: GETL       	R28, t10
+	  16: GETL       	R29, t12
+	  17: ADDL       	t10, t12
+	  18: PUTL       	t12, R27
+	  19: INCEIPL       	$4
+
+	0xFEBB984:  819B0098  lwz r12,152(r27)
+	  20: GETL       	R27, t14
+	  21: ADDL       	$0x98, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R12
+	  24: INCEIPL       	$4
+
+	0xFEBB988:  816C0008  lwz r11,8(r12)
+	  25: GETL       	R12, t18
+	  26: ADDL       	$0x8, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R11
+	  29: INCEIPL       	$4
+
+	0xFEBB98C:  7D6903A6  mtctr r11
+	  30: GETL       	R11, t22
+	  31: PUTL       	t22, CTR
+	  32: INCEIPL       	$4
+
+	0xFEBB990:  4E800421  bctrl
+	  33: MOVL       	$0xFEBB994, t24
+	  34: PUTL       	t24, LR
+	  35: GETL       	CTR, t26
+	  36: JMPo-c       	t26  ($4)
+
+
+
+. 2671 FEBB970 36
+. 8B FD 00 46 7F A3 EB 78 38 80 00 00 7F FC 07 74 7F 7C EA 14 81 9B 00 98 81 6C 00 08 7D 69 03 A6 4E 80 04 21
+==== BB 2672 _IO_file_finish@@GLIBC_2.1(0xFEC8470) approx BBs exec'd 0 ====
+
+	0xFEC8470:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEC8474:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEC8478:  93E10008  stw r31,8(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEC847C:  7C7F1B78  or r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0xFEC8480:  90810014  stw r4,20(r1)
+	  17: GETL       	R4, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFEC8484:  80030038  lwz r0,56(r3)
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x38, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R0
+	  26: INCEIPL       	$4
+
+	0xFEC8488:  2F80FFFF  cmpi cr7,r0,-1
+	  27: GETL       	R0, t20
+	  28: MOVL       	$0xFFFFFFFF, t24
+	  29: CMPL       	t20, t24, t22  (-rSo)
+	  30: ICRFL       	t22, $0x7, CR
+	  31: INCEIPL       	$4
+
+	0xFEC848C:  419E0038  bc 12,30,0xFEC84C4
+	  32: Js30o       	$0xFEC84C4
+
+
+
+. 2672 FEC8470 32
+. 7C 88 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 90 81 00 14 80 03 00 38 2F 80 FF FF 41 9E 00 38
+==== BB 2673 (0xFEC84C4) approx BBs exec'd 0 ====
+
+	0xFEC84C4:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC84C8:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFEC84CC:  48003DC1  bl 0xFECC28C
+	   6: MOVL       	$0xFEC84D0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFECC28C  ($4)
+
+
+
+. 2673 FEC84C4 12
+. 7F E3 FB 78 38 80 00 00 48 00 3D C1
+==== BB 2674 _IO_default_finish_internal(0xFECC28C) approx BBs exec'd 0 ====
+
+	0xFECC28C:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFECC290:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECC294:  93E1001C  stw r31,28(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECC298:  7C7F1B78  or r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0xFECC29C:  93A10014  stw r29,20(r1)
+	  17: GETL       	R29, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFECC2A0:  90010024  stw r0,36(r1)
+	  22: GETL       	R0, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x24, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xFECC2A4:  8123001C  lwz r9,28(r3)
+	  27: GETL       	R3, t20
+	  28: ADDL       	$0x1C, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R9
+	  31: INCEIPL       	$4
+
+	0xFECC2A8:  93C10018  stw r30,24(r1)
+	  32: GETL       	R30, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x18, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xFECC2AC:  2F890000  cmpi cr7,r9,0
+	  37: GETL       	R9, t28
+	  38: CMP0L       	t28, t30  (-rSo)
+	  39: ICRFL       	t30, $0x7, CR
+	  40: INCEIPL       	$4
+
+	0xFECC2B0:  419E0010  bc 12,30,0xFECC2C0
+	  41: Js30o       	$0xFECC2C0
+
+
+
+. 2674 FECC28C 40
+. 7C 08 02 A6 94 21 FF E0 93 E1 00 1C 7C 7F 1B 78 93 A1 00 14 90 01 00 24 81 23 00 1C 93 C1 00 18 2F 89 00 00 41 9E 00 10
+==== BB 2675 (0xFECC2C0) approx BBs exec'd 0 ====
+
+	0xFECC2C0:  813F0030  lwz r9,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECC2C4:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFECC2C8:  41860018  bc 12,6,0xFECC2E0
+	   9: Js06o       	$0xFECC2E0
+
+
+
+. 2675 FECC2C0 12
+. 81 3F 00 30 2C 89 00 00 41 86 00 18
+==== BB 2676 (0xFECC2E0) approx BBs exec'd 0 ====
+
+	0xFECC2E0:  807F0024  lwz r3,36(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECC2E4:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFECC2E8:  409E0028  bc 4,30,0xFECC310
+	   9: Jc30o       	$0xFECC310
+
+
+
+. 2676 FECC2E0 12
+. 80 7F 00 24 2F 83 00 00 40 9E 00 28
+==== BB 2677 (0xFECC2EC) approx BBs exec'd 0 ====
+
+	0xFECC2EC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFECC2F0:  4BFFEB61  bl 0xFECAE50
+	   3: MOVL       	$0xFECC2F4, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFECAE50  ($4)
+
+
+
+. 2677 FECC2EC 8
+. 7F E3 FB 78 4B FF EB 61
+==== BB 2678 (0xFECC2F4) approx BBs exec'd 0 ====
+
+	0xFECC2F4:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFECC2F8:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFECC2FC:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0xFECC300:  7D0803A6  mtlr r8
+	  15: GETL       	R8, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFECC304:  83E1001C  lwz r31,28(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0xFECC308:  38210020  addi r1,r1,32
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x20, t18
+	  25: PUTL       	t18, R1
+	  26: INCEIPL       	$4
+
+	0xFECC30C:  4E800020  blr
+	  27: GETL       	LR, t20
+	  28: JMPo-r       	t20  ($4)
+
+
+
+. 2678 FECC2F4 28
+. 81 01 00 24 83 A1 00 14 83 C1 00 18 7D 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2679 (0xFEC84D0) approx BBs exec'd 0 ====
+
+	0xFEC84D0:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEC84D4:  83E10008  lwz r31,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0xFEC84D8:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFEC84DC:  7C6803A6  mtlr r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFEC84E0:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 2679 FEC84D0 20
+. 80 61 00 14 83 E1 00 08 38 21 00 10 7C 68 03 A6 4E 80 00 20
+==== BB 2680 (0xFEBB994) approx BBs exec'd 0 ====
+
+	0xFEBB994:  809D0000  lwz r4,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFEBB998:  70898000  andi. r9,r4,0x8000
+	   4: GETL       	R4, t4
+	   5: ANDL       	$0x8000, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEBB99C:  4182011C  bc 12,2,0xFEBBAB8
+	  10: Js02o       	$0xFEBBAB8
+
+
+
+. 2680 FEBB994 12
+. 80 9D 00 00 70 89 80 00 41 82 01 1C
+==== BB 2681 (0xFEBBAB8) approx BBs exec'd 0 ====
+
+	0xFEBBAB8:  807D0048  lwz r3,72(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEBBABC:  80A30004  lwz r5,4(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFEBBAC0:  3925FFFF  addi r9,r5,-1
+	  10: GETL       	R5, t8
+	  11: ADDL       	$0xFFFFFFFF, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFEBBAC4:  2F890000  cmpi cr7,r9,0
+	  14: GETL       	R9, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0xFEBBAC8:  91230004  stw r9,4(r3)
+	  18: GETL       	R9, t14
+	  19: GETL       	R3, t16
+	  20: ADDL       	$0x4, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFEBBACC:  40BEFED4  bc 5,30,0xFEBB9A0
+	  23: Jc30o       	$0xFEBB9A0
+
+
+
+. 2681 FEBBAB8 24
+. 80 7D 00 48 80 A3 00 04 39 25 FF FF 2F 89 00 00 91 23 00 04 40 BE FE D4
+==== BB 2682 (0xFEBB9A0) approx BBs exec'd 0 ====
+
+	0xFEBB9A0:  813D0060  lwz r9,96(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEBB9A4:  2F090000  cmpi cr6,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFEBB9A8:  409901C8  bc 4,25,0xFEBBB70
+	   9: Jc25o       	$0xFEBBB70
+
+
+
+. 2682 FEBB9A0 12
+. 81 3D 00 60 2F 09 00 00 40 99 01 C8
+==== BB 2683 (0xFEBBB70) approx BBs exec'd 0 ====
+
+	0xFEBBB70:  815D0024  lwz r10,36(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFEBBB74:  2C8A0000  cmpi cr1,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFEBBB78:  4186FE8C  bc 12,6,0xFEBBA04
+	   9: Js06o       	$0xFEBBA04
+
+
+
+. 2683 FEBBB70 12
+. 81 5D 00 24 2C 8A 00 00 41 86 FE 8C
+==== BB 2684 (0xFEBBA04) approx BBs exec'd 0 ====
+
+	0xFEBBA04:  819E1D98  lwz r12,7576(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1D98, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFEBBA08:  816C0000  lwz r11,0(r12)
+	   5: GETL       	R12, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0xFEBBA0C:  7F1D5800  cmp cr6,r29,r11
+	   9: GETL       	R29, t8
+	  10: GETL       	R11, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFEBBA10:  419A0034  bc 12,26,0xFEBBA44
+	  14: Js26o       	$0xFEBBA44
+
+
+
+. 2684 FEBBA04 16
+. 81 9E 1D 98 81 6C 00 00 7F 1D 58 00 41 9A 00 34
+==== BB 2685 (0xFEBBA14) approx BBs exec'd 0 ====
+
+	0xFEBBA14:  839E1C24  lwz r28,7204(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1C24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFEBBA18:  837C0000  lwz r27,0(r28)
+	   5: GETL       	R28, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFEBBA1C:  7C1DD800  cmp cr0,r29,r27
+	   9: GETL       	R29, t8
+	  10: GETL       	R27, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0xFEBBA20:  41820024  bc 12,2,0xFEBBA44
+	  14: Js02o       	$0xFEBBA44
+
+
+
+. 2685 FEBBA14 16
+. 83 9E 1C 24 83 7C 00 00 7C 1D D8 00 41 82 00 24
+==== BB 2686 (0xFEBBA24) approx BBs exec'd 0 ====
+
+	0xFEBBA24:  809E1D24  lwz r4,7460(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1D24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEBBA28:  83E40000  lwz r31,0(r4)
+	   5: GETL       	R4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R31
+	   8: INCEIPL       	$4
+
+	0xFEBBA2C:  7F9DF800  cmp cr7,r29,r31
+	   9: GETL       	R29, t8
+	  10: GETL       	R31, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0xFEBBA30:  419E0014  bc 12,30,0xFEBBA44
+	  14: Js30o       	$0xFEBBA44
+
+
+
+. 2686 FEBBA24 16
+. 80 9E 1D 24 83 E4 00 00 7F 9D F8 00 41 9E 00 14
+==== BB 2687 (0xFEBBA34) approx BBs exec'd 0 ====
+
+	0xFEBBA34:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEBBA38:  7FA3EB78  or r3,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFEBBA3C:  901D0000  stw r0,0(r29)
+	   6: GETL       	R0, t4
+	   7: GETL       	R29, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEBBA40:  480EE8F1  bl 0xFFAA330
+	  10: MOVL       	$0xFEBBA44, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0xFFAA330  ($4)
+
+
+
+. 2687 FEBBA34 16
+. 38 00 00 00 7F A3 EB 78 90 1D 00 00 48 0E E8 F1
+==== BB 2688 (0xFECEC18) approx BBs exec'd 0 ====
+
+	0xFECEC18:  71200002  andi. r0,r9,0x2
+	   0: GETL       	R9, t0
+	   1: ANDL       	$0x2, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECEC1C:  408202FC  bc 4,2,0xFECEF18
+	   6: Jc02o       	$0xFECEF18
+
+
+
+. 2688 FECEC18 8
+. 71 20 00 02 40 82 02 FC
+==== BB 2689 (0xFECEC20) approx BBs exec'd 0 ====
+
+	0xFECEC20:  81030030  lwz r8,48(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFECEC24:  7F5FEA14  add r26,r31,r29
+	   5: GETL       	R31, t4
+	   6: GETL       	R29, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0xFECEC28:  837E065C  lwz r27,1628(r30)
+	  10: GETL       	R30, t8
+	  11: ADDL       	$0x65C, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R27
+	  14: INCEIPL       	$4
+
+	0xFECEC2C:  7F08F800  cmp cr6,r8,r31
+	  15: GETL       	R8, t12
+	  16: GETL       	R31, t14
+	  17: CMPL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x6, CR
+	  19: INCEIPL       	$4
+
+	0xFECEC30:  419A0254  bc 12,26,0xFECEE84
+	  20: Js26o       	$0xFECEE84
+
+
+
+. 2689 FECEC20 20
+. 81 03 00 30 7F 5F EA 14 83 7E 06 5C 7F 08 F8 00 41 9A 02 54
+==== BB 2690 (0xFECEC34) approx BBs exec'd 0 ====
+
+	0xFECEC34:  71600002  andi. r0,r11,0x2
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x2, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECEC38:  41820350  bc 12,2,0xFECEF88
+	   6: Js02o       	$0xFECEF88
+
+
+
+. 2690 FECEC34 8
+. 71 60 00 02 41 82 03 50
+==== BB 2691 (0xFECEF88) approx BBs exec'd 0 ====
+
+	0xFECEF88:  82480004  lwz r18,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0xFECEF8C:  837E0674  lwz r27,1652(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x674, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R27
+	   9: INCEIPL       	$4
+
+	0xFECEF90:  56510038  rlwinm r17,r18,0,0,28
+	  10: GETL       	R18, t8
+	  11: ANDL       	$0xFFFFFFF8, t8
+	  12: PUTL       	t8, R17
+	  13: INCEIPL       	$4
+
+	0xFECEF94:  7D688A14  add r11,r8,r17
+	  14: GETL       	R8, t10
+	  15: GETL       	R17, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R11
+	  18: INCEIPL       	$4
+
+	0xFECEF98:  7F8BD040  cmpl cr7,r11,r26
+	  19: GETL       	R11, t14
+	  20: GETL       	R26, t16
+	  21: CMPUL       	t14, t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0xFECEF9C:  419DFCA0  bc 12,29,0xFECEC3C
+	  24: Js29o       	$0xFECEC3C
+
+
+
+. 2691 FECEF88 24
+. 82 48 00 04 83 7E 06 74 56 51 00 38 7D 68 8A 14 7F 8B D0 40 41 9D FC A0
+==== BB 2692 (0xFECEC3C) approx BBs exec'd 0 ====
+
+	0xFECEC3C:  801A0004  lwz r0,4(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFECEC40:  837E0660  lwz r27,1632(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x660, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R27
+	   9: INCEIPL       	$4
+
+	0xFECEC44:  700B0001  andi. r11,r0,0x1
+	  10: GETL       	R0, t8
+	  11: ANDL       	$0x1, t8
+	  12: PUTL       	t8, R11
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFECEC48:  4182023C  bc 12,2,0xFECEE84
+	  16: Js02o       	$0xFECEE84
+
+
+
+. 2692 FECEC3C 16
+. 80 1A 00 04 83 7E 06 60 70 0B 00 01 41 82 02 3C
+==== BB 2693 (0xFECEC4C) approx BBs exec'd 0 ====
+
+	0xFECEC4C:  28800008  cmpli cr1,r0,8
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x8, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFECEC50:  541B0038  rlwinm r27,r0,0,0,28
+	   5: GETL       	R0, t6
+	   6: ANDL       	$0xFFFFFFF8, t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFECEC54:  4085022C  bc 4,5,0xFECEE80
+	   9: Jc05o       	$0xFECEE80
+
+
+
+. 2693 FECEC4C 12
+. 28 80 00 08 54 1B 00 38 40 85 02 2C
+==== BB 2694 (0xFECEC58) approx BBs exec'd 0 ====
+
+	0xFECEC58:  827C044C  lwz r19,1100(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x44C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0xFECEC5C:  7F13D840  cmpl cr6,r19,r27
+	   5: GETL       	R19, t4
+	   6: GETL       	R27, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFECEC60:  40990220  bc 4,25,0xFECEE80
+	  10: Jc25o       	$0xFECEE80
+
+
+
+. 2694 FECEC58 12
+. 82 7C 04 4C 7F 13 D8 40 40 99 02 20
+==== BB 2695 (0xFECEC64) approx BBs exec'd 0 ====
+
+	0xFECEC64:  71270001  andi. r7,r9,0x1
+	   0: GETL       	R9, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R7
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECEC68:  4082003C  bc 4,2,0xFECECA4
+	   6: Jc02o       	$0xFECECA4
+
+
+
+. 2695 FECEC64 8
+. 71 27 00 01 40 82 00 3C
+==== BB 2696 (0xFECEC6C) approx BBs exec'd 0 ====
+
+	0xFECEC6C:  829F0000  lwz r20,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R20
+	   3: INCEIPL       	$4
+
+	0xFECEC70:  7FF4F850  subf r31,r20,r31
+	   4: GETL       	R20, t4
+	   5: GETL       	R31, t6
+	   6: SUBL       	t4, t6
+	   7: PUTL       	t6, R31
+	   8: INCEIPL       	$4
+
+	0xFECEC74:  7FBDA214  add r29,r29,r20
+	   9: GETL       	R29, t8
+	  10: GETL       	R20, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R29
+	  13: INCEIPL       	$4
+
+	0xFECEC78:  815F0008  lwz r10,8(r31)
+	  14: GETL       	R31, t12
+	  15: ADDL       	$0x8, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R10
+	  18: INCEIPL       	$4
+
+	0xFECEC7C:  817F000C  lwz r11,12(r31)
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0xC, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R11
+	  23: INCEIPL       	$4
+
+	0xFECEC80:  808A000C  lwz r4,12(r10)
+	  24: GETL       	R10, t20
+	  25: ADDL       	$0xC, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R4
+	  28: INCEIPL       	$4
+
+	0xFECEC84:  7F84F800  cmp cr7,r4,r31
+	  29: GETL       	R4, t24
+	  30: GETL       	R31, t26
+	  31: CMPL       	t24, t26, t28  (-rSo)
+	  32: ICRFL       	t28, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0xFECEC88:  409E0420  bc 4,30,0xFECF0A8
+	  34: Jc30o       	$0xFECF0A8
+
+
+
+. 2696 FECEC6C 32
+. 82 9F 00 00 7F F4 F8 50 7F BD A2 14 81 5F 00 08 81 7F 00 0C 80 8A 00 0C 7F 84 F8 00 40 9E 04 20
+==== BB 2697 (0xFECEC8C) approx BBs exec'd 0 ====
+
+	0xFECEC8C:  82AB0008  lwz r21,8(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0xFECEC90:  7C95F800  cmp cr1,r21,r31
+	   5: GETL       	R21, t4
+	   6: GETL       	R31, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFECEC94:  40860414  bc 4,6,0xFECF0A8
+	  10: Jc06o       	$0xFECF0A8
+
+
+
+. 2697 FECEC8C 12
+. 82 AB 00 08 7C 95 F8 00 40 86 04 14
+==== BB 2698 (0xFECEC98) approx BBs exec'd 0 ====
+
+	0xFECEC98:  914B0008  stw r10,8(r11)
+	   0: GETL       	R10, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECEC9C:  916A000C  stw r11,12(r10)
+	   5: GETL       	R11, t4
+	   6: GETL       	R10, t6
+	   7: ADDL       	$0xC, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECECA0:  811C0030  lwz r8,48(r28)
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x30, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0xFECECA4:  7F88D000  cmp cr7,r8,r26
+	  15: GETL       	R8, t12
+	  16: GETL       	R26, t14
+	  17: CMPL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x7, CR
+	  19: INCEIPL       	$4
+
+	0xFECECA8:  419E02C0  bc 12,30,0xFECEF68
+	  20: Js30o       	$0xFECEF68
+
+
+
+. 2698 FECEC98 20
+. 91 4B 00 08 91 6A 00 0C 81 1C 00 30 7F 88 D0 00 41 9E 02 C0
+==== BB 2699 (0xFECECAC) approx BBs exec'd 0 ====
+
+	0xFECECAC:  7F3ADA14  add r25,r26,r27
+	   0: GETL       	R26, t0
+	   1: GETL       	R27, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0xFECECB0:  83190004  lwz r24,4(r25)
+	   5: GETL       	R25, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R24
+	   9: INCEIPL       	$4
+
+	0xFECECB4:  730A0001  andi. r10,r24,0x1
+	  10: GETL       	R24, t8
+	  11: ANDL       	$0x1, t8
+	  12: PUTL       	t8, R10
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFECECB8:  408202A0  bc 4,2,0xFECEF58
+	  16: Jc02o       	$0xFECEF58
+
+
+
+. 2699 FECECAC 16
+. 7F 3A DA 14 83 19 00 04 73 0A 00 01 40 82 02 A0
+==== BB 2700 (0xFECEF58) approx BBs exec'd 0 ====
+
+	0xFECEF58:  80FA0004  lwz r7,4(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFECEF5C:  54FB003C  rlwinm r27,r7,0,0,30
+	   5: GETL       	R7, t4
+	   6: ANDL       	$0xFFFFFFFE, t4
+	   7: PUTL       	t4, R27
+	   8: INCEIPL       	$4
+
+	0xFECEF60:  937A0004  stw r27,4(r26)
+	   9: GETL       	R27, t6
+	  10: GETL       	R26, t8
+	  11: ADDL       	$0x4, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECEF64:  4BFFFD84  b 0xFECECE8
+	  14: JMPo       	$0xFECECE8  ($4)
+
+
+
+. 2700 FECEF58 16
+. 80 FA 00 04 54 FB 00 3C 93 7A 00 04 4B FF FD 84
+==== BB 2701 (0xFECECE8) approx BBs exec'd 0 ====
+
+	0xFECECE8:  387C0038  addi r3,r28,56
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x38, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFECECEC:  63A80001  ori r8,r29,0x1
+	   4: GETL       	R29, t2
+	   5: ORL       	$0x1, t2
+	   6: PUTL       	t2, R8
+	   7: INCEIPL       	$4
+
+	0xFECECF0:  80C30008  lwz r6,8(r3)
+	   8: GETL       	R3, t4
+	   9: ADDL       	$0x8, t4
+	  10: LDL       	(t4), t6
+	  11: PUTL       	t6, R6
+	  12: INCEIPL       	$4
+
+	0xFECECF4:  907F000C  stw r3,12(r31)
+	  13: GETL       	R3, t8
+	  14: GETL       	R31, t10
+	  15: ADDL       	$0xC, t10
+	  16: STL       	t8, (t10)
+	  17: INCEIPL       	$4
+
+	0xFECECF8:  90DF0008  stw r6,8(r31)
+	  18: GETL       	R6, t12
+	  19: GETL       	R31, t14
+	  20: ADDL       	$0x8, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0xFECECFC:  911F0004  stw r8,4(r31)
+	  23: GETL       	R8, t16
+	  24: GETL       	R31, t18
+	  25: ADDL       	$0x4, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0xFECED00:  7FBFE92E  stwx r29,r31,r29
+	  28: GETL       	R29, t20
+	  29: GETL       	R31, t22
+	  30: ADDL       	t22, t20
+	  31: GETL       	R29, t24
+	  32: STL       	t24, (t20)
+	  33: INCEIPL       	$4
+
+	0xFECED04:  93E6000C  stw r31,12(r6)
+	  34: GETL       	R31, t26
+	  35: GETL       	R6, t28
+	  36: ADDL       	$0xC, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFECED08:  93E30008  stw r31,8(r3)
+	  39: GETL       	R31, t30
+	  40: GETL       	R3, t32
+	  41: ADDL       	$0x8, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFECED0C:  289DFFFF  cmpli cr1,r29,65535
+	  44: GETL       	R29, t34
+	  45: MOVL       	$0xFFFF, t38
+	  46: CMPUL       	t34, t38, t36  (-rSo)
+	  47: ICRFL       	t36, $0x1, CR
+	  48: INCEIPL       	$4
+
+	0xFECED10:  40850124  bc 4,5,0xFECEE34
+	  49: Jc05o       	$0xFECEE34
+
+
+
+. 2701 FECECE8 44
+. 38 7C 00 38 63 A8 00 01 80 C3 00 08 90 7F 00 0C 90 DF 00 08 91 1F 00 04 7F BF E9 2E 93 E6 00 0C 93 E3 00 08 28 9D FF FF 40 85 01 24
+==== BB 2702 (0xFECEE34) approx BBs exec'd 0 ====
+
+	0xFECEE34:  83A10064  lwz r29,100(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x64, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFECEE38:  82210024  lwz r17,36(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x24, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R17
+	   9: INCEIPL       	$4
+
+	0xFECEE3C:  7FA803A6  mtlr r29
+	  10: GETL       	R29, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFECEE40:  82410028  lwz r18,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R18
+	  17: INCEIPL       	$4
+
+	0xFECEE44:  8261002C  lwz r19,44(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x2C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R19
+	  22: INCEIPL       	$4
+
+	0xFECEE48:  82810030  lwz r20,48(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x30, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R20
+	  27: INCEIPL       	$4
+
+	0xFECEE4C:  82A10034  lwz r21,52(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x34, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R21
+	  32: INCEIPL       	$4
+
+	0xFECEE50:  82C10038  lwz r22,56(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x38, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R22
+	  37: INCEIPL       	$4
+
+	0xFECEE54:  82E1003C  lwz r23,60(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x3C, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R23
+	  42: INCEIPL       	$4
+
+	0xFECEE58:  83010040  lwz r24,64(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x40, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R24
+	  47: INCEIPL       	$4
+
+	0xFECEE5C:  83210044  lwz r25,68(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x44, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R25
+	  52: INCEIPL       	$4
+
+	0xFECEE60:  83410048  lwz r26,72(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x48, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R26
+	  57: INCEIPL       	$4
+
+	0xFECEE64:  8361004C  lwz r27,76(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x4C, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R27
+	  62: INCEIPL       	$4
+
+	0xFECEE68:  83810050  lwz r28,80(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x50, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R28
+	  67: INCEIPL       	$4
+
+	0xFECEE6C:  83A10054  lwz r29,84(r1)
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x54, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R29
+	  72: INCEIPL       	$4
+
+	0xFECEE70:  83C10058  lwz r30,88(r1)
+	  73: GETL       	R1, t58
+	  74: ADDL       	$0x58, t58
+	  75: LDL       	(t58), t60
+	  76: PUTL       	t60, R30
+	  77: INCEIPL       	$4
+
+	0xFECEE74:  83E1005C  lwz r31,92(r1)
+	  78: GETL       	R1, t62
+	  79: ADDL       	$0x5C, t62
+	  80: LDL       	(t62), t64
+	  81: PUTL       	t64, R31
+	  82: INCEIPL       	$4
+
+	0xFECEE78:  38210060  addi r1,r1,96
+	  83: GETL       	R1, t66
+	  84: ADDL       	$0x60, t66
+	  85: PUTL       	t66, R1
+	  86: INCEIPL       	$4
+
+	0xFECEE7C:  4E800020  blr
+	  87: GETL       	LR, t68
+	  88: JMPo-r       	t68  ($4)
+
+
+
+. 2702 FECEE34 76
+. 83 A1 00 64 82 21 00 24 7F A8 03 A6 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+==== BB 2703 (0xFEBBA44) approx BBs exec'd 0 ====
+
+	0xFEBBA44:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEBBA48:  83410024  lwz r26,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xFEBBA4C:  8361000C  lwz r27,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R27
+	  12: INCEIPL       	$4
+
+	0xFEBBA50:  7F4803A6  mtlr r26
+	  13: GETL       	R26, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFEBBA54:  83810010  lwz r28,16(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x10, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFEBBA58:  83410008  lwz r26,8(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x8, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R26
+	  25: INCEIPL       	$4
+
+	0xFEBBA5C:  83A10014  lwz r29,20(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R29
+	  30: INCEIPL       	$4
+
+	0xFEBBA60:  83C10018  lwz r30,24(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x18, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R30
+	  35: INCEIPL       	$4
+
+	0xFEBBA64:  83E1001C  lwz r31,28(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0xFEBBA68:  38210020  addi r1,r1,32
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x20, t32
+	  43: PUTL       	t32, R1
+	  44: INCEIPL       	$4
+
+	0xFEBBA6C:  4E800020  blr
+	  45: GETL       	LR, t34
+	  46: JMPo-r       	t34  ($4)
+
+
+
+. 2703 FEBBA44 44
+. 7F 43 D3 78 83 41 00 24 83 61 00 0C 7F 48 03 A6 83 81 00 10 83 41 00 08 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2704 (0xFE8D400) approx BBs exec'd 0 ====
+
+	0xFE8D400:  2F8E0000  cmpi cr7,r14,0
+	   0: GETL       	R14, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D404:  409E00B0  bc 4,30,0xFE8D4B4
+	   4: Jc30o       	$0xFE8D4B4
+
+
+
+. 2704 FE8D400 8
+. 2F 8E 00 00 40 9E 00 B0
+==== BB 2705 (0xFE8D4B4) approx BBs exec'd 0 ====
+
+	0xFE8D4B4:  825E01F4  lwz r18,500(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0xFE8D4B8:  38A00008  li r5,8
+	   5: MOVL       	$0x8, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFE8D4BC:  821E01F0  lwz r16,496(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x1F0, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R16
+	  12: INCEIPL       	$4
+
+	0xFE8D4C0:  80720000  lwz r3,0(r18)
+	  13: GETL       	R18, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0xFE8D4C4:  80900000  lwz r4,0(r16)
+	  17: GETL       	R16, t14
+	  18: LDL       	(t14), t16
+	  19: PUTL       	t16, R4
+	  20: INCEIPL       	$4
+
+	0xFE8D4C8:  80DE01FC  lwz r6,508(r30)
+	  21: GETL       	R30, t18
+	  22: ADDL       	$0x1FC, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R6
+	  25: INCEIPL       	$4
+
+	0xFE8D4CC:  48005AC5  bl 0xFE92F90
+	  26: MOVL       	$0xFE8D4D0, t22
+	  27: PUTL       	t22, LR
+	  28: JMPo-c       	$0xFE92F90  ($4)
+
+
+
+. 2705 FE8D4B4 28
+. 82 5E 01 F4 38 A0 00 08 82 1E 01 F0 80 72 00 00 80 90 00 00 80 DE 01 FC 48 00 5A C5
+==== BB 2706 alias_compare(0xFE8D77C) approx BBs exec'd 0 ====
+
+	0xFE8D77C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE8D780:  80630000  lwz r3,0(r3)
+	   6: GETL       	R3, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFE8D784:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFE8D788:  80840000  lwz r4,0(r4)
+	  14: GETL       	R4, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R4
+	  17: INCEIPL       	$4
+
+	0xFE8D78C:  4804AD48  b 0xFED84D4
+	  18: JMPo       	$0xFED84D4  ($4)
+
+
+
+. 2706 FE8D77C 20
+. 94 21 FF F0 80 63 00 00 38 21 00 10 80 84 00 00 48 04 AD 48
+==== BB 2707 strcasecmp(0xFED84D4) approx BBs exec'd 0 ====
+
+	0xFED84D4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED84D8:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFED84DC:  480CF975  bl 0xFFA7E50
+	   9: MOVL       	$0xFED84E0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2707 FED84D4 12
+. 94 21 FF F0 7D 88 02 A6 48 0C F9 75
+==== BB 2708 (0xFED84E0) approx BBs exec'd 0 ====
+
+	0xFED84E0:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED84E4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFED84E8:  7F832000  cmp cr7,r3,r4
+	   8: GETL       	R3, t6
+	   9: GETL       	R4, t8
+	  10: CMPL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFED84EC:  39400000  li r10,0
+	  13: MOVL       	$0x0, t12
+	  14: PUTL       	t12, R10
+	  15: INCEIPL       	$4
+
+	0xFED84F0:  7D8803A6  mtlr r12
+	  16: GETL       	R12, t14
+	  17: PUTL       	t14, LR
+	  18: INCEIPL       	$4
+
+	0xFED84F4:  80BE1D50  lwz r5,7504(r30)
+	  19: GETL       	R30, t16
+	  20: ADDL       	$0x1D50, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R5
+	  23: INCEIPL       	$4
+
+	0xFED84F8:  7D251214  add r9,r5,r2
+	  24: GETL       	R5, t20
+	  25: GETL       	R2, t22
+	  26: ADDL       	t20, t22
+	  27: PUTL       	t22, R9
+	  28: INCEIPL       	$4
+
+	0xFED84FC:  81290000  lwz r9,0(r9)
+	  29: GETL       	R9, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R9
+	  32: INCEIPL       	$4
+
+	0xFED8500:  419E0038  bc 12,30,0xFED8538
+	  33: Js30o       	$0xFED8538
+
+
+
+. 2708 FED84E0 36
+. 93 C1 00 08 7F C8 02 A6 7F 83 20 00 39 40 00 00 7D 88 03 A6 80 BE 1D 50 7D 25 12 14 81 29 00 00 41 9E 00 38
+==== BB 2709 (0xFED8504) approx BBs exec'd 0 ====
+
+	0xFED8504:  81090038  lwz r8,56(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x38, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFED8508:  88E30000  lbz r7,0(r3)
+	   5: GETL       	R3, t4
+	   6: LDB       	(t4), t6
+	   7: PUTL       	t6, R7
+	   8: INCEIPL       	$4
+
+	0xFED850C:  38630001  addi r3,r3,1
+	   9: GETL       	R3, t8
+	  10: ADDL       	$0x1, t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFED8510:  89240000  lbz r9,0(r4)
+	  13: GETL       	R4, t10
+	  14: LDB       	(t10), t12
+	  15: PUTL       	t12, R9
+	  16: INCEIPL       	$4
+
+	0xFED8514:  38840001  addi r4,r4,1
+	  17: GETL       	R4, t14
+	  18: ADDL       	$0x1, t14
+	  19: PUTL       	t14, R4
+	  20: INCEIPL       	$4
+
+	0xFED8518:  54E6103A  rlwinm r6,r7,2,0,29
+	  21: GETL       	R7, t16
+	  22: SHLL       	$0x2, t16
+	  23: PUTL       	t16, R6
+	  24: INCEIPL       	$4
+
+	0xFED851C:  2F870000  cmpi cr7,r7,0
+	  25: GETL       	R7, t18
+	  26: CMP0L       	t18, t20  (-rSo)
+	  27: ICRFL       	t20, $0x7, CR
+	  28: INCEIPL       	$4
+
+	0xFED8520:  552B103A  rlwinm r11,r9,2,0,29
+	  29: GETL       	R9, t22
+	  30: SHLL       	$0x2, t22
+	  31: PUTL       	t22, R11
+	  32: INCEIPL       	$4
+
+	0xFED8524:  7D46402E  lwzx r10,r6,r8
+	  33: GETL       	R8, t24
+	  34: GETL       	R6, t26
+	  35: ADDL       	t26, t24
+	  36: LDL       	(t24), t28
+	  37: PUTL       	t28, R10
+	  38: INCEIPL       	$4
+
+	0xFED8528:  7C0B402E  lwzx r0,r11,r8
+	  39: GETL       	R8, t30
+	  40: GETL       	R11, t32
+	  41: ADDL       	t32, t30
+	  42: LDL       	(t30), t34
+	  43: PUTL       	t34, R0
+	  44: INCEIPL       	$4
+
+	0xFED852C:  7D405051  subf. r10,r0,r10
+	  45: GETL       	R0, t36
+	  46: GETL       	R10, t38
+	  47: SUBL       	t36, t38
+	  48: PUTL       	t38, R10
+	  49: CMP0L       	t38, t40  (-rSo)
+	  50: ICRFL       	t40, $0x0, CR
+	  51: INCEIPL       	$4
+
+	0xFED8530:  40820008  bc 4,2,0xFED8538
+	  52: Jc02o       	$0xFED8538
+
+
+
+. 2709 FED8504 48
+. 81 09 00 38 88 E3 00 00 38 63 00 01 89 24 00 00 38 84 00 01 54 E6 10 3A 2F 87 00 00 55 2B 10 3A 7D 46 40 2E 7C 0B 40 2E 7D 40 50 51 40 82 00 08
+==== BB 2710 (0xFED8534) approx BBs exec'd 0 ====
+
+	0xFED8534:  409EFFD4  bc 4,30,0xFED8508
+	   0: Jc30o       	$0xFED8508
+
+
+
+. 2710 FED8534 4
+. 40 9E FF D4
+==== BB 2711 (0xFED8508) approx BBs exec'd 0 ====
+
+	0xFED8508:  88E30000  lbz r7,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0xFED850C:  38630001  addi r3,r3,1
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED8510:  89240000  lbz r9,0(r4)
+	   8: GETL       	R4, t6
+	   9: LDB       	(t6), t8
+	  10: PUTL       	t8, R9
+	  11: INCEIPL       	$4
+
+	0xFED8514:  38840001  addi r4,r4,1
+	  12: GETL       	R4, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFED8518:  54E6103A  rlwinm r6,r7,2,0,29
+	  16: GETL       	R7, t12
+	  17: SHLL       	$0x2, t12
+	  18: PUTL       	t12, R6
+	  19: INCEIPL       	$4
+
+	0xFED851C:  2F870000  cmpi cr7,r7,0
+	  20: GETL       	R7, t14
+	  21: CMP0L       	t14, t16  (-rSo)
+	  22: ICRFL       	t16, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0xFED8520:  552B103A  rlwinm r11,r9,2,0,29
+	  24: GETL       	R9, t18
+	  25: SHLL       	$0x2, t18
+	  26: PUTL       	t18, R11
+	  27: INCEIPL       	$4
+
+	0xFED8524:  7D46402E  lwzx r10,r6,r8
+	  28: GETL       	R8, t20
+	  29: GETL       	R6, t22
+	  30: ADDL       	t22, t20
+	  31: LDL       	(t20), t24
+	  32: PUTL       	t24, R10
+	  33: INCEIPL       	$4
+
+	0xFED8528:  7C0B402E  lwzx r0,r11,r8
+	  34: GETL       	R8, t26
+	  35: GETL       	R11, t28
+	  36: ADDL       	t28, t26
+	  37: LDL       	(t26), t30
+	  38: PUTL       	t30, R0
+	  39: INCEIPL       	$4
+
+	0xFED852C:  7D405051  subf. r10,r0,r10
+	  40: GETL       	R0, t32
+	  41: GETL       	R10, t34
+	  42: SUBL       	t32, t34
+	  43: PUTL       	t34, R10
+	  44: CMP0L       	t34, t36  (-rSo)
+	  45: ICRFL       	t36, $0x0, CR
+	  46: INCEIPL       	$4
+
+	0xFED8530:  40820008  bc 4,2,0xFED8538
+	  47: Jc02o       	$0xFED8538
+
+
+
+. 2711 FED8508 44
+. 88 E3 00 00 38 63 00 01 89 24 00 00 38 84 00 01 54 E6 10 3A 2F 87 00 00 55 2B 10 3A 7D 46 40 2E 7C 0B 40 2E 7D 40 50 51 40 82 00 08
+==== BB 2712 (0xFED8538) approx BBs exec'd 0 ====
+
+	0xFED8538:  7D435378  or r3,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED853C:  83C10008  lwz r30,8(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFED8540:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xFED8544:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 2712 FED8538 16
+. 7D 43 53 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 2713 (0xFED8878) approx BBs exec'd 0 ====
+
+	0xFED8878:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFED887C:  3863FFE4  addi r3,r3,-28
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFE4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED8880:  3884FFE8  addi r4,r4,-24
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0xFFFFFFE8, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0xFED8884:  38A50006  addi r5,r5,6
+	  12: GETL       	R5, t8
+	  13: ADDL       	$0x6, t8
+	  14: PUTL       	t8, R5
+	  15: INCEIPL       	$4
+
+	0xFED8888:  4BFFFFB4  b 0xFED883C
+	  16: JMPo       	$0xFED883C  ($4)
+
+
+
+. 2713 FED8878 20
+. 80 04 00 00 38 63 FF E4 38 84 FF E8 38 A5 00 06 4B FF FF B4
+==== BB 2714 (0xFED883C) approx BBs exec'd 0 ====
+
+	0xFED883C:  34A5FFF8  addic. r5,r5,-8
+	   0: GETL       	R5, t0
+	   1: ADCL       	$0xFFFFFFF8, t0  (-wCa)
+	   2: PUTL       	t0, R5
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED8840:  8124001C  lwz r9,28(r4)
+	   6: GETL       	R4, t4
+	   7: ADDL       	$0x1C, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0xFED8844:  9003001C  stw r0,28(r3)
+	  11: GETL       	R0, t8
+	  12: GETL       	R3, t10
+	  13: ADDL       	$0x1C, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFED8848:  38840020  addi r4,r4,32
+	  16: GETL       	R4, t12
+	  17: ADDL       	$0x20, t12
+	  18: PUTL       	t12, R4
+	  19: INCEIPL       	$4
+
+	0xFED884C:  38630020  addi r3,r3,32
+	  20: GETL       	R3, t14
+	  21: ADDL       	$0x20, t14
+	  22: PUTL       	t14, R3
+	  23: INCEIPL       	$4
+
+	0xFED8850:  40A2FFB4  bc 5,2,0xFED8804
+	  24: Jc02o       	$0xFED8804
+
+
+
+. 2714 FED883C 24
+. 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 2715 (0xFE92ED4) approx BBs exec'd 0 ====
+
+	0xFE92ED4:  7CB9E9D6  mullw r5,r25,r29
+	   0: GETL       	R25, t0
+	   1: GETL       	R29, t2
+	   2: MULL       	t0, t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFE92ED8:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE92EDC:  7F44D378  or r4,r26,r26
+	   8: GETL       	R26, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFE92EE0:  480457F9  bl 0xFED86D8
+	  11: MOVL       	$0xFE92EE4, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 2715 FE92ED4 16
+. 7C B9 E9 D6 7F 83 E3 78 7F 44 D3 78 48 04 57 F9
+==== BB 2716 (0xFE92EE4) approx BBs exec'd 0 ====
+
+	0xFE92EE4:  4BFFFF5C  b 0xFE92E40
+	   0: JMPo       	$0xFE92E40  ($4)
+
+
+
+. 2716 FE92EE4 4
+. 4B FF FF 5C
+==== BB 2717 (0xFE8D4D0) approx BBs exec'd 0 ====
+
+	0xFE8D4D0:  4BFFFF38  b 0xFE8D408
+	   0: JMPo       	$0xFE8D408  ($4)
+
+
+
+. 2717 FE8D4D0 4
+. 4B FF FF 38
+==== BB 2718 (0xFE8D408) approx BBs exec'd 0 ====
+
+	0xFE8D408:  7DC07378  or r0,r14,r14
+	   0: GETL       	R14, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE8D40C:  81210000  lwz r9,0(r1)
+	   3: GETL       	R1, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0xFE8D410:  7C030378  or r3,r0,r0
+	   7: GETL       	R0, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFE8D414:  83490004  lwz r26,4(r9)
+	  10: GETL       	R9, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0xFE8D418:  81C9FFB8  lwz r14,-72(r9)
+	  15: GETL       	R9, t12
+	  16: ADDL       	$0xFFFFFFB8, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R14
+	  19: INCEIPL       	$4
+
+	0xFE8D41C:  7F4803A6  mtlr r26
+	  20: GETL       	R26, t16
+	  21: PUTL       	t16, LR
+	  22: INCEIPL       	$4
+
+	0xFE8D420:  81E9FFBC  lwz r15,-68(r9)
+	  23: GETL       	R9, t18
+	  24: ADDL       	$0xFFFFFFBC, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R15
+	  27: INCEIPL       	$4
+
+	0xFE8D424:  8209FFC0  lwz r16,-64(r9)
+	  28: GETL       	R9, t22
+	  29: ADDL       	$0xFFFFFFC0, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R16
+	  32: INCEIPL       	$4
+
+	0xFE8D428:  8229FFC4  lwz r17,-60(r9)
+	  33: GETL       	R9, t26
+	  34: ADDL       	$0xFFFFFFC4, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R17
+	  37: INCEIPL       	$4
+
+	0xFE8D42C:  8249FFC8  lwz r18,-56(r9)
+	  38: GETL       	R9, t30
+	  39: ADDL       	$0xFFFFFFC8, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R18
+	  42: INCEIPL       	$4
+
+	0xFE8D430:  8269FFCC  lwz r19,-52(r9)
+	  43: GETL       	R9, t34
+	  44: ADDL       	$0xFFFFFFCC, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R19
+	  47: INCEIPL       	$4
+
+	0xFE8D434:  8289FFD0  lwz r20,-48(r9)
+	  48: GETL       	R9, t38
+	  49: ADDL       	$0xFFFFFFD0, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R20
+	  52: INCEIPL       	$4
+
+	0xFE8D438:  82A9FFD4  lwz r21,-44(r9)
+	  53: GETL       	R9, t42
+	  54: ADDL       	$0xFFFFFFD4, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R21
+	  57: INCEIPL       	$4
+
+	0xFE8D43C:  82C9FFD8  lwz r22,-40(r9)
+	  58: GETL       	R9, t46
+	  59: ADDL       	$0xFFFFFFD8, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R22
+	  62: INCEIPL       	$4
+
+	0xFE8D440:  82E9FFDC  lwz r23,-36(r9)
+	  63: GETL       	R9, t50
+	  64: ADDL       	$0xFFFFFFDC, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R23
+	  67: INCEIPL       	$4
+
+	0xFE8D444:  8309FFE0  lwz r24,-32(r9)
+	  68: GETL       	R9, t54
+	  69: ADDL       	$0xFFFFFFE0, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R24
+	  72: INCEIPL       	$4
+
+	0xFE8D448:  8329FFE4  lwz r25,-28(r9)
+	  73: GETL       	R9, t58
+	  74: ADDL       	$0xFFFFFFE4, t58
+	  75: LDL       	(t58), t60
+	  76: PUTL       	t60, R25
+	  77: INCEIPL       	$4
+
+	0xFE8D44C:  8349FFE8  lwz r26,-24(r9)
+	  78: GETL       	R9, t62
+	  79: ADDL       	$0xFFFFFFE8, t62
+	  80: LDL       	(t62), t64
+	  81: PUTL       	t64, R26
+	  82: INCEIPL       	$4
+
+	0xFE8D450:  8369FFEC  lwz r27,-20(r9)
+	  83: GETL       	R9, t66
+	  84: ADDL       	$0xFFFFFFEC, t66
+	  85: LDL       	(t66), t68
+	  86: PUTL       	t68, R27
+	  87: INCEIPL       	$4
+
+	0xFE8D454:  8389FFF0  lwz r28,-16(r9)
+	  88: GETL       	R9, t70
+	  89: ADDL       	$0xFFFFFFF0, t70
+	  90: LDL       	(t70), t72
+	  91: PUTL       	t72, R28
+	  92: INCEIPL       	$4
+
+	0xFE8D458:  83A9FFF4  lwz r29,-12(r9)
+	  93: GETL       	R9, t74
+	  94: ADDL       	$0xFFFFFFF4, t74
+	  95: LDL       	(t74), t76
+	  96: PUTL       	t76, R29
+	  97: INCEIPL       	$4
+
+	0xFE8D45C:  83C9FFF8  lwz r30,-8(r9)
+	  98: GETL       	R9, t78
+	  99: ADDL       	$0xFFFFFFF8, t78
+	 100: LDL       	(t78), t80
+	 101: PUTL       	t80, R30
+	 102: INCEIPL       	$4
+
+	0xFE8D460:  83E9FFFC  lwz r31,-4(r9)
+	 103: GETL       	R9, t82
+	 104: ADDL       	$0xFFFFFFFC, t82
+	 105: LDL       	(t82), t84
+	 106: PUTL       	t84, R31
+	 107: INCEIPL       	$4
+
+	0xFE8D464:  7D214B78  or r1,r9,r9
+	 108: GETL       	R9, t86
+	 109: PUTL       	t86, R1
+	 110: INCEIPL       	$4
+
+	0xFE8D468:  4E800020  blr
+	 111: GETL       	LR, t88
+	 112: JMPo-r       	t88  ($4)
+
+
+
+. 2718 FE8D408 100
+. 7D C0 73 78 81 21 00 00 7C 03 03 78 83 49 00 04 81 C9 FF B8 7F 48 03 A6 81 E9 FF BC 82 09 FF C0 82 29 FF C4 82 49 FF C8 82 69 FF CC 82 89 FF D0 82 A9 FF D4 82 C9 FF D8 82 E9 FF DC 83 09 FF E0 83 29 FF E4 83 49 FF E8 83 69 FF EC 83 89 FF F0 83 A9 FF F4 83 C9 FF F8 83 E9 FF FC 7D 21 4B 78 4E 80 00 20
+==== BB 2719 (0xFE8D6C0) approx BBs exec'd 0 ====
+
+	0xFE8D6C0:  7C681B79  or. r8,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R8
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8D6C4:  4182FF88  bc 12,2,0xFE8D64C
+	   5: Js02o       	$0xFE8D64C
+
+
+
+. 2719 FE8D6C0 8
+. 7C 68 1B 79 41 82 FF 88
+==== BB 2720 (0xFE8D6C8) approx BBs exec'd 0 ====
+
+	0xFE8D6C8:  80B90000  lwz r5,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFE8D6CC:  93410008  stw r26,8(r1)
+	   4: GETL       	R26, t4
+	   5: GETL       	R1, t6
+	   6: ADDL       	$0x8, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFE8D6D0:  2C050000  cmpi cr0,r5,0
+	   9: GETL       	R5, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFE8D6D4:  4182FEE4  bc 12,2,0xFE8D5B8
+	  13: Js02o       	$0xFE8D5B8
+
+
+
+. 2720 FE8D6C8 16
+. 80 B9 00 00 93 41 00 08 2C 05 00 00 41 82 FE E4
+==== BB 2721 (0xFE8D6D8) approx BBs exec'd 0 ====
+
+	0xFE8D6D8:  807E01F4  lwz r3,500(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8D6DC:  38C00008  li r6,8
+	   5: MOVL       	$0x8, t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0xFE8D6E0:  80FE01FC  lwz r7,508(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x1FC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0xFE8D6E4:  80830000  lwz r4,0(r3)
+	  13: GETL       	R3, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R4
+	  16: INCEIPL       	$4
+
+	0xFE8D6E8:  38610008  addi r3,r1,8
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x8, t14
+	  19: PUTL       	t14, R3
+	  20: INCEIPL       	$4
+
+	0xFE8D6EC:  4800512D  bl 0xFE92818
+	  21: MOVL       	$0xFE8D6F0, t16
+	  22: PUTL       	t16, LR
+	  23: JMPo-c       	$0xFE92818  ($4)
+
+
+
+. 2721 FE8D6D8 24
+. 80 7E 01 F4 38 C0 00 08 80 FE 01 FC 80 83 00 00 38 61 00 08 48 00 51 2D
+==== BB 2722 bsearch(0xFE92818) approx BBs exec'd 0 ====
+
+	0xFE92818:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE9281C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE92820:  9361001C  stw r27,28(r1)
+	   9: GETL       	R27, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFE92824:  3B600000  li r27,0
+	  14: MOVL       	$0x0, t10
+	  15: PUTL       	t10, R27
+	  16: INCEIPL       	$4
+
+	0xFE92828:  7F9B2840  cmpl cr7,r27,r5
+	  17: GETL       	R27, t12
+	  18: GETL       	R5, t14
+	  19: CMPUL       	t12, t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0xFE9282C:  92E1000C  stw r23,12(r1)
+	  22: GETL       	R23, t18
+	  23: GETL       	R1, t20
+	  24: ADDL       	$0xC, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0xFE92830:  93010010  stw r24,16(r1)
+	  27: GETL       	R24, t22
+	  28: GETL       	R1, t24
+	  29: ADDL       	$0x10, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0xFE92834:  7C771B78  or r23,r3,r3
+	  32: GETL       	R3, t26
+	  33: PUTL       	t26, R23
+	  34: INCEIPL       	$4
+
+	0xFE92838:  93210014  stw r25,20(r1)
+	  35: GETL       	R25, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x14, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0xFE9283C:  7C982378  or r24,r4,r4
+	  40: GETL       	R4, t32
+	  41: PUTL       	t32, R24
+	  42: INCEIPL       	$4
+
+	0xFE92840:  93410018  stw r26,24(r1)
+	  43: GETL       	R26, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x18, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0xFE92844:  7CD93378  or r25,r6,r6
+	  48: GETL       	R6, t38
+	  49: PUTL       	t38, R25
+	  50: INCEIPL       	$4
+
+	0xFE92848:  93810020  stw r28,32(r1)
+	  51: GETL       	R28, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x20, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0xFE9284C:  7CFA3B78  or r26,r7,r7
+	  56: GETL       	R7, t44
+	  57: PUTL       	t44, R26
+	  58: INCEIPL       	$4
+
+	0xFE92850:  93A10024  stw r29,36(r1)
+	  59: GETL       	R29, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x24, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFE92854:  7CBC2B78  or r28,r5,r5
+	  64: GETL       	R5, t50
+	  65: PUTL       	t50, R28
+	  66: INCEIPL       	$4
+
+	0xFE92858:  93C10028  stw r30,40(r1)
+	  67: GETL       	R30, t52
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x28, t54
+	  70: STL       	t52, (t54)
+	  71: INCEIPL       	$4
+
+	0xFE9285C:  93E1002C  stw r31,44(r1)
+	  72: GETL       	R31, t56
+	  73: GETL       	R1, t58
+	  74: ADDL       	$0x2C, t58
+	  75: STL       	t56, (t58)
+	  76: INCEIPL       	$4
+
+	0xFE92860:  90010034  stw r0,52(r1)
+	  77: GETL       	R0, t60
+	  78: GETL       	R1, t62
+	  79: ADDL       	$0x34, t62
+	  80: STL       	t60, (t62)
+	  81: INCEIPL       	$4
+
+	0xFE92864:  409C003C  bc 4,28,0xFE928A0
+	  82: Jc28o       	$0xFE928A0
+
+
+
+. 2722 FE92818 80
+. 94 21 FF D0 7C 08 02 A6 93 61 00 1C 3B 60 00 00 7F 9B 28 40 92 E1 00 0C 93 01 00 10 7C 77 1B 78 93 21 00 14 7C 98 23 78 93 41 00 18 7C D9 33 78 93 81 00 20 7C FA 3B 78 93 A1 00 24 7C BC 2B 78 93 C1 00 28 93 E1 00 2C 90 01 00 34 40 9C 00 3C
+==== BB 2723 (0xFE92868) approx BBs exec'd 0 ====
+
+	0xFE92868:  7C7BE214  add r3,r27,r28
+	   0: GETL       	R27, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE9286C:  7F4803A6  mtlr r26
+	   5: GETL       	R26, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFE92870:  547FF87E  rlwinm r31,r3,31,1,31
+	   8: GETL       	R3, t6
+	   9: SHRL       	$0x1, t6
+	  10: PUTL       	t6, R31
+	  11: INCEIPL       	$4
+
+	0xFE92874:  7EE3BB78  or r3,r23,r23
+	  12: GETL       	R23, t8
+	  13: PUTL       	t8, R3
+	  14: INCEIPL       	$4
+
+	0xFE92878:  7D3FC9D6  mullw r9,r31,r25
+	  15: GETL       	R31, t10
+	  16: GETL       	R25, t12
+	  17: MULL       	t10, t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0xFE9287C:  7FA9C214  add r29,r9,r24
+	  20: GETL       	R9, t14
+	  21: GETL       	R24, t16
+	  22: ADDL       	t14, t16
+	  23: PUTL       	t16, R29
+	  24: INCEIPL       	$4
+
+	0xFE92880:  7FA4EB78  or r4,r29,r29
+	  25: GETL       	R29, t18
+	  26: PUTL       	t18, R4
+	  27: INCEIPL       	$4
+
+	0xFE92884:  4E800021  blrl
+	  28: GETL       	LR, t20
+	  29: MOVL       	$0xFE92888, t22
+	  30: PUTL       	t22, LR
+	  31: JMPo-r       	t20  ($4)
+
+
+
+. 2723 FE92868 32
+. 7C 7B E2 14 7F 48 03 A6 54 7F F8 7E 7E E3 BB 78 7D 3F C9 D6 7F A9 C2 14 7F A4 EB 78 4E 80 00 21
+==== BB 2724 (0xFE92888) approx BBs exec'd 0 ====
+
+	0xFE92888:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE9288C:  419C0084  bc 12,28,0xFE92910
+	   4: Js28o       	$0xFE92910
+
+
+
+. 2724 FE92888 8
+. 2F 83 00 00 41 9C 00 84
+==== BB 2725 (0xFE92910) approx BBs exec'd 0 ====
+
+	0xFE92910:  7FFCFB78  or r28,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0xFE92914:  7F9BE040  cmpl cr7,r27,r28
+	   3: GETL       	R27, t2
+	   4: GETL       	R28, t4
+	   5: CMPUL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFE92918:  4BFFFF84  b 0xFE9289C
+	   8: JMPo       	$0xFE9289C  ($4)
+
+
+
+. 2725 FE92910 12
+. 7F FC FB 78 7F 9B E0 40 4B FF FF 84
+==== BB 2726 (0xFE9289C) approx BBs exec'd 0 ====
+
+	0xFE9289C:  419CFFCC  bc 12,28,0xFE92868
+	   0: Js28o       	$0xFE92868
+
+
+
+. 2726 FE9289C 4
+. 41 9C FF CC
+==== BB 2727 (0xFE92890) approx BBs exec'd 0 ====
+
+	0xFE92890:  3B7F0001  addi r27,r31,1
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0xFE92894:  409D0044  bc 4,29,0xFE928D8
+	   4: Jc29o       	$0xFE928D8
+
+
+
+. 2727 FE92890 8
+. 3B 7F 00 01 40 9D 00 44
+==== BB 2728 (0xFE92898) approx BBs exec'd 0 ====
+
+	0xFE92898:  7F9BE040  cmpl cr7,r27,r28
+	   0: GETL       	R27, t0
+	   1: GETL       	R28, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE9289C:  419CFFCC  bc 12,28,0xFE92868
+	   5: Js28o       	$0xFE92868
+
+
+
+. 2728 FE92898 8
+. 7F 9B E0 40 41 9C FF CC
+==== BB 2729 (0xFE928A0) approx BBs exec'd 0 ====
+
+	0xFE928A0:  80810034  lwz r4,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE928A4:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE928A8:  82E1000C  lwz r23,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0xFE928AC:  83010010  lwz r24,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R24
+	  17: INCEIPL       	$4
+
+	0xFE928B0:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFE928B4:  83210014  lwz r25,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0xFE928B8:  83410018  lwz r26,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R26
+	  30: INCEIPL       	$4
+
+	0xFE928BC:  8361001C  lwz r27,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R27
+	  35: INCEIPL       	$4
+
+	0xFE928C0:  83810020  lwz r28,32(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R28
+	  40: INCEIPL       	$4
+
+	0xFE928C4:  83A10024  lwz r29,36(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x24, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R29
+	  45: INCEIPL       	$4
+
+	0xFE928C8:  83C10028  lwz r30,40(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x28, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R30
+	  50: INCEIPL       	$4
+
+	0xFE928CC:  83E1002C  lwz r31,44(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x2C, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R31
+	  55: INCEIPL       	$4
+
+	0xFE928D0:  38210030  addi r1,r1,48
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x30, t44
+	  58: PUTL       	t44, R1
+	  59: INCEIPL       	$4
+
+	0xFE928D4:  4E800020  blr
+	  60: GETL       	LR, t46
+	  61: JMPo-r       	t46  ($4)
+
+
+
+. 2729 FE928A0 56
+. 80 81 00 34 38 60 00 00 82 E1 00 0C 83 01 00 10 7C 88 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 2730 (0xFE8D6F0) approx BBs exec'd 0 ====
+
+	0xFE8D6F0:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE8D6F4:  4186FEC4  bc 12,6,0xFE8D5B8
+	   4: Js06o       	$0xFE8D5B8
+
+
+
+. 2730 FE8D6F0 8
+. 2C 83 00 00 41 86 FE C4
+==== BB 2731 (0xFE8D660) approx BBs exec'd 0 ====
+
+	0xFE8D660:  3AE00000  li r23,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R23
+	   2: INCEIPL       	$4
+
+	0xFE8D664:  7C0004AC  sync
+	   3: INCEIPL       	$4
+
+	0xFE8D668:  7D00D828  lwarx r8,r0,r27
+	   4: GETL       	R27, t2
+	   5: LOCKo       	
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0xFE8D66C:  7EE0D92D  stwcx. r23,r0,r27
+	   9: GETL       	R27, t6
+	  10: GETL       	R23, t8
+	  11: LOCKo       	
+	  12: STL       	t8, (t6)  (-rSo)
+	  13: ICRFL       	cr, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFE8D670:  40A2FFF8  bc 5,2,0xFE8D668
+	  15: Jc02o       	$0xFE8D668
+
+
+
+. 2731 FE8D660 20
+. 3A E0 00 00 7C 00 04 AC 7D 00 D8 28 7E E0 D9 2D 40 A2 FF F8
+==== BB 2732 (0xFE8D674) approx BBs exec'd 0 ====
+
+	0xFE8D674:  2C080001  cmpi cr0,r8,1
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8D678:  418100A0  bc 12,1,0xFE8D718
+	   5: Js01o       	$0xFE8D718
+
+
+
+. 2732 FE8D674 8
+. 2C 08 00 01 41 81 00 A0
+==== BB 2733 (0xFE8D67C) approx BBs exec'd 0 ====
+
+	0xFE8D67C:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D680:  82C10044  lwz r22,68(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x44, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0xFE8D684:  82E1001C  lwz r23,28(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x1C, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0xFE8D688:  7EC803A6  mtlr r22
+	  13: GETL       	R22, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFE8D68C:  83010020  lwz r24,32(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x20, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R24
+	  20: INCEIPL       	$4
+
+	0xFE8D690:  82C10018  lwz r22,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R22
+	  25: INCEIPL       	$4
+
+	0xFE8D694:  83210024  lwz r25,36(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x24, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R25
+	  30: INCEIPL       	$4
+
+	0xFE8D698:  83410028  lwz r26,40(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x28, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R26
+	  35: INCEIPL       	$4
+
+	0xFE8D69C:  8361002C  lwz r27,44(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x2C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R27
+	  40: INCEIPL       	$4
+
+	0xFE8D6A0:  83810030  lwz r28,48(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x30, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R28
+	  45: INCEIPL       	$4
+
+	0xFE8D6A4:  83A10034  lwz r29,52(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x34, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R29
+	  50: INCEIPL       	$4
+
+	0xFE8D6A8:  83C10038  lwz r30,56(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x38, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R30
+	  55: INCEIPL       	$4
+
+	0xFE8D6AC:  83E1003C  lwz r31,60(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x3C, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R31
+	  60: INCEIPL       	$4
+
+	0xFE8D6B0:  38210040  addi r1,r1,64
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x40, t48
+	  63: PUTL       	t48, R1
+	  64: INCEIPL       	$4
+
+	0xFE8D6B4:  4E800020  blr
+	  65: GETL       	LR, t50
+	  66: JMPo-r       	t50  ($4)
+
+
+
+. 2733 FE8D67C 60
+. 7E C3 B3 78 82 C1 00 44 82 E1 00 1C 7E C8 03 A6 83 01 00 20 82 C1 00 18 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 2734 (0xFE8B8A8) approx BBs exec'd 0 ====
+
+	0xFE8B8A8:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFE8B8AC:  41920014  bc 12,18,0xFE8B8C0
+	   4: Js18o       	$0xFE8B8C0
+
+
+
+. 2734 FE8B8A8 8
+. 2E 03 00 00 41 92 00 14
+==== BB 2735 (0xFE8B8C0) approx BBs exec'd 0 ====
+
+	0xFE8B8C0:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8B8C4:  38810018  addi r4,r1,24
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x18, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0xFE8B8C8:  38A1001C  addi r5,r1,28
+	   7: GETL       	R1, t4
+	   8: ADDL       	$0x1C, t4
+	   9: PUTL       	t4, R5
+	  10: INCEIPL       	$4
+
+	0xFE8B8CC:  38C10020  addi r6,r1,32
+	  11: GETL       	R1, t6
+	  12: ADDL       	$0x20, t6
+	  13: PUTL       	t6, R6
+	  14: INCEIPL       	$4
+
+	0xFE8B8D0:  38E10024  addi r7,r1,36
+	  15: GETL       	R1, t8
+	  16: ADDL       	$0x24, t8
+	  17: PUTL       	t8, R7
+	  18: INCEIPL       	$4
+
+	0xFE8B8D4:  39010028  addi r8,r1,40
+	  19: GETL       	R1, t10
+	  20: ADDL       	$0x28, t10
+	  21: PUTL       	t10, R8
+	  22: INCEIPL       	$4
+
+	0xFE8B8D8:  480026E9  bl 0xFE8DFC0
+	  23: MOVL       	$0xFE8B8DC, t12
+	  24: PUTL       	t12, LR
+	  25: JMPo-c       	$0xFE8DFC0  ($4)
+
+
+
+. 2735 FE8B8C0 28
+. 7F 43 D3 78 38 81 00 18 38 A1 00 1C 38 C1 00 20 38 E1 00 24 39 01 00 28 48 00 26 E9
+==== BB 2736 _nl_explode_name(0xFE8DFC0) approx BBs exec'd 0 ====
+
+	0xFE8DFC0:  7D2802A6  mflr r9
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0xFE8DFC4:  9421FFD0  stwu r1,-48(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFD0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFE8DFC8:  38000000  li r0,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0xFE8DFCC:  93010010  stw r24,16(r1)
+	  12: GETL       	R24, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFE8DFD0:  93210014  stw r25,20(r1)
+	  17: GETL       	R25, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFE8DFD4:  7CB82B78  or r24,r5,r5
+	  22: GETL       	R5, t16
+	  23: PUTL       	t16, R24
+	  24: INCEIPL       	$4
+
+	0xFE8DFD8:  91210034  stw r9,52(r1)
+	  25: GETL       	R9, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x34, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFE8DFDC:  7D194378  or r25,r8,r8
+	  30: GETL       	R8, t22
+	  31: PUTL       	t22, R25
+	  32: INCEIPL       	$4
+
+	0xFE8DFE0:  90050000  stw r0,0(r5)
+	  33: GETL       	R0, t24
+	  34: GETL       	R5, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xFE8DFE4:  7C6A1B78  or r10,r3,r3
+	  37: GETL       	R3, t28
+	  38: PUTL       	t28, R10
+	  39: INCEIPL       	$4
+
+	0xFE8DFE8:  90060000  stw r0,0(r6)
+	  40: GETL       	R0, t30
+	  41: GETL       	R6, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFE8DFEC:  9361001C  stw r27,28(r1)
+	  44: GETL       	R27, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x1C, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFE8DFF0:  7CFB3B78  or r27,r7,r7
+	  49: GETL       	R7, t38
+	  50: PUTL       	t38, R27
+	  51: INCEIPL       	$4
+
+	0xFE8DFF4:  90070000  stw r0,0(r7)
+	  52: GETL       	R0, t40
+	  53: GETL       	R7, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0xFE8DFF8:  93810020  stw r28,32(r1)
+	  56: GETL       	R28, t44
+	  57: GETL       	R1, t46
+	  58: ADDL       	$0x20, t46
+	  59: STL       	t44, (t46)
+	  60: INCEIPL       	$4
+
+	0xFE8DFFC:  3B800000  li r28,0
+	  61: MOVL       	$0x0, t48
+	  62: PUTL       	t48, R28
+	  63: INCEIPL       	$4
+
+	0xFE8E000:  93A10024  stw r29,36(r1)
+	  64: GETL       	R29, t50
+	  65: GETL       	R1, t52
+	  66: ADDL       	$0x24, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0xFE8E004:  7CDD3378  or r29,r6,r6
+	  69: GETL       	R6, t54
+	  70: PUTL       	t54, R29
+	  71: INCEIPL       	$4
+
+	0xFE8E008:  90080000  stw r0,0(r8)
+	  72: GETL       	R0, t56
+	  73: GETL       	R8, t58
+	  74: STL       	t56, (t58)
+	  75: INCEIPL       	$4
+
+	0xFE8E00C:  93410018  stw r26,24(r1)
+	  76: GETL       	R26, t60
+	  77: GETL       	R1, t62
+	  78: ADDL       	$0x18, t62
+	  79: STL       	t60, (t62)
+	  80: INCEIPL       	$4
+
+	0xFE8E010:  93C10028  stw r30,40(r1)
+	  81: GETL       	R30, t64
+	  82: GETL       	R1, t66
+	  83: ADDL       	$0x28, t66
+	  84: STL       	t64, (t66)
+	  85: INCEIPL       	$4
+
+	0xFE8E014:  93E1002C  stw r31,44(r1)
+	  86: GETL       	R31, t68
+	  87: GETL       	R1, t70
+	  88: ADDL       	$0x2C, t70
+	  89: STL       	t68, (t70)
+	  90: INCEIPL       	$4
+
+	0xFE8E018:  90640000  stw r3,0(r4)
+	  91: GETL       	R3, t72
+	  92: GETL       	R4, t74
+	  93: STL       	t72, (t74)
+	  94: INCEIPL       	$4
+
+	0xFE8E01C:  88030000  lbz r0,0(r3)
+	  95: GETL       	R3, t76
+	  96: LDB       	(t76), t78
+	  97: PUTL       	t78, R0
+	  98: INCEIPL       	$4
+
+	0xFE8E020:  2F800000  cmpi cr7,r0,0
+	  99: GETL       	R0, t80
+	 100: CMP0L       	t80, t82  (-rSo)
+	 101: ICRFL       	t82, $0x7, CR
+	 102: INCEIPL       	$4
+
+	0xFE8E024:  419E0040  bc 12,30,0xFE8E064
+	 103: Js30o       	$0xFE8E064
+
+
+
+. 2736 FE8DFC0 104
+. 7D 28 02 A6 94 21 FF D0 38 00 00 00 93 01 00 10 93 21 00 14 7C B8 2B 78 91 21 00 34 7D 19 43 78 90 05 00 00 7C 6A 1B 78 90 06 00 00 93 61 00 1C 7C FB 3B 78 90 07 00 00 93 81 00 20 3B 80 00 00 93 A1 00 24 7C DD 33 78 90 08 00 00 93 41 00 18 93 C1 00 28 93 E1 00 2C 90 64 00 00 88 03 00 00 2F 80 00 00 41 9E 00 40
+==== BB 2737 (0xFE8E028) approx BBs exec'd 0 ====
+
+	0xFE8E028:  2C00005F  cmpi cr0,r0,95
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x5F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E02C:  41820038  bc 12,2,0xFE8E064
+	   5: Js02o       	$0xFE8E064
+
+
+
+. 2737 FE8E028 8
+. 2C 00 00 5F 41 82 00 38
+==== BB 2738 (0xFE8E030) approx BBs exec'd 0 ====
+
+	0xFE8E030:  2C800040  cmpi cr1,r0,64
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x40, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E034:  41860030  bc 12,6,0xFE8E064
+	   5: Js06o       	$0xFE8E064
+
+
+
+. 2738 FE8E030 8
+. 2C 80 00 40 41 86 00 30
+==== BB 2739 (0xFE8E038) approx BBs exec'd 0 ====
+
+	0xFE8E038:  2F00002E  cmpi cr6,r0,46
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2E, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E03C:  419A0028  bc 12,26,0xFE8E064
+	   5: Js26o       	$0xFE8E064
+
+
+
+. 2739 FE8E038 8
+. 2F 00 00 2E 41 9A 00 28
+==== BB 2740 (0xFE8E040) approx BBs exec'd 0 ====
+
+	0xFE8E040:  8C8A0001  lbzu r4,1(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R10
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8E044:  2F840000  cmpi cr7,r4,0
+	   6: GETL       	R4, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFE8E048:  2C04002E  cmpi cr0,r4,46
+	  10: GETL       	R4, t8
+	  11: MOVL       	$0x2E, t12
+	  12: CMPL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFE8E04C:  2F04005F  cmpi cr6,r4,95
+	  15: GETL       	R4, t14
+	  16: MOVL       	$0x5F, t18
+	  17: CMPL       	t14, t18, t16  (-rSo)
+	  18: ICRFL       	t16, $0x6, CR
+	  19: INCEIPL       	$4
+
+	0xFE8E050:  2C840040  cmpi cr1,r4,64
+	  20: GETL       	R4, t20
+	  21: MOVL       	$0x40, t24
+	  22: CMPL       	t20, t24, t22  (-rSo)
+	  23: ICRFL       	t22, $0x1, CR
+	  24: INCEIPL       	$4
+
+	0xFE8E054:  419E0010  bc 12,30,0xFE8E064
+	  25: Js30o       	$0xFE8E064
+
+
+
+. 2740 FE8E040 24
+. 8C 8A 00 01 2F 84 00 00 2C 04 00 2E 2F 04 00 5F 2C 84 00 40 41 9E 00 10
+==== BB 2741 (0xFE8E058) approx BBs exec'd 0 ====
+
+	0xFE8E058:  419A000C  bc 12,26,0xFE8E064
+	   0: Js26o       	$0xFE8E064
+
+
+
+. 2741 FE8E058 4
+. 41 9A 00 0C
+==== BB 2742 (0xFE8E05C) approx BBs exec'd 0 ====
+
+	0xFE8E05C:  41860008  bc 12,6,0xFE8E064
+	   0: Js06o       	$0xFE8E064
+
+
+
+. 2742 FE8E05C 4
+. 41 86 00 08
+==== BB 2743 (0xFE8E060) approx BBs exec'd 0 ====
+
+	0xFE8E060:  4082FFE0  bc 4,2,0xFE8E040
+	   0: Jc02o       	$0xFE8E040
+
+
+
+. 2743 FE8E060 4
+. 40 82 FF E0
+==== BB 2744 (0xFE8E064) approx BBs exec'd 0 ====
+
+	0xFE8E064:  7C035000  cmp cr0,r3,r10
+	   0: GETL       	R3, t0
+	   1: GETL       	R10, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E068:  7D5F5378  or r31,r10,r10
+	   5: GETL       	R10, t6
+	   6: PUTL       	t6, R31
+	   7: INCEIPL       	$4
+
+	0xFE8E06C:  418200BC  bc 12,2,0xFE8E128
+	   8: Js02o       	$0xFE8E128
+
+
+
+. 2744 FE8E064 12
+. 7C 03 50 00 7D 5F 53 78 41 82 00 BC
+==== BB 2745 (0xFE8E070) approx BBs exec'd 0 ====
+
+	0xFE8E070:  886A0000  lbz r3,0(r10)
+	   0: GETL       	R10, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8E074:  2C830040  cmpi cr1,r3,64
+	   4: GETL       	R3, t4
+	   5: MOVL       	$0x40, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFE8E078:  7C691B78  or r9,r3,r3
+	   9: GETL       	R3, t10
+	  10: PUTL       	t10, R9
+	  11: INCEIPL       	$4
+
+	0xFE8E07C:  41860014  bc 12,6,0xFE8E090
+	  12: Js06o       	$0xFE8E090
+
+
+
+. 2745 FE8E070 16
+. 88 6A 00 00 2C 83 00 40 7C 69 1B 78 41 86 00 14
+==== BB 2746 (0xFE8E080) approx BBs exec'd 0 ====
+
+	0xFE8E080:  2F03005F  cmpi cr6,r3,95
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0x5F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E084:  419A013C  bc 12,26,0xFE8E1C0
+	   5: Js26o       	$0xFE8E1C0
+
+
+
+. 2746 FE8E080 8
+. 2F 03 00 5F 41 9A 01 3C
+==== BB 2747 (0xFE8E1C0) approx BBs exec'd 0 ====
+
+	0xFE8E1C0:  38A00000  li r5,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFE8E1C4:  3BEA0001  addi r31,r10,1
+	   3: GETL       	R10, t2
+	   4: ADDL       	$0x1, t2
+	   5: PUTL       	t2, R31
+	   6: INCEIPL       	$4
+
+	0xFE8E1C8:  98AA0000  stb r5,0(r10)
+	   7: GETL       	R5, t4
+	   8: GETL       	R10, t6
+	   9: STB       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xFE8E1CC:  93FD0000  stw r31,0(r29)
+	  11: GETL       	R31, t8
+	  12: GETL       	R29, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFE8E1D0:  892A0001  lbz r9,1(r10)
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x1, t12
+	  17: LDB       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0xFE8E1D4:  7D204B79  or. r0,r9,r9
+	  20: GETL       	R9, t16
+	  21: PUTL       	t16, R0
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0xFE8E1D8:  41820030  bc 12,2,0xFE8E208
+	  25: Js02o       	$0xFE8E208
+
+
+
+. 2747 FE8E1C0 28
+. 38 A0 00 00 3B EA 00 01 98 AA 00 00 93 FD 00 00 89 2A 00 01 7D 20 4B 79 41 82 00 30
+==== BB 2748 (0xFE8E1DC) approx BBs exec'd 0 ====
+
+	0xFE8E1DC:  2F80002E  cmpi cr7,r0,46
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2E, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E1E0:  419E0028  bc 12,30,0xFE8E208
+	   5: Js30o       	$0xFE8E208
+
+
+
+. 2748 FE8E1DC 8
+. 2F 80 00 2E 41 9E 00 28
+==== BB 2749 (0xFE8E1E4) approx BBs exec'd 0 ====
+
+	0xFE8E1E4:  2C800040  cmpi cr1,r0,64
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x40, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E1E8:  41860020  bc 12,6,0xFE8E208
+	   5: Js06o       	$0xFE8E208
+
+
+
+. 2749 FE8E1E4 8
+. 2C 80 00 40 41 86 00 20
+==== BB 2750 (0xFE8E1EC) approx BBs exec'd 0 ====
+
+	0xFE8E1EC:  8D3F0001  lbzu r9,1(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R31
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0xFE8E1F0:  7D264B79  or. r6,r9,r9
+	   6: GETL       	R9, t4
+	   7: PUTL       	t4, R6
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0xFE8E1F4:  2F060040  cmpi cr6,r6,64
+	  11: GETL       	R6, t8
+	  12: MOVL       	$0x40, t12
+	  13: CMPL       	t8, t12, t10  (-rSo)
+	  14: ICRFL       	t10, $0x6, CR
+	  15: INCEIPL       	$4
+
+	0xFE8E1F8:  2F86002E  cmpi cr7,r6,46
+	  16: GETL       	R6, t14
+	  17: MOVL       	$0x2E, t18
+	  18: CMPL       	t14, t18, t16  (-rSo)
+	  19: ICRFL       	t16, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0xFE8E1FC:  4182000C  bc 12,2,0xFE8E208
+	  21: Js02o       	$0xFE8E208
+
+
+
+. 2750 FE8E1EC 20
+. 8D 3F 00 01 7D 26 4B 79 2F 06 00 40 2F 86 00 2E 41 82 00 0C
+==== BB 2751 (0xFE8E200) approx BBs exec'd 0 ====
+
+	0xFE8E200:  419E0008  bc 12,30,0xFE8E208
+	   0: Js30o       	$0xFE8E208
+
+
+
+. 2751 FE8E200 4
+. 41 9E 00 08
+==== BB 2752 (0xFE8E204) approx BBs exec'd 0 ====
+
+	0xFE8E204:  409AFFE8  bc 4,26,0xFE8E1EC
+	   0: Jc26o       	$0xFE8E1EC
+
+
+
+. 2752 FE8E204 4
+. 40 9A FF E8
+==== BB 2753 (0xFE8E208) approx BBs exec'd 0 ====
+
+	0xFE8E208:  3B800004  li r28,4
+	   0: MOVL       	$0x4, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0xFE8E20C:  4BFFFE7C  b 0xFE8E088
+	   3: JMPo       	$0xFE8E088  ($4)
+
+
+
+. 2753 FE8E208 8
+. 3B 80 00 04 4B FF FE 7C
+==== BB 2754 (0xFE8E088) approx BBs exec'd 0 ====
+
+	0xFE8E088:  2F09002E  cmpi cr6,r9,46
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x2E, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E08C:  419A00B4  bc 12,26,0xFE8E140
+	   5: Js26o       	$0xFE8E140
+
+
+
+. 2754 FE8E088 8
+. 2F 09 00 2E 41 9A 00 B4
+==== BB 2755 (0xFE8E140) approx BBs exec'd 0 ====
+
+	0xFE8E140:  38E00000  li r7,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFE8E144:  98FF0000  stb r7,0(r31)
+	   3: GETL       	R7, t2
+	   4: GETL       	R31, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFE8E148:  3BFF0001  addi r31,r31,1
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R31
+	  10: INCEIPL       	$4
+
+	0xFE8E14C:  93FB0000  stw r31,0(r27)
+	  11: GETL       	R31, t8
+	  12: GETL       	R27, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFE8E150:  7FE3FB78  or r3,r31,r31
+	  15: GETL       	R31, t12
+	  16: PUTL       	t12, R3
+	  17: INCEIPL       	$4
+
+	0xFE8E154:  893F0000  lbz r9,0(r31)
+	  18: GETL       	R31, t14
+	  19: LDB       	(t14), t16
+	  20: PUTL       	t16, R9
+	  21: INCEIPL       	$4
+
+	0xFE8E158:  7D204B79  or. r0,r9,r9
+	  22: GETL       	R9, t18
+	  23: PUTL       	t18, R0
+	  24: CMP0L       	t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x0, CR
+	  26: INCEIPL       	$4
+
+	0xFE8E15C:  4182001C  bc 12,2,0xFE8E178
+	  27: Js02o       	$0xFE8E178
+
+
+
+. 2755 FE8E140 32
+. 38 E0 00 00 98 FF 00 00 3B FF 00 01 93 FB 00 00 7F E3 FB 78 89 3F 00 00 7D 20 4B 79 41 82 00 1C
+==== BB 2756 (0xFE8E160) approx BBs exec'd 0 ====
+
+	0xFE8E160:  2F800040  cmpi cr7,r0,64
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x40, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E164:  419E0014  bc 12,30,0xFE8E178
+	   5: Js30o       	$0xFE8E178
+
+
+
+. 2756 FE8E160 8
+. 2F 80 00 40 41 9E 00 14
+==== BB 2757 (0xFE8E168) approx BBs exec'd 0 ====
+
+	0xFE8E168:  8D3F0001  lbzu r9,1(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R31
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0xFE8E16C:  7D284B79  or. r8,r9,r9
+	   6: GETL       	R9, t4
+	   7: PUTL       	t4, R8
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0xFE8E170:  2F880040  cmpi cr7,r8,64
+	  11: GETL       	R8, t8
+	  12: MOVL       	$0x40, t12
+	  13: CMPL       	t8, t12, t10  (-rSo)
+	  14: ICRFL       	t10, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0xFE8E174:  4082FFF0  bc 4,2,0xFE8E164
+	  16: Jc02o       	$0xFE8E164
+
+
+
+. 2757 FE8E168 16
+. 8D 3F 00 01 7D 28 4B 79 2F 88 00 40 40 82 FF F0
+==== BB 2758 (0xFE8E164) approx BBs exec'd 0 ====
+
+	0xFE8E164:  419E0014  bc 12,30,0xFE8E178
+	   0: Js30o       	$0xFE8E178
+
+
+
+. 2758 FE8E164 4
+. 41 9E 00 14
+==== BB 2759 (0xFE8E178) approx BBs exec'd 0 ====
+
+	0xFE8E178:  7F83F800  cmp cr7,r3,r31
+	   0: GETL       	R3, t0
+	   1: GETL       	R31, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E17C:  639C0002  ori r28,r28,0x2
+	   5: GETL       	R28, t6
+	   6: ORL       	$0x2, t6
+	   7: PUTL       	t6, R28
+	   8: INCEIPL       	$4
+
+	0xFE8E180:  41BEFF10  bc 13,30,0xFE8E090
+	   9: Js30o       	$0xFE8E090
+
+
+
+. 2759 FE8E178 12
+. 7F 83 F8 00 63 9C 00 02 41 BE FF 10
+==== BB 2760 (0xFE8E184) approx BBs exec'd 0 ====
+
+	0xFE8E184:  89430000  lbz r10,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE8E188:  2C8A0000  cmpi cr1,r10,0
+	   4: GETL       	R10, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFE8E18C:  4186FF04  bc 12,6,0xFE8E090
+	   8: Js06o       	$0xFE8E090
+
+
+
+. 2760 FE8E184 12
+. 89 43 00 00 2C 8A 00 00 41 86 FF 04
+==== BB 2761 (0xFE8E190) approx BBs exec'd 0 ====
+
+	0xFE8E190:  7C83F850  subf r4,r3,r31
+	   0: GETL       	R3, t0
+	   1: GETL       	R31, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8E194:  4BFFFC11  bl 0xFE8DDA4
+	   5: MOVL       	$0xFE8E198, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xFE8DDA4  ($4)
+
+
+
+. 2761 FE8E190 8
+. 7C 83 F8 50 4B FF FC 11
+==== BB 2762 (0xFECF85C) approx BBs exec'd 0 ====
+
+	0xFECF85C:  7C9AE800  cmp cr1,r26,r29
+	   0: GETL       	R26, t0
+	   1: GETL       	R29, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFECF860:  41860214  bc 12,6,0xFECFA74
+	   5: Js06o       	$0xFECFA74
+
+
+
+. 2762 FECF85C 8
+. 7C 9A E8 00 41 86 02 14
+==== BB 2763 (0xFECFA74) approx BBs exec'd 0 ====
+
+	0xFECFA74:  837C0034  lwz r27,52(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFECFA78:  7F1BF800  cmp cr6,r27,r31
+	   5: GETL       	R27, t4
+	   6: GETL       	R31, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFECFA7C:  409AFDE8  bc 4,26,0xFECF864
+	  10: Jc26o       	$0xFECF864
+
+
+
+. 2763 FECFA74 12
+. 83 7C 00 34 7F 1B F8 00 40 9A FD E8
+==== BB 2764 (0xFECFA38) approx BBs exec'd 0 ====
+
+	0xFECFA38:  7CC02839  and. r0,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFECFA3C:  40820014  bc 4,2,0xFECFA50
+	   7: Jc02o       	$0xFECFA50
+
+
+
+. 2764 FECFA38 8
+. 7C C0 28 39 40 82 00 14
+==== BB 2765 (0xFECFA40) approx BBs exec'd 0 ====
+
+	0xFECFA40:  54C6083C  rlwinm r6,r6,1,0,30
+	   0: GETL       	R6, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0xFECFA44:  396B0008  addi r11,r11,8
+	   4: GETL       	R11, t2
+	   5: ADDL       	$0x8, t2
+	   6: PUTL       	t2, R11
+	   7: INCEIPL       	$4
+
+	0xFECFA48:  7CC92839  and. r9,r6,r5
+	   8: GETL       	R6, t4
+	   9: GETL       	R5, t6
+	  10: ANDL       	t4, t6
+	  11: PUTL       	t6, R9
+	  12: CMP0L       	t6, t8  (-rSo)
+	  13: ICRFL       	t8, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFECFA4C:  4182FFF4  bc 12,2,0xFECFA40
+	  15: Js02o       	$0xFECFA40
+
+
+
+. 2765 FECFA40 16
+. 54 C6 08 3C 39 6B 00 08 7C C9 28 39 41 82 FF F4
+==== BB 2766 (0xFECFA50) approx BBs exec'd 0 ====
+
+	0xFECFA50:  814B000C  lwz r10,12(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFECFA54:  7F8A5800  cmp cr7,r10,r11
+	   5: GETL       	R10, t4
+	   6: GETL       	R11, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFECFA58:  409E01BC  bc 4,30,0xFECFC14
+	  10: Jc30o       	$0xFECFC14
+
+
+
+. 2766 FECFA50 12
+. 81 4B 00 0C 7F 8A 58 00 40 9E 01 BC
+==== BB 2767 (0xFECFA5C) approx BBs exec'd 0 ====
+
+	0xFECFA5C:  7CA53078  andc r5,r5,r6
+	   0: GETL       	R5, t0
+	   1: GETL       	R6, t2
+	   2: NOTL       	t2
+	   3: ANDL       	t0, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFECFA60:  7F03E214  add r24,r3,r28
+	   6: GETL       	R3, t4
+	   7: GETL       	R28, t6
+	   8: ADDL       	t4, t6
+	   9: PUTL       	t6, R24
+	  10: INCEIPL       	$4
+
+	0xFECFA64:  396A0008  addi r11,r10,8
+	  11: GETL       	R10, t8
+	  12: ADDL       	$0x8, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0xFECFA68:  54C6083C  rlwinm r6,r6,1,0,30
+	  15: GETL       	R6, t10
+	  16: SHLL       	$0x1, t10
+	  17: PUTL       	t10, R6
+	  18: INCEIPL       	$4
+
+	0xFECFA6C:  90B80438  stw r5,1080(r24)
+	  19: GETL       	R5, t12
+	  20: GETL       	R24, t14
+	  21: ADDL       	$0x438, t14
+	  22: STL       	t12, (t14)
+	  23: INCEIPL       	$4
+
+	0xFECFA70:  4BFFFF7C  b 0xFECF9EC
+	  24: JMPo       	$0xFECF9EC  ($4)
+
+
+
+. 2767 FECFA5C 24
+. 7C A5 30 78 7F 03 E2 14 39 6A 00 08 54 C6 08 3C 90 B8 04 38 4B FF FF 7C
+==== BB 2768 (0xFECF9EC) approx BBs exec'd 0 ====
+
+	0xFECF9EC:  21260000  subfic r9,r6,0
+	   0: GETL       	R6, t0
+	   1: MOVL       	$0x0, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECF9F0:  7EA93114  adde r21,r9,r6
+	   5: GETL       	R9, t4
+	   6: GETL       	R6, t6
+	   7: ADCL       	t4, t6  (-rCa-wCa)
+	   8: PUTL       	t6, R21
+	   9: INCEIPL       	$4
+
+	0xFECF9F4:  7D262810  subfc r9,r6,r5
+	  10: GETL       	R6, t8
+	  11: GETL       	R5, t10
+	  12: SBBL       	t8, t10  (-wCa)
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0xFECF9F8:  7D294910  subfe r9,r9,r9
+	  15: GETL       	R9, t12
+	  16: GETL       	R9, t14
+	  17: SBBL       	t12, t14  (-rCa-wCa)
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0xFECF9FC:  7D2900D0  neg r9,r9
+	  20: GETL       	R9, t16
+	  21: NEGL       	t16
+	  22: PUTL       	t16, R9
+	  23: INCEIPL       	$4
+
+	0xFECFA00:  7D2AAB79  or. r10,r9,r21
+	  24: GETL       	R9, t18
+	  25: GETL       	R21, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R10
+	  28: CMP0L       	t18, t22  (-rSo)
+	  29: ICRFL       	t22, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0xFECFA04:  41820034  bc 12,2,0xFECFA38
+	  31: Js02o       	$0xFECFA38
+
+
+
+. 2768 FECF9EC 28
+. 21 26 00 00 7E A9 31 14 7D 26 28 10 7D 29 49 10 7D 29 00 D0 7D 2A AB 79 41 82 00 34
+==== BB 2769 (0xFECFA30) approx BBs exec'd 0 ====
+
+	0xFECFA30:  39680030  addi r11,r8,48
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x30, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFECFA34:  38C00001  li r6,1
+	   4: MOVL       	$0x1, t2
+	   5: PUTL       	t2, R6
+	   6: INCEIPL       	$4
+
+	0xFECFA38:  7CC02839  and. r0,r6,r5
+	   7: GETL       	R6, t4
+	   8: GETL       	R5, t6
+	   9: ANDL       	t4, t6
+	  10: PUTL       	t6, R0
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0xFECFA3C:  40820014  bc 4,2,0xFECFA50
+	  14: Jc02o       	$0xFECFA50
+
+
+
+. 2769 FECFA30 16
+. 39 68 00 30 38 C0 00 01 7C C0 28 39 40 82 00 14
+==== BB 2770 (0xFECFC14) approx BBs exec'd 0 ====
+
+	0xFECFC14:  806A0004  lwz r3,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECFC18:  808A000C  lwz r4,12(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFECFC1C:  54600038  rlwinm r0,r3,0,0,28
+	  10: GETL       	R3, t8
+	  11: ANDL       	$0xFFFFFFF8, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0xFECFC20:  7CF90050  subf r7,r25,r0
+	  14: GETL       	R25, t10
+	  15: GETL       	R0, t12
+	  16: SUBL       	t10, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0xFECFC24:  91640008  stw r11,8(r4)
+	  19: GETL       	R11, t14
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0x8, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFECFC28:  2A07000F  cmpli cr4,r7,15
+	  24: GETL       	R7, t18
+	  25: MOVL       	$0xF, t22
+	  26: CMPUL       	t18, t22, t20  (-rSo)
+	  27: ICRFL       	t20, $0x4, CR
+	  28: INCEIPL       	$4
+
+	0xFECFC2C:  908B000C  stw r4,12(r11)
+	  29: GETL       	R4, t24
+	  30: GETL       	R11, t26
+	  31: ADDL       	$0xC, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0xFECFC30:  41910054  bc 12,17,0xFECFC84
+	  34: Js17o       	$0xFECFC84
+
+
+
+. 2770 FECFC14 32
+. 80 6A 00 04 80 8A 00 0C 54 60 00 38 7C F9 00 50 91 64 00 08 2A 07 00 0F 90 8B 00 0C 41 91 00 54
+==== BB 2771 (0xFECFC84) approx BBs exec'd 0 ====
+
+	0xFECFC84:  7C6ACA14  add r3,r10,r25
+	   0: GETL       	R10, t0
+	   1: GETL       	R25, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECFC88:  907C0040  stw r3,64(r28)
+	   5: GETL       	R3, t4
+	   6: GETL       	R28, t6
+	   7: ADDL       	$0x40, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECFC8C:  907C0044  stw r3,68(r28)
+	  10: GETL       	R3, t8
+	  11: GETL       	R28, t10
+	  12: ADDL       	$0x44, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFECFC90:  93430008  stw r26,8(r3)
+	  15: GETL       	R26, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0x8, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFECFC94:  9343000C  stw r26,12(r3)
+	  20: GETL       	R26, t16
+	  21: GETL       	R3, t18
+	  22: ADDL       	$0xC, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0xFECFC98:  41850008  bc 12,5,0xFECFCA0
+	  25: Js05o       	$0xFECFCA0
+
+
+
+. 2771 FECFC84 24
+. 7C 6A CA 14 90 7C 00 40 90 7C 00 44 93 43 00 08 93 43 00 0C 41 85 00 08
+==== BB 2772 (0xFECFC9C) approx BBs exec'd 0 ====
+
+	0xFECFC9C:  907C0034  stw r3,52(r28)
+	   0: GETL       	R3, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	$0x34, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECFCA0:  57AF103A  rlwinm r15,r29,2,0,29
+	   5: GETL       	R29, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R15
+	   8: INCEIPL       	$4
+
+	0xFECFCA4:  60F20001  ori r18,r7,0x1
+	   9: GETL       	R7, t6
+	  10: ORL       	$0x1, t6
+	  11: PUTL       	t6, R18
+	  12: INCEIPL       	$4
+
+	0xFECFCA8:  7DF69B78  or r22,r15,r19
+	  13: GETL       	R15, t8
+	  14: GETL       	R19, t10
+	  15: ORL       	t10, t8
+	  16: PUTL       	t8, R22
+	  17: INCEIPL       	$4
+
+	0xFECFCAC:  390A0008  addi r8,r10,8
+	  18: GETL       	R10, t12
+	  19: ADDL       	$0x8, t12
+	  20: PUTL       	t12, R8
+	  21: INCEIPL       	$4
+
+	0xFECFCB0:  92CA0004  stw r22,4(r10)
+	  22: GETL       	R22, t14
+	  23: GETL       	R10, t16
+	  24: ADDL       	$0x4, t16
+	  25: STL       	t14, (t16)
+	  26: INCEIPL       	$4
+
+	0xFECFCB4:  7CE3392E  stwx r7,r3,r7
+	  27: GETL       	R7, t18
+	  28: GETL       	R3, t20
+	  29: ADDL       	t20, t18
+	  30: GETL       	R7, t22
+	  31: STL       	t22, (t18)
+	  32: INCEIPL       	$4
+
+	0xFECFCB8:  92430004  stw r18,4(r3)
+	  33: GETL       	R18, t24
+	  34: GETL       	R3, t26
+	  35: ADDL       	$0x4, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0xFECFCBC:  4BFFF9F4  b 0xFECF6B0
+	  38: JMPo       	$0xFECF6B0  ($4)
+
+
+
+. 2772 FECFC9C 36
+. 90 7C 00 34 57 AF 10 3A 60 F2 00 01 7D F6 9B 78 39 0A 00 08 92 CA 00 04 7C E3 39 2E 92 43 00 04 4B FF F9 F4
+==== BB 2773 (0xFE8E198) approx BBs exec'd 0 ====
+
+	0xFE8E198:  90790000  stw r3,0(r25)
+	   0: GETL       	R3, t0
+	   1: GETL       	R25, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE8E19C:  7C7A1B78  or r26,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R26
+	   6: INCEIPL       	$4
+
+	0xFE8E1A0:  807B0000  lwz r3,0(r27)
+	   7: GETL       	R27, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R3
+	  10: INCEIPL       	$4
+
+	0xFE8E1A4:  7F44D378  or r4,r26,r26
+	  11: GETL       	R26, t10
+	  12: PUTL       	t10, R4
+	  13: INCEIPL       	$4
+
+	0xFE8E1A8:  48048209  bl 0xFED63B0
+	  14: MOVL       	$0xFE8E1AC, t12
+	  15: PUTL       	t12, LR
+	  16: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2773 FE8E198 20
+. 90 79 00 00 7C 7A 1B 78 80 7B 00 00 7F 44 D3 78 48 04 82 09
+==== BB 2774 (0xFE8E1AC) approx BBs exec'd 0 ====
+
+	0xFE8E1AC:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE8E1B0:  419A0060  bc 12,26,0xFE8E210
+	   4: Js26o       	$0xFE8E210
+
+
+
+. 2774 FE8E1AC 8
+. 2F 03 00 00 41 9A 00 60
+==== BB 2775 (0xFE8E1B4) approx BBs exec'd 0 ====
+
+	0xFE8E1B4:  893F0000  lbz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFE8E1B8:  639C0001  ori r28,r28,0x1
+	   4: GETL       	R28, t4
+	   5: ORL       	$0x1, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFE8E1BC:  4BFFFED4  b 0xFE8E090
+	   8: JMPo       	$0xFE8E090  ($4)
+
+
+
+. 2775 FE8E1B4 12
+. 89 3F 00 00 63 9C 00 01 4B FF FE D4
+==== BB 2776 (0xFE8E090) approx BBs exec'd 0 ====
+
+	0xFE8E090:  2C090040  cmpi cr0,r9,64
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x40, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8E094:  41820070  bc 12,2,0xFE8E104
+	   5: Js02o       	$0xFE8E104
+
+
+
+. 2776 FE8E090 8
+. 2C 09 00 40 41 82 00 70
+==== BB 2777 (0xFE8E098) approx BBs exec'd 0 ====
+
+	0xFE8E098:  80DD0000  lwz r6,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFE8E09C:  2C860000  cmpi cr1,r6,0
+	   4: GETL       	R6, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFE8E0A0:  41860014  bc 12,6,0xFE8E0B4
+	   8: Js06o       	$0xFE8E0B4
+
+
+
+. 2777 FE8E098 12
+. 80 DD 00 00 2C 86 00 00 41 86 00 14
+==== BB 2778 (0xFE8E0A4) approx BBs exec'd 0 ====
+
+	0xFE8E0A4:  8B060000  lbz r24,0(r6)
+	   0: GETL       	R6, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R24
+	   3: INCEIPL       	$4
+
+	0xFE8E0A8:  2F180000  cmpi cr6,r24,0
+	   4: GETL       	R24, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0xFE8E0AC:  409A0008  bc 4,26,0xFE8E0B4
+	   8: Jc26o       	$0xFE8E0B4
+
+
+
+. 2778 FE8E0A4 12
+. 8B 06 00 00 2F 18 00 00 40 9A 00 08
+==== BB 2779 (0xFE8E0B4) approx BBs exec'd 0 ====
+
+	0xFE8E0B4:  807B0000  lwz r3,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8E0B8:  2C030000  cmpi cr0,r3,0
+	   4: GETL       	R3, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE8E0BC:  41820014  bc 12,2,0xFE8E0D0
+	   8: Js02o       	$0xFE8E0D0
+
+
+
+. 2779 FE8E0B4 12
+. 80 7B 00 00 2C 03 00 00 41 82 00 14
+==== BB 2780 (0xFE8E0C0) approx BBs exec'd 0 ====
+
+	0xFE8E0C0:  8B430000  lbz r26,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R26
+	   3: INCEIPL       	$4
+
+	0xFE8E0C4:  2F9A0000  cmpi cr7,r26,0
+	   4: GETL       	R26, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFE8E0C8:  409E0008  bc 4,30,0xFE8E0D0
+	   8: Jc30o       	$0xFE8E0D0
+
+
+
+. 2780 FE8E0C0 12
+. 8B 43 00 00 2F 9A 00 00 40 9E 00 08
+==== BB 2781 (0xFE8E0D0) approx BBs exec'd 0 ====
+
+	0xFE8E0D0:  83610034  lwz r27,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFE8E0D4:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE8E0D8:  83010010  lwz r24,16(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R24
+	  12: INCEIPL       	$4
+
+	0xFE8E0DC:  7F6803A6  mtlr r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFE8E0E0:  83210014  lwz r25,20(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x14, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R25
+	  20: INCEIPL       	$4
+
+	0xFE8E0E4:  83410018  lwz r26,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R26
+	  25: INCEIPL       	$4
+
+	0xFE8E0E8:  8361001C  lwz r27,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0xFE8E0EC:  83810020  lwz r28,32(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R28
+	  35: INCEIPL       	$4
+
+	0xFE8E0F0:  83A10024  lwz r29,36(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x24, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R29
+	  40: INCEIPL       	$4
+
+	0xFE8E0F4:  83C10028  lwz r30,40(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x28, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R30
+	  45: INCEIPL       	$4
+
+	0xFE8E0F8:  83E1002C  lwz r31,44(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x2C, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R31
+	  50: INCEIPL       	$4
+
+	0xFE8E0FC:  38210030  addi r1,r1,48
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x30, t40
+	  53: PUTL       	t40, R1
+	  54: INCEIPL       	$4
+
+	0xFE8E100:  4E800020  blr
+	  55: GETL       	LR, t42
+	  56: JMPo-r       	t42  ($4)
+
+
+
+. 2781 FE8E0D0 52
+. 83 61 00 34 7F 83 E3 78 83 01 00 10 7F 68 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 2782 (0xFE8B8DC) approx BBs exec'd 0 ====
+
+	0xFE8B8DC:  801C00A4  lwz r0,164(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0xA4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8B8E0:  7C781B78  or r24,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R24
+	   7: INCEIPL       	$4
+
+	0xFE8B8E4:  2F800000  cmpi cr7,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFE8B8E8:  409E00F8  bc 4,30,0xFE8B9E0
+	  12: Jc30o       	$0xFE8B9E0
+
+
+
+. 2782 FE8B8DC 16
+. 80 1C 00 A4 7C 78 1B 78 2F 80 00 00 40 9E 00 F8
+==== BB 2783 (0xFE8B8EC) approx BBs exec'd 0 ====
+
+	0xFE8B8EC:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8B8F0:  4804B1E9  bl 0xFED6AD8
+	   3: MOVL       	$0xFE8B8F4, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2783 FE8B8EC 8
+. 7F A3 EB 78 48 04 B1 E9
+==== BB 2784 (0xFE8B8F4) approx BBs exec'd 0 ====
+
+	0xFE8B8F4:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE8B8F8:  38A30001  addi r5,r3,1
+	   3: GETL       	R3, t2
+	   4: ADDL       	$0x1, t2
+	   5: PUTL       	t2, R5
+	   6: INCEIPL       	$4
+
+	0xFE8B8FC:  83A1001C  lwz r29,28(r1)
+	   7: GETL       	R1, t4
+	   8: ADDL       	$0x1C, t4
+	   9: LDL       	(t4), t6
+	  10: PUTL       	t6, R29
+	  11: INCEIPL       	$4
+
+	0xFE8B900:  807E01A0  lwz r3,416(r30)
+	  12: GETL       	R30, t8
+	  13: ADDL       	$0x1A0, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFE8B904:  7F06C378  or r6,r24,r24
+	  17: GETL       	R24, t12
+	  18: PUTL       	t12, R6
+	  19: INCEIPL       	$4
+
+	0xFE8B908:  80E10018  lwz r7,24(r1)
+	  20: GETL       	R1, t14
+	  21: ADDL       	$0x18, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R7
+	  24: INCEIPL       	$4
+
+	0xFE8B90C:  81010020  lwz r8,32(r1)
+	  25: GETL       	R1, t18
+	  26: ADDL       	$0x20, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R8
+	  29: INCEIPL       	$4
+
+	0xFE8B910:  81210024  lwz r9,36(r1)
+	  30: GETL       	R1, t22
+	  31: ADDL       	$0x24, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R9
+	  34: INCEIPL       	$4
+
+	0xFE8B914:  81410028  lwz r10,40(r1)
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x28, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R10
+	  39: INCEIPL       	$4
+
+	0xFE8B918:  9321000C  stw r25,12(r1)
+	  40: GETL       	R25, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0xC, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFE8B91C:  3B200001  li r25,1
+	  45: MOVL       	$0x1, t34
+	  46: PUTL       	t34, R25
+	  47: INCEIPL       	$4
+
+	0xFE8B920:  93A10008  stw r29,8(r1)
+	  48: GETL       	R29, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x8, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0xFE8B924:  93210010  stw r25,16(r1)
+	  53: GETL       	R25, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x10, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0xFE8B928:  48001FD5  bl 0xFE8D8FC
+	  58: MOVL       	$0xFE8B92C, t44
+	  59: PUTL       	t44, LR
+	  60: JMPo-c       	$0xFE8D8FC  ($4)
+
+
+
+. 2784 FE8B8F4 56
+. 7F A4 EB 78 38 A3 00 01 83 A1 00 1C 80 7E 01 A0 7F 06 C3 78 80 E1 00 18 81 01 00 20 81 21 00 24 81 41 00 28 93 21 00 0C 3B 20 00 01 93 A1 00 08 93 21 00 10 48 00 1F D5
+==== BB 2785 (0xFE8D99C) approx BBs exec'd 0 ====
+
+	0xFE8D99C:  7E83A378  or r3,r20,r20
+	   0: GETL       	R20, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D9A0:  48049139  bl 0xFED6AD8
+	   3: MOVL       	$0xFE8D9A4, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2785 FE8D99C 8
+. 7E 83 A3 78 48 04 91 39
+==== BB 2786 (0xFE8D9A4) approx BBs exec'd 0 ====
+
+	0xFE8D9A4:  7C7D1A14  add r3,r29,r3
+	   0: GETL       	R29, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8D9A8:  3BE30001  addi r31,r3,1
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0xFE8D9AC:  5799F87E  rlwinm r25,r28,31,1,31
+	   9: GETL       	R28, t6
+	  10: SHRL       	$0x1, t6
+	  11: PUTL       	t6, R25
+	  12: INCEIPL       	$4
+
+	0xFE8D9B0:  7FFDFB78  or r29,r31,r31
+	  13: GETL       	R31, t8
+	  14: PUTL       	t8, R29
+	  15: INCEIPL       	$4
+
+	0xFE8D9B4:  73200001  andi. r0,r25,0x1
+	  16: GETL       	R25, t10
+	  17: ANDL       	$0x1, t10
+	  18: PUTL       	t10, R0
+	  19: CMP0L       	t10, t12  (-rSo)
+	  20: ICRFL       	t12, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0xFE8D9B8:  41820014  bc 12,2,0xFE8D9CC
+	  22: Js02o       	$0xFE8D9CC
+
+
+
+. 2786 FE8D9A4 24
+. 7C 7D 1A 14 3B E3 00 01 57 99 F8 7E 7F FD FB 78 73 20 00 01 41 82 00 14
+==== BB 2787 (0xFE8D9BC) approx BBs exec'd 0 ====
+
+	0xFE8D9BC:  7E639B78  or r3,r19,r19
+	   0: GETL       	R19, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D9C0:  48049119  bl 0xFED6AD8
+	   3: MOVL       	$0xFE8D9C4, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2787 FE8D9BC 8
+. 7E 63 9B 78 48 04 91 19
+==== BB 2788 (0xFE8D9C4) approx BBs exec'd 0 ====
+
+	0xFE8D9C4:  7C9F1A14  add r4,r31,r3
+	   0: GETL       	R31, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8D9C8:  3BA40001  addi r29,r4,1
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0xFE8D9CC:  73970001  andi. r23,r28,0x1
+	   9: GETL       	R28, t6
+	  10: ANDL       	$0x1, t6
+	  11: PUTL       	t6, R23
+	  12: CMP0L       	t6, t8  (-rSo)
+	  13: ICRFL       	t8, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFE8D9D0:  41820188  bc 12,2,0xFE8DB58
+	  15: Js02o       	$0xFE8DB58
+
+
+
+. 2788 FE8D9C4 16
+. 7C 9F 1A 14 3B A4 00 01 73 97 00 01 41 82 01 88
+==== BB 2789 (0xFE8D9D4) approx BBs exec'd 0 ====
+
+	0xFE8D9D4:  7EA3AB78  or r3,r21,r21
+	   0: GETL       	R21, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8D9D8:  48049101  bl 0xFED6AD8
+	   3: MOVL       	$0xFE8D9DC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2789 FE8D9D4 8
+. 7E A3 AB 78 48 04 91 01
+==== BB 2790 (0xFE8D9DC) approx BBs exec'd 0 ====
+
+	0xFE8D9DC:  7CBD1A14  add r5,r29,r3
+	   0: GETL       	R29, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFE8D9E0:  3BE50001  addi r31,r5,1
+	   5: GETL       	R5, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0xFE8D9E4:  7E038378  or r3,r16,r16
+	   9: GETL       	R16, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0xFE8D9E8:  579BE8FE  rlwinm r27,r28,29,3,31
+	  12: GETL       	R28, t8
+	  13: SHRL       	$0x3, t8
+	  14: PUTL       	t8, R27
+	  15: INCEIPL       	$4
+
+	0xFE8D9EC:  480490ED  bl 0xFED6AD8
+	  16: MOVL       	$0xFE8D9F0, t10
+	  17: PUTL       	t10, LR
+	  18: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2790 FE8D9DC 20
+. 7C BD 1A 14 3B E5 00 01 7E 03 83 78 57 9B E8 FE 48 04 90 ED
+==== BB 2791 (0xFECFA80) approx BBs exec'd 0 ====
+
+	0xFECFA80:  38990010  addi r4,r25,16
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0xFECFA84:  7C045040  cmpl cr0,r4,r10
+	   4: GETL       	R4, t2
+	   5: GETL       	R10, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFECFA88:  4080FDDC  bc 4,0,0xFECF864
+	   9: Jc00o       	$0xFECF864
+
+
+
+. 2791 FECFA80 12
+. 38 99 00 10 7C 04 50 40 40 80 FD DC
+==== BB 2792 (0xFECFA8C) approx BBs exec'd 0 ====
+
+	0xFECFA8C:  813E05FC  lwz r9,1532(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECFA90:  7C995050  subf r4,r25,r10
+	   5: GETL       	R25, t4
+	   6: GETL       	R10, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFECFA94:  63260001  ori r6,r25,0x1
+	  10: GETL       	R25, t8
+	  11: ORL       	$0x1, t8
+	  12: PUTL       	t8, R6
+	  13: INCEIPL       	$4
+
+	0xFECFA98:  60800001  ori r0,r4,0x1
+	  14: GETL       	R4, t10
+	  15: ORL       	$0x1, t10
+	  16: PUTL       	t10, R0
+	  17: INCEIPL       	$4
+
+	0xFECFA9C:  7F874A78  xor r7,r28,r9
+	  18: GETL       	R28, t12
+	  19: GETL       	R9, t14
+	  20: XORL       	t12, t14
+	  21: PUTL       	t14, R7
+	  22: INCEIPL       	$4
+
+	0xFECFAA0:  391F0008  addi r8,r31,8
+	  23: GETL       	R31, t16
+	  24: ADDL       	$0x8, t16
+	  25: PUTL       	t16, R8
+	  26: INCEIPL       	$4
+
+	0xFECFAA4:  3127FFFF  addic r9,r7,-1
+	  27: GETL       	R7, t18
+	  28: ADCL       	$0xFFFFFFFF, t18  (-wCa)
+	  29: PUTL       	t18, R9
+	  30: INCEIPL       	$4
+
+	0xFECFAA8:  7D693910  subfe r11,r9,r7
+	  31: GETL       	R9, t20
+	  32: GETL       	R7, t22
+	  33: SBBL       	t20, t22  (-rCa-wCa)
+	  34: PUTL       	t22, R11
+	  35: INCEIPL       	$4
+
+	0xFECFAAC:  7D3FCA14  add r9,r31,r25
+	  36: GETL       	R31, t24
+	  37: GETL       	R25, t26
+	  38: ADDL       	t24, t26
+	  39: PUTL       	t26, R9
+	  40: INCEIPL       	$4
+
+	0xFECFAB0:  5563103A  rlwinm r3,r11,2,0,29
+	  41: GETL       	R11, t28
+	  42: SHLL       	$0x2, t28
+	  43: PUTL       	t28, R3
+	  44: INCEIPL       	$4
+
+	0xFECFAB4:  913C0034  stw r9,52(r28)
+	  45: GETL       	R9, t30
+	  46: GETL       	R28, t32
+	  47: ADDL       	$0x34, t32
+	  48: STL       	t30, (t32)
+	  49: INCEIPL       	$4
+
+	0xFECFAB8:  7C653378  or r5,r3,r6
+	  50: GETL       	R3, t34
+	  51: GETL       	R6, t36
+	  52: ORL       	t36, t34
+	  53: PUTL       	t34, R5
+	  54: INCEIPL       	$4
+
+	0xFECFABC:  913C0040  stw r9,64(r28)
+	  55: GETL       	R9, t38
+	  56: GETL       	R28, t40
+	  57: ADDL       	$0x40, t40
+	  58: STL       	t38, (t40)
+	  59: INCEIPL       	$4
+
+	0xFECFAC0:  90BF0004  stw r5,4(r31)
+	  60: GETL       	R5, t42
+	  61: GETL       	R31, t44
+	  62: ADDL       	$0x4, t44
+	  63: STL       	t42, (t44)
+	  64: INCEIPL       	$4
+
+	0xFECFAC4:  913C0044  stw r9,68(r28)
+	  65: GETL       	R9, t46
+	  66: GETL       	R28, t48
+	  67: ADDL       	$0x44, t48
+	  68: STL       	t46, (t48)
+	  69: INCEIPL       	$4
+
+	0xFECFAC8:  7C89212E  stwx r4,r9,r4
+	  70: GETL       	R4, t50
+	  71: GETL       	R9, t52
+	  72: ADDL       	t52, t50
+	  73: GETL       	R4, t54
+	  74: STL       	t54, (t50)
+	  75: INCEIPL       	$4
+
+	0xFECFACC:  90090004  stw r0,4(r9)
+	  76: GETL       	R0, t56
+	  77: GETL       	R9, t58
+	  78: ADDL       	$0x4, t58
+	  79: STL       	t56, (t58)
+	  80: INCEIPL       	$4
+
+	0xFECFAD0:  9349000C  stw r26,12(r9)
+	  81: GETL       	R26, t60
+	  82: GETL       	R9, t62
+	  83: ADDL       	$0xC, t62
+	  84: STL       	t60, (t62)
+	  85: INCEIPL       	$4
+
+	0xFECFAD4:  93490008  stw r26,8(r9)
+	  86: GETL       	R26, t64
+	  87: GETL       	R9, t66
+	  88: ADDL       	$0x8, t66
+	  89: STL       	t64, (t66)
+	  90: INCEIPL       	$4
+
+	0xFECFAD8:  4BFFFBD8  b 0xFECF6B0
+	  91: JMPo       	$0xFECF6B0  ($4)
+
+
+
+. 2792 FECFA8C 80
+. 81 3E 05 FC 7C 99 50 50 63 26 00 01 60 80 00 01 7F 87 4A 78 39 1F 00 08 31 27 FF FF 7D 69 39 10 7D 3F CA 14 55 63 10 3A 91 3C 00 34 7C 65 33 78 91 3C 00 40 90 BF 00 04 91 3C 00 44 7C 89 21 2E 90 09 00 04 93 49 00 0C 93 49 00 08 4B FF FB D8
+==== BB 2793 (0xFED83C8) approx BBs exec'd 0 ====
+
+	0xFED83C8:  9CC30001  stbu r6,1(r3)
+	   0: GETL       	R6, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0x1, t2
+	   3: PUTL       	t2, R3
+	   4: STB       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED83CC:  4E800020  blr
+	   6: GETL       	LR, t4
+	   7: JMPo-r       	t4  ($4)
+
+
+
+. 2793 FED83C8 8
+. 9C C3 00 01 4E 80 00 20
+==== BB 2794 (0xFE8DD48) approx BBs exec'd 0 ====
+
+	0xFE8DD48:  3900005F  li r8,95
+	   0: MOVL       	$0x5F, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0xFE8DD4C:  7E84A378  or r4,r20,r20
+	   3: GETL       	R20, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8DD50:  99030000  stb r8,0(r3)
+	   6: GETL       	R8, t4
+	   7: GETL       	R3, t6
+	   8: STB       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE8DD54:  38630001  addi r3,r3,1
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFE8DD58:  4804A5A9  bl 0xFED8300
+	  14: MOVL       	$0xFE8DD5C, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFED8300  ($4)
+
+
+
+. 2794 FE8DD48 20
+. 39 00 00 5F 7E 84 A3 78 99 03 00 00 38 63 00 01 48 04 A5 A9
+==== BB 2795 (0xFE8DD5C) approx BBs exec'd 0 ====
+
+	0xFE8DD5C:  4BFFFD04  b 0xFE8DA60
+	   0: JMPo       	$0xFE8DA60  ($4)
+
+
+
+. 2795 FE8DD5C 4
+. 4B FF FD 04
+==== BB 2796 (0xFE8DD30) approx BBs exec'd 0 ====
+
+	0xFE8DD30:  3940002E  li r10,46
+	   0: MOVL       	$0x2E, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFE8DD34:  7E649B78  or r4,r19,r19
+	   3: GETL       	R19, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8DD38:  99430000  stb r10,0(r3)
+	   6: GETL       	R10, t4
+	   7: GETL       	R3, t6
+	   8: STB       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE8DD3C:  38630001  addi r3,r3,1
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFE8DD40:  4804A5C1  bl 0xFED8300
+	  14: MOVL       	$0xFE8DD44, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFED8300  ($4)
+
+
+
+. 2796 FE8DD30 20
+. 39 40 00 2E 7E 64 9B 78 99 43 00 00 38 63 00 01 48 04 A5 C1
+==== BB 2797 (0xFE8DD44) approx BBs exec'd 0 ====
+
+	0xFE8DD44:  4BFFFD24  b 0xFE8DA68
+	   0: JMPo       	$0xFE8DA68  ($4)
+
+
+
+. 2797 FE8DD44 4
+. 4B FF FD 24
+==== BB 2798 (0xFE8DD18) approx BBs exec'd 0 ====
+
+	0xFE8DD18:  3960002E  li r11,46
+	   0: MOVL       	$0x2E, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFE8DD1C:  7EA4AB78  or r4,r21,r21
+	   3: GETL       	R21, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8DD20:  99630000  stb r11,0(r3)
+	   6: GETL       	R11, t4
+	   7: GETL       	R3, t6
+	   8: STB       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE8DD24:  38630001  addi r3,r3,1
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFE8DD28:  4804A5D9  bl 0xFED8300
+	  14: MOVL       	$0xFE8DD2C, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFED8300  ($4)
+
+
+
+. 2798 FE8DD18 20
+. 39 60 00 2E 7E A4 AB 78 99 63 00 00 38 63 00 01 48 04 A5 D9
+==== BB 2799 (0xFE8DD2C) approx BBs exec'd 0 ====
+
+	0xFE8DD2C:  4BFFFD44  b 0xFE8DA70
+	   0: JMPo       	$0xFE8DA70  ($4)
+
+
+
+. 2799 FE8DD2C 4
+. 4B FF FD 44
+==== BB 2800 (0xFE8DB60) approx BBs exec'd 0 ====
+
+	0xFE8DB60:  80610018  lwz r3,24(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8DB64:  7F44D378  or r4,r26,r26
+	   5: GETL       	R26, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE8DB68:  4804B8B1  bl 0xFED9418
+	   8: MOVL       	$0xFE8DB6C, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFED9418  ($4)
+
+
+
+. 2800 FE8DB60 12
+. 80 61 00 18 7F 44 D3 78 48 04 B8 B1
+==== BB 2801 __argz_count_internal(0xFED9418) approx BBs exec'd 0 ====
+
+	0xFED9418:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFED941C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFED9420:  93A10014  stw r29,20(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFED9424:  7C9D2379  or. r29,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R29
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFED9428:  93810010  stw r28,16(r1)
+	  19: GETL       	R28, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x10, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFED942C:  3B800000  li r28,0
+	  24: MOVL       	$0x0, t18
+	  25: PUTL       	t18, R28
+	  26: INCEIPL       	$4
+
+	0xFED9430:  93E1001C  stw r31,28(r1)
+	  27: GETL       	R31, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFED9434:  7C7F1B78  or r31,r3,r3
+	  32: GETL       	R3, t24
+	  33: PUTL       	t24, R31
+	  34: INCEIPL       	$4
+
+	0xFED9438:  93C10018  stw r30,24(r1)
+	  35: GETL       	R30, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x18, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFED943C:  90010024  stw r0,36(r1)
+	  40: GETL       	R0, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x24, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFED9440:  41820024  bc 12,2,0xFED9464
+	  45: Js02o       	$0xFED9464
+
+
+
+. 2801 FED9418 44
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 7C 9D 23 79 93 81 00 10 3B 80 00 00 93 E1 00 1C 7C 7F 1B 78 93 C1 00 18 90 01 00 24 41 82 00 24
+==== BB 2802 (0xFED9444) approx BBs exec'd 0 ====
+
+	0xFED9444:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFED9448:  3B9C0001  addi r28,r28,1
+	   3: GETL       	R28, t2
+	   4: ADDL       	$0x1, t2
+	   5: PUTL       	t2, R28
+	   6: INCEIPL       	$4
+
+	0xFED944C:  4BFFD68D  bl 0xFED6AD8
+	   7: MOVL       	$0xFED9450, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2802 FED9444 12
+. 7F E3 FB 78 3B 9C 00 01 4B FF D6 8D
+==== BB 2803 (0xFED9450) approx BBs exec'd 0 ====
+
+	0xFED9450:  7FA3E850  subf r29,r3,r29
+	   0: GETL       	R3, t0
+	   1: GETL       	R29, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFED9454:  7C7F1A14  add r3,r31,r3
+	   5: GETL       	R31, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFED9458:  3BE30001  addi r31,r3,1
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R31
+	  13: INCEIPL       	$4
+
+	0xFED945C:  37BDFFFF  addic. r29,r29,-1
+	  14: GETL       	R29, t10
+	  15: ADCL       	$0xFFFFFFFF, t10  (-wCa)
+	  16: PUTL       	t10, R29
+	  17: CMP0L       	t10, t12  (-rSo)
+	  18: ICRFL       	t12, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0xFED9460:  4082FFE4  bc 4,2,0xFED9444
+	  20: Jc02o       	$0xFED9444
+
+
+
+. 2803 FED9450 20
+. 7F A3 E8 50 7C 7F 1A 14 3B E3 00 01 37 BD FF FF 40 82 FF E4
+==== BB 2804 (0xFED9464) approx BBs exec'd 0 ====
+
+	0xFED9464:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFED9468:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED946C:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xFED9470:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0xFED9474:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFED9478:  83C10018  lwz r30,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R30
+	  25: INCEIPL       	$4
+
+	0xFED947C:  83E1001C  lwz r31,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFED9480:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0xFED9484:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+
+. 2804 FED9464 36
+. 80 81 00 24 7F 83 E3 78 83 A1 00 14 83 81 00 10 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 2805 (0xFE8DB6C) approx BBs exec'd 0 ====
+
+	0xFE8DB6C:  38A0AAAA  li r5,-21846
+	   0: MOVL       	$0xFFFFAAAA, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFE8DB70:  7F842838  and r4,r28,r5
+	   3: GETL       	R28, t2
+	   4: GETL       	R5, t4
+	   5: ANDL       	t2, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE8DB74:  739F5555  andi. r31,r28,0x5555
+	   8: GETL       	R28, t6
+	   9: ANDL       	$0x5555, t6
+	  10: PUTL       	t6, R31
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0xFE8DB78:  7C800E70  srawi r0,r4,1
+	  14: GETL       	R4, t10
+	  15: SARL       	$0x1, t10  (-wCa)
+	  16: PUTL       	t10, R0
+	  17: INCEIPL       	$4
+
+	0xFE8DB7C:  3980CCCC  li r12,-13108
+	  18: MOVL       	$0xFFFFCCCC, t12
+	  19: PUTL       	t12, R12
+	  20: INCEIPL       	$4
+
+	0xFE8DB80:  7F00FA14  add r24,r0,r31
+	  21: GETL       	R0, t14
+	  22: GETL       	R31, t16
+	  23: ADDL       	t14, t16
+	  24: PUTL       	t16, R24
+	  25: INCEIPL       	$4
+
+	0xFE8DB84:  7F116038  and r17,r24,r12
+	  26: GETL       	R24, t18
+	  27: GETL       	R12, t20
+	  28: ANDL       	t18, t20
+	  29: PUTL       	t20, R17
+	  30: INCEIPL       	$4
+
+	0xFE8DB88:  730E3333  andi. r14,r24,0x3333
+	  31: GETL       	R24, t22
+	  32: ANDL       	$0x3333, t22
+	  33: PUTL       	t22, R14
+	  34: CMP0L       	t22, t24  (-rSo)
+	  35: ICRFL       	t24, $0x0, CR
+	  36: INCEIPL       	$4
+
+	0xFE8DB8C:  7E2B1670  srawi r11,r17,2
+	  37: GETL       	R17, t26
+	  38: SARL       	$0x2, t26  (-wCa)
+	  39: PUTL       	t26, R11
+	  40: INCEIPL       	$4
+
+	0xFE8DB90:  7D4B7214  add r10,r11,r14
+	  41: GETL       	R11, t28
+	  42: GETL       	R14, t30
+	  43: ADDL       	t28, t30
+	  44: PUTL       	t30, R10
+	  45: INCEIPL       	$4
+
+	0xFE8DB94:  7D482670  srawi r8,r10,4
+	  46: GETL       	R10, t32
+	  47: SARL       	$0x4, t32  (-wCa)
+	  48: PUTL       	t32, R8
+	  49: INCEIPL       	$4
+
+	0xFE8DB98:  7CE85214  add r7,r8,r10
+	  50: GETL       	R8, t34
+	  51: GETL       	R10, t36
+	  52: ADDL       	t34, t36
+	  53: PUTL       	t36, R7
+	  54: INCEIPL       	$4
+
+	0xFE8DB9C:  70E60F0F  andi. r6,r7,0xF0F
+	  55: GETL       	R7, t38
+	  56: ANDL       	$0xF0F, t38
+	  57: PUTL       	t38, R6
+	  58: CMP0L       	t38, t40  (-rSo)
+	  59: ICRFL       	t40, $0x0, CR
+	  60: INCEIPL       	$4
+
+	0xFE8DBA0:  7CC94670  srawi r9,r6,8
+	  61: GETL       	R6, t42
+	  62: SARL       	$0x8, t42  (-wCa)
+	  63: PUTL       	t42, R9
+	  64: INCEIPL       	$4
+
+	0xFE8DBA4:  7CA93214  add r5,r9,r6
+	  65: GETL       	R9, t44
+	  66: GETL       	R6, t46
+	  67: ADDL       	t44, t46
+	  68: PUTL       	t46, R5
+	  69: INCEIPL       	$4
+
+	0xFE8DBA8:  54A4063E  rlwinm r4,r5,0,24,31
+	  70: GETL       	R5, t48
+	  71: ANDL       	$0xFF, t48
+	  72: PUTL       	t48, R4
+	  73: INCEIPL       	$4
+
+	0xFE8DBAC:  7C632030  slw r3,r3,r4
+	  74: GETL       	R3, t52
+	  75: GETL       	R4, t50
+	  76: SHLL       	t50, t52
+	  77: PUTL       	t52, R3
+	  78: INCEIPL       	$4
+
+	0xFE8DBB0:  547F103A  rlwinm r31,r3,2,0,29
+	  79: GETL       	R3, t54
+	  80: SHLL       	$0x2, t54
+	  81: PUTL       	t54, R31
+	  82: INCEIPL       	$4
+
+	0xFE8DBB4:  387F0014  addi r3,r31,20
+	  83: GETL       	R31, t56
+	  84: ADDL       	$0x14, t56
+	  85: PUTL       	t56, R3
+	  86: INCEIPL       	$4
+
+	0xFE8DBB8:  4811C751  bl 0xFFAA308
+	  87: MOVL       	$0xFE8DBBC, t58
+	  88: PUTL       	t58, LR
+	  89: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 2805 FE8DB6C 80
+. 38 A0 AA AA 7F 84 28 38 73 9F 55 55 7C 80 0E 70 39 80 CC CC 7F 00 FA 14 7F 11 60 38 73 0E 33 33 7E 2B 16 70 7D 4B 72 14 7D 48 26 70 7C E8 52 14 70 E6 0F 0F 7C C9 46 70 7C A9 32 14 54 A4 06 3E 7C 63 20 30 54 7F 10 3A 38 7F 00 14 48 11 C7 51
+==== BB 2806 (0xFE8DBBC) approx BBs exec'd 0 ====
+
+	0xFE8DBBC:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE8DBC0:  7C711B79  or. r17,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R17
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFE8DBC4:  41A2FF30  bc 13,2,0xFE8DAF4
+	   8: Js02o       	$0xFE8DAF4
+
+
+
+. 2806 FE8DBBC 12
+. 38 00 00 00 7C 71 1B 79 41 A2 FF 30
+==== BB 2807 (0xFE8DBC8) approx BBs exec'd 0 ====
+
+	0xFE8DBC8:  80610018  lwz r3,24(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8DBCC:  7F44D378  or r4,r26,r26
+	   5: GETL       	R26, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE8DBD0:  93B10000  stw r29,0(r17)
+	   8: GETL       	R29, t6
+	   9: GETL       	R17, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE8DBD4:  4804B845  bl 0xFED9418
+	  12: MOVL       	$0xFE8DBD8, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0xFED9418  ($4)
+
+
+
+. 2807 FE8DBC8 16
+. 80 61 00 18 7F 44 D3 78 93 B1 00 00 48 04 B8 45
+==== BB 2808 (0xFE8DBD8) approx BBs exec'd 0 ====
+
+	0xFE8DBD8:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE8DBDC:  2F030001  cmpi cr6,r3,1
+	   3: GETL       	R3, t2
+	   4: MOVL       	$0x1, t6
+	   5: CMPL       	t2, t6, t4  (-rSo)
+	   6: ICRFL       	t4, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0xFE8DBE0:  419A0180  bc 12,26,0xFE8DD60
+	   8: Js26o       	$0xFE8DD60
+
+
+
+. 2808 FE8DBD8 12
+. 38 00 00 00 2F 03 00 01 41 9A 01 80
+==== BB 2809 (0xFE8DD60) approx BBs exec'd 0 ====
+
+	0xFE8DD60:  73290001  andi. r9,r25,0x1
+	   0: GETL       	R25, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8DD64:  41A2FE84  bc 13,2,0xFE8DBE8
+	   6: Js02o       	$0xFE8DBE8
+
+
+
+. 2809 FE8DD60 8
+. 73 29 00 01 41 A2 FE 84
+==== BB 2810 (0xFE8DD68) approx BBs exec'd 0 ====
+
+	0xFE8DD68:  2F970000  cmpi cr7,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DD6C:  41BEFE7C  bc 13,30,0xFE8DBE8
+	   4: Js30o       	$0xFE8DBE8
+
+
+
+. 2810 FE8DD68 8
+. 2F 97 00 00 41 BE FE 7C
+==== BB 2811 (0xFE8DD70) approx BBs exec'd 0 ====
+
+	0xFE8DD70:  4BFFFE74  b 0xFE8DBE4
+	   0: JMPo       	$0xFE8DBE4  ($4)
+
+
+
+. 2811 FE8DD70 4
+. 4B FF FE 74
+==== BB 2812 (0xFE8DBE4) approx BBs exec'd 0 ====
+
+	0xFE8DBE4:  38000001  li r0,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE8DBE8:  2C9B0000  cmpi cr1,r27,0
+	   3: GETL       	R27, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x1, CR
+	   6: INCEIPL       	$4
+
+	0xFE8DBEC:  3AE00000  li r23,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R23
+	   9: INCEIPL       	$4
+
+	0xFE8DBF0:  90110004  stw r0,4(r17)
+	  10: GETL       	R0, t8
+	  11: GETL       	R17, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFE8DBF4:  92F10008  stw r23,8(r17)
+	  15: GETL       	R23, t12
+	  16: GETL       	R17, t14
+	  17: ADDL       	$0x8, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFE8DBF8:  4186019C  bc 12,6,0xFE8DD94
+	  20: Js06o       	$0xFE8DD94
+
+
+
+. 2812 FE8DBE4 24
+. 38 00 00 01 2C 9B 00 00 3A E0 00 00 90 11 00 04 92 F1 00 08 41 86 01 9C
+==== BB 2813 (0xFE8DD94) approx BBs exec'd 0 ====
+
+	0xFE8DD94:  83320000  lwz r25,0(r18)
+	   0: GETL       	R18, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R25
+	   3: INCEIPL       	$4
+
+	0xFE8DD98:  9331000C  stw r25,12(r17)
+	   4: GETL       	R25, t4
+	   5: GETL       	R17, t6
+	   6: ADDL       	$0xC, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFE8DD9C:  92320000  stw r17,0(r18)
+	   9: GETL       	R17, t8
+	  10: GETL       	R18, t10
+	  11: STL       	t8, (t10)
+	  12: INCEIPL       	$4
+
+	0xFE8DDA0:  4BFFFE68  b 0xFE8DC08
+	  13: JMPo       	$0xFE8DC08  ($4)
+
+
+
+. 2813 FE8DD94 16
+. 83 32 00 00 93 31 00 0C 92 32 00 00 4B FF FE 68
+==== BB 2814 (0xFE8DC08) approx BBs exec'd 0 ====
+
+	0xFE8DC08:  80610018  lwz r3,24(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8DC0C:  7F44D378  or r4,r26,r26
+	   5: GETL       	R26, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE8DC10:  3B000000  li r24,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R24
+	  10: INCEIPL       	$4
+
+	0xFE8DC14:  4804B805  bl 0xFED9418
+	  11: MOVL       	$0xFE8DC18, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0xFED9418  ($4)
+
+
+
+. 2814 FE8DC08 16
+. 80 61 00 18 7F 44 D3 78 3B 00 00 00 48 04 B8 05
+==== BB 2815 (0xFE8DC18) approx BBs exec'd 0 ====
+
+	0xFE8DC18:  68630001  xori r3,r3,0x1
+	   0: GETL       	R3, t0
+	   1: XORL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE8DC1C:  20030000  subfic r0,r3,0
+	   4: GETL       	R3, t2
+	   5: MOVL       	$0x0, t4
+	   6: SBBL       	t2, t4  (-wCa)
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE8DC20:  7C601914  adde r3,r0,r3
+	   9: GETL       	R0, t6
+	  10: GETL       	R3, t8
+	  11: ADCL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFE8DC24:  7F63E051  subf. r27,r3,r28
+	  14: GETL       	R3, t10
+	  15: GETL       	R28, t12
+	  16: SUBL       	t10, t12
+	  17: PUTL       	t12, R27
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0xFE8DC28:  4180014C  bc 12,0,0xFE8DD74
+	  21: Js00o       	$0xFE8DD74
+
+
+
+. 2815 FE8DC18 20
+. 68 63 00 01 20 03 00 00 7C 60 19 14 7F 63 E0 51 41 80 01 4C
+==== BB 2816 (0xFE8DC2C) approx BBs exec'd 0 ====
+
+	0xFE8DC2C:  2E1A0000  cmpi cr4,r26,0
+	   0: GETL       	R26, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DC30:  7F8EE0F8  nor r14,r28,r28
+	   4: GETL       	R28, t4
+	   5: NOTL       	t4
+	   6: PUTL       	t4, R14
+	   7: INCEIPL       	$4
+
+	0xFE8DC34:  83810018  lwz r28,24(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0xFE8DC38:  7F3CD214  add r25,r28,r26
+	  13: GETL       	R28, t10
+	  14: GETL       	R26, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R25
+	  17: INCEIPL       	$4
+
+	0xFE8DC3C:  7E3C8B78  or r28,r17,r17
+	  18: GETL       	R17, t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFE8DC40:  4800000C  b 0xFE8DC4C
+	  21: JMPo       	$0xFE8DC4C  ($4)
+
+
+
+. 2816 FE8DC2C 24
+. 2E 1A 00 00 7F 8E E0 F8 83 81 00 18 7F 3C D2 14 7E 3C 8B 78 48 00 00 0C
+==== BB 2817 (0xFE8DC4C) approx BBs exec'd 0 ====
+
+	0xFE8DC4C:  7DCBD839  and. r11,r14,r27
+	   0: GETL       	R14, t0
+	   1: GETL       	R27, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R11
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFE8DC50:  3BA00000  li r29,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFE8DC54:  3AE00001  li r23,1
+	  10: MOVL       	$0x1, t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0xFE8DC58:  3B5C000C  addi r26,r28,12
+	  13: GETL       	R28, t10
+	  14: ADDL       	$0xC, t10
+	  15: PUTL       	t10, R26
+	  16: INCEIPL       	$4
+
+	0xFE8DC5C:  40A2FFE8  bc 5,2,0xFE8DC44
+	  17: Jc02o       	$0xFE8DC44
+
+
+
+. 2817 FE8DC4C 20
+. 7D CB D8 39 3B A0 00 00 3A E0 00 01 3B 5C 00 0C 40 A2 FF E8
+==== BB 2818 (0xFE8DC60) approx BBs exec'd 0 ====
+
+	0xFE8DC60:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DC64:  7F19E840  cmpl cr6,r25,r29
+	   4: GETL       	R25, t4
+	   5: GETL       	R29, t6
+	   6: CMPUL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFE8DC68:  7FA0EB78  or r0,r29,r29
+	   9: GETL       	R29, t10
+	  10: PUTL       	t10, R0
+	  11: INCEIPL       	$4
+
+	0xFE8DC6C:  419E0084  bc 12,30,0xFE8DCF0
+	  12: Js30o       	$0xFE8DCF0
+
+
+
+. 2818 FE8DC60 16
+. 2F 9D 00 00 7F 19 E8 40 7F A0 EB 78 41 9E 00 84
+==== BB 2819 (0xFE8DCF0) approx BBs exec'd 0 ====
+
+	0xFE8DCF0:  83E10018  lwz r31,24(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFE8DCF4:  40B2FF9C  bc 5,18,0xFE8DC90
+	   5: Jc18o       	$0xFE8DC90
+
+
+
+. 2819 FE8DCF0 8
+. 83 E1 00 18 40 B2 FF 9C
+==== BB 2820 (0xFE8DC90) approx BBs exec'd 0 ====
+
+	0xFE8DC90:  2C9F0000  cmpi cr1,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DC94:  7FE3FB78  or r3,r31,r31
+	   4: GETL       	R31, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFE8DC98:  7FFDFB78  or r29,r31,r31
+	   7: GETL       	R31, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFE8DC9C:  41A6FFA8  bc 13,6,0xFE8DC44
+	  10: Js06o       	$0xFE8DC44
+
+
+
+. 2820 FE8DC90 16
+. 2C 9F 00 00 7F E3 FB 78 7F FD FB 78 41 A6 FF A8
+==== BB 2821 (0xFE8DCA0) approx BBs exec'd 0 ====
+
+	0xFE8DCA0:  48048E39  bl 0xFED6AD8
+	   0: MOVL       	$0xFE8DCA4, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 2821 FE8DCA0 4
+. 48 04 8E 39
+==== BB 2822 (0xFE8DCA4) approx BBs exec'd 0 ====
+
+	0xFE8DCA4:  7FE4FB78  or r4,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE8DCA8:  38A30001  addi r5,r3,1
+	   3: GETL       	R3, t2
+	   4: ADDL       	$0x1, t2
+	   5: PUTL       	t2, R5
+	   6: INCEIPL       	$4
+
+	0xFE8DCAC:  7F66DB78  or r6,r27,r27
+	   7: GETL       	R27, t4
+	   8: PUTL       	t4, R6
+	   9: INCEIPL       	$4
+
+	0xFE8DCB0:  7E439378  or r3,r18,r18
+	  10: GETL       	R18, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0xFE8DCB4:  7EC7B378  or r7,r22,r22
+	  13: GETL       	R22, t8
+	  14: PUTL       	t8, R7
+	  15: INCEIPL       	$4
+
+	0xFE8DCB8:  7E88A378  or r8,r20,r20
+	  16: GETL       	R20, t10
+	  17: PUTL       	t10, R8
+	  18: INCEIPL       	$4
+
+	0xFE8DCBC:  7E699B78  or r9,r19,r19
+	  19: GETL       	R19, t12
+	  20: PUTL       	t12, R9
+	  21: INCEIPL       	$4
+
+	0xFE8DCC0:  7EAAAB78  or r10,r21,r21
+	  22: GETL       	R21, t14
+	  23: PUTL       	t14, R10
+	  24: INCEIPL       	$4
+
+	0xFE8DCC4:  91E10008  stw r15,8(r1)
+	  25: GETL       	R15, t16
+	  26: GETL       	R1, t18
+	  27: ADDL       	$0x8, t18
+	  28: STL       	t16, (t18)
+	  29: INCEIPL       	$4
+
+	0xFE8DCC8:  9201000C  stw r16,12(r1)
+	  30: GETL       	R16, t20
+	  31: GETL       	R1, t22
+	  32: ADDL       	$0xC, t22
+	  33: STL       	t20, (t22)
+	  34: INCEIPL       	$4
+
+	0xFE8DCCC:  3B180001  addi r24,r24,1
+	  35: GETL       	R24, t24
+	  36: ADDL       	$0x1, t24
+	  37: PUTL       	t24, R24
+	  38: INCEIPL       	$4
+
+	0xFE8DCD0:  92E10010  stw r23,16(r1)
+	  39: GETL       	R23, t26
+	  40: GETL       	R1, t28
+	  41: ADDL       	$0x10, t28
+	  42: STL       	t26, (t28)
+	  43: INCEIPL       	$4
+
+	0xFE8DCD4:  4BFFFC29  bl 0xFE8D8FC
+	  44: MOVL       	$0xFE8DCD8, t30
+	  45: PUTL       	t30, LR
+	  46: JMPo-c       	$0xFE8D8FC  ($4)
+
+
+
+. 2822 FE8DCA4 52
+. 7F E4 FB 78 38 A3 00 01 7F 66 DB 78 7E 43 93 78 7E C7 B3 78 7E 88 A3 78 7E 69 9B 78 7E AA AB 78 91 E1 00 08 92 01 00 0C 3B 18 00 01 92 E1 00 10 4B FF FC 29
+==== BB 2823 (0xFE8DA98) approx BBs exec'd 0 ====
+
+	0xFE8DA98:  823F0000  lwz r17,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R17
+	   3: INCEIPL       	$4
+
+	0xFE8DA9C:  7FA4EB78  or r4,r29,r29
+	   4: GETL       	R29, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0xFE8DAA0:  2E110000  cmpi cr4,r17,0
+	   7: GETL       	R17, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x4, CR
+	  10: INCEIPL       	$4
+
+	0xFE8DAA4:  7E238B78  or r3,r17,r17
+	  11: GETL       	R17, t10
+	  12: PUTL       	t10, R3
+	  13: INCEIPL       	$4
+
+	0xFE8DAA8:  41920018  bc 12,18,0xFE8DAC0
+	  14: Js18o       	$0xFE8DAC0
+
+
+
+. 2823 FE8DA98 20
+. 82 3F 00 00 7F A4 EB 78 2E 11 00 00 7E 23 8B 78 41 92 00 18
+==== BB 2824 (0xFE8DAAC) approx BBs exec'd 0 ====
+
+	0xFE8DAAC:  48048905  bl 0xFED63B0
+	   0: MOVL       	$0xFE8DAB0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 2824 FE8DAAC 4
+. 48 04 89 05
+==== BB 2825 (0xFE8DAB0) approx BBs exec'd 0 ====
+
+	0xFE8DAB0:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DAB4:  419E0018  bc 12,30,0xFE8DACC
+	   4: Js30o       	$0xFE8DACC
+
+
+
+. 2825 FE8DAB0 8
+. 2F 83 00 00 41 9E 00 18
+==== BB 2826 (0xFE8DAB8) approx BBs exec'd 0 ====
+
+	0xFE8DAB8:  419C02D4  bc 12,28,0xFE8DD8C
+	   0: Js28o       	$0xFE8DD8C
+
+
+
+. 2826 FE8DAB8 4
+. 41 9C 02 D4
+==== BB 2827 (0xFE8DD8C) approx BBs exec'd 0 ====
+
+	0xFE8DD8C:  3BE00000  li r31,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFE8DD90:  4BFFFD3C  b 0xFE8DACC
+	   3: JMPo       	$0xFE8DACC  ($4)
+
+
+
+. 2827 FE8DD8C 8
+. 3B E0 00 00 4B FF FD 3C
+==== BB 2828 (0xFE8DBE8) approx BBs exec'd 0 ====
+
+	0xFE8DBE8:  2C9B0000  cmpi cr1,r27,0
+	   0: GETL       	R27, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DBEC:  3AE00000  li r23,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R23
+	   6: INCEIPL       	$4
+
+	0xFE8DBF0:  90110004  stw r0,4(r17)
+	   7: GETL       	R0, t6
+	   8: GETL       	R17, t8
+	   9: ADDL       	$0x4, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE8DBF4:  92F10008  stw r23,8(r17)
+	  12: GETL       	R23, t10
+	  13: GETL       	R17, t12
+	  14: ADDL       	$0x8, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0xFE8DBF8:  4186019C  bc 12,6,0xFE8DD94
+	  17: Js06o       	$0xFE8DD94
+
+
+
+. 2828 FE8DBE8 20
+. 2C 9B 00 00 3A E0 00 00 90 11 00 04 92 F1 00 08 41 86 01 9C
+==== BB 2829 (0xFE8DC44) approx BBs exec'd 0 ====
+
+	0xFE8DC44:  377BFFFF  addic. r27,r27,-1
+	   0: GETL       	R27, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R27
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8DC48:  4180012C  bc 12,0,0xFE8DD74
+	   6: Js00o       	$0xFE8DD74
+
+
+
+. 2829 FE8DC44 8
+. 37 7B FF FF 41 80 01 2C
+==== BB 2830 (0xFED64E0) approx BBs exec'd 0 ====
+
+	0xFED64E0:  3CE0FEFF  lis r7,-257
+	   0: MOVL       	$0xFEFF0000, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFED64E4:  3D007F7F  lis r8,32639
+	   3: MOVL       	$0x7F7F0000, t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0xFED64E8:  80C40000  lwz r6,0(r4)
+	   6: GETL       	R4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0xFED64EC:  38E7FEFF  addi r7,r7,-257
+	  10: MOVL       	$0xFEFEFEFF, t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0xFED64F0:  39087F7F  addi r8,r8,32639
+	  13: MOVL       	$0x7F7F7F7F, t10
+	  14: PUTL       	t10, R8
+	  15: INCEIPL       	$4
+
+	0xFED64F4:  48000024  b 0xFED6518
+	  16: JMPo       	$0xFED6518  ($4)
+
+
+
+. 2830 FED64E0 24
+. 3C E0 FE FF 3D 00 7F 7F 80 C4 00 00 38 E7 FE FF 39 08 7F 7F 48 00 00 24
+==== BB 2831 (0xFED6518) approx BBs exec'd 0 ====
+
+	0xFED6518:  7C073214  add r0,r7,r6
+	   0: GETL       	R7, t0
+	   1: GETL       	R6, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFED651C:  7D0930F8  nor r9,r8,r6
+	   5: GETL       	R8, t4
+	   6: GETL       	R6, t6
+	   7: ORL       	t6, t4
+	   8: NOTL       	t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0xFED6520:  7C004839  and. r0,r0,r9
+	  11: GETL       	R0, t8
+	  12: GETL       	R9, t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R0
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0xFED6524:  4182FFD4  bc 12,2,0xFED64F8
+	  18: Js02o       	$0xFED64F8
+
+
+
+. 2831 FED6518 16
+. 7C 07 32 14 7D 09 30 F8 7C 00 48 39 41 82 FF D4
+==== BB 2832 (0xFED64F8) approx BBs exec'd 0 ====
+
+	0xFED64F8:  85440004  lwzu r10,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0xFED64FC:  94C50004  stwu r6,4(r5)
+	   6: GETL       	R6, t4
+	   7: GETL       	R5, t6
+	   8: ADDL       	$0x4, t6
+	   9: PUTL       	t6, R5
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED6500:  7C075214  add r0,r7,r10
+	  12: GETL       	R7, t8
+	  13: GETL       	R10, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFED6504:  7D0950F8  nor r9,r8,r10
+	  17: GETL       	R8, t12
+	  18: GETL       	R10, t14
+	  19: ORL       	t14, t12
+	  20: NOTL       	t12
+	  21: PUTL       	t12, R9
+	  22: INCEIPL       	$4
+
+	0xFED6508:  7C004839  and. r0,r0,r9
+	  23: GETL       	R0, t16
+	  24: GETL       	R9, t18
+	  25: ANDL       	t16, t18
+	  26: PUTL       	t18, R0
+	  27: CMP0L       	t18, t20  (-rSo)
+	  28: ICRFL       	t20, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0xFED650C:  40820020  bc 4,2,0xFED652C
+	  30: Jc02o       	$0xFED652C
+
+
+
+. 2832 FED64F8 24
+. 85 44 00 04 94 C5 00 04 7C 07 52 14 7D 09 50 F8 7C 00 48 39 40 82 00 20
+==== BB 2833 (0xFED6510) approx BBs exec'd 0 ====
+
+	0xFED6510:  84C40004  lwzu r6,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFED6514:  95450004  stwu r10,4(r5)
+	   6: GETL       	R10, t4
+	   7: GETL       	R5, t6
+	   8: ADDL       	$0x4, t6
+	   9: PUTL       	t6, R5
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED6518:  7C073214  add r0,r7,r6
+	  12: GETL       	R7, t8
+	  13: GETL       	R6, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFED651C:  7D0930F8  nor r9,r8,r6
+	  17: GETL       	R8, t12
+	  18: GETL       	R6, t14
+	  19: ORL       	t14, t12
+	  20: NOTL       	t12
+	  21: PUTL       	t12, R9
+	  22: INCEIPL       	$4
+
+	0xFED6520:  7C004839  and. r0,r0,r9
+	  23: GETL       	R0, t16
+	  24: GETL       	R9, t18
+	  25: ANDL       	t16, t18
+	  26: PUTL       	t18, R0
+	  27: CMP0L       	t18, t20  (-rSo)
+	  28: ICRFL       	t20, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0xFED6524:  4182FFD4  bc 12,2,0xFED64F8
+	  30: Js02o       	$0xFED64F8
+
+
+
+. 2833 FED6510 24
+. 84 C4 00 04 95 45 00 04 7C 07 32 14 7D 09 30 F8 7C 00 48 39 41 82 FF D4
+==== BB 2834 (0xFED6528) approx BBs exec'd 0 ====
+
+	0xFED6528:  7CCA3378  or r10,r6,r6
+	   0: GETL       	R6, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFED652C:  5540463F  rlwinm. r0,r10,8,24,31
+	   3: GETL       	R10, t2
+	   4: SHRL       	$0x18, t2
+	   5: PUTL       	t2, R0
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFED6530:  98050004  stb r0,4(r5)
+	   9: GETL       	R0, t6
+	  10: GETL       	R5, t8
+	  11: ADDL       	$0x4, t8
+	  12: STB       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFED6534:  4D820020  bclr 12,2
+	  14: GETL       	LR, t10
+	  15: Js02o-r       	t10
+
+
+
+. 2834 FED6528 16
+. 7C CA 33 78 55 40 46 3F 98 05 00 04 4D 82 00 20
+==== BB 2835 (0xFE8DABC) approx BBs exec'd 0 ====
+
+	0xFE8DABC:  7FFBFB78  or r27,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0xFE8DAC0:  83FF000C  lwz r31,12(r31)
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0xFE8DAC4:  2C9F0000  cmpi cr1,r31,0
+	   8: GETL       	R31, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0xFE8DAC8:  4086FFD0  bc 4,6,0xFE8DA98
+	  12: Jc06o       	$0xFE8DA98
+
+
+
+. 2835 FE8DABC 16
+. 7F FB FB 78 83 FF 00 0C 2C 9F 00 00 40 86 FF D0
+==== BB 2836 (0xFE8DBFC) approx BBs exec'd 0 ====
+
+	0xFE8DBFC:  83BB000C  lwz r29,12(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFE8DC00:  93B1000C  stw r29,12(r17)
+	   5: GETL       	R29, t4
+	   6: GETL       	R17, t6
+	   7: ADDL       	$0xC, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE8DC04:  923B000C  stw r17,12(r27)
+	  10: GETL       	R17, t8
+	  11: GETL       	R27, t10
+	  12: ADDL       	$0xC, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFE8DC08:  80610018  lwz r3,24(r1)
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x18, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R3
+	  19: INCEIPL       	$4
+
+	0xFE8DC0C:  7F44D378  or r4,r26,r26
+	  20: GETL       	R26, t16
+	  21: PUTL       	t16, R4
+	  22: INCEIPL       	$4
+
+	0xFE8DC10:  3B000000  li r24,0
+	  23: MOVL       	$0x0, t18
+	  24: PUTL       	t18, R24
+	  25: INCEIPL       	$4
+
+	0xFE8DC14:  4804B805  bl 0xFED9418
+	  26: MOVL       	$0xFE8DC18, t20
+	  27: PUTL       	t20, LR
+	  28: JMPo-c       	$0xFED9418  ($4)
+
+
+
+. 2836 FE8DBFC 28
+. 83 BB 00 0C 93 B1 00 0C 92 3B 00 0C 80 61 00 18 7F 44 D3 78 3B 00 00 00 48 04 B8 05
+==== BB 2837 (0xFE8DD74) approx BBs exec'd 0 ====
+
+	0xFE8DD74:  5712103A  rlwinm r18,r24,2,0,29
+	   0: GETL       	R24, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R18
+	   3: INCEIPL       	$4
+
+	0xFE8DD78:  3A000000  li r16,0
+	   4: MOVL       	$0x0, t2
+	   5: PUTL       	t2, R16
+	   6: INCEIPL       	$4
+
+	0xFE8DD7C:  7DF28A14  add r15,r18,r17
+	   7: GETL       	R18, t4
+	   8: GETL       	R17, t6
+	   9: ADDL       	t4, t6
+	  10: PUTL       	t6, R15
+	  11: INCEIPL       	$4
+
+	0xFE8DD80:  7E208B78  or r0,r17,r17
+	  12: GETL       	R17, t8
+	  13: PUTL       	t8, R0
+	  14: INCEIPL       	$4
+
+	0xFE8DD84:  920F0010  stw r16,16(r15)
+	  15: GETL       	R16, t10
+	  16: GETL       	R15, t12
+	  17: ADDL       	$0x10, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0xFE8DD88:  4BFFFD6C  b 0xFE8DAF4
+	  20: JMPo       	$0xFE8DAF4  ($4)
+
+
+
+. 2837 FE8DD74 24
+. 57 12 10 3A 3A 00 00 00 7D F2 8A 14 7E 20 8B 78 92 0F 00 10 4B FF FD 6C
+==== BB 2838 (0xFE8DAF4) approx BBs exec'd 0 ====
+
+	0xFE8DAF4:  82610074  lwz r19,116(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0xFE8DAF8:  7C030378  or r3,r0,r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE8DAFC:  81210024  lwz r9,36(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x24, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0xFE8DB00:  7E6803A6  mtlr r19
+	  13: GETL       	R19, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFE8DB04:  81C10028  lwz r14,40(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x28, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R14
+	  20: INCEIPL       	$4
+
+	0xFE8DB08:  81E1002C  lwz r15,44(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x2C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R15
+	  25: INCEIPL       	$4
+
+	0xFE8DB0C:  7D208120  mtcrf 0x8,r9
+	  26: GETL       	R9, t20
+	  27: ICRFL       	t20, $0x4, CR
+	  28: INCEIPL       	$4
+
+	0xFE8DB10:  82010030  lwz r16,48(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x30, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R16
+	  33: INCEIPL       	$4
+
+	0xFE8DB14:  82210034  lwz r17,52(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x34, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R17
+	  38: INCEIPL       	$4
+
+	0xFE8DB18:  82410038  lwz r18,56(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x38, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R18
+	  43: INCEIPL       	$4
+
+	0xFE8DB1C:  8261003C  lwz r19,60(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x3C, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R19
+	  48: INCEIPL       	$4
+
+	0xFE8DB20:  82810040  lwz r20,64(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x40, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R20
+	  53: INCEIPL       	$4
+
+	0xFE8DB24:  82A10044  lwz r21,68(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x44, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R21
+	  58: INCEIPL       	$4
+
+	0xFE8DB28:  82C10048  lwz r22,72(r1)
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x48, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R22
+	  63: INCEIPL       	$4
+
+	0xFE8DB2C:  82E1004C  lwz r23,76(r1)
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x4C, t50
+	  66: LDL       	(t50), t52
+	  67: PUTL       	t52, R23
+	  68: INCEIPL       	$4
+
+	0xFE8DB30:  83010050  lwz r24,80(r1)
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x50, t54
+	  71: LDL       	(t54), t56
+	  72: PUTL       	t56, R24
+	  73: INCEIPL       	$4
+
+	0xFE8DB34:  83210054  lwz r25,84(r1)
+	  74: GETL       	R1, t58
+	  75: ADDL       	$0x54, t58
+	  76: LDL       	(t58), t60
+	  77: PUTL       	t60, R25
+	  78: INCEIPL       	$4
+
+	0xFE8DB38:  83410058  lwz r26,88(r1)
+	  79: GETL       	R1, t62
+	  80: ADDL       	$0x58, t62
+	  81: LDL       	(t62), t64
+	  82: PUTL       	t64, R26
+	  83: INCEIPL       	$4
+
+	0xFE8DB3C:  8361005C  lwz r27,92(r1)
+	  84: GETL       	R1, t66
+	  85: ADDL       	$0x5C, t66
+	  86: LDL       	(t66), t68
+	  87: PUTL       	t68, R27
+	  88: INCEIPL       	$4
+
+	0xFE8DB40:  83810060  lwz r28,96(r1)
+	  89: GETL       	R1, t70
+	  90: ADDL       	$0x60, t70
+	  91: LDL       	(t70), t72
+	  92: PUTL       	t72, R28
+	  93: INCEIPL       	$4
+
+	0xFE8DB44:  83A10064  lwz r29,100(r1)
+	  94: GETL       	R1, t74
+	  95: ADDL       	$0x64, t74
+	  96: LDL       	(t74), t76
+	  97: PUTL       	t76, R29
+	  98: INCEIPL       	$4
+
+	0xFE8DB48:  83C10068  lwz r30,104(r1)
+	  99: GETL       	R1, t78
+	 100: ADDL       	$0x68, t78
+	 101: LDL       	(t78), t80
+	 102: PUTL       	t80, R30
+	 103: INCEIPL       	$4
+
+	0xFE8DB4C:  83E1006C  lwz r31,108(r1)
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x6C, t82
+	 106: LDL       	(t82), t84
+	 107: PUTL       	t84, R31
+	 108: INCEIPL       	$4
+
+	0xFE8DB50:  38210070  addi r1,r1,112
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0x70, t86
+	 111: PUTL       	t86, R1
+	 112: INCEIPL       	$4
+
+	0xFE8DB54:  4E800020  blr
+	 113: GETL       	LR, t88
+	 114: JMPo-r       	t88  ($4)
+
+
+
+. 2838 FE8DAF4 100
+. 82 61 00 74 7C 03 03 78 81 21 00 24 7E 68 03 A6 81 C1 00 28 81 E1 00 2C 7D 20 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+==== BB 2839 (0xFE8DCD8) approx BBs exec'd 0 ====
+
+	0xFE8DCD8:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE8DCDC:  947A0004  stwu r3,4(r26)
+	   4: GETL       	R3, t4
+	   5: GETL       	R26, t6
+	   6: ADDL       	$0x4, t6
+	   7: PUTL       	t6, R26
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE8DCE0:  3B9C0004  addi r28,r28,4
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x4, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0xFE8DCE4:  7F19E840  cmpl cr6,r25,r29
+	  14: GETL       	R25, t10
+	  15: GETL       	R29, t12
+	  16: CMPUL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0xFE8DCE8:  7FA0EB78  or r0,r29,r29
+	  19: GETL       	R29, t16
+	  20: PUTL       	t16, R0
+	  21: INCEIPL       	$4
+
+	0xFE8DCEC:  409EFF84  bc 4,30,0xFE8DC70
+	  22: Jc30o       	$0xFE8DC70
+
+
+
+. 2839 FE8DCD8 24
+. 2F 9D 00 00 94 7A 00 04 3B 9C 00 04 7F 19 E8 40 7F A0 EB 78 40 9E FF 84
+==== BB 2840 (0xFE8DC70) approx BBs exec'd 0 ====
+
+	0xFE8DC70:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8DC74:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE8DC78:  4099000C  bc 4,25,0xFE8DC84
+	   6: Jc25o       	$0xFE8DC84
+
+
+
+. 2840 FE8DC70 12
+. 7F A3 EB 78 38 80 00 00 40 99 00 0C
+==== BB 2841 (0xFE8DC7C) approx BBs exec'd 0 ====
+
+	0xFE8DC7C:  4804B4A5  bl 0xFED9120
+	   0: MOVL       	$0xFE8DC80, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFED9120  ($4)
+
+
+
+. 2841 FE8DC7C 4
+. 48 04 B4 A5
+==== BB 2842 rawmemchr(0xFED9120) approx BBs exec'd 0 ====
+
+	0xFED9120:  70600003  andi. r0,r3,0x3
+	   0: GETL       	R3, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED9124:  9421FFF0  stwu r1,-16(r1)
+	   6: GETL       	R1, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xFFFFFFF0, t6
+	   9: PUTL       	t6, R1
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED9128:  5484063E  rlwinm r4,r4,0,24,31
+	  12: GETL       	R4, t8
+	  13: ANDL       	$0xFF, t8
+	  14: PUTL       	t8, R4
+	  15: INCEIPL       	$4
+
+	0xFED912C:  4182001C  bc 12,2,0xFED9148
+	  16: Js02o       	$0xFED9148
+
+
+
+. 2842 FED9120 16
+. 70 60 00 03 94 21 FF F0 54 84 06 3E 41 82 00 1C
+==== BB 2843 (0xFED9148) approx BBs exec'd 0 ====
+
+	0xFED9148:  5486402E  rlwinm r6,r4,8,0,23
+	   0: GETL       	R4, t0
+	   1: SHLL       	$0x8, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0xFED914C:  3D007EFE  lis r8,32510
+	   4: MOVL       	$0x7EFE0000, t2
+	   5: PUTL       	t2, R8
+	   6: INCEIPL       	$4
+
+	0xFED9150:  7C853378  or r5,r4,r6
+	   7: GETL       	R4, t4
+	   8: GETL       	R6, t6
+	   9: ORL       	t6, t4
+	  10: PUTL       	t4, R5
+	  11: INCEIPL       	$4
+
+	0xFED9154:  3D608101  lis r11,-32511
+	  12: MOVL       	$0x81010000, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0xFED9158:  54A9801E  rlwinm r9,r5,16,0,15
+	  15: GETL       	R5, t10
+	  16: SHLL       	$0x10, t10
+	  17: PUTL       	t10, R9
+	  18: INCEIPL       	$4
+
+	0xFED915C:  6108FEFF  ori r8,r8,0xFEFF
+	  19: MOVL       	$0x7EFEFEFF, t12
+	  20: PUTL       	t12, R8
+	  21: INCEIPL       	$4
+
+	0xFED9160:  7CAA4B78  or r10,r5,r9
+	  22: GETL       	R5, t14
+	  23: GETL       	R9, t16
+	  24: ORL       	t16, t14
+	  25: PUTL       	t14, R10
+	  26: INCEIPL       	$4
+
+	0xFED9164:  616B0100  ori r11,r11,0x100
+	  27: MOVL       	$0x81010100, t18
+	  28: PUTL       	t18, R11
+	  29: INCEIPL       	$4
+
+	0xFED9168:  80030000  lwz r0,0(r3)
+	  30: GETL       	R3, t20
+	  31: LDL       	(t20), t22
+	  32: PUTL       	t22, R0
+	  33: INCEIPL       	$4
+
+	0xFED916C:  38630004  addi r3,r3,4
+	  34: GETL       	R3, t24
+	  35: ADDL       	$0x4, t24
+	  36: PUTL       	t24, R3
+	  37: INCEIPL       	$4
+
+	0xFED9170:  7C0C5278  xor r12,r0,r10
+	  38: GETL       	R0, t26
+	  39: GETL       	R10, t28
+	  40: XORL       	t26, t28
+	  41: PUTL       	t28, R12
+	  42: INCEIPL       	$4
+
+	0xFED9174:  7D2C4214  add r9,r12,r8
+	  43: GETL       	R12, t30
+	  44: GETL       	R8, t32
+	  45: ADDL       	t30, t32
+	  46: PUTL       	t32, R9
+	  47: INCEIPL       	$4
+
+	0xFED9178:  7D874A78  xor r7,r12,r9
+	  48: GETL       	R12, t34
+	  49: GETL       	R9, t36
+	  50: XORL       	t34, t36
+	  51: PUTL       	t36, R7
+	  52: INCEIPL       	$4
+
+	0xFED917C:  7D693879  andc. r9,r11,r7
+	  53: GETL       	R11, t38
+	  54: GETL       	R7, t40
+	  55: NOTL       	t40
+	  56: ANDL       	t38, t40
+	  57: PUTL       	t40, R9
+	  58: CMP0L       	t40, t42  (-rSo)
+	  59: ICRFL       	t42, $0x0, CR
+	  60: INCEIPL       	$4
+
+	0xFED9180:  3923FFFC  addi r9,r3,-4
+	  61: GETL       	R3, t44
+	  62: ADDL       	$0xFFFFFFFC, t44
+	  63: PUTL       	t44, R9
+	  64: INCEIPL       	$4
+
+	0xFED9184:  41A2FFE4  bc 13,2,0xFED9168
+	  65: Js02o       	$0xFED9168
+
+
+
+. 2843 FED9148 64
+. 54 86 40 2E 3D 00 7E FE 7C 85 33 78 3D 60 81 01 54 A9 80 1E 61 08 FE FF 7C AA 4B 78 61 6B 01 00 80 03 00 00 38 63 00 04 7C 0C 52 78 7D 2C 42 14 7D 87 4A 78 7D 69 38 79 39 23 FF FC 41 A2 FF E4
+==== BB 2844 (0xFED9168) approx BBs exec'd 0 ====
+
+	0xFED9168:  80030000  lwz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFED916C:  38630004  addi r3,r3,4
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFED9170:  7C0C5278  xor r12,r0,r10
+	   8: GETL       	R0, t6
+	   9: GETL       	R10, t8
+	  10: XORL       	t6, t8
+	  11: PUTL       	t8, R12
+	  12: INCEIPL       	$4
+
+	0xFED9174:  7D2C4214  add r9,r12,r8
+	  13: GETL       	R12, t10
+	  14: GETL       	R8, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0xFED9178:  7D874A78  xor r7,r12,r9
+	  18: GETL       	R12, t14
+	  19: GETL       	R9, t16
+	  20: XORL       	t14, t16
+	  21: PUTL       	t16, R7
+	  22: INCEIPL       	$4
+
+	0xFED917C:  7D693879  andc. r9,r11,r7
+	  23: GETL       	R11, t18
+	  24: GETL       	R7, t20
+	  25: NOTL       	t20
+	  26: ANDL       	t18, t20
+	  27: PUTL       	t20, R9
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0xFED9180:  3923FFFC  addi r9,r3,-4
+	  31: GETL       	R3, t24
+	  32: ADDL       	$0xFFFFFFFC, t24
+	  33: PUTL       	t24, R9
+	  34: INCEIPL       	$4
+
+	0xFED9184:  41A2FFE4  bc 13,2,0xFED9168
+	  35: Js02o       	$0xFED9168
+
+
+
+. 2844 FED9168 32
+. 80 03 00 00 38 63 00 04 7C 0C 52 78 7D 2C 42 14 7D 87 4A 78 7D 69 38 79 39 23 FF FC 41 A2 FF E4
+==== BB 2845 (0xFED9188) approx BBs exec'd 0 ====
+
+	0xFED9188:  88A3FFFC  lbz r5,-4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFED918C:  7C852000  cmp cr1,r5,r4
+	   5: GETL       	R5, t4
+	   6: GETL       	R4, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFED9190:  41860034  bc 12,6,0xFED91C4
+	  10: Js06o       	$0xFED91C4
+
+
+
+. 2845 FED9188 12
+. 88 A3 FF FC 7C 85 20 00 41 86 00 34
+==== BB 2846 (0xFED9194) approx BBs exec'd 0 ====
+
+	0xFED9194:  88C90001  lbz r6,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFED9198:  7F062000  cmp cr6,r6,r4
+	   5: GETL       	R6, t4
+	   6: GETL       	R4, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFED919C:  419A0034  bc 12,26,0xFED91D0
+	  10: Js26o       	$0xFED91D0
+
+
+
+. 2846 FED9194 12
+. 88 C9 00 01 7F 06 20 00 41 9A 00 34
+==== BB 2847 (0xFED91D0) approx BBs exec'd 0 ====
+
+	0xFED91D0:  3863FFFD  addi r3,r3,-3
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFD, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFED91D4:  38210010  addi r1,r1,16
+	   4: GETL       	R1, t2
+	   5: ADDL       	$0x10, t2
+	   6: PUTL       	t2, R1
+	   7: INCEIPL       	$4
+
+	0xFED91D8:  4E800020  blr
+	   8: GETL       	LR, t4
+	   9: JMPo-r       	t4  ($4)
+
+
+
+. 2847 FED91D0 12
+. 38 63 FF FD 38 21 00 10 4E 80 00 20
+==== BB 2848 (0xFE8DC80) approx BBs exec'd 0 ====
+
+	0xFE8DC80:  38030001  addi r0,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xFE8DC84:  7F190040  cmpl cr6,r25,r0
+	   4: GETL       	R25, t2
+	   5: GETL       	R0, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFE8DC88:  7C1F0378  or r31,r0,r0
+	   9: GETL       	R0, t8
+	  10: PUTL       	t8, R31
+	  11: INCEIPL       	$4
+
+	0xFE8DC8C:  4099006C  bc 4,25,0xFE8DCF8
+	  12: Jc25o       	$0xFE8DCF8
+
+
+
+. 2848 FE8DC80 16
+. 38 03 00 01 7F 19 00 40 7C 1F 03 78 40 99 00 6C
+==== BB 2849 (0xFE8DCF8) approx BBs exec'd 0 ====
+
+	0xFE8DCF8:  3BE00000  li r31,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFE8DCFC:  4BFFFF94  b 0xFE8DC90
+	   3: JMPo       	$0xFE8DC90  ($4)
+
+
+
+. 2849 FE8DCF8 8
+. 3B E0 00 00 4B FF FF 94
+==== BB 2850 (0xFED835C) approx BBs exec'd 0 ====
+
+	0xFED835C:  5540463F  rlwinm. r0,r10,8,24,31
+	   0: GETL       	R10, t0
+	   1: SHRL       	$0x18, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED8360:  9C030004  stbu r0,4(r3)
+	   6: GETL       	R0, t4
+	   7: GETL       	R3, t6
+	   8: ADDL       	$0x4, t6
+	   9: PUTL       	t6, R3
+	  10: STB       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED8364:  4D820020  bclr 12,2
+	  12: GETL       	LR, t8
+	  13: Js02o-r       	t8
+
+
+
+. 2850 FED835C 12
+. 55 40 46 3F 9C 03 00 04 4D 82 00 20
+==== BB 2851 (0xFE8B92C) approx BBs exec'd 0 ====
+
+	0xFE8B92C:  801C00A8  lwz r0,168(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0xA8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8B930:  7C7F1B78  or r31,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0xFE8B934:  2C800000  cmpi cr1,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0xFE8B938:  40860098  bc 4,6,0xFE8B9D0
+	  12: Jc06o       	$0xFE8B9D0
+
+
+
+. 2851 FE8B92C 16
+. 80 1C 00 A8 7C 7F 1B 78 2C 80 00 00 40 86 00 98
+==== BB 2852 (0xFE8B93C) approx BBs exec'd 0 ====
+
+	0xFE8B93C:  2F1F0000  cmpi cr6,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE8B940:  38600000  li r3,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFE8B944:  41BAFEE8  bc 13,26,0xFE8B82C
+	   7: Js26o       	$0xFE8B82C
+
+
+
+. 2852 FE8B93C 12
+. 2F 1F 00 00 38 60 00 00 41 BA FE E8
+==== BB 2853 (0xFE8B948) approx BBs exec'd 0 ====
+
+	0xFE8B948:  815F0004  lwz r10,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFE8B94C:  2C0A0000  cmpi cr0,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFE8B950:  408100B8  bc 4,1,0xFE8BA08
+	   9: Jc01o       	$0xFE8BA08
+
+
+
+. 2853 FE8B948 12
+. 81 5F 00 04 2C 0A 00 00 40 81 00 B8
+==== BB 2854 (0xFE8B954) approx BBs exec'd 0 ====
+
+	0xFE8B954:  839F0008  lwz r28,8(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFE8B958:  2F9C0000  cmpi cr7,r28,0
+	   5: GETL       	R28, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFE8B95C:  409E0058  bc 4,30,0xFE8B9B4
+	   9: Jc30o       	$0xFE8B9B4
+
+
+
+. 2854 FE8B954 12
+. 83 9F 00 08 2F 9C 00 00 40 9E 00 58
+==== BB 2855 (0xFE8B960) approx BBs exec'd 0 ====
+
+	0xFE8B960:  819F0010  lwz r12,16(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFE8B964:  3B800000  li r28,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFE8B968:  2C8C0000  cmpi cr1,r12,0
+	   8: GETL       	R12, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0xFE8B96C:  41860048  bc 12,6,0xFE8B9B4
+	  12: Js06o       	$0xFE8B9B4
+
+
+
+. 2855 FE8B960 16
+. 81 9F 00 10 3B 80 00 00 2C 8C 00 00 41 86 00 48
+==== BB 2856 (0xFE8B970) approx BBs exec'd 0 ====
+
+	0xFE8B970:  39400000  li r10,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFE8B974:  7FAAFA14  add r29,r10,r31
+	   3: GETL       	R10, t2
+	   4: GETL       	R31, t4
+	   5: ADDL       	t2, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFE8B978:  3B9C0001  addi r28,r28,1
+	   8: GETL       	R28, t6
+	   9: ADDL       	$0x1, t6
+	  10: PUTL       	t6, R28
+	  11: INCEIPL       	$4
+
+	0xFE8B97C:  813D0010  lwz r9,16(r29)
+	  12: GETL       	R29, t8
+	  13: ADDL       	$0x10, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R9
+	  16: INCEIPL       	$4
+
+	0xFE8B980:  7F64DB78  or r4,r27,r27
+	  17: GETL       	R27, t12
+	  18: PUTL       	t12, R4
+	  19: INCEIPL       	$4
+
+	0xFE8B984:  80090004  lwz r0,4(r9)
+	  20: GETL       	R9, t14
+	  21: ADDL       	$0x4, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R0
+	  24: INCEIPL       	$4
+
+	0xFE8B988:  7D234B78  or r3,r9,r9
+	  25: GETL       	R9, t18
+	  26: PUTL       	t18, R3
+	  27: INCEIPL       	$4
+
+	0xFE8B98C:  2F000000  cmpi cr6,r0,0
+	  28: GETL       	R0, t20
+	  29: CMP0L       	t20, t22  (-rSo)
+	  30: ICRFL       	t22, $0x6, CR
+	  31: INCEIPL       	$4
+
+	0xFE8B990:  4099006C  bc 4,25,0xFE8B9FC
+	  32: Jc25o       	$0xFE8B9FC
+
+
+
+. 2856 FE8B970 36
+. 39 40 00 00 7F AA FA 14 3B 9C 00 01 81 3D 00 10 7F 64 DB 78 80 09 00 04 7D 23 4B 78 2F 00 00 00 40 99 00 6C
+==== BB 2857 (0xFE8B9FC) approx BBs exec'd 0 ====
+
+	0xFE8B9FC:  48000399  bl 0xFE8BD94
+	   0: MOVL       	$0xFE8BA00, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFE8BD94  ($4)
+
+
+
+. 2857 FE8B9FC 4
+. 48 00 03 99
+==== BB 2858 _nl_load_domain(0xFE8BD94) approx BBs exec'd 0 ====
+
+	0xFE8BD94:  9421FEF0  stwu r1,-272(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFEF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE8BD98:  7CA802A6  mflr r5
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFE8BD9C:  4811C0B5  bl 0xFFA7E50
+	   9: MOVL       	$0xFE8BDA0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2858 FE8BD94 12
+. 94 21 FE F0 7C A8 02 A6 48 11 C0 B5
+==== BB 2859 (0xFE8BDA0) approx BBs exec'd 0 ====
+
+	0xFE8BDA0:  93C10108  stw r30,264(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x108, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8BDA4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE8BDA8:  936100FC  stw r27,252(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xFC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE8BDAC:  90A10114  stw r5,276(r1)
+	  13: GETL       	R5, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x114, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE8BDB0:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0xFE8BDB4:  93A10104  stw r29,260(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x104, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE8BDB8:  3BA28BA0  addi r29,r2,-29792
+	  26: GETL       	R2, t20
+	  27: ADDL       	$0xFFFF8BA0, t20
+	  28: PUTL       	t20, R29
+	  29: INCEIPL       	$4
+
+	0xFE8BDBC:  837E01A4  lwz r27,420(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0x1A4, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R27
+	  34: INCEIPL       	$4
+
+	0xFE8BDC0:  926100DC  stw r19,220(r1)
+	  35: GETL       	R19, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0xDC, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFE8BDC4:  7C932378  or r19,r4,r4
+	  40: GETL       	R4, t30
+	  41: PUTL       	t30, R19
+	  42: INCEIPL       	$4
+
+	0xFE8BDC8:  801B0008  lwz r0,8(r27)
+	  43: GETL       	R27, t32
+	  44: ADDL       	$0x8, t32
+	  45: LDL       	(t32), t34
+	  46: PUTL       	t34, R0
+	  47: INCEIPL       	$4
+
+	0xFE8BDCC:  930100F0  stw r24,240(r1)
+	  48: GETL       	R24, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0xF0, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0xFE8BDD0:  3B000000  li r24,0
+	  53: MOVL       	$0x0, t40
+	  54: PUTL       	t40, R24
+	  55: INCEIPL       	$4
+
+	0xFE8BDD4:  7F80E800  cmp cr7,r0,r29
+	  56: GETL       	R0, t42
+	  57: GETL       	R29, t44
+	  58: CMPL       	t42, t44, t46  (-rSo)
+	  59: ICRFL       	t46, $0x7, CR
+	  60: INCEIPL       	$4
+
+	0xFE8BDD8:  934100F8  stw r26,248(r1)
+	  61: GETL       	R26, t48
+	  62: GETL       	R1, t50
+	  63: ADDL       	$0xF8, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0xFE8BDDC:  93E1010C  stw r31,268(r1)
+	  66: GETL       	R31, t52
+	  67: GETL       	R1, t54
+	  68: ADDL       	$0x10C, t54
+	  69: STL       	t52, (t54)
+	  70: INCEIPL       	$4
+
+	0xFE8BDE0:  7C7A1B78  or r26,r3,r3
+	  71: GETL       	R3, t56
+	  72: PUTL       	t56, R26
+	  73: INCEIPL       	$4
+
+	0xFE8BDE4:  91C100C8  stw r14,200(r1)
+	  74: GETL       	R14, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0xC8, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0xFE8BDE8:  7C3F0B78  or r31,r1,r1
+	  79: GETL       	R1, t62
+	  80: PUTL       	t62, R31
+	  81: INCEIPL       	$4
+
+	0xFE8BDEC:  91E100CC  stw r15,204(r1)
+	  82: GETL       	R15, t64
+	  83: GETL       	R1, t66
+	  84: ADDL       	$0xCC, t66
+	  85: STL       	t64, (t66)
+	  86: INCEIPL       	$4
+
+	0xFE8BDF0:  920100D0  stw r16,208(r1)
+	  87: GETL       	R16, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0xD0, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0xFE8BDF4:  922100D4  stw r17,212(r1)
+	  92: GETL       	R17, t72
+	  93: GETL       	R1, t74
+	  94: ADDL       	$0xD4, t74
+	  95: STL       	t72, (t74)
+	  96: INCEIPL       	$4
+
+	0xFE8BDF8:  924100D8  stw r18,216(r1)
+	  97: GETL       	R18, t76
+	  98: GETL       	R1, t78
+	  99: ADDL       	$0xD8, t78
+	 100: STL       	t76, (t78)
+	 101: INCEIPL       	$4
+
+	0xFE8BDFC:  928100E0  stw r20,224(r1)
+	 102: GETL       	R20, t80
+	 103: GETL       	R1, t82
+	 104: ADDL       	$0xE0, t82
+	 105: STL       	t80, (t82)
+	 106: INCEIPL       	$4
+
+	0xFE8BE00:  92A100E4  stw r21,228(r1)
+	 107: GETL       	R21, t84
+	 108: GETL       	R1, t86
+	 109: ADDL       	$0xE4, t86
+	 110: STL       	t84, (t86)
+	 111: INCEIPL       	$4
+
+	0xFE8BE04:  92C100E8  stw r22,232(r1)
+	 112: GETL       	R22, t88
+	 113: GETL       	R1, t90
+	 114: ADDL       	$0xE8, t90
+	 115: STL       	t88, (t90)
+	 116: INCEIPL       	$4
+
+	0xFE8BE08:  92E100EC  stw r23,236(r1)
+	 117: GETL       	R23, t92
+	 118: GETL       	R1, t94
+	 119: ADDL       	$0xEC, t94
+	 120: STL       	t92, (t94)
+	 121: INCEIPL       	$4
+
+	0xFE8BE0C:  932100F4  stw r25,244(r1)
+	 122: GETL       	R25, t96
+	 123: GETL       	R1, t98
+	 124: ADDL       	$0xF4, t98
+	 125: STL       	t96, (t98)
+	 126: INCEIPL       	$4
+
+	0xFE8BE10:  93810100  stw r28,256(r1)
+	 127: GETL       	R28, t100
+	 128: GETL       	R1, t102
+	 129: ADDL       	$0x100, t102
+	 130: STL       	t100, (t102)
+	 131: INCEIPL       	$4
+
+	0xFE8BE14:  918100C4  stw r12,196(r1)
+	 132: GETL       	R12, t104
+	 133: GETL       	R1, t106
+	 134: ADDL       	$0xC4, t106
+	 135: STL       	t104, (t106)
+	 136: INCEIPL       	$4
+
+	0xFE8BE18:  419E002C  bc 12,30,0xFE8BE44
+	 137: Js30o       	$0xFE8BE44
+
+
+
+. 2859 FE8BDA0 124
+. 93 C1 01 08 7F C8 02 A6 93 61 00 FC 90 A1 01 14 7D 80 00 26 93 A1 01 04 3B A2 8B A0 83 7E 01 A4 92 61 00 DC 7C 93 23 78 80 1B 00 08 93 01 00 F0 3B 00 00 00 7F 80 E8 00 93 41 00 F8 93 E1 01 0C 7C 7A 1B 78 91 C1 00 C8 7C 3F 0B 78 91 E1 00 CC 92 01 00 D0 92 21 00 D4 92 41 00 D8 92 81 00 E0 92 A1 00 E4 92 C1 00 E8 92 E1 00 EC 93 21 00 F4 93 81 01 00 91 81 00 C4 41 9E 00 2C
+==== BB 2860 (0xFE8BE1C) approx BBs exec'd 0 ====
+
+	0xFE8BE1C:  38600001  li r3,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8BE20:  7D20D828  lwarx r9,r0,r27
+	   3: GETL       	R27, t2
+	   4: LOCKo       	
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFE8BE24:  7C09C000  cmp cr0,r9,r24
+	   8: GETL       	R9, t6
+	   9: GETL       	R24, t8
+	  10: CMPL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFE8BE28:  4082000C  bc 4,2,0xFE8BE34
+	  13: Jc02o       	$0xFE8BE34
+
+
+
+. 2860 FE8BE1C 16
+. 38 60 00 01 7D 20 D8 28 7C 09 C0 00 40 82 00 0C
+==== BB 2861 (0xFE8BE2C) approx BBs exec'd 0 ====
+
+	0xFE8BE2C:  7C60D92D  stwcx. r3,r0,r27
+	   0: GETL       	R27, t0
+	   1: GETL       	R3, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8BE30:  40A2FFF0  bc 5,2,0xFE8BE20
+	   6: Jc02o       	$0xFE8BE20
+
+
+
+. 2861 FE8BE2C 8
+. 7C 60 D9 2D 40 A2 FF F0
+==== BB 2862 (0xFE8BE20) approx BBs exec'd 0 ====
+
+	0xFE8BE20:  7D20D828  lwarx r9,r0,r27
+	   0: GETL       	R27, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE8BE24:  7C09C000  cmp cr0,r9,r24
+	   5: GETL       	R9, t4
+	   6: GETL       	R24, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFE8BE28:  4082000C  bc 4,2,0xFE8BE34
+	  10: Jc02o       	$0xFE8BE34
+
+
+
+. 2862 FE8BE20 12
+. 7D 20 D8 28 7C 09 C0 00 40 82 00 0C
+==== BB 2863 (0xFE8BE34) approx BBs exec'd 0 ====
+
+	0xFE8BE34:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFE8BE38:  2C090000  cmpi cr0,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8BE3C:  40820614  bc 4,2,0xFE8C450
+	   5: Jc02o       	$0xFE8C450
+
+
+
+. 2863 FE8BE34 12
+. 4C 00 01 2C 2C 09 00 00 40 82 06 14
+==== BB 2864 (0xFE8BE40) approx BBs exec'd 0 ====
+
+	0xFE8BE40:  93BB0008  stw r29,8(r27)
+	   0: GETL       	R29, t0
+	   1: GETL       	R27, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8BE44:  813B0004  lwz r9,4(r27)
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFE8BE48:  38890001  addi r4,r9,1
+	  10: GETL       	R9, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0xFE8BE4C:  909B0004  stw r4,4(r27)
+	  14: GETL       	R4, t10
+	  15: GETL       	R27, t12
+	  16: ADDL       	$0x4, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFE8BE50:  817A0004  lwz r11,4(r26)
+	  19: GETL       	R26, t14
+	  20: ADDL       	$0x4, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R11
+	  23: INCEIPL       	$4
+
+	0xFE8BE54:  2C8B0000  cmpi cr1,r11,0
+	  24: GETL       	R11, t18
+	  25: CMP0L       	t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x1, CR
+	  27: INCEIPL       	$4
+
+	0xFE8BE58:  41860090  bc 12,6,0xFE8BEE8
+	  28: Js06o       	$0xFE8BEE8
+
+
+
+. 2864 FE8BE40 28
+. 93 BB 00 08 81 3B 00 04 38 89 00 01 90 9B 00 04 81 7A 00 04 2C 8B 00 00 41 86 00 90
+==== BB 2865 (0xFE8BEE8) approx BBs exec'd 0 ====
+
+	0xFE8BEE8:  807A0000  lwz r3,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE8BEEC:  38E0FFFF  li r7,-1
+	   4: MOVL       	$0xFFFFFFFF, t4
+	   5: PUTL       	t4, R7
+	   6: INCEIPL       	$4
+
+	0xFE8BEF0:  90FA0004  stw r7,4(r26)
+	   7: GETL       	R7, t6
+	   8: GETL       	R26, t8
+	   9: ADDL       	$0x4, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE8BEF4:  2F830000  cmpi cr7,r3,0
+	  12: GETL       	R3, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0xFE8BEF8:  917A0008  stw r11,8(r26)
+	  16: GETL       	R11, t14
+	  17: GETL       	R26, t16
+	  18: ADDL       	$0x8, t16
+	  19: STL       	t14, (t16)
+	  20: INCEIPL       	$4
+
+	0xFE8BEFC:  409E005C  bc 4,30,0xFE8BF58
+	  21: Jc30o       	$0xFE8BF58
+
+
+
+. 2865 FE8BEE8 24
+. 80 7A 00 00 38 E0 FF FF 90 FA 00 04 2F 83 00 00 91 7A 00 08 40 9E 00 5C
+==== BB 2866 (0xFE8BF58) approx BBs exec'd 0 ====
+
+	0xFE8BF58:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE8BF5C:  48095DC5  bl 0xFF21D20
+	   3: MOVL       	$0xFE8BF60, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFF21D20  ($4)
+
+
+
+. 2866 FE8BF58 8
+. 38 80 00 00 48 09 5D C5
+==== BB 2867 (0xFF21D2C) approx BBs exec'd 0 ====
+
+	0xFF21D2C:  4BF59C5C  b 0xFE7B988
+	   0: JMPo       	$0xFE7B988  ($4)
+
+
+
+. 2867 FF21D2C 4
+. 4B F5 9C 5C
+==== BB 2868 __syscall_error(0xFE7B988) approx BBs exec'd 0 ====
+
+	0xFE7B988:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE7B98C:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFE7B990:  4812C4C1  bl 0xFFA7E50
+	   9: MOVL       	$0xFE7B994, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2868 FE7B988 12
+. 94 21 FF F0 7D 88 02 A6 48 12 C4 C1
+==== BB 2869 (0xFE7B994) approx BBs exec'd 0 ====
+
+	0xFE7B994:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE7B998:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE7B99C:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0xFE7B9A0:  809E1C4C  lwz r4,7244(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x1C4C, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFE7B9A4:  83C10008  lwz r30,8(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x8, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0xFE7B9A8:  7D241214  add r9,r4,r2
+	  21: GETL       	R4, t16
+	  22: GETL       	R2, t18
+	  23: ADDL       	t16, t18
+	  24: PUTL       	t18, R9
+	  25: INCEIPL       	$4
+
+	0xFE7B9AC:  38210010  addi r1,r1,16
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x10, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0xFE7B9B0:  90690000  stw r3,0(r9)
+	  30: GETL       	R3, t22
+	  31: GETL       	R9, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE7B9B4:  3860FFFF  li r3,-1
+	  34: MOVL       	$0xFFFFFFFF, t26
+	  35: PUTL       	t26, R3
+	  36: INCEIPL       	$4
+
+	0xFE7B9B8:  4E800020  blr
+	  37: GETL       	LR, t28
+	  38: JMPo-r       	t28  ($4)
+
+
+
+. 2869 FE7B994 40
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 9E 1C 4C 83 C1 00 08 7D 24 12 14 38 21 00 10 90 69 00 00 38 60 FF FF 4E 80 00 20
+==== BB 2870 (0xFE8BF60) approx BBs exec'd 0 ====
+
+	0xFE8BF60:  2E03FFFF  cmpi cr4,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0xFE8BF64:  7C791B78  or r25,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R25
+	   7: INCEIPL       	$4
+
+	0xFE8BF68:  41B2FF98  bc 13,18,0xFE8BF00
+	   8: Js18o       	$0xFE8BF00
+
+
+
+. 2870 FE8BF60 12
+. 2E 03 FF FF 7C 79 1B 78 41 B2 FF 98
+==== BB 2871 (0xFE8BF00) approx BBs exec'd 0 ====
+
+	0xFE8BF00:  3B200001  li r25,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0xFE8BF04:  933A0004  stw r25,4(r26)
+	   3: GETL       	R25, t2
+	   4: GETL       	R26, t4
+	   5: ADDL       	$0x4, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFE8BF08:  82DB0004  lwz r22,4(r27)
+	   8: GETL       	R27, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R22
+	  12: INCEIPL       	$4
+
+	0xFE8BF0C:  3936FFFF  addi r9,r22,-1
+	  13: GETL       	R22, t10
+	  14: ADDL       	$0xFFFFFFFF, t10
+	  15: PUTL       	t10, R9
+	  16: INCEIPL       	$4
+
+	0xFE8BF10:  2F890000  cmpi cr7,r9,0
+	  17: GETL       	R9, t12
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0xFE8BF14:  913B0004  stw r9,4(r27)
+	  21: GETL       	R9, t16
+	  22: GETL       	R27, t18
+	  23: ADDL       	$0x4, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE8BF18:  40BEFF6C  bc 5,30,0xFE8BE84
+	  26: Jc30o       	$0xFE8BE84
+
+
+
+. 2871 FE8BF00 28
+. 3B 20 00 01 93 3A 00 04 82 DB 00 04 39 36 FF FF 2F 89 00 00 91 3B 00 04 40 BE FF 6C
+==== BB 2872 (0xFE8BF1C) approx BBs exec'd 0 ====
+
+	0xFE8BF1C:  913B0008  stw r9,8(r27)
+	   0: GETL       	R9, t0
+	   1: GETL       	R27, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8BF20:  7C0004AC  sync
+	   5: INCEIPL       	$4
+
+	0xFE8BF24:  7F40D828  lwarx r26,r0,r27
+	   6: GETL       	R27, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R26
+	  10: INCEIPL       	$4
+
+	0xFE8BF28:  7D20D92D  stwcx. r9,r0,r27
+	  11: GETL       	R27, t8
+	  12: GETL       	R9, t10
+	  13: LOCKo       	
+	  14: STL       	t10, (t8)  (-rSo)
+	  15: ICRFL       	cr, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFE8BF2C:  40A2FFF8  bc 5,2,0xFE8BF24
+	  17: Jc02o       	$0xFE8BF24
+
+
+
+. 2872 FE8BF1C 20
+. 91 3B 00 08 7C 00 04 AC 7F 40 D8 28 7D 20 D9 2D 40 A2 FF F8
+==== BB 2873 (0xFE8BF30) approx BBs exec'd 0 ====
+
+	0xFE8BF30:  2C1A0001  cmpi cr0,r26,1
+	   0: GETL       	R26, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8BF34:  4081FF50  bc 4,1,0xFE8BE84
+	   5: Jc01o       	$0xFE8BE84
+
+
+
+. 2873 FE8BF30 8
+. 2C 1A 00 01 40 81 FF 50
+==== BB 2874 (0xFE8BE84) approx BBs exec'd 0 ====
+
+	0xFE8BE84:  80A10000  lwz r5,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFE8BE88:  83650004  lwz r27,4(r5)
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFE8BE8C:  8145FFB4  lwz r10,-76(r5)
+	   9: GETL       	R5, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0xFE8BE90:  7F6803A6  mtlr r27
+	  14: GETL       	R27, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFE8BE94:  81C5FFB8  lwz r14,-72(r5)
+	  17: GETL       	R5, t14
+	  18: ADDL       	$0xFFFFFFB8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R14
+	  21: INCEIPL       	$4
+
+	0xFE8BE98:  81E5FFBC  lwz r15,-68(r5)
+	  22: GETL       	R5, t18
+	  23: ADDL       	$0xFFFFFFBC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R15
+	  26: INCEIPL       	$4
+
+	0xFE8BE9C:  7D408120  mtcrf 0x8,r10
+	  27: GETL       	R10, t22
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0xFE8BEA0:  8205FFC0  lwz r16,-64(r5)
+	  30: GETL       	R5, t24
+	  31: ADDL       	$0xFFFFFFC0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R16
+	  34: INCEIPL       	$4
+
+	0xFE8BEA4:  8225FFC4  lwz r17,-60(r5)
+	  35: GETL       	R5, t28
+	  36: ADDL       	$0xFFFFFFC4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R17
+	  39: INCEIPL       	$4
+
+	0xFE8BEA8:  8245FFC8  lwz r18,-56(r5)
+	  40: GETL       	R5, t32
+	  41: ADDL       	$0xFFFFFFC8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R18
+	  44: INCEIPL       	$4
+
+	0xFE8BEAC:  8265FFCC  lwz r19,-52(r5)
+	  45: GETL       	R5, t36
+	  46: ADDL       	$0xFFFFFFCC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R19
+	  49: INCEIPL       	$4
+
+	0xFE8BEB0:  8285FFD0  lwz r20,-48(r5)
+	  50: GETL       	R5, t40
+	  51: ADDL       	$0xFFFFFFD0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R20
+	  54: INCEIPL       	$4
+
+	0xFE8BEB4:  82A5FFD4  lwz r21,-44(r5)
+	  55: GETL       	R5, t44
+	  56: ADDL       	$0xFFFFFFD4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R21
+	  59: INCEIPL       	$4
+
+	0xFE8BEB8:  82C5FFD8  lwz r22,-40(r5)
+	  60: GETL       	R5, t48
+	  61: ADDL       	$0xFFFFFFD8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R22
+	  64: INCEIPL       	$4
+
+	0xFE8BEBC:  82E5FFDC  lwz r23,-36(r5)
+	  65: GETL       	R5, t52
+	  66: ADDL       	$0xFFFFFFDC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R23
+	  69: INCEIPL       	$4
+
+	0xFE8BEC0:  8305FFE0  lwz r24,-32(r5)
+	  70: GETL       	R5, t56
+	  71: ADDL       	$0xFFFFFFE0, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R24
+	  74: INCEIPL       	$4
+
+	0xFE8BEC4:  8325FFE4  lwz r25,-28(r5)
+	  75: GETL       	R5, t60
+	  76: ADDL       	$0xFFFFFFE4, t60
+	  77: LDL       	(t60), t62
+	  78: PUTL       	t62, R25
+	  79: INCEIPL       	$4
+
+	0xFE8BEC8:  8345FFE8  lwz r26,-24(r5)
+	  80: GETL       	R5, t64
+	  81: ADDL       	$0xFFFFFFE8, t64
+	  82: LDL       	(t64), t66
+	  83: PUTL       	t66, R26
+	  84: INCEIPL       	$4
+
+	0xFE8BECC:  8365FFEC  lwz r27,-20(r5)
+	  85: GETL       	R5, t68
+	  86: ADDL       	$0xFFFFFFEC, t68
+	  87: LDL       	(t68), t70
+	  88: PUTL       	t70, R27
+	  89: INCEIPL       	$4
+
+	0xFE8BED0:  8385FFF0  lwz r28,-16(r5)
+	  90: GETL       	R5, t72
+	  91: ADDL       	$0xFFFFFFF0, t72
+	  92: LDL       	(t72), t74
+	  93: PUTL       	t74, R28
+	  94: INCEIPL       	$4
+
+	0xFE8BED4:  83A5FFF4  lwz r29,-12(r5)
+	  95: GETL       	R5, t76
+	  96: ADDL       	$0xFFFFFFF4, t76
+	  97: LDL       	(t76), t78
+	  98: PUTL       	t78, R29
+	  99: INCEIPL       	$4
+
+	0xFE8BED8:  83C5FFF8  lwz r30,-8(r5)
+	 100: GETL       	R5, t80
+	 101: ADDL       	$0xFFFFFFF8, t80
+	 102: LDL       	(t80), t82
+	 103: PUTL       	t82, R30
+	 104: INCEIPL       	$4
+
+	0xFE8BEDC:  83E5FFFC  lwz r31,-4(r5)
+	 105: GETL       	R5, t84
+	 106: ADDL       	$0xFFFFFFFC, t84
+	 107: LDL       	(t84), t86
+	 108: PUTL       	t86, R31
+	 109: INCEIPL       	$4
+
+	0xFE8BEE0:  7CA12B78  or r1,r5,r5
+	 110: GETL       	R5, t88
+	 111: PUTL       	t88, R1
+	 112: INCEIPL       	$4
+
+	0xFE8BEE4:  4E800020  blr
+	 113: GETL       	LR, t90
+	 114: JMPo-r       	t90  ($4)
+
+
+
+. 2874 FE8BE84 100
+. 80 A1 00 00 83 65 00 04 81 45 FF B4 7F 68 03 A6 81 C5 FF B8 81 E5 FF BC 7D 40 81 20 82 05 FF C0 82 25 FF C4 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+==== BB 2875 (0xFE8BA00) approx BBs exec'd 0 ====
+
+	0xFE8BA00:  813D0010  lwz r9,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE8BA04:  4BFFFF90  b 0xFE8B994
+	   5: JMPo       	$0xFE8B994  ($4)
+
+
+
+. 2875 FE8BA00 8
+. 81 3D 00 10 4B FF FF 90
+==== BB 2876 (0xFE8B994) approx BBs exec'd 0 ====
+
+	0xFE8B994:  80690008  lwz r3,8(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8B998:  578A103A  rlwinm r10,r28,2,0,29
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0xFE8B99C:  7D6AFA14  add r11,r10,r31
+	   9: GETL       	R10, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0xFE8B9A0:  2C030000  cmpi cr0,r3,0
+	  14: GETL       	R3, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0xFE8B9A4:  40820010  bc 4,2,0xFE8B9B4
+	  18: Jc02o       	$0xFE8B9B4
+
+
+
+. 2876 FE8B994 20
+. 80 69 00 08 57 8A 10 3A 7D 6A FA 14 2C 03 00 00 40 82 00 10
+==== BB 2877 (0xFE8B9A8) approx BBs exec'd 0 ====
+
+	0xFE8B9A8:  808B0010  lwz r4,16(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8B9AC:  2F840000  cmpi cr7,r4,0
+	   5: GETL       	R4, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFE8B9B0:  409EFFC4  bc 4,30,0xFE8B974
+	   9: Jc30o       	$0xFE8B974
+
+
+
+. 2877 FE8B9A8 12
+. 80 8B 00 10 2F 84 00 00 40 9E FF C4
+==== BB 2878 (0xFE8B974) approx BBs exec'd 0 ====
+
+	0xFE8B974:  7FAAFA14  add r29,r10,r31
+	   0: GETL       	R10, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFE8B978:  3B9C0001  addi r28,r28,1
+	   5: GETL       	R28, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0xFE8B97C:  813D0010  lwz r9,16(r29)
+	   9: GETL       	R29, t6
+	  10: ADDL       	$0x10, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFE8B980:  7F64DB78  or r4,r27,r27
+	  14: GETL       	R27, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0xFE8B984:  80090004  lwz r0,4(r9)
+	  17: GETL       	R9, t12
+	  18: ADDL       	$0x4, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R0
+	  21: INCEIPL       	$4
+
+	0xFE8B988:  7D234B78  or r3,r9,r9
+	  22: GETL       	R9, t16
+	  23: PUTL       	t16, R3
+	  24: INCEIPL       	$4
+
+	0xFE8B98C:  2F000000  cmpi cr6,r0,0
+	  25: GETL       	R0, t18
+	  26: CMP0L       	t18, t20  (-rSo)
+	  27: ICRFL       	t20, $0x6, CR
+	  28: INCEIPL       	$4
+
+	0xFE8B990:  4099006C  bc 4,25,0xFE8B9FC
+	  29: Jc25o       	$0xFE8B9FC
+
+
+
+. 2878 FE8B974 32
+. 7F AA FA 14 3B 9C 00 01 81 3D 00 10 7F 64 DB 78 80 09 00 04 7D 23 4B 78 2F 00 00 00 40 99 00 6C
+==== BB 2879 (0xFE8B9B4) approx BBs exec'd 0 ====
+
+	0xFE8B9B4:  4092003C  bc 4,18,0xFE8B9F0
+	   0: Jc18o       	$0xFE8B9F0
+
+
+
+. 2879 FE8B9B4 4
+. 40 92 00 3C
+==== BB 2880 (0xFE8B9B8) approx BBs exec'd 0 ====
+
+	0xFE8B9B8:  73000001  andi. r0,r24,0x1
+	   0: GETL       	R24, t0
+	   1: ANDL       	$0x1, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFE8B9BC:  4182FE6C  bc 12,2,0xFE8B828
+	   6: Js02o       	$0xFE8B828
+
+
+
+. 2880 FE8B9B8 8
+. 73 00 00 01 41 82 FE 6C
+==== BB 2881 (0xFE8B9C0) approx BBs exec'd 0 ====
+
+	0xFE8B9C0:  80610028  lwz r3,40(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x28, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE8B9C4:  4811E96D  bl 0xFFAA330
+	   5: MOVL       	$0xFE8B9C8, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0xFFAA330  ($4)
+
+
+
+. 2881 FE8B9C0 8
+. 80 61 00 28 48 11 E9 6D
+==== BB 2882 (0xFE8B9C8) approx BBs exec'd 0 ====
+
+	0xFE8B9C8:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8B9CC:  4BFFFE60  b 0xFE8B82C
+	   3: JMPo       	$0xFE8B82C  ($4)
+
+
+
+. 2882 FE8B9C8 8
+. 7F E3 FB 78 4B FF FE 60
+==== BB 2883 (0xFE8B82C) approx BBs exec'd 0 ====
+
+	0xFE8B82C:  83010064  lwz r24,100(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x64, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0xFE8B830:  8161003C  lwz r11,60(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x3C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0xFE8B834:  7F0803A6  mtlr r24
+	  10: GETL       	R24, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFE8B838:  83210044  lwz r25,68(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x44, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R25
+	  17: INCEIPL       	$4
+
+	0xFE8B83C:  83010040  lwz r24,64(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x40, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R24
+	  22: INCEIPL       	$4
+
+	0xFE8B840:  7D608120  mtcrf 0x8,r11
+	  23: GETL       	R11, t18
+	  24: ICRFL       	t18, $0x4, CR
+	  25: INCEIPL       	$4
+
+	0xFE8B844:  83410048  lwz r26,72(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x48, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R26
+	  30: INCEIPL       	$4
+
+	0xFE8B848:  8361004C  lwz r27,76(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x4C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R27
+	  35: INCEIPL       	$4
+
+	0xFE8B84C:  83810050  lwz r28,80(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x50, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R28
+	  40: INCEIPL       	$4
+
+	0xFE8B850:  83A10054  lwz r29,84(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x54, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R29
+	  45: INCEIPL       	$4
+
+	0xFE8B854:  83C10058  lwz r30,88(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x58, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R30
+	  50: INCEIPL       	$4
+
+	0xFE8B858:  83E1005C  lwz r31,92(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x5C, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R31
+	  55: INCEIPL       	$4
+
+	0xFE8B85C:  38210060  addi r1,r1,96
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x60, t44
+	  58: PUTL       	t44, R1
+	  59: INCEIPL       	$4
+
+	0xFE8B860:  4E800020  blr
+	  60: GETL       	LR, t46
+	  61: JMPo-r       	t46  ($4)
+
+
+
+. 2883 FE8B82C 56
+. 83 01 00 64 81 61 00 3C 7F 08 03 A6 83 21 00 44 83 01 00 40 7D 60 81 20 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+==== BB 2884 (0xFE8B17C) approx BBs exec'd 0 ====
+
+	0xFE8B17C:  7C7A1B79  or. r26,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R26
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8B180:  41A2FF8C  bc 13,2,0xFE8B10C
+	   5: Js02o       	$0xFE8B10C
+
+
+
+. 2884 FE8B17C 8
+. 7C 7A 1B 79 41 A2 FF 8C
+==== BB 2885 (0xFE8B184) approx BBs exec'd 0 ====
+
+	0xFE8B184:  7EE4BB78  or r4,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE8B188:  7EC5B378  or r5,r22,r22
+	   3: GETL       	R22, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFE8B18C:  38DF0008  addi r6,r31,8
+	   6: GETL       	R31, t4
+	   7: ADDL       	$0x8, t4
+	   8: PUTL       	t4, R6
+	   9: INCEIPL       	$4
+
+	0xFE8B190:  4BFFF5F5  bl 0xFE8A784
+	  10: MOVL       	$0xFE8B194, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0xFE8A784  ($4)
+
+
+
+. 2885 FE8B184 16
+. 7E E4 BB 78 7E C5 B3 78 38 DF 00 08 4B FF F5 F5
+==== BB 2886 _nl_find_msg(0xFE8A784) approx BBs exec'd 0 ====
+
+	0xFE8A784:  7CE802A6  mflr r7
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFE8A788:  9421FFA0  stwu r1,-96(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFA0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFE8A78C:  4811D6C5  bl 0xFFA7E50
+	   9: MOVL       	$0xFE8A790, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2886 FE8A784 12
+. 7C E8 02 A6 94 21 FF A0 48 11 D6 C5
+==== BB 2887 (0xFE8A790) approx BBs exec'd 0 ====
+
+	0xFE8A790:  92210024  stw r17,36(r1)
+	   0: GETL       	R17, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x24, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE8A794:  9261002C  stw r19,44(r1)
+	   5: GETL       	R19, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x2C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFE8A798:  7CD13378  or r17,r6,r6
+	  10: GETL       	R6, t8
+	  11: PUTL       	t8, R17
+	  12: INCEIPL       	$4
+
+	0xFE8A79C:  90E10064  stw r7,100(r1)
+	  13: GETL       	R7, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x64, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE8A7A0:  7C932378  or r19,r4,r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, R19
+	  20: INCEIPL       	$4
+
+	0xFE8A7A4:  80030004  lwz r0,4(r3)
+	  21: GETL       	R3, t16
+	  22: ADDL       	$0x4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R0
+	  25: INCEIPL       	$4
+
+	0xFE8A7A8:  92810030  stw r20,48(r1)
+	  26: GETL       	R20, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x30, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFE8A7AC:  7C741B78  or r20,r3,r3
+	  31: GETL       	R3, t24
+	  32: PUTL       	t24, R20
+	  33: INCEIPL       	$4
+
+	0xFE8A7B0:  2F800000  cmpi cr7,r0,0
+	  34: GETL       	R0, t26
+	  35: CMP0L       	t26, t28  (-rSo)
+	  36: ICRFL       	t28, $0x7, CR
+	  37: INCEIPL       	$4
+
+	0xFE8A7B4:  93010040  stw r24,64(r1)
+	  38: GETL       	R24, t30
+	  39: GETL       	R1, t32
+	  40: ADDL       	$0x40, t32
+	  41: STL       	t30, (t32)
+	  42: INCEIPL       	$4
+
+	0xFE8A7B8:  93C10058  stw r30,88(r1)
+	  43: GETL       	R30, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x58, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0xFE8A7BC:  7CB82B78  or r24,r5,r5
+	  48: GETL       	R5, t38
+	  49: PUTL       	t38, R24
+	  50: INCEIPL       	$4
+
+	0xFE8A7C0:  92410028  stw r18,40(r1)
+	  51: GETL       	R18, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x28, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0xFE8A7C4:  7FC802A6  mflr r30
+	  56: GETL       	LR, t44
+	  57: PUTL       	t44, R30
+	  58: INCEIPL       	$4
+
+	0xFE8A7C8:  92A10034  stw r21,52(r1)
+	  59: GETL       	R21, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x34, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0xFE8A7CC:  92C10038  stw r22,56(r1)
+	  64: GETL       	R22, t50
+	  65: GETL       	R1, t52
+	  66: ADDL       	$0x38, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0xFE8A7D0:  92E1003C  stw r23,60(r1)
+	  69: GETL       	R23, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0x3C, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0xFE8A7D4:  93210044  stw r25,68(r1)
+	  74: GETL       	R25, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x44, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0xFE8A7D8:  93410048  stw r26,72(r1)
+	  79: GETL       	R26, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x48, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0xFE8A7DC:  9361004C  stw r27,76(r1)
+	  84: GETL       	R27, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x4C, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0xFE8A7E0:  93810050  stw r28,80(r1)
+	  89: GETL       	R28, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	$0x50, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0xFE8A7E4:  93A10054  stw r29,84(r1)
+	  94: GETL       	R29, t74
+	  95: GETL       	R1, t76
+	  96: ADDL       	$0x54, t76
+	  97: STL       	t74, (t76)
+	  98: INCEIPL       	$4
+
+	0xFE8A7E8:  93E1005C  stw r31,92(r1)
+	  99: GETL       	R31, t78
+	 100: GETL       	R1, t80
+	 101: ADDL       	$0x5C, t80
+	 102: STL       	t78, (t80)
+	 103: INCEIPL       	$4
+
+	0xFE8A7EC:  409D0508  bc 4,29,0xFE8ACF4
+	 104: Jc29o       	$0xFE8ACF4
+
+
+
+. 2887 FE8A790 96
+. 92 21 00 24 92 61 00 2C 7C D1 33 78 90 E1 00 64 7C 93 23 78 80 03 00 04 92 81 00 30 7C 74 1B 78 2F 80 00 00 93 01 00 40 93 C1 00 58 7C B8 2B 78 92 41 00 28 7F C8 02 A6 92 A1 00 34 92 C1 00 38 92 E1 00 3C 93 21 00 44 93 41 00 48 93 61 00 4C 93 81 00 50 93 A1 00 54 93 E1 00 5C 40 9D 05 08
+==== BB 2888 (0xFE8A7F0) approx BBs exec'd 0 ====
+
+	0xFE8A7F0:  83740008  lwz r27,8(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFE8A7F4:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE8A7F8:  2C1B0000  cmpi cr0,r27,0
+	   8: GETL       	R27, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFE8A7FC:  418200A8  bc 12,2,0xFE8A8A4
+	  12: Js02o       	$0xFE8A8A4
+
+
+
+. 2888 FE8A7F0 16
+. 83 74 00 08 38 60 00 00 2C 1B 00 00 41 82 00 A8
+==== BB 2889 (0xFE8A8A4) approx BBs exec'd 0 ====
+
+	0xFE8A8A4:  82210064  lwz r17,100(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x64, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0xFE8A8A8:  82410028  lwz r18,40(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x28, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R18
+	   9: INCEIPL       	$4
+
+	0xFE8A8AC:  7E2803A6  mtlr r17
+	  10: GETL       	R17, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFE8A8B0:  8261002C  lwz r19,44(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x2C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R19
+	  17: INCEIPL       	$4
+
+	0xFE8A8B4:  82210024  lwz r17,36(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x24, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R17
+	  22: INCEIPL       	$4
+
+	0xFE8A8B8:  82810030  lwz r20,48(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x30, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R20
+	  27: INCEIPL       	$4
+
+	0xFE8A8BC:  82A10034  lwz r21,52(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x34, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R21
+	  32: INCEIPL       	$4
+
+	0xFE8A8C0:  82C10038  lwz r22,56(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x38, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R22
+	  37: INCEIPL       	$4
+
+	0xFE8A8C4:  82E1003C  lwz r23,60(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x3C, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R23
+	  42: INCEIPL       	$4
+
+	0xFE8A8C8:  83010040  lwz r24,64(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x40, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R24
+	  47: INCEIPL       	$4
+
+	0xFE8A8CC:  83210044  lwz r25,68(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x44, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R25
+	  52: INCEIPL       	$4
+
+	0xFE8A8D0:  83410048  lwz r26,72(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x48, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R26
+	  57: INCEIPL       	$4
+
+	0xFE8A8D4:  8361004C  lwz r27,76(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x4C, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R27
+	  62: INCEIPL       	$4
+
+	0xFE8A8D8:  83810050  lwz r28,80(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x50, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R28
+	  67: INCEIPL       	$4
+
+	0xFE8A8DC:  83A10054  lwz r29,84(r1)
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x54, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R29
+	  72: INCEIPL       	$4
+
+	0xFE8A8E0:  83C10058  lwz r30,88(r1)
+	  73: GETL       	R1, t58
+	  74: ADDL       	$0x58, t58
+	  75: LDL       	(t58), t60
+	  76: PUTL       	t60, R30
+	  77: INCEIPL       	$4
+
+	0xFE8A8E4:  83E1005C  lwz r31,92(r1)
+	  78: GETL       	R1, t62
+	  79: ADDL       	$0x5C, t62
+	  80: LDL       	(t62), t64
+	  81: PUTL       	t64, R31
+	  82: INCEIPL       	$4
+
+	0xFE8A8E8:  38210060  addi r1,r1,96
+	  83: GETL       	R1, t66
+	  84: ADDL       	$0x60, t66
+	  85: PUTL       	t66, R1
+	  86: INCEIPL       	$4
+
+	0xFE8A8EC:  4E800020  blr
+	  87: GETL       	LR, t68
+	  88: JMPo-r       	t68  ($4)
+
+
+
+. 2889 FE8A8A4 76
+. 82 21 00 64 82 41 00 28 7E 28 03 A6 82 61 00 2C 82 21 00 24 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+==== BB 2890 (0xFE8B194) approx BBs exec'd 0 ====
+
+	0xFE8B194:  7C781B79  or. r24,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R24
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFE8B198:  40820054  bc 4,2,0xFE8B1EC
+	   5: Jc02o       	$0xFE8B1EC
+
+
+
+. 2890 FE8B194 8
+. 7C 78 1B 79 40 82 00 54
+==== BB 2891 (0xFE8B19C) approx BBs exec'd 0 ====
+
+	0xFE8B19C:  813A0010  lwz r9,16(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE8B1A0:  3B800000  li r28,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFE8B1A4:  2F890000  cmpi cr7,r9,0
+	   8: GETL       	R9, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFE8B1A8:  419E0040  bc 12,30,0xFE8B1E8
+	  12: Js30o       	$0xFE8B1E8
+
+
+
+. 2891 FE8B19C 16
+. 81 3A 00 10 3B 80 00 00 2F 89 00 00 41 9E 00 40
+==== BB 2892 (0xFE8B1AC) approx BBs exec'd 0 ====
+
+	0xFE8B1AC:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFE8B1B0:  7FABD214  add r29,r11,r26
+	   3: GETL       	R11, t2
+	   4: GETL       	R26, t4
+	   5: ADDL       	t2, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFE8B1B4:  7EE4BB78  or r4,r23,r23
+	   8: GETL       	R23, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFE8B1B8:  807D0010  lwz r3,16(r29)
+	  11: GETL       	R29, t8
+	  12: ADDL       	$0x10, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0xFE8B1BC:  7EC5B378  or r5,r22,r22
+	  16: GETL       	R22, t12
+	  17: PUTL       	t12, R5
+	  18: INCEIPL       	$4
+
+	0xFE8B1C0:  38DF0008  addi r6,r31,8
+	  19: GETL       	R31, t14
+	  20: ADDL       	$0x8, t14
+	  21: PUTL       	t14, R6
+	  22: INCEIPL       	$4
+
+	0xFE8B1C4:  3B9C0001  addi r28,r28,1
+	  23: GETL       	R28, t16
+	  24: ADDL       	$0x1, t16
+	  25: PUTL       	t16, R28
+	  26: INCEIPL       	$4
+
+	0xFE8B1C8:  4BFFF5BD  bl 0xFE8A784
+	  27: MOVL       	$0xFE8B1CC, t18
+	  28: PUTL       	t18, LR
+	  29: JMPo-c       	$0xFE8A784  ($4)
+
+
+
+. 2892 FE8B1AC 32
+. 39 60 00 00 7F AB D2 14 7E E4 BB 78 80 7D 00 10 7E C5 B3 78 38 DF 00 08 3B 9C 00 01 4B FF F5 BD
+==== BB 2893 (0xFE8B1CC) approx BBs exec'd 0 ====
+
+	0xFE8B1CC:  578B103A  rlwinm r11,r28,2,0,29
+	   0: GETL       	R28, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFE8B1D0:  7C781B79  or. r24,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R24
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFE8B1D4:  7D2BD214  add r9,r11,r26
+	   9: GETL       	R11, t6
+	  10: GETL       	R26, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFE8B1D8:  40820264  bc 4,2,0xFE8B43C
+	  14: Jc02o       	$0xFE8B43C
+
+
+
+. 2893 FE8B1CC 16
+. 57 8B 10 3A 7C 78 1B 79 7D 2B D2 14 40 82 02 64
+==== BB 2894 (0xFE8B1DC) approx BBs exec'd 0 ====
+
+	0xFE8B1DC:  80890010  lwz r4,16(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE8B1E0:  2C840000  cmpi cr1,r4,0
+	   5: GETL       	R4, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFE8B1E4:  4086FFCC  bc 4,6,0xFE8B1B0
+	   9: Jc06o       	$0xFE8B1B0
+
+
+
+. 2894 FE8B1DC 12
+. 80 89 00 10 2C 84 00 00 40 86 FF CC
+==== BB 2895 (0xFE8B1B0) approx BBs exec'd 0 ====
+
+	0xFE8B1B0:  7FABD214  add r29,r11,r26
+	   0: GETL       	R11, t0
+	   1: GETL       	R26, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFE8B1B4:  7EE4BB78  or r4,r23,r23
+	   5: GETL       	R23, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE8B1B8:  807D0010  lwz r3,16(r29)
+	   8: GETL       	R29, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFE8B1BC:  7EC5B378  or r5,r22,r22
+	  13: GETL       	R22, t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0xFE8B1C0:  38DF0008  addi r6,r31,8
+	  16: GETL       	R31, t12
+	  17: ADDL       	$0x8, t12
+	  18: PUTL       	t12, R6
+	  19: INCEIPL       	$4
+
+	0xFE8B1C4:  3B9C0001  addi r28,r28,1
+	  20: GETL       	R28, t14
+	  21: ADDL       	$0x1, t14
+	  22: PUTL       	t14, R28
+	  23: INCEIPL       	$4
+
+	0xFE8B1C8:  4BFFF5BD  bl 0xFE8A784
+	  24: MOVL       	$0xFE8B1CC, t16
+	  25: PUTL       	t16, LR
+	  26: JMPo-c       	$0xFE8A784  ($4)
+
+
+
+. 2895 FE8B1B0 28
+. 7F AB D2 14 7E E4 BB 78 80 7D 00 10 7E C5 B3 78 38 DF 00 08 3B 9C 00 01 4B FF F5 BD
+==== BB 2896 (0xFE8B1E8) approx BBs exec'd 0 ====
+
+	0xFE8B1E8:  4182FF24  bc 12,2,0xFE8B10C
+	   0: Js02o       	$0xFE8B10C
+
+
+
+. 2896 FE8B1E8 4
+. 41 82 FF 24
+==== BB 2897 (0xFE8B10C) approx BBs exec'd 0 ====
+
+	0xFE8B10C:  893B0000  lbz r9,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFE8B110:  2F09003A  cmpi cr6,r9,58
+	   4: GETL       	R9, t4
+	   5: MOVL       	$0x3A, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFE8B114:  409A0010  bc 4,26,0xFE8B124
+	   9: Jc26o       	$0xFE8B124
+
+
+
+. 2897 FE8B10C 12
+. 89 3B 00 00 2F 09 00 3A 40 9A 00 10
+==== BB 2898 (0xFE8B12C) approx BBs exec'd 0 ====
+
+	0xFE8B12C:  39800043  li r12,67
+	   0: MOVL       	$0x43, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0xFE8B130:  99790001  stb r11,1(r25)
+	   3: GETL       	R11, t2
+	   4: GETL       	R25, t4
+	   5: ADDL       	$0x1, t4
+	   6: STB       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFE8B134:  99990000  stb r12,0(r25)
+	   8: GETL       	R12, t6
+	   9: GETL       	R25, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFE8B138:  88790000  lbz r3,0(r25)
+	  12: GETL       	R25, t10
+	  13: LDB       	(t10), t12
+	  14: PUTL       	t12, R3
+	  15: INCEIPL       	$4
+
+	0xFE8B13C:  3523FFBD  addic. r9,r3,-67
+	  16: GETL       	R3, t14
+	  17: ADCL       	$0xFFFFFFBD, t14  (-wCa)
+	  18: PUTL       	t14, R9
+	  19: CMP0L       	t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0xFE8B140:  4F421042  crnor 26,2,2
+	  22: GETL       	CR, t18
+	  23: XBITB       	t18, $0x2, t20
+	  24: XBITB       	t18, $0x2, t22
+	  25: NOTB       	t20
+	  26: NOTB       	t22
+	  27: ANDB       	t22, t20
+	  28: IBITL       	t20, $0x1A, t18
+	  29: PUTL       	t18, CR
+	  30: INCEIPL       	$4
+
+	0xFE8B144:  419A000C  bc 12,26,0xFE8B150
+	  31: Js26o       	$0xFE8B150
+
+
+
+. 2898 FE8B12C 28
+. 39 80 00 43 99 79 00 01 99 99 00 00 88 79 00 00 35 23 FF BD 4F 42 10 42 41 9A 00 0C
+==== BB 2899 (0xFE8B148) approx BBs exec'd 0 ====
+
+	0xFE8B148:  88190001  lbz r0,1(r25)
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8B14C:  2C000000  cmpi cr0,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFE8B150:  418202F8  bc 12,2,0xFE8B448
+	   9: Js02o       	$0xFE8B448
+
+
+
+. 2899 FE8B148 12
+. 88 19 00 01 2C 00 00 00 41 82 02 F8
+==== BB 2900 (0xFE8B448) approx BBs exec'd 0 ====
+
+	0xFE8B448:  800F00A8  lwz r0,168(r15)
+	   0: GETL       	R15, t0
+	   1: ADDL       	$0xA8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8B44C:  2D800000  cmpi cr3,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x3, CR
+	   8: INCEIPL       	$4
+
+	0xFE8B450:  408E002C  bc 4,14,0xFE8B47C
+	   9: Jc14o       	$0xFE8B47C
+
+
+
+. 2900 FE8B448 12
+. 80 0F 00 A8 2D 80 00 00 40 8E 00 2C
+==== BB 2901 (0xFE8B454) approx BBs exec'd 0 ====
+
+	0xFE8B454:  821F0010  lwz r16,16(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R16
+	   4: INCEIPL       	$4
+
+	0xFE8B458:  827F0018  lwz r19,24(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R19
+	   9: INCEIPL       	$4
+
+	0xFE8B45C:  92130000  stw r16,0(r19)
+	  10: GETL       	R16, t8
+	  11: GETL       	R19, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFE8B460:  41920014  bc 12,18,0xFE8B474
+	  14: Js18o       	$0xFE8B474
+
+
+
+. 2901 FE8B454 16
+. 82 1F 00 10 82 7F 00 18 92 13 00 00 41 92 00 14
+==== BB 2902 (0xFE8B474) approx BBs exec'd 0 ====
+
+	0xFE8B474:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE8B478:  4BFFFDD4  b 0xFE8B24C
+	   3: JMPo       	$0xFE8B24C  ($4)
+
+
+
+. 2902 FE8B474 8
+. 7E C3 B3 78 4B FF FD D4
+==== BB 2903 (0xFE8B24C) approx BBs exec'd 0 ====
+
+	0xFE8B24C:  81210000  lwz r9,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFE8B250:  81E90004  lwz r15,4(r9)
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R15
+	   8: INCEIPL       	$4
+
+	0xFE8B254:  8189FFB4  lwz r12,-76(r9)
+	   9: GETL       	R9, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0xFE8B258:  7DE803A6  mtlr r15
+	  14: GETL       	R15, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFE8B25C:  81C9FFB8  lwz r14,-72(r9)
+	  17: GETL       	R9, t14
+	  18: ADDL       	$0xFFFFFFB8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R14
+	  21: INCEIPL       	$4
+
+	0xFE8B260:  81E9FFBC  lwz r15,-68(r9)
+	  22: GETL       	R9, t18
+	  23: ADDL       	$0xFFFFFFBC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R15
+	  26: INCEIPL       	$4
+
+	0xFE8B264:  7D818120  mtcrf 0x18,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x3, CR
+	  29: ICRFL       	t22, $0x4, CR
+	  30: INCEIPL       	$4
+
+	0xFE8B268:  8209FFC0  lwz r16,-64(r9)
+	  31: GETL       	R9, t24
+	  32: ADDL       	$0xFFFFFFC0, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R16
+	  35: INCEIPL       	$4
+
+	0xFE8B26C:  8229FFC4  lwz r17,-60(r9)
+	  36: GETL       	R9, t28
+	  37: ADDL       	$0xFFFFFFC4, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R17
+	  40: INCEIPL       	$4
+
+	0xFE8B270:  8249FFC8  lwz r18,-56(r9)
+	  41: GETL       	R9, t32
+	  42: ADDL       	$0xFFFFFFC8, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R18
+	  45: INCEIPL       	$4
+
+	0xFE8B274:  8269FFCC  lwz r19,-52(r9)
+	  46: GETL       	R9, t36
+	  47: ADDL       	$0xFFFFFFCC, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R19
+	  50: INCEIPL       	$4
+
+	0xFE8B278:  8289FFD0  lwz r20,-48(r9)
+	  51: GETL       	R9, t40
+	  52: ADDL       	$0xFFFFFFD0, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R20
+	  55: INCEIPL       	$4
+
+	0xFE8B27C:  82A9FFD4  lwz r21,-44(r9)
+	  56: GETL       	R9, t44
+	  57: ADDL       	$0xFFFFFFD4, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R21
+	  60: INCEIPL       	$4
+
+	0xFE8B280:  82C9FFD8  lwz r22,-40(r9)
+	  61: GETL       	R9, t48
+	  62: ADDL       	$0xFFFFFFD8, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R22
+	  65: INCEIPL       	$4
+
+	0xFE8B284:  82E9FFDC  lwz r23,-36(r9)
+	  66: GETL       	R9, t52
+	  67: ADDL       	$0xFFFFFFDC, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R23
+	  70: INCEIPL       	$4
+
+	0xFE8B288:  8309FFE0  lwz r24,-32(r9)
+	  71: GETL       	R9, t56
+	  72: ADDL       	$0xFFFFFFE0, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R24
+	  75: INCEIPL       	$4
+
+	0xFE8B28C:  8329FFE4  lwz r25,-28(r9)
+	  76: GETL       	R9, t60
+	  77: ADDL       	$0xFFFFFFE4, t60
+	  78: LDL       	(t60), t62
+	  79: PUTL       	t62, R25
+	  80: INCEIPL       	$4
+
+	0xFE8B290:  8349FFE8  lwz r26,-24(r9)
+	  81: GETL       	R9, t64
+	  82: ADDL       	$0xFFFFFFE8, t64
+	  83: LDL       	(t64), t66
+	  84: PUTL       	t66, R26
+	  85: INCEIPL       	$4
+
+	0xFE8B294:  8369FFEC  lwz r27,-20(r9)
+	  86: GETL       	R9, t68
+	  87: ADDL       	$0xFFFFFFEC, t68
+	  88: LDL       	(t68), t70
+	  89: PUTL       	t70, R27
+	  90: INCEIPL       	$4
+
+	0xFE8B298:  8389FFF0  lwz r28,-16(r9)
+	  91: GETL       	R9, t72
+	  92: ADDL       	$0xFFFFFFF0, t72
+	  93: LDL       	(t72), t74
+	  94: PUTL       	t74, R28
+	  95: INCEIPL       	$4
+
+	0xFE8B29C:  83A9FFF4  lwz r29,-12(r9)
+	  96: GETL       	R9, t76
+	  97: ADDL       	$0xFFFFFFF4, t76
+	  98: LDL       	(t76), t78
+	  99: PUTL       	t78, R29
+	 100: INCEIPL       	$4
+
+	0xFE8B2A0:  83C9FFF8  lwz r30,-8(r9)
+	 101: GETL       	R9, t80
+	 102: ADDL       	$0xFFFFFFF8, t80
+	 103: LDL       	(t80), t82
+	 104: PUTL       	t82, R30
+	 105: INCEIPL       	$4
+
+	0xFE8B2A4:  83E9FFFC  lwz r31,-4(r9)
+	 106: GETL       	R9, t84
+	 107: ADDL       	$0xFFFFFFFC, t84
+	 108: LDL       	(t84), t86
+	 109: PUTL       	t86, R31
+	 110: INCEIPL       	$4
+
+	0xFE8B2A8:  7D214B78  or r1,r9,r9
+	 111: GETL       	R9, t88
+	 112: PUTL       	t88, R1
+	 113: INCEIPL       	$4
+
+	0xFE8B2AC:  4E800020  blr
+	 114: GETL       	LR, t90
+	 115: JMPo-r       	t90  ($4)
+
+
+
+. 2903 FE8B24C 100
+. 81 21 00 00 81 E9 00 04 81 89 FF B4 7D E8 03 A6 81 C9 FF B8 81 E9 FF BC 7D 81 81 20 82 09 FF C0 82 29 FF C4 82 49 FF C8 82 69 FF CC 82 89 FF D0 82 A9 FF D4 82 C9 FF D8 82 E9 FF DC 83 09 FF E0 83 29 FF E4 83 49 FF E8 83 69 FF EC 83 89 FF F0 83 A9 FF F4 83 C9 FF F8 83 E9 FF FC 7D 21 4B 78 4E 80 00 20
+==== BB 2904 (0xFE8A4A0) approx BBs exec'd 0 ====
+
+	0xFE8A4A0:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFE8A4A4:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFE8A4A8:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFE8A4AC:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 2904 FE8A4A0 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 2905 (0x10001C98) approx BBs exec'd 0 ====
+
+	0x10001C98:  38610090  addi r3,r1,144
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x90, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x10001C9C:  48003541  bl 0x100051DC
+	   4: MOVL       	$0x10001CA0, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0x100051DC  ($4)
+
+
+
+. 2905 10001C98 8
+. 38 61 00 90 48 00 35 41
+==== BB 2906 (0x100051DC) approx BBs exec'd 0 ====
+
+	0x100051DC:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x100051E0:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x100051E4:  7C641B78  or r4,r3,r3
+	   9: GETL       	R3, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0x100051E8:  93E10018  stw r31,24(r1)
+	  12: GETL       	R31, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x100051EC:  7C7F1B78  or r31,r3,r3
+	  17: GETL       	R3, t12
+	  18: PUTL       	t12, R31
+	  19: INCEIPL       	$4
+
+	0x100051F0:  38600000  li r3,0
+	  20: MOVL       	$0x0, t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0x100051F4:  90010024  stw r0,36(r1)
+	  23: GETL       	R0, t16
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x24, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0x100051F8:  48015ADD  bl 0x1001ACD4
+	  28: MOVL       	$0x100051FC, t20
+	  29: PUTL       	t20, LR
+	  30: JMPo-c       	$0x1001ACD4  ($4)
+
+
+
+. 2906 100051DC 32
+. 7C 08 02 A6 94 21 FF E0 7C 64 1B 78 93 E1 00 18 7C 7F 1B 78 38 60 00 00 90 01 00 24 48 01 5A DD
+==== BB 2907 (0x1001ACD4) approx BBs exec'd 0 ====
+
+	0x1001ACD4:  396000D0  li r11,208
+	   0: MOVL       	$0xD0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001ACD8:  4BFFFE34  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 2907 1001ACD4 8
+. 39 60 00 D0 4B FF FE 34
+==== BB 2908 clock_gettime(0xEE84410) approx BBs exec'd 0 ====
+
+	0xEE84410:  2B830001  cmpli cr7,r3,1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xEE84414:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xEE84418:  9421FFD0  stwu r1,-48(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFD0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xEE8441C:  48013C29  bl 0xEE98044
+	  14: MOVL       	$0xEE84420, t12
+	  15: PUTL       	t12, LR
+	  16: JMPo-c       	$0xEE98044  ($4)
+
+
+
+. 2908 EE84410 16
+. 2B 83 00 01 7C 08 02 A6 94 21 FF D0 48 01 3C 29
+==== BB 2909 (0xEE84420) approx BBs exec'd 0 ====
+
+	0xEE84420:  93410018  stw r26,24(r1)
+	   0: GETL       	R26, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xEE84424:  7C9A2378  or r26,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xEE84428:  93810020  stw r28,32(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x20, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xEE8442C:  3B80FFFF  li r28,-1
+	  13: MOVL       	$0xFFFFFFFF, t10
+	  14: PUTL       	t10, R28
+	  15: INCEIPL       	$4
+
+	0xEE84430:  93C10028  stw r30,40(r1)
+	  16: GETL       	R30, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x28, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xEE84434:  7FC802A6  mflr r30
+	  21: GETL       	LR, t16
+	  22: PUTL       	t16, R30
+	  23: INCEIPL       	$4
+
+	0xEE84438:  93E1002C  stw r31,44(r1)
+	  24: GETL       	R31, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x2C, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xEE8443C:  7C7F1B78  or r31,r3,r3
+	  29: GETL       	R3, t22
+	  30: PUTL       	t22, R31
+	  31: INCEIPL       	$4
+
+	0xEE84440:  9361001C  stw r27,28(r1)
+	  32: GETL       	R27, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x1C, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xEE84444:  93A10024  stw r29,36(r1)
+	  37: GETL       	R29, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x24, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xEE84448:  90010034  stw r0,52(r1)
+	  42: GETL       	R0, t32
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x34, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0xEE8444C:  419D005C  bc 12,29,0xEE844A8
+	  47: Js29o       	$0xEE844A8
+
+
+
+. 2909 EE84420 48
+. 93 41 00 18 7C 9A 23 78 93 81 00 20 3B 80 FF FF 93 C1 00 28 7F C8 02 A6 93 E1 00 2C 7C 7F 1B 78 93 61 00 1C 93 A1 00 24 90 01 00 34 41 9D 00 5C
+==== BB 2910 (0xEE84450) approx BBs exec'd 0 ====
+
+	0xEE84450:  837E00DC  lwz r27,220(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xDC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xEE84454:  3BA00016  li r29,22
+	   5: MOVL       	$0x16, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xEE84458:  80BB0000  lwz r5,0(r27)
+	   8: GETL       	R27, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0xEE8445C:  2C050000  cmpi cr0,r5,0
+	  12: GETL       	R5, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xEE84460:  40820098  bc 4,2,0xEE844F8
+	  16: Jc02o       	$0xEE844F8
+
+
+
+. 2910 EE84450 20
+. 83 7E 00 DC 3B A0 00 16 80 BB 00 00 2C 05 00 00 40 82 00 98
+==== BB 2911 (0xEE84464) approx BBs exec'd 0 ====
+
+	0xEE84464:  380000F6  li r0,246
+	   0: MOVL       	$0xF6, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xEE84468:  44000002  sc
+	   3: JMPo-sys       	$0xEE8446C  ($4)
+
+
+
+. 2911 EE84464 8
+. 38 00 00 F6 44 00 00 02
+==== BB 2912 (0xEE8446C) approx BBs exec'd 0 ====
+
+	0xEE8446C:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xEE84470:  74091000  andis. r9,r0,0x1000
+	   3: GETL       	R0, t2
+	   4: ANDL       	$0x10000000, t2
+	   5: PUTL       	t2, R9
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xEE84474:  4082006C  bc 4,2,0xEE844E0
+	   9: Jc02o       	$0xEE844E0
+
+
+
+. 2912 EE8446C 12
+. 7C 00 00 26 74 09 10 00 40 82 00 6C
+==== BB 2913 (0xEE84478) approx BBs exec'd 0 ====
+
+	0xEE84478:  81610034  lwz r11,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xEE8447C:  3B800000  li r28,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xEE84480:  7F83E378  or r3,r28,r28
+	   8: GETL       	R28, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xEE84484:  83410018  lwz r26,24(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x18, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R26
+	  15: INCEIPL       	$4
+
+	0xEE84488:  8361001C  lwz r27,28(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x1C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R27
+	  20: INCEIPL       	$4
+
+	0xEE8448C:  7D6803A6  mtlr r11
+	  21: GETL       	R11, t16
+	  22: PUTL       	t16, LR
+	  23: INCEIPL       	$4
+
+	0xEE84490:  83810020  lwz r28,32(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x20, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R28
+	  28: INCEIPL       	$4
+
+	0xEE84494:  83A10024  lwz r29,36(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x24, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R29
+	  33: INCEIPL       	$4
+
+	0xEE84498:  83C10028  lwz r30,40(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x28, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R30
+	  38: INCEIPL       	$4
+
+	0xEE8449C:  83E1002C  lwz r31,44(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x2C, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R31
+	  43: INCEIPL       	$4
+
+	0xEE844A0:  38210030  addi r1,r1,48
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x30, t34
+	  46: PUTL       	t34, R1
+	  47: INCEIPL       	$4
+
+	0xEE844A4:  4E800020  blr
+	  48: GETL       	LR, t36
+	  49: JMPo-r       	t36  ($4)
+
+
+
+. 2913 EE84478 48
+. 81 61 00 34 3B 80 00 00 7F 83 E3 78 83 41 00 18 83 61 00 1C 7D 68 03 A6 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 2914 (0x100051FC) approx BBs exec'd 0 ====
+
+	0x100051FC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10005200:  38800000  li r4,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0x10005204:  38610008  addi r3,r1,8
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x8, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x10005208:  39600000  li r11,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0x1000520C:  409E001C  bc 4,30,0x10005228
+	  14: Jc30o       	$0x10005228
+
+
+
+. 2914 100051FC 20
+. 2F 83 00 00 38 80 00 00 38 61 00 08 39 60 00 00 40 9E 00 1C
+==== BB 2915 (0x10005210) approx BBs exec'd 0 ====
+
+	0x10005210:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10005214:  7D635B78  or r3,r11,r11
+	   5: GETL       	R11, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x10005218:  83E10018  lwz r31,24(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R31
+	  12: INCEIPL       	$4
+
+	0x1000521C:  38210020  addi r1,r1,32
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x20, t10
+	  15: PUTL       	t10, R1
+	  16: INCEIPL       	$4
+
+	0x10005220:  7C0803A6  mtlr r0
+	  17: GETL       	R0, t12
+	  18: PUTL       	t12, LR
+	  19: INCEIPL       	$4
+
+	0x10005224:  4E800020  blr
+	  20: GETL       	LR, t14
+	  21: JMPo-r       	t14  ($4)
+
+
+
+. 2915 10005210 24
+. 80 01 00 24 7D 63 5B 78 83 E1 00 18 38 21 00 20 7C 08 03 A6 4E 80 00 20
+==== BB 2916 (0x10001CA0) approx BBs exec'd 0 ====
+
+	0x10001CA0:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10001CA4:  409E001C  bc 4,30,0x10001CC0
+	   4: Jc30o       	$0x10001CC0
+
+
+
+. 2916 10001CA0 8
+. 2F 83 00 00 40 9E 00 1C
+==== BB 2917 (0x10001CA8) approx BBs exec'd 0 ====
+
+	0x10001CA8:  40920044  bc 4,18,0x10001CEC
+	   0: Jc18o       	$0x10001CEC
+
+
+
+. 2917 10001CA8 4
+. 40 92 00 44
+==== BB 2918 (0x10001CEC) approx BBs exec'd 0 ====
+
+	0x10001CEC:  3BE00000  li r31,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0x10001CF0:  4BFFFEFC  b 0x10001BEC
+	   3: JMPo       	$0x10001BEC  ($4)
+
+
+
+. 2918 10001CEC 8
+. 3B E0 00 00 4B FF FE FC
+==== BB 2919 (0x10001BEC) approx BBs exec'd 0 ====
+
+	0x10001BEC:  408E0028  bc 4,14,0x10001C14
+	   0: Jc14o       	$0x10001C14
+
+
+
+. 2919 10001BEC 4
+. 40 8E 00 28
+==== BB 2920 (0x10001BF0) approx BBs exec'd 0 ====
+
+	0x10001BF0:  C8010090  lfd f0,144(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x90, t0
+	   2: FPU_RQ       	(t0), 0x0:0x0
+	   3: INCEIPL       	$4
+
+	0x10001BF4:  7FE3FB78  or r3,r31,r31
+	   4: GETL       	R31, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0x10001BF8:  38810080  addi r4,r1,128
+	   7: GETL       	R1, t4
+	   8: ADDL       	$0x80, t4
+	   9: PUTL       	t4, R4
+	  10: INCEIPL       	$4
+
+	0x10001BFC:  D8010080  stfd f0,128(r1)
+	  11: GETL       	R1, t6
+	  12: ADDL       	$0x80, t6
+	  13: FPU_WQ       	0x0:0x0, (t6)
+	  14: INCEIPL       	$4
+
+	0x10001C00:  4BFFF751  bl 0x10001350
+	  15: MOVL       	$0x10001C04, t8
+	  16: PUTL       	t8, LR
+	  17: JMPo-c       	$0x10001350  ($4)
+
+
+
+. 2920 10001BF0 20
+. C8 01 00 90 7F E3 FB 78 38 81 00 80 D8 01 00 80 4B FF F7 51
+==== BB 2921 (0x10001350) approx BBs exec'd 0 ====
+
+	0x10001350:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10001354:  9421FFA0  stwu r1,-96(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFA0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x10001358:  9361004C  stw r27,76(r1)
+	   9: GETL       	R27, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x4C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x1000135C:  7C7B1B79  or. r27,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R27
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x10001360:  93A10054  stw r29,84(r1)
+	  19: GETL       	R29, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x54, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x10001364:  48019549  bl 0x1001A8AC
+	  24: MOVL       	$0x10001368, t18
+	  25: PUTL       	t18, LR
+	  26: JMPo-c       	$0x1001A8AC  ($4)
+
+
+
+. 2921 10001350 24
+. 7C 08 02 A6 94 21 FF A0 93 61 00 4C 7C 7B 1B 79 93 A1 00 54 48 01 95 49
+==== BB 2922 (0x10001368) approx BBs exec'd 0 ====
+
+	0x10001368:  90010064  stw r0,100(r1)
+	   0: GETL       	R0, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x64, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x1000136C:  3BA00000  li r29,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x10001370:  C8040000  lfd f0,0(r4)
+	   8: GETL       	R4, t6
+	   9: FPU_RQ       	(t6), 0x0:0x0
+	  10: INCEIPL       	$4
+
+	0x10001374:  93C10058  stw r30,88(r1)
+	  11: GETL       	R30, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x58, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x10001378:  7FC802A6  mflr r30
+	  16: GETL       	LR, t12
+	  17: PUTL       	t12, R30
+	  18: INCEIPL       	$4
+
+	0x1000137C:  93010040  stw r24,64(r1)
+	  19: GETL       	R24, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x40, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x10001380:  93210044  stw r25,68(r1)
+	  24: GETL       	R25, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x44, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x10001384:  93410048  stw r26,72(r1)
+	  29: GETL       	R26, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x48, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0x10001388:  93810050  stw r28,80(r1)
+	  34: GETL       	R28, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x50, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x1000138C:  93E1005C  stw r31,92(r1)
+	  39: GETL       	R31, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x5C, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x10001390:  D8010030  stfd f0,48(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x30, t34
+	  46: FPU_WQ       	0x0:0x0, (t34)
+	  47: INCEIPL       	$4
+
+	0x10001394:  93A10038  stw r29,56(r1)
+	  48: GETL       	R29, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x38, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x10001398:  41820118  bc 12,2,0x100014B0
+	  53: Js02o       	$0x100014B0
+
+
+
+. 2922 10001368 52
+. 90 01 00 64 3B A0 00 00 C8 04 00 00 93 C1 00 58 7F C8 02 A6 93 01 00 40 93 21 00 44 93 41 00 48 93 81 00 50 93 E1 00 5C D8 01 00 30 93 A1 00 38 41 82 01 18
+==== BB 2923 (0x100014B0) approx BBs exec'd 0 ====
+
+	0x100014B0:  83FE000C  lwz r31,12(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x100014B4:  837E0078  lwz r27,120(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x78, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R27
+	   9: INCEIPL       	$4
+
+	0x100014B8:  801F0000  lwz r0,0(r31)
+	  10: GETL       	R31, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R0
+	  13: INCEIPL       	$4
+
+	0x100014BC:  2F800000  cmpi cr7,r0,0
+	  14: GETL       	R0, t12
+	  15: CMP0L       	t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0x100014C0:  40BEFEE8  bc 5,30,0x100013A8
+	  18: Jc30o       	$0x100013A8
+
+
+
+. 2923 100014B0 20
+. 83 FE 00 0C 83 7E 00 78 80 1F 00 00 2F 80 00 00 40 BE FE E8
+==== BB 2924 (0x100014C4) approx BBs exec'd 0 ====
+
+	0x100014C4:  813E0018  lwz r9,24(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x100014C8:  81290000  lwz r9,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x100014CC:  2F890000  cmpi cr7,r9,0
+	   9: GETL       	R9, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x100014D0:  419E0068  bc 12,30,0x10001538
+	  13: Js30o       	$0x10001538
+
+
+
+. 2924 100014C4 16
+. 81 3E 00 18 81 29 00 00 2F 89 00 00 41 9E 00 68
+==== BB 2925 (0x10001538) approx BBs exec'd 0 ====
+
+	0x10001538:  3C600002  lis r3,2
+	   0: MOVL       	$0x20000, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x1000153C:  6063006C  ori r3,r3,0x6C
+	   3: MOVL       	$0x2006C, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0x10001540:  480195F5  bl 0x1001AB34
+	   6: MOVL       	$0x10001544, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x1001AB34  ($4)
+
+
+
+. 2925 10001538 12
+. 3C 60 00 02 60 63 00 6C 48 01 95 F5
+==== BB 2926 (0x1001AB34) approx BBs exec'd 0 ====
+
+	0x1001AB34:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AB38:  4BFFFFD4  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 2926 1001AB34 8
+. 39 60 00 00 4B FF FF D4
+==== BB 2927 nl_langinfo(0xFE88704) approx BBs exec'd 0 ====
+
+	0xFE88704:  7C688670  srawi r8,r3,16
+	   0: GETL       	R3, t0
+	   1: SARL       	$0x10, t0  (-wCa)
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0xFE88708:  7D8802A6  mflr r12
+	   4: GETL       	LR, t2
+	   5: PUTL       	t2, R12
+	   6: INCEIPL       	$4
+
+	0xFE8870C:  69090006  xori r9,r8,0x6
+	   7: GETL       	R8, t4
+	   8: XORL       	$0x6, t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0xFE88710:  20090000  subfic r0,r9,0
+	  11: GETL       	R9, t6
+	  12: MOVL       	$0x0, t8
+	  13: SBBL       	t6, t8  (-wCa)
+	  14: PUTL       	t8, R0
+	  15: INCEIPL       	$4
+
+	0xFE88714:  7D204914  adde r9,r0,r9
+	  16: GETL       	R0, t10
+	  17: GETL       	R9, t12
+	  18: ADCL       	t10, t12  (-rCa-wCa)
+	  19: PUTL       	t12, R9
+	  20: INCEIPL       	$4
+
+	0xFE88718:  55000FFE  rlwinm r0,r8,1,31,31
+	  21: GETL       	R8, t14
+	  22: SHRL       	$0x1F, t14
+	  23: PUTL       	t14, R0
+	  24: INCEIPL       	$4
+
+	0xFE8871C:  4811F735  bl 0xFFA7E50
+	  25: MOVL       	$0xFE88720, t16
+	  26: PUTL       	t16, LR
+	  27: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2927 FE88704 28
+. 7C 68 86 70 7D 88 02 A6 69 09 00 06 20 09 00 00 7D 20 49 14 55 00 0F FE 48 11 F7 35
+==== BB 2928 (0xFE88720) approx BBs exec'd 0 ====
+
+	0xFE88720:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE88724:  7C0B4B79  or. r11,r0,r9
+	   6: GETL       	R0, t4
+	   7: GETL       	R9, t6
+	   8: ORL       	t6, t4
+	   9: PUTL       	t4, R11
+	  10: CMP0L       	t4, t8  (-rSo)
+	  11: ICRFL       	t8, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFE88728:  93C10008  stw r30,8(r1)
+	  13: GETL       	R30, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x8, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE8872C:  7FC802A6  mflr r30
+	  18: GETL       	LR, t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0xFE88730:  2F88000C  cmpi cr7,r8,12
+	  21: GETL       	R8, t16
+	  22: MOVL       	$0xC, t20
+	  23: CMPL       	t16, t20, t18  (-rSo)
+	  24: ICRFL       	t18, $0x7, CR
+	  25: INCEIPL       	$4
+
+	0xFE88734:  5463043E  rlwinm r3,r3,0,16,31
+	  26: GETL       	R3, t22
+	  27: ANDL       	$0xFFFF, t22
+	  28: PUTL       	t22, R3
+	  29: INCEIPL       	$4
+
+	0xFE88738:  7D8803A6  mtlr r12
+	  30: GETL       	R12, t24
+	  31: PUTL       	t24, LR
+	  32: INCEIPL       	$4
+
+	0xFE8873C:  80FE0140  lwz r7,320(r30)
+	  33: GETL       	R30, t26
+	  34: ADDL       	$0x140, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R7
+	  37: INCEIPL       	$4
+
+	0xFE88740:  40820014  bc 4,2,0xFE88754
+	  38: Jc02o       	$0xFE88754
+
+
+
+. 2928 FE88720 36
+. 94 21 FF F0 7C 0B 4B 79 93 C1 00 08 7F C8 02 A6 2F 88 00 0C 54 63 04 3E 7D 88 03 A6 80 FE 01 40 40 82 00 14
+==== BB 2929 (0xFE88744) approx BBs exec'd 0 ====
+
+	0xFE88744:  809E1D50  lwz r4,7504(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1D50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE88748:  5500103A  rlwinm r0,r8,2,0,29
+	   5: GETL       	R8, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE8874C:  7D641214  add r11,r4,r2
+	   9: GETL       	R4, t6
+	  10: GETL       	R2, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0xFE88750:  409D0014  bc 4,29,0xFE88764
+	  14: Jc29o       	$0xFE88764
+
+
+
+. 2929 FE88744 16
+. 80 9E 1D 50 55 00 10 3A 7D 64 12 14 40 9D 00 14
+==== BB 2930 (0xFE88764) approx BBs exec'd 0 ====
+
+	0xFE88764:  816B0000  lwz r11,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFE88768:  546A103A  rlwinm r10,r3,2,0,29
+	   4: GETL       	R3, t4
+	   5: SHLL       	$0x2, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFE8876C:  80FE0140  lwz r7,320(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x140, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0xFE88770:  7CCB002E  lwzx r6,r11,r0
+	  13: GETL       	R0, t10
+	  14: GETL       	R11, t12
+	  15: ADDL       	t12, t10
+	  16: LDL       	(t10), t14
+	  17: PUTL       	t14, R6
+	  18: INCEIPL       	$4
+
+	0xFE88774:  80A60020  lwz r5,32(r6)
+	  19: GETL       	R6, t16
+	  20: ADDL       	$0x20, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R5
+	  23: INCEIPL       	$4
+
+	0xFE88778:  7D2A3214  add r9,r10,r6
+	  24: GETL       	R10, t20
+	  25: GETL       	R6, t22
+	  26: ADDL       	t20, t22
+	  27: PUTL       	t22, R9
+	  28: INCEIPL       	$4
+
+	0xFE8877C:  7F851840  cmpl cr7,r5,r3
+	  29: GETL       	R5, t24
+	  30: GETL       	R3, t26
+	  31: CMPUL       	t24, t26, t28  (-rSo)
+	  32: ICRFL       	t28, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0xFE88780:  40BDFFD4  bc 5,29,0xFE88754
+	  34: Jc29o       	$0xFE88754
+
+
+
+. 2930 FE88764 32
+. 81 6B 00 00 54 6A 10 3A 80 FE 01 40 7C CB 00 2E 80 A6 00 20 7D 2A 32 14 7F 85 18 40 40 BD FF D4
+==== BB 2931 (0xFE88784) approx BBs exec'd 0 ====
+
+	0xFE88784:  80E90024  lwz r7,36(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFE88788:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFE8878C:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFE88790:  7CE33B78  or r3,r7,r7
+	  14: GETL       	R7, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFE88794:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 2931 FE88784 20
+. 80 E9 00 24 83 C1 00 08 38 21 00 10 7C E3 3B 78 4E 80 00 20
+==== BB 2932 (0x10001544) approx BBs exec'd 0 ====
+
+	0x10001544:  88030000  lbz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x10001548:  7C7B1B78  or r27,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R27
+	   6: INCEIPL       	$4
+
+	0x1000154C:  2F800000  cmpi cr7,r0,0
+	   7: GETL       	R0, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x10001550:  409EFE58  bc 4,30,0x100013A8
+	  11: Jc30o       	$0x100013A8
+
+
+
+. 2932 10001544 16
+. 88 03 00 00 7C 7B 1B 78 2F 80 00 00 40 9E FE 58
+==== BB 2933 (0x100013A8) approx BBs exec'd 0 ====
+
+	0x100013A8:  38610030  addi r3,r1,48
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x30, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x100013AC:  480197B9  bl 0x1001AB64
+	   4: MOVL       	$0x100013B0, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0x1001AB64  ($4)
+
+
+
+. 2933 100013A8 8
+. 38 61 00 30 48 01 97 B9
+==== BB 2934 (0x1001AB64) approx BBs exec'd 0 ====
+
+	0x1001AB64:  39600018  li r11,24
+	   0: MOVL       	$0x18, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AB68:  4BFFFFA4  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 2934 1001AB64 8
+. 39 60 00 18 4B FF FF A4
+==== BB 2935 localtime(0xFEE5FB8) approx BBs exec'd 0 ====
+
+	0xFEE5FB8:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEE5FBC:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFEE5FC0:  480C1E91  bl 0xFFA7E50
+	   9: MOVL       	$0xFEE5FC4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2935 FEE5FB8 12
+. 94 21 FF F0 7C 88 02 A6 48 0C 1E 91
+==== BB 2936 (0xFEE5FC4) approx BBs exec'd 0 ====
+
+	0xFEE5FC4:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEE5FC8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEE5FCC:  90810014  stw r4,20(r1)
+	   8: GETL       	R4, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEE5FD0:  38800001  li r4,1
+	  13: MOVL       	$0x1, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFEE5FD4:  80BE1CC0  lwz r5,7360(r30)
+	  16: GETL       	R30, t12
+	  17: ADDL       	$0x1CC0, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R5
+	  20: INCEIPL       	$4
+
+	0xFEE5FD8:  48001BA9  bl 0xFEE7B80
+	  21: MOVL       	$0xFEE5FDC, t16
+	  22: PUTL       	t16, LR
+	  23: JMPo-c       	$0xFEE7B80  ($4)
+
+
+
+. 2936 FEE5FC4 24
+. 93 C1 00 08 7F C8 02 A6 90 81 00 14 38 80 00 01 80 BE 1C C0 48 00 1B A9
+==== BB 2937 __tz_convert(0xFEE7B80) approx BBs exec'd 0 ====
+
+	0xFEE7B80:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEE7B84:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEE7B88:  9361001C  stw r27,28(r1)
+	   9: GETL       	R27, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEE7B8C:  7C7B1B79  or. r27,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R27
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFEE7B90:  480C02C1  bl 0xFFA7E50
+	  19: MOVL       	$0xFEE7B94, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2937 FEE7B80 20
+. 94 21 FF D0 7C 08 02 A6 93 61 00 1C 7C 7B 1B 79 48 0C 02 C1
+==== BB 2938 (0xFEE7B94) approx BBs exec'd 0 ====
+
+	0xFEE7B94:  93810020  stw r28,32(r1)
+	   0: GETL       	R28, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x20, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEE7B98:  93C10028  stw r30,40(r1)
+	   5: GETL       	R30, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x28, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEE7B9C:  7C9C2378  or r28,r4,r4
+	  10: GETL       	R4, t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0xFEE7BA0:  93E1002C  stw r31,44(r1)
+	  13: GETL       	R31, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x2C, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEE7BA4:  7FC802A6  mflr r30
+	  18: GETL       	LR, t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0xFEE7BA8:  93410018  stw r26,24(r1)
+	  21: GETL       	R26, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x18, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEE7BAC:  7CBF2B78  or r31,r5,r5
+	  26: GETL       	R5, t20
+	  27: PUTL       	t20, R31
+	  28: INCEIPL       	$4
+
+	0xFEE7BB0:  93A10024  stw r29,36(r1)
+	  29: GETL       	R29, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x24, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFEE7BB4:  90010034  stw r0,52(r1)
+	  34: GETL       	R0, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x34, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFEE7BB8:  418201FC  bc 12,2,0xFEE7DB4
+	  39: Js02o       	$0xFEE7DB4
+
+
+
+. 2938 FEE7B94 40
+. 93 81 00 20 93 C1 00 28 7C 9C 23 78 93 E1 00 2C 7F C8 02 A6 93 41 00 18 7C BF 2B 78 93 A1 00 24 90 01 00 34 41 82 01 FC
+==== BB 2939 (0xFEE7BBC) approx BBs exec'd 0 ====
+
+	0xFEE7BBC:  83BE08A0  lwz r29,2208(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8A0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFEE7BC0:  39600000  li r11,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0xFEE7BC4:  38600001  li r3,1
+	   8: MOVL       	$0x1, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFEE7BC8:  7D20E828  lwarx r9,r0,r29
+	  11: GETL       	R29, t8
+	  12: LOCKo       	
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0xFEE7BCC:  7C095800  cmp cr0,r9,r11
+	  16: GETL       	R9, t12
+	  17: GETL       	R11, t14
+	  18: CMPL       	t12, t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0xFEE7BD0:  4082000C  bc 4,2,0xFEE7BDC
+	  21: Jc02o       	$0xFEE7BDC
+
+
+
+. 2939 FEE7BBC 24
+. 83 BE 08 A0 39 60 00 00 38 60 00 01 7D 20 E8 28 7C 09 58 00 40 82 00 0C
+==== BB 2940 (0xFEE7BD4) approx BBs exec'd 0 ====
+
+	0xFEE7BD4:  7C60E92D  stwcx. r3,r0,r29
+	   0: GETL       	R29, t0
+	   1: GETL       	R3, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEE7BD8:  40A2FFF0  bc 5,2,0xFEE7BC8
+	   6: Jc02o       	$0xFEE7BC8
+
+
+
+. 2940 FEE7BD4 8
+. 7C 60 E9 2D 40 A2 FF F0
+==== BB 2941 (0xFEE7BC8) approx BBs exec'd 0 ====
+
+	0xFEE7BC8:  7D20E828  lwarx r9,r0,r29
+	   0: GETL       	R29, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEE7BCC:  7C095800  cmp cr0,r9,r11
+	   5: GETL       	R9, t4
+	   6: GETL       	R11, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEE7BD0:  4082000C  bc 4,2,0xFEE7BDC
+	  10: Jc02o       	$0xFEE7BDC
+
+
+
+. 2941 FEE7BC8 12
+. 7D 20 E8 28 7C 09 58 00 40 82 00 0C
+==== BB 2942 (0xFEE7BDC) approx BBs exec'd 0 ====
+
+	0xFEE7BDC:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFEE7BE0:  2F890000  cmpi cr7,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEE7BE4:  409E01C4  bc 4,30,0xFEE7DA8
+	   5: Jc30o       	$0xFEE7DA8
+
+
+
+. 2942 FEE7BDC 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 C4
+==== BB 2943 (0xFEE7BE8) approx BBs exec'd 0 ====
+
+	0xFEE7BE8:  80DE1CC0  lwz r6,7360(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CC0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFEE7BEC:  38800000  li r4,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFEE7BF0:  835E1DC0  lwz r26,7616(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x1DC0, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0xFEE7BF4:  7FE33278  xor r3,r31,r6
+	  13: GETL       	R31, t10
+	  14: GETL       	R6, t12
+	  15: XORL       	t10, t12
+	  16: PUTL       	t12, R3
+	  17: INCEIPL       	$4
+
+	0xFEE7BF8:  20030000  subfic r0,r3,0
+	  18: GETL       	R3, t14
+	  19: MOVL       	$0x0, t16
+	  20: SBBL       	t14, t16  (-wCa)
+	  21: PUTL       	t16, R0
+	  22: INCEIPL       	$4
+
+	0xFEE7BFC:  7C601914  adde r3,r0,r3
+	  23: GETL       	R0, t18
+	  24: GETL       	R3, t20
+	  25: ADCL       	t18, t20  (-rCa-wCa)
+	  26: PUTL       	t20, R3
+	  27: INCEIPL       	$4
+
+	0xFEE7C00:  4BFFF1D9  bl 0xFEE6DD8
+	  28: MOVL       	$0xFEE7C04, t22
+	  29: PUTL       	t22, LR
+	  30: JMPo-c       	$0xFEE6DD8  ($4)
+
+
+
+. 2943 FEE7BE8 28
+. 80 DE 1C C0 38 80 00 00 83 5E 1D C0 7F E3 32 78 20 03 00 00 7C 60 19 14 4B FF F1 D9
+==== BB 2944 tzset_internal(0xFEE6DD8) approx BBs exec'd 0 ====
+
+	0xFEE6DD8:  9421FFB0  stwu r1,-80(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFB0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEE6DDC:  7CA802A6  mflr r5
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFEE6DE0:  480C1071  bl 0xFFA7E50
+	   9: MOVL       	$0xFEE6DE4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2944 FEE6DD8 12
+. 94 21 FF B0 7C A8 02 A6 48 0C 10 71
+==== BB 2945 (0xFEE6DE4) approx BBs exec'd 0 ====
+
+	0xFEE6DE4:  93C10048  stw r30,72(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x48, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEE6DE8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEE6DEC:  20030000  subfic r0,r3,0
+	   8: GETL       	R3, t6
+	   9: MOVL       	$0x0, t8
+	  10: SBBL       	t6, t8  (-wCa)
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0xFEE6DF0:  7C601914  adde r3,r0,r3
+	  13: GETL       	R0, t10
+	  14: GETL       	R3, t12
+	  15: ADCL       	t10, t12  (-rCa-wCa)
+	  16: PUTL       	t12, R3
+	  17: INCEIPL       	$4
+
+	0xFEE6DF4:  90A10054  stw r5,84(r1)
+	  18: GETL       	R5, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x54, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFEE6DF8:  7D800026  mfcr r12
+	  23: GETL       	CR, t18
+	  24: PUTL       	t18, R12
+	  25: INCEIPL       	$4
+
+	0xFEE6DFC:  93A10044  stw r29,68(r1)
+	  26: GETL       	R29, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x44, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFEE6E00:  7C9D2378  or r29,r4,r4
+	  31: GETL       	R4, t24
+	  32: PUTL       	t24, R29
+	  33: INCEIPL       	$4
+
+	0xFEE6E04:  817E0894  lwz r11,2196(r30)
+	  34: GETL       	R30, t26
+	  35: ADDL       	$0x894, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R11
+	  38: INCEIPL       	$4
+
+	0xFEE6E08:  93E1004C  stw r31,76(r1)
+	  39: GETL       	R31, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x4C, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFEE6E0C:  7C3F0B78  or r31,r1,r1
+	  44: GETL       	R1, t34
+	  45: PUTL       	t34, R31
+	  46: INCEIPL       	$4
+
+	0xFEE6E10:  812B0000  lwz r9,0(r11)
+	  47: GETL       	R11, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R9
+	  50: INCEIPL       	$4
+
+	0xFEE6E14:  92C10028  stw r22,40(r1)
+	  51: GETL       	R22, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x28, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0xFEE6E18:  3149FFFF  addic r10,r9,-1
+	  56: GETL       	R9, t44
+	  57: ADCL       	$0xFFFFFFFF, t44  (-wCa)
+	  58: PUTL       	t44, R10
+	  59: INCEIPL       	$4
+
+	0xFEE6E1C:  7C0A4910  subfe r0,r10,r9
+	  60: GETL       	R10, t46
+	  61: GETL       	R9, t48
+	  62: SBBL       	t46, t48  (-rCa-wCa)
+	  63: PUTL       	t48, R0
+	  64: INCEIPL       	$4
+
+	0xFEE6E20:  92E1002C  stw r23,44(r1)
+	  65: GETL       	R23, t50
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x2C, t52
+	  68: STL       	t50, (t52)
+	  69: INCEIPL       	$4
+
+	0xFEE6E24:  7C091839  and. r9,r0,r3
+	  70: GETL       	R0, t54
+	  71: GETL       	R3, t56
+	  72: ANDL       	t54, t56
+	  73: PUTL       	t56, R9
+	  74: CMP0L       	t56, t58  (-rSo)
+	  75: ICRFL       	t58, $0x0, CR
+	  76: INCEIPL       	$4
+
+	0xFEE6E28:  93010030  stw r24,48(r1)
+	  77: GETL       	R24, t60
+	  78: GETL       	R1, t62
+	  79: ADDL       	$0x30, t62
+	  80: STL       	t60, (t62)
+	  81: INCEIPL       	$4
+
+	0xFEE6E2C:  93210034  stw r25,52(r1)
+	  82: GETL       	R25, t64
+	  83: GETL       	R1, t66
+	  84: ADDL       	$0x34, t66
+	  85: STL       	t64, (t66)
+	  86: INCEIPL       	$4
+
+	0xFEE6E30:  93410038  stw r26,56(r1)
+	  87: GETL       	R26, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x38, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0xFEE6E34:  9361003C  stw r27,60(r1)
+	  92: GETL       	R27, t72
+	  93: GETL       	R1, t74
+	  94: ADDL       	$0x3C, t74
+	  95: STL       	t72, (t74)
+	  96: INCEIPL       	$4
+
+	0xFEE6E38:  93810040  stw r28,64(r1)
+	  97: GETL       	R28, t76
+	  98: GETL       	R1, t78
+	  99: ADDL       	$0x40, t78
+	 100: STL       	t76, (t78)
+	 101: INCEIPL       	$4
+
+	0xFEE6E3C:  91810024  stw r12,36(r1)
+	 102: GETL       	R12, t80
+	 103: GETL       	R1, t82
+	 104: ADDL       	$0x24, t82
+	 105: STL       	t80, (t82)
+	 106: INCEIPL       	$4
+
+	0xFEE6E40:  41820048  bc 12,2,0xFEE6E88
+	 107: Js02o       	$0xFEE6E88
+
+
+
+. 2945 FEE6DE4 96
+. 93 C1 00 48 7F C8 02 A6 20 03 00 00 7C 60 19 14 90 A1 00 54 7D 80 00 26 93 A1 00 44 7C 9D 23 78 81 7E 08 94 93 E1 00 4C 7C 3F 0B 78 81 2B 00 00 92 C1 00 28 31 49 FF FF 7C 0A 49 10 92 E1 00 2C 7C 09 18 39 93 01 00 30 93 21 00 34 93 41 00 38 93 61 00 3C 93 81 00 40 91 81 00 24 41 82 00 48
+==== BB 2946 (0xFEE6E88) approx BBs exec'd 0 ====
+
+	0xFEE6E88:  38E00001  li r7,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0xFEE6E8C:  807E08A4  lwz r3,2212(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x8A4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE6E90:  90EB0000  stw r7,0(r11)
+	   8: GETL       	R7, t6
+	   9: GETL       	R11, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFEE6E94:  4BFAC305  bl 0xFE93198
+	  12: MOVL       	$0xFEE6E98, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0xFE93198  ($4)
+
+
+
+. 2946 FEE6E88 16
+. 38 E0 00 01 80 7E 08 A4 90 EB 00 00 4B FA C3 05
+==== BB 2947 (0xFEE6E98) approx BBs exec'd 0 ====
+
+	0xFEE6E98:  201D0000  subfic r0,r29,0
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x0, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEE6E9C:  7C80E914  adde r4,r0,r29
+	   5: GETL       	R0, t4
+	   6: GETL       	R29, t6
+	   7: ADCL       	t4, t6  (-rCa-wCa)
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFEE6EA0:  2F030000  cmpi cr6,r3,0
+	  10: GETL       	R3, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFEE6EA4:  7C7C1B78  or r28,r3,r3
+	  14: GETL       	R3, t12
+	  15: PUTL       	t12, R28
+	  16: INCEIPL       	$4
+
+	0xFEE6EA8:  7CC00026  mfcr r6
+	  17: GETL       	CR, t14
+	  18: PUTL       	t14, R6
+	  19: INCEIPL       	$4
+
+	0xFEE6EAC:  54C6DFFE  rlwinm r6,r6,27,31,31
+	  20: GETL       	R6, t16
+	  21: ROLL       	$0x1B, t16
+	  22: ANDL       	$0x1, t16
+	  23: PUTL       	t16, R6
+	  24: INCEIPL       	$4
+
+	0xFEE6EB0:  7CCA2039  and. r10,r6,r4
+	  25: GETL       	R6, t18
+	  26: GETL       	R4, t20
+	  27: ANDL       	t18, t20
+	  28: PUTL       	t20, R10
+	  29: CMP0L       	t20, t22  (-rSo)
+	  30: ICRFL       	t22, $0x0, CR
+	  31: INCEIPL       	$4
+
+	0xFEE6EB4:  408201F4  bc 4,2,0xFEE70A8
+	  32: Jc02o       	$0xFEE70A8
+
+
+
+. 2947 FEE6E98 32
+. 20 1D 00 00 7C 80 E9 14 2F 03 00 00 7C 7C 1B 78 7C C0 00 26 54 C6 DF FE 7C CA 20 39 40 82 01 F4
+==== BB 2948 (0xFEE70A8) approx BBs exec'd 0 ====
+
+	0xFEE70A8:  839E08AC  lwz r28,2220(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8AC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFEE70AC:  2F1C0000  cmpi cr6,r28,0
+	   5: GETL       	R28, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFEE70B0:  4BFFFE08  b 0xFEE6EB8
+	   9: JMPo       	$0xFEE6EB8  ($4)
+
+
+
+. 2948 FEE70A8 12
+. 83 9E 08 AC 2F 1C 00 00 4B FF FE 08
+==== BB 2949 (0xFEE6EB8) approx BBs exec'd 0 ====
+
+	0xFEE6EB8:  419A0030  bc 12,26,0xFEE6EE8
+	   0: Js26o       	$0xFEE6EE8
+
+
+
+. 2949 FEE6EB8 4
+. 41 9A 00 30
+==== BB 2950 (0xFEE6EBC) approx BBs exec'd 0 ====
+
+	0xFEE6EBC:  891C0000  lbz r8,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0xFEE6EC0:  2F880000  cmpi cr7,r8,0
+	   4: GETL       	R8, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFEE6EC4:  409E000C  bc 4,30,0xFEE6ED0
+	   8: Jc30o       	$0xFEE6ED0
+
+
+
+. 2950 FEE6EBC 12
+. 89 1C 00 00 2F 88 00 00 40 9E 00 0C
+==== BB 2951 (0xFEE6ED0) approx BBs exec'd 0 ====
+
+	0xFEE6ED0:  419A0018  bc 12,26,0xFEE6EE8
+	   0: Js26o       	$0xFEE6EE8
+
+
+
+. 2951 FEE6ED0 4
+. 41 9A 00 18
+==== BB 2952 (0xFEE6ED4) approx BBs exec'd 0 ====
+
+	0xFEE6ED4:  895C0000  lbz r10,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFEE6ED8:  694B003A  xori r11,r10,0x3A
+	   4: GETL       	R10, t4
+	   5: XORL       	$0x3A, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0xFEE6EDC:  216B0000  subfic r11,r11,0
+	   8: GETL       	R11, t6
+	   9: MOVL       	$0x0, t8
+	  10: SBBL       	t6, t8  (-wCa)
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0xFEE6EE0:  7D7C0194  addze r11,r28
+	  13: GETL       	R28, t10
+	  14: ADCL       	$0x0, t10  (-rCa-wCa)
+	  15: PUTL       	t10, R11
+	  16: INCEIPL       	$4
+
+	0xFEE6EE4:  7D7C5B78  or r28,r11,r11
+	  17: GETL       	R11, t12
+	  18: PUTL       	t12, R28
+	  19: INCEIPL       	$4
+
+	0xFEE6EE8:  835E0898  lwz r26,2200(r30)
+	  20: GETL       	R30, t14
+	  21: ADDL       	$0x898, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R26
+	  24: INCEIPL       	$4
+
+	0xFEE6EEC:  313CFFFF  addic r9,r28,-1
+	  25: GETL       	R28, t18
+	  26: ADCL       	$0xFFFFFFFF, t18  (-wCa)
+	  27: PUTL       	t18, R9
+	  28: INCEIPL       	$4
+
+	0xFEE6EF0:  7EC9E110  subfe r22,r9,r28
+	  29: GETL       	R9, t20
+	  30: GETL       	R28, t22
+	  31: SBBL       	t20, t22  (-rCa-wCa)
+	  32: PUTL       	t22, R22
+	  33: INCEIPL       	$4
+
+	0xFEE6EF4:  83BA0000  lwz r29,0(r26)
+	  34: GETL       	R26, t24
+	  35: LDL       	(t24), t26
+	  36: PUTL       	t26, R29
+	  37: INCEIPL       	$4
+
+	0xFEE6EF8:  315DFFFF  addic r10,r29,-1
+	  38: GETL       	R29, t28
+	  39: ADCL       	$0xFFFFFFFF, t28  (-wCa)
+	  40: PUTL       	t28, R10
+	  41: INCEIPL       	$4
+
+	0xFEE6EFC:  7EEAE910  subfe r23,r10,r29
+	  42: GETL       	R10, t30
+	  43: GETL       	R29, t32
+	  44: SBBL       	t30, t32  (-rCa-wCa)
+	  45: PUTL       	t32, R23
+	  46: INCEIPL       	$4
+
+	0xFEE6F00:  7EEBB039  and. r11,r23,r22
+	  47: GETL       	R23, t34
+	  48: GETL       	R22, t36
+	  49: ANDL       	t34, t36
+	  50: PUTL       	t36, R11
+	  51: CMP0L       	t36, t38  (-rSo)
+	  52: ICRFL       	t38, $0x0, CR
+	  53: INCEIPL       	$4
+
+	0xFEE6F04:  41820018  bc 12,2,0xFEE6F1C
+	  54: Js02o       	$0xFEE6F1C
+
+
+
+. 2952 FEE6ED4 52
+. 89 5C 00 00 69 4B 00 3A 21 6B 00 00 7D 7C 01 94 7D 7C 5B 78 83 5E 08 98 31 3C FF FF 7E C9 E1 10 83 BA 00 00 31 5D FF FF 7E EA E9 10 7E EB B0 39 41 82 00 18
+==== BB 2953 (0xFEE6F1C) approx BBs exec'd 0 ====
+
+	0xFEE6F1C:  2E1C0000  cmpi cr4,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFEE6F20:  41920458  bc 12,18,0xFEE7378
+	   4: Js18o       	$0xFEE7378
+
+
+
+. 2953 FEE6F1C 8
+. 2E 1C 00 00 41 92 04 58
+==== BB 2954 (0xFEE6F24) approx BBs exec'd 0 ====
+
+	0xFEE6F24:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFEE6F28:  837E089C  lwz r27,2204(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x89C, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFEE6F2C:  3B000000  li r24,0
+	   9: MOVL       	$0x0, t8
+	  10: PUTL       	t8, R24
+	  11: INCEIPL       	$4
+
+	0xFEE6F30:  931B0020  stw r24,32(r27)
+	  12: GETL       	R24, t10
+	  13: GETL       	R27, t12
+	  14: ADDL       	$0x20, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0xFEE6F34:  931B0000  stw r24,0(r27)
+	  17: GETL       	R24, t14
+	  18: GETL       	R27, t16
+	  19: STL       	t14, (t16)
+	  20: INCEIPL       	$4
+
+	0xFEE6F38:  409A017C  bc 4,26,0xFEE70B4
+	  21: Jc26o       	$0xFEE70B4
+
+
+
+. 2954 FEE6F24 24
+. 2F 1D 00 00 83 7E 08 9C 3B 00 00 00 93 1B 00 20 93 1B 00 00 40 9A 01 7C
+==== BB 2955 (0xFEE6F3C) approx BBs exec'd 0 ====
+
+	0xFEE6F3C:  7F5DD378  or r29,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFEE6F40:  40920180  bc 4,18,0xFEE70C0
+	   3: Jc18o       	$0xFEE70C0
+
+
+
+. 2955 FEE6F3C 8
+. 7F 5D D3 78 40 92 01 80
+==== BB 2956 (0xFEE70C0) approx BBs exec'd 0 ====
+
+	0xFEE70C0:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEE70C4:  4BFEF6D9  bl 0xFED679C
+	   3: MOVL       	$0xFEE70C8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFED679C  ($4)
+
+
+
+. 2956 FEE70C0 8
+. 7F 83 E3 78 4B FE F6 D9
+==== BB 2957 (0xFEE70C8) approx BBs exec'd 0 ====
+
+	0xFEE70C8:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEE70CC:  4BFFFE7C  b 0xFEE6F48
+	   3: JMPo       	$0xFEE6F48  ($4)
+
+
+
+. 2957 FEE70C8 8
+. 7C 60 1B 78 4B FF FE 7C
+==== BB 2958 (0xFEE6F48) approx BBs exec'd 0 ====
+
+	0xFEE6F48:  833E1DC0  lwz r25,7616(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DC0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0xFEE6F4C:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE6F50:  901D0000  stw r0,0(r29)
+	   8: GETL       	R0, t6
+	   9: GETL       	R29, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFEE6F54:  38800000  li r4,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0xFEE6F58:  38A00000  li r5,0
+	  15: MOVL       	$0x0, t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0xFEE6F5C:  48000E8D  bl 0xFEE7DE8
+	  18: MOVL       	$0xFEE6F60, t14
+	  19: PUTL       	t14, LR
+	  20: JMPo-c       	$0xFEE7DE8  ($4)
+
+
+
+. 2958 FEE6F48 24
+. 83 3E 1D C0 7F 83 E3 78 90 1D 00 00 38 80 00 00 38 A0 00 00 48 00 0E 8D
+==== BB 2959 __tzfile_read(0xFEE7DE8) approx BBs exec'd 0 ====
+
+	0xFEE7DE8:  9421FEF0  stwu r1,-272(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFEF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEE7DEC:  7CC802A6  mflr r6
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0xFEE7DF0:  480C0061  bl 0xFFA7E50
+	   9: MOVL       	$0xFEE7DF4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2959 FEE7DE8 12
+. 94 21 FE F0 7C C8 02 A6 48 0C 00 61
+==== BB 2960 (0xFEE7DF4) approx BBs exec'd 0 ====
+
+	0xFEE7DF4:  93C10108  stw r30,264(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x108, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEE7DF8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEE7DFC:  936100FC  stw r27,252(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xFC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEE7E00:  7C7B1B79  or. r27,r3,r3
+	  13: GETL       	R3, t10
+	  14: PUTL       	t10, R27
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0xFEE7E04:  928100E0  stw r20,224(r1)
+	  18: GETL       	R20, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0xE0, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFEE7E08:  90C10114  stw r6,276(r1)
+	  23: GETL       	R6, t18
+	  24: GETL       	R1, t20
+	  25: ADDL       	$0x114, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0xFEE7E0C:  38000000  li r0,0
+	  28: MOVL       	$0x0, t22
+	  29: PUTL       	t22, R0
+	  30: INCEIPL       	$4
+
+	0xFEE7E10:  829E1DC0  lwz r20,7616(r30)
+	  31: GETL       	R30, t24
+	  32: ADDL       	$0x1DC0, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R20
+	  35: INCEIPL       	$4
+
+	0xFEE7E14:  92C100E8  stw r22,232(r1)
+	  36: GETL       	R22, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0xE8, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0xFEE7E18:  7CB62B78  or r22,r5,r5
+	  41: GETL       	R5, t32
+	  42: PUTL       	t32, R22
+	  43: INCEIPL       	$4
+
+	0xFEE7E1C:  92E100EC  stw r23,236(r1)
+	  44: GETL       	R23, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0xEC, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFEE7E20:  7C972378  or r23,r4,r4
+	  49: GETL       	R4, t38
+	  50: PUTL       	t38, R23
+	  51: INCEIPL       	$4
+
+	0xFEE7E24:  930100F0  stw r24,240(r1)
+	  52: GETL       	R24, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0xF0, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0xFEE7E28:  93E1010C  stw r31,268(r1)
+	  57: GETL       	R31, t44
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x10C, t46
+	  60: STL       	t44, (t46)
+	  61: INCEIPL       	$4
+
+	0xFEE7E2C:  7C3F0B78  or r31,r1,r1
+	  62: GETL       	R1, t48
+	  63: PUTL       	t48, R31
+	  64: INCEIPL       	$4
+
+	0xFEE7E30:  83140000  lwz r24,0(r20)
+	  65: GETL       	R20, t50
+	  66: LDL       	(t50), t52
+	  67: PUTL       	t52, R24
+	  68: INCEIPL       	$4
+
+	0xFEE7E34:  91C100C8  stw r14,200(r1)
+	  69: GETL       	R14, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0xC8, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0xFEE7E38:  91E100CC  stw r15,204(r1)
+	  74: GETL       	R15, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0xCC, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0xFEE7E3C:  920100D0  stw r16,208(r1)
+	  79: GETL       	R16, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0xD0, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0xFEE7E40:  922100D4  stw r17,212(r1)
+	  84: GETL       	R17, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0xD4, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0xFEE7E44:  924100D8  stw r18,216(r1)
+	  89: GETL       	R18, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	$0xD8, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0xFEE7E48:  926100DC  stw r19,220(r1)
+	  94: GETL       	R19, t74
+	  95: GETL       	R1, t76
+	  96: ADDL       	$0xDC, t76
+	  97: STL       	t74, (t76)
+	  98: INCEIPL       	$4
+
+	0xFEE7E4C:  92A100E4  stw r21,228(r1)
+	  99: GETL       	R21, t78
+	 100: GETL       	R1, t80
+	 101: ADDL       	$0xE4, t80
+	 102: STL       	t78, (t80)
+	 103: INCEIPL       	$4
+
+	0xFEE7E50:  932100F4  stw r25,244(r1)
+	 104: GETL       	R25, t82
+	 105: GETL       	R1, t84
+	 106: ADDL       	$0xF4, t84
+	 107: STL       	t82, (t84)
+	 108: INCEIPL       	$4
+
+	0xFEE7E54:  934100F8  stw r26,248(r1)
+	 109: GETL       	R26, t86
+	 110: GETL       	R1, t88
+	 111: ADDL       	$0xF8, t88
+	 112: STL       	t86, (t88)
+	 113: INCEIPL       	$4
+
+	0xFEE7E58:  93810100  stw r28,256(r1)
+	 114: GETL       	R28, t90
+	 115: GETL       	R1, t92
+	 116: ADDL       	$0x100, t92
+	 117: STL       	t90, (t92)
+	 118: INCEIPL       	$4
+
+	0xFEE7E5C:  93A10104  stw r29,260(r1)
+	 119: GETL       	R29, t94
+	 120: GETL       	R1, t96
+	 121: ADDL       	$0x104, t96
+	 122: STL       	t94, (t96)
+	 123: INCEIPL       	$4
+
+	0xFEE7E60:  90140000  stw r0,0(r20)
+	 124: GETL       	R0, t98
+	 125: GETL       	R20, t100
+	 126: STL       	t98, (t100)
+	 127: INCEIPL       	$4
+
+	0xFEE7E64:  418200AC  bc 12,2,0xFEE7F10
+	 128: Js02o       	$0xFEE7F10
+
+
+
+. 2960 FEE7DF4 116
+. 93 C1 01 08 7F C8 02 A6 93 61 00 FC 7C 7B 1B 79 92 81 00 E0 90 C1 01 14 38 00 00 00 82 9E 1D C0 92 C1 00 E8 7C B6 2B 78 92 E1 00 EC 7C 97 23 78 93 01 00 F0 93 E1 01 0C 7C 3F 0B 78 83 14 00 00 91 C1 00 C8 91 E1 00 CC 92 01 00 D0 92 21 00 D4 92 41 00 D8 92 61 00 DC 92 A1 00 E4 93 21 00 F4 93 41 00 F8 93 81 01 00 93 A1 01 04 90 14 00 00 41 82 00 AC
+==== BB 2961 (0xFEE7E68) approx BBs exec'd 0 ====
+
+	0xFEE7E68:  8BBB0000  lbz r29,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0xFEE7E6C:  7FABEB79  or. r11,r29,r29
+	   4: GETL       	R29, t4
+	   5: PUTL       	t4, R11
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFEE7E70:  418202FC  bc 12,2,0xFEE816C
+	   9: Js02o       	$0xFEE816C
+
+
+
+. 2961 FEE7E68 12
+. 8B BB 00 00 7F AB EB 79 41 82 02 FC
+==== BB 2962 (0xFEE7E74) approx BBs exec'd 0 ====
+
+	0xFEE7E74:  813E1D70  lwz r9,7536(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1D70, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEE7E78:  80890000  lwz r4,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0xFEE7E7C:  2F840000  cmpi cr7,r4,0
+	   9: GETL       	R4, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFEE7E80:  419E0098  bc 12,30,0xFEE7F18
+	  13: Js30o       	$0xFEE7F18
+
+
+
+. 2962 FEE7E74 16
+. 81 3E 1D 70 80 89 00 00 2F 84 00 00 41 9E 00 98
+==== BB 2963 (0xFEE7F18) approx BBs exec'd 0 ====
+
+	0xFEE7F18:  2C9D002F  cmpi cr1,r29,47
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x2F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFEE7F1C:  41860080  bc 12,6,0xFEE7F9C
+	   5: Js06o       	$0xFEE7F9C
+
+
+
+. 2963 FEE7F18 8
+. 2C 9D 00 2F 41 86 00 80
+==== BB 2964 (0xFEE7F9C) approx BBs exec'd 0 ====
+
+	0xFEE7F9C:  2F980000  cmpi cr7,r24,0
+	   0: GETL       	R24, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFEE7FA0:  419E001C  bc 12,30,0xFEE7FBC
+	   4: Js30o       	$0xFEE7FBC
+
+
+
+. 2964 FEE7F9C 8
+. 2F 98 00 00 41 9E 00 1C
+==== BB 2965 (0xFEE7FBC) approx BBs exec'd 0 ====
+
+	0xFEE7FBC:  809E090C  lwz r4,2316(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x90C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEE7FC0:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE7FC4:  4BFD45D1  bl 0xFEBC594
+	   8: MOVL       	$0xFEE7FC8, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFEBC594  ($4)
+
+
+
+. 2965 FEE7FBC 12
+. 80 9E 09 0C 7F 63 DB 78 4B FD 45 D1
+==== BB 2966 (0xFEE7FC8) approx BBs exec'd 0 ====
+
+	0xFEE7FC8:  7C7B1B79  or. r27,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R27
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEE7FCC:  418201A0  bc 12,2,0xFEE816C
+	   5: Js02o       	$0xFEE816C
+
+
+
+. 2966 FEE7FC8 8
+. 7C 7B 1B 79 41 82 01 A0
+==== BB 2967 (0xFEE7FD0) approx BBs exec'd 0 ====
+
+	0xFEE7FD0:  4BFDD015  bl 0xFEC4FE4
+	   0: MOVL       	$0xFEE7FD4, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFEC4FE4  ($4)
+
+
+
+. 2967 FEE7FD0 4
+. 4B FD D0 15
+==== BB 2968 fileno_unlocked(0xFEC4FE4) approx BBs exec'd 0 ====
+
+	0xFEC4FE4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEC4FE8:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFEC4FEC:  80030000  lwz r0,0(r3)
+	   9: GETL       	R3, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0xFEC4FF0:  480E2E61  bl 0xFFA7E50
+	  13: MOVL       	$0xFEC4FF4, t10
+	  14: PUTL       	t10, LR
+	  15: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2968 FEC4FE4 16
+. 94 21 FF F0 7D 88 02 A6 80 03 00 00 48 0E 2E 61
+==== BB 2969 (0xFEC4FF4) approx BBs exec'd 0 ====
+
+	0xFEC4FF4:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEC4FF8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEC4FFC:  70092000  andi. r9,r0,0x2000
+	   8: GETL       	R0, t6
+	   9: ANDL       	$0x2000, t6
+	  10: PUTL       	t6, R9
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0xFEC5000:  7D8803A6  mtlr r12
+	  14: GETL       	R12, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFEC5004:  813E1C4C  lwz r9,7244(r30)
+	  17: GETL       	R30, t12
+	  18: ADDL       	$0x1C4C, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R9
+	  21: INCEIPL       	$4
+
+	0xFEC5008:  7D291214  add r9,r9,r2
+	  22: GETL       	R9, t16
+	  23: GETL       	R2, t18
+	  24: ADDL       	t16, t18
+	  25: PUTL       	t18, R9
+	  26: INCEIPL       	$4
+
+	0xFEC500C:  41820020  bc 12,2,0xFEC502C
+	  27: Js02o       	$0xFEC502C
+
+
+
+. 2969 FEC4FF4 28
+. 93 C1 00 08 7F C8 02 A6 70 09 20 00 7D 88 03 A6 81 3E 1C 4C 7D 29 12 14 41 82 00 20
+==== BB 2970 (0xFEC5010) approx BBs exec'd 0 ====
+
+	0xFEC5010:  80030038  lwz r0,56(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x38, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEC5014:  2F800000  cmpi cr7,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFEC5018:  419C0014  bc 12,28,0xFEC502C
+	   9: Js28o       	$0xFEC502C
+
+
+
+. 2970 FEC5010 12
+. 80 03 00 38 2F 80 00 00 41 9C 00 14
+==== BB 2971 (0xFEC501C) approx BBs exec'd 0 ====
+
+	0xFEC501C:  7C030378  or r3,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC5020:  83C10008  lwz r30,8(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEC5024:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xFEC5028:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 2971 FEC501C 16
+. 7C 03 03 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 2972 (0xFEE7FD4) approx BBs exec'd 0 ====
+
+	0xFEE7FD4:  38BF0040  addi r5,r31,64
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x40, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0xFEE7FD8:  7C641B78  or r4,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0xFEE7FDC:  38600003  li r3,3
+	   7: MOVL       	$0x3, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0xFEE7FE0:  480392E1  bl 0xFF212C0
+	  10: MOVL       	$0xFEE7FE4, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0xFF212C0  ($4)
+
+
+
+. 2972 FEE7FD4 16
+. 38 BF 00 40 7C 64 1B 78 38 60 00 03 48 03 92 E1
+==== BB 2973 (0xFEE7FE4) approx BBs exec'd 0 ====
+
+	0xFEE7FE4:  7C7D1B79  or. r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEE7FE8:  408204E0  bc 4,2,0xFEE84C8
+	   5: Jc02o       	$0xFEE84C8
+
+
+
+. 2973 FEE7FE4 8
+. 7C 7D 1B 79 40 82 04 E0
+==== BB 2974 (0xFEE7FEC) approx BBs exec'd 0 ====
+
+	0xFEE7FEC:  827E08CC  lwz r19,2252(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8CC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0xFEE7FF0:  839E08D0  lwz r28,2256(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x8D0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFEE7FF4:  80730000  lwz r3,0(r19)
+	  10: GETL       	R19, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R3
+	  13: INCEIPL       	$4
+
+	0xFEE7FF8:  480C2339  bl 0xFFAA330
+	  14: MOVL       	$0xFEE7FFC, t12
+	  15: PUTL       	t12, LR
+	  16: JMPo-c       	$0xFFAA330  ($4)
+
+
+
+. 2974 FEE7FEC 16
+. 82 7E 08 CC 83 9E 08 D0 80 73 00 00 48 0C 23 39
+==== BB 2975 (0xFEE7FFC) approx BBs exec'd 0 ====
+
+	0xFEE7FFC:  C81F0040  lfd f0,64(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x40, t0
+	   2: FPU_RQ       	(t0), 0x0:0x0
+	   3: INCEIPL       	$4
+
+	0xFEE8000:  C9BF0048  lfd f13,72(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x48, t2
+	   6: FPU_RQ       	(t2), 0x0:0xD
+	   7: INCEIPL       	$4
+
+	0xFEE8004:  38800002  li r4,2
+	   8: MOVL       	$0x2, t4
+	   9: PUTL       	t4, R4
+	  10: INCEIPL       	$4
+
+	0xFEE8008:  80BF0090  lwz r5,144(r31)
+	  11: GETL       	R31, t6
+	  12: ADDL       	$0x90, t6
+	  13: LDL       	(t6), t8
+	  14: PUTL       	t8, R5
+	  15: INCEIPL       	$4
+
+	0xFEE800C:  7F63DB78  or r3,r27,r27
+	  16: GETL       	R27, t10
+	  17: PUTL       	t10, R3
+	  18: INCEIPL       	$4
+
+	0xFEE8010:  813E08D4  lwz r9,2260(r30)
+	  19: GETL       	R30, t12
+	  20: ADDL       	$0x8D4, t12
+	  21: LDL       	(t12), t14
+	  22: PUTL       	t14, R9
+	  23: INCEIPL       	$4
+
+	0xFEE8014:  93B30000  stw r29,0(r19)
+	  24: GETL       	R29, t16
+	  25: GETL       	R19, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0xFEE8018:  83BE08D8  lwz r29,2264(r30)
+	  28: GETL       	R30, t20
+	  29: ADDL       	$0x8D8, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R29
+	  32: INCEIPL       	$4
+
+	0xFEE801C:  90A90000  stw r5,0(r9)
+	  33: GETL       	R5, t24
+	  34: GETL       	R9, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xFEE8020:  D81C0000  stfd f0,0(r28)
+	  37: GETL       	R28, t28
+	  38: FPU_WQ       	0x0:0x0, (t28)
+	  39: INCEIPL       	$4
+
+	0xFEE8024:  D9BD0000  stfd f13,0(r29)
+	  40: GETL       	R29, t30
+	  41: FPU_WQ       	0x0:0xD, (t30)
+	  42: INCEIPL       	$4
+
+	0xFEE8028:  4BFDF335  bl 0xFEC735C
+	  43: MOVL       	$0xFEE802C, t32
+	  44: PUTL       	t32, LR
+	  45: JMPo-c       	$0xFEC735C  ($4)
+
+
+
+. 2975 FEE7FFC 48
+. C8 1F 00 40 C9 BF 00 48 38 80 00 02 80 BF 00 90 7F 63 DB 78 81 3E 08 D4 93 B3 00 00 83 BE 08 D8 90 A9 00 00 D8 1C 00 00 D9 BD 00 00 4B FD F3 35
+==== BB 2976 (0xFEE802C) approx BBs exec'd 0 ====
+
+	0xFEE802C:  387F0010  addi r3,r31,16
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFEE8030:  3880002C  li r4,44
+	   4: MOVL       	$0x2C, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0xFEE8034:  38A00001  li r5,1
+	   7: MOVL       	$0x1, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0xFEE8038:  7F66DB78  or r6,r27,r27
+	  10: GETL       	R27, t6
+	  11: PUTL       	t6, R6
+	  12: INCEIPL       	$4
+
+	0xFEE803C:  4BFDFEA1  bl 0xFEC7EDC
+	  13: MOVL       	$0xFEE8040, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0xFEC7EDC  ($4)
+
+
+
+. 2976 FEE802C 20
+. 38 7F 00 10 38 80 00 2C 38 A0 00 01 7F 66 DB 78 4B FD FE A1
+==== BB 2977 fread_unlocked(0xFEC7EDC) approx BBs exec'd 0 ====
+
+	0xFEC7EDC:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEC7EE0:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEC7EE4:  93810010  stw r28,16(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEC7EE8:  7F8429D7  mullw. r28,r4,r5
+	  14: GETL       	R4, t10
+	  15: GETL       	R5, t12
+	  16: MULL       	t10, t12
+	  17: PUTL       	t12, R28
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0xFEC7EEC:  93A10014  stw r29,20(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEC7EF0:  7C9D2378  or r29,r4,r4
+	  26: GETL       	R4, t20
+	  27: PUTL       	t20, R29
+	  28: INCEIPL       	$4
+
+	0xFEC7EF4:  93E1001C  stw r31,28(r1)
+	  29: GETL       	R31, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x1C, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFEC7EF8:  7C641B78  or r4,r3,r3
+	  34: GETL       	R3, t26
+	  35: PUTL       	t26, R4
+	  36: INCEIPL       	$4
+
+	0xFEC7EFC:  90010024  stw r0,36(r1)
+	  37: GETL       	R0, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x24, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFEC7F00:  7CBF2B78  or r31,r5,r5
+	  42: GETL       	R5, t32
+	  43: PUTL       	t32, R31
+	  44: INCEIPL       	$4
+
+	0xFEC7F04:  93C10018  stw r30,24(r1)
+	  45: GETL       	R30, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x18, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFEC7F08:  7CC33378  or r3,r6,r6
+	  50: GETL       	R6, t38
+	  51: PUTL       	t38, R3
+	  52: INCEIPL       	$4
+
+	0xFEC7F0C:  7F85E378  or r5,r28,r28
+	  53: GETL       	R28, t40
+	  54: PUTL       	t40, R5
+	  55: INCEIPL       	$4
+
+	0xFEC7F10:  38000000  li r0,0
+	  56: MOVL       	$0x0, t42
+	  57: PUTL       	t42, R0
+	  58: INCEIPL       	$4
+
+	0xFEC7F14:  40820028  bc 4,2,0xFEC7F3C
+	  59: Jc02o       	$0xFEC7F3C
+
+
+
+. 2977 FEC7EDC 60
+. 94 21 FF E0 7C 08 02 A6 93 81 00 10 7F 84 29 D7 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C 64 1B 78 90 01 00 24 7C BF 2B 78 93 C1 00 18 7C C3 33 78 7F 85 E3 78 38 00 00 00 40 82 00 28
+==== BB 2978 (0xFEC7F3C) approx BBs exec'd 0 ====
+
+	0xFEC7F3C:  48003F35  bl 0xFECBE70
+	   0: MOVL       	$0xFEC7F40, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFECBE70  ($4)
+
+
+
+. 2978 FEC7F3C 4
+. 48 00 3F 35
+==== BB 2979 _IO_sgetn_internal(0xFECBE70) approx BBs exec'd 0 ====
+
+	0xFECBE70:  7D8802A6  mflr r12
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0xFECBE74:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECBE78:  91810014  stw r12,20(r1)
+	   9: GETL       	R12, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECBE7C:  89030046  lbz r8,70(r3)
+	  14: GETL       	R3, t10
+	  15: ADDL       	$0x46, t10
+	  16: LDB       	(t10), t12
+	  17: PUTL       	t12, R8
+	  18: INCEIPL       	$4
+
+	0xFECBE80:  7D070774  extsb r7,r8
+	  19: GETB       	R8, t14
+	  20: WIDENL_Bs       	_st14
+	  21: PUTL       	t14, R7
+	  22: INCEIPL       	$4
+
+	0xFECBE84:  7D271A14  add r9,r7,r3
+	  23: GETL       	R7, t16
+	  24: GETL       	R3, t18
+	  25: ADDL       	t16, t18
+	  26: PUTL       	t18, R9
+	  27: INCEIPL       	$4
+
+	0xFECBE88:  80C90098  lwz r6,152(r9)
+	  28: GETL       	R9, t20
+	  29: ADDL       	$0x98, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R6
+	  32: INCEIPL       	$4
+
+	0xFECBE8C:  81660020  lwz r11,32(r6)
+	  33: GETL       	R6, t24
+	  34: ADDL       	$0x20, t24
+	  35: LDL       	(t24), t26
+	  36: PUTL       	t26, R11
+	  37: INCEIPL       	$4
+
+	0xFECBE90:  7D6903A6  mtctr r11
+	  38: GETL       	R11, t28
+	  39: PUTL       	t28, CTR
+	  40: INCEIPL       	$4
+
+	0xFECBE94:  4E800421  bctrl
+	  41: MOVL       	$0xFECBE98, t30
+	  42: PUTL       	t30, LR
+	  43: GETL       	CTR, t32
+	  44: JMPo-c       	t32  ($4)
+
+
+
+. 2979 FECBE70 40
+. 7D 88 02 A6 94 21 FF F0 91 81 00 14 89 03 00 46 7D 07 07 74 7D 27 1A 14 80 C9 00 98 81 66 00 20 7D 69 03 A6 4E 80 04 21
+==== BB 2980 _IO_file_xsgetn_internal(0xFECA96C) approx BBs exec'd 0 ====
+
+	0xFECA96C:  7CC802A6  mflr r6
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFECA970:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECA974:  93410008  stw r26,8(r1)
+	   9: GETL       	R26, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECA978:  7CBA2B78  or r26,r5,r5
+	  14: GETL       	R5, t10
+	  15: PUTL       	t10, R26
+	  16: INCEIPL       	$4
+
+	0xFECA97C:  9361000C  stw r27,12(r1)
+	  17: GETL       	R27, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0xC, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFECA980:  7C9B2378  or r27,r4,r4
+	  22: GETL       	R4, t16
+	  23: PUTL       	t16, R27
+	  24: INCEIPL       	$4
+
+	0xFECA984:  90C10024  stw r6,36(r1)
+	  25: GETL       	R6, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x24, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFECA988:  8003001C  lwz r0,28(r3)
+	  30: GETL       	R3, t22
+	  31: ADDL       	$0x1C, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFECA98C:  93810010  stw r28,16(r1)
+	  35: GETL       	R28, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x10, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFECA990:  7CBC2B78  or r28,r5,r5
+	  40: GETL       	R5, t30
+	  41: PUTL       	t30, R28
+	  42: INCEIPL       	$4
+
+	0xFECA994:  2F800000  cmpi cr7,r0,0
+	  43: GETL       	R0, t32
+	  44: CMP0L       	t32, t34  (-rSo)
+	  45: ICRFL       	t34, $0x7, CR
+	  46: INCEIPL       	$4
+
+	0xFECA998:  93A10014  stw r29,20(r1)
+	  47: GETL       	R29, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x14, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0xFECA99C:  93C10018  stw r30,24(r1)
+	  52: GETL       	R30, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x18, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0xFECA9A0:  7C7D1B78  or r29,r3,r3
+	  57: GETL       	R3, t44
+	  58: PUTL       	t44, R29
+	  59: INCEIPL       	$4
+
+	0xFECA9A4:  93E1001C  stw r31,28(r1)
+	  60: GETL       	R31, t46
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x1C, t48
+	  63: STL       	t46, (t48)
+	  64: INCEIPL       	$4
+
+	0xFECA9A8:  409E0018  bc 4,30,0xFECA9C0
+	  65: Jc30o       	$0xFECA9C0
+
+
+
+. 2980 FECA96C 64
+. 7C C8 02 A6 94 21 FF E0 93 41 00 08 7C BA 2B 78 93 61 00 0C 7C 9B 23 78 90 C1 00 24 80 03 00 1C 93 81 00 10 7C BC 2B 78 2F 80 00 00 93 A1 00 14 93 C1 00 18 7C 7D 1B 78 93 E1 00 1C 40 9E 00 18
+==== BB 2981 (0xFECA9AC) approx BBs exec'd 0 ====
+
+	0xFECA9AC:  80630024  lwz r3,36(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECA9B0:  2C030000  cmpi cr0,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFECA9B4:  40820188  bc 4,2,0xFECAB3C
+	   9: Jc02o       	$0xFECAB3C
+
+
+
+. 2981 FECA9AC 12
+. 80 63 00 24 2C 03 00 00 40 82 01 88
+==== BB 2982 (0xFECA9B8) approx BBs exec'd 0 ====
+
+	0xFECA9B8:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFECA9BC:  4800128D  bl 0xFECBC48
+	   3: MOVL       	$0xFECA9C0, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFECBC48  ($4)
+
+
+
+. 2982 FECA9B8 8
+. 7F A3 EB 78 48 00 12 8D
+==== BB 2983 (0xFECA9C0) approx BBs exec'd 0 ====
+
+	0xFECA9C0:  2F9A0000  cmpi cr7,r26,0
+	   0: GETL       	R26, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFECA9C4:  419E00EC  bc 12,30,0xFECAAB0
+	   4: Js30o       	$0xFECAAB0
+
+
+
+. 2983 FECA9C0 8
+. 2F 9A 00 00 41 9E 00 EC
+==== BB 2984 (0xFECA9C8) approx BBs exec'd 0 ====
+
+	0xFECA9C8:  80BD0008  lwz r5,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFECA9CC:  809D0004  lwz r4,4(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFECA9D0:  7FE42850  subf r31,r4,r5
+	  10: GETL       	R4, t8
+	  11: GETL       	R5, t10
+	  12: SUBL       	t8, t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFECA9D4:  7C9CF840  cmpl cr1,r28,r31
+	  15: GETL       	R28, t12
+	  16: GETL       	R31, t14
+	  17: CMPUL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0xFECA9D8:  40850180  bc 4,5,0xFECAB58
+	  20: Jc05o       	$0xFECAB58
+
+
+
+. 2984 FECA9C8 20
+. 80 BD 00 08 80 9D 00 04 7F E4 28 50 7C 9C F8 40 40 85 01 80
+==== BB 2985 (0xFECA9DC) approx BBs exec'd 0 ====
+
+	0xFECA9DC:  2F1F0000  cmpi cr6,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFECA9E0:  409A010C  bc 4,26,0xFECAAEC
+	   4: Jc26o       	$0xFECAAEC
+
+
+
+. 2985 FECA9DC 8
+. 2F 1F 00 00 40 9A 01 0C
+==== BB 2986 (0xFECA9E4) approx BBs exec'd 0 ====
+
+	0xFECA9E4:  817D0000  lwz r11,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFECA9E8:  71690100  andi. r9,r11,0x100
+	   4: GETL       	R11, t4
+	   5: ANDL       	$0x100, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECA9EC:  408200F0  bc 4,2,0xFECAADC
+	  10: Jc02o       	$0xFECAADC
+
+
+
+. 2986 FECA9E4 12
+. 81 7D 00 00 71 69 01 00 40 82 00 F0
+==== BB 2987 (0xFECA9F0) approx BBs exec'd 0 ====
+
+	0xFECA9F0:  813D001C  lwz r9,28(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECA9F4:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFECA9F8:  419E0014  bc 12,30,0xFECAA0C
+	   9: Js30o       	$0xFECAA0C
+
+
+
+. 2987 FECA9F0 12
+. 81 3D 00 1C 2F 89 00 00 41 9E 00 14
+==== BB 2988 (0xFECA9FC) approx BBs exec'd 0 ====
+
+	0xFECA9FC:  83FD0020  lwz r31,32(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFECAA00:  7D89F850  subf r12,r9,r31
+	   5: GETL       	R9, t4
+	   6: GETL       	R31, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0xFECAA04:  7C8CE040  cmpl cr1,r12,r28
+	  10: GETL       	R12, t8
+	  11: GETL       	R28, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0xFECAA08:  4185011C  bc 12,5,0xFECAB24
+	  15: Js05o       	$0xFECAB24
+
+
+
+. 2988 FECA9FC 16
+. 83 FD 00 20 7D 89 F8 50 7C 8C E0 40 41 85 01 1C
+==== BB 2989 (0xFECAB24) approx BBs exec'd 0 ====
+
+	0xFECAB24:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFECAB28:  48000CCD  bl 0xFECB7F4
+	   3: MOVL       	$0xFECAB2C, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFECB7F4  ($4)
+
+
+
+. 2989 FECAB24 8
+. 7F A3 EB 78 48 00 0C CD
+==== BB 2990 __GI___underflow(0xFECB7F4) approx BBs exec'd 0 ====
+
+	0xFECB7F4:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFECB7F8:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFECB7FC:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFECB800:  480DC651  bl 0xFFA7E50
+	  14: MOVL       	$0xFECB804, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 2990 FECB7F4 16
+. 7C 88 02 A6 94 21 FF F0 93 C1 00 08 48 0D C6 51
+==== BB 2991 (0xFECB804) approx BBs exec'd 0 ====
+
+	0xFECB804:  93E1000C  stw r31,12(r1)
+	   0: GETL       	R31, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECB808:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFECB80C:  90810014  stw r4,20(r1)
+	   8: GETL       	R4, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECB810:  7C7F1B78  or r31,r3,r3
+	  13: GETL       	R3, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0xFECB814:  88030046  lbz r0,70(r3)
+	  16: GETL       	R3, t12
+	  17: ADDL       	$0x46, t12
+	  18: LDB       	(t12), t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0xFECB818:  2F800000  cmpi cr7,r0,0
+	  21: GETL       	R0, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0xFECB81C:  409E0030  bc 4,30,0xFECB84C
+	  25: Jc30o       	$0xFECB84C
+
+
+
+. 2991 FECB804 28
+. 93 E1 00 0C 7F C8 02 A6 90 81 00 14 7C 7F 1B 78 88 03 00 46 2F 80 00 00 40 9E 00 30
+==== BB 2992 (0xFECB820) approx BBs exec'd 0 ====
+
+	0xFECB820:  80BE1DC8  lwz r5,7624(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFECB824:  2C050000  cmpi cr0,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFECB828:  41820168  bc 12,2,0xFECB990
+	   9: Js02o       	$0xFECB990
+
+
+
+. 2992 FECB820 12
+. 80 BE 1D C8 2C 05 00 00 41 82 01 68
+==== BB 2993 (0xFECB82C) approx BBs exec'd 0 ====
+
+	0xFECB82C:  80630060  lwz r3,96(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECB830:  2C830000  cmpi cr1,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFECB834:  4086000C  bc 4,6,0xFECB840
+	   9: Jc06o       	$0xFECB840
+
+
+
+. 2993 FECB82C 12
+. 80 63 00 60 2C 83 00 00 40 86 00 0C
+==== BB 2994 (0xFECB838) approx BBs exec'd 0 ====
+
+	0xFECB838:  3860FFFF  li r3,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFECB83C:  907F0060  stw r3,96(r31)
+	   3: GETL       	R3, t2
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x60, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFECB840:  2F03FFFF  cmpi cr6,r3,-1
+	   8: GETL       	R3, t6
+	   9: MOVL       	$0xFFFFFFFF, t10
+	  10: CMPL       	t6, t10, t8  (-rSo)
+	  11: ICRFL       	t8, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0xFECB844:  3860FFFF  li r3,-1
+	  13: MOVL       	$0xFFFFFFFF, t12
+	  14: PUTL       	t12, R3
+	  15: INCEIPL       	$4
+
+	0xFECB848:  409A00D0  bc 4,26,0xFECB918
+	  16: Jc26o       	$0xFECB918
+
+
+
+. 2994 FECB838 20
+. 38 60 FF FF 90 7F 00 60 2F 03 FF FF 38 60 FF FF 40 9A 00 D0
+==== BB 2995 (0xFECB84C) approx BBs exec'd 0 ====
+
+	0xFECB84C:  807F0060  lwz r3,96(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECB850:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFECB854:  419E007C  bc 12,30,0xFECB8D0
+	   9: Js30o       	$0xFECB8D0
+
+
+
+. 2995 FECB84C 12
+. 80 7F 00 60 2F 83 00 00 41 9E 00 7C
+==== BB 2996 (0xFECB858) approx BBs exec'd 0 ====
+
+	0xFECB858:  811F0000  lwz r8,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0xFECB85C:  71090800  andi. r9,r8,0x800
+	   4: GETL       	R8, t4
+	   5: ANDL       	$0x800, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECB860:  40820090  bc 4,2,0xFECB8F0
+	  10: Jc02o       	$0xFECB8F0
+
+
+
+. 2996 FECB858 12
+. 81 1F 00 00 71 09 08 00 40 82 00 90
+==== BB 2997 (0xFECB864) approx BBs exec'd 0 ====
+
+	0xFECB864:  807F0004  lwz r3,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECB868:  809F0008  lwz r4,8(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFECB86C:  7F032040  cmpl cr6,r3,r4
+	  10: GETL       	R3, t8
+	  11: GETL       	R4, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0xFECB870:  419800A4  bc 12,24,0xFECB914
+	  15: Js24o       	$0xFECB914
+
+
+
+. 2997 FECB864 16
+. 80 7F 00 04 80 9F 00 08 7F 03 20 40 41 98 00 A4
+==== BB 2998 (0xFECB874) approx BBs exec'd 0 ====
+
+	0xFECB874:  815F0000  lwz r10,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFECB878:  71490100  andi. r9,r10,0x100
+	   4: GETL       	R10, t4
+	   5: ANDL       	$0x100, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECB87C:  408200C0  bc 4,2,0xFECB93C
+	  10: Jc02o       	$0xFECB93C
+
+
+
+. 2998 FECB874 12
+. 81 5F 00 00 71 49 01 00 40 82 00 C0
+==== BB 2999 (0xFECB880) approx BBs exec'd 0 ====
+
+	0xFECB880:  813F0030  lwz r9,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECB884:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFECB888:  408600D0  bc 4,6,0xFECB958
+	   9: Jc06o       	$0xFECB958
+
+
+
+. 2999 FECB880 12
+. 81 3F 00 30 2C 89 00 00 40 86 00 D0
+==== BB 3000 (0xFECB88C) approx BBs exec'd 0 ====
+
+	0xFECB88C:  817F0024  lwz r11,36(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFECB890:  2C0B0000  cmpi cr0,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFECB894:  4082009C  bc 4,2,0xFECB930
+	   9: Jc02o       	$0xFECB930
+
+
+
+. 3000 FECB88C 12
+. 81 7F 00 24 2C 0B 00 00 40 82 00 9C
+==== BB 3001 (0xFECB898) approx BBs exec'd 0 ====
+
+	0xFECB898:  88BF0046  lbz r5,70(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFECB89C:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFECB8A0:  7CA00774  extsb r0,r5
+	   8: GETB       	R5, t6
+	   9: WIDENL_Bs       	_st6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0xFECB8A4:  7C80FA14  add r4,r0,r31
+	  12: GETL       	R0, t8
+	  13: GETL       	R31, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0xFECB8A8:  83E40098  lwz r31,152(r4)
+	  17: GETL       	R4, t12
+	  18: ADDL       	$0x98, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R31
+	  21: INCEIPL       	$4
+
+	0xFECB8AC:  819F0010  lwz r12,16(r31)
+	  22: GETL       	R31, t16
+	  23: ADDL       	$0x10, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R12
+	  26: INCEIPL       	$4
+
+	0xFECB8B0:  7D8903A6  mtctr r12
+	  27: GETL       	R12, t20
+	  28: PUTL       	t20, CTR
+	  29: INCEIPL       	$4
+
+	0xFECB8B4:  4E800421  bctrl
+	  30: MOVL       	$0xFECB8B8, t22
+	  31: PUTL       	t22, LR
+	  32: GETL       	CTR, t24
+	  33: JMPo-c       	t24  ($4)
+
+
+
+. 3001 FECB898 32
+. 88 BF 00 46 7F E3 FB 78 7C A0 07 74 7C 80 FA 14 83 E4 00 98 81 9F 00 10 7D 89 03 A6 4E 80 04 21
+==== BB 3002 (0xFECB8B8) approx BBs exec'd 0 ====
+
+	0xFECB8B8:  80C10014  lwz r6,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFECB8BC:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFECB8C0:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFECB8C4:  7CC803A6  mtlr r6
+	  15: GETL       	R6, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFECB8C8:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xFECB8CC:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 3002 FECB8B8 24
+. 80 C1 00 14 83 C1 00 08 83 E1 00 0C 7C C8 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3003 (0xFECAB2C) approx BBs exec'd 0 ====
+
+	0xFECAB2C:  2F03FFFF  cmpi cr6,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFECAB30:  41BAFF80  bc 13,26,0xFECAAB0
+	   5: Js26o       	$0xFECAAB0
+
+
+
+. 3003 FECAB2C 8
+. 2F 03 FF FF 41 BA FF 80
+==== BB 3004 (0xFECAB34) approx BBs exec'd 0 ====
+
+	0xFECAB34:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFECAB38:  4BFFFF74  b 0xFECAAAC
+	   4: JMPo       	$0xFECAAAC  ($4)
+
+
+
+. 3004 FECAB34 8
+. 2F 9C 00 00 4B FF FF 74
+==== BB 3005 (0xFECAAAC) approx BBs exec'd 0 ====
+
+	0xFECAAAC:  409EFF1C  bc 4,30,0xFECA9C8
+	   0: Jc30o       	$0xFECA9C8
+
+
+
+. 3005 FECAAAC 4
+. 40 9E FF 1C
+==== BB 3006 (0xFECAB58) approx BBs exec'd 0 ====
+
+	0xFECAB58:  7F85E378  or r5,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFECAB5C:  7F63DB78  or r3,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFECAB60:  4800DB79  bl 0xFED86D8
+	   6: MOVL       	$0xFECAB64, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 3006 FECAB58 12
+. 7F 85 E3 78 7F 63 DB 78 48 00 DB 79
+==== BB 3007 (0xFECAB64) approx BBs exec'd 0 ====
+
+	0xFECAB64:  811D0004  lwz r8,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFECAB68:  7CE8E214  add r7,r8,r28
+	   5: GETL       	R8, t4
+	   6: GETL       	R28, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0xFECAB6C:  3B800000  li r28,0
+	  10: MOVL       	$0x0, t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0xFECAB70:  90FD0004  stw r7,4(r29)
+	  13: GETL       	R7, t10
+	  14: GETL       	R29, t12
+	  15: ADDL       	$0x4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFECAB74:  4BFFFF3C  b 0xFECAAB0
+	  18: JMPo       	$0xFECAAB0  ($4)
+
+
+
+. 3007 FECAB64 20
+. 81 1D 00 04 7C E8 E2 14 3B 80 00 00 90 FD 00 04 4B FF FF 3C
+==== BB 3008 (0xFECAAB0) approx BBs exec'd 0 ====
+
+	0xFECAAB0:  7C7CD050  subf r3,r28,r26
+	   0: GETL       	R28, t0
+	   1: GETL       	R26, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECAAB4:  83410024  lwz r26,36(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x24, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0xFECAAB8:  8361000C  lwz r27,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R27
+	  14: INCEIPL       	$4
+
+	0xFECAABC:  7F4803A6  mtlr r26
+	  15: GETL       	R26, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFECAAC0:  83810010  lwz r28,16(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R28
+	  22: INCEIPL       	$4
+
+	0xFECAAC4:  83410008  lwz r26,8(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x8, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R26
+	  27: INCEIPL       	$4
+
+	0xFECAAC8:  83A10014  lwz r29,20(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x14, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R29
+	  32: INCEIPL       	$4
+
+	0xFECAACC:  83C10018  lwz r30,24(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x18, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R30
+	  37: INCEIPL       	$4
+
+	0xFECAAD0:  83E1001C  lwz r31,28(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x1C, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R31
+	  42: INCEIPL       	$4
+
+	0xFECAAD4:  38210020  addi r1,r1,32
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x20, t34
+	  45: PUTL       	t34, R1
+	  46: INCEIPL       	$4
+
+	0xFECAAD8:  4E800020  blr
+	  47: GETL       	LR, t36
+	  48: JMPo-r       	t36  ($4)
+
+
+
+. 3008 FECAAB0 44
+. 7C 7C D0 50 83 41 00 24 83 61 00 0C 7F 48 03 A6 83 81 00 10 83 41 00 08 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3009 (0xFECBE98) approx BBs exec'd 0 ====
+
+	0xFECBE98:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFECBE9C:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFECBEA0:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFECBEA4:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 3009 FECBE98 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 3010 (0xFEC7F40) approx BBs exec'd 0 ====
+
+	0xFEC7F40:  7F9C1800  cmp cr7,r28,r3
+	   0: GETL       	R28, t0
+	   1: GETL       	R3, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEC7F44:  7C601B78  or r0,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFEC7F48:  7FE3FB78  or r3,r31,r31
+	   8: GETL       	R31, t8
+	   9: PUTL       	t8, R3
+	  10: INCEIPL       	$4
+
+	0xFEC7F4C:  419E0008  bc 12,30,0xFEC7F54
+	  11: Js30o       	$0xFEC7F54
+
+
+
+. 3010 FEC7F40 16
+. 7F 9C 18 00 7C 60 1B 78 7F E3 FB 78 41 9E 00 08
+==== BB 3011 (0xFEC7F54) approx BBs exec'd 0 ====
+
+	0xFEC7F54:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEC7F58:  7C601B78  or r0,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFEC7F5C:  7C030378  or r3,r0,r0
+	   8: GETL       	R0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFEC7F60:  83810010  lwz r28,16(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x10, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R28
+	  15: INCEIPL       	$4
+
+	0xFEC7F64:  83A10014  lwz r29,20(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x14, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R29
+	  20: INCEIPL       	$4
+
+	0xFEC7F68:  7C8803A6  mtlr r4
+	  21: GETL       	R4, t16
+	  22: PUTL       	t16, LR
+	  23: INCEIPL       	$4
+
+	0xFEC7F6C:  83C10018  lwz r30,24(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x18, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R30
+	  28: INCEIPL       	$4
+
+	0xFEC7F70:  83E1001C  lwz r31,28(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x1C, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R31
+	  33: INCEIPL       	$4
+
+	0xFEC7F74:  38210020  addi r1,r1,32
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x20, t26
+	  36: PUTL       	t26, R1
+	  37: INCEIPL       	$4
+
+	0xFEC7F78:  4E800020  blr
+	  38: GETL       	LR, t28
+	  39: JMPo-r       	t28  ($4)
+
+
+
+. 3011 FEC7F54 40
+. 80 81 00 24 7C 60 1B 78 7C 03 03 78 83 81 00 10 83 A1 00 14 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3012 (0xFEE8040) approx BBs exec'd 0 ====
+
+	0xFEE8040:  2F030001  cmpi cr6,r3,1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFEE8044:  409A011C  bc 4,26,0xFEE8160
+	   5: Jc26o       	$0xFEE8160
+
+
+
+. 3012 FEE8040 8
+. 2F 03 00 01 40 9A 01 1C
+==== BB 3013 (0xFEE8048) approx BBs exec'd 0 ====
+
+	0xFEE8048:  815F0030  lwz r10,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFEE804C:  811F0034  lwz r8,52(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x34, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0xFEE8050:  1F0A0005  mulli r24,r10,5
+	  10: GETL       	R10, t8
+	  11: MULL       	$0x5, t8
+	  12: PUTL       	t8, R24
+	  13: INCEIPL       	$4
+
+	0xFEE8054:  82BF0038  lwz r21,56(r31)
+	  14: GETL       	R31, t10
+	  15: ADDL       	$0x38, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R21
+	  18: INCEIPL       	$4
+
+	0xFEE8058:  55111838  rlwinm r17,r8,3,0,28
+	  19: GETL       	R8, t14
+	  20: SHLL       	$0x3, t14
+	  21: PUTL       	t14, R17
+	  22: INCEIPL       	$4
+
+	0xFEE805C:  817F002C  lwz r11,44(r31)
+	  23: GETL       	R31, t16
+	  24: ADDL       	$0x2C, t16
+	  25: LDL       	(t16), t18
+	  26: PUTL       	t18, R11
+	  27: INCEIPL       	$4
+
+	0xFEE8060:  3A580003  addi r18,r24,3
+	  28: GETL       	R24, t20
+	  29: ADDL       	$0x3, t20
+	  30: PUTL       	t20, R18
+	  31: INCEIPL       	$4
+
+	0xFEE8064:  7E11AA14  add r16,r17,r21
+	  32: GETL       	R17, t22
+	  33: GETL       	R21, t24
+	  34: ADDL       	t22, t24
+	  35: PUTL       	t24, R16
+	  36: INCEIPL       	$4
+
+	0xFEE8068:  565C003A  rlwinm r28,r18,0,0,29
+	  37: GETL       	R18, t26
+	  38: ANDL       	$0xFFFFFFFC, t26
+	  39: PUTL       	t26, R28
+	  40: INCEIPL       	$4
+
+	0xFEE806C:  831E08DC  lwz r24,2268(r30)
+	  41: GETL       	R30, t28
+	  42: ADDL       	$0x8DC, t28
+	  43: LDL       	(t28), t30
+	  44: PUTL       	t30, R24
+	  45: INCEIPL       	$4
+
+	0xFEE8070:  7DFC8214  add r15,r28,r16
+	  46: GETL       	R28, t32
+	  47: GETL       	R16, t34
+	  48: ADDL       	t32, t34
+	  49: PUTL       	t34, R15
+	  50: INCEIPL       	$4
+
+	0xFEE8074:  825E08E0  lwz r18,2272(r30)
+	  51: GETL       	R30, t36
+	  52: ADDL       	$0x8E0, t36
+	  53: LDL       	(t36), t38
+	  54: PUTL       	t38, R18
+	  55: INCEIPL       	$4
+
+	0xFEE8078:  821E08E4  lwz r16,2276(r30)
+	  56: GETL       	R30, t40
+	  57: ADDL       	$0x8E4, t40
+	  58: LDL       	(t40), t42
+	  59: PUTL       	t42, R16
+	  60: INCEIPL       	$4
+
+	0xFEE807C:  39CF0003  addi r14,r15,3
+	  61: GETL       	R15, t44
+	  62: ADDL       	$0x3, t44
+	  63: PUTL       	t44, R14
+	  64: INCEIPL       	$4
+
+	0xFEE8080:  91180000  stw r8,0(r24)
+	  65: GETL       	R8, t46
+	  66: GETL       	R24, t48
+	  67: STL       	t46, (t48)
+	  68: INCEIPL       	$4
+
+	0xFEE8084:  556C1838  rlwinm r12,r11,3,0,28
+	  69: GETL       	R11, t50
+	  70: SHLL       	$0x3, t50
+	  71: PUTL       	t50, R12
+	  72: INCEIPL       	$4
+
+	0xFEE8088:  91500000  stw r10,0(r16)
+	  73: GETL       	R10, t52
+	  74: GETL       	R16, t54
+	  75: STL       	t52, (t54)
+	  76: INCEIPL       	$4
+
+	0xFEE808C:  55DA003A  rlwinm r26,r14,0,0,29
+	  77: GETL       	R14, t56
+	  78: ANDL       	$0xFFFFFFFC, t56
+	  79: PUTL       	t56, R26
+	  80: INCEIPL       	$4
+
+	0xFEE8090:  91720000  stw r11,0(r18)
+	  81: GETL       	R11, t58
+	  82: GETL       	R18, t60
+	  83: STL       	t58, (t60)
+	  84: INCEIPL       	$4
+
+	0xFEE8094:  7C7A6214  add r3,r26,r12
+	  85: GETL       	R26, t62
+	  86: GETL       	R12, t64
+	  87: ADDL       	t62, t64
+	  88: PUTL       	t64, R3
+	  89: INCEIPL       	$4
+
+	0xFEE8098:  80FF0028  lwz r7,40(r31)
+	  90: GETL       	R31, t66
+	  91: ADDL       	$0x28, t66
+	  92: LDL       	(t66), t68
+	  93: PUTL       	t68, R7
+	  94: INCEIPL       	$4
+
+	0xFEE809C:  7C63BA14  add r3,r3,r23
+	  95: GETL       	R3, t70
+	  96: GETL       	R23, t72
+	  97: ADDL       	t70, t72
+	  98: PUTL       	t72, R3
+	  99: INCEIPL       	$4
+
+	0xFEE80A0:  809F0024  lwz r4,36(r31)
+	 100: GETL       	R31, t74
+	 101: ADDL       	$0x24, t74
+	 102: LDL       	(t74), t76
+	 103: PUTL       	t76, R4
+	 104: INCEIPL       	$4
+
+	0xFEE80A4:  90FF00B8  stw r7,184(r31)
+	 105: GETL       	R7, t78
+	 106: GETL       	R31, t80
+	 107: ADDL       	$0xB8, t80
+	 108: STL       	t78, (t80)
+	 109: INCEIPL       	$4
+
+	0xFEE80A8:  909F00BC  stw r4,188(r31)
+	 110: GETL       	R4, t82
+	 111: GETL       	R31, t84
+	 112: ADDL       	$0xBC, t84
+	 113: STL       	t82, (t84)
+	 114: INCEIPL       	$4
+
+	0xFEE80AC:  480C225D  bl 0xFFAA308
+	 115: MOVL       	$0xFEE80B0, t86
+	 116: PUTL       	t86, LR
+	 117: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 3013 FEE8048 104
+. 81 5F 00 30 81 1F 00 34 1F 0A 00 05 82 BF 00 38 55 11 18 38 81 7F 00 2C 3A 58 00 03 7E 11 AA 14 56 5C 00 3A 83 1E 08 DC 7D FC 82 14 82 5E 08 E0 82 1E 08 E4 39 CF 00 03 91 18 00 00 55 6C 18 38 91 50 00 00 55 DA 00 3A 91 72 00 00 7C 7A 62 14 80 FF 00 28 7C 63 BA 14 80 9F 00 24 90 FF 00 B8 90 9F 00 BC 48 0C 22 5D
+==== BB 3014 (0xFEE80B0) approx BBs exec'd 0 ====
+
+	0xFEE80B0:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFEE80B4:  90730000  stw r3,0(r19)
+	   4: GETL       	R3, t4
+	   5: GETL       	R19, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0xFEE80B8:  419E00A8  bc 12,30,0xFEE8160
+	   8: Js30o       	$0xFEE8160
+
+
+
+. 3014 FEE80B0 12
+. 2F 83 00 00 90 73 00 00 41 9E 00 A8
+==== BB 3015 (0xFEE80BC) approx BBs exec'd 0 ====
+
+	0xFEE80BC:  2C970000  cmpi cr1,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEE80C0:  83B00000  lwz r29,0(r16)
+	   4: GETL       	R16, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R29
+	   7: INCEIPL       	$4
+
+	0xFEE80C4:  82F80000  lwz r23,0(r24)
+	   8: GETL       	R24, t8
+	   9: LDL       	(t8), t10
+	  10: PUTL       	t10, R23
+	  11: INCEIPL       	$4
+
+	0xFEE80C8:  7F23E214  add r25,r3,r28
+	  12: GETL       	R3, t12
+	  13: GETL       	R28, t14
+	  14: ADDL       	t12, t14
+	  15: PUTL       	t14, R25
+	  16: INCEIPL       	$4
+
+	0xFEE80CC:  81DE08E8  lwz r14,2280(r30)
+	  17: GETL       	R30, t16
+	  18: ADDL       	$0x8E8, t16
+	  19: LDL       	(t16), t18
+	  20: PUTL       	t18, R14
+	  21: INCEIPL       	$4
+
+	0xFEE80D0:  57A0103A  rlwinm r0,r29,2,0,29
+	  22: GETL       	R29, t20
+	  23: SHLL       	$0x2, t20
+	  24: PUTL       	t20, R0
+	  25: INCEIPL       	$4
+
+	0xFEE80D4:  56E91838  rlwinm r9,r23,3,0,28
+	  26: GETL       	R23, t22
+	  27: SHLL       	$0x3, t22
+	  28: PUTL       	t22, R9
+	  29: INCEIPL       	$4
+
+	0xFEE80D8:  81FE08EC  lwz r15,2284(r30)
+	  30: GETL       	R30, t24
+	  31: ADDL       	$0x8EC, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R15
+	  34: INCEIPL       	$4
+
+	0xFEE80DC:  82FE08F0  lwz r23,2288(r30)
+	  35: GETL       	R30, t28
+	  36: ADDL       	$0x8F0, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R23
+	  39: INCEIPL       	$4
+
+	0xFEE80E0:  7D43D214  add r10,r3,r26
+	  40: GETL       	R3, t32
+	  41: GETL       	R26, t34
+	  42: ADDL       	t32, t34
+	  43: PUTL       	t34, R10
+	  44: INCEIPL       	$4
+
+	0xFEE80E4:  823E08F4  lwz r17,2292(r30)
+	  45: GETL       	R30, t36
+	  46: ADDL       	$0x8F4, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R17
+	  49: INCEIPL       	$4
+
+	0xFEE80E8:  7CC9CA14  add r6,r9,r25
+	  50: GETL       	R9, t40
+	  51: GETL       	R25, t42
+	  52: ADDL       	t40, t42
+	  53: PUTL       	t42, R6
+	  54: INCEIPL       	$4
+
+	0xFEE80EC:  7F401A14  add r26,r0,r3
+	  55: GETL       	R0, t44
+	  56: GETL       	R3, t46
+	  57: ADDL       	t44, t46
+	  58: PUTL       	t46, R26
+	  59: INCEIPL       	$4
+
+	0xFEE80F0:  90CF0000  stw r6,0(r15)
+	  60: GETL       	R6, t48
+	  61: GETL       	R15, t50
+	  62: STL       	t48, (t50)
+	  63: INCEIPL       	$4
+
+	0xFEE80F4:  934E0000  stw r26,0(r14)
+	  64: GETL       	R26, t52
+	  65: GETL       	R14, t54
+	  66: STL       	t52, (t54)
+	  67: INCEIPL       	$4
+
+	0xFEE80F8:  93370000  stw r25,0(r23)
+	  68: GETL       	R25, t56
+	  69: GETL       	R23, t58
+	  70: STL       	t56, (t58)
+	  71: INCEIPL       	$4
+
+	0xFEE80FC:  91510000  stw r10,0(r17)
+	  72: GETL       	R10, t60
+	  73: GETL       	R17, t62
+	  74: STL       	t60, (t62)
+	  75: INCEIPL       	$4
+
+	0xFEE8100:  408600DC  bc 4,6,0xFEE81DC
+	  76: Jc06o       	$0xFEE81DC
+
+
+
+. 3015 FEE80BC 72
+. 2C 97 00 00 83 B0 00 00 82 F8 00 00 7F 23 E2 14 81 DE 08 E8 57 A0 10 3A 56 E9 18 38 81 FE 08 EC 82 FE 08 F0 7D 43 D2 14 82 3E 08 F4 7C C9 CA 14 7F 40 1A 14 90 CF 00 00 93 4E 00 00 93 37 00 00 91 51 00 00 40 86 00 DC
+==== BB 3016 (0xFEE8104) approx BBs exec'd 0 ====
+
+	0xFEE8104:  1EDD0005  mulli r22,r29,5
+	   0: GETL       	R29, t0
+	   1: MULL       	$0x5, t0
+	   2: PUTL       	t0, R22
+	   3: INCEIPL       	$4
+
+	0xFEE8108:  80F00000  lwz r7,0(r16)
+	   4: GETL       	R16, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0xFEE810C:  38800001  li r4,1
+	   8: MOVL       	$0x1, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFEE8110:  1CA70005  mulli r5,r7,5
+	  11: GETL       	R7, t8
+	  12: MULL       	$0x5, t8
+	  13: PUTL       	t8, R5
+	  14: INCEIPL       	$4
+
+	0xFEE8114:  80730000  lwz r3,0(r19)
+	  15: GETL       	R19, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R3
+	  18: INCEIPL       	$4
+
+	0xFEE8118:  7F66DB78  or r6,r27,r27
+	  19: GETL       	R27, t14
+	  20: PUTL       	t14, R6
+	  21: INCEIPL       	$4
+
+	0xFEE811C:  4BFDFDC1  bl 0xFEC7EDC
+	  22: MOVL       	$0xFEE8120, t16
+	  23: PUTL       	t16, LR
+	  24: JMPo-c       	$0xFEC7EDC  ($4)
+
+
+
+. 3016 FEE8104 28
+. 1E DD 00 05 80 F0 00 00 38 80 00 01 1C A7 00 05 80 73 00 00 7F 66 DB 78 4B FD FD C1
+==== BB 3017 (0xFEE8120) approx BBs exec'd 0 ====
+
+	0xFEE8120:  7C03B000  cmp cr0,r3,r22
+	   0: GETL       	R3, t0
+	   1: GETL       	R22, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEE8124:  4082003C  bc 4,2,0xFEE8160
+	   5: Jc02o       	$0xFEE8160
+
+
+
+. 3017 FEE8120 8
+. 7C 03 B0 00 40 82 00 3C
+==== BB 3018 (0xFEE8128) approx BBs exec'd 0 ====
+
+	0xFEE8128:  80100000  lwz r0,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEE812C:  39200000  li r9,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0xFEE8130:  7F090040  cmpl cr6,r9,r0
+	   7: GETL       	R9, t6
+	   8: GETL       	R0, t8
+	   9: CMPUL       	t6, t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFEE8134:  409800C8  bc 4,24,0xFEE81FC
+	  12: Jc24o       	$0xFEE81FC
+
+
+
+. 3018 FEE8128 16
+. 80 10 00 00 39 20 00 00 7F 09 00 40 40 98 00 C8
+==== BB 3019 (0xFEE8138) approx BBs exec'd 0 ====
+
+	0xFEE8138:  810E0000  lwz r8,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0xFEE813C:  7C0B0378  or r11,r0,r0
+	   4: GETL       	R0, t4
+	   5: PUTL       	t4, R11
+	   6: INCEIPL       	$4
+
+	0xFEE8140:  81580000  lwz r10,0(r24)
+	   7: GETL       	R24, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R10
+	  10: INCEIPL       	$4
+
+	0xFEE8144:  48000008  b 0xFEE814C
+	  11: JMPo       	$0xFEE814C  ($4)
+
+
+
+. 3019 FEE8138 16
+. 81 0E 00 00 7C 0B 03 78 81 58 00 00 48 00 00 08
+==== BB 3020 (0xFEE814C) approx BBs exec'd 0 ====
+
+	0xFEE814C:  7FA848AE  lbzx r29,r8,r9
+	   0: GETL       	R9, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t2, t0
+	   3: LDB       	(t0), t4
+	   4: PUTL       	t4, R29
+	   5: INCEIPL       	$4
+
+	0xFEE8150:  39290001  addi r9,r9,1
+	   6: GETL       	R9, t6
+	   7: ADDL       	$0x1, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFEE8154:  7F095840  cmpl cr6,r9,r11
+	  10: GETL       	R9, t8
+	  11: GETL       	R11, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0xFEE8158:  7F9D5040  cmpl cr7,r29,r10
+	  15: GETL       	R29, t14
+	  16: GETL       	R10, t16
+	  17: CMPUL       	t14, t16, t18  (-rSo)
+	  18: ICRFL       	t18, $0x7, CR
+	  19: INCEIPL       	$4
+
+	0xFEE815C:  419CFFEC  bc 12,28,0xFEE8148
+	  20: Js28o       	$0xFEE8148
+
+
+
+. 3020 FEE814C 20
+. 7F A8 48 AE 39 29 00 01 7F 09 58 40 7F 9D 50 40 41 9C FF EC
+==== BB 3021 (0xFEE8148) approx BBs exec'd 0 ====
+
+	0xFEE8148:  409800B4  bc 4,24,0xFEE81FC
+	   0: Jc24o       	$0xFEE81FC
+
+
+
+. 3021 FEE8148 4
+. 40 98 00 B4
+==== BB 3022 (0xFEE81FC) approx BBs exec'd 0 ====
+
+	0xFEE81FC:  81780000  lwz r11,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFEE8200:  3B400000  li r26,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R26
+	   6: INCEIPL       	$4
+
+	0xFEE8204:  7C9A5840  cmpl cr1,r26,r11
+	   7: GETL       	R26, t6
+	   8: GETL       	R11, t8
+	   9: CMPUL       	t6, t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0xFEE8208:  408400D0  bc 4,4,0xFEE82D8
+	  12: Jc04o       	$0xFEE82D8
+
+
+
+. 3022 FEE81FC 16
+. 81 78 00 00 3B 40 00 00 7C 9A 58 40 40 84 00 D0
+==== BB 3023 (0xFEE820C) approx BBs exec'd 0 ====
+
+	0xFEE820C:  3B3F00B0  addi r25,r31,176
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xB0, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0xFEE8210:  7F3CCB78  or r28,r25,r25
+	   4: GETL       	R25, t2
+	   5: PUTL       	t2, R28
+	   6: INCEIPL       	$4
+
+	0xFEE8214:  3BA00004  li r29,4
+	   7: MOVL       	$0x4, t4
+	   8: PUTL       	t4, R29
+	   9: INCEIPL       	$4
+
+	0xFEE8218:  813B0004  lwz r9,4(r27)
+	  10: GETL       	R27, t6
+	  11: ADDL       	$0x4, t6
+	  12: LDL       	(t6), t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0xFEE821C:  7F63DB78  or r3,r27,r27
+	  15: GETL       	R27, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0xFEE8220:  811B0008  lwz r8,8(r27)
+	  18: GETL       	R27, t12
+	  19: ADDL       	$0x8, t12
+	  20: LDL       	(t12), t14
+	  21: PUTL       	t14, R8
+	  22: INCEIPL       	$4
+
+	0xFEE8224:  38090001  addi r0,r9,1
+	  23: GETL       	R9, t16
+	  24: ADDL       	$0x1, t16
+	  25: PUTL       	t16, R0
+	  26: INCEIPL       	$4
+
+	0xFEE8228:  7C094040  cmpl cr0,r9,r8
+	  27: GETL       	R9, t18
+	  28: GETL       	R8, t20
+	  29: CMPUL       	t18, t20, t22  (-rSo)
+	  30: ICRFL       	t22, $0x0, CR
+	  31: INCEIPL       	$4
+
+	0xFEE822C:  408002E0  bc 4,0,0xFEE850C
+	  32: Jc00o       	$0xFEE850C
+
+
+
+. 3023 FEE820C 36
+. 3B 3F 00 B0 7F 3C CB 78 3B A0 00 04 81 3B 00 04 7F 63 DB 78 81 1B 00 08 38 09 00 01 7C 09 40 40 40 80 02 E0
+==== BB 3024 (0xFEE8230) approx BBs exec'd 0 ====
+
+	0xFEE8230:  88690000  lbz r3,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEE8234:  901B0004  stw r0,4(r27)
+	   4: GETL       	R0, t4
+	   5: GETL       	R27, t6
+	   6: ADDL       	$0x4, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFEE8238:  2F03FFFF  cmpi cr6,r3,-1
+	   9: GETL       	R3, t8
+	  10: MOVL       	$0xFFFFFFFF, t12
+	  11: CMPL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFEE823C:  419A0014  bc 12,26,0xFEE8250
+	  14: Js26o       	$0xFEE8250
+
+
+
+. 3024 FEE8230 16
+. 88 69 00 00 90 1B 00 04 2F 03 FF FF 41 9A 00 14
+==== BB 3025 (0xFEE8240) approx BBs exec'd 0 ====
+
+	0xFEE8240:  37BDFFFF  addic. r29,r29,-1
+	   0: GETL       	R29, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R29
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEE8244:  987C0000  stb r3,0(r28)
+	   6: GETL       	R3, t4
+	   7: GETL       	R28, t6
+	   8: STB       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEE8248:  3B9C0001  addi r28,r28,1
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0xFEE824C:  4082FFCC  bc 4,2,0xFEE8218
+	  14: Jc02o       	$0xFEE8218
+
+
+
+. 3025 FEE8240 16
+. 37 BD FF FF 98 7C 00 00 3B 9C 00 01 40 82 FF CC
+==== BB 3026 (0xFEE8218) approx BBs exec'd 0 ====
+
+	0xFEE8218:  813B0004  lwz r9,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEE821C:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE8220:  811B0008  lwz r8,8(r27)
+	   8: GETL       	R27, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0xFEE8224:  38090001  addi r0,r9,1
+	  13: GETL       	R9, t10
+	  14: ADDL       	$0x1, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFEE8228:  7C094040  cmpl cr0,r9,r8
+	  17: GETL       	R9, t12
+	  18: GETL       	R8, t14
+	  19: CMPUL       	t12, t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0xFEE822C:  408002E0  bc 4,0,0xFEE850C
+	  22: Jc00o       	$0xFEE850C
+
+
+
+. 3026 FEE8218 24
+. 81 3B 00 04 7F 63 DB 78 81 1B 00 08 38 09 00 01 7C 09 40 40 40 80 02 E0
+==== BB 3027 (0xFEE8250) approx BBs exec'd 0 ====
+
+	0xFEE8250:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFEE8254:  40BEFF0C  bc 5,30,0xFEE8160
+	   4: Jc30o       	$0xFEE8160
+
+
+
+. 3027 FEE8250 8
+. 2F 9D 00 00 40 BE FF 0C
+==== BB 3028 (0xFEE8258) approx BBs exec'd 0 ====
+
+	0xFEE8258:  807B0004  lwz r3,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEE825C:  815B0008  lwz r10,8(r27)
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0xFEE8260:  7C835040  cmpl cr1,r3,r10
+	  10: GETL       	R3, t8
+	  11: GETL       	R10, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0xFEE8264:  408402C0  bc 4,4,0xFEE8524
+	  15: Jc04o       	$0xFEE8524
+
+
+
+. 3028 FEE8258 16
+. 80 7B 00 04 81 5B 00 08 7C 83 50 40 40 84 02 C0
+==== BB 3029 (0xFEE8268) approx BBs exec'd 0 ====
+
+	0xFEE8268:  39830001  addi r12,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0xFEE826C:  88630000  lbz r3,0(r3)
+	   4: GETL       	R3, t2
+	   5: LDB       	(t2), t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE8270:  919B0004  stw r12,4(r27)
+	   8: GETL       	R12, t6
+	   9: GETL       	R27, t8
+	  10: ADDL       	$0x4, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEE8274:  2B030001  cmpli cr6,r3,1
+	  13: GETL       	R3, t10
+	  14: MOVL       	$0x1, t14
+	  15: CMPUL       	t10, t14, t12  (-rSo)
+	  16: ICRFL       	t12, $0x6, CR
+	  17: INCEIPL       	$4
+
+	0xFEE8278:  41B9FEE8  bc 13,25,0xFEE8160
+	  18: Js25o       	$0xFEE8160
+
+
+
+. 3029 FEE8268 20
+. 39 83 00 01 88 63 00 00 91 9B 00 04 2B 03 00 01 41 B9 FE E8
+==== BB 3030 (0xFEE827C) approx BBs exec'd 0 ====
+
+	0xFEE827C:  80170000  lwz r0,0(r23)
+	   0: GETL       	R23, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEE8280:  575D1838  rlwinm r29,r26,3,0,28
+	   4: GETL       	R26, t4
+	   5: SHLL       	$0x3, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFEE8284:  7D3D0214  add r9,r29,r0
+	   8: GETL       	R29, t6
+	   9: GETL       	R0, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0xFEE8288:  98690004  stb r3,4(r9)
+	  13: GETL       	R3, t10
+	  14: GETL       	R9, t12
+	  15: ADDL       	$0x4, t12
+	  16: STB       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEE828C:  807B0004  lwz r3,4(r27)
+	  18: GETL       	R27, t14
+	  19: ADDL       	$0x4, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R3
+	  22: INCEIPL       	$4
+
+	0xFEE8290:  80DB0008  lwz r6,8(r27)
+	  23: GETL       	R27, t18
+	  24: ADDL       	$0x8, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R6
+	  27: INCEIPL       	$4
+
+	0xFEE8294:  7C033040  cmpl cr0,r3,r6
+	  28: GETL       	R3, t22
+	  29: GETL       	R6, t24
+	  30: CMPUL       	t22, t24, t26  (-rSo)
+	  31: ICRFL       	t26, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0xFEE8298:  40800298  bc 4,0,0xFEE8530
+	  33: Jc00o       	$0xFEE8530
+
+
+
+. 3030 FEE827C 32
+. 80 17 00 00 57 5D 18 38 7D 3D 02 14 98 69 00 04 80 7B 00 04 80 DB 00 08 7C 03 30 40 40 80 02 98
+==== BB 3031 (0xFEE829C) approx BBs exec'd 0 ====
+
+	0xFEE829C:  38A30001  addi r5,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0xFEE82A0:  88630000  lbz r3,0(r3)
+	   4: GETL       	R3, t2
+	   5: LDB       	(t2), t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE82A4:  90BB0004  stw r5,4(r27)
+	   8: GETL       	R5, t6
+	   9: GETL       	R27, t8
+	  10: ADDL       	$0x4, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEE82A8:  7F83A840  cmpl cr7,r3,r21
+	  13: GETL       	R3, t10
+	  14: GETL       	R21, t12
+	  15: CMPUL       	t10, t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0xFEE82AC:  41BDFEB4  bc 13,29,0xFEE8160
+	  18: Js29o       	$0xFEE8160
+
+
+
+. 3031 FEE829C 20
+. 38 A3 00 01 88 63 00 00 90 BB 00 04 7F 83 A8 40 41 BD FE B4
+==== BB 3032 (0xFEE82B0) approx BBs exec'd 0 ====
+
+	0xFEE82B0:  80F70000  lwz r7,0(r23)
+	   0: GETL       	R23, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0xFEE82B4:  3B5A0001  addi r26,r26,1
+	   4: GETL       	R26, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xFEE82B8:  7EDD3A14  add r22,r29,r7
+	   8: GETL       	R29, t6
+	   9: GETL       	R7, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R22
+	  12: INCEIPL       	$4
+
+	0xFEE82BC:  98760005  stb r3,5(r22)
+	  13: GETL       	R3, t10
+	  14: GETL       	R22, t12
+	  15: ADDL       	$0x5, t12
+	  16: STB       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEE82C0:  80980000  lwz r4,0(r24)
+	  18: GETL       	R24, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R4
+	  21: INCEIPL       	$4
+
+	0xFEE82C4:  80770000  lwz r3,0(r23)
+	  22: GETL       	R23, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R3
+	  25: INCEIPL       	$4
+
+	0xFEE82C8:  7C9A2040  cmpl cr1,r26,r4
+	  26: GETL       	R26, t22
+	  27: GETL       	R4, t24
+	  28: CMPUL       	t22, t24, t26  (-rSo)
+	  29: ICRFL       	t26, $0x1, CR
+	  30: INCEIPL       	$4
+
+	0xFEE82CC:  839F00B0  lwz r28,176(r31)
+	  31: GETL       	R31, t28
+	  32: ADDL       	$0xB0, t28
+	  33: LDL       	(t28), t30
+	  34: PUTL       	t30, R28
+	  35: INCEIPL       	$4
+
+	0xFEE82D0:  7F9D192E  stwx r28,r29,r3
+	  36: GETL       	R3, t32
+	  37: GETL       	R29, t34
+	  38: ADDL       	t34, t32
+	  39: GETL       	R28, t36
+	  40: STL       	t36, (t32)
+	  41: INCEIPL       	$4
+
+	0xFEE82D4:  4184FF3C  bc 12,4,0xFEE8210
+	  42: Js04o       	$0xFEE8210
+
+
+
+. 3032 FEE82B0 40
+. 80 F7 00 00 3B 5A 00 01 7E DD 3A 14 98 76 00 05 80 98 00 00 80 77 00 00 7C 9A 20 40 83 9F 00 B0 7F 9D 19 2E 41 84 FF 3C
+==== BB 3033 (0xFEE8210) approx BBs exec'd 0 ====
+
+	0xFEE8210:  7F3CCB78  or r28,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0xFEE8214:  3BA00004  li r29,4
+	   3: MOVL       	$0x4, t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0xFEE8218:  813B0004  lwz r9,4(r27)
+	   6: GETL       	R27, t4
+	   7: ADDL       	$0x4, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0xFEE821C:  7F63DB78  or r3,r27,r27
+	  11: GETL       	R27, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0xFEE8220:  811B0008  lwz r8,8(r27)
+	  14: GETL       	R27, t10
+	  15: ADDL       	$0x8, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R8
+	  18: INCEIPL       	$4
+
+	0xFEE8224:  38090001  addi r0,r9,1
+	  19: GETL       	R9, t14
+	  20: ADDL       	$0x1, t14
+	  21: PUTL       	t14, R0
+	  22: INCEIPL       	$4
+
+	0xFEE8228:  7C094040  cmpl cr0,r9,r8
+	  23: GETL       	R9, t16
+	  24: GETL       	R8, t18
+	  25: CMPUL       	t16, t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x0, CR
+	  27: INCEIPL       	$4
+
+	0xFEE822C:  408002E0  bc 4,0,0xFEE850C
+	  28: Jc00o       	$0xFEE850C
+
+
+
+. 3033 FEE8210 32
+. 7F 3C CB 78 3B A0 00 04 81 3B 00 04 7F 63 DB 78 81 1B 00 08 38 09 00 01 7C 09 40 40 40 80 02 E0
+==== BB 3034 (0xFEE82D8) approx BBs exec'd 0 ====
+
+	0xFEE82D8:  806F0000  lwz r3,0(r15)
+	   0: GETL       	R15, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEE82DC:  38800001  li r4,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0xFEE82E0:  7EA5AB78  or r5,r21,r21
+	   7: GETL       	R21, t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFEE82E4:  7F66DB78  or r6,r27,r27
+	  10: GETL       	R27, t8
+	  11: PUTL       	t8, R6
+	  12: INCEIPL       	$4
+
+	0xFEE82E8:  4BFDFBF5  bl 0xFEC7EDC
+	  13: MOVL       	$0xFEE82EC, t10
+	  14: PUTL       	t10, LR
+	  15: JMPo-c       	$0xFEC7EDC  ($4)
+
+
+
+. 3034 FEE82D8 20
+. 80 6F 00 00 38 80 00 01 7E A5 AB 78 7F 66 DB 78 4B FD FB F5
+==== BB 3035 (0xFEE82EC) approx BBs exec'd 0 ====
+
+	0xFEE82EC:  7F03A800  cmp cr6,r3,r21
+	   0: GETL       	R3, t0
+	   1: GETL       	R21, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFEE82F0:  40BAFE70  bc 5,26,0xFEE8160
+	   5: Jc26o       	$0xFEE8160
+
+
+
+. 3035 FEE82EC 8
+. 7F 03 A8 00 40 BA FE 70
+==== BB 3036 (0xFEE82F4) approx BBs exec'd 0 ====
+
+	0xFEE82F4:  83520000  lwz r26,0(r18)
+	   0: GETL       	R18, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R26
+	   3: INCEIPL       	$4
+
+	0xFEE82F8:  3B200000  li r25,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R25
+	   6: INCEIPL       	$4
+
+	0xFEE82FC:  7C19D040  cmpl cr0,r25,r26
+	   7: GETL       	R25, t6
+	   8: GETL       	R26, t8
+	   9: CMPUL       	t6, t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFEE8300:  408000C8  bc 4,0,0xFEE83C8
+	  12: Jc00o       	$0xFEE83C8
+
+
+
+. 3036 FEE82F4 16
+. 83 52 00 00 3B 20 00 00 7C 19 D0 40 40 80 00 C8
+==== BB 3037 (0xFEE83C8) approx BBs exec'd 0 ====
+
+	0xFEE83C8:  823F00B8  lwz r17,184(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xB8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0xFEE83CC:  3BA00000  li r29,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFEE83D0:  7C9D8840  cmpl cr1,r29,r17
+	   8: GETL       	R29, t6
+	   9: GETL       	R17, t8
+	  10: CMPUL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0xFEE83D4:  40840054  bc 4,4,0xFEE8428
+	  13: Jc04o       	$0xFEE8428
+
+
+
+. 3037 FEE83C8 16
+. 82 3F 00 B8 3B A0 00 00 7C 9D 88 40 40 84 00 54
+==== BB 3038 (0xFEE83D8) approx BBs exec'd 0 ====
+
+	0xFEE83D8:  813B0004  lwz r9,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEE83DC:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE83E0:  825B0008  lwz r18,8(r27)
+	   8: GETL       	R27, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R18
+	  12: INCEIPL       	$4
+
+	0xFEE83E4:  38090001  addi r0,r9,1
+	  13: GETL       	R9, t10
+	  14: ADDL       	$0x1, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFEE83E8:  7C099040  cmpl cr0,r9,r18
+	  17: GETL       	R9, t12
+	  18: GETL       	R18, t14
+	  19: CMPUL       	t12, t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0xFEE83EC:  408002FC  bc 4,0,0xFEE86E8
+	  22: Jc00o       	$0xFEE86E8
+
+
+
+. 3038 FEE83D8 24
+. 81 3B 00 04 7F 63 DB 78 82 5B 00 08 38 09 00 01 7C 09 90 40 40 80 02 FC
+==== BB 3039 (0xFEE83F0) approx BBs exec'd 0 ====
+
+	0xFEE83F0:  88690000  lbz r3,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEE83F4:  901B0004  stw r0,4(r27)
+	   4: GETL       	R0, t4
+	   5: GETL       	R27, t6
+	   6: ADDL       	$0x4, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFEE83F8:  2F83FFFF  cmpi cr7,r3,-1
+	   9: GETL       	R3, t8
+	  10: MOVL       	$0xFFFFFFFF, t12
+	  11: CMPL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0xFEE83FC:  833F00B8  lwz r25,184(r31)
+	  14: GETL       	R31, t14
+	  15: ADDL       	$0xB8, t14
+	  16: LDL       	(t14), t16
+	  17: PUTL       	t16, R25
+	  18: INCEIPL       	$4
+
+	0xFEE8400:  57A01838  rlwinm r0,r29,3,0,28
+	  19: GETL       	R29, t18
+	  20: SHLL       	$0x3, t18
+	  21: PUTL       	t18, R0
+	  22: INCEIPL       	$4
+
+	0xFEE8404:  3BBD0001  addi r29,r29,1
+	  23: GETL       	R29, t20
+	  24: ADDL       	$0x1, t20
+	  25: PUTL       	t20, R29
+	  26: INCEIPL       	$4
+
+	0xFEE8408:  3163FFFF  addic r11,r3,-1
+	  27: GETL       	R3, t22
+	  28: ADCL       	$0xFFFFFFFF, t22  (-wCa)
+	  29: PUTL       	t22, R11
+	  30: INCEIPL       	$4
+
+	0xFEE840C:  7CAB1910  subfe r5,r11,r3
+	  31: GETL       	R11, t24
+	  32: GETL       	R3, t26
+	  33: SBBL       	t24, t26  (-rCa-wCa)
+	  34: PUTL       	t26, R5
+	  35: INCEIPL       	$4
+
+	0xFEE8410:  7F1DC840  cmpl cr6,r29,r25
+	  36: GETL       	R29, t28
+	  37: GETL       	R25, t30
+	  38: CMPUL       	t28, t30, t32  (-rSo)
+	  39: ICRFL       	t32, $0x6, CR
+	  40: INCEIPL       	$4
+
+	0xFEE8414:  41BEFD4C  bc 13,30,0xFEE8160
+	  41: Js30o       	$0xFEE8160
+
+
+
+. 3039 FEE83F0 40
+. 88 69 00 00 90 1B 00 04 2F 83 FF FF 83 3F 00 B8 57 A0 18 38 3B BD 00 01 31 63 FF FF 7C AB 19 10 7F 1D C8 40 41 BE FD 4C
+==== BB 3040 (0xFEE8418) approx BBs exec'd 0 ====
+
+	0xFEE8418:  80970000  lwz r4,0(r23)
+	   0: GETL       	R23, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFEE841C:  7F802214  add r28,r0,r4
+	   4: GETL       	R0, t4
+	   5: GETL       	R4, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R28
+	   8: INCEIPL       	$4
+
+	0xFEE8420:  98BC0006  stb r5,6(r28)
+	   9: GETL       	R5, t8
+	  10: GETL       	R28, t10
+	  11: ADDL       	$0x6, t10
+	  12: STB       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFEE8424:  4198FFB4  bc 12,24,0xFEE83D8
+	  14: Js24o       	$0xFEE83D8
+
+
+
+. 3040 FEE8418 16
+. 80 97 00 00 7F 80 22 14 98 BC 00 06 41 98 FF B4
+==== BB 3041 (0xFEE8428) approx BBs exec'd 0 ====
+
+	0xFEE8428:  80780000  lwz r3,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEE842C:  7F0AC378  or r10,r24,r24
+	   4: GETL       	R24, t4
+	   5: PUTL       	t4, R10
+	   6: INCEIPL       	$4
+
+	0xFEE8430:  7F1D1840  cmpl cr6,r29,r3
+	   7: GETL       	R29, t6
+	   8: GETL       	R3, t8
+	   9: CMPUL       	t6, t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFEE8434:  40980028  bc 4,24,0xFEE845C
+	  12: Jc24o       	$0xFEE845C
+
+
+
+. 3041 FEE8428 16
+. 80 78 00 00 7F 0A C3 78 7F 1D 18 40 40 98 00 28
+==== BB 3042 (0xFEE845C) approx BBs exec'd 0 ====
+
+	0xFEE845C:  817F00BC  lwz r11,188(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xBC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFEE8460:  3BA00000  li r29,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFEE8464:  7C1D5840  cmpl cr0,r29,r11
+	   8: GETL       	R29, t6
+	   9: GETL       	R11, t8
+	  10: CMPUL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFEE8468:  41A00040  bc 13,0,0xFEE84A8
+	  13: Js00o       	$0xFEE84A8
+
+
+
+. 3042 FEE845C 16
+. 81 7F 00 BC 3B A0 00 00 7C 1D 58 40 41 A0 00 40
+==== BB 3043 (0xFEE84A8) approx BBs exec'd 0 ====
+
+	0xFEE84A8:  813B0004  lwz r9,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEE84AC:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE84B0:  815B0008  lwz r10,8(r27)
+	   8: GETL       	R27, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0xFEE84B4:  38090001  addi r0,r9,1
+	  13: GETL       	R9, t10
+	  14: ADDL       	$0x1, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFEE84B8:  7F895040  cmpl cr7,r9,r10
+	  17: GETL       	R9, t12
+	  18: GETL       	R10, t14
+	  19: CMPUL       	t12, t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0xFEE84BC:  419CFFB4  bc 12,28,0xFEE8470
+	  22: Js28o       	$0xFEE8470
+
+
+
+. 3043 FEE84A8 24
+. 81 3B 00 04 7F 63 DB 78 81 5B 00 08 38 09 00 01 7F 89 50 40 41 9C FF B4
+==== BB 3044 (0xFEE8470) approx BBs exec'd 0 ====
+
+	0xFEE8470:  88690000  lbz r3,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEE8474:  901B0004  stw r0,4(r27)
+	   4: GETL       	R0, t4
+	   5: GETL       	R27, t6
+	   6: ADDL       	$0x4, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFEE8478:  2C83FFFF  cmpi cr1,r3,-1
+	   9: GETL       	R3, t8
+	  10: MOVL       	$0xFFFFFFFF, t12
+	  11: CMPL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0xFEE847C:  819F00BC  lwz r12,188(r31)
+	  14: GETL       	R31, t14
+	  15: ADDL       	$0xBC, t14
+	  16: LDL       	(t14), t16
+	  17: PUTL       	t16, R12
+	  18: INCEIPL       	$4
+
+	0xFEE8480:  57A01838  rlwinm r0,r29,3,0,28
+	  19: GETL       	R29, t18
+	  20: SHLL       	$0x3, t18
+	  21: PUTL       	t18, R0
+	  22: INCEIPL       	$4
+
+	0xFEE8484:  3BBD0001  addi r29,r29,1
+	  23: GETL       	R29, t20
+	  24: ADDL       	$0x1, t20
+	  25: PUTL       	t20, R29
+	  26: INCEIPL       	$4
+
+	0xFEE8488:  3163FFFF  addic r11,r3,-1
+	  27: GETL       	R3, t22
+	  28: ADCL       	$0xFFFFFFFF, t22  (-wCa)
+	  29: PUTL       	t22, R11
+	  30: INCEIPL       	$4
+
+	0xFEE848C:  7CCB1910  subfe r6,r11,r3
+	  31: GETL       	R11, t24
+	  32: GETL       	R3, t26
+	  33: SBBL       	t24, t26  (-rCa-wCa)
+	  34: PUTL       	t26, R6
+	  35: INCEIPL       	$4
+
+	0xFEE8490:  7F1D6040  cmpl cr6,r29,r12
+	  36: GETL       	R29, t28
+	  37: GETL       	R12, t30
+	  38: CMPUL       	t28, t30, t32  (-rSo)
+	  39: ICRFL       	t32, $0x6, CR
+	  40: INCEIPL       	$4
+
+	0xFEE8494:  41A6FCCC  bc 13,6,0xFEE8160
+	  41: Js06o       	$0xFEE8160
+
+
+
+. 3044 FEE8470 40
+. 88 69 00 00 90 1B 00 04 2C 83 FF FF 81 9F 00 BC 57 A0 18 38 3B BD 00 01 31 63 FF FF 7C CB 19 10 7F 1D 60 40 41 A6 FC CC
+==== BB 3045 (0xFEE8498) approx BBs exec'd 0 ====
+
+	0xFEE8498:  82370000  lwz r17,0(r23)
+	   0: GETL       	R23, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R17
+	   3: INCEIPL       	$4
+
+	0xFEE849C:  7D208A14  add r9,r0,r17
+	   4: GETL       	R0, t4
+	   5: GETL       	R17, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFEE84A0:  98C90007  stb r6,7(r9)
+	   9: GETL       	R6, t8
+	  10: GETL       	R9, t10
+	  11: ADDL       	$0x7, t10
+	  12: STB       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFEE84A4:  40980098  bc 4,24,0xFEE853C
+	  14: Jc24o       	$0xFEE853C
+
+
+
+. 3045 FEE8498 16
+. 82 37 00 00 7D 20 8A 14 98 C9 00 07 40 98 00 98
+==== BB 3046 (0xFEE853C) approx BBs exec'd 0 ====
+
+	0xFEE853C:  82780000  lwz r19,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R19
+	   3: INCEIPL       	$4
+
+	0xFEE8540:  7F0AC378  or r10,r24,r24
+	   4: GETL       	R24, t4
+	   5: PUTL       	t4, R10
+	   6: INCEIPL       	$4
+
+	0xFEE8544:  7F1D9840  cmpl cr6,r29,r19
+	   7: GETL       	R29, t6
+	   8: GETL       	R19, t8
+	   9: CMPUL       	t6, t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFEE8548:  40980028  bc 4,24,0xFEE8570
+	  12: Jc24o       	$0xFEE8570
+
+
+
+. 3046 FEE853C 16
+. 82 78 00 00 7F 0A C3 78 7F 1D 98 40 40 98 00 28
+==== BB 3047 (0xFEE8570) approx BBs exec'd 0 ====
+
+	0xFEE8570:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEE8574:  3BA00000  li r29,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0xFEE8578:  4BFD3339  bl 0xFEBB8B0
+	   6: MOVL       	$0xFEE857C, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFEBB8B0  ($4)
+
+
+
+. 3047 FEE8570 12
+. 7F 63 DB 78 3B A0 00 00 4B FD 33 39
+==== BB 3048 (0xFECECA4) approx BBs exec'd 0 ====
+
+	0xFECECA4:  7F88D000  cmp cr7,r8,r26
+	   0: GETL       	R8, t0
+	   1: GETL       	R26, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFECECA8:  419E02C0  bc 12,30,0xFECEF68
+	   5: Js30o       	$0xFECEF68
+
+
+
+. 3048 FECECA4 8
+. 7F 88 D0 00 41 9E 02 C0
+==== BB 3049 (0xFEE857C) approx BBs exec'd 0 ====
+
+	0xFEE857C:  80180000  lwz r0,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEE8580:  7F9D0040  cmpl cr7,r29,r0
+	   4: GETL       	R29, t4
+	   5: GETL       	R0, t6
+	   6: CMPUL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFEE8584:  409C0030  bc 4,28,0xFEE85B4
+	   9: Jc28o       	$0xFEE85B4
+
+
+
+. 3049 FEE857C 12
+. 80 18 00 00 7F 9D 00 40 40 9C 00 30
+==== BB 3050 (0xFEE8588) approx BBs exec'd 0 ====
+
+	0xFEE8588:  82D70000  lwz r22,0(r23)
+	   0: GETL       	R23, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R22
+	   3: INCEIPL       	$4
+
+	0xFEE858C:  57A31838  rlwinm r3,r29,3,0,28
+	   4: GETL       	R29, t4
+	   5: SHLL       	$0x3, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE8590:  836F0000  lwz r27,0(r15)
+	   8: GETL       	R15, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R27
+	  11: INCEIPL       	$4
+
+	0xFEE8594:  3BBD0001  addi r29,r29,1
+	  12: GETL       	R29, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0xFEE8598:  7C83B214  add r4,r3,r22
+	  16: GETL       	R3, t12
+	  17: GETL       	R22, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R4
+	  20: INCEIPL       	$4
+
+	0xFEE859C:  8B840005  lbz r28,5(r4)
+	  21: GETL       	R4, t16
+	  22: ADDL       	$0x5, t16
+	  23: LDB       	(t16), t18
+	  24: PUTL       	t18, R28
+	  25: INCEIPL       	$4
+
+	0xFEE85A0:  7C7CDA14  add r3,r28,r27
+	  26: GETL       	R28, t20
+	  27: GETL       	R27, t22
+	  28: ADDL       	t20, t22
+	  29: PUTL       	t22, R3
+	  30: INCEIPL       	$4
+
+	0xFEE85A4:  4BFFE705  bl 0xFEE6CA8
+	  31: MOVL       	$0xFEE85A8, t24
+	  32: PUTL       	t24, LR
+	  33: JMPo-c       	$0xFEE6CA8  ($4)
+
+
+
+. 3050 FEE8588 32
+. 82 D7 00 00 57 A3 18 38 83 6F 00 00 3B BD 00 01 7C 83 B2 14 8B 84 00 05 7C 7C DA 14 4B FF E7 05
+==== BB 3051 __tzstring(0xFEE6CA8) approx BBs exec'd 0 ====
+
+	0xFEE6CA8:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEE6CAC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEE6CB0:  480C11A1  bl 0xFFA7E50
+	   9: MOVL       	$0xFEE6CB4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3051 FEE6CA8 12
+. 94 21 FF D0 7C 08 02 A6 48 0C 11 A1
+==== BB 3052 (0xFEE6CB4) approx BBs exec'd 0 ====
+
+	0xFEE6CB4:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEE6CB8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEE6CBC:  93210014  stw r25,20(r1)
+	   8: GETL       	R25, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEE6CC0:  9361001C  stw r27,28(r1)
+	  13: GETL       	R27, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x1C, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEE6CC4:  7C7B1B78  or r27,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R27
+	  20: INCEIPL       	$4
+
+	0xFEE6CC8:  93810020  stw r28,32(r1)
+	  21: GETL       	R28, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x20, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEE6CCC:  833E0890  lwz r25,2192(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x890, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R25
+	  30: INCEIPL       	$4
+
+	0xFEE6CD0:  93A10024  stw r29,36(r1)
+	  31: GETL       	R29, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x24, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFEE6CD4:  93410018  stw r26,24(r1)
+	  36: GETL       	R26, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x18, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0xFEE6CD8:  93E1002C  stw r31,44(r1)
+	  41: GETL       	R31, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x2C, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0xFEE6CDC:  90010034  stw r0,52(r1)
+	  46: GETL       	R0, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x34, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0xFEE6CE0:  4BFEFDF9  bl 0xFED6AD8
+	  51: MOVL       	$0xFEE6CE4, t40
+	  52: PUTL       	t40, LR
+	  53: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 3052 FEE6CB4 48
+. 93 C1 00 28 7F C8 02 A6 93 21 00 14 93 61 00 1C 7C 7B 1B 78 93 81 00 20 83 3E 08 90 93 A1 00 24 93 41 00 18 93 E1 00 2C 90 01 00 34 4B FE FD F9
+==== BB 3053 (0xFEE6CE4) approx BBs exec'd 0 ====
+
+	0xFEE6CE4:  83990000  lwz r28,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R28
+	   3: INCEIPL       	$4
+
+	0xFEE6CE8:  7C7D1B78  or r29,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R29
+	   6: INCEIPL       	$4
+
+	0xFEE6CEC:  2F9C0000  cmpi cr7,r28,0
+	   7: GETL       	R28, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFEE6CF0:  7F8BE378  or r11,r28,r28
+	  11: GETL       	R28, t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0xFEE6CF4:  419E0040  bc 12,30,0xFEE6D34
+	  14: Js30o       	$0xFEE6D34
+
+
+
+. 3053 FEE6CE4 20
+. 83 99 00 00 7C 7D 1B 78 2F 9C 00 00 7F 8B E3 78 41 9E 00 40
+==== BB 3054 (0xFEE6D34) approx BBs exec'd 0 ====
+
+	0xFEE6D34:  387D0009  addi r3,r29,9
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x9, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFEE6D38:  480C35D1  bl 0xFFAA308
+	   4: MOVL       	$0xFEE6D3C, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0xFFAA308  ($4)
+
+
+
+. 3054 FEE6D34 8
+. 38 7D 00 09 48 0C 35 D1
+==== BB 3055 (0xFECF74C) approx BBs exec'd 0 ====
+
+	0xFECF74C:  82FE05FC  lwz r23,1532(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFECF750:  7E83CA14  add r20,r3,r25
+	   5: GETL       	R3, t4
+	   6: GETL       	R25, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R20
+	   9: INCEIPL       	$4
+
+	0xFECF754:  82D40004  lwz r22,4(r20)
+	  10: GETL       	R20, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R22
+	  14: INCEIPL       	$4
+
+	0xFECF758:  7E1CB800  cmp cr4,r28,r23
+	  15: GETL       	R28, t12
+	  16: GETL       	R23, t14
+	  17: CMPL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x4, CR
+	  19: INCEIPL       	$4
+
+	0xFECF75C:  8263000C  lwz r19,12(r3)
+	  20: GETL       	R3, t18
+	  21: ADDL       	$0xC, t18
+	  22: LDL       	(t18), t20
+	  23: PUTL       	t20, R19
+	  24: INCEIPL       	$4
+
+	0xFECF760:  62D50001  ori r21,r22,0x1
+	  25: GETL       	R22, t22
+	  26: ORL       	$0x1, t22
+	  27: PUTL       	t22, R21
+	  28: INCEIPL       	$4
+
+	0xFECF764:  92B40004  stw r21,4(r20)
+	  29: GETL       	R21, t24
+	  30: GETL       	R20, t26
+	  31: ADDL       	$0x4, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0xFECF768:  91530008  stw r10,8(r19)
+	  34: GETL       	R10, t28
+	  35: GETL       	R19, t30
+	  36: ADDL       	$0x8, t30
+	  37: STL       	t28, (t30)
+	  38: INCEIPL       	$4
+
+	0xFECF76C:  926A000C  stw r19,12(r10)
+	  39: GETL       	R19, t32
+	  40: GETL       	R10, t34
+	  41: ADDL       	$0xC, t34
+	  42: STL       	t32, (t34)
+	  43: INCEIPL       	$4
+
+	0xFECF770:  41920010  bc 12,18,0xFECF780
+	  44: Js18o       	$0xFECF780
+
+
+
+. 3055 FECF74C 40
+. 82 FE 05 FC 7E 83 CA 14 82 D4 00 04 7E 1C B8 00 82 63 00 0C 62 D5 00 01 92 B4 00 04 91 53 00 08 92 6A 00 0C 41 92 00 10
+==== BB 3056 (0xFECF780) approx BBs exec'd 0 ====
+
+	0xFECF780:  39030008  addi r8,r3,8
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0xFECF784:  4BFFFF2C  b 0xFECF6B0
+	   4: JMPo       	$0xFECF6B0  ($4)
+
+
+
+. 3056 FECF780 8
+. 39 03 00 08 4B FF FF 2C
+==== BB 3057 (0xFEE6D3C) approx BBs exec'd 0 ====
+
+	0xFEE6D3C:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEE6D40:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE6D44:  41820030  bc 12,2,0xFEE6D74
+	   8: Js02o       	$0xFEE6D74
+
+
+
+. 3057 FEE6D3C 12
+. 7C 7F 1B 79 38 60 00 00 41 82 00 30
+==== BB 3058 (0xFEE6D48) approx BBs exec'd 0 ====
+
+	0xFEE6D48:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEE6D4C:  3B5F0008  addi r26,r31,8
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x8, t2
+	   5: PUTL       	t2, R26
+	   6: INCEIPL       	$4
+
+	0xFEE6D50:  907F0000  stw r3,0(r31)
+	   7: GETL       	R3, t4
+	   8: GETL       	R31, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xFEE6D54:  7F64DB78  or r4,r27,r27
+	  11: GETL       	R27, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0xFEE6D58:  93BF0004  stw r29,4(r31)
+	  14: GETL       	R29, t10
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x4, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFEE6D5C:  7F43D378  or r3,r26,r26
+	  19: GETL       	R26, t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0xFEE6D60:  4BFEF771  bl 0xFED64D0
+	  22: MOVL       	$0xFEE6D64, t16
+	  23: PUTL       	t16, LR
+	  24: JMPo-c       	$0xFED64D0  ($4)
+
+
+
+. 3058 FEE6D48 28
+. 38 60 00 00 3B 5F 00 08 90 7F 00 00 7F 64 DB 78 93 BF 00 04 7F 43 D3 78 4B FE F7 71
+==== BB 3059 (0xFED6538) approx BBs exec'd 0 ====
+
+	0xFED6538:  5540863F  rlwinm. r0,r10,16,24,31
+	   0: GETL       	R10, t0
+	   1: ROLL       	$0x10, t0
+	   2: ANDL       	$0xFF, t0
+	   3: PUTL       	t0, R0
+	   4: CMP0L       	t0, t2  (-rSo)
+	   5: ICRFL       	t2, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFED653C:  98050005  stb r0,5(r5)
+	   7: GETL       	R0, t4
+	   8: GETL       	R5, t6
+	   9: ADDL       	$0x5, t6
+	  10: STB       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED6540:  4D820020  bclr 12,2
+	  12: GETL       	LR, t8
+	  13: Js02o-r       	t8
+
+
+
+. 3059 FED6538 12
+. 55 40 86 3F 98 05 00 05 4D 82 00 20
+==== BB 3060 (0xFED6544) approx BBs exec'd 0 ====
+
+	0xFED6544:  5540C63F  rlwinm. r0,r10,24,24,31
+	   0: GETL       	R10, t0
+	   1: ROLL       	$0x18, t0
+	   2: ANDL       	$0xFF, t0
+	   3: PUTL       	t0, R0
+	   4: CMP0L       	t0, t2  (-rSo)
+	   5: ICRFL       	t2, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFED6548:  98050006  stb r0,6(r5)
+	   7: GETL       	R0, t4
+	   8: GETL       	R5, t6
+	   9: ADDL       	$0x6, t6
+	  10: STB       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFED654C:  4D820020  bclr 12,2
+	  12: GETL       	LR, t8
+	  13: Js02o-r       	t8
+
+
+
+. 3060 FED6544 12
+. 55 40 C6 3F 98 05 00 06 4D 82 00 20
+==== BB 3061 (0xFED6550) approx BBs exec'd 0 ====
+
+	0xFED6550:  99450007  stb r10,7(r5)
+	   0: GETL       	R10, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	$0x7, t2
+	   3: STB       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFED6554:  4E800020  blr
+	   5: GETL       	LR, t4
+	   6: JMPo-r       	t4  ($4)
+
+
+
+. 3061 FED6550 8
+. 99 45 00 07 4E 80 00 20
+==== BB 3062 (0xFEE6D64) approx BBs exec'd 0 ====
+
+	0xFEE6D64:  2C9C0000  cmpi cr1,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEE6D68:  41860068  bc 12,6,0xFEE6DD0
+	   4: Js06o       	$0xFEE6DD0
+
+
+
+. 3062 FEE6D64 8
+. 2C 9C 00 00 41 86 00 68
+==== BB 3063 (0xFEE6DD0) approx BBs exec'd 0 ====
+
+	0xFEE6DD0:  93F90000  stw r31,0(r25)
+	   0: GETL       	R31, t0
+	   1: GETL       	R25, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFEE6DD4:  4BFFFF9C  b 0xFEE6D70
+	   4: JMPo       	$0xFEE6D70  ($4)
+
+
+
+. 3063 FEE6DD0 8
+. 93 F9 00 00 4B FF FF 9C
+==== BB 3064 (0xFEE6D70) approx BBs exec'd 0 ====
+
+	0xFEE6D70:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEE6D74:  80C10034  lwz r6,52(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x34, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0xFEE6D78:  83210014  lwz r25,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R25
+	  12: INCEIPL       	$4
+
+	0xFEE6D7C:  83410018  lwz r26,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R26
+	  17: INCEIPL       	$4
+
+	0xFEE6D80:  7CC803A6  mtlr r6
+	  18: GETL       	R6, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFEE6D84:  8361001C  lwz r27,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R27
+	  25: INCEIPL       	$4
+
+	0xFEE6D88:  83810020  lwz r28,32(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R28
+	  30: INCEIPL       	$4
+
+	0xFEE6D8C:  83A10024  lwz r29,36(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x24, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R29
+	  35: INCEIPL       	$4
+
+	0xFEE6D90:  83C10028  lwz r30,40(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x28, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R30
+	  40: INCEIPL       	$4
+
+	0xFEE6D94:  83E1002C  lwz r31,44(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x2C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R31
+	  45: INCEIPL       	$4
+
+	0xFEE6D98:  38210030  addi r1,r1,48
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x30, t36
+	  48: PUTL       	t36, R1
+	  49: INCEIPL       	$4
+
+	0xFEE6D9C:  4E800020  blr
+	  50: GETL       	LR, t38
+	  51: JMPo-r       	t38  ($4)
+
+
+
+. 3064 FEE6D70 48
+. 7F 43 D3 78 80 C1 00 34 83 21 00 14 83 41 00 18 7C C8 03 A6 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 3065 (0xFEE85A8) approx BBs exec'd 0 ====
+
+	0xFEE85A8:  80180000  lwz r0,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEE85AC:  7F9D0040  cmpl cr7,r29,r0
+	   4: GETL       	R29, t4
+	   5: GETL       	R0, t6
+	   6: CMPUL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFEE85B0:  419CFFD8  bc 12,28,0xFEE8588
+	   9: Js28o       	$0xFEE8588
+
+
+
+. 3065 FEE85A8 12
+. 80 18 00 00 7F 9D 00 40 41 9C FF D8
+==== BB 3066 (0xFEE6CF8) approx BBs exec'd 0 ====
+
+	0xFEE6CF8:  80AB0004  lwz r5,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFEE6CFC:  7D7C5B78  or r28,r11,r11
+	   5: GETL       	R11, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFEE6D00:  7F63DB78  or r3,r27,r27
+	   8: GETL       	R27, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFEE6D04:  7F85E840  cmpl cr7,r5,r29
+	  11: GETL       	R5, t8
+	  12: GETL       	R29, t10
+	  13: CMPUL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0xFEE6D08:  7C9D2850  subf r4,r29,r5
+	  16: GETL       	R29, t14
+	  17: GETL       	R5, t16
+	  18: SUBL       	t14, t16
+	  19: PUTL       	t16, R4
+	  20: INCEIPL       	$4
+
+	0xFEE6D0C:  7D2B2214  add r9,r11,r4
+	  21: GETL       	R11, t18
+	  22: GETL       	R4, t20
+	  23: ADDL       	t18, t20
+	  24: PUTL       	t20, R9
+	  25: INCEIPL       	$4
+
+	0xFEE6D10:  3BE90008  addi r31,r9,8
+	  26: GETL       	R9, t22
+	  27: ADDL       	$0x8, t22
+	  28: PUTL       	t22, R31
+	  29: INCEIPL       	$4
+
+	0xFEE6D14:  7FE4FB78  or r4,r31,r31
+	  30: GETL       	R31, t24
+	  31: PUTL       	t24, R4
+	  32: INCEIPL       	$4
+
+	0xFEE6D18:  419C0010  bc 12,28,0xFEE6D28
+	  33: Js28o       	$0xFEE6D28
+
+
+
+. 3066 FEE6CF8 36
+. 80 AB 00 04 7D 7C 5B 78 7F 63 DB 78 7F 85 E8 40 7C 9D 28 50 7D 2B 22 14 3B E9 00 08 7F E4 FB 78 41 9C 00 10
+==== BB 3067 (0xFEE6D1C) approx BBs exec'd 0 ====
+
+	0xFEE6D1C:  4BFEF695  bl 0xFED63B0
+	   0: MOVL       	$0xFEE6D20, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFED63B0  ($4)
+
+
+
+. 3067 FEE6D1C 4
+. 4B FE F6 95
+==== BB 3068 (0xFEE6D20) approx BBs exec'd 0 ====
+
+	0xFEE6D20:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEE6D24:  4182007C  bc 12,2,0xFEE6DA0
+	   4: Js02o       	$0xFEE6DA0
+
+
+
+. 3068 FEE6D20 8
+. 2C 03 00 00 41 82 00 7C
+==== BB 3069 (0xFEE6D28) approx BBs exec'd 0 ====
+
+	0xFEE6D28:  817C0000  lwz r11,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFEE6D2C:  2F8B0000  cmpi cr7,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFEE6D30:  409EFFC8  bc 4,30,0xFEE6CF8
+	   8: Jc30o       	$0xFEE6CF8
+
+
+
+. 3069 FEE6D28 12
+. 81 7C 00 00 2F 8B 00 00 40 9E FF C8
+==== BB 3070 (0xFEE6D6C) approx BBs exec'd 0 ====
+
+	0xFEE6D6C:  93FC0000  stw r31,0(r28)
+	   0: GETL       	R31, t0
+	   1: GETL       	R28, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFEE6D70:  7F43D378  or r3,r26,r26
+	   4: GETL       	R26, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0xFEE6D74:  80C10034  lwz r6,52(r1)
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x34, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R6
+	  11: INCEIPL       	$4
+
+	0xFEE6D78:  83210014  lwz r25,20(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R25
+	  16: INCEIPL       	$4
+
+	0xFEE6D7C:  83410018  lwz r26,24(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x18, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R26
+	  21: INCEIPL       	$4
+
+	0xFEE6D80:  7CC803A6  mtlr r6
+	  22: GETL       	R6, t18
+	  23: PUTL       	t18, LR
+	  24: INCEIPL       	$4
+
+	0xFEE6D84:  8361001C  lwz r27,28(r1)
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x1C, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R27
+	  29: INCEIPL       	$4
+
+	0xFEE6D88:  83810020  lwz r28,32(r1)
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x20, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R28
+	  34: INCEIPL       	$4
+
+	0xFEE6D8C:  83A10024  lwz r29,36(r1)
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x24, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R29
+	  39: INCEIPL       	$4
+
+	0xFEE6D90:  83C10028  lwz r30,40(r1)
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x28, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R30
+	  44: INCEIPL       	$4
+
+	0xFEE6D94:  83E1002C  lwz r31,44(r1)
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x2C, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R31
+	  49: INCEIPL       	$4
+
+	0xFEE6D98:  38210030  addi r1,r1,48
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x30, t40
+	  52: PUTL       	t40, R1
+	  53: INCEIPL       	$4
+
+	0xFEE6D9C:  4E800020  blr
+	  54: GETL       	LR, t42
+	  55: JMPo-r       	t42  ($4)
+
+
+
+. 3070 FEE6D6C 52
+. 93 FC 00 00 7F 43 D3 78 80 C1 00 34 83 21 00 14 83 41 00 18 7C C8 03 A6 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 3071 (0xFED652C) approx BBs exec'd 0 ====
+
+	0xFED652C:  5540463F  rlwinm. r0,r10,8,24,31
+	   0: GETL       	R10, t0
+	   1: SHRL       	$0x18, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFED6530:  98050004  stb r0,4(r5)
+	   6: GETL       	R0, t4
+	   7: GETL       	R5, t6
+	   8: ADDL       	$0x4, t6
+	   9: STB       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xFED6534:  4D820020  bclr 12,2
+	  11: GETL       	LR, t8
+	  12: Js02o-r       	t8
+
+
+
+. 3071 FED652C 12
+. 55 40 46 3F 98 05 00 04 4D 82 00 20
+==== BB 3072 (0xFEE6DA0) approx BBs exec'd 0 ====
+
+	0xFEE6DA0:  80C10034  lwz r6,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFEE6DA4:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE6DA8:  83210014  lwz r25,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R25
+	  12: INCEIPL       	$4
+
+	0xFEE6DAC:  83410018  lwz r26,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R26
+	  17: INCEIPL       	$4
+
+	0xFEE6DB0:  7CC803A6  mtlr r6
+	  18: GETL       	R6, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFEE6DB4:  8361001C  lwz r27,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R27
+	  25: INCEIPL       	$4
+
+	0xFEE6DB8:  83810020  lwz r28,32(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R28
+	  30: INCEIPL       	$4
+
+	0xFEE6DBC:  83A10024  lwz r29,36(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x24, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R29
+	  35: INCEIPL       	$4
+
+	0xFEE6DC0:  83C10028  lwz r30,40(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x28, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R30
+	  40: INCEIPL       	$4
+
+	0xFEE6DC4:  83E1002C  lwz r31,44(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x2C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R31
+	  45: INCEIPL       	$4
+
+	0xFEE6DC8:  38210030  addi r1,r1,48
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x30, t36
+	  48: PUTL       	t36, R1
+	  49: INCEIPL       	$4
+
+	0xFEE6DCC:  4E800020  blr
+	  50: GETL       	LR, t38
+	  51: JMPo-r       	t38  ($4)
+
+
+
+. 3072 FEE6DA0 48
+. 80 C1 00 34 7F E3 FB 78 83 21 00 14 83 41 00 18 7C C8 03 A6 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 3073 (0xFEE85B4) approx BBs exec'd 0 ====
+
+	0xFEE85B4:  83B00000  lwz r29,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0xFEE85B8:  3B000000  li r24,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R24
+	   6: INCEIPL       	$4
+
+	0xFEE85BC:  835E1AE4  lwz r26,6884(r30)
+	   7: GETL       	R30, t6
+	   8: ADDL       	$0x1AE4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R26
+	  11: INCEIPL       	$4
+
+	0xFEE85C0:  2F9D0000  cmpi cr7,r29,0
+	  12: GETL       	R29, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0xFEE85C4:  931A0004  stw r24,4(r26)
+	  16: GETL       	R24, t14
+	  17: GETL       	R26, t16
+	  18: ADDL       	$0x4, t16
+	  19: STL       	t14, (t16)
+	  20: INCEIPL       	$4
+
+	0xFEE85C8:  931A0000  stw r24,0(r26)
+	  21: GETL       	R24, t18
+	  22: GETL       	R26, t20
+	  23: STL       	t18, (t20)
+	  24: INCEIPL       	$4
+
+	0xFEE85CC:  419E0060  bc 12,30,0xFEE862C
+	  25: Js30o       	$0xFEE862C
+
+
+
+. 3073 FEE85B4 28
+. 83 B0 00 00 3B 00 00 00 83 5E 1A E4 2F 9D 00 00 93 1A 00 04 93 1A 00 00 41 9E 00 60
+==== BB 3074 (0xFEE85D0) approx BBs exec'd 0 ====
+
+	0xFEE85D0:  818E0000  lwz r12,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R12
+	   3: INCEIPL       	$4
+
+	0xFEE85D4:  3BBDFFFF  addi r29,r29,-1
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFEE85D8:  81170000  lwz r8,0(r23)
+	   8: GETL       	R23, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R8
+	  11: INCEIPL       	$4
+
+	0xFEE85DC:  7D5D60AE  lbzx r10,r29,r12
+	  12: GETL       	R12, t10
+	  13: GETL       	R29, t12
+	  14: ADDL       	t12, t10
+	  15: LDB       	(t10), t14
+	  16: PUTL       	t14, R10
+	  17: INCEIPL       	$4
+
+	0xFEE85E0:  554B1838  rlwinm r11,r10,3,0,28
+	  18: GETL       	R10, t16
+	  19: SHLL       	$0x3, t16
+	  20: PUTL       	t16, R11
+	  21: INCEIPL       	$4
+
+	0xFEE85E4:  7D2B4214  add r9,r11,r8
+	  22: GETL       	R11, t18
+	  23: GETL       	R8, t20
+	  24: ADDL       	t18, t20
+	  25: PUTL       	t20, R9
+	  26: INCEIPL       	$4
+
+	0xFEE85E8:  8B890004  lbz r28,4(r9)
+	  27: GETL       	R9, t22
+	  28: ADDL       	$0x4, t22
+	  29: LDB       	(t22), t24
+	  30: PUTL       	t24, R28
+	  31: INCEIPL       	$4
+
+	0xFEE85EC:  579B103A  rlwinm r27,r28,2,0,29
+	  32: GETL       	R28, t26
+	  33: SHLL       	$0x2, t26
+	  34: PUTL       	t26, R27
+	  35: INCEIPL       	$4
+
+	0xFEE85F0:  7CFBD02E  lwzx r7,r27,r26
+	  36: GETL       	R26, t28
+	  37: GETL       	R27, t30
+	  38: ADDL       	t30, t28
+	  39: LDL       	(t28), t32
+	  40: PUTL       	t32, R7
+	  41: INCEIPL       	$4
+
+	0xFEE85F4:  2C870000  cmpi cr1,r7,0
+	  42: GETL       	R7, t34
+	  43: CMP0L       	t34, t36  (-rSo)
+	  44: ICRFL       	t36, $0x1, CR
+	  45: INCEIPL       	$4
+
+	0xFEE85F8:  4186000C  bc 12,6,0xFEE8604
+	  46: Js06o       	$0xFEE8604
+
+
+
+. 3074 FEE85D0 44
+. 81 8E 00 00 3B BD FF FF 81 17 00 00 7D 5D 60 AE 55 4B 18 38 7D 2B 42 14 8B 89 00 04 57 9B 10 3A 7C FB D0 2E 2C 87 00 00 41 86 00 0C
+==== BB 3075 (0xFEE8604) approx BBs exec'd 0 ====
+
+	0xFEE8604:  88090005  lbz r0,5(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x5, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEE8608:  223C0001  subfic r17,r28,1
+	   5: GETL       	R28, t4
+	   6: MOVL       	$0x1, t6
+	   7: SBBL       	t4, t6  (-wCa)
+	   8: PUTL       	t6, R17
+	   9: INCEIPL       	$4
+
+	0xFEE860C:  826F0000  lwz r19,0(r15)
+	  10: GETL       	R15, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R19
+	  13: INCEIPL       	$4
+
+	0xFEE8610:  7C730214  add r3,r19,r0
+	  14: GETL       	R19, t12
+	  15: GETL       	R0, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R3
+	  18: INCEIPL       	$4
+
+	0xFEE8614:  4BFFE695  bl 0xFEE6CA8
+	  19: MOVL       	$0xFEE8618, t16
+	  20: PUTL       	t16, LR
+	  21: JMPo-c       	$0xFEE6CA8  ($4)
+
+
+
+. 3075 FEE8604 20
+. 88 09 00 05 22 3C 00 01 82 6F 00 00 7C 73 02 14 4B FF E6 95
+==== BB 3076 (0xFEE8618) approx BBs exec'd 0 ====
+
+	0xFEE8618:  5629103A  rlwinm r9,r17,2,0,29
+	   0: GETL       	R17, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFEE861C:  7C7BD12E  stwx r3,r27,r26
+	   4: GETL       	R26, t2
+	   5: GETL       	R27, t4
+	   6: ADDL       	t4, t2
+	   7: GETL       	R3, t6
+	   8: STL       	t6, (t2)
+	   9: INCEIPL       	$4
+
+	0xFEE8620:  7CC9D02E  lwzx r6,r9,r26
+	  10: GETL       	R26, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	t10, t8
+	  13: LDL       	(t8), t12
+	  14: PUTL       	t12, R6
+	  15: INCEIPL       	$4
+
+	0xFEE8624:  2F060000  cmpi cr6,r6,0
+	  16: GETL       	R6, t14
+	  17: CMP0L       	t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x6, CR
+	  19: INCEIPL       	$4
+
+	0xFEE8628:  419AFFD4  bc 12,26,0xFEE85FC
+	  20: Js26o       	$0xFEE85FC
+
+
+
+. 3076 FEE8618 20
+. 56 29 10 3A 7C 7B D1 2E 7C C9 D0 2E 2F 06 00 00 41 9A FF D4
+==== BB 3077 (0xFEE85FC) approx BBs exec'd 0 ====
+
+	0xFEE85FC:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFEE8600:  4BFFFFCC  b 0xFEE85CC
+	   4: JMPo       	$0xFEE85CC  ($4)
+
+
+
+. 3077 FEE85FC 8
+. 2F 9D 00 00 4B FF FF CC
+==== BB 3078 (0xFEE85CC) approx BBs exec'd 0 ====
+
+	0xFEE85CC:  419E0060  bc 12,30,0xFEE862C
+	   0: Js30o       	$0xFEE862C
+
+
+
+. 3078 FEE85CC 4
+. 41 9E 00 60
+==== BB 3079 (0xFEE862C) approx BBs exec'd 0 ====
+
+	0xFEE862C:  807A0000  lwz r3,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEE8630:  2C030000  cmpi cr0,r3,0
+	   4: GETL       	R3, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0xFEE8634:  4182016C  bc 12,2,0xFEE87A0
+	   8: Js02o       	$0xFEE87A0
+
+
+
+. 3079 FEE862C 12
+. 80 7A 00 00 2C 03 00 00 41 82 01 6C
+==== BB 3080 (0xFEE8638) approx BBs exec'd 0 ====
+
+	0xFEE8638:  83BA0004  lwz r29,4(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFEE863C:  2F9D0000  cmpi cr7,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFEE8640:  409E0008  bc 4,30,0xFEE8648
+	   9: Jc30o       	$0xFEE8648
+
+
+
+. 3080 FEE8638 12
+. 83 BA 00 04 2F 9D 00 00 40 9E 00 08
+==== BB 3081 (0xFEE8648) approx BBs exec'd 0 ====
+
+	0xFEE8648:  812F0000  lwz r9,0(r15)
+	   0: GETL       	R15, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFEE864C:  815E1C2C  lwz r10,7212(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1C2C, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0xFEE8650:  88090000  lbz r0,0(r9)
+	   9: GETL       	R9, t8
+	  10: LDB       	(t8), t10
+	  11: PUTL       	t10, R0
+	  12: INCEIPL       	$4
+
+	0xFEE8654:  7D2B4B78  or r11,r9,r9
+	  13: GETL       	R9, t12
+	  14: PUTL       	t12, R11
+	  15: INCEIPL       	$4
+
+	0xFEE8658:  48000008  b 0xFEE8660
+	  16: JMPo       	$0xFEE8660  ($4)
+
+
+
+. 3081 FEE8648 20
+. 81 2F 00 00 81 5E 1C 2C 88 09 00 00 7D 2B 4B 78 48 00 00 08
+==== BB 3082 (0xFEE8660) approx BBs exec'd 0 ====
+
+	0xFEE8660:  2C800000  cmpi cr1,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEE8664:  4086FFF8  bc 4,6,0xFEE865C
+	   4: Jc06o       	$0xFEE865C
+
+
+
+. 3082 FEE8660 8
+. 2C 80 00 00 40 86 FF F8
+==== BB 3083 (0xFEE865C) approx BBs exec'd 0 ====
+
+	0xFEE865C:  8C090001  lbzu r0,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFEE8660:  2C800000  cmpi cr1,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFEE8664:  4086FFF8  bc 4,6,0xFEE865C
+	  10: Jc06o       	$0xFEE865C
+
+
+
+. 3083 FEE865C 12
+. 8C 09 00 01 2C 80 00 00 40 86 FF F8
+==== BB 3084 (0xFEE8668) approx BBs exec'd 0 ====
+
+	0xFEE8668:  834A0000  lwz r26,0(r10)
+	   0: GETL       	R10, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R26
+	   3: INCEIPL       	$4
+
+	0xFEE866C:  7D6B4850  subf r11,r11,r9
+	   4: GETL       	R11, t4
+	   5: GETL       	R9, t6
+	   6: SUBL       	t4, t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0xFEE8670:  7F0BD040  cmpl cr6,r11,r26
+	   9: GETL       	R11, t8
+	  10: GETL       	R26, t10
+	  11: CMPUL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFEE8674:  40990008  bc 4,25,0xFEE867C
+	  14: Jc25o       	$0xFEE867C
+
+
+
+. 3084 FEE8668 16
+. 83 4A 00 00 7D 6B 48 50 7F 0B D0 40 40 99 00 08
+==== BB 3085 (0xFEE8678) approx BBs exec'd 0 ====
+
+	0xFEE8678:  916A0000  stw r11,0(r10)
+	   0: GETL       	R11, t0
+	   1: GETL       	R10, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFEE867C:  832F0000  lwz r25,0(r15)
+	   4: GETL       	R15, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R25
+	   7: INCEIPL       	$4
+
+	0xFEE8680:  39290001  addi r9,r9,1
+	   8: GETL       	R9, t8
+	   9: ADDL       	$0x1, t8
+	  10: PUTL       	t8, R9
+	  11: INCEIPL       	$4
+
+	0xFEE8684:  7E59AA14  add r18,r25,r21
+	  12: GETL       	R25, t10
+	  13: GETL       	R21, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R18
+	  16: INCEIPL       	$4
+
+	0xFEE8688:  7C099040  cmpl cr0,r9,r18
+	  17: GETL       	R9, t14
+	  18: GETL       	R18, t16
+	  19: CMPUL       	t14, t16, t18  (-rSo)
+	  20: ICRFL       	t18, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0xFEE868C:  4180FFC4  bc 12,0,0xFEE8650
+	  22: Js00o       	$0xFEE8650
+
+
+
+. 3085 FEE8678 24
+. 91 6A 00 00 83 2F 00 00 39 29 00 01 7E 59 AA 14 7C 09 90 40 41 80 FF C4
+==== BB 3086 (0xFEE8650) approx BBs exec'd 0 ====
+
+	0xFEE8650:  88090000  lbz r0,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEE8654:  7D2B4B78  or r11,r9,r9
+	   4: GETL       	R9, t4
+	   5: PUTL       	t4, R11
+	   6: INCEIPL       	$4
+
+	0xFEE8658:  48000008  b 0xFEE8660
+	   7: JMPo       	$0xFEE8660  ($4)
+
+
+
+. 3086 FEE8650 12
+. 88 09 00 00 7D 2B 4B 78 48 00 00 08
+==== BB 3087 (0xFEE867C) approx BBs exec'd 0 ====
+
+	0xFEE867C:  832F0000  lwz r25,0(r15)
+	   0: GETL       	R15, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R25
+	   3: INCEIPL       	$4
+
+	0xFEE8680:  39290001  addi r9,r9,1
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFEE8684:  7E59AA14  add r18,r25,r21
+	   8: GETL       	R25, t6
+	   9: GETL       	R21, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R18
+	  12: INCEIPL       	$4
+
+	0xFEE8688:  7C099040  cmpl cr0,r9,r18
+	  13: GETL       	R9, t10
+	  14: GETL       	R18, t12
+	  15: CMPUL       	t10, t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0xFEE868C:  4180FFC4  bc 12,0,0xFEE8650
+	  18: Js00o       	$0xFEE8650
+
+
+
+. 3087 FEE867C 20
+. 83 2F 00 00 39 29 00 01 7E 59 AA 14 7C 09 90 40 41 80 FF C4
+==== BB 3088 (0xFEE8690) approx BBs exec'd 0 ====
+
+	0xFEE8690:  80B00000  lwz r5,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFEE8694:  2F850000  cmpi cr7,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFEE8698:  409E0058  bc 4,30,0xFEE86F0
+	   8: Jc30o       	$0xFEE86F0
+
+
+
+. 3088 FEE8690 12
+. 80 B0 00 00 2F 85 00 00 40 9E 00 58
+==== BB 3089 (0xFEE86F0) approx BBs exec'd 0 ====
+
+	0xFEE86F0:  38C00000  li r6,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFEE86F4:  815E08F8  lwz r10,2296(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x8F8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFEE86F8:  2F060000  cmpi cr6,r6,0
+	   8: GETL       	R6, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFEE86FC:  809E08FC  lwz r4,2300(r30)
+	  12: GETL       	R30, t10
+	  13: ADDL       	$0x8FC, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R4
+	  16: INCEIPL       	$4
+
+	0xFEE8700:  38A5FFFF  addi r5,r5,-1
+	  17: GETL       	R5, t14
+	  18: ADDL       	$0xFFFFFFFF, t14
+	  19: PUTL       	t14, R5
+	  20: INCEIPL       	$4
+
+	0xFEE8704:  38E00000  li r7,0
+	  21: MOVL       	$0x0, t16
+	  22: PUTL       	t16, R7
+	  23: INCEIPL       	$4
+
+	0xFEE8708:  90C40000  stw r6,0(r4)
+	  24: GETL       	R6, t18
+	  25: GETL       	R4, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0xFEE870C:  90CA0000  stw r6,0(r10)
+	  28: GETL       	R6, t22
+	  29: GETL       	R10, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0xFEE8710:  48000044  b 0xFEE8754
+	  32: JMPo       	$0xFEE8754  ($4)
+
+
+
+. 3089 FEE86F0 36
+. 38 C0 00 00 81 5E 08 F8 2F 06 00 00 80 9E 08 FC 38 A5 FF FF 38 E0 00 00 90 C4 00 00 90 CA 00 00 48 00 00 44
+==== BB 3090 (0xFEE8754) approx BBs exec'd 0 ====
+
+	0xFEE8754:  2C870000  cmpi cr1,r7,0
+	   0: GETL       	R7, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEE8758:  41A6FFBC  bc 13,6,0xFEE8714
+	   4: Js06o       	$0xFEE8714
+
+
+
+. 3090 FEE8754 8
+. 2C 87 00 00 41 A6 FF BC
+==== BB 3091 (0xFEE8714) approx BBs exec'd 0 ====
+
+	0xFEE8714:  838E0000  lwz r28,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R28
+	   3: INCEIPL       	$4
+
+	0xFEE8718:  81170000  lwz r8,0(r23)
+	   4: GETL       	R23, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R8
+	   7: INCEIPL       	$4
+
+	0xFEE871C:  7F7C28AE  lbzx r27,r28,r5
+	   8: GETL       	R5, t8
+	   9: GETL       	R28, t10
+	  10: ADDL       	t10, t8
+	  11: LDB       	(t8), t12
+	  12: PUTL       	t12, R27
+	  13: INCEIPL       	$4
+
+	0xFEE8720:  576B1838  rlwinm r11,r27,3,0,28
+	  14: GETL       	R27, t14
+	  15: SHLL       	$0x3, t14
+	  16: PUTL       	t14, R11
+	  17: INCEIPL       	$4
+
+	0xFEE8724:  7E0B4214  add r16,r11,r8
+	  18: GETL       	R11, t16
+	  19: GETL       	R8, t18
+	  20: ADDL       	t16, t18
+	  21: PUTL       	t18, R16
+	  22: INCEIPL       	$4
+
+	0xFEE8728:  89F00004  lbz r15,4(r16)
+	  23: GETL       	R16, t20
+	  24: ADDL       	$0x4, t20
+	  25: LDB       	(t20), t22
+	  26: PUTL       	t22, R15
+	  27: INCEIPL       	$4
+
+	0xFEE872C:  2C0F0000  cmpi cr0,r15,0
+	  28: GETL       	R15, t24
+	  29: CMP0L       	t24, t26  (-rSo)
+	  30: ICRFL       	t26, $0x0, CR
+	  31: INCEIPL       	$4
+
+	0xFEE8730:  4082002C  bc 4,2,0xFEE875C
+	  32: Jc02o       	$0xFEE875C
+
+
+
+. 3091 FEE8714 32
+. 83 8E 00 00 81 17 00 00 7F 7C 28 AE 57 6B 18 38 7E 0B 42 14 89 F0 00 04 2C 0F 00 00 40 82 00 2C
+==== BB 3092 (0xFEE8734) approx BBs exec'd 0 ====
+
+	0xFEE8734:  7C6B402E  lwzx r3,r11,r8
+	   0: GETL       	R8, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R3
+	   5: INCEIPL       	$4
+
+	0xFEE8738:  38E00001  li r7,1
+	   6: MOVL       	$0x1, t6
+	   7: PUTL       	t6, R7
+	   8: INCEIPL       	$4
+
+	0xFEE873C:  906A0000  stw r3,0(r10)
+	   9: GETL       	R3, t8
+	  10: GETL       	R10, t10
+	  11: STL       	t8, (t10)
+	  12: INCEIPL       	$4
+
+	0xFEE8740:  7CE03039  and. r0,r7,r6
+	  13: GETL       	R7, t12
+	  14: GETL       	R6, t14
+	  15: ANDL       	t12, t14
+	  16: PUTL       	t14, R0
+	  17: CMP0L       	t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0xFEE8744:  2F850000  cmpi cr7,r5,0
+	  20: GETL       	R5, t18
+	  21: CMP0L       	t18, t20  (-rSo)
+	  22: ICRFL       	t20, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0xFEE8748:  38A5FFFF  addi r5,r5,-1
+	  24: GETL       	R5, t22
+	  25: ADDL       	$0xFFFFFFFF, t22
+	  26: PUTL       	t22, R5
+	  27: INCEIPL       	$4
+
+	0xFEE874C:  40820048  bc 4,2,0xFEE8794
+	  28: Jc02o       	$0xFEE8794
+
+
+
+. 3092 FEE8734 28
+. 7C 6B 40 2E 38 E0 00 01 90 6A 00 00 7C E0 30 39 2F 85 00 00 38 A5 FF FF 40 82 00 48
+==== BB 3093 (0xFEE8750) approx BBs exec'd 0 ====
+
+	0xFEE8750:  419E0044  bc 12,30,0xFEE8794
+	   0: Js30o       	$0xFEE8794
+
+
+
+. 3093 FEE8750 4
+. 41 9E 00 44
+==== BB 3094 (0xFEE875C) approx BBs exec'd 0 ====
+
+	0xFEE875C:  40BAFFE4  bc 5,26,0xFEE8740
+	   0: Jc26o       	$0xFEE8740
+
+
+
+. 3094 FEE875C 4
+. 40 BA FF E4
+==== BB 3095 (0xFEE8760) approx BBs exec'd 0 ====
+
+	0xFEE8760:  816E0000  lwz r11,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFEE8764:  81170000  lwz r8,0(r23)
+	   4: GETL       	R23, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R8
+	   7: INCEIPL       	$4
+
+	0xFEE8768:  7D8B28AE  lbzx r12,r11,r5
+	   8: GETL       	R5, t8
+	   9: GETL       	R11, t10
+	  10: ADDL       	t10, t8
+	  11: LDB       	(t8), t12
+	  12: PUTL       	t12, R12
+	  13: INCEIPL       	$4
+
+	0xFEE876C:  558B1838  rlwinm r11,r12,3,0,28
+	  14: GETL       	R12, t14
+	  15: SHLL       	$0x3, t14
+	  16: PUTL       	t14, R11
+	  17: INCEIPL       	$4
+
+	0xFEE8770:  7F0B4214  add r24,r11,r8
+	  18: GETL       	R11, t16
+	  19: GETL       	R8, t18
+	  20: ADDL       	t16, t18
+	  21: PUTL       	t18, R24
+	  22: INCEIPL       	$4
+
+	0xFEE8774:  8AD80004  lbz r22,4(r24)
+	  23: GETL       	R24, t20
+	  24: ADDL       	$0x4, t20
+	  25: LDB       	(t20), t22
+	  26: PUTL       	t22, R22
+	  27: INCEIPL       	$4
+
+	0xFEE8778:  2F960000  cmpi cr7,r22,0
+	  28: GETL       	R22, t24
+	  29: CMP0L       	t24, t26  (-rSo)
+	  30: ICRFL       	t26, $0x7, CR
+	  31: INCEIPL       	$4
+
+	0xFEE877C:  41BEFFC4  bc 13,30,0xFEE8740
+	  32: Js30o       	$0xFEE8740
+
+
+
+. 3095 FEE8760 32
+. 81 6E 00 00 81 17 00 00 7D 8B 28 AE 55 8B 18 38 7F 0B 42 14 8A D8 00 04 2F 96 00 00 41 BE FF C4
+==== BB 3096 (0xFEE8780) approx BBs exec'd 0 ====
+
+	0xFEE8780:  38C00001  li r6,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFEE8784:  7D2B402E  lwzx r9,r11,r8
+	   3: GETL       	R8, t2
+	   4: GETL       	R11, t4
+	   5: ADDL       	t4, t2
+	   6: LDL       	(t2), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFEE8788:  2F060000  cmpi cr6,r6,0
+	   9: GETL       	R6, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0xFEE878C:  91240000  stw r9,0(r4)
+	  13: GETL       	R9, t12
+	  14: GETL       	R4, t14
+	  15: STL       	t12, (t14)
+	  16: INCEIPL       	$4
+
+	0xFEE8790:  4BFFFFB0  b 0xFEE8740
+	  17: JMPo       	$0xFEE8740  ($4)
+
+
+
+. 3096 FEE8780 20
+. 38 C0 00 01 7D 2B 40 2E 2F 06 00 00 91 24 00 00 4B FF FF B0
+==== BB 3097 (0xFEE8740) approx BBs exec'd 0 ====
+
+	0xFEE8740:  7CE03039  and. r0,r7,r6
+	   0: GETL       	R7, t0
+	   1: GETL       	R6, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFEE8744:  2F850000  cmpi cr7,r5,0
+	   7: GETL       	R5, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFEE8748:  38A5FFFF  addi r5,r5,-1
+	  11: GETL       	R5, t10
+	  12: ADDL       	$0xFFFFFFFF, t10
+	  13: PUTL       	t10, R5
+	  14: INCEIPL       	$4
+
+	0xFEE874C:  40820048  bc 4,2,0xFEE8794
+	  15: Jc02o       	$0xFEE8794
+
+
+
+. 3097 FEE8740 16
+. 7C E0 30 39 2F 85 00 00 38 A5 FF FF 40 82 00 48
+==== BB 3098 (0xFEE8794) approx BBs exec'd 0 ====
+
+	0xFEE8794:  40BAFF20  bc 5,26,0xFEE86B4
+	   0: Jc26o       	$0xFEE86B4
+
+
+
+. 3098 FEE8794 4
+. 40 BA FF 20
+==== BB 3099 (0xFEE86B4) approx BBs exec'd 0 ====
+
+	0xFEE86B4:  80EA0000  lwz r7,0(r10)
+	   0: GETL       	R10, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0xFEE86B8:  39400001  li r10,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R10
+	   6: INCEIPL       	$4
+
+	0xFEE86BC:  80C40000  lwz r6,0(r4)
+	   7: GETL       	R4, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R6
+	  10: INCEIPL       	$4
+
+	0xFEE86C0:  80BE1A78  lwz r5,6776(r30)
+	  11: GETL       	R30, t10
+	  12: ADDL       	$0x1A78, t10
+	  13: LDL       	(t10), t12
+	  14: PUTL       	t12, R5
+	  15: INCEIPL       	$4
+
+	0xFEE86C4:  7EE700D0  neg r23,r7
+	  16: GETL       	R7, t14
+	  17: NEGL       	t14
+	  18: PUTL       	t14, R23
+	  19: INCEIPL       	$4
+
+	0xFEE86C8:  7CE83278  xor r8,r7,r6
+	  20: GETL       	R7, t16
+	  21: GETL       	R6, t18
+	  22: XORL       	t16, t18
+	  23: PUTL       	t18, R8
+	  24: INCEIPL       	$4
+
+	0xFEE86CC:  81DE1D40  lwz r14,7488(r30)
+	  25: GETL       	R30, t20
+	  26: ADDL       	$0x1D40, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R14
+	  29: INCEIPL       	$4
+
+	0xFEE86D0:  3128FFFF  addic r9,r8,-1
+	  30: GETL       	R8, t24
+	  31: ADCL       	$0xFFFFFFFF, t24  (-wCa)
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0xFEE86D4:  7C894110  subfe r4,r9,r8
+	  34: GETL       	R9, t26
+	  35: GETL       	R8, t28
+	  36: SBBL       	t26, t28  (-rCa-wCa)
+	  37: PUTL       	t28, R4
+	  38: INCEIPL       	$4
+
+	0xFEE86D8:  91540000  stw r10,0(r20)
+	  39: GETL       	R10, t30
+	  40: GETL       	R20, t32
+	  41: STL       	t30, (t32)
+	  42: INCEIPL       	$4
+
+	0xFEE86DC:  90850000  stw r4,0(r5)
+	  43: GETL       	R4, t34
+	  44: GETL       	R5, t36
+	  45: STL       	t34, (t36)
+	  46: INCEIPL       	$4
+
+	0xFEE86E0:  92EE0000  stw r23,0(r14)
+	  47: GETL       	R23, t38
+	  48: GETL       	R14, t40
+	  49: STL       	t38, (t40)
+	  50: INCEIPL       	$4
+
+	0xFEE86E4:  4BFFF7D0  b 0xFEE7EB4
+	  51: JMPo       	$0xFEE7EB4  ($4)
+
+
+
+. 3099 FEE86B4 52
+. 80 EA 00 00 39 40 00 01 80 C4 00 00 80 BE 1A 78 7E E7 00 D0 7C E8 32 78 81 DE 1D 40 31 28 FF FF 7C 89 41 10 91 54 00 00 90 85 00 00 92 EE 00 00 4B FF F7 D0
+==== BB 3100 (0xFEE7EB4) approx BBs exec'd 0 ====
+
+	0xFEE7EB4:  80610000  lwz r3,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEE7EB8:  82230004  lwz r17,4(r3)
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R17
+	   8: INCEIPL       	$4
+
+	0xFEE7EBC:  81C3FFB8  lwz r14,-72(r3)
+	   9: GETL       	R3, t8
+	  10: ADDL       	$0xFFFFFFB8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R14
+	  13: INCEIPL       	$4
+
+	0xFEE7EC0:  7E2803A6  mtlr r17
+	  14: GETL       	R17, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFEE7EC4:  81E3FFBC  lwz r15,-68(r3)
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0xFFFFFFBC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R15
+	  21: INCEIPL       	$4
+
+	0xFEE7EC8:  8203FFC0  lwz r16,-64(r3)
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0xFFFFFFC0, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R16
+	  26: INCEIPL       	$4
+
+	0xFEE7ECC:  8223FFC4  lwz r17,-60(r3)
+	  27: GETL       	R3, t22
+	  28: ADDL       	$0xFFFFFFC4, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R17
+	  31: INCEIPL       	$4
+
+	0xFEE7ED0:  8243FFC8  lwz r18,-56(r3)
+	  32: GETL       	R3, t26
+	  33: ADDL       	$0xFFFFFFC8, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R18
+	  36: INCEIPL       	$4
+
+	0xFEE7ED4:  8263FFCC  lwz r19,-52(r3)
+	  37: GETL       	R3, t30
+	  38: ADDL       	$0xFFFFFFCC, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R19
+	  41: INCEIPL       	$4
+
+	0xFEE7ED8:  8283FFD0  lwz r20,-48(r3)
+	  42: GETL       	R3, t34
+	  43: ADDL       	$0xFFFFFFD0, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R20
+	  46: INCEIPL       	$4
+
+	0xFEE7EDC:  82A3FFD4  lwz r21,-44(r3)
+	  47: GETL       	R3, t38
+	  48: ADDL       	$0xFFFFFFD4, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R21
+	  51: INCEIPL       	$4
+
+	0xFEE7EE0:  82C3FFD8  lwz r22,-40(r3)
+	  52: GETL       	R3, t42
+	  53: ADDL       	$0xFFFFFFD8, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R22
+	  56: INCEIPL       	$4
+
+	0xFEE7EE4:  82E3FFDC  lwz r23,-36(r3)
+	  57: GETL       	R3, t46
+	  58: ADDL       	$0xFFFFFFDC, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R23
+	  61: INCEIPL       	$4
+
+	0xFEE7EE8:  8303FFE0  lwz r24,-32(r3)
+	  62: GETL       	R3, t50
+	  63: ADDL       	$0xFFFFFFE0, t50
+	  64: LDL       	(t50), t52
+	  65: PUTL       	t52, R24
+	  66: INCEIPL       	$4
+
+	0xFEE7EEC:  8323FFE4  lwz r25,-28(r3)
+	  67: GETL       	R3, t54
+	  68: ADDL       	$0xFFFFFFE4, t54
+	  69: LDL       	(t54), t56
+	  70: PUTL       	t56, R25
+	  71: INCEIPL       	$4
+
+	0xFEE7EF0:  8343FFE8  lwz r26,-24(r3)
+	  72: GETL       	R3, t58
+	  73: ADDL       	$0xFFFFFFE8, t58
+	  74: LDL       	(t58), t60
+	  75: PUTL       	t60, R26
+	  76: INCEIPL       	$4
+
+	0xFEE7EF4:  8363FFEC  lwz r27,-20(r3)
+	  77: GETL       	R3, t62
+	  78: ADDL       	$0xFFFFFFEC, t62
+	  79: LDL       	(t62), t64
+	  80: PUTL       	t64, R27
+	  81: INCEIPL       	$4
+
+	0xFEE7EF8:  8383FFF0  lwz r28,-16(r3)
+	  82: GETL       	R3, t66
+	  83: ADDL       	$0xFFFFFFF0, t66
+	  84: LDL       	(t66), t68
+	  85: PUTL       	t68, R28
+	  86: INCEIPL       	$4
+
+	0xFEE7EFC:  83A3FFF4  lwz r29,-12(r3)
+	  87: GETL       	R3, t70
+	  88: ADDL       	$0xFFFFFFF4, t70
+	  89: LDL       	(t70), t72
+	  90: PUTL       	t72, R29
+	  91: INCEIPL       	$4
+
+	0xFEE7F00:  83C3FFF8  lwz r30,-8(r3)
+	  92: GETL       	R3, t74
+	  93: ADDL       	$0xFFFFFFF8, t74
+	  94: LDL       	(t74), t76
+	  95: PUTL       	t76, R30
+	  96: INCEIPL       	$4
+
+	0xFEE7F04:  83E3FFFC  lwz r31,-4(r3)
+	  97: GETL       	R3, t78
+	  98: ADDL       	$0xFFFFFFFC, t78
+	  99: LDL       	(t78), t80
+	 100: PUTL       	t80, R31
+	 101: INCEIPL       	$4
+
+	0xFEE7F08:  7C611B78  or r1,r3,r3
+	 102: GETL       	R3, t82
+	 103: PUTL       	t82, R1
+	 104: INCEIPL       	$4
+
+	0xFEE7F0C:  4E800020  blr
+	 105: GETL       	LR, t84
+	 106: JMPo-r       	t84  ($4)
+
+
+
+. 3100 FEE7EB4 92
+. 80 61 00 00 82 23 00 04 81 C3 FF B8 7E 28 03 A6 81 E3 FF BC 82 03 FF C0 82 23 FF C4 82 43 FF C8 82 63 FF CC 82 83 FF D0 82 A3 FF D4 82 C3 FF D8 82 E3 FF DC 83 03 FF E0 83 23 FF E4 83 43 FF E8 83 63 FF EC 83 83 FF F0 83 A3 FF F4 83 C3 FF F8 83 E3 FF FC 7C 61 1B 78 4E 80 00 20
+==== BB 3101 (0xFEE6F60) approx BBs exec'd 0 ====
+
+	0xFEE6F60:  83B90000  lwz r29,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0xFEE6F64:  2F9D0000  cmpi cr7,r29,0
+	   4: GETL       	R29, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFEE6F68:  40BEFEDC  bc 5,30,0xFEE6E44
+	   8: Jc30o       	$0xFEE6E44
+
+
+
+. 3101 FEE6F60 12
+. 83 B9 00 00 2F 9D 00 00 40 BE FE DC
+==== BB 3102 (0xFEE6E44) approx BBs exec'd 0 ====
+
+	0xFEE6E44:  80C10000  lwz r6,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFEE6E48:  80660004  lwz r3,4(r6)
+	   4: GETL       	R6, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFEE6E4C:  80E6FFD4  lwz r7,-44(r6)
+	   9: GETL       	R6, t8
+	  10: ADDL       	$0xFFFFFFD4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R7
+	  13: INCEIPL       	$4
+
+	0xFEE6E50:  82C6FFD8  lwz r22,-40(r6)
+	  14: GETL       	R6, t12
+	  15: ADDL       	$0xFFFFFFD8, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R22
+	  18: INCEIPL       	$4
+
+	0xFEE6E54:  7C6803A6  mtlr r3
+	  19: GETL       	R3, t16
+	  20: PUTL       	t16, LR
+	  21: INCEIPL       	$4
+
+	0xFEE6E58:  82E6FFDC  lwz r23,-36(r6)
+	  22: GETL       	R6, t18
+	  23: ADDL       	$0xFFFFFFDC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R23
+	  26: INCEIPL       	$4
+
+	0xFEE6E5C:  7CE08120  mtcrf 0x8,r7
+	  27: GETL       	R7, t22
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0xFEE6E60:  8306FFE0  lwz r24,-32(r6)
+	  30: GETL       	R6, t24
+	  31: ADDL       	$0xFFFFFFE0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R24
+	  34: INCEIPL       	$4
+
+	0xFEE6E64:  8326FFE4  lwz r25,-28(r6)
+	  35: GETL       	R6, t28
+	  36: ADDL       	$0xFFFFFFE4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R25
+	  39: INCEIPL       	$4
+
+	0xFEE6E68:  8346FFE8  lwz r26,-24(r6)
+	  40: GETL       	R6, t32
+	  41: ADDL       	$0xFFFFFFE8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R26
+	  44: INCEIPL       	$4
+
+	0xFEE6E6C:  8366FFEC  lwz r27,-20(r6)
+	  45: GETL       	R6, t36
+	  46: ADDL       	$0xFFFFFFEC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R27
+	  49: INCEIPL       	$4
+
+	0xFEE6E70:  8386FFF0  lwz r28,-16(r6)
+	  50: GETL       	R6, t40
+	  51: ADDL       	$0xFFFFFFF0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R28
+	  54: INCEIPL       	$4
+
+	0xFEE6E74:  83A6FFF4  lwz r29,-12(r6)
+	  55: GETL       	R6, t44
+	  56: ADDL       	$0xFFFFFFF4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R29
+	  59: INCEIPL       	$4
+
+	0xFEE6E78:  83C6FFF8  lwz r30,-8(r6)
+	  60: GETL       	R6, t48
+	  61: ADDL       	$0xFFFFFFF8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R30
+	  64: INCEIPL       	$4
+
+	0xFEE6E7C:  83E6FFFC  lwz r31,-4(r6)
+	  65: GETL       	R6, t52
+	  66: ADDL       	$0xFFFFFFFC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R31
+	  69: INCEIPL       	$4
+
+	0xFEE6E80:  7CC13378  or r1,r6,r6
+	  70: GETL       	R6, t56
+	  71: PUTL       	t56, R1
+	  72: INCEIPL       	$4
+
+	0xFEE6E84:  4E800020  blr
+	  73: GETL       	LR, t58
+	  74: JMPo-r       	t58  ($4)
+
+
+
+. 3102 FEE6E44 68
+. 80 C1 00 00 80 66 00 04 80 E6 FF D4 82 C6 FF D8 7C 68 03 A6 82 E6 FF DC 7C E0 81 20 83 06 FF E0 83 26 FF E4 83 46 FF E8 83 66 FF EC 83 86 FF F0 83 A6 FF F4 83 C6 FF F8 83 E6 FF FC 7C C1 33 78 4E 80 00 20
+==== BB 3103 (0xFEE7C04) approx BBs exec'd 0 ====
+
+	0xFEE7C04:  80BA0000  lwz r5,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFEE7C08:  2C850000  cmpi cr1,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFEE7C0C:  40860114  bc 4,6,0xFEE7D20
+	   8: Jc06o       	$0xFEE7D20
+
+
+
+. 3103 FEE7C04 12
+. 80 BA 00 00 2C 85 00 00 40 86 01 14
+==== BB 3104 (0xFEE7D20) approx BBs exec'd 0 ====
+
+	0xFEE7D20:  807B0000  lwz r3,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEE7D24:  7F84E378  or r4,r28,r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0xFEE7D28:  38A10008  addi r5,r1,8
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x8, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0xFEE7D2C:  38C1000C  addi r6,r1,12
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0xC, t8
+	  13: PUTL       	t8, R6
+	  14: INCEIPL       	$4
+
+	0xFEE7D30:  7FE7FB78  or r7,r31,r31
+	  15: GETL       	R31, t10
+	  16: PUTL       	t10, R7
+	  17: INCEIPL       	$4
+
+	0xFEE7D34:  48000D09  bl 0xFEE8A3C
+	  18: MOVL       	$0xFEE7D38, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0xFEE8A3C  ($4)
+
+
+
+. 3104 FEE7D20 24
+. 80 7B 00 00 7F 84 E3 78 38 A1 00 08 38 C1 00 0C 7F E7 FB 78 48 00 0D 09
+==== BB 3105 __tzfile_compute(0xFEE8A3C) approx BBs exec'd 0 ====
+
+	0xFEE8A3C:  2F840000  cmpi cr7,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFEE8A40:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFEE8A44:  9421FFC0  stwu r1,-64(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFC0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEE8A48:  480BF409  bl 0xFFA7E50
+	  13: MOVL       	$0xFEE8A4C, t10
+	  14: PUTL       	t10, LR
+	  15: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3105 FEE8A3C 16
+. 2F 84 00 00 7C 08 02 A6 94 21 FF C0 48 0B F4 09
+==== BB 3106 (0xFEE8A4C) approx BBs exec'd 0 ====
+
+	0xFEE8A4C:  9261000C  stw r19,12(r1)
+	   0: GETL       	R19, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEE8A50:  7CD33378  or r19,r6,r6
+	   5: GETL       	R6, t4
+	   6: PUTL       	t4, R19
+	   7: INCEIPL       	$4
+
+	0xFEE8A54:  92810010  stw r20,16(r1)
+	   8: GETL       	R20, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEE8A58:  7CB42B78  or r20,r5,r5
+	  13: GETL       	R5, t10
+	  14: PUTL       	t10, R20
+	  15: INCEIPL       	$4
+
+	0xFEE8A5C:  92C10018  stw r22,24(r1)
+	  16: GETL       	R22, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x18, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFEE8A60:  7CF63B78  or r22,r7,r7
+	  21: GETL       	R7, t16
+	  22: PUTL       	t16, R22
+	  23: INCEIPL       	$4
+
+	0xFEE8A64:  93410028  stw r26,40(r1)
+	  24: GETL       	R26, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x28, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFEE8A68:  7C7A1B78  or r26,r3,r3
+	  29: GETL       	R3, t22
+	  30: PUTL       	t22, R26
+	  31: INCEIPL       	$4
+
+	0xFEE8A6C:  93C10038  stw r30,56(r1)
+	  32: GETL       	R30, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x38, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0xFEE8A70:  7FC802A6  mflr r30
+	  37: GETL       	LR, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xFEE8A74:  92A10014  stw r21,20(r1)
+	  40: GETL       	R21, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x14, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFEE8A78:  92E1001C  stw r23,28(r1)
+	  45: GETL       	R23, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x1C, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFEE8A7C:  93010020  stw r24,32(r1)
+	  50: GETL       	R24, t38
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x20, t40
+	  53: STL       	t38, (t40)
+	  54: INCEIPL       	$4
+
+	0xFEE8A80:  93210024  stw r25,36(r1)
+	  55: GETL       	R25, t42
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x24, t44
+	  58: STL       	t42, (t44)
+	  59: INCEIPL       	$4
+
+	0xFEE8A84:  9361002C  stw r27,44(r1)
+	  60: GETL       	R27, t46
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x2C, t48
+	  63: STL       	t46, (t48)
+	  64: INCEIPL       	$4
+
+	0xFEE8A88:  93810030  stw r28,48(r1)
+	  65: GETL       	R28, t50
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x30, t52
+	  68: STL       	t50, (t52)
+	  69: INCEIPL       	$4
+
+	0xFEE8A8C:  93A10034  stw r29,52(r1)
+	  70: GETL       	R29, t54
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0x34, t56
+	  73: STL       	t54, (t56)
+	  74: INCEIPL       	$4
+
+	0xFEE8A90:  93E1003C  stw r31,60(r1)
+	  75: GETL       	R31, t58
+	  76: GETL       	R1, t60
+	  77: ADDL       	$0x3C, t60
+	  78: STL       	t58, (t60)
+	  79: INCEIPL       	$4
+
+	0xFEE8A94:  90010044  stw r0,68(r1)
+	  80: GETL       	R0, t62
+	  81: GETL       	R1, t64
+	  82: ADDL       	$0x44, t64
+	  83: STL       	t62, (t64)
+	  84: INCEIPL       	$4
+
+	0xFEE8A98:  419E0170  bc 12,30,0xFEE8C08
+	  85: Js30o       	$0xFEE8C08
+
+
+
+. 3106 FEE8A4C 80
+. 92 61 00 0C 7C D3 33 78 92 81 00 10 7C B4 2B 78 92 C1 00 18 7C F6 3B 78 93 41 00 28 7C 7A 1B 78 93 C1 00 38 7F C8 02 A6 92 A1 00 14 92 E1 00 1C 93 01 00 20 93 21 00 24 93 61 00 2C 93 81 00 30 93 A1 00 34 93 E1 00 3C 90 01 00 44 41 9E 01 70
+==== BB 3107 (0xFEE8A9C) approx BBs exec'd 0 ====
+
+	0xFEE8A9C:  80DE08E4  lwz r6,2276(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8E4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFEE8AA0:  81660000  lwz r11,0(r6)
+	   5: GETL       	R6, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0xFEE8AA4:  2C0B0000  cmpi cr0,r11,0
+	   9: GETL       	R11, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFEE8AA8:  408201F8  bc 4,2,0xFEE8CA0
+	  13: Jc02o       	$0xFEE8CA0
+
+
+
+. 3107 FEE8A9C 16
+. 80 DE 08 E4 81 66 00 00 2C 0B 00 00 40 82 01 F8
+==== BB 3108 (0xFEE8CA0) approx BBs exec'd 0 ====
+
+	0xFEE8CA0:  813E08CC  lwz r9,2252(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8CC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEE8CA4:  81290000  lwz r9,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0xFEE8CA8:  80690000  lwz r3,0(r9)
+	   9: GETL       	R9, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R3
+	  12: INCEIPL       	$4
+
+	0xFEE8CAC:  7C83D000  cmp cr1,r3,r26
+	  13: GETL       	R3, t12
+	  14: GETL       	R26, t14
+	  15: CMPL       	t12, t14, t16  (-rSo)
+	  16: ICRFL       	t16, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0xFEE8CB0:  41A5FDFC  bc 13,5,0xFEE8AAC
+	  18: Js05o       	$0xFEE8AAC
+
+
+
+. 3108 FEE8CA0 20
+. 81 3E 08 CC 81 29 00 00 80 69 00 00 7C 83 D0 00 41 A5 FD FC
+==== BB 3109 (0xFEE8CB4) approx BBs exec'd 0 ====
+
+	0xFEE8CB4:  39400001  li r10,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFEE8CB8:  7F0A5840  cmpl cr6,r10,r11
+	   3: GETL       	R10, t2
+	   4: GETL       	R11, t4
+	   5: CMPUL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0xFEE8CBC:  40980024  bc 4,24,0xFEE8CE0
+	   8: Jc24o       	$0xFEE8CE0
+
+
+
+. 3109 FEE8CB4 12
+. 39 40 00 01 7F 0A 58 40 40 98 00 24
+==== BB 3110 (0xFEE8CC0) approx BBs exec'd 0 ====
+
+	0xFEE8CC0:  7D284B78  or r8,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0xFEE8CC4:  554C103A  rlwinm r12,r10,2,0,29
+	   3: GETL       	R10, t2
+	   4: SHLL       	$0x2, t2
+	   5: PUTL       	t2, R12
+	   6: INCEIPL       	$4
+
+	0xFEE8CC8:  7CEC402E  lwzx r7,r12,r8
+	   7: GETL       	R8, t4
+	   8: GETL       	R12, t6
+	   9: ADDL       	t6, t4
+	  10: LDL       	(t4), t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0xFEE8CCC:  7F87D000  cmp cr7,r7,r26
+	  13: GETL       	R7, t10
+	  14: GETL       	R26, t12
+	  15: CMPL       	t10, t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0xFEE8CD0:  419D0010  bc 12,29,0xFEE8CE0
+	  18: Js29o       	$0xFEE8CE0
+
+
+
+. 3110 FEE8CC0 20
+. 7D 28 4B 78 55 4C 10 3A 7C EC 40 2E 7F 87 D0 00 41 9D 00 10
+==== BB 3111 (0xFEE8CD4) approx BBs exec'd 0 ====
+
+	0xFEE8CD4:  394A0001  addi r10,r10,1
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0xFEE8CD8:  7C0A5840  cmpl cr0,r10,r11
+	   4: GETL       	R10, t2
+	   5: GETL       	R11, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFEE8CDC:  4180FFE8  bc 12,0,0xFEE8CC4
+	   9: Js00o       	$0xFEE8CC4
+
+
+
+. 3111 FEE8CD4 12
+. 39 4A 00 01 7C 0A 58 40 41 80 FF E8
+==== BB 3112 (0xFEE8CC4) approx BBs exec'd 0 ====
+
+	0xFEE8CC4:  554C103A  rlwinm r12,r10,2,0,29
+	   0: GETL       	R10, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0xFEE8CC8:  7CEC402E  lwzx r7,r12,r8
+	   4: GETL       	R8, t2
+	   5: GETL       	R12, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0xFEE8CCC:  7F87D000  cmp cr7,r7,r26
+	  10: GETL       	R7, t8
+	  11: GETL       	R26, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFEE8CD0:  419D0010  bc 12,29,0xFEE8CE0
+	  15: Js29o       	$0xFEE8CE0
+
+
+
+. 3112 FEE8CC4 16
+. 55 4C 10 3A 7C EC 40 2E 7F 87 D0 00 41 9D 00 10
+==== BB 3113 (0xFEE8CE0) approx BBs exec'd 0 ====
+
+	0xFEE8CE0:  817E08E8  lwz r11,2280(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFEE8CE4:  831E08F0  lwz r24,2288(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x8F0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R24
+	   9: INCEIPL       	$4
+
+	0xFEE8CE8:  82AB0000  lwz r21,0(r11)
+	  10: GETL       	R11, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R21
+	  13: INCEIPL       	$4
+
+	0xFEE8CEC:  7D155214  add r8,r21,r10
+	  14: GETL       	R21, t12
+	  15: GETL       	R10, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R8
+	  18: INCEIPL       	$4
+
+	0xFEE8CF0:  88E8FFFF  lbz r7,-1(r8)
+	  19: GETL       	R8, t16
+	  20: ADDL       	$0xFFFFFFFF, t16
+	  21: LDB       	(t16), t18
+	  22: PUTL       	t18, R7
+	  23: INCEIPL       	$4
+
+	0xFEE8CF4:  4BFFFE14  b 0xFEE8B08
+	  24: JMPo       	$0xFEE8B08  ($4)
+
+
+
+. 3113 FEE8CE0 24
+. 81 7E 08 E8 83 1E 08 F0 82 AB 00 00 7D 15 52 14 88 E8 FF FF 4B FF FE 14
+==== BB 3114 (0xFEE8B08) approx BBs exec'd 0 ====
+
+	0xFEE8B08:  83860000  lwz r28,0(r6)
+	   0: GETL       	R6, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R28
+	   3: INCEIPL       	$4
+
+	0xFEE8B0C:  3AE00000  li r23,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R23
+	   6: INCEIPL       	$4
+
+	0xFEE8B10:  813E08FC  lwz r9,2300(r30)
+	   7: GETL       	R30, t6
+	   8: ADDL       	$0x8FC, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R9
+	  11: INCEIPL       	$4
+
+	0xFEE8B14:  54FD1838  rlwinm r29,r7,3,0,28
+	  12: GETL       	R7, t10
+	  13: SHLL       	$0x3, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0xFEE8B18:  815E08F8  lwz r10,2296(r30)
+	  16: GETL       	R30, t12
+	  17: ADDL       	$0x8F8, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R10
+	  20: INCEIPL       	$4
+
+	0xFEE8B1C:  2C9C0000  cmpi cr1,r28,0
+	  21: GETL       	R28, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x1, CR
+	  24: INCEIPL       	$4
+
+	0xFEE8B20:  83690000  lwz r27,0(r9)
+	  25: GETL       	R9, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R27
+	  28: INCEIPL       	$4
+
+	0xFEE8B24:  80AA0000  lwz r5,0(r10)
+	  29: GETL       	R10, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R5
+	  32: INCEIPL       	$4
+
+	0xFEE8B28:  833E1D40  lwz r25,7488(r30)
+	  33: GETL       	R30, t28
+	  34: ADDL       	$0x1D40, t28
+	  35: LDL       	(t28), t30
+	  36: PUTL       	t30, R25
+	  37: INCEIPL       	$4
+
+	0xFEE8B2C:  7CA0DA78  xor r0,r5,r27
+	  38: GETL       	R5, t32
+	  39: GETL       	R27, t34
+	  40: XORL       	t32, t34
+	  41: PUTL       	t34, R0
+	  42: INCEIPL       	$4
+
+	0xFEE8B30:  80D80000  lwz r6,0(r24)
+	  43: GETL       	R24, t36
+	  44: LDL       	(t36), t38
+	  45: PUTL       	t38, R6
+	  46: INCEIPL       	$4
+
+	0xFEE8B34:  837E1AE4  lwz r27,6884(r30)
+	  47: GETL       	R30, t40
+	  48: ADDL       	$0x1AE4, t40
+	  49: LDL       	(t40), t42
+	  50: PUTL       	t42, R27
+	  51: INCEIPL       	$4
+
+	0xFEE8B38:  3120FFFF  addic r9,r0,-1
+	  52: GETL       	R0, t44
+	  53: ADCL       	$0xFFFFFFFF, t44  (-wCa)
+	  54: PUTL       	t44, R9
+	  55: INCEIPL       	$4
+
+	0xFEE8B3C:  7C890110  subfe r4,r9,r0
+	  56: GETL       	R9, t46
+	  57: GETL       	R0, t48
+	  58: SBBL       	t46, t48  (-rCa-wCa)
+	  59: PUTL       	t48, R4
+	  60: INCEIPL       	$4
+
+	0xFEE8B40:  807E1A78  lwz r3,6776(r30)
+	  61: GETL       	R30, t50
+	  62: ADDL       	$0x1A78, t50
+	  63: LDL       	(t50), t52
+	  64: PUTL       	t52, R3
+	  65: INCEIPL       	$4
+
+	0xFEE8B44:  7FE500D0  neg r31,r5
+	  66: GETL       	R5, t54
+	  67: NEGL       	t54
+	  68: PUTL       	t54, R31
+	  69: INCEIPL       	$4
+
+	0xFEE8B48:  93F90000  stw r31,0(r25)
+	  70: GETL       	R31, t56
+	  71: GETL       	R25, t58
+	  72: STL       	t56, (t58)
+	  73: INCEIPL       	$4
+
+	0xFEE8B4C:  7F3D3214  add r25,r29,r6
+	  74: GETL       	R29, t60
+	  75: GETL       	R6, t62
+	  76: ADDL       	t60, t62
+	  77: PUTL       	t62, R25
+	  78: INCEIPL       	$4
+
+	0xFEE8B50:  90830000  stw r4,0(r3)
+	  79: GETL       	R4, t64
+	  80: GETL       	R3, t66
+	  81: STL       	t64, (t66)
+	  82: INCEIPL       	$4
+
+	0xFEE8B54:  92FB0004  stw r23,4(r27)
+	  83: GETL       	R23, t68
+	  84: GETL       	R27, t70
+	  85: ADDL       	$0x4, t70
+	  86: STL       	t68, (t70)
+	  87: INCEIPL       	$4
+
+	0xFEE8B58:  92FB0000  stw r23,0(r27)
+	  88: GETL       	R23, t72
+	  89: GETL       	R27, t74
+	  90: STL       	t72, (t74)
+	  91: INCEIPL       	$4
+
+	0xFEE8B5C:  82BE08EC  lwz r21,2284(r30)
+	  92: GETL       	R30, t76
+	  93: ADDL       	$0x8EC, t76
+	  94: LDL       	(t76), t78
+	  95: PUTL       	t78, R21
+	  96: INCEIPL       	$4
+
+	0xFEE8B60:  41860068  bc 12,6,0xFEE8BC8
+	  97: Js06o       	$0xFEE8BC8
+
+
+
+. 3114 FEE8B08 92
+. 83 86 00 00 3A E0 00 00 81 3E 08 FC 54 FD 18 38 81 5E 08 F8 2C 9C 00 00 83 69 00 00 80 AA 00 00 83 3E 1D 40 7C A0 DA 78 80 D8 00 00 83 7E 1A E4 31 20 FF FF 7C 89 01 10 80 7E 1A 78 7F E5 00 D0 93 F9 00 00 7F 3D 32 14 90 83 00 00 92 FB 00 04 92 FB 00 00 82 BE 08 EC 41 86 00 68
+==== BB 3115 (0xFEE8B64) approx BBs exec'd 0 ====
+
+	0xFEE8B64:  82FE08E8  lwz r23,2280(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0xFEE8B68:  4800000C  b 0xFEE8B74
+	   5: JMPo       	$0xFEE8B74  ($4)
+
+
+
+. 3115 FEE8B64 8
+. 82 FE 08 E8 48 00 00 0C
+==== BB 3116 (0xFEE8B74) approx BBs exec'd 0 ====
+
+	0xFEE8B74:  83B70000  lwz r29,0(r23)
+	   0: GETL       	R23, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0xFEE8B78:  3B9CFFFF  addi r28,r28,-1
+	   4: GETL       	R28, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFEE8B7C:  81780000  lwz r11,0(r24)
+	   8: GETL       	R24, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0xFEE8B80:  7CDCE8AE  lbzx r6,r28,r29
+	  12: GETL       	R29, t10
+	  13: GETL       	R28, t12
+	  14: ADDL       	t12, t10
+	  15: LDB       	(t10), t14
+	  16: PUTL       	t14, R6
+	  17: INCEIPL       	$4
+
+	0xFEE8B84:  54C81838  rlwinm r8,r6,3,0,28
+	  18: GETL       	R6, t16
+	  19: SHLL       	$0x3, t16
+	  20: PUTL       	t16, R8
+	  21: INCEIPL       	$4
+
+	0xFEE8B88:  7D885A14  add r12,r8,r11
+	  22: GETL       	R8, t18
+	  23: GETL       	R11, t20
+	  24: ADDL       	t18, t20
+	  25: PUTL       	t20, R12
+	  26: INCEIPL       	$4
+
+	0xFEE8B8C:  8BAC0004  lbz r29,4(r12)
+	  27: GETL       	R12, t22
+	  28: ADDL       	$0x4, t22
+	  29: LDB       	(t22), t24
+	  30: PUTL       	t24, R29
+	  31: INCEIPL       	$4
+
+	0xFEE8B90:  892C0005  lbz r9,5(r12)
+	  32: GETL       	R12, t26
+	  33: ADDL       	$0x5, t26
+	  34: LDB       	(t26), t28
+	  35: PUTL       	t28, R9
+	  36: INCEIPL       	$4
+
+	0xFEE8B94:  57BF103A  rlwinm r31,r29,2,0,29
+	  37: GETL       	R29, t30
+	  38: SHLL       	$0x2, t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0xFEE8B98:  7CFFD82E  lwzx r7,r31,r27
+	  41: GETL       	R27, t32
+	  42: GETL       	R31, t34
+	  43: ADDL       	t34, t32
+	  44: LDL       	(t32), t36
+	  45: PUTL       	t36, R7
+	  46: INCEIPL       	$4
+
+	0xFEE8B9C:  2F070000  cmpi cr6,r7,0
+	  47: GETL       	R7, t38
+	  48: CMP0L       	t38, t40  (-rSo)
+	  49: ICRFL       	t40, $0x6, CR
+	  50: INCEIPL       	$4
+
+	0xFEE8BA0:  409AFFCC  bc 4,26,0xFEE8B6C
+	  51: Jc26o       	$0xFEE8B6C
+
+
+
+. 3116 FEE8B74 48
+. 83 B7 00 00 3B 9C FF FF 81 78 00 00 7C DC E8 AE 54 C8 18 38 7D 88 5A 14 8B AC 00 04 89 2C 00 05 57 BF 10 3A 7C FF D8 2E 2F 07 00 00 40 9A FF CC
+==== BB 3117 (0xFEE8BA4) approx BBs exec'd 0 ====
+
+	0xFEE8BA4:  80B50000  lwz r5,0(r21)
+	   0: GETL       	R21, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFEE8BA8:  7C654A14  add r3,r5,r9
+	   4: GETL       	R5, t4
+	   5: GETL       	R9, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFEE8BAC:  4BFFE0FD  bl 0xFEE6CA8
+	   9: MOVL       	$0xFEE8BB0, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0xFEE6CA8  ($4)
+
+
+
+. 3117 FEE8BA4 12
+. 80 B5 00 00 7C 65 4A 14 4B FF E0 FD
+==== BB 3118 (0xFEE8BB0) approx BBs exec'd 0 ====
+
+	0xFEE8BB0:  209D0001  subfic r4,r29,1
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x1, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEE8BB4:  7C7FD92E  stwx r3,r31,r27
+	   5: GETL       	R27, t4
+	   6: GETL       	R31, t6
+	   7: ADDL       	t6, t4
+	   8: GETL       	R3, t8
+	   9: STL       	t8, (t4)
+	  10: INCEIPL       	$4
+
+	0xFEE8BB8:  5483103A  rlwinm r3,r4,2,0,29
+	  11: GETL       	R4, t10
+	  12: SHLL       	$0x2, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0xFEE8BBC:  7FE3D82E  lwzx r31,r3,r27
+	  15: GETL       	R27, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	t14, t12
+	  18: LDL       	(t12), t16
+	  19: PUTL       	t16, R31
+	  20: INCEIPL       	$4
+
+	0xFEE8BC0:  2F9F0000  cmpi cr7,r31,0
+	  21: GETL       	R31, t18
+	  22: CMP0L       	t18, t20  (-rSo)
+	  23: ICRFL       	t20, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0xFEE8BC4:  419EFFA8  bc 12,30,0xFEE8B6C
+	  25: Js30o       	$0xFEE8B6C
+
+
+
+. 3118 FEE8BB0 24
+. 20 9D 00 01 7C 7F D9 2E 54 83 10 3A 7F E3 D8 2E 2F 9F 00 00 41 9E FF A8
+==== BB 3119 (0xFEE8B6C) approx BBs exec'd 0 ====
+
+	0xFEE8B6C:  2C1C0000  cmpi cr0,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEE8B70:  41820058  bc 12,2,0xFEE8BC8
+	   4: Js02o       	$0xFEE8BC8
+
+
+
+. 3119 FEE8B6C 8
+. 2C 1C 00 00 41 82 00 58
+==== BB 3120 (0xFEE8BC8) approx BBs exec'd 0 ====
+
+	0xFEE8BC8:  807B0000  lwz r3,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEE8BCC:  2C830000  cmpi cr1,r3,0
+	   4: GETL       	R3, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFEE8BD0:  418601B0  bc 12,6,0xFEE8D80
+	   8: Js06o       	$0xFEE8D80
+
+
+
+. 3120 FEE8BC8 12
+. 80 7B 00 00 2C 83 00 00 41 86 01 B0
+==== BB 3121 (0xFEE8BD4) approx BBs exec'd 0 ====
+
+	0xFEE8BD4:  831B0004  lwz r24,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0xFEE8BD8:  2F180000  cmpi cr6,r24,0
+	   5: GETL       	R24, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFEE8BDC:  409A0008  bc 4,26,0xFEE8BE4
+	   9: Jc26o       	$0xFEE8BE4
+
+
+
+. 3121 FEE8BD4 12
+. 83 1B 00 04 2F 18 00 00 40 9A 00 08
+==== BB 3122 (0xFEE8BE4) approx BBs exec'd 0 ====
+
+	0xFEE8BE4:  88190004  lbz r0,4(r25)
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEE8BE8:  82F50000  lwz r23,0(r21)
+	   5: GETL       	R21, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R23
+	   8: INCEIPL       	$4
+
+	0xFEE8BEC:  90160020  stw r0,32(r22)
+	   9: GETL       	R0, t8
+	  10: GETL       	R22, t10
+	  11: ADDL       	$0x20, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFEE8BF0:  8AB90005  lbz r21,5(r25)
+	  14: GETL       	R25, t12
+	  15: ADDL       	$0x5, t12
+	  16: LDB       	(t12), t14
+	  17: PUTL       	t14, R21
+	  18: INCEIPL       	$4
+
+	0xFEE8BF4:  7C75BA14  add r3,r21,r23
+	  19: GETL       	R21, t16
+	  20: GETL       	R23, t18
+	  21: ADDL       	t16, t18
+	  22: PUTL       	t18, R3
+	  23: INCEIPL       	$4
+
+	0xFEE8BF8:  4BFFE0B1  bl 0xFEE6CA8
+	  24: MOVL       	$0xFEE8BFC, t20
+	  25: PUTL       	t20, LR
+	  26: JMPo-c       	$0xFEE6CA8  ($4)
+
+
+
+. 3122 FEE8BE4 24
+. 88 19 00 04 82 F5 00 00 90 16 00 20 8A B9 00 05 7C 75 BA 14 4B FF E0 B1
+==== BB 3123 (0xFEE8BFC) approx BBs exec'd 0 ====
+
+	0xFEE8BFC:  83990000  lwz r28,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R28
+	   3: INCEIPL       	$4
+
+	0xFEE8C00:  90760028  stw r3,40(r22)
+	   4: GETL       	R3, t4
+	   5: GETL       	R22, t6
+	   6: ADDL       	$0x28, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFEE8C04:  93960024  stw r28,36(r22)
+	   9: GETL       	R28, t8
+	  10: GETL       	R22, t10
+	  11: ADDL       	$0x24, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFEE8C08:  3AC00000  li r22,0
+	  14: MOVL       	$0x0, t12
+	  15: PUTL       	t12, R22
+	  16: INCEIPL       	$4
+
+	0xFEE8C0C:  833E08E0  lwz r25,2272(r30)
+	  17: GETL       	R30, t14
+	  18: ADDL       	$0x8E0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R25
+	  21: INCEIPL       	$4
+
+	0xFEE8C10:  92D30000  stw r22,0(r19)
+	  22: GETL       	R22, t18
+	  23: GETL       	R19, t20
+	  24: STL       	t18, (t20)
+	  25: INCEIPL       	$4
+
+	0xFEE8C14:  81590000  lwz r10,0(r25)
+	  26: GETL       	R25, t22
+	  27: LDL       	(t22), t24
+	  28: PUTL       	t24, R10
+	  29: INCEIPL       	$4
+
+	0xFEE8C18:  811E08F4  lwz r8,2292(r30)
+	  30: GETL       	R30, t26
+	  31: ADDL       	$0x8F4, t26
+	  32: LDL       	(t26), t28
+	  33: PUTL       	t28, R8
+	  34: INCEIPL       	$4
+
+	0xFEE8C1C:  92D40000  stw r22,0(r20)
+	  35: GETL       	R22, t30
+	  36: GETL       	R20, t32
+	  37: STL       	t30, (t32)
+	  38: INCEIPL       	$4
+
+	0xFEE8C20:  2F8A0000  cmpi cr7,r10,0
+	  39: GETL       	R10, t34
+	  40: CMP0L       	t34, t36  (-rSo)
+	  41: ICRFL       	t36, $0x7, CR
+	  42: INCEIPL       	$4
+
+	0xFEE8C24:  394AFFFF  addi r10,r10,-1
+	  43: GETL       	R10, t38
+	  44: ADDL       	$0xFFFFFFFF, t38
+	  45: PUTL       	t38, R10
+	  46: INCEIPL       	$4
+
+	0xFEE8C28:  7D054378  or r5,r8,r8
+	  47: GETL       	R8, t40
+	  48: PUTL       	t40, R5
+	  49: INCEIPL       	$4
+
+	0xFEE8C2C:  55491838  rlwinm r9,r10,3,0,28
+	  50: GETL       	R10, t42
+	  51: SHLL       	$0x3, t42
+	  52: PUTL       	t42, R9
+	  53: INCEIPL       	$4
+
+	0xFEE8C30:  419E002C  bc 12,30,0xFEE8C5C
+	  54: Js30o       	$0xFEE8C5C
+
+
+
+. 3123 FEE8BFC 56
+. 83 99 00 00 90 76 00 28 93 96 00 24 3A C0 00 00 83 3E 08 E0 92 D3 00 00 81 59 00 00 81 1E 08 F4 92 D4 00 00 2F 8A 00 00 39 4A FF FF 7D 05 43 78 55 49 18 38 41 9E 00 2C
+==== BB 3124 (0xFEE8C5C) approx BBs exec'd 0 ====
+
+	0xFEE8C5C:  82610044  lwz r19,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0xFEE8C60:  82810010  lwz r20,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R20
+	   9: INCEIPL       	$4
+
+	0xFEE8C64:  7E6803A6  mtlr r19
+	  10: GETL       	R19, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFEE8C68:  82A10014  lwz r21,20(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x14, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R21
+	  17: INCEIPL       	$4
+
+	0xFEE8C6C:  8261000C  lwz r19,12(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0xC, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R19
+	  22: INCEIPL       	$4
+
+	0xFEE8C70:  82C10018  lwz r22,24(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R22
+	  27: INCEIPL       	$4
+
+	0xFEE8C74:  82E1001C  lwz r23,28(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R23
+	  32: INCEIPL       	$4
+
+	0xFEE8C78:  83010020  lwz r24,32(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x20, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R24
+	  37: INCEIPL       	$4
+
+	0xFEE8C7C:  83210024  lwz r25,36(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x24, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R25
+	  42: INCEIPL       	$4
+
+	0xFEE8C80:  83410028  lwz r26,40(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x28, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R26
+	  47: INCEIPL       	$4
+
+	0xFEE8C84:  8361002C  lwz r27,44(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x2C, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R27
+	  52: INCEIPL       	$4
+
+	0xFEE8C88:  83810030  lwz r28,48(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x30, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R28
+	  57: INCEIPL       	$4
+
+	0xFEE8C8C:  83A10034  lwz r29,52(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x34, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R29
+	  62: INCEIPL       	$4
+
+	0xFEE8C90:  83C10038  lwz r30,56(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x38, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R30
+	  67: INCEIPL       	$4
+
+	0xFEE8C94:  83E1003C  lwz r31,60(r1)
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x3C, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R31
+	  72: INCEIPL       	$4
+
+	0xFEE8C98:  38210040  addi r1,r1,64
+	  73: GETL       	R1, t58
+	  74: ADDL       	$0x40, t58
+	  75: PUTL       	t58, R1
+	  76: INCEIPL       	$4
+
+	0xFEE8C9C:  4E800020  blr
+	  77: GETL       	LR, t60
+	  78: JMPo-r       	t60  ($4)
+
+
+
+. 3124 FEE8C5C 68
+. 82 61 00 44 82 81 00 10 7E 68 03 A6 82 A1 00 14 82 61 00 0C 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+==== BB 3125 (0xFEE7D38) approx BBs exec'd 0 ====
+
+	0xFEE7D38:  4BFFFF00  b 0xFEE7C38
+	   0: JMPo       	$0xFEE7C38  ($4)
+
+
+
+. 3125 FEE7D38 4
+. 4B FF FF 00
+==== BB 3126 (0xFEE7C38) approx BBs exec'd 0 ====
+
+	0xFEE7C38:  2C1F0000  cmpi cr0,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEE7C3C:  41820098  bc 12,2,0xFEE7CD4
+	   4: Js02o       	$0xFEE7CD4
+
+
+
+. 3126 FEE7C38 8
+. 2C 1F 00 00 41 82 00 98
+==== BB 3127 (0xFEE7C40) approx BBs exec'd 0 ====
+
+	0xFEE7C40:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFEE7C44:  419E00F8  bc 12,30,0xFEE7D3C
+	   4: Js30o       	$0xFEE7D3C
+
+
+
+. 3127 FEE7C40 8
+. 2F 9C 00 00 41 9E 00 F8
+==== BB 3128 (0xFEE7C48) approx BBs exec'd 0 ====
+
+	0xFEE7C48:  839A0000  lwz r28,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R28
+	   3: INCEIPL       	$4
+
+	0xFEE7C4C:  2C9C0000  cmpi cr1,r28,0
+	   4: GETL       	R28, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFEE7C50:  40860108  bc 4,6,0xFEE7D58
+	   8: Jc06o       	$0xFEE7D58
+
+
+
+. 3128 FEE7C48 12
+. 83 9A 00 00 2C 9C 00 00 40 86 01 08
+==== BB 3129 (0xFEE7D58) approx BBs exec'd 0 ====
+
+	0xFEE7D58:  813F0024  lwz r9,36(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEE7D5C:  4BFFFF4C  b 0xFEE7CA8
+	   5: JMPo       	$0xFEE7CA8  ($4)
+
+
+
+. 3129 FEE7D58 8
+. 81 3F 00 24 4B FF FF 4C
+==== BB 3130 (0xFEE7CA8) approx BBs exec'd 0 ====
+
+	0xFEE7CA8:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEE7CAC:  83610008  lwz r27,8(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R27
+	   7: INCEIPL       	$4
+
+	0xFEE7CB0:  7FE5FB78  or r5,r31,r31
+	   8: GETL       	R31, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0xFEE7CB4:  7C9B4850  subf r4,r27,r9
+	  11: GETL       	R27, t8
+	  12: GETL       	R9, t10
+	  13: SUBL       	t8, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFEE7CB8:  4BFFDC91  bl 0xFEE5948
+	  16: MOVL       	$0xFEE7CBC, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0xFEE5948  ($4)
+
+
+
+. 3130 FEE7CA8 20
+. 7F 63 DB 78 83 61 00 08 7F E5 FB 78 7C 9B 48 50 4B FF DC 91
+==== BB 3131 __offtime(0xFEE5948) approx BBs exec'd 0 ====
+
+	0xFEE5948:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEE594C:  3D800001  lis r12,1
+	   6: MOVL       	$0x10000, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFEE5950:  80C30000  lwz r6,0(r3)
+	   9: GETL       	R3, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R6
+	  12: INCEIPL       	$4
+
+	0xFEE5954:  61835180  ori r3,r12,0x5180
+	  13: MOVL       	$0x15180, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0xFEE5958:  93010010  stw r24,16(r1)
+	  16: GETL       	R24, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x10, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFEE595C:  3F00C22E  lis r24,-15826
+	  21: MOVL       	$0xC22E0000, t16
+	  22: PUTL       	t16, R24
+	  23: INCEIPL       	$4
+
+	0xFEE5960:  92E1000C  stw r23,12(r1)
+	  24: GETL       	R23, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0xC, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0xFEE5964:  63174507  ori r23,r24,0x4507
+	  29: MOVL       	$0xC22E4507, t22
+	  30: PUTL       	t22, R23
+	  31: INCEIPL       	$4
+
+	0xFEE5968:  7D06B896  mulhw r8,r6,r23
+	  32: GETL       	R6, t24
+	  33: GETL       	R23, t26
+	  34: MULHL       	t24, t26
+	  35: PUTL       	t26, R8
+	  36: INCEIPL       	$4
+
+	0xFEE596C:  7CCAFE70  srawi r10,r6,31
+	  37: GETL       	R6, t28
+	  38: SARL       	$0x1F, t28  (-wCa)
+	  39: PUTL       	t28, R10
+	  40: INCEIPL       	$4
+
+	0xFEE5970:  93210014  stw r25,20(r1)
+	  41: GETL       	R25, t30
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x14, t32
+	  44: STL       	t30, (t32)
+	  45: INCEIPL       	$4
+
+	0xFEE5974:  7F2802A6  mflr r25
+	  46: GETL       	LR, t34
+	  47: PUTL       	t34, R25
+	  48: INCEIPL       	$4
+
+	0xFEE5978:  480C24D9  bl 0xFFA7E50
+	  49: MOVL       	$0xFEE597C, t36
+	  50: PUTL       	t36, LR
+	  51: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3131 FEE5948 52
+. 94 21 FF D0 3D 80 00 01 80 C3 00 00 61 83 51 80 93 01 00 10 3F 00 C2 2E 92 E1 00 0C 63 17 45 07 7D 06 B8 96 7C CA FE 70 93 21 00 14 7F 28 02 A6 48 0C 24 D9
+==== BB 3132 (0xFEE597C) approx BBs exec'd 0 ====
+
+	0xFEE597C:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEE5980:  93410018  stw r26,24(r1)
+	   5: GETL       	R26, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x18, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEE5984:  7FC802A6  mflr r30
+	  10: GETL       	LR, t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xFEE5988:  7CE83214  add r7,r8,r6
+	  13: GETL       	R8, t10
+	  14: GETL       	R6, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R7
+	  17: INCEIPL       	$4
+
+	0xFEE598C:  9361001C  stw r27,28(r1)
+	  18: GETL       	R27, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x1C, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFEE5990:  7CE08670  srawi r0,r7,16
+	  23: GETL       	R7, t18
+	  24: SARL       	$0x10, t18  (-wCa)
+	  25: PUTL       	t18, R0
+	  26: INCEIPL       	$4
+
+	0xFEE5994:  93810020  stw r28,32(r1)
+	  27: GETL       	R28, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFEE5998:  7D8A0050  subf r12,r10,r0
+	  32: GETL       	R10, t24
+	  33: GETL       	R0, t26
+	  34: SUBL       	t24, t26
+	  35: PUTL       	t26, R12
+	  36: INCEIPL       	$4
+
+	0xFEE599C:  93A10024  stw r29,36(r1)
+	  37: GETL       	R29, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x24, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFEE59A0:  7D6C19D6  mullw r11,r12,r3
+	  42: GETL       	R12, t32
+	  43: GETL       	R3, t34
+	  44: MULL       	t32, t34
+	  45: PUTL       	t34, R11
+	  46: INCEIPL       	$4
+
+	0xFEE59A4:  93E1002C  stw r31,44(r1)
+	  47: GETL       	R31, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x2C, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0xFEE59A8:  7F2803A6  mtlr r25
+	  52: GETL       	R25, t40
+	  53: PUTL       	t40, LR
+	  54: INCEIPL       	$4
+
+	0xFEE59AC:  7CB72B78  or r23,r5,r5
+	  55: GETL       	R5, t42
+	  56: PUTL       	t42, R23
+	  57: INCEIPL       	$4
+
+	0xFEE59B0:  7D2B3050  subf r9,r11,r6
+	  58: GETL       	R11, t44
+	  59: GETL       	R6, t46
+	  60: SUBL       	t44, t46
+	  61: PUTL       	t46, R9
+	  62: INCEIPL       	$4
+
+	0xFEE59B4:  7CE92215  add. r7,r9,r4
+	  63: GETL       	R9, t48
+	  64: GETL       	R4, t50
+	  65: ADDL       	t48, t50
+	  66: PUTL       	t50, R7
+	  67: CMP0L       	t50, t52  (-rSo)
+	  68: ICRFL       	t52, $0x0, CR
+	  69: INCEIPL       	$4
+
+	0xFEE59B8:  41800210  bc 12,0,0xFEE5BC8
+	  70: Js00o       	$0xFEE5BC8
+
+
+
+. 3132 FEE597C 64
+. 93 C1 00 28 93 41 00 18 7F C8 02 A6 7C E8 32 14 93 61 00 1C 7C E0 86 70 93 81 00 20 7D 8A 00 50 93 A1 00 24 7D 6C 19 D6 93 E1 00 2C 7F 28 03 A6 7C B7 2B 78 7D 2B 30 50 7C E9 22 15 41 80 02 10
+==== BB 3133 (0xFEE59BC) approx BBs exec'd 0 ====
+
+	0xFEE59BC:  3F400001  lis r26,1
+	   0: MOVL       	$0x10000, t0
+	   1: PUTL       	t0, R26
+	   2: INCEIPL       	$4
+
+	0xFEE59C0:  6345517F  ori r5,r26,0x517F
+	   3: MOVL       	$0x1517F, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFEE59C4:  7F872800  cmp cr7,r7,r5
+	   6: GETL       	R7, t4
+	   7: GETL       	R5, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFEE59C8:  409D0020  bc 4,29,0xFEE59E8
+	  11: Jc29o       	$0xFEE59E8
+
+
+
+. 3133 FEE59BC 16
+. 3F 40 00 01 63 45 51 7F 7F 87 28 00 40 9D 00 20
+==== BB 3134 (0xFEE59E8) approx BBs exec'd 0 ====
+
+	0xFEE59E8:  3F2091A2  lis r25,-28254
+	   0: MOVL       	$0x91A20000, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0xFEE59EC:  7CE8FE70  srawi r8,r7,31
+	   3: GETL       	R7, t2
+	   4: SARL       	$0x1F, t2  (-wCa)
+	   5: PUTL       	t2, R8
+	   6: INCEIPL       	$4
+
+	0xFEE59F0:  6338B3C5  ori r24,r25,0xB3C5
+	   7: MOVL       	$0x91A2B3C5, t4
+	   8: PUTL       	t4, R24
+	   9: INCEIPL       	$4
+
+	0xFEE59F4:  3D609249  lis r11,-28087
+	  10: MOVL       	$0x92490000, t6
+	  11: PUTL       	t6, R11
+	  12: INCEIPL       	$4
+
+	0xFEE59F8:  7D27C096  mulhw r9,r7,r24
+	  13: GETL       	R7, t8
+	  14: GETL       	R24, t10
+	  15: MULHL       	t8, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0xFEE59FC:  617F2493  ori r31,r11,0x2493
+	  18: MOVL       	$0x92492493, t12
+	  19: PUTL       	t12, R31
+	  20: INCEIPL       	$4
+
+	0xFEE5A00:  3CC08888  lis r6,-30584
+	  21: MOVL       	$0x88880000, t14
+	  22: PUTL       	t14, R6
+	  23: INCEIPL       	$4
+
+	0xFEE5A04:  60DC8889  ori r28,r6,0x8889
+	  24: MOVL       	$0x88888889, t16
+	  25: PUTL       	t16, R28
+	  26: INCEIPL       	$4
+
+	0xFEE5A08:  7C093A14  add r0,r9,r7
+	  27: GETL       	R9, t18
+	  28: GETL       	R7, t20
+	  29: ADDL       	t18, t20
+	  30: PUTL       	t20, R0
+	  31: INCEIPL       	$4
+
+	0xFEE5A0C:  7C035E70  srawi r3,r0,11
+	  32: GETL       	R0, t22
+	  33: SARL       	$0xB, t22  (-wCa)
+	  34: PUTL       	t22, R3
+	  35: INCEIPL       	$4
+
+	0xFEE5A10:  7FA81850  subf r29,r8,r3
+	  36: GETL       	R8, t24
+	  37: GETL       	R3, t26
+	  38: SUBL       	t24, t26
+	  39: PUTL       	t26, R29
+	  40: INCEIPL       	$4
+
+	0xFEE5A14:  390C0004  addi r8,r12,4
+	  41: GETL       	R12, t28
+	  42: ADDL       	$0x4, t28
+	  43: PUTL       	t28, R8
+	  44: INCEIPL       	$4
+
+	0xFEE5A18:  1D5D0E10  mulli r10,r29,3600
+	  45: GETL       	R29, t30
+	  46: MULL       	$0xE10, t30
+	  47: PUTL       	t30, R10
+	  48: INCEIPL       	$4
+
+	0xFEE5A1C:  7D05FE70  srawi r5,r8,31
+	  49: GETL       	R8, t32
+	  50: SARL       	$0x1F, t32  (-wCa)
+	  51: PUTL       	t32, R5
+	  52: INCEIPL       	$4
+
+	0xFEE5A20:  93B70008  stw r29,8(r23)
+	  53: GETL       	R29, t34
+	  54: GETL       	R23, t36
+	  55: ADDL       	$0x8, t36
+	  56: STL       	t34, (t36)
+	  57: INCEIPL       	$4
+
+	0xFEE5A24:  7F68F896  mulhw r27,r8,r31
+	  58: GETL       	R8, t38
+	  59: GETL       	R31, t40
+	  60: MULHL       	t38, t40
+	  61: PUTL       	t40, R27
+	  62: INCEIPL       	$4
+
+	0xFEE5A28:  7C6A3850  subf r3,r10,r7
+	  63: GETL       	R10, t42
+	  64: GETL       	R7, t44
+	  65: SUBL       	t42, t44
+	  66: PUTL       	t44, R3
+	  67: INCEIPL       	$4
+
+	0xFEE5A2C:  7C78FE70  srawi r24,r3,31
+	  68: GETL       	R3, t46
+	  69: SARL       	$0x1F, t46  (-wCa)
+	  70: PUTL       	t46, R24
+	  71: INCEIPL       	$4
+
+	0xFEE5A30:  7F23E096  mulhw r25,r3,r28
+	  72: GETL       	R3, t48
+	  73: GETL       	R28, t50
+	  74: MULHL       	t48, t50
+	  75: PUTL       	t50, R25
+	  76: INCEIPL       	$4
+
+	0xFEE5A34:  7F5B4214  add r26,r27,r8
+	  77: GETL       	R27, t52
+	  78: GETL       	R8, t54
+	  79: ADDL       	t52, t54
+	  80: PUTL       	t54, R26
+	  81: INCEIPL       	$4
+
+	0xFEE5A38:  7F441670  srawi r4,r26,2
+	  82: GETL       	R26, t56
+	  83: SARL       	$0x2, t56  (-wCa)
+	  84: PUTL       	t56, R4
+	  85: INCEIPL       	$4
+
+	0xFEE5A3C:  7C052050  subf r0,r5,r4
+	  86: GETL       	R5, t58
+	  87: GETL       	R4, t60
+	  88: SUBL       	t58, t60
+	  89: PUTL       	t60, R0
+	  90: INCEIPL       	$4
+
+	0xFEE5A40:  7CF91A14  add r7,r25,r3
+	  91: GETL       	R25, t62
+	  92: GETL       	R3, t64
+	  93: ADDL       	t62, t64
+	  94: PUTL       	t64, R7
+	  95: INCEIPL       	$4
+
+	0xFEE5A44:  1D600007  mulli r11,r0,7
+	  96: GETL       	R0, t66
+	  97: MULL       	$0x7, t66
+	  98: PUTL       	t66, R11
+	  99: INCEIPL       	$4
+
+	0xFEE5A48:  7CE92E70  srawi r9,r7,5
+	 100: GETL       	R7, t68
+	 101: SARL       	$0x5, t68  (-wCa)
+	 102: PUTL       	t68, R9
+	 103: INCEIPL       	$4
+
+	0xFEE5A4C:  7FB84850  subf r29,r24,r9
+	 104: GETL       	R24, t70
+	 105: GETL       	R9, t72
+	 106: SUBL       	t70, t72
+	 107: PUTL       	t72, R29
+	 108: INCEIPL       	$4
+
+	0xFEE5A50:  7D0B4051  subf. r8,r11,r8
+	 109: GETL       	R11, t74
+	 110: GETL       	R8, t76
+	 111: SUBL       	t74, t76
+	 112: PUTL       	t76, R8
+	 113: CMP0L       	t76, t78  (-rSo)
+	 114: ICRFL       	t78, $0x0, CR
+	 115: INCEIPL       	$4
+
+	0xFEE5A54:  1CDD003C  mulli r6,r29,60
+	 116: GETL       	R29, t80
+	 117: MULL       	$0x3C, t80
+	 118: PUTL       	t80, R6
+	 119: INCEIPL       	$4
+
+	0xFEE5A58:  93B70004  stw r29,4(r23)
+	 120: GETL       	R29, t82
+	 121: GETL       	R23, t84
+	 122: ADDL       	$0x4, t84
+	 123: STL       	t82, (t84)
+	 124: INCEIPL       	$4
+
+	0xFEE5A5C:  7FE61850  subf r31,r6,r3
+	 125: GETL       	R6, t86
+	 126: GETL       	R3, t88
+	 127: SUBL       	t86, t88
+	 128: PUTL       	t88, R31
+	 129: INCEIPL       	$4
+
+	0xFEE5A60:  93F70000  stw r31,0(r23)
+	 130: GETL       	R31, t90
+	 131: GETL       	R23, t92
+	 132: STL       	t90, (t92)
+	 133: INCEIPL       	$4
+
+	0xFEE5A64:  41800258  bc 12,0,0xFEE5CBC
+	 134: Js00o       	$0xFEE5CBC
+
+
+
+. 3134 FEE59E8 128
+. 3F 20 91 A2 7C E8 FE 70 63 38 B3 C5 3D 60 92 49 7D 27 C0 96 61 7F 24 93 3C C0 88 88 60 DC 88 89 7C 09 3A 14 7C 03 5E 70 7F A8 18 50 39 0C 00 04 1D 5D 0E 10 7D 05 FE 70 93 B7 00 08 7F 68 F8 96 7C 6A 38 50 7C 78 FE 70 7F 23 E0 96 7F 5B 42 14 7F 44 16 70 7C 05 20 50 7C F9 1A 14 1D 60 00 07 7C E9 2E 70 7F B8 48 50 7D 0B 40 51 1C DD 00 3C 93 B7 00 04 7F E6 18 50 93 F7 00 00 41 80 02 58
+==== BB 3135 (0xFEE5A68) approx BBs exec'd 0 ====
+
+	0xFEE5A68:  91170018  stw r8,24(r23)
+	   0: GETL       	R8, t0
+	   1: GETL       	R23, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEE5A6C:  3CA051EB  lis r5,20971
+	   5: MOVL       	$0x51EB0000, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFEE5A70:  3C80B38C  lis r4,-19572
+	   8: MOVL       	$0xB38C0000, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFEE5A74:  60BA851F  ori r26,r5,0x851F
+	  11: MOVL       	$0x51EB851F, t8
+	  12: PUTL       	t8, R26
+	  13: INCEIPL       	$4
+
+	0xFEE5A78:  6098F9B1  ori r24,r4,0xF9B1
+	  14: MOVL       	$0xB38CF9B1, t10
+	  15: PUTL       	t10, R24
+	  16: INCEIPL       	$4
+
+	0xFEE5A7C:  3BE007B2  li r31,1970
+	  17: MOVL       	$0x7B2, t12
+	  18: PUTL       	t12, R31
+	  19: INCEIPL       	$4
+
+	0xFEE5A80:  391FFFFF  addi r8,r31,-1
+	  20: MOVL       	$0x7B1, t14
+	  21: PUTL       	t14, R8
+	  22: INCEIPL       	$4
+
+	0xFEE5A84:  2F0C0000  cmpi cr6,r12,0
+	  23: GETL       	R12, t16
+	  24: CMP0L       	t16, t18  (-rSo)
+	  25: ICRFL       	t18, $0x6, CR
+	  26: INCEIPL       	$4
+
+	0xFEE5A88:  7D1B1670  srawi r27,r8,2
+	  27: GETL       	R8, t20
+	  28: SARL       	$0x2, t20  (-wCa)
+	  29: PUTL       	t20, R27
+	  30: INCEIPL       	$4
+
+	0xFEE5A8C:  7F7B0194  addze r27,r27
+	  31: GETL       	R27, t22
+	  32: ADCL       	$0x0, t22  (-rCa-wCa)
+	  33: PUTL       	t22, R27
+	  34: INCEIPL       	$4
+
+	0xFEE5A90:  7D8AFE70  srawi r10,r12,31
+	  35: GETL       	R12, t24
+	  36: SARL       	$0x1F, t24  (-wCa)
+	  37: PUTL       	t24, R10
+	  38: INCEIPL       	$4
+
+	0xFEE5A94:  5779103A  rlwinm r25,r27,2,0,29
+	  39: GETL       	R27, t26
+	  40: SHLL       	$0x2, t26
+	  41: PUTL       	t26, R25
+	  42: INCEIPL       	$4
+
+	0xFEE5A98:  7D05FE70  srawi r5,r8,31
+	  43: GETL       	R8, t28
+	  44: SARL       	$0x1F, t28  (-wCa)
+	  45: PUTL       	t28, R5
+	  46: INCEIPL       	$4
+
+	0xFEE5A9C:  7C994050  subf r4,r25,r8
+	  47: GETL       	R25, t30
+	  48: GETL       	R8, t32
+	  49: SUBL       	t30, t32
+	  50: PUTL       	t32, R4
+	  51: INCEIPL       	$4
+
+	0xFEE5AA0:  73E70003  andi. r7,r31,0x3
+	  52: GETL       	R31, t34
+	  53: ANDL       	$0x3, t34
+	  54: PUTL       	t34, R7
+	  55: CMP0L       	t34, t36  (-rSo)
+	  56: ICRFL       	t36, $0x0, CR
+	  57: INCEIPL       	$4
+
+	0xFEE5AA4:  54990FFE  rlwinm r25,r4,1,31,31
+	  58: GETL       	R4, t38
+	  59: SHRL       	$0x1F, t38
+	  60: PUTL       	t38, R25
+	  61: INCEIPL       	$4
+
+	0xFEE5AA8:  41980044  bc 12,24,0xFEE5AEC
+	  62: Js24o       	$0xFEE5AEC
+
+
+
+. 3135 FEE5A68 68
+. 91 17 00 18 3C A0 51 EB 3C 80 B3 8C 60 BA 85 1F 60 98 F9 B1 3B E0 07 B2 39 1F FF FF 2F 0C 00 00 7D 1B 16 70 7F 7B 01 94 7D 8A FE 70 57 79 10 3A 7D 05 FE 70 7C 99 40 50 73 E7 00 03 54 99 0F FE 41 98 00 44
+==== BB 3136 (0xFEE5AAC) approx BBs exec'd 0 ====
+
+	0xFEE5AAC:  7FEBFE70  srawi r11,r31,31
+	   0: GETL       	R31, t0
+	   1: SARL       	$0x1F, t0  (-wCa)
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFEE5AB0:  2C8C016C  cmpi cr1,r12,364
+	   4: GETL       	R12, t2
+	   5: MOVL       	$0x16C, t6
+	   6: CMPL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFEE5AB4:  40820034  bc 4,2,0xFEE5AE8
+	   9: Jc02o       	$0xFEE5AE8
+
+
+
+. 3136 FEE5AAC 12
+. 7F EB FE 70 2C 8C 01 6C 40 82 00 34
+==== BB 3137 (0xFEE5AE8) approx BBs exec'd 0 ====
+
+	0xFEE5AE8:  40850108  bc 4,5,0xFEE5BF0
+	   0: Jc05o       	$0xFEE5BF0
+
+
+
+. 3137 FEE5AE8 4
+. 40 85 01 08
+==== BB 3138 (0xFEE5AEC) approx BBs exec'd 0 ====
+
+	0xFEE5AEC:  7C0CC096  mulhw r0,r12,r24
+	   0: GETL       	R12, t0
+	   1: GETL       	R24, t2
+	   2: MULHL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEE5AF0:  7FA06214  add r29,r0,r12
+	   5: GETL       	R0, t4
+	   6: GETL       	R12, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFEE5AF4:  7C88D096  mulhw r4,r8,r26
+	  10: GETL       	R8, t8
+	  11: GETL       	R26, t10
+	  12: MULHL       	t8, t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0xFEE5AF8:  7FAB4670  srawi r11,r29,8
+	  15: GETL       	R29, t12
+	  16: SARL       	$0x8, t12  (-wCa)
+	  17: PUTL       	t12, R11
+	  18: INCEIPL       	$4
+
+	0xFEE5AFC:  7F8A5850  subf r28,r10,r11
+	  19: GETL       	R10, t14
+	  20: GETL       	R11, t16
+	  21: SUBL       	t14, t16
+	  22: PUTL       	t16, R28
+	  23: INCEIPL       	$4
+
+	0xFEE5B00:  7CDCFA14  add r6,r28,r31
+	  24: GETL       	R28, t18
+	  25: GETL       	R31, t20
+	  26: ADDL       	t18, t20
+	  27: PUTL       	t20, R6
+	  28: INCEIPL       	$4
+
+	0xFEE5B04:  1D3C016D  mulli r9,r28,365
+	  29: GETL       	R28, t22
+	  30: MULL       	$0x16D, t22
+	  31: PUTL       	t22, R9
+	  32: INCEIPL       	$4
+
+	0xFEE5B08:  7C9C3E70  srawi r28,r4,7
+	  33: GETL       	R4, t24
+	  34: SARL       	$0x7, t24  (-wCa)
+	  35: PUTL       	t24, R28
+	  36: INCEIPL       	$4
+
+	0xFEE5B0C:  7F85E050  subf r28,r5,r28
+	  37: GETL       	R5, t26
+	  38: GETL       	R28, t28
+	  39: SUBL       	t26, t28
+	  40: PUTL       	t28, R28
+	  41: INCEIPL       	$4
+
+	0xFEE5B10:  7C696050  subf r3,r9,r12
+	  42: GETL       	R9, t30
+	  43: GETL       	R12, t32
+	  44: SUBL       	t30, t32
+	  45: PUTL       	t32, R3
+	  46: INCEIPL       	$4
+
+	0xFEE5B14:  7C892E70  srawi r9,r4,5
+	  47: GETL       	R4, t34
+	  48: SARL       	$0x5, t34  (-wCa)
+	  49: PUTL       	t34, R9
+	  50: INCEIPL       	$4
+
+	0xFEE5B18:  54670FFE  rlwinm r7,r3,1,31,31
+	  51: GETL       	R3, t36
+	  52: SHRL       	$0x1F, t36
+	  53: PUTL       	t36, R7
+	  54: INCEIPL       	$4
+
+	0xFEE5B1C:  7D254850  subf r9,r5,r9
+	  55: GETL       	R5, t38
+	  56: GETL       	R9, t40
+	  57: SUBL       	t38, t40
+	  58: PUTL       	t40, R9
+	  59: INCEIPL       	$4
+
+	0xFEE5B20:  7CC73050  subf r6,r7,r6
+	  60: GETL       	R7, t42
+	  61: GETL       	R6, t44
+	  62: SUBL       	t42, t44
+	  63: PUTL       	t44, R6
+	  64: INCEIPL       	$4
+
+	0xFEE5B24:  38E6FFFF  addi r7,r6,-1
+	  65: GETL       	R6, t46
+	  66: ADDL       	$0xFFFFFFFF, t46
+	  67: PUTL       	t46, R7
+	  68: INCEIPL       	$4
+
+	0xFEE5B28:  7C07D096  mulhw r0,r7,r26
+	  69: GETL       	R7, t48
+	  70: GETL       	R26, t50
+	  71: MULHL       	t48, t50
+	  72: PUTL       	t50, R0
+	  73: INCEIPL       	$4
+
+	0xFEE5B2C:  7CEBFE70  srawi r11,r7,31
+	  74: GETL       	R7, t52
+	  75: SARL       	$0x1F, t52  (-wCa)
+	  76: PUTL       	t52, R11
+	  77: INCEIPL       	$4
+
+	0xFEE5B30:  7C0A2E70  srawi r10,r0,5
+	  78: GETL       	R0, t54
+	  79: SARL       	$0x5, t54  (-wCa)
+	  80: PUTL       	t54, R10
+	  81: INCEIPL       	$4
+
+	0xFEE5B34:  7C1D3E70  srawi r29,r0,7
+	  82: GETL       	R0, t56
+	  83: SARL       	$0x7, t56  (-wCa)
+	  84: PUTL       	t56, R29
+	  85: INCEIPL       	$4
+
+	0xFEE5B38:  7D4B5050  subf r10,r11,r10
+	  86: GETL       	R11, t58
+	  87: GETL       	R10, t60
+	  88: SUBL       	t58, t60
+	  89: PUTL       	t60, R10
+	  90: INCEIPL       	$4
+
+	0xFEE5B3C:  7FABE850  subf r29,r11,r29
+	  91: GETL       	R11, t62
+	  92: GETL       	R29, t64
+	  93: SUBL       	t62, t64
+	  94: PUTL       	t64, R29
+	  95: INCEIPL       	$4
+
+	0xFEE5B40:  1C6A0064  mulli r3,r10,100
+	  96: GETL       	R10, t66
+	  97: MULL       	$0x64, t66
+	  98: PUTL       	t66, R3
+	  99: INCEIPL       	$4
+
+	0xFEE5B44:  1C9D0190  mulli r4,r29,400
+	 100: GETL       	R29, t68
+	 101: MULL       	$0x190, t68
+	 102: PUTL       	t68, R4
+	 103: INCEIPL       	$4
+
+	0xFEE5B48:  7CA33850  subf r5,r3,r7
+	 104: GETL       	R3, t70
+	 105: GETL       	R7, t72
+	 106: SUBL       	t70, t72
+	 107: PUTL       	t72, R5
+	 108: INCEIPL       	$4
+
+	0xFEE5B4C:  1C090064  mulli r0,r9,100
+	 109: GETL       	R9, t74
+	 110: MULL       	$0x64, t74
+	 111: PUTL       	t74, R0
+	 112: INCEIPL       	$4
+
+	0xFEE5B50:  54AB0FFE  rlwinm r11,r5,1,31,31
+	 113: GETL       	R5, t76
+	 114: SHRL       	$0x1F, t76
+	 115: PUTL       	t76, R11
+	 116: INCEIPL       	$4
+
+	0xFEE5B54:  1C7C0190  mulli r3,r28,400
+	 117: GETL       	R28, t78
+	 118: MULL       	$0x190, t78
+	 119: PUTL       	t78, R3
+	 120: INCEIPL       	$4
+
+	0xFEE5B58:  7CA43850  subf r5,r4,r7
+	 121: GETL       	R4, t80
+	 122: GETL       	R7, t82
+	 123: SUBL       	t80, t82
+	 124: PUTL       	t82, R5
+	 125: INCEIPL       	$4
+
+	0xFEE5B5C:  7C804050  subf r4,r0,r8
+	 126: GETL       	R0, t84
+	 127: GETL       	R8, t86
+	 128: SUBL       	t84, t86
+	 129: PUTL       	t86, R4
+	 130: INCEIPL       	$4
+
+	0xFEE5B60:  7CE01670  srawi r0,r7,2
+	 131: GETL       	R7, t88
+	 132: SARL       	$0x2, t88  (-wCa)
+	 133: PUTL       	t88, R0
+	 134: INCEIPL       	$4
+
+	0xFEE5B64:  7C000194  addze r0,r0
+	 135: GETL       	R0, t90
+	 136: ADCL       	$0x0, t90  (-rCa-wCa)
+	 137: PUTL       	t90, R0
+	 138: INCEIPL       	$4
+
+	0xFEE5B68:  7D4B5050  subf r10,r11,r10
+	 139: GETL       	R11, t92
+	 140: GETL       	R10, t94
+	 141: SUBL       	t92, t94
+	 142: PUTL       	t94, R10
+	 143: INCEIPL       	$4
+
+	0xFEE5B6C:  7C634050  subf r3,r3,r8
+	 144: GETL       	R3, t96
+	 145: GETL       	R8, t98
+	 146: SUBL       	t96, t98
+	 147: PUTL       	t98, R3
+	 148: INCEIPL       	$4
+
+	0xFEE5B70:  54A50FFE  rlwinm r5,r5,1,31,31
+	 149: GETL       	R5, t100
+	 150: SHRL       	$0x1F, t100
+	 151: PUTL       	t100, R5
+	 152: INCEIPL       	$4
+
+	0xFEE5B74:  54840FFE  rlwinm r4,r4,1,31,31
+	 153: GETL       	R4, t102
+	 154: SHRL       	$0x1F, t102
+	 155: PUTL       	t102, R4
+	 156: INCEIPL       	$4
+
+	0xFEE5B78:  7D7F3050  subf r11,r31,r6
+	 157: GETL       	R31, t104
+	 158: GETL       	R6, t106
+	 159: SUBL       	t104, t106
+	 160: PUTL       	t106, R11
+	 161: INCEIPL       	$4
+
+	0xFEE5B7C:  541F103A  rlwinm r31,r0,2,0,29
+	 162: GETL       	R0, t108
+	 163: SHLL       	$0x2, t108
+	 164: PUTL       	t108, R31
+	 165: INCEIPL       	$4
+
+	0xFEE5B80:  7FA5E850  subf r29,r5,r29
+	 166: GETL       	R5, t110
+	 167: GETL       	R29, t112
+	 168: SUBL       	t110, t112
+	 169: PUTL       	t112, R29
+	 170: INCEIPL       	$4
+
+	0xFEE5B84:  7D0A0050  subf r8,r10,r0
+	 171: GETL       	R10, t114
+	 172: GETL       	R0, t116
+	 173: SUBL       	t114, t116
+	 174: PUTL       	t116, R8
+	 175: INCEIPL       	$4
+
+	0xFEE5B88:  7CBF3850  subf r5,r31,r7
+	 176: GETL       	R31, t118
+	 177: GETL       	R7, t120
+	 178: SUBL       	t118, t120
+	 179: PUTL       	t120, R5
+	 180: INCEIPL       	$4
+
+	0xFEE5B8C:  7D244850  subf r9,r4,r9
+	 181: GETL       	R4, t122
+	 182: GETL       	R9, t124
+	 183: SUBL       	t122, t124
+	 184: PUTL       	t124, R9
+	 185: INCEIPL       	$4
+
+	0xFEE5B90:  547F0FFE  rlwinm r31,r3,1,31,31
+	 186: GETL       	R3, t126
+	 187: SHRL       	$0x1F, t126
+	 188: PUTL       	t126, R31
+	 189: INCEIPL       	$4
+
+	0xFEE5B94:  1D4B016D  mulli r10,r11,365
+	 190: GETL       	R11, t128
+	 191: MULL       	$0x16D, t128
+	 192: PUTL       	t128, R10
+	 193: INCEIPL       	$4
+
+	0xFEE5B98:  7C08EA14  add r0,r8,r29
+	 194: GETL       	R8, t130
+	 195: GETL       	R29, t132
+	 196: ADDL       	t130, t132
+	 197: PUTL       	t132, R0
+	 198: INCEIPL       	$4
+
+	0xFEE5B9C:  7C9FE050  subf r4,r31,r28
+	 199: GETL       	R31, t134
+	 200: GETL       	R28, t136
+	 201: SUBL       	t134, t136
+	 202: PUTL       	t136, R4
+	 203: INCEIPL       	$4
+
+	0xFEE5BA0:  7D09D850  subf r8,r9,r27
+	 204: GETL       	R9, t138
+	 205: GETL       	R27, t140
+	 206: SUBL       	t138, t140
+	 207: PUTL       	t140, R8
+	 208: INCEIPL       	$4
+
+	0xFEE5BA4:  54A70FFE  rlwinm r7,r5,1,31,31
+	 209: GETL       	R5, t142
+	 210: SHRL       	$0x1F, t142
+	 211: PUTL       	t142, R7
+	 212: INCEIPL       	$4
+
+	0xFEE5BA8:  7FE82214  add r31,r8,r4
+	 213: GETL       	R8, t144
+	 214: GETL       	R4, t146
+	 215: ADDL       	t144, t146
+	 216: PUTL       	t146, R31
+	 217: INCEIPL       	$4
+
+	0xFEE5BAC:  7C670050  subf r3,r7,r0
+	 218: GETL       	R7, t148
+	 219: GETL       	R0, t150
+	 220: SUBL       	t148, t150
+	 221: PUTL       	t150, R3
+	 222: INCEIPL       	$4
+
+	0xFEE5BB0:  7FB9F850  subf r29,r25,r31
+	 223: GETL       	R25, t152
+	 224: GETL       	R31, t154
+	 225: SUBL       	t152, t154
+	 226: PUTL       	t154, R29
+	 227: INCEIPL       	$4
+
+	0xFEE5BB4:  7F8A1A14  add r28,r10,r3
+	 228: GETL       	R10, t156
+	 229: GETL       	R3, t158
+	 230: ADDL       	t156, t158
+	 231: PUTL       	t158, R28
+	 232: INCEIPL       	$4
+
+	0xFEE5BB8:  7F7DE050  subf r27,r29,r28
+	 233: GETL       	R29, t160
+	 234: GETL       	R28, t162
+	 235: SUBL       	t160, t162
+	 236: PUTL       	t162, R27
+	 237: INCEIPL       	$4
+
+	0xFEE5BBC:  7CDF3378  or r31,r6,r6
+	 238: GETL       	R6, t164
+	 239: PUTL       	t164, R31
+	 240: INCEIPL       	$4
+
+	0xFEE5BC0:  7D9B6050  subf r12,r27,r12
+	 241: GETL       	R27, t166
+	 242: GETL       	R12, t168
+	 243: SUBL       	t166, t168
+	 244: PUTL       	t168, R12
+	 245: INCEIPL       	$4
+
+	0xFEE5BC4:  4BFFFEBC  b 0xFEE5A80
+	 246: JMPo       	$0xFEE5A80  ($4)
+
+
+
+. 3138 FEE5AEC 220
+. 7C 0C C0 96 7F A0 62 14 7C 88 D0 96 7F AB 46 70 7F 8A 58 50 7C DC FA 14 1D 3C 01 6D 7C 9C 3E 70 7F 85 E0 50 7C 69 60 50 7C 89 2E 70 54 67 0F FE 7D 25 48 50 7C C7 30 50 38 E6 FF FF 7C 07 D0 96 7C EB FE 70 7C 0A 2E 70 7C 1D 3E 70 7D 4B 50 50 7F AB E8 50 1C 6A 00 64 1C 9D 01 90 7C A3 38 50 1C 09 00 64 54 AB 0F FE 1C 7C 01 90 7C A4 38 50 7C 80 40 50 7C E0 16 70 7C 00 01 94 7D 4B 50 50 7C 63 40 50 54 A5 0F FE 54 84 0F FE 7D 7F 30 50 54 1F 10 3A 7F A5 E8 50 7D 0A 00 50 7C BF 38 50 7D 24 48 50 54 7F 0F FE 1D 4B 01 6D 7C 08 EA 14 7C 9F E0 50 7D 09 D8 50 54 A7 0F FE 7F E8 22 14 7C 67 00 50 7F B9 F8 50 7F 8A 1A 14 7F 7D E0 50 7C DF 33 78 7D 9B 60 50 4B FF FE BC
+==== BB 3139 (0xFEE5A80) approx BBs exec'd 0 ====
+
+	0xFEE5A80:  391FFFFF  addi r8,r31,-1
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0xFEE5A84:  2F0C0000  cmpi cr6,r12,0
+	   4: GETL       	R12, t2
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0xFEE5A88:  7D1B1670  srawi r27,r8,2
+	   8: GETL       	R8, t6
+	   9: SARL       	$0x2, t6  (-wCa)
+	  10: PUTL       	t6, R27
+	  11: INCEIPL       	$4
+
+	0xFEE5A8C:  7F7B0194  addze r27,r27
+	  12: GETL       	R27, t8
+	  13: ADCL       	$0x0, t8  (-rCa-wCa)
+	  14: PUTL       	t8, R27
+	  15: INCEIPL       	$4
+
+	0xFEE5A90:  7D8AFE70  srawi r10,r12,31
+	  16: GETL       	R12, t10
+	  17: SARL       	$0x1F, t10  (-wCa)
+	  18: PUTL       	t10, R10
+	  19: INCEIPL       	$4
+
+	0xFEE5A94:  5779103A  rlwinm r25,r27,2,0,29
+	  20: GETL       	R27, t12
+	  21: SHLL       	$0x2, t12
+	  22: PUTL       	t12, R25
+	  23: INCEIPL       	$4
+
+	0xFEE5A98:  7D05FE70  srawi r5,r8,31
+	  24: GETL       	R8, t14
+	  25: SARL       	$0x1F, t14  (-wCa)
+	  26: PUTL       	t14, R5
+	  27: INCEIPL       	$4
+
+	0xFEE5A9C:  7C994050  subf r4,r25,r8
+	  28: GETL       	R25, t16
+	  29: GETL       	R8, t18
+	  30: SUBL       	t16, t18
+	  31: PUTL       	t18, R4
+	  32: INCEIPL       	$4
+
+	0xFEE5AA0:  73E70003  andi. r7,r31,0x3
+	  33: GETL       	R31, t20
+	  34: ANDL       	$0x3, t20
+	  35: PUTL       	t20, R7
+	  36: CMP0L       	t20, t22  (-rSo)
+	  37: ICRFL       	t22, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0xFEE5AA4:  54990FFE  rlwinm r25,r4,1,31,31
+	  39: GETL       	R4, t24
+	  40: SHRL       	$0x1F, t24
+	  41: PUTL       	t24, R25
+	  42: INCEIPL       	$4
+
+	0xFEE5AA8:  41980044  bc 12,24,0xFEE5AEC
+	  43: Js24o       	$0xFEE5AEC
+
+
+
+. 3139 FEE5A80 44
+. 39 1F FF FF 2F 0C 00 00 7D 1B 16 70 7F 7B 01 94 7D 8A FE 70 57 79 10 3A 7D 05 FE 70 7C 99 40 50 73 E7 00 03 54 99 0F FE 41 98 00 44
+==== BB 3140 (0xFEE5BF0) approx BBs exec'd 0 ====
+
+	0xFEE5BF0:  2C870000  cmpi cr1,r7,0
+	   0: GETL       	R7, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEE5BF4:  3B5FF894  addi r26,r31,-1900
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0xFFFFF894, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0xFEE5BF8:  93570014  stw r26,20(r23)
+	   8: GETL       	R26, t6
+	   9: GETL       	R23, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEE5BFC:  39600000  li r11,0
+	  13: MOVL       	$0x0, t10
+	  14: PUTL       	t10, R11
+	  15: INCEIPL       	$4
+
+	0xFEE5C00:  9197001C  stw r12,28(r23)
+	  16: GETL       	R12, t12
+	  17: GETL       	R23, t14
+	  18: ADDL       	$0x1C, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFEE5C04:  40860040  bc 4,6,0xFEE5C44
+	  21: Jc06o       	$0xFEE5C44
+
+
+
+. 3140 FEE5BF0 24
+. 2C 87 00 00 3B 5F F8 94 93 57 00 14 39 60 00 00 91 97 00 1C 40 86 00 40
+==== BB 3141 (0xFEE5C44) approx BBs exec'd 0 ====
+
+	0xFEE5C44:  1D6B001A  mulli r11,r11,26
+	   0: GETL       	R11, t0
+	   1: MULL       	$0x1A, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0xFEE5C48:  807E1CAC  lwz r3,7340(r30)
+	   4: GETL       	R30, t2
+	   5: ADDL       	$0x1CAC, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFEE5C4C:  3900000B  li r8,11
+	   9: MOVL       	$0xB, t6
+	  10: PUTL       	t6, R8
+	  11: INCEIPL       	$4
+
+	0xFEE5C50:  39400016  li r10,22
+	  12: MOVL       	$0x16, t8
+	  13: PUTL       	t8, R10
+	  14: INCEIPL       	$4
+
+	0xFEE5C54:  7D6B1A14  add r11,r11,r3
+	  15: GETL       	R11, t10
+	  16: GETL       	R3, t12
+	  17: ADDL       	t10, t12
+	  18: PUTL       	t12, R11
+	  19: INCEIPL       	$4
+
+	0xFEE5C58:  A3EB0016  lhz r31,22(r11)
+	  20: GETL       	R11, t14
+	  21: ADDL       	$0x16, t14
+	  22: LDW       	(t14), t16
+	  23: PUTL       	t16, R31
+	  24: INCEIPL       	$4
+
+	0xFEE5C5C:  7F9F6000  cmp cr7,r31,r12
+	  25: GETL       	R31, t18
+	  26: GETL       	R12, t20
+	  27: CMPL       	t18, t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x7, CR
+	  29: INCEIPL       	$4
+
+	0xFEE5C60:  409D0018  bc 4,29,0xFEE5C78
+	  30: Jc29o       	$0xFEE5C78
+
+
+
+. 3141 FEE5C44 32
+. 1D 6B 00 1A 80 7E 1C AC 39 00 00 0B 39 40 00 16 7D 6B 1A 14 A3 EB 00 16 7F 9F 60 00 40 9D 00 18
+==== BB 3142 (0xFEE5C64) approx BBs exec'd 0 ====
+
+	0xFEE5C64:  3908FFFF  addi r8,r8,-1
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0xFEE5C68:  550A083C  rlwinm r10,r8,1,0,30
+	   4: GETL       	R8, t2
+	   5: SHLL       	$0x1, t2
+	   6: PUTL       	t2, R10
+	   7: INCEIPL       	$4
+
+	0xFEE5C6C:  7F8A5A2E  lhzx r28,r10,r11
+	   8: GETL       	R11, t4
+	   9: GETL       	R10, t6
+	  10: ADDL       	t6, t4
+	  11: LDW       	(t4), t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0xFEE5C70:  7F9C6000  cmp cr7,r28,r12
+	  14: GETL       	R28, t10
+	  15: GETL       	R12, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFEE5C74:  419DFFF0  bc 12,29,0xFEE5C64
+	  19: Js29o       	$0xFEE5C64
+
+
+
+. 3142 FEE5C64 20
+. 39 08 FF FF 55 0A 08 3C 7F 8A 5A 2E 7F 9C 60 00 41 9D FF F0
+==== BB 3143 (0xFEE5C78) approx BBs exec'd 0 ====
+
+	0xFEE5C78:  7FAA5A2E  lhzx r29,r10,r11
+	   0: GETL       	R11, t0
+	   1: GETL       	R10, t2
+	   2: ADDL       	t2, t0
+	   3: LDW       	(t0), t4
+	   4: PUTL       	t4, R29
+	   5: INCEIPL       	$4
+
+	0xFEE5C7C:  38600001  li r3,1
+	   6: MOVL       	$0x1, t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFEE5C80:  91170010  stw r8,16(r23)
+	   9: GETL       	R8, t8
+	  10: GETL       	R23, t10
+	  11: ADDL       	$0x10, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0xFEE5C84:  7D3D6050  subf r9,r29,r12
+	  14: GETL       	R29, t12
+	  15: GETL       	R12, t14
+	  16: SUBL       	t12, t14
+	  17: PUTL       	t14, R9
+	  18: INCEIPL       	$4
+
+	0xFEE5C88:  83010010  lwz r24,16(r1)
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x10, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R24
+	  23: INCEIPL       	$4
+
+	0xFEE5C8C:  39890001  addi r12,r9,1
+	  24: GETL       	R9, t20
+	  25: ADDL       	$0x1, t20
+	  26: PUTL       	t20, R12
+	  27: INCEIPL       	$4
+
+	0xFEE5C90:  83210014  lwz r25,20(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x14, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R25
+	  32: INCEIPL       	$4
+
+	0xFEE5C94:  9197000C  stw r12,12(r23)
+	  33: GETL       	R12, t26
+	  34: GETL       	R23, t28
+	  35: ADDL       	$0xC, t28
+	  36: STL       	t26, (t28)
+	  37: INCEIPL       	$4
+
+	0xFEE5C98:  83410018  lwz r26,24(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x18, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R26
+	  42: INCEIPL       	$4
+
+	0xFEE5C9C:  82E1000C  lwz r23,12(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0xC, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R23
+	  47: INCEIPL       	$4
+
+	0xFEE5CA0:  8361001C  lwz r27,28(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x1C, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R27
+	  52: INCEIPL       	$4
+
+	0xFEE5CA4:  83810020  lwz r28,32(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x20, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R28
+	  57: INCEIPL       	$4
+
+	0xFEE5CA8:  83A10024  lwz r29,36(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x24, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R29
+	  62: INCEIPL       	$4
+
+	0xFEE5CAC:  83C10028  lwz r30,40(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x28, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R30
+	  67: INCEIPL       	$4
+
+	0xFEE5CB0:  83E1002C  lwz r31,44(r1)
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x2C, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R31
+	  72: INCEIPL       	$4
+
+	0xFEE5CB4:  38210030  addi r1,r1,48
+	  73: GETL       	R1, t58
+	  74: ADDL       	$0x30, t58
+	  75: PUTL       	t58, R1
+	  76: INCEIPL       	$4
+
+	0xFEE5CB8:  4E800020  blr
+	  77: GETL       	LR, t60
+	  78: JMPo-r       	t60  ($4)
+
+
+
+. 3143 FEE5C78 68
+. 7F AA 5A 2E 38 60 00 01 91 17 00 10 7D 3D 60 50 83 01 00 10 39 89 00 01 83 21 00 14 91 97 00 0C 83 41 00 18 82 E1 00 0C 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 3144 (0xFEE7CBC) approx BBs exec'd 0 ====
+
+	0xFEE7CBC:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEE7CC0:  41820090  bc 12,2,0xFEE7D50
+	   4: Js02o       	$0xFEE7D50
+
+
+
+. 3144 FEE7CBC 8
+. 2C 03 00 00 41 82 00 90
+==== BB 3145 (0xFEE7CC4) approx BBs exec'd 0 ====
+
+	0xFEE7CC4:  80DF0000  lwz r6,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFEE7CC8:  80E1000C  lwz r7,12(r1)
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xC, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R7
+	   8: INCEIPL       	$4
+
+	0xFEE7CCC:  7CA63A14  add r5,r6,r7
+	   9: GETL       	R6, t8
+	  10: GETL       	R7, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R5
+	  13: INCEIPL       	$4
+
+	0xFEE7CD0:  90BF0000  stw r5,0(r31)
+	  14: GETL       	R5, t12
+	  15: GETL       	R31, t14
+	  16: STL       	t12, (t14)
+	  17: INCEIPL       	$4
+
+	0xFEE7CD4:  39400000  li r10,0
+	  18: MOVL       	$0x0, t16
+	  19: PUTL       	t16, R10
+	  20: INCEIPL       	$4
+
+	0xFEE7CD8:  7C0004AC  sync
+	  21: INCEIPL       	$4
+
+	0xFEE7CDC:  7D00E828  lwarx r8,r0,r29
+	  22: GETL       	R29, t18
+	  23: LOCKo       	
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R8
+	  26: INCEIPL       	$4
+
+	0xFEE7CE0:  7D40E92D  stwcx. r10,r0,r29
+	  27: GETL       	R29, t22
+	  28: GETL       	R10, t24
+	  29: LOCKo       	
+	  30: STL       	t24, (t22)  (-rSo)
+	  31: ICRFL       	cr, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0xFEE7CE4:  40A2FFF8  bc 5,2,0xFEE7CDC
+	  33: Jc02o       	$0xFEE7CDC
+
+
+
+. 3145 FEE7CC4 36
+. 80 DF 00 00 80 E1 00 0C 7C A6 3A 14 90 BF 00 00 39 40 00 00 7C 00 04 AC 7D 00 E8 28 7D 40 E9 2D 40 A2 FF F8
+==== BB 3146 (0xFEE7CE8) approx BBs exec'd 0 ====
+
+	0xFEE7CE8:  2F880001  cmpi cr7,r8,1
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEE7CEC:  419D0074  bc 12,29,0xFEE7D60
+	   5: Js29o       	$0xFEE7D60
+
+
+
+. 3146 FEE7CE8 8
+. 2F 88 00 01 41 9D 00 74
+==== BB 3147 (0xFEE7CF0) approx BBs exec'd 0 ====
+
+	0xFEE7CF0:  7FE5FB78  or r5,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFEE7CF4:  83A10034  lwz r29,52(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x34, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFEE7CF8:  7CA32B78  or r3,r5,r5
+	   8: GETL       	R5, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFEE7CFC:  83410018  lwz r26,24(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x18, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R26
+	  15: INCEIPL       	$4
+
+	0xFEE7D00:  7FA803A6  mtlr r29
+	  16: GETL       	R29, t12
+	  17: PUTL       	t12, LR
+	  18: INCEIPL       	$4
+
+	0xFEE7D04:  8361001C  lwz r27,28(r1)
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x1C, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R27
+	  23: INCEIPL       	$4
+
+	0xFEE7D08:  83810020  lwz r28,32(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x20, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R28
+	  28: INCEIPL       	$4
+
+	0xFEE7D0C:  83A10024  lwz r29,36(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x24, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R29
+	  33: INCEIPL       	$4
+
+	0xFEE7D10:  83C10028  lwz r30,40(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x28, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R30
+	  38: INCEIPL       	$4
+
+	0xFEE7D14:  83E1002C  lwz r31,44(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x2C, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R31
+	  43: INCEIPL       	$4
+
+	0xFEE7D18:  38210030  addi r1,r1,48
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x30, t34
+	  46: PUTL       	t34, R1
+	  47: INCEIPL       	$4
+
+	0xFEE7D1C:  4E800020  blr
+	  48: GETL       	LR, t36
+	  49: JMPo-r       	t36  ($4)
+
+
+
+. 3147 FEE7CF0 48
+. 7F E5 FB 78 83 A1 00 34 7C A3 2B 78 83 41 00 18 7F A8 03 A6 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 3148 (0xFEE5FDC) approx BBs exec'd 0 ====
+
+	0xFEE5FDC:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEE5FE0:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFEE5FE4:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFEE5FE8:  7C0803A6  mtlr r0
+	  14: GETL       	R0, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFEE5FEC:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+
+. 3148 FEE5FDC 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 3149 (0x100013B0) approx BBs exec'd 0 ====
+
+	0x100013B0:  7C7A1B79  or. r26,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R26
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x100013B4:  41820134  bc 12,2,0x100014E8
+	   5: Js02o       	$0x100014E8
+
+
+
+. 3149 100013B0 8
+. 7C 7A 1B 79 41 82 01 34
+==== BB 3150 (0x100013B8) approx BBs exec'd 0 ====
+
+	0x100013B8:  83FE000C  lwz r31,12(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x100013BC:  3B010030  addi r24,r1,48
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x30, t4
+	   7: PUTL       	t4, R24
+	   8: INCEIPL       	$4
+
+	0x100013C0:  3B200001  li r25,1
+	   9: MOVL       	$0x1, t6
+	  10: PUTL       	t6, R25
+	  11: INCEIPL       	$4
+
+	0x100013C4:  7FA3EB78  or r3,r29,r29
+	  12: GETL       	R29, t8
+	  13: PUTL       	t8, R3
+	  14: INCEIPL       	$4
+
+	0x100013C8:  38810038  addi r4,r1,56
+	  15: GETL       	R1, t10
+	  16: ADDL       	$0x38, t10
+	  17: PUTL       	t10, R4
+	  18: INCEIPL       	$4
+
+	0x100013CC:  38A00001  li r5,1
+	  19: MOVL       	$0x1, t12
+	  20: PUTL       	t12, R5
+	  21: INCEIPL       	$4
+
+	0x100013D0:  3B800000  li r28,0
+	  22: MOVL       	$0x0, t14
+	  23: PUTL       	t14, R28
+	  24: INCEIPL       	$4
+
+	0x100013D4:  48005185  bl 0x10006558
+	  25: MOVL       	$0x100013D8, t16
+	  26: PUTL       	t16, LR
+	  27: JMPo-c       	$0x10006558  ($4)
+
+
+
+. 3150 100013B8 32
+. 83 FE 00 0C 3B 01 00 30 3B 20 00 01 7F A3 EB 78 38 81 00 38 38 A0 00 01 3B 80 00 00 48 00 51 85
+==== BB 3151 (0x10006558) approx BBs exec'd 0 ====
+
+	0x10006558:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x1000655C:  2C030000  cmpi cr0,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0x10006560:  9421FFF0  stwu r1,-16(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFF0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x10006564:  7C8B2378  or r11,r4,r4
+	  13: GETL       	R4, t10
+	  14: PUTL       	t10, R11
+	  15: INCEIPL       	$4
+
+	0x10006568:  90010014  stw r0,20(r1)
+	  16: GETL       	R0, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x14, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x1000656C:  3C007FFF  lis r0,32767
+	  21: MOVL       	$0x7FFF0000, t16
+	  22: PUTL       	t16, R0
+	  23: INCEIPL       	$4
+
+	0x10006570:  6000FFFF  ori r0,r0,0xFFFF
+	  24: MOVL       	$0x7FFFFFFF, t18
+	  25: PUTL       	t18, R0
+	  26: INCEIPL       	$4
+
+	0x10006574:  81240000  lwz r9,0(r4)
+	  27: GETL       	R4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0x10006578:  41820030  bc 12,2,0x100065A8
+	  31: Js02o       	$0x100065A8
+
+
+
+. 3151 10006558 36
+. 7C 08 02 A6 2C 03 00 00 94 21 FF F0 7C 8B 23 78 90 01 00 14 3C 00 7F FF 60 00 FF FF 81 24 00 00 41 82 00 30
+==== BB 3152 (0x100065A8) approx BBs exec'd 0 ====
+
+	0x100065A8:  2F890000  cmpi cr7,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x100065AC:  40BEFFE0  bc 5,30,0x1000658C
+	   4: Jc30o       	$0x1000658C
+
+
+
+. 3152 100065A8 8
+. 2F 89 00 00 40 BE FF E0
+==== BB 3153 (0x100065B0) approx BBs exec'd 0 ====
+
+	0x100065B0:  38000040  li r0,64
+	   0: MOVL       	$0x40, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x100065B4:  7D202B96  divwu r9, r0, r5
+	   3: GETL       	R0, t4
+	   4: GETL       	R5, t2
+	   5: UDIVL       	t2, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x100065B8:  20090000  subfic r0,r9,0
+	   8: GETL       	R9, t6
+	   9: MOVL       	$0x0, t8
+	  10: SBBL       	t6, t8  (-wCa)
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x100065BC:  7C090194  addze r0,r9
+	  13: GETL       	R9, t10
+	  14: ADCL       	$0x0, t10  (-rCa-wCa)
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0x100065C0:  7C090378  or r9,r0,r0
+	  17: GETL       	R0, t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0x100065C4:  4BFFFFC8  b 0x1000658C
+	  20: JMPo       	$0x1000658C  ($4)
+
+
+
+. 3153 100065B0 24
+. 38 00 00 40 7D 20 2B 96 20 09 00 00 7C 09 01 94 7C 09 03 78 4B FF FF C8
+==== BB 3154 (0x1000658C) approx BBs exec'd 0 ====
+
+	0x1000658C:  7C8929D6  mullw r4,r9,r5
+	   0: GETL       	R9, t0
+	   1: GETL       	R5, t2
+	   2: MULL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x10006590:  912B0000  stw r9,0(r11)
+	   5: GETL       	R9, t4
+	   6: GETL       	R11, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x10006594:  4BFFFF8D  bl 0x10006520
+	   9: MOVL       	$0x10006598, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0x10006520  ($4)
+
+
+
+. 3154 1000658C 12
+. 7C 89 29 D6 91 2B 00 00 4B FF FF 8D
+==== BB 3155 (0x10006520) approx BBs exec'd 0 ====
+
+	0x10006520:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10006524:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x10006528:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x1000652C:  3800FFFF  li r0,-1
+	  14: MOVL       	$0xFFFFFFFF, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0x10006530:  7F802040  cmpl cr7,r0,r4
+	  17: GETL       	R0, t12
+	  18: GETL       	R4, t14
+	  19: CMPUL       	t12, t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0x10006534:  419C0020  bc 12,28,0x10006554
+	  22: Js28o       	$0x10006554
+
+
+
+. 3155 10006520 24
+. 7C 08 02 A6 94 21 FF F0 90 01 00 14 38 00 FF FF 7F 80 20 40 41 9C 00 20
+==== BB 3156 (0x10006538) approx BBs exec'd 0 ====
+
+	0x10006538:  48014705  bl 0x1001AC3C
+	   0: MOVL       	$0x1000653C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x1001AC3C  ($4)
+
+
+
+. 3156 10006538 4
+. 48 01 47 05
+==== BB 3157 (0x1001AC3C) approx BBs exec'd 0 ====
+
+	0x1001AC3C:  39600084  li r11,132
+	   0: MOVL       	$0x84, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AC40:  4BFFFECC  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3157 1001AC3C 8
+. 39 60 00 84 4B FF FE CC
+==== BB 3158 (0x1000653C) approx BBs exec'd 0 ====
+
+	0x1000653C:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10006540:  419E0014  bc 12,30,0x10006554
+	   4: Js30o       	$0x10006554
+
+
+
+. 3158 1000653C 8
+. 2F 83 00 00 41 9E 00 14
+==== BB 3159 (0x10006544) approx BBs exec'd 0 ====
+
+	0x10006544:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10006548:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x1000654C:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x10006550:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 3159 10006544 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 3160 (0x10006598) approx BBs exec'd 0 ====
+
+	0x10006598:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x1000659C:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x100065A0:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x100065A4:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 3160 10006598 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 3161 (0x100013D8) approx BBs exec'd 0 ====
+
+	0x100013D8:  809E0070  lwz r4,112(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x70, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x100013DC:  9B230000  stb r25,0(r3)
+	   5: GETL       	R25, t4
+	   6: GETL       	R3, t6
+	   7: STB       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x100013E0:  7C7D1B78  or r29,r3,r3
+	   9: GETL       	R3, t8
+	  10: PUTL       	t8, R29
+	  11: INCEIPL       	$4
+
+	0x100013E4:  801F0000  lwz r0,0(r31)
+	  12: GETL       	R31, t10
+	  13: LDL       	(t10), t12
+	  14: PUTL       	t12, R0
+	  15: INCEIPL       	$4
+
+	0x100013E8:  38600006  li r3,6
+	  16: MOVL       	$0x6, t14
+	  17: PUTL       	t14, R3
+	  18: INCEIPL       	$4
+
+	0x100013EC:  2F800000  cmpi cr7,r0,0
+	  19: GETL       	R0, t16
+	  20: CMP0L       	t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x100013F0:  409E00AC  bc 4,30,0x1000149C
+	  23: Jc30o       	$0x1000149C
+
+
+
+. 3161 100013D8 28
+. 80 9E 00 70 9B 23 00 00 7C 7D 1B 78 80 1F 00 00 38 60 00 06 2F 80 00 00 40 9E 00 AC
+==== BB 3162 (0x100013F4) approx BBs exec'd 0 ====
+
+	0x100013F4:  80810038  lwz r4,56(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x38, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x100013F8:  7F65DB78  or r5,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x100013FC:  81180004  lwz r8,4(r24)
+	   8: GETL       	R24, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0x10001400:  7F46D378  or r6,r26,r26
+	  13: GETL       	R26, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x10001404:  38E00000  li r7,0
+	  16: MOVL       	$0x0, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x10001408:  7FA3EB78  or r3,r29,r29
+	  19: GETL       	R29, t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0x1000140C:  480023C9  bl 0x100037D4
+	  22: MOVL       	$0x10001410, t16
+	  23: PUTL       	t16, LR
+	  24: JMPo-c       	$0x100037D4  ($4)
+
+
+
+. 3162 100013F4 28
+. 80 81 00 38 7F 65 DB 78 81 18 00 04 7F 46 D3 78 38 E0 00 00 7F A3 EB 78 48 00 23 C9
+==== BB 3163 (0x100037D4) approx BBs exec'd 0 ====
+
+	0x100037D4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x100037D8:  9421FB40  stwu r1,-1216(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFB40, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x100037DC:  92C10498  stw r22,1176(r1)
+	   9: GETL       	R22, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x498, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x100037E0:  480170CD  bl 0x1001A8AC
+	  14: MOVL       	$0x100037E4, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x1001A8AC  ($4)
+
+
+
+. 3163 100037D4 16
+. 7C 08 02 A6 94 21 FB 40 92 C1 04 98 48 01 70 CD
+==== BB 3164 (0x100037E4) approx BBs exec'd 0 ====
+
+	0x100037E4:  91E1047C  stw r15,1148(r1)
+	   0: GETL       	R15, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x47C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x100037E8:  7CEF3B78  or r15,r7,r7
+	   5: GETL       	R7, t4
+	   6: PUTL       	t4, R15
+	   7: INCEIPL       	$4
+
+	0x100037EC:  900104C4  stw r0,1220(r1)
+	   8: GETL       	R0, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x4C4, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x100037F0:  82C60008  lwz r22,8(r6)
+	  13: GETL       	R6, t10
+	  14: ADDL       	$0x8, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R22
+	  17: INCEIPL       	$4
+
+	0x100037F4:  92210484  stw r17,1156(r1)
+	  18: GETL       	R17, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x484, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x100037F8:  3A200000  li r17,0
+	  23: MOVL       	$0x0, t18
+	  24: PUTL       	t18, R17
+	  25: INCEIPL       	$4
+
+	0x100037FC:  2F96000C  cmpi cr7,r22,12
+	  26: GETL       	R22, t20
+	  27: MOVL       	$0xC, t24
+	  28: CMPL       	t20, t24, t22  (-rSo)
+	  29: ICRFL       	t22, $0x7, CR
+	  30: INCEIPL       	$4
+
+	0x10003800:  92410488  stw r18,1160(r1)
+	  31: GETL       	R18, t26
+	  32: GETL       	R1, t28
+	  33: ADDL       	$0x488, t28
+	  34: STL       	t26, (t28)
+	  35: INCEIPL       	$4
+
+	0x10003804:  9261048C  stw r19,1164(r1)
+	  36: GETL       	R19, t30
+	  37: GETL       	R1, t32
+	  38: ADDL       	$0x48C, t32
+	  39: STL       	t30, (t32)
+	  40: INCEIPL       	$4
+
+	0x10003808:  7D134378  or r19,r8,r8
+	  41: GETL       	R8, t34
+	  42: PUTL       	t34, R19
+	  43: INCEIPL       	$4
+
+	0x1000380C:  92A10494  stw r21,1172(r1)
+	  44: GETL       	R21, t36
+	  45: GETL       	R1, t38
+	  46: ADDL       	$0x494, t38
+	  47: STL       	t36, (t38)
+	  48: INCEIPL       	$4
+
+	0x10003810:  3AA00000  li r21,0
+	  49: MOVL       	$0x0, t40
+	  50: PUTL       	t40, R21
+	  51: INCEIPL       	$4
+
+	0x10003814:  92E1049C  stw r23,1180(r1)
+	  52: GETL       	R23, t42
+	  53: GETL       	R1, t44
+	  54: ADDL       	$0x49C, t44
+	  55: STL       	t42, (t44)
+	  56: INCEIPL       	$4
+
+	0x10003818:  7C972378  or r23,r4,r4
+	  57: GETL       	R4, t46
+	  58: PUTL       	t46, R23
+	  59: INCEIPL       	$4
+
+	0x1000381C:  930104A0  stw r24,1184(r1)
+	  60: GETL       	R24, t48
+	  61: GETL       	R1, t50
+	  62: ADDL       	$0x4A0, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0x10003820:  7CA42B78  or r4,r5,r5
+	  65: GETL       	R5, t52
+	  66: PUTL       	t52, R4
+	  67: INCEIPL       	$4
+
+	0x10003824:  932104A4  stw r25,1188(r1)
+	  68: GETL       	R25, t54
+	  69: GETL       	R1, t56
+	  70: ADDL       	$0x4A4, t56
+	  71: STL       	t54, (t56)
+	  72: INCEIPL       	$4
+
+	0x10003828:  7CD83378  or r24,r6,r6
+	  73: GETL       	R6, t58
+	  74: PUTL       	t58, R24
+	  75: INCEIPL       	$4
+
+	0x1000382C:  93C104B8  stw r30,1208(r1)
+	  76: GETL       	R30, t60
+	  77: GETL       	R1, t62
+	  78: ADDL       	$0x4B8, t62
+	  79: STL       	t60, (t62)
+	  80: INCEIPL       	$4
+
+	0x10003830:  7C791B78  or r25,r3,r3
+	  81: GETL       	R3, t64
+	  82: PUTL       	t64, R25
+	  83: INCEIPL       	$4
+
+	0x10003834:  91C10478  stw r14,1144(r1)
+	  84: GETL       	R14, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x478, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0x10003838:  7FC802A6  mflr r30
+	  89: GETL       	LR, t70
+	  90: PUTL       	t70, R30
+	  91: INCEIPL       	$4
+
+	0x1000383C:  92010480  stw r16,1152(r1)
+	  92: GETL       	R16, t72
+	  93: GETL       	R1, t74
+	  94: ADDL       	$0x480, t74
+	  95: STL       	t72, (t74)
+	  96: INCEIPL       	$4
+
+	0x10003840:  92810490  stw r20,1168(r1)
+	  97: GETL       	R20, t76
+	  98: GETL       	R1, t78
+	  99: ADDL       	$0x490, t78
+	 100: STL       	t76, (t78)
+	 101: INCEIPL       	$4
+
+	0x10003844:  934104A8  stw r26,1192(r1)
+	 102: GETL       	R26, t80
+	 103: GETL       	R1, t82
+	 104: ADDL       	$0x4A8, t82
+	 105: STL       	t80, (t82)
+	 106: INCEIPL       	$4
+
+	0x10003848:  936104AC  stw r27,1196(r1)
+	 107: GETL       	R27, t84
+	 108: GETL       	R1, t86
+	 109: ADDL       	$0x4AC, t86
+	 110: STL       	t84, (t86)
+	 111: INCEIPL       	$4
+
+	0x1000384C:  938104B0  stw r28,1200(r1)
+	 112: GETL       	R28, t88
+	 113: GETL       	R1, t90
+	 114: ADDL       	$0x4B0, t90
+	 115: STL       	t88, (t90)
+	 116: INCEIPL       	$4
+
+	0x10003850:  93A104B4  stw r29,1204(r1)
+	 117: GETL       	R29, t92
+	 118: GETL       	R1, t94
+	 119: ADDL       	$0x4B4, t94
+	 120: STL       	t92, (t94)
+	 121: INCEIPL       	$4
+
+	0x10003854:  93E104BC  stw r31,1212(r1)
+	 122: GETL       	R31, t96
+	 123: GETL       	R1, t98
+	 124: ADDL       	$0x4BC, t98
+	 125: STL       	t96, (t98)
+	 126: INCEIPL       	$4
+
+	0x10003858:  82460028  lwz r18,40(r6)
+	 127: GETL       	R6, t100
+	 128: ADDL       	$0x28, t100
+	 129: LDL       	(t100), t102
+	 130: PUTL       	t102, R18
+	 131: INCEIPL       	$4
+
+	0x1000385C:  409D016C  bc 4,29,0x100039C8
+	 132: Jc29o       	$0x100039C8
+
+
+
+. 3164 100037E4 124
+. 91 E1 04 7C 7C EF 3B 78 90 01 04 C4 82 C6 00 08 92 21 04 84 3A 20 00 00 2F 96 00 0C 92 41 04 88 92 61 04 8C 7D 13 43 78 92 A1 04 94 3A A0 00 00 92 E1 04 9C 7C 97 23 78 93 01 04 A0 7C A4 2B 78 93 21 04 A4 7C D8 33 78 93 C1 04 B8 7C 79 1B 78 91 C1 04 78 7F C8 02 A6 92 01 04 80 92 81 04 90 93 41 04 A8 93 61 04 AC 93 81 04 B0 93 A1 04 B4 93 E1 04 BC 82 46 00 28 40 9D 01 6C
+==== BB 3165 (0x100039C8) approx BBs exec'd 0 ====
+
+	0x100039C8:  2F960000  cmpi cr7,r22,0
+	   0: GETL       	R22, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x100039CC:  40BEFE98  bc 5,30,0x10003864
+	   4: Jc30o       	$0x10003864
+
+
+
+. 3165 100039C8 8
+. 2F 96 00 00 40 BE FE 98
+==== BB 3166 (0x10003864) approx BBs exec'd 0 ====
+
+	0x10003864:  88040000  lbz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x10003868:  7C9A2378  or r26,r4,r4
+	   4: GETL       	R4, t4
+	   5: PUTL       	t4, R26
+	   6: INCEIPL       	$4
+
+	0x1000386C:  2F800000  cmpi cr7,r0,0
+	   7: GETL       	R0, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x10003870:  419E005C  bc 12,30,0x100038CC
+	  11: Js30o       	$0x100038CC
+
+
+
+. 3166 10003864 16
+. 88 04 00 00 7C 9A 23 78 2F 80 00 00 41 9E 00 5C
+==== BB 3167 (0x10003874) approx BBs exec'd 0 ====
+
+	0x10003874:  7C090774  extsb r9,r0
+	   0: GETB       	R0, t0
+	   1: WIDENL_Bs       	_st0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x10003878:  3B600000  li r27,0
+	   4: MOVL       	$0x0, t2
+	   5: PUTL       	t2, R27
+	   6: INCEIPL       	$4
+
+	0x1000387C:  2F89003F  cmpi cr7,r9,63
+	   7: GETL       	R9, t4
+	   8: MOVL       	$0x3F, t8
+	   9: CMPL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x10003880:  3B80FFFF  li r28,-1
+	  12: MOVL       	$0xFFFFFFFF, t10
+	  13: PUTL       	t10, R28
+	  14: INCEIPL       	$4
+
+	0x10003884:  9361045C  stw r27,1116(r1)
+	  15: GETL       	R27, t12
+	  16: GETL       	R1, t14
+	  17: ADDL       	$0x45C, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x10003888:  3A800000  li r20,0
+	  20: MOVL       	$0x0, t16
+	  21: PUTL       	t16, R20
+	  22: INCEIPL       	$4
+
+	0x1000388C:  39400000  li r10,0
+	  23: MOVL       	$0x0, t18
+	  24: PUTL       	t18, R10
+	  25: INCEIPL       	$4
+
+	0x10003890:  419D0064  bc 12,29,0x100038F4
+	  26: Js29o       	$0x100038F4
+
+
+
+. 3167 10003874 32
+. 7C 09 07 74 3B 60 00 00 2F 89 00 3F 3B 80 FF FF 93 61 04 5C 3A 80 00 00 39 40 00 00 41 9D 00 64
+==== BB 3168 (0x10003894) approx BBs exec'd 0 ====
+
+	0x10003894:  2F890026  cmpi cr7,r9,38
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x26, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10003898:  419C0140  bc 12,28,0x100039D8
+	   5: Js28o       	$0x100039D8
+
+
+
+. 3168 10003894 8
+. 2F 89 00 26 41 9C 01 40
+==== BB 3169 (0x100039D8) approx BBs exec'd 0 ====
+
+	0x100039D8:  2F890023  cmpi cr7,r9,35
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x23, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x100039DC:  419D0078  bc 12,29,0x10003A54
+	   5: Js29o       	$0x10003A54
+
+
+
+. 3169 100039D8 8
+. 2F 89 00 23 41 9D 00 78
+==== BB 3170 (0x10003A54) approx BBs exec'd 0 ====
+
+	0x10003A54:  2F890025  cmpi cr7,r9,37
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x25, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10003A58:  409EFEB8  bc 4,30,0x10003910
+	   5: Jc30o       	$0x10003910
+
+
+
+. 3170 10003A54 8
+. 2F 89 00 25 40 9E FE B8
+==== BB 3171 (0x10003A5C) approx BBs exec'd 0 ====
+
+	0x10003A5C:  8D1A0001  lbzu r8,1(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0x10003A60:  7D000774  extsb r0,r8
+	   6: GETB       	R8, t4
+	   7: WIDENL_Bs       	_st4
+	   8: PUTL       	t4, R0
+	   9: INCEIPL       	$4
+
+	0x10003A64:  2F800030  cmpi cr7,r0,48
+	  10: GETL       	R0, t6
+	  11: MOVL       	$0x30, t10
+	  12: CMPL       	t6, t10, t8  (-rSo)
+	  13: ICRFL       	t8, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x10003A68:  419E0018  bc 12,30,0x10003A80
+	  15: Js30o       	$0x10003A80
+
+
+
+. 3171 10003A5C 16
+. 8D 1A 00 01 7D 00 07 74 2F 80 00 30 41 9E 00 18
+==== BB 3172 (0x10003A6C) approx BBs exec'd 0 ====
+
+	0x10003A6C:  419D0D7C  bc 12,29,0x100047E8
+	   0: Js29o       	$0x100047E8
+
+
+
+. 3172 10003A6C 4
+. 41 9D 0D 7C
+==== BB 3173 (0x100047E8) approx BBs exec'd 0 ====
+
+	0x100047E8:  2F80005E  cmpi cr7,r0,94
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x5E, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x100047EC:  419E0018  bc 12,30,0x10004804
+	   5: Js30o       	$0x10004804
+
+
+
+. 3173 100047E8 8
+. 2F 80 00 5E 41 9E 00 18
+==== BB 3174 (0x100047F0) approx BBs exec'd 0 ====
+
+	0x100047F0:  2F80005F  cmpi cr7,r0,95
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x5F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x100047F4:  419EF28C  bc 12,30,0x10003A80
+	   5: Js30o       	$0x10003A80
+
+
+
+. 3174 100047F0 8
+. 2F 80 00 5F 41 9E F2 8C
+==== BB 3175 (0x100047F8) approx BBs exec'd 0 ====
+
+	0x100047F8:  4BFFF2DC  b 0x10003AD4
+	   0: JMPo       	$0x10003AD4  ($4)
+
+
+
+. 3175 100047F8 4
+. 4B FF F2 DC
+==== BB 3176 (0x10003AD4) approx BBs exec'd 0 ====
+
+	0x10003AD4:  7D090774  extsb r9,r8
+	   0: GETB       	R8, t0
+	   1: WIDENL_Bs       	_st0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x10003AD8:  3929FFD0  addi r9,r9,-48
+	   4: GETL       	R9, t2
+	   5: ADDL       	$0xFFFFFFD0, t2
+	   6: PUTL       	t2, R9
+	   7: INCEIPL       	$4
+
+	0x10003ADC:  2B890009  cmpli cr7,r9,9
+	   8: GETL       	R9, t4
+	   9: MOVL       	$0x9, t8
+	  10: CMPUL       	t4, t8, t6  (-rSo)
+	  11: ICRFL       	t6, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x10003AE0:  419D0048  bc 12,29,0x10003B28
+	  13: Js29o       	$0x10003B28
+
+
+
+. 3176 10003AD4 16
+. 7D 09 07 74 39 29 FF D0 2B 89 00 09 41 9D 00 48
+==== BB 3177 (0x10003B28) approx BBs exec'd 0 ====
+
+	0x10003B28:  7D000774  extsb r0,r8
+	   0: GETB       	R8, t0
+	   1: WIDENL_Bs       	_st0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x10003B2C:  2F800045  cmpi cr7,r0,69
+	   4: GETL       	R0, t2
+	   5: MOVL       	$0x45, t6
+	   6: CMPL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x10003B30:  419E0CDC  bc 12,30,0x1000480C
+	   9: Js30o       	$0x1000480C
+
+
+
+. 3177 10003B28 12
+. 7D 00 07 74 2F 80 00 45 41 9E 0C DC
+==== BB 3178 (0x10003B34) approx BBs exec'd 0 ====
+
+	0x10003B34:  2F80004F  cmpi cr7,r0,79
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x4F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10003B38:  3A000000  li r16,0
+	   5: MOVL       	$0x0, t6
+	   6: PUTL       	t6, R16
+	   7: INCEIPL       	$4
+
+	0x10003B3C:  419E0CD0  bc 12,30,0x1000480C
+	   8: Js30o       	$0x1000480C
+
+
+
+. 3178 10003B34 12
+. 2F 80 00 4F 3A 00 00 00 41 9E 0C D0
+==== BB 3179 (0x10003B40) approx BBs exec'd 0 ====
+
+	0x10003B40:  7D0E0774  extsb r14,r8
+	   0: GETB       	R8, t0
+	   1: WIDENL_Bs       	_st0
+	   2: PUTL       	t0, R14
+	   3: INCEIPL       	$4
+
+	0x10003B44:  2B8E007A  cmpli cr7,r14,122
+	   4: GETL       	R14, t2
+	   5: MOVL       	$0x7A, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x10003B48:  419D0138  bc 12,29,0x10003C80
+	   9: Js29o       	$0x10003C80
+
+
+
+. 3179 10003B40 12
+. 7D 0E 07 74 2B 8E 00 7A 41 9D 01 38
+==== BB 3180 (0x10003B4C) approx BBs exec'd 0 ====
+
+	0x10003B4C:  817E0130  lwz r11,304(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x130, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x10003B50:  55C0103A  rlwinm r0,r14,2,0,29
+	   5: GETL       	R14, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10003B54:  7D2B002E  lwzx r9,r11,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R11, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x10003B58:  7D295A14  add r9,r9,r11
+	  15: GETL       	R9, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x10003B5C:  7D2903A6  mtctr r9
+	  20: GETL       	R9, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x10003B60:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+
+. 3180 10003B4C 24
+. 81 7E 01 30 55 C0 10 3A 7D 2B 00 2E 7D 29 5A 14 7D 29 03 A6 4E 80 04 20
+==== BB 3181 (0x10003C5C) approx BBs exec'd 0 ====
+
+	0x10003C5C:  2F100000  cmpi cr6,r16,0
+	   0: GETL       	R16, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x10003C60:  409A0020  bc 4,26,0x10003C80
+	   4: Jc26o       	$0x10003C80
+
+
+
+. 3181 10003C5C 8
+. 2F 10 00 00 40 9A 00 20
+==== BB 3182 (0x10003C64) approx BBs exec'd 0 ====
+
+	0x10003C64:  2F8A0000  cmpi cr7,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10003C68:  41BEFF1C  bc 13,30,0x10003B84
+	   4: Js30o       	$0x10003B84
+
+
+
+. 3182 10003C64 8
+. 2F 8A 00 00 41 BE FF 1C
+==== BB 3183 (0x10003B84) approx BBs exec'd 0 ====
+
+	0x10003B84:  38000025  li r0,37
+	   0: MOVL       	$0x25, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10003B88:  39210459  addi r9,r1,1113
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x459, t2
+	   5: PUTL       	t2, R9
+	   6: INCEIPL       	$4
+
+	0x10003B8C:  98010458  stb r0,1112(r1)
+	   7: GETL       	R0, t4
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x458, t6
+	  10: STB       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x10003B90:  419A000C  bc 12,26,0x10003B9C
+	  12: Js26o       	$0x10003B9C
+
+
+
+. 3183 10003B84 16
+. 38 00 00 25 39 21 04 59 98 01 04 58 41 9A 00 0C
+==== BB 3184 (0x10003B9C) approx BBs exec'd 0 ====
+
+	0x10003B9C:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10003BA0:  3A010020  addi r16,r1,32
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x20, t2
+	   5: PUTL       	t2, R16
+	   6: INCEIPL       	$4
+
+	0x10003BA4:  98090001  stb r0,1(r9)
+	   7: GETL       	R0, t4
+	   8: GETL       	R9, t6
+	   9: ADDL       	$0x1, t6
+	  10: STB       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x10003BA8:  38A10458  addi r5,r1,1112
+	  12: GETL       	R1, t8
+	  13: ADDL       	$0x458, t8
+	  14: PUTL       	t8, R5
+	  15: INCEIPL       	$4
+
+	0x10003BAC:  99C90000  stb r14,0(r9)
+	  16: GETL       	R14, t10
+	  17: GETL       	R9, t12
+	  18: STB       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0x10003BB0:  7E038378  or r3,r16,r16
+	  20: GETL       	R16, t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0x10003BB4:  38800400  li r4,1024
+	  23: MOVL       	$0x400, t16
+	  24: PUTL       	t16, R4
+	  25: INCEIPL       	$4
+
+	0x10003BB8:  7F06C378  or r6,r24,r24
+	  26: GETL       	R24, t18
+	  27: PUTL       	t18, R6
+	  28: INCEIPL       	$4
+
+	0x10003BBC:  48017019  bl 0x1001ABD4
+	  29: MOVL       	$0x10003BC0, t20
+	  30: PUTL       	t20, LR
+	  31: JMPo-c       	$0x1001ABD4  ($4)
+
+
+
+. 3184 10003B9C 36
+. 38 00 00 00 3A 01 00 20 98 09 00 01 38 A1 04 58 99 C9 00 00 7E 03 83 78 38 80 04 00 7F 06 C3 78 48 01 70 19
+==== BB 3185 (0x1001ABD4) approx BBs exec'd 0 ====
+
+	0x1001ABD4:  39600050  li r11,80
+	   0: MOVL       	$0x50, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001ABD8:  4BFFFF34  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3185 1001ABD4 8
+. 39 60 00 50 4B FF FF 34
+==== BB 3186 strftime(0xFEEC444) approx BBs exec'd 0 ====
+
+	0xFEEC444:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEEC448:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0xFEEC44C:  480BBA05  bl 0xFFA7E50
+	   9: MOVL       	$0xFEEC450, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3186 FEEC444 12
+. 94 21 FF F0 7D 88 02 A6 48 0B BA 05
+==== BB 3187 (0xFEEC450) approx BBs exec'd 0 ====
+
+	0xFEEC450:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEEC454:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEEC458:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0xFEEC45C:  80FE1D50  lwz r7,7504(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x1D50, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R7
+	  15: INCEIPL       	$4
+
+	0xFEEC460:  83C10008  lwz r30,8(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x8, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0xFEEC464:  7D271214  add r9,r7,r2
+	  21: GETL       	R7, t16
+	  22: GETL       	R2, t18
+	  23: ADDL       	t16, t18
+	  24: PUTL       	t18, R9
+	  25: INCEIPL       	$4
+
+	0xFEEC468:  38210010  addi r1,r1,16
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x10, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0xFEEC46C:  80E90000  lwz r7,0(r9)
+	  30: GETL       	R9, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R7
+	  33: INCEIPL       	$4
+
+	0xFEEC470:  48000034  b 0xFEEC4A4
+	  34: JMPo       	$0xFEEC4A4  ($4)
+
+
+
+. 3187 FEEC450 36
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 FE 1D 50 83 C1 00 08 7D 27 12 14 38 21 00 10 80 E9 00 00 48 00 00 34
+==== BB 3188 strftime_l(0xFEEC4A4) approx BBs exec'd 0 ====
+
+	0xFEEC4A4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEEC4A8:  9421FF60  stwu r1,-160(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF60, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEEC4AC:  480BB9A5  bl 0xFFA7E50
+	   9: MOVL       	$0xFEEC4B0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3188 FEEC4A4 12
+. 7C 08 02 A6 94 21 FF 60 48 0B B9 A5
+==== BB 3189 (0xFEEC4B0) approx BBs exec'd 0 ====
+
+	0xFEEC4B0:  93C10098  stw r30,152(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x98, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEEC4B4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEEC4B8:  7D800026  mfcr r12
+	   8: GETL       	CR, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0xFEEC4BC:  900100A4  stw r0,164(r1)
+	  11: GETL       	R0, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0xA4, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFEEC4C0:  9261006C  stw r19,108(r1)
+	  16: GETL       	R19, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x6C, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFEEC4C4:  82660008  lwz r19,8(r6)
+	  21: GETL       	R6, t16
+	  22: ADDL       	$0x8, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R19
+	  25: INCEIPL       	$4
+
+	0xFEEC4C8:  93E1009C  stw r31,156(r1)
+	  26: GETL       	R31, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x9C, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFEEC4CC:  7CBF2B78  or r31,r5,r5
+	  31: GETL       	R5, t24
+	  32: PUTL       	t24, R31
+	  33: INCEIPL       	$4
+
+	0xFEEC4D0:  92010060  stw r16,96(r1)
+	  34: GETL       	R16, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x60, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFEEC4D4:  92210064  stw r17,100(r1)
+	  39: GETL       	R17, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x64, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFEEC4D8:  92810070  stw r20,112(r1)
+	  44: GETL       	R20, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x70, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFEEC4DC:  7C942378  or r20,r4,r4
+	  49: GETL       	R4, t38
+	  50: PUTL       	t38, R20
+	  51: INCEIPL       	$4
+
+	0xFEEC4E0:  92A10074  stw r21,116(r1)
+	  52: GETL       	R21, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x74, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0xFEEC4E4:  3AA00000  li r21,0
+	  57: MOVL       	$0x0, t44
+	  58: PUTL       	t44, R21
+	  59: INCEIPL       	$4
+
+	0xFEEC4E8:  92C10078  stw r22,120(r1)
+	  60: GETL       	R22, t46
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x78, t48
+	  63: STL       	t46, (t48)
+	  64: INCEIPL       	$4
+
+	0xFEEC4EC:  7CF63B78  or r22,r7,r7
+	  65: GETL       	R7, t50
+	  66: PUTL       	t50, R22
+	  67: INCEIPL       	$4
+
+	0xFEEC4F0:  92E1007C  stw r23,124(r1)
+	  68: GETL       	R23, t52
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x7C, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0xFEEC4F4:  7CD73378  or r23,r6,r6
+	  73: GETL       	R6, t56
+	  74: PUTL       	t56, R23
+	  75: INCEIPL       	$4
+
+	0xFEEC4F8:  9361008C  stw r27,140(r1)
+	  76: GETL       	R27, t58
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x8C, t60
+	  79: STL       	t58, (t60)
+	  80: INCEIPL       	$4
+
+	0xFEEC4FC:  7C7B1B78  or r27,r3,r3
+	  81: GETL       	R3, t62
+	  82: PUTL       	t62, R27
+	  83: INCEIPL       	$4
+
+	0xFEEC500:  91C10058  stw r14,88(r1)
+	  84: GETL       	R14, t64
+	  85: GETL       	R1, t66
+	  86: ADDL       	$0x58, t66
+	  87: STL       	t64, (t66)
+	  88: INCEIPL       	$4
+
+	0xFEEC504:  91E1005C  stw r15,92(r1)
+	  89: GETL       	R15, t68
+	  90: GETL       	R1, t70
+	  91: ADDL       	$0x5C, t70
+	  92: STL       	t68, (t70)
+	  93: INCEIPL       	$4
+
+	0xFEEC508:  92410068  stw r18,104(r1)
+	  94: GETL       	R18, t72
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0x68, t74
+	  97: STL       	t72, (t74)
+	  98: INCEIPL       	$4
+
+	0xFEEC50C:  93010080  stw r24,128(r1)
+	  99: GETL       	R24, t76
+	 100: GETL       	R1, t78
+	 101: ADDL       	$0x80, t78
+	 102: STL       	t76, (t78)
+	 103: INCEIPL       	$4
+
+	0xFEEC510:  93210084  stw r25,132(r1)
+	 104: GETL       	R25, t80
+	 105: GETL       	R1, t82
+	 106: ADDL       	$0x84, t82
+	 107: STL       	t80, (t82)
+	 108: INCEIPL       	$4
+
+	0xFEEC514:  93410088  stw r26,136(r1)
+	 109: GETL       	R26, t84
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x88, t86
+	 112: STL       	t84, (t86)
+	 113: INCEIPL       	$4
+
+	0xFEEC518:  93810090  stw r28,144(r1)
+	 114: GETL       	R28, t88
+	 115: GETL       	R1, t90
+	 116: ADDL       	$0x90, t90
+	 117: STL       	t88, (t90)
+	 118: INCEIPL       	$4
+
+	0xFEEC51C:  93A10094  stw r29,148(r1)
+	 119: GETL       	R29, t92
+	 120: GETL       	R1, t94
+	 121: ADDL       	$0x94, t94
+	 122: STL       	t92, (t94)
+	 123: INCEIPL       	$4
+
+	0xFEEC520:  91810054  stw r12,84(r1)
+	 124: GETL       	R12, t96
+	 125: GETL       	R1, t98
+	 126: ADDL       	$0x54, t98
+	 127: STL       	t96, (t98)
+	 128: INCEIPL       	$4
+
+	0xFEEC524:  82270008  lwz r17,8(r7)
+	 129: GETL       	R7, t100
+	 130: ADDL       	$0x8, t100
+	 131: LDL       	(t100), t102
+	 132: PUTL       	t102, R17
+	 133: INCEIPL       	$4
+
+	0xFEEC528:  82060028  lwz r16,40(r6)
+	 134: GETL       	R6, t104
+	 135: ADDL       	$0x28, t104
+	 136: LDL       	(t104), t106
+	 137: PUTL       	t106, R16
+	 138: INCEIPL       	$4
+
+	0xFEEC52C:  4BFFB575  bl 0xFEE7AA0
+	 139: MOVL       	$0xFEEC530, t108
+	 140: PUTL       	t108, LR
+	 141: JMPo-c       	$0xFEE7AA0  ($4)
+
+
+
+. 3189 FEEC4B0 128
+. 93 C1 00 98 7F C8 02 A6 7D 80 00 26 90 01 00 A4 92 61 00 6C 82 66 00 08 93 E1 00 9C 7C BF 2B 78 92 01 00 60 92 21 00 64 92 81 00 70 7C 94 23 78 92 A1 00 74 3A A0 00 00 92 C1 00 78 7C F6 3B 78 92 E1 00 7C 7C D7 33 78 93 61 00 8C 7C 7B 1B 78 91 C1 00 58 91 E1 00 5C 92 41 00 68 93 01 00 80 93 21 00 84 93 41 00 88 93 81 00 90 93 A1 00 94 91 81 00 54 82 27 00 08 82 06 00 28 4B FF B5 75
+==== BB 3190 tzset(0xFEE7AA0) approx BBs exec'd 0 ====
+
+	0xFEE7AA0:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEE7AA4:  7C6802A6  mflr r3
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFEE7AA8:  480C03A9  bl 0xFFA7E50
+	   9: MOVL       	$0xFEE7AAC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3190 FEE7AA0 12
+. 94 21 FF E0 7C 68 02 A6 48 0C 03 A9
+==== BB 3191 (0xFEE7AAC) approx BBs exec'd 0 ====
+
+	0xFEE7AAC:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEE7AB0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEE7AB4:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEE7AB8:  93E1001C  stw r31,28(r1)
+	  13: GETL       	R31, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x1C, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEE7ABC:  3BA00000  li r29,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R29
+	  20: INCEIPL       	$4
+
+	0xFEE7AC0:  90610024  stw r3,36(r1)
+	  21: GETL       	R3, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x24, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEE7AC4:  38000001  li r0,1
+	  26: MOVL       	$0x1, t20
+	  27: PUTL       	t20, R0
+	  28: INCEIPL       	$4
+
+	0xFEE7AC8:  83FE08A0  lwz r31,2208(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x8A0, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R31
+	  33: INCEIPL       	$4
+
+	0xFEE7ACC:  7D20F828  lwarx r9,r0,r31
+	  34: GETL       	R31, t26
+	  35: LOCKo       	
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R9
+	  38: INCEIPL       	$4
+
+	0xFEE7AD0:  7C09E800  cmp cr0,r9,r29
+	  39: GETL       	R9, t30
+	  40: GETL       	R29, t32
+	  41: CMPL       	t30, t32, t34  (-rSo)
+	  42: ICRFL       	t34, $0x0, CR
+	  43: INCEIPL       	$4
+
+	0xFEE7AD4:  4082000C  bc 4,2,0xFEE7AE0
+	  44: Jc02o       	$0xFEE7AE0
+
+
+
+. 3191 FEE7AAC 44
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 93 E1 00 1C 3B A0 00 00 90 61 00 24 38 00 00 01 83 FE 08 A0 7D 20 F8 28 7C 09 E8 00 40 82 00 0C
+==== BB 3192 (0xFEE7AD8) approx BBs exec'd 0 ====
+
+	0xFEE7AD8:  7C00F92D  stwcx. r0,r0,r31
+	   0: GETL       	R31, t0
+	   1: GETL       	R0, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEE7ADC:  40A2FFF0  bc 5,2,0xFEE7ACC
+	   6: Jc02o       	$0xFEE7ACC
+
+
+
+. 3192 FEE7AD8 8
+. 7C 00 F9 2D 40 A2 FF F0
+==== BB 3193 (0xFEE7ACC) approx BBs exec'd 0 ====
+
+	0xFEE7ACC:  7D20F828  lwarx r9,r0,r31
+	   0: GETL       	R31, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEE7AD0:  7C09E800  cmp cr0,r9,r29
+	   5: GETL       	R9, t4
+	   6: GETL       	R29, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEE7AD4:  4082000C  bc 4,2,0xFEE7AE0
+	  10: Jc02o       	$0xFEE7AE0
+
+
+
+. 3193 FEE7ACC 12
+. 7D 20 F8 28 7C 09 E8 00 40 82 00 0C
+==== BB 3194 (0xFEE7AE0) approx BBs exec'd 0 ====
+
+	0xFEE7AE0:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFEE7AE4:  2F890000  cmpi cr7,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEE7AE8:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEE7AEC:  409E0078  bc 4,30,0xFEE7B64
+	   8: Jc30o       	$0xFEE7B64
+
+
+
+. 3194 FEE7AE0 16
+. 4C 00 01 2C 2F 89 00 00 7F E3 FB 78 40 9E 00 78
+==== BB 3195 (0xFEE7AF0) approx BBs exec'd 0 ====
+
+	0xFEE7AF0:  38800001  li r4,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEE7AF4:  38600001  li r3,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0xFEE7AF8:  4BFFF2E1  bl 0xFEE6DD8
+	   6: MOVL       	$0xFEE7AFC, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFEE6DD8  ($4)
+
+
+
+. 3195 FEE7AF0 12
+. 38 80 00 01 38 60 00 01 4B FF F2 E1
+==== BB 3196 (0xFEE6EE8) approx BBs exec'd 0 ====
+
+	0xFEE6EE8:  835E0898  lwz r26,2200(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x898, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFEE6EEC:  313CFFFF  addic r9,r28,-1
+	   5: GETL       	R28, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xFEE6EF0:  7EC9E110  subfe r22,r9,r28
+	   9: GETL       	R9, t6
+	  10: GETL       	R28, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R22
+	  13: INCEIPL       	$4
+
+	0xFEE6EF4:  83BA0000  lwz r29,0(r26)
+	  14: GETL       	R26, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0xFEE6EF8:  315DFFFF  addic r10,r29,-1
+	  18: GETL       	R29, t14
+	  19: ADCL       	$0xFFFFFFFF, t14  (-wCa)
+	  20: PUTL       	t14, R10
+	  21: INCEIPL       	$4
+
+	0xFEE6EFC:  7EEAE910  subfe r23,r10,r29
+	  22: GETL       	R10, t16
+	  23: GETL       	R29, t18
+	  24: SBBL       	t16, t18  (-rCa-wCa)
+	  25: PUTL       	t18, R23
+	  26: INCEIPL       	$4
+
+	0xFEE6F00:  7EEBB039  and. r11,r23,r22
+	  27: GETL       	R23, t20
+	  28: GETL       	R22, t22
+	  29: ANDL       	t20, t22
+	  30: PUTL       	t22, R11
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x0, CR
+	  33: INCEIPL       	$4
+
+	0xFEE6F04:  41820018  bc 12,2,0xFEE6F1C
+	  34: Js02o       	$0xFEE6F1C
+
+
+
+. 3196 FEE6EE8 32
+. 83 5E 08 98 31 3C FF FF 7E C9 E1 10 83 BA 00 00 31 5D FF FF 7E EA E9 10 7E EB B0 39 41 82 00 18
+==== BB 3197 (0xFEE7378) approx BBs exec'd 0 ====
+
+	0xFEE7378:  839E08AC  lwz r28,2220(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8AC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFEE737C:  2E1C0000  cmpi cr4,r28,0
+	   5: GETL       	R28, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0xFEE7380:  4BFFFBA4  b 0xFEE6F24
+	   9: JMPo       	$0xFEE6F24  ($4)
+
+
+
+. 3197 FEE7378 12
+. 83 9E 08 AC 2E 1C 00 00 4B FF FB A4
+==== BB 3198 (0xFEE70B4) approx BBs exec'd 0 ====
+
+	0xFEE70B4:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEE70B8:  480C3279  bl 0xFFAA330
+	   3: MOVL       	$0xFEE70BC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFFAA330  ($4)
+
+
+
+. 3198 FEE70B4 8
+. 7F A3 EB 78 48 0C 32 79
+==== BB 3199 (0xFEE70BC) approx BBs exec'd 0 ====
+
+	0xFEE70BC:  4BFFFE80  b 0xFEE6F3C
+	   0: JMPo       	$0xFEE6F3C  ($4)
+
+
+
+. 3199 FEE70BC 4
+. 4B FF FE 80
+==== BB 3200 (0xFEE7FA4) approx BBs exec'd 0 ====
+
+	0xFEE7FA4:  38600003  li r3,3
+	   0: MOVL       	$0x3, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEE7FA8:  7F64DB78  or r4,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFEE7FAC:  38BF0040  addi r5,r31,64
+	   6: GETL       	R31, t4
+	   7: ADDL       	$0x40, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0xFEE7FB0:  480392BD  bl 0xFF2126C
+	  10: MOVL       	$0xFEE7FB4, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0xFF2126C  ($4)
+
+
+
+. 3200 FEE7FA4 16
+. 38 60 00 03 7F 64 DB 78 38 BF 00 40 48 03 92 BD
+==== BB 3201 __xstat64@@GLIBC_2.2(0xFF2126C) approx BBs exec'd 0 ====
+
+	0xFF2126C:  7D8802A6  mflr r12
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0xFF21270:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFF21274:  48086BDD  bl 0xFFA7E50
+	   9: MOVL       	$0xFF21278, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3201 FF2126C 12
+. 7D 88 02 A6 94 21 FF F0 48 08 6B DD
+==== BB 3202 (0xFF21278) approx BBs exec'd 0 ====
+
+	0xFF21278:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF2127C:  7C832378  or r3,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFF21280:  7FC802A6  mflr r30
+	   8: GETL       	LR, t6
+	   9: PUTL       	t6, R30
+	  10: INCEIPL       	$4
+
+	0xFF21284:  7CA42B78  or r4,r5,r5
+	  11: GETL       	R5, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0xFF21288:  380000C3  li r0,195
+	  14: MOVL       	$0xC3, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFF2128C:  7D8803A6  mtlr r12
+	  17: GETL       	R12, t12
+	  18: PUTL       	t12, LR
+	  19: INCEIPL       	$4
+
+	0xFF21290:  44000002  sc
+	  20: JMPo-sys       	$0xFF21294  ($4)
+
+
+
+. 3202 FF21278 28
+. 93 C1 00 08 7C 83 23 78 7F C8 02 A6 7C A4 2B 78 38 00 00 C3 7D 88 03 A6 44 00 00 02
+==== BB 3203 (0xFF21294) approx BBs exec'd 0 ====
+
+	0xFF21294:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF21298:  74091000  andis. r9,r0,0x1000
+	   3: GETL       	R0, t2
+	   4: ANDL       	$0x10000000, t2
+	   5: PUTL       	t2, R9
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFF2129C:  813E1C4C  lwz r9,7244(r30)
+	   9: GETL       	R30, t6
+	  10: ADDL       	$0x1C4C, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFF212A0:  7D291214  add r9,r9,r2
+	  14: GETL       	R9, t10
+	  15: GETL       	R2, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0xFF212A4:  40820010  bc 4,2,0xFF212B4
+	  19: Jc02o       	$0xFF212B4
+
+
+
+. 3203 FF21294 20
+. 7C 00 00 26 74 09 10 00 81 3E 1C 4C 7D 29 12 14 40 82 00 10
+==== BB 3204 (0xFF212A8) approx BBs exec'd 0 ====
+
+	0xFF212A8:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0xFF212AC:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFF212B0:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+
+. 3204 FF212A8 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+==== BB 3205 (0xFEE7FB4) approx BBs exec'd 0 ====
+
+	0xFEE7FB4:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEE7FB8:  418601BC  bc 12,6,0xFEE8174
+	   4: Js06o       	$0xFEE8174
+
+
+
+. 3205 FEE7FB4 8
+. 2C 83 00 00 41 86 01 BC
+==== BB 3206 (0xFEE8174) approx BBs exec'd 0 ====
+
+	0xFEE8174:  817E08D8  lwz r11,2264(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8D8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFEE8178:  821F0048  lwz r16,72(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x48, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R16
+	   9: INCEIPL       	$4
+
+	0xFEE817C:  81EB0000  lwz r15,0(r11)
+	  10: GETL       	R11, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R15
+	  13: INCEIPL       	$4
+
+	0xFEE8180:  7C107800  cmp cr0,r16,r15
+	  14: GETL       	R16, t12
+	  15: GETL       	R15, t14
+	  16: CMPL       	t12, t14, t16  (-rSo)
+	  17: ICRFL       	t16, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFEE8184:  4082FE38  bc 4,2,0xFEE7FBC
+	  19: Jc02o       	$0xFEE7FBC
+
+
+
+. 3206 FEE8174 20
+. 81 7E 08 D8 82 1F 00 48 81 EB 00 00 7C 10 78 00 40 82 FE 38
+==== BB 3207 (0xFEE8188) approx BBs exec'd 0 ====
+
+	0xFEE8188:  822B0004  lwz r17,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0xFEE818C:  825F004C  lwz r18,76(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x4C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R18
+	   9: INCEIPL       	$4
+
+	0xFEE8190:  7F128800  cmp cr6,r18,r17
+	  10: GETL       	R18, t8
+	  11: GETL       	R17, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0xFEE8194:  409AFE28  bc 4,26,0xFEE7FBC
+	  15: Jc26o       	$0xFEE7FBC
+
+
+
+. 3207 FEE8188 16
+. 82 2B 00 04 82 5F 00 4C 7F 12 88 00 40 9A FE 28
+==== BB 3208 (0xFEE8198) approx BBs exec'd 0 ====
+
+	0xFEE8198:  817E08D0  lwz r11,2256(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8D0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFEE819C:  82BF0040  lwz r21,64(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x40, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R21
+	   9: INCEIPL       	$4
+
+	0xFEE81A0:  826B0000  lwz r19,0(r11)
+	  10: GETL       	R11, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R19
+	  13: INCEIPL       	$4
+
+	0xFEE81A4:  7F959800  cmp cr7,r21,r19
+	  14: GETL       	R21, t12
+	  15: GETL       	R19, t14
+	  16: CMPL       	t12, t14, t16  (-rSo)
+	  17: ICRFL       	t16, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFEE81A8:  409EFE14  bc 4,30,0xFEE7FBC
+	  19: Jc30o       	$0xFEE7FBC
+
+
+
+. 3208 FEE8198 20
+. 81 7E 08 D0 82 BF 00 40 82 6B 00 00 7F 95 98 00 40 9E FE 14
+==== BB 3209 (0xFEE81AC) approx BBs exec'd 0 ====
+
+	0xFEE81AC:  830B0004  lwz r24,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0xFEE81B0:  833F0044  lwz r25,68(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x44, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0xFEE81B4:  7C99C000  cmp cr1,r25,r24
+	  10: GETL       	R25, t8
+	  11: GETL       	R24, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0xFEE81B8:  4086FE04  bc 4,6,0xFEE7FBC
+	  15: Jc06o       	$0xFEE7FBC
+
+
+
+. 3209 FEE81AC 16
+. 83 0B 00 04 83 3F 00 44 7C 99 C0 00 40 86 FE 04
+==== BB 3210 (0xFEE81BC) approx BBs exec'd 0 ====
+
+	0xFEE81BC:  80DE08D4  lwz r6,2260(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x8D4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFEE81C0:  801F0090  lwz r0,144(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x90, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0xFEE81C4:  83460000  lwz r26,0(r6)
+	  10: GETL       	R6, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R26
+	  13: INCEIPL       	$4
+
+	0xFEE81C8:  7C00D000  cmp cr0,r0,r26
+	  14: GETL       	R0, t12
+	  15: GETL       	R26, t14
+	  16: CMPL       	t12, t14, t16  (-rSo)
+	  17: ICRFL       	t16, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFEE81CC:  4082FDF0  bc 4,2,0xFEE7FBC
+	  19: Jc02o       	$0xFEE7FBC
+
+
+
+. 3210 FEE81BC 20
+. 80 DE 08 D4 80 1F 00 90 83 46 00 00 7C 00 D0 00 40 82 FD F0
+==== BB 3211 (0xFEE81D0) approx BBs exec'd 0 ====
+
+	0xFEE81D0:  39400001  li r10,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFEE81D4:  91540000  stw r10,0(r20)
+	   3: GETL       	R10, t2
+	   4: GETL       	R20, t4
+	   5: STL       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFEE81D8:  4BFFFCDC  b 0xFEE7EB4
+	   7: JMPo       	$0xFEE7EB4  ($4)
+
+
+
+. 3211 FEE81D0 12
+. 39 40 00 01 91 54 00 00 4B FF FC DC
+==== BB 3212 (0xFEE7AFC) approx BBs exec'd 0 ====
+
+	0xFEE7AFC:  80BE1DC0  lwz r5,7616(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DC0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFEE7B00:  815E089C  lwz r10,2204(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x89C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0xFEE7B04:  80850000  lwz r4,0(r5)
+	  10: GETL       	R5, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R4
+	  13: INCEIPL       	$4
+
+	0xFEE7B08:  817E1AE4  lwz r11,6884(r30)
+	  14: GETL       	R30, t12
+	  15: ADDL       	$0x1AE4, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R11
+	  18: INCEIPL       	$4
+
+	0xFEE7B0C:  2C040000  cmpi cr0,r4,0
+	  19: GETL       	R4, t16
+	  20: CMP0L       	t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x0, CR
+	  22: INCEIPL       	$4
+
+	0xFEE7B10:  40820014  bc 4,2,0xFEE7B24
+	  23: Jc02o       	$0xFEE7B24
+
+
+
+. 3212 FEE7AFC 24
+. 80 BE 1D C0 81 5E 08 9C 80 85 00 00 81 7E 1A E4 2C 04 00 00 40 82 00 14
+==== BB 3213 (0xFEE7B24) approx BBs exec'd 0 ====
+
+	0xFEE7B24:  7C0004AC  sync
+	   0: INCEIPL       	$4
+
+	0xFEE7B28:  7D00F828  lwarx r8,r0,r31
+	   1: GETL       	R31, t0
+	   2: LOCKo       	
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0xFEE7B2C:  7FA0F92D  stwcx. r29,r0,r31
+	   6: GETL       	R31, t4
+	   7: GETL       	R29, t6
+	   8: LOCKo       	
+	   9: STL       	t6, (t4)  (-rSo)
+	  10: ICRFL       	cr, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0xFEE7B30:  40A2FFF8  bc 5,2,0xFEE7B28
+	  12: Jc02o       	$0xFEE7B28
+
+
+
+. 3213 FEE7B24 16
+. 7C 00 04 AC 7D 00 F8 28 7F A0 F9 2D 40 A2 FF F8
+==== BB 3214 (0xFEE7B34) approx BBs exec'd 0 ====
+
+	0xFEE7B34:  2C880001  cmpi cr1,r8,1
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFEE7B38:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t6
+	   6: PUTL       	t6, R3
+	   7: INCEIPL       	$4
+
+	0xFEE7B3C:  380000DD  li r0,221
+	   8: MOVL       	$0xDD, t8
+	   9: PUTL       	t8, R0
+	  10: INCEIPL       	$4
+
+	0xFEE7B40:  38800001  li r4,1
+	  11: MOVL       	$0x1, t10
+	  12: PUTL       	t10, R4
+	  13: INCEIPL       	$4
+
+	0xFEE7B44:  41850028  bc 12,5,0xFEE7B6C
+	  14: Js05o       	$0xFEE7B6C
+
+
+
+. 3214 FEE7B34 20
+. 2C 88 00 01 7F E3 FB 78 38 00 00 DD 38 80 00 01 41 85 00 28
+==== BB 3215 (0xFEE7B48) approx BBs exec'd 0 ====
+
+	0xFEE7B48:  81410024  lwz r10,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFEE7B4C:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFEE7B50:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0xFEE7B54:  7D4803A6  mtlr r10
+	  15: GETL       	R10, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFEE7B58:  83E1001C  lwz r31,28(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0xFEE7B5C:  38210020  addi r1,r1,32
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x20, t18
+	  25: PUTL       	t18, R1
+	  26: INCEIPL       	$4
+
+	0xFEE7B60:  4E800020  blr
+	  27: GETL       	LR, t20
+	  28: JMPo-r       	t20  ($4)
+
+
+
+. 3215 FEE7B48 28
+. 81 41 00 24 83 A1 00 14 83 C1 00 18 7D 48 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3216 (0xFEEC530) approx BBs exec'd 0 ====
+
+	0xFEEC530:  2F93000C  cmpi cr7,r19,12
+	   0: GETL       	R19, t0
+	   1: MOVL       	$0xC, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEEC534:  409D00EC  bc 4,29,0xFEEC620
+	   5: Jc29o       	$0xFEEC620
+
+
+
+. 3216 FEEC530 8
+. 2F 93 00 0C 40 9D 00 EC
+==== BB 3217 (0xFEEC620) approx BBs exec'd 0 ====
+
+	0xFEEC620:  2C130000  cmpi cr0,r19,0
+	   0: GETL       	R19, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEEC624:  40A2FF18  bc 5,2,0xFEEC53C
+	   4: Jc02o       	$0xFEEC53C
+
+
+
+. 3217 FEEC620 8
+. 2C 13 00 00 40 A2 FF 18
+==== BB 3218 (0xFEEC53C) approx BBs exec'd 0 ====
+
+	0xFEEC53C:  881F0000  lbz r0,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEEC540:  2C800000  cmpi cr1,r0,0
+	   4: GETL       	R0, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0xFEEC544:  41860050  bc 12,6,0xFEEC594
+	   8: Js06o       	$0xFEEC594
+
+
+
+. 3218 FEEC53C 12
+. 88 1F 00 00 2C 80 00 00 41 86 00 50
+==== BB 3219 (0xFEEC548) approx BBs exec'd 0 ====
+
+	0xFEEC548:  2E000025  cmpi cr4,r0,37
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x25, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0xFEEC54C:  3B400000  li r26,0
+	   5: MOVL       	$0x0, t6
+	   6: PUTL       	t6, R26
+	   7: INCEIPL       	$4
+
+	0xFEEC550:  3B80FFFF  li r28,-1
+	   8: MOVL       	$0xFFFFFFFF, t8
+	   9: PUTL       	t8, R28
+	  10: INCEIPL       	$4
+
+	0xFEEC554:  3BA00000  li r29,0
+	  11: MOVL       	$0x0, t10
+	  12: PUTL       	t10, R29
+	  13: INCEIPL       	$4
+
+	0xFEEC558:  3A400000  li r18,0
+	  14: MOVL       	$0x0, t12
+	  15: PUTL       	t12, R18
+	  16: INCEIPL       	$4
+
+	0xFEEC55C:  39400000  li r10,0
+	  17: MOVL       	$0x0, t14
+	  18: PUTL       	t14, R10
+	  19: INCEIPL       	$4
+
+	0xFEEC560:  419200D4  bc 12,18,0xFEEC634
+	  20: Js18o       	$0xFEEC634
+
+
+
+. 3219 FEEC548 28
+. 2E 00 00 25 3B 40 00 00 3B 80 FF FF 3B A0 00 00 3A 40 00 00 39 40 00 00 41 92 00 D4
+==== BB 3220 (0xFEEC634) approx BBs exec'd 0 ====
+
+	0xFEEC634:  8D1F0001  lbzu r8,1(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R31
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0xFEEC638:  2F880030  cmpi cr7,r8,48
+	   6: GETL       	R8, t4
+	   7: MOVL       	$0x30, t8
+	   8: CMPL       	t4, t8, t6  (-rSo)
+	   9: ICRFL       	t6, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFEEC63C:  419E0018  bc 12,30,0xFEEC654
+	  11: Js30o       	$0xFEEC654
+
+
+
+. 3220 FEEC634 12
+. 8D 1F 00 01 2F 88 00 30 41 9E 00 18
+==== BB 3221 (0xFEEC640) approx BBs exec'd 0 ====
+
+	0xFEEC640:  419D001C  bc 12,29,0xFEEC65C
+	   0: Js29o       	$0xFEEC65C
+
+
+
+. 3221 FEEC640 4
+. 41 9D 00 1C
+==== BB 3222 (0xFEEC65C) approx BBs exec'd 0 ====
+
+	0xFEEC65C:  2C88005E  cmpi cr1,r8,94
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x5E, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFEEC660:  41860010  bc 12,6,0xFEEC670
+	   5: Js06o       	$0xFEEC670
+
+
+
+. 3222 FEEC65C 8
+. 2C 88 00 5E 41 86 00 10
+==== BB 3223 (0xFEEC664) approx BBs exec'd 0 ====
+
+	0xFEEC664:  2F88005F  cmpi cr7,r8,95
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x5F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEEC668:  419EFFEC  bc 12,30,0xFEEC654
+	   5: Js30o       	$0xFEEC654
+
+
+
+. 3223 FEEC664 8
+. 2F 88 00 5F 41 9E FF EC
+==== BB 3224 (0xFEEC66C) approx BBs exec'd 0 ====
+
+	0xFEEC66C:  4800000C  b 0xFEEC678
+	   0: JMPo       	$0xFEEC678  ($4)
+
+
+
+. 3224 FEEC66C 4
+. 48 00 00 0C
+==== BB 3225 (0xFEEC678) approx BBs exec'd 0 ====
+
+	0xFEEC678:  38A8FFD0  addi r5,r8,-48
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0xFFFFFFD0, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0xFEEC67C:  2A050009  cmpli cr4,r5,9
+	   4: GETL       	R5, t2
+	   5: MOVL       	$0x9, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0xFEEC680:  41910040  bc 12,17,0xFEEC6C0
+	   9: Js17o       	$0xFEEC6C0
+
+
+
+. 3225 FEEC678 12
+. 38 A8 FF D0 2A 05 00 09 41 91 00 40
+==== BB 3226 (0xFEEC6C0) approx BBs exec'd 0 ====
+
+	0xFEEC6C0:  2C080045  cmpi cr0,r8,69
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x45, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEEC6C4:  7D004378  or r0,r8,r8
+	   5: GETL       	R8, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFEEC6C8:  41820104  bc 12,2,0xFEEC7CC
+	   8: Js02o       	$0xFEEC7CC
+
+
+
+. 3226 FEEC6C0 12
+. 2C 08 00 45 7D 00 43 78 41 82 01 04
+==== BB 3227 (0xFEEC6CC) approx BBs exec'd 0 ====
+
+	0xFEEC6CC:  2C88004F  cmpi cr1,r8,79
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x4F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFEEC6D0:  3B200000  li r25,0
+	   5: MOVL       	$0x0, t6
+	   6: PUTL       	t6, R25
+	   7: INCEIPL       	$4
+
+	0xFEEC6D4:  418600F8  bc 12,6,0xFEEC7CC
+	   8: Js06o       	$0xFEEC7CC
+
+
+
+. 3227 FEEC6CC 12
+. 2C 88 00 4F 3B 20 00 00 41 86 00 F8
+==== BB 3228 (0xFEEC6D8) approx BBs exec'd 0 ====
+
+	0xFEEC6D8:  2A08007A  cmpli cr4,r8,122
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x7A, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0xFEEC6DC:  41910030  bc 12,17,0xFEEC70C
+	   5: Js17o       	$0xFEEC70C
+
+
+
+. 3228 FEEC6D8 8
+. 2A 08 00 7A 41 91 00 30
+==== BB 3229 (0xFEEC6E0) approx BBs exec'd 0 ====
+
+	0xFEEC6E0:  819E0938  lwz r12,2360(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x938, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFEEC6E4:  5500103A  rlwinm r0,r8,2,0,29
+	   5: GETL       	R8, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEEC6E8:  7F0C002E  lwzx r24,r12,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R12, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R24
+	  14: INCEIPL       	$4
+
+	0xFEEC6EC:  7DF86214  add r15,r24,r12
+	  15: GETL       	R24, t12
+	  16: GETL       	R12, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R15
+	  19: INCEIPL       	$4
+
+	0xFEEC6F0:  7DE903A6  mtctr r15
+	  20: GETL       	R15, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0xFEEC6F4:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+
+. 3229 FEEC6E0 24
+. 81 9E 09 38 55 00 10 3A 7F 0C 00 2E 7D F8 62 14 7D E9 03 A6 4E 80 04 20
+==== BB 3230 (0xFEED214) approx BBs exec'd 0 ====
+
+	0xFEED214:  2E190000  cmpi cr4,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFEED218:  40B2F4F4  bc 5,18,0xFEEC70C
+	   4: Jc18o       	$0xFEEC70C
+
+
+
+. 3230 FEED214 8
+. 2E 19 00 00 40 B2 F4 F4
+==== BB 3231 (0xFEED21C) approx BBs exec'd 0 ====
+
+	0xFEED21C:  2F0A0000  cmpi cr6,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFEED220:  419A0008  bc 12,26,0xFEED228
+	   4: Js26o       	$0xFEED228
+
+
+
+. 3231 FEED21C 8
+. 2F 0A 00 00 41 9A 00 08
+==== BB 3232 (0xFEED228) approx BBs exec'd 0 ====
+
+	0xFEED228:  83170018  lwz r24,24(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0xFEED22C:  571913BA  rlwinm r25,r24,2,14,29
+	   5: GETL       	R24, t4
+	   6: ROLL       	$0x2, t4
+	   7: ANDL       	$0x3FFFC, t4
+	   8: PUTL       	t4, R25
+	   9: INCEIPL       	$4
+
+	0xFEED230:  7D198A14  add r8,r25,r17
+	  10: GETL       	R25, t6
+	  11: GETL       	R17, t8
+	  12: ADDL       	t6, t8
+	  13: PUTL       	t8, R8
+	  14: INCEIPL       	$4
+
+	0xFEED234:  80680024  lwz r3,36(r8)
+	  15: GETL       	R8, t10
+	  16: ADDL       	$0x24, t10
+	  17: LDL       	(t10), t12
+	  18: PUTL       	t12, R3
+	  19: INCEIPL       	$4
+
+	0xFEED238:  4BFE98A1  bl 0xFED6AD8
+	  20: MOVL       	$0xFEED23C, t14
+	  21: PUTL       	t14, LR
+	  22: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 3232 FEED228 20
+. 83 17 00 18 57 19 13 BA 7D 19 8A 14 80 68 00 24 4B FE 98 A1
+==== BB 3233 (0xFEED23C) approx BBs exec'd 0 ====
+
+	0xFEED23C:  7FA3E050  subf r29,r3,r28
+	   0: GETL       	R3, t0
+	   1: GETL       	R28, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFEED240:  7C791B78  or r25,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0xFEED244:  2F1D0000  cmpi cr6,r29,0
+	   8: GETL       	R29, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFEED248:  7FA0EB78  or r0,r29,r29
+	  12: GETL       	R29, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFEED24C:  41980A08  bc 12,24,0xFEEDC54
+	  15: Js24o       	$0xFEEDC54
+
+
+
+. 3233 FEED23C 20
+. 7F A3 E0 50 7C 79 1B 78 2F 1D 00 00 7F A0 EB 78 41 98 0A 08
+==== BB 3234 (0xFEEDC54) approx BBs exec'd 0 ====
+
+	0xFEEDC54:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEEDC58:  4BFFF5F8  b 0xFEED250
+	   3: JMPo       	$0xFEED250  ($4)
+
+
+
+. 3234 FEEDC54 8
+. 38 00 00 00 4B FF F5 F8
+==== BB 3235 (0xFEED250) approx BBs exec'd 0 ====
+
+	0xFEED250:  7F80CA14  add r28,r0,r25
+	   0: GETL       	R0, t0
+	   1: GETL       	R25, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFEED254:  7C75A050  subf r3,r21,r20
+	   5: GETL       	R21, t4
+	   6: GETL       	R20, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFEED258:  7F9C1840  cmpl cr7,r28,r3
+	  10: GETL       	R28, t8
+	  11: GETL       	R3, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFEED25C:  40BCF360  bc 5,28,0xFEEC5BC
+	  15: Jc28o       	$0xFEEC5BC
+
+
+
+. 3235 FEED250 16
+. 7F 80 CA 14 7C 75 A0 50 7F 9C 18 40 40 BC F3 60
+==== BB 3236 (0xFEED260) approx BBs exec'd 0 ====
+
+	0xFEED260:  2C1B0000  cmpi cr0,r27,0
+	   0: GETL       	R27, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEED264:  41A2F930  bc 13,2,0xFEECB94
+	   4: Js02o       	$0xFEECB94
+
+
+
+. 3236 FEED260 8
+. 2C 1B 00 00 41 A2 F9 30
+==== BB 3237 (0xFEED268) approx BBs exec'd 0 ====
+
+	0xFEED268:  4099002C  bc 4,25,0xFEED294
+	   0: Jc25o       	$0xFEED294
+
+
+
+. 3237 FEED268 4
+. 40 99 00 2C
+==== BB 3238 (0xFEED294) approx BBs exec'd 0 ====
+
+	0xFEED294:  2E120000  cmpi cr4,r18,0
+	   0: GETL       	R18, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFEED298:  41920948  bc 12,18,0xFEEDBE0
+	   4: Js18o       	$0xFEEDBE0
+
+
+
+. 3238 FEED294 8
+. 2E 12 00 00 41 92 09 48
+==== BB 3239 (0xFEEDBE0) approx BBs exec'd 0 ====
+
+	0xFEEDBE0:  570913BA  rlwinm r9,r24,2,14,29
+	   0: GETL       	R24, t0
+	   1: ROLL       	$0x2, t0
+	   2: ANDL       	$0x3FFFC, t0
+	   3: PUTL       	t0, R9
+	   4: INCEIPL       	$4
+
+	0xFEEDBE4:  4BFFFEC8  b 0xFEEDAAC
+	   5: JMPo       	$0xFEEDAAC  ($4)
+
+
+
+. 3239 FEEDBE0 8
+. 57 09 13 BA 4B FF FE C8
+==== BB 3240 (0xFEEDAAC) approx BBs exec'd 0 ====
+
+	0xFEEDAAC:  7D498A14  add r10,r9,r17
+	   0: GETL       	R9, t0
+	   1: GETL       	R17, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFEEDAB0:  808A0024  lwz r4,36(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x24, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFEEDAB4:  4BFFFFBC  b 0xFEEDA70
+	  10: JMPo       	$0xFEEDA70  ($4)
+
+
+
+. 3240 FEEDAAC 12
+. 7D 49 8A 14 80 8A 00 24 4B FF FF BC
+==== BB 3241 (0xFEEDA70) approx BBs exec'd 0 ====
+
+	0xFEEDA70:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEEDA74:  7F25CB78  or r5,r25,r25
+	   3: GETL       	R25, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFEEDA78:  4BFEAC61  bl 0xFED86D8
+	   6: MOVL       	$0xFEEDA7C, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED86D8  ($4)
+
+
+
+. 3241 FEEDA70 12
+. 7F 63 DB 78 7F 25 CB 78 4B FE AC 61
+==== BB 3242 (0xFEEDA7C) approx BBs exec'd 0 ====
+
+	0xFEEDA7C:  7F7BCA14  add r27,r27,r25
+	   0: GETL       	R27, t0
+	   1: GETL       	R25, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFEEDA80:  4BFFF784  b 0xFEED204
+	   5: JMPo       	$0xFEED204  ($4)
+
+
+
+. 3242 FEEDA7C 8
+. 7F 7B CA 14 4B FF F7 84
+==== BB 3243 (0xFEED204) approx BBs exec'd 0 ====
+
+	0xFEED204:  7EB5E214  add r21,r21,r28
+	   0: GETL       	R21, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0xFEED208:  4BFFF990  b 0xFEECB98
+	   5: JMPo       	$0xFEECB98  ($4)
+
+
+
+. 3243 FEED204 8
+. 7E B5 E2 14 4B FF F9 90
+==== BB 3244 (0xFEECB98) approx BBs exec'd 0 ====
+
+	0xFEECB98:  8C1F0001  lbzu r0,1(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R31
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFEECB9C:  4BFFF9F0  b 0xFEEC58C
+	   6: JMPo       	$0xFEEC58C  ($4)
+
+
+
+. 3244 FEECB98 8
+. 8C 1F 00 01 4B FF F9 F0
+==== BB 3245 (0xFEEC58C) approx BBs exec'd 0 ====
+
+	0xFEEC58C:  2C800000  cmpi cr1,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEEC590:  4086FFB8  bc 4,6,0xFEEC548
+	   4: Jc06o       	$0xFEEC548
+
+
+
+. 3245 FEEC58C 8
+. 2C 80 00 00 40 86 FF B8
+==== BB 3246 (0xFEEC594) approx BBs exec'd 0 ====
+
+	0xFEEC594:  3134FFFF  addic r9,r20,-1
+	   0: GETL       	R20, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFEEC598:  7E09A110  subfe r16,r9,r20
+	   4: GETL       	R9, t2
+	   5: GETL       	R20, t4
+	   6: SBBL       	t2, t4  (-rCa-wCa)
+	   7: PUTL       	t4, R16
+	   8: INCEIPL       	$4
+
+	0xFEEC59C:  317BFFFF  addic r11,r27,-1
+	   9: GETL       	R27, t6
+	  10: ADCL       	$0xFFFFFFFF, t6  (-wCa)
+	  11: PUTL       	t6, R11
+	  12: INCEIPL       	$4
+
+	0xFEEC5A0:  7E2BD910  subfe r17,r11,r27
+	  13: GETL       	R11, t8
+	  14: GETL       	R27, t10
+	  15: SBBL       	t8, t10  (-rCa-wCa)
+	  16: PUTL       	t10, R17
+	  17: INCEIPL       	$4
+
+	0xFEEC5A4:  7E2B8039  and. r11,r17,r16
+	  18: GETL       	R17, t12
+	  19: GETL       	R16, t14
+	  20: ANDL       	t12, t14
+	  21: PUTL       	t14, R11
+	  22: CMP0L       	t14, t16  (-rSo)
+	  23: ICRFL       	t16, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0xFEEC5A8:  4182000C  bc 12,2,0xFEEC5B4
+	  25: Js02o       	$0xFEEC5B4
+
+
+
+. 3246 FEEC594 24
+. 31 34 FF FF 7E 09 A1 10 31 7B FF FF 7E 2B D9 10 7E 2B 80 39 41 82 00 0C
+==== BB 3247 (0xFEEC5AC) approx BBs exec'd 0 ====
+
+	0xFEEC5AC:  3A600000  li r19,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R19
+	   2: INCEIPL       	$4
+
+	0xFEEC5B0:  9A7B0000  stb r19,0(r27)
+	   3: GETL       	R19, t2
+	   4: GETL       	R27, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0xFEEC5B4:  7EA3AB78  or r3,r21,r21
+	   7: GETL       	R21, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFEEC5B8:  48000008  b 0xFEEC5C0
+	  10: JMPo       	$0xFEEC5C0  ($4)
+
+
+
+. 3247 FEEC5AC 16
+. 3A 60 00 00 9A 7B 00 00 7E A3 AB 78 48 00 00 08
+==== BB 3248 (0xFEEC5C0) approx BBs exec'd 0 ====
+
+	0xFEEC5C0:  828100A4  lwz r20,164(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xA4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R20
+	   4: INCEIPL       	$4
+
+	0xFEEC5C4:  81010054  lwz r8,84(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x54, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0xFEEC5C8:  7E8803A6  mtlr r20
+	  10: GETL       	R20, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFEEC5CC:  81C10058  lwz r14,88(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x58, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R14
+	  17: INCEIPL       	$4
+
+	0xFEEC5D0:  81E1005C  lwz r15,92(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x5C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R15
+	  22: INCEIPL       	$4
+
+	0xFEEC5D4:  7D008120  mtcrf 0x8,r8
+	  23: GETL       	R8, t18
+	  24: ICRFL       	t18, $0x4, CR
+	  25: INCEIPL       	$4
+
+	0xFEEC5D8:  82010060  lwz r16,96(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x60, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R16
+	  30: INCEIPL       	$4
+
+	0xFEEC5DC:  82210064  lwz r17,100(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x64, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R17
+	  35: INCEIPL       	$4
+
+	0xFEEC5E0:  82410068  lwz r18,104(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x68, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R18
+	  40: INCEIPL       	$4
+
+	0xFEEC5E4:  8261006C  lwz r19,108(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x6C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R19
+	  45: INCEIPL       	$4
+
+	0xFEEC5E8:  82810070  lwz r20,112(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x70, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R20
+	  50: INCEIPL       	$4
+
+	0xFEEC5EC:  82A10074  lwz r21,116(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x74, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R21
+	  55: INCEIPL       	$4
+
+	0xFEEC5F0:  82C10078  lwz r22,120(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x78, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R22
+	  60: INCEIPL       	$4
+
+	0xFEEC5F4:  82E1007C  lwz r23,124(r1)
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x7C, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R23
+	  65: INCEIPL       	$4
+
+	0xFEEC5F8:  83010080  lwz r24,128(r1)
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x80, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R24
+	  70: INCEIPL       	$4
+
+	0xFEEC5FC:  83210084  lwz r25,132(r1)
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0x84, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R25
+	  75: INCEIPL       	$4
+
+	0xFEEC600:  83410088  lwz r26,136(r1)
+	  76: GETL       	R1, t60
+	  77: ADDL       	$0x88, t60
+	  78: LDL       	(t60), t62
+	  79: PUTL       	t62, R26
+	  80: INCEIPL       	$4
+
+	0xFEEC604:  8361008C  lwz r27,140(r1)
+	  81: GETL       	R1, t64
+	  82: ADDL       	$0x8C, t64
+	  83: LDL       	(t64), t66
+	  84: PUTL       	t66, R27
+	  85: INCEIPL       	$4
+
+	0xFEEC608:  83810090  lwz r28,144(r1)
+	  86: GETL       	R1, t68
+	  87: ADDL       	$0x90, t68
+	  88: LDL       	(t68), t70
+	  89: PUTL       	t70, R28
+	  90: INCEIPL       	$4
+
+	0xFEEC60C:  83A10094  lwz r29,148(r1)
+	  91: GETL       	R1, t72
+	  92: ADDL       	$0x94, t72
+	  93: LDL       	(t72), t74
+	  94: PUTL       	t74, R29
+	  95: INCEIPL       	$4
+
+	0xFEEC610:  83C10098  lwz r30,152(r1)
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x98, t76
+	  98: LDL       	(t76), t78
+	  99: PUTL       	t78, R30
+	 100: INCEIPL       	$4
+
+	0xFEEC614:  83E1009C  lwz r31,156(r1)
+	 101: GETL       	R1, t80
+	 102: ADDL       	$0x9C, t80
+	 103: LDL       	(t80), t82
+	 104: PUTL       	t82, R31
+	 105: INCEIPL       	$4
+
+	0xFEEC618:  382100A0  addi r1,r1,160
+	 106: GETL       	R1, t84
+	 107: ADDL       	$0xA0, t84
+	 108: PUTL       	t84, R1
+	 109: INCEIPL       	$4
+
+	0xFEEC61C:  4E800020  blr
+	 110: GETL       	LR, t86
+	 111: JMPo-r       	t86  ($4)
+
+
+
+. 3248 FEEC5C0 96
+. 82 81 00 A4 81 01 00 54 7E 88 03 A6 81 C1 00 58 81 E1 00 5C 7D 00 81 20 82 01 00 60 82 21 00 64 82 41 00 68 82 61 00 6C 82 81 00 70 82 A1 00 74 82 C1 00 78 82 E1 00 7C 83 01 00 80 83 21 00 84 83 41 00 88 83 61 00 8C 83 81 00 90 83 A1 00 94 83 C1 00 98 83 E1 00 9C 38 21 00 A0 4E 80 00 20
+==== BB 3249 (0x10003BC0) approx BBs exec'd 0 ====
+
+	0x10003BC0:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x10003BC4:  40820010  bc 4,2,0x10003BD4
+	   5: Jc02o       	$0x10003BD4
+
+
+
+. 3249 10003BC0 8
+. 7C 7F 1B 79 40 82 00 10
+==== BB 3250 (0x10003BD4) approx BBs exec'd 0 ====
+
+	0x10003BD4:  7FBFE050  subf r29,r31,r28
+	   0: GETL       	R31, t0
+	   1: GETL       	R28, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x10003BD8:  2F1D0000  cmpi cr6,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x10003BDC:  7FA0EB78  or r0,r29,r29
+	   9: GETL       	R29, t8
+	  10: PUTL       	t8, R0
+	  11: INCEIPL       	$4
+
+	0x10003BE0:  41980C78  bc 12,24,0x10004858
+	  12: Js24o       	$0x10004858
+
+
+
+. 3250 10003BD4 16
+. 7F BF E0 50 2F 1D 00 00 7F A0 EB 78 41 98 0C 78
+==== BB 3251 (0x10004858) approx BBs exec'd 0 ====
+
+	0x10004858:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x1000485C:  4BFFF388  b 0x10003BE4
+	   3: JMPo       	$0x10003BE4  ($4)
+
+
+
+. 3251 10004858 8
+. 38 00 00 00 4B FF F3 88
+==== BB 3252 (0x10003BE4) approx BBs exec'd 0 ====
+
+	0x10003BE4:  7F80FA14  add r28,r0,r31
+	   0: GETL       	R0, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x10003BE8:  7C15B850  subf r0,r21,r23
+	   5: GETL       	R21, t4
+	   6: GETL       	R23, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x10003BEC:  7F9C0040  cmpl cr7,r28,r0
+	  10: GETL       	R28, t8
+	  11: GETL       	R0, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x10003BF0:  40BCFE08  bc 5,28,0x100039F8
+	  15: Jc28o       	$0x100039F8
+
+
+
+. 3252 10003BE4 16
+. 7F 80 FA 14 7C 15 B8 50 7F 9C 00 40 40 BC FE 08
+==== BB 3253 (0x10003BF4) approx BBs exec'd 0 ====
+
+	0x10003BF4:  2C190000  cmpi cr0,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x10003BF8:  41820058  bc 12,2,0x10003C50
+	   4: Js02o       	$0x10003C50
+
+
+
+. 3253 10003BF4 8
+. 2C 19 00 00 41 82 00 58
+==== BB 3254 (0x10003BFC) approx BBs exec'd 0 ====
+
+	0x10003BFC:  40990028  bc 4,25,0x10003C24
+	   0: Jc25o       	$0x10003C24
+
+
+
+. 3254 10003BFC 4
+. 40 99 00 28
+==== BB 3255 (0x10003C24) approx BBs exec'd 0 ====
+
+	0x10003C24:  8001045C  lwz r0,1116(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x45C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10003C28:  2F800000  cmpi cr7,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x10003C2C:  409E0C14  bc 4,30,0x10004840
+	   9: Jc30o       	$0x10004840
+
+
+
+. 3255 10003C24 12
+. 80 01 04 5C 2F 80 00 00 40 9E 0C 14
+==== BB 3256 (0x10003C30) approx BBs exec'd 0 ====
+
+	0x10003C30:  2F940000  cmpi cr7,r20,0
+	   0: GETL       	R20, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10003C34:  7E048378  or r4,r16,r16
+	   4: GETL       	R16, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0x10003C38:  409E00D4  bc 4,30,0x10003D0C
+	   7: Jc30o       	$0x10003D0C
+
+
+
+. 3256 10003C30 12
+. 2F 94 00 00 7E 04 83 78 40 9E 00 D4
+==== BB 3257 (0x10003C3C) approx BBs exec'd 0 ====
+
+	0x10003C3C:  7E048378  or r4,r16,r16
+	   0: GETL       	R16, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x10003C40:  7F23CB78  or r3,r25,r25
+	   3: GETL       	R25, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0x10003C44:  7FE5FB78  or r5,r31,r31
+	   6: GETL       	R31, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x10003C48:  48017015  bl 0x1001AC5C
+	   9: MOVL       	$0x10003C4C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x1001AC5C  ($4)
+
+
+
+. 3257 10003C3C 16
+. 7E 04 83 78 7F 23 CB 78 7F E5 FB 78 48 01 70 15
+==== BB 3258 (0x1001AC5C) approx BBs exec'd 0 ====
+
+	0x1001AC5C:  39600094  li r11,148
+	   0: MOVL       	$0x94, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AC60:  4BFFFEAC  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3258 1001AC5C 8
+. 39 60 00 94 4B FF FE AC
+==== BB 3259 (0x10003C4C) approx BBs exec'd 0 ====
+
+	0x10003C4C:  7F39FA14  add r25,r25,r31
+	   0: GETL       	R25, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x10003C50:  7EB5E214  add r21,r21,r28
+	   5: GETL       	R21, t4
+	   6: GETL       	R28, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R21
+	   9: INCEIPL       	$4
+
+	0x10003C54:  8C1A0001  lbzu r0,1(r26)
+	  10: GETL       	R26, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R26
+	  13: LDB       	(t8), t10
+	  14: PUTL       	t10, R0
+	  15: INCEIPL       	$4
+
+	0x10003C58:  4BFFFC6C  b 0x100038C4
+	  16: JMPo       	$0x100038C4  ($4)
+
+
+
+. 3259 10003C4C 16
+. 7F 39 FA 14 7E B5 E2 14 8C 1A 00 01 4B FF FC 6C
+==== BB 3260 (0x100038C4) approx BBs exec'd 0 ====
+
+	0x100038C4:  2F800000  cmpi cr7,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x100038C8:  409EFFAC  bc 4,30,0x10003874
+	   4: Jc30o       	$0x10003874
+
+
+
+. 3260 100038C4 8
+. 2F 80 00 00 40 9E FF AC
+==== BB 3261 (0x100039E0) approx BBs exec'd 0 ====
+
+	0x100039E0:  2F890020  cmpi cr7,r9,32
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x20, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x100039E4:  40BCFEB8  bc 5,28,0x1000389C
+	   5: Jc28o       	$0x1000389C
+
+
+
+. 3261 100039E0 8
+. 2F 89 00 20 40 BC FE B8
+==== BB 3262 (0x1000389C) approx BBs exec'd 0 ====
+
+	0x1000389C:  7C15B850  subf r0,r21,r23
+	   0: GETL       	R21, t0
+	   1: GETL       	R23, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x100038A0:  2B800001  cmpli cr7,r0,1
+	   5: GETL       	R0, t4
+	   6: MOVL       	$0x1, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x100038A4:  409D0154  bc 4,29,0x100039F8
+	  10: Jc29o       	$0x100039F8
+
+
+
+. 3262 1000389C 12
+. 7C 15 B8 50 2B 80 00 01 40 9D 01 54
+==== BB 3263 (0x100038A8) approx BBs exec'd 0 ====
+
+	0x100038A8:  2C190000  cmpi cr0,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x100038AC:  41820010  bc 12,2,0x100038BC
+	   4: Js02o       	$0x100038BC
+
+
+
+. 3263 100038A8 8
+. 2C 19 00 00 41 82 00 10
+==== BB 3264 (0x100038B0) approx BBs exec'd 0 ====
+
+	0x100038B0:  881A0000  lbz r0,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x100038B4:  98190000  stb r0,0(r25)
+	   4: GETL       	R0, t4
+	   5: GETL       	R25, t6
+	   6: STB       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0x100038B8:  3B390001  addi r25,r25,1
+	   8: GETL       	R25, t8
+	   9: ADDL       	$0x1, t8
+	  10: PUTL       	t8, R25
+	  11: INCEIPL       	$4
+
+	0x100038BC:  3AB50001  addi r21,r21,1
+	  12: GETL       	R21, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R21
+	  15: INCEIPL       	$4
+
+	0x100038C0:  8C1A0001  lbzu r0,1(r26)
+	  16: GETL       	R26, t12
+	  17: ADDL       	$0x1, t12
+	  18: PUTL       	t12, R26
+	  19: LDB       	(t12), t14
+	  20: PUTL       	t14, R0
+	  21: INCEIPL       	$4
+
+	0x100038C4:  2F800000  cmpi cr7,r0,0
+	  22: GETL       	R0, t16
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x7, CR
+	  25: INCEIPL       	$4
+
+	0x100038C8:  409EFFAC  bc 4,30,0x10003874
+	  26: Jc30o       	$0x10003874
+
+
+
+. 3264 100038B0 28
+. 88 1A 00 00 98 19 00 00 3B 39 00 01 3A B5 00 01 8C 1A 00 01 2F 80 00 00 40 9E FF AC
+==== BB 3265 (0x10004460) approx BBs exec'd 0 ====
+
+	0x10004460:  2F8A0000  cmpi cr7,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10004464:  419E0010  bc 12,30,0x10004474
+	   4: Js30o       	$0x10004474
+
+
+
+. 3265 10004460 8
+. 2F 8A 00 00 41 9E 00 10
+==== BB 3266 (0x10004474) approx BBs exec'd 0 ====
+
+	0x10004474:  2F100000  cmpi cr6,r16,0
+	   0: GETL       	R16, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x10004478:  41BAF70C  bc 13,26,0x10003B84
+	   4: Js26o       	$0x10003B84
+
+
+
+. 3266 10004474 8
+. 2F 10 00 00 41 BA F7 0C
+==== BB 3267 (0x1001ABD4) approx BBs exec'd 0 ====
+
+	0x1001ABD4:  4BED1870  b 0xFEEC444
+	   0: JMPo       	$0xFEEC444  ($4)
+
+
+
+. 3267 1001ABD4 4
+. 4B ED 18 70
+==== BB 3268 (0xFEED138) approx BBs exec'd 0 ====
+
+	0xFEED138:  2C8A0000  cmpi cr1,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEED13C:  41860008  bc 12,6,0xFEED144
+	   4: Js06o       	$0xFEED144
+
+
+
+. 3268 FEED138 8
+. 2C 8A 00 00 41 86 00 08
+==== BB 3269 (0xFEED144) approx BBs exec'd 0 ====
+
+	0xFEED144:  2E190000  cmpi cr4,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0xFEED148:  40B2F5C4  bc 5,18,0xFEEC70C
+	   4: Jc18o       	$0xFEEC70C
+
+
+
+. 3269 FEED144 8
+. 2E 19 00 00 40 B2 F5 C4
+==== BB 3270 (0xFEED14C) approx BBs exec'd 0 ====
+
+	0xFEED14C:  83170010  lwz r24,16(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0xFEED150:  3B38000E  addi r25,r24,14
+	   5: GETL       	R24, t4
+	   6: ADDL       	$0xE, t4
+	   7: PUTL       	t4, R25
+	   8: INCEIPL       	$4
+
+	0xFEED154:  572813BA  rlwinm r8,r25,2,14,29
+	   9: GETL       	R25, t6
+	  10: ROLL       	$0x2, t6
+	  11: ANDL       	$0x3FFFC, t6
+	  12: PUTL       	t6, R8
+	  13: INCEIPL       	$4
+
+	0xFEED158:  7D488A14  add r10,r8,r17
+	  14: GETL       	R8, t8
+	  15: GETL       	R17, t10
+	  16: ADDL       	t8, t10
+	  17: PUTL       	t10, R10
+	  18: INCEIPL       	$4
+
+	0xFEED15C:  806A0024  lwz r3,36(r10)
+	  19: GETL       	R10, t12
+	  20: ADDL       	$0x24, t12
+	  21: LDL       	(t12), t14
+	  22: PUTL       	t14, R3
+	  23: INCEIPL       	$4
+
+	0xFEED160:  4BFE9979  bl 0xFED6AD8
+	  24: MOVL       	$0xFEED164, t16
+	  25: PUTL       	t16, LR
+	  26: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 3270 FEED14C 24
+. 83 17 00 10 3B 38 00 0E 57 28 13 BA 7D 48 8A 14 80 6A 00 24 4B FE 99 79
+==== BB 3271 (0xFEED164) approx BBs exec'd 0 ====
+
+	0xFEED164:  7FA3E050  subf r29,r3,r28
+	   0: GETL       	R3, t0
+	   1: GETL       	R28, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFEED168:  7C791B78  or r25,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0xFEED16C:  2F1D0000  cmpi cr6,r29,0
+	   8: GETL       	R29, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFEED170:  7FA0EB78  or r0,r29,r29
+	  12: GETL       	R29, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFEED174:  41980A64  bc 12,24,0xFEEDBD8
+	  15: Js24o       	$0xFEEDBD8
+
+
+
+. 3271 FEED164 20
+. 7F A3 E0 50 7C 79 1B 78 2F 1D 00 00 7F A0 EB 78 41 98 0A 64
+==== BB 3272 (0xFEEDBD8) approx BBs exec'd 0 ====
+
+	0xFEEDBD8:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEEDBDC:  4BFFF59C  b 0xFEED178
+	   3: JMPo       	$0xFEED178  ($4)
+
+
+
+. 3272 FEEDBD8 8
+. 38 00 00 00 4B FF F5 9C
+==== BB 3273 (0xFEED178) approx BBs exec'd 0 ====
+
+	0xFEED178:  7F80CA14  add r28,r0,r25
+	   0: GETL       	R0, t0
+	   1: GETL       	R25, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFEED17C:  7C95A050  subf r4,r21,r20
+	   5: GETL       	R21, t4
+	   6: GETL       	R20, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0xFEED180:  7F9C2040  cmpl cr7,r28,r4
+	  10: GETL       	R28, t8
+	  11: GETL       	R4, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFEED184:  40BCF438  bc 5,28,0xFEEC5BC
+	  15: Jc28o       	$0xFEEC5BC
+
+
+
+. 3273 FEED178 16
+. 7F 80 CA 14 7C 95 A0 50 7F 9C 20 40 40 BC F4 38
+==== BB 3274 (0xFEED188) approx BBs exec'd 0 ====
+
+	0xFEED188:  2C1B0000  cmpi cr0,r27,0
+	   0: GETL       	R27, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEED18C:  41A2FA08  bc 13,2,0xFEECB94
+	   4: Js02o       	$0xFEECB94
+
+
+
+. 3274 FEED188 8
+. 2C 1B 00 00 41 A2 FA 08
+==== BB 3275 (0xFEED190) approx BBs exec'd 0 ====
+
+	0xFEED190:  4099002C  bc 4,25,0xFEED1BC
+	   0: Jc25o       	$0xFEED1BC
+
+
+
+. 3275 FEED190 4
+. 40 99 00 2C
+==== BB 3276 (0xFEED1BC) approx BBs exec'd 0 ====
+
+	0xFEED1BC:  2C920000  cmpi cr1,r18,0
+	   0: GETL       	R18, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFEED1C0:  41860A6C  bc 12,6,0xFEEDC2C
+	   4: Js06o       	$0xFEEDC2C
+
+
+
+. 3276 FEED1BC 8
+. 2C 92 00 00 41 86 0A 6C
+==== BB 3277 (0xFEEDC2C) approx BBs exec'd 0 ====
+
+	0xFEEDC2C:  3938000E  addi r9,r24,14
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0xE, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFEEDC30:  4BFFFE78  b 0xFEEDAA8
+	   4: JMPo       	$0xFEEDAA8  ($4)
+
+
+
+. 3277 FEEDC2C 8
+. 39 38 00 0E 4B FF FE 78
+==== BB 3278 (0xFEEDAA8) approx BBs exec'd 0 ====
+
+	0xFEEDAA8:  552913BA  rlwinm r9,r9,2,14,29
+	   0: GETL       	R9, t0
+	   1: ROLL       	$0x2, t0
+	   2: ANDL       	$0x3FFFC, t0
+	   3: PUTL       	t0, R9
+	   4: INCEIPL       	$4
+
+	0xFEEDAAC:  7D498A14  add r10,r9,r17
+	   5: GETL       	R9, t2
+	   6: GETL       	R17, t4
+	   7: ADDL       	t2, t4
+	   8: PUTL       	t4, R10
+	   9: INCEIPL       	$4
+
+	0xFEEDAB0:  808A0024  lwz r4,36(r10)
+	  10: GETL       	R10, t6
+	  11: ADDL       	$0x24, t6
+	  12: LDL       	(t6), t8
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0xFEEDAB4:  4BFFFFBC  b 0xFEEDA70
+	  15: JMPo       	$0xFEEDA70  ($4)
+
+
+
+. 3278 FEEDAA8 16
+. 55 29 13 BA 7D 49 8A 14 80 8A 00 24 4B FF FF BC
+==== BB 3279 (0x1001AC5C) approx BBs exec'd 0 ====
+
+	0x1001AC5C:  4BEBDA7C  b 0xFED86D8
+	   0: JMPo       	$0xFED86D8  ($4)
+
+
+
+. 3279 1001AC5C 4
+. 4B EB DA 7C
+==== BB 3280 (0x100045A4) approx BBs exec'd 0 ====
+
+	0x100045A4:  2F900045  cmpi cr7,r16,69
+	   0: GETL       	R16, t0
+	   1: MOVL       	$0x45, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x100045A8:  41BEF6D8  bc 13,30,0x10003C80
+	   5: Js30o       	$0x10003C80
+
+
+
+. 3280 100045A4 8
+. 2F 90 00 45 41 BE F6 D8
+==== BB 3281 (0x100045AC) approx BBs exec'd 0 ====
+
+	0x100045AC:  2F9C0002  cmpi cr7,r28,2
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x100045B0:  7F87E378  or r7,r28,r28
+	   5: GETL       	R28, t6
+	   6: PUTL       	t6, R7
+	   7: INCEIPL       	$4
+
+	0x100045B4:  419C04BC  bc 12,28,0x10004A70
+	   8: Js28o       	$0x10004A70
+
+
+
+. 3281 100045AC 12
+. 2F 9C 00 02 7F 87 E3 78 41 9C 04 BC
+==== BB 3282 (0x10004A70) approx BBs exec'd 0 ====
+
+	0x10004A70:  8118000C  lwz r8,12(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x10004A74:  38E00002  li r7,2
+	   5: MOVL       	$0x2, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x10004A78:  4BFFF978  b 0x100043F0
+	   8: JMPo       	$0x100043F0  ($4)
+
+
+
+. 3282 10004A70 12
+. 81 18 00 0C 38 E0 00 02 4B FF F9 78
+==== BB 3283 (0x100043F0) approx BBs exec'd 0 ====
+
+	0x100043F0:  6B690030  xori r9,r27,0x30
+	   0: GETL       	R27, t0
+	   1: XORL       	$0x30, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x100043F4:  6B6B002D  xori r11,r27,0x2D
+	   4: GETL       	R27, t2
+	   5: XORL       	$0x2D, t2
+	   6: PUTL       	t2, R11
+	   7: INCEIPL       	$4
+
+	0x100043F8:  3009FFFF  addic r0,r9,-1
+	   8: GETL       	R9, t4
+	   9: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	  10: PUTL       	t4, R0
+	  11: INCEIPL       	$4
+
+	0x100043FC:  7D404910  subfe r10,r0,r9
+	  12: GETL       	R0, t6
+	  13: GETL       	R9, t8
+	  14: SBBL       	t6, t8  (-rCa-wCa)
+	  15: PUTL       	t8, R10
+	  16: INCEIPL       	$4
+
+	0x10004400:  312BFFFF  addic r9,r11,-1
+	  17: GETL       	R11, t10
+	  18: ADCL       	$0xFFFFFFFF, t10  (-wCa)
+	  19: PUTL       	t10, R9
+	  20: INCEIPL       	$4
+
+	0x10004404:  7C095910  subfe r0,r9,r11
+	  21: GETL       	R9, t12
+	  22: GETL       	R11, t14
+	  23: SBBL       	t12, t14  (-rCa-wCa)
+	  24: PUTL       	t14, R0
+	  25: INCEIPL       	$4
+
+	0x10004408:  7D4B0039  and. r11,r10,r0
+	  26: GETL       	R10, t16
+	  27: GETL       	R0, t18
+	  28: ANDL       	t16, t18
+	  29: PUTL       	t18, R11
+	  30: CMP0L       	t18, t20  (-rSo)
+	  31: ICRFL       	t20, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0x1000440C:  2F10004F  cmpi cr6,r16,79
+	  33: GETL       	R16, t22
+	  34: MOVL       	$0x4F, t26
+	  35: CMPL       	t22, t26, t24  (-rSo)
+	  36: ICRFL       	t24, $0x6, CR
+	  37: INCEIPL       	$4
+
+	0x10004410:  41A2FB4C  bc 13,2,0x10003F5C
+	  38: Js02o       	$0x10003F5C
+
+
+
+. 3283 100043F0 36
+. 6B 69 00 30 6B 6B 00 2D 30 09 FF FF 7D 40 49 10 31 2B FF FF 7C 09 59 10 7D 4B 00 39 2F 10 00 4F 41 A2 FB 4C
+==== BB 3284 (0x10004414) approx BBs exec'd 0 ====
+
+	0x10004414:  3B60005F  li r27,95
+	   0: MOVL       	$0x5F, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0x10004418:  4BFFFB44  b 0x10003F5C
+	   3: JMPo       	$0x10003F5C  ($4)
+
+
+
+. 3284 10004414 8
+. 3B 60 00 5F 4B FF FB 44
+==== BB 3285 (0x10003F5C) approx BBs exec'd 0 ====
+
+	0x10003F5C:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10003F60:  5400DFFE  rlwinm r0,r0,27,31,31
+	   3: GETL       	R0, t2
+	   4: ROLL       	$0x1B, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x10003F64:  7D0940F8  nor r9,r8,r8
+	   8: GETL       	R8, t4
+	   9: NOTL       	t4
+	  10: PUTL       	t4, R9
+	  11: INCEIPL       	$4
+
+	0x10003F68:  55290FFE  rlwinm r9,r9,1,31,31
+	  12: GETL       	R9, t6
+	  13: SHRL       	$0x1F, t6
+	  14: PUTL       	t6, R9
+	  15: INCEIPL       	$4
+
+	0x10003F6C:  7C0B4839  and. r11,r0,r9
+	  16: GETL       	R0, t8
+	  17: GETL       	R9, t10
+	  18: ANDL       	t8, t10
+	  19: PUTL       	t10, R11
+	  20: CMP0L       	t10, t12  (-rSo)
+	  21: ICRFL       	t12, $0x0, CR
+	  22: INCEIPL       	$4
+
+	0x10003F70:  40A2FDB0  bc 5,2,0x10003D20
+	  23: Jc02o       	$0x10003D20
+
+
+
+. 3285 10003F5C 24
+. 7C 00 00 26 54 00 DF FE 7D 09 40 F8 55 29 0F FE 7C 0B 48 39 40 A2 FD B0
+==== BB 3286 (0x10003F74) approx BBs exec'd 0 ====
+
+	0x10003F74:  55000FFE  rlwinm r0,r8,1,31,31
+	   0: GETL       	R8, t0
+	   1: SHRL       	$0x1F, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x10003F78:  7D0B4378  or r11,r8,r8
+	   4: GETL       	R8, t2
+	   5: PUTL       	t2, R11
+	   6: INCEIPL       	$4
+
+	0x10003F7C:  2F000000  cmpi cr6,r0,0
+	   7: GETL       	R0, t4
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x6, CR
+	  10: INCEIPL       	$4
+
+	0x10003F80:  3BE1001C  addi r31,r1,28
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x1C, t8
+	  13: PUTL       	t8, R31
+	  14: INCEIPL       	$4
+
+	0x10003F84:  419A0008  bc 12,26,0x10003F8C
+	  15: Js26o       	$0x10003F8C
+
+
+
+. 3286 10003F74 20
+. 55 00 0F FE 7D 0B 43 78 2F 00 00 00 3B E1 00 1C 41 9A 00 08
+==== BB 3287 (0x10003F8C) approx BBs exec'd 0 ====
+
+	0x10003F8C:  3D40CCCC  lis r10,-13108
+	   0: MOVL       	$0xCCCC0000, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x10003F90:  614ACCCD  ori r10,r10,0xCCCD
+	   3: MOVL       	$0xCCCCCCCD, t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0x10003F94:  7C0B5016  mulhwu r0,r11,r10
+	   6: GETL       	R11, t4
+	   7: GETL       	R10, t6
+	   8: UMULHL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x10003F98:  5400E8FE  rlwinm r0,r0,29,3,31
+	  11: GETL       	R0, t8
+	  12: SHRL       	$0x3, t8
+	  13: PUTL       	t8, R0
+	  14: INCEIPL       	$4
+
+	0x10003F9C:  1D20000A  mulli r9,r0,10
+	  15: GETL       	R0, t10
+	  16: MULL       	$0xA, t10
+	  17: PUTL       	t10, R9
+	  18: INCEIPL       	$4
+
+	0x10003FA0:  2F800000  cmpi cr7,r0,0
+	  19: GETL       	R0, t12
+	  20: CMP0L       	t12, t14  (-rSo)
+	  21: ICRFL       	t14, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x10003FA4:  7D295850  subf r9,r9,r11
+	  23: GETL       	R9, t16
+	  24: GETL       	R11, t18
+	  25: SUBL       	t16, t18
+	  26: PUTL       	t18, R9
+	  27: INCEIPL       	$4
+
+	0x10003FA8:  7C0B0378  or r11,r0,r0
+	  28: GETL       	R0, t20
+	  29: PUTL       	t20, R11
+	  30: INCEIPL       	$4
+
+	0x10003FAC:  39290030  addi r9,r9,48
+	  31: GETL       	R9, t22
+	  32: ADDL       	$0x30, t22
+	  33: PUTL       	t22, R9
+	  34: INCEIPL       	$4
+
+	0x10003FB0:  9D3FFFFF  stbu r9,-1(r31)
+	  35: GETL       	R9, t24
+	  36: GETL       	R31, t26
+	  37: ADDL       	$0xFFFFFFFF, t26
+	  38: PUTL       	t26, R31
+	  39: STB       	t24, (t26)
+	  40: INCEIPL       	$4
+
+	0x10003FB4:  409EFFE0  bc 4,30,0x10003F94
+	  41: Jc30o       	$0x10003F94
+
+
+
+. 3287 10003F8C 44
+. 3D 40 CC CC 61 4A CC CD 7C 0B 50 16 54 00 E8 FE 1D 20 00 0A 2F 80 00 00 7D 29 58 50 7C 0B 03 78 39 29 00 30 9D 3F FF FF 40 9E FF E0
+==== BB 3288 (0x10003F94) approx BBs exec'd 0 ====
+
+	0x10003F94:  7C0B5016  mulhwu r0,r11,r10
+	   0: GETL       	R11, t0
+	   1: GETL       	R10, t2
+	   2: UMULHL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10003F98:  5400E8FE  rlwinm r0,r0,29,3,31
+	   5: GETL       	R0, t4
+	   6: SHRL       	$0x3, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10003F9C:  1D20000A  mulli r9,r0,10
+	   9: GETL       	R0, t6
+	  10: MULL       	$0xA, t6
+	  11: PUTL       	t6, R9
+	  12: INCEIPL       	$4
+
+	0x10003FA0:  2F800000  cmpi cr7,r0,0
+	  13: GETL       	R0, t8
+	  14: CMP0L       	t8, t10  (-rSo)
+	  15: ICRFL       	t10, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x10003FA4:  7D295850  subf r9,r9,r11
+	  17: GETL       	R9, t12
+	  18: GETL       	R11, t14
+	  19: SUBL       	t12, t14
+	  20: PUTL       	t14, R9
+	  21: INCEIPL       	$4
+
+	0x10003FA8:  7C0B0378  or r11,r0,r0
+	  22: GETL       	R0, t16
+	  23: PUTL       	t16, R11
+	  24: INCEIPL       	$4
+
+	0x10003FAC:  39290030  addi r9,r9,48
+	  25: GETL       	R9, t18
+	  26: ADDL       	$0x30, t18
+	  27: PUTL       	t18, R9
+	  28: INCEIPL       	$4
+
+	0x10003FB0:  9D3FFFFF  stbu r9,-1(r31)
+	  29: GETL       	R9, t20
+	  30: GETL       	R31, t22
+	  31: ADDL       	$0xFFFFFFFF, t22
+	  32: PUTL       	t22, R31
+	  33: STB       	t20, (t22)
+	  34: INCEIPL       	$4
+
+	0x10003FB4:  409EFFE0  bc 4,30,0x10003F94
+	  35: Jc30o       	$0x10003F94
+
+
+
+. 3288 10003F94 36
+. 7C 0B 50 16 54 00 E8 FE 1D 20 00 0A 2F 80 00 00 7D 29 58 50 7C 0B 03 78 39 29 00 30 9D 3F FF FF 40 9E FF E0
+==== BB 3289 (0x10003FB8) approx BBs exec'd 0 ====
+
+	0x10003FB8:  4BFFFE68  b 0x10003E20
+	   0: JMPo       	$0x10003E20  ($4)
+
+
+
+. 3289 10003FB8 4
+. 4B FF FE 68
+==== BB 3290 (0x10003E20) approx BBs exec'd 0 ====
+
+	0x10003E20:  419A000C  bc 12,26,0x10003E2C
+	   0: Js26o       	$0x10003E2C
+
+
+
+. 3290 10003E20 4
+. 41 9A 00 0C
+==== BB 3291 (0x10003E2C) approx BBs exec'd 0 ====
+
+	0x10003E2C:  2F9B002D  cmpi cr7,r27,45
+	   0: GETL       	R27, t0
+	   1: MOVL       	$0x2D, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10003E30:  419E0A08  bc 12,30,0x10004838
+	   5: Js30o       	$0x10004838
+
+
+
+. 3291 10003E2C 8
+. 2F 9B 00 2D 41 9E 0A 08
+==== BB 3292 (0x10003E34) approx BBs exec'd 0 ====
+
+	0x10003E34:  3A010010  addi r16,r1,16
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R16
+	   3: INCEIPL       	$4
+
+	0x10003E38:  7C10F850  subf r0,r16,r31
+	   4: GETL       	R16, t2
+	   5: GETL       	R31, t4
+	   6: SUBL       	t2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10003E3C:  7C070214  add r0,r7,r0
+	   9: GETL       	R7, t6
+	  10: GETL       	R0, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x10003E40:  37A0FFF4  addic. r29,r0,-12
+	  14: GETL       	R0, t10
+	  15: ADCL       	$0xFFFFFFF4, t10  (-wCa)
+	  16: PUTL       	t10, R29
+	  17: CMP0L       	t10, t12  (-rSo)
+	  18: ICRFL       	t12, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0x10003E44:  40810044  bc 4,1,0x10003E88
+	  20: Jc01o       	$0x10003E88
+
+
+
+. 3292 10003E34 20
+. 3A 01 00 10 7C 10 F8 50 7C 07 02 14 37 A0 FF F4 40 81 00 44
+==== BB 3293 (0x10003E88) approx BBs exec'd 0 ====
+
+	0x10003E88:  7D3F8050  subf r9,r31,r16
+	   0: GETL       	R31, t0
+	   1: GETL       	R16, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x10003E8C:  3A09000C  addi r16,r9,12
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xC, t4
+	   7: PUTL       	t4, R16
+	   8: INCEIPL       	$4
+
+	0x10003E90:  7FB0E050  subf r29,r16,r28
+	   9: GETL       	R16, t6
+	  10: GETL       	R28, t8
+	  11: SUBL       	t6, t8
+	  12: PUTL       	t8, R29
+	  13: INCEIPL       	$4
+
+	0x10003E94:  2F1D0000  cmpi cr6,r29,0
+	  14: GETL       	R29, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x6, CR
+	  17: INCEIPL       	$4
+
+	0x10003E98:  7FA0EB78  or r0,r29,r29
+	  18: GETL       	R29, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x10003E9C:  419809C4  bc 12,24,0x10004860
+	  21: Js24o       	$0x10004860
+
+
+
+. 3293 10003E88 24
+. 7D 3F 80 50 3A 09 00 0C 7F B0 E0 50 2F 1D 00 00 7F A0 EB 78 41 98 09 C4
+==== BB 3294 (0x10004860) approx BBs exec'd 0 ====
+
+	0x10004860:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10004864:  4BFFF63C  b 0x10003EA0
+	   3: JMPo       	$0x10003EA0  ($4)
+
+
+
+. 3294 10004860 8
+. 38 00 00 00 4B FF F6 3C
+==== BB 3295 (0x10003EA0) approx BBs exec'd 0 ====
+
+	0x10003EA0:  7F808214  add r28,r0,r16
+	   0: GETL       	R0, t0
+	   1: GETL       	R16, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x10003EA4:  7C15B850  subf r0,r21,r23
+	   5: GETL       	R21, t4
+	   6: GETL       	R23, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x10003EA8:  7F9C0040  cmpl cr7,r28,r0
+	  10: GETL       	R28, t8
+	  11: GETL       	R0, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x10003EAC:  40BCFB4C  bc 5,28,0x100039F8
+	  15: Jc28o       	$0x100039F8
+
+
+
+. 3295 10003EA0 16
+. 7F 80 82 14 7C 15 B8 50 7F 9C 00 40 40 BC FB 4C
+==== BB 3296 (0x10003EB0) approx BBs exec'd 0 ====
+
+	0x10003EB0:  2C190000  cmpi cr0,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x10003EB4:  41A2FD9C  bc 13,2,0x10003C50
+	   4: Js02o       	$0x10003C50
+
+
+
+. 3296 10003EB0 8
+. 2C 19 00 00 41 A2 FD 9C
+==== BB 3297 (0x10003EB8) approx BBs exec'd 0 ====
+
+	0x10003EB8:  40990028  bc 4,25,0x10003EE0
+	   0: Jc25o       	$0x10003EE0
+
+
+
+. 3297 10003EB8 4
+. 40 99 00 28
+==== BB 3298 (0x10003EE0) approx BBs exec'd 0 ====
+
+	0x10003EE0:  2F940000  cmpi cr7,r20,0
+	   0: GETL       	R20, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10003EE4:  419E093C  bc 12,30,0x10004820
+	   4: Js30o       	$0x10004820
+
+
+
+. 3298 10003EE0 8
+. 2F 94 00 00 41 9E 09 3C
+==== BB 3299 (0x10004820) approx BBs exec'd 0 ====
+
+	0x10004820:  7F23CB78  or r3,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x10004824:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x10004828:  7E058378  or r5,r16,r16
+	   6: GETL       	R16, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x1000482C:  7F398214  add r25,r25,r16
+	   9: GETL       	R25, t6
+	  10: GETL       	R16, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R25
+	  13: INCEIPL       	$4
+
+	0x10004830:  4801642D  bl 0x1001AC5C
+	  14: MOVL       	$0x10004834, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x1001AC5C  ($4)
+
+
+
+. 3299 10004820 20
+. 7F 23 CB 78 7F E4 FB 78 7E 05 83 78 7F 39 82 14 48 01 64 2D
+==== BB 3300 (0x10004834) approx BBs exec'd 0 ====
+
+	0x10004834:  4BFFF6C8  b 0x10003EFC
+	   0: JMPo       	$0x10003EFC  ($4)
+
+
+
+. 3300 10004834 4
+. 4B FF F6 C8
+==== BB 3301 (0x10003EFC) approx BBs exec'd 0 ====
+
+	0x10003EFC:  7EB5E214  add r21,r21,r28
+	   0: GETL       	R21, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x10003F00:  4BFFFD54  b 0x10003C54
+	   5: JMPo       	$0x10003C54  ($4)
+
+
+
+. 3301 10003EFC 8
+. 7E B5 E2 14 4B FF FD 54
+==== BB 3302 (0x10003C54) approx BBs exec'd 0 ====
+
+	0x10003C54:  8C1A0001  lbzu r0,1(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x10003C58:  4BFFFC6C  b 0x100038C4
+	   6: JMPo       	$0x100038C4  ($4)
+
+
+
+. 3302 10003C54 8
+. 8C 1A 00 01 4B FF FC 6C
+==== BB 3303 (0x1000477C) approx BBs exec'd 0 ====
+
+	0x1000477C:  2F900045  cmpi cr7,r16,69
+	   0: GETL       	R16, t0
+	   1: MOVL       	$0x45, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10004780:  41BEF500  bc 13,30,0x10003C80
+	   5: Js30o       	$0x10003C80
+
+
+
+. 3303 1000477C 8
+. 2F 90 00 45 41 BE F5 00
+==== BB 3304 (0x10004784) approx BBs exec'd 0 ====
+
+	0x10004784:  2F9C0002  cmpi cr7,r28,2
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10004788:  7F87E378  or r7,r28,r28
+	   5: GETL       	R28, t6
+	   6: PUTL       	t6, R7
+	   7: INCEIPL       	$4
+
+	0x1000478C:  419C02D4  bc 12,28,0x10004A60
+	   8: Js28o       	$0x10004A60
+
+
+
+. 3304 10004784 12
+. 2F 9C 00 02 7F 87 E3 78 41 9C 02 D4
+==== BB 3305 (0x10004A60) approx BBs exec'd 0 ====
+
+	0x10004A60:  2F10004F  cmpi cr6,r16,79
+	   0: GETL       	R16, t0
+	   1: MOVL       	$0x4F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x10004A64:  81180008  lwz r8,8(r24)
+	   5: GETL       	R24, t6
+	   6: ADDL       	$0x8, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R8
+	   9: INCEIPL       	$4
+
+	0x10004A68:  38E00002  li r7,2
+	  10: MOVL       	$0x2, t10
+	  11: PUTL       	t10, R7
+	  12: INCEIPL       	$4
+
+	0x10004A6C:  4BFFF4F0  b 0x10003F5C
+	  13: JMPo       	$0x10003F5C  ($4)
+
+
+
+. 3305 10004A60 16
+. 2F 10 00 4F 81 18 00 08 38 E0 00 02 4B FF F4 F0
+==== BB 3306 (0x100046B4) approx BBs exec'd 0 ====
+
+	0x100046B4:  2F900045  cmpi cr7,r16,69
+	   0: GETL       	R16, t0
+	   1: MOVL       	$0x45, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x100046B8:  41BEF5C8  bc 13,30,0x10003C80
+	   5: Js30o       	$0x10003C80
+
+
+
+. 3306 100046B4 8
+. 2F 90 00 45 41 BE F5 C8
+==== BB 3307 (0x100046BC) approx BBs exec'd 0 ====
+
+	0x100046BC:  2F9C0002  cmpi cr7,r28,2
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x100046C0:  7F87E378  or r7,r28,r28
+	   5: GETL       	R28, t6
+	   6: PUTL       	t6, R7
+	   7: INCEIPL       	$4
+
+	0x100046C4:  419C0224  bc 12,28,0x100048E8
+	   8: Js28o       	$0x100048E8
+
+
+
+. 3307 100046BC 12
+. 2F 9C 00 02 7F 87 E3 78 41 9C 02 24
+==== BB 3308 (0x100048E8) approx BBs exec'd 0 ====
+
+	0x100048E8:  2F10004F  cmpi cr6,r16,79
+	   0: GETL       	R16, t0
+	   1: MOVL       	$0x4F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x100048EC:  81180004  lwz r8,4(r24)
+	   5: GETL       	R24, t6
+	   6: ADDL       	$0x4, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R8
+	   9: INCEIPL       	$4
+
+	0x100048F0:  38E00002  li r7,2
+	  10: MOVL       	$0x2, t10
+	  11: PUTL       	t10, R7
+	  12: INCEIPL       	$4
+
+	0x100048F4:  4BFFF668  b 0x10003F5C
+	  13: JMPo       	$0x10003F5C  ($4)
+
+
+
+. 3308 100048E8 16
+. 2F 10 00 4F 81 18 00 04 38 E0 00 02 4B FF F6 68
+==== BB 3309 (0x10003E48) approx BBs exec'd 0 ====
+
+	0x10003E48:  2F9B005F  cmpi cr7,r27,95
+	   0: GETL       	R27, t0
+	   1: MOVL       	$0x5F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10003E4C:  419E0A28  bc 12,30,0x10004874
+	   5: Js30o       	$0x10004874
+
+
+
+. 3309 10003E48 8
+. 2F 9B 00 5F 41 9E 0A 28
+==== BB 3310 (0x10003E50) approx BBs exec'd 0 ====
+
+	0x10003E50:  7C15B850  subf r0,r21,r23
+	   0: GETL       	R21, t0
+	   1: GETL       	R23, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10003E54:  7F870040  cmpl cr7,r7,r0
+	   5: GETL       	R7, t4
+	   6: GETL       	R0, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x10003E58:  40BCFBA0  bc 5,28,0x100039F8
+	  10: Jc28o       	$0x100039F8
+
+
+
+. 3310 10003E50 12
+. 7C 15 B8 50 7F 87 00 40 40 BC FB A0
+==== BB 3311 (0x10003E5C) approx BBs exec'd 0 ====
+
+	0x10003E5C:  419A0A0C  bc 12,26,0x10004868
+	   0: Js26o       	$0x10004868
+
+
+
+. 3311 10003E5C 4
+. 41 9A 0A 0C
+==== BB 3312 (0x10004868) approx BBs exec'd 0 ====
+
+	0x10004868:  2C190000  cmpi cr0,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x1000486C:  4182F614  bc 12,2,0x10003E80
+	   4: Js02o       	$0x10003E80
+
+
+
+. 3312 10004868 8
+. 2C 19 00 00 41 82 F6 14
+==== BB 3313 (0x10004870) approx BBs exec'd 0 ====
+
+	0x10004870:  4BFFF694  b 0x10003F04
+	   0: JMPo       	$0x10003F04  ($4)
+
+
+
+. 3313 10004870 4
+. 4B FF F6 94
+==== BB 3314 (0x10003F04) approx BBs exec'd 0 ====
+
+	0x10003F04:  7F23CB78  or r3,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x10003F08:  38800030  li r4,48
+	   3: MOVL       	$0x30, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x10003F0C:  7FA5EB78  or r5,r29,r29
+	   6: GETL       	R29, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x10003F10:  7F39EA14  add r25,r25,r29
+	   9: GETL       	R25, t6
+	  10: GETL       	R29, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R25
+	  13: INCEIPL       	$4
+
+	0x10003F14:  48016DB1  bl 0x1001ACC4
+	  14: MOVL       	$0x10003F18, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x1001ACC4  ($4)
+
+
+
+. 3314 10003F04 20
+. 7F 23 CB 78 38 80 00 30 7F A5 EB 78 7F 39 EA 14 48 01 6D B1
+==== BB 3315 (0x1001ACC4) approx BBs exec'd 0 ====
+
+	0x1001ACC4:  396000C8  li r11,200
+	   0: MOVL       	$0xC8, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001ACC8:  4BFFFE44  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3315 1001ACC4 8
+. 39 60 00 C8 4B FF FE 44
+==== BB 3316 memset(0xFED7D24) approx BBs exec'd 0 ====
+
+	0xFED7D24:  28850004  cmpli cr1,r5,4
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x4, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFED7D28:  70670003  andi. r7,r3,0x3
+	   5: GETL       	R3, t6
+	   6: ANDL       	$0x3, t6
+	   7: PUTL       	t6, R7
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0xFED7D2C:  7C661B78  or r6,r3,r3
+	  11: GETL       	R3, t10
+	  12: PUTL       	t10, R6
+	  13: INCEIPL       	$4
+
+	0xFED7D30:  40850190  bc 4,5,0xFED7EC0
+	  14: Jc05o       	$0xFED7EC0
+
+
+
+. 3316 FED7D24 16
+. 28 85 00 04 70 67 00 03 7C 66 1B 78 40 85 01 90
+==== BB 3317 (0xFED7EC0) approx BBs exec'd 0 ====
+
+	0xFED7EC0:  2A850001  cmpli cr5,r5,1
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x5, CR
+	   4: INCEIPL       	$4
+
+	0xFED7EC4:  28850003  cmpli cr1,r5,3
+	   5: GETL       	R5, t6
+	   6: MOVL       	$0x3, t10
+	   7: CMPUL       	t6, t10, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFED7EC8:  4D940020  bclr 12,20
+	  10: GETL       	LR, t12
+	  11: Js20o-r       	t12
+
+
+
+. 3317 FED7EC0 12
+. 2A 85 00 01 28 85 00 03 4D 94 00 20
+==== BB 3318 (0xFED7ECC) approx BBs exec'd 0 ====
+
+	0xFED7ECC:  98860000  stb r4,0(r6)
+	   0: GETL       	R4, t0
+	   1: GETL       	R6, t2
+	   2: STB       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFED7ED0:  4D960020  bclr 12,22
+	   4: GETL       	LR, t4
+	   5: Js22o-r       	t4
+
+
+
+. 3318 FED7ECC 8
+. 98 86 00 00 4D 96 00 20
+==== BB 3319 (0x10003F18) approx BBs exec'd 0 ====
+
+	0x10003F18:  4BFFFF68  b 0x10003E80
+	   0: JMPo       	$0x10003E80  ($4)
+
+
+
+. 3319 10003F18 4
+. 4B FF FF 68
+==== BB 3320 (0x10003E80) approx BBs exec'd 0 ====
+
+	0x10003E80:  7EB5EA14  add r21,r21,r29
+	   0: GETL       	R21, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x10003E84:  3B800000  li r28,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x10003E88:  7D3F8050  subf r9,r31,r16
+	   8: GETL       	R31, t6
+	   9: GETL       	R16, t8
+	  10: SUBL       	t6, t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0x10003E8C:  3A09000C  addi r16,r9,12
+	  13: GETL       	R9, t10
+	  14: ADDL       	$0xC, t10
+	  15: PUTL       	t10, R16
+	  16: INCEIPL       	$4
+
+	0x10003E90:  7FB0E050  subf r29,r16,r28
+	  17: GETL       	R16, t12
+	  18: GETL       	R28, t14
+	  19: SUBL       	t12, t14
+	  20: PUTL       	t14, R29
+	  21: INCEIPL       	$4
+
+	0x10003E94:  2F1D0000  cmpi cr6,r29,0
+	  22: GETL       	R29, t16
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x6, CR
+	  25: INCEIPL       	$4
+
+	0x10003E98:  7FA0EB78  or r0,r29,r29
+	  26: GETL       	R29, t20
+	  27: PUTL       	t20, R0
+	  28: INCEIPL       	$4
+
+	0x10003E9C:  419809C4  bc 12,24,0x10004860
+	  29: Js24o       	$0x10004860
+
+
+
+. 3320 10003E80 32
+. 7E B5 EA 14 3B 80 00 00 7D 3F 80 50 3A 09 00 0C 7F B0 E0 50 2F 1D 00 00 7F A0 EB 78 41 98 09 C4
+==== BB 3321 (0x1000441C) approx BBs exec'd 0 ====
+
+	0x1000441C:  2F900045  cmpi cr7,r16,69
+	   0: GETL       	R16, t0
+	   1: MOVL       	$0x45, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10004420:  41BEF860  bc 13,30,0x10003C80
+	   5: Js30o       	$0x10003C80
+
+
+
+. 3321 1000441C 8
+. 2F 90 00 45 41 BE F8 60
+==== BB 3322 (0x10004424) approx BBs exec'd 0 ====
+
+	0x10004424:  2F9C0002  cmpi cr7,r28,2
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10004428:  7F87E378  or r7,r28,r28
+	   5: GETL       	R28, t6
+	   6: PUTL       	t6, R7
+	   7: INCEIPL       	$4
+
+	0x1000442C:  419C06A4  bc 12,28,0x10004AD0
+	   8: Js28o       	$0x10004AD0
+
+
+
+. 3322 10004424 12
+. 2F 9C 00 02 7F 87 E3 78 41 9C 06 A4
+==== BB 3323 (0x10004AD0) approx BBs exec'd 0 ====
+
+	0x10004AD0:  2F10004F  cmpi cr6,r16,79
+	   0: GETL       	R16, t0
+	   1: MOVL       	$0x4F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x10004AD4:  81180000  lwz r8,0(r24)
+	   5: GETL       	R24, t6
+	   6: LDL       	(t6), t8
+	   7: PUTL       	t8, R8
+	   8: INCEIPL       	$4
+
+	0x10004AD8:  38E00002  li r7,2
+	   9: MOVL       	$0x2, t10
+	  10: PUTL       	t10, R7
+	  11: INCEIPL       	$4
+
+	0x10004ADC:  4BFFF480  b 0x10003F5C
+	  12: JMPo       	$0x10003F5C  ($4)
+
+
+
+. 3323 10004AD0 16
+. 2F 10 00 4F 81 18 00 00 38 E0 00 02 4B FF F4 80
+==== BB 3324 (0x1001ACC4) approx BBs exec'd 0 ====
+
+	0x1001ACC4:  4BEBD060  b 0xFED7D24
+	   0: JMPo       	$0xFED7D24  ($4)
+
+
+
+. 3324 1001ACC4 4
+. 4B EB D0 60
+==== BB 3325 (0x100045C0) approx BBs exec'd 0 ====
+
+	0x100045C0:  2F8A0000  cmpi cr7,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x100045C4:  419E0010  bc 12,30,0x100045D4
+	   4: Js30o       	$0x100045D4
+
+
+
+. 3325 100045C0 8
+. 2F 8A 00 00 41 9E 00 10
+==== BB 3326 (0x100045D4) approx BBs exec'd 0 ====
+
+	0x100045D4:  2F920000  cmpi cr7,r18,0
+	   0: GETL       	R18, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x100045D8:  419E0480  bc 12,30,0x10004A58
+	   4: Js30o       	$0x10004A58
+
+
+
+. 3326 100045D4 8
+. 2F 92 00 00 41 9E 04 80
+==== BB 3327 (0x100045DC) approx BBs exec'd 0 ====
+
+	0x100045DC:  7E439378  or r3,r18,r18
+	   0: GETL       	R18, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x100045E0:  4801661D  bl 0x1001ABFC
+	   3: MOVL       	$0x100045E4, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x1001ABFC  ($4)
+
+
+
+. 3327 100045DC 8
+. 7E 43 93 78 48 01 66 1D
+==== BB 3328 (0x1001ABFC) approx BBs exec'd 0 ====
+
+	0x1001ABFC:  39600064  li r11,100
+	   0: MOVL       	$0x64, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AC00:  4BFFFF0C  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3328 1001ABFC 8
+. 39 60 00 64 4B FF FF 0C
+==== BB 3329 (0x100045E4) approx BBs exec'd 0 ====
+
+	0x100045E4:  7FE3E050  subf r31,r3,r28
+	   0: GETL       	R3, t0
+	   1: GETL       	R28, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x100045E8:  7C7D1B78  or r29,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x100045EC:  2F1F0000  cmpi cr6,r31,0
+	   8: GETL       	R31, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x100045F0:  7FE0FB78  or r0,r31,r31
+	  12: GETL       	R31, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x100045F4:  41980488  bc 12,24,0x10004A7C
+	  15: Js24o       	$0x10004A7C
+
+
+
+. 3329 100045E4 20
+. 7F E3 E0 50 7C 7D 1B 78 2F 1F 00 00 7F E0 FB 78 41 98 04 88
+==== BB 3330 (0x10004A7C) approx BBs exec'd 0 ====
+
+	0x10004A7C:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10004A80:  4BFFFB78  b 0x100045F8
+	   3: JMPo       	$0x100045F8  ($4)
+
+
+
+. 3330 10004A7C 8
+. 38 00 00 00 4B FF FB 78
+==== BB 3331 (0x100045F8) approx BBs exec'd 0 ====
+
+	0x100045F8:  7F80EA14  add r28,r0,r29
+	   0: GETL       	R0, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x100045FC:  7C15B850  subf r0,r21,r23
+	   5: GETL       	R21, t4
+	   6: GETL       	R23, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x10004600:  7F9C0040  cmpl cr7,r28,r0
+	  10: GETL       	R28, t8
+	  11: GETL       	R0, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x10004604:  40BCF3F4  bc 5,28,0x100039F8
+	  15: Jc28o       	$0x100039F8
+
+
+
+. 3331 100045F8 16
+. 7F 80 EA 14 7C 15 B8 50 7F 9C 00 40 40 BC F3 F4
+==== BB 3332 (0x10004608) approx BBs exec'd 0 ====
+
+	0x10004608:  2C190000  cmpi cr0,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x1000460C:  41A2F644  bc 13,2,0x10003C50
+	   4: Js02o       	$0x10003C50
+
+
+
+. 3332 10004608 8
+. 2C 19 00 00 41 A2 F6 44
+==== BB 3333 (0x10004610) approx BBs exec'd 0 ====
+
+	0x10004610:  40990028  bc 4,25,0x10004638
+	   0: Jc25o       	$0x10004638
+
+
+
+. 3333 10004610 4
+. 40 99 00 28
+==== BB 3334 (0x10004638) approx BBs exec'd 0 ====
+
+	0x10004638:  8001045C  lwz r0,1116(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x45C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x1000463C:  2F800000  cmpi cr7,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x10004640:  409E03FC  bc 4,30,0x10004A3C
+	   9: Jc30o       	$0x10004A3C
+
+
+
+. 3334 10004638 12
+. 80 01 04 5C 2F 80 00 00 40 9E 03 FC
+==== BB 3335 (0x10004644) approx BBs exec'd 0 ====
+
+	0x10004644:  2F940000  cmpi cr7,r20,0
+	   0: GETL       	R20, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10004648:  419E0278  bc 12,30,0x100048C0
+	   4: Js30o       	$0x100048C0
+
+
+
+. 3335 10004644 8
+. 2F 94 00 00 41 9E 02 78
+==== BB 3336 (0x100048C0) approx BBs exec'd 0 ====
+
+	0x100048C0:  7F23CB78  or r3,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x100048C4:  7E449378  or r4,r18,r18
+	   3: GETL       	R18, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x100048C8:  7FA5EB78  or r5,r29,r29
+	   6: GETL       	R29, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x100048CC:  7F39EA14  add r25,r25,r29
+	   9: GETL       	R25, t6
+	  10: GETL       	R29, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R25
+	  13: INCEIPL       	$4
+
+	0x100048D0:  4801638D  bl 0x1001AC5C
+	  14: MOVL       	$0x100048D4, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x1001AC5C  ($4)
+
+
+
+. 3336 100048C0 20
+. 7F 23 CB 78 7E 44 93 78 7F A5 EB 78 7F 39 EA 14 48 01 63 8D
+==== BB 3337 (0x100048D4) approx BBs exec'd 0 ====
+
+	0x100048D4:  4BFFFD8C  b 0x10004660
+	   0: JMPo       	$0x10004660  ($4)
+
+
+
+. 3337 100048D4 4
+. 4B FF FD 8C
+==== BB 3338 (0x10004660) approx BBs exec'd 0 ====
+
+	0x10004660:  7EB5E214  add r21,r21,r28
+	   0: GETL       	R21, t0
+	   1: GETL       	R28, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x10004664:  4BFFF5F0  b 0x10003C54
+	   5: JMPo       	$0x10003C54  ($4)
+
+
+
+. 3338 10004660 8
+. 7E B5 E2 14 4B FF F5 F0
+==== BB 3339 (0x10004668) approx BBs exec'd 0 ====
+
+	0x10004668:  2F900045  cmpi cr7,r16,69
+	   0: GETL       	R16, t0
+	   1: MOVL       	$0x45, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x1000466C:  41BEF6B4  bc 13,30,0x10003D20
+	   5: Js30o       	$0x10003D20
+
+
+
+. 3339 10004668 8
+. 2F 90 00 45 41 BE F6 B4
+==== BB 3340 (0x10004670) approx BBs exec'd 0 ====
+
+	0x10004670:  2F10004F  cmpi cr6,r16,79
+	   0: GETL       	R16, t0
+	   1: MOVL       	$0x4F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x10004674:  41BAF60C  bc 13,26,0x10003C80
+	   5: Js26o       	$0x10003C80
+
+
+
+. 3340 10004670 8
+. 2F 10 00 4F 41 BA F6 0C
+==== BB 3341 (0x10004678) approx BBs exec'd 0 ====
+
+	0x10004678:  2F9C0001  cmpi cr7,r28,1
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x1000467C:  7F87E378  or r7,r28,r28
+	   5: GETL       	R28, t6
+	   6: PUTL       	t6, R7
+	   7: INCEIPL       	$4
+
+	0x10004680:  419C0438  bc 12,28,0x10004AB8
+	   8: Js28o       	$0x10004AB8
+
+
+
+. 3341 10004678 12
+. 2F 9C 00 01 7F 87 E3 78 41 9C 04 38
+==== BB 3342 (0x10004AB8) approx BBs exec'd 0 ====
+
+	0x10004AB8:  81380014  lwz r9,20(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x10004ABC:  38E00001  li r7,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x10004AC0:  3909076C  addi r8,r9,1900
+	   8: GETL       	R9, t6
+	   9: ADDL       	$0x76C, t6
+	  10: PUTL       	t6, R8
+	  11: INCEIPL       	$4
+
+	0x10004AC4:  4BFFF498  b 0x10003F5C
+	  12: JMPo       	$0x10003F5C  ($4)
+
+
+
+. 3342 10004AB8 16
+. 81 38 00 14 38 E0 00 01 39 09 07 6C 4B FF F4 98
+==== BB 3343 (0x100038CC) approx BBs exec'd 0 ====
+
+	0x100038CC:  3137FFFF  addic r9,r23,-1
+	   0: GETL       	R23, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x100038D0:  7C09B910  subfe r0,r9,r23
+	   4: GETL       	R9, t2
+	   5: GETL       	R23, t4
+	   6: SBBL       	t2, t4  (-rCa-wCa)
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x100038D4:  3179FFFF  addic r11,r25,-1
+	   9: GETL       	R25, t6
+	  10: ADCL       	$0xFFFFFFFF, t6  (-wCa)
+	  11: PUTL       	t6, R11
+	  12: INCEIPL       	$4
+
+	0x100038D8:  7D2BC910  subfe r9,r11,r25
+	  13: GETL       	R11, t8
+	  14: GETL       	R25, t10
+	  15: SBBL       	t8, t10  (-rCa-wCa)
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x100038DC:  7D2B0039  and. r11,r9,r0
+	  18: GETL       	R9, t12
+	  19: GETL       	R0, t14
+	  20: ANDL       	t12, t14
+	  21: PUTL       	t14, R11
+	  22: CMP0L       	t14, t16  (-rSo)
+	  23: ICRFL       	t16, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0x100038E0:  4182000C  bc 12,2,0x100038EC
+	  25: Js02o       	$0x100038EC
+
+
+
+. 3343 100038CC 24
+. 31 37 FF FF 7C 09 B9 10 31 79 FF FF 7D 2B C9 10 7D 2B 00 39 41 82 00 0C
+==== BB 3344 (0x100038E4) approx BBs exec'd 0 ====
+
+	0x100038E4:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x100038E8:  98190000  stb r0,0(r25)
+	   3: GETL       	R0, t2
+	   4: GETL       	R25, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0x100038EC:  7EA3AB78  or r3,r21,r21
+	   7: GETL       	R21, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x100038F0:  4800010C  b 0x100039FC
+	  10: JMPo       	$0x100039FC  ($4)
+
+
+
+. 3344 100038E4 16
+. 38 00 00 00 98 19 00 00 7E A3 AB 78 48 00 01 0C
+==== BB 3345 (0x100039FC) approx BBs exec'd 0 ====
+
+	0x100039FC:  800104C4  lwz r0,1220(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x4C4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10003A00:  81C10478  lwz r14,1144(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x478, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R14
+	   9: INCEIPL       	$4
+
+	0x10003A04:  81E1047C  lwz r15,1148(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x47C, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R15
+	  14: INCEIPL       	$4
+
+	0x10003A08:  7C0803A6  mtlr r0
+	  15: GETL       	R0, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x10003A0C:  82010480  lwz r16,1152(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x480, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R16
+	  22: INCEIPL       	$4
+
+	0x10003A10:  82210484  lwz r17,1156(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x484, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R17
+	  27: INCEIPL       	$4
+
+	0x10003A14:  82410488  lwz r18,1160(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x488, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R18
+	  32: INCEIPL       	$4
+
+	0x10003A18:  8261048C  lwz r19,1164(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x48C, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R19
+	  37: INCEIPL       	$4
+
+	0x10003A1C:  82810490  lwz r20,1168(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x490, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R20
+	  42: INCEIPL       	$4
+
+	0x10003A20:  82A10494  lwz r21,1172(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x494, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R21
+	  47: INCEIPL       	$4
+
+	0x10003A24:  82C10498  lwz r22,1176(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x498, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R22
+	  52: INCEIPL       	$4
+
+	0x10003A28:  82E1049C  lwz r23,1180(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x49C, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R23
+	  57: INCEIPL       	$4
+
+	0x10003A2C:  830104A0  lwz r24,1184(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x4A0, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R24
+	  62: INCEIPL       	$4
+
+	0x10003A30:  832104A4  lwz r25,1188(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x4A4, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R25
+	  67: INCEIPL       	$4
+
+	0x10003A34:  834104A8  lwz r26,1192(r1)
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x4A8, t54
+	  70: LDL       	(t54), t56
+	  71: PUTL       	t56, R26
+	  72: INCEIPL       	$4
+
+	0x10003A38:  836104AC  lwz r27,1196(r1)
+	  73: GETL       	R1, t58
+	  74: ADDL       	$0x4AC, t58
+	  75: LDL       	(t58), t60
+	  76: PUTL       	t60, R27
+	  77: INCEIPL       	$4
+
+	0x10003A3C:  838104B0  lwz r28,1200(r1)
+	  78: GETL       	R1, t62
+	  79: ADDL       	$0x4B0, t62
+	  80: LDL       	(t62), t64
+	  81: PUTL       	t64, R28
+	  82: INCEIPL       	$4
+
+	0x10003A40:  83A104B4  lwz r29,1204(r1)
+	  83: GETL       	R1, t66
+	  84: ADDL       	$0x4B4, t66
+	  85: LDL       	(t66), t68
+	  86: PUTL       	t68, R29
+	  87: INCEIPL       	$4
+
+	0x10003A44:  83C104B8  lwz r30,1208(r1)
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x4B8, t70
+	  90: LDL       	(t70), t72
+	  91: PUTL       	t72, R30
+	  92: INCEIPL       	$4
+
+	0x10003A48:  83E104BC  lwz r31,1212(r1)
+	  93: GETL       	R1, t74
+	  94: ADDL       	$0x4BC, t74
+	  95: LDL       	(t74), t76
+	  96: PUTL       	t76, R31
+	  97: INCEIPL       	$4
+
+	0x10003A4C:  382104C0  addi r1,r1,1216
+	  98: GETL       	R1, t78
+	  99: ADDL       	$0x4C0, t78
+	 100: PUTL       	t78, R1
+	 101: INCEIPL       	$4
+
+	0x10003A50:  4E800020  blr
+	 102: GETL       	LR, t80
+	 103: JMPo-r       	t80  ($4)
+
+
+
+. 3345 100039FC 88
+. 80 01 04 C4 81 C1 04 78 81 E1 04 7C 7C 08 03 A6 82 01 04 80 82 21 04 84 82 41 04 88 82 61 04 8C 82 81 04 90 82 A1 04 94 82 C1 04 98 82 E1 04 9C 83 01 04 A0 83 21 04 A4 83 41 04 A8 83 61 04 AC 83 81 04 B0 83 A1 04 B4 83 C1 04 B8 83 E1 04 BC 38 21 04 C0 4E 80 00 20
+==== BB 3346 (0x10001410) approx BBs exec'd 0 ====
+
+	0x10001410:  809E0074  lwz r4,116(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x10001414:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x10001418:  38600006  li r3,6
+	   9: MOVL       	$0x6, t8
+	  10: PUTL       	t8, R3
+	  11: INCEIPL       	$4
+
+	0x1000141C:  409E0068  bc 4,30,0x10001484
+	  12: Jc30o       	$0x10001484
+
+
+
+. 3346 10001410 16
+. 80 9E 00 74 2F 83 00 00 38 60 00 06 40 9E 00 68
+==== BB 3347 (0x10001484) approx BBs exec'd 0 ====
+
+	0x10001484:  801F0000  lwz r0,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x10001488:  3B800001  li r28,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R28
+	   6: INCEIPL       	$4
+
+	0x1000148C:  2F800000  cmpi cr7,r0,0
+	   7: GETL       	R0, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x10001490:  419EFFA8  bc 12,30,0x10001438
+	  11: Js30o       	$0x10001438
+
+
+
+. 3347 10001484 16
+. 80 1F 00 00 3B 80 00 01 2F 80 00 00 41 9E FF A8
+==== BB 3348 (0x10001438) approx BBs exec'd 0 ====
+
+	0x10001438:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x1000143C:  419EFF88  bc 12,30,0x100013C4
+	   4: Js30o       	$0x100013C4
+
+
+
+. 3348 10001438 8
+. 2F 9C 00 00 41 9E FF 88
+==== BB 3349 (0x10001440) approx BBs exec'd 0 ====
+
+	0x10001440:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x10001444:  48019781  bl 0x1001ABC4
+	   3: MOVL       	$0x10001448, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x1001ABC4  ($4)
+
+
+
+. 3349 10001440 8
+. 7F A3 EB 78 48 01 97 81
+==== BB 3350 (0x1001ABC4) approx BBs exec'd 0 ====
+
+	0x1001ABC4:  39600048  li r11,72
+	   0: MOVL       	$0x48, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001ABC8:  4BFFFF44  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3350 1001ABC4 8
+. 39 60 00 48 4B FF FF 44
+==== BB 3351 _IO_puts(0xFEBE3E0) approx BBs exec'd 0 ====
+
+	0xFEBE3E0:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEBE3E4:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFEBE3E8:  480E9A69  bl 0xFFA7E50
+	   9: MOVL       	$0xFEBE3EC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3351 FEBE3E0 12
+. 94 21 FF D0 7C 88 02 A6 48 0E 9A 69
+==== BB 3352 (0xFEBE3EC) approx BBs exec'd 0 ====
+
+	0xFEBE3EC:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEBE3F0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEBE3F4:  93810020  stw r28,32(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x20, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEBE3F8:  93210014  stw r25,20(r1)
+	  13: GETL       	R25, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEBE3FC:  3B20FFFF  li r25,-1
+	  18: MOVL       	$0xFFFFFFFF, t14
+	  19: PUTL       	t14, R25
+	  20: INCEIPL       	$4
+
+	0xFEBE400:  93410018  stw r26,24(r1)
+	  21: GETL       	R26, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x18, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEBE404:  7C7A1B78  or r26,r3,r3
+	  26: GETL       	R3, t20
+	  27: PUTL       	t20, R26
+	  28: INCEIPL       	$4
+
+	0xFEBE408:  839E1C24  lwz r28,7204(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x1C24, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R28
+	  33: INCEIPL       	$4
+
+	0xFEBE40C:  9361001C  stw r27,28(r1)
+	  34: GETL       	R27, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x1C, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFEBE410:  93E1002C  stw r31,44(r1)
+	  39: GETL       	R31, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x2C, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFEBE414:  93A10024  stw r29,36(r1)
+	  44: GETL       	R29, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x24, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0xFEBE418:  90810034  stw r4,52(r1)
+	  49: GETL       	R4, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x34, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFEBE41C:  480186BD  bl 0xFED6AD8
+	  54: MOVL       	$0xFEBE420, t42
+	  55: PUTL       	t42, LR
+	  56: JMPo-c       	$0xFED6AD8  ($4)
+
+
+
+. 3352 FEBE3EC 52
+. 93 C1 00 28 7F C8 02 A6 93 81 00 20 93 21 00 14 3B 20 FF FF 93 41 00 18 7C 7A 1B 78 83 9E 1C 24 93 61 00 1C 93 E1 00 2C 93 A1 00 24 90 81 00 34 48 01 86 BD
+==== BB 3353 (0xFEBE420) approx BBs exec'd 0 ====
+
+	0xFEBE420:  83FC0000  lwz r31,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0xFEBE424:  7C7B1B78  or r27,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R27
+	   6: INCEIPL       	$4
+
+	0xFEBE428:  801F0000  lwz r0,0(r31)
+	   7: GETL       	R31, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R0
+	  10: INCEIPL       	$4
+
+	0xFEBE42C:  70098000  andi. r9,r0,0x8000
+	  11: GETL       	R0, t10
+	  12: ANDL       	$0x8000, t10
+	  13: PUTL       	t10, R9
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFEBE430:  40820054  bc 4,2,0xFEBE484
+	  17: Jc02o       	$0xFEBE484
+
+
+
+. 3353 FEBE420 20
+. 83 FC 00 00 7C 7B 1B 78 80 1F 00 00 70 09 80 00 40 82 00 54
+==== BB 3354 (0xFEBE434) approx BBs exec'd 0 ====
+
+	0xFEBE434:  807F0048  lwz r3,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEBE438:  3BA28BA0  addi r29,r2,-29792
+	   5: GETL       	R2, t4
+	   6: ADDL       	$0xFFFF8BA0, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0xFEBE43C:  80A30008  lwz r5,8(r3)
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x8, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0xFEBE440:  7F85E800  cmp cr7,r5,r29
+	  14: GETL       	R5, t10
+	  15: GETL       	R29, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFEBE444:  419E0034  bc 12,30,0xFEBE478
+	  19: Js30o       	$0xFEBE478
+
+
+
+. 3354 FEBE434 20
+. 80 7F 00 48 3B A2 8B A0 80 A3 00 08 7F 85 E8 00 41 9E 00 34
+==== BB 3355 (0xFEBE448) approx BBs exec'd 0 ====
+
+	0xFEBE448:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFEBE44C:  38C00001  li r6,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0xFEBE450:  7D201828  lwarx r9,r0,r3
+	   6: GETL       	R3, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0xFEBE454:  7C095800  cmp cr0,r9,r11
+	  11: GETL       	R9, t8
+	  12: GETL       	R11, t10
+	  13: CMPL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFEBE458:  4082000C  bc 4,2,0xFEBE464
+	  16: Jc02o       	$0xFEBE464
+
+
+
+. 3355 FEBE448 20
+. 39 60 00 00 38 C0 00 01 7D 20 18 28 7C 09 58 00 40 82 00 0C
+==== BB 3356 (0xFEBE45C) approx BBs exec'd 0 ====
+
+	0xFEBE45C:  7CC0192D  stwcx. r6,r0,r3
+	   0: GETL       	R3, t0
+	   1: GETL       	R6, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEBE460:  40A2FFF0  bc 5,2,0xFEBE450
+	   6: Jc02o       	$0xFEBE450
+
+
+
+. 3356 FEBE45C 8
+. 7C C0 19 2D 40 A2 FF F0
+==== BB 3357 (0xFEBE450) approx BBs exec'd 0 ====
+
+	0xFEBE450:  7D201828  lwarx r9,r0,r3
+	   0: GETL       	R3, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEBE454:  7C095800  cmp cr0,r9,r11
+	   5: GETL       	R9, t4
+	   6: GETL       	R11, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEBE458:  4082000C  bc 4,2,0xFEBE464
+	  10: Jc02o       	$0xFEBE464
+
+
+
+. 3357 FEBE450 12
+. 7D 20 18 28 7C 09 58 00 40 82 00 0C
+==== BB 3358 (0xFEBE464) approx BBs exec'd 0 ====
+
+	0xFEBE464:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFEBE468:  2C890000  cmpi cr1,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFEBE46C:  4086019C  bc 4,6,0xFEBE608
+	   5: Jc06o       	$0xFEBE608
+
+
+
+. 3358 FEBE464 12
+. 4C 00 01 2C 2C 89 00 00 40 86 01 9C
+==== BB 3359 (0xFEBE470) approx BBs exec'd 0 ====
+
+	0xFEBE470:  807F0048  lwz r3,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEBE474:  93A30008  stw r29,8(r3)
+	   5: GETL       	R29, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEBE478:  81030004  lwz r8,4(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0xFEBE47C:  38E80001  addi r7,r8,1
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0xFEBE480:  90E30004  stw r7,4(r3)
+	  19: GETL       	R7, t14
+	  20: GETL       	R3, t16
+	  21: ADDL       	$0x4, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFEBE484:  807C0000  lwz r3,0(r28)
+	  24: GETL       	R28, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R3
+	  27: INCEIPL       	$4
+
+	0xFEBE488:  89430046  lbz r10,70(r3)
+	  28: GETL       	R3, t22
+	  29: ADDL       	$0x46, t22
+	  30: LDB       	(t22), t24
+	  31: PUTL       	t24, R10
+	  32: INCEIPL       	$4
+
+	0xFEBE48C:  2F0A0000  cmpi cr6,r10,0
+	  33: GETL       	R10, t26
+	  34: CMP0L       	t26, t28  (-rSo)
+	  35: ICRFL       	t28, $0x6, CR
+	  36: INCEIPL       	$4
+
+	0xFEBE490:  409A00A4  bc 4,26,0xFEBE534
+	  37: Jc26o       	$0xFEBE534
+
+
+
+. 3359 FEBE470 36
+. 80 7F 00 48 93 A3 00 08 81 03 00 04 38 E8 00 01 90 E3 00 04 80 7C 00 00 89 43 00 46 2F 0A 00 00 40 9A 00 A4
+==== BB 3360 (0xFEBE494) approx BBs exec'd 0 ====
+
+	0xFEBE494:  819E1DC8  lwz r12,7624(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1DC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFEBE498:  2C0C0000  cmpi cr0,r12,0
+	   5: GETL       	R12, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFEBE49C:  41820160  bc 12,2,0xFEBE5FC
+	   9: Js02o       	$0xFEBE5FC
+
+
+
+. 3360 FEBE494 12
+. 81 9E 1D C8 2C 0C 00 00 41 82 01 60
+==== BB 3361 (0xFEBE4A0) approx BBs exec'd 0 ====
+
+	0xFEBE4A0:  83A30060  lwz r29,96(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFEBE4A4:  2F9D0000  cmpi cr7,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFEBE4A8:  419E0080  bc 12,30,0xFEBE528
+	   9: Js30o       	$0xFEBE528
+
+
+
+. 3361 FEBE4A0 12
+. 83 A3 00 60 2F 9D 00 00 41 9E 00 80
+==== BB 3362 (0xFEBE528) approx BBs exec'd 0 ====
+
+	0xFEBE528:  3800FFFF  li r0,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEBE52C:  90030060  stw r0,96(r3)
+	   3: GETL       	R0, t2
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x60, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFEBE530:  4BFFFF7C  b 0xFEBE4AC
+	   8: JMPo       	$0xFEBE4AC  ($4)
+
+
+
+. 3362 FEBE528 12
+. 38 00 FF FF 90 03 00 60 4B FF FF 7C
+==== BB 3363 (0xFEBE4AC) approx BBs exec'd 0 ====
+
+	0xFEBE4AC:  80630060  lwz r3,96(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEBE4B0:  2C83FFFF  cmpi cr1,r3,-1
+	   5: GETL       	R3, t4
+	   6: MOVL       	$0xFFFFFFFF, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFEBE4B4:  41860080  bc 12,6,0xFEBE534
+	  10: Js06o       	$0xFEBE534
+
+
+
+. 3363 FEBE4AC 12
+. 80 63 00 60 2C 83 FF FF 41 86 00 80
+==== BB 3364 (0xFEBE534) approx BBs exec'd 0 ====
+
+	0xFEBE534:  80DC0000  lwz r6,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0xFEBE538:  7F44D378  or r4,r26,r26
+	   4: GETL       	R26, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0xFEBE53C:  88E60046  lbz r7,70(r6)
+	   7: GETL       	R6, t6
+	   8: ADDL       	$0x46, t6
+	   9: LDB       	(t6), t8
+	  10: PUTL       	t8, R7
+	  11: INCEIPL       	$4
+
+	0xFEBE540:  7CC33378  or r3,r6,r6
+	  12: GETL       	R6, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0xFEBE544:  7CE90774  extsb r9,r7
+	  15: GETB       	R7, t12
+	  16: WIDENL_Bs       	_st12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0xFEBE548:  7CA93214  add r5,r9,r6
+	  19: GETL       	R9, t14
+	  20: GETL       	R6, t16
+	  21: ADDL       	t14, t16
+	  22: PUTL       	t16, R5
+	  23: INCEIPL       	$4
+
+	0xFEBE54C:  81650098  lwz r11,152(r5)
+	  24: GETL       	R5, t18
+	  25: ADDL       	$0x98, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R11
+	  28: INCEIPL       	$4
+
+	0xFEBE550:  7F65DB78  or r5,r27,r27
+	  29: GETL       	R27, t22
+	  30: PUTL       	t22, R5
+	  31: INCEIPL       	$4
+
+	0xFEBE554:  834B001C  lwz r26,28(r11)
+	  32: GETL       	R11, t24
+	  33: ADDL       	$0x1C, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R26
+	  36: INCEIPL       	$4
+
+	0xFEBE558:  7F4903A6  mtctr r26
+	  37: GETL       	R26, t28
+	  38: PUTL       	t28, CTR
+	  39: INCEIPL       	$4
+
+	0xFEBE55C:  4E800421  bctrl
+	  40: MOVL       	$0xFEBE560, t30
+	  41: PUTL       	t30, LR
+	  42: GETL       	CTR, t32
+	  43: JMPo-c       	t32  ($4)
+
+
+
+. 3364 FEBE534 44
+. 80 DC 00 00 7F 44 D3 78 88 E6 00 46 7C C3 33 78 7C E9 07 74 7C A9 32 14 81 65 00 98 7F 65 DB 78 83 4B 00 1C 7F 49 03 A6 4E 80 04 21
+==== BB 3365 _IO_file_xsputn@@GLIBC_2.1(0xFECA658) approx BBs exec'd 0 ====
+
+	0xFECA658:  2F850000  cmpi cr7,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFECA65C:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFECA660:  9421FFD0  stwu r1,-48(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFD0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECA664:  93010010  stw r24,16(r1)
+	  13: GETL       	R24, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x10, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFECA668:  3B000000  li r24,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R24
+	  20: INCEIPL       	$4
+
+	0xFECA66C:  93210014  stw r25,20(r1)
+	  21: GETL       	R25, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFECA670:  7CB92B78  or r25,r5,r5
+	  26: GETL       	R5, t20
+	  27: PUTL       	t20, R25
+	  28: INCEIPL       	$4
+
+	0xFECA674:  93410018  stw r26,24(r1)
+	  29: GETL       	R26, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x18, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFECA678:  7CBA2B78  or r26,r5,r5
+	  34: GETL       	R5, t26
+	  35: PUTL       	t26, R26
+	  36: INCEIPL       	$4
+
+	0xFECA67C:  9361001C  stw r27,28(r1)
+	  37: GETL       	R27, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x1C, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFECA680:  7C9B2378  or r27,r4,r4
+	  42: GETL       	R4, t32
+	  43: PUTL       	t32, R27
+	  44: INCEIPL       	$4
+
+	0xFECA684:  93810020  stw r28,32(r1)
+	  45: GETL       	R28, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x20, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFECA688:  7C9C2378  or r28,r4,r4
+	  50: GETL       	R4, t38
+	  51: PUTL       	t38, R28
+	  52: INCEIPL       	$4
+
+	0xFECA68C:  93A10024  stw r29,36(r1)
+	  53: GETL       	R29, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x24, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0xFECA690:  7C7D1B78  or r29,r3,r3
+	  58: GETL       	R3, t44
+	  59: PUTL       	t44, R29
+	  60: INCEIPL       	$4
+
+	0xFECA694:  93C10028  stw r30,40(r1)
+	  61: GETL       	R30, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x28, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0xFECA698:  38600000  li r3,0
+	  66: MOVL       	$0x0, t50
+	  67: PUTL       	t50, R3
+	  68: INCEIPL       	$4
+
+	0xFECA69C:  93E1002C  stw r31,44(r1)
+	  69: GETL       	R31, t52
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x2C, t54
+	  72: STL       	t52, (t54)
+	  73: INCEIPL       	$4
+
+	0xFECA6A0:  90010034  stw r0,52(r1)
+	  74: GETL       	R0, t56
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x34, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0xFECA6A4:  419E006C  bc 12,30,0xFECA710
+	  79: Js30o       	$0xFECA710
+
+
+
+. 3365 FECA658 80
+. 2F 85 00 00 7C 08 02 A6 94 21 FF D0 93 01 00 10 3B 00 00 00 93 21 00 14 7C B9 2B 78 93 41 00 18 7C BA 2B 78 93 61 00 1C 7C 9B 23 78 93 81 00 20 7C 9C 23 78 93 A1 00 24 7C 7D 1B 78 93 C1 00 28 38 60 00 00 93 E1 00 2C 90 01 00 34 41 9E 00 6C
+==== BB 3366 (0xFECA6A8) approx BBs exec'd 0 ====
+
+	0xFECA6A8:  817D0000  lwz r11,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0xFECA6AC:  809D0018  lwz r4,24(r29)
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x18, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0xFECA6B0:  71690200  andi. r9,r11,0x200
+	   9: GETL       	R11, t8
+	  10: ANDL       	$0x200, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFECA6B4:  807D0014  lwz r3,20(r29)
+	  15: GETL       	R29, t12
+	  16: ADDL       	$0x14, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R3
+	  19: INCEIPL       	$4
+
+	0xFECA6B8:  7FE32050  subf r31,r3,r4
+	  20: GETL       	R3, t16
+	  21: GETL       	R4, t18
+	  22: SUBL       	t16, t18
+	  23: PUTL       	t18, R31
+	  24: INCEIPL       	$4
+
+	0xFECA6BC:  40820084  bc 4,2,0xFECA740
+	  25: Jc02o       	$0xFECA740
+
+
+
+. 3366 FECA6A8 24
+. 81 7D 00 00 80 9D 00 18 71 69 02 00 80 7D 00 14 7F E3 20 50 40 82 00 84
+==== BB 3367 (0xFECA6C0) approx BBs exec'd 0 ====
+
+	0xFECA6C0:  2C1F0000  cmpi cr0,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFECA6C4:  4182003C  bc 12,2,0xFECA700
+	   4: Js02o       	$0xFECA700
+
+
+
+. 3367 FECA6C0 8
+. 2C 1F 00 00 41 82 00 3C
+==== BB 3368 (0xFECA700) approx BBs exec'd 0 ====
+
+	0xFECA700:  7C79C214  add r3,r25,r24
+	   0: GETL       	R25, t0
+	   1: GETL       	R24, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECA704:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFECA708:  409E0074  bc 4,30,0xFECA77C
+	   9: Jc30o       	$0xFECA77C
+
+
+
+. 3368 FECA700 12
+. 7C 79 C2 14 2F 83 00 00 40 9E 00 74
+==== BB 3369 (0xFECA77C) approx BBs exec'd 0 ====
+
+	0xFECA77C:  8B1D0046  lbz r24,70(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0xFECA780:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFECA784:  3880FFFF  li r4,-1
+	   8: MOVL       	$0xFFFFFFFF, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFECA788:  7F0C0774  extsb r12,r24
+	  11: GETB       	R24, t8
+	  12: WIDENL_Bs       	_st8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0xFECA78C:  7D4CEA14  add r10,r12,r29
+	  15: GETL       	R12, t10
+	  16: GETL       	R29, t12
+	  17: ADDL       	t10, t12
+	  18: PUTL       	t12, R10
+	  19: INCEIPL       	$4
+
+	0xFECA790:  810A0098  lwz r8,152(r10)
+	  20: GETL       	R10, t14
+	  21: ADDL       	$0x98, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R8
+	  24: INCEIPL       	$4
+
+	0xFECA794:  8168000C  lwz r11,12(r8)
+	  25: GETL       	R8, t18
+	  26: ADDL       	$0xC, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R11
+	  29: INCEIPL       	$4
+
+	0xFECA798:  7D6903A6  mtctr r11
+	  30: GETL       	R11, t22
+	  31: PUTL       	t22, CTR
+	  32: INCEIPL       	$4
+
+	0xFECA79C:  4E800421  bctrl
+	  33: MOVL       	$0xFECA7A0, t24
+	  34: PUTL       	t24, LR
+	  35: GETL       	CTR, t26
+	  36: JMPo-c       	t26  ($4)
+
+
+
+. 3369 FECA77C 36
+. 8B 1D 00 46 7F A3 EB 78 38 80 FF FF 7F 0C 07 74 7D 4C EA 14 81 0A 00 98 81 68 00 0C 7D 69 03 A6 4E 80 04 21
+==== BB 3370 _IO_file_overflow@@GLIBC_2.1(0xFEC984C) approx BBs exec'd 0 ====
+
+	0xFEC984C:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEC9850:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEC9854:  480DE5FD  bl 0xFFA7E50
+	   9: MOVL       	$0xFEC9858, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3370 FEC984C 12
+. 7C 08 02 A6 94 21 FF E0 48 0D E5 FD
+==== BB 3371 (0xFEC9858) approx BBs exec'd 0 ====
+
+	0xFEC9858:  93A10014  stw r29,20(r1)
+	   0: GETL       	R29, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEC985C:  93C10018  stw r30,24(r1)
+	   5: GETL       	R30, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x18, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEC9860:  7C9D2378  or r29,r4,r4
+	  10: GETL       	R4, t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xFEC9864:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEC9868:  7FC802A6  mflr r30
+	  18: GETL       	LR, t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0xFEC986C:  81630000  lwz r11,0(r3)
+	  21: GETL       	R3, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R11
+	  24: INCEIPL       	$4
+
+	0xFEC9870:  93E1001C  stw r31,28(r1)
+	  25: GETL       	R31, t20
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0x1C, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0xFEC9874:  7C7F1B78  or r31,r3,r3
+	  30: GETL       	R3, t24
+	  31: PUTL       	t24, R31
+	  32: INCEIPL       	$4
+
+	0xFEC9878:  71600008  andi. r0,r11,0x8
+	  33: GETL       	R11, t26
+	  34: ANDL       	$0x8, t26
+	  35: PUTL       	t26, R0
+	  36: CMP0L       	t26, t28  (-rSo)
+	  37: ICRFL       	t28, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0xFEC987C:  93810010  stw r28,16(r1)
+	  39: GETL       	R28, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x10, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0xFEC9880:  40820210  bc 4,2,0xFEC9A90
+	  44: Jc02o       	$0xFEC9A90
+
+
+
+. 3371 FEC9858 44
+. 93 A1 00 14 93 C1 00 18 7C 9D 23 78 90 01 00 24 7F C8 02 A6 81 63 00 00 93 E1 00 1C 7C 7F 1B 78 71 60 00 08 93 81 00 10 40 82 02 10
+==== BB 3372 (0xFEC9884) approx BBs exec'd 0 ====
+
+	0xFEC9884:  71690800  andi. r9,r11,0x800
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x800, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEC9888:  408200F4  bc 4,2,0xFEC997C
+	   6: Jc02o       	$0xFEC997C
+
+
+
+. 3372 FEC9884 8
+. 71 69 08 00 40 82 00 F4
+==== BB 3373 (0xFEC988C) approx BBs exec'd 0 ====
+
+	0xFEC988C:  80A30010  lwz r5,16(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFEC9890:  2C850000  cmpi cr1,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFEC9894:  4086012C  bc 4,6,0xFEC99C0
+	   9: Jc06o       	$0xFEC99C0
+
+
+
+. 3373 FEC988C 12
+. 80 A3 00 10 2C 85 00 00 40 86 01 2C
+==== BB 3374 (0xFEC9898) approx BBs exec'd 0 ====
+
+	0xFEC9898:  480023B1  bl 0xFECBC48
+	   0: MOVL       	$0xFEC989C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFECBC48  ($4)
+
+
+
+. 3374 FEC9898 4
+. 48 00 23 B1
+==== BB 3375 (0xFEC989C) approx BBs exec'd 0 ====
+
+	0xFEC989C:  80DF001C  lwz r6,28(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFEC98A0:  817F0000  lwz r11,0(r31)
+	   5: GETL       	R31, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0xFEC98A4:  7CC53378  or r5,r6,r6
+	   9: GETL       	R6, t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0xFEC98A8:  90DF000C  stw r6,12(r31)
+	  12: GETL       	R6, t10
+	  13: GETL       	R31, t12
+	  14: ADDL       	$0xC, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0xFEC98AC:  90DF0004  stw r6,4(r31)
+	  17: GETL       	R6, t14
+	  18: GETL       	R31, t16
+	  19: ADDL       	$0x4, t16
+	  20: STL       	t14, (t16)
+	  21: INCEIPL       	$4
+
+	0xFEC98B0:  90DF0008  stw r6,8(r31)
+	  22: GETL       	R6, t18
+	  23: GETL       	R31, t20
+	  24: ADDL       	$0x8, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0xFEC98B4:  71600100  andi. r0,r11,0x100
+	  27: GETL       	R11, t22
+	  28: ANDL       	$0x100, t22
+	  29: PUTL       	t22, R0
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0xFEC98B8:  4082018C  bc 4,2,0xFEC9A44
+	  33: Jc02o       	$0xFEC9A44
+
+
+
+. 3375 FEC989C 32
+. 80 DF 00 1C 81 7F 00 00 7C C5 33 78 90 DF 00 0C 90 DF 00 04 90 DF 00 08 71 60 01 00 40 82 01 8C
+==== BB 3376 (0xFEC98BC) approx BBs exec'd 0 ====
+
+	0xFEC98BC:  815F0020  lwz r10,32(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFEC98C0:  7F855000  cmp cr7,r5,r10
+	   5: GETL       	R5, t4
+	   6: GETL       	R10, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFEC98C4:  419E013C  bc 12,30,0xFEC9A00
+	  10: Js30o       	$0xFEC9A00
+
+
+
+. 3376 FEC98BC 12
+. 81 5F 00 20 7F 85 50 00 41 9E 01 3C
+==== BB 3377 (0xFEC98C8) approx BBs exec'd 0 ====
+
+	0xFEC98C8:  819F0060  lwz r12,96(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFEC98CC:  616B0800  ori r11,r11,0x800
+	   5: GETL       	R11, t4
+	   6: ORL       	$0x800, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xFEC98D0:  813F0008  lwz r9,8(r31)
+	   9: GETL       	R31, t6
+	  10: ADDL       	$0x8, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFEC98D4:  7CA42B78  or r4,r5,r5
+	  14: GETL       	R5, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0xFEC98D8:  2C8C0000  cmpi cr1,r12,0
+	  17: GETL       	R12, t12
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x1, CR
+	  20: INCEIPL       	$4
+
+	0xFEC98DC:  90BF0014  stw r5,20(r31)
+	  21: GETL       	R5, t16
+	  22: GETL       	R31, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEC98E0:  913F000C  stw r9,12(r31)
+	  26: GETL       	R9, t20
+	  27: GETL       	R31, t22
+	  28: ADDL       	$0xC, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFEC98E4:  90BF0010  stw r5,16(r31)
+	  31: GETL       	R5, t24
+	  32: GETL       	R31, t26
+	  33: ADDL       	$0x10, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFEC98E8:  913F0004  stw r9,4(r31)
+	  36: GETL       	R9, t28
+	  37: GETL       	R31, t30
+	  38: ADDL       	$0x4, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0xFEC98EC:  917F0000  stw r11,0(r31)
+	  41: GETL       	R11, t32
+	  42: GETL       	R31, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0xFEC98F0:  40850144  bc 4,5,0xFEC9A34
+	  45: Jc05o       	$0xFEC9A34
+
+
+
+. 3377 FEC98C8 44
+. 81 9F 00 60 61 6B 08 00 81 3F 00 08 7C A4 2B 78 2C 8C 00 00 90 BF 00 14 91 3F 00 0C 90 BF 00 10 91 3F 00 04 91 7F 00 00 40 85 01 44
+==== BB 3378 (0xFEC9A34) approx BBs exec'd 0 ====
+
+	0xFEC9A34:  71600202  andi. r0,r11,0x202
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x202, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEC9A38:  41A2FEBC  bc 13,2,0xFEC98F4
+	   6: Js02o       	$0xFEC98F4
+
+
+
+. 3378 FEC9A34 8
+. 71 60 02 02 41 A2 FE BC
+==== BB 3379 (0xFEC98F4) approx BBs exec'd 0 ====
+
+	0xFEC98F4:  915F0018  stw r10,24(r31)
+	   0: GETL       	R10, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEC98F8:  2F1DFFFF  cmpi cr6,r29,-1
+	   5: GETL       	R29, t4
+	   6: MOVL       	$0xFFFFFFFF, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFEC98FC:  419A0098  bc 12,26,0xFEC9994
+	  10: Js26o       	$0xFEC9994
+
+
+
+. 3379 FEC98F4 12
+. 91 5F 00 18 2F 1D FF FF 41 9A 00 98
+==== BB 3380 (0xFEC9994) approx BBs exec'd 0 ====
+
+	0xFEC9994:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC9998:  7CA42850  subf r5,r4,r5
+	   3: GETL       	R4, t2
+	   4: GETL       	R5, t4
+	   5: SUBL       	t2, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFEC999C:  4BFFF38D  bl 0xFEC8D28
+	   8: MOVL       	$0xFEC99A0, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0xFEC8D28  ($4)
+
+
+
+. 3380 FEC9994 12
+. 7F E3 FB 78 7C A4 28 50 4B FF F3 8D
+==== BB 3381 _IO_do_write@@GLIBC_2.1(0xFEC8D28) approx BBs exec'd 0 ====
+
+	0xFEC8D28:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEC8D2C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEC8D30:  9361000C  stw r27,12(r1)
+	   9: GETL       	R27, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEC8D34:  7CBB2B79  or. r27,r5,r5
+	  14: GETL       	R5, t10
+	  15: PUTL       	t10, R27
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFEC8D38:  93810010  stw r28,16(r1)
+	  19: GETL       	R28, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x10, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFEC8D3C:  7C9C2378  or r28,r4,r4
+	  24: GETL       	R4, t18
+	  25: PUTL       	t18, R28
+	  26: INCEIPL       	$4
+
+	0xFEC8D40:  93E1001C  stw r31,28(r1)
+	  27: GETL       	R31, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFEC8D44:  7C7F1B78  or r31,r3,r3
+	  32: GETL       	R3, t24
+	  33: PUTL       	t24, R31
+	  34: INCEIPL       	$4
+
+	0xFEC8D48:  93A10014  stw r29,20(r1)
+	  35: GETL       	R29, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x14, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFEC8D4C:  93C10018  stw r30,24(r1)
+	  40: GETL       	R30, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x18, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFEC8D50:  90010024  stw r0,36(r1)
+	  45: GETL       	R0, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x24, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFEC8D54:  418200A4  bc 12,2,0xFEC8DF8
+	  50: Js02o       	$0xFEC8DF8
+
+
+
+. 3381 FEC8D28 48
+. 94 21 FF E0 7C 08 02 A6 93 61 00 0C 7C BB 2B 79 93 81 00 10 7C 9C 23 78 93 E1 00 1C 7C 7F 1B 78 93 A1 00 14 93 C1 00 18 90 01 00 24 41 82 00 A4
+==== BB 3382 (0xFEC8DF8) approx BBs exec'd 0 ====
+
+	0xFEC8DF8:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC8DFC:  83610024  lwz r27,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R27
+	   7: INCEIPL       	$4
+
+	0xFEC8E00:  83810010  lwz r28,16(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0xFEC8E04:  7F6803A6  mtlr r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFEC8E08:  83A10014  lwz r29,20(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x14, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R29
+	  20: INCEIPL       	$4
+
+	0xFEC8E0C:  8361000C  lwz r27,12(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R27
+	  25: INCEIPL       	$4
+
+	0xFEC8E10:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0xFEC8E14:  83E1001C  lwz r31,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0xFEC8E18:  38210020  addi r1,r1,32
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0xFEC8E1C:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+
+. 3382 FEC8DF8 40
+. 38 60 00 00 83 61 00 24 83 81 00 10 7F 68 03 A6 83 A1 00 14 83 61 00 0C 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3383 (0xFEC99A0) approx BBs exec'd 0 ====
+
+	0xFEC99A0:  83A10024  lwz r29,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFEC99A4:  83810010  lwz r28,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFEC99A8:  7FA803A6  mtlr r29
+	  10: GETL       	R29, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFEC99AC:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0xFEC99B0:  83A10014  lwz r29,20(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R29
+	  22: INCEIPL       	$4
+
+	0xFEC99B4:  83E1001C  lwz r31,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0xFEC99B8:  38210020  addi r1,r1,32
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0xFEC99BC:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+
+. 3383 FEC99A0 32
+. 83 A1 00 24 83 81 00 10 7F A8 03 A6 83 C1 00 18 83 A1 00 14 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3384 (0xFECA7A0) approx BBs exec'd 0 ====
+
+	0xFECA7A0:  2C83FFFF  cmpi cr1,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFECA7A4:  7C79D050  subf r3,r25,r26
+	   5: GETL       	R25, t6
+	   6: GETL       	R26, t8
+	   7: SUBL       	t6, t8
+	   8: PUTL       	t8, R3
+	   9: INCEIPL       	$4
+
+	0xFECA7A8:  41A6FF68  bc 13,6,0xFECA710
+	  10: Js06o       	$0xFECA710
+
+
+
+. 3384 FECA7A0 12
+. 2C 83 FF FF 7C 79 D0 50 41 A6 FF 68
+==== BB 3385 (0xFECA7AC) approx BBs exec'd 0 ====
+
+	0xFECA7AC:  83FD0020  lwz r31,32(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFECA7B0:  7F3CCB78  or r28,r25,r25
+	   5: GETL       	R25, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFECA7B4:  801D001C  lwz r0,28(r29)
+	   8: GETL       	R29, t6
+	   9: ADDL       	$0x1C, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0xFECA7B8:  7D20F850  subf r9,r0,r31
+	  13: GETL       	R0, t10
+	  14: GETL       	R31, t12
+	  15: SUBL       	t10, t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0xFECA7BC:  2B09007F  cmpli cr6,r9,127
+	  18: GETL       	R9, t14
+	  19: MOVL       	$0x7F, t18
+	  20: CMPUL       	t14, t18, t16  (-rSo)
+	  21: ICRFL       	t16, $0x6, CR
+	  22: INCEIPL       	$4
+
+	0xFECA7C0:  4099000C  bc 4,25,0xFECA7CC
+	  23: Jc25o       	$0xFECA7CC
+
+
+
+. 3385 FECA7AC 24
+. 83 FD 00 20 7F 3C CB 78 80 1D 00 1C 7D 20 F8 50 2B 09 00 7F 40 99 00 0C
+==== BB 3386 (0xFECA7C4) approx BBs exec'd 0 ====
+
+	0xFECA7C4:  7C994B96  divwu r4, r25, r9
+	   0: GETL       	R25, t2
+	   1: GETL       	R9, t0
+	   2: UDIVL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFECA7C8:  7F8449D6  mullw r28,r4,r9
+	   5: GETL       	R4, t4
+	   6: GETL       	R9, t6
+	   7: MULL       	t4, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFECA7CC:  2C1C0000  cmpi cr0,r28,0
+	  10: GETL       	R28, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0xFECA7D0:  418200A8  bc 12,2,0xFECA878
+	  14: Js02o       	$0xFECA878
+
+
+
+. 3386 FECA7C4 16
+. 7C 99 4B 96 7F 84 49 D6 2C 1C 00 00 41 82 00 A8
+==== BB 3387 (0xFECA878) approx BBs exec'd 0 ====
+
+	0xFECA878:  2F190000  cmpi cr6,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFECA87C:  419AFE90  bc 12,26,0xFECA70C
+	   4: Js26o       	$0xFECA70C
+
+
+
+. 3387 FECA878 8
+. 2F 19 00 00 41 9A FE 90
+==== BB 3388 (0xFECA880) approx BBs exec'd 0 ====
+
+	0xFECA880:  7F25CB78  or r5,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFECA884:  7C9BE214  add r4,r27,r28
+	   3: GETL       	R27, t2
+	   4: GETL       	R28, t4
+	   5: ADDL       	t2, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFECA888:  7FA3EB78  or r3,r29,r29
+	   8: GETL       	R29, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFECA88C:  480014C9  bl 0xFECBD54
+	  11: MOVL       	$0xFECA890, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0xFECBD54  ($4)
+
+
+
+. 3388 FECA880 16
+. 7F 25 CB 78 7C 9B E2 14 7F A3 EB 78 48 00 14 C9
+==== BB 3389 _IO_default_xsputn_internal(0xFECBD54) approx BBs exec'd 0 ====
+
+	0xFECBD54:  2F850000  cmpi cr7,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFECBD58:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFECBD5C:  9421FFE0  stwu r1,-32(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFE0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECBD60:  93410008  stw r26,8(r1)
+	  13: GETL       	R26, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x8, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFECBD64:  7CBA2B78  or r26,r5,r5
+	  18: GETL       	R5, t14
+	  19: PUTL       	t14, R26
+	  20: INCEIPL       	$4
+
+	0xFECBD68:  9361000C  stw r27,12(r1)
+	  21: GETL       	R27, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFECBD6C:  7CBB2B78  or r27,r5,r5
+	  26: GETL       	R5, t20
+	  27: PUTL       	t20, R27
+	  28: INCEIPL       	$4
+
+	0xFECBD70:  93810010  stw r28,16(r1)
+	  29: GETL       	R28, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x10, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFECBD74:  7C7C1B78  or r28,r3,r3
+	  34: GETL       	R3, t26
+	  35: PUTL       	t26, R28
+	  36: INCEIPL       	$4
+
+	0xFECBD78:  93E1001C  stw r31,28(r1)
+	  37: GETL       	R31, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x1C, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFECBD7C:  38600000  li r3,0
+	  42: MOVL       	$0x0, t32
+	  43: PUTL       	t32, R3
+	  44: INCEIPL       	$4
+
+	0xFECBD80:  93A10014  stw r29,20(r1)
+	  45: GETL       	R29, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x14, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFECBD84:  7C9F2378  or r31,r4,r4
+	  50: GETL       	R4, t38
+	  51: PUTL       	t38, R31
+	  52: INCEIPL       	$4
+
+	0xFECBD88:  93C10018  stw r30,24(r1)
+	  53: GETL       	R30, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x18, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0xFECBD8C:  90010024  stw r0,36(r1)
+	  58: GETL       	R0, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x24, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0xFECBD90:  419E00A0  bc 12,30,0xFECBE30
+	  63: Js30o       	$0xFECBE30
+
+
+
+. 3389 FECBD54 64
+. 2F 85 00 00 7C 08 02 A6 94 21 FF E0 93 41 00 08 7C BA 2B 78 93 61 00 0C 7C BB 2B 78 93 81 00 10 7C 7C 1B 78 93 E1 00 1C 38 60 00 00 93 A1 00 14 7C 9F 23 78 93 C1 00 18 90 01 00 24 41 9E 00 A0
+==== BB 3390 (0xFECBD94) approx BBs exec'd 0 ====
+
+	0xFECBD94:  807C0014  lwz r3,20(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECBD98:  80BC0018  lwz r5,24(r28)
+	   5: GETL       	R28, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFECBD9C:  7C032840  cmpl cr0,r3,r5
+	  10: GETL       	R3, t8
+	  11: GETL       	R5, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFECBDA0:  4080004C  bc 4,0,0xFECBDEC
+	  15: Jc00o       	$0xFECBDEC
+
+
+
+. 3390 FECBD94 16
+. 80 7C 00 14 80 BC 00 18 7C 03 28 40 40 80 00 4C
+==== BB 3391 (0xFECBDA4) approx BBs exec'd 0 ====
+
+	0xFECBDA4:  7FA32850  subf r29,r3,r5
+	   0: GETL       	R3, t0
+	   1: GETL       	R5, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFECBDA8:  7C9DD840  cmpl cr1,r29,r27
+	   5: GETL       	R29, t4
+	   6: GETL       	R27, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0xFECBDAC:  40850008  bc 4,5,0xFECBDB4
+	  10: Jc05o       	$0xFECBDB4
+
+
+
+. 3391 FECBDA4 12
+. 7F A3 28 50 7C 9D D8 40 40 85 00 08
+==== BB 3392 (0xFECBDB0) approx BBs exec'd 0 ====
+
+	0xFECBDB0:  7F7DDB78  or r29,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFECBDB4:  2B1D0014  cmpli cr6,r29,20
+	   3: GETL       	R29, t2
+	   4: MOVL       	$0x14, t6
+	   5: CMPUL       	t2, t6, t4  (-rSo)
+	   6: ICRFL       	t4, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0xFECBDB8:  419900A0  bc 12,25,0xFECBE58
+	   8: Js25o       	$0xFECBE58
+
+
+
+. 3392 FECBDB0 12
+. 7F 7D DB 78 2B 1D 00 14 41 99 00 A0
+==== BB 3393 (0xFECBE58) approx BBs exec'd 0 ====
+
+	0xFECBE58:  7FE4FB78  or r4,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFECBE5C:  7FA5EB78  or r5,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFECBE60:  4800C1A1  bl 0xFED8000
+	   6: MOVL       	$0xFECBE64, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFED8000  ($4)
+
+
+
+. 3393 FECBE58 12
+. 7F E4 FB 78 7F A5 EB 78 48 00 C1 A1
+==== BB 3394 (0xFED88F4) approx BBs exec'd 0 ====
+
+	0xFED88F4:  81240000  lwz r9,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFED88F8:  3884FFFC  addi r4,r4,-4
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0xFFFFFFFC, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFED88FC:  80040008  lwz r0,8(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0xFED8900:  3863FFF8  addi r3,r3,-8
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0xFFFFFFF8, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0xFED8904:  38A50001  addi r5,r5,1
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x1, t12
+	  19: PUTL       	t12, R5
+	  20: INCEIPL       	$4
+
+	0xFED8908:  91230008  stw r9,8(r3)
+	  21: GETL       	R9, t14
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x8, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0xFED890C:  4BFFFF10  b 0xFED881C
+	  26: JMPo       	$0xFED881C  ($4)
+
+
+
+. 3394 FED88F4 28
+. 81 24 00 00 38 84 FF FC 80 04 00 08 38 63 FF F8 38 A5 00 01 91 23 00 08 4B FF FF 10
+==== BB 3395 (0xFED881C) approx BBs exec'd 0 ====
+
+	0xFED881C:  8124000C  lwz r9,12(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFED8820:  9003000C  stw r0,12(r3)
+	   5: GETL       	R0, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0xC, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFED8824:  80040010  lwz r0,16(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x10, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFED8828:  91230010  stw r9,16(r3)
+	  15: GETL       	R9, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0x10, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFED882C:  81240014  lwz r9,20(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0x14, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R9
+	  24: INCEIPL       	$4
+
+	0xFED8830:  90030014  stw r0,20(r3)
+	  25: GETL       	R0, t20
+	  26: GETL       	R3, t22
+	  27: ADDL       	$0x14, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0xFED8834:  80040018  lwz r0,24(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0x18, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R0
+	  34: INCEIPL       	$4
+
+	0xFED8838:  91230018  stw r9,24(r3)
+	  35: GETL       	R9, t28
+	  36: GETL       	R3, t30
+	  37: ADDL       	$0x18, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0xFED883C:  34A5FFF8  addic. r5,r5,-8
+	  40: GETL       	R5, t32
+	  41: ADCL       	$0xFFFFFFF8, t32  (-wCa)
+	  42: PUTL       	t32, R5
+	  43: CMP0L       	t32, t34  (-rSo)
+	  44: ICRFL       	t34, $0x0, CR
+	  45: INCEIPL       	$4
+
+	0xFED8840:  8124001C  lwz r9,28(r4)
+	  46: GETL       	R4, t36
+	  47: ADDL       	$0x1C, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R9
+	  50: INCEIPL       	$4
+
+	0xFED8844:  9003001C  stw r0,28(r3)
+	  51: GETL       	R0, t40
+	  52: GETL       	R3, t42
+	  53: ADDL       	$0x1C, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0xFED8848:  38840020  addi r4,r4,32
+	  56: GETL       	R4, t44
+	  57: ADDL       	$0x20, t44
+	  58: PUTL       	t44, R4
+	  59: INCEIPL       	$4
+
+	0xFED884C:  38630020  addi r3,r3,32
+	  60: GETL       	R3, t46
+	  61: ADDL       	$0x20, t46
+	  62: PUTL       	t46, R3
+	  63: INCEIPL       	$4
+
+	0xFED8850:  40A2FFB4  bc 5,2,0xFED8804
+	  64: Jc02o       	$0xFED8804
+
+
+
+. 3395 FED881C 56
+. 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+==== BB 3396 (0xFECBE64) approx BBs exec'd 0 ====
+
+	0xFECBE64:  7FFFEA14  add r31,r31,r29
+	   0: GETL       	R31, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFECBE68:  907C0014  stw r3,20(r28)
+	   5: GETL       	R3, t4
+	   6: GETL       	R28, t6
+	   7: ADDL       	$0x14, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECBE6C:  4BFFFF7C  b 0xFECBDE8
+	  10: JMPo       	$0xFECBDE8  ($4)
+
+
+
+. 3396 FECBE64 12
+. 7F FF EA 14 90 7C 00 14 4B FF FF 7C
+==== BB 3397 (0xFECBDE8) approx BBs exec'd 0 ====
+
+	0xFECBDE8:  7F7DD850  subf r27,r29,r27
+	   0: GETL       	R29, t0
+	   1: GETL       	R27, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFECBDEC:  2C9B0000  cmpi cr1,r27,0
+	   5: GETL       	R27, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFECBDF0:  4186003C  bc 12,6,0xFECBE2C
+	   9: Js06o       	$0xFECBE2C
+
+
+
+. 3397 FECBDE8 12
+. 7F 7D D8 50 2C 9B 00 00 41 86 00 3C
+==== BB 3398 (0xFECBE2C) approx BBs exec'd 0 ====
+
+	0xFECBE2C:  7C7BD050  subf r3,r27,r26
+	   0: GETL       	R27, t0
+	   1: GETL       	R26, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECBE30:  80E10024  lwz r7,36(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x24, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0xFECBE34:  83410008  lwz r26,8(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0xFECBE38:  8361000C  lwz r27,12(r1)
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0xC, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R27
+	  19: INCEIPL       	$4
+
+	0xFECBE3C:  7CE803A6  mtlr r7
+	  20: GETL       	R7, t16
+	  21: PUTL       	t16, LR
+	  22: INCEIPL       	$4
+
+	0xFECBE40:  83810010  lwz r28,16(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x10, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R28
+	  27: INCEIPL       	$4
+
+	0xFECBE44:  83A10014  lwz r29,20(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x14, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R29
+	  32: INCEIPL       	$4
+
+	0xFECBE48:  83C10018  lwz r30,24(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x18, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R30
+	  37: INCEIPL       	$4
+
+	0xFECBE4C:  83E1001C  lwz r31,28(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x1C, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R31
+	  42: INCEIPL       	$4
+
+	0xFECBE50:  38210020  addi r1,r1,32
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x20, t34
+	  45: PUTL       	t34, R1
+	  46: INCEIPL       	$4
+
+	0xFECBE54:  4E800020  blr
+	  47: GETL       	LR, t36
+	  48: JMPo-r       	t36  ($4)
+
+
+
+. 3398 FECBE2C 44
+. 7C 7B D0 50 80 E1 00 24 83 41 00 08 83 61 00 0C 7C E8 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3399 (0xFECA890) approx BBs exec'd 0 ====
+
+	0xFECA890:  7F23C850  subf r25,r3,r25
+	   0: GETL       	R3, t0
+	   1: GETL       	R25, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0xFECA894:  7C79D050  subf r3,r25,r26
+	   5: GETL       	R25, t4
+	   6: GETL       	R26, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFECA898:  4BFFFE78  b 0xFECA710
+	  10: JMPo       	$0xFECA710  ($4)
+
+
+
+. 3399 FECA890 12
+. 7F 23 C8 50 7C 79 D0 50 4B FF FE 78
+==== BB 3400 (0xFECA710) approx BBs exec'd 0 ====
+
+	0xFECA710:  83210034  lwz r25,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0xFECA714:  83010010  lwz r24,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R24
+	   9: INCEIPL       	$4
+
+	0xFECA718:  7F2803A6  mtlr r25
+	  10: GETL       	R25, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0xFECA71C:  83410018  lwz r26,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R26
+	  17: INCEIPL       	$4
+
+	0xFECA720:  83210014  lwz r25,20(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R25
+	  22: INCEIPL       	$4
+
+	0xFECA724:  8361001C  lwz r27,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R27
+	  27: INCEIPL       	$4
+
+	0xFECA728:  83810020  lwz r28,32(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R28
+	  32: INCEIPL       	$4
+
+	0xFECA72C:  83A10024  lwz r29,36(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x24, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R29
+	  37: INCEIPL       	$4
+
+	0xFECA730:  83C10028  lwz r30,40(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x28, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R30
+	  42: INCEIPL       	$4
+
+	0xFECA734:  83E1002C  lwz r31,44(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x2C, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R31
+	  47: INCEIPL       	$4
+
+	0xFECA738:  38210030  addi r1,r1,48
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x30, t38
+	  50: PUTL       	t38, R1
+	  51: INCEIPL       	$4
+
+	0xFECA73C:  4E800020  blr
+	  52: GETL       	LR, t40
+	  53: JMPo-r       	t40  ($4)
+
+
+
+. 3400 FECA710 48
+. 83 21 00 34 83 01 00 10 7F 28 03 A6 83 41 00 18 83 21 00 14 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 3401 (0xFEBE560) approx BBs exec'd 0 ====
+
+	0xFEBE560:  7F03D800  cmp cr6,r3,r27
+	   0: GETL       	R3, t0
+	   1: GETL       	R27, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFEBE564:  409AFF54  bc 4,26,0xFEBE4B8
+	   5: Jc26o       	$0xFEBE4B8
+
+
+
+. 3401 FEBE560 8
+. 7F 03 D8 00 40 9A FF 54
+==== BB 3402 (0xFEBE568) approx BBs exec'd 0 ====
+
+	0xFEBE568:  807C0000  lwz r3,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEBE56C:  81430014  lwz r10,20(r3)
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x14, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0xFEBE570:  83830018  lwz r28,24(r3)
+	   9: GETL       	R3, t8
+	  10: ADDL       	$0x18, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R28
+	  13: INCEIPL       	$4
+
+	0xFEBE574:  7C0AE040  cmpl cr0,r10,r28
+	  14: GETL       	R10, t12
+	  15: GETL       	R28, t14
+	  16: CMPUL       	t12, t14, t16  (-rSo)
+	  17: ICRFL       	t16, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFEBE578:  408000B4  bc 4,0,0xFEBE62C
+	  19: Jc00o       	$0xFEBE62C
+
+
+
+. 3402 FEBE568 20
+. 80 7C 00 00 81 43 00 14 83 83 00 18 7C 0A E0 40 40 80 00 B4
+==== BB 3403 (0xFEBE57C) approx BBs exec'd 0 ====
+
+	0xFEBE57C:  3B2A0001  addi r25,r10,1
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0xFEBE580:  3880000A  li r4,10
+	   4: MOVL       	$0xA, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0xFEBE584:  988A0000  stb r4,0(r10)
+	   7: GETL       	R4, t4
+	   8: GETL       	R10, t6
+	   9: STB       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xFEBE588:  93230014  stw r25,20(r3)
+	  11: GETL       	R25, t8
+	  12: GETL       	R3, t10
+	  13: ADDL       	$0x14, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFEBE58C:  3B3B0001  addi r25,r27,1
+	  16: GETL       	R27, t12
+	  17: ADDL       	$0x1, t12
+	  18: PUTL       	t12, R25
+	  19: INCEIPL       	$4
+
+	0xFEBE590:  4BFFFF28  b 0xFEBE4B8
+	  20: JMPo       	$0xFEBE4B8  ($4)
+
+
+
+. 3403 FEBE57C 24
+. 3B 2A 00 01 38 80 00 0A 98 8A 00 00 93 23 00 14 3B 3B 00 01 4B FF FF 28
+==== BB 3404 (0xFEBE4B8) approx BBs exec'd 0 ====
+
+	0xFEBE4B8:  807F0000  lwz r3,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEBE4BC:  70698000  andi. r9,r3,0x8000
+	   4: GETL       	R3, t4
+	   5: ANDL       	$0x8000, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEBE4C0:  40820038  bc 4,2,0xFEBE4F8
+	  10: Jc02o       	$0xFEBE4F8
+
+
+
+. 3404 FEBE4B8 12
+. 80 7F 00 00 70 69 80 00 40 82 00 38
+==== BB 3405 (0xFEBE4C4) approx BBs exec'd 0 ====
+
+	0xFEBE4C4:  807F0048  lwz r3,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEBE4C8:  81430004  lwz r10,4(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0xFEBE4CC:  392AFFFF  addi r9,r10,-1
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0xFFFFFFFF, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFEBE4D0:  2F890000  cmpi cr7,r9,0
+	  14: GETL       	R9, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0xFEBE4D4:  91230004  stw r9,4(r3)
+	  18: GETL       	R9, t14
+	  19: GETL       	R3, t16
+	  20: ADDL       	$0x4, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFEBE4D8:  409E0020  bc 4,30,0xFEBE4F8
+	  23: Jc30o       	$0xFEBE4F8
+
+
+
+. 3405 FEBE4C4 24
+. 80 7F 00 48 81 43 00 04 39 2A FF FF 2F 89 00 00 91 23 00 04 40 9E 00 20
+==== BB 3406 (0xFEBE4DC) approx BBs exec'd 0 ====
+
+	0xFEBE4DC:  91230008  stw r9,8(r3)
+	   0: GETL       	R9, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEBE4E0:  7C0004AC  sync
+	   5: INCEIPL       	$4
+
+	0xFEBE4E4:  7D801828  lwarx r12,r0,r3
+	   6: GETL       	R3, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0xFEBE4E8:  7D20192D  stwcx. r9,r0,r3
+	  11: GETL       	R3, t8
+	  12: GETL       	R9, t10
+	  13: LOCKo       	
+	  14: STL       	t10, (t8)  (-rSo)
+	  15: ICRFL       	cr, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFEBE4EC:  40A2FFF8  bc 5,2,0xFEBE4E4
+	  17: Jc02o       	$0xFEBE4E4
+
+
+
+. 3406 FEBE4DC 20
+. 91 23 00 08 7C 00 04 AC 7D 80 18 28 7D 20 19 2D 40 A2 FF F8
+==== BB 3407 (0xFEBE4F0) approx BBs exec'd 0 ====
+
+	0xFEBE4F0:  2C8C0001  cmpi cr1,r12,1
+	   0: GETL       	R12, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFEBE4F4:  4185011C  bc 12,5,0xFEBE610
+	   5: Js05o       	$0xFEBE610
+
+
+
+. 3407 FEBE4F0 8
+. 2C 8C 00 01 41 85 01 1C
+==== BB 3408 (0xFEBE4F8) approx BBs exec'd 0 ====
+
+	0xFEBE4F8:  83A10034  lwz r29,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFEBE4FC:  7F23CB78  or r3,r25,r25
+	   5: GETL       	R25, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEBE500:  83410018  lwz r26,24(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0xFEBE504:  7FA803A6  mtlr r29
+	  13: GETL       	R29, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0xFEBE508:  83210014  lwz r25,20(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x14, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R25
+	  20: INCEIPL       	$4
+
+	0xFEBE50C:  8361001C  lwz r27,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R27
+	  25: INCEIPL       	$4
+
+	0xFEBE510:  83810020  lwz r28,32(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R28
+	  30: INCEIPL       	$4
+
+	0xFEBE514:  83A10024  lwz r29,36(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x24, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R29
+	  35: INCEIPL       	$4
+
+	0xFEBE518:  83C10028  lwz r30,40(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x28, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R30
+	  40: INCEIPL       	$4
+
+	0xFEBE51C:  83E1002C  lwz r31,44(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x2C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R31
+	  45: INCEIPL       	$4
+
+	0xFEBE520:  38210030  addi r1,r1,48
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x30, t36
+	  48: PUTL       	t36, R1
+	  49: INCEIPL       	$4
+
+	0xFEBE524:  4E800020  blr
+	  50: GETL       	LR, t38
+	  51: JMPo-r       	t38  ($4)
+
+
+
+. 3408 FEBE4F8 48
+. 83 A1 00 34 7F 23 CB 78 83 41 00 18 7F A8 03 A6 83 21 00 14 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+==== BB 3409 (0x10001448) approx BBs exec'd 0 ====
+
+	0x10001448:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x1000144C:  48005B5D  bl 0x10006FA8
+	   3: MOVL       	$0x10001450, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x10006FA8  ($4)
+
+
+
+. 3409 10001448 8
+. 7F A3 EB 78 48 00 5B 5D
+==== BB 3410 (0x10006FA8) approx BBs exec'd 0 ====
+
+	0x10006FA8:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10006FAC:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0x10006FB0:  9421FFF0  stwu r1,-16(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFF0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x10006FB4:  90010014  stw r0,20(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x10006FB8:  419E0008  bc 12,30,0x10006FC0
+	  18: Js30o       	$0x10006FC0
+
+
+
+. 3410 10006FA8 20
+. 2F 83 00 00 7C 08 02 A6 94 21 FF F0 90 01 00 14 41 9E 00 08
+==== BB 3411 (0x10006FBC) approx BBs exec'd 0 ====
+
+	0x10006FBC:  48013D01  bl 0x1001ACBC
+	   0: MOVL       	$0x10006FC0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x1001ACBC  ($4)
+
+
+
+. 3411 10006FBC 4
+. 48 01 3D 01
+==== BB 3412 (0x1001ACBC) approx BBs exec'd 0 ====
+
+	0x1001ACBC:  396000C4  li r11,196
+	   0: MOVL       	$0xC4, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001ACC0:  4BFFFE4C  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3412 1001ACBC 8
+. 39 60 00 C4 4B FF FE 4C
+==== BB 3413 (0x10006FC0) approx BBs exec'd 0 ====
+
+	0x10006FC0:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10006FC4:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x10006FC8:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x10006FCC:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 3413 10006FC0 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+==== BB 3414 (0x10001450) approx BBs exec'd 0 ====
+
+	0x10001450:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x10001454:  80010064  lwz r0,100(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x64, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x10001458:  83010040  lwz r24,64(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x40, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R24
+	  12: INCEIPL       	$4
+
+	0x1000145C:  83210044  lwz r25,68(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x44, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R25
+	  17: INCEIPL       	$4
+
+	0x10001460:  7C0803A6  mtlr r0
+	  18: GETL       	R0, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x10001464:  83410048  lwz r26,72(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x48, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R26
+	  25: INCEIPL       	$4
+
+	0x10001468:  8361004C  lwz r27,76(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x4C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0x1000146C:  83810050  lwz r28,80(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x50, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R28
+	  35: INCEIPL       	$4
+
+	0x10001470:  83A10054  lwz r29,84(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x54, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R29
+	  40: INCEIPL       	$4
+
+	0x10001474:  83C10058  lwz r30,88(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x58, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R30
+	  45: INCEIPL       	$4
+
+	0x10001478:  83E1005C  lwz r31,92(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x5C, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R31
+	  50: INCEIPL       	$4
+
+	0x1000147C:  38210060  addi r1,r1,96
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x60, t40
+	  53: PUTL       	t40, R1
+	  54: INCEIPL       	$4
+
+	0x10001480:  4E800020  blr
+	  55: GETL       	LR, t42
+	  56: JMPo-r       	t42  ($4)
+
+
+
+. 3414 10001450 52
+. 38 60 00 00 80 01 00 64 83 01 00 40 83 21 00 44 7C 08 03 A6 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+==== BB 3415 (0x10001C04) approx BBs exec'd 0 ====
+
+	0x10001C04:  7F9C1B78  or r28,r28,r3
+	   0: GETL       	R28, t0
+	   1: GETL       	R3, t2
+	   2: ORL       	t2, t0
+	   3: PUTL       	t0, R28
+	   4: INCEIPL       	$4
+
+	0x10001C08:  301CFFFF  addic r0,r28,-1
+	   5: GETL       	R28, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10001C0C:  7C60E110  subfe r3,r0,r28
+	   9: GETL       	R0, t6
+	  10: GETL       	R28, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x10001C10:  4BFFFCCC  b 0x100018DC
+	  14: JMPo       	$0x100018DC  ($4)
+
+
+
+. 3415 10001C04 16
+. 7F 9C 1B 78 30 1C FF FF 7C 60 E1 10 4B FF FC CC
+==== BB 3416 (0x100018DC) approx BBs exec'd 0 ====
+
+	0x100018DC:  480193C1  bl 0x1001AC9C
+	   0: MOVL       	$0x100018E0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x1001AC9C  ($4)
+
+
+
+. 3416 100018DC 4
+. 48 01 93 C1
+==== BB 3417 (0x1001AC9C) approx BBs exec'd 0 ====
+
+	0x1001AC9C:  396000B4  li r11,180
+	   0: MOVL       	$0xB4, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001ACA0:  4BFFFE6C  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3417 1001AC9C 8
+. 39 60 00 B4 4B FF FE 6C
+==== BB 3418 exit(0xFE93B98) approx BBs exec'd 0 ====
+
+	0xFE93B98:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE93B9C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE93BA0:  481142B1  bl 0xFFA7E50
+	   9: MOVL       	$0xFE93BA4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3418 FE93B98 12
+. 94 21 FF E0 7C 08 02 A6 48 11 42 B1
+==== BB 3419 (0xFE93BA4) approx BBs exec'd 0 ====
+
+	0xFE93BA4:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE93BA8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE93BAC:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE93BB0:  93810010  stw r28,16(r1)
+	  13: GETL       	R28, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x10, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE93BB4:  7C7C1B78  or r28,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFE93BB8:  93A10014  stw r29,20(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE93BBC:  83FE1AFC  lwz r31,6908(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x1AFC, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFE93BC0:  90010024  stw r0,36(r1)
+	  31: GETL       	R0, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x24, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFE93BC4:  801F0000  lwz r0,0(r31)
+	  36: GETL       	R31, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R0
+	  39: INCEIPL       	$4
+
+	0xFE93BC8:  2F800000  cmpi cr7,r0,0
+	  40: GETL       	R0, t32
+	  41: CMP0L       	t32, t34  (-rSo)
+	  42: ICRFL       	t34, $0x7, CR
+	  43: INCEIPL       	$4
+
+	0xFE93BCC:  419E00CC  bc 12,30,0xFE93C98
+	  44: Js30o       	$0xFE93C98
+
+
+
+. 3419 FE93BA4 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 81 00 10 7C 7C 1B 78 93 A1 00 14 83 FE 1A FC 90 01 00 24 80 1F 00 00 2F 80 00 00 41 9E 00 CC
+==== BB 3420 (0xFE93BD0) approx BBs exec'd 0 ====
+
+	0xFE93BD0:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFE93BD4:  80690004  lwz r3,4(r9)
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFE93BD8:  2C030000  cmpi cr0,r3,0
+	   9: GETL       	R3, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFE93BDC:  41820078  bc 12,2,0xFE93C54
+	  13: Js02o       	$0xFE93C54
+
+
+
+. 3420 FE93BD0 16
+. 81 3F 00 00 80 69 00 04 2C 03 00 00 41 82 00 78
+==== BB 3421 (0xFE93BE0) approx BBs exec'd 0 ====
+
+	0xFE93BE0:  7D2A4B78  or r10,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFE93BE4:  48000018  b 0xFE93BFC
+	   3: JMPo       	$0xFE93BFC  ($4)
+
+
+
+. 3421 FE93BE0 8
+. 7D 2A 4B 78 48 00 00 18
+==== BB 3422 (0xFE93BFC) approx BBs exec'd 0 ====
+
+	0xFE93BFC:  80AA0004  lwz r5,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFE93C00:  3925FFFF  addi r9,r5,-1
+	   5: GETL       	R5, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xFE93C04:  55242036  rlwinm r4,r9,4,0,27
+	   9: GETL       	R9, t6
+	  10: SHLL       	$0x4, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0xFE93C08:  912A0004  stw r9,4(r10)
+	  13: GETL       	R9, t8
+	  14: GETL       	R10, t10
+	  15: ADDL       	$0x4, t10
+	  16: STL       	t8, (t10)
+	  17: INCEIPL       	$4
+
+	0xFE93C0C:  7D645214  add r11,r4,r10
+	  18: GETL       	R4, t12
+	  19: GETL       	R10, t14
+	  20: ADDL       	t12, t14
+	  21: PUTL       	t14, R11
+	  22: INCEIPL       	$4
+
+	0xFE93C10:  800B0008  lwz r0,8(r11)
+	  23: GETL       	R11, t16
+	  24: ADDL       	$0x8, t16
+	  25: LDL       	(t16), t18
+	  26: PUTL       	t18, R0
+	  27: INCEIPL       	$4
+
+	0xFE93C14:  396B0008  addi r11,r11,8
+	  28: GETL       	R11, t20
+	  29: ADDL       	$0x8, t20
+	  30: PUTL       	t20, R11
+	  31: INCEIPL       	$4
+
+	0xFE93C18:  2F800003  cmpi cr7,r0,3
+	  32: GETL       	R0, t22
+	  33: MOVL       	$0x3, t26
+	  34: CMPL       	t22, t26, t24  (-rSo)
+	  35: ICRFL       	t24, $0x7, CR
+	  36: INCEIPL       	$4
+
+	0xFE93C1C:  419E0054  bc 12,30,0xFE93C70
+	  37: Js30o       	$0xFE93C70
+
+
+
+. 3422 FE93BFC 36
+. 80 AA 00 04 39 25 FF FF 55 24 20 36 91 2A 00 04 7D 64 52 14 80 0B 00 08 39 6B 00 08 2F 80 00 03 41 9E 00 54
+==== BB 3423 (0xFE93C20) approx BBs exec'd 0 ====
+
+	0xFE93C20:  2C800002  cmpi cr1,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFE93C24:  2F000004  cmpi cr6,r0,4
+	   5: GETL       	R0, t6
+	   6: MOVL       	$0x4, t10
+	   7: CMPL       	t6, t10, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFE93C28:  40BDFFC0  bc 5,29,0xFE93BE8
+	  10: Jc29o       	$0xFE93BE8
+
+
+
+. 3423 FE93C20 12
+. 2C 80 00 02 2F 00 00 04 40 BD FF C0
+==== BB 3424 (0xFE93C2C) approx BBs exec'd 0 ====
+
+	0xFE93C2C:  409AFFC0  bc 4,26,0xFE93BEC
+	   0: Jc26o       	$0xFE93BEC
+
+
+
+. 3424 FE93C2C 4
+. 40 9A FF C0
+==== BB 3425 (0xFE93C30) approx BBs exec'd 0 ====
+
+	0xFE93C30:  810B0004  lwz r8,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFE93C34:  7F84E378  or r4,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE93C38:  806B0008  lwz r3,8(r11)
+	   8: GETL       	R11, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFE93C3C:  7D0903A6  mtctr r8
+	  13: GETL       	R8, t10
+	  14: PUTL       	t10, CTR
+	  15: INCEIPL       	$4
+
+	0xFE93C40:  4E800421  bctrl
+	  16: MOVL       	$0xFE93C44, t12
+	  17: PUTL       	t12, LR
+	  18: GETL       	CTR, t14
+	  19: JMPo-c       	t14  ($4)
+
+
+
+. 3425 FE93C30 20
+. 81 0B 00 04 7F 84 E3 78 80 6B 00 08 7D 09 03 A6 4E 80 04 21
+==== BB 3426 close_stdout(0x100050CC) approx BBs exec'd 0 ====
+
+	0x100050CC:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x100050D0:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x100050D4:  480157D9  bl 0x1001A8AC
+	   9: MOVL       	$0x100050D8, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x1001A8AC  ($4)
+
+
+
+. 3426 100050CC 12
+. 94 21 FF E0 7C 08 02 A6 48 01 57 D9
+==== BB 3427 (0x100050D8) approx BBs exec'd 0 ====
+
+	0x100050D8:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x100050DC:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x100050E0:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x100050E4:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x100050E8:  93E1001C  stw r31,28(r1)
+	  18: GETL       	R31, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x1C, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x100050EC:  83BE01EC  lwz r29,492(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x1EC, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R29
+	  27: INCEIPL       	$4
+
+	0x100050F0:  807D0000  lwz r3,0(r29)
+	  28: GETL       	R29, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R3
+	  31: INCEIPL       	$4
+
+	0x100050F4:  80030000  lwz r0,0(r3)
+	  32: GETL       	R3, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R0
+	  35: INCEIPL       	$4
+
+	0x100050F8:  5400DFFE  rlwinm r0,r0,27,31,31
+	  36: GETL       	R0, t30
+	  37: ROLL       	$0x1B, t30
+	  38: ANDL       	$0x1, t30
+	  39: PUTL       	t30, R0
+	  40: INCEIPL       	$4
+
+	0x100050FC:  33E0FFFF  addic r31,r0,-1
+	  41: GETL       	R0, t32
+	  42: ADCL       	$0xFFFFFFFF, t32  (-wCa)
+	  43: PUTL       	t32, R31
+	  44: INCEIPL       	$4
+
+	0x10005100:  7FFFF910  subfe r31,r31,r31
+	  45: GETL       	R31, t34
+	  46: GETL       	R31, t36
+	  47: SBBL       	t34, t36  (-rCa-wCa)
+	  48: PUTL       	t36, R31
+	  49: INCEIPL       	$4
+
+	0x10005104:  2F9F0000  cmpi cr7,r31,0
+	  50: GETL       	R31, t38
+	  51: CMP0L       	t38, t40  (-rSo)
+	  52: ICRFL       	t40, $0x7, CR
+	  53: INCEIPL       	$4
+
+	0x10005108:  419E0010  bc 12,30,0x10005118
+	  54: Js30o       	$0x10005118
+
+
+
+. 3427 100050D8 52
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 90 01 00 24 93 E1 00 1C 83 BE 01 EC 80 7D 00 00 80 03 00 00 54 00 DF FE 33 E0 FF FF 7F FF F9 10 2F 9F 00 00 41 9E 00 10
+==== BB 3428 (0x1000510C) approx BBs exec'd 0 ====
+
+	0x1000510C:  48015A31  bl 0x1001AB3C
+	   0: MOVL       	$0x10005110, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x1001AB3C  ($4)
+
+
+
+. 3428 1000510C 4
+. 48 01 5A 31
+==== BB 3429 (0x1001AB3C) approx BBs exec'd 0 ====
+
+	0x1001AB3C:  39600004  li r11,4
+	   0: MOVL       	$0x4, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AB40:  4BFFFFCC  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3429 1001AB3C 8
+. 39 60 00 04 4B FF FF CC
+==== BB 3430 __fpending(0xFEC731C) approx BBs exec'd 0 ====
+
+	0xFEC731C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEC7320:  80030060  lwz r0,96(r3)
+	   6: GETL       	R3, t4
+	   7: ADDL       	$0x60, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0xFEC7324:  2F800000  cmpi cr7,r0,0
+	  11: GETL       	R0, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFEC7328:  409D0020  bc 4,29,0xFEC7348
+	  15: Jc29o       	$0xFEC7348
+
+
+
+. 3430 FEC731C 16
+. 94 21 FF F0 80 03 00 60 2F 80 00 00 40 9D 00 20
+==== BB 3431 (0xFEC7348) approx BBs exec'd 0 ====
+
+	0xFEC7348:  80A30010  lwz r5,16(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFEC734C:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFEC7350:  80C30014  lwz r6,20(r3)
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x14, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R6
+	  13: INCEIPL       	$4
+
+	0xFEC7354:  7C653050  subf r3,r5,r6
+	  14: GETL       	R5, t10
+	  15: GETL       	R6, t12
+	  16: SUBL       	t10, t12
+	  17: PUTL       	t12, R3
+	  18: INCEIPL       	$4
+
+	0xFEC7358:  4E800020  blr
+	  19: GETL       	LR, t14
+	  20: JMPo-r       	t14  ($4)
+
+
+
+. 3431 FEC7348 20
+. 80 A3 00 10 38 21 00 10 80 C3 00 14 7C 65 30 50 4E 80 00 20
+==== BB 3432 (0x10005110) approx BBs exec'd 0 ====
+
+	0x10005110:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10005114:  419E0074  bc 12,30,0x10005188
+	   4: Js30o       	$0x10005188
+
+
+
+. 3432 10005110 8
+. 2F 83 00 00 41 9E 00 74
+==== BB 3433 (0x10005118) approx BBs exec'd 0 ====
+
+	0x10005118:  807D0000  lwz r3,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x1000511C:  48015B49  bl 0x1001AC64
+	   4: MOVL       	$0x10005120, t4
+	   5: PUTL       	t4, LR
+	   6: JMPo-c       	$0x1001AC64  ($4)
+
+
+
+. 3433 10005118 8
+. 80 7D 00 00 48 01 5B 49
+==== BB 3434 (0x1001AC64) approx BBs exec'd 0 ====
+
+	0x1001AC64:  39600098  li r11,152
+	   0: MOVL       	$0x98, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x1001AC68:  4BFFFEA4  b 0x1001AB0C
+	   3: JMPo       	$0x1001AB0C  ($4)
+
+
+
+. 3434 1001AC64 8
+. 39 60 00 98 4B FF FE A4
+==== BB 3435 (0xFECAF18) approx BBs exec'd 0 ====
+
+	0xFECAF18:  807F0048  lwz r3,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECAF1C:  3B828BA0  addi r28,r2,-29792
+	   5: GETL       	R2, t4
+	   6: ADDL       	$0xFFFF8BA0, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0xFECAF20:  80830008  lwz r4,8(r3)
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x8, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0xFECAF24:  7F04E000  cmp cr6,r4,r28
+	  14: GETL       	R4, t10
+	  15: GETL       	R28, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0xFECAF28:  419A0034  bc 12,26,0xFECAF5C
+	  19: Js26o       	$0xFECAF5C
+
+
+
+. 3435 FECAF18 20
+. 80 7F 00 48 3B 82 8B A0 80 83 00 08 7F 04 E0 00 41 9A 00 34
+==== BB 3436 (0xFECAF2C) approx BBs exec'd 0 ====
+
+	0xFECAF2C:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFECAF30:  38A00001  li r5,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFECAF34:  7D201828  lwarx r9,r0,r3
+	   6: GETL       	R3, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0xFECAF38:  7C095800  cmp cr0,r9,r11
+	  11: GETL       	R9, t8
+	  12: GETL       	R11, t10
+	  13: CMPL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0xFECAF3C:  4082000C  bc 4,2,0xFECAF48
+	  16: Jc02o       	$0xFECAF48
+
+
+
+. 3436 FECAF2C 20
+. 39 60 00 00 38 A0 00 01 7D 20 18 28 7C 09 58 00 40 82 00 0C
+==== BB 3437 (0xFECAF40) approx BBs exec'd 0 ====
+
+	0xFECAF40:  7CA0192D  stwcx. r5,r0,r3
+	   0: GETL       	R3, t0
+	   1: GETL       	R5, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECAF44:  40A2FFF0  bc 5,2,0xFECAF34
+	   6: Jc02o       	$0xFECAF34
+
+
+
+. 3437 FECAF40 8
+. 7C A0 19 2D 40 A2 FF F0
+==== BB 3438 (0xFECAF34) approx BBs exec'd 0 ====
+
+	0xFECAF34:  7D201828  lwarx r9,r0,r3
+	   0: GETL       	R3, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECAF38:  7C095800  cmp cr0,r9,r11
+	   5: GETL       	R9, t4
+	   6: GETL       	R11, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECAF3C:  4082000C  bc 4,2,0xFECAF48
+	  10: Jc02o       	$0xFECAF48
+
+
+
+. 3438 FECAF34 12
+. 7D 20 18 28 7C 09 58 00 40 82 00 0C
+==== BB 3439 (0xFECAF48) approx BBs exec'd 0 ====
+
+	0xFECAF48:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFECAF4C:  2F890000  cmpi cr7,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFECAF50:  409E01BC  bc 4,30,0xFECB10C
+	   5: Jc30o       	$0xFECB10C
+
+
+
+. 3439 FECAF48 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 BC
+==== BB 3440 (0xFECAF54) approx BBs exec'd 0 ====
+
+	0xFECAF54:  807F0048  lwz r3,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECAF58:  93830008  stw r28,8(r3)
+	   5: GETL       	R28, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECAF5C:  80E30004  lwz r7,4(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R7
+	  14: INCEIPL       	$4
+
+	0xFECAF60:  38C70001  addi r6,r7,1
+	  15: GETL       	R7, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R6
+	  18: INCEIPL       	$4
+
+	0xFECAF64:  90C30004  stw r6,4(r3)
+	  19: GETL       	R6, t14
+	  20: GETL       	R3, t16
+	  21: ADDL       	$0x4, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFECAF68:  815E1BEC  lwz r10,7148(r30)
+	  24: GETL       	R30, t18
+	  25: ADDL       	$0x1BEC, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R10
+	  28: INCEIPL       	$4
+
+	0xFECAF6C:  812A0000  lwz r9,0(r10)
+	  29: GETL       	R10, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R9
+	  32: INCEIPL       	$4
+
+	0xFECAF70:  2F890000  cmpi cr7,r9,0
+	  33: GETL       	R9, t26
+	  34: CMP0L       	t26, t28  (-rSo)
+	  35: ICRFL       	t28, $0x7, CR
+	  36: INCEIPL       	$4
+
+	0xFECAF74:  419E0020  bc 12,30,0xFECAF94
+	  37: Js30o       	$0xFECAF94
+
+
+
+. 3440 FECAF54 36
+. 80 7F 00 48 93 83 00 08 80 E3 00 04 38 C7 00 01 90 C3 00 04 81 5E 1B EC 81 2A 00 00 2F 89 00 00 41 9E 00 20
+==== BB 3441 (0xFECAF80) approx BBs exec'd 0 ====
+
+	0xFECAF80:  80690034  lwz r3,52(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECAF84:  39490034  addi r10,r9,52
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x34, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0xFECAF88:  2F830000  cmpi cr7,r3,0
+	   9: GETL       	R3, t6
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFECAF8C:  7C691B78  or r9,r3,r3
+	  13: GETL       	R3, t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0xFECAF90:  409EFFE8  bc 4,30,0xFECAF78
+	  16: Jc30o       	$0xFECAF78
+
+
+
+. 3441 FECAF80 20
+. 80 69 00 34 39 49 00 34 2F 83 00 00 7C 69 1B 78 40 9E FF E8
+==== BB 3442 (0xFECAFA8) approx BBs exec'd 0 ====
+
+	0xFECAFA8:  807F0048  lwz r3,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECAFAC:  83E30004  lwz r31,4(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0xFECAFB0:  393FFFFF  addi r9,r31,-1
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0xFFFFFFFF, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0xFECAFB4:  2F090000  cmpi cr6,r9,0
+	  14: GETL       	R9, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x6, CR
+	  17: INCEIPL       	$4
+
+	0xFECAFB8:  91230004  stw r9,4(r3)
+	  18: GETL       	R9, t14
+	  19: GETL       	R3, t16
+	  20: ADDL       	$0x4, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFECAFBC:  419A00CC  bc 12,26,0xFECB088
+	  23: Js26o       	$0xFECB088
+
+
+
+. 3442 FECAFA8 24
+. 80 7F 00 48 83 E3 00 04 39 3F FF FF 2F 09 00 00 91 23 00 04 41 9A 00 CC
+==== BB 3443 (0xFECB088) approx BBs exec'd 0 ====
+
+	0xFECB088:  91230008  stw r9,8(r3)
+	   0: GETL       	R9, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECB08C:  7C0004AC  sync
+	   5: INCEIPL       	$4
+
+	0xFECB090:  7F801828  lwarx r28,r0,r3
+	   6: GETL       	R3, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0xFECB094:  7D20192D  stwcx. r9,r0,r3
+	  11: GETL       	R3, t8
+	  12: GETL       	R9, t10
+	  13: LOCKo       	
+	  14: STL       	t10, (t8)  (-rSo)
+	  15: ICRFL       	cr, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFECB098:  40A2FFF8  bc 5,2,0xFECB090
+	  17: Jc02o       	$0xFECB090
+
+
+
+. 3443 FECB088 20
+. 91 23 00 08 7C 00 04 AC 7F 80 18 28 7D 20 19 2D 40 A2 FF F8
+==== BB 3444 (0xFECB09C) approx BBs exec'd 0 ====
+
+	0xFECB09C:  2F9C0001  cmpi cr7,r28,1
+	   0: GETL       	R28, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFECB0A0:  409DFF20  bc 4,29,0xFECAFC0
+	   5: Jc29o       	$0xFECAFC0
+
+
+
+. 3444 FECB09C 8
+. 2F 9C 00 01 40 9D FF 20
+==== BB 3445 (0xFEBB904) approx BBs exec'd 0 ====
+
+	0xFEBB904:  807D0048  lwz r3,72(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEBB908:  3B828BA0  addi r28,r2,-29792
+	   5: GETL       	R2, t4
+	   6: ADDL       	$0xFFFF8BA0, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0xFEBB90C:  80A30008  lwz r5,8(r3)
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x8, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0xFEBB910:  7C85E000  cmp cr1,r5,r28
+	  14: GETL       	R5, t10
+	  15: GETL       	R28, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x1, CR
+	  18: INCEIPL       	$4
+
+	0xFEBB914:  41860030  bc 12,6,0xFEBB944
+	  19: Js06o       	$0xFEBB944
+
+
+
+. 3445 FEBB904 20
+. 80 7D 00 48 3B 82 8B A0 80 A3 00 08 7C 85 E0 00 41 86 00 30
+==== BB 3446 (0xFEBB918) approx BBs exec'd 0 ====
+
+	0xFEBB918:  38C00001  li r6,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0xFEBB91C:  7D201828  lwarx r9,r0,r3
+	   3: GETL       	R3, t2
+	   4: LOCKo       	
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0xFEBB920:  7C09F800  cmp cr0,r9,r31
+	   8: GETL       	R9, t6
+	   9: GETL       	R31, t8
+	  10: CMPL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFEBB924:  4082000C  bc 4,2,0xFEBB930
+	  13: Jc02o       	$0xFEBB930
+
+
+
+. 3446 FEBB918 16
+. 38 C0 00 01 7D 20 18 28 7C 09 F8 00 40 82 00 0C
+==== BB 3447 (0xFEBB928) approx BBs exec'd 0 ====
+
+	0xFEBB928:  7CC0192D  stwcx. r6,r0,r3
+	   0: GETL       	R3, t0
+	   1: GETL       	R6, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEBB92C:  40A2FFF0  bc 5,2,0xFEBB91C
+	   6: Jc02o       	$0xFEBB91C
+
+
+
+. 3447 FEBB928 8
+. 7C C0 19 2D 40 A2 FF F0
+==== BB 3448 (0xFEBB91C) approx BBs exec'd 0 ====
+
+	0xFEBB91C:  7D201828  lwarx r9,r0,r3
+	   0: GETL       	R3, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEBB920:  7C09F800  cmp cr0,r9,r31
+	   5: GETL       	R9, t4
+	   6: GETL       	R31, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEBB924:  4082000C  bc 4,2,0xFEBB930
+	  10: Jc02o       	$0xFEBB930
+
+
+
+. 3448 FEBB91C 12
+. 7D 20 18 28 7C 09 F8 00 40 82 00 0C
+==== BB 3449 (0xFEBB930) approx BBs exec'd 0 ====
+
+	0xFEBB930:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFEBB934:  2F090000  cmpi cr6,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0xFEBB938:  409A0250  bc 4,26,0xFEBBB88
+	   5: Jc26o       	$0xFEBBB88
+
+
+
+. 3449 FEBB930 12
+. 4C 00 01 2C 2F 09 00 00 40 9A 02 50
+==== BB 3450 (0xFEBB93C) approx BBs exec'd 0 ====
+
+	0xFEBB93C:  807D0048  lwz r3,72(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEBB940:  93830008  stw r28,8(r3)
+	   5: GETL       	R28, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEBB944:  81030004  lwz r8,4(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0xFEBB948:  38E80001  addi r7,r8,1
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0xFEBB94C:  90E30004  stw r7,4(r3)
+	  19: GETL       	R7, t14
+	  20: GETL       	R3, t16
+	  21: ADDL       	$0x4, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0xFEBB950:  801D0000  lwz r0,0(r29)
+	  24: GETL       	R29, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R0
+	  27: INCEIPL       	$4
+
+	0xFEBB954:  540ADFFE  rlwinm r10,r0,27,31,31
+	  28: GETL       	R0, t22
+	  29: ROLL       	$0x1B, t22
+	  30: ANDL       	$0x1, t22
+	  31: PUTL       	t22, R10
+	  32: INCEIPL       	$4
+
+	0xFEBB958:  70092000  andi. r9,r0,0x2000
+	  33: GETL       	R0, t24
+	  34: ANDL       	$0x2000, t24
+	  35: PUTL       	t24, R9
+	  36: CMP0L       	t24, t26  (-rSo)
+	  37: ICRFL       	t26, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0xFEBB95C:  2F8A0000  cmpi cr7,r10,0
+	  39: GETL       	R10, t28
+	  40: CMP0L       	t28, t30  (-rSo)
+	  41: ICRFL       	t30, $0x7, CR
+	  42: INCEIPL       	$4
+
+	0xFEBB960:  7C600026  mfcr r3
+	  43: GETL       	CR, t32
+	  44: PUTL       	t32, R3
+	  45: INCEIPL       	$4
+
+	0xFEBB964:  5463FFFE  rlwinm r3,r3,31,31,31
+	  46: GETL       	R3, t34
+	  47: ROLL       	$0x1F, t34
+	  48: ANDL       	$0x1, t34
+	  49: PUTL       	t34, R3
+	  50: INCEIPL       	$4
+
+	0xFEBB968:  3B43FFFF  addi r26,r3,-1
+	  51: GETL       	R3, t36
+	  52: ADDL       	$0xFFFFFFFF, t36
+	  53: PUTL       	t36, R26
+	  54: INCEIPL       	$4
+
+	0xFEBB96C:  4082013C  bc 4,2,0xFEBBAA8
+	  55: Jc02o       	$0xFEBBAA8
+
+
+
+. 3450 FEBB93C 52
+. 80 7D 00 48 93 83 00 08 81 03 00 04 38 E8 00 01 90 E3 00 04 80 1D 00 00 54 0A DF FE 70 09 20 00 2F 8A 00 00 7C 60 00 26 54 63 FF FE 3B 43 FF FF 40 82 01 3C
+==== BB 3451 (0xFEC83B0) approx BBs exec'd 0 ====
+
+	0xFEC83B0:  70090800  andi. r9,r0,0x800
+	   0: GETL       	R0, t0
+	   1: ANDL       	$0x800, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEC83B4:  41A2FF18  bc 13,2,0xFEC82CC
+	   6: Js02o       	$0xFEC82CC
+
+
+
+. 3451 FEC83B0 8
+. 70 09 08 00 41 A2 FF 18
+==== BB 3452 (0xFEC83B8) approx BBs exec'd 0 ====
+
+	0xFEC83B8:  80A30060  lwz r5,96(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFEC83BC:  2C850000  cmpi cr1,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0xFEC83C0:  40850098  bc 4,5,0xFEC8458
+	   9: Jc05o       	$0xFEC8458
+
+
+
+. 3452 FEC83B8 12
+. 80 A3 00 60 2C 85 00 00 40 85 00 98
+==== BB 3453 (0xFEC8458) approx BBs exec'd 0 ====
+
+	0xFEC8458:  80830010  lwz r4,16(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEC845C:  80C30014  lwz r6,20(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0xFEC8460:  7CA43050  subf r5,r4,r6
+	  10: GETL       	R4, t8
+	  11: GETL       	R6, t10
+	  12: SUBL       	t8, t10
+	  13: PUTL       	t10, R5
+	  14: INCEIPL       	$4
+
+	0xFEC8464:  480008C5  bl 0xFEC8D28
+	  15: MOVL       	$0xFEC8468, t12
+	  16: PUTL       	t12, LR
+	  17: JMPo-c       	$0xFEC8D28  ($4)
+
+
+
+. 3453 FEC8458 16
+. 80 83 00 10 80 C3 00 14 7C A4 30 50 48 00 08 C5
+==== BB 3454 (0xFEC8D58) approx BBs exec'd 0 ====
+
+	0xFEC8D58:  80830000  lwz r4,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0xFEC8D5C:  70891000  andi. r9,r4,0x1000
+	   4: GETL       	R4, t4
+	   5: ANDL       	$0x1000, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEC8D60:  418200C0  bc 12,2,0xFEC8E20
+	  10: Js02o       	$0xFEC8E20
+
+
+
+. 3454 FEC8D58 12
+. 80 83 00 00 70 89 10 00 41 82 00 C0
+==== BB 3455 (0xFEC8E20) approx BBs exec'd 0 ====
+
+	0xFEC8E20:  80030008  lwz r0,8(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEC8E24:  80A30010  lwz r5,16(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0xFEC8E28:  7F802800  cmp cr7,r0,r5
+	  10: GETL       	R0, t8
+	  11: GETL       	R5, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFEC8E2C:  41BEFF48  bc 13,30,0xFEC8D74
+	  15: Js30o       	$0xFEC8D74
+
+
+
+. 3455 FEC8E20 16
+. 80 03 00 08 80 A3 00 10 7F 80 28 00 41 BE FF 48
+==== BB 3456 (0xFEC8D74) approx BBs exec'd 0 ====
+
+	0xFEC8D74:  891F0046  lbz r8,70(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFEC8D78:  7F84E378  or r4,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFEC8D7C:  7FE3FB78  or r3,r31,r31
+	   8: GETL       	R31, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFEC8D80:  7F65DB78  or r5,r27,r27
+	  11: GETL       	R27, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0xFEC8D84:  7D070774  extsb r7,r8
+	  14: GETB       	R8, t10
+	  15: WIDENL_Bs       	_st10
+	  16: PUTL       	t10, R7
+	  17: INCEIPL       	$4
+
+	0xFEC8D88:  7D27FA14  add r9,r7,r31
+	  18: GETL       	R7, t12
+	  19: GETL       	R31, t14
+	  20: ADDL       	t12, t14
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0xFEC8D8C:  81690098  lwz r11,152(r9)
+	  23: GETL       	R9, t16
+	  24: ADDL       	$0x98, t16
+	  25: LDL       	(t16), t18
+	  26: PUTL       	t18, R11
+	  27: INCEIPL       	$4
+
+	0xFEC8D90:  83AB003C  lwz r29,60(r11)
+	  28: GETL       	R11, t20
+	  29: ADDL       	$0x3C, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R29
+	  32: INCEIPL       	$4
+
+	0xFEC8D94:  7FA903A6  mtctr r29
+	  33: GETL       	R29, t24
+	  34: PUTL       	t24, CTR
+	  35: INCEIPL       	$4
+
+	0xFEC8D98:  4E800421  bctrl
+	  36: MOVL       	$0xFEC8D9C, t26
+	  37: PUTL       	t26, LR
+	  38: GETL       	CTR, t28
+	  39: JMPo-c       	t28  ($4)
+
+
+
+. 3456 FEC8D74 40
+. 89 1F 00 46 7F 84 E3 78 7F E3 FB 78 7F 65 DB 78 7D 07 07 74 7D 27 FA 14 81 69 00 98 83 AB 00 3C 7F A9 03 A6 4E 80 04 21
+==== BB 3457 _IO_file_write@@GLIBC_2.1(0xFECA570) approx BBs exec'd 0 ====
+
+	0xFECA570:  2F850000  cmpi cr7,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFECA574:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFECA578:  9421FFE0  stwu r1,-32(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFE0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECA57C:  9361000C  stw r27,12(r1)
+	  13: GETL       	R27, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xC, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFECA580:  7CBB2B78  or r27,r5,r5
+	  18: GETL       	R5, t14
+	  19: PUTL       	t14, R27
+	  20: INCEIPL       	$4
+
+	0xFECA584:  93810010  stw r28,16(r1)
+	  21: GETL       	R28, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFECA588:  7C7C1B78  or r28,r3,r3
+	  26: GETL       	R3, t20
+	  27: PUTL       	t20, R28
+	  28: INCEIPL       	$4
+
+	0xFECA58C:  93A10014  stw r29,20(r1)
+	  29: GETL       	R29, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x14, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFECA590:  7C9D2378  or r29,r4,r4
+	  34: GETL       	R4, t26
+	  35: PUTL       	t26, R29
+	  36: INCEIPL       	$4
+
+	0xFECA594:  93E1001C  stw r31,28(r1)
+	  37: GETL       	R31, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x1C, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0xFECA598:  7CBF2B78  or r31,r5,r5
+	  42: GETL       	R5, t32
+	  43: PUTL       	t32, R31
+	  44: INCEIPL       	$4
+
+	0xFECA59C:  93C10018  stw r30,24(r1)
+	  45: GETL       	R30, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x18, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0xFECA5A0:  90010024  stw r0,36(r1)
+	  50: GETL       	R0, t38
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x24, t40
+	  53: STL       	t38, (t40)
+	  54: INCEIPL       	$4
+
+	0xFECA5A4:  409D003C  bc 4,29,0xFECA5E0
+	  55: Jc29o       	$0xFECA5E0
+
+
+
+. 3457 FECA570 56
+. 2F 85 00 00 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C BB 2B 78 93 81 00 10 7C 7C 1B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C BF 2B 78 93 C1 00 18 90 01 00 24 40 9D 00 3C
+==== BB 3458 (0xFECA5A8) approx BBs exec'd 0 ====
+
+	0xFECA5A8:  807C003C  lwz r3,60(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x3C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECA5AC:  7FA4EB78  or r4,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFECA5B0:  7FE5FB78  or r5,r31,r31
+	   8: GETL       	R31, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0xFECA5B4:  70690002  andi. r9,r3,0x2
+	  11: GETL       	R3, t8
+	  12: ANDL       	$0x2, t8
+	  13: PUTL       	t8, R9
+	  14: CMP0L       	t8, t10  (-rSo)
+	  15: ICRFL       	t10, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFECA5B8:  4082007C  bc 4,2,0xFECA634
+	  17: Jc02o       	$0xFECA634
+
+
+
+. 3458 FECA5A8 20
+. 80 7C 00 3C 7F A4 EB 78 7F E5 FB 78 70 69 00 02 40 82 00 7C
+==== BB 3459 (0xFECA5BC) approx BBs exec'd 0 ====
+
+	0xFECA5BC:  807C0038  lwz r3,56(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x38, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECA5C0:  7FA4EB78  or r4,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFECA5C4:  7FE5FB78  or r5,r31,r31
+	   8: GETL       	R31, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0xFECA5C8:  48057A11  bl 0xFF21FD8
+	  11: MOVL       	$0xFECA5CC, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0xFF21FD8  ($4)
+
+
+
+. 3459 FECA5BC 16
+. 80 7C 00 38 7F A4 EB 78 7F E5 FB 78 48 05 7A 11
+==== BB 3460 write(0xFF21FD8) approx BBs exec'd 0 ====
+
+	0xFF21FD8:  81428BA0  lwz r10,-29792(r2)
+	   0: GETL       	R2, t0
+	   1: ADDL       	$0xFFFF8BA0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFF21FDC:  2C0A0000  cmpi cr0,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFF21FE0:  40820014  bc 4,2,0xFF21FF4
+	   9: Jc02o       	$0xFF21FF4
+
+
+
+. 3460 FF21FD8 12
+. 81 42 8B A0 2C 0A 00 00 40 82 00 14
+==== BB 3461 __write_nocancel(0xFF21FE4) approx BBs exec'd 0 ====
+
+	0xFF21FE4:  38000004  li r0,4
+	   0: MOVL       	$0x4, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFF21FE8:  44000002  sc
+	   3: JMPo-sys       	$0xFF21FEC  ($4)
+
+
+
+. 3461 FF21FE4 8
+. 38 00 00 04 44 00 00 02
+Wed Jan 12 11:03:06 GMT 2005
+==== BB 3462 (0xFF21FEC) approx BBs exec'd 0 ====
+
+	0xFF21FEC:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+
+. 3462 FF21FEC 4
+. 4C A3 00 20
+==== BB 3463 (0xFECA5CC) approx BBs exec'd 0 ====
+
+	0xFECA5CC:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFECA5D0:  7FBD1A14  add r29,r29,r3
+	   4: GETL       	R29, t4
+	   5: GETL       	R3, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R29
+	   8: INCEIPL       	$4
+
+	0xFECA5D4:  41840074  bc 12,4,0xFECA648
+	   9: Js04o       	$0xFECA648
+
+
+
+. 3463 FECA5CC 12
+. 2C 83 00 00 7F BD 1A 14 41 84 00 74
+==== BB 3464 (0xFECA5D8) approx BBs exec'd 0 ====
+
+	0xFECA5D8:  7FE3F851  subf. r31,r3,r31
+	   0: GETL       	R3, t0
+	   1: GETL       	R31, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R31
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFECA5DC:  4181FFCC  bc 12,1,0xFECA5A8
+	   7: Js01o       	$0xFECA5A8
+
+
+
+. 3464 FECA5D8 8
+. 7F E3 F8 51 41 81 FF CC
+==== BB 3465 (0xFECA5E0) approx BBs exec'd 0 ====
+
+	0xFECA5E0:  80DC0050  lwz r6,80(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFECA5E4:  7C7FD850  subf r3,r31,r27
+	   5: GETL       	R31, t4
+	   6: GETL       	R27, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFECA5E8:  2F060000  cmpi cr6,r6,0
+	  10: GETL       	R6, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFECA5EC:  41980024  bc 12,24,0xFECA610
+	  14: Js24o       	$0xFECA610
+
+
+
+. 3465 FECA5E0 16
+. 80 DC 00 50 7C 7F D8 50 2F 06 00 00 41 98 00 24
+==== BB 3466 (0xFECA610) approx BBs exec'd 0 ====
+
+	0xFECA610:  80E10024  lwz r7,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0xFECA614:  8361000C  lwz r27,12(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R27
+	   9: INCEIPL       	$4
+
+	0xFECA618:  83810010  lwz r28,16(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R28
+	  14: INCEIPL       	$4
+
+	0xFECA61C:  7CE803A6  mtlr r7
+	  15: GETL       	R7, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFECA620:  83A10014  lwz r29,20(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R29
+	  22: INCEIPL       	$4
+
+	0xFECA624:  83C10018  lwz r30,24(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R30
+	  27: INCEIPL       	$4
+
+	0xFECA628:  83E1001C  lwz r31,28(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x1C, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R31
+	  32: INCEIPL       	$4
+
+	0xFECA62C:  38210020  addi r1,r1,32
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x20, t26
+	  35: PUTL       	t26, R1
+	  36: INCEIPL       	$4
+
+	0xFECA630:  4E800020  blr
+	  37: GETL       	LR, t28
+	  38: JMPo-r       	t28  ($4)
+
+
+
+. 3466 FECA610 36
+. 80 E1 00 24 83 61 00 0C 83 81 00 10 7C E8 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3467 (0xFEC8D9C) approx BBs exec'd 0 ====
+
+	0xFEC8D9C:  7C7D1B78  or r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFEC8DA0:  A07F0044  lhz r3,68(r31)
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x44, t2
+	   5: LDW       	(t2), t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEC8DA4:  301DFFFF  addic r0,r29,-1
+	   8: GETL       	R29, t6
+	   9: ADCL       	$0xFFFFFFFF, t6  (-wCa)
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0xFEC8DA8:  7C80E910  subfe r4,r0,r29
+	  12: GETL       	R0, t8
+	  13: GETL       	R29, t10
+	  14: SBBL       	t8, t10  (-rCa-wCa)
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0xFEC8DAC:  7D4300D0  neg r10,r3
+	  17: GETL       	R3, t12
+	  18: NEGL       	t12
+	  19: PUTL       	t12, R10
+	  20: INCEIPL       	$4
+
+	0xFEC8DB0:  55400FFE  rlwinm r0,r10,1,31,31
+	  21: GETL       	R10, t14
+	  22: SHRL       	$0x1F, t14
+	  23: PUTL       	t14, R0
+	  24: INCEIPL       	$4
+
+	0xFEC8DB4:  7C0B2039  and. r11,r0,r4
+	  25: GETL       	R0, t16
+	  26: GETL       	R4, t18
+	  27: ANDL       	t16, t18
+	  28: PUTL       	t18, R11
+	  29: CMP0L       	t18, t20  (-rSo)
+	  30: ICRFL       	t20, $0x0, CR
+	  31: INCEIPL       	$4
+
+	0xFEC8DB8:  408200CC  bc 4,2,0xFEC8E84
+	  32: Jc02o       	$0xFEC8E84
+
+
+
+. 3467 FEC8D9C 32
+. 7C 7D 1B 78 A0 7F 00 44 30 1D FF FF 7C 80 E9 10 7D 43 00 D0 55 40 0F FE 7C 0B 20 39 40 82 00 CC
+==== BB 3468 (0xFEC8DBC) approx BBs exec'd 0 ====
+
+	0xFEC8DBC:  839F0060  lwz r28,96(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0xFEC8DC0:  813F001C  lwz r9,28(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFEC8DC4:  2F9C0000  cmpi cr7,r28,0
+	  10: GETL       	R28, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0xFEC8DC8:  913F000C  stw r9,12(r31)
+	  14: GETL       	R9, t12
+	  15: GETL       	R31, t14
+	  16: ADDL       	$0xC, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0xFEC8DCC:  913F0004  stw r9,4(r31)
+	  19: GETL       	R9, t16
+	  20: GETL       	R31, t18
+	  21: ADDL       	$0x4, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0xFEC8DD0:  913F0008  stw r9,8(r31)
+	  24: GETL       	R9, t20
+	  25: GETL       	R31, t22
+	  26: ADDL       	$0x8, t22
+	  27: STL       	t20, (t22)
+	  28: INCEIPL       	$4
+
+	0xFEC8DD4:  913F0014  stw r9,20(r31)
+	  29: GETL       	R9, t24
+	  30: GETL       	R31, t26
+	  31: ADDL       	$0x14, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0xFEC8DD8:  913F0010  stw r9,16(r31)
+	  34: GETL       	R9, t28
+	  35: GETL       	R31, t30
+	  36: ADDL       	$0x10, t30
+	  37: STL       	t28, (t30)
+	  38: INCEIPL       	$4
+
+	0xFEC8DDC:  409D0094  bc 4,29,0xFEC8E70
+	  39: Jc29o       	$0xFEC8E70
+
+
+
+. 3468 FEC8DBC 36
+. 83 9F 00 60 81 3F 00 1C 2F 9C 00 00 91 3F 00 0C 91 3F 00 04 91 3F 00 08 91 3F 00 14 91 3F 00 10 40 9D 00 94
+==== BB 3469 (0xFEC8E70) approx BBs exec'd 0 ====
+
+	0xFEC8E70:  80BF0000  lwz r5,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFEC8E74:  70AB0202  andi. r11,r5,0x202
+	   4: GETL       	R5, t4
+	   5: ANDL       	$0x202, t4
+	   6: PUTL       	t4, R11
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEC8E78:  40A2FF6C  bc 5,2,0xFEC8DE4
+	  10: Jc02o       	$0xFEC8DE4
+
+
+
+. 3469 FEC8E70 12
+. 80 BF 00 00 70 AB 02 02 40 A2 FF 6C
+==== BB 3470 (0xFEC8E7C) approx BBs exec'd 0 ====
+
+	0xFEC8E7C:  813F0020  lwz r9,32(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEC8E80:  4BFFFF64  b 0xFEC8DE4
+	   5: JMPo       	$0xFEC8DE4  ($4)
+
+
+
+. 3470 FEC8E7C 8
+. 81 3F 00 20 4B FF FF 64
+==== BB 3471 (0xFEC8DE4) approx BBs exec'd 0 ====
+
+	0xFEC8DE4:  913F0018  stw r9,24(r31)
+	   0: GETL       	R9, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEC8DE8:  7FA5EB78  or r5,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFEC8DEC:  7C85D800  cmp cr1,r5,r27
+	   8: GETL       	R5, t6
+	   9: GETL       	R27, t8
+	  10: CMPL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0xFEC8DF0:  3860FFFF  li r3,-1
+	  13: MOVL       	$0xFFFFFFFF, t12
+	  14: PUTL       	t12, R3
+	  15: INCEIPL       	$4
+
+	0xFEC8DF4:  40A60008  bc 5,6,0xFEC8DFC
+	  16: Jc06o       	$0xFEC8DFC
+
+
+
+. 3471 FEC8DE4 20
+. 91 3F 00 18 7F A5 EB 78 7C 85 D8 00 38 60 FF FF 40 A6 00 08
+==== BB 3472 (0xFEC8468) approx BBs exec'd 0 ====
+
+	0xFEC8468:  7C7C1B78  or r28,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0xFEC846C:  4BFFFF74  b 0xFEC83E0
+	   3: JMPo       	$0xFEC83E0  ($4)
+
+
+
+. 3472 FEC8468 8
+. 7C 7C 1B 78 4B FF FF 74
+==== BB 3473 (0xFEC83E0) approx BBs exec'd 0 ====
+
+	0xFEC83E0:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEC83E4:  48004A05  bl 0xFECCDE8
+	   3: MOVL       	$0xFEC83E8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFECCDE8  ($4)
+
+
+
+. 3473 FEC83E0 8
+. 7F E3 FB 78 48 00 4A 05
+==== BB 3474 (0xFEC83E8) approx BBs exec'd 0 ====
+
+	0xFEC83E8:  881F0046  lbz r0,70(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEC83EC:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEC83F0:  7C1D0774  extsb r29,r0
+	   8: GETB       	R0, t6
+	   9: WIDENL_Bs       	_st6
+	  10: PUTL       	t6, R29
+	  11: INCEIPL       	$4
+
+	0xFEC83F4:  7D9DFA14  add r12,r29,r31
+	  12: GETL       	R29, t8
+	  13: GETL       	R31, t10
+	  14: ADDL       	t8, t10
+	  15: PUTL       	t10, R12
+	  16: INCEIPL       	$4
+
+	0xFEC83F8:  814C0098  lwz r10,152(r12)
+	  17: GETL       	R12, t12
+	  18: ADDL       	$0x98, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R10
+	  21: INCEIPL       	$4
+
+	0xFEC83FC:  816A0044  lwz r11,68(r10)
+	  22: GETL       	R10, t16
+	  23: ADDL       	$0x44, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R11
+	  26: INCEIPL       	$4
+
+	0xFEC8400:  7D6903A6  mtctr r11
+	  27: GETL       	R11, t20
+	  28: PUTL       	t20, CTR
+	  29: INCEIPL       	$4
+
+	0xFEC8404:  4E800421  bctrl
+	  30: MOVL       	$0xFEC8408, t22
+	  31: PUTL       	t22, LR
+	  32: GETL       	CTR, t24
+	  33: JMPo-c       	t24  ($4)
+
+
+
+. 3474 FEC83E8 32
+. 88 1F 00 46 7F E3 FB 78 7C 1D 07 74 7D 9D FA 14 81 4C 00 98 81 6A 00 44 7D 69 03 A6 4E 80 04 21
+==== BB 3475 (0xFEC8408) approx BBs exec'd 0 ====
+
+	0xFEC8408:  7C7D1B78  or r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFEC840C:  807F0060  lwz r3,96(r31)
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x60, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEC8410:  2F030000  cmpi cr6,r3,0
+	   8: GETL       	R3, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFEC8414:  4199FEF4  bc 12,25,0xFEC8308
+	  12: Js25o       	$0xFEC8308
+
+
+
+. 3475 FEC8408 16
+. 7C 7D 1B 78 80 7F 00 60 2F 03 00 00 41 99 FE F4
+==== BB 3476 (0xFEBBAD0) approx BBs exec'd 0 ====
+
+	0xFEBBAD0:  91230008  stw r9,8(r3)
+	   0: GETL       	R9, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEBBAD4:  7C0004AC  sync
+	   5: INCEIPL       	$4
+
+	0xFEBBAD8:  7CC01828  lwarx r6,r0,r3
+	   6: GETL       	R3, t4
+	   7: LOCKo       	
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0xFEBBADC:  7D20192D  stwcx. r9,r0,r3
+	  11: GETL       	R3, t8
+	  12: GETL       	R9, t10
+	  13: LOCKo       	
+	  14: STL       	t10, (t8)  (-rSo)
+	  15: ICRFL       	cr, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0xFEBBAE0:  40A2FFF8  bc 5,2,0xFEBBAD8
+	  17: Jc02o       	$0xFEBBAD8
+
+
+
+. 3476 FEBBAD0 20
+. 91 23 00 08 7C 00 04 AC 7C C0 18 28 7D 20 19 2D 40 A2 FF F8
+==== BB 3477 (0xFEBBAE4) approx BBs exec'd 0 ====
+
+	0xFEBBAE4:  2C860001  cmpi cr1,r6,1
+	   0: GETL       	R6, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFEBBAE8:  4085FEB8  bc 4,5,0xFEBB9A0
+	   5: Jc05o       	$0xFEBB9A0
+
+
+
+. 3477 FEBBAE4 8
+. 2C 86 00 01 40 85 FE B8
+==== BB 3478 (0x10005120) approx BBs exec'd 0 ====
+
+	0x10005120:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x10005124:  409E0080  bc 4,30,0x100051A4
+	   4: Jc30o       	$0x100051A4
+
+
+
+. 3478 10005120 8
+. 2F 83 00 00 40 9E 00 80
+==== BB 3479 (0x10005128) approx BBs exec'd 0 ====
+
+	0x10005128:  2F9F0000  cmpi cr7,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x1000512C:  809E0160  lwz r4,352(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x160, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x10005130:  38A00005  li r5,5
+	   9: MOVL       	$0x5, t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x10005134:  38600000  li r3,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x10005138:  419C0050  bc 12,28,0x10005188
+	  15: Js28o       	$0x10005188
+
+
+
+. 3479 10005128 20
+. 2F 9F 00 00 80 9E 01 60 38 A0 00 05 38 60 00 00 41 9C 00 50
+==== BB 3480 (0x10005188) approx BBs exec'd 0 ====
+
+	0x10005188:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x1000518C:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x10005190:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0x10005194:  7C0803A6  mtlr r0
+	  15: GETL       	R0, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x10005198:  83E1001C  lwz r31,28(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0x1000519C:  38210020  addi r1,r1,32
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x20, t18
+	  25: PUTL       	t18, R1
+	  26: INCEIPL       	$4
+
+	0x100051A0:  4E800020  blr
+	  27: GETL       	LR, t20
+	  28: JMPo-r       	t20  ($4)
+
+
+
+. 3480 10005188 28
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3481 (0xFE93C44) approx BBs exec'd 0 ====
+
+	0xFE93C44:  815F0000  lwz r10,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE93C48:  818A0004  lwz r12,4(r10)
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R12
+	   8: INCEIPL       	$4
+
+	0xFE93C4C:  2C8C0000  cmpi cr1,r12,0
+	   9: GETL       	R12, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0xFE93C50:  4086FFAC  bc 4,6,0xFE93BFC
+	  13: Jc06o       	$0xFE93BFC
+
+
+
+. 3481 FE93C44 16
+. 81 5F 00 00 81 8A 00 04 2C 8C 00 00 40 86 FF AC
+==== BB 3482 (0x10007AB4) approx BBs exec'd 0 ====
+
+	0x10007AB4:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x10007AB8:  7CA802A6  mflr r5
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x10007ABC:  429F0005  bcl 20,31,0x10007AC0
+	   9: MOVL       	$0x10007AC0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x10007AC0:  93C10018  stw r30,24(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x10007AC4:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0x10007AC8:  93E1001C  stw r31,28(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x10007ACC:  93A10014  stw r29,20(r1)
+	  25: GETL       	R29, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0x10007AD0:  90A10024  stw r5,36(r1)
+	  30: GETL       	R5, t22
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x24, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0x10007AD4:  809EFFF0  lwz r4,-16(r30)
+	  35: GETL       	R30, t26
+	  36: ADDL       	$0xFFFFFFF0, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R4
+	  39: INCEIPL       	$4
+
+	0x10007AD8:  7FC4F214  add r30,r4,r30
+	  40: GETL       	R4, t30
+	  41: GETL       	R30, t32
+	  42: ADDL       	t30, t32
+	  43: PUTL       	t32, R30
+	  44: INCEIPL       	$4
+
+	0x10007ADC:  817E800C  lwz r11,-32756(r30)
+	  45: GETL       	R30, t34
+	  46: ADDL       	$0xFFFF800C, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R11
+	  49: INCEIPL       	$4
+
+	0x10007AE0:  807E8008  lwz r3,-32760(r30)
+	  50: GETL       	R30, t38
+	  51: ADDL       	$0xFFFF8008, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R3
+	  54: INCEIPL       	$4
+
+	0x10007AE4:  7C0B1850  subf r0,r11,r3
+	  55: GETL       	R11, t42
+	  56: GETL       	R3, t44
+	  57: SUBL       	t42, t44
+	  58: PUTL       	t44, R0
+	  59: INCEIPL       	$4
+
+	0x10007AE8:  7C091670  srawi r9,r0,2
+	  60: GETL       	R0, t46
+	  61: SARL       	$0x2, t46  (-wCa)
+	  62: PUTL       	t46, R9
+	  63: INCEIPL       	$4
+
+	0x10007AEC:  2F890000  cmpi cr7,r9,0
+	  64: GETL       	R9, t48
+	  65: CMP0L       	t48, t50  (-rSo)
+	  66: ICRFL       	t50, $0x7, CR
+	  67: INCEIPL       	$4
+
+	0x10007AF0:  3BE9FFFF  addi r31,r9,-1
+	  68: GETL       	R9, t52
+	  69: ADDL       	$0xFFFFFFFF, t52
+	  70: PUTL       	t52, R31
+	  71: INCEIPL       	$4
+
+	0x10007AF4:  419E0028  bc 12,30,0x10007B1C
+	  72: Js30o       	$0x10007B1C
+
+
+
+. 3482 10007AB4 68
+. 94 21 FF E0 7C A8 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 A1 00 14 90 A1 00 24 80 9E FF F0 7F C4 F2 14 81 7E 80 0C 80 7E 80 08 7C 0B 18 50 7C 09 16 70 2F 89 00 00 3B E9 FF FF 41 9E 00 28
+==== BB 3483 (0x10007B1C) approx BBs exec'd 0 ====
+
+	0x10007B1C:  480002BD  bl 0x10007DD8
+	   0: MOVL       	$0x10007B20, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x10007DD8  ($4)
+
+
+
+. 3483 10007B1C 4
+. 48 00 02 BD
+==== BB 3484 (0x10007DD8) approx BBs exec'd 0 ====
+
+	0x10007DD8:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x10007DDC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10007DE0:  90010024  stw r0,36(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x24, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x10007DE4:  4BFF9275  bl 0x10001058
+	  14: MOVL       	$0x10007DE8, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x10001058  ($4)
+
+
+
+. 3484 10007DD8 16
+. 94 21 FF E0 7C 08 02 A6 90 01 00 24 4B FF 92 75
+==== BB 3485 (0x10001058) approx BBs exec'd 0 ====
+
+	0x10001058:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x1000105C:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x10001060:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x10001064:  3FC01002  lis r30,4098
+	  14: MOVL       	$0x10020000, t10
+	  15: PUTL       	t10, R30
+	  16: INCEIPL       	$4
+
+	0x10001068:  93E1000C  stw r31,12(r1)
+	  17: GETL       	R31, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0xC, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0x1000106C:  90010014  stw r0,20(r1)
+	  22: GETL       	R0, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x14, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x10001070:  881EADD4  lbz r0,-21036(r30)
+	  27: MOVL       	$0x1001ADD4, t20
+	  28: LDB       	(t20), t22
+	  29: PUTL       	t22, R0
+	  30: INCEIPL       	$4
+
+	0x10001074:  2F800000  cmpi cr7,r0,0
+	  31: GETL       	R0, t24
+	  32: CMP0L       	t24, t26  (-rSo)
+	  33: ICRFL       	t26, $0x7, CR
+	  34: INCEIPL       	$4
+
+	0x10001078:  409E0034  bc 4,30,0x100010AC
+	  35: Jc30o       	$0x100010AC
+
+
+
+. 3485 10001058 36
+. 7C 08 02 A6 94 21 FF F0 93 C1 00 08 3F C0 10 02 93 E1 00 0C 90 01 00 14 88 1E AD D4 2F 80 00 00 40 9E 00 34
+==== BB 3486 (0x1000107C) approx BBs exec'd 0 ====
+
+	0x1000107C:  3FE01002  lis r31,4098
+	   0: MOVL       	$0x10020000, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0x10001080:  48000014  b 0x10001094
+	   3: JMPo       	$0x10001094  ($4)
+
+
+
+. 3486 1000107C 8
+. 3F E0 10 02 48 00 00 14
+==== BB 3487 (0x10001094) approx BBs exec'd 0 ====
+
+	0x10001094:  817FA898  lwz r11,-22376(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xFFFFA898, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x10001098:  812B0000  lwz r9,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x1000109C:  2F890000  cmpi cr7,r9,0
+	   9: GETL       	R9, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x100010A0:  409EFFE4  bc 4,30,0x10001084
+	  13: Jc30o       	$0x10001084
+
+
+
+. 3487 10001094 16
+. 81 7F A8 98 81 2B 00 00 2F 89 00 00 40 9E FF E4
+==== BB 3488 (0x100010A4) approx BBs exec'd 0 ====
+
+	0x100010A4:  38000001  li r0,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x100010A8:  981EADD4  stb r0,-21036(r30)
+	   3: GETL       	R0, t2
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0xFFFFADD4, t4
+	   6: STB       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x100010AC:  80010014  lwz r0,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x100010B0:  83C10008  lwz r30,8(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x100010B4:  83E1000C  lwz r31,12(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0xC, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0x100010B8:  7C0803A6  mtlr r0
+	  23: GETL       	R0, t18
+	  24: PUTL       	t18, LR
+	  25: INCEIPL       	$4
+
+	0x100010BC:  38210010  addi r1,r1,16
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x10, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0x100010C0:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+
+. 3488 100010A4 32
+. 38 00 00 01 98 1E AD D4 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3489 (0x10007DE8) approx BBs exec'd 0 ====
+
+	0x10007DE8:  80010024  lwz r0,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10007DEC:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0x10007DF0:  38210020  addi r1,r1,32
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x20, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0x10007DF4:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 3489 10007DE8 16
+. 80 01 00 24 7C 08 03 A6 38 21 00 20 4E 80 00 20
+==== BB 3490 (0x10007B20) approx BBs exec'd 0 ====
+
+	0x10007B20:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x10007B24:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x10007B28:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0x10007B2C:  7D0803A6  mtlr r8
+	  15: GETL       	R8, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x10007B30:  83E1001C  lwz r31,28(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0x10007B34:  38210020  addi r1,r1,32
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x20, t18
+	  25: PUTL       	t18, R1
+	  26: INCEIPL       	$4
+
+	0x10007B38:  4E800020  blr
+	  27: GETL       	LR, t20
+	  28: JMPo-r       	t20  ($4)
+
+
+
+. 3490 10007B20 28
+. 81 01 00 24 83 A1 00 14 83 C1 00 18 7D 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3491 _dl_fini(0x2547BAE0) approx BBs exec'd 0 ====
+
+	0x2547BAE0:  9421FFA0  stwu r1,-96(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFA0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547BAE4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547BAE8:  4801B519  bl 0x25497000
+	   9: MOVL       	$0x2547BAEC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+
+. 3491 2547BAE0 12
+. 94 21 FF A0 7C 08 02 A6 48 01 B5 19
+==== BB 3492 (0x2547BAEC) approx BBs exec'd 0 ====
+
+	0x2547BAEC:  93C10058  stw r30,88(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x58, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547BAF0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547BAF4:  7D800026  mfcr r12
+	   8: GETL       	CR, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0x2547BAF8:  91C10018  stw r14,24(r1)
+	  11: GETL       	R14, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x18, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547BAFC:  92410028  stw r18,40(r1)
+	  16: GETL       	R18, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x28, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2547BB00:  81DE04F4  lwz r14,1268(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x4F4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R14
+	  25: INCEIPL       	$4
+
+	0x2547BB04:  825E04C8  lwz r18,1224(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4C8, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R18
+	  30: INCEIPL       	$4
+
+	0x2547BB08:  91E1001C  stw r15,28(r1)
+	  31: GETL       	R15, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x1C, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547BB0C:  39E0000F  li r15,15
+	  36: MOVL       	$0xF, t28
+	  37: PUTL       	t28, R15
+	  38: INCEIPL       	$4
+
+	0x2547BB10:  92010020  stw r16,32(r1)
+	  39: GETL       	R16, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x20, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x2547BB14:  3A000000  li r16,0
+	  44: MOVL       	$0x0, t34
+	  45: PUTL       	t34, R16
+	  46: INCEIPL       	$4
+
+	0x2547BB18:  92210024  stw r17,36(r1)
+	  47: GETL       	R17, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x24, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0x2547BB1C:  3A200168  li r17,360
+	  52: MOVL       	$0x168, t40
+	  53: PUTL       	t40, R17
+	  54: INCEIPL       	$4
+
+	0x2547BB20:  92C10038  stw r22,56(r1)
+	  55: GETL       	R22, t42
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x38, t44
+	  58: STL       	t42, (t44)
+	  59: INCEIPL       	$4
+
+	0x2547BB24:  3AC00000  li r22,0
+	  60: MOVL       	$0x0, t46
+	  61: PUTL       	t46, R22
+	  62: INCEIPL       	$4
+
+	0x2547BB28:  93E1005C  stw r31,92(r1)
+	  63: GETL       	R31, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x5C, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x2547BB2C:  7C3F0B78  or r31,r1,r1
+	  68: GETL       	R1, t52
+	  69: PUTL       	t52, R31
+	  70: INCEIPL       	$4
+
+	0x2547BB30:  9261002C  stw r19,44(r1)
+	  71: GETL       	R19, t54
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x2C, t56
+	  74: STL       	t54, (t56)
+	  75: INCEIPL       	$4
+
+	0x2547BB34:  92810030  stw r20,48(r1)
+	  76: GETL       	R20, t58
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x30, t60
+	  79: STL       	t58, (t60)
+	  80: INCEIPL       	$4
+
+	0x2547BB38:  92A10034  stw r21,52(r1)
+	  81: GETL       	R21, t62
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x34, t64
+	  84: STL       	t62, (t64)
+	  85: INCEIPL       	$4
+
+	0x2547BB3C:  92E1003C  stw r23,60(r1)
+	  86: GETL       	R23, t66
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x3C, t68
+	  89: STL       	t66, (t68)
+	  90: INCEIPL       	$4
+
+	0x2547BB40:  93010040  stw r24,64(r1)
+	  91: GETL       	R24, t70
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x40, t72
+	  94: STL       	t70, (t72)
+	  95: INCEIPL       	$4
+
+	0x2547BB44:  93210044  stw r25,68(r1)
+	  96: GETL       	R25, t74
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x44, t76
+	  99: STL       	t74, (t76)
+	 100: INCEIPL       	$4
+
+	0x2547BB48:  93410048  stw r26,72(r1)
+	 101: GETL       	R26, t78
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x48, t80
+	 104: STL       	t78, (t80)
+	 105: INCEIPL       	$4
+
+	0x2547BB4C:  9361004C  stw r27,76(r1)
+	 106: GETL       	R27, t82
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x4C, t84
+	 109: STL       	t82, (t84)
+	 110: INCEIPL       	$4
+
+	0x2547BB50:  93810050  stw r28,80(r1)
+	 111: GETL       	R28, t86
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x50, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0x2547BB54:  93A10054  stw r29,84(r1)
+	 116: GETL       	R29, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0x54, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x2547BB58:  90010064  stw r0,100(r1)
+	 121: GETL       	R0, t94
+	 122: GETL       	R1, t96
+	 123: ADDL       	$0x64, t96
+	 124: STL       	t94, (t96)
+	 125: INCEIPL       	$4
+
+	0x2547BB5C:  91810014  stw r12,20(r1)
+	 126: GETL       	R12, t98
+	 127: GETL       	R1, t100
+	 128: ADDL       	$0x14, t100
+	 129: STL       	t98, (t100)
+	 130: INCEIPL       	$4
+
+	0x2547BB60:  809203F8  lwz r4,1016(r18)
+	 131: GETL       	R18, t102
+	 132: ADDL       	$0x3F8, t102
+	 133: LDL       	(t102), t104
+	 134: PUTL       	t104, R4
+	 135: INCEIPL       	$4
+
+	0x2547BB64:  38720180  addi r3,r18,384
+	 136: GETL       	R18, t106
+	 137: ADDL       	$0x180, t106
+	 138: PUTL       	t106, R3
+	 139: INCEIPL       	$4
+
+	0x2547BB68:  7C8903A6  mtctr r4
+	 140: GETL       	R4, t108
+	 141: PUTL       	t108, CTR
+	 142: INCEIPL       	$4
+
+	0x2547BB6C:  4E800421  bctrl
+	 143: MOVL       	$0x2547BB70, t110
+	 144: PUTL       	t110, LR
+	 145: GETL       	CTR, t112
+	 146: JMPo-c       	t112  ($4)
+
+
+
+. 3492 2547BAEC 132
+. 93 C1 00 58 7F C8 02 A6 7D 80 00 26 91 C1 00 18 92 41 00 28 81 DE 04 F4 82 5E 04 C8 91 E1 00 1C 39 E0 00 0F 92 01 00 20 3A 00 00 00 92 21 00 24 3A 20 01 68 92 C1 00 38 3A C0 00 00 93 E1 00 5C 7C 3F 0B 78 92 61 00 2C 92 81 00 30 92 A1 00 34 92 E1 00 3C 93 01 00 40 93 21 00 44 93 41 00 48 93 61 00 4C 93 81 00 50 93 A1 00 54 90 01 00 64 91 81 00 14 80 92 03 F8 38 72 01 80 7C 89 03 A6 4E 80 04 21
+==== BB 3493 (0x2547BB70) approx BBs exec'd 0 ====
+
+	0x2547BB70:  7D319214  add r9,r17,r18
+	   0: GETL       	R17, t0
+	   1: GETL       	R18, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547BB74:  80690004  lwz r3,4(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547BB78:  546A103A  rlwinm r10,r3,2,0,29
+	  10: GETL       	R3, t8
+	  11: SHLL       	$0x2, t8
+	  12: PUTL       	t8, R10
+	  13: INCEIPL       	$4
+
+	0x2547BB7C:  7F8A8040  cmpl cr7,r10,r16
+	  14: GETL       	R10, t10
+	  15: GETL       	R16, t12
+	  16: CMPUL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547BB80:  409D002C  bc 4,29,0x2547BBAC
+	  19: Jc29o       	$0x2547BBAC
+
+
+
+. 3493 2547BB70 20
+. 7D 31 92 14 80 69 00 04 54 6A 10 3A 7F 8A 80 40 40 9D 00 2C
+==== BB 3494 (0x2547BBAC) approx BBs exec'd 0 ====
+
+	0x2547BBAC:  7D71902E  lwzx r11,r17,r18
+	   0: GETL       	R18, t0
+	   1: GETL       	R17, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R11
+	   5: INCEIPL       	$4
+
+	0x2547BBB0:  3AE00000  li r23,0
+	   6: MOVL       	$0x0, t6
+	   7: PUTL       	t6, R23
+	   8: INCEIPL       	$4
+
+	0x2547BBB4:  2E0B0000  cmpi cr4,r11,0
+	   9: GETL       	R11, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0x2547BBB8:  41920020  bc 12,18,0x2547BBD8
+	  13: Js18o       	$0x2547BBD8
+
+
+
+. 3494 2547BBAC 16
+. 7D 71 90 2E 3A E0 00 00 2E 0B 00 00 41 92 00 20
+==== BB 3495 (0x2547BBD8) approx BBs exec'd 0 ====
+
+	0x2547BBD8:  7EF3BB79  or. r19,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R19
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547BBDC:  41820110  bc 12,2,0x2547BCEC
+	   5: Js02o       	$0x2547BCEC
+
+
+
+. 3495 2547BBD8 8
+. 7E F3 BB 79 41 82 01 10
+==== BB 3496 (0x2547BCEC) approx BBs exec'd 0 ====
+
+	0x2547BCEC:  80B203FC  lwz r5,1020(r18)
+	   0: GETL       	R18, t0
+	   1: ADDL       	$0x3FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547BCF0:  3B400000  li r26,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547BCF4:  38720180  addi r3,r18,384
+	   8: GETL       	R18, t6
+	   9: ADDL       	$0x180, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x2547BCF8:  7E1A9840  cmpl cr4,r26,r19
+	  12: GETL       	R26, t8
+	  13: GETL       	R19, t10
+	  14: CMPUL       	t8, t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x4, CR
+	  16: INCEIPL       	$4
+
+	0x2547BCFC:  7CA903A6  mtctr r5
+	  17: GETL       	R5, t14
+	  18: PUTL       	t14, CTR
+	  19: INCEIPL       	$4
+
+	0x2547BD00:  4E800421  bctrl
+	  20: MOVL       	$0x2547BD04, t16
+	  21: PUTL       	t16, LR
+	  22: GETL       	CTR, t18
+	  23: JMPo-c       	t18  ($4)
+
+
+
+. 3496 2547BCEC 24
+. 80 B2 03 FC 3B 40 00 00 38 72 01 80 7E 1A 98 40 7C A9 03 A6 4E 80 04 21
+==== BB 3497 (0x2547BD04) approx BBs exec'd 0 ====
+
+	0x2547BD04:  40900068  bc 4,16,0x2547BD6C
+	   0: Jc16o       	$0x2547BD6C
+
+
+
+. 3497 2547BD04 4
+. 40 90 00 68
+==== BB 3498 (0x2547BD6C) approx BBs exec'd 0 ====
+
+	0x2547BD6C:  35EFFFFF  addic. r15,r15,-1
+	   0: GETL       	R15, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R15
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547BD70:  3A31FFE8  addi r17,r17,-24
+	   6: GETL       	R17, t4
+	   7: ADDL       	$0xFFFFFFE8, t4
+	   8: PUTL       	t4, R17
+	   9: INCEIPL       	$4
+
+	0x2547BD74:  4080FDEC  bc 4,0,0x2547BB60
+	  10: Jc00o       	$0x2547BB60
+
+
+
+. 3498 2547BD6C 12
+. 35 EF FF FF 3A 31 FF E8 40 80 FD EC
+==== BB 3499 (0x2547BB60) approx BBs exec'd 0 ====
+
+	0x2547BB60:  809203F8  lwz r4,1016(r18)
+	   0: GETL       	R18, t0
+	   1: ADDL       	$0x3F8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547BB64:  38720180  addi r3,r18,384
+	   5: GETL       	R18, t4
+	   6: ADDL       	$0x180, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x2547BB68:  7C8903A6  mtctr r4
+	   9: GETL       	R4, t6
+	  10: PUTL       	t6, CTR
+	  11: INCEIPL       	$4
+
+	0x2547BB6C:  4E800421  bctrl
+	  12: MOVL       	$0x2547BB70, t8
+	  13: PUTL       	t8, LR
+	  14: GETL       	CTR, t10
+	  15: JMPo-c       	t10  ($4)
+
+
+
+. 3499 2547BB60 16
+. 80 92 03 F8 38 72 01 80 7C 89 03 A6 4E 80 04 21
+==== BB 3500 (0x2547BB84) approx BBs exec'd 0 ====
+
+	0x2547BB84:  2C100000  cmpi cr0,r16,0
+	   0: GETL       	R16, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547BB88:  40820110  bc 4,2,0x2547BC98
+	   4: Jc02o       	$0x2547BC98
+
+
+
+. 3500 2547BB84 8
+. 2C 10 00 00 40 82 01 10
+==== BB 3501 (0x2547BB8C) approx BBs exec'd 0 ====
+
+	0x2547BB8C:  390A001E  addi r8,r10,30
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1E, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x2547BB90:  81610000  lwz r11,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547BB94:  55070036  rlwinm r7,r8,0,0,27
+	   8: GETL       	R8, t6
+	   9: ANDL       	$0xFFFFFFF0, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0x2547BB98:  7D505378  or r16,r10,r10
+	  12: GETL       	R10, t8
+	  13: PUTL       	t8, R16
+	  14: INCEIPL       	$4
+
+	0x2547BB9C:  7CC700D0  neg r6,r7
+	  15: GETL       	R7, t10
+	  16: NEGL       	t10
+	  17: PUTL       	t10, R6
+	  18: INCEIPL       	$4
+
+	0x2547BBA0:  7D61316E  stwux r11,r1,r6
+	  19: GETL       	R6, t12
+	  20: GETL       	R1, t14
+	  21: ADDL       	t14, t12
+	  22: PUTL       	t12, R1
+	  23: GETL       	R11, t16
+	  24: STL       	t16, (t12)
+	  25: INCEIPL       	$4
+
+	0x2547BBA4:  38A10017  addi r5,r1,23
+	  26: GETL       	R1, t18
+	  27: ADDL       	$0x17, t18
+	  28: PUTL       	t18, R5
+	  29: INCEIPL       	$4
+
+	0x2547BBA8:  54B60036  rlwinm r22,r5,0,0,27
+	  30: GETL       	R5, t20
+	  31: ANDL       	$0xFFFFFFF0, t20
+	  32: PUTL       	t20, R22
+	  33: INCEIPL       	$4
+
+	0x2547BBAC:  7D71902E  lwzx r11,r17,r18
+	  34: GETL       	R18, t22
+	  35: GETL       	R17, t24
+	  36: ADDL       	t24, t22
+	  37: LDL       	(t22), t26
+	  38: PUTL       	t26, R11
+	  39: INCEIPL       	$4
+
+	0x2547BBB0:  3AE00000  li r23,0
+	  40: MOVL       	$0x0, t28
+	  41: PUTL       	t28, R23
+	  42: INCEIPL       	$4
+
+	0x2547BBB4:  2E0B0000  cmpi cr4,r11,0
+	  43: GETL       	R11, t30
+	  44: CMP0L       	t30, t32  (-rSo)
+	  45: ICRFL       	t32, $0x4, CR
+	  46: INCEIPL       	$4
+
+	0x2547BBB8:  41920020  bc 12,18,0x2547BBD8
+	  47: Js18o       	$0x2547BBD8
+
+
+
+. 3501 2547BB8C 48
+. 39 0A 00 1E 81 61 00 00 55 07 00 36 7D 50 53 78 7C C7 00 D0 7D 61 31 6E 38 A1 00 17 54 B6 00 36 7D 71 90 2E 3A E0 00 00 2E 0B 00 00 41 92 00 20
+==== BB 3502 (0x2547BBBC) approx BBs exec'd 0 ====
+
+	0x2547BBBC:  834B0014  lwz r26,20(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547BBC0:  56EA103A  rlwinm r10,r23,2,0,29
+	   5: GETL       	R23, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547BBC4:  7F1A5800  cmp cr6,r26,r11
+	   9: GETL       	R26, t6
+	  10: GETL       	R11, t8
+	  11: CMPL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0x2547BBC8:  419A0108  bc 12,26,0x2547BCD0
+	  14: Js26o       	$0x2547BCD0
+
+
+
+. 3502 2547BBBC 16
+. 83 4B 00 14 56 EA 10 3A 7F 1A 58 00 41 9A 01 08
+==== BB 3503 (0x2547BCD0) approx BBs exec'd 0 ====
+
+	0x2547BCD0:  836B0178  lwz r27,376(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x178, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x2547BCD4:  3AF70001  addi r23,r23,1
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R23
+	   8: INCEIPL       	$4
+
+	0x2547BCD8:  7D6AB12E  stwx r11,r10,r22
+	   9: GETL       	R22, t6
+	  10: GETL       	R10, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R11, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x2547BCDC:  395B0001  addi r10,r27,1
+	  15: GETL       	R27, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547BCE0:  914B0178  stw r10,376(r11)
+	  19: GETL       	R10, t14
+	  20: GETL       	R11, t16
+	  21: ADDL       	$0x178, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547BCE4:  816B000C  lwz r11,12(r11)
+	  24: GETL       	R11, t18
+	  25: ADDL       	$0xC, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R11
+	  28: INCEIPL       	$4
+
+	0x2547BCE8:  4BFFFEE8  b 0x2547BBD0
+	  29: JMPo       	$0x2547BBD0  ($4)
+
+
+
+. 3503 2547BCD0 28
+. 83 6B 01 78 3A F7 00 01 7D 6A B1 2E 39 5B 00 01 91 4B 01 78 81 6B 00 0C 4B FF FE E8
+==== BB 3504 (0x2547BBD0) approx BBs exec'd 0 ====
+
+	0x2547BBD0:  2E0B0000  cmpi cr4,r11,0
+	   0: GETL       	R11, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x2547BBD4:  4092FFE8  bc 4,18,0x2547BBBC
+	   4: Jc18o       	$0x2547BBBC
+
+
+
+. 3504 2547BBD0 8
+. 2E 0B 00 00 40 92 FF E8
+==== BB 3505 (0x2547BBE0) approx BBs exec'd 0 ====
+
+	0x2547BBE0:  2E0F0000  cmpi cr4,r15,0
+	   0: GETL       	R15, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x2547BBE4:  7F71902E  lwzx r27,r17,r18
+	   4: GETL       	R18, t4
+	   5: GETL       	R17, t6
+	   6: ADDL       	t6, t4
+	   7: LDL       	(t4), t8
+	   8: PUTL       	t8, R27
+	   9: INCEIPL       	$4
+
+	0x2547BBE8:  40920008  bc 4,18,0x2547BBF0
+	  10: Jc18o       	$0x2547BBF0
+
+
+
+. 3505 2547BBE0 12
+. 2E 0F 00 00 7F 71 90 2E 40 92 00 08
+==== BB 3506 (0x2547BBEC) approx BBs exec'd 0 ====
+
+	0x2547BBEC:  837B000C  lwz r27,12(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x2547BBF0:  2F9B0000  cmpi cr7,r27,0
+	   5: GETL       	R27, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547BBF4:  419E00F8  bc 12,30,0x2547BCEC
+	   9: Js30o       	$0x2547BCEC
+
+
+
+. 3506 2547BBEC 12
+. 83 7B 00 0C 2F 9B 00 00 41 9E 00 F8
+==== BB 3507 (0x2547BBF8) approx BBs exec'd 0 ====
+
+	0x2547BBF8:  839B0014  lwz r28,20(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x2547BBFC:  7C9CD800  cmp cr1,r28,r27
+	   5: GETL       	R28, t4
+	   6: GETL       	R27, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x2547BC00:  4086FFEC  bc 4,6,0x2547BBEC
+	  10: Jc06o       	$0x2547BBEC
+
+
+
+. 3507 2547BBF8 12
+. 83 9B 00 14 7C 9C D8 00 40 86 FF EC
+==== BB 3508 (0x2547BC04) approx BBs exec'd 0 ====
+
+	0x2547BC04:  7F200026  mfcr r25
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0x2547BC08:  57399FFE  rlwinm r25,r25,19,31,31
+	   3: GETL       	R25, t2
+	   4: ROLL       	$0x13, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R25
+	   7: INCEIPL       	$4
+
+	0x2547BC0C:  48000008  b 0x2547BC14
+	   8: JMPo       	$0x2547BC14  ($4)
+
+
+
+. 3508 2547BC04 12
+. 7F 20 00 26 57 39 9F FE 48 00 00 08
+==== BB 3509 (0x2547BC14) approx BBs exec'd 0 ====
+
+	0x2547BC14:  572C103A  rlwinm r12,r25,2,0,29
+	   0: GETL       	R25, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0x2547BC18:  7FACB02E  lwzx r29,r12,r22
+	   4: GETL       	R22, t2
+	   5: GETL       	R12, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547BC1C:  7F1DD800  cmp cr6,r29,r27
+	  10: GETL       	R29, t8
+	  11: GETL       	R27, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0x2547BC20:  409AFFF0  bc 4,26,0x2547BC10
+	  15: Jc26o       	$0x2547BC10
+
+
+
+. 3509 2547BC14 16
+. 57 2C 10 3A 7F AC B0 2E 7F 1D D8 00 40 9A FF F0
+==== BB 3510 (0x2547BC24) approx BBs exec'd 0 ====
+
+	0x2547BC24:  3B590001  addi r26,r25,1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547BC28:  7C1AB840  cmpl cr0,r26,r23
+	   4: GETL       	R26, t2
+	   5: GETL       	R23, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547BC2C:  40A0FFC0  bc 5,0,0x2547BBEC
+	   9: Jc00o       	$0x2547BBEC
+
+
+
+. 3510 2547BC24 12
+. 3B 59 00 01 7C 1A B8 40 40 A0 FF C0
+==== BB 3511 (0x2547BC30) approx BBs exec'd 0 ====
+
+	0x2547BC30:  5720103A  rlwinm r0,r25,2,0,29
+	   0: GETL       	R25, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x2547BC34:  7C140378  or r20,r0,r0
+	   4: GETL       	R0, t2
+	   5: PUTL       	t2, R20
+	   6: INCEIPL       	$4
+
+	0x2547BC38:  7EA0B214  add r21,r0,r22
+	   7: GETL       	R0, t4
+	   8: GETL       	R22, t6
+	   9: ADDL       	t4, t6
+	  10: PUTL       	t6, R21
+	  11: INCEIPL       	$4
+
+	0x2547BC3C:  5758103A  rlwinm r24,r26,2,0,29
+	  12: GETL       	R26, t8
+	  13: SHLL       	$0x2, t8
+	  14: PUTL       	t8, R24
+	  15: INCEIPL       	$4
+
+	0x2547BC40:  7C78B02E  lwzx r3,r24,r22
+	  16: GETL       	R22, t10
+	  17: GETL       	R24, t12
+	  18: ADDL       	t12, t10
+	  19: LDL       	(t10), t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0x2547BC44:  816301E8  lwz r11,488(r3)
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x1E8, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R11
+	  26: INCEIPL       	$4
+
+	0x2547BC48:  2F8B0000  cmpi cr7,r11,0
+	  27: GETL       	R11, t20
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x7, CR
+	  30: INCEIPL       	$4
+
+	0x2547BC4C:  419E002C  bc 12,30,0x2547BC78
+	  31: Js30o       	$0x2547BC78
+
+
+
+. 3511 2547BC30 32
+. 57 20 10 3A 7C 14 03 78 7E A0 B2 14 57 58 10 3A 7C 78 B0 2E 81 63 01 E8 2F 8B 00 00 41 9E 00 2C
+==== BB 3512 (0x2547BC50) approx BBs exec'd 0 ====
+
+	0x2547BC50:  812B0000  lwz r9,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547BC54:  2C890000  cmpi cr1,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x2547BC58:  41860020  bc 12,6,0x2547BC78
+	   8: Js06o       	$0x2547BC78
+
+
+
+. 3512 2547BC50 12
+. 81 2B 00 00 2C 89 00 00 41 86 00 20
+==== BB 3513 (0x2547BC5C) approx BBs exec'd 0 ====
+
+	0x2547BC5C:  7C99D050  subf r4,r25,r26
+	   0: GETL       	R25, t0
+	   1: GETL       	R26, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547BC60:  5485103A  rlwinm r5,r4,2,0,29
+	   5: GETL       	R4, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547BC64:  7F09D800  cmp cr6,r9,r27
+	   9: GETL       	R9, t6
+	  10: GETL       	R27, t8
+	  11: CMPL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0x2547BC68:  419A0240  bc 12,26,0x2547BEA8
+	  14: Js26o       	$0x2547BEA8
+
+
+
+. 3513 2547BC5C 16
+. 7C 99 D0 50 54 85 10 3A 7F 09 D8 00 41 9A 02 40
+==== BB 3514 (0x2547BC6C) approx BBs exec'd 0 ====
+
+	0x2547BC6C:  852B0004  lwzu r9,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R11
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x2547BC70:  2C090000  cmpi cr0,r9,0
+	   6: GETL       	R9, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547BC74:  4082FFF0  bc 4,2,0x2547BC64
+	  10: Jc02o       	$0x2547BC64
+
+
+
+. 3514 2547BC6C 12
+. 85 2B 00 04 2C 09 00 00 40 82 FF F0
+==== BB 3515 (0x2547BC64) approx BBs exec'd 0 ====
+
+	0x2547BC64:  7F09D800  cmp cr6,r9,r27
+	   0: GETL       	R9, t0
+	   1: GETL       	R27, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x2547BC68:  419A0240  bc 12,26,0x2547BEA8
+	   5: Js26o       	$0x2547BEA8
+
+
+
+. 3515 2547BC64 8
+. 7F 09 D8 00 41 9A 02 40
+==== BB 3516 (0x2547BC78) approx BBs exec'd 0 ====
+
+	0x2547BC78:  7D38B02E  lwzx r9,r24,r22
+	   0: GETL       	R22, t0
+	   1: GETL       	R24, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R9
+	   5: INCEIPL       	$4
+
+	0x2547BC7C:  814901F4  lwz r10,500(r9)
+	   6: GETL       	R9, t6
+	   7: ADDL       	$0x1F4, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R10
+	  10: INCEIPL       	$4
+
+	0x2547BC80:  2F8A0000  cmpi cr7,r10,0
+	  11: GETL       	R10, t10
+	  12: CMP0L       	t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x2547BC84:  409E0258  bc 4,30,0x2547BEDC
+	  15: Jc30o       	$0x2547BEDC
+
+
+
+. 3516 2547BC78 16
+. 7D 38 B0 2E 81 49 01 F4 2F 8A 00 00 40 9E 02 58
+==== BB 3517 (0x2547BC88) approx BBs exec'd 0 ====
+
+	0x2547BC88:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547BC8C:  7F1AB840  cmpl cr6,r26,r23
+	   4: GETL       	R26, t2
+	   5: GETL       	R23, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547BC90:  4198FFAC  bc 12,24,0x2547BC3C
+	   9: Js24o       	$0x2547BC3C
+
+
+
+. 3517 2547BC88 12
+. 3B 5A 00 01 7F 1A B8 40 41 98 FF AC
+==== BB 3518 (0x2547BC3C) approx BBs exec'd 0 ====
+
+	0x2547BC3C:  5758103A  rlwinm r24,r26,2,0,29
+	   0: GETL       	R26, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0x2547BC40:  7C78B02E  lwzx r3,r24,r22
+	   4: GETL       	R22, t2
+	   5: GETL       	R24, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547BC44:  816301E8  lwz r11,488(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x1E8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547BC48:  2F8B0000  cmpi cr7,r11,0
+	  15: GETL       	R11, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547BC4C:  419E002C  bc 12,30,0x2547BC78
+	  19: Js30o       	$0x2547BC78
+
+
+
+. 3518 2547BC3C 20
+. 57 58 10 3A 7C 78 B0 2E 81 63 01 E8 2F 8B 00 00 41 9E 00 2C
+==== BB 3519 (0x2547BC94) approx BBs exec'd 0 ====
+
+	0x2547BC94:  4BFFFF58  b 0x2547BBEC
+	   0: JMPo       	$0x2547BBEC  ($4)
+
+
+
+. 3519 2547BC94 4
+. 4B FF FF 58
+==== BB 3520 (0x2547BC10) approx BBs exec'd 0 ====
+
+	0x2547BC10:  3B390001  addi r25,r25,1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0x2547BC14:  572C103A  rlwinm r12,r25,2,0,29
+	   4: GETL       	R25, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R12
+	   7: INCEIPL       	$4
+
+	0x2547BC18:  7FACB02E  lwzx r29,r12,r22
+	   8: GETL       	R22, t4
+	   9: GETL       	R12, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R29
+	  13: INCEIPL       	$4
+
+	0x2547BC1C:  7F1DD800  cmp cr6,r29,r27
+	  14: GETL       	R29, t10
+	  15: GETL       	R27, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0x2547BC20:  409AFFF0  bc 4,26,0x2547BC10
+	  19: Jc26o       	$0x2547BC10
+
+
+
+. 3520 2547BC10 20
+. 3B 39 00 01 57 2C 10 3A 7F AC B0 2E 7F 1D D8 00 40 9A FF F0
+==== BB 3521 (0x2547BEA8) approx BBs exec'd 0 ====
+
+	0x2547BEA8:  7F98B02E  lwzx r28,r24,r22
+	   0: GETL       	R22, t0
+	   1: GETL       	R24, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R28
+	   5: INCEIPL       	$4
+
+	0x2547BEAC:  3BB50004  addi r29,r21,4
+	   6: GETL       	R21, t6
+	   7: ADDL       	$0x4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547BEB0:  7EA4AB78  or r4,r21,r21
+	  10: GETL       	R21, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x2547BEB4:  7FA3EB78  or r3,r29,r29
+	  13: GETL       	R29, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x2547BEB8:  480076E5  bl 0x2548359C
+	  16: MOVL       	$0x2547BEBC, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0x2548359C  ($4)
+
+
+
+. 3521 2547BEA8 20
+. 7F 98 B0 2E 3B B5 00 04 7E A4 AB 78 7F A3 EB 78 48 00 76 E5
+==== BB 3522 (0x2547BEBC) approx BBs exec'd 0 ====
+
+	0x2547BEBC:  93950000  stw r28,0(r21)
+	   0: GETL       	R28, t0
+	   1: GETL       	R21, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x2547BEC0:  7D38B02E  lwzx r9,r24,r22
+	   4: GETL       	R22, t4
+	   5: GETL       	R24, t6
+	   6: ADDL       	t6, t4
+	   7: LDL       	(t4), t8
+	   8: PUTL       	t8, R9
+	   9: INCEIPL       	$4
+
+	0x2547BEC4:  3B390001  addi r25,r25,1
+	  10: GETL       	R25, t10
+	  11: ADDL       	$0x1, t10
+	  12: PUTL       	t10, R25
+	  13: INCEIPL       	$4
+
+	0x2547BEC8:  3A940004  addi r20,r20,4
+	  14: GETL       	R20, t12
+	  15: ADDL       	$0x4, t12
+	  16: PUTL       	t12, R20
+	  17: INCEIPL       	$4
+
+	0x2547BECC:  7FB5EB78  or r21,r29,r29
+	  18: GETL       	R29, t14
+	  19: PUTL       	t14, R21
+	  20: INCEIPL       	$4
+
+	0x2547BED0:  814901F4  lwz r10,500(r9)
+	  21: GETL       	R9, t16
+	  22: ADDL       	$0x1F4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R10
+	  25: INCEIPL       	$4
+
+	0x2547BED4:  2F8A0000  cmpi cr7,r10,0
+	  26: GETL       	R10, t20
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x7, CR
+	  29: INCEIPL       	$4
+
+	0x2547BED8:  419EFDB0  bc 12,30,0x2547BC88
+	  30: Js30o       	$0x2547BC88
+
+
+
+. 3522 2547BEBC 32
+. 93 95 00 00 7D 38 B0 2E 3B 39 00 01 3A 94 00 04 7F B5 EB 78 81 49 01 F4 2F 8A 00 00 41 9E FD B0
+==== BB 3523 (0x2547BD08) approx BBs exec'd 0 ====
+
+	0x2547BD08:  817E034C  lwz r11,844(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x34C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547BD0C:  832B0000  lwz r25,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R25
+	   8: INCEIPL       	$4
+
+	0x2547BD10:  48000030  b 0x2547BD40
+	   9: JMPo       	$0x2547BD40  ($4)
+
+
+
+. 3523 2547BD08 12
+. 81 7E 03 4C 83 2B 00 00 48 00 00 30
+==== BB 3524 (0x2547BD40) approx BBs exec'd 0 ====
+
+	0x2547BD40:  5748103A  rlwinm r8,r26,2,0,29
+	   0: GETL       	R26, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x2547BD44:  7F88B02E  lwzx r28,r8,r22
+	   4: GETL       	R22, t2
+	   5: GETL       	R8, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x2547BD48:  817C0180  lwz r11,384(r28)
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x180, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547BD4C:  75601000  andis. r0,r11,0x1000
+	  15: GETL       	R11, t12
+	  16: ANDL       	$0x10000000, t12
+	  17: PUTL       	t12, R0
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0x2547BD50:  40A2FFC4  bc 5,2,0x2547BD14
+	  21: Jc02o       	$0x2547BD14
+
+
+
+. 3524 2547BD40 20
+. 57 48 10 3A 7F 88 B0 2E 81 7C 01 80 75 60 10 00 40 A2 FF C4
+==== BB 3525 (0x2547BD14) approx BBs exec'd 0 ====
+
+	0x2547BD14:  55750104  rlwinm r21,r11,0,4,2
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0xEFFFFFFF, t0
+	   2: PUTL       	t0, R21
+	   3: INCEIPL       	$4
+
+	0x2547BD18:  809C0004  lwz r4,4(r28)
+	   4: GETL       	R28, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2547BD1C:  92BC0180  stw r21,384(r28)
+	   9: GETL       	R21, t6
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x180, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547BD20:  8A840000  lbz r20,0(r4)
+	  14: GETL       	R4, t10
+	  15: LDB       	(t10), t12
+	  16: PUTL       	t12, R20
+	  17: INCEIPL       	$4
+
+	0x2547BD24:  2F140000  cmpi cr6,r20,0
+	  18: GETL       	R20, t14
+	  19: CMP0L       	t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x2547BD28:  409A00C0  bc 4,26,0x2547BDE8
+	  22: Jc26o       	$0x2547BDE8
+
+
+
+. 3525 2547BD14 24
+. 55 75 01 04 80 9C 00 04 92 BC 01 80 8A 84 00 00 2F 14 00 00 40 9A 00 C0
+==== BB 3526 (0x2547BD2C) approx BBs exec'd 0 ====
+
+	0x2547BD2C:  7569C000  andis. r9,r11,0xC000
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0xC0000000, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547BD30:  408200B8  bc 4,2,0x2547BDE8
+	   6: Jc02o       	$0x2547BDE8
+
+
+
+. 3526 2547BD2C 8
+. 75 69 C0 00 40 82 00 B8
+==== BB 3527 (0x2547BD34) approx BBs exec'd 0 ====
+
+	0x2547BD34:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547BD38:  7F9A9840  cmpl cr7,r26,r19
+	   4: GETL       	R26, t2
+	   5: GETL       	R19, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547BD3C:  409C0030  bc 4,28,0x2547BD6C
+	   9: Jc28o       	$0x2547BD6C
+
+
+
+. 3527 2547BD34 12
+. 3B 5A 00 01 7F 9A 98 40 40 9C 00 30
+==== BB 3528 (0x2547BDE8) approx BBs exec'd 0 ====
+
+	0x2547BDE8:  813C0088  lwz r9,136(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x88, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547BDEC:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547BDF0:  409E0010  bc 4,30,0x2547BE00
+	   9: Jc30o       	$0x2547BE00
+
+
+
+. 3528 2547BDE8 12
+. 81 3C 00 88 2F 89 00 00 40 9E 00 10
+==== BB 3529 (0x2547BDF4) approx BBs exec'd 0 ====
+
+	0x2547BDF4:  82FC0054  lwz r23,84(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x54, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x2547BDF8:  2C970000  cmpi cr1,r23,0
+	   5: GETL       	R23, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547BDFC:  4186FF38  bc 12,6,0x2547BD34
+	   9: Js06o       	$0x2547BD34
+
+
+
+. 3529 2547BDF4 12
+. 82 FC 00 54 2C 97 00 00 41 86 FF 38
+==== BB 3530 (0x2547BE00) approx BBs exec'd 0 ====
+
+	0x2547BE00:  830E0000  lwz r24,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R24
+	   3: INCEIPL       	$4
+
+	0x2547BE04:  730B0002  andi. r11,r24,0x2
+	   4: GETL       	R24, t4
+	   5: ANDL       	$0x2, t4
+	   6: PUTL       	t4, R11
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547BE08:  4082007C  bc 4,2,0x2547BE84
+	  10: Jc02o       	$0x2547BE84
+
+
+
+. 3530 2547BE00 12
+. 83 0E 00 00 73 0B 00 02 40 82 00 7C
+==== BB 3531 (0x2547BE0C) approx BBs exec'd 0 ====
+
+	0x2547BE0C:  2F090000  cmpi cr6,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547BE10:  419A0044  bc 12,26,0x2547BE54
+	   4: Js26o       	$0x2547BE54
+
+
+
+. 3531 2547BE0C 8
+. 2F 09 00 00 41 9A 00 44
+==== BB 3532 (0x2547BE54) approx BBs exec'd 0 ====
+
+	0x2547BE54:  813C0054  lwz r9,84(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x54, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547BE58:  2E090000  cmpi cr4,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x2547BE5C:  4192FEF8  bc 12,18,0x2547BD54
+	   9: Js18o       	$0x2547BD54
+
+
+
+. 3532 2547BE54 12
+. 81 3C 00 54 2E 09 00 00 41 92 FE F8
+==== BB 3533 (0x2547BE60) approx BBs exec'd 0 ====
+
+	0x2547BE60:  80A90004  lwz r5,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547BE64:  83BC0000  lwz r29,0(r28)
+	   5: GETL       	R28, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R29
+	   8: INCEIPL       	$4
+
+	0x2547BE68:  7CFD2A14  add r7,r29,r5
+	   9: GETL       	R29, t8
+	  10: GETL       	R5, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R7
+	  13: INCEIPL       	$4
+
+	0x2547BE6C:  7CE903A6  mtctr r7
+	  14: GETL       	R7, t12
+	  15: PUTL       	t12, CTR
+	  16: INCEIPL       	$4
+
+	0x2547BE70:  4E800421  bctrl
+	  17: MOVL       	$0x2547BE74, t14
+	  18: PUTL       	t14, LR
+	  19: GETL       	CTR, t16
+	  20: JMPo-c       	t16  ($4)
+
+
+
+. 3533 2547BE60 20
+. 80 A9 00 04 83 BC 00 00 7C FD 2A 14 7C E9 03 A6 4E 80 04 21
+==== BB 3534 (0xFFDEF80) approx BBs exec'd 0 ====
+
+	0xFFDEF80:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDEF84:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDEF88:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFFDEF8C:  4BFFF979  bl 0xFFDE904
+	  14: MOVL       	$0xFFDEF90, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFDE904  ($4)
+
+
+
+. 3534 FFDEF80 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FF F9 79
+==== BB 3535 (0xFFDE904) approx BBs exec'd 0 ====
+
+	0xFFDE904:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDE908:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDE90C:  429F0005  bcl 20,31,0xFFDE910
+	   9: MOVL       	$0xFFDE910, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFFDE910:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFFDE914:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xFFDE918:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFFDE91C:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFFDE920:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFFDE924:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xFFDE928:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xFFDE92C:  88090000  lbz r0,0(r9)
+	  45: GETL       	R9, t34
+	  46: LDB       	(t34), t36
+	  47: PUTL       	t36, R0
+	  48: INCEIPL       	$4
+
+	0xFFDE930:  2F800000  cmpi cr7,r0,0
+	  49: GETL       	R0, t38
+	  50: CMP0L       	t38, t40  (-rSo)
+	  51: ICRFL       	t40, $0x7, CR
+	  52: INCEIPL       	$4
+
+	0xFFDE934:  409E0050  bc 4,30,0xFFDE984
+	  53: Jc30o       	$0xFFDE984
+
+
+
+. 3535 FFDE904 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 88 09 00 00 2F 80 00 00 40 9E 00 50
+==== BB 3536 (0xFFDE938) approx BBs exec'd 0 ====
+
+	0xFFDE938:  801E8004  lwz r0,-32764(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8004, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDE93C:  2F800000  cmpi cr7,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFFDE940:  419E0010  bc 12,30,0xFFDE950
+	   9: Js30o       	$0xFFDE950
+
+
+
+. 3536 FFDE938 12
+. 80 1E 80 04 2F 80 00 00 41 9E 00 10
+==== BB 3537 (0xFFDE944) approx BBs exec'd 0 ====
+
+	0xFFDE944:  813E8008  lwz r9,-32760(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8008, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFFDE948:  80690000  lwz r3,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFFDE94C:  48010831  bl 0xFFEF17C
+	   9: MOVL       	$0xFFDE950, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0xFFEF17C  ($4)
+
+
+
+. 3537 FFDE944 12
+. 81 3E 80 08 80 69 00 00 48 01 08 31
+==== BB 3538 (0xFFEF17C) approx BBs exec'd 0 ====
+
+	0xFFEF17C:  3960000C  li r11,12
+	   0: MOVL       	$0xC, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFFEF180:  4BFFFFBC  b 0xFFEF13C
+	   3: JMPo       	$0xFFEF13C  ($4)
+
+
+
+. 3538 FFEF17C 8
+. 39 60 00 0C 4B FF FF BC
+==== BB 3539 (0xFFEF13C) approx BBs exec'd 0 ====
+
+	0xFFEF13C:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0xFFEF140:  7D6C5A14  add r11,r12,r11
+	   4: GETL       	R12, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xFFEF144:  3980AF5C  li r12,-20644
+	   9: MOVL       	$0xFFFFAF5C, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0xFFEF148:  3D8C2548  addis r12,r12,9544
+	  12: MOVL       	$0x2547AF5C, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0xFFEF14C:  7D8903A6  mtctr r12
+	  15: GETL       	R12, t10
+	  16: PUTL       	t10, CTR
+	  17: INCEIPL       	$4
+
+	0xFFEF150:  39808040  li r12,-32704
+	  18: MOVL       	$0xFFFF8040, t12
+	  19: PUTL       	t12, R12
+	  20: INCEIPL       	$4
+
+	0xFFEF154:  3D8C2547  addis r12,r12,9543
+	  21: MOVL       	$0x25468040, t14
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0xFFEF158:  4E800420  bctr
+	  24: GETL       	CTR, t16
+	  25: JMPo       	t16  ($4)
+
+
+
+. 3539 FFEF13C 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 80 40 3D 8C 25 47 4E 80 04 20
+==== BB 3540 __cxa_finalize(0xFE93F44) approx BBs exec'd 0 ====
+
+	0xFE93F44:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE93F48:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE93F4C:  48113F05  bl 0xFFA7E50
+	   9: MOVL       	$0xFE93F50, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3540 FE93F44 12
+. 94 21 FF D0 7C 08 02 A6 48 11 3F 05
+==== BB 3541 (0xFE93F50) approx BBs exec'd 0 ====
+
+	0xFE93F50:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE93F54:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE93F58:  93810020  stw r28,32(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x20, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE93F5C:  90010034  stw r0,52(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x34, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE93F60:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0xFE93F64:  93210014  stw r25,20(r1)
+	  21: GETL       	R25, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE93F68:  2E030000  cmpi cr4,r3,0
+	  26: GETL       	R3, t20
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0xFE93F6C:  813E1AFC  lwz r9,6908(r30)
+	  30: GETL       	R30, t24
+	  31: ADDL       	$0x1AFC, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R9
+	  34: INCEIPL       	$4
+
+	0xFE93F70:  7C791B78  or r25,r3,r3
+	  35: GETL       	R3, t28
+	  36: PUTL       	t28, R25
+	  37: INCEIPL       	$4
+
+	0xFE93F74:  93010010  stw r24,16(r1)
+	  38: GETL       	R24, t30
+	  39: GETL       	R1, t32
+	  40: ADDL       	$0x10, t32
+	  41: STL       	t30, (t32)
+	  42: INCEIPL       	$4
+
+	0xFE93F78:  83890000  lwz r28,0(r9)
+	  43: GETL       	R9, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R28
+	  46: INCEIPL       	$4
+
+	0xFE93F7C:  93410018  stw r26,24(r1)
+	  47: GETL       	R26, t38
+	  48: GETL       	R1, t40
+	  49: ADDL       	$0x18, t40
+	  50: STL       	t38, (t40)
+	  51: INCEIPL       	$4
+
+	0xFE93F80:  2F9C0000  cmpi cr7,r28,0
+	  52: GETL       	R28, t42
+	  53: CMP0L       	t42, t44  (-rSo)
+	  54: ICRFL       	t44, $0x7, CR
+	  55: INCEIPL       	$4
+
+	0xFE93F84:  9361001C  stw r27,28(r1)
+	  56: GETL       	R27, t46
+	  57: GETL       	R1, t48
+	  58: ADDL       	$0x1C, t48
+	  59: STL       	t46, (t48)
+	  60: INCEIPL       	$4
+
+	0xFE93F88:  93A10024  stw r29,36(r1)
+	  61: GETL       	R29, t50
+	  62: GETL       	R1, t52
+	  63: ADDL       	$0x24, t52
+	  64: STL       	t50, (t52)
+	  65: INCEIPL       	$4
+
+	0xFE93F8C:  93E1002C  stw r31,44(r1)
+	  66: GETL       	R31, t54
+	  67: GETL       	R1, t56
+	  68: ADDL       	$0x2C, t56
+	  69: STL       	t54, (t56)
+	  70: INCEIPL       	$4
+
+	0xFE93F90:  9181000C  stw r12,12(r1)
+	  71: GETL       	R12, t58
+	  72: GETL       	R1, t60
+	  73: ADDL       	$0xC, t60
+	  74: STL       	t58, (t60)
+	  75: INCEIPL       	$4
+
+	0xFE93F94:  419E0088  bc 12,30,0xFE9401C
+	  76: Js30o       	$0xFE9401C
+
+
+
+. 3541 FE93F50 72
+. 93 C1 00 28 7F C8 02 A6 93 81 00 20 90 01 00 34 7D 80 00 26 93 21 00 14 2E 03 00 00 81 3E 1A FC 7C 79 1B 78 93 01 00 10 83 89 00 00 93 41 00 18 2F 9C 00 00 93 61 00 1C 93 A1 00 24 93 E1 00 2C 91 81 00 0C 41 9E 00 88
+==== BB 3542 (0xFE93F98) approx BBs exec'd 0 ====
+
+	0xFE93F98:  80BC0004  lwz r5,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFE93F9C:  3BBC0008  addi r29,r28,8
+	   5: GETL       	R28, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0xFE93FA0:  54A42036  rlwinm r4,r5,4,0,27
+	   9: GETL       	R5, t6
+	  10: SHLL       	$0x4, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0xFE93FA4:  7C64E214  add r3,r4,r28
+	  13: GETL       	R4, t8
+	  14: GETL       	R28, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0xFE93FA8:  3BE3FFF8  addi r31,r3,-8
+	  18: GETL       	R3, t12
+	  19: ADDL       	$0xFFFFFFF8, t12
+	  20: PUTL       	t12, R31
+	  21: INCEIPL       	$4
+
+	0xFE93FAC:  7C1DF840  cmpl cr0,r29,r31
+	  22: GETL       	R29, t14
+	  23: GETL       	R31, t16
+	  24: CMPUL       	t14, t16, t18  (-rSo)
+	  25: ICRFL       	t18, $0x0, CR
+	  26: INCEIPL       	$4
+
+	0xFE93FB0:  41810060  bc 12,1,0xFE94010
+	  27: Js01o       	$0xFE94010
+
+
+
+. 3542 FE93F98 28
+. 80 BC 00 04 3B BC 00 08 54 A4 20 36 7C 64 E2 14 3B E3 FF F8 7C 1D F8 40 41 81 00 60
+==== BB 3543 (0xFE94010) approx BBs exec'd 0 ====
+
+	0xFE94010:  839C0000  lwz r28,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R28
+	   3: INCEIPL       	$4
+
+	0xFE94014:  2F9C0000  cmpi cr7,r28,0
+	   4: GETL       	R28, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFE94018:  409EFF80  bc 4,30,0xFE93F98
+	   8: Jc30o       	$0xFE93F98
+
+
+
+. 3543 FE94010 12
+. 83 9C 00 00 2F 9C 00 00 40 9E FF 80
+==== BB 3544 (0xFE9401C) approx BBs exec'd 0 ====
+
+	0xFE9401C:  40920098  bc 4,18,0xFE940B4
+	   0: Jc18o       	$0xFE940B4
+
+
+
+. 3544 FE9401C 4
+. 40 92 00 98
+==== BB 3545 (0xFE940B4) approx BBs exec'd 0 ====
+
+	0xFE940B4:  81410034  lwz r10,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFE940B8:  7F23CB78  or r3,r25,r25
+	   5: GETL       	R25, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE940BC:  8101000C  lwz r8,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0xFE940C0:  83C10028  lwz r30,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0xFE940C4:  7D4803A6  mtlr r10
+	  18: GETL       	R10, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFE940C8:  83E1002C  lwz r31,44(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x2C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0xFE940CC:  7D008120  mtcrf 0x8,r8
+	  26: GETL       	R8, t20
+	  27: ICRFL       	t20, $0x4, CR
+	  28: INCEIPL       	$4
+
+	0xFE940D0:  83010010  lwz r24,16(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x10, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R24
+	  33: INCEIPL       	$4
+
+	0xFE940D4:  83210014  lwz r25,20(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x14, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R25
+	  38: INCEIPL       	$4
+
+	0xFE940D8:  83410018  lwz r26,24(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x18, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R26
+	  43: INCEIPL       	$4
+
+	0xFE940DC:  8361001C  lwz r27,28(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x1C, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R27
+	  48: INCEIPL       	$4
+
+	0xFE940E0:  83810020  lwz r28,32(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x20, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R28
+	  53: INCEIPL       	$4
+
+	0xFE940E4:  83A10024  lwz r29,36(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x24, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R29
+	  58: INCEIPL       	$4
+
+	0xFE940E8:  38210030  addi r1,r1,48
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x30, t46
+	  61: PUTL       	t46, R1
+	  62: INCEIPL       	$4
+
+	0xFE940EC:  480ABD24  b 0xFF3FE10
+	  63: JMPo       	$0xFF3FE10  ($4)
+
+
+
+. 3545 FE940B4 60
+. 81 41 00 34 7F 23 CB 78 81 01 00 0C 83 C1 00 28 7D 48 03 A6 83 E1 00 2C 7D 00 81 20 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 38 21 00 30 48 0A BD 24
+==== BB 3546 __unregister_atfork(0xFF3FE10) approx BBs exec'd 0 ====
+
+	0xFF3FE10:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF3FE14:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFF3FE18:  48068039  bl 0xFFA7E50
+	   9: MOVL       	$0xFF3FE1C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3546 FF3FE10 12
+. 94 21 FF D0 7C 08 02 A6 48 06 80 39
+==== BB 3547 (0xFF3FE1C) approx BBs exec'd 0 ====
+
+	0xFF3FE1C:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF3FE20:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFF3FE24:  93010010  stw r24,16(r1)
+	   8: GETL       	R24, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFF3FE28:  90010034  stw r0,52(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x34, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF3FE2C:  93A10024  stw r29,36(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x24, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFF3FE30:  831E1CD4  lwz r24,7380(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x1CD4, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R24
+	  27: INCEIPL       	$4
+
+	0xFF3FE34:  93410018  stw r26,24(r1)
+	  28: GETL       	R26, t22
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0x18, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0xFF3FE38:  7C7A1B78  or r26,r3,r3
+	  33: GETL       	R3, t26
+	  34: PUTL       	t26, R26
+	  35: INCEIPL       	$4
+
+	0xFF3FE3C:  83B80000  lwz r29,0(r24)
+	  36: GETL       	R24, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R29
+	  39: INCEIPL       	$4
+
+	0xFF3FE40:  9361001C  stw r27,28(r1)
+	  40: GETL       	R27, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x1C, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0xFF3FE44:  3B600000  li r27,0
+	  45: MOVL       	$0x0, t36
+	  46: PUTL       	t36, R27
+	  47: INCEIPL       	$4
+
+	0xFF3FE48:  93E1002C  stw r31,44(r1)
+	  48: GETL       	R31, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x2C, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0xFF3FE4C:  7C3F0B78  or r31,r1,r1
+	  53: GETL       	R1, t42
+	  54: PUTL       	t42, R31
+	  55: INCEIPL       	$4
+
+	0xFF3FE50:  93210014  stw r25,20(r1)
+	  56: GETL       	R25, t44
+	  57: GETL       	R1, t46
+	  58: ADDL       	$0x14, t46
+	  59: STL       	t44, (t46)
+	  60: INCEIPL       	$4
+
+	0xFF3FE54:  93810020  stw r28,32(r1)
+	  61: GETL       	R28, t48
+	  62: GETL       	R1, t50
+	  63: ADDL       	$0x20, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0xFF3FE58:  48000018  b 0xFF3FE70
+	  66: JMPo       	$0xFF3FE70  ($4)
+
+
+
+. 3547 FF3FE1C 64
+. 93 C1 00 28 7F C8 02 A6 93 01 00 10 90 01 00 34 93 A1 00 24 83 1E 1C D4 93 41 00 18 7C 7A 1B 78 83 B8 00 00 93 61 00 1C 3B 60 00 00 93 E1 00 2C 7C 3F 0B 78 93 21 00 14 93 81 00 20 48 00 00 18
+==== BB 3548 (0xFF3FE70) approx BBs exec'd 0 ====
+
+	0xFF3FE70:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFF3FE74:  409AFFE8  bc 4,26,0xFF3FE5C
+	   4: Jc26o       	$0xFF3FE5C
+
+
+
+. 3548 FF3FE70 8
+. 2F 1D 00 00 40 9A FF E8
+==== BB 3549 (0xFF3FE5C) approx BBs exec'd 0 ====
+
+	0xFF3FE5C:  807D0010  lwz r3,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFF3FE60:  7F83D000  cmp cr7,r3,r26
+	   5: GETL       	R3, t4
+	   6: GETL       	R26, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFF3FE64:  419E0048  bc 12,30,0xFF3FEAC
+	  10: Js30o       	$0xFF3FEAC
+
+
+
+. 3549 FF3FE5C 12
+. 80 7D 00 10 7F 83 D0 00 41 9E 00 48
+==== BB 3550 (0xFF3FE68) approx BBs exec'd 0 ====
+
+	0xFF3FE68:  7FBBEB78  or r27,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0xFF3FE6C:  83BD0000  lwz r29,0(r29)
+	   3: GETL       	R29, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R29
+	   6: INCEIPL       	$4
+
+	0xFF3FE70:  2F1D0000  cmpi cr6,r29,0
+	   7: GETL       	R29, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x6, CR
+	  10: INCEIPL       	$4
+
+	0xFF3FE74:  409AFFE8  bc 4,26,0xFF3FE5C
+	  11: Jc26o       	$0xFF3FE5C
+
+
+
+. 3550 FF3FE68 16
+. 7F BB EB 78 83 BD 00 00 2F 1D 00 00 40 9A FF E8
+==== BB 3551 (0xFF3FE78) approx BBs exec'd 0 ====
+
+	0xFF3FE78:  80610000  lwz r3,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFF3FE7C:  83830004  lwz r28,4(r3)
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R28
+	   8: INCEIPL       	$4
+
+	0xFF3FE80:  8303FFE0  lwz r24,-32(r3)
+	   9: GETL       	R3, t8
+	  10: ADDL       	$0xFFFFFFE0, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R24
+	  13: INCEIPL       	$4
+
+	0xFF3FE84:  7F8803A6  mtlr r28
+	  14: GETL       	R28, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFF3FE88:  8323FFE4  lwz r25,-28(r3)
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0xFFFFFFE4, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R25
+	  21: INCEIPL       	$4
+
+	0xFF3FE8C:  8343FFE8  lwz r26,-24(r3)
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0xFFFFFFE8, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R26
+	  26: INCEIPL       	$4
+
+	0xFF3FE90:  8363FFEC  lwz r27,-20(r3)
+	  27: GETL       	R3, t22
+	  28: ADDL       	$0xFFFFFFEC, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R27
+	  31: INCEIPL       	$4
+
+	0xFF3FE94:  8383FFF0  lwz r28,-16(r3)
+	  32: GETL       	R3, t26
+	  33: ADDL       	$0xFFFFFFF0, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R28
+	  36: INCEIPL       	$4
+
+	0xFF3FE98:  83A3FFF4  lwz r29,-12(r3)
+	  37: GETL       	R3, t30
+	  38: ADDL       	$0xFFFFFFF4, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R29
+	  41: INCEIPL       	$4
+
+	0xFF3FE9C:  83C3FFF8  lwz r30,-8(r3)
+	  42: GETL       	R3, t34
+	  43: ADDL       	$0xFFFFFFF8, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R30
+	  46: INCEIPL       	$4
+
+	0xFF3FEA0:  83E3FFFC  lwz r31,-4(r3)
+	  47: GETL       	R3, t38
+	  48: ADDL       	$0xFFFFFFFC, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R31
+	  51: INCEIPL       	$4
+
+	0xFF3FEA4:  7C611B78  or r1,r3,r3
+	  52: GETL       	R3, t42
+	  53: PUTL       	t42, R1
+	  54: INCEIPL       	$4
+
+	0xFF3FEA8:  4E800020  blr
+	  55: GETL       	LR, t44
+	  56: JMPo-r       	t44  ($4)
+
+
+
+. 3551 FF3FE78 52
+. 80 61 00 00 83 83 00 04 83 03 FF E0 7F 88 03 A6 83 23 FF E4 83 43 FF E8 83 63 FF EC 83 83 FF F0 83 A3 FF F4 83 C3 FF F8 83 E3 FF FC 7C 61 1B 78 4E 80 00 20
+==== BB 3552 (0xFFDE950) approx BBs exec'd 0 ====
+
+	0xFFDE950:  83FE800C  lwz r31,-32756(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF800C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFFDE954:  48000014  b 0xFFDE968
+	   5: JMPo       	$0xFFDE968  ($4)
+
+
+
+. 3552 FFDE950 8
+. 83 FE 80 0C 48 00 00 14
+==== BB 3553 (0xFFDE968) approx BBs exec'd 0 ====
+
+	0xFFDE968:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFFDE96C:  81690000  lwz r11,0(r9)
+	   4: GETL       	R9, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R11
+	   7: INCEIPL       	$4
+
+	0xFFDE970:  2F8B0000  cmpi cr7,r11,0
+	   8: GETL       	R11, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFFDE974:  409EFFE4  bc 4,30,0xFFDE958
+	  12: Jc30o       	$0xFFDE958
+
+
+
+. 3553 FFDE968 16
+. 81 3F 00 00 81 69 00 00 2F 8B 00 00 40 9E FF E4
+==== BB 3554 (0xFFDE978) approx BBs exec'd 0 ====
+
+	0xFFDE978:  813E8000  lwz r9,-32768(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8000, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFFDE97C:  38000001  li r0,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFFDE980:  98090000  stb r0,0(r9)
+	   8: GETL       	R0, t6
+	   9: GETL       	R9, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFFDE984:  80010014  lwz r0,20(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0xFFDE988:  83C10008  lwz r30,8(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R30
+	  21: INCEIPL       	$4
+
+	0xFFDE98C:  83E1000C  lwz r31,12(r1)
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R31
+	  26: INCEIPL       	$4
+
+	0xFFDE990:  7C0803A6  mtlr r0
+	  27: GETL       	R0, t22
+	  28: PUTL       	t22, LR
+	  29: INCEIPL       	$4
+
+	0xFFDE994:  38210010  addi r1,r1,16
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x10, t24
+	  32: PUTL       	t24, R1
+	  33: INCEIPL       	$4
+
+	0xFFDE998:  4E800020  blr
+	  34: GETL       	LR, t26
+	  35: JMPo-r       	t26  ($4)
+
+
+
+. 3554 FFDE978 36
+. 81 3E 80 00 38 00 00 01 98 09 00 00 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3555 (0xFFDEF90) approx BBs exec'd 0 ====
+
+	0xFFDEF90:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDEF94:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFFDEF98:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xFFDEF9C:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 3555 FFDEF90 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3556 (0x2547BE74) approx BBs exec'd 0 ====
+
+	0x2547BE74:  811C0178  lwz r8,376(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x178, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547BE78:  3968FFFF  addi r11,r8,-1
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0x2547BE7C:  917C0178  stw r11,376(r28)
+	   9: GETL       	R11, t6
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x178, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547BE80:  4BFFFEE0  b 0x2547BD60
+	  14: JMPo       	$0x2547BD60  ($4)
+
+
+
+. 3556 2547BE74 16
+. 81 1C 01 78 39 68 FF FF 91 7C 01 78 4B FF FE E0
+==== BB 3557 (0x2547BD60) approx BBs exec'd 0 ====
+
+	0x2547BD60:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547BD64:  7F9A9840  cmpl cr7,r26,r19
+	   4: GETL       	R26, t2
+	   5: GETL       	R19, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547BD68:  419CFFD8  bc 12,28,0x2547BD40
+	   9: Js28o       	$0x2547BD40
+
+
+
+. 3557 2547BD60 12
+. 3B 5A 00 01 7F 9A 98 40 41 9C FF D8
+==== BB 3558 (0xEE87878) approx BBs exec'd 0 ====
+
+	0xEE87878:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE8787C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE87880:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xEE87884:  4BFFA7B5  bl 0xEE82038
+	  14: MOVL       	$0xEE87888, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xEE82038  ($4)
+
+
+
+. 3558 EE87878 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FF A7 B5
+==== BB 3559 (0xEE82038) approx BBs exec'd 0 ====
+
+	0xEE82038:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE8203C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE82040:  429F0005  bcl 20,31,0xEE82044
+	   9: MOVL       	$0xEE82044, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xEE82044:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xEE82048:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xEE8204C:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xEE82050:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xEE82054:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xEE82058:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xEE8205C:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xEE82060:  88090000  lbz r0,0(r9)
+	  45: GETL       	R9, t34
+	  46: LDB       	(t34), t36
+	  47: PUTL       	t36, R0
+	  48: INCEIPL       	$4
+
+	0xEE82064:  2F800000  cmpi cr7,r0,0
+	  49: GETL       	R0, t38
+	  50: CMP0L       	t38, t40  (-rSo)
+	  51: ICRFL       	t40, $0x7, CR
+	  52: INCEIPL       	$4
+
+	0xEE82068:  409E0050  bc 4,30,0xEE820B8
+	  53: Jc30o       	$0xEE820B8
+
+
+
+. 3559 EE82038 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 88 09 00 00 2F 80 00 00 40 9E 00 50
+==== BB 3560 (0xEE8206C) approx BBs exec'd 0 ====
+
+	0xEE8206C:  801E8004  lwz r0,-32764(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8004, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE82070:  2F800000  cmpi cr7,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xEE82074:  419E0010  bc 12,30,0xEE82084
+	   9: Js30o       	$0xEE82084
+
+
+
+. 3560 EE8206C 12
+. 80 1E 80 04 2F 80 00 00 41 9E 00 10
+==== BB 3561 (0xEE82078) approx BBs exec'd 0 ====
+
+	0xEE82078:  813E8008  lwz r9,-32760(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8008, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xEE8207C:  80690000  lwz r3,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xEE82080:  48016361  bl 0xEE983E0
+	   9: MOVL       	$0xEE82084, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0xEE983E0  ($4)
+
+
+
+. 3561 EE82078 12
+. 81 3E 80 08 80 69 00 00 48 01 63 61
+==== BB 3562 (0xEE983E0) approx BBs exec'd 0 ====
+
+	0xEE983E0:  396000F0  li r11,240
+	   0: MOVL       	$0xF0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xEE983E4:  4BFFFDF4  b 0xEE981D8
+	   3: JMPo       	$0xEE981D8  ($4)
+
+
+
+. 3562 EE983E0 8
+. 39 60 00 F0 4B FF FD F4
+==== BB 3563 (0xEE981D8) approx BBs exec'd 0 ====
+
+	0xEE981D8:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0xEE981DC:  7D6C5A14  add r11,r12,r11
+	   4: GETL       	R12, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xEE981E0:  3980AF5C  li r12,-20644
+	   9: MOVL       	$0xFFFFAF5C, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0xEE981E4:  3D8C2548  addis r12,r12,9544
+	  12: MOVL       	$0x2547AF5C, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0xEE981E8:  7D8903A6  mtctr r12
+	  15: GETL       	R12, t10
+	  16: PUTL       	t10, CTR
+	  17: INCEIPL       	$4
+
+	0xEE981EC:  39808328  li r12,-31960
+	  18: MOVL       	$0xFFFF8328, t12
+	  19: PUTL       	t12, R12
+	  20: INCEIPL       	$4
+
+	0xEE981F0:  3D8C2547  addis r12,r12,9543
+	  21: MOVL       	$0x25468328, t14
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0xEE981F4:  4E800420  bctr
+	  24: GETL       	CTR, t16
+	  25: JMPo       	t16  ($4)
+
+
+
+. 3563 EE981D8 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 83 28 3D 8C 25 47 4E 80 04 20
+==== BB 3564 (0xEE82084) approx BBs exec'd 0 ====
+
+	0xEE82084:  83FE800C  lwz r31,-32756(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF800C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xEE82088:  48000014  b 0xEE8209C
+	   5: JMPo       	$0xEE8209C  ($4)
+
+
+
+. 3564 EE82084 8
+. 83 FE 80 0C 48 00 00 14
+==== BB 3565 (0xEE8209C) approx BBs exec'd 0 ====
+
+	0xEE8209C:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xEE820A0:  81690000  lwz r11,0(r9)
+	   4: GETL       	R9, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R11
+	   7: INCEIPL       	$4
+
+	0xEE820A4:  2F8B0000  cmpi cr7,r11,0
+	   8: GETL       	R11, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xEE820A8:  409EFFE4  bc 4,30,0xEE8208C
+	  12: Jc30o       	$0xEE8208C
+
+
+
+. 3565 EE8209C 16
+. 81 3F 00 00 81 69 00 00 2F 8B 00 00 40 9E FF E4
+==== BB 3566 (0xEE820AC) approx BBs exec'd 0 ====
+
+	0xEE820AC:  813E8000  lwz r9,-32768(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8000, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xEE820B0:  38000001  li r0,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xEE820B4:  98090000  stb r0,0(r9)
+	   8: GETL       	R0, t6
+	   9: GETL       	R9, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xEE820B8:  80010014  lwz r0,20(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0xEE820BC:  83C10008  lwz r30,8(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R30
+	  21: INCEIPL       	$4
+
+	0xEE820C0:  83E1000C  lwz r31,12(r1)
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R31
+	  26: INCEIPL       	$4
+
+	0xEE820C4:  7C0803A6  mtlr r0
+	  27: GETL       	R0, t22
+	  28: PUTL       	t22, LR
+	  29: INCEIPL       	$4
+
+	0xEE820C8:  38210010  addi r1,r1,16
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x10, t24
+	  32: PUTL       	t24, R1
+	  33: INCEIPL       	$4
+
+	0xEE820CC:  4E800020  blr
+	  34: GETL       	LR, t26
+	  35: JMPo-r       	t26  ($4)
+
+
+
+. 3566 EE820AC 36
+. 81 3E 80 00 38 00 00 01 98 09 00 00 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3567 (0xEE87888) approx BBs exec'd 0 ====
+
+	0xEE87888:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE8788C:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xEE87890:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xEE87894:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 3567 EE87888 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3568 (0xEE3D124) approx BBs exec'd 0 ====
+
+	0xEE3D124:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE3D128:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE3D12C:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xEE3D130:  4BFF7161  bl 0xEE34290
+	  14: MOVL       	$0xEE3D134, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xEE34290  ($4)
+
+
+
+. 3568 EE3D124 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FF 71 61
+==== BB 3569 (0xEE34290) approx BBs exec'd 0 ====
+
+	0xEE34290:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xEE34294:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xEE34298:  429F0005  bcl 20,31,0xEE3429C
+	   9: MOVL       	$0xEE3429C, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xEE3429C:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xEE342A0:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xEE342A4:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xEE342A8:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xEE342AC:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xEE342B0:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xEE342B4:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xEE342B8:  88090000  lbz r0,0(r9)
+	  45: GETL       	R9, t34
+	  46: LDB       	(t34), t36
+	  47: PUTL       	t36, R0
+	  48: INCEIPL       	$4
+
+	0xEE342BC:  2F800000  cmpi cr7,r0,0
+	  49: GETL       	R0, t38
+	  50: CMP0L       	t38, t40  (-rSo)
+	  51: ICRFL       	t40, $0x7, CR
+	  52: INCEIPL       	$4
+
+	0xEE342C0:  409E0050  bc 4,30,0xEE34310
+	  53: Jc30o       	$0xEE34310
+
+
+
+. 3569 EE34290 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 88 09 00 00 2F 80 00 00 40 9E 00 50
+==== BB 3570 (0xEE342C4) approx BBs exec'd 0 ====
+
+	0xEE342C4:  801E8004  lwz r0,-32764(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8004, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE342C8:  2F800000  cmpi cr7,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xEE342CC:  419E0010  bc 12,30,0xEE342DC
+	   9: Js30o       	$0xEE342DC
+
+
+
+. 3570 EE342C4 12
+. 80 1E 80 04 2F 80 00 00 41 9E 00 10
+==== BB 3571 (0xEE342D0) approx BBs exec'd 0 ====
+
+	0xEE342D0:  813E8008  lwz r9,-32760(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8008, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xEE342D4:  80690000  lwz r3,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xEE342D8:  4801B805  bl 0xEE4FADC
+	   9: MOVL       	$0xEE342DC, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0xEE4FADC  ($4)
+
+
+
+. 3571 EE342D0 12
+. 81 3E 80 08 80 69 00 00 48 01 B8 05
+==== BB 3572 (0xEE4FADC) approx BBs exec'd 0 ====
+
+	0xEE4FADC:  39600070  li r11,112
+	   0: MOVL       	$0x70, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xEE4FAE0:  4BFFFEF4  b 0xEE4F9D4
+	   3: JMPo       	$0xEE4F9D4  ($4)
+
+
+
+. 3572 EE4FADC 8
+. 39 60 00 70 4B FF FE F4
+==== BB 3573 (0xEE342DC) approx BBs exec'd 0 ====
+
+	0xEE342DC:  83FE800C  lwz r31,-32756(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF800C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xEE342E0:  48000014  b 0xEE342F4
+	   5: JMPo       	$0xEE342F4  ($4)
+
+
+
+. 3573 EE342DC 8
+. 83 FE 80 0C 48 00 00 14
+==== BB 3574 (0xEE342F4) approx BBs exec'd 0 ====
+
+	0xEE342F4:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xEE342F8:  81690000  lwz r11,0(r9)
+	   4: GETL       	R9, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R11
+	   7: INCEIPL       	$4
+
+	0xEE342FC:  2F8B0000  cmpi cr7,r11,0
+	   8: GETL       	R11, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xEE34300:  409EFFE4  bc 4,30,0xEE342E4
+	  12: Jc30o       	$0xEE342E4
+
+
+
+. 3574 EE342F4 16
+. 81 3F 00 00 81 69 00 00 2F 8B 00 00 40 9E FF E4
+==== BB 3575 (0xEE34304) approx BBs exec'd 0 ====
+
+	0xEE34304:  813E8000  lwz r9,-32768(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8000, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xEE34308:  38000001  li r0,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xEE3430C:  98090000  stb r0,0(r9)
+	   8: GETL       	R0, t6
+	   9: GETL       	R9, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xEE34310:  80010014  lwz r0,20(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0xEE34314:  83C10008  lwz r30,8(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R30
+	  21: INCEIPL       	$4
+
+	0xEE34318:  83E1000C  lwz r31,12(r1)
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R31
+	  26: INCEIPL       	$4
+
+	0xEE3431C:  7C0803A6  mtlr r0
+	  27: GETL       	R0, t22
+	  28: PUTL       	t22, LR
+	  29: INCEIPL       	$4
+
+	0xEE34320:  38210010  addi r1,r1,16
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x10, t24
+	  32: PUTL       	t24, R1
+	  33: INCEIPL       	$4
+
+	0xEE34324:  4E800020  blr
+	  34: GETL       	LR, t26
+	  35: JMPo-r       	t26  ($4)
+
+
+
+. 3575 EE34304 36
+. 81 3E 80 00 38 00 00 01 98 09 00 00 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3576 (0xEE3D134) approx BBs exec'd 0 ====
+
+	0xEE3D134:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xEE3D138:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xEE3D13C:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xEE3D140:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 3576 EE3D134 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3577 (0xFFB1EC0) approx BBs exec'd 0 ====
+
+	0xFFB1EC0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFB1EC4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFB1EC8:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFFB1ECC:  4BFFEED5  bl 0xFFB0DA0
+	  14: MOVL       	$0xFFB1ED0, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFB0DA0  ($4)
+
+
+
+. 3577 FFB1EC0 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FF EE D5
+==== BB 3578 (0xFFB0DA0) approx BBs exec'd 0 ====
+
+	0xFFB0DA0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFB0DA4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFB0DA8:  429F0005  bcl 20,31,0xFFB0DAC
+	   9: MOVL       	$0xFFB0DAC, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFFB0DAC:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFFB0DB0:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xFFB0DB4:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFFB0DB8:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFFB0DBC:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFFB0DC0:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xFFB0DC4:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xFFB0DC8:  88090000  lbz r0,0(r9)
+	  45: GETL       	R9, t34
+	  46: LDB       	(t34), t36
+	  47: PUTL       	t36, R0
+	  48: INCEIPL       	$4
+
+	0xFFB0DCC:  2F800000  cmpi cr7,r0,0
+	  49: GETL       	R0, t38
+	  50: CMP0L       	t38, t40  (-rSo)
+	  51: ICRFL       	t40, $0x7, CR
+	  52: INCEIPL       	$4
+
+	0xFFB0DD0:  409E0050  bc 4,30,0xFFB0E20
+	  53: Jc30o       	$0xFFB0E20
+
+
+
+. 3578 FFB0DA0 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 88 09 00 00 2F 80 00 00 40 9E 00 50
+==== BB 3579 (0xFFB0DD4) approx BBs exec'd 0 ====
+
+	0xFFB0DD4:  801E8004  lwz r0,-32764(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8004, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFB0DD8:  2F800000  cmpi cr7,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFFB0DDC:  419E0010  bc 12,30,0xFFB0DEC
+	   9: Js30o       	$0xFFB0DEC
+
+
+
+. 3579 FFB0DD4 12
+. 80 1E 80 04 2F 80 00 00 41 9E 00 10
+==== BB 3580 (0xFFB0DE0) approx BBs exec'd 0 ====
+
+	0xFFB0DE0:  813E8008  lwz r9,-32760(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8008, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFFB0DE4:  80690000  lwz r3,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFFB0DE8:  4801235D  bl 0xFFC3144
+	   9: MOVL       	$0xFFB0DEC, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0xFFC3144  ($4)
+
+
+
+. 3580 FFB0DE0 12
+. 81 3E 80 08 80 69 00 00 48 01 23 5D
+==== BB 3581 (0xFFC3144) approx BBs exec'd 0 ====
+
+	0xFFC3144:  39600028  li r11,40
+	   0: MOVL       	$0x28, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFFC3148:  4BFFFF84  b 0xFFC30CC
+	   3: JMPo       	$0xFFC30CC  ($4)
+
+
+
+. 3581 FFC3144 8
+. 39 60 00 28 4B FF FF 84
+==== BB 3582 (0xFFC30CC) approx BBs exec'd 0 ====
+
+	0xFFC30CC:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0xFFC30D0:  7D6C5A14  add r11,r12,r11
+	   4: GETL       	R12, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xFFC30D4:  3980AF5C  li r12,-20644
+	   9: MOVL       	$0xFFFFAF5C, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0xFFC30D8:  3D8C2548  addis r12,r12,9544
+	  12: MOVL       	$0x2547AF5C, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0xFFC30DC:  7D8903A6  mtctr r12
+	  15: GETL       	R12, t10
+	  16: PUTL       	t10, CTR
+	  17: INCEIPL       	$4
+
+	0xFFC30E0:  39808B90  li r12,-29808
+	  18: MOVL       	$0xFFFF8B90, t12
+	  19: PUTL       	t12, R12
+	  20: INCEIPL       	$4
+
+	0xFFC30E4:  3D8C2547  addis r12,r12,9543
+	  21: MOVL       	$0x25468B90, t14
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0xFFC30E8:  4E800420  bctr
+	  24: GETL       	CTR, t16
+	  25: JMPo       	t16  ($4)
+
+
+
+. 3582 FFC30CC 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 8B 90 3D 8C 25 47 4E 80 04 20
+==== BB 3583 (0xFFB0DEC) approx BBs exec'd 0 ====
+
+	0xFFB0DEC:  83FE800C  lwz r31,-32756(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF800C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFFB0DF0:  48000014  b 0xFFB0E04
+	   5: JMPo       	$0xFFB0E04  ($4)
+
+
+
+. 3583 FFB0DEC 8
+. 83 FE 80 0C 48 00 00 14
+==== BB 3584 (0xFFB0E04) approx BBs exec'd 0 ====
+
+	0xFFB0E04:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFFB0E08:  81690000  lwz r11,0(r9)
+	   4: GETL       	R9, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R11
+	   7: INCEIPL       	$4
+
+	0xFFB0E0C:  2F8B0000  cmpi cr7,r11,0
+	   8: GETL       	R11, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFFB0E10:  409EFFE4  bc 4,30,0xFFB0DF4
+	  12: Jc30o       	$0xFFB0DF4
+
+
+
+. 3584 FFB0E04 16
+. 81 3F 00 00 81 69 00 00 2F 8B 00 00 40 9E FF E4
+==== BB 3585 (0xFFB0DF4) approx BBs exec'd 0 ====
+
+	0xFFB0DF4:  38090004  addi r0,r9,4
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0xFFB0DF8:  7D6803A6  mtlr r11
+	   4: GETL       	R11, t2
+	   5: PUTL       	t2, LR
+	   6: INCEIPL       	$4
+
+	0xFFB0DFC:  901F0000  stw r0,0(r31)
+	   7: GETL       	R0, t4
+	   8: GETL       	R31, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0xFFB0E00:  4E800021  blrl
+	  11: GETL       	LR, t8
+	  12: MOVL       	$0xFFB0E04, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-r       	t8  ($4)
+
+
+
+. 3585 FFB0DF4 16
+. 38 09 00 04 7D 68 03 A6 90 1F 00 00 4E 80 00 21
+==== BB 3586 fini(0xFFB16B0) approx BBs exec'd 0 ====
+
+	0xFFB16B0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFB16B4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFB16B8:  48011951  bl 0xFFC3008
+	   9: MOVL       	$0xFFB16BC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC3008  ($4)
+
+
+
+. 3586 FFB16B0 12
+. 94 21 FF F0 7C 08 02 A6 48 01 19 51
+==== BB 3587 (0xFFB16BC) approx BBs exec'd 0 ====
+
+	0xFFB16BC:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFFB16C0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFFB16C4:  93E1000C  stw r31,12(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFFB16C8:  90010014  stw r0,20(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFFB16CC:  813E002C  lwz r9,44(r30)
+	  18: GETL       	R30, t14
+	  19: ADDL       	$0x2C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R9
+	  22: INCEIPL       	$4
+
+	0xFFB16D0:  809E0038  lwz r4,56(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x38, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R4
+	  27: INCEIPL       	$4
+
+	0xFFB16D4:  83E9000C  lwz r31,12(r9)
+	  28: GETL       	R9, t22
+	  29: ADDL       	$0xC, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R31
+	  32: INCEIPL       	$4
+
+	0xFFB16D8:  2F9F0000  cmpi cr7,r31,0
+	  33: GETL       	R31, t26
+	  34: CMP0L       	t26, t28  (-rSo)
+	  35: ICRFL       	t28, $0x7, CR
+	  36: INCEIPL       	$4
+
+	0xFFB16DC:  7FE3FB78  or r3,r31,r31
+	  37: GETL       	R31, t30
+	  38: PUTL       	t30, R3
+	  39: INCEIPL       	$4
+
+	0xFFB16E0:  419E0014  bc 12,30,0xFFB16F4
+	  40: Js30o       	$0xFFB16F4
+
+
+
+. 3587 FFB16BC 40
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 81 3E 00 2C 80 9E 00 38 83 E9 00 0C 2F 9F 00 00 7F E3 FB 78 41 9E 00 14
+==== BB 3588 (0xFFB16F4) approx BBs exec'd 0 ====
+
+	0xFFB16F4:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFFB16F8:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFFB16FC:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFFB1700:  7C6803A6  mtlr r3
+	  15: GETL       	R3, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFFB1704:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xFFB1708:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 3588 FFB16F4 24
+. 80 61 00 14 83 C1 00 08 83 E1 00 0C 7C 68 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3589 (0xFFB0E14) approx BBs exec'd 0 ====
+
+	0xFFB0E14:  813E8000  lwz r9,-32768(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8000, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFFB0E18:  38000001  li r0,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFFB0E1C:  98090000  stb r0,0(r9)
+	   8: GETL       	R0, t6
+	   9: GETL       	R9, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFFB0E20:  80010014  lwz r0,20(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0xFFB0E24:  83C10008  lwz r30,8(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R30
+	  21: INCEIPL       	$4
+
+	0xFFB0E28:  83E1000C  lwz r31,12(r1)
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R31
+	  26: INCEIPL       	$4
+
+	0xFFB0E2C:  7C0803A6  mtlr r0
+	  27: GETL       	R0, t22
+	  28: PUTL       	t22, LR
+	  29: INCEIPL       	$4
+
+	0xFFB0E30:  38210010  addi r1,r1,16
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x10, t24
+	  32: PUTL       	t24, R1
+	  33: INCEIPL       	$4
+
+	0xFFB0E34:  4E800020  blr
+	  34: GETL       	LR, t26
+	  35: JMPo-r       	t26  ($4)
+
+
+
+. 3589 FFB0E14 36
+. 81 3E 80 00 38 00 00 01 98 09 00 00 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3590 (0xFFB1ED0) approx BBs exec'd 0 ====
+
+	0xFFB1ED0:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFB1ED4:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFFB1ED8:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xFFB1EDC:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+
+. 3590 FFB1ED0 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3591 (0x2547BE14) approx BBs exec'd 0 ====
+
+	0x2547BE14:  809C0090  lwz r4,144(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x90, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547BE18:  80690004  lwz r3,4(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547BE1C:  83640004  lwz r27,4(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R27
+	  14: INCEIPL       	$4
+
+	0x2547BE20:  801C0000  lwz r0,0(r28)
+	  15: GETL       	R28, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R0
+	  18: INCEIPL       	$4
+
+	0x2547BE24:  576CF0BE  rlwinm r12,r27,30,2,31
+	  19: GETL       	R27, t16
+	  20: SHRL       	$0x2, t16
+	  21: PUTL       	t16, R12
+	  22: INCEIPL       	$4
+
+	0x2547BE28:  2F8C0000  cmpi cr7,r12,0
+	  23: GETL       	R12, t18
+	  24: CMP0L       	t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x7, CR
+	  26: INCEIPL       	$4
+
+	0x2547BE2C:  7F601A14  add r27,r0,r3
+	  27: GETL       	R0, t22
+	  28: GETL       	R3, t24
+	  29: ADDL       	t22, t24
+	  30: PUTL       	t24, R27
+	  31: INCEIPL       	$4
+
+	0x2547BE30:  3BACFFFF  addi r29,r12,-1
+	  32: GETL       	R12, t26
+	  33: ADDL       	$0xFFFFFFFF, t26
+	  34: PUTL       	t26, R29
+	  35: INCEIPL       	$4
+
+	0x2547BE34:  419E0020  bc 12,30,0x2547BE54
+	  36: Js30o       	$0x2547BE54
+
+
+
+. 3591 2547BE14 36
+. 80 9C 00 90 80 69 00 04 83 64 00 04 80 1C 00 00 57 6C F0 BE 2F 8C 00 00 7F 60 1A 14 3B AC FF FF 41 9E 00 20
+==== BB 3592 (0x2547BE38) approx BBs exec'd 0 ====
+
+	0x2547BE38:  57A6103A  rlwinm r6,r29,2,0,29
+	   0: GETL       	R29, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x2547BE3C:  7D26D82E  lwzx r9,r6,r27
+	   4: GETL       	R27, t2
+	   5: GETL       	R6, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x2547BE40:  7D2903A6  mtctr r9
+	  10: GETL       	R9, t8
+	  11: PUTL       	t8, CTR
+	  12: INCEIPL       	$4
+
+	0x2547BE44:  4E800421  bctrl
+	  13: MOVL       	$0x2547BE48, t10
+	  14: PUTL       	t10, LR
+	  15: GETL       	CTR, t12
+	  16: JMPo-c       	t12  ($4)
+
+
+
+. 3592 2547BE38 16
+. 57 A6 10 3A 7D 26 D8 2E 7D 29 03 A6 4E 80 04 21
+==== BB 3593 __libc_fini(0xFE7B5BC) approx BBs exec'd 0 ====
+
+	0xFE7B5BC:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE7B5C0:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE7B5C4:  4812C88D  bl 0xFFA7E50
+	   9: MOVL       	$0xFE7B5C8, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3593 FE7B5BC 12
+. 94 21 FF F0 7C 08 02 A6 48 12 C8 8D
+==== BB 3594 (0xFE7B5C8) approx BBs exec'd 0 ====
+
+	0xFE7B5C8:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE7B5CC:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE7B5D0:  93E1000C  stw r31,12(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE7B5D4:  90010014  stw r0,20(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE7B5D8:  813E0014  lwz r9,20(r30)
+	  18: GETL       	R30, t14
+	  19: ADDL       	$0x14, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R9
+	  22: INCEIPL       	$4
+
+	0xFE7B5DC:  80090004  lwz r0,4(r9)
+	  23: GETL       	R9, t18
+	  24: ADDL       	$0x4, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R0
+	  27: INCEIPL       	$4
+
+	0xFE7B5E0:  3BE90004  addi r31,r9,4
+	  28: GETL       	R9, t22
+	  29: ADDL       	$0x4, t22
+	  30: PUTL       	t22, R31
+	  31: INCEIPL       	$4
+
+	0xFE7B5E4:  2F800000  cmpi cr7,r0,0
+	  32: GETL       	R0, t24
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x7, CR
+	  35: INCEIPL       	$4
+
+	0xFE7B5E8:  419E0018  bc 12,30,0xFE7B600
+	  36: Js30o       	$0xFE7B600
+
+
+
+. 3594 FE7B5C8 36
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 81 3E 00 14 80 09 00 04 3B E9 00 04 2F 80 00 00 41 9E 00 18
+==== BB 3595 (0xFE7B600) approx BBs exec'd 0 ====
+
+	0xFE7B600:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE7B604:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFE7B608:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFE7B60C:  7C6803A6  mtlr r3
+	  15: GETL       	R3, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFE7B610:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xFE7B614:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+
+. 3595 FE7B600 24
+. 80 61 00 14 83 C1 00 08 83 E1 00 0C 7C 68 03 A6 38 21 00 10 4E 80 00 20
+==== BB 3596 (0x2547BE48) approx BBs exec'd 0 ====
+
+	0x2547BE48:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547BE4C:  3BBDFFFF  addi r29,r29,-1
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x2547BE50:  409EFFE8  bc 4,30,0x2547BE38
+	   8: Jc30o       	$0x2547BE38
+
+
+
+. 3596 2547BE48 12
+. 2F 9D 00 00 3B BD FF FF 40 9E FF E8
+==== BB 3597 (0x2547BD54) approx BBs exec'd 0 ====
+
+	0x2547BD54:  811C0178  lwz r8,376(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x178, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547BD58:  3968FFFF  addi r11,r8,-1
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0x2547BD5C:  917C0178  stw r11,376(r28)
+	   9: GETL       	R11, t6
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x178, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547BD60:  3B5A0001  addi r26,r26,1
+	  14: GETL       	R26, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R26
+	  17: INCEIPL       	$4
+
+	0x2547BD64:  7F9A9840  cmpl cr7,r26,r19
+	  18: GETL       	R26, t12
+	  19: GETL       	R19, t14
+	  20: CMPUL       	t12, t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x2547BD68:  419CFFD8  bc 12,28,0x2547BD40
+	  23: Js28o       	$0x2547BD40
+
+
+
+. 3597 2547BD54 24
+. 81 1C 01 78 39 68 FF FF 91 7C 01 78 3B 5A 00 01 7F 9A 98 40 41 9C FF D8
+==== BB 3598 (0x2547BD78) approx BBs exec'd 0 ====
+
+	0x2547BD78:  81EE0000  lwz r15,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R15
+	   3: INCEIPL       	$4
+
+	0x2547BD7C:  71E90080  andi. r9,r15,0x80
+	   4: GETL       	R15, t4
+	   5: ANDL       	$0x80, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547BD80:  408201AC  bc 4,2,0x2547BF2C
+	  10: Jc02o       	$0x2547BF2C
+
+
+
+. 3598 2547BD78 12
+. 81 EE 00 00 71 E9 00 80 40 82 01 AC
+==== BB 3599 (0x2547BD84) approx BBs exec'd 0 ====
+
+	0x2547BD84:  81410000  lwz r10,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0x2547BD88:  81CA0004  lwz r14,4(r10)
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R14
+	   8: INCEIPL       	$4
+
+	0x2547BD8C:  818AFFB4  lwz r12,-76(r10)
+	   9: GETL       	R10, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x2547BD90:  7DC803A6  mtlr r14
+	  14: GETL       	R14, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x2547BD94:  81EAFFBC  lwz r15,-68(r10)
+	  17: GETL       	R10, t14
+	  18: ADDL       	$0xFFFFFFBC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R15
+	  21: INCEIPL       	$4
+
+	0x2547BD98:  81CAFFB8  lwz r14,-72(r10)
+	  22: GETL       	R10, t18
+	  23: ADDL       	$0xFFFFFFB8, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R14
+	  26: INCEIPL       	$4
+
+	0x2547BD9C:  7D808120  mtcrf 0x8,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x2547BDA0:  820AFFC0  lwz r16,-64(r10)
+	  30: GETL       	R10, t24
+	  31: ADDL       	$0xFFFFFFC0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R16
+	  34: INCEIPL       	$4
+
+	0x2547BDA4:  822AFFC4  lwz r17,-60(r10)
+	  35: GETL       	R10, t28
+	  36: ADDL       	$0xFFFFFFC4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R17
+	  39: INCEIPL       	$4
+
+	0x2547BDA8:  824AFFC8  lwz r18,-56(r10)
+	  40: GETL       	R10, t32
+	  41: ADDL       	$0xFFFFFFC8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R18
+	  44: INCEIPL       	$4
+
+	0x2547BDAC:  826AFFCC  lwz r19,-52(r10)
+	  45: GETL       	R10, t36
+	  46: ADDL       	$0xFFFFFFCC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R19
+	  49: INCEIPL       	$4
+
+	0x2547BDB0:  828AFFD0  lwz r20,-48(r10)
+	  50: GETL       	R10, t40
+	  51: ADDL       	$0xFFFFFFD0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R20
+	  54: INCEIPL       	$4
+
+	0x2547BDB4:  82AAFFD4  lwz r21,-44(r10)
+	  55: GETL       	R10, t44
+	  56: ADDL       	$0xFFFFFFD4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R21
+	  59: INCEIPL       	$4
+
+	0x2547BDB8:  82CAFFD8  lwz r22,-40(r10)
+	  60: GETL       	R10, t48
+	  61: ADDL       	$0xFFFFFFD8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R22
+	  64: INCEIPL       	$4
+
+	0x2547BDBC:  82EAFFDC  lwz r23,-36(r10)
+	  65: GETL       	R10, t52
+	  66: ADDL       	$0xFFFFFFDC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R23
+	  69: INCEIPL       	$4
+
+	0x2547BDC0:  830AFFE0  lwz r24,-32(r10)
+	  70: GETL       	R10, t56
+	  71: ADDL       	$0xFFFFFFE0, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R24
+	  74: INCEIPL       	$4
+
+	0x2547BDC4:  832AFFE4  lwz r25,-28(r10)
+	  75: GETL       	R10, t60
+	  76: ADDL       	$0xFFFFFFE4, t60
+	  77: LDL       	(t60), t62
+	  78: PUTL       	t62, R25
+	  79: INCEIPL       	$4
+
+	0x2547BDC8:  834AFFE8  lwz r26,-24(r10)
+	  80: GETL       	R10, t64
+	  81: ADDL       	$0xFFFFFFE8, t64
+	  82: LDL       	(t64), t66
+	  83: PUTL       	t66, R26
+	  84: INCEIPL       	$4
+
+	0x2547BDCC:  836AFFEC  lwz r27,-20(r10)
+	  85: GETL       	R10, t68
+	  86: ADDL       	$0xFFFFFFEC, t68
+	  87: LDL       	(t68), t70
+	  88: PUTL       	t70, R27
+	  89: INCEIPL       	$4
+
+	0x2547BDD0:  838AFFF0  lwz r28,-16(r10)
+	  90: GETL       	R10, t72
+	  91: ADDL       	$0xFFFFFFF0, t72
+	  92: LDL       	(t72), t74
+	  93: PUTL       	t74, R28
+	  94: INCEIPL       	$4
+
+	0x2547BDD4:  83AAFFF4  lwz r29,-12(r10)
+	  95: GETL       	R10, t76
+	  96: ADDL       	$0xFFFFFFF4, t76
+	  97: LDL       	(t76), t78
+	  98: PUTL       	t78, R29
+	  99: INCEIPL       	$4
+
+	0x2547BDD8:  83CAFFF8  lwz r30,-8(r10)
+	 100: GETL       	R10, t80
+	 101: ADDL       	$0xFFFFFFF8, t80
+	 102: LDL       	(t80), t82
+	 103: PUTL       	t82, R30
+	 104: INCEIPL       	$4
+
+	0x2547BDDC:  83EAFFFC  lwz r31,-4(r10)
+	 105: GETL       	R10, t84
+	 106: ADDL       	$0xFFFFFFFC, t84
+	 107: LDL       	(t84), t86
+	 108: PUTL       	t86, R31
+	 109: INCEIPL       	$4
+
+	0x2547BDE0:  7D415378  or r1,r10,r10
+	 110: GETL       	R10, t88
+	 111: PUTL       	t88, R1
+	 112: INCEIPL       	$4
+
+	0x2547BDE4:  4E800020  blr
+	 113: GETL       	LR, t90
+	 114: JMPo-r       	t90  ($4)
+
+
+
+. 3599 2547BD84 100
+. 81 41 00 00 81 CA 00 04 81 8A FF B4 7D C8 03 A6 81 EA FF BC 81 CA FF B8 7D 80 81 20 82 0A FF C0 82 2A FF C4 82 4A FF C8 82 6A FF CC 82 8A FF D0 82 AA FF D4 82 CA FF D8 82 EA FF DC 83 0A FF E0 83 2A FF E4 83 4A FF E8 83 6A FF EC 83 8A FF F0 83 AA FF F4 83 CA FF F8 83 EA FF FC 7D 41 53 78 4E 80 00 20
+==== BB 3600 (0xFE93C54) approx BBs exec'd 0 ====
+
+	0xFE93C54:  807F0000  lwz r3,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFE93C58:  80030000  lwz r0,0(r3)
+	   4: GETL       	R3, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFE93C5C:  2F000000  cmpi cr6,r0,0
+	   8: GETL       	R0, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFE93C60:  901F0000  stw r0,0(r31)
+	  12: GETL       	R0, t12
+	  13: GETL       	R31, t14
+	  14: STL       	t12, (t14)
+	  15: INCEIPL       	$4
+
+	0xFE93C64:  419AFF64  bc 12,26,0xFE93BC8
+	  16: Js26o       	$0xFE93BC8
+
+
+
+. 3600 FE93C54 20
+. 80 7F 00 00 80 03 00 00 2F 00 00 00 90 1F 00 00 41 9A FF 64
+==== BB 3601 (0xFE93BC8) approx BBs exec'd 0 ====
+
+	0xFE93BC8:  2F800000  cmpi cr7,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE93BCC:  419E00CC  bc 12,30,0xFE93C98
+	   4: Js30o       	$0xFE93C98
+
+
+
+. 3601 FE93BC8 8
+. 2F 80 00 00 41 9E 00 CC
+==== BB 3602 (0xFE93C98) approx BBs exec'd 0 ====
+
+	0xFE93C98:  83FE1BBC  lwz r31,7100(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BBC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFE93C9C:  83BE1C38  lwz r29,7224(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1C38, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFE93CA0:  7F9FE840  cmpl cr7,r31,r29
+	  10: GETL       	R31, t8
+	  11: GETL       	R29, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFE93CA4:  409C001C  bc 4,28,0xFE93CC0
+	  15: Jc28o       	$0xFE93CC0
+
+
+
+. 3602 FE93C98 16
+. 83 FE 1B BC 83 BE 1C 38 7F 9F E8 40 40 9C 00 1C
+==== BB 3603 (0xFE93CA8) approx BBs exec'd 0 ====
+
+	0xFE93CA8:  815F0000  lwz r10,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFE93CAC:  3BFF0004  addi r31,r31,4
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0xFE93CB0:  7D4903A6  mtctr r10
+	   8: GETL       	R10, t6
+	   9: PUTL       	t6, CTR
+	  10: INCEIPL       	$4
+
+	0xFE93CB4:  4E800421  bctrl
+	  11: MOVL       	$0xFE93CB8, t8
+	  12: PUTL       	t8, LR
+	  13: GETL       	CTR, t10
+	  14: JMPo-c       	t10  ($4)
+
+
+
+. 3603 FE93CA8 16
+. 81 5F 00 00 3B FF 00 04 7D 49 03 A6 4E 80 04 21
+==== BB 3604 _IO_cleanup(0xFECCB0C) approx BBs exec'd 0 ====
+
+	0xFECCB0C:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFECCB10:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFECCB14:  480DB33D  bl 0xFFA7E50
+	   9: MOVL       	$0xFECCB18, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3604 FECCB0C 12
+. 94 21 FF E0 7C 08 02 A6 48 0D B3 3D
+==== BB 3605 (0xFECCB18) approx BBs exec'd 0 ====
+
+	0xFECCB18:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECCB1C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFECCB20:  38600000  li r3,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFECCB24:  93810010  stw r28,16(r1)
+	  11: GETL       	R28, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x10, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFECCB28:  93E1001C  stw r31,28(r1)
+	  16: GETL       	R31, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x1C, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFECCB2C:  93A10014  stw r29,20(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFECCB30:  90010024  stw r0,36(r1)
+	  26: GETL       	R0, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x24, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFECCB34:  4BFFF98D  bl 0xFECC4C0
+	  31: MOVL       	$0xFECCB38, t24
+	  32: PUTL       	t24, LR
+	  33: JMPo-c       	$0xFECC4C0  ($4)
+
+
+
+. 3605 FECCB18 32
+. 93 C1 00 18 7F C8 02 A6 38 60 00 00 93 81 00 10 93 E1 00 1C 93 A1 00 14 90 01 00 24 4B FF F9 8D
+==== BB 3606 _IO_flush_all_lockp(0xFECC4C0) approx BBs exec'd 0 ====
+
+	0xFECC4C0:  9421FFB0  stwu r1,-80(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFB0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFECC4C4:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFECC4C8:  480DB989  bl 0xFFA7E50
+	   9: MOVL       	$0xFECC4CC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3606 FECC4C0 12
+. 94 21 FF B0 7C 88 02 A6 48 0D B9 89
+==== BB 3607 (0xFECC4CC) approx BBs exec'd 0 ====
+
+	0xFECC4CC:  93C10048  stw r30,72(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x48, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECC4D0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFECC4D4:  92C10028  stw r22,40(r1)
+	   8: GETL       	R22, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x28, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFECC4D8:  90810054  stw r4,84(r1)
+	  13: GETL       	R4, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x54, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFECC4DC:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0xFECC4E0:  92E1002C  stw r23,44(r1)
+	  21: GETL       	R23, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x2C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFECC4E4:  3AE00000  li r23,0
+	  26: MOVL       	$0x0, t20
+	  27: PUTL       	t20, R23
+	  28: INCEIPL       	$4
+
+	0xFECC4E8:  82DE1B48  lwz r22,6984(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x1B48, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R22
+	  33: INCEIPL       	$4
+
+	0xFECC4EC:  93E1004C  stw r31,76(r1)
+	  34: GETL       	R31, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x4C, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFECC4F0:  7C7F1B78  or r31,r3,r3
+	  39: GETL       	R3, t30
+	  40: PUTL       	t30, R31
+	  41: INCEIPL       	$4
+
+	0xFECC4F4:  813600B8  lwz r9,184(r22)
+	  42: GETL       	R22, t32
+	  43: ADDL       	$0xB8, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R9
+	  46: INCEIPL       	$4
+
+	0xFECC4F8:  93010030  stw r24,48(r1)
+	  47: GETL       	R24, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x30, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0xFECC4FC:  3169FFFF  addic r11,r9,-1
+	  52: GETL       	R9, t40
+	  53: ADCL       	$0xFFFFFFFF, t40  (-wCa)
+	  54: PUTL       	t40, R11
+	  55: INCEIPL       	$4
+
+	0xFECC500:  7C0B4910  subfe r0,r11,r9
+	  56: GETL       	R11, t42
+	  57: GETL       	R9, t44
+	  58: SBBL       	t42, t44  (-rCa-wCa)
+	  59: PUTL       	t44, R0
+	  60: INCEIPL       	$4
+
+	0xFECC504:  93210034  stw r25,52(r1)
+	  61: GETL       	R25, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x34, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0xFECC508:  2D800000  cmpi cr3,r0,0
+	  66: GETL       	R0, t50
+	  67: CMP0L       	t50, t52  (-rSo)
+	  68: ICRFL       	t52, $0x3, CR
+	  69: INCEIPL       	$4
+
+	0xFECC50C:  93410038  stw r26,56(r1)
+	  70: GETL       	R26, t54
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0x38, t56
+	  73: STL       	t54, (t56)
+	  74: INCEIPL       	$4
+
+	0xFECC510:  9361003C  stw r27,60(r1)
+	  75: GETL       	R27, t58
+	  76: GETL       	R1, t60
+	  77: ADDL       	$0x3C, t60
+	  78: STL       	t58, (t60)
+	  79: INCEIPL       	$4
+
+	0xFECC514:  93810040  stw r28,64(r1)
+	  80: GETL       	R28, t62
+	  81: GETL       	R1, t64
+	  82: ADDL       	$0x40, t64
+	  83: STL       	t62, (t64)
+	  84: INCEIPL       	$4
+
+	0xFECC518:  93A10044  stw r29,68(r1)
+	  85: GETL       	R29, t66
+	  86: GETL       	R1, t68
+	  87: ADDL       	$0x44, t68
+	  88: STL       	t66, (t68)
+	  89: INCEIPL       	$4
+
+	0xFECC51C:  91810024  stw r12,36(r1)
+	  90: GETL       	R12, t70
+	  91: GETL       	R1, t72
+	  92: ADDL       	$0x24, t72
+	  93: STL       	t70, (t72)
+	  94: INCEIPL       	$4
+
+	0xFECC520:  408E0248  bc 4,14,0xFECC768
+	  95: Jc14o       	$0xFECC768
+
+
+
+. 3607 FECC4CC 88
+. 93 C1 00 48 7F C8 02 A6 92 C1 00 28 90 81 00 54 7D 80 00 26 92 E1 00 2C 3A E0 00 00 82 DE 1B 48 93 E1 00 4C 7C 7F 1B 78 81 36 00 B8 93 01 00 30 31 69 FF FF 7C 0B 49 10 93 21 00 34 2D 80 00 00 93 41 00 38 93 61 00 3C 93 81 00 40 93 A1 00 44 91 81 00 24 40 8E 02 48
+==== BB 3608 (0xFECC524) approx BBs exec'd 0 ====
+
+	0xFECC524:  807E05E4  lwz r3,1508(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5E4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECC528:  92E10014  stw r23,20(r1)
+	   5: GETL       	R23, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x14, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFECC52C:  90610010  stw r3,16(r1)
+	  10: GETL       	R3, t8
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0x10, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFECC530:  2E1F0000  cmpi cr4,r31,0
+	  15: GETL       	R31, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x4, CR
+	  18: INCEIPL       	$4
+
+	0xFECC534:  41920050  bc 12,18,0xFECC584
+	  19: Js18o       	$0xFECC584
+
+
+
+. 3608 FECC524 20
+. 80 7E 05 E4 92 E1 00 14 90 61 00 10 2E 1F 00 00 41 92 00 50
+==== BB 3609 (0xFECC584) approx BBs exec'd 0 ====
+
+	0xFECC584:  831E1BEC  lwz r24,7148(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BEC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0xFECC588:  833E05EC  lwz r25,1516(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x5EC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0xFECC58C:  83F80000  lwz r31,0(r24)
+	  10: GETL       	R24, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R31
+	  13: INCEIPL       	$4
+
+	0xFECC590:  83990000  lwz r28,0(r25)
+	  14: GETL       	R25, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R28
+	  17: INCEIPL       	$4
+
+	0xFECC594:  2C9F0000  cmpi cr1,r31,0
+	  18: GETL       	R31, t16
+	  19: CMP0L       	t16, t18  (-rSo)
+	  20: ICRFL       	t18, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0xFECC598:  41860128  bc 12,6,0xFECC6C0
+	  22: Js06o       	$0xFECC6C0
+
+
+
+. 3609 FECC584 24
+. 83 1E 1B EC 83 3E 05 EC 83 F8 00 00 83 99 00 00 2C 9F 00 00 41 86 01 28
+==== BB 3610 (0xFECC59C) approx BBs exec'd 0 ====
+
+	0xFECC59C:  835E05E8  lwz r26,1512(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFECC5A0:  3B600000  li r27,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R27
+	   7: INCEIPL       	$4
+
+	0xFECC5A4:  93FA0000  stw r31,0(r26)
+	   8: GETL       	R31, t6
+	   9: GETL       	R26, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFECC5A8:  4192005C  bc 12,18,0xFECC604
+	  12: Js18o       	$0xFECC604
+
+
+
+. 3610 FECC59C 16
+. 83 5E 05 E8 3B 60 00 00 93 FA 00 00 41 92 00 5C
+==== BB 3611 (0xFECC604) approx BBs exec'd 0 ====
+
+	0xFECC604:  807F0060  lwz r3,96(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFECC608:  2F030000  cmpi cr6,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFECC60C:  40990144  bc 4,25,0xFECC750
+	   9: Jc25o       	$0xFECC750
+
+
+
+. 3611 FECC604 12
+. 80 7F 00 60 2F 03 00 00 40 99 01 44
+==== BB 3612 (0xFECC750) approx BBs exec'd 0 ====
+
+	0xFECC750:  817F0014  lwz r11,20(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFECC754:  813F0010  lwz r9,16(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFECC758:  7C8B4840  cmpl cr1,r11,r9
+	  10: GETL       	R11, t8
+	  11: GETL       	R9, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0xFECC75C:  40A5FEB4  bc 5,5,0xFECC610
+	  15: Jc05o       	$0xFECC610
+
+
+
+. 3612 FECC750 16
+. 81 7F 00 14 81 3F 00 10 7C 8B 48 40 40 A5 FE B4
+==== BB 3613 (0xFECC610) approx BBs exec'd 0 ====
+
+	0xFECC610:  895F0046  lbz r10,70(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFECC614:  2C0A0000  cmpi cr0,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFECC618:  40820044  bc 4,2,0xFECC65C
+	   9: Jc02o       	$0xFECC65C
+
+
+
+. 3613 FECC610 12
+. 89 5F 00 46 2C 0A 00 00 40 82 00 44
+==== BB 3614 (0xFECC61C) approx BBs exec'd 0 ====
+
+	0xFECC61C:  40990040  bc 4,25,0xFECC65C
+	   0: Jc25o       	$0xFECC65C
+
+
+
+. 3614 FECC61C 4
+. 40 99 00 40
+==== BB 3615 (0xFECC65C) approx BBs exec'd 0 ====
+
+	0xFECC65C:  41920044  bc 12,18,0xFECC6A0
+	   0: Js18o       	$0xFECC6A0
+
+
+
+. 3615 FECC65C 4
+. 41 92 00 44
+==== BB 3616 (0xFECC6A0) approx BBs exec'd 0 ====
+
+	0xFECC6A0:  80190000  lwz r0,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFECC6A4:  937A0000  stw r27,0(r26)
+	   4: GETL       	R27, t4
+	   5: GETL       	R26, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0xFECC6A8:  7F9C0000  cmp cr7,r28,r0
+	   8: GETL       	R28, t8
+	   9: GETL       	R0, t10
+	  10: CMPL       	t8, t10, t12  (-rSo)
+	  11: ICRFL       	t12, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFECC6AC:  419E009C  bc 12,30,0xFECC748
+	  13: Js30o       	$0xFECC748
+
+
+
+. 3616 FECC6A0 16
+. 80 19 00 00 93 7A 00 00 7F 9C 00 00 41 9E 00 9C
+==== BB 3617 (0xFECC748) approx BBs exec'd 0 ====
+
+	0xFECC748:  83FF0034  lwz r31,52(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFECC74C:  4BFFFF6C  b 0xFECC6B8
+	   5: JMPo       	$0xFECC6B8  ($4)
+
+
+
+. 3617 FECC748 8
+. 83 FF 00 34 4B FF FF 6C
+==== BB 3618 (0xFECC6B8) approx BBs exec'd 0 ====
+
+	0xFECC6B8:  2C1F0000  cmpi cr0,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFECC6BC:  4082FEE8  bc 4,2,0xFECC5A4
+	   4: Jc02o       	$0xFECC5A4
+
+
+
+. 3618 FECC6B8 8
+. 2C 1F 00 00 40 82 FE E8
+==== BB 3619 (0xFECC5A4) approx BBs exec'd 0 ====
+
+	0xFECC5A4:  93FA0000  stw r31,0(r26)
+	   0: GETL       	R31, t0
+	   1: GETL       	R26, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFECC5A8:  4192005C  bc 12,18,0xFECC604
+	   4: Js18o       	$0xFECC604
+
+
+
+. 3619 FECC5A4 8
+. 93 FA 00 00 41 92 00 5C
+==== BB 3620 (0xFECC6C0) approx BBs exec'd 0 ====
+
+	0xFECC6C0:  41920038  bc 12,18,0xFECC6F8
+	   0: Js18o       	$0xFECC6F8
+
+
+
+. 3620 FECC6C0 4
+. 41 92 00 38
+==== BB 3621 (0xFECC6F8) approx BBs exec'd 0 ====
+
+	0xFECC6F8:  408E0088  bc 4,14,0xFECC780
+	   0: Jc14o       	$0xFECC780
+
+
+
+. 3621 FECC6F8 4
+. 40 8E 00 88
+==== BB 3622 (0xFECC6FC) approx BBs exec'd 0 ====
+
+	0xFECC6FC:  7EE3BB78  or r3,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFECC700:  81810024  lwz r12,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0xFECC704:  82E10054  lwz r23,84(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x54, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0xFECC708:  82C10028  lwz r22,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R22
+	  17: INCEIPL       	$4
+
+	0xFECC70C:  7D818120  mtcrf 0x18,r12
+	  18: GETL       	R12, t14
+	  19: ICRFL       	t14, $0x3, CR
+	  20: ICRFL       	t14, $0x4, CR
+	  21: INCEIPL       	$4
+
+	0xFECC710:  7EE803A6  mtlr r23
+	  22: GETL       	R23, t16
+	  23: PUTL       	t16, LR
+	  24: INCEIPL       	$4
+
+	0xFECC714:  83010030  lwz r24,48(r1)
+	  25: GETL       	R1, t18
+	  26: ADDL       	$0x30, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R24
+	  29: INCEIPL       	$4
+
+	0xFECC718:  82E1002C  lwz r23,44(r1)
+	  30: GETL       	R1, t22
+	  31: ADDL       	$0x2C, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R23
+	  34: INCEIPL       	$4
+
+	0xFECC71C:  83210034  lwz r25,52(r1)
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x34, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R25
+	  39: INCEIPL       	$4
+
+	0xFECC720:  83410038  lwz r26,56(r1)
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x38, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R26
+	  44: INCEIPL       	$4
+
+	0xFECC724:  8361003C  lwz r27,60(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x3C, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R27
+	  49: INCEIPL       	$4
+
+	0xFECC728:  83810040  lwz r28,64(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x40, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R28
+	  54: INCEIPL       	$4
+
+	0xFECC72C:  83A10044  lwz r29,68(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x44, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R29
+	  59: INCEIPL       	$4
+
+	0xFECC730:  83C10048  lwz r30,72(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x48, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R30
+	  64: INCEIPL       	$4
+
+	0xFECC734:  83E1004C  lwz r31,76(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x4C, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R31
+	  69: INCEIPL       	$4
+
+	0xFECC738:  38210050  addi r1,r1,80
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x50, t54
+	  72: PUTL       	t54, R1
+	  73: INCEIPL       	$4
+
+	0xFECC73C:  4E800020  blr
+	  74: GETL       	LR, t56
+	  75: JMPo-r       	t56  ($4)
+
+
+
+. 3622 FECC6FC 68
+. 7E E3 BB 78 81 81 00 24 82 E1 00 54 82 C1 00 28 7D 81 81 20 7E E8 03 A6 83 01 00 30 82 E1 00 2C 83 21 00 34 83 41 00 38 83 61 00 3C 83 81 00 40 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+==== BB 3623 (0xFECCB38) approx BBs exec'd 0 ====
+
+	0xFECCB38:  813E1BEC  lwz r9,7148(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BEC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFECCB3C:  7C7C1B78  or r28,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFECCB40:  83E90000  lwz r31,0(r9)
+	   8: GETL       	R9, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R31
+	  11: INCEIPL       	$4
+
+	0xFECCB44:  2F9F0000  cmpi cr7,r31,0
+	  12: GETL       	R31, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0xFECCB48:  419E0080  bc 12,30,0xFECCBC8
+	  16: Js30o       	$0xFECCBC8
+
+
+
+. 3623 FECCB38 20
+. 81 3E 1B EC 7C 7C 1B 78 83 E9 00 00 2F 9F 00 00 41 9E 00 80
+==== BB 3624 (0xFECCB4C) approx BBs exec'd 0 ====
+
+	0xFECCB4C:  3BA0FFFF  li r29,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFECCB50:  48000014  b 0xFECCB64
+	   3: JMPo       	$0xFECCB64  ($4)
+
+
+
+. 3624 FECCB4C 8
+. 3B A0 FF FF 48 00 00 14
+==== BB 3625 (0xFECCB64) approx BBs exec'd 0 ====
+
+	0xFECCB64:  801F0000  lwz r0,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFECCB68:  70090002  andi. r9,r0,0x2
+	   4: GETL       	R0, t4
+	   5: ANDL       	$0x2, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFECCB6C:  5403EFFE  rlwinm r3,r0,29,31,31
+	  10: GETL       	R0, t8
+	  11: ROLL       	$0x1D, t8
+	  12: ANDL       	$0x1, t8
+	  13: PUTL       	t8, R3
+	  14: INCEIPL       	$4
+
+	0xFECCB70:  2F830000  cmpi cr7,r3,0
+	  15: GETL       	R3, t10
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFECCB74:  40A2FFE0  bc 5,2,0xFECCB54
+	  19: Jc02o       	$0xFECCB54
+
+
+
+. 3625 FECCB64 20
+. 80 1F 00 00 70 09 00 02 54 03 EF FE 2F 83 00 00 40 A2 FF E0
+==== BB 3626 (0xFECCB54) approx BBs exec'd 0 ====
+
+	0xFECCB54:  93BF0060  stw r29,96(r31)
+	   0: GETL       	R29, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x60, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFECCB58:  83FF0034  lwz r31,52(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x34, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0xFECCB5C:  2F1F0000  cmpi cr6,r31,0
+	  10: GETL       	R31, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFECCB60:  419A0068  bc 12,26,0xFECCBC8
+	  14: Js26o       	$0xFECCBC8
+
+
+
+. 3626 FECCB54 16
+. 93 BF 00 60 83 FF 00 34 2F 1F 00 00 41 9A 00 68
+==== BB 3627 (0xFECCB78) approx BBs exec'd 0 ====
+
+	0xFECCB78:  70091000  andi. r9,r0,0x1000
+	   0: GETL       	R0, t0
+	   1: ANDL       	$0x1000, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFECCB7C:  419E0008  bc 12,30,0xFECCB84
+	   6: Js30o       	$0xFECCB84
+
+
+
+. 3627 FECCB78 8
+. 70 09 10 00 41 9E 00 08
+==== BB 3628 (0xFECCB80) approx BBs exec'd 0 ====
+
+	0xFECCB80:  41A2FFD4  bc 13,2,0xFECCB54
+	   0: Js02o       	$0xFECCB54
+
+
+
+. 3628 FECCB80 4
+. 41 A2 FF D4
+==== BB 3629 (0xFECCBC8) approx BBs exec'd 0 ====
+
+	0xFECCBC8:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFECCBCC:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFECCBD0:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xFECCBD4:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0xFECCBD8:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFECCBDC:  83C10018  lwz r30,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R30
+	  25: INCEIPL       	$4
+
+	0xFECCBE0:  83E1001C  lwz r31,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFECCBE4:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0xFECCBE8:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+
+. 3629 FECCBC8 36
+. 80 81 00 24 7F 83 E3 78 83 A1 00 14 83 81 00 10 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+==== BB 3630 (0xFE93CB8) approx BBs exec'd 0 ====
+
+	0xFE93CB8:  7F9FE840  cmpl cr7,r31,r29
+	   0: GETL       	R31, t0
+	   1: GETL       	R29, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE93CBC:  419CFFEC  bc 12,28,0xFE93CA8
+	   5: Js28o       	$0xFE93CA8
+
+
+
+. 3630 FE93CB8 8
+. 7F 9F E8 40 41 9C FF EC
+==== BB 3631 (0xFE93CC0) approx BBs exec'd 0 ====
+
+	0xFE93CC0:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE93CC4:  48062141  bl 0xFEF5E04
+	   3: MOVL       	$0xFE93CC8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFEF5E04  ($4)
+
+
+
+. 3631 FE93CC0 8
+. 7F 83 E3 78 48 06 21 41
+==== BB 3632 __GI__exit(0xFEF5E04) approx BBs exec'd 0 ====
+
+	0xFEF5E04:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEF5E08:  480B2049  bl 0xFFA7E50
+	   6: MOVL       	$0xFEF5E0C, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFFA7E50  ($4)
+
+
+
+. 3632 FEF5E04 8
+. 94 21 FF E0 48 0B 20 49
+==== BB 3633 (0xFEF5E0C) approx BBs exec'd 0 ====
+
+	0xFEF5E0C:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEF5E10:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEF5E14:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEF5E18:  93E1001C  stw r31,28(r1)
+	  13: GETL       	R31, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x1C, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEF5E1C:  7C7F1B78  or r31,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0xFEF5E20:  813E1C4C  lwz r9,7244(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x1C4C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R9
+	  25: INCEIPL       	$4
+
+	0xFEF5E24:  7FA91214  add r29,r9,r2
+	  26: GETL       	R9, t20
+	  27: GETL       	R2, t22
+	  28: ADDL       	t20, t22
+	  29: PUTL       	t22, R29
+	  30: INCEIPL       	$4
+
+	0xFEF5E28:  380000EA  li r0,234
+	  31: MOVL       	$0xEA, t24
+	  32: PUTL       	t24, R0
+	  33: INCEIPL       	$4
+
+	0xFEF5E2C:  7FE3FB78  or r3,r31,r31
+	  34: GETL       	R31, t26
+	  35: PUTL       	t26, R3
+	  36: INCEIPL       	$4
+
+	0xFEF5E30:  44000002  sc
+	  37: JMPo-sys       	$0xFEF5E34  ($4)
+
+
+
+. 3633 FEF5E0C 40
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 93 E1 00 1C 7C 7F 1B 78 81 3E 1C 4C 7F A9 12 14 38 00 00 EA 7F E3 FB 78 44 00 00 02
diff --git a/VEX/orig_ppc32/loadsafp.orig b/VEX/orig_ppc32/loadsafp.orig
new file mode 100644
index 0000000..fea268b
--- /dev/null
+++ b/VEX/orig_ppc32/loadsafp.orig
@@ -0,0 +1,22354 @@
+==3281== Nulgrind, a binary JIT-compiler for Linux.
+==3281== Copyright (C) 2002-2004, and GNU GPL'd, by Nicholas Nethercote.
+==3281== Using valgrind-2.2.0-ppc, a program supervision framework for Linux.
+==3281== Copyright (C) 2000-2004, and GNU GPL'd, by Julian Seward et al.
+==3281== For more details, rerun with: -v
+==3281== 
+. 0 254804D4 20
+. 7C 23 0B 78 38 80 00 00 38 21 FF F0 90 81 00 00 4B FF 15 81
+
+. 0 25471A64 80
+. 7C 08 02 A6 39 60 00 4B 94 21 FD 50 7D 69 03 A6 92 E1 02 8C 90 01 02 B4 7C 77 1B 78 93 01 02 90 38 00 00 00 93 21 02 94 39 21 00 30 93 41 02 98 93 61 02 9C 93 81 02 A0 93 A1 02 A4 93 C1 02 A8 93 E1 02 AC 90 09 00 00 39 29 00 04 42 00 FF F8
+
+. 0 25471AA8 12
+. 90 09 00 00 39 29 00 04 42 00 FF F8
+
+. 0 25471AB4 4
+. 48 00 00 09
+
+. 0 25471ABC 12
+. 7C E8 02 A6 3B 01 00 10 48 02 55 3D
+
+. 0 25497000 4
+. 4E 80 00 21
+
+. 0 25471AC8 64
+. 7D 48 02 A6 81 87 00 00 81 0A 00 00 55 86 30 2E 7C C4 36 70 7C A8 38 50 39 01 00 30 7C E5 22 14 90 E1 00 10 80 6A 00 00 7C 07 18 2E 7D 27 1A 14 91 21 00 18 7D 2A 4B 78 2F 80 00 00 41 9E 00 68
+
+. 0 25471B08 64
+. 3F E0 6F FF 3F 60 70 00 3F 40 6F FF 3F 20 6F FF 3F A0 6F FF 3F 80 6F FF 7C 0B 03 78 63 E6 FF FF 63 65 00 21 63 44 FD FF 63 23 FE 34 63 BD FE FF 63 9C FF 40 2C 0B 00 21 55 60 10 3A 40 81 00 18
+
+. 0 25471B5C 16
+. 7D 48 01 2E 85 6A 00 08 2C 8B 00 00 40 86 FF D4
+
+. 0 25471B3C 12
+. 2C 0B 00 21 55 60 10 3A 40 81 00 18
+
+. 0 25471B48 12
+. 7C 0B 30 50 28 80 00 0F 41 85 02 D8
+
+. 0 25471B54 24
+. 7C 0B 28 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2C 8B 00 00 40 86 FF D4
+
+. 0 25471E28 24
+. 55 79 08 3C 7F 2C 0E 70 7D 89 60 F8 20 0C 00 31 2B 09 00 02 40 B9 FD 1C
+
+. 0 25471E40 12
+. 7F 4B 20 50 2B 9A 00 0B 41 9D 00 10
+
+. 0 25471E4C 12
+. 7C 0B 18 50 54 00 10 3A 4B FF FD 08
+
+. 0 25471B6C 12
+. 81 78 00 00 2F 0B 00 00 41 9A 00 B0
+
+. 0 25471B78 12
+. 81 28 00 10 2F 89 00 00 41 9E 00 10
+
+. 0 25471B84 24
+. 81 49 00 04 7C 6A 5A 14 90 69 00 04 81 28 00 0C 2C 09 00 00 41 82 00 10
+
+. 0 25471B9C 24
+. 80 A9 00 04 7C 85 5A 14 90 89 00 04 81 28 00 14 2C 89 00 00 41 86 00 10
+
+. 0 25471BB4 24
+. 80 E9 00 04 7C C7 5A 14 90 C9 00 04 81 28 00 18 2F 09 00 00 41 9A 00 10
+
+. 0 25471BCC 24
+. 83 A9 00 04 7F 9D 5A 14 93 89 00 04 81 28 00 1C 2F 89 00 00 41 9E 00 10
+
+. 0 25471BE4 24
+. 80 09 00 04 7F E0 5A 14 93 E9 00 04 81 28 00 5C 2C 09 00 00 41 82 00 10
+
+. 0 25471BFC 24
+. 83 29 00 04 7D 99 5A 14 91 89 00 04 81 28 00 C4 2C 89 00 00 41 86 00 10
+
+. 0 25471C14 32
+. 83 49 00 04 7D 1A 5A 14 91 09 00 04 80 E1 00 10 2F 07 00 00 39 60 FF FF 91 61 02 3C 40 9A 00 10
+
+. 0 25471C40 20
+. 7F 03 C3 78 38 80 00 00 38 A0 00 00 3B 60 00 00 48 00 EB E1
+
+. 0 25480830 16
+. 7C C8 02 A6 94 21 FF D0 7D 80 00 26 48 01 67 C5
+
+. 0 25480840 68
+. 93 21 00 14 7C 99 23 78 90 C1 00 34 80 03 00 7C 93 A1 00 24 7C 7D 1B 78 2F 80 00 00 93 C1 00 28 92 E1 00 0C 7F C8 02 A6 93 01 00 10 93 41 00 18 93 61 00 1C 93 81 00 20 93 E1 00 2C 91 81 00 08 41 9E 02 94
+
+. 0 25480884 56
+. 82 E3 00 28 3D 40 AA AA 61 48 AA AB 80 83 00 2C 80 F7 00 04 83 E4 00 04 7D 67 40 16 55 7B E8 FE 28 1B 20 00 57 63 08 3C 38 03 00 12 7D 20 1A 14 3A E9 C0 00 40 81 02 9C
+
+. 0 25480B54 8
+. 7C 17 03 78 4B FF FD 64
+
+. 0 254808BC 72
+. 56 E8 10 3A 2E 19 00 00 7F 48 FA 14 3C 60 7D 69 3C FA 00 01 57 4B 04 3E 38 87 80 00 3C C0 4E 80 54 89 84 3E 65 60 81 6B 65 2C 3D 6B 60 7C 03 A6 60 D8 04 20 91 9F 00 00 90 1F 00 04 93 9F 00 08 93 1F 00 0C 41 92 01 78
+
+. 0 25480A78 32
+. 81 7E 04 F0 31 59 FF FF 7C AA C9 10 31 4B FF FF 7C 8A 59 10 7C AA 20 39 39 40 00 04 41 82 00 10
+
+. 0 25480AA4 4
+. 41 92 00 B8
+
+. 0 25480B5C 8
+. 39 60 00 06 4B FF FF 4C
+
+. 0 25480AAC 8
+. 39 20 00 00 48 00 00 14
+
+. 0 25480AC4 8
+. 7E 09 58 40 41 90 FF EC
+
+. 0 25480AB4 24
+. 55 3A 10 3A 7E FA FA 14 7C 00 B8 6C 7D 29 52 14 7E 09 58 40 41 90 FF EC
+
+. 0 25480ACC 28
+. 55 68 10 3A 7C E8 FA 14 39 27 FF FC 7C 00 48 6C 7C 00 04 AC 39 20 00 00 48 00 00 14
+
+. 0 25480AF8 8
+. 7F 89 58 40 41 9C FF EC
+
+. 0 25480AE8 12
+. 55 3B 10 3A 7C DB FA 14 7C 00 37 AC
+
+. 0 25480AF4 12
+. 7D 29 52 14 7F 89 58 40 41 9C FF EC
+
+. 0 25480B00 12
+. 7C 68 FA 14 3B E3 FF FC 7C 00 FF AC
+
+. 0 25480B0C 72
+. 7C 00 04 AC 4C 00 01 2C 7F 23 CB 78 81 01 00 08 83 21 00 34 82 E1 00 0C 7D 00 81 20 7F 28 03 A6 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 25471C54 44
+. 38 E0 00 00 81 61 00 4C 39 40 00 01 91 41 02 64 2C 0B 00 00 93 61 02 60 93 61 02 70 93 61 02 58 93 61 02 6C 90 E1 02 54 41 82 00 18
+
+. 0 25471C80 28
+. 80 AB 00 04 80 81 00 50 90 A1 02 50 80 E4 00 04 90 E1 02 54 2C 83 00 00 41 86 00 10
+
+. 0 25471CA8 48
+. 3B 61 00 08 3B 20 00 02 81 41 00 D0 81 21 00 48 2F 8A 00 00 81 1B 02 48 81 7B 02 4C 83 49 00 04 39 20 00 00 7F 88 5A 14 83 A1 00 10 41 9E 00 08
+
+. 0 25471CD8 32
+. 81 2A 00 04 3C A0 AA AA 60 A4 AA AB 7D 4B 20 16 7D 0B 43 78 55 40 E8 FE 7C 00 48 40 40 81 00 08
+
+. 0 25471CF8 20
+. 7D 20 4B 78 1C 60 00 0C 7F EB 1A 14 7F 8B F8 40 40 9C 00 20
+
+. 0 25471D0C 28
+. 81 8B 00 08 80 CB 00 00 39 6B 00 0C 7C EC EA 14 7F 8B F8 40 7C E6 E9 2E 41 9C FF E8
+
+. 0 25471D28 8
+. 7C 9F E0 40 40 84 00 8C
+
+. 0 25471D30 32
+. 81 1F 00 04 81 7F 00 00 55 0A 06 3E 55 00 E1 36 2F 0A 00 16 7C C0 D2 14 7D 0B EA 14 41 9A 00 C4
+
+. 0 25471D50 8
+. 2F 8A 00 00 41 9E 00 58
+
+. 0 25471D58 12
+. 89 66 00 0C 55 69 E1 3F 41 82 01 4C
+
+. 0 25471D64 16
+. A0 06 00 0E 2F 00 00 00 39 20 00 00 41 9A 00 08
+
+. 0 25471D74 28
+. 81 21 00 10 80 A6 00 04 7D 29 2A 14 2F 8A 00 01 80 7F 00 08 7D 29 1A 14 41 9E 00 78
+
+. 0 25471D90 8
+. 2C 0A 00 14 41 82 00 70
+
+. 0 25471E04 12
+. 3B FF 00 0C 91 28 00 00 4B FF FF A4
+
+. 0 25471DB0 8
+. 7C 9F E0 40 41 84 FF 7C
+
+. 0 25471D98 20
+. 7F 03 C3 78 7F E4 FB 78 38 A0 00 00 7C C7 33 78 48 00 EF 01
+
+. 0 25480CA8 16
+. 2B 8A 00 4D 7C 08 02 A6 94 21 FF D0 48 01 63 4D
+
+. 0 25480CB8 68
+. 93 41 00 18 7D 3A 4B 78 93 61 00 1C 7C DB 33 78 93 81 00 20 7C 9C 23 78 93 A1 00 24 7C BD 2B 78 93 C1 00 28 7F C8 02 A6 93 E1 00 2C 7D 5F 53 78 90 01 00 34 90 61 00 08 90 E1 00 10 91 01 00 0C 41 9D 01 40
+
+. 0 25480CFC 24
+. 81 7E 04 50 55 44 10 3A 7C 64 58 2E 7D 23 5A 14 7D 29 03 A6 4E 80 04 20
+
+. 0 25480FD8 24
+. 80 A1 00 0C 7F A5 D0 50 57 BF 30 32 7F E9 36 70 7F 89 E8 00 41 9E 02 54
+
+. 0 25481240 16
+. 55 3A 01 BA 67 40 48 00 90 05 00 00 4B FF FB B4
+
+. 0 25480E00 16
+. 7C 00 28 6C 7C 00 04 AC 83 81 00 0C 7C 00 E7 AC
+
+. 0 25480E10 40
+. 80 61 00 34 83 41 00 18 83 61 00 1C 7C 68 03 A6 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 25471DAC 12
+. 3B FF 00 0C 7C 9F E0 40 41 84 FF 7C
+
+. 0 25471DB8 12
+. 37 39 FF FF 3B 7B 00 0C 40 80 FE F0
+
+. 0 25471CB0 40
+. 81 41 00 D0 81 21 00 48 2F 8A 00 00 81 1B 02 48 81 7B 02 4C 83 49 00 04 39 20 00 00 7F 88 5A 14 83 A1 00 10 41 9E 00 08
+
+. 0 25471CFC 16
+. 1C 60 00 0C 7F EB 1A 14 7F 8B F8 40 40 9C 00 20
+
+. 0 25471DC4 12
+. 7E E3 BB 78 7F 04 C3 78 4B FF FB BD
+
+. 0 25471988 12
+. 94 21 FF E0 7D 28 02 A6 48 02 56 71
+
+. 0 25471994 68
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 91 21 00 24 38 A0 01 2C 80 04 00 00 83 BE 04 C8 81 84 00 08 38 84 00 20 90 1D 01 B8 91 9D 01 C0 93 61 00 0C 7C 7B 1B 78 93 81 00 10 38 7D 01 D8 3B 9D 01 B8 48 01 22 0D
+
+. 0 25483BE0 52
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C 7B 1B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C 7F 1B 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+
+. 0 25483C14 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+
+. 0 25483C40 8
+. 73 A0 00 03 40 82 00 6C
+
+. 0 25483C48 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 00 71
+
+. 0 25483CC4 16
+. 54 A0 07 7E 7D 88 02 A6 2B 80 00 07 48 01 33 31
+
+. 0 25483CD4 20
+. 94 21 FF F0 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 41 9D 00 28
+
+. 0 25483CE8 24
+. 81 7E 04 90 54 00 10 3A 7C CB 00 2E 7D 26 5A 14 7D 29 03 A6 4E 80 04 20
+
+. 0 25483D94 20
+. 81 24 00 00 38 63 FF E8 38 84 FF EC 38 A5 00 05 4B FF FF 98
+
+. 0 25483D3C 32
+. 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 25483D0C 80
+. 80 04 00 00 91 23 00 00 81 24 00 04 90 03 00 04 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 25483D5C 16
+. 83 C1 00 08 38 21 00 10 91 23 00 00 4E 80 00 20
+
+. 0 25483C58 24
+. 57 86 00 3A 57 85 07 BE 7F FF 32 14 7F BD 32 14 2C 85 00 00 41 86 00 1C
+
+. 0 25483C88 40
+. 81 01 00 24 7F 63 DB 78 83 81 00 10 83 61 00 0C 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 254719D8 8
+. 7F 83 E3 78 48 00 6B 91
+
+. 0 2547856C 16
+. 81 23 00 30 94 21 FF F0 2F 89 00 00 41 9E 00 24
+
+. 0 2547857C 40
+. 81 69 00 04 81 4B 00 00 38 8B 00 08 90 83 01 70 55 49 10 3A 91 43 01 6C 7C 04 4A 14 90 03 01 74 38 21 00 10 4E 80 00 20
+
+. 0 254719E0 64
+. 80 FE 04 FC 80 DE 05 00 39 40 FF FF 80 BE 04 A4 39 00 00 01 81 7E 04 C0 7F 63 DB 78 80 9E 00 24 91 5D 03 E4 93 9D 01 CC 91 1D 03 30 90 FD 03 58 90 DD 03 5C 90 BD 03 60 90 2B 00 00 48 00 D6 29
+
+. 0 2547F044 16
+. 7D 88 02 A6 94 21 FD E0 38 E3 00 04 48 01 7F B1
+
+. 0 2547F054 136
+. 93 C1 02 18 7F C8 02 A6 91 81 02 24 81 03 00 00 80 BE 04 C0 55 06 10 3A 81 3E 04 D4 7D 46 3A 14 90 65 00 00 80 0A 00 04 39 4A 00 04 80 7E 04 D0 2F 80 00 00 81 7E 04 B8 92 A1 01 F4 3A A0 00 00 92 C1 01 F8 3A C0 00 00 92 E1 01 FC 7C 97 23 78 93 01 02 00 3B 00 00 00 93 21 02 04 3B 20 00 00 93 E1 02 1C 3B E0 00 00 93 41 02 08 93 61 02 0C 93 81 02 10 93 A1 02 14 91 03 00 00 90 E9 00 00 91 4B 00 00 41 9E 00 10
+
+. 0 2547F0DC 12
+. 84 8A 00 04 2F 84 00 00 40 9E FF F8
+
+. 0 2547F0E8 24
+. 3B 6A 00 13 38 0A 00 04 57 6B 00 36 83 4B 00 00 2F 9A 00 10 41 9D 00 08
+
+. 0 2547F104 44
+. 7C 08 03 78 83 5E 04 F4 81 68 00 00 3B 80 00 00 81 3E 03 EC 2C 0B 00 00 83 BE 04 E0 90 09 00 00 93 A1 01 E0 93 9A 00 0C 41 82 00 4C
+
+. 0 2547F130 16
+. 7D 6A 5B 78 38 0A FF FD 28 80 00 14 41 85 00 24
+
+. 0 2547F140 24
+. 81 7E 03 F4 54 05 10 3A 7C 65 58 2E 7C E3 5A 14 7C E9 03 A6 4E 80 04 20
+
+. 0 2547F160 24
+. 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FF C0
+
+. 0 2547F134 12
+. 38 0A FF FD 28 80 00 14 41 85 00 24
+
+. 0 2547F3D4 12
+. 81 3E 04 F0 2F 09 00 00 41 BA FD 84
+
+. 0 2547F3E0 36
+. 80 68 00 04 90 69 00 00 81 48 00 00 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 34
+
+. 0 2547F158 32
+. 83 A8 00 04 93 BA 00 3C 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FF C0
+
+. 0 2547F4BC 16
+. 81 48 00 04 91 5A 00 04 81 48 00 00 4B FF FF 24
+
+. 0 2547F3EC 24
+. 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 34
+
+. 0 2547F418 16
+. 81 28 00 04 91 3A 00 1C 81 48 00 00 4B FF FF C8
+
+. 0 2547F4EC 28
+. 38 A0 00 01 82 C8 00 04 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 30
+
+. 0 2547F4CC 28
+. 38 A0 00 01 82 A8 00 04 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 50
+
+. 0 2547F494 36
+. 80 08 00 04 38 A0 00 01 81 48 00 00 90 01 01 E0 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 80
+
+. 0 2547F470 32
+. 38 A0 00 01 80 C8 00 04 7C AB 50 30 85 48 00 08 7F 39 32 78 7F FF 5B 78 2F 8A 00 00 40 9E FC A8
+
+. 0 2547F44C 32
+. 38 A0 00 01 81 88 00 04 7C AB 50 30 85 48 00 08 7F 18 62 78 7F FF 5B 78 2F 8A 00 00 40 9E FC CC
+
+. 0 2547F3A4 44
+. 83 68 00 04 38 A0 00 01 80 9E 04 B4 3B E0 FF FF 81 48 00 00 93 64 00 00 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 68
+
+. 0 2547F3D0 4
+. 4B FF FD A8
+
+. 0 2547F178 8
+. 38 61 00 50 48 00 36 4D
+
+. 0 254827C8 8
+. 38 00 00 7A 44 00 00 02
+
+. 0 254827D0 4
+. 4C A3 00 20
+
+. 0 2547F180 12
+. 2C 03 00 00 3B A1 00 10 40 82 01 DC
+
+. 0 2547F18C 32
+. 3B A1 00 D2 89 3D 00 00 7F A3 EB 78 38 E0 00 00 39 00 00 00 39 49 FF D0 2B 8A 00 09 41 9D 00 68
+
+. 0 2547F1AC 24
+. 89 63 00 01 39 29 FF D0 39 43 00 01 38 0B FF D0 2B 80 00 09 41 9D 00 20
+
+. 0 2547F1E0 24
+. 2C 8B 00 2E 54 FB 40 2E 7F 67 4B 78 39 08 00 01 38 6A 00 01 40 86 00 14
+
+. 0 2547F1F8 16
+. 89 2A 00 01 3B 89 FF D0 28 1C 00 09 40 81 FF A8
+
+. 0 2547F208 8
+. 2F 08 00 02 41 99 00 10
+
+. 0 2547F21C 16
+. 3C A0 00 02 60 AB 02 04 7F 87 58 40 40 9D 03 B4
+
+. 0 2547F22C 12
+. 90 FA 00 08 38 80 00 6E 48 00 3B CD
+
+. 0 25482E00 72
+. 50 84 44 2E 39 60 FF FF 50 84 80 1E 54 6A 1E F8 3C C0 FE FF 3C E0 7F 7F 54 68 00 3A 38 C6 FE FF 38 E7 7F 7F 80 A8 00 00 7D 6B 54 30 7C A5 5B 38 7C 06 2A 14 7C E9 28 F8 7C 00 48 39 7C 8C 2A 78 7D 8C 5B 38 48 00 00 20
+
+. 0 25482E64 12
+. 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+. 0 25482E48 20
+. 84 A8 00 04 7C 00 48 39 7C 06 2A 14 7C E9 28 F8 40 82 00 5C
+
+. 0 25482E5C 20
+. 7C 00 48 39 7C 8C 2A 78 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+. 0 25482E70 12
+. 7C 00 48 39 38 60 00 00 4D 82 00 20
+
+. 0 2547F238 8
+. 2C 83 00 00 41 86 02 F4
+
+. 0 2547F530 20
+. 81 3A 00 08 3C 00 00 02 60 03 05 44 7C 89 18 40 41 A5 FD 24
+
+. 0 2547F264 12
+. 2F 1F FF FF 83 BE 04 B4 41 9A 00 30
+
+. 0 2547F29C 12
+. 83 1A 00 04 2F 98 00 00 40 9E 00 0C
+
+. 0 2547F2B0 12
+. 38 60 00 00 83 9E 05 00 48 00 11 19
+
+. 0 254803D0 16
+. 94 21 FF F0 90 61 00 08 38 00 00 2D 44 00 00 02
+
+. 0 254803E0 12
+. 80 C1 00 08 7C 88 02 A6 48 01 6C 19
+
+. 0 254803EC 32
+. 7C A8 02 A6 80 A5 05 04 7C 88 03 A6 90 65 00 00 7C 06 18 40 38 21 00 10 38 60 00 00 4C A1 00 20
+
+. 0 2547F2BC 36
+. 81 3E 05 04 80 9A 00 04 81 69 00 00 7F 24 00 D0 7C 8B E0 40 7F 64 5A 14 39 9B FF FF 7D 83 C8 38 40 84 02 D8
+
+. 0 2547F2E0 12
+. 80 7A 00 0C 2C 03 00 00 41 82 00 0C
+
+. 0 2547F2F4 8
+. 38 60 00 00 48 00 11 1D
+
+. 0 25480414 12
+. 94 21 FF E0 7C 88 02 A6 48 01 6B E5
+
+. 0 25480420 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 81 00 24 93 A1 00 14 7C 7D 1B 78 83 FE 05 04 38 60 00 00 80 1F 00 00 2F 80 00 00 41 9E 00 3C
+
+. 0 2548044C 16
+. 81 3E 04 9C 80 A9 00 00 2C 05 00 00 40 82 00 2C
+
+. 0 2548045C 8
+. 2F 1D 00 00 40 9A 00 3C
+
+. 0 25480464 32
+. 80 7F 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547F2FC 8
+. 7F 83 E0 00 41 9E 02 C4
+
+. 0 2547F304 12
+. 80 BD 00 00 2C 85 00 00 40 86 02 A0
+
+. 0 2547F310 20
+. 7E C3 B3 78 7E E8 03 A6 7E A4 AB 78 38 A1 01 E0 4E 80 00 21
+
+. 0 254721F0 12
+. 94 21 FF 30 7C 08 02 A6 48 02 4E 09
+
+. 0 254721FC 204
+. 93 C1 00 C8 7F C8 02 A6 92 01 00 90 90 01 00 D4 7D 60 00 26 91 C1 00 88 82 1E 04 B4 81 9E 04 B8 81 50 00 00 81 DE 04 C8 2F 8A 00 00 81 3E 00 20 91 61 00 84 81 1E 04 D8 81 6C 00 00 80 FE 04 B0 80 DE 00 1C 80 1E 04 A8 93 E1 00 CC 7C 3F 0B 78 91 E1 00 8C 92 41 00 98 3A 40 00 00 92 61 00 9C 3A 60 00 00 92 81 00 A0 3A 80 00 00 92 A1 00 A4 3A A0 00 00 92 C1 00 A8 7C B6 2B 78 92 E1 00 AC 7C 97 23 78 93 01 00 B0 7C 78 1B 78 93 21 00 B4 3B 20 00 00 91 2E 03 FC 92 21 00 94 93 41 00 B8 93 61 00 BC 93 81 00 C0 93 A1 00 C4 91 0E 01 B4 90 EE 04 2C 90 CE 03 F8 90 0E 04 04 91 7F 00 38 81 FE 04 F4 81 3E 00 7C 41 9E 00 08
+
+. 0 254722CC 16
+. 82 3E 00 40 91 2F 00 54 38 7F 00 38 48 00 D9 51
+
+. 0 2547FC28 24
+. 94 21 FF F0 39 40 00 00 81 63 00 00 81 2B 00 00 2F 89 00 00 41 9E 00 1C
+
+. 0 2547FC40 12
+. 88 09 00 00 2C 00 00 4C 41 82 00 1C
+
+. 0 2547FC4C 12
+. 85 2B 00 04 2F 89 00 00 40 9E FF EC
+
+. 0 2547FC64 12
+. 88 89 00 01 2C 84 00 44 40 86 FF E0
+
+. 0 2547FC70 12
+. 88 A9 00 02 2F 05 00 5F 40 9A FF D4
+
+. 0 2547FC7C 16
+. 38 CB 00 04 39 49 00 03 90 C3 00 00 4B FF FF D0
+
+. 0 2547FC58 12
+. 7D 43 53 78 38 21 00 10 4E 80 00 20
+
+. 0 254722DC 8
+. 7C 7D 1B 79 41 82 00 64
+
+. 0 254722E4 16
+. 88 1D 00 00 39 60 00 00 7C 09 03 79 40 A2 00 18
+
+. 0 25472308 8
+. 2C 89 00 3D 40 86 FF EC
+
+. 0 254722F8 16
+. 39 6B 00 01 7C 1D 58 AE 7C 09 03 79 41 82 00 0C
+
+. 0 25472310 8
+. 2D 80 00 3D 40 8E FF C0
+
+. 0 25472318 12
+. 38 0B FF FC 2A 00 00 10 41 B1 FF B4
+
+. 0 25472324 24
+. 80 BE 00 80 54 1A 10 3A 7C 9A 28 2E 7C 64 2A 14 7C 69 03 A6 4E 80 04 20
+
+. 0 25473248 16
+. 80 9E 00 C4 7F A3 EB 78 38 A0 00 0C 48 01 00 45
+
+. 0 25483298 32
+. 2B 85 00 0F 94 21 FF E0 93 81 00 10 7C 6B 1B 78 93 A1 00 14 93 C1 00 18 93 E1 00 1C 40 9D 00 BC
+
+. 0 25483370 8
+. 2C 05 00 00 41 82 00 24
+
+. 0 25483378 24
+. 89 8B 00 00 39 6B 00 01 8B A4 00 00 38 84 00 01 7C 7D 60 51 40 82 00 10
+
+. 0 25483390 8
+. 34 A5 FF FF 40 82 FF E4
+
+. 0 25483398 28
+. 38 60 00 00 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25473258 8
+. 2F 83 00 00 40 9E 04 28
+
+. 0 25473260 16
+. 81 3E 00 4C 38 1D 00 0D 90 09 00 00 4B FF F0 68
+
+. 0 254722D4 8
+. 38 7F 00 38 48 00 D9 51
+
+. 0 25473218 16
+. 80 9E 00 C0 7F A3 EB 78 38 A0 00 07 48 01 00 75
+
+. 0 2548339C 24
+. 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25473228 8
+. 2C 83 00 00 40 86 04 8C
+
+. 0 254736B8 16
+. 80 9E 00 E0 7F A3 EB 78 38 A0 00 07 48 00 FB D5
+
+. 0 254736C8 8
+. 2C 03 00 00 40 82 00 88
+
+. 0 254736D0 12
+. 38 FD 00 08 90 F1 00 00 4B FF EB FC
+
+. 0 25472344 16
+. 83 70 00 00 93 3F 00 30 2D 9B 00 00 40 8E 12 8C
+
+. 0 25472354 32
+. 81 3E 00 34 30 14 FF FF 7F 40 A1 10 83 A9 00 00 31 3D FF FF 7F 89 E9 10 7F 80 D0 39 40 82 0B 58
+
+. 0 25472374 16
+. 83 56 00 00 80 BE 04 E0 7D 9A 28 00 41 8E 09 40
+
+. 0 25472384 28
+. 80 7E 00 84 38 A0 00 00 38 C0 00 00 38 E0 00 00 7C 64 1B 78 39 00 00 00 48 00 6D 59
+
+. 0 254790F4 12
+. 94 21 FF D0 7C 08 02 A6 48 01 DF 05
+
+. 0 25479100 80
+. 93 C1 00 28 7F C8 02 A6 92 C1 00 08 92 E1 00 0C 7C 77 1B 78 93 01 00 10 7C 83 23 78 93 21 00 14 7C B8 2B 78 93 41 00 18 7C F6 3B 78 93 61 00 1C 7C 9A 23 78 93 81 00 20 7D 19 43 78 93 E1 00 2C 7C DC 33 78 90 01 00 34 93 A1 00 24 48 00 9E 85
+
+. 0 25482FD0 56
+. 54 64 00 3A 3C E0 7F 7F 54 65 1E F8 81 04 00 00 39 20 FF FF 38 E7 7F 7F 7D 29 2C 30 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 40 00 F8 7C 08 48 39 7C 60 11 20 40 82 00 70
+
+. 0 25483074 20
+. 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+
+. 0 25479150 20
+. 38 80 00 01 7C 69 1B 78 38 63 02 4D 3B 69 00 01 48 01 E8 CD
+
+. 0 25497A2C 4
+. 4B FE 84 8C
+
+. 0 2547FEB8 20
+. 7C 63 21 D6 7C A8 02 A6 94 21 FF F0 90 A1 00 14 48 01 7B 5D
+
+. 0 25497A24 4
+. 4B FE 84 6C
+
+. 0 2547FE90 24
+. 7C 88 02 A6 94 21 FF F0 90 81 00 14 7C 64 1B 78 38 60 00 08 48 01 7B 79
+
+. 0 25497A1C 4
+. 4B FE 83 2C
+
+. 0 2547FD48 12
+. 94 21 FF E0 7C 08 02 A6 48 01 72 B1
+
+. 0 2547FD54 84
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 90 01 00 24 7C 6A 1B 78 93 81 00 10 38 A0 00 03 83 7E 04 18 38 C0 00 22 93 A1 00 14 38 E0 FF FF 81 7B 00 00 7F A3 00 D0 93 E1 00 1C 39 00 00 00 2F 8B 00 00 81 9E 05 00 38 60 00 00 7C 9C 23 78 83 FE 04 1C 40 9E 00 24
+
+. 0 2547FDA8 64
+. 81 3E 04 F4 91 9F 00 00 81 69 00 04 7C 8B 62 14 39 24 FF FF 7C 8B 00 D0 7D 2B 20 38 91 7B 00 00 81 3F 00 00 7C 89 52 14 38 04 FF FF 7C 0A E8 38 7D 8A E2 14 91 5F 00 00 7C 0C 58 40 40 80 00 3C
+
+. 0 2547FDE8 56
+. 80 7F 00 00 80 C1 00 24 7D 03 E2 14 80 FE 04 20 91 1F 00 00 7C C8 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 90 67 00 00 4E 80 00 20
+
+. 0 2547FEA8 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 2547FECC 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 25479164 12
+. 7C 7F 1B 79 38 60 00 00 41 82 01 74
+
+. 0 25479170 28
+. 3B BF 02 40 7F 44 D3 78 7F 65 DB 78 93 FF 00 14 93 BF 00 1C 38 7F 02 4C 48 00 AA 59
+
+. 0 25483C68 8
+. 2C 85 00 00 41 86 00 1C
+
+. 0 25483C70 24
+. 7C A9 03 A6 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+
+. 0 2547918C 84
+. 38 A0 00 01 90 BD 00 08 1D 59 00 18 80 DF 01 80 38 80 FF FF 80 BE 04 C8 53 06 F0 02 93 9F 01 68 90 DF 01 80 38 DF 01 AC 7D 6A 28 2E 90 7F 02 40 38 60 00 04 2F 8B 00 00 90 9F 02 2C 90 7F 01 BC 38 80 00 00 92 FF 00 04 93 3F 00 18 90 DF 01 C0 40 9E 01 3C
+
+. 0 254791E0 52
+. 7F EA 29 2E 2F 1C 00 00 7D 4A 2A 14 81 65 01 98 83 2A 00 04 81 85 01 9C 3B 19 00 01 31 0C 00 01 7C EB 01 94 93 0A 00 04 90 E5 01 98 91 05 01 9C 40 BA 00 0C
+
+. 0 25479214 4
+. 48 00 01 64
+
+. 0 25479378 8
+. 7F FC FB 78 4B FF FE AC
+
+. 0 25479228 12
+. 2F 84 00 00 39 3C 01 58 41 9E 00 10
+
+. 0 25479240 12
+. 56 CC EF FE 7D 8B 20 39 41 82 00 10
+
+. 0 25479258 28
+. 54 9A 10 3A 7D 3A 31 2E 3B 7F 01 58 93 7F 01 C4 8B B7 00 00 2F 1D 00 00 41 9A 00 6C
+
+. 0 254792DC 60
+. 7F E3 FB 78 83 81 00 34 82 C1 00 08 7F 88 03 A6 82 E1 00 0C 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 254723A0 12
+. 82 8E 00 00 2D 94 00 00 41 8E 0C 90
+
+. 0 254723AC 84
+. 80 D6 00 00 93 14 01 4C 90 D4 01 50 B2 F4 01 54 56 EC 28 34 81 14 01 78 7C 8C C2 14 80 F4 01 7C 7E 04 C0 40 39 28 00 01 3B 27 00 01 39 00 00 00 38 E0 00 00 3B 80 FF FF 7F 0A C3 78 93 94 01 A0 91 34 01 78 93 34 01 7C 91 14 01 A4 90 F4 01 A8 40 91 00 C8
+
+. 0 25472400 24
+. 3F 60 64 74 83 BE 00 44 80 DE 00 48 63 63 E5 52 38 A0 00 01 48 00 00 28
+
+. 0 2547243C 12
+. 80 0A 00 00 2F 00 00 06 41 9A 07 98
+
+. 0 25472BDC 16
+. 81 6A 00 08 7F 4B C0 50 93 54 00 00 4B FF F8 48
+
+. 0 25472430 12
+. 39 4A 00 20 7F 84 50 40 40 9D 00 84
+
+. 0 25472448 8
+. 2B 80 00 06 40 BD FF CC
+
+. 0 25472418 8
+. 2C 00 00 02 41 82 08 54
+
+. 0 25472420 8
+. 28 80 00 02 41 85 07 E4
+
+. 0 25472C08 8
+. 2F 00 00 03 40 9A F8 24
+
+. 0 25472C10 36
+. 82 AE 01 C0 7F A7 EB 78 81 74 00 00 2F 15 00 00 81 2A 00 08 93 AE 01 D4 7C 0B 4A 14 90 1D 00 00 40 9A 04 44
+
+. 0 25473074 8
+. 3A A0 00 01 4B FF F3 B8
+
+. 0 25472428 8
+. 2E 00 00 01 41 92 0B 80
+
+. 0 25472FAC 36
+. 80 EA 00 1C 81 0A 00 08 7D 87 00 D0 80 F4 00 00 7D 1C 60 38 81 34 01 A0 7C 07 E2 14 7F 09 00 40 41 99 00 B0
+
+. 0 2547307C 12
+. 90 14 01 A0 81 0A 00 08 4B FF FF 4C
+
+. 0 25472FD0 24
+. 83 6A 00 14 7E E7 42 14 81 14 01 A4 7D 77 DA 14 7F 88 58 40 40 9C 00 08
+
+. 0 25472FE8 16
+. 91 74 01 A4 83 4A 00 18 73 49 00 01 41 A2 F4 3C
+
+. 0 25472FF8 12
+. 80 14 01 A8 7C 80 58 40 40 A4 F4 30
+
+. 0 25473004 8
+. 91 74 01 A8 4B FF F4 28
+
+. 0 25472C70 20
+. 82 D4 00 00 83 2A 00 08 7C 16 CA 14 90 14 00 08 4B FF F7 B0
+
+. 0 25472450 16
+. 3E E0 64 74 62 E9 E5 51 7F 80 48 00 41 9E 08 28
+
+. 0 25472C84 12
+. 80 EA 00 18 90 EE 04 00 4B FF F7 A4
+
+. 0 254724BC 20
+. 81 14 01 A4 80 F4 01 A8 81 34 02 18 2C 09 00 00 41 82 00 10
+
+. 0 254724DC 8
+. 2C 88 00 00 40 86 00 0C
+
+. 0 254724EC 8
+. 2E 07 00 00 40 92 00 0C
+
+. 0 254724FC 12
+. 80 AE 01 D4 2F 05 00 00 40 9A 00 1C
+
+. 0 25472520 8
+. 2E 13 00 00 40 92 01 A8
+
+. 0 25472528 12
+. 81 54 00 08 2C 0A 00 00 41 82 01 94
+
+. 0 25472534 16
+. 81 6A 00 00 39 14 00 20 2C 8B 00 00 41 86 00 64
+
+. 0 25472544 60
+. 3E C0 6F FF 3C C0 70 00 3C 00 6F FF 3F 40 6F FF 3E E0 6F FF 3E 60 6F FF 62 C7 FF FF 60 C6 00 21 60 05 FD FF 63 44 FE 34 62 E3 FE FF 62 7D FF 40 2F 0B 00 21 55 60 10 3A 40 99 00 18
+
+. 0 25472594 16
+. 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+. 0 25472574 12
+. 2F 0B 00 21 55 60 10 3A 40 99 00 18
+
+. 0 25472580 12
+. 7F 2B 38 50 2B 99 00 0F 41 9D 07 08
+
+. 0 2547258C 24
+. 7C 0B 30 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+. 0 254725A4 12
+. 81 74 00 00 2C 0B 00 00 41 82 00 AC
+
+. 0 25472658 12
+. 81 28 00 78 2C 09 00 00 41 82 00 30
+
+. 0 25472690 12
+. 81 28 00 98 2C 89 00 00 41 86 00 18
+
+. 0 254726B0 12
+. 81 68 00 74 2F 0B 00 00 41 9A 00 0C
+
+. 0 254726C4 8
+. 7E 83 A3 78 48 00 5E A5
+
+. 0 254726CC 12
+. 81 1F 00 30 2F 88 00 02 41 9E 0A 04
+
+. 0 254726D8 4
+. 41 92 08 8C
+
+. 0 25472F64 12
+. 82 BE 00 4C 80 75 00 00 48 00 21 69
+
+. 0 254750D4 12
+. 94 21 FF D0 7C 08 02 A6 48 02 1F 25
+
+. 0 254750E0 76
+. 93 C1 00 28 7F C8 02 A6 93 01 00 10 90 01 00 34 93 21 00 14 7C 79 1B 78 83 1E 04 F4 80 BE 01 54 80 98 00 10 80 DE 01 5C 80 78 00 0C 93 61 00 1C 93 81 00 20 93 E1 00 2C 7C 3F 0B 78 92 E1 00 0C 93 41 00 18 93 A1 00 24 48 00 A6 29
+
+. 0 2547F750 12
+. 94 21 FF B0 7C E8 02 A6 48 01 78 A9
+
+. 0 2547F75C 136
+. 93 C1 00 48 7F C8 02 A6 93 81 00 40 90 E1 00 54 7D 80 00 26 92 01 00 10 7C D0 33 78 81 3E 04 F4 92 61 00 1C 7C 93 23 78 81 69 00 40 80 09 00 3C 92 81 00 20 7C B4 2B 78 7C 1C 58 38 92 C1 00 28 2F 9C 00 00 93 E1 00 4C 92 21 00 14 7C 3F 0B 78 92 41 00 18 7C 76 1B 78 92 A1 00 24 30 03 FF FF 7D 40 19 10 92 E1 00 2C 39 60 00 00 93 01 00 30 93 21 00 34 93 41 00 38 93 61 00 3C 93 A1 00 44 91 81 00 0C 41 9E 00 28
+
+. 0 2547F808 48
+. 3B 0A 00 01 82 21 00 00 57 12 18 38 3B 20 00 00 3A F2 00 1E 3B 40 00 00 56 F5 00 36 7D 55 00 D0 7E 21 51 6E 39 01 00 17 55 1B 00 36 41 9E 00 48
+
+. 0 2547F87C 8
+. 2F 96 00 00 41 9E 00 18
+
+. 0 2547F898 32
+. 2E 18 00 01 57 29 18 38 83 3E 04 10 7E 69 DA 14 3A C0 00 03 7F 29 D9 2E 92 D3 00 04 41 92 03 24
+
+. 0 2547FBD8 12
+. 81 9B 00 04 39 0C 00 01 4B FF FD 30
+
+. 0 2547F910 24
+. 3B A0 00 01 7F AA C0 30 55 5A 18 38 91 54 00 00 7C 7A 42 14 48 01 81 01
+
+. 0 2547FDC8 32
+. 81 3F 00 00 7C 89 52 14 38 04 FF FF 7C 0A E8 38 7D 8A E2 14 91 5F 00 00 7C 0C 58 40 40 80 00 3C
+
+. 0 2547F928 8
+. 7C 76 1B 79 41 82 02 E8
+
+. 0 2547F930 4
+. 41 92 01 FC
+
+. 0 2547FB2C 60
+. 83 34 00 00 39 00 00 00 82 7B 00 04 3B 00 00 2F 57 29 18 38 91 16 00 0C 7C 69 B2 14 3B 93 00 01 93 96 00 04 3A 40 00 02 90 76 00 00 90 76 00 08 80 BB 00 04 80 9B 00 00 48 00 3E BD
+
+. 0 25483A20 44
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 E1 00 1C 7C 9F 23 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+
+. 0 25483AA0 8
+. 2C 85 00 00 41 86 00 1C
+
+. 0 25483AA8 24
+. 7C A9 03 A6 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+
+. 0 25483AAC 20
+. 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+
+. 0 25483AC0 36
+. 81 01 00 24 7F A3 EB 78 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547FB68 112
+. 9B 03 00 00 92 54 00 00 83 76 00 04 7E C3 B3 78 93 70 00 00 81 01 00 00 82 08 00 04 80 E8 FF BC 7E 08 03 A6 82 28 FF C4 82 08 FF C0 7C E0 81 20 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+. 0 2547512C 20
+. 81 3E 01 60 83 7E 01 64 90 69 00 00 38 60 00 0C 48 02 28 E9
+
+. 0 25475140 12
+. 7C 7C 1B 79 90 7B 00 00 41 82 01 88
+
+. 0 2547514C 40
+. 83 5E 01 54 3D 40 CC CC 61 46 CC CD 81 1A 00 00 55 07 10 3A 38 A7 00 27 7C 65 30 16 54 7D E1 3E 1C 7D 01 40 48 02 28 B5
+
+. 0 25475174 24
+. 80 9B 00 00 90 7C 00 00 81 24 00 00 80 DE 01 88 2F 89 00 00 41 9E 01 4C
+
+. 0 2547518C 96
+. 82 FE 04 C8 39 60 00 00 1C 7D 00 14 80 FA 00 00 81 1E 01 4C 39 40 00 00 80 9E 01 94 38 C0 00 00 80 BE 01 48 91 7B 00 04 7D 2B 4B 78 91 37 01 B0 55 5B 10 3A 7C 86 38 40 7F 5B 28 2E 91 7C 00 00 3B 9C 00 04 7D 9A 42 14 91 0B 00 0C 90 8B 00 04 39 0C 00 01 90 CB 00 08 93 4B 00 10 40 84 00 1C
+
+. 0 254751EC 24
+. 7C E9 03 A6 38 00 00 00 39 2B 00 14 90 09 00 00 39 29 00 04 42 00 FF F8
+
+. 0 254751F8 12
+. 90 09 00 00 39 29 00 04 42 00 FF F8
+
+. 0 25475204 12
+. 2F 0A 00 01 39 4A 00 01 41 9A 00 BC
+
+. 0 25475210 20
+. 7C 03 5A 14 28 0A 00 01 90 0B 00 00 7D 6B 1A 14 40 A1 FF 9C
+
+. 0 254751BC 48
+. 55 5B 10 3A 7C 86 38 40 7F 5B 28 2E 91 7C 00 00 3B 9C 00 04 7D 9A 42 14 91 0B 00 0C 90 8B 00 04 39 0C 00 01 90 CB 00 08 93 4B 00 10 40 84 00 1C
+
+. 0 254752C8 8
+. 38 00 00 00 4B FF FF 48
+
+. 0 25475214 16
+. 28 0A 00 01 90 0B 00 00 7D 6B 1A 14 40 A1 FF 9C
+
+. 0 25475224 32
+. 83 B7 00 00 38 00 00 09 80 9E 01 58 39 20 00 00 2F 9D 00 00 91 3C 00 00 90 04 00 00 41 9E 00 28
+
+. 0 25475244 12
+. 83 9D 00 94 2C 9C 00 00 40 86 01 BC
+
+. 0 25475250 20
+. 81 7D 00 5C 38 00 FF FF 90 1D 01 E0 2F 0B 00 00 40 9A 00 84
+
+. 0 25475264 12
+. 90 1D 01 8C 2C 19 00 00 41 82 00 10
+
+. 0 25475270 12
+. 8B B9 00 00 2F 9D 00 00 40 9E 00 94
+
+. 0 2547530C 12
+. 7F 23 CB 78 7F 3D CB 78 48 00 DC BD
+
+. 0 25483008 12
+. 3C C0 FE FF 38 C6 FE FF 41 9D 00 1C
+
+. 0 25483014 24
+. 85 04 00 04 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 48 00 F9 40 82 00 4C
+
+. 0 2548302C 32
+. 81 04 00 04 85 24 00 08 7C 06 42 14 7C EA 40 F8 7C 00 50 39 7D 66 4A 14 7C EC 48 F8 40 82 00 1C
+
+. 0 2548304C 8
+. 7D 60 60 39 41 82 FF DC
+
+. 0 25483054 16
+. 7C E0 48 38 7C 00 3A 14 7D 88 00 78 48 00 00 14
+
+. 0 25475318 44
+. 7F A4 EB 78 3B 63 00 1F 81 81 00 00 57 7A 00 36 7C 79 1B 78 7D 7A 00 D0 38 B9 00 01 7D 81 59 6E 39 41 00 17 55 43 00 36 48 00 E8 A1
+
+. 0 25483CB0 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 01 5D
+
+. 0 25483E18 28
+. 54 A0 07 BE 54 88 1E F8 2F 80 00 01 94 21 FF F0 54 84 00 3A 20 E8 00 20 41 9E 00 B8
+
+. 0 25483EE8 20
+. 81 64 00 00 38 A5 FF FF 81 44 00 04 38 84 00 08 4B FF FF 54
+
+. 0 25483E4C 96
+. 7D 66 40 30 7D 49 3C 30 7C C0 4B 78 81 64 00 00 90 03 00 00 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+. 0 25483EAC 24
+. 7D 65 40 30 7D 48 3C 30 7C A4 43 78 38 21 00 10 90 83 00 00 4E 80 00 20
+
+. 0 25483CC0 4
+. 4B FF FF 98
+
+. 0 25475344 16
+. 89 3D 00 00 7C 7C 1B 78 38 60 00 01 48 00 00 18
+
+. 0 25475368 8
+. 2C 89 00 00 40 86 FF E8
+
+. 0 25475354 28
+. 38 09 FF C6 8D 3D 00 01 20 80 00 01 7C 83 01 94 7C 83 23 78 2C 89 00 00 40 86 FF E8
+
+. 0 25475370 16
+. 54 69 10 3A 83 BE 01 68 38 69 00 04 48 02 26 A9
+
+. 0 25475380 20
+. 80 DE 01 88 2F 03 00 00 7C 64 1B 78 90 7D 00 00 41 BA FF 44
+
+. 0 25475394 28
+. 80 7E 04 B4 39 00 00 00 80 BE 01 A0 80 C3 00 00 7F 83 E3 78 80 FE 01 A4 4B FF F7 9D
+
+. 0 25474B48 12
+. 94 21 FF A0 7C 08 02 A6 48 02 24 B1
+
+. 0 25474B54 144
+. 93 C1 00 58 7F C8 02 A6 7D 80 00 26 91 E1 00 1C 2D 88 00 00 92 01 00 20 92 21 00 24 92 41 00 28 81 FE 01 48 82 3E 04 C8 82 1E 01 54 82 5E 01 58 92 61 00 2C 7C F3 3B 78 92 81 00 30 7C D4 33 78 92 A1 00 34 7C B5 2B 78 92 C1 00 38 7C 96 23 78 92 E1 00 3C 7D 17 43 78 93 01 00 40 3B 00 00 00 93 21 00 44 93 41 00 48 93 61 00 4C 93 81 00 50 93 A1 00 54 93 E1 00 5C 90 01 00 64 91 81 00 18 90 61 00 08 38 61 00 08 7E A4 AB 78 48 00 B7 5D
+
+. 0 2548033C 20
+. 94 21 FF F0 7C 68 1B 78 80 63 00 00 2F 83 00 00 41 9E 00 54
+
+. 0 25480350 20
+. 88 03 00 00 7C 6A 1B 78 39 20 00 00 2C 00 00 00 41 82 00 3C
+
+. 0 25480364 20
+. 7C 0B 03 78 88 04 00 00 7C 89 23 78 7C 80 58 00 41 86 00 10
+
+. 0 25480378 12
+. 8C 09 00 01 2F 00 00 00 40 9A FF F0
+
+. 0 25480370 8
+. 7C 80 58 00 41 86 00 10
+
+. 0 25480384 8
+. 2F 80 00 00 40 9E 00 20
+
+. 0 2548038C 12
+. 8D 6A 00 01 2C 0B 00 00 40 82 FF D4
+
+. 0 25480368 16
+. 88 04 00 00 7C 89 23 78 7C 80 58 00 41 86 00 10
+
+. 0 254803A8 20
+. 39 2A 00 01 38 00 00 00 98 0A 00 00 91 28 00 00 4B FF FF E8
+
+. 0 254803A0 8
+. 38 21 00 10 4E 80 00 20
+
+. 0 25474BE4 16
+. 2E 03 00 00 7C 7F 1B 78 7C 7C 1B 78 41 92 01 4C
+
+. 0 25474BF4 4
+. 48 00 E3 DD
+
+. 0 25483064 36
+. 7C E0 40 38 38 84 FF FC 7C 00 3A 14 7D 48 00 78 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+
+. 0 25474BF8 8
+. 7C 7D 1B 79 40 82 01 A8
+
+. 0 25474DA4 8
+. 2B 9D 00 01 40 9D 00 24
+
+. 0 25474DAC 16
+. 7D 3F EA 14 88 69 FF FF 2C 83 00 2F 40 86 00 14
+
+. 0 25474DCC 8
+. 2F 1D 00 00 41 BA FE 34
+
+. 0 25474DD4 16
+. 7D 3C EA 14 88 89 FF FF 2C 04 00 2F 41 A2 FE 24
+
+. 0 25474DE4 20
+. 2F 94 00 00 38 A0 00 2F 7C BC E9 AE 3B BD 00 01 41 9E FE 18
+
+. 0 25474C0C 16
+. 83 F1 01 B0 7E 39 8B 78 2E 1F 00 00 41 92 00 24
+
+. 0 25474C1C 20
+. 81 5F 00 10 7F 83 E3 78 7F A5 EB 78 7F 8A E8 00 41 9E 02 40
+
+. 0 25474C30 12
+. 83 FF 00 00 2E 1F 00 00 40 92 FF E4
+
+. 0 25474C3C 4
+. 41 8E 02 84
+
+. 0 25474EC0 32
+. 80 70 00 00 3B 60 00 00 7E 1A 83 78 54 60 10 3A 7D 80 DA 14 7F EC EA 14 38 7F 00 15 48 02 2B 49
+
+. 0 25474EE0 8
+. 7C 7F 1B 79 40 82 FD 8C
+
+. 0 25474C70 32
+. 80 F0 00 00 7F 84 E3 78 54 E6 10 3A 7C A6 FA 14 38 65 00 14 7F A5 EB 78 90 7F 00 0C 48 00 ED 95
+
+. 0 25483A4C 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+
+. 0 25483A78 8
+. 73 E0 00 03 40 82 00 68
+
+. 0 25483A80 16
+. 7F A3 EB 78 7F E4 FB 78 57 85 F0 BE 48 00 02 39
+
+. 0 25483DA8 28
+. 80 04 00 00 38 84 FF F0 81 24 00 14 38 63 FF EC 38 A5 00 04 90 03 00 14 4B FF FF 7C
+
+. 0 25483A90 24
+. 57 86 00 3A 57 85 07 BE 7F BD 32 14 7F FF 32 14 2C 85 00 00 41 86 00 1C
+
+. 0 25474C90 24
+. 39 20 00 00 99 23 00 00 80 92 00 00 93 BF 00 10 7F 9D 20 40 40 9D 00 08
+
+. 0 25474CA8 32
+. 93 B2 00 00 81 3A 00 00 89 5C 00 00 2C 89 00 00 69 48 00 2F 7F 88 00 D0 57 83 17 BC 41 86 00 18
+
+. 0 25474CC8 20
+. 7D 29 03 A6 39 7F 00 14 90 6B 00 00 39 6B 00 04 42 00 FF F8
+
+. 0 25474CD0 12
+. 90 6B 00 00 39 6B 00 04 42 00 FF F8
+
+. 0 25474CDC 8
+. 92 7F 00 04 41 8E 02 1C
+
+. 0 25474EFC 8
+. 92 FF 00 08 4B FF FE 08
+
+. 0 25474D08 36
+. 80 19 01 B0 57 1D 10 3A 38 61 00 08 7E A4 AB 78 90 1F 00 00 3B 18 00 01 93 F9 01 B0 7F FD B1 2E 48 00 B6 15
+
+. 0 25474D2C 16
+. 2E 03 00 00 7C 7F 1B 78 7C 7C 1B 78 40 92 FE BC
+
+. 0 2547FE20 32
+. 81 3E 04 F4 81 89 00 04 7C 8C E2 14 7D 4C 00 D0 39 64 FF FF 7D 7D 50 38 7F A4 EB 78 48 00 29 5D
+
+. 0 25482798 8
+. 38 00 00 5A 44 00 00 02
+
+. 0 254827A0 4
+. 4C A3 00 20
+
+. 0 2547FE40 16
+. 80 BB 00 00 7D 23 EA 14 7C 83 28 00 41 86 00 08
+
+. 0 2547FE50 64
+. 90 7F 00 00 80 7F 00 00 80 C1 00 24 7D 03 E2 14 80 FE 04 20 91 3B 00 00 7C C8 03 A6 91 1F 00 00 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 90 67 00 00 4E 80 00 20
+
+. 0 25483AE4 16
+. 7F A3 EB 78 7F E4 FB 78 57 85 F0 BE 48 00 03 29
+
+. 0 25483E34 8
+. 28 00 00 01 41 80 00 8C
+
+. 0 25483E3C 8
+. 2C 80 00 02 41 86 00 BC
+
+. 0 25483EFC 24
+. 81 44 00 00 38 63 FF F4 81 64 00 04 38 A5 00 02 38 84 FF FC 4B FF FF 78
+
+. 0 25483E88 36
+. 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+. 0 25483AF4 4
+. 4B FF FF 9C
+
+. 0 25474CAC 28
+. 81 3A 00 00 89 5C 00 00 2C 89 00 00 69 48 00 2F 7F 88 00 D0 57 83 17 BC 41 86 00 18
+
+. 0 2548039C 12
+. 91 28 00 00 38 21 00 10 4E 80 00 20
+
+. 0 25474C00 12
+. 83 9E 01 50 2F 94 00 00 40 9E 01 F0
+
+. 0 25474D3C 104
+. 81 E1 00 64 57 10 10 3A 81 81 00 18 7C 70 B1 2E 7D E8 03 A6 7E C3 B3 78 81 E1 00 1C 82 01 00 20 7D 81 81 20 82 21 00 24 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+
+. 0 254753B0 16
+. 80 7D 00 00 83 83 00 00 2C 1C 00 00 41 82 00 74
+
+. 0 254753C0 72
+. 38 E0 00 00 90 FD 00 04 83 B7 01 B0 93 B8 00 60 81 41 00 00 82 EA 00 04 83 0A FF E0 7E E8 03 A6 83 2A FF E4 82 EA FF DC 83 4A FF E8 83 6A FF EC 83 8A FF F0 83 AA FF F4 83 CA FF F8 83 EA FF FC 7D 41 53 78 4E 80 00 20
+
+. 0 25472F70 4
+. 4B FF F7 6C
+
+. 0 254726DC 12
+. 80 EE 01 BC 2E 07 00 00 40 92 00 10
+
+. 0 254726E8 88
+. 83 AE 01 D4 81 5D 00 00 91 4E 01 BC 83 4F 00 4C 3A E0 00 01 80 8E 03 38 38 6E 01 B8 2C 1A FF FE 82 6E 00 04 81 6E 01 98 52 E4 F0 02 81 8E 01 9C 38 B3 00 01 90 8E 03 38 31 0C 00 01 7C EB 01 94 90 AE 00 04 90 74 00 0C 90 EE 01 98 91 0E 01 9C 92 8E 01 C8 41 82 08 38
+
+. 0 25472F74 20
+. 80 14 00 00 7D E9 7B 78 2F 00 00 00 38 00 FF FF 41 9A 00 08
+
+. 0 25472F8C 8
+. 90 09 00 4C 4B FF F7 B0
+
+. 0 25472740 40
+. 81 8E 03 58 A1 6C 00 2C 83 8C 00 1C B1 6E 03 0C A3 2C 00 2C 7D 6C E2 14 91 6E 03 04 2F 99 00 00 39 59 FF FF 41 9E 00 2C
+
+. 0 25472768 28
+. 3D 20 64 74 61 27 E5 52 55 58 28 34 7F 78 58 2E 7D 18 5A 14 7C 9B 38 00 41 86 0C 80
+
+. 0 25473400 36
+. 83 B1 00 00 3B 40 00 00 82 C8 00 08 3B 20 00 00 2C 1D 00 00 92 CE 03 EC 80 C8 00 14 90 CE 03 F0 41 82 F3 84
+
+. 0 25473424 8
+. 7F A3 EB 78 48 00 FB A9
+
+. 0 2547342C 44
+. 7F A4 EB 78 7C 71 1B 78 38 63 00 1F 54 6A 00 36 80 E1 00 00 7E AA 00 D0 38 B1 00 01 7C E1 A9 6E 39 01 00 17 55 03 00 36 48 01 07 8D
+
+. 0 25483EC4 16
+. 81 44 00 00 38 63 FF FC 85 64 00 04 4B FF FF 90
+
+. 0 25483E60 76
+. 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+. 0 25473458 16
+. 90 7F 00 44 80 9E 00 D0 38 7F 00 44 48 00 CE D9
+
+. 0 25480398 16
+. 39 20 00 00 91 28 00 00 38 21 00 10 4E 80 00 20
+
+. 0 25473468 8
+. 7C 7D 1B 79 41 A2 F3 38
+
+. 0 25473470 12
+. 88 BD 00 00 2F 05 00 00 41 BA FF E4
+
+. 0 2547347C 12
+. 80 90 00 00 2F 84 00 00 40 9E 07 74
+
+. 0 25473488 32
+. 7F A4 EB 78 38 A0 00 01 38 C0 00 01 38 E0 00 00 39 00 00 00 39 20 00 00 7E 83 A3 78 48 00 38 45
+
+. 0 25476CE8 12
+. 94 21 FD 70 7C 08 02 A6 48 02 03 11
+
+. 0 25476CF4 136
+. 93 C1 02 88 7F C8 02 A6 92 61 02 5C 92 A1 02 64 1E 69 00 18 90 01 02 94 7D 80 00 26 82 BE 04 C8 93 A1 02 84 7F B3 A8 2E 92 01 02 50 7C D0 33 78 2F 9D 00 00 92 21 02 54 92 E1 02 6C 7C F1 3B 78 93 01 02 70 7C B7 2B 78 93 21 02 74 7D 18 43 78 93 61 02 7C 7D 39 4B 78 93 E1 02 8C 7C 7B 1B 78 91 C1 02 48 7C 9F 23 78 91 E1 02 4C 92 41 02 58 92 81 02 60 92 C1 02 68 93 41 02 78 93 81 02 80 91 81 02 44 41 9E 00 40
+
+. 0 25476D7C 24
+. 3A C0 00 00 80 BD 01 80 7F A4 EB 78 7F E3 FB 78 74 A9 00 40 40 82 00 1C
+
+. 0 25476D94 4
+. 48 00 59 41
+
+. 0 2547C6D4 40
+. 7C 08 02 A6 94 21 FF E0 93 E1 00 1C 7C 9F 23 78 93 A1 00 14 7C 7D 1B 78 90 01 00 24 80 84 00 04 93 C1 00 18 48 00 67 E9
+
+. 0 25482EE0 16
+. 7C 80 1B 78 54 00 07 BF 3C E0 FE FF 40 82 00 94
+
+. 0 25482EF0 24
+. 80 A3 00 00 80 C4 00 00 3D 00 7F 7F 38 E7 FE FF 39 08 7F 7F 48 00 00 10
+
+. 0 25482F14 20
+. 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+
+. 0 25482F08 8
+. 84 A3 00 04 40 86 00 54
+
+. 0 25482F60 16
+. 80 A3 FF FC 7C AA 32 79 7C 66 28 50 4C A0 00 20
+
+. 0 2547C6FC 12
+. 2F 83 00 00 38 60 00 01 41 9E 00 30
+
+. 0 2547C708 8
+. 83 FF 00 1C 48 00 00 1C
+
+. 0 2547C728 8
+. 2C 1F 00 00 40 82 FF E4
+
+. 0 2547C710 12
+. 80 9F 00 00 7F A3 EB 78 48 00 67 C9
+
+. 0 2547C71C 8
+. 2C 83 00 00 41 86 00 30
+
+. 0 2547C724 12
+. 83 FF 00 04 2C 1F 00 00 40 82 FF E4
+
+. 0 2547C730 32
+. 38 60 00 00 80 81 00 24 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25476D98 8
+. 2C 83 00 00 40 86 01 B8
+
+. 0 25476DA0 12
+. 80 7D 01 80 74 69 00 80 41 82 00 F8
+
+. 0 25476EA0 16
+. 81 7D 00 58 7F E3 FB 78 2D 8B 00 00 41 AE FF 00
+
+. 0 25476DAC 12
+. 83 BD 00 0C 2C 9D 00 00 40 86 FF CC
+
+. 0 25476D80 20
+. 80 BD 01 80 7F A4 EB 78 7F E3 FB 78 74 A9 00 40 40 82 00 1C
+
+. 0 25476EB0 24
+. 81 3D 00 34 81 6B 00 04 80 89 00 04 7F 84 5A 14 7F 84 E3 78 48 00 C0 1D
+
+. 0 25482F80 12
+. 88 A3 00 00 88 C4 00 00 48 00 00 10
+
+. 0 25482F98 8
+. 2C 85 00 00 41 86 00 20
+
+. 0 25482FA0 8
+. 7C 05 30 00 40 82 00 18
+
+. 0 25482FBC 8
+. 7C 66 28 50 4E 80 00 20
+
+. 0 25476EC8 8
+. 2E 03 00 00 40 92 FE E0
+
+. 0 25476DB8 20
+. 82 9E 04 F4 2D 9B 00 00 81 54 00 00 71 49 00 40 40 82 03 90
+
+. 0 25476DCC 12
+. 7F E3 FB 78 38 80 00 2F 48 00 C0 2D
+
+. 0 25482EB4 36
+. 7C E0 60 38 7C EA 63 78 7C 00 3A 14 7D 49 00 F8 7D 24 00 34 39 08 FF FC 54 84 E8 FE 7C 68 22 14 4E 80 00 20
+
+. 0 25476DD8 8
+. 2F 03 00 00 41 9A 02 90
+
+. 0 25476DE0 4
+. 41 8E 01 D8
+
+. 0 25476DE4 12
+. 7F 63 DB 78 7F E4 FB 78 4B FF D9 41
+
+. 0 2547472C 12
+. 94 21 FF D0 7C 08 02 A6 48 02 28 CD
+
+. 0 25474738 72
+. 7D 80 00 26 93 C1 00 28 7F C8 02 A6 93 A1 00 24 7C 9D 23 78 93 41 00 18 38 80 00 24 93 81 00 20 7C 7C 1B 78 92 E1 00 0C 7F A3 EB 78 93 01 00 10 93 21 00 14 93 61 00 1C 93 E1 00 2C 90 01 00 34 91 81 00 08 48 00 E6 85
+
+. 0 25474780 8
+. 7C 7A 1B 79 40 82 00 74
+
+. 0 25474788 8
+. 7F A3 EB 78 48 00 E8 45
+
+. 0 25474790 12
+. 3B E3 00 01 7F E3 FB 78 48 02 32 8D
+
+. 0 2547479C 12
+. 38 00 00 00 2C 03 00 00 41 82 00 14
+
+. 0 254747A8 12
+. 7F A4 EB 78 7F E5 FB 78 48 00 F4 31
+
+. 0 25483D6C 20
+. 80 04 00 00 38 63 FF FC 81 24 00 04 90 03 00 04 4B FF FF A0
+
+. 0 25483D1C 64
+. 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 254747B4 68
+. 7C 60 1B 78 7C 03 03 78 83 81 00 34 81 41 00 08 7F 88 03 A6 82 E1 00 0C 83 01 00 10 7D 40 81 20 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 25476DF0 20
+. 3B 40 FF FF 2F 03 00 00 90 61 02 28 2E 1A FF FF 40 9A 02 00
+
+. 0 25477000 8
+. 38 81 00 18 4B FF F4 41
+
+. 0 25476444 12
+. 94 21 FF A0 7C 08 02 A6 48 02 0B B5
+
+. 0 25476450 76
+. 93 C1 00 58 7F C8 02 A6 93 01 00 40 93 E1 00 5C 7C 98 23 78 7C 3F 0B 78 38 80 00 00 92 C1 00 38 7C 76 1B 78 93 21 00 44 93 61 00 4C 3B 60 00 00 92 A1 00 34 92 E1 00 3C 93 41 00 48 93 81 00 50 93 A1 00 54 90 01 00 64 48 00 BB 49
+
+. 0 25481FE0 8
+. 38 00 00 05 44 00 00 02
+
+. 0 25481FE8 4
+. 4C A3 00 20
+
+. 0 2547649C 12
+. 2F 83 FF FF 7C 79 1B 78 41 9E 00 CC
+
+. 0 254764A8 24
+. 82 FE 05 14 3B 98 00 04 7F 84 E3 78 38 A0 02 00 93 77 00 00 48 00 BB 45
+
+. 0 25482000 8
+. 38 00 00 03 44 00 00 02
+
+. 0 25482008 4
+. 4C A3 00 20
+
+. 0 254764C0 16
+. 2C 03 00 33 7C 7D 1B 78 90 78 00 00 40 81 01 4C
+
+. 0 254764D0 16
+. 80 9E 01 6C 7F 83 E3 78 38 A0 00 09 48 00 CD BD
+
+. 0 254764E0 8
+. 2F 03 00 00 40 9A 02 34
+
+. 0 254764E8 16
+. 81 9C 00 14 81 1E 02 00 2C 8C 00 01 40 86 01 38
+
+. 0 254764F8 12
+. A1 1C 00 12 2F 08 00 14 40 9A 01 D0
+
+. 0 25476504 12
+. A0 1C 00 10 2F 80 00 03 40 9E 02 78
+
+. 0 25476510 16
+. A2 BC 00 2A 81 1E 02 04 2C 95 00 20 40 86 01 10
+
+. 0 25476520 32
+. A1 7C 00 2C 80 9C 00 1C 55 7B 28 34 7F 44 DA 14 7F 1A E8 40 7F A4 C2 14 3B 5D 00 04 41 99 01 08
+
+. 0 25476540 16
+. 55 7B 28 34 7F 5D D3 78 7C 1B D2 14 48 00 00 1C
+
+. 0 25476568 8
+. 7C 00 E8 40 41 81 FF E4
+
+. 0 25476550 12
+. 80 9D 00 00 2C 84 00 04 41 86 00 5C
+
+. 0 2547655C 20
+. 55 66 28 34 3B BD 00 20 7C 06 D2 14 7C 00 E8 40 41 81 FF E4
+
+. 0 25476570 68
+. 80 81 00 00 7F 23 CB 78 82 E4 00 04 82 A4 FF D4 7E E8 03 A6 82 C4 FF D8 82 E4 FF DC 83 04 FF E0 83 24 FF E4 83 44 FF E8 83 64 FF EC 83 84 FF F0 83 A4 FF F4 83 C4 FF F8 83 E4 FF FC 7C 81 23 78 4E 80 00 20
+
+. 0 25477008 12
+. 2E 03 FF FF 7C 7A 1B 78 40 92 FD F4
+
+. 0 25476E04 20
+. 57 12 27 FE 32 D2 FF FF 7E D6 B1 10 7F 7C B0 38 40 92 02 0C
+
+. 0 25477020 52
+. 80 FE 04 C0 7F E3 FB 78 80 C1 02 28 7F 44 D3 78 81 C7 00 00 7E 08 83 78 7F 87 E3 78 7F 09 C3 78 38 A1 00 18 39 41 02 2C 91 C1 02 2C 93 21 00 08 4B FF E4 B9
+
+. 0 25475508 12
+. 94 21 FF 00 7C 08 02 A6 48 02 1A F1
+
+. 0 25475514 144
+. 7D 80 00 26 93 C1 00 F8 7F C8 02 A6 93 E1 00 FC 7C 3F 0B 78 90 01 01 04 92 01 00 C0 7C D0 33 78 92 61 00 CC 7C 93 23 78 92 81 00 D0 7C 74 1B 78 92 C1 00 D8 38 60 00 03 92 E1 00 DC 7D 36 4B 78 93 21 00 E4 7C F7 3B 78 93 41 00 E8 7D 19 43 78 93 61 00 EC 3B 40 00 00 93 81 00 F0 7C BC 2B 78 91 C1 00 B8 38 BF 00 28 91 E1 00 BC 92 21 00 C4 92 41 00 C8 92 A1 00 D4 93 01 00 E0 93 A1 00 F4 91 81 00 B4 91 5F 00 98 83 7F 01 08 48 00 C9 05
+
+. 0 25481EA4 12
+. 94 21 FF 80 7C C8 02 A6 48 01 51 55
+
+. 0 25481EB0 64
+. 93 C1 00 78 7F C8 02 A6 93 61 00 6C 90 C1 00 84 93 21 00 64 7C 79 1B 78 83 7E 04 EC 93 81 00 70 7C BC 2B 78 80 1B 00 00 93 E1 00 7C 7C 9F 23 78 2F 80 00 00 93 41 00 68 93 A1 00 74 40 9E 00 48
+
+. 0 25481EF0 24
+. 83 BE 05 14 7C 83 23 78 38 00 00 C5 7C A4 2B 78 83 5D 00 00 44 00 00 02
+
+. 0 25481F08 12
+. 7C 00 00 26 74 09 10 00 40 82 00 C4
+
+. 0 25481F14 8
+. 2C 83 FF FF 40 A6 00 44
+
+. 0 25481F5C 44
+. 80 A1 00 84 83 21 00 64 83 41 00 68 7C A8 03 A6 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C 38 21 00 80 4E 80 00 20
+
+. 0 254755A4 12
+. 2F 83 00 00 93 5F 00 A0 41 9C 08 2C
+
+. 0 254755B0 20
+. 1C BB 00 18 80 9E 04 C8 7F A5 20 2E 2C 1D 00 00 41 82 01 48
+
+. 0 254755C4 12
+. 81 7F 00 30 3B 40 00 00 48 00 00 10
+
+. 0 254755DC 12
+. 80 DD 01 D8 7C 86 58 00 40 86 FF EC
+
+. 0 254755E8 16
+. 81 1D 01 DC 80 FF 00 34 7E 08 38 00 40 92 FF DC
+
+. 0 254755D0 12
+. 83 BD 00 0C 2E 1D 00 00 41 92 01 30
+
+. 0 25475708 8
+. 2F 1B 00 00 40 9A 06 E0
+
+. 0 25475710 12
+. 72 C0 00 04 38 60 00 00 40 A2 FF 8C
+
+. 0 2547571C 16
+. 82 3E 04 F4 80 91 00 00 70 89 00 40 40 82 07 38
+
+. 0 2547572C 28
+. 7F 25 CB 78 7F 68 DB 78 7E 03 83 78 7E 84 A3 78 7E E6 BB 78 7E C7 B3 78 48 00 39 B1
+
+. 0 25479318 8
+. 7D 69 5B 78 48 00 00 08
+
+. 0 25479324 12
+. 80 09 00 0C 2C 80 00 00 40 86 FF F4
+
+. 0 25479320 16
+. 7C 09 03 78 80 09 00 0C 2C 80 00 00 40 86 FF F4
+
+. 0 25479330 72
+. 93 E9 00 0C 2F 1C 00 00 91 3F 00 10 38 80 00 01 7D 0A 28 2E 7D 4A 2A 14 83 2A 00 04 81 65 01 98 38 E8 01 58 81 85 01 9C 3B 19 00 01 90 E6 00 00 31 0C 00 01 7C EB 01 94 93 0A 00 04 90 E5 01 98 91 05 01 9C 40 9A FE A8
+
+. 0 2547921C 12
+. 80 1C 01 68 2C 00 00 00 40 82 FF F4
+
+. 0 25479234 12
+. 81 66 00 00 7C 89 58 00 41 86 00 24
+
+. 0 25479260 20
+. 3B 7F 01 58 93 7F 01 C4 8B B7 00 00 2F 1D 00 00 41 9A 00 6C
+
+. 0 25479274 12
+. 7E E3 BB 78 3B 80 00 00 48 00 9D 55
+
+. 0 25479280 20
+. 2F 9D 00 2F 3B 63 00 01 83 5E 05 14 7F 7D DB 78 41 9E 01 54
+
+. 0 254793E4 8
+. 7F 63 DB 78 48 01 E6 3D
+
+. 0 254793EC 12
+. 2C 83 00 00 7C 7C 1B 78 40 86 FF B0
+
+. 0 254793A4 12
+. 7E E4 BB 78 7F 65 DB 78 48 00 A6 75
+
+. 0 254793B0 12
+. 8E E3 FF FF 2C 17 00 2F 40 82 FF F8
+
+. 0 254793BC 8
+. 7F 03 E0 00 41 9A 00 14
+
+. 0 254793C4 16
+. 38 C0 00 00 98 C3 00 00 93 9F 01 9C 4B FF FF 0C
+
+. 0 25475748 12
+. 3B 1C 00 04 7C 7A 1B 79 41 82 07 08
+
+. 0 25475754 60
+. A1 F8 00 2C 81 D8 00 18 A1 58 00 10 B1 FA 01 54 91 DA 01 50 A1 18 00 2C 83 78 00 1C 55 1D 28 34 80 DC 00 00 7C FB EA 14 7F 3B E2 14 7F 87 30 40 91 5F 00 9C 3B 79 00 04 41 9D 04 38
+
+. 0 25475790 80
+. A1 1A 01 54 7C 32 0B 78 80 81 00 00 3B A0 00 07 1C E8 00 18 3B 80 00 00 38 C7 00 1E 54 D9 02 B6 7C B9 00 D0 7C 81 29 6E A0 FA 01 54 38 61 00 2F 93 BF 00 A4 54 75 00 36 54 E9 28 34 93 9F 00 A8 7C 09 DA 14 7F 7D DB 78 7E 00 D8 40 40 91 03 A8
+
+. 0 254757E0 8
+. 3B 20 00 00 48 00 00 28
+
+. 0 2547580C 12
+. 81 3D 00 00 2F 09 00 06 41 9A 00 94
+
+. 0 25475818 8
+. 28 09 00 06 40 A1 FF CC
+
+. 0 254757E8 8
+. 2F 89 00 01 41 9E 02 C4
+
+. 0 25475AB0 20
+. 81 31 00 04 81 7D 00 1C 39 09 FF FF 7D 69 40 39 40 82 06 2C
+
+. 0 25475AC4 24
+. 81 5D 00 08 38 AB FF FF 80 DD 00 04 7C 86 50 50 7C 8B 28 39 40 82 06 50
+
+. 0 25475ADC 116
+. 7D 4A 40 78 3B 9C 00 01 7D 55 C9 2E 28 9C 00 01 81 3D 00 08 80 1D 00 10 81 F1 00 04 7C 69 02 14 7D 83 7A 14 7D CF 00 D0 39 6C FF FF 7D 68 70 38 7D 75 CA 14 91 0B 00 04 3B 39 00 18 80 9D 00 08 80 DD 00 10 7C A4 32 14 90 AB 00 08 81 3D 00 08 80 1D 00 14 7C 69 02 14 90 6B 00 0C 81 91 00 04 81 DD 00 04 7D EC 00 D0 7D C8 78 38 91 0B 00 10 40 85 00 18
+
+. 0 25475B64 32
+. 80 7D 00 18 3D 80 73 51 61 8E 62 40 54 6F 16 FA 7D C8 7E 30 55 06 07 3E 90 CB 00 14 4B FF FC 78
+
+. 0 254757F8 20
+. 54 EF 28 34 3B BD 00 20 7D CF DA 14 7C 8E E8 40 40 85 00 BC
+
+. 0 25475B50 12
+. 80 8B FF EC 7E 04 50 00 41 92 00 0C
+
+. 0 25475B5C 40
+. 39 40 00 01 91 5F 00 A8 80 7D 00 18 3D 80 73 51 61 8E 62 40 54 6F 16 FA 7D C8 7E 30 55 06 07 3E 90 CB 00 14 4B FF FC 78
+
+. 0 254757F0 8
+. 2C 89 00 02 41 86 03 B8
+
+. 0 25475BAC 24
+. 80 7D 00 14 81 9D 00 08 54 6B E8 FE 91 9A 00 08 B1 7A 01 56 4B FF FC 38
+
+. 0 25475820 16
+. 3D C0 64 74 61 C0 E5 51 7E 09 00 00 41 92 02 78
+
+. 0 25475AA4 12
+. 81 5D 00 18 91 5F 00 A4 4B FF FD 4C
+
+. 0 254758C4 8
+. 2E 1C 00 00 41 92 02 BC
+
+. 0 254758CC 36
+. 1D FC 00 18 83 9F 00 9C 81 75 00 00 7E BD AB 78 2E 1C 00 03 7F 8F AA 14 83 3C FF F4 7D CB C8 50 40 92 08 18
+
+. 0 254758F0 20
+. 81 91 00 4C 7D C4 73 78 7E E3 BB 78 7D 65 60 38 48 00 AD E9
+
+. 0 254806E8 16
+. 7C A3 2B 79 7D 88 02 A6 94 21 FF E0 48 01 69 0D
+
+. 0 254806F8 32
+. 93 A1 00 14 7C 9D 23 78 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 E1 00 1C 7D 88 03 A6 40 82 00 FC
+
+. 0 25480718 40
+. 3C C0 00 03 83 FE 04 F4 80 9E 04 C8 60 C6 FF FF 3C E0 70 00 39 80 00 0F 81 64 00 00 38 84 00 18 2F 8B 00 00 41 9E 00 9C
+
+. 0 25480740 36
+. 81 3F 00 04 38 A9 FF FF 7C A3 28 F8 80 0B 01 A4 83 8B 01 A0 7C 08 2B 78 7C 88 38 40 7F 8A 18 38 40 84 00 10
+
+. 0 25480764 12
+. 83 8B 01 80 77 89 C0 00 40 82 00 0C
+
+. 0 25480770 8
+. 7F 07 50 40 40 98 00 54
+
+. 0 254807C8 16
+. 7D 47 53 78 81 6B 00 0C 2C 8B 00 00 40 86 FF 78
+
+. 0 2548074C 24
+. 80 0B 01 A4 83 8B 01 A0 7C 08 2B 78 7C 88 38 40 7F 8A 18 38 40 84 00 10
+
+. 0 25480778 32
+. 7C 06 40 10 38 00 00 00 7C 00 01 14 7D 2A 30 10 39 20 00 00 7D 29 49 14 7C 1C 48 39 40 82 00 94
+
+. 0 25480798 32
+. 7C 08 38 10 38 00 00 00 7C 00 01 14 7D 26 50 10 39 20 00 00 7D 29 49 14 7C 1C 48 39 41 82 00 18
+
+. 0 254807CC 12
+. 81 6B 00 0C 2C 8B 00 00 40 86 FF 78
+
+. 0 254807D8 8
+. 35 8C FF FF 40 80 FF 54
+
+. 0 25480730 16
+. 81 64 00 00 38 84 00 18 2F 8B 00 00 41 9E 00 9C
+
+. 0 254807E0 28
+. 3C 67 FF FF 80 FF 00 04 7F 03 30 40 38 87 FF FF 7C 85 EB 78 38 85 00 01 40 99 00 14
+
+. 0 254807FC 16
+. 7D 06 18 50 7C 64 18 50 7F 88 20 40 40 9C 00 08
+
+. 0 25480810 24
+. 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25475904 24
+. 80 B5 00 14 81 15 00 10 7D C4 73 78 38 C0 08 02 7E 67 9B 78 48 00 CE 81
+
+. 0 2547591C 12
+. 2F 03 FF FF 90 7A 01 A0 41 9A 01 3C
+
+. 0 25475928 32
+. 81 7F 00 A8 7C A3 72 14 90 BA 01 A4 2F 8B 00 00 80 F5 00 00 7C 07 18 50 90 1A 00 00 40 9E 05 34
+
+. 0 25475E78 24
+. 80 75 00 04 38 A0 00 00 80 9C FF F4 7C 83 20 50 7C 60 1A 14 48 00 C9 2D
+
+. 0 254827B8 8
+. 38 00 00 7D 44 00 00 02
+
+. 0 254827C0 4
+. 4C A3 00 20
+
+. 0 25475E90 4
+. 4B FF FA B8
+
+. 0 25475948 12
+. 80 DD 00 14 70 C9 00 04 41 82 00 14
+
+. 0 25475954 28
+. 83 9D 00 04 83 3A 00 00 7D 19 E2 14 91 1A 01 A8 81 9A 01 4C 2F 8C 00 00 40 9E 00 38
+
+. 0 25475970 16
+. 81 5D 00 10 81 78 00 1C 7C 8A 58 40 41 85 00 28
+
+. 0 25475980 36
+. 80 1D 00 04 81 1D 00 00 A1 38 00 2C 7C 88 00 50 55 23 28 34 7C A4 52 14 7C EB 1A 14 7F 05 38 40 40 98 06 9C
+
+. 0 2547603C 16
+. 7E E8 5A 14 7D 6A B8 50 91 7A 01 4C 4B FF F9 5C
+
+. 0 254759A4 16
+. 80 9D 00 0C 80 7D 00 08 7C 04 18 40 40 81 00 68
+
+. 0 25475A18 16
+. 3B BD 00 18 7F 35 7A 14 7F 99 E8 40 40 9D 01 F8
+
+. 0 25475A28 16
+. 80 9D 00 04 80 1D 00 00 7C 84 00 40 40 85 FF 14
+
+. 0 25475A38 32
+. 81 5A 00 00 7C 80 20 50 80 BD 00 14 38 C0 08 12 81 1D 00 10 7C 6A 02 14 7E 67 9B 78 48 00 CD 45
+
+. 0 25475A58 8
+. 2F 03 FF FF 40 9A FE EC
+
+. 0 254759B4 40
+. 80 DA 00 00 81 71 00 04 7F 26 1A 14 7E E6 22 14 7D 0B CA 14 7C 6B 00 D0 39 48 FF FF 7D 5C 18 38 7F 97 E0 40 40 9C 00 08
+
+. 0 254759DC 12
+. 7E FC BB 78 7C 9C C8 40 40 85 00 2C
+
+. 0 254759E8 12
+. 80 BD 00 14 70 AA 00 02 41 82 08 38
+
+. 0 254759F4 16
+. 7C B9 E0 50 7F 23 CB 78 38 80 00 00 48 00 DD 45
+
+. 0 25483744 16
+. 28 85 00 04 70 67 00 03 7C 66 1B 78 40 85 01 90
+
+. 0 25483754 12
+. 2A 85 00 1F 50 84 44 2E 41 A2 00 24
+
+. 0 25483780 12
+. 7C A0 11 20 50 84 80 1E 40 95 01 98
+
+. 0 2548378C 12
+. 70 C7 00 1C 20 E7 00 20 41 82 00 40
+
+. 0 25483798 24
+. 7C E0 11 20 7C C6 3A 14 7C A7 28 50 28 87 00 10 7C C8 33 78 40 9C 00 0C
+
+. 0 254837B8 4
+. 41 84 00 14
+
+. 0 254837CC 4
+. 40 9D 00 08
+
+. 0 254837D0 20
+. 90 88 FF FC 28 84 00 00 54 A7 00 35 7C A0 11 20 41 86 01 A0
+
+. 0 25483980 8
+. 7C 08 02 A6 41 82 FF 9C
+
+. 0 25483988 4
+. 48 01 36 79
+
+. 0 2548398C 24
+. 7D 28 02 A6 81 29 04 F0 81 09 00 00 7C 08 03 A6 28 88 00 00 41 86 FE 44
+
+. 0 254839A4 8
+. 28 88 00 20 41 86 FE BC
+
+. 0 25483864 32
+. 54 A5 06 FE 7C E0 21 20 54 E0 C9 FF 7C 09 03 A6 38 E0 00 20 39 00 FF C0 28 85 00 10 40 9A 00 0C
+
+. 0 2548388C 8
+. 39 20 FF E0 40 99 00 10
+
+. 0 254838A0 8
+. 2A 85 00 00 41 82 00 7C
+
+. 0 254838A8 24
+. 7C 00 37 EC 7C 07 37 EC 38 C6 00 80 7C 08 37 EC 7C 09 37 EC 42 00 FF EC
+
+. 0 254838C0 4
+. 4D 96 00 20
+
+. 0 254838C4 4
+. 48 00 00 60
+
+. 0 25483924 8
+. 7C C6 2A 14 41 9F 00 20
+
+. 0 2548392C 4
+. 41 9E 00 24
+
+. 0 25483930 4
+. 41 9D 00 28
+
+. 0 25483958 8
+. 94 86 FF FC 41 84 00 14
+
+. 0 25483970 4
+. 4C 9C 00 20
+
+. 0 25475A04 12
+. 80 BD 00 14 70 A0 00 02 41 82 08 08
+
+. 0 25475A10 8
+. 7F 97 E0 40 41 9D 06 AC
+
+. 0 25475C1C 24
+. 82 A1 00 00 7E 41 93 78 92 A1 00 00 81 3A 00 08 2F 09 00 00 40 9A 02 64
+
+. 0 25475E94 16
+. 82 5A 00 00 7D 29 92 14 91 3A 00 08 4B FF FD 98
+
+. 0 25475C38 12
+. 2E 09 00 00 7D 2A 4B 78 41 92 02 64
+
+. 0 25475C44 16
+. 81 69 00 00 39 1A 00 20 2C 0B 00 00 41 82 00 64
+
+. 0 25475C54 60
+. 3C 60 70 00 3C E0 6F FF 3C A0 6F FF 3F 80 6F FF 3D E0 6F FF 3F A0 6F FF 60 66 00 21 60 E7 FF FF 60 A5 FD FF 63 84 FE 34 61 E3 FE FF 63 BD FF 40 2F 8B 00 21 55 60 10 3A 40 9D 00 18
+
+. 0 25475CA4 16
+. 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+. 0 25475C84 12
+. 2F 8B 00 21 55 60 10 3A 40 9D 00 18
+
+. 0 25475C90 12
+. 7D 2B 38 50 28 89 00 0F 41 85 03 BC
+
+. 0 25475C9C 24
+. 7C 0B 30 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+. 0 25475CB4 12
+. 81 7A 00 00 2C 8B 00 00 41 86 00 AC
+
+. 0 25475CC0 12
+. 81 28 00 10 2F 09 00 00 41 9A 00 10
+
+. 0 25475CCC 24
+. 81 49 00 04 7C 8A 5A 14 90 89 00 04 81 28 00 0C 2E 09 00 00 41 92 00 10
+
+. 0 25475CE4 24
+. 82 49 00 04 7C D2 5A 14 90 C9 00 04 81 28 00 14 2C 09 00 00 41 82 00 10
+
+. 0 25475CFC 24
+. 81 E9 00 04 7F AF 5A 14 93 A9 00 04 81 28 00 18 2F 89 00 00 41 9E 00 10
+
+. 0 25475D14 24
+. 80 A9 00 04 7F 85 5A 14 93 89 00 04 81 28 00 1C 2C 89 00 00 41 86 00 10
+
+. 0 25475D2C 24
+. 80 E9 00 04 7C 67 5A 14 90 69 00 04 81 28 00 5C 2F 09 00 00 41 9A 00 10
+
+. 0 25475D44 24
+. 80 09 00 04 7E E0 5A 14 92 E9 00 04 81 28 00 C4 2E 09 00 00 41 92 00 10
+
+. 0 25475D5C 24
+. 83 29 00 04 7D 99 5A 14 91 89 00 04 81 28 00 78 2C 09 00 00 41 82 00 30
+
+. 0 25475DA0 12
+. 81 28 00 98 2F 89 00 00 41 9E 03 54
+
+. 0 25475DAC 16
+. 81 69 00 04 71 60 00 01 91 7A 01 FC 41 82 00 08
+
+. 0 25475DC0 12
+. 81 28 00 74 2C 89 00 00 41 86 00 E0
+
+. 0 25475EA8 8
+. 71 69 00 40 40 82 02 A4
+
+. 0 25475EB0 12
+. 81 3A 01 4C 2F 89 00 00 41 9E 01 CC
+
+. 0 25475EBC 36
+. 82 5A 00 00 7C C9 92 14 90 DA 01 4C 80 BE 04 C8 83 9F 00 A4 81 E5 04 00 7F 9D 78 78 73 AB 00 01 40 82 02 B0
+
+. 0 25475EE0 12
+. 81 3A 02 18 2F 89 00 00 41 9E 00 10
+
+. 0 25475EF8 8
+. 7E 63 9B 78 48 00 C0 F5
+
+. 0 25481FF0 8
+. 38 00 00 06 44 00 00 02
+
+. 0 25481FF8 4
+. 4C A3 00 20
+
+. 0 25475F00 12
+. 81 1E 01 C8 2C 83 00 00 40 A6 FB 68
+
+. 0 25475F0C 48
+. 81 7A 01 80 81 FF 00 9C 55 7D 00 02 69 F3 00 02 21 33 00 00 7E 69 99 14 6F B2 40 00 21 52 00 00 7E 4A 91 14 7E 4A 98 39 3A 60 FF FF 40 82 00 BC
+
+. 0 25475F3C 28
+. 80 7A 01 50 80 FA 00 00 7D 63 3A 14 91 7A 01 50 80 B1 00 00 70 A9 00 40 40 82 02 78
+
+. 0 25475F58 8
+. 7F 43 D3 78 48 00 26 11
+
+. 0 25475F60 8
+. 72 C0 00 08 40 82 00 10
+
+. 0 25475F68 12
+. 81 DA 00 60 2F 0E 00 00 40 9A 02 E0
+
+. 0 25475F74 12
+. 81 1A 01 FC 71 09 00 20 41 82 00 0C
+
+. 0 25475F80 36
+. 81 3E 04 C8 93 49 01 A0 80 91 00 50 C8 1F 00 28 2C 84 00 00 D8 1A 01 D0 C9 BF 00 30 D9 BA 01 D8 41 86 FE B0
+
+. 0 25475E50 8
+. 7F 43 D3 78 4B FF F8 50
+
+. 0 254756A4 100
+. 80 A1 00 00 83 45 00 04 81 85 FF B4 7F 48 03 A6 81 C5 FF B8 81 E5 FF BC 7D 80 81 20 82 05 FF C0 82 25 FF C4 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+
+. 0 25477054 4
+. 4B FF FF 04
+
+. 0 25476F58 96
+. 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+
+. 0 254734A8 36
+. 80 9E 00 D0 82 E3 01 78 3A 77 00 01 92 63 01 78 38 7F 00 44 6A 7D 00 01 23 BD 00 00 7F B9 01 94 48 00 CE 75
+
+. 0 254734CC 12
+. 7F B9 EB 78 7C 7D 1B 79 40 82 FF 9C
+
+. 0 254734D8 12
+. 80 7E 00 3C 38 80 00 04 48 00 EB 51
+
+. 0 25482030 8
+. 38 00 00 21 44 00 00 02
+
+. 0 25482038 4
+. 4C A3 00 20
+
+. 0 2548203C 4
+. 4B FF F2 F4
+
+. 0 25481330 12
+. 94 21 FF F0 7D 88 02 A6 48 01 5C C9
+
+. 0 2548133C 36
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 81 3E 05 14 83 C1 00 08 38 21 00 10 90 69 00 00 38 60 FF FF 4E 80 00 20
+
+. 0 254734E4 8
+. 2E 03 00 00 40 92 F2 D0
+
+. 0 254727B8 8
+. 2C 19 00 00 40 82 13 F0
+
+. 0 25473BAC 64
+. 57 36 10 3A 83 A1 00 00 38 D6 00 1E 39 60 00 00 54 C0 00 36 7F 40 00 D0 7F A1 D1 6E 3B 61 00 17 81 2E 01 C4 57 7A 00 36 55 6C 10 3A 39 6B 00 01 7D 2C D1 2E 81 29 00 0C 2F 89 00 00 40 9E FF EC
+
+. 0 25473BEC 4
+. 4B FF EB D4
+
+. 0 254727C0 36
+. 81 7F 00 30 38 E0 00 00 7F 44 D3 78 7F 25 CB 78 69 66 00 03 20 06 00 00 7C C0 31 14 7E 83 A3 78 48 00 79 E5
+
+. 0 2547A1C4 24
+. 94 21 FF 60 7D 28 02 A6 91 E1 00 5C 1D E5 00 0C 91 C1 00 58 48 01 CE 29
+
+. 0 2547A1DC 172
+. 39 CF 00 36 81 41 00 00 55 CC 00 36 93 A1 00 94 7D 0C 00 D0 3B A0 00 00 91 21 00 A4 7F 9D 28 40 92 E1 00 7C 7C 77 1B 78 93 C1 00 98 38 00 00 00 93 E1 00 9C 7C 3F 0B 78 92 01 00 60 7F C8 02 A6 92 21 00 64 7C CF 33 78 92 41 00 68 7C EE 3B 78 92 61 00 6C 92 81 00 70 92 A1 00 74 92 C1 00 78 93 01 00 80 93 21 00 84 93 41 00 88 93 61 00 8C 93 81 00 90 7D 41 41 6E 39 00 00 01 81 23 01 80 38 61 00 17 54 6A 00 36 51 09 C9 4C 39 6A 00 0C 90 0A 00 00 91 6A 00 08 91 37 01 80 91 5F 00 08 92 EA 00 04 91 1F 00 0C 40 9C 00 5C
+
+. 0 2547A288 20
+. 7C A9 03 A6 7D 47 53 78 38 A0 00 00 38 C0 00 01 48 00 00 08
+
+. 0 2547A2A0 60
+. 1E 68 00 0C 57 B5 10 3A 7E 15 20 2E 39 28 00 01 7E 53 3A 14 7C B3 39 2E 3A 92 00 0C 92 12 00 04 92 92 00 08 3B BD 00 01 82 30 01 80 91 3F 00 0C 50 D1 C9 4C 92 30 01 80 42 00 FF C4
+
+. 0 2547A2DC 64
+. 7D 28 4B 78 83 1F 00 08 1E C8 00 0C 82 1E 05 14 3A 20 00 00 7F 1B C3 79 7C F6 C2 14 80 90 00 00 38 A7 FF F4 38 C0 00 00 7C B3 2B 78 90 C5 00 08 92 3F 00 34 90 9F 00 38 92 30 00 00 41 82 02 AC
+
+. 0 2547A31C 32
+. 83 5B 00 04 3B 80 00 01 93 9B 00 00 3B 00 00 00 83 3A 01 58 3A C0 00 00 2C 99 00 00 40 86 00 30
+
+. 0 2547A33C 32
+. 80 7A 01 E8 7F 4B BA 78 31 4B FF FF 7F AA 59 10 21 23 00 00 7C 09 19 14 7C 0B E8 39 41 82 00 10
+
+. 0 2547A368 12
+. 82 BA 00 24 2F 95 00 00 40 9E 00 1C
+
+. 0 2547A38C 44
+. 80 DA 00 34 7F 72 DB 78 83 9A 00 08 82 A6 00 04 91 FF 00 14 91 DF 00 18 81 3C 00 00 92 BF 00 1C 2F 09 00 00 93 5F 00 10 41 9A 05 04
+
+. 0 2547A3B8 16
+. 3F 20 7F FF 63 27 FF FD 90 FF 00 40 48 00 00 BC
+
+. 0 2547A480 8
+. 2F 89 00 01 41 9E FF 44
+
+. 0 2547A3C8 24
+. 80 1C 00 04 38 80 00 24 7F A0 AA 14 7F A3 EB 78 7F B9 EB 78 48 00 8A 25
+
+. 0 2547A3E0 8
+. 2C 83 00 00 40 86 05 88
+
+. 0 2547A3E8 24
+. 80 BE 02 F8 38 7F 00 30 38 9F 00 34 38 DF 00 10 93 BF 00 20 48 00 11 A5
+
+. 0 2547B5A0 12
+. 94 21 FD 60 7D 48 02 A6 48 01 BA 59
+
+. 0 2547B5AC 56
+. 93 C1 02 98 7F C8 02 A6 39 00 00 00 91 41 02 A4 93 E1 02 9C 80 FE 04 C8 91 01 00 14 81 27 01 B4 90 81 02 74 7D 29 03 A6 90 A1 02 78 90 C1 02 7C 90 61 02 70 4E 80 04 21
+
+. 0 2547185C 12
+. 94 21 FF F0 7D 88 02 A6 48 02 57 9D
+
+. 0 25471868 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 7E 00 18 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 2547B5E4 20
+. 80 03 00 00 90 61 02 80 38 61 00 20 90 01 00 08 48 00 72 BD
+
+. 0 254828B0 8
+. 38 80 00 00 48 00 01 DC
+
+. 0 25482A90 172
+. 90 23 00 00 7C 08 02 A6 91 C3 00 0C D9 C3 00 58 90 03 00 08 91 E3 00 10 D9 E3 00 60 7C 00 00 26 92 03 00 14 DA 03 00 68 90 03 00 54 92 23 00 18 DA 23 00 70 92 43 00 1C DA 43 00 78 92 63 00 20 DA 63 00 80 92 83 00 24 DA 83 00 88 92 A3 00 28 DA A3 00 90 92 C3 00 2C DA C3 00 98 92 E3 00 30 DA E3 00 A0 93 03 00 34 DB 03 00 A8 93 23 00 38 DB 23 00 B0 93 43 00 3C DB 43 00 B8 93 63 00 40 DB 63 00 C0 93 83 00 44 DB 83 00 C8 93 A3 00 48 DB A3 00 D0 93 C3 00 4C DB C3 00 D8 93 E3 00 50 DB E3 00 E0 7C C8 02 A6 48 01 44 C9
+
+. 0 25482B3C 24
+. 7C A8 02 A6 80 A5 04 E4 7C C8 03 A6 80 A5 00 3C 74 A5 10 00 41 82 01 58
+
+. 0 25482CA8 4
+. 4B FF D2 F0
+
+. 0 2547FF98 24
+. 94 21 FF F0 38 00 00 00 90 03 01 C0 38 21 00 10 38 60 00 00 4E 80 00 20
+
+. 0 2547B5F8 20
+. 7C 7F 1B 79 80 61 02 7C 7F EA FB 78 2F 9F FF FF 40 82 00 54
+
+. 0 2547B60C 24
+. 81 21 02 80 38 01 00 10 81 81 02 78 90 09 00 00 7D 89 03 A6 4E 80 04 21
+
+. 0 2547A0BC 48
+. 7C 88 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 38 A0 00 00 90 81 00 14 80 63 00 00 80 9F 00 10 80 03 01 80 74 09 C0 00 54 06 17 BE 40 82 00 08
+
+. 0 2547A0EC 20
+. 38 C0 00 01 80 FF 00 04 81 1F 00 08 81 23 00 18 4B FF CB ED
+
+. 0 25482FA8 20
+. 8C A3 00 01 8C C4 00 01 2C 85 00 00 7C 05 30 00 40 86 FF D4
+
+. 0 25482F8C 8
+. 8C A3 00 01 40 82 00 34
+
+. 0 25482FC4 12
+. 88 A3 FF FF 7C 66 28 50 4E 80 00 20
+
+. 0 2547706C 8
+. 7F E3 FB 78 48 00 BF 61
+
+. 0 25477074 16
+. 83 54 00 00 3A C3 00 01 73 49 00 01 40 82 03 C4
+
+. 0 25477084 8
+. 3B 40 FF FF 41 8E 00 20
+
+. 0 2547708C 28
+. 83 9B 00 94 7F 6C FE 70 7D 9D DA 78 2E 1A FF FF 2F 9C 00 00 7D CC E8 50 40 9E 01 6C
+
+. 0 254770A8 24
+. 2C 9B 00 00 7F 65 FE 70 7C A0 DA 78 7F 7D DB 78 7D C5 00 50 41 86 03 70
+
+. 0 254770C0 12
+. 82 5E 01 9C 39 E0 FF FF 48 00 00 24
+
+. 0 254770EC 24
+. 80 7D 01 8C 3B 9D 01 8C 38 00 00 00 2E 03 FF FF 2F 03 00 00 41 92 00 2C
+
+. 0 2547712C 32
+. 2C 00 00 00 7F 86 E3 78 7F E3 FB 78 7E C4 B3 78 7E E5 BB 78 38 E1 02 28 39 01 00 18 41 82 FF 84
+
+. 0 254770CC 32
+. 2E 1A FF FF 83 BD 01 68 31 3D FF FF 7C C9 E9 10 7D 20 00 26 55 29 9F FE 7D 2B 30 39 41 82 00 B0
+
+. 0 25477198 4
+. 40 92 00 A8
+
+. 0 2547719C 12
+. 80 B5 00 00 2F 85 00 00 41 9E 00 68
+
+. 0 254771A8 40
+. 83 A5 01 80 7C AA DA 78 30 0A FF FF 7D 00 51 10 57 BC 00 02 6F 92 80 00 31 32 FF FF 7D E9 91 10 7D E0 40 39 41 82 00 40
+
+. 0 2547720C 4
+. 40 92 00 34
+
+. 0 25477210 16
+. 80 DE 01 68 80 A6 00 00 2C 85 FF FF 41 86 00 24
+
+. 0 25477220 24
+. 7F E3 FB 78 7E C4 B3 78 7E E5 BB 78 38 E1 02 28 39 01 00 18 4B FF F5 61
+
+. 0 25476794 12
+. 94 21 FF 00 7C 08 02 A6 48 02 08 65
+
+. 0 254767A0 220
+. 93 C1 00 F8 7F C8 02 A6 92 41 00 C8 92 61 00 CC 7D 20 00 26 90 01 01 04 38 00 00 00 82 5E 01 5C 82 7E 01 58 92 01 00 C0 92 21 00 C4 82 13 00 00 82 32 00 00 7D 12 43 78 91 E1 00 BC 7D F0 8A 14 91 C1 00 B8 93 E1 00 FC 7D CF 22 14 7C 3F 0B 78 39 8E 00 1E 90 DF 00 84 55 8B 00 36 81 41 00 00 7C 71 1B 78 92 A1 00 D4 39 E0 00 00 91 21 00 B4 7D 2B 00 D0 82 A6 00 00 92 C1 00 D8 7C 96 23 78 93 41 00 E8 3B 40 FF FF 93 61 00 EC 2E 1A FF FF 93 A1 00 F4 92 81 00 D0 92 E1 00 DC 93 01 00 E0 93 21 00 E4 93 81 00 F0 7D 41 49 6E 38 81 00 17 83 B5 00 00 81 DE 04 F4 54 9B 00 36 90 BF 00 80 90 FF 00 88 90 1F 00 8C 80 6E 00 00 3A 60 00 00 3A E0 00 00 70 69 00 01 40 82 02 5C
+
+. 0 2547687C 20
+. 80 9D 00 0C 7F 63 DB 78 80 BD 00 10 3B 80 00 00 48 00 D1 95
+
+. 0 25476890 8
+. 7C 78 1B 78 40 92 01 A0
+
+. 0 25476898 16
+. 83 3E 01 54 81 99 00 00 7F 9C 60 40 40 9C 01 4C
+
+. 0 254768A8 16
+. 81 7E 02 28 3A 80 00 02 82 0B 00 00 48 00 00 1C
+
+. 0 254768D0 12
+. 80 DD 00 14 2C 86 00 01 41 A6 FF E0
+
+. 0 254768DC 32
+. 81 3E 01 60 57 8A 18 38 7F 03 C3 78 80 A9 00 00 7E 6A 2A 14 7C 8A 28 2E 80 B3 00 04 48 00 D1 29
+
+. 0 254768FC 12
+. 7E 24 8B 78 7E C5 B3 78 48 00 D1 1D
+
+. 0 25476908 16
+. 83 4E 00 00 7E 7B 18 50 73 49 00 01 40 82 01 AC
+
+. 0 25476918 12
+. 7F 63 DB 78 7E 44 93 78 4B FF FB 25
+
+. 0 25481FEC 4
+. 4B FF F3 44
+
+. 0 25476924 20
+. 80 1D 00 14 7C 7A 1B 78 2E 03 FF FF 2F 00 00 00 40 9A 00 0C
+
+. 0 25476938 4
+. 41 92 00 78
+
+. 0 254769B0 28
+. 7C 76 98 50 7F 64 DB 78 7C FB 1A 14 38 BF 00 10 98 07 FF FF 38 60 00 03 48 00 B3 A1
+
+. 0 25481D68 12
+. 94 21 FF 80 7C C8 02 A6 48 01 52 91
+
+. 0 25481D74 64
+. 93 C1 00 78 7F C8 02 A6 93 61 00 6C 90 C1 00 84 93 21 00 64 7C 79 1B 78 83 7E 04 EC 93 81 00 70 7C BC 2B 78 80 1B 00 00 93 E1 00 7C 7C 9F 23 78 2F 80 00 00 93 41 00 68 93 A1 00 74 40 9E 00 48
+
+. 0 25481DB4 24
+. 83 BE 05 14 7C 83 23 78 38 00 00 C3 7C A4 2B 78 83 5D 00 00 44 00 00 02
+
+. 0 25481DCC 12
+. 7C 00 00 26 74 09 10 00 40 82 00 C4
+
+. 0 25481E98 12
+. 90 7D 00 00 38 60 FF FF 4B FF FF 38
+
+. 0 25481DD8 8
+. 2C 83 FF FF 40 A6 00 44
+
+. 0 25481DE0 12
+. 80 9D 00 00 2F 04 00 26 40 BA 00 38
+
+. 0 25481E20 44
+. 80 A1 00 84 83 21 00 64 83 41 00 68 7C A8 03 A6 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C 38 21 00 80 4E 80 00 20
+
+. 0 254769CC 8
+. 2F 83 00 00 40 9E 00 14
+
+. 0 254769E4 12
+. 38 80 00 01 90 9D 00 14 4B FF FF 54
+
+. 0 25476940 24
+. 81 7D 00 14 69 6C 00 02 21 2C 00 00 7D 89 61 14 7E F7 63 78 41 B2 FF 64
+
+. 0 254768B8 12
+. 3B 9C 00 01 3B BD 00 04 40 92 01 74
+
+. 0 254768C4 12
+. 81 39 00 00 7C 9C 48 40 40 84 01 24
+
+. 0 254769D4 16
+. 81 1F 00 20 55 00 04 26 2C 80 40 00 41 A6 FF 5C
+
+. 0 2547693C 28
+. 92 9D 00 14 81 7D 00 14 69 6C 00 02 21 2C 00 00 7D 89 61 14 7E F7 63 78 41 B2 FF 64
+
+. 0 254769F0 4
+. 40 92 00 44
+
+. 0 254769F4 8
+. 2F 17 00 00 41 9A 00 1C
+
+. 0 254769FC 16
+. 83 BE 05 14 81 3D 00 00 2F 89 00 02 41 9E 00 0C
+
+. 0 25476A14 16
+. 87 B5 00 04 7D EF BB 78 2C 9D 00 00 40 86 FE 48
+
+. 0 25476868 20
+. 80 6E 00 00 3A 60 00 00 3A E0 00 00 70 69 00 01 40 82 02 5C
+
+. 0 25483DE0 28
+. 80 04 00 00 38 84 FF F8 81 24 00 0C 38 63 FF F4 38 A5 00 02 90 03 00 0C 4B FF FF 34
+
+. 0 25483D2C 48
+. 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 25483DC4 28
+. 81 24 00 00 38 84 FF F4 80 04 00 10 38 63 FF F0 38 A5 00 03 91 23 00 10 4B FF FF 58
+
+. 0 25483D34 40
+. 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 25476A24 8
+. 2E 0F 00 00 41 92 02 6C
+
+. 0 25476A2C 8
+. 38 60 FF FF 48 00 00 2C
+
+. 0 25476A5C 100
+. 81 01 00 00 81 E8 00 04 80 88 FF B4 7D E8 03 A6 81 C8 FF B8 81 E8 FF BC 7C 80 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+. 0 25477238 32
+. 2E 03 FF FF 7C 7A 1B 78 7D 6E 00 D0 7C E0 00 26 54 E7 9F FE 55 6E 0F FE 7C EB 70 39 41 82 00 44
+
+. 0 25477258 20
+. 80 1B 01 E0 3B BB 01 E0 39 20 00 00 2F 00 FF FF 41 9A 00 28
+
+. 0 25477290 8
+. 2C 09 00 00 40 82 00 DC
+
+. 0 25477298 4
+. 40 92 00 8C
+
+. 0 2547729C 8
+. 2F 17 00 00 40 9A 01 E4
+
+. 0 254772A4 8
+. 7F E3 FB 78 48 00 09 49
+
+. 0 25477BF0 12
+. 94 21 FF C0 7C 88 02 A6 48 01 F4 09
+
+. 0 25477BFC 88
+. 93 C1 00 38 7F C8 02 A6 92 61 00 0C 90 81 00 44 7D 80 00 26 92 E1 00 1C 7C 77 1B 78 82 7E 04 F4 92 81 00 10 80 13 00 00 92 A1 00 14 70 09 00 01 92 C1 00 18 93 01 00 20 93 21 00 24 93 41 00 28 93 61 00 2C 93 81 00 30 93 A1 00 34 93 E1 00 3C 91 81 00 08 40 82 03 7C
+
+. 0 25477C54 16
+. 82 DE 02 54 80 76 00 00 2F 83 00 00 41 9E 00 FC
+
+. 0 25477D5C 16
+. 80 7E 02 60 38 A0 00 01 80 9E 02 5C 48 00 42 45
+
+. 0 2547BFAC 52
+. 7C 08 02 A6 94 21 FF 60 93 81 00 90 7C 9C 23 78 38 80 00 00 93 61 00 8C 93 A1 00 94 7C BD 2B 78 93 E1 00 9C 3B 60 FF FF 93 C1 00 98 90 01 00 A4 48 00 60 05
+
+. 0 2547BFE0 20
+. 7C 7F 1B 79 38 A1 00 10 38 60 00 03 7F E4 FB 78 41 80 00 40
+
+. 0 2547BFF4 4
+. 48 00 5E B1
+
+. 0 2547BFF8 8
+. 2F 83 00 00 41 9C 00 2C
+
+. 0 2547C000 40
+. 81 21 00 44 7F A5 EB 78 38 C0 00 02 7F E7 FB 78 2C 89 00 00 7D 24 4B 78 39 00 00 00 38 60 00 00 91 3C 00 00 40 86 00 34
+
+. 0 2547C058 4
+. 48 00 67 41
+
+. 0 2547C05C 8
+. 7C 7B 1B 78 4B FF FF C8
+
+. 0 2547C028 8
+. 7F E3 FB 78 48 00 5F C5
+
+. 0 2547C030 40
+. 80 81 00 A4 7F 63 DB 78 83 81 00 90 83 61 00 8C 7C 88 03 A6 83 A1 00 94 83 C1 00 98 83 E1 00 9C 38 21 00 A0 4E 80 00 20
+
+. 0 25477D6C 12
+. 2E 03 FF FF 7C 7F 1B 78 41 92 01 54
+
+. 0 25477D78 16
+. 81 3E 02 5C 83 A9 00 00 28 9D 00 10 40 85 00 E8
+
+. 0 25477D88 12
+. 80 9E 02 64 38 A0 00 0B 48 00 B5 09
+
+. 0 25477D94 8
+. 2F 03 00 00 40 9A 00 D4
+
+. 0 25477D9C 40
+. 81 5F 00 0C 93 F6 00 00 1D 0A 00 0C 83 9E 02 58 38 E8 00 17 54 E5 00 38 38 C5 00 30 7C 7F 2A 14 7E 06 E8 40 41 91 00 1C
+
+. 0 25477DC4 16
+. 80 9E 02 68 38 A0 00 14 90 7C 00 00 48 00 B4 C9
+
+. 0 254832B8 12
+. 54 80 07 BE 2C 00 00 00 41 82 00 2C
+
+. 0 254832EC 12
+. 55 69 07 BE 2F 09 00 00 40 9A 00 C0
+
+. 0 254832F8 24
+. 54 A8 F0 BE 7D 6A 5B 78 55 00 07 BE 7C 89 23 78 2F 80 00 01 41 9E 01 50
+
+. 0 2548345C 36
+. 80 6B 00 00 39 08 FF FF 80 C4 00 00 39 4B 00 04 39 24 00 04 7F 03 30 00 80 EA 00 00 80 09 00 00 41 9A 00 C8
+
+. 0 25483544 16
+. 7F 87 00 00 80 6A 00 04 80 C9 00 04 40 9E FD F4
+
+. 0 25483554 16
+. 7C 03 30 00 80 EA 00 08 80 09 00 08 41 A2 FD D4
+
+. 0 25483334 16
+. 7C 87 00 00 80 6A 00 0C 80 C9 00 0C 41 86 01 48
+
+. 0 25483488 20
+. 39 08 FF FC 39 4A 00 10 2F 08 00 00 39 29 00 10 40 BA FF D8
+
+. 0 2548349C 24
+. 7F 83 30 00 7C 03 30 10 7C 00 01 10 7C 00 03 B8 60 00 00 01 40 9E FE A4
+
+. 0 254834B4 8
+. 38 00 00 00 4B FF FE 9C
+
+. 0 25483354 12
+. 2F 80 00 00 7C 03 03 78 40 9E 00 40
+
+. 0 25483360 24
+. 54 BC 00 3A 54 A5 07 BE 7C 84 E2 14 7D 6B E2 14 2C 05 00 00 41 82 00 24
+
+. 0 25477DD4 8
+. 2C 03 00 00 41 A2 FE 8C
+
+. 0 25477C64 16
+. 81 56 00 00 38 60 00 00 2F 0A FF FF 41 9A 00 94
+
+. 0 25477C74 20
+. 83 1E 02 58 3A A0 00 00 83 78 00 00 2E 1B FF FF 41 92 01 64
+
+. 0 25477C88 36
+. 83 FB 00 14 83 9E 02 5C 3B 5F FF FF 7F 95 D0 00 83 BC 00 00 3B 80 00 00 7E CA EA 14 7F 3B B0 50 41 9D 00 4C
+
+. 0 25477CAC 44
+. 7C 7C D2 14 80 98 00 00 7C 7D 0E 70 7F BD 01 94 7E E3 BB 78 1F FD 00 18 7D 9F 22 14 80 0C 00 34 7C 00 C8 40 7C 9B 02 14 40 80 00 20
+
+. 0 25477CD8 4
+. 4B FF FD FD
+
+. 0 25477AD4 20
+. 94 21 FF F0 7C 67 1B 78 89 43 00 00 2F 8A 00 00 41 9E 00 DC
+
+. 0 25477AE8 20
+. 88 64 00 00 38 0A FF D0 7D 4B 53 78 28 00 00 09 41 81 00 8C
+
+. 0 25477B84 16
+. 39 03 FF D0 7C 60 1B 78 28 08 00 09 40 81 00 48
+
+. 0 25477B94 16
+. 55 43 06 3E 54 00 06 3E 7C 83 00 00 40 86 00 44
+
+. 0 25477BA4 16
+. 8D 47 00 01 8C 64 00 01 2F 0A 00 00 40 9A FF 3C
+
+. 0 25477AEC 16
+. 38 0A FF D0 7D 4B 53 78 28 00 00 09 41 81 00 8C
+
+. 0 25477BE4 12
+. 7C 60 18 50 38 21 00 10 4E 80 00 20
+
+. 0 25477CDC 8
+. 2F 83 00 00 41 9E 02 14
+
+. 0 25477CE4 4
+. 40 9C 00 6C
+
+. 0 25477D50 12
+. 3B 5D FF FF 7F 9C D0 00 4B FF FF 98
+
+. 0 25477CF0 4
+. 40 9D FF BC
+
+. 0 25477CE8 12
+. 3B 9D 00 01 7F 9C D0 00 40 9D FF BC
+
+. 0 25477AFC 16
+. 38 A3 FF D0 7C 69 1B 78 28 85 00 09 41 85 00 C4
+
+. 0 25477B0C 36
+. 8D 47 00 01 55 6B 06 3E 55 29 06 3E 39 0B FF D0 38 6A FF D0 39 69 FF D0 2B 83 00 09 38 84 00 01 41 9D 00 20
+
+. 0 25477B4C 8
+. 88 64 00 00 48 00 00 14
+
+. 0 25477B64 12
+. 38 A3 FF D0 2B 05 00 09 40 99 FF E8
+
+. 0 25477B70 8
+. 7F 88 58 00 41 BE 00 38
+
+. 0 25477BAC 8
+. 2F 0A 00 00 40 9A FF 3C
+
+. 0 25477BB4 12
+. 7C 63 50 50 38 21 00 10 4E 80 00 20
+
+. 0 25477EF4 16
+. 2F 9D 00 00 7F B6 EB 78 7F FC FB 78 41 BD 00 20
+
+. 0 25477F20 32
+. 80 D8 00 00 7E E3 BB 78 3B 9C FF E8 7D 3F 32 14 80 A9 00 1C 7C 85 C8 40 7C 9B 2A 14 41 84 FF CC
+
+. 0 25477F08 4
+. 4B FF FB CD
+
+. 0 25477F0C 8
+. 2F 03 00 00 40 9A 00 30
+
+. 0 25477F40 20
+. 7E 1D B0 00 80 F8 00 00 7F FF 3A 14 3B 9F 00 30 40 91 00 24
+
+. 0 25477F74 36
+. 81 1F 00 30 69 1F 00 01 21 3F 00 00 7F E9 F9 14 69 14 00 03 21 54 00 00 7E 8A A1 14 7F EB A3 79 41 82 00 24
+
+. 0 25477F98 12
+. 80 FC 00 08 7F 07 C8 40 40 98 00 18
+
+. 0 25477FA4 8
+. 2E 15 00 00 41 92 01 28
+
+. 0 254780D0 12
+. 81 33 00 08 2C 89 00 00 41 86 00 10
+
+. 0 254780DC 12
+. 81 7C 00 0C 7C 0B 48 40 41 81 FE D4
+
+. 0 254780E8 44
+. 80 D3 00 3C 3A 80 00 00 80 7C 00 10 66 89 80 00 80 1C 00 14 7D 25 48 F8 7C CC 30 F8 7C 64 28 38 7C 1C 60 38 7C 80 E3 79 40 82 FE A8
+
+. 0 25478114 16
+. 83 F3 00 38 7E A7 DA 14 7F 1F 40 00 40 9A FE 98
+
+. 0 25478124 4
+. 4B FF FB D0
+
+. 0 25477CF4 12
+. 82 F3 00 00 72 EB 00 01 40 82 01 AC
+
+. 0 25477D00 80
+. 7E A3 AB 78 82 61 00 44 80 E1 00 08 7E 68 03 A6 82 81 00 10 82 61 00 0C 7C E0 81 20 82 A1 00 14 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+
+. 0 254772AC 12
+. 7C 72 1B 79 7F 80 00 26 41 82 00 20
+
+. 0 254772B8 4
+. 41 8E 01 7C
+
+. 0 254772BC 16
+. 7F 69 DB 78 83 A9 01 FC 73 A9 08 00 40 82 01 D4
+
+. 0 254772CC 8
+. 7F 88 01 20 40 82 00 C8
+
+. 0 25477398 12
+. 7E 43 93 78 38 81 00 18 4B FF F0 A5
+
+. 0 254765B4 12
+. 80 FD 00 10 2F 07 00 20 40 9A FF A0
+
+. 0 254765C0 12
+. 81 5D 00 1C 2B 8A 00 03 40 BD FF 94
+
+. 0 254765CC 44
+. 81 9D 00 04 38 A0 00 00 81 18 00 00 3A BF 00 10 38 0C 00 20 7D 6C C2 14 7C 00 40 40 7F 23 CB 78 7D 84 63 78 3B 6B 00 04 41 81 00 FC
+
+. 0 254765F8 16
+. 80 9E 01 70 7F 63 DB 78 38 A0 00 10 48 00 CC 95
+
+. 0 25483310 8
+. 28 00 00 01 41 80 02 28
+
+. 0 2548353C 24
+. 80 EB 00 00 80 04 00 00 7F 87 00 00 80 6A 00 04 80 C9 00 04 40 9E FD F4
+
+. 0 25476608 8
+. 2F 03 00 00 41 9A 00 84
+
+. 0 25476690 40
+. 81 3B 00 10 8B 5B 00 17 8B BB 00 1B 2F 89 00 00 57 55 40 2E 8B 1B 00 1F 7F 95 EA 14 57 96 40 2E 7C 16 C2 14 40 9E 00 1C
+
+. 0 254766B8 16
+. 80 7E 04 F4 81 23 00 08 2C 09 00 00 41 A2 FE AC
+
+. 0 254766C8 8
+. 7C 89 00 40 40 A4 FE A4
+
+. 0 254773A4 12
+. 2E 03 FF FF 7C 7A 1B 78 41 B2 FF 2C
+
+. 0 254773B0 8
+. 7E 43 93 78 48 00 BC 1D
+
+. 0 254773B8 12
+. 3B A3 00 01 7F A3 EB 78 48 02 06 65
+
+. 0 254773C4 12
+. 38 00 00 00 2F 03 00 00 41 9A 00 14
+
+. 0 254773D0 12
+. 7E 44 93 78 7F A5 EB 78 48 00 C8 09
+
+. 0 25483C74 20
+. 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+
+. 0 254773DC 16
+. 7C 60 1B 78 2F 80 00 00 90 01 02 28 40 9E FE EC
+
+. 0 254772D4 4
+. 40 92 00 50
+
+. 0 25477324 12
+. 82 F4 00 00 72 E9 00 01 41 82 FA D8
+
+. 0 254758A8 28
+. 54 EF 28 34 80 1D 00 08 7D CF DA 14 3B BD 00 20 7C 8E E8 40 90 1A 01 4C 41 85 FF 4C
+
+. 0 25475830 8
+. 7F 09 00 40 41 99 02 4C
+
+. 0 25475A80 16
+. 3D 40 64 74 61 4F E5 52 7F 89 78 00 40 9E FD 6C
+
+. 0 25475A90 20
+. 81 1D 00 08 91 1A 02 34 80 DD 00 14 90 DA 02 38 4B FF FD 58
+
+. 0 25483974 12
+. 90 86 FF FC 90 86 FF F8 4E 80 00 20
+
+. 0 25476054 24
+. 55 6C 08 3C 7D 80 0E 70 7C 17 00 F8 20 00 00 31 2B 17 00 02 40 B9 FC 38
+
+. 0 2547606C 12
+. 7F 2B 28 50 2A 19 00 0B 41 91 00 C4
+
+. 0 25476078 12
+. 7C 0B 20 50 54 00 10 3A 4B FF FC 24
+
+. 0 25475D68 12
+. 81 28 00 78 2C 09 00 00 41 82 00 30
+
+. 0 254760FC 8
+. 81 7A 01 FC 4B FF FC C0
+
+. 0 25475F88 28
+. 80 91 00 50 C8 1F 00 28 2C 84 00 00 D8 1A 01 D0 C9 BF 00 30 D9 BA 01 D8 41 86 FE B0
+
+. 0 2547A100 24
+. 90 7F 00 14 80 61 00 14 83 E1 00 08 38 21 00 10 7C 68 03 A6 4E 80 00 20
+
+. 0 2547B624 56
+. 38 60 00 00 81 61 00 08 80 C1 02 80 80 A1 02 70 80 81 02 74 93 E5 00 00 91 66 00 00 93 E4 00 00 81 41 02 A4 83 C1 02 98 83 E1 02 9C 7D 48 03 A6 38 21 02 A0 4E 80 00 20
+
+. 0 2547A400 12
+. 83 3F 00 34 2F 19 00 00 40 9A 01 AC
+
+. 0 2547A40C 20
+. 81 5F 00 24 80 8A 01 80 54 89 3F BE 2F 89 00 00 40 9E 00 44
+
+. 0 2547A420 72
+. 80 01 00 00 38 C0 00 01 80 FF 00 0C 94 01 FF E0 3A 81 00 17 56 85 00 36 91 25 00 08 91 25 00 00 39 27 00 01 90 B3 00 08 91 45 00 04 82 6A 01 80 91 3F 00 0C 50 D3 C9 4C 92 6A 01 80 7C B3 2B 78 2C 98 00 00 41 86 00 10
+
+. 0 2547A474 12
+. 85 3C 00 08 2C 09 00 00 41 82 04 3C
+
+. 0 25482F94 12
+. 8C C4 00 01 2C 85 00 00 41 86 00 20
+
+. 0 25475838 8
+. 2C 09 00 07 40 82 FF BC
+
+. 0 25475840 12
+. 80 1D 00 14 2F 00 00 00 41 BA FF B0
+
+. 0 2547584C 24
+. 90 1A 02 20 81 7D 00 1C 91 7A 02 24 81 3D 00 1C 2F 89 00 00 41 9E 07 EC
+
+. 0 25475864 52
+. 80 BD 00 08 39 29 FF FF 7C A0 48 38 90 1A 02 28 81 DA 01 80 3C C0 40 00 81 5D 00 10 55 C8 00 02 7C 08 30 00 91 5A 02 1C 80 9D 00 08 90 9A 02 18 40 82 0A 24
+
+. 0 25475898 4
+. 48 00 83 7D
+
+. 0 2547DC14 12
+. 94 21 FF F0 7D 88 02 A6 48 01 93 E5
+
+. 0 2547DC20 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 DE 04 C8 88 06 04 0C 2F 80 00 00 40 9E 00 20
+
+. 0 2547DC3C 28
+. 81 46 04 08 39 0A 00 01 91 06 04 08 7D 03 43 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 2547589C 12
+. A0 FA 01 54 90 7A 02 30 4B FF FF 54
+
+. 0 254759E0 8
+. 7C 9C C8 40 40 85 00 2C
+
+. 0 254837B0 12
+. 90 88 FF FC 94 88 FF F8 41 84 00 14
+
+. 0 254837D4 16
+. 28 84 00 00 54 A7 00 35 7C A0 11 20 41 86 01 A0
+
+. 0 25483884 16
+. 7C 00 37 EC 38 C6 00 20 39 20 FF E0 40 99 00 10
+
+. 0 254760C0 28
+. 80 BD 00 14 7F 83 E3 78 7C 9C B8 50 38 C0 00 32 38 E0 FF FF 39 00 00 00 48 00 C6 C1
+
+. 0 254760DC 8
+. 2C 83 FF FF 40 86 F9 38
+
+. 0 25475D74 16
+. 80 09 00 04 70 0A 00 02 90 1A 02 00 41 82 00 08
+
+. 0 25475D88 8
+. 70 0B 00 04 41 82 00 08
+
+. 0 25475D94 8
+. 70 0A 00 08 41 82 00 08
+
+. 0 25475D9C 16
+. 91 28 00 60 81 28 00 98 2F 89 00 00 41 9E 03 54
+
+. 0 25475DBC 16
+. 91 28 00 60 81 28 00 74 2C 89 00 00 41 86 00 E0
+
+. 0 25475EEC 20
+. 80 DA 00 00 7D 49 32 14 91 5A 02 18 7E 63 9B 78 48 00 C0 F5
+
+. 0 2547A488 12
+. 80 7F 00 40 7C 09 18 00 41 82 00 14
+
+. 0 2547A494 16
+. 3D 40 7F FF 61 48 FF FF 7F 09 40 00 40 9A FF D4
+
+. 0 2547A8B8 8
+. 2F 98 00 00 41 9E 00 5C
+
+. 0 2547A918 16
+. 80 1B 00 00 2F 1B 00 00 2C 00 00 00 41 82 00 1C
+
+. 0 2547A928 12
+. 83 7B 00 08 2F 1B 00 00 41 BA FC 94
+
+. 0 2547A934 12
+. 80 1B 00 00 2C 00 00 00 40 82 FF EC
+
+. 0 2547A940 4
+. 40 9A F9 DC
+
+. 0 2547A35C 12
+. A1 3A 01 56 2F 09 00 00 40 9A 05 E4
+
+. 0 2547A948 36
+. 55 34 10 3A 81 81 00 00 3A 54 00 1E 56 49 03 76 7D 49 00 D0 7D 81 51 6E 39 01 00 17 55 18 00 36 4B FF FA 00
+
+. 0 2547A0F0 16
+. 80 FF 00 04 81 1F 00 08 81 23 00 18 4B FF CB ED
+
+. 0 25482F10 24
+. 84 C4 00 04 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+
+. 0 25482F28 8
+. 7D 00 28 38 41 86 00 2C
+
+. 0 25482F30 16
+. 7C 00 42 14 7C AA 32 79 7D 29 00 78 41 80 00 34
+
+. 0 25482F40 24
+. 7D 4A 00 34 7D 29 00 34 39 29 00 07 7C 89 50 00 7C 66 28 50 4C A4 00 20
+
+. 0 25482F58 8
+. 38 60 00 00 4E 80 00 20
+
+. 0 2547C750 32
+. 80 81 00 24 38 60 00 01 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25476F54 100
+. 7F A3 EB 78 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+
+. 0 2547A460 8
+. 2C 98 00 00 41 86 00 10
+
+. 0 2547A468 24
+. 56 CB 10 3A 3A D6 00 01 7D 4B C1 2E 85 3C 00 08 2C 09 00 00 41 82 04 3C
+
+. 0 2547A8C0 28
+. 3B B6 00 01 56 DC 10 3A 3A 40 00 00 57 B6 18 38 7E 5C C1 2E 38 76 00 04 48 01 D1 4D
+
+. 0 2547A8DC 16
+. 2C 83 00 00 7C 69 1B 78 90 7A 01 E8 41 86 05 C4
+
+. 0 2547A8EC 24
+. 57 B5 10 3A 7F 04 C3 78 7E A5 AB 78 93 49 00 00 38 63 00 04 48 00 92 E1
+
+. 0 2547A904 20
+. 80 9A 01 E8 7E A5 AB 78 7F 55 22 14 38 7A 00 04 48 00 92 CD
+
+. 0 25476ED0 12
+. 80 9D 00 1C 3B 60 00 00 48 00 00 20
+
+. 0 25476EF8 8
+. 2F 04 00 00 40 9A FF E0
+
+. 0 25476EDC 16
+. 7C 9B 23 78 80 84 00 00 7F 83 E3 78 48 00 BF F9
+
+. 0 25476EEC 8
+. 2F 83 00 00 41 9E 00 58
+
+. 0 25476EF4 12
+. 80 9B 00 04 2F 04 00 00 40 9A FF E0
+
+. 0 25476F00 8
+. 7F 83 E3 78 48 00 C0 CD
+
+. 0 25476F08 16
+. 7C 66 1B 78 38 63 00 0D 3B 46 00 01 48 02 0B 11
+
+. 0 25476F18 16
+. 7C 7F 1B 79 80 DE 01 AC 7F 84 E3 78 41 82 01 3C
+
+. 0 25476F28 16
+. 7F 84 E3 78 7F 45 D3 78 38 7F 00 0C 48 00 CC AD
+
+. 0 25476F38 128
+. 92 DF 00 04 90 7F 00 00 92 DF 00 08 93 FB 00 04 81 1D 01 80 65 07 00 80 90 FD 01 80 7F A3 EB 78 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+
+. 0 2547A374 12
+. 80 9A 00 F0 2C 84 00 00 40 86 00 10
+
+. 0 2547A380 12
+. 80 BA 00 E8 2C 05 00 00 41 82 05 30
+
+. 0 2547A5C4 32
+. 83 10 00 00 82 7F 00 38 21 78 00 00 7D CB C1 14 31 53 FF FF 7F 6A 99 10 7D C8 D8 39 41 82 00 0C
+
+. 0 2547A5E4 20
+. 81 1F 00 38 91 10 00 00 80 77 01 E8 2F 03 00 00 41 9A 00 18
+
+. 0 2547A60C 16
+. 80 DF 00 0C 54 C5 18 38 38 65 00 04 48 01 D4 0D
+
+. 0 2547A61C 12
+. 2C 83 00 00 90 77 01 E8 41 86 08 9C
+
+. 0 2547A628 44
+. 80 FF 00 08 38 A0 00 00 82 9F 00 0C 2C 07 00 00 56 9D 10 3A 92 97 01 5C 7D 23 EA 14 90 BF 00 0C 38 89 00 04 90 97 01 58 41 82 00 50
+
+. 0 2547A654 12
+. 2F 0F 00 00 38 C0 00 00 40 9A 01 F8
+
+. 0 2547A660 56
+. 81 7F 00 0C 80 07 00 04 83 37 01 58 55 76 10 3A 39 8B 00 01 7C 16 C9 2E 81 E7 00 04 80 6F 01 80 91 9F 00 0C 50 C3 C9 4C 90 6F 01 80 80 E7 00 08 2F 07 00 00 40 9A FF CC
+
+. 0 2547A698 24
+. 80 BF 00 0C 80 97 01 58 83 9E 04 F4 80 FC 00 00 70 E8 04 00 40 82 04 E0
+
+. 0 2547A6B0 16
+. 80 D7 01 F0 38 60 00 00 2B 86 00 00 40 9D 00 48
+
+. 0 2547A704 16
+. 80 77 01 E8 54 A5 10 3A 3B 00 00 01 48 00 94 D1
+
+. 0 2547A714 12
+. 80 FF 00 0C 7C 18 38 40 40 80 03 2C
+
+. 0 2547A720 32
+. 81 57 01 E8 82 57 01 58 57 15 10 3A 80 6A 00 04 3B 20 00 01 7F 55 90 2E 7F 83 D0 00 41 9E 00 18
+
+. 0 2547A754 12
+. 3B 79 00 01 7C 9B 38 40 40 84 00 7C
+
+. 0 2547A760 20
+. 57 68 10 3A 7E 68 50 2E 81 73 01 E8 2F 0B 00 00 41 9A 00 5C
+
+. 0 2547A774 12
+. 81 2B 00 00 2F 89 00 00 41 9E 00 50
+
+. 0 2547A780 12
+. 7E 19 D8 50 56 05 10 3A 48 00 00 10
+
+. 0 2547A798 12
+. 7C 09 D0 00 39 6B 00 04 40 82 FF EC
+
+. 0 2547A78C 12
+. 81 2B 00 00 2C 89 00 00 41 86 00 38
+
+. 0 2547A7CC 12
+. 3B 7B 00 01 7C 9B 38 40 41 84 FF 8C
+
+. 0 2547A7D8 12
+. 3B 18 00 01 7F 18 38 40 41 98 FF 44
+
+. 0 2547A724 28
+. 82 57 01 58 57 15 10 3A 80 6A 00 04 3B 20 00 01 7F 55 90 2E 7F 83 D0 00 41 9E 00 18
+
+. 0 2547A740 20
+. 3B 39 00 01 57 2E 10 3A 7F 6E 50 2E 7F 9B D0 00 40 9E FF F0
+
+. 0 2547A7E4 20
+. 2F 91 00 00 54 F7 10 3A 3B 40 00 00 7F 57 51 2E 40 9E 06 DC
+
+. 0 2547A7F8 92
+. 81 01 00 00 82 28 00 04 81 C8 FF B8 7E 28 03 A6 81 E8 FF BC 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+. 0 254727E4 12
+. 80 F4 01 5C 2C 87 00 00 41 86 00 3C
+
+. 0 254727F0 56
+. 81 14 01 58 7C E9 03 A6 38 E7 FF FF 54 F1 10 3A 7F 11 40 2E 82 B8 01 80 66 A8 08 00 91 18 01 80 81 14 01 58 7F 31 40 2E 81 39 01 78 3B 89 00 01 93 99 01 78 42 00 FF D4
+
+. 0 254727F8 48
+. 38 E7 FF FF 54 F1 10 3A 7F 11 40 2E 82 B8 01 80 66 A8 08 00 91 18 01 80 81 14 01 58 7F 31 40 2E 81 39 01 78 3B 89 00 01 93 99 01 78 42 00 FF D4
+
+. 0 25472828 24
+. 81 6E 01 C8 80 EE 01 C4 90 EB 00 0C 81 2E 01 C4 2E 09 00 00 41 92 00 08
+
+. 0 25472840 16
+. 91 69 00 10 81 4E 03 30 2B 0A 00 01 40 99 13 58
+
+. 0 25472850 28
+. 81 54 01 58 39 0E 01 B8 39 20 00 01 39 60 00 04 80 6A 00 04 7F 83 40 00 41 9E 00 18
+
+. 0 2547286C 20
+. 39 29 00 01 55 2B 10 3A 7C 8B 50 2E 7F 84 40 00 40 9E FF F0
+
+. 0 25472880 24
+. 81 1F 00 30 7D 4B 52 14 81 6A FF FC 2C 08 00 00 91 6E 01 C8 40 82 0F 34
+
+. 0 25472898 24
+. 82 74 01 5C 3A E9 00 01 38 00 00 00 7D C9 73 78 7F 97 98 40 40 9C 00 08
+
+. 0 254728B4 28
+. 90 09 01 C4 82 0E 01 C8 38 0E 01 B8 90 10 00 0C 81 2E 01 C4 2C 89 00 00 41 86 00 08
+
+. 0 254728D4 44
+. 80 7E 00 2C 69 1A 00 03 20 1A 00 00 7F 40 D1 14 80 9E 00 14 21 48 00 00 7F AA 41 14 38 BF 00 58 93 BF 00 58 93 5F 00 5C 48 00 8D 9D
+
+. 0 2547B698 12
+. 94 21 FF D0 7C C8 02 A6 48 01 B9 61
+
+. 0 2547B6A4 68
+. 93 C1 00 28 7F C8 02 A6 93 61 00 1C 7C BB 2B 78 90 C1 00 34 92 E1 00 0C 7C 97 23 78 80 BE 04 C8 93 21 00 14 7C 79 1B 78 81 25 01 B4 93 41 00 18 93 81 00 20 7D 29 03 A6 93 A1 00 24 83 9E 03 1C 4E 80 04 21
+
+. 0 2547B6E8 36
+. 38 80 00 00 7C 7D 1B 78 7E E8 03 A6 83 5D 00 00 7F 63 DB 78 90 9D 00 00 83 7C 00 00 93 3C 00 00 4E 80 00 21
+
+. 0 254717D4 12
+. 94 21 FF E0 7C 08 02 A6 48 02 58 25
+
+. 0 254717E0 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 01 00 24 7C 7F 1B 78 80 BF 00 04 38 80 00 01 81 3E 04 C8 93 A1 00 14 80 69 00 00 48 00 B7 C1
+
+. 0 2547CFC8 56
+. 94 21 FF E0 7C 08 02 A6 93 E1 00 1C 7C 7F 1B 79 93 41 00 08 7C 9A 23 78 93 61 00 0C 7C BB 2B 78 93 81 00 10 3B 80 00 00 93 A1 00 14 93 C1 00 18 90 01 00 24 40 A2 00 18
+
+. 0 2547D014 28
+. 80 DF 01 80 7F E3 FB 78 7F 44 D3 78 7F 65 DB 78 74 C9 00 40 3B A0 00 00 40 82 FF D8
+
+. 0 2547D030 4
+. 4B FF FB 41
+
+. 0 2547CB70 16
+. 7C 08 02 A6 94 21 FF 60 7D 80 00 26 48 01 A4 85
+
+. 0 2547CB80 116
+. 92 01 00 60 3A 00 00 00 90 01 00 A4 81 23 00 34 92 A1 00 74 7C 95 23 78 2F 89 00 00 92 C1 00 78 93 01 00 80 3A C0 00 00 93 61 00 8C 3B 00 00 00 93 C1 00 98 7C 7B 1B 78 93 E1 00 9C 7F C8 02 A6 92 21 00 64 7C 3F 0B 78 92 41 00 68 38 60 00 00 92 61 00 6C 92 81 00 70 92 E1 00 7C 93 21 00 84 93 41 00 88 93 81 00 90 93 A1 00 94 91 81 00 5C 41 9E 01 80
+
+. 0 2547CBF4 20
+. 82 5B 00 AC 83 29 00 04 2D 92 00 00 82 3B 00 B4 41 8E 01 28
+
+. 0 2547CC08 36
+. 81 7B 00 00 2E 05 00 00 81 32 00 04 82 9E 04 C8 7C 8B 4A 2E 7E EB 4A 14 82 7E 04 D4 2C 04 00 01 40 82 02 D0
+
+. 0 2547CC2C 24
+. 83 5B 00 18 83 97 00 04 1E 1A 00 18 7F 5C CA 14 7F B0 A0 2E 48 00 00 1C
+
+. 0 2547CC5C 8
+. 2F 1D 00 00 40 9A FF E4
+
+. 0 2547CC44 12
+. 7F A4 EB 78 7F 43 D3 78 4B FF FA 89
+
+. 0 2547CC50 8
+. 2F 83 00 00 40 9E 02 20
+
+. 0 2547CC58 12
+. 83 BD 00 0C 2F 1D 00 00 40 9A FF E4
+
+. 0 2547CE74 8
+. 7F BC EB 78 41 92 FE 30
+
+. 0 2547CCA8 36
+. 80 97 00 08 7F A4 BA 14 7E 7A 9B 78 81 1B 00 04 7E A7 AB 78 88 A8 00 00 7D 03 43 78 2C 85 00 00 40 86 00 0C
+
+. 0 2547CCCC 36
+. 80 DA 00 00 80 66 00 00 81 5D 00 08 A2 1D 00 04 80 9D 00 00 7C AA CA 14 80 DC 00 14 56 08 07 BC 4B FF FA 85
+
+. 0 2547C770 12
+. 94 21 FF 20 7D 48 02 A6 48 01 A8 89
+
+. 0 2547C77C 116
+. 93 C1 00 D8 7F C8 02 A6 92 61 00 AC 91 41 00 E4 3A 60 00 00 81 66 00 34 81 3E 04 F4 92 C1 00 B8 7C F6 3B 78 80 09 00 00 92 E1 00 BC 7D 17 43 78 70 09 00 10 93 01 00 C0 93 21 00 C4 7C 78 1B 78 93 61 00 CC 7C D9 33 78 93 81 00 D0 7C 9B 23 78 93 E1 00 DC 7C BC 2B 78 92 81 00 B0 7C 3F 0B 78 92 A1 00 B4 93 41 00 C8 93 A1 00 D4 83 4B 00 04 40 82 02 30
+
+. 0 2547C7F0 12
+. 81 39 00 B4 2C 89 00 00 41 86 02 64
+
+. 0 2547C7FC 36
+. 81 69 00 04 3A 9F 00 08 80 19 00 00 82 BE 03 58 7F A0 5A 14 A1 3D 00 00 7F 83 E3 78 2F 89 00 01 40 9E 01 24
+
+. 0 2547C820 12
+. 80 9D 00 08 7C 84 D8 00 41 86 00 B4
+
+. 0 2547C82C 16
+. 80 FD 00 10 2F 87 00 00 7F BD 3A 14 40 9E FF D8
+
+. 0 2547C810 16
+. A1 3D 00 00 7F 83 E3 78 2F 89 00 01 40 9E 01 24
+
+. 0 2547C8DC 16
+. 80 BD 00 0C 7C C5 E8 2E 7C 86 D2 14 48 00 65 F9
+
+. 0 2547C8EC 8
+. 2F 03 00 00 40 BA FF 3C
+
+. 0 2547C8F4 76
+. 38 60 00 00 80 C1 00 00 80 E6 00 04 82 66 FF CC 82 86 FF D0 7C E8 03 A6 82 A6 FF D4 82 C6 FF D8 82 E6 FF DC 83 06 FF E0 83 26 FF E4 83 46 FF E8 83 66 FF EC 83 86 FF F0 83 A6 FF F4 83 C6 FF F8 83 E6 FF FC 7C C1 33 78 4E 80 00 20
+
+. 0 2547CCF0 20
+. A1 7D 00 06 7E D6 1B 78 55 60 04 7E 7F 00 C0 40 40 99 00 08
+
+. 0 2547CD04 20
+. 7C 18 03 78 80 7D 00 0C 2F 83 00 00 7F BD 1A 14 40 9E FF A0
+
+. 0 2547CD18 12
+. 80 17 00 0C 2C 00 00 00 41 82 00 0C
+
+. 0 2547CD24 8
+. 7E F7 02 14 4B FF FF 04
+
+. 0 2547CD08 16
+. 80 7D 00 0C 2F 83 00 00 7F BD 1A 14 40 9E FF A0
+
+. 0 2547CD2C 8
+. 2E 11 00 00 41 92 00 34
+
+. 0 2547CD64 8
+. 2F 98 00 00 40 9E 00 64
+
+. 0 2547CDCC 20
+. 3B B8 00 01 38 80 00 10 7F A3 EB 78 3A 00 00 0C 48 01 AC 51
+
+. 0 2547CDE0 20
+. 80 DE 03 78 2C 03 00 00 7C 65 1B 78 90 7B 01 88 41 82 01 AC
+
+. 0 2547CDF4 20
+. 83 9B 00 E4 93 BB 01 84 83 1C 00 04 93 1B 01 98 41 8E 00 88
+
+. 0 2547CE08 88
+. 83 52 00 04 82 5B 00 00 7C D2 D2 14 81 86 00 08 7C EC 32 14 A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+
+. 0 2547CE60 12
+. 80 06 00 0C 2C 80 00 00 41 86 00 24
+
+. 0 2547CE6C 8
+. 7C C6 02 14 4B FF FF A4
+
+. 0 2547CE14 76
+. 81 86 00 08 7C EC 32 14 A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+
+. 0 2547CE8C 4
+. 41 B2 FE E0
+
+. 0 2547CD6C 96
+. 7E C3 B3 78 80 A1 00 00 82 C5 00 04 81 85 FF BC 7E C8 03 A6 82 05 FF C0 82 25 FF C4 7D 81 81 20 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+
+. 0 2547D034 8
+. 2F 83 00 00 41 BE FF CC
+
+. 0 2547D004 16
+. 83 FF 00 0C 7F 9C EB 78 2C 9F 00 00 41 86 00 40
+
+. 0 2547CCD4 28
+. 81 5D 00 08 A2 1D 00 04 80 9D 00 00 7C AA CA 14 80 DC 00 14 56 08 07 BC 4B FF FA 85
+
+. 0 2547CCB4 24
+. 81 1B 00 04 7E A7 AB 78 88 A8 00 00 7D 03 43 78 2C 85 00 00 40 86 00 0C
+
+. 0 2547CE1C 68
+. A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+
+. 0 2547CD34 28
+. 82 BB 00 00 82 F1 00 04 7D 35 BA 14 A2 69 00 04 56 60 04 7E 7C 80 C0 40 40 85 00 08
+
+. 0 2547CD54 16
+. 82 89 00 10 2F 14 00 00 7D 29 A2 14 40 9A FF E0
+
+. 0 2547CD40 16
+. A2 69 00 04 56 60 04 7E 7C 80 C0 40 40 85 00 08
+
+. 0 2547CE90 32
+. 80 DB 00 00 38 E0 00 00 83 71 00 04 7C 66 DA 14 A2 23 00 02 81 03 00 0C 72 29 00 01 40 82 00 28
+
+. 0 2547CED4 16
+. 82 43 00 10 2E 12 00 00 7C 63 92 14 40 92 FF C0
+
+. 0 2547CEA0 16
+. A2 23 00 02 81 03 00 0C 72 29 00 01 40 82 00 28
+
+. 0 2547CEB0 52
+. A3 43 00 04 83 83 00 08 57 57 23 76 7F 17 2A 14 93 98 00 04 7E 88 18 2E 90 F8 00 0C 7E 74 CA 14 7E 77 29 2E 82 43 00 10 2E 12 00 00 7C 63 92 14 40 92 FF C0
+
+. 0 2547CEE4 8
+. 7E C3 B3 78 4B FF FE 88
+
+. 0 2547CD70 92
+. 80 A1 00 00 82 C5 00 04 81 85 FF BC 7E C8 03 A6 82 05 FF C0 82 25 FF C4 7D 81 81 20 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+
+. 0 2547CD50 20
+. 7C 18 03 78 82 89 00 10 2F 14 00 00 7D 29 A2 14 40 9A FF E0
+
+. 0 2547D050 44
+. 80 81 00 24 7F 83 E3 78 83 41 00 08 83 61 00 0C 7C 88 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547180C 8
+. 2F 83 00 00 41 9E 00 14
+
+. 0 25471824 28
+. 80 61 00 24 83 A1 00 14 83 C1 00 18 7C 68 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547B70C 52
+. 93 5D 00 00 80 01 00 34 93 7C 00 00 82 E1 00 0C 7C 08 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 38 21 00 30 4E 80 00 20
+
+. 0 25472900 12
+. 83 6E 03 D8 2E 1B 00 00 40 92 0A EC
+
+. 0 2547290C 24
+. 81 6E 04 08 3A CB 00 3F 91 6E 04 14 56 D9 18 38 38 79 00 08 48 02 51 05
+
+. 0 25472924 24
+. 7F 25 CB 78 7C 6C 1B 78 38 80 00 00 38 63 00 08 91 8E 04 10 48 01 0E 0D
+
+. 0 2547293C 24
+. 80 CE 04 10 38 00 00 00 7E 89 A3 78 90 06 00 04 92 C6 00 00 41 8E 00 24
+
+. 0 25472954 16
+. 38 63 00 04 83 89 02 20 2F 1C 00 00 41 9A 00 08
+
+. 0 25472968 12
+. 81 29 00 0C 2C 09 00 00 40 82 FF E8
+
+. 0 25472958 12
+. 83 89 02 20 2F 1C 00 00 41 9A 00 08
+
+. 0 25472964 16
+. 95 23 00 08 81 29 00 0C 2C 09 00 00 40 82 FF E8
+
+. 0 25472974 4
+. 48 00 B3 59
+
+. 0 2547DCCC 12
+. 94 21 FF E0 7D 88 02 A6 48 01 93 2D
+
+. 0 2547DCD8 72
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 93 81 00 10 38 60 00 00 93 A1 00 14 7D 88 03 A6 83 7E 04 C8 3B 80 00 20 93 E1 00 1C 3B A0 00 00 81 3B 04 10 3B E0 00 00 39 80 00 01 38 A9 00 08 80 05 00 0C 2F 80 00 00 41 9E 01 04
+
+. 0 2547DD20 8
+. 38 C0 00 08 48 00 00 58
+
+. 0 2547DD7C 36
+. 7D 66 2A 14 81 4B 00 04 81 6A 02 24 81 0A 02 28 7C 1C 58 40 38 8B FF FF 7C E8 00 D0 7C E8 20 38 40 80 00 08
+
+. 0 2547DDA4 16
+. 80 EA 02 20 7C 9F E8 50 7C 87 20 40 40 A5 FF 78
+
+. 0 2547DDB4 40
+. 7D 66 2A 14 81 4B 00 04 81 6A 02 24 7C CB 1A 14 38 06 FF FF 7D 20 5B 96 7D 29 59 D6 7C E3 48 50 7C 07 40 40 40 80 00 08
+
+. 0 2547DDE0 20
+. 7D 68 48 50 7C E3 58 50 91 6A 02 2C 7C 87 20 40 40 85 00 0C
+
+. 0 2547DDFC 36
+. 80 6A 02 20 39 8C 00 01 55 86 18 38 7D 43 4A 14 7C 68 50 50 7D 06 2A 14 80 88 00 04 2F 04 00 00 40 9A FF 60
+
+. 0 2547DE20 48
+. 3B A3 06 9F 93 9B 04 20 57 A5 00 34 90 7B 04 1C 90 BB 04 18 83 81 00 10 83 61 00 0C 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25472978 4
+. 48 00 B5 7D
+
+. 0 2547DEF4 12
+. 94 21 FF E0 7C 68 02 A6 48 01 91 05
+
+. 0 2547DF00 64
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 90 61 00 24 93 A1 00 14 83 9E 04 C8 93 61 00 0C 80 7C 04 20 80 9C 04 18 38 A3 04 6F 7D 23 00 D0 7C A0 48 38 93 E1 00 1C 7F A4 02 14 7F A4 EB 78 48 01 9A E1
+
+. 0 2547DF40 24
+. 38 A0 04 70 7C 7F 1B 79 38 80 00 00 7F FB FB 78 7C 1F EA 14 41 82 00 4C
+
+. 0 2547DF58 16
+. 80 FC 04 18 7F A7 00 50 38 7D FB 90 48 00 57 E1
+
+. 0 254837BC 20
+. 90 88 FF FC 90 88 FF F8 90 88 FF F4 94 88 FF F0 40 9D 00 08
+
+. 0 25483894 20
+. 7C 00 37 EC 7C 07 37 EC 38 C6 00 40 2A 85 00 00 41 82 00 7C
+
+. 0 2547DF68 20
+. 80 DC 04 08 38 80 00 04 38 66 00 10 3B E6 00 0E 48 01 9A B5
+
+. 0 2547DF7C 12
+. 2C 83 00 00 38 03 00 04 41 86 00 44
+
+. 0 2547DF88 24
+. 93 E3 00 00 90 1D FF FC 2F 9D 00 00 7F BF EB 78 7F 63 DB 78 41 9E 00 34
+
+. 0 2547DFA0 40
+. 81 01 00 24 7F E3 FB 78 83 61 00 0C 83 81 00 10 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547297C 8
+. 7C 71 1B 79 41 82 0E 04
+
+. 0 25472984 28
+. 81 3F 00 30 39 54 01 58 83 11 FF FC 2F 89 00 00 91 5F 00 78 93 0E 04 24 40 9E 0E 38
+
+. 0 254729A0 16
+. 80 74 01 3C 2E 12 00 00 2F 83 00 00 41 9E 01 18
+
+. 0 25472AC4 8
+. 80 6E 01 B8 48 00 94 7D
+
+. 0 2547BF44 12
+. 94 21 FF F0 7D 88 02 A6 48 01 B0 B5
+
+. 0 2547BF50 36
+. 93 C1 00 08 7F C8 02 A6 7C 68 1B 78 7D 88 03 A6 81 5E 05 18 80 0A 00 08 7D 43 53 78 2F 80 00 00 40 9E 00 24
+
+. 0 2547BF74 44
+. 81 3E 04 C8 38 A0 00 01 80 9E 05 08 81 69 00 00 90 AA 00 00 91 0A 00 10 91 6A 00 04 90 8A 00 08 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 25472ACC 16
+. 81 34 00 74 7C 7A 1B 78 2F 09 00 00 41 9A 00 08
+
+. 0 25472ADC 16
+. 90 69 00 04 81 2E 02 2C 2F 89 00 00 41 9E 00 08
+
+. 0 25472AF0 24
+. 82 5F 00 78 C8 14 01 58 92 4E 00 08 D8 0F 00 14 92 4E 00 10 41 92 08 44
+
+. 0 25473348 40
+. 81 74 00 0C 7E 9D A3 78 82 0F 00 50 2C 0B 00 00 82 EF 00 28 31 50 FF FF 7F 6A 81 10 7E F3 DB 78 92 6F 00 28 41 82 00 18
+
+. 0 25473370 20
+. 7D 60 5B 78 7C 1D 03 78 80 1D 00 0C 2F 00 00 00 40 9A FF F4
+
+. 0 25473374 16
+. 7C 1D 03 78 80 1D 00 0C 2F 00 00 00 40 9A FF F4
+
+. 0 25473384 16
+. 82 DD 00 1C 81 36 00 04 2F 89 00 00 40 9E 04 20
+
+. 0 254737B0 20
+. 38 00 00 01 90 09 00 08 81 29 00 04 2C 89 00 00 41 86 FB D4
+
+. 0 25473394 12
+. 3B 8E 01 B8 7D 9C E8 00 41 8E 00 18
+
+. 0 254733B4 12
+. 83 BD 00 10 2E 1D 00 00 40 92 FF C8
+
+. 0 254733A0 20
+. 80 9D 01 C0 7F A3 EB 78 80 AF 00 28 7F 66 DB 78 48 00 62 11
+
+. 0 254795C0 20
+. 7C E8 02 A6 94 21 FF 50 7D 80 00 26 93 E1 00 AC 48 01 DA 31
+
+. 0 254795D4 108
+. 7C 3F 0B 78 90 E1 00 B4 80 03 01 80 92 21 00 74 3A 20 00 00 74 08 20 00 93 61 00 9C 93 81 00 A0 7C DB 33 78 93 C1 00 A8 7C BC 2B 78 92 01 00 70 7F C8 02 A6 92 41 00 78 92 61 00 7C 92 81 00 80 92 A1 00 84 92 C1 00 88 92 E1 00 8C 93 01 00 90 93 21 00 94 93 41 00 98 93 A1 00 A4 91 81 00 6C 90 9F 00 10 90 7F 00 0C 40 82 01 18
+
+. 0 25479640 8
+. 2D 86 00 00 41 8E 01 6C
+
+. 0 254797B0 20
+. 80 A3 00 80 30 85 FF FF 7C 84 21 10 7F 9C 20 38 4B FF FE 88
+
+. 0 25479648 16
+. 83 5E 04 F4 80 DA 00 00 70 C9 00 20 40 82 07 B4
+
+. 0 25479658 16
+. 80 7F 00 0C 81 43 00 78 2E 0A 00 00 40 92 06 C0
+
+. 0 25479668 32
+. 80 C3 00 34 7F 84 E3 78 7F 65 DB 78 3B 80 00 01 83 A6 00 04 3B 60 00 00 93 BF 00 08 48 00 71 AD
+
+. 0 25479688 44
+. 83 BF 00 0C 38 C0 00 00 93 9F 00 34 81 7D 00 3C 93 7F 00 30 2C 8B 00 00 93 7F 00 40 93 7F 00 28 93 7F 00 3C 90 DF 00 24 41 86 00 18
+
+. 0 254796B4 28
+. 81 0B 00 04 80 9D 00 40 91 1F 00 20 80 C4 00 04 90 DF 00 24 2E 03 00 00 41 92 00 10
+
+. 0 254796DC 48
+. 82 1E 04 C8 3A BF 00 08 3A 40 00 02 80 B5 00 20 7F B6 EB 78 81 75 00 18 2C 05 00 00 81 15 00 1C 7D 69 5B 78 83 3D 00 00 7F 0B 42 14 41 82 00 BC
+
+. 0 254797C4 24
+. 81 5D 00 C0 38 E0 00 00 80 7D 00 38 2F 8A 00 00 82 63 00 04 41 9E 00 08
+
+. 0 254797DC 28
+. 80 EA 00 04 3C 00 AA AA 60 0C AA AB 7E E8 60 16 56 E9 E8 FE 7C 89 38 40 40 85 00 08
+
+. 0 254797F8 24
+. 7C E9 3B 78 3B 70 01 B8 1C E9 00 0C 7E 1D D8 00 7F 8B 3A 14 41 92 00 18
+
+. 0 25479810 8
+. 2F 19 00 00 40 9A 01 CC
+
+. 0 25479818 12
+. 80 DD 01 1C 2C 06 00 00 41 82 01 C0
+
+. 0 25479824 12
+. 81 3D 00 E4 2F 89 00 00 41 9E 02 80
+
+. 0 25479830 12
+. 7C 9C C0 40 82 89 00 04 40 A4 FE E8
+
+. 0 2547983C 68
+. 81 9C 00 04 81 56 01 88 55 97 C2 3E 55 9D 06 3E 56 EB 08 3C 2E 1D 00 16 7C AB A2 2E 56 E3 20 36 81 7C 00 00 7F 63 9A 14 54 BA 23 76 93 7F 00 50 7D 4A D2 14 7E EB CA 14 7F 66 DB 78 3B 40 00 00 41 92 01 DC
+
+. 0 25479880 8
+. 2F 1D 00 00 41 9A 01 48
+
+. 0 25479888 12
+. 88 9B 00 0C 54 88 E1 3F 41 82 06 2C
+
+. 0 25479894 12
+. 8B 5B 00 0C 57 49 E1 3F 41 82 01 B4
+
+. 0 254798A0 28
+. 80 9F 00 0C 2E 1D 00 15 2C 9D 00 0A 2F 1D 00 13 80 C4 02 08 7F 86 D8 00 41 9E 06 2C
+
+. 0 254798BC 28
+. 7C C0 00 26 54 C6 9F FE 39 00 00 00 7F 40 00 26 57 5A 3F FE 7C CB D3 79 40 82 01 30
+
+. 0 254798D8 36
+. 38 7D FF BC 6B A5 00 02 21 65 00 00 7C AB 29 14 21 23 00 0A 39 20 00 00 7D 29 49 14 7D 2B 2B 79 40 82 01 0C
+
+. 0 254798FC 40
+. 7C 00 00 26 54 00 DF FE 2C 8A 00 00 93 64 02 08 38 E0 00 00 54 0C 08 3C 39 60 00 01 7D 88 43 78 91 04 02 0C 41 86 00 18
+
+. 0 25479924 12
+. 83 4A 00 04 2E 1A 00 00 41 92 00 0C
+
+. 0 25479930 44
+. 7D 47 53 78 39 60 00 00 80 DF 00 50 7D 69 5B 78 81 5F 00 08 80 A6 00 00 80 DF 00 10 7C 65 52 14 38 BF 00 50 39 40 00 00 4B FF EC 4D
+
+. 0 254785A4 16
+. 7C 08 02 A6 7D 80 00 26 94 21 FF 60 48 01 EA 51
+
+. 0 254785B4 124
+. 92 61 00 6C 7C 93 23 78 92 81 00 70 7D 14 43 78 92 A1 00 74 7C D5 33 78 92 C1 00 78 7C B6 2B 78 92 E1 00 7C 7D 37 4B 78 93 01 00 80 7C F8 3B 78 93 21 00 84 7C 79 1B 78 93 41 00 88 7D 5A 53 78 93 81 00 90 3B 80 00 00 93 C1 00 98 7F C8 02 A6 93 E1 00 9C 7C 3F 0B 78 92 21 00 64 92 41 00 68 93 61 00 8C 93 A1 00 94 90 01 00 A4 91 81 00 60 88 03 00 00 2F 80 00 00 41 9E 00 18
+
+. 0 25478630 20
+. 89 63 00 01 7C 1C 03 78 39 23 00 01 2C 0B 00 00 40 82 01 5C
+
+. 0 2547879C 24
+. 89 49 00 01 54 03 20 36 7F 83 5A 14 39 29 00 01 2C 8A 00 00 41 A6 FE 94
+
+. 0 254787B4 24
+. 89 69 00 01 57 84 20 36 7F 84 52 14 39 29 00 01 2E 0B 00 00 41 B2 FE 7C
+
+. 0 254787CC 24
+. 89 49 00 01 57 85 20 36 7F 85 5A 14 39 29 00 01 2F 0A 00 00 41 BA FE 64
+
+. 0 25478644 48
+. 82 5E 04 C8 2E 1A 00 00 3B 60 00 00 39 40 00 00 83 B2 01 A8 93 7F 00 48 3B 60 00 00 39 7D 00 01 91 5F 00 4C 91 72 01 A8 7E BD AB 78 40 92 05 38
+
+. 0 25478674 16
+. 81 75 00 00 2C 8B 00 00 7F 68 DB 78 41 86 00 4C
+
+. 0 25478684 44
+. 80 F5 00 00 3A 3F 00 18 80 B6 00 00 7F 23 CB 78 7F 84 E3 78 38 DF 00 48 7F 09 C3 78 7E EA BB 78 93 41 00 08 92 81 00 0C 4B FF FA FD
+
+. 0 254781A8 12
+. 7D 68 02 A6 94 21 FF 90 48 01 EE 51
+
+. 0 254781B4 140
+. 93 C1 00 68 80 01 00 78 7F C8 02 A6 91 61 00 74 7D 80 00 26 91 E1 00 2C 92 01 00 30 81 E7 00 04 82 07 00 00 91 C1 00 28 7C 8E 23 78 92 21 00 34 92 A1 00 44 7D 35 4B 78 93 41 00 58 31 20 FF FF 7E 29 01 10 7D 1A 43 78 92 41 00 38 92 61 00 3C 92 81 00 40 92 C1 00 48 92 E1 00 4C 93 01 00 50 93 21 00 54 93 61 00 5C 93 81 00 60 93 A1 00 64 93 E1 00 6C 91 81 00 24 90 61 00 08 90 A1 00 0C 90 C1 00 10 91 41 00 14 48 00 00 10
+
+. 0 2547824C 44
+. 57 45 10 3A 80 81 00 78 7D 25 80 2E 3A 80 00 00 3A 60 00 00 83 89 00 14 7F 83 22 78 21 63 00 00 7C 6B 19 14 7E 29 18 39 40 82 FF CC
+
+. 0 25478278 12
+. 80 C1 00 7C 54 CB FF FF 41 82 00 10
+
+. 0 25478290 16
+. 81 5E 04 F4 81 0A 00 00 71 09 00 08 40 82 02 48
+
+. 0 254782A0 56
+. 80 1C 01 6C 83 3C 01 70 7D 8E 03 96 81 7C 00 38 80 7C 00 34 83 0B 00 04 82 43 00 04 82 DC 01 98 7F EC 01 D6 7F BF 70 50 57 BB 10 3A 7F FB C8 2E 2E 1F 00 00 41 92 00 F8
+
+. 0 254782D8 8
+. 2D 96 00 00 48 00 00 28
+
+. 0 25478304 20
+. 57 E9 20 36 7F A9 C2 14 80 9D 00 04 2C 84 00 00 41 A6 FF CC
+
+. 0 25478318 12
+. A0 FD 00 0E 2C 07 00 00 40 82 00 10
+
+. 0 25478324 12
+. 81 01 00 7C 71 00 00 01 40 A2 FF C4
+
+. 0 25478330 16
+. 8B 7D 00 0C 57 60 07 3E 2F 80 00 02 40 9D 00 0C
+
+. 0 25478348 12
+. 81 41 00 0C 7F 1D 50 00 41 9A 00 1C
+
+. 0 25478354 16
+. 7E E9 C0 2E 80 81 00 08 7C 77 92 14 48 00 AB 81
+
+. 0 25478364 8
+. 2C 03 00 00 40 82 FF 88
+
+. 0 254782F0 20
+. 83 BC 01 74 57 E7 10 3A 7F E7 E8 2E 2E 1F 00 00 41 92 01 08
+
+. 0 25478408 8
+. 2D 94 00 01 40 8E FF C0
+
+. 0 254783CC 24
+. 7F 00 00 26 57 18 9F FE 31 75 FF FF 7E CB A9 10 7F 0B B0 39 41 A2 FE 60
+
+. 0 254783E4 12
+. 80 75 00 0C 2E 03 00 00 41 92 FE 54
+
+. 0 25478240 12
+. 3B 5A 00 01 7F 9A 78 40 40 9C 01 DC
+
+. 0 2547836C 8
+. 2F 95 00 00 41 9E 01 30
+
+. 0 25478374 4
+. 41 8E 00 44
+
+. 0 25478378 36
+. 57 F9 08 3C 81 5C 01 88 7E F9 B2 2E 81 35 00 04 56 EB 23 76 7D 8B 52 14 83 2C 00 04 7C 99 48 00 41 86 00 F0
+
+. 0 25478488 12
+. 7C 6B 50 2E 80 95 00 00 48 00 AA 51
+
+. 0 25478494 8
+. 2F 03 00 00 40 9A FF 04
+
+. 0 2547849C 4
+. 4B FF FF 1C
+
+. 0 254783B8 12
+. 57 60 E1 3E 2F 80 00 01 41 9E 01 7C
+
+. 0 2547853C 20
+. 81 C1 00 10 38 60 00 01 93 8E 00 04 93 AE 00 00 4B FF FE DC
+
+. 0 25478428 96
+. 81 E1 00 74 81 81 00 24 7D E8 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 254786B0 12
+. 2C 03 00 00 39 00 00 00 41 81 00 14
+
+. 0 254786CC 12
+. 81 5F 00 48 2F 8A 00 00 41 9E 05 04
+
+. 0 254786D8 12
+. 80 B6 00 00 2F 85 00 00 41 9E 01 40
+
+. 0 254786E4 24
+. 88 E5 00 0D 39 20 00 00 2E 09 00 00 54 E6 07 BE 2F 06 00 03 41 9A 02 34
+
+. 0 254786FC 28
+. 3B 7F 00 48 83 9B 00 04 3D 80 80 00 80 1C 01 80 54 1D 00 02 7F 1D 60 00 41 9A 05 BC
+
+. 0 25478718 32
+. 82 BB 00 04 83 BE 04 F4 83 55 01 80 67 57 00 10 92 F5 01 80 80 1D 00 00 70 09 04 04 40 82 02 A0
+
+. 0 25478738 100
+. 80 7B 00 04 80 1F 00 48 90 16 00 00 80 E1 00 00 82 C7 00 04 81 07 FF C0 7E C8 03 A6 82 27 FF C4 82 47 FF C8 7D 00 81 20 82 67 FF CC 82 87 FF D0 82 A7 FF D4 82 C7 FF D8 82 E7 FF DC 83 07 FF E0 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+
+. 0 2547995C 36
+. 81 1F 00 50 80 9F 00 0C 7C 7A 1B 78 7D 06 43 78 90 64 02 10 91 04 02 14 2F 06 00 00 39 20 00 00 41 9A 00 10
+
+. 0 25479980 28
+. 80 FA 00 00 81 66 00 04 7D 27 5A 14 2F 9D 00 44 80 1C 00 08 7D 29 02 14 41 9E 01 00
+
+. 0 2547999C 4
+. 41 9D 00 D0
+
+. 0 254799A0 8
+. 2F 9D 00 01 41 9E 00 EC
+
+. 0 25479A90 8
+. 91 37 00 00 4B FF FF 38
+
+. 0 254799CC 12
+. 3B 9C 00 0C 7E 1C C0 40 41 90 FE 68
+
+. 0 254787E4 24
+. 89 69 00 01 57 86 20 36 7F 86 52 14 39 49 00 01 2F 8B 00 00 41 BE FE 4C
+
+. 0 254787FC 36
+. 57 92 20 36 7E 32 5A 14 8D 6A 00 01 56 28 00 06 2F 8B 00 00 7E 27 42 78 55 09 46 3E 7C FC 4A 78 4B FF FF DC
+
+. 0 254787F8 4
+. 41 BE FE 4C
+
+. 0 254782E0 16
+. 88 DD 00 0C 54 C5 07 3E 2F 05 00 06 41 9A 00 2C
+
+. 0 25479EE4 28
+. 7D 20 00 26 55 29 9F FE 39 60 00 00 7C A0 00 26 54 A5 3F FE 7D 28 2B 79 40 82 00 58
+
+. 0 25479F00 36
+. 38 1D FF BC 6B A3 00 02 21 03 00 00 7C 68 19 14 21 80 00 0A 39 80 00 00 7D 8C 61 14 7D 88 1B 79 40 82 00 34
+
+. 0 25479F24 12
+. 2F 1D 00 13 81 24 02 0C 41 9A 00 B4
+
+. 0 25479F30 8
+. 7F 8B 48 00 40 9E F9 88
+
+. 0 25479F38 28
+. 81 70 01 AC 80 C4 02 14 83 44 02 10 39 4B 00 01 91 50 01 AC 90 DF 00 50 4B FF FA 24
+
+. 0 25479974 12
+. 2F 06 00 00 39 20 00 00 41 9A 00 10
+
+. 0 254799A8 8
+. 2C 1D 00 14 41 82 00 E4
+
+. 0 25479EBC 12
+. A1 1B 00 0E 2F 88 00 00 41 BE F9 D0
+
+. 0 25479A50 8
+. 83 5F 00 0C 4B FF FF 20
+
+. 0 25479A98 8
+. 2F 1A 00 00 41 BA FF 30
+
+. 0 25479AA0 12
+. 83 7A 02 30 93 77 00 00 4B FF FF 24
+
+. 0 25479A6C 8
+. 2C 9D 00 49 41 86 02 38
+
+. 0 25479CA8 8
+. 2C 1A 00 00 41 A2 FD 20
+
+. 0 25479CB0 12
+. 81 7A 02 2C 2C 8B FF FF 41 86 03 4C
+
+. 0 25479CBC 24
+. 83 46 00 04 7D 8B D2 14 7C 6C 02 14 39 23 90 00 91 37 00 00 4B FF FC FC
+
+. 0 254783C4 8
+. 2C 00 00 02 41 82 01 64
+
+. 0 2547852C 16
+. 82 9E 04 F4 82 74 00 30 2C 93 00 00 40 86 00 18
+
+. 0 254783F0 8
+. 7F 84 E3 78 48 00 42 E1
+
+. 0 254783F8 8
+. 2F 03 00 00 41 9A FE 44
+
+. 0 25479938 36
+. 80 DF 00 50 7D 69 5B 78 81 5F 00 08 80 A6 00 00 80 DF 00 10 7C 65 52 14 38 BF 00 50 39 40 00 00 4B FF EC 4D
+
+. 0 25478424 100
+. 38 60 00 00 81 E1 00 74 81 81 00 24 7D E8 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 254786BC 4
+. 41 80 01 6C
+
+. 0 254786C0 12
+. 84 FD 00 04 2F 07 00 00 40 9A FF C4
+
+. 0 25478BD8 12
+. 81 36 00 00 2C 09 00 00 40 82 01 B8
+
+. 0 25478D98 16
+. 8B A9 00 0C 57 BB E1 3E 2C 9B 00 02 40 86 FE 40
+
+. 0 25478DA8 12
+. 38 60 00 00 38 00 00 00 4B FF F9 90
+
+. 0 25478740 92
+. 90 16 00 00 80 E1 00 00 82 C7 00 04 81 07 FF C0 7E C8 03 A6 82 27 FF C4 82 47 FF C8 7D 00 81 20 82 67 FF CC 82 87 FF D0 82 A7 FF D4 82 C7 FF D8 82 E7 FF DC 83 07 FF E0 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+
+. 0 2547998C 16
+. 2F 9D 00 44 80 1C 00 08 7D 29 02 14 41 9E 01 00
+
+. 0 254784A0 4
+. 41 AE FF 18
+
+. 0 254784A4 12
+. 80 61 00 14 54 69 FF FF 41 82 00 6C
+
+. 0 25478518 20
+. 57 E9 08 3C 7C C9 B2 2E 54 C5 04 7E 2F 85 00 02 4B FF FF 98
+
+. 0 254784C0 4
+. 40 BD FE F8
+
+. 0 25479A04 8
+. 39 00 00 01 4B FF FE F4
+
+. 0 254799B0 28
+. 7F 45 D3 78 7F 67 DB 78 7E E8 BB 78 7F AA EB 78 7E C3 B3 78 7F 84 E3 78 48 00 72 E1
+
+. 0 25480FF0 24
+. 3D 3A FE 00 3D 60 FC 00 38 69 00 03 61 60 00 02 7C 03 00 40 40 81 01 94
+
+. 0 25481198 28
+. 81 61 00 08 81 4B 00 2C 81 0A 00 04 7C E8 28 50 7C EA 16 70 28 8A 40 11 41 85 00 A0
+
+. 0 254811B4 56
+. 81 6B 00 28 3C 60 AA AA 60 60 AA AB 3B AA FF EE 83 EB 00 04 57 AB F8 7E 7F 7F 00 16 57 7C E8 FE 2B 1C 20 00 57 87 08 3C 39 27 00 12 55 2C 10 3A 7C CC 42 14 40 99 00 14
+
+. 0 254811FC 60
+. 1F EA FF FC 55 6C 10 3A 55 9B 04 3E 7F 4C 31 2E 3B BF FF FC 67 6A 39 60 57 BC 01 BA 91 45 00 00 67 87 48 00 94 E5 00 04 7C 00 28 6C 7C 00 04 AC 80 A1 00 0C 39 05 00 04 7C 00 47 AC
+
+. 0 25481238 8
+. 80 A1 00 0C 4B FF FB C4
+
+. 0 254799D8 8
+. 83 BF 00 0C 4B FF FD 44
+
+. 0 25479720 12
+. 36 52 FF FF 3A B5 00 0C 40 80 FF C0
+
+. 0 254796E8 36
+. 80 B5 00 20 7F B6 EB 78 81 75 00 18 2C 05 00 00 81 15 00 1C 7D 69 5B 78 83 3D 00 00 7F 0B 42 14 41 82 00 BC
+
+. 0 2547970C 8
+. 7F 8B C0 40 40 9C 00 10
+
+. 0 254797FC 20
+. 3B 70 01 B8 1C E9 00 0C 7E 1D D8 00 7F 8B 3A 14 41 92 00 18
+
+. 0 2547972C 4
+. 40 8E 07 10
+
+. 0 25479730 20
+. 82 7D 01 80 2F 91 00 00 66 72 20 00 92 5D 01 80 40 9E 05 C0
+
+. 0 25479744 16
+. 80 7F 00 0C 82 A3 02 38 2F 95 00 00 40 9E 02 BC
+
+. 0 25479A0C 4
+. 4B FF FB 1D
+
+. 0 25479528 12
+. 94 21 FF F0 7D 88 02 A6 48 01 DA D1
+
+. 0 25479534 80
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 91 81 00 14 7C 7F 1B 78 80 A3 00 00 81 3E 04 F4 81 03 02 34 80 E3 02 38 80 C9 00 04 7C 65 42 14 7C 83 3A 14 38 A0 00 01 7D 46 00 D0 7C 60 50 38 7C 8B 50 38 7F 80 58 00 7C 03 03 78 7C 80 58 50 41 9E 00 10
+
+. 0 25479584 4
+. 48 00 92 35
+
+. 0 25479588 8
+. 2C 03 00 00 41 80 00 1C
+
+. 0 25479590 24
+. 81 61 00 14 83 C1 00 08 83 E1 00 0C 7D 68 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 25479A10 4
+. 4B FF FD 44
+
+. 0 25479754 92
+. 80 81 00 00 83 24 00 04 81 84 FF BC 7F 28 03 A6 82 04 FF C0 82 24 FF C4 7D 81 81 20 82 44 FF C8 82 64 FF CC 82 84 FF D0 82 A4 FF D4 82 C4 FF D8 82 E4 FF DC 83 04 FF E0 83 24 FF E4 83 44 FF E8 83 64 FF EC 83 84 FF F0 83 A4 FF F4 83 C4 FF F8 83 E4 FF FC 7C 81 23 78 4E 80 00 20
+
+. 0 25480904 12
+. 2F 85 00 00 3B 9F 00 18 41 9E 02 64
+
+. 0 25480B70 8
+. 83 1E 04 CC 4B FF FD A0
+
+. 0 25480914 4
+. 40 9E 02 C0
+
+. 0 25480918 88
+. 3C 98 FE 00 3C 00 FC 00 38 64 00 03 60 06 00 02 39 80 00 00 7F 03 30 40 61 8A 80 00 7C BA 00 D0 7D 1A 50 50 54 A9 04 3E 55 07 84 3E 3D 60 55 6C 3C 80 7D 6C 64 E0 3D 6B 65 23 39 6B 61 66 08 3C 60 8C 5A 14 90 1C 00 00 90 7C 00 04 90 DC 00 08 91 9C 00 0C 40 99 02 0C
+
+. 0 25480B78 92
+. 3C 98 00 01 3C 7D 00 01 38 C4 80 00 39 83 80 00 54 C9 84 3E 57 A7 04 3E 57 08 04 3E 55 8B 84 3E 3D 40 7D 89 3C A0 4E 80 65 18 39 80 65 3D 3D 8C 61 44 03 A6 64 E3 39 80 65 66 3D 8C 60 A0 04 20 90 1C 00 24 93 1C 00 10 93 BC 00 14 90 9C 00 18 90 7C 00 1C 90 DC 00 20 4B FF FD D0
+
+. 0 254809A0 16
+. 39 00 00 00 38 80 00 12 7F 88 D8 40 40 9C 00 54
+
+. 0 254809B0 80
+. 38 E0 FF D4 55 0A 14 3A 39 08 00 01 7D 7B 40 10 7D 6B 59 10 7D 6B 00 D0 21 88 1F FF 39 80 00 00 7D 8C 61 14 7D 69 60 39 54 98 10 3A 54 E5 01 BA 65 40 39 60 64 BD 48 00 7F 98 FA 14 7C 18 F9 2E 38 84 00 02 93 BC 00 04 38 E7 FF F8 40 82 FF B8
+
+. 0 254809B4 76
+. 55 0A 14 3A 39 08 00 01 7D 7B 40 10 7D 6B 59 10 7D 6B 00 D0 21 88 1F FF 39 80 00 00 7D 8C 61 14 7D 69 60 39 54 98 10 3A 54 E5 01 BA 65 40 39 60 64 BD 48 00 7F 98 FA 14 7C 18 F9 2E 38 84 00 02 93 BC 00 04 38 E7 FF F8 40 82 FF B8
+
+. 0 25480A00 8
+. 7C 88 D8 40 40 84 00 74
+
+. 0 25480A98 12
+. 80 0B 00 00 2F 00 00 00 40 9A 00 C4
+
+. 0 25480B64 12
+. 7C 0A 16 70 7D 4A 01 94 4B FF FF 38
+
+. 0 25480AA8 12
+. 7E EB BB 78 39 20 00 00 48 00 00 14
+
+. 0 254796D0 12
+. 81 7D 00 70 2F 0B 00 00 40 9A 03 3C
+
+. 0 25479A14 60
+. 83 5D 00 7C 83 3D 00 28 82 9A 00 04 82 BF 00 20 92 9F 00 2C 82 59 00 04 7D 55 32 14 7D 35 A0 50 7E 14 92 14 91 3F 00 24 7E 70 50 50 92 5F 00 30 92 7F 00 3C 92 1F 00 38 4B FF FC 90
+
+. 0 25479714 12
+. 39 29 00 0C 7F 89 C0 40 41 9C FF F8
+
+. 0 254799E0 8
+. 7F 8B E0 40 40 BC FE 40
+
+. 0 254799E8 28
+. 81 2B 00 08 80 8B 00 00 39 6B 00 0C 7F 8B E0 40 7D 09 CA 14 7D 04 C9 2E 4B FF FF E4
+
+. 0 254799E4 4
+. 40 BC FE 40
+
+. 0 254797E0 24
+. 3C 00 AA AA 60 0C AA AB 7E E8 60 16 56 E9 E8 FE 7C 89 38 40 40 85 00 08
+
+. 0 254733C0 4
+. 48 00 C2 25
+
+. 0 2547F5E4 12
+. 94 21 FF F0 38 21 00 10 4E 80 00 20
+
+. 0 254733C4 12
+. 80 CE 01 A4 2C 06 00 00 40 82 09 4C
+
+. 0 254733D0 12
+. 81 6E 03 30 2B 0B 00 01 40 99 F7 84
+
+. 0 254733DC 20
+. 80 94 01 C0 7F 83 E3 78 38 A0 00 00 38 C0 00 00 48 00 61 D5
+
+. 0 254733F0 4
+. 4B FF F7 6C
+
+. 0 25472B5C 8
+. 7E 23 8B 78 48 00 B4 79
+
+. 0 2547DFD8 20
+. 94 21 FF D0 7C 08 02 A6 93 21 00 14 7C 79 1B 79 48 01 90 19
+
+. 0 2547DFEC 48
+. 93 C1 00 28 92 E1 00 0C 7F C8 02 A6 93 01 00 10 38 60 00 00 93 41 00 18 93 61 00 1C 93 81 00 20 93 A1 00 24 93 E1 00 2C 90 01 00 34 41 82 00 D8
+
+. 0 2547E01C 40
+. 83 1E 04 C8 3B 40 00 00 82 F9 FF FC 83 78 04 10 81 7B 00 00 20 1A 00 00 7F E0 D1 14 7D 3A FA 14 7F 8B F8 40 40 9D 00 64
+
+. 0 2547E044 28
+. 57 E3 18 38 7D 23 DA 14 3B 89 00 0C 80 98 04 08 7D 3A FA 14 7C 89 20 40 41 85 00 48
+
+. 0 2547E060 12
+. 81 3C 00 00 2F 09 00 00 41 9A 00 28
+
+. 0 2547E06C 20
+. 81 69 02 2C 2C 0B FF FF 7D 4B CA 14 7D 43 53 78 40 82 00 40
+
+. 0 2547E0BC 32
+. 81 69 02 30 55 68 10 3A 7D 48 B9 2E 80 A9 02 1C 80 E9 02 20 80 89 02 18 7F A5 38 50 48 00 59 49
+
+. 0 2547E0DC 12
+. 7F A5 EB 78 38 80 00 00 48 00 56 61
+
+. 0 25483920 12
+. 28 85 00 10 7C C6 2A 14 41 9F 00 20
+
+. 0 25483934 4
+. 40 84 00 2C
+
+. 0 25483938 4
+. 4C 9C 00 20
+
+. 0 2548393C 12
+. 90 86 FF FC 90 86 FF F8 4E 80 00 20
+
+. 0 2547E0E8 4
+. 4B FF FF A4
+
+. 0 2547E08C 20
+. 81 7B 00 00 3B FF 00 01 3B 9C 00 08 7F 8B F8 40 41 9D FF B4
+
+. 0 2547E050 16
+. 80 98 04 08 7D 3A FA 14 7C 89 20 40 41 85 00 48
+
+. 0 2547E0A4 16
+. 81 58 04 08 7D 3A 4B 78 7C 8A 48 40 40 85 00 3C
+
+. 0 2547E0EC 56
+. 7F 23 CB 78 81 81 00 34 82 E1 00 0C 83 01 00 10 7D 88 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 25472B64 16
+. 38 51 70 00 3B 00 00 01 93 1A 00 0C 48 00 94 31
+
+. 0 2547BFA0 12
+. 94 21 FF F0 38 21 00 10 4E 80 00 20
+
+. 0 25472B74 4
+. 48 00 55 BD
+
+. 0 25478130 12
+. 94 21 FF F0 7C 68 02 A6 48 01 EE C9
+
+. 0 2547813C 40
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 61 00 14 38 00 FF FD 83 FE 02 54 80 7F 00 00 39 23 FF FF 7F 89 00 40 40 9D 00 1C
+
+. 0 2547817C 12
+. 80 BE 02 5C 80 85 00 00 48 00 A6 25
+
+. 0 254827A8 8
+. 38 00 00 5B 44 00 00 02
+
+. 0 254827B0 4
+. 4C A3 00 20
+
+. 0 25478188 32
+. 38 80 00 00 80 C1 00 14 90 9F 00 00 83 C1 00 08 7C C8 03 A6 83 E1 00 0C 38 21 00 10 4E 80 00 20
+
+. 0 25472B78 100
+. 81 01 00 00 82 28 00 04 81 88 FF B4 7E 28 03 A6 81 C8 FF B8 81 E8 FF BC 7D 81 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+. 0 2547F324 64
+. 82 A1 02 24 80 61 01 E0 7E A8 03 A6 82 C1 01 F8 82 A1 01 F4 82 E1 01 FC 83 01 02 00 83 21 02 04 83 41 02 08 83 61 02 0C 83 81 02 10 83 A1 02 14 83 C1 02 18 83 E1 02 1C 38 21 02 20 4E 80 00 20
+
+. 0 25471A20 24
+. 81 3E 04 F4 7C 7D 1B 78 38 60 00 00 80 09 00 00 70 09 00 80 40 82 00 28
+
+. 0 25471A38 36
+. 80 81 00 24 7F A3 EB 78 83 61 00 0C 83 81 00 10 7C 88 03 A6 83 A1 00 14 83 C1 00 18 38 21 00 20 4E 80 00 20
+
+. 0 25471DD0 52
+. 82 E1 02 B4 83 01 02 90 7E E8 03 A6 83 21 02 94 82 E1 02 8C 83 41 02 98 83 61 02 9C 83 81 02 A0 83 A1 02 A4 83 C1 02 A8 83 E1 02 AC 38 21 02 B0 4E 80 00 20
+
+. 0 254804E8 4
+. 48 01 6B 19
+
+. 0 254804EC 48
+. 7F E8 02 A6 7C 7E 1B 78 83 9F 04 98 83 BF 04 D0 83 7F 04 DC 80 7C 00 00 80 9D 00 00 80 BB 00 00 54 86 10 3A 7C C5 32 14 38 C6 00 04 4B FF B2 29
+
+. 0 2547B740 12
+. 94 21 FF C0 7C 08 02 A6 48 01 B8 B9
+
+. 0 2547B74C 96
+. 93 C1 00 38 7F C8 02 A6 92 C1 00 18 90 01 00 44 93 E1 00 3C 82 DE 04 C8 92 E1 00 1C 7C 97 23 78 83 F6 01 A0 93 01 00 20 7C B8 2B 78 2F 9F 00 00 93 21 00 24 93 41 00 28 7C D9 33 78 93 61 00 2C 7C 7A 1B 78 93 A1 00 34 92 81 00 10 92 A1 00 14 93 81 00 30 83 A3 00 A0 83 63 00 A4 40 9E 01 9C
+
+. 0 2547B944 12
+. 80 1F 01 80 74 09 10 00 40 82 01 10
+
+. 0 2547B950 24
+. 64 09 10 00 80 9F 00 04 91 3F 01 80 88 64 00 00 2F 03 00 00 41 9A 00 F0
+
+. 0 2547B968 12
+. 81 5F 00 50 2C 8A 00 00 40 86 00 10
+
+. 0 2547B980 16
+. 81 3E 04 F4 80 C9 00 00 70 C9 00 02 40 82 01 30
+
+. 0 2547B990 8
+. 2F 0A 00 00 40 9A 00 D4
+
+. 0 2547BA68 32
+. 81 8A 00 04 7E E3 BB 78 81 1F 00 00 7F 04 C3 78 7F 25 CB 78 7C E8 62 14 7C E9 03 A6 4E 80 04 21
+
+. 0 FFDE898 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 1D
+
+. 0 FFDE8C0 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 01 08 39
+
+. 0 FFEF108 4
+. 4E 80 00 21
+
+. 0 FFDE8D4 16
+. 7F C8 02 A6 80 1E 00 0C 2F 80 00 00 41 9E 00 0C
+
+. 0 FFDE8EC 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FFDE8A8 4
+. 48 00 01 15
+
+. 0 FFDE9BC 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 80 7E 80 10 80 03 00 00 2F 80 00 00 41 9E 00 18
+
+. 0 FFDEA00 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 FFDE8AC 4
+. 48 00 06 59
+
+. 0 FFDEF04 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 80 09 FF FC 3B E9 FF FC 48 00 00 10
+
+. 0 FFDEF44 8
+. 2F 80 FF FF 40 9E FF F0
+
+. 0 FFDEF4C 24
+. 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FFDE8B0 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547BA88 4
+. 4B FF FF 10
+
+. 0 2547B998 12
+. 81 7F 00 84 2C 8B 00 00 41 86 00 BC
+
+. 0 2547BA5C 12
+. 3B 80 00 00 93 96 01 A0 4B FF FD 48
+
+. 0 2547B7AC 8
+. 2F 9D 00 00 40 9E 02 40
+
+. 0 2547B7B4 12
+. 38 60 00 00 3A 80 00 01 48 00 07 89
+
+. 0 2547BF94 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 2547B7C0 12
+. 92 83 00 0C 7C 76 1B 78 48 00 07 D9
+
+. 0 2547B7CC 16
+. 83 BA 01 5C 2F 9D 00 00 3B 9D FF FF 41 9E 00 58
+
+. 0 2547B7DC 24
+. 82 BA 01 E8 57 80 10 3A 7F F5 00 2E 80 1F 01 80 74 09 10 00 40 82 00 34
+
+. 0 2547B7F4 24
+. 64 09 10 00 80 9F 00 04 91 3F 01 80 88 64 00 00 2F 03 00 00 41 9A 01 04
+
+. 0 2547B80C 12
+. 81 5F 00 50 2C 8A 00 00 40 86 00 64
+
+. 0 2547B818 12
+. 80 BF 00 84 2F 85 00 00 40 9E 00 58
+
+. 0 2547B824 12
+. 2F 9C 00 00 3B 9C FF FF 40 9E FF B0
+
+. 0 2547B878 16
+. 80 DE 04 F4 83 66 00 00 73 69 00 02 40 82 00 9C
+
+. 0 2547B888 8
+. 2F 0A 00 00 40 9A 00 5C
+
+. 0 2547B8E8 32
+. 81 0A 00 04 7E E3 BB 78 80 FF 00 00 7F 25 CB 78 7C 87 42 14 7C 89 03 A6 7F 04 C3 78 4E 80 04 21
+
+. 0 FE9B620 12
+. 94 21 FF E0 7C 08 02 A6 48 12 C8 29
+
+. 0 FFC7E50 4
+. 4E 80 00 21
+
+. 0 FE9B62C 60
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 A1 00 14 7C BC 2B 78 93 E1 00 1C 7C 9D 23 78 81 7E 1C E4 7C 7F 1B 78 81 1E 1A A4 39 20 00 00 2F 8B 00 00 90 01 00 24 7D 0A 43 78 41 9E 00 10
+
+. 0 FE9B674 16
+. 91 2A 00 00 80 88 00 00 2C 84 00 00 40 86 00 24
+
+. 0 FE9B684 28
+. 81 3E 1B F4 81 7E 1B C8 81 49 00 00 80 AB 00 34 7D 43 53 78 7F 05 50 00 41 9A 00 08
+
+. 0 FE9B6A4 40
+. 81 9E 1A 8C 7F E3 FB 78 81 1E 1D D4 7F A4 EB 78 80 FE 1B 84 7F 85 E3 78 93 EC 00 00 93 A8 00 00 93 87 00 00 48 0B 64 21
+
+. 0 FF51AE8 20
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 7C 9D 23 79 48 07 63 59
+
+. 0 FF51AFC 20
+. 93 C1 00 18 93 E1 00 1C 7F C8 02 A6 90 01 00 24 41 82 00 3C
+
+. 0 FF51B10 20
+. 83 FD 00 00 38 80 00 2F 2F 9F 00 00 7F E3 FB 78 41 9E 00 28
+
+. 0 FF51B24 4
+. 4B FA 54 59
+
+. 0 FEF6F7C 40
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 54 9D 06 3E 2F 9D 00 00 93 C1 00 18 90 01 00 24 7C 60 1B 78 93 E1 00 1C 40 9E 00 24
+
+. 0 FEF6FC4 8
+. 3B E0 00 00 48 00 00 0C
+
+. 0 FEF6FD4 12
+. 7C 03 03 78 7F A4 EB 78 4B FF F2 F1
+
+. 0 FEF62CC 72
+. 50 84 44 2E 39 60 FF FF 50 84 80 1E 54 6A 1E F8 3C C0 FE FF 3C E0 7F 7F 54 68 00 3A 38 C6 FE FF 38 E7 7F 7F 80 A8 00 00 7D 6B 54 30 7C A5 5B 38 7C 06 2A 14 7C E9 28 F8 7C 00 48 39 7C 8C 2A 78 7D 8C 5B 38 48 00 00 20
+
+. 0 FEF6330 12
+. 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+. 0 FEF6314 20
+. 84 A8 00 04 7C 00 48 39 7C 06 2A 14 7C E9 28 F8 40 82 00 5C
+
+. 0 FEF6380 36
+. 7C E0 60 38 7C EA 63 78 7C 00 3A 14 7D 49 00 F8 7D 24 00 34 39 08 FF FC 54 84 E8 FE 7C 68 22 14 4E 80 00 20
+
+. 0 FEF6FE0 8
+. 2C 03 00 00 40 82 FF E8
+
+. 0 FEF6FCC 20
+. 7C 7F 1B 78 38 03 00 01 7C 03 03 78 7F A4 EB 78 4B FF F2 F1
+
+. 0 FEF6328 20
+. 7C 00 48 39 7C 8C 2A 78 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+. 0 FEF633C 12
+. 7C 00 48 39 38 60 00 00 4D 82 00 20
+
+. 0 FEF6FE8 32
+. 80 81 00 24 7F E3 FB 78 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FF51B28 12
+. 2C 83 00 00 38 63 00 01 41 86 00 34
+
+. 0 FF51B34 48
+. 80 9E 1C 34 90 64 00 00 80 BD 00 00 80 7E 1E 24 90 A3 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FE9B6CC 4
+. 4B FF FE 95
+
+. 0 FE9B560 12
+. 94 21 FF F0 7C 08 02 A6 48 12 C8 E9
+
+. 0 FE9B56C 36
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 81 3E 00 18 80 09 00 04 3B E9 00 04 2F 80 00 00 41 9E 00 18
+
+. 0 FE9B590 8
+. 7C 09 03 A6 4E 80 04 21
+
+. 0 FEED850 12
+. 7D 88 02 A6 94 21 FF F0 48 0D A5 F9
+
+. 0 FEED85C 40
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 1E 1D C8 81 1E 1B 00 2F 80 00 00 81 5E 1B CC 80 FE 1A E8 38 00 FF B4 41 9E 00 10
+
+. 0 FEED884 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FE9B598 12
+. 84 1F 00 04 2F 80 00 00 40 9E FF F0
+
+. 0 FE9B5A4 24
+. 80 61 00 14 83 C1 00 08 83 E1 00 0C 7C 68 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FE9B6D0 32
+. 80 C1 00 24 83 81 00 10 83 A1 00 14 7C C8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547B908 4
+. 4B FF FF 88
+
+. 0 2547B890 12
+. 81 7F 00 84 2C 8B 00 00 41 A6 FF 8C
+
+. 0 FDF9B90 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 1D
+
+. 0 FDF9BB8 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 07 94 E9
+
+. 0 FE730B0 4
+. 4E 80 00 21
+
+. 0 FDF9BCC 16
+. 7F C8 02 A6 80 1E 22 4C 2F 80 00 00 41 9E 00 0C
+
+. 0 FDF9BE4 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FDF9BA0 4
+. 48 00 01 15
+
+. 0 FDF9CB4 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 80 7E 80 10 80 03 00 00 2F 80 00 00 41 9E 00 18
+
+. 0 FDF9CF8 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 FDF9BA4 4
+. 48 03 0C 19
+
+. 0 FE2A7BC 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 80 09 FF FC 3B E9 FF FC 48 00 00 10
+
+. 0 FE2A7FC 8
+. 2F 80 FF FF 40 9E FF F0
+
+. 0 FE2A804 24
+. 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FDF9BA8 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547B90C 8
+. 75 20 C0 00 40 A2 FE FC
+
+. 0 2547B914 12
+. 2F 9C 00 00 3B 9C FF FF 4B FF FF 10
+
+. 0 2547B82C 4
+. 40 9E FF B0
+
+. 0 2547B830 72
+. 82 E1 00 44 3B 00 00 00 83 C1 00 38 7E E8 03 A6 83 E1 00 3C 93 16 00 0C 82 81 00 10 82 A1 00 14 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 38 21 00 40 48 00 07 2C
+
+. 0 2548051C 32
+. 80 7D 00 00 80 9B 00 00 54 65 10 3A 7C C4 2A 14 38 A6 00 04 84 06 00 04 2C 00 00 00 40 82 FF F8
+
+. 0 25480530 12
+. 84 06 00 04 2C 00 00 00 40 82 FF F8
+
+. 0 2548053C 40
+. 38 C6 00 04 80 FF 04 AC 7F C9 03 A6 3B E0 00 00 93 E1 00 00 7F E8 03 A6 93 E1 00 04 93 E1 00 08 93 E1 00 0C 4E 80 04 20
+
+. 0 100006C8 36
+. 7C 29 0B 78 54 21 00 36 38 00 00 00 94 21 FF F0 7C 08 03 A6 90 01 00 00 3D 00 10 01 85 A8 68 40 48 02 88 7C
+
+. 0 10028F64 8
+. 39 60 00 38 4B FF FF 64
+
+. 0 10028ECC 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 7A 88 3D 8C 25 49 4E 80 04 20
+
+. 0 2547AF5C 68
+. 94 21 FF C0 90 01 00 0C 90 61 00 10 90 81 00 14 7D 83 63 78 90 A1 00 18 7D 64 5B 78 90 C1 00 1C 7C 08 02 A6 90 E1 00 20 90 01 00 30 91 01 00 24 7C 00 00 26 91 21 00 28 91 41 00 2C 90 01 00 08 48 00 00 C9
+
+. 0 2547B064 16
+. 7C E8 02 A6 94 21 FF E0 93 81 00 10 48 01 BF 91
+
+. 0 2547B074 96
+. 93 A1 00 14 90 E1 00 24 80 C3 00 7C 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 7C 7F 1B 78 81 46 00 04 80 A3 00 38 7F AA 22 14 7D 6A 20 2E 80 7D 00 04 81 85 00 04 54 66 C2 3E 80 7F 00 00 54 C5 20 36 7D 05 62 14 7F 83 5A 14 88 08 00 0D 70 09 00 03 81 3F 00 34 80 89 00 04 91 01 00 08 40 82 00 E0
+
+. 0 2547B0D4 16
+. 81 5F 00 E4 38 E0 00 00 2F 8A 00 00 40 9E 00 9C
+
+. 0 2547B17C 52
+. 81 6A 00 04 54 C9 08 3C 80 DF 01 88 7D 09 5A 2E 55 07 23 76 7D 66 3A 14 80 6B 00 04 7C 6A FE 70 7D 40 1A 78 7D 20 50 50 7D 28 FE 70 7D 67 40 38 4B FF FF 38
+
+. 0 2547B0E4 36
+. 7C 05 60 2E 39 40 00 00 80 DF 01 C0 38 A1 00 08 7C 60 22 14 39 00 00 01 7F E4 FB 78 39 20 00 01 4B FF D4 A1
+
+. 0 2547B108 16
+. 39 40 00 00 81 61 00 08 2C 8B 00 00 41 86 00 18
+
+. 0 2547B118 8
+. 2F 03 00 00 41 9A 00 A0
+
+. 0 2547B120 40
+. 81 83 00 00 80 8B 00 04 7D 4C 22 14 80 FE 04 F4 80 DD 00 08 80 67 00 2C 7D 66 52 14 2C 03 00 00 7D 63 5B 78 40 82 00 18
+
+. 0 2547B148 20
+. 7F E3 FB 78 7F A4 EB 78 7F 85 E3 78 7D 66 5B 78 48 00 54 0D
+
+. 0 25480564 52
+. 7D 45 30 50 3D 06 FE 00 55 4C 30 32 3C E0 FC 00 7D 84 36 70 39 28 00 03 7F 84 50 00 60 E0 00 02 54 8B 01 BA 94 21 FF F0 7F 09 00 40 65 6B 48 00 41 9E 00 EC
+
+. 0 25480680 16
+. 91 65 00 00 7C 00 28 6C 7C 00 04 AC 7C 00 2F AC
+
+. 0 25480690 20
+. 7C 00 04 AC 4C 00 01 2C 7C C3 33 78 38 21 00 10 4E 80 00 20
+
+. 0 2547B15C 32
+. 83 81 00 24 83 A1 00 14 7F 88 03 A6 83 C1 00 18 83 81 00 10 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547AFA0 64
+. 7C 69 03 A6 80 01 00 30 81 41 00 2C 81 21 00 28 7C 08 03 A6 81 01 00 24 80 01 00 08 80 E1 00 20 80 C1 00 1C 7C 0F F1 20 80 A1 00 18 80 81 00 14 80 61 00 10 80 01 00 0C 38 21 00 40 4E 80 04 20
+
+. 0 FE9B8C4 12
+. 7C 08 02 A6 94 21 FF F0 48 12 C5 85
+
+. 0 FE9B8D0 44
+. 93 C1 00 08 7D 2A 4B 78 7F C8 02 A6 90 01 00 14 7C 6C 1B 78 80 09 00 00 7C 85 23 78 7C E9 3B 78 7D 07 43 78 2F 80 00 00 41 9E 00 34
+
+. 0 FE9B92C 16
+. 80 06 00 00 7C CB 33 78 2C 80 00 00 41 86 00 3C
+
+. 0 FE9B93C 8
+. 81 1E 1C B0 48 00 00 10
+
+. 0 FE9B950 12
+. 2F 80 00 13 2F 08 00 00 40 9E FF EC
+
+. 0 FE9B944 12
+. 84 0B 00 08 2F 00 00 00 41 9A 00 28
+
+. 0 FE9B95C 4
+. 41 BA FF E8
+
+. 0 FE9B960 20
+. 80 8B 00 04 90 88 00 00 84 0B 00 08 2F 00 00 00 40 9A FF E0
+
+. 0 FE9B974 20
+. 81 07 00 0C 7D 84 63 78 80 67 00 04 80 E7 00 08 4B FF FD 91
+
+. 0 FE9B714 12
+. 94 21 FD E0 7D 48 02 A6 48 12 C7 35
+
+. 0 FE9B720 84
+. 93 C1 02 18 7F C8 02 A6 93 A1 02 14 54 80 10 3A 93 81 02 10 7D 60 2A 14 93 E1 02 1C 83 BE 1C E4 7C FC 3B 78 91 41 02 24 7D 1F 43 78 2F 9D 00 00 90 61 01 F0 93 61 02 0C 7D 23 4B 78 90 81 01 F4 39 6B 00 04 90 A1 01 F8 39 40 00 00 90 C1 01 FC 41 9E 00 10
+
+. 0 FE9B780 24
+. 2C 83 00 00 83 7E 1B 84 81 3E 1A A4 91 7B 00 00 91 49 00 00 41 86 00 10
+
+. 0 FE9B798 12
+. 38 80 00 00 38 A0 00 00 48 01 87 31
+
+. 0 FEB3ED0 44
+. 7C 08 02 A6 94 21 FF E0 93 E1 00 1C 7C BF 2B 78 93 81 00 10 7C 9C 23 78 93 A1 00 14 7C 7D 1B 78 90 01 00 24 93 C1 00 18 4B FF FE 35
+
+. 0 FEB3D2C 12
+. 94 21 FF E0 7C 68 02 A6 48 11 41 1D
+
+. 0 FEB3D38 52
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 A1 00 14 38 00 00 01 93 61 00 0C 3B A0 00 00 93 E1 00 1C 90 61 00 24 83 9E 02 B4 7D 20 E0 28 7C 09 E8 00 40 82 00 0C
+
+. 0 FEB3D6C 8
+. 7C 00 E1 2D 40 A2 FF F0
+
+. 0 FEB3D60 12
+. 7D 20 E0 28 7C 09 E8 00 40 82 00 0C
+
+. 0 FEB3D74 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 34
+
+. 0 FEB3D80 16
+. 83 7E 1A FC 83 FB 00 00 2F 1F 00 00 41 9A 00 50
+
+. 0 FEB3D90 16
+. 81 7F 00 04 3B A0 00 00 28 0B 00 00 40 81 00 24
+
+. 0 FEB3DC0 8
+. 7C 0B E8 40 41 81 01 04
+
+. 0 FEB3DC8 8
+. 28 8B 00 1F 40 85 00 F0
+
+. 0 FEB3EBC 16
+. 39 2B 00 01 7D 7D 5B 78 91 3F 00 04 40 BA FF 40
+
+. 0 FEB3E08 36
+. 57 AA 20 36 39 00 00 01 7C EA FA 14 91 07 00 08 39 80 00 00 7C 00 04 AC 7D 60 E0 28 7D 80 E1 2D 40 A2 FF F8
+
+. 0 FEB3E2C 8
+. 2F 8B 00 01 41 9D 00 60
+
+. 0 FEB3E34 4
+. 41 9A 00 34
+
+. 0 FEB3E38 48
+. 57 BC 20 36 83 A1 00 24 7F 7C FA 14 83 C1 00 18 38 7B 00 08 7F A8 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEB3EFC 12
+. 2C 03 00 00 38 00 FF FF 41 82 00 1C
+
+. 0 FEB3F08 60
+. 38 80 00 04 93 E3 00 0C 90 83 00 00 38 00 00 00 93 A3 00 04 93 83 00 08 80 A1 00 24 7C 03 03 78 83 81 00 10 83 A1 00 14 7C A8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FE9B7A4 8
+. 2F 1F 00 00 41 9A 00 14
+
+. 0 FE9B7AC 16
+. 7F E3 FB 78 38 80 00 00 38 A0 00 00 48 01 87 19
+
+. 0 FEB3DA0 20
+. 39 3F 00 08 80 89 00 00 39 29 00 10 2C 84 00 00 41 86 00 10
+
+. 0 FEB3DB4 12
+. 3B BD 00 01 7F 8B E8 40 41 9D FF E8
+
+. 0 FE9B7BC 20
+. 83 FE 1B C8 80 7F 00 00 70 69 00 02 7F A0 00 26 40 82 00 80
+
+. 0 FE9B7D0 8
+. 2F 9C 00 00 41 9E 00 1C
+
+. 0 FE9B7D8 24
+. 80 BB 00 00 7F 88 03 A6 80 61 01 F4 80 81 01 F8 80 C1 01 FC 4E 80 00 21
+
+. 0 100164BC 48
+. 94 21 FF E0 7C A8 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 E1 00 1C 93 A1 00 14 80 9E FF F0 90 A1 00 24 7F C4 F2 14 4B FE A1 B9
+
+. 0 100006A0 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 41
+
+. 0 100006EC 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 02 87 85
+
+. 0 10028E80 4
+. 4E 80 00 21
+
+. 0 10000700 16
+. 7F C8 02 A6 80 1E 00 0C 2F 80 00 00 41 9E 00 0C
+
+. 0 10000718 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 100006B0 4
+. 48 00 01 05
+
+. 0 100007B4 40
+. 7C 08 02 A6 3D 60 10 03 94 21 FF F0 3D 20 00 00 39 29 00 00 38 6B 89 3C 90 01 00 14 80 0B 89 3C 2F 80 00 00 41 9E 00 14
+
+. 0 100007EC 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 100006B4 4
+. 48 01 61 01
+
+. 0 100167B4 36
+. 7C 08 02 A6 94 21 FF F0 3D 20 10 03 93 E1 00 0C 39 29 89 30 90 01 00 14 3B E9 FF FC 80 09 FF FC 48 00 00 10
+
+. 0 100167E4 8
+. 2F 80 FF FF 40 9E FF F0
+
+. 0 100167EC 20
+. 80 01 00 14 83 E1 00 0C 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 100006B8 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 100164EC 28
+. 83 9E 80 04 80 7E 80 00 3B E0 00 00 7C 1C 18 50 7C 00 16 70 7F 9F 00 40 40 9C 00 24
+
+. 0 10016528 32
+. 81 01 00 24 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FE9B7F0 8
+. 7F A8 01 20 40 82 00 B0
+
+. 0 FE9B7F8 8
+. 38 61 00 10 48 01 4F 9D
+
+. 0 FEB0798 8
+. 38 80 00 00 4B FF FC 94
+
+. 0 FEB0430 172
+. 90 23 00 00 7C 08 02 A6 91 C3 00 0C D9 C3 00 58 90 03 00 08 91 E3 00 10 D9 E3 00 60 7C 00 00 26 92 03 00 14 DA 03 00 68 90 03 00 54 92 23 00 18 DA 23 00 70 92 43 00 1C DA 43 00 78 92 63 00 20 DA 63 00 80 92 83 00 24 DA 83 00 88 92 A3 00 28 DA A3 00 90 92 C3 00 2C DA C3 00 98 92 E3 00 30 DA E3 00 A0 93 03 00 34 DB 03 00 A8 93 23 00 38 DB 23 00 B0 93 43 00 3C DB 43 00 B8 93 63 00 40 DB 63 00 C0 93 83 00 44 DB 83 00 C8 93 A3 00 48 DB A3 00 D0 93 C3 00 4C DB C3 00 D8 93 E3 00 50 DB E3 00 E0 7C C8 02 A6 48 11 79 79
+
+. 0 FEB04DC 24
+. 7C A8 02 A6 80 A5 1B C8 7C C8 03 A6 80 A5 00 3C 74 A5 10 00 41 82 01 58
+
+. 0 FEB0648 4
+. 48 00 00 AC
+
+. 0 FEB06F4 52
+. 2F 84 00 00 7C 08 02 A6 94 21 FF E0 38 A3 01 C4 93 A1 00 14 38 80 00 00 93 E1 00 1C 3B A0 00 00 7C 7F 1B 78 93 C1 00 18 90 01 00 24 38 60 00 00 40 9E 00 28
+
+. 0 FEB0728 36
+. 80 81 00 24 38 60 00 00 93 BF 01 C0 83 A1 00 14 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FE9B800 8
+. 2C 83 00 00 40 86 00 68
+
+. 0 FE9B808 56
+. 83 A2 8B F4 3B 61 00 10 81 9E 1B 84 93 A1 01 E0 83 82 8B F0 81 01 01 F0 93 81 01 E4 80 61 01 F4 7D 08 03 A6 93 62 8B F4 80 81 01 F8 80 AC 00 00 80 C1 01 FC 4E 80 00 21
+
+. 0 10004654 48
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 E1 00 1C 7C 9F 23 78 93 81 00 10 3B 80 00 01 93 C1 00 18 3B C0 00 63 90 01 00 24 48 01 03 19
+
+. 0 10014998 52
+. 94 21 FF D0 7C 08 02 A6 42 9F 00 05 93 C1 00 28 7F C8 02 A6 93 A1 00 24 90 01 00 34 3B A0 00 00 93 E1 00 2C 80 1E FF F0 7F C0 F2 14 80 7E 80 00 48 01 45 4D
+
+. 0 10028F14 8
+. 39 60 00 10 4B FF FF B4
+
+. 0 FEB3198 12
+. 94 21 FF E0 7C 08 02 A6 48 11 4C B1
+
+. 0 FEB31A4 40
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 41 00 08 7C 7F 1B 78 93 61 00 0C 93 81 00 10 93 A1 00 14 90 01 00 24 48 04 39 11
+
+. 0 FEF6AD8 56
+. 54 64 00 3A 3C E0 7F 7F 54 65 1E F8 81 04 00 00 39 20 FF FF 38 E7 7F 7F 7D 29 2C 30 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 40 00 F8 7C 08 48 39 7C 60 11 20 40 82 00 70
+
+. 0 FEF6B10 12
+. 3C C0 FE FF 38 C6 FE FF 41 9D 00 1C
+
+. 0 FEF6B1C 24
+. 85 04 00 04 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 48 00 F9 40 82 00 4C
+
+. 0 FEF6B34 32
+. 81 04 00 04 85 24 00 08 7C 06 42 14 7C EA 40 F8 7C 00 50 39 7D 66 4A 14 7C EC 48 F8 40 82 00 1C
+
+. 0 FEF6B54 8
+. 7D 60 60 39 41 82 FF DC
+
+. 0 FEF6B5C 16
+. 7C E0 48 38 7C 00 3A 14 7D 88 00 78 48 00 00 14
+
+. 0 FEF6B7C 20
+. 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+
+. 0 FEB31CC 16
+. 81 3E 1B 84 81 29 00 00 2F 89 00 00 41 9E 00 10
+
+. 0 FEB31DC 12
+. 89 7F 00 00 2C 0B 00 00 40 82 00 30
+
+. 0 FEB3214 12
+. 88 1F 00 01 2C 80 00 00 40 86 00 48
+
+. 0 FEB3264 36
+. 81 49 00 00 54 08 40 2E 55 6C 06 3E 7D 3D 4B 78 2C 8A 00 00 7D 9A 43 78 3B 83 FF FE 3B 7F 00 02 41 A6 FF 64
+
+. 0 FEB3288 8
+. 7D 5F 53 78 48 00 00 10
+
+. 0 FEB329C 36
+. 88 1F 00 01 38 7F 00 02 89 3F 00 00 7F 64 DB 78 54 0B 40 2E 7F 85 E3 78 7D 6A 4B 78 7F 1A 50 00 40 9A FF D4
+
+. 0 FEB3290 12
+. 87 FD 00 04 2C 9F 00 00 41 A6 FF 50
+
+. 0 FEB31E8 44
+. 38 60 00 00 83 41 00 24 83 61 00 0C 7F 48 03 A6 83 81 00 10 83 41 00 08 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 100149CC 28
+. 38 00 00 00 7C 69 1B 79 39 60 00 00 90 01 00 0C 91 61 00 10 90 01 00 08 41 82 00 10
+
+. 0 100149F4 28
+. 80 01 00 34 83 A1 00 24 83 C1 00 28 7C 08 03 A6 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 10004684 8
+. 2F 9D 00 02 41 9E 00 D8
+
+. 0 1000468C 12
+. 3B E0 00 01 7F 9F F0 40 41 9D 00 58
+
+. 0 10004698 8
+. 7F 9F E0 40 41 9C 00 44
+
+. 0 100046A0 16
+. 3B A0 00 01 7F A3 EB 78 7F E4 FB 78 4B FF CC 4D
+
+. 0 100012F8 60
+. 94 21 FF D0 7C 08 02 A6 93 81 00 20 7F 84 19 D6 93 A1 00 24 93 61 00 1C 7C 7B 1B 78 90 01 00 34 57 9D 20 36 93 41 00 18 7F A3 EB 78 93 C1 00 28 93 E1 00 2C 7C 9E 23 78 48 02 7C 05
+
+. 0 10028F34 8
+. 39 60 00 20 4B FF FF 94
+
+. 0 FEF18E0 12
+. 94 21 FF E0 7C 08 02 A6 48 0D 65 69
+
+. 0 FEF18EC 52
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 90 01 00 24 7C 7D 1B 78 93 41 00 08 81 3E 1A 7C 93 61 00 0C 80 09 00 00 93 81 00 10 2F 80 00 00 93 E1 00 1C 41 9E 00 3C
+
+. 0 FEF1920 16
+. 80 A1 00 00 7C 09 03 A6 80 85 00 04 4E 80 04 21
+
+. 0 FEF2A68 12
+. 94 21 FF E0 7C 88 02 A6 48 0D 53 E1
+
+. 0 FEF2A74 40
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 7C 7D 1B 78 38 60 00 00 90 81 00 24 81 3E 1A 7C 93 E1 00 1C 90 69 00 00 4B FF FB DD
+
+. 0 FEF2674 12
+. 94 21 FF B0 7C 68 02 A6 48 0D 57 D5
+
+. 0 FEF2680 52
+. 93 C1 00 48 7F C8 02 A6 93 61 00 3C 90 61 00 54 93 E1 00 4C 3B E0 00 00 83 7E 1C 84 93 41 00 38 80 1B 00 00 93 81 00 40 2F 80 00 00 93 A1 00 44 41 9C 00 2C
+
+. 0 FEF26DC 36
+. 83 BE 06 28 3C A0 00 02 3C C0 00 01 93 FB 00 00 90 DD 00 10 90 BD 00 00 90 BD 00 04 90 BD 00 08 48 05 72 01
+
+. 0 FF498FC 12
+. 7D 88 02 A6 94 21 FF F0 48 07 E5 4D
+
+. 0 FF49908 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 81 3E 1B C8 80 09 00 04 2F 80 00 00 41 9E 00 14
+
+. 0 FF49924 16
+. 7C 03 03 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FEF2700 32
+. 81 3E 1B F8 83 9E 05 FC 80 89 00 00 90 7D 00 18 2C 04 00 00 93 FC 00 00 93 9C 04 48 41 82 01 00
+
+. 0 FEF281C 20
+. 80 7E 06 48 38 81 00 10 38 A1 00 20 38 C0 00 00 48 09 A5 E1
+
+. 0 FF8CE0C 12
+. 94 21 FF D0 7C E8 02 A6 48 03 B0 3D
+
+. 0 FF8CE18 72
+. 93 C1 00 28 7F C8 02 A6 93 61 00 1C 90 E1 00 34 93 21 00 14 7C D9 33 78 83 7E 1B 98 93 41 00 18 7C BA 2B 78 80 1B 03 F8 93 81 00 20 7C 9C 23 78 93 E1 00 2C 7C 09 03 A6 7C 7F 1B 78 93 A1 00 24 38 7B 01 80 4E 80 04 21
+
+. 0 25471884 24
+. 80 83 00 04 94 21 FF F0 39 24 00 01 38 21 00 10 91 23 00 04 4E 80 00 20
+
+. 0 FF8CE60 28
+. 38 A0 00 00 7F 67 DB 78 38 DB 01 68 81 07 00 00 38 E7 00 18 2F 88 00 00 41 9E 00 6C
+
+. 0 FF8CE7C 12
+. 80 68 01 A0 7C 03 F8 40 41 81 00 54
+
+. 0 FF8CED8 8
+. 81 08 00 0C 4B FF FF 98
+
+. 0 FF8CE74 8
+. 2F 88 00 00 41 9E 00 6C
+
+. 0 FF8CE88 12
+. 80 88 01 A4 7C 84 F8 40 40 85 00 48
+
+. 0 FF8CE94 12
+. A1 68 01 54 2F 0B 00 00 41 9A 00 44
+
+. 0 FF8CEA0 24
+. 81 48 01 4C 39 6B FF FF 55 69 28 34 7D 89 50 2E 2F 8C 00 01 40 9E FF F0
+
+. 0 FF8CEA4 20
+. 39 6B FF FF 55 69 28 34 7D 89 50 2E 2F 8C 00 01 40 9E FF F0
+
+. 0 FF8CEB8 32
+. 7C 69 52 14 80 08 00 00 81 63 00 08 81 43 00 14 7F A0 5A 14 7D 3D 52 14 7C 09 F8 40 41 81 00 0C
+
+. 0 FF8CEE0 12
+. 7D 05 43 78 7C 87 30 00 40 85 FF 84
+
+. 0 FF8CE6C 16
+. 81 07 00 00 38 E7 00 18 2F 88 00 00 41 9E 00 6C
+
+. 0 FF8CEE4 8
+. 7C 87 30 00 40 85 FF 84
+
+. 0 FF8CEEC 12
+. 2F 05 00 00 3B A0 00 00 41 9A 01 2C
+
+. 0 FF8CEF8 32
+. 80 85 00 04 80 E5 01 A0 90 9C 00 00 81 05 00 04 90 FC 00 04 88 C8 00 00 2F 86 00 00 41 9E 01 64
+
+. 0 FF8CF18 40
+. 81 05 00 30 80 E5 00 34 2C 88 00 00 81 65 00 38 80 67 00 04 80 C5 00 48 81 4B 00 04 7C 67 1B 78 83 A6 00 04 41 86 00 14
+
+. 0 FF8CF40 32
+. 80 88 00 04 81 04 00 04 55 00 20 36 7C E0 52 14 7F 8A 38 40 38 80 00 00 2C 04 00 00 40 9C 00 84
+
+. 0 FF8CF60 20
+. 80 C5 00 00 81 0A 00 04 7D 66 42 14 7C 8B F8 40 41 85 00 64
+
+. 0 FF8CF74 16
+. 89 2A 00 0C 55 2C 07 3E 2F 8C 00 06 41 9E 00 54
+
+. 0 FF8CF84 12
+. 80 0A 00 08 2C 80 00 00 40 86 00 0C
+
+. 0 FF8CF90 8
+. 7F 8B F8 00 41 9E 00 10
+
+. 0 FF8CF98 12
+. 7C 0B 02 14 7C 80 F8 40 40 85 00 34
+
+. 0 FF8CFD4 12
+. 39 4A 00 10 7C 8A 38 40 41 84 FF 88
+
+. 0 FF8CF64 16
+. 81 0A 00 04 7D 66 42 14 7C 8B F8 40 41 85 00 64
+
+. 0 FF8CFE0 8
+. 2F 9A 00 00 41 9E 00 08
+
+. 0 FF8CFE8 12
+. 90 BA 00 00 2C 99 00 00 41 86 00 08
+
+. 0 FF8CFF8 4
+. 41 82 00 68
+
+. 0 FF8D060 12
+. 90 9C 00 0C 90 9C 00 08 4B FF FF B4
+
+. 0 FF8D01C 20
+. 3B A0 00 01 83 9B 03 FC 38 7B 01 80 7F 89 03 A6 4E 80 04 21
+
+. 0 2547189C 24
+. 80 83 00 04 94 21 FF F0 39 24 FF FF 38 21 00 10 91 23 00 04 4E 80 00 20
+
+. 0 FF8D030 48
+. 83 61 00 34 7F A3 EB 78 83 21 00 14 7F 68 03 A6 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 FEF2830 8
+. 2C 83 00 00 41 A6 FE F8
+
+. 0 FEF2838 16
+. 81 01 00 20 80 E8 00 18 2F 07 00 00 41 BA FE E8
+
+. 0 FEF272C 56
+. 80 9E 06 00 80 7E 1B 4C 7F A4 12 14 3B E0 00 00 93 9D 00 00 83 9E 05 F8 80 C3 00 00 80 9E 06 18 80 7E 05 F4 80 BE 06 1C 83 5E 1B 6C 93 FC 00 00 3B 80 00 00 48 06 D4 E1
+
+. 0 FF5FC40 12
+. 94 21 FF D0 7C E8 02 A6 48 06 82 09
+
+. 0 FF5FC4C 76
+. 93 C1 00 28 7F C8 02 A6 93 41 00 18 93 61 00 1C 7C DA 33 78 93 81 00 20 7C BB 2B 78 93 A1 00 24 7C 9C 23 78 93 E1 00 2C 7C 7D 1B 78 93 21 00 14 39 60 00 00 90 E1 00 34 38 00 00 01 83 FE 1D 14 7D 20 F8 28 7C 09 58 00 40 82 00 0C
+
+. 0 FF5FC98 8
+. 7C 00 F9 2D 40 A2 FF F0
+
+. 0 FF5FC8C 12
+. 7D 20 F8 28 7C 09 58 00 40 82 00 0C
+
+. 0 FF5FCA0 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 5C
+
+. 0 FF5FCAC 24
+. 80 7E 10 F8 7C 79 1B 78 39 40 00 00 39 63 00 18 39 00 00 00 48 00 00 08
+
+. 0 FF5FCC8 32
+. 80 8B 00 00 39 4A 00 01 7D 09 43 78 2B 0A 00 2F 2C 04 00 00 39 08 00 1C 39 6B 00 1C 40 82 FF E0
+
+. 0 FF5FCE8 32
+. 7C 69 1A 14 39 40 00 01 39 63 00 04 39 00 00 00 2F 0B 00 00 91 4B 00 14 91 0B 00 18 41 9A 00 24
+
+. 0 FF5FD08 52
+. 81 9E 1C D4 93 AB 00 04 83 2C 00 00 93 8B 00 08 93 23 00 04 93 6B 00 0C 91 6C 00 00 93 4B 00 10 3B 40 00 00 7C 00 04 AC 7C 60 F8 28 7F 40 F9 2D 40 A2 FF F8
+
+. 0 FF5FD3C 8
+. 2C 03 00 01 41 81 00 A4
+
+. 0 FF5FD44 4
+. 40 9A 00 34
+
+. 0 FF5FD78 48
+. 83 61 00 34 38 60 00 00 83 21 00 14 7F 68 03 A6 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 FEF2764 20
+. 81 3A 00 00 81 9E 1D 70 2F 89 00 00 83 EC 00 00 41 9E 00 68
+
+. 0 FEF2778 32
+. 7D 20 4B 78 3B 41 00 24 91 21 00 24 7C 09 03 78 3B A0 00 00 81 69 00 00 2C 0B 00 00 41 82 00 1C
+
+. 0 FEF2798 12
+. 88 AB 00 00 2C 85 00 4D 41 86 00 C0
+
+. 0 FEF27A4 12
+. 85 69 00 04 2C 0B 00 00 40 82 FF EC
+
+. 0 FEF2860 12
+. 88 CB 00 01 2F 06 00 41 40 9A FF 3C
+
+. 0 FEF286C 12
+. 88 EB 00 02 2F 87 00 4C 40 9E FF 30
+
+. 0 FEF27B0 8
+. 2C 1D 00 00 40 82 01 1C
+
+. 0 FEF27B8 8
+. 2F 9C 00 00 41 9E 00 20
+
+. 0 FEF27DC 16
+. 83 5E 1A 88 81 3A 00 00 2F 09 00 00 40 9A 00 64
+
+. 0 FEF27EC 48
+. 3B 80 00 01 93 9B 00 00 83 61 00 54 83 41 00 38 7F 68 03 A6 83 81 00 40 83 61 00 3C 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+
+. 0 FEF2A9C 32
+. 80 01 00 24 83 C1 00 18 7F A3 EB 78 83 E1 00 1C 7C 08 03 A6 83 A1 00 14 38 21 00 20 4B FF EE 28
+
+. 0 FEF1958 20
+. 80 9E 06 00 7C 64 12 14 83 E3 00 00 2C 1F 00 00 41 82 00 90
+
+. 0 FEF196C 16
+. 38 E0 00 01 7C C0 F8 28 2C 06 00 00 40 82 00 0C
+
+. 0 FEF197C 8
+. 7C E0 F9 2D 40 A2 FF F0
+
+. 0 FEF1970 12
+. 7C C0 F8 28 2C 06 00 00 40 82 00 0C
+
+. 0 FEF1984 12
+. 4C 00 01 2C 2C 86 00 00 40 86 00 6C
+
+. 0 FEF1990 12
+. 2F 1F 00 00 38 60 00 00 41 BA FF 98
+
+. 0 FEF199C 12
+. 7F E3 FB 78 7F A4 EB 78 4B FF DC 39
+
+. 0 FEEF5DC 24
+. 38 00 FF DF 7D 80 00 26 7F 84 00 40 7C A8 02 A6 94 21 FF 90 48 0D 88 61
+
+. 0 FEEF5F4 92
+. 93 81 00 60 7C 7C 1B 78 93 C1 00 68 7F C8 02 A6 91 C1 00 28 91 E1 00 2C 92 01 00 30 92 21 00 34 92 41 00 38 92 61 00 3C 92 81 00 40 92 A1 00 44 92 C1 00 48 92 E1 00 4C 93 01 00 50 93 21 00 54 93 41 00 58 93 61 00 5C 93 A1 00 64 93 E1 00 6C 90 A1 00 74 91 81 00 24 41 9D 04 90
+
+. 0 FEEF650 16
+. 38 64 00 0B 28 03 00 0F 54 79 00 38 40 81 00 B8
+
+. 0 FEEF660 12
+. 81 5C 00 04 7C 8A C8 40 41 84 00 BC
+
+. 0 FEEF724 8
+. 2B 99 01 FF 41 9D 00 60
+
+. 0 FEEF72C 24
+. 7E 59 E2 14 57 38 E8 FE 39 52 00 30 80 6A 00 0C 7C 83 50 00 41 86 00 60
+
+. 0 FEEF744 8
+. 2D 83 00 00 41 8E 02 20
+
+. 0 FEEF968 8
+. 7F 83 E3 78 4B FF EB 59
+
+. 0 FEEE4C4 12
+. 7C 88 02 A6 94 21 FF 90 48 0D 99 85
+
+. 0 FEEE4D0 108
+. 38 00 00 7F 93 01 00 50 39 23 00 38 90 81 00 74 7C 78 1B 78 81 63 00 04 7C 09 03 A6 93 C1 00 68 2F 8B 00 00 91 C1 00 28 91 E1 00 2C 7F C8 02 A6 92 01 00 30 92 21 00 34 92 41 00 38 92 61 00 3C 92 81 00 40 92 A1 00 44 92 C1 00 48 92 E1 00 4C 93 21 00 54 93 41 00 58 93 61 00 5C 93 81 00 60 93 A1 00 64 93 E1 00 6C 41 9E 01 28
+
+. 0 FEEE660 16
+. 91 29 00 0C 91 29 00 08 39 29 00 08 42 00 FF F4
+
+. 0 FEEE670 12
+. 81 DE 05 FC 7C 98 70 00 41 86 00 08
+
+. 0 FEEE680 108
+. 55 72 07 BC 3A 18 00 38 62 4F 00 49 92 18 00 30 91 F8 00 04 82 61 00 74 81 C1 00 28 7E 68 03 A6 81 E1 00 2C 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 FEEF970 4
+. 4B FF FE 30
+
+. 0 FEEF7A0 60
+. 82 BE 05 FC 63 34 00 01 82 5E 06 78 3B 5C 00 38 7E 1C A8 00 81 DE 06 2C 7E B6 AB 78 7F 8F AA 78 4D 90 00 00 7E 93 A3 78 3A 39 00 10 3A 01 00 10 83 FC 00 44 7F 1F D0 00 41 9A 01 9C
+
+. 0 FEEF974 8
+. 28 19 01 FF 40 81 00 2C
+
+. 0 FEEF9A4 100
+. 39 78 00 01 7F 8A B2 78 55 64 D9 7E 55 66 18 38 54 83 10 3A 55 7D 06 FE 7C E3 E2 14 7D 06 E2 14 54 9B 40 2E 3B E0 00 01 80 A7 04 38 7F 1C B0 00 28 99 01 FF 7F E6 E8 30 39 68 00 30 30 0A FF FF 7F A0 51 10 7D 1B E2 14 21 26 00 00 7E A9 31 14 7D 26 28 10 7D 29 49 10 7D 29 00 D0 7D 2A AB 79 41 82 00 34
+
+. 0 FEEFA08 28
+. 39 27 04 38 38 84 00 01 38 E7 00 04 2B 84 00 03 54 83 10 3A 39 08 01 00 41 9D 01 64
+
+. 0 FEEFA24 12
+. 84 A9 00 04 2C 05 00 00 41 82 FF E0
+
+. 0 FEEFA0C 24
+. 38 84 00 01 38 E7 00 04 2B 84 00 03 54 83 10 3A 39 08 01 00 41 9D 01 64
+
+. 0 FEEFB84 20
+. 83 BC 00 30 82 FD 00 04 56 E0 00 38 7F 11 00 40 40 99 03 1C
+
+. 0 FEEFB98 12
+. 81 9C 00 04 71 8A 00 01 40 82 01 A0
+
+. 0 FEEFD40 24
+. 82 FE 06 28 82 77 00 08 80 B7 00 18 7D 93 C8 40 3A 65 FF FF 41 8D 00 28
+
+. 0 FEEFD7C 40
+. 81 7E 06 80 7F B6 EB 78 83 FD 00 04 3B 00 00 00 81 EB 00 00 3B 60 00 00 57 F5 00 38 7C 9C 78 00 7E 1D AA 14 41 86 01 64
+
+. 0 FEEFF04 24
+. 80 BC 00 04 81 97 00 04 70 AA 00 02 7E 4C CA 14 38 12 00 10 40 82 00 08
+
+. 0 FEEFF1C 20
+. 7C 15 00 50 7C 00 9A 14 7E 72 98 F8 7C 1F 90 39 40 81 01 B4
+
+. 0 FEEFF30 20
+. 81 DE 1A 70 7F E3 FB 78 81 0E 00 00 7D 09 03 A6 4E 80 04 21
+
+. 0 FEF4188 16
+. 7C 08 02 A6 94 21 FF F0 90 01 00 14 48 05 4B DD
+
+. 0 FF48D70 12
+. 94 21 FF E0 7C 88 02 A6 48 07 F0 D9
+
+. 0 FF48D7C 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 81 00 24 93 A1 00 14 7C 7D 1B 78 83 FE 1D 6C 38 60 00 00 80 1F 00 00 2F 80 00 00 41 9E 00 3C
+
+. 0 FF48DE0 4
+. 4B FF FF 4D
+
+. 0 FF48D2C 16
+. 94 21 FF F0 90 61 00 08 38 00 00 2D 44 00 00 02
+
+. 0 FF48D3C 12
+. 80 C1 00 08 7C 88 02 A6 48 07 F1 0D
+
+. 0 FF48D48 32
+. 7C A8 02 A6 80 A5 1D 6C 7C 88 03 A6 90 65 00 00 7C 06 18 40 38 21 00 10 38 60 00 00 4C A1 00 20
+
+. 0 FF48DE4 12
+. 2C 83 00 00 38 60 FF FF 41 A4 FF D8
+
+. 0 FF48DF0 8
+. 2F 1D 00 00 41 9A FF CC
+
+. 0 FF48DF8 12
+. 83 FF 00 00 7C 7F EA 14 4B FF FF 2D
+
+. 0 FF48E04 12
+. 2F 83 00 00 38 60 FF FF 41 BC FF B8
+
+. 0 FF48E10 32
+. 80 C1 00 24 7F E3 FB 78 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEF4198 12
+. 2F 83 FF FF 38 00 00 00 41 9E 00 08
+
+. 0 FEF41A4 24
+. 7C 60 1B 78 80 81 00 14 7C 03 03 78 38 21 00 10 7C 88 03 A6 4E 80 00 20
+
+. 0 FEEFF44 12
+. 7C 7B 1B 79 7F 40 00 26 41 82 01 9C
+
+. 0 FEEFF50 16
+. 81 5E 1B B8 81 2A 00 00 2E 09 00 00 41 B2 00 0C
+
+. 0 FEEFF68 16
+. 57 5A 20 3E 7F 40 11 20 57 5A E0 3E 41 9E 00 4C
+
+. 0 FEEFF78 12
+. 83 57 00 28 2E 1A 00 00 40 92 00 08
+
+. 0 FEEFF84 44
+. 93 77 00 28 2F 18 00 00 7F 65 82 78 20 05 00 00 7C A0 29 14 82 FC 04 4C 7D 80 00 26 55 8C DF FE 7C AA 60 39 7D 57 FA 14 41 82 02 54
+
+. 0 FEF0200 28
+. 81 1C 00 04 31 75 FF FF 7F AB A9 10 69 00 00 02 54 00 FF FE 7C 0B E8 39 41 82 00 0C
+
+. 0 FEF0224 16
+. 2C 80 00 00 3B A0 00 00 7F 7A DB 78 41 86 01 04
+
+. 0 FEF0234 8
+. 2F 15 00 00 41 9A 00 F4
+
+. 0 FEF032C 8
+. 91 5C 04 4C 4B FF FF 18
+
+. 0 FEF0248 8
+. 73 60 00 07 41 82 00 0C
+
+. 0 FEF0258 48
+. 7C 9B FA 14 7D 5D AA 14 7D 24 52 14 7D 69 9A 14 7D 7B 90 38 7F E9 D8 50 83 7E 1A 70 7F AA FA 14 82 7B 00 00 7F A3 EB 78 7E 69 03 A6 4E 80 04 21
+
+. 0 FF48DA8 16
+. 81 3E 1A A4 80 A9 00 00 2C 05 00 00 40 82 00 2C
+
+. 0 FF48DB8 8
+. 2F 1D 00 00 40 9A 00 3C
+
+. 0 FF48DC0 32
+. 80 7F 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEF0288 12
+. 7C 78 1B 79 7F E0 00 26 41 82 00 80
+
+. 0 FEF0294 16
+. 80 7E 1B B8 81 23 00 00 2F 89 00 00 41 BE 00 0C
+
+. 0 FEF02AC 16
+. 57 FF 80 3E 7F E0 81 20 57 FF 80 3E 41 B2 FD 08
+
+. 0 FEF02BC 44
+. 7C BA C0 50 2D 95 00 00 7D 85 EA 14 7F 48 D3 78 61 97 00 01 91 1C 00 30 92 FA 00 04 83 5C 04 4C 7D 5A EA 14 91 5C 04 4C 41 AE FB 74
+
+. 0 FEEFE58 12
+. 82 DC 04 50 7F 0A B0 40 40 99 00 08
+
+. 0 FEEFE64 20
+. 91 5C 04 50 81 C8 00 04 55 CB 00 38 7F 8B 88 40 41 9C 00 78
+
+. 0 FEEFE78 56
+. 7F 89 7A 78 7E 79 58 50 31 49 FF FF 7F 6A 49 10 38 68 00 08 57 7F 10 3A 7E 28 CA 14 7F E6 A3 78 62 6F 00 01 90 C8 00 04 7C 68 1B 78 91 F1 00 04 92 3C 00 30 4B FF F8 04
+
+. 0 FEEF6B0 100
+. 83 81 00 74 7D 03 43 78 81 81 00 24 7F 88 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 FEF19A8 8
+. 7C 7C 1B 79 41 82 00 60
+
+. 0 FEF19B0 20
+. 38 00 00 00 7C 00 04 AC 7F A0 F8 28 7C 00 F9 2D 40 A2 FF F8
+
+. 0 FEF19C4 8
+. 2F 9D 00 01 41 9D 00 DC
+
+. 0 FEF19CC 44
+. 7F 83 E3 78 83 81 00 24 83 41 00 08 7F 88 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEF1930 40
+. 83 81 00 24 83 41 00 08 7F 88 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10001334 12
+. 7C 7A 1B 78 7F A3 EB 78 48 02 7B F9
+
+. 0 10028F34 4
+. 4B EC 89 AC
+
+. 0 FEEF66C 32
+. 57 29 E8 FE 38 09 FF FE 54 06 10 3A 7D 66 E2 14 83 AB 00 08 3B 6B 00 08 2D 9D 00 00 41 8E 00 9C
+
+. 0 FEEFEB0 60
+. 82 9E 05 FC 7D 19 00 50 63 31 00 01 61 0A 00 01 7F 93 A2 78 7D DD CA 14 31 73 FF FF 7E 4B 99 10 39 1D 00 08 56 50 10 3A 91 DC 00 30 7E 0F 8B 78 91 FD 00 04 91 4E 00 04 4B FF F7 C8
+
+. 0 10001340 12
+. 7C 7F 1B 78 7F A3 EB 78 48 02 7B ED
+
+. 0 1000134C 20
+. 57 80 08 3C 2B 80 00 00 7C 7D 1B 78 39 40 00 00 40 9D 00 38
+
+. 0 10001360 52
+. 3D 60 10 02 7C 09 03 A6 39 6B 80 F8 3D 20 43 30 C9 AB 00 00 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 7C 1F 05 AE 42 00 FF E4
+
+. 0 10001374 32
+. 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 7C 1F 05 AE 42 00 FF E4
+
+. 0 10001394 16
+. 57 85 20 36 7F 43 D3 78 7F E4 FB 78 48 02 7B D5
+
+. 0 10028F74 8
+. 39 60 00 40 4B FF FF 54
+
+. 0 FEF86D8 52
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C 7B 1B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C 7F 1B 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+
+. 0 FEF870C 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+
+. 0 FEF8738 8
+. 73 A0 00 03 40 82 00 6C
+
+. 0 FEF8740 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 00 71
+
+. 0 FEF87BC 16
+. 54 A0 07 7E 7D 88 02 A6 2B 80 00 07 48 0C F6 89
+
+. 0 FEF87CC 20
+. 94 21 FF F0 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 41 9D 00 28
+
+. 0 FEF87E0 24
+. 81 7E 07 C8 54 00 10 3A 7C CB 00 2E 7D 26 5A 14 7D 29 03 A6 4E 80 04 20
+
+. 0 FEF88A0 28
+. 80 04 00 00 38 84 FF F0 81 24 00 14 38 63 FF EC 38 A5 00 04 90 03 00 14 4B FF FF 7C
+
+. 0 FEF8834 32
+. 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 FEF8854 16
+. 83 C1 00 08 38 21 00 10 91 23 00 00 4E 80 00 20
+
+. 0 FEF8750 24
+. 57 86 00 3A 57 85 07 BE 7F FF 32 14 7F BD 32 14 2C 85 00 00 41 86 00 1C
+
+. 0 FEF8780 40
+. 81 01 00 24 7F 63 DB 78 83 81 00 10 83 61 00 0C 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 100013A4 12
+. 2B 9E 00 01 38 E0 00 00 40 9D 00 18
+
+. 0 100013C4 12
+. 39 40 00 00 7F 8A F0 40 40 9C 00 64
+
+. 0 100013D0 24
+. 39 00 00 00 57 66 20 36 39 60 00 00 7D 49 53 78 7F 8B 38 40 40 9C 00 1C
+
+. 0 10001400 48
+. 7C 0B D9 D6 39 4A 00 01 7F 8A F0 40 7C 08 FC AE 7D 28 FA 14 C9 A9 00 08 7D 08 32 14 54 00 20 36 7D 20 EA 14 7C 1D 05 AE D9 A9 00 08 41 9C FF AC
+
+. 0 10001430 16
+. 34 9E FF FF 39 00 00 00 38 E0 00 00 41 82 00 74
+
+. 0 100014B0 44
+. 3D 20 10 02 3C A0 10 01 C8 29 81 10 3C 60 10 01 7F 67 DB 78 7F E6 FB 78 7F C8 F3 78 7F A4 EB 78 38 A5 6B FC 38 63 6C 1C 4B FF F3 41
+
+. 0 10000818 124
+. 94 21 FF A0 7C 08 02 A6 93 81 00 38 7D 1C 43 79 3D 20 10 02 93 01 00 28 39 29 80 F0 93 21 00 2C 93 41 00 30 FD 00 08 90 93 C1 00 40 7C 78 1B 78 93 E1 00 44 7C 9E 23 78 DB A1 00 48 7C B9 2B 78 DB C1 00 50 7C DF 33 78 DB E1 00 58 7C FA 3B 78 92 41 00 10 92 61 00 14 92 81 00 18 92 A1 00 1C 92 C1 00 20 92 E1 00 24 93 61 00 34 93 A1 00 3C 90 01 00 64 C8 29 00 00 41 82 00 60
+
+. 0 10000894 84
+. 3D 20 10 02 7F 89 03 A6 39 29 80 F8 38 00 00 00 C9 29 00 00 54 EA 20 36 7D 20 FA 14 7D 60 F2 14 C9 AB 00 08 7D 9F 04 AE 7C 1E 04 AE 7C 00 52 14 C9 69 00 08 FC 0C 00 28 FD 4B 68 28 FC 00 02 10 FD A0 52 10 FC 00 68 2A FF E0 02 72 FF 9F 08 00 40 9D 00 08
+
+. 0 100008EC 4
+. 42 00 FF C0
+
+. 0 100008F0 12
+. FF 81 40 00 38 60 00 00 40 9C 00 58
+
+. 0 100008FC 84
+. 80 01 00 64 82 41 00 10 82 61 00 14 7C 08 03 A6 82 81 00 18 82 A1 00 1C 82 C1 00 20 82 E1 00 24 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB A1 00 48 CB C1 00 50 CB E1 00 58 38 21 00 60 4E 80 00 20
+
+. 0 100014DC 20
+. 3C 80 10 01 7F C5 F3 78 38 84 6C 30 4C C6 31 82 48 01 40 FD
+
+. 0 100155E8 84
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 88 7F C8 02 A6 93 A1 00 84 90 01 00 94 2F 83 00 00 93 E1 00 8C 80 1E FF F0 90 C1 00 14 7F C0 F2 14 90 E1 00 18 83 BE 80 00 91 01 00 1C 81 7D 00 00 91 21 00 20 39 6B 00 01 91 41 00 24 90 A1 00 10 40 86 00 24
+
+. 0 1001565C 12
+. 91 7D 00 00 7C 9D 23 78 40 9E 00 54
+
+. 0 10015668 32
+. 83 FE 80 08 81 7E 80 04 80 1F 00 00 81 2B 00 00 2F 80 00 00 39 29 00 01 91 2B 00 00 40 9E 00 C8
+
+. 0 1001574C 8
+. 80 7E 80 0C 48 01 38 1D
+
+. 0 10028F6C 8
+. 39 60 00 3C 4B FF FF 5C
+
+. 0 FECADD0 12
+. 94 21 FF 70 7D 88 02 A6 48 0F D0 79
+
+. 0 FECADDC 64
+. 93 C1 00 88 7F C8 02 A6 7C 60 1B 78 91 81 00 94 90 81 00 0C 7C 04 03 78 81 7E 1A A0 90 A1 00 10 38 A1 00 70 80 6B 00 00 90 C1 00 14 90 E1 00 18 91 01 00 1C 91 21 00 20 91 41 00 24 40 86 00 24
+
+. 0 FECAE3C 36
+. 38 C1 00 98 39 21 00 08 39 00 00 01 38 E0 00 00 99 01 00 70 98 E1 00 71 90 C1 00 74 91 21 00 78 4B FF 80 79
+
+. 0 FEC2ED4 12
+. 94 21 FA 40 7C C8 02 A6 48 10 4F 75
+
+. 0 FEC2EE0 108
+. 7D 80 00 26 93 C1 05 B8 7F C8 02 A6 92 C1 05 98 7C B6 2B 78 93 61 05 AC 7C 7B 1B 78 93 E1 05 BC 7C 3F 0B 78 90 C1 05 C4 91 C1 05 78 91 E1 05 7C 92 01 05 80 92 21 05 84 92 41 05 88 92 61 05 8C 92 81 05 90 92 A1 05 94 92 E1 05 9C 93 01 05 A0 93 21 05 A4 93 41 05 A8 93 81 05 B0 93 A1 05 B4 91 81 05 74 90 9F 04 C4 4B FD 8E CD
+
+. 0 FE9BE14 12
+. 94 21 FF F0 7D 88 02 A6 48 12 C0 35
+
+. 0 FE9BE20 32
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 7E 1C 4C 83 C1 00 08 7C 63 12 14 38 21 00 10 4E 80 00 20
+
+. 0 FEC2F4C 32
+. 88 1B 00 46 38 E0 00 00 80 63 00 00 2F 80 00 00 90 FF 04 C8 90 7F 04 DC 90 FF 04 E0 40 9E 00 30
+
+. 0 FEC2F6C 12
+. 80 9E 1D C8 2C 04 00 00 41 82 04 00
+
+. 0 FEC2F78 12
+. 80 7B 00 60 2C 83 00 00 40 86 00 0C
+
+. 0 FEC2F84 20
+. 38 60 FF FF 90 7B 00 60 2D 83 FF FF 39 20 FF FF 40 8E 00 74
+
+. 0 FEC2F98 12
+. 81 3B 00 00 71 20 00 08 40 82 2B FC
+
+. 0 FEC2FA4 12
+. 80 BF 04 C4 2E 05 00 00 41 92 36 28
+
+. 0 FEC2FB0 12
+. 89 1B 00 46 2F 08 00 00 40 9A 00 34
+
+. 0 FEC2FBC 12
+. 81 3E 1D C8 2F 89 00 00 41 9E 2A A8
+
+. 0 FEC2FC8 12
+. 80 7B 00 60 2C 83 00 00 40 86 00 0C
+
+. 0 FEC2FDC 12
+. 2D 83 FF FF 39 20 FF FF 40 8E 00 24
+
+. 0 FEC2FE8 12
+. 81 3B 00 00 71 20 00 02 41 82 00 80
+
+. 0 FEC3070 56
+. 81 76 00 08 39 E0 00 00 82 36 00 00 38 9F 04 40 82 16 00 04 39 C0 FF FF 80 7F 04 C4 91 7F 04 08 91 FF 04 D8 92 3F 04 00 92 1F 04 04 91 FF 04 40 91 FF 04 44 48 01 76 E5
+
+. 0 FEDA788 16
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 48 0E D6 BD
+
+. 0 FEDA798 44
+. 93 C1 00 18 7C 9D 23 78 93 E1 00 1C 7F C8 02 A6 93 61 00 0C 7C 7F 1B 78 93 81 00 10 90 01 00 24 88 03 00 00 2F 80 00 00 41 9E 00 48
+
+. 0 FEDA7C4 8
+. 2C 00 00 25 41 82 00 40
+
+. 0 FEDA7CC 36
+. 81 3E 1D 50 3B 80 00 00 7F 69 12 14 93 9D 00 00 7F E3 FB 78 88 9F 00 00 7F A5 EB 78 70 89 00 80 40 82 00 44
+
+. 0 FEDA7F0 20
+. 3B FF 00 01 88 7F 00 00 2F 83 00 00 2F 03 00 25 41 9E 00 08
+
+. 0 FEDA804 4
+. 40 9A FF D4
+
+. 0 FEDA7D8 24
+. 93 9D 00 00 7F E3 FB 78 88 9F 00 00 7F A5 EB 78 70 89 00 80 40 82 00 44
+
+. 0 FEDA808 40
+. 80 A1 00 24 7F E3 FB 78 83 61 00 0C 83 81 00 10 7C A8 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEC30A8 36
+. 81 7B 00 00 39 40 00 00 90 7F 04 D0 7C 7D 1B 78 71 60 80 00 90 7F 04 4C 91 DF 04 CC 91 5F 04 E4 40 82 00 2C
+
+. 0 FEC30CC 28
+. 82 9E 1B 48 80 14 00 B8 31 40 FF FF 7E 4A 01 10 2E 12 00 00 92 5F 04 E4 40 92 02 74
+
+. 0 FEC30E8 20
+. 83 1E 1A F0 93 7F 04 14 93 1F 04 10 71 60 80 00 40 82 00 54
+
+. 0 FEC30FC 20
+. 80 7B 00 48 3B 82 8B A0 83 23 00 08 7F 19 E0 00 41 9A 00 34
+
+. 0 FEC3110 20
+. 38 E0 00 00 38 00 00 01 7F 40 18 28 7C 1A 38 00 40 82 00 0C
+
+. 0 FEC3124 8
+. 7C 00 19 2D 40 A2 FF F0
+
+. 0 FEC3118 12
+. 7F 40 18 28 7C 1A 38 00 40 82 00 0C
+
+. 0 FEC312C 12
+. 4C 00 01 2C 2F 9A 00 00 40 9E 1F 30
+
+. 0 FEC3138 72
+. 80 7B 00 48 93 83 00 08 81 83 00 04 3B 8C 00 01 93 83 00 04 89 3B 00 46 3A 80 FF FF 80 DF 04 C4 7D 28 07 74 7C 68 DA 14 7F A6 E8 50 80 A3 00 98 7F 63 DB 78 80 85 00 1C 7F A5 EB 78 7C 89 03 A6 7C C4 33 78 4E 80 04 21
+
+. 0 FEEA658 80
+. 2F 85 00 00 7C 08 02 A6 94 21 FF D0 93 01 00 10 3B 00 00 00 93 21 00 14 7C B9 2B 78 93 41 00 18 7C BA 2B 78 93 61 00 1C 7C 9B 23 78 93 81 00 20 7C 9C 23 78 93 A1 00 24 7C 7D 1B 78 93 C1 00 28 38 60 00 00 93 E1 00 2C 90 01 00 34 41 9E 00 6C
+
+. 0 FEEA6A8 24
+. 81 7D 00 00 80 9D 00 18 71 69 02 00 80 7D 00 14 7F E3 20 50 40 82 00 84
+
+. 0 FEEA6C0 8
+. 2C 1F 00 00 41 82 00 3C
+
+. 0 FEEA700 12
+. 7C 79 C2 14 2F 83 00 00 40 9E 00 74
+
+. 0 FEEA77C 36
+. 8B 1D 00 46 7F A3 EB 78 38 80 FF FF 7F 0C 07 74 7D 4C EA 14 81 0A 00 98 81 68 00 0C 7D 69 03 A6 4E 80 04 21
+
+. 0 FEE984C 12
+. 7C 08 02 A6 94 21 FF E0 48 0D E5 FD
+
+. 0 FEE9858 44
+. 93 A1 00 14 93 C1 00 18 7C 9D 23 78 90 01 00 24 7F C8 02 A6 81 63 00 00 93 E1 00 1C 7C 7F 1B 78 71 60 00 08 93 81 00 10 40 82 02 10
+
+. 0 FEE9884 8
+. 71 69 08 00 40 82 00 F4
+
+. 0 FEE988C 12
+. 80 A3 00 10 2C 85 00 00 40 86 01 2C
+
+. 0 FEE9898 4
+. 48 00 23 B1
+
+. 0 FEEBC48 32
+. 7C 88 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 90 81 00 14 80 03 00 1C 2F 80 00 00 40 9E 00 40
+
+. 0 FEEBC68 12
+. 80 A3 00 00 70 A9 00 02 41 82 00 10
+
+. 0 FEEBC80 28
+. 89 5F 00 46 7D 48 07 74 7D 28 FA 14 80 E9 00 98 81 67 00 34 7D 69 03 A6 4E 80 04 21
+
+. 0 FEDB788 44
+. 7C 88 02 A6 94 21 FF 70 93 A1 00 84 3B A0 20 00 93 E1 00 8C 7C 7F 1B 78 90 81 00 94 80 03 00 38 93 C1 00 88 2F 80 00 00 41 9C 00 4C
+
+. 0 FEDB7B4 32
+. 88 E3 00 46 38 81 00 10 7C E6 07 74 7D 26 1A 14 80 A9 00 98 81 65 00 48 7D 69 03 A6 4E 80 04 21
+
+. 0 FEEA4EC 28
+. 94 21 FF F0 7C 85 23 78 80 03 00 38 38 21 00 10 38 60 00 03 7C 04 03 78 48 05 6D BC
+
+. 0 FF412C0 12
+. 7D 88 02 A6 94 21 FF F0 48 08 6B 89
+
+. 0 FF412CC 28
+. 93 C1 00 08 7C 83 23 78 7F C8 02 A6 7C A4 2B 78 38 00 00 C5 7D 88 03 A6 44 00 00 02
+
+. 0 FF412E8 20
+. 7C 00 00 26 74 09 10 00 81 3E 1C 4C 7D 29 12 14 40 82 00 10
+
+. 0 FF412FC 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FEDB7D4 8
+. 2C 03 00 00 41 80 00 24
+
+. 0 FEDB7DC 16
+. 81 01 00 20 55 03 04 26 2C 83 20 00 41 86 00 78
+
+. 0 FEDB7EC 12
+. 80 01 00 48 2C 00 00 00 40 81 00 08
+
+. 0 FEDB7F8 36
+. 7C 1D 03 78 39 9D 0F FF 38 60 00 00 55 84 00 26 38 A0 00 03 38 C0 00 22 38 E0 FF FF 39 00 00 00 48 07 29 A1
+
+. 0 FF4E1B8 8
+. 38 00 00 5A 44 00 00 02
+
+. 0 FF4E1C0 4
+. 4C A3 00 20
+
+. 0 FEDB81C 12
+. 38 00 FF FF 2C 83 FF FF 41 86 00 1C
+
+. 0 FEDB828 20
+. 7C 64 1B 78 38 C0 00 01 7F E3 FB 78 7C A4 EA 14 48 01 03 45
+
+. 0 FEEBB7C 60
+. 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C 9B 23 78 93 81 00 10 7C BC 2B 78 90 01 00 24 81 23 00 1C 93 A1 00 14 7C DD 33 78 2F 89 00 00 93 E1 00 1C 93 C1 00 18 7C 7F 1B 78 41 9E 00 28
+
+. 0 FEEBBDC 28
+. 80 1F 00 00 2C 9D 00 00 54 09 00 3C 93 7F 00 1C 60 00 00 01 93 9F 00 20 41 86 00 2C
+
+. 0 FEEBBF8 40
+. 80 61 00 24 91 3F 00 00 83 61 00 0C 7C 68 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEDB83C 36
+. 38 00 00 01 83 A1 00 94 7C 03 03 78 83 C1 00 88 7F A8 03 A6 83 E1 00 8C 83 A1 00 84 38 21 00 90 4E 80 00 20
+
+. 0 FEEBC9C 8
+. 2F 03 FF FF 41 9A 00 18
+
+. 0 FEEBCA4 20
+. 80 61 00 14 83 E1 00 08 38 21 00 10 7C 68 03 A6 4E 80 00 20
+
+. 0 FEE989C 32
+. 80 DF 00 1C 81 7F 00 00 7C C5 33 78 90 DF 00 0C 90 DF 00 04 90 DF 00 08 71 60 01 00 40 82 01 8C
+
+. 0 FEE98BC 12
+. 81 5F 00 20 7F 85 50 00 41 9E 01 3C
+
+. 0 FEE98C8 44
+. 81 9F 00 60 61 6B 08 00 81 3F 00 08 7C A4 2B 78 2C 8C 00 00 90 BF 00 14 91 3F 00 0C 90 BF 00 10 91 3F 00 04 91 7F 00 00 40 85 01 44
+
+. 0 FEE9A34 8
+. 71 60 02 02 41 A2 FE BC
+
+. 0 FEE98F4 12
+. 91 5F 00 18 2F 1D FF FF 41 9A 00 98
+
+. 0 FEE9994 12
+. 7F E3 FB 78 7C A4 28 50 4B FF F3 8D
+
+. 0 FEE8D28 48
+. 94 21 FF E0 7C 08 02 A6 93 61 00 0C 7C BB 2B 79 93 81 00 10 7C 9C 23 78 93 E1 00 1C 7C 7F 1B 78 93 A1 00 14 93 C1 00 18 90 01 00 24 41 82 00 A4
+
+. 0 FEE8DF8 40
+. 38 60 00 00 83 61 00 24 83 81 00 10 7F 68 03 A6 83 A1 00 14 83 61 00 0C 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEE99A0 32
+. 83 A1 00 24 83 81 00 10 7F A8 03 A6 83 C1 00 18 83 A1 00 14 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEEA7A0 12
+. 2C 83 FF FF 7C 79 D0 50 41 A6 FF 68
+
+. 0 FEEA7AC 24
+. 83 FD 00 20 7F 3C CB 78 80 1D 00 1C 7D 20 F8 50 2B 09 00 7F 40 99 00 0C
+
+. 0 FEEA7C4 16
+. 7C 99 4B 96 7F 84 49 D6 2C 1C 00 00 41 82 00 A8
+
+. 0 FEEA878 8
+. 2F 19 00 00 41 9A FE 90
+
+. 0 FEEA880 16
+. 7F 25 CB 78 7C 9B E2 14 7F A3 EB 78 48 00 14 C9
+
+. 0 FEEBD54 64
+. 2F 85 00 00 7C 08 02 A6 94 21 FF E0 93 41 00 08 7C BA 2B 78 93 61 00 0C 7C BB 2B 78 93 81 00 10 7C 7C 1B 78 93 E1 00 1C 38 60 00 00 93 A1 00 14 7C 9F 23 78 93 C1 00 18 90 01 00 24 41 9E 00 A0
+
+. 0 FEEBD94 16
+. 80 7C 00 14 80 BC 00 18 7C 03 28 40 40 80 00 4C
+
+. 0 FEEBDA4 12
+. 7F A3 28 50 7C 9D D8 40 40 85 00 08
+
+. 0 FEEBDB0 12
+. 7F 7D DB 78 2B 1D 00 14 41 99 00 A0
+
+. 0 FEEBDBC 8
+. 2F 9D 00 00 41 9E 00 28
+
+. 0 FEEBDC4 8
+. 34 1D FF FF 41 80 00 1C
+
+. 0 FEEBDCC 24
+. 7F A9 03 A6 88 9F 00 00 3B FF 00 01 98 83 00 00 38 63 00 01 42 00 FF F0
+
+. 0 FEEBDD0 20
+. 88 9F 00 00 3B FF 00 01 98 83 00 00 38 63 00 01 42 00 FF F0
+
+. 0 FEEBDE4 16
+. 90 7C 00 14 7F 7D D8 50 2C 9B 00 00 41 86 00 3C
+
+. 0 FEEBE2C 44
+. 7C 7B D0 50 80 E1 00 24 83 41 00 08 83 61 00 0C 7C E8 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEEA890 12
+. 7F 23 C8 50 7C 79 D0 50 4B FF FE 78
+
+. 0 FEEA710 48
+. 83 21 00 34 83 01 00 10 7F 28 03 A6 83 41 00 18 83 21 00 14 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 FEC3180 8
+. 7C 83 E8 00 40 86 01 60
+
+. 0 FEC3188 20
+. 80 9F 04 4C 7C 74 1B 78 89 44 00 00 2D 8A 00 00 41 8E 01 4C
+
+. 0 FEC32E4 12
+. 83 3B 00 00 73 27 80 00 40 82 00 38
+
+. 0 FEC32F0 24
+. 80 7B 00 48 83 63 00 04 39 3B FF FF 2E 09 00 00 91 23 00 04 40 92 00 20
+
+. 0 FEC3308 20
+. 91 23 00 08 7C 00 04 AC 7D 40 18 28 7D 20 19 2D 40 A2 FF F8
+
+. 0 FEC331C 8
+. 2F 8A 00 01 41 9D 1D 8C
+
+. 0 FEC3324 12
+. 81 1F 04 E4 2F 08 00 00 40 9A 00 0C
+
+. 0 FEC3330 8
+. 7E 89 A3 78 4B FF FC D4
+
+. 0 FEC3008 104
+. 81 61 00 00 7D 23 4B 78 83 AB 00 04 81 8B FF B4 7F A8 03 A6 81 CB FF B8 81 EB FF BC 7D 81 81 20 82 0B FF C0 82 2B FF C4 82 4B FF C8 82 6B FF CC 82 8B FF D0 82 AB FF D4 82 CB FF D8 82 EB FF DC 83 0B FF E0 83 2B FF E4 83 4B FF E8 83 6B FF EC 83 8B FF F0 83 AB FF F4 83 CB FF F8 83 EB FF FC 7D 61 5B 78 4E 80 00 20
+
+. 0 FECAE60 20
+. 80 81 00 94 83 C1 00 88 38 21 00 90 7C 88 03 A6 4E 80 00 20
+
+. 0 10015754 4
+. 4B FF FF 8C
+
+. 0 100156E0 20
+. 80 1F 00 00 7F A4 EB 78 38 A1 00 70 2F 80 00 00 41 9E FF AC
+
+. 0 100156F4 44
+. 83 BE 80 18 38 00 00 02 39 20 00 00 98 01 00 70 99 21 00 71 38 01 00 98 80 7D 00 00 39 21 00 08 90 01 00 74 91 21 00 78 48 01 38 29
+
+. 0 10028F44 8
+. 39 60 00 28 4B FF FF 84
+
+. 0 FEC2F8C 12
+. 2D 83 FF FF 39 20 FF FF 40 8E 00 74
+
+. 0 FEEA6C8 8
+. 7C 9F D0 40 41 85 01 D0
+
+. 0 FEEA89C 8
+. 7F 5F D3 78 4B FF FE 30
+
+. 0 FEEA6D0 8
+. 2B 1F 00 14 41 99 01 D0
+
+. 0 FEEA8A4 12
+. 7F 84 E3 78 7F E5 FB 78 48 00 D7 55
+
+. 0 FEF8000 44
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 E1 00 1C 7C 9F 23 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+
+. 0 FEF802C 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+
+. 0 FEF8040 24
+. 7D 29 03 A6 88 BF 00 00 3B FF 00 01 98 BD 00 00 3B BD 00 01 42 00 FF F0
+
+. 0 FEF8044 20
+. 88 BF 00 00 3B FF 00 01 98 BD 00 00 3B BD 00 01 42 00 FF F0
+
+. 0 FEF8058 8
+. 73 E0 00 03 40 82 00 68
+
+. 0 FEF80C4 16
+. 7F A3 EB 78 7F E4 FB 78 57 85 F0 BE 48 00 08 41
+
+. 0 FEF8910 28
+. 54 A0 07 BE 54 88 1E F8 2F 80 00 01 94 21 FF F0 54 84 00 3A 20 E8 00 20 41 9E 00 B8
+
+. 0 FEF892C 8
+. 28 00 00 01 41 80 00 8C
+
+. 0 FEF89BC 16
+. 81 44 00 00 38 63 FF FC 85 64 00 04 4B FF FF 90
+
+. 0 FEF8958 76
+. 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+. 0 FEF8944 96
+. 7D 66 40 30 7D 49 3C 30 7C C0 4B 78 81 64 00 00 90 03 00 00 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+. 0 FEF89A4 24
+. 7D 65 40 30 7D 48 3C 30 7C A4 43 78 38 21 00 10 90 83 00 00 4E 80 00 20
+
+. 0 FEF80D4 4
+. 4B FF FF 9C
+
+. 0 FEF8070 24
+. 57 86 00 3A 57 85 07 BE 7F BD 32 14 7F FF 32 14 2C 85 00 00 41 86 00 1C
+
+. 0 FEF80A0 36
+. 81 01 00 24 7F A3 EB 78 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEEA8B0 16
+. 7F 7C FA 14 7F 3F D0 50 90 7D 00 14 4B FF FE 44
+
+. 0 FEEA70C 52
+. 7C 79 D0 50 83 21 00 34 83 01 00 10 7F 28 03 A6 83 41 00 18 83 21 00 14 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 FEC319C 112
+. 81 DE 03 FC 91 DF 05 60 3B 04 00 01 39 60 00 00 93 1F 04 4C 3A 20 FF FF 8B 04 00 01 3A 00 00 20 39 FF 03 F8 91 7F 04 EC 3A 58 FF E0 91 7F 04 F0 2A 12 00 5A 91 7F 04 F4 91 7F 04 F8 38 00 00 00 91 7F 04 FC 91 7F 05 00 91 7F 05 04 91 7F 05 08 91 7F 05 0C 91 7F 05 10 92 3F 05 14 91 7F 05 18 92 1F 05 1C 91 7F 05 68 91 FF 04 D4 41 91 00 20
+
+. 0 FEC320C 44
+. 80 1E 03 D8 57 07 10 3A 81 3E 03 DC 7F 47 02 14 83 3A FF 80 57 20 10 3A 7C 09 00 2E 83 9F 05 60 7D 60 E2 14 7D 69 03 A6 4E 80 04 20
+
+. 0 FEC4788 20
+. 83 5F 05 00 39 60 00 0A 91 7F 04 E8 2F 9A 00 00 41 9E 12 38
+
+. 0 FEC59D0 12
+. 89 76 00 00 29 8B 00 08 40 8C 07 60
+
+. 0 FEC59DC 36
+. 82 16 00 08 55 6F 10 3A 38 6B 00 01 7D 30 7A 14 98 76 00 00 80 09 00 00 54 1A 0F FE 2E 1A 00 00 41 92 00 08
+
+. 0 FEC5A04 8
+. 7C 0E 03 78 4B FF F2 E0
+
+. 0 FEC4CE8 12
+. 80 9F 05 14 2F 84 00 00 41 9C 05 E4
+
+. 0 FEC52D4 36
+. 38 E0 00 01 90 FF 05 14 80 9F 04 D4 6B 06 00 58 21 46 00 00 7C CA 31 14 80 BF 04 E8 7D C3 73 78 4B FF C4 AD
+
+. 0 FEC17A0 12
+. 7D 88 02 A6 94 21 FF F0 48 10 66 A9
+
+. 0 FEC17AC 24
+. 2F 86 00 00 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 81 1E 1D 08 40 9E 00 08
+
+. 0 FEC17C4 12
+. 81 1E 1E 10 2C 05 00 0A 41 82 00 78
+
+. 0 FEC1844 44
+. 3D 40 CC CC 61 4A CC CD 7C C3 50 16 54 C0 E8 FE 1C A0 00 0A 2C 00 00 00 7D 25 18 50 7C 03 03 78 7D 69 40 AE 9D 64 FF FF 40 82 FF E0
+
+. 0 FEC1870 16
+. 7C 83 23 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FEC52F8 28
+. 80 FF 04 CC 81 3F 04 FC 7C 7C 1B 78 31 07 FF FF 7E 08 39 10 7D 29 80 39 40 82 05 74
+
+. 0 FEC5314 28
+. 82 5F 04 E8 82 3F 05 18 6A 43 00 0A 20 E3 00 00 7C 67 19 14 7E 28 18 39 41 82 EA 7C
+
+. 0 FEC3DA8 20
+. 80 FF 04 D4 83 3F 05 14 7F BC 38 50 7F 1D C8 00 41 98 00 24
+
+. 0 FEC3DBC 20
+. 81 7F 04 EC 31 2E FF FF 7D 89 71 10 7D 8A 58 39 41 82 00 10
+
+. 0 FEC3DDC 32
+. 80 BF 04 F4 80 1F 05 14 2C 85 00 00 7D FD 00 50 7D E7 78 F8 7C E4 FE 70 7D F9 20 38 40 86 12 D0
+
+. 0 FEC3DFC 48
+. 82 5F 04 EC 31 0E FF FF 7C 68 71 10 7D CA FE 70 7E 1D CA 14 7C 69 90 39 7D 48 72 78 81 3F 05 10 7D CA 40 50 91 DF 05 64 7C B0 48 50 41 82 00 10
+
+. 0 FEC3E38 32
+. 80 9F 05 1C 80 DF 04 F8 2F 04 00 20 7F 4C 33 78 83 5F 04 F0 7D 8B D3 78 7F 4B 28 50 41 9A 11 B4
+
+. 0 FEC5008 8
+. 2F 9A 00 00 40 9D 00 18
+
+. 0 FEC5024 8
+. 3B 40 00 00 40 B2 EE 34
+
+. 0 FEC502C 12
+. 80 FF 04 F8 2C 87 00 00 41 86 08 A4
+
+. 0 FEC58D8 12
+. 80 BF 04 F0 2D 85 00 00 41 AE E5 A8
+
+. 0 FEC3E88 20
+. 82 1F 05 64 7D 10 00 D0 55 0A 0F FE 7D 48 90 39 41 82 00 10
+
+. 0 FEC3EA8 8
+. 7C BA CA 15 40 81 00 14
+
+. 0 FEC3EC0 40
+. 8B 5B 00 46 7F 84 E3 78 7F 63 DB 78 7F A5 EB 78 7F 4B 07 74 7F 2B DA 14 83 99 00 98 83 1C 00 1C 7F 09 03 A6 4E 80 04 21
+
+. 0 FEEA6D8 8
+. 34 1F FF FF 41 80 00 1C
+
+. 0 FEEA6E0 24
+. 7F E9 03 A6 88 FB 00 00 3B 7B 00 01 98 E3 00 00 38 63 00 01 42 00 FF F0
+
+. 0 FEEA6F8 20
+. 90 7D 00 14 7F 3F D0 50 7C 79 C2 14 2F 83 00 00 40 9E 00 74
+
+. 0 FEC3EE8 8
+. 7F 83 E8 00 40 9E 0A 08
+
+. 0 FEC3EF0 8
+. 7E 94 EA 14 4B FF F6 CC
+
+. 0 FEC35C0 24
+. 80 FF 05 68 83 BF 04 D8 2D 87 00 00 3B 9D 00 01 93 9F 04 D8 40 8E 07 08
+
+. 0 FEC35D8 24
+. 83 3F 04 4C 38 9F 04 40 39 D9 00 01 7D C3 73 78 91 DF 04 4C 48 01 71 9D
+
+. 0 FEC35F0 44
+. 8A 5B 00 46 7C AE 18 50 90 7F 04 4C 7E 40 07 74 7D C4 73 78 7D 00 DA 14 7F 63 DB 78 82 08 00 98 81 50 00 1C 7D 49 03 A6 4E 80 04 21
+
+. 0 FEC361C 16
+. 80 9F 04 4C 7C EE 20 50 7C 03 38 00 40 82 33 70
+
+. 0 FEC362C 16
+. 8B 44 00 00 7E 94 1A 14 2E 1A 00 00 40 92 FB 6C
+
+. 0 FEC363C 4
+. 4B FF FC A8
+
+. 0 10015720 8
+. 38 60 00 0A 48 01 38 09
+
+. 0 10028F2C 8
+. 39 60 00 1C 4B FF FF 9C
+
+. 0 FEE0CD4 12
+. 94 21 FF E0 7C 88 02 A6 48 0E 71 75
+
+. 0 FEE0CE0 52
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 90 81 00 24 93 E1 00 1C 83 7E 1C 24 93 A1 00 14 7C 7D 1B 78 83 FB 00 00 93 81 00 10 80 1F 00 00 70 09 80 00 40 82 00 54
+
+. 0 FEE0D14 20
+. 80 7F 00 48 3B 82 8B A0 80 A3 00 08 7F 85 E0 00 41 9E 00 34
+
+. 0 FEE0D28 20
+. 39 60 00 00 38 C0 00 01 7D 20 18 28 7C 09 58 00 40 82 00 0C
+
+. 0 FEE0D3C 8
+. 7C C0 19 2D 40 A2 FF F0
+
+. 0 FEE0D30 12
+. 7D 20 18 28 7C 09 58 00 40 82 00 0C
+
+. 0 FEE0D44 12
+. 4C 00 01 2C 2C 89 00 00 40 86 00 B4
+
+. 0 FEE0D50 40
+. 80 7F 00 48 93 83 00 08 81 03 00 04 38 E8 00 01 90 E3 00 04 80 7B 00 00 81 23 00 14 81 43 00 18 7F 09 50 40 40 98 00 7C
+
+. 0 FEE0D78 28
+. 39 89 00 01 9B A9 00 00 57 BC 06 3E 91 83 00 14 80 7F 00 00 70 69 80 00 40 82 00 38
+
+. 0 FEE0D94 24
+. 80 7F 00 48 83 A3 00 04 39 3D FF FF 2F 09 00 00 91 23 00 04 40 9A 00 20
+
+. 0 FEE0DAC 20
+. 91 23 00 08 7C 00 04 AC 7C 00 18 28 7D 20 19 2D 40 A2 FF F8
+
+. 0 FEE0DC0 8
+. 2F 80 00 01 41 9D 00 44
+
+. 0 FEE0DC8 40
+. 80 81 00 24 7F 83 E3 78 83 61 00 0C 83 81 00 10 7C 88 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10015728 8
+. 80 7D 00 00 48 01 37 F1
+
+. 0 10028F1C 8
+. 39 60 00 14 4B FF FF AC
+
+. 0 FEDBE1C 32
+. 94 21 FF E0 7C 08 02 A6 93 E1 00 1C 7C 7F 1B 79 93 A1 00 14 93 C1 00 18 90 01 00 24 41 82 01 78
+
+. 0 FEDBE3C 12
+. 80 7F 00 00 70 69 80 00 40 82 00 54
+
+. 0 FEDBE48 20
+. 80 7F 00 48 3B A2 8B A0 80 83 00 08 7F 84 E8 00 41 9E 00 34
+
+. 0 FEDBE5C 20
+. 39 60 00 00 38 A0 00 01 7D 20 18 28 7C 09 58 00 40 82 00 0C
+
+. 0 FEDBE70 8
+. 7C A0 19 2D 40 A2 FF F0
+
+. 0 FEDBE64 12
+. 7D 20 18 28 7C 09 58 00 40 82 00 0C
+
+. 0 FEDBE78 12
+. 4C 00 01 2C 2C 89 00 00 40 86 00 C0
+
+. 0 FEDBE84 52
+. 80 7F 00 48 93 A3 00 08 80 E3 00 04 38 C7 00 01 90 C3 00 04 88 1F 00 46 7F E3 FB 78 7C 1D 07 74 7D 9D FA 14 81 4C 00 98 81 0A 00 30 7D 09 03 A6 4E 80 04 21
+
+. 0 FEE9AB0 12
+. 7C 08 02 A6 94 21 FF E0 48 0D E3 99
+
+. 0 FEE9ABC 44
+. 93 A1 00 14 93 C1 00 18 3B A0 00 00 90 01 00 24 7F C8 02 A6 80 A3 00 14 80 83 00 10 93 E1 00 1C 7C 7F 1B 78 7F 85 20 40 40 9D 00 50
+
+. 0 FEE9AE8 12
+. 80 C3 00 60 2C 06 00 00 40 81 00 C4
+
+. 0 FEE9BB4 8
+. 7C A4 28 50 4B FF F1 71
+
+. 0 FEE8D58 12
+. 80 83 00 00 70 89 10 00 41 82 00 C0
+
+. 0 FEE8E20 16
+. 80 03 00 08 80 A3 00 10 7F 80 28 00 41 BE FF 48
+
+. 0 FEE8D74 40
+. 89 1F 00 46 7F 84 E3 78 7F E3 FB 78 7F 65 DB 78 7D 07 07 74 7D 27 FA 14 81 69 00 98 83 AB 00 3C 7F A9 03 A6 4E 80 04 21
+
+. 0 FEEA570 56
+. 2F 85 00 00 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C BB 2B 78 93 81 00 10 7C 7C 1B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C BF 2B 78 93 C1 00 18 90 01 00 24 40 9D 00 3C
+
+. 0 FEEA5A8 20
+. 80 7C 00 3C 7F A4 EB 78 7F E5 FB 78 70 69 00 02 40 82 00 7C
+
+. 0 FEEA5BC 16
+. 80 7C 00 38 7F A4 EB 78 7F E5 FB 78 48 05 7A 11
+
+. 0 FF41FD8 12
+. 81 42 8B A0 2C 0A 00 00 40 82 00 14
+
+. 0 FF41FE4 8
+. 38 00 00 04 44 00 00 02
+
+PASS: fft_complex_bitreverse_order, n = 1
+. 0 FF41FEC 4
+. 4C A3 00 20
+
+. 0 FEEA5CC 12
+. 2C 83 00 00 7F BD 1A 14 41 84 00 74
+
+. 0 FEEA5D8 8
+. 7F E3 F8 51 41 81 FF CC
+
+. 0 FEEA5E0 16
+. 80 DC 00 50 7C 7F D8 50 2F 06 00 00 41 98 00 24
+
+. 0 FEEA610 36
+. 80 E1 00 24 83 61 00 0C 83 81 00 10 7C E8 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEE8D9C 32
+. 7C 7D 1B 78 A0 7F 00 44 30 1D FF FF 7C 80 E9 10 7D 43 00 D0 55 40 0F FE 7C 0B 20 39 40 82 00 CC
+
+. 0 FEE8DBC 36
+. 83 9F 00 60 81 3F 00 1C 2F 9C 00 00 91 3F 00 0C 91 3F 00 04 91 3F 00 08 91 3F 00 14 91 3F 00 10 40 9D 00 94
+
+. 0 FEE8E70 12
+. 80 BF 00 00 70 AB 02 02 40 A2 FF 6C
+
+. 0 FEE8E7C 8
+. 81 3F 00 20 4B FF FF 64
+
+. 0 FEE8DE4 20
+. 91 3F 00 18 7F A5 EB 78 7C 85 D8 00 38 60 FF FF 40 A6 00 08
+
+. 0 FEE9BBC 4
+. 4B FF FF 50
+
+. 0 FEE9B0C 8
+. 2C 83 00 00 41 86 00 24
+
+. 0 FEE9B34 16
+. 80 7F 00 04 80 9F 00 08 7C 04 18 51 40 82 00 34
+
+. 0 FEE9B44 48
+. 38 80 FF FF 38 60 FF FF 90 9F 00 50 90 7F 00 54 7F A3 EB 78 83 A1 00 24 83 C1 00 18 7F A8 03 A6 83 E1 00 1C 83 A1 00 14 38 21 00 20 4E 80 00 20
+
+. 0 FEDBEB8 24
+. 80 BF 00 00 21 23 00 00 7C 69 19 14 3B A3 FF FF 70 A9 80 00 40 82 00 1C
+
+. 0 FEDBED0 24
+. 80 7F 00 48 81 23 00 04 39 29 FF FF 2C 89 00 00 91 23 00 04 41 86 00 24
+
+. 0 FEDBF08 20
+. 91 23 00 08 7C 00 04 AC 7C C0 18 28 7D 20 19 2D 40 A2 FF F8
+
+. 0 FEDBF1C 8
+. 2F 06 00 01 40 99 FF C8
+
+. 0 FEDBEE8 32
+. 7F A3 EB 78 80 E1 00 24 83 A1 00 14 83 C1 00 18 7C E8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10015730 28
+. 80 01 00 94 83 A1 00 84 83 C1 00 88 7C 08 03 A6 83 E1 00 8C 38 21 00 90 4E 80 00 20
+
+. 0 100014F0 8
+. 7F A3 EB 78 48 02 7A 91
+
+. 0 10028F84 8
+. 39 60 00 48 4B FF FF 44
+
+. 0 FEEF490 12
+. 94 21 FF E0 7C 08 02 A6 48 0D 89 B9
+
+. 0 FEEF49C 44
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 90 01 00 24 7C 7C 1B 78 93 A1 00 14 81 3E 1D 88 93 E1 00 1C 80 09 00 00 2F 80 00 00 41 9E 00 34
+
+. 0 FEEF4F8 8
+. 2C 03 00 00 41 A2 FF DC
+
+. 0 FEEF500 16
+. 39 03 FF F8 80 88 00 04 70 80 00 02 40 82 00 88
+
+. 0 FEEF510 12
+. 70 80 00 04 83 FE 05 FC 40 82 00 70
+
+. 0 FEEF51C 20
+. 3B A0 00 00 38 00 00 01 7D 00 F8 28 7C 08 E8 00 40 82 00 0C
+
+. 0 FEEF530 8
+. 7C 00 F9 2D 40 A2 FF F0
+
+. 0 FEEF524 12
+. 7D 00 F8 28 7C 08 E8 00 40 82 00 0C
+
+. 0 FEEF538 12
+. 4C 00 01 2C 2C 88 00 00 40 86 00 90
+
+. 0 FEEF544 12
+. 7F 84 E3 78 7F E3 FB 78 4B FF F6 4D
+
+. 0 FEEEB98 24
+. 7C A8 02 A6 94 21 FF A0 93 E1 00 5C 3B E4 FF F8 93 A1 00 54 48 0D 92 A5
+
+. 0 FEEEBB0 84
+. 90 A1 00 64 81 3F 00 04 93 81 00 50 7C 7C 1B 78 55 3D 00 38 93 C1 00 58 7C 1D 00 D0 92 21 00 24 7F 9F 00 40 92 41 00 28 92 61 00 2C 7F C8 02 A6 92 81 00 30 92 A1 00 34 92 C1 00 38 92 E1 00 3C 93 01 00 40 93 21 00 44 93 41 00 48 93 61 00 4C 41 9D 02 A0
+
+. 0 FEEEC04 8
+. 73 E0 00 07 40 82 02 98
+
+. 0 FEEEC0C 12
+. 81 63 00 04 7F 0B E8 40 40 98 01 DC
+
+. 0 FEEEDF0 16
+. 7C DF EA 14 81 26 00 04 2B 89 00 08 40 9D 01 54
+
+. 0 FEEEE00 16
+. 81 03 04 4C 55 2A 00 38 7C 8A 40 40 40 84 01 44
+
+. 0 FEEEE10 28
+. 57 AA F8 7A 55 6C 00 3C 7D 2A 18 2E 91 83 00 04 7C 09 F8 00 83 7E 06 64 41 82 00 5C
+
+. 0 FEEEE2C 84
+. 91 3F 00 08 7F EA 19 2E 83 A1 00 64 82 21 00 24 7F A8 03 A6 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+
+. 0 FEEF550 16
+. 7C 00 04 AC 7F 80 F8 28 7F A0 F9 2D 40 A2 FF F8
+
+. 0 FEEF560 8
+. 2F 1C 00 01 40 99 FF 74
+
+. 0 FEEF4D8 32
+. 80 A1 00 24 83 81 00 10 83 A1 00 14 7C A8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 100014F8 8
+. 7F E3 FB 78 48 02 7A 89
+
+. 0 10028F84 4
+. 4B EC 65 0C
+
+. 0 10001500 8
+. 7F 43 D3 78 48 02 7A 81
+
+. 0 10001508 40
+. 80 01 00 34 83 41 00 18 83 61 00 1C 7C 08 03 A6 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 100046B0 12
+. 7F A3 EB 78 7F E4 FB 78 4B FF CE 79
+
+. 0 10001530 100
+. 94 21 FF B0 7C 08 02 A6 93 81 00 38 7F 84 19 D6 93 A1 00 3C 93 41 00 30 7C 7A 1B 78 93 E1 00 44 3B E0 00 00 7D 80 00 26 57 9D 20 36 7F A3 EB 78 90 01 00 54 91 81 00 18 57 9C 08 3C 92 E1 00 24 93 01 00 28 93 21 00 2C 93 61 00 34 93 C1 00 40 7C 9E 23 78 DB E1 00 48 92 A1 00 1C 92 C1 00 20 48 02 79 A5
+
+. 0 10028F34 4
+. 4B EC 89 AC
+
+. 0 FEEF68C 24
+. 81 5D 00 04 3B 9D 00 08 55 48 E8 FE 38 E8 FF FE 7E 07 00 00 40 92 06 20
+
+. 0 FEEF6A4 112
+. 82 3D 00 08 7F 88 E3 78 92 3B 00 00 83 81 00 74 7D 03 43 78 81 81 00 24 7F 88 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 10001594 12
+. 7C 7B 1B 78 7F A3 EB 78 48 02 79 99
+
+. 0 100015A0 12
+. 7C 79 1B 78 7F A3 EB 78 48 02 79 8D
+
+. 0 100015AC 12
+. 7C 78 1B 78 7F A3 EB 78 48 02 79 81
+
+. 0 100015B8 12
+. 2B 9C 00 00 7C 77 1B 78 40 9D 00 68
+
+. 0 100015C4 100
+. 3D 60 10 02 3D 20 10 02 C9 0B 81 00 3D 60 10 02 39 6B 80 F8 C8 E9 80 E8 7F 89 03 A6 3D 20 10 02 C9 4B 00 00 C9 29 81 08 3D 20 43 30 93 E1 00 0C 57 E0 18 38 91 21 00 08 3B FF 00 01 C8 01 00 08 FC 00 50 28 FD A0 48 2A 7C 1B 05 AE FD 80 38 2A FD 60 40 2A 7D B7 05 AE 7D 99 05 AE 7D 78 05 AE 42 00 FF CC
+
+. 0 100015F0 56
+. 93 E1 00 0C 57 E0 18 38 91 21 00 08 3B FF 00 01 C8 01 00 08 FC 00 50 28 FD A0 48 2A 7C 1B 05 AE FD 80 38 2A FD 60 40 2A 7D B7 05 AE 7D 99 05 AE 7D 78 05 AE 42 00 FF CC
+
+. 0 10001628 8
+. 38 60 00 00 48 01 3D C9
+
+. 0 100153F4 68
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 83 C1 00 08 80 09 00 00 90 69 00 00 7C 03 03 78 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10001630 20
+. 7F C3 F3 78 7F 44 D3 78 7F 65 DB 78 7F 06 C3 78 48 00 39 D9
+
+. 0 10005018 60
+. 94 21 FF D0 7C 08 02 A6 93 61 00 1C 7C 7B 1B 79 93 21 00 14 7C D9 33 78 93 41 00 18 7C 9A 23 78 93 A1 00 24 7C BD 2B 78 93 81 00 20 93 C1 00 28 93 E1 00 2C 90 01 00 34 41 82 00 78
+
+. 0 10005054 8
+. 2F 9B 00 00 41 9E 00 30
+
+. 0 1000505C 16
+. 3B C0 00 00 54 9C 20 36 7F 7F DB 78 4B FF F7 25
+
+. 0 1000478C 92
+. 94 21 FF F0 3D 40 10 03 81 2A 8E 78 3C 00 41 C6 60 00 4E 6D 3D 60 43 30 7D 29 01 D6 91 61 00 08 3D 60 10 02 39 6B 80 F8 C8 0B 00 00 39 29 30 39 55 29 00 7E 91 21 00 0C C9 A1 00 08 38 21 00 10 91 2A 8E 78 3D 20 10 02 FD AD 00 28 39 29 81 30 C8 29 00 00 FC 2D 00 72 4E 80 00 20
+
+. 0 1000506C 8
+. 7C 3E ED AE 4B FF F7 1D
+
+. 0 10005074 20
+. 37 FF FF FF 7D 3E EA 14 7F DE E2 14 D8 29 00 08 40 82 FF E4
+
+. 0 10005088 20
+. 7F A3 EB 78 7F 44 D3 78 7F 65 DB 78 7F 26 CB 78 48 00 0E 89
+
+. 0 10005F20 20
+. 7C 08 02 A6 94 21 FF F0 38 E0 FF FF 90 01 00 14 4B FF FD 19
+
+. 0 10005C48 208
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 50 7F C8 02 A6 6C E7 80 00 90 01 00 94 90 E1 00 0C 80 1E FF F0 90 A1 00 14 7F C0 F2 14 3C 00 43 30 81 3E 80 00 90 01 00 08 C8 09 00 00 C9 A1 00 08 81 3E 80 08 FD AD 00 28 90 01 00 10 C9 89 00 00 81 3E 80 04 FD AD 68 2A 93 21 00 3C C8 09 00 00 3B 20 00 00 7F 99 28 40 DB 41 00 60 FD AD 00 32 C8 01 00 10 92 A1 00 2C 7C D5 33 78 FC 00 60 28 93 41 00 40 93 61 00 44 7C 7A 1B 78 DB 21 00 58 7C BB 2B 78 DB 61 00 68 FF 4D 00 24 DB 81 00 70 DB A1 00 78 DB C1 00 80 DB E1 00 88 92 61 00 24 92 81 00 28 92 C1 00 30 92 E1 00 34 93 01 00 38 93 81 00 48 93 A1 00 4C 93 E1 00 54 40 9C 00 C0
+
+. 0 10005D18 40
+. 54 94 20 36 82 7E 80 0C FF 20 60 90 3A C0 00 00 7E 98 A3 78 2F 9B 00 00 CB 93 00 00 3B A0 00 00 FF A0 E0 90 41 9E 00 7C
+
+. 0 10005D40 44
+. 3E E0 43 30 FF 60 C8 90 3B 80 00 00 7F 7F DB 78 93 A1 00 14 92 E1 00 10 CB E1 00 10 FF FF D8 28 FF FF 06 B2 FC 20 F8 90 48 02 31 8D
+
+. 0 10028EF4 8
+. 39 60 00 00 4B FF FF D4
+
+. 0 FE0DF50 32
+. 94 21 FF 90 3C 60 3E 3F D8 21 00 30 60 60 FF FF 81 21 00 30 81 41 00 34 7C 88 02 A6 48 06 51 45
+
+. 0 FE0DF70 48
+. DB E1 00 68 55 29 00 7E 93 C1 00 50 7F 89 00 00 DB A1 00 58 DB C1 00 60 7F C8 02 A6 93 A1 00 4C FF E0 08 90 93 E1 00 54 90 81 00 74 40 9D 07 30
+
+. 0 FE0E6CC 12
+. 81 3E 10 0C C8 29 00 00 4B FF FA B8
+
+. 0 FE0E18C 40
+. 80 81 00 74 83 A1 00 4C 83 C1 00 50 7C 88 03 A6 83 E1 00 54 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 10005D6C 12
+. FF C0 08 90 FC 20 F8 90 48 02 32 21
+
+. 0 10028F94 8
+. 39 60 00 50 4B FF FF 34
+
+. 0 FE0C884 36
+. 94 21 FF 60 3C 60 3E 4F D8 21 00 30 60 60 FF FF 81 21 00 30 81 41 00 34 7D 80 00 26 7C 88 02 A6 48 06 68 0D
+
+. 0 FE0C8A8 88
+. 55 2B 00 7E DB E1 00 98 7F 8B 00 00 93 C1 00 68 DB 41 00 70 7F C8 02 A6 DB 61 00 78 FF E0 08 90 DB 81 00 80 FD 60 08 90 DB A1 00 88 DB C1 00 90 93 01 00 50 93 21 00 54 93 41 00 58 93 61 00 5C 93 81 00 60 93 A1 00 64 93 E1 00 6C 90 81 00 A4 91 81 00 4C 40 9D 01 60
+
+. 0 FE0CA5C 84
+. 83 01 00 A4 FC 20 58 90 80 81 00 4C 7F 08 03 A6 83 21 00 54 83 01 00 50 7C 80 81 20 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C CB 41 00 70 CB 61 00 78 CB 81 00 80 CB A1 00 88 CB C1 00 90 CB E1 00 98 38 21 00 A0 4E 80 00 20
+
+. 0 10005D78 64
+. 7D 7D CA 14 7C 0B DB 96 7D 3C D2 14 7C 1C D4 AE 37 FF FF FF C9 89 00 08 7F 9C C2 14 FD A1 03 32 FC 21 00 32 FC 1E 68 38 FF DE 0B 3A FF 9C 00 2A FF BD F0 2A 7C 00 D9 D6 7F A0 58 50 40 82 FF 9C
+
+. 0 10005DB8 28
+. 3B 39 00 01 7D 36 AA 14 7F 99 D8 40 7F 96 AD AE DB A9 00 08 7E D6 A2 14 41 9C FF 5C
+
+. 0 10005DD4 100
+. 80 01 00 94 38 60 00 00 82 61 00 24 82 81 00 28 7C 08 03 A6 82 A1 00 2C 82 C1 00 30 82 E1 00 34 83 01 00 38 83 21 00 3C 83 41 00 40 83 61 00 44 83 81 00 48 83 A1 00 4C 83 C1 00 50 83 E1 00 54 CB 21 00 58 CB 41 00 60 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10005F34 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 1000509C 44
+. 80 01 00 34 83 21 00 14 83 41 00 18 7C 08 03 A6 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 10001644 8
+. 2F 9E 00 00 41 9E 00 30
+
+. 0 1000164C 44
+. 7F C9 03 A6 38 00 00 00 57 4B 20 36 7D 20 DA 14 7C 1B 04 AE C9 A9 00 08 7D 20 CA 14 7C 19 05 AE 7C 00 5A 14 D9 A9 00 08 42 00 FF E4
+
+. 0 10001678 16
+. 7F 63 DB 78 7F 44 D3 78 7F C5 F3 78 48 00 7B 49
+
+. 0 100091CC 20
+. 7C 08 02 A6 94 21 FF F0 38 C0 FF FF 90 01 00 14 4B FF FD 09
+
+. 0 10008EE4 124
+. 94 21 FF 90 7C 08 02 A6 42 9F 00 05 93 C1 00 40 7F C8 02 A6 2F 85 00 01 90 01 00 74 93 41 00 30 7C DA 33 78 80 1E FF F0 93 81 00 38 7C BC 2B 78 93 A1 00 3C 7F C0 F2 14 93 E1 00 44 7C 9D 23 78 7C 7F 1B 78 DB 61 00 48 DB 81 00 50 38 60 00 00 DB A1 00 58 DB C1 00 60 DB E1 00 68 92 81 00 18 92 A1 00 1C 92 C1 00 20 92 E1 00 24 93 01 00 28 93 21 00 2C 93 61 00 34 41 9E 02 00
+
+. 0 1000915C 84
+. 80 01 00 74 82 81 00 18 82 A1 00 1C 7C 08 03 A6 82 C1 00 20 82 E1 00 24 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB 61 00 48 CB 81 00 50 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 100091E0 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10001688 8
+. 2F 9E 00 00 41 9E 00 30
+
+. 0 10001690 44
+. 7F C9 03 A6 38 00 00 00 57 4B 20 36 7D 20 DA 14 7C 1B 04 AE C9 A9 00 08 7D 20 BA 14 7C 17 05 AE 7C 00 5A 14 D9 A9 00 08 42 00 FF E4
+
+. 0 100016BC 52
+. 3E A0 10 02 2A 1A 00 01 CB F5 81 10 3C A0 10 01 3C 60 10 01 38 A5 69 3C FC 20 F8 90 7F 04 C3 78 7F 66 DB 78 38 63 69 4C 7F 47 D3 78 7F C8 F3 78 4B FF F1 2D
+
+. 0 100016F0 24
+. 3C 80 10 01 38 84 6C 58 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 3E E5
+
+. 0 10028F6C 4
+. 4B EA 1E 64
+
+. 0 FEEA6E4 20
+. 88 FB 00 00 3B 7B 00 01 98 E3 00 00 38 63 00 01 42 00 FF F0
+
+. 0 10028F44 4
+. 4B E9 9F 90
+
+. 0 FEF89E0 20
+. 81 64 00 00 38 A5 FF FF 81 44 00 04 38 84 00 08 4B FF FF 54
+
+. 0 FEC31A4 104
+. 3B 04 00 01 39 60 00 00 93 1F 04 4C 3A 20 FF FF 8B 04 00 01 3A 00 00 20 39 FF 03 F8 91 7F 04 EC 3A 58 FF E0 91 7F 04 F0 2A 12 00 5A 91 7F 04 F4 91 7F 04 F8 38 00 00 00 91 7F 04 FC 91 7F 05 00 91 7F 05 04 91 7F 05 08 91 7F 05 0C 91 7F 05 10 92 3F 05 14 91 7F 05 18 92 1F 05 1C 91 7F 05 68 91 FF 04 D4 41 91 00 20
+
+. 0 10028F2C 4
+. 4B EB 7D A8
+
+. 0 10028F1C 4
+. 4B EB 2F 00
+
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 1, stride = 1
+. 0 10001708 4
+. 41 91 03 68
+
+. 0 1000170C 16
+. 7F 44 D3 78 7F C5 F3 78 7F 63 DB 78 48 00 7B 01
+
+. 0 10009218 64
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 01 00 24 38 C0 00 01 7C 7F 1B 78 93 81 00 10 80 1E FF F0 7C 9C 23 78 93 A1 00 14 7C BD 2B 78 7F C0 F2 14 4B FF FC 91
+
+. 0 10009258 8
+. 2C 03 00 00 40 82 00 64
+
+. 0 10009260 48
+. 81 3E 80 9C 3C 00 43 30 90 01 00 08 7F 83 E8 40 93 A1 00 0C C8 09 00 00 C9 A1 00 08 81 3E 80 A0 FD AD 00 28 C8 09 00 00 FD 80 68 24 40 9C 00 34
+
+. 0 10009290 48
+. 7F A9 03 A6 57 84 20 36 38 00 00 00 7C 1F 04 AE 7D 20 FA 14 FC 00 03 32 7C 1F 05 AE 7C 00 22 14 C9 A9 00 08 FD AD 03 32 D9 A9 00 08 42 00 FF E0
+
+. 0 100092C0 32
+. 80 01 00 24 83 81 00 10 83 A1 00 14 7C 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 1000171C 40
+. FC 20 F8 90 3F E0 10 01 3C A0 10 01 38 A5 69 98 7F 24 CB 78 7F 66 DB 78 38 7F 69 90 7F 47 D3 78 7F C8 F3 78 4B FF F0 D9
+
+. 0 10001744 24
+. 3C 80 10 01 38 84 6C A0 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 3E 91
+
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 1, stride = 1
+. 0 1000175C 4
+. 41 91 02 E4
+
+. 0 10001760 16
+. 7E E3 BB 78 7F 44 D3 78 7F C5 F3 78 48 00 7A 85
+
+. 0 100091F0 20
+. 7C 08 02 A6 94 21 FF F0 38 C0 00 01 90 01 00 14 4B FF FC E5
+
+. 0 10009204 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10001770 8
+. 2F 9E 00 00 41 9E 00 54
+
+. 0 10001778 80
+. 3C 00 43 30 3D 20 10 02 90 01 00 08 39 29 80 F8 93 C1 00 0C 7F C9 03 A6 C9 A1 00 08 38 00 00 00 C8 09 00 00 57 4B 20 36 FD 8D 00 28 7C 19 04 AE 7D 20 CA 14 FC 00 03 32 7C 19 05 AE 7C 00 5A 14 C9 A9 00 08 FD AD 03 32 D9 A9 00 08 42 00 FF E0
+
+. 0 100017C8 40
+. CB F5 81 10 3C A0 10 01 38 A5 69 E4 7F 24 CB 78 FC 20 F8 90 7E E6 BB 78 7F 47 D3 78 7F C8 F3 78 38 7F 69 90 4B FF F0 2D
+
+. 0 100017F0 24
+. 3C 80 10 01 38 84 6C E8 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 3D E5
+
+. 0 FEF8088 24
+. 7C A9 03 A6 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 1, stride = 1
+. 0 10001808 8
+. 2B 9A 00 01 41 9D 02 04
+
+. 0 10001810 52
+. 3D 20 10 02 3D 60 10 02 39 29 80 F0 39 6B 81 18 C8 49 00 00 7F 66 DB 78 C8 2B 00 00 7F 07 C3 78 7F C4 F3 78 7F 45 D3 78 38 60 00 01 3F 80 10 01 48 00 2F A9
+
+. 0 100047E8 96
+. 94 21 FF 90 7C 08 02 A6 93 C1 00 30 7C 9E 23 79 DB 81 00 50 FF 80 08 90 DB A1 00 58 FF A0 10 90 93 21 00 1C 7C F9 3B 78 93 61 00 24 7C 7B 1B 78 DB 21 00 38 DB 41 00 40 DB 61 00 48 DB C1 00 60 DB E1 00 68 93 01 00 18 93 41 00 20 93 81 00 28 93 A1 00 2C 93 E1 00 34 90 01 00 74 41 82 01 58
+
+. 0 10004848 8
+. 2F 9E 00 00 41 9E 00 3C
+
+. 0 10004850 56
+. 7F C9 03 A6 39 60 00 00 39 80 00 00 38 00 00 00 54 AA 20 36 7D 20 32 14 7D 66 01 2E 38 C6 00 04 7D 86 01 2E 38 C6 FF FC 91 69 00 08 91 89 00 0C 7C 00 52 14 42 00 FF E0
+
+. 0 10004888 40
+. 7C 1B F3 96 2F 9E 00 00 7C 00 F1 D6 7C 00 D8 50 7C 00 29 D6 54 00 20 36 7D 20 32 14 7F 86 05 AE DB A9 00 08 41 9E 00 A0
+
+. 0 100048B0 104
+. 3F 00 43 30 3D 20 10 02 39 29 80 F8 93 C1 00 14 93 01 00 10 54 BA 20 36 CB 29 00 00 3D 20 10 02 C8 01 00 10 3B 80 00 00 CB 49 81 38 3B A0 00 00 FF 60 C8 28 7F DF F3 78 7C 1D F3 96 93 01 00 08 7C 00 F1 D6 7C 00 E8 50 7F BD DA 14 90 01 00 0C CB E1 00 08 FF FF C8 28 FF FF 06 B2 FF FF D8 24 FC 20 F8 90 48 02 45 E1
+
+. 0 10028EF4 4
+. 4B DE 50 5C
+
+. 0 10004918 12
+. FF C0 08 90 FC 20 F8 90 48 02 46 75
+
+. 0 10028F94 4
+. 4B DE 38 F0
+
+. 0 10004924 40
+. 37 FF FF FF FC 01 07 32 7D 3C CA 14 FC 21 07 72 FC 1E 07 7A FF DE 0F 38 7F DC CD AE 7F 9C D2 14 D8 09 00 08 40 82 FF A0
+
+. 0 1000494C 80
+. 38 60 00 00 80 01 00 74 83 01 00 18 83 21 00 1C 7C 08 03 A6 83 41 00 20 83 61 00 24 83 81 00 28 83 A1 00 2C 83 C1 00 30 83 E1 00 34 CB 21 00 38 CB 41 00 40 CB 61 00 48 CB 81 00 50 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 10001844 16
+. 7F 44 D3 78 7F C5 F3 78 7F 63 DB 78 48 00 79 7D
+
+. 0 10001854 40
+. FC 20 F8 90 3C A0 10 01 7F 47 D3 78 7F C8 F3 78 38 A5 6A 40 7F 04 C3 78 7F 66 DB 78 38 7C 6A 34 3B A0 00 00 4B FF EF A1
+
+. 0 1000187C 24
+. 3C 80 10 01 38 84 6D 30 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 3D 59
+
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 1, stride = 1
+. 0 10001894 44
+. 3D 20 10 02 3D 60 10 02 39 29 80 F0 39 6B 81 18 C8 49 00 00 C8 2B 00 00 7F 06 C3 78 7F 44 D3 78 7F 65 DB 78 7F C3 F3 78 48 00 31 05
+
+. 0 100049C0 20
+. 2C 03 00 00 7C 08 02 A6 94 21 FF F0 90 01 00 14 41 82 00 B0
+
+. 0 100049D4 8
+. 2F 83 00 00 41 9E 00 24
+
+. 0 100049DC 32
+. 7C 69 03 A6 38 00 00 00 54 8B 20 36 7D 20 2A 14 7C 25 05 AE D8 49 00 08 7C 00 5A 14 42 00 FF F0
+
+. 0 100049FC 8
+. 2F 83 00 00 41 9E 00 3C
+
+. 0 10004A04 56
+. 7C 69 03 A6 54 84 20 36 39 60 00 00 39 80 00 00 38 00 00 00 7D 20 32 14 7D 66 01 2E 38 C6 00 04 7D 86 01 2E 38 C6 FF FC 91 69 00 08 91 89 00 0C 7C 00 22 14 42 00 FF E0
+
+. 0 10004A3C 68
+. 3C 00 43 30 3D 20 10 02 90 61 00 0C 39 29 80 F8 90 01 00 08 38 60 00 00 C8 01 00 08 C9 A9 00 00 80 01 00 14 38 21 00 10 FC 00 68 28 7C 08 03 A6 FD A0 00 B2 FC 00 00 72 D9 A6 00 08 D8 06 00 00 4E 80 00 20
+
+. 0 100018C0 16
+. 7F 44 D3 78 7F C5 F3 78 7F 63 DB 78 48 00 79 01
+
+. 0 100018D0 36
+. FC 20 F8 90 3C A0 10 01 7F 04 C3 78 38 A5 6A 90 7F 66 DB 78 7F 47 D3 78 7F C8 F3 78 38 7C 6A 34 4B FF EF 29
+
+. 0 100018F4 24
+. 3C 80 10 01 38 84 6D 78 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 3C E1
+
+. 0 FEF808C 20
+. 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 1, stride = 1
+. 0 1000190C 12
+. 7F 9D F0 40 3B E0 00 00 40 9C 00 78
+
+. 0 10001918 52
+. 3E C0 10 01 3D 20 10 02 3D 60 10 02 39 29 81 18 39 6B 80 F0 C8 29 00 00 7F 66 DB 78 C8 4B 00 00 7F 07 C3 78 7F E3 FB 78 7F C4 F3 78 7F 45 D3 78 48 00 31 69
+
+. 0 10004AB0 112
+. 94 21 FF 80 7C 08 02 A6 93 C1 00 40 7C 9E 23 79 DB 81 00 60 FF 80 08 90 DB A1 00 68 FF A0 10 90 92 E1 00 24 7C B7 2B 78 93 21 00 2C 7C D9 33 78 93 41 00 30 7C 7A 1B 78 93 61 00 34 7C FB 3B 78 DB 21 00 48 DB 41 00 50 DB 61 00 58 DB C1 00 70 DB E1 00 78 92 C1 00 20 93 01 00 28 93 81 00 38 93 A1 00 3C 93 E1 00 44 90 01 00 84 41 82 01 90
+
+. 0 10004B20 8
+. 2F 9E 00 00 41 9E 00 A0
+
+. 0 10004B28 104
+. 3E C0 43 30 3D 20 10 02 39 29 80 F8 93 C1 00 14 92 C1 00 10 3B 80 00 00 CB 29 00 00 3D 20 10 02 C8 01 00 10 3B A0 00 00 CB 49 81 40 54 B8 20 36 FF 60 C8 28 7F DF F3 78 7C 1D F3 96 92 C1 00 08 7C 00 F1 D6 7C 00 E8 50 7F BD D2 14 90 01 00 0C CB E1 00 08 FF FF C8 28 FF FF 06 B2 FF FF D8 24 FC 20 F8 90 48 02 43 69
+
+. 0 10004B90 12
+. FF C0 08 90 FC 20 F8 90 48 02 43 FD
+
+. 0 10004B9C 40
+. 37 FF FF FF FC 01 07 32 7D 3C CA 14 FC 21 07 72 FC 1E 07 7A FF DE 0F 38 7F DC CD AE 7F 9C C2 14 D8 09 00 08 40 82 FF A0
+
+. 0 10004BC4 8
+. 2F 9E 00 00 41 9E 00 3C
+
+. 0 10004BCC 56
+. 7F C9 03 A6 39 60 00 00 39 80 00 00 38 00 00 00 56 EA 20 36 7D 20 DA 14 7D 7B 01 2E 3B 7B 00 04 7D 9B 01 2E 3B 7B FF FC 91 69 00 08 91 89 00 0C 7C 00 52 14 42 00 FF E0
+
+. 0 10004C04 8
+. 2F 9A 00 00 40 9D 00 C8
+
+. 0 10004CD0 20
+. 7D 3A F0 50 7C 09 F3 96 7C 00 F1 D6 7C 60 48 50 4B FF FF 38
+
+. 0 10004C18 148
+. 3C 00 43 30 3D 20 10 02 90 01 00 10 39 29 80 F8 93 C1 00 14 7C 17 19 D6 C9 A1 00 10 38 60 00 00 C8 09 00 00 FD AD 00 28 54 00 20 36 7D 20 DA 14 FC 0D 07 72 FD AD 07 32 7D BB 05 AE D8 09 00 08 80 01 00 84 82 C1 00 20 82 E1 00 24 7C 08 03 A6 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB 21 00 48 CB 41 00 50 CB 61 00 58 CB 81 00 60 CB A1 00 68 CB C1 00 70 CB E1 00 78 38 21 00 80 4E 80 00 20
+
+. 0 1000194C 16
+. 7F 44 D3 78 7F C5 F3 78 7F 63 DB 78 48 00 78 75
+
+. 0 1000195C 36
+. C8 35 81 10 3B FF 00 01 38 7C 6A 34 7F 04 C3 78 38 B6 6A E4 7F 66 DB 78 7F 47 D3 78 7F C8 F3 78 4B FF EE 9D
+
+. 0 10001980 12
+. 7F 9F F0 40 7F BD 1B 78 41 9C FF 94
+
+. 0 1000198C 28
+. 3C 80 10 01 7F C5 F3 78 7F 46 D3 78 38 84 6D C4 7F A3 EB 78 4C C6 31 82 48 01 3C 45
+
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 1, stride = 1
+. 0 100019A8 8
+. 7F 63 DB 78 48 02 75 D9
+
+. 0 10028F84 4
+. 4B EC 65 0C
+
+. 0 100019B0 8
+. 7F 23 CB 78 48 02 75 D1
+
+. 0 100019B8 8
+. 7F 03 C3 78 48 02 75 C9
+
+. 0 100019C0 8
+. 7E E3 BB 78 48 02 75 C1
+
+. 0 100019C8 72
+. 80 01 00 54 81 81 00 18 82 A1 00 1C 7C 08 03 A6 82 C1 00 20 7D 80 81 20 82 E1 00 24 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB E1 00 48 38 21 00 50 4E 80 00 20
+
+. 0 100046BC 12
+. 7F A3 EB 78 7F E4 FB 78 4B FF D7 69
+
+. 0 10001E2C 60
+. 94 21 FF D0 7C 08 02 A6 93 81 00 20 7F 84 19 D6 93 A1 00 24 93 61 00 1C 7C 7B 1B 78 90 01 00 34 57 9D 18 38 93 41 00 18 7F A3 EB 78 93 C1 00 28 93 E1 00 2C 7C 9E 23 78 48 02 70 D1
+
+. 0 10001E68 12
+. 7C 7A 1B 78 7F A3 EB 78 48 02 70 C5
+
+. 0 10001E74 12
+. 7C 7F 1B 78 7F A3 EB 78 48 02 70 B9
+
+. 0 10001E80 16
+. 2B 9C 00 00 7C 7D 1B 78 39 40 00 00 40 9D 00 38
+
+. 0 10001E90 52
+. 3D 60 10 02 7F 89 03 A6 39 6B 80 F8 3D 20 43 30 C9 AB 00 00 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 7C 1F 05 AE 42 00 FF E4
+
+. 0 10001EC4 16
+. 57 85 18 38 7F 43 D3 78 7F E4 FB 78 48 02 70 A5
+
+. 0 10028F74 4
+. 4B EC F7 64
+
+. 0 FEF8760 8
+. 2C 85 00 00 41 86 00 1C
+
+. 0 FEF8768 24
+. 7C A9 03 A6 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+
+. 0 FEF876C 20
+. 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+
+. 0 10001ED4 12
+. 2B 9E 00 01 38 E0 00 00 40 9D 00 18
+
+. 0 10001EF4 12
+. 39 40 00 00 7F 8A F0 40 40 9C 00 54
+
+. 0 10001F00 24
+. 7F E8 FB 78 57 66 18 38 39 60 00 00 7D 49 53 78 7F 8B 38 40 40 9C 00 1C
+
+. 0 10001F30 32
+. 7C 0B D9 D6 39 4A 00 01 7F 8A F0 40 C8 08 00 00 7D 08 32 14 54 00 18 38 7C 1D 05 AE 41 9C FF BC
+
+. 0 10001F50 16
+. 34 DE FF FF 39 60 00 00 39 40 00 00 41 82 00 5C
+
+. 0 10001FB8 44
+. 3D 20 10 02 3C A0 10 01 C8 29 81 10 3C 60 10 01 7F 67 DB 78 7F E6 FB 78 7F C8 F3 78 7F A4 EB 78 38 A5 6B FC 38 63 6C 1C 4B FF EA 69
+
+. 0 10000A48 124
+. 94 21 FF A0 7C 08 02 A6 93 A1 00 3C 7D 1D 43 79 3D 20 10 02 93 01 00 28 39 29 80 F0 93 21 00 2C 93 61 00 34 FD 40 08 90 93 C1 00 40 7C 78 1B 78 93 E1 00 44 7C 9E 23 78 DB A1 00 48 7C B9 2B 78 DB C1 00 50 7C DF 33 78 DB E1 00 58 7C FB 3B 78 92 41 00 10 92 61 00 14 92 81 00 18 92 A1 00 1C 92 C1 00 20 92 E1 00 24 93 41 00 30 93 81 00 38 90 01 00 64 C8 29 00 00 41 82 00 44
+
+. 0 10000AC4 56
+. 3D 20 10 02 7F A9 03 A6 39 29 80 F8 38 00 00 00 C9 69 00 00 54 E9 18 38 7D 9F 04 AE 7D BE 04 AE 7C 00 4A 14 FC 0C 68 28 FC 00 02 10 FF E0 02 F2 FF 9F 08 00 40 9D 00 08
+
+. 0 10000B00 4
+. 42 00 FF DC
+
+. 0 10000B04 12
+. FF 81 50 00 38 60 00 00 40 9C 00 58
+
+. 0 10000B10 84
+. 80 01 00 64 82 41 00 10 82 61 00 14 7C 08 03 A6 82 81 00 18 82 A1 00 1C 82 C1 00 20 82 E1 00 24 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB A1 00 48 CB C1 00 50 CB E1 00 58 38 21 00 60 4E 80 00 20
+
+. 0 10001FE4 20
+. 3C 80 10 01 7F C5 F3 78 38 84 70 08 4C C6 31 82 48 01 35 F5
+
+. 0 FEF8934 8
+. 2C 80 00 02 41 86 00 BC
+
+. 0 FEF893C 8
+. 2F 00 00 03 41 9A 00 8C
+
+. 0 FEF89CC 20
+. 81 64 00 00 38 63 FF F8 81 44 00 04 38 A5 00 01 4B FF FF 90
+
+. 0 FEF896C 56
+. 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+PASS: fft_real_bitreverse_order, n = 1
+. 0 10001FF8 8
+. 7F A3 EB 78 48 02 6F 89
+
+. 0 10002000 8
+. 7F E3 FB 78 48 02 6F 81
+
+. 0 10002008 8
+. 7F 43 D3 78 48 02 6F 79
+
+. 0 10002010 40
+. 80 01 00 34 83 41 00 18 83 61 00 1C 7C 08 03 A6 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 100046C8 16
+. 7F A3 EB 78 7F E4 FB 78 3B BD 00 01 4B FF D9 65
+
+. 0 10002038 64
+. 94 21 FF D0 7C 08 02 A6 93 41 00 18 7F 44 19 D6 93 61 00 1C 7C 7B 1B 78 90 01 00 34 93 21 00 14 57 43 18 38 93 81 00 20 93 A1 00 24 7C 9C 23 78 57 5D 20 36 93 C1 00 28 93 E1 00 2C 48 02 6E C1
+
+. 0 10002078 12
+. 7C 7F 1B 78 7F A3 EB 78 48 02 6E B5
+
+. 0 10002084 12
+. 7C 7E 1B 78 7F A3 EB 78 48 02 6E A9
+
+. 0 10002090 12
+. 7C 79 1B 78 7F A3 EB 78 48 02 6E 9D
+
+. 0 1000209C 16
+. 2B 9A 00 00 7C 7D 1B 78 39 40 00 00 40 9D 00 38
+
+. 0 100020AC 52
+. 3D 60 10 02 7F 49 03 A6 39 6B 80 F8 3D 20 43 30 C9 AB 00 00 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 7C 1F 05 AE 42 00 FF E4
+
+. 0 100020E0 16
+. 57 40 08 3C 39 40 00 00 2B 80 00 00 40 9D 00 64
+
+. 0 100020F0 96
+. 3D 60 10 02 3D 20 10 02 C9 2B 81 00 3D 60 10 02 39 6B 80 F8 C9 09 80 E8 7C 09 03 A6 3D 20 10 02 C9 6B 00 00 C9 49 81 08 3D 20 43 30 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 58 28 FD A0 50 2A FD 80 40 2A FC 00 48 2A 7D BD 05 AE 7D 9E 05 AE 7C 19 05 AE 42 00 FF D0
+
+. 0 1000211C 52
+. 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 58 28 FD A0 50 2A FD 80 40 2A FC 00 48 2A 7D BD 05 AE 7D 9E 05 AE 7C 19 05 AE 42 00 FF D0
+
+. 0 10002150 8
+. 38 60 00 00 48 01 32 A1
+
+. 0 10002158 20
+. 7F 64 DB 78 7F C5 F3 78 7F A6 EB 78 7F 83 E3 78 48 00 2F AD
+
+. 0 10005114 68
+. 94 21 FF D0 7C 08 02 A6 93 21 00 14 7C 79 1B 79 92 E1 00 0C 7C D7 33 78 93 01 00 10 7C 98 23 78 93 A1 00 24 7C BD 2B 78 93 41 00 18 93 61 00 1C 93 81 00 20 93 C1 00 28 93 E1 00 2C 90 01 00 34 41 82 00 88
+
+. 0 10005158 8
+. 2F 99 00 00 41 9E 00 38
+
+. 0 10005160 24
+. 3B 40 00 00 3B 60 00 00 3B C0 00 00 54 9C 20 36 7F 3F CB 78 4B FF F6 19
+
+. 0 10005178 28
+. 37 FF FF FF 7D 3E EA 14 7C 3E ED AE 93 49 00 08 93 69 00 0C 7F DE E2 14 40 82 FF E4
+
+. 0 10005194 20
+. 7F A3 EB 78 7F 04 C3 78 7F 25 CB 78 7E E6 BB 78 48 00 0D 7D
+
+. 0 100051A8 52
+. 80 01 00 34 82 E1 00 0C 83 01 00 10 7C 08 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 1000216C 16
+. 7F 23 CB 78 7F C4 F3 78 57 45 20 36 48 02 6D FD
+
+. 0 1000217C 8
+. 2F 9C 00 00 41 9E 00 24
+
+. 0 10002184 32
+. 7F 89 03 A6 39 60 00 00 55 60 20 36 55 69 18 38 7C 1E 04 AE 7D 6B DA 14 7C 09 FD AE 42 00 FF EC
+
+. 0 100021A4 20
+. 7F 64 DB 78 7F 85 E3 78 7F E3 FB 78 3F 40 10 02 48 01 03 D1
+
+. 0 10012584 124
+. 94 21 FF 90 7C 08 02 A6 42 9F 00 05 93 C1 00 48 7F C8 02 A6 2F 85 00 01 90 01 00 74 93 81 00 40 7C BC 2B 78 80 1E FF F0 93 A1 00 44 7C 9D 23 78 93 E1 00 4C 7F C0 F2 14 7C 7F 1B 78 DB 81 00 50 DB A1 00 58 38 60 00 00 DB C1 00 60 DB E1 00 68 92 41 00 18 92 61 00 1C 92 81 00 20 92 A1 00 24 92 C1 00 28 92 E1 00 2C 93 01 00 30 93 21 00 34 93 41 00 38 93 61 00 3C 41 9E 02 10
+
+. 0 1001280C 88
+. 80 01 00 74 82 41 00 18 82 61 00 1C 7C 08 03 A6 82 81 00 20 82 A1 00 24 82 C1 00 28 82 E1 00 2C 83 01 00 30 83 21 00 34 83 41 00 38 83 61 00 3C 83 81 00 40 83 A1 00 44 83 C1 00 48 83 E1 00 4C CB 81 00 50 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 100021B8 20
+. 7F C4 F3 78 7F 65 DB 78 7F 86 E3 78 7F E3 FB 78 48 00 C4 15
+
+. 0 1000E5DC 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 2C 06 00 00 90 01 00 14 7C AC 2B 78 93 E1 00 0C 80 1E FF F0 7F C0 F2 14 41 82 00 C4
+
+. 0 1000E60C 40
+. 38 06 FF FF 83 FE 81 CC 2B 80 00 01 C8 03 00 00 C9 BF 00 00 38 A0 00 01 D8 04 00 00 7C 07 03 78 D9 A4 00 08 40 9D 00 54
+
+. 0 1000E684 8
+. 7F 87 28 00 41 9E 00 20
+
+. 0 1000E68C 28
+. 80 01 00 14 38 60 00 00 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 100021CC 40
+. C8 3A 81 10 3C A0 10 01 3C 60 10 01 7F 67 DB 78 7F 88 E3 78 38 A5 69 3C 7F A4 EB 78 7F C6 F3 78 38 63 69 4C 4B FF E6 29
+
+. 0 100021F4 24
+. 3C 80 10 01 38 84 70 2C 7F 85 E3 78 7F 66 DB 78 4C C6 31 82 48 01 33 E1
+
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 1, stride = 1
+. 0 1000220C 16
+. 7F E3 FB 78 7F 64 DB 78 7F 85 E3 78 48 00 BE 79
+
+. 0 1000E090 132
+. 94 21 FF 80 7C 08 02 A6 42 9F 00 05 93 C1 00 50 7F C8 02 A6 2F 85 00 01 90 01 00 84 92 E1 00 34 7C B7 2B 78 80 1E FF F0 93 A1 00 4C 7C 9D 23 78 93 E1 00 54 7F C0 F2 14 7C 7F 1B 78 DB 61 00 58 DB 81 00 60 38 60 00 00 DB A1 00 68 DB C1 00 70 DB E1 00 78 92 21 00 1C 92 41 00 20 92 61 00 24 92 81 00 28 92 A1 00 2C 92 C1 00 30 93 01 00 38 93 21 00 3C 93 41 00 40 93 61 00 44 93 81 00 48 41 9E 02 34
+
+. 0 1000E344 96
+. 80 01 00 84 82 21 00 1C 82 41 00 20 7C 08 03 A6 82 61 00 24 82 81 00 28 82 A1 00 2C 82 C1 00 30 82 E1 00 34 83 01 00 38 83 21 00 3C 83 41 00 40 83 61 00 44 83 81 00 48 83 A1 00 4C 83 C1 00 50 83 E1 00 54 CB 61 00 58 CB 81 00 60 CB A1 00 68 CB C1 00 70 CB E1 00 78 38 21 00 80 4E 80 00 20
+
+. 0 1000221C 8
+. 2F 9C 00 00 41 9E 00 44
+
+. 0 10002224 64
+. 3C 00 43 30 3D 20 10 02 90 01 00 08 39 29 80 F8 93 81 00 0C 7F 89 03 A6 C8 09 00 00 38 00 00 00 C9 A1 00 08 57 69 18 38 FD AD 00 28 7C 1F 04 AE FC 00 68 24 7C 1F 05 AE 7C 00 4A 14 42 00 FF F0
+
+. 0 10002264 20
+. 7F C4 F3 78 7F 65 DB 78 7F 86 E3 78 7F E3 FB 78 48 01 06 11
+
+. 0 10012884 40
+. 7C 08 02 A6 94 21 FF F0 42 9F 00 05 93 C1 00 08 7F C8 02 A6 2C 06 00 00 90 01 00 14 80 1E FF F0 7F C0 F2 14 41 82 00 54
+
+. 0 100128AC 8
+. 2F 86 00 00 41 9E 00 34
+
+. 0 100128B4 48
+. 81 3E 82 9C 7C C9 03 A6 39 40 00 00 C9 A9 00 00 55 40 18 38 55 49 20 36 7C 03 04 AE 7D 69 22 14 7D 4A 2A 14 7C 09 25 AE D9 AB 00 08 42 00 FF E4
+
+. 0 100128E4 24
+. 80 01 00 14 38 60 00 00 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10002278 40
+. C8 3A 81 10 3C A0 10 01 3C 60 10 01 7F 67 DB 78 7F 88 E3 78 7F 24 CB 78 7F C6 F3 78 38 A5 69 98 38 63 69 90 4B FF E5 7D
+
+. 0 100022A0 24
+. 3C 80 10 01 7F 85 E3 78 7F 66 DB 78 38 84 70 6C 4C C6 31 82 48 01 33 35
+
+. 0 FEF89F4 24
+. 81 44 00 00 38 63 FF F4 81 64 00 04 38 A5 00 02 38 84 FF FC 4B FF FF 78
+
+. 0 FEF8980 36
+. 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 1, stride = 1
+. 0 100022B8 8
+. 7F E3 FB 78 48 02 6C C9
+
+. 0 100022C0 8
+. 7F C3 F3 78 48 02 6C C1
+
+. 0 100022C8 8
+. 7F 23 CB 78 48 02 6C B9
+
+. 0 100022D0 8
+. 7F A3 EB 78 48 02 6C B1
+
+. 0 100022D8 44
+. 80 01 00 34 83 21 00 14 83 41 00 18 7C 08 03 A6 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 100046D8 8
+. 2B 9D 00 03 40 9D FF C8
+
+. 0 100046A4 12
+. 7F A3 EB 78 7F E4 FB 78 4B FF CC 4D
+
+. 0 FEF8864 20
+. 80 04 00 00 38 63 FF FC 81 24 00 04 90 03 00 04 4B FF FF A0
+
+. 0 FEF8814 64
+. 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+PASS: fft_complex_bitreverse_order, n = 1
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 1, stride = 2
+. 0 10001A70 20
+. 7F 44 D3 78 7F C5 F3 78 38 C0 00 00 7F 63 DB 78 4B FF F1 B1
+
+. 0 10000C30 20
+. 39 40 00 00 94 21 FF F0 7F 8A 28 40 39 60 00 00 40 9C 00 98
+
+. 0 10000C44 16
+. 7C A7 2B 78 2B 84 00 01 39 6B 00 02 40 9D 00 80
+
+. 0 10000C54 124
+. 3D 20 10 02 38 04 FF FF 39 29 80 F8 7C 09 03 A6 C9 49 00 00 3D 00 43 30 55 69 18 38 7C 0B 32 14 39 6B 00 01 90 01 00 0C 91 01 00 08 7C 0B 32 14 C9 81 00 08 90 01 00 0C 55 60 18 38 C8 01 00 08 FD 8C 50 28 7D 69 1C AE 39 6B 00 01 FC 00 50 28 7D A3 04 AE FF 8B 60 00 FF 0D 00 00 4F DE F0 42 4F 5A D0 42 7D 20 00 26 55 20 FF FE 55 29 DF FE 7D 4A 03 78 7D 4A 4B 78 42 00 FF A0
+
+. 0 10000CD0 8
+. 34 E7 FF FF 40 82 FF 74
+
+. 0 10000CD8 12
+. 7D 43 53 78 38 21 00 10 4E 80 00 20
+
+. 0 10001A84 24
+. 3C 80 10 01 38 84 6E A0 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 3B 51
+
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 1, stride = 2
+. 0 10001A9C 4
+. 4B FF FC 70
+
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 1, stride = 2
+. 0 10001A40 20
+. 7F 44 D3 78 7F C5 F3 78 38 C0 00 00 7F 63 DB 78 4B FF F1 E1
+
+. 0 10001A54 24
+. 3C 80 10 01 38 84 6E 54 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 3B 81
+
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 1, stride = 2
+. 0 10001A6C 4
+. 4B FF FC F4
+
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 1, stride = 2
+. 0 10001A10 20
+. 7F 44 D3 78 7F C5 F3 78 38 C0 0B B8 7E E3 BB 78 4B FF F2 11
+
+. 0 10001A24 24
+. 3C 80 10 01 38 84 6E 08 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 3B B1
+
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 1, stride = 2
+. 0 10001A3C 4
+. 4B FF FD D4
+
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 1, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 1, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 1, stride = 2
+. 0 10001EA4 32
+. 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 7C 1F 05 AE 42 00 FF E4
+
+PASS: fft_real_bitreverse_order, n = 1
+. 0 100020C0 32
+. 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 7C 1F 05 AE 42 00 FF E4
+
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 1, stride = 2
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 1, stride = 2
+. 0 FEF8804 80
+. 80 04 00 00 91 23 00 00 81 24 00 04 90 03 00 04 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+PASS: fft_complex_bitreverse_order, n = 1
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 1, stride = 3
+. 0 10000C6C 100
+. 55 69 18 38 7C 0B 32 14 39 6B 00 01 90 01 00 0C 91 01 00 08 7C 0B 32 14 C9 81 00 08 90 01 00 0C 55 60 18 38 C8 01 00 08 FD 8C 50 28 7D 69 1C AE 39 6B 00 01 FC 00 50 28 7D A3 04 AE FF 8B 60 00 FF 0D 00 00 4F DE F0 42 4F 5A D0 42 7D 20 00 26 55 20 FF FE 55 29 DF FE 7D 4A 03 78 7D 4A 4B 78 42 00 FF A0
+
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 1, stride = 3
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 1, stride = 3
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 1, stride = 3
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 1, stride = 3
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 1, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 1, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 1, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 1, stride = 3
+. 0 FEF88D8 28
+. 80 04 00 00 38 84 FF F8 81 24 00 0C 38 63 FF F4 38 A5 00 02 90 03 00 0C 4B FF FF 34
+
+. 0 FEF8824 48
+. 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+PASS: fft_real_bitreverse_order, n = 1
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 1, stride = 3
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 1, stride = 3
+. 0 100046E0 12
+. 57 FF 08 3C 7F 9F F0 40 40 9D FF B0
+
+. 0 100013B0 20
+. 39 20 00 01 38 E7 00 01 7D 20 38 30 7F 80 F0 40 41 9C FF F4
+
+. 0 100013E8 24
+. 7C E9 03 A6 55 20 07 FE 55 6B 08 3C 7D 6B 03 78 55 29 F8 7E 42 00 FF F0
+
+. 0 100013D8 16
+. 39 60 00 00 7D 49 53 78 7F 8B 38 40 40 9C 00 1C
+
+. 0 10001440 24
+. 57 C3 F8 7E 38 C0 00 00 57 65 20 36 7F 87 40 40 7C 6A 1B 78 40 9C 00 40
+
+. 0 10001494 8
+. 7F 8A 40 40 40 9D FF F4
+
+. 0 1000149C 20
+. 38 E7 00 01 7D 08 52 14 7F 84 38 40 7C C6 2A 14 41 9D FF A0
+
+. 0 100008AC 60
+. 7D 20 FA 14 7D 60 F2 14 C9 AB 00 08 7D 9F 04 AE 7C 1E 04 AE 7C 00 52 14 C9 69 00 08 FC 0C 00 28 FD 4B 68 28 FC 00 02 10 FD A0 52 10 FC 00 68 2A FF E0 02 72 FF 9F 08 00 40 9D 00 08
+
+PASS: fft_complex_bitreverse_order, n = 2
+. 0 10005068 4
+. 4B FF F7 25
+
+. 0 10005D50 28
+. 93 A1 00 14 92 E1 00 10 CB E1 00 10 FF FF D8 28 FF FF 06 B2 FC 20 F8 90 48 02 31 8D
+
+. 0 10005D2C 20
+. 2F 9B 00 00 CB 93 00 00 3B A0 00 00 FF A0 E0 90 41 9E 00 7C
+
+. 0 FE0DFA0 16
+. 3C C0 3F EB 60 C5 5F FF 7C 09 28 00 41 81 02 08
+
+. 0 FE0E1B4 16
+. 3D 60 40 03 61 6A 68 FC 7C 09 50 00 41 81 00 E4
+
+. 0 FE0E2A4 16
+. 3C 60 41 99 60 65 21 FA 7C 09 28 00 41 81 01 F8
+
+. 0 FE0E2B4 120
+. 81 5E 0E 58 81 7E 0E 5C C8 0A 00 00 C9 6B 00 00 80 FE 0E 54 FC 8B 00 7A 81 3E 0E 50 C8 A7 00 00 C9 89 00 00 80 9E 0E 4C FD A4 00 28 80 DE 0F C4 CB A4 00 00 C8 46 00 00 FC 65 03 72 D8 81 00 30 81 21 00 30 81 41 00 34 FC EC 03 72 55 5F 07 BE FD 01 18 28 FC DD 03 72 FD 48 38 28 FC 81 10 00 D8 C1 00 28 FC 4A 30 28 FC 2A 10 28 D8 41 00 20 FC 81 30 28 40 85 03 9C
+
+. 0 FE0E6C4 8
+. 81 3E 10 08 4B FF FC 68
+
+. 0 FE0E330 16
+. 2F 1F 00 01 C9 29 00 00 FC 7F 02 72 41 9A 05 C0
+
+. 0 FE0E340 4
+. 40 99 03 98
+
+. 0 FE0E344 8
+. 2C 1F 00 02 41 82 03 98
+
+. 0 FE0E6E0 12
+. C8 26 00 00 FC 02 08 00 40 80 00 10
+
+. 0 FE0E6F8 180
+. 81 5E 0E 80 80 BE 0F C8 C9 6A 00 00 80 9E 0F CC FC 0B 10 2A 80 7E 0F D0 83 BE 0F D4 CB C5 00 00 FD 20 58 28 C8 C4 00 00 C9 03 00 00 CB BD 00 00 FD 42 48 28 80 FE 0F D8 D8 01 00 30 C8 A7 00 00 FD AA 20 2A 81 61 00 30 81 81 00 34 81 1E 0E 7C 55 80 28 34 D8 81 00 28 FD 2D 03 72 7D 20 42 14 C9 49 00 08 FD 89 37 BA CB C9 00 18 FC E9 EA 3A C9 09 00 10 7F A8 04 AE FD 6D 02 72 FC 0C 2A 7A FD 6B 69 FA FC C0 02 72 FC EB 02 B2 FD 88 01 B2 FC 1E 38 28 FD 5D 02 F2 FD A0 60 28 FC AD 50 28 FD A8 28 2A FF C8 68 28 FD 45 F0 2A FC 8A 08 00 40 85 04 B4
+
+. 0 FE0EC5C 16
+. 81 9E 10 04 C8 EC 00 00 FC 0A 19 F8 4B FF FB 50
+
+. 0 FE0E7B8 12
+. FC 6D 00 2A FF 03 68 00 40 9A 03 04
+
+. 0 FE0E7C4 12
+. 2F 9F 00 00 FC 20 68 90 41 BE F9 C0
+
+. 0 FE0E7D0 4
+. 4B FF FE A4
+
+. 0 FE0E674 8
+. FC 20 08 50 4B FF FB 14
+
+. 0 FE0C900 16
+. 3C C0 3F CF 60 C5 FF FF 7C 0B 28 00 41 81 01 A4
+
+. 0 FE0CAB0 16
+. 3D 80 3F EB 61 80 5F FF 7F 8B 00 00 41 9D 01 F8
+
+. 0 FE0CCB4 16
+. 3F E0 40 03 63 FD 68 FC 7E 0B E8 00 41 91 01 24
+
+. 0 FE0CDE4 16
+. 3D 00 41 99 61 0A 21 FA 7F 0B 50 00 41 99 03 68
+
+. 0 FE0CDF4 120
+. 83 7E 0E 5C 83 9E 0E 58 C9 5B 00 00 C9 1C 00 00 83 5E 0E 54 FF CA 40 7A 83 3E 0E 50 C8 7A 00 00 C8 B9 00 00 83 1E 0E 4C FC FE 40 28 83 BE 0F 64 C9 78 00 00 DB C1 00 30 FC 43 01 F2 CB 7D 00 00 81 21 00 30 81 41 00 34 FF 85 01 F2 55 5F 07 BE FF A1 10 28 FC 2B 01 F2 FF 5D E0 28 FE 1F D8 00 D8 21 00 28 FF DA 08 28 FD 9A F0 28 DB C1 00 20 FC 4C 08 28 40 91 09 38
+
+. 0 FE0D7A0 8
+. 81 3E 0F B0 4B FF F6 CC
+
+. 0 FE0CE70 16
+. 2F 9F 00 01 C9 29 00 00 FC 9F 02 72 41 9E 04 D0
+
+. 0 FE0CE80 4
+. 41 9D 04 BC
+
+. 0 FE0D33C 8
+. 2F 9F 00 02 41 9E 07 2C
+
+. 0 FE0DA6C 8
+. 2F 9F 00 00 4B FF F4 1C
+
+. 0 FE0CE8C 8
+. FD 5E 07 B2 41 9E 00 10
+
+. 0 FE0CE94 32
+. FF C0 F0 50 FC 40 10 50 DB C1 00 20 80 DE 0F 98 D8 41 00 28 C8 C6 00 00 FC 8A 30 00 40 84 0A B8
+
+. 0 FE0CEB4 100
+. 80 FE 0E 70 81 1E 0E 6C C9 27 00 00 C8 C8 00 00 81 5E 0E 68 FC 69 32 BA 81 7E 0E 64 C9 0A 00 00 C8 AB 00 00 81 9E 0E 60 FC E3 42 BA 83 FE 0F 7C CB 8C 00 00 C9 7F 00 00 C8 3D 00 00 FF A7 2A BA FF 62 02 F2 FF 5D E2 BA FD 9A DF B8 FD AC 12 BA FD 9E 68 2A FC 1E 60 28 FD A0 68 2A FF 0D 08 00 40 99 0E 10
+
+. 0 FE0CF18 28
+. 81 3E 0F 90 C9 49 00 00 FC 0D 22 BA FC 8C 00 2A FC 20 60 90 FE 04 60 00 41 B2 FB 28
+
+. 0 FE0CF34 172
+. 80 9E 0F 58 FD 1E 07 B2 C8 87 00 00 CB 44 00 00 C9 48 00 00 FD 9E D0 2A C8 6B 00 00 FD 24 52 3A 80 FE 0E 88 81 7E 0F 5C FD 8C D0 28 CB 87 00 00 C8 2A 00 00 C8 CB 00 00 FD 7E 60 28 80 BE 0E 84 FC A9 0A 3A CB 65 00 00 FC FC 01 B2 FD 2B 10 2A FF BC 03 32 FD A5 1A 3A C8 BD 00 00 FC 07 03 32 FF 5C 02 72 FD 5D 03 32 FC 2D DA 3A FC DA 02 72 FC 80 02 72 FF 8A 03 32 FC 61 22 3A FD 66 02 72 FD 9E E0 2A FC E3 5F BA FF BE 60 28 FF 47 10 2A FF 7D E0 2A FD BA D8 2A FC 2C 68 2A FC 0C 08 28 FC C0 68 2A FF 86 28 00 40 9D 0E 00
+
+. 0 FE0CFE0 20
+. 81 5E 0F 88 FC 1F 28 00 C9 0A 00 00 FC C6 02 32 40 81 0E BC
+
+. 0 FE0DEAC 8
+. 81 3E 0F B8 4B FF F1 48
+
+. 0 FE0CFF8 20
+. C8 09 00 00 FC 1F 30 3A FF 81 00 2A FF 1C 08 00 41 BA FA 50
+
+. 0 FE0D00C 16
+. C8 FD 00 00 FC 20 F0 90 FE 1E 38 00 41 91 00 0C
+
+. 0 FE0D01C 20
+. FC 40 10 50 FC 20 F0 50 3B 01 00 10 7F 03 C3 78 48 01 81 C1
+
+. 0 FE251EC 12
+. 94 21 FF A0 7D 88 02 A6 48 04 DE BD
+
+. 0 FE251F8 324
+. 93 C1 00 18 7F C8 02 A6 DB 21 00 28 DB 61 00 38 DB C1 00 50 7D 88 03 A6 80 DE 20 98 80 BE 20 90 C8 C6 00 00 80 DE 20 9C FF 26 08 2A C8 E5 00 00 CB 66 00 00 DB A1 00 48 FC 79 30 28 DB 21 00 08 FC A7 06 F2 DB 01 00 20 DB E1 00 58 FD 41 18 28 DB 81 00 40 DB 41 00 30 FF A7 28 28 80 9E 20 8C FC 6A 10 2A 80 BE 20 A0 C8 C4 00 00 FF 3D 28 2A 80 E1 00 08 81 01 00 0C FD A3 06 F2 81 5E 20 88 81 7E 20 94 55 00 28 34 FC 2A 18 28 FC A7 C8 28 7D 20 5A 14 FD 23 68 28 FF A1 10 2A C8 25 00 00 FF C9 68 2A FF 43 07 72 FD 03 F0 28 FF 1E 07 B2 FC 5E 02 32 7F CB 04 AE FD 9A D0 2A CB 49 00 08 FC 02 10 2A C8 4A 00 00 FF F8 00 2A FF 98 F8 28 CB 09 00 18 FD 7C 00 2A CB 89 00 10 FC 88 5A 3A FD A4 60 2A FC 9F 68 2A FD 04 06 F2 FD 3F 20 28 FC 04 40 28 FF E9 68 2A FD 60 40 2A FD 87 07 F2 FD 04 58 28 FF 6B 06 72 FD 48 06 72 FC C6 61 3A FF 2B 51 7A FC FB C8 2A FD 3B 38 28 FD A9 C8 2A FC 08 69 7A FD 80 30 2A FD 47 60 2A FF 8A 08 00 FD 67 50 28 FD 22 50 2A FD A0 50 90 FD 6B 60 2A 41 9D 00 08
+
+. 0 FE2533C 12
+. FD A0 50 50 FC 02 08 00 40 81 07 1C
+
+. 0 FE25348 8
+. FF 8D 10 00 40 9D 06 F4
+
+. 0 FE25A40 32
+. CB 2A 00 00 81 1E 20 84 FC D9 48 28 C9 A8 00 00 FC A6 50 2A FC 25 58 2A FC E1 68 2A 4B FF F9 10
+
+. 0 FE2536C 128
+. C9 A6 00 00 FD 69 38 2A C8 45 00 00 FD 84 03 72 81 3E 20 80 FC CB 03 72 FC A4 60 28 FD 29 58 28 FC 2B 30 28 FD 05 60 2A C8 A9 00 00 FF 29 38 2A FC 01 30 2A FD 84 40 28 FF 7F 02 F2 FD 4C 00 32 FC 2B 00 28 FC C8 00 32 FD A8 50 7A FD 24 DE 7A FC E6 68 2A FC 06 38 28 FD 00 68 2A FF 6C 40 7A FD 7B 48 2A FD 07 58 2A FC 88 10 00 FD 47 40 28 FC C5 40 2A FC E0 40 90 FD 2A 58 2A 41 85 00 08
+
+. 0 FE253F0 8
+. FF 05 10 00 40 99 06 40
+
+. 0 FE25A34 12
+. FC 40 28 50 FF 87 10 00 4B FF F9 C0
+
+. 0 FE253FC 4
+. 40 9D 06 18
+
+. 0 FE25A14 32
+. C9 89 00 00 81 3E 20 7C FC AC 30 28 C9 69 00 00 FC 25 40 2A FC 01 48 2A FC A0 58 2A 4B FF F9 EC
+
+. 0 FE2541C 204
+. CB 26 00 00 FD 46 28 2A C8 45 00 00 FD A4 06 72 FD 6A 06 72 FD 04 68 28 FC C6 50 28 FF 6A 58 28 FC E8 68 2A FC 26 28 2A FD 9B 58 2A FF 64 38 28 FD 3F 02 B2 FC BB 03 32 FD 6A 60 28 FC 07 03 32 FD A7 2A FA FC C4 48 7A FC 20 68 2A FC E3 06 72 FD 00 08 28 FD 83 38 28 FD 28 68 2A FD 8C 38 2A FC BB 4A FA FF 63 60 28 FC 05 30 2A FD A1 00 2A FC ED 06 72 FD 01 68 28 FD 5D 03 72 FF 2D 38 28 FD 68 00 2A FC D9 38 2A FF 23 52 FA FD 3B 01 B2 FD 0D 30 28 FC AC 01 B2 FC 2C 4A 3A FC C5 08 2A FC 05 30 28 FD 40 08 2A FC FB 52 3A FD 27 C8 2A FC 26 48 2A FF 81 10 00 FD A6 08 28 FC A1 18 2A FC E0 08 90 FC CD 48 2A 41 9D 00 08
+
+. 0 FE254E8 12
+. FC E0 08 50 FC 03 10 00 40 81 05 18
+
+. 0 FE254F4 8
+. FF 87 18 00 40 9D 04 FC
+
+. 0 FE259F4 20
+. FD 83 28 28 FC EC 08 2A FD 27 30 2A FC 29 E8 2A 4B FF FB 08
+
+. 0 FE2550C 144
+. C8 C6 00 00 80 9E 20 78 FC 04 01 B2 81 7E 20 74 C8 E4 00 00 C9 0B 00 00 FD 67 01 B2 C8 C5 00 00 81 3E 20 70 FD 44 00 28 FC 47 07 F2 FF 2A 00 2A FF A7 58 28 FD 08 11 3A FC 7D 58 2A FC 44 C8 28 FF A5 08 2A FD 47 18 28 C8 E9 00 00 FF 62 00 F2 FD 99 00 F2 FD B9 DA BA FD 25 E8 28 FF 6C 68 2A FF 29 08 2A FC 0C D8 28 FC 60 68 2A FD 62 1A BA FC AB 40 2A FD 5B 28 2A FC 8A 30 00 FC 3B 50 28 FD 27 50 2A FD A0 50 90 FD 61 28 2A 41 85 00 08
+
+. 0 FE2559C 12
+. FD A0 50 50 FF 07 30 00 40 99 04 44
+
+. 0 FE255A8 8
+. FF 8D 38 00 40 9D 04 1C
+
+. 0 FE259C8 32
+. C8 C9 00 00 80 FE 20 6C FC 06 48 28 C9 07 00 00 FC E0 50 2A FC 47 58 2A FC E2 40 2A 4B FF FB E8
+
+. 0 FE255CC 128
+. C9 A6 00 00 FD 69 38 2A C8 A5 00 00 FD 04 03 72 81 3E 20 68 FC 4B 03 72 FC C4 40 28 FD 29 58 28 FF 6B 10 28 FC 66 40 2A C8 C9 00 00 FC 29 38 2A FC 1B 10 2A FF 64 18 28 FD 9F 02 F2 FD 5B 00 32 FC 4B 00 28 FC E3 00 32 FD A3 50 BA FD 24 60 7A FD 47 68 2A FD 07 50 28 FC 68 68 2A FD 9B 18 BA FC 2C 48 2A FD 0A 08 2A FF 88 28 00 FD 6A 40 28 FD 26 40 2A FC E0 40 90 FD 4B 08 2A 41 9D 00 08
+
+. 0 FE25650 8
+. FC 06 28 00 40 81 03 68
+
+. 0 FE259BC 12
+. FC A0 30 50 FF 87 28 00 4B FF FC 98
+
+. 0 FE2565C 4
+. 40 9D 03 40
+
+. 0 FE2599C 32
+. C9 A9 00 00 81 5E 20 64 FC CD 48 28 C9 6A 00 00 FC 46 40 2A FF 62 50 2A FC FB 58 2A 4B FF FC C4
+
+. 0 FE2567C 128
+. C8 46 00 00 FD 69 38 2A C8 A5 00 00 FD 84 00 B2 81 3E 20 60 FF 6B 00 B2 FC C4 60 28 FD 29 58 28 FC 0B D8 28 FD A6 60 2A C8 C9 00 00 FD 1F 02 F2 FC 60 D8 2A FD 84 68 28 FC 29 38 2A FD 6B 18 28 FD 4C 00 F2 FC 4D 00 F2 FF 6D 52 FA FD 24 40 7A FC E2 D8 2A FC 02 38 28 FC 60 D8 2A FD 0C 1A FA FC 28 48 2A FD 07 08 2A FC 88 28 00 FD 47 40 28 FD 26 40 2A FC E0 40 90 FD 4A 08 2A 41 85 00 08
+
+. 0 FE256FC 12
+. FC E0 40 50 FF 06 28 00 40 99 02 8C
+
+. 0 FE25708 8
+. FF 87 30 00 40 9D 02 64
+
+. 0 FE25970 32
+. C9 89 00 00 81 7E 20 5C FC CC 48 28 C9 6B 00 00 FF 66 40 2A FC 1B 50 2A FC E0 58 2A 4B FF FD A0
+
+. 0 FE2572C 288
+. C8 66 00 00 FD 69 38 2A CB 65 00 00 FD 84 00 F2 FD 0B 00 F2 FC 04 60 28 FD 29 58 28 FC 4B 40 28 FD A0 60 2A FC DF 02 F2 FC A2 40 2A FD 04 68 28 FF E9 38 2A FC 4B 28 28 FC E8 01 72 FC 2D 01 72 FD 4D 38 BA FD 24 37 FA FC E1 50 2A FC DC 00 F2 FC 01 38 28 FC 9D 00 F2 FD 80 50 2A FD 7C 30 28 FF FE 00 F2 FC A8 60 BA FD BD 20 28 FD 8B 30 2A FC A5 48 2A FD 0D 20 2A FC DC 60 28 FD 27 28 2A FC 3E F8 28 FC 46 02 32 FD 49 00 F2 FD 61 F8 2A FF FD 40 28 FC 09 50 28 FC 3E 58 28 FC 6C 17 FA FC 80 50 2A FD 8C 02 32 FD 49 20 28 FD A4 02 F2 FD 0A 02 F2 FC E7 48 28 FD 78 07 72 FC 04 40 7A FD 0C 18 2A FC 47 28 2A FC 8D 00 2A FC AC 40 28 FC E2 07 B2 FD AD 20 28 FD 85 18 2A FC 5C 5E 7A FC 6D 00 2A FC A6 67 FA FF 2A 18 7A FF A9 3E BA FF 85 10 2A FF F9 E8 2A FC 68 E0 2A FD A4 F8 2A FF 83 D8 00 FF 08 18 28 FC 84 68 28 FD 63 68 28 FC 58 E0 2A FC 24 F8 2A FD 80 18 90 41 9D 00 08
+
+. 0 FE25850 8
+. FC 0D D8 00 40 81 01 10
+
+. 0 FE25964 12
+. FF 60 68 50 FF 8C D8 00 4B FF FE F0
+
+. 0 FE2585C 4
+. 40 9D 00 F4
+
+. 0 FE25860 48
+. FF 23 58 28 FF 19 68 28 FF 98 08 28 FC 5C 10 2A FC 2B 10 2A C9 A5 00 00 FC 81 68 00 FF EB 08 28 FD 81 F0 2A FC 60 08 90 FC 5F 10 2A 41 85 00 08
+
+. 0 FE25894 8
+. FF 1E 68 00 40 99 00 5C
+
+. 0 FE258F4 12
+. FC C0 F0 50 FF 83 30 00 41 BD FF A8
+
+. 0 FE258A4 80
+. FD A1 60 28 83 C1 00 18 CB 01 00 20 CB 21 00 28 FC 6D F0 2A CB 61 00 38 CB 81 00 40 CB A1 00 48 FF C3 D0 2A CB E1 00 58 FC 3E 10 2A CB C1 00 50 FC 4C 08 2A FD 2C 10 28 D8 43 00 00 FF 49 08 2A DB 43 00 08 CB 41 00 30 38 21 00 60 4E 80 00 20
+
+. 0 FE0D030 16
+. C9 A1 00 18 C8 1D 00 00 FF 8D 00 00 40 9D 0E 38
+
+. 0 FE0DE74 20
+. 83 FE 0F 68 FC 9F 00 00 C8 7F 00 00 FD AD 00 F2 40 85 00 80
+
+. 0 FE0DF04 8
+. 81 3E 0F BC 4B FF FF 84
+
+. 0 FE0DE8C 16
+. C8 C9 00 00 FD 7F 01 B2 FC 0D 58 28 4B FF F1 C8
+
+. 0 FE0D060 16
+. C8 21 00 10 FD 21 00 2A FF 01 48 00 41 9A 00 D8
+
+. 0 FE0D070 112
+. C8 DC 00 00 C9 3B 00 00 C9 7A 00 00 FC 69 37 FA C8 B9 00 00 83 5E 0E 48 83 3E 0E 44 C8 FA 00 00 FF 63 30 28 CB D9 00 00 D8 61 00 30 81 21 00 30 81 41 00 34 FC 4B 06 F2 71 40 00 02 FF 85 06 F2 FF BF 10 28 FD 07 06 F2 FD BD E0 28 FF 5E 06 F2 FC 0D 40 28 FF C0 D0 28 FD 8D 00 28 FD 40 F0 28 FC 8C 40 28 FC 2A D0 28 FC 44 08 2A 41 82 00 0C
+
+. 0 FE0D0E0 24
+. FF C0 F0 50 FC 40 10 50 C8 9D 00 00 FC 20 F0 90 FE 1E 20 00 41 91 00 0C
+
+. 0 FE0D0F8 16
+. FC 40 10 50 FC 20 F0 50 7F 03 C3 78 48 01 80 E9
+
+. 0 FE0D108 16
+. C9 A1 00 18 C8 1D 00 00 FF 8D 00 00 40 9D 0E 14
+
+. 0 FE0DF28 16
+. FF 1F 00 00 C9 5F 00 00 FD AD 02 B2 40 99 00 14
+
+. 0 FE0DF48 8
+. 81 3E 0F C0 4B FF FF 88
+
+. 0 FE0DED4 16
+. C9 89 00 00 FD 5F 03 32 FC 0D 50 28 4B FF F2 54
+
+. 0 FE0D134 16
+. C8 21 00 10 FF 41 00 2A FE 01 D0 00 40 92 04 70
+
+. 0 FE0D144 12
+. CB FD 00 00 FF 9E F8 00 41 BD F9 0C
+
+. 0 FE0D150 8
+. FC 20 08 50 4B FF FC 6C
+
+. 0 FE0CDC0 8
+. FD 60 08 90 4B FF FC 98
+
+. 0 10001658 32
+. 7D 20 DA 14 7C 1B 04 AE C9 A9 00 08 7D 20 CA 14 7C 19 05 AE 7C 00 5A 14 D9 A9 00 08 42 00 FF E4
+
+. 0 10008F60 8
+. 7C A3 2B 78 4B FF D8 95
+
+. 0 100067F8 20
+. 38 00 00 01 94 21 FF F0 7F 80 18 40 39 20 00 00 40 9C 00 14
+
+. 0 1000680C 16
+. 54 00 08 3C 39 29 00 01 7F 80 18 40 41 9C FF F4
+
+. 0 1000681C 20
+. 38 00 00 01 7C 00 48 30 7F 83 00 00 38 60 FF FF 40 9E 00 08
+
+. 0 10006830 12
+. 7D 23 4B 78 38 21 00 10 4E 80 00 20
+
+. 0 10008F68 12
+. 2F 83 FF FF 7C 79 1B 78 41 9E 02 40
+
+. 0 10008F74 24
+. 7F E3 FB 78 7F A4 EB 78 7F 85 E3 78 7F 26 CB 78 3B 00 00 00 4B FF D2 C5
+
+. 0 1000624C 28
+. 94 21 FF F0 39 00 00 00 93 E1 00 08 7C 9F 23 78 34 85 FF FF 38 E0 00 00 41 82 00 74
+
+. 0 10006268 24
+. 54 A5 F8 7E 38 C0 00 00 57 EC 20 36 7F 87 40 40 7C AA 2B 78 40 9C 00 40
+
+. 0 100062BC 8
+. 7F 8A 40 40 40 9D FF F4
+
+. 0 100062C4 20
+. 38 E7 00 01 7D 08 52 14 7F 84 38 40 7C C6 62 14 41 9D FF A0
+
+. 0 100062D8 16
+. 38 60 00 00 83 E1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 10008F8C 12
+. 7F 98 C8 40 3B 60 00 01 40 9C 01 C4
+
+. 0 10008F98 108
+. 6F 40 80 00 81 3E 80 8C 3F 40 43 30 90 01 00 0C 93 41 00 08 C8 09 00 00 C9 A1 00 08 81 3E 80 90 FD AD 00 28 82 FE 80 84 C8 09 00 00 82 DE 80 88 FD AD 68 2A 82 BE 80 94 82 9E 80 98 FF 6D 00 32 93 61 00 14 93 41 00 10 C8 15 00 00 CB E1 00 10 CB B7 00 00 FF FF 00 28 CB D6 00 00 FF FF F8 2A FF FB F8 24 FC 20 F8 90 48 01 FF 95
+
+. 0 10009004 16
+. FF 80 08 90 C8 34 00 00 FC 3F 00 72 48 01 FF 85
+
+. 0 FE0CCC4 8
+. 2F 09 00 00 40 99 06 44
+
+. 0 FE0D30C 16
+. 83 9E 0E 78 C8 5C 00 00 FD 82 08 2A 4B FF F9 C0
+
+. 0 FE0CCD8 20
+. 83 FE 0F 64 C8 FF 00 00 FF 8C 38 00 4F DD F3 82 41 9E 0A 88
+
+. 0 FE0D770 48
+. 81 5E 0E 80 83 BE 0E 74 C8 CA 00 00 CB DD 00 00 FC 86 60 2A FC A4 30 28 D8 81 00 30 81 61 00 30 81 81 00 34 FD AC 28 28 FD 4D F0 2A 4B FF F5 80
+
+. 0 FE0CD1C 152
+. FD 0A 02 B2 80 DE 0F 6C 80 BE 0F 70 55 80 28 34 80 9E 0F 78 80 7E 0F 74 C8 46 00 00 FD 2A 02 32 C8 E5 00 00 CB 64 00 00 CB 83 00 00 FF 48 38 BA 80 FE 0F 7C FF A8 E6 FA 81 1E 0E 7C C9 67 00 00 7D 20 42 14 7D A8 04 AE FC 3A 5A 3A C8 49 00 10 C8 89 00 18 FC C9 57 7A C9 49 00 08 81 9E 0F 90 FC 01 02 32 CB 4C 00 00 FC 66 02 B2 FD 82 00 32 FC A4 18 28 FF CD 01 B2 FC E5 60 28 FF 67 F0 28 FD 82 D8 2A FF 82 60 28 FF BB E0 2A FD 1D 66 BA FC 08 60 00 40 82 08 10
+
+. 0 FE0CDB4 8
+. FC 20 60 90 41 B9 FC A0
+
+. 0 FE0CDBC 12
+. FC 20 08 50 FD 60 08 90 4B FF FC 98
+
+. 0 10009014 24
+. 39 00 00 00 FC 01 00 72 7F 88 E0 40 57 66 08 3C FD 01 00 7A 40 9C 00 64
+
+. 0 1000902C 96
+. 7C 08 DA 14 7D 3D 41 D6 7D 08 32 14 7F 88 E0 40 7C 1D 01 D6 55 29 20 36 7C 09 FC AE 7D 69 FA 14 54 00 20 36 7D 7F 04 AE 7D 40 FA 14 C9 8A 00 08 FC 00 58 28 7C 1F 05 AE C9 AB 00 08 FD AD 60 28 D9 AA 00 08 7C 09 FC AE FC 00 58 2A 7C 09 FD AE C9 AB 00 08 FD AD 60 2A D9 AB 00 08 41 9C FF A4
+
+. 0 1000908C 12
+. 38 E0 00 01 7F 87 D8 40 40 9C 00 B4
+
+. 0 10009148 16
+. 3B 18 00 01 7C DB 33 78 7F 98 C8 40 41 9C FE 84
+
+. 0 10009158 88
+. 38 60 00 00 80 01 00 74 82 81 00 18 82 A1 00 1C 7C 08 03 A6 82 C1 00 20 82 E1 00 24 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB 61 00 48 CB 81 00 50 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 1000169C 32
+. 7D 20 DA 14 7C 1B 04 AE C9 A9 00 08 7D 20 BA 14 7C 17 05 AE 7C 00 5A 14 D9 A9 00 08 42 00 FF E4
+
+. 0 100008E8 8
+. FC 20 F8 90 42 00 FF C0
+
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 2, stride = 1
+. 0 FE0CE6C 20
+. 81 3E 0F 94 2F 9F 00 01 C9 29 00 00 FC 9F 02 72 41 9E 04 D0
+
+. 0 FE0DD24 16
+. 80 7E 0F 90 C8 23 00 00 FC 0D 20 78 4B FF F1 F4
+
+. 0 FE0CF24 16
+. FC 8C 00 2A FC 20 60 90 FE 04 60 00 41 B2 FB 28
+
+. 0 FE0DDDC 20
+. 81 1E 0F 88 FC 9F 28 00 C9 A8 00 00 FC C6 03 72 40 85 01 00
+
+. 0 FE0DDF0 20
+. 81 3E 0F 9C CB A9 00 00 FF 7F 07 72 FC 06 D8 28 4B FF F2 00
+
+. 0 FE0D000 12
+. FF 81 00 2A FF 1C 08 00 41 BA FA 50
+
+. 0 FE0D024 12
+. 3B 01 00 10 7F 03 C3 78 48 01 81 C1
+
+. 0 FE0DE88 20
+. 81 3E 0F A0 C8 C9 00 00 FD 7F 01 B2 FC 0D 58 28 4B FF F1 C8
+
+. 0 FE0D100 8
+. 7F 03 C3 78 48 01 80 E9
+
+. 0 FE0DF38 8
+. 81 3E 0F A4 4B FF FF 98
+
+. 0 FE0CA58 88
+. FD 60 08 90 83 01 00 A4 FC 20 58 90 80 81 00 4C 7F 08 03 A6 83 21 00 54 83 01 00 50 7C 80 81 20 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C CB 41 00 70 CB 61 00 78 CB 81 00 80 CB A1 00 88 CB C1 00 90 CB E1 00 98 38 21 00 A0 4E 80 00 20
+
+. 0 FE0CCCC 32
+. 83 9E 0E 78 CB 9C 00 00 FD 9C 08 28 83 FE 0F 64 C8 FF 00 00 FF 8C 38 00 4F DD F3 82 41 9E 0A 88
+
+. 0 1000929C 36
+. 7C 1F 04 AE 7D 20 FA 14 FC 00 03 32 7C 1F 05 AE 7C 00 22 14 C9 A9 00 08 FD AD 03 32 D9 A9 00 08 42 00 FF E0
+
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 2, stride = 1
+. 0 100017A4 36
+. 7C 19 04 AE 7D 20 CA 14 FC 00 03 32 7C 19 05 AE 7C 00 5A 14 C9 A9 00 08 FD AD 03 32 D9 A9 00 08 42 00 FF E0
+
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 2, stride = 1
+. 0 10004864 36
+. 7D 20 32 14 7D 66 01 2E 38 C6 00 04 7D 86 01 2E 38 C6 FF FC 91 69 00 08 91 89 00 0C 7C 00 52 14 42 00 FF E0
+
+. 0 100048E8 48
+. 7C 1D F3 96 93 01 00 08 7C 00 F1 D6 7C 00 E8 50 7F BD DA 14 90 01 00 0C CB E1 00 08 FF FF C8 28 FF FF 06 B2 FF FF D8 24 FC 20 F8 90 48 02 45 E1
+
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 2, stride = 1
+. 0 100049E8 20
+. 7D 20 2A 14 7C 25 05 AE D8 49 00 08 7C 00 5A 14 42 00 FF F0
+
+. 0 10004A18 36
+. 7D 20 32 14 7D 66 01 2E 38 C6 00 04 7D 86 01 2E 38 C6 FF FC 91 69 00 08 91 89 00 0C 7C 00 22 14 42 00 FF E0
+
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 2, stride = 1
+. 0 10004B60 48
+. 7C 1D F3 96 92 C1 00 08 7C 00 F1 D6 7C 00 E8 50 7F BD D2 14 90 01 00 0C CB E1 00 08 FF FF C8 28 FF FF 06 B2 FF FF D8 24 FC 20 F8 90 48 02 43 69
+
+. 0 10004BE0 36
+. 7D 20 DA 14 7D 7B 01 2E 3B 7B 00 04 7D 9B 01 2E 3B 7B FF FC 91 69 00 08 91 89 00 0C 7C 00 52 14 42 00 FF E0
+
+. 0 1000191C 48
+. 3D 20 10 02 3D 60 10 02 39 29 81 18 39 6B 80 F0 C8 29 00 00 7F 66 DB 78 C8 4B 00 00 7F 07 C3 78 7F E3 FB 78 7F C4 F3 78 7F 45 D3 78 48 00 31 69
+
+. 0 FE0E32C 20
+. 81 3E 0F F4 2F 1F 00 01 C9 29 00 00 FC 7F 02 72 41 9A 05 C0
+
+. 0 FE0E6EC 192
+. FC 40 10 50 FC 80 20 50 D8 41 00 20 81 5E 0E 80 80 BE 0F C8 C9 6A 00 00 80 9E 0F CC FC 0B 10 2A 80 7E 0F D0 83 BE 0F D4 CB C5 00 00 FD 20 58 28 C8 C4 00 00 C9 03 00 00 CB BD 00 00 FD 42 48 28 80 FE 0F D8 D8 01 00 30 C8 A7 00 00 FD AA 20 2A 81 61 00 30 81 81 00 34 81 1E 0E 7C 55 80 28 34 D8 81 00 28 FD 2D 03 72 7D 20 42 14 C9 49 00 08 FD 89 37 BA CB C9 00 18 FC E9 EA 3A C9 09 00 10 7F A8 04 AE FD 6D 02 72 FC 0C 2A 7A FD 6B 69 FA FC C0 02 72 FC EB 02 B2 FD 88 01 B2 FC 1E 38 28 FD 5D 02 F2 FD A0 60 28 FC AD 50 28 FD A8 28 2A FF C8 68 28 FD 45 F0 2A FC 8A 08 00 40 85 04 B4
+
+. 0 10004C0C 160
+. 7C 1A F3 96 7C 00 F1 D6 7C 60 D0 50 3C 00 43 30 3D 20 10 02 90 01 00 10 39 29 80 F8 93 C1 00 14 7C 17 19 D6 C9 A1 00 10 38 60 00 00 C8 09 00 00 FD AD 00 28 54 00 20 36 7D 20 DA 14 FC 0D 07 72 FD AD 07 32 7D BB 05 AE D8 09 00 08 80 01 00 84 82 C1 00 20 82 E1 00 24 7C 08 03 A6 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB 21 00 48 CB 41 00 50 CB 61 00 58 CB 81 00 60 CB A1 00 68 CB C1 00 70 CB E1 00 78 38 21 00 80 4E 80 00 20
+
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 2, stride = 1
+. 0 10001EE0 20
+. 39 20 00 01 38 E7 00 01 7D 20 38 30 7F 80 F0 40 41 9C FF F4
+
+. 0 10001F18 24
+. 7C E9 03 A6 55 20 07 FE 55 6B 08 3C 7D 6B 03 78 55 29 F8 7E 42 00 FF F0
+
+. 0 10001F08 16
+. 39 60 00 00 7D 49 53 78 7F 8B 38 40 40 9C 00 1C
+
+. 0 10001F60 24
+. 57 C5 F8 7E 39 00 00 00 57 67 18 38 7F 8A 58 40 7C A9 2B 78 40 9C 00 28
+
+. 0 10001F9C 8
+. 7F 89 58 40 40 9D FF F4
+
+. 0 10001FA4 20
+. 39 4A 00 01 7D 6B 4A 14 7F 86 50 40 7D 08 3A 14 41 9D FF B8
+
+. 0 10000ADC 32
+. 7D 9F 04 AE 7D BE 04 AE 7C 00 4A 14 FC 0C 68 28 FC 00 02 10 FF E0 02 F2 FF 9F 08 00 40 9D 00 08
+
+PASS: fft_real_bitreverse_order, n = 2
+. 0 10005174 4
+. 4B FF F6 19
+
+. 0 1000218C 24
+. 55 60 20 36 55 69 18 38 7C 1E 04 AE 7D 6B DA 14 7C 09 FD AE 42 00 FF EC
+
+. 0 10012600 8
+. 7C A3 2B 78 4B FF 41 F5
+
+. 0 10012608 12
+. 2F 83 FF FF 7C 79 1B 78 41 9E 02 54
+
+. 0 10012614 24
+. 7F 85 E3 78 7F E3 FB 78 7F A4 EB 78 7F 26 CB 78 3B 00 00 01 4B FF 3C C1
+
+. 0 100062E8 20
+. 34 C5 FF FF 94 21 FF F0 39 60 00 00 39 40 00 00 41 82 00 5C
+
+. 0 100062FC 24
+. 54 A5 F8 7E 39 00 00 00 54 87 18 38 7F 8A 58 40 7C A9 2B 78 40 9C 00 28
+
+. 0 10006338 8
+. 7F 89 58 40 40 9D FF F4
+
+. 0 10006340 20
+. 39 4A 00 01 7D 6B 4A 14 7F 86 50 40 7D 08 3A 14 41 9D FF B8
+
+. 0 10006354 12
+. 38 60 00 00 38 21 00 10 4E 80 00 20
+
+. 0 1001262C 16
+. 7F 98 C8 40 7F 9A E3 78 3B 80 00 01 41 9D 01 D0
+
+. 0 1001263C 40
+. 82 DE 82 84 3E E0 43 30 82 BE 82 88 82 9E 82 8C 82 7E 82 90 82 5E 82 94 57 5A F8 7F 7F 9B E3 78 57 9C 08 3C 41 82 00 40
+
+. 0 10012664 60
+. 7F 49 03 A6 39 60 00 00 7C 0B DA 14 7D 2B E9 D6 7D 6B E2 14 7C 00 E9 D6 55 29 18 38 7C 09 FC AE 54 00 18 38 7D 9F 04 AE FD A0 60 28 FC 00 60 2A 7C 09 FD AE 7D BF 05 AE 42 00 FF D0
+
+. 0 100126A0 44
+. 93 81 00 0C 92 E1 00 08 C9 B4 00 00 C8 01 00 08 CB F3 00 00 FC 00 68 28 CB B6 00 00 CB D5 00 00 FF FF 00 24 FC 20 F8 90 48 01 68 CD
+
+. 0 100126CC 16
+. FF 80 08 90 C8 32 00 00 FC 3F 00 72 48 01 68 BD
+
+. 0 100126DC 24
+. 57 65 F8 7E FC 01 00 72 2B 85 00 01 38 C0 00 01 FC 81 00 7A 40 9D 00 D8
+
+. 0 100127C8 8
+. 2B 9B 00 01 40 9D 00 30
+
+. 0 100127FC 12
+. 3B 18 00 01 7F 98 C8 40 40 9D FE 50
+
+. 0 10012808 92
+. 38 60 00 00 80 01 00 74 82 41 00 18 82 61 00 1C 7C 08 03 A6 82 81 00 20 82 A1 00 24 82 C1 00 28 82 E1 00 2C 83 01 00 30 83 21 00 34 83 41 00 38 83 61 00 3C 83 81 00 40 83 A1 00 44 83 C1 00 48 83 E1 00 4C CB 81 00 50 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 1000E6A8 36
+. 7D 2C 29 D6 C9 BF 00 00 55 20 18 38 55 29 20 36 7C 03 04 AE 7D 69 22 14 7C 09 25 AE D9 AB 00 08 4B FF FF C4
+
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 2, stride = 1
+. 0 1000E114 8
+. 7C A3 2B 78 4B FF 86 E1
+
+. 0 1000E11C 12
+. 2F 83 FF FF 7C 78 1B 78 41 9E 02 80
+
+. 0 1000E128 24
+. 3B 20 00 01 7E FC BB 78 7F 99 18 40 3B 40 00 01 56 FB F8 7E 41 9D 01 F0
+
+. 0 1000E140 40
+. 81 3E 81 B8 3E C0 43 30 82 BE 81 A4 CB 69 00 00 82 9E 81 A8 82 7E 81 AC 82 5E 81 B0 82 3E 81 B4 2F 9A 00 00 41 9E 00 40
+
+. 0 1000E168 60
+. 7F 49 03 A6 39 60 00 00 7C 0B DA 14 7D 2B E9 D6 7D 6B E2 14 7C 00 E9 D6 55 29 18 38 7C 09 FC AE 54 00 18 38 7D 9F 04 AE FD A0 60 28 FC 00 60 2A 7C 09 FD AE 7D BF 05 AE 42 00 FF D0
+
+. 0 1000E1A4 44
+. 93 81 00 0C 92 C1 00 08 C9 B3 00 00 C8 01 00 08 CB F2 00 00 FC 00 68 28 CB B5 00 00 CB D4 00 00 FF FF 00 24 FC 20 F8 90 48 01 AD C9
+
+. 0 1000E1D0 16
+. FF 80 08 90 C8 31 00 00 FC 3F 00 72 48 01 AD B9
+
+. 0 1000E1E0 24
+. 57 65 F8 7E FC 01 00 72 2B 85 00 01 38 C0 00 01 FC A1 00 7A 40 9D 00 C8
+
+. 0 1000E2BC 8
+. 2B 9B 00 01 40 9D 00 54
+
+. 0 1000E314 24
+. 3B 39 00 01 7C BB 2B 78 7F 99 C0 40 57 9C F8 7E 57 5A 08 3C 40 9D FE 38
+
+. 0 1000E32C 20
+. 7F E3 FB 78 7F A4 EB 78 7E E5 BB 78 7F 06 C3 78 4B FF 7F AD
+
+. 0 1000E340 100
+. 38 60 00 00 80 01 00 84 82 21 00 1C 82 41 00 20 7C 08 03 A6 82 61 00 24 82 81 00 28 82 A1 00 2C 82 C1 00 30 82 E1 00 34 83 01 00 38 83 21 00 3C 83 41 00 40 83 61 00 44 83 81 00 48 83 A1 00 4C 83 C1 00 50 83 E1 00 54 CB 61 00 58 CB 81 00 60 CB A1 00 68 CB C1 00 70 CB E1 00 78 38 21 00 80 4E 80 00 20
+
+. 0 10002250 20
+. 7C 1F 04 AE FC 00 68 24 7C 1F 05 AE 7C 00 4A 14 42 00 FF F0
+
+. 0 100128C4 32
+. 55 40 18 38 55 49 20 36 7C 03 04 AE 7D 69 22 14 7D 4A 2A 14 7C 09 25 AE D9 AB 00 08 42 00 FF E4
+
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 2, stride = 1
+PASS: fft_complex_bitreverse_order, n = 2
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 2, stride = 2
+. 0 10000C48 12
+. 2B 84 00 01 39 6B 00 02 40 9D 00 80
+
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 2, stride = 2
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 2, stride = 2
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 2, stride = 2
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 2, stride = 2
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 2, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 2, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 2, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 2, stride = 2
+PASS: fft_real_bitreverse_order, n = 2
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 2, stride = 2
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 2, stride = 2
+PASS: fft_complex_bitreverse_order, n = 2
+. 0 FEEEC18 8
+. 71 20 00 02 40 82 02 FC
+
+. 0 FEEEC20 20
+. 81 03 00 30 7F 5F EA 14 83 7E 06 5C 7F 08 F8 00 41 9A 02 54
+
+. 0 FEEEC34 8
+. 71 60 00 02 41 82 03 50
+
+. 0 FEEEF88 24
+. 82 48 00 04 83 7E 06 74 56 51 00 38 7D 68 8A 14 7F 8B D0 40 41 9D FC A0
+
+. 0 FEEEC3C 16
+. 80 1A 00 04 83 7E 06 60 70 0B 00 01 41 82 02 3C
+
+. 0 FEEEC4C 12
+. 28 80 00 08 54 1B 00 38 40 85 02 2C
+
+. 0 FEEEC58 12
+. 82 7C 04 4C 7F 13 D8 40 40 99 02 20
+
+. 0 FEEEC64 8
+. 71 27 00 01 40 82 00 3C
+
+. 0 FEEECA4 8
+. 7F 88 D0 00 41 9E 02 C0
+
+. 0 FEEEF68 20
+. 7F BD DA 14 93 FC 00 30 63 AA 00 01 91 5F 00 04 4B FF FD 94
+
+. 0 FEEED0C 8
+. 28 9D FF FF 40 85 01 24
+
+. 0 FEEED14 12
+. 83 BC 00 04 73 A9 00 01 41 82 02 60
+
+. 0 FEEEF7C 8
+. 7F 83 E3 78 4B FF F5 45
+
+. 0 FEEE53C 48
+. 61 63 00 01 81 DE 06 4C 54 69 F8 7A 82 1E 05 F0 90 78 00 04 7E 89 C2 14 3B 58 00 38 3A D8 00 08 7D CF 73 78 83 F6 00 00 2C 1F 00 00 41 82 01 A0
+
+. 0 FEEE56C 24
+. 38 A0 00 00 7E 13 83 78 90 B6 00 00 7D F2 7B 78 7D D1 73 78 48 00 00 5C
+
+. 0 FEEE5DC 36
+. 80 FF 00 04 39 00 FF FA 82 FF 00 08 7C FC 40 38 70 E8 00 01 7F BF E2 14 80 DD 00 04 54 DB 00 38 40 82 00 38
+
+. 0 FEEE634 12
+. 83 38 00 30 7C 99 E8 00 41 86 00 B0
+
+. 0 FEEE640 16
+. 7C 9D DA 14 80 04 00 04 70 0A 00 01 41 A2 FF 38
+
+. 0 FEEE650 16
+. 80 BD 00 04 54 A9 00 3C 91 3D 00 04 4B FF FF 54
+
+. 0 FEEE5B0 44
+. 2F 17 00 00 80 DA 00 08 63 87 00 01 93 FA 00 08 93 E6 00 0C 7F 9F E1 2E 90 FF 00 04 90 DF 00 08 93 5F 00 0C 7E FF BB 78 41 9A 01 30
+
+. 0 FEEE600 32
+. 81 7F 00 00 7F EB F8 50 7F 9C 5A 14 81 5F 00 08 81 7F 00 0C 81 8A 00 0C 7C 8C F8 00 40 86 00 FC
+
+. 0 FEEE620 12
+. 82 AB 00 08 7F 15 F8 00 40 9A 00 F0
+
+. 0 FEEE62C 20
+. 91 4B 00 08 91 6A 00 0C 83 38 00 30 7C 99 E8 00 41 86 00 B0
+
+. 0 FEEE708 12
+. 7F 96 A0 00 3A D6 00 04 40 9E FE 50
+
+. 0 FEEE560 12
+. 83 F6 00 00 2C 1F 00 00 41 82 01 A0
+
+. 0 FEEE584 24
+. 81 7D 00 08 7F 9C DA 14 81 3D 00 0C 83 6B 00 0C 7F 1B E8 00 40 9A 01 9C
+
+. 0 FEEE59C 12
+. 80 69 00 08 7F 83 E8 00 40 9E 01 90
+
+. 0 FEEE5A8 52
+. 91 69 00 08 91 2B 00 0C 2F 17 00 00 80 DA 00 08 63 87 00 01 93 FA 00 08 93 E6 00 0C 7F 9F E1 2E 90 FF 00 04 90 DF 00 08 93 5F 00 0C 7E FF BB 78 41 9A 01 30
+
+. 0 FEEE714 4
+. 4B FF FF 80
+
+. 0 FEEE694 88
+. 82 61 00 74 81 C1 00 28 7E 68 03 A6 81 E1 00 2C 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 FEEEF84 4
+. 4B FF FD 9C
+
+. 0 FEEED20 12
+. 83 FE 05 FC 7F 1C F8 00 41 9A 03 9C
+
+. 0 FEEF0C4 28
+. 82 3C 00 30 81 5E 06 28 81 71 00 04 81 8A 00 00 55 7F 00 38 7F 9F 60 40 41 BC FD 58
+
+. 0 FEEF0E0 36
+. 82 EA 00 04 82 4A 00 18 7E D7 F8 50 7E B6 92 14 3A 95 FF EF 7C 94 93 96 3A 64 FF FF 7F 73 91 D7 40 A1 FD 34
+
+. 0 FEEEE34 76
+. 83 A1 00 64 82 21 00 24 7F A8 03 A6 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+
+. 0 FEEEC6C 32
+. 82 9F 00 00 7F F4 F8 50 7F BD A2 14 81 5F 00 08 81 7F 00 0C 80 8A 00 0C 7F 84 F8 00 40 9E 04 20
+
+. 0 FEEEC8C 12
+. 82 AB 00 08 7C 95 F8 00 40 86 04 14
+
+. 0 FEEEC98 20
+. 91 4B 00 08 91 6A 00 0C 81 1C 00 30 7F 88 D0 00 41 9E 02 C0
+
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 2, stride = 3
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 2, stride = 3
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 2, stride = 3
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 2, stride = 3
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 2, stride = 3
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 2, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 2, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 2, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 2, stride = 3
+. 0 FEEECAC 16
+. 7F 3A DA 14 83 19 00 04 73 0A 00 01 40 82 02 A0
+
+. 0 FEEEF58 16
+. 80 FA 00 04 54 FB 00 3C 93 7A 00 04 4B FF FD 84
+
+. 0 FEEECE8 44
+. 38 7C 00 38 63 A8 00 01 80 C3 00 08 90 7F 00 0C 90 DF 00 08 91 1F 00 04 7F BF E9 2E 93 E6 00 0C 93 E3 00 08 28 9D FF FF 40 85 01 24
+
+PASS: fft_real_bitreverse_order, n = 2
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 2, stride = 3
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 2, stride = 3
+. 0 FEEE6EC 28
+. 2F 17 00 00 7D 1C DA 14 61 1C 00 01 93 F8 00 30 93 9F 00 04 7E FF BB 78 40 9A FE D8
+
+. 0 100013B4 16
+. 38 E7 00 01 7D 20 38 30 7F 80 F0 40 41 9C FF F4
+
+. 0 100013EC 20
+. 55 20 07 FE 55 6B 08 3C 7D 6B 03 78 55 29 F8 7E 42 00 FF F0
+
+. 0 1000144C 12
+. 7F 87 40 40 7C 6A 1B 78 40 9C 00 40
+
+. 0 10001458 52
+. 7C 1B 41 D6 7D 66 FA 14 7D 86 FC AE C9 6B 00 08 54 00 20 36 7D BF 04 AE 7D 20 FA 14 7D A6 FD AE C8 09 00 08 D8 0B 00 08 7D 9F 05 AE D9 69 00 08 48 00 00 0C
+
+. 0 1000148C 16
+. 7D 0A 40 50 55 4A F8 7E 7F 8A 40 40 40 9D FF F4
+
+PASS: fft_complex_bitreverse_order, n = 4
+. 0 FE0E1C4 16
+. 80 DE 0F C4 CB A6 00 00 FC 81 E8 00 41 85 01 98
+
+. 0 FE0E1D4 60
+. 83 BE 0E 78 80 7E 0E 74 C8 7D 00 00 83 FE 0F EC FC 03 08 2A C8 23 00 00 C9 BF 00 00 FC C1 00 2A FD 46 01 B2 D8 C1 00 20 FD 60 30 28 FF 8A 68 00 FC 4B 08 2A D8 41 00 28 40 9C 01 98
+
+. 0 FE0E210 100
+. 81 5E 0E 70 81 1E 0E 6C C9 AA 00 00 C9 28 00 00 80 FE 0E 68 FD 6D 4A BA 80 BE 0E 64 C8 E7 00 00 C8 25 00 00 81 3E 0E 60 FC 6B 3A BA 80 9E 0F D8 CB A9 00 00 C8 84 00 00 C8 E6 00 00 FC A3 0A BA FF C2 01 32 FD 05 EA BA FD 88 F1 B8 FD 2C 12 BA FC 26 48 2A FC 06 08 28 FD 80 48 2A FF 0C 38 00 40 99 04 3C
+
+. 0 FE0E6AC 24
+. 83 BE 0F DC 81 9E 0F F0 CB DD 00 00 C8 0C 00 00 FC 0C 07 B8 4B FF FB C8
+
+. 0 FE0E288 12
+. FD 81 00 2A FC 0C 08 00 41 82 FE FC
+
+. 0 FE0E294 12
+. FC 20 30 90 FC 60 F8 90 4B FF D7 FD
+
+. 0 FE0BA98 12
+. 94 21 FF B0 7C 08 02 A6 48 06 76 11
+
+. 0 FE0BAA4 232
+. 93 C1 00 38 7F C8 02 A6 FC E1 00 72 DB C1 00 40 DB E1 00 48 FF E0 08 90 81 1E 0E 8C 80 FE 0E 70 C9 48 00 00 81 7E 0E 6C FD 81 50 2A CB C7 00 00 C9 2B 00 00 80 DE 0E 88 FD 8C 50 28 80 BE 0E 68 FC BE 49 FA 80 9E 0E 90 C8 C6 00 00 FD 01 60 28 C8 05 00 00 C8 24 00 00 FD 46 03 32 80 7E 0E 64 FD A5 01 FA 81 3E 0E 84 C8 83 00 00 FD 66 00 72 C9 29 00 00 93 E1 00 3C FD 08 10 2A 83 FE 0E 94 FF CD 21 FA 93 81 00 30 93 A1 00 34 FC 0B 03 32 90 01 00 54 FC A6 02 32 C8 DF 00 00 FC 2A 03 32 FD BE 49 FA FC 80 02 32 FF C5 02 32 FD 41 03 32 FD 2D 21 FA FD 7E 02 32 FC 9F 50 2A FF C0 18 90 FC A9 5F FA FC 7F 20 28 FC 25 10 2A FC 03 50 2A FD A1 00 2A FC 24 68 2A FD 84 08 28 FD 8C 68 2A FF 8C 30 00 40 9D 00 58
+
+. 0 FE0BBE0 20
+. FC 9E 30 00 81 9E 0E 98 C8 CC 00 00 FD 8C 01 B2 41 85 00 78
+
+. 0 FE0BBF4 28
+. 81 3E 0E A0 C9 A9 00 00 FD 7E 03 72 FD 8C 58 28 FC 81 60 2A FF 04 08 00 41 BA FF AC
+
+. 0 FE0BC10 16
+. C9 1F 00 00 FC 20 F8 90 FF 9F 40 00 41 9D 00 0C
+
+. 0 FE0BC28 12
+. 3B 81 00 10 7F 83 E3 78 48 01 95 BD
+
+. 0 FE0BC34 16
+. C9 A1 00 18 C8 1F 00 00 FC 0D 00 00 40 81 00 40
+
+. 0 FE0BC80 20
+. 83 BE 0E A4 FF 1E 00 00 C8 BD 00 00 FD AD 01 72 41 99 01 40
+
+. 0 FE0BC94 32
+. 81 3E 0E AC C9 29 00 00 FC 1E 02 72 FC 0D 00 28 C9 A1 00 10 FD 8D 00 2A FF 8D 60 00 41 9E 00 E4
+
+. 0 FE0BCB4 132
+. 80 FE 0E 5C 81 7E 0E 58 C8 C7 00 00 C8 0B 00 00 80 DE 0E 54 FD 26 07 BA 80 BE 0E 50 C8 E6 00 00 C9 85 00 00 80 9E 0E 48 FD 69 00 28 80 7E 0E 44 C9 44 00 00 CB E3 00 00 FC 67 02 F2 D9 21 00 20 81 21 00 20 81 41 00 24 FC AC 02 F2 55 49 07 BE FC 3E 18 28 2C 09 00 01 FC 8A 02 F2 FC 41 28 28 FC DF 02 F2 FD 02 20 28 FF E8 30 28 FD A2 40 28 FC E8 F8 28 FC 6D 20 28 FD 47 30 28 FC 43 50 2A 41 82 00 E4
+
+. 0 FE0BD38 16
+. C9 BF 00 00 FC 20 F8 90 FC 9F 68 00 41 85 00 0C
+
+. 0 FE0BD50 8
+. 7F 83 E3 78 48 01 94 99
+
+. 0 FE0BD58 16
+. C9 A1 00 18 C8 1F 00 00 FF 0D 00 00 40 99 00 84
+
+. 0 FE0BDE8 16
+. FC 1E 00 00 C8 5D 00 00 FD AD 00 B2 40 81 00 38
+
+. 0 FE0BE2C 8
+. 81 3E 0E B4 4B FF FF CC
+
+. 0 FE0BDFC 16
+. C8 29 00 00 FD 7E 00 72 FC 0D 58 28 4B FF FF 7C
+
+. 0 FE0BD84 16
+. C9 A1 00 10 FC AD 00 2A FC 8D 28 00 40 86 00 7C
+
+. 0 FE0BD94 16
+. CB DF 00 00 FC 20 68 90 FF 1F F0 00 41 B9 FE 18
+
+. 0 FE0BBB8 40
+. 83 81 00 54 83 A1 00 34 7F 88 03 A6 83 C1 00 38 83 81 00 30 83 E1 00 3C CB C1 00 40 CB E1 00 48 38 21 00 50 4E 80 00 20
+
+. 0 FE0E2A0 4
+. 4B FF FE EC
+
+. 0 FE0E8FC 8
+. FD 42 00 B2 40 9A 00 10
+
+. 0 FE0E904 32
+. FC 40 10 50 FC 80 20 50 D8 41 00 20 81 1E 0F EC D8 81 00 28 CB C8 00 00 FF 0A F0 00 40 98 00 A4
+
+. 0 FE0E924 100
+. 80 7E 0E 70 80 BE 0E 6C C8 C3 00 00 C9 05 00 00 83 FE 0E 68 FD 26 42 BA 83 BE 0E 64 CB DF 00 00 C8 1D 00 00 C8 C6 00 00 FD 69 F2 BA 81 9E 0E 60 80 DE 0F D8 C9 8C 00 00 C8 A6 00 00 FD AB 02 BA FF A4 01 72 FC ED 62 BA FC 27 E8 B8 FD 01 22 BA FC 22 40 2A FD 42 08 28 FD AA 40 2A FF 8D 30 00 40 9D 03 E8
+
+. 0 FE0E988 24
+. 80 9E 0F DC C9 44 00 00 FC 0D 1A BA FC 61 00 2A FC 03 08 00 41 82 F7 F0
+
+. 0 FE0E9A0 16
+. FC 20 10 90 FC 60 F8 90 FC 40 20 90 4B FF D0 ED
+
+. 0 FE0BB8C 20
+. 81 5E 0E 98 FC 1E 30 00 C8 6A 00 00 FD 8C 00 F2 40 81 00 D4
+
+. 0 FE0BC70 16
+. 81 3E 0E A0 C8 E9 00 00 FD 9E 61 FA 4B FF FF 30
+
+. 0 FE0BBAC 12
+. FC 81 60 2A FF 04 08 00 40 9A 00 5C
+
+. 0 FE0BC20 20
+. FC 40 10 50 FC 20 F8 50 3B 81 00 10 7F 83 E3 78 48 01 95 BD
+
+. 0 FE0BE18 12
+. FF E0 F8 50 FC 40 10 50 4B FF FF 18
+
+. 0 FE0BD48 16
+. FC 40 10 50 FC 20 F8 50 7F 83 E3 78 48 01 94 99
+
+. 0 FE0BD68 16
+. FF 9E 00 00 C8 9D 00 00 FD AD 01 32 40 9D 00 B0
+
+. 0 FE0BE24 8
+. 81 3E 0E B4 4B FF FF 54
+
+. 0 FE0BD7C 24
+. C9 09 00 00 FC 1E 6A 3A C9 A1 00 10 FC AD 00 2A FC 8D 28 00 40 86 00 7C
+
+. 0 FE0BDA4 44
+. 83 81 00 54 FC 20 68 50 83 A1 00 34 7F 88 03 A6 83 C1 00 38 83 81 00 30 83 E1 00 3C CB C1 00 40 CB E1 00 48 38 21 00 50 4E 80 00 20
+
+. 0 FE0E9B0 4
+. 4B FF F7 DC
+
+. 0 FE0D34C 12
+. C8 7D 00 00 FF 9E 18 00 40 9C 00 10
+
+. 0 FE0D364 180
+. 81 5E 0E 80 80 FE 0F 6C C9 4A 00 00 80 DE 0F 70 FD 8A F0 2A 80 9E 0F 78 80 7E 0F 74 C8 27 00 00 FC CC 50 28 C9 66 00 00 CB A4 00 00 C8 03 00 00 FD BE 30 28 80 BE 0F 7C D9 81 00 30 C8 A5 00 00 FF 6D 10 2A 81 61 00 30 81 81 00 34 81 1E 0E 7C 55 99 28 34 D8 41 00 28 FD 3B 06 F2 7F 59 42 14 CB 5A 00 08 FF 89 58 7A C8 3A 00 18 FD 09 07 7A CB BA 00 10 FC FB 02 72 FD 9C 2A 7A 7F 99 44 AE FD 67 DA 3A FD 4C 02 72 FC CB 06 B2 FD 1D 02 B2 FC 01 30 28 FC BC 02 F2 FD A0 40 28 FF 6D 28 28 FD BD D8 2A FC FD 68 28 FD 5B 38 2A FC 8A 18 00 40 85 04 00
+
+. 0 FE0D814 16
+. 83 9E 0F 80 C9 3C 00 00 FC 0A 22 78 4B FF FC 04
+
+. 0 FE0D424 20
+. FC 8D 00 2A 57 E0 F8 7F FC 20 68 90 FF 04 68 00 41 9A 03 90
+
+. 0 FE0D7C4 4
+. 41 A2 F2 94
+
+. 0 10006274 12
+. 7F 87 40 40 7C AA 2B 78 40 9C 00 40
+
+. 0 10006280 52
+. 7C 1F 41 D6 7D 66 1A 14 7D 86 1C AE C9 6B 00 08 54 00 20 36 7D A3 04 AE 7D 20 1A 14 7D A6 1D AE C8 09 00 08 D8 0B 00 08 7D 83 05 AE D9 69 00 08 48 00 00 0C
+
+. 0 100062B4 16
+. 7D 0A 40 50 55 4A F8 7E 7F 8A 40 40 40 9D FF F4
+
+. 0 10008FD8 44
+. 93 61 00 14 93 41 00 10 C8 15 00 00 CB E1 00 10 CB B7 00 00 FF FF 00 28 CB D6 00 00 FF FF F8 2A FF FB F8 24 FC 20 F8 90 48 01 FF 95
+
+. 0 FE0CAC0 8
+. 2F 89 00 00 40 9D 0C 9C
+
+. 0 FE0D760 16
+. 80 DE 0E 80 CB 46 00 00 FC 1A 08 28 4B FF F3 68
+
+. 0 FE0CAD4 16
+. D8 01 00 30 80 E1 00 30 81 01 00 34 40 9D 02 E8
+
+. 0 FE0CDC8 28
+. 90 E1 00 30 91 01 00 34 CB C6 00 00 C8 41 00 30 FF A2 F0 28 FD 5D F8 2A 4B FF FD 14
+
+. 0 FE0CAF4 88
+. FC 0A 02 B2 81 5E 0F 6C 80 FE 0F 70 55 04 28 34 CB 6A 00 00 CB 47 00 00 81 3E 0F 74 FC 8A 00 32 81 7E 0F 78 FD 60 D6 FA 80 BE 0F 7C C8 69 00 00 CB CB 00 00 CB A5 00 00 FC 20 1F BA 81 5E 0E 7C FD AB E8 3A 7D 24 54 AE 7D 24 52 14 FD 64 50 7A FC 0D 00 32 41 9D 00 08
+
+. 0 FE0CB4C 12
+. FD 20 48 50 C9 49 00 08 41 9D 00 08
+
+. 0 FE0CB58 64
+. FD 40 50 50 C9 A9 00 18 FC 29 00 32 CB 89 00 10 FC 8B 53 7A 81 1E 0F 80 C8 E8 00 00 FC 44 08 28 FD 9C 12 FA FC C9 60 2A FD 49 30 28 FC 20 30 90 FD 0C 50 2A FC A8 31 FA FC 05 30 00 41 A2 FE C4
+
+. 0 10009098 48
+. FC 1C 07 B2 39 00 00 00 7F 88 E0 40 FD 7C F7 7A FD A8 07 B2 FD 88 07 72 FC 1D 00 28 FD 2B 68 28 FD 40 60 28 FF C0 48 90 FF A0 50 90 40 9C 00 78
+
+. 0 100090C8 116
+. 7D 28 3A 14 7D 08 32 14 7C 09 DA 14 7D 3D 49 D6 7F 88 E0 40 7C 1D 01 D6 55 29 20 36 7D A9 FC AE 7D 69 FA 14 54 00 20 36 7D 40 FA 14 7C 1F 04 AE C9 6A 00 08 FD 89 02 F2 FD 8A 60 38 FC 09 00 32 FD AD 60 28 FD 6A 02 FA 7D BF 05 AE C8 0B 00 08 FC 00 58 28 D8 0A 00 08 7D A9 FC AE FD AD 60 2A 7D A9 FD AE C8 0B 00 08 FC 00 58 2A D8 0B 00 08 41 9C FF 90
+
+. 0 1000913C 12
+. 38 E7 00 01 7F 87 D8 40 41 9C FF 54
+
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 4, stride = 1
+. 0 FE0CAC8 28
+. 80 DE 0E 80 C9 46 00 00 FC 0A 08 2A D8 01 00 30 80 E1 00 30 81 01 00 34 40 9D 02 E8
+
+. 0 FE0CAE4 104
+. CB 86 00 00 C9 21 00 30 FF 69 E0 28 FD 5F D8 28 FC 0A 02 B2 81 5E 0F 6C 80 FE 0F 70 55 04 28 34 CB 6A 00 00 CB 47 00 00 81 3E 0F 74 FC 8A 00 32 81 7E 0F 78 FD 60 D6 FA 80 BE 0F 7C C8 69 00 00 CB CB 00 00 CB A5 00 00 FC 20 1F BA 81 5E 0E 7C FD AB E8 3A 7D 24 54 AE 7D 24 52 14 FD 64 50 7A FC 0D 00 32 41 9D 00 08
+
+. 0 FE0CB50 8
+. C9 49 00 08 41 9D 00 08
+
+. 0 FE0CB5C 60
+. C9 A9 00 18 FC 29 00 32 CB 89 00 10 FC 8B 53 7A 81 1E 0F 80 C8 E8 00 00 FC 44 08 28 FD 9C 12 FA FC C9 60 2A FD 49 30 28 FC 20 30 90 FD 0C 50 2A FC A8 31 FA FC 05 30 00 41 A2 FE C4
+
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 4, stride = 1
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 4, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 4, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 4, stride = 1
+. 0 FE0E368 60
+. 81 9E 0E 78 80 7E 0E 74 C8 8C 00 00 83 FE 0F EC FC 04 08 28 C8 23 00 00 C9 BF 00 00 FC C1 00 2A FD 46 01 B2 D8 C1 00 20 FD 60 30 28 FF 8A 68 00 FC 4B 08 2A D8 41 00 28 41 BC FE 70
+
+. 0 FE0BC68 8
+. 81 3E 0E 9C 4B FF FF 8C
+
+. 0 FE0BBF8 24
+. C9 A9 00 00 FD 7E 03 72 FD 8C 58 28 FC 81 60 2A FF 04 08 00 41 BA FF AC
+
+. 0 FE0BDD0 8
+. 81 3E 0E A8 4B FF FE C4
+
+. 0 FE0BC98 28
+. C9 29 00 00 FC 1E 02 72 FC 0D 00 28 C9 A1 00 10 FD 8D 00 2A FF 8D 60 00 41 9E 00 E4
+
+. 0 FE0BDF8 20
+. 81 3E 0E B0 C8 29 00 00 FD 7E 00 72 FC 0D 58 28 4B FF FF 7C
+
+. 0 FE0E34C 8
+. 2C 9F 00 03 41 86 05 AC
+
+. 0 FE0E910 20
+. 81 1E 0F EC D8 81 00 28 CB C8 00 00 FF 0A F0 00 40 98 00 A4
+
+. 0 FE0BBA0 24
+. 81 3E 0E 9C C8 E9 00 00 FD 9E 61 FA FC 81 60 2A FF 04 08 00 40 9A 00 5C
+
+. 0 FE0BD78 28
+. 81 3E 0E B0 C9 09 00 00 FC 1E 6A 3A C9 A1 00 10 FC AD 00 2A FC 8D 28 00 40 86 00 7C
+
+. 0 FE0D344 8
+. 2C 1F 00 03 40 82 FF E8
+
+. 0 FE0D358 192
+. FF C0 F0 50 FC 40 10 50 DB C1 00 20 81 5E 0E 80 80 FE 0F 6C C9 4A 00 00 80 DE 0F 70 FD 8A F0 2A 80 9E 0F 78 80 7E 0F 74 C8 27 00 00 FC CC 50 28 C9 66 00 00 CB A4 00 00 C8 03 00 00 FD BE 30 28 80 BE 0F 7C D9 81 00 30 C8 A5 00 00 FF 6D 10 2A 81 61 00 30 81 81 00 34 81 1E 0E 7C 55 99 28 34 D8 41 00 28 FD 3B 06 F2 7F 59 42 14 CB 5A 00 08 FF 89 58 7A C8 3A 00 18 FD 09 07 7A CB BA 00 10 FC FB 02 72 FD 9C 2A 7A 7F 99 44 AE FD 67 DA 3A FD 4C 02 72 FC CB 06 B2 FD 1D 02 B2 FC 01 30 28 FC BC 02 F2 FD A0 40 28 FF 6D 28 28 FD BD D8 2A FC FD 68 28 FD 5B 38 2A FC 8A 18 00 40 85 04 00
+
+. 0 FE0D7C8 8
+. FC 20 08 50 4B FF F5 F4
+
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 4, stride = 1
+. 0 10001EE4 16
+. 38 E7 00 01 7D 20 38 30 7F 80 F0 40 41 9C FF F4
+
+. 0 10001F1C 20
+. 55 20 07 FE 55 6B 08 3C 7D 6B 03 78 55 29 F8 7E 42 00 FF F0
+
+. 0 10001F6C 12
+. 7F 8A 58 40 7C A9 2B 78 40 9C 00 28
+
+. 0 10001F78 28
+. 7C 1B 59 D6 7D A8 FC AE 54 00 18 38 7C 1F 04 AE 7C 08 FD AE 7D BF 05 AE 48 00 00 0C
+
+. 0 10001F94 16
+. 7D 69 58 50 55 29 F8 7E 7F 89 58 40 40 9D FF F4
+
+PASS: fft_real_bitreverse_order, n = 4
+. 0 10006308 12
+. 7F 8A 58 40 7C A9 2B 78 40 9C 00 28
+
+. 0 10006314 28
+. 7C 04 59 D6 7D A8 1C AE 54 00 18 38 7C 03 04 AE 7C 08 1D AE 7D A3 05 AE 48 00 00 0C
+
+. 0 10006330 16
+. 7D 69 58 50 55 29 F8 7E 7F 89 58 40 40 9D FF F4
+
+. 0 1001266C 52
+. 7C 0B DA 14 7D 2B E9 D6 7D 6B E2 14 7C 00 E9 D6 55 29 18 38 7C 09 FC AE 54 00 18 38 7D 9F 04 AE FD A0 60 28 FC 00 60 2A 7C 09 FD AE 7D BF 05 AE 42 00 FF D0
+
+. 0 10012654 16
+. 57 5A F8 7F 7F 9B E3 78 57 9C 08 3C 41 82 00 40
+
+. 0 100127D0 8
+. 2F 9A 00 00 41 9E 00 28
+
+. 0 100127D8 36
+. 7F 49 03 A6 7D 25 E0 50 7C 09 E9 D6 7D 29 E2 14 54 00 18 38 7C 1F 04 AE FC 00 00 50 7C 1F 05 AE 42 00 FF E8
+
+. 0 1000E634 80
+. 7D 88 63 78 7D 67 61 D6 38 A5 00 01 7C E5 30 50 7F 87 28 40 55 09 18 38 55 60 18 38 7C 09 1C AE 7D A3 04 AE 55 6B 20 36 55 00 20 36 7D 4B 22 14 FD 80 68 50 7D 20 22 14 7C 04 05 AE 7D 08 62 14 D9 A9 00 08 7C 0B 25 AE D9 8A 00 08 41 9D FF B8
+
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 4, stride = 1
+. 0 1000E2C4 8
+. 2F 9A 00 00 41 9E 00 4C
+
+. 0 1000E2CC 72
+. 7D 7B 2A 14 FD A0 D8 90 39 40 00 00 7F 49 03 A6 7C 0A 2A 14 7D 2B E9 D6 7D 4A E2 14 7D 6B E2 14 7C 00 E9 D6 55 29 18 38 54 00 18 38 7C 1F 04 AE FC 00 00 2A 7C 1F 05 AE 7C 09 FC AE FC 00 03 72 7C 09 FD AE 42 00 FF CC
+
+. 0 1000E160 8
+. 2F 9A 00 00 41 9E 00 40
+
+. 0 1000E170 52
+. 7C 0B DA 14 7D 2B E9 D6 7D 6B E2 14 7C 00 E9 D6 55 29 18 38 7C 09 FC AE 54 00 18 38 7D 9F 04 AE FD A0 60 28 FC 00 60 2A 7C 09 FD AE 7D BF 05 AE 42 00 FF D0
+
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 4, stride = 1
+PASS: fft_complex_bitreverse_order, n = 4
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 4, stride = 2
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 4, stride = 2
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 4, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 4, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 4, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 4, stride = 2
+PASS: fft_real_bitreverse_order, n = 4
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 4, stride = 2
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 4, stride = 2
+PASS: fft_complex_bitreverse_order, n = 4
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 4, stride = 3
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 4, stride = 3
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 4, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 4, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 4, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 4, stride = 3
+PASS: fft_real_bitreverse_order, n = 4
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 4, stride = 3
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 4, stride = 3
+PASS: fft_complex_bitreverse_order, n = 8
+. 0 FE0DFB0 20
+. 80 FE 0F C4 FC 00 08 90 CB A7 00 00 FF 01 E8 00 41 99 00 08
+
+. 0 FE0DFC4 192
+. FC 00 08 50 81 3E 0E 80 80 9E 0F C8 C8 C9 00 00 80 7E 0F CC FC 66 00 2A 83 FE 0F D0 83 BE 0F D4 C8 83 00 00 FC 43 30 28 D8 61 00 30 C8 64 00 00 CB DF 00 00 FD 80 10 28 C8 5D 00 00 81 9E 0F D8 80 E1 00 30 81 01 00 34 FD 4C 03 32 C8 AC 00 00 80 DE 0E 7C 55 0A 28 34 81 1E 0F DC 7D 6A 32 14 FC 2A 20 FA C9 6B 00 08 C9 2B 00 18 FD 0C 02 B2 FC EA 17 BA FD A1 2A BA C8 2B 00 10 FD 08 61 FA FC 0D 02 B2 7D 4A 34 AE FD 68 02 F2 FD A1 00 32 FC E9 58 28 FD 8A 02 32 FC 07 68 28 C9 A8 00 00 FD 40 60 28 FD 61 50 2A FD 21 58 28 FC 20 58 90 FD 0A 48 2A FC E8 5B 7A FC 87 58 00 41 86 01 0C
+
+. 0 FE0E3A4 24
+. C8 A6 00 00 38 A0 00 01 FC 00 30 90 FC 86 28 00 FC A0 10 90 41 85 00 10
+
+. 0 FE0E3BC 188
+. FC 00 30 50 38 A0 00 00 FC A0 10 50 81 1E 0E 80 81 7E 0F C8 CB A8 00 00 81 5E 0F CC FC 7D 00 2A C9 AB 00 00 C8 8A 00 00 80 9E 0F D8 FD 83 E8 28 D8 61 00 30 80 E1 00 30 81 01 00 34 81 3E 0F D4 FD 80 60 28 80 FE 0F D0 CB C4 00 00 55 00 28 34 C9 07 00 00 FD 4C 03 32 C8 E9 00 00 80 7E 0E 7C 7F E0 1A 14 FD 2A 23 7A FC 2A 3A 3A C8 FF 00 08 FD 69 F2 BA 7D 23 04 AE CB DF 00 18 FC 6C 02 B2 FF AB 02 B2 FC 03 28 7A C8 3F 00 10 FC 8C E9 7A CB A6 00 00 FD 40 60 2A FD A9 01 32 FD 0A 3F BA FD 68 68 28 FC 81 5A BA FC 29 20 2A FC 69 08 28 FD 44 18 2A FF 8A E8 00 40 9D 02 08
+
+. 0 FE0E67C 24
+. 83 FE 0F F8 83 BE 0F F0 C9 3F 00 00 C9 7D 00 00 FD 4A 5A 78 4B FF FD FC
+
+. 0 FE0E48C 12
+. FF C1 50 2A FF 1E 08 00 40 9A 05 20
+
+. 0 FE0E498 8
+. 2F 85 00 00 40 BE FC F0
+
+. 0 FE0E4A0 8
+. FC 20 08 50 4B FF FC E8
+
+. 0 FE0CCEC 200
+. 81 5E 0E 80 83 BE 0E 74 C9 4A 00 00 C9 7D 00 00 FC 2A 60 28 FC 60 58 50 FC 01 50 28 D8 21 00 30 81 61 00 30 81 81 00 34 FD 20 60 2A FD 43 48 28 FD 0A 02 B2 80 DE 0F 6C 80 BE 0F 70 55 80 28 34 80 9E 0F 78 80 7E 0F 74 C8 46 00 00 FD 2A 02 32 C8 E5 00 00 CB 64 00 00 CB 83 00 00 FF 48 38 BA 80 FE 0F 7C FF A8 E6 FA 81 1E 0E 7C C9 67 00 00 7D 20 42 14 7D A8 04 AE FC 3A 5A 3A C8 49 00 10 C8 89 00 18 FC C9 57 7A C9 49 00 08 81 9E 0F 90 FC 01 02 32 CB 4C 00 00 FC 66 02 B2 FD 82 00 32 FC A4 18 28 FF CD 01 B2 FC E5 60 28 FF 67 F0 28 FD 82 D8 2A FF 82 60 28 FF BB E0 2A FD 1D 66 BA FC 08 60 00 40 82 08 10
+
+. 0 FE0E9C4 24
+. C8 26 00 00 38 A0 00 01 FC 00 10 90 FC 82 08 00 FC C0 20 90 41 85 00 10
+
+. 0 FE0E9DC 188
+. FC 00 10 50 38 A0 00 00 FC C0 20 50 80 7E 0E 80 83 FE 0F C8 CB A3 00 00 83 BE 0F CC FD 3D 00 2A C9 1F 00 00 C8 FD 00 00 81 7E 0F D8 FD 89 E8 28 D9 21 00 30 80 E1 00 30 81 01 00 34 81 9E 0F D0 FD 80 60 28 55 00 28 34 81 1E 0F D4 C8 0B 00 00 FF CC 03 32 C8 2C 00 00 C9 48 00 00 81 5E 0E 7C FC BE 3A 3A 7C E0 52 14 FD 7E 50 7A C8 27 00 18 C9 47 00 08 FD A5 07 BA 7C AA 04 AE FD 2C 07 B2 FF AD 07 B2 FC E9 32 FA C9 27 00 10 FD 0C E9 BA CB A6 00 00 FC 07 60 2A FC C5 02 32 FF C0 50 7A FD 7E 30 28 FD A9 58 3A FC 25 68 2A FD 85 08 28 FD 4D 60 2A FF 0A E8 00 40 99 02 E8
+
+. 0 FE0ED7C 16
+. 80 9E 0F F8 C9 64 00 00 FC 0A 1A F8 4B FF FD 1C
+
+. 0 FE0EAA4 12
+. FC 61 00 2A FF 83 08 00 41 9E F9 EC
+
+. 0 FE0E6D8 8
+. 2F 9F 00 00 40 BE FC 78
+
+. 0 FE0E7AC 24
+. 81 7E 10 04 C8 2B 00 00 FC 0A 18 7A FC 6D 00 2A FF 03 68 00 40 9A 03 04
+
+. 0 FE0CE84 8
+. 2F 9F 00 00 40 9E 04 A8
+
+. 0 FE0CEA0 20
+. 80 DE 0F 98 D8 41 00 28 C8 C6 00 00 FC 8A 30 00 40 84 0A B8
+
+. 0 FE0D968 24
+. CB 5D 00 00 3B E0 00 01 FD A0 F0 90 FC 1E D0 00 FC C0 10 90 41 81 00 10
+
+. 0 FE0D98C 180
+. 81 7E 0E 80 80 BE 0F 6C C8 2B 00 00 80 9E 0F 70 FF 41 68 2A C8 E5 00 00 C9 24 00 00 80 7E 0F 7C DB 41 00 30 C9 61 00 30 80 FE 0F 78 FD 8B 08 28 80 DE 0F 74 C8 A3 00 00 C8 67 00 00 FF 4D 60 28 C9 A6 00 00 81 21 00 30 81 41 00 34 81 1E 0E 7C FD 5A 06 B2 55 5C 28 34 7F 7C 42 14 C9 7B 00 18 FF 6A 49 FA C8 FB 00 08 FD 0A 68 FA FF 9B 2A BA 7F 7C 44 AE FF BA 02 B2 FC 1C 02 B2 FC 3D 32 3A CB BB 00 10 C9 1D 00 00 FD 3A 01 BA FD A1 D0 2A FC 7B 02 72 FC AD 3A FA FF 85 18 28 FD 9D E3 7A FD BB 60 2A FC 1B 68 28 FD 4C 00 2A FE 0A 40 00 40 91 02 D8
+
+. 0 FE0DD14 16
+. 81 9E 0F B4 C8 0C 00 00 FC 0A 20 38 4B FF FD 2C
+
+. 0 FE0DA4C 12
+. FC 8D 00 2A FF 84 68 00 40 9E 00 58
+
+. 0 FE0DA58 12
+. 2F 9F 00 00 FC 20 68 90 40 BE EF F8
+
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 8, stride = 1
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 8, stride = 1
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 8, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 8, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 8, stride = 1
+. 0 FE0DFC8 188
+. 81 3E 0E 80 80 9E 0F C8 C8 C9 00 00 80 7E 0F CC FC 66 00 2A 83 FE 0F D0 83 BE 0F D4 C8 83 00 00 FC 43 30 28 D8 61 00 30 C8 64 00 00 CB DF 00 00 FD 80 10 28 C8 5D 00 00 81 9E 0F D8 80 E1 00 30 81 01 00 34 FD 4C 03 32 C8 AC 00 00 80 DE 0E 7C 55 0A 28 34 81 1E 0F DC 7D 6A 32 14 FC 2A 20 FA C9 6B 00 08 C9 2B 00 18 FD 0C 02 B2 FC EA 17 BA FD A1 2A BA C8 2B 00 10 FD 08 61 FA FC 0D 02 B2 7D 4A 34 AE FD 68 02 F2 FD A1 00 32 FC E9 58 28 FD 8A 02 32 FC 07 68 28 C9 A8 00 00 FD 40 60 28 FD 61 50 2A FD 21 58 28 FC 20 58 90 FD 0A 48 2A FC E8 5B 7A FC 87 58 00 41 86 01 0C
+
+. 0 FE0D980 192
+. FD A0 F0 50 3B E0 00 00 FC C0 10 50 81 7E 0E 80 80 BE 0F 6C C8 2B 00 00 80 9E 0F 70 FF 41 68 2A C8 E5 00 00 C9 24 00 00 80 7E 0F 7C DB 41 00 30 C9 61 00 30 80 FE 0F 78 FD 8B 08 28 80 DE 0F 74 C8 A3 00 00 C8 67 00 00 FF 4D 60 28 C9 A6 00 00 81 21 00 30 81 41 00 34 81 1E 0E 7C FD 5A 06 B2 55 5C 28 34 7F 7C 42 14 C9 7B 00 18 FF 6A 49 FA C8 FB 00 08 FD 0A 68 FA FF 9B 2A BA 7F 7C 44 AE FF BA 02 B2 FC 1C 02 B2 FC 3D 32 3A CB BB 00 10 C9 1D 00 00 FD 3A 01 BA FD A1 D0 2A FC 7B 02 72 FC AD 3A FA FF 85 18 28 FD 9D E3 7A FD BB 60 2A FC 1B 68 28 FD 4C 00 2A FE 0A 40 00 40 91 02 D8
+
+. 0 FE0DA64 8
+. FC 20 08 50 4B FF F3 58
+
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 8, stride = 1
+PASS: fft_real_bitreverse_order, n = 8
+. 0 100127E0 28
+. 7C 09 E9 D6 7D 29 E2 14 54 00 18 38 7C 1F 04 AE FC 00 00 50 7C 1F 05 AE 42 00 FF E8
+
+. 0 100126F4 44
+. FC 1C 07 B2 2F 9A 00 00 FD 7C F7 7A FD A4 07 B2 FD 84 07 72 FC 1D 00 28 FC AB 68 28 FC C0 60 28 FF C0 28 90 FF A0 30 90 41 9E 00 A0
+
+. 0 10012720 156
+. 7F 49 03 A6 7D 06 E0 50 38 E0 00 00 7D 48 E9 D6 7C 07 DA 14 7D 60 32 14 7C 06 00 50 7D 27 32 14 7D 6B E9 D6 55 4A 18 38 7D 4A FC AE 7C E7 E2 14 7D 08 E2 14 FD 66 02 B2 7C 00 E9 D6 55 6B 18 38 7C 0B FC AE FD 45 02 B2 FC E5 00 32 7D 29 E9 D6 54 00 18 38 7D 1F 04 AE FC 06 00 32 FD A8 58 28 55 29 18 38 FD 6B 40 2A 7D 29 FC AE FD AD 38 28 FD 89 00 28 FC 00 48 2A FD 6B 38 2A FD 8C 50 2A FC 00 50 28 FD A0 68 50 7C 09 FD AE 7D 6A FD AE 7D 9F 05 AE 7D AB FD AE 42 00 FF 74
+
+. 0 100127BC 12
+. 38 C6 00 01 7F 85 30 40 41 9D FF 30
+
+. 0 1000E638 76
+. 7D 67 61 D6 38 A5 00 01 7C E5 30 50 7F 87 28 40 55 09 18 38 55 60 18 38 7C 09 1C AE 7D A3 04 AE 55 6B 20 36 55 00 20 36 7D 4B 22 14 FD 80 68 50 7D 20 22 14 7C 04 05 AE 7D 08 62 14 D9 A9 00 08 7C 0B 25 AE D9 8A 00 08 41 9D FF B8
+
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 8, stride = 1
+. 0 1000E1F8 44
+. FC 1C 07 B2 2F 9A 00 00 FD 7C F7 7A FD A5 07 B2 FD 85 07 72 FC 1D 00 28 FC CB 68 28 FC E0 60 28 FF C0 30 90 FF A0 38 90 41 9E 00 90
+
+. 0 1000E224 140
+. 7F 49 03 A6 7D 06 E0 50 38 E0 00 00 7C 07 DA 14 7D 27 32 14 7D 40 32 14 7C 06 00 50 7D 4A E9 D6 7C E7 E2 14 7D 68 E9 D6 55 4A 18 38 7D 0A FC AE 7D 08 E2 14 FD 00 40 50 7D 29 E9 D6 55 6B 18 38 7C 0B FC AE FD 80 40 28 7C 00 E9 D6 55 29 18 38 7D 69 FC AE FC 00 40 2A 54 00 18 38 7D 3F 04 AE FD AB 48 28 FD 6B 48 2A FD 26 03 32 FD 46 03 72 7D 69 FD AE 7C 1F 05 AE FD A7 4B 78 FD 87 53 3A 7D AA FD AE 7D 8B FD AE 42 00 FF 84
+
+. 0 1000E2B0 12
+. 38 C6 00 01 7F 85 30 40 41 9D FF 40
+
+. 0 1000E2DC 56
+. 7C 0A 2A 14 7D 2B E9 D6 7D 4A E2 14 7D 6B E2 14 7C 00 E9 D6 55 29 18 38 54 00 18 38 7C 1F 04 AE FC 00 00 2A 7C 1F 05 AE 7C 09 FC AE FC 00 03 72 7C 09 FD AE 42 00 FF CC
+
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 8, stride = 1
+PASS: fft_complex_bitreverse_order, n = 8
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 8, stride = 2
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 8, stride = 2
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 8, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 8, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 8, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 8, stride = 2
+PASS: fft_real_bitreverse_order, n = 8
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 8, stride = 2
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 8, stride = 2
+PASS: fft_complex_bitreverse_order, n = 8
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 8, stride = 3
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 8, stride = 3
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 8, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 8, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 8, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 8, stride = 3
+PASS: fft_real_bitreverse_order, n = 8
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 8, stride = 3
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 8, stride = 3
+. 0 FEC184C 36
+. 7C C3 50 16 54 C0 E8 FE 1C A0 00 0A 2C 00 00 00 7D 25 18 50 7C 03 03 78 7D 69 40 AE 9D 64 FF FF 40 82 FF E0
+
+PASS: fft_complex_bitreverse_order, n = 16
+. 0 FE0E3C8 176
+. 81 1E 0E 80 81 7E 0F C8 CB A8 00 00 81 5E 0F CC FC 7D 00 2A C9 AB 00 00 C8 8A 00 00 80 9E 0F D8 FD 83 E8 28 D8 61 00 30 80 E1 00 30 81 01 00 34 81 3E 0F D4 FD 80 60 28 80 FE 0F D0 CB C4 00 00 55 00 28 34 C9 07 00 00 FD 4C 03 32 C8 E9 00 00 80 7E 0E 7C 7F E0 1A 14 FD 2A 23 7A FC 2A 3A 3A C8 FF 00 08 FD 69 F2 BA 7D 23 04 AE CB DF 00 18 FC 6C 02 B2 FF AB 02 B2 FC 03 28 7A C8 3F 00 10 FC 8C E9 7A CB A6 00 00 FD 40 60 2A FD A9 01 32 FD 0A 3F BA FD 68 68 28 FC 81 5A BA FC 29 20 2A FC 69 08 28 FD 44 18 2A FF 8A E8 00 40 9D 02 08
+
+. 0 FE0EA98 24
+. 80 DE 0F F8 C8 C6 00 00 FC 0A 19 BA FC 61 00 2A FF 83 08 00 41 9E F9 EC
+
+. 0 FE0D418 32
+. 83 7E 0F 80 C8 7B 00 00 FC 0A 20 FA FC 8D 00 2A 57 E0 F8 7F FC 20 68 90 FF 04 68 00 41 9A 03 90
+
+. 0 FE0E9E8 176
+. 80 7E 0E 80 83 FE 0F C8 CB A3 00 00 83 BE 0F CC FD 3D 00 2A C9 1F 00 00 C8 FD 00 00 81 7E 0F D8 FD 89 E8 28 D9 21 00 30 80 E1 00 30 81 01 00 34 81 9E 0F D0 FD 80 60 28 55 00 28 34 81 1E 0F D4 C8 0B 00 00 FF CC 03 32 C8 2C 00 00 C9 48 00 00 81 5E 0E 7C FC BE 3A 3A 7C E0 52 14 FD 7E 50 7A C8 27 00 18 C9 47 00 08 FD A5 07 BA 7C AA 04 AE FD 2C 07 B2 FF AD 07 B2 FC E9 32 FA C9 27 00 10 FD 0C E9 BA CB A6 00 00 FC 07 60 2A FC C5 02 32 FF C0 50 7A FD 7E 30 28 FD A9 58 3A FC 25 68 2A FD 85 08 28 FD 4D 60 2A FF 0A E8 00 40 99 02 E8
+
+. 0 FE0DA40 24
+. 83 1E 0F B4 C8 D8 00 00 FC 0A 21 BA FC 8D 00 2A FF 84 68 00 40 9E 00 58
+
+. 0 FE0C910 108
+. FC E1 00 72 83 1E 0E 70 81 5E 0E 6C CB 98 00 00 CB AA 00 00 81 1E 0E 68 FD 47 00 72 80 FE 0E 64 FF 5C E9 FA CB 68 00 00 C9 27 00 00 81 7E 0E 60 81 3E 0F 54 FC DA D9 FA C8 AB 00 00 C9 A9 00 00 FD 06 49 FA FC C8 01 F2 FC 26 28 2A FD 81 02 B2 FD 7F 60 2A FC 9F 58 28 FC 20 58 90 FC 64 60 2A FC 43 5B 7A FC 82 58 00 41 86 00 E0
+
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 16, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 16, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 16, stride = 1
+PASS: fft_real_bitreverse_order, n = 16
+. 0 1001272C 144
+. 7D 48 E9 D6 7C 07 DA 14 7D 60 32 14 7C 06 00 50 7D 27 32 14 7D 6B E9 D6 55 4A 18 38 7D 4A FC AE 7C E7 E2 14 7D 08 E2 14 FD 66 02 B2 7C 00 E9 D6 55 6B 18 38 7C 0B FC AE FD 45 02 B2 FC E5 00 32 7D 29 E9 D6 54 00 18 38 7D 1F 04 AE FC 06 00 32 FD A8 58 28 55 29 18 38 FD 6B 40 2A 7D 29 FC AE FD AD 38 28 FD 89 00 28 FC 00 48 2A FD 6B 38 2A FD 8C 50 2A FC 00 50 28 FD A0 68 50 7C 09 FD AE 7D 6A FD AE 7D 9F 05 AE 7D AB FD AE 42 00 FF 74
+
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 16, stride = 1
+. 0 1000E230 128
+. 7C 07 DA 14 7D 27 32 14 7D 40 32 14 7C 06 00 50 7D 4A E9 D6 7C E7 E2 14 7D 68 E9 D6 55 4A 18 38 7D 0A FC AE 7D 08 E2 14 FD 00 40 50 7D 29 E9 D6 55 6B 18 38 7C 0B FC AE FD 80 40 28 7C 00 E9 D6 55 29 18 38 7D 69 FC AE FC 00 40 2A 54 00 18 38 7D 3F 04 AE FD AB 48 28 FD 6B 48 2A FD 26 03 32 FD 46 03 72 7D 69 FD AE 7C 1F 05 AE FD A7 4B 78 FD 87 53 3A 7D AA FD AE 7D 8B FD AE 42 00 FF 84
+
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 16, stride = 1
+. 0 FEEF788 16
+. 57 3A D1 BE 2B 1A 00 20 3B 1A 00 38 41 99 01 8C
+
+. 0 FEEF798 8
+. 71 40 00 01 41 82 01 CC
+
+. 0 FEEF97C 24
+. 57 15 18 38 7F F5 E2 14 39 3F 00 30 83 A9 00 0C 7F 9D 48 00 41 9E 00 14
+
+PASS: fft_complex_bitreverse_order, n = 16
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 16, stride = 2
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 16, stride = 2
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 16, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 16, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 16, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 16, stride = 2
+PASS: fft_real_bitreverse_order, n = 16
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 16, stride = 2
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 16, stride = 2
+PASS: fft_complex_bitreverse_order, n = 16
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 16, stride = 3
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 16, stride = 3
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 16, stride = 3
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 16, stride = 3
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 16, stride = 3
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 16, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 16, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 16, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 16, stride = 3
+PASS: fft_real_bitreverse_order, n = 16
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 16, stride = 3
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 16, stride = 3
+PASS: fft_complex_bitreverse_order, n = 32
+. 0 FE0E478 32
+. 81 9E 0F F8 80 DE 0F F0 C8 0C 00 00 C8 A6 00 00 FD 4A 28 3A FF C1 50 2A FF 1E 08 00 40 9A 05 20
+
+. 0 FE0DAAC 20
+. CB BD 00 00 FF 80 10 90 FC A0 F0 90 FF 9E E8 00 41 9D 00 08
+
+. 0 FE0DAC0 36
+. FC A0 F0 50 C9 4B 00 00 FC 6A 28 2A FD 03 50 28 D8 61 00 30 81 21 00 30 81 41 00 34 FC 25 40 28 41 9D 00 08
+
+. 0 FE0DAE4 192
+. FF 80 10 50 FC A1 00 72 C8 06 00 00 C8 87 00 00 55 40 28 34 83 FE 0F 84 7C C0 42 14 C8 66 00 10 FF 65 01 3A C9 5F 00 00 C9 06 00 18 FC 41 01 72 C8 E6 00 08 C9 85 00 00 FD A3 50 2A CB 44 00 00 FD 21 50 2A C8 03 00 00 FC C2 06 F2 FC 4D 50 28 FF 65 D3 3A FD 66 3A 3A FC 83 10 28 FD 29 50 28 FC E3 59 BA 7C C8 04 AE FF 44 40 2A FD 61 48 28 FD 9B 01 7A FD 1A 38 7A FD 46 00 72 FD 22 02 72 FC 6B E0 2A FF 6C 01 72 FC 02 40 FA FC 8A 07 32 FC E9 30 2A FF 46 06 F2 FD 60 20 28 FC 26 38 28 FD AB D0 28 FD 81 48 2A FC AD 60 2A FC 27 28 2A FC 47 08 28 FC 85 10 2A FC 84 E8 00 40 85 02 14
+
+. 0 FE0DDB4 20
+. 80 7E 0F 88 FC 1F E8 00 C9 03 00 00 FC 84 02 32 40 81 00 F0
+
+. 0 FE0DEB4 8
+. 81 3E 0F B8 4B FF FF 14
+
+. 0 FE0DDCC 16
+. C9 69 00 00 FC 7F 02 F2 FC 04 18 28 4B FF FD EC
+
+. 0 FE0DBC4 12
+. FC C1 00 2A FE 06 08 00 41 B2 F5 78
+
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 32, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 32, stride = 1
+. 0 FE0DAC4 32
+. C9 4B 00 00 FC 6A 28 2A FD 03 50 28 D8 61 00 30 81 21 00 30 81 41 00 34 FC 25 40 28 41 9D 00 08
+
+. 0 FE0DAE8 188
+. FC A1 00 72 C8 06 00 00 C8 87 00 00 55 40 28 34 83 FE 0F 84 7C C0 42 14 C8 66 00 10 FF 65 01 3A C9 5F 00 00 C9 06 00 18 FC 41 01 72 C8 E6 00 08 C9 85 00 00 FD A3 50 2A CB 44 00 00 FD 21 50 2A C8 03 00 00 FC C2 06 F2 FC 4D 50 28 FF 65 D3 3A FD 66 3A 3A FC 83 10 28 FD 29 50 28 FC E3 59 BA 7C C8 04 AE FF 44 40 2A FD 61 48 28 FD 9B 01 7A FD 1A 38 7A FD 46 00 72 FD 22 02 72 FC 6B E0 2A FF 6C 01 72 FC 02 40 FA FC 8A 07 32 FC E9 30 2A FF 46 06 F2 FD 60 20 28 FC 26 38 28 FD AB D0 28 FD 81 48 2A FC AD 60 2A FC 27 28 2A FC 47 08 28 FC 85 10 2A FC 84 E8 00 40 85 02 14
+
+. 0 FE0DDC8 20
+. 81 3E 0F 9C C9 69 00 00 FC 7F 02 F2 FC 04 18 28 4B FF FD EC
+
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 32, stride = 1
+PASS: fft_real_bitreverse_order, n = 32
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 32, stride = 1
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 32, stride = 1
+PASS: fft_complex_bitreverse_order, n = 32
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 32, stride = 2
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 32, stride = 2
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 32, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 32, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 32, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 32, stride = 2
+PASS: fft_real_bitreverse_order, n = 32
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 32, stride = 2
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 32, stride = 2
+PASS: fft_complex_bitreverse_order, n = 32
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 32, stride = 3
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 32, stride = 3
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 32, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 32, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 32, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 32, stride = 3
+PASS: fft_real_bitreverse_order, n = 32
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 32, stride = 3
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 32, stride = 3
+PASS: fft_complex_bitreverse_order, n = 64
+. 0 FE0D5C0 16
+. C9 9F 00 00 FD A0 F8 90 FC 9F 60 00 41 85 00 08
+
+. 0 FE0D5D0 24
+. FD A0 F8 50 CB DC 00 00 FC FE 68 28 FF 07 60 00 4F 59 D3 82 41 9A 01 EC
+
+. 0 FE0D7D0 36
+. C8 CA 00 00 C9 5D 00 00 FD 86 38 2A FC AC 30 28 D9 81 00 30 81 41 00 30 81 61 00 34 FC 47 28 28 4B FF FE 20
+
+. 0 FE0D610 200
+. FD 22 00 B2 C8 E5 00 00 CB C6 00 00 55 6B 28 34 CB 87 00 00 7C AB 42 14 80 FE 0F 84 FF A9 3F BA C8 83 00 00 CB 47 00 00 C8 C4 00 00 FF 62 D0 2A C8 65 00 08 FD 7D E2 7A 7F AB 44 AE 80 7E 0F 88 FD 9D D0 2A FD 1B D0 28 FC 2B 02 72 C9 65 00 18 FF CC D0 28 FC 02 40 28 FD A2 0A BA C8 25 00 10 FC E0 50 2A FC BD F0 28 FF 81 03 72 FF 69 21 BA FF 42 02 72 FC 9E 02 32 FD 45 18 2A FD 9E 01 F2 FC 0B E0 28 FD 3A 06 F2 CB 43 00 00 FC 6A 00 B2 FF 81 20 28 FD A0 60 28 FC BD 02 72 FF C1 E0 28 FC CD 18 28 FC FE 20 28 FC 46 28 28 FF 62 38 2A FC 3C D8 2A FD 7C 08 28 FF BB 58 2A FD 5D 0E BA FE 0A 08 00 41 92 00 68
+
+. 0 FE0D73C 12
+. C8 1F 00 00 FF 9F 00 00 41 BD F3 14
+
+. 0 FE0D748 4
+. 4B FF FA 08
+
+. 0 FE0E9B4 12
+. FC 20 30 90 FC 60 F8 90 4B FF D4 79
+
+. 0 FE0BE34 12
+. 94 21 FF B0 7C 08 02 A6 48 06 72 75
+
+. 0 FE0BE40 56
+. 93 C1 00 30 7F C8 02 A6 93 E1 00 34 DB C1 00 40 FF C0 08 90 DB A1 00 38 FF A0 18 90 83 FE 0E B8 DB E1 00 48 FC A0 F0 90 C8 3F 00 00 90 01 00 54 FF 9E 08 00 41 9D 00 08
+
+. 0 FE0BE78 40
+. FC A0 F0 50 81 3E 0E 80 C8 69 00 00 FD A3 28 2A FC 0D 18 28 D9 A1 00 20 80 E1 00 20 81 01 00 24 FC 65 00 28 41 9D 00 08
+
+. 0 FE0BEA0 216
+. FC 40 10 50 FC A3 00 F2 81 3E 0E BC 81 9E 0E C0 55 04 28 34 CB E9 00 00 C9 AC 00 00 81 5E 0E 7C FC E3 01 72 80 FE 0E C4 FC 85 6F FA 7C C4 52 14 C9 47 00 00 CB E6 00 10 C9 86 00 08 FD 23 50 2A FD 07 01 32 C8 86 00 18 80 BE 0E C8 FD 7F 50 2A 81 7E 0E CC C8 C5 00 00 FD 29 50 28 FD A8 61 3A C8 0B 00 00 80 7E 0E D0 FD 6B 50 28 FD 85 01 BA C8 03 00 00 FC FF 58 28 FC DF 6A 3A 7D A4 54 AE FD 47 20 2A FD 03 48 28 FF EC 01 7A FC EA 30 FA FC 8D 00 F2 FD 2B 02 72 FC C8 10 2A FC 7F 01 72 FC 0B 39 BA FD 44 00 B2 FC A9 68 2A FD 8D 00 F2 FD 0D 28 28 FF E0 50 28 FC E8 48 2A FC DF 60 28 FD 66 38 2A FD A5 58 2A FC 85 68 28 FC AB 20 2A FF 85 08 00 40 9D 00 68
+
+. 0 FE0BF78 20
+. FC 1D 08 00 81 1E 0E D4 C8 28 00 00 FC A5 00 72 40 81 00 D8
+
+. 0 FE0C060 16
+. 81 3E 0E DC C8 69 00 00 FC 1D 28 FA 4B FF FF 2C
+
+. 0 FE0BF98 12
+. FC AD 00 2A FF 05 68 00 40 9A 00 6C
+
+. 0 FE0BFA4 16
+. CB BF 00 00 FC 20 68 90 FC 1E E8 00 41 81 00 08
+
+. 0 FE0BFB4 40
+. FC 20 68 50 83 E1 00 54 83 C1 00 30 7F E8 03 A6 CB A1 00 38 83 E1 00 34 CB C1 00 40 CB E1 00 48 38 21 00 50 4E 80 00 20
+
+. 0 FE0E9C0 4
+. 4B FF F7 CC
+
+. 0 FE0EAB0 16
+. FC 20 10 90 FC 60 F8 90 FC 40 20 90 4B FF D3 79
+
+. 0 FE0BFDC 20
+. 80 7E 0E D4 FC 9D 08 00 C9 03 00 00 FC A5 02 32 41 85 00 6C
+
+. 0 FE0BFF0 28
+. 81 3E 0E DC C9 49 00 00 FD 3D 02 B2 FC 05 48 28 FC AD 00 2A FF 05 68 00 41 BA FF 9C
+
+. 0 FE0EAC0 4
+. 4B FF F6 CC
+
+. 0 FE0ED6C 16
+. 81 3E 0F DC C8 E9 00 00 FC 0D 19 F8 4B FF FC 1C
+
+. 0 FE0E994 12
+. FC 61 00 2A FC 03 08 00 41 82 F7 F0
+
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 64, stride = 1
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 64, stride = 1
+. 0 FE0D5D4 20
+. CB DC 00 00 FC FE 68 28 FF 07 60 00 4F 59 D3 82 41 9A 01 EC
+
+. 0 FE0BF8C 24
+. 81 3E 0E D8 C8 69 00 00 FC 1D 28 FA FC AD 00 2A FF 05 68 00 40 9A 00 6C
+
+. 0 FE0C058 8
+. 81 3E 0E D8 4B FF FF 98
+
+. 0 FE0BFF4 24
+. C9 49 00 00 FD 3D 02 B2 FC 05 48 28 FC AD 00 2A FF 05 68 00 41 BA FF 9C
+
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 64, stride = 1
+PASS: fft_real_bitreverse_order, n = 64
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 64, stride = 1
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 64, stride = 1
+PASS: fft_complex_bitreverse_order, n = 64
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 64, stride = 2
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 64, stride = 2
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 64, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 64, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 64, stride = 2
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 64, stride = 2
+PASS: fft_real_bitreverse_order, n = 64
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 64, stride = 2
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 64, stride = 2
+. 0 FEEF920 16
+. 57 3B BA 7E 28 1B 00 14 3B 1B 00 5B 40 A1 FE 6C
+
+PASS: fft_complex_bitreverse_order, n = 64
+PASS: gsl_fft_complex_radix2_forward with signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_radix2_forward avoids unstrided data, n = 64, stride = 3
+PASS: gsl_fft_complex_radix2_inverse with signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_radix2_inverse other data untouched, n = 64, stride = 3
+PASS: gsl_fft_complex_radix2_backward with signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_radix2_backward avoids unstrided data, n = 64, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_pulse, n = 64, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_constant, n = 64, stride = 3
+PASS: gsl_fft_complex_radix2_forward with signal_exp, n = 64, stride = 3
+PASS: fft_real_bitreverse_order, n = 64
+PASS: gsl_fft_real_radix2 with signal_real_noise, n = 64, stride = 3
+PASS: gsl_fft_halfcomplex_radix2 with data from signal_noise, n = 64, stride = 3
+. 0 100046EC 12
+. 7F 9F E3 78 7F 9F F0 40 41 9D 00 50
+
+. 0 100046F8 16
+. 3B A0 00 01 7F A3 EB 78 7F E4 FB 78 4B FF C5 E1
+
+. 0 10000CE4 108
+. 94 21 FF B0 7C 08 02 A6 93 81 00 38 7F 84 19 D6 93 A1 00 3C 93 41 00 30 7C 7A 1B 78 93 E1 00 44 3B E0 00 00 7D 80 00 26 57 9D 20 36 7F A3 EB 78 90 01 00 54 91 81 00 10 57 9C 08 3C 92 E1 00 24 93 01 00 28 93 21 00 2C 93 61 00 34 93 C1 00 40 7C 9E 23 78 DB E1 00 48 92 61 00 14 92 81 00 18 92 A1 00 1C 92 C1 00 20 48 02 81 E9
+
+. 0 10000D50 12
+. 7C 7B 1B 78 7F A3 EB 78 48 02 81 DD
+
+. 0 10000D5C 12
+. 7C 79 1B 78 7F A3 EB 78 48 02 81 D1
+
+. 0 10000D68 12
+. 7C 78 1B 78 7F A3 EB 78 48 02 81 C5
+
+. 0 10000D74 12
+. 2B 9C 00 00 7C 77 1B 78 40 9D 00 68
+
+. 0 10000D80 100
+. 3D 60 10 02 3D 20 10 02 C9 0B 81 00 3D 60 10 02 39 6B 80 F8 C8 E9 80 E8 7F 89 03 A6 3D 20 10 02 C9 4B 00 00 C9 29 81 08 3D 20 43 30 93 E1 00 0C 57 E0 18 38 91 21 00 08 3B FF 00 01 C8 01 00 08 FC 00 50 28 FD A0 48 2A 7C 1B 05 AE FD 80 38 2A FD 60 40 2A 7D B7 05 AE 7D 99 05 AE 7D 78 05 AE 42 00 FF CC
+
+. 0 10000DAC 56
+. 93 E1 00 0C 57 E0 18 38 91 21 00 08 3B FF 00 01 C8 01 00 08 FC 00 50 28 FD A0 48 2A 7C 1B 05 AE FD 80 38 2A FD 60 40 2A 7D B7 05 AE 7D 99 05 AE 7D 78 05 AE 42 00 FF CC
+
+. 0 10000DE4 8
+. 38 60 00 00 48 01 46 0D
+
+. 0 10000DEC 8
+. 7F C3 F3 78 48 00 5A 51
+
+. 0 10006840 128
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 60 7F C8 02 A6 92 E1 00 44 7C 77 1B 79 90 01 00 94 DB 61 00 68 80 1E FF F0 DB 81 00 70 DB A1 00 78 7F C0 F2 14 DB C1 00 80 DB E1 00 88 91 C1 00 20 91 E1 00 24 92 01 00 28 92 21 00 2C 92 41 00 30 92 61 00 34 92 81 00 38 92 A1 00 3C 92 C1 00 40 93 01 00 48 93 21 00 4C 93 41 00 50 93 61 00 54 93 81 00 58 93 A1 00 5C 93 E1 00 64 41 82 01 DC
+
+. 0 100068C0 8
+. 38 60 02 0C 48 02 26 71
+
+. 0 100068C8 12
+. 7C 69 1B 79 91 21 00 14 41 82 02 44
+
+. 0 100068D4 8
+. 56 E3 20 36 48 02 26 5D
+
+. 0 100068DC 16
+. 81 21 00 14 2F 83 00 00 90 69 02 08 41 9E 02 48
+
+. 0 100068EC 24
+. 81 21 00 14 7E E3 BB 78 38 81 00 10 92 E9 00 00 38 A9 00 08 4B FF FE 69
+
+. 0 10006768 116
+. 94 21 FF C0 7C 08 02 A6 42 9F 00 05 93 C1 00 38 7F C8 02 A6 93 81 00 30 90 01 00 44 7C A6 2B 78 93 A1 00 34 7C 85 23 78 80 1E FF F0 38 81 00 10 7F C0 F2 14 81 3E 80 10 83 89 00 18 80 09 00 00 83 A9 00 14 81 69 00 04 81 49 00 08 81 09 00 0C 80 E9 00 10 90 01 00 10 93 A1 00 24 93 81 00 28 91 61 00 14 91 41 00 18 91 01 00 1C 90 E1 00 20 4B FF FC A1
+
+. 0 10006478 60
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 2F 83 00 00 90 01 00 14 38 E0 00 00 93 E1 00 0C 7C 68 1B 78 80 1E FF F0 7C BF 2B 78 38 A0 00 00 7F C0 F2 14 41 9E 01 7C
+
+. 0 100064B4 8
+. 2F 83 00 01 41 9E 01 4C
+
+. 0 10006604 40
+. 90 66 00 00 38 00 00 00 90 7F 00 00 7C 03 03 78 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 100067DC 28
+. 80 01 00 44 83 81 00 30 83 A1 00 34 7C 08 03 A6 83 C1 00 38 38 21 00 40 4E 80 00 20
+
+. 0 10006904 8
+. 2F 83 00 00 40 9E 01 DC
+
+. 0 1000690C 68
+. 81 61 00 10 3C 00 43 30 81 21 00 14 3A A0 00 00 7F 95 58 40 39 C0 00 01 91 69 00 04 81 3E 80 20 90 01 00 08 92 E1 00 0C C9 89 00 00 C9 A1 00 08 81 3E 80 24 FD AD 60 28 C8 09 00 00 FF A0 68 24 40 9C 00 DC
+
+. 0 10006950 60
+. 81 21 00 14 FF 60 60 90 7D 6F 5B 78 3B 40 00 00 83 09 02 08 7D 30 4B 78 7F 1B C3 78 82 50 00 08 3A 60 00 01 7D D1 73 78 93 70 01 08 7D CE 91 D6 7F 93 90 40 7E D7 73 96 40 9C 00 8C
+
+. 0 10006A14 12
+. 35 EF FF FF 3A 10 00 04 40 82 FF 50
+
+. 0 10006A20 8
+. 7F 95 B8 40 41 9D 00 90
+
+. 0 10006A28 112
+. 80 61 00 14 80 01 00 94 81 C1 00 20 81 E1 00 24 7C 08 03 A6 82 01 00 28 82 21 00 2C 82 41 00 30 82 61 00 34 82 81 00 38 82 A1 00 3C 82 C1 00 40 82 E1 00 44 83 01 00 48 83 21 00 4C 83 41 00 50 83 61 00 54 83 81 00 58 83 A1 00 5C 83 C1 00 60 83 E1 00 64 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10000DF4 36
+. 3C 80 10 01 7F 46 D3 78 38 84 68 DC 7F C5 F3 78 7C 76 1B 78 20 03 00 00 7C 60 19 14 4C C6 31 82 48 01 47 D5
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 1, stride = 1
+. 0 10000E18 8
+. 7F C3 F3 78 48 00 5D 3D
+
+. 0 10006B58 60
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 A1 00 14 7C 7D 1B 79 90 01 00 24 93 E1 00 1C 38 60 00 08 80 1E FF F0 38 A0 00 7B 38 C0 00 01 7F C0 F2 14 41 82 00 5C
+
+. 0 10006B94 4
+. 48 02 23 A1
+
+. 0 10006B98 20
+. 38 A0 00 83 7C 7F 1B 79 38 C0 00 08 57 A3 20 36 41 82 00 74
+
+. 0 10006BAC 8
+. 93 BF 00 00 48 02 23 85
+
+. 0 10006BB4 24
+. 2F 83 00 00 7C 60 1B 78 90 1F 00 04 7F E3 FB 78 7F E0 FB 78 41 9E 00 68
+
+. 0 10006BCC 32
+. 7C 03 03 78 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10000E20 32
+. 3C 80 10 01 38 84 69 14 7F C5 F3 78 7C 7C 1B 78 20 03 00 00 7C 60 19 14 4C C6 31 82 48 01 47 AD
+
+PASS: gsl_fft_complex_workspace_alloc, n = 1
+. 0 10000E40 20
+. 7F C3 F3 78 7F 44 D3 78 7F 65 DB 78 7F 06 C3 78 48 00 41 C9
+
+. 0 10000E54 8
+. 2F 9E 00 00 41 9E 00 30
+
+. 0 10000E5C 44
+. 7F C9 03 A6 38 00 00 00 57 4B 20 36 7D 20 DA 14 7C 1B 04 AE C9 A9 00 08 7D 20 CA 14 7C 19 05 AE 7C 00 5A 14 D9 A9 00 08 42 00 FF E4
+
+. 0 10000E88 24
+. 7F 63 DB 78 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 48 00 80 21
+
+. 0 10008EBC 20
+. 7C 08 02 A6 94 21 FF F0 39 00 FF FF 90 01 00 14 4B FF DF 0D
+
+. 0 10006DD8 232
+. 94 21 FD 80 7C 08 02 A6 42 9F 00 05 D9 C1 01 F0 D9 E1 01 F8 7D 80 00 26 DA 01 02 00 39 20 00 00 DA 21 02 08 DA 41 02 10 DA 61 02 18 DA 81 02 20 DA A1 02 28 DA C1 02 30 DA E1 02 38 DB 01 02 40 DB 21 02 48 DB 41 02 50 DB 61 02 58 DB 81 02 60 DB A1 02 68 DB C1 02 70 DB E1 02 78 91 C1 01 A8 91 E1 01 AC 92 01 01 B0 92 21 01 B4 92 41 01 B8 92 61 01 BC 92 81 01 C0 92 A1 01 C4 92 C1 01 C8 92 E1 01 CC 93 01 01 D0 93 21 01 D4 93 41 01 D8 93 61 01 DC 93 81 01 E0 93 C1 01 E8 7F C8 02 A6 90 01 02 84 81 67 00 04 80 1E FF F0 91 01 00 1C 39 00 00 01 7F C0 F2 14 7C A0 2B 79 93 A1 01 E4 93 E1 01 EC 91 81 01 A4 90 01 00 18 90 61 00 10 90 81 00 14 81 46 00 04 91 01 00 24 91 21 00 28 91 61 00 2C 41 82 0D 70
+
+. 0 10006EC0 16
+. 80 01 00 18 38 60 00 00 2F 80 00 01 41 9E 06 D8
+
+. 0 100075A4 168
+. 80 01 02 84 81 81 01 A4 81 C1 01 A8 7C 08 03 A6 81 E1 01 AC 7D 81 81 20 82 01 01 B0 82 21 01 B4 82 41 01 B8 82 61 01 BC 82 81 01 C0 82 A1 01 C4 82 C1 01 C8 82 E1 01 CC 83 01 01 D0 83 21 01 D4 83 41 01 D8 83 61 01 DC 83 81 01 E0 83 A1 01 E4 83 C1 01 E8 83 E1 01 EC C9 C1 01 F0 C9 E1 01 F8 CA 01 02 00 CA 21 02 08 CA 41 02 10 CA 61 02 18 CA 81 02 20 CA A1 02 28 CA C1 02 30 CA E1 02 38 CB 01 02 40 CB 21 02 48 CB 41 02 50 CB 61 02 58 CB 81 02 60 CB A1 02 68 CB C1 02 70 CB E1 02 78 38 21 02 80 4E 80 00 20
+
+. 0 10008ED0 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10000EA0 8
+. 2F 9E 00 00 41 9E 00 30
+
+. 0 10000EA8 44
+. 7F C9 03 A6 38 00 00 00 57 4B 20 36 7D 20 DA 14 7C 1B 04 AE C9 A9 00 08 7D 20 BA 14 7C 17 05 AE 7C 00 5A 14 D9 A9 00 08 42 00 FF E4
+
+. 0 10000ED4 52
+. 3E 60 10 02 2A 1A 00 01 CB F3 81 10 3C A0 10 01 3C 60 10 01 38 A5 69 3C FC 20 F8 90 7F 04 C3 78 7F 66 DB 78 38 63 69 4C 7F 47 D3 78 7F C8 F3 78 4B FF F9 15
+
+. 0 10000F08 24
+. 3C 80 10 01 38 84 69 50 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 46 CD
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 1, stride = 1
+. 0 10000F20 4
+. 41 91 03 A8
+
+. 0 10000F24 24
+. 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 7F 63 DB 78 48 00 7E 99
+
+. 0 10008DD0 64
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 01 00 24 39 00 00 01 7C 7F 1B 78 93 81 00 10 80 1E FF F0 7C 9C 23 78 93 A1 00 14 7C BD 2B 78 7F C0 F2 14 4B FF DF CD
+
+. 0 10008E10 8
+. 2C 03 00 00 40 82 00 64
+
+. 0 10008E18 48
+. 81 3E 80 74 3C 00 43 30 90 01 00 08 7F 83 E8 40 93 A1 00 0C C8 09 00 00 C9 A1 00 08 81 3E 80 78 FD AD 00 28 C8 09 00 00 FD 80 68 24 40 9C 00 34
+
+. 0 10008E48 48
+. 7F A9 03 A6 57 84 20 36 38 00 00 00 7C 1F 04 AE 7D 20 FA 14 FC 00 03 32 7C 1F 05 AE 7C 00 22 14 C9 A9 00 08 FD AD 03 32 D9 A9 00 08 42 00 FF E0
+
+. 0 10008E78 32
+. 80 01 00 24 83 81 00 10 83 A1 00 14 7C 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10000F3C 40
+. FC 20 F8 90 3F E0 10 01 3C A0 10 01 38 A5 69 98 7F 24 CB 78 7F 66 DB 78 38 7F 69 90 7F 47 D3 78 7F C8 F3 78 4B FF F8 B9
+
+. 0 10000F64 24
+. 3C 80 10 01 38 84 69 A4 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 46 71
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 1, stride = 1
+. 0 10000F7C 4
+. 41 91 03 1C
+
+. 0 10000F80 24
+. 7E E3 BB 78 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 48 00 7F 05
+
+. 0 10008E98 20
+. 7C 08 02 A6 94 21 FF F0 39 00 00 01 90 01 00 14 4B FF DF 31
+
+. 0 10008EAC 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10000F98 8
+. 2F 9E 00 00 41 9E 00 54
+
+. 0 10000FA0 80
+. 3C 00 43 30 3D 20 10 02 90 01 00 08 39 29 80 F8 93 C1 00 0C 7F C9 03 A6 C9 A1 00 08 38 00 00 00 C8 09 00 00 57 4B 20 36 FD 8D 00 28 7C 19 04 AE 7D 20 CA 14 FC 00 03 32 7C 19 05 AE 7C 00 5A 14 C9 A9 00 08 FD AD 03 32 D9 A9 00 08 42 00 FF E0
+
+. 0 10000FF0 40
+. CB F3 81 10 3C A0 10 01 38 A5 69 E4 7F 24 CB 78 FC 20 F8 90 7E E6 BB 78 7F 47 D3 78 7F C8 F3 78 38 7F 69 90 4B FF F8 05
+
+. 0 10001018 24
+. 3C 80 10 01 38 84 69 F4 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 45 BD
+
+PASS: gsl_fft_complex_backward with signal_noise, n = 1, stride = 1
+. 0 10001030 8
+. 2B 9A 00 01 41 9D 02 34
+
+. 0 10001038 52
+. 3D 20 10 02 3D 60 10 02 39 29 80 F0 39 6B 81 18 C8 49 00 00 7F C4 F3 78 C8 2B 00 00 7F 45 D3 78 7F 66 DB 78 7F 07 C3 78 38 60 00 01 3E A0 10 01 48 00 37 81
+
+. 0 1000106C 24
+. 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 7F 63 DB 78 48 00 7E 3D
+
+. 0 10001084 40
+. FC 20 F8 90 3C A0 10 01 7F C8 F3 78 7F 47 D3 78 38 A5 6A 40 7F 04 C3 78 7F 66 DB 78 38 75 6A 34 3B A0 00 00 4B FF F7 71
+
+. 0 100010AC 24
+. 3C 80 10 01 38 84 6A 50 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 45 29
+
+PASS: gsl_fft_complex_forward with signal_pulse, n = 1, stride = 1
+. 0 100010C4 44
+. 3D 20 10 02 3D 60 10 02 39 29 80 F0 39 6B 81 18 C8 49 00 00 C8 2B 00 00 7F 44 D3 78 7F 65 DB 78 7F 06 C3 78 7F C3 F3 78 48 00 38 D5
+
+. 0 100010F0 24
+. 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 7F 63 DB 78 48 00 7D B9
+
+. 0 10001108 36
+. FC 20 F8 90 3C A0 10 01 7F 04 C3 78 38 A5 6A 90 7F 66 DB 78 7F 47 D3 78 7F C8 F3 78 38 75 6A 34 4B FF F6 F1
+
+. 0 1000112C 24
+. 3C 80 10 01 38 84 6A A0 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 44 A9
+
+PASS: gsl_fft_complex_forward with signal_constant, n = 1, stride = 1
+. 0 10001144 12
+. 7F 9D F0 40 3B E0 00 00 40 9C 00 80
+
+. 0 10001150 52
+. 3E 80 10 01 3D 20 10 02 3D 60 10 02 39 29 81 18 39 6B 80 F0 C8 29 00 00 7F E3 FB 78 C8 4B 00 00 7F C4 F3 78 7F 45 D3 78 7F 66 DB 78 7F 07 C3 78 48 00 39 31
+
+. 0 10001184 24
+. 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 7F 63 DB 78 48 00 7D 25
+
+. 0 1000119C 36
+. C8 33 81 10 3B FF 00 01 38 75 6A 34 7F 04 C3 78 38 B4 6A E4 7F 66 DB 78 7F 47 D3 78 7F C8 F3 78 4B FF F6 5D
+
+. 0 100011C0 12
+. 7F 9F F0 40 7F BD 1B 78 41 9C FF 8C
+
+. 0 100011CC 28
+. 3C 80 10 01 7F C5 F3 78 7F 46 D3 78 38 84 6A F0 7F A3 EB 78 4C C6 31 82 48 01 44 05
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 1, stride = 1
+. 0 100011E8 8
+. 7E C3 B3 78 48 00 5A 65
+
+. 0 10006C50 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 02 08 48 02 23 19
+
+. 0 10006C70 16
+. 38 00 00 00 90 1D 02 08 7F A3 EB 78 48 02 23 09
+
+. 0 10006C80 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 100011F0 8
+. 7F 83 E3 78 48 00 5A A5
+
+. 0 10006C98 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 00 04 48 02 22 D1
+
+. 0 10006CB8 16
+. 38 00 00 00 90 1D 00 04 7F A3 EB 78 48 02 22 C1
+
+. 0 10006CC8 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 100011F8 8
+. 7F 63 DB 78 48 02 7D 89
+
+. 0 10001200 8
+. 7F 23 CB 78 48 02 7D 81
+
+. 0 10001208 8
+. 7F 03 C3 78 48 02 7D 79
+
+. 0 10001210 8
+. 7E E3 BB 78 48 02 7D 71
+
+. 0 10001218 80
+. 80 01 00 54 81 81 00 10 82 61 00 14 7C 08 03 A6 82 81 00 18 7D 80 81 20 82 A1 00 1C 82 C1 00 20 82 E1 00 24 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB E1 00 48 38 21 00 50 4E 80 00 20
+
+. 0 10004708 12
+. 7F A3 EB 78 7F E4 FB 78 4B FF E4 CD
+
+. 0 10002BDC 108
+. 94 21 FF B0 7C 08 02 A6 93 81 00 38 7F 84 19 D6 93 A1 00 3C 93 41 00 30 7C 7A 1B 78 93 E1 00 44 3B E0 00 00 7D 80 00 26 57 9D 18 38 7F A3 EB 78 90 01 00 54 91 81 00 10 57 9C 08 3C 92 E1 00 24 93 01 00 28 93 21 00 2C 93 61 00 34 93 C1 00 40 7C 9E 23 78 DB E1 00 48 92 61 00 14 92 81 00 18 92 A1 00 1C 92 C1 00 20 48 02 62 F1
+
+. 0 10002C48 12
+. 7C 7B 1B 78 7F A3 EB 78 48 02 62 E5
+
+. 0 FEEF7DC 8
+. 7E 55 93 78 48 00 00 54
+
+. 0 FEEF834 16
+. 81 5F 00 04 83 BF 00 0C 2B 8A 00 08 40 9D 03 20
+
+. 0 FEEF844 12
+. 82 FC 04 4C 7C 8A B8 40 41 85 03 14
+
+. 0 FEEF850 12
+. 2B 99 01 FF 55 4A 00 38 41 9D 00 0C
+
+. 0 FEEF85C 8
+. 7C 9A E8 00 41 86 02 14
+
+. 0 FEEFA74 12
+. 83 7C 00 34 7F 1B F8 00 40 9A FD E8
+
+. 0 FEEF864 16
+. 7F 8A C8 00 93 5D 00 08 93 BC 00 44 41 9E 03 EC
+
+. 0 FEEF874 8
+. 28 8A 01 FF 40 A5 FF 6C
+
+. 0 FEEF87C 16
+. 55 43 D1 BE 2B 03 00 20 39 03 00 38 40 99 00 48
+
+. 0 FEEF8D0 24
+. 55 0C 18 38 7E EC E2 14 39 37 00 30 81 69 00 08 7C 0B 48 00 41 A2 FF 10
+
+. 0 FEEF7F4 64
+. 91 3F 00 0C 55 03 06 FE 93 EB 00 0C 3B A0 00 01 91 7F 00 08 7D 0B 2E 70 93 E9 00 08 55 66 10 3A 83 FC 00 44 7F 66 E2 14 81 1B 04 38 7F A4 18 30 7F 1F D0 00 7D 0A 23 78 91 5B 04 38 41 9A 01 44
+
+. 0 FEEFA30 16
+. 39 68 00 30 38 C0 00 01 7C C0 28 39 40 82 00 14
+
+. 0 FEEFA50 12
+. 81 4B 00 0C 7F 8A 58 00 40 9E 01 BC
+
+. 0 FEEFC14 32
+. 80 6A 00 04 80 8A 00 0C 54 60 00 38 7C F9 00 50 91 64 00 08 2A 07 00 0F 90 8B 00 0C 41 91 00 54
+
+. 0 FEEFC84 24
+. 7C 6A CA 14 90 7C 00 40 90 7C 00 44 93 43 00 08 93 43 00 0C 41 85 00 08
+
+. 0 FEEFC9C 36
+. 90 7C 00 34 57 AF 10 3A 60 F2 00 01 7D F6 9B 78 39 0A 00 08 92 CA 00 04 7C E3 39 2E 92 43 00 04 4B FF F9 F4
+
+. 0 10002C54 12
+. 7C 79 1B 78 7F A3 EB 78 48 02 62 D9
+
+. 0 FEEFA80 12
+. 38 99 00 10 7C 04 50 40 40 80 FD DC
+
+. 0 FEEFA8C 80
+. 81 3E 05 FC 7C 99 50 50 63 26 00 01 60 80 00 01 7F 87 4A 78 39 1F 00 08 31 27 FF FF 7D 69 39 10 7D 3F CA 14 55 63 10 3A 91 3C 00 34 7C 65 33 78 91 3C 00 40 90 BF 00 04 91 3C 00 44 7C 89 21 2E 90 09 00 04 93 49 00 0C 93 49 00 08 4B FF FB D8
+
+. 0 10002C60 12
+. 7C 78 1B 78 7F A3 EB 78 48 02 62 CD
+
+. 0 10002C6C 12
+. 2B 9C 00 00 7C 77 1B 78 40 9D 00 78
+
+. 0 10002C78 116
+. 3D 60 10 02 3D 20 10 02 C9 0B 81 00 3D 60 10 02 39 6B 80 F8 C8 E9 80 E8 7F 89 03 A6 3D 20 10 02 C9 4B 00 00 C9 29 81 08 3D 20 43 30 93 E1 00 0C 57 E0 10 3A 91 21 00 08 3B FF 00 01 C8 01 00 08 FC 00 50 28 FD A0 48 2A FD 80 38 2A FD 60 40 2A FD A0 68 18 FD 80 60 18 FD 60 58 18 FC 00 00 18 7D B7 05 2E 7D 99 05 2E 7D 78 05 2E 7C 1B 05 2E 42 00 FF BC
+
+. 0 10002CA4 72
+. 93 E1 00 0C 57 E0 10 3A 91 21 00 08 3B FF 00 01 C8 01 00 08 FC 00 50 28 FD A0 48 2A FD 80 38 2A FD 60 40 2A FD A0 68 18 FD 80 60 18 FD 60 58 18 FC 00 00 18 7D B7 05 2E 7D 99 05 2E 7D 78 05 2E 7C 1B 05 2E 42 00 FF BC
+
+. 0 10002CEC 8
+. 38 60 00 00 48 01 27 05
+
+. 0 10002CF4 8
+. 7F C3 F3 78 48 00 69 71
+
+. 0 10009668 128
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 60 7F C8 02 A6 92 E1 00 44 7C 77 1B 79 90 01 00 94 DB 61 00 68 80 1E FF F0 DB 81 00 70 DB A1 00 78 7F C0 F2 14 DB C1 00 80 DB E1 00 88 91 C1 00 20 91 E1 00 24 92 01 00 28 92 21 00 2C 92 41 00 30 92 61 00 34 92 81 00 38 92 A1 00 3C 92 C1 00 40 93 01 00 48 93 21 00 4C 93 41 00 50 93 61 00 54 93 81 00 58 93 A1 00 5C 93 E1 00 64 41 82 01 E4
+
+. 0 100096E8 8
+. 38 60 02 0C 48 01 F8 49
+
+. 0 FEEF7E4 80
+. 7F AA E2 14 55 48 E8 FE 39 3D 00 30 81 69 00 08 91 3F 00 0C 55 03 06 FE 93 EB 00 0C 3B A0 00 01 91 7F 00 08 7D 0B 2E 70 93 E9 00 08 55 66 10 3A 83 FC 00 44 7F 66 E2 14 81 1B 04 38 7F A4 18 30 7F 1F D0 00 7D 0A 23 78 91 5B 04 38 41 9A 01 44
+
+. 0 100096F0 12
+. 7C 69 1B 79 91 21 00 14 41 82 02 4C
+
+. 0 100096FC 8
+. 56 E3 18 38 48 01 F8 35
+
+. 0 FEEFA38 8
+. 7C C0 28 39 40 82 00 14
+
+. 0 FEEFA40 16
+. 54 C6 08 3C 39 6B 00 08 7C C9 28 39 41 82 FF F4
+
+. 0 10009704 16
+. 81 21 00 14 2F 83 00 00 90 69 02 08 41 9E 02 50
+
+. 0 10009714 24
+. 81 21 00 14 7E E3 BB 78 38 81 00 10 92 E9 00 00 38 A9 00 08 4B FF D0 41
+
+. 0 1000972C 8
+. 2F 83 00 00 40 9E 01 E4
+
+. 0 10009734 68
+. 81 61 00 10 3C 00 43 30 81 21 00 14 3A A0 00 00 7F 95 58 40 39 C0 00 01 91 69 00 04 81 3E 80 C4 90 01 00 08 92 E1 00 0C C9 89 00 00 C9 A1 00 08 81 3E 80 C8 FD AD 60 28 C8 09 00 00 FF A0 68 24 40 9C 00 E4
+
+. 0 10009778 60
+. 81 21 00 14 FF 60 60 90 7D 6F 5B 78 3B 40 00 00 83 09 02 08 7D 30 4B 78 7F 1B C3 78 82 50 00 08 3A 60 00 01 7D D1 73 78 93 70 01 08 7D CE 91 D6 7F 93 90 40 7E D7 73 96 40 9C 00 94
+
+. 0 10009844 12
+. 35 EF FF FF 3A 10 00 04 40 82 FF 48
+
+. 0 10009850 8
+. 7F 95 B8 40 41 9D 00 90
+
+. 0 10009858 112
+. 80 61 00 14 80 01 00 94 81 C1 00 20 81 E1 00 24 7C 08 03 A6 82 01 00 28 82 21 00 2C 82 41 00 30 82 61 00 34 82 81 00 38 82 A1 00 3C 82 C1 00 40 82 E1 00 44 83 01 00 48 83 21 00 4C 83 41 00 50 83 61 00 54 83 81 00 58 83 A1 00 5C 83 C1 00 60 83 E1 00 64 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10002CFC 36
+. 3C 80 10 01 7F 46 D3 78 38 84 74 BC 7F C5 F3 78 7C 76 1B 78 20 03 00 00 7C 60 19 14 4C C6 31 82 48 01 28 CD
+
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 1, stride = 1
+. 0 10002D20 8
+. 7F C3 F3 78 48 00 6C 65
+
+. 0 10009988 60
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 A1 00 14 7C 7D 1B 79 90 01 00 24 93 E1 00 1C 38 60 00 08 80 1E FF F0 38 A0 00 7B 38 C0 00 01 7F C0 F2 14 41 82 00 5C
+
+. 0 100099C4 4
+. 48 01 F5 71
+
+. 0 100099C8 20
+. 38 A0 00 83 7C 7F 1B 79 38 C0 00 08 57 A3 18 38 41 82 00 74
+
+. 0 100099DC 8
+. 93 BF 00 00 48 01 F5 55
+
+. 0 100099E4 24
+. 2F 83 00 00 7C 60 1B 78 90 1F 00 04 7F E3 FB 78 7F E0 FB 78 41 9E 00 68
+
+. 0 100099FC 32
+. 7C 03 03 78 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10002D28 32
+. 3C 80 10 01 38 84 74 F8 7F C5 F3 78 7C 7C 1B 78 20 03 00 00 7C 60 19 14 4C C6 31 82 48 01 28 A5
+
+PASS: gsl_fft_complex_workspace_float_alloc, n = 1
+. 0 10002D48 20
+. 7F C3 F3 78 7F 44 D3 78 7F 65 DB 78 7F 06 C3 78 48 00 2C D1
+
+. 0 10005A28 64
+. 94 21 FF D0 7C 08 02 A6 93 41 00 18 7C 7A 1B 79 93 01 00 10 7C D8 33 78 93 21 00 14 7C 99 23 78 93 81 00 20 7C BC 2B 78 93 61 00 1C 93 A1 00 24 93 C1 00 28 93 E1 00 2C 90 01 00 34 41 82 00 84
+
+. 0 10005A68 8
+. 2F 9A 00 00 41 9E 00 38
+
+. 0 10005A70 16
+. 3B C0 00 00 54 9B 18 38 7F 5F D3 78 4B FF ED 11
+
+. 0 10005A80 20
+. 7F BE E2 14 FC 20 08 18 7C 3E E5 2E 7F DE DA 14 4B FF EC FD
+
+. 0 10005A94 16
+. 37 FF FF FF FC 20 08 18 D0 3D 00 04 40 82 FF DC
+
+. 0 10005AA4 20
+. 7F 83 E3 78 7F 24 CB 78 7F 45 D3 78 7F 06 C3 78 48 00 06 89
+
+. 0 1000613C 20
+. 7C 08 02 A6 94 21 FF F0 38 E0 FF FF 90 01 00 14 4B FF FD FD
+
+. 0 10005F48 208
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 50 7F C8 02 A6 6C E7 80 00 90 01 00 94 90 E1 00 0C 80 1E FF F0 90 A1 00 14 7F C0 F2 14 3C 00 43 30 81 3E 80 18 90 01 00 08 C8 09 00 00 C9 A1 00 08 81 3E 80 20 FD AD 00 28 90 01 00 10 C9 89 00 00 81 3E 80 1C FD AD 68 2A 93 21 00 3C C8 09 00 00 3B 20 00 00 7F 99 28 40 DB 41 00 60 FD AD 00 32 C8 01 00 10 92 A1 00 2C 7C D5 33 78 FC 00 60 28 93 41 00 40 93 61 00 44 7C 7A 1B 78 DB 21 00 58 7C BB 2B 78 DB 61 00 68 FF 4D 00 24 DB 81 00 70 DB A1 00 78 DB C1 00 80 DB E1 00 88 92 61 00 24 92 81 00 28 92 C1 00 30 92 E1 00 34 93 01 00 38 93 81 00 48 93 A1 00 4C 93 E1 00 54 40 9C 00 C4
+
+. 0 10006018 40
+. 54 94 18 38 82 7E 80 24 FF 20 60 90 3A C0 00 00 7E 98 A3 78 2F 9B 00 00 C3 93 00 00 3B A0 00 00 FF A0 E0 90 41 9E 00 80
+
+. 0 10006040 44
+. 3E E0 43 30 FF 60 C8 90 3B 80 00 00 7F 7F DB 78 93 A1 00 14 92 E1 00 10 CB E1 00 10 FF FF D8 28 FF FF 06 B2 FC 20 F8 90 48 02 2E 8D
+
+. 0 1000606C 12
+. FF C0 08 18 FC 20 F8 90 48 02 2F 21
+
+. 0 10006078 68
+. 7D 7D CA 14 7C 0B DB 96 FC 20 08 18 7D 3C D2 14 7C 1C D4 2E C1 89 00 04 37 FF FF FF 7F 9C C2 14 ED A1 03 32 EC 21 00 32 EC 1E 68 38 EF DE 0B 3A EF 9C 00 2A EF BD F0 2A 7C 00 D9 D6 7F A0 58 50 40 82 FF 98
+
+. 0 100060BC 28
+. 3B 39 00 01 7D 36 AA 14 7F 99 D8 40 7F 96 AD 2E D3 A9 00 04 7E D6 A2 14 41 9C FF 58
+
+. 0 100060D8 100
+. 80 01 00 94 38 60 00 00 82 61 00 24 82 81 00 28 7C 08 03 A6 82 A1 00 2C 82 C1 00 30 82 E1 00 34 83 01 00 38 83 21 00 3C 83 41 00 40 83 61 00 44 83 81 00 48 83 A1 00 4C 83 C1 00 50 83 E1 00 54 CB 21 00 58 CB 41 00 60 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10006150 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10005AB8 48
+. 80 01 00 34 83 01 00 10 83 21 00 14 7C 08 03 A6 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 10002D5C 8
+. 2F 9E 00 00 41 9E 00 30
+
+. 0 10002D64 44
+. 7F C9 03 A6 38 00 00 00 57 4B 18 38 7D 20 DA 14 7C 1B 04 2E C1 A9 00 04 7D 20 CA 14 7C 19 05 2E 7C 00 5A 14 D1 A9 00 04 42 00 FF E4
+
+. 0 10002D90 24
+. 7F 63 DB 78 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 48 00 8F D9
+
+. 0 1000BD7C 20
+. 7C 08 02 A6 94 21 FF F0 39 00 FF FF 90 01 00 14 4B FF DE 7D
+
+. 0 10009C08 232
+. 94 21 FD D0 7C 08 02 A6 42 9F 00 05 D9 C1 01 A0 D9 E1 01 A8 7D 80 00 26 DA 01 01 B0 39 20 00 00 DA 21 01 B8 DA 41 01 C0 DA 61 01 C8 DA 81 01 D0 DA A1 01 D8 DA C1 01 E0 DA E1 01 E8 DB 01 01 F0 DB 21 01 F8 DB 41 02 00 DB 61 02 08 DB 81 02 10 DB A1 02 18 DB C1 02 20 DB E1 02 28 91 C1 01 58 91 E1 01 5C 92 01 01 60 92 21 01 64 92 41 01 68 92 61 01 6C 92 81 01 70 92 A1 01 74 92 C1 01 78 92 E1 01 7C 93 01 01 80 93 21 01 84 93 41 01 88 93 61 01 8C 93 81 01 90 93 C1 01 98 7F C8 02 A6 90 01 02 34 81 67 00 04 80 1E FF F0 91 01 00 1C 39 00 00 01 7F C0 F2 14 7C A0 2B 79 93 A1 01 94 93 E1 01 9C 91 81 01 54 90 01 00 18 90 61 00 10 90 81 00 14 81 46 00 04 91 01 00 24 91 21 00 28 91 61 00 2C 41 82 0D 8C
+
+. 0 10009CF0 16
+. 80 01 00 18 38 60 00 00 2F 80 00 01 41 9E 06 C8
+
+. 0 1000A3C4 168
+. 80 01 02 34 81 81 01 54 81 C1 01 58 7C 08 03 A6 81 E1 01 5C 7D 81 81 20 82 01 01 60 82 21 01 64 82 41 01 68 82 61 01 6C 82 81 01 70 82 A1 01 74 82 C1 01 78 82 E1 01 7C 83 01 01 80 83 21 01 84 83 41 01 88 83 61 01 8C 83 81 01 90 83 A1 01 94 83 C1 01 98 83 E1 01 9C C9 C1 01 A0 C9 E1 01 A8 CA 01 01 B0 CA 21 01 B8 CA 41 01 C0 CA 61 01 C8 CA 81 01 D0 CA A1 01 D8 CA C1 01 E0 CA E1 01 E8 CB 01 01 F0 CB 21 01 F8 CB 41 02 00 CB 61 02 08 CB 81 02 10 CB A1 02 18 CB C1 02 20 CB E1 02 28 38 21 02 30 4E 80 00 20
+
+. 0 1000BD90 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10002DA8 8
+. 2F 9E 00 00 41 9E 00 30
+
+. 0 10002DB0 44
+. 7F C9 03 A6 38 00 00 00 57 4B 18 38 7D 20 DA 14 7C 1B 04 2E C1 A9 00 04 7D 20 BA 14 7C 17 05 2E 7C 00 5A 14 D1 A9 00 04 42 00 FF E4
+
+. 0 10002DDC 52
+. 3E 60 10 02 2A 1A 00 01 CB F3 81 10 3C A0 10 01 3C 60 10 01 38 A5 69 3C FC 20 F8 90 7F 04 C3 78 7F 66 DB 78 38 63 69 4C 7F 47 D3 78 7F C8 F3 78 4B FF F8 F9
+
+. 0 10002704 124
+. 94 21 FF A0 7C 08 02 A6 93 81 00 38 7D 1C 43 79 3D 20 10 02 93 01 00 28 39 29 80 F0 93 21 00 2C 93 61 00 34 FD 20 08 90 93 C1 00 40 7C 78 1B 78 93 E1 00 44 7C 9E 23 78 DB A1 00 48 7C B9 2B 78 DB C1 00 50 7C DF 33 78 DB E1 00 58 7C FB 3B 78 92 41 00 10 92 61 00 14 92 81 00 18 92 A1 00 1C 92 C1 00 20 92 E1 00 24 93 41 00 30 93 A1 00 3C 90 01 00 64 C8 29 00 00 41 82 00 60
+
+. 0 10002780 84
+. 3D 20 10 02 7F 89 03 A6 39 29 81 20 38 00 00 00 C9 49 00 00 54 EA 18 38 7D 20 FA 14 7D 60 F2 14 7C 1F 04 2E 7D 9E 04 2E 7C 00 52 14 C1 A9 00 04 C1 6B 00 04 EC 00 60 28 ED AD 58 28 FC 00 02 10 FD A0 6A 10 FC 00 68 2A FF E0 02 B2 FF 9F 08 00 40 9D 00 08
+
+. 0 100027D8 4
+. 42 00 FF C0
+
+. 0 100027DC 12
+. FF 81 48 00 38 60 00 00 40 9C 00 58
+
+. 0 100027E8 84
+. 80 01 00 64 82 41 00 10 82 61 00 14 7C 08 03 A6 82 81 00 18 82 A1 00 1C 82 C1 00 20 82 E1 00 24 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB A1 00 48 CB C1 00 50 CB E1 00 58 38 21 00 60 4E 80 00 20
+
+. 0 10002E10 24
+. 3C 80 10 01 38 84 75 28 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 27 C5
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 1, stride = 1
+. 0 10002E28 4
+. 41 91 03 AC
+
+. 0 10002E2C 24
+. 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 7F 63 DB 78 48 00 8E 4D
+
+. 0 1000BC8C 64
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 01 00 24 39 00 00 01 7C 7F 1B 78 93 81 00 10 80 1E FF F0 7C 9C 23 78 93 A1 00 14 7C BD 2B 78 7F C0 F2 14 4B FF DF 41
+
+. 0 1000BCCC 8
+. 2C 03 00 00 40 82 00 68
+
+. 0 1000BCD4 52
+. 81 3E 81 08 3C 00 43 30 90 01 00 08 7F 83 E8 40 93 A1 00 0C C8 09 00 00 C9 A1 00 08 81 3E 81 0C FD AD 00 28 C0 09 00 00 FD A0 68 18 ED 80 68 24 40 9C 00 34
+
+. 0 1000BD08 48
+. 7F A9 03 A6 57 84 18 38 38 00 00 00 7C 1F 04 2E 7D 20 FA 14 EC 00 03 32 7C 1F 05 2E 7C 00 22 14 C1 A9 00 04 ED AD 03 32 D1 A9 00 04 42 00 FF E0
+
+. 0 1000BD38 32
+. 80 01 00 24 83 81 00 10 83 A1 00 14 7C 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10002E44 40
+. FC 20 F8 90 3F E0 10 01 3C A0 10 01 38 A5 69 98 7F 24 CB 78 7F 66 DB 78 38 7F 69 90 7F 47 D3 78 7F C8 F3 78 4B FF F8 9D
+
+. 0 10002E6C 24
+. 3C 80 10 01 38 84 75 70 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 27 69
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 1, stride = 1
+. 0 10002E84 4
+. 41 91 03 20
+
+. 0 10002E88 24
+. 7E E3 BB 78 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 48 00 8E BD
+
+. 0 1000BD58 20
+. 7C 08 02 A6 94 21 FF F0 39 00 00 01 90 01 00 14 4B FF DE A1
+
+. 0 1000BD6C 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10002EA0 8
+. 2F 9E 00 00 41 9E 00 58
+
+. 0 10002EA8 84
+. 3C 00 43 30 3D 20 10 02 90 01 00 08 39 29 80 F8 93 C1 00 0C 7F C9 03 A6 C9 A1 00 08 38 00 00 00 C8 09 00 00 57 4B 18 38 FD AD 00 28 FD 80 68 18 7C 19 04 2E 7D 20 CA 14 EC 00 03 32 7C 19 05 2E 7C 00 5A 14 C1 A9 00 04 ED AD 03 32 D1 A9 00 04 42 00 FF E0
+
+. 0 10002EFC 40
+. CB F3 81 10 3C A0 10 01 38 A5 69 E4 7F 24 CB 78 FC 20 F8 90 7E E6 BB 78 7F 47 D3 78 7F C8 F3 78 38 7F 69 90 4B FF F7 E5
+
+. 0 10002F24 24
+. 3C 80 10 01 38 84 75 B8 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 26 B1
+
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 1, stride = 1
+. 0 10002F3C 8
+. 2B 9A 00 01 41 9D 02 34
+
+. 0 10002F44 52
+. 3D 20 10 02 3D 60 10 02 39 29 81 28 39 6B 81 2C C0 49 00 00 7F C4 F3 78 C0 2B 00 00 7F 45 D3 78 7F 66 DB 78 7F 07 C3 78 38 60 00 01 3E A0 10 01 48 00 22 BD
+
+. 0 10005230 96
+. 94 21 FF 90 7C 08 02 A6 93 C1 00 30 7C 9E 23 79 DB 81 00 50 FF 80 08 90 DB A1 00 58 FF A0 10 90 93 21 00 1C 7C F9 3B 78 93 61 00 24 7C 7B 1B 78 DB 21 00 38 DB 41 00 40 DB 61 00 48 DB C1 00 60 DB E1 00 68 93 01 00 18 93 41 00 20 93 81 00 28 93 A1 00 2C 93 E1 00 34 90 01 00 74 41 82 01 48
+
+. 0 10005290 8
+. 2F 9E 00 00 41 9E 00 28
+
+. 0 10005298 36
+. 7F C9 03 A6 39 40 00 00 38 00 00 00 54 AB 18 38 7D 20 32 14 7D 46 01 2E 91 49 00 04 7C 00 5A 14 42 00 FF F0
+
+. 0 100052BC 40
+. 7C 1B F3 96 2F 9E 00 00 7C 00 F1 D6 7C 00 D8 50 7C 00 29 D6 54 00 18 38 7D 20 32 14 7F 86 05 2E D3 A9 00 04 41 9E 00 A4
+
+. 0 100052E4 104
+. 3F 00 43 30 3D 20 10 02 39 29 80 F8 93 C1 00 14 93 01 00 10 54 BA 18 38 CB 29 00 00 3D 20 10 02 C8 01 00 10 3B 80 00 00 CB 49 81 38 3B A0 00 00 FF 60 C8 28 7F DF F3 78 7C 1D F3 96 93 01 00 08 7C 00 F1 D6 7C 00 E8 50 7F BD DA 14 90 01 00 0C CB E1 00 08 FF FF C8 28 FF FF 06 B2 FF FF D8 24 FC 20 F8 90 48 02 3B AD
+
+. 0 1000534C 12
+. FF C0 08 18 FC 20 F8 90 48 02 3C 41
+
+. 0 10005358 44
+. 37 FF FF FF FC 20 08 18 7D 3C CA 14 EC 01 07 32 EC 21 07 72 EC 1E 07 7A EF DE 0F 38 7F DC CD 2E 7F 9C D2 14 D0 09 00 04 40 82 FF 9C
+
+. 0 10005384 80
+. 38 60 00 00 80 01 00 74 83 01 00 18 83 21 00 1C 7C 08 03 A6 83 41 00 20 83 61 00 24 83 81 00 28 83 A1 00 2C 83 C1 00 30 83 E1 00 34 CB 21 00 38 CB 41 00 40 CB 61 00 48 CB 81 00 50 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 10002F78 24
+. 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 7F 63 DB 78 48 00 8D F1
+
+. 0 10002F90 40
+. FC 20 F8 90 3C A0 10 01 7F C8 F3 78 7F 47 D3 78 38 A5 6A 40 7F 04 C3 78 7F 66 DB 78 38 75 6A 34 3B A0 00 00 4B FF F7 51
+
+. 0 10002FB8 24
+. 3C 80 10 01 38 84 76 00 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 26 1D
+
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 1, stride = 1
+. 0 10002FD0 44
+. 3D 20 10 02 3D 60 10 02 39 29 81 28 39 6B 81 2C C0 49 00 00 C0 2B 00 00 7F 44 D3 78 7F 65 DB 78 7F 06 C3 78 7F C3 F3 78 48 00 24 01
+
+. 0 100053F8 20
+. 2C 03 00 00 7C 08 02 A6 94 21 FF F0 90 01 00 14 41 82 00 A0
+
+. 0 1000540C 8
+. 2F 83 00 00 41 9E 00 24
+
+. 0 10005414 32
+. 7C 69 03 A6 38 00 00 00 54 8B 18 38 7D 20 2A 14 7C 25 05 2E D0 49 00 04 7C 00 5A 14 42 00 FF F0
+
+. 0 10005434 8
+. 2F 83 00 00 41 9E 00 28
+
+. 0 1000543C 36
+. 7C 69 03 A6 54 84 18 38 39 60 00 00 38 00 00 00 7D 20 32 14 7D 66 01 2E 91 69 00 04 7C 00 22 14 42 00 FF F0
+
+. 0 10005460 72
+. 3C 00 43 30 3D 20 10 02 90 61 00 0C 39 29 80 F8 90 01 00 08 38 60 00 00 C8 01 00 08 C9 A9 00 00 80 01 00 14 38 21 00 10 FC 00 68 28 7C 08 03 A6 FC 00 00 18 ED A0 00 B2 EC 00 00 72 D1 A6 00 04 D0 06 00 00 4E 80 00 20
+
+. 0 10002FFC 24
+. 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 7F 63 DB 78 48 00 8D 6D
+
+. 0 10003014 36
+. FC 20 F8 90 3C A0 10 01 7F 04 C3 78 38 A5 6A 90 7F 66 DB 78 7F 47 D3 78 7F C8 F3 78 38 75 6A 34 4B FF F6 D1
+
+. 0 10003038 24
+. 3C 80 10 01 38 84 76 48 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 25 9D
+
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 1, stride = 1
+. 0 10003050 12
+. 7F 9D F0 40 3B E0 00 00 40 9C 00 80
+
+. 0 1000305C 52
+. 3E 80 10 01 3D 20 10 02 3D 60 10 02 39 29 81 2C 39 6B 81 28 C0 29 00 00 7F E3 FB 78 C0 4B 00 00 7F C4 F3 78 7F 45 D3 78 7F 66 DB 78 7F 07 C3 78 48 00 24 4D
+
+. 0 100054D8 112
+. 94 21 FF 80 7C 08 02 A6 93 C1 00 40 7C 9E 23 79 DB 61 00 58 FF 60 08 90 DB 81 00 60 FF 80 10 90 92 E1 00 24 7C B7 2B 78 93 21 00 2C 7C D9 33 78 93 41 00 30 7C 7A 1B 78 93 61 00 34 7C FB 3B 78 DB 21 00 48 DB 41 00 50 DB A1 00 68 DB C1 00 70 DB E1 00 78 92 C1 00 20 93 01 00 28 93 81 00 38 93 A1 00 3C 93 E1 00 44 90 01 00 84 41 82 01 84
+
+. 0 10005548 8
+. 2F 9E 00 00 41 9E 00 A4
+
+. 0 10005550 104
+. 3E C0 43 30 3D 20 10 02 39 29 80 F8 93 C1 00 14 92 C1 00 10 3B 80 00 00 CB 29 00 00 3D 20 10 02 C8 01 00 10 3B A0 00 00 CB 49 81 40 54 B8 18 38 FF A0 C8 28 7F DF F3 78 7C 1D F3 96 92 C1 00 08 7C 00 F1 D6 7C 00 E8 50 7F BD D2 14 90 01 00 0C CB E1 00 08 FF FF C8 28 FF FF 06 B2 FF FF E8 24 FC 20 F8 90 48 02 39 41
+
+. 0 100055B8 12
+. FF C0 08 18 FC 20 F8 90 48 02 39 D5
+
+. 0 100055C4 44
+. 37 FF FF FF FC 20 08 18 7D 3C CA 14 EC 01 06 F2 EC 21 07 32 EC 1E 07 3A EF DE 0E F8 7F DC CD 2E 7F 9C C2 14 D0 09 00 04 40 82 FF 9C
+
+. 0 100055F0 8
+. 2F 9E 00 00 41 9E 00 28
+
+. 0 100055F8 36
+. 7F C9 03 A6 39 40 00 00 38 00 00 00 56 EB 18 38 7D 20 DA 14 7D 5B 01 2E 91 49 00 04 7C 00 5A 14 42 00 FF F0
+
+. 0 1000561C 8
+. 2F 9A 00 00 40 9D 00 CC
+
+. 0 100056EC 20
+. 7D 3A F0 50 7C 09 F3 96 7C 00 F1 D6 7C 60 48 50 4B FF FF 34
+
+. 0 10005630 152
+. 3C 00 43 30 3D 20 10 02 90 01 00 10 39 29 80 F8 93 C1 00 14 7C 17 19 D6 C9 A9 00 00 38 60 00 00 C8 01 00 10 FC 00 68 28 54 00 18 38 7D 20 DA 14 FC 00 00 18 ED A0 07 32 EC 00 06 F2 7C 1B 05 2E D1 A9 00 04 80 01 00 84 82 C1 00 20 82 E1 00 24 7C 08 03 A6 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB 21 00 48 CB 41 00 50 CB 61 00 58 CB 81 00 60 CB A1 00 68 CB C1 00 70 CB E1 00 78 38 21 00 80 4E 80 00 20
+
+. 0 10003090 24
+. 7F 44 D3 78 7F C5 F3 78 7E C6 B3 78 7F 87 E3 78 7F 63 DB 78 48 00 8C D9
+
+. 0 100030A8 36
+. C8 33 81 10 3B FF 00 01 38 75 6A 34 7F 04 C3 78 38 B4 6A E4 7F 66 DB 78 7F 47 D3 78 7F C8 F3 78 4B FF F6 3D
+
+. 0 100030CC 12
+. 7F 9F F0 40 7F BD 1B 78 41 9C FF 8C
+
+. 0 100030D8 28
+. 3C 80 10 01 7F C5 F3 78 7F 46 D3 78 38 84 76 90 7F A3 EB 78 4C C6 31 82 48 01 24 F9
+
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 1, stride = 1
+. 0 100030F4 8
+. 7E C3 B3 78 48 00 69 89
+
+. 0 10009A80 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 02 08 48 01 F4 E9
+
+. 0 10009AA0 16
+. 38 00 00 00 90 1D 02 08 7F A3 EB 78 48 01 F4 D9
+
+. 0 10009AB0 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 100030FC 8
+. 7F 83 E3 78 48 00 69 C9
+
+. 0 10009AC8 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 00 04 48 01 F4 A1
+
+. 0 10009AE8 16
+. 38 00 00 00 90 1D 00 04 7F A3 EB 78 48 01 F4 91
+
+. 0 10009AF8 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10003104 8
+. 7F 63 DB 78 48 02 5E 7D
+
+. 0 1000310C 8
+. 7F 23 CB 78 48 02 5E 75
+
+. 0 10003114 8
+. 7F 03 C3 78 48 02 5E 6D
+
+. 0 1000311C 8
+. 7E E3 BB 78 48 02 5E 65
+
+. 0 10003124 80
+. 80 01 00 54 81 81 00 10 82 61 00 14 7C 08 03 A6 82 81 00 18 7D 80 81 20 82 A1 00 1C 82 C1 00 20 82 E1 00 24 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB E1 00 48 38 21 00 50 4E 80 00 20
+
+. 0 10004714 12
+. 7F A3 EB 78 7F E4 FB 78 4B FF D3 85
+
+. 0 10001AA0 76
+. 94 21 FF C0 7C 08 02 A6 93 41 00 28 7F 44 19 D6 93 81 00 30 7C 7C 1B 78 90 01 00 44 93 21 00 24 57 43 18 38 93 61 00 2C 93 A1 00 34 57 5D 20 36 93 C1 00 38 7C 9E 23 78 93 E1 00 3C 92 C1 00 18 92 E1 00 1C 93 01 00 20 48 02 74 4D
+
+. 0 10001AEC 12
+. 7C 7F 1B 78 7F A3 EB 78 48 02 74 41
+
+. 0 10001AF8 12
+. 7C 7B 1B 78 7F A3 EB 78 48 02 74 35
+
+. 0 FEEFC5C 20
+. 7F 9F CA 14 83 1C 00 04 63 16 00 01 92 DC 00 04 41 8E 00 10
+
+. 0 FEEFC7C 8
+. 39 1F 00 08 4B FF FA 30
+
+. 0 10001B04 12
+. 7C 79 1B 78 7F A3 EB 78 48 02 74 29
+
+. 0 FEEFA5C 24
+. 7C A5 30 78 7F 03 E2 14 39 6A 00 08 54 C6 08 3C 90 B8 04 38 4B FF FF 7C
+
+. 0 FEEF9EC 28
+. 21 26 00 00 7E A9 31 14 7D 26 28 10 7D 29 49 10 7D 29 00 D0 7D 2A AB 79 41 82 00 34
+
+. 0 10001B10 16
+. 2B 9A 00 00 7C 7D 1B 78 39 40 00 00 40 9D 00 38
+
+. 0 10001B20 52
+. 3D 60 10 02 7F 49 03 A6 39 6B 80 F8 3D 20 43 30 C9 AB 00 00 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 7C 1F 05 AE 42 00 FF E4
+
+. 0 10001B54 16
+. 57 40 08 3C 39 40 00 00 2B 80 00 00 40 9D 00 64
+
+. 0 10001B64 96
+. 3D 60 10 02 3D 20 10 02 C9 2B 81 00 3D 60 10 02 39 6B 80 F8 C9 09 80 E8 7C 09 03 A6 3D 20 10 02 C9 6B 00 00 C9 49 81 08 3D 20 43 30 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 58 28 FD A0 50 2A FD 80 40 2A FC 00 48 2A 7D BD 05 AE 7D 9B 05 AE 7C 19 05 AE 42 00 FF D0
+
+. 0 10001B90 52
+. 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 58 28 FD A0 50 2A FD 80 40 2A FC 00 48 2A 7D BD 05 AE 7D 9B 05 AE 7C 19 05 AE 42 00 FF D0
+
+. 0 10001BC4 8
+. 38 60 00 00 48 01 38 2D
+
+. 0 10001BCC 8
+. 7F C3 F3 78 48 00 ED 75
+
+. 0 10010944 136
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 60 7F C8 02 A6 7D 80 00 26 93 01 00 48 7C 78 1B 79 90 01 00 94 80 1E FF F0 DB 61 00 68 DB 81 00 70 7F C0 F2 14 DB A1 00 78 DB C1 00 80 DB E1 00 88 91 C1 00 20 91 E1 00 24 92 01 00 28 92 21 00 2C 92 41 00 30 92 61 00 34 92 81 00 38 92 A1 00 3C 92 C1 00 40 92 E1 00 44 93 21 00 4C 93 41 00 50 93 61 00 54 93 81 00 58 93 A1 00 5C 93 E1 00 64 91 81 00 1C 41 82 02 08
+
+. 0 100109CC 8
+. 38 60 02 0C 48 01 85 65
+
+. 0 100109D4 12
+. 7C 69 1B 79 91 21 00 14 41 82 02 6C
+
+. 0 100109E0 8
+. 2F 98 00 01 41 9E 01 DC
+
+. 0 10010BC0 16
+. 81 21 00 14 3A E0 00 00 92 E9 02 08 4B FF FE 34
+
+. 0 10010A00 24
+. 81 21 00 14 7F 03 C3 78 38 81 00 10 93 09 00 00 38 A9 00 08 4B FF 5C 6D
+
+. 0 10006680 92
+. 94 21 FF C0 7C 08 02 A6 42 9F 00 05 93 C1 00 38 7F C8 02 A6 7C A6 2B 78 90 01 00 44 7C 85 23 78 38 81 00 10 80 1E FF F0 7F C0 F2 14 81 3E 80 0C 80 09 00 00 80 E9 00 10 81 69 00 04 81 49 00 08 81 09 00 0C 90 01 00 10 91 61 00 14 91 41 00 18 91 01 00 1C 90 E1 00 20 4B FF FD A1
+
+. 0 100066DC 20
+. 80 01 00 44 83 C1 00 38 38 21 00 40 7C 08 03 A6 4E 80 00 20
+
+. 0 10010A18 8
+. 2F 83 00 00 40 9E 01 FC
+
+. 0 10010A20 68
+. 81 61 00 10 3C 00 43 30 81 21 00 14 3A C0 00 00 2B 8B 00 00 39 C0 00 01 91 69 00 04 81 3E 82 44 90 01 00 08 93 01 00 0C C9 89 00 00 C9 A1 00 08 81 3E 82 48 FD AD 60 28 C8 09 00 00 FF A0 68 24 40 9D 00 DC
+
+. 0 10010A64 60
+. 81 21 00 14 FF 60 60 90 7D 6F 5B 78 3B 20 00 00 82 E9 02 08 7D 31 4B 78 7E FA BB 78 82 71 00 08 3A 80 00 01 7D C9 73 78 93 51 01 08 7D CE 99 D6 7F 94 98 40 7E 58 73 96 40 9C 00 94
+
+. 0 10010B30 12
+. 35 EF FF FF 3A 31 00 04 40 82 FF 48
+
+. 0 10010B3C 16
+. 57 00 F8 7E 80 61 00 14 7F 80 B0 40 41 9C 00 A4
+
+. 0 10010B4C 116
+. 80 01 00 94 81 81 00 1C 81 C1 00 20 7C 08 03 A6 81 E1 00 24 7D 80 81 20 82 01 00 28 82 21 00 2C 82 41 00 30 82 61 00 34 82 81 00 38 82 A1 00 3C 82 C1 00 40 82 E1 00 44 83 01 00 48 83 21 00 4C 83 41 00 50 83 61 00 54 83 81 00 58 83 A1 00 5C 83 C1 00 60 83 E1 00 64 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10001BD4 36
+. 3C 80 10 01 7F 86 E3 78 38 84 6E EC 7F C5 F3 78 7C 76 1B 78 20 03 00 00 7C 60 19 14 4C C6 31 82 48 01 39 F5
+
+PASS: gsl_fft_real_wavetable_alloc, n = 1, stride = 1
+. 0 10001BF8 8
+. 7F C3 F3 78 48 00 F0 91
+
+. 0 10010C8C 60
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 A1 00 14 7C 7D 1B 79 90 01 00 24 93 E1 00 1C 38 60 00 08 80 1E FF F0 38 A0 00 84 38 C0 00 01 7F C0 F2 14 41 82 00 5C
+
+. 0 10010CC8 4
+. 48 01 82 6D
+
+. 0 10010CCC 20
+. 38 A0 00 8C 7C 7F 1B 79 38 C0 00 08 57 A3 18 38 41 82 00 74
+
+. 0 10010CE0 8
+. 93 BF 00 00 48 01 82 51
+
+. 0 10010CE8 24
+. 2F 83 00 00 7C 60 1B 78 90 1F 00 04 7F E3 FB 78 7F E0 FB 78 41 9E 00 68
+
+. 0 10010D00 32
+. 7C 03 03 78 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10001C00 32
+. 3C 80 10 01 38 84 6F 20 7F C5 F3 78 7C 78 1B 78 20 03 00 00 7C 60 19 14 4C C6 31 82 48 01 39 CD
+
+PASS: gsl_fft_real_workspace_alloc, n = 1
+. 0 10001C20 20
+. 7F 84 E3 78 7F 65 DB 78 7F A6 EB 78 7F C3 F3 78 48 00 34 E5
+
+. 0 10001C34 16
+. 7F 23 CB 78 7F 64 DB 78 57 45 20 36 48 02 73 35
+
+. 0 10001C44 8
+. 2F 9E 00 00 41 9E 00 24
+
+. 0 10001C4C 32
+. 7F C9 03 A6 39 60 00 00 55 60 20 36 55 69 18 38 7C 1B 04 AE 7D 6B E2 14 7C 09 FD AE 42 00 FF EC
+
+. 0 10001C6C 28
+. 7F 07 C3 78 7F 84 E3 78 7F C5 F3 78 7E C6 B3 78 7F E3 FB 78 3E E0 10 02 48 00 F1 95
+
+. 0 10010E18 216
+. 94 21 FE F0 7C 08 02 A6 42 9F 00 05 92 21 00 44 D9 C1 00 80 39 20 00 00 D9 E1 00 88 3A 20 00 01 DA 01 00 90 DA 21 00 98 DA 41 00 A0 DA 61 00 A8 DA 81 00 B0 DA A1 00 B8 DA C1 00 C0 DA E1 00 C8 DB 01 00 D0 DB 21 00 D8 DB 41 00 E0 DB 61 00 E8 DB 81 00 F0 DB A1 00 F8 DB C1 01 00 DB E1 01 08 91 C1 00 38 91 E1 00 3C 92 01 00 40 92 41 00 48 92 61 00 4C 92 81 00 50 92 A1 00 54 92 C1 00 58 92 E1 00 5C 93 01 00 60 93 21 00 64 93 41 00 68 93 61 00 6C 93 81 00 70 93 C1 00 78 7F C8 02 A6 90 01 01 14 81 07 00 04 80 1E FF F0 93 A1 00 74 7F C0 F2 14 7C A0 2B 79 93 E1 00 7C 90 01 00 18 90 61 00 10 90 81 00 14 81 46 00 04 91 01 00 20 91 21 00 24 41 82 16 40
+
+. 0 10010EF0 16
+. 80 01 00 18 38 60 00 00 2F 80 00 01 41 9E 09 2C
+
+. 0 10011828 160
+. 80 01 01 14 81 C1 00 38 81 E1 00 3C 7C 08 03 A6 82 01 00 40 82 21 00 44 82 41 00 48 82 61 00 4C 82 81 00 50 82 A1 00 54 82 C1 00 58 82 E1 00 5C 83 01 00 60 83 21 00 64 83 41 00 68 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C C9 C1 00 80 C9 E1 00 88 CA 01 00 90 CA 21 00 98 CA 41 00 A0 CA 61 00 A8 CA 81 00 B0 CA A1 00 B8 CA C1 00 C0 CA E1 00 C8 CB 01 00 D0 CB 21 00 D8 CB 41 00 E0 CB 61 00 E8 CB 81 00 F0 CB A1 00 F8 CB C1 01 00 CB E1 01 08 38 21 01 10 4E 80 00 20
+
+. 0 10001C88 20
+. 7F 64 DB 78 7F 85 E3 78 7F C6 F3 78 7F E3 FB 78 48 00 C8 05
+
+. 0 1000E49C 56
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 2C 06 00 00 90 01 00 24 93 E1 00 1C 7C 7F 1B 78 80 1E FF F0 7C A3 2B 78 93 A1 00 14 7F C0 F2 14 41 82 00 D4
+
+. 0 1000E4D4 40
+. 39 26 FF FF 83 BE 81 C8 2B 89 00 01 C8 1F 00 00 C9 BD 00 00 38 A0 00 01 D8 04 00 00 7D 28 4B 78 D9 A4 00 08 40 9D 00 58
+
+. 0 1000E550 8
+. 7F 88 28 00 41 9E 00 24
+
+. 0 1000E558 32
+. 80 01 00 24 38 60 00 00 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10001C9C 40
+. C8 37 81 10 3C A0 10 01 3C 60 10 01 7F 87 E3 78 7F C8 F3 78 38 A5 69 3C 7F A4 EB 78 7F 66 DB 78 38 63 69 4C 4B FF EB 59
+
+. 0 10001CC4 24
+. 3C 80 10 01 38 84 6F 48 7F C5 F3 78 7F 86 E3 78 4C C6 31 82 48 01 39 11
+
+PASS: gsl_fft_real with signal_real_noise, n = 1, stride = 1
+. 0 10001CDC 8
+. 7F C3 F3 78 48 00 A8 61
+
+. 0 1000C540 136
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 60 7F C8 02 A6 7D 80 00 26 92 E1 00 44 7C 77 1B 79 90 01 00 94 80 1E FF F0 DB 61 00 68 DB 81 00 70 7F C0 F2 14 DB A1 00 78 DB C1 00 80 DB E1 00 88 91 C1 00 20 91 E1 00 24 92 01 00 28 92 21 00 2C 92 41 00 30 92 61 00 34 92 81 00 38 92 A1 00 3C 92 C1 00 40 93 01 00 48 93 21 00 4C 93 41 00 50 93 61 00 54 93 81 00 58 93 A1 00 5C 93 E1 00 64 91 81 00 1C 41 82 01 F0
+
+. 0 1000C5C8 8
+. 38 60 02 0C 48 01 C9 69
+
+. 0 1000C5D0 12
+. 7C 69 1B 79 91 21 00 14 41 82 02 54
+
+. 0 1000C5DC 8
+. 56 E3 20 36 48 01 C9 55
+
+. 0 1000C5E4 16
+. 81 21 00 14 2F 83 00 00 90 69 02 08 41 9E 02 58
+
+. 0 1000C5F4 24
+. 81 21 00 14 7E E3 BB 78 38 81 00 10 92 E9 00 00 38 A9 00 08 4B FF A0 ED
+
+. 0 100066F4 92
+. 94 21 FF C0 7C 08 02 A6 42 9F 00 05 93 C1 00 38 7F C8 02 A6 7C A6 2B 78 90 01 00 44 7C 85 23 78 38 81 00 10 80 1E FF F0 7F C0 F2 14 81 3E 80 0C 80 09 00 00 80 E9 00 10 81 69 00 04 81 49 00 08 81 09 00 0C 90 01 00 10 91 61 00 14 91 41 00 18 91 01 00 1C 90 E1 00 20 4B FF FD 2D
+
+. 0 10006750 20
+. 80 01 00 44 83 C1 00 38 38 21 00 40 7C 08 03 A6 4E 80 00 20
+
+. 0 1000C60C 8
+. 2F 83 00 00 40 9E 01 EC
+
+. 0 1000C614 68
+. 81 61 00 10 3C 00 43 30 81 21 00 14 3A C0 00 00 7F 96 58 40 39 C0 00 01 91 69 00 04 81 3E 81 54 90 01 00 08 92 E1 00 0C C9 89 00 00 C9 A1 00 08 81 3E 81 58 FD AD 60 28 C8 09 00 00 FF A0 68 24 40 9C 00 DC
+
+. 0 1000C658 60
+. 81 21 00 14 FF 60 60 90 7D 6F 5B 78 3B 20 00 00 83 09 02 08 7D 31 4B 78 7F 1A C3 78 82 71 00 08 3A 80 00 01 7D D2 73 78 93 51 01 08 7D CE 99 D6 7F 94 98 40 7D 37 73 96 40 9C 00 94
+
+. 0 1000C724 12
+. 35 EF FF FF 3A 31 00 04 40 82 FF 48
+
+. 0 1000C730 16
+. 56 E0 F8 7E 80 61 00 14 7F 80 B0 40 41 9C 00 94
+
+. 0 1000C740 116
+. 80 01 00 94 81 81 00 1C 81 C1 00 20 7C 08 03 A6 81 E1 00 24 7D 80 81 20 82 01 00 28 82 21 00 2C 82 41 00 30 82 61 00 34 82 81 00 38 82 A1 00 3C 82 C1 00 40 82 E1 00 44 83 01 00 48 83 21 00 4C 83 41 00 50 83 61 00 54 83 81 00 58 83 A1 00 5C 83 C1 00 60 83 E1 00 64 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10001CE4 36
+. 3C 80 10 01 7C 7A 1B 78 38 84 6F 84 20 03 00 00 7C 60 19 14 7F C5 F3 78 7F 86 E3 78 4C C6 31 82 48 01 38 E5
+
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 1, stride = 1
+. 0 10001D08 24
+. 7F E3 FB 78 7F 84 E3 78 7F C5 F3 78 7F 46 D3 78 7F 07 C3 78 48 00 AB 9D
+
+. 0 1000C8B8 200
+. 94 21 FE F0 7C 08 02 A6 42 9F 00 05 D9 C1 00 80 D9 E1 00 88 DA 01 00 90 DA 21 00 98 DA 41 00 A0 DA 61 00 A8 DA 81 00 B0 DA A1 00 B8 DA C1 00 C0 DA E1 00 C8 DB 01 00 D0 DB 21 00 D8 DB 41 00 E0 DB 61 00 E8 DB 81 00 F0 DB A1 00 F8 DB C1 01 00 DB E1 01 08 91 C1 00 38 91 E1 00 3C 92 01 00 40 92 21 00 44 92 41 00 48 92 61 00 4C 92 81 00 50 92 A1 00 54 92 C1 00 58 92 E1 00 5C 93 01 00 60 93 21 00 64 93 41 00 68 93 61 00 6C 93 81 00 70 93 C1 00 78 7F C8 02 A6 90 01 01 14 81 07 00 04 80 1E FF F0 93 A1 00 74 7F C0 F2 14 7C A0 2B 79 93 E1 00 7C 90 01 00 18 90 61 00 10 90 81 00 14 91 01 00 1C 41 82 10 58
+
+. 0 1000C980 16
+. 80 01 00 18 38 60 00 00 2F 80 00 01 41 9E 05 98
+
+. 0 1000CF24 160
+. 80 01 01 14 81 C1 00 38 81 E1 00 3C 7C 08 03 A6 82 01 00 40 82 21 00 44 82 41 00 48 82 61 00 4C 82 81 00 50 82 A1 00 54 82 C1 00 58 82 E1 00 5C 83 01 00 60 83 21 00 64 83 41 00 68 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C C9 C1 00 80 C9 E1 00 88 CA 01 00 90 CA 21 00 98 CA 41 00 A0 CA 61 00 A8 CA 81 00 B0 CA A1 00 B8 CA C1 00 C0 CA E1 00 C8 CB 01 00 D0 CB 21 00 D8 CB 41 00 E0 CB 61 00 E8 CB 81 00 F0 CB A1 00 F8 CB C1 01 00 CB E1 01 08 38 21 01 10 4E 80 00 20
+
+. 0 10001D20 8
+. 2F 9E 00 00 41 9E 00 44
+
+. 0 10001D28 64
+. 3C 00 43 30 3D 20 10 02 90 01 00 08 39 29 80 F8 93 C1 00 0C 7F C9 03 A6 C8 09 00 00 38 00 00 00 C9 A1 00 08 57 89 18 38 FD AD 00 28 7C 1F 04 AE FC 00 68 24 7C 1F 05 AE 7C 00 4A 14 42 00 FF F0
+
+. 0 10001D68 20
+. 7F 64 DB 78 7F 85 E3 78 7F C6 F3 78 7F E3 FB 78 48 01 0B 0D
+
+. 0 10001D7C 40
+. C8 37 81 10 3C A0 10 01 3C 60 10 01 7F 87 E3 78 7F C8 F3 78 7F 24 CB 78 7F 66 DB 78 38 A5 69 98 38 63 69 90 4B FF EA 79
+
+. 0 10001DA4 24
+. 3C 80 10 01 7F C5 F3 78 7F 86 E3 78 38 84 6F C0 4C C6 31 82 48 01 38 31
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 1, stride = 1
+. 0 10001DBC 8
+. 7F 03 C3 78 48 00 F0 0D
+
+. 0 10010DCC 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 00 04 48 01 81 9D
+
+. 0 10010DEC 16
+. 38 00 00 00 90 1D 00 04 7F A3 EB 78 48 01 81 8D
+
+. 0 10010DFC 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10001DC4 8
+. 7E C3 B3 78 48 00 EF BD
+
+. 0 10010D84 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 02 08 48 01 81 E5
+
+. 0 10010DA4 16
+. 38 00 00 00 90 1D 02 08 7F A3 EB 78 48 01 81 D5
+
+. 0 10010DB4 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10001DCC 8
+. 7F 43 D3 78 48 00 AA 9D
+
+. 0 1000C86C 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 02 08 48 01 C6 FD
+
+. 0 1000C88C 16
+. 38 00 00 00 90 1D 02 08 7F A3 EB 78 48 01 C6 ED
+
+. 0 1000C89C 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10001DD4 8
+. 7F E3 FB 78 48 02 71 AD
+
+. 0 10001DDC 8
+. 7F 63 DB 78 48 02 71 A5
+
+. 0 10001DE4 8
+. 7F 23 CB 78 48 02 71 9D
+
+. 0 10001DEC 8
+. 7F A3 EB 78 48 02 71 95
+
+. 0 10001DF4 56
+. 80 01 00 44 82 C1 00 18 82 E1 00 1C 7C 08 03 A6 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+
+. 0 10004720 16
+. 7F A3 EB 78 7F E4 FB 78 3B BD 00 01 4B FF F2 99
+
+. 0 100039C4 76
+. 94 21 FF C0 7C 08 02 A6 93 41 00 28 7F 44 19 D6 93 81 00 30 7C 7C 1B 78 90 01 00 44 93 21 00 24 57 43 10 3A 93 61 00 2C 93 A1 00 34 57 5D 18 38 93 C1 00 38 7C 9E 23 78 93 E1 00 3C 92 C1 00 18 92 E1 00 1C 93 01 00 20 48 02 55 29
+
+. 0 FEEF714 16
+. 81 5C 00 04 3B 20 00 10 7C 8A C8 40 40 A4 FF 4C
+
+. 0 10003A10 12
+. 7C 7F 1B 78 7F A3 EB 78 48 02 55 1D
+
+. 0 10003A1C 12
+. 7C 7B 1B 78 7F A3 EB 78 48 02 55 11
+
+. 0 10003A28 12
+. 7C 79 1B 78 7F A3 EB 78 48 02 55 05
+
+. 0 10003A34 16
+. 2B 9A 00 00 7C 7D 1B 78 39 40 00 00 40 9D 00 3C
+
+. 0 10003A44 56
+. 3D 60 10 02 7F 49 03 A6 39 6B 80 F8 3D 20 43 30 C9 AB 00 00 91 41 00 0C 55 40 10 3A 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 FC 00 00 18 7C 1F 05 2E 42 00 FF E0
+
+. 0 10003A7C 16
+. 57 40 08 3C 39 40 00 00 2B 80 00 00 40 9D 00 70
+
+. 0 10003A8C 108
+. 3D 60 10 02 3D 20 10 02 C9 2B 81 00 3D 60 10 02 39 6B 80 F8 C9 09 80 E8 7C 09 03 A6 3D 20 10 02 C9 6B 00 00 C9 49 81 08 3D 20 43 30 91 41 00 0C 55 40 10 3A 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 58 28 FD A0 50 2A FD 80 40 2A FC 00 48 2A FD A0 68 18 FD 80 60 18 FC 00 00 18 7D BD 05 2E 7D 9B 05 2E 7C 19 05 2E 42 00 FF C4
+
+. 0 10003AB8 64
+. 91 41 00 0C 55 40 10 3A 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 58 28 FD A0 50 2A FD 80 40 2A FC 00 48 2A FD A0 68 18 FD 80 60 18 FC 00 00 18 7D BD 05 2E 7D 9B 05 2E 7C 19 05 2E 42 00 FF C4
+
+. 0 10003AF8 8
+. 38 60 00 00 48 01 18 F9
+
+. 0 10003B00 8
+. 7F C3 F3 78 48 00 EE 29
+
+. 0 1001292C 136
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 60 7F C8 02 A6 7D 80 00 26 93 01 00 48 7C 78 1B 79 90 01 00 94 80 1E FF F0 DB 61 00 68 DB 81 00 70 7F C0 F2 14 DB A1 00 78 DB C1 00 80 DB E1 00 88 91 C1 00 20 91 E1 00 24 92 01 00 28 92 21 00 2C 92 41 00 30 92 61 00 34 92 81 00 38 92 A1 00 3C 92 C1 00 40 92 E1 00 44 93 21 00 4C 93 41 00 50 93 61 00 54 93 81 00 58 93 A1 00 5C 93 E1 00 64 91 81 00 1C 41 82 02 10
+
+. 0 100129B4 8
+. 38 60 02 0C 48 01 65 7D
+
+. 0 100129BC 12
+. 7C 69 1B 79 91 21 00 14 41 82 02 74
+
+. 0 100129C8 8
+. 2F 98 00 01 41 9E 01 E4
+
+. 0 10012BB0 16
+. 81 21 00 14 3A E0 00 00 92 E9 02 08 4B FF FE 2C
+
+. 0 100129E8 24
+. 81 21 00 14 7F 03 C3 78 38 81 00 10 93 09 00 00 38 A9 00 08 4B FF 3C 85
+
+. 0 10012A00 8
+. 2F 83 00 00 40 9E 02 04
+
+. 0 10012A08 68
+. 81 61 00 10 3C 00 43 30 81 21 00 14 3A C0 00 00 2B 8B 00 00 39 C0 00 01 91 69 00 04 81 3E 82 A0 90 01 00 08 93 01 00 0C C9 89 00 00 C9 A1 00 08 81 3E 82 A4 FD AD 60 28 C8 09 00 00 FF A0 68 24 40 9D 00 E4
+
+. 0 10012A4C 60
+. 81 21 00 14 FF 60 60 90 7D 6F 5B 78 3B 20 00 00 82 E9 02 08 7D 31 4B 78 7E FA BB 78 82 71 00 08 3A 80 00 01 7D C9 73 78 93 51 01 08 7D CE 99 D6 7F 94 98 40 7E 58 73 96 40 9C 00 9C
+
+. 0 10012B20 12
+. 35 EF FF FF 3A 31 00 04 40 82 FF 40
+
+. 0 10012B2C 16
+. 57 00 F8 7E 80 61 00 14 7F 80 B0 40 41 9C 00 A4
+
+. 0 10012B3C 116
+. 80 01 00 94 81 81 00 1C 81 C1 00 20 7C 08 03 A6 81 E1 00 24 7D 80 81 20 82 01 00 28 82 21 00 2C 82 41 00 30 82 61 00 34 82 81 00 38 82 A1 00 3C 82 C1 00 40 82 E1 00 44 83 01 00 48 83 21 00 4C 83 41 00 50 83 61 00 54 83 81 00 58 83 A1 00 5C 83 C1 00 60 83 E1 00 64 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10003B08 36
+. 3C 80 10 01 7F 86 E3 78 38 84 7A 78 7F C5 F3 78 7C 76 1B 78 20 03 00 00 7C 60 19 14 4C C6 31 82 48 01 1A C1
+
+PASS: gsl_fft_real_wavetable_float_alloc, n = 1, stride = 1
+. 0 10003B2C 8
+. 7F C3 F3 78 48 00 F1 4D
+
+. 0 10012C7C 60
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 A1 00 14 7C 7D 1B 79 90 01 00 24 93 E1 00 1C 38 60 00 08 80 1E FF F0 38 A0 00 84 38 C0 00 01 7F C0 F2 14 41 82 00 5C
+
+. 0 10012CB8 4
+. 48 01 62 7D
+
+. 0 10012CBC 20
+. 38 A0 00 8C 7C 7F 1B 79 38 C0 00 08 57 A3 10 3A 41 82 00 74
+
+. 0 10012CD0 8
+. 93 BF 00 00 48 01 62 61
+
+. 0 10012CD8 24
+. 2F 83 00 00 7C 60 1B 78 90 1F 00 04 7F E3 FB 78 7F E0 FB 78 41 9E 00 68
+
+. 0 10012CF0 32
+. 7C 03 03 78 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10003B34 32
+. 3C 80 10 01 38 84 7A B0 7F C5 F3 78 7C 78 1B 78 20 03 00 00 7C 60 19 14 4C C6 31 82 48 01 1A 99
+
+PASS: gsl_fft_real_workspace_float_alloc, n = 1
+. 0 10003B54 20
+. 7F 84 E3 78 7F 65 DB 78 7F A6 EB 78 7F C3 F3 78 48 00 1F D5
+
+. 0 10005B38 64
+. 94 21 FF D0 7C 08 02 A6 93 61 00 1C 7C 7B 1B 79 93 01 00 10 7C D8 33 78 93 21 00 14 7C 99 23 78 93 A1 00 24 7C BD 2B 78 93 41 00 18 93 81 00 20 93 C1 00 28 93 E1 00 2C 90 01 00 34 41 82 00 80
+
+. 0 10005B78 8
+. 2F 9B 00 00 41 9E 00 34
+
+. 0 10005B80 20
+. 3B 40 00 00 3B C0 00 00 54 9C 18 38 7F 7F DB 78 4B FF EB FD
+
+. 0 10005B94 28
+. 37 FF FF FF FC 20 08 18 7D 3E EA 14 7C 3E ED 2E 7F DE E2 14 93 49 00 04 40 82 FF E4
+
+. 0 10005BB0 20
+. 7F A3 EB 78 7F 24 CB 78 7F 65 DB 78 7F 06 C3 78 48 00 05 7D
+
+. 0 10005BC4 48
+. 80 01 00 34 83 01 00 10 83 21 00 14 7C 08 03 A6 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 10003B68 16
+. 7F 23 CB 78 7F 64 DB 78 57 45 18 38 48 02 54 01
+
+. 0 10003B78 8
+. 2F 9E 00 00 41 9E 00 24
+
+. 0 10003B80 32
+. 7F C9 03 A6 39 60 00 00 55 60 18 38 55 69 10 3A 7C 1B 04 2E 7D 6B E2 14 7C 09 FD 2E 42 00 FF EC
+
+. 0 10003BA0 28
+. 7F 07 C3 78 7F 84 E3 78 7F C5 F3 78 7E C6 B3 78 7F E3 FB 78 3E E0 10 02 48 00 F2 51
+
+. 0 10012E08 216
+. 94 21 FE E0 7C 08 02 A6 42 9F 00 05 92 21 00 54 D9 C1 00 90 39 20 00 00 D9 E1 00 98 3A 20 00 01 DA 01 00 A0 DA 21 00 A8 DA 41 00 B0 DA 61 00 B8 DA 81 00 C0 DA A1 00 C8 DA C1 00 D0 DA E1 00 D8 DB 01 00 E0 DB 21 00 E8 DB 41 00 F0 DB 61 00 F8 DB 81 01 00 DB A1 01 08 DB C1 01 10 DB E1 01 18 91 C1 00 48 91 E1 00 4C 92 01 00 50 92 41 00 58 92 61 00 5C 92 81 00 60 92 A1 00 64 92 C1 00 68 92 E1 00 6C 93 01 00 70 93 21 00 74 93 41 00 78 93 61 00 7C 93 81 00 80 93 C1 00 88 7F C8 02 A6 90 01 01 24 81 07 00 04 80 1E FF F0 93 A1 00 84 7F C0 F2 14 7C A0 2B 79 93 E1 00 8C 90 01 00 18 90 61 00 10 90 81 00 14 81 46 00 04 91 01 00 20 91 21 00 24 41 82 16 B4
+
+. 0 10012EE0 16
+. 80 01 00 18 38 60 00 00 2F 80 00 01 41 9E 09 68
+
+. 0 10013854 160
+. 80 01 01 24 81 C1 00 48 81 E1 00 4C 7C 08 03 A6 82 01 00 50 82 21 00 54 82 41 00 58 82 61 00 5C 82 81 00 60 82 A1 00 64 82 C1 00 68 82 E1 00 6C 83 01 00 70 83 21 00 74 83 41 00 78 83 61 00 7C 83 81 00 80 83 A1 00 84 83 C1 00 88 83 E1 00 8C C9 C1 00 90 C9 E1 00 98 CA 01 00 A0 CA 21 00 A8 CA 41 00 B0 CA 61 00 B8 CA 81 00 C0 CA A1 00 C8 CA C1 00 D0 CA E1 00 D8 CB 01 00 E0 CB 21 00 E8 CB 41 00 F0 CB 61 00 F8 CB 81 01 00 CB A1 01 08 CB C1 01 10 CB E1 01 18 38 21 01 20 4E 80 00 20
+
+. 0 10003BBC 20
+. 7F 64 DB 78 7F 85 E3 78 7F C6 F3 78 7F E3 FB 78 48 00 CB 15
+
+. 0 100106E0 56
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 2C 06 00 00 90 01 00 24 93 E1 00 1C 7C 7F 1B 78 80 1E FF F0 7C A3 2B 78 93 A1 00 14 7F C0 F2 14 41 82 00 D4
+
+. 0 10010718 40
+. 39 26 FF FF 83 BE 82 38 2B 89 00 01 C0 1F 00 00 C1 BD 00 00 38 A0 00 01 D0 04 00 00 7D 28 4B 78 D1 A4 00 04 40 9D 00 58
+
+. 0 10010794 8
+. 7F 88 28 00 41 9E 00 24
+
+. 0 1001079C 32
+. 80 01 00 24 38 60 00 00 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10003BD0 40
+. C8 37 81 10 3C A0 10 01 3C 60 10 01 7F 87 E3 78 7F C8 F3 78 38 A5 69 3C 7F A4 EB 78 7F 66 DB 78 38 63 69 4C 4B FF EB 11
+
+. 0 10003BF8 24
+. 3C 80 10 01 38 84 7A DC 7F C5 F3 78 7F 86 E3 78 4C C6 31 82 48 01 19 DD
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 1, stride = 1
+. 0 10003C10 8
+. 7F C3 F3 78 48 00 AA ED
+
+. 0 1000E700 136
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 60 7F C8 02 A6 7D 80 00 26 92 E1 00 44 7C 77 1B 79 90 01 00 94 80 1E FF F0 DB 61 00 68 DB 81 00 70 7F C0 F2 14 DB A1 00 78 DB C1 00 80 DB E1 00 88 91 C1 00 20 91 E1 00 24 92 01 00 28 92 21 00 2C 92 41 00 30 92 61 00 34 92 81 00 38 92 A1 00 3C 92 C1 00 40 93 01 00 48 93 21 00 4C 93 41 00 50 93 61 00 54 93 81 00 58 93 A1 00 5C 93 E1 00 64 91 81 00 1C 41 82 01 F8
+
+. 0 1000E788 8
+. 38 60 02 0C 48 01 A7 A9
+
+. 0 1000E790 12
+. 7C 69 1B 79 91 21 00 14 41 82 02 5C
+
+. 0 1000E79C 8
+. 56 E3 18 38 48 01 A7 95
+
+. 0 1000E7A4 16
+. 81 21 00 14 2F 83 00 00 90 69 02 08 41 9E 02 60
+
+. 0 1000E7B4 24
+. 81 21 00 14 7E E3 BB 78 38 81 00 10 92 E9 00 00 38 A9 00 08 4B FF 7F 2D
+
+. 0 1000E7CC 8
+. 2F 83 00 00 40 9E 01 F4
+
+. 0 1000E7D4 68
+. 81 61 00 10 3C 00 43 30 81 21 00 14 3A C0 00 00 7F 96 58 40 39 C0 00 01 91 69 00 04 81 3E 81 D0 90 01 00 08 92 E1 00 0C C9 89 00 00 C9 A1 00 08 81 3E 81 D4 FD AD 60 28 C8 09 00 00 FF A0 68 24 40 9C 00 E4
+
+. 0 1000E818 60
+. 81 21 00 14 FF 60 60 90 7D 6F 5B 78 3B 20 00 00 83 09 02 08 7D 31 4B 78 7F 1A C3 78 82 71 00 08 3A 80 00 01 7D D2 73 78 93 51 01 08 7D CE 99 D6 7F 94 98 40 7D 37 73 96 40 9C 00 9C
+
+. 0 1000E8EC 12
+. 35 EF FF FF 3A 31 00 04 40 82 FF 40
+
+. 0 1000E8F8 16
+. 56 E0 F8 7E 80 61 00 14 7F 80 B0 40 41 9C 00 94
+
+. 0 1000E908 116
+. 80 01 00 94 81 81 00 1C 81 C1 00 20 7C 08 03 A6 81 E1 00 24 7D 80 81 20 82 01 00 28 82 21 00 2C 82 41 00 30 82 61 00 34 82 81 00 38 82 A1 00 3C 82 C1 00 40 82 E1 00 44 83 01 00 48 83 21 00 4C 83 41 00 50 83 61 00 54 83 81 00 58 83 A1 00 5C 83 C1 00 60 83 E1 00 64 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10003C18 36
+. 3C 80 10 01 7C 7A 1B 78 38 84 7B 1C 20 03 00 00 7C 60 19 14 7F C5 F3 78 7F 86 E3 78 4C C6 31 82 48 01 19 B1
+
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 1, stride = 1
+. 0 10003C3C 24
+. 7F E3 FB 78 7F 84 E3 78 7F C5 F3 78 7F 46 D3 78 7F 07 C3 78 48 00 AE 31
+
+. 0 1000EA80 200
+. 94 21 FE F0 7C 08 02 A6 42 9F 00 05 D9 C1 00 80 D9 E1 00 88 DA 01 00 90 DA 21 00 98 DA 41 00 A0 DA 61 00 A8 DA 81 00 B0 DA A1 00 B8 DA C1 00 C0 DA E1 00 C8 DB 01 00 D0 DB 21 00 D8 DB 41 00 E0 DB 61 00 E8 DB 81 00 F0 DB A1 00 F8 DB C1 01 00 DB E1 01 08 91 C1 00 38 91 E1 00 3C 92 01 00 40 92 21 00 44 92 41 00 48 92 61 00 4C 92 81 00 50 92 A1 00 54 92 C1 00 58 92 E1 00 5C 93 01 00 60 93 21 00 64 93 41 00 68 93 61 00 6C 93 81 00 70 93 C1 00 78 7F C8 02 A6 90 01 01 14 81 07 00 04 80 1E FF F0 93 A1 00 74 7F C0 F2 14 7C A0 2B 79 93 E1 00 7C 90 01 00 18 90 61 00 10 90 81 00 14 91 01 00 1C 41 82 10 7C
+
+. 0 1000EB48 16
+. 80 01 00 18 38 60 00 00 2F 80 00 01 41 9E 05 9C
+
+. 0 1000F0F0 160
+. 80 01 01 14 81 C1 00 38 81 E1 00 3C 7C 08 03 A6 82 01 00 40 82 21 00 44 82 41 00 48 82 61 00 4C 82 81 00 50 82 A1 00 54 82 C1 00 58 82 E1 00 5C 83 01 00 60 83 21 00 64 83 41 00 68 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C C9 C1 00 80 C9 E1 00 88 CA 01 00 90 CA 21 00 98 CA 41 00 A0 CA 61 00 A8 CA 81 00 B0 CA A1 00 B8 CA C1 00 C0 CA E1 00 C8 CB 01 00 D0 CB 21 00 D8 CB 41 00 E0 CB 61 00 E8 CB 81 00 F0 CB A1 00 F8 CB C1 01 00 CB E1 01 08 38 21 01 10 4E 80 00 20
+
+. 0 10003C54 8
+. 2F 9E 00 00 41 9E 00 48
+
+. 0 10003C5C 68
+. 3C 00 43 30 3D 20 10 02 90 01 00 08 39 29 80 F8 93 C1 00 0C 7F C9 03 A6 C8 09 00 00 38 00 00 00 C9 A1 00 08 57 89 10 3A FD AD 00 28 FD A0 68 18 7C 1F 04 2E EC 00 68 24 7C 1F 05 2E 7C 00 4A 14 42 00 FF F0
+
+. 0 10003CA0 20
+. 7F 64 DB 78 7F 85 E3 78 7F C6 F3 78 7F E3 FB 78 48 01 0C 41
+
+. 0 100148F0 40
+. 7C 08 02 A6 94 21 FF F0 42 9F 00 05 93 C1 00 08 7F C8 02 A6 2C 06 00 00 90 01 00 14 80 1E FF F0 7F C0 F2 14 41 82 00 54
+
+. 0 10014918 8
+. 2F 86 00 00 41 9E 00 34
+
+. 0 10014920 48
+. 81 3E 82 F0 7C C9 03 A6 39 40 00 00 C1 A9 00 00 55 40 10 3A 55 49 18 38 7C 03 04 2E 7D 69 22 14 7D 4A 2A 14 7C 09 25 2E D1 AB 00 04 42 00 FF E4
+
+. 0 10014950 24
+. 80 01 00 14 38 60 00 00 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10003CB4 40
+. C8 37 81 10 3C A0 10 01 3C 60 10 01 7F 87 E3 78 7F C8 F3 78 7F 24 CB 78 7F 66 DB 78 38 A5 69 98 38 63 69 90 4B FF EA 2D
+
+. 0 10003CDC 24
+. 3C 80 10 01 7F C5 F3 78 7F 86 E3 78 38 84 7B 5C 4C C6 31 82 48 01 18 F9
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 1, stride = 1
+. 0 10003CF4 8
+. 7F 03 C3 78 48 00 F0 C5
+
+. 0 10012DBC 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 00 04 48 01 61 AD
+
+. 0 10012DDC 16
+. 38 00 00 00 90 1D 00 04 7F A3 EB 78 48 01 61 9D
+
+. 0 10012DEC 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10003CFC 8
+. 7E C3 B3 78 48 00 F0 75
+
+. 0 10012D74 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 02 08 48 01 61 F5
+
+. 0 10012D94 16
+. 38 00 00 00 90 1D 02 08 7F A3 EB 78 48 01 61 E5
+
+. 0 10012DA4 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10003D04 8
+. 7F 43 D3 78 48 00 AD 2D
+
+. 0 1000EA34 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 02 08 48 01 A5 35
+
+. 0 1000EA54 16
+. 38 00 00 00 90 1D 02 08 7F A3 EB 78 48 01 A5 25
+
+. 0 1000EA64 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10003D0C 8
+. 7F E3 FB 78 48 02 52 75
+
+. 0 10003D14 8
+. 7F 63 DB 78 48 02 52 6D
+
+. 0 10003D1C 8
+. 7F 23 CB 78 48 02 52 65
+
+. 0 10003D24 8
+. 7F A3 EB 78 48 02 52 5D
+
+. 0 10003D2C 56
+. 80 01 00 44 82 C1 00 18 82 E1 00 1C 7C 08 03 A6 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+
+. 0 10004730 8
+. 2B 9D 00 03 40 9D FF C8
+
+. 0 100046FC 12
+. 7F A3 EB 78 7F E4 FB 78 4B FF C5 E1
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 1, stride = 2
+. 0 FEEF74C 40
+. 82 FE 05 FC 7E 83 CA 14 82 D4 00 04 7E 1C B8 00 82 63 00 0C 62 D5 00 01 92 B4 00 04 91 53 00 08 92 6A 00 0C 41 92 00 10
+
+. 0 FEEF780 8
+. 39 03 00 08 4B FF FF 2C
+
+PASS: gsl_fft_complex_workspace_alloc, n = 1
+PASS: gsl_fft_complex_forward with signal_noise, n = 1, stride = 2
+. 0 100012C8 20
+. 7F 44 D3 78 7F C5 F3 78 38 C0 00 00 7F 63 DB 78 4B FF F9 59
+
+. 0 100012DC 24
+. 3C 80 10 01 38 84 6B B8 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 42 F9
+
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 1, stride = 2
+. 0 100012F4 4
+. 4B FF FC 30
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 1, stride = 2
+. 0 10001298 20
+. 7F 44 D3 78 7F C5 F3 78 38 C0 00 00 7F 63 DB 78 4B FF F9 89
+
+. 0 100012AC 24
+. 3C 80 10 01 38 84 6B 74 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 43 29
+
+PASS: gsl_fft_complex_inverse other data untouched, n = 1, stride = 2
+. 0 100012C4 4
+. 4B FF FC BC
+
+PASS: gsl_fft_complex_backward with signal_noise, n = 1, stride = 2
+. 0 10001268 20
+. 7F 44 D3 78 7F C5 F3 78 38 C0 0B B8 7E E3 BB 78 4B FF F9 B9
+
+. 0 1000127C 24
+. 3C 80 10 01 38 84 6B 30 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 43 59
+
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 1, stride = 2
+. 0 10001294 4
+. 4B FF FD A4
+
+PASS: gsl_fft_complex_forward with signal_pulse, n = 1, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 1, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 1, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 1, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 1
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 1, stride = 2
+. 0 100031D4 20
+. 7F 44 D3 78 7F C5 F3 78 38 C0 00 00 7F 63 DB 78 4B FF F9 3D
+
+. 0 10002B20 20
+. 39 40 00 00 94 21 FF F0 7F 8A 28 40 39 60 00 00 40 9C 00 A0
+
+. 0 10002B34 16
+. 7C A7 2B 78 2B 84 00 01 39 6B 00 02 40 9D 00 88
+
+. 0 10002B44 132
+. 3D 20 10 02 38 04 FF FF 39 29 80 F8 7C 09 03 A6 C9 49 00 00 3D 00 43 30 55 69 10 3A 7C 0B 32 14 39 6B 00 01 90 01 00 0C 91 01 00 08 7C 0B 32 14 C9 A1 00 08 90 01 00 0C 55 60 10 3A C8 01 00 08 FD AD 50 28 7D 69 1C 2E 39 6B 00 01 FC 00 50 28 7D 83 04 2E FD A0 68 18 FC 00 00 18 FF 8B 68 00 FF 0C 00 00 4F DE F0 42 4F 5A D0 42 7D 20 00 26 55 20 FF FE 55 29 DF FE 7D 4A 03 78 7D 4A 4B 78 42 00 FF 98
+
+. 0 10002BC8 8
+. 34 E7 FF FF 40 82 FF 6C
+
+. 0 10002BD0 12
+. 7D 43 53 78 38 21 00 10 4E 80 00 20
+
+. 0 100031E8 24
+. 3C 80 10 01 38 84 77 68 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 23 ED
+
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 1, stride = 2
+. 0 10003200 4
+. 4B FF FC 2C
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 1, stride = 2
+. 0 100031A4 20
+. 7F 44 D3 78 7F C5 F3 78 38 C0 00 00 7F 63 DB 78 4B FF F9 6D
+
+. 0 100031B8 24
+. 3C 80 10 01 38 84 77 20 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 24 1D
+
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 1, stride = 2
+. 0 100031D0 4
+. 4B FF FC B8
+
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 1, stride = 2
+. 0 10003174 20
+. 7F 44 D3 78 7F C5 F3 78 38 C0 0B B8 7E E3 BB 78 4B FF F9 9D
+
+. 0 10003188 24
+. 3C 80 10 01 38 84 76 D4 7F C5 F3 78 7F 46 D3 78 4C C6 31 82 48 01 24 4D
+
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 1, stride = 2
+. 0 100031A0 4
+. 4B FF FD A4
+
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 1, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 1, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 1, stride = 2
+. 0 10001B34 32
+. 91 41 00 0C 55 40 18 38 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 7C 1F 05 AE 42 00 FF E4
+
+PASS: gsl_fft_real_wavetable_alloc, n = 1, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 1
+PASS: gsl_fft_real with signal_real_noise, n = 1, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 1, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 1, stride = 2
+. 0 10003A58 36
+. 91 41 00 0C 55 40 10 3A 91 21 00 08 39 4A 00 01 C8 01 00 08 FC 00 68 28 FC 00 00 18 7C 1F 05 2E 42 00 FF E0
+
+PASS: gsl_fft_real_wavetable_float_alloc, n = 1, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 1
+PASS: gsl_fft_real_float with signal_real_noise, n = 1, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 1, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 1, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 1, stride = 3
+. 0 FEEFC34 20
+. 7F 4A 02 14 81 DA 00 04 61 C7 00 01 90 FA 00 04 41 9A 00 10
+
+. 0 FEEFC54 8
+. 39 0A 00 08 4B FF FA 58
+
+PASS: gsl_fft_complex_workspace_alloc, n = 1
+PASS: gsl_fft_complex_forward with signal_noise, n = 1, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 1, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 1, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 1, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 1, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 1, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 1, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 1, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 1, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 1, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 1
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 1, stride = 3
+. 0 10002B5C 108
+. 55 69 10 3A 7C 0B 32 14 39 6B 00 01 90 01 00 0C 91 01 00 08 7C 0B 32 14 C9 A1 00 08 90 01 00 0C 55 60 10 3A C8 01 00 08 FD AD 50 28 7D 69 1C 2E 39 6B 00 01 FC 00 50 28 7D 83 04 2E FD A0 68 18 FC 00 00 18 FF 8B 68 00 FF 0C 00 00 4F DE F0 42 4F 5A D0 42 7D 20 00 26 55 20 FF FE 55 29 DF FE 7D 4A 03 78 7D 4A 4B 78 42 00 FF 98
+
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 1, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 1, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 1, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 1, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 1, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 1, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 1, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 1, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 1, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 1
+PASS: gsl_fft_real with signal_real_noise, n = 1, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 1, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 1, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 1, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 1
+PASS: gsl_fft_real_float with signal_real_noise, n = 1, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 1, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 1, stride = 3
+. 0 10004738 12
+. 3B FF 00 01 7F 9F F0 40 40 9D FF B8
+
+. 0 100064BC 36
+. 81 24 00 00 68 6B 00 01 30 09 FF FF 7D 40 49 10 31 2B FF FF 7C 09 59 10 7D 49 00 39 7D 6A 5B 78 41 82 00 5C
+
+. 0 100064E0 16
+. 39 80 00 00 7D 6C 20 2E 7D 28 5B 96 48 00 00 18
+
+. 0 10006504 12
+. 7C 09 59 D6 7F 88 00 00 41 9E FF E4
+
+. 0 10006510 40
+. 38 A5 00 01 69 0A 00 01 54 AC 10 3A 7D 2C 20 2E 30 09 FF FF 7D 60 49 10 31 2A FF FF 7C 09 51 10 7D 69 00 39 40 82 FF B0
+
+. 0 100064E4 12
+. 7D 6C 20 2E 7D 28 5B 96 48 00 00 18
+
+. 0 100064F0 32
+. 7D 28 4B 78 7D 29 5B 96 54 E0 10 3A 7D 66 01 2E 38 E7 00 01 7C 09 59 D6 7F 88 00 00 41 9E FF E4
+
+. 0 10006538 20
+. 31 2A FF FF 7C 09 51 10 39 60 00 02 7C 0A 40 79 48 00 00 24
+
+. 0 1000656C 8
+. 55 00 F8 7E 40 82 FF DC
+
+. 0 10006574 12
+. 2F 88 00 01 39 60 00 03 40 BE 00 0C
+
+. 0 10006580 4
+. 48 00 00 30
+
+. 0 100065B0 16
+. 38 A0 00 00 39 60 00 01 7F 85 38 40 40 9C 00 1C
+
+. 0 100065C0 24
+. 7C E9 03 A6 54 A0 10 3A 38 A5 00 01 7D 26 00 2E 7D 6B 49 D6 42 00 FF F0
+
+. 0 100065D8 8
+. 7F 8B 18 00 40 9E 00 84
+
+. 0 100065E0 36
+. 38 00 00 00 90 FF 00 00 7C 03 03 78 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 1000698C 24
+. FF 80 D8 90 7E 39 8B 78 3B 80 00 01 3B E0 00 00 7F 9C B0 40 41 9D 00 64
+
+. 0 100069A4 68
+. 3E 80 43 30 FF C0 E0 90 7F FF CA 14 92 81 00 08 7C 1F BB 96 3B 9C 00 01 7F BA C2 14 3A B5 00 01 3B 7B 00 10 7C 00 B9 D6 7F E0 F8 50 93 E1 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 02 25 11
+
+. 0 100069E8 16
+. 7C 3A C5 AE FC 20 F8 90 3B 5A 00 10 48 02 25 A1
+
+. 0 100069F8 12
+. 7F 9C B0 40 D8 3D 00 08 40 9D FF AC
+
+. 0 10006A04 16
+. 3A 73 00 01 7F 39 8A 14 7F 93 90 40 41 9C FF 84
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 2, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 2
+. 0 10000E68 32
+. 7D 20 DA 14 7C 1B 04 AE C9 A9 00 08 7D 20 CA 14 7C 19 05 AE 7C 00 5A 14 D9 A9 00 08 42 00 FF E4
+
+. 0 10006ED0 16
+. 80 06 00 00 81 01 00 18 7F 80 40 00 40 9E 1E B8
+
+. 0 10006EE0 16
+. 80 07 00 00 80 E1 00 18 7F 80 38 00 40 9E 1E C4
+
+. 0 10006EF0 12
+. 80 01 00 28 7F 80 50 40 40 9C 06 A8
+
+. 0 10006EFC 96
+. 80 E1 00 1C 38 C6 01 08 81 01 00 18 81 21 00 18 2E 07 FF FF 91 41 00 20 55 08 F8 7E 55 29 F0 BE 7D 47 00 D0 4D 90 00 00 90 C1 01 88 91 01 01 80 91 21 01 84 91 41 01 8C 81 61 01 88 80 01 00 24 82 EB FF 00 81 81 00 28 7C 00 B9 D6 80 C1 00 18 2F 8C 00 00 90 01 00 24 7D E6 03 96 40 9E 06 F4
+
+. 0 10006F5C 28
+. 2F 97 00 02 3B 00 00 01 83 E1 00 10 83 21 00 14 83 41 00 2C 93 01 00 28 41 9E 06 F8
+
+. 0 1000766C 36
+. 3B 60 00 00 81 01 01 88 7F 9B 78 40 81 21 00 24 82 68 00 00 38 A0 00 00 38 E0 00 00 55 26 F8 7E 40 BC FE EC
+
+. 0 10007690 32
+. 81 41 00 1C 7E 7C 9B 78 82 DE 80 40 38 80 00 00 2F 0A FF FF 82 FE 80 44 2F 9B 00 00 40 9E 00 CC
+
+. 0 100076B0 16
+. C8 B6 00 00 C8 D7 00 00 2F 86 00 00 41 9E 00 A0
+
+. 0 100076C0 156
+. 7C 07 C1 D6 7C C9 03 A6 54 88 20 36 57 3D 20 36 57 03 20 36 54 0A 20 36 81 61 01 80 7D 28 FA 14 C9 29 00 08 7C 84 CA 14 7C 05 5A 14 7D 88 FC AE 7C 00 C9 D6 7D 6A D2 14 38 A5 00 01 7D 08 EA 14 54 00 20 36 7D 20 FA 14 7D 1F 04 AE C8 E9 00 08 7C 07 32 14 FC 0C 40 28 7C 00 C1 D6 FD A9 38 28 38 E7 00 01 FD 8C 40 2A FD 66 00 32 54 00 20 36 FD 46 03 72 7D 20 D2 14 7D 8A D5 AE 7D 4A 1A 14 FD A5 5B 7A FC 05 50 38 FD 29 38 2A D9 2B 00 08 7C 1A 05 AE D9 A9 00 08 42 00 FF 80
+
+. 0 1000775C 20
+. 3B 7B 00 01 7C E7 32 14 7F 9B 78 40 3B 9C 00 10 40 BC FE 0C
+
+. 0 10007578 28
+. 80 C1 01 88 80 01 00 20 38 C6 00 04 90 C1 01 88 34 C0 FF FF 90 C1 00 20 40 82 F9 A4
+
+. 0 10007594 12
+. 80 E1 00 28 2F 87 00 01 41 9E 0A 48
+
+. 0 10007FE4 20
+. 81 21 00 18 39 00 00 00 91 01 00 20 7F 88 48 40 40 BC F5 AC
+
+. 0 10007FF8 76
+. 81 41 00 14 7D 29 03 A6 38 00 00 00 55 44 20 36 81 61 00 20 81 81 00 2C 55 69 20 36 80 C1 00 10 7C 09 64 AE 7D 29 62 14 80 E1 00 20 7D 60 32 14 7C 06 05 AE 7C 00 22 14 C9 A9 00 08 38 E7 00 01 90 E1 00 20 D9 AB 00 08 42 00 FF C8
+
+. 0 10008008 60
+. 81 61 00 20 81 81 00 2C 55 69 20 36 80 C1 00 10 7C 09 64 AE 7D 29 62 14 80 E1 00 20 7D 60 32 14 7C 06 05 AE 7C 00 22 14 C9 A9 00 08 38 E7 00 01 90 E1 00 20 D9 AB 00 08 42 00 FF C8
+
+. 0 10008044 8
+. 38 60 00 00 4B FF F5 5C
+
+. 0 10000EB4 32
+. 7D 20 DA 14 7C 1B 04 AE C9 A9 00 08 7D 20 BA 14 7C 17 05 AE 7C 00 5A 14 D9 A9 00 08 42 00 FF E4
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 2, stride = 1
+. 0 10008E54 36
+. 7C 1F 04 AE 7D 20 FA 14 FC 00 03 32 7C 1F 05 AE 7C 00 22 14 C9 A9 00 08 FD AD 03 32 D9 A9 00 08 42 00 FF E0
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 2, stride = 1
+. 0 10000FCC 36
+. 7C 19 04 AE 7D 20 CA 14 FC 00 03 32 7C 19 05 AE 7C 00 5A 14 C9 A9 00 08 FD AD 03 32 D9 A9 00 08 42 00 FF E0
+
+PASS: gsl_fft_complex_backward with signal_noise, n = 2, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 2, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 2, stride = 1
+. 0 10001154 48
+. 3D 20 10 02 3D 60 10 02 39 29 81 18 39 6B 80 F0 C8 29 00 00 7F E3 FB 78 C8 4B 00 00 7F C4 F3 78 7F 45 D3 78 7F 66 DB 78 7F 07 C3 78 48 00 39 31
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 2, stride = 1
+. 0 100097B4 24
+. FF 80 D8 90 7E 39 8B 78 3B 80 00 01 3B E0 00 00 7F 9C B0 40 41 9D 00 6C
+
+. 0 100097CC 68
+. 3E 80 43 30 FF C0 E0 90 7F FF CA 14 92 81 00 08 7C 1F BB 96 3B 9C 00 01 7F BA C2 14 3A B5 00 01 3B 7B 00 08 7C 00 B9 D6 7F E0 F8 50 93 E1 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 F6 E9
+
+. 0 10009810 20
+. FC 00 08 18 FC 20 F8 90 7C 1A C5 2E 3B 5A 00 08 48 01 F7 75
+
+. 0 10009824 16
+. 7F 9C B0 40 FC 20 08 18 D0 3D 00 04 40 9D FF A4
+
+. 0 10009834 16
+. 3A 73 00 01 7F 39 8A 14 7F 93 90 40 41 9C FF 7C
+
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 2, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 2
+. 0 10005A7C 4
+. 4B FF ED 11
+
+. 0 10006050 28
+. 93 A1 00 14 92 E1 00 10 CB E1 00 10 FF FF D8 28 FF FF 06 B2 FC 20 F8 90 48 02 2E 8D
+
+. 0 1000602C 20
+. 2F 9B 00 00 C3 93 00 00 3B A0 00 00 FF A0 E0 90 41 9E 00 80
+
+. 0 10002D70 32
+. 7D 20 DA 14 7C 1B 04 2E C1 A9 00 04 7D 20 CA 14 7C 19 05 2E 7C 00 5A 14 D1 A9 00 04 42 00 FF E4
+
+. 0 10009D00 16
+. 80 06 00 00 81 01 00 18 7F 80 40 00 40 9E 1F 44
+
+. 0 10009D10 16
+. 80 07 00 00 80 E1 00 18 7F 80 38 00 40 9E 1F 50
+
+. 0 10009D20 12
+. 80 01 00 28 7F 80 50 40 40 9C 06 98
+
+. 0 10009D2C 88
+. 80 E1 00 1C 38 C6 01 08 81 01 00 18 81 21 00 18 2E 07 FF FF 55 08 F8 7E 90 C1 01 44 55 29 F0 BE 4D 90 00 00 91 41 00 20 91 01 01 3C 91 21 01 40 81 41 01 44 81 81 00 24 82 EA FF 00 81 61 00 28 7D 8C B9 D6 80 01 00 18 2F 8B 00 00 91 81 00 24 7D E0 63 96 40 9E 06 EC
+
+. 0 10009D84 28
+. 2F 97 00 02 3B 00 00 01 83 E1 00 10 83 21 00 14 83 41 00 2C 93 01 00 28 41 9E 06 F0
+
+. 0 1000A48C 36
+. 3B 60 00 00 81 01 01 44 7F 9B 78 40 81 21 00 24 82 88 00 00 38 A0 00 00 38 E0 00 00 55 26 F8 7E 40 BC FE EC
+
+. 0 1000A4B0 32
+. 81 41 00 1C 7E 9C A3 78 82 FE 80 CC 38 80 00 00 2F 0A FF FF 82 DE 80 D0 2F 9B 00 00 40 9E 00 CC
+
+. 0 1000A4D0 16
+. C0 B7 00 00 C0 D6 00 00 2F 86 00 00 41 9E 00 A0
+
+. 0 1000A4E0 156
+. 7C 07 C1 D6 7C C9 03 A6 54 88 18 38 57 3D 18 38 57 03 18 38 54 0A 18 38 81 61 01 3C 7D 28 FA 14 C1 29 00 04 7C 84 CA 14 7C 05 5A 14 7D 48 FC 2E 7C 00 C9 D6 38 A5 00 01 7D 08 EA 14 54 00 18 38 7D 20 FA 14 7D 1F 04 2E C0 E9 00 04 7C 07 32 14 EC 0A 40 28 7C 00 C1 D6 ED A9 38 28 7D 2A D2 14 ED 4A 40 2A 38 E7 00 01 ED 86 00 32 ED 66 03 72 54 00 18 38 ED 29 38 2A 7D 60 D2 14 ED A5 63 7A 7D 4A D5 2E EC 05 58 38 7D 4A 1A 14 D1 29 00 04 7C 1A 05 2E D1 AB 00 04 42 00 FF 80
+
+. 0 1000A57C 20
+. 3B 7B 00 01 7C E7 32 14 7F 9B 78 40 3B 9C 00 08 40 BC FE 0C
+
+. 0 1000A398 28
+. 80 E1 01 44 80 01 00 20 38 E7 00 04 90 E1 01 44 34 E0 FF FF 90 E1 00 20 40 82 F9 AC
+
+. 0 1000A3B4 12
+. 81 01 00 28 2F 88 00 01 41 9E 0A AC
+
+. 0 1000AE68 20
+. 81 41 00 18 39 20 00 00 91 21 00 20 7F 89 50 40 40 BC F5 48
+
+. 0 1000AE7C 72
+. 81 61 00 14 7D 49 03 A6 38 00 00 00 55 64 18 38 81 81 00 20 80 E1 00 2C 55 89 18 38 81 01 00 10 7C 09 3C 2E 7D 29 3A 14 7D 60 42 14 39 8C 00 01 7C 08 05 2E 7C 00 22 14 C1 A9 00 04 91 81 00 20 D1 AB 00 04 42 00 FF CC
+
+. 0 1000AE8C 56
+. 81 81 00 20 80 E1 00 2C 55 89 18 38 81 01 00 10 7C 09 3C 2E 7D 29 3A 14 7D 60 42 14 39 8C 00 01 7C 08 05 2E 7C 00 22 14 C1 A9 00 04 91 81 00 20 D1 AB 00 04 42 00 FF CC
+
+. 0 1000AEC4 8
+. 38 60 00 00 4B FF F4 FC
+
+. 0 10002DBC 32
+. 7D 20 DA 14 7C 1B 04 2E C1 A9 00 04 7D 20 BA 14 7C 17 05 2E 7C 00 5A 14 D1 A9 00 04 42 00 FF E4
+
+. 0 10002798 60
+. 7D 20 FA 14 7D 60 F2 14 7C 1F 04 2E 7D 9E 04 2E 7C 00 52 14 C1 A9 00 04 C1 6B 00 04 EC 00 60 28 ED AD 58 28 FC 00 02 10 FD A0 6A 10 FC 00 68 2A FF E0 02 B2 FF 9F 08 00 40 9D 00 08
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 2, stride = 1
+. 0 1000BD14 36
+. 7C 1F 04 2E 7D 20 FA 14 EC 00 03 32 7C 1F 05 2E 7C 00 22 14 C1 A9 00 04 ED AD 03 32 D1 A9 00 04 42 00 FF E0
+
+. 0 100027D4 8
+. FC 20 F8 90 42 00 FF C0
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 2, stride = 1
+. 0 10002ED8 36
+. 7C 19 04 2E 7D 20 CA 14 EC 00 03 32 7C 19 05 2E 7C 00 5A 14 C1 A9 00 04 ED AD 03 32 D1 A9 00 04 42 00 FF E0
+
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 2, stride = 1
+. 0 100052A8 20
+. 7D 20 32 14 7D 46 01 2E 91 49 00 04 7C 00 5A 14 42 00 FF F0
+
+. 0 1000531C 48
+. 7C 1D F3 96 93 01 00 08 7C 00 F1 D6 7C 00 E8 50 7F BD DA 14 90 01 00 0C CB E1 00 08 FF FF C8 28 FF FF 06 B2 FF FF D8 24 FC 20 F8 90 48 02 3B AD
+
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 2, stride = 1
+. 0 10005420 20
+. 7D 20 2A 14 7C 25 05 2E D0 49 00 04 7C 00 5A 14 42 00 FF F0
+
+. 0 1000544C 20
+. 7D 20 32 14 7D 66 01 2E 91 69 00 04 7C 00 22 14 42 00 FF F0
+
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 2, stride = 1
+. 0 10005588 48
+. 7C 1D F3 96 92 C1 00 08 7C 00 F1 D6 7C 00 E8 50 7F BD D2 14 90 01 00 0C CB E1 00 08 FF FF C8 28 FF FF 06 B2 FF FF E8 24 FC 20 F8 90 48 02 39 41
+
+. 0 10005608 20
+. 7D 20 DA 14 7D 5B 01 2E 91 49 00 04 7C 00 5A 14 42 00 FF F0
+
+. 0 10003060 48
+. 3D 20 10 02 3D 60 10 02 39 29 81 2C 39 6B 81 28 C0 29 00 00 7F E3 FB 78 C0 4B 00 00 7F C4 F3 78 7F 45 D3 78 7F 66 DB 78 7F 07 C3 78 48 00 24 4D
+
+. 0 10005624 164
+. 7C 1A F3 96 7C 00 F1 D6 7C 60 D0 50 3C 00 43 30 3D 20 10 02 90 01 00 10 39 29 80 F8 93 C1 00 14 7C 17 19 D6 C9 A9 00 00 38 60 00 00 C8 01 00 10 FC 00 68 28 54 00 18 38 7D 20 DA 14 FC 00 00 18 ED A0 07 32 EC 00 06 F2 7C 1B 05 2E D1 A9 00 04 80 01 00 84 82 C1 00 20 82 E1 00 24 7C 08 03 A6 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB 21 00 48 CB 41 00 50 CB 61 00 58 CB 81 00 60 CB A1 00 68 CB C1 00 70 CB E1 00 78 38 21 00 80 4E 80 00 20
+
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 2, stride = 1
+. 0 100109E8 8
+. 57 03 18 36 48 01 85 49
+
+. 0 100109F0 16
+. 81 21 00 14 2F 83 00 00 90 69 02 08 41 9E 02 68
+
+. 0 10010AA0 32
+. 38 09 00 01 FF 80 D8 90 54 00 F8 7E 7E 5B 93 78 2A 00 00 01 7C 10 03 78 3B 80 00 00 40 91 00 64
+
+. 0 10010B20 16
+. 3A 94 00 01 7F 7B 92 14 7F 94 98 40 41 9C FF 8C
+
+PASS: gsl_fft_real_wavetable_alloc, n = 2, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 2
+. 0 10001C54 24
+. 55 60 20 36 55 69 18 38 7C 1B 04 AE 7D 6B E2 14 7C 09 FD AE 42 00 FF EC
+
+. 0 10010F00 16
+. 80 06 00 00 81 01 00 18 7F 80 40 00 40 9E 16 3C
+
+. 0 10010F10 16
+. 80 07 00 00 80 E1 00 18 7F 80 38 00 40 9E 16 48
+
+. 0 10010F20 12
+. 80 01 00 24 7F 80 50 40 40 9C 08 FC
+
+. 0 10010F2C 68
+. 80 E1 00 18 38 C6 01 08 81 01 00 18 54 E7 F8 7E 90 C1 00 30 91 41 00 1C 55 0E F0 BE 90 E1 00 2C 81 21 00 24 81 41 00 30 2F 89 00 00 39 31 00 01 83 6A FF 00 55 29 F8 7E 38 09 FF FF 7E 31 D9 D6 40 9E 09 9C
+
+. 0 10010F70 28
+. 2F 9B 00 02 3B 00 00 01 83 E1 00 10 83 A1 00 14 83 21 00 20 93 01 00 24 41 9E 09 A0
+
+. 0 10011928 28
+. 81 81 00 18 56 25 F8 7E 80 E1 00 30 7F 4C 8B 96 83 87 00 00 2F 9A 00 00 41 9E 00 64
+
+. 0 10011944 96
+. 7F 49 03 A6 39 51 FF FF 38 E0 00 00 39 00 00 00 81 21 00 2C 7D 78 39 D6 7C E7 8A 14 7C 08 4A 14 7C 1D 01 D6 55 6B 18 38 7D 3D 41 D6 54 00 18 38 7D BF 04 AE 7D 08 2A 14 55 29 18 38 7C 18 51 D6 7C 09 FC AE 7D 4A 8A 14 FD 80 68 28 FC 00 68 2A 54 00 18 38 7C 0B CD AE 7D 99 05 AE 42 00 FF B4
+
+. 0 100119A4 8
+. 2F 85 00 01 41 BE FE 0C
+
+. 0 100117B4 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 F7 80
+
+. 0 100117D0 12
+. 81 01 00 24 2F 88 00 01 40 BE 00 4C
+
+. 0 100117DC 20
+. 81 41 00 18 39 20 00 00 91 21 00 1C 7F 89 50 40 40 9C 00 38
+
+. 0 100117F0 52
+. 81 61 00 14 7D 49 03 A6 80 61 00 10 55 64 18 38 81 81 00 1C 80 E1 00 20 55 80 18 38 39 8C 00 01 7C 07 04 AE 91 81 00 1C D8 03 00 00 7C 63 22 14 42 00 FF E0
+
+. 0 10011800 36
+. 81 81 00 1C 80 E1 00 20 55 80 18 38 39 8C 00 01 7C 07 04 AE 91 81 00 1C D8 03 00 00 7C 63 22 14 42 00 FF E0
+
+. 0 10011824 164
+. 38 60 00 00 80 01 01 14 81 C1 00 38 81 E1 00 3C 7C 08 03 A6 82 01 00 40 82 21 00 44 82 41 00 48 82 61 00 4C 82 81 00 50 82 A1 00 54 82 C1 00 58 82 E1 00 5C 83 01 00 60 83 21 00 64 83 41 00 68 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C C9 C1 00 80 C9 E1 00 88 CA 01 00 90 CA 21 00 98 CA 41 00 A0 CA 61 00 A8 CA 81 00 B0 CA A1 00 B8 CA C1 00 C0 CA E1 00 C8 CB 01 00 D0 CB 21 00 D8 CB 41 00 E0 CB 61 00 E8 CB 81 00 F0 CB A1 00 F8 CB C1 01 00 CB E1 01 08 38 21 01 10 4E 80 00 20
+
+. 0 1000E578 44
+. 54 C9 18 38 7C 03 29 D6 39 29 FF F8 C9 BD 00 00 7D 29 19 D6 54 00 20 36 7C 09 FC AE 7D 20 22 14 7C 04 05 AE D9 A9 00 08 4B FF FF B8
+
+PASS: gsl_fft_real with signal_real_noise, n = 2, stride = 1
+. 0 1000C694 32
+. 38 09 00 01 FF 80 D8 90 54 00 F8 7E 7E 5B 93 78 2A 00 00 01 7C 10 03 78 3B 80 00 00 40 91 00 64
+
+. 0 1000C714 16
+. 3A 94 00 01 7F 7B 92 14 7F 94 98 40 41 9C FF 8C
+
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 2, stride = 1
+. 0 1000C990 16
+. 80 06 00 00 81 01 00 18 7F 80 40 00 40 9E 15 FC
+
+. 0 1000C9A0 16
+. 80 07 00 00 80 E1 00 18 7F 80 38 00 40 9E 15 D0
+
+. 0 1000C9B0 28
+. 80 06 00 04 38 E0 00 00 39 00 00 01 90 E1 00 28 7F 87 00 40 91 01 00 20 40 9C 05 58
+
+. 0 1000C9CC 88
+. 81 21 00 18 38 C6 01 08 81 41 00 18 55 29 F8 7E 90 C1 00 34 55 4A F0 BE 90 01 00 24 91 21 00 2C 91 41 00 30 81 61 00 34 83 21 00 20 83 EB FF 00 80 01 00 18 7F 39 F9 D6 81 81 00 28 2F 8C 00 00 7F 60 CB 96 93 21 00 20 39 3B 00 01 55 29 F8 7E 38 09 FF FF 40 9E 06 5C
+
+. 0 1000CA24 28
+. 2F 9F 00 02 3A E0 00 01 83 A1 00 10 83 81 00 14 83 41 00 1C 92 E1 00 28 41 9E 06 60
+
+. 0 1000D09C 20
+. 80 E1 00 20 81 01 00 34 54 F6 F8 7F 82 08 00 00 41 82 00 60
+
+. 0 1000D0B0 92
+. 57 69 20 36 7E C9 03 A6 7D 27 4B 78 39 49 FF F8 39 00 00 00 81 61 00 2C 7C 08 E1 D6 7D 28 5A 14 7D 6A E1 D6 54 00 20 36 7C 1D 04 AE 7D 4A 3A 14 7D AB EC AE 7C 08 B9 D6 7D 08 DA 14 FD 80 68 28 FC 00 68 2A 7D 29 B9 D6 54 00 18 38 7C 1A 05 AE 55 29 18 38 7D 89 D5 AE 42 00 FF BC
+
+. 0 1000D10C 8
+. 2F 9B 00 01 41 BE FD A0
+
+. 0 1000CEB0 28
+. 80 C1 00 34 80 01 00 24 38 C6 00 04 90 C1 00 34 34 C0 FF FF 90 C1 00 24 40 82 FB 28
+
+. 0 1000CECC 12
+. 80 E1 00 28 2F 87 00 01 40 BE 00 4C
+
+. 0 1000CED8 20
+. 81 21 00 18 39 00 00 00 91 01 00 24 7F 88 48 40 40 9C 00 38
+
+. 0 1000CEEC 52
+. 81 41 00 14 7D 29 03 A6 80 61 00 10 55 44 18 38 81 61 00 24 81 81 00 1C 55 60 18 38 39 6B 00 01 7C 0C 04 AE 91 61 00 24 D8 03 00 00 7C 63 22 14 42 00 FF E0
+
+. 0 1000CEFC 36
+. 81 61 00 24 81 81 00 1C 55 60 18 38 39 6B 00 01 7C 0C 04 AE 91 61 00 24 D8 03 00 00 7C 63 22 14 42 00 FF E0
+
+. 0 1000CF20 164
+. 38 60 00 00 80 01 01 14 81 C1 00 38 81 E1 00 3C 7C 08 03 A6 82 01 00 40 82 21 00 44 82 41 00 48 82 61 00 4C 82 81 00 50 82 A1 00 54 82 C1 00 58 82 E1 00 5C 83 01 00 60 83 21 00 64 83 41 00 68 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C C9 C1 00 80 C9 E1 00 88 CA 01 00 90 CA 21 00 98 CA 41 00 A0 CA 61 00 A8 CA 81 00 B0 CA A1 00 B8 CA C1 00 C0 CA E1 00 C8 CB 01 00 D0 CB 21 00 D8 CB 41 00 E0 CB 61 00 E8 CB 81 00 F0 CB A1 00 F8 CB C1 01 00 CB E1 01 08 38 21 01 10 4E 80 00 20
+
+. 0 10001D54 20
+. 7C 1F 04 AE FC 00 68 24 7C 1F 05 AE 7C 00 4A 14 42 00 FF F0
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 2, stride = 1
+. 0 100129D0 8
+. 57 03 10 38 48 01 65 61
+
+. 0 100129D8 16
+. 81 21 00 14 2F 83 00 00 90 69 02 08 41 9E 02 70
+
+. 0 10012A88 32
+. 38 09 00 01 FF 80 D8 90 54 00 F8 7E 7E 5B 93 78 2A 00 00 01 7C 10 03 78 3B 80 00 00 40 91 00 6C
+
+. 0 10012B10 16
+. 3A 94 00 01 7F 7B 92 14 7F 94 98 40 41 9C FF 84
+
+PASS: gsl_fft_real_wavetable_float_alloc, n = 2, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 2
+. 0 10005B90 4
+. 4B FF EB FD
+
+. 0 10003B88 24
+. 55 60 18 38 55 69 10 3A 7C 1B 04 2E 7D 6B E2 14 7C 09 FD 2E 42 00 FF EC
+
+. 0 10012EF0 16
+. 80 06 00 00 81 01 00 18 7F 80 40 00 40 9E 16 B0
+
+. 0 10012F00 16
+. 80 07 00 00 80 E1 00 18 7F 80 38 00 40 9E 16 BC
+
+. 0 10012F10 12
+. 80 01 00 24 7F 80 50 40 40 9C 09 38
+
+. 0 10012F1C 68
+. 80 E1 00 18 38 C6 01 08 81 01 00 18 54 E7 F8 7E 90 C1 00 30 91 41 00 1C 55 0E F0 BE 90 E1 00 2C 81 21 00 24 81 41 00 30 2F 89 00 00 39 31 00 01 83 6A FF 00 55 29 F8 7E 38 09 FF FF 7E 31 D9 D6 40 9E 09 D8
+
+. 0 10012F60 28
+. 2F 9B 00 02 3B 00 00 01 83 A1 00 10 83 E1 00 14 83 21 00 20 93 01 00 24 41 9E 09 DC
+
+. 0 10013954 28
+. 81 81 00 18 56 24 F8 7E 80 E1 00 30 7F 4C 8B 96 83 87 00 00 2F 9A 00 00 41 9E 00 64
+
+. 0 10013970 96
+. 7F 49 03 A6 39 51 FF FF 38 E0 00 00 39 00 00 00 81 21 00 2C 7D 78 39 D6 7C E7 8A 14 7C 08 4A 14 7C 1F 01 D6 55 6B 10 3A 7D 3F 41 D6 54 00 10 3A 7D BD 04 2E 7D 08 22 14 55 29 10 3A 7C 18 51 D6 7C 09 EC 2E 7D 4A 8A 14 ED 80 68 28 EC 00 68 2A 54 00 10 3A 7C 0B CD 2E 7D 99 05 2E 42 00 FF B4
+
+. 0 100139D0 8
+. 2F 84 00 01 41 BE FE 0C
+
+. 0 100137E0 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 F7 44
+
+. 0 100137FC 12
+. 81 01 00 24 2F 88 00 01 40 BE 00 4C
+
+. 0 10013808 20
+. 81 41 00 18 39 20 00 00 91 21 00 1C 7F 89 50 40 40 9C 00 38
+
+. 0 1001381C 52
+. 81 61 00 14 7D 49 03 A6 80 61 00 10 55 64 10 3A 81 81 00 1C 80 E1 00 20 55 80 10 3A 39 8C 00 01 7C 07 04 2E 91 81 00 1C D0 03 00 00 7C 63 22 14 42 00 FF E0
+
+. 0 1001382C 36
+. 81 81 00 1C 80 E1 00 20 55 80 10 3A 39 8C 00 01 7C 07 04 2E 91 81 00 1C D0 03 00 00 7C 63 22 14 42 00 FF E0
+
+. 0 10013850 164
+. 38 60 00 00 80 01 01 24 81 C1 00 48 81 E1 00 4C 7C 08 03 A6 82 01 00 50 82 21 00 54 82 41 00 58 82 61 00 5C 82 81 00 60 82 A1 00 64 82 C1 00 68 82 E1 00 6C 83 01 00 70 83 21 00 74 83 41 00 78 83 61 00 7C 83 81 00 80 83 A1 00 84 83 C1 00 88 83 E1 00 8C C9 C1 00 90 C9 E1 00 98 CA 01 00 A0 CA 21 00 A8 CA 41 00 B0 CA 61 00 B8 CA 81 00 C0 CA A1 00 C8 CA C1 00 D0 CA E1 00 D8 CB 01 00 E0 CB 21 00 E8 CB 41 00 F0 CB 61 00 F8 CB 81 01 00 CB A1 01 08 CB C1 01 10 CB E1 01 18 38 21 01 20 4E 80 00 20
+
+. 0 100107BC 44
+. 54 C9 10 3A 7C 03 29 D6 39 29 FF FC C1 BD 00 00 7D 29 19 D6 54 00 18 38 7C 09 FC 2E 7D 20 22 14 7C 04 05 2E D1 A9 00 04 4B FF FF B8
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 2, stride = 1
+. 0 1000E854 32
+. 38 09 00 01 FF 80 D8 90 54 00 F8 7E 7E 5B 93 78 2A 00 00 01 7C 10 03 78 3B 80 00 00 40 91 00 6C
+
+. 0 1000E8DC 16
+. 3A 94 00 01 7F 7B 92 14 7F 94 98 40 41 9C FF 84
+
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 2, stride = 1
+. 0 1000EB58 16
+. 80 06 00 00 81 01 00 18 7F 80 40 00 40 9E 16 60
+
+. 0 1000EB68 16
+. 80 07 00 00 80 E1 00 18 7F 80 38 00 40 9E 16 34
+
+. 0 1000EB78 28
+. 80 06 00 04 38 E0 00 00 39 00 00 01 90 E1 00 28 7F 87 00 40 91 01 00 20 40 9C 05 5C
+
+. 0 1000EB94 88
+. 81 21 00 18 38 C6 01 08 81 41 00 18 55 29 F8 7E 90 C1 00 34 55 4A F0 BE 90 01 00 24 91 21 00 2C 91 41 00 30 81 61 00 34 83 21 00 20 83 AB FF 00 80 01 00 18 7F 39 E9 D6 81 81 00 28 2F 8C 00 00 7F 60 CB 96 93 21 00 20 39 3B 00 01 55 29 F8 7E 38 09 FF FF 40 9E 06 60
+
+. 0 1000EBEC 28
+. 2F 9D 00 02 3A E0 00 01 83 E1 00 10 83 81 00 14 83 41 00 1C 92 E1 00 28 41 9E 06 64
+
+. 0 1000F268 20
+. 80 E1 00 20 81 01 00 34 54 F6 F8 7F 82 08 00 00 41 82 00 60
+
+. 0 1000F27C 92
+. 57 69 18 38 7E C9 03 A6 7D 27 4B 78 39 49 FF FC 39 00 00 00 81 61 00 2C 7C 08 E1 D6 7D 28 5A 14 7D 6A E1 D6 54 00 18 38 7C 1F 04 2E 7D 4A 3A 14 7D AB FC 2E 7C 08 B9 D6 7D 08 DA 14 ED 80 68 28 EC 00 68 2A 7D 29 B9 D6 54 00 10 3A 7C 1A 05 2E 55 29 10 3A 7D 89 D5 2E 42 00 FF BC
+
+. 0 1000F2D8 8
+. 2F 9B 00 01 41 BE FD A0
+
+. 0 1000F07C 28
+. 80 C1 00 34 80 01 00 24 38 C6 00 04 90 C1 00 34 34 C0 FF FF 90 C1 00 24 40 82 FB 24
+
+. 0 1000F098 12
+. 80 E1 00 28 2F 87 00 01 40 BE 00 4C
+
+. 0 1000F0A4 20
+. 81 21 00 18 39 00 00 00 91 01 00 24 7F 88 48 40 40 9C 00 38
+
+. 0 1000F0B8 52
+. 81 41 00 14 7D 29 03 A6 80 61 00 10 55 44 10 3A 81 61 00 24 81 81 00 1C 55 60 10 3A 39 6B 00 01 7C 0C 04 2E 91 61 00 24 D0 03 00 00 7C 63 22 14 42 00 FF E0
+
+. 0 1000F0C8 36
+. 81 61 00 24 81 81 00 1C 55 60 10 3A 39 6B 00 01 7C 0C 04 2E 91 61 00 24 D0 03 00 00 7C 63 22 14 42 00 FF E0
+
+. 0 1000F0EC 164
+. 38 60 00 00 80 01 01 14 81 C1 00 38 81 E1 00 3C 7C 08 03 A6 82 01 00 40 82 21 00 44 82 41 00 48 82 61 00 4C 82 81 00 50 82 A1 00 54 82 C1 00 58 82 E1 00 5C 83 01 00 60 83 21 00 64 83 41 00 68 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C C9 C1 00 80 C9 E1 00 88 CA 01 00 90 CA 21 00 98 CA 41 00 A0 CA 61 00 A8 CA 81 00 B0 CA A1 00 B8 CA C1 00 C0 CA E1 00 C8 CB 01 00 D0 CB 21 00 D8 CB 41 00 E0 CB 61 00 E8 CB 81 00 F0 CB A1 00 F8 CB C1 01 00 CB E1 01 08 38 21 01 10 4E 80 00 20
+
+. 0 10003C8C 20
+. 7C 1F 04 2E EC 00 68 24 7C 1F 05 2E 7C 00 4A 14 42 00 FF F0
+
+. 0 10014930 32
+. 55 40 10 3A 55 49 18 38 7C 03 04 2E 7D 69 22 14 7D 4A 2A 14 7C 09 25 2E D1 AB 00 04 42 00 FF E4
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 2, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 2, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 2
+PASS: gsl_fft_complex_forward with signal_noise, n = 2, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 2, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 2, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 2, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 2, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 2, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 2, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 2, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 2, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 2, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 2
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 2, stride = 2
+. 0 10002B38 12
+. 2B 84 00 01 39 6B 00 02 40 9D 00 88
+
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 2, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 2, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 2, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 2, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 2, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 2, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 2, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 2, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 2, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 2
+PASS: gsl_fft_real with signal_real_noise, n = 2, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 2, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 2, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 2, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 2
+PASS: gsl_fft_real_float with signal_real_noise, n = 2, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 2, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 2, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 2, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 2
+PASS: gsl_fft_complex_forward with signal_noise, n = 2, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 2, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 2, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 2, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 2, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 2, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 2, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 2, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 2, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 2, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 2
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 2, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 2, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 2, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 2, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 2, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 2, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 2, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 2, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 2, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 2, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 2
+PASS: gsl_fft_real with signal_real_noise, n = 2, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 2, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 2, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 2, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 2
+PASS: gsl_fft_real_float with signal_real_noise, n = 2, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 2, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 2, stride = 3
+. 0 FEEF8E8 20
+. 80 E9 00 0C 61 4A 00 01 80 07 00 04 7F 80 50 40 41 9C 00 14
+
+. 0 FEEF8FC 12
+. 7D 2B 4B 78 7C E9 3B 78 4B FF FE F0
+
+. 0 FEEFCA0 32
+. 57 AF 10 3A 60 F2 00 01 7D F6 9B 78 39 0A 00 08 92 CA 00 04 7C E3 39 2E 92 43 00 04 4B FF F9 F4
+
+. 0 10006994 16
+. 3B 80 00 01 3B E0 00 00 7F 9C B0 40 41 9D 00 64
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 3, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 3
+. 0 10006F78 8
+. 2F 97 00 03 41 9E 08 1C
+
+. 0 10007798 72
+. 3D 20 AA AA 81 81 00 24 61 29 AA AB 80 E1 00 18 7D 6C 48 16 3A 40 00 00 80 C1 01 88 7F 92 78 40 55 E0 20 36 82 66 00 00 38 80 00 00 7D 27 48 16 55 63 F8 7E 7E 20 9A 14 38 A0 00 00 54 77 08 3C 55 34 F8 7E 40 BC FD 9C
+
+. 0 100077E0 60
+. 81 1E 80 4C 7E 6C 9B 78 81 3E 80 50 7E 35 8B 78 81 5E 80 48 56 96 08 3C 82 1E 80 40 3E 60 43 30 81 DE 80 44 3B A0 00 00 CB 68 00 00 CB 49 00 00 CB 2A 00 00 2F 92 00 00 40 9E 01 64
+
+. 0 1000781C 24
+. CB D0 00 00 CB AE 00 00 FC 20 F0 90 FF E0 E8 90 2F 83 00 00 41 9E 01 2C
+
+. 0 10007834 296
+. 81 61 00 1C FF 80 D8 90 92 61 00 08 57 A6 20 36 6D 60 80 00 57 3B 20 36 90 01 00 0C 7C 05 C1 D6 C8 01 00 08 57 1C 20 36 7C 69 03 A6 FC 00 D0 28 54 07 20 36 FC 40 06 72 7C 04 A2 14 7D 76 22 14 7C 00 C9 D6 7D 26 FA 14 C8 E9 00 08 7D 07 D2 14 7D 06 FC AE 38 84 00 01 7F BD CA 14 7D 6B C9 D6 54 00 20 36 7D 40 FA 14 7D BF 04 AE C8 0A 00 08 7C 05 1A 14 7C C6 DA 14 55 6B 20 36 7C 00 C1 D6 7D 2B FA 14 7D 8B FC AE C9 69 00 08 7D 37 2A 14 FC 8D 60 2A 38 A5 00 01 FC 60 58 2A 7D 29 C1 D6 FD AD 60 28 54 00 20 36 FC 00 58 28 7D 60 D2 14 FD 44 07 32 55 29 20 36 FD 83 07 32 7D 49 D2 14 FD A2 03 72 FC 02 00 32 FD 48 50 28 FD 87 60 28 FD 08 20 2A FD 6A 00 2A FD 2C 68 28 FD 4A 00 28 7D 07 D5 AE FD 8C 68 2A 7C E7 E2 14 FC BF 02 F2 FC 1D 02 B2 FD BD 03 32 FC DF 02 72 FD 9E 03 3A FD 21 2A 7A FD 5E 6A B8 FD 61 32 F8 FC E7 18 2A D8 E8 00 08 7D 5A 05 AE D9 8B 00 08 7D 69 D5 AE D9 2A 00 08 42 00 FF 14
+
+. 0 1000795C 24
+. 3A 52 00 01 7C A5 BA 14 7F 92 78 40 39 8C 00 10 3A B5 00 10 40 BC FC 08
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 3, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 3, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 3, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 3, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 3, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 3, stride = 1
+. 0 FEEECBC 20
+. 81 7A 00 08 81 3A 00 0C 80 0B 00 0C 7C 80 D0 00 40 86 04 DC
+
+. 0 FEEECD0 12
+. 80 A9 00 08 7F 05 D0 00 40 9A 04 D0
+
+. 0 FEEECDC 56
+. 91 69 00 08 91 2B 00 0C 7F BD DA 14 38 7C 00 38 63 A8 00 01 80 C3 00 08 90 7F 00 0C 90 DF 00 08 91 1F 00 04 7F BF E9 2E 93 E6 00 0C 93 E3 00 08 28 9D FF FF 40 85 01 24
+
+. 0 100097BC 16
+. 3B 80 00 01 3B E0 00 00 7F 9C B0 40 41 9D 00 6C
+
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 3, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 3
+. 0 10009DA0 8
+. 2F 97 00 03 41 9E 08 14
+
+. 0 1000A5B8 72
+. 3D 20 AA AA 81 81 00 24 61 29 AA AB 81 01 00 18 7D 6C 48 16 3A 40 00 00 80 E1 01 44 7F 92 78 40 55 E0 18 38 82 87 00 00 38 80 00 00 7D 28 48 16 55 63 F8 7E 7E 20 A2 14 38 A0 00 00 54 77 08 3C 55 33 F8 7E 40 BC FD 9C
+
+. 0 1000A600 60
+. 81 3E 80 D8 7E 8C A3 78 81 5E 80 DC 7E 35 8B 78 81 7E 80 D4 56 76 08 3C 82 1E 80 CC 3E 80 43 30 81 DE 80 D0 3B A0 00 00 CB 69 00 00 CB 4A 00 00 C3 2B 00 00 2F 92 00 00 40 9E 01 80
+
+. 0 1000A63C 24
+. C3 B0 00 00 C3 8E 00 00 FF E0 E8 90 FF C0 E0 90 2F 83 00 00 41 9E 01 48
+
+. 0 1000A654 324
+. 80 E1 00 1C FC 20 D8 90 92 81 00 08 57 A6 18 38 6C E0 80 00 57 3B 18 38 90 01 00 0C 7C 05 C1 D6 C8 01 00 08 57 1C 18 38 7C 69 03 A6 FC 00 D0 28 54 07 18 38 FC 00 00 18 EC 40 06 72 7C 04 9A 14 7D 76 22 14 7C 00 C9 D6 7D 26 FA 14 C0 A9 00 04 38 84 00 01 7C C6 FC 2E 7F BD CA 14 FD 80 28 90 7C C6 DA 14 7D 6B C9 D6 54 00 18 38 7D 40 FA 14 7D 3F 04 2E C1 4A 00 04 FD 60 30 90 7C 05 1A 14 55 6B 18 38 7C 00 C1 D6 7D 2B FA 14 7D 0B FC 2E C0 E9 00 04 7D 77 2A 14 EC 89 40 2A 7D 27 D2 14 EC 6A 38 2A 7D 6B C1 D6 ED 29 40 28 54 00 18 38 FC 00 20 90 7D 40 D2 14 FD A0 18 90 38 A5 00 01 ED 4A 38 28 55 6B 18 38 FC 00 00 72 7D 0B D2 14 FD AD 00 72 FD 6B 00 28 ED 22 02 72 FD 8C 68 28 ED 42 02 B2 FD 60 58 18 FD 80 60 18 EC C6 20 2A EC 0B 50 2A ED AC 48 28 ED 6B 50 28 7C C7 D5 2E ED 8C 48 2A 7C E7 E2 14 ED 3E 00 32 ED 5C 02 F2 ED 1C 03 32 EC FE 03 72 EC A5 18 2A ED 9D 53 3A ED BF 4B 7A ED 7D 42 F8 D0 A9 00 04 EC 1F 38 38 7D 7A 05 2E D1 8A 00 04 7C 0B D5 2E D1 A8 00 04 42 00 FE FC
+
+. 0 1000A798 24
+. 3A 52 00 01 7C A5 BA 14 7F 92 78 40 39 8C 00 08 3A B5 00 08 40 BC FB EC
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 3, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 3, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 3, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 3, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 3, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 3, stride = 1
+. 0 10010AB8 8
+. 3B 80 00 00 40 91 00 64
+
+PASS: gsl_fft_real_wavetable_alloc, n = 3, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 3
+. 0 10010F8C 8
+. 2F 9B 00 03 41 9E 0E 5C
+
+. 0 10011DEC 56
+. 80 E1 00 18 3D 20 AA AA 81 01 00 30 61 29 AA AB 7E A7 8B 96 54 00 20 36 83 88 00 00 7E E0 E2 14 2F 95 00 00 7C 11 48 16 7D 27 48 16 54 1B F8 7E 55 23 F8 7E 41 9E 00 A4
+
+. 0 10011E24 160
+. 81 3E 82 50 57 66 08 3C 81 7E 82 54 39 46 FF FF C9 49 00 00 38 E0 00 00 C9 2B 00 00 39 00 00 00 7E A9 03 A6 7C 08 1A 14 55 4B 18 38 7D 20 1A 14 7C 1D 01 D6 39 6B 00 08 7D 3D 49 D6 54 00 18 38 7D BF 04 AE 55 29 18 38 7C 1D 41 D6 7C 09 FC AE 7D 08 DA 14 FD 6D 00 2A FD AD 00 28 54 00 18 38 7C 1F 04 AE 7D 38 39 D6 FD 8B 02 B2 7C E7 8A 14 FD AD 02 72 7C 18 51 D6 55 29 18 38 FD 80 60 28 7D 4A 8A 14 FC 00 58 2A 7D 6B C1 D6 54 00 18 38 7C 09 CD AE 7D 99 05 AE 7D AB CD AE 42 00 FF 88
+
+. 0 10011EC4 8
+. 2F 9B 00 01 41 BE F8 EC
+
+. 0 1000E4FC 84
+. 54 6A 20 36 54 60 18 38 7C E0 FA 14 7D 4C 53 78 7C 03 41 D6 38 A5 00 01 7D AA FC AE 7D 05 30 50 7F 88 28 40 C8 07 00 00 FD 80 68 50 7D 2A 22 14 54 00 20 36 7C 0A 25 AE 7D 60 22 14 D9 A9 00 08 7C E7 62 14 7C 04 05 AE 7D 4A 62 14 D9 8B 00 08 41 9D FF C0
+
+PASS: gsl_fft_real with signal_real_noise, n = 3, stride = 1
+. 0 1000C6AC 8
+. 3B 80 00 00 40 91 00 64
+
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 3, stride = 1
+. 0 1000CA40 8
+. 2F 9F 00 03 41 9E 08 70
+
+. 0 1000D2B4 48
+. 81 41 00 20 54 00 20 36 81 61 00 34 7E 8A FB 96 81 9E 81 64 82 0B 00 00 83 21 00 18 7F 00 82 14 CB CC 00 00 2F 94 00 00 7F F9 FB 96 41 9E 00 94
+
+. 0 1000D2E4 144
+. FD 3E F0 2A 7E 89 03 A6 57 66 08 3C 38 E0 00 00 1C 07 00 03 7D 07 FA 14 7D 48 FA 14 7D 17 41 D6 7D 26 02 14 39 29 FF FF 55 2B 18 38 39 6B 00 08 7C 1C 01 D6 55 08 18 38 7D 6B E1 D6 54 00 18 38 7D 7D 04 AE 7D 3C 49 D6 7D AB EC AE FD A9 03 72 55 29 18 38 7C 17 39 D6 7C 09 EC AE 7C E7 DA 14 FD 8B 00 28 FC 00 00 2A 7D 57 51 D6 54 00 18 38 FD 4C 68 2A FD 6B 00 2A FD 8C 68 28 55 4A 18 38 7D 7A 05 AE 7D 88 D5 AE 7D 4A D5 AE 42 00 FF 84
+
+. 0 1000D374 8
+. 2F 9B 00 01 41 BE FB 38
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 3, stride = 1
+. 0 10012AA0 8
+. 3B 80 00 00 40 91 00 6C
+
+PASS: gsl_fft_real_wavetable_float_alloc, n = 3, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 3
+. 0 10012F7C 8
+. 2F 9B 00 03 41 9E 0E 9C
+
+. 0 10013E1C 56
+. 80 E1 00 18 3D 20 AA AA 81 01 00 30 61 29 AA AB 7E A7 8B 96 54 00 18 38 83 88 00 00 7E E0 E2 14 2F 95 00 00 7C 11 48 16 7D 27 48 16 54 1B F8 7E 55 23 F8 7E 41 9E 00 B0
+
+. 0 10013E54 172
+. 81 3E 82 A8 57 66 08 3C 81 7E 82 AC 39 46 FF FF C9 29 00 00 38 E0 00 00 C1 0B 00 00 39 00 00 00 7E A9 03 A6 7C 08 1A 14 55 4B 10 3A 7D 20 1A 14 7C 1F 01 D6 39 6B 00 04 7D 3F 49 D6 54 00 10 3A 7D 7D 04 2E 55 29 10 3A 7C 1F 41 D6 7C 09 EC 2E 7D 08 DA 14 ED 4B 00 2A 54 00 10 3A ED 6B 00 28 7D 9D 04 2E 7D 38 39 D6 FD A0 50 90 7C E7 8A 14 FC 00 60 90 ED 6B 02 32 FD AD 02 72 7C 18 51 D6 55 29 10 3A ED 8C 50 2A 7D 4A 8A 14 FC 00 68 28 7D 6B C1 D6 54 00 10 3A 7D 89 CD 2E FC 00 00 18 7C 19 05 2E 7D 6B CD 2E 42 00 FF 7C
+
+. 0 10013F00 8
+. 2F 9B 00 01 41 BE F8 DC
+
+. 0 10010740 84
+. 54 6A 18 38 54 60 10 3A 7C E0 FA 14 7D 4C 53 78 7C 03 41 D6 38 A5 00 01 7D AA FC 2E 7D 05 30 50 7F 88 28 40 C0 07 00 00 FD 80 68 50 7D 2A 22 14 54 00 18 38 7C 0A 25 2E 7D 60 22 14 D1 A9 00 04 7C E7 62 14 7C 04 05 2E 7D 4A 62 14 D1 8B 00 04 41 9D FF C0
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 3, stride = 1
+. 0 1000E86C 8
+. 3B 80 00 00 40 91 00 6C
+
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 3, stride = 1
+. 0 1000EC08 8
+. 2F 9D 00 03 41 9E 08 74
+
+. 0 1000F480 48
+. 81 41 00 20 54 00 18 38 81 61 00 34 7E 6A EB 96 81 9E 81 DC 82 0B 00 00 C3 AC 00 00 7E C0 82 14 80 01 00 18 2F 93 00 00 7F 20 EB 96 41 9E 00 94
+
+. 0 1000F4B0 144
+. ED 3D E8 2A 7E 69 03 A6 57 66 08 3C 38 E0 00 00 1C 07 00 03 7D 07 CA 14 7D 48 CA 14 7D 17 41 D6 7D 26 02 14 39 29 FF FF 55 2B 10 3A 7C 1C 01 D6 39 6B 00 04 55 08 10 3A 7D 3C 49 D6 54 00 10 3A 7D 7F 04 2E 7D 6B E1 D6 55 29 10 3A 7C 09 FC 2E ED 8B 00 28 7D AB FC 2E EC 00 00 2A 7C 17 39 D6 7C E7 DA 14 ED A9 03 72 ED 6B 00 2A 7D 57 51 D6 ED 4C 68 2A ED 8C 68 28 54 00 10 3A 7D 7A 05 2E 55 4A 10 3A 7D 88 D5 2E 7D 4A D5 2E 42 00 FF 84
+
+. 0 1000F540 8
+. 2F 9B 00 01 41 BE FB 38
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 3, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 3, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 3
+PASS: gsl_fft_complex_forward with signal_noise, n = 3, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 3, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 3, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 3, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 3, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 3, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 3, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 3, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 3, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 3, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 3
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 3, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 3, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 3, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 3, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 3, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 3, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 3, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 3, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 3, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 3, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 3
+PASS: gsl_fft_real with signal_real_noise, n = 3, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 3, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 3, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 3, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 3
+PASS: gsl_fft_real_float with signal_real_noise, n = 3, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 3, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 3, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 3, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 3
+PASS: gsl_fft_complex_forward with signal_noise, n = 3, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 3, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 3, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 3, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 3, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 3, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 3, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 3, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 3, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 3, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 3
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 3, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 3, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 3, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 3, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 3, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 3, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 3, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 3, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 3, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 3, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 3
+PASS: gsl_fft_real with signal_real_noise, n = 3, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 3, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 3, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 3, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 3
+. 0 FEF8878 20
+. 80 04 00 00 38 63 FF E4 38 84 FF E8 38 A5 00 06 4B FF FF B4
+
+. 0 FEF883C 24
+. 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 3, stride = 3
+. 0 FEEF994 16
+. 81 29 00 08 80 E9 00 04 7C 87 C8 40 40 84 01 54
+
+. 0 FEEFAF4 16
+. 81 DD 00 04 55 DF 00 38 7D 9F C8 40 41 8C FF F0
+
+. 0 FEEFB04 24
+. 81 7D 00 08 7F 19 F8 50 81 3D 00 0C 82 0B 00 0C 7F 10 E8 00 40 9A 05 AC
+
+. 0 FEEFB1C 12
+. 82 29 00 08 7C 11 E8 00 40 82 05 A0
+
+. 0 FEEFB28 16
+. 91 69 00 08 91 2B 00 0C 28 98 00 0F 41 85 05 54
+
+. 0 FEF0088 60
+. 31 2F FF FF 7C A9 79 10 63 17 00 01 54 A0 10 3A 7D 3D CA 14 7C 0C A3 78 39 1D 00 08 91 3C 00 44 91 9D 00 04 91 3C 00 40 7F 09 C1 2E 93 49 00 0C 92 E9 00 04 93 49 00 08 4B FF F5 F0
+
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 3, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 3, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 4, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 4
+. 0 10006F80 8
+. 2F 97 00 04 41 9E 0A 2C
+
+. 0 100079B0 52
+. 39 C0 00 00 81 81 01 88 7F 8E 78 40 80 C1 00 24 82 6C 00 00 55 E0 20 36 54 DC F0 BE 3B A0 00 00 7E 20 9A 14 38 60 00 00 7E 40 8A 14 1E BC 00 03 40 BC FB 98
+
+. 0 100079E4 68
+. 81 01 01 84 3D 60 43 30 80 E1 00 1C 7E 2C 8B 78 81 3E 80 44 7E 71 9B 78 81 5E 80 50 2F 07 FF FF 1E 08 00 03 91 21 01 7C 55 14 08 3C 57 93 08 3C CA CA 00 00 7D 68 03 A6 3B 60 00 00 2F 8E 00 00 40 9E 01 C0
+
+. 0 10007A28 40
+. 80 DE 80 40 80 E1 01 7C CB 06 00 00 CA E7 00 00 FF 60 C0 90 FF 20 B8 90 FF 80 C0 90 FF 40 B8 90 2F 9C 00 00 41 9E 01 74
+
+. 0 10007A50 368
+. 81 01 00 1C 7D 28 02 A6 57 64 20 36 57 36 20 36 6D 00 80 00 57 17 20 36 90 01 00 0C 7C 03 C1 D6 91 21 00 08 7F 89 03 A6 C8 01 00 08 54 05 20 36 FF A0 B0 28 81 41 01 84 7D 70 EA 14 7D 6B C9 D6 7D 24 FA 14 7C 1D 52 14 C9 29 00 08 7D 54 EA 14 7D 84 FC AE 7C C5 D2 14 7C 00 C9 D6 55 6B 20 36 7D 2B FA 14 7C CB FC AE C8 49 00 08 7D 73 1A 14 3B BD 00 01 54 00 20 36 7D 4A C9 D6 7D 00 FA 14 7C 7F 04 AE C8 88 00 08 7C 15 1A 14 FD 43 30 28 7F 7B CA 14 FD 64 10 28 55 4A 20 36 7D 2A FA 14 7D 0A FC AE C8 E9 00 08 FD 5D 02 B2 7D 23 E2 14 7D 6B C1 D6 FD 7D 02 F2 7C 84 B2 14 38 63 00 01 FC 0C 40 28 FD A9 38 28 7D 29 C1 D6 FC 63 30 2A 55 6B 20 36 FC 84 10 2A 7D 0B D2 14 FD 8C 40 2A FD 29 38 2A 7C 00 C1 D6 FC A0 58 2A 55 29 20 36 FC CD 50 28 7D 49 D2 14 FD 0C 18 28 FC E9 20 28 54 00 20 36 FD AD 50 2A 7C E0 D2 14 FC 00 58 28 FF D9 02 32 FD 77 00 32 FD 5A 01 72 FC 57 03 72 FC 39 01 F2 FF FA 01 B2 FD B8 5B 7A FC FB F1 FA FC DC 51 BA FC 18 10 38 FD 1B 0A 38 FC BC F9 78 FD 8C 18 2A FD 29 20 2A 7D 85 D5 AE 7C A5 BA 14 D9 26 00 08 7C 09 D5 AE D9 AA 00 08 7D 0B D5 AE D8 E8 00 08 7C BA 05 AE D8 C7 00 08 42 00 FE C8
+
+. 0 10007BC0 28
+. 39 CE 00 01 7C 63 AA 14 7F 8E 78 40 3A 31 00 10 39 8C 00 10 3A 52 00 10 40 BC F9 A0
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 4, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 4, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 4, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 4, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 4, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 4, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 4, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 4
+. 0 10009DA8 8
+. 2F 97 00 04 41 9E 0A 40
+
+. 0 1000A7EC 52
+. 39 C0 00 00 81 01 01 44 7F 8E 78 40 81 21 00 24 82 88 00 00 55 E0 18 38 55 3C F0 BE 3B A0 00 00 7E 20 A2 14 38 60 00 00 7E 40 8A 14 1E BC 00 03 40 BC FB 7C
+
+. 0 1000A820 80
+. 81 61 01 40 3D 20 43 30 81 9E 80 CC 57 93 08 3C 81 41 00 1C 1E 0B 00 03 80 FE 80 D0 7D 28 03 A6 81 1E 80 DC 2F 0A FF FF 91 81 01 34 3B 60 00 00 7E 4C 93 78 90 E1 01 38 7E 32 8B 78 CA C8 00 00 7E 91 A3 78 55 74 08 3C 2F 8E 00 00 40 9E 01 C4
+
+. 0 1000A870 40
+. 81 41 01 34 81 61 01 38 C3 0A 00 00 C2 EB 00 00 FF 60 C0 90 FF 40 B8 90 FF 80 C0 90 FF 20 B8 90 2F 9C 00 00 41 9E 01 78
+
+. 0 1000A898 372
+. 80 E1 00 1C 7D 08 02 A6 57 64 18 38 57 36 18 38 6C E0 80 00 57 17 18 38 90 01 00 0C 7C 03 C1 D6 91 01 00 08 7F 89 03 A6 C8 01 00 08 FC 00 B0 28 54 05 18 38 FF A0 00 18 81 21 01 40 7D 70 EA 14 7D 6B C9 D6 7D 54 EA 14 7C 1D 4A 14 7D A4 FC 2E 7D 24 FA 14 C0 E9 00 04 3B BD 00 01 7C 00 C9 D6 55 6B 18 38 7D 2B FA 14 7C AB FC 2E C0 89 00 04 7D 75 1A 14 7F 7B CA 14 7D 4A C9 D6 54 00 18 38 7D 00 FA 14 7C 7F 04 2E C0 C8 00 04 7C 03 E2 14 ED 43 28 28 7C 84 B2 14 55 4A 18 38 EC 06 20 28 7D 2A FA 14 7D 2A FC 2E C1 09 00 04 ED 5D 02 B2 ED 8D 48 28 7D 33 1A 14 ED 67 40 28 7C 00 C1 D6 EC 1D 00 32 7D 45 D2 14 ED AD 48 2A 38 63 00 01 EC E7 40 2A EC 63 28 2A 7D 29 C1 D6 EC C6 20 2A 54 00 18 38 EC 4C 00 2A 7D 00 D2 14 EC 8B 50 28 ED 0D 18 28 7D 6B C1 D6 EC A7 30 28 55 29 18 38 ED 8C 00 28 7C E9 D2 14 ED 6B 50 2A EF FA 02 32 55 6B 18 38 EC 37 03 32 7C CB D2 14 EF D9 00 B2 EC 17 02 F2 ED 5A 01 72 ED 39 01 32 ED 78 0A FA EC BB F9 7A EC 9C F1 3A ED 98 03 38 ED 1B 52 38 EC 5C 48 B8 ED AD 18 2A EC E7 30 2A 7D A5 D5 2E 7C A5 BA 14 D0 EA 00 04 7D 9A 05 2E D1 68 00 04 7D 09 D5 2E D0 A7 00 04 7C 4B D5 2E D0 86 00 04 42 00 FE C8
+
+. 0 1000AA0C 28
+. 39 CE 00 01 7C 63 AA 14 7F 8E 78 40 3A 31 00 08 3A 52 00 08 39 8C 00 08 40 BC F9 74
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 4, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 4, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 4, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 4, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 4, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 4, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 4, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 4
+. 0 10010F94 8
+. 2F 9B 00 04 41 9E 11 B0
+
+. 0 10012148 40
+. 81 61 00 18 54 00 20 36 81 81 00 30 56 3A F0 BE 7E 4B 8B 96 83 8C 00 00 7E E0 E2 14 7E A0 BA 14 2F 92 00 00 41 9E 00 B8
+
+. 0 10012170 180
+. 57 44 08 3C 7E 49 03 A6 38 E4 FF FF 38 A0 00 00 38 C0 00 00 7C 06 72 14 7D 44 3A 14 7D 20 72 14 7C 1D 01 D6 7D 69 72 14 54 E8 18 38 39 08 00 08 54 00 18 38 7D 3D 49 D6 7D BF 04 AE 7D 7D 59 D6 55 29 18 38 7D 29 FC AE 7C 1D 31 D6 55 6B 18 38 7D 6B FC AE 7C C6 D2 14 FD 4D 58 28 54 00 18 38 FD AD 58 2A 7D 9F 04 AE 7D 38 39 D6 FD 40 50 50 7C E7 8A 14 FC 0C 48 2A FD 8C 48 28 7C 18 29 D6 55 29 18 38 FD 60 68 28 7C A5 8A 14 FC 00 68 2A 7D 58 51 D6 54 00 18 38 7C 19 05 AE 7D 89 CD AE 7D 08 C1 D6 55 4A 18 38 7D 48 CD AE 7D 6A CD AE 42 00 FF 64
+
+. 0 10012224 8
+. 2F 9A 00 01 41 BE F5 8C
+
+PASS: gsl_fft_real with signal_real_noise, n = 4, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 4, stride = 1
+. 0 1000CA48 8
+. 2F 9F 00 04 41 9E 0B A8
+
+. 0 1000D5F4 32
+. 81 01 00 20 54 00 20 36 81 21 00 34 55 11 F0 BF 82 09 00 00 7F 00 82 14 7E C0 C2 14 41 82 00 BC
+
+. 0 1000D614 184
+. 7E 29 03 A6 57 63 08 3C 38 A0 00 00 54 A0 10 3A 81 81 00 30 7D 23 02 14 7C 1C 01 D6 39 29 FF FF 55 2A 18 38 7D 63 4A 14 39 4A 00 08 7D 7C 59 D6 54 00 18 38 7D 9D 04 AE 7D 05 62 14 7C E8 62 14 7D 3C 49 D6 55 6B 18 38 7D 4B EC AE 7C C7 62 14 FD 6C 50 28 7D 4A E1 D6 55 29 18 38 7C 09 EC AE FD 8C 50 2A FC 00 00 2A 7D AA EC AE 7C 17 29 D6 7C A5 DA 14 FD AD 68 2A FD 4C 00 28 7D 17 41 D6 FD 8C 00 2A FD 2B 68 2A 54 00 18 38 FD 6B 68 28 7D 9A 05 AE 7C F7 39 D6 55 08 18 38 7D 68 D5 AE 7C D7 31 D6 54 E7 18 38 7D 47 D5 AE 54 C6 18 38 7D 26 D5 AE 42 00 FF 58
+
+. 0 1000D6CC 8
+. 2F 9B 00 01 41 BE F7 E0
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 4, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 4, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 4
+. 0 10012F84 8
+. 2F 9B 00 04 41 9E 12 1C
+
+. 0 100141A4 40
+. 81 81 00 18 54 00 18 38 80 E1 00 30 56 3A F0 BE 7E 4C 8B 96 83 87 00 00 7E E0 E2 14 7F 60 BA 14 2F 92 00 00 41 9E 00 B8
+
+. 0 100141CC 180
+. 57 44 08 3C 7E 49 03 A6 38 E4 FF FF 38 A0 00 00 38 C0 00 00 7C 06 72 14 7D 44 3A 14 7D 20 72 14 7C 1F 01 D6 7D 69 72 14 54 E8 10 3A 39 08 00 04 54 00 10 3A 7D 3F 49 D6 7D BD 04 2E 7D 7F 59 D6 55 29 10 3A 7D 29 EC 2E 7C 1F 31 D6 55 6B 10 3A 7D 6B EC 2E 7C C6 D2 14 ED 4D 58 28 54 00 10 3A ED AD 58 2A 7D 9D 04 2E 7D 38 39 D6 FD 40 50 50 7C E7 8A 14 EC 0C 48 2A ED 8C 48 28 7C 18 29 D6 55 29 10 3A ED 60 68 28 7C A5 8A 14 EC 00 68 2A 7D 58 51 D6 54 00 10 3A 7C 19 05 2E 7D 89 CD 2E 7D 08 C1 D6 55 4A 10 3A 7D 48 CD 2E 7D 6A CD 2E 42 00 FF 64
+
+. 0 10014280 8
+. 2F 9A 00 01 41 BE F5 5C
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 4, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 4, stride = 1
+. 0 1000EC10 8
+. 2F 9D 00 04 41 9E 0B C4
+
+. 0 1000F7D8 32
+. 81 01 00 20 54 00 18 38 81 21 00 34 55 11 F0 BF 82 09 00 00 7E C0 82 14 7F 20 B2 14 41 82 00 BC
+
+. 0 1000F7F8 184
+. 7E 29 03 A6 57 63 08 3C 38 A0 00 00 54 A0 10 3A 81 81 00 30 7D 23 02 14 7C 1C 01 D6 39 29 FF FF 55 2A 10 3A 7D 63 4A 14 39 4A 00 04 7D 7C 59 D6 54 00 10 3A 7D 9F 04 2E 7D 05 62 14 7C E8 62 14 7D 3C 49 D6 55 6B 10 3A 7D 4B FC 2E 7C C7 62 14 ED 6C 50 28 7D 4A E1 D6 55 29 10 3A 7C 09 FC 2E ED 8C 50 2A EC 00 00 2A 7D AA FC 2E 7C 17 29 D6 7C A5 DA 14 ED AD 68 2A ED 4C 00 28 7D 17 41 D6 ED 8C 00 2A ED 2B 68 2A 54 00 10 3A ED 6B 68 28 7D 9A 05 2E 7C F7 39 D6 55 08 10 3A 7D 68 D5 2E 7C D7 31 D6 54 E7 10 3A 7D 47 D5 2E 54 C6 10 3A 7D 26 D5 2E 42 00 FF 58
+
+. 0 1000F8B0 8
+. 2F 9B 00 01 41 BE F7 C8
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 4, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 4, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 4
+PASS: gsl_fft_complex_forward with signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 4, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 4, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 4, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 4, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 4, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 4, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 4, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 4
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 4, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 4, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 4, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 4, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 4, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 4, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 4, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 4
+PASS: gsl_fft_real with signal_real_noise, n = 4, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 4, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 4, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 4, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 4
+PASS: gsl_fft_real_float with signal_real_noise, n = 4, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 4, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 4, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 4, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 4
+PASS: gsl_fft_complex_forward with signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 4, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 4, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 4, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 4, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 4, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 4, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 4, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 4
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 4, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 4, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 4, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 4, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 4, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 4, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 4, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 4
+PASS: gsl_fft_real with signal_real_noise, n = 4, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 4, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 4, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 4, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 4
+PASS: gsl_fft_real_float with signal_real_noise, n = 4, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 4, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 4, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 5, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 5
+. 0 10006F88 8
+. 2F 97 00 05 41 9E 0C BC
+
+. 0 10007C48 80
+. 81 81 01 88 3C 00 CC CC 80 C1 00 18 60 00 CC CD 81 61 00 24 3B 80 00 00 82 6C 00 00 3B 60 00 00 7D 4B 00 16 81 3E 80 54 55 EB 20 36 7E 2B 9A 14 C8 29 00 00 7E 4B 8A 14 7C 06 00 16 55 57 F0 BE 7F AB 92 14 56 F0 10 3A 54 15 F0 BE 48 02 13 01
+
+. 0 10007C98 16
+. 81 3E 80 58 FE E0 08 90 C8 29 00 00 48 02 12 F1
+
+. 0 10007CA8 16
+. 7F 9C 78 40 FF 00 08 90 93 81 00 30 40 BC F8 C4
+
+. 0 10007CB8 64
+. 1C 17 00 03 80 DE 80 44 92 41 01 70 1D D5 00 03 92 21 01 74 7C 08 03 A6 90 C1 01 68 56 B2 08 3C 93 A1 01 6C 56 B1 10 3A 92 61 01 78 56 EC 08 3C 3A C0 00 00 80 E1 00 30 2F 87 00 00 40 9E 02 B8
+
+. 0 10007CF8 48
+. 81 1E 80 40 81 21 01 68 CA 08 00 00 C9 E9 00 00 FE 40 80 90 FE 20 78 90 FE C0 80 90 FE A0 78 90 FE 80 80 90 FE 60 78 90 2F 97 00 00 41 9E 02 34
+
+. 0 10007D28 400
+. 80 C1 00 1C 3C E0 43 30 81 1E 80 50 56 DD 20 36 6C C0 80 00 90 E1 00 08 90 01 00 0C 7C 1C C1 D6 C8 01 00 08 57 33 20 36 C8 48 00 00 57 14 20 36 80 DE 80 5C 7E E9 03 A6 FF 20 10 28 54 03 20 36 C9 C6 00 00 7C 1B AA 14 7D 72 DA 14 7D 4E DA 14 7D 11 DA 14 7C 00 C9 D6 7D 3D FA 14 CB 89 00 08 7C 83 D2 14 7F BD FC AE 3B 7B 00 01 7E D6 CA 14 7D 6B C9 D6 54 00 20 36 7D 20 FA 14 7C FF 04 AE C9 49 00 08 7C 1C BA 14 7F BD 9A 14 7D 4A C9 D6 55 6B 20 36 7C CB FA 14 7D AB FC AE C8 06 00 08 7D 08 C9 D6 55 4A 20 36 7C EA FA 14 7D 8A FC AE C9 67 00 08 7C E8 02 A6 FC 8D 60 2A 7D 50 E2 14 55 08 20 36 FC 60 58 2A 7D 28 FA 14 7C 48 FC AE C8 29 00 08 FD AD 60 28 FD 27 10 2A 81 1E 80 60 FD 0A 08 2A 7D 67 E2 14 FC 00 58 28 7D 2C E2 14 FF 69 20 2A 7C 00 C1 D6 FF 48 18 2A 3B 9C 00 01 FC E7 10 28 C8 48 00 00 FC B7 03 72 7D 29 C1 D6 54 00 20 36 FC D7 00 32 7C E0 D2 14 FD B8 03 72 7D 6B C1 D6 55 29 20 36 FC 18 00 32 7C C9 D2 14 FD 4A 08 28 FD 29 20 28 7D 4A C1 D6 FD 08 18 28 55 6B 20 36 FD 7B 00 B2 7C AB D2 14 FD 9A 00 B2 55 4A 20 36 7D 0A D2 14 FC B8 29 F8 FC D8 32 B8 FC F7 69 FA FD 57 02 BA FD 29 03 B2 FD 08 03 B2 FD 7D 58 28 FD 9C 60 28 FC F9 01 F2 FD 59 02 B2 FC B9 01 72 FC D9 01 B2 FC 0B 48 28 FD AC 40 28
+
+. 0 10007EB8 160
+. FD 6B 48 2A FD 8C 40 2A FC 80 30 2A FC 6D 28 28 FC 4B 50 2A FC 2C 38 28 FD 6B 50 28 FC 00 30 28 FD AD 28 2A FD 8C 38 2A FF CF 02 F2 FF F1 00 32 FC B5 01 32 FC D3 00 B2 FC EF 03 32 FD 51 03 72 FD 35 00 F2 FD 13 00 72 FD 90 F3 3A FD B2 FB 7A FC 76 28 FA FC 34 30 7A FD 70 3A F8 FC 12 50 38 FC 96 49 38 FC 54 40 B8 FF BD D8 2A FF 9C D0 2A 7F A3 D5 AE 7C 63 A2 14 DB 84 00 08 7D 7A 05 AE D9 87 00 08 7C 09 D5 AE D9 A6 00 08 7C 8B D5 AE D8 65 00 08 7C 4A D5 AE D8 28 00 08 42 00 FE 18
+
+. 0 10007F58 72
+. 80 E1 00 30 7F 9C 82 14 81 01 01 78 38 E7 00 01 81 21 01 74 7F 87 78 40 81 41 01 70 81 61 01 6C 39 08 00 10 39 29 00 10 39 4A 00 10 39 6B 00 10 90 E1 00 30 91 01 01 78 91 21 01 74 91 41 01 70 91 61 01 6C 40 BC F5 DC
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 5, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 5, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 5, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 5, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 5, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 5, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 5, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 5
+. 0 10009DB0 8
+. 2F 97 00 05 41 9E 0C E0
+
+. 0 1000AA94 80
+. 81 81 01 44 3C 00 CC CC 80 E1 00 18 60 00 CC CD 81 61 00 24 3B 60 00 00 82 8C 00 00 3B 80 00 00 7D 4B 00 16 81 3E 80 E0 55 EB 18 38 7E 2B A2 14 C8 29 00 00 7E 4B 8A 14 7C 07 00 16 55 57 F0 BE 7F AB 92 14 56 F0 10 3A 54 15 F0 BE 48 01 E4 B5
+
+. 0 1000AAE4 16
+. 81 3E 80 E4 FE E0 08 18 C8 29 00 00 48 01 E4 A5
+
+. 0 1000AAF4 16
+. 7F 9B 78 40 93 61 00 30 FF 00 08 18 40 BC F8 98
+
+. 0 1000AB04 72
+. 1C 17 00 03 80 FE 80 CC 81 1E 80 D0 1D D5 00 03 92 41 01 28 7C 08 03 A6 92 21 01 2C 56 B2 08 3C 90 E1 01 1C 56 B1 10 3A 91 01 01 20 56 EC 08 3C 93 A1 01 24 3A C0 00 00 92 81 01 30 81 21 00 30 2F 89 00 00 40 9E 02 E4
+
+. 0 1000AB4C 52
+. 81 41 01 1C 81 61 01 20 C1 CA 00 00 C0 0B 00 00 FE 80 70 90 D0 01 00 34 FE 60 00 90 FE 40 70 90 FE 20 00 90 FE 00 70 90 FD E0 00 90 2F 97 00 00 41 9E 02 5C
+
+. 0 1000AB80 400
+. 81 21 00 1C 3D 40 43 30 81 7E 80 DC 56 C3 18 38 6D 20 80 00 91 41 00 08 90 01 00 0C 7C 1C C1 D6 C8 01 00 08 57 33 18 38 C8 2B 00 00 57 14 18 38 80 FE 80 E8 7E E9 03 A6 FC 00 08 28 81 1E 80 EC CA C7 00 00 54 1D 18 38 CA A8 00 00 FF 20 00 18 7C 1B AA 14 7D 72 DA 14 7D 4E DA 14 7D 11 DA 14 7C 00 C9 D6 7D 23 FA 14 C3 89 00 04 7C BC BA 14 7F A3 FC 2E 7C 8C E2 14 3B 7B 00 01 7D 6B C9 D6 54 00 18 38 7D 20 FA 14 7C 9F 04 2E C0 A9 00 04 7E D6 CA 14 7C 63 9A 14 7D 4A C9 D6 55 6B 18 38 7C CB FA 14 7C EB FC 2E C1 06 00 04 7C D0 E2 14 7D 08 C9 D6 55 4A 18 38 7C EA FA 14 7D 4A FC 2E C0 C7 00 04 ED 87 50 2A 55 08 18 38 ED 68 30 2A 7D 28 FA 14 7C 68 FC 2E C0 49 00 04 EC E7 50 28 EC 04 18 2A 7D 28 02 A6 ED A5 10 2A 7C A5 C1 D6 ED 08 30 28 EF 60 60 2A 7C 09 E2 14 EF 4D 58 2A 7D 3D D2 14 EC 00 60 28 7C 84 C1 D6 ED AD 58 28 54 A5 18 38 FD 40 D8 90 7D 65 D2 14 FD 20 D0 90 3B 9C 00 01 7C 00 C1 D6 FD 4A 05 72 54 84 18 38 7D 44 D2 14 FD 29 05 72 7C C6 C1 D6 54 00 18 38 7D 00 D2 14 FD 80 E8 90 FD 60 E0 90 FC 00 05 B2 54 C6 18 38 7C E6 D2 14 FD AD 05 B2 EC 84 18 28 EC A5 10 28 C0 41 00 34 FD 8C 50 28 FD 6B 48 28 EC D7 01 F2 ED 57 02 32 EC F8 01 F2 ED 18 02 32 FC 00 00 18 FD A0 68 18 EC D8 31 38 ED 58 51 78
+
+. 0 1000AD10 200
+. FD 80 60 18 FD 60 58 18 EC 97 39 3A EC B7 41 7A ED 2C 00 28 ED 0B 68 28 EC 99 01 32 EC B9 01 72 EC D9 01 B2 ED 59 02 B2 ED 8C 00 2A ED 6B 68 2A EC 09 50 2A ED A8 30 28 EC 6B 20 28 EC EC 28 2A ED 29 50 28 ED 8C 28 28 ED 08 30 2A ED 6B 20 2A EC 22 03 32 EF F3 02 72 EC 82 02 F2 EF D1 00 32 EC AF 01 F2 EC 53 02 32 ED 51 03 72 EC CF 00 F2 EF BD D8 2A EF 9C D0 2A ED 6E 0A FA ED 14 FA 3A 7F BD D5 2E ED B2 F3 7A D3 89 00 04 EC 70 28 FA 7F BD A2 14 ED 8E 23 38 ED 34 12 78 EC 12 50 38 EC F0 31 F8 7D 85 D5 2E D1 6B 00 04 7D 24 D5 2E D1 0A 00 04 7C 1A 05 2E D1 A8 00 04 7C E6 D5 2E D0 67 00 04 42 00 FD FC
+
+. 0 1000ADD8 72
+. 80 E1 00 30 7F 9C 82 14 81 01 01 30 38 E7 00 01 81 21 01 2C 7F 87 78 40 81 41 01 28 81 61 01 24 39 08 00 08 39 29 00 08 39 4A 00 08 39 6B 00 08 90 E1 00 30 91 01 01 30 91 21 01 2C 91 41 01 28 91 61 01 24 40 BC F5 7C
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 5, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 5, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 5, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 5, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 5, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 5, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 5, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 5
+. 0 10010F9C 8
+. 2F 9B 00 05 41 9E 02 78
+
+. 0 10011218 68
+. 81 01 00 18 54 00 20 36 81 21 00 30 3D 60 CC CC 7E 48 8B 96 61 6B CC CD 83 89 00 00 81 3E 82 5C 7E E0 E2 14 7E A0 BA 14 C8 29 00 00 7E 80 AA 14 7C 11 58 16 7D 68 58 16 54 16 F0 BE 55 7A F0 BE 48 01 7D 3D
+
+. 0 1001125C 16
+. 81 3E 82 60 FF 80 08 90 C8 29 00 00 48 01 7D 2D
+
+. 0 1001126C 12
+. 2F 92 00 00 FF 60 08 90 41 9E 01 14
+
+. 0 10011278 272
+. 80 FE 82 64 56 C3 08 3C 81 1E 82 68 FC C0 E0 50 FC A0 08 50 C8 87 00 00 C8 68 00 00 38 A3 FF FF 38 80 00 00 3B 60 00 00 7E 49 03 A6 7C 1B D2 14 7D 03 2A 14 7D 20 D2 14 7C 1D 01 D6 7D 69 D2 14 7D 4B D2 14 55 07 18 38 7D 3D 49 D6 54 00 18 38 7D 7F 04 AE 54 A6 18 38 38 C6 00 08 38 E7 00 08 7D 7D 59 D6 55 29 18 38 7D A9 FC AE 7D 5D 51 D6 55 6B 18 38 7D 4B FC AE FD 8D 50 2A 55 4A 18 38 7C 1D D9 D6 7D 0A FC AE FD AD 50 28 7F 7B B2 14 FC 0B 40 2A 54 00 18 38 FD 3C 03 72 7D 5F 04 AE 7D 38 21 D6 FC E0 60 2A 7C 84 8A 14 FC 00 60 28 FD BB 03 72 7C 18 29 D6 55 29 18 38 FD 87 00 F2 7C A5 8A 14 FD 6B 40 28 FC 00 01 32 7D 18 41 D6 54 00 18 38 FD 8A 60 28 FD 25 4A FA 7C C6 C1 D6 55 08 18 38 FD 66 6A F8 FD 4A 38 2A 7C E7 C1 D6 FD AC 00 28 FD 8C 00 2A 7D 49 CD AE 7D 99 05 AE 7D 66 CD AE 7D A8 CD AE 7D 27 CD AE 42 00 FF 20
+
+. 0 10011388 8
+. 2F 96 00 01 41 9E 04 28
+
+. 0 1000E50C 68
+. 7C 03 41 D6 38 A5 00 01 7D AA FC AE 7D 05 30 50 7F 88 28 40 C8 07 00 00 FD 80 68 50 7D 2A 22 14 54 00 20 36 7C 0A 25 AE 7D 60 22 14 D9 A9 00 08 7C E7 62 14 7C 04 05 AE 7D 4A 62 14 D9 8B 00 08 41 9D FF C0
+
+PASS: gsl_fft_real with signal_real_noise, n = 5, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 5, stride = 1
+. 0 1000CA50 8
+. 2F 9F 00 05 41 9E 0F 9C
+
+. 0 1000D9F0 36
+. 83 21 00 34 54 00 20 36 81 3E 81 70 82 19 00 00 C8 29 00 00 7F 00 82 14 7E C0 C2 14 7E A0 B2 14 48 01 B5 85
+
+. 0 1000DA14 16
+. 81 3E 81 74 FF 20 08 90 C8 29 00 00 48 01 B5 75
+
+. 0 1000DA24 28
+. 80 01 00 20 80 C1 00 18 FF 00 08 90 7E 60 FB 96 2F 93 00 00 7F 26 FB 96 41 9E 01 14
+
+. 0 1000DA40 272
+. 80 FE 81 78 57 66 08 3C 81 1E 81 7C 38 A0 00 00 C8 E7 00 00 7E 69 03 A6 C8 C8 00 00 1D 05 00 05 7C E5 CA 14 7D 26 42 14 7D 1C 41 D6 39 29 FF FF 7C 06 4A 14 54 0A 18 38 7D 7C 49 D6 39 4A 00 08 55 29 18 38 39 29 00 08 55 08 18 38 7C 1C 01 D6 55 6B 18 38 7D 8B EC AE 7D 67 CA 14 7D 08 EC AE 54 00 18 38 7D 4A E1 D6 7C 1D 04 AE FD 2C 00 2A 7D 6A EC AE 7D 29 E1 D6 FD 8C 00 28 7D 4B CA 14 FD 29 48 2A FD B9 02 F2 7C 09 EC AE 7D 2A CA 14 FD 78 02 F2 7C 17 29 D6 7C A5 DA 14 FD 49 01 B2 FD 8C 01 F2 7C F7 39 D6 54 00 18 38 FD B8 68 38 FC 19 58 3A 7D 77 59 D6 54 E7 18 38 FD 48 50 28 FD AD 68 2A FC 00 00 2A 7D 57 51 D6 FD 6A 60 28 55 6B 18 38 FD 8C 50 2A FD 08 48 2A 7D 37 49 D6 FD 4B 68 2A FD 2C 00 2A 55 4A 18 38 FD 8C 00 28 7D 1A 05 AE FD 6B 68 28 55 29 18 38 7D 87 D5 AE 7D 6B D5 AE 7D 4A D5 AE 7D 29 D5 AE 42 00 FF 10
+
+. 0 1000DB50 8
+. 2F 9B 00 01 41 BE F3 5C
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 5, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 5, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 5
+. 0 10012F8C 8
+. 2F 9B 00 05 41 9E 02 78
+
+. 0 10013208 68
+. 81 41 00 18 54 00 18 38 81 81 00 30 3D 60 CC CC 7E 6A 8B 96 61 6B CC CD 83 8C 00 00 81 3E 82 B8 7E E0 E2 14 7F 60 BA 14 C8 29 00 00 7E C0 DA 14 7C 11 58 16 7D 6A 58 16 54 15 F0 BE 55 7A F0 BE 48 01 5D 4D
+
+. 0 1001324C 16
+. 81 3E 82 BC FF 60 08 18 C8 29 00 00 48 01 5D 3D
+
+. 0 1001325C 12
+. 2F 93 00 00 FF 40 08 18 41 9E 01 24
+
+. 0 10013268 288
+. 80 FE 82 C0 56 B4 08 3C 81 1E 82 C4 FC 80 D8 50 FC 60 D0 50 C8 C7 00 00 C8 A8 00 00 38 B4 FF FF 38 80 00 00 38 60 00 00 7E 69 03 A6 7C 03 D2 14 7D 14 2A 14 7D 20 D2 14 7C 1F 01 D6 7D 69 D2 14 7D 4B D2 14 55 07 10 3A 7D 3F 49 D6 54 00 10 3A 7D 5D 04 2E 54 A6 10 3A 38 C6 00 04 38 E7 00 04 7D 7F 59 D6 55 29 10 3A 7D 69 EC 2E 7D 5F 51 D6 55 6B 10 3A 7D 8B EC 2E ED AB 60 2A 55 4A 10 3A 7C 1F 19 D6 7D 0A EC 2E ED 6B 60 28 7C 63 AA 14 EC 0A 40 2A 54 00 10 3A ED 4A 40 28 7D 3D 04 2E 7D 38 21 D6 EC E0 68 2A 7C 84 8A 14 EC 00 68 28 FD A0 48 90 FD 80 38 90 7C 18 29 D6 55 29 10 3A ED 29 38 2A 7C A5 8A 14 FD 8C 01 72 7D 18 41 D6 54 00 10 3A FC 00 01 B2 7D 29 CD 2E FD AD 60 28 ED 9B 02 F2 7C C6 C1 D6 FC 00 00 18 55 08 10 3A FD A0 68 18 ED 7A 02 F2 7C E7 C1 D6 ED 83 62 BA ED 0D 00 28 ED 44 5A B8 ED AD 00 2A 7D B9 05 2E 7D 46 CD 2E 7D 08 CD 2E 7D 87 CD 2E 42 00 FF 10
+
+. 0 10013388 8
+. 2F 95 00 01 41 9E 04 54
+
+. 0 10010750 68
+. 7C 03 41 D6 38 A5 00 01 7D AA FC 2E 7D 05 30 50 7F 88 28 40 C0 07 00 00 FD 80 68 50 7D 2A 22 14 54 00 18 38 7C 0A 25 2E 7D 60 22 14 D1 A9 00 04 7C E7 62 14 7C 04 05 2E 7D 4A 62 14 D1 8B 00 04 41 9D FF C0
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 5, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 5, stride = 1
+. 0 1000EC18 8
+. 2F 9D 00 05 41 9E 0F C0
+
+. 0 1000FBDC 36
+. 83 21 00 34 54 00 18 38 81 3E 81 E8 82 19 00 00 C8 29 00 00 7E C0 82 14 7F 20 B2 14 7E A0 CA 14 48 01 93 99
+
+. 0 1000FC00 16
+. 81 3E 81 EC FF 20 08 18 C8 29 00 00 48 01 93 89
+
+. 0 1000FC10 28
+. 80 01 00 20 80 C1 00 18 FF 00 08 18 7E 40 EB 96 2F 92 00 00 7F 06 EB 96 41 9E 01 24
+
+. 0 1000FC2C 288
+. 80 FE 81 F0 57 65 08 3C 81 1E 81 F4 38 80 00 00 C8 C7 00 00 7E 49 03 A6 C8 A8 00 00 1D 44 00 05 7C C4 C2 14 7C E6 C2 14 7C D7 31 D6 7D 65 52 14 39 6B FF FF 7C 05 5A 14 7D 3C 59 D6 54 08 10 3A 39 08 00 04 55 6B 10 3A 39 6B 00 04 7C 1C 01 D6 55 29 10 3A 7C 09 FC 2E 7D 27 C2 14 54 C6 10 3A 54 00 10 3A 7D 5C 51 D6 7D BF 04 2E ED 00 68 2A EC 00 68 28 55 4A 10 3A 7D 08 E1 D6 7C EA FC 2E ED 08 40 2A FD A0 38 90 FD 80 40 90 7D 6B E1 D6 7D 28 FC 2E FC 00 01 B2 FD 8C 01 72 7D 6B FC 2E 7D 69 C2 14 ED 59 02 72 7C 17 21 D6 ED 38 02 72 7C 84 DA 14 FD AD 60 28 FC 00 00 18 ED 58 52 F8 7C F7 39 D6 FD A0 68 18 54 00 10 3A ED 79 4A FA ED 4A 50 2A ED 8D 00 28 7D 37 49 D6 EC 00 68 2A 54 E7 10 3A ED 6B 58 2A ED AC 50 2A 7D 77 59 D6 EC E7 40 2A ED 20 58 2A 55 29 10 3A EC 00 58 28 ED 8C 50 28 7C FA 05 2E 55 6B 10 3A 7C 06 D5 2E 7D 87 D5 2E 7D A9 D5 2E 7D 2B D5 2E 42 00 FF 00
+
+. 0 1000FD4C 8
+. 2F 9B 00 01 41 BE F3 2C
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 5, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 5, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 5
+PASS: gsl_fft_complex_forward with signal_noise, n = 5, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 5, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 5, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 5, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 5, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 5, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 5, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 5, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 5, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 5, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 5
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 5, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 5, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 5, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 5, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 5, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 5, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 5, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 5, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 5, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 5, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 5
+PASS: gsl_fft_real with signal_real_noise, n = 5, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 5, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 5, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 5, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 5
+PASS: gsl_fft_real_float with signal_real_noise, n = 5, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 5, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 5, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 5, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 5
+PASS: gsl_fft_complex_forward with signal_noise, n = 5, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 5, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 5, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 5, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 5, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 5, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 5, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 5, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 5, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 5, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 5
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 5, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 5, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 5, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 5, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 5, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 5, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 5, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 5, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 5, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 5, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 5
+PASS: gsl_fft_real with signal_real_noise, n = 5, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 5, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 5, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 5, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 5
+PASS: gsl_fft_real_float with signal_real_noise, n = 5, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 5, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 5, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 6, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 6
+. 0 10006F90 8
+. 2F 97 00 06 41 9E 11 2C
+
+. 0 100080C0 88
+. 81 81 00 24 3D 20 AA AA 80 C1 01 88 61 29 AA AB 81 01 00 18 7D 6C 48 16 38 E0 00 00 82 66 00 00 55 E0 20 36 90 E1 00 34 7F 87 78 40 7D 28 48 16 7E 20 9A 14 7E 40 8A 14 55 76 F0 BE 7F A0 92 14 7F 80 EA 14 55 35 F0 BE 39 80 00 00 3A E0 00 00 1E 16 00 05 40 BC F4 64
+
+. 0 10008118 100
+. 81 21 00 1C 1D 55 00 05 1D 76 00 03 80 DE 80 44 2F 09 FF FF 56 A7 10 3A 56 C8 08 3C 56 C9 10 3A 92 21 01 60 1D D5 00 03 91 41 01 40 56 B1 08 3C 91 61 01 48 7C E8 03 A6 90 C1 01 50 3A 80 00 00 93 81 01 54 93 A1 01 58 92 41 01 5C 92 61 01 64 91 01 01 44 91 21 01 4C 81 41 00 34 2F 8A 00 00 40 9E 03 58
+
+. 0 1000817C 76
+. 81 7E 80 40 80 C1 01 50 C8 0B 00 00 D8 01 00 38 C8 06 00 00 CA 01 00 38 D8 01 00 40 FD C0 00 90 80 C1 00 40 80 E1 00 44 FD E0 80 90 FE 60 80 90 D8 01 00 50 90 C1 00 48 90 E1 00 4C FE 40 00 90 FE 20 80 90 2F 96 00 00 41 9E 02 AC
+
+. 0 100081C8 400
+. 80 C1 00 1C 3C E0 43 30 81 1E 80 50 56 9B 20 36 6C C0 80 00 90 E1 00 08 90 01 00 0C 7C 17 C1 D6 C8 48 00 00 57 32 20 36 C8 01 00 08 57 13 20 36 80 FE 80 48 7E C9 03 A6 FC 00 10 28 80 DE 80 4C C8 47 00 00 54 1C 20 36 CA 86 00 00 FE A0 00 B2 7C C8 02 A6 80 E1 01 40 7C 0C AA 14 7D 71 62 14 7D 07 62 14 7C 00 C9 D6 7D 46 62 14 7C 9B FC AE 7D 3B FA 14 CA E1 00 40 C8 29 00 08 7C EE 62 14 7D 6B C9 D6 54 00 20 36 7D 20 FA 14 7C FF 04 AE C9 09 00 08 7F BC D2 14 CB 01 00 48 39 8C 00 01 7D 4A C9 D6 55 6B 20 36 7D 2B FC AE 7C AB FA 14 C9 65 00 08 7E 94 CA 14 80 01 01 48 7F 7B 92 14 7D 08 C9 D6 55 4A 20 36 7C 0A FC AE 7C CA FA 14 C9 A6 00 08 7D 40 BA 14 FC C9 00 2A 80 C1 01 4C 55 08 20 36 7C E7 C9 D6 7D 48 FC AE 7D 28 FA 14 C9 89 00 08 FC AB 68 2A FC 6A 38 2A 81 01 01 44 FC 4C 40 2A 54 E7 20 36 FD 6B 68 28 7F 87 FC AE FD 4A 38 28 7D 27 FA 14 FD 8C 40 28 CB A9 00 08 FC E6 05 32 7D 68 BA 14 7D 37 B2 14 7D 06 BA 14 FD A3 05 32 7C 10 BA 14 7D 29 C1 D6 3A F7 00 01 FD 29 00 28 FD 05 05 32 FC 02 05 32 7D 6B C1 D6 55 29 20 36 FD 75 02 F2 7C E9 D2 14 FD 95 03 32 7D 4A C1 D6 55 6B 20 36 FD 35 02 72 7C CB D2 14 FD 55 02 B2 7D 08 C1 D6 55 4A 20 36 FC E4 38 28 7C AA D2 14 FD BC 68 28 FD 01 40 28 7C 00 C1 D6
+
+. 0 10008358 280
+. FC 1D 00 28 55 08 20 36 FC 84 30 2A 7C 88 D2 14 FC C7 58 2A FF 9C 18 2A 54 00 20 36 FC 6D 60 2A 7C 60 D2 14 FC 21 28 2A FF BD 10 2A FC A8 48 28 FC 40 50 28 FC E7 58 28 FD AD 60 28 FC 00 50 2A FF E6 18 28 FD 08 48 2A FC C6 18 2A FF C5 10 28 FC A5 10 2A C8 41 00 48 FD 87 68 28 FD 68 00 28 FC 42 01 B2 FD 08 00 2A C8 01 00 40 FD 24 E0 28 FD 41 E8 28 D8 41 01 98 FC E7 68 2A C9 A1 00 50 FC 00 03 32 FF 6D 07 F2 FF 57 02 F2 D8 01 01 90 FF 38 01 72 FE CD 07 B2 C9 A1 00 38 FC 6E 02 72 FC 52 01 F2 FF 0E 02 B2 FE F2 02 32 FD 6D 02 FA C8 01 01 98 FD 4F 1A BA FC B0 01 7A FD 13 12 3A FF D1 DF BA FD 8D D3 38 FC D0 C9 B8 FD 2F C2 78 FC F3 B9 F8 FF F1 B7 F8 FC 84 E0 2A FC 21 E8 2A 7C 9C D5 AE 7F 9C 9A 14 D8 3D 00 08 7D 89 D5 AE D9 67 00 08 7C CB D5 AE D8 A6 00 08 7D 2A D5 AE D9 45 00 08 7C E8 D5 AE D9 04 00 08 7F FA 05 AE DB C3 00 08 42 00 FD AC
+
+. 0 10008470 84
+. 80 C1 00 34 7E F7 82 14 80 E1 01 64 38 C6 00 01 81 01 01 60 7F 86 78 40 81 21 01 5C 81 41 01 58 38 E7 00 10 81 61 01 54 39 08 00 10 39 29 00 10 39 4A 00 10 39 6B 00 10 90 C1 00 34 90 E1 01 64 91 01 01 60 91 21 01 5C 91 41 01 58 91 61 01 54 40 BC F0 B8
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 6, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 6, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 6, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 6, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 6, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 6, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 6, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 6
+. 0 10009DB8 8
+. 2F 97 00 06 41 9E 11 88
+
+. 0 1000AF44 88
+. 81 81 00 24 3D 20 AA AA 80 E1 01 44 61 29 AA AB 81 41 00 18 7D 6C 48 16 39 00 00 00 82 87 00 00 55 E0 18 38 91 01 00 38 7F 88 78 40 7D 2A 48 16 7E 20 A2 14 7E 40 8A 14 55 76 F0 BE 7F A0 92 14 7F 80 EA 14 55 35 F0 BE 39 80 00 00 3A E0 00 00 1E 16 00 05 40 BC F4 00
+
+. 0 1000AF9C 108
+. 81 61 00 1C 1C 15 00 03 1C F5 00 05 81 3E 80 CC 1D 16 00 03 81 5E 80 D0 2F 0B FF FF 90 01 00 F0 56 CB 08 3C 56 C0 10 3A 92 21 01 14 7C E8 03 A6 92 81 01 18 56 B1 08 3C 91 01 00 F8 56 AE 10 3A 91 21 01 00 3A 80 00 00 91 41 01 04 93 81 01 08 93 A1 01 0C 92 41 01 10 91 61 00 F4 90 01 00 FC 80 E1 00 38 2F 87 00 00 40 9E 03 48
+
+. 0 1000B008 64
+. 81 01 01 00 80 E1 01 04 C0 08 00 00 D0 01 00 3C C2 61 00 3C C0 07 00 00 FE 40 98 90 D0 01 00 40 FE 20 00 90 D0 01 00 44 FE 00 98 90 FD E0 00 90 D0 01 00 48 FD C0 98 90 2F 96 00 00 41 9E 02 A8
+
+. 0 1000B048 400
+. 80 E1 00 1C 3D 00 43 30 81 3E 80 DC 56 9B 18 38 6C E0 80 00 91 01 00 08 90 01 00 0C 7C 17 C1 D6 C8 29 00 00 57 32 18 38 C8 01 00 08 57 13 18 38 81 1E 80 D4 7E C9 03 A6 FC 00 08 28 80 FE 80 F0 C0 28 00 00 54 1C 18 38 C2 87 00 00 FC 00 00 18 EE A0 00 72 7C E8 02 A6 7C 0C AA 14 7D 71 62 14 7D 4E 62 14 7C 00 C9 D6 81 21 00 F0 7D 07 62 14 7C 9B FC 2E 7C E9 62 14 7D 3B FA 14 7D 6B C9 D6 54 00 18 38 C0 69 00 04 7D 20 FA 14 C1 09 00 04 39 8C 00 01 7C FF 04 2E 7C 17 B2 14 7D 4A C9 D6 55 6B 18 38 7C AB FA 14 7D 2B FC 2E C1 65 00 04 7E 94 CA 14 7F 7B 92 14 7D 08 C9 D6 55 4A 18 38 7C CA FA 14 7C 0A FC 2E C1 A6 00 04 EC C9 00 2A 81 41 00 F4 55 08 18 38 7C E7 C9 D6 7D 28 FA 14 7D 48 FC 2E C1 89 00 04 EC AB 68 2A EC 4A 38 2A 81 01 00 FC EC 2C 40 2A 54 E7 18 38 7D 27 FA 14 ED 29 00 28 ED 6B 68 28 7F E7 FC 2E ED 4A 38 28 C3 C9 00 04 ED 8C 40 28 80 E1 00 F8 ED 06 05 32 7D 2A BA 14 EC E5 05 32 7D 67 BA 14 EC 02 05 32 7D 48 BA 14 ED A1 05 32 7D 10 BA 14 EC E3 38 28 7C 00 C1 D6 ED 04 40 28 7C FC D2 14 ED 35 02 72 3A F7 00 01 ED 75 02 F2 EC 1F 00 28 7D 29 C1 D6 ED BE 68 28 54 00 18 38 ED 55 02 B2 7C C0 D2 14 ED 95 03 32 EC 84 30 2A 7D 6B C1 D6 EC C8 58 2A 55 29 18 38 EC 63 28 2A 7C A9 D2 14 EC A7 48 28
+
+. 0 1000B1D8 276
+. EF FF 10 2A 7D 4A C1 D6 EC 40 60 2A 55 6B 18 38 EF DE 08 2A 7C 8B D2 14 EC 2D 50 28 ED 08 58 28 7D 08 C1 D6 ED AD 50 2A 55 4A 18 38 EC E7 48 2A 7C 6A D2 14 EC 00 60 28 EF 85 08 28 55 08 18 38 ED 67 68 28 7F A8 D2 14 ED 88 00 28 ED 08 00 2A C0 01 00 40 EF A6 10 28 EC E7 68 2A EC C6 10 2A C0 41 00 48 EC A5 08 2A C0 21 00 44 EF 60 03 32 EE E0 02 F2 C0 01 00 48 ED 24 F8 28 ED 43 F0 28 EC 00 07 32 EF 41 01 B2 ED A1 01 72 EC 2F 01 F2 D0 01 01 48 EF 02 07 72 C0 01 00 3C EE D1 02 72 EF 2F 02 32 EC 51 02 B2 ED 10 0A 38 C0 21 01 48 ED 60 DA FA EC B3 D1 7A ED 52 B2 BA EC F0 C9 FA EF 8E C7 3A ED 80 BB 38 EC D3 69 B8 ED 32 12 78 EF AE 0F 78 EC 84 F8 2A EC 63 F0 2A 7C 9C D5 2E 7F 9C 9A 14 D0 67 00 04 7D 9A 05 2E D1 66 00 04 7C C9 D5 2E D0 A5 00 04 7D 2B D5 2E D1 44 00 04 7D 0A D5 2E D0 E3 00 04 7F A8 D5 2E D3 9D 00 04 42 00 FD B4
+
+. 0 1000B2EC 84
+. 80 E1 00 38 7E F7 82 14 81 01 01 18 38 E7 00 01 81 21 01 14 7F 87 78 40 81 41 01 10 81 61 01 0C 39 08 00 08 90 E1 00 38 39 29 00 08 80 E1 01 08 39 4A 00 08 39 6B 00 08 91 01 01 18 38 E7 00 08 91 21 01 14 91 41 01 10 91 61 01 0C 90 E1 01 08 40 BC F0 5C
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 6, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 6, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 6, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 6, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 6, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 6, stride = 1
+. 0 100065C4 20
+. 54 A0 10 3A 38 A5 00 01 7D 26 00 2E 7D 6B 49 D6 42 00 FF F0
+
+. 0 10010A80 32
+. 82 71 00 08 3A 80 00 01 7D C9 73 78 93 51 01 08 7D CE 99 D6 7F 94 98 40 7E 58 73 96 40 9C 00 94
+
+. 0 10010AC0 68
+. 3E A0 43 30 FF C0 E0 90 3B F0 FF FF 7F 9C DA 14 92 A1 00 08 7C 1C C3 96 7F B9 BA 14 3A D6 00 01 3B 5A 00 10 7C 00 C1 D6 7F 80 E0 50 93 81 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 83 F5
+
+. 0 10010B04 16
+. 7C 39 BD AE FC 20 F8 90 3B 39 00 10 48 01 84 85
+
+. 0 10010B14 12
+. 37 FF FF FF D8 3D 00 08 40 82 FF B0
+
+PASS: gsl_fft_real_wavetable_alloc, n = 6, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 6
+. 0 10011E48 124
+. 7C 08 1A 14 55 4B 18 38 7D 20 1A 14 7C 1D 01 D6 39 6B 00 08 7D 3D 49 D6 54 00 18 38 7D BF 04 AE 55 29 18 38 7C 1D 41 D6 7C 09 FC AE 7D 08 DA 14 FD 6D 00 2A FD AD 00 28 54 00 18 38 7C 1F 04 AE 7D 38 39 D6 FD 8B 02 B2 7C E7 8A 14 FD AD 02 72 7C 18 51 D6 55 29 18 38 FD 80 60 28 7D 4A 8A 14 FC 00 58 2A 7D 6B C1 D6 54 00 18 38 7C 09 CD AE 7D 99 05 AE 7D AB CD AE 42 00 FF 88
+
+. 0 10010F4C 36
+. 81 21 00 24 81 41 00 30 2F 89 00 00 39 31 00 01 83 6A FF 00 55 29 F8 7E 38 09 FF FF 7E 31 D9 D6 40 9E 09 9C
+
+. 0 10011908 32
+. 2F 9B 00 02 39 60 00 00 83 E1 00 20 3B A0 00 01 83 21 00 10 83 01 00 14 91 61 00 24 40 9E F6 68
+
+. 0 100119AC 20
+. 38 05 00 01 3B 60 00 01 54 00 F8 7E 2B 80 00 01 40 9D 00 F8
+
+. 0 100119C0 32
+. 7C 17 03 78 38 9C 00 10 38 71 FF FE 2F 9A 00 00 C8 04 FF F8 C9 04 FF F0 FC E0 00 50 41 9E 00 C4
+
+. 0 100119E0 192
+. 57 69 08 3C 7F 49 03 A6 38 C9 FF FF 38 E3 FF FF 7C C8 33 78 81 41 00 2C 55 0B 18 38 39 6B 00 08 7D 28 52 14 7D 6B E9 D6 54 CA 18 38 39 4A 00 08 7C 1D 49 D6 55 29 18 38 39 29 00 08 7D 6B FC AE 54 EB 18 38 39 6B 00 08 54 00 18 38 7D 29 E9 D6 7D 9F 04 AE FC 07 03 32 7D A9 FC AE 7C 1D 41 D6 7D 08 2A 14 FD 47 03 72 FD A8 03 7A 54 00 18 38 7C 1F 04 AE 7D 38 39 D6 FD 88 53 38 7C E7 8A 14 FD 4B 68 28 7C 18 31 D6 FD 6B 68 2A FD 20 60 28 55 29 18 38 FD 40 50 50 7C C6 8A 14 FC 00 60 2A 7D 4A C1 D6 54 00 18 38 7C 19 05 AE 7D 6B C1 D6 7D 6A CD AE 7D 29 CD AE 7D 4B CD AE 42 00 FF 58
+
+. 0 10011AA0 20
+. 3B 7B 00 01 38 63 FF FE 7F 97 D8 40 38 84 00 10 41 9D FF 1C
+
+. 0 10011AB4 8
+. 70 AB 00 01 40 A2 FC FC
+
+PASS: gsl_fft_real with signal_real_noise, n = 6, stride = 1
+. 0 1000C674 32
+. 82 71 00 08 3A 80 00 01 7D D2 73 78 93 51 01 08 7D CE 99 D6 7F 94 98 40 7D 37 73 96 40 9C 00 94
+
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 6, stride = 1
+. 0 1000D37C 20
+. 38 1B 00 01 3A A0 00 01 54 00 F8 7E 2B 80 00 01 40 9D 01 98
+
+. 0 1000D524 8
+. 73 67 00 01 40 A2 F9 88
+
+. 0 1000D52C 8
+. 2F 94 00 00 41 BE F9 80
+
+. 0 1000D534 160
+. FD 3E F0 2A 7E 89 03 A6 57 66 08 3C 38 FB FF FF 38 A0 00 00 1D 25 00 03 7D 47 FA 14 7D 0A FA 14 7D 57 51 D6 7D 29 DA 14 39 29 FF FF 7C 06 4A 14 55 2B 18 38 7C 1C 01 D6 39 6B 00 08 55 4A 18 38 7C A5 DA 14 7D 3C 49 D6 54 00 18 38 7D 5D 04 AE 7D 6B E1 D6 55 29 18 38 7C 09 EC AE FD 80 50 28 7D AB EC AE FC 00 00 2A 7C 17 39 D6 7C E7 DA 14 FD A9 03 72 FD 60 60 50 7D 17 41 D6 FC 00 50 2A FD 8C 68 28 54 00 18 38 FD 6B 68 28 7C 1A 05 AE 55 08 18 38 7D 8A D5 AE 7D 68 D5 AE 42 00 FF 78
+
+. 0 1000D5D4 28
+. 80 C1 00 34 80 01 00 24 38 C6 00 04 90 C1 00 34 34 C0 FF FF 90 C1 00 24 40 82 F4 04
+
+. 0 1000C9F0 52
+. 81 61 00 34 83 21 00 20 83 EB FF 00 80 01 00 18 7F 39 F9 D6 81 81 00 28 2F 8C 00 00 7F 60 CB 96 93 21 00 20 39 3B 00 01 55 29 F8 7E 38 09 FF FF 40 9E 06 5C
+
+. 0 1000D07C 32
+. 2F 9F 00 02 38 C0 00 00 83 A1 00 1C 3B 80 00 01 83 41 00 10 82 E1 00 14 90 C1 00 28 40 9E F9 A8
+
+. 0 1000D0C4 72
+. 81 61 00 2C 7C 08 E1 D6 7D 28 5A 14 7D 6A E1 D6 54 00 20 36 7C 1D 04 AE 7D 4A 3A 14 7D AB EC AE 7C 08 B9 D6 7D 08 DA 14 FD 80 68 28 FC 00 68 2A 7D 29 B9 D6 54 00 18 38 7C 1A 05 AE 55 29 18 38 7D 89 D5 AE 42 00 FF BC
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 6, stride = 1
+. 0 10012A68 32
+. 82 71 00 08 3A 80 00 01 7D C9 73 78 93 51 01 08 7D CE 99 D6 7F 94 98 40 7E 58 73 96 40 9C 00 9C
+
+. 0 10012AA8 68
+. 3E A0 43 30 FF C0 E0 90 3B F0 FF FF 7F 9C DA 14 92 A1 00 08 7C 1C C3 96 7F B9 BA 14 3A D6 00 01 3B 5A 00 08 7C 00 C1 D6 7F 80 E0 50 93 81 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 64 0D
+
+. 0 10012AEC 20
+. FC 00 08 18 FC 20 F8 90 7C 19 BD 2E 3B 39 00 08 48 01 64 99
+
+. 0 10012B00 16
+. 37 FF FF FF FC 20 08 18 D0 3D 00 04 40 82 FF A8
+
+PASS: gsl_fft_real_wavetable_float_alloc, n = 6, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 6
+. 0 10013E78 136
+. 7C 08 1A 14 55 4B 10 3A 7D 20 1A 14 7C 1F 01 D6 39 6B 00 04 7D 3F 49 D6 54 00 10 3A 7D 7D 04 2E 55 29 10 3A 7C 1F 41 D6 7C 09 EC 2E 7D 08 DA 14 ED 4B 00 2A 54 00 10 3A ED 6B 00 28 7D 9D 04 2E 7D 38 39 D6 FD A0 50 90 7C E7 8A 14 FC 00 60 90 ED 6B 02 32 FD AD 02 72 7C 18 51 D6 55 29 10 3A ED 8C 50 2A 7D 4A 8A 14 FC 00 68 28 7D 6B C1 D6 54 00 10 3A 7D 89 CD 2E FC 00 00 18 7C 19 05 2E 7D 6B CD 2E 42 00 FF 7C
+
+. 0 10012F3C 36
+. 81 21 00 24 81 41 00 30 2F 89 00 00 39 31 00 01 83 6A FF 00 55 29 F8 7E 38 09 FF FF 7E 31 D9 D6 40 9E 09 D8
+
+. 0 10013934 32
+. 2F 9B 00 02 39 60 00 00 83 A1 00 20 3B E0 00 01 83 21 00 10 83 01 00 14 91 61 00 24 40 9E F6 2C
+
+. 0 100139D8 20
+. 38 04 00 01 3B 60 00 01 54 00 F8 7E 2B 80 00 01 40 9D 00 F8
+
+. 0 100139EC 32
+. 38 7C 00 08 7C 17 03 78 3B 91 FF FE 2F 9A 00 00 C0 03 FF FC C1 03 FF F8 FC E0 00 50 41 9E 00 C4
+
+. 0 10013A0C 192
+. 57 69 08 3C 7F 49 03 A6 38 A9 FF FF 38 DC FF FF 7C A7 2B 78 81 41 00 2C 54 EB 10 3A 39 6B 00 04 54 A8 10 3A 7D 27 52 14 7D 6B F9 D6 54 CA 10 3A 39 08 00 04 39 4A 00 04 7C 1F 49 D6 55 29 10 3A 39 29 00 04 7D 6B EC 2E 54 00 10 3A 7D 29 F9 D6 7D BD 04 2E ED 87 03 72 7C 09 EC 2E 7C 1F 39 D6 7C E7 22 14 ED 88 60 3A EC 07 00 32 54 00 10 3A 7D 38 31 D6 ED 4B 60 28 7C C6 8A 14 ED A8 03 78 7C 1D 04 2E ED 6B 60 2A 7C 18 29 D6 FD 40 50 50 ED 20 68 28 55 29 10 3A EC 00 68 2A 7C A5 8A 14 7D 08 C1 D6 54 00 10 3A 7C 19 05 2E 7D 4A C1 D6 7D 68 CD 2E 7D 29 CD 2E 7D 4A CD 2E 42 00 FF 58
+
+. 0 10013ACC 20
+. 3B 7B 00 01 3B 9C FF FE 7F 97 D8 40 38 63 00 08 41 9D FF 1C
+
+. 0 10013AE0 8
+. 70 8B 00 01 40 A2 FC FC
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 6, stride = 1
+. 0 1000E834 32
+. 82 71 00 08 3A 80 00 01 7D D2 73 78 93 51 01 08 7D CE 99 D6 7F 94 98 40 7D 37 73 96 40 9C 00 9C
+
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 6, stride = 1
+. 0 1000F548 20
+. 38 1B 00 01 3A 80 00 01 54 00 F8 7E 2B 80 00 01 40 9D 01 B0
+
+. 0 1000F708 8
+. 73 67 00 01 40 A2 F9 70
+
+. 0 1000F710 8
+. 2F 93 00 00 41 BE F9 68
+
+. 0 1000F718 160
+. ED 3D E8 2A 7E 69 03 A6 57 66 08 3C 38 FB FF FF 38 A0 00 00 1D 25 00 03 7D 47 CA 14 7D 0A CA 14 7D 57 51 D6 7D 29 DA 14 39 29 FF FF 7C 06 4A 14 55 2B 10 3A 7C 1C 01 D6 39 6B 00 04 55 4A 10 3A 7C A5 DA 14 7D 3C 49 D6 54 00 10 3A 7D 5F 04 2E 55 29 10 3A 7D 6B E1 D6 7C 09 FC 2E ED 80 50 28 7D AB FC 2E EC 00 00 2A 7C 17 39 D6 7C E7 DA 14 ED A9 03 72 FD 60 60 50 EC 00 50 2A 7D 17 41 D6 ED 8C 68 28 ED 6B 68 28 54 00 10 3A 7C 1A 05 2E 7D 8A D5 2E 55 08 10 3A 7D 68 D5 2E 42 00 FF 78
+
+. 0 1000F7B8 28
+. 80 C1 00 34 80 01 00 24 38 C6 00 04 90 C1 00 34 34 C0 FF FF 90 C1 00 24 40 82 F3 E8
+
+. 0 1000EBB8 52
+. 81 61 00 34 83 21 00 20 83 AB FF 00 80 01 00 18 7F 39 E9 D6 81 81 00 28 2F 8C 00 00 7F 60 CB 96 93 21 00 20 39 3B 00 01 55 29 F8 7E 38 09 FF FF 40 9E 06 60
+
+. 0 1000F248 32
+. 2F 9D 00 02 38 C0 00 00 83 E1 00 1C 3B 80 00 01 83 41 00 10 82 E1 00 14 90 C1 00 28 40 9E F9 A4
+
+. 0 1000F290 72
+. 81 61 00 2C 7C 08 E1 D6 7D 28 5A 14 7D 6A E1 D6 54 00 18 38 7C 1F 04 2E 7D 4A 3A 14 7D AB FC 2E 7C 08 B9 D6 7D 08 DA 14 ED 80 68 28 EC 00 68 2A 7D 29 B9 D6 54 00 10 3A 7C 1A 05 2E 55 29 10 3A 7D 89 D5 2E 42 00 FF BC
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 6, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 6, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 6
+PASS: gsl_fft_complex_forward with signal_noise, n = 6, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 6, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 6, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 6, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 6, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 6, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 6, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 6, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 6, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 6, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 6
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 6, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 6, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 6, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 6, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 6, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 6, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 6, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 6, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 6, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 6, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 6
+PASS: gsl_fft_real with signal_real_noise, n = 6, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 6, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 6, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 6, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 6
+PASS: gsl_fft_real_float with signal_real_noise, n = 6, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 6, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 6, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 6, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 6
+PASS: gsl_fft_complex_forward with signal_noise, n = 6, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 6, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 6, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 6, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 6, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 6, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 6, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 6, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 6, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 6, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 6
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 6, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 6, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 6, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 6, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 6, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 6, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 6, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 6, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 6, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 6, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 6
+PASS: gsl_fft_real with signal_real_noise, n = 6, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 6, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 6, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 6, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 6
+PASS: gsl_fft_real_float with signal_real_noise, n = 6, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 6, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 6, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 7, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 7
+. 0 10006F98 8
+. 2F 97 00 07 41 9E 15 EC
+
+. 0 10008588 108
+. 81 81 00 24 3D 20 24 92 80 E1 00 18 61 29 49 25 80 C1 01 88 7D 0C 48 16 55 EA 20 36 82 66 00 00 3A C0 00 00 3A 80 00 00 7D 27 48 16 7E 2A 9A 14 7D 68 60 50 81 9E 80 64 7E 4A 8A 14 55 6B F8 7E C8 2C 00 00 7C 09 38 50 7F AA 92 14 54 00 F8 7E 7D 08 5A 14 7F 8A EA 14 7D 29 02 14 55 10 F0 BE 7F 6A E2 14 55 2E F0 BE 48 02 09 05
+
+. 0 100085F4 28
+. 81 3E 80 68 1C 10 00 06 D8 21 00 60 CB C9 00 00 90 01 01 00 FC 20 F0 90 48 02 08 E9
+
+. 0 10008610 20
+. 81 3E 80 6C D8 21 00 68 CB E9 00 00 FC 20 F8 90 48 02 08 D5
+
+. 0 10008624 16
+. 80 DE 80 64 D8 21 00 70 C8 26 00 00 48 02 09 65
+
+. 0 10008634 12
+. D8 21 00 78 FC 20 F0 90 48 02 09 59
+
+. 0 10008640 12
+. D8 21 00 80 FC 20 F8 90 48 02 09 4D
+
+. 0 1000864C 16
+. 7F 96 78 40 D8 21 00 88 92 C1 00 58 40 BC EF 20
+
+. 0 1000865C 124
+. 80 01 00 1C 1C CE 00 03 1C EE 00 05 81 9E 80 44 1D 0E 00 06 2F 00 FF FF 1D 50 00 03 90 C1 01 08 1D 70 00 05 90 E1 01 0C 55 C0 10 3A 56 06 08 3C 56 07 10 3A 91 81 01 24 92 61 01 3C 55 CC 08 3C 91 01 01 10 7C 08 03 A6 91 41 01 18 3A 60 00 00 91 61 01 20 93 61 01 28 93 81 01 2C 93 A1 01 30 92 41 01 34 92 21 01 38 90 C1 01 14 90 E1 01 1C 81 01 00 58 2F 88 00 00 40 9E 05 AC
+
+. 0 100086D8 160
+. 81 3E 80 40 80 C1 01 24 C8 09 00 00 D8 01 00 90 C8 06 00 00 80 C1 00 90 80 E1 00 94 D8 01 00 98 90 C1 00 A0 90 E1 00 A4 80 E1 00 98 81 01 00 9C 90 E1 00 A8 91 01 00 AC 81 01 00 A0 81 21 00 A4 91 01 00 B0 91 21 00 B4 81 21 00 A8 81 41 00 AC 91 21 00 B8 91 41 00 BC 80 C1 00 B8 80 E1 00 BC 81 41 00 B0 81 61 00 B4 90 C1 00 C8 90 E1 00 CC 91 41 00 C0 91 61 00 C4 91 41 00 D0 91 61 00 D4 90 C1 00 D8 90 E1 00 DC 91 41 00 E0 91 61 00 E4 90 C1 00 E8 90 E1 00 EC 2F 90 00 00 41 9E 04 9C
+
+. 0 10008778 400
+. C8 E1 00 78 3C E0 43 30 C9 01 00 80 7E 09 03 A6 FD A7 38 2A C9 61 00 88 FD 88 40 2A CA E1 00 60 FD 47 40 2A CB 01 00 68 FD AD 40 28 CB 21 00 88 FD 87 60 28 80 DE 80 70 FD 2B 58 2A CB 41 00 70 FD 17 C0 2A C8 46 00 00 FD AD C8 2A 80 C1 01 8C FD 2A 48 2A 81 3E 80 50 FD 8C C8 28 6C C0 80 00 FD 4A C8 28 90 01 00 0C FC E8 D0 2A 90 E1 00 08 FD AD 10 24 80 DE 80 40 7C 16 C1 D6 56 75 20 36 57 31 20 36 57 12 20 36 54 17 20 36 FD 4A 10 24 FD 8C 10 24 FD 29 10 24 FC E7 10 24 FC 17 B8 2A FD 78 C0 2A FC DA D0 2A FC 00 C0 28 FD 77 58 28 FD 08 30 28 FC 00 D0 28 FD 6B D0 2A FE 08 10 24 FE 40 10 24 C8 01 00 08 FE 2B 10 24 C8 49 00 00 FC 00 10 28 C8 46 00 00 FE 67 10 28 FD 20 02 72 FD E0 02 B2 FD C0 03 72 D9 21 00 F8 FC 00 03 32 D8 01 00 F0 7C E8 02 A6 81 21 01 0C 80 C1 01 08 7C 14 72 14 7D 6C A2 14 7C 00 C9 D6 7D 07 A2 14 7E F5 FC AE 7C E9 A2 14 81 21 01 10 7D 46 A2 14 7C C9 A2 14 7D 6B C9 D6 7D 35 FA 14 54 00 20 36 CA C9 00 08 7D 20 FA 14 7C BF 04 AE 7C E7 C9 D6 55 6B 20 36 C8 E9 00 08 7C AB FA 14 7C CB FC AE 7F 77 D2 14 C9 05 00 08 3A 94 00 01 7C C6 C9 D6 54 E7 20 36 7D 67 FA 14 7C 87 FC AE C8 6B 00 08 7D 76 82 14 FD 46 20 28 80 01 01 14 7D 4A C9 D6 54 C6 20 36 7D 26 FA 14 7D 86 FC AE C9 69 00 08
+
+. 0 10008908 400
+. FD 28 18 28 FF 25 60 28 80 C1 01 1C 7D 08 C9 D6 55 4A 20 36 7C 2A FC AE 7C 6A FA 14 C8 43 00 08 FF 07 58 28 FC C6 20 2A 7D 40 B2 14 55 08 20 36 FD 08 18 2A 7D A8 FC AE 7C 88 FA 14 C8 04 00 08 FC A5 60 2A FF ED 08 2A C8 61 00 F8 FC E7 58 2A 80 01 01 18 FF C0 10 2A 7C E6 B2 14 FD AD 08 28 81 21 01 20 FC 00 10 28 80 C1 01 00 FD 86 28 2A 7D 00 B2 14 FD 68 38 2A 7C 09 B2 14 FC 2A C8 28 7D 26 B2 14 FC 49 C0 28 7D 6B C1 D6 FF A5 F8 28 7E 73 CA 14 FF 87 F0 28 7E B5 8A 14 FC 23 00 72 3A D6 00 01 7D 4A C1 D6 55 6B 20 36 FC 43 00 B2 C8 61 00 F0 7C CB D2 14 FF 6D 50 28 FF 40 48 28 7D 08 C1 D6 FC A6 28 28 55 4A 20 36 FC E8 38 28 7C AA D2 14 FF B2 07 72 7C E7 C1 D6 55 08 20 36 FE F7 60 2A 7C 88 D2 14 FE D6 58 2A FC DF 30 28 FD 1E 40 28 7C 00 C1 D6 FD 4D 50 2A 54 E7 20 36 FD 20 48 2A 7C 67 D2 14 FD B9 68 28 FC 18 00 28 7D 29 C1 D6 FF 92 07 32 54 00 20 36 7F A0 D2 14 FE F7 F8 2A FF 63 06 F2 55 29 20 36 7F 89 D2 14 FF 43 06 B2 7E F7 D5 AE 7E F7 92 14 FE D6 F0 2A FC D1 01 B2 FD 11 02 32 FC B0 01 72 FD AE 03 72 FC 0E 00 32 FD 8C F8 2A FD 6B F0 2A FD 4A C8 2A FD 29 C0 2A CB 01 00 B8 FF E0 E8 50 FC 60 08 50 FC 80 10 50 FC F0 01 F2 FD 4F 02 B2 FD 2F 02 72 FD 93 BB 3A CA E1 00 D8 FD 73 B2 FA FF C0 E0 50
+
+. 0 10008A98 376
+. FF FF 28 28 FC 63 68 28 FC 84 00 28 FF BD 30 2A FF 9C 40 2A FD AD D8 2A FC 00 D0 2A FC A5 30 28 FC 42 D0 28 FF DE 38 28 FC 21 D8 28 FC E7 40 28 FC 84 48 2A FD AD 50 2A FC 00 48 2A FF EC F8 2A FF AC E8 2A FF 8B E0 2A FC 42 48 2A FD 8C 28 2A FC 63 50 2A FC 21 50 2A FF CB F0 2A FD 6B 38 2A FC BF 20 28 FC FD 00 28 FC DC 68 2A FF BD 00 2A C8 01 00 C8 FF 9C 68 28 C9 A1 00 A8 FD 4C 10 2A FF FF 20 2A C8 81 00 98 FD 8C 10 28 C8 41 00 D8 FD 2B 08 28 FD 1E 18 2A FD 6B 08 2A FF 64 07 72 FF DE 18 28 C8 61 00 E8 FE A2 01 72 FC 44 07 32 C8 81 00 B8 FF 38 03 32 FE 83 01 F2 C8 61 00 C8 DA DB 00 08 FF 00 02 B2 FC 04 02 F2 FC 97 02 32 CA E1 00 E8 FF 4D 07 F2 FC 2D 07 B2 FD A3 02 72 FC 77 01 B2 CA E1 00 90 FF 97 DF 3A CA E1 00 B0 CB 61 00 A0 FD 77 CA FA CB 21 00 C0 CA E1 00 90 FF DB D7 BA CB 41 00 D0 CB 61 00 E0 FD 39 C2 7A CB 21 00 B0 CB 01 00 A0 FD 1A AA 3A CB 41 00 C0 FC DB A1 BA CB 61 00 D0 FD 99 03 38 C8 01 00 E0 FF B7 17 78 FF F8 0F F8 FD 5A 6A B8 7F AB D5 AE DB 86 00 08 FC BB 21 78 7F EA D5 AE DB C5 00 08 FC E0 19 F8 7D 88 D5 AE D9 64 00 08 7D 47 D5 AE D9 23 00 08 7C BA 05 AE D9 1D 00 08 7C E9 D5 AE D8 DC 00 08 42 00 FC 60
+
+. 0 10008C10 100
+. 80 C1 00 58 80 E1 01 00 38 C6 00 01 81 01 01 3C 7F 86 78 40 81 21 01 38 81 41 01 34 7E D6 3A 14 81 61 01 30 39 08 00 10 80 E1 01 28 39 29 00 10 90 C1 00 58 39 4A 00 10 80 C1 01 2C 39 6B 00 10 38 E7 00 10 91 01 01 3C 38 C6 00 10 91 21 01 38 91 41 01 34 91 61 01 30 90 C1 01 2C 90 E1 01 28 40 BC E9 08
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 7, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 7, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 7, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 7, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 7, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 7, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 7, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 7
+. 0 10009DC0 8
+. 2F 97 00 07 41 9E 16 40
+
+. 0 1000B404 112
+. 81 81 00 18 3D 20 24 92 81 41 00 24 61 29 49 25 81 61 01 44 3A 60 00 00 7D 0A 48 16 80 01 00 24 82 8B 00 00 55 EA 18 38 80 FE 80 F4 3A E0 00 00 7E 2A A2 14 7D 2C 48 16 7E 4A 8A 14 7D 68 00 50 C8 27 00 00 55 6B F8 7E 7F AA 92 14 7C 09 60 50 7D 08 5A 14 54 00 F8 7E 7F 8A EA 14 7D 29 02 14 55 10 F0 BE 7F 6A E2 14 55 2E F0 BE 48 01 DA 85
+
+. 0 1000B474 32
+. 81 3E 80 F8 FC 20 08 18 1C 10 00 06 CB C9 00 00 90 01 00 A8 D0 21 00 4C FC 20 F0 90 48 01 DA 65
+
+. 0 1000B494 24
+. 81 3E 80 FC FC 20 08 18 CB E9 00 00 D0 21 00 50 FC 20 F8 90 48 01 DA 4D
+
+. 0 1000B4AC 20
+. 80 FE 80 F4 FC 20 08 18 D0 21 00 54 C8 27 00 00 48 01 DA D9
+
+. 0 1000B4C0 16
+. FC 20 08 18 D0 21 00 58 FC 20 F0 90 48 01 DA C9
+
+. 0 1000B4D0 16
+. FC 20 08 18 D0 21 00 5C FC 20 F8 90 48 01 DA B9
+
+. 0 1000B4E0 20
+. 39 80 00 00 7F 8C 78 40 FC 20 08 18 D0 21 00 60 40 BC EE A8
+
+. 0 1000B4F4 140
+. 1C EE 00 03 80 01 00 1C 1D 0E 00 05 92 81 00 EC 1D 2E 00 06 90 E1 00 AC 1D 50 00 03 80 FE 80 CC 1D 70 00 05 91 01 00 B4 81 1E 80 D0 2F 00 FF FF 91 21 00 B8 55 C9 08 3C 91 41 00 C4 55 CA 10 3A 91 61 00 CC 7D 60 00 D0 90 E1 00 D0 56 00 08 3C 56 07 10 3A 91 01 00 D4 93 61 00 D8 7D 28 03 A6 93 81 00 DC 3A 80 00 00 93 A1 00 E0 92 41 00 E4 92 21 00 E8 91 41 00 B0 91 61 00 BC 90 01 00 C0 90 E1 00 C8 2F 8C 00 00 40 9E 05 B4
+
+. 0 1000B580 76
+. 81 01 00 D0 80 E1 00 D4 C0 08 00 00 D0 01 00 64 C0 21 00 64 C0 07 00 00 D0 21 00 6C D0 01 00 68 D0 01 00 70 D0 21 00 74 D0 01 00 78 D0 21 00 7C D0 01 00 80 D0 21 00 84 D0 01 00 88 D0 21 00 8C D0 01 00 90 2F 90 00 00 41 9E 05 04
+
+. 0 1000B5CC 400
+. C0 21 00 58 3D 20 43 30 C0 41 00 5C 7E 09 03 A6 FD 80 08 90 C1 21 00 4C C1 A1 00 50 FC 00 10 90 C2 E1 00 50 ED 41 10 2A EC C9 68 2A C0 A1 00 60 FD A0 48 90 C3 41 00 54 FD 0C 60 2A 80 FE 81 00 FC E0 00 2A 81 5E 80 DC FD 20 B8 90 C8 27 00 00 FD 08 00 28 80 E1 00 BC FC 0D 68 2A 91 21 00 08 FD 60 28 90 6C E0 80 00 FC 60 50 90 90 01 00 0C ED 4A 28 28 80 FE 81 04 FC A0 D0 90 7C 17 C1 D6 FC 00 48 28 56 95 18 38 FD 8C 38 28 57 31 18 38 FC 8B 58 2A 57 12 18 38 FC 00 28 28 54 16 18 38 EC E6 D0 2A FD 08 58 2A FC 00 08 24 FD 8C 58 28 D8 01 00 98 FC 63 20 2A C8 01 00 08 FD 65 28 2A FD 4A 08 24 FD 08 08 24 FD 8C 08 24 FC 63 08 24 FC C6 58 28 FC E7 08 24 FC C6 08 24 FC 49 48 2A D8 C1 00 A0 FD AD 10 28 FD AD 28 2A FE 4D 08 24 C8 2A 00 00 FC 00 08 28 C8 27 00 00 FE 67 08 28 FD C0 00 F2 FE 20 02 B2 FE 00 02 32 FD E0 03 32 81 01 00 AC 7C E8 02 A6 81 21 00 B0 7C 13 72 14 7D 48 9A 14 7C 00 C9 D6 7D 09 9A 14 81 21 00 B4 7D 67 9A 14 CA E1 00 98 7C E9 9A 14 81 21 00 B8 7D 6B C9 D6 54 00 18 38 7C C9 9A 14 7C BF 04 2E 7D 35 FA 14 7E 95 FC 2E C2 A9 00 04 7D 20 FA 14 7C E7 C9 D6 55 6B 18 38 C0 E9 00 04 7C AB FA 14 7C CB FC 2E 7F B7 82 14 C1 45 00 04 7F 76 D2 14 7C C6 C9 D6 54 E7 18 38 7D 67 FA 14 7C 87 FC 2E
+
+. 0 1000B75C 400
+. C0 6B 00 04 3A 73 00 01 ED 86 20 28 80 01 00 C0 7D 4A C9 D6 54 C6 18 38 7D 26 FA 14 7D 26 FC 2E C1 09 00 04 ED 6A 18 28 EF A5 48 28 80 E1 00 C4 7D 08 C9 D6 55 4A 18 38 7C 6A FA 14 7C 2A FC 2E C0 43 00 04 EF 87 40 28 EC A5 48 2A 81 21 00 CC 55 08 18 38 EC C6 20 2A 7C 88 FA 14 7D A8 FC 2E C0 04 00 04 EC E7 40 2A EF 6D 08 2A 81 01 00 C8 EF 40 10 2A 81 41 00 A8 ED 4A 18 2A 7F 80 BA 14 EF 26 28 2A 7C 07 BA 14 EC 00 10 28 7C A8 BA 14 EC 4B E0 28 7C 89 BA 14 EF 0A 38 2A 7C 6A BA 14 ED AD 08 28 7F BD C1 D6 EC 2C E8 28 7E 94 CA 14 EC 85 D8 28 7E B5 8A 14 EC 67 D0 28 3A F7 00 01 ED 39 D8 2A 7F 9C C1 D6 EF C0 58 28 57 BD 18 38 EC A6 28 28 7D 3D D2 14 EC DB 30 28 ED 60 58 2A 7C 00 C1 D6 EC 1C 00 28 57 9C 18 38 7D 7C D2 14 ED 18 D0 2A EF ED 60 28 7C A5 C1 D6 EC EA 38 28 54 00 18 38 ED 5A 50 28 7D 40 D2 14 ED 8D 60 2A 7C 84 C1 D6 ED BD 68 28 54 A5 18 38 7D 05 D2 14 ED 6B E0 2A 7C 63 C1 D6 54 84 18 38 7C E4 D2 14 54 63 18 38 FC 4E 00 B2 7C C3 D2 14 FC 97 01 32 FC 77 00 F2 CA E1 00 A0 ED 8C E8 2A FC 2E 00 72 FD 33 02 72 FC D2 01 B2 FC 10 00 32 FF CF 07 B2 FC B7 01 72 FC F7 01 F2 FC 40 10 18 EE 94 C8 2A FD 13 02 32 FD 52 02 B2 FD B0 03 72 FF EF 07 F2 FC 80 20 18 FC 60 18 18 FC 20 08 18 EE B5 C0 2A
+
+. 0 1000B8EC 400
+. FD 71 02 F2 EE 94 D8 2A FC C0 30 18 FF C0 F0 18 FC 00 00 18 FF 60 10 50 FD 91 03 32 FD 20 48 18 EE B5 D0 2A FD 40 50 18 FF E0 F8 18 FC A0 28 18 FC E0 38 18 FD A0 68 18 FF A0 20 50 FF 80 18 50 FF 40 08 50 FD 00 40 18 FD 60 58 18 ED 34 48 2A EF 7B 00 28 EC 84 30 2A EC 00 F0 2A FD 80 60 18 ED 15 40 2A EF BD 28 28 EF 9C 38 28 EC A5 30 28 EC E7 50 28 EF 5A 68 28 EC 21 F8 28 EC 42 F0 28 C3 C1 00 70 EC 63 50 2A ED AD F8 2A C3 E1 00 78 EC 00 58 2A EC 89 20 2A EF 7B 58 2A ED AD 60 2A EC 21 60 2A EC 42 58 2A EF A9 E8 2A EF 88 E0 2A EC 68 18 2A ED 29 28 2A ED 08 38 2A EF 5A 60 2A EC E4 00 28 EC BD D8 28 ED 68 08 28 ED 89 10 2A EC C3 68 2A EC 84 00 2A C0 01 00 80 ED 29 10 28 C0 41 00 90 ED 08 08 2A C0 21 00 88 EC 63 68 28 EF BD D8 2A C3 61 00 68 ED 5C D0 2A EC 42 01 F2 EF 9C D0 28 EE E0 03 32 EF 5B 01 32 D0 41 01 48 ED BB 00 F2 C3 61 00 88 EF 1F 02 72 EE C1 01 72 EC 3F 02 32 EF E0 02 F2 C0 01 00 90 EF 3E 07 72 EC 5E 07 32 EF DB 02 B2 EF 60 01 B2 C0 01 00 64 EC 60 D0 FA C3 41 00 6C C0 01 00 74 EF 9A CF 3A C3 41 00 7C ED 00 C2 3A C0 01 00 84 ED 7A BA FA C2 E1 00 8C C3 41 01 48 ED 40 B2 BA C0 01 00 64 EC D7 D1 BA C3 41 00 7C EC 80 69 38 C2 E1 00 74 C1 A1 00 6C ED 9A FB 38 C0 01 00 8C ED 37 0A 78
+
+. 0 1000BA7C 80
+. C3 E1 00 84 EF AD 17 78 EC E0 D9 F8 7E 96 D5 2E EC BF F1 78 D2 BB 00 04 7E D6 92 14 7C 9D D5 2E D0 69 00 04 7F BC D5 2E D3 8B 00 04 7D 3A 05 2E D1 0A 00 04 7D 85 D5 2E D1 68 00 04 7C A4 D5 2E D1 47 00 04 7C E3 D5 2E D0 C6 00 04 42 00 FC 14
+
+. 0 1000BACC 92
+. 80 E1 00 EC 39 8C 00 01 7F 8C 78 40 81 01 00 E8 38 E7 00 08 81 21 00 E4 81 41 00 E0 39 08 00 08 81 61 00 DC 39 29 00 08 90 E1 00 EC 39 4A 00 08 80 E1 00 D8 39 6B 00 08 80 01 00 A8 38 E7 00 08 91 01 00 E8 7E F7 02 14 91 21 00 E4 91 41 00 E0 91 61 00 DC 90 E1 00 D8 40 BC E8 74
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 7, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 7, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 7, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 7, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 7, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 7, stride = 1
+. 0 10006588 16
+. 7D 28 5B 96 7C 09 59 D6 7F 88 00 00 40 9E FF F0
+
+. 0 10006584 20
+. 39 6B 00 02 7D 28 5B 96 7C 09 59 D6 7F 88 00 00 40 9E FF F0
+
+. 0 10006598 24
+. 54 E0 10 3A 7D 28 4B 78 2F 89 00 01 7D 66 01 2E 38 E7 00 01 40 9E FF DC
+
+PASS: gsl_fft_real_wavetable_alloc, n = 7, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 7
+. 0 10010FA4 76
+. 3C 00 43 30 80 FE 82 6C 90 01 00 08 3A 40 00 00 93 61 00 0C 7E 91 DB 96 C9 A7 00 00 C8 01 00 08 81 3E 82 70 FC 00 68 28 81 01 00 18 CB E9 00 00 81 21 00 30 FF FF 00 24 83 89 00 00 7E 08 8B 96 FC 20 F8 90 7F 48 DB 96 48 01 7F 09
+
+. 0 10010FF0 12
+. FF C0 08 90 FC 20 F8 90 48 01 7F 9D
+
+. 0 10010FFC 12
+. 7F 92 80 40 FF A0 08 90 40 9C 00 D8
+
+. 0 10011008 72
+. 82 FE 82 74 38 60 00 00 82 DE 82 78 56 86 08 3C 38 A0 00 00 7C 18 19 D6 C8 D7 00 00 C8 F6 00 00 38 E0 00 00 39 43 FF FF 7F 6B DB 78 54 04 18 38 2F 07 00 00 C9 36 00 00 C9 57 00 00 FD 00 48 90 FD 60 48 90 41 9A 00 14
+
+. 0 10011060 12
+. 39 00 00 00 7F 88 D8 40 40 9C 00 44
+
+. 0 1001106C 36
+. 7F 69 03 A6 7C A9 2B 78 7C 09 E9 D6 2F 88 00 00 7D 29 D2 14 39 08 00 01 54 00 18 38 7D 9F 04 AE 41 9E 00 14
+
+. 0 100110A0 12
+. FD 0B 43 3A FD 2A 4B 3A 42 00 FF CC
+
+. 0 10011074 28
+. 7C 09 E9 D6 2F 88 00 00 7D 29 D2 14 39 08 00 01 54 00 18 38 7D 9F 04 AE 41 9E 00 14
+
+. 0 10011090 28
+. FC 07 02 F2 FD A7 02 B2 FD 46 02 B8 FD 66 6A FA FD 0B 43 3A FD 2A 4B 3A 42 00 FF CC
+
+. 0 100110AC 4
+. 40 9A 08 1C
+
+. 0 100110B0 24
+. 7D 24 CD AE 38 E7 00 01 39 6B FF FF 7F 8B 38 40 7D 4A 32 14 40 9C FF 74
+
+. 0 10011038 24
+. 2F 07 00 00 C9 36 00 00 C9 57 00 00 FD 00 48 90 FD 60 48 90 41 9A 00 14
+
+. 0 10011050 28
+. FC 07 07 72 FD A6 07 72 FC C6 07 BA FC E7 6F B8 39 00 00 00 7F 88 D8 40 40 9C 00 44
+
+. 0 100118C8 8
+. 7F 8B 38 40 40 9D 00 24
+
+. 0 100118D0 32
+. 55 49 18 38 7C 18 51 D6 39 29 00 08 7D 29 C1 D6 54 00 18 38 7D 39 05 AE 7D 09 CD AE 4B FF F7 C8
+
+. 0 100110B4 20
+. 38 E7 00 01 39 6B FF FF 7F 8B 38 40 7D 4A 32 14 40 9C FF 74
+
+. 0 100110C8 20
+. 3A 52 00 01 7C A5 A2 14 7F 92 80 40 7C 63 8A 14 41 9C FF 44
+
+. 0 100110DC 8
+. 2F 94 00 01 41 9E 06 D4
+
+PASS: gsl_fft_real with signal_real_noise, n = 7, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 7, stride = 1
+. 0 1000CA58 76
+. 3C 00 43 30 81 1E 81 84 90 01 00 08 3A 40 00 00 93 E1 00 0C 81 21 00 20 C9 A8 00 00 C8 01 00 08 7D C9 FB 96 81 3E 81 88 FC 00 68 28 81 41 00 34 CB E9 00 00 81 61 00 18 FF FF 00 24 82 0A 00 00 7E 6B FB 96 FC 20 F8 90 48 01 C4 55
+
+. 0 1000CAA4 12
+. FF C0 08 90 FC 20 F8 90 48 01 C4 E9
+
+. 0 1000CAB0 12
+. 7F 92 70 40 FF A0 08 90 40 9C 00 DC
+
+. 0 1000CABC 40
+. 7E BF D9 D6 82 DE 81 8C 80 BE 81 90 3B 20 00 00 3B 00 00 00 38 60 00 00 C9 16 00 00 7F 83 F8 40 C9 25 00 00 40 9C 00 A0
+
+. 0 1000CAE4 40
+. 7C 18 D9 D6 7F 26 CB 78 7C 1C 01 D6 7C 95 91 D6 54 07 18 38 2F 83 00 00 C9 85 00 00 C9 76 00 00 FD 40 60 90 41 9E 00 18
+
+. 0 1000CB20 28
+. 39 00 00 00 39 64 FF FF 57 6A 08 3C 2F 88 00 00 7C 08 F8 50 7F 00 40 00 40 9E 04 8C
+
+. 0 1000CB3C 40
+. C8 05 00 00 7D A7 EC AE FC 0A 00 32 FC 0B 03 78 FD 8C 00 2A 39 08 00 01 7D 6B 52 14 7C 08 F8 50 7F 80 40 40 40 9C FF CC
+
+. 0 1000CB2C 16
+. 2F 88 00 00 7C 08 F8 50 7F 00 40 00 40 9E 04 8C
+
+. 0 1000CFC4 28
+. FD A9 02 B2 55 69 18 38 39 29 00 08 FC 09 02 F2 FD 68 6A F8 FD 48 02 BA 41 9A 00 28
+
+. 0 1000CFE0 36
+. 7D 29 E1 D6 7C 1C 59 D6 7C 09 EC AE FC 0A 00 32 54 00 18 38 7D BD 04 AE FC 0B 03 78 FC 00 00 2A 4B FF FB 4C
+
+. 0 1000CB4C 24
+. FD 8C 00 2A 39 08 00 01 7D 6B 52 14 7C 08 F8 50 7F 80 40 40 40 9C FF CC
+
+. 0 1000CB64 28
+. 7C 17 31 D6 38 63 00 01 7F 83 F8 40 7C C6 9A 14 54 00 18 38 7D 9A 05 AE 41 9C FF 7C
+
+. 0 1000CAF8 20
+. 2F 83 00 00 C9 85 00 00 C9 76 00 00 FD 40 60 90 41 9E 00 18
+
+. 0 1000CB0C 48
+. FC 09 07 B2 FD A9 07 72 FC 08 07 7A FD 08 6F B8 FD 20 00 90 39 00 00 00 39 64 FF FF 57 6A 08 3C 2F 88 00 00 7C 08 F8 50 7F 00 40 00 40 9E 04 8C
+
+. 0 1000CB80 20
+. 3A 52 00 01 7F 18 FA 14 7F 92 70 40 7F 39 DA 14 41 9C FF 40
+
+. 0 1000CB94 8
+. 2F 9B 00 01 41 9E 03 18
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 7, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 7, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 7
+. 0 10012F94 76
+. 3C 00 43 30 80 FE 82 C8 90 01 00 08 3A 40 00 00 93 61 00 0C 7E 91 DB 96 C9 A7 00 00 C8 01 00 08 81 3E 82 CC FC 00 68 28 81 01 00 18 CB E9 00 00 81 21 00 30 FF FF 00 24 83 89 00 00 7E 08 8B 96 FC 20 F8 90 7F 48 DB 96 48 01 5F 19
+
+. 0 10012FE0 12
+. FF C0 08 18 FC 20 F8 90 48 01 5F AD
+
+. 0 10012FEC 12
+. 7F 92 80 40 FF A0 08 18 40 9C 00 D8
+
+. 0 10012FF8 72
+. 82 FE 82 D0 38 60 00 00 82 DE 82 D4 56 86 08 3C 38 A0 00 00 7C 18 19 D6 C0 D7 00 00 C0 F6 00 00 38 E0 00 00 39 43 FF FF 7F 6B DB 78 54 04 10 3A 2F 07 00 00 C1 36 00 00 C1 57 00 00 FD 00 48 90 FD 60 48 90 41 9A 00 14
+
+. 0 10013050 12
+. 39 00 00 00 7F 88 D8 40 40 9C 00 44
+
+. 0 1001305C 36
+. 7F 69 03 A6 7C A9 2B 78 7C 09 F9 D6 2F 88 00 00 7D 29 D2 14 39 08 00 01 54 00 10 3A 7D 9D 04 2E 41 9E 00 14
+
+. 0 10013090 12
+. ED 0B 43 3A ED 2A 4B 3A 42 00 FF CC
+
+. 0 10013064 28
+. 7C 09 F9 D6 2F 88 00 00 7D 29 D2 14 39 08 00 01 54 00 10 3A 7D 9D 04 2E 41 9E 00 14
+
+. 0 10013080 28
+. EC 07 02 F2 ED A7 02 B2 ED 46 02 B8 ED 66 6A FA ED 0B 43 3A ED 2A 4B 3A 42 00 FF CC
+
+. 0 1001309C 4
+. 40 9A 08 58
+
+. 0 100130A0 24
+. 7D 24 CD 2E 38 E7 00 01 39 6B FF FF 7F 8B 38 40 7D 4A 32 14 40 9C FF 74
+
+. 0 10013028 24
+. 2F 07 00 00 C1 36 00 00 C1 57 00 00 FD 00 48 90 FD 60 48 90 41 9A 00 14
+
+. 0 10013040 28
+. EC 07 07 72 ED A6 07 72 EC C6 07 BA EC E7 6F B8 39 00 00 00 7F 88 D8 40 40 9C 00 44
+
+. 0 100138F4 8
+. 7F 8B 38 40 40 9D 00 24
+
+. 0 100138FC 32
+. 55 49 10 3A 7C 18 51 D6 39 29 00 04 7D 29 C1 D6 54 00 10 3A 7D 39 05 2E 7D 09 CD 2E 4B FF F7 8C
+
+. 0 100130A4 20
+. 38 E7 00 01 39 6B FF FF 7F 8B 38 40 7D 4A 32 14 40 9C FF 74
+
+. 0 100130B8 20
+. 3A 52 00 01 7C A5 A2 14 7F 92 80 40 7C 63 8A 14 41 9C FF 44
+
+. 0 100130CC 8
+. 2F 94 00 01 41 9E 07 10
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 7, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 7, stride = 1
+. 0 1000EC20 76
+. 3C 00 43 30 80 DE 81 FC 90 01 00 08 3A 40 00 00 93 A1 00 0C C9 A6 00 00 C8 01 00 08 81 3E 82 00 FC 00 68 28 80 E1 00 20 CB E9 00 00 7D C7 EB 96 81 01 00 34 FF FF 00 24 81 21 00 18 82 08 00 00 7E 69 EB 96 FC 20 F8 90 48 01 A2 8D
+
+. 0 1000EC6C 12
+. FF C0 08 18 FC 20 F8 90 48 01 A3 21
+
+. 0 1000EC78 12
+. 7F 92 70 40 FF A0 08 18 40 9C 00 DC
+
+. 0 1000EC84 40
+. 7E BD D9 D6 82 DE 82 04 80 BE 82 08 3B 20 00 00 3B 00 00 00 38 60 00 00 C1 16 00 00 7F 83 E8 40 C1 25 00 00 40 9C 00 A0
+
+. 0 1000ECAC 40
+. 7C 18 D9 D6 7F 26 CB 78 7C 1C 01 D6 7C 95 91 D6 54 07 10 3A 2F 83 00 00 C1 85 00 00 C1 76 00 00 FD 40 60 90 41 9E 00 18
+
+. 0 1000ECE8 28
+. 39 00 00 00 39 64 FF FF 57 6A 08 3C 2F 88 00 00 7C 08 E8 50 7F 00 40 00 40 9E 04 90
+
+. 0 1000ED04 40
+. C0 05 00 00 7D A7 FC 2E EC 0A 00 32 EC 0B 03 78 ED 8C 00 2A 39 08 00 01 7D 6B 52 14 7C 08 E8 50 7F 80 40 40 40 9C FF CC
+
+. 0 1000ECF4 16
+. 2F 88 00 00 7C 08 E8 50 7F 00 40 00 40 9E 04 90
+
+. 0 1000F190 28
+. EC 09 02 B2 55 69 10 3A ED A9 02 F2 39 29 00 04 ED 68 02 F8 ED 48 6A BA 41 9A 00 28
+
+. 0 1000F1AC 36
+. 7D 29 E1 D6 7C 1C 59 D6 7C 09 FC 2E EC 0A 00 32 54 00 10 3A 7D BF 04 2E EC 0B 03 78 EC 00 00 2A 4B FF FB 48
+
+. 0 1000ED14 24
+. ED 8C 00 2A 39 08 00 01 7D 6B 52 14 7C 08 E8 50 7F 80 40 40 40 9C FF CC
+
+. 0 1000ED2C 28
+. 7C 17 31 D6 38 63 00 01 7F 83 E8 40 7C C6 9A 14 54 00 10 3A 7D 9A 05 2E 41 9C FF 7C
+
+. 0 1000ECC0 20
+. 2F 83 00 00 C1 85 00 00 C1 76 00 00 FD 40 60 90 41 9E 00 18
+
+. 0 1000ECD4 48
+. EC 09 07 B2 ED A9 07 72 EC 08 07 7A ED 08 6F B8 FD 20 00 90 39 00 00 00 39 64 FF FF 57 6A 08 3C 2F 88 00 00 7C 08 E8 50 7F 00 40 00 40 9E 04 90
+
+. 0 1000ED48 20
+. 3A 52 00 01 7F 18 EA 14 7F 92 70 40 7F 39 DA 14 41 9C FF 40
+
+. 0 1000ED5C 8
+. 2F 9B 00 01 41 9E 03 1C
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 7, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 7, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 7
+PASS: gsl_fft_complex_forward with signal_noise, n = 7, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 7, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 7, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 7, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 7, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 7, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 7, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 7, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 7, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 7, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 7
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 7, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 7, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 7, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 7, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 7, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 7, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 7, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 7, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 7, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 7, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 7
+PASS: gsl_fft_real with signal_real_noise, n = 7, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 7, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 7, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 7, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 7
+PASS: gsl_fft_real_float with signal_real_noise, n = 7, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 7, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 7, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 7, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 7
+PASS: gsl_fft_complex_forward with signal_noise, n = 7, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 7, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 7, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 7, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 7, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 7, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 7, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 7, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 7, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 7, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 7
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 7, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 7, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 7, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 7, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 7, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 7, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 7, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 7, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 7, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 7, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 7
+PASS: gsl_fft_real with signal_real_noise, n = 7, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 7, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 7, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 7, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 7
+PASS: gsl_fft_real_float with signal_real_noise, n = 7, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 7, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 7, stride = 3
+. 0 100069AC 60
+. 7F FF CA 14 92 81 00 08 7C 1F BB 96 3B 9C 00 01 7F BA C2 14 3A B5 00 01 3B 7B 00 10 7C 00 B9 D6 7F E0 F8 50 93 E1 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 02 25 11
+
+. 0 1000696C 32
+. 82 50 00 08 3A 60 00 01 7D D1 73 78 93 70 01 08 7D CE 91 D6 7F 93 90 40 7E D7 73 96 40 9C 00 8C
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 8, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 8
+. 0 10007BDC 8
+. 2F 8E 00 00 41 BE FE 48
+
+. 0 10007BE4 4
+. 40 9A 00 20
+
+. 0 10007BE8 28
+. CB 11 FF F0 CA F1 FF F8 CB 6C FF F0 CB 2C FF F8 CB 92 FF F0 CB 52 FF F8 4B FF FE 48
+
+. 0 10007A48 8
+. 2F 9C 00 00 41 9E 01 74
+
+. 0 10006F34 40
+. 81 61 01 88 80 01 00 24 82 EB FF 00 81 81 00 28 7C 00 B9 D6 80 C1 00 18 2F 8C 00 00 90 01 00 24 7D E6 03 96 40 9E 06 F4
+
+. 0 1000764C 32
+. 2F 97 00 02 38 E0 00 00 83 E1 00 2C 3B 20 00 01 83 41 00 10 83 01 00 14 90 E1 00 28 40 9E F9 10
+
+. 0 100076D8 132
+. 81 61 01 80 7D 28 FA 14 C9 29 00 08 7C 84 CA 14 7C 05 5A 14 7D 88 FC AE 7C 00 C9 D6 7D 6A D2 14 38 A5 00 01 7D 08 EA 14 54 00 20 36 7D 20 FA 14 7D 1F 04 AE C8 E9 00 08 7C 07 32 14 FC 0C 40 28 7C 00 C1 D6 FD A9 38 28 38 E7 00 01 FD 8C 40 2A FD 66 00 32 54 00 20 36 FD 46 03 72 7D 20 D2 14 7D 8A D5 AE 7D 4A 1A 14 FD A5 5B 7A FC 05 50 38 FD 29 38 2A D9 2B 00 08 7C 1A 05 AE D9 A9 00 08 42 00 FF 80
+
+. 0 100075A0 172
+. 38 60 00 00 80 01 02 84 81 81 01 A4 81 C1 01 A8 7C 08 03 A6 81 E1 01 AC 7D 81 81 20 82 01 01 B0 82 21 01 B4 82 41 01 B8 82 61 01 BC 82 81 01 C0 82 A1 01 C4 82 C1 01 C8 82 E1 01 CC 83 01 01 D0 83 21 01 D4 83 41 01 D8 83 61 01 DC 83 81 01 E0 83 A1 01 E4 83 C1 01 E8 83 E1 01 EC C9 C1 01 F0 C9 E1 01 F8 CA 01 02 00 CA 21 02 08 CA 41 02 10 CA 61 02 18 CA 81 02 20 CA A1 02 28 CA C1 02 30 CA E1 02 38 CB 01 02 40 CB 21 02 48 CB 41 02 50 CB 61 02 58 CB 81 02 60 CB A1 02 68 CB C1 02 70 CB E1 02 78 38 21 02 80 4E 80 00 20
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 8, stride = 1
+. 0 10007C04 40
+. C8 11 FF F8 C9 AC FF F8 C9 92 FF F8 FE E0 00 50 FF 20 68 50 CB 11 FF F0 FF 40 60 50 CB 6C FF F0 CB 92 FF F0 4B FF FE 20
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 8, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 8, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 8, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 8, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 8, stride = 1
+. 0 100097D4 60
+. 7F FF CA 14 92 81 00 08 7C 1F BB 96 3B 9C 00 01 7F BA C2 14 3A B5 00 01 3B 7B 00 08 7C 00 B9 D6 7F E0 F8 50 93 E1 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 F6 E9
+
+. 0 10009794 32
+. 82 50 00 08 3A 60 00 01 7D D1 73 78 93 70 01 08 7D CE 91 D6 7F 93 90 40 7E D7 73 96 40 9C 00 94
+
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 8, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 8
+. 0 1000AA28 8
+. 2F 8E 00 00 41 BE FE 44
+
+. 0 1000AA30 4
+. 40 9A 00 20
+
+. 0 1000AA34 28
+. C3 11 FF F8 C2 F1 FF FC C3 72 FF F8 C3 52 FF FC C3 8C FF F8 C3 2C FF FC 4B FF FE 44
+
+. 0 1000A890 8
+. 2F 9C 00 00 41 9E 01 78
+
+. 0 10009D5C 40
+. 81 41 01 44 81 81 00 24 82 EA FF 00 81 61 00 28 7D 8C B9 D6 80 01 00 18 2F 8B 00 00 91 81 00 24 7D E0 63 96 40 9E 06 EC
+
+. 0 1000A46C 32
+. 2F 97 00 02 38 E0 00 00 83 E1 00 2C 3B 20 00 01 83 41 00 10 83 01 00 14 90 E1 00 28 40 9E F9 18
+
+. 0 1000A4F8 132
+. 81 61 01 3C 7D 28 FA 14 C1 29 00 04 7C 84 CA 14 7C 05 5A 14 7D 48 FC 2E 7C 00 C9 D6 38 A5 00 01 7D 08 EA 14 54 00 18 38 7D 20 FA 14 7D 1F 04 2E C0 E9 00 04 7C 07 32 14 EC 0A 40 28 7C 00 C1 D6 ED A9 38 28 7D 2A D2 14 ED 4A 40 2A 38 E7 00 01 ED 86 00 32 ED 66 03 72 54 00 18 38 ED 29 38 2A 7D 60 D2 14 ED A5 63 7A 7D 4A D5 2E EC 05 58 38 7D 4A 1A 14 D1 29 00 04 7C 1A 05 2E D1 AB 00 04 42 00 FF 80
+
+. 0 1000A3C0 172
+. 38 60 00 00 80 01 02 34 81 81 01 54 81 C1 01 58 7C 08 03 A6 81 E1 01 5C 7D 81 81 20 82 01 01 60 82 21 01 64 82 41 01 68 82 61 01 6C 82 81 01 70 82 A1 01 74 82 C1 01 78 82 E1 01 7C 83 01 01 80 83 21 01 84 83 41 01 88 83 61 01 8C 83 81 01 90 83 A1 01 94 83 C1 01 98 83 E1 01 9C C9 C1 01 A0 C9 E1 01 A8 CA 01 01 B0 CA 21 01 B8 CA 41 01 C0 CA 61 01 C8 CA 81 01 D0 CA A1 01 D8 CA C1 01 E0 CA E1 01 E8 CB 01 01 F0 CB 21 01 F8 CB 41 02 00 CB 61 02 08 CB 81 02 10 CB A1 02 18 CB C1 02 20 CB E1 02 28 38 21 02 30 4E 80 00 20
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 8, stride = 1
+. 0 1000AA50 40
+. C0 11 FF FC C1 B2 FF FC C1 8C FF FC FE E0 00 50 FF 40 68 50 C3 11 FF F8 FF 20 60 50 C3 72 FF F8 C3 8C FF F8 4B FF FE 1C
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 8, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 8, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 8, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 8, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 8, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 8, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 8
+. 0 10012184 160
+. 7C 06 72 14 7D 44 3A 14 7D 20 72 14 7C 1D 01 D6 7D 69 72 14 54 E8 18 38 39 08 00 08 54 00 18 38 7D 3D 49 D6 7D BF 04 AE 7D 7D 59 D6 55 29 18 38 7D 29 FC AE 7C 1D 31 D6 55 6B 18 38 7D 6B FC AE 7C C6 D2 14 FD 4D 58 28 54 00 18 38 FD AD 58 2A 7D 9F 04 AE 7D 38 39 D6 FD 40 50 50 7C E7 8A 14 FC 0C 48 2A FD 8C 48 28 7C 18 29 D6 55 29 18 38 FD 60 68 28 7C A5 8A 14 FC 00 68 2A 7D 58 51 D6 54 00 18 38 7C 19 05 AE 7D 89 CD AE 7D 08 C1 D6 55 4A 18 38 7D 48 CD AE 7D 6A CD AE 42 00 FF 64
+
+. 0 10011ABC 8
+. 2F 9A 00 00 41 BE FC F4
+
+. 0 10011AC4 88
+. 38 E5 FF FF 7F 49 03 A6 7C E8 3B 78 7C 1D 41 D6 81 81 00 2C 54 EA 18 38 7D 68 62 14 39 4A 00 08 7D 38 39 D6 54 00 18 38 7C 1F 04 AE 7D 08 2A 14 7C E7 8A 14 7D 7D 59 D6 55 29 18 38 7C 09 CD AE 55 6B 18 38 7D 4A C1 D6 7C 0B FC AE FC 00 00 50 7C 0A CD AE 42 00 FF B8
+
+. 0 10011B1C 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 F4 18
+
+. 0 10011B38 4
+. 4B FF FC 98
+
+PASS: gsl_fft_real with signal_real_noise, n = 8, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 8, stride = 1
+. 0 1000D6D4 20
+. 39 FB 00 01 3A 40 00 01 55 E0 F8 7E 2B 80 00 01 40 9D 01 F0
+
+. 0 1000D8D4 8
+. 73 66 00 01 40 A2 F5 D8
+
+. 0 1000D8DC 8
+. 2F 91 00 00 41 BE F5 D0
+
+. 0 1000D8E4 208
+. 80 FE 81 6C 38 9B FF FF 7E 29 03 A6 57 63 08 3C C9 27 00 00 7C 85 23 78 57 7F 10 3A 7D 23 2A 14 54 AB 18 38 55 2A 18 38 7C 1C 29 D6 39 6B 00 08 81 81 00 30 39 4A 00 08 7D 04 62 14 7D 3C 49 D6 54 00 18 38 7D 9D 04 AE 7C E8 62 14 7C C7 62 14 7C A5 FA 14 7D 6B E1 D6 55 29 18 38 7D A9 EC AE FD 4C 68 28 7D 4A E1 D6 7D 6B EC AE FD 8C 68 2A FD 4A 02 72 7C 0A EC AE 7C 17 21 D6 FD 8C 60 2A 7C 84 DA 14 FD AB 00 2A FC 00 58 28 7D 17 41 D6 54 00 18 38 FD AD 02 72 7D 9A 05 AE FC 00 00 2A 7C F7 39 D6 55 08 18 38 FD 6A 68 2A FD 4A 68 28 7C D7 31 D6 FD 60 58 50 54 E7 18 38 7D 48 D5 AE 7C 07 D5 AE 54 C6 18 38 7D 66 D5 AE 42 00 FF 50
+
+. 0 1000D9B4 28
+. 80 C1 00 34 80 01 00 24 38 C6 00 04 90 C1 00 34 34 C0 FF FF 90 C1 00 24 40 82 F0 24
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 8, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 8, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 8
+. 0 100141E0 160
+. 7C 06 72 14 7D 44 3A 14 7D 20 72 14 7C 1F 01 D6 7D 69 72 14 54 E8 10 3A 39 08 00 04 54 00 10 3A 7D 3F 49 D6 7D BD 04 2E 7D 7F 59 D6 55 29 10 3A 7D 29 EC 2E 7C 1F 31 D6 55 6B 10 3A 7D 6B EC 2E 7C C6 D2 14 ED 4D 58 28 54 00 10 3A ED AD 58 2A 7D 9D 04 2E 7D 38 39 D6 FD 40 50 50 7C E7 8A 14 EC 0C 48 2A ED 8C 48 28 7C 18 29 D6 55 29 10 3A ED 60 68 28 7C A5 8A 14 EC 00 68 2A 7D 58 51 D6 54 00 10 3A 7C 19 05 2E 7D 89 CD 2E 7D 08 C1 D6 55 4A 10 3A 7D 48 CD 2E 7D 6A CD 2E 42 00 FF 64
+
+. 0 10013AE8 8
+. 2F 9A 00 00 41 BE FC F4
+
+. 0 10013AF0 88
+. 38 E4 FF FF 7F 49 03 A6 7C E8 3B 78 7C 1F 41 D6 81 81 00 2C 54 EA 10 3A 7D 68 62 14 39 4A 00 04 7D 38 39 D6 54 00 10 3A 7C 1D 04 2E 7D 08 22 14 7C E7 8A 14 7D 7F 59 D6 55 29 10 3A 7C 09 CD 2E 55 6B 10 3A 7D 4A C1 D6 7C 0B EC 2E FC 00 00 50 7C 0A CD 2E 42 00 FF B8
+
+. 0 10013B48 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 F3 DC
+
+. 0 10013B64 4
+. 4B FF FC 98
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 8, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 8, stride = 1
+. 0 1000F8B8 20
+. 39 FB 00 01 3A 40 00 01 55 E0 F8 7E 2B 80 00 01 40 9D 01 F0
+
+. 0 1000FAB8 8
+. 73 66 00 01 40 A2 F5 C0
+
+. 0 1000FAC0 8
+. 2F 91 00 00 41 BE F5 B8
+
+. 0 1000FAC8 216
+. 80 FE 81 E4 38 9B FF FF 7E 29 03 A6 57 63 08 3C C9 27 00 00 7C 85 23 78 57 7D 10 3A 7D 23 2A 14 54 AB 10 3A 55 2A 10 3A 7C 1C 29 D6 39 6B 00 04 81 81 00 30 39 4A 00 04 7D 04 62 14 7D 3C 49 D6 54 00 10 3A 7D 5F 04 2E 7C E8 62 14 7C C7 62 14 7C A5 EA 14 7D 6B E1 D6 55 29 10 3A 7C 09 FC 2E ED AA 00 28 7D 4A E1 D6 7D 6B FC 2E ED 4A 00 2A 7D 8A FC 2E 7C 17 21 D6 FD AD 02 72 7C 84 DA 14 EC 0B 60 2A ED 8C 58 28 7D 17 41 D6 FD A0 68 18 54 00 10 3A ED 4A 50 2A 7C F7 39 D6 ED 8C 60 2A FC 00 02 72 55 08 10 3A 7D 5A 05 2E 7C D7 31 D6 54 E7 10 3A FC 00 00 18 ED 6D 00 2A 54 C6 10 3A ED AD 00 28 FD 60 58 50 7D A8 D5 2E 7D 87 D5 2E 7D 66 D5 2E 42 00 FF 48
+
+. 0 1000FBA0 28
+. 80 C1 00 34 80 01 00 24 38 C6 00 04 90 C1 00 34 34 C0 FF FF 90 C1 00 24 40 82 F0 00
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 8, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 8, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 8
+PASS: gsl_fft_complex_forward with signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 8, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 8, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 8, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 8, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 8, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 8, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 8, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 8
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 8, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 8, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 8, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 8, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 8, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 8, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 8, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 8
+PASS: gsl_fft_real with signal_real_noise, n = 8, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 8, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 8, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 8, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 8
+PASS: gsl_fft_real_float with signal_real_noise, n = 8, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 8, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 8, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 8, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 8
+PASS: gsl_fft_complex_forward with signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 8, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 8, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 8, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 8, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 8, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 8, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 8, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 8
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 8, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 8, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 8, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 8, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 8, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 8, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 8, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 8
+PASS: gsl_fft_real with signal_real_noise, n = 8, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 8, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 8, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 8, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 8
+PASS: gsl_fft_real_float with signal_real_noise, n = 8, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 8, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 8, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 9, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 9
+. 0 10007974 8
+. 2F 92 00 00 41 BE FE A4
+
+. 0 1000797C 4
+. 40 92 00 18
+
+. 0 10007980 20
+. CB CC FF F0 CB AC FF F8 C8 35 FF F0 CB F5 FF F8 4B FF FE 9C
+
+. 0 1000782C 8
+. 2F 83 00 00 41 9E 01 2C
+
+. 0 1000786C 240
+. 7C 04 A2 14 7D 76 22 14 7C 00 C9 D6 7D 26 FA 14 C8 E9 00 08 7D 07 D2 14 7D 06 FC AE 38 84 00 01 7F BD CA 14 7D 6B C9 D6 54 00 20 36 7D 40 FA 14 7D BF 04 AE C8 0A 00 08 7C 05 1A 14 7C C6 DA 14 55 6B 20 36 7C 00 C1 D6 7D 2B FA 14 7D 8B FC AE C9 69 00 08 7D 37 2A 14 FC 8D 60 2A 38 A5 00 01 FC 60 58 2A 7D 29 C1 D6 FD AD 60 28 54 00 20 36 FC 00 58 28 7D 60 D2 14 FD 44 07 32 55 29 20 36 FD 83 07 32 7D 49 D2 14 FD A2 03 72 FC 02 00 32 FD 48 50 28 FD 87 60 28 FD 08 20 2A FD 6A 00 2A FD 2C 68 28 FD 4A 00 28 7D 07 D5 AE FD 8C 68 2A 7C E7 E2 14 FC BF 02 F2 FC 1D 02 B2 FD BD 03 32 FC DF 02 72 FD 9E 03 3A FD 21 2A 7A FD 5E 6A B8 FD 61 32 F8 FC E7 18 2A D8 E8 00 08 7D 5A 05 AE D9 8B 00 08 7D 69 D5 AE D9 2A 00 08 42 00 FF 14
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 9, stride = 1
+. 0 10007994 28
+. C8 0C FF F8 C9 B5 FF F8 FF A0 00 50 CB CC FF F0 FF E0 68 50 C8 35 FF F0 4B FF FE 80
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 9, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 9, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 9, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 9, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 9, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 9, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 9
+. 0 1000A7B0 8
+. 2F 92 00 00 41 BE FE 88
+
+. 0 1000A7B8 4
+. 40 92 00 18
+
+. 0 1000A7BC 20
+. C3 AC FF F8 C3 8C FF FC C3 F5 FF F8 C3 D5 FF FC 4B FF FE 80
+
+. 0 1000A64C 8
+. 2F 83 00 00 41 9E 01 48
+
+. 0 1000A690 264
+. 7C 04 9A 14 7D 76 22 14 7C 00 C9 D6 7D 26 FA 14 C0 A9 00 04 38 84 00 01 7C C6 FC 2E 7F BD CA 14 FD 80 28 90 7C C6 DA 14 7D 6B C9 D6 54 00 18 38 7D 40 FA 14 7D 3F 04 2E C1 4A 00 04 FD 60 30 90 7C 05 1A 14 55 6B 18 38 7C 00 C1 D6 7D 2B FA 14 7D 0B FC 2E C0 E9 00 04 7D 77 2A 14 EC 89 40 2A 7D 27 D2 14 EC 6A 38 2A 7D 6B C1 D6 ED 29 40 28 54 00 18 38 FC 00 20 90 7D 40 D2 14 FD A0 18 90 38 A5 00 01 ED 4A 38 28 55 6B 18 38 FC 00 00 72 7D 0B D2 14 FD AD 00 72 FD 6B 00 28 ED 22 02 72 FD 8C 68 28 ED 42 02 B2 FD 60 58 18 FD 80 60 18 EC C6 20 2A EC 0B 50 2A ED AC 48 28 ED 6B 50 28 7C C7 D5 2E ED 8C 48 2A 7C E7 E2 14 ED 3E 00 32 ED 5C 02 F2 ED 1C 03 32 EC FE 03 72 EC A5 18 2A ED 9D 53 3A ED BF 4B 7A ED 7D 42 F8 D0 A9 00 04 EC 1F 38 38 7D 7A 05 2E D1 8A 00 04 7C 0B D5 2E D1 A8 00 04 42 00 FE FC
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 9, stride = 1
+. 0 1000A7D0 28
+. C0 0C FF FC C1 B5 FF FC FF 80 00 50 C3 AC FF F8 FF C0 68 50 C3 F5 FF F8 4B FF FE 64
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 9, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 9, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 9, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 9, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 9, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 9, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 9
+. 0 10011ECC 20
+. 38 1B 00 01 3A C0 00 01 54 00 F8 7E 2B 80 00 01 40 9D 01 A4
+
+. 0 10011EE0 64
+. 81 9E 82 50 3B 57 00 10 80 FE 82 54 3B 9C 00 10 7C 14 03 78 57 77 08 3C CB CC 00 00 CB A7 00 00 2F 95 00 00 C8 1C FF F8 C9 BA FF F8 FC 80 00 50 C8 BC FF F0 FC 40 68 50 C8 7A FF F0 41 9E 01 50
+
+. 0 10011F20 332
+. 56 CB 08 3C 7D 36 D8 50 55 29 08 3C 38 AB FF FF 38 89 FF FF FC 20 F0 90 FF E0 E8 90 7C A6 2B 78 7E A9 03 A6 7D 26 1A 14 7C F7 2A 14 7C 09 1A 14 7D 7D 49 D6 54 0A 18 38 55 29 18 38 39 29 00 08 7C 1D 01 D6 39 4A 00 08 55 6B 18 38 7D 8B FC AE 54 CB 18 38 39 6B 00 08 7D 29 E9 D6 54 00 18 38 7D 7F 04 AE FD 04 03 32 54 E8 18 38 FC E2 02 F2 39 08 00 08 7D 4A E9 D6 7D A9 FC AE 54 A9 18 38 FD 44 03 72 39 29 00 08 7C 0A FC AE FD A5 43 7A 7D 6B E9 D6 FD 22 00 32 FC 03 38 3A 7C 1D 31 D6 7C EB FC AE FD 63 4A F8 54 8B 18 38 39 6B 00 08 FD 85 53 38 7C C6 DA 14 54 00 18 38 7D 58 21 D6 FC CD 00 2A 7C 84 8A 14 FD AD 00 28 FD 4C 58 28 FD 26 00 72 7C F8 39 D6 55 4A 18 38 FD 8C 58 2A 7D 7F 04 AE FD 4A 07 F2 7C 18 29 D6 54 E7 18 38 FC 0C 00 72 7C A5 8A 14 FD 27 48 28 FD AD 07 F2 7D 29 C1 D6 54 00 18 38 FC 0B 00 28 FD 09 50 28 7D 08 C1 D6 FD 6B 60 2A FC E7 30 2A FD 80 68 2A FD 00 40 50 7D 79 05 AE 7D 6B C1 D6 FC 00 68 28 FD 29 50 2A 7C E9 CD AE 7C 07 CD AE 7D 28 CD AE 7D 8A CD AE 7D 0B CD AE 42 00 FE DC
+
+. 0 1001206C 20
+. 3A D6 00 01 3B 9C 00 10 7F 94 B0 40 3B 5A 00 10 41 9D FE 84
+
+. 0 10012080 8
+. 73 68 00 01 40 A2 F7 30
+
+PASS: gsl_fft_real with signal_real_noise, n = 9, stride = 1
+. 0 1000C6B4 68
+. 3E A0 43 30 FF C0 E0 90 3B F0 FF FF 7F 9C DA 14 92 A1 00 08 7C 1C BB 96 7F B9 C2 14 3A D6 00 01 3B 5A 00 10 7C 00 B9 D6 7F 80 E0 50 93 81 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 C8 01
+
+. 0 1000C6F8 16
+. 7C 39 C5 AE FC 20 F8 90 3B 39 00 10 48 01 C8 91
+
+. 0 1000C708 12
+. 37 FF FF FF D8 3D 00 08 40 82 FF B0
+
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 9, stride = 1
+. 0 1000D390 48
+. 80 DE 81 68 7C 13 03 78 3B 18 00 10 3A D0 00 10 57 79 08 3C CB 86 00 00 2F 94 00 00 C8 36 FF F0 CB F6 FF F8 C8 78 FF F0 C8 58 FF F8 41 9E 01 54
+
+. 0 1000D3C0 336
+. 56 A4 08 3C FF A0 E0 90 38 A4 FF FF 38 60 00 00 7E 89 03 A6 1D 03 00 03 7C C5 FA 14 7C 63 DA 14 7D 64 40 50 7D 08 22 14 7D 6B CA 14 39 08 FF FF 39 6B FF FF 7C 19 42 14 55 6A 18 38 54 09 18 38 39 4A 00 08 39 29 00 08 7D 4A E1 D6 55 07 18 38 38 E7 00 08 7D 29 E1 D6 7D 4A EC AE 54 CA 18 38 FD 40 50 50 39 4A 00 08 7C 1C 01 D6 7D 69 EC AE 54 A9 18 38 FC 8B 50 2A 39 29 00 08 FD 6B 50 28 7D 7C 59 D6 54 00 18 38 7D 1D 04 AE FD A4 07 72 FD 6B 07 B2 55 6B 18 38 7C E7 E1 D6 7C 0B EC AE FD 88 00 28 7D 1C 41 D6 FD 08 00 2A 7C C7 EC AE 7C E6 FA 14 FD 8C 07 B2 54 EB 18 38 39 6B 00 08 FD A6 68 28 55 08 18 38 FC 08 07 72 7D 28 EC AE 7C 17 29 D6 7C A5 DA 14 FD 4D 60 28 FD AD 60 2A FC 09 00 28 FC A3 02 B2 7C D7 31 D6 54 00 18 38 FC E1 03 72 FD 42 02 B2 7C F7 39 D6 54 C6 18 38 FD BF 03 72 FD 80 58 2A 7D 29 B9 D6 FC 00 58 28 54 E7 18 38 FD 29 40 2A FC A2 2B 3A 7D 4A B9 D6 FC FF 38 3A 7D 3A 05 AE FC 01 68 38 7D 6B B9 D6 FD 83 53 38 FC C6 20 2A 7C C9 D5 AE 7C 06 D5 AE 7C EA D5 AE 7D 87 D5 AE 7C AB D5 AE 42 00 FE C8
+
+. 0 1000D510 20
+. 3A B5 00 01 3A D6 00 10 7F 93 A8 40 3B 18 00 10 41 9D FE 88
+
+. 0 1000D2F4 128
+. 1C 07 00 03 7D 07 FA 14 7D 48 FA 14 7D 17 41 D6 7D 26 02 14 39 29 FF FF 55 2B 18 38 39 6B 00 08 7C 1C 01 D6 55 08 18 38 7D 6B E1 D6 54 00 18 38 7D 7D 04 AE 7D 3C 49 D6 7D AB EC AE FD A9 03 72 55 29 18 38 7C 17 39 D6 7C 09 EC AE 7C E7 DA 14 FD 8B 00 28 FC 00 00 2A 7D 57 51 D6 54 00 18 38 FD 4C 68 2A FD 6B 00 2A FD 8C 68 28 55 4A 18 38 7D 7A 05 AE 7D 88 D5 AE 7D 4A D5 AE 42 00 FF 84
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 9, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 9, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 9
+. 0 10013F08 20
+. 38 1B 00 01 3A C0 00 01 54 00 F8 7E 2B 80 00 01 40 9D 01 B8
+
+. 0 10013F1C 76
+. 81 9E 82 B0 3B 57 00 08 80 FE 82 AC 3B 9C 00 08 C0 0C 00 00 7C 14 03 78 57 77 08 3C C3 C7 00 00 D0 01 00 38 81 81 00 38 7D 88 03 A6 2F 95 00 00 C0 1C FF FC C1 BA FF FC FC 80 00 50 C0 BC FF F8 FC 40 68 50 C0 7A FF F8 41 9E 01 58
+
+. 0 10013F68 340
+. 56 CB 08 3C 7D 36 D8 50 7D 08 02 A6 55 29 08 3C 38 AB FF FF 91 01 00 38 38 89 FF FF C0 21 00 38 FF E0 F0 90 7C A6 2B 78 7E A9 03 A6 7D 26 1A 14 54 88 10 3A 7C 09 1A 14 7D 7F 49 D6 54 0A 10 3A 55 29 10 3A 39 29 00 04 7C 1F 01 D6 39 4A 00 04 55 6B 10 3A 7D 4B EC 2E 54 CB 10 3A 39 6B 00 04 7D 29 F9 D6 54 00 10 3A 7D 7D 04 2E ED 84 02 B2 39 08 00 04 ED 22 02 F2 7D 4A F9 D6 7D A9 EC 2E 54 A9 10 3A ED 85 63 7A 39 29 00 04 ED A4 03 72 7C 0A EC 2E 7D 6B F9 D6 ED 23 48 3A EC 02 00 32 ED 45 6A B8 7C 1F 31 D6 EC CC 48 2A 7C EB EC 2E ED 63 02 F8 7D 77 2A 14 ED 8C 48 28 55 6A 10 3A 54 00 10 3A 7C F8 21 D6 ED 0A 58 28 7C 1D 04 2E ED 4A 58 2A 39 4A 00 04 ED 66 00 72 7C C6 DA 14 ED 08 07 F2 7C 18 29 D6 ED AA 00 72 54 E7 10 3A ED 67 58 28 7C A5 8A 14 ED 8C 07 F2 7C 84 8A 14 ED A0 68 28 7D 78 59 D6 ED 2B 40 28 54 00 10 3A EC 00 50 2A EC E7 30 2A 7D 29 C1 D6 ED 4D 60 2A FD 20 48 50 55 6B 10 3A ED AD 60 28 7C 19 05 2E ED 6B 40 2A 7D 4A C1 D6 7C E9 CD 2E 7D AB CD 2E 7D 08 C1 D6 7D 6A CD 2E 7D 47 CD 2E 7D 28 CD 2E 42 00 FE DC
+
+. 0 100140BC 20
+. 3A D6 00 01 3B 9C 00 08 7F 94 B0 40 3B 5A 00 08 41 9D FE 7C
+
+. 0 100140D0 8
+. 73 69 00 01 40 A2 F7 0C
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 9, stride = 1
+. 0 1000E874 68
+. 3E A0 43 30 FF C0 E0 90 3B F0 FF FF 7F 9C DA 14 92 A1 00 08 7C 1C BB 96 7F B9 C2 14 3A D6 00 01 3B 5A 00 08 7C 00 B9 D6 7F 80 E0 50 93 81 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 A6 41
+
+. 0 1000E8B8 20
+. FC 00 08 18 FC 20 F8 90 7C 19 C5 2E 3B 39 00 08 48 01 A6 CD
+
+. 0 1000E8CC 16
+. 37 FF FF FF FC 20 08 18 D0 3D 00 04 40 82 FF A8
+
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 9, stride = 1
+. 0 1000F55C 48
+. 80 DE 81 E0 7C 12 03 78 3A D6 00 08 3A B0 00 08 57 78 08 3C CB 86 00 00 2F 93 00 00 C0 75 FF F8 C0 55 FF FC C0 36 FF F8 C3 F6 FF FC 41 9E 01 6C
+
+. 0 1000F58C 360
+. 56 83 08 3C FF C0 E0 90 38 83 FF FF 3B A0 00 00 7E 69 03 A6 1D 1D 00 03 7C C4 CA 14 7C A6 CA 14 7F BD DA 14 7D 63 40 50 7D 08 1A 14 7D 6B C2 14 39 08 FF FF 39 6B FF FF 7C 18 42 14 55 6A 10 3A 54 09 10 3A 39 4A 00 04 39 29 00 04 7D 4A E1 D6 55 07 10 3A 38 E7 00 04 7D 29 E1 D6 7D 2A FC 2E 54 CA 10 3A FD 20 48 50 39 4A 00 04 7C 1C 01 D6 7D 49 FC 2E 54 89 10 3A EC 8A 48 2A 39 29 00 04 ED 4A 48 28 7D 7C 59 D6 54 00 10 3A 7D 7F 04 2E FD A0 20 90 ED 4A 07 72 55 6B 10 3A 7C E7 E1 D6 7C EB FC 2E FD AD 07 B2 54 AB 10 3A EC AB 38 2A 39 6B 00 04 7D 1C 41 D6 7C C7 FC 2E ED 6B 38 28 FD 80 28 90 FC 00 30 90 55 08 10 3A ED 6B 07 72 7D 08 FC 2E FD 8C 07 B2 7C 17 21 D6 7C 84 DA 14 FC 00 68 28 FD A0 40 90 EC C6 20 2A FC 00 00 18 7C D7 31 D6 FD AD 60 28 54 00 10 3A ED 08 28 2A ED 80 58 28 FD A0 68 18 7C B7 29 D6 EC 00 58 2A 54 C6 10 3A EC E1 03 32 7D 1A 05 2E ED 2D 50 2A ED 63 00 32 7D 29 B9 D6 ED AD 50 28 54 A5 10 3A EC 02 00 32 ED 9F 03 32 7D 4A B9 D6 ED 62 5B 7A EC FF 3A 7A 7C C9 D5 2E ED A3 03 78 ED 21 62 78 7D 6B B9 D6 7D A6 D5 2E 7D 6A D5 2E 7D 25 D5 2E 7C EB D5 2E 42 00 FE B0
+
+. 0 1000F6F4 20
+. 3A 94 00 01 3A B5 00 08 7F 92 A0 40 3A D6 00 08 41 9D FE 70
+
+. 0 1000F4C0 128
+. 1C 07 00 03 7D 07 CA 14 7D 48 CA 14 7D 17 41 D6 7D 26 02 14 39 29 FF FF 55 2B 10 3A 7C 1C 01 D6 39 6B 00 04 55 08 10 3A 7D 3C 49 D6 54 00 10 3A 7D 7F 04 2E 7D 6B E1 D6 55 29 10 3A 7C 09 FC 2E ED 8B 00 28 7D AB FC 2E EC 00 00 2A 7C 17 39 D6 7C E7 DA 14 ED A9 03 72 ED 6B 00 2A 7D 57 51 D6 ED 4C 68 2A ED 8C 68 28 54 00 10 3A 7D 7A 05 2E 55 4A 10 3A 7D 88 D5 2E 7D 4A D5 2E 42 00 FF 84
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 9, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 9, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 9
+PASS: gsl_fft_complex_forward with signal_noise, n = 9, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 9, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 9, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 9, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 9, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 9, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 9, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 9, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 9, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 9, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 9
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 9, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 9, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 9, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 9, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 9, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 9, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 9, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 9, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 9, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 9, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 9
+PASS: gsl_fft_real with signal_real_noise, n = 9, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 9, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 9, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 9, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 9
+PASS: gsl_fft_real_float with signal_real_noise, n = 9, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 9, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 9, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 9, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 9
+PASS: gsl_fft_complex_forward with signal_noise, n = 9, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 9, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 9, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 9, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 9, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 9, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 9, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 9, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 9, stride = 3
+. 0 FEEF88C 16
+. 55 46 BA 7E 28 06 00 14 39 06 00 5B 40 81 00 38
+
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 9, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 9
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 9, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 9, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 9, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 9, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 9, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 9, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 9, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 9, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 9, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 9, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 9
+PASS: gsl_fft_real with signal_real_noise, n = 9, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 9, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 9, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 9, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 9
+PASS: gsl_fft_real_float with signal_real_noise, n = 9, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 9, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 9, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 10, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 10
+. 0 10007FA0 12
+. 80 E1 00 30 2F 87 00 00 41 BE FD 50
+
+. 0 10007FAC 4
+. 40 8E 00 D0
+
+. 0 10007FB0 52
+. 81 41 01 78 81 61 01 74 80 C1 01 70 80 E1 01 6C CA 0A FF F0 C9 EA FF F8 CA 4B FF F0 CA 2B FF F8 CA C6 FF F0 CA A6 FF F8 CA 87 FF F0 CA 67 FF F8 4B FF FD 40
+
+. 0 10007D20 8
+. 2F 97 00 00 41 9E 02 34
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 10, stride = 1
+. 0 1000807C 68
+. 81 01 01 78 81 21 01 74 81 41 01 70 81 61 01 6C C8 08 FF F8 C9 A9 FF F8 C9 8A FF F8 FD E0 00 50 C9 6B FF F8 FE 20 68 50 FE A0 60 50 CA 08 FF F0 FE 60 58 50 CA 49 FF F0 CA CA FF F0 CA 8B FF F0 4B FF FC 64
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 10, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 10, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 10, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 10, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 10, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 10, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 10
+. 0 1000AE20 12
+. 81 21 00 30 2F 89 00 00 41 BE FD 24
+
+. 0 1000AE2C 4
+. 40 8E 00 D0
+
+. 0 1000AE30 56
+. 80 E1 01 30 81 01 01 28 C0 07 FF FC 81 21 01 24 C1 C7 FF F8 80 E1 01 2C C2 48 FF F8 C2 87 FF F8 C2 67 FF FC C2 28 FF FC C2 09 FF F8 C1 E9 FF FC D0 01 00 34 4B FF FD 14
+
+. 0 1000AB78 8
+. 2F 97 00 00 41 9E 02 5C
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 10, stride = 1
+. 0 1000AEFC 72
+. 81 41 01 30 81 61 01 2C 80 E1 01 28 81 01 01 24 C0 0A FF FC C1 AB FF FC C1 87 FF FC FC 00 00 50 C1 68 FF FC FE 60 68 50 FE 20 60 50 C1 CA FF F8 FD E0 58 50 C2 8B FF F8 C2 47 FF F8 C2 08 FF F8 D0 01 00 34 4B FF FC 38
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 10, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 10, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 10, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 10, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 10, stride = 1
+. 0 10010ACC 56
+. 7F 9C DA 14 92 A1 00 08 7C 1C C3 96 7F B9 BA 14 3A D6 00 01 3B 5A 00 10 7C 00 C1 D6 7F 80 E0 50 93 81 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 83 F5
+
+PASS: gsl_fft_real_wavetable_alloc, n = 10, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 10
+. 0 100112A4 228
+. 7C 1B D2 14 7D 03 2A 14 7D 20 D2 14 7C 1D 01 D6 7D 69 D2 14 7D 4B D2 14 55 07 18 38 7D 3D 49 D6 54 00 18 38 7D 7F 04 AE 54 A6 18 38 38 C6 00 08 38 E7 00 08 7D 7D 59 D6 55 29 18 38 7D A9 FC AE 7D 5D 51 D6 55 6B 18 38 7D 4B FC AE FD 8D 50 2A 55 4A 18 38 7C 1D D9 D6 7D 0A FC AE FD AD 50 28 7F 7B B2 14 FC 0B 40 2A 54 00 18 38 FD 3C 03 72 7D 5F 04 AE 7D 38 21 D6 FC E0 60 2A 7C 84 8A 14 FC 00 60 28 FD BB 03 72 7C 18 29 D6 55 29 18 38 FD 87 00 F2 7C A5 8A 14 FD 6B 40 28 FC 00 01 32 7D 18 41 D6 54 00 18 38 FD 8A 60 28 FD 25 4A FA 7C C6 C1 D6 55 08 18 38 FD 66 6A F8 FD 4A 38 2A 7C E7 C1 D6 FD AC 00 28 FD 8C 00 2A 7D 49 CD AE 7D 99 05 AE 7D 66 CD AE 7D A8 CD AE 7D 27 CD AE 42 00 FF 20
+
+. 0 100119CC 20
+. 2F 9A 00 00 C8 04 FF F8 C9 04 FF F0 FC E0 00 50 41 9E 00 C4
+
+PASS: gsl_fft_real with signal_real_noise, n = 10, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 10, stride = 1
+. 0 1000DB58 20
+. 3A 5B 00 01 3A 80 00 01 56 40 F8 7E 2B 80 00 01 40 9D 02 C0
+
+. 0 1000DE28 8
+. 73 6B 00 01 40 A2 F0 84
+
+. 0 1000DE30 8
+. 2F 93 00 00 41 BE F0 7C
+
+. 0 1000DE38 292
+. 81 9E 81 7C 7E 69 03 A6 80 FE 81 80 57 66 08 3C C8 CC 00 00 38 BB FF FF C8 A7 00 00 38 80 00 00 1D 24 00 05 7C E5 CA 14 7C 84 DA 14 7D 29 DA 14 39 29 FF FF 7C 06 4A 14 7D 7C 49 D6 54 0A 18 38 7D 06 02 14 39 4A 00 08 7C 1C 01 D6 55 6B 18 38 7D 6B EC AE 55 29 18 38 39 29 00 08 FD 6B 58 2A 54 00 18 38 7D 4A E1 D6 7C 1D 04 AE FC 00 00 2A 7D 1C 41 D6 7D AA EC AE FD 8B 00 28 FD 6B 00 2A 7D 29 E1 D6 55 08 18 38 7C E8 EC AE FD AD 68 2A FD 8C 01 72 FD 2B 39 B8 7C 09 EC AE 7D 27 CA 14 7C 17 29 D6 FD 58 03 72 7D 69 CA 14 7D 4B CA 14 FD B9 03 72 7C A5 DA 14 7C F7 39 D6 54 00 18 38 FC 00 00 2A FD 0C 48 28 FD 8C 48 2A FD 59 50 38 7D 37 49 D6 54 E7 18 38 FC 18 68 3A FD 20 60 50 7D 77 59 D6 FD A0 40 50 55 29 18 38 FD 6B 38 2A FD 29 00 28 7D 57 51 D6 FD AD 50 28 FD 8C 00 28 55 6B 18 38 FD 08 50 28 7D 7A 05 AE 55 4A 18 38 7D 87 D5 AE 7D 09 D5 AE 7D AB D5 AE 7D 2A D5 AE 42 00 FF 00
+
+. 0 1000DF5C 28
+. 80 C1 00 34 80 01 00 24 38 C6 00 04 90 C1 00 34 34 C0 FF FF 90 C1 00 24 40 82 EA 7C
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 10, stride = 1
+. 0 10012AB4 56
+. 7F 9C DA 14 92 A1 00 08 7C 1C C3 96 7F B9 BA 14 3A D6 00 01 3B 5A 00 08 7C 00 C1 D6 7F 80 E0 50 93 81 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 64 0D
+
+PASS: gsl_fft_real_wavetable_float_alloc, n = 10, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 10
+. 0 10013294 244
+. 7C 03 D2 14 7D 14 2A 14 7D 20 D2 14 7C 1F 01 D6 7D 69 D2 14 7D 4B D2 14 55 07 10 3A 7D 3F 49 D6 54 00 10 3A 7D 5D 04 2E 54 A6 10 3A 38 C6 00 04 38 E7 00 04 7D 7F 59 D6 55 29 10 3A 7D 69 EC 2E 7D 5F 51 D6 55 6B 10 3A 7D 8B EC 2E ED AB 60 2A 55 4A 10 3A 7C 1F 19 D6 7D 0A EC 2E ED 6B 60 28 7C 63 AA 14 EC 0A 40 2A 54 00 10 3A ED 4A 40 28 7D 3D 04 2E 7D 38 21 D6 EC E0 68 2A 7C 84 8A 14 EC 00 68 28 FD A0 48 90 FD 80 38 90 7C 18 29 D6 55 29 10 3A ED 29 38 2A 7C A5 8A 14 FD 8C 01 72 7D 18 41 D6 54 00 10 3A FC 00 01 B2 7D 29 CD 2E FD AD 60 28 ED 9B 02 F2 7C C6 C1 D6 FC 00 00 18 55 08 10 3A FD A0 68 18 ED 7A 02 F2 7C E7 C1 D6 ED 83 62 BA ED 0D 00 28 ED 44 5A B8 ED AD 00 2A 7D B9 05 2E 7D 46 CD 2E 7D 08 CD 2E 7D 87 CD 2E 42 00 FF 10
+
+. 0 100139F8 20
+. 2F 9A 00 00 C0 03 FF FC C1 03 FF F8 FC E0 00 50 41 9E 00 C4
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 10, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 10, stride = 1
+. 0 1000FD54 20
+. 3A 3B 00 01 3A 60 00 01 56 20 F8 7E 2B 80 00 01 40 9D 02 E0
+
+. 0 10010044 8
+. 73 6B 00 01 40 A2 F0 34
+
+. 0 1001004C 8
+. 2F 92 00 00 41 BE F0 2C
+
+. 0 10010054 308
+. 81 9E 81 F4 7E 49 03 A6 83 3E 81 F8 57 65 08 3C C8 CC 00 00 38 DB FF FF C8 B9 00 00 38 80 00 00 1D 24 00 05 7C E6 C2 14 7C 84 DA 14 7D 29 DA 14 39 29 FF FF 7C 05 4A 14 7D 7C 49 D6 7D 45 02 14 54 08 10 3A 39 08 00 04 7C 1C 01 D6 55 6B 10 3A 7D 4B FC 2E 55 29 10 3A 39 29 00 04 ED 4A 50 2A 54 00 10 3A 7D 5C 51 D6 7C 1F 04 2E EC 00 00 2A 55 4A 10 3A 7D 08 E1 D6 7C EA FC 2E 7D 47 C2 14 ED 8A 00 28 ED 4A 00 2A FC 00 38 90 7D 29 E1 D6 FD A0 50 90 ED 4A 38 2A FD 8C 01 72 7D 69 FC 2E 7D 2A C2 14 7C 17 31 D6 FD AD 01 B8 7C 08 FC 2E 7D 69 C2 14 FD 80 60 18 7C C6 DA 14 EC 00 00 2A 7C F7 39 D6 FD A0 68 18 54 00 10 3A ED 6B 58 2A 7D 5A 05 2E ED 38 00 32 ED 0C 68 28 7D 57 51 D6 ED 8C 68 2A 54 E7 10 3A EC 19 00 32 ED 39 4A F8 FD A0 40 50 7D 37 49 D6 ED 78 02 FA 55 4A 10 3A FC 00 60 50 ED AD 48 28 7D 77 59 D6 ED 8C 58 28 EC 00 58 28 55 29 10 3A ED 08 48 28 7D 87 D5 2E 55 6B 10 3A 7D 0A D5 2E 7D A9 D5 2E 7C 0B D5 2E 42 00 FE F0
+
+. 0 10010188 28
+. 80 C1 00 34 80 01 00 24 38 C6 00 04 90 C1 00 34 34 C0 FF FF 90 C1 00 24 40 82 EA 18
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 10, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 10, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 10
+PASS: gsl_fft_complex_forward with signal_noise, n = 10, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 10, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 10, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 10, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 10, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 10, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 10, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 10, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 10, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 10, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 10
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 10, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 10, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 10, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 10, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 10, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 10, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 10, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 10, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 10, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 10, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 10
+PASS: gsl_fft_real with signal_real_noise, n = 10, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 10, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 10, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 10, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 10
+PASS: gsl_fft_real_float with signal_real_noise, n = 10, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 10, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 10, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 10, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 10
+PASS: gsl_fft_complex_forward with signal_noise, n = 10, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 10, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 10, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 10, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 10, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 10, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 10, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 10, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 10, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 10, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 10
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 10, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 10, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 10, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 10, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 10, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 10, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 10, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 10, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 10, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 10, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 10
+PASS: gsl_fft_real with signal_real_noise, n = 10, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 10, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 10, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 10, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 10
+PASS: gsl_fft_real_float with signal_real_noise, n = 10, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 10, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 10, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 11, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 11
+. 0 10006FA0 44
+. 81 01 00 18 3B 77 FF FF 81 41 00 24 7C 68 BB 96 81 21 01 88 82 69 00 00 7F 8A BB 96 2F 83 00 00 7D 7B E1 D6 7D 68 03 A6 41 9E 00 3C
+
+. 0 10006FCC 56
+. 7C 69 03 A6 39 40 00 00 38 00 00 00 57 07 20 36 57 28 20 36 7C 0A FC AE 7D 6A FA 14 7D 20 D2 14 7D 4A 42 14 7C 1A 05 AE 7C 00 3A 14 C9 AB 00 08 D9 A9 00 08 42 00 FF E0
+
+. 0 10007004 20
+. 57 69 F8 7E 3A A0 00 01 39 29 00 01 2B 89 00 01 40 9D 00 B4
+
+. 0 10007018 20
+. 7D 3D 4B 78 7C 65 1B 78 38 C0 00 00 7F 86 18 40 40 9C 00 90
+
+. 0 1000702C 140
+. 7C 15 B8 50 7C 69 03 A6 7C 80 19 D6 7D 25 32 14 7C 04 32 14 7D 59 49 D6 38 C6 00 01 7D 79 01 D6 55 4A 20 36 7D AA FC AE 7C EA FA 14 55 6B 20 36 7D 38 49 D6 7C 0B FC AE 7D 0B FA 14 FD AD 00 2A 55 29 20 36 7C 18 01 D6 7D A9 D5 AE 7D 29 D2 14 C9 A8 00 08 C8 07 00 08 54 00 20 36 FC 00 68 2A D8 09 00 08 7D 20 D2 14 7C 0B FC AE 7D AA FC AE FD AD 00 28 7D BA 05 AE C8 07 00 08 C9 A8 00 08 FC 00 68 28 D8 09 00 08 42 00 FF 84
+
+. 0 100070B8 16
+. 3A B5 00 01 7C A5 1A 14 7F 9D A8 40 41 9D FF 5C
+
+. 0 10007020 12
+. 38 C0 00 00 7F 86 18 40 40 9C 00 90
+
+. 0 100070C8 8
+. 2F 83 00 00 41 9E 00 3C
+
+. 0 100070D0 56
+. 7C 69 03 A6 39 40 00 00 38 00 00 00 57 27 20 36 57 08 20 36 7C 0A D4 AE 7D 6A D2 14 7D 20 FA 14 7D 4A 42 14 7C 1F 05 AE 7C 00 3A 14 C9 AB 00 08 D9 A9 00 08 42 00 FF E0
+
+. 0 10007108 20
+. 57 69 F8 7E 39 80 00 01 39 29 00 01 2B 89 00 01 40 9D 00 74
+
+. 0 1000711C 20
+. 7D 27 4B 78 7C 6A 1B 78 38 C0 00 00 7F 86 18 40 40 9C 00 50
+
+. 0 10007130 76
+. 7C 69 03 A6 38 00 00 00 57 28 20 36 7D 2A 32 14 7C 1F 04 AE 7D 29 C1 D6 7D 60 FA 14 38 C6 00 01 55 29 20 36 7D A9 D4 AE 7D 29 D2 14 FC 00 68 2A 7C 1F 05 AE 7C 00 42 14 C9 A9 00 08 C8 0B 00 08 FC 00 68 2A D8 0B 00 08 42 00 FF C4
+
+. 0 1000717C 16
+. 39 8C 00 01 7D 4A 1A 14 7F 87 60 40 41 9D FF 9C
+
+. 0 10007124 12
+. 38 C0 00 00 7F 86 18 40 40 9C 00 50
+
+. 0 1000718C 20
+. 57 69 F8 7E 3A A0 00 01 39 29 00 01 2B 89 00 01 40 9D 01 B4
+
+. 0 100071A0 64
+. 81 9E 80 40 7E D7 79 D6 80 01 00 1C 7D 30 4B 78 28 89 00 01 91 81 01 04 2F 00 FF FF 7C 7D 1B 78 7D 32 4B 78 38 C0 00 00 7C B5 79 D6 7F 86 18 40 7C 15 B8 50 7F 60 19 D6 7C B4 2B 78 40 9C 00 60
+
+. 0 100071E0 92
+. 7C 69 03 A6 38 00 00 00 57 08 20 36 7D 26 EA 14 7C 1A 04 AE 7D 29 C9 D6 7D 40 D2 14 7D 66 DA 14 38 C6 00 01 55 29 20 36 7D 6B C9 D6 7C 09 FD AE 7D 29 FA 14 C8 0A 00 08 D8 09 00 08 55 6B 20 36 7D BA 04 AE 7C 00 42 14 7D AB FD AE 7D 6B FA 14 C8 0A 00 08 D8 0B 00 08 42 00 FF B4
+
+. 0 1000723C 8
+. 39 80 00 01 40 85 01 00
+
+. 0 10007244 20
+. 81 C1 01 04 7C 67 1B 78 82 3E 80 44 2F 85 00 00 40 9E 0D F8
+
+. 0 1000804C 4
+. 40 9A 00 18
+
+. 0 10008050 20
+. 54 A9 20 36 7D 29 9A 14 C9 09 FF F8 C8 E9 FF F0 4B FF F2 00
+
+. 0 10007260 12
+. 38 C0 00 00 7F 86 18 40 40 9C 00 B8
+
+. 0 1000726C 180
+. 7C 0C B8 50 7C 69 03 A6 7C 80 19 D6 7D 44 32 14 7D 27 32 14 7D 4A C1 D6 7C 06 EA 14 7D 06 DA 14 38 C6 00 01 7D 29 C1 D6 55 4A 20 36 7D 6A D2 14 7D 8A D4 AE C8 0B 00 08 FD 88 03 32 55 29 20 36 7C 00 C9 D6 7D 49 D4 AE FC 08 00 32 7D 29 D2 14 FD 47 02 B2 C9 69 00 08 54 00 20 36 7D 08 C9 D6 7D BF 04 AE FD 67 02 F2 7D 60 FA 14 FD 2A 00 28 FD 4A 00 2A 55 08 20 36 7D 28 FA 14 FD AD 48 2A FD 2B 60 2A FD 6B 60 28 7D BF 05 AE C8 0B 00 08 FC 00 48 2A D8 0B 00 08 7D A8 FC AE FD AD 50 2A 7D A8 FD AE C8 09 00 08 FC 00 58 2A D8 09 00 08 42 00 FF 5C
+
+. 0 10007320 32
+. 7C A5 A2 14 39 8C 00 01 7C 05 B3 96 7F 92 60 40 7C E7 1A 14 7C 00 B1 D6 7C A0 28 50 41 9D FF 14
+
+. 0 10007250 8
+. 2F 85 00 00 40 9E 0D F8
+
+. 0 10007340 16
+. 3A B5 00 01 7F BD 1A 14 7F 90 A8 40 41 9D FE 78
+
+. 0 100071C4 28
+. 38 C0 00 00 7C B5 79 D6 7F 86 18 40 7C 15 B8 50 7F 60 19 D6 7C B4 2B 78 40 9C 00 60
+
+. 0 10007350 8
+. 2F 9C 00 00 41 9E 00 3C
+
+. 0 10007358 56
+. 7F 89 03 A6 39 40 00 00 38 00 00 00 57 07 20 36 57 28 20 36 7C 0A FC AE 7D 6A FA 14 7D 20 D2 14 7D 4A 42 14 7C 1A 05 AE 7C 00 3A 14 C9 AB 00 08 D9 A9 00 08 42 00 FF E0
+
+. 0 10007390 12
+. 39 80 00 01 7F 8C B8 40 40 9C 00 68
+
+. 0 1000739C 20
+. 7F 8A E3 78 7C 60 1B 78 38 80 00 00 7F 84 E0 40 40 9C 00 40
+
+. 0 100073B0 60
+. 7F 89 03 A6 7D 20 22 14 7D 6A 22 14 7D 29 C9 D6 38 84 00 01 7D 6B C1 D6 55 29 20 36 7D A9 FC AE 7D 29 FA 14 55 6B 20 36 7D AB D5 AE 7D 6B D2 14 C8 09 00 08 D8 0B 00 08 42 00 FF CC
+
+. 0 100073EC 20
+. 39 8C 00 01 7C 00 1A 14 7F 8C B8 40 7D 4A E2 14 41 9C FF A8
+
+. 0 100073A4 12
+. 38 80 00 00 7F 84 E0 40 40 9C 00 40
+
+. 0 10007400 12
+. 2B 8F 00 01 80 A1 00 24 40 9D 00 68
+
+. 0 10007470 20
+. 3B A0 00 01 7F 86 E3 78 7F 9D 78 40 80 A1 00 24 40 9C 00 F8
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 11, stride = 1
+. 0 10008064 24
+. 54 A9 20 36 7D 29 9A 14 C8 09 FF F8 C8 E9 FF F0 FD 00 00 50 4B FF F1 E8
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 11, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 11, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 11, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 11, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 11, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 11, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 11
+. 0 10009DC8 44
+. 81 01 00 18 3B 77 FF FF 81 41 00 24 7C 68 BB 96 81 21 01 44 82 89 00 00 7F 8A BB 96 2F 83 00 00 7D 7B E1 D6 7D 68 03 A6 41 9E 00 3C
+
+. 0 10009DF4 56
+. 7C 69 03 A6 39 40 00 00 38 00 00 00 57 07 18 38 57 28 18 38 7C 0A FC 2E 7D 6A FA 14 7D 20 D2 14 7D 4A 42 14 7C 1A 05 2E 7C 00 3A 14 C1 AB 00 04 D1 A9 00 04 42 00 FF E0
+
+. 0 10009E2C 20
+. 57 69 F8 7E 3A A0 00 01 39 29 00 01 2B 89 00 01 40 9D 00 B4
+
+. 0 10009E40 20
+. 7D 3D 4B 78 7C 65 1B 78 38 C0 00 00 7F 86 18 40 40 9C 00 90
+
+. 0 10009E54 140
+. 7C 15 B8 50 7C 69 03 A6 7C 80 19 D6 7D 25 32 14 7C 04 32 14 7D 59 49 D6 38 C6 00 01 7D 79 01 D6 55 4A 18 38 7D AA FC 2E 7C EA FA 14 55 6B 18 38 7D 38 49 D6 7C 0B FC 2E 7D 0B FA 14 ED AD 00 2A 55 29 18 38 7C 18 01 D6 7D A9 D5 2E 7D 29 D2 14 C1 A8 00 04 C0 07 00 04 54 00 18 38 EC 00 68 2A D0 09 00 04 7D 20 D2 14 7C 0B FC 2E 7D AA FC 2E ED AD 00 28 7D BA 05 2E C0 07 00 04 C1 A8 00 04 EC 00 68 28 D0 09 00 04 42 00 FF 84
+
+. 0 10009EE0 16
+. 3A B5 00 01 7C A5 1A 14 7F 9D A8 40 41 9D FF 5C
+
+. 0 10009E48 12
+. 38 C0 00 00 7F 86 18 40 40 9C 00 90
+
+. 0 10009EF0 8
+. 2F 83 00 00 41 9E 00 3C
+
+. 0 10009EF8 56
+. 7C 69 03 A6 39 40 00 00 38 00 00 00 57 27 18 38 57 08 18 38 7C 0A D4 2E 7D 6A D2 14 7D 20 FA 14 7D 4A 42 14 7C 1F 05 2E 7C 00 3A 14 C1 AB 00 04 D1 A9 00 04 42 00 FF E0
+
+. 0 10009F30 20
+. 57 69 F8 7E 39 80 00 01 39 29 00 01 2B 89 00 01 40 9D 00 74
+
+. 0 10009F44 20
+. 7D 27 4B 78 7C 6A 1B 78 38 C0 00 00 7F 86 18 40 40 9C 00 50
+
+. 0 10009F58 76
+. 7C 69 03 A6 38 00 00 00 57 28 18 38 7D 2A 32 14 7C 1F 04 2E 7D 29 C1 D6 7D 60 FA 14 38 C6 00 01 55 29 18 38 7D A9 D4 2E 7D 29 D2 14 EC 00 68 2A 7C 1F 05 2E 7C 00 42 14 C1 A9 00 04 C0 0B 00 04 EC 00 68 2A D0 0B 00 04 42 00 FF C4
+
+. 0 10009FA4 16
+. 39 8C 00 01 7D 4A 1A 14 7F 87 60 40 41 9D FF 9C
+
+. 0 10009F4C 12
+. 38 C0 00 00 7F 86 18 40 40 9C 00 50
+
+. 0 10009FB4 20
+. 57 69 F8 7E 3A A0 00 01 39 29 00 01 2B 89 00 01 40 9D 01 AC
+
+. 0 10009FC8 56
+. 81 81 00 1C 7E D7 79 D6 7D 2E 4B 78 28 89 00 01 2F 0C FF FF 7C 7D 1B 78 7D 32 4B 78 38 C0 00 00 7C B5 79 D6 7F 86 18 40 7C 15 B8 50 7F 60 19 D6 7C B3 2B 78 40 9C 00 60
+
+. 0 1000A000 92
+. 7C 69 03 A6 38 00 00 00 57 08 18 38 7D 26 EA 14 7C 1A 04 2E 7D 29 C9 D6 7D 40 D2 14 7D 66 DA 14 38 C6 00 01 55 29 18 38 7D 6B C9 D6 7C 09 FD 2E 7D 29 FA 14 C0 0A 00 04 D0 09 00 04 55 6B 18 38 7D BA 04 2E 7C 00 42 14 7D AB FD 2E 7D 6B FA 14 C0 0A 00 04 D0 0B 00 04 42 00 FF B4
+
+. 0 1000A05C 8
+. 39 80 00 01 40 85 01 00
+
+. 0 1000A064 20
+. 82 3E 80 CC 7C 67 1B 78 82 1E 80 D0 2F 85 00 00 40 9E 0E 58
+
+. 0 1000AECC 4
+. 40 9A 00 18
+
+. 0 1000AED0 20
+. 54 A9 18 38 7D 29 A2 14 C1 09 FF FC C0 E9 FF F8 4B FF F1 A0
+
+. 0 1000A080 12
+. 38 C0 00 00 7F 86 18 40 40 9C 00 B8
+
+. 0 1000A08C 180
+. 7C 0C B8 50 7C 69 03 A6 7C 80 19 D6 7D 44 32 14 7D 67 32 14 7D 4A C1 D6 7C 06 EA 14 7D 06 DA 14 38 C6 00 01 7D 6B C1 D6 55 4A 18 38 7D 2A D2 14 7D AA D4 2E C1 29 00 04 ED A8 03 72 55 6B 18 38 7C 00 C9 D6 7D 4B D4 2E ED 28 02 72 7D 6B D2 14 ED 47 02 B2 C1 6B 00 04 54 00 18 38 7D 08 C9 D6 7C 1F 04 2E ED 67 02 F2 ED 8A 48 28 7D 20 FA 14 ED 4A 48 2A 55 08 18 38 EC 00 60 2A ED 8B 68 2A ED 6B 68 28 7C 1F 05 2E C0 09 00 04 EC 00 60 2A D0 09 00 04 7D 28 FA 14 7D A8 FC 2E ED AD 50 2A 7D A8 FD 2E C0 09 00 04 EC 00 58 2A D0 09 00 04 42 00 FF 5C
+
+. 0 1000A140 32
+. 7C A5 9A 14 39 8C 00 01 7C 05 B3 96 7F 92 60 40 7C E7 1A 14 7C 00 B1 D6 7C A0 28 50 41 9D FF 14
+
+. 0 1000A070 8
+. 2F 85 00 00 40 9E 0E 58
+
+. 0 1000A160 16
+. 3A B5 00 01 7F BD 1A 14 7F 8E A8 40 41 9D FE 78
+
+. 0 10009FE4 28
+. 38 C0 00 00 7C B5 79 D6 7F 86 18 40 7C 15 B8 50 7F 60 19 D6 7C B3 2B 78 40 9C 00 60
+
+. 0 1000A170 8
+. 2F 9C 00 00 41 9E 00 3C
+
+. 0 1000A178 56
+. 7F 89 03 A6 39 40 00 00 38 00 00 00 57 07 18 38 57 28 18 38 7C 0A FC 2E 7D 6A FA 14 7D 20 D2 14 7D 4A 42 14 7C 1A 05 2E 7C 00 3A 14 C1 AB 00 04 D1 A9 00 04 42 00 FF E0
+
+. 0 1000A1B0 12
+. 39 80 00 01 7F 8C B8 40 40 9C 00 68
+
+. 0 1000A1BC 20
+. 7F 8A E3 78 7C 60 1B 78 38 80 00 00 7F 84 E0 40 40 9C 00 40
+
+. 0 1000A1D0 60
+. 7F 89 03 A6 7D 20 22 14 7D 6A 22 14 7D 29 C9 D6 38 84 00 01 7D 6B C1 D6 55 29 18 38 7D A9 FC 2E 7D 29 FA 14 55 6B 18 38 7D AB D5 2E 7D 6B D2 14 C0 09 00 04 D0 0B 00 04 42 00 FF CC
+
+. 0 1000A20C 20
+. 39 8C 00 01 7C 00 1A 14 7F 8C B8 40 7D 4A E2 14 41 9C FF A8
+
+. 0 1000A1C4 12
+. 38 80 00 00 7F 84 E0 40 40 9C 00 40
+
+. 0 1000A220 12
+. 2B 8F 00 01 80 A1 00 24 40 9D 00 68
+
+. 0 1000A290 20
+. 3B A0 00 01 7F 86 E3 78 7F 9D 78 40 80 A1 00 24 40 9C 00 F8
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 11, stride = 1
+. 0 1000AEE4 24
+. 54 A9 18 38 7D 29 A2 14 C0 09 FF FC C0 E9 FF F8 FD 00 00 50 4B FF F1 88
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 11, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 11, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 11, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 11, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 11, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 11, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 11
+PASS: gsl_fft_real with signal_real_noise, n = 11, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 11, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 11, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 11, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 11
+PASS: gsl_fft_real_float with signal_real_noise, n = 11, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 11, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 11, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 11, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 11
+PASS: gsl_fft_complex_forward with signal_noise, n = 11, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 11, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 11, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 11, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 11, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 11, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 11, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 11, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 11, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 11, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 11
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 11, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 11, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 11, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 11, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 11, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 11, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 11, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 11, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 11, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 11, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 11
+PASS: gsl_fft_real with signal_real_noise, n = 11, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 11, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 11, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 11, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 11
+PASS: gsl_fft_real_float with signal_real_noise, n = 11, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 11, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 11, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 11, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 11
+PASS: gsl_fft_complex_forward with signal_noise, n = 11, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 11, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 11, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 11, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 11, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 11, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 11, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 11, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 11, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 11, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 11
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 11, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 11, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 11, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 11, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 11, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 11, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 11, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 11, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 11, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 11, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 11
+PASS: gsl_fft_real with signal_real_noise, n = 11, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 11, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 11, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 11, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 11
+PASS: gsl_fft_real_float with signal_real_noise, n = 11, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 11, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 11, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 12, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 12
+. 0 100084C4 12
+. 81 41 00 34 2F 8A 00 00 41 BE FC B0
+
+. 0 100084D0 4
+. 40 9A 00 54
+
+. 0 100084D4 80
+. 80 E1 01 64 80 C1 01 60 C8 07 FF F0 C8 47 FF F8 81 01 01 54 80 E1 01 58 D8 01 00 38 CA 06 FF F0 C8 06 FF F8 80 C1 01 5C D8 01 00 48 C9 E6 FF F0 C8 08 FF F8 C9 C6 FF F8 CA 67 FF F0 CA 47 FF F8 CA 28 FF F0 D8 41 00 40 D8 01 00 50 4B FF FC A0
+
+. 0 100081C0 8
+. 2F 96 00 00 41 9E 02 AC
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 12, stride = 1
+. 0 10008524 100
+. 80 C1 01 64 80 E1 01 60 81 01 01 5C 81 21 01 58 81 41 01 54 C8 06 FF F8 C9 A7 FF F8 FC 00 00 50 C9 88 FF F8 C9 69 FF F8 FD A0 68 50 C9 4A FF F8 FD C0 60 50 FE 40 58 50 D8 01 00 40 FD 40 50 50 C8 06 FF F0 CA 07 FF F0 C9 E8 FF F0 CA 69 FF F0 CA 2A FF F0 D9 A1 00 48 D9 41 00 50 D8 01 00 38 4B FF FC 3C
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 12, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 12, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 12, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 12, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 12, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 12, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 12
+. 0 1000B340 12
+. 80 E1 00 38 2F 87 00 00 41 BE FC C0
+
+. 0 1000B34C 4
+. 40 9A 00 54
+
+. 0 1000B350 80
+. 80 E1 01 18 81 01 01 0C C0 07 FF F8 C0 27 FF FC 80 E1 01 14 81 21 01 08 D0 01 00 3C C2 67 FF F8 C0 07 FF FC 80 E1 01 10 D0 01 00 44 C2 47 FF F8 C0 09 FF FC C2 27 FF FC C2 08 FF F8 C1 E8 FF FC C1 C9 FF F8 D0 21 00 40 D0 01 00 48 4B FF FC A4
+
+. 0 1000B040 8
+. 2F 96 00 00 41 9E 02 A8
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 12, stride = 1
+. 0 1000B3A0 100
+. 80 E1 01 18 81 01 01 14 81 21 01 10 81 41 01 0C 81 61 01 08 C0 07 FF FC C1 A8 FF FC FC 00 00 50 C1 89 FF FC C1 6A FF FC FD A0 68 50 C1 4B FF FC FE 20 60 50 FD E0 58 50 D0 01 00 40 FD 40 50 50 C0 07 FF F8 C2 68 FF F8 C2 49 FF F8 C2 0A FF F8 C1 CB FF F8 D1 A1 00 44 D1 41 00 48 D0 01 00 3C 4B FF FC 40
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 12, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 12, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 12, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 12, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 12, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 12, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 12
+. 0 10012088 8
+. 2F 95 00 00 41 BE F7 28
+
+. 0 10012090 152
+. 81 3E 82 50 39 1B FF FF 81 5E 82 54 7E A9 03 A6 57 66 08 3C C9 49 00 00 C9 2A 00 00 7D 07 43 78 7C 07 1A 14 7D 66 42 14 7D 20 1A 14 7C 1D 01 D6 55 0A 18 38 39 4A 00 08 7D 3D 49 D6 54 00 18 38 7C 1F 04 AE 7C 1D 39 D6 55 29 18 38 7D 89 FC AE 7C E7 DA 14 FD 60 60 2A 54 00 18 38 FC 00 60 28 7D BF 04 AE 7D 78 59 D6 FD 6B 02 72 FD 80 6A BA 7C 18 41 D6 55 6B 18 38 FD AD 00 28 7D 08 8A 14 7D 4A C1 D6 54 00 18 38 7D 99 05 AE 7D 6A CD AE 7D AB CD AE 42 00 FF 8C
+
+. 0 10012128 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 EE 0C
+
+. 0 10012144 4
+. 4B FF F6 8C
+
+PASS: gsl_fft_real with signal_real_noise, n = 12, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 12, stride = 1
+. 0 1000D6E8 52
+. 3A 98 00 10 3A 70 00 10 3A B6 00 10 57 78 08 3C 3A 00 00 02 2F 91 00 00 CB D3 FF F0 CB B3 FF F8 CB 94 FF F0 CB 74 FF F8 CB 55 FF F0 CB 35 FF F8 41 9E 01 9C
+
+. 0 1000D71C 400
+. 38 70 FF FF 7D 30 C0 50 7E 29 03 A6 3B E9 FF FF 7C 79 1B 78 57 76 10 3A 7C F8 FA 14 57 EB 18 38 54 E9 18 38 39 6B 00 08 39 29 00 08 7D 58 CA 14 7D 29 E1 D6 55 46 18 38 57 28 18 38 39 08 00 08 38 C6 00 08 7D 6B E1 D6 7D 69 EC AE FD 60 58 50 7C 1C C9 D6 7D 4B EC AE 7F 39 B2 14 FD 40 50 50 7D 3C F9 D6 54 00 18 38 7D 3D 04 AE 7F FF B2 14 80 01 00 30 7D 5C 51 D6 55 29 18 38 7C 29 EC AE 7D 63 02 14 7C AB 02 14 7C 85 02 14 7C FC 39 D6 55 4A 18 38 7C CA EC AE 54 69 18 38 55 6A 18 38 FC E6 08 28 39 29 00 08 7D 08 E1 D6 54 E7 18 38 7C A7 EC AE FC C6 08 2A 54 87 18 38 FD 89 28 28 39 4A 00 08 7C C6 E1 D6 7D A8 EC AE FD 29 28 2A 54 A8 18 38 FC 0D 58 28 39 08 00 08 FD AD 58 2A 38 E7 00 08 7D 06 EC AE 7C 17 19 D6 FC 60 38 28 7C 63 DA 14 FC 48 50 28 FD 08 50 2A FC 00 38 2A 7D 77 59 D6 FC 8C 10 2A 54 00 18 38 FD 6D 40 28 FC FE 00 32 7C B7 29 D6 55 6B 18 38 FC BC 02 F2 FC 39 01 32 7C 97 21 D6 54 A5 18 38 FF F9 00 F2 FC 1D 00 32 7D 29 B9 D6 54 84 18 38 FD 7B 02 F2 FD 49 30 28 FD 8C 10 28 7D 4A B9 D6 FC 7A 08 FA FC FD 3B 3A 7D 08 B9 D6 FC BB 2A BA FD 9E 03 38 7C E7 B9 D6 FD 5C 5A B8 FC 9A F9 38 FD 29 30 2A FD AD 40 2A 7D 3A 05 AE 7D A9 D5 AE 7D 8B D5 AE 7C EA D5 AE 7D 45 D5 AE 7C A8 D5 AE 7C 84 D5 AE
+
+. 0 1000D8AC 8
+. 7C 67 D5 AE 42 00 FE 84
+
+. 0 1000D8B4 32
+. 3A 52 00 01 55 E0 F8 7E 7F 80 90 40 3A 10 00 02 3A 73 00 10 3A 94 00 10 3A B5 00 10 41 9D FE 2C
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 12, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 12, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 12
+. 0 100140D8 8
+. 2F 95 00 00 41 BE F7 04
+
+. 0 100140E0 164
+. 81 5E 82 A8 39 1B FF FF 81 7E 82 AC 7E A9 03 A6 57 66 08 3C C9 2A 00 00 C1 0B 00 00 7D 07 43 78 7C 07 1A 14 7D 66 42 14 7D 20 1A 14 7C 1F 01 D6 55 0A 10 3A 39 4A 00 04 7D 3F 49 D6 54 00 10 3A 7D 7D 04 2E 7C 1F 39 D6 55 29 10 3A 7C 09 EC 2E 7C E7 DA 14 ED 4B 00 28 54 00 10 3A ED 6B 00 2A 7D 9D 04 2E 7D 78 59 D6 FC 00 50 90 FD A0 60 90 ED 6B 02 32 7C 18 41 D6 ED 8C 50 28 FC 00 6A 7A 55 6B 10 3A 7D 08 8A 14 7D 4A C1 D6 54 00 10 3A FC 00 00 18 7C 19 05 2E 7D 6A CD 2E 7D 8B CD 2E 42 00 FF 80
+
+. 0 10014184 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 ED A0
+
+. 0 100141A0 4
+. 4B FF F6 5C
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 12, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 12, stride = 1
+. 0 1000F8CC 52
+. 3A 70 00 08 3A B9 00 08 3A 96 00 08 57 78 08 3C 3A 00 00 02 2F 91 00 00 C3 F3 FF F8 C3 D3 FF FC C3 B4 FF F8 C3 94 FF FC C3 75 FF F8 C3 55 FF FC 41 9E 01 9C
+
+. 0 1000F900 400
+. 38 70 FF FF 7D 30 C0 50 7E 29 03 A6 3B A9 FF FF 7C 79 1B 78 57 76 10 3A 7C F8 EA 14 57 AB 10 3A 54 E9 10 3A 39 6B 00 04 39 29 00 04 7D 58 CA 14 7D 29 E1 D6 55 46 10 3A 57 28 10 3A 39 08 00 04 38 C6 00 04 7D 6B E1 D6 7D 49 FC 2E FD 40 50 50 7C 1C C9 D6 7D 2B FC 2E 7F 39 B2 14 FD 20 48 50 7D 3C E9 D6 54 00 10 3A 7D 7F 04 2E 7F BD B2 14 80 01 00 30 7D 5C 51 D6 55 29 10 3A 7C 29 FC 2E 7D 63 02 14 7C AB 02 14 7C 85 02 14 7C FC 39 D6 55 4A 10 3A 7C EA FC 2E 54 69 10 3A 55 6A 10 3A EC A7 08 28 39 29 00 04 7D 08 E1 D6 54 E7 10 3A 7C C7 FC 2E EC E7 08 2A 54 87 10 3A ED 8B 30 28 39 4A 00 04 7C C6 E1 D6 7D A8 FC 2E ED 6B 30 2A 54 A8 10 3A EC 0D 50 28 39 08 00 04 ED AD 50 2A 38 E7 00 04 7D 06 FC 2E 7C 17 19 D6 EC 40 28 28 7C 63 DA 14 EC 88 48 28 ED 08 48 2A EC 00 28 2A 7D 77 59 D6 EC 6C 20 2A 54 00 10 3A ED 4D 40 28 ED 2B 38 28 ED 8C 20 28 7C B7 29 D6 EC DF 00 32 55 6B 10 3A EC BD 02 B2 EC 3A 00 F2 EC 9A 00 B2 7C 97 21 D6 EC 1E 00 32 54 A5 10 3A ED 5C 02 B2 EC DE 33 3A 7D 29 B9 D6 EC BC 2A 7A EC 5B 08 BA 54 84 10 3A ED 9F 03 38 ED 3D 52 78 7D 4A B9 D6 EC 7B 20 F8 ED 6B 38 2A ED AD 40 2A 7D 08 B9 D6 7D 7A 05 2E 7D A9 D5 2E 7D 8B D5 2E 7C CA D5 2E 7C E7 B9 D6 7D 25 D5 2E 7C A8 D5 2E 7C 64 D5 2E
+
+. 0 1000FA90 8
+. 7C 47 D5 2E 42 00 FE 84
+
+. 0 1000FA98 32
+. 3A 52 00 01 55 E0 F8 7E 7F 80 90 40 3A 10 00 02 3A 73 00 08 3A 94 00 08 3A B5 00 08 41 9D FE 2C
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 12, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 12, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 12
+PASS: gsl_fft_complex_forward with signal_noise, n = 12, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 12, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 12, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 12, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 12, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 12, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 12, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 12, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 12, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 12, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 12
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 12, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 12, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 12, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 12, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 12, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 12, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 12, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 12, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 12, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 12, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 12
+PASS: gsl_fft_real with signal_real_noise, n = 12, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 12, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 12, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 12, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 12
+PASS: gsl_fft_real_float with signal_real_noise, n = 12, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 12, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 12, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 12, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 12
+PASS: gsl_fft_complex_forward with signal_noise, n = 12, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 12, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 12, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 12, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 12, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 12, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 12, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 12, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 12, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 12, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 12
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 12, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 12, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 12, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 12, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 12, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 12, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 12, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 12, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 12, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 12, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 12
+PASS: gsl_fft_real with signal_real_noise, n = 12, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 12, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 12, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 12, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 12
+PASS: gsl_fft_real_float with signal_real_noise, n = 12, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 12, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 12, stride = 3
+. 0 FE0BE7C 36
+. 81 3E 0E 80 C8 69 00 00 FD A3 28 2A FC 0D 18 28 D9 A1 00 20 80 E1 00 20 81 01 00 24 FC 65 00 28 41 9D 00 08
+
+. 0 FE0BEA4 212
+. FC A3 00 F2 81 3E 0E BC 81 9E 0E C0 55 04 28 34 CB E9 00 00 C9 AC 00 00 81 5E 0E 7C FC E3 01 72 80 FE 0E C4 FC 85 6F FA 7C C4 52 14 C9 47 00 00 CB E6 00 10 C9 86 00 08 FD 23 50 2A FD 07 01 32 C8 86 00 18 80 BE 0E C8 FD 7F 50 2A 81 7E 0E CC C8 C5 00 00 FD 29 50 28 FD A8 61 3A C8 0B 00 00 80 7E 0E D0 FD 6B 50 28 FD 85 01 BA C8 03 00 00 FC FF 58 28 FC DF 6A 3A 7D A4 54 AE FD 47 20 2A FD 03 48 28 FF EC 01 7A FC EA 30 FA FC 8D 00 F2 FD 2B 02 72 FC C8 10 2A FC 7F 01 72 FC 0B 39 BA FD 44 00 B2 FC A9 68 2A FD 8D 00 F2 FD 0D 28 28 FF E0 50 28 FC E8 48 2A FC DF 60 28 FD 66 38 2A FD A5 58 2A FC 85 68 28 FC AB 20 2A FF 85 08 00 40 9D 00 68
+
+. 0 FE0BFB8 36
+. 83 E1 00 54 83 C1 00 30 7F E8 03 A6 CB A1 00 38 83 E1 00 34 CB C1 00 40 CB E1 00 48 38 21 00 50 4E 80 00 20
+
+. 0 FE0E274 32
+. 81 7E 0F DC 80 DE 0F F0 C9 0B 00 00 C9 46 00 00 FC 0C 52 3A FD 81 00 2A FC 0C 08 00 41 82 FE FC
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 13, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 13
+PASS: gsl_fft_complex_forward with signal_noise, n = 13, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 13, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 13, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 13, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 13, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 13, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 13, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 13
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 13, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 13, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 13, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 13, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 13, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 13, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 13, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 13
+PASS: gsl_fft_real with signal_real_noise, n = 13, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 13, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 13, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 13, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 13
+PASS: gsl_fft_real_float with signal_real_noise, n = 13, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 13, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 13, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 13, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 13
+PASS: gsl_fft_complex_forward with signal_noise, n = 13, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 13, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 13, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 13, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 13, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 13, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 13, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 13, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 13, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 13, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 13
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 13, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 13, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 13, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 13, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 13, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 13, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 13, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 13, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 13, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 13, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 13
+PASS: gsl_fft_real with signal_real_noise, n = 13, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 13, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 13, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 13, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 13
+PASS: gsl_fft_real_float with signal_real_noise, n = 13, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 13, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 13, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 13, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 13
+PASS: gsl_fft_complex_forward with signal_noise, n = 13, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 13, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 13, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 13, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 13, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 13, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 13, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 13, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 13, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 13, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 13
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 13, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 13, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 13, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 13, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 13, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 13, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 13, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 13, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 13, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 13, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 13
+PASS: gsl_fft_real with signal_real_noise, n = 13, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 13, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 13, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 13, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 13
+PASS: gsl_fft_real_float with signal_real_noise, n = 13, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 13, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 13, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 14, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 14
+. 0 10008C74 12
+. 81 01 00 58 2F 88 00 00 41 BE FA 5C
+
+. 0 10008C80 4
+. 40 9A 00 80
+
+. 0 10008C84 124
+. 80 E1 01 3C 80 C1 01 38 C8 07 FF F0 C8 47 FF F8 D8 01 00 90 D8 41 00 98 C8 06 FF F0 C8 46 FF F8 80 C1 01 34 D8 01 00 A0 D8 41 00 A8 C8 06 FF F0 C8 46 FF F8 80 C1 01 30 D8 01 00 B0 D8 41 00 B8 C8 06 FF F0 C8 46 FF F8 80 C1 01 2C D8 01 00 C0 D8 41 00 C8 C8 06 FF F0 C8 46 FF F8 80 C1 01 28 D8 01 00 D0 D8 41 00 D8 C8 06 FF F0 C8 46 FF F8 D8 01 00 E0 D8 41 00 E8 4B FF FA 74
+
+. 0 10008770 8
+. 2F 90 00 00 41 9E 04 9C
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 14, stride = 1
+. 0 10008D00 148
+. 80 C1 01 3C 80 E1 01 38 81 01 01 34 81 21 01 30 81 41 01 2C 81 61 01 28 C8 06 FF F8 C9 A7 FF F8 FC 00 00 50 C9 88 FF F8 C9 69 FF F8 FD A0 68 50 C9 4A FF F8 FD 80 60 50 C9 2B FF F8 FD 60 58 50 FD 40 50 50 D8 01 00 98 FD 20 48 50 C8 06 FF F0 C8 47 FF F0 C8 68 FF F0 C8 89 FF F0 C8 AA FF F0 C8 CB FF F0 D9 A1 00 A8 D9 81 00 B8 D9 61 00 C8 D9 41 00 D8 D9 21 00 E8 D8 01 00 90 D8 41 00 A0 D8 61 00 B0 D8 81 00 C0 D8 A1 00 D0 D8 C1 00 E0 4B FF F9 E0
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 14, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 14, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 14, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 14, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 14, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 14, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 14
+. 0 1000BB28 8
+. 2F 8C 00 00 41 BE FA 54
+
+. 0 1000BB30 4
+. 40 9A 00 80
+
+. 0 1000BB34 124
+. 80 E1 00 EC C0 07 FF F8 C0 27 FF FC 80 E1 00 E8 D0 01 00 64 D0 21 00 68 C0 07 FF F8 C0 27 FF FC 80 E1 00 E4 D0 01 00 6C D0 21 00 70 C0 07 FF F8 C0 27 FF FC 80 E1 00 E0 D0 01 00 74 D0 21 00 78 C0 07 FF F8 C0 27 FF FC 80 E1 00 DC D0 01 00 7C D0 21 00 80 C0 07 FF F8 C0 27 FF FC 80 E1 00 D8 D0 01 00 84 D0 21 00 88 C0 07 FF F8 C0 27 FF FC D0 01 00 8C D0 21 00 90 4B FF FA 18
+
+. 0 1000B5C4 8
+. 2F 90 00 00 41 9E 05 04
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 14, stride = 1
+. 0 1000BBB0 160
+. 80 E1 00 EC 81 01 00 E8 C0 07 FF FC C1 A8 FF FC FC 00 00 50 80 E1 00 D8 81 01 00 EC FD A0 68 50 81 21 00 E4 81 41 00 E0 81 61 00 DC C1 27 FF FC D0 01 00 68 80 E1 00 E8 FD 20 48 50 C0 08 FF F8 C1 89 FF FC C1 6A FF FC C1 4B FF FC FD 80 60 50 D0 01 00 64 FD 60 58 50 C0 07 FF F8 FD 40 50 50 80 E1 00 D8 D0 01 00 6C C0 29 FF F8 C0 4A FF F8 C0 AB FF F8 C0 07 FF F8 D1 A1 00 70 D1 81 00 78 D1 61 00 80 D1 41 00 88 D1 21 00 90 D0 21 00 74 D0 41 00 7C D0 A1 00 84 D0 01 00 8C 4B FF F9 78
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 14, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 14, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 14, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 14, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 14, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 14, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 14
+. 0 10011954 80
+. 81 21 00 2C 7D 78 39 D6 7C E7 8A 14 7C 08 4A 14 7C 1D 01 D6 55 6B 18 38 7D 3D 41 D6 54 00 18 38 7D BF 04 AE 7D 08 2A 14 55 29 18 38 7C 18 51 D6 7C 09 FC AE 7D 4A 8A 14 FD 80 68 28 FC 00 68 2A 54 00 18 38 7C 0B CD AE 7D 99 05 AE 42 00 FF B4
+
+. 0 100110E4 20
+. 39 F4 00 01 3A 60 00 01 55 E0 F8 7E 2B 80 00 01 40 9D 0A A4
+
+. 0 10011B98 8
+. 72 80 00 01 40 A2 FC 18
+
+. 0 10011BA0 52
+. 3C 00 43 30 80 FE 82 6C 90 01 00 08 3A 40 00 00 93 61 00 0C C9 A7 00 00 C8 01 00 08 81 3E 82 7C FC 00 68 28 CB E9 00 00 FF FF 00 24 FC 20 F8 90 48 01 73 25
+
+. 0 10011BD4 12
+. FF 80 08 90 FC 20 F8 90 48 01 73 B9
+
+. 0 10011BE0 12
+. 7F 92 80 40 FC 20 08 50 40 BC FB CC
+
+. 0 10011BEC 40
+. 82 FE 82 74 56 6A 08 3C 82 DE 82 78 38 C0 00 00 38 A0 00 00 38 E0 00 00 C8 77 00 00 7F 87 D8 40 C8 96 00 00 40 9C 00 FC
+
+. 0 10011C14 32
+. 2F 87 00 00 C8 B6 00 00 C9 37 00 00 FC C0 28 90 FD 40 28 90 FC E0 48 90 FD 00 28 90 41 9E 00 14
+
+. 0 10011C44 12
+. 39 00 00 00 7F 88 D8 40 40 9C 00 74
+
+. 0 10011C50 36
+. 7D 25 52 14 7F 69 03 A6 39 29 FF FF 7C 1D 49 D6 2F 88 00 00 7D 29 D2 14 39 08 00 01 54 00 18 38 41 9E 00 28
+
+. 0 10011C98 40
+. 7C 1F 04 AE FD A8 00 32 FC 07 00 32 FD 6A 03 72 FD 8A 00 32 FC 09 58 38 FD A9 63 7A FC A5 00 2A FC C6 68 2A 42 00 FF A0
+
+. 0 10011C5C 24
+. 7C 1D 49 D6 2F 88 00 00 7D 29 D2 14 39 08 00 01 54 00 18 38 41 9E 00 28
+
+. 0 10011C74 76
+. FC 08 07 32 FD 68 00 72 FD A4 02 B2 FD 84 02 72 FC 07 00 7A FD 23 6A 78 FD 43 62 BA FC E7 5F 38 FD 00 00 90 7C 1F 04 AE FD A8 00 32 FC 07 00 32 FD 6A 03 72 FD 8A 00 32 FC 09 58 38 FD A9 63 7A FC A5 00 2A FC C6 68 2A 42 00 FF A0
+
+. 0 10011CC0 16
+. 38 07 00 01 7D 27 D8 50 7F 80 48 40 40 9C 00 AC
+
+. 0 10011CD0 60
+. 7D 27 A1 D6 7F 80 D8 40 7C 07 03 78 55 29 08 3C 7D 26 4A 14 7D 29 52 14 39 29 FF FF 55 2B 18 38 7D 38 49 D6 39 6B 00 08 7D 6B C1 D6 55 29 18 38 7C A9 CD AE 7C CB CD AE 41 9C FF 0C
+
+. 0 10011C34 28
+. FC 04 07 72 FD A3 07 72 FC 63 07 BA FC 84 6F B8 39 00 00 00 7F 88 D8 40 40 9C 00 74
+
+. 0 10011D78 8
+. 7F 80 48 00 41 9E 00 44
+
+. 0 10011DC0 44
+. 7D 27 A1 D6 7F 80 D8 40 7C 07 03 78 55 29 08 3C 7D 26 4A 14 7D 29 52 14 39 29 FF FF 7D 38 49 D6 55 29 18 38 7C A9 CD AE 4B FF FF 20
+
+. 0 10011D08 4
+. 41 9C FF 0C
+
+. 0 10011D80 64
+. 7D 29 A1 D6 7F 80 D8 40 FC 00 30 50 7C 07 03 78 55 29 08 3C 7D 26 4A 14 7D 2A 48 50 39 29 FF FF 55 2B 18 38 7D 38 49 D6 39 6B 00 08 7D 6B C1 D6 55 29 18 38 7C A9 CD AE 7C 0B CD AE 4B FF FF 4C
+
+. 0 10011D0C 20
+. 3A 52 00 01 7C A5 A2 14 7F 92 80 40 7C C6 8A 14 41 9C FE E4
+
+. 0 10011D20 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 F2 14
+
+. 0 10011D3C 4
+. 4B FF FA 94
+
+PASS: gsl_fft_real with signal_real_noise, n = 14, stride = 1
+. 0 1000C6C0 56
+. 7F 9C DA 14 92 A1 00 08 7C 1C BB 96 7F B9 C2 14 3A D6 00 01 3B 5A 00 10 7C 00 B9 D6 7F 80 E0 50 93 81 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 C8 01
+
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 14, stride = 1
+. 0 1000D114 20
+. 38 1B 00 01 3B 00 00 01 54 00 F8 7E 2B 80 00 01 40 9D 00 F8
+
+. 0 1000D128 28
+. 7C 15 03 78 3B F0 00 10 3B 3B FF FF 2F 96 00 00 C8 DF FF F0 C8 FF FF F8 41 9E 00 C8
+
+. 0 1000D144 196
+. 57 09 08 3C 57 2B 08 3C 38 C9 FF FF 7E C9 03 A6 38 8B FF FF 7C C5 33 78 57 63 08 3C 54 A9 18 38 54 8B 18 38 39 29 00 08 39 6B 00 08 7D 29 E1 D6 81 81 00 2C 54 C8 18 38 7D 46 62 14 55 47 18 38 7D 6B E1 D6 7D 29 EC AE 39 08 00 08 38 E7 00 08 7D 3C 29 D6 7D 0B EC AE 7C A5 1A 14 FD 89 40 2A FD 29 40 28 7C 1C 21 D6 55 29 18 38 7D 69 EC AE FD 46 03 32 7C 84 1A 14 FD 87 03 32 54 00 18 38 7D 57 51 D6 7D BD 04 AE FC 0B 68 28 7C 17 31 D6 FD 6B 68 2A 55 4A 18 38 FD 47 50 3A 7C C6 DA 14 FC 06 60 38 7D 08 B9 D6 54 00 18 38 7D 7A 05 AE 7C E7 B9 D6 7D 28 D5 AE 7C 0A D5 AE 7D 47 D5 AE 42 00 FF 5C
+
+. 0 1000D208 20
+. 3B 18 00 01 3B 39 FF FF 7F 95 C0 40 3B FF 00 10 41 9D FF 1C
+
+. 0 1000D134 16
+. 2F 96 00 00 C8 DF FF F0 C8 FF FF F8 41 9E 00 C8
+
+. 0 1000D21C 8
+. 73 79 00 01 40 A2 FC 90
+
+. 0 1000CAD0 20
+. 38 60 00 00 C9 16 00 00 7F 83 F8 40 C9 25 00 00 40 9C 00 A0
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 14, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 14, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 14
+. 0 10013980 80
+. 81 21 00 2C 7D 78 39 D6 7C E7 8A 14 7C 08 4A 14 7C 1F 01 D6 55 6B 10 3A 7D 3F 41 D6 54 00 10 3A 7D BD 04 2E 7D 08 22 14 55 29 10 3A 7C 18 51 D6 7C 09 EC 2E 7D 4A 8A 14 ED 80 68 28 EC 00 68 2A 54 00 10 3A 7C 0B CD 2E 7D 99 05 2E 42 00 FF B4
+
+. 0 100130D4 20
+. 39 F4 00 01 3A 60 00 01 55 E0 F8 7E 2B 80 00 01 40 9D 0A E0
+
+. 0 10013BC4 8
+. 72 80 00 01 40 A2 FC 18
+
+. 0 10013BCC 52
+. 3C 00 43 30 80 FE 82 C8 90 01 00 08 3A 40 00 00 93 61 00 0C C9 A7 00 00 C8 01 00 08 81 3E 82 D8 FC 00 68 28 CB E9 00 00 FF FF 00 24 FC 20 F8 90 48 01 52 F9
+
+. 0 10013C00 12
+. FF 80 08 18 FC 20 F8 90 48 01 53 8D
+
+. 0 10013C0C 16
+. 7F 92 80 40 FC 20 08 18 FC 20 08 50 40 BC FB C8
+
+. 0 10013C1C 40
+. 82 FE 82 D0 56 6A 08 3C 82 DE 82 D4 38 C0 00 00 38 A0 00 00 38 E0 00 00 C0 77 00 00 7F 87 D8 40 C0 96 00 00 40 9C 00 FC
+
+. 0 10013C44 32
+. 2F 87 00 00 C0 B6 00 00 C1 17 00 00 FC C0 28 90 FD 40 28 90 FC E0 40 90 FD 20 28 90 41 9E 00 14
+
+. 0 10013C74 12
+. 39 00 00 00 7F 88 D8 40 40 9C 00 74
+
+. 0 10013C80 36
+. 7D 25 52 14 7F 69 03 A6 39 29 FF FF 7C 1F 49 D6 2F 88 00 00 7D 29 D2 14 39 08 00 01 54 00 10 3A 41 9E 00 28
+
+. 0 10013CC8 40
+. 7C 1D 04 2E ED A7 00 32 EC 09 00 32 ED 6A 03 72 ED 8A 00 32 EC 08 58 3A ED A8 63 78 EC C6 00 2A EC A5 68 2A 42 00 FF A0
+
+. 0 10013C8C 24
+. 7C 1F 49 D6 2F 88 00 00 7D 29 D2 14 39 08 00 01 54 00 10 3A 41 9E 00 28
+
+. 0 10013CA4 76
+. EC 09 07 32 ED 89 00 72 ED 64 02 B2 EC 07 00 7A ED A4 02 32 EC E7 67 38 ED 03 5A 38 ED 43 6A BA FD 20 00 90 7C 1D 04 2E ED A7 00 32 EC 09 00 32 ED 6A 03 72 ED 8A 00 32 EC 08 58 3A ED A8 63 78 EC C6 00 2A EC A5 68 2A 42 00 FF A0
+
+. 0 10013CF0 16
+. 38 07 00 01 7D 27 D8 50 7F 80 48 40 40 9C 00 AC
+
+. 0 10013D00 60
+. 7D 27 A1 D6 7F 80 D8 40 7C 07 03 78 55 29 08 3C 7D 26 4A 14 7D 29 52 14 39 29 FF FF 55 2B 10 3A 7D 38 49 D6 39 6B 00 04 7D 6B C1 D6 55 29 10 3A 7C A9 CD 2E 7C CB CD 2E 41 9C FF 0C
+
+. 0 10013C64 28
+. EC 04 07 72 ED A3 07 72 EC 63 07 BA EC 84 6F B8 39 00 00 00 7F 88 D8 40 40 9C 00 74
+
+. 0 10013DA8 8
+. 7F 80 48 00 41 9E 00 44
+
+. 0 10013DF0 44
+. 7D 27 A1 D6 7F 80 D8 40 7C 07 03 78 55 29 08 3C 7D 26 4A 14 7D 29 52 14 39 29 FF FF 7D 38 49 D6 55 29 10 3A 7C A9 CD 2E 4B FF FF 20
+
+. 0 10013D38 4
+. 41 9C FF 0C
+
+. 0 10013DB0 64
+. 7D 29 A1 D6 7F 80 D8 40 FC 00 30 50 7C 07 03 78 55 29 08 3C 7D 26 4A 14 7D 2A 48 50 39 29 FF FF 55 2B 10 3A 7D 38 49 D6 39 6B 00 04 7D 6B C1 D6 55 29 10 3A 7C A9 CD 2E 7C 0B CD 2E 4B FF FF 4C
+
+. 0 10013D3C 20
+. 3A 52 00 01 7C A5 A2 14 7F 92 80 40 7C C6 8A 14 41 9C FE E4
+
+. 0 10013D50 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 F1 D4
+
+. 0 10013D6C 4
+. 4B FF FA 90
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 14, stride = 1
+. 0 1000E880 56
+. 7F 9C DA 14 92 A1 00 08 7C 1C BB 96 7F B9 C2 14 3A D6 00 01 3B 5A 00 08 7C 00 B9 D6 7F 80 E0 50 93 81 00 0C CB E1 00 08 FF FF F0 28 FF FF 07 72 FC 20 F8 90 48 01 A6 41
+
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 14, stride = 1
+. 0 1000F2E0 20
+. 38 1B 00 01 3B 00 00 01 54 00 F8 7E 2B 80 00 01 40 9D 00 F8
+
+. 0 1000F2F4 28
+. 7C 15 03 78 3B B0 00 08 3B 3B FF FF 2F 96 00 00 C0 DD FF F8 C0 FD FF FC 41 9E 00 C8
+
+. 0 1000F310 196
+. 57 09 08 3C 57 2B 08 3C 38 C9 FF FF 7E C9 03 A6 38 8B FF FF 7C C5 33 78 57 63 08 3C 54 A9 10 3A 54 8B 10 3A 39 29 00 04 39 6B 00 04 7D 29 E1 D6 81 81 00 2C 54 C8 10 3A 7D 46 62 14 55 47 10 3A 7D 6B E1 D6 7D 29 FC 2E 39 08 00 04 38 E7 00 04 7D 3C 29 D6 7D 0B FC 2E 7C A5 1A 14 EC 09 40 2A ED 29 40 28 7C 1C 21 D6 55 29 10 3A 7D A9 FC 2E ED 46 00 32 EC 07 00 32 7C 84 1A 14 54 00 10 3A 7D 57 51 D6 7D 9F 04 2E ED 6D 60 28 7C 17 31 D6 ED AD 60 2A 55 4A 10 3A ED 47 52 FA 7C C6 DA 14 ED 66 02 F8 7D 08 B9 D6 54 00 10 3A 7D BA 05 2E 7C E7 B9 D6 7D 28 D5 2E 7D 6A D5 2E 7D 47 D5 2E 42 00 FF 5C
+
+. 0 1000F3D4 20
+. 3B 18 00 01 3B 39 FF FF 7F 95 C0 40 3B BD 00 08 41 9D FF 1C
+
+. 0 1000F300 16
+. 2F 96 00 00 C0 DD FF F8 C0 FD FF FC 41 9E 00 C8
+
+. 0 1000F3E8 8
+. 73 79 00 01 40 A2 FC 90
+
+. 0 1000EC98 20
+. 38 60 00 00 C1 16 00 00 7F 83 E8 40 C1 25 00 00 40 9C 00 A0
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 14, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 14, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 14
+PASS: gsl_fft_complex_forward with signal_noise, n = 14, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 14, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 14, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 14, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 14, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 14, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 14, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 14, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 14, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 14, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 14
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 14, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 14, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 14, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 14, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 14, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 14, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 14, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 14, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 14, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 14, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 14
+PASS: gsl_fft_real with signal_real_noise, n = 14, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 14, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 14, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 14, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 14
+PASS: gsl_fft_real_float with signal_real_noise, n = 14, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 14, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 14, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 14, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 14
+PASS: gsl_fft_complex_forward with signal_noise, n = 14, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 14, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 14, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 14, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 14, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 14, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 14, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 14, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 14, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 14, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 14
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 14, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 14, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 14, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 14, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 14, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 14, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 14, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 14, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 14, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 14, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 14
+PASS: gsl_fft_real with signal_real_noise, n = 14, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 14, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 14, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 14, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 14
+PASS: gsl_fft_real_float with signal_real_noise, n = 14, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 14, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 14, stride = 3
+. 0 FE0CB98 20
+. 83 1E 0F 64 FD A0 F8 90 CB 98 00 00 FE 1F E0 00 40 91 07 50
+
+. 0 FE0D2F8 8
+. FD A0 F8 50 4B FF F8 B0
+
+. 0 FE0CBAC 192
+. C8 26 00 00 83 7E 0F 84 FD 01 68 2A 80 DE 0F 88 C8 9B 00 00 FD 48 08 28 D9 01 00 30 81 61 00 30 81 81 00 34 FD 6D 50 28 55 99 28 34 7F 59 52 14 7D 59 54 AE C8 FA 00 10 FD 0B 02 F2 C8 5A 00 18 C8 BA 00 08 FC 07 20 2A FC CB 20 2A FC 68 1F BA FD 2B 02 32 FC 20 20 28 FD A8 D6 FA FD 89 00 F2 FD 27 08 28 FC 66 20 28 FF CC 28 BA FD AD EA 3A FF 67 F3 3A FF 49 10 2A FC A1 00 F2 FD 9A DA FA CB 66 00 00 FC 8D 02 32 FC 4B 18 28 FF C5 50 2A FC EA 01 32 FC C1 60 BA FD 0A F0 28 FC 06 38 28 FD 68 28 2A FD 20 58 2A FC 3E 48 2A FF BE 08 28 FF 49 E8 2A FC 7A 0E FA FC 83 08 00 41 86 06 98
+
+. 0 FE0D300 4
+. 41 B1 F7 58
+
+. 0 FE0D304 8
+. FC 20 08 50 4B FF FA B8
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 15, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 15
+. 0 FE0DBA4 20
+. FF 1F E8 00 81 3E 0F 88 CB A9 00 00 FC 84 07 72 40 99 03 30
+
+. 0 FE0DEE4 8
+. 81 3E 0F B8 4B FF FC D4
+
+. 0 FE0DBBC 20
+. C8 A9 00 00 FC 1F 21 7A FC C1 00 2A FE 06 08 00 41 B2 F5 78
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 15, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 15, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 15, stride = 1
+. 0 FE0EAC4 20
+. CB C6 00 00 FF A0 20 90 FC E0 10 90 FF 82 F0 00 41 9D 00 08
+
+. 0 FE0EADC 32
+. C8 AA 00 00 FD A5 38 2A FD 8D 28 28 D9 A1 00 30 81 41 00 30 81 61 00 34 FC 27 60 28 41 9D 00 08
+
+. 0 FE0EB00 188
+. FD 21 00 72 C8 C5 00 00 C8 84 00 00 C9 07 00 00 55 67 28 34 81 5E 0F E0 7C A7 42 14 FC A9 21 BA C8 9D 00 00 C9 4A 00 00 C8 C3 00 00 FC 61 50 2A FD A5 42 7A 7C A7 44 AE FD 03 50 28 C8 65 00 08 FD 85 50 2A FD 6D 02 72 FD 8C 50 28 FC E1 40 28 FC 01 5F 7A C9 65 00 10 FC E7 E8 2A FD 45 60 28 FD AB 00 32 C8 05 00 18 FC C9 21 BA FD 21 02 72 FD 0C 02 32 FD 4A 18 2A FD 8C 01 F2 FC 80 68 28 FD A9 01 B2 FC 6A 00 72 FC E4 60 28 FC 8B 40 28 FC 25 03 72 FD 6B 20 28 FC 07 18 28 FD 2B 40 28 FD 40 08 28 FC CA 48 2A FC 24 30 2A FD 04 08 28 FC 86 40 2A FF 84 F0 00 40 9D 01 D4
+
+. 0 FE0ED8C 20
+. 83 BE 0F E4 FC 9F F0 00 C8 1D 00 00 FC 84 00 32 40 85 00 F0
+
+. 0 FE0EE8C 8
+. 81 3E 10 18 4B FF FF 14
+
+. 0 FE0EDA4 16
+. C9 69 00 00 FC FF 02 F2 FC 04 38 28 4B FF FE 2C
+
+. 0 FE0EBDC 12
+. FD 81 00 2A FF 0C 08 00 41 9A 00 60
+
+. 0 FE0EC44 8
+. 2F 9F 00 00 41 BE F5 44
+
+PASS: gsl_fft_complex_forward with signal_pulse, n = 15, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 15, stride = 1
+. 0 FE0EDA0 20
+. 81 3E 10 10 C9 69 00 00 FC FF 02 F2 FC 04 38 28 4B FF FE 2C
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 15, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 15, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 15
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 15, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 15, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 15, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 15, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 15, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 15, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 15, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 15
+. 0 10011F00 32
+. 2F 95 00 00 C8 1C FF F8 C9 BA FF F8 FC 80 00 50 C8 BC FF F0 FC 40 68 50 C8 7A FF F0 41 9E 01 50
+
+PASS: gsl_fft_real with signal_real_noise, n = 15, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 15, stride = 1
+. 0 1000DB6C 60
+. 39 D8 00 10 3A 35 00 10 39 F6 00 10 3A 10 00 10 57 78 08 3C 2F 93 00 00 CA F0 FF F0 CA D0 FF F8 CA AE FF F0 CA 8E FF F8 CA 6F FF F0 CA 4F FF F8 CA 31 FF F0 CA 11 FF F8 41 9E 02 64
+
+. 0 1000DBA8 400
+. 81 3E 81 80 56 96 08 3C 81 5E 81 7C 3B F6 FF FF C9 E9 00 00 3A A0 00 00 C9 CA 00 00 7E 69 03 A6 1C F5 00 05 7C 7F CA 14 7E B5 DA 14 7D 76 38 50 7C E7 B2 14 7D 6B C2 14 38 E7 FF FF 39 6B FF FF 7C 18 3A 14 7C D8 5A 14 55 6A 18 38 54 C9 18 38 39 4A 00 08 39 29 00 08 7D 18 02 14 7D 29 E1 D6 54 05 18 38 55 04 18 38 38 A5 00 08 38 84 00 08 7D 4A E1 D6 7D 09 EC AE 54 E9 18 38 FD 00 40 50 39 29 00 08 7C 1C 01 D6 7C 8A EC AE 7D 43 CA 14 FC 80 20 50 7D 1C 41 D6 54 00 18 38 7C FD 04 AE 7C 0A CA 14 7C DC 31 D6 55 08 18 38 7D 48 EC AE 7D 00 CA 14 7D 7C 59 D6 54 C6 18 38 7C A6 EC AE 55 46 18 38 38 C6 00 08 FC CA 28 2A 7C A5 E1 D6 55 6B 18 38 7C 6B EC AE FD 4A 28 28 57 EB 18 38 FD A7 18 2A 39 6B 00 08 7C 84 E1 D6 7D 65 EC AE FC B9 02 B2 54 05 18 38 38 A5 00 08 FD 8B 20 2A 7C 04 EC AE FF 6D 30 2A 7C FC 39 D6 FD AD 30 28 FD 20 40 2A 55 04 18 38 FC 00 40 28 38 84 00 08 FD 1B 03 B2 7D 29 E1 D6 54 E7 18 38 FF 4C 48 2A 7C 47 EC AE FD 8C 48 28 54 67 18 38 FC D9 00 32 38 E7 00 08 7C 29 EC AE 7C 77 19 D6 FD 3A 03 B2 FD 58 02 B2 7D 37 F9 D6 54 63 18 38 FC 18 00 32 7F FF DA 14 FC E7 18 28 FD 6B 20 28 7D 57 51 D6 FD AD 03 F2 55 29 18 38 FD 8C 03 F2 7C 17 01 D6 55 4A 18 38 FD 02 40 28 FD 21 48 28 FC B8 29 F8
+
+. 0 1000DD38 208
+. 7D 17 41 D6 54 00 18 38 FC D8 32 F8 FC F9 51 FA 7D 6B B9 D6 55 08 18 38 FD 79 02 FA FD 49 60 28 FC 08 68 28 7C E7 B9 D6 FD 08 68 2A FD 29 60 2A FD A0 30 2A FD 8A 28 28 7C C6 B9 D6 FC 88 58 2A FC 69 38 28 FD 08 58 28 FD 29 38 2A 7C A5 B9 D6 FC 00 30 28 FD 4A 28 2A FF F6 02 32 7C 84 B9 D6 FF D4 00 32 FF B2 03 72 FF 90 01 32 FD 76 02 72 FC F4 02 B2 FC D2 03 32 FC B0 00 F2 FD 37 FA 7A FD 55 F2 BA FD 93 EB 3A FC 71 E0 FA FD 17 5A 38 FC 15 38 38 FD B3 33 78 FC 91 29 38 FC 42 D8 2A FC 21 D0 2A 7C 49 D5 AE 7C 2B D5 AE 7D 03 D5 AE 7D 27 D5 AE 7C 0A D5 AE 7D 46 D5 AE 7D BA 05 AE 7D 85 D5 AE 7C 88 D5 AE 7C 64 D5 AE 42 00 FD C4
+
+. 0 1000DE08 32
+. 3A 94 00 01 56 40 F8 7E 7F 80 A0 40 3A 10 00 10 39 CE 00 10 39 EF 00 10 3A 31 00 10 41 9D FD 5C
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 15, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 15, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 15
+. 0 10013F48 32
+. 2F 95 00 00 C0 1C FF FC C1 BA FF FC FC 80 00 50 C0 BC FF F8 FC 40 68 50 C0 7A FF F8 41 9E 01 58
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 15, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 15, stride = 1
+. 0 1000FD68 60
+. 39 96 00 08 39 F5 00 08 39 D9 00 08 3A 10 00 08 57 76 08 3C 2F 92 00 00 C2 F0 FF F8 C2 D0 FF FC C2 AC FF F8 C2 8C FF FC C2 6E FF F8 C2 4E FF FC C2 2F FF F8 C2 0F FF FC 41 9E 02 84
+
+. 0 1000FDA4 400
+. 81 3E 81 F8 56 75 08 3C 81 5E 81 F4 3B 35 FF FF C9 E9 00 00 3A 80 00 00 C9 CA 00 00 7E 49 03 A6 1C F4 00 05 7F B9 C2 14 7C 7D C2 14 7E 94 DA 14 7D 75 38 50 7C E7 AA 14 7D 6B B2 14 38 E7 FF FF 39 6B FF FF 7C 16 3A 14 7C D6 5A 14 55 6A 10 3A 54 C9 10 3A 39 4A 00 04 39 29 00 04 7D 16 02 14 7D 29 E1 D6 54 05 10 3A 55 04 10 3A 38 A5 00 04 38 84 00 04 7D 4A E1 D6 7C 89 FC 2E 54 E9 10 3A FC 80 20 50 39 29 00 04 7C 1C 01 D6 7C 6A FC 2E FC 60 18 50 7D 1C 41 D6 54 00 10 3A 7C BF 04 2E 7C DC 31 D6 55 08 10 3A 7C C8 FC 2E 7D 7C 59 D6 54 C6 10 3A 7F E6 FC 2E 57 A6 10 3A 38 C6 00 04 ED 66 F8 2A 7C A5 E1 D6 55 6B 10 3A 7F CB FC 2E EC C6 F8 28 7D 63 C2 14 EC 05 F0 2A 55 6A 10 3A 7C 84 E1 D6 7C E5 FC 2E EC A5 F0 28 54 65 10 3A ED A7 18 2A 38 A5 00 04 EF 60 58 2A 39 4A 00 04 7D 04 FC 2E 7C FC 39 D6 EC 00 58 28 ED 88 20 2A FD 40 D8 90 7D 29 E1 D6 54 E7 10 3A EF 4D 60 2A 7C 47 FC 2E ED AD 60 28 7C EB C2 14 FD 4A 03 B2 54 E8 10 3A 7C 29 FC 2E 57 29 10 3A FD 20 D0 90 7C 17 C9 D6 39 29 00 04 39 08 00 04 FD 29 03 B2 7F 39 DA 14 7F B7 E9 D6 54 00 10 3A FD 80 10 90 FD 60 08 90 FC 00 03 F2 7C 77 19 D6 57 BD 10 3A FD AD 03 F2 ED 08 20 28 FD 8C 50 28 7D 77 59 D6 FD 6B 48 28 54 63 10 3A EC E7 18 28 FC 00 00 18
+
+. 0 1000FF34 240
+. FD A0 68 18 7C F7 39 D6 FD 80 60 18 55 6B 10 3A FD 60 58 18 ED 39 01 B2 ED 59 02 32 7D 29 B9 D6 EC D8 01 B2 54 E7 10 3A ED 18 02 32 ED 38 49 78 ED 58 51 F8 7C C6 B9 D6 EC 8C 00 28 EC 6B 68 28 EC B9 31 7A EC F9 41 FA 7C A5 B9 D6 ED 8C 00 2A ED 6B 68 2A EC 04 50 2A ED A3 48 28 7D 4A B9 D6 ED 0C 38 2A EC CB 28 28 ED 8C 38 28 ED 6B 28 2A 7D 08 B9 D6 EC 84 50 28 EC 63 48 2A ED 56 03 32 ED 34 01 32 EC F2 00 32 EC B0 02 32 EF F6 02 F2 EF D4 00 F2 EF B2 03 72 EF 90 01 B2 EC 42 D8 2A EC 21 D0 2A ED 77 52 FA EC 75 48 FA 7C 5A 05 2E ED B3 3B 7A 7C 29 D5 2E EC D1 29 BA ED 97 FB 38 EC 95 F1 38 EC 13 E8 38 ED 11 E2 38 7D 9D D5 2E 7D 66 D5 2E 7C 83 D5 2E 7C 65 D5 2E 7C 0B D5 2E 7D AA D5 2E 7D 07 D5 2E 7C C8 D5 2E 42 00 FD A4
+
+. 0 10010024 32
+. 3A 73 00 01 56 20 F8 7E 7F 80 98 40 3A 10 00 08 39 8C 00 08 39 CE 00 08 39 EF 00 08 41 9D FD 3C
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 15, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 15, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 15
+PASS: gsl_fft_complex_forward with signal_noise, n = 15, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 15, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 15, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 15, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 15, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 15, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 15, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 15, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 15, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 15, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 15
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 15, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 15, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 15, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 15, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 15, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 15, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 15, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 15, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 15, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 15, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 15
+PASS: gsl_fft_real with signal_real_noise, n = 15, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 15, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 15, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 15, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 15
+PASS: gsl_fft_real_float with signal_real_noise, n = 15, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 15, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 15, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 15, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 15
+PASS: gsl_fft_complex_forward with signal_noise, n = 15, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 15, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 15, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 15, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 15, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 15, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 15, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 15, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 15, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 15, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 15
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 15, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 15, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 15, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 15, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 15, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 15, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 15, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 15, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 15, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 15, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 15
+PASS: gsl_fft_real with signal_real_noise, n = 15, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 15, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 15, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 15, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 15
+PASS: gsl_fft_real_float with signal_real_noise, n = 15, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 15, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 15, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 16, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 16
+. 0 10007A84 316
+. 81 41 01 84 7D 70 EA 14 7D 6B C9 D6 7D 24 FA 14 7C 1D 52 14 C9 29 00 08 7D 54 EA 14 7D 84 FC AE 7C C5 D2 14 7C 00 C9 D6 55 6B 20 36 7D 2B FA 14 7C CB FC AE C8 49 00 08 7D 73 1A 14 3B BD 00 01 54 00 20 36 7D 4A C9 D6 7D 00 FA 14 7C 7F 04 AE C8 88 00 08 7C 15 1A 14 FD 43 30 28 7F 7B CA 14 FD 64 10 28 55 4A 20 36 7D 2A FA 14 7D 0A FC AE C8 E9 00 08 FD 5D 02 B2 7D 23 E2 14 7D 6B C1 D6 FD 7D 02 F2 7C 84 B2 14 38 63 00 01 FC 0C 40 28 FD A9 38 28 7D 29 C1 D6 FC 63 30 2A 55 6B 20 36 FC 84 10 2A 7D 0B D2 14 FD 8C 40 2A FD 29 38 2A 7C 00 C1 D6 FC A0 58 2A 55 29 20 36 FC CD 50 28 7D 49 D2 14 FD 0C 18 28 FC E9 20 28 54 00 20 36 FD AD 50 2A 7C E0 D2 14 FC 00 58 28 FF D9 02 32 FD 77 00 32 FD 5A 01 72 FC 57 03 72 FC 39 01 F2 FF FA 01 B2 FD B8 5B 7A FC FB F1 FA FC DC 51 BA FC 18 10 38 FD 1B 0A 38 FC BC F9 78 FD 8C 18 2A FD 29 20 2A 7D 85 D5 AE 7C A5 BA 14 D9 26 00 08 7C 09 D5 AE D9 AA 00 08 7D 0B D5 AE D8 E8 00 08 7C BA 05 AE D8 C7 00 08 42 00 FE C8
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 16, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 16, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 16, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 16, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 16
+. 0 1000A8D0 316
+. 81 21 01 40 7D 70 EA 14 7D 6B C9 D6 7D 54 EA 14 7C 1D 4A 14 7D A4 FC 2E 7D 24 FA 14 C0 E9 00 04 3B BD 00 01 7C 00 C9 D6 55 6B 18 38 7D 2B FA 14 7C AB FC 2E C0 89 00 04 7D 75 1A 14 7F 7B CA 14 7D 4A C9 D6 54 00 18 38 7D 00 FA 14 7C 7F 04 2E C0 C8 00 04 7C 03 E2 14 ED 43 28 28 7C 84 B2 14 55 4A 18 38 EC 06 20 28 7D 2A FA 14 7D 2A FC 2E C1 09 00 04 ED 5D 02 B2 ED 8D 48 28 7D 33 1A 14 ED 67 40 28 7C 00 C1 D6 EC 1D 00 32 7D 45 D2 14 ED AD 48 2A 38 63 00 01 EC E7 40 2A EC 63 28 2A 7D 29 C1 D6 EC C6 20 2A 54 00 18 38 EC 4C 00 2A 7D 00 D2 14 EC 8B 50 28 ED 0D 18 28 7D 6B C1 D6 EC A7 30 28 55 29 18 38 ED 8C 00 28 7C E9 D2 14 ED 6B 50 2A EF FA 02 32 55 6B 18 38 EC 37 03 32 7C CB D2 14 EF D9 00 B2 EC 17 02 F2 ED 5A 01 72 ED 39 01 32 ED 78 0A FA EC BB F9 7A EC 9C F1 3A ED 98 03 38 ED 1B 52 38 EC 5C 48 B8 ED AD 18 2A EC E7 30 2A 7D A5 D5 2E 7C A5 BA 14 D0 EA 00 04 7D 9A 05 2E D1 68 00 04 7D 09 D5 2E D0 A7 00 04 7C 4B D5 2E D0 86 00 04 42 00 FE C8
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 16, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 16, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 16, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 16, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 16
+. 0 1001222C 20
+. 3A 1A 00 01 3A 60 00 01 56 00 F8 7E 2B 80 00 01 40 9D 01 FC
+
+. 0 10012240 60
+. 3A D5 00 10 3A 9C 00 10 3A B7 00 10 57 57 08 3C 2F 92 00 00 C8 14 FF F8 C9 B5 FF F8 C9 96 FF F8 FC 20 00 50 FF C0 68 50 C8 54 FF F0 FF 80 60 50 CB F5 FF F0 CB B6 FF F0 41 9E 01 A4
+
+. 0 1001227C 400
+. 56 67 08 3C 7D 33 D0 50 55 29 08 3C 3B 87 FF FF 7E 49 03 A6 38 69 FF FF 7F 9B E3 78 7C 1B 72 14 57 66 18 38 7D 60 72 14 54 09 18 38 7D 0B 72 14 39 29 00 08 55 0A 18 38 7D 29 E9 D6 39 4A 00 08 55 67 18 38 38 E7 00 08 7D 4A E9 D6 7D 49 FC AE 38 C6 00 08 FD 21 02 B2 7C B7 E2 14 7C 97 1A 14 7C 1D 01 D6 7C AA FC AE 54 69 18 38 FD 7C 01 72 57 8A 18 38 39 4A 00 08 7D 7D 59 D6 54 00 18 38 7D 9F 04 AE 39 29 00 08 FC 01 03 32 7D 1D 41 D6 55 6B 18 38 7C EB FC AE FD 82 4B 38 FC DE 01 F2 7C E7 E9 D6 55 08 18 38 7D 08 FC AE FD 42 02 BA 54 88 18 38 FC 9C 02 32 39 08 00 08 7D A7 FC AE 7C C6 E9 D6 FD 1D 5A 38 54 A7 18 38 38 E7 00 08 FD 3E 03 72 7C 1D D9 D6 7D 66 FC AE FD BF 33 7A 7F 7B D2 14 FC BD 21 7A 54 00 18 38 7D 78 19 D6 FC CC 40 28 7C 1F 04 AE FC FF 49 F8 7C 63 8A 14 FC 8B 68 28 7C 18 E1 D6 FD 2A 28 28 55 6B 18 38 FD 6B 68 2A 7F 9C 8A 14 FC C0 30 50 FD 4A 28 2A 7C B8 29 D6 FD A0 38 28 54 00 18 38 FC 00 38 2A FC 64 30 28 FC AB 50 28 7C 98 21 D6 FD 20 48 50 54 A5 18 38 FD 8C 40 2A FC A0 28 50 7D 4A C1 D6 FC ED 48 2A FD 00 60 28 54 84 18 38 FC 60 18 50 FC 00 60 2A 7C E7 C1 D6 FD 6B 50 2A FD AD 48 28 FC 84 30 2A 7C 19 05 AE 7D 6A CD AE 7D 08 C1 D6 7D A5 CD AE 7C 87 CD AE 7D 04 CD AE 7D 29 C1 D6
+
+. 0 1001240C 16
+. 7C A8 CD AE 7C EB CD AE 7C 69 CD AE 42 00 FE 80
+
+. 0 1001241C 28
+. 3A 73 00 01 56 00 F8 7E 7F 80 98 40 3A 94 00 10 3A B5 00 10 3A D6 00 10 41 9D FE 1C
+
+. 0 10012438 8
+. 73 40 00 01 40 A2 F3 78
+
+. 0 10012440 8
+. 2F 92 00 00 41 BE F3 70
+
+. 0 10012448 196
+. 56 67 08 3C 7E 49 03 A6 38 C7 FF FF 80 FE 82 58 57 44 08 3C 38 BA FF FF C9 07 00 00 7C 05 72 14 7D 44 32 14 7D 60 72 14 7C 1D 01 D6 7D 2B 72 14 55 48 18 38 54 C7 18 38 7D 3D 49 D6 54 00 18 38 7C 1F 04 AE 38 E7 00 08 39 08 00 08 7D 7D 59 D6 55 29 18 38 7D A9 FC AE FD 80 68 2A 7C 1D 29 D6 FC 00 68 28 55 6B 18 38 7D 4B FC AE FD 8C 02 32 7C A5 D2 14 FC 00 02 32 54 00 18 38 7D BF 04 AE 7D 58 51 D6 FD 60 50 50 FD 4A 60 28 FD 2D 00 28 7C 18 31 D6 FD 6B 60 28 FD AD 00 2A 55 4A 18 38 7C C6 8A 14 7C E7 C1 D6 54 00 18 38 7D B9 05 AE 7D 08 C1 D6 7D 67 CD AE 7D 2A CD AE 7D 48 CD AE 42 00 FF 5C
+
+. 0 1001250C 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 EA 28
+
+. 0 10012528 4
+. 4B FF F2 A8
+
+PASS: gsl_fft_real with signal_real_noise, n = 16, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 16, stride = 1
+. 0 1000D620 172
+. 54 A0 10 3A 81 81 00 30 7D 23 02 14 7C 1C 01 D6 39 29 FF FF 55 2A 18 38 7D 63 4A 14 39 4A 00 08 7D 7C 59 D6 54 00 18 38 7D 9D 04 AE 7D 05 62 14 7C E8 62 14 7D 3C 49 D6 55 6B 18 38 7D 4B EC AE 7C C7 62 14 FD 6C 50 28 7D 4A E1 D6 55 29 18 38 7C 09 EC AE FD 8C 50 2A FC 00 00 2A 7D AA EC AE 7C 17 29 D6 7C A5 DA 14 FD AD 68 2A FD 4C 00 28 7D 17 41 D6 FD 8C 00 2A FD 2B 68 2A 54 00 18 38 FD 6B 68 28 7D 9A 05 AE 7C F7 39 D6 55 08 18 38 7D 68 D5 AE 7C D7 31 D6 54 E7 18 38 7D 47 D5 AE 54 C6 18 38 7D 26 D5 AE 42 00 FF 58
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 16, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 16, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 16
+. 0 10014288 20
+. 3A 1A 00 01 3A 60 00 01 56 00 F8 7E 2B 80 00 01 40 9D 01 FC
+
+. 0 1001429C 60
+. 3A B7 00 08 3A DB 00 08 3A 9C 00 08 57 57 08 3C 2F 92 00 00 C0 14 FF FC C1 B5 FF FC C1 96 FF FC FC 20 00 50 FF C0 68 50 C0 54 FF F8 FF 80 60 50 C3 F5 FF F8 C3 B6 FF F8 41 9E 01 A4
+
+. 0 100142D8 400
+. 56 68 08 3C 7D 33 D0 50 55 29 08 3C 3B 88 FF FF 7E 49 03 A6 38 69 FF FF 7F 9B E3 78 7C 1B 72 14 57 66 10 3A 7D 60 72 14 54 09 10 3A 7D 0B 72 14 39 29 00 04 55 0A 10 3A 7D 29 F9 D6 39 4A 00 04 55 67 10 3A 38 E7 00 04 7D 4A F9 D6 7D 49 EC 2E 38 C6 00 04 ED 21 02 B2 7C B7 E2 14 7C 97 1A 14 7C 1F 01 D6 7C EA EC 2E 57 89 10 3A EC 9C 01 F2 54 6A 10 3A 39 29 00 04 7D 7F 59 D6 54 00 10 3A 7D 7D 04 2E 39 4A 00 04 ED 22 4A F8 7D 1F 41 D6 55 6B 10 3A 7D 0B EC 2E ED 61 02 F2 54 AB 10 3A ED 9E 02 32 39 6B 00 04 7C E7 F9 D6 55 08 10 3A 7C 08 EC 2E ED 42 5A BA 54 88 10 3A EC 9D 20 38 39 08 00 04 EC 1C 00 32 7C C6 F9 D6 7D A7 EC 2E EC A9 20 28 ED 9F 63 7A 7C 1F D9 D6 EC FD 01 FA ED BE 03 72 7D 66 EC 2E FC A0 28 50 7F 7B D2 14 EC 6B 60 28 EC CA 38 28 54 00 10 3A ED 1F 6A 38 7C 1D 04 2E ED 4A 38 2A 7C 18 E1 D6 ED 6B 60 2A 7F 9C 8A 14 ED A0 40 28 ED 29 20 2A EC E3 28 28 7C F8 19 D6 ED 8B 50 28 54 00 10 3A FC C0 30 50 7C 63 8A 14 EC 00 40 2A 7C B8 29 D6 FD 80 60 50 EC 8D 30 2A 54 E7 10 3A ED 00 48 28 FC E0 38 50 7C 98 21 D6 EC 00 48 2A ED 6B 50 2A 54 A5 10 3A ED AD 30 28 EC 63 28 2A 7C 19 05 2E 7D 29 C1 D6 54 84 10 3A 7D 6B C1 D6 7D 69 CD 2E 7D A5 CD 2E 7D 08 C1 D6 7C 6B CD 2E 7D 04 CD 2E 7D 4A C1 D6
+
+. 0 10014468 16
+. 7D 88 CD 2E 7C 87 CD 2E 7C EA CD 2E 42 00 FE 80
+
+. 0 10014478 28
+. 3A 73 00 01 56 00 F8 7E 7F 80 98 40 3A 94 00 08 3A B5 00 08 3A D6 00 08 41 9D FE 1C
+
+. 0 10014494 8
+. 73 48 00 01 40 A2 F3 48
+
+. 0 1001449C 8
+. 2F 92 00 00 41 BE F3 40
+
+. 0 100144A4 204
+. 81 3E 82 B4 56 68 08 3C 38 C8 FF FF 7E 49 03 A6 57 44 08 3C C9 09 00 00 38 BA FF FF 7C 05 72 14 7D 44 32 14 7D 60 72 14 7C 1F 01 D6 7D 2B 72 14 55 47 10 3A 54 C8 10 3A 7D 3F 49 D6 54 00 10 3A 7D BD 04 2E 39 08 00 04 38 E7 00 04 55 29 10 3A 7D 7F 59 D6 7D 89 EC 2E EC 0D 60 2A ED AD 60 28 7C 1F 29 D6 55 6B 10 3A 7D 6B EC 2E 7C A5 D2 14 FD 40 58 50 54 00 10 3A FC 00 02 32 7D 9D 04 2E 7D 38 31 D6 7C C6 8A 14 FD AD 02 32 FC 00 00 18 7D 58 51 D6 55 29 10 3A FD A0 68 18 ED 6B 00 28 ED 4A 00 28 7D 08 C1 D6 ED 2C 68 28 ED 8C 68 2A 55 4A 10 3A 7C E7 C1 D6 7D 89 CD 2E 7D 48 CD 2E 7D 2A CD 2E 7D 67 CD 2E 42 00 FF 54
+
+. 0 10014570 28
+. 80 E1 00 30 80 01 00 1C 38 E7 00 04 90 E1 00 30 34 E0 FF FF 90 E1 00 1C 40 82 E9 B4
+
+. 0 1001458C 4
+. 4B FF F2 70
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 16, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 16, stride = 1
+. 0 1000F804 172
+. 54 A0 10 3A 81 81 00 30 7D 23 02 14 7C 1C 01 D6 39 29 FF FF 55 2A 10 3A 7D 63 4A 14 39 4A 00 04 7D 7C 59 D6 54 00 10 3A 7D 9F 04 2E 7D 05 62 14 7C E8 62 14 7D 3C 49 D6 55 6B 10 3A 7D 4B FC 2E 7C C7 62 14 ED 6C 50 28 7D 4A E1 D6 55 29 10 3A 7C 09 FC 2E ED 8C 50 2A EC 00 00 2A 7D AA FC 2E 7C 17 29 D6 7C A5 DA 14 ED AD 68 2A ED 4C 00 28 7D 17 41 D6 ED 8C 00 2A ED 2B 68 2A 54 00 10 3A ED 6B 68 28 7D 9A 05 2E 7C F7 39 D6 55 08 10 3A 7D 68 D5 2E 7C D7 31 D6 54 E7 10 3A 7D 47 D5 2E 54 C6 10 3A 7D 26 D5 2E 42 00 FF 58
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 16, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 16, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 16
+PASS: gsl_fft_complex_forward with signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 16, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 16, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 16, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 16, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 16, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 16, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 16, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 16
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 16, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 16, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 16, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 16, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 16, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 16, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 16, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 16
+PASS: gsl_fft_real with signal_real_noise, n = 16, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 16, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 16, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 16, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 16
+PASS: gsl_fft_real_float with signal_real_noise, n = 16, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 16, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 16, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 16, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 16
+PASS: gsl_fft_complex_forward with signal_noise, n = 16, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 16, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 16, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 16, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 16, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 16, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 16, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 16, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 16, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 16, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 16
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 16, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 16, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 16, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 16, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 16, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 16, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 16, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 16, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 16, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 16, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 16
+PASS: gsl_fft_real with signal_real_noise, n = 16, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 16, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 16, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 16, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 16
+PASS: gsl_fft_real_float with signal_real_noise, n = 16, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 16, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 16, stride = 3
+. 0 FE0D438 16
+. CB BD 00 00 FC E0 F0 90 FF 9E E8 00 41 9D 00 08
+
+. 0 FE0D44C 32
+. C9 0A 00 00 FC A8 38 2A FF 65 40 28 D8 A1 00 30 81 41 00 30 81 61 00 34 FC 27 D8 28 41 9D 00 08
+
+. 0 FE0D470 188
+. FF 41 00 72 C9 47 00 00 C8 66 00 00 55 6A 28 34 C8 E5 00 00 7C EA 42 14 83 1E 0F 84 FF 9A 1A BA C8 A4 00 00 C8 18 00 00 C9 63 00 00 FD 81 00 2A C8 67 00 08 FD BC 3E BA 7F 8A 44 AE FD 0C 00 28 FC DC 00 2A FF 6D 06 B2 FD 86 00 28 C8 07 00 18 FD 21 40 28 FC 81 D8 BA CB 67 00 10 FC E9 10 2A FD 5C 60 28 FD BB 01 32 FC DA 59 7A FD 21 06 B2 FD 6C 01 F2 FF 4C 02 32 FC 8A 18 2A FC A0 68 28 FD 29 01 B2 FD 44 00 72 FD BB D0 28 FC 85 58 28 FC 7C 02 72 FD 84 50 28 FC DB 68 28 FC 2C 18 28 FD 66 D0 28 FC 01 58 2A FC 2D 00 2A FC ED 08 28 FC 80 38 2A FC 04 E8 00 40 81 05 5C
+
+. 0 FE0DA84 20
+. 81 1E 0F 88 FF 9F E8 00 CB 48 00 00 FC 84 06 B2 40 9D 02 F0
+
+. 0 FE0DD84 8
+. 81 3E 0F B8 4B FF FD 14
+
+. 0 FE0DA9C 16
+. C9 69 00 00 FC BF 02 F2 FC 04 28 28 4B FF FA A4
+
+. 0 FE0D54C 12
+. FC 81 00 2A FC 84 08 00 41 86 02 6C
+
+. 0 FE0D7C0 8
+. 57 E0 F8 7F 41 A2 F2 94
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 17, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 17
+PASS: gsl_fft_complex_forward with signal_noise, n = 17, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 17, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 17, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 17, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 17, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 17, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 17, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 17
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 17, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 17, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 17, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 17, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 17, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 17, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 17, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 17
+PASS: gsl_fft_real with signal_real_noise, n = 17, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 17, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 17, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 17, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 17
+PASS: gsl_fft_real_float with signal_real_noise, n = 17, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 17, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 17, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 17, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 17
+PASS: gsl_fft_complex_forward with signal_noise, n = 17, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 17, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 17, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 17, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 17, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 17, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 17, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 17, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 17, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 17, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 17
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 17, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 17, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 17, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 17, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 17, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 17, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 17, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 17, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 17, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 17, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 17
+PASS: gsl_fft_real with signal_real_noise, n = 17, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 17, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 17, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 17, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 17
+PASS: gsl_fft_real_float with signal_real_noise, n = 17, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 17, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 17, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 17, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 17
+PASS: gsl_fft_complex_forward with signal_noise, n = 17, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 17, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 17, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 17, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 17, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 17, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 17, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 17, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 17, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 17, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 17
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 17, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 17, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 17, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 17, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 17, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 17, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 17, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 17, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 17, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 17, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 17
+PASS: gsl_fft_real with signal_real_noise, n = 17, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 17, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 17, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 17, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 17
+PASS: gsl_fft_real_float with signal_real_noise, n = 17, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 17, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 17, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 18, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 18
+. 0 FE0EBBC 20
+. FC 1F F0 00 81 1E 0F E4 CB C8 00 00 FC 84 07 B2 40 81 02 B8
+
+. 0 FE0EE84 8
+. 81 3E 10 18 4B FF FD 4C
+
+. 0 FE0EBD4 20
+. C9 29 00 00 FC 1F 22 7A FD 81 00 2A FF 0C 08 00 41 9A 00 60
+
+. 0 FE0EC4C 4
+. 4B FF FA 28
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 18, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 18, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 18, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 18, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 18, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 18, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 18, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 18
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 18, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 18, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 18, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 18, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 18, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 18, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 18, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 18
+. 0 10011F44 296
+. 7D 26 1A 14 7C F7 2A 14 7C 09 1A 14 7D 7D 49 D6 54 0A 18 38 55 29 18 38 39 29 00 08 7C 1D 01 D6 39 4A 00 08 55 6B 18 38 7D 8B FC AE 54 CB 18 38 39 6B 00 08 7D 29 E9 D6 54 00 18 38 7D 7F 04 AE FD 04 03 32 54 E8 18 38 FC E2 02 F2 39 08 00 08 7D 4A E9 D6 7D A9 FC AE 54 A9 18 38 FD 44 03 72 39 29 00 08 7C 0A FC AE FD A5 43 7A 7D 6B E9 D6 FD 22 00 32 FC 03 38 3A 7C 1D 31 D6 7C EB FC AE FD 63 4A F8 54 8B 18 38 39 6B 00 08 FD 85 53 38 7C C6 DA 14 54 00 18 38 7D 58 21 D6 FC CD 00 2A 7C 84 8A 14 FD AD 00 28 FD 4C 58 28 FD 26 00 72 7C F8 39 D6 55 4A 18 38 FD 8C 58 2A 7D 7F 04 AE FD 4A 07 F2 7C 18 29 D6 54 E7 18 38 FC 0C 00 72 7C A5 8A 14 FD 27 48 28 FD AD 07 F2 7D 29 C1 D6 54 00 18 38 FC 0B 00 28 FD 09 50 28 7D 08 C1 D6 FD 6B 60 2A FC E7 30 2A FD 80 68 2A FD 00 40 50 7D 79 05 AE 7D 6B C1 D6 FC 00 68 28 FD 29 50 2A 7C E9 CD AE 7C 07 CD AE 7D 28 CD AE 7D 8A CD AE 7D 0B CD AE 42 00 FE DC
+
+PASS: gsl_fft_real with signal_real_noise, n = 18, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 18, stride = 1
+. 0 1000D3A8 24
+. 2F 94 00 00 C8 36 FF F0 CB F6 FF F8 C8 78 FF F0 C8 58 FF F8 41 9E 01 54
+
+. 0 1000D548 140
+. 1D 25 00 03 7D 47 FA 14 7D 0A FA 14 7D 57 51 D6 7D 29 DA 14 39 29 FF FF 7C 06 4A 14 55 2B 18 38 7C 1C 01 D6 39 6B 00 08 55 4A 18 38 7C A5 DA 14 7D 3C 49 D6 54 00 18 38 7D 5D 04 AE 7D 6B E1 D6 55 29 18 38 7C 09 EC AE FD 80 50 28 7D AB EC AE FC 00 00 2A 7C 17 39 D6 7C E7 DA 14 FD A9 03 72 FD 60 60 50 7D 17 41 D6 FC 00 50 2A FD 8C 68 28 54 00 18 38 FD 6B 68 28 7C 1A 05 AE 55 08 18 38 7D 8A D5 AE 7D 68 D5 AE 42 00 FF 78
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 18, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 18, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 18
+. 0 10013F94 296
+. 7D 26 1A 14 54 88 10 3A 7C 09 1A 14 7D 7F 49 D6 54 0A 10 3A 55 29 10 3A 39 29 00 04 7C 1F 01 D6 39 4A 00 04 55 6B 10 3A 7D 4B EC 2E 54 CB 10 3A 39 6B 00 04 7D 29 F9 D6 54 00 10 3A 7D 7D 04 2E ED 84 02 B2 39 08 00 04 ED 22 02 F2 7D 4A F9 D6 7D A9 EC 2E 54 A9 10 3A ED 85 63 7A 39 29 00 04 ED A4 03 72 7C 0A EC 2E 7D 6B F9 D6 ED 23 48 3A EC 02 00 32 ED 45 6A B8 7C 1F 31 D6 EC CC 48 2A 7C EB EC 2E ED 63 02 F8 7D 77 2A 14 ED 8C 48 28 55 6A 10 3A 54 00 10 3A 7C F8 21 D6 ED 0A 58 28 7C 1D 04 2E ED 4A 58 2A 39 4A 00 04 ED 66 00 72 7C C6 DA 14 ED 08 07 F2 7C 18 29 D6 ED AA 00 72 54 E7 10 3A ED 67 58 28 7C A5 8A 14 ED 8C 07 F2 7C 84 8A 14 ED A0 68 28 7D 78 59 D6 ED 2B 40 28 54 00 10 3A EC 00 50 2A EC E7 30 2A 7D 29 C1 D6 ED 4D 60 2A FD 20 48 50 55 6B 10 3A ED AD 60 28 7C 19 05 2E ED 6B 40 2A 7D 4A C1 D6 7C E9 CD 2E 7D AB CD 2E 7D 08 C1 D6 7D 6A CD 2E 7D 47 CD 2E 7D 28 CD 2E 42 00 FE DC
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 18, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 18, stride = 1
+. 0 1000F574 24
+. 2F 93 00 00 C0 75 FF F8 C0 55 FF FC C0 36 FF F8 C3 F6 FF FC 41 9E 01 6C
+
+. 0 1000F72C 140
+. 1D 25 00 03 7D 47 CA 14 7D 0A CA 14 7D 57 51 D6 7D 29 DA 14 39 29 FF FF 7C 06 4A 14 55 2B 10 3A 7C 1C 01 D6 39 6B 00 04 55 4A 10 3A 7C A5 DA 14 7D 3C 49 D6 54 00 10 3A 7D 5F 04 2E 55 29 10 3A 7D 6B E1 D6 7C 09 FC 2E ED 80 50 28 7D AB FC 2E EC 00 00 2A 7C 17 39 D6 7C E7 DA 14 ED A9 03 72 FD 60 60 50 EC 00 50 2A 7D 17 41 D6 ED 8C 68 28 ED 6B 68 28 54 00 10 3A 7C 1A 05 2E 7D 8A D5 2E 55 08 10 3A 7D 68 D5 2E 42 00 FF 78
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 18, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 18, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 18
+PASS: gsl_fft_complex_forward with signal_noise, n = 18, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 18, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 18, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 18, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 18, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 18, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 18, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 18, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 18, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 18, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 18
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 18, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 18, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 18, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 18, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 18, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 18, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 18, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 18, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 18, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 18, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 18
+PASS: gsl_fft_real with signal_real_noise, n = 18, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 18, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 18, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 18, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 18
+PASS: gsl_fft_real_float with signal_real_noise, n = 18, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 18, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 18, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 18, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 18
+PASS: gsl_fft_complex_forward with signal_noise, n = 18, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 18, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 18, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 18, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 18, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 18, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 18, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 18, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 18, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 18, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 18
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 18, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 18, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 18, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 18, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 18, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 18, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 18, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 18, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 18, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 18, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 18
+PASS: gsl_fft_real with signal_real_noise, n = 18, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 18, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 18, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 18, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 18
+PASS: gsl_fft_real_float with signal_real_noise, n = 18, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 18, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 18, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 19, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 19
+PASS: gsl_fft_complex_forward with signal_noise, n = 19, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 19, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 19, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 19, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 19, stride = 1
+. 0 FE0DBB8 24
+. 81 3E 0F 9C C8 A9 00 00 FC 1F 21 7A FC C1 00 2A FE 06 08 00 41 B2 F5 78
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 19, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 19, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 19
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 19, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 19, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 19, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 19, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 19, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 19, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 19, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 19
+PASS: gsl_fft_real with signal_real_noise, n = 19, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 19, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 19, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 19, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 19
+PASS: gsl_fft_real_float with signal_real_noise, n = 19, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 19, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 19, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 19, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 19
+PASS: gsl_fft_complex_forward with signal_noise, n = 19, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 19, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 19, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 19, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 19, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 19, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 19, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 19, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 19, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 19, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 19
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 19, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 19, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 19, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 19, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 19, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 19, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 19, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 19, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 19, stride = 2
+. 0 FEEF554 12
+. 7F 80 F8 28 7F A0 F9 2D 40 A2 FF F8
+
+PASS: gsl_fft_real_wavetable_alloc, n = 19, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 19
+PASS: gsl_fft_real with signal_real_noise, n = 19, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 19, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 19, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 19, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 19
+PASS: gsl_fft_real_float with signal_real_noise, n = 19, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 19, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 19, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 19, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 19
+PASS: gsl_fft_complex_forward with signal_noise, n = 19, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 19, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 19, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 19, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 19, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 19, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 19, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 19, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 19, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 19, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 19
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 19, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 19, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 19, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 19, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 19, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 19, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 19, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 19, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 19, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 19, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 19
+PASS: gsl_fft_real with signal_real_noise, n = 19, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 19, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 19, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 19, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 19
+PASS: gsl_fft_real_float with signal_real_noise, n = 19, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 19, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 19, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 20, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 20
+PASS: gsl_fft_complex_forward with signal_noise, n = 20, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 20, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 20, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 20, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 20, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 20, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 20, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 20
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 20, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 20, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 20, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 20, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 20, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 20, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 20, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 20
+. 0 10012250 44
+. 2F 92 00 00 C8 14 FF F8 C9 B5 FF F8 C9 96 FF F8 FC 20 00 50 FF C0 68 50 C8 54 FF F0 FF 80 60 50 CB F5 FF F0 CB B6 FF F0 41 9E 01 A4
+
+PASS: gsl_fft_real with signal_real_noise, n = 20, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 20, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 20, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 20, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 20
+. 0 100142AC 44
+. 2F 92 00 00 C0 14 FF FC C1 B5 FF FC C1 96 FF FC FC 20 00 50 FF C0 68 50 C0 54 FF F8 FF 80 60 50 C3 F5 FF F8 C3 B6 FF F8 41 9E 01 A4
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 20, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 20, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 20, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 20, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 20
+PASS: gsl_fft_complex_forward with signal_noise, n = 20, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 20, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 20, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 20, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 20, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 20, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 20, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 20, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 20, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 20, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 20
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 20, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 20, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 20, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 20, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 20, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 20, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 20, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 20, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 20, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 20, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 20
+PASS: gsl_fft_real with signal_real_noise, n = 20, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 20, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 20, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 20, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 20
+PASS: gsl_fft_real_float with signal_real_noise, n = 20, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 20, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 20, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 20, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 20
+PASS: gsl_fft_complex_forward with signal_noise, n = 20, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 20, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 20, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 20, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 20, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 20, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 20, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 20, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 20, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 20, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 20
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 20, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 20, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 20, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 20, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 20, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 20, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 20, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 20, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 20, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 20, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 20
+PASS: gsl_fft_real with signal_real_noise, n = 20, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 20, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 20, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 20, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 20
+PASS: gsl_fft_real_float with signal_real_noise, n = 20, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 20, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 20, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 21, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 21
+PASS: gsl_fft_complex_forward with signal_noise, n = 21, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 21, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 21, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 21, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 21, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 21, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 21, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 21
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 21, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 21, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 21, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 21, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 21, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 21, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 21, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 21
+. 0 100110F8 20
+. 80 9E 82 74 80 7E 82 78 3A 40 00 00 7F 92 80 40 40 9C 0A 80
+
+. 0 1001110C 36
+. 56 6C 08 3C 3A A0 00 00 55 E5 F8 7E 3A C0 00 00 38 E0 00 00 C8 64 00 00 7F 87 D8 40 C8 83 00 00 40 9C 0A 48
+
+. 0 10011130 36
+. 7D 35 62 14 56 97 08 3C 38 C9 FF FF 2F 87 00 00 C8 A3 00 00 C8 E4 00 00 FC C0 28 90 FD 00 28 90 41 9E 00 14
+
+. 0 10011164 12
+. 39 00 00 00 7F 88 D8 40 40 9C 09 D0
+
+. 0 10011170 20
+. 7D 36 62 14 7F 69 03 A6 39 49 FF FF 7D 65 98 50 48 00 00 78
+
+. 0 100111F8 16
+. 2F 88 00 00 55 60 20 36 7D 20 E2 14 41 BE FF 80
+
+. 0 10011184 72
+. C9 24 00 00 C9 83 00 00 55 49 18 38 7C 1D 51 D6 39 29 00 08 7D 65 5A 14 39 08 00 01 7D 29 E9 D6 54 00 18 38 7D 7F 04 AE 7D 4A D2 14 39 6B FF FF FD AC 02 F2 7C 09 FC AE FD 8C 00 32 FD 49 68 3A FD 89 62 F8 41 9E 00 14
+
+. 0 100111DC 28
+. FC 08 03 32 FD A8 02 B2 FC 07 02 BA FD A7 6B 38 FC C6 00 2A FC A5 68 2A 42 40 09 48
+
+. 0 10011208 16
+. C8 09 00 08 7D 3C 04 AE FD 80 00 50 4B FF FF 78
+
+. 0 1001118C 64
+. 55 49 18 38 7C 1D 51 D6 39 29 00 08 7D 65 5A 14 39 08 00 01 7D 29 E9 D6 54 00 18 38 7D 7F 04 AE 7D 4A D2 14 39 6B FF FF FD AC 02 F2 7C 09 FC AE FD 8C 00 32 FD 49 68 3A FD 89 62 F8 41 9E 00 14
+
+. 0 100111CC 44
+. FC 04 02 32 FD A4 01 F2 FC E3 01 F8 FD 03 6A 3A FC 08 03 32 FD A8 02 B2 FC 07 02 BA FD A7 6B 38 FC C6 00 2A FC A5 68 2A 42 40 09 48
+
+. 0 10011B3C 12
+. 7D 27 D8 50 7F 89 38 40 40 9D 01 FC
+
+. 0 10011B48 44
+. 54 C9 18 38 7C 18 31 D6 39 29 00 08 7D 29 C1 D6 54 00 18 38 7C B9 05 AE 7C C9 CD AE 38 E7 00 01 7C C6 BA 14 7F 87 D8 40 41 9C F5 CC
+
+. 0 1001113C 24
+. 2F 87 00 00 C8 A3 00 00 C8 E4 00 00 FC C0 28 90 FD 00 28 90 41 9E 00 14
+
+. 0 10011154 28
+. FC 04 07 72 FD A3 07 72 FC 63 07 BA FC 84 6F B8 39 00 00 00 7F 88 D8 40 40 9C 09 D0
+
+. 0 10011D40 56
+. 7D 29 A1 D6 FC 00 30 50 55 29 08 3C 7D 35 4A 14 7D 2C 48 50 39 29 FF FF 55 2B 18 38 7D 38 49 D6 39 6B 00 08 7D 6B C1 D6 55 29 18 38 7C A9 CD AE 7C 0B CD AE 4B FF FD F0
+
+. 0 10011B64 16
+. 38 E7 00 01 7C C6 BA 14 7F 87 D8 40 41 9C F5 CC
+
+. 0 10011B74 20
+. 3A 52 00 01 7E D6 A2 14 7F 92 80 40 7E B5 8A 14 41 9C F5 98
+
+. 0 10011B88 16
+. 3A 73 00 01 55 E0 F8 7E 7F 80 98 40 41 9D F5 6C
+
+PASS: gsl_fft_real with signal_real_noise, n = 21, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 21, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 21, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 21, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 21
+. 0 100130E8 20
+. 80 9E 82 D0 80 7E 82 D4 3A 40 00 00 7F 92 80 40 40 9C 0A BC
+
+. 0 100130FC 36
+. 56 6C 08 3C 3A A0 00 00 55 E5 F8 7E 3A C0 00 00 38 E0 00 00 C0 64 00 00 7F 87 D8 40 C0 83 00 00 40 9C 0A 84
+
+. 0 10013120 36
+. 7D 35 62 14 56 97 08 3C 38 C9 FF FF 2F 87 00 00 C0 A3 00 00 C0 E4 00 00 FC C0 28 90 FD 00 28 90 41 9E 00 14
+
+. 0 10013154 12
+. 39 00 00 00 7F 88 D8 40 40 9C 0A 0C
+
+. 0 10013160 20
+. 7D 36 62 14 7F 69 03 A6 39 49 FF FF 7D 65 98 50 48 00 00 78
+
+. 0 100131E8 16
+. 2F 88 00 00 55 60 18 38 7D 20 E2 14 41 BE FF 80
+
+. 0 10013174 72
+. C1 24 00 00 C1 83 00 00 55 49 10 3A 7C 1F 51 D6 39 29 00 04 7D 65 5A 14 39 08 00 01 7D 29 F9 D6 54 00 10 3A 7D 7D 04 2E 7D 4A D2 14 39 6B FF FF ED AC 02 F2 7C 09 EC 2E ED 8C 00 32 ED 49 68 3A ED 89 62 F8 41 9E 00 14
+
+. 0 100131CC 28
+. EC 08 03 32 ED A8 02 B2 EC 07 02 BA ED A7 6B 38 EC C6 00 2A EC A5 68 2A 42 40 09 84
+
+. 0 100131F8 16
+. C0 09 00 04 7D 3C 04 2E FD 80 00 50 4B FF FF 78
+
+. 0 1001317C 64
+. 55 49 10 3A 7C 1F 51 D6 39 29 00 04 7D 65 5A 14 39 08 00 01 7D 29 F9 D6 54 00 10 3A 7D 7D 04 2E 7D 4A D2 14 39 6B FF FF ED AC 02 F2 7C 09 EC 2E ED 8C 00 32 ED 49 68 3A ED 89 62 F8 41 9E 00 14
+
+. 0 100131BC 44
+. EC 04 02 32 ED A4 01 F2 EC E3 01 F8 ED 03 6A 3A EC 08 03 32 ED A8 02 B2 EC 07 02 BA ED A7 6B 38 EC C6 00 2A EC A5 68 2A 42 40 09 84
+
+. 0 10013B68 12
+. 7D 27 D8 50 7F 89 38 40 40 9D 02 00
+
+. 0 10013B74 44
+. 54 C9 10 3A 7C 18 31 D6 39 29 00 04 7D 29 C1 D6 54 00 10 3A 7C B9 05 2E 7C C9 CD 2E 38 E7 00 01 7C C6 BA 14 7F 87 D8 40 41 9C F5 90
+
+. 0 1001312C 24
+. 2F 87 00 00 C0 A3 00 00 C0 E4 00 00 FC C0 28 90 FD 00 28 90 41 9E 00 14
+
+. 0 10013144 28
+. EC 04 07 72 ED A3 07 72 EC 63 07 BA EC 84 6F B8 39 00 00 00 7F 88 D8 40 40 9C 0A 0C
+
+. 0 10013D70 56
+. 7D 29 A1 D6 FC 00 30 50 55 29 08 3C 7D 35 4A 14 7D 2C 48 50 39 29 FF FF 55 2B 10 3A 7D 38 49 D6 39 6B 00 04 7D 6B C1 D6 55 29 10 3A 7C A9 CD 2E 7C 0B CD 2E 4B FF FD EC
+
+. 0 10013B90 16
+. 38 E7 00 01 7C C6 BA 14 7F 87 D8 40 41 9C F5 90
+
+. 0 10013BA0 20
+. 3A 52 00 01 7E D6 A2 14 7F 92 80 40 7E B5 8A 14 41 9C F5 5C
+
+. 0 10013BB4 16
+. 3A 73 00 01 55 E0 F8 7E 7F 80 98 40 41 9D F5 30
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 21, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 21, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 21, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 21, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 21
+PASS: gsl_fft_complex_forward with signal_noise, n = 21, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 21, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 21, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 21, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 21, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 21, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 21, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 21, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 21, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 21, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 21
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 21, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 21, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 21, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 21, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 21, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 21, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 21, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 21, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 21, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 21, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 21
+PASS: gsl_fft_real with signal_real_noise, n = 21, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 21, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 21, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 21, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 21
+PASS: gsl_fft_real_float with signal_real_noise, n = 21, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 21, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 21, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 21, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 21
+PASS: gsl_fft_complex_forward with signal_noise, n = 21, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 21, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 21, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 21, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 21, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 21, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 21, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 21, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 21, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 21, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 21
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 21, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 21, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 21, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 21, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 21, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 21, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 21, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 21, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 21, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 21, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 21
+PASS: gsl_fft_real with signal_real_noise, n = 21, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 21, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 21, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 21, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 21
+PASS: gsl_fft_real_float with signal_real_noise, n = 21, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 21, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 21, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 22, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 22
+. 0 10007770 8
+. 2F 9B 00 00 41 BE FF 3C
+
+. 0 10007778 4
+. 40 9A 00 10
+
+. 0 1000777C 12
+. C8 BC FF F0 C8 DC FF F8 4B FF FF 34
+
+. 0 100076B8 8
+. 2F 86 00 00 41 9E 00 A0
+
+. 0 10006FE0 36
+. 7C 0A FC AE 7D 6A FA 14 7D 20 D2 14 7D 4A 42 14 7C 1A 05 AE 7C 00 3A 14 C9 AB 00 08 D9 A9 00 08 42 00 FF E0
+
+. 0 10007038 128
+. 7D 25 32 14 7C 04 32 14 7D 59 49 D6 38 C6 00 01 7D 79 01 D6 55 4A 20 36 7D AA FC AE 7C EA FA 14 55 6B 20 36 7D 38 49 D6 7C 0B FC AE 7D 0B FA 14 FD AD 00 2A 55 29 20 36 7C 18 01 D6 7D A9 D5 AE 7D 29 D2 14 C9 A8 00 08 C8 07 00 08 54 00 20 36 FC 00 68 2A D8 09 00 08 7D 20 D2 14 7C 0B FC AE 7D AA FC AE FD AD 00 28 7D BA 05 AE C8 07 00 08 C9 A8 00 08 FC 00 68 28 D8 09 00 08 42 00 FF 84
+
+. 0 100070E4 36
+. 7C 0A D4 AE 7D 6A D2 14 7D 20 FA 14 7D 4A 42 14 7C 1F 05 AE 7C 00 3A 14 C9 AB 00 08 D9 A9 00 08 42 00 FF E0
+
+. 0 1000713C 64
+. 7D 2A 32 14 7C 1F 04 AE 7D 29 C1 D6 7D 60 FA 14 38 C6 00 01 55 29 20 36 7D A9 D4 AE 7D 29 D2 14 FC 00 68 2A 7C 1F 05 AE 7C 00 42 14 C9 A9 00 08 C8 0B 00 08 FC 00 68 2A D8 0B 00 08 42 00 FF C4
+
+. 0 100071EC 80
+. 7D 26 EA 14 7C 1A 04 AE 7D 29 C9 D6 7D 40 D2 14 7D 66 DA 14 38 C6 00 01 55 29 20 36 7D 6B C9 D6 7C 09 FD AE 7D 29 FA 14 C8 0A 00 08 D8 09 00 08 55 6B 20 36 7D BA 04 AE 7C 00 42 14 7D AB FD AE 7D 6B FA 14 C8 0A 00 08 D8 0B 00 08 42 00 FF B4
+
+. 0 10007278 168
+. 7D 44 32 14 7D 27 32 14 7D 4A C1 D6 7C 06 EA 14 7D 06 DA 14 38 C6 00 01 7D 29 C1 D6 55 4A 20 36 7D 6A D2 14 7D 8A D4 AE C8 0B 00 08 FD 88 03 32 55 29 20 36 7C 00 C9 D6 7D 49 D4 AE FC 08 00 32 7D 29 D2 14 FD 47 02 B2 C9 69 00 08 54 00 20 36 7D 08 C9 D6 7D BF 04 AE FD 67 02 F2 7D 60 FA 14 FD 2A 00 28 FD 4A 00 2A 55 08 20 36 7D 28 FA 14 FD AD 48 2A FD 2B 60 2A FD 6B 60 28 7D BF 05 AE C8 0B 00 08 FC 00 48 2A D8 0B 00 08 7D A8 FC AE FD AD 50 2A 7D A8 FD AE C8 09 00 08 FC 00 58 2A D8 09 00 08 42 00 FF 5C
+
+. 0 1000736C 36
+. 7C 0A FC AE 7D 6A FA 14 7D 20 D2 14 7D 4A 42 14 7C 1A 05 AE 7C 00 3A 14 C9 AB 00 08 D9 A9 00 08 42 00 FF E0
+
+. 0 100073B4 56
+. 7D 20 22 14 7D 6A 22 14 7D 29 C9 D6 38 84 00 01 7D 6B C1 D6 55 29 20 36 7D A9 FC AE 7D 29 FA 14 55 6B 20 36 7D AB D5 AE 7D 6B D2 14 C8 09 00 08 D8 0B 00 08 42 00 FF CC
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 22, stride = 1
+. 0 10007788 16
+. C8 1C FF F8 C8 BC FF F0 FC C0 00 50 4B FF FF 24
+
+PASS: gsl_fft_complex_inverse with signal_noise, n = 22, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 22, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 22, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 22, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 22, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 22, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 22
+. 0 1000A590 8
+. 2F 9B 00 00 41 BE FF 3C
+
+. 0 1000A598 4
+. 40 9A 00 10
+
+. 0 1000A59C 12
+. C0 BC FF F8 C0 DC FF FC 4B FF FF 34
+
+. 0 1000A4D8 8
+. 2F 86 00 00 41 9E 00 A0
+
+. 0 10009E08 36
+. 7C 0A FC 2E 7D 6A FA 14 7D 20 D2 14 7D 4A 42 14 7C 1A 05 2E 7C 00 3A 14 C1 AB 00 04 D1 A9 00 04 42 00 FF E0
+
+. 0 10009E60 128
+. 7D 25 32 14 7C 04 32 14 7D 59 49 D6 38 C6 00 01 7D 79 01 D6 55 4A 18 38 7D AA FC 2E 7C EA FA 14 55 6B 18 38 7D 38 49 D6 7C 0B FC 2E 7D 0B FA 14 ED AD 00 2A 55 29 18 38 7C 18 01 D6 7D A9 D5 2E 7D 29 D2 14 C1 A8 00 04 C0 07 00 04 54 00 18 38 EC 00 68 2A D0 09 00 04 7D 20 D2 14 7C 0B FC 2E 7D AA FC 2E ED AD 00 28 7D BA 05 2E C0 07 00 04 C1 A8 00 04 EC 00 68 28 D0 09 00 04 42 00 FF 84
+
+. 0 10009F0C 36
+. 7C 0A D4 2E 7D 6A D2 14 7D 20 FA 14 7D 4A 42 14 7C 1F 05 2E 7C 00 3A 14 C1 AB 00 04 D1 A9 00 04 42 00 FF E0
+
+. 0 10009F64 64
+. 7D 2A 32 14 7C 1F 04 2E 7D 29 C1 D6 7D 60 FA 14 38 C6 00 01 55 29 18 38 7D A9 D4 2E 7D 29 D2 14 EC 00 68 2A 7C 1F 05 2E 7C 00 42 14 C1 A9 00 04 C0 0B 00 04 EC 00 68 2A D0 0B 00 04 42 00 FF C4
+
+. 0 1000A00C 80
+. 7D 26 EA 14 7C 1A 04 2E 7D 29 C9 D6 7D 40 D2 14 7D 66 DA 14 38 C6 00 01 55 29 18 38 7D 6B C9 D6 7C 09 FD 2E 7D 29 FA 14 C0 0A 00 04 D0 09 00 04 55 6B 18 38 7D BA 04 2E 7C 00 42 14 7D AB FD 2E 7D 6B FA 14 C0 0A 00 04 D0 0B 00 04 42 00 FF B4
+
+. 0 1000A098 168
+. 7D 44 32 14 7D 67 32 14 7D 4A C1 D6 7C 06 EA 14 7D 06 DA 14 38 C6 00 01 7D 6B C1 D6 55 4A 18 38 7D 2A D2 14 7D AA D4 2E C1 29 00 04 ED A8 03 72 55 6B 18 38 7C 00 C9 D6 7D 4B D4 2E ED 28 02 72 7D 6B D2 14 ED 47 02 B2 C1 6B 00 04 54 00 18 38 7D 08 C9 D6 7C 1F 04 2E ED 67 02 F2 ED 8A 48 28 7D 20 FA 14 ED 4A 48 2A 55 08 18 38 EC 00 60 2A ED 8B 68 2A ED 6B 68 28 7C 1F 05 2E C0 09 00 04 EC 00 60 2A D0 09 00 04 7D 28 FA 14 7D A8 FC 2E ED AD 50 2A 7D A8 FD 2E C0 09 00 04 EC 00 58 2A D0 09 00 04 42 00 FF 5C
+
+. 0 1000A18C 36
+. 7C 0A FC 2E 7D 6A FA 14 7D 20 D2 14 7D 4A 42 14 7C 1A 05 2E 7C 00 3A 14 C1 AB 00 04 D1 A9 00 04 42 00 FF E0
+
+. 0 1000A1D4 56
+. 7D 20 22 14 7D 6A 22 14 7D 29 C9 D6 38 84 00 01 7D 6B C1 D6 55 29 18 38 7D A9 FC 2E 7D 29 FA 14 55 6B 18 38 7D AB D5 2E 7D 6B D2 14 C0 09 00 04 D0 0B 00 04 42 00 FF CC
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 22, stride = 1
+. 0 1000A5A8 16
+. C0 1C FF FC C0 BC FF F8 FC C0 00 50 4B FF FF 24
+
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 22, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 22, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 22, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 22, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 22, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 22, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 22
+PASS: gsl_fft_real with signal_real_noise, n = 22, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 22, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 22, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 22, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 22
+PASS: gsl_fft_real_float with signal_real_noise, n = 22, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 22, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 22, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 22, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 22
+PASS: gsl_fft_complex_forward with signal_noise, n = 22, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 22, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 22, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 22, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 22, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 22, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 22, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 22, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 22, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 22, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 22
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 22, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 22, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 22, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 22, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 22, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 22, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 22, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 22, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 22, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 22, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 22
+PASS: gsl_fft_real with signal_real_noise, n = 22, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 22, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 22, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 22, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 22
+PASS: gsl_fft_real_float with signal_real_noise, n = 22, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 22, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 22, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 22, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 22
+PASS: gsl_fft_complex_forward with signal_noise, n = 22, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 22, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 22, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 22, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 22, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 22, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 22, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 22, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 22, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 22, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 22
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 22, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 22, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 22, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 22, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 22, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 22, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 22, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 22, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 22, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 22, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 22
+PASS: gsl_fft_real with signal_real_noise, n = 22, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 22, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 22, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 22, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 22
+PASS: gsl_fft_real_float with signal_real_noise, n = 22, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 22, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 22, stride = 3
+. 0 FE0E084 8
+. FC 00 F8 90 40 99 02 D8
+
+. 0 FE0E360 8
+. FC 00 F8 50 4B FF FD 28
+
+. 0 FE0E08C 200
+. FD 46 00 2A 81 1E 0F E0 80 BE 0F E4 FD 0A 30 28 D9 41 00 30 81 21 00 30 81 41 00 34 FD 40 40 28 55 40 28 34 C8 08 00 00 7C E0 32 14 7C C6 04 AE FC 2A 02 B2 C9 07 00 10 FD 2A 00 2A FD 86 00 2A FC E1 20 FA C8 67 00 08 FD 8C 00 28 FD 29 00 28 C8 07 00 18 FD A7 28 7A FC 8A 48 28 FC E6 60 28 FD 6D 00 72 FC A1 17 BA FD A8 02 F2 FD 6A 00 72 FC 47 18 2A FC 2C 02 72 FD 8C 01 32 FF C0 68 28 FC 8B 01 72 FC E2 02 B2 FD A8 08 28 FC 7E 60 28 CB C5 00 00 FC 46 01 32 FD 83 38 28 FD 08 68 28 FC 0C 10 28 FD 48 08 28 FD 20 50 2A FC CD 48 2A FD 6D 30 28 FC 20 30 90 FC A9 58 2A FC 85 37 BA FF 84 30 00 41 9E 00 3C
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 23, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 23
+PASS: gsl_fft_complex_forward with signal_noise, n = 23, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 23, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 23, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 23, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 23, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 23, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 23, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 23
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 23, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 23, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 23, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 23, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 23, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 23, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 23, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 23
+PASS: gsl_fft_real with signal_real_noise, n = 23, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 23, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 23, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 23, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 23
+PASS: gsl_fft_real_float with signal_real_noise, n = 23, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 23, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 23, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 23, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 23
+PASS: gsl_fft_complex_forward with signal_noise, n = 23, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 23, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 23, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 23, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 23, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 23, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 23, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 23, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 23, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 23, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 23
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 23, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 23, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 23, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 23, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 23, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 23, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 23, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 23, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 23, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 23, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 23
+PASS: gsl_fft_real with signal_real_noise, n = 23, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 23, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 23, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 23, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 23
+PASS: gsl_fft_real_float with signal_real_noise, n = 23, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 23, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 23, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 23, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 23
+PASS: gsl_fft_complex_forward with signal_noise, n = 23, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 23, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 23, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 23, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 23, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 23, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 23, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 23, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 23, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 23, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 23
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 23, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 23, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 23, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 23, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 23, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 23, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 23, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 23, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 23, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 23, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 23
+PASS: gsl_fft_real with signal_real_noise, n = 23, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 23, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 23, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 23, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 23
+PASS: gsl_fft_real_float with signal_real_noise, n = 23, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 23, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 23, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 24, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 24
+PASS: gsl_fft_complex_forward with signal_noise, n = 24, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 24, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 24, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 24, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 24, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 24, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 24, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 24
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 24, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 24, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 24, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 24, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 24, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 24, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 24, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 24
+. 0 100120B0 120
+. 7C 07 1A 14 7D 66 42 14 7D 20 1A 14 7C 1D 01 D6 55 0A 18 38 39 4A 00 08 7D 3D 49 D6 54 00 18 38 7C 1F 04 AE 7C 1D 39 D6 55 29 18 38 7D 89 FC AE 7C E7 DA 14 FD 60 60 2A 54 00 18 38 FC 00 60 28 7D BF 04 AE 7D 78 59 D6 FD 6B 02 72 FD 80 6A BA 7C 18 41 D6 55 6B 18 38 FD AD 00 28 7D 08 8A 14 7D 4A C1 D6 54 00 18 38 7D 99 05 AE 7D 6A CD AE 7D AB CD AE 42 00 FF 8C
+
+PASS: gsl_fft_real with signal_real_noise, n = 24, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 24, stride = 1
+. 0 1000D6FC 32
+. 2F 91 00 00 CB D3 FF F0 CB B3 FF F8 CB 94 FF F0 CB 74 FF F8 CB 55 FF F0 CB 35 FF F8 41 9E 01 9C
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 24, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 24, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 24
+. 0 10014100 132
+. 7C 07 1A 14 7D 66 42 14 7D 20 1A 14 7C 1F 01 D6 55 0A 10 3A 39 4A 00 04 7D 3F 49 D6 54 00 10 3A 7D 7D 04 2E 7C 1F 39 D6 55 29 10 3A 7C 09 EC 2E 7C E7 DA 14 ED 4B 00 28 54 00 10 3A ED 6B 00 2A 7D 9D 04 2E 7D 78 59 D6 FC 00 50 90 FD A0 60 90 ED 6B 02 32 7C 18 41 D6 ED 8C 50 28 FC 00 6A 7A 55 6B 10 3A 7D 08 8A 14 7D 4A C1 D6 54 00 10 3A FC 00 00 18 7C 19 05 2E 7D 6A CD 2E 7D 8B CD 2E 42 00 FF 80
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 24, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 24, stride = 1
+. 0 1000F8E0 32
+. 2F 91 00 00 C3 F3 FF F8 C3 D3 FF FC C3 B4 FF F8 C3 94 FF FC C3 75 FF F8 C3 55 FF FC 41 9E 01 9C
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 24, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 24, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 24
+PASS: gsl_fft_complex_forward with signal_noise, n = 24, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 24, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 24, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 24, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 24, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 24, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 24, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 24, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 24, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 24, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 24
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 24, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 24, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 24, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 24, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 24, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 24, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 24, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 24, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 24, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 24, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 24
+PASS: gsl_fft_real with signal_real_noise, n = 24, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 24, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 24, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 24, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 24
+PASS: gsl_fft_real_float with signal_real_noise, n = 24, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 24, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 24, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 24, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 24
+PASS: gsl_fft_complex_forward with signal_noise, n = 24, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 24, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 24, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 24, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 24, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 24, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 24, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 24, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 24, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 24, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 24
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 24, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 24, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 24, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 24, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 24, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 24, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 24, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 24, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 24, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 24, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 24
+PASS: gsl_fft_real with signal_real_noise, n = 24, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 24, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 24, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 24, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 24
+PASS: gsl_fft_real_float with signal_real_noise, n = 24, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 24, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 24, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 25, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 25
+. 0 10007D6C 400
+. 7C 1B AA 14 7D 72 DA 14 7D 4E DA 14 7D 11 DA 14 7C 00 C9 D6 7D 3D FA 14 CB 89 00 08 7C 83 D2 14 7F BD FC AE 3B 7B 00 01 7E D6 CA 14 7D 6B C9 D6 54 00 20 36 7D 20 FA 14 7C FF 04 AE C9 49 00 08 7C 1C BA 14 7F BD 9A 14 7D 4A C9 D6 55 6B 20 36 7C CB FA 14 7D AB FC AE C8 06 00 08 7D 08 C9 D6 55 4A 20 36 7C EA FA 14 7D 8A FC AE C9 67 00 08 7C E8 02 A6 FC 8D 60 2A 7D 50 E2 14 55 08 20 36 FC 60 58 2A 7D 28 FA 14 7C 48 FC AE C8 29 00 08 FD AD 60 28 FD 27 10 2A 81 1E 80 60 FD 0A 08 2A 7D 67 E2 14 FC 00 58 28 7D 2C E2 14 FF 69 20 2A 7C 00 C1 D6 FF 48 18 2A 3B 9C 00 01 FC E7 10 28 C8 48 00 00 FC B7 03 72 7D 29 C1 D6 54 00 20 36 FC D7 00 32 7C E0 D2 14 FD B8 03 72 7D 6B C1 D6 55 29 20 36 FC 18 00 32 7C C9 D2 14 FD 4A 08 28 FD 29 20 28 7D 4A C1 D6 FD 08 18 28 55 6B 20 36 FD 7B 00 B2 7C AB D2 14 FD 9A 00 B2 55 4A 20 36 7D 0A D2 14 FC B8 29 F8 FC D8 32 B8 FC F7 69 FA FD 57 02 BA FD 29 03 B2 FD 08 03 B2 FD 7D 58 28 FD 9C 60 28 FC F9 01 F2 FD 59 02 B2 FC B9 01 72 FC D9 01 B2 FC 0B 48 28 FD AC 40 28 FD 6B 48 2A FD 8C 40 2A FC 80 30 2A FC 6D 28 28 FC 4B 50 2A FC 2C 38 28 FD 6B 50 28 FC 00 30 28 FD AD 28 2A FD 8C 38 2A FF CF 02 F2 FF F1 00 32 FC B5 01 32 FC D3 00 B2 FC EF 03 32 FD 51 03 72 FD 35 00 F2
+
+. 0 10007EFC 92
+. FD 13 00 72 FD 90 F3 3A FD B2 FB 7A FC 76 28 FA FC 34 30 7A FD 70 3A F8 FC 12 50 38 FC 96 49 38 FC 54 40 B8 FF BD D8 2A FF 9C D0 2A 7F A3 D5 AE 7C 63 A2 14 DB 84 00 08 7D 7A 05 AE D9 87 00 08 7C 09 D5 AE D9 A6 00 08 7C 8B D5 AE D8 65 00 08 7C 4A D5 AE D8 28 00 08 42 00 FE 18
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 25, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 25, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 25, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 25, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 25, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 25, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 25, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 25
+. 0 1000ABD0 400
+. 7C 1B AA 14 7D 72 DA 14 7D 4E DA 14 7D 11 DA 14 7C 00 C9 D6 7D 23 FA 14 C3 89 00 04 7C BC BA 14 7F A3 FC 2E 7C 8C E2 14 3B 7B 00 01 7D 6B C9 D6 54 00 18 38 7D 20 FA 14 7C 9F 04 2E C0 A9 00 04 7E D6 CA 14 7C 63 9A 14 7D 4A C9 D6 55 6B 18 38 7C CB FA 14 7C EB FC 2E C1 06 00 04 7C D0 E2 14 7D 08 C9 D6 55 4A 18 38 7C EA FA 14 7D 4A FC 2E C0 C7 00 04 ED 87 50 2A 55 08 18 38 ED 68 30 2A 7D 28 FA 14 7C 68 FC 2E C0 49 00 04 EC E7 50 28 EC 04 18 2A 7D 28 02 A6 ED A5 10 2A 7C A5 C1 D6 ED 08 30 28 EF 60 60 2A 7C 09 E2 14 EF 4D 58 2A 7D 3D D2 14 EC 00 60 28 7C 84 C1 D6 ED AD 58 28 54 A5 18 38 FD 40 D8 90 7D 65 D2 14 FD 20 D0 90 3B 9C 00 01 7C 00 C1 D6 FD 4A 05 72 54 84 18 38 7D 44 D2 14 FD 29 05 72 7C C6 C1 D6 54 00 18 38 7D 00 D2 14 FD 80 E8 90 FD 60 E0 90 FC 00 05 B2 54 C6 18 38 7C E6 D2 14 FD AD 05 B2 EC 84 18 28 EC A5 10 28 C0 41 00 34 FD 8C 50 28 FD 6B 48 28 EC D7 01 F2 ED 57 02 32 EC F8 01 F2 ED 18 02 32 FC 00 00 18 FD A0 68 18 EC D8 31 38 ED 58 51 78 FD 80 60 18 FD 60 58 18 EC 97 39 3A EC B7 41 7A ED 2C 00 28 ED 0B 68 28 EC 99 01 32 EC B9 01 72 EC D9 01 B2 ED 59 02 B2 ED 8C 00 2A ED 6B 68 2A EC 09 50 2A ED A8 30 28 EC 6B 20 28 EC EC 28 2A ED 29 50 28 ED 8C 28 28 ED 08 30 2A ED 6B 20 2A
+
+. 0 1000AD60 120
+. EC 22 03 32 EF F3 02 72 EC 82 02 F2 EF D1 00 32 EC AF 01 F2 EC 53 02 32 ED 51 03 72 EC CF 00 F2 EF BD D8 2A EF 9C D0 2A ED 6E 0A FA ED 14 FA 3A 7F BD D5 2E ED B2 F3 7A D3 89 00 04 EC 70 28 FA 7F BD A2 14 ED 8E 23 38 ED 34 12 78 EC 12 50 38 EC F0 31 F8 7D 85 D5 2E D1 6B 00 04 7D 24 D5 2E D1 0A 00 04 7C 1A 05 2E D1 A8 00 04 7C E6 D5 2E D0 67 00 04 42 00 FD FC
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 25, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 25, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 25, stride = 1
+. 0 FEC3310 12
+. 7D 40 18 28 7D 20 19 2D 40 A2 FF F8
+
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 25, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 25, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 25, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 25, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 25
+. 0 10011390 20
+. 3A 16 00 01 3A 60 00 01 56 00 F8 7E 2B 80 00 01 40 9D 02 FC
+
+. 0 100113A4 108
+. 81 3E 82 64 3A F7 00 10 3B 9C 00 10 7E E8 03 A6 FE 40 E0 50 39 F4 00 10 FE 20 D8 50 39 95 00 10 93 81 00 28 56 D7 08 3C C9 C9 00 00 7D 68 02 A6 81 41 00 28 2F 92 00 00 C9 8C FF F8 C8 0A FF F8 C9 AB FF F8 FE A0 60 50 C9 6F FF F8 FF 20 00 50 FE E0 68 50 CB 4A FF F0 FE 60 58 50 CB 0B FF F0 CA CC FF F0 CA 8F FF F0 41 9E 02 60
+
+. 0 10011410 400
+. 56 69 08 3C 80 FE 82 68 3B 69 FF FF 7E 93 B0 50 FE 00 70 90 C9 E7 00 00 3A A0 00 00 7F 7C DB 78 7E 49 03 A6 7D 7C D2 14 7C 0B D2 14 7D 1D 59 D6 7D 20 D2 14 7D 49 D2 14 54 07 18 38 55 26 18 38 55 45 18 38 7C 1D 01 D6 55 6B 18 38 39 6B 00 08 38 E7 00 08 38 C6 00 08 7D 3D 49 D6 38 A5 00 08 55 08 18 38 54 00 18 38 7C E8 FC AE 7D 5F 04 AE 7D 5D 51 D6 55 29 18 38 7D 29 FC AE FC 39 01 F2 57 89 18 38 FC 17 02 B2 39 29 00 08 7D 6B E9 D6 55 4A 18 38 7C AA FC AE FD B5 02 72 57 6A 18 38 FD 93 01 72 39 4A 00 08 7C E7 E9 D6 7D 0B FC AE 56 8B 08 3C FC 79 02 32 7D 6B AA 14 39 6B FF FF 7C C6 E9 D6 7D 67 FC AE FD 1A 0A 3A 7C F7 5A 14 55 64 18 38 FC 57 02 F2 54 E3 18 38 7C A5 E9 D6 7C C6 FC AE FD 78 02 FA 38 84 00 08 38 63 00 08 FF F5 01 B2 7E B5 8A 14 7C 85 FC AE 7D 29 E9 D6 FC D6 69 BA FF D3 01 32 7C 1D E1 D6 7F A9 FC AE FC 94 61 3A 7D 37 DA 14 7D 17 4A 14 FD 36 FA 78 55 26 18 38 54 00 18 38 55 05 18 38 FC B4 F1 78 7C 3F 04 AE 38 C6 00 08 7C 18 D9 D6 FC FA 19 F8 38 A5 00 08 7F 9C B2 14 FD 58 12 B8 7F 7B 8A 14 7D 38 49 D6 54 00 18 38 FD AB 30 2A FC 08 20 2A FD 87 28 2A FC 6A 48 2A 7D 18 41 D6 FF C0 68 2A 55 29 18 38 FD 4A 48 28 FF EC 18 2A FC 00 68 28 7D 78 59 D6 FD 3E 03 F2 55 08 18 38 FD 6B 30 28
+
+. 0 10011410 204
+. FC 5C 02 B2 7C F8 39 D6 55 6B 18 38 FD 5B 02 B2 FC E7 28 28 7D 4A C1 D6 FC 00 04 32 54 E7 18 38 FD 8C 18 28 FD BF 03 F2 7C C6 C1 D6 FD 3D 48 28 FC DC 02 F2 7C A5 C1 D6 FD 7B 02 F2 FD 08 20 28 FC 51 11 FA 7C 84 C1 D6 FD 8C 04 32 FC F2 51 F8 7C 63 C1 D6 FD 49 00 28 FD A1 68 28 FD 29 00 2A FC D1 32 3A FD 12 5A 38 FC 0D 60 28 FC AA 10 28 FD 69 38 28 FD AD 60 2A FC 80 30 2A FC 21 F8 2A FD 8D 40 2A FF BD F0 2A FD 60 58 50 7C 39 05 AE FC A0 28 50 FD AD 40 28 7F AA CD AE FD 29 38 2A FC 00 30 28 FD 4A 10 2A 7D A9 CD AE 7D 26 CD AE 7C 08 CD AE 7D 45 CD AE 7D 8B CD AE 7D 64 CD AE 7C 87 CD AE 7C A3 CD AE 42 00 FD CC
+
+. 0 1001166C 48
+. 3A 73 00 01 56 00 F8 7E 7D 28 02 A6 7F 80 98 40 81 01 00 28 39 8C 00 10 39 EF 00 10 39 08 00 10 39 29 00 10 91 01 00 28 7D 28 03 A6 41 9D FD 38
+
+. 0 100113D0 64
+. 7D 68 02 A6 81 41 00 28 2F 92 00 00 C9 8C FF F8 C8 0A FF F8 C9 AB FF F8 FE A0 60 50 C9 6F FF F8 FF 20 00 50 FE E0 68 50 CB 4A FF F0 FE 60 58 50 CB 0B FF F0 CA CC FF F0 CA 8F FF F0 41 9E 02 60
+
+. 0 1001169C 8
+. 72 CA 00 01 40 82 01 14
+
+PASS: gsl_fft_real with signal_real_noise, n = 25, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 25, stride = 1
+. 0 1000DB80 40
+. 2F 93 00 00 CA F0 FF F0 CA D0 FF F8 CA AE FF F0 CA 8E FF F8 CA 6F FF F0 CA 4F FF F8 CA 31 FF F0 CA 11 FF F8 41 9E 02 64
+
+. 0 1000DA5C 244
+. 1D 05 00 05 7C E5 CA 14 7D 26 42 14 7D 1C 41 D6 39 29 FF FF 7C 06 4A 14 54 0A 18 38 7D 7C 49 D6 39 4A 00 08 55 29 18 38 39 29 00 08 55 08 18 38 7C 1C 01 D6 55 6B 18 38 7D 8B EC AE 7D 67 CA 14 7D 08 EC AE 54 00 18 38 7D 4A E1 D6 7C 1D 04 AE FD 2C 00 2A 7D 6A EC AE 7D 29 E1 D6 FD 8C 00 28 7D 4B CA 14 FD 29 48 2A FD B9 02 F2 7C 09 EC AE 7D 2A CA 14 FD 78 02 F2 7C 17 29 D6 7C A5 DA 14 FD 49 01 B2 FD 8C 01 F2 7C F7 39 D6 54 00 18 38 FD B8 68 38 FC 19 58 3A 7D 77 59 D6 54 E7 18 38 FD 48 50 28 FD AD 68 2A FC 00 00 2A 7D 57 51 D6 FD 6A 60 28 55 6B 18 38 FD 8C 50 2A FD 08 48 2A 7D 37 49 D6 FD 4B 68 2A FD 2C 00 2A 55 4A 18 38 FD 8C 00 28 7D 1A 05 AE FD 6B 68 28 55 29 18 38 7D 87 D5 AE 7D 6B D5 AE 7D 4A D5 AE 7D 29 D5 AE 42 00 FF 10
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 25, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 25, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 25
+. 0 10013390 20
+. 3A 55 00 01 3A 80 00 01 56 40 F8 7E 2B 80 00 01 40 9D 03 18
+
+. 0 100133A4 100
+. 3A F7 00 08 3B 9C 00 08 7E E8 03 A6 FD E0 D8 50 FD C0 D0 50 39 F6 00 08 39 9B 00 08 93 81 00 28 56 B7 08 3C 7D 48 02 A6 81 21 00 28 2F 93 00 00 C1 8C FF FC C0 09 FF FC C1 AA FF FC FE 80 60 50 C1 6F FF FC FF 00 00 50 FE C0 68 50 C3 29 FF F8 FE 40 58 50 C2 EA FF F8 C2 AC FF F8 C2 6F FF F8 41 9E 02 84
+
+. 0 10013408 400
+. 56 89 08 3C 81 7E 82 C0 80 FE 82 C4 3B 89 FF FF 7E 14 A8 50 CA 2B 00 00 CA 07 00 00 3A C0 00 00 7F 9B E3 78 7E 69 03 A6 7D 7B D2 14 7C 0B D2 14 7D 1F 59 D6 7D 20 D2 14 7D 49 D2 14 54 07 10 3A 55 26 10 3A 55 45 10 3A 7C 1F 01 D6 55 6B 10 3A 39 6B 00 04 38 E7 00 04 38 C6 00 04 7D 3F 49 D6 38 A5 00 04 55 08 10 3A 54 00 10 3A 7C C8 EC 2E 7D 1D 04 2E 7D 5F 51 D6 55 29 10 3A 7C 89 EC 2E EC B8 01 B2 EC F6 02 32 57 69 10 3A EC 54 01 32 39 29 00 04 7D 6B F9 D6 55 4A 10 3A 7C 6A EC 2E 57 8A 10 3A 39 4A 00 04 EC 32 00 F2 7C E7 F9 D6 7D 6B EC 2E EC B9 2A FA ED 78 02 F2 7C C6 F9 D6 7C 07 EC 2E EC F7 38 3A EC 16 00 32 7C A5 F9 D6 7D 86 EC 2E EC D9 59 B8 EC 55 13 3A ED 94 03 32 7D A5 EC 2E ED 17 02 38 7D 29 F9 D6 EC 33 0B 7A ED B2 03 72 EC 95 61 38 EC 05 08 2A 7C 1F D9 D6 ED 87 10 2A 7F C9 EC 2E EC 73 68 F8 56 09 08 3C ED 68 20 2A 7D 29 B2 14 EF 80 60 2A 54 00 10 3A ED A6 18 2A 7F FD 04 2E EC 00 60 28 7C 17 E2 14 FD 40 E0 90 39 29 FF FF EF AD 58 2A 7D 17 02 14 ED AD 58 28 7C 97 4A 14 FD 4A 04 32 54 07 10 3A 55 06 10 3A 55 25 10 3A 54 83 10 3A FD 80 E8 90 7D 78 E1 D6 FD 20 F0 90 38 E7 00 04 38 C6 00 04 FD 8C 04 32 38 A5 00 04 38 63 00 04 7C 18 01 D6 FC 00 04 72 55 6B 10 3A 7F 7B AA 14 ED 08 20 28
+
+. 0 10013598 240
+. 7E D6 8A 14 FD 29 50 28 7D 18 41 D6 FD 60 F8 90 54 00 10 3A FD AD 04 72 7F 9C 8A 14 EC C6 18 28 7D 38 49 D6 FC 00 00 18 55 08 10 3A EC E7 10 28 FD 6B 60 28 FD 20 48 18 7C 98 21 D6 ED 5B 02 32 55 29 10 3A ED 1A 02 32 EC 89 00 28 ED 4E 51 BA 7D 4A C1 D6 FD A0 68 18 54 84 10 3A ED 29 00 2A EC A5 08 28 FD 60 58 18 7C E7 C1 D6 EC CF 41 B8 ED 9B 01 F2 EC FA 01 F2 EC 0B 68 28 7C C6 C1 D6 ED 8E 61 7A ED 6B 68 2A EC 64 50 28 ED 09 30 28 7C A5 C1 D6 EC AF 39 78 ED A0 60 2A EF FF E8 2A 7C 63 C1 D6 EC EB 28 2A EF DE E0 2A FD 00 40 50 7F EB CD 2E FC 60 18 50 ED 6B 28 28 7F CA CD 2E ED 29 30 2A EC 00 60 28 EC 84 50 2A 7D 79 05 2E 7D 27 CD 2E 7C 08 CD 2E 7C 86 CD 2E 7C E9 CD 2E 7D 05 CD 2E 7D A4 CD 2E 7C 63 CD 2E 42 00 FD AC
+
+. 0 10013688 48
+. 3A 94 00 01 56 40 F8 7E 7D 28 02 A6 7F 80 A0 40 81 01 00 28 39 8C 00 08 39 EF 00 08 39 08 00 08 39 29 00 08 91 01 00 28 7D 28 03 A6 41 9D FD 14
+
+. 0 100133C8 64
+. 7D 48 02 A6 81 21 00 28 2F 93 00 00 C1 8C FF FC C0 09 FF FC C1 AA FF FC FE 80 60 50 C1 6F FF FC FF 00 00 50 FE C0 68 50 C3 29 FF F8 FE 40 58 50 C2 EA FF F8 C2 AC FF F8 C2 6F FF F8 41 9E 02 84
+
+. 0 100136B8 8
+. 72 AA 00 01 40 82 01 24
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 25, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 25, stride = 1
+. 0 1000FD7C 40
+. 2F 92 00 00 C2 F0 FF F8 C2 D0 FF FC C2 AC FF F8 C2 8C FF FC C2 6E FF F8 C2 4E FF FC C2 2F FF F8 C2 0F FF FC 41 9E 02 84
+
+. 0 1000FC48 260
+. 1D 44 00 05 7C C4 C2 14 7C E6 C2 14 7C D7 31 D6 7D 65 52 14 39 6B FF FF 7C 05 5A 14 7D 3C 59 D6 54 08 10 3A 39 08 00 04 55 6B 10 3A 39 6B 00 04 7C 1C 01 D6 55 29 10 3A 7C 09 FC 2E 7D 27 C2 14 54 C6 10 3A 54 00 10 3A 7D 5C 51 D6 7D BF 04 2E ED 00 68 2A EC 00 68 28 55 4A 10 3A 7D 08 E1 D6 7C EA FC 2E ED 08 40 2A FD A0 38 90 FD 80 40 90 7D 6B E1 D6 7D 28 FC 2E FC 00 01 B2 FD 8C 01 72 7D 6B FC 2E 7D 69 C2 14 ED 59 02 72 7C 17 21 D6 ED 38 02 72 7C 84 DA 14 FD AD 60 28 FC 00 00 18 ED 58 52 F8 7C F7 39 D6 FD A0 68 18 54 00 10 3A ED 79 4A FA ED 4A 50 2A ED 8D 00 28 7D 37 49 D6 EC 00 68 2A 54 E7 10 3A ED 6B 58 2A ED AC 50 2A 7D 77 59 D6 EC E7 40 2A ED 20 58 2A 55 29 10 3A EC 00 58 28 ED 8C 50 28 7C FA 05 2E 55 6B 10 3A 7C 06 D5 2E 7D 87 D5 2E 7D A9 D5 2E 7D 2B D5 2E 42 00 FF 00
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 25, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 25, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 25
+PASS: gsl_fft_complex_forward with signal_noise, n = 25, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 25, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 25, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 25, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 25, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 25, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 25, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 25, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 25, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 25, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 25
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 25, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 25, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 25, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 25, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 25, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 25, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 25, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 25, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 25, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 25, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 25
+PASS: gsl_fft_real with signal_real_noise, n = 25, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 25, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 25, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 25, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 25
+PASS: gsl_fft_real_float with signal_real_noise, n = 25, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 25, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 25, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 25, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 25
+PASS: gsl_fft_complex_forward with signal_noise, n = 25, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 25, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 25, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 25, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 25, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 25, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 25, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 25, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 25, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 25, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 25
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 25, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 25, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 25, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 25, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 25, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 25, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 25, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 25, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 25, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 25, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 25
+PASS: gsl_fft_real with signal_real_noise, n = 25, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 25, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 25, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 25, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 25
+PASS: gsl_fft_real_float with signal_real_noise, n = 25, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 25, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 25, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 26, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 26
+PASS: gsl_fft_complex_forward with signal_noise, n = 26, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 26, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 26, stride = 1
+. 0 FE0D040 20
+. 83 FE 0F 68 FC 1F 00 00 C8 5F 00 00 FD AD 00 B2 40 81 0E AC
+
+. 0 FE0DEFC 8
+. 81 3E 0F BC 4B FF F1 58
+
+. 0 FE0D058 24
+. C8 A9 00 00 FC 1F 69 7A C8 21 00 10 FD 21 00 2A FF 01 48 00 41 9A 00 D8
+
+PASS: gsl_fft_complex_forward with signal_pulse, n = 26, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 26, stride = 1
+. 0 FE0D054 28
+. 81 3E 0F A0 C8 A9 00 00 FC 1F 69 7A C8 21 00 10 FD 21 00 2A FF 01 48 00 41 9A 00 D8
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 26, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 26, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 26
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 26, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 26, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 26, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 26, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 26, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 26, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 26, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 26
+PASS: gsl_fft_real with signal_real_noise, n = 26, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 26, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 26, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 26, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 26
+PASS: gsl_fft_real_float with signal_real_noise, n = 26, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 26, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 26, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 26, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 26
+PASS: gsl_fft_complex_forward with signal_noise, n = 26, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 26, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 26, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 26, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 26, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 26, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 26, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 26, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 26, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 26, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 26
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 26, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 26, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 26, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 26, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 26, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 26, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 26, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 26, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 26, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 26, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 26
+PASS: gsl_fft_real with signal_real_noise, n = 26, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 26, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 26, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 26, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 26
+PASS: gsl_fft_real_float with signal_real_noise, n = 26, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 26, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 26, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 26, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 26
+PASS: gsl_fft_complex_forward with signal_noise, n = 26, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 26, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 26, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 26, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 26, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 26, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 26, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 26, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 26, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 26, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 26
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 26, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 26, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 26, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 26, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 26, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 26, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 26, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 26, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 26, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 26, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 26
+PASS: gsl_fft_real with signal_real_noise, n = 26, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 26, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 26, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 26, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 26
+PASS: gsl_fft_real_float with signal_real_noise, n = 26, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 26, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 26, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 27, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 27
+PASS: gsl_fft_complex_forward with signal_noise, n = 27, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 27, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 27, stride = 1
+. 0 FE0DEEC 8
+. 81 3E 0F B8 4B FF FF 04
+
+. 0 FE0DDF4 16
+. CB A9 00 00 FF 7F 07 72 FC 06 D8 28 4B FF F2 00
+
+PASS: gsl_fft_complex_forward with signal_pulse, n = 27, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 27, stride = 1
+. 0 FE0EBD0 24
+. 81 3E 10 10 C9 29 00 00 FC 1F 22 7A FD 81 00 2A FF 0C 08 00 41 9A 00 60
+
+. 0 FE0CFF4 24
+. 81 3E 0F 9C C8 09 00 00 FC 1F 30 3A FF 81 00 2A FF 1C 08 00 41 BA FA 50
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 27, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 27, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 27
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 27, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 27, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 27, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 27, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 27, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 27, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 27, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 27
+PASS: gsl_fft_real with signal_real_noise, n = 27, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 27, stride = 1
+. 0 1000D3D4 316
+. 1D 03 00 03 7C C5 FA 14 7C 63 DA 14 7D 64 40 50 7D 08 22 14 7D 6B CA 14 39 08 FF FF 39 6B FF FF 7C 19 42 14 55 6A 18 38 54 09 18 38 39 4A 00 08 39 29 00 08 7D 4A E1 D6 55 07 18 38 38 E7 00 08 7D 29 E1 D6 7D 4A EC AE 54 CA 18 38 FD 40 50 50 39 4A 00 08 7C 1C 01 D6 7D 69 EC AE 54 A9 18 38 FC 8B 50 2A 39 29 00 08 FD 6B 50 28 7D 7C 59 D6 54 00 18 38 7D 1D 04 AE FD A4 07 72 FD 6B 07 B2 55 6B 18 38 7C E7 E1 D6 7C 0B EC AE FD 88 00 28 7D 1C 41 D6 FD 08 00 2A 7C C7 EC AE 7C E6 FA 14 FD 8C 07 B2 54 EB 18 38 39 6B 00 08 FD A6 68 28 55 08 18 38 FC 08 07 72 7D 28 EC AE 7C 17 29 D6 7C A5 DA 14 FD 4D 60 28 FD AD 60 2A FC 09 00 28 FC A3 02 B2 7C D7 31 D6 54 00 18 38 FC E1 03 72 FD 42 02 B2 7C F7 39 D6 54 C6 18 38 FD BF 03 72 FD 80 58 2A 7D 29 B9 D6 FC 00 58 28 54 E7 18 38 FD 29 40 2A FC A2 2B 3A 7D 4A B9 D6 FC FF 38 3A 7D 3A 05 AE FC 01 68 38 7D 6B B9 D6 FD 83 53 38 FC C6 20 2A 7C C9 D5 AE 7C 06 D5 AE 7C EA D5 AE 7D 87 D5 AE 7C AB D5 AE 42 00 FE C8
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 27, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 27, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 27
+PASS: gsl_fft_real_float with signal_real_noise, n = 27, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 27, stride = 1
+. 0 1000F5A0 340
+. 1D 1D 00 03 7C C4 CA 14 7C A6 CA 14 7F BD DA 14 7D 63 40 50 7D 08 1A 14 7D 6B C2 14 39 08 FF FF 39 6B FF FF 7C 18 42 14 55 6A 10 3A 54 09 10 3A 39 4A 00 04 39 29 00 04 7D 4A E1 D6 55 07 10 3A 38 E7 00 04 7D 29 E1 D6 7D 2A FC 2E 54 CA 10 3A FD 20 48 50 39 4A 00 04 7C 1C 01 D6 7D 49 FC 2E 54 89 10 3A EC 8A 48 2A 39 29 00 04 ED 4A 48 28 7D 7C 59 D6 54 00 10 3A 7D 7F 04 2E FD A0 20 90 ED 4A 07 72 55 6B 10 3A 7C E7 E1 D6 7C EB FC 2E FD AD 07 B2 54 AB 10 3A EC AB 38 2A 39 6B 00 04 7D 1C 41 D6 7C C7 FC 2E ED 6B 38 28 FD 80 28 90 FC 00 30 90 55 08 10 3A ED 6B 07 72 7D 08 FC 2E FD 8C 07 B2 7C 17 21 D6 7C 84 DA 14 FC 00 68 28 FD A0 40 90 EC C6 20 2A FC 00 00 18 7C D7 31 D6 FD AD 60 28 54 00 10 3A ED 08 28 2A ED 80 58 28 FD A0 68 18 7C B7 29 D6 EC 00 58 2A 54 C6 10 3A EC E1 03 32 7D 1A 05 2E ED 2D 50 2A ED 63 00 32 7D 29 B9 D6 ED AD 50 28 54 A5 10 3A EC 02 00 32 ED 9F 03 32 7D 4A B9 D6 ED 62 5B 7A EC FF 3A 7A 7C C9 D5 2E ED A3 03 78 ED 21 62 78 7D 6B B9 D6 7D A6 D5 2E 7D 6A D5 2E 7D 25 D5 2E 7C EB D5 2E 42 00 FE B0
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 27, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 27, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 27
+PASS: gsl_fft_complex_forward with signal_noise, n = 27, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 27, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 27, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 27, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 27, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 27, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 27, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 27, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 27, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 27, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 27
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 27, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 27, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 27, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 27, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 27, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 27, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 27, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 27, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 27, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 27, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 27
+PASS: gsl_fft_real with signal_real_noise, n = 27, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 27, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 27, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 27, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 27
+PASS: gsl_fft_real_float with signal_real_noise, n = 27, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 27, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 27, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 27, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 27
+PASS: gsl_fft_complex_forward with signal_noise, n = 27, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 27, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 27, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 27, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 27, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 27, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 27, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 27, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 27, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 27, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 27
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 27, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 27, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 27, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 27, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 27, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 27, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 27, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 27, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 27, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 27, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 27
+PASS: gsl_fft_real with signal_real_noise, n = 27, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 27, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 27, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 27, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 27
+PASS: gsl_fft_real_float with signal_real_noise, n = 27, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 27, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 27, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 28, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 28
+PASS: gsl_fft_complex_forward with signal_noise, n = 28, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 28, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 28, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 28, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 28, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 28, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 28, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 28
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 28, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 28, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 28, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 28, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 28, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 28, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 28, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 28
+PASS: gsl_fft_real with signal_real_noise, n = 28, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 28, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 28, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 28, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 28
+PASS: gsl_fft_real_float with signal_real_noise, n = 28, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 28, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 28, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 28, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 28
+PASS: gsl_fft_complex_forward with signal_noise, n = 28, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 28, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 28, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 28, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 28, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 28, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 28, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 28, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 28, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 28, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 28
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 28, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 28, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 28, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 28, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 28, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 28, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 28, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 28, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 28, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 28, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 28
+PASS: gsl_fft_real with signal_real_noise, n = 28, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 28, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 28, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 28, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 28
+PASS: gsl_fft_real_float with signal_real_noise, n = 28, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 28, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 28, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 28, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 28
+PASS: gsl_fft_complex_forward with signal_noise, n = 28, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 28, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 28, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 28, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 28, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 28, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 28, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 28, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 28, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 28, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 28
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 28, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 28, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 28, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 28, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 28, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 28, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 28, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 28, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 28, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 28, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 28
+PASS: gsl_fft_real with signal_real_noise, n = 28, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 28, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 28, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 28, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 28
+PASS: gsl_fft_real_float with signal_real_noise, n = 28, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 28, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 28, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 29, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 29
+PASS: gsl_fft_complex_forward with signal_noise, n = 29, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 29, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 29, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 29, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 29, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 29, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 29, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 29
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 29, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 29, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 29, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 29, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 29, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 29, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 29, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 29
+PASS: gsl_fft_real with signal_real_noise, n = 29, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 29, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 29, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 29, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 29
+PASS: gsl_fft_real_float with signal_real_noise, n = 29, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 29, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 29, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 29, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 29
+PASS: gsl_fft_complex_forward with signal_noise, n = 29, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 29, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 29, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 29, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 29, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 29, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 29, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 29, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 29, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 29, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 29
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 29, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 29, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 29, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 29, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 29, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 29, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 29, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 29, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 29, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 29, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 29
+PASS: gsl_fft_real with signal_real_noise, n = 29, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 29, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 29, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 29, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 29
+PASS: gsl_fft_real_float with signal_real_noise, n = 29, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 29, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 29, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 29, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 29
+PASS: gsl_fft_complex_forward with signal_noise, n = 29, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 29, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 29, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 29, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 29, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 29, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 29, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 29, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 29, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 29, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 29
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 29, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 29, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 29, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 29, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 29, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 29, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 29, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 29, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 29, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 29, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 29
+PASS: gsl_fft_real with signal_real_noise, n = 29, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 29, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 29, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 29, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 29
+PASS: gsl_fft_real_float with signal_real_noise, n = 29, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 29, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 29, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 30, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 30
+PASS: gsl_fft_complex_forward with signal_noise, n = 30, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 30, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 30, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 30, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 30, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 30, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 30, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 30
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 30, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 30, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 30, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 30, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 30, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 30, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 30, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 30
+PASS: gsl_fft_real with signal_real_noise, n = 30, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 30, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 30, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 30, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 30
+PASS: gsl_fft_real_float with signal_real_noise, n = 30, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 30, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 30, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 30, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 30
+PASS: gsl_fft_complex_forward with signal_noise, n = 30, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 30, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 30, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 30, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 30, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 30, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 30, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 30, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 30, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 30, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 30
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 30, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 30, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 30, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 30, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 30, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 30, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 30, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 30, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 30, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 30, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 30
+PASS: gsl_fft_real with signal_real_noise, n = 30, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 30, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 30, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 30, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 30
+PASS: gsl_fft_real_float with signal_real_noise, n = 30, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 30, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 30, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 30, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 30
+PASS: gsl_fft_complex_forward with signal_noise, n = 30, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 30, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 30, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 30, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 30, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 30, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 30, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 30, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 30, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 30, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 30
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 30, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 30, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 30, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 30, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 30, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 30, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 30, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 30, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 30, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 30, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 30
+PASS: gsl_fft_real with signal_real_noise, n = 30, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 30, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 30, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 30, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 30
+PASS: gsl_fft_real_float with signal_real_noise, n = 30, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 30, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 30, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 31, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 31
+PASS: gsl_fft_complex_forward with signal_noise, n = 31, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 31, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 31, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 31, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 31, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 31, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 31, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 31
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 31, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 31, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 31, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 31, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 31, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 31, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 31, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 31
+PASS: gsl_fft_real with signal_real_noise, n = 31, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 31, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 31, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 31, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 31
+PASS: gsl_fft_real_float with signal_real_noise, n = 31, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 31, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 31, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 31, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 31
+PASS: gsl_fft_complex_forward with signal_noise, n = 31, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 31, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 31, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 31, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 31, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 31, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 31, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 31, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 31, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 31, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 31
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 31, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 31, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 31, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 31, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 31, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 31, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 31, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 31, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 31, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 31, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 31
+PASS: gsl_fft_real with signal_real_noise, n = 31, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 31, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 31, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 31, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 31
+PASS: gsl_fft_real_float with signal_real_noise, n = 31, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 31, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 31, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 31, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 31
+PASS: gsl_fft_complex_forward with signal_noise, n = 31, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 31, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 31, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 31, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 31, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 31, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 31, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 31, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 31, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 31, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 31
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 31, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 31, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 31, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 31, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 31, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 31, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 31, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 31, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 31, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 31, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 31
+PASS: gsl_fft_real with signal_real_noise, n = 31, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 31, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 31, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 31, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 31
+PASS: gsl_fft_real_float with signal_real_noise, n = 31, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 31, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 31, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 32, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 32
+PASS: gsl_fft_complex_forward with signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 32, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 32, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 32, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 32, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 32
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 32, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 32, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 32, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 32, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 32
+. 0 10012298 388
+. 7C 1B 72 14 57 66 18 38 7D 60 72 14 54 09 18 38 7D 0B 72 14 39 29 00 08 55 0A 18 38 7D 29 E9 D6 39 4A 00 08 55 67 18 38 38 E7 00 08 7D 4A E9 D6 7D 49 FC AE 38 C6 00 08 FD 21 02 B2 7C B7 E2 14 7C 97 1A 14 7C 1D 01 D6 7C AA FC AE 54 69 18 38 FD 7C 01 72 57 8A 18 38 39 4A 00 08 7D 7D 59 D6 54 00 18 38 7D 9F 04 AE 39 29 00 08 FC 01 03 32 7D 1D 41 D6 55 6B 18 38 7C EB FC AE FD 82 4B 38 FC DE 01 F2 7C E7 E9 D6 55 08 18 38 7D 08 FC AE FD 42 02 BA 54 88 18 38 FC 9C 02 32 39 08 00 08 7D A7 FC AE 7C C6 E9 D6 FD 1D 5A 38 54 A7 18 38 38 E7 00 08 FD 3E 03 72 7C 1D D9 D6 7D 66 FC AE FD BF 33 7A 7F 7B D2 14 FC BD 21 7A 54 00 18 38 7D 78 19 D6 FC CC 40 28 7C 1F 04 AE FC FF 49 F8 7C 63 8A 14 FC 8B 68 28 7C 18 E1 D6 FD 2A 28 28 55 6B 18 38 FD 6B 68 2A 7F 9C 8A 14 FC C0 30 50 FD 4A 28 2A 7C B8 29 D6 FD A0 38 28 54 00 18 38 FC 00 38 2A FC 64 30 28 FC AB 50 28 7C 98 21 D6 FD 20 48 50 54 A5 18 38 FD 8C 40 2A FC A0 28 50 7D 4A C1 D6 FC ED 48 2A FD 00 60 28 54 84 18 38 FC 60 18 50 FC 00 60 2A 7C E7 C1 D6 FD 6B 50 2A FD AD 48 28 FC 84 30 2A 7C 19 05 AE 7D 6A CD AE 7D 08 C1 D6 7D A5 CD AE 7C 87 CD AE 7D 04 CD AE 7D 29 C1 D6 7C A8 CD AE 7C EB CD AE 7C 69 CD AE 42 00 FE 80
+
+. 0 10012464 168
+. 7C 05 72 14 7D 44 32 14 7D 60 72 14 7C 1D 01 D6 7D 2B 72 14 55 48 18 38 54 C7 18 38 7D 3D 49 D6 54 00 18 38 7C 1F 04 AE 38 E7 00 08 39 08 00 08 7D 7D 59 D6 55 29 18 38 7D A9 FC AE FD 80 68 2A 7C 1D 29 D6 FC 00 68 28 55 6B 18 38 7D 4B FC AE FD 8C 02 32 7C A5 D2 14 FC 00 02 32 54 00 18 38 7D BF 04 AE 7D 58 51 D6 FD 60 50 50 FD 4A 60 28 FD 2D 00 28 7C 18 31 D6 FD 6B 60 28 FD AD 00 2A 55 4A 18 38 7C C6 8A 14 7C E7 C1 D6 54 00 18 38 7D B9 05 AE 7D 08 C1 D6 7D 67 CD AE 7D 2A CD AE 7D 48 CD AE 42 00 FF 5C
+
+PASS: gsl_fft_real with signal_real_noise, n = 32, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 32, stride = 1
+. 0 1000D900 180
+. 7D 23 2A 14 54 AB 18 38 55 2A 18 38 7C 1C 29 D6 39 6B 00 08 81 81 00 30 39 4A 00 08 7D 04 62 14 7D 3C 49 D6 54 00 18 38 7D 9D 04 AE 7C E8 62 14 7C C7 62 14 7C A5 FA 14 7D 6B E1 D6 55 29 18 38 7D A9 EC AE FD 4C 68 28 7D 4A E1 D6 7D 6B EC AE FD 8C 68 2A FD 4A 02 72 7C 0A EC AE 7C 17 21 D6 FD 8C 60 2A 7C 84 DA 14 FD AB 00 2A FC 00 58 28 7D 17 41 D6 54 00 18 38 FD AD 02 72 7D 9A 05 AE FC 00 00 2A 7C F7 39 D6 55 08 18 38 FD 6A 68 2A FD 4A 68 28 7C D7 31 D6 FD 60 58 50 54 E7 18 38 7D 48 D5 AE 7C 07 D5 AE 54 C6 18 38 7D 66 D5 AE 42 00 FF 50
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 32, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 32, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 32
+. 0 100142F4 388
+. 7C 1B 72 14 57 66 10 3A 7D 60 72 14 54 09 10 3A 7D 0B 72 14 39 29 00 04 55 0A 10 3A 7D 29 F9 D6 39 4A 00 04 55 67 10 3A 38 E7 00 04 7D 4A F9 D6 7D 49 EC 2E 38 C6 00 04 ED 21 02 B2 7C B7 E2 14 7C 97 1A 14 7C 1F 01 D6 7C EA EC 2E 57 89 10 3A EC 9C 01 F2 54 6A 10 3A 39 29 00 04 7D 7F 59 D6 54 00 10 3A 7D 7D 04 2E 39 4A 00 04 ED 22 4A F8 7D 1F 41 D6 55 6B 10 3A 7D 0B EC 2E ED 61 02 F2 54 AB 10 3A ED 9E 02 32 39 6B 00 04 7C E7 F9 D6 55 08 10 3A 7C 08 EC 2E ED 42 5A BA 54 88 10 3A EC 9D 20 38 39 08 00 04 EC 1C 00 32 7C C6 F9 D6 7D A7 EC 2E EC A9 20 28 ED 9F 63 7A 7C 1F D9 D6 EC FD 01 FA ED BE 03 72 7D 66 EC 2E FC A0 28 50 7F 7B D2 14 EC 6B 60 28 EC CA 38 28 54 00 10 3A ED 1F 6A 38 7C 1D 04 2E ED 4A 38 2A 7C 18 E1 D6 ED 6B 60 2A 7F 9C 8A 14 ED A0 40 28 ED 29 20 2A EC E3 28 28 7C F8 19 D6 ED 8B 50 28 54 00 10 3A FC C0 30 50 7C 63 8A 14 EC 00 40 2A 7C B8 29 D6 FD 80 60 50 EC 8D 30 2A 54 E7 10 3A ED 00 48 28 FC E0 38 50 7C 98 21 D6 EC 00 48 2A ED 6B 50 2A 54 A5 10 3A ED AD 30 28 EC 63 28 2A 7C 19 05 2E 7D 29 C1 D6 54 84 10 3A 7D 6B C1 D6 7D 69 CD 2E 7D A5 CD 2E 7D 08 C1 D6 7C 6B CD 2E 7D 04 CD 2E 7D 4A C1 D6 7D 88 CD 2E 7C 87 CD 2E 7C EA CD 2E 42 00 FE 80
+
+. 0 100144C0 176
+. 7C 05 72 14 7D 44 32 14 7D 60 72 14 7C 1F 01 D6 7D 2B 72 14 55 47 10 3A 54 C8 10 3A 7D 3F 49 D6 54 00 10 3A 7D BD 04 2E 39 08 00 04 38 E7 00 04 55 29 10 3A 7D 7F 59 D6 7D 89 EC 2E EC 0D 60 2A ED AD 60 28 7C 1F 29 D6 55 6B 10 3A 7D 6B EC 2E 7C A5 D2 14 FD 40 58 50 54 00 10 3A FC 00 02 32 7D 9D 04 2E 7D 38 31 D6 7C C6 8A 14 FD AD 02 32 FC 00 00 18 7D 58 51 D6 55 29 10 3A FD A0 68 18 ED 6B 00 28 ED 4A 00 28 7D 08 C1 D6 ED 2C 68 28 ED 8C 68 2A 55 4A 10 3A 7C E7 C1 D6 7D 89 CD 2E 7D 48 CD 2E 7D 2A CD 2E 7D 67 CD 2E 42 00 FF 54
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 32, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 32, stride = 1
+. 0 1000FAE4 188
+. 7D 23 2A 14 54 AB 10 3A 55 2A 10 3A 7C 1C 29 D6 39 6B 00 04 81 81 00 30 39 4A 00 04 7D 04 62 14 7D 3C 49 D6 54 00 10 3A 7D 5F 04 2E 7C E8 62 14 7C C7 62 14 7C A5 EA 14 7D 6B E1 D6 55 29 10 3A 7C 09 FC 2E ED AA 00 28 7D 4A E1 D6 7D 6B FC 2E ED 4A 00 2A 7D 8A FC 2E 7C 17 21 D6 FD AD 02 72 7C 84 DA 14 EC 0B 60 2A ED 8C 58 28 7D 17 41 D6 FD A0 68 18 54 00 10 3A ED 4A 50 2A 7C F7 39 D6 ED 8C 60 2A FC 00 02 72 55 08 10 3A 7D 5A 05 2E 7C D7 31 D6 54 E7 10 3A FC 00 00 18 ED 6D 00 2A 54 C6 10 3A ED AD 00 28 FD 60 58 50 7D A8 D5 2E 7D 87 D5 2E 7D 66 D5 2E 42 00 FF 48
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 32, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 32, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 32
+PASS: gsl_fft_complex_forward with signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 32, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 32, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 32, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 32, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 32, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 32, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 32, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 32
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 32, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 32, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 32, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 32, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 32, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 32, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 32, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 32
+PASS: gsl_fft_real with signal_real_noise, n = 32, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 32, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 32, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 32, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 32
+PASS: gsl_fft_real_float with signal_real_noise, n = 32, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 32, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 32, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 32, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 32
+PASS: gsl_fft_complex_forward with signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 32, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 32, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 32, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 32, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 32, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 32, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 32, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 32
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 32, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 32, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 32, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 32, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 32, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 32, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 32, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 32
+PASS: gsl_fft_real with signal_real_noise, n = 32, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 32, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 32, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 32, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 32
+PASS: gsl_fft_real_float with signal_real_noise, n = 32, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 32, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 32, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 33, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 33
+PASS: gsl_fft_complex_forward with signal_noise, n = 33, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 33, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 33, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 33, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 33, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 33, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 33, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 33
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 33, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 33, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 33, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 33, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 33, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 33, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 33, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 33
+PASS: gsl_fft_real with signal_real_noise, n = 33, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 33, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 33, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 33, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 33
+PASS: gsl_fft_real_float with signal_real_noise, n = 33, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 33, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 33, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 33, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 33
+PASS: gsl_fft_complex_forward with signal_noise, n = 33, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 33, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 33, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 33, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 33, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 33, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 33, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 33, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 33, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 33, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 33
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 33, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 33, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 33, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 33, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 33, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 33, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 33, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 33, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 33, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 33, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 33
+PASS: gsl_fft_real with signal_real_noise, n = 33, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 33, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 33, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 33, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 33
+PASS: gsl_fft_real_float with signal_real_noise, n = 33, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 33, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 33, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 33, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 33
+PASS: gsl_fft_complex_forward with signal_noise, n = 33, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 33, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 33, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 33, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 33, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 33, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 33, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 33, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 33, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 33, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 33
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 33, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 33, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 33, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 33, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 33, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 33, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 33, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 33, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 33, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 33, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 33
+PASS: gsl_fft_real with signal_real_noise, n = 33, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 33, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 33, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 33, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 33
+PASS: gsl_fft_real_float with signal_real_noise, n = 33, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 33, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 33, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 34, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 34
+PASS: gsl_fft_complex_forward with signal_noise, n = 34, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 34, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 34, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 34, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 34, stride = 1
+. 0 FE0DA98 20
+. 81 3E 0F 9C C9 69 00 00 FC BF 02 F2 FC 04 28 28 4B FF FA A4
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 34, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 34, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 34
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 34, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 34, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 34, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 34, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 34, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 34, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 34, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 34
+PASS: gsl_fft_real with signal_real_noise, n = 34, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 34, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 34, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 34, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 34
+PASS: gsl_fft_real_float with signal_real_noise, n = 34, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 34, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 34, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 34, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 34
+PASS: gsl_fft_complex_forward with signal_noise, n = 34, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 34, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 34, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 34, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 34, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 34, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 34, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 34, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 34, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 34, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 34
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 34, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 34, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 34, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 34, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 34, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 34, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 34, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 34, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 34, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 34, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 34
+PASS: gsl_fft_real with signal_real_noise, n = 34, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 34, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 34, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 34, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 34
+PASS: gsl_fft_real_float with signal_real_noise, n = 34, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 34, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 34, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 34, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 34
+PASS: gsl_fft_complex_forward with signal_noise, n = 34, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 34, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 34, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 34, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 34, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 34, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 34, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 34, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 34, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 34, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 34
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 34, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 34, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 34, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 34, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 34, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 34, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 34, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 34, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 34, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 34, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 34
+PASS: gsl_fft_real with signal_real_noise, n = 34, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 34, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 34, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 34, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 34
+PASS: gsl_fft_real_float with signal_real_noise, n = 34, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 34, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 34, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 35, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 35
+PASS: gsl_fft_complex_forward with signal_noise, n = 35, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 35, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 35, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 35, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 35, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 35, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 35, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 35
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 35, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 35, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 35, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 35, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 35, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 35, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 35, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 35
+. 0 10011100 12
+. 3A 40 00 00 7F 92 80 40 40 9C 0A 80
+
+PASS: gsl_fft_real with signal_real_noise, n = 35, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 35, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 35, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 35, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 35
+. 0 100130F0 12
+. 3A 40 00 00 7F 92 80 40 40 9C 0A BC
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 35, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 35, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 35, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 35, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 35
+PASS: gsl_fft_complex_forward with signal_noise, n = 35, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 35, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 35, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 35, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 35, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 35, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 35, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 35, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 35, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 35, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 35
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 35, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 35, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 35, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 35, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 35, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 35, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 35, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 35, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 35, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 35, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 35
+PASS: gsl_fft_real with signal_real_noise, n = 35, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 35, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 35, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 35, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 35
+PASS: gsl_fft_real_float with signal_real_noise, n = 35, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 35, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 35, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 35, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 35
+PASS: gsl_fft_complex_forward with signal_noise, n = 35, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 35, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 35, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 35, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 35, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 35, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 35, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 35, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 35, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 35, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 35
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 35, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 35, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 35, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 35, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 35, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 35, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 35, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 35, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 35, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 35, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 35
+PASS: gsl_fft_real with signal_real_noise, n = 35, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 35, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 35, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 35, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 35
+PASS: gsl_fft_real_float with signal_real_noise, n = 35, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 35, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 35, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 36, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 36
+. 0 10008218 400
+. 7C C8 02 A6 80 E1 01 40 7C 0C AA 14 7D 71 62 14 7D 07 62 14 7C 00 C9 D6 7D 46 62 14 7C 9B FC AE 7D 3B FA 14 CA E1 00 40 C8 29 00 08 7C EE 62 14 7D 6B C9 D6 54 00 20 36 7D 20 FA 14 7C FF 04 AE C9 09 00 08 7F BC D2 14 CB 01 00 48 39 8C 00 01 7D 4A C9 D6 55 6B 20 36 7D 2B FC AE 7C AB FA 14 C9 65 00 08 7E 94 CA 14 80 01 01 48 7F 7B 92 14 7D 08 C9 D6 55 4A 20 36 7C 0A FC AE 7C CA FA 14 C9 A6 00 08 7D 40 BA 14 FC C9 00 2A 80 C1 01 4C 55 08 20 36 7C E7 C9 D6 7D 48 FC AE 7D 28 FA 14 C9 89 00 08 FC AB 68 2A FC 6A 38 2A 81 01 01 44 FC 4C 40 2A 54 E7 20 36 FD 6B 68 28 7F 87 FC AE FD 4A 38 28 7D 27 FA 14 FD 8C 40 28 CB A9 00 08 FC E6 05 32 7D 68 BA 14 7D 37 B2 14 7D 06 BA 14 FD A3 05 32 7C 10 BA 14 7D 29 C1 D6 3A F7 00 01 FD 29 00 28 FD 05 05 32 FC 02 05 32 7D 6B C1 D6 55 29 20 36 FD 75 02 F2 7C E9 D2 14 FD 95 03 32 7D 4A C1 D6 55 6B 20 36 FD 35 02 72 7C CB D2 14 FD 55 02 B2 7D 08 C1 D6 55 4A 20 36 FC E4 38 28 7C AA D2 14 FD BC 68 28 FD 01 40 28 7C 00 C1 D6 FC 1D 00 28 55 08 20 36 FC 84 30 2A 7C 88 D2 14 FC C7 58 2A FF 9C 18 2A 54 00 20 36 FC 6D 60 2A 7C 60 D2 14 FC 21 28 2A FF BD 10 2A FC A8 48 28 FC 40 50 28 FC E7 58 28 FD AD 60 28 FC 00 50 2A FF E6 18 28 FD 08 48 2A FC C6 18 2A FF C5 10 28
+
+. 0 100083A8 200
+. FC A5 10 2A C8 41 00 48 FD 87 68 28 FD 68 00 28 FC 42 01 B2 FD 08 00 2A C8 01 00 40 FD 24 E0 28 FD 41 E8 28 D8 41 01 98 FC E7 68 2A C9 A1 00 50 FC 00 03 32 FF 6D 07 F2 FF 57 02 F2 D8 01 01 90 FF 38 01 72 FE CD 07 B2 C9 A1 00 38 FC 6E 02 72 FC 52 01 F2 FF 0E 02 B2 FE F2 02 32 FD 6D 02 FA C8 01 01 98 FD 4F 1A BA FC B0 01 7A FD 13 12 3A FF D1 DF BA FD 8D D3 38 FC D0 C9 B8 FD 2F C2 78 FC F3 B9 F8 FF F1 B7 F8 FC 84 E0 2A FC 21 E8 2A 7C 9C D5 AE 7F 9C 9A 14 D8 3D 00 08 7D 89 D5 AE D9 67 00 08 7C CB D5 AE D8 A6 00 08 7D 2A D5 AE D9 45 00 08 7C E8 D5 AE D9 04 00 08 7F FA 05 AE DB C3 00 08 42 00 FD AC
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 36, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 36, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 36, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 36, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 36, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 36, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 36, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 36
+. 0 1000B09C 400
+. 7C E8 02 A6 7C 0C AA 14 7D 71 62 14 7D 4E 62 14 7C 00 C9 D6 81 21 00 F0 7D 07 62 14 7C 9B FC 2E 7C E9 62 14 7D 3B FA 14 7D 6B C9 D6 54 00 18 38 C0 69 00 04 7D 20 FA 14 C1 09 00 04 39 8C 00 01 7C FF 04 2E 7C 17 B2 14 7D 4A C9 D6 55 6B 18 38 7C AB FA 14 7D 2B FC 2E C1 65 00 04 7E 94 CA 14 7F 7B 92 14 7D 08 C9 D6 55 4A 18 38 7C CA FA 14 7C 0A FC 2E C1 A6 00 04 EC C9 00 2A 81 41 00 F4 55 08 18 38 7C E7 C9 D6 7D 28 FA 14 7D 48 FC 2E C1 89 00 04 EC AB 68 2A EC 4A 38 2A 81 01 00 FC EC 2C 40 2A 54 E7 18 38 7D 27 FA 14 ED 29 00 28 ED 6B 68 28 7F E7 FC 2E ED 4A 38 28 C3 C9 00 04 ED 8C 40 28 80 E1 00 F8 ED 06 05 32 7D 2A BA 14 EC E5 05 32 7D 67 BA 14 EC 02 05 32 7D 48 BA 14 ED A1 05 32 7D 10 BA 14 EC E3 38 28 7C 00 C1 D6 ED 04 40 28 7C FC D2 14 ED 35 02 72 3A F7 00 01 ED 75 02 F2 EC 1F 00 28 7D 29 C1 D6 ED BE 68 28 54 00 18 38 ED 55 02 B2 7C C0 D2 14 ED 95 03 32 EC 84 30 2A 7D 6B C1 D6 EC C8 58 2A 55 29 18 38 EC 63 28 2A 7C A9 D2 14 EC A7 48 28 EF FF 10 2A 7D 4A C1 D6 EC 40 60 2A 55 6B 18 38 EF DE 08 2A 7C 8B D2 14 EC 2D 50 28 ED 08 58 28 7D 08 C1 D6 ED AD 50 2A 55 4A 18 38 EC E7 48 2A 7C 6A D2 14 EC 00 60 28 EF 85 08 28 55 08 18 38 ED 67 68 28 7F A8 D2 14 ED 88 00 28 ED 08 00 2A C0 01 00 40
+
+. 0 1000B22C 192
+. EF A6 10 28 EC E7 68 2A EC C6 10 2A C0 41 00 48 EC A5 08 2A C0 21 00 44 EF 60 03 32 EE E0 02 F2 C0 01 00 48 ED 24 F8 28 ED 43 F0 28 EC 00 07 32 EF 41 01 B2 ED A1 01 72 EC 2F 01 F2 D0 01 01 48 EF 02 07 72 C0 01 00 3C EE D1 02 72 EF 2F 02 32 EC 51 02 B2 ED 10 0A 38 C0 21 01 48 ED 60 DA FA EC B3 D1 7A ED 52 B2 BA EC F0 C9 FA EF 8E C7 3A ED 80 BB 38 EC D3 69 B8 ED 32 12 78 EF AE 0F 78 EC 84 F8 2A EC 63 F0 2A 7C 9C D5 2E 7F 9C 9A 14 D0 67 00 04 7D 9A 05 2E D1 66 00 04 7C C9 D5 2E D0 A5 00 04 7D 2B D5 2E D1 44 00 04 7D 0A D5 2E D0 E3 00 04 7F A8 D5 2E D3 9D 00 04 42 00 FD B4
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 36, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 36, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 36, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 36, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 36, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 36, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 36, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 36
+PASS: gsl_fft_real with signal_real_noise, n = 36, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 36, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 36, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 36, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 36
+PASS: gsl_fft_real_float with signal_real_noise, n = 36, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 36, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 36, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 36, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 36
+PASS: gsl_fft_complex_forward with signal_noise, n = 36, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 36, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 36, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 36, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 36, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 36, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 36, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 36, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 36, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 36, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 36
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 36, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 36, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 36, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 36, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 36, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 36, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 36, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 36, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 36, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 36, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 36
+PASS: gsl_fft_real with signal_real_noise, n = 36, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 36, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 36, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 36, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 36
+PASS: gsl_fft_real_float with signal_real_noise, n = 36, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 36, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 36, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 36, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 36
+PASS: gsl_fft_complex_forward with signal_noise, n = 36, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 36, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 36, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 36, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 36, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 36, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 36, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 36, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 36, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 36, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 36
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 36, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 36, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 36, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 36, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 36, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 36, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 36, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 36, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 36, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 36, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 36
+PASS: gsl_fft_real with signal_real_noise, n = 36, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 36, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 36, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 36, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 36
+PASS: gsl_fft_real_float with signal_real_noise, n = 36, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 36, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 36, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 37, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 37
+PASS: gsl_fft_complex_forward with signal_noise, n = 37, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 37, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 37, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 37, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 37, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 37, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 37, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 37
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 37, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 37, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 37, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 37, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 37, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 37, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 37, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 37
+PASS: gsl_fft_real with signal_real_noise, n = 37, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 37, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 37, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 37, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 37
+PASS: gsl_fft_real_float with signal_real_noise, n = 37, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 37, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 37, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 37, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 37
+PASS: gsl_fft_complex_forward with signal_noise, n = 37, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 37, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 37, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 37, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 37, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 37, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 37, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 37, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 37, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 37, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 37
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 37, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 37, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 37, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 37, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 37, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 37, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 37, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 37, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 37, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 37, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 37
+PASS: gsl_fft_real with signal_real_noise, n = 37, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 37, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 37, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 37, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 37
+PASS: gsl_fft_real_float with signal_real_noise, n = 37, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 37, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 37, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 37, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 37
+PASS: gsl_fft_complex_forward with signal_noise, n = 37, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 37, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 37, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 37, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 37, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 37, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 37, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 37, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 37, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 37, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 37
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 37, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 37, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 37, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 37, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 37, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 37, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 37, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 37, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 37, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 37, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 37
+PASS: gsl_fft_real with signal_real_noise, n = 37, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 37, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 37, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 37, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 37
+PASS: gsl_fft_real_float with signal_real_noise, n = 37, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 37, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 37, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 38, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 38
+PASS: gsl_fft_complex_forward with signal_noise, n = 38, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 38, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 38, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 38, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 38, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 38, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 38, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 38
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 38, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 38, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 38, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 38, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 38, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 38, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 38, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 38
+PASS: gsl_fft_real with signal_real_noise, n = 38, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 38, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 38, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 38, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 38
+PASS: gsl_fft_real_float with signal_real_noise, n = 38, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 38, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 38, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 38, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 38
+PASS: gsl_fft_complex_forward with signal_noise, n = 38, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 38, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 38, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 38, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 38, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 38, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 38, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 38, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 38, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 38, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 38
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 38, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 38, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 38, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 38, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 38, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 38, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 38, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 38, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 38, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 38, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 38
+PASS: gsl_fft_real with signal_real_noise, n = 38, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 38, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 38, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 38, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 38
+PASS: gsl_fft_real_float with signal_real_noise, n = 38, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 38, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 38, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 38, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 38
+PASS: gsl_fft_complex_forward with signal_noise, n = 38, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 38, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 38, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 38, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 38, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 38, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 38, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 38, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 38, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 38, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 38
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 38, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 38, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 38, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 38, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 38, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 38, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 38, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 38, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 38, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 38, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 38
+PASS: gsl_fft_real with signal_real_noise, n = 38, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 38, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 38, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 38, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 38
+PASS: gsl_fft_real_float with signal_real_noise, n = 38, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 38, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 38, stride = 3
+. 0 FE0C97C 144
+. 83 BE 0F 58 83 9E 0E 88 C9 1D 00 00 83 7E 0F 5C FC BF 40 2A C8 9C 00 00 C8 3B 00 00 83 5E 0E 84 FC 05 40 28 83 3E 0F 60 FD 64 00 72 CB BA 00 00 FD A4 00 32 FD 3F 00 28 FC 4B 00 32 FC 6D 00 32 FF C4 02 72 FD 03 00 32 C8 19 00 00 FF 62 02 72 FF 9E 02 72 FF 46 E8 2A FD BF 40 2A FD 5C 02 72 FD 9A D9 FA FC BF 68 28 FC 8C 57 FA FC 25 40 2A FD 64 08 2A FC 4D 58 2A FC 6D 10 28 FC 20 10 90 FF C3 58 2A FC FE 10 3A FE 07 10 00 41 92 00 50
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 39, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 39
+PASS: gsl_fft_complex_forward with signal_noise, n = 39, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 39, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 39, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 39, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 39, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 39, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 39, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 39
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 39, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 39, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 39, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 39, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 39, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 39, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 39, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 39
+PASS: gsl_fft_real with signal_real_noise, n = 39, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 39, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 39, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 39, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 39
+PASS: gsl_fft_real_float with signal_real_noise, n = 39, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 39, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 39, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 39, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 39
+PASS: gsl_fft_complex_forward with signal_noise, n = 39, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 39, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 39, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 39, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 39, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 39, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 39, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 39, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 39, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 39, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 39
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 39, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 39, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 39, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 39, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 39, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 39, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 39, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 39, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 39, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 39, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 39
+PASS: gsl_fft_real with signal_real_noise, n = 39, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 39, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 39, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 39, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 39
+PASS: gsl_fft_real_float with signal_real_noise, n = 39, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 39, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 39, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 39, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 39
+PASS: gsl_fft_complex_forward with signal_noise, n = 39, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 39, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 39, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 39, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 39, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 39, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 39, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 39, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 39, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 39, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 39
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 39, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 39, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 39, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 39, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 39, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 39, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 39, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 39, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 39, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 39, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 39
+PASS: gsl_fft_real with signal_real_noise, n = 39, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 39, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 39, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 39, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 39
+PASS: gsl_fft_real_float with signal_real_noise, n = 39, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 39, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 39, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 40, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 40
+. 0 FE0D52C 20
+. FE 1F E8 00 80 9E 0F 88 CB A4 00 00 FC 84 07 72 40 91 08 40
+
+. 0 FE0DD7C 8
+. 81 3E 0F B8 4B FF F7 C4
+
+. 0 FE0D544 20
+. C9 09 00 00 FC 1F 22 3A FC 81 00 2A FC 84 08 00 41 86 02 6C
+
+. 0 FE0DBD0 16
+. CB BD 00 00 FC 20 F0 90 FF 9E E8 00 41 9D 00 08
+
+. 0 FE0DBE4 12
+. FC 40 E0 90 38 61 00 10 48 01 76 01
+
+. 0 FE25858 8
+. FF 8C 68 00 40 9D 00 F4
+
+. 0 FE2589C 8
+. FF 83 F0 00 40 9D 00 60
+
+. 0 FE25900 80
+. FD 7E 60 28 83 C1 00 18 CB 01 00 20 CB 21 00 28 FC AB 08 2A CB 61 00 38 CB 81 00 40 CB A1 00 48 FC E5 10 2A CB C1 00 50 CB E1 00 58 FC 27 D0 2A FC 4C 08 2A FD 2C 10 28 D8 43 00 00 FF 49 08 2A DB 43 00 08 CB 41 00 30 38 21 00 60 4E 80 00 20
+
+. 0 FE0DBF0 12
+. C9 A1 00 18 FC 8D E8 00 40 85 02 C4
+
+. 0 FE0DBFC 20
+. 80 BE 0F 8C FF 1F E8 00 CB 85 00 00 FD AD 07 32 40 99 03 14
+
+. 0 FE0DF20 8
+. 81 3E 0F BC 4B FF F2 08
+
+. 0 FE0D12C 24
+. C8 29 00 00 FC 1F 68 7A C8 21 00 10 FF 41 00 2A FE 01 D0 00 40 92 04 70
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 40, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 40, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 40, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 40, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 40, stride = 1
+. 0 FE0D540 24
+. 81 3E 0F 9C C9 09 00 00 FC 1F 22 3A FC 81 00 2A FC 84 08 00 41 86 02 6C
+
+. 0 FE0DBE0 16
+. FC 20 F0 50 FC 40 E0 90 38 61 00 10 48 01 76 01
+
+. 0 FE0DC10 8
+. 81 3E 0F A0 4B FF F5 18
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 40, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 40, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 40
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 40, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 40, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 40, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 40, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 40, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 40, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 40, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 40
+PASS: gsl_fft_real with signal_real_noise, n = 40, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 40, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 40, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 40, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 40
+PASS: gsl_fft_real_float with signal_real_noise, n = 40, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 40, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 40, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 40, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 40
+PASS: gsl_fft_complex_forward with signal_noise, n = 40, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 40, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 40, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 40, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 40, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 40, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 40, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 40, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 40, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 40, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 40
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 40, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 40, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 40, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 40, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 40, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 40, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 40, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 40, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 40, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 40, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 40
+PASS: gsl_fft_real with signal_real_noise, n = 40, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 40, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 40, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 40, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 40
+PASS: gsl_fft_real_float with signal_real_noise, n = 40, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 40, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 40, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 40, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 40
+PASS: gsl_fft_complex_forward with signal_noise, n = 40, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 40, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 40, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 40, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 40, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 40, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 40, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 40, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 40, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 40, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 40
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 40, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 40, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 40, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 40, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 40, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 40, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 40, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 40, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 40, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 40, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 40
+PASS: gsl_fft_real with signal_real_noise, n = 40, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 40, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 40, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 40, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 40
+PASS: gsl_fft_real_float with signal_real_noise, n = 40, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 40, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 40, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 41, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 41
+PASS: gsl_fft_complex_forward with signal_noise, n = 41, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 41, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 41, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 41, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 41, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 41, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 41, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 41
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 41, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 41, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 41, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 41, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 41, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 41, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 41, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 41
+PASS: gsl_fft_real with signal_real_noise, n = 41, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 41, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 41, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 41, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 41
+PASS: gsl_fft_real_float with signal_real_noise, n = 41, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 41, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 41, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 41, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 41
+PASS: gsl_fft_complex_forward with signal_noise, n = 41, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 41, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 41, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 41, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 41, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 41, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 41, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 41, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 41, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 41, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 41
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 41, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 41, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 41, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 41, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 41, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 41, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 41, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 41, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 41, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 41, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 41
+PASS: gsl_fft_real with signal_real_noise, n = 41, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 41, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 41, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 41, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 41
+PASS: gsl_fft_real_float with signal_real_noise, n = 41, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 41, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 41, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 41, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 41
+PASS: gsl_fft_complex_forward with signal_noise, n = 41, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 41, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 41, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 41, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 41, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 41, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 41, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 41, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 41, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 41, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 41
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 41, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 41, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 41, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 41, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 41, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 41, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 41, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 41, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 41, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 41, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 41
+PASS: gsl_fft_real with signal_real_noise, n = 41, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 41, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 41, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 41, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 41
+PASS: gsl_fft_real_float with signal_real_noise, n = 41, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 41, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 41, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 42, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 42
+PASS: gsl_fft_complex_forward with signal_noise, n = 42, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 42, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 42, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 42, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 42, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 42, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 42, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 42
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 42, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 42, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 42, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 42, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 42, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 42, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 42, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 42
+. 0 100119F4 172
+. 81 41 00 2C 55 0B 18 38 39 6B 00 08 7D 28 52 14 7D 6B E9 D6 54 CA 18 38 39 4A 00 08 7C 1D 49 D6 55 29 18 38 39 29 00 08 7D 6B FC AE 54 EB 18 38 39 6B 00 08 54 00 18 38 7D 29 E9 D6 7D 9F 04 AE FC 07 03 32 7D A9 FC AE 7C 1D 41 D6 7D 08 2A 14 FD 47 03 72 FD A8 03 7A 54 00 18 38 7C 1F 04 AE 7D 38 39 D6 FD 88 53 38 7C E7 8A 14 FD 4B 68 28 7C 18 31 D6 FD 6B 68 2A FD 20 60 28 55 29 18 38 FD 40 50 50 7C C6 8A 14 FC 00 60 2A 7D 4A C1 D6 54 00 18 38 7C 19 05 AE 7D 6B C1 D6 7D 6A CD AE 7D 29 CD AE 7D 4B CD AE 42 00 FF 58
+
+PASS: gsl_fft_real with signal_real_noise, n = 42, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 42, stride = 1
+. 0 1000D160 168
+. 54 A9 18 38 54 8B 18 38 39 29 00 08 39 6B 00 08 7D 29 E1 D6 81 81 00 2C 54 C8 18 38 7D 46 62 14 55 47 18 38 7D 6B E1 D6 7D 29 EC AE 39 08 00 08 38 E7 00 08 7D 3C 29 D6 7D 0B EC AE 7C A5 1A 14 FD 89 40 2A FD 29 40 28 7C 1C 21 D6 55 29 18 38 7D 69 EC AE FD 46 03 32 7C 84 1A 14 FD 87 03 32 54 00 18 38 7D 57 51 D6 7D BD 04 AE FC 0B 68 28 7C 17 31 D6 FD 6B 68 2A 55 4A 18 38 FD 47 50 3A 7C C6 DA 14 FC 06 60 38 7D 08 B9 D6 54 00 18 38 7D 7A 05 AE 7C E7 B9 D6 7D 28 D5 AE 7C 0A D5 AE 7D 47 D5 AE 42 00 FF 5C
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 42, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 42, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 42
+. 0 10013A20 172
+. 81 41 00 2C 54 EB 10 3A 39 6B 00 04 54 A8 10 3A 7D 27 52 14 7D 6B F9 D6 54 CA 10 3A 39 08 00 04 39 4A 00 04 7C 1F 49 D6 55 29 10 3A 39 29 00 04 7D 6B EC 2E 54 00 10 3A 7D 29 F9 D6 7D BD 04 2E ED 87 03 72 7C 09 EC 2E 7C 1F 39 D6 7C E7 22 14 ED 88 60 3A EC 07 00 32 54 00 10 3A 7D 38 31 D6 ED 4B 60 28 7C C6 8A 14 ED A8 03 78 7C 1D 04 2E ED 6B 60 2A 7C 18 29 D6 FD 40 50 50 ED 20 68 28 55 29 10 3A EC 00 68 2A 7C A5 8A 14 7D 08 C1 D6 54 00 10 3A 7C 19 05 2E 7D 4A C1 D6 7D 68 CD 2E 7D 29 CD 2E 7D 4A CD 2E 42 00 FF 58
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 42, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 42, stride = 1
+. 0 1000F32C 168
+. 54 A9 10 3A 54 8B 10 3A 39 29 00 04 39 6B 00 04 7D 29 E1 D6 81 81 00 2C 54 C8 10 3A 7D 46 62 14 55 47 10 3A 7D 6B E1 D6 7D 29 FC 2E 39 08 00 04 38 E7 00 04 7D 3C 29 D6 7D 0B FC 2E 7C A5 1A 14 EC 09 40 2A ED 29 40 28 7C 1C 21 D6 55 29 10 3A 7D A9 FC 2E ED 46 00 32 EC 07 00 32 7C 84 1A 14 54 00 10 3A 7D 57 51 D6 7D 9F 04 2E ED 6D 60 28 7C 17 31 D6 ED AD 60 2A 55 4A 10 3A ED 47 52 FA 7C C6 DA 14 ED 66 02 F8 7D 08 B9 D6 54 00 10 3A 7D BA 05 2E 7C E7 B9 D6 7D 28 D5 2E 7D 6A D5 2E 7D 47 D5 2E 42 00 FF 5C
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 42, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 42, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 42
+PASS: gsl_fft_complex_forward with signal_noise, n = 42, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 42, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 42, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 42, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 42, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 42, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 42, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 42, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 42, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 42, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 42
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 42, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 42, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 42, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 42, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 42, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 42, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 42, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 42, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 42, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 42, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 42
+PASS: gsl_fft_real with signal_real_noise, n = 42, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 42, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 42, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 42, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 42
+PASS: gsl_fft_real_float with signal_real_noise, n = 42, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 42, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 42, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 42, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 42
+PASS: gsl_fft_complex_forward with signal_noise, n = 42, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 42, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 42, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 42, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 42, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 42, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 42, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 42, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 42, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 42, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 42
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 42, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 42, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 42, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 42, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 42, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 42, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 42, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 42, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 42, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 42, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 42
+PASS: gsl_fft_real with signal_real_noise, n = 42, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 42, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 42, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 42, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 42
+PASS: gsl_fft_real_float with signal_real_noise, n = 42, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 42, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 42, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 43, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 43
+PASS: gsl_fft_complex_forward with signal_noise, n = 43, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 43, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 43, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 43, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 43, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 43, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 43, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 43
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 43, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 43, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 43, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 43, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 43, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 43, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 43, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 43
+PASS: gsl_fft_real with signal_real_noise, n = 43, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 43, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 43, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 43, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 43
+PASS: gsl_fft_real_float with signal_real_noise, n = 43, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 43, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 43, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 43, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 43
+PASS: gsl_fft_complex_forward with signal_noise, n = 43, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 43, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 43, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 43, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 43, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 43, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 43, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 43, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 43, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 43, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 43
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 43, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 43, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 43, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 43, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 43, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 43, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 43, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 43, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 43, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 43, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 43
+PASS: gsl_fft_real with signal_real_noise, n = 43, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 43, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 43, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 43, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 43
+PASS: gsl_fft_real_float with signal_real_noise, n = 43, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 43, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 43, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 43, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 43
+PASS: gsl_fft_complex_forward with signal_noise, n = 43, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 43, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 43, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 43, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 43, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 43, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 43, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 43, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 43, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 43, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 43
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 43, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 43, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 43, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 43, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 43, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 43, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 43, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 43, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 43, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 43, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 43
+PASS: gsl_fft_real with signal_real_noise, n = 43, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 43, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 43, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 43, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 43
+PASS: gsl_fft_real_float with signal_real_noise, n = 43, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 43, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 43, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 44, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 44
+PASS: gsl_fft_complex_forward with signal_noise, n = 44, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 44, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 44, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 44, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 44, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 44, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 44, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 44
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 44, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 44, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 44, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 44, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 44, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 44, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 44, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 44
+PASS: gsl_fft_real with signal_real_noise, n = 44, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 44, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 44, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 44, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 44
+PASS: gsl_fft_real_float with signal_real_noise, n = 44, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 44, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 44, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 44, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 44
+PASS: gsl_fft_complex_forward with signal_noise, n = 44, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 44, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 44, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 44, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 44, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 44, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 44, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 44, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 44, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 44, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 44
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 44, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 44, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 44, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 44, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 44, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 44, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 44, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 44, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 44, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 44, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 44
+PASS: gsl_fft_real with signal_real_noise, n = 44, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 44, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 44, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 44, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 44
+PASS: gsl_fft_real_float with signal_real_noise, n = 44, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 44, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 44, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 44, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 44
+PASS: gsl_fft_complex_forward with signal_noise, n = 44, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 44, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 44, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 44, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 44, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 44, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 44, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 44, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 44, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 44, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 44
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 44, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 44, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 44, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 44, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 44, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 44, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 44, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 44, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 44, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 44, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 44
+PASS: gsl_fft_real with signal_real_noise, n = 44, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 44, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 44, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 44, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 44
+PASS: gsl_fft_real_float with signal_real_noise, n = 44, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 44, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 44, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 45, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 45
+PASS: gsl_fft_complex_forward with signal_noise, n = 45, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 45, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 45, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 45, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 45, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 45, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 45, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 45
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 45, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 45, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 45, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 45, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 45, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 45, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 45, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 45
+PASS: gsl_fft_real with signal_real_noise, n = 45, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 45, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 45, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 45, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 45
+PASS: gsl_fft_real_float with signal_real_noise, n = 45, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 45, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 45, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 45, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 45
+PASS: gsl_fft_complex_forward with signal_noise, n = 45, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 45, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 45, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 45, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 45, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 45, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 45, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 45, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 45, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 45, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 45
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 45, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 45, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 45, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 45, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 45, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 45, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 45, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 45, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 45, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 45, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 45
+PASS: gsl_fft_real with signal_real_noise, n = 45, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 45, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 45, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 45, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 45
+PASS: gsl_fft_real_float with signal_real_noise, n = 45, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 45, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 45, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 45, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 45
+PASS: gsl_fft_complex_forward with signal_noise, n = 45, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 45, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 45, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 45, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 45, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 45, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 45, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 45, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 45, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 45, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 45
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 45, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 45, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 45, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 45, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 45, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 45, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 45, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 45, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 45, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 45, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 45
+PASS: gsl_fft_real with signal_real_noise, n = 45, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 45, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 45, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 45, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 45
+PASS: gsl_fft_real_float with signal_real_noise, n = 45, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 45, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 45, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 46, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 46
+PASS: gsl_fft_complex_forward with signal_noise, n = 46, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 46, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 46, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 46, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 46, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 46, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 46, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 46
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 46, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 46, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 46, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 46, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 46, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 46, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 46, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 46
+PASS: gsl_fft_real with signal_real_noise, n = 46, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 46, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 46, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 46, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 46
+PASS: gsl_fft_real_float with signal_real_noise, n = 46, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 46, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 46, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 46, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 46
+PASS: gsl_fft_complex_forward with signal_noise, n = 46, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 46, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 46, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 46, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 46, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 46, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 46, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 46, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 46, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 46, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 46
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 46, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 46, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 46, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 46, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 46, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 46, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 46, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 46, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 46, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 46, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 46
+PASS: gsl_fft_real with signal_real_noise, n = 46, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 46, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 46, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 46, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 46
+PASS: gsl_fft_real_float with signal_real_noise, n = 46, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 46, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 46, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 46, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 46
+PASS: gsl_fft_complex_forward with signal_noise, n = 46, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 46, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 46, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 46, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 46, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 46, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 46, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 46, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 46, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 46, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 46
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 46, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 46, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 46, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 46, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 46, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 46, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 46, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 46, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 46, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 46, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 46
+PASS: gsl_fft_real with signal_real_noise, n = 46, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 46, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 46, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 46, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 46
+PASS: gsl_fft_real_float with signal_real_noise, n = 46, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 46, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 46, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 47, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 47
+PASS: gsl_fft_complex_forward with signal_noise, n = 47, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 47, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 47, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 47, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 47, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 47, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 47, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 47
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 47, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 47, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 47, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 47, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 47, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 47, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 47, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 47
+PASS: gsl_fft_real with signal_real_noise, n = 47, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 47, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 47, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 47, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 47
+PASS: gsl_fft_real_float with signal_real_noise, n = 47, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 47, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 47, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 47, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 47
+PASS: gsl_fft_complex_forward with signal_noise, n = 47, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 47, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 47, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 47, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 47, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 47, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 47, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 47, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 47, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 47, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 47
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 47, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 47, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 47, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 47, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 47, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 47, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 47, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 47, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 47, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 47, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 47
+PASS: gsl_fft_real with signal_real_noise, n = 47, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 47, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 47, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 47, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 47
+PASS: gsl_fft_real_float with signal_real_noise, n = 47, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 47, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 47, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 47, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 47
+PASS: gsl_fft_complex_forward with signal_noise, n = 47, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 47, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 47, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 47, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 47, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 47, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 47, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 47, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 47, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 47, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 47
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 47, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 47, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 47, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 47, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 47, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 47, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 47, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 47, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 47, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 47, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 47
+PASS: gsl_fft_real with signal_real_noise, n = 47, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 47, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 47, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 47, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 47
+PASS: gsl_fft_real_float with signal_real_noise, n = 47, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 47, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 47, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 48, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 48
+PASS: gsl_fft_complex_forward with signal_noise, n = 48, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 48, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 48, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 48, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 48, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 48, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 48, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 48
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 48, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 48, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 48, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 48, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 48, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 48, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 48, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 48
+PASS: gsl_fft_real with signal_real_noise, n = 48, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 48, stride = 1
+. 0 1000D734 384
+. 7C F8 FA 14 57 EB 18 38 54 E9 18 38 39 6B 00 08 39 29 00 08 7D 58 CA 14 7D 29 E1 D6 55 46 18 38 57 28 18 38 39 08 00 08 38 C6 00 08 7D 6B E1 D6 7D 69 EC AE FD 60 58 50 7C 1C C9 D6 7D 4B EC AE 7F 39 B2 14 FD 40 50 50 7D 3C F9 D6 54 00 18 38 7D 3D 04 AE 7F FF B2 14 80 01 00 30 7D 5C 51 D6 55 29 18 38 7C 29 EC AE 7D 63 02 14 7C AB 02 14 7C 85 02 14 7C FC 39 D6 55 4A 18 38 7C CA EC AE 54 69 18 38 55 6A 18 38 FC E6 08 28 39 29 00 08 7D 08 E1 D6 54 E7 18 38 7C A7 EC AE FC C6 08 2A 54 87 18 38 FD 89 28 28 39 4A 00 08 7C C6 E1 D6 7D A8 EC AE FD 29 28 2A 54 A8 18 38 FC 0D 58 28 39 08 00 08 FD AD 58 2A 38 E7 00 08 7D 06 EC AE 7C 17 19 D6 FC 60 38 28 7C 63 DA 14 FC 48 50 28 FD 08 50 2A FC 00 38 2A 7D 77 59 D6 FC 8C 10 2A 54 00 18 38 FD 6D 40 28 FC FE 00 32 7C B7 29 D6 55 6B 18 38 FC BC 02 F2 FC 39 01 32 7C 97 21 D6 54 A5 18 38 FF F9 00 F2 FC 1D 00 32 7D 29 B9 D6 54 84 18 38 FD 7B 02 F2 FD 49 30 28 FD 8C 10 28 7D 4A B9 D6 FC 7A 08 FA FC FD 3B 3A 7D 08 B9 D6 FC BB 2A BA FD 9E 03 38 7C E7 B9 D6 FD 5C 5A B8 FC 9A F9 38 FD 29 30 2A FD AD 40 2A 7D 3A 05 AE 7D A9 D5 AE 7D 8B D5 AE 7C EA D5 AE 7D 45 D5 AE 7C A8 D5 AE 7C 84 D5 AE 7C 67 D5 AE 42 00 FE 84
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 48, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 48, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 48
+PASS: gsl_fft_real_float with signal_real_noise, n = 48, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 48, stride = 1
+. 0 1000F918 384
+. 7C F8 EA 14 57 AB 10 3A 54 E9 10 3A 39 6B 00 04 39 29 00 04 7D 58 CA 14 7D 29 E1 D6 55 46 10 3A 57 28 10 3A 39 08 00 04 38 C6 00 04 7D 6B E1 D6 7D 49 FC 2E FD 40 50 50 7C 1C C9 D6 7D 2B FC 2E 7F 39 B2 14 FD 20 48 50 7D 3C E9 D6 54 00 10 3A 7D 7F 04 2E 7F BD B2 14 80 01 00 30 7D 5C 51 D6 55 29 10 3A 7C 29 FC 2E 7D 63 02 14 7C AB 02 14 7C 85 02 14 7C FC 39 D6 55 4A 10 3A 7C EA FC 2E 54 69 10 3A 55 6A 10 3A EC A7 08 28 39 29 00 04 7D 08 E1 D6 54 E7 10 3A 7C C7 FC 2E EC E7 08 2A 54 87 10 3A ED 8B 30 28 39 4A 00 04 7C C6 E1 D6 7D A8 FC 2E ED 6B 30 2A 54 A8 10 3A EC 0D 50 28 39 08 00 04 ED AD 50 2A 38 E7 00 04 7D 06 FC 2E 7C 17 19 D6 EC 40 28 28 7C 63 DA 14 EC 88 48 28 ED 08 48 2A EC 00 28 2A 7D 77 59 D6 EC 6C 20 2A 54 00 10 3A ED 4D 40 28 ED 2B 38 28 ED 8C 20 28 7C B7 29 D6 EC DF 00 32 55 6B 10 3A EC BD 02 B2 EC 3A 00 F2 EC 9A 00 B2 7C 97 21 D6 EC 1E 00 32 54 A5 10 3A ED 5C 02 B2 EC DE 33 3A 7D 29 B9 D6 EC BC 2A 7A EC 5B 08 BA 54 84 10 3A ED 9F 03 38 ED 3D 52 78 7D 4A B9 D6 EC 7B 20 F8 ED 6B 38 2A ED AD 40 2A 7D 08 B9 D6 7D 7A 05 2E 7D A9 D5 2E 7D 8B D5 2E 7C CA D5 2E 7C E7 B9 D6 7D 25 D5 2E 7C A8 D5 2E 7C 64 D5 2E 7C 47 D5 2E 42 00 FE 84
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 48, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 48, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 48
+PASS: gsl_fft_complex_forward with signal_noise, n = 48, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 48, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 48, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 48, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 48, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 48, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 48, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 48, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 48, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 48, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 48
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 48, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 48, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 48, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 48, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 48, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 48, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 48, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 48, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 48, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 48, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 48
+PASS: gsl_fft_real with signal_real_noise, n = 48, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 48, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 48, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 48, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 48
+PASS: gsl_fft_real_float with signal_real_noise, n = 48, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 48, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 48, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 48, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 48
+PASS: gsl_fft_complex_forward with signal_noise, n = 48, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 48, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 48, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 48, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 48, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 48, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 48, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 48, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 48, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 48, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 48
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 48, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 48, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 48, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 48, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 48, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 48, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 48, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 48, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 48, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 48, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 48
+PASS: gsl_fft_real with signal_real_noise, n = 48, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 48, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 48, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 48, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 48
+PASS: gsl_fft_real_float with signal_real_noise, n = 48, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 48, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 48, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 49, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 49
+. 0 FE0D6D8 12
+. C8 5F 00 00 FF 9F 10 00 40 9D 01 24
+
+. 0 FE0D804 16
+. FC 60 F8 50 C8 BC 00 00 FC 43 28 28 4B FF FE DC
+
+. 0 FE0D6EC 24
+. C9 BD 00 00 38 61 00 10 FC 22 68 28 FC 82 08 28 FC 44 68 28 48 01 8B DD
+
+. 0 FE262DC 12
+. 94 21 FF D0 7C 08 02 A6 48 04 CD CD
+
+. 0 FE262E8 68
+. 93 C1 00 28 7F C8 02 A6 93 E1 00 2C 90 01 00 34 7C 7F 1B 78 FD 80 08 90 38 61 00 10 81 7E 20 AC FD 60 10 90 80 9E 20 58 C9 AB 00 00 81 3E 20 B0 FF 81 68 00 C9 44 00 00 C8 09 00 00 FC 0A 00 32 41 9D 00 0C
+
+. 0 FE2632C 24
+. FD 80 08 50 FD 60 10 50 FC 00 60 00 FC 20 60 90 FC 40 58 90 41 81 00 D0
+
+. 0 FE26410 4
+. 4B FF F6 5D
+
+. 0 FE25A6C 12
+. 94 21 FF 90 7C A8 02 A6 48 04 D6 3D
+
+. 0 FE25A78 400
+. 93 C1 00 28 7F C8 02 A6 DB E1 00 68 DB 81 00 50 DB 21 00 38 7C A8 03 A6 80 9E 20 98 81 1E 20 A4 C8 04 00 00 C9 68 00 00 FC C0 08 2A DB 01 00 30 DB C1 00 60 DB A1 00 58 FF E6 00 28 DB 61 00 48 C8 88 00 00 81 9E 20 90 FF 81 F8 28 D8 C1 00 08 DB 41 00 40 93 E1 00 2C FF FC 10 2A 83 FE 20 8C 80 FE 20 A8 81 21 00 08 81 41 00 0C FD 5F 02 F2 80 DE 20 88 81 7E 20 94 55 40 28 34 FD 1C F8 28 93 01 00 10 7D 20 5A 14 93 21 00 14 FC BF 50 28 93 41 00 18 FF 88 10 2A C9 1F 00 00 93 61 00 1C 7C 7A 1B 78 FD 25 50 2A 93 81 00 20 FF 7F 07 32 93 A1 00 24 80 9E 20 84 FD BF 48 28 83 3E 20 7C FF 09 02 72 83 BE 20 78 83 7E 20 74 FF 29 03 72 83 9E 20 6C 81 5E 20 68 FC 3B D8 2A 80 7E 20 64 7F 6B 04 AE FC 59 C8 2A 81 7E 20 70 CB 29 00 18 80 BE 20 60 FF D8 10 2A 83 1E 20 5C FF B8 F0 28 CB 09 00 08 FD 9D 10 2A C8 4C 00 00 CB A9 00 10 FD 42 01 32 81 3E 20 80 FC 6D 63 7A FC A2 50 28 FC E3 08 2A FD 85 50 2A FC 7E 38 2A FC C3 01 32 C8 86 00 00 FF 5E 18 28 FD 63 30 28 FF DA 38 2A C8 E7 00 00 FC 2B 30 2A FD 62 60 28 FC 02 07 B2 FC A3 08 28 FF 41 03 32 FD 25 03 32 FD 08 00 FA FD A1 4A FA FC 3A 68 2A FC DA 08 28 FC 06 68 2A FC 45 02 FA FD 82 40 2A FC A1 60 2A FF 85 38 00 FD 41 28 28 FD 24 28 2A FD A0 28 90 FC CA 60 2A
+
+. 0 FE25C08 4
+. 41 9D 00 08
+
+. 0 FE25C0C 12
+. FD A0 28 50 FC 04 38 00 40 81 06 BC
+
+. 0 FE25C18 8
+. FF 8D 20 00 40 9D 06 98
+
+. 0 FE262B4 28
+. C9 66 00 00 C8 44 00 00 FD 0B 48 28 FD 48 28 2A FC AA 30 2A FC C5 10 2A 4B FF F9 6C
+
+. 0 FE25C38 124
+. C8 48 00 00 FD 69 30 2A C8 27 00 00 FD 83 00 B2 FD 4B 00 B2 C8 49 00 00 FD A3 60 28 FC A9 58 28 FF 4B 50 28 FC ED 60 2A FD 25 30 2A FC 9A 50 2A FF 43 38 28 FD 1E 02 F2 FC DA 01 32 FC AB 20 28 FC 07 01 32 FD A7 31 7A FC 83 42 7A FC C0 68 2A FD 40 30 28 FD 8A 68 2A FC FA 61 7A FC 07 20 2A FC A6 00 2A FC 85 08 00 FD 26 28 28 FC 80 28 90 FC C2 28 2A FC E9 00 2A 41 85 00 08
+
+. 0 FE25CB8 8
+. FF 02 08 00 40 99 05 EC
+
+. 0 FE262A8 12
+. FC 20 10 50 FF 84 08 00 4B FF FA 14
+
+. 0 FE25CC4 4
+. 40 9D 05 C8
+
+. 0 FE2628C 28
+. C8 49 00 00 CB 59 00 00 FD 82 30 28 FD AC 28 2A FD 4D 38 2A FC AA D0 2A 4B FF FA 3C
+
+. 0 FE25CE0 204
+. C8 E8 00 00 FD 46 28 2A C8 47 00 00 FD 83 01 F2 FC 8A 01 F2 FF 43 60 28 FC C6 50 28 FC 2A 20 28 FD BA 60 2A FD 1E 02 B2 FC 01 20 2A FC 23 68 28 FD 26 28 2A FD 4A 00 28 FD 61 00 32 FF 4D 00 32 FC 8D 5A BA FC C3 42 7A FD 7A 20 2A FD 1F 01 F2 FC BA 58 28 FD 9F 40 28 FC 05 20 2A FD 8C 40 2A FD A1 02 BA FD 1F 60 28 FD 2D 30 2A FD AB 48 2A FC 8D 01 F2 FF 4B 68 28 FC BC 03 72 FC 2D 20 28 FD 7A 48 2A FC 01 20 2A FC 3F 2A FA FC C8 00 32 FC 8D 00 28 FC EC 00 32 FD 4C 31 3A FC C7 50 2A FF 47 30 28 FD 3A 50 2A FC A8 49 3A FC E5 08 2A FC 26 38 2A FF 81 10 00 FC 06 08 28 FC 81 F8 2A FC A0 08 90 FC C0 38 2A 41 9D 00 08
+
+. 0 FE25DB0 8
+. FC 1F 10 00 40 81 04 CC
+
+. 0 FE26280 12
+. FC 40 F8 50 FF 85 10 00 4B FF FB 34
+
+. 0 FE25DBC 4
+. 40 9D 04 B0
+
+. 0 FE2626C 20
+. FD BF 20 28 FD 0D 08 2A FD 28 30 2A FC 29 E0 2A 4B FF FB 54
+
+. 0 FE25DD0 132
+. C8 C8 00 00 C9 5D 00 00 FD A3 01 B2 C9 1B 00 00 C8 A7 00 00 FC EA 01 B2 C8 CB 00 00 FC 43 68 28 FD 8A 07 B2 FF 42 68 2A FF EA 38 28 FC 48 60 FA FC 1F 38 2A FD 83 D0 28 FF E4 08 2A FD 4A 00 28 FD 6C 00 32 FF 9A 00 32 FD BA 5A BA FD 24 F8 28 FC 9C 68 2A FF 49 08 2A FC FC 20 28 FC 07 68 2A FC 2C 02 BA FF 81 10 2A FC E4 E0 2A FC 87 28 00 FD 64 38 28 FD 26 38 2A FD A0 38 90 FD 0B E0 2A 41 85 00 08
+
+. 0 FE25E54 12
+. FD A0 38 50 FF 06 28 00 40 99 04 04
+
+. 0 FE25E60 8
+. FF 8D 30 00 40 9D 03 E0
+
+. 0 FE26244 28
+. C8 CB 00 00 C9 5C 00 00 FD A6 48 28 FC 4D 38 2A FD 82 40 2A FC EC 50 2A 4B FF FC 24
+
+. 0 FE25E80 124
+. C8 A8 00 00 FD 69 38 2A C8 87 00 00 FD 83 01 72 FD 4B 01 72 C8 AA 00 00 FC C3 60 28 FD 29 58 28 FC 4B 50 28 FD A6 60 2A FF 9E 02 F2 FC 02 50 2A FD 83 68 28 FC 29 38 2A FD 6B 00 28 FD 0C 00 32 FC CD 00 32 FC 4D 42 FA FD 23 E0 7A FD 06 10 2A FD 46 40 28 FC 0A 10 2A FC EC 02 FA FC 27 48 2A FC E8 08 2A FF 87 20 00 FF 88 38 28 FD 25 38 2A FC C0 38 90 FD 1C 08 2A 41 9D 00 08
+
+. 0 FE25F00 8
+. FC 05 20 00 40 81 03 34
+
+. 0 FE26238 12
+. FC 80 28 50 FF 86 20 00 4B FF FC CC
+
+. 0 FE25F0C 4
+. 40 9D 03 10
+
+. 0 FE2621C 28
+. C9 8A 00 00 C9 63 00 00 FC CC 48 28 FD 46 38 2A FC 4A 40 2A FC E2 58 2A 4B FF FC F4
+
+. 0 FE25F28 124
+. C8 A8 00 00 FD 69 38 2A C8 87 00 00 FC C3 01 72 FD 4B 01 72 C8 A5 00 00 FD A3 30 28 FD 29 58 28 FC 4B 50 28 FF 8D 30 2A FC 3E 02 F2 FC 02 50 2A FC 43 E0 28 FD 89 38 2A FD 6B 00 28 FD 02 00 32 FC DC 00 32 FD BC 42 FA FD 23 0B 3A FD 06 68 2A FD 46 40 28 FC 0A 68 2A FC E2 02 FA FC 27 48 2A FC E8 08 2A FC 87 20 00 FF 88 38 28 FD 25 38 2A FC C0 38 90 FD 1C 08 2A 41 85 00 08
+
+. 0 FE25FA4 12
+. FC C0 38 50 FF 05 20 00 40 99 02 64
+
+. 0 FE25FB0 8
+. FF 86 28 00 40 9D 02 40
+
+. 0 FE261F4 28
+. C8 C5 00 00 C9 78 00 00 FD A6 48 28 FD 4D 38 2A FC 4A 40 2A FC E2 58 2A 4B FF FD C4
+
+. 0 FE25FD0 288
+. C9 48 00 00 FC A9 38 2A CB 87 00 00 FD 83 02 B2 FD 05 02 B2 FD 63 60 28 FC 9E 01 72 FC CB 60 2A FC 25 40 28 FD 29 28 28 FF C1 40 2A FC 23 30 28 FC 49 38 2A FD 65 F0 28 FC E1 07 B2 FC 06 07 B2 FD A6 3A FA FD 23 20 BA FC E0 68 2A FC DB 02 B2 FC 60 38 28 FC 9F 02 B2 FD 83 68 2A FC 5D 02 B2 FC A1 62 FA FD 1B 30 28 FF DF 20 28 FC A5 48 2A FD 88 30 2A FD 7E 20 2A FD 27 28 2A FC 3D 10 28 FC DB 60 28 FD 49 02 B2 FD A1 10 2A FC 66 02 F2 FC 09 50 28 FC 5F 58 28 FC 3D 68 28 FC 80 50 2A FC 6C 18 BA FD 49 20 28 FF CC 02 F2 FD 0A 03 72 FD A4 03 72 FC 04 40 7A FD 67 48 28 FD 1E 18 2A FC 8D 00 2A FC EB 28 2A FD 9E 40 28 FC AD 20 28 FD 78 07 F2 FF CC 18 2A FC E7 07 72 FD A5 00 2A FD 9B 5E BA FC A6 F0 BA FC 6A 68 7A FF E9 3E 7A FF 65 60 2A FF C3 F8 2A FC 68 D8 2A FD A4 F0 2A FF 83 E0 00 FF 48 18 28 FF 04 68 28 FD 63 68 2A FC 5A D8 2A FC 38 F0 2A FD 80 18 90 41 9D 00 08
+
+. 0 FE260F0 12
+. FD 80 18 50 FC 0D E0 00 40 81 00 F0
+
+. 0 FE260FC 8
+. FF 8C 68 00 40 9D 00 D4
+
+. 0 FE26104 48
+. FF 43 58 28 FF 7A 68 2A FF 1B 08 2A FC 58 10 2A FC 2B 10 2A C9 A7 00 00 FC 60 E8 90 FC 9D 68 00 FC 8B 08 28 FD 9D 08 28 FC 44 10 2A 41 85 00 08
+
+. 0 FE26138 8
+. FF 01 68 00 40 99 00 78
+
+. 0 FE261B4 12
+. FD A0 08 50 FF 83 68 00 41 BD FF 8C
+
+. 0 FE26148 108
+. FC DD 60 28 FC 66 08 28 FF A3 10 28 FC 3D C8 2A FD 2C 08 2A 83 01 00 10 83 21 00 14 83 61 00 1C FC 4C 48 28 D9 3A 00 00 83 81 00 20 83 A1 00 24 FF 22 08 2A 83 C1 00 28 83 E1 00 2C CB 01 00 30 DB 3A 00 08 CB 41 00 40 83 41 00 18 CB 21 00 38 CB 61 00 48 CB 81 00 50 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 FE26414 4
+. 4B FF FF 7C
+
+. 0 FE26390 40
+. C8 E1 00 18 C9 01 00 10 80 61 00 34 D9 1F 00 00 D8 FF 00 08 7C 68 03 A6 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 FE0D704 28
+. 80 9E 0F 8C C8 21 00 10 C9 21 00 18 C8 C4 00 00 FD 09 09 BA FC 01 40 00 41 82 00 20
+
+. 0 1000886C 400
+. 7C E8 02 A6 81 21 01 0C 80 C1 01 08 7C 14 72 14 7D 6C A2 14 7C 00 C9 D6 7D 07 A2 14 7E F5 FC AE 7C E9 A2 14 81 21 01 10 7D 46 A2 14 7C C9 A2 14 7D 6B C9 D6 7D 35 FA 14 54 00 20 36 CA C9 00 08 7D 20 FA 14 7C BF 04 AE 7C E7 C9 D6 55 6B 20 36 C8 E9 00 08 7C AB FA 14 7C CB FC AE 7F 77 D2 14 C9 05 00 08 3A 94 00 01 7C C6 C9 D6 54 E7 20 36 7D 67 FA 14 7C 87 FC AE C8 6B 00 08 7D 76 82 14 FD 46 20 28 80 01 01 14 7D 4A C9 D6 54 C6 20 36 7D 26 FA 14 7D 86 FC AE C9 69 00 08 FD 28 18 28 FF 25 60 28 80 C1 01 1C 7D 08 C9 D6 55 4A 20 36 7C 2A FC AE 7C 6A FA 14 C8 43 00 08 FF 07 58 28 FC C6 20 2A 7D 40 B2 14 55 08 20 36 FD 08 18 2A 7D A8 FC AE 7C 88 FA 14 C8 04 00 08 FC A5 60 2A FF ED 08 2A C8 61 00 F8 FC E7 58 2A 80 01 01 18 FF C0 10 2A 7C E6 B2 14 FD AD 08 28 81 21 01 20 FC 00 10 28 80 C1 01 00 FD 86 28 2A 7D 00 B2 14 FD 68 38 2A 7C 09 B2 14 FC 2A C8 28 7D 26 B2 14 FC 49 C0 28 7D 6B C1 D6 FF A5 F8 28 7E 73 CA 14 FF 87 F0 28 7E B5 8A 14 FC 23 00 72 3A D6 00 01 7D 4A C1 D6 55 6B 20 36 FC 43 00 B2 C8 61 00 F0 7C CB D2 14 FF 6D 50 28 FF 40 48 28 7D 08 C1 D6 FC A6 28 28 55 4A 20 36 FC E8 38 28 7C AA D2 14 FF B2 07 72 7C E7 C1 D6 55 08 20 36 FE F7 60 2A 7C 88 D2 14 FE D6 58 2A FC DF 30 28 FD 1E 40 28
+
+. 0 100089FC 400
+. 7C 00 C1 D6 FD 4D 50 2A 54 E7 20 36 FD 20 48 2A 7C 67 D2 14 FD B9 68 28 FC 18 00 28 7D 29 C1 D6 FF 92 07 32 54 00 20 36 7F A0 D2 14 FE F7 F8 2A FF 63 06 F2 55 29 20 36 7F 89 D2 14 FF 43 06 B2 7E F7 D5 AE 7E F7 92 14 FE D6 F0 2A FC D1 01 B2 FD 11 02 32 FC B0 01 72 FD AE 03 72 FC 0E 00 32 FD 8C F8 2A FD 6B F0 2A FD 4A C8 2A FD 29 C0 2A CB 01 00 B8 FF E0 E8 50 FC 60 08 50 FC 80 10 50 FC F0 01 F2 FD 4F 02 B2 FD 2F 02 72 FD 93 BB 3A CA E1 00 D8 FD 73 B2 FA FF C0 E0 50 FF FF 28 28 FC 63 68 28 FC 84 00 28 FF BD 30 2A FF 9C 40 2A FD AD D8 2A FC 00 D0 2A FC A5 30 28 FC 42 D0 28 FF DE 38 28 FC 21 D8 28 FC E7 40 28 FC 84 48 2A FD AD 50 2A FC 00 48 2A FF EC F8 2A FF AC E8 2A FF 8B E0 2A FC 42 48 2A FD 8C 28 2A FC 63 50 2A FC 21 50 2A FF CB F0 2A FD 6B 38 2A FC BF 20 28 FC FD 00 28 FC DC 68 2A FF BD 00 2A C8 01 00 C8 FF 9C 68 28 C9 A1 00 A8 FD 4C 10 2A FF FF 20 2A C8 81 00 98 FD 8C 10 28 C8 41 00 D8 FD 2B 08 28 FD 1E 18 2A FD 6B 08 2A FF 64 07 72 FF DE 18 28 C8 61 00 E8 FE A2 01 72 FC 44 07 32 C8 81 00 B8 FF 38 03 32 FE 83 01 F2 C8 61 00 C8 DA DB 00 08 FF 00 02 B2 FC 04 02 F2 FC 97 02 32 CA E1 00 E8 FF 4D 07 F2 FC 2D 07 B2 FD A3 02 72 FC 77 01 B2 CA E1 00 90 FF 97 DF 3A CA E1 00 B0 CB 61 00 A0
+
+. 0 10008B8C 132
+. FD 77 CA FA CB 21 00 C0 CA E1 00 90 FF DB D7 BA CB 41 00 D0 CB 61 00 E0 FD 39 C2 7A CB 21 00 B0 CB 01 00 A0 FD 1A AA 3A CB 41 00 C0 FC DB A1 BA CB 61 00 D0 FD 99 03 38 C8 01 00 E0 FF B7 17 78 FF F8 0F F8 FD 5A 6A B8 7F AB D5 AE DB 86 00 08 FC BB 21 78 7F EA D5 AE DB C5 00 08 FC E0 19 F8 7D 88 D5 AE D9 64 00 08 7D 47 D5 AE D9 23 00 08 7C BA 05 AE D9 1D 00 08 7C E9 D5 AE D8 DC 00 08 42 00 FC 60
+
+PASS: gsl_fft_complex_forward with signal_noise, n = 49, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 49, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 49, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 49, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 49, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 49, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 49, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 49
+. 0 1000B6DC 400
+. 81 01 00 AC 7C E8 02 A6 81 21 00 B0 7C 13 72 14 7D 48 9A 14 7C 00 C9 D6 7D 09 9A 14 81 21 00 B4 7D 67 9A 14 CA E1 00 98 7C E9 9A 14 81 21 00 B8 7D 6B C9 D6 54 00 18 38 7C C9 9A 14 7C BF 04 2E 7D 35 FA 14 7E 95 FC 2E C2 A9 00 04 7D 20 FA 14 7C E7 C9 D6 55 6B 18 38 C0 E9 00 04 7C AB FA 14 7C CB FC 2E 7F B7 82 14 C1 45 00 04 7F 76 D2 14 7C C6 C9 D6 54 E7 18 38 7D 67 FA 14 7C 87 FC 2E C0 6B 00 04 3A 73 00 01 ED 86 20 28 80 01 00 C0 7D 4A C9 D6 54 C6 18 38 7D 26 FA 14 7D 26 FC 2E C1 09 00 04 ED 6A 18 28 EF A5 48 28 80 E1 00 C4 7D 08 C9 D6 55 4A 18 38 7C 6A FA 14 7C 2A FC 2E C0 43 00 04 EF 87 40 28 EC A5 48 2A 81 21 00 CC 55 08 18 38 EC C6 20 2A 7C 88 FA 14 7D A8 FC 2E C0 04 00 04 EC E7 40 2A EF 6D 08 2A 81 01 00 C8 EF 40 10 2A 81 41 00 A8 ED 4A 18 2A 7F 80 BA 14 EF 26 28 2A 7C 07 BA 14 EC 00 10 28 7C A8 BA 14 EC 4B E0 28 7C 89 BA 14 EF 0A 38 2A 7C 6A BA 14 ED AD 08 28 7F BD C1 D6 EC 2C E8 28 7E 94 CA 14 EC 85 D8 28 7E B5 8A 14 EC 67 D0 28 3A F7 00 01 ED 39 D8 2A 7F 9C C1 D6 EF C0 58 28 57 BD 18 38 EC A6 28 28 7D 3D D2 14 EC DB 30 28 ED 60 58 2A 7C 00 C1 D6 EC 1C 00 28 57 9C 18 38 7D 7C D2 14 ED 18 D0 2A EF ED 60 28 7C A5 C1 D6 EC EA 38 28 54 00 18 38 ED 5A 50 28 7D 40 D2 14 ED 8D 60 2A
+
+. 0 1000B86C 400
+. 7C 84 C1 D6 ED BD 68 28 54 A5 18 38 7D 05 D2 14 ED 6B E0 2A 7C 63 C1 D6 54 84 18 38 7C E4 D2 14 54 63 18 38 FC 4E 00 B2 7C C3 D2 14 FC 97 01 32 FC 77 00 F2 CA E1 00 A0 ED 8C E8 2A FC 2E 00 72 FD 33 02 72 FC D2 01 B2 FC 10 00 32 FF CF 07 B2 FC B7 01 72 FC F7 01 F2 FC 40 10 18 EE 94 C8 2A FD 13 02 32 FD 52 02 B2 FD B0 03 72 FF EF 07 F2 FC 80 20 18 FC 60 18 18 FC 20 08 18 EE B5 C0 2A FD 71 02 F2 EE 94 D8 2A FC C0 30 18 FF C0 F0 18 FC 00 00 18 FF 60 10 50 FD 91 03 32 FD 20 48 18 EE B5 D0 2A FD 40 50 18 FF E0 F8 18 FC A0 28 18 FC E0 38 18 FD A0 68 18 FF A0 20 50 FF 80 18 50 FF 40 08 50 FD 00 40 18 FD 60 58 18 ED 34 48 2A EF 7B 00 28 EC 84 30 2A EC 00 F0 2A FD 80 60 18 ED 15 40 2A EF BD 28 28 EF 9C 38 28 EC A5 30 28 EC E7 50 28 EF 5A 68 28 EC 21 F8 28 EC 42 F0 28 C3 C1 00 70 EC 63 50 2A ED AD F8 2A C3 E1 00 78 EC 00 58 2A EC 89 20 2A EF 7B 58 2A ED AD 60 2A EC 21 60 2A EC 42 58 2A EF A9 E8 2A EF 88 E0 2A EC 68 18 2A ED 29 28 2A ED 08 38 2A EF 5A 60 2A EC E4 00 28 EC BD D8 28 ED 68 08 28 ED 89 10 2A EC C3 68 2A EC 84 00 2A C0 01 00 80 ED 29 10 28 C0 41 00 90 ED 08 08 2A C0 21 00 88 EC 63 68 28 EF BD D8 2A C3 61 00 68 ED 5C D0 2A EC 42 01 F2 EF 9C D0 28 EE E0 03 32 EF 5B 01 32 D0 41 01 48
+
+. 0 1000B9FC 208
+. ED BB 00 F2 C3 61 00 88 EF 1F 02 72 EE C1 01 72 EC 3F 02 32 EF E0 02 F2 C0 01 00 90 EF 3E 07 72 EC 5E 07 32 EF DB 02 B2 EF 60 01 B2 C0 01 00 64 EC 60 D0 FA C3 41 00 6C C0 01 00 74 EF 9A CF 3A C3 41 00 7C ED 00 C2 3A C0 01 00 84 ED 7A BA FA C2 E1 00 8C C3 41 01 48 ED 40 B2 BA C0 01 00 64 EC D7 D1 BA C3 41 00 7C EC 80 69 38 C2 E1 00 74 C1 A1 00 6C ED 9A FB 38 C0 01 00 8C ED 37 0A 78 C3 E1 00 84 EF AD 17 78 EC E0 D9 F8 7E 96 D5 2E EC BF F1 78 D2 BB 00 04 7E D6 92 14 7C 9D D5 2E D0 69 00 04 7F BC D5 2E D3 8B 00 04 7D 3A 05 2E D1 0A 00 04 7D 85 D5 2E D1 68 00 04 7C A4 D5 2E D1 47 00 04 7C E3 D5 2E D0 C6 00 04 42 00 FC 14
+
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 49, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 49, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 49, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 49, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 49, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 49, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 49, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 49
+. 0 1001101C 52
+. 7C 18 19 D6 C8 D7 00 00 C8 F6 00 00 38 E0 00 00 39 43 FF FF 7F 6B DB 78 54 04 18 38 2F 07 00 00 C9 36 00 00 C9 57 00 00 FD 00 48 90 FD 60 48 90 41 9A 00 14
+
+PASS: gsl_fft_real with signal_real_noise, n = 49, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 49, stride = 1
+. 0 1000CB9C 24
+. 38 1B 00 01 39 E0 00 01 54 0B F8 7E 7C 08 03 A6 2B 8B 00 01 40 9D 01 8C
+
+. 0 1000CBB4 24
+. 83 3E 81 8C 7D 74 5B 78 83 1E 81 90 3A 40 00 00 7F 92 70 40 40 9C 01 60
+
+. 0 1000CBCC 40
+. 20 CF 00 00 7E C6 79 14 55 EC 08 3C 3A 20 00 00 3A A0 00 00 38 60 00 00 C8 B9 00 00 7F 83 F8 40 C8 D8 00 00 40 9C 01 24
+
+. 0 1000CBF4 40
+. 7D 31 62 14 7D 74 78 50 38 A9 FF FF 38 8B 00 01 2C 83 00 00 C9 18 00 00 C9 39 00 00 FC E0 40 90 FD 40 40 90 41 86 00 18
+
+. 0 1000CC30 12
+. 39 00 00 00 7F 88 F8 40 40 9C 00 80
+
+. 0 1000CC3C 48
+. 7C 15 D9 D6 7F E9 03 A6 57 67 08 3C 7D 20 62 14 7C CC 00 50 39 49 FF FF 2F 88 00 00 55 49 18 38 39 69 00 08 7D 28 F8 50 7F 09 40 40 41 9E 00 14
+
+. 0 1000CC7C 4
+. 40 99 03 9C
+
+. 0 1000CC80 56
+. 7C 1C 51 D6 7D 2B E1 D6 54 00 18 38 7D 9D 04 AE 7D 69 EC AE FD AA 03 32 39 08 00 01 7D 4A 3A 14 FC 0A 02 F2 FD A9 6A FA FC 09 03 38 FC E7 68 2A FD 08 00 2A 42 00 FF A0
+
+. 0 1000CC54 24
+. 2F 88 00 00 55 49 18 38 39 69 00 08 7D 28 F8 50 7F 09 40 40 41 9E 00 14
+
+. 0 1000CC6C 20
+. FC 06 02 B2 FD A6 02 72 FD 25 02 78 FD 45 6A BA 40 99 03 9C
+
+. 0 1000D018 52
+. 7D 29 D9 D6 55 29 08 3C 7D 26 4A 14 39 29 FF FF 55 2B 18 38 7D 3C 49 D6 39 6B 00 08 7D 6B E1 D6 55 29 18 38 7D 89 EC AE 7C 0B EC AE FD 60 00 50 4B FF FC 4C
+
+. 0 1000CC94 36
+. FD AA 03 32 39 08 00 01 7D 4A 3A 14 FC 0A 02 F2 FD A9 6A FA FC 09 03 38 FC E7 68 2A FD 08 00 2A 42 00 FF A0
+
+. 0 1000CCB8 16
+. 7C 00 00 26 54 00 3F FE 7E C7 03 79 41 82 03 88
+
+. 0 1000CCC8 76
+. C9 39 00 00 C9 58 00 00 FD AA 02 32 54 A9 18 38 7C 17 29 D6 39 29 00 08 FC 0A 01 F2 38 63 00 01 7F 83 F8 40 FD A9 69 FA 7D 74 22 14 7D 29 B9 D6 54 00 18 38 FC 09 02 38 38 8B FF FF 7C A5 9A 14 7C 1A 05 AE 7D A9 D5 AE 41 9C FE F4
+
+. 0 1000CC04 24
+. 2C 83 00 00 C9 18 00 00 C9 39 00 00 FC E0 40 90 FD 40 40 90 41 86 00 18
+
+. 0 1000CC1C 32
+. FC 06 07 B2 FD A6 07 72 FC 05 07 7A FC A5 6F B8 FC C0 00 90 39 00 00 00 7F 88 F8 40 40 9C 00 80
+
+. 0 1000D04C 20
+. 54 89 20 36 7D 29 82 14 C9 49 FF F8 C9 29 FF F0 4B FF FC 74
+
+. 0 1000CCD0 68
+. FD AA 02 32 54 A9 18 38 7C 17 29 D6 39 29 00 08 FC 0A 01 F2 38 63 00 01 7F 83 F8 40 FD A9 69 FA 7D 74 22 14 7D 29 B9 D6 54 00 18 38 FC 09 02 38 38 8B FF FF 7C A5 9A 14 7C 1A 05 AE 7D A9 D5 AE 41 9C FE F4
+
+. 0 1000CD14 20
+. 3A 52 00 01 7E B5 FA 14 7F 92 70 40 7E 31 DA 14 41 9C FE BC
+
+. 0 1000CD28 20
+. 7D 08 02 A6 39 EF 00 01 55 00 F8 7E 7F 80 78 40 41 9D FE 88
+
+. 0 1000CBC0 12
+. 3A 40 00 00 7F 92 70 40 40 9C 01 60
+
+. 0 1000CD3C 8
+. 73 69 00 01 40 82 01 70
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 49, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 49, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 49
+. 0 1001300C 52
+. 7C 18 19 D6 C0 D7 00 00 C0 F6 00 00 38 E0 00 00 39 43 FF FF 7F 6B DB 78 54 04 10 3A 2F 07 00 00 C1 36 00 00 C1 57 00 00 FD 00 48 90 FD 60 48 90 41 9A 00 14
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 49, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 49, stride = 1
+. 0 1000ED64 24
+. 38 1B 00 01 39 E0 00 01 54 0B F8 7E 7C 08 03 A6 2B 8B 00 01 40 9D 01 8C
+
+. 0 1000ED7C 24
+. 83 3E 82 04 7D 74 5B 78 83 1E 82 08 3A 40 00 00 7F 92 70 40 40 9C 01 60
+
+. 0 1000ED94 40
+. 20 CF 00 00 7E C6 79 14 55 EC 08 3C 3A 20 00 00 3A A0 00 00 38 60 00 00 C0 B9 00 00 7F 83 E8 40 C0 D8 00 00 40 9C 01 24
+
+. 0 1000EDBC 40
+. 7D 31 62 14 7D 74 78 50 38 A9 FF FF 38 8B 00 01 2C 83 00 00 C1 18 00 00 C1 39 00 00 FC E0 40 90 FD 40 40 90 41 86 00 18
+
+. 0 1000EDF8 12
+. 39 00 00 00 7F 88 E8 40 40 9C 00 80
+
+. 0 1000EE04 48
+. 7C 15 D9 D6 7F A9 03 A6 57 67 08 3C 7D 20 62 14 7C CC 00 50 39 49 FF FF 2F 88 00 00 55 49 10 3A 39 69 00 04 7D 28 E8 50 7F 09 40 40 41 9E 00 14
+
+. 0 1000EE44 4
+. 40 99 03 A0
+
+. 0 1000EE48 56
+. 7C 1C 51 D6 7D 2B E1 D6 54 00 10 3A 7D 9F 04 2E 7D 69 FC 2E ED AA 03 32 39 08 00 01 EC 0A 02 F2 7D 4A 3A 14 ED A9 6A FA EC 09 03 38 EC E7 68 2A ED 08 00 2A 42 00 FF A0
+
+. 0 1000EE1C 24
+. 2F 88 00 00 55 49 10 3A 39 69 00 04 7D 28 E8 50 7F 09 40 40 41 9E 00 14
+
+. 0 1000EE34 20
+. EC 06 02 B2 ED A6 02 72 ED 25 02 78 ED 45 6A BA 40 99 03 A0
+
+. 0 1000F1E4 52
+. 7D 29 D9 D6 55 29 08 3C 7D 26 4A 14 39 29 FF FF 55 2B 10 3A 7D 3C 49 D6 39 6B 00 04 7D 6B E1 D6 55 29 10 3A 7D 89 FC 2E 7C 0B FC 2E FD 60 00 50 4B FF FC 48
+
+. 0 1000EE5C 36
+. ED AA 03 32 39 08 00 01 EC 0A 02 F2 7D 4A 3A 14 ED A9 6A FA EC 09 03 38 EC E7 68 2A ED 08 00 2A 42 00 FF A0
+
+. 0 1000EE80 16
+. 7C 00 00 26 54 00 3F FE 7E C7 03 79 41 82 03 8C
+
+. 0 1000EE90 76
+. C1 39 00 00 C1 58 00 00 54 A9 10 3A ED AA 02 32 EC 0A 01 F2 7C 17 29 D6 39 29 00 04 38 63 00 01 ED A9 69 FA 7F 83 E8 40 EC 09 02 38 7D 29 B9 D6 54 00 10 3A 7D 74 22 14 7C 1A 05 2E 38 8B FF FF 7C A5 9A 14 7D A9 D5 2E 41 9C FE F4
+
+. 0 1000EDCC 24
+. 2C 83 00 00 C1 18 00 00 C1 39 00 00 FC E0 40 90 FD 40 40 90 41 86 00 18
+
+. 0 1000EDE4 32
+. EC 06 07 B2 ED A6 07 72 EC 05 07 7A EC A5 6F B8 FC C0 00 90 39 00 00 00 7F 88 E8 40 40 9C 00 80
+
+. 0 1000F218 20
+. 54 89 18 38 7D 29 82 14 C1 49 FF FC C1 29 FF F8 4B FF FC 70
+
+. 0 1000EE98 68
+. 54 A9 10 3A ED AA 02 32 EC 0A 01 F2 7C 17 29 D6 39 29 00 04 38 63 00 01 ED A9 69 FA 7F 83 E8 40 EC 09 02 38 7D 29 B9 D6 54 00 10 3A 7D 74 22 14 7C 1A 05 2E 38 8B FF FF 7C A5 9A 14 7D A9 D5 2E 41 9C FE F4
+
+. 0 1000EEDC 20
+. 3A 52 00 01 7E B5 EA 14 7F 92 70 40 7E 31 DA 14 41 9C FE BC
+
+. 0 1000EEF0 20
+. 7D 08 02 A6 39 EF 00 01 55 00 F8 7E 7F 80 78 40 41 9D FE 88
+
+. 0 1000ED88 12
+. 3A 40 00 00 7F 92 70 40 40 9C 01 60
+
+. 0 1000EF04 8
+. 73 69 00 01 40 82 01 74
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 49, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 49, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 49
+PASS: gsl_fft_complex_forward with signal_noise, n = 49, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 49, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 49, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 49, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 49, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 49, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 49, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 49, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 49, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 49, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 49
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 49, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 49, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 49, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 49, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 49, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 49, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 49, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 49, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 49, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 49, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 49
+PASS: gsl_fft_real with signal_real_noise, n = 49, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 49, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 49, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 49, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 49
+PASS: gsl_fft_real_float with signal_real_noise, n = 49, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 49, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 49, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 49, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 49
+PASS: gsl_fft_complex_forward with signal_noise, n = 49, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 49, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 49, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 49, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 49, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 49, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 49, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 49, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 49, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 49, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 49
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 49, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 49, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 49, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 49, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 49, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 49, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 49, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 49, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 49, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 49, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 49
+PASS: gsl_fft_real with signal_real_noise, n = 49, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 49, stride = 3
+. 0 FEE0DB4 12
+. 7C 00 18 28 7D 20 19 2D 40 A2 FF F8
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 49, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 49, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 49
+PASS: gsl_fft_real_float with signal_real_noise, n = 49, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 49, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 49, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 50, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 50
+PASS: gsl_fft_complex_forward with signal_noise, n = 50, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 50, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 50, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 50, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 50, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 50, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 50, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 50
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 50, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 50, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 50, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 50, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 50, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 50, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 50, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 50
+. 0 10011434 400
+. 7D 7C D2 14 7C 0B D2 14 7D 1D 59 D6 7D 20 D2 14 7D 49 D2 14 54 07 18 38 55 26 18 38 55 45 18 38 7C 1D 01 D6 55 6B 18 38 39 6B 00 08 38 E7 00 08 38 C6 00 08 7D 3D 49 D6 38 A5 00 08 55 08 18 38 54 00 18 38 7C E8 FC AE 7D 5F 04 AE 7D 5D 51 D6 55 29 18 38 7D 29 FC AE FC 39 01 F2 57 89 18 38 FC 17 02 B2 39 29 00 08 7D 6B E9 D6 55 4A 18 38 7C AA FC AE FD B5 02 72 57 6A 18 38 FD 93 01 72 39 4A 00 08 7C E7 E9 D6 7D 0B FC AE 56 8B 08 3C FC 79 02 32 7D 6B AA 14 39 6B FF FF 7C C6 E9 D6 7D 67 FC AE FD 1A 0A 3A 7C F7 5A 14 55 64 18 38 FC 57 02 F2 54 E3 18 38 7C A5 E9 D6 7C C6 FC AE FD 78 02 FA 38 84 00 08 38 63 00 08 FF F5 01 B2 7E B5 8A 14 7C 85 FC AE 7D 29 E9 D6 FC D6 69 BA FF D3 01 32 7C 1D E1 D6 7F A9 FC AE FC 94 61 3A 7D 37 DA 14 7D 17 4A 14 FD 36 FA 78 55 26 18 38 54 00 18 38 55 05 18 38 FC B4 F1 78 7C 3F 04 AE 38 C6 00 08 7C 18 D9 D6 FC FA 19 F8 38 A5 00 08 7F 9C B2 14 FD 58 12 B8 7F 7B 8A 14 7D 38 49 D6 54 00 18 38 FD AB 30 2A FC 08 20 2A FD 87 28 2A FC 6A 48 2A 7D 18 41 D6 FF C0 68 2A 55 29 18 38 FD 4A 48 28 FF EC 18 2A FC 00 68 28 7D 78 59 D6 FD 3E 03 F2 55 08 18 38 FD 6B 30 28 FC 5C 02 B2 7C F8 39 D6 55 6B 18 38 FD 5B 02 B2 FC E7 28 28 7D 4A C1 D6 FC 00 04 32 54 E7 18 38 FD 8C 18 28
+
+. 0 100115C4 168
+. FD BF 03 F2 7C C6 C1 D6 FD 3D 48 28 FC DC 02 F2 7C A5 C1 D6 FD 7B 02 F2 FD 08 20 28 FC 51 11 FA 7C 84 C1 D6 FD 8C 04 32 FC F2 51 F8 7C 63 C1 D6 FD 49 00 28 FD A1 68 28 FD 29 00 2A FC D1 32 3A FD 12 5A 38 FC 0D 60 28 FC AA 10 28 FD 69 38 28 FD AD 60 2A FC 80 30 2A FC 21 F8 2A FD 8D 40 2A FF BD F0 2A FD 60 58 50 7C 39 05 AE FC A0 28 50 FD AD 40 28 7F AA CD AE FD 29 38 2A FC 00 30 28 FD 4A 10 2A 7D A9 CD AE 7D 26 CD AE 7C 08 CD AE 7D 45 CD AE 7D 8B CD AE 7D 64 CD AE 7C 87 CD AE 7C A3 CD AE 42 00 FD CC
+
+PASS: gsl_fft_real with signal_real_noise, n = 50, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 50, stride = 1
+. 0 1000DE58 260
+. 1D 24 00 05 7C E5 CA 14 7C 84 DA 14 7D 29 DA 14 39 29 FF FF 7C 06 4A 14 7D 7C 49 D6 54 0A 18 38 7D 06 02 14 39 4A 00 08 7C 1C 01 D6 55 6B 18 38 7D 6B EC AE 55 29 18 38 39 29 00 08 FD 6B 58 2A 54 00 18 38 7D 4A E1 D6 7C 1D 04 AE FC 00 00 2A 7D 1C 41 D6 7D AA EC AE FD 8B 00 28 FD 6B 00 2A 7D 29 E1 D6 55 08 18 38 7C E8 EC AE FD AD 68 2A FD 8C 01 72 FD 2B 39 B8 7C 09 EC AE 7D 27 CA 14 7C 17 29 D6 FD 58 03 72 7D 69 CA 14 7D 4B CA 14 FD B9 03 72 7C A5 DA 14 7C F7 39 D6 54 00 18 38 FC 00 00 2A FD 0C 48 28 FD 8C 48 2A FD 59 50 38 7D 37 49 D6 54 E7 18 38 FC 18 68 3A FD 20 60 50 7D 77 59 D6 FD A0 40 50 55 29 18 38 FD 6B 38 2A FD 29 00 28 7D 57 51 D6 FD AD 50 28 FD 8C 00 28 55 6B 18 38 FD 08 50 28 7D 7A 05 AE 55 4A 18 38 7D 87 D5 AE 7D 09 D5 AE 7D AB D5 AE 7D 2A D5 AE 42 00 FF 00
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 50, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 50, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 50
+. 0 10013430 400
+. 7D 7B D2 14 7C 0B D2 14 7D 1F 59 D6 7D 20 D2 14 7D 49 D2 14 54 07 10 3A 55 26 10 3A 55 45 10 3A 7C 1F 01 D6 55 6B 10 3A 39 6B 00 04 38 E7 00 04 38 C6 00 04 7D 3F 49 D6 38 A5 00 04 55 08 10 3A 54 00 10 3A 7C C8 EC 2E 7D 1D 04 2E 7D 5F 51 D6 55 29 10 3A 7C 89 EC 2E EC B8 01 B2 EC F6 02 32 57 69 10 3A EC 54 01 32 39 29 00 04 7D 6B F9 D6 55 4A 10 3A 7C 6A EC 2E 57 8A 10 3A 39 4A 00 04 EC 32 00 F2 7C E7 F9 D6 7D 6B EC 2E EC B9 2A FA ED 78 02 F2 7C C6 F9 D6 7C 07 EC 2E EC F7 38 3A EC 16 00 32 7C A5 F9 D6 7D 86 EC 2E EC D9 59 B8 EC 55 13 3A ED 94 03 32 7D A5 EC 2E ED 17 02 38 7D 29 F9 D6 EC 33 0B 7A ED B2 03 72 EC 95 61 38 EC 05 08 2A 7C 1F D9 D6 ED 87 10 2A 7F C9 EC 2E EC 73 68 F8 56 09 08 3C ED 68 20 2A 7D 29 B2 14 EF 80 60 2A 54 00 10 3A ED A6 18 2A 7F FD 04 2E EC 00 60 28 7C 17 E2 14 FD 40 E0 90 39 29 FF FF EF AD 58 2A 7D 17 02 14 ED AD 58 28 7C 97 4A 14 FD 4A 04 32 54 07 10 3A 55 06 10 3A 55 25 10 3A 54 83 10 3A FD 80 E8 90 7D 78 E1 D6 FD 20 F0 90 38 E7 00 04 38 C6 00 04 FD 8C 04 32 38 A5 00 04 38 63 00 04 7C 18 01 D6 FC 00 04 72 55 6B 10 3A 7F 7B AA 14 ED 08 20 28 7E D6 8A 14 FD 29 50 28 7D 18 41 D6 FD 60 F8 90 54 00 10 3A FD AD 04 72 7F 9C 8A 14 EC C6 18 28 7D 38 49 D6 FC 00 00 18
+
+. 0 100135C0 200
+. 55 08 10 3A EC E7 10 28 FD 6B 60 28 FD 20 48 18 7C 98 21 D6 ED 5B 02 32 55 29 10 3A ED 1A 02 32 EC 89 00 28 ED 4E 51 BA 7D 4A C1 D6 FD A0 68 18 54 84 10 3A ED 29 00 2A EC A5 08 28 FD 60 58 18 7C E7 C1 D6 EC CF 41 B8 ED 9B 01 F2 EC FA 01 F2 EC 0B 68 28 7C C6 C1 D6 ED 8E 61 7A ED 6B 68 2A EC 64 50 28 ED 09 30 28 7C A5 C1 D6 EC AF 39 78 ED A0 60 2A EF FF E8 2A 7C 63 C1 D6 EC EB 28 2A EF DE E0 2A FD 00 40 50 7F EB CD 2E FC 60 18 50 ED 6B 28 28 7F CA CD 2E ED 29 30 2A EC 00 60 28 EC 84 50 2A 7D 79 05 2E 7D 27 CD 2E 7C 08 CD 2E 7C 86 CD 2E 7C E9 CD 2E 7D 05 CD 2E 7D A4 CD 2E 7C 63 CD 2E 42 00 FD AC
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 50, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 50, stride = 1
+. 0 10010074 276
+. 1D 24 00 05 7C E6 C2 14 7C 84 DA 14 7D 29 DA 14 39 29 FF FF 7C 05 4A 14 7D 7C 49 D6 7D 45 02 14 54 08 10 3A 39 08 00 04 7C 1C 01 D6 55 6B 10 3A 7D 4B FC 2E 55 29 10 3A 39 29 00 04 ED 4A 50 2A 54 00 10 3A 7D 5C 51 D6 7C 1F 04 2E EC 00 00 2A 55 4A 10 3A 7D 08 E1 D6 7C EA FC 2E 7D 47 C2 14 ED 8A 00 28 ED 4A 00 2A FC 00 38 90 7D 29 E1 D6 FD A0 50 90 ED 4A 38 2A FD 8C 01 72 7D 69 FC 2E 7D 2A C2 14 7C 17 31 D6 FD AD 01 B8 7C 08 FC 2E 7D 69 C2 14 FD 80 60 18 7C C6 DA 14 EC 00 00 2A 7C F7 39 D6 FD A0 68 18 54 00 10 3A ED 6B 58 2A 7D 5A 05 2E ED 38 00 32 ED 0C 68 28 7D 57 51 D6 ED 8C 68 2A 54 E7 10 3A EC 19 00 32 ED 39 4A F8 FD A0 40 50 7D 37 49 D6 ED 78 02 FA 55 4A 10 3A FC 00 60 50 ED AD 48 28 7D 77 59 D6 ED 8C 58 28 EC 00 58 28 55 29 10 3A ED 08 48 28 7D 87 D5 2E 55 6B 10 3A 7D 0A D5 2E 7D A9 D5 2E 7C 0B D5 2E 42 00 FE F0
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 50, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 50, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 50
+PASS: gsl_fft_complex_forward with signal_noise, n = 50, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 50, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 50, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 50, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 50, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 50, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 50, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 50, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 50, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 50, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 50
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 50, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 50, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 50, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 50, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 50, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 50, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 50, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 50, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 50, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 50, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 50
+PASS: gsl_fft_real with signal_real_noise, n = 50, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 50, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 50, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 50, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 50
+PASS: gsl_fft_real_float with signal_real_noise, n = 50, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 50, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 50, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 50, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 50
+PASS: gsl_fft_complex_forward with signal_noise, n = 50, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 50, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 50, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 50, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 50, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 50, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 50, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 50, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 50, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 50, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 50
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 50, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 50, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 50, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 50, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 50, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 50, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 50, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 50, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 50, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 50, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 50
+PASS: gsl_fft_real with signal_real_noise, n = 50, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 50, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 50, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 50, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 50
+PASS: gsl_fft_real_float with signal_real_noise, n = 50, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 50, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 50, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 51, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 51
+PASS: gsl_fft_complex_forward with signal_noise, n = 51, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 51, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 51, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 51, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 51, stride = 1
+. 0 FE0D6E4 32
+. C9 5C 00 00 FC 5F 50 28 C9 BD 00 00 38 61 00 10 FC 22 68 28 FC 82 08 28 FC 44 68 28 48 01 8B DD
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 51, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 51, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 51
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 51, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 51, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 51, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 51, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 51, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 51, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 51, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 51
+PASS: gsl_fft_real with signal_real_noise, n = 51, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 51, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 51, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 51, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 51
+PASS: gsl_fft_real_float with signal_real_noise, n = 51, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 51, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 51, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 51, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 51
+PASS: gsl_fft_complex_forward with signal_noise, n = 51, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 51, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 51, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 51, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 51, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 51, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 51, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 51, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 51, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 51, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 51
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 51, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 51, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 51, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 51, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 51, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 51, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 51, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 51, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 51, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 51, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 51
+PASS: gsl_fft_real with signal_real_noise, n = 51, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 51, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 51, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 51, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 51
+PASS: gsl_fft_real_float with signal_real_noise, n = 51, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 51, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 51, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 51, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 51
+PASS: gsl_fft_complex_forward with signal_noise, n = 51, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 51, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 51, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 51, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 51, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 51, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 51, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 51, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 51, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 51, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 51
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 51, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 51, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 51, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 51, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 51, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 51, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 51, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 51, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 51, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 51, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 51
+PASS: gsl_fft_real with signal_real_noise, n = 51, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 51, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 51, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 51, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 51
+PASS: gsl_fft_real_float with signal_real_noise, n = 51, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 51, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 51, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 52, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 52
+PASS: gsl_fft_complex_forward with signal_noise, n = 52, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 52, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 52, stride = 1
+. 0 FE0BC44 20
+. 83 BE 0E A4 FC 9E 00 00 C8 5D 00 00 FD AD 00 B2 40 85 01 84
+
+. 0 FE0BDD8 16
+. 81 3E 0E AC C8 29 00 00 FC 1E 68 7A 4B FF FE C0
+
+. 0 FE0BCA4 16
+. C9 A1 00 10 FD 8D 00 2A FF 8D 60 00 41 9E 00 E4
+
+PASS: gsl_fft_complex_forward with signal_pulse, n = 52, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 52, stride = 1
+. 0 FE0BC58 16
+. 81 3E 0E A8 C8 29 00 00 FC 1E 68 7A 48 00 00 40
+
+PASS: gsl_fft_complex_forward with signal_exp, n = 52, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 52, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 52
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 52, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 52, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 52, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 52, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 52, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 52, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 52, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 52
+PASS: gsl_fft_real with signal_real_noise, n = 52, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 52, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 52, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 52, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 52
+PASS: gsl_fft_real_float with signal_real_noise, n = 52, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 52, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 52, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 52, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 52
+PASS: gsl_fft_complex_forward with signal_noise, n = 52, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 52, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 52, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 52, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 52, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 52, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 52, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 52, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 52, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 52, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 52
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 52, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 52, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 52, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 52, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 52, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 52, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 52, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 52, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 52, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 52, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 52
+PASS: gsl_fft_real with signal_real_noise, n = 52, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 52, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 52, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 52, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 52
+PASS: gsl_fft_real_float with signal_real_noise, n = 52, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 52, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 52, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 52, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 52
+PASS: gsl_fft_complex_forward with signal_noise, n = 52, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 52, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 52, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 52, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 52, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 52, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 52, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 52, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 52, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 52, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 52
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 52, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 52, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 52, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 52, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 52, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 52, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 52, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 52, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 52, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 52, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 52
+PASS: gsl_fft_real with signal_real_noise, n = 52, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 52, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 52, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 52, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 52
+PASS: gsl_fft_real_float with signal_real_noise, n = 52, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 52, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 52, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 53, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 53
+PASS: gsl_fft_complex_forward with signal_noise, n = 53, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 53, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 53, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 53, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 53, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 53, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 53, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 53
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 53, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 53, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 53, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 53, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 53, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 53, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 53, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 53
+PASS: gsl_fft_real with signal_real_noise, n = 53, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 53, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 53, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 53, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 53
+PASS: gsl_fft_real_float with signal_real_noise, n = 53, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 53, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 53, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 53, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 53
+PASS: gsl_fft_complex_forward with signal_noise, n = 53, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 53, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 53, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 53, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 53, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 53, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 53, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 53, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 53, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 53, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 53
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 53, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 53, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 53, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 53, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 53, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 53, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 53, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 53, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 53, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 53, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 53
+PASS: gsl_fft_real with signal_real_noise, n = 53, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 53, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 53, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 53, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 53
+PASS: gsl_fft_real_float with signal_real_noise, n = 53, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 53, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 53, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 53, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 53
+PASS: gsl_fft_complex_forward with signal_noise, n = 53, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 53, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 53, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 53, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 53, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 53, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 53, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 53, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 53, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 53, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 53
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 53, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 53, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 53, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 53, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 53, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 53, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 53, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 53, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 53, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 53, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 53
+PASS: gsl_fft_real with signal_real_noise, n = 53, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 53, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 53, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 53, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 53
+PASS: gsl_fft_real_float with signal_real_noise, n = 53, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 53, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 53, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 54, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 54
+PASS: gsl_fft_complex_forward with signal_noise, n = 54, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 54, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 54, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 54, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 54, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 54, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 54, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 54
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 54, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 54, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 54, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 54, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 54, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 54, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 54, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 54
+PASS: gsl_fft_real with signal_real_noise, n = 54, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 54, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 54, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 54, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 54
+PASS: gsl_fft_real_float with signal_real_noise, n = 54, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 54, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 54, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 54, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 54
+PASS: gsl_fft_complex_forward with signal_noise, n = 54, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 54, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 54, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 54, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 54, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 54, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 54, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 54, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 54, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 54, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 54
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 54, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 54, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 54, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 54, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 54, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 54, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 54, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 54, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 54, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 54, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 54
+PASS: gsl_fft_real with signal_real_noise, n = 54, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 54, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 54, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 54, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 54
+PASS: gsl_fft_real_float with signal_real_noise, n = 54, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 54, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 54, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 54, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 54
+PASS: gsl_fft_complex_forward with signal_noise, n = 54, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 54, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 54, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 54, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 54, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 54, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 54, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 54, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 54, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 54, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 54
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 54, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 54, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 54, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 54, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 54, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 54, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 54, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 54, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 54, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 54, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 54
+PASS: gsl_fft_real with signal_real_noise, n = 54, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 54, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 54, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 54, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 54
+PASS: gsl_fft_real_float with signal_real_noise, n = 54, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 54, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 54, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 55, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 55
+PASS: gsl_fft_complex_forward with signal_noise, n = 55, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 55, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 55, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 55, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 55, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 55, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 55, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 55
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 55, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 55, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 55, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 55, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 55, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 55, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 55, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 55
+PASS: gsl_fft_real with signal_real_noise, n = 55, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 55, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 55, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 55, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 55
+PASS: gsl_fft_real_float with signal_real_noise, n = 55, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 55, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 55, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 55, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 55
+PASS: gsl_fft_complex_forward with signal_noise, n = 55, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 55, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 55, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 55, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 55, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 55, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 55, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 55, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 55, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 55, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 55
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 55, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 55, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 55, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 55, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 55, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 55, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 55, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 55, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 55, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 55, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 55
+PASS: gsl_fft_real with signal_real_noise, n = 55, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 55, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 55, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 55, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 55
+PASS: gsl_fft_real_float with signal_real_noise, n = 55, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 55, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 55, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 55, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 55
+PASS: gsl_fft_complex_forward with signal_noise, n = 55, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 55, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 55, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 55, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 55, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 55, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 55, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 55, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 55, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 55, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 55
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 55, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 55, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 55, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 55, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 55, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 55, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 55, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 55, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 55, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 55, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 55
+PASS: gsl_fft_real with signal_real_noise, n = 55, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 55, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 55, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 55, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 55
+PASS: gsl_fft_real_float with signal_real_noise, n = 55, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 55, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 55, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 56, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 56
+PASS: gsl_fft_complex_forward with signal_noise, n = 56, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 56, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 56, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 56, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 56, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 56, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 56, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 56
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 56, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 56, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 56, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 56, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 56, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 56, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 56, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 56
+. 0 10011AD0 76
+. 7C 1D 41 D6 81 81 00 2C 54 EA 18 38 7D 68 62 14 39 4A 00 08 7D 38 39 D6 54 00 18 38 7C 1F 04 AE 7D 08 2A 14 7C E7 8A 14 7D 7D 59 D6 55 29 18 38 7C 09 CD AE 55 6B 18 38 7D 4A C1 D6 7C 0B FC AE FC 00 00 50 7C 0A CD AE 42 00 FF B8
+
+PASS: gsl_fft_real with signal_real_noise, n = 56, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 56, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 56, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 56, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 56
+. 0 10013AFC 76
+. 7C 1F 41 D6 81 81 00 2C 54 EA 10 3A 7D 68 62 14 39 4A 00 04 7D 38 39 D6 54 00 10 3A 7C 1D 04 2E 7D 08 22 14 7C E7 8A 14 7D 7F 59 D6 55 29 10 3A 7C 09 CD 2E 55 6B 10 3A 7D 4A C1 D6 7C 0B EC 2E FC 00 00 50 7C 0A CD 2E 42 00 FF B8
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 56, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 56, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 56, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 56, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 56
+PASS: gsl_fft_complex_forward with signal_noise, n = 56, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 56, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 56, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 56, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 56, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 56, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 56, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 56, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 56, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 56, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 56
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 56, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 56, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 56, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 56, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 56, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 56, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 56, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 56, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 56, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 56, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 56
+PASS: gsl_fft_real with signal_real_noise, n = 56, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 56, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 56, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 56, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 56
+PASS: gsl_fft_real_float with signal_real_noise, n = 56, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 56, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 56, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 56, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 56
+PASS: gsl_fft_complex_forward with signal_noise, n = 56, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 56, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 56, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 56, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 56, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 56, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 56, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 56, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 56, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 56, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 56
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 56, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 56, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 56, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 56, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 56, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 56, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 56, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 56, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 56, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 56, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 56
+PASS: gsl_fft_real with signal_real_noise, n = 56, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 56, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 56, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 56, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 56
+PASS: gsl_fft_real_float with signal_real_noise, n = 56, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 56, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 56, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 57, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 57
+PASS: gsl_fft_complex_forward with signal_noise, n = 57, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 57, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 57, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 57, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 57, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 57, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 57, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 57
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 57, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 57, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 57, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 57, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 57, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 57, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 57, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 57
+PASS: gsl_fft_real with signal_real_noise, n = 57, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 57, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 57, stride = 1
+. 0 FEF19B8 12
+. 7F A0 F8 28 7C 00 F9 2D 40 A2 FF F8
+
+PASS: gsl_fft_real_wavetable_float_alloc, n = 57, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 57
+PASS: gsl_fft_real_float with signal_real_noise, n = 57, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 57, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 57, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 57, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 57
+PASS: gsl_fft_complex_forward with signal_noise, n = 57, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 57, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 57, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 57, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 57, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 57, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 57, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 57, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 57, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 57, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 57
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 57, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 57, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 57, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 57, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 57, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 57, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 57, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 57, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 57, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 57, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 57
+PASS: gsl_fft_real with signal_real_noise, n = 57, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 57, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 57, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 57, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 57
+PASS: gsl_fft_real_float with signal_real_noise, n = 57, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 57, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 57, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 57, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 57
+PASS: gsl_fft_complex_forward with signal_noise, n = 57, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 57, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 57, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 57, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 57, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 57, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 57, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 57, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 57, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 57, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 57
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 57, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 57, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 57, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 57, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 57, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 57, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 57, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 57, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 57, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 57, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 57
+PASS: gsl_fft_real with signal_real_noise, n = 57, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 57, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 57, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 57, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 57
+PASS: gsl_fft_real_float with signal_real_noise, n = 57, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 57, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 57, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 58, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 58
+PASS: gsl_fft_complex_forward with signal_noise, n = 58, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 58, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 58, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 58, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 58, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 58, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 58, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 58
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 58, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 58, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 58, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 58, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 58, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 58, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 58, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 58
+PASS: gsl_fft_real with signal_real_noise, n = 58, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 58, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 58, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 58, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 58
+PASS: gsl_fft_real_float with signal_real_noise, n = 58, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 58, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 58, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 58, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 58
+PASS: gsl_fft_complex_forward with signal_noise, n = 58, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 58, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 58, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 58, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 58, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 58, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 58, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 58, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 58, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 58, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 58
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 58, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 58, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 58, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 58, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 58, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 58, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 58, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 58, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 58, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 58, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 58
+PASS: gsl_fft_real with signal_real_noise, n = 58, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 58, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 58, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 58, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 58
+PASS: gsl_fft_real_float with signal_real_noise, n = 58, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 58, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 58, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 58, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 58
+PASS: gsl_fft_complex_forward with signal_noise, n = 58, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 58, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 58, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 58, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 58, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 58, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 58, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 58, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 58, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 58, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 58
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 58, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 58, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 58, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 58, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 58, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 58, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 58, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 58, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 58, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 58, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 58
+PASS: gsl_fft_real with signal_real_noise, n = 58, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 58, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 58, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 58, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 58
+PASS: gsl_fft_real_float with signal_real_noise, n = 58, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 58, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 58, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 59, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 59
+PASS: gsl_fft_complex_forward with signal_noise, n = 59, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 59, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 59, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 59, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 59, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 59, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 59, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 59
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 59, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 59, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 59, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 59, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 59, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 59, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 59, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 59
+PASS: gsl_fft_real with signal_real_noise, n = 59, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 59, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 59, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 59, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 59
+PASS: gsl_fft_real_float with signal_real_noise, n = 59, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 59, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 59, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 59, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 59
+PASS: gsl_fft_complex_forward with signal_noise, n = 59, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 59, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 59, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 59, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 59, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 59, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 59, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 59, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 59, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 59, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 59
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 59, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 59, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 59, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 59, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 59, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 59, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 59, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 59, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 59, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 59, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 59
+PASS: gsl_fft_real with signal_real_noise, n = 59, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 59, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 59, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 59, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 59
+PASS: gsl_fft_real_float with signal_real_noise, n = 59, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 59, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 59, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 59, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 59
+PASS: gsl_fft_complex_forward with signal_noise, n = 59, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 59, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 59, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 59, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 59, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 59, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 59, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 59, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 59, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 59, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 59
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 59, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 59, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 59, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 59, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 59, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 59, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 59, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 59, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 59, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 59, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 59
+PASS: gsl_fft_real with signal_real_noise, n = 59, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 59, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 59, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 59, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 59
+PASS: gsl_fft_real_float with signal_real_noise, n = 59, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 59, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 59, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 60, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 60
+PASS: gsl_fft_complex_forward with signal_noise, n = 60, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 60, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 60, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 60, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 60, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 60, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 60, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 60
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 60, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 60, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 60, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 60, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 60, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 60, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 60, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 60
+PASS: gsl_fft_real with signal_real_noise, n = 60, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 60, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 60, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 60, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 60
+PASS: gsl_fft_real_float with signal_real_noise, n = 60, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 60, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 60, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 60, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 60
+PASS: gsl_fft_complex_forward with signal_noise, n = 60, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 60, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 60, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 60, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 60, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 60, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 60, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 60, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 60, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 60, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 60
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 60, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 60, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 60, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 60, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 60, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 60, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 60, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 60, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 60, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 60, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 60
+PASS: gsl_fft_real with signal_real_noise, n = 60, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 60, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 60, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 60, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 60
+PASS: gsl_fft_real_float with signal_real_noise, n = 60, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 60, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 60, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 60, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 60
+PASS: gsl_fft_complex_forward with signal_noise, n = 60, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 60, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 60, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 60, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 60, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 60, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 60, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 60, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 60, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 60, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 60
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 60, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 60, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 60, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 60, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 60, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 60, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 60, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 60, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 60, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 60, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 60
+PASS: gsl_fft_real with signal_real_noise, n = 60, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 60, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 60, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 60, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 60
+PASS: gsl_fft_real_float with signal_real_noise, n = 60, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 60, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 60, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 61, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 61
+PASS: gsl_fft_complex_forward with signal_noise, n = 61, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 61, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 61, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 61, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 61, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 61, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 61, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 61
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 61, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 61, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 61, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 61, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 61, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 61, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 61, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 61
+PASS: gsl_fft_real with signal_real_noise, n = 61, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 61, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 61, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 61, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 61
+PASS: gsl_fft_real_float with signal_real_noise, n = 61, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 61, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 61, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 61, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 61
+PASS: gsl_fft_complex_forward with signal_noise, n = 61, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 61, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 61, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 61, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 61, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 61, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 61, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 61, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 61, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 61, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 61
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 61, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 61, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 61, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 61, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 61, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 61, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 61, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 61, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 61, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 61, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 61
+PASS: gsl_fft_real with signal_real_noise, n = 61, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 61, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 61, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 61, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 61
+PASS: gsl_fft_real_float with signal_real_noise, n = 61, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 61, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 61, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 61, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 61
+PASS: gsl_fft_complex_forward with signal_noise, n = 61, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 61, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 61, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 61, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 61, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 61, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 61, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 61, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 61, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 61, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 61
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 61, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 61, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 61, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 61, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 61, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 61, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 61, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 61, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 61, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 61, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 61
+PASS: gsl_fft_real with signal_real_noise, n = 61, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 61, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 61, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 61, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 61
+PASS: gsl_fft_real_float with signal_real_noise, n = 61, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 61, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 61, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 62, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 62
+PASS: gsl_fft_complex_forward with signal_noise, n = 62, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 62, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 62, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 62, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 62, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 62, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 62, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 62
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 62, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 62, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 62, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 62, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 62, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 62, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 62, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 62
+PASS: gsl_fft_real with signal_real_noise, n = 62, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 62, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 62, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 62, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 62
+PASS: gsl_fft_real_float with signal_real_noise, n = 62, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 62, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 62, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 62, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 62
+PASS: gsl_fft_complex_forward with signal_noise, n = 62, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 62, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 62, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 62, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 62, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 62, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 62, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 62, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 62, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 62, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 62
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 62, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 62, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 62, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 62, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 62, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 62, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 62, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 62, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 62, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 62, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 62
+PASS: gsl_fft_real with signal_real_noise, n = 62, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 62, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 62, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 62, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 62
+PASS: gsl_fft_real_float with signal_real_noise, n = 62, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 62, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 62, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 62, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 62
+PASS: gsl_fft_complex_forward with signal_noise, n = 62, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 62, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 62, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 62, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 62, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 62, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 62, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 62, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 62, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 62, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 62
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 62, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 62, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 62, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 62, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 62, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 62, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 62, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 62, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 62, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 62, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 62
+PASS: gsl_fft_real with signal_real_noise, n = 62, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 62, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 62, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 62, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 62
+PASS: gsl_fft_real_float with signal_real_noise, n = 62, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 62, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 62, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 63, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 63
+PASS: gsl_fft_complex_forward with signal_noise, n = 63, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 63, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 63, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 63, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 63, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 63, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 63, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 63
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 63, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 63, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 63, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 63, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 63, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 63, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 63, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 63
+PASS: gsl_fft_real with signal_real_noise, n = 63, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 63, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 63, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 63, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 63
+PASS: gsl_fft_real_float with signal_real_noise, n = 63, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 63, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 63, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 63, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 63
+PASS: gsl_fft_complex_forward with signal_noise, n = 63, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 63, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 63, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 63, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 63, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 63, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 63, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 63, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 63, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 63, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 63
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 63, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 63, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 63, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 63, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 63, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 63, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 63, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 63, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 63, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 63, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 63
+PASS: gsl_fft_real with signal_real_noise, n = 63, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 63, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 63, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 63, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 63
+PASS: gsl_fft_real_float with signal_real_noise, n = 63, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 63, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 63, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 63, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 63
+PASS: gsl_fft_complex_forward with signal_noise, n = 63, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 63, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 63, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 63, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 63, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 63, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 63, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 63, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 63, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 63, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 63
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 63, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 63, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 63, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 63, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 63, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 63, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 63, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 63, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 63, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 63, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 63
+PASS: gsl_fft_real with signal_real_noise, n = 63, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 63, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 63, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 63, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 63
+PASS: gsl_fft_real_float with signal_real_noise, n = 63, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 63, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 63, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 64, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 64
+PASS: gsl_fft_complex_forward with signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 64, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 64, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 64, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 64, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 64
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 64, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 64, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 64, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 64, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 64
+PASS: gsl_fft_real with signal_real_noise, n = 64, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 64, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 64, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 64, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 64
+PASS: gsl_fft_real_float with signal_real_noise, n = 64, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 64, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 64, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 64, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 64
+PASS: gsl_fft_complex_forward with signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 64, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 64, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 64, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 64, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 64, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 64, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 64, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 64
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 64, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 64, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 64, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 64, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 64, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 64, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 64, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 64
+PASS: gsl_fft_real with signal_real_noise, n = 64, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 64, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 64, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 64, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 64
+PASS: gsl_fft_real_float with signal_real_noise, n = 64, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 64, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 64, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 64, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 64
+PASS: gsl_fft_complex_forward with signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 64, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 64, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 64, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 64, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 64, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 64, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 64, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 64
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 64, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 64, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 64, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 64, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 64, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 64, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 64, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 64
+PASS: gsl_fft_real with signal_real_noise, n = 64, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 64, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 64, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 64, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 64
+PASS: gsl_fft_real_float with signal_real_noise, n = 64, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 64, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 64, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 65, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 65
+PASS: gsl_fft_complex_forward with signal_noise, n = 65, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 65, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 65, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 65, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 65, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 65, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 65, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 65
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 65, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 65, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 65, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 65, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 65, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 65, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 65, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 65
+PASS: gsl_fft_real with signal_real_noise, n = 65, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 65, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 65, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 65, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 65
+PASS: gsl_fft_real_float with signal_real_noise, n = 65, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 65, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 65, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 65, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 65
+PASS: gsl_fft_complex_forward with signal_noise, n = 65, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 65, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 65, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 65, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 65, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 65, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 65, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 65, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 65, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 65, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 65
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 65, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 65, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 65, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 65, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 65, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 65, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 65, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 65, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 65, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 65, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 65
+PASS: gsl_fft_real with signal_real_noise, n = 65, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 65, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 65, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 65, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 65
+PASS: gsl_fft_real_float with signal_real_noise, n = 65, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 65, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 65, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 65, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 65
+PASS: gsl_fft_complex_forward with signal_noise, n = 65, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 65, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 65, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 65, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 65, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 65, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 65, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 65, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 65, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 65, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 65
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 65, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 65, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 65, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 65, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 65, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 65, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 65, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 65, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 65, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 65, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 65
+PASS: gsl_fft_real with signal_real_noise, n = 65, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 65, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 65, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 65, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 65
+PASS: gsl_fft_real_float with signal_real_noise, n = 65, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 65, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 65, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 66, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 66
+PASS: gsl_fft_complex_forward with signal_noise, n = 66, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 66, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 66, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 66, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 66, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 66, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 66, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 66
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 66, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 66, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 66, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 66, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 66, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 66, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 66, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 66
+PASS: gsl_fft_real with signal_real_noise, n = 66, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 66, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 66, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 66, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 66
+PASS: gsl_fft_real_float with signal_real_noise, n = 66, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 66, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 66, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 66, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 66
+PASS: gsl_fft_complex_forward with signal_noise, n = 66, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 66, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 66, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 66, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 66, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 66, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 66, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 66, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 66, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 66, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 66
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 66, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 66, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 66, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 66, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 66, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 66, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 66, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 66, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 66, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 66, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 66
+PASS: gsl_fft_real with signal_real_noise, n = 66, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 66, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 66, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 66, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 66
+PASS: gsl_fft_real_float with signal_real_noise, n = 66, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 66, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 66, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 66, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 66
+PASS: gsl_fft_complex_forward with signal_noise, n = 66, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 66, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 66, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 66, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 66, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 66, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 66, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 66, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 66, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 66, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 66
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 66, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 66, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 66, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 66, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 66, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 66, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 66, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 66, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 66, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 66, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 66
+PASS: gsl_fft_real with signal_real_noise, n = 66, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 66, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 66, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 66, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 66
+PASS: gsl_fft_real_float with signal_real_noise, n = 66, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 66, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 66, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 67, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 67
+PASS: gsl_fft_complex_forward with signal_noise, n = 67, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 67, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 67, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 67, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 67, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 67, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 67, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 67
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 67, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 67, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 67, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 67, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 67, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 67, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 67, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 67
+PASS: gsl_fft_real with signal_real_noise, n = 67, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 67, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 67, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 67, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 67
+PASS: gsl_fft_real_float with signal_real_noise, n = 67, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 67, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 67, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 67, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 67
+PASS: gsl_fft_complex_forward with signal_noise, n = 67, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 67, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 67, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 67, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 67, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 67, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 67, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 67, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 67, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 67, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 67
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 67, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 67, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 67, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 67, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 67, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 67, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 67, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 67, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 67, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 67, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 67
+PASS: gsl_fft_real with signal_real_noise, n = 67, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 67, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 67, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 67, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 67
+PASS: gsl_fft_real_float with signal_real_noise, n = 67, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 67, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 67, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 67, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 67
+PASS: gsl_fft_complex_forward with signal_noise, n = 67, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 67, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 67, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 67, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 67, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 67, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 67, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 67, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 67, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 67, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 67
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 67, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 67, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 67, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 67, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 67, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 67, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 67, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 67, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 67, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 67, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 67
+PASS: gsl_fft_real with signal_real_noise, n = 67, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 67, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 67, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 67, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 67
+PASS: gsl_fft_real_float with signal_real_noise, n = 67, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 67, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 67, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 68, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 68
+PASS: gsl_fft_complex_forward with signal_noise, n = 68, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 68, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 68, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 68, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 68, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 68, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 68, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 68
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 68, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 68, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 68, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 68, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 68, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 68, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 68, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 68
+PASS: gsl_fft_real with signal_real_noise, n = 68, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 68, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 68, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 68, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 68
+PASS: gsl_fft_real_float with signal_real_noise, n = 68, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 68, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 68, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 68, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 68
+PASS: gsl_fft_complex_forward with signal_noise, n = 68, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 68, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 68, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 68, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 68, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 68, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 68, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 68, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 68, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 68, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 68
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 68, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 68, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 68, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 68, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 68, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 68, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 68, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 68, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 68, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 68, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 68
+PASS: gsl_fft_real with signal_real_noise, n = 68, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 68, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 68, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 68, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 68
+PASS: gsl_fft_real_float with signal_real_noise, n = 68, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 68, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 68, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 68, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 68
+PASS: gsl_fft_complex_forward with signal_noise, n = 68, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 68, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 68, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 68, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 68, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 68, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 68, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 68, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 68, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 68, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 68
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 68, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 68, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 68, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 68, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 68, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 68, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 68, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 68, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 68, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 68, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 68
+PASS: gsl_fft_real with signal_real_noise, n = 68, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 68, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 68, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 68, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 68
+PASS: gsl_fft_real_float with signal_real_noise, n = 68, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 68, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 68, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 69, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 69
+PASS: gsl_fft_complex_forward with signal_noise, n = 69, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 69, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 69, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 69, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 69, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 69, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 69, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 69
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 69, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 69, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 69, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 69, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 69, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 69, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 69, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 69
+PASS: gsl_fft_real with signal_real_noise, n = 69, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 69, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 69, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 69, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 69
+PASS: gsl_fft_real_float with signal_real_noise, n = 69, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 69, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 69, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 69, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 69
+PASS: gsl_fft_complex_forward with signal_noise, n = 69, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 69, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 69, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 69, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 69, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 69, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 69, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 69, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 69, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 69, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 69
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 69, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 69, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 69, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 69, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 69, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 69, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 69, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 69, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 69, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 69, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 69
+PASS: gsl_fft_real with signal_real_noise, n = 69, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 69, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 69, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 69, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 69
+PASS: gsl_fft_real_float with signal_real_noise, n = 69, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 69, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 69, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 69, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 69
+PASS: gsl_fft_complex_forward with signal_noise, n = 69, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 69, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 69, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 69, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 69, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 69, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 69, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 69, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 69, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 69, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 69
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 69, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 69, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 69, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 69, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 69, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 69, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 69, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 69, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 69, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 69, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 69
+PASS: gsl_fft_real with signal_real_noise, n = 69, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 69, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 69, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 69, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 69
+PASS: gsl_fft_real_float with signal_real_noise, n = 69, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 69, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 69, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 70, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 70
+PASS: gsl_fft_complex_forward with signal_noise, n = 70, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 70, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 70, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 70, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 70, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 70, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 70, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 70
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 70, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 70, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 70, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 70, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 70, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 70, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 70, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 70
+PASS: gsl_fft_real with signal_real_noise, n = 70, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 70, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 70, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 70, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 70
+PASS: gsl_fft_real_float with signal_real_noise, n = 70, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 70, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 70, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 70, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 70
+PASS: gsl_fft_complex_forward with signal_noise, n = 70, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 70, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 70, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 70, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 70, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 70, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 70, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 70, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 70, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 70, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 70
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 70, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 70, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 70, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 70, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 70, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 70, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 70, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 70, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 70, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 70, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 70
+PASS: gsl_fft_real with signal_real_noise, n = 70, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 70, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 70, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 70, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 70
+PASS: gsl_fft_real_float with signal_real_noise, n = 70, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 70, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 70, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 70, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 70
+PASS: gsl_fft_complex_forward with signal_noise, n = 70, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 70, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 70, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 70, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 70, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 70, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 70, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 70, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 70, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 70, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 70
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 70, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 70, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 70, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 70, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 70, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 70, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 70, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 70, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 70, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 70, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 70
+PASS: gsl_fft_real with signal_real_noise, n = 70, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 70, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 70, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 70, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 70
+PASS: gsl_fft_real_float with signal_real_noise, n = 70, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 70, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 70, stride = 3
+. 0 FE0D5E8 240
+. C8 0A 00 00 C9 3D 00 00 FD 00 38 28 FD 40 48 50 FC 68 00 28 D9 01 00 30 81 41 00 30 81 61 00 34 FC 83 38 2A FC 40 20 50 FD 22 00 B2 C8 E5 00 00 CB C6 00 00 55 6B 28 34 CB 87 00 00 7C AB 42 14 80 FE 0F 84 FF A9 3F BA C8 83 00 00 CB 47 00 00 C8 C4 00 00 FF 62 D0 2A C8 65 00 08 FD 7D E2 7A 7F AB 44 AE 80 7E 0F 88 FD 9D D0 2A FD 1B D0 28 FC 2B 02 72 C9 65 00 18 FF CC D0 28 FC 02 40 28 FD A2 0A BA C8 25 00 10 FC E0 50 2A FC BD F0 28 FF 81 03 72 FF 69 21 BA FF 42 02 72 FC 9E 02 32 FD 45 18 2A FD 9E 01 F2 FC 0B E0 28 FD 3A 06 F2 CB 43 00 00 FC 6A 00 B2 FF 81 20 28 FD A0 60 28 FC BD 02 72 FF C1 E0 28 FC CD 18 28 FC FE 20 28 FC 46 28 28 FF 62 38 2A FC 3C D8 2A FD 7C 08 28 FF BB 58 2A FD 5D 0E BA FE 0A 08 00 41 92 00 68
+
+PASS: gsl_fft_complex_wavetable_alloc, n = 71, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 71
+PASS: gsl_fft_complex_forward with signal_noise, n = 71, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 71, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 71, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 71, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 71, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 71, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 71, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 71
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 71, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 71, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 71, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 71, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 71, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 71, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 71, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 71
+PASS: gsl_fft_real with signal_real_noise, n = 71, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 71, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 71, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 71, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 71
+PASS: gsl_fft_real_float with signal_real_noise, n = 71, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 71, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 71, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 71, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 71
+PASS: gsl_fft_complex_forward with signal_noise, n = 71, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 71, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 71, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 71, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 71, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 71, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 71, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 71, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 71, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 71, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 71
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 71, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 71, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 71, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 71, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 71, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 71, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 71, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 71, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 71, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 71, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 71
+PASS: gsl_fft_real with signal_real_noise, n = 71, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 71, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 71, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 71, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 71
+PASS: gsl_fft_real_float with signal_real_noise, n = 71, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 71, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 71, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 71, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 71
+PASS: gsl_fft_complex_forward with signal_noise, n = 71, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 71, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 71, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 71, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 71, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 71, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 71, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 71, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 71, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 71, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 71
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 71, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 71, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 71, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 71, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 71, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 71, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 71, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 71, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 71, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 71, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 71
+PASS: gsl_fft_real with signal_real_noise, n = 71, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 71, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 71, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 71, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 71
+PASS: gsl_fft_real_float with signal_real_noise, n = 71, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 71, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 71, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 72, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 72
+PASS: gsl_fft_complex_forward with signal_noise, n = 72, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 72, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 72, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 72, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 72, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 72, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 72, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 72
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 72, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 72, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 72, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 72, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 72, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 72, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 72, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 72
+PASS: gsl_fft_real with signal_real_noise, n = 72, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 72, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 72, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 72, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 72
+PASS: gsl_fft_real_float with signal_real_noise, n = 72, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 72, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 72, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 72, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 72
+PASS: gsl_fft_complex_forward with signal_noise, n = 72, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 72, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 72, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 72, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 72, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 72, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 72, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 72, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 72, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 72, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 72
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 72, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 72, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 72, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 72, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 72, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 72, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 72, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 72, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 72, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 72, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 72
+PASS: gsl_fft_real with signal_real_noise, n = 72, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 72, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 72, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 72, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 72
+PASS: gsl_fft_real_float with signal_real_noise, n = 72, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 72, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 72, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 72, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 72
+PASS: gsl_fft_complex_forward with signal_noise, n = 72, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 72, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 72, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 72, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 72, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 72, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 72, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 72, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 72, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 72, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 72
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 72, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 72, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 72, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 72, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 72, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 72, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 72, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 72, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 72, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 72, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 72
+PASS: gsl_fft_real with signal_real_noise, n = 72, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 72, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 72, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 72, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 72
+PASS: gsl_fft_real_float with signal_real_noise, n = 72, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 72, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 72, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 73, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 73
+PASS: gsl_fft_complex_forward with signal_noise, n = 73, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 73, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 73, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 73, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 73, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 73, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 73, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 73
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 73, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 73, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 73, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 73, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 73, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 73, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 73, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 73
+PASS: gsl_fft_real with signal_real_noise, n = 73, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 73, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 73, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 73, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 73
+PASS: gsl_fft_real_float with signal_real_noise, n = 73, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 73, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 73, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 73, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 73
+PASS: gsl_fft_complex_forward with signal_noise, n = 73, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 73, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 73, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 73, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 73, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 73, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 73, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 73, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 73, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 73, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 73
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 73, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 73, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 73, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 73, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 73, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 73, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 73, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 73, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 73, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 73, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 73
+PASS: gsl_fft_real with signal_real_noise, n = 73, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 73, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 73, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 73, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 73
+PASS: gsl_fft_real_float with signal_real_noise, n = 73, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 73, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 73, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 73, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 73
+PASS: gsl_fft_complex_forward with signal_noise, n = 73, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 73, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 73, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 73, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 73, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 73, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 73, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 73, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 73, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 73, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 73
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 73, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 73, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 73, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 73, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 73, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 73, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 73, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 73, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 73, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 73, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 73
+PASS: gsl_fft_real with signal_real_noise, n = 73, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 73, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 73, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 73, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 73
+PASS: gsl_fft_real_float with signal_real_noise, n = 73, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 73, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 73, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 74, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 74
+PASS: gsl_fft_complex_forward with signal_noise, n = 74, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 74, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 74, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 74, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 74, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 74, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 74, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 74
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 74, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 74, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 74, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 74, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 74, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 74, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 74, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 74
+PASS: gsl_fft_real with signal_real_noise, n = 74, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 74, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 74, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 74, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 74
+PASS: gsl_fft_real_float with signal_real_noise, n = 74, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 74, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 74, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 74, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 74
+PASS: gsl_fft_complex_forward with signal_noise, n = 74, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 74, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 74, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 74, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 74, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 74, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 74, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 74, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 74, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 74, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 74
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 74, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 74, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 74, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 74, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 74, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 74, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 74, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 74, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 74, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 74, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 74
+PASS: gsl_fft_real with signal_real_noise, n = 74, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 74, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 74, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 74, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 74
+PASS: gsl_fft_real_float with signal_real_noise, n = 74, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 74, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 74, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 74, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 74
+PASS: gsl_fft_complex_forward with signal_noise, n = 74, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 74, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 74, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 74, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 74, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 74, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 74, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 74, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 74, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 74, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 74
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 74, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 74, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 74, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 74, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 74, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 74, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 74, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 74, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 74, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 74, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 74
+PASS: gsl_fft_real with signal_real_noise, n = 74, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 74, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 74, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 74, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 74
+PASS: gsl_fft_real_float with signal_real_noise, n = 74, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 74, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 74, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 75, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 75
+PASS: gsl_fft_complex_forward with signal_noise, n = 75, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 75, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 75, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 75, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 75, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 75, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 75, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 75
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 75, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 75, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 75, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 75, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 75, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 75, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 75, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 75
+PASS: gsl_fft_real with signal_real_noise, n = 75, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 75, stride = 1
+. 0 1000DBC8 400
+. 1C F5 00 05 7C 7F CA 14 7E B5 DA 14 7D 76 38 50 7C E7 B2 14 7D 6B C2 14 38 E7 FF FF 39 6B FF FF 7C 18 3A 14 7C D8 5A 14 55 6A 18 38 54 C9 18 38 39 4A 00 08 39 29 00 08 7D 18 02 14 7D 29 E1 D6 54 05 18 38 55 04 18 38 38 A5 00 08 38 84 00 08 7D 4A E1 D6 7D 09 EC AE 54 E9 18 38 FD 00 40 50 39 29 00 08 7C 1C 01 D6 7C 8A EC AE 7D 43 CA 14 FC 80 20 50 7D 1C 41 D6 54 00 18 38 7C FD 04 AE 7C 0A CA 14 7C DC 31 D6 55 08 18 38 7D 48 EC AE 7D 00 CA 14 7D 7C 59 D6 54 C6 18 38 7C A6 EC AE 55 46 18 38 38 C6 00 08 FC CA 28 2A 7C A5 E1 D6 55 6B 18 38 7C 6B EC AE FD 4A 28 28 57 EB 18 38 FD A7 18 2A 39 6B 00 08 7C 84 E1 D6 7D 65 EC AE FC B9 02 B2 54 05 18 38 38 A5 00 08 FD 8B 20 2A 7C 04 EC AE FF 6D 30 2A 7C FC 39 D6 FD AD 30 28 FD 20 40 2A 55 04 18 38 FC 00 40 28 38 84 00 08 FD 1B 03 B2 7D 29 E1 D6 54 E7 18 38 FF 4C 48 2A 7C 47 EC AE FD 8C 48 28 54 67 18 38 FC D9 00 32 38 E7 00 08 7C 29 EC AE 7C 77 19 D6 FD 3A 03 B2 FD 58 02 B2 7D 37 F9 D6 54 63 18 38 FC 18 00 32 7F FF DA 14 FC E7 18 28 FD 6B 20 28 7D 57 51 D6 FD AD 03 F2 55 29 18 38 FD 8C 03 F2 7C 17 01 D6 55 4A 18 38 FD 02 40 28 FD 21 48 28 FC B8 29 F8 7D 17 41 D6 54 00 18 38 FC D8 32 F8 FC F9 51 FA 7D 6B B9 D6 55 08 18 38 FD 79 02 FA FD 49 60 28
+
+. 0 1000DD58 176
+. FC 08 68 28 7C E7 B9 D6 FD 08 68 2A FD 29 60 2A FD A0 30 2A FD 8A 28 28 7C C6 B9 D6 FC 88 58 2A FC 69 38 28 FD 08 58 28 FD 29 38 2A 7C A5 B9 D6 FC 00 30 28 FD 4A 28 2A FF F6 02 32 7C 84 B9 D6 FF D4 00 32 FF B2 03 72 FF 90 01 32 FD 76 02 72 FC F4 02 B2 FC D2 03 32 FC B0 00 F2 FD 37 FA 7A FD 55 F2 BA FD 93 EB 3A FC 71 E0 FA FD 17 5A 38 FC 15 38 38 FD B3 33 78 FC 91 29 38 FC 42 D8 2A FC 21 D0 2A 7C 49 D5 AE 7C 2B D5 AE 7D 03 D5 AE 7D 27 D5 AE 7C 0A D5 AE 7D 46 D5 AE 7D BA 05 AE 7D 85 D5 AE 7C 88 D5 AE 7C 64 D5 AE 42 00 FD C4
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 75, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 75, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 75
+PASS: gsl_fft_real_float with signal_real_noise, n = 75, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 75, stride = 1
+. 0 1000FDC4 400
+. 1C F4 00 05 7F B9 C2 14 7C 7D C2 14 7E 94 DA 14 7D 75 38 50 7C E7 AA 14 7D 6B B2 14 38 E7 FF FF 39 6B FF FF 7C 16 3A 14 7C D6 5A 14 55 6A 10 3A 54 C9 10 3A 39 4A 00 04 39 29 00 04 7D 16 02 14 7D 29 E1 D6 54 05 10 3A 55 04 10 3A 38 A5 00 04 38 84 00 04 7D 4A E1 D6 7C 89 FC 2E 54 E9 10 3A FC 80 20 50 39 29 00 04 7C 1C 01 D6 7C 6A FC 2E FC 60 18 50 7D 1C 41 D6 54 00 10 3A 7C BF 04 2E 7C DC 31 D6 55 08 10 3A 7C C8 FC 2E 7D 7C 59 D6 54 C6 10 3A 7F E6 FC 2E 57 A6 10 3A 38 C6 00 04 ED 66 F8 2A 7C A5 E1 D6 55 6B 10 3A 7F CB FC 2E EC C6 F8 28 7D 63 C2 14 EC 05 F0 2A 55 6A 10 3A 7C 84 E1 D6 7C E5 FC 2E EC A5 F0 28 54 65 10 3A ED A7 18 2A 38 A5 00 04 EF 60 58 2A 39 4A 00 04 7D 04 FC 2E 7C FC 39 D6 EC 00 58 28 ED 88 20 2A FD 40 D8 90 7D 29 E1 D6 54 E7 10 3A EF 4D 60 2A 7C 47 FC 2E ED AD 60 28 7C EB C2 14 FD 4A 03 B2 54 E8 10 3A 7C 29 FC 2E 57 29 10 3A FD 20 D0 90 7C 17 C9 D6 39 29 00 04 39 08 00 04 FD 29 03 B2 7F 39 DA 14 7F B7 E9 D6 54 00 10 3A FD 80 10 90 FD 60 08 90 FC 00 03 F2 7C 77 19 D6 57 BD 10 3A FD AD 03 F2 ED 08 20 28 FD 8C 50 28 7D 77 59 D6 FD 6B 48 28 54 63 10 3A EC E7 18 28 FC 00 00 18 FD A0 68 18 7C F7 39 D6 FD 80 60 18 55 6B 10 3A FD 60 58 18 ED 39 01 B2 ED 59 02 32 7D 29 B9 D6
+
+. 0 1000FF54 208
+. EC D8 01 B2 54 E7 10 3A ED 18 02 32 ED 38 49 78 ED 58 51 F8 7C C6 B9 D6 EC 8C 00 28 EC 6B 68 28 EC B9 31 7A EC F9 41 FA 7C A5 B9 D6 ED 8C 00 2A ED 6B 68 2A EC 04 50 2A ED A3 48 28 7D 4A B9 D6 ED 0C 38 2A EC CB 28 28 ED 8C 38 28 ED 6B 28 2A 7D 08 B9 D6 EC 84 50 28 EC 63 48 2A ED 56 03 32 ED 34 01 32 EC F2 00 32 EC B0 02 32 EF F6 02 F2 EF D4 00 F2 EF B2 03 72 EF 90 01 B2 EC 42 D8 2A EC 21 D0 2A ED 77 52 FA EC 75 48 FA 7C 5A 05 2E ED B3 3B 7A 7C 29 D5 2E EC D1 29 BA ED 97 FB 38 EC 95 F1 38 EC 13 E8 38 ED 11 E2 38 7D 9D D5 2E 7D 66 D5 2E 7C 83 D5 2E 7C 65 D5 2E 7C 0B D5 2E 7D AA D5 2E 7D 07 D5 2E 7C C8 D5 2E 42 00 FD A4
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 75, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 75, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 75
+PASS: gsl_fft_complex_forward with signal_noise, n = 75, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 75, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 75, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 75, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 75, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 75, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 75, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 75, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 75, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 75, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 75
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 75, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 75, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 75, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 75, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 75, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 75, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 75, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 75, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 75, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 75, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 75
+PASS: gsl_fft_real with signal_real_noise, n = 75, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 75, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 75, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 75, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 75
+PASS: gsl_fft_real_float with signal_real_noise, n = 75, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 75, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 75, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 75, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 75
+PASS: gsl_fft_complex_forward with signal_noise, n = 75, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 75, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 75, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 75, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 75, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 75, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 75, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 75, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 75, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 75, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 75
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 75, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 75, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 75, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 75, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 75, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 75, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 75, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 75, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 75, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 75, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 75
+PASS: gsl_fft_real with signal_real_noise, n = 75, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 75, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 75, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 75, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 75
+PASS: gsl_fft_real_float with signal_real_noise, n = 75, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 75, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 75, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 76, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 76
+PASS: gsl_fft_complex_forward with signal_noise, n = 76, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 76, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 76, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 76, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 76, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 76, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 76, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 76
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 76, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 76, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 76, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 76, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 76, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 76, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 76, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 76
+PASS: gsl_fft_real with signal_real_noise, n = 76, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 76, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 76, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 76, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 76
+PASS: gsl_fft_real_float with signal_real_noise, n = 76, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 76, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 76, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 76, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 76
+PASS: gsl_fft_complex_forward with signal_noise, n = 76, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 76, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 76, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 76, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 76, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 76, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 76, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 76, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 76, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 76, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 76
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 76, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 76, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 76, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 76, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 76, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 76, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 76, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 76, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 76, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 76, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 76
+PASS: gsl_fft_real with signal_real_noise, n = 76, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 76, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 76, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 76, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 76
+PASS: gsl_fft_real_float with signal_real_noise, n = 76, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 76, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 76, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 76, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 76
+PASS: gsl_fft_complex_forward with signal_noise, n = 76, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 76, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 76, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 76, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 76, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 76, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 76, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 76, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 76, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 76, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 76
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 76, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 76, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 76, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 76, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 76, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 76, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 76, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 76, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 76, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 76, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 76
+PASS: gsl_fft_real with signal_real_noise, n = 76, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 76, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 76, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 76, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 76
+PASS: gsl_fft_real_float with signal_real_noise, n = 76, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 76, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 76, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 77, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 77
+PASS: gsl_fft_complex_forward with signal_noise, n = 77, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 77, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 77, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 77, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 77, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 77, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 77, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 77
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 77, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 77, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 77, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 77, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 77, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 77, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 77, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 77
+PASS: gsl_fft_real with signal_real_noise, n = 77, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 77, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 77, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 77, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 77
+PASS: gsl_fft_real_float with signal_real_noise, n = 77, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 77, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 77, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 77, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 77
+PASS: gsl_fft_complex_forward with signal_noise, n = 77, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 77, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 77, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 77, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 77, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 77, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 77, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 77, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 77, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 77, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 77
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 77, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 77, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 77, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 77, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 77, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 77, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 77, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 77, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 77, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 77, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 77
+PASS: gsl_fft_real with signal_real_noise, n = 77, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 77, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 77, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 77, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 77
+PASS: gsl_fft_real_float with signal_real_noise, n = 77, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 77, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 77, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 77, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 77
+PASS: gsl_fft_complex_forward with signal_noise, n = 77, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 77, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 77, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 77, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 77, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 77, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 77, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 77, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 77, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 77, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 77
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 77, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 77, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 77, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 77, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 77, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 77, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 77, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 77, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 77, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 77, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 77
+PASS: gsl_fft_real with signal_real_noise, n = 77, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 77, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 77, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 77, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 77
+PASS: gsl_fft_real_float with signal_real_noise, n = 77, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 77, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 77, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 78, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 78
+PASS: gsl_fft_complex_forward with signal_noise, n = 78, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 78, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 78, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 78, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 78, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 78, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 78, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 78
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 78, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 78, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 78, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 78, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 78, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 78, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 78, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 78
+PASS: gsl_fft_real with signal_real_noise, n = 78, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 78, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 78, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 78, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 78
+PASS: gsl_fft_real_float with signal_real_noise, n = 78, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 78, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 78, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 78, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 78
+PASS: gsl_fft_complex_forward with signal_noise, n = 78, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 78, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 78, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 78, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 78, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 78, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 78, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 78, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 78, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 78, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 78
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 78, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 78, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 78, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 78, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 78, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 78, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 78, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 78, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 78, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 78, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 78
+PASS: gsl_fft_real with signal_real_noise, n = 78, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 78, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 78, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 78, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 78
+PASS: gsl_fft_real_float with signal_real_noise, n = 78, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 78, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 78, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 78, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 78
+PASS: gsl_fft_complex_forward with signal_noise, n = 78, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 78, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 78, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 78, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 78, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 78, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 78, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 78, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 78, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 78, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 78
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 78, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 78, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 78, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 78, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 78, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 78, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 78, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 78, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 78, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 78, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 78
+PASS: gsl_fft_real with signal_real_noise, n = 78, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 78, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 78, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 78, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 78
+PASS: gsl_fft_real_float with signal_real_noise, n = 78, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 78, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 78, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 79, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 79
+PASS: gsl_fft_complex_forward with signal_noise, n = 79, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 79, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 79, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 79, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 79, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 79, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 79, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 79
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 79, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 79, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 79, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 79, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 79, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 79, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 79, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 79
+PASS: gsl_fft_real with signal_real_noise, n = 79, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 79, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 79, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 79, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 79
+PASS: gsl_fft_real_float with signal_real_noise, n = 79, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 79, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 79, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 79, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 79
+PASS: gsl_fft_complex_forward with signal_noise, n = 79, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 79, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 79, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 79, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 79, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 79, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 79, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 79, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 79, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 79, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 79
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 79, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 79, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 79, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 79, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 79, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 79, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 79, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 79, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 79, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 79, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 79
+PASS: gsl_fft_real with signal_real_noise, n = 79, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 79, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 79, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 79, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 79
+PASS: gsl_fft_real_float with signal_real_noise, n = 79, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 79, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 79, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 79, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 79
+PASS: gsl_fft_complex_forward with signal_noise, n = 79, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 79, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 79, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 79, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 79, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 79, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 79, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 79, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 79, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 79, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 79
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 79, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 79, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 79, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 79, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 79, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 79, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 79, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 79, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 79, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 79, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 79
+PASS: gsl_fft_real with signal_real_noise, n = 79, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 79, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 79, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 79, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 79
+PASS: gsl_fft_real_float with signal_real_noise, n = 79, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 79, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 79, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 80, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 80
+PASS: gsl_fft_complex_forward with signal_noise, n = 80, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 80, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 80, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 80, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 80, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 80, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 80, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 80
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 80, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 80, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 80, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 80, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 80, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 80, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 80, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 80
+PASS: gsl_fft_real with signal_real_noise, n = 80, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 80, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 80, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 80, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 80
+PASS: gsl_fft_real_float with signal_real_noise, n = 80, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 80, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 80, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 80, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 80
+PASS: gsl_fft_complex_forward with signal_noise, n = 80, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 80, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 80, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 80, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 80, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 80, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 80, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 80, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 80, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 80, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 80
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 80, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 80, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 80, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 80, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 80, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 80, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 80, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 80, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 80, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 80, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 80
+PASS: gsl_fft_real with signal_real_noise, n = 80, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 80, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 80, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 80, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 80
+PASS: gsl_fft_real_float with signal_real_noise, n = 80, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 80, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 80, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 80, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 80
+PASS: gsl_fft_complex_forward with signal_noise, n = 80, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 80, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 80, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 80, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 80, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 80, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 80, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 80, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 80, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 80, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 80
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 80, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 80, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 80, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 80, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 80, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 80, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 80, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 80, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 80, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 80, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 80
+PASS: gsl_fft_real with signal_real_noise, n = 80, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 80, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 80, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 80, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 80
+PASS: gsl_fft_real_float with signal_real_noise, n = 80, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 80, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 80, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 81, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 81
+PASS: gsl_fft_complex_forward with signal_noise, n = 81, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 81, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 81, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 81, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 81, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 81, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 81, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 81
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 81, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 81, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 81, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 81, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 81, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 81, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 81, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 81
+PASS: gsl_fft_real with signal_real_noise, n = 81, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 81, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 81, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 81, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 81
+PASS: gsl_fft_real_float with signal_real_noise, n = 81, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 81, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 81, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 81, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 81
+PASS: gsl_fft_complex_forward with signal_noise, n = 81, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 81, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 81, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 81, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 81, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 81, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 81, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 81, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 81, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 81, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 81
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 81, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 81, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 81, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 81, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 81, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 81, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 81, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 81, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 81, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 81, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 81
+PASS: gsl_fft_real with signal_real_noise, n = 81, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 81, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 81, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 81, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 81
+PASS: gsl_fft_real_float with signal_real_noise, n = 81, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 81, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 81, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 81, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 81
+PASS: gsl_fft_complex_forward with signal_noise, n = 81, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 81, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 81, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 81, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 81, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 81, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 81, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 81, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 81, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 81, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 81
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 81, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 81, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 81, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 81, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 81, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 81, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 81, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 81, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 81, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 81, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 81
+PASS: gsl_fft_real with signal_real_noise, n = 81, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 81, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 81, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 81, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 81
+PASS: gsl_fft_real_float with signal_real_noise, n = 81, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 81, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 81, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 82, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 82
+PASS: gsl_fft_complex_forward with signal_noise, n = 82, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 82, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 82, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 82, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 82, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 82, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 82, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 82
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 82, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 82, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 82, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 82, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 82, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 82, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 82, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 82
+PASS: gsl_fft_real with signal_real_noise, n = 82, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 82, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 82, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 82, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 82
+PASS: gsl_fft_real_float with signal_real_noise, n = 82, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 82, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 82, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 82, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 82
+PASS: gsl_fft_complex_forward with signal_noise, n = 82, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 82, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 82, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 82, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 82, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 82, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 82, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 82, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 82, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 82, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 82
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 82, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 82, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 82, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 82, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 82, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 82, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 82, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 82, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 82, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 82, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 82
+PASS: gsl_fft_real with signal_real_noise, n = 82, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 82, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 82, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 82, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 82
+PASS: gsl_fft_real_float with signal_real_noise, n = 82, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 82, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 82, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 82, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 82
+PASS: gsl_fft_complex_forward with signal_noise, n = 82, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 82, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 82, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 82, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 82, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 82, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 82, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 82, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 82, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 82, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 82
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 82, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 82, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 82, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 82, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 82, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 82, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 82, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 82, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 82, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 82, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 82
+PASS: gsl_fft_real with signal_real_noise, n = 82, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 82, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 82, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 82, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 82
+PASS: gsl_fft_real_float with signal_real_noise, n = 82, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 82, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 82, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 83, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 83
+PASS: gsl_fft_complex_forward with signal_noise, n = 83, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 83, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 83, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 83, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 83, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 83, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 83, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 83
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 83, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 83, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 83, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 83, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 83, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 83, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 83, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 83
+PASS: gsl_fft_real with signal_real_noise, n = 83, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 83, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 83, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 83, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 83
+PASS: gsl_fft_real_float with signal_real_noise, n = 83, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 83, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 83, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 83, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 83
+PASS: gsl_fft_complex_forward with signal_noise, n = 83, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 83, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 83, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 83, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 83, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 83, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 83, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 83, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 83, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 83, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 83
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 83, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 83, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 83, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 83, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 83, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 83, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 83, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 83, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 83, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 83, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 83
+PASS: gsl_fft_real with signal_real_noise, n = 83, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 83, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 83, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 83, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 83
+PASS: gsl_fft_real_float with signal_real_noise, n = 83, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 83, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 83, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 83, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 83
+PASS: gsl_fft_complex_forward with signal_noise, n = 83, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 83, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 83, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 83, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 83, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 83, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 83, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 83, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 83, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 83, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 83
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 83, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 83, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 83, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 83, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 83, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 83, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 83, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 83, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 83, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 83, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 83
+PASS: gsl_fft_real with signal_real_noise, n = 83, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 83, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 83, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 83, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 83
+PASS: gsl_fft_real_float with signal_real_noise, n = 83, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 83, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 83, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 84, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 84
+PASS: gsl_fft_complex_forward with signal_noise, n = 84, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 84, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 84, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 84, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 84, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 84, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 84, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 84
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 84, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 84, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 84, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 84, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 84, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 84, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 84, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 84
+PASS: gsl_fft_real with signal_real_noise, n = 84, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 84, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 84, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 84, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 84
+PASS: gsl_fft_real_float with signal_real_noise, n = 84, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 84, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 84, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 84, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 84
+PASS: gsl_fft_complex_forward with signal_noise, n = 84, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 84, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 84, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 84, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 84, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 84, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 84, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 84, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 84, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 84, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 84
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 84, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 84, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 84, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 84, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 84, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 84, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 84, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 84, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 84, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 84, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 84
+PASS: gsl_fft_real with signal_real_noise, n = 84, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 84, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 84, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 84, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 84
+PASS: gsl_fft_real_float with signal_real_noise, n = 84, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 84, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 84, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 84, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 84
+PASS: gsl_fft_complex_forward with signal_noise, n = 84, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 84, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 84, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 84, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 84, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 84, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 84, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 84, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 84, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 84, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 84
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 84, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 84, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 84, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 84, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 84, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 84, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 84, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 84, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 84, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 84, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 84
+PASS: gsl_fft_real with signal_real_noise, n = 84, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 84, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 84, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 84, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 84
+PASS: gsl_fft_real_float with signal_real_noise, n = 84, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 84, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 84, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 85, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 85
+PASS: gsl_fft_complex_forward with signal_noise, n = 85, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 85, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 85, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 85, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 85, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 85, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 85, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 85
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 85, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 85, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 85, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 85, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 85, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 85, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 85, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 85
+PASS: gsl_fft_real with signal_real_noise, n = 85, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 85, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 85, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 85, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 85
+PASS: gsl_fft_real_float with signal_real_noise, n = 85, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 85, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 85, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 85, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 85
+PASS: gsl_fft_complex_forward with signal_noise, n = 85, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 85, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 85, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 85, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 85, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 85, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 85, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 85, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 85, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 85, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 85
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 85, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 85, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 85, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 85, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 85, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 85, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 85, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 85, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 85, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 85, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 85
+PASS: gsl_fft_real with signal_real_noise, n = 85, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 85, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 85, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 85, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 85
+PASS: gsl_fft_real_float with signal_real_noise, n = 85, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 85, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 85, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 85, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 85
+PASS: gsl_fft_complex_forward with signal_noise, n = 85, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 85, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 85, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 85, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 85, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 85, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 85, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 85, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 85, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 85, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 85
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 85, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 85, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 85, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 85, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 85, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 85, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 85, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 85, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 85, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 85, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 85
+PASS: gsl_fft_real with signal_real_noise, n = 85, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 85, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 85, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 85, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 85
+PASS: gsl_fft_real_float with signal_real_noise, n = 85, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 85, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 85, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 86, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 86
+PASS: gsl_fft_complex_forward with signal_noise, n = 86, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 86, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 86, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 86, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 86, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 86, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 86, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 86
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 86, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 86, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 86, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 86, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 86, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 86, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 86, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 86
+PASS: gsl_fft_real with signal_real_noise, n = 86, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 86, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 86, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 86, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 86
+PASS: gsl_fft_real_float with signal_real_noise, n = 86, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 86, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 86, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 86, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 86
+PASS: gsl_fft_complex_forward with signal_noise, n = 86, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 86, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 86, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 86, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 86, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 86, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 86, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 86, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 86, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 86, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 86
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 86, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 86, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 86, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 86, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 86, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 86, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 86, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 86, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 86, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 86, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 86
+PASS: gsl_fft_real with signal_real_noise, n = 86, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 86, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 86, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 86, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 86
+PASS: gsl_fft_real_float with signal_real_noise, n = 86, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 86, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 86, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 86, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 86
+PASS: gsl_fft_complex_forward with signal_noise, n = 86, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 86, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 86, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 86, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 86, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 86, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 86, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 86, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 86, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 86, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 86
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 86, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 86, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 86, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 86, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 86, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 86, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 86, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 86, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 86, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 86, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 86
+PASS: gsl_fft_real with signal_real_noise, n = 86, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 86, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 86, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 86, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 86
+PASS: gsl_fft_real_float with signal_real_noise, n = 86, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 86, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 86, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 87, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 87
+PASS: gsl_fft_complex_forward with signal_noise, n = 87, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 87, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 87, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 87, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 87, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 87, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 87, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 87
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 87, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 87, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 87, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 87, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 87, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 87, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 87, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 87
+PASS: gsl_fft_real with signal_real_noise, n = 87, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 87, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 87, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 87, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 87
+PASS: gsl_fft_real_float with signal_real_noise, n = 87, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 87, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 87, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 87, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 87
+PASS: gsl_fft_complex_forward with signal_noise, n = 87, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 87, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 87, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 87, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 87, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 87, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 87, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 87, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 87, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 87, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 87
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 87, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 87, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 87, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 87, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 87, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 87, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 87, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 87, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 87, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 87, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 87
+PASS: gsl_fft_real with signal_real_noise, n = 87, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 87, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 87, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 87, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 87
+PASS: gsl_fft_real_float with signal_real_noise, n = 87, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 87, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 87, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 87, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 87
+PASS: gsl_fft_complex_forward with signal_noise, n = 87, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 87, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 87, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 87, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 87, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 87, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 87, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 87, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 87, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 87, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 87
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 87, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 87, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 87, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 87, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 87, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 87, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 87, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 87, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 87, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 87, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 87
+PASS: gsl_fft_real with signal_real_noise, n = 87, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 87, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 87, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 87, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 87
+PASS: gsl_fft_real_float with signal_real_noise, n = 87, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 87, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 87, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 88, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 88
+PASS: gsl_fft_complex_forward with signal_noise, n = 88, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 88, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 88, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 88, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 88, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 88, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 88, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 88
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 88, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 88, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 88, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 88, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 88, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 88, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 88, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 88
+PASS: gsl_fft_real with signal_real_noise, n = 88, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 88, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 88, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 88, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 88
+PASS: gsl_fft_real_float with signal_real_noise, n = 88, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 88, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 88, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 88, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 88
+PASS: gsl_fft_complex_forward with signal_noise, n = 88, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 88, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 88, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 88, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 88, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 88, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 88, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 88, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 88, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 88, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 88
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 88, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 88, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 88, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 88, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 88, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 88, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 88, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 88, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 88, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 88, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 88
+PASS: gsl_fft_real with signal_real_noise, n = 88, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 88, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 88, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 88, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 88
+PASS: gsl_fft_real_float with signal_real_noise, n = 88, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 88, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 88, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 88, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 88
+PASS: gsl_fft_complex_forward with signal_noise, n = 88, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 88, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 88, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 88, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 88, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 88, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 88, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 88, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 88, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 88, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 88
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 88, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 88, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 88, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 88, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 88, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 88, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 88, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 88, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 88, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 88, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 88
+PASS: gsl_fft_real with signal_real_noise, n = 88, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 88, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 88, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 88, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 88
+PASS: gsl_fft_real_float with signal_real_noise, n = 88, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 88, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 88, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 89, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 89
+PASS: gsl_fft_complex_forward with signal_noise, n = 89, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 89, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 89, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 89, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 89, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 89, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 89, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 89
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 89, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 89, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 89, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 89, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 89, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 89, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 89, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 89
+PASS: gsl_fft_real with signal_real_noise, n = 89, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 89, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 89, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 89, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 89
+PASS: gsl_fft_real_float with signal_real_noise, n = 89, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 89, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 89, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 89, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 89
+PASS: gsl_fft_complex_forward with signal_noise, n = 89, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 89, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 89, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 89, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 89, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 89, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 89, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 89, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 89, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 89, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 89
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 89, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 89, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 89, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 89, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 89, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 89, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 89, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 89, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 89, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 89, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 89
+PASS: gsl_fft_real with signal_real_noise, n = 89, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 89, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 89, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 89, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 89
+PASS: gsl_fft_real_float with signal_real_noise, n = 89, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 89, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 89, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 89, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 89
+PASS: gsl_fft_complex_forward with signal_noise, n = 89, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 89, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 89, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 89, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 89, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 89, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 89, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 89, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 89, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 89, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 89
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 89, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 89, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 89, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 89, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 89, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 89, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 89, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 89, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 89, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 89, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 89
+PASS: gsl_fft_real with signal_real_noise, n = 89, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 89, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 89, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 89, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 89
+PASS: gsl_fft_real_float with signal_real_noise, n = 89, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 89, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 89, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 90, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 90
+PASS: gsl_fft_complex_forward with signal_noise, n = 90, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 90, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 90, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 90, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 90, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 90, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 90, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 90
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 90, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 90, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 90, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 90, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 90, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 90, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 90, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 90
+PASS: gsl_fft_real with signal_real_noise, n = 90, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 90, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 90, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 90, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 90
+PASS: gsl_fft_real_float with signal_real_noise, n = 90, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 90, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 90, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 90, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 90
+PASS: gsl_fft_complex_forward with signal_noise, n = 90, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 90, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 90, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 90, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 90, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 90, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 90, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 90, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 90, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 90, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 90
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 90, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 90, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 90, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 90, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 90, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 90, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 90, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 90, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 90, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 90, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 90
+PASS: gsl_fft_real with signal_real_noise, n = 90, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 90, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 90, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 90, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 90
+PASS: gsl_fft_real_float with signal_real_noise, n = 90, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 90, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 90, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 90, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 90
+PASS: gsl_fft_complex_forward with signal_noise, n = 90, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 90, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 90, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 90, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 90, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 90, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 90, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 90, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 90, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 90, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 90
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 90, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 90, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 90, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 90, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 90, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 90, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 90, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 90, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 90, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 90, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 90
+PASS: gsl_fft_real with signal_real_noise, n = 90, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 90, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 90, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 90, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 90
+PASS: gsl_fft_real_float with signal_real_noise, n = 90, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 90, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 90, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 91, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 91
+PASS: gsl_fft_complex_forward with signal_noise, n = 91, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 91, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 91, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 91, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 91, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 91, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 91, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 91
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 91, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 91, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 91, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 91, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 91, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 91, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 91, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 91
+PASS: gsl_fft_real with signal_real_noise, n = 91, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 91, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 91, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 91, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 91
+PASS: gsl_fft_real_float with signal_real_noise, n = 91, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 91, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 91, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 91, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 91
+PASS: gsl_fft_complex_forward with signal_noise, n = 91, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 91, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 91, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 91, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 91, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 91, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 91, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 91, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 91, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 91, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 91
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 91, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 91, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 91, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 91, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 91, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 91, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 91, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 91, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 91, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 91, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 91
+PASS: gsl_fft_real with signal_real_noise, n = 91, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 91, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 91, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 91, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 91
+PASS: gsl_fft_real_float with signal_real_noise, n = 91, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 91, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 91, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 91, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 91
+PASS: gsl_fft_complex_forward with signal_noise, n = 91, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 91, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 91, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 91, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 91, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 91, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 91, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 91, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 91, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 91, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 91
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 91, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 91, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 91, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 91, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 91, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 91, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 91, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 91, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 91, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 91, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 91
+PASS: gsl_fft_real with signal_real_noise, n = 91, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 91, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 91, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 91, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 91
+PASS: gsl_fft_real_float with signal_real_noise, n = 91, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 91, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 91, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 92, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 92
+PASS: gsl_fft_complex_forward with signal_noise, n = 92, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 92, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 92, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 92, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 92, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 92, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 92, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 92
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 92, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 92, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 92, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 92, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 92, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 92, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 92, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 92
+PASS: gsl_fft_real with signal_real_noise, n = 92, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 92, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 92, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 92, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 92
+PASS: gsl_fft_real_float with signal_real_noise, n = 92, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 92, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 92, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 92, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 92
+PASS: gsl_fft_complex_forward with signal_noise, n = 92, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 92, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 92, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 92, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 92, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 92, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 92, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 92, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 92, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 92, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 92
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 92, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 92, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 92, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 92, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 92, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 92, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 92, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 92, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 92, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 92, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 92
+PASS: gsl_fft_real with signal_real_noise, n = 92, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 92, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 92, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 92, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 92
+PASS: gsl_fft_real_float with signal_real_noise, n = 92, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 92, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 92, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 92, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 92
+PASS: gsl_fft_complex_forward with signal_noise, n = 92, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 92, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 92, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 92, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 92, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 92, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 92, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 92, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 92, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 92, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 92
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 92, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 92, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 92, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 92, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 92, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 92, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 92, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 92, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 92, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 92, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 92
+PASS: gsl_fft_real with signal_real_noise, n = 92, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 92, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 92, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 92, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 92
+PASS: gsl_fft_real_float with signal_real_noise, n = 92, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 92, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 92, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 93, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 93
+PASS: gsl_fft_complex_forward with signal_noise, n = 93, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 93, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 93, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 93, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 93, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 93, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 93, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 93
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 93, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 93, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 93, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 93, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 93, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 93, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 93, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 93
+PASS: gsl_fft_real with signal_real_noise, n = 93, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 93, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 93, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 93, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 93
+PASS: gsl_fft_real_float with signal_real_noise, n = 93, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 93, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 93, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 93, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 93
+PASS: gsl_fft_complex_forward with signal_noise, n = 93, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 93, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 93, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 93, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 93, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 93, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 93, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 93, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 93, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 93, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 93
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 93, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 93, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 93, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 93, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 93, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 93, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 93, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 93, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 93, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 93, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 93
+PASS: gsl_fft_real with signal_real_noise, n = 93, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 93, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 93, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 93, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 93
+PASS: gsl_fft_real_float with signal_real_noise, n = 93, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 93, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 93, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 93, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 93
+PASS: gsl_fft_complex_forward with signal_noise, n = 93, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 93, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 93, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 93, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 93, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 93, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 93, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 93, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 93, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 93, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 93
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 93, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 93, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 93, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 93, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 93, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 93, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 93, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 93, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 93, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 93, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 93
+PASS: gsl_fft_real with signal_real_noise, n = 93, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 93, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 93, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 93, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 93
+PASS: gsl_fft_real_float with signal_real_noise, n = 93, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 93, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 93, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 94, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 94
+PASS: gsl_fft_complex_forward with signal_noise, n = 94, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 94, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 94, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 94, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 94, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 94, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 94, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 94
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 94, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 94, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 94, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 94, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 94, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 94, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 94, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 94
+PASS: gsl_fft_real with signal_real_noise, n = 94, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 94, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 94, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 94, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 94
+PASS: gsl_fft_real_float with signal_real_noise, n = 94, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 94, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 94, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 94, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 94
+PASS: gsl_fft_complex_forward with signal_noise, n = 94, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 94, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 94, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 94, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 94, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 94, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 94, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 94, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 94, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 94, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 94
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 94, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 94, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 94, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 94, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 94, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 94, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 94, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 94, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 94, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 94, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 94
+PASS: gsl_fft_real with signal_real_noise, n = 94, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 94, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 94, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 94, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 94
+PASS: gsl_fft_real_float with signal_real_noise, n = 94, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 94, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 94, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 94, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 94
+PASS: gsl_fft_complex_forward with signal_noise, n = 94, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 94, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 94, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 94, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 94, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 94, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 94, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 94, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 94, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 94, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 94
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 94, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 94, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 94, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 94, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 94, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 94, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 94, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 94, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 94, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 94, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 94
+PASS: gsl_fft_real with signal_real_noise, n = 94, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 94, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 94, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 94, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 94
+PASS: gsl_fft_real_float with signal_real_noise, n = 94, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 94, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 94, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 95, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 95
+PASS: gsl_fft_complex_forward with signal_noise, n = 95, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 95, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 95, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 95, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 95, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 95, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 95, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 95
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 95, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 95, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 95, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 95, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 95, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 95, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 95, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 95
+PASS: gsl_fft_real with signal_real_noise, n = 95, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 95, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 95, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 95, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 95
+PASS: gsl_fft_real_float with signal_real_noise, n = 95, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 95, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 95, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 95, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 95
+PASS: gsl_fft_complex_forward with signal_noise, n = 95, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 95, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 95, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 95, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 95, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 95, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 95, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 95, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 95, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 95, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 95
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 95, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 95, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 95, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 95, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 95, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 95, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 95, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 95, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 95, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 95, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 95
+PASS: gsl_fft_real with signal_real_noise, n = 95, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 95, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 95, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 95, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 95
+PASS: gsl_fft_real_float with signal_real_noise, n = 95, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 95, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 95, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 95, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 95
+PASS: gsl_fft_complex_forward with signal_noise, n = 95, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 95, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 95, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 95, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 95, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 95, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 95, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 95, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 95, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 95, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 95
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 95, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 95, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 95, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 95, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 95, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 95, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 95, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 95, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 95, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 95, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 95
+PASS: gsl_fft_real with signal_real_noise, n = 95, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 95, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 95, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 95, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 95
+PASS: gsl_fft_real_float with signal_real_noise, n = 95, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 95, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 95, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 96, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 96
+PASS: gsl_fft_complex_forward with signal_noise, n = 96, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 96, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 96, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 96, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 96, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 96, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 96, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 96
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 96, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 96, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 96, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 96, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 96, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 96, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 96, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 96
+PASS: gsl_fft_real with signal_real_noise, n = 96, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 96, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 96, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 96, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 96
+PASS: gsl_fft_real_float with signal_real_noise, n = 96, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 96, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 96, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 96, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 96
+PASS: gsl_fft_complex_forward with signal_noise, n = 96, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 96, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 96, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 96, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 96, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 96, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 96, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 96, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 96, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 96, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 96
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 96, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 96, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 96, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 96, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 96, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 96, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 96, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 96, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 96, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 96, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 96
+PASS: gsl_fft_real with signal_real_noise, n = 96, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 96, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 96, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 96, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 96
+PASS: gsl_fft_real_float with signal_real_noise, n = 96, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 96, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 96, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 96, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 96
+PASS: gsl_fft_complex_forward with signal_noise, n = 96, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 96, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 96, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 96, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 96, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 96, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 96, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 96, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 96, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 96, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 96
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 96, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 96, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 96, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 96, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 96, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 96, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 96, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 96, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 96, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 96, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 96
+PASS: gsl_fft_real with signal_real_noise, n = 96, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 96, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 96, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 96, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 96
+PASS: gsl_fft_real_float with signal_real_noise, n = 96, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 96, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 96, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 97, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 97
+PASS: gsl_fft_complex_forward with signal_noise, n = 97, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 97, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 97, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 97, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 97, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 97, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 97, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 97
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 97, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 97, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 97, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 97, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 97, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 97, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 97, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 97
+PASS: gsl_fft_real with signal_real_noise, n = 97, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 97, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 97, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 97, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 97
+PASS: gsl_fft_real_float with signal_real_noise, n = 97, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 97, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 97, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 97, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 97
+PASS: gsl_fft_complex_forward with signal_noise, n = 97, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 97, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 97, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 97, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 97, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 97, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 97, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 97, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 97, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 97, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 97
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 97, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 97, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 97, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 97, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 97, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 97, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 97, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 97, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 97, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 97, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 97
+PASS: gsl_fft_real with signal_real_noise, n = 97, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 97, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 97, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 97, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 97
+PASS: gsl_fft_real_float with signal_real_noise, n = 97, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 97, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 97, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 97, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 97
+PASS: gsl_fft_complex_forward with signal_noise, n = 97, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 97, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 97, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 97, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 97, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 97, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 97, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 97, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 97, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 97, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 97
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 97, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 97, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 97, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 97, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 97, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 97, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 97, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 97, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 97, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 97, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 97
+PASS: gsl_fft_real with signal_real_noise, n = 97, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 97, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 97, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 97, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 97
+PASS: gsl_fft_real_float with signal_real_noise, n = 97, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 97, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 97, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 98, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 98
+PASS: gsl_fft_complex_forward with signal_noise, n = 98, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 98, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 98, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 98, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 98, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 98, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 98, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 98
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 98, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 98, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 98, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 98, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 98, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 98, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 98, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 98
+. 0 10011C00 20
+. 38 E0 00 00 C8 77 00 00 7F 87 D8 40 C8 96 00 00 40 9C 00 FC
+
+PASS: gsl_fft_real with signal_real_noise, n = 98, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 98, stride = 1
+. 0 1000CBE0 20
+. 38 60 00 00 C8 B9 00 00 7F 83 F8 40 C8 D8 00 00 40 9C 01 24
+
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 98, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 98, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 98
+. 0 10013C30 20
+. 38 E0 00 00 C0 77 00 00 7F 87 D8 40 C0 96 00 00 40 9C 00 FC
+
+PASS: gsl_fft_real_float with signal_real_noise, n = 98, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 98, stride = 1
+. 0 1000EDA8 20
+. 38 60 00 00 C0 B9 00 00 7F 83 E8 40 C0 D8 00 00 40 9C 01 24
+
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 98, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 98, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 98
+PASS: gsl_fft_complex_forward with signal_noise, n = 98, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 98, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 98, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 98, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 98, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 98, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 98, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 98, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 98, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 98, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 98
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 98, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 98, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 98, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 98, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 98, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 98, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 98, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 98, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 98, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 98, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 98
+PASS: gsl_fft_real with signal_real_noise, n = 98, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 98, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 98, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 98, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 98
+PASS: gsl_fft_real_float with signal_real_noise, n = 98, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 98, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 98, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 98, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 98
+PASS: gsl_fft_complex_forward with signal_noise, n = 98, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 98, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 98, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 98, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 98, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 98, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 98, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 98, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 98, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 98, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 98
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 98, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 98, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 98, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 98, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 98, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 98, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 98, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 98, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 98, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 98, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 98
+PASS: gsl_fft_real with signal_real_noise, n = 98, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 98, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 98, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 98, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 98
+PASS: gsl_fft_real_float with signal_real_noise, n = 98, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 98, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 98, stride = 3
+PASS: gsl_fft_complex_wavetable_alloc, n = 99, stride = 1
+PASS: gsl_fft_complex_workspace_alloc, n = 99
+PASS: gsl_fft_complex_forward with signal_noise, n = 99, stride = 1
+PASS: gsl_fft_complex_inverse with signal_noise, n = 99, stride = 1
+PASS: gsl_fft_complex_backward with signal_noise, n = 99, stride = 1
+PASS: gsl_fft_complex_forward with signal_pulse, n = 99, stride = 1
+PASS: gsl_fft_complex_forward with signal_constant, n = 99, stride = 1
+PASS: gsl_fft_complex_forward with signal_exp, n = 99, stride = 1
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 99, stride = 1
+PASS: gsl_fft_complex_workspace_float_alloc, n = 99
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 99, stride = 1
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 99, stride = 1
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 99, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 99, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 99, stride = 1
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 99, stride = 1
+PASS: gsl_fft_real_wavetable_alloc, n = 99, stride = 1
+PASS: gsl_fft_real_workspace_alloc, n = 99
+PASS: gsl_fft_real with signal_real_noise, n = 99, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 99, stride = 1
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 99, stride = 1
+PASS: gsl_fft_real_wavetable_float_alloc, n = 99, stride = 1
+PASS: gsl_fft_real_workspace_float_alloc, n = 99
+PASS: gsl_fft_real_float with signal_real_noise, n = 99, stride = 1
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 99, stride = 1
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 99, stride = 1
+PASS: gsl_fft_complex_wavetable_alloc, n = 99, stride = 2
+PASS: gsl_fft_complex_workspace_alloc, n = 99
+PASS: gsl_fft_complex_forward with signal_noise, n = 99, stride = 2
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 99, stride = 2
+PASS: gsl_fft_complex_inverse with signal_noise, n = 99, stride = 2
+PASS: gsl_fft_complex_inverse other data untouched, n = 99, stride = 2
+PASS: gsl_fft_complex_backward with signal_noise, n = 99, stride = 2
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 99, stride = 2
+PASS: gsl_fft_complex_forward with signal_pulse, n = 99, stride = 2
+PASS: gsl_fft_complex_forward with signal_constant, n = 99, stride = 2
+PASS: gsl_fft_complex_forward with signal_exp, n = 99, stride = 2
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 99, stride = 2
+PASS: gsl_fft_complex_workspace_float_alloc, n = 99
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 99, stride = 2
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 99, stride = 2
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 99, stride = 2
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 99, stride = 2
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 99, stride = 2
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 99, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 99, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 99, stride = 2
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 99, stride = 2
+PASS: gsl_fft_real_wavetable_alloc, n = 99, stride = 2
+PASS: gsl_fft_real_workspace_alloc, n = 99
+PASS: gsl_fft_real with signal_real_noise, n = 99, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 99, stride = 2
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 99, stride = 2
+PASS: gsl_fft_real_wavetable_float_alloc, n = 99, stride = 2
+PASS: gsl_fft_real_workspace_float_alloc, n = 99
+PASS: gsl_fft_real_float with signal_real_noise, n = 99, stride = 2
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 99, stride = 2
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 99, stride = 2
+PASS: gsl_fft_complex_wavetable_alloc, n = 99, stride = 3
+PASS: gsl_fft_complex_workspace_alloc, n = 99
+PASS: gsl_fft_complex_forward with signal_noise, n = 99, stride = 3
+PASS: gsl_fft_complex_forward avoids unstrided data, n = 99, stride = 3
+PASS: gsl_fft_complex_inverse with signal_noise, n = 99, stride = 3
+PASS: gsl_fft_complex_inverse other data untouched, n = 99, stride = 3
+PASS: gsl_fft_complex_backward with signal_noise, n = 99, stride = 3
+PASS: gsl_fft_complex_backward avoids unstrided data, n = 99, stride = 3
+PASS: gsl_fft_complex_forward with signal_pulse, n = 99, stride = 3
+PASS: gsl_fft_complex_forward with signal_constant, n = 99, stride = 3
+PASS: gsl_fft_complex_forward with signal_exp, n = 99, stride = 3
+PASS: gsl_fft_complex_wavetable_float_alloc, n = 99, stride = 3
+PASS: gsl_fft_complex_workspace_float_alloc, n = 99
+PASS: gsl_fft_complex_float_forward with signal_noise, n = 99, stride = 3
+PASS: gsl_fft_complex_float_forward avoids unstrided data, n = 99, stride = 3
+PASS: gsl_fft_complex_float_inverse with signal_noise, n = 99, stride = 3
+PASS: gsl_fft_complex_float_inverse other data untouched, n = 99, stride = 3
+PASS: gsl_fft_complex_float_backward with signal_noise, n = 99, stride = 3
+PASS: gsl_fft_complex_float_backward avoids unstrided data, n = 99, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_pulse, n = 99, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_constant, n = 99, stride = 3
+PASS: gsl_fft_complex_float_forward with signal_exp, n = 99, stride = 3
+PASS: gsl_fft_real_wavetable_alloc, n = 99, stride = 3
+PASS: gsl_fft_real_workspace_alloc, n = 99
+PASS: gsl_fft_real with signal_real_noise, n = 99, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_alloc, n = 99, stride = 3
+PASS: gsl_fft_halfcomplex with data from signal_noise, n = 99, stride = 3
+PASS: gsl_fft_real_wavetable_float_alloc, n = 99, stride = 3
+PASS: gsl_fft_real_workspace_float_alloc, n = 99
+PASS: gsl_fft_real_float with signal_real_noise, n = 99, stride = 3
+PASS: gsl_fft_halfcomplex_wavetable_float_alloc, n = 99, stride = 3
+PASS: gsl_fft_halfcomplex_float with data from signal_noise, n = 99, stride = 3
+. 0 10004744 12
+. 3C 60 10 00 38 63 47 88 48 01 0C A9
+
+. 0 10004750 4
+. 4B FF DB B5
+
+. 0 10002304 56
+. 7C 08 02 A6 94 21 FF C0 38 60 00 00 92 C1 00 18 92 E1 00 1C 90 01 00 44 93 01 00 20 3B 01 00 08 93 21 00 24 93 41 00 28 93 61 00 2C 93 81 00 30 93 A1 00 34 48 00 45 09
+
+. 0 10006A98 20
+. 80 7E 80 00 38 A0 00 21 80 9E 80 14 38 C0 00 01 48 00 E8 B5
+
+. 0 1001535C 56
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 A1 00 14 90 01 00 24 80 1E FF F0 7F C0 F2 14 7C 60 1B 78 81 3E 80 00 81 29 00 00 2F 89 00 00 41 9E 00 24
+
+. 0 10015394 8
+. 7D 28 03 A6 4E 80 00 21
+
+. 0 10004788 4
+. 4E 80 00 20
+
+. 0 1001539C 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10006AAC 8
+. 38 60 00 00 4B FF FF 7C
+
+. 0 10006A2C 108
+. 80 01 00 94 81 C1 00 20 81 E1 00 24 7C 08 03 A6 82 01 00 28 82 21 00 2C 82 41 00 30 82 61 00 34 82 81 00 38 82 A1 00 3C 82 C1 00 40 82 E1 00 44 83 01 00 48 83 21 00 4C 83 41 00 50 83 61 00 54 83 81 00 58 83 A1 00 5C 83 C1 00 60 83 E1 00 64 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 1000233C 24
+. 30 03 FF FF 7F 80 19 10 3C 80 10 01 38 84 70 B8 7F 83 E3 78 48 01 32 99
+
+PASS: trap for n = 0 in gsl_fft_complex_wavetable_alloc
+. 0 10002354 8
+. 38 60 00 00 48 00 E5 ED
+
+. 0 10010BD0 20
+. 80 7E 80 00 38 A0 00 21 80 9E 82 40 38 C0 00 01 48 00 47 7D
+
+. 0 10010BE4 8
+. 38 60 00 00 4B FF FF 64
+
+. 0 1000235C 24
+. 3C 80 10 01 30 03 FF FF 7F A0 19 10 38 84 70 EC 7F A3 EB 78 48 01 32 79
+
+. 0 1001563C 44
+. D8 21 00 28 D8 41 00 30 D8 61 00 38 D8 81 00 40 D8 A1 00 48 D8 C1 00 50 D8 E1 00 58 D9 01 00 60 91 7D 00 00 7C 9D 23 78 40 9E 00 54
+
+. 0 FECAE1C 68
+. D8 21 00 28 D8 41 00 30 D8 61 00 38 D8 81 00 40 D8 A1 00 48 D8 C1 00 50 D8 E1 00 58 D9 01 00 60 38 C1 00 98 39 21 00 08 39 00 00 01 38 E0 00 00 99 01 00 70 98 E1 00 71 90 C1 00 74 91 21 00 78 4B FF 80 79
+
+PASS: trap for n = 0 in gsl_fft_real_wavetable_alloc
+. 0 10002374 8
+. 38 60 00 00 48 00 A1 C9
+
+. 0 1000C7B4 20
+. 80 7E 80 00 38 A0 00 21 80 9E 81 50 38 C0 00 01 48 00 8B 99
+
+. 0 1000C7C8 8
+. 38 60 00 00 4B FF FF 74
+
+. 0 1000237C 24
+. 3C 80 10 01 31 23 FF FF 7C 09 19 10 38 84 71 1C 7C 03 03 78 48 01 32 59
+
+PASS: trap for n = 0 in gsl_fft_halfcomplex_wavetable_alloc
+. 0 10002394 8
+. 38 60 00 00 48 00 47 C1
+
+. 0 10006BEC 12
+. 80 7E 80 00 80 9E 80 14 48 00 E7 69
+
+. 0 10006BF8 36
+. 38 00 00 00 7C 03 03 78 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 1000239C 16
+. 3C 80 10 01 7F 83 E3 78 38 84 71 54 48 01 32 41
+
+PASS: trap for n = 0 in gsl_fft_complex_workspace_alloc
+. 0 100023AC 8
+. 38 60 00 00 48 00 E8 DD
+
+. 0 10010D20 12
+. 80 7E 80 00 80 9E 82 40 48 00 46 35
+
+. 0 10010D2C 36
+. 38 00 00 00 7C 03 03 78 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 100023B4 16
+. 3C 80 10 01 38 84 71 88 7F A3 EB 78 48 01 32 29
+
+PASS: trap for n = 0 in gsl_fft_real_workspace_alloc
+. 0 100023C4 8
+. 38 60 00 0A 48 00 44 79
+
+. 0 100023CC 16
+. 3B A1 00 10 7C 7B 1B 78 38 60 00 0A 48 00 A1 69
+
+. 0 100023DC 16
+. 3B 80 00 03 7C 76 1B 78 38 60 00 0A 48 00 E5 5D
+
+. 0 100023EC 12
+. 7C 77 1B 78 38 60 00 0A 48 00 47 65
+
+. 0 100023F8 12
+. 7C 7A 1B 78 38 60 00 0A 48 00 E8 8D
+
+. 0 10002404 28
+. 7F 66 DB 78 7C 79 1B 78 7F 47 D3 78 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 6A A1
+
+. 0 10007C2C 20
+. 80 7E 80 00 38 A0 00 6B 80 9E 80 34 38 C0 00 01 48 00 D7 21
+
+. 0 10007C40 8
+. 38 60 00 01 4B FF F9 60
+
+. 0 10002420 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 71 B8 48 01 31 B9
+
+PASS: trap for n = 0 in gsl_fft_complex_forward
+. 0 10002434 24
+. 7E E6 BB 78 7F 27 CB 78 38 A0 00 00 7F 03 C3 78 38 80 00 01 48 00 E9 D1
+
+. 0 1001252C 20
+. 80 7E 80 00 38 A0 00 36 80 9E 82 4C 38 C0 00 01 48 00 2E 21
+
+. 0 10012540 8
+. 38 60 00 01 4B FF F2 E4
+
+. 0 1000244C 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 71 E4 48 01 31 8D
+
+PASS: trap for n = 0 in gsl_fft_real_transform
+. 0 10002460 24
+. 7E C6 B3 78 7F 27 CB 78 38 A0 00 00 7F 03 C3 78 38 80 00 01 48 00 A4 45
+
+. 0 1000D9D4 20
+. 80 7E 80 00 38 A0 00 58 80 9E 81 5C 38 C0 00 01 48 00 79 79
+
+. 0 1000D9E8 8
+. 38 60 00 01 4B FF F5 38
+
+. 0 10002478 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 72 10 48 01 31 61
+
+PASS: trap for n = 0 in gsl_fft_halfcomplex_transform
+. 0 1000248C 16
+. 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 6D 35
+
+. 0 10006834 8
+. 38 21 00 10 4E 80 00 20
+
+. 0 100091B0 20
+. 80 7E 80 7C 38 A0 00 5A 80 9E 80 80 38 C0 00 04 48 00 C1 9D
+
+. 0 100091C4 8
+. 38 60 00 04 4B FF FF 94
+
+. 0 1000249C 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 72 40 48 01 31 3D
+
+PASS: trap for n = 0 in gsl_fft_complex_radix2_forward
+. 0 100024B0 24
+. 7F 66 DB 78 7F 47 D3 78 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 69 D5
+
+. 0 100024C8 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 72 74 48 01 31 11
+
+PASS: trap for n = 0 in gsl_fft_complex_backward
+. 0 100024DC 16
+. 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 6D 09
+
+. 0 100024EC 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 72 A0 48 01 30 ED
+
+PASS: trap for n = 0 in gsl_fft_complex_radix2_backward
+. 0 10002500 24
+. 7F 66 DB 78 7F 47 D3 78 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 68 BD
+
+. 0 10002518 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 72 D4 48 01 30 C1
+
+PASS: trap for n = 0 in gsl_fft_complex_inverse
+. 0 1000252C 16
+. 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 6C E1
+
+. 0 1000253C 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 73 00 48 01 30 9D
+
+PASS: trap for n = 0 in gsl_fft_complex_radix2_inverse
+. 0 10002550 16
+. 38 A0 00 11 7F A3 EB 78 38 80 00 01 48 00 6C 71
+
+. 0 10002560 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 73 34 48 01 30 79
+
+PASS: trap for n != 2^k in gsl_fft_complex_radix2_forward
+. 0 10002574 16
+. 38 A0 00 11 7F A3 EB 78 38 80 00 01 48 00 6C 71
+
+. 0 10002584 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 73 68 48 01 30 55
+
+PASS: trap for n != 2^k in gsl_fft_complex_radix2_backward
+. 0 10002598 16
+. 38 A0 00 11 7F A3 EB 78 38 80 00 01 48 00 6C 75
+
+. 0 100025A8 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 73 A0 48 01 30 31
+
+PASS: trap for n != 2^k in gsl_fft_complex_radix2_inverse
+. 0 100025BC 28
+. 7F 66 DB 78 7F 47 D3 78 93 9B 00 00 38 A0 00 04 38 80 00 01 7F A3 EB 78 48 00 68 E9
+
+. 0 10008D94 20
+. 80 7E 80 38 38 A0 00 75 80 9E 80 34 38 C0 00 04 48 00 C5 B9
+
+. 0 10008DA8 8
+. 38 60 00 04 4B FF E7 F8
+
+. 0 100025D8 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 73 D4 48 01 30 01
+
+PASS: trap for n != nw in gsl_fft_complex_forward
+. 0 100025EC 28
+. 7F 66 DB 78 7F 47 D3 78 93 9B 00 00 38 A0 00 04 38 80 00 01 7F A3 EB 78 48 00 68 95
+
+. 0 10002608 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 74 00 48 01 2F D1
+
+PASS: trap for n != nw in gsl_fft_complex_backward
+. 0 1000261C 28
+. 7F 66 DB 78 7F 47 D3 78 93 9B 00 00 38 A0 00 04 7F A3 EB 78 38 80 00 01 48 00 67 9D
+
+. 0 10002638 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 74 30 48 01 2F A1
+
+PASS: trap for n != nw in gsl_fft_complex_inverse
+. 0 1000264C 28
+. 7E E6 BB 78 7F 27 CB 78 93 97 00 00 38 A0 00 04 38 80 00 01 7F 03 C3 78 48 00 E7 B5
+
+. 0 10012548 20
+. 80 7E 80 38 38 A0 00 40 80 9E 82 4C 38 C0 00 04 48 00 2E 05
+
+. 0 1001255C 8
+. 38 60 00 04 4B FF F2 C8
+
+. 0 10002668 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 74 5C 48 01 2F 71
+
+PASS: trap for n != nw in gsl_fft_real_transform
+. 0 1000267C 28
+. 7E C6 B3 78 7F 27 CB 78 38 A0 00 04 93 96 00 00 7F 03 C3 78 38 80 00 01 48 00 A2 25
+
+. 0 1000DF98 20
+. 80 7E 80 38 38 A0 00 62 80 9E 81 5C 38 C0 00 04 48 00 73 B5
+
+. 0 1000DFAC 8
+. 38 60 00 04 4B FF EF 74
+
+. 0 10002698 20
+. 3C 80 10 01 38 84 74 88 20 03 00 00 7C 60 19 14 48 01 2F 41
+
+PASS: trap for n != nw in gsl_fft_halfcomplex_transform
+. 0 100026AC 8
+. 7E C3 B3 78 48 00 A1 BD
+
+. 0 100026B4 8
+. 7E E3 BB 78 48 00 E6 CD
+
+. 0 100026BC 8
+. 7F 63 DB 78 48 00 45 91
+
+. 0 100026C4 8
+. 7F 23 CB 78 48 00 E7 05
+
+. 0 100026CC 8
+. 7F 43 D3 78 48 00 45 C9
+
+. 0 100026D4 48
+. 80 01 00 44 82 C1 00 18 82 E1 00 1C 7C 08 03 A6 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 38 21 00 40 4E 80 00 20
+
+. 0 10004754 4
+. 4B FF FB 01
+
+. 0 10004254 56
+. 7C 08 02 A6 94 21 FF C0 38 60 00 00 92 C1 00 18 92 E1 00 1C 90 01 00 44 93 01 00 20 3B 01 00 08 93 21 00 24 93 41 00 28 93 61 00 2C 93 81 00 30 93 A1 00 34 48 00 53 E1
+
+. 0 100098C8 20
+. 80 7E 80 00 38 A0 00 21 80 9E 80 14 38 C0 00 01 48 00 BA 85
+
+. 0 100098DC 8
+. 38 60 00 00 4B FF FF 7C
+
+. 0 1000985C 108
+. 80 01 00 94 81 C1 00 20 81 E1 00 24 7C 08 03 A6 82 01 00 28 82 21 00 2C 82 41 00 30 82 61 00 34 82 81 00 38 82 A1 00 3C 82 C1 00 40 82 E1 00 44 83 01 00 48 83 21 00 4C 83 41 00 50 83 61 00 54 83 81 00 58 83 A1 00 5C 83 C1 00 60 83 E1 00 64 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 1000428C 24
+. 30 03 FF FF 7F 80 19 10 3C 80 10 01 38 84 7C 6C 7F 83 E3 78 48 01 13 49
+
+PASS: trap for n = 0 in gsl_fft_complex_wavetable_float_alloc
+. 0 100042A4 8
+. 38 60 00 00 48 00 E6 85
+
+. 0 10012BC0 20
+. 80 7E 80 00 38 A0 00 21 80 9E 82 40 38 C0 00 01 48 00 27 8D
+
+. 0 10012BD4 8
+. 38 60 00 00 4B FF FF 64
+
+. 0 100042AC 24
+. 3C 80 10 01 30 03 FF FF 7F A0 19 10 38 84 7C A4 7F A3 EB 78 48 01 13 29
+
+PASS: trap for n = 0 in gsl_fft_real_wavetable_float_alloc
+. 0 100042C4 8
+. 38 60 00 00 48 00 A4 39
+
+. 0 1000E97C 20
+. 80 7E 80 00 38 A0 00 21 80 9E 81 50 38 C0 00 01 48 00 69 D1
+
+. 0 1000E990 8
+. 38 60 00 00 4B FF FF 74
+
+. 0 100042CC 24
+. 3C 80 10 01 31 23 FF FF 7C 09 19 10 38 84 7C DC 7C 03 03 78 48 01 13 09
+
+PASS: trap for n = 0 in gsl_fft_halfcomplex_wavetable_float_alloc
+. 0 100042E4 8
+. 38 60 00 00 48 00 56 A1
+
+. 0 10009A1C 12
+. 80 7E 80 00 80 9E 80 14 48 00 B9 39
+
+. 0 10009A28 36
+. 38 00 00 00 7C 03 03 78 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 100042EC 16
+. 3C 80 10 01 7F 83 E3 78 38 84 7D 18 48 01 12 F1
+
+PASS: trap for n = 0 in gsl_fft_complex_workspace_float_alloc
+. 0 100042FC 8
+. 38 60 00 00 48 00 E9 7D
+
+. 0 10012D10 12
+. 80 7E 80 00 80 9E 82 40 48 00 26 45
+
+. 0 10012D1C 36
+. 38 00 00 00 7C 03 03 78 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10004304 16
+. 3C 80 10 01 38 84 7D 50 7F A3 EB 78 48 01 12 D9
+
+PASS: trap for n = 0 in gsl_fft_real_workspace_float_alloc
+. 0 10004314 8
+. 38 60 00 0A 48 00 53 51
+
+. 0 1000431C 16
+. 3B A1 00 0C 7C 7B 1B 78 38 60 00 0A 48 00 A3 D9
+
+. 0 1000432C 16
+. 3B 80 00 03 7C 76 1B 78 38 60 00 0A 48 00 E5 F5
+
+. 0 1000433C 12
+. 7C 77 1B 78 38 60 00 0A 48 00 56 45
+
+. 0 10004348 12
+. 7C 7A 1B 78 38 60 00 0A 48 00 E9 2D
+
+. 0 10004354 28
+. 7F 66 DB 78 7C 79 1B 78 7F 47 D3 78 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 7A 11
+
+. 0 1000AA78 20
+. 80 7E 80 00 38 A0 00 6B 80 9E 80 34 38 C0 00 01 48 00 A8 D5
+
+. 0 1000AA8C 8
+. 38 60 00 01 4B FF F9 34
+
+. 0 10004370 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7D 88 48 01 12 69
+
+PASS: trap for n = 0 in gsl_fft_complex_float_forward
+. 0 10004384 24
+. 7E E6 BB 78 7F 27 CB 78 38 A0 00 00 7F 03 C3 78 38 80 00 01 48 00 EA 71
+
+. 0 10014590 20
+. 80 7E 80 00 38 A0 00 36 80 9E 82 4C 38 C0 00 01 48 00 0D BD
+
+. 0 100145A4 8
+. 38 60 00 01 4B FF F2 AC
+
+. 0 1000439C 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7D B8 48 01 12 3D
+
+PASS: trap for n = 0 in gsl_fft_real_float_transform
+. 0 100043B0 24
+. 7E C6 B3 78 7F 27 CB 78 38 A0 00 00 7F 03 C3 78 38 80 00 01 48 00 A6 BD
+
+. 0 1000FBC0 20
+. 80 7E 80 00 38 A0 00 58 80 9E 81 5C 38 C0 00 01 48 00 57 8D
+
+. 0 1000FBD4 8
+. 38 60 00 01 4B FF F5 18
+
+. 0 100043C8 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7D E8 48 01 12 11
+
+PASS: trap for n = 0 in gsl_fft_halfcomplex_float_transform
+. 0 100043DC 16
+. 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 7C AD
+
+. 0 1000C094 20
+. 7C 08 02 A6 94 21 FF F0 38 C0 FF FF 90 01 00 14 4B FF FD 01
+
+. 0 1000BDA4 124
+. 94 21 FF 90 7C 08 02 A6 42 9F 00 05 93 C1 00 40 7F C8 02 A6 2F 85 00 01 90 01 00 74 93 41 00 30 7C DA 33 78 80 1E FF F0 93 81 00 38 7C BC 2B 78 93 A1 00 3C 7F C0 F2 14 93 E1 00 44 7C 9D 23 78 7C 7F 1B 78 DB 61 00 48 DB 81 00 50 38 60 00 00 DB A1 00 58 DB C1 00 60 DB E1 00 68 92 81 00 18 92 A1 00 1C 92 C1 00 20 92 E1 00 24 93 01 00 28 93 21 00 2C 93 61 00 34 41 9E 02 08
+
+. 0 1000BE20 8
+. 7C A3 2B 78 4B FF A9 D5
+
+. 0 1000BE28 12
+. 2F 83 FF FF 7C 79 1B 78 41 9E 02 48
+
+. 0 1000C078 20
+. 80 7E 80 7C 38 A0 00 5A 80 9E 80 80 38 C0 00 04 48 00 92 D5
+
+. 0 1000C08C 8
+. 38 60 00 04 4B FF FF 94
+
+. 0 1000C024 84
+. 80 01 00 74 82 81 00 18 82 A1 00 1C 7C 08 03 A6 82 C1 00 20 82 E1 00 24 83 01 00 28 83 21 00 2C 83 41 00 30 83 61 00 34 83 81 00 38 83 A1 00 3C 83 C1 00 40 83 E1 00 44 CB 61 00 48 CB 81 00 50 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 1000C0A8 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 100043EC 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7E 20 48 01 11 ED
+
+PASS: trap for n = 0 in gsl_fft_complex_float_radix2_forward
+. 0 10004400 24
+. 7F 66 DB 78 7F 47 D3 78 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 79 45
+
+. 0 10004418 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7E 58 48 01 11 C1
+
+PASS: trap for n = 0 in gsl_fft_complex_float_backward
+. 0 1000442C 16
+. 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 7C 81
+
+. 0 1000C0B8 20
+. 7C 08 02 A6 94 21 FF F0 38 C0 00 01 90 01 00 14 4B FF FC DD
+
+. 0 1000C0CC 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 1000443C 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7E 8C 48 01 11 9D
+
+PASS: trap for n = 0 in gsl_fft_complex_float_radix2_backward
+. 0 10004450 24
+. 7F 66 DB 78 7F 47 D3 78 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 78 29
+
+. 0 10004468 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7E C4 48 01 11 71
+
+PASS: trap for n = 0 in gsl_fft_complex_float_inverse
+. 0 1000447C 16
+. 38 A0 00 00 7F A3 EB 78 38 80 00 01 48 00 7C 59
+
+. 0 1000C0E0 64
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 01 00 24 38 C0 00 01 7C 7F 1B 78 93 81 00 10 80 1E FF F0 7C 9C 23 78 93 A1 00 14 7C BD 2B 78 7F C0 F2 14 4B FF FC 89
+
+. 0 1000C120 8
+. 2C 03 00 00 40 82 00 68
+
+. 0 1000C18C 32
+. 80 01 00 24 83 81 00 10 83 A1 00 14 7C 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 1000448C 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7E F4 48 01 11 4D
+
+PASS: trap for n = 0 in gsl_fft_complex_float_radix2_inverse
+. 0 100044A0 16
+. 38 A0 00 11 7F A3 EB 78 38 80 00 01 48 00 7B E9
+
+. 0 100044B0 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7F 2C 48 01 11 29
+
+PASS: trap for n != 2^k in gsl_fft_complex_float_radix2_forward
+. 0 100044C4 16
+. 38 A0 00 11 7F A3 EB 78 38 80 00 01 48 00 7B E9
+
+. 0 100044D4 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7F 68 48 01 11 05
+
+PASS: trap for n != 2^k in gsl_fft_complex_float_radix2_backward
+. 0 100044E8 16
+. 38 A0 00 11 7F A3 EB 78 38 80 00 01 48 00 7B ED
+
+. 0 100044F8 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7F A4 48 01 10 E1
+
+PASS: trap for n != 2^k in gsl_fft_complex_float_radix2_inverse
+. 0 1000450C 28
+. 7F 66 DB 78 7F 47 D3 78 93 9B 00 00 38 A0 00 04 38 80 00 01 7F A3 EB 78 48 00 78 59
+
+. 0 1000BC50 20
+. 80 7E 80 38 38 A0 00 75 80 9E 80 34 38 C0 00 04 48 00 96 FD
+
+. 0 1000BC64 8
+. 38 60 00 04 4B FF E7 5C
+
+. 0 10004528 20
+. 3C 80 10 01 20 03 00 00 7C 60 19 14 38 84 7F E0 48 01 10 B1
+
+PASS: trap for n != nw in gsl_fft_complex_float_forward
+. 0 1000453C 28
+. 7F 66 DB 78 7F 47 D3 78 93 9B 00 00 38 A0 00 04 38 80 00 01 7F A3 EB 78 48 00 78 05
+
+. 0 10004558 20
+. 3C 80 10 02 20 03 00 00 7C 60 19 14 38 84 80 14 48 01 10 81
+
+PASS: trap for n != nw in gsl_fft_complex_float_backward
+. 0 1000456C 28
+. 7F 66 DB 78 7F 47 D3 78 93 9B 00 00 38 A0 00 04 7F A3 EB 78 38 80 00 01 48 00 77 09
+
+. 0 10004588 20
+. 3C 80 10 02 20 03 00 00 7C 60 19 14 38 84 80 48 48 01 10 51
+
+PASS: trap for n != nw in gsl_fft_complex_float_inverse
+. 0 1000459C 28
+. 7E E6 BB 78 7F 27 CB 78 93 97 00 00 38 A0 00 04 38 80 00 01 7F 03 C3 78 48 00 E8 55
+
+. 0 100145AC 20
+. 80 7E 80 38 38 A0 00 40 80 9E 82 4C 38 C0 00 04 48 00 0D A1
+
+. 0 100145C0 8
+. 38 60 00 04 4B FF F2 90
+
+. 0 100045B8 20
+. 3C 80 10 02 20 03 00 00 7C 60 19 14 38 84 80 7C 48 01 10 21
+
+PASS: trap for n != nw in gsl_fft_real_float_transform
+. 0 100045CC 28
+. 7E C6 B3 78 7F 27 CB 78 38 A0 00 04 93 96 00 00 7F 03 C3 78 38 80 00 01 48 00 A4 9D
+
+. 0 100101C4 20
+. 80 7E 80 38 38 A0 00 62 80 9E 81 5C 38 C0 00 04 48 00 51 89
+
+. 0 100101D8 8
+. 38 60 00 04 4B FF EF 14
+
+. 0 100045E8 20
+. 3C 80 10 02 38 84 80 B0 20 03 00 00 7C 60 19 14 48 01 0F F1
+
+PASS: trap for n != nw in gsl_fft_halfcomplex_float_transform
+. 0 100045FC 8
+. 7E C3 B3 78 48 00 A4 35
+
+. 0 10004604 8
+. 7E E3 BB 78 48 00 E7 6D
+
+. 0 1000460C 8
+. 7F 63 DB 78 48 00 54 71
+
+. 0 10004614 8
+. 7F 23 CB 78 48 00 E7 A5
+
+. 0 1000461C 8
+. 7F 43 D3 78 48 00 54 A9
+
+. 0 10004624 48
+. 80 01 00 44 82 C1 00 18 82 E1 00 1C 7C 08 03 A6 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 38 21 00 40 4E 80 00 20
+
+. 0 10004758 4
+. 48 01 1B 2D
+
+. 0 10016284 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 38 60 00 01 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 10 80 09 00 00 2F 80 00 00 40 9E 00 38
+
+. 0 100162B8 28
+. 81 3E 80 04 38 60 00 00 81 7E 80 00 80 A9 00 00 80 8B 00 00 7F 85 20 00 41 9E 00 1C
+
+. 0 100162EC 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 1000475C 4
+. 48 02 48 21
+
+. 0 10028F7C 8
+. 39 60 00 44 4B FF FF 4C
+
+. 0 FEB3B98 12
+. 94 21 FF E0 7C 08 02 A6 48 11 42 B1
+
+. 0 FEB3BA4 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 81 00 10 7C 7C 1B 78 93 A1 00 14 83 FE 1A FC 90 01 00 24 80 1F 00 00 2F 80 00 00 41 9E 00 CC
+
+. 0 FEB3BD0 16
+. 81 3F 00 00 80 69 00 04 2C 03 00 00 41 82 00 78
+
+. 0 FEB3BE0 8
+. 7D 2A 4B 78 48 00 00 18
+
+. 0 FEB3BFC 36
+. 80 AA 00 04 39 25 FF FF 55 24 20 36 91 2A 00 04 7D 64 52 14 80 0B 00 08 39 6B 00 08 2F 80 00 03 41 9E 00 54
+
+. 0 FEB3C20 12
+. 2C 80 00 02 2F 00 00 04 40 BD FF C0
+
+. 0 FEB3C2C 4
+. 40 9A FF C0
+
+. 0 FEB3C30 20
+. 81 0B 00 04 7F 84 E3 78 80 6B 00 08 7D 09 03 A6 4E 80 04 21
+
+. 0 1001654C 68
+. 94 21 FF E0 7C A8 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 A1 00 14 90 A1 00 24 80 9E FF F0 7F C4 F2 14 81 7E 80 0C 80 7E 80 08 7C 0B 18 50 7C 09 16 70 2F 89 00 00 3B E9 FF FF 41 9E 00 28
+
+. 0 100165B4 4
+. 48 00 02 69
+
+. 0 1001681C 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FE 9F 05
+
+. 0 1000072C 36
+. 7C 08 02 A6 94 21 FF F0 93 C1 00 08 3F C0 10 03 93 E1 00 0C 90 01 00 14 88 1E 8F FC 2F 80 00 00 40 9E 00 34
+
+. 0 10000750 8
+. 3F E0 10 03 48 00 00 14
+
+. 0 10000768 16
+. 81 7F 8E 74 81 2B 00 00 2F 89 00 00 40 9E FF E4
+
+. 0 10000778 32
+. 38 00 00 01 98 1E 8F FC 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 1001682C 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 100165B8 28
+. 81 01 00 24 83 A1 00 14 83 C1 00 18 7D 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEB3C44 16
+. 81 5F 00 00 81 8A 00 04 2C 8C 00 00 40 86 FF AC
+
+. 0 2547BAE0 12
+. 94 21 FF A0 7C 08 02 A6 48 01 B5 19
+
+. 0 2547BAEC 132
+. 93 C1 00 58 7F C8 02 A6 7D 80 00 26 91 C1 00 18 92 41 00 28 81 DE 04 F4 82 5E 04 C8 91 E1 00 1C 39 E0 00 0F 92 01 00 20 3A 00 00 00 92 21 00 24 3A 20 01 68 92 C1 00 38 3A C0 00 00 93 E1 00 5C 7C 3F 0B 78 92 61 00 2C 92 81 00 30 92 A1 00 34 92 E1 00 3C 93 01 00 40 93 21 00 44 93 41 00 48 93 61 00 4C 93 81 00 50 93 A1 00 54 90 01 00 64 91 81 00 14 80 92 03 F8 38 72 01 80 7C 89 03 A6 4E 80 04 21
+
+. 0 2547BB70 20
+. 7D 31 92 14 80 69 00 04 54 6A 10 3A 7F 8A 80 40 40 9D 00 2C
+
+. 0 2547BBAC 16
+. 7D 71 90 2E 3A E0 00 00 2E 0B 00 00 41 92 00 20
+
+. 0 2547BBD8 8
+. 7E F3 BB 79 41 82 01 10
+
+. 0 2547BCEC 24
+. 80 B2 03 FC 3B 40 00 00 38 72 01 80 7E 1A 98 40 7C A9 03 A6 4E 80 04 21
+
+. 0 2547BD04 4
+. 40 90 00 68
+
+. 0 2547BD6C 12
+. 35 EF FF FF 3A 31 FF E8 40 80 FD EC
+
+. 0 2547BB60 16
+. 80 92 03 F8 38 72 01 80 7C 89 03 A6 4E 80 04 21
+
+. 0 2547BB84 8
+. 2C 10 00 00 40 82 01 10
+
+. 0 2547BB8C 48
+. 39 0A 00 1E 81 61 00 00 55 07 00 36 7D 50 53 78 7C C7 00 D0 7D 61 31 6E 38 A1 00 17 54 B6 00 36 7D 71 90 2E 3A E0 00 00 2E 0B 00 00 41 92 00 20
+
+. 0 2547BBBC 16
+. 83 4B 00 14 56 EA 10 3A 7F 1A 58 00 41 9A 01 08
+
+. 0 2547BCD0 28
+. 83 6B 01 78 3A F7 00 01 7D 6A B1 2E 39 5B 00 01 91 4B 01 78 81 6B 00 0C 4B FF FE E8
+
+. 0 2547BBD0 8
+. 2E 0B 00 00 40 92 FF E8
+
+. 0 2547BBE0 12
+. 2E 0F 00 00 7F 71 90 2E 40 92 00 08
+
+. 0 2547BBEC 12
+. 83 7B 00 0C 2F 9B 00 00 41 9E 00 F8
+
+. 0 2547BBF8 12
+. 83 9B 00 14 7C 9C D8 00 40 86 FF EC
+
+. 0 2547BC04 12
+. 7F 20 00 26 57 39 9F FE 48 00 00 08
+
+. 0 2547BC14 16
+. 57 2C 10 3A 7F AC B0 2E 7F 1D D8 00 40 9A FF F0
+
+. 0 2547BC24 12
+. 3B 59 00 01 7C 1A B8 40 40 A0 FF C0
+
+. 0 2547BC30 32
+. 57 20 10 3A 7C 14 03 78 7E A0 B2 14 57 58 10 3A 7C 78 B0 2E 81 63 01 E8 2F 8B 00 00 41 9E 00 2C
+
+. 0 2547BC50 12
+. 81 2B 00 00 2C 89 00 00 41 86 00 20
+
+. 0 2547BC5C 16
+. 7C 99 D0 50 54 85 10 3A 7F 09 D8 00 41 9A 02 40
+
+. 0 2547BC6C 12
+. 85 2B 00 04 2C 09 00 00 40 82 FF F0
+
+. 0 2547BC64 8
+. 7F 09 D8 00 41 9A 02 40
+
+. 0 2547BC78 16
+. 7D 38 B0 2E 81 49 01 F4 2F 8A 00 00 40 9E 02 58
+
+. 0 2547BC88 12
+. 3B 5A 00 01 7F 1A B8 40 41 98 FF AC
+
+. 0 2547BC3C 20
+. 57 58 10 3A 7C 78 B0 2E 81 63 01 E8 2F 8B 00 00 41 9E 00 2C
+
+. 0 2547BC94 4
+. 4B FF FF 58
+
+. 0 2547BC10 20
+. 3B 39 00 01 57 2C 10 3A 7F AC B0 2E 7F 1D D8 00 40 9A FF F0
+
+. 0 2547BD08 12
+. 81 7E 03 4C 83 2B 00 00 48 00 00 30
+
+. 0 2547BD40 20
+. 57 48 10 3A 7F 88 B0 2E 81 7C 01 80 75 60 10 00 40 A2 FF C4
+
+. 0 2547BD14 24
+. 55 75 01 04 80 9C 00 04 92 BC 01 80 8A 84 00 00 2F 14 00 00 40 9A 00 C0
+
+. 0 2547BD2C 8
+. 75 69 C0 00 40 82 00 B8
+
+. 0 2547BD34 12
+. 3B 5A 00 01 7F 9A 98 40 40 9C 00 30
+
+. 0 2547BDE8 12
+. 81 3C 00 88 2F 89 00 00 40 9E 00 10
+
+. 0 2547BDF4 12
+. 82 FC 00 54 2C 97 00 00 41 86 FF 38
+
+. 0 2547BE00 12
+. 83 0E 00 00 73 0B 00 02 40 82 00 7C
+
+. 0 2547BE0C 8
+. 2F 09 00 00 41 9A 00 44
+
+. 0 2547BE54 12
+. 81 3C 00 54 2E 09 00 00 41 92 FE F8
+
+. 0 2547BE60 20
+. 80 A9 00 04 83 BC 00 00 7C FD 2A 14 7C E9 03 A6 4E 80 04 21
+
+. 0 FFDEF80 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FF F9 79
+
+. 0 FFDE904 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 88 09 00 00 2F 80 00 00 40 9E 00 50
+
+. 0 FFDE938 12
+. 80 1E 80 04 2F 80 00 00 41 9E 00 10
+
+. 0 FFDE944 12
+. 81 3E 80 08 80 69 00 00 48 01 08 31
+
+. 0 FFEF17C 8
+. 39 60 00 0C 4B FF FF BC
+
+. 0 FFEF13C 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 80 D0 3D 8C 25 47 4E 80 04 20
+
+. 0 FEB3F44 12
+. 94 21 FF D0 7C 08 02 A6 48 11 3F 05
+
+. 0 FEB3F50 72
+. 93 C1 00 28 7F C8 02 A6 93 81 00 20 90 01 00 34 7D 80 00 26 93 21 00 14 2E 03 00 00 81 3E 1A FC 7C 79 1B 78 93 01 00 10 83 89 00 00 93 41 00 18 2F 9C 00 00 93 61 00 1C 93 A1 00 24 93 E1 00 2C 91 81 00 0C 41 9E 00 88
+
+. 0 FEB3F98 28
+. 80 BC 00 04 3B BC 00 08 54 A4 20 36 7C 64 E2 14 3B E3 FF F8 7C 1D F8 40 41 81 00 60
+
+. 0 FEB4010 12
+. 83 9C 00 00 2F 9C 00 00 40 9E FF 80
+
+. 0 FEB401C 4
+. 40 92 00 98
+
+. 0 FEB40B4 60
+. 81 41 00 34 7F 23 CB 78 81 01 00 0C 83 C1 00 28 7D 48 03 A6 83 E1 00 2C 7D 00 81 20 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 38 21 00 30 48 0A BD 24
+
+. 0 FF5FE10 12
+. 94 21 FF D0 7C 08 02 A6 48 06 80 39
+
+. 0 FF5FE1C 64
+. 93 C1 00 28 7F C8 02 A6 93 01 00 10 90 01 00 34 93 A1 00 24 83 1E 1C D4 93 41 00 18 7C 7A 1B 78 83 B8 00 00 93 61 00 1C 3B 60 00 00 93 E1 00 2C 7C 3F 0B 78 93 21 00 14 93 81 00 20 48 00 00 18
+
+. 0 FF5FE70 8
+. 2F 1D 00 00 40 9A FF E8
+
+. 0 FF5FE5C 12
+. 80 7D 00 10 7F 83 D0 00 41 9E 00 48
+
+. 0 FF5FE68 16
+. 7F BB EB 78 83 BD 00 00 2F 1D 00 00 40 9A FF E8
+
+. 0 FF5FE78 52
+. 80 61 00 00 83 83 00 04 83 03 FF E0 7F 88 03 A6 83 23 FF E4 83 43 FF E8 83 63 FF EC 83 83 FF F0 83 A3 FF F4 83 C3 FF F8 83 E3 FF FC 7C 61 1B 78 4E 80 00 20
+
+. 0 FFDE950 8
+. 83 FE 80 0C 48 00 00 14
+
+. 0 FFDE968 16
+. 81 3F 00 00 81 69 00 00 2F 8B 00 00 40 9E FF E4
+
+. 0 FFDE978 36
+. 81 3E 80 00 38 00 00 01 98 09 00 00 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FFDEF90 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547BE74 16
+. 81 1C 01 78 39 68 FF FF 91 7C 01 78 4B FF FE E0
+
+. 0 2547BD60 12
+. 3B 5A 00 01 7F 9A 98 40 41 9C FF D8
+
+. 0 FE2A838 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FC F3 B9
+
+. 0 FDF9BFC 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 88 09 00 00 2F 80 00 00 40 9E 00 50
+
+. 0 FDF9C30 12
+. 80 1E 80 04 2F 80 00 00 41 9E 00 10
+
+. 0 FDF9C3C 12
+. 81 3E 80 08 80 69 00 00 48 07 B7 C1
+
+. 0 FE75404 8
+. 39 60 00 44 4B FF FF 4C
+
+. 0 FE75354 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 83 B8 3D 8C 25 47 4E 80 04 20
+
+. 0 FDF9C48 8
+. 83 FE 80 0C 48 00 00 14
+
+. 0 FDF9C60 16
+. 81 3F 00 00 81 69 00 00 2F 8B 00 00 40 9E FF E4
+
+. 0 FDF9C70 36
+. 81 3E 80 00 38 00 00 01 98 09 00 00 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FE2A848 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547BE14 36
+. 80 9C 00 90 80 69 00 04 83 64 00 04 80 1C 00 00 57 6C F0 BE 2F 8C 00 00 7F 60 1A 14 3B AC FF FF 41 9E 00 20
+
+. 0 2547BE38 16
+. 57 A6 10 3A 7D 26 D8 2E 7D 29 03 A6 4E 80 04 21
+
+. 0 FE9B5BC 12
+. 94 21 FF F0 7C 08 02 A6 48 12 C8 8D
+
+. 0 FE9B5C8 36
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 81 3E 00 14 80 09 00 04 3B E9 00 04 2F 80 00 00 41 9E 00 18
+
+. 0 FE9B600 24
+. 80 61 00 14 83 C1 00 08 83 E1 00 0C 7C 68 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547BE48 12
+. 2F 9D 00 00 3B BD FF FF 40 9E FF E8
+
+. 0 2547BD54 24
+. 81 1C 01 78 39 68 FF FF 91 7C 01 78 3B 5A 00 01 7F 9A 98 40 41 9C FF D8
+
+. 0 2547BD78 12
+. 81 EE 00 00 71 E9 00 80 40 82 01 AC
+
+. 0 2547BD84 100
+. 81 41 00 00 81 CA 00 04 81 8A FF B4 7D C8 03 A6 81 EA FF BC 81 CA FF B8 7D 80 81 20 82 0A FF C0 82 2A FF C4 82 4A FF C8 82 6A FF CC 82 8A FF D0 82 AA FF D4 82 CA FF D8 82 EA FF DC 83 0A FF E0 83 2A FF E4 83 4A FF E8 83 6A FF EC 83 8A FF F0 83 AA FF F4 83 CA FF F8 83 EA FF FC 7D 41 53 78 4E 80 00 20
+
+. 0 FEB3C54 20
+. 80 7F 00 00 80 03 00 00 2F 00 00 00 90 1F 00 00 41 9A FF 64
+
+. 0 FEB3BC8 8
+. 2F 80 00 00 41 9E 00 CC
+
+. 0 FEB3C98 16
+. 83 FE 1B BC 83 BE 1C 38 7F 9F E8 40 40 9C 00 1C
+
+. 0 FEB3CA8 16
+. 81 5F 00 00 3B FF 00 04 7D 49 03 A6 4E 80 04 21
+
+. 0 FEECB0C 12
+. 94 21 FF E0 7C 08 02 A6 48 0D B3 3D
+
+. 0 FEECB18 32
+. 93 C1 00 18 7F C8 02 A6 38 60 00 00 93 81 00 10 93 E1 00 1C 93 A1 00 14 90 01 00 24 4B FF F9 8D
+
+. 0 FEEC4C0 12
+. 94 21 FF B0 7C 88 02 A6 48 0D B9 89
+
+. 0 FEEC4CC 88
+. 93 C1 00 48 7F C8 02 A6 92 C1 00 28 90 81 00 54 7D 80 00 26 92 E1 00 2C 3A E0 00 00 82 DE 1B 48 93 E1 00 4C 7C 7F 1B 78 81 36 00 B8 93 01 00 30 31 69 FF FF 7C 0B 49 10 93 21 00 34 2D 80 00 00 93 41 00 38 93 61 00 3C 93 81 00 40 93 A1 00 44 91 81 00 24 40 8E 02 48
+
+. 0 FEEC524 20
+. 80 7E 05 E4 92 E1 00 14 90 61 00 10 2E 1F 00 00 41 92 00 50
+
+. 0 FEEC584 24
+. 83 1E 1B EC 83 3E 05 EC 83 F8 00 00 83 99 00 00 2C 9F 00 00 41 86 01 28
+
+. 0 FEEC59C 16
+. 83 5E 05 E8 3B 60 00 00 93 FA 00 00 41 92 00 5C
+
+. 0 FEEC604 12
+. 80 7F 00 60 2F 03 00 00 40 99 01 44
+
+. 0 FEEC750 16
+. 81 7F 00 14 81 3F 00 10 7C 8B 48 40 40 A5 FE B4
+
+. 0 FEEC610 12
+. 89 5F 00 46 2C 0A 00 00 40 82 00 44
+
+. 0 FEEC61C 4
+. 40 99 00 40
+
+. 0 FEEC65C 4
+. 41 92 00 44
+
+. 0 FEEC6A0 16
+. 80 19 00 00 93 7A 00 00 7F 9C 00 00 41 9E 00 9C
+
+. 0 FEEC748 8
+. 83 FF 00 34 4B FF FF 6C
+
+. 0 FEEC6B8 8
+. 2C 1F 00 00 40 82 FE E8
+
+. 0 FEEC5A4 8
+. 93 FA 00 00 41 92 00 5C
+
+. 0 FEEC6C0 4
+. 41 92 00 38
+
+. 0 FEEC6F8 4
+. 40 8E 00 88
+
+. 0 FEEC6FC 68
+. 7E E3 BB 78 81 81 00 24 82 E1 00 54 82 C1 00 28 7D 81 81 20 7E E8 03 A6 83 01 00 30 82 E1 00 2C 83 21 00 34 83 41 00 38 83 61 00 3C 83 81 00 40 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+
+. 0 FEECB38 20
+. 81 3E 1B EC 7C 7C 1B 78 83 E9 00 00 2F 9F 00 00 41 9E 00 80
+
+. 0 FEECB4C 8
+. 3B A0 FF FF 48 00 00 14
+
+. 0 FEECB64 20
+. 80 1F 00 00 70 09 00 02 54 03 EF FE 2F 83 00 00 40 A2 FF E0
+
+. 0 FEECB54 16
+. 93 BF 00 60 83 FF 00 34 2F 1F 00 00 41 9A 00 68
+
+. 0 FEECB78 8
+. 70 09 10 00 41 9E 00 08
+
+. 0 FEECB84 24
+. 80 DF 00 60 7F E3 FB 78 38 80 00 00 38 A0 00 00 2C 86 00 00 41 86 FF BC
+
+. 0 FEECB9C 28
+. 89 9F 00 46 7D 8A 07 74 7D 0A FA 14 80 E8 00 98 81 67 00 2C 7D 69 03 A6 4E 80 04 21
+
+. 0 FEE8C4C 24
+. 7C 08 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 90 01 00 14 48 00 33 3D
+
+. 0 FEEBF9C 68
+. 7D 48 02 A6 94 21 FF E0 93 81 00 10 7C BC 2B 78 93 A1 00 14 7C 9D 23 78 91 41 00 24 93 E1 00 1C 7C 7F 1B 78 93 C1 00 18 89 03 00 46 7D 07 07 74 7C C7 1A 14 80 86 00 98 81 64 00 30 7D 69 03 A6 4E 80 04 21
+
+. 0 FEEBFE0 32
+. 20 1D 00 00 7D 20 E9 14 2F 83 FF FF 21 7C 00 00 7C 0B E1 14 7D 2B 03 79 38 60 00 00 41 9E 00 54
+
+. 0 FEEC000 20
+. 7F A4 EB 78 7F E3 FB 78 38 BF 00 48 38 C0 00 00 40 82 00 60
+
+. 0 FEEC070 16
+. 81 9F 00 00 38 9F 00 47 61 80 00 02 4B FF FF AC
+
+. 0 FEEC028 8
+. 90 1F 00 00 4B FF FB 51
+
+. 0 FEEBBB8 16
+. 80 03 00 00 7D 23 4B 78 70 0B 00 01 40 A2 00 1C
+
+. 0 FEEBBC8 20
+. 80 DF 00 20 7C A9 30 50 38 85 0F FF 54 84 00 26 48 06 26 A5
+
+. 0 FF4E27C 8
+. 38 00 00 5B 44 00 00 02
+
+. 0 FF4E284 4
+. 4C A3 00 20
+
+. 0 FEEBC20 40
+. 80 61 00 24 90 1F 00 00 83 61 00 0C 7C 68 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEEC030 64
+. 38 A0 00 00 7F E3 FB 78 90 BF 00 0C 90 BF 00 18 90 BF 00 14 90 BF 00 10 90 BF 00 08 90 BF 00 04 83 81 00 24 83 A1 00 14 7F 88 03 A6 83 C1 00 18 83 81 00 10 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEE8C64 12
+. 2F 83 00 00 38 60 00 00 41 9E 00 24
+
+. 0 FEE8C70 52
+. 80 9F 00 1C 7F E3 FB 78 90 9F 00 08 90 9F 00 18 90 9F 00 14 90 9F 00 10 90 9F 00 0C 90 9F 00 04 80 A1 00 14 83 E1 00 08 38 21 00 10 7C A8 03 A6 4E 80 00 20
+
+. 0 FEECBB8 16
+. 93 BF 00 60 83 FF 00 34 2F 1F 00 00 40 9A FF A0
+
+. 0 FEECB80 4
+. 41 A2 FF D4
+
+. 0 FEECBC8 36
+. 80 81 00 24 7F 83 E3 78 83 A1 00 14 83 81 00 10 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEB3CB8 8
+. 7F 9F E8 40 41 9C FF EC
+
+. 0 FEB3CC0 8
+. 7F 83 E3 78 48 06 21 41
+
+. 0 FF15E04 8
+. 94 21 FF E0 48 0B 20 49
+
+. 0 FF15E0C 40
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 93 E1 00 1C 7C 7F 1B 78 81 3E 1C 4C 7F A9 12 14 38 00 00 EA 7F E3 FB 78 44 00 00 02
+
+==3281== 
diff --git a/VEX/orig_ppc32/morefp.orig b/VEX/orig_ppc32/morefp.orig
new file mode 100644
index 0000000..818586b
--- /dev/null
+++ b/VEX/orig_ppc32/morefp.orig
@@ -0,0 +1,6944 @@
+==3289== Nulgrind, a binary JIT-compiler for Linux.
+==3289== Copyright (C) 2002-2004, and GNU GPL'd, by Nicholas Nethercote.
+==3289== Using valgrind-2.2.0-ppc, a program supervision framework for Linux.
+==3289== Copyright (C) 2000-2004, and GNU GPL'd, by Julian Seward et al.
+==3289== For more details, rerun with: -v
+==3289== 
+. 0 254804D4 20
+. 7C 23 0B 78 38 80 00 00 38 21 FF F0 90 81 00 00 4B FF 15 81
+
+. 0 25471A64 80
+. 7C 08 02 A6 39 60 00 4B 94 21 FD 50 7D 69 03 A6 92 E1 02 8C 90 01 02 B4 7C 77 1B 78 93 01 02 90 38 00 00 00 93 21 02 94 39 21 00 30 93 41 02 98 93 61 02 9C 93 81 02 A0 93 A1 02 A4 93 C1 02 A8 93 E1 02 AC 90 09 00 00 39 29 00 04 42 00 FF F8
+
+. 0 25471AA8 12
+. 90 09 00 00 39 29 00 04 42 00 FF F8
+
+. 0 25471AB4 4
+. 48 00 00 09
+
+. 0 25471ABC 12
+. 7C E8 02 A6 3B 01 00 10 48 02 55 3D
+
+. 0 25497000 4
+. 4E 80 00 21
+
+. 0 25471AC8 64
+. 7D 48 02 A6 81 87 00 00 81 0A 00 00 55 86 30 2E 7C C4 36 70 7C A8 38 50 39 01 00 30 7C E5 22 14 90 E1 00 10 80 6A 00 00 7C 07 18 2E 7D 27 1A 14 91 21 00 18 7D 2A 4B 78 2F 80 00 00 41 9E 00 68
+
+. 0 25471B08 64
+. 3F E0 6F FF 3F 60 70 00 3F 40 6F FF 3F 20 6F FF 3F A0 6F FF 3F 80 6F FF 7C 0B 03 78 63 E6 FF FF 63 65 00 21 63 44 FD FF 63 23 FE 34 63 BD FE FF 63 9C FF 40 2C 0B 00 21 55 60 10 3A 40 81 00 18
+
+. 0 25471B5C 16
+. 7D 48 01 2E 85 6A 00 08 2C 8B 00 00 40 86 FF D4
+
+. 0 25471B3C 12
+. 2C 0B 00 21 55 60 10 3A 40 81 00 18
+
+. 0 25471B48 12
+. 7C 0B 30 50 28 80 00 0F 41 85 02 D8
+
+. 0 25471B54 24
+. 7C 0B 28 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2C 8B 00 00 40 86 FF D4
+
+. 0 25471E28 24
+. 55 79 08 3C 7F 2C 0E 70 7D 89 60 F8 20 0C 00 31 2B 09 00 02 40 B9 FD 1C
+
+. 0 25471E40 12
+. 7F 4B 20 50 2B 9A 00 0B 41 9D 00 10
+
+. 0 25471E4C 12
+. 7C 0B 18 50 54 00 10 3A 4B FF FD 08
+
+. 0 25471B6C 12
+. 81 78 00 00 2F 0B 00 00 41 9A 00 B0
+
+. 0 25471B78 12
+. 81 28 00 10 2F 89 00 00 41 9E 00 10
+
+. 0 25471B84 24
+. 81 49 00 04 7C 6A 5A 14 90 69 00 04 81 28 00 0C 2C 09 00 00 41 82 00 10
+
+. 0 25471B9C 24
+. 80 A9 00 04 7C 85 5A 14 90 89 00 04 81 28 00 14 2C 89 00 00 41 86 00 10
+
+. 0 25471BB4 24
+. 80 E9 00 04 7C C7 5A 14 90 C9 00 04 81 28 00 18 2F 09 00 00 41 9A 00 10
+
+. 0 25471BCC 24
+. 83 A9 00 04 7F 9D 5A 14 93 89 00 04 81 28 00 1C 2F 89 00 00 41 9E 00 10
+
+. 0 25471BE4 24
+. 80 09 00 04 7F E0 5A 14 93 E9 00 04 81 28 00 5C 2C 09 00 00 41 82 00 10
+
+. 0 25471BFC 24
+. 83 29 00 04 7D 99 5A 14 91 89 00 04 81 28 00 C4 2C 89 00 00 41 86 00 10
+
+. 0 25471C14 32
+. 83 49 00 04 7D 1A 5A 14 91 09 00 04 80 E1 00 10 2F 07 00 00 39 60 FF FF 91 61 02 3C 40 9A 00 10
+
+. 0 25471C40 20
+. 7F 03 C3 78 38 80 00 00 38 A0 00 00 3B 60 00 00 48 00 EB E1
+
+. 0 25480830 16
+. 7C C8 02 A6 94 21 FF D0 7D 80 00 26 48 01 67 C5
+
+. 0 25480840 68
+. 93 21 00 14 7C 99 23 78 90 C1 00 34 80 03 00 7C 93 A1 00 24 7C 7D 1B 78 2F 80 00 00 93 C1 00 28 92 E1 00 0C 7F C8 02 A6 93 01 00 10 93 41 00 18 93 61 00 1C 93 81 00 20 93 E1 00 2C 91 81 00 08 41 9E 02 94
+
+. 0 25480884 56
+. 82 E3 00 28 3D 40 AA AA 61 48 AA AB 80 83 00 2C 80 F7 00 04 83 E4 00 04 7D 67 40 16 55 7B E8 FE 28 1B 20 00 57 63 08 3C 38 03 00 12 7D 20 1A 14 3A E9 C0 00 40 81 02 9C
+
+. 0 25480B54 8
+. 7C 17 03 78 4B FF FD 64
+
+. 0 254808BC 72
+. 56 E8 10 3A 2E 19 00 00 7F 48 FA 14 3C 60 7D 69 3C FA 00 01 57 4B 04 3E 38 87 80 00 3C C0 4E 80 54 89 84 3E 65 60 81 6B 65 2C 3D 6B 60 7C 03 A6 60 D8 04 20 91 9F 00 00 90 1F 00 04 93 9F 00 08 93 1F 00 0C 41 92 01 78
+
+. 0 25480A78 32
+. 81 7E 04 F0 31 59 FF FF 7C AA C9 10 31 4B FF FF 7C 8A 59 10 7C AA 20 39 39 40 00 04 41 82 00 10
+
+. 0 25480AA4 4
+. 41 92 00 B8
+
+. 0 25480B5C 8
+. 39 60 00 06 4B FF FF 4C
+
+. 0 25480AAC 8
+. 39 20 00 00 48 00 00 14
+
+. 0 25480AC4 8
+. 7E 09 58 40 41 90 FF EC
+
+. 0 25480AB4 24
+. 55 3A 10 3A 7E FA FA 14 7C 00 B8 6C 7D 29 52 14 7E 09 58 40 41 90 FF EC
+
+. 0 25480ACC 28
+. 55 68 10 3A 7C E8 FA 14 39 27 FF FC 7C 00 48 6C 7C 00 04 AC 39 20 00 00 48 00 00 14
+
+. 0 25480AF8 8
+. 7F 89 58 40 41 9C FF EC
+
+. 0 25480AE8 12
+. 55 3B 10 3A 7C DB FA 14 7C 00 37 AC
+
+. 0 25480AF4 12
+. 7D 29 52 14 7F 89 58 40 41 9C FF EC
+
+. 0 25480B00 12
+. 7C 68 FA 14 3B E3 FF FC 7C 00 FF AC
+
+. 0 25480B0C 72
+. 7C 00 04 AC 4C 00 01 2C 7F 23 CB 78 81 01 00 08 83 21 00 34 82 E1 00 0C 7D 00 81 20 7F 28 03 A6 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 25471C54 44
+. 38 E0 00 00 81 61 00 4C 39 40 00 01 91 41 02 64 2C 0B 00 00 93 61 02 60 93 61 02 70 93 61 02 58 93 61 02 6C 90 E1 02 54 41 82 00 18
+
+. 0 25471C80 28
+. 80 AB 00 04 80 81 00 50 90 A1 02 50 80 E4 00 04 90 E1 02 54 2C 83 00 00 41 86 00 10
+
+. 0 25471CA8 48
+. 3B 61 00 08 3B 20 00 02 81 41 00 D0 81 21 00 48 2F 8A 00 00 81 1B 02 48 81 7B 02 4C 83 49 00 04 39 20 00 00 7F 88 5A 14 83 A1 00 10 41 9E 00 08
+
+. 0 25471CD8 32
+. 81 2A 00 04 3C A0 AA AA 60 A4 AA AB 7D 4B 20 16 7D 0B 43 78 55 40 E8 FE 7C 00 48 40 40 81 00 08
+
+. 0 25471CF8 20
+. 7D 20 4B 78 1C 60 00 0C 7F EB 1A 14 7F 8B F8 40 40 9C 00 20
+
+. 0 25471D0C 28
+. 81 8B 00 08 80 CB 00 00 39 6B 00 0C 7C EC EA 14 7F 8B F8 40 7C E6 E9 2E 41 9C FF E8
+
+. 0 25471D28 8
+. 7C 9F E0 40 40 84 00 8C
+
+. 0 25471D30 32
+. 81 1F 00 04 81 7F 00 00 55 0A 06 3E 55 00 E1 36 2F 0A 00 16 7C C0 D2 14 7D 0B EA 14 41 9A 00 C4
+
+. 0 25471D50 8
+. 2F 8A 00 00 41 9E 00 58
+
+. 0 25471D58 12
+. 89 66 00 0C 55 69 E1 3F 41 82 01 4C
+
+. 0 25471D64 16
+. A0 06 00 0E 2F 00 00 00 39 20 00 00 41 9A 00 08
+
+. 0 25471D74 28
+. 81 21 00 10 80 A6 00 04 7D 29 2A 14 2F 8A 00 01 80 7F 00 08 7D 29 1A 14 41 9E 00 78
+
+. 0 25471D90 8
+. 2C 0A 00 14 41 82 00 70
+
+. 0 25471E04 12
+. 3B FF 00 0C 91 28 00 00 4B FF FF A4
+
+. 0 25471DB0 8
+. 7C 9F E0 40 41 84 FF 7C
+
+. 0 25471D98 20
+. 7F 03 C3 78 7F E4 FB 78 38 A0 00 00 7C C7 33 78 48 00 EF 01
+
+. 0 25480CA8 16
+. 2B 8A 00 4D 7C 08 02 A6 94 21 FF D0 48 01 63 4D
+
+. 0 25480CB8 68
+. 93 41 00 18 7D 3A 4B 78 93 61 00 1C 7C DB 33 78 93 81 00 20 7C 9C 23 78 93 A1 00 24 7C BD 2B 78 93 C1 00 28 7F C8 02 A6 93 E1 00 2C 7D 5F 53 78 90 01 00 34 90 61 00 08 90 E1 00 10 91 01 00 0C 41 9D 01 40
+
+. 0 25480CFC 24
+. 81 7E 04 50 55 44 10 3A 7C 64 58 2E 7D 23 5A 14 7D 29 03 A6 4E 80 04 20
+
+. 0 25480FD8 24
+. 80 A1 00 0C 7F A5 D0 50 57 BF 30 32 7F E9 36 70 7F 89 E8 00 41 9E 02 54
+
+. 0 25481240 16
+. 55 3A 01 BA 67 40 48 00 90 05 00 00 4B FF FB B4
+
+. 0 25480E00 16
+. 7C 00 28 6C 7C 00 04 AC 83 81 00 0C 7C 00 E7 AC
+
+. 0 25480E10 40
+. 80 61 00 34 83 41 00 18 83 61 00 1C 7C 68 03 A6 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 25471DAC 12
+. 3B FF 00 0C 7C 9F E0 40 41 84 FF 7C
+
+. 0 25471DB8 12
+. 37 39 FF FF 3B 7B 00 0C 40 80 FE F0
+
+. 0 25471CB0 40
+. 81 41 00 D0 81 21 00 48 2F 8A 00 00 81 1B 02 48 81 7B 02 4C 83 49 00 04 39 20 00 00 7F 88 5A 14 83 A1 00 10 41 9E 00 08
+
+. 0 25471CFC 16
+. 1C 60 00 0C 7F EB 1A 14 7F 8B F8 40 40 9C 00 20
+
+. 0 25471DC4 12
+. 7E E3 BB 78 7F 04 C3 78 4B FF FB BD
+
+. 0 25471988 12
+. 94 21 FF E0 7D 28 02 A6 48 02 56 71
+
+. 0 25471994 68
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 91 21 00 24 38 A0 01 2C 80 04 00 00 83 BE 04 C8 81 84 00 08 38 84 00 20 90 1D 01 B8 91 9D 01 C0 93 61 00 0C 7C 7B 1B 78 93 81 00 10 38 7D 01 D8 3B 9D 01 B8 48 01 22 0D
+
+. 0 25483BE0 52
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C 7B 1B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C 7F 1B 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+
+. 0 25483C14 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+
+. 0 25483C40 8
+. 73 A0 00 03 40 82 00 6C
+
+. 0 25483C48 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 00 71
+
+. 0 25483CC4 16
+. 54 A0 07 7E 7D 88 02 A6 2B 80 00 07 48 01 33 31
+
+. 0 25483CD4 20
+. 94 21 FF F0 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 41 9D 00 28
+
+. 0 25483CE8 24
+. 81 7E 04 90 54 00 10 3A 7C CB 00 2E 7D 26 5A 14 7D 29 03 A6 4E 80 04 20
+
+. 0 25483D94 20
+. 81 24 00 00 38 63 FF E8 38 84 FF EC 38 A5 00 05 4B FF FF 98
+
+. 0 25483D3C 32
+. 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 25483D0C 80
+. 80 04 00 00 91 23 00 00 81 24 00 04 90 03 00 04 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 25483D5C 16
+. 83 C1 00 08 38 21 00 10 91 23 00 00 4E 80 00 20
+
+. 0 25483C58 24
+. 57 86 00 3A 57 85 07 BE 7F FF 32 14 7F BD 32 14 2C 85 00 00 41 86 00 1C
+
+. 0 25483C88 40
+. 81 01 00 24 7F 63 DB 78 83 81 00 10 83 61 00 0C 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 254719D8 8
+. 7F 83 E3 78 48 00 6B 91
+
+. 0 2547856C 16
+. 81 23 00 30 94 21 FF F0 2F 89 00 00 41 9E 00 24
+
+. 0 2547857C 40
+. 81 69 00 04 81 4B 00 00 38 8B 00 08 90 83 01 70 55 49 10 3A 91 43 01 6C 7C 04 4A 14 90 03 01 74 38 21 00 10 4E 80 00 20
+
+. 0 254719E0 64
+. 80 FE 04 FC 80 DE 05 00 39 40 FF FF 80 BE 04 A4 39 00 00 01 81 7E 04 C0 7F 63 DB 78 80 9E 00 24 91 5D 03 E4 93 9D 01 CC 91 1D 03 30 90 FD 03 58 90 DD 03 5C 90 BD 03 60 90 2B 00 00 48 00 D6 29
+
+. 0 2547F044 16
+. 7D 88 02 A6 94 21 FD E0 38 E3 00 04 48 01 7F B1
+
+. 0 2547F054 136
+. 93 C1 02 18 7F C8 02 A6 91 81 02 24 81 03 00 00 80 BE 04 C0 55 06 10 3A 81 3E 04 D4 7D 46 3A 14 90 65 00 00 80 0A 00 04 39 4A 00 04 80 7E 04 D0 2F 80 00 00 81 7E 04 B8 92 A1 01 F4 3A A0 00 00 92 C1 01 F8 3A C0 00 00 92 E1 01 FC 7C 97 23 78 93 01 02 00 3B 00 00 00 93 21 02 04 3B 20 00 00 93 E1 02 1C 3B E0 00 00 93 41 02 08 93 61 02 0C 93 81 02 10 93 A1 02 14 91 03 00 00 90 E9 00 00 91 4B 00 00 41 9E 00 10
+
+. 0 2547F0DC 12
+. 84 8A 00 04 2F 84 00 00 40 9E FF F8
+
+. 0 2547F0E8 24
+. 3B 6A 00 13 38 0A 00 04 57 6B 00 36 83 4B 00 00 2F 9A 00 10 41 9D 00 08
+
+. 0 2547F104 44
+. 7C 08 03 78 83 5E 04 F4 81 68 00 00 3B 80 00 00 81 3E 03 EC 2C 0B 00 00 83 BE 04 E0 90 09 00 00 93 A1 01 E0 93 9A 00 0C 41 82 00 4C
+
+. 0 2547F130 16
+. 7D 6A 5B 78 38 0A FF FD 28 80 00 14 41 85 00 24
+
+. 0 2547F140 24
+. 81 7E 03 F4 54 05 10 3A 7C 65 58 2E 7C E3 5A 14 7C E9 03 A6 4E 80 04 20
+
+. 0 2547F160 24
+. 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FF C0
+
+. 0 2547F134 12
+. 38 0A FF FD 28 80 00 14 41 85 00 24
+
+. 0 2547F3D4 12
+. 81 3E 04 F0 2F 09 00 00 41 BA FD 84
+
+. 0 2547F3E0 36
+. 80 68 00 04 90 69 00 00 81 48 00 00 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 34
+
+. 0 2547F158 32
+. 83 A8 00 04 93 BA 00 3C 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FF C0
+
+. 0 2547F4BC 16
+. 81 48 00 04 91 5A 00 04 81 48 00 00 4B FF FF 24
+
+. 0 2547F3EC 24
+. 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 34
+
+. 0 2547F418 16
+. 81 28 00 04 91 3A 00 1C 81 48 00 00 4B FF FF C8
+
+. 0 2547F4EC 28
+. 38 A0 00 01 82 C8 00 04 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 30
+
+. 0 2547F4CC 28
+. 38 A0 00 01 82 A8 00 04 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 50
+
+. 0 2547F494 36
+. 80 08 00 04 38 A0 00 01 81 48 00 00 90 01 01 E0 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 80
+
+. 0 2547F470 32
+. 38 A0 00 01 80 C8 00 04 7C AB 50 30 85 48 00 08 7F 39 32 78 7F FF 5B 78 2F 8A 00 00 40 9E FC A8
+
+. 0 2547F44C 32
+. 38 A0 00 01 81 88 00 04 7C AB 50 30 85 48 00 08 7F 18 62 78 7F FF 5B 78 2F 8A 00 00 40 9E FC CC
+
+. 0 2547F3A4 44
+. 83 68 00 04 38 A0 00 01 80 9E 04 B4 3B E0 FF FF 81 48 00 00 93 64 00 00 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 68
+
+. 0 2547F3D0 4
+. 4B FF FD A8
+
+. 0 2547F178 8
+. 38 61 00 50 48 00 36 4D
+
+. 0 254827C8 8
+. 38 00 00 7A 44 00 00 02
+
+. 0 254827D0 4
+. 4C A3 00 20
+
+. 0 2547F180 12
+. 2C 03 00 00 3B A1 00 10 40 82 01 DC
+
+. 0 2547F18C 32
+. 3B A1 00 D2 89 3D 00 00 7F A3 EB 78 38 E0 00 00 39 00 00 00 39 49 FF D0 2B 8A 00 09 41 9D 00 68
+
+. 0 2547F1AC 24
+. 89 63 00 01 39 29 FF D0 39 43 00 01 38 0B FF D0 2B 80 00 09 41 9D 00 20
+
+. 0 2547F1E0 24
+. 2C 8B 00 2E 54 FB 40 2E 7F 67 4B 78 39 08 00 01 38 6A 00 01 40 86 00 14
+
+. 0 2547F1F8 16
+. 89 2A 00 01 3B 89 FF D0 28 1C 00 09 40 81 FF A8
+
+. 0 2547F208 8
+. 2F 08 00 02 41 99 00 10
+
+. 0 2547F21C 16
+. 3C A0 00 02 60 AB 02 04 7F 87 58 40 40 9D 03 B4
+
+. 0 2547F22C 12
+. 90 FA 00 08 38 80 00 6E 48 00 3B CD
+
+. 0 25482E00 72
+. 50 84 44 2E 39 60 FF FF 50 84 80 1E 54 6A 1E F8 3C C0 FE FF 3C E0 7F 7F 54 68 00 3A 38 C6 FE FF 38 E7 7F 7F 80 A8 00 00 7D 6B 54 30 7C A5 5B 38 7C 06 2A 14 7C E9 28 F8 7C 00 48 39 7C 8C 2A 78 7D 8C 5B 38 48 00 00 20
+
+. 0 25482E64 12
+. 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+. 0 25482E48 20
+. 84 A8 00 04 7C 00 48 39 7C 06 2A 14 7C E9 28 F8 40 82 00 5C
+
+. 0 25482E5C 20
+. 7C 00 48 39 7C 8C 2A 78 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+. 0 25482E70 12
+. 7C 00 48 39 38 60 00 00 4D 82 00 20
+
+. 0 2547F238 8
+. 2C 83 00 00 41 86 02 F4
+
+. 0 2547F530 20
+. 81 3A 00 08 3C 00 00 02 60 03 05 44 7C 89 18 40 41 A5 FD 24
+
+. 0 2547F264 12
+. 2F 1F FF FF 83 BE 04 B4 41 9A 00 30
+
+. 0 2547F29C 12
+. 83 1A 00 04 2F 98 00 00 40 9E 00 0C
+
+. 0 2547F2B0 12
+. 38 60 00 00 83 9E 05 00 48 00 11 19
+
+. 0 254803D0 16
+. 94 21 FF F0 90 61 00 08 38 00 00 2D 44 00 00 02
+
+. 0 254803E0 12
+. 80 C1 00 08 7C 88 02 A6 48 01 6C 19
+
+. 0 254803EC 32
+. 7C A8 02 A6 80 A5 05 04 7C 88 03 A6 90 65 00 00 7C 06 18 40 38 21 00 10 38 60 00 00 4C A1 00 20
+
+. 0 2547F2BC 36
+. 81 3E 05 04 80 9A 00 04 81 69 00 00 7F 24 00 D0 7C 8B E0 40 7F 64 5A 14 39 9B FF FF 7D 83 C8 38 40 84 02 D8
+
+. 0 2547F2E0 12
+. 80 7A 00 0C 2C 03 00 00 41 82 00 0C
+
+. 0 2547F2F4 8
+. 38 60 00 00 48 00 11 1D
+
+. 0 25480414 12
+. 94 21 FF E0 7C 88 02 A6 48 01 6B E5
+
+. 0 25480420 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 81 00 24 93 A1 00 14 7C 7D 1B 78 83 FE 05 04 38 60 00 00 80 1F 00 00 2F 80 00 00 41 9E 00 3C
+
+. 0 2548044C 16
+. 81 3E 04 9C 80 A9 00 00 2C 05 00 00 40 82 00 2C
+
+. 0 2548045C 8
+. 2F 1D 00 00 40 9A 00 3C
+
+. 0 25480464 32
+. 80 7F 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547F2FC 8
+. 7F 83 E0 00 41 9E 02 C4
+
+. 0 2547F304 12
+. 80 BD 00 00 2C 85 00 00 40 86 02 A0
+
+. 0 2547F310 20
+. 7E C3 B3 78 7E E8 03 A6 7E A4 AB 78 38 A1 01 E0 4E 80 00 21
+
+. 0 254721F0 12
+. 94 21 FF 30 7C 08 02 A6 48 02 4E 09
+
+. 0 254721FC 204
+. 93 C1 00 C8 7F C8 02 A6 92 01 00 90 90 01 00 D4 7D 60 00 26 91 C1 00 88 82 1E 04 B4 81 9E 04 B8 81 50 00 00 81 DE 04 C8 2F 8A 00 00 81 3E 00 20 91 61 00 84 81 1E 04 D8 81 6C 00 00 80 FE 04 B0 80 DE 00 1C 80 1E 04 A8 93 E1 00 CC 7C 3F 0B 78 91 E1 00 8C 92 41 00 98 3A 40 00 00 92 61 00 9C 3A 60 00 00 92 81 00 A0 3A 80 00 00 92 A1 00 A4 3A A0 00 00 92 C1 00 A8 7C B6 2B 78 92 E1 00 AC 7C 97 23 78 93 01 00 B0 7C 78 1B 78 93 21 00 B4 3B 20 00 00 91 2E 03 FC 92 21 00 94 93 41 00 B8 93 61 00 BC 93 81 00 C0 93 A1 00 C4 91 0E 01 B4 90 EE 04 2C 90 CE 03 F8 90 0E 04 04 91 7F 00 38 81 FE 04 F4 81 3E 00 7C 41 9E 00 08
+
+. 0 254722CC 16
+. 82 3E 00 40 91 2F 00 54 38 7F 00 38 48 00 D9 51
+
+. 0 2547FC28 24
+. 94 21 FF F0 39 40 00 00 81 63 00 00 81 2B 00 00 2F 89 00 00 41 9E 00 1C
+
+. 0 2547FC40 12
+. 88 09 00 00 2C 00 00 4C 41 82 00 1C
+
+. 0 2547FC4C 12
+. 85 2B 00 04 2F 89 00 00 40 9E FF EC
+
+. 0 2547FC64 12
+. 88 89 00 01 2C 84 00 44 40 86 FF E0
+
+. 0 2547FC70 12
+. 88 A9 00 02 2F 05 00 5F 40 9A FF D4
+
+. 0 2547FC7C 16
+. 38 CB 00 04 39 49 00 03 90 C3 00 00 4B FF FF D0
+
+. 0 2547FC58 12
+. 7D 43 53 78 38 21 00 10 4E 80 00 20
+
+. 0 254722DC 8
+. 7C 7D 1B 79 41 82 00 64
+
+. 0 254722E4 16
+. 88 1D 00 00 39 60 00 00 7C 09 03 79 40 A2 00 18
+
+. 0 25472308 8
+. 2C 89 00 3D 40 86 FF EC
+
+. 0 254722F8 16
+. 39 6B 00 01 7C 1D 58 AE 7C 09 03 79 41 82 00 0C
+
+. 0 25472310 8
+. 2D 80 00 3D 40 8E FF C0
+
+. 0 25472318 12
+. 38 0B FF FC 2A 00 00 10 41 B1 FF B4
+
+. 0 25472324 24
+. 80 BE 00 80 54 1A 10 3A 7C 9A 28 2E 7C 64 2A 14 7C 69 03 A6 4E 80 04 20
+
+. 0 25473248 16
+. 80 9E 00 C4 7F A3 EB 78 38 A0 00 0C 48 01 00 45
+
+. 0 25483298 32
+. 2B 85 00 0F 94 21 FF E0 93 81 00 10 7C 6B 1B 78 93 A1 00 14 93 C1 00 18 93 E1 00 1C 40 9D 00 BC
+
+. 0 25483370 8
+. 2C 05 00 00 41 82 00 24
+
+. 0 25483378 24
+. 89 8B 00 00 39 6B 00 01 8B A4 00 00 38 84 00 01 7C 7D 60 51 40 82 00 10
+
+. 0 25483390 8
+. 34 A5 FF FF 40 82 FF E4
+
+. 0 25483398 28
+. 38 60 00 00 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25473258 8
+. 2F 83 00 00 40 9E 04 28
+
+. 0 25473260 16
+. 81 3E 00 4C 38 1D 00 0D 90 09 00 00 4B FF F0 68
+
+. 0 254722D4 8
+. 38 7F 00 38 48 00 D9 51
+
+. 0 25473218 16
+. 80 9E 00 C0 7F A3 EB 78 38 A0 00 07 48 01 00 75
+
+. 0 2548339C 24
+. 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25473228 8
+. 2C 83 00 00 40 86 04 8C
+
+. 0 254736B8 16
+. 80 9E 00 E0 7F A3 EB 78 38 A0 00 07 48 00 FB D5
+
+. 0 254736C8 8
+. 2C 03 00 00 40 82 00 88
+
+. 0 254736D0 12
+. 38 FD 00 08 90 F1 00 00 4B FF EB FC
+
+. 0 25472344 16
+. 83 70 00 00 93 3F 00 30 2D 9B 00 00 40 8E 12 8C
+
+. 0 25472354 32
+. 81 3E 00 34 30 14 FF FF 7F 40 A1 10 83 A9 00 00 31 3D FF FF 7F 89 E9 10 7F 80 D0 39 40 82 0B 58
+
+. 0 25472374 16
+. 83 56 00 00 80 BE 04 E0 7D 9A 28 00 41 8E 09 40
+
+. 0 25472384 28
+. 80 7E 00 84 38 A0 00 00 38 C0 00 00 38 E0 00 00 7C 64 1B 78 39 00 00 00 48 00 6D 59
+
+. 0 254790F4 12
+. 94 21 FF D0 7C 08 02 A6 48 01 DF 05
+
+. 0 25479100 80
+. 93 C1 00 28 7F C8 02 A6 92 C1 00 08 92 E1 00 0C 7C 77 1B 78 93 01 00 10 7C 83 23 78 93 21 00 14 7C B8 2B 78 93 41 00 18 7C F6 3B 78 93 61 00 1C 7C 9A 23 78 93 81 00 20 7D 19 43 78 93 E1 00 2C 7C DC 33 78 90 01 00 34 93 A1 00 24 48 00 9E 85
+
+. 0 25482FD0 56
+. 54 64 00 3A 3C E0 7F 7F 54 65 1E F8 81 04 00 00 39 20 FF FF 38 E7 7F 7F 7D 29 2C 30 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 40 00 F8 7C 08 48 39 7C 60 11 20 40 82 00 70
+
+. 0 25483074 20
+. 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+
+. 0 25479150 20
+. 38 80 00 01 7C 69 1B 78 38 63 02 4D 3B 69 00 01 48 01 E8 CD
+
+. 0 25497A2C 4
+. 4B FE 84 8C
+
+. 0 2547FEB8 20
+. 7C 63 21 D6 7C A8 02 A6 94 21 FF F0 90 A1 00 14 48 01 7B 5D
+
+. 0 25497A24 4
+. 4B FE 84 6C
+
+. 0 2547FE90 24
+. 7C 88 02 A6 94 21 FF F0 90 81 00 14 7C 64 1B 78 38 60 00 08 48 01 7B 79
+
+. 0 25497A1C 4
+. 4B FE 83 2C
+
+. 0 2547FD48 12
+. 94 21 FF E0 7C 08 02 A6 48 01 72 B1
+
+. 0 2547FD54 84
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 90 01 00 24 7C 6A 1B 78 93 81 00 10 38 A0 00 03 83 7E 04 18 38 C0 00 22 93 A1 00 14 38 E0 FF FF 81 7B 00 00 7F A3 00 D0 93 E1 00 1C 39 00 00 00 2F 8B 00 00 81 9E 05 00 38 60 00 00 7C 9C 23 78 83 FE 04 1C 40 9E 00 24
+
+. 0 2547FDA8 64
+. 81 3E 04 F4 91 9F 00 00 81 69 00 04 7C 8B 62 14 39 24 FF FF 7C 8B 00 D0 7D 2B 20 38 91 7B 00 00 81 3F 00 00 7C 89 52 14 38 04 FF FF 7C 0A E8 38 7D 8A E2 14 91 5F 00 00 7C 0C 58 40 40 80 00 3C
+
+. 0 2547FDE8 56
+. 80 7F 00 00 80 C1 00 24 7D 03 E2 14 80 FE 04 20 91 1F 00 00 7C C8 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 90 67 00 00 4E 80 00 20
+
+. 0 2547FEA8 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 2547FECC 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 25479164 12
+. 7C 7F 1B 79 38 60 00 00 41 82 01 74
+
+. 0 25479170 28
+. 3B BF 02 40 7F 44 D3 78 7F 65 DB 78 93 FF 00 14 93 BF 00 1C 38 7F 02 4C 48 00 AA 59
+
+. 0 25483C68 8
+. 2C 85 00 00 41 86 00 1C
+
+. 0 25483C70 24
+. 7C A9 03 A6 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+
+. 0 2547918C 84
+. 38 A0 00 01 90 BD 00 08 1D 59 00 18 80 DF 01 80 38 80 FF FF 80 BE 04 C8 53 06 F0 02 93 9F 01 68 90 DF 01 80 38 DF 01 AC 7D 6A 28 2E 90 7F 02 40 38 60 00 04 2F 8B 00 00 90 9F 02 2C 90 7F 01 BC 38 80 00 00 92 FF 00 04 93 3F 00 18 90 DF 01 C0 40 9E 01 3C
+
+. 0 254791E0 52
+. 7F EA 29 2E 2F 1C 00 00 7D 4A 2A 14 81 65 01 98 83 2A 00 04 81 85 01 9C 3B 19 00 01 31 0C 00 01 7C EB 01 94 93 0A 00 04 90 E5 01 98 91 05 01 9C 40 BA 00 0C
+
+. 0 25479214 4
+. 48 00 01 64
+
+. 0 25479378 8
+. 7F FC FB 78 4B FF FE AC
+
+. 0 25479228 12
+. 2F 84 00 00 39 3C 01 58 41 9E 00 10
+
+. 0 25479240 12
+. 56 CC EF FE 7D 8B 20 39 41 82 00 10
+
+. 0 25479258 28
+. 54 9A 10 3A 7D 3A 31 2E 3B 7F 01 58 93 7F 01 C4 8B B7 00 00 2F 1D 00 00 41 9A 00 6C
+
+. 0 254792DC 60
+. 7F E3 FB 78 83 81 00 34 82 C1 00 08 7F 88 03 A6 82 E1 00 0C 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 254723A0 12
+. 82 8E 00 00 2D 94 00 00 41 8E 0C 90
+
+. 0 254723AC 84
+. 80 D6 00 00 93 14 01 4C 90 D4 01 50 B2 F4 01 54 56 EC 28 34 81 14 01 78 7C 8C C2 14 80 F4 01 7C 7E 04 C0 40 39 28 00 01 3B 27 00 01 39 00 00 00 38 E0 00 00 3B 80 FF FF 7F 0A C3 78 93 94 01 A0 91 34 01 78 93 34 01 7C 91 14 01 A4 90 F4 01 A8 40 91 00 C8
+
+. 0 25472400 24
+. 3F 60 64 74 83 BE 00 44 80 DE 00 48 63 63 E5 52 38 A0 00 01 48 00 00 28
+
+. 0 2547243C 12
+. 80 0A 00 00 2F 00 00 06 41 9A 07 98
+
+. 0 25472BDC 16
+. 81 6A 00 08 7F 4B C0 50 93 54 00 00 4B FF F8 48
+
+. 0 25472430 12
+. 39 4A 00 20 7F 84 50 40 40 9D 00 84
+
+. 0 25472448 8
+. 2B 80 00 06 40 BD FF CC
+
+. 0 25472418 8
+. 2C 00 00 02 41 82 08 54
+
+. 0 25472420 8
+. 28 80 00 02 41 85 07 E4
+
+. 0 25472C08 8
+. 2F 00 00 03 40 9A F8 24
+
+. 0 25472C10 36
+. 82 AE 01 C0 7F A7 EB 78 81 74 00 00 2F 15 00 00 81 2A 00 08 93 AE 01 D4 7C 0B 4A 14 90 1D 00 00 40 9A 04 44
+
+. 0 25473074 8
+. 3A A0 00 01 4B FF F3 B8
+
+. 0 25472428 8
+. 2E 00 00 01 41 92 0B 80
+
+. 0 25472FAC 36
+. 80 EA 00 1C 81 0A 00 08 7D 87 00 D0 80 F4 00 00 7D 1C 60 38 81 34 01 A0 7C 07 E2 14 7F 09 00 40 41 99 00 B0
+
+. 0 2547307C 12
+. 90 14 01 A0 81 0A 00 08 4B FF FF 4C
+
+. 0 25472FD0 24
+. 83 6A 00 14 7E E7 42 14 81 14 01 A4 7D 77 DA 14 7F 88 58 40 40 9C 00 08
+
+. 0 25472FE8 16
+. 91 74 01 A4 83 4A 00 18 73 49 00 01 41 A2 F4 3C
+
+. 0 25472FF8 12
+. 80 14 01 A8 7C 80 58 40 40 A4 F4 30
+
+. 0 25473004 8
+. 91 74 01 A8 4B FF F4 28
+
+. 0 25472C70 20
+. 82 D4 00 00 83 2A 00 08 7C 16 CA 14 90 14 00 08 4B FF F7 B0
+
+. 0 25472450 16
+. 3E E0 64 74 62 E9 E5 51 7F 80 48 00 41 9E 08 28
+
+. 0 25472C84 12
+. 80 EA 00 18 90 EE 04 00 4B FF F7 A4
+
+. 0 254724BC 20
+. 81 14 01 A4 80 F4 01 A8 81 34 02 18 2C 09 00 00 41 82 00 10
+
+. 0 254724DC 8
+. 2C 88 00 00 40 86 00 0C
+
+. 0 254724EC 8
+. 2E 07 00 00 40 92 00 0C
+
+. 0 254724FC 12
+. 80 AE 01 D4 2F 05 00 00 40 9A 00 1C
+
+. 0 25472520 8
+. 2E 13 00 00 40 92 01 A8
+
+. 0 25472528 12
+. 81 54 00 08 2C 0A 00 00 41 82 01 94
+
+. 0 25472534 16
+. 81 6A 00 00 39 14 00 20 2C 8B 00 00 41 86 00 64
+
+. 0 25472544 60
+. 3E C0 6F FF 3C C0 70 00 3C 00 6F FF 3F 40 6F FF 3E E0 6F FF 3E 60 6F FF 62 C7 FF FF 60 C6 00 21 60 05 FD FF 63 44 FE 34 62 E3 FE FF 62 7D FF 40 2F 0B 00 21 55 60 10 3A 40 99 00 18
+
+. 0 25472594 16
+. 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+. 0 25472574 12
+. 2F 0B 00 21 55 60 10 3A 40 99 00 18
+
+. 0 25472580 12
+. 7F 2B 38 50 2B 99 00 0F 41 9D 07 08
+
+. 0 2547258C 24
+. 7C 0B 30 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+. 0 254725A4 12
+. 81 74 00 00 2C 0B 00 00 41 82 00 AC
+
+. 0 25472658 12
+. 81 28 00 78 2C 09 00 00 41 82 00 30
+
+. 0 25472690 12
+. 81 28 00 98 2C 89 00 00 41 86 00 18
+
+. 0 254726B0 12
+. 81 68 00 74 2F 0B 00 00 41 9A 00 0C
+
+. 0 254726C4 8
+. 7E 83 A3 78 48 00 5E A5
+
+. 0 254726CC 12
+. 81 1F 00 30 2F 88 00 02 41 9E 0A 04
+
+. 0 254726D8 4
+. 41 92 08 8C
+
+. 0 25472F64 12
+. 82 BE 00 4C 80 75 00 00 48 00 21 69
+
+. 0 254750D4 12
+. 94 21 FF D0 7C 08 02 A6 48 02 1F 25
+
+. 0 254750E0 76
+. 93 C1 00 28 7F C8 02 A6 93 01 00 10 90 01 00 34 93 21 00 14 7C 79 1B 78 83 1E 04 F4 80 BE 01 54 80 98 00 10 80 DE 01 5C 80 78 00 0C 93 61 00 1C 93 81 00 20 93 E1 00 2C 7C 3F 0B 78 92 E1 00 0C 93 41 00 18 93 A1 00 24 48 00 A6 29
+
+. 0 2547F750 12
+. 94 21 FF B0 7C E8 02 A6 48 01 78 A9
+
+. 0 2547F75C 136
+. 93 C1 00 48 7F C8 02 A6 93 81 00 40 90 E1 00 54 7D 80 00 26 92 01 00 10 7C D0 33 78 81 3E 04 F4 92 61 00 1C 7C 93 23 78 81 69 00 40 80 09 00 3C 92 81 00 20 7C B4 2B 78 7C 1C 58 38 92 C1 00 28 2F 9C 00 00 93 E1 00 4C 92 21 00 14 7C 3F 0B 78 92 41 00 18 7C 76 1B 78 92 A1 00 24 30 03 FF FF 7D 40 19 10 92 E1 00 2C 39 60 00 00 93 01 00 30 93 21 00 34 93 41 00 38 93 61 00 3C 93 A1 00 44 91 81 00 0C 41 9E 00 28
+
+. 0 2547F808 48
+. 3B 0A 00 01 82 21 00 00 57 12 18 38 3B 20 00 00 3A F2 00 1E 3B 40 00 00 56 F5 00 36 7D 55 00 D0 7E 21 51 6E 39 01 00 17 55 1B 00 36 41 9E 00 48
+
+. 0 2547F87C 8
+. 2F 96 00 00 41 9E 00 18
+
+. 0 2547F898 32
+. 2E 18 00 01 57 29 18 38 83 3E 04 10 7E 69 DA 14 3A C0 00 03 7F 29 D9 2E 92 D3 00 04 41 92 03 24
+
+. 0 2547FBD8 12
+. 81 9B 00 04 39 0C 00 01 4B FF FD 30
+
+. 0 2547F910 24
+. 3B A0 00 01 7F AA C0 30 55 5A 18 38 91 54 00 00 7C 7A 42 14 48 01 81 01
+
+. 0 2547FDC8 32
+. 81 3F 00 00 7C 89 52 14 38 04 FF FF 7C 0A E8 38 7D 8A E2 14 91 5F 00 00 7C 0C 58 40 40 80 00 3C
+
+. 0 2547F928 8
+. 7C 76 1B 79 41 82 02 E8
+
+. 0 2547F930 4
+. 41 92 01 FC
+
+. 0 2547FB2C 60
+. 83 34 00 00 39 00 00 00 82 7B 00 04 3B 00 00 2F 57 29 18 38 91 16 00 0C 7C 69 B2 14 3B 93 00 01 93 96 00 04 3A 40 00 02 90 76 00 00 90 76 00 08 80 BB 00 04 80 9B 00 00 48 00 3E BD
+
+. 0 25483A20 44
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 E1 00 1C 7C 9F 23 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+
+. 0 25483AA0 8
+. 2C 85 00 00 41 86 00 1C
+
+. 0 25483AA8 24
+. 7C A9 03 A6 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+
+. 0 25483AAC 20
+. 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+
+. 0 25483AC0 36
+. 81 01 00 24 7F A3 EB 78 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547FB68 112
+. 9B 03 00 00 92 54 00 00 83 76 00 04 7E C3 B3 78 93 70 00 00 81 01 00 00 82 08 00 04 80 E8 FF BC 7E 08 03 A6 82 28 FF C4 82 08 FF C0 7C E0 81 20 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+. 0 2547512C 20
+. 81 3E 01 60 83 7E 01 64 90 69 00 00 38 60 00 0C 48 02 28 E9
+
+. 0 25475140 12
+. 7C 7C 1B 79 90 7B 00 00 41 82 01 88
+
+. 0 2547514C 40
+. 83 5E 01 54 3D 40 CC CC 61 46 CC CD 81 1A 00 00 55 07 10 3A 38 A7 00 27 7C 65 30 16 54 7D E1 3E 1C 7D 01 40 48 02 28 B5
+
+. 0 25475174 24
+. 80 9B 00 00 90 7C 00 00 81 24 00 00 80 DE 01 88 2F 89 00 00 41 9E 01 4C
+
+. 0 2547518C 96
+. 82 FE 04 C8 39 60 00 00 1C 7D 00 14 80 FA 00 00 81 1E 01 4C 39 40 00 00 80 9E 01 94 38 C0 00 00 80 BE 01 48 91 7B 00 04 7D 2B 4B 78 91 37 01 B0 55 5B 10 3A 7C 86 38 40 7F 5B 28 2E 91 7C 00 00 3B 9C 00 04 7D 9A 42 14 91 0B 00 0C 90 8B 00 04 39 0C 00 01 90 CB 00 08 93 4B 00 10 40 84 00 1C
+
+. 0 254751EC 24
+. 7C E9 03 A6 38 00 00 00 39 2B 00 14 90 09 00 00 39 29 00 04 42 00 FF F8
+
+. 0 254751F8 12
+. 90 09 00 00 39 29 00 04 42 00 FF F8
+
+. 0 25475204 12
+. 2F 0A 00 01 39 4A 00 01 41 9A 00 BC
+
+. 0 25475210 20
+. 7C 03 5A 14 28 0A 00 01 90 0B 00 00 7D 6B 1A 14 40 A1 FF 9C
+
+. 0 254751BC 48
+. 55 5B 10 3A 7C 86 38 40 7F 5B 28 2E 91 7C 00 00 3B 9C 00 04 7D 9A 42 14 91 0B 00 0C 90 8B 00 04 39 0C 00 01 90 CB 00 08 93 4B 00 10 40 84 00 1C
+
+. 0 254752C8 8
+. 38 00 00 00 4B FF FF 48
+
+. 0 25475214 16
+. 28 0A 00 01 90 0B 00 00 7D 6B 1A 14 40 A1 FF 9C
+
+. 0 25475224 32
+. 83 B7 00 00 38 00 00 09 80 9E 01 58 39 20 00 00 2F 9D 00 00 91 3C 00 00 90 04 00 00 41 9E 00 28
+
+. 0 25475244 12
+. 83 9D 00 94 2C 9C 00 00 40 86 01 BC
+
+. 0 25475250 20
+. 81 7D 00 5C 38 00 FF FF 90 1D 01 E0 2F 0B 00 00 40 9A 00 84
+
+. 0 25475264 12
+. 90 1D 01 8C 2C 19 00 00 41 82 00 10
+
+. 0 25475270 12
+. 8B B9 00 00 2F 9D 00 00 40 9E 00 94
+
+. 0 2547530C 12
+. 7F 23 CB 78 7F 3D CB 78 48 00 DC BD
+
+. 0 25483008 12
+. 3C C0 FE FF 38 C6 FE FF 41 9D 00 1C
+
+. 0 25483014 24
+. 85 04 00 04 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 48 00 F9 40 82 00 4C
+
+. 0 2548302C 32
+. 81 04 00 04 85 24 00 08 7C 06 42 14 7C EA 40 F8 7C 00 50 39 7D 66 4A 14 7C EC 48 F8 40 82 00 1C
+
+. 0 2548304C 8
+. 7D 60 60 39 41 82 FF DC
+
+. 0 25483054 16
+. 7C E0 48 38 7C 00 3A 14 7D 88 00 78 48 00 00 14
+
+. 0 25475318 44
+. 7F A4 EB 78 3B 63 00 1F 81 81 00 00 57 7A 00 36 7C 79 1B 78 7D 7A 00 D0 38 B9 00 01 7D 81 59 6E 39 41 00 17 55 43 00 36 48 00 E8 A1
+
+. 0 25483CB0 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 01 5D
+
+. 0 25483E18 28
+. 54 A0 07 BE 54 88 1E F8 2F 80 00 01 94 21 FF F0 54 84 00 3A 20 E8 00 20 41 9E 00 B8
+
+. 0 25483EE8 20
+. 81 64 00 00 38 A5 FF FF 81 44 00 04 38 84 00 08 4B FF FF 54
+
+. 0 25483E4C 96
+. 7D 66 40 30 7D 49 3C 30 7C C0 4B 78 81 64 00 00 90 03 00 00 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+. 0 25483EAC 24
+. 7D 65 40 30 7D 48 3C 30 7C A4 43 78 38 21 00 10 90 83 00 00 4E 80 00 20
+
+. 0 25483CC0 4
+. 4B FF FF 98
+
+. 0 25475344 16
+. 89 3D 00 00 7C 7C 1B 78 38 60 00 01 48 00 00 18
+
+. 0 25475368 8
+. 2C 89 00 00 40 86 FF E8
+
+. 0 25475354 28
+. 38 09 FF C6 8D 3D 00 01 20 80 00 01 7C 83 01 94 7C 83 23 78 2C 89 00 00 40 86 FF E8
+
+. 0 25475370 16
+. 54 69 10 3A 83 BE 01 68 38 69 00 04 48 02 26 A9
+
+. 0 25475380 20
+. 80 DE 01 88 2F 03 00 00 7C 64 1B 78 90 7D 00 00 41 BA FF 44
+
+. 0 25475394 28
+. 80 7E 04 B4 39 00 00 00 80 BE 01 A0 80 C3 00 00 7F 83 E3 78 80 FE 01 A4 4B FF F7 9D
+
+. 0 25474B48 12
+. 94 21 FF A0 7C 08 02 A6 48 02 24 B1
+
+. 0 25474B54 144
+. 93 C1 00 58 7F C8 02 A6 7D 80 00 26 91 E1 00 1C 2D 88 00 00 92 01 00 20 92 21 00 24 92 41 00 28 81 FE 01 48 82 3E 04 C8 82 1E 01 54 82 5E 01 58 92 61 00 2C 7C F3 3B 78 92 81 00 30 7C D4 33 78 92 A1 00 34 7C B5 2B 78 92 C1 00 38 7C 96 23 78 92 E1 00 3C 7D 17 43 78 93 01 00 40 3B 00 00 00 93 21 00 44 93 41 00 48 93 61 00 4C 93 81 00 50 93 A1 00 54 93 E1 00 5C 90 01 00 64 91 81 00 18 90 61 00 08 38 61 00 08 7E A4 AB 78 48 00 B7 5D
+
+. 0 2548033C 20
+. 94 21 FF F0 7C 68 1B 78 80 63 00 00 2F 83 00 00 41 9E 00 54
+
+. 0 25480350 20
+. 88 03 00 00 7C 6A 1B 78 39 20 00 00 2C 00 00 00 41 82 00 3C
+
+. 0 25480364 20
+. 7C 0B 03 78 88 04 00 00 7C 89 23 78 7C 80 58 00 41 86 00 10
+
+. 0 25480378 12
+. 8C 09 00 01 2F 00 00 00 40 9A FF F0
+
+. 0 25480370 8
+. 7C 80 58 00 41 86 00 10
+
+. 0 25480384 8
+. 2F 80 00 00 40 9E 00 20
+
+. 0 2548038C 12
+. 8D 6A 00 01 2C 0B 00 00 40 82 FF D4
+
+. 0 25480368 16
+. 88 04 00 00 7C 89 23 78 7C 80 58 00 41 86 00 10
+
+. 0 254803A8 20
+. 39 2A 00 01 38 00 00 00 98 0A 00 00 91 28 00 00 4B FF FF E8
+
+. 0 254803A0 8
+. 38 21 00 10 4E 80 00 20
+
+. 0 25474BE4 16
+. 2E 03 00 00 7C 7F 1B 78 7C 7C 1B 78 41 92 01 4C
+
+. 0 25474BF4 4
+. 48 00 E3 DD
+
+. 0 25483064 36
+. 7C E0 40 38 38 84 FF FC 7C 00 3A 14 7D 48 00 78 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+
+. 0 25474BF8 8
+. 7C 7D 1B 79 40 82 01 A8
+
+. 0 25474DA4 8
+. 2B 9D 00 01 40 9D 00 24
+
+. 0 25474DAC 16
+. 7D 3F EA 14 88 69 FF FF 2C 83 00 2F 40 86 00 14
+
+. 0 25474DCC 8
+. 2F 1D 00 00 41 BA FE 34
+
+. 0 25474DD4 16
+. 7D 3C EA 14 88 89 FF FF 2C 04 00 2F 41 A2 FE 24
+
+. 0 25474DE4 20
+. 2F 94 00 00 38 A0 00 2F 7C BC E9 AE 3B BD 00 01 41 9E FE 18
+
+. 0 25474C0C 16
+. 83 F1 01 B0 7E 39 8B 78 2E 1F 00 00 41 92 00 24
+
+. 0 25474C1C 20
+. 81 5F 00 10 7F 83 E3 78 7F A5 EB 78 7F 8A E8 00 41 9E 02 40
+
+. 0 25474C30 12
+. 83 FF 00 00 2E 1F 00 00 40 92 FF E4
+
+. 0 25474C3C 4
+. 41 8E 02 84
+
+. 0 25474EC0 32
+. 80 70 00 00 3B 60 00 00 7E 1A 83 78 54 60 10 3A 7D 80 DA 14 7F EC EA 14 38 7F 00 15 48 02 2B 49
+
+. 0 25474EE0 8
+. 7C 7F 1B 79 40 82 FD 8C
+
+. 0 25474C70 32
+. 80 F0 00 00 7F 84 E3 78 54 E6 10 3A 7C A6 FA 14 38 65 00 14 7F A5 EB 78 90 7F 00 0C 48 00 ED 95
+
+. 0 25483A4C 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+
+. 0 25483A78 8
+. 73 E0 00 03 40 82 00 68
+
+. 0 25483A80 16
+. 7F A3 EB 78 7F E4 FB 78 57 85 F0 BE 48 00 02 39
+
+. 0 25483DA8 28
+. 80 04 00 00 38 84 FF F0 81 24 00 14 38 63 FF EC 38 A5 00 04 90 03 00 14 4B FF FF 7C
+
+. 0 25483A90 24
+. 57 86 00 3A 57 85 07 BE 7F BD 32 14 7F FF 32 14 2C 85 00 00 41 86 00 1C
+
+. 0 25474C90 24
+. 39 20 00 00 99 23 00 00 80 92 00 00 93 BF 00 10 7F 9D 20 40 40 9D 00 08
+
+. 0 25474CA8 32
+. 93 B2 00 00 81 3A 00 00 89 5C 00 00 2C 89 00 00 69 48 00 2F 7F 88 00 D0 57 83 17 BC 41 86 00 18
+
+. 0 25474CC8 20
+. 7D 29 03 A6 39 7F 00 14 90 6B 00 00 39 6B 00 04 42 00 FF F8
+
+. 0 25474CD0 12
+. 90 6B 00 00 39 6B 00 04 42 00 FF F8
+
+. 0 25474CDC 8
+. 92 7F 00 04 41 8E 02 1C
+
+. 0 25474EFC 8
+. 92 FF 00 08 4B FF FE 08
+
+. 0 25474D08 36
+. 80 19 01 B0 57 1D 10 3A 38 61 00 08 7E A4 AB 78 90 1F 00 00 3B 18 00 01 93 F9 01 B0 7F FD B1 2E 48 00 B6 15
+
+. 0 25474D2C 16
+. 2E 03 00 00 7C 7F 1B 78 7C 7C 1B 78 40 92 FE BC
+
+. 0 2547FE20 32
+. 81 3E 04 F4 81 89 00 04 7C 8C E2 14 7D 4C 00 D0 39 64 FF FF 7D 7D 50 38 7F A4 EB 78 48 00 29 5D
+
+. 0 25482798 8
+. 38 00 00 5A 44 00 00 02
+
+. 0 254827A0 4
+. 4C A3 00 20
+
+. 0 2547FE40 16
+. 80 BB 00 00 7D 23 EA 14 7C 83 28 00 41 86 00 08
+
+. 0 2547FE50 64
+. 90 7F 00 00 80 7F 00 00 80 C1 00 24 7D 03 E2 14 80 FE 04 20 91 3B 00 00 7C C8 03 A6 91 1F 00 00 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 90 67 00 00 4E 80 00 20
+
+. 0 25483AE4 16
+. 7F A3 EB 78 7F E4 FB 78 57 85 F0 BE 48 00 03 29
+
+. 0 25483E34 8
+. 28 00 00 01 41 80 00 8C
+
+. 0 25483E3C 8
+. 2C 80 00 02 41 86 00 BC
+
+. 0 25483EFC 24
+. 81 44 00 00 38 63 FF F4 81 64 00 04 38 A5 00 02 38 84 FF FC 4B FF FF 78
+
+. 0 25483E88 36
+. 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+. 0 25483AF4 4
+. 4B FF FF 9C
+
+. 0 25474CAC 28
+. 81 3A 00 00 89 5C 00 00 2C 89 00 00 69 48 00 2F 7F 88 00 D0 57 83 17 BC 41 86 00 18
+
+. 0 2548039C 12
+. 91 28 00 00 38 21 00 10 4E 80 00 20
+
+. 0 25474C00 12
+. 83 9E 01 50 2F 94 00 00 40 9E 01 F0
+
+. 0 25474D3C 104
+. 81 E1 00 64 57 10 10 3A 81 81 00 18 7C 70 B1 2E 7D E8 03 A6 7E C3 B3 78 81 E1 00 1C 82 01 00 20 7D 81 81 20 82 21 00 24 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+
+. 0 254753B0 16
+. 80 7D 00 00 83 83 00 00 2C 1C 00 00 41 82 00 74
+
+. 0 254753C0 72
+. 38 E0 00 00 90 FD 00 04 83 B7 01 B0 93 B8 00 60 81 41 00 00 82 EA 00 04 83 0A FF E0 7E E8 03 A6 83 2A FF E4 82 EA FF DC 83 4A FF E8 83 6A FF EC 83 8A FF F0 83 AA FF F4 83 CA FF F8 83 EA FF FC 7D 41 53 78 4E 80 00 20
+
+. 0 25472F70 4
+. 4B FF F7 6C
+
+. 0 254726DC 12
+. 80 EE 01 BC 2E 07 00 00 40 92 00 10
+
+. 0 254726E8 88
+. 83 AE 01 D4 81 5D 00 00 91 4E 01 BC 83 4F 00 4C 3A E0 00 01 80 8E 03 38 38 6E 01 B8 2C 1A FF FE 82 6E 00 04 81 6E 01 98 52 E4 F0 02 81 8E 01 9C 38 B3 00 01 90 8E 03 38 31 0C 00 01 7C EB 01 94 90 AE 00 04 90 74 00 0C 90 EE 01 98 91 0E 01 9C 92 8E 01 C8 41 82 08 38
+
+. 0 25472F74 20
+. 80 14 00 00 7D E9 7B 78 2F 00 00 00 38 00 FF FF 41 9A 00 08
+
+. 0 25472F8C 8
+. 90 09 00 4C 4B FF F7 B0
+
+. 0 25472740 40
+. 81 8E 03 58 A1 6C 00 2C 83 8C 00 1C B1 6E 03 0C A3 2C 00 2C 7D 6C E2 14 91 6E 03 04 2F 99 00 00 39 59 FF FF 41 9E 00 2C
+
+. 0 25472768 28
+. 3D 20 64 74 61 27 E5 52 55 58 28 34 7F 78 58 2E 7D 18 5A 14 7C 9B 38 00 41 86 0C 80
+
+. 0 25473400 36
+. 83 B1 00 00 3B 40 00 00 82 C8 00 08 3B 20 00 00 2C 1D 00 00 92 CE 03 EC 80 C8 00 14 90 CE 03 F0 41 82 F3 84
+
+. 0 25473424 8
+. 7F A3 EB 78 48 00 FB A9
+
+. 0 2547342C 44
+. 7F A4 EB 78 7C 71 1B 78 38 63 00 1F 54 6A 00 36 80 E1 00 00 7E AA 00 D0 38 B1 00 01 7C E1 A9 6E 39 01 00 17 55 03 00 36 48 01 07 8D
+
+. 0 25483EC4 16
+. 81 44 00 00 38 63 FF FC 85 64 00 04 4B FF FF 90
+
+. 0 25483E60 76
+. 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+. 0 25473458 16
+. 90 7F 00 44 80 9E 00 D0 38 7F 00 44 48 00 CE D9
+
+. 0 25480398 16
+. 39 20 00 00 91 28 00 00 38 21 00 10 4E 80 00 20
+
+. 0 25473468 8
+. 7C 7D 1B 79 41 A2 F3 38
+
+. 0 25473470 12
+. 88 BD 00 00 2F 05 00 00 41 BA FF E4
+
+. 0 2547347C 12
+. 80 90 00 00 2F 84 00 00 40 9E 07 74
+
+. 0 25473488 32
+. 7F A4 EB 78 38 A0 00 01 38 C0 00 01 38 E0 00 00 39 00 00 00 39 20 00 00 7E 83 A3 78 48 00 38 45
+
+. 0 25476CE8 12
+. 94 21 FD 70 7C 08 02 A6 48 02 03 11
+
+. 0 25476CF4 136
+. 93 C1 02 88 7F C8 02 A6 92 61 02 5C 92 A1 02 64 1E 69 00 18 90 01 02 94 7D 80 00 26 82 BE 04 C8 93 A1 02 84 7F B3 A8 2E 92 01 02 50 7C D0 33 78 2F 9D 00 00 92 21 02 54 92 E1 02 6C 7C F1 3B 78 93 01 02 70 7C B7 2B 78 93 21 02 74 7D 18 43 78 93 61 02 7C 7D 39 4B 78 93 E1 02 8C 7C 7B 1B 78 91 C1 02 48 7C 9F 23 78 91 E1 02 4C 92 41 02 58 92 81 02 60 92 C1 02 68 93 41 02 78 93 81 02 80 91 81 02 44 41 9E 00 40
+
+. 0 25476D7C 24
+. 3A C0 00 00 80 BD 01 80 7F A4 EB 78 7F E3 FB 78 74 A9 00 40 40 82 00 1C
+
+. 0 25476D94 4
+. 48 00 59 41
+
+. 0 2547C6D4 40
+. 7C 08 02 A6 94 21 FF E0 93 E1 00 1C 7C 9F 23 78 93 A1 00 14 7C 7D 1B 78 90 01 00 24 80 84 00 04 93 C1 00 18 48 00 67 E9
+
+. 0 25482EE0 16
+. 7C 80 1B 78 54 00 07 BF 3C E0 FE FF 40 82 00 94
+
+. 0 25482EF0 24
+. 80 A3 00 00 80 C4 00 00 3D 00 7F 7F 38 E7 FE FF 39 08 7F 7F 48 00 00 10
+
+. 0 25482F14 20
+. 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+
+. 0 25482F08 8
+. 84 A3 00 04 40 86 00 54
+
+. 0 25482F60 16
+. 80 A3 FF FC 7C AA 32 79 7C 66 28 50 4C A0 00 20
+
+. 0 2547C6FC 12
+. 2F 83 00 00 38 60 00 01 41 9E 00 30
+
+. 0 2547C708 8
+. 83 FF 00 1C 48 00 00 1C
+
+. 0 2547C728 8
+. 2C 1F 00 00 40 82 FF E4
+
+. 0 2547C710 12
+. 80 9F 00 00 7F A3 EB 78 48 00 67 C9
+
+. 0 2547C71C 8
+. 2C 83 00 00 41 86 00 30
+
+. 0 2547C724 12
+. 83 FF 00 04 2C 1F 00 00 40 82 FF E4
+
+. 0 2547C730 32
+. 38 60 00 00 80 81 00 24 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25476D98 8
+. 2C 83 00 00 40 86 01 B8
+
+. 0 25476DA0 12
+. 80 7D 01 80 74 69 00 80 41 82 00 F8
+
+. 0 25476EA0 16
+. 81 7D 00 58 7F E3 FB 78 2D 8B 00 00 41 AE FF 00
+
+. 0 25476DAC 12
+. 83 BD 00 0C 2C 9D 00 00 40 86 FF CC
+
+. 0 25476D80 20
+. 80 BD 01 80 7F A4 EB 78 7F E3 FB 78 74 A9 00 40 40 82 00 1C
+
+. 0 25476EB0 24
+. 81 3D 00 34 81 6B 00 04 80 89 00 04 7F 84 5A 14 7F 84 E3 78 48 00 C0 1D
+
+. 0 25482F80 12
+. 88 A3 00 00 88 C4 00 00 48 00 00 10
+
+. 0 25482F98 8
+. 2C 85 00 00 41 86 00 20
+
+. 0 25482FA0 8
+. 7C 05 30 00 40 82 00 18
+
+. 0 25482FBC 8
+. 7C 66 28 50 4E 80 00 20
+
+. 0 25476EC8 8
+. 2E 03 00 00 40 92 FE E0
+
+. 0 25476DB8 20
+. 82 9E 04 F4 2D 9B 00 00 81 54 00 00 71 49 00 40 40 82 03 90
+
+. 0 25476DCC 12
+. 7F E3 FB 78 38 80 00 2F 48 00 C0 2D
+
+. 0 25482EB4 36
+. 7C E0 60 38 7C EA 63 78 7C 00 3A 14 7D 49 00 F8 7D 24 00 34 39 08 FF FC 54 84 E8 FE 7C 68 22 14 4E 80 00 20
+
+. 0 25476DD8 8
+. 2F 03 00 00 41 9A 02 90
+
+. 0 25476DE0 4
+. 41 8E 01 D8
+
+. 0 25476DE4 12
+. 7F 63 DB 78 7F E4 FB 78 4B FF D9 41
+
+. 0 2547472C 12
+. 94 21 FF D0 7C 08 02 A6 48 02 28 CD
+
+. 0 25474738 72
+. 7D 80 00 26 93 C1 00 28 7F C8 02 A6 93 A1 00 24 7C 9D 23 78 93 41 00 18 38 80 00 24 93 81 00 20 7C 7C 1B 78 92 E1 00 0C 7F A3 EB 78 93 01 00 10 93 21 00 14 93 61 00 1C 93 E1 00 2C 90 01 00 34 91 81 00 08 48 00 E6 85
+
+. 0 25474780 8
+. 7C 7A 1B 79 40 82 00 74
+
+. 0 25474788 8
+. 7F A3 EB 78 48 00 E8 45
+
+. 0 25474790 12
+. 3B E3 00 01 7F E3 FB 78 48 02 32 8D
+
+. 0 2547479C 12
+. 38 00 00 00 2C 03 00 00 41 82 00 14
+
+. 0 254747A8 12
+. 7F A4 EB 78 7F E5 FB 78 48 00 F4 31
+
+. 0 25483D6C 20
+. 80 04 00 00 38 63 FF FC 81 24 00 04 90 03 00 04 4B FF FF A0
+
+. 0 25483D1C 64
+. 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 254747B4 68
+. 7C 60 1B 78 7C 03 03 78 83 81 00 34 81 41 00 08 7F 88 03 A6 82 E1 00 0C 83 01 00 10 7D 40 81 20 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 25476DF0 20
+. 3B 40 FF FF 2F 03 00 00 90 61 02 28 2E 1A FF FF 40 9A 02 00
+
+. 0 25477000 8
+. 38 81 00 18 4B FF F4 41
+
+. 0 25476444 12
+. 94 21 FF A0 7C 08 02 A6 48 02 0B B5
+
+. 0 25476450 76
+. 93 C1 00 58 7F C8 02 A6 93 01 00 40 93 E1 00 5C 7C 98 23 78 7C 3F 0B 78 38 80 00 00 92 C1 00 38 7C 76 1B 78 93 21 00 44 93 61 00 4C 3B 60 00 00 92 A1 00 34 92 E1 00 3C 93 41 00 48 93 81 00 50 93 A1 00 54 90 01 00 64 48 00 BB 49
+
+. 0 25481FE0 8
+. 38 00 00 05 44 00 00 02
+
+. 0 25481FE8 4
+. 4C A3 00 20
+
+. 0 2547649C 12
+. 2F 83 FF FF 7C 79 1B 78 41 9E 00 CC
+
+. 0 254764A8 24
+. 82 FE 05 14 3B 98 00 04 7F 84 E3 78 38 A0 02 00 93 77 00 00 48 00 BB 45
+
+. 0 25482000 8
+. 38 00 00 03 44 00 00 02
+
+. 0 25482008 4
+. 4C A3 00 20
+
+. 0 254764C0 16
+. 2C 03 00 33 7C 7D 1B 78 90 78 00 00 40 81 01 4C
+
+. 0 254764D0 16
+. 80 9E 01 6C 7F 83 E3 78 38 A0 00 09 48 00 CD BD
+
+. 0 254764E0 8
+. 2F 03 00 00 40 9A 02 34
+
+. 0 254764E8 16
+. 81 9C 00 14 81 1E 02 00 2C 8C 00 01 40 86 01 38
+
+. 0 254764F8 12
+. A1 1C 00 12 2F 08 00 14 40 9A 01 D0
+
+. 0 25476504 12
+. A0 1C 00 10 2F 80 00 03 40 9E 02 78
+
+. 0 25476510 16
+. A2 BC 00 2A 81 1E 02 04 2C 95 00 20 40 86 01 10
+
+. 0 25476520 32
+. A1 7C 00 2C 80 9C 00 1C 55 7B 28 34 7F 44 DA 14 7F 1A E8 40 7F A4 C2 14 3B 5D 00 04 41 99 01 08
+
+. 0 25476540 16
+. 55 7B 28 34 7F 5D D3 78 7C 1B D2 14 48 00 00 1C
+
+. 0 25476568 8
+. 7C 00 E8 40 41 81 FF E4
+
+. 0 25476550 12
+. 80 9D 00 00 2C 84 00 04 41 86 00 5C
+
+. 0 2547655C 20
+. 55 66 28 34 3B BD 00 20 7C 06 D2 14 7C 00 E8 40 41 81 FF E4
+
+. 0 25476570 68
+. 80 81 00 00 7F 23 CB 78 82 E4 00 04 82 A4 FF D4 7E E8 03 A6 82 C4 FF D8 82 E4 FF DC 83 04 FF E0 83 24 FF E4 83 44 FF E8 83 64 FF EC 83 84 FF F0 83 A4 FF F4 83 C4 FF F8 83 E4 FF FC 7C 81 23 78 4E 80 00 20
+
+. 0 25477008 12
+. 2E 03 FF FF 7C 7A 1B 78 40 92 FD F4
+
+. 0 25476E04 20
+. 57 12 27 FE 32 D2 FF FF 7E D6 B1 10 7F 7C B0 38 40 92 02 0C
+
+. 0 25477020 52
+. 80 FE 04 C0 7F E3 FB 78 80 C1 02 28 7F 44 D3 78 81 C7 00 00 7E 08 83 78 7F 87 E3 78 7F 09 C3 78 38 A1 00 18 39 41 02 2C 91 C1 02 2C 93 21 00 08 4B FF E4 B9
+
+. 0 25475508 12
+. 94 21 FF 00 7C 08 02 A6 48 02 1A F1
+
+. 0 25475514 144
+. 7D 80 00 26 93 C1 00 F8 7F C8 02 A6 93 E1 00 FC 7C 3F 0B 78 90 01 01 04 92 01 00 C0 7C D0 33 78 92 61 00 CC 7C 93 23 78 92 81 00 D0 7C 74 1B 78 92 C1 00 D8 38 60 00 03 92 E1 00 DC 7D 36 4B 78 93 21 00 E4 7C F7 3B 78 93 41 00 E8 7D 19 43 78 93 61 00 EC 3B 40 00 00 93 81 00 F0 7C BC 2B 78 91 C1 00 B8 38 BF 00 28 91 E1 00 BC 92 21 00 C4 92 41 00 C8 92 A1 00 D4 93 01 00 E0 93 A1 00 F4 91 81 00 B4 91 5F 00 98 83 7F 01 08 48 00 C9 05
+
+. 0 25481EA4 12
+. 94 21 FF 80 7C C8 02 A6 48 01 51 55
+
+. 0 25481EB0 64
+. 93 C1 00 78 7F C8 02 A6 93 61 00 6C 90 C1 00 84 93 21 00 64 7C 79 1B 78 83 7E 04 EC 93 81 00 70 7C BC 2B 78 80 1B 00 00 93 E1 00 7C 7C 9F 23 78 2F 80 00 00 93 41 00 68 93 A1 00 74 40 9E 00 48
+
+. 0 25481EF0 24
+. 83 BE 05 14 7C 83 23 78 38 00 00 C5 7C A4 2B 78 83 5D 00 00 44 00 00 02
+
+. 0 25481F08 12
+. 7C 00 00 26 74 09 10 00 40 82 00 C4
+
+. 0 25481F14 8
+. 2C 83 FF FF 40 A6 00 44
+
+. 0 25481F5C 44
+. 80 A1 00 84 83 21 00 64 83 41 00 68 7C A8 03 A6 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C 38 21 00 80 4E 80 00 20
+
+. 0 254755A4 12
+. 2F 83 00 00 93 5F 00 A0 41 9C 08 2C
+
+. 0 254755B0 20
+. 1C BB 00 18 80 9E 04 C8 7F A5 20 2E 2C 1D 00 00 41 82 01 48
+
+. 0 254755C4 12
+. 81 7F 00 30 3B 40 00 00 48 00 00 10
+
+. 0 254755DC 12
+. 80 DD 01 D8 7C 86 58 00 40 86 FF EC
+
+. 0 254755E8 16
+. 81 1D 01 DC 80 FF 00 34 7E 08 38 00 40 92 FF DC
+
+. 0 254755D0 12
+. 83 BD 00 0C 2E 1D 00 00 41 92 01 30
+
+. 0 25475708 8
+. 2F 1B 00 00 40 9A 06 E0
+
+. 0 25475710 12
+. 72 C0 00 04 38 60 00 00 40 A2 FF 8C
+
+. 0 2547571C 16
+. 82 3E 04 F4 80 91 00 00 70 89 00 40 40 82 07 38
+
+. 0 2547572C 28
+. 7F 25 CB 78 7F 68 DB 78 7E 03 83 78 7E 84 A3 78 7E E6 BB 78 7E C7 B3 78 48 00 39 B1
+
+. 0 25479318 8
+. 7D 69 5B 78 48 00 00 08
+
+. 0 25479324 12
+. 80 09 00 0C 2C 80 00 00 40 86 FF F4
+
+. 0 25479320 16
+. 7C 09 03 78 80 09 00 0C 2C 80 00 00 40 86 FF F4
+
+. 0 25479330 72
+. 93 E9 00 0C 2F 1C 00 00 91 3F 00 10 38 80 00 01 7D 0A 28 2E 7D 4A 2A 14 83 2A 00 04 81 65 01 98 38 E8 01 58 81 85 01 9C 3B 19 00 01 90 E6 00 00 31 0C 00 01 7C EB 01 94 93 0A 00 04 90 E5 01 98 91 05 01 9C 40 9A FE A8
+
+. 0 2547921C 12
+. 80 1C 01 68 2C 00 00 00 40 82 FF F4
+
+. 0 25479234 12
+. 81 66 00 00 7C 89 58 00 41 86 00 24
+
+. 0 25479260 20
+. 3B 7F 01 58 93 7F 01 C4 8B B7 00 00 2F 1D 00 00 41 9A 00 6C
+
+. 0 25479274 12
+. 7E E3 BB 78 3B 80 00 00 48 00 9D 55
+
+. 0 25479280 20
+. 2F 9D 00 2F 3B 63 00 01 83 5E 05 14 7F 7D DB 78 41 9E 01 54
+
+. 0 254793E4 8
+. 7F 63 DB 78 48 01 E6 3D
+
+. 0 254793EC 12
+. 2C 83 00 00 7C 7C 1B 78 40 86 FF B0
+
+. 0 254793A4 12
+. 7E E4 BB 78 7F 65 DB 78 48 00 A6 75
+
+. 0 254793B0 12
+. 8E E3 FF FF 2C 17 00 2F 40 82 FF F8
+
+. 0 254793BC 8
+. 7F 03 E0 00 41 9A 00 14
+
+. 0 254793C4 16
+. 38 C0 00 00 98 C3 00 00 93 9F 01 9C 4B FF FF 0C
+
+. 0 25475748 12
+. 3B 1C 00 04 7C 7A 1B 79 41 82 07 08
+
+. 0 25475754 60
+. A1 F8 00 2C 81 D8 00 18 A1 58 00 10 B1 FA 01 54 91 DA 01 50 A1 18 00 2C 83 78 00 1C 55 1D 28 34 80 DC 00 00 7C FB EA 14 7F 3B E2 14 7F 87 30 40 91 5F 00 9C 3B 79 00 04 41 9D 04 38
+
+. 0 25475790 80
+. A1 1A 01 54 7C 32 0B 78 80 81 00 00 3B A0 00 07 1C E8 00 18 3B 80 00 00 38 C7 00 1E 54 D9 02 B6 7C B9 00 D0 7C 81 29 6E A0 FA 01 54 38 61 00 2F 93 BF 00 A4 54 75 00 36 54 E9 28 34 93 9F 00 A8 7C 09 DA 14 7F 7D DB 78 7E 00 D8 40 40 91 03 A8
+
+. 0 254757E0 8
+. 3B 20 00 00 48 00 00 28
+
+. 0 2547580C 12
+. 81 3D 00 00 2F 09 00 06 41 9A 00 94
+
+. 0 25475818 8
+. 28 09 00 06 40 A1 FF CC
+
+. 0 254757E8 8
+. 2F 89 00 01 41 9E 02 C4
+
+. 0 25475AB0 20
+. 81 31 00 04 81 7D 00 1C 39 09 FF FF 7D 69 40 39 40 82 06 2C
+
+. 0 25475AC4 24
+. 81 5D 00 08 38 AB FF FF 80 DD 00 04 7C 86 50 50 7C 8B 28 39 40 82 06 50
+
+. 0 25475ADC 116
+. 7D 4A 40 78 3B 9C 00 01 7D 55 C9 2E 28 9C 00 01 81 3D 00 08 80 1D 00 10 81 F1 00 04 7C 69 02 14 7D 83 7A 14 7D CF 00 D0 39 6C FF FF 7D 68 70 38 7D 75 CA 14 91 0B 00 04 3B 39 00 18 80 9D 00 08 80 DD 00 10 7C A4 32 14 90 AB 00 08 81 3D 00 08 80 1D 00 14 7C 69 02 14 90 6B 00 0C 81 91 00 04 81 DD 00 04 7D EC 00 D0 7D C8 78 38 91 0B 00 10 40 85 00 18
+
+. 0 25475B64 32
+. 80 7D 00 18 3D 80 73 51 61 8E 62 40 54 6F 16 FA 7D C8 7E 30 55 06 07 3E 90 CB 00 14 4B FF FC 78
+
+. 0 254757F8 20
+. 54 EF 28 34 3B BD 00 20 7D CF DA 14 7C 8E E8 40 40 85 00 BC
+
+. 0 25475B50 12
+. 80 8B FF EC 7E 04 50 00 41 92 00 0C
+
+. 0 25475B5C 40
+. 39 40 00 01 91 5F 00 A8 80 7D 00 18 3D 80 73 51 61 8E 62 40 54 6F 16 FA 7D C8 7E 30 55 06 07 3E 90 CB 00 14 4B FF FC 78
+
+. 0 254757F0 8
+. 2C 89 00 02 41 86 03 B8
+
+. 0 25475BAC 24
+. 80 7D 00 14 81 9D 00 08 54 6B E8 FE 91 9A 00 08 B1 7A 01 56 4B FF FC 38
+
+. 0 25475820 16
+. 3D C0 64 74 61 C0 E5 51 7E 09 00 00 41 92 02 78
+
+. 0 25475AA4 12
+. 81 5D 00 18 91 5F 00 A4 4B FF FD 4C
+
+. 0 254758C4 8
+. 2E 1C 00 00 41 92 02 BC
+
+. 0 254758CC 36
+. 1D FC 00 18 83 9F 00 9C 81 75 00 00 7E BD AB 78 2E 1C 00 03 7F 8F AA 14 83 3C FF F4 7D CB C8 50 40 92 08 18
+
+. 0 254758F0 20
+. 81 91 00 4C 7D C4 73 78 7E E3 BB 78 7D 65 60 38 48 00 AD E9
+
+. 0 254806E8 16
+. 7C A3 2B 79 7D 88 02 A6 94 21 FF E0 48 01 69 0D
+
+. 0 254806F8 32
+. 93 A1 00 14 7C 9D 23 78 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 E1 00 1C 7D 88 03 A6 40 82 00 FC
+
+. 0 25480718 40
+. 3C C0 00 03 83 FE 04 F4 80 9E 04 C8 60 C6 FF FF 3C E0 70 00 39 80 00 0F 81 64 00 00 38 84 00 18 2F 8B 00 00 41 9E 00 9C
+
+. 0 25480740 36
+. 81 3F 00 04 38 A9 FF FF 7C A3 28 F8 80 0B 01 A4 83 8B 01 A0 7C 08 2B 78 7C 88 38 40 7F 8A 18 38 40 84 00 10
+
+. 0 25480764 12
+. 83 8B 01 80 77 89 C0 00 40 82 00 0C
+
+. 0 25480770 8
+. 7F 07 50 40 40 98 00 54
+
+. 0 254807C8 16
+. 7D 47 53 78 81 6B 00 0C 2C 8B 00 00 40 86 FF 78
+
+. 0 2548074C 24
+. 80 0B 01 A4 83 8B 01 A0 7C 08 2B 78 7C 88 38 40 7F 8A 18 38 40 84 00 10
+
+. 0 25480778 32
+. 7C 06 40 10 38 00 00 00 7C 00 01 14 7D 2A 30 10 39 20 00 00 7D 29 49 14 7C 1C 48 39 40 82 00 94
+
+. 0 25480798 32
+. 7C 08 38 10 38 00 00 00 7C 00 01 14 7D 26 50 10 39 20 00 00 7D 29 49 14 7C 1C 48 39 41 82 00 18
+
+. 0 254807CC 12
+. 81 6B 00 0C 2C 8B 00 00 40 86 FF 78
+
+. 0 254807D8 8
+. 35 8C FF FF 40 80 FF 54
+
+. 0 25480730 16
+. 81 64 00 00 38 84 00 18 2F 8B 00 00 41 9E 00 9C
+
+. 0 254807E0 28
+. 3C 67 FF FF 80 FF 00 04 7F 03 30 40 38 87 FF FF 7C 85 EB 78 38 85 00 01 40 99 00 14
+
+. 0 254807FC 16
+. 7D 06 18 50 7C 64 18 50 7F 88 20 40 40 9C 00 08
+
+. 0 25480810 24
+. 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25475904 24
+. 80 B5 00 14 81 15 00 10 7D C4 73 78 38 C0 08 02 7E 67 9B 78 48 00 CE 81
+
+. 0 2547591C 12
+. 2F 03 FF FF 90 7A 01 A0 41 9A 01 3C
+
+. 0 25475928 32
+. 81 7F 00 A8 7C A3 72 14 90 BA 01 A4 2F 8B 00 00 80 F5 00 00 7C 07 18 50 90 1A 00 00 40 9E 05 34
+
+. 0 25475E78 24
+. 80 75 00 04 38 A0 00 00 80 9C FF F4 7C 83 20 50 7C 60 1A 14 48 00 C9 2D
+
+. 0 254827B8 8
+. 38 00 00 7D 44 00 00 02
+
+. 0 254827C0 4
+. 4C A3 00 20
+
+. 0 25475E90 4
+. 4B FF FA B8
+
+. 0 25475948 12
+. 80 DD 00 14 70 C9 00 04 41 82 00 14
+
+. 0 25475954 28
+. 83 9D 00 04 83 3A 00 00 7D 19 E2 14 91 1A 01 A8 81 9A 01 4C 2F 8C 00 00 40 9E 00 38
+
+. 0 25475970 16
+. 81 5D 00 10 81 78 00 1C 7C 8A 58 40 41 85 00 28
+
+. 0 25475980 36
+. 80 1D 00 04 81 1D 00 00 A1 38 00 2C 7C 88 00 50 55 23 28 34 7C A4 52 14 7C EB 1A 14 7F 05 38 40 40 98 06 9C
+
+. 0 2547603C 16
+. 7E E8 5A 14 7D 6A B8 50 91 7A 01 4C 4B FF F9 5C
+
+. 0 254759A4 16
+. 80 9D 00 0C 80 7D 00 08 7C 04 18 40 40 81 00 68
+
+. 0 25475A18 16
+. 3B BD 00 18 7F 35 7A 14 7F 99 E8 40 40 9D 01 F8
+
+. 0 25475A28 16
+. 80 9D 00 04 80 1D 00 00 7C 84 00 40 40 85 FF 14
+
+. 0 25475A38 32
+. 81 5A 00 00 7C 80 20 50 80 BD 00 14 38 C0 08 12 81 1D 00 10 7C 6A 02 14 7E 67 9B 78 48 00 CD 45
+
+. 0 25475A58 8
+. 2F 03 FF FF 40 9A FE EC
+
+. 0 254759B4 40
+. 80 DA 00 00 81 71 00 04 7F 26 1A 14 7E E6 22 14 7D 0B CA 14 7C 6B 00 D0 39 48 FF FF 7D 5C 18 38 7F 97 E0 40 40 9C 00 08
+
+. 0 254759DC 12
+. 7E FC BB 78 7C 9C C8 40 40 85 00 2C
+
+. 0 254759E8 12
+. 80 BD 00 14 70 AA 00 02 41 82 08 38
+
+. 0 254759F4 16
+. 7C B9 E0 50 7F 23 CB 78 38 80 00 00 48 00 DD 45
+
+. 0 25483744 16
+. 28 85 00 04 70 67 00 03 7C 66 1B 78 40 85 01 90
+
+. 0 25483754 12
+. 2A 85 00 1F 50 84 44 2E 41 A2 00 24
+
+. 0 25483780 12
+. 7C A0 11 20 50 84 80 1E 40 95 01 98
+
+. 0 2548378C 12
+. 70 C7 00 1C 20 E7 00 20 41 82 00 40
+
+. 0 25483798 24
+. 7C E0 11 20 7C C6 3A 14 7C A7 28 50 28 87 00 10 7C C8 33 78 40 9C 00 0C
+
+. 0 254837B8 4
+. 41 84 00 14
+
+. 0 254837CC 4
+. 40 9D 00 08
+
+. 0 254837D0 20
+. 90 88 FF FC 28 84 00 00 54 A7 00 35 7C A0 11 20 41 86 01 A0
+
+. 0 25483980 8
+. 7C 08 02 A6 41 82 FF 9C
+
+. 0 25483988 4
+. 48 01 36 79
+
+. 0 2548398C 24
+. 7D 28 02 A6 81 29 04 F0 81 09 00 00 7C 08 03 A6 28 88 00 00 41 86 FE 44
+
+. 0 254839A4 8
+. 28 88 00 20 41 86 FE BC
+
+. 0 25483864 32
+. 54 A5 06 FE 7C E0 21 20 54 E0 C9 FF 7C 09 03 A6 38 E0 00 20 39 00 FF C0 28 85 00 10 40 9A 00 0C
+
+. 0 2548388C 8
+. 39 20 FF E0 40 99 00 10
+
+. 0 254838A0 8
+. 2A 85 00 00 41 82 00 7C
+
+. 0 254838A8 24
+. 7C 00 37 EC 7C 07 37 EC 38 C6 00 80 7C 08 37 EC 7C 09 37 EC 42 00 FF EC
+
+. 0 254838C0 4
+. 4D 96 00 20
+
+. 0 254838C4 4
+. 48 00 00 60
+
+. 0 25483924 8
+. 7C C6 2A 14 41 9F 00 20
+
+. 0 2548392C 4
+. 41 9E 00 24
+
+. 0 25483930 4
+. 41 9D 00 28
+
+. 0 25483958 8
+. 94 86 FF FC 41 84 00 14
+
+. 0 25483970 4
+. 4C 9C 00 20
+
+. 0 25475A04 12
+. 80 BD 00 14 70 A0 00 02 41 82 08 08
+
+. 0 25475A10 8
+. 7F 97 E0 40 41 9D 06 AC
+
+. 0 25475C1C 24
+. 82 A1 00 00 7E 41 93 78 92 A1 00 00 81 3A 00 08 2F 09 00 00 40 9A 02 64
+
+. 0 25475E94 16
+. 82 5A 00 00 7D 29 92 14 91 3A 00 08 4B FF FD 98
+
+. 0 25475C38 12
+. 2E 09 00 00 7D 2A 4B 78 41 92 02 64
+
+. 0 25475C44 16
+. 81 69 00 00 39 1A 00 20 2C 0B 00 00 41 82 00 64
+
+. 0 25475C54 60
+. 3C 60 70 00 3C E0 6F FF 3C A0 6F FF 3F 80 6F FF 3D E0 6F FF 3F A0 6F FF 60 66 00 21 60 E7 FF FF 60 A5 FD FF 63 84 FE 34 61 E3 FE FF 63 BD FF 40 2F 8B 00 21 55 60 10 3A 40 9D 00 18
+
+. 0 25475CA4 16
+. 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+. 0 25475C84 12
+. 2F 8B 00 21 55 60 10 3A 40 9D 00 18
+
+. 0 25475C90 12
+. 7D 2B 38 50 28 89 00 0F 41 85 03 BC
+
+. 0 25475C9C 24
+. 7C 0B 30 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+. 0 25475CB4 12
+. 81 7A 00 00 2C 8B 00 00 41 86 00 AC
+
+. 0 25475CC0 12
+. 81 28 00 10 2F 09 00 00 41 9A 00 10
+
+. 0 25475CCC 24
+. 81 49 00 04 7C 8A 5A 14 90 89 00 04 81 28 00 0C 2E 09 00 00 41 92 00 10
+
+. 0 25475CE4 24
+. 82 49 00 04 7C D2 5A 14 90 C9 00 04 81 28 00 14 2C 09 00 00 41 82 00 10
+
+. 0 25475CFC 24
+. 81 E9 00 04 7F AF 5A 14 93 A9 00 04 81 28 00 18 2F 89 00 00 41 9E 00 10
+
+. 0 25475D14 24
+. 80 A9 00 04 7F 85 5A 14 93 89 00 04 81 28 00 1C 2C 89 00 00 41 86 00 10
+
+. 0 25475D2C 24
+. 80 E9 00 04 7C 67 5A 14 90 69 00 04 81 28 00 5C 2F 09 00 00 41 9A 00 10
+
+. 0 25475D44 24
+. 80 09 00 04 7E E0 5A 14 92 E9 00 04 81 28 00 C4 2E 09 00 00 41 92 00 10
+
+. 0 25475D5C 24
+. 83 29 00 04 7D 99 5A 14 91 89 00 04 81 28 00 78 2C 09 00 00 41 82 00 30
+
+. 0 25475DA0 12
+. 81 28 00 98 2F 89 00 00 41 9E 03 54
+
+. 0 25475DAC 16
+. 81 69 00 04 71 60 00 01 91 7A 01 FC 41 82 00 08
+
+. 0 25475DC0 12
+. 81 28 00 74 2C 89 00 00 41 86 00 E0
+
+. 0 25475EA8 8
+. 71 69 00 40 40 82 02 A4
+
+. 0 25475EB0 12
+. 81 3A 01 4C 2F 89 00 00 41 9E 01 CC
+
+. 0 25475EBC 36
+. 82 5A 00 00 7C C9 92 14 90 DA 01 4C 80 BE 04 C8 83 9F 00 A4 81 E5 04 00 7F 9D 78 78 73 AB 00 01 40 82 02 B0
+
+. 0 25475EE0 12
+. 81 3A 02 18 2F 89 00 00 41 9E 00 10
+
+. 0 25475EF8 8
+. 7E 63 9B 78 48 00 C0 F5
+
+. 0 25481FF0 8
+. 38 00 00 06 44 00 00 02
+
+. 0 25481FF8 4
+. 4C A3 00 20
+
+. 0 25475F00 12
+. 81 1E 01 C8 2C 83 00 00 40 A6 FB 68
+
+. 0 25475F0C 48
+. 81 7A 01 80 81 FF 00 9C 55 7D 00 02 69 F3 00 02 21 33 00 00 7E 69 99 14 6F B2 40 00 21 52 00 00 7E 4A 91 14 7E 4A 98 39 3A 60 FF FF 40 82 00 BC
+
+. 0 25475F3C 28
+. 80 7A 01 50 80 FA 00 00 7D 63 3A 14 91 7A 01 50 80 B1 00 00 70 A9 00 40 40 82 02 78
+
+. 0 25475F58 8
+. 7F 43 D3 78 48 00 26 11
+
+. 0 25475F60 8
+. 72 C0 00 08 40 82 00 10
+
+. 0 25475F68 12
+. 81 DA 00 60 2F 0E 00 00 40 9A 02 E0
+
+. 0 25475F74 12
+. 81 1A 01 FC 71 09 00 20 41 82 00 0C
+
+. 0 25475F80 36
+. 81 3E 04 C8 93 49 01 A0 80 91 00 50 C8 1F 00 28 2C 84 00 00 D8 1A 01 D0 C9 BF 00 30 D9 BA 01 D8 41 86 FE B0
+
+. 0 25475E50 8
+. 7F 43 D3 78 4B FF F8 50
+
+. 0 254756A4 100
+. 80 A1 00 00 83 45 00 04 81 85 FF B4 7F 48 03 A6 81 C5 FF B8 81 E5 FF BC 7D 80 81 20 82 05 FF C0 82 25 FF C4 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+
+. 0 25477054 4
+. 4B FF FF 04
+
+. 0 25476F58 96
+. 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+
+. 0 254734A8 36
+. 80 9E 00 D0 82 E3 01 78 3A 77 00 01 92 63 01 78 38 7F 00 44 6A 7D 00 01 23 BD 00 00 7F B9 01 94 48 00 CE 75
+
+. 0 254734CC 12
+. 7F B9 EB 78 7C 7D 1B 79 40 82 FF 9C
+
+. 0 254734D8 12
+. 80 7E 00 3C 38 80 00 04 48 00 EB 51
+
+. 0 25482030 8
+. 38 00 00 21 44 00 00 02
+
+. 0 25482038 4
+. 4C A3 00 20
+
+. 0 2548203C 4
+. 4B FF F2 F4
+
+. 0 25481330 12
+. 94 21 FF F0 7D 88 02 A6 48 01 5C C9
+
+. 0 2548133C 36
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 81 3E 05 14 83 C1 00 08 38 21 00 10 90 69 00 00 38 60 FF FF 4E 80 00 20
+
+. 0 254734E4 8
+. 2E 03 00 00 40 92 F2 D0
+
+. 0 254727B8 8
+. 2C 19 00 00 40 82 13 F0
+
+. 0 25473BAC 64
+. 57 36 10 3A 83 A1 00 00 38 D6 00 1E 39 60 00 00 54 C0 00 36 7F 40 00 D0 7F A1 D1 6E 3B 61 00 17 81 2E 01 C4 57 7A 00 36 55 6C 10 3A 39 6B 00 01 7D 2C D1 2E 81 29 00 0C 2F 89 00 00 40 9E FF EC
+
+. 0 25473BEC 4
+. 4B FF EB D4
+
+. 0 254727C0 36
+. 81 7F 00 30 38 E0 00 00 7F 44 D3 78 7F 25 CB 78 69 66 00 03 20 06 00 00 7C C0 31 14 7E 83 A3 78 48 00 79 E5
+
+. 0 2547A1C4 24
+. 94 21 FF 60 7D 28 02 A6 91 E1 00 5C 1D E5 00 0C 91 C1 00 58 48 01 CE 29
+
+. 0 2547A1DC 172
+. 39 CF 00 36 81 41 00 00 55 CC 00 36 93 A1 00 94 7D 0C 00 D0 3B A0 00 00 91 21 00 A4 7F 9D 28 40 92 E1 00 7C 7C 77 1B 78 93 C1 00 98 38 00 00 00 93 E1 00 9C 7C 3F 0B 78 92 01 00 60 7F C8 02 A6 92 21 00 64 7C CF 33 78 92 41 00 68 7C EE 3B 78 92 61 00 6C 92 81 00 70 92 A1 00 74 92 C1 00 78 93 01 00 80 93 21 00 84 93 41 00 88 93 61 00 8C 93 81 00 90 7D 41 41 6E 39 00 00 01 81 23 01 80 38 61 00 17 54 6A 00 36 51 09 C9 4C 39 6A 00 0C 90 0A 00 00 91 6A 00 08 91 37 01 80 91 5F 00 08 92 EA 00 04 91 1F 00 0C 40 9C 00 5C
+
+. 0 2547A288 20
+. 7C A9 03 A6 7D 47 53 78 38 A0 00 00 38 C0 00 01 48 00 00 08
+
+. 0 2547A2A0 60
+. 1E 68 00 0C 57 B5 10 3A 7E 15 20 2E 39 28 00 01 7E 53 3A 14 7C B3 39 2E 3A 92 00 0C 92 12 00 04 92 92 00 08 3B BD 00 01 82 30 01 80 91 3F 00 0C 50 D1 C9 4C 92 30 01 80 42 00 FF C4
+
+. 0 2547A2DC 64
+. 7D 28 4B 78 83 1F 00 08 1E C8 00 0C 82 1E 05 14 3A 20 00 00 7F 1B C3 79 7C F6 C2 14 80 90 00 00 38 A7 FF F4 38 C0 00 00 7C B3 2B 78 90 C5 00 08 92 3F 00 34 90 9F 00 38 92 30 00 00 41 82 02 AC
+
+. 0 2547A31C 32
+. 83 5B 00 04 3B 80 00 01 93 9B 00 00 3B 00 00 00 83 3A 01 58 3A C0 00 00 2C 99 00 00 40 86 00 30
+
+. 0 2547A33C 32
+. 80 7A 01 E8 7F 4B BA 78 31 4B FF FF 7F AA 59 10 21 23 00 00 7C 09 19 14 7C 0B E8 39 41 82 00 10
+
+. 0 2547A368 12
+. 82 BA 00 24 2F 95 00 00 40 9E 00 1C
+
+. 0 2547A38C 44
+. 80 DA 00 34 7F 72 DB 78 83 9A 00 08 82 A6 00 04 91 FF 00 14 91 DF 00 18 81 3C 00 00 92 BF 00 1C 2F 09 00 00 93 5F 00 10 41 9A 05 04
+
+. 0 2547A3B8 16
+. 3F 20 7F FF 63 27 FF FD 90 FF 00 40 48 00 00 BC
+
+. 0 2547A480 8
+. 2F 89 00 01 41 9E FF 44
+
+. 0 2547A3C8 24
+. 80 1C 00 04 38 80 00 24 7F A0 AA 14 7F A3 EB 78 7F B9 EB 78 48 00 8A 25
+
+. 0 2547A3E0 8
+. 2C 83 00 00 40 86 05 88
+
+. 0 2547A3E8 24
+. 80 BE 02 F8 38 7F 00 30 38 9F 00 34 38 DF 00 10 93 BF 00 20 48 00 11 A5
+
+. 0 2547B5A0 12
+. 94 21 FD 60 7D 48 02 A6 48 01 BA 59
+
+. 0 2547B5AC 56
+. 93 C1 02 98 7F C8 02 A6 39 00 00 00 91 41 02 A4 93 E1 02 9C 80 FE 04 C8 91 01 00 14 81 27 01 B4 90 81 02 74 7D 29 03 A6 90 A1 02 78 90 C1 02 7C 90 61 02 70 4E 80 04 21
+
+. 0 2547185C 12
+. 94 21 FF F0 7D 88 02 A6 48 02 57 9D
+
+. 0 25471868 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 7E 00 18 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 2547B5E4 20
+. 80 03 00 00 90 61 02 80 38 61 00 20 90 01 00 08 48 00 72 BD
+
+. 0 254828B0 8
+. 38 80 00 00 48 00 01 DC
+
+. 0 25482A90 172
+. 90 23 00 00 7C 08 02 A6 91 C3 00 0C D9 C3 00 58 90 03 00 08 91 E3 00 10 D9 E3 00 60 7C 00 00 26 92 03 00 14 DA 03 00 68 90 03 00 54 92 23 00 18 DA 23 00 70 92 43 00 1C DA 43 00 78 92 63 00 20 DA 63 00 80 92 83 00 24 DA 83 00 88 92 A3 00 28 DA A3 00 90 92 C3 00 2C DA C3 00 98 92 E3 00 30 DA E3 00 A0 93 03 00 34 DB 03 00 A8 93 23 00 38 DB 23 00 B0 93 43 00 3C DB 43 00 B8 93 63 00 40 DB 63 00 C0 93 83 00 44 DB 83 00 C8 93 A3 00 48 DB A3 00 D0 93 C3 00 4C DB C3 00 D8 93 E3 00 50 DB E3 00 E0 7C C8 02 A6 48 01 44 C9
+
+. 0 25482B3C 24
+. 7C A8 02 A6 80 A5 04 E4 7C C8 03 A6 80 A5 00 3C 74 A5 10 00 41 82 01 58
+
+. 0 25482CA8 4
+. 4B FF D2 F0
+
+. 0 2547FF98 24
+. 94 21 FF F0 38 00 00 00 90 03 01 C0 38 21 00 10 38 60 00 00 4E 80 00 20
+
+. 0 2547B5F8 20
+. 7C 7F 1B 79 80 61 02 7C 7F EA FB 78 2F 9F FF FF 40 82 00 54
+
+. 0 2547B60C 24
+. 81 21 02 80 38 01 00 10 81 81 02 78 90 09 00 00 7D 89 03 A6 4E 80 04 21
+
+. 0 2547A0BC 48
+. 7C 88 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 38 A0 00 00 90 81 00 14 80 63 00 00 80 9F 00 10 80 03 01 80 74 09 C0 00 54 06 17 BE 40 82 00 08
+
+. 0 2547A0EC 20
+. 38 C0 00 01 80 FF 00 04 81 1F 00 08 81 23 00 18 4B FF CB ED
+
+. 0 25482FA8 20
+. 8C A3 00 01 8C C4 00 01 2C 85 00 00 7C 05 30 00 40 86 FF D4
+
+. 0 25482F8C 8
+. 8C A3 00 01 40 82 00 34
+
+. 0 25482FC4 12
+. 88 A3 FF FF 7C 66 28 50 4E 80 00 20
+
+. 0 2547706C 8
+. 7F E3 FB 78 48 00 BF 61
+
+. 0 25477074 16
+. 83 54 00 00 3A C3 00 01 73 49 00 01 40 82 03 C4
+
+. 0 25477084 8
+. 3B 40 FF FF 41 8E 00 20
+
+. 0 2547708C 28
+. 83 9B 00 94 7F 6C FE 70 7D 9D DA 78 2E 1A FF FF 2F 9C 00 00 7D CC E8 50 40 9E 01 6C
+
+. 0 254770A8 24
+. 2C 9B 00 00 7F 65 FE 70 7C A0 DA 78 7F 7D DB 78 7D C5 00 50 41 86 03 70
+
+. 0 254770C0 12
+. 82 5E 01 9C 39 E0 FF FF 48 00 00 24
+
+. 0 254770EC 24
+. 80 7D 01 8C 3B 9D 01 8C 38 00 00 00 2E 03 FF FF 2F 03 00 00 41 92 00 2C
+
+. 0 2547712C 32
+. 2C 00 00 00 7F 86 E3 78 7F E3 FB 78 7E C4 B3 78 7E E5 BB 78 38 E1 02 28 39 01 00 18 41 82 FF 84
+
+. 0 254770CC 32
+. 2E 1A FF FF 83 BD 01 68 31 3D FF FF 7C C9 E9 10 7D 20 00 26 55 29 9F FE 7D 2B 30 39 41 82 00 B0
+
+. 0 25477198 4
+. 40 92 00 A8
+
+. 0 2547719C 12
+. 80 B5 00 00 2F 85 00 00 41 9E 00 68
+
+. 0 254771A8 40
+. 83 A5 01 80 7C AA DA 78 30 0A FF FF 7D 00 51 10 57 BC 00 02 6F 92 80 00 31 32 FF FF 7D E9 91 10 7D E0 40 39 41 82 00 40
+
+. 0 2547720C 4
+. 40 92 00 34
+
+. 0 25477210 16
+. 80 DE 01 68 80 A6 00 00 2C 85 FF FF 41 86 00 24
+
+. 0 25477220 24
+. 7F E3 FB 78 7E C4 B3 78 7E E5 BB 78 38 E1 02 28 39 01 00 18 4B FF F5 61
+
+. 0 25476794 12
+. 94 21 FF 00 7C 08 02 A6 48 02 08 65
+
+. 0 254767A0 220
+. 93 C1 00 F8 7F C8 02 A6 92 41 00 C8 92 61 00 CC 7D 20 00 26 90 01 01 04 38 00 00 00 82 5E 01 5C 82 7E 01 58 92 01 00 C0 92 21 00 C4 82 13 00 00 82 32 00 00 7D 12 43 78 91 E1 00 BC 7D F0 8A 14 91 C1 00 B8 93 E1 00 FC 7D CF 22 14 7C 3F 0B 78 39 8E 00 1E 90 DF 00 84 55 8B 00 36 81 41 00 00 7C 71 1B 78 92 A1 00 D4 39 E0 00 00 91 21 00 B4 7D 2B 00 D0 82 A6 00 00 92 C1 00 D8 7C 96 23 78 93 41 00 E8 3B 40 FF FF 93 61 00 EC 2E 1A FF FF 93 A1 00 F4 92 81 00 D0 92 E1 00 DC 93 01 00 E0 93 21 00 E4 93 81 00 F0 7D 41 49 6E 38 81 00 17 83 B5 00 00 81 DE 04 F4 54 9B 00 36 90 BF 00 80 90 FF 00 88 90 1F 00 8C 80 6E 00 00 3A 60 00 00 3A E0 00 00 70 69 00 01 40 82 02 5C
+
+. 0 2547687C 20
+. 80 9D 00 0C 7F 63 DB 78 80 BD 00 10 3B 80 00 00 48 00 D1 95
+
+. 0 25476890 8
+. 7C 78 1B 78 40 92 01 A0
+
+. 0 25476898 16
+. 83 3E 01 54 81 99 00 00 7F 9C 60 40 40 9C 01 4C
+
+. 0 254768A8 16
+. 81 7E 02 28 3A 80 00 02 82 0B 00 00 48 00 00 1C
+
+. 0 254768D0 12
+. 80 DD 00 14 2C 86 00 01 41 A6 FF E0
+
+. 0 254768DC 32
+. 81 3E 01 60 57 8A 18 38 7F 03 C3 78 80 A9 00 00 7E 6A 2A 14 7C 8A 28 2E 80 B3 00 04 48 00 D1 29
+
+. 0 254768FC 12
+. 7E 24 8B 78 7E C5 B3 78 48 00 D1 1D
+
+. 0 25476908 16
+. 83 4E 00 00 7E 7B 18 50 73 49 00 01 40 82 01 AC
+
+. 0 25476918 12
+. 7F 63 DB 78 7E 44 93 78 4B FF FB 25
+
+. 0 25481FEC 4
+. 4B FF F3 44
+
+. 0 25476924 20
+. 80 1D 00 14 7C 7A 1B 78 2E 03 FF FF 2F 00 00 00 40 9A 00 0C
+
+. 0 25476938 4
+. 41 92 00 78
+
+. 0 254769B0 28
+. 7C 76 98 50 7F 64 DB 78 7C FB 1A 14 38 BF 00 10 98 07 FF FF 38 60 00 03 48 00 B3 A1
+
+. 0 25481D68 12
+. 94 21 FF 80 7C C8 02 A6 48 01 52 91
+
+. 0 25481D74 64
+. 93 C1 00 78 7F C8 02 A6 93 61 00 6C 90 C1 00 84 93 21 00 64 7C 79 1B 78 83 7E 04 EC 93 81 00 70 7C BC 2B 78 80 1B 00 00 93 E1 00 7C 7C 9F 23 78 2F 80 00 00 93 41 00 68 93 A1 00 74 40 9E 00 48
+
+. 0 25481DB4 24
+. 83 BE 05 14 7C 83 23 78 38 00 00 C3 7C A4 2B 78 83 5D 00 00 44 00 00 02
+
+. 0 25481DCC 12
+. 7C 00 00 26 74 09 10 00 40 82 00 C4
+
+. 0 25481E98 12
+. 90 7D 00 00 38 60 FF FF 4B FF FF 38
+
+. 0 25481DD8 8
+. 2C 83 FF FF 40 A6 00 44
+
+. 0 25481DE0 12
+. 80 9D 00 00 2F 04 00 26 40 BA 00 38
+
+. 0 25481E20 44
+. 80 A1 00 84 83 21 00 64 83 41 00 68 7C A8 03 A6 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C 38 21 00 80 4E 80 00 20
+
+. 0 254769CC 8
+. 2F 83 00 00 40 9E 00 14
+
+. 0 254769E4 12
+. 38 80 00 01 90 9D 00 14 4B FF FF 54
+
+. 0 25476940 24
+. 81 7D 00 14 69 6C 00 02 21 2C 00 00 7D 89 61 14 7E F7 63 78 41 B2 FF 64
+
+. 0 254768B8 12
+. 3B 9C 00 01 3B BD 00 04 40 92 01 74
+
+. 0 254768C4 12
+. 81 39 00 00 7C 9C 48 40 40 84 01 24
+
+. 0 254769D4 16
+. 81 1F 00 20 55 00 04 26 2C 80 40 00 41 A6 FF 5C
+
+. 0 2547693C 28
+. 92 9D 00 14 81 7D 00 14 69 6C 00 02 21 2C 00 00 7D 89 61 14 7E F7 63 78 41 B2 FF 64
+
+. 0 254769F0 4
+. 40 92 00 44
+
+. 0 254769F4 8
+. 2F 17 00 00 41 9A 00 1C
+
+. 0 254769FC 16
+. 83 BE 05 14 81 3D 00 00 2F 89 00 02 41 9E 00 0C
+
+. 0 25476A14 16
+. 87 B5 00 04 7D EF BB 78 2C 9D 00 00 40 86 FE 48
+
+. 0 25476868 20
+. 80 6E 00 00 3A 60 00 00 3A E0 00 00 70 69 00 01 40 82 02 5C
+
+. 0 25483DE0 28
+. 80 04 00 00 38 84 FF F8 81 24 00 0C 38 63 FF F4 38 A5 00 02 90 03 00 0C 4B FF FF 34
+
+. 0 25483D2C 48
+. 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 25483DC4 28
+. 81 24 00 00 38 84 FF F4 80 04 00 10 38 63 FF F0 38 A5 00 03 91 23 00 10 4B FF FF 58
+
+. 0 25483D34 40
+. 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+. 0 25476A24 8
+. 2E 0F 00 00 41 92 02 6C
+
+. 0 25476A2C 8
+. 38 60 FF FF 48 00 00 2C
+
+. 0 25476A5C 100
+. 81 01 00 00 81 E8 00 04 80 88 FF B4 7D E8 03 A6 81 C8 FF B8 81 E8 FF BC 7C 80 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+. 0 25477238 32
+. 2E 03 FF FF 7C 7A 1B 78 7D 6E 00 D0 7C E0 00 26 54 E7 9F FE 55 6E 0F FE 7C EB 70 39 41 82 00 44
+
+. 0 25477258 20
+. 80 1B 01 E0 3B BB 01 E0 39 20 00 00 2F 00 FF FF 41 9A 00 28
+
+. 0 25477290 8
+. 2C 09 00 00 40 82 00 DC
+
+. 0 25477298 4
+. 40 92 00 8C
+
+. 0 2547729C 8
+. 2F 17 00 00 40 9A 01 E4
+
+. 0 254772A4 8
+. 7F E3 FB 78 48 00 09 49
+
+. 0 25477BF0 12
+. 94 21 FF C0 7C 88 02 A6 48 01 F4 09
+
+. 0 25477BFC 88
+. 93 C1 00 38 7F C8 02 A6 92 61 00 0C 90 81 00 44 7D 80 00 26 92 E1 00 1C 7C 77 1B 78 82 7E 04 F4 92 81 00 10 80 13 00 00 92 A1 00 14 70 09 00 01 92 C1 00 18 93 01 00 20 93 21 00 24 93 41 00 28 93 61 00 2C 93 81 00 30 93 A1 00 34 93 E1 00 3C 91 81 00 08 40 82 03 7C
+
+. 0 25477C54 16
+. 82 DE 02 54 80 76 00 00 2F 83 00 00 41 9E 00 FC
+
+. 0 25477D5C 16
+. 80 7E 02 60 38 A0 00 01 80 9E 02 5C 48 00 42 45
+
+. 0 2547BFAC 52
+. 7C 08 02 A6 94 21 FF 60 93 81 00 90 7C 9C 23 78 38 80 00 00 93 61 00 8C 93 A1 00 94 7C BD 2B 78 93 E1 00 9C 3B 60 FF FF 93 C1 00 98 90 01 00 A4 48 00 60 05
+
+. 0 2547BFE0 20
+. 7C 7F 1B 79 38 A1 00 10 38 60 00 03 7F E4 FB 78 41 80 00 40
+
+. 0 2547BFF4 4
+. 48 00 5E B1
+
+. 0 2547BFF8 8
+. 2F 83 00 00 41 9C 00 2C
+
+. 0 2547C000 40
+. 81 21 00 44 7F A5 EB 78 38 C0 00 02 7F E7 FB 78 2C 89 00 00 7D 24 4B 78 39 00 00 00 38 60 00 00 91 3C 00 00 40 86 00 34
+
+. 0 2547C058 4
+. 48 00 67 41
+
+. 0 2547C05C 8
+. 7C 7B 1B 78 4B FF FF C8
+
+. 0 2547C028 8
+. 7F E3 FB 78 48 00 5F C5
+
+. 0 2547C030 40
+. 80 81 00 A4 7F 63 DB 78 83 81 00 90 83 61 00 8C 7C 88 03 A6 83 A1 00 94 83 C1 00 98 83 E1 00 9C 38 21 00 A0 4E 80 00 20
+
+. 0 25477D6C 12
+. 2E 03 FF FF 7C 7F 1B 78 41 92 01 54
+
+. 0 25477D78 16
+. 81 3E 02 5C 83 A9 00 00 28 9D 00 10 40 85 00 E8
+
+. 0 25477D88 12
+. 80 9E 02 64 38 A0 00 0B 48 00 B5 09
+
+. 0 25477D94 8
+. 2F 03 00 00 40 9A 00 D4
+
+. 0 25477D9C 40
+. 81 5F 00 0C 93 F6 00 00 1D 0A 00 0C 83 9E 02 58 38 E8 00 17 54 E5 00 38 38 C5 00 30 7C 7F 2A 14 7E 06 E8 40 41 91 00 1C
+
+. 0 25477DC4 16
+. 80 9E 02 68 38 A0 00 14 90 7C 00 00 48 00 B4 C9
+
+. 0 254832B8 12
+. 54 80 07 BE 2C 00 00 00 41 82 00 2C
+
+. 0 254832EC 12
+. 55 69 07 BE 2F 09 00 00 40 9A 00 C0
+
+. 0 254832F8 24
+. 54 A8 F0 BE 7D 6A 5B 78 55 00 07 BE 7C 89 23 78 2F 80 00 01 41 9E 01 50
+
+. 0 2548345C 36
+. 80 6B 00 00 39 08 FF FF 80 C4 00 00 39 4B 00 04 39 24 00 04 7F 03 30 00 80 EA 00 00 80 09 00 00 41 9A 00 C8
+
+. 0 25483544 16
+. 7F 87 00 00 80 6A 00 04 80 C9 00 04 40 9E FD F4
+
+. 0 25483554 16
+. 7C 03 30 00 80 EA 00 08 80 09 00 08 41 A2 FD D4
+
+. 0 25483334 16
+. 7C 87 00 00 80 6A 00 0C 80 C9 00 0C 41 86 01 48
+
+. 0 25483488 20
+. 39 08 FF FC 39 4A 00 10 2F 08 00 00 39 29 00 10 40 BA FF D8
+
+. 0 2548349C 24
+. 7F 83 30 00 7C 03 30 10 7C 00 01 10 7C 00 03 B8 60 00 00 01 40 9E FE A4
+
+. 0 254834B4 8
+. 38 00 00 00 4B FF FE 9C
+
+. 0 25483354 12
+. 2F 80 00 00 7C 03 03 78 40 9E 00 40
+
+. 0 25483360 24
+. 54 BC 00 3A 54 A5 07 BE 7C 84 E2 14 7D 6B E2 14 2C 05 00 00 41 82 00 24
+
+. 0 25477DD4 8
+. 2C 03 00 00 41 A2 FE 8C
+
+. 0 25477C64 16
+. 81 56 00 00 38 60 00 00 2F 0A FF FF 41 9A 00 94
+
+. 0 25477C74 20
+. 83 1E 02 58 3A A0 00 00 83 78 00 00 2E 1B FF FF 41 92 01 64
+
+. 0 25477C88 36
+. 83 FB 00 14 83 9E 02 5C 3B 5F FF FF 7F 95 D0 00 83 BC 00 00 3B 80 00 00 7E CA EA 14 7F 3B B0 50 41 9D 00 4C
+
+. 0 25477CAC 44
+. 7C 7C D2 14 80 98 00 00 7C 7D 0E 70 7F BD 01 94 7E E3 BB 78 1F FD 00 18 7D 9F 22 14 80 0C 00 34 7C 00 C8 40 7C 9B 02 14 40 80 00 20
+
+. 0 25477CD8 4
+. 4B FF FD FD
+
+. 0 25477AD4 20
+. 94 21 FF F0 7C 67 1B 78 89 43 00 00 2F 8A 00 00 41 9E 00 DC
+
+. 0 25477AE8 20
+. 88 64 00 00 38 0A FF D0 7D 4B 53 78 28 00 00 09 41 81 00 8C
+
+. 0 25477B84 16
+. 39 03 FF D0 7C 60 1B 78 28 08 00 09 40 81 00 48
+
+. 0 25477B94 16
+. 55 43 06 3E 54 00 06 3E 7C 83 00 00 40 86 00 44
+
+. 0 25477BA4 16
+. 8D 47 00 01 8C 64 00 01 2F 0A 00 00 40 9A FF 3C
+
+. 0 25477AEC 16
+. 38 0A FF D0 7D 4B 53 78 28 00 00 09 41 81 00 8C
+
+. 0 25477BE4 12
+. 7C 60 18 50 38 21 00 10 4E 80 00 20
+
+. 0 25477CDC 8
+. 2F 83 00 00 41 9E 02 14
+
+. 0 25477CE4 4
+. 40 9C 00 6C
+
+. 0 25477D50 12
+. 3B 5D FF FF 7F 9C D0 00 4B FF FF 98
+
+. 0 25477CF0 4
+. 40 9D FF BC
+
+. 0 25477CE8 12
+. 3B 9D 00 01 7F 9C D0 00 40 9D FF BC
+
+. 0 25477AFC 16
+. 38 A3 FF D0 7C 69 1B 78 28 85 00 09 41 85 00 C4
+
+. 0 25477B0C 36
+. 8D 47 00 01 55 6B 06 3E 55 29 06 3E 39 0B FF D0 38 6A FF D0 39 69 FF D0 2B 83 00 09 38 84 00 01 41 9D 00 20
+
+. 0 25477B4C 8
+. 88 64 00 00 48 00 00 14
+
+. 0 25477B64 12
+. 38 A3 FF D0 2B 05 00 09 40 99 FF E8
+
+. 0 25477B70 8
+. 7F 88 58 00 41 BE 00 38
+
+. 0 25477BAC 8
+. 2F 0A 00 00 40 9A FF 3C
+
+. 0 25477BB4 12
+. 7C 63 50 50 38 21 00 10 4E 80 00 20
+
+. 0 25477EF4 16
+. 2F 9D 00 00 7F B6 EB 78 7F FC FB 78 41 BD 00 20
+
+. 0 25477F20 32
+. 80 D8 00 00 7E E3 BB 78 3B 9C FF E8 7D 3F 32 14 80 A9 00 1C 7C 85 C8 40 7C 9B 2A 14 41 84 FF CC
+
+. 0 25477F08 4
+. 4B FF FB CD
+
+. 0 25477F0C 8
+. 2F 03 00 00 40 9A 00 30
+
+. 0 25477F40 20
+. 7E 1D B0 00 80 F8 00 00 7F FF 3A 14 3B 9F 00 30 40 91 00 24
+
+. 0 25477F74 36
+. 81 1F 00 30 69 1F 00 01 21 3F 00 00 7F E9 F9 14 69 14 00 03 21 54 00 00 7E 8A A1 14 7F EB A3 79 41 82 00 24
+
+. 0 25477F98 12
+. 80 FC 00 08 7F 07 C8 40 40 98 00 18
+
+. 0 25477FA4 8
+. 2E 15 00 00 41 92 01 28
+
+. 0 254780D0 12
+. 81 33 00 08 2C 89 00 00 41 86 00 10
+
+. 0 254780DC 12
+. 81 7C 00 0C 7C 0B 48 40 41 81 FE D4
+
+. 0 254780E8 44
+. 80 D3 00 3C 3A 80 00 00 80 7C 00 10 66 89 80 00 80 1C 00 14 7D 25 48 F8 7C CC 30 F8 7C 64 28 38 7C 1C 60 38 7C 80 E3 79 40 82 FE A8
+
+. 0 25478114 16
+. 83 F3 00 38 7E A7 DA 14 7F 1F 40 00 40 9A FE 98
+
+. 0 25478124 4
+. 4B FF FB D0
+
+. 0 25477CF4 12
+. 82 F3 00 00 72 EB 00 01 40 82 01 AC
+
+. 0 25477D00 80
+. 7E A3 AB 78 82 61 00 44 80 E1 00 08 7E 68 03 A6 82 81 00 10 82 61 00 0C 7C E0 81 20 82 A1 00 14 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+
+. 0 254772AC 12
+. 7C 72 1B 79 7F 80 00 26 41 82 00 20
+
+. 0 254772B8 4
+. 41 8E 01 7C
+
+. 0 254772BC 16
+. 7F 69 DB 78 83 A9 01 FC 73 A9 08 00 40 82 01 D4
+
+. 0 254772CC 8
+. 7F 88 01 20 40 82 00 C8
+
+. 0 25477398 12
+. 7E 43 93 78 38 81 00 18 4B FF F0 A5
+
+. 0 254765B4 12
+. 80 FD 00 10 2F 07 00 20 40 9A FF A0
+
+. 0 254765C0 12
+. 81 5D 00 1C 2B 8A 00 03 40 BD FF 94
+
+. 0 254765CC 44
+. 81 9D 00 04 38 A0 00 00 81 18 00 00 3A BF 00 10 38 0C 00 20 7D 6C C2 14 7C 00 40 40 7F 23 CB 78 7D 84 63 78 3B 6B 00 04 41 81 00 FC
+
+. 0 254765F8 16
+. 80 9E 01 70 7F 63 DB 78 38 A0 00 10 48 00 CC 95
+
+. 0 25483310 8
+. 28 00 00 01 41 80 02 28
+
+. 0 2548353C 24
+. 80 EB 00 00 80 04 00 00 7F 87 00 00 80 6A 00 04 80 C9 00 04 40 9E FD F4
+
+. 0 25476608 8
+. 2F 03 00 00 41 9A 00 84
+
+. 0 25476690 40
+. 81 3B 00 10 8B 5B 00 17 8B BB 00 1B 2F 89 00 00 57 55 40 2E 8B 1B 00 1F 7F 95 EA 14 57 96 40 2E 7C 16 C2 14 40 9E 00 1C
+
+. 0 254766B8 16
+. 80 7E 04 F4 81 23 00 08 2C 09 00 00 41 A2 FE AC
+
+. 0 254766C8 8
+. 7C 89 00 40 40 A4 FE A4
+
+. 0 254773A4 12
+. 2E 03 FF FF 7C 7A 1B 78 41 B2 FF 2C
+
+. 0 254773B0 8
+. 7E 43 93 78 48 00 BC 1D
+
+. 0 254773B8 12
+. 3B A3 00 01 7F A3 EB 78 48 02 06 65
+
+. 0 254773C4 12
+. 38 00 00 00 2F 03 00 00 41 9A 00 14
+
+. 0 254773D0 12
+. 7E 44 93 78 7F A5 EB 78 48 00 C8 09
+
+. 0 25483C74 20
+. 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+
+. 0 254773DC 16
+. 7C 60 1B 78 2F 80 00 00 90 01 02 28 40 9E FE EC
+
+. 0 254772D4 4
+. 40 92 00 50
+
+. 0 25477324 12
+. 82 F4 00 00 72 E9 00 01 41 82 FA D8
+
+. 0 254758A8 28
+. 54 EF 28 34 80 1D 00 08 7D CF DA 14 3B BD 00 20 7C 8E E8 40 90 1A 01 4C 41 85 FF 4C
+
+. 0 25475830 8
+. 7F 09 00 40 41 99 02 4C
+
+. 0 25475A80 16
+. 3D 40 64 74 61 4F E5 52 7F 89 78 00 40 9E FD 6C
+
+. 0 25475A90 20
+. 81 1D 00 08 91 1A 02 34 80 DD 00 14 90 DA 02 38 4B FF FD 58
+
+. 0 25483974 12
+. 90 86 FF FC 90 86 FF F8 4E 80 00 20
+
+. 0 25476054 24
+. 55 6C 08 3C 7D 80 0E 70 7C 17 00 F8 20 00 00 31 2B 17 00 02 40 B9 FC 38
+
+. 0 2547606C 12
+. 7F 2B 28 50 2A 19 00 0B 41 91 00 C4
+
+. 0 25476078 12
+. 7C 0B 20 50 54 00 10 3A 4B FF FC 24
+
+. 0 25475D68 12
+. 81 28 00 78 2C 09 00 00 41 82 00 30
+
+. 0 254760FC 8
+. 81 7A 01 FC 4B FF FC C0
+
+. 0 25475F88 28
+. 80 91 00 50 C8 1F 00 28 2C 84 00 00 D8 1A 01 D0 C9 BF 00 30 D9 BA 01 D8 41 86 FE B0
+
+. 0 2547A100 24
+. 90 7F 00 14 80 61 00 14 83 E1 00 08 38 21 00 10 7C 68 03 A6 4E 80 00 20
+
+. 0 2547B624 56
+. 38 60 00 00 81 61 00 08 80 C1 02 80 80 A1 02 70 80 81 02 74 93 E5 00 00 91 66 00 00 93 E4 00 00 81 41 02 A4 83 C1 02 98 83 E1 02 9C 7D 48 03 A6 38 21 02 A0 4E 80 00 20
+
+. 0 2547A400 12
+. 83 3F 00 34 2F 19 00 00 40 9A 01 AC
+
+. 0 2547A40C 20
+. 81 5F 00 24 80 8A 01 80 54 89 3F BE 2F 89 00 00 40 9E 00 44
+
+. 0 2547A420 72
+. 80 01 00 00 38 C0 00 01 80 FF 00 0C 94 01 FF E0 3A 81 00 17 56 85 00 36 91 25 00 08 91 25 00 00 39 27 00 01 90 B3 00 08 91 45 00 04 82 6A 01 80 91 3F 00 0C 50 D3 C9 4C 92 6A 01 80 7C B3 2B 78 2C 98 00 00 41 86 00 10
+
+. 0 2547A474 12
+. 85 3C 00 08 2C 09 00 00 41 82 04 3C
+
+. 0 25482F94 12
+. 8C C4 00 01 2C 85 00 00 41 86 00 20
+
+. 0 25475838 8
+. 2C 09 00 07 40 82 FF BC
+
+. 0 25475840 12
+. 80 1D 00 14 2F 00 00 00 41 BA FF B0
+
+. 0 2547584C 24
+. 90 1A 02 20 81 7D 00 1C 91 7A 02 24 81 3D 00 1C 2F 89 00 00 41 9E 07 EC
+
+. 0 25475864 52
+. 80 BD 00 08 39 29 FF FF 7C A0 48 38 90 1A 02 28 81 DA 01 80 3C C0 40 00 81 5D 00 10 55 C8 00 02 7C 08 30 00 91 5A 02 1C 80 9D 00 08 90 9A 02 18 40 82 0A 24
+
+. 0 25475898 4
+. 48 00 83 7D
+
+. 0 2547DC14 12
+. 94 21 FF F0 7D 88 02 A6 48 01 93 E5
+
+. 0 2547DC20 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 DE 04 C8 88 06 04 0C 2F 80 00 00 40 9E 00 20
+
+. 0 2547DC3C 28
+. 81 46 04 08 39 0A 00 01 91 06 04 08 7D 03 43 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 2547589C 12
+. A0 FA 01 54 90 7A 02 30 4B FF FF 54
+
+. 0 254759E0 8
+. 7C 9C C8 40 40 85 00 2C
+
+. 0 254837B0 12
+. 90 88 FF FC 94 88 FF F8 41 84 00 14
+
+. 0 254837D4 16
+. 28 84 00 00 54 A7 00 35 7C A0 11 20 41 86 01 A0
+
+. 0 25483884 16
+. 7C 00 37 EC 38 C6 00 20 39 20 FF E0 40 99 00 10
+
+. 0 254760C0 28
+. 80 BD 00 14 7F 83 E3 78 7C 9C B8 50 38 C0 00 32 38 E0 FF FF 39 00 00 00 48 00 C6 C1
+
+. 0 254760DC 8
+. 2C 83 FF FF 40 86 F9 38
+
+. 0 25475D74 16
+. 80 09 00 04 70 0A 00 02 90 1A 02 00 41 82 00 08
+
+. 0 25475D88 8
+. 70 0B 00 04 41 82 00 08
+
+. 0 25475D94 8
+. 70 0A 00 08 41 82 00 08
+
+. 0 25475D9C 16
+. 91 28 00 60 81 28 00 98 2F 89 00 00 41 9E 03 54
+
+. 0 25475DBC 16
+. 91 28 00 60 81 28 00 74 2C 89 00 00 41 86 00 E0
+
+. 0 25475EEC 20
+. 80 DA 00 00 7D 49 32 14 91 5A 02 18 7E 63 9B 78 48 00 C0 F5
+
+. 0 2547A488 12
+. 80 7F 00 40 7C 09 18 00 41 82 00 14
+
+. 0 2547A494 16
+. 3D 40 7F FF 61 48 FF FF 7F 09 40 00 40 9A FF D4
+
+. 0 2547A8B8 8
+. 2F 98 00 00 41 9E 00 5C
+
+. 0 2547A918 16
+. 80 1B 00 00 2F 1B 00 00 2C 00 00 00 41 82 00 1C
+
+. 0 2547A928 12
+. 83 7B 00 08 2F 1B 00 00 41 BA FC 94
+
+. 0 2547A934 12
+. 80 1B 00 00 2C 00 00 00 40 82 FF EC
+
+. 0 2547A940 4
+. 40 9A F9 DC
+
+. 0 2547A35C 12
+. A1 3A 01 56 2F 09 00 00 40 9A 05 E4
+
+. 0 2547A948 36
+. 55 34 10 3A 81 81 00 00 3A 54 00 1E 56 49 03 76 7D 49 00 D0 7D 81 51 6E 39 01 00 17 55 18 00 36 4B FF FA 00
+
+. 0 2547A0F0 16
+. 80 FF 00 04 81 1F 00 08 81 23 00 18 4B FF CB ED
+
+. 0 25482F10 24
+. 84 C4 00 04 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+
+. 0 25482F28 8
+. 7D 00 28 38 41 86 00 2C
+
+. 0 25482F30 16
+. 7C 00 42 14 7C AA 32 79 7D 29 00 78 41 80 00 34
+
+. 0 25482F40 24
+. 7D 4A 00 34 7D 29 00 34 39 29 00 07 7C 89 50 00 7C 66 28 50 4C A4 00 20
+
+. 0 25482F58 8
+. 38 60 00 00 4E 80 00 20
+
+. 0 2547C750 32
+. 80 81 00 24 38 60 00 01 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25476F54 100
+. 7F A3 EB 78 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+
+. 0 2547A460 8
+. 2C 98 00 00 41 86 00 10
+
+. 0 2547A468 24
+. 56 CB 10 3A 3A D6 00 01 7D 4B C1 2E 85 3C 00 08 2C 09 00 00 41 82 04 3C
+
+. 0 2547A8C0 28
+. 3B B6 00 01 56 DC 10 3A 3A 40 00 00 57 B6 18 38 7E 5C C1 2E 38 76 00 04 48 01 D1 4D
+
+. 0 2547A8DC 16
+. 2C 83 00 00 7C 69 1B 78 90 7A 01 E8 41 86 05 C4
+
+. 0 2547A8EC 24
+. 57 B5 10 3A 7F 04 C3 78 7E A5 AB 78 93 49 00 00 38 63 00 04 48 00 92 E1
+
+. 0 2547A904 20
+. 80 9A 01 E8 7E A5 AB 78 7F 55 22 14 38 7A 00 04 48 00 92 CD
+
+. 0 25476ED0 12
+. 80 9D 00 1C 3B 60 00 00 48 00 00 20
+
+. 0 25476EF8 8
+. 2F 04 00 00 40 9A FF E0
+
+. 0 25476EDC 16
+. 7C 9B 23 78 80 84 00 00 7F 83 E3 78 48 00 BF F9
+
+. 0 25476EEC 8
+. 2F 83 00 00 41 9E 00 58
+
+. 0 25476EF4 12
+. 80 9B 00 04 2F 04 00 00 40 9A FF E0
+
+. 0 25476F00 8
+. 7F 83 E3 78 48 00 C0 CD
+
+. 0 25476F08 16
+. 7C 66 1B 78 38 63 00 0D 3B 46 00 01 48 02 0B 11
+
+. 0 25476F18 16
+. 7C 7F 1B 79 80 DE 01 AC 7F 84 E3 78 41 82 01 3C
+
+. 0 25476F28 16
+. 7F 84 E3 78 7F 45 D3 78 38 7F 00 0C 48 00 CC AD
+
+. 0 25476F38 128
+. 92 DF 00 04 90 7F 00 00 92 DF 00 08 93 FB 00 04 81 1D 01 80 65 07 00 80 90 FD 01 80 7F A3 EB 78 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+
+. 0 2547A374 12
+. 80 9A 00 F0 2C 84 00 00 40 86 00 10
+
+. 0 2547A380 12
+. 80 BA 00 E8 2C 05 00 00 41 82 05 30
+
+. 0 2547A5C4 32
+. 83 10 00 00 82 7F 00 38 21 78 00 00 7D CB C1 14 31 53 FF FF 7F 6A 99 10 7D C8 D8 39 41 82 00 0C
+
+. 0 2547A5E4 20
+. 81 1F 00 38 91 10 00 00 80 77 01 E8 2F 03 00 00 41 9A 00 18
+
+. 0 2547A60C 16
+. 80 DF 00 0C 54 C5 18 38 38 65 00 04 48 01 D4 0D
+
+. 0 2547A61C 12
+. 2C 83 00 00 90 77 01 E8 41 86 08 9C
+
+. 0 2547A628 44
+. 80 FF 00 08 38 A0 00 00 82 9F 00 0C 2C 07 00 00 56 9D 10 3A 92 97 01 5C 7D 23 EA 14 90 BF 00 0C 38 89 00 04 90 97 01 58 41 82 00 50
+
+. 0 2547A654 12
+. 2F 0F 00 00 38 C0 00 00 40 9A 01 F8
+
+. 0 2547A660 56
+. 81 7F 00 0C 80 07 00 04 83 37 01 58 55 76 10 3A 39 8B 00 01 7C 16 C9 2E 81 E7 00 04 80 6F 01 80 91 9F 00 0C 50 C3 C9 4C 90 6F 01 80 80 E7 00 08 2F 07 00 00 40 9A FF CC
+
+. 0 2547A698 24
+. 80 BF 00 0C 80 97 01 58 83 9E 04 F4 80 FC 00 00 70 E8 04 00 40 82 04 E0
+
+. 0 2547A6B0 16
+. 80 D7 01 F0 38 60 00 00 2B 86 00 00 40 9D 00 48
+
+. 0 2547A704 16
+. 80 77 01 E8 54 A5 10 3A 3B 00 00 01 48 00 94 D1
+
+. 0 2547A714 12
+. 80 FF 00 0C 7C 18 38 40 40 80 03 2C
+
+. 0 2547A720 32
+. 81 57 01 E8 82 57 01 58 57 15 10 3A 80 6A 00 04 3B 20 00 01 7F 55 90 2E 7F 83 D0 00 41 9E 00 18
+
+. 0 2547A754 12
+. 3B 79 00 01 7C 9B 38 40 40 84 00 7C
+
+. 0 2547A760 20
+. 57 68 10 3A 7E 68 50 2E 81 73 01 E8 2F 0B 00 00 41 9A 00 5C
+
+. 0 2547A774 12
+. 81 2B 00 00 2F 89 00 00 41 9E 00 50
+
+. 0 2547A780 12
+. 7E 19 D8 50 56 05 10 3A 48 00 00 10
+
+. 0 2547A798 12
+. 7C 09 D0 00 39 6B 00 04 40 82 FF EC
+
+. 0 2547A78C 12
+. 81 2B 00 00 2C 89 00 00 41 86 00 38
+
+. 0 2547A7CC 12
+. 3B 7B 00 01 7C 9B 38 40 41 84 FF 8C
+
+. 0 2547A7D8 12
+. 3B 18 00 01 7F 18 38 40 41 98 FF 44
+
+. 0 2547A724 28
+. 82 57 01 58 57 15 10 3A 80 6A 00 04 3B 20 00 01 7F 55 90 2E 7F 83 D0 00 41 9E 00 18
+
+. 0 2547A740 20
+. 3B 39 00 01 57 2E 10 3A 7F 6E 50 2E 7F 9B D0 00 40 9E FF F0
+
+. 0 2547A7E4 20
+. 2F 91 00 00 54 F7 10 3A 3B 40 00 00 7F 57 51 2E 40 9E 06 DC
+
+. 0 2547A7F8 92
+. 81 01 00 00 82 28 00 04 81 C8 FF B8 7E 28 03 A6 81 E8 FF BC 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+. 0 254727E4 12
+. 80 F4 01 5C 2C 87 00 00 41 86 00 3C
+
+. 0 254727F0 56
+. 81 14 01 58 7C E9 03 A6 38 E7 FF FF 54 F1 10 3A 7F 11 40 2E 82 B8 01 80 66 A8 08 00 91 18 01 80 81 14 01 58 7F 31 40 2E 81 39 01 78 3B 89 00 01 93 99 01 78 42 00 FF D4
+
+. 0 254727F8 48
+. 38 E7 FF FF 54 F1 10 3A 7F 11 40 2E 82 B8 01 80 66 A8 08 00 91 18 01 80 81 14 01 58 7F 31 40 2E 81 39 01 78 3B 89 00 01 93 99 01 78 42 00 FF D4
+
+. 0 25472828 24
+. 81 6E 01 C8 80 EE 01 C4 90 EB 00 0C 81 2E 01 C4 2E 09 00 00 41 92 00 08
+
+. 0 25472840 16
+. 91 69 00 10 81 4E 03 30 2B 0A 00 01 40 99 13 58
+
+. 0 25472850 28
+. 81 54 01 58 39 0E 01 B8 39 20 00 01 39 60 00 04 80 6A 00 04 7F 83 40 00 41 9E 00 18
+
+. 0 2547286C 20
+. 39 29 00 01 55 2B 10 3A 7C 8B 50 2E 7F 84 40 00 40 9E FF F0
+
+. 0 25472880 24
+. 81 1F 00 30 7D 4B 52 14 81 6A FF FC 2C 08 00 00 91 6E 01 C8 40 82 0F 34
+
+. 0 25472898 24
+. 82 74 01 5C 3A E9 00 01 38 00 00 00 7D C9 73 78 7F 97 98 40 40 9C 00 08
+
+. 0 254728B4 28
+. 90 09 01 C4 82 0E 01 C8 38 0E 01 B8 90 10 00 0C 81 2E 01 C4 2C 89 00 00 41 86 00 08
+
+. 0 254728D4 44
+. 80 7E 00 2C 69 1A 00 03 20 1A 00 00 7F 40 D1 14 80 9E 00 14 21 48 00 00 7F AA 41 14 38 BF 00 58 93 BF 00 58 93 5F 00 5C 48 00 8D 9D
+
+. 0 2547B698 12
+. 94 21 FF D0 7C C8 02 A6 48 01 B9 61
+
+. 0 2547B6A4 68
+. 93 C1 00 28 7F C8 02 A6 93 61 00 1C 7C BB 2B 78 90 C1 00 34 92 E1 00 0C 7C 97 23 78 80 BE 04 C8 93 21 00 14 7C 79 1B 78 81 25 01 B4 93 41 00 18 93 81 00 20 7D 29 03 A6 93 A1 00 24 83 9E 03 1C 4E 80 04 21
+
+. 0 2547B6E8 36
+. 38 80 00 00 7C 7D 1B 78 7E E8 03 A6 83 5D 00 00 7F 63 DB 78 90 9D 00 00 83 7C 00 00 93 3C 00 00 4E 80 00 21
+
+. 0 254717D4 12
+. 94 21 FF E0 7C 08 02 A6 48 02 58 25
+
+. 0 254717E0 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 01 00 24 7C 7F 1B 78 80 BF 00 04 38 80 00 01 81 3E 04 C8 93 A1 00 14 80 69 00 00 48 00 B7 C1
+
+. 0 2547CFC8 56
+. 94 21 FF E0 7C 08 02 A6 93 E1 00 1C 7C 7F 1B 79 93 41 00 08 7C 9A 23 78 93 61 00 0C 7C BB 2B 78 93 81 00 10 3B 80 00 00 93 A1 00 14 93 C1 00 18 90 01 00 24 40 A2 00 18
+
+. 0 2547D014 28
+. 80 DF 01 80 7F E3 FB 78 7F 44 D3 78 7F 65 DB 78 74 C9 00 40 3B A0 00 00 40 82 FF D8
+
+. 0 2547D030 4
+. 4B FF FB 41
+
+. 0 2547CB70 16
+. 7C 08 02 A6 94 21 FF 60 7D 80 00 26 48 01 A4 85
+
+. 0 2547CB80 116
+. 92 01 00 60 3A 00 00 00 90 01 00 A4 81 23 00 34 92 A1 00 74 7C 95 23 78 2F 89 00 00 92 C1 00 78 93 01 00 80 3A C0 00 00 93 61 00 8C 3B 00 00 00 93 C1 00 98 7C 7B 1B 78 93 E1 00 9C 7F C8 02 A6 92 21 00 64 7C 3F 0B 78 92 41 00 68 38 60 00 00 92 61 00 6C 92 81 00 70 92 E1 00 7C 93 21 00 84 93 41 00 88 93 81 00 90 93 A1 00 94 91 81 00 5C 41 9E 01 80
+
+. 0 2547CBF4 20
+. 82 5B 00 AC 83 29 00 04 2D 92 00 00 82 3B 00 B4 41 8E 01 28
+
+. 0 2547CC08 36
+. 81 7B 00 00 2E 05 00 00 81 32 00 04 82 9E 04 C8 7C 8B 4A 2E 7E EB 4A 14 82 7E 04 D4 2C 04 00 01 40 82 02 D0
+
+. 0 2547CC2C 24
+. 83 5B 00 18 83 97 00 04 1E 1A 00 18 7F 5C CA 14 7F B0 A0 2E 48 00 00 1C
+
+. 0 2547CC5C 8
+. 2F 1D 00 00 40 9A FF E4
+
+. 0 2547CC44 12
+. 7F A4 EB 78 7F 43 D3 78 4B FF FA 89
+
+. 0 2547CC50 8
+. 2F 83 00 00 40 9E 02 20
+
+. 0 2547CC58 12
+. 83 BD 00 0C 2F 1D 00 00 40 9A FF E4
+
+. 0 2547CE74 8
+. 7F BC EB 78 41 92 FE 30
+
+. 0 2547CCA8 36
+. 80 97 00 08 7F A4 BA 14 7E 7A 9B 78 81 1B 00 04 7E A7 AB 78 88 A8 00 00 7D 03 43 78 2C 85 00 00 40 86 00 0C
+
+. 0 2547CCCC 36
+. 80 DA 00 00 80 66 00 00 81 5D 00 08 A2 1D 00 04 80 9D 00 00 7C AA CA 14 80 DC 00 14 56 08 07 BC 4B FF FA 85
+
+. 0 2547C770 12
+. 94 21 FF 20 7D 48 02 A6 48 01 A8 89
+
+. 0 2547C77C 116
+. 93 C1 00 D8 7F C8 02 A6 92 61 00 AC 91 41 00 E4 3A 60 00 00 81 66 00 34 81 3E 04 F4 92 C1 00 B8 7C F6 3B 78 80 09 00 00 92 E1 00 BC 7D 17 43 78 70 09 00 10 93 01 00 C0 93 21 00 C4 7C 78 1B 78 93 61 00 CC 7C D9 33 78 93 81 00 D0 7C 9B 23 78 93 E1 00 DC 7C BC 2B 78 92 81 00 B0 7C 3F 0B 78 92 A1 00 B4 93 41 00 C8 93 A1 00 D4 83 4B 00 04 40 82 02 30
+
+. 0 2547C7F0 12
+. 81 39 00 B4 2C 89 00 00 41 86 02 64
+
+. 0 2547C7FC 36
+. 81 69 00 04 3A 9F 00 08 80 19 00 00 82 BE 03 58 7F A0 5A 14 A1 3D 00 00 7F 83 E3 78 2F 89 00 01 40 9E 01 24
+
+. 0 2547C820 12
+. 80 9D 00 08 7C 84 D8 00 41 86 00 B4
+
+. 0 2547C82C 16
+. 80 FD 00 10 2F 87 00 00 7F BD 3A 14 40 9E FF D8
+
+. 0 2547C810 16
+. A1 3D 00 00 7F 83 E3 78 2F 89 00 01 40 9E 01 24
+
+. 0 2547C8DC 16
+. 80 BD 00 0C 7C C5 E8 2E 7C 86 D2 14 48 00 65 F9
+
+. 0 2547C8EC 8
+. 2F 03 00 00 40 BA FF 3C
+
+. 0 2547C8F4 76
+. 38 60 00 00 80 C1 00 00 80 E6 00 04 82 66 FF CC 82 86 FF D0 7C E8 03 A6 82 A6 FF D4 82 C6 FF D8 82 E6 FF DC 83 06 FF E0 83 26 FF E4 83 46 FF E8 83 66 FF EC 83 86 FF F0 83 A6 FF F4 83 C6 FF F8 83 E6 FF FC 7C C1 33 78 4E 80 00 20
+
+. 0 2547CCF0 20
+. A1 7D 00 06 7E D6 1B 78 55 60 04 7E 7F 00 C0 40 40 99 00 08
+
+. 0 2547CD04 20
+. 7C 18 03 78 80 7D 00 0C 2F 83 00 00 7F BD 1A 14 40 9E FF A0
+
+. 0 2547CD18 12
+. 80 17 00 0C 2C 00 00 00 41 82 00 0C
+
+. 0 2547CD24 8
+. 7E F7 02 14 4B FF FF 04
+
+. 0 2547CD08 16
+. 80 7D 00 0C 2F 83 00 00 7F BD 1A 14 40 9E FF A0
+
+. 0 2547CD2C 8
+. 2E 11 00 00 41 92 00 34
+
+. 0 2547CD64 8
+. 2F 98 00 00 40 9E 00 64
+
+. 0 2547CDCC 20
+. 3B B8 00 01 38 80 00 10 7F A3 EB 78 3A 00 00 0C 48 01 AC 51
+
+. 0 2547CDE0 20
+. 80 DE 03 78 2C 03 00 00 7C 65 1B 78 90 7B 01 88 41 82 01 AC
+
+. 0 2547CDF4 20
+. 83 9B 00 E4 93 BB 01 84 83 1C 00 04 93 1B 01 98 41 8E 00 88
+
+. 0 2547CE08 88
+. 83 52 00 04 82 5B 00 00 7C D2 D2 14 81 86 00 08 7C EC 32 14 A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+
+. 0 2547CE60 12
+. 80 06 00 0C 2C 80 00 00 41 86 00 24
+
+. 0 2547CE6C 8
+. 7C C6 02 14 4B FF FF A4
+
+. 0 2547CE14 76
+. 81 86 00 08 7C EC 32 14 A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+
+. 0 2547CE8C 4
+. 41 B2 FE E0
+
+. 0 2547CD6C 96
+. 7E C3 B3 78 80 A1 00 00 82 C5 00 04 81 85 FF BC 7E C8 03 A6 82 05 FF C0 82 25 FF C4 7D 81 81 20 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+
+. 0 2547D034 8
+. 2F 83 00 00 41 BE FF CC
+
+. 0 2547D004 16
+. 83 FF 00 0C 7F 9C EB 78 2C 9F 00 00 41 86 00 40
+
+. 0 2547CCD4 28
+. 81 5D 00 08 A2 1D 00 04 80 9D 00 00 7C AA CA 14 80 DC 00 14 56 08 07 BC 4B FF FA 85
+
+. 0 2547CCB4 24
+. 81 1B 00 04 7E A7 AB 78 88 A8 00 00 7D 03 43 78 2C 85 00 00 40 86 00 0C
+
+. 0 2547CE1C 68
+. A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+
+. 0 2547CD34 28
+. 82 BB 00 00 82 F1 00 04 7D 35 BA 14 A2 69 00 04 56 60 04 7E 7C 80 C0 40 40 85 00 08
+
+. 0 2547CD54 16
+. 82 89 00 10 2F 14 00 00 7D 29 A2 14 40 9A FF E0
+
+. 0 2547CD40 16
+. A2 69 00 04 56 60 04 7E 7C 80 C0 40 40 85 00 08
+
+. 0 2547CE90 32
+. 80 DB 00 00 38 E0 00 00 83 71 00 04 7C 66 DA 14 A2 23 00 02 81 03 00 0C 72 29 00 01 40 82 00 28
+
+. 0 2547CED4 16
+. 82 43 00 10 2E 12 00 00 7C 63 92 14 40 92 FF C0
+
+. 0 2547CEA0 16
+. A2 23 00 02 81 03 00 0C 72 29 00 01 40 82 00 28
+
+. 0 2547CEB0 52
+. A3 43 00 04 83 83 00 08 57 57 23 76 7F 17 2A 14 93 98 00 04 7E 88 18 2E 90 F8 00 0C 7E 74 CA 14 7E 77 29 2E 82 43 00 10 2E 12 00 00 7C 63 92 14 40 92 FF C0
+
+. 0 2547CEE4 8
+. 7E C3 B3 78 4B FF FE 88
+
+. 0 2547CD70 92
+. 80 A1 00 00 82 C5 00 04 81 85 FF BC 7E C8 03 A6 82 05 FF C0 82 25 FF C4 7D 81 81 20 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+
+. 0 2547CD50 20
+. 7C 18 03 78 82 89 00 10 2F 14 00 00 7D 29 A2 14 40 9A FF E0
+
+. 0 2547D050 44
+. 80 81 00 24 7F 83 E3 78 83 41 00 08 83 61 00 0C 7C 88 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547180C 8
+. 2F 83 00 00 41 9E 00 14
+
+. 0 25471824 28
+. 80 61 00 24 83 A1 00 14 83 C1 00 18 7C 68 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547B70C 52
+. 93 5D 00 00 80 01 00 34 93 7C 00 00 82 E1 00 0C 7C 08 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 38 21 00 30 4E 80 00 20
+
+. 0 25472900 12
+. 83 6E 03 D8 2E 1B 00 00 40 92 0A EC
+
+. 0 2547290C 24
+. 81 6E 04 08 3A CB 00 3F 91 6E 04 14 56 D9 18 38 38 79 00 08 48 02 51 05
+
+. 0 25472924 24
+. 7F 25 CB 78 7C 6C 1B 78 38 80 00 00 38 63 00 08 91 8E 04 10 48 01 0E 0D
+
+. 0 2547293C 24
+. 80 CE 04 10 38 00 00 00 7E 89 A3 78 90 06 00 04 92 C6 00 00 41 8E 00 24
+
+. 0 25472954 16
+. 38 63 00 04 83 89 02 20 2F 1C 00 00 41 9A 00 08
+
+. 0 25472968 12
+. 81 29 00 0C 2C 09 00 00 40 82 FF E8
+
+. 0 25472958 12
+. 83 89 02 20 2F 1C 00 00 41 9A 00 08
+
+. 0 25472964 16
+. 95 23 00 08 81 29 00 0C 2C 09 00 00 40 82 FF E8
+
+. 0 25472974 4
+. 48 00 B3 59
+
+. 0 2547DCCC 12
+. 94 21 FF E0 7D 88 02 A6 48 01 93 2D
+
+. 0 2547DCD8 72
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 93 81 00 10 38 60 00 00 93 A1 00 14 7D 88 03 A6 83 7E 04 C8 3B 80 00 20 93 E1 00 1C 3B A0 00 00 81 3B 04 10 3B E0 00 00 39 80 00 01 38 A9 00 08 80 05 00 0C 2F 80 00 00 41 9E 01 04
+
+. 0 2547DD20 8
+. 38 C0 00 08 48 00 00 58
+
+. 0 2547DD7C 36
+. 7D 66 2A 14 81 4B 00 04 81 6A 02 24 81 0A 02 28 7C 1C 58 40 38 8B FF FF 7C E8 00 D0 7C E8 20 38 40 80 00 08
+
+. 0 2547DDA4 16
+. 80 EA 02 20 7C 9F E8 50 7C 87 20 40 40 A5 FF 78
+
+. 0 2547DDB4 40
+. 7D 66 2A 14 81 4B 00 04 81 6A 02 24 7C CB 1A 14 38 06 FF FF 7D 20 5B 96 7D 29 59 D6 7C E3 48 50 7C 07 40 40 40 80 00 08
+
+. 0 2547DDE0 20
+. 7D 68 48 50 7C E3 58 50 91 6A 02 2C 7C 87 20 40 40 85 00 0C
+
+. 0 2547DDFC 36
+. 80 6A 02 20 39 8C 00 01 55 86 18 38 7D 43 4A 14 7C 68 50 50 7D 06 2A 14 80 88 00 04 2F 04 00 00 40 9A FF 60
+
+. 0 2547DE20 48
+. 3B A3 06 9F 93 9B 04 20 57 A5 00 34 90 7B 04 1C 90 BB 04 18 83 81 00 10 83 61 00 0C 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 25472978 4
+. 48 00 B5 7D
+
+. 0 2547DEF4 12
+. 94 21 FF E0 7C 68 02 A6 48 01 91 05
+
+. 0 2547DF00 64
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 90 61 00 24 93 A1 00 14 83 9E 04 C8 93 61 00 0C 80 7C 04 20 80 9C 04 18 38 A3 04 6F 7D 23 00 D0 7C A0 48 38 93 E1 00 1C 7F A4 02 14 7F A4 EB 78 48 01 9A E1
+
+. 0 2547DF40 24
+. 38 A0 04 70 7C 7F 1B 79 38 80 00 00 7F FB FB 78 7C 1F EA 14 41 82 00 4C
+
+. 0 2547DF58 16
+. 80 FC 04 18 7F A7 00 50 38 7D FB 90 48 00 57 E1
+
+. 0 254837BC 20
+. 90 88 FF FC 90 88 FF F8 90 88 FF F4 94 88 FF F0 40 9D 00 08
+
+. 0 25483894 20
+. 7C 00 37 EC 7C 07 37 EC 38 C6 00 40 2A 85 00 00 41 82 00 7C
+
+. 0 2547DF68 20
+. 80 DC 04 08 38 80 00 04 38 66 00 10 3B E6 00 0E 48 01 9A B5
+
+. 0 2547DF7C 12
+. 2C 83 00 00 38 03 00 04 41 86 00 44
+
+. 0 2547DF88 24
+. 93 E3 00 00 90 1D FF FC 2F 9D 00 00 7F BF EB 78 7F 63 DB 78 41 9E 00 34
+
+. 0 2547DFA0 40
+. 81 01 00 24 7F E3 FB 78 83 61 00 0C 83 81 00 10 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547297C 8
+. 7C 71 1B 79 41 82 0E 04
+
+. 0 25472984 28
+. 81 3F 00 30 39 54 01 58 83 11 FF FC 2F 89 00 00 91 5F 00 78 93 0E 04 24 40 9E 0E 38
+
+. 0 254729A0 16
+. 80 74 01 3C 2E 12 00 00 2F 83 00 00 41 9E 01 18
+
+. 0 25472AC4 8
+. 80 6E 01 B8 48 00 94 7D
+
+. 0 2547BF44 12
+. 94 21 FF F0 7D 88 02 A6 48 01 B0 B5
+
+. 0 2547BF50 36
+. 93 C1 00 08 7F C8 02 A6 7C 68 1B 78 7D 88 03 A6 81 5E 05 18 80 0A 00 08 7D 43 53 78 2F 80 00 00 40 9E 00 24
+
+. 0 2547BF74 44
+. 81 3E 04 C8 38 A0 00 01 80 9E 05 08 81 69 00 00 90 AA 00 00 91 0A 00 10 91 6A 00 04 90 8A 00 08 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 25472ACC 16
+. 81 34 00 74 7C 7A 1B 78 2F 09 00 00 41 9A 00 08
+
+. 0 25472ADC 16
+. 90 69 00 04 81 2E 02 2C 2F 89 00 00 41 9E 00 08
+
+. 0 25472AF0 24
+. 82 5F 00 78 C8 14 01 58 92 4E 00 08 D8 0F 00 14 92 4E 00 10 41 92 08 44
+
+. 0 25473348 40
+. 81 74 00 0C 7E 9D A3 78 82 0F 00 50 2C 0B 00 00 82 EF 00 28 31 50 FF FF 7F 6A 81 10 7E F3 DB 78 92 6F 00 28 41 82 00 18
+
+. 0 25473370 20
+. 7D 60 5B 78 7C 1D 03 78 80 1D 00 0C 2F 00 00 00 40 9A FF F4
+
+. 0 25473374 16
+. 7C 1D 03 78 80 1D 00 0C 2F 00 00 00 40 9A FF F4
+
+. 0 25473384 16
+. 82 DD 00 1C 81 36 00 04 2F 89 00 00 40 9E 04 20
+
+. 0 254737B0 20
+. 38 00 00 01 90 09 00 08 81 29 00 04 2C 89 00 00 41 86 FB D4
+
+. 0 25473394 12
+. 3B 8E 01 B8 7D 9C E8 00 41 8E 00 18
+
+. 0 254733B4 12
+. 83 BD 00 10 2E 1D 00 00 40 92 FF C8
+
+. 0 254733A0 20
+. 80 9D 01 C0 7F A3 EB 78 80 AF 00 28 7F 66 DB 78 48 00 62 11
+
+. 0 254795C0 20
+. 7C E8 02 A6 94 21 FF 50 7D 80 00 26 93 E1 00 AC 48 01 DA 31
+
+. 0 254795D4 108
+. 7C 3F 0B 78 90 E1 00 B4 80 03 01 80 92 21 00 74 3A 20 00 00 74 08 20 00 93 61 00 9C 93 81 00 A0 7C DB 33 78 93 C1 00 A8 7C BC 2B 78 92 01 00 70 7F C8 02 A6 92 41 00 78 92 61 00 7C 92 81 00 80 92 A1 00 84 92 C1 00 88 92 E1 00 8C 93 01 00 90 93 21 00 94 93 41 00 98 93 A1 00 A4 91 81 00 6C 90 9F 00 10 90 7F 00 0C 40 82 01 18
+
+. 0 25479640 8
+. 2D 86 00 00 41 8E 01 6C
+
+. 0 254797B0 20
+. 80 A3 00 80 30 85 FF FF 7C 84 21 10 7F 9C 20 38 4B FF FE 88
+
+. 0 25479648 16
+. 83 5E 04 F4 80 DA 00 00 70 C9 00 20 40 82 07 B4
+
+. 0 25479658 16
+. 80 7F 00 0C 81 43 00 78 2E 0A 00 00 40 92 06 C0
+
+. 0 25479668 32
+. 80 C3 00 34 7F 84 E3 78 7F 65 DB 78 3B 80 00 01 83 A6 00 04 3B 60 00 00 93 BF 00 08 48 00 71 AD
+
+. 0 25479688 44
+. 83 BF 00 0C 38 C0 00 00 93 9F 00 34 81 7D 00 3C 93 7F 00 30 2C 8B 00 00 93 7F 00 40 93 7F 00 28 93 7F 00 3C 90 DF 00 24 41 86 00 18
+
+. 0 254796B4 28
+. 81 0B 00 04 80 9D 00 40 91 1F 00 20 80 C4 00 04 90 DF 00 24 2E 03 00 00 41 92 00 10
+
+. 0 254796DC 48
+. 82 1E 04 C8 3A BF 00 08 3A 40 00 02 80 B5 00 20 7F B6 EB 78 81 75 00 18 2C 05 00 00 81 15 00 1C 7D 69 5B 78 83 3D 00 00 7F 0B 42 14 41 82 00 BC
+
+. 0 254797C4 24
+. 81 5D 00 C0 38 E0 00 00 80 7D 00 38 2F 8A 00 00 82 63 00 04 41 9E 00 08
+
+. 0 254797DC 28
+. 80 EA 00 04 3C 00 AA AA 60 0C AA AB 7E E8 60 16 56 E9 E8 FE 7C 89 38 40 40 85 00 08
+
+. 0 254797F8 24
+. 7C E9 3B 78 3B 70 01 B8 1C E9 00 0C 7E 1D D8 00 7F 8B 3A 14 41 92 00 18
+
+. 0 25479810 8
+. 2F 19 00 00 40 9A 01 CC
+
+. 0 25479818 12
+. 80 DD 01 1C 2C 06 00 00 41 82 01 C0
+
+. 0 25479824 12
+. 81 3D 00 E4 2F 89 00 00 41 9E 02 80
+
+. 0 25479830 12
+. 7C 9C C0 40 82 89 00 04 40 A4 FE E8
+
+. 0 2547983C 68
+. 81 9C 00 04 81 56 01 88 55 97 C2 3E 55 9D 06 3E 56 EB 08 3C 2E 1D 00 16 7C AB A2 2E 56 E3 20 36 81 7C 00 00 7F 63 9A 14 54 BA 23 76 93 7F 00 50 7D 4A D2 14 7E EB CA 14 7F 66 DB 78 3B 40 00 00 41 92 01 DC
+
+. 0 25479880 8
+. 2F 1D 00 00 41 9A 01 48
+
+. 0 25479888 12
+. 88 9B 00 0C 54 88 E1 3F 41 82 06 2C
+
+. 0 25479894 12
+. 8B 5B 00 0C 57 49 E1 3F 41 82 01 B4
+
+. 0 254798A0 28
+. 80 9F 00 0C 2E 1D 00 15 2C 9D 00 0A 2F 1D 00 13 80 C4 02 08 7F 86 D8 00 41 9E 06 2C
+
+. 0 254798BC 28
+. 7C C0 00 26 54 C6 9F FE 39 00 00 00 7F 40 00 26 57 5A 3F FE 7C CB D3 79 40 82 01 30
+
+. 0 254798D8 36
+. 38 7D FF BC 6B A5 00 02 21 65 00 00 7C AB 29 14 21 23 00 0A 39 20 00 00 7D 29 49 14 7D 2B 2B 79 40 82 01 0C
+
+. 0 254798FC 40
+. 7C 00 00 26 54 00 DF FE 2C 8A 00 00 93 64 02 08 38 E0 00 00 54 0C 08 3C 39 60 00 01 7D 88 43 78 91 04 02 0C 41 86 00 18
+
+. 0 25479924 12
+. 83 4A 00 04 2E 1A 00 00 41 92 00 0C
+
+. 0 25479930 44
+. 7D 47 53 78 39 60 00 00 80 DF 00 50 7D 69 5B 78 81 5F 00 08 80 A6 00 00 80 DF 00 10 7C 65 52 14 38 BF 00 50 39 40 00 00 4B FF EC 4D
+
+. 0 254785A4 16
+. 7C 08 02 A6 7D 80 00 26 94 21 FF 60 48 01 EA 51
+
+. 0 254785B4 124
+. 92 61 00 6C 7C 93 23 78 92 81 00 70 7D 14 43 78 92 A1 00 74 7C D5 33 78 92 C1 00 78 7C B6 2B 78 92 E1 00 7C 7D 37 4B 78 93 01 00 80 7C F8 3B 78 93 21 00 84 7C 79 1B 78 93 41 00 88 7D 5A 53 78 93 81 00 90 3B 80 00 00 93 C1 00 98 7F C8 02 A6 93 E1 00 9C 7C 3F 0B 78 92 21 00 64 92 41 00 68 93 61 00 8C 93 A1 00 94 90 01 00 A4 91 81 00 60 88 03 00 00 2F 80 00 00 41 9E 00 18
+
+. 0 25478630 20
+. 89 63 00 01 7C 1C 03 78 39 23 00 01 2C 0B 00 00 40 82 01 5C
+
+. 0 2547879C 24
+. 89 49 00 01 54 03 20 36 7F 83 5A 14 39 29 00 01 2C 8A 00 00 41 A6 FE 94
+
+. 0 254787B4 24
+. 89 69 00 01 57 84 20 36 7F 84 52 14 39 29 00 01 2E 0B 00 00 41 B2 FE 7C
+
+. 0 254787CC 24
+. 89 49 00 01 57 85 20 36 7F 85 5A 14 39 29 00 01 2F 0A 00 00 41 BA FE 64
+
+. 0 25478644 48
+. 82 5E 04 C8 2E 1A 00 00 3B 60 00 00 39 40 00 00 83 B2 01 A8 93 7F 00 48 3B 60 00 00 39 7D 00 01 91 5F 00 4C 91 72 01 A8 7E BD AB 78 40 92 05 38
+
+. 0 25478674 16
+. 81 75 00 00 2C 8B 00 00 7F 68 DB 78 41 86 00 4C
+
+. 0 25478684 44
+. 80 F5 00 00 3A 3F 00 18 80 B6 00 00 7F 23 CB 78 7F 84 E3 78 38 DF 00 48 7F 09 C3 78 7E EA BB 78 93 41 00 08 92 81 00 0C 4B FF FA FD
+
+. 0 254781A8 12
+. 7D 68 02 A6 94 21 FF 90 48 01 EE 51
+
+. 0 254781B4 140
+. 93 C1 00 68 80 01 00 78 7F C8 02 A6 91 61 00 74 7D 80 00 26 91 E1 00 2C 92 01 00 30 81 E7 00 04 82 07 00 00 91 C1 00 28 7C 8E 23 78 92 21 00 34 92 A1 00 44 7D 35 4B 78 93 41 00 58 31 20 FF FF 7E 29 01 10 7D 1A 43 78 92 41 00 38 92 61 00 3C 92 81 00 40 92 C1 00 48 92 E1 00 4C 93 01 00 50 93 21 00 54 93 61 00 5C 93 81 00 60 93 A1 00 64 93 E1 00 6C 91 81 00 24 90 61 00 08 90 A1 00 0C 90 C1 00 10 91 41 00 14 48 00 00 10
+
+. 0 2547824C 44
+. 57 45 10 3A 80 81 00 78 7D 25 80 2E 3A 80 00 00 3A 60 00 00 83 89 00 14 7F 83 22 78 21 63 00 00 7C 6B 19 14 7E 29 18 39 40 82 FF CC
+
+. 0 25478278 12
+. 80 C1 00 7C 54 CB FF FF 41 82 00 10
+
+. 0 25478290 16
+. 81 5E 04 F4 81 0A 00 00 71 09 00 08 40 82 02 48
+
+. 0 254782A0 56
+. 80 1C 01 6C 83 3C 01 70 7D 8E 03 96 81 7C 00 38 80 7C 00 34 83 0B 00 04 82 43 00 04 82 DC 01 98 7F EC 01 D6 7F BF 70 50 57 BB 10 3A 7F FB C8 2E 2E 1F 00 00 41 92 00 F8
+
+. 0 254783CC 24
+. 7F 00 00 26 57 18 9F FE 31 75 FF FF 7E CB A9 10 7F 0B B0 39 41 A2 FE 60
+
+. 0 254783E4 12
+. 80 75 00 0C 2E 03 00 00 41 92 FE 54
+
+. 0 25478240 12
+. 3B 5A 00 01 7F 9A 78 40 40 9C 01 DC
+
+. 0 254782D8 8
+. 2D 96 00 00 48 00 00 28
+
+. 0 25478304 20
+. 57 E9 20 36 7F A9 C2 14 80 9D 00 04 2C 84 00 00 41 A6 FF CC
+
+. 0 25478318 12
+. A0 FD 00 0E 2C 07 00 00 40 82 00 10
+
+. 0 25478330 16
+. 8B 7D 00 0C 57 60 07 3E 2F 80 00 02 40 9D 00 0C
+
+. 0 25478348 12
+. 81 41 00 0C 7F 1D 50 00 41 9A 00 1C
+
+. 0 25478354 16
+. 7E E9 C0 2E 80 81 00 08 7C 77 92 14 48 00 AB 81
+
+. 0 25478364 8
+. 2C 03 00 00 40 82 FF 88
+
+. 0 254782F0 20
+. 83 BC 01 74 57 E7 10 3A 7F E7 E8 2E 2E 1F 00 00 41 92 01 08
+
+. 0 25478408 8
+. 2D 94 00 01 40 8E FF C0
+
+. 0 2547836C 8
+. 2F 95 00 00 41 9E 01 30
+
+. 0 25478374 4
+. 41 8E 00 44
+
+. 0 25478378 36
+. 57 F9 08 3C 81 5C 01 88 7E F9 B2 2E 81 35 00 04 56 EB 23 76 7D 8B 52 14 83 2C 00 04 7C 99 48 00 41 86 00 F0
+
+. 0 25478488 12
+. 7C 6B 50 2E 80 95 00 00 48 00 AA 51
+
+. 0 25478494 8
+. 2F 03 00 00 40 9A FF 04
+
+. 0 2547849C 4
+. 4B FF FF 1C
+
+. 0 254783B8 12
+. 57 60 E1 3E 2F 80 00 01 41 9E 01 7C
+
+. 0 2547853C 20
+. 81 C1 00 10 38 60 00 01 93 8E 00 04 93 AE 00 00 4B FF FE DC
+
+. 0 25478428 96
+. 81 E1 00 74 81 81 00 24 7D E8 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 254786B0 12
+. 2C 03 00 00 39 00 00 00 41 81 00 14
+
+. 0 254786CC 12
+. 81 5F 00 48 2F 8A 00 00 41 9E 05 04
+
+. 0 254786D8 12
+. 80 B6 00 00 2F 85 00 00 41 9E 01 40
+
+. 0 254786E4 24
+. 88 E5 00 0D 39 20 00 00 2E 09 00 00 54 E6 07 BE 2F 06 00 03 41 9A 02 34
+
+. 0 254786FC 28
+. 3B 7F 00 48 83 9B 00 04 3D 80 80 00 80 1C 01 80 54 1D 00 02 7F 1D 60 00 41 9A 05 BC
+
+. 0 25478718 32
+. 82 BB 00 04 83 BE 04 F4 83 55 01 80 67 57 00 10 92 F5 01 80 80 1D 00 00 70 09 04 04 40 82 02 A0
+
+. 0 25478738 100
+. 80 7B 00 04 80 1F 00 48 90 16 00 00 80 E1 00 00 82 C7 00 04 81 07 FF C0 7E C8 03 A6 82 27 FF C4 82 47 FF C8 7D 00 81 20 82 67 FF CC 82 87 FF D0 82 A7 FF D4 82 C7 FF D8 82 E7 FF DC 83 07 FF E0 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+
+. 0 2547995C 36
+. 81 1F 00 50 80 9F 00 0C 7C 7A 1B 78 7D 06 43 78 90 64 02 10 91 04 02 14 2F 06 00 00 39 20 00 00 41 9A 00 10
+
+. 0 25479980 28
+. 80 FA 00 00 81 66 00 04 7D 27 5A 14 2F 9D 00 44 80 1C 00 08 7D 29 02 14 41 9E 01 00
+
+. 0 2547999C 4
+. 41 9D 00 D0
+
+. 0 254799A0 8
+. 2F 9D 00 01 41 9E 00 EC
+
+. 0 25479A90 8
+. 91 37 00 00 4B FF FF 38
+
+. 0 254799CC 12
+. 3B 9C 00 0C 7E 1C C0 40 41 90 FE 68
+
+. 0 254787E4 24
+. 89 69 00 01 57 86 20 36 7F 86 52 14 39 49 00 01 2F 8B 00 00 41 BE FE 4C
+
+. 0 254787FC 36
+. 57 92 20 36 7E 32 5A 14 8D 6A 00 01 56 28 00 06 2F 8B 00 00 7E 27 42 78 55 09 46 3E 7C FC 4A 78 4B FF FF DC
+
+. 0 254787F8 4
+. 41 BE FE 4C
+
+. 0 25478324 12
+. 81 01 00 7C 71 00 00 01 40 A2 FF C4
+
+. 0 25479EE4 28
+. 7D 20 00 26 55 29 9F FE 39 60 00 00 7C A0 00 26 54 A5 3F FE 7D 28 2B 79 40 82 00 58
+
+. 0 25479F00 36
+. 38 1D FF BC 6B A3 00 02 21 03 00 00 7C 68 19 14 21 80 00 0A 39 80 00 00 7D 8C 61 14 7D 88 1B 79 40 82 00 34
+
+. 0 25479F24 12
+. 2F 1D 00 13 81 24 02 0C 41 9A 00 B4
+
+. 0 25479F30 8
+. 7F 8B 48 00 40 9E F9 88
+
+. 0 25479F38 28
+. 81 70 01 AC 80 C4 02 14 83 44 02 10 39 4B 00 01 91 50 01 AC 90 DF 00 50 4B FF FA 24
+
+. 0 25479974 12
+. 2F 06 00 00 39 20 00 00 41 9A 00 10
+
+. 0 254782E0 16
+. 88 DD 00 0C 54 C5 07 3E 2F 05 00 06 41 9A 00 2C
+
+. 0 254799A8 8
+. 2C 1D 00 14 41 82 00 E4
+
+. 0 25479EBC 12
+. A1 1B 00 0E 2F 88 00 00 41 BE F9 D0
+
+. 0 25479A50 8
+. 83 5F 00 0C 4B FF FF 20
+
+. 0 25479A98 8
+. 2F 1A 00 00 41 BA FF 30
+
+. 0 25479AA0 12
+. 83 7A 02 30 93 77 00 00 4B FF FF 24
+
+. 0 25479A6C 8
+. 2C 9D 00 49 41 86 02 38
+
+. 0 25479CA8 8
+. 2C 1A 00 00 41 A2 FD 20
+
+. 0 25479CB0 12
+. 81 7A 02 2C 2C 8B FF FF 41 86 03 4C
+
+. 0 25479CBC 24
+. 83 46 00 04 7D 8B D2 14 7C 6C 02 14 39 23 90 00 91 37 00 00 4B FF FC FC
+
+. 0 254783C4 8
+. 2C 00 00 02 41 82 01 64
+
+. 0 2547852C 16
+. 82 9E 04 F4 82 74 00 30 2C 93 00 00 40 86 00 18
+
+. 0 254783F0 8
+. 7F 84 E3 78 48 00 42 E1
+
+. 0 254783F8 8
+. 2F 03 00 00 41 9A FE 44
+
+. 0 25479938 36
+. 80 DF 00 50 7D 69 5B 78 81 5F 00 08 80 A6 00 00 80 DF 00 10 7C 65 52 14 38 BF 00 50 39 40 00 00 4B FF EC 4D
+
+. 0 25478424 100
+. 38 60 00 00 81 E1 00 74 81 81 00 24 7D E8 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 254786BC 4
+. 41 80 01 6C
+
+. 0 254786C0 12
+. 84 FD 00 04 2F 07 00 00 40 9A FF C4
+
+. 0 25478BD8 12
+. 81 36 00 00 2C 09 00 00 40 82 01 B8
+
+. 0 25478D98 16
+. 8B A9 00 0C 57 BB E1 3E 2C 9B 00 02 40 86 FE 40
+
+. 0 25478DA8 12
+. 38 60 00 00 38 00 00 00 4B FF F9 90
+
+. 0 25478740 92
+. 90 16 00 00 80 E1 00 00 82 C7 00 04 81 07 FF C0 7E C8 03 A6 82 27 FF C4 82 47 FF C8 7D 00 81 20 82 67 FF CC 82 87 FF D0 82 A7 FF D4 82 C7 FF D8 82 E7 FF DC 83 07 FF E0 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+
+. 0 2547998C 16
+. 2F 9D 00 44 80 1C 00 08 7D 29 02 14 41 9E 01 00
+
+. 0 254784A0 4
+. 41 AE FF 18
+
+. 0 254784A4 12
+. 80 61 00 14 54 69 FF FF 41 82 00 6C
+
+. 0 25478518 20
+. 57 E9 08 3C 7C C9 B2 2E 54 C5 04 7E 2F 85 00 02 4B FF FF 98
+
+. 0 254784C0 4
+. 40 BD FE F8
+
+. 0 25479A04 8
+. 39 00 00 01 4B FF FE F4
+
+. 0 254799B0 28
+. 7F 45 D3 78 7F 67 DB 78 7E E8 BB 78 7F AA EB 78 7E C3 B3 78 7F 84 E3 78 48 00 72 E1
+
+. 0 25480FF0 24
+. 3D 3A FE 00 3D 60 FC 00 38 69 00 03 61 60 00 02 7C 03 00 40 40 81 01 94
+
+. 0 25481198 28
+. 81 61 00 08 81 4B 00 2C 81 0A 00 04 7C E8 28 50 7C EA 16 70 28 8A 40 11 41 85 00 A0
+
+. 0 254811B4 56
+. 81 6B 00 28 3C 60 AA AA 60 60 AA AB 3B AA FF EE 83 EB 00 04 57 AB F8 7E 7F 7F 00 16 57 7C E8 FE 2B 1C 20 00 57 87 08 3C 39 27 00 12 55 2C 10 3A 7C CC 42 14 40 99 00 14
+
+. 0 254811FC 60
+. 1F EA FF FC 55 6C 10 3A 55 9B 04 3E 7F 4C 31 2E 3B BF FF FC 67 6A 39 60 57 BC 01 BA 91 45 00 00 67 87 48 00 94 E5 00 04 7C 00 28 6C 7C 00 04 AC 80 A1 00 0C 39 05 00 04 7C 00 47 AC
+
+. 0 25481238 8
+. 80 A1 00 0C 4B FF FB C4
+
+. 0 254799D8 8
+. 83 BF 00 0C 4B FF FD 44
+
+. 0 25479720 12
+. 36 52 FF FF 3A B5 00 0C 40 80 FF C0
+
+. 0 254796E8 36
+. 80 B5 00 20 7F B6 EB 78 81 75 00 18 2C 05 00 00 81 15 00 1C 7D 69 5B 78 83 3D 00 00 7F 0B 42 14 41 82 00 BC
+
+. 0 2547970C 8
+. 7F 8B C0 40 40 9C 00 10
+
+. 0 254797FC 20
+. 3B 70 01 B8 1C E9 00 0C 7E 1D D8 00 7F 8B 3A 14 41 92 00 18
+
+. 0 2547972C 4
+. 40 8E 07 10
+
+. 0 25479730 20
+. 82 7D 01 80 2F 91 00 00 66 72 20 00 92 5D 01 80 40 9E 05 C0
+
+. 0 25479744 16
+. 80 7F 00 0C 82 A3 02 38 2F 95 00 00 40 9E 02 BC
+
+. 0 25479A0C 4
+. 4B FF FB 1D
+
+. 0 25479528 12
+. 94 21 FF F0 7D 88 02 A6 48 01 DA D1
+
+. 0 25479534 80
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 91 81 00 14 7C 7F 1B 78 80 A3 00 00 81 3E 04 F4 81 03 02 34 80 E3 02 38 80 C9 00 04 7C 65 42 14 7C 83 3A 14 38 A0 00 01 7D 46 00 D0 7C 60 50 38 7C 8B 50 38 7F 80 58 00 7C 03 03 78 7C 80 58 50 41 9E 00 10
+
+. 0 25479584 4
+. 48 00 92 35
+
+. 0 25479588 8
+. 2C 03 00 00 41 80 00 1C
+
+. 0 25479590 24
+. 81 61 00 14 83 C1 00 08 83 E1 00 0C 7D 68 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 25479A10 4
+. 4B FF FD 44
+
+. 0 25479754 92
+. 80 81 00 00 83 24 00 04 81 84 FF BC 7F 28 03 A6 82 04 FF C0 82 24 FF C4 7D 81 81 20 82 44 FF C8 82 64 FF CC 82 84 FF D0 82 A4 FF D4 82 C4 FF D8 82 E4 FF DC 83 04 FF E0 83 24 FF E4 83 44 FF E8 83 64 FF EC 83 84 FF F0 83 A4 FF F4 83 C4 FF F8 83 E4 FF FC 7C 81 23 78 4E 80 00 20
+
+. 0 25480904 12
+. 2F 85 00 00 3B 9F 00 18 41 9E 02 64
+
+. 0 25480B70 8
+. 83 1E 04 CC 4B FF FD A0
+
+. 0 25480914 4
+. 40 9E 02 C0
+
+. 0 25480918 88
+. 3C 98 FE 00 3C 00 FC 00 38 64 00 03 60 06 00 02 39 80 00 00 7F 03 30 40 61 8A 80 00 7C BA 00 D0 7D 1A 50 50 54 A9 04 3E 55 07 84 3E 3D 60 55 6C 3C 80 7D 6C 64 E0 3D 6B 65 23 39 6B 61 66 08 3C 60 8C 5A 14 90 1C 00 00 90 7C 00 04 90 DC 00 08 91 9C 00 0C 40 99 02 0C
+
+. 0 25480B78 92
+. 3C 98 00 01 3C 7D 00 01 38 C4 80 00 39 83 80 00 54 C9 84 3E 57 A7 04 3E 57 08 04 3E 55 8B 84 3E 3D 40 7D 89 3C A0 4E 80 65 18 39 80 65 3D 3D 8C 61 44 03 A6 64 E3 39 80 65 66 3D 8C 60 A0 04 20 90 1C 00 24 93 1C 00 10 93 BC 00 14 90 9C 00 18 90 7C 00 1C 90 DC 00 20 4B FF FD D0
+
+. 0 254809A0 16
+. 39 00 00 00 38 80 00 12 7F 88 D8 40 40 9C 00 54
+
+. 0 254809B0 80
+. 38 E0 FF D4 55 0A 14 3A 39 08 00 01 7D 7B 40 10 7D 6B 59 10 7D 6B 00 D0 21 88 1F FF 39 80 00 00 7D 8C 61 14 7D 69 60 39 54 98 10 3A 54 E5 01 BA 65 40 39 60 64 BD 48 00 7F 98 FA 14 7C 18 F9 2E 38 84 00 02 93 BC 00 04 38 E7 FF F8 40 82 FF B8
+
+. 0 254809B4 76
+. 55 0A 14 3A 39 08 00 01 7D 7B 40 10 7D 6B 59 10 7D 6B 00 D0 21 88 1F FF 39 80 00 00 7D 8C 61 14 7D 69 60 39 54 98 10 3A 54 E5 01 BA 65 40 39 60 64 BD 48 00 7F 98 FA 14 7C 18 F9 2E 38 84 00 02 93 BC 00 04 38 E7 FF F8 40 82 FF B8
+
+. 0 25480A00 8
+. 7C 88 D8 40 40 84 00 74
+
+. 0 25480A98 12
+. 80 0B 00 00 2F 00 00 00 40 9A 00 C4
+
+. 0 25480B64 12
+. 7C 0A 16 70 7D 4A 01 94 4B FF FF 38
+
+. 0 25480AA8 12
+. 7E EB BB 78 39 20 00 00 48 00 00 14
+
+. 0 254796D0 12
+. 81 7D 00 70 2F 0B 00 00 40 9A 03 3C
+
+. 0 25479A14 60
+. 83 5D 00 7C 83 3D 00 28 82 9A 00 04 82 BF 00 20 92 9F 00 2C 82 59 00 04 7D 55 32 14 7D 35 A0 50 7E 14 92 14 91 3F 00 24 7E 70 50 50 92 5F 00 30 92 7F 00 3C 92 1F 00 38 4B FF FC 90
+
+. 0 25479714 12
+. 39 29 00 0C 7F 89 C0 40 41 9C FF F8
+
+. 0 254799E0 8
+. 7F 8B E0 40 40 BC FE 40
+
+. 0 254799E8 28
+. 81 2B 00 08 80 8B 00 00 39 6B 00 0C 7F 8B E0 40 7D 09 CA 14 7D 04 C9 2E 4B FF FF E4
+
+. 0 254799E4 4
+. 40 BC FE 40
+
+. 0 254797E0 24
+. 3C 00 AA AA 60 0C AA AB 7E E8 60 16 56 E9 E8 FE 7C 89 38 40 40 85 00 08
+
+. 0 254733C0 4
+. 48 00 C2 25
+
+. 0 2547F5E4 12
+. 94 21 FF F0 38 21 00 10 4E 80 00 20
+
+. 0 254733C4 12
+. 80 CE 01 A4 2C 06 00 00 40 82 09 4C
+
+. 0 254733D0 12
+. 81 6E 03 30 2B 0B 00 01 40 99 F7 84
+
+. 0 254733DC 20
+. 80 94 01 C0 7F 83 E3 78 38 A0 00 00 38 C0 00 00 48 00 61 D5
+
+. 0 254733F0 4
+. 4B FF F7 6C
+
+. 0 25472B5C 8
+. 7E 23 8B 78 48 00 B4 79
+
+. 0 2547DFD8 20
+. 94 21 FF D0 7C 08 02 A6 93 21 00 14 7C 79 1B 79 48 01 90 19
+
+. 0 2547DFEC 48
+. 93 C1 00 28 92 E1 00 0C 7F C8 02 A6 93 01 00 10 38 60 00 00 93 41 00 18 93 61 00 1C 93 81 00 20 93 A1 00 24 93 E1 00 2C 90 01 00 34 41 82 00 D8
+
+. 0 2547E01C 40
+. 83 1E 04 C8 3B 40 00 00 82 F9 FF FC 83 78 04 10 81 7B 00 00 20 1A 00 00 7F E0 D1 14 7D 3A FA 14 7F 8B F8 40 40 9D 00 64
+
+. 0 2547E044 28
+. 57 E3 18 38 7D 23 DA 14 3B 89 00 0C 80 98 04 08 7D 3A FA 14 7C 89 20 40 41 85 00 48
+
+. 0 2547E060 12
+. 81 3C 00 00 2F 09 00 00 41 9A 00 28
+
+. 0 2547E06C 20
+. 81 69 02 2C 2C 0B FF FF 7D 4B CA 14 7D 43 53 78 40 82 00 40
+
+. 0 2547E0BC 32
+. 81 69 02 30 55 68 10 3A 7D 48 B9 2E 80 A9 02 1C 80 E9 02 20 80 89 02 18 7F A5 38 50 48 00 59 49
+
+. 0 2547E0DC 12
+. 7F A5 EB 78 38 80 00 00 48 00 56 61
+
+. 0 25483920 12
+. 28 85 00 10 7C C6 2A 14 41 9F 00 20
+
+. 0 25483934 4
+. 40 84 00 2C
+
+. 0 25483938 4
+. 4C 9C 00 20
+
+. 0 2548393C 12
+. 90 86 FF FC 90 86 FF F8 4E 80 00 20
+
+. 0 2547E0E8 4
+. 4B FF FF A4
+
+. 0 2547E08C 20
+. 81 7B 00 00 3B FF 00 01 3B 9C 00 08 7F 8B F8 40 41 9D FF B4
+
+. 0 2547E050 16
+. 80 98 04 08 7D 3A FA 14 7C 89 20 40 41 85 00 48
+
+. 0 2547E0A4 16
+. 81 58 04 08 7D 3A 4B 78 7C 8A 48 40 40 85 00 3C
+
+. 0 2547E0EC 56
+. 7F 23 CB 78 81 81 00 34 82 E1 00 0C 83 01 00 10 7D 88 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 25472B64 16
+. 38 51 70 00 3B 00 00 01 93 1A 00 0C 48 00 94 31
+
+. 0 2547BFA0 12
+. 94 21 FF F0 38 21 00 10 4E 80 00 20
+
+. 0 25472B74 4
+. 48 00 55 BD
+
+. 0 25478130 12
+. 94 21 FF F0 7C 68 02 A6 48 01 EE C9
+
+. 0 2547813C 40
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 61 00 14 38 00 FF FD 83 FE 02 54 80 7F 00 00 39 23 FF FF 7F 89 00 40 40 9D 00 1C
+
+. 0 2547817C 12
+. 80 BE 02 5C 80 85 00 00 48 00 A6 25
+
+. 0 254827A8 8
+. 38 00 00 5B 44 00 00 02
+
+. 0 254827B0 4
+. 4C A3 00 20
+
+. 0 25478188 32
+. 38 80 00 00 80 C1 00 14 90 9F 00 00 83 C1 00 08 7C C8 03 A6 83 E1 00 0C 38 21 00 10 4E 80 00 20
+
+. 0 25472B78 100
+. 81 01 00 00 82 28 00 04 81 88 FF B4 7E 28 03 A6 81 C8 FF B8 81 E8 FF BC 7D 81 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+. 0 2547F324 64
+. 82 A1 02 24 80 61 01 E0 7E A8 03 A6 82 C1 01 F8 82 A1 01 F4 82 E1 01 FC 83 01 02 00 83 21 02 04 83 41 02 08 83 61 02 0C 83 81 02 10 83 A1 02 14 83 C1 02 18 83 E1 02 1C 38 21 02 20 4E 80 00 20
+
+. 0 25471A20 24
+. 81 3E 04 F4 7C 7D 1B 78 38 60 00 00 80 09 00 00 70 09 00 80 40 82 00 28
+
+. 0 25471A38 36
+. 80 81 00 24 7F A3 EB 78 83 61 00 0C 83 81 00 10 7C 88 03 A6 83 A1 00 14 83 C1 00 18 38 21 00 20 4E 80 00 20
+
+. 0 25471DD0 52
+. 82 E1 02 B4 83 01 02 90 7E E8 03 A6 83 21 02 94 82 E1 02 8C 83 41 02 98 83 61 02 9C 83 81 02 A0 83 A1 02 A4 83 C1 02 A8 83 E1 02 AC 38 21 02 B0 4E 80 00 20
+
+. 0 254804E8 4
+. 48 01 6B 19
+
+. 0 254804EC 48
+. 7F E8 02 A6 7C 7E 1B 78 83 9F 04 98 83 BF 04 D0 83 7F 04 DC 80 7C 00 00 80 9D 00 00 80 BB 00 00 54 86 10 3A 7C C5 32 14 38 C6 00 04 4B FF B2 29
+
+. 0 2547B740 12
+. 94 21 FF C0 7C 08 02 A6 48 01 B8 B9
+
+. 0 2547B74C 96
+. 93 C1 00 38 7F C8 02 A6 92 C1 00 18 90 01 00 44 93 E1 00 3C 82 DE 04 C8 92 E1 00 1C 7C 97 23 78 83 F6 01 A0 93 01 00 20 7C B8 2B 78 2F 9F 00 00 93 21 00 24 93 41 00 28 7C D9 33 78 93 61 00 2C 7C 7A 1B 78 93 A1 00 34 92 81 00 10 92 A1 00 14 93 81 00 30 83 A3 00 A0 83 63 00 A4 40 9E 01 9C
+
+. 0 2547B944 12
+. 80 1F 01 80 74 09 10 00 40 82 01 10
+
+. 0 2547B950 24
+. 64 09 10 00 80 9F 00 04 91 3F 01 80 88 64 00 00 2F 03 00 00 41 9A 00 F0
+
+. 0 2547B968 12
+. 81 5F 00 50 2C 8A 00 00 40 86 00 10
+
+. 0 2547B980 16
+. 81 3E 04 F4 80 C9 00 00 70 C9 00 02 40 82 01 30
+
+. 0 2547B990 8
+. 2F 0A 00 00 40 9A 00 D4
+
+. 0 2547BA68 32
+. 81 8A 00 04 7E E3 BB 78 81 1F 00 00 7F 04 C3 78 7F 25 CB 78 7C E8 62 14 7C E9 03 A6 4E 80 04 21
+
+. 0 FFDE898 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 1D
+
+. 0 FFDE8C0 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 01 08 39
+
+. 0 FFEF108 4
+. 4E 80 00 21
+
+. 0 FFDE8D4 16
+. 7F C8 02 A6 80 1E 00 0C 2F 80 00 00 41 9E 00 0C
+
+. 0 FFDE8EC 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FFDE8A8 4
+. 48 00 01 15
+
+. 0 FFDE9BC 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 80 7E 80 10 80 03 00 00 2F 80 00 00 41 9E 00 18
+
+. 0 FFDEA00 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 FFDE8AC 4
+. 48 00 06 59
+
+. 0 FFDEF04 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 80 09 FF FC 3B E9 FF FC 48 00 00 10
+
+. 0 FFDEF44 8
+. 2F 80 FF FF 40 9E FF F0
+
+. 0 FFDEF4C 24
+. 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FFDE8B0 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547BA88 4
+. 4B FF FF 10
+
+. 0 2547B998 12
+. 81 7F 00 84 2C 8B 00 00 41 86 00 BC
+
+. 0 2547BA5C 12
+. 3B 80 00 00 93 96 01 A0 4B FF FD 48
+
+. 0 2547B7AC 8
+. 2F 9D 00 00 40 9E 02 40
+
+. 0 2547B7B4 12
+. 38 60 00 00 3A 80 00 01 48 00 07 89
+
+. 0 2547BF94 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 2547B7C0 12
+. 92 83 00 0C 7C 76 1B 78 48 00 07 D9
+
+. 0 2547B7CC 16
+. 83 BA 01 5C 2F 9D 00 00 3B 9D FF FF 41 9E 00 58
+
+. 0 2547B7DC 24
+. 82 BA 01 E8 57 80 10 3A 7F F5 00 2E 80 1F 01 80 74 09 10 00 40 82 00 34
+
+. 0 2547B7F4 24
+. 64 09 10 00 80 9F 00 04 91 3F 01 80 88 64 00 00 2F 03 00 00 41 9A 01 04
+
+. 0 2547B80C 12
+. 81 5F 00 50 2C 8A 00 00 40 86 00 64
+
+. 0 2547B818 12
+. 80 BF 00 84 2F 85 00 00 40 9E 00 58
+
+. 0 2547B824 12
+. 2F 9C 00 00 3B 9C FF FF 40 9E FF B0
+
+. 0 2547B878 16
+. 80 DE 04 F4 83 66 00 00 73 69 00 02 40 82 00 9C
+
+. 0 2547B888 8
+. 2F 0A 00 00 40 9A 00 5C
+
+. 0 2547B8E8 32
+. 81 0A 00 04 7E E3 BB 78 80 FF 00 00 7F 25 CB 78 7C 87 42 14 7C 89 03 A6 7F 04 C3 78 4E 80 04 21
+
+. 0 FE9B620 12
+. 94 21 FF E0 7C 08 02 A6 48 12 C8 29
+
+. 0 FFC7E50 4
+. 4E 80 00 21
+
+. 0 FE9B62C 60
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 A1 00 14 7C BC 2B 78 93 E1 00 1C 7C 9D 23 78 81 7E 1C E4 7C 7F 1B 78 81 1E 1A A4 39 20 00 00 2F 8B 00 00 90 01 00 24 7D 0A 43 78 41 9E 00 10
+
+. 0 FE9B674 16
+. 91 2A 00 00 80 88 00 00 2C 84 00 00 40 86 00 24
+
+. 0 FE9B684 28
+. 81 3E 1B F4 81 7E 1B C8 81 49 00 00 80 AB 00 34 7D 43 53 78 7F 05 50 00 41 9A 00 08
+
+. 0 FE9B6A4 40
+. 81 9E 1A 8C 7F E3 FB 78 81 1E 1D D4 7F A4 EB 78 80 FE 1B 84 7F 85 E3 78 93 EC 00 00 93 A8 00 00 93 87 00 00 48 0B 64 21
+
+. 0 FF51AE8 20
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 7C 9D 23 79 48 07 63 59
+
+. 0 FF51AFC 20
+. 93 C1 00 18 93 E1 00 1C 7F C8 02 A6 90 01 00 24 41 82 00 3C
+
+. 0 FF51B10 20
+. 83 FD 00 00 38 80 00 2F 2F 9F 00 00 7F E3 FB 78 41 9E 00 28
+
+. 0 FF51B24 4
+. 4B FA 54 59
+
+. 0 FEF6F7C 40
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 54 9D 06 3E 2F 9D 00 00 93 C1 00 18 90 01 00 24 7C 60 1B 78 93 E1 00 1C 40 9E 00 24
+
+. 0 FEF6FC4 8
+. 3B E0 00 00 48 00 00 0C
+
+. 0 FEF6FD4 12
+. 7C 03 03 78 7F A4 EB 78 4B FF F2 F1
+
+. 0 FEF62CC 72
+. 50 84 44 2E 39 60 FF FF 50 84 80 1E 54 6A 1E F8 3C C0 FE FF 3C E0 7F 7F 54 68 00 3A 38 C6 FE FF 38 E7 7F 7F 80 A8 00 00 7D 6B 54 30 7C A5 5B 38 7C 06 2A 14 7C E9 28 F8 7C 00 48 39 7C 8C 2A 78 7D 8C 5B 38 48 00 00 20
+
+. 0 FEF6330 12
+. 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+. 0 FEF6314 20
+. 84 A8 00 04 7C 00 48 39 7C 06 2A 14 7C E9 28 F8 40 82 00 5C
+
+. 0 FEF6380 36
+. 7C E0 60 38 7C EA 63 78 7C 00 3A 14 7D 49 00 F8 7D 24 00 34 39 08 FF FC 54 84 E8 FE 7C 68 22 14 4E 80 00 20
+
+. 0 FEF6FE0 8
+. 2C 03 00 00 40 82 FF E8
+
+. 0 FEF6FCC 20
+. 7C 7F 1B 78 38 03 00 01 7C 03 03 78 7F A4 EB 78 4B FF F2 F1
+
+. 0 FEF6328 20
+. 7C 00 48 39 7C 8C 2A 78 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+. 0 FEF633C 12
+. 7C 00 48 39 38 60 00 00 4D 82 00 20
+
+. 0 FEF6FE8 32
+. 80 81 00 24 7F E3 FB 78 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FF51B28 12
+. 2C 83 00 00 38 63 00 01 41 86 00 34
+
+. 0 FF51B34 48
+. 80 9E 1C 34 90 64 00 00 80 BD 00 00 80 7E 1E 24 90 A3 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FE9B6CC 4
+. 4B FF FE 95
+
+. 0 FE9B560 12
+. 94 21 FF F0 7C 08 02 A6 48 12 C8 E9
+
+. 0 FE9B56C 36
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 81 3E 00 18 80 09 00 04 3B E9 00 04 2F 80 00 00 41 9E 00 18
+
+. 0 FE9B590 8
+. 7C 09 03 A6 4E 80 04 21
+
+. 0 FEED850 12
+. 7D 88 02 A6 94 21 FF F0 48 0D A5 F9
+
+. 0 FEED85C 40
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 1E 1D C8 81 1E 1B 00 2F 80 00 00 81 5E 1B CC 80 FE 1A E8 38 00 FF B4 41 9E 00 10
+
+. 0 FEED884 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FE9B598 12
+. 84 1F 00 04 2F 80 00 00 40 9E FF F0
+
+. 0 FE9B5A4 24
+. 80 61 00 14 83 C1 00 08 83 E1 00 0C 7C 68 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FE9B6D0 32
+. 80 C1 00 24 83 81 00 10 83 A1 00 14 7C C8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547B908 4
+. 4B FF FF 88
+
+. 0 2547B890 12
+. 81 7F 00 84 2C 8B 00 00 41 A6 FF 8C
+
+. 0 FDF9B90 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 1D
+
+. 0 FDF9BB8 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 07 94 E9
+
+. 0 FE730B0 4
+. 4E 80 00 21
+
+. 0 FDF9BCC 16
+. 7F C8 02 A6 80 1E 22 4C 2F 80 00 00 41 9E 00 0C
+
+. 0 FDF9BE4 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FDF9BA0 4
+. 48 00 01 15
+
+. 0 FDF9CB4 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 80 7E 80 10 80 03 00 00 2F 80 00 00 41 9E 00 18
+
+. 0 FDF9CF8 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 FDF9BA4 4
+. 48 03 0C 19
+
+. 0 FE2A7BC 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 80 09 FF FC 3B E9 FF FC 48 00 00 10
+
+. 0 FE2A7FC 8
+. 2F 80 FF FF 40 9E FF F0
+
+. 0 FE2A804 24
+. 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FDF9BA8 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547B90C 8
+. 75 20 C0 00 40 A2 FE FC
+
+. 0 2547B914 12
+. 2F 9C 00 00 3B 9C FF FF 4B FF FF 10
+
+. 0 2547B82C 4
+. 40 9E FF B0
+
+. 0 2547B830 72
+. 82 E1 00 44 3B 00 00 00 83 C1 00 38 7E E8 03 A6 83 E1 00 3C 93 16 00 0C 82 81 00 10 82 A1 00 14 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 38 21 00 40 48 00 07 2C
+
+. 0 2548051C 32
+. 80 7D 00 00 80 9B 00 00 54 65 10 3A 7C C4 2A 14 38 A6 00 04 84 06 00 04 2C 00 00 00 40 82 FF F8
+
+. 0 25480530 12
+. 84 06 00 04 2C 00 00 00 40 82 FF F8
+
+. 0 2548053C 40
+. 38 C6 00 04 80 FF 04 AC 7F C9 03 A6 3B E0 00 00 93 E1 00 00 7F E8 03 A6 93 E1 00 04 93 E1 00 08 93 E1 00 0C 4E 80 04 20
+
+. 0 10000860 36
+. 7C 29 0B 78 54 21 00 36 38 00 00 00 94 21 FF F0 7C 08 03 A6 90 01 00 00 3D 00 10 02 85 A8 93 98 48 03 63 40
+
+. 0 10036BC0 8
+. 39 60 00 40 4B FF FF 54
+
+. 0 10036B18 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 7A 88 3D 8C 25 49 4E 80 04 20
+
+. 0 2547AF5C 68
+. 94 21 FF C0 90 01 00 0C 90 61 00 10 90 81 00 14 7D 83 63 78 90 A1 00 18 7D 64 5B 78 90 C1 00 1C 7C 08 02 A6 90 E1 00 20 90 01 00 30 91 01 00 24 7C 00 00 26 91 21 00 28 91 41 00 2C 90 01 00 08 48 00 00 C9
+
+. 0 2547B064 16
+. 7C E8 02 A6 94 21 FF E0 93 81 00 10 48 01 BF 91
+
+. 0 2547B074 96
+. 93 A1 00 14 90 E1 00 24 80 C3 00 7C 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 7C 7F 1B 78 81 46 00 04 80 A3 00 38 7F AA 22 14 7D 6A 20 2E 80 7D 00 04 81 85 00 04 54 66 C2 3E 80 7F 00 00 54 C5 20 36 7D 05 62 14 7F 83 5A 14 88 08 00 0D 70 09 00 03 81 3F 00 34 80 89 00 04 91 01 00 08 40 82 00 E0
+
+. 0 2547B0D4 16
+. 81 5F 00 E4 38 E0 00 00 2F 8A 00 00 40 9E 00 9C
+
+. 0 2547B17C 52
+. 81 6A 00 04 54 C9 08 3C 80 DF 01 88 7D 09 5A 2E 55 07 23 76 7D 66 3A 14 80 6B 00 04 7C 6A FE 70 7D 40 1A 78 7D 20 50 50 7D 28 FE 70 7D 67 40 38 4B FF FF 38
+
+. 0 2547B0E4 36
+. 7C 05 60 2E 39 40 00 00 80 DF 01 C0 38 A1 00 08 7C 60 22 14 39 00 00 01 7F E4 FB 78 39 20 00 01 4B FF D4 A1
+
+. 0 2547B108 16
+. 39 40 00 00 81 61 00 08 2C 8B 00 00 41 86 00 18
+
+. 0 2547B118 8
+. 2F 03 00 00 41 9A 00 A0
+
+. 0 2547B120 40
+. 81 83 00 00 80 8B 00 04 7D 4C 22 14 80 FE 04 F4 80 DD 00 08 80 67 00 2C 7D 66 52 14 2C 03 00 00 7D 63 5B 78 40 82 00 18
+
+. 0 2547B148 20
+. 7F E3 FB 78 7F A4 EB 78 7F 85 E3 78 7D 66 5B 78 48 00 54 0D
+
+. 0 25480564 52
+. 7D 45 30 50 3D 06 FE 00 55 4C 30 32 3C E0 FC 00 7D 84 36 70 39 28 00 03 7F 84 50 00 60 E0 00 02 54 8B 01 BA 94 21 FF F0 7F 09 00 40 65 6B 48 00 41 9E 00 EC
+
+. 0 25480680 16
+. 91 65 00 00 7C 00 28 6C 7C 00 04 AC 7C 00 2F AC
+
+. 0 25480690 20
+. 7C 00 04 AC 4C 00 01 2C 7C C3 33 78 38 21 00 10 4E 80 00 20
+
+. 0 2547B15C 32
+. 83 81 00 24 83 A1 00 14 7F 88 03 A6 83 C1 00 18 83 81 00 10 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 2547AFA0 64
+. 7C 69 03 A6 80 01 00 30 81 41 00 2C 81 21 00 28 7C 08 03 A6 81 01 00 24 80 01 00 08 80 E1 00 20 80 C1 00 1C 7C 0F F1 20 80 A1 00 18 80 81 00 14 80 61 00 10 80 01 00 0C 38 21 00 40 4E 80 04 20
+
+. 0 FE9B8C4 12
+. 7C 08 02 A6 94 21 FF F0 48 12 C5 85
+
+. 0 FE9B8D0 44
+. 93 C1 00 08 7D 2A 4B 78 7F C8 02 A6 90 01 00 14 7C 6C 1B 78 80 09 00 00 7C 85 23 78 7C E9 3B 78 7D 07 43 78 2F 80 00 00 41 9E 00 34
+
+. 0 FE9B92C 16
+. 80 06 00 00 7C CB 33 78 2C 80 00 00 41 86 00 3C
+
+. 0 FE9B93C 8
+. 81 1E 1C B0 48 00 00 10
+
+. 0 FE9B950 12
+. 2F 80 00 13 2F 08 00 00 40 9E FF EC
+
+. 0 FE9B944 12
+. 84 0B 00 08 2F 00 00 00 41 9A 00 28
+
+. 0 FE9B95C 4
+. 41 BA FF E8
+
+. 0 FE9B960 20
+. 80 8B 00 04 90 88 00 00 84 0B 00 08 2F 00 00 00 40 9A FF E0
+
+. 0 FE9B974 20
+. 81 07 00 0C 7D 84 63 78 80 67 00 04 80 E7 00 08 4B FF FD 91
+
+. 0 FE9B714 12
+. 94 21 FD E0 7D 48 02 A6 48 12 C7 35
+
+. 0 FE9B720 84
+. 93 C1 02 18 7F C8 02 A6 93 A1 02 14 54 80 10 3A 93 81 02 10 7D 60 2A 14 93 E1 02 1C 83 BE 1C E4 7C FC 3B 78 91 41 02 24 7D 1F 43 78 2F 9D 00 00 90 61 01 F0 93 61 02 0C 7D 23 4B 78 90 81 01 F4 39 6B 00 04 90 A1 01 F8 39 40 00 00 90 C1 01 FC 41 9E 00 10
+
+. 0 FE9B780 24
+. 2C 83 00 00 83 7E 1B 84 81 3E 1A A4 91 7B 00 00 91 49 00 00 41 86 00 10
+
+. 0 FE9B798 12
+. 38 80 00 00 38 A0 00 00 48 01 87 31
+
+. 0 FEB3ED0 44
+. 7C 08 02 A6 94 21 FF E0 93 E1 00 1C 7C BF 2B 78 93 81 00 10 7C 9C 23 78 93 A1 00 14 7C 7D 1B 78 90 01 00 24 93 C1 00 18 4B FF FE 35
+
+. 0 FEB3D2C 12
+. 94 21 FF E0 7C 68 02 A6 48 11 41 1D
+
+. 0 FEB3D38 52
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 A1 00 14 38 00 00 01 93 61 00 0C 3B A0 00 00 93 E1 00 1C 90 61 00 24 83 9E 02 B4 7D 20 E0 28 7C 09 E8 00 40 82 00 0C
+
+. 0 FEB3D6C 8
+. 7C 00 E1 2D 40 A2 FF F0
+
+. 0 FEB3D60 12
+. 7D 20 E0 28 7C 09 E8 00 40 82 00 0C
+
+. 0 FEB3D74 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 34
+
+. 0 FEB3D80 16
+. 83 7E 1A FC 83 FB 00 00 2F 1F 00 00 41 9A 00 50
+
+. 0 FEB3D90 16
+. 81 7F 00 04 3B A0 00 00 28 0B 00 00 40 81 00 24
+
+. 0 FEB3DC0 8
+. 7C 0B E8 40 41 81 01 04
+
+. 0 FEB3DC8 8
+. 28 8B 00 1F 40 85 00 F0
+
+. 0 FEB3EBC 16
+. 39 2B 00 01 7D 7D 5B 78 91 3F 00 04 40 BA FF 40
+
+. 0 FEB3E08 36
+. 57 AA 20 36 39 00 00 01 7C EA FA 14 91 07 00 08 39 80 00 00 7C 00 04 AC 7D 60 E0 28 7D 80 E1 2D 40 A2 FF F8
+
+. 0 FEB3E2C 8
+. 2F 8B 00 01 41 9D 00 60
+
+. 0 FEB3E34 4
+. 41 9A 00 34
+
+. 0 FEB3E38 48
+. 57 BC 20 36 83 A1 00 24 7F 7C FA 14 83 C1 00 18 38 7B 00 08 7F A8 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEB3EFC 12
+. 2C 03 00 00 38 00 FF FF 41 82 00 1C
+
+. 0 FEB3F08 60
+. 38 80 00 04 93 E3 00 0C 90 83 00 00 38 00 00 00 93 A3 00 04 93 83 00 08 80 A1 00 24 7C 03 03 78 83 81 00 10 83 A1 00 14 7C A8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FE9B7A4 8
+. 2F 1F 00 00 41 9A 00 14
+
+. 0 FE9B7AC 16
+. 7F E3 FB 78 38 80 00 00 38 A0 00 00 48 01 87 19
+
+. 0 FEB3DA0 20
+. 39 3F 00 08 80 89 00 00 39 29 00 10 2C 84 00 00 41 86 00 10
+
+. 0 FEB3DB4 12
+. 3B BD 00 01 7F 8B E8 40 41 9D FF E8
+
+. 0 FE9B7BC 20
+. 83 FE 1B C8 80 7F 00 00 70 69 00 02 7F A0 00 26 40 82 00 80
+
+. 0 FE9B7D0 8
+. 2F 9C 00 00 41 9E 00 1C
+
+. 0 FE9B7D8 24
+. 80 BB 00 00 7F 88 03 A6 80 61 01 F4 80 81 01 F8 80 C1 01 FC 4E 80 00 21
+
+. 0 10019018 48
+. 94 21 FF E0 7C A8 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 E1 00 1C 93 A1 00 14 80 9E FF F0 90 A1 00 24 7F C4 F2 14 4B FE 77 F5
+
+. 0 10000838 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 41
+
+. 0 10000884 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 03 62 39
+
+. 0 10036ACC 4
+. 4E 80 00 21
+
+. 0 10000898 16
+. 7F C8 02 A6 80 1E 00 0C 2F 80 00 00 41 9E 00 0C
+
+. 0 100008B0 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 10000848 4
+. 48 00 01 05
+
+. 0 1000094C 40
+. 7C 08 02 A6 3D 60 10 03 94 21 FF F0 3D 20 00 00 39 29 00 00 38 6B E0 10 90 01 00 14 80 0B E0 10 2F 80 00 00 41 9E 00 14
+
+. 0 10000984 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 1000084C 4
+. 48 01 8A C5
+
+. 0 10019310 36
+. 7C 08 02 A6 94 21 FF F0 3D 20 10 03 93 E1 00 0C 39 29 E0 04 90 01 00 14 3B E9 FF FC 80 09 FF FC 48 00 00 10
+
+. 0 10019340 8
+. 2F 80 FF FF 40 9E FF F0
+
+. 0 10019348 20
+. 80 01 00 14 83 E1 00 0C 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 10000850 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 10019048 28
+. 83 9E 80 04 80 7E 80 00 3B E0 00 00 7C 1C 18 50 7C 00 16 70 7F 9F 00 40 40 9C 00 24
+
+. 0 10019084 32
+. 81 01 00 24 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FE9B7F0 8
+. 7F A8 01 20 40 82 00 B0
+
+. 0 FE9B7F8 8
+. 38 61 00 10 48 01 4F 9D
+
+. 0 FEB0798 8
+. 38 80 00 00 4B FF FC 94
+
+. 0 FEB0430 172
+. 90 23 00 00 7C 08 02 A6 91 C3 00 0C D9 C3 00 58 90 03 00 08 91 E3 00 10 D9 E3 00 60 7C 00 00 26 92 03 00 14 DA 03 00 68 90 03 00 54 92 23 00 18 DA 23 00 70 92 43 00 1C DA 43 00 78 92 63 00 20 DA 63 00 80 92 83 00 24 DA 83 00 88 92 A3 00 28 DA A3 00 90 92 C3 00 2C DA C3 00 98 92 E3 00 30 DA E3 00 A0 93 03 00 34 DB 03 00 A8 93 23 00 38 DB 23 00 B0 93 43 00 3C DB 43 00 B8 93 63 00 40 DB 63 00 C0 93 83 00 44 DB 83 00 C8 93 A3 00 48 DB A3 00 D0 93 C3 00 4C DB C3 00 D8 93 E3 00 50 DB E3 00 E0 7C C8 02 A6 48 11 79 79
+
+. 0 FEB04DC 24
+. 7C A8 02 A6 80 A5 1B C8 7C C8 03 A6 80 A5 00 3C 74 A5 10 00 41 82 01 58
+
+. 0 FEB0648 4
+. 48 00 00 AC
+
+. 0 FEB06F4 52
+. 2F 84 00 00 7C 08 02 A6 94 21 FF E0 38 A3 01 C4 93 A1 00 14 38 80 00 00 93 E1 00 1C 3B A0 00 00 7C 7F 1B 78 93 C1 00 18 90 01 00 24 38 60 00 00 40 9E 00 28
+
+. 0 FEB0728 36
+. 80 81 00 24 38 60 00 00 93 BF 01 C0 83 A1 00 14 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FE9B800 8
+. 2C 83 00 00 40 86 00 68
+
+. 0 FE9B808 56
+. 83 A2 8B F4 3B 61 00 10 81 9E 1B 84 93 A1 01 E0 83 82 8B F0 81 01 01 F0 93 81 01 E4 80 61 01 F4 7D 08 03 A6 93 62 8B F4 80 81 01 F8 80 AC 00 00 80 C1 01 FC 4E 80 00 21
+
+. 0 1000104C 16
+. 7C 08 02 A6 94 21 FF F0 90 01 00 14 48 01 64 9D
+
+. 0 100174F4 52
+. 94 21 FF D0 7C 08 02 A6 42 9F 00 05 93 C1 00 28 7F C8 02 A6 93 A1 00 24 90 01 00 34 3B A0 00 00 93 E1 00 2C 80 1E FF F0 7F C0 F2 14 80 7E 80 00 48 01 F6 3D
+
+. 0 10036B60 8
+. 39 60 00 10 4B FF FF B4
+
+. 0 FEB3198 12
+. 94 21 FF E0 7C 08 02 A6 48 11 4C B1
+
+. 0 FEB31A4 40
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 41 00 08 7C 7F 1B 78 93 61 00 0C 93 81 00 10 93 A1 00 14 90 01 00 24 48 04 39 11
+
+. 0 FEF6AD8 56
+. 54 64 00 3A 3C E0 7F 7F 54 65 1E F8 81 04 00 00 39 20 FF FF 38 E7 7F 7F 7D 29 2C 30 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 40 00 F8 7C 08 48 39 7C 60 11 20 40 82 00 70
+
+. 0 FEF6B10 12
+. 3C C0 FE FF 38 C6 FE FF 41 9D 00 1C
+
+. 0 FEF6B1C 24
+. 85 04 00 04 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 48 00 F9 40 82 00 4C
+
+. 0 FEF6B34 32
+. 81 04 00 04 85 24 00 08 7C 06 42 14 7C EA 40 F8 7C 00 50 39 7D 66 4A 14 7C EC 48 F8 40 82 00 1C
+
+. 0 FEF6B54 8
+. 7D 60 60 39 41 82 FF DC
+
+. 0 FEF6B5C 16
+. 7C E0 48 38 7C 00 3A 14 7D 88 00 78 48 00 00 14
+
+. 0 FEF6B7C 20
+. 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+
+. 0 FEB31CC 16
+. 81 3E 1B 84 81 29 00 00 2F 89 00 00 41 9E 00 10
+
+. 0 FEB31DC 12
+. 89 7F 00 00 2C 0B 00 00 40 82 00 30
+
+. 0 FEB3214 12
+. 88 1F 00 01 2C 80 00 00 40 86 00 48
+
+. 0 FEB3264 36
+. 81 49 00 00 54 08 40 2E 55 6C 06 3E 7D 3D 4B 78 2C 8A 00 00 7D 9A 43 78 3B 83 FF FE 3B 7F 00 02 41 A6 FF 64
+
+. 0 FEB3288 8
+. 7D 5F 53 78 48 00 00 10
+
+. 0 FEB329C 36
+. 88 1F 00 01 38 7F 00 02 89 3F 00 00 7F 64 DB 78 54 0B 40 2E 7F 85 E3 78 7D 6A 4B 78 7F 1A 50 00 40 9A FF D4
+
+. 0 FEB3290 12
+. 87 FD 00 04 2C 9F 00 00 41 A6 FF 50
+
+. 0 FEB31E8 44
+. 38 60 00 00 83 41 00 24 83 61 00 0C 7F 48 03 A6 83 81 00 10 83 41 00 08 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10017528 28
+. 38 00 00 00 7C 69 1B 79 39 60 00 00 90 01 00 0C 91 61 00 10 90 01 00 08 41 82 00 10
+
+. 0 10017550 28
+. 80 01 00 34 83 A1 00 24 83 C1 00 28 7C 08 03 A6 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 1000105C 4
+. 4B FF F9 55
+
+. 0 100009B0 116
+. 7C 08 02 A6 3D 60 10 02 94 21 FF A0 39 2B 93 B0 80 EB 93 B0 38 60 00 03 80 C9 00 14 81 69 00 08 81 49 00 0C 81 09 00 10 90 01 00 64 80 09 00 04 3D 20 10 02 39 29 94 00 93 81 00 50 C8 29 00 00 3B 81 00 10 93 A1 00 54 3B A1 00 30 FC 40 08 90 93 C1 00 58 93 E1 00 5C 90 E1 00 10 90 01 00 14 91 61 00 18 91 41 00 1C 91 01 00 20 90 C1 00 24 48 00 0B 7D
+
+. 0 1000159C 36
+. 7C 08 02 A6 94 21 FF E0 DB C1 00 10 FF C0 10 90 DB E1 00 18 FF E0 08 90 93 E1 00 08 90 01 00 24 4B FF FA ED
+
+. 0 100010A8 52
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 A1 00 14 7C 7D 1B 79 90 01 00 24 93 81 00 10 80 1E FF F0 93 E1 00 1C 7F C0 F2 14 41 82 00 94
+
+. 0 100010DC 8
+. 38 60 00 30 48 03 5A A1
+
+. 0 10036B80 8
+. 39 60 00 20 4B FF FF 94
+
+. 0 FEF18E0 12
+. 94 21 FF E0 7C 08 02 A6 48 0D 65 69
+
+. 0 FEF18EC 52
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 90 01 00 24 7C 7D 1B 78 93 41 00 08 81 3E 1A 7C 93 61 00 0C 80 09 00 00 93 81 00 10 2F 80 00 00 93 E1 00 1C 41 9E 00 3C
+
+. 0 FEF1920 16
+. 80 A1 00 00 7C 09 03 A6 80 85 00 04 4E 80 04 21
+
+. 0 FEF2A68 12
+. 94 21 FF E0 7C 88 02 A6 48 0D 53 E1
+
+. 0 FEF2A74 40
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 7C 7D 1B 78 38 60 00 00 90 81 00 24 81 3E 1A 7C 93 E1 00 1C 90 69 00 00 4B FF FB DD
+
+. 0 FEF2674 12
+. 94 21 FF B0 7C 68 02 A6 48 0D 57 D5
+
+. 0 FEF2680 52
+. 93 C1 00 48 7F C8 02 A6 93 61 00 3C 90 61 00 54 93 E1 00 4C 3B E0 00 00 83 7E 1C 84 93 41 00 38 80 1B 00 00 93 81 00 40 2F 80 00 00 93 A1 00 44 41 9C 00 2C
+
+. 0 FEF26DC 36
+. 83 BE 06 28 3C A0 00 02 3C C0 00 01 93 FB 00 00 90 DD 00 10 90 BD 00 00 90 BD 00 04 90 BD 00 08 48 05 72 01
+
+. 0 FF498FC 12
+. 7D 88 02 A6 94 21 FF F0 48 07 E5 4D
+
+. 0 FF49908 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 81 3E 1B C8 80 09 00 04 2F 80 00 00 41 9E 00 14
+
+. 0 FF49924 16
+. 7C 03 03 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FEF2700 32
+. 81 3E 1B F8 83 9E 05 FC 80 89 00 00 90 7D 00 18 2C 04 00 00 93 FC 00 00 93 9C 04 48 41 82 01 00
+
+. 0 FEF281C 20
+. 80 7E 06 48 38 81 00 10 38 A1 00 20 38 C0 00 00 48 09 A5 E1
+
+. 0 FF8CE0C 12
+. 94 21 FF D0 7C E8 02 A6 48 03 B0 3D
+
+. 0 FF8CE18 72
+. 93 C1 00 28 7F C8 02 A6 93 61 00 1C 90 E1 00 34 93 21 00 14 7C D9 33 78 83 7E 1B 98 93 41 00 18 7C BA 2B 78 80 1B 03 F8 93 81 00 20 7C 9C 23 78 93 E1 00 2C 7C 09 03 A6 7C 7F 1B 78 93 A1 00 24 38 7B 01 80 4E 80 04 21
+
+. 0 25471884 24
+. 80 83 00 04 94 21 FF F0 39 24 00 01 38 21 00 10 91 23 00 04 4E 80 00 20
+
+. 0 FF8CE60 28
+. 38 A0 00 00 7F 67 DB 78 38 DB 01 68 81 07 00 00 38 E7 00 18 2F 88 00 00 41 9E 00 6C
+
+. 0 FF8CE7C 12
+. 80 68 01 A0 7C 03 F8 40 41 81 00 54
+
+. 0 FF8CED8 8
+. 81 08 00 0C 4B FF FF 98
+
+. 0 FF8CE74 8
+. 2F 88 00 00 41 9E 00 6C
+
+. 0 FF8CE88 12
+. 80 88 01 A4 7C 84 F8 40 40 85 00 48
+
+. 0 FF8CE94 12
+. A1 68 01 54 2F 0B 00 00 41 9A 00 44
+
+. 0 FF8CEA0 24
+. 81 48 01 4C 39 6B FF FF 55 69 28 34 7D 89 50 2E 2F 8C 00 01 40 9E FF F0
+
+. 0 FF8CEA4 20
+. 39 6B FF FF 55 69 28 34 7D 89 50 2E 2F 8C 00 01 40 9E FF F0
+
+. 0 FF8CEB8 32
+. 7C 69 52 14 80 08 00 00 81 63 00 08 81 43 00 14 7F A0 5A 14 7D 3D 52 14 7C 09 F8 40 41 81 00 0C
+
+. 0 FF8CEE0 12
+. 7D 05 43 78 7C 87 30 00 40 85 FF 84
+
+. 0 FF8CE6C 16
+. 81 07 00 00 38 E7 00 18 2F 88 00 00 41 9E 00 6C
+
+. 0 FF8CEE4 8
+. 7C 87 30 00 40 85 FF 84
+
+. 0 FF8CEEC 12
+. 2F 05 00 00 3B A0 00 00 41 9A 01 2C
+
+. 0 FF8CEF8 32
+. 80 85 00 04 80 E5 01 A0 90 9C 00 00 81 05 00 04 90 FC 00 04 88 C8 00 00 2F 86 00 00 41 9E 01 64
+
+. 0 FF8CF18 40
+. 81 05 00 30 80 E5 00 34 2C 88 00 00 81 65 00 38 80 67 00 04 80 C5 00 48 81 4B 00 04 7C 67 1B 78 83 A6 00 04 41 86 00 14
+
+. 0 FF8CF40 32
+. 80 88 00 04 81 04 00 04 55 00 20 36 7C E0 52 14 7F 8A 38 40 38 80 00 00 2C 04 00 00 40 9C 00 84
+
+. 0 FF8CF60 20
+. 80 C5 00 00 81 0A 00 04 7D 66 42 14 7C 8B F8 40 41 85 00 64
+
+. 0 FF8CF74 16
+. 89 2A 00 0C 55 2C 07 3E 2F 8C 00 06 41 9E 00 54
+
+. 0 FF8CF84 12
+. 80 0A 00 08 2C 80 00 00 40 86 00 0C
+
+. 0 FF8CF90 8
+. 7F 8B F8 00 41 9E 00 10
+
+. 0 FF8CF98 12
+. 7C 0B 02 14 7C 80 F8 40 40 85 00 34
+
+. 0 FF8CFD4 12
+. 39 4A 00 10 7C 8A 38 40 41 84 FF 88
+
+. 0 FF8CF64 16
+. 81 0A 00 04 7D 66 42 14 7C 8B F8 40 41 85 00 64
+
+. 0 FF8CFE0 8
+. 2F 9A 00 00 41 9E 00 08
+
+. 0 FF8CFE8 12
+. 90 BA 00 00 2C 99 00 00 41 86 00 08
+
+. 0 FF8CFF8 4
+. 41 82 00 68
+
+. 0 FF8D060 12
+. 90 9C 00 0C 90 9C 00 08 4B FF FF B4
+
+. 0 FF8D01C 20
+. 3B A0 00 01 83 9B 03 FC 38 7B 01 80 7F 89 03 A6 4E 80 04 21
+
+. 0 2547189C 24
+. 80 83 00 04 94 21 FF F0 39 24 FF FF 38 21 00 10 91 23 00 04 4E 80 00 20
+
+. 0 FF8D030 48
+. 83 61 00 34 7F A3 EB 78 83 21 00 14 7F 68 03 A6 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 FEF2830 8
+. 2C 83 00 00 41 A6 FE F8
+
+. 0 FEF2838 16
+. 81 01 00 20 80 E8 00 18 2F 07 00 00 41 BA FE E8
+
+. 0 FEF272C 56
+. 80 9E 06 00 80 7E 1B 4C 7F A4 12 14 3B E0 00 00 93 9D 00 00 83 9E 05 F8 80 C3 00 00 80 9E 06 18 80 7E 05 F4 80 BE 06 1C 83 5E 1B 6C 93 FC 00 00 3B 80 00 00 48 06 D4 E1
+
+. 0 FF5FC40 12
+. 94 21 FF D0 7C E8 02 A6 48 06 82 09
+
+. 0 FF5FC4C 76
+. 93 C1 00 28 7F C8 02 A6 93 41 00 18 93 61 00 1C 7C DA 33 78 93 81 00 20 7C BB 2B 78 93 A1 00 24 7C 9C 23 78 93 E1 00 2C 7C 7D 1B 78 93 21 00 14 39 60 00 00 90 E1 00 34 38 00 00 01 83 FE 1D 14 7D 20 F8 28 7C 09 58 00 40 82 00 0C
+
+. 0 FF5FC98 8
+. 7C 00 F9 2D 40 A2 FF F0
+
+. 0 FF5FC8C 12
+. 7D 20 F8 28 7C 09 58 00 40 82 00 0C
+
+. 0 FF5FCA0 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 5C
+
+. 0 FF5FCAC 24
+. 80 7E 10 F8 7C 79 1B 78 39 40 00 00 39 63 00 18 39 00 00 00 48 00 00 08
+
+. 0 FF5FCC8 32
+. 80 8B 00 00 39 4A 00 01 7D 09 43 78 2B 0A 00 2F 2C 04 00 00 39 08 00 1C 39 6B 00 1C 40 82 FF E0
+
+. 0 FF5FCE8 32
+. 7C 69 1A 14 39 40 00 01 39 63 00 04 39 00 00 00 2F 0B 00 00 91 4B 00 14 91 0B 00 18 41 9A 00 24
+
+. 0 FF5FD08 52
+. 81 9E 1C D4 93 AB 00 04 83 2C 00 00 93 8B 00 08 93 23 00 04 93 6B 00 0C 91 6C 00 00 93 4B 00 10 3B 40 00 00 7C 00 04 AC 7C 60 F8 28 7F 40 F9 2D 40 A2 FF F8
+
+. 0 FF5FD3C 8
+. 2C 03 00 01 41 81 00 A4
+
+. 0 FF5FD44 4
+. 40 9A 00 34
+
+. 0 FF5FD78 48
+. 83 61 00 34 38 60 00 00 83 21 00 14 7F 68 03 A6 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 FEF2764 20
+. 81 3A 00 00 81 9E 1D 70 2F 89 00 00 83 EC 00 00 41 9E 00 68
+
+. 0 FEF2778 32
+. 7D 20 4B 78 3B 41 00 24 91 21 00 24 7C 09 03 78 3B A0 00 00 81 69 00 00 2C 0B 00 00 41 82 00 1C
+
+. 0 FEF2798 12
+. 88 AB 00 00 2C 85 00 4D 41 86 00 C0
+
+. 0 FEF27A4 12
+. 85 69 00 04 2C 0B 00 00 40 82 FF EC
+
+. 0 FEF2860 12
+. 88 CB 00 01 2F 06 00 41 40 9A FF 3C
+
+. 0 FEF286C 12
+. 88 EB 00 02 2F 87 00 4C 40 9E FF 30
+
+. 0 FEF27B0 8
+. 2C 1D 00 00 40 82 01 1C
+
+. 0 FEF27B8 8
+. 2F 9C 00 00 41 9E 00 20
+
+. 0 FEF27DC 16
+. 83 5E 1A 88 81 3A 00 00 2F 09 00 00 40 9A 00 64
+
+. 0 FEF27EC 48
+. 3B 80 00 01 93 9B 00 00 83 61 00 54 83 41 00 38 7F 68 03 A6 83 81 00 40 83 61 00 3C 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+
+. 0 FEF2A9C 32
+. 80 01 00 24 83 C1 00 18 7F A3 EB 78 83 E1 00 1C 7C 08 03 A6 83 A1 00 14 38 21 00 20 4B FF EE 28
+
+. 0 FEF1958 20
+. 80 9E 06 00 7C 64 12 14 83 E3 00 00 2C 1F 00 00 41 82 00 90
+
+. 0 FEF196C 16
+. 38 E0 00 01 7C C0 F8 28 2C 06 00 00 40 82 00 0C
+
+. 0 FEF197C 8
+. 7C E0 F9 2D 40 A2 FF F0
+
+. 0 FEF1970 12
+. 7C C0 F8 28 2C 06 00 00 40 82 00 0C
+
+. 0 FEF1984 12
+. 4C 00 01 2C 2C 86 00 00 40 86 00 6C
+
+. 0 FEF1990 12
+. 2F 1F 00 00 38 60 00 00 41 BA FF 98
+
+. 0 FEF199C 12
+. 7F E3 FB 78 7F A4 EB 78 4B FF DC 39
+
+. 0 FEEF5DC 24
+. 38 00 FF DF 7D 80 00 26 7F 84 00 40 7C A8 02 A6 94 21 FF 90 48 0D 88 61
+
+. 0 FEEF5F4 92
+. 93 81 00 60 7C 7C 1B 78 93 C1 00 68 7F C8 02 A6 91 C1 00 28 91 E1 00 2C 92 01 00 30 92 21 00 34 92 41 00 38 92 61 00 3C 92 81 00 40 92 A1 00 44 92 C1 00 48 92 E1 00 4C 93 01 00 50 93 21 00 54 93 41 00 58 93 61 00 5C 93 A1 00 64 93 E1 00 6C 90 A1 00 74 91 81 00 24 41 9D 04 90
+
+. 0 FEEF650 16
+. 38 64 00 0B 28 03 00 0F 54 79 00 38 40 81 00 B8
+
+. 0 FEEF660 12
+. 81 5C 00 04 7C 8A C8 40 41 84 00 BC
+
+. 0 FEEF724 8
+. 2B 99 01 FF 41 9D 00 60
+
+. 0 FEEF72C 24
+. 7E 59 E2 14 57 38 E8 FE 39 52 00 30 80 6A 00 0C 7C 83 50 00 41 86 00 60
+
+. 0 FEEF744 8
+. 2D 83 00 00 41 8E 02 20
+
+. 0 FEEF968 8
+. 7F 83 E3 78 4B FF EB 59
+
+. 0 FEEE4C4 12
+. 7C 88 02 A6 94 21 FF 90 48 0D 99 85
+
+. 0 FEEE4D0 108
+. 38 00 00 7F 93 01 00 50 39 23 00 38 90 81 00 74 7C 78 1B 78 81 63 00 04 7C 09 03 A6 93 C1 00 68 2F 8B 00 00 91 C1 00 28 91 E1 00 2C 7F C8 02 A6 92 01 00 30 92 21 00 34 92 41 00 38 92 61 00 3C 92 81 00 40 92 A1 00 44 92 C1 00 48 92 E1 00 4C 93 21 00 54 93 41 00 58 93 61 00 5C 93 81 00 60 93 A1 00 64 93 E1 00 6C 41 9E 01 28
+
+. 0 FEEE660 16
+. 91 29 00 0C 91 29 00 08 39 29 00 08 42 00 FF F4
+
+. 0 FEEE670 12
+. 81 DE 05 FC 7C 98 70 00 41 86 00 08
+
+. 0 FEEE680 108
+. 55 72 07 BC 3A 18 00 38 62 4F 00 49 92 18 00 30 91 F8 00 04 82 61 00 74 81 C1 00 28 7E 68 03 A6 81 E1 00 2C 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 FEEF970 4
+. 4B FF FE 30
+
+. 0 FEEF7A0 60
+. 82 BE 05 FC 63 34 00 01 82 5E 06 78 3B 5C 00 38 7E 1C A8 00 81 DE 06 2C 7E B6 AB 78 7F 8F AA 78 4D 90 00 00 7E 93 A3 78 3A 39 00 10 3A 01 00 10 83 FC 00 44 7F 1F D0 00 41 9A 01 9C
+
+. 0 FEEF974 8
+. 28 19 01 FF 40 81 00 2C
+
+. 0 FEEF9A4 100
+. 39 78 00 01 7F 8A B2 78 55 64 D9 7E 55 66 18 38 54 83 10 3A 55 7D 06 FE 7C E3 E2 14 7D 06 E2 14 54 9B 40 2E 3B E0 00 01 80 A7 04 38 7F 1C B0 00 28 99 01 FF 7F E6 E8 30 39 68 00 30 30 0A FF FF 7F A0 51 10 7D 1B E2 14 21 26 00 00 7E A9 31 14 7D 26 28 10 7D 29 49 10 7D 29 00 D0 7D 2A AB 79 41 82 00 34
+
+. 0 FEEFA08 28
+. 39 27 04 38 38 84 00 01 38 E7 00 04 2B 84 00 03 54 83 10 3A 39 08 01 00 41 9D 01 64
+
+. 0 FEEFA24 12
+. 84 A9 00 04 2C 05 00 00 41 82 FF E0
+
+. 0 FEEFA0C 24
+. 38 84 00 01 38 E7 00 04 2B 84 00 03 54 83 10 3A 39 08 01 00 41 9D 01 64
+
+. 0 FEEFB84 20
+. 83 BC 00 30 82 FD 00 04 56 E0 00 38 7F 11 00 40 40 99 03 1C
+
+. 0 FEEFB98 12
+. 81 9C 00 04 71 8A 00 01 40 82 01 A0
+
+. 0 FEEFD40 24
+. 82 FE 06 28 82 77 00 08 80 B7 00 18 7D 93 C8 40 3A 65 FF FF 41 8D 00 28
+
+. 0 FEEFD7C 40
+. 81 7E 06 80 7F B6 EB 78 83 FD 00 04 3B 00 00 00 81 EB 00 00 3B 60 00 00 57 F5 00 38 7C 9C 78 00 7E 1D AA 14 41 86 01 64
+
+. 0 FEEFF04 24
+. 80 BC 00 04 81 97 00 04 70 AA 00 02 7E 4C CA 14 38 12 00 10 40 82 00 08
+
+. 0 FEEFF1C 20
+. 7C 15 00 50 7C 00 9A 14 7E 72 98 F8 7C 1F 90 39 40 81 01 B4
+
+. 0 FEEFF30 20
+. 81 DE 1A 70 7F E3 FB 78 81 0E 00 00 7D 09 03 A6 4E 80 04 21
+
+. 0 FEF4188 16
+. 7C 08 02 A6 94 21 FF F0 90 01 00 14 48 05 4B DD
+
+. 0 FF48D70 12
+. 94 21 FF E0 7C 88 02 A6 48 07 F0 D9
+
+. 0 FF48D7C 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 81 00 24 93 A1 00 14 7C 7D 1B 78 83 FE 1D 6C 38 60 00 00 80 1F 00 00 2F 80 00 00 41 9E 00 3C
+
+. 0 FF48DE0 4
+. 4B FF FF 4D
+
+. 0 FF48D2C 16
+. 94 21 FF F0 90 61 00 08 38 00 00 2D 44 00 00 02
+
+. 0 FF48D3C 12
+. 80 C1 00 08 7C 88 02 A6 48 07 F1 0D
+
+. 0 FF48D48 32
+. 7C A8 02 A6 80 A5 1D 6C 7C 88 03 A6 90 65 00 00 7C 06 18 40 38 21 00 10 38 60 00 00 4C A1 00 20
+
+. 0 FF48DE4 12
+. 2C 83 00 00 38 60 FF FF 41 A4 FF D8
+
+. 0 FF48DF0 8
+. 2F 1D 00 00 41 9A FF CC
+
+. 0 FF48DF8 12
+. 83 FF 00 00 7C 7F EA 14 4B FF FF 2D
+
+. 0 FF48E04 12
+. 2F 83 00 00 38 60 FF FF 41 BC FF B8
+
+. 0 FF48E10 32
+. 80 C1 00 24 7F E3 FB 78 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEF4198 12
+. 2F 83 FF FF 38 00 00 00 41 9E 00 08
+
+. 0 FEF41A4 24
+. 7C 60 1B 78 80 81 00 14 7C 03 03 78 38 21 00 10 7C 88 03 A6 4E 80 00 20
+
+. 0 FEEFF44 12
+. 7C 7B 1B 79 7F 40 00 26 41 82 01 9C
+
+. 0 FEEFF50 16
+. 81 5E 1B B8 81 2A 00 00 2E 09 00 00 41 B2 00 0C
+
+. 0 FEEFF68 16
+. 57 5A 20 3E 7F 40 11 20 57 5A E0 3E 41 9E 00 4C
+
+. 0 FEEFF78 12
+. 83 57 00 28 2E 1A 00 00 40 92 00 08
+
+. 0 FEEFF84 44
+. 93 77 00 28 2F 18 00 00 7F 65 82 78 20 05 00 00 7C A0 29 14 82 FC 04 4C 7D 80 00 26 55 8C DF FE 7C AA 60 39 7D 57 FA 14 41 82 02 54
+
+. 0 FEF0200 28
+. 81 1C 00 04 31 75 FF FF 7F AB A9 10 69 00 00 02 54 00 FF FE 7C 0B E8 39 41 82 00 0C
+
+. 0 FEF0224 16
+. 2C 80 00 00 3B A0 00 00 7F 7A DB 78 41 86 01 04
+
+. 0 FEF0234 8
+. 2F 15 00 00 41 9A 00 F4
+
+. 0 FEF032C 8
+. 91 5C 04 4C 4B FF FF 18
+
+. 0 FEF0248 8
+. 73 60 00 07 41 82 00 0C
+
+. 0 FEF0258 48
+. 7C 9B FA 14 7D 5D AA 14 7D 24 52 14 7D 69 9A 14 7D 7B 90 38 7F E9 D8 50 83 7E 1A 70 7F AA FA 14 82 7B 00 00 7F A3 EB 78 7E 69 03 A6 4E 80 04 21
+
+. 0 FF48DA8 16
+. 81 3E 1A A4 80 A9 00 00 2C 05 00 00 40 82 00 2C
+
+. 0 FF48DB8 8
+. 2F 1D 00 00 40 9A 00 3C
+
+. 0 FEF0288 12
+. 7C 78 1B 79 7F E0 00 26 41 82 00 80
+
+. 0 FEF0294 16
+. 80 7E 1B B8 81 23 00 00 2F 89 00 00 41 BE 00 0C
+
+. 0 FEF02AC 16
+. 57 FF 80 3E 7F E0 81 20 57 FF 80 3E 41 B2 FD 08
+
+. 0 FEF02BC 44
+. 7C BA C0 50 2D 95 00 00 7D 85 EA 14 7F 48 D3 78 61 97 00 01 91 1C 00 30 92 FA 00 04 83 5C 04 4C 7D 5A EA 14 91 5C 04 4C 41 AE FB 74
+
+. 0 FEEFE58 12
+. 82 DC 04 50 7F 0A B0 40 40 99 00 08
+
+. 0 FEEFE64 20
+. 91 5C 04 50 81 C8 00 04 55 CB 00 38 7F 8B 88 40 41 9C 00 78
+
+. 0 FEEFE78 56
+. 7F 89 7A 78 7E 79 58 50 31 49 FF FF 7F 6A 49 10 38 68 00 08 57 7F 10 3A 7E 28 CA 14 7F E6 A3 78 62 6F 00 01 90 C8 00 04 7C 68 1B 78 91 F1 00 04 92 3C 00 30 4B FF F8 04
+
+. 0 FEEF6B0 100
+. 83 81 00 74 7D 03 43 78 81 81 00 24 7F 88 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 FEF19A8 8
+. 7C 7C 1B 79 41 82 00 60
+
+. 0 FEF19B0 20
+. 38 00 00 00 7C 00 04 AC 7F A0 F8 28 7C 00 F9 2D 40 A2 FF F8
+
+. 0 FEF19C4 8
+. 2F 9D 00 01 41 9D 00 DC
+
+. 0 FEF19CC 44
+. 7F 83 E3 78 83 81 00 24 83 41 00 08 7F 88 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEF1930 40
+. 83 81 00 24 83 41 00 08 7F 88 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 100010E4 8
+. 7C 7F 1B 79 41 82 00 BC
+
+. 0 100010EC 32
+. 81 3E 80 0C 57 BC 18 38 93 BF 00 00 38 7C 00 10 C8 09 00 00 D8 1F 00 08 D8 1F 00 10 48 03 5A 79
+
+. 0 10036B80 4
+. 4B EB AD 60
+
+. 0 FEEF66C 32
+. 57 29 E8 FE 38 09 FF FE 54 06 10 3A 7D 66 E2 14 83 AB 00 08 3B 6B 00 08 2D 9D 00 00 41 8E 00 9C
+
+. 0 FEEFEB0 60
+. 82 9E 05 FC 7D 19 00 50 63 31 00 01 61 0A 00 01 7F 93 A2 78 7D DD CA 14 31 73 FF FF 7E 4B 99 10 39 1D 00 08 56 50 10 3A 91 DC 00 30 7E 0F 8B 78 91 FD 00 04 91 4E 00 04 4B FF F7 C8
+
+. 0 1000110C 12
+. 2F 83 00 00 90 7F 00 20 41 9E 00 AC
+
+. 0 10001118 16
+. 38 7D 00 01 7C 63 E9 D6 54 63 10 38 48 03 5A 5D
+
+. 0 10001128 12
+. 2F 83 00 00 90 7F 00 24 41 9E 00 B4
+
+. 0 10001134 8
+. 38 7C 00 08 48 03 5A 49
+
+. 0 1000113C 16
+. 2F 83 00 00 90 7F 00 28 7F E3 FB 78 41 9E 00 C8
+
+. 0 1000114C 32
+. 80 01 00 24 83 81 00 10 83 A1 00 14 7C 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 100015C0 20
+. 7C 7F 1B 79 FC 20 F8 90 FC 40 F0 90 38 00 00 00 41 82 00 18
+
+. 0 100015D4 4
+. 4B FF FC 69
+
+. 0 1000123C 108
+. 94 21 FF 80 7C 08 02 A6 42 9F 00 05 93 C1 00 60 7F C8 02 A6 DB A1 00 68 90 01 00 84 FF A0 10 90 DB E1 00 78 FF E0 08 90 80 1E FF F0 93 81 00 58 7C 7C 1B 78 7F C0 F2 14 DB C1 00 70 81 1E 80 1C 92 E1 00 44 C9 A8 00 00 93 01 00 48 FF 82 68 00 93 21 00 4C 93 41 00 50 4F DC F3 82 93 61 00 54 93 A1 00 5C 93 E1 00 64 41 9E 02 84
+
+. 0 100012A8 8
+. FF 81 68 00 41 9C 01 F8
+
+. 0 100012B0 20
+. C8 03 00 08 3A E0 00 00 3B 20 00 00 FF 80 08 00 40 9E 01 6C
+
+. 0 1000142C 36
+. 81 63 00 00 3B 60 00 00 81 43 00 20 3B A0 00 01 38 0B 00 02 D8 3C 00 08 2B 80 00 01 D9 AA 00 00 40 9D 00 48
+
+. 0 10001450 20
+. 3B E1 00 10 C8 3C 00 08 7F A3 EB 78 7F E4 FB 78 48 00 36 A9
+
+. 0 10004B08 108
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 50 7F C8 02 A6 DB 81 00 70 90 01 00 94 FF 80 08 90 93 61 00 44 7C 9B 23 78 80 1E FF F0 93 E1 00 54 7C 7F 1B 78 7F C0 F2 14 DB 21 00 58 81 3E 80 78 DB 41 00 60 C8 09 00 00 DB 61 00 68 FF 81 00 00 DB A1 00 78 DB C1 00 80 4F DC F3 82 DB E1 00 88 93 81 00 48 93 A1 00 4C 41 9E 03 E4
+
+. 0 10004B74 8
+. 2F 83 00 00 41 9E 00 C0
+
+. 0 10004B7C 16
+. 81 3E 80 80 C8 09 00 00 FF 81 00 00 41 9C 07 38
+
+. 0 10004B8C 8
+. 2F 83 00 01 41 9E 04 2C
+
+. 0 10004FBC 16
+. 81 3E 80 8C C8 29 00 00 FF 9C 08 00 40 9C 00 90
+
+. 0 10004FCC 36
+. 81 3E 80 98 81 7E 80 90 C8 29 00 00 81 3E 80 94 FC 3C 00 72 80 6B 00 04 80 89 00 04 38 84 FF FF 4B FF F8 09
+
+. 0 100047F4 56
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 7C 80 23 79 81 3E 80 00 54 84 18 38 7D 84 1C AE C9 49 00 00 40 81 00 60
+
+. 0 1000482C 56
+. 81 7E 80 04 FD 21 08 2A 7D 24 1A 14 C8 0B 00 00 38 69 FF F8 FC 09 00 28 FD 60 00 2A FC 0B 53 38 34 00 FF FF C9 A3 00 00 38 63 FF F8 FD 40 60 90 FD 80 68 2A 41 81 FF E8
+
+. 0 10004848 28
+. FC 0B 53 38 34 00 FF FF C9 A3 00 00 38 63 FF F8 FD 40 60 90 FD 80 68 2A 41 81 FF E8
+
+. 0 10004864 36
+. C8 2B 00 00 80 01 00 14 FC 29 08 28 83 C1 00 08 7C 08 03 A6 38 21 00 10 FC 21 02 B2 FC 2C 08 28 4E 80 00 20
+
+. 0 10004FF0 4
+. 4B FF FB F0
+
+. 0 10004BE0 88
+. 81 3E 80 9C D8 3B 00 00 C8 09 00 00 FC 21 00 32 D8 3B 00 08 38 60 00 00 80 01 00 94 83 61 00 44 83 81 00 48 7C 08 03 A6 83 A1 00 4C 83 C1 00 50 83 E1 00 54 CB 21 00 58 CB 41 00 60 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10001464 40
+. 81 7C 00 00 57 A9 18 38 3B BD 00 01 38 0B 00 02 81 5C 00 20 7F 80 E8 40 C8 01 00 10 7F 7B 1A 14 7C 09 55 AE 41 9D FF CC
+
+. 0 10001454 16
+. C8 3C 00 08 7F A3 EB 78 7F E4 FB 78 48 00 36 A9
+
+. 0 10004B94 8
+. 2B 83 00 0A 41 9D 00 FC
+
+. 0 10004B9C 36
+. 3C 00 43 30 81 3E 80 AC 90 01 00 08 90 61 00 0C C9 A1 00 08 C8 09 00 00 FC 2D 00 28 FF 81 E0 00 40 9D 04 38
+
+. 0 10004BC0 32
+. FC 3C 08 24 81 3E 80 94 54 60 10 3A 81 7E 80 90 7C 89 00 2E 7C 6B 00 2E 38 84 FF FF 4B FF FC 19
+
+. 0 1000148C 8
+. 2F 9B 00 00 40 9E 00 6C
+
+. 0 10001494 16
+. 38 00 00 00 81 1E 80 1C 7C 17 03 78 4B FF FE 2C
+
+. 0 100012CC 56
+. 55 69 18 38 38 0B 00 01 7D 29 52 14 2B 80 00 01 CB C9 00 08 3B E0 00 01 C8 08 00 00 3B 61 00 20 FD BE E8 24 81 3C 00 28 DB BC 00 10 D9 BC 00 18 D8 09 00 00 41 BD 00 0C
+
+. 0 1000130C 32
+. 81 3E 80 2C 57 FD 18 38 7C 5D 54 AE 3B FF 00 01 C8 29 00 00 7F 63 DB 78 FC 3F 08 2A 48 00 04 A5
+
+. 0 100017CC 92
+. 94 21 FF 60 7C 08 02 A6 42 9F 00 05 93 C1 00 70 7F C8 02 A6 DB E1 00 98 90 01 00 A4 DB C1 00 90 FF C0 10 90 80 1E FF F0 93 E1 00 74 7C 7F 1B 78 7F C0 F2 14 DB 61 00 78 81 3E 80 00 DB 81 00 80 CB E9 00 00 DB A1 00 88 FF 82 F8 00 93 61 00 64 93 81 00 68 93 A1 00 6C 41 9C 00 0C
+
+. 0 10001828 8
+. FF 01 F8 00 40 98 00 64
+
+. 0 10001890 4
+. 41 9E 01 84
+
+. 0 10001894 36
+. 81 3E 80 0C FD 82 00 B2 C9 A9 00 00 81 3E 80 10 FD A1 68 2A C8 09 00 00 FD AD 00 32 FF 8C 68 00 41 9C 01 C0
+
+. 0 10001A74 24
+. 81 3E 80 14 7C 65 1B 78 38 80 00 64 38 60 FF FF C8 69 00 00 48 00 7C B9
+
+. 0 10009740 116
+. 94 21 FF 30 7C 08 02 A6 42 9F 00 05 93 C1 00 A0 7F C8 02 A6 DB 61 00 A8 90 01 00 D4 FF 60 18 90 DB 81 00 B0 FF 80 10 90 80 1E FF F0 DB A1 00 B8 FF A0 08 90 7F C0 F2 14 93 61 00 94 81 3E 80 00 7C 7B 1B 78 93 81 00 98 7C 9C 23 78 C9 A9 00 00 93 E1 00 A4 7C BF 2B 78 FF 01 68 00 DB C1 00 C0 DB E1 00 C8 93 21 00 8C 93 41 00 90 93 A1 00 9C 41 98 00 0C
+
+. 0 100097B4 8
+. FF 82 68 00 40 9C 00 6C
+
+. 0 10009824 4
+. 41 9E 00 F0
+
+. 0 10009828 4
+. 40 9A 01 54
+
+. 0 1000997C 16
+. 81 3E 80 10 C8 09 00 00 FF 81 00 00 41 9C 00 80
+
+. 0 10009A08 20
+. 81 3E 80 14 CB E9 00 00 FC 21 F8 2A FF E2 07 F2 48 02 D1 A1
+
+. 0 10036BB8 8
+. 39 60 00 3C 4B FF FF 5C
+
+. 0 FE0B07C 12
+. FD 60 04 8E 7D 68 02 A6 48 06 80 2D
+
+. 0 FE0B088 36
+. 7D 48 02 A6 81 2A 0D D4 7D 68 03 A6 C9 A9 00 00 FC 00 0A 10 FD 8D 68 28 FF 80 68 00 FF 01 60 00 4C 9C 00 20
+
+. 0 FE0B0AC 8
+. FF 80 31 0C 40 99 00 20
+
+. 0 FE0B0B4 20
+. FC 21 68 2A FC 21 68 28 FE 81 60 00 FC 02 5D 8E 4C B6 00 20
+
+. 0 10009A1C 72
+. 3C 00 43 30 FC 00 08 1E 90 01 00 38 83 5E 80 0C 38 61 00 40 D8 01 00 30 83 A1 00 34 C9 BA 00 00 6F A9 80 00 91 21 00 3C 81 3E 80 18 C8 21 00 38 C8 09 00 00 FC 21 00 28 FF DD 08 28 FC 21 68 2A FC 40 F0 90 48 00 87 15
+
+. 0 10012174 72
+. 94 21 FF C0 7C 08 02 A6 42 9F 00 05 93 C1 00 38 7F C8 02 A6 93 E1 00 3C 90 01 00 44 7C 7F 1B 78 93 A1 00 34 38 81 00 20 80 1E FF F0 38 61 00 10 7F C0 F2 14 38 00 00 00 81 3E 80 B8 C8 09 00 00 FF 82 00 00 40 9E 00 34
+
+. 0 100121BC 48
+. 81 3E 80 BC 7C 03 03 78 D8 1F 00 08 80 01 00 44 C8 09 00 00 83 A1 00 34 7C 08 03 A6 D8 1F 00 00 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+
+. 0 10009A64 20
+. FC 20 F8 90 38 81 00 50 7C 79 1B 78 7F A3 EB 78 48 00 5F D9
+
+. 0 1000FA4C 112
+. 94 21 FF A0 7C 08 02 A6 42 9F 00 05 93 C1 00 28 7F C8 02 A6 DB 81 00 40 90 01 00 64 DB A1 00 48 FF A0 08 90 80 1E FF F0 93 A1 00 24 7C 9D 23 78 7F C0 F2 14 93 E1 00 2C 81 3E 83 6C 7C 7F 1B 78 DB 41 00 30 CB 89 00 00 54 69 0F FE DB 61 00 38 FF 01 E0 00 DB C1 00 50 DB E1 00 58 7C 00 00 26 54 00 CF FE 93 81 00 20 7C 0B 4B 79 40 82 00 A8
+
+. 0 1000FABC 8
+. 2F 83 00 00 40 9E 00 50
+
+. 0 1000FB10 8
+. 2F 83 00 01 41 9E 01 D4
+
+. 0 1000FB18 4
+. 40 9A 00 A8
+
+. 0 1000FBC0 32
+. 6C 60 80 00 3D 20 43 30 83 9E 83 74 90 01 00 0C 91 21 00 08 CB 5C 00 00 CB C1 00 08 48 02 70 3D
+
+. 0 10036C18 8
+. 39 60 00 6C 4B FF FE FC
+
+. 0 FE14388 12
+. 94 21 FF E0 7C 68 02 A6 48 05 ED 21
+
+. 0 FE14394 28
+. 93 C1 00 08 7F C8 02 A6 DB C1 00 10 DB E1 00 18 FF E0 08 90 90 61 00 24 4B FF 00 91
+
+. 0 FE0443C 32
+. 94 21 F8 F0 3C 60 00 0F D8 21 06 C0 60 60 FF FF 80 E1 06 C0 81 01 06 C4 7C 88 02 A6 48 06 EC 59
+
+. 0 FE0445C 80
+. DB C1 07 00 7C EB 3B 78 93 C1 06 F0 7F 8B 00 00 DB A1 06 F8 DB E1 07 08 7F C8 02 A6 92 C1 06 D0 FF C0 08 90 92 E1 06 D4 39 40 00 00 93 01 06 D8 93 21 06 DC 93 41 06 E0 93 61 06 E4 93 81 06 E8 93 A1 06 EC 93 E1 06 F4 90 81 07 14 41 9D 00 4C
+
+. 0 FE044F4 16
+. 3E C0 7F EF 62 CC FF FF 7F 0B 60 00 40 99 00 4C
+
+. 0 FE0454C 28
+. 80 9E 08 54 80 7E 08 98 C8 64 00 00 C9 A3 00 00 FC 9E 18 28 FF 84 68 00 40 9C 04 FC
+
+. 0 FE04A60 16
+. 83 1E 08 94 C8 F8 00 00 FC 87 20 00 41 A4 FB 10
+
+. 0 FE0457C 48
+. 54 EB 03 3E 7C E0 A6 70 83 FE 08 60 65 67 3F F0 90 E1 06 C0 91 01 06 C4 7D 20 52 14 C8 9F 00 00 39 49 FC 01 C9 A1 06 C0 FC 84 68 00 40 84 00 20
+
+. 0 FE045AC 352
+. 80 BE 08 58 39 49 FC 02 CB A5 00 00 FD 5D 03 72 D9 41 06 C0 80 E1 06 C0 81 01 06 C4 80 BE 08 64 6D 57 80 00 90 E1 06 C0 91 01 06 C4 C8 41 06 C0 C8 E5 00 00 83 3E 08 68 FD 87 10 2A 92 E1 06 A4 C8 39 00 00 FC E0 10 90 83 FE 08 9C D9 81 06 C0 80 A1 06 C0 80 C1 06 C4 3C C0 43 30 90 E1 06 C0 91 01 06 C4 54 B9 A6 3E 80 FE 07 94 57 38 18 38 3A D9 FF B5 7D 18 3C AE 6E C9 80 00 90 C1 06 A0 57 36 20 36 FD 28 08 BA 90 C1 06 A8 C8 21 06 A0 91 21 06 A4 C9 A1 06 A0 D9 21 06 C0 80 E1 06 C0 81 01 06 C4 C9 3F 00 00 54 E8 E4 3E 81 7E 08 6C 39 88 FF 4C 81 5E 08 70 6D 80 80 00 FC 8D 48 28 90 01 06 AC 55 1D 18 38 CB E1 06 A8 55 07 20 36 C8 CB 00 00 FD 7F 48 28 C8 0A 00 00 CB A4 00 00 83 9E 07 98 FC 6B E8 3A 83 7E 07 A8 7D 5D E4 AE FC A4 E9 BA 83 5E 07 A4 C8 5B 00 00 FC 88 02 B2 C9 9A 00 00 82 FE 07 9C FD 05 00 F2 C8 63 00 00 7C B6 BC AE 7C D6 BA 14 81 9E 07 A0 83 1E 08 74 FF 05 18 00 FC E7 40 28 7D 47 64 AE C8 D8 00 00 7D 27 62 14 FD 60 28 90 FC 07 01 32 FF E2 60 3A FC 41 48 28 C8 26 00 08 FD 85 50 2A FD BF 00 32 FD 26 00 B2 FF ED 00 3A C9 A9 00 08 40 98 00 08
+
+. 0 FE0470C 12
+. FD 60 28 50 FF 8A 18 00 40 9C 0D 48
+
+. 0 FE0545C 8
+. FF 8B 50 00 4B FF F2 C0
+
+. 0 FE04720 4
+. 40 9D 0D 30
+
+. 0 FE04724 28
+. FF A5 60 28 FD 5D 50 2A C8 03 00 00 FC C9 60 2A FD 60 48 90 FC 09 00 00 40 80 00 08
+
+. 0 FE04744 8
+. FC 8C 00 00 40 84 0D 30
+
+. 0 FE0474C 12
+. FC A0 60 50 FF 8B 28 00 40 9D 0D 18
+
+. 0 FE04758 72
+. FD 69 30 28 FC 0B 60 2A FD 61 68 2A 83 5E 08 78 81 1E 08 7C CB BA 00 00 FC AB 50 2A C9 88 00 00 FC 65 00 2A FC BD 18 BA FC 3F 28 2A FD 2C 08 2A FF E1 60 28 FD A9 30 2A FD 5F 30 2A FC 20 68 90 FF 0D 50 00 41 BA FD 6C
+
+. 0 FE04508 68
+. 83 E1 07 14 82 C1 06 D0 7F E8 03 A6 82 E1 06 D4 83 01 06 D8 83 21 06 DC 83 41 06 E0 83 61 06 E4 83 81 06 E8 83 A1 06 EC 83 C1 06 F0 83 E1 06 F4 CB A1 06 F8 CB C1 07 00 CB E1 07 08 38 21 07 10 4E 80 00 20
+
+. 0 FE143B0 24
+. 81 3E 22 28 FF C0 08 90 80 09 00 00 FC 20 F8 90 2F 80 FF FF 41 9E 00 10
+
+. 0 FE143C8 4
+. 48 00 38 D1
+
+. 0 FE17C98 40
+. 94 21 FF F0 FC 00 04 8E FF 00 00 8C FF 81 08 00 4F DE F0 42 7C 60 00 26 54 63 FF FE FD FE 05 8E 38 21 00 10 4E 80 00 20
+
+. 0 FE143CC 8
+. 2C 03 00 00 41 82 00 24
+
+. 0 FE143F4 16
+. 80 9E 11 F4 C8 04 00 00 FF 9F 00 00 41 BD FF D4
+
+. 0 FE143D4 32
+. 80 A1 00 24 FC 20 F0 90 83 C1 00 08 CB C1 00 10 7C A8 03 A6 CB E1 00 18 38 21 00 20 4E 80 00 20
+
+. 0 1000FBE0 40
+. 81 3E 83 70 FF DE D0 28 CB 69 00 00 81 3E 83 78 FC 01 D8 2A CB E9 00 00 FC 3E D8 2A FF FE F8 2A FF DE D8 3A 48 02 70 15
+
+. 0 10036C18 4
+. 4B DD D7 70
+
+. 0 1000FC08 36
+. 81 3E 83 7C FF FF 00 72 C8 09 00 00 81 3E 83 80 FF DE F8 28 FF DE 00 2A C8 09 00 00 FF 9E 00 00 41 9C 01 38
+
+. 0 1000FC2C 16
+. 81 3E 83 88 C8 09 00 00 FF 9E 00 00 41 9D 00 FC
+
+. 0 1000FC3C 16
+. 39 20 00 01 FD 60 D8 90 7F 89 F8 00 41 9D 00 E4
+
+. 0 1000FC4C 52
+. 7F 8A E3 78 FD A0 D0 90 3D 60 43 30 6D 20 80 00 91 61 00 18 90 01 00 1C 39 29 00 01 C8 01 00 18 7F 89 F8 00 FC 00 68 28 FC 1D 00 24 FD 6B 00 32 40 9D FF DC
+
+. 0 1000FC58 40
+. 6D 20 80 00 91 61 00 18 90 01 00 1C 39 29 00 01 C8 01 00 18 7F 89 F8 00 FC 00 68 28 FC 1D 00 24 FD 6B 00 32 40 9D FF DC
+
+. 0 1000FC80 76
+. 3D 20 43 30 6F E0 80 00 91 21 00 18 FD 80 5A 10 90 01 00 1C 38 60 00 00 C9 A1 00 18 C8 0A 00 00 81 3E 83 90 FD AD 00 28 D9 7D 00 00 C8 09 00 00 81 3E 83 84 FD AD 00 32 C8 09 00 00 FF 8C 00 00 FD AD 02 F2 D9 BD 00 08 40 9C FE 10
+
+. 0 1000FAD8 56
+. 80 01 00 64 83 81 00 20 83 A1 00 24 7C 08 03 A6 83 C1 00 28 83 E1 00 2C CB 41 00 30 CB 61 00 38 CB 81 00 40 CB A1 00 48 CB C1 00 50 CB E1 00 58 38 21 00 60 4E 80 00 20
+
+. 0 10009A78 16
+. FC 40 F0 90 FC 20 F8 90 7C 7D 1B 78 48 02 D0 F5
+
+. 0 10036B78 8
+. 39 60 00 1C 4B FF FF 9C
+
+. 0 FE144F8 12
+. 94 21 FF C0 7C 68 02 A6 48 05 EB B1
+
+. 0 FE14504 48
+. 93 C1 00 20 7F C8 02 A6 7D 80 00 26 DB A1 00 28 FF A0 08 90 DB C1 00 30 FF C0 10 90 DB E1 00 38 93 E1 00 24 90 61 00 44 91 81 00 1C 4B FF 12 F1
+
+. 0 FE05820 16
+. 94 21 FF 90 7C 08 02 A6 D8 41 00 28 48 06 D8 85
+
+. 0 FE05830 88
+. 81 61 00 28 81 81 00 2C 93 C1 00 50 7F C8 02 A6 2F 8C 00 00 D8 21 00 28 DB A1 00 58 FF A0 08 90 DB E1 00 68 FF E0 10 90 93 E1 00 54 DB C1 00 60 93 21 00 3C 93 41 00 40 93 61 00 44 93 81 00 48 93 A1 00 4C 90 01 00 74 83 FE 08 EC 80 E1 00 28 81 01 00 2C 40 9E 00 68
+
+. 0 FE05888 16
+. 54 E9 00 7E 3C 60 7F F0 7F 09 18 00 41 9A 06 48
+
+. 0 FE05898 4
+. 41 99 06 4C
+
+. 0 FE0589C 20
+. 80 9E 08 F0 FC 20 E8 90 C9 A4 00 00 FC 9F 68 00 41 86 06 40
+
+. 0 FE058B0 16
+. 80 BE 08 F4 C8 25 00 00 FF 1F 08 00 40 9A 00 0C
+
+. 0 FE058C8 16
+. 80 DE 08 F8 C8 46 00 00 FF 9F 10 00 41 9E 07 D4
+
+. 0 FE058D8 20
+. 83 FE 08 EC FC 20 68 90 C8 7F 00 00 FC 1F 18 00 41 82 06 04
+
+. 0 FE05EEC 56
+. 83 A1 00 74 83 21 00 3C 7F A8 03 A6 83 41 00 40 83 61 00 44 83 81 00 48 83 A1 00 4C 83 C1 00 50 83 E1 00 54 CB A1 00 58 CB C1 00 60 CB E1 00 68 38 21 00 70 4E 80 00 20
+
+. 0 FE14534 20
+. 81 3E 22 28 FF E0 08 90 80 09 00 00 2F 80 FF FF 41 9E 00 14
+
+. 0 FE14548 8
+. FC 20 F0 90 48 00 37 4D
+
+. 0 FE14550 8
+. 2C 03 00 00 41 82 00 34
+
+. 0 FE14588 8
+. FC 20 E8 90 48 00 37 0D
+
+. 0 FE14590 8
+. 2C 83 00 00 40 86 00 AC
+
+. 0 FE14598 16
+. 83 FE 11 FC C8 1F 00 00 FF 1D 00 00 40 9A 00 C4
+
+. 0 FE14668 8
+. FC 20 F8 90 48 00 36 55
+
+. 0 FE17CC0 36
+. 94 21 FF E0 D8 21 00 08 80 61 00 08 80 81 00 0C 38 21 00 20 54 64 00 7E 3C 64 80 10 54 63 0F FE 4E 80 00 20
+
+. 0 FE14670 8
+. 2E 03 00 00 40 92 00 48
+
+. 0 FE146BC 12
+. C8 5F 00 00 FC 9F 10 00 40 A6 FE 94
+
+. 0 FE14558 48
+. FC 20 F8 90 80 C1 00 44 80 A1 00 1C 83 C1 00 20 7C C8 03 A6 83 E1 00 24 7C A0 81 20 CB A1 00 28 CB C1 00 30 CB E1 00 38 38 21 00 40 4E 80 00 20
+
+. 0 10009A88 72
+. C8 01 00 50 C9 81 00 40 2F 9D 00 00 FC 00 00 72 C9 A1 00 58 81 3E 80 1C FD AD 00 72 FD 40 60 24 C8 01 00 48 FD 60 52 10 D9 41 00 10 FC 4D 60 24 FD 8B 60 24 FC 4C 10 3A C8 09 00 00 FC 4B 10 3A D8 41 00 18 40 BE FF 2C
+
+. 0 10009AD0 8
+. 7F 3D CB 78 4B FF FF 24
+
+. 0 100099F8 8
+. 7F 4A D3 78 4B FF FE 48
+
+. 0 10009844 72
+. 3D 20 43 30 81 7E 80 18 6F 60 80 00 91 21 00 38 90 01 00 3C C9 6B 00 00 39 60 00 01 C9 A1 00 38 7F 0B E0 00 81 3E 80 24 FD AD 58 28 C8 6A 00 00 C8 09 00 00 FD 80 18 90 FD AD 00 32 FD AD 07 32 FC 2D 07 32 41 99 00 4C
+
+. 0 1000988C 64
+. 3D 20 43 30 6D 60 80 00 91 21 00 38 90 01 00 3C 39 6B 00 01 C9 A1 00 38 FD AD 58 28 FC 0D E8 2A FC 00 03 72 FC 01 00 24 FD 8C 00 32 FC 63 60 2A FC 0C 18 24 FC 00 02 10 FF 80 D8 00 41 9C 00 0C
+
+. 0 100098CC 8
+. 7F 0B E0 00 40 99 FF C0
+
+. 0 10009890 60
+. 6D 60 80 00 91 21 00 38 90 01 00 3C 39 6B 00 01 C9 A1 00 38 FD AD 58 28 FC 0D E8 2A FC 00 03 72 FC 01 00 24 FD 8C 00 32 FC 63 60 2A FC 0C 18 24 FC 00 02 10 FF 80 D8 00 41 9C 00 0C
+
+. 0 100098D4 20
+. FC 00 1A 10 D8 61 00 20 FC 80 06 F2 D8 81 00 28 41 98 01 1C
+
+. 0 10009A00 8
+. 3B 80 00 00 4B FF FE E8
+
+. 0 100098EC 12
+. FC 20 50 90 7F E3 FB 78 48 00 21 49
+
+. 0 1000BA3C 72
+. 94 21 FF C0 7C 08 02 A6 DB 81 00 20 FF 80 08 90 DB A1 00 28 FF A0 18 90 DB C1 00 30 FF C0 20 90 DB E1 00 38 FF E0 10 90 FC 40 18 90 93 A1 00 14 FF DE 07 32 7C 7D 1B 78 93 C1 00 18 FF FF 07 72 90 01 00 44 4B FF FE 45
+
+. 0 1000B8C4 60
+. 7C 08 02 A6 94 21 FF F0 42 9F 00 05 93 C1 00 08 7F C8 02 A6 7C 6B 1B 78 90 01 00 14 FD A0 0A 10 FD 80 12 10 80 1E FF F0 7F C0 F2 14 81 3E 80 00 C8 09 00 00 FF 81 00 00 41 9E 00 0C
+
+. 0 1000B900 8
+. FF 82 00 00 40 9E 00 24
+
+. 0 1000B928 20
+. 81 3E 80 04 C8 09 00 00 FF 0D 00 00 4F D8 D3 82 41 9E 00 9C
+
+. 0 1000B93C 12
+. FF 8C 00 00 4F DC F3 82 41 9E 00 C0
+
+. 0 1000BA04 8
+. 4F D9 D3 82 40 9E FF 40
+
+. 0 1000BA0C 4
+. 4B FF FF D4
+
+. 0 1000B9E0 36
+. FC 01 00 B2 81 3E 80 08 38 60 00 00 FD A0 02 10 D8 0B 00 00 C8 09 00 00 FD AD 00 32 D9 AB 00 08 4B FF FF 14
+
+. 0 1000B914 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 1000BA84 64
+. C8 1D 00 08 FF C0 F2 10 80 01 00 44 83 C1 00 18 FF E0 FA 10 CB 81 00 20 CB A1 00 28 7C 08 03 A6 FF FF F0 2A CB C1 00 30 FC 00 F8 2A CB E1 00 38 D8 1D 00 08 83 A1 00 14 38 21 00 40 4E 80 00 20
+
+. 0 100098F8 8
+. 2C 03 00 00 40 A2 FE E8
+
+. 0 10009900 12
+. 2F 9D 00 00 7F A3 EB 78 40 BE FE DC
+
+. 0 1000990C 8
+. 7F 83 E3 78 4B FF FE D4
+
+. 0 100097E4 64
+. 80 01 00 D4 83 21 00 8C 83 41 00 90 7C 08 03 A6 83 61 00 94 83 81 00 98 83 A1 00 9C 83 C1 00 A0 83 E1 00 A4 CB 61 00 A8 CB 81 00 B0 CB A1 00 B8 CB C1 00 C0 CB E1 00 C8 38 21 00 D0 4E 80 00 20
+
+. 0 10001A8C 4
+. 4B FF FD CC
+
+. 0 10001858 56
+. 80 01 00 A4 83 61 00 64 83 81 00 68 7C 08 03 A6 83 A1 00 6C 83 C1 00 70 83 E1 00 74 CB 61 00 78 CB 81 00 80 CB A1 00 88 CB C1 00 90 CB E1 00 98 38 21 00 A0 4E 80 00 20
+
+. 0 1000132C 36
+. 81 7C 00 00 C8 01 00 20 7F 39 1A 14 38 0B 00 01 81 3C 00 28 FC 00 00 32 7F 80 F8 40 7C 1D 4D AE 41 9D FF BC
+
+. 0 10001308 36
+. 81 5C 00 20 81 3E 80 2C 57 FD 18 38 7C 5D 54 AE 3B FF 00 01 C8 29 00 00 7F 63 DB 78 FC 3F 08 2A 48 00 04 A5
+
+. 0 100018B8 16
+. 81 3E 80 18 C8 09 00 00 FF 81 00 00 41 9D 01 D4
+
+. 0 100018C8 16
+. 81 3E 80 1C C8 09 00 00 FF 82 00 00 41 9D 01 BC
+
+. 0 100018D8 72
+. 81 3E 80 20 3C 00 43 30 90 01 00 10 38 61 00 40 C8 09 00 00 38 81 00 48 83 9E 80 24 FC 01 00 2A CB 7C 00 00 FD A0 00 1E D9 A1 00 08 83 A1 00 0C 6F A0 80 00 90 01 00 14 C8 01 00 10 FC 00 D8 28 FF 81 00 28 48 00 8F 55
+
+. 0 1000A870 268
+. 94 21 FF A0 7C 08 02 A6 42 9F 00 05 93 C1 00 20 7F C8 02 A6 DB 81 00 40 90 01 00 64 FD A0 10 50 DB 21 00 28 3D 40 43 30 80 1E FF F0 DB A1 00 48 FF AD 00 B2 7F C0 F2 14 DB 61 00 38 81 3E 81 EC 81 7E 81 F8 C9 09 00 00 81 3E 81 F0 FC 01 40 2A CB 2B 00 00 C9 49 00 00 FC E0 40 90 81 3E 81 F4 FC 60 40 90 FC 00 00 2A 81 7E 81 FC FC C0 50 90 DB C1 00 50 FF 80 50 90 DB E1 00 58 FC 02 00 24 CB C9 00 00 81 3E 81 E8 DB 41 00 30 CB 4B 00 00 C8 A9 00 00 39 20 00 01 FD 60 42 BA FD 80 50 2A FF 60 40 90 FF E0 40 90 FD 4C 58 24 FD 20 00 90 39 29 00 01 91 41 00 08 6D 2B 80 00 FD 00 30 90 91 61 00 0C FC C0 60 90 C9 A1 00 08 FC 80 50 90 FD 40 38 90 2C 89 27 0F FD AD F0 28 FC E0 58 90 FD AD 08 2A FC 0D D8 28 FC 00 06 72 FC 00 03 72 FC 1D 00 24 FD 80 62 3A FD 60 5A BA FD A0 62 10 FC 00 5A 10 FF 8D 28 00 41 9D 00 0C
+
+. 0 1000A97C 8
+. FF 80 28 00 40 9D 00 14
+
+. 0 1000A994 64
+. 91 61 00 14 FD 4C 58 24 91 41 00 10 C9 A1 00 10 FD AD F0 28 FC 04 50 24 FD AD 08 2A FC 00 F8 28 FD AD 68 2A FC 00 02 10 FD AD 10 24 FD AD 48 28 FF 00 D0 00 FD 3F 68 24 FF 89 E0 00 40 9C 00 08
+
+. 0 1000A9D4 8
+. FC 60 18 50 41 98 00 48
+
+. 0 1000A9DC 4
+. 40 85 FF 44
+
+. 0 1000A920 92
+. 39 29 00 01 91 41 00 08 6D 2B 80 00 FD 00 30 90 91 61 00 0C FC C0 60 90 C9 A1 00 08 FC 80 50 90 FD 40 38 90 2C 89 27 0F FD AD F0 28 FC E0 58 90 FD AD 08 2A FC 0D D8 28 FC 00 06 72 FC 00 03 72 FC 1D 00 24 FD 80 62 3A FD 60 5A BA FD A0 62 10 FC 00 5A 10 FF 8D 28 00 41 9D 00 0C
+
+. 0 1000A9D8 4
+. 41 98 00 48
+
+. 0 1000AA20 8
+. 2C 89 27 0F 4B FF FF BC
+
+. 0 1000A9E0 12
+. D9 43 00 00 D8 64 00 00 41 85 00 40
+
+. 0 1000A9EC 52
+. 38 60 00 00 80 01 00 64 83 C1 00 20 CB 21 00 28 7C 08 03 A6 CB 41 00 30 CB 61 00 38 CB 81 00 40 CB A1 00 48 CB C1 00 50 CB E1 00 58 38 21 00 60 4E 80 00 20
+
+. 0 10001920 20
+. 81 3E 80 28 7C 7B 1B 78 C8 09 00 00 FF 9E 00 00 40 9C 01 70
+
+. 0 10001AA0 20
+. FC 20 E0 90 38 61 00 50 FC 40 F0 90 38 81 00 58 48 00 8F 99
+
+. 0 1000AA48 228
+. 94 21 FF B0 7C 08 02 A6 42 9F 00 05 93 C1 00 10 7F C8 02 A6 DB C1 00 40 90 01 00 54 FC 21 00 72 DB A1 00 38 39 40 00 02 80 1E FF F0 3D 00 43 30 DB E1 00 48 7F C0 F2 14 DB 61 00 28 81 3E 82 08 81 7E 82 18 C9 89 00 00 81 3E 82 14 FD 6C 10 24 DB 21 00 18 CB C9 00 00 81 3E 82 04 CB 2B 00 00 CB A9 00 00 81 3E 82 10 81 7E 82 1C C8 09 00 00 81 3E 82 0C DB 81 00 30 DB 41 00 20 CB 4B 00 00 39 60 00 02 FF EB 00 32 C8 09 00 00 FC 80 08 28 FF 60 60 90 FD BE 07 B2 FD 9F 67 FA FD 64 02 F2 FC 42 10 2A FC 00 F0 50 FD 6B 60 24 FD A2 68 BA FC BF F2 FA FC E0 68 24 FD 02 68 24 FC 05 01 F2 FD A5 02 32 FC CB 10 2A FF 80 F0 90 FD 26 02 38 FD 46 69 FA FC 3F 52 78 FC 7F 4A BA FF E0 08 90 48 00 00 10
+
+. 0 1000AB38 104
+. 6D 60 80 00 91 01 00 08 90 01 00 0C FD A5 01 72 C8 01 00 08 39 6B 00 02 FF DE E0 2A FC 00 C8 28 FD A6 69 BA FC 84 00 2A FD 64 68 24 FC E4 F1 FA FD 04 12 3A FC 05 02 F2 FC C6 12 FA FD A0 42 10 FD 60 3A 10 FC BE 00 28 FC 00 32 10 FD AD 58 2A FD 80 2A 10 FD 47 01 F2 FF 8D E8 00 FC 00 60 2A FF 00 E8 00 40 9C 00 08
+
+. 0 1000ABA4 8
+. FD A8 52 3A 40 98 00 08
+
+. 0 1000ABB0 72
+. FC 00 68 50 FD 08 68 24 FC E7 00 24 FD A5 01 F2 FD 85 02 32 FD 26 6A 38 FD 46 61 FA FC 09 D8 28 FD 63 02 72 FC 00 02 10 FD A0 52 10 FD 83 02 B2 FC 00 68 2A FC 7F 5A BA FC 21 62 78 FF 80 D0 00 FF E0 08 90 40 9C FF 38
+
+. 0 1000AB2C 12
+. 39 4A 00 01 2F 8A 27 10 41 9D 00 C8
+
+. 0 1000ABF8 16
+. 2F 8A 27 10 D8 23 00 00 D8 64 00 00 41 9E 00 38
+
+. 0 1000AC08 52
+. 38 60 00 00 80 01 00 54 83 C1 00 10 CB 21 00 18 7C 08 03 A6 CB 41 00 20 CB 61 00 28 CB 81 00 30 CB A1 00 38 CB C1 00 40 CB E1 00 48 38 21 00 50 4E 80 00 20
+
+. 0 10001AB4 44
+. 81 3E 80 30 C9 A1 00 48 2F 9D 00 00 C8 09 00 00 7C 7C 1B 78 7F A9 EB 78 FD AD 00 32 C8 01 00 40 FF A0 68 90 FD 8D 00 32 40 9D 00 44
+
+. 0 10001AE0 64
+. FD A0 D8 90 3D 60 43 30 7F A9 03 A6 6D 20 80 00 91 61 00 10 90 01 00 14 39 29 FF FF C8 01 00 10 FC 00 68 28 FC 00 E0 2A FC 00 00 2A FC 00 F0 24 FC 00 67 78 FD 80 E8 90 FF A0 00 90 42 00 FF D0
+
+. 0 10001AEC 52
+. 6D 20 80 00 91 61 00 10 90 01 00 14 39 29 FF FF C8 01 00 10 FC 00 68 28 FC 00 E0 2A FC 00 00 2A FC 00 F0 24 FC 00 67 78 FD 80 E8 90 FF A0 00 90 42 00 FF D0
+
+. 0 10001B20 24
+. 81 3E 80 00 FC 2C E8 24 C8 09 00 00 FF 9D 00 00 4F DD F3 82 41 9E 00 B4
+
+. 0 10001BE8 8
+. 81 3E 80 0C 4B FF FF 50
+
+. 0 10001B3C 64
+. FC 1C F0 24 CB 69 00 00 81 3E 80 28 C9 81 00 58 CB E9 00 00 81 3E 80 2C C9 A9 00 00 FC 00 08 28 C8 21 00 50 FD BE 03 72 FC 21 00 28 FD BF 68 24 FC 01 60 24 FC 21 60 3A FC 2D 08 24 48 03 50 71
+
+. 0 10036BE8 8
+. 39 60 00 54 4B FF FF 2C
+
+. 0 FE149AC 12
+. 94 21 FF E0 7C 68 02 A6 48 05 E6 FD
+
+. 0 FE149B8 36
+. 93 C1 00 10 7F C8 02 A6 DB E1 00 18 90 61 00 24 FF E0 08 90 81 3E 22 38 80 09 00 3C 74 09 40 00 41 82 00 84
+
+. 0 FE14A5C 4
+. 4B FF 29 E9
+
+. 0 FE07444 12
+. 94 21 FF E0 7C 08 02 A6 48 06 BC 65
+
+. 0 FE07450 40
+. 93 C1 00 18 7F C8 02 A6 FD 00 08 90 90 01 00 24 80 7E 0A 00 81 3E 09 FC C8 03 00 00 FF 81 00 00 C0 09 00 00 40 9D 00 EC
+
+. 0 FE07478 24
+. FC 20 00 90 D9 01 00 08 80 A1 00 08 80 C1 00 0C FF 81 40 00 41 9E 00 D8
+
+. 0 FE07490 112
+. FC A0 04 8E 7C AC 2B 78 FF 80 01 0C 81 5E 22 30 55 89 BD 38 55 88 00 BE 80 9E 0A 04 7C C9 54 2E 65 07 3F E0 7C A9 52 14 7C C8 33 78 FC 40 30 90 C1 85 00 04 90 E1 00 08 91 01 00 0C 75 80 7F F0 C8 81 00 08 C8 C4 00 00 3D 6C 40 00 FC 62 20 BC 55 67 F8 56 39 00 00 00 FC E0 20 90 FD 6C 60 2A FC 0C 10 FA FD AC 30 3C FD 20 20 3C 41 82 00 BC
+
+. 0 FE07500 96
+. FD 4D 62 FA 90 E1 00 08 91 01 00 0C C8 21 00 08 FC 44 00 72 FD 8A 02 7A FD 0A 50 2A FC 60 08 90 FC 8A 33 3C FD AC 3B 3C FC 04 52 3A FC 20 63 7A FD 60 00 2A FD 41 00 F2 FD 20 30 7C FC EA 10 7C FD 09 02 FA FD FE 2D 8E 80 01 00 24 FC 28 51 FA 83 C1 00 18 38 21 00 20 7C 08 03 A6 4E 80 00 20
+
+. 0 FE14A60 8
+. FD A0 08 90 4B FF FF 7C
+
+. 0 FE149E0 20
+. 80 BE 22 28 FC 00 68 90 80 85 00 00 2F 84 FF FF 41 9E 00 0C
+
+. 0 FE149F4 8
+. FC 9F F8 00 41 86 00 20
+
+. 0 FE14A18 32
+. 80 DE 12 08 FC 20 F8 90 38 60 00 1A FC 40 F8 90 C8 06 00 00 FF 1F 00 00 FC 00 68 90 40 98 FF C8
+
+. 0 FE149FC 28
+. 80 E1 00 24 FC 20 00 90 83 C1 00 10 CB E1 00 18 7C E8 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10001B7C 100
+. 81 3E 80 30 C9 81 00 48 FC 21 06 F2 C8 09 00 00 6F A0 80 00 90 01 00 14 3C 00 43 30 FD 8C 00 32 81 3E 80 24 90 01 00 10 2F 9C 00 00 C9 A9 00 00 7F 83 E3 78 C8 01 00 10 FD 8C 00 72 81 3E 80 38 FC 00 68 28 C9 A9 00 00 FD 8C E8 24 FC 00 F8 2A D9 9F 00 00 FD 60 62 10 FC 00 03 72 FC 00 02 F2 4B FF FE 28
+
+. 0 10001A04 8
+. D8 1F 00 08 40 BE FE 50
+
+. 0 10001A0C 8
+. 7F 63 DB 78 4B FF FE 48
+
+. 0 10001B38 68
+. 81 3E 80 34 FC 1C F0 24 CB 69 00 00 81 3E 80 28 C9 81 00 58 CB E9 00 00 81 3E 80 2C C9 A9 00 00 FC 00 08 28 C8 21 00 50 FD BE 03 72 FC 21 00 28 FD BF 68 24 FC 01 60 24 FC 21 60 3A FC 2D 08 24 48 03 50 71
+
+. 0 10036BE8 4
+. 4B DD DD C4
+
+. 0 10001350 16
+. 38 0B 00 01 3B 60 00 01 2B 80 00 01 40 9D 00 84
+
+. 0 10001360 16
+. 3B 01 00 30 3B E0 00 01 7F 9F D8 40 41 9D 00 64
+
+. 0 10001370 60
+. 39 3B FF FF 57 7A 18 38 7D 29 D9 D6 55 29 10 38 3B A9 00 08 81 7C 00 20 57 E9 18 38 FC 20 F8 90 3B FF 00 01 7C 09 5C AE 7F 03 C3 78 7C 5A 5C AE FC 42 00 32 FC 42 F0 24 48 00 04 25
+
+. 0 10036BB8 4
+. 4B DD 44 C4
+
+. 0 1000FCE8 68
+. 80 01 00 64 38 60 00 00 DB 84 00 08 D8 3D 00 00 7C 08 03 A6 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C CB 41 00 30 CB 61 00 38 CB 81 00 40 CB A1 00 48 CB C1 00 50 CB E1 00 58 38 21 00 60 4E 80 00 20
+
+. 0 10036B78 4
+. 4B DD D9 80
+
+. 0 1000B9D4 12
+. FF 8C 00 00 4F DD F3 82 40 9E FF 60
+
+. 0 1000B948 12
+. FF 8D 60 00 FD 60 68 90 41 9C 00 08
+
+. 0 1000B958 8
+. FF 8D 60 00 41 9D 00 08
+
+. 0 1000B960 20
+. FD A0 60 90 81 3E 80 0C C8 09 00 00 FF 8D 00 00 41 9C 00 18
+
+. 0 1000B988 48
+. FC 01 00 B2 81 3E 80 14 38 60 00 00 FD A0 02 10 D8 0B 00 00 C8 09 00 00 81 3E 80 08 FF 8D 00 00 C8 09 00 00 FD AD 00 32 D9 AB 00 08 40 9C FF 60
+
+. 0 100013AC 32
+. 7F 9F D8 40 81 3C 00 24 7F 39 1A 14 C8 01 00 30 7D 3D 4A 14 3B BD 00 08 D8 09 FF F8 40 9D FF BC
+
+. 0 100013CC 20
+. 81 7C 00 00 3B 7B 00 01 38 0B 00 01 7F 80 D8 40 41 9D FF 88
+
+. 0 10001364 12
+. 3B E0 00 01 7F 9F D8 40 41 9D 00 64
+
+. 0 10001384 40
+. 81 7C 00 20 57 E9 18 38 FC 20 F8 90 3B FF 00 01 7C 09 5C AE 7F 03 C3 78 7C 5A 5C AE FC 42 00 32 FC 42 F0 24 48 00 04 25
+
+. 0 100013E0 12
+. 2F 99 00 00 7E E3 BB 78 40 9E 01 98
+
+. 0 100013EC 64
+. 80 01 00 84 82 E1 00 44 83 01 00 48 7C 08 03 A6 83 21 00 4C 83 41 00 50 83 61 00 54 83 81 00 58 83 A1 00 5C 83 C1 00 60 83 E1 00 64 CB A1 00 68 CB C1 00 70 CB E1 00 78 38 21 00 80 4E 80 00 20
+
+. 0 100015D8 12
+. 38 00 00 00 2F 83 00 00 40 9E 00 08
+
+. 0 100015E4 36
+. 7F E0 FB 78 7C 03 03 78 80 01 00 24 83 E1 00 08 CB C1 00 10 7C 08 03 A6 CB E1 00 18 38 21 00 20 4E 80 00 20
+
+. 0 10000A24 16
+. 7F 84 E3 78 7F A5 EB 78 7C 7E 1B 78 48 00 0C 89
+
+. 0 100016B8 92
+. 94 21 FF E0 7C 08 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 01 00 24 7C 7F 1B 78 80 63 00 00 38 C0 00 00 80 1E FF F0 54 69 18 38 2B 83 00 00 7F C0 F2 14 80 1F 00 20 C8 1F 00 10 7D 29 02 14 93 A1 00 14 C9 A9 00 08 7C BD 2B 78 93 81 00 10 FD 40 68 24 40 9D 00 94
+
+. 0 10001714 20
+. 83 9E 80 34 2B 03 00 00 C9 7C 00 00 39 00 00 00 40 99 00 60
+
+. 0 10001728 36
+. 80 BF 00 24 7C 69 03 A6 81 9F 00 28 7F 88 30 40 55 00 18 38 7C E0 62 14 7D 0A 43 78 7C CB 33 78 41 9C 00 0C
+
+. 0 1000174C 56
+. 7D 0B 43 78 7C CA 33 78 39 2B 00 01 C9 A7 00 08 7D 29 59 D6 7D 84 04 AE 39 08 00 01 55 29 F8 7E 7D 29 52 14 55 29 18 38 7C 09 2C AE FC 00 68 24 FD 6C 58 3A 42 00 FF B4
+
+. 0 10001734 24
+. 7F 88 30 40 55 00 18 38 7C E0 62 14 7D 0A 43 78 7C CB 33 78 41 9C 00 0C
+
+. 0 10001784 32
+. FC 0B 58 2A 54 C0 18 38 38 C6 00 01 7F 83 30 40 FC 00 02 B2 FC 00 02 B2 7C 1D 05 AE 41 9D FF 7C
+
+. 0 1000171C 12
+. C9 7C 00 00 39 00 00 00 40 99 00 60
+
+. 0 10001754 48
+. 39 2B 00 01 C9 A7 00 08 7D 29 59 D6 7D 84 04 AE 39 08 00 01 55 29 F8 7E 7D 29 52 14 55 29 18 38 7C 09 2C AE FC 00 68 24 FD 6C 58 3A 42 00 FF B4
+
+. 0 100017A4 36
+. 80 01 00 24 38 60 00 00 83 81 00 10 83 A1 00 14 7C 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10000A34 44
+. 3D 20 10 02 C8 01 00 30 C9 A9 93 C8 3D 20 10 02 C9 89 93 D0 3B E0 00 00 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000A64 40
+. 3D 20 10 02 C8 01 00 38 C9 A9 93 D8 3D 20 10 02 FC 00 68 28 C9 A9 93 E0 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000A90 32
+. 3D 20 10 02 C8 01 00 40 C9 A9 93 E8 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000AB4 16
+. 7F A4 EB 78 7F 85 E3 78 7F C3 F3 78 48 00 0B F9
+
+. 0 10000AC4 80
+. 3D 20 10 02 C9 69 93 F0 3D 20 10 02 C9 81 00 10 C9 29 93 F8 3D 20 10 02 FD 8C 02 F2 39 29 94 00 C9 A9 00 00 C8 01 00 18 FD AC 68 28 D9 81 00 10 FD 40 02 F2 C8 01 00 20 FD A0 6A 10 FD 80 02 F2 D9 41 00 18 FF 8D 48 00 D9 81 00 20 40 9D 00 08
+
+. 0 10000B18 44
+. 3D 20 10 02 39 29 94 08 C8 09 00 00 3D 20 10 02 39 29 94 10 FC 0A 00 28 C9 A9 00 00 FC 00 02 10 FC 00 03 72 FF 80 48 00 40 9D 00 08
+
+. 0 10000B48 32
+. 3D 20 10 02 39 29 94 18 C8 09 00 00 FD AC 00 28 FD A0 6A 10 FD AD 00 24 FF 8D 48 00 40 9D 00 08
+
+. 0 10000B6C 8
+. 7F C3 F3 78 48 00 0A F5
+
+. 0 10001664 32
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 C1 00 18 90 01 00 24 80 63 00 28 48 03 55 79
+
+. 0 10036BF8 8
+. 39 60 00 5C 4B FF FF 1C
+
+. 0 FEEF490 12
+. 94 21 FF E0 7C 08 02 A6 48 0D 89 B9
+
+. 0 FEEF49C 44
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 90 01 00 24 7C 7C 1B 78 93 A1 00 14 81 3E 1D 88 93 E1 00 1C 80 09 00 00 2F 80 00 00 41 9E 00 34
+
+. 0 FEEF4F8 8
+. 2C 03 00 00 41 A2 FF DC
+
+. 0 FEEF500 16
+. 39 03 FF F8 80 88 00 04 70 80 00 02 40 82 00 88
+
+. 0 FEEF510 12
+. 70 80 00 04 83 FE 05 FC 40 82 00 70
+
+. 0 FEEF51C 20
+. 3B A0 00 00 38 00 00 01 7D 00 F8 28 7C 08 E8 00 40 82 00 0C
+
+. 0 FEEF530 8
+. 7C 00 F9 2D 40 A2 FF F0
+
+. 0 FEEF524 12
+. 7D 00 F8 28 7C 08 E8 00 40 82 00 0C
+
+. 0 FEEF538 12
+. 4C 00 01 2C 2C 88 00 00 40 86 00 90
+
+. 0 FEEF544 12
+. 7F 84 E3 78 7F E3 FB 78 4B FF F6 4D
+
+. 0 FEEEB98 24
+. 7C A8 02 A6 94 21 FF A0 93 E1 00 5C 3B E4 FF F8 93 A1 00 54 48 0D 92 A5
+
+. 0 FEEEBB0 84
+. 90 A1 00 64 81 3F 00 04 93 81 00 50 7C 7C 1B 78 55 3D 00 38 93 C1 00 58 7C 1D 00 D0 92 21 00 24 7F 9F 00 40 92 41 00 28 92 61 00 2C 7F C8 02 A6 92 81 00 30 92 A1 00 34 92 C1 00 38 92 E1 00 3C 93 01 00 40 93 21 00 44 93 41 00 48 93 61 00 4C 41 9D 02 A0
+
+. 0 FEEEC04 8
+. 73 E0 00 07 40 82 02 98
+
+. 0 FEEEC0C 12
+. 81 63 00 04 7F 0B E8 40 40 98 01 DC
+
+. 0 FEEEDF0 16
+. 7C DF EA 14 81 26 00 04 2B 89 00 08 40 9D 01 54
+
+. 0 FEEEE00 16
+. 81 03 04 4C 55 2A 00 38 7C 8A 40 40 40 84 01 44
+
+. 0 FEEEE10 28
+. 57 AA F8 7A 55 6C 00 3C 7D 2A 18 2E 91 83 00 04 7C 09 F8 00 83 7E 06 64 41 82 00 5C
+
+. 0 FEEEE2C 84
+. 91 3F 00 08 7F EA 19 2E 83 A1 00 64 82 21 00 24 7F A8 03 A6 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+
+. 0 FEEF550 16
+. 7C 00 04 AC 7F 80 F8 28 7F A0 F9 2D 40 A2 FF F8
+
+. 0 FEEF560 8
+. 2F 1C 00 01 40 99 FF 74
+
+. 0 FEEF4D8 32
+. 80 A1 00 24 83 81 00 10 83 A1 00 14 7C A8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10001684 8
+. 80 7D 00 24 48 03 55 71
+
+. 0 10036BF8 4
+. 4B EB 88 98
+
+. 0 1000168C 8
+. 80 7D 00 20 48 03 55 69
+
+. 0 10001694 8
+. 7F A3 EB 78 48 03 55 61
+
+. 0 1000169C 24
+. 80 01 00 24 83 A1 00 14 83 C1 00 18 7C 08 03 A6 38 21 00 20 4E 80 00 20
+
+. 0 10000B74 36
+. 80 01 00 64 7F E3 FB 78 83 81 00 50 83 A1 00 54 7C 08 03 A6 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+
+. 0 10001060 12
+. 3C 80 10 02 38 84 94 F8 48 01 70 DD
+
+. 0 10018144 84
+. 94 21 FF 70 7C 08 02 A6 42 9F 00 05 93 C1 00 88 7F C8 02 A6 93 A1 00 84 90 01 00 94 2F 83 00 00 93 E1 00 8C 80 1E FF F0 90 C1 00 14 7F C0 F2 14 90 E1 00 18 83 BE 80 00 91 01 00 1C 81 7D 00 00 91 21 00 20 39 6B 00 01 91 41 00 24 90 A1 00 10 40 86 00 24
+
+. 0 100181B8 12
+. 91 7D 00 00 7C 9D 23 78 40 9E 00 54
+
+. 0 100181C4 32
+. 83 FE 80 08 81 7E 80 04 80 1F 00 00 81 2B 00 00 2F 80 00 00 39 29 00 01 91 2B 00 00 40 9E 00 C8
+
+. 0 100182A8 8
+. 80 7E 80 0C 48 01 E9 2D
+
+. 0 10036BD8 8
+. 39 60 00 4C 4B FF FF 3C
+
+. 0 FECADD0 12
+. 94 21 FF 70 7D 88 02 A6 48 0F D0 79
+
+. 0 FECADDC 64
+. 93 C1 00 88 7F C8 02 A6 7C 60 1B 78 91 81 00 94 90 81 00 0C 7C 04 03 78 81 7E 1A A0 90 A1 00 10 38 A1 00 70 80 6B 00 00 90 C1 00 14 90 E1 00 18 91 01 00 1C 91 21 00 20 91 41 00 24 40 86 00 24
+
+. 0 FECAE3C 36
+. 38 C1 00 98 39 21 00 08 39 00 00 01 38 E0 00 00 99 01 00 70 98 E1 00 71 90 C1 00 74 91 21 00 78 4B FF 80 79
+
+. 0 FEC2ED4 12
+. 94 21 FA 40 7C C8 02 A6 48 10 4F 75
+
+. 0 FEC2EE0 108
+. 7D 80 00 26 93 C1 05 B8 7F C8 02 A6 92 C1 05 98 7C B6 2B 78 93 61 05 AC 7C 7B 1B 78 93 E1 05 BC 7C 3F 0B 78 90 C1 05 C4 91 C1 05 78 91 E1 05 7C 92 01 05 80 92 21 05 84 92 41 05 88 92 61 05 8C 92 81 05 90 92 A1 05 94 92 E1 05 9C 93 01 05 A0 93 21 05 A4 93 41 05 A8 93 81 05 B0 93 A1 05 B4 91 81 05 74 90 9F 04 C4 4B FD 8E CD
+
+. 0 FE9BE14 12
+. 94 21 FF F0 7D 88 02 A6 48 12 C0 35
+
+. 0 FE9BE20 32
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 7E 1C 4C 83 C1 00 08 7C 63 12 14 38 21 00 10 4E 80 00 20
+
+. 0 FEC2F4C 32
+. 88 1B 00 46 38 E0 00 00 80 63 00 00 2F 80 00 00 90 FF 04 C8 90 7F 04 DC 90 FF 04 E0 40 9E 00 30
+
+. 0 FEC2F6C 12
+. 80 9E 1D C8 2C 04 00 00 41 82 04 00
+
+. 0 FEC2F78 12
+. 80 7B 00 60 2C 83 00 00 40 86 00 0C
+
+. 0 FEC2F84 20
+. 38 60 FF FF 90 7B 00 60 2D 83 FF FF 39 20 FF FF 40 8E 00 74
+
+. 0 FEC2F98 12
+. 81 3B 00 00 71 20 00 08 40 82 2B FC
+
+. 0 FEC2FA4 12
+. 80 BF 04 C4 2E 05 00 00 41 92 36 28
+
+. 0 FEC2FB0 12
+. 89 1B 00 46 2F 08 00 00 40 9A 00 34
+
+. 0 FEC2FBC 12
+. 81 3E 1D C8 2F 89 00 00 41 9E 2A A8
+
+. 0 FEC2FC8 12
+. 80 7B 00 60 2C 83 00 00 40 86 00 0C
+
+. 0 FEC2FDC 12
+. 2D 83 FF FF 39 20 FF FF 40 8E 00 24
+
+. 0 FEC2FE8 12
+. 81 3B 00 00 71 20 00 02 41 82 00 80
+
+. 0 FEC3070 56
+. 81 76 00 08 39 E0 00 00 82 36 00 00 38 9F 04 40 82 16 00 04 39 C0 FF FF 80 7F 04 C4 91 7F 04 08 91 FF 04 D8 92 3F 04 00 92 1F 04 04 91 FF 04 40 91 FF 04 44 48 01 76 E5
+
+. 0 FEDA788 16
+. 7C 08 02 A6 94 21 FF E0 93 A1 00 14 48 0E D6 BD
+
+. 0 FEDA798 44
+. 93 C1 00 18 7C 9D 23 78 93 E1 00 1C 7F C8 02 A6 93 61 00 0C 7C 7F 1B 78 93 81 00 10 90 01 00 24 88 03 00 00 2F 80 00 00 41 9E 00 48
+
+. 0 FEDA7C4 8
+. 2C 00 00 25 41 82 00 40
+
+. 0 FEDA7CC 36
+. 81 3E 1D 50 3B 80 00 00 7F 69 12 14 93 9D 00 00 7F E3 FB 78 88 9F 00 00 7F A5 EB 78 70 89 00 80 40 82 00 44
+
+. 0 FEDA7F0 20
+. 3B FF 00 01 88 7F 00 00 2F 83 00 00 2F 03 00 25 41 9E 00 08
+
+. 0 FEDA804 4
+. 40 9A FF D4
+
+. 0 FEDA7D8 24
+. 93 9D 00 00 7F E3 FB 78 88 9F 00 00 7F A5 EB 78 70 89 00 80 40 82 00 44
+
+. 0 FEDA808 40
+. 80 A1 00 24 7F E3 FB 78 83 61 00 0C 83 81 00 10 7C A8 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEC30A8 36
+. 81 7B 00 00 39 40 00 00 90 7F 04 D0 7C 7D 1B 78 71 60 80 00 90 7F 04 4C 91 DF 04 CC 91 5F 04 E4 40 82 00 2C
+
+. 0 FEC30CC 28
+. 82 9E 1B 48 80 14 00 B8 31 40 FF FF 7E 4A 01 10 2E 12 00 00 92 5F 04 E4 40 92 02 74
+
+. 0 FEC30E8 20
+. 83 1E 1A F0 93 7F 04 14 93 1F 04 10 71 60 80 00 40 82 00 54
+
+. 0 FEC30FC 20
+. 80 7B 00 48 3B 82 8B A0 83 23 00 08 7F 19 E0 00 41 9A 00 34
+
+. 0 FEC3110 20
+. 38 E0 00 00 38 00 00 01 7F 40 18 28 7C 1A 38 00 40 82 00 0C
+
+. 0 FEC3124 8
+. 7C 00 19 2D 40 A2 FF F0
+
+. 0 FEC3118 12
+. 7F 40 18 28 7C 1A 38 00 40 82 00 0C
+
+. 0 FEC312C 12
+. 4C 00 01 2C 2F 9A 00 00 40 9E 1F 30
+
+. 0 FEC3138 72
+. 80 7B 00 48 93 83 00 08 81 83 00 04 3B 8C 00 01 93 83 00 04 89 3B 00 46 3A 80 FF FF 80 DF 04 C4 7D 28 07 74 7C 68 DA 14 7F A6 E8 50 80 A3 00 98 7F 63 DB 78 80 85 00 1C 7F A5 EB 78 7C 89 03 A6 7C C4 33 78 4E 80 04 21
+
+. 0 FEEA658 80
+. 2F 85 00 00 7C 08 02 A6 94 21 FF D0 93 01 00 10 3B 00 00 00 93 21 00 14 7C B9 2B 78 93 41 00 18 7C BA 2B 78 93 61 00 1C 7C 9B 23 78 93 81 00 20 7C 9C 23 78 93 A1 00 24 7C 7D 1B 78 93 C1 00 28 38 60 00 00 93 E1 00 2C 90 01 00 34 41 9E 00 6C
+
+. 0 FEEA6A8 24
+. 81 7D 00 00 80 9D 00 18 71 69 02 00 80 7D 00 14 7F E3 20 50 40 82 00 84
+
+. 0 FEEA6C0 8
+. 2C 1F 00 00 41 82 00 3C
+
+. 0 FEEA700 12
+. 7C 79 C2 14 2F 83 00 00 40 9E 00 74
+
+. 0 FEEA77C 36
+. 8B 1D 00 46 7F A3 EB 78 38 80 FF FF 7F 0C 07 74 7D 4C EA 14 81 0A 00 98 81 68 00 0C 7D 69 03 A6 4E 80 04 21
+
+. 0 FEE984C 12
+. 7C 08 02 A6 94 21 FF E0 48 0D E5 FD
+
+. 0 FEE9858 44
+. 93 A1 00 14 93 C1 00 18 7C 9D 23 78 90 01 00 24 7F C8 02 A6 81 63 00 00 93 E1 00 1C 7C 7F 1B 78 71 60 00 08 93 81 00 10 40 82 02 10
+
+. 0 FEE9884 8
+. 71 69 08 00 40 82 00 F4
+
+. 0 FEE988C 12
+. 80 A3 00 10 2C 85 00 00 40 86 01 2C
+
+. 0 FEE9898 4
+. 48 00 23 B1
+
+. 0 FEEBC48 32
+. 7C 88 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 90 81 00 14 80 03 00 1C 2F 80 00 00 40 9E 00 40
+
+. 0 FEEBC68 12
+. 80 A3 00 00 70 A9 00 02 41 82 00 10
+
+. 0 FEEBC80 28
+. 89 5F 00 46 7D 48 07 74 7D 28 FA 14 80 E9 00 98 81 67 00 34 7D 69 03 A6 4E 80 04 21
+
+. 0 FEDB788 44
+. 7C 88 02 A6 94 21 FF 70 93 A1 00 84 3B A0 20 00 93 E1 00 8C 7C 7F 1B 78 90 81 00 94 80 03 00 38 93 C1 00 88 2F 80 00 00 41 9C 00 4C
+
+. 0 FEDB7B4 32
+. 88 E3 00 46 38 81 00 10 7C E6 07 74 7D 26 1A 14 80 A9 00 98 81 65 00 48 7D 69 03 A6 4E 80 04 21
+
+. 0 FEEA4EC 28
+. 94 21 FF F0 7C 85 23 78 80 03 00 38 38 21 00 10 38 60 00 03 7C 04 03 78 48 05 6D BC
+
+. 0 FF412C0 12
+. 7D 88 02 A6 94 21 FF F0 48 08 6B 89
+
+. 0 FF412CC 28
+. 93 C1 00 08 7C 83 23 78 7F C8 02 A6 7C A4 2B 78 38 00 00 C5 7D 88 03 A6 44 00 00 02
+
+. 0 FF412E8 20
+. 7C 00 00 26 74 09 10 00 81 3E 1C 4C 7D 29 12 14 40 82 00 10
+
+. 0 FF412FC 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+. 0 FEDB7D4 8
+. 2C 03 00 00 41 80 00 24
+
+. 0 FEDB7DC 16
+. 81 01 00 20 55 03 04 26 2C 83 20 00 41 86 00 78
+
+. 0 FEDB7EC 12
+. 80 01 00 48 2C 00 00 00 40 81 00 08
+
+. 0 FEDB7F8 36
+. 7C 1D 03 78 39 9D 0F FF 38 60 00 00 55 84 00 26 38 A0 00 03 38 C0 00 22 38 E0 FF FF 39 00 00 00 48 07 29 A1
+
+. 0 FF4E1B8 8
+. 38 00 00 5A 44 00 00 02
+
+. 0 FF4E1C0 4
+. 4C A3 00 20
+
+. 0 FEDB81C 12
+. 38 00 FF FF 2C 83 FF FF 41 86 00 1C
+
+. 0 FEDB828 20
+. 7C 64 1B 78 38 C0 00 01 7F E3 FB 78 7C A4 EA 14 48 01 03 45
+
+. 0 FEEBB7C 60
+. 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C 9B 23 78 93 81 00 10 7C BC 2B 78 90 01 00 24 81 23 00 1C 93 A1 00 14 7C DD 33 78 2F 89 00 00 93 E1 00 1C 93 C1 00 18 7C 7F 1B 78 41 9E 00 28
+
+. 0 FEEBBDC 28
+. 80 1F 00 00 2C 9D 00 00 54 09 00 3C 93 7F 00 1C 60 00 00 01 93 9F 00 20 41 86 00 2C
+
+. 0 FEEBBF8 40
+. 80 61 00 24 91 3F 00 00 83 61 00 0C 7C 68 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEDB83C 36
+. 38 00 00 01 83 A1 00 94 7C 03 03 78 83 C1 00 88 7F A8 03 A6 83 E1 00 8C 83 A1 00 84 38 21 00 90 4E 80 00 20
+
+. 0 FEEBC9C 8
+. 2F 03 FF FF 41 9A 00 18
+
+. 0 FEEBCA4 20
+. 80 61 00 14 83 E1 00 08 38 21 00 10 7C 68 03 A6 4E 80 00 20
+
+. 0 FEE989C 32
+. 80 DF 00 1C 81 7F 00 00 7C C5 33 78 90 DF 00 0C 90 DF 00 04 90 DF 00 08 71 60 01 00 40 82 01 8C
+
+. 0 FEE98BC 12
+. 81 5F 00 20 7F 85 50 00 41 9E 01 3C
+
+. 0 FEE98C8 44
+. 81 9F 00 60 61 6B 08 00 81 3F 00 08 7C A4 2B 78 2C 8C 00 00 90 BF 00 14 91 3F 00 0C 90 BF 00 10 91 3F 00 04 91 7F 00 00 40 85 01 44
+
+. 0 FEE9A34 8
+. 71 60 02 02 41 A2 FE BC
+
+. 0 FEE98F4 12
+. 91 5F 00 18 2F 1D FF FF 41 9A 00 98
+
+. 0 FEE9994 12
+. 7F E3 FB 78 7C A4 28 50 4B FF F3 8D
+
+. 0 FEE8D28 48
+. 94 21 FF E0 7C 08 02 A6 93 61 00 0C 7C BB 2B 79 93 81 00 10 7C 9C 23 78 93 E1 00 1C 7C 7F 1B 78 93 A1 00 14 93 C1 00 18 90 01 00 24 41 82 00 A4
+
+. 0 FEE8DF8 40
+. 38 60 00 00 83 61 00 24 83 81 00 10 7F 68 03 A6 83 A1 00 14 83 61 00 0C 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEE99A0 32
+. 83 A1 00 24 83 81 00 10 7F A8 03 A6 83 C1 00 18 83 A1 00 14 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEEA7A0 12
+. 2C 83 FF FF 7C 79 D0 50 41 A6 FF 68
+
+. 0 FEEA7AC 24
+. 83 FD 00 20 7F 3C CB 78 80 1D 00 1C 7D 20 F8 50 2B 09 00 7F 40 99 00 0C
+
+. 0 FEEA7C4 16
+. 7C 99 4B 96 7F 84 49 D6 2C 1C 00 00 41 82 00 A8
+
+. 0 FEEA878 8
+. 2F 19 00 00 41 9A FE 90
+
+. 0 FEEA880 16
+. 7F 25 CB 78 7C 9B E2 14 7F A3 EB 78 48 00 14 C9
+
+. 0 FEEBD54 64
+. 2F 85 00 00 7C 08 02 A6 94 21 FF E0 93 41 00 08 7C BA 2B 78 93 61 00 0C 7C BB 2B 78 93 81 00 10 7C 7C 1B 78 93 E1 00 1C 38 60 00 00 93 A1 00 14 7C 9F 23 78 93 C1 00 18 90 01 00 24 41 9E 00 A0
+
+. 0 FEEBD94 16
+. 80 7C 00 14 80 BC 00 18 7C 03 28 40 40 80 00 4C
+
+. 0 FEEBDA4 12
+. 7F A3 28 50 7C 9D D8 40 40 85 00 08
+
+. 0 FEEBDB0 12
+. 7F 7D DB 78 2B 1D 00 14 41 99 00 A0
+
+. 0 FEEBDBC 8
+. 2F 9D 00 00 41 9E 00 28
+
+. 0 FEEBDC4 8
+. 34 1D FF FF 41 80 00 1C
+
+. 0 FEEBDCC 24
+. 7F A9 03 A6 88 9F 00 00 3B FF 00 01 98 83 00 00 38 63 00 01 42 00 FF F0
+
+. 0 FEEBDD0 20
+. 88 9F 00 00 3B FF 00 01 98 83 00 00 38 63 00 01 42 00 FF F0
+
+. 0 FEEBDE4 16
+. 90 7C 00 14 7F 7D D8 50 2C 9B 00 00 41 86 00 3C
+
+. 0 FEEBE2C 44
+. 7C 7B D0 50 80 E1 00 24 83 41 00 08 83 61 00 0C 7C E8 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEEA890 12
+. 7F 23 C8 50 7C 79 D0 50 4B FF FE 78
+
+. 0 FEEA710 48
+. 83 21 00 34 83 01 00 10 7F 28 03 A6 83 41 00 18 83 21 00 14 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 FEC3180 8
+. 7C 83 E8 00 40 86 01 60
+
+. 0 FEC3188 20
+. 80 9F 04 4C 7C 74 1B 78 89 44 00 00 2D 8A 00 00 41 8E 01 4C
+
+. 0 FEC32E4 12
+. 83 3B 00 00 73 27 80 00 40 82 00 38
+
+. 0 FEC32F0 24
+. 80 7B 00 48 83 63 00 04 39 3B FF FF 2E 09 00 00 91 23 00 04 40 92 00 20
+
+. 0 FEC3308 20
+. 91 23 00 08 7C 00 04 AC 7D 40 18 28 7D 20 19 2D 40 A2 FF F8
+
+. 0 FEC331C 8
+. 2F 8A 00 01 41 9D 1D 8C
+
+. 0 FEC3324 12
+. 81 1F 04 E4 2F 08 00 00 40 9A 00 0C
+
+. 0 FEC3330 8
+. 7E 89 A3 78 4B FF FC D4
+
+. 0 FEC3008 104
+. 81 61 00 00 7D 23 4B 78 83 AB 00 04 81 8B FF B4 7F A8 03 A6 81 CB FF B8 81 EB FF BC 7D 81 81 20 82 0B FF C0 82 2B FF C4 82 4B FF C8 82 6B FF CC 82 8B FF D0 82 AB FF D4 82 CB FF D8 82 EB FF DC 83 0B FF E0 83 2B FF E4 83 4B FF E8 83 6B FF EC 83 8B FF F0 83 AB FF F4 83 CB FF F8 83 EB FF FC 7D 61 5B 78 4E 80 00 20
+
+. 0 FECAE60 20
+. 80 81 00 94 83 C1 00 88 38 21 00 90 7C 88 03 A6 4E 80 00 20
+
+. 0 100182B0 4
+. 4B FF FF 8C
+
+. 0 1001823C 20
+. 80 1F 00 00 7F A4 EB 78 38 A1 00 70 2F 80 00 00 41 9E FF AC
+
+. 0 10018250 44
+. 83 BE 80 18 38 00 00 02 39 20 00 00 98 01 00 70 99 21 00 71 38 01 00 98 80 7D 00 00 39 21 00 08 90 01 00 74 91 21 00 78 48 01 E9 19
+
+. 0 10036B90 8
+. 39 60 00 28 4B FF FF 84
+
+. 0 FEC2F8C 12
+. 2D 83 FF FF 39 20 FF FF 40 8E 00 74
+
+. 0 FEEA6C8 8
+. 7C 9F D0 40 41 85 01 D0
+
+. 0 FEEA89C 8
+. 7F 5F D3 78 4B FF FE 30
+
+. 0 FEEA6D0 8
+. 2B 1F 00 14 41 99 01 D0
+
+. 0 FEEA6D8 8
+. 34 1F FF FF 41 80 00 1C
+
+. 0 FEEA6E0 24
+. 7F E9 03 A6 88 FB 00 00 3B 7B 00 01 98 E3 00 00 38 63 00 01 42 00 FF F0
+
+. 0 FEEA6E4 20
+. 88 FB 00 00 3B 7B 00 01 98 E3 00 00 38 63 00 01 42 00 FF F0
+
+. 0 FEEA6F8 20
+. 90 7D 00 14 7F 3F D0 50 7C 79 C2 14 2F 83 00 00 40 9E 00 74
+
+. 0 FEEA70C 52
+. 7C 79 D0 50 83 21 00 34 83 01 00 10 7F 28 03 A6 83 41 00 18 83 21 00 14 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+. 0 1001827C 8
+. 38 60 00 0A 48 01 E8 F1
+
+. 0 10036B70 8
+. 39 60 00 18 4B FF FF A4
+
+. 0 FEE0CD4 12
+. 94 21 FF E0 7C 88 02 A6 48 0E 71 75
+
+. 0 FEE0CE0 52
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 90 81 00 24 93 E1 00 1C 83 7E 1C 24 93 A1 00 14 7C 7D 1B 78 83 FB 00 00 93 81 00 10 80 1F 00 00 70 09 80 00 40 82 00 54
+
+. 0 FEE0D14 20
+. 80 7F 00 48 3B 82 8B A0 80 A3 00 08 7F 85 E0 00 41 9E 00 34
+
+. 0 FEE0D28 20
+. 39 60 00 00 38 C0 00 01 7D 20 18 28 7C 09 58 00 40 82 00 0C
+
+. 0 FEE0D3C 8
+. 7C C0 19 2D 40 A2 FF F0
+
+. 0 FEE0D30 12
+. 7D 20 18 28 7C 09 58 00 40 82 00 0C
+
+. 0 FEE0D44 12
+. 4C 00 01 2C 2C 89 00 00 40 86 00 B4
+
+. 0 FEE0D50 40
+. 80 7F 00 48 93 83 00 08 81 03 00 04 38 E8 00 01 90 E3 00 04 80 7B 00 00 81 23 00 14 81 43 00 18 7F 09 50 40 40 98 00 7C
+
+. 0 FEE0D78 28
+. 39 89 00 01 9B A9 00 00 57 BC 06 3E 91 83 00 14 80 7F 00 00 70 69 80 00 40 82 00 38
+
+. 0 FEE0D94 24
+. 80 7F 00 48 83 A3 00 04 39 3D FF FF 2F 09 00 00 91 23 00 04 40 9A 00 20
+
+. 0 FEE0DAC 20
+. 91 23 00 08 7C 00 04 AC 7C 00 18 28 7D 20 19 2D 40 A2 FF F8
+
+. 0 FEE0DC0 8
+. 2F 80 00 01 41 9D 00 44
+
+. 0 FEE0DC8 40
+. 80 81 00 24 7F 83 E3 78 83 61 00 0C 83 81 00 10 7C 88 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 10018284 8
+. 80 7D 00 00 48 01 E8 E1
+
+. 0 10036B68 8
+. 39 60 00 14 4B FF FF AC
+
+. 0 FEDBE1C 32
+. 94 21 FF E0 7C 08 02 A6 93 E1 00 1C 7C 7F 1B 79 93 A1 00 14 93 C1 00 18 90 01 00 24 41 82 01 78
+
+. 0 FEDBE3C 12
+. 80 7F 00 00 70 69 80 00 40 82 00 54
+
+. 0 FEDBE48 20
+. 80 7F 00 48 3B A2 8B A0 80 83 00 08 7F 84 E8 00 41 9E 00 34
+
+. 0 FEDBE5C 20
+. 39 60 00 00 38 A0 00 01 7D 20 18 28 7C 09 58 00 40 82 00 0C
+
+. 0 FEDBE70 8
+. 7C A0 19 2D 40 A2 FF F0
+
+. 0 FEDBE64 12
+. 7D 20 18 28 7C 09 58 00 40 82 00 0C
+
+. 0 FEDBE78 12
+. 4C 00 01 2C 2C 89 00 00 40 86 00 C0
+
+. 0 FEDBE84 52
+. 80 7F 00 48 93 A3 00 08 80 E3 00 04 38 C7 00 01 90 C3 00 04 88 1F 00 46 7F E3 FB 78 7C 1D 07 74 7D 9D FA 14 81 4C 00 98 81 0A 00 30 7D 09 03 A6 4E 80 04 21
+
+. 0 FEE9AB0 12
+. 7C 08 02 A6 94 21 FF E0 48 0D E3 99
+
+. 0 FEE9ABC 44
+. 93 A1 00 14 93 C1 00 18 3B A0 00 00 90 01 00 24 7F C8 02 A6 80 A3 00 14 80 83 00 10 93 E1 00 1C 7C 7F 1B 78 7F 85 20 40 40 9D 00 50
+
+. 0 FEE9AE8 12
+. 80 C3 00 60 2C 06 00 00 40 81 00 C4
+
+. 0 FEE9BB4 8
+. 7C A4 28 50 4B FF F1 71
+
+. 0 FEE8D58 12
+. 80 83 00 00 70 89 10 00 41 82 00 C0
+
+. 0 FEE8E20 16
+. 80 03 00 08 80 A3 00 10 7F 80 28 00 41 BE FF 48
+
+. 0 FEE8D74 40
+. 89 1F 00 46 7F 84 E3 78 7F E3 FB 78 7F 65 DB 78 7D 07 07 74 7D 27 FA 14 81 69 00 98 83 AB 00 3C 7F A9 03 A6 4E 80 04 21
+
+. 0 FEEA570 56
+. 2F 85 00 00 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C BB 2B 78 93 81 00 10 7C 7C 1B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C BF 2B 78 93 C1 00 18 90 01 00 24 40 9D 00 3C
+
+. 0 FEEA5A8 20
+. 80 7C 00 3C 7F A4 EB 78 7F E5 FB 78 70 69 00 02 40 82 00 7C
+
+. 0 FEEA5BC 16
+. 80 7C 00 38 7F A4 EB 78 7F E5 FB 78 48 05 7A 11
+
+. 0 FF41FD8 12
+. 81 42 8B A0 2C 0A 00 00 40 82 00 14
+
+. 0 FF41FE4 8
+. 38 00 00 04 44 00 00 02
+
+PASS: Small Exact DHT
+. 0 FF41FEC 4
+. 4C A3 00 20
+
+. 0 FEEA5CC 12
+. 2C 83 00 00 7F BD 1A 14 41 84 00 74
+
+. 0 FEEA5D8 8
+. 7F E3 F8 51 41 81 FF CC
+
+. 0 FEEA5E0 16
+. 80 DC 00 50 7C 7F D8 50 2F 06 00 00 41 98 00 24
+
+. 0 FEEA610 36
+. 80 E1 00 24 83 61 00 0C 83 81 00 10 7C E8 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEE8D9C 32
+. 7C 7D 1B 78 A0 7F 00 44 30 1D FF FF 7C 80 E9 10 7D 43 00 D0 55 40 0F FE 7C 0B 20 39 40 82 00 CC
+
+. 0 FEE8DBC 36
+. 83 9F 00 60 81 3F 00 1C 2F 9C 00 00 91 3F 00 0C 91 3F 00 04 91 3F 00 08 91 3F 00 14 91 3F 00 10 40 9D 00 94
+
+. 0 FEE8E70 12
+. 80 BF 00 00 70 AB 02 02 40 A2 FF 6C
+
+. 0 FEE8E7C 8
+. 81 3F 00 20 4B FF FF 64
+
+. 0 FEE8DE4 20
+. 91 3F 00 18 7F A5 EB 78 7C 85 D8 00 38 60 FF FF 40 A6 00 08
+
+. 0 FEE9BBC 4
+. 4B FF FF 50
+
+. 0 FEE9B0C 8
+. 2C 83 00 00 41 86 00 24
+
+. 0 FEE9B34 16
+. 80 7F 00 04 80 9F 00 08 7C 04 18 51 40 82 00 34
+
+. 0 FEE9B44 48
+. 38 80 FF FF 38 60 FF FF 90 9F 00 50 90 7F 00 54 7F A3 EB 78 83 A1 00 24 83 C1 00 18 7F A8 03 A6 83 E1 00 1C 83 A1 00 14 38 21 00 20 4E 80 00 20
+
+. 0 FEDBEB8 24
+. 80 BF 00 00 21 23 00 00 7C 69 19 14 3B A3 FF FF 70 A9 80 00 40 82 00 1C
+
+. 0 FEDBED0 24
+. 80 7F 00 48 81 23 00 04 39 29 FF FF 2C 89 00 00 91 23 00 04 41 86 00 24
+
+. 0 FEDBF08 20
+. 91 23 00 08 7C 00 04 AC 7C C0 18 28 7D 20 19 2D 40 A2 FF F8
+
+. 0 FEDBF1C 8
+. 2F 06 00 01 40 99 FF C8
+
+. 0 FEDBEE8 32
+. 7F A3 EB 78 80 E1 00 24 83 A1 00 14 83 C1 00 18 7C E8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 1001828C 28
+. 80 01 00 94 83 A1 00 84 83 C1 00 88 7C 08 03 A6 83 E1 00 8C 38 21 00 90 4E 80 00 20
+
+. 0 1000106C 4
+. 4B FF FB 2D
+
+. 0 10000B98 72
+. 3D 20 10 02 7C 08 02 A6 39 29 94 48 94 21 F7 D0 C8 29 00 00 3D 20 10 02 39 29 94 50 38 60 00 80 C8 49 00 00 DB E1 08 28 93 81 08 18 3B 80 00 00 93 A1 08 1C 93 C1 08 20 3B C1 00 10 93 E1 08 24 90 01 08 34 48 00 09 C1
+
+. 0 10036B80 4
+. 4B EB AD 60
+
+. 0 FEEF68C 24
+. 81 5D 00 04 3B 9D 00 08 55 48 E8 FE 38 E8 FF FE 7E 07 00 00 40 92 06 20
+
+. 0 FEEF6A4 112
+. 82 3D 00 08 7F 88 E3 78 92 3B 00 00 83 81 00 74 7D 03 43 78 81 81 00 24 7F 88 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 FEEF788 16
+. 57 3A D1 BE 2B 1A 00 20 3B 1A 00 38 41 99 01 8C
+
+. 0 FEEF798 8
+. 71 40 00 01 41 82 01 CC
+
+. 0 FEEE53C 48
+. 61 63 00 01 81 DE 06 4C 54 69 F8 7A 82 1E 05 F0 90 78 00 04 7E 89 C2 14 3B 58 00 38 3A D8 00 08 7D CF 73 78 83 F6 00 00 2C 1F 00 00 41 82 01 A0
+
+. 0 FEEE708 12
+. 7F 96 A0 00 3A D6 00 04 40 9E FE 50
+
+. 0 FEEE560 12
+. 83 F6 00 00 2C 1F 00 00 41 82 01 A0
+
+. 0 FEEE56C 24
+. 38 A0 00 00 7E 13 83 78 90 B6 00 00 7D F2 7B 78 7D D1 73 78 48 00 00 5C
+
+. 0 FEEE5DC 36
+. 80 FF 00 04 39 00 FF FA 82 FF 00 08 7C FC 40 38 70 E8 00 01 7F BF E2 14 80 DD 00 04 54 DB 00 38 40 82 00 38
+
+. 0 FEEE634 12
+. 83 38 00 30 7C 99 E8 00 41 86 00 B0
+
+. 0 FEEE6EC 28
+. 2F 17 00 00 7D 1C DA 14 61 1C 00 01 93 F8 00 30 93 9F 00 04 7E FF BB 78 40 9A FE D8
+
+. 0 FEEE640 16
+. 7C 9D DA 14 80 04 00 04 70 0A 00 01 41 A2 FF 38
+
+. 0 FEEE650 16
+. 80 BD 00 04 54 A9 00 3C 91 3D 00 04 4B FF FF 54
+
+. 0 FEEE5B0 44
+. 2F 17 00 00 80 DA 00 08 63 87 00 01 93 FA 00 08 93 E6 00 0C 7F 9F E1 2E 90 FF 00 04 90 DF 00 08 93 5F 00 0C 7E FF BB 78 41 9A 01 30
+
+. 0 FEEE600 32
+. 81 7F 00 00 7F EB F8 50 7F 9C 5A 14 81 5F 00 08 81 7F 00 0C 81 8A 00 0C 7C 8C F8 00 40 86 00 FC
+
+. 0 FEEE620 12
+. 82 AB 00 08 7F 15 F8 00 40 9A 00 F0
+
+. 0 FEEE62C 20
+. 91 4B 00 08 91 6A 00 0C 83 38 00 30 7C 99 E8 00 41 86 00 B0
+
+. 0 FEEE714 4
+. 4B FF FF 80
+
+. 0 FEEE694 88
+. 82 61 00 74 81 C1 00 28 7E 68 03 A6 81 E1 00 2C 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+. 0 FEEF97C 24
+. 57 15 18 38 7F F5 E2 14 39 3F 00 30 83 A9 00 0C 7F 9D 48 00 41 9E 00 14
+
+. 0 FEEF920 16
+. 57 3B BA 7E 28 1B 00 14 3B 1B 00 5B 40 A1 FE 6C
+
+. 0 FEEF930 16
+. 57 3D A3 3E 2B 9D 00 0A 3B 1D 00 6E 40 BD FE 5C
+
+. 0 FEEF940 16
+. 57 3F 8B FE 28 9F 00 04 3B 1F 00 77 40 A5 FE 4C
+
+. 0 10004C94 72
+. 81 3E 80 98 3C 00 43 30 90 01 00 10 21 63 00 14 39 60 00 00 7D 6B 59 14 C8 09 00 00 81 3E 80 AC 90 61 00 14 FD 81 00 32 C9 A1 00 10 C8 09 00 00 FC 2D 00 28 FF 81 60 00 7C 00 00 26 54 00 F7 FE 7C 09 58 39 40 82 04 60
+
+. 0 10005138 36
+. FC 21 08 2A 81 3E 80 94 54 60 10 3A 81 7E 80 90 7C 89 00 2E FC 3C 08 24 7C 6B 00 2E 38 84 FF FF 4B FF F6 9D
+
+. 0 1000515C 28
+. 81 3E 80 B4 D8 3B 00 00 38 60 00 00 C8 09 00 00 FC 21 00 32 D8 3B 00 08 4B FF FA E0
+
+. 0 10004C54 64
+. 80 01 00 94 83 61 00 44 83 81 00 48 7C 08 03 A6 83 A1 00 4C 83 C1 00 50 83 E1 00 54 CB 21 00 58 CB 41 00 60 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10004CDC 12
+. FC 1C E0 2A FF 81 00 00 40 9D 04 94
+
+. 0 10004CE8 76
+. 81 3E 80 C0 FD 81 60 2A C9 A9 00 00 81 3E 80 B8 FD BC 03 72 C8 09 00 00 81 3E 80 BC FD 8C 00 28 C8 09 00 00 FD 6D 07 32 81 3E 80 C4 FF CC 00 32 C8 89 00 00 81 3E 80 C8 C8 09 00 00 FD BE 01 32 FF 8B 00 00 FC ED 03 72 41 9C 03 5C
+
+. 0 1000508C 172
+. 81 3E 80 E8 C9 49 00 00 81 3E 80 E0 FD 47 02 B2 C9 A9 00 00 81 3E 80 D8 FD A7 03 72 C9 89 00 00 81 3E 80 D0 FD 4A 01 F2 C8 09 00 00 FD 87 03 32 81 3E 80 CC FC 07 00 32 C9 09 00 00 FD AD 01 F2 81 3E 80 D4 FD 8C 01 F2 C9 29 00 00 81 3E 80 DC FC 00 01 F2 C9 69 00 00 FD 4A 01 F2 81 3E 80 E4 FD AD 01 F2 FD 8C 01 F2 FD 29 00 24 FC 08 38 24 FD 4A 01 F2 FD AD 01 F2 FD 6B 60 24 C9 89 00 00 81 3E 80 EC FD 4A 01 F2 FD 8C 68 24 C9 A9 00 00 FD AD 50 24 FC 00 48 2A FC 00 58 2A FC 00 60 2A FC 00 68 2A FF E0 41 3A 4B FF FD 94
+
+. 0 10004EC8 16
+. FC 3C F0 24 38 60 00 0E 38 81 00 20 48 00 05 E9
+
+. 0 100054BC 56
+. 7C 08 02 A6 94 21 FF E0 42 9F 00 05 93 C1 00 18 7F C8 02 A6 2F 83 00 00 90 01 00 24 7C 8A 23 78 39 60 00 00 80 1E FF F0 7F C0 F2 14 81 1E 80 00 C9 48 00 00 41 9C 00 90
+
+. 0 100054F4 36
+. 70 60 00 01 39 6B 00 01 7C 00 00 26 7C 63 0E 71 54 00 20 3E 7C 00 11 20 54 00 E0 3E 90 01 00 10 41 9E 00 08
+
+. 0 1000551C 8
+. FC 21 00 72 40 82 FF D4
+
+. 0 10005518 12
+. FD 4A 00 72 FC 21 00 72 40 82 FF D4
+
+. 0 10005524 92
+. 3D 20 43 30 6D 60 80 00 91 21 00 08 FD 60 52 10 81 3E 80 14 38 60 00 00 90 01 00 0C C8 01 00 08 C9 A9 00 00 C9 88 00 00 FC 00 68 28 81 3E 80 18 D9 4A 00 00 C9 A9 00 00 FC 00 60 2A FC 00 03 72 FC 00 02 F2 D8 0A 00 08 80 01 00 24 83 C1 00 18 38 21 00 20 7C 08 03 A6 4E 80 00 20
+
+. 0 10004ED8 124
+. FD 5E 07 F2 81 3E 80 C8 38 60 00 00 FD A0 F2 10 C9 61 00 20 C8 09 00 00 81 3E 80 C0 FC 0A 00 32 D9 5B 00 00 C9 89 00 00 FD AD 03 32 FC 00 02 10 FC 00 03 32 FD AD 02 FA D9 BB 00 08 80 01 00 94 83 61 00 44 83 81 00 48 7C 08 03 A6 83 A1 00 4C 83 C1 00 50 83 E1 00 54 CB 21 00 58 CB 41 00 60 CB 61 00 68 CB 81 00 70 CB A1 00 78 CB C1 00 80 CB E1 00 88 38 21 00 90 4E 80 00 20
+
+. 0 10036B78 4
+. 4B DD D9 80
+
+. 0 10036BE8 4
+. 4B DD DD C4
+
+. 0 1000982C 96
+. 81 5E 80 0C FC 40 68 90 3B A0 00 00 D9 A1 00 18 C9 4A 00 00 D9 41 00 10 3D 20 43 30 81 7E 80 18 6F 60 80 00 91 21 00 38 90 01 00 3C C9 6B 00 00 39 60 00 01 C9 A1 00 38 7F 0B E0 00 81 3E 80 24 FD AD 58 28 C8 6A 00 00 C8 09 00 00 FD 80 18 90 FD AD 00 32 FD AD 07 32 FC 2D 07 32 41 99 00 4C
+
+. 0 10000BE0 32
+. 3D 20 10 02 7C 7D 1B 78 39 29 94 00 3B E0 00 00 CB E9 00 00 7F E4 FB 78 7F A3 EB 78 48 00 0A 0D
+
+. 0 10001608 56
+. 81 23 00 00 54 84 18 38 80 03 00 20 55 29 18 38 94 21 FF F0 7D 29 02 14 7C 84 02 14 C8 24 00 08 38 21 00 10 C8 09 00 08 C9 A3 00 10 FC 21 00 24 FC 21 03 72 4E 80 00 20
+
+. 0 10000C00 28
+. 3B FF 00 01 FC 21 F8 7A 2F 9F 00 7F FC 3F 08 24 D8 3E 00 00 3B DE 00 08 40 9D FF DC
+
+. 0 10000BF4 12
+. 7F E4 FB 78 7F A3 EB 78 48 00 0A 0D
+
+. 0 10000C1C 16
+. 7F A3 EB 78 38 81 00 10 38 A1 04 10 48 00 0A 91
+
+. 0 10000C2C 56
+. 3D 20 10 02 39 29 94 58 C8 01 04 10 C9 A9 00 00 3D 20 10 02 C9 89 94 20 3D 20 10 02 FC 00 68 28 39 29 94 60 C9 A9 00 00 FC 00 02 10 FC 00 03 72 FF 80 60 00 40 9D 00 08
+
+. 0 10000C68 32
+. 3D 20 10 02 C8 01 04 38 C9 A9 94 28 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000C8C 32
+. 3D 20 10 02 C8 01 04 60 C9 A9 94 30 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000CB0 32
+. 3D 20 10 02 C8 01 05 28 C9 A9 94 38 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000CD4 32
+. 3D 20 10 02 C8 01 07 30 C9 A9 94 40 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000CF8 8
+. 7F A3 EB 78 48 00 09 69
+
+. 0 FEEEC18 8
+. 71 20 00 02 40 82 02 FC
+
+. 0 FEEEC20 20
+. 81 03 00 30 7F 5F EA 14 83 7E 06 5C 7F 08 F8 00 41 9A 02 54
+
+. 0 FEEEC34 8
+. 71 60 00 02 41 82 03 50
+
+. 0 FEEEF88 24
+. 82 48 00 04 83 7E 06 74 56 51 00 38 7D 68 8A 14 7F 8B D0 40 41 9D FC A0
+
+. 0 FEEEC3C 16
+. 80 1A 00 04 83 7E 06 60 70 0B 00 01 41 82 02 3C
+
+. 0 FEEEC4C 12
+. 28 80 00 08 54 1B 00 38 40 85 02 2C
+
+. 0 FEEEC58 12
+. 82 7C 04 4C 7F 13 D8 40 40 99 02 20
+
+. 0 FEEEC64 8
+. 71 27 00 01 40 82 00 3C
+
+. 0 FEEECA4 8
+. 7F 88 D0 00 41 9E 02 C0
+
+. 0 FEEEF68 20
+. 7F BD DA 14 93 FC 00 30 63 AA 00 01 91 5F 00 04 4B FF FD 94
+
+. 0 FEEED0C 8
+. 28 9D FF FF 40 85 01 24
+
+. 0 FEEED14 12
+. 83 BC 00 04 73 A9 00 01 41 82 02 60
+
+. 0 FEEED20 12
+. 83 FE 05 FC 7F 1C F8 00 41 9A 03 9C
+
+. 0 FEEF0C4 28
+. 82 3C 00 30 81 5E 06 28 81 71 00 04 81 8A 00 00 55 7F 00 38 7F 9F 60 40 41 BC FD 58
+
+. 0 FEEEE34 76
+. 83 A1 00 64 82 21 00 24 7F A8 03 A6 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+
+. 0 FEEF0E0 36
+. 82 EA 00 04 82 4A 00 18 7E D7 F8 50 7E B6 92 14 3A 95 FF EF 7C 94 93 96 3A 64 FF FF 7F 73 91 D7 40 A1 FD 34
+
+. 0 FEEF104 20
+. 83 5E 1A 70 38 60 00 00 83 3A 00 00 7F 29 03 A6 4E 80 04 21
+
+. 0 FF48DC0 32
+. 80 7F 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEEF118 16
+. 83 1C 00 30 7F B8 FA 14 7C 9D 18 00 40 86 FD 10
+
+. 0 FEEF128 16
+. 80 1A 00 00 7C 7B 00 D0 7C 09 03 A6 4E 80 04 21
+
+. 0 FEEF138 16
+. 80 BE 1B B8 81 25 00 00 2F 09 00 00 40 9A 01 D4
+
+. 0 FEEF148 16
+. 81 3A 00 00 38 60 00 00 7D 29 03 A6 4E 80 04 21
+
+. 0 FEEF158 8
+. 2F 83 00 00 41 BE FC D8
+
+. 0 FEEF160 8
+. 7D 43 E8 51 41 A2 FC D0
+
+. 0 FEEF168 32
+. 80 DC 04 4C 7C 6A F8 50 83 5C 00 30 60 7B 00 01 7C EA 30 50 90 FC 04 4C 93 7A 00 04 4B FF FC B0
+
+. 0 10000D00 40
+. 80 01 08 34 7F 83 E3 78 83 A1 08 1C 83 81 08 18 7C 08 03 A6 83 C1 08 20 83 E1 08 24 CB E1 08 28 38 21 08 30 4E 80 00 20
+
+. 0 10001070 12
+. 3C 80 10 02 38 84 95 08 48 01 70 CD
+
+. 0 10036BD8 4
+. 4B E9 41 F8
+
+. 0 10036B90 4
+. 4B E8 C3 44
+
+. 0 10036B70 4
+. 4B EA A1 64
+
+. 0 10036B68 4
+. 4B EA 52 B4
+
+PASS: Simple  DHT
+. 0 1000107C 4
+. 4B FF FC AD
+
+. 0 10000D28 68
+. 3D 20 10 02 7C 08 02 A6 39 29 94 00 94 21 F7 E0 C8 29 00 00 3D 20 10 02 39 29 94 98 38 60 00 80 C8 49 00 00 93 81 08 10 3B 80 00 00 93 A1 08 14 93 C1 08 18 3B C1 00 10 93 E1 08 1C 90 01 08 24 48 00 08 35
+
+. 0 10004D34 400
+. 81 3E 80 CC FC EB 38 24 81 7E 81 24 C8 A9 00 00 81 3E 81 38 C8 0B 00 00 C9 89 00 00 81 3E 81 20 81 7E 81 00 C9 49 00 00 81 3E 81 3C C9 A9 00 00 81 3E 81 0C FD 25 58 24 C9 69 00 00 81 3E 81 40 FD 89 03 32 FD 49 02 BA C8 09 00 00 81 3E 81 28 FD AD 60 28 FD 69 02 F2 FD AD 02 7A C8 09 00 00 81 3E 81 44 FD 4A 02 7A C8 09 00 00 81 3E 81 10 FD AD 02 7A C8 0B 00 00 C9 89 00 00 81 3E 80 FC FD 8C 58 28 C9 09 00 00 81 3E 81 14 FD 09 02 3A C8 09 00 00 81 3E 81 2C FD 8C 02 7A C8 09 00 00 81 3E 81 48 FD 4A 02 7A C8 09 00 00 81 3E 80 F0 FD AD 02 7A C8 C9 00 00 81 3E 81 04 FC C9 01 B2 C8 09 00 00 81 3E 81 18 FD 08 02 7A C8 09 00 00 81 3E 81 30 FD 8C 02 7A C8 09 00 00 81 3E 81 4C FD 4A 02 7A C8 09 00 00 81 3E 80 F4 FD AD 02 7A C9 69 00 00 81 3E 81 08 FD 25 48 28 FD 6B 30 28 C8 09 00 00 81 3E 80 F8 FD 29 01 F2 FD 08 00 32 C8 09 00 00 81 3E 81 1C FD 6B 00 32 C8 09 00 00 81 3E 81 34 FD 09 02 32 FD 8C 00 32 C8 09 00 00 81 3E 81 50 FD 4A 00 32 C8 09 00 00 FD 69 02 F2 FD AD 00 32 FD 89 03 32 FD 49 02 B2 FD A9 03 72 FD 6B 49 FA FD 08 01 F2 FD 8C 01 F2 FD 4A 01 F2 FD AD 01 F2 FD 08 59 FA FD 8C 01 F2 FD 4A 01 F2 FD AD 01 F2 FD 8C 41 FA FD 4A 01 F2 FD AD 01 F2 FD 4A 61 FA FD AD 01 F2 FD AD 51 FA FD AD 01 32
+
+. 0 10004EC4 20
+. FF E5 68 28 FC 3C F0 24 38 60 00 0E 38 81 00 20 48 00 05 E9
+
+. 0 1000B954 12
+. FD 60 60 90 FF 8D 60 00 41 9D 00 08
+
+. 0 1000B964 16
+. 81 3E 80 0C C8 09 00 00 FF 8D 00 00 41 9C 00 18
+
+. 0 10000D6C 20
+. 7C 7D 1B 78 3B E0 00 00 7F E4 FB 78 7F A3 EB 78 48 00 08 8D
+
+. 0 10000D80 12
+. 3B FF 00 01 FC 20 08 50 48 03 5E 41
+
+. 0 10036BC8 8
+. 39 60 00 44 4B FF FF 4C
+
+. 0 FE13750 12
+. 94 21 FF E0 7C 68 02 A6 48 05 F9 59
+
+. 0 FE1375C 28
+. 93 C1 00 08 7F C8 02 A6 DB C1 00 10 DB E1 00 18 FF E0 08 90 90 61 00 24 4B FE DC 9D
+
+. 0 FE01410 32
+. 94 21 FF E0 3C 60 03 F6 D8 21 00 08 60 60 20 01 80 E1 00 08 81 01 00 0C 7C 88 02 A6 48 07 1C 85
+
+. 0 FE01430 40
+. 93 C1 00 18 7C EA 3B 78 7F C8 02 A6 55 4B 00 7E 90 81 00 24 3D 2B C3 70 FD 80 08 90 7F 89 00 40 38 80 00 00 41 9D 01 1C
+
+. 0 FE01458 244
+. 81 3E 04 64 81 5E 04 60 C8 C9 00 00 C9 6A 00 00 80 7E 04 58 FC C6 58 7A 80 FE 04 54 C9 A3 00 00 C9 47 00 00 81 9E 04 5C FC A6 58 28 81 1E 04 4C C8 4C 00 00 81 9E 04 48 FC 8D 01 72 80 7E 04 44 81 7E 04 50 FD 02 01 72 C8 A8 00 00 C8 4B 00 00 FC E1 20 28 FC 6A 38 2A D8 61 00 08 FC 03 50 28 80 E1 00 08 81 01 00 0C D8 C1 00 08 7D 06 46 70 55 00 24 F6 54 C5 00 3C 7D A3 04 AE 39 25 01 64 FD 67 00 28 55 27 18 38 7D 60 1A 14 7D 47 62 14 7C 87 64 AE C8 6A 00 08 FD 6B 40 28 C9 2B 00 08 FD 04 03 72 80 BE 04 68 FD 83 03 72 81 21 00 08 81 41 00 0C FC E2 2A FA 38 CA 03 FF 54 C3 A0 16 FD 44 62 7A FC 0B 02 F2 FC 43 52 7A C8 65 00 00 FC C0 59 FA FD 82 11 BA FC A8 61 BA FD 88 28 2A FC 88 60 28 FC 44 28 2A FD A2 60 FA FC 0D 60 00 40 82 01 A8
+
+. 0 FE0154C 36
+. 90 61 00 08 90 81 00 0C C9 01 00 08 FC 2C 02 32 80 E1 00 24 83 C1 00 18 38 21 00 20 7C E8 03 A6 4E 80 00 20
+
+. 0 FE13778 28
+. 81 3E 22 28 FF C0 08 90 80 09 00 00 FC 20 F8 90 2F 80 FF FF FD A0 F0 90 41 9E 00 4C
+
+. 0 FE13794 4
+. 48 00 45 2D
+
+. 0 FE13798 12
+. FD A0 F0 90 2C 03 00 00 41 82 00 3C
+
+. 0 FE137A4 28
+. 80 9E 11 A8 FC 20 F8 90 FC 40 F8 90 38 60 00 06 C8 04 00 00 FC 9F 00 00 41 85 00 40
+
+. 0 FE137C0 28
+. 80 BE 11 AC FC 20 F8 90 38 60 00 07 FC 40 F8 90 C8 65 00 00 FF 1F 18 00 41 98 00 24
+
+. 0 FE137DC 32
+. 80 C1 00 24 FC 20 68 90 83 C1 00 08 CB C1 00 10 7C C8 03 A6 CB E1 00 18 38 21 00 20 4E 80 00 20
+
+. 0 10000D8C 16
+. 2F 9F 00 7F D8 3E 00 00 3B DE 00 08 40 9D FF DC
+
+. 0 10000D74 12
+. 7F E4 FB 78 7F A3 EB 78 48 00 08 8D
+
+. 0 10036BC8 4
+. 4B DD CB 88
+
+. 0 10000D9C 16
+. 7F A3 EB 78 38 81 00 10 38 A1 04 10 48 00 09 11
+
+. 0 10000DAC 40
+. 3D 20 10 02 C9 A9 94 68 3D 20 10 02 C8 01 04 10 C9 89 94 20 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000DD8 32
+. 3D 20 10 02 C8 01 04 38 C9 A9 94 70 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000DFC 32
+. 3D 20 10 02 C8 01 04 60 C9 A9 94 78 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000E20 32
+. 3D 20 10 02 C8 01 05 28 C9 A9 94 80 FC 00 68 28 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000E44 40
+. 3D 20 10 02 C8 01 07 30 C9 A9 94 88 3D 20 10 02 FC 00 68 28 C9 A9 94 90 FC 00 02 10 FC 00 68 24 FF 80 60 00 40 9D 00 08
+
+. 0 10000E70 8
+. 7F A3 EB 78 48 00 07 F1
+
+. 0 10000E78 36
+. 80 01 08 24 7F 83 E3 78 83 A1 08 14 83 81 08 10 7C 08 03 A6 83 C1 08 18 83 E1 08 1C 38 21 08 20 4E 80 00 20
+
+. 0 10001080 12
+. 3C 80 10 02 38 84 95 14 48 01 70 BD
+
+. 0 10036BD8 4
+. 4B E9 41 F8
+
+PASS: Exp  J1 DHT
+. 0 1000108C 4
+. 4B FF FE 11
+
+. 0 10000E9C 64
+. 3D 20 10 02 7C 08 02 A6 39 29 94 00 94 21 F7 D0 C8 29 00 00 38 60 00 80 DB E1 08 28 FC 40 08 90 93 81 08 18 93 A1 08 1C 3B 80 00 00 93 C1 08 20 3B C1 00 10 93 E1 08 24 90 01 08 34 48 00 06 C5
+
+. 0 10000EDC 32
+. 3D 20 10 02 7C 7D 1B 78 39 29 94 00 3B E0 00 00 CB E9 00 00 7F E4 FB 78 7F A3 EB 78 48 00 07 11
+
+. 0 10000EFC 32
+. 3B FF 00 01 FC 01 00 72 2F 9F 00 7F FC 1F 00 28 FC 00 00 72 D8 1E 00 00 3B DE 00 08 40 9D FF D8
+
+. 0 10000EF0 12
+. 7F E4 FB 78 7F A3 EB 78 48 00 07 11
+
+. 0 10000F1C 16
+. 7F A3 EB 78 38 81 00 10 38 A1 04 10 48 00 07 91
+
+. 0 10000F2C 40
+. 3D 20 10 02 C9 A9 94 A0 3D 20 10 02 C8 01 04 10 FC 00 68 28 FC 00 02 10 FC 00 68 24 C9 A9 94 A8 FF 80 68 00 40 9D 00 08
+
+. 0 10000F58 48
+. 3D 20 10 02 C8 01 04 38 C9 A9 94 B0 3D 20 10 02 FC 00 68 28 C9 A9 94 B8 3D 20 10 02 FC 00 02 10 FC 00 68 24 C9 A9 94 C0 FF 80 68 00 40 9D 00 08
+
+. 0 10000F8C 40
+. 3D 20 10 02 C8 01 04 60 C9 A9 94 C8 3D 20 10 02 FC 00 68 28 FC 00 02 10 FC 00 68 24 C9 A9 94 D0 FF 80 68 00 40 9D 00 08
+
+. 0 10000FB8 48
+. 3D 20 10 02 C8 01 05 28 C9 A9 94 D8 3D 20 10 02 FC 00 68 28 C9 A9 94 E0 3D 20 10 02 FC 00 02 10 FC 00 68 24 C9 A9 94 E8 FF 80 68 00 40 9D 00 08
+
+. 0 10000FEC 44
+. 3D 20 10 02 C8 01 07 30 C9 89 94 F0 3D 20 10 02 39 29 94 60 FC 00 60 28 C9 A9 00 00 FC 00 02 10 FC 00 60 24 FF 80 68 00 40 9D 00 08
+
+. 0 1000101C 8
+. 7F A3 EB 78 48 00 06 45
+
+. 0 10001024 40
+. 80 01 08 34 7F 83 E3 78 83 A1 08 1C 83 81 08 18 7C 08 03 A6 83 C1 08 20 83 E1 08 24 CB E1 08 28 38 21 08 30 4E 80 00 20
+
+. 0 10001090 12
+. 3C 80 10 02 38 84 95 20 48 01 70 AD
+
+PASS: Poly J1 DHT
+. 0 1000109C 4
+. 48 01 7D 45
+
+. 0 10018DE0 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 38 60 00 01 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 10 80 09 00 00 2F 80 00 00 40 9E 00 38
+
+. 0 10018E14 28
+. 81 3E 80 04 38 60 00 00 81 7E 80 00 80 A9 00 00 80 8B 00 00 7F 85 20 00 41 9E 00 1C
+
+. 0 10018E48 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+. 0 100010A0 4
+. 48 03 5B 51
+
+. 0 10036BF0 8
+. 39 60 00 58 4B FF FF 24
+
+. 0 FEB3B98 12
+. 94 21 FF E0 7C 08 02 A6 48 11 42 B1
+
+. 0 FEB3BA4 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 81 00 10 7C 7C 1B 78 93 A1 00 14 83 FE 1A FC 90 01 00 24 80 1F 00 00 2F 80 00 00 41 9E 00 CC
+
+. 0 FEB3BD0 16
+. 81 3F 00 00 80 69 00 04 2C 03 00 00 41 82 00 78
+
+. 0 FEB3BE0 8
+. 7D 2A 4B 78 48 00 00 18
+
+. 0 FEB3BFC 36
+. 80 AA 00 04 39 25 FF FF 55 24 20 36 91 2A 00 04 7D 64 52 14 80 0B 00 08 39 6B 00 08 2F 80 00 03 41 9E 00 54
+
+. 0 FEB3C20 12
+. 2C 80 00 02 2F 00 00 04 40 BD FF C0
+
+. 0 FEB3C2C 4
+. 40 9A FF C0
+
+. 0 FEB3C30 20
+. 81 0B 00 04 7F 84 E3 78 80 6B 00 08 7D 09 03 A6 4E 80 04 21
+
+. 0 100190A8 68
+. 94 21 FF E0 7C A8 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 A1 00 14 90 A1 00 24 80 9E FF F0 7F C4 F2 14 81 7E 80 0C 80 7E 80 08 7C 0B 18 50 7C 09 16 70 2F 89 00 00 3B E9 FF FF 41 9E 00 28
+
+. 0 10019110 4
+. 48 00 02 69
+
+. 0 10019378 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FE 75 41
+
+. 0 100008C4 36
+. 7C 08 02 A6 94 21 FF F0 93 C1 00 08 3F C0 10 03 93 E1 00 0C 90 01 00 14 88 1E 6C B4 2F 80 00 00 40 9E 00 34
+
+. 0 100008E8 8
+. 3F E0 10 03 48 00 00 14
+
+. 0 10000900 16
+. 81 7F FD 44 81 2B 00 00 2F 89 00 00 40 9E FF E4
+
+. 0 10000910 32
+. 38 00 00 01 98 1E 6C B4 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 10019388 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 10019114 28
+. 81 01 00 24 83 A1 00 14 83 C1 00 18 7D 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEB3C44 16
+. 81 5F 00 00 81 8A 00 04 2C 8C 00 00 40 86 FF AC
+
+. 0 2547BAE0 12
+. 94 21 FF A0 7C 08 02 A6 48 01 B5 19
+
+. 0 2547BAEC 132
+. 93 C1 00 58 7F C8 02 A6 7D 80 00 26 91 C1 00 18 92 41 00 28 81 DE 04 F4 82 5E 04 C8 91 E1 00 1C 39 E0 00 0F 92 01 00 20 3A 00 00 00 92 21 00 24 3A 20 01 68 92 C1 00 38 3A C0 00 00 93 E1 00 5C 7C 3F 0B 78 92 61 00 2C 92 81 00 30 92 A1 00 34 92 E1 00 3C 93 01 00 40 93 21 00 44 93 41 00 48 93 61 00 4C 93 81 00 50 93 A1 00 54 90 01 00 64 91 81 00 14 80 92 03 F8 38 72 01 80 7C 89 03 A6 4E 80 04 21
+
+. 0 2547BB70 20
+. 7D 31 92 14 80 69 00 04 54 6A 10 3A 7F 8A 80 40 40 9D 00 2C
+
+. 0 2547BBAC 16
+. 7D 71 90 2E 3A E0 00 00 2E 0B 00 00 41 92 00 20
+
+. 0 2547BBD8 8
+. 7E F3 BB 79 41 82 01 10
+
+. 0 2547BCEC 24
+. 80 B2 03 FC 3B 40 00 00 38 72 01 80 7E 1A 98 40 7C A9 03 A6 4E 80 04 21
+
+. 0 2547BD04 4
+. 40 90 00 68
+
+. 0 2547BD6C 12
+. 35 EF FF FF 3A 31 FF E8 40 80 FD EC
+
+. 0 2547BB60 16
+. 80 92 03 F8 38 72 01 80 7C 89 03 A6 4E 80 04 21
+
+. 0 2547BB84 8
+. 2C 10 00 00 40 82 01 10
+
+. 0 2547BB8C 48
+. 39 0A 00 1E 81 61 00 00 55 07 00 36 7D 50 53 78 7C C7 00 D0 7D 61 31 6E 38 A1 00 17 54 B6 00 36 7D 71 90 2E 3A E0 00 00 2E 0B 00 00 41 92 00 20
+
+. 0 2547BBBC 16
+. 83 4B 00 14 56 EA 10 3A 7F 1A 58 00 41 9A 01 08
+
+. 0 2547BCD0 28
+. 83 6B 01 78 3A F7 00 01 7D 6A B1 2E 39 5B 00 01 91 4B 01 78 81 6B 00 0C 4B FF FE E8
+
+. 0 2547BBD0 8
+. 2E 0B 00 00 40 92 FF E8
+
+. 0 2547BBE0 12
+. 2E 0F 00 00 7F 71 90 2E 40 92 00 08
+
+. 0 2547BBEC 12
+. 83 7B 00 0C 2F 9B 00 00 41 9E 00 F8
+
+. 0 2547BBF8 12
+. 83 9B 00 14 7C 9C D8 00 40 86 FF EC
+
+. 0 2547BC04 12
+. 7F 20 00 26 57 39 9F FE 48 00 00 08
+
+. 0 2547BC14 16
+. 57 2C 10 3A 7F AC B0 2E 7F 1D D8 00 40 9A FF F0
+
+. 0 2547BC24 12
+. 3B 59 00 01 7C 1A B8 40 40 A0 FF C0
+
+. 0 2547BC30 32
+. 57 20 10 3A 7C 14 03 78 7E A0 B2 14 57 58 10 3A 7C 78 B0 2E 81 63 01 E8 2F 8B 00 00 41 9E 00 2C
+
+. 0 2547BC50 12
+. 81 2B 00 00 2C 89 00 00 41 86 00 20
+
+. 0 2547BC5C 16
+. 7C 99 D0 50 54 85 10 3A 7F 09 D8 00 41 9A 02 40
+
+. 0 2547BC6C 12
+. 85 2B 00 04 2C 09 00 00 40 82 FF F0
+
+. 0 2547BC64 8
+. 7F 09 D8 00 41 9A 02 40
+
+. 0 2547BC78 16
+. 7D 38 B0 2E 81 49 01 F4 2F 8A 00 00 40 9E 02 58
+
+. 0 2547BC88 12
+. 3B 5A 00 01 7F 1A B8 40 41 98 FF AC
+
+. 0 2547BC3C 20
+. 57 58 10 3A 7C 78 B0 2E 81 63 01 E8 2F 8B 00 00 41 9E 00 2C
+
+. 0 2547BC94 4
+. 4B FF FF 58
+
+. 0 2547BC10 20
+. 3B 39 00 01 57 2C 10 3A 7F AC B0 2E 7F 1D D8 00 40 9A FF F0
+
+. 0 2547BD08 12
+. 81 7E 03 4C 83 2B 00 00 48 00 00 30
+
+. 0 2547BD40 20
+. 57 48 10 3A 7F 88 B0 2E 81 7C 01 80 75 60 10 00 40 A2 FF C4
+
+. 0 2547BD14 24
+. 55 75 01 04 80 9C 00 04 92 BC 01 80 8A 84 00 00 2F 14 00 00 40 9A 00 C0
+
+. 0 2547BD2C 8
+. 75 69 C0 00 40 82 00 B8
+
+. 0 2547BD34 12
+. 3B 5A 00 01 7F 9A 98 40 40 9C 00 30
+
+. 0 2547BDE8 12
+. 81 3C 00 88 2F 89 00 00 40 9E 00 10
+
+. 0 2547BDF4 12
+. 82 FC 00 54 2C 97 00 00 41 86 FF 38
+
+. 0 2547BE00 12
+. 83 0E 00 00 73 0B 00 02 40 82 00 7C
+
+. 0 2547BE0C 8
+. 2F 09 00 00 41 9A 00 44
+
+. 0 2547BE54 12
+. 81 3C 00 54 2E 09 00 00 41 92 FE F8
+
+. 0 2547BE60 20
+. 80 A9 00 04 83 BC 00 00 7C FD 2A 14 7C E9 03 A6 4E 80 04 21
+
+. 0 FFDEF80 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FF F9 79
+
+. 0 FFDE904 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 88 09 00 00 2F 80 00 00 40 9E 00 50
+
+. 0 FFDE938 12
+. 80 1E 80 04 2F 80 00 00 41 9E 00 10
+
+. 0 FFDE944 12
+. 81 3E 80 08 80 69 00 00 48 01 08 31
+
+. 0 FFEF17C 8
+. 39 60 00 0C 4B FF FF BC
+
+. 0 FFEF13C 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 80 D0 3D 8C 25 47 4E 80 04 20
+
+. 0 FEB3F44 12
+. 94 21 FF D0 7C 08 02 A6 48 11 3F 05
+
+. 0 FEB3F50 72
+. 93 C1 00 28 7F C8 02 A6 93 81 00 20 90 01 00 34 7D 80 00 26 93 21 00 14 2E 03 00 00 81 3E 1A FC 7C 79 1B 78 93 01 00 10 83 89 00 00 93 41 00 18 2F 9C 00 00 93 61 00 1C 93 A1 00 24 93 E1 00 2C 91 81 00 0C 41 9E 00 88
+
+. 0 FEB3F98 28
+. 80 BC 00 04 3B BC 00 08 54 A4 20 36 7C 64 E2 14 3B E3 FF F8 7C 1D F8 40 41 81 00 60
+
+. 0 FEB4010 12
+. 83 9C 00 00 2F 9C 00 00 40 9E FF 80
+
+. 0 FEB401C 4
+. 40 92 00 98
+
+. 0 FEB40B4 60
+. 81 41 00 34 7F 23 CB 78 81 01 00 0C 83 C1 00 28 7D 48 03 A6 83 E1 00 2C 7D 00 81 20 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 38 21 00 30 48 0A BD 24
+
+. 0 FF5FE10 12
+. 94 21 FF D0 7C 08 02 A6 48 06 80 39
+
+. 0 FF5FE1C 64
+. 93 C1 00 28 7F C8 02 A6 93 01 00 10 90 01 00 34 93 A1 00 24 83 1E 1C D4 93 41 00 18 7C 7A 1B 78 83 B8 00 00 93 61 00 1C 3B 60 00 00 93 E1 00 2C 7C 3F 0B 78 93 21 00 14 93 81 00 20 48 00 00 18
+
+. 0 FF5FE70 8
+. 2F 1D 00 00 40 9A FF E8
+
+. 0 FF5FE5C 12
+. 80 7D 00 10 7F 83 D0 00 41 9E 00 48
+
+. 0 FF5FE68 16
+. 7F BB EB 78 83 BD 00 00 2F 1D 00 00 40 9A FF E8
+
+. 0 FF5FE78 52
+. 80 61 00 00 83 83 00 04 83 03 FF E0 7F 88 03 A6 83 23 FF E4 83 43 FF E8 83 63 FF EC 83 83 FF F0 83 A3 FF F4 83 C3 FF F8 83 E3 FF FC 7C 61 1B 78 4E 80 00 20
+
+. 0 FFDE950 8
+. 83 FE 80 0C 48 00 00 14
+
+. 0 FFDE968 16
+. 81 3F 00 00 81 69 00 00 2F 8B 00 00 40 9E FF E4
+
+. 0 FFDE978 36
+. 81 3E 80 00 38 00 00 01 98 09 00 00 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FFDEF90 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547BE74 16
+. 81 1C 01 78 39 68 FF FF 91 7C 01 78 4B FF FE E0
+
+. 0 2547BD60 12
+. 3B 5A 00 01 7F 9A 98 40 41 9C FF D8
+
+. 0 FE2A838 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FC F3 B9
+
+. 0 FDF9BFC 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 88 09 00 00 2F 80 00 00 40 9E 00 50
+
+. 0 FDF9C30 12
+. 80 1E 80 04 2F 80 00 00 41 9E 00 10
+
+. 0 FDF9C3C 12
+. 81 3E 80 08 80 69 00 00 48 07 B7 C1
+
+. 0 FE75404 8
+. 39 60 00 44 4B FF FF 4C
+
+. 0 FE75354 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 83 B8 3D 8C 25 47 4E 80 04 20
+
+. 0 FDF9C48 8
+. 83 FE 80 0C 48 00 00 14
+
+. 0 FDF9C60 16
+. 81 3F 00 00 81 69 00 00 2F 8B 00 00 40 9E FF E4
+
+. 0 FDF9C70 36
+. 81 3E 80 00 38 00 00 01 98 09 00 00 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 FE2A848 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547BE14 36
+. 80 9C 00 90 80 69 00 04 83 64 00 04 80 1C 00 00 57 6C F0 BE 2F 8C 00 00 7F 60 1A 14 3B AC FF FF 41 9E 00 20
+
+. 0 2547BE38 16
+. 57 A6 10 3A 7D 26 D8 2E 7D 29 03 A6 4E 80 04 21
+
+. 0 FE9B5BC 12
+. 94 21 FF F0 7C 08 02 A6 48 12 C8 8D
+
+. 0 FE9B5C8 36
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 81 3E 00 14 80 09 00 04 3B E9 00 04 2F 80 00 00 41 9E 00 18
+
+. 0 FE9B600 24
+. 80 61 00 14 83 C1 00 08 83 E1 00 0C 7C 68 03 A6 38 21 00 10 4E 80 00 20
+
+. 0 2547BE48 12
+. 2F 9D 00 00 3B BD FF FF 40 9E FF E8
+
+. 0 2547BD54 24
+. 81 1C 01 78 39 68 FF FF 91 7C 01 78 3B 5A 00 01 7F 9A 98 40 41 9C FF D8
+
+. 0 2547BD78 12
+. 81 EE 00 00 71 E9 00 80 40 82 01 AC
+
+. 0 2547BD84 100
+. 81 41 00 00 81 CA 00 04 81 8A FF B4 7D C8 03 A6 81 EA FF BC 81 CA FF B8 7D 80 81 20 82 0A FF C0 82 2A FF C4 82 4A FF C8 82 6A FF CC 82 8A FF D0 82 AA FF D4 82 CA FF D8 82 EA FF DC 83 0A FF E0 83 2A FF E4 83 4A FF E8 83 6A FF EC 83 8A FF F0 83 AA FF F4 83 CA FF F8 83 EA FF FC 7D 41 53 78 4E 80 00 20
+
+. 0 FEB3C54 20
+. 80 7F 00 00 80 03 00 00 2F 00 00 00 90 1F 00 00 41 9A FF 64
+
+. 0 FEB3BC8 8
+. 2F 80 00 00 41 9E 00 CC
+
+. 0 FEB3C98 16
+. 83 FE 1B BC 83 BE 1C 38 7F 9F E8 40 40 9C 00 1C
+
+. 0 FEB3CA8 16
+. 81 5F 00 00 3B FF 00 04 7D 49 03 A6 4E 80 04 21
+
+. 0 FEECB0C 12
+. 94 21 FF E0 7C 08 02 A6 48 0D B3 3D
+
+. 0 FEECB18 32
+. 93 C1 00 18 7F C8 02 A6 38 60 00 00 93 81 00 10 93 E1 00 1C 93 A1 00 14 90 01 00 24 4B FF F9 8D
+
+. 0 FEEC4C0 12
+. 94 21 FF B0 7C 88 02 A6 48 0D B9 89
+
+. 0 FEEC4CC 88
+. 93 C1 00 48 7F C8 02 A6 92 C1 00 28 90 81 00 54 7D 80 00 26 92 E1 00 2C 3A E0 00 00 82 DE 1B 48 93 E1 00 4C 7C 7F 1B 78 81 36 00 B8 93 01 00 30 31 69 FF FF 7C 0B 49 10 93 21 00 34 2D 80 00 00 93 41 00 38 93 61 00 3C 93 81 00 40 93 A1 00 44 91 81 00 24 40 8E 02 48
+
+. 0 FEEC524 20
+. 80 7E 05 E4 92 E1 00 14 90 61 00 10 2E 1F 00 00 41 92 00 50
+
+. 0 FEEC584 24
+. 83 1E 1B EC 83 3E 05 EC 83 F8 00 00 83 99 00 00 2C 9F 00 00 41 86 01 28
+
+. 0 FEEC59C 16
+. 83 5E 05 E8 3B 60 00 00 93 FA 00 00 41 92 00 5C
+
+. 0 FEEC604 12
+. 80 7F 00 60 2F 03 00 00 40 99 01 44
+
+. 0 FEEC750 16
+. 81 7F 00 14 81 3F 00 10 7C 8B 48 40 40 A5 FE B4
+
+. 0 FEEC610 12
+. 89 5F 00 46 2C 0A 00 00 40 82 00 44
+
+. 0 FEEC61C 4
+. 40 99 00 40
+
+. 0 FEEC65C 4
+. 41 92 00 44
+
+. 0 FEEC6A0 16
+. 80 19 00 00 93 7A 00 00 7F 9C 00 00 41 9E 00 9C
+
+. 0 FEEC748 8
+. 83 FF 00 34 4B FF FF 6C
+
+. 0 FEEC6B8 8
+. 2C 1F 00 00 40 82 FE E8
+
+. 0 FEEC5A4 8
+. 93 FA 00 00 41 92 00 5C
+
+. 0 FEEC6C0 4
+. 41 92 00 38
+
+. 0 FEEC6F8 4
+. 40 8E 00 88
+
+. 0 FEEC6FC 68
+. 7E E3 BB 78 81 81 00 24 82 E1 00 54 82 C1 00 28 7D 81 81 20 7E E8 03 A6 83 01 00 30 82 E1 00 2C 83 21 00 34 83 41 00 38 83 61 00 3C 83 81 00 40 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+
+. 0 FEECB38 20
+. 81 3E 1B EC 7C 7C 1B 78 83 E9 00 00 2F 9F 00 00 41 9E 00 80
+
+. 0 FEECB4C 8
+. 3B A0 FF FF 48 00 00 14
+
+. 0 FEECB64 20
+. 80 1F 00 00 70 09 00 02 54 03 EF FE 2F 83 00 00 40 A2 FF E0
+
+. 0 FEECB54 16
+. 93 BF 00 60 83 FF 00 34 2F 1F 00 00 41 9A 00 68
+
+. 0 FEECB78 8
+. 70 09 10 00 41 9E 00 08
+
+. 0 FEECB84 24
+. 80 DF 00 60 7F E3 FB 78 38 80 00 00 38 A0 00 00 2C 86 00 00 41 86 FF BC
+
+. 0 FEECB9C 28
+. 89 9F 00 46 7D 8A 07 74 7D 0A FA 14 80 E8 00 98 81 67 00 2C 7D 69 03 A6 4E 80 04 21
+
+. 0 FEE8C4C 24
+. 7C 08 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 90 01 00 14 48 00 33 3D
+
+. 0 FEEBF9C 68
+. 7D 48 02 A6 94 21 FF E0 93 81 00 10 7C BC 2B 78 93 A1 00 14 7C 9D 23 78 91 41 00 24 93 E1 00 1C 7C 7F 1B 78 93 C1 00 18 89 03 00 46 7D 07 07 74 7C C7 1A 14 80 86 00 98 81 64 00 30 7D 69 03 A6 4E 80 04 21
+
+. 0 FEEBFE0 32
+. 20 1D 00 00 7D 20 E9 14 2F 83 FF FF 21 7C 00 00 7C 0B E1 14 7D 2B 03 79 38 60 00 00 41 9E 00 54
+
+. 0 FEEC000 20
+. 7F A4 EB 78 7F E3 FB 78 38 BF 00 48 38 C0 00 00 40 82 00 60
+
+. 0 FEEC070 16
+. 81 9F 00 00 38 9F 00 47 61 80 00 02 4B FF FF AC
+
+. 0 FEEC028 8
+. 90 1F 00 00 4B FF FB 51
+
+. 0 FEEBBB8 16
+. 80 03 00 00 7D 23 4B 78 70 0B 00 01 40 A2 00 1C
+
+. 0 FEEBBC8 20
+. 80 DF 00 20 7C A9 30 50 38 85 0F FF 54 84 00 26 48 06 26 A5
+
+. 0 FF4E27C 8
+. 38 00 00 5B 44 00 00 02
+
+. 0 FF4E284 4
+. 4C A3 00 20
+
+. 0 FEEBC20 40
+. 80 61 00 24 90 1F 00 00 83 61 00 0C 7C 68 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEEC030 64
+. 38 A0 00 00 7F E3 FB 78 90 BF 00 0C 90 BF 00 18 90 BF 00 14 90 BF 00 10 90 BF 00 08 90 BF 00 04 83 81 00 24 83 A1 00 14 7F 88 03 A6 83 C1 00 18 83 81 00 10 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEE8C64 12
+. 2F 83 00 00 38 60 00 00 41 9E 00 24
+
+. 0 FEE8C70 52
+. 80 9F 00 1C 7F E3 FB 78 90 9F 00 08 90 9F 00 18 90 9F 00 14 90 9F 00 10 90 9F 00 0C 90 9F 00 04 80 A1 00 14 83 E1 00 08 38 21 00 10 7C A8 03 A6 4E 80 00 20
+
+. 0 FEECBB8 16
+. 93 BF 00 60 83 FF 00 34 2F 1F 00 00 40 9A FF A0
+
+. 0 FEECB80 4
+. 41 A2 FF D4
+
+. 0 FEECBC8 36
+. 80 81 00 24 7F 83 E3 78 83 A1 00 14 83 81 00 10 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+. 0 FEB3CB8 8
+. 7F 9F E8 40 41 9C FF EC
+
+. 0 FEB3CC0 8
+. 7F 83 E3 78 48 06 21 41
+
+. 0 FF15E04 8
+. 94 21 FF E0 48 0B 20 49
+
+. 0 FF15E0C 40
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 93 E1 00 1C 7C 7F 1B 78 81 3E 1C 4C 7F A9 12 14 38 00 00 EA 7F E3 FB 78 44 00 00 02
+
+==3289== 
diff --git a/VEX/orig_ppc32/return0.orig b/VEX/orig_ppc32/return0.orig
new file mode 100644
index 0000000..75adad9
--- /dev/null
+++ b/VEX/orig_ppc32/return0.orig
@@ -0,0 +1,60452 @@
+==== BB 0 _start(0x254804D4) approx BBs exec'd 0 ====
+
+	0x254804D4:  7C230B78  or r3,r1,r1
+	   0: GETL       	R1, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254804D8:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x254804DC:  3821FFF0  addi r1,r1,-16
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0xFFFFFFF0, t4
+	   8: PUTL       	t4, R1
+	   9: INCEIPL       	$4
+
+	0x254804E0:  90810000  stw r4,0(r1)
+	  10: GETL       	R4, t6
+	  11: GETL       	R1, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x254804E4:  4BFF1581  bl 0x25471A64
+	  14: MOVL       	$0x254804E8, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x25471A64  ($4)
+
+
+. 0 254804D4 20
+. 7C 23 0B 78 38 80 00 00 38 21 FF F0 90 81 00 00 4B FF 15 81
+
+==== BB 1 _dl_start(0x25471A64) approx BBs exec'd 0 ====
+
+	0x25471A64:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25471A68:  3960004B  li r11,75
+	   3: MOVL       	$0x4B, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x25471A6C:  9421FD50  stwu r1,-688(r1)
+	   6: GETL       	R1, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xFFFFFD50, t6
+	   9: PUTL       	t6, R1
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x25471A70:  7D6903A6  mtctr r11
+	  12: GETL       	R11, t8
+	  13: PUTL       	t8, CTR
+	  14: INCEIPL       	$4
+
+	0x25471A74:  92E1028C  stw r23,652(r1)
+	  15: GETL       	R23, t10
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x28C, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0x25471A78:  900102B4  stw r0,692(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x2B4, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x25471A7C:  7C771B78  or r23,r3,r3
+	  25: GETL       	R3, t18
+	  26: PUTL       	t18, R23
+	  27: INCEIPL       	$4
+
+	0x25471A80:  93010290  stw r24,656(r1)
+	  28: GETL       	R24, t20
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x290, t22
+	  31: STL       	t20, (t22)
+	  32: INCEIPL       	$4
+
+	0x25471A84:  38000000  li r0,0
+	  33: MOVL       	$0x0, t24
+	  34: PUTL       	t24, R0
+	  35: INCEIPL       	$4
+
+	0x25471A88:  93210294  stw r25,660(r1)
+	  36: GETL       	R25, t26
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x294, t28
+	  39: STL       	t26, (t28)
+	  40: INCEIPL       	$4
+
+	0x25471A8C:  39210030  addi r9,r1,48
+	  41: GETL       	R1, t30
+	  42: ADDL       	$0x30, t30
+	  43: PUTL       	t30, R9
+	  44: INCEIPL       	$4
+
+	0x25471A90:  93410298  stw r26,664(r1)
+	  45: GETL       	R26, t32
+	  46: GETL       	R1, t34
+	  47: ADDL       	$0x298, t34
+	  48: STL       	t32, (t34)
+	  49: INCEIPL       	$4
+
+	0x25471A94:  9361029C  stw r27,668(r1)
+	  50: GETL       	R27, t36
+	  51: GETL       	R1, t38
+	  52: ADDL       	$0x29C, t38
+	  53: STL       	t36, (t38)
+	  54: INCEIPL       	$4
+
+	0x25471A98:  938102A0  stw r28,672(r1)
+	  55: GETL       	R28, t40
+	  56: GETL       	R1, t42
+	  57: ADDL       	$0x2A0, t42
+	  58: STL       	t40, (t42)
+	  59: INCEIPL       	$4
+
+	0x25471A9C:  93A102A4  stw r29,676(r1)
+	  60: GETL       	R29, t44
+	  61: GETL       	R1, t46
+	  62: ADDL       	$0x2A4, t46
+	  63: STL       	t44, (t46)
+	  64: INCEIPL       	$4
+
+	0x25471AA0:  93C102A8  stw r30,680(r1)
+	  65: GETL       	R30, t48
+	  66: GETL       	R1, t50
+	  67: ADDL       	$0x2A8, t50
+	  68: STL       	t48, (t50)
+	  69: INCEIPL       	$4
+
+	0x25471AA4:  93E102AC  stw r31,684(r1)
+	  70: GETL       	R31, t52
+	  71: GETL       	R1, t54
+	  72: ADDL       	$0x2AC, t54
+	  73: STL       	t52, (t54)
+	  74: INCEIPL       	$4
+
+	0x25471AA8:  90090000  stw r0,0(r9)
+	  75: GETL       	R0, t56
+	  76: GETL       	R9, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0x25471AAC:  39290004  addi r9,r9,4
+	  79: GETL       	R9, t60
+	  80: ADDL       	$0x4, t60
+	  81: PUTL       	t60, R9
+	  82: INCEIPL       	$4
+
+	0x25471AB0:  4200FFF8  bc 16,0,0x25471AA8
+	  83: GETL       	CTR, t62
+	  84: ADDL       	$0xFFFFFFFF, t62
+	  85: PUTL       	t62, CTR
+	  86: JIFZL       	t62, $0x25471AB4
+	  87: JMPo       	$0x25471AA8  ($4)
+
+
+. 0 25471A64 80
+. 7C 08 02 A6 39 60 00 4B 94 21 FD 50 7D 69 03 A6 92 E1 02 8C 90 01 02 B4 7C 77 1B 78 93 01 02 90 38 00 00 00 93 21 02 94 39 21 00 30 93 41 02 98 93 61 02 9C 93 81 02 A0 93 A1 02 A4 93 C1 02 A8 93 E1 02 AC 90 09 00 00 39 29 00 04 42 00 FF F8
+
+==== BB 2 (0x25471AA8) approx BBs exec'd 0 ====
+
+	0x25471AA8:  90090000  stw r0,0(r9)
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25471AAC:  39290004  addi r9,r9,4
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25471AB0:  4200FFF8  bc 16,0,0x25471AA8
+	   8: GETL       	CTR, t6
+	   9: ADDL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, CTR
+	  11: JIFZL       	t6, $0x25471AB4
+	  12: JMPo       	$0x25471AA8  ($4)
+
+
+. 0 25471AA8 12
+. 90 09 00 00 39 29 00 04 42 00 FF F8
+
+==== BB 3 (0x25471AB4) approx BBs exec'd 0 ====
+
+	0x25471AB4:  48000009  bl 0x25471ABC
+	   0: MOVL       	$0x25471AB8, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25471ABC  ($4)
+
+
+. 0 25471AB4 4
+. 48 00 00 09
+
+==== BB 4 (0x25471ABC) approx BBs exec'd 0 ====
+
+	0x25471ABC:  7CE802A6  mflr r7
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x25471AC0:  3B010010  addi r24,r1,16
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x10, t2
+	   5: PUTL       	t2, R24
+	   6: INCEIPL       	$4
+
+	0x25471AC4:  4802553D  bl 0x25497000
+	   7: MOVL       	$0x25471AC8, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25471ABC 12
+. 7C E8 02 A6 3B 01 00 10 48 02 55 3D
+
+==== BB 5 (0x25497000) approx BBs exec'd 0 ====
+
+	0x25497000:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0x25497004, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+. 0 25497000 4
+. 4E 80 00 21
+
+==== BB 6 (0x25471AC8) approx BBs exec'd 0 ====
+
+	0x25471AC8:  7D4802A6  mflr r10
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x25471ACC:  81870000  lwz r12,0(r7)
+	   3: GETL       	R7, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R12
+	   6: INCEIPL       	$4
+
+	0x25471AD0:  810A0000  lwz r8,0(r10)
+	   7: GETL       	R10, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R8
+	  10: INCEIPL       	$4
+
+	0x25471AD4:  5586302E  rlwinm r6,r12,6,0,23
+	  11: GETL       	R12, t10
+	  12: ROLL       	$0x6, t10
+	  13: ANDL       	$0xFFFFFF00, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x25471AD8:  7CC43670  srawi r4,r6,6
+	  16: GETL       	R6, t12
+	  17: SARL       	$0x6, t12  (-wCa)
+	  18: PUTL       	t12, R4
+	  19: INCEIPL       	$4
+
+	0x25471ADC:  7CA83850  subf r5,r8,r7
+	  20: GETL       	R8, t14
+	  21: GETL       	R7, t16
+	  22: SUBL       	t14, t16
+	  23: PUTL       	t16, R5
+	  24: INCEIPL       	$4
+
+	0x25471AE0:  39010030  addi r8,r1,48
+	  25: GETL       	R1, t18
+	  26: ADDL       	$0x30, t18
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0x25471AE4:  7CE52214  add r7,r5,r4
+	  29: GETL       	R5, t20
+	  30: GETL       	R4, t22
+	  31: ADDL       	t20, t22
+	  32: PUTL       	t22, R7
+	  33: INCEIPL       	$4
+
+	0x25471AE8:  90E10010  stw r7,16(r1)
+	  34: GETL       	R7, t24
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x10, t26
+	  37: STL       	t24, (t26)
+	  38: INCEIPL       	$4
+
+	0x25471AEC:  806A0000  lwz r3,0(r10)
+	  39: GETL       	R10, t28
+	  40: LDL       	(t28), t30
+	  41: PUTL       	t30, R3
+	  42: INCEIPL       	$4
+
+	0x25471AF0:  7C07182E  lwzx r0,r7,r3
+	  43: GETL       	R3, t32
+	  44: GETL       	R7, t34
+	  45: ADDL       	t34, t32
+	  46: LDL       	(t32), t36
+	  47: PUTL       	t36, R0
+	  48: INCEIPL       	$4
+
+	0x25471AF4:  7D271A14  add r9,r7,r3
+	  49: GETL       	R7, t38
+	  50: GETL       	R3, t40
+	  51: ADDL       	t38, t40
+	  52: PUTL       	t40, R9
+	  53: INCEIPL       	$4
+
+	0x25471AF8:  91210018  stw r9,24(r1)
+	  54: GETL       	R9, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x18, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0x25471AFC:  7D2A4B78  or r10,r9,r9
+	  59: GETL       	R9, t46
+	  60: PUTL       	t46, R10
+	  61: INCEIPL       	$4
+
+	0x25471B00:  2F800000  cmpi cr7,r0,0
+	  62: GETL       	R0, t48
+	  63: CMP0L       	t48, t50  (-rSo)
+	  64: ICRFL       	t50, $0x7, CR
+	  65: INCEIPL       	$4
+
+	0x25471B04:  419E0068  bc 12,30,0x25471B6C
+	  66: Js30o       	$0x25471B6C
+
+
+. 0 25471AC8 64
+. 7D 48 02 A6 81 87 00 00 81 0A 00 00 55 86 30 2E 7C C4 36 70 7C A8 38 50 39 01 00 30 7C E5 22 14 90 E1 00 10 80 6A 00 00 7C 07 18 2E 7D 27 1A 14 91 21 00 18 7D 2A 4B 78 2F 80 00 00 41 9E 00 68
+
+==== BB 7 (0x25471B08) approx BBs exec'd 0 ====
+
+	0x25471B08:  3FE06FFF  lis r31,28671
+	   0: MOVL       	$0x6FFF0000, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0x25471B0C:  3F607000  lis r27,28672
+	   3: MOVL       	$0x70000000, t2
+	   4: PUTL       	t2, R27
+	   5: INCEIPL       	$4
+
+	0x25471B10:  3F406FFF  lis r26,28671
+	   6: MOVL       	$0x6FFF0000, t4
+	   7: PUTL       	t4, R26
+	   8: INCEIPL       	$4
+
+	0x25471B14:  3F206FFF  lis r25,28671
+	   9: MOVL       	$0x6FFF0000, t6
+	  10: PUTL       	t6, R25
+	  11: INCEIPL       	$4
+
+	0x25471B18:  3FA06FFF  lis r29,28671
+	  12: MOVL       	$0x6FFF0000, t8
+	  13: PUTL       	t8, R29
+	  14: INCEIPL       	$4
+
+	0x25471B1C:  3F806FFF  lis r28,28671
+	  15: MOVL       	$0x6FFF0000, t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0x25471B20:  7C0B0378  or r11,r0,r0
+	  18: GETL       	R0, t12
+	  19: PUTL       	t12, R11
+	  20: INCEIPL       	$4
+
+	0x25471B24:  63E6FFFF  ori r6,r31,0xFFFF
+	  21: MOVL       	$0x6FFFFFFF, t14
+	  22: PUTL       	t14, R6
+	  23: INCEIPL       	$4
+
+	0x25471B28:  63650021  ori r5,r27,0x21
+	  24: MOVL       	$0x70000021, t16
+	  25: PUTL       	t16, R5
+	  26: INCEIPL       	$4
+
+	0x25471B2C:  6344FDFF  ori r4,r26,0xFDFF
+	  27: MOVL       	$0x6FFFFDFF, t18
+	  28: PUTL       	t18, R4
+	  29: INCEIPL       	$4
+
+	0x25471B30:  6323FE34  ori r3,r25,0xFE34
+	  30: MOVL       	$0x6FFFFE34, t20
+	  31: PUTL       	t20, R3
+	  32: INCEIPL       	$4
+
+	0x25471B34:  63BDFEFF  ori r29,r29,0xFEFF
+	  33: MOVL       	$0x6FFFFEFF, t22
+	  34: PUTL       	t22, R29
+	  35: INCEIPL       	$4
+
+	0x25471B38:  639CFF40  ori r28,r28,0xFF40
+	  36: MOVL       	$0x6FFFFF40, t24
+	  37: PUTL       	t24, R28
+	  38: INCEIPL       	$4
+
+	0x25471B3C:  2C0B0021  cmpi cr0,r11,33
+	  39: GETL       	R11, t26
+	  40: MOVL       	$0x21, t30
+	  41: CMPL       	t26, t30, t28  (-rSo)
+	  42: ICRFL       	t28, $0x0, CR
+	  43: INCEIPL       	$4
+
+	0x25471B40:  5560103A  rlwinm r0,r11,2,0,29
+	  44: GETL       	R11, t32
+	  45: SHLL       	$0x2, t32
+	  46: PUTL       	t32, R0
+	  47: INCEIPL       	$4
+
+	0x25471B44:  40810018  bc 4,1,0x25471B5C
+	  48: Jc01o       	$0x25471B5C
+
+
+. 0 25471B08 64
+. 3F E0 6F FF 3F 60 70 00 3F 40 6F FF 3F 20 6F FF 3F A0 6F FF 3F 80 6F FF 7C 0B 03 78 63 E6 FF FF 63 65 00 21 63 44 FD FF 63 23 FE 34 63 BD FE FF 63 9C FF 40 2C 0B 00 21 55 60 10 3A 40 81 00 18
+
+==== BB 8 (0x25471B5C) approx BBs exec'd 0 ====
+
+	0x25471B5C:  7D48012E  stwx r10,r8,r0
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t2, t0
+	   3: GETL       	R10, t4
+	   4: STL       	t4, (t0)
+	   5: INCEIPL       	$4
+
+	0x25471B60:  856A0008  lwzu r11,8(r10)
+	   6: GETL       	R10, t6
+	   7: ADDL       	$0x8, t6
+	   8: PUTL       	t6, R10
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x25471B64:  2C8B0000  cmpi cr1,r11,0
+	  12: GETL       	R11, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0x25471B68:  4086FFD4  bc 4,6,0x25471B3C
+	  16: Jc06o       	$0x25471B3C
+
+
+. 0 25471B5C 16
+. 7D 48 01 2E 85 6A 00 08 2C 8B 00 00 40 86 FF D4
+
+==== BB 9 (0x25471B3C) approx BBs exec'd 0 ====
+
+	0x25471B3C:  2C0B0021  cmpi cr0,r11,33
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x21, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25471B40:  5560103A  rlwinm r0,r11,2,0,29
+	   5: GETL       	R11, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x25471B44:  40810018  bc 4,1,0x25471B5C
+	   9: Jc01o       	$0x25471B5C
+
+
+. 0 25471B3C 12
+. 2C 0B 00 21 55 60 10 3A 40 81 00 18
+
+==== BB 10 (0x25471B48) approx BBs exec'd 0 ====
+
+	0x25471B48:  7C0B3050  subf r0,r11,r6
+	   0: GETL       	R11, t0
+	   1: GETL       	R6, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471B4C:  2880000F  cmpli cr1,r0,15
+	   5: GETL       	R0, t4
+	   6: MOVL       	$0xF, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25471B50:  418502D8  bc 12,5,0x25471E28
+	  10: Js05o       	$0x25471E28
+
+
+. 0 25471B48 12
+. 7C 0B 30 50 28 80 00 0F 41 85 02 D8
+
+==== BB 11 (0x25471B54) approx BBs exec'd 0 ====
+
+	0x25471B54:  7C0B2850  subf r0,r11,r5
+	   0: GETL       	R11, t0
+	   1: GETL       	R5, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471B58:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25471B5C:  7D48012E  stwx r10,r8,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R8, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R10, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x25471B60:  856A0008  lwzu r11,8(r10)
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x8, t12
+	  17: PUTL       	t12, R10
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0x25471B64:  2C8B0000  cmpi cr1,r11,0
+	  21: GETL       	R11, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x1, CR
+	  24: INCEIPL       	$4
+
+	0x25471B68:  4086FFD4  bc 4,6,0x25471B3C
+	  25: Jc06o       	$0x25471B3C
+
+
+. 0 25471B54 24
+. 7C 0B 28 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2C 8B 00 00 40 86 FF D4
+
+==== BB 12 (0x25471E28) approx BBs exec'd 0 ====
+
+	0x25471E28:  5579083C  rlwinm r25,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0x25471E2C:  7F2C0E70  srawi r12,r25,1
+	   4: GETL       	R25, t2
+	   5: SARL       	$0x1, t2  (-wCa)
+	   6: PUTL       	t2, R12
+	   7: INCEIPL       	$4
+
+	0x25471E30:  7D8960F8  nor r9,r12,r12
+	   8: GETL       	R12, t4
+	   9: NOTL       	t4
+	  10: PUTL       	t4, R9
+	  11: INCEIPL       	$4
+
+	0x25471E34:  200C0031  subfic r0,r12,49
+	  12: GETL       	R12, t6
+	  13: MOVL       	$0x31, t8
+	  14: SBBL       	t6, t8  (-wCa)
+	  15: PUTL       	t8, R0
+	  16: INCEIPL       	$4
+
+	0x25471E38:  2B090002  cmpli cr6,r9,2
+	  17: GETL       	R9, t10
+	  18: MOVL       	$0x2, t14
+	  19: CMPUL       	t10, t14, t12  (-rSo)
+	  20: ICRFL       	t12, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x25471E3C:  40B9FD1C  bc 5,25,0x25471B58
+	  22: Jc25o       	$0x25471B58
+
+
+. 0 25471E28 24
+. 55 79 08 3C 7F 2C 0E 70 7D 89 60 F8 20 0C 00 31 2B 09 00 02 40 B9 FD 1C
+
+==== BB 13 (0x25471E40) approx BBs exec'd 0 ====
+
+	0x25471E40:  7F4B2050  subf r26,r11,r4
+	   0: GETL       	R11, t0
+	   1: GETL       	R4, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25471E44:  2B9A000B  cmpli cr7,r26,11
+	   5: GETL       	R26, t4
+	   6: MOVL       	$0xB, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x25471E48:  419D0010  bc 12,29,0x25471E58
+	  10: Js29o       	$0x25471E58
+
+
+. 0 25471E40 12
+. 7F 4B 20 50 2B 9A 00 0B 41 9D 00 10
+
+==== BB 14 (0x25471E4C) approx BBs exec'd 0 ====
+
+	0x25471E4C:  7C0B1850  subf r0,r11,r3
+	   0: GETL       	R11, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471E50:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25471E54:  4BFFFD08  b 0x25471B5C
+	   9: JMPo       	$0x25471B5C  ($4)
+
+
+. 0 25471E4C 12
+. 7C 0B 18 50 54 00 10 3A 4B FF FD 08
+
+==== BB 15 (0x25471B6C) approx BBs exec'd 0 ====
+
+	0x25471B6C:  81780000  lwz r11,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25471B70:  2F0B0000  cmpi cr6,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x25471B74:  419A00B0  bc 12,26,0x25471C24
+	   8: Js26o       	$0x25471C24
+
+
+. 0 25471B6C 12
+. 81 78 00 00 2F 0B 00 00 41 9A 00 B0
+
+==== BB 16 (0x25471B78) approx BBs exec'd 0 ====
+
+	0x25471B78:  81280010  lwz r9,16(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25471B7C:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25471B80:  419E0010  bc 12,30,0x25471B90
+	   9: Js30o       	$0x25471B90
+
+
+. 0 25471B78 12
+. 81 28 00 10 2F 89 00 00 41 9E 00 10
+
+==== BB 17 (0x25471B84) approx BBs exec'd 0 ====
+
+	0x25471B84:  81490004  lwz r10,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25471B88:  7C6A5A14  add r3,r10,r11
+	   5: GETL       	R10, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25471B8C:  90690004  stw r3,4(r9)
+	  10: GETL       	R3, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471B90:  8128000C  lwz r9,12(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0xC, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471B94:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25471B98:  41820010  bc 12,2,0x25471BA8
+	  24: Js02o       	$0x25471BA8
+
+
+. 0 25471B84 24
+. 81 49 00 04 7C 6A 5A 14 90 69 00 04 81 28 00 0C 2C 09 00 00 41 82 00 10
+
+==== BB 18 (0x25471B9C) approx BBs exec'd 0 ====
+
+	0x25471B9C:  80A90004  lwz r5,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25471BA0:  7C855A14  add r4,r5,r11
+	   5: GETL       	R5, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25471BA4:  90890004  stw r4,4(r9)
+	  10: GETL       	R4, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471BA8:  81280014  lwz r9,20(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x14, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471BAC:  2C890000  cmpi cr1,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x1, CR
+	  23: INCEIPL       	$4
+
+	0x25471BB0:  41860010  bc 12,6,0x25471BC0
+	  24: Js06o       	$0x25471BC0
+
+
+. 0 25471B9C 24
+. 80 A9 00 04 7C 85 5A 14 90 89 00 04 81 28 00 14 2C 89 00 00 41 86 00 10
+
+==== BB 19 (0x25471BB4) approx BBs exec'd 0 ====
+
+	0x25471BB4:  80E90004  lwz r7,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25471BB8:  7CC75A14  add r6,r7,r11
+	   5: GETL       	R7, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25471BBC:  90C90004  stw r6,4(r9)
+	  10: GETL       	R6, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471BC0:  81280018  lwz r9,24(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x18, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471BC4:  2F090000  cmpi cr6,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x6, CR
+	  23: INCEIPL       	$4
+
+	0x25471BC8:  419A0010  bc 12,26,0x25471BD8
+	  24: Js26o       	$0x25471BD8
+
+
+. 0 25471BB4 24
+. 80 E9 00 04 7C C7 5A 14 90 C9 00 04 81 28 00 18 2F 09 00 00 41 9A 00 10
+
+==== BB 20 (0x25471BCC) approx BBs exec'd 0 ====
+
+	0x25471BCC:  83A90004  lwz r29,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25471BD0:  7F9D5A14  add r28,r29,r11
+	   5: GETL       	R29, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25471BD4:  93890004  stw r28,4(r9)
+	  10: GETL       	R28, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471BD8:  8128001C  lwz r9,28(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471BDC:  2F890000  cmpi cr7,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0x25471BE0:  419E0010  bc 12,30,0x25471BF0
+	  24: Js30o       	$0x25471BF0
+
+
+. 0 25471BCC 24
+. 83 A9 00 04 7F 9D 5A 14 93 89 00 04 81 28 00 1C 2F 89 00 00 41 9E 00 10
+
+==== BB 21 (0x25471BE4) approx BBs exec'd 0 ====
+
+	0x25471BE4:  80090004  lwz r0,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471BE8:  7FE05A14  add r31,r0,r11
+	   5: GETL       	R0, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0x25471BEC:  93E90004  stw r31,4(r9)
+	  10: GETL       	R31, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471BF0:  8128005C  lwz r9,92(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x5C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471BF4:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25471BF8:  41820010  bc 12,2,0x25471C08
+	  24: Js02o       	$0x25471C08
+
+
+. 0 25471BE4 24
+. 80 09 00 04 7F E0 5A 14 93 E9 00 04 81 28 00 5C 2C 09 00 00 41 82 00 10
+
+==== BB 22 (0x25471BFC) approx BBs exec'd 0 ====
+
+	0x25471BFC:  83290004  lwz r25,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x25471C00:  7D995A14  add r12,r25,r11
+	   5: GETL       	R25, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25471C04:  91890004  stw r12,4(r9)
+	  10: GETL       	R12, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471C08:  812800C4  lwz r9,196(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0xC4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25471C0C:  2C890000  cmpi cr1,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x1, CR
+	  23: INCEIPL       	$4
+
+	0x25471C10:  41860010  bc 12,6,0x25471C20
+	  24: Js06o       	$0x25471C20
+
+
+. 0 25471BFC 24
+. 83 29 00 04 7D 99 5A 14 91 89 00 04 81 28 00 C4 2C 89 00 00 41 86 00 10
+
+==== BB 23 (0x25471C14) approx BBs exec'd 0 ====
+
+	0x25471C14:  83490004  lwz r26,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25471C18:  7D1A5A14  add r8,r26,r11
+	   5: GETL       	R26, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25471C1C:  91090004  stw r8,4(r9)
+	  10: GETL       	R8, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471C20:  80E10010  lwz r7,16(r1)
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x10, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R7
+	  19: INCEIPL       	$4
+
+	0x25471C24:  2F070000  cmpi cr6,r7,0
+	  20: GETL       	R7, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x6, CR
+	  23: INCEIPL       	$4
+
+	0x25471C28:  3960FFFF  li r11,-1
+	  24: MOVL       	$0xFFFFFFFF, t20
+	  25: PUTL       	t20, R11
+	  26: INCEIPL       	$4
+
+	0x25471C2C:  9161023C  stw r11,572(r1)
+	  27: GETL       	R11, t22
+	  28: GETL       	R1, t24
+	  29: ADDL       	$0x23C, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0x25471C30:  409A0010  bc 4,26,0x25471C40
+	  32: Jc26o       	$0x25471C40
+
+
+. 0 25471C14 32
+. 83 49 00 04 7D 1A 5A 14 91 09 00 04 80 E1 00 10 2F 07 00 00 39 60 FF FF 91 61 02 3C 40 9A 00 10
+
+==== BB 24 (0x25471C40) approx BBs exec'd 0 ====
+
+	0x25471C40:  7F03C378  or r3,r24,r24
+	   0: GETL       	R24, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25471C44:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25471C48:  38A00000  li r5,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25471C4C:  3B600000  li r27,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R27
+	  11: INCEIPL       	$4
+
+	0x25471C50:  4800EBE1  bl 0x25480830
+	  12: MOVL       	$0x25471C54, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25480830  ($4)
+
+
+. 0 25471C40 20
+. 7F 03 C3 78 38 80 00 00 38 A0 00 00 3B 60 00 00 48 00 EB E1
+
+==== BB 25 __elf_machine_runtime_setup(0x25480830) approx BBs exec'd 0 ====
+
+	0x25480830:  7CC802A6  mflr r6
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x25480834:  9421FFD0  stwu r1,-48(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFD0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x25480838:  7D800026  mfcr r12
+	   9: GETL       	CR, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x2548083C:  480167C5  bl 0x25497000
+	  12: MOVL       	$0x25480840, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25480830 16
+. 7C C8 02 A6 94 21 FF D0 7D 80 00 26 48 01 67 C5
+
+==== BB 26 (0x25480840) approx BBs exec'd 0 ====
+
+	0x25480840:  93210014  stw r25,20(r1)
+	   0: GETL       	R25, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25480844:  7C992378  or r25,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0x25480848:  90C10034  stw r6,52(r1)
+	   8: GETL       	R6, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x34, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2548084C:  8003007C  lwz r0,124(r3)
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0x7C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R0
+	  17: INCEIPL       	$4
+
+	0x25480850:  93A10024  stw r29,36(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x24, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25480854:  7C7D1B78  or r29,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x25480858:  2F800000  cmpi cr7,r0,0
+	  26: GETL       	R0, t20
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x7, CR
+	  29: INCEIPL       	$4
+
+	0x2548085C:  93C10028  stw r30,40(r1)
+	  30: GETL       	R30, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x28, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25480860:  92E1000C  stw r23,12(r1)
+	  35: GETL       	R23, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0xC, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25480864:  7FC802A6  mflr r30
+	  40: GETL       	LR, t32
+	  41: PUTL       	t32, R30
+	  42: INCEIPL       	$4
+
+	0x25480868:  93010010  stw r24,16(r1)
+	  43: GETL       	R24, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x10, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x2548086C:  93410018  stw r26,24(r1)
+	  48: GETL       	R26, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x18, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0x25480870:  9361001C  stw r27,28(r1)
+	  53: GETL       	R27, t42
+	  54: GETL       	R1, t44
+	  55: ADDL       	$0x1C, t44
+	  56: STL       	t42, (t44)
+	  57: INCEIPL       	$4
+
+	0x25480874:  93810020  stw r28,32(r1)
+	  58: GETL       	R28, t46
+	  59: GETL       	R1, t48
+	  60: ADDL       	$0x20, t48
+	  61: STL       	t46, (t48)
+	  62: INCEIPL       	$4
+
+	0x25480878:  93E1002C  stw r31,44(r1)
+	  63: GETL       	R31, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x2C, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x2548087C:  91810008  stw r12,8(r1)
+	  68: GETL       	R12, t54
+	  69: GETL       	R1, t56
+	  70: ADDL       	$0x8, t56
+	  71: STL       	t54, (t56)
+	  72: INCEIPL       	$4
+
+	0x25480880:  419E0294  bc 12,30,0x25480B14
+	  73: Js30o       	$0x25480B14
+
+
+. 0 25480840 68
+. 93 21 00 14 7C 99 23 78 90 C1 00 34 80 03 00 7C 93 A1 00 24 7C 7D 1B 78 2F 80 00 00 93 C1 00 28 92 E1 00 0C 7F C8 02 A6 93 01 00 10 93 41 00 18 93 61 00 1C 93 81 00 20 93 E1 00 2C 91 81 00 08 41 9E 02 94
+
+==== BB 27 (0x25480884) approx BBs exec'd 0 ====
+
+	0x25480884:  82E30028  lwz r23,40(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x28, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x25480888:  3D40AAAA  lis r10,-21846
+	   5: MOVL       	$0xAAAA0000, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0x2548088C:  6148AAAB  ori r8,r10,0xAAAB
+	   8: MOVL       	$0xAAAAAAAB, t6
+	   9: PUTL       	t6, R8
+	  10: INCEIPL       	$4
+
+	0x25480890:  8083002C  lwz r4,44(r3)
+	  11: GETL       	R3, t8
+	  12: ADDL       	$0x2C, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0x25480894:  80F70004  lwz r7,4(r23)
+	  16: GETL       	R23, t12
+	  17: ADDL       	$0x4, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R7
+	  20: INCEIPL       	$4
+
+	0x25480898:  83E40004  lwz r31,4(r4)
+	  21: GETL       	R4, t16
+	  22: ADDL       	$0x4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0x2548089C:  7D674016  mulhwu r11,r7,r8
+	  26: GETL       	R7, t20
+	  27: GETL       	R8, t22
+	  28: UMULHL       	t20, t22
+	  29: PUTL       	t22, R11
+	  30: INCEIPL       	$4
+
+	0x254808A0:  557BE8FE  rlwinm r27,r11,29,3,31
+	  31: GETL       	R11, t24
+	  32: SHRL       	$0x3, t24
+	  33: PUTL       	t24, R27
+	  34: INCEIPL       	$4
+
+	0x254808A4:  281B2000  cmpli cr0,r27,8192
+	  35: GETL       	R27, t26
+	  36: MOVL       	$0x2000, t30
+	  37: CMPUL       	t26, t30, t28  (-rSo)
+	  38: ICRFL       	t28, $0x0, CR
+	  39: INCEIPL       	$4
+
+	0x254808A8:  5763083C  rlwinm r3,r27,1,0,30
+	  40: GETL       	R27, t32
+	  41: SHLL       	$0x1, t32
+	  42: PUTL       	t32, R3
+	  43: INCEIPL       	$4
+
+	0x254808AC:  38030012  addi r0,r3,18
+	  44: GETL       	R3, t34
+	  45: ADDL       	$0x12, t34
+	  46: PUTL       	t34, R0
+	  47: INCEIPL       	$4
+
+	0x254808B0:  7D201A14  add r9,r0,r3
+	  48: GETL       	R0, t36
+	  49: GETL       	R3, t38
+	  50: ADDL       	t36, t38
+	  51: PUTL       	t38, R9
+	  52: INCEIPL       	$4
+
+	0x254808B4:  3AE9C000  addi r23,r9,-16384
+	  53: GETL       	R9, t40
+	  54: ADDL       	$0xFFFFC000, t40
+	  55: PUTL       	t40, R23
+	  56: INCEIPL       	$4
+
+	0x254808B8:  4081029C  bc 4,1,0x25480B54
+	  57: Jc01o       	$0x25480B54
+
+
+. 0 25480884 56
+. 82 E3 00 28 3D 40 AA AA 61 48 AA AB 80 83 00 2C 80 F7 00 04 83 E4 00 04 7D 67 40 16 55 7B E8 FE 28 1B 20 00 57 63 08 3C 38 03 00 12 7D 20 1A 14 3A E9 C0 00 40 81 02 9C
+
+==== BB 28 (0x25480B54) approx BBs exec'd 0 ====
+
+	0x25480B54:  7C170378  or r23,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R23
+	   2: INCEIPL       	$4
+
+	0x25480B58:  4BFFFD64  b 0x254808BC
+	   3: JMPo       	$0x254808BC  ($4)
+
+
+. 0 25480B54 8
+. 7C 17 03 78 4B FF FD 64
+
+==== BB 29 (0x254808BC) approx BBs exec'd 0 ====
+
+	0x254808BC:  56E8103A  rlwinm r8,r23,2,0,29
+	   0: GETL       	R23, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x254808C0:  2E190000  cmpi cr4,r25,0
+	   4: GETL       	R25, t2
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0x254808C4:  7F48FA14  add r26,r8,r31
+	   8: GETL       	R8, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x254808C8:  3C607D69  lis r3,32105
+	  13: MOVL       	$0x7D690000, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x254808CC:  3CFA0001  addis r7,r26,1
+	  16: GETL       	R26, t12
+	  17: ADDL       	$0x10000, t12
+	  18: PUTL       	t12, R7
+	  19: INCEIPL       	$4
+
+	0x254808D0:  574B043E  rlwinm r11,r26,0,16,31
+	  20: GETL       	R26, t14
+	  21: ANDL       	$0xFFFF, t14
+	  22: PUTL       	t14, R11
+	  23: INCEIPL       	$4
+
+	0x254808D4:  38878000  addi r4,r7,-32768
+	  24: GETL       	R7, t16
+	  25: ADDL       	$0xFFFF8000, t16
+	  26: PUTL       	t16, R4
+	  27: INCEIPL       	$4
+
+	0x254808D8:  3CC04E80  lis r6,20096
+	  28: MOVL       	$0x4E800000, t18
+	  29: PUTL       	t18, R6
+	  30: INCEIPL       	$4
+
+	0x254808DC:  5489843E  rlwinm r9,r4,16,16,31
+	  31: GETL       	R4, t20
+	  32: SHRL       	$0x10, t20
+	  33: PUTL       	t20, R9
+	  34: INCEIPL       	$4
+
+	0x254808E0:  6560816B  oris r0,r11,0x816B
+	  35: GETL       	R11, t22
+	  36: ORL       	$0x816B0000, t22
+	  37: PUTL       	t22, R0
+	  38: INCEIPL       	$4
+
+	0x254808E4:  652C3D6B  oris r12,r9,0x3D6B
+	  39: GETL       	R9, t24
+	  40: ORL       	$0x3D6B0000, t24
+	  41: PUTL       	t24, R12
+	  42: INCEIPL       	$4
+
+	0x254808E8:  607C03A6  ori r28,r3,0x3A6
+	  43: MOVL       	$0x7D6903A6, t26
+	  44: PUTL       	t26, R28
+	  45: INCEIPL       	$4
+
+	0x254808EC:  60D80420  ori r24,r6,0x420
+	  46: MOVL       	$0x4E800420, t28
+	  47: PUTL       	t28, R24
+	  48: INCEIPL       	$4
+
+	0x254808F0:  919F0000  stw r12,0(r31)
+	  49: GETL       	R12, t30
+	  50: GETL       	R31, t32
+	  51: STL       	t30, (t32)
+	  52: INCEIPL       	$4
+
+	0x254808F4:  901F0004  stw r0,4(r31)
+	  53: GETL       	R0, t34
+	  54: GETL       	R31, t36
+	  55: ADDL       	$0x4, t36
+	  56: STL       	t34, (t36)
+	  57: INCEIPL       	$4
+
+	0x254808F8:  939F0008  stw r28,8(r31)
+	  58: GETL       	R28, t38
+	  59: GETL       	R31, t40
+	  60: ADDL       	$0x8, t40
+	  61: STL       	t38, (t40)
+	  62: INCEIPL       	$4
+
+	0x254808FC:  931F000C  stw r24,12(r31)
+	  63: GETL       	R24, t42
+	  64: GETL       	R31, t44
+	  65: ADDL       	$0xC, t44
+	  66: STL       	t42, (t44)
+	  67: INCEIPL       	$4
+
+	0x25480900:  41920178  bc 12,18,0x25480A78
+	  68: Js18o       	$0x25480A78
+
+
+. 0 254808BC 72
+. 56 E8 10 3A 2E 19 00 00 7F 48 FA 14 3C 60 7D 69 3C FA 00 01 57 4B 04 3E 38 87 80 00 3C C0 4E 80 54 89 84 3E 65 60 81 6B 65 2C 3D 6B 60 7C 03 A6 60 D8 04 20 91 9F 00 00 90 1F 00 04 93 9F 00 08 93 1F 00 0C 41 92 01 78
+
+==== BB 30 (0x25480A78) approx BBs exec'd 0 ====
+
+	0x25480A78:  817E04F0  lwz r11,1264(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25480A7C:  3159FFFF  addic r10,r25,-1
+	   5: GETL       	R25, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x25480A80:  7CAAC910  subfe r5,r10,r25
+	   9: GETL       	R10, t6
+	  10: GETL       	R25, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x25480A84:  314BFFFF  addic r10,r11,-1
+	  14: GETL       	R11, t10
+	  15: ADCL       	$0xFFFFFFFF, t10  (-wCa)
+	  16: PUTL       	t10, R10
+	  17: INCEIPL       	$4
+
+	0x25480A88:  7C8A5910  subfe r4,r10,r11
+	  18: GETL       	R10, t12
+	  19: GETL       	R11, t14
+	  20: SBBL       	t12, t14  (-rCa-wCa)
+	  21: PUTL       	t14, R4
+	  22: INCEIPL       	$4
+
+	0x25480A8C:  7CAA2039  and. r10,r5,r4
+	  23: GETL       	R5, t16
+	  24: GETL       	R4, t18
+	  25: ANDL       	t16, t18
+	  26: PUTL       	t18, R10
+	  27: CMP0L       	t18, t20  (-rSo)
+	  28: ICRFL       	t20, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0x25480A90:  39400004  li r10,4
+	  30: MOVL       	$0x4, t22
+	  31: PUTL       	t22, R10
+	  32: INCEIPL       	$4
+
+	0x25480A94:  41820010  bc 12,2,0x25480AA4
+	  33: Js02o       	$0x25480AA4
+
+
+. 0 25480A78 32
+. 81 7E 04 F0 31 59 FF FF 7C AA C9 10 31 4B FF FF 7C 8A 59 10 7C AA 20 39 39 40 00 04 41 82 00 10
+
+==== BB 31 (0x25480AA4) approx BBs exec'd 0 ====
+
+	0x25480AA4:  419200B8  bc 12,18,0x25480B5C
+	   0: Js18o       	$0x25480B5C
+
+
+. 0 25480AA4 4
+. 41 92 00 B8
+
+==== BB 32 (0x25480B5C) approx BBs exec'd 0 ====
+
+	0x25480B5C:  39600006  li r11,6
+	   0: MOVL       	$0x6, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x25480B60:  4BFFFF4C  b 0x25480AAC
+	   3: JMPo       	$0x25480AAC  ($4)
+
+
+. 0 25480B5C 8
+. 39 60 00 06 4B FF FF 4C
+
+==== BB 33 (0x25480AAC) approx BBs exec'd 0 ====
+
+	0x25480AAC:  39200000  li r9,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25480AB0:  48000014  b 0x25480AC4
+	   3: JMPo       	$0x25480AC4  ($4)
+
+
+. 0 25480AAC 8
+. 39 20 00 00 48 00 00 14
+
+==== BB 34 (0x25480AC4) approx BBs exec'd 0 ====
+
+	0x25480AC4:  7E095840  cmpl cr4,r9,r11
+	   0: GETL       	R9, t0
+	   1: GETL       	R11, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x25480AC8:  4190FFEC  bc 12,16,0x25480AB4
+	   5: Js16o       	$0x25480AB4
+
+
+. 0 25480AC4 8
+. 7E 09 58 40 41 90 FF EC
+
+==== BB 35 (0x25480AB4) approx BBs exec'd 0 ====
+
+	0x25480AB4:  553A103A  rlwinm r26,r9,2,0,29
+	   0: GETL       	R9, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x25480AB8:  7EFAFA14  add r23,r26,r31
+	   4: GETL       	R26, t2
+	   5: GETL       	R31, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R23
+	   8: INCEIPL       	$4
+
+	0x25480ABC:  7C00B86C  dcbst r0,r23
+	   9: INCEIPL       	$4
+
+	0x25480AC0:  7D295214  add r9,r9,r10
+	  10: GETL       	R9, t6
+	  11: GETL       	R10, t8
+	  12: ADDL       	t6, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x25480AC4:  7E095840  cmpl cr4,r9,r11
+	  15: GETL       	R9, t10
+	  16: GETL       	R11, t12
+	  17: CMPUL       	t10, t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x4, CR
+	  19: INCEIPL       	$4
+
+	0x25480AC8:  4190FFEC  bc 12,16,0x25480AB4
+	  20: Js16o       	$0x25480AB4
+
+
+. 0 25480AB4 24
+. 55 3A 10 3A 7E FA FA 14 7C 00 B8 6C 7D 29 52 14 7E 09 58 40 41 90 FF EC
+
+==== BB 36 (0x25480ACC) approx BBs exec'd 0 ====
+
+	0x25480ACC:  5568103A  rlwinm r8,r11,2,0,29
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x25480AD0:  7CE8FA14  add r7,r8,r31
+	   4: GETL       	R8, t2
+	   5: GETL       	R31, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x25480AD4:  3927FFFC  addi r9,r7,-4
+	   9: GETL       	R7, t6
+	  10: ADDL       	$0xFFFFFFFC, t6
+	  11: PUTL       	t6, R9
+	  12: INCEIPL       	$4
+
+	0x25480AD8:  7C00486C  dcbst r0,r9
+	  13: INCEIPL       	$4
+
+	0x25480ADC:  7C0004AC  sync
+	  14: INCEIPL       	$4
+
+	0x25480AE0:  39200000  li r9,0
+	  15: MOVL       	$0x0, t8
+	  16: PUTL       	t8, R9
+	  17: INCEIPL       	$4
+
+	0x25480AE4:  48000014  b 0x25480AF8
+	  18: JMPo       	$0x25480AF8  ($4)
+
+
+. 0 25480ACC 28
+. 55 68 10 3A 7C E8 FA 14 39 27 FF FC 7C 00 48 6C 7C 00 04 AC 39 20 00 00 48 00 00 14
+
+==== BB 37 (0x25480AF8) approx BBs exec'd 0 ====
+
+	0x25480AF8:  7F895840  cmpl cr7,r9,r11
+	   0: GETL       	R9, t0
+	   1: GETL       	R11, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25480AFC:  419CFFEC  bc 12,28,0x25480AE8
+	   5: Js28o       	$0x25480AE8
+
+
+. 0 25480AF8 8
+. 7F 89 58 40 41 9C FF EC
+
+==== BB 38 (0x25480AE8) approx BBs exec'd 0 ====
+
+	0x25480AE8:  553B103A  rlwinm r27,r9,2,0,29
+	   0: GETL       	R9, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25480AEC:  7CDBFA14  add r6,r27,r31
+	   4: GETL       	R27, t2
+	   5: GETL       	R31, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x25480AF0:  7C0037AC  icbi r0,r6
+	   9: GETL       	R6, t6
+	  10: CALLM_So       	
+	  11: PUSHL       	t6
+	  12: CALLMo       	$0x68
+	  13: CALLM_Eo       	
+	  14: INCEIPL       	$4
+
+	0x25480AF4:  7D295214  add r9,r9,r10
+	  15: GETL       	R9, t8
+	  16: GETL       	R10, t10
+	  17: ADDL       	t8, t10
+	  18: PUTL       	t10, R9
+	  19: INCEIPL       	$4
+
+	0x25480AF8:  7F895840  cmpl cr7,r9,r11
+	  20: GETL       	R9, t12
+	  21: GETL       	R11, t14
+	  22: CMPUL       	t12, t14, t16  (-rSo)
+	  23: ICRFL       	t16, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x25480AFC:  419CFFEC  bc 12,28,0x25480AE8
+	  25: Js28o       	$0x25480AE8
+
+
+. 0 25480AE8 24
+. 55 3B 10 3A 7C DB FA 14 7C 00 37 AC 7D 29 52 14 7F 89 58 40 41 9C FF EC
+
+==== BB 39 (0x25480B00) approx BBs exec'd 0 ====
+
+	0x25480B00:  7C68FA14  add r3,r8,r31
+	   0: GETL       	R8, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25480B04:  3BE3FFFC  addi r31,r3,-4
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0xFFFFFFFC, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0x25480B08:  7C00FFAC  icbi r0,r31
+	   9: GETL       	R31, t6
+	  10: CALLM_So       	
+	  11: PUSHL       	t6
+	  12: CALLMo       	$0x68
+	  13: CALLM_Eo       	
+	  14: INCEIPL       	$4
+
+	0x25480B0C:  7C0004AC  sync
+	  15: INCEIPL       	$4
+
+	0x25480B10:  4C00012C  	  16: INCEIPL       	$4
+
+	0x25480B14:  7F23CB78  or r3,r25,r25
+	  17: GETL       	R25, t8
+	  18: PUTL       	t8, R3
+	  19: INCEIPL       	$4
+
+	0x25480B18:  81010008  lwz r8,8(r1)
+	  20: GETL       	R1, t10
+	  21: ADDL       	$0x8, t10
+	  22: LDL       	(t10), t12
+	  23: PUTL       	t12, R8
+	  24: INCEIPL       	$4
+
+	0x25480B1C:  83210034  lwz r25,52(r1)
+	  25: GETL       	R1, t14
+	  26: ADDL       	$0x34, t14
+	  27: LDL       	(t14), t16
+	  28: PUTL       	t16, R25
+	  29: INCEIPL       	$4
+
+	0x25480B20:  82E1000C  lwz r23,12(r1)
+	  30: GETL       	R1, t18
+	  31: ADDL       	$0xC, t18
+	  32: LDL       	(t18), t20
+	  33: PUTL       	t20, R23
+	  34: INCEIPL       	$4
+
+	0x25480B24:  7D008120  mtcrf 0x8,r8
+	  35: GETL       	R8, t22
+	  36: ICRFL       	t22, $0x4, CR
+	  37: INCEIPL       	$4
+
+	0x25480B28:  7F2803A6  mtlr r25
+	  38: GETL       	R25, t24
+	  39: PUTL       	t24, LR
+	  40: INCEIPL       	$4
+
+	0x25480B2C:  83010010  lwz r24,16(r1)
+	  41: GETL       	R1, t26
+	  42: ADDL       	$0x10, t26
+	  43: LDL       	(t26), t28
+	  44: PUTL       	t28, R24
+	  45: INCEIPL       	$4
+
+	0x25480B30:  83210014  lwz r25,20(r1)
+	  46: GETL       	R1, t30
+	  47: ADDL       	$0x14, t30
+	  48: LDL       	(t30), t32
+	  49: PUTL       	t32, R25
+	  50: INCEIPL       	$4
+
+	0x25480B34:  83410018  lwz r26,24(r1)
+	  51: GETL       	R1, t34
+	  52: ADDL       	$0x18, t34
+	  53: LDL       	(t34), t36
+	  54: PUTL       	t36, R26
+	  55: INCEIPL       	$4
+
+	0x25480B38:  8361001C  lwz r27,28(r1)
+	  56: GETL       	R1, t38
+	  57: ADDL       	$0x1C, t38
+	  58: LDL       	(t38), t40
+	  59: PUTL       	t40, R27
+	  60: INCEIPL       	$4
+
+	0x25480B3C:  83810020  lwz r28,32(r1)
+	  61: GETL       	R1, t42
+	  62: ADDL       	$0x20, t42
+	  63: LDL       	(t42), t44
+	  64: PUTL       	t44, R28
+	  65: INCEIPL       	$4
+
+	0x25480B40:  83A10024  lwz r29,36(r1)
+	  66: GETL       	R1, t46
+	  67: ADDL       	$0x24, t46
+	  68: LDL       	(t46), t48
+	  69: PUTL       	t48, R29
+	  70: INCEIPL       	$4
+
+	0x25480B44:  83C10028  lwz r30,40(r1)
+	  71: GETL       	R1, t50
+	  72: ADDL       	$0x28, t50
+	  73: LDL       	(t50), t52
+	  74: PUTL       	t52, R30
+	  75: INCEIPL       	$4
+
+	0x25480B48:  83E1002C  lwz r31,44(r1)
+	  76: GETL       	R1, t54
+	  77: ADDL       	$0x2C, t54
+	  78: LDL       	(t54), t56
+	  79: PUTL       	t56, R31
+	  80: INCEIPL       	$4
+
+	0x25480B4C:  38210030  addi r1,r1,48
+	  81: GETL       	R1, t58
+	  82: ADDL       	$0x30, t58
+	  83: PUTL       	t58, R1
+	  84: INCEIPL       	$4
+
+	0x25480B50:  4E800020  blr
+	  85: GETL       	LR, t60
+	  86: JMPo-r       	t60  ($4)
+
+
+. 0 25480B00 84
+. 7C 68 FA 14 3B E3 FF FC 7C 00 FF AC 7C 00 04 AC 4C 00 01 2C 7F 23 CB 78 81 01 00 08 83 21 00 34 82 E1 00 0C 7D 00 81 20 7F 28 03 A6 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+==== BB 40 (0x25471C54) approx BBs exec'd 0 ====
+
+	0x25471C54:  38E00000  li r7,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x25471C58:  8161004C  lwz r11,76(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x4C, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x25471C5C:  39400001  li r10,1
+	   8: MOVL       	$0x1, t6
+	   9: PUTL       	t6, R10
+	  10: INCEIPL       	$4
+
+	0x25471C60:  91410264  stw r10,612(r1)
+	  11: GETL       	R10, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x264, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25471C64:  2C0B0000  cmpi cr0,r11,0
+	  16: GETL       	R11, t12
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0x25471C68:  93610260  stw r27,608(r1)
+	  20: GETL       	R27, t16
+	  21: GETL       	R1, t18
+	  22: ADDL       	$0x260, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25471C6C:  93610270  stw r27,624(r1)
+	  25: GETL       	R27, t20
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0x270, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0x25471C70:  93610258  stw r27,600(r1)
+	  30: GETL       	R27, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x258, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25471C74:  9361026C  stw r27,620(r1)
+	  35: GETL       	R27, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x26C, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25471C78:  90E10254  stw r7,596(r1)
+	  40: GETL       	R7, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x254, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0x25471C7C:  41820018  bc 12,2,0x25471C94
+	  45: Js02o       	$0x25471C94
+
+
+. 0 25471C54 44
+. 38 E0 00 00 81 61 00 4C 39 40 00 01 91 41 02 64 2C 0B 00 00 93 61 02 60 93 61 02 70 93 61 02 58 93 61 02 6C 90 E1 02 54 41 82 00 18
+
+==== BB 41 (0x25471C80) approx BBs exec'd 0 ====
+
+	0x25471C80:  80AB0004  lwz r5,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25471C84:  80810050  lwz r4,80(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x50, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25471C88:  90A10250  stw r5,592(r1)
+	  10: GETL       	R5, t8
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0x250, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25471C8C:  80E40004  lwz r7,4(r4)
+	  15: GETL       	R4, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R7
+	  19: INCEIPL       	$4
+
+	0x25471C90:  90E10254  stw r7,596(r1)
+	  20: GETL       	R7, t16
+	  21: GETL       	R1, t18
+	  22: ADDL       	$0x254, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25471C94:  2C830000  cmpi cr1,r3,0
+	  25: GETL       	R3, t20
+	  26: CMP0L       	t20, t22  (-rSo)
+	  27: ICRFL       	t22, $0x1, CR
+	  28: INCEIPL       	$4
+
+	0x25471C98:  41860010  bc 12,6,0x25471CA8
+	  29: Js06o       	$0x25471CA8
+
+
+. 0 25471C80 28
+. 80 AB 00 04 80 81 00 50 90 A1 02 50 80 E4 00 04 90 E1 02 54 2C 83 00 00 41 86 00 10
+
+==== BB 42 (0x25471CA8) approx BBs exec'd 0 ====
+
+	0x25471CA8:  3B610008  addi r27,r1,8
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25471CAC:  3B200002  li r25,2
+	   4: MOVL       	$0x2, t2
+	   5: PUTL       	t2, R25
+	   6: INCEIPL       	$4
+
+	0x25471CB0:  814100D0  lwz r10,208(r1)
+	   7: GETL       	R1, t4
+	   8: ADDL       	$0xD0, t4
+	   9: LDL       	(t4), t6
+	  10: PUTL       	t6, R10
+	  11: INCEIPL       	$4
+
+	0x25471CB4:  81210048  lwz r9,72(r1)
+	  12: GETL       	R1, t8
+	  13: ADDL       	$0x48, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R9
+	  16: INCEIPL       	$4
+
+	0x25471CB8:  2F8A0000  cmpi cr7,r10,0
+	  17: GETL       	R10, t12
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x25471CBC:  811B0248  lwz r8,584(r27)
+	  21: GETL       	R27, t16
+	  22: ADDL       	$0x248, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R8
+	  25: INCEIPL       	$4
+
+	0x25471CC0:  817B024C  lwz r11,588(r27)
+	  26: GETL       	R27, t20
+	  27: ADDL       	$0x24C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R11
+	  30: INCEIPL       	$4
+
+	0x25471CC4:  83490004  lwz r26,4(r9)
+	  31: GETL       	R9, t24
+	  32: ADDL       	$0x4, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R26
+	  35: INCEIPL       	$4
+
+	0x25471CC8:  39200000  li r9,0
+	  36: MOVL       	$0x0, t28
+	  37: PUTL       	t28, R9
+	  38: INCEIPL       	$4
+
+	0x25471CCC:  7F885A14  add r28,r8,r11
+	  39: GETL       	R8, t30
+	  40: GETL       	R11, t32
+	  41: ADDL       	t30, t32
+	  42: PUTL       	t32, R28
+	  43: INCEIPL       	$4
+
+	0x25471CD0:  83A10010  lwz r29,16(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x10, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R29
+	  48: INCEIPL       	$4
+
+	0x25471CD4:  419E0008  bc 12,30,0x25471CDC
+	  49: Js30o       	$0x25471CDC
+
+
+. 0 25471CA8 48
+. 3B 61 00 08 3B 20 00 02 81 41 00 D0 81 21 00 48 2F 8A 00 00 81 1B 02 48 81 7B 02 4C 83 49 00 04 39 20 00 00 7F 88 5A 14 83 A1 00 10 41 9E 00 08
+
+==== BB 43 (0x25471CD8) approx BBs exec'd 0 ====
+
+	0x25471CD8:  812A0004  lwz r9,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25471CDC:  3CA0AAAA  lis r5,-21846
+	   5: MOVL       	$0xAAAA0000, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25471CE0:  60A4AAAB  ori r4,r5,0xAAAB
+	   8: MOVL       	$0xAAAAAAAB, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x25471CE4:  7D4B2016  mulhwu r10,r11,r4
+	  11: GETL       	R11, t8
+	  12: GETL       	R4, t10
+	  13: UMULHL       	t8, t10
+	  14: PUTL       	t10, R10
+	  15: INCEIPL       	$4
+
+	0x25471CE8:  7D0B4378  or r11,r8,r8
+	  16: GETL       	R8, t12
+	  17: PUTL       	t12, R11
+	  18: INCEIPL       	$4
+
+	0x25471CEC:  5540E8FE  rlwinm r0,r10,29,3,31
+	  19: GETL       	R10, t14
+	  20: SHRL       	$0x3, t14
+	  21: PUTL       	t14, R0
+	  22: INCEIPL       	$4
+
+	0x25471CF0:  7C004840  cmpl cr0,r0,r9
+	  23: GETL       	R0, t16
+	  24: GETL       	R9, t18
+	  25: CMPUL       	t16, t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x0, CR
+	  27: INCEIPL       	$4
+
+	0x25471CF4:  40810008  bc 4,1,0x25471CFC
+	  28: Jc01o       	$0x25471CFC
+
+
+. 0 25471CD8 32
+. 81 2A 00 04 3C A0 AA AA 60 A4 AA AB 7D 4B 20 16 7D 0B 43 78 55 40 E8 FE 7C 00 48 40 40 81 00 08
+
+==== BB 44 (0x25471CF8) approx BBs exec'd 0 ====
+
+	0x25471CF8:  7D204B78  or r0,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25471CFC:  1C60000C  mulli r3,r0,12
+	   3: GETL       	R0, t2
+	   4: MULL       	$0xC, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0x25471D00:  7FEB1A14  add r31,r11,r3
+	   7: GETL       	R11, t4
+	   8: GETL       	R3, t6
+	   9: ADDL       	t4, t6
+	  10: PUTL       	t6, R31
+	  11: INCEIPL       	$4
+
+	0x25471D04:  7F8BF840  cmpl cr7,r11,r31
+	  12: GETL       	R11, t8
+	  13: GETL       	R31, t10
+	  14: CMPUL       	t8, t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x25471D08:  409C0020  bc 4,28,0x25471D28
+	  17: Jc28o       	$0x25471D28
+
+
+. 0 25471CF8 20
+. 7D 20 4B 78 1C 60 00 0C 7F EB 1A 14 7F 8B F8 40 40 9C 00 20
+
+==== BB 45 (0x25471D0C) approx BBs exec'd 0 ====
+
+	0x25471D0C:  818B0008  lwz r12,8(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x25471D10:  80CB0000  lwz r6,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x25471D14:  396B000C  addi r11,r11,12
+	   9: GETL       	R11, t8
+	  10: ADDL       	$0xC, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x25471D18:  7CECEA14  add r7,r12,r29
+	  13: GETL       	R12, t10
+	  14: GETL       	R29, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R7
+	  17: INCEIPL       	$4
+
+	0x25471D1C:  7F8BF840  cmpl cr7,r11,r31
+	  18: GETL       	R11, t14
+	  19: GETL       	R31, t16
+	  20: CMPUL       	t14, t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x25471D20:  7CE6E92E  stwx r7,r6,r29
+	  23: GETL       	R29, t20
+	  24: GETL       	R6, t22
+	  25: ADDL       	t22, t20
+	  26: GETL       	R7, t24
+	  27: STL       	t24, (t20)
+	  28: INCEIPL       	$4
+
+	0x25471D24:  419CFFE8  bc 12,28,0x25471D0C
+	  29: Js28o       	$0x25471D0C
+
+
+. 0 25471D0C 28
+. 81 8B 00 08 80 CB 00 00 39 6B 00 0C 7C EC EA 14 7F 8B F8 40 7C E6 E9 2E 41 9C FF E8
+
+==== BB 46 (0x25471D28) approx BBs exec'd 0 ====
+
+	0x25471D28:  7C9FE040  cmpl cr1,r31,r28
+	   0: GETL       	R31, t0
+	   1: GETL       	R28, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25471D2C:  4084008C  bc 4,4,0x25471DB8
+	   5: Jc04o       	$0x25471DB8
+
+
+. 0 25471D28 8
+. 7C 9F E0 40 40 84 00 8C
+
+==== BB 47 (0x25471D30) approx BBs exec'd 0 ====
+
+	0x25471D30:  811F0004  lwz r8,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25471D34:  817F0000  lwz r11,0(r31)
+	   5: GETL       	R31, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0x25471D38:  550A063E  rlwinm r10,r8,0,24,31
+	   9: GETL       	R8, t8
+	  10: ANDL       	$0xFF, t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x25471D3C:  5500E136  rlwinm r0,r8,28,4,27
+	  13: GETL       	R8, t10
+	  14: ROLL       	$0x1C, t10
+	  15: ANDL       	$0xFFFFFF0, t10
+	  16: PUTL       	t10, R0
+	  17: INCEIPL       	$4
+
+	0x25471D40:  2F0A0016  cmpi cr6,r10,22
+	  18: GETL       	R10, t12
+	  19: MOVL       	$0x16, t16
+	  20: CMPL       	t12, t16, t14  (-rSo)
+	  21: ICRFL       	t14, $0x6, CR
+	  22: INCEIPL       	$4
+
+	0x25471D44:  7CC0D214  add r6,r0,r26
+	  23: GETL       	R0, t18
+	  24: GETL       	R26, t20
+	  25: ADDL       	t18, t20
+	  26: PUTL       	t20, R6
+	  27: INCEIPL       	$4
+
+	0x25471D48:  7D0BEA14  add r8,r11,r29
+	  28: GETL       	R11, t22
+	  29: GETL       	R29, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R8
+	  32: INCEIPL       	$4
+
+	0x25471D4C:  419A00C4  bc 12,26,0x25471E10
+	  33: Js26o       	$0x25471E10
+
+
+. 0 25471D30 32
+. 81 1F 00 04 81 7F 00 00 55 0A 06 3E 55 00 E1 36 2F 0A 00 16 7C C0 D2 14 7D 0B EA 14 41 9A 00 C4
+
+==== BB 48 (0x25471D50) approx BBs exec'd 0 ====
+
+	0x25471D50:  2F8A0000  cmpi cr7,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25471D54:  419E0058  bc 12,30,0x25471DAC
+	   4: Js30o       	$0x25471DAC
+
+
+. 0 25471D50 8
+. 2F 8A 00 00 41 9E 00 58
+
+==== BB 49 (0x25471D58) approx BBs exec'd 0 ====
+
+	0x25471D58:  8966000C  lbz r11,12(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25471D5C:  5569E13F  rlwinm. r9,r11,28,4,31
+	   5: GETL       	R11, t4
+	   6: SHRL       	$0x4, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25471D60:  4182014C  bc 12,2,0x25471EAC
+	  11: Js02o       	$0x25471EAC
+
+
+. 0 25471D58 12
+. 89 66 00 0C 55 69 E1 3F 41 82 01 4C
+
+==== BB 50 (0x25471D64) approx BBs exec'd 0 ====
+
+	0x25471D64:  A006000E  lhz r0,14(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0xE, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25471D68:  2F000000  cmpi cr6,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25471D6C:  39200000  li r9,0
+	   9: MOVL       	$0x0, t8
+	  10: PUTL       	t8, R9
+	  11: INCEIPL       	$4
+
+	0x25471D70:  419A0008  bc 12,26,0x25471D78
+	  12: Js26o       	$0x25471D78
+
+
+. 0 25471D64 16
+. A0 06 00 0E 2F 00 00 00 39 20 00 00 41 9A 00 08
+
+==== BB 51 (0x25471D74) approx BBs exec'd 0 ====
+
+	0x25471D74:  81210010  lwz r9,16(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25471D78:  80A60004  lwz r5,4(r6)
+	   5: GETL       	R6, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0x25471D7C:  7D292A14  add r9,r9,r5
+	  10: GETL       	R9, t8
+	  11: GETL       	R5, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x25471D80:  2F8A0001  cmpi cr7,r10,1
+	  15: GETL       	R10, t12
+	  16: MOVL       	$0x1, t16
+	  17: CMPL       	t12, t16, t14  (-rSo)
+	  18: ICRFL       	t14, $0x7, CR
+	  19: INCEIPL       	$4
+
+	0x25471D84:  807F0008  lwz r3,8(r31)
+	  20: GETL       	R31, t18
+	  21: ADDL       	$0x8, t18
+	  22: LDL       	(t18), t20
+	  23: PUTL       	t20, R3
+	  24: INCEIPL       	$4
+
+	0x25471D88:  7D291A14  add r9,r9,r3
+	  25: GETL       	R9, t22
+	  26: GETL       	R3, t24
+	  27: ADDL       	t22, t24
+	  28: PUTL       	t24, R9
+	  29: INCEIPL       	$4
+
+	0x25471D8C:  419E0078  bc 12,30,0x25471E04
+	  30: Js30o       	$0x25471E04
+
+
+. 0 25471D74 28
+. 81 21 00 10 80 A6 00 04 7D 29 2A 14 2F 8A 00 01 80 7F 00 08 7D 29 1A 14 41 9E 00 78
+
+==== BB 52 (0x25471D90) approx BBs exec'd 0 ====
+
+	0x25471D90:  2C0A0014  cmpi cr0,r10,20
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x14, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25471D94:  41820070  bc 12,2,0x25471E04
+	   5: Js02o       	$0x25471E04
+
+
+. 0 25471D90 8
+. 2C 0A 00 14 41 82 00 70
+
+==== BB 53 (0x25471E04) approx BBs exec'd 0 ====
+
+	0x25471E04:  3BFF000C  addi r31,r31,12
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0x25471E08:  91280000  stw r9,0(r8)
+	   4: GETL       	R9, t2
+	   5: GETL       	R8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x25471E0C:  4BFFFFA4  b 0x25471DB0
+	   8: JMPo       	$0x25471DB0  ($4)
+
+
+. 0 25471E04 12
+. 3B FF 00 0C 91 28 00 00 4B FF FF A4
+
+==== BB 54 (0x25471DB0) approx BBs exec'd 0 ====
+
+	0x25471DB0:  7C9FE040  cmpl cr1,r31,r28
+	   0: GETL       	R31, t0
+	   1: GETL       	R28, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25471DB4:  4184FF7C  bc 12,4,0x25471D30
+	   5: Js04o       	$0x25471D30
+
+
+. 0 25471DB0 8
+. 7C 9F E0 40 41 84 FF 7C
+
+==== BB 55 (0x25471D98) approx BBs exec'd 0 ====
+
+	0x25471D98:  7F03C378  or r3,r24,r24
+	   0: GETL       	R24, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25471D9C:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25471DA0:  38A00000  li r5,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25471DA4:  7CC73378  or r7,r6,r6
+	   9: GETL       	R6, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0x25471DA8:  4800EF01  bl 0x25480CA8
+	  12: MOVL       	$0x25471DAC, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25480CA8  ($4)
+
+
+. 0 25471D98 20
+. 7F 03 C3 78 7F E4 FB 78 38 A0 00 00 7C C7 33 78 48 00 EF 01
+
+==== BB 56 __process_machine_rela(0x25480CA8) approx BBs exec'd 0 ====
+
+	0x25480CA8:  2B8A004D  cmpli cr7,r10,77
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x4D, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25480CAC:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0x25480CB0:  9421FFD0  stwu r1,-48(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFD0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25480CB4:  4801634D  bl 0x25497000
+	  14: MOVL       	$0x25480CB8, t12
+	  15: PUTL       	t12, LR
+	  16: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25480CA8 16
+. 2B 8A 00 4D 7C 08 02 A6 94 21 FF D0 48 01 63 4D
+
+==== BB 57 (0x25480CB8) approx BBs exec'd 0 ====
+
+	0x25480CB8:  93410018  stw r26,24(r1)
+	   0: GETL       	R26, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25480CBC:  7D3A4B78  or r26,r9,r9
+	   5: GETL       	R9, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x25480CC0:  9361001C  stw r27,28(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25480CC4:  7CDB3378  or r27,r6,r6
+	  13: GETL       	R6, t10
+	  14: PUTL       	t10, R27
+	  15: INCEIPL       	$4
+
+	0x25480CC8:  93810020  stw r28,32(r1)
+	  16: GETL       	R28, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x20, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x25480CCC:  7C9C2378  or r28,r4,r4
+	  21: GETL       	R4, t16
+	  22: PUTL       	t16, R28
+	  23: INCEIPL       	$4
+
+	0x25480CD0:  93A10024  stw r29,36(r1)
+	  24: GETL       	R29, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x24, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x25480CD4:  7CBD2B78  or r29,r5,r5
+	  29: GETL       	R5, t22
+	  30: PUTL       	t22, R29
+	  31: INCEIPL       	$4
+
+	0x25480CD8:  93C10028  stw r30,40(r1)
+	  32: GETL       	R30, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x28, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x25480CDC:  7FC802A6  mflr r30
+	  37: GETL       	LR, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0x25480CE0:  93E1002C  stw r31,44(r1)
+	  40: GETL       	R31, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x2C, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x25480CE4:  7D5F5378  or r31,r10,r10
+	  45: GETL       	R10, t34
+	  46: PUTL       	t34, R31
+	  47: INCEIPL       	$4
+
+	0x25480CE8:  90010034  stw r0,52(r1)
+	  48: GETL       	R0, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x34, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25480CEC:  90610008  stw r3,8(r1)
+	  53: GETL       	R3, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x8, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x25480CF0:  90E10010  stw r7,16(r1)
+	  58: GETL       	R7, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x10, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x25480CF4:  9101000C  stw r8,12(r1)
+	  63: GETL       	R8, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0xC, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x25480CF8:  419D0140  bc 12,29,0x25480E38
+	  68: Js29o       	$0x25480E38
+
+
+. 0 25480CB8 68
+. 93 41 00 18 7D 3A 4B 78 93 61 00 1C 7C DB 33 78 93 81 00 20 7C 9C 23 78 93 A1 00 24 7C BD 2B 78 93 C1 00 28 7F C8 02 A6 93 E1 00 2C 7D 5F 53 78 90 01 00 34 90 61 00 08 90 E1 00 10 91 01 00 0C 41 9D 01 40
+
+==== BB 58 (0x25480CFC) approx BBs exec'd 0 ====
+
+	0x25480CFC:  817E0450  lwz r11,1104(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x450, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25480D00:  5544103A  rlwinm r4,r10,2,0,29
+	   5: GETL       	R10, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x25480D04:  7C64582E  lwzx r3,r4,r11
+	   9: GETL       	R11, t6
+	  10: GETL       	R4, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x25480D08:  7D235A14  add r9,r3,r11
+	  15: GETL       	R3, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25480D0C:  7D2903A6  mtctr r9
+	  20: GETL       	R9, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x25480D10:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+. 0 25480CFC 24
+. 81 7E 04 50 55 44 10 3A 7C 64 58 2E 7D 23 5A 14 7D 29 03 A6 4E 80 04 20
+
+==== BB 59 (0x25480FD8) approx BBs exec'd 0 ====
+
+	0x25480FD8:  80A1000C  lwz r5,12(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25480FDC:  7FA5D050  subf r29,r5,r26
+	   5: GETL       	R5, t4
+	   6: GETL       	R26, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x25480FE0:  57BF3032  rlwinm r31,r29,6,0,25
+	  10: GETL       	R29, t8
+	  11: SHLL       	$0x6, t8
+	  12: PUTL       	t8, R31
+	  13: INCEIPL       	$4
+
+	0x25480FE4:  7FE93670  srawi r9,r31,6
+	  14: GETL       	R31, t10
+	  15: SARL       	$0x6, t10  (-wCa)
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x25480FE8:  7F89E800  cmp cr7,r9,r29
+	  18: GETL       	R9, t12
+	  19: GETL       	R29, t14
+	  20: CMPL       	t12, t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x25480FEC:  419E0254  bc 12,30,0x25481240
+	  23: Js30o       	$0x25481240
+
+
+. 0 25480FD8 24
+. 80 A1 00 0C 7F A5 D0 50 57 BF 30 32 7F E9 36 70 7F 89 E8 00 41 9E 02 54
+
+==== BB 60 (0x25481240) approx BBs exec'd 0 ====
+
+	0x25481240:  553A01BA  rlwinm r26,r9,0,6,29
+	   0: GETL       	R9, t0
+	   1: ANDL       	$0x3FFFFFC, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x25481244:  67404800  oris r0,r26,0x4800
+	   4: GETL       	R26, t2
+	   5: ORL       	$0x48000000, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x25481248:  90050000  stw r0,0(r5)
+	   8: GETL       	R0, t4
+	   9: GETL       	R5, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x2548124C:  4BFFFBB4  b 0x25480E00
+	  12: JMPo       	$0x25480E00  ($4)
+
+
+. 0 25481240 16
+. 55 3A 01 BA 67 40 48 00 90 05 00 00 4B FF FB B4
+
+==== BB 61 (0x25480E00) approx BBs exec'd 0 ====
+
+	0x25480E00:  7C00286C  dcbst r0,r5
+	   0: INCEIPL       	$4
+
+	0x25480E04:  7C0004AC  sync
+	   1: INCEIPL       	$4
+
+	0x25480E08:  8381000C  lwz r28,12(r1)
+	   2: GETL       	R1, t0
+	   3: ADDL       	$0xC, t0
+	   4: LDL       	(t0), t2
+	   5: PUTL       	t2, R28
+	   6: INCEIPL       	$4
+
+	0x25480E0C:  7C00E7AC  icbi r0,r28
+	   7: GETL       	R28, t4
+	   8: CALLM_So       	
+	   9: PUSHL       	t4
+	  10: CALLMo       	$0x68
+	  11: CALLM_Eo       	
+	  12: INCEIPL       	$4
+
+	0x25480E10:  80610034  lwz r3,52(r1)
+	  13: GETL       	R1, t6
+	  14: ADDL       	$0x34, t6
+	  15: LDL       	(t6), t8
+	  16: PUTL       	t8, R3
+	  17: INCEIPL       	$4
+
+	0x25480E14:  83410018  lwz r26,24(r1)
+	  18: GETL       	R1, t10
+	  19: ADDL       	$0x18, t10
+	  20: LDL       	(t10), t12
+	  21: PUTL       	t12, R26
+	  22: INCEIPL       	$4
+
+	0x25480E18:  8361001C  lwz r27,28(r1)
+	  23: GETL       	R1, t14
+	  24: ADDL       	$0x1C, t14
+	  25: LDL       	(t14), t16
+	  26: PUTL       	t16, R27
+	  27: INCEIPL       	$4
+
+	0x25480E1C:  7C6803A6  mtlr r3
+	  28: GETL       	R3, t18
+	  29: PUTL       	t18, LR
+	  30: INCEIPL       	$4
+
+	0x25480E20:  83810020  lwz r28,32(r1)
+	  31: GETL       	R1, t20
+	  32: ADDL       	$0x20, t20
+	  33: LDL       	(t20), t22
+	  34: PUTL       	t22, R28
+	  35: INCEIPL       	$4
+
+	0x25480E24:  83A10024  lwz r29,36(r1)
+	  36: GETL       	R1, t24
+	  37: ADDL       	$0x24, t24
+	  38: LDL       	(t24), t26
+	  39: PUTL       	t26, R29
+	  40: INCEIPL       	$4
+
+	0x25480E28:  83C10028  lwz r30,40(r1)
+	  41: GETL       	R1, t28
+	  42: ADDL       	$0x28, t28
+	  43: LDL       	(t28), t30
+	  44: PUTL       	t30, R30
+	  45: INCEIPL       	$4
+
+	0x25480E2C:  83E1002C  lwz r31,44(r1)
+	  46: GETL       	R1, t32
+	  47: ADDL       	$0x2C, t32
+	  48: LDL       	(t32), t34
+	  49: PUTL       	t34, R31
+	  50: INCEIPL       	$4
+
+	0x25480E30:  38210030  addi r1,r1,48
+	  51: GETL       	R1, t36
+	  52: ADDL       	$0x30, t36
+	  53: PUTL       	t36, R1
+	  54: INCEIPL       	$4
+
+	0x25480E34:  4E800020  blr
+	  55: GETL       	LR, t38
+	  56: JMPo-r       	t38  ($4)
+
+
+. 0 25480E00 56
+. 7C 00 28 6C 7C 00 04 AC 83 81 00 0C 7C 00 E7 AC 80 61 00 34 83 41 00 18 83 61 00 1C 7C 68 03 A6 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+==== BB 62 (0x25471DAC) approx BBs exec'd 0 ====
+
+	0x25471DAC:  3BFF000C  addi r31,r31,12
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0x25471DB0:  7C9FE040  cmpl cr1,r31,r28
+	   4: GETL       	R31, t2
+	   5: GETL       	R28, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25471DB4:  4184FF7C  bc 12,4,0x25471D30
+	   9: Js04o       	$0x25471D30
+
+
+. 0 25471DAC 12
+. 3B FF 00 0C 7C 9F E0 40 41 84 FF 7C
+
+==== BB 63 (0x25471DB8) approx BBs exec'd 0 ====
+
+	0x25471DB8:  3739FFFF  addic. r25,r25,-1
+	   0: GETL       	R25, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R25
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25471DBC:  3B7B000C  addi r27,r27,12
+	   6: GETL       	R27, t4
+	   7: ADDL       	$0xC, t4
+	   8: PUTL       	t4, R27
+	   9: INCEIPL       	$4
+
+	0x25471DC0:  4080FEF0  bc 4,0,0x25471CB0
+	  10: Jc00o       	$0x25471CB0
+
+
+. 0 25471DB8 12
+. 37 39 FF FF 3B 7B 00 0C 40 80 FE F0
+
+==== BB 64 (0x25471CB0) approx BBs exec'd 0 ====
+
+	0x25471CB0:  814100D0  lwz r10,208(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xD0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25471CB4:  81210048  lwz r9,72(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x48, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25471CB8:  2F8A0000  cmpi cr7,r10,0
+	  10: GETL       	R10, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25471CBC:  811B0248  lwz r8,584(r27)
+	  14: GETL       	R27, t12
+	  15: ADDL       	$0x248, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R8
+	  18: INCEIPL       	$4
+
+	0x25471CC0:  817B024C  lwz r11,588(r27)
+	  19: GETL       	R27, t16
+	  20: ADDL       	$0x24C, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R11
+	  23: INCEIPL       	$4
+
+	0x25471CC4:  83490004  lwz r26,4(r9)
+	  24: GETL       	R9, t20
+	  25: ADDL       	$0x4, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R26
+	  28: INCEIPL       	$4
+
+	0x25471CC8:  39200000  li r9,0
+	  29: MOVL       	$0x0, t24
+	  30: PUTL       	t24, R9
+	  31: INCEIPL       	$4
+
+	0x25471CCC:  7F885A14  add r28,r8,r11
+	  32: GETL       	R8, t26
+	  33: GETL       	R11, t28
+	  34: ADDL       	t26, t28
+	  35: PUTL       	t28, R28
+	  36: INCEIPL       	$4
+
+	0x25471CD0:  83A10010  lwz r29,16(r1)
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x10, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R29
+	  41: INCEIPL       	$4
+
+	0x25471CD4:  419E0008  bc 12,30,0x25471CDC
+	  42: Js30o       	$0x25471CDC
+
+
+. 0 25471CB0 40
+. 81 41 00 D0 81 21 00 48 2F 8A 00 00 81 1B 02 48 81 7B 02 4C 83 49 00 04 39 20 00 00 7F 88 5A 14 83 A1 00 10 41 9E 00 08
+
+==== BB 65 (0x25471CFC) approx BBs exec'd 0 ====
+
+	0x25471CFC:  1C60000C  mulli r3,r0,12
+	   0: GETL       	R0, t0
+	   1: MULL       	$0xC, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x25471D00:  7FEB1A14  add r31,r11,r3
+	   4: GETL       	R11, t2
+	   5: GETL       	R3, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0x25471D04:  7F8BF840  cmpl cr7,r11,r31
+	   9: GETL       	R11, t6
+	  10: GETL       	R31, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25471D08:  409C0020  bc 4,28,0x25471D28
+	  14: Jc28o       	$0x25471D28
+
+
+. 0 25471CFC 16
+. 1C 60 00 0C 7F EB 1A 14 7F 8B F8 40 40 9C 00 20
+
+==== BB 66 (0x25471DC4) approx BBs exec'd 0 ====
+
+	0x25471DC4:  7EE3BB78  or r3,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25471DC8:  7F04C378  or r4,r24,r24
+	   3: GETL       	R24, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25471DCC:  4BFFFBBD  bl 0x25471988
+	   6: MOVL       	$0x25471DD0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25471988  ($4)
+
+
+. 0 25471DC4 12
+. 7E E3 BB 78 7F 04 C3 78 4B FF FB BD
+
+==== BB 67 _dl_start_final(0x25471988) approx BBs exec'd 0 ====
+
+	0x25471988:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547198C:  7D2802A6  mflr r9
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x25471990:  48025671  bl 0x25497000
+	   9: MOVL       	$0x25471994, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25471988 12
+. 94 21 FF E0 7D 28 02 A6 48 02 56 71
+
+==== BB 68 (0x25471994) approx BBs exec'd 0 ====
+
+	0x25471994:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25471998:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547199C:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254719A0:  91210024  stw r9,36(r1)
+	  13: GETL       	R9, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254719A4:  38A0012C  li r5,300
+	  18: MOVL       	$0x12C, t14
+	  19: PUTL       	t14, R5
+	  20: INCEIPL       	$4
+
+	0x254719A8:  80040000  lwz r0,0(r4)
+	  21: GETL       	R4, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R0
+	  24: INCEIPL       	$4
+
+	0x254719AC:  83BE04C8  lwz r29,1224(r30)
+	  25: GETL       	R30, t20
+	  26: ADDL       	$0x4C8, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R29
+	  29: INCEIPL       	$4
+
+	0x254719B0:  81840008  lwz r12,8(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0x8, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R12
+	  34: INCEIPL       	$4
+
+	0x254719B4:  38840020  addi r4,r4,32
+	  35: GETL       	R4, t28
+	  36: ADDL       	$0x20, t28
+	  37: PUTL       	t28, R4
+	  38: INCEIPL       	$4
+
+	0x254719B8:  901D01B8  stw r0,440(r29)
+	  39: GETL       	R0, t30
+	  40: GETL       	R29, t32
+	  41: ADDL       	$0x1B8, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x254719BC:  919D01C0  stw r12,448(r29)
+	  44: GETL       	R12, t34
+	  45: GETL       	R29, t36
+	  46: ADDL       	$0x1C0, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0x254719C0:  9361000C  stw r27,12(r1)
+	  49: GETL       	R27, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0xC, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0x254719C4:  7C7B1B78  or r27,r3,r3
+	  54: GETL       	R3, t42
+	  55: PUTL       	t42, R27
+	  56: INCEIPL       	$4
+
+	0x254719C8:  93810010  stw r28,16(r1)
+	  57: GETL       	R28, t44
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x10, t46
+	  60: STL       	t44, (t46)
+	  61: INCEIPL       	$4
+
+	0x254719CC:  387D01D8  addi r3,r29,472
+	  62: GETL       	R29, t48
+	  63: ADDL       	$0x1D8, t48
+	  64: PUTL       	t48, R3
+	  65: INCEIPL       	$4
+
+	0x254719D0:  3B9D01B8  addi r28,r29,440
+	  66: GETL       	R29, t50
+	  67: ADDL       	$0x1B8, t50
+	  68: PUTL       	t50, R28
+	  69: INCEIPL       	$4
+
+	0x254719D4:  4801220D  bl 0x25483BE0
+	  70: MOVL       	$0x254719D8, t52
+	  71: PUTL       	t52, LR
+	  72: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 25471994 68
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 91 21 00 24 38 A0 01 2C 80 04 00 00 83 BE 04 C8 81 84 00 08 38 84 00 20 90 1D 01 B8 91 9D 01 C0 93 61 00 0C 7C 7B 1B 78 93 81 00 10 38 7D 01 D8 3B 9D 01 B8 48 01 22 0D
+
+==== BB 69 memcpy(0x25483BE0) approx BBs exec'd 0 ====
+
+	0x25483BE0:  2B85000F  cmpli cr7,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25483BE4:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0x25483BE8:  9421FFE0  stwu r1,-32(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFE0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25483BEC:  9361000C  stw r27,12(r1)
+	  14: GETL       	R27, t12
+	  15: GETL       	R1, t14
+	  16: ADDL       	$0xC, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25483BF0:  7C7B1B78  or r27,r3,r3
+	  19: GETL       	R3, t16
+	  20: PUTL       	t16, R27
+	  21: INCEIPL       	$4
+
+	0x25483BF4:  93A10014  stw r29,20(r1)
+	  22: GETL       	R29, t18
+	  23: GETL       	R1, t20
+	  24: ADDL       	$0x14, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0x25483BF8:  7C9D2378  or r29,r4,r4
+	  27: GETL       	R4, t22
+	  28: PUTL       	t22, R29
+	  29: INCEIPL       	$4
+
+	0x25483BFC:  93E1001C  stw r31,28(r1)
+	  30: GETL       	R31, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x1C, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25483C00:  7C7F1B78  or r31,r3,r3
+	  35: GETL       	R3, t28
+	  36: PUTL       	t28, R31
+	  37: INCEIPL       	$4
+
+	0x25483C04:  93810010  stw r28,16(r1)
+	  38: GETL       	R28, t30
+	  39: GETL       	R1, t32
+	  40: ADDL       	$0x10, t32
+	  41: STL       	t30, (t32)
+	  42: INCEIPL       	$4
+
+	0x25483C08:  93C10018  stw r30,24(r1)
+	  43: GETL       	R30, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x18, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x25483C0C:  90010024  stw r0,36(r1)
+	  48: GETL       	R0, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x24, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0x25483C10:  409D0058  bc 4,29,0x25483C68
+	  53: Jc29o       	$0x25483C68
+
+
+. 0 25483BE0 52
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 61 00 0C 7C 7B 1B 78 93 A1 00 14 7C 9D 23 78 93 E1 00 1C 7C 7F 1B 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+
+==== BB 70 (0x25483C14) approx BBs exec'd 0 ====
+
+	0x25483C14:  7C8300D0  neg r4,r3
+	   0: GETL       	R3, t0
+	   1: NEGL       	t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25483C18:  548307BE  rlwinm r3,r4,0,30,31
+	   4: GETL       	R4, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0x25483C1C:  7C691B79  or. r9,r3,r3
+	   8: GETL       	R3, t4
+	   9: PUTL       	t4, R9
+	  10: CMP0L       	t4, t6  (-rSo)
+	  11: ICRFL       	t6, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25483C20:  7F832850  subf r28,r3,r5
+	  13: GETL       	R3, t8
+	  14: GETL       	R5, t10
+	  15: SUBL       	t8, t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0x25483C24:  4182001C  bc 12,2,0x25483C40
+	  18: Js02o       	$0x25483C40
+
+
+. 0 25483C14 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+
+==== BB 71 (0x25483C40) approx BBs exec'd 0 ====
+
+	0x25483C40:  73A00003  andi. r0,r29,0x3
+	   0: GETL       	R29, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25483C44:  4082006C  bc 4,2,0x25483CB0
+	   6: Jc02o       	$0x25483CB0
+
+
+. 0 25483C40 8
+. 73 A0 00 03 40 82 00 6C
+
+==== BB 72 (0x25483C48) approx BBs exec'd 0 ====
+
+	0x25483C48:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25483C4C:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25483C50:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x25483C54:  48000071  bl 0x25483CC4
+	  10: MOVL       	$0x25483C58, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0x25483CC4  ($4)
+
+
+. 0 25483C48 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 00 71
+
+==== BB 73 _wordcopy_fwd_aligned(0x25483CC4) approx BBs exec'd 0 ====
+
+	0x25483CC4:  54A0077E  rlwinm r0,r5,0,29,31
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0x7, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25483CC8:  7D8802A6  mflr r12
+	   4: GETL       	LR, t2
+	   5: PUTL       	t2, R12
+	   6: INCEIPL       	$4
+
+	0x25483CCC:  2B800007  cmpli cr7,r0,7
+	   7: GETL       	R0, t4
+	   8: MOVL       	$0x7, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x25483CD0:  48013331  bl 0x25497000
+	  12: MOVL       	$0x25483CD4, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25483CC4 16
+. 54 A0 07 7E 7D 88 02 A6 2B 80 00 07 48 01 33 31
+
+==== BB 74 (0x25483CD4) approx BBs exec'd 0 ====
+
+	0x25483CD4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25483CD8:  93C10008  stw r30,8(r1)
+	   6: GETL       	R30, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x8, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x25483CDC:  7FC802A6  mflr r30
+	  11: GETL       	LR, t8
+	  12: PUTL       	t8, R30
+	  13: INCEIPL       	$4
+
+	0x25483CE0:  7D8803A6  mtlr r12
+	  14: GETL       	R12, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0x25483CE4:  419D0028  bc 12,29,0x25483D0C
+	  17: Js29o       	$0x25483D0C
+
+
+. 0 25483CD4 20
+. 94 21 FF F0 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 41 9D 00 28
+
+==== BB 75 (0x25483CE8) approx BBs exec'd 0 ====
+
+	0x25483CE8:  817E0490  lwz r11,1168(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x490, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25483CEC:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25483CF0:  7CCB002E  lwzx r6,r11,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R11, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R6
+	  14: INCEIPL       	$4
+
+	0x25483CF4:  7D265A14  add r9,r6,r11
+	  15: GETL       	R6, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25483CF8:  7D2903A6  mtctr r9
+	  20: GETL       	R9, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x25483CFC:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+. 0 25483CE8 24
+. 81 7E 04 90 54 00 10 3A 7C CB 00 2E 7D 26 5A 14 7D 29 03 A6 4E 80 04 20
+
+==== BB 76 (0x25483D94) approx BBs exec'd 0 ====
+
+	0x25483D94:  81240000  lwz r9,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25483D98:  3863FFE8  addi r3,r3,-24
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFE8, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483D9C:  3884FFEC  addi r4,r4,-20
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0xFFFFFFEC, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0x25483DA0:  38A50005  addi r5,r5,5
+	  12: GETL       	R5, t8
+	  13: ADDL       	$0x5, t8
+	  14: PUTL       	t8, R5
+	  15: INCEIPL       	$4
+
+	0x25483DA4:  4BFFFF98  b 0x25483D3C
+	  16: JMPo       	$0x25483D3C  ($4)
+
+
+. 0 25483D94 20
+. 81 24 00 00 38 63 FF E8 38 84 FF EC 38 A5 00 05 4B FF FF 98
+
+==== BB 77 (0x25483D3C) approx BBs exec'd 0 ====
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x18, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  10: GETL       	R5, t8
+	  11: ADCL       	$0xFFFFFFF8, t8  (-wCa)
+	  12: PUTL       	t8, R5
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  16: GETL       	R4, t12
+	  17: ADDL       	$0x1C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R9
+	  20: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  21: GETL       	R0, t16
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  26: GETL       	R4, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R4
+	  29: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  30: GETL       	R3, t22
+	  31: ADDL       	$0x20, t22
+	  32: PUTL       	t22, R3
+	  33: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  34: Jc02o       	$0x25483D0C
+
+
+. 0 25483D3C 32
+. 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+==== BB 78 (0x25483D0C) approx BBs exec'd 0 ====
+
+	0x25483D0C:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25483D10:  91230000  stw r9,0(r3)
+	   4: GETL       	R9, t4
+	   5: GETL       	R3, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0x25483D14:  81240004  lwz r9,4(r4)
+	   8: GETL       	R4, t8
+	   9: ADDL       	$0x4, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R9
+	  12: INCEIPL       	$4
+
+	0x25483D18:  90030004  stw r0,4(r3)
+	  13: GETL       	R0, t12
+	  14: GETL       	R3, t14
+	  15: ADDL       	$0x4, t14
+	  16: STL       	t12, (t14)
+	  17: INCEIPL       	$4
+
+	0x25483D1C:  80040008  lwz r0,8(r4)
+	  18: GETL       	R4, t16
+	  19: ADDL       	$0x8, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R0
+	  22: INCEIPL       	$4
+
+	0x25483D20:  91230008  stw r9,8(r3)
+	  23: GETL       	R9, t20
+	  24: GETL       	R3, t22
+	  25: ADDL       	$0x8, t22
+	  26: STL       	t20, (t22)
+	  27: INCEIPL       	$4
+
+	0x25483D24:  8124000C  lwz r9,12(r4)
+	  28: GETL       	R4, t24
+	  29: ADDL       	$0xC, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R9
+	  32: INCEIPL       	$4
+
+	0x25483D28:  9003000C  stw r0,12(r3)
+	  33: GETL       	R0, t28
+	  34: GETL       	R3, t30
+	  35: ADDL       	$0xC, t30
+	  36: STL       	t28, (t30)
+	  37: INCEIPL       	$4
+
+	0x25483D2C:  80040010  lwz r0,16(r4)
+	  38: GETL       	R4, t32
+	  39: ADDL       	$0x10, t32
+	  40: LDL       	(t32), t34
+	  41: PUTL       	t34, R0
+	  42: INCEIPL       	$4
+
+	0x25483D30:  91230010  stw r9,16(r3)
+	  43: GETL       	R9, t36
+	  44: GETL       	R3, t38
+	  45: ADDL       	$0x10, t38
+	  46: STL       	t36, (t38)
+	  47: INCEIPL       	$4
+
+	0x25483D34:  81240014  lwz r9,20(r4)
+	  48: GETL       	R4, t40
+	  49: ADDL       	$0x14, t40
+	  50: LDL       	(t40), t42
+	  51: PUTL       	t42, R9
+	  52: INCEIPL       	$4
+
+	0x25483D38:  90030014  stw r0,20(r3)
+	  53: GETL       	R0, t44
+	  54: GETL       	R3, t46
+	  55: ADDL       	$0x14, t46
+	  56: STL       	t44, (t46)
+	  57: INCEIPL       	$4
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	  58: GETL       	R4, t48
+	  59: ADDL       	$0x18, t48
+	  60: LDL       	(t48), t50
+	  61: PUTL       	t50, R0
+	  62: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	  63: GETL       	R9, t52
+	  64: GETL       	R3, t54
+	  65: ADDL       	$0x18, t54
+	  66: STL       	t52, (t54)
+	  67: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  68: GETL       	R5, t56
+	  69: ADCL       	$0xFFFFFFF8, t56  (-wCa)
+	  70: PUTL       	t56, R5
+	  71: CMP0L       	t56, t58  (-rSo)
+	  72: ICRFL       	t58, $0x0, CR
+	  73: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  74: GETL       	R4, t60
+	  75: ADDL       	$0x1C, t60
+	  76: LDL       	(t60), t62
+	  77: PUTL       	t62, R9
+	  78: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  79: GETL       	R0, t64
+	  80: GETL       	R3, t66
+	  81: ADDL       	$0x1C, t66
+	  82: STL       	t64, (t66)
+	  83: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  84: GETL       	R4, t68
+	  85: ADDL       	$0x20, t68
+	  86: PUTL       	t68, R4
+	  87: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  88: GETL       	R3, t70
+	  89: ADDL       	$0x20, t70
+	  90: PUTL       	t70, R3
+	  91: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  92: Jc02o       	$0x25483D0C
+
+
+. 0 25483D0C 80
+. 80 04 00 00 91 23 00 00 81 24 00 04 90 03 00 04 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+==== BB 79 (0x25483D5C) approx BBs exec'd 0 ====
+
+	0x25483D5C:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0x25483D60:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x25483D64:  91230000  stw r9,0(r3)
+	   9: GETL       	R9, t6
+	  10: GETL       	R3, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25483D68:  4E800020  blr
+	  13: GETL       	LR, t10
+	  14: JMPo-r       	t10  ($4)
+
+
+. 0 25483D5C 16
+. 83 C1 00 08 38 21 00 10 91 23 00 00 4E 80 00 20
+
+==== BB 80 (0x25483C58) approx BBs exec'd 0 ====
+
+	0x25483C58:  5786003A  rlwinm r6,r28,0,0,29
+	   0: GETL       	R28, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x25483C5C:  578507BE  rlwinm r5,r28,0,30,31
+	   4: GETL       	R28, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0x25483C60:  7FFF3214  add r31,r31,r6
+	   8: GETL       	R31, t4
+	   9: GETL       	R6, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R31
+	  12: INCEIPL       	$4
+
+	0x25483C64:  7FBD3214  add r29,r29,r6
+	  13: GETL       	R29, t8
+	  14: GETL       	R6, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0x25483C68:  2C850000  cmpi cr1,r5,0
+	  18: GETL       	R5, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0x25483C6C:  4186001C  bc 12,6,0x25483C88
+	  22: Js06o       	$0x25483C88
+
+
+. 0 25483C58 24
+. 57 86 00 3A 57 85 07 BE 7F FF 32 14 7F BD 32 14 2C 85 00 00 41 86 00 1C
+
+==== BB 81 (0x25483C88) approx BBs exec'd 0 ====
+
+	0x25483C88:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25483C8C:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483C90:  83810010  lwz r28,16(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x25483C94:  8361000C  lwz r27,12(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xC, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0x25483C98:  7D0803A6  mtlr r8
+	  18: GETL       	R8, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x25483C9C:  83A10014  lwz r29,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x25483CA0:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x25483CA4:  83E1001C  lwz r31,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0x25483CA8:  38210020  addi r1,r1,32
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0x25483CAC:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+. 0 25483C88 40
+. 81 01 00 24 7F 63 DB 78 83 81 00 10 83 61 00 0C 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 82 (0x254719D8) approx BBs exec'd 0 ====
+
+	0x254719D8:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254719DC:  48006B91  bl 0x2547856C
+	   3: MOVL       	$0x254719E0, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547856C  ($4)
+
+
+. 0 254719D8 8
+. 7F 83 E3 78 48 00 6B 91
+
+==== BB 83 _dl_setup_hash(0x2547856C) approx BBs exec'd 0 ====
+
+	0x2547856C:  81230030  lwz r9,48(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25478570:  9421FFF0  stwu r1,-16(r1)
+	   5: GETL       	R1, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0xFFFFFFF0, t6
+	   8: PUTL       	t6, R1
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x25478574:  2F890000  cmpi cr7,r9,0
+	  11: GETL       	R9, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x25478578:  419E0024  bc 12,30,0x2547859C
+	  15: Js30o       	$0x2547859C
+
+
+. 0 2547856C 16
+. 81 23 00 30 94 21 FF F0 2F 89 00 00 41 9E 00 24
+
+==== BB 84 (0x2547857C) approx BBs exec'd 0 ====
+
+	0x2547857C:  81690004  lwz r11,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25478580:  814B0000  lwz r10,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x25478584:  388B0008  addi r4,r11,8
+	   9: GETL       	R11, t8
+	  10: ADDL       	$0x8, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25478588:  90830170  stw r4,368(r3)
+	  13: GETL       	R4, t10
+	  14: GETL       	R3, t12
+	  15: ADDL       	$0x170, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547858C:  5549103A  rlwinm r9,r10,2,0,29
+	  18: GETL       	R10, t14
+	  19: SHLL       	$0x2, t14
+	  20: PUTL       	t14, R9
+	  21: INCEIPL       	$4
+
+	0x25478590:  9143016C  stw r10,364(r3)
+	  22: GETL       	R10, t16
+	  23: GETL       	R3, t18
+	  24: ADDL       	$0x16C, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x25478594:  7C044A14  add r0,r4,r9
+	  27: GETL       	R4, t20
+	  28: GETL       	R9, t22
+	  29: ADDL       	t20, t22
+	  30: PUTL       	t22, R0
+	  31: INCEIPL       	$4
+
+	0x25478598:  90030174  stw r0,372(r3)
+	  32: GETL       	R0, t24
+	  33: GETL       	R3, t26
+	  34: ADDL       	$0x174, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x2547859C:  38210010  addi r1,r1,16
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x10, t28
+	  39: PUTL       	t28, R1
+	  40: INCEIPL       	$4
+
+	0x254785A0:  4E800020  blr
+	  41: GETL       	LR, t30
+	  42: JMPo-r       	t30  ($4)
+
+
+. 0 2547857C 40
+. 81 69 00 04 81 4B 00 00 38 8B 00 08 90 83 01 70 55 49 10 3A 91 43 01 6C 7C 04 4A 14 90 03 01 74 38 21 00 10 4E 80 00 20
+
+==== BB 85 (0x254719E0) approx BBs exec'd 0 ====
+
+	0x254719E0:  80FE04FC  lwz r7,1276(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254719E4:  80DE0500  lwz r6,1280(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x500, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x254719E8:  3940FFFF  li r10,-1
+	  10: MOVL       	$0xFFFFFFFF, t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x254719EC:  80BE04A4  lwz r5,1188(r30)
+	  13: GETL       	R30, t10
+	  14: ADDL       	$0x4A4, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0x254719F0:  39000001  li r8,1
+	  18: MOVL       	$0x1, t14
+	  19: PUTL       	t14, R8
+	  20: INCEIPL       	$4
+
+	0x254719F4:  817E04C0  lwz r11,1216(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x4C0, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R11
+	  25: INCEIPL       	$4
+
+	0x254719F8:  7F63DB78  or r3,r27,r27
+	  26: GETL       	R27, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x254719FC:  809E0024  lwz r4,36(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x24, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R4
+	  33: INCEIPL       	$4
+
+	0x25471A00:  915D03E4  stw r10,996(r29)
+	  34: GETL       	R10, t26
+	  35: GETL       	R29, t28
+	  36: ADDL       	$0x3E4, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x25471A04:  939D01CC  stw r28,460(r29)
+	  39: GETL       	R28, t30
+	  40: GETL       	R29, t32
+	  41: ADDL       	$0x1CC, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x25471A08:  911D0330  stw r8,816(r29)
+	  44: GETL       	R8, t34
+	  45: GETL       	R29, t36
+	  46: ADDL       	$0x330, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0x25471A0C:  90FD0358  stw r7,856(r29)
+	  49: GETL       	R7, t38
+	  50: GETL       	R29, t40
+	  51: ADDL       	$0x358, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0x25471A10:  90DD035C  stw r6,860(r29)
+	  54: GETL       	R6, t42
+	  55: GETL       	R29, t44
+	  56: ADDL       	$0x35C, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0x25471A14:  90BD0360  stw r5,864(r29)
+	  59: GETL       	R5, t46
+	  60: GETL       	R29, t48
+	  61: ADDL       	$0x360, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0x25471A18:  902B0000  stw r1,0(r11)
+	  64: GETL       	R1, t50
+	  65: GETL       	R11, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x25471A1C:  4800D629  bl 0x2547F044
+	  68: MOVL       	$0x25471A20, t54
+	  69: PUTL       	t54, LR
+	  70: JMPo-c       	$0x2547F044  ($4)
+
+
+. 0 254719E0 64
+. 80 FE 04 FC 80 DE 05 00 39 40 FF FF 80 BE 04 A4 39 00 00 01 81 7E 04 C0 7F 63 DB 78 80 9E 00 24 91 5D 03 E4 93 9D 01 CC 91 1D 03 30 90 FD 03 58 90 DD 03 5C 90 BD 03 60 90 2B 00 00 48 00 D6 29
+
+==== BB 86 _dl_sysdep_start(0x2547F044) approx BBs exec'd 0 ====
+
+	0x2547F044:  7D8802A6  mflr r12
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0x2547F048:  9421FDE0  stwu r1,-544(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFDE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547F04C:  38E30004  addi r7,r3,4
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x4, t6
+	  11: PUTL       	t6, R7
+	  12: INCEIPL       	$4
+
+	0x2547F050:  48017FB1  bl 0x25497000
+	  13: MOVL       	$0x2547F054, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547F044 16
+. 7D 88 02 A6 94 21 FD E0 38 E3 00 04 48 01 7F B1
+
+==== BB 87 (0x2547F054) approx BBs exec'd 0 ====
+
+	0x2547F054:  93C10218  stw r30,536(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x218, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547F058:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547F05C:  91810224  stw r12,548(r1)
+	   8: GETL       	R12, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x224, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547F060:  81030000  lwz r8,0(r3)
+	  13: GETL       	R3, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R8
+	  16: INCEIPL       	$4
+
+	0x2547F064:  80BE04C0  lwz r5,1216(r30)
+	  17: GETL       	R30, t14
+	  18: ADDL       	$0x4C0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R5
+	  21: INCEIPL       	$4
+
+	0x2547F068:  5506103A  rlwinm r6,r8,2,0,29
+	  22: GETL       	R8, t18
+	  23: SHLL       	$0x2, t18
+	  24: PUTL       	t18, R6
+	  25: INCEIPL       	$4
+
+	0x2547F06C:  813E04D4  lwz r9,1236(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4D4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0x2547F070:  7D463A14  add r10,r6,r7
+	  31: GETL       	R6, t24
+	  32: GETL       	R7, t26
+	  33: ADDL       	t24, t26
+	  34: PUTL       	t26, R10
+	  35: INCEIPL       	$4
+
+	0x2547F074:  90650000  stw r3,0(r5)
+	  36: GETL       	R3, t28
+	  37: GETL       	R5, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x2547F078:  800A0004  lwz r0,4(r10)
+	  40: GETL       	R10, t32
+	  41: ADDL       	$0x4, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R0
+	  44: INCEIPL       	$4
+
+	0x2547F07C:  394A0004  addi r10,r10,4
+	  45: GETL       	R10, t36
+	  46: ADDL       	$0x4, t36
+	  47: PUTL       	t36, R10
+	  48: INCEIPL       	$4
+
+	0x2547F080:  807E04D0  lwz r3,1232(r30)
+	  49: GETL       	R30, t38
+	  50: ADDL       	$0x4D0, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R3
+	  53: INCEIPL       	$4
+
+	0x2547F084:  2F800000  cmpi cr7,r0,0
+	  54: GETL       	R0, t42
+	  55: CMP0L       	t42, t44  (-rSo)
+	  56: ICRFL       	t44, $0x7, CR
+	  57: INCEIPL       	$4
+
+	0x2547F088:  817E04B8  lwz r11,1208(r30)
+	  58: GETL       	R30, t46
+	  59: ADDL       	$0x4B8, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R11
+	  62: INCEIPL       	$4
+
+	0x2547F08C:  92A101F4  stw r21,500(r1)
+	  63: GETL       	R21, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x1F4, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x2547F090:  3AA00000  li r21,0
+	  68: MOVL       	$0x0, t54
+	  69: PUTL       	t54, R21
+	  70: INCEIPL       	$4
+
+	0x2547F094:  92C101F8  stw r22,504(r1)
+	  71: GETL       	R22, t56
+	  72: GETL       	R1, t58
+	  73: ADDL       	$0x1F8, t58
+	  74: STL       	t56, (t58)
+	  75: INCEIPL       	$4
+
+	0x2547F098:  3AC00000  li r22,0
+	  76: MOVL       	$0x0, t60
+	  77: PUTL       	t60, R22
+	  78: INCEIPL       	$4
+
+	0x2547F09C:  92E101FC  stw r23,508(r1)
+	  79: GETL       	R23, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x1FC, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x2547F0A0:  7C972378  or r23,r4,r4
+	  84: GETL       	R4, t66
+	  85: PUTL       	t66, R23
+	  86: INCEIPL       	$4
+
+	0x2547F0A4:  93010200  stw r24,512(r1)
+	  87: GETL       	R24, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x200, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0x2547F0A8:  3B000000  li r24,0
+	  92: MOVL       	$0x0, t72
+	  93: PUTL       	t72, R24
+	  94: INCEIPL       	$4
+
+	0x2547F0AC:  93210204  stw r25,516(r1)
+	  95: GETL       	R25, t74
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x204, t76
+	  98: STL       	t74, (t76)
+	  99: INCEIPL       	$4
+
+	0x2547F0B0:  3B200000  li r25,0
+	 100: MOVL       	$0x0, t78
+	 101: PUTL       	t78, R25
+	 102: INCEIPL       	$4
+
+	0x2547F0B4:  93E1021C  stw r31,540(r1)
+	 103: GETL       	R31, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x21C, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x2547F0B8:  3BE00000  li r31,0
+	 108: MOVL       	$0x0, t84
+	 109: PUTL       	t84, R31
+	 110: INCEIPL       	$4
+
+	0x2547F0BC:  93410208  stw r26,520(r1)
+	 111: GETL       	R26, t86
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x208, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0x2547F0C0:  9361020C  stw r27,524(r1)
+	 116: GETL       	R27, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0x20C, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x2547F0C4:  93810210  stw r28,528(r1)
+	 121: GETL       	R28, t94
+	 122: GETL       	R1, t96
+	 123: ADDL       	$0x210, t96
+	 124: STL       	t94, (t96)
+	 125: INCEIPL       	$4
+
+	0x2547F0C8:  93A10214  stw r29,532(r1)
+	 126: GETL       	R29, t98
+	 127: GETL       	R1, t100
+	 128: ADDL       	$0x214, t100
+	 129: STL       	t98, (t100)
+	 130: INCEIPL       	$4
+
+	0x2547F0CC:  91030000  stw r8,0(r3)
+	 131: GETL       	R8, t102
+	 132: GETL       	R3, t104
+	 133: STL       	t102, (t104)
+	 134: INCEIPL       	$4
+
+	0x2547F0D0:  90E90000  stw r7,0(r9)
+	 135: GETL       	R7, t106
+	 136: GETL       	R9, t108
+	 137: STL       	t106, (t108)
+	 138: INCEIPL       	$4
+
+	0x2547F0D4:  914B0000  stw r10,0(r11)
+	 139: GETL       	R10, t110
+	 140: GETL       	R11, t112
+	 141: STL       	t110, (t112)
+	 142: INCEIPL       	$4
+
+	0x2547F0D8:  419E0010  bc 12,30,0x2547F0E8
+	 143: Js30o       	$0x2547F0E8
+
+
+. 0 2547F054 136
+. 93 C1 02 18 7F C8 02 A6 91 81 02 24 81 03 00 00 80 BE 04 C0 55 06 10 3A 81 3E 04 D4 7D 46 3A 14 90 65 00 00 80 0A 00 04 39 4A 00 04 80 7E 04 D0 2F 80 00 00 81 7E 04 B8 92 A1 01 F4 3A A0 00 00 92 C1 01 F8 3A C0 00 00 92 E1 01 FC 7C 97 23 78 93 01 02 00 3B 00 00 00 93 21 02 04 3B 20 00 00 93 E1 02 1C 3B E0 00 00 93 41 02 08 93 61 02 0C 93 81 02 10 93 A1 02 14 91 03 00 00 90 E9 00 00 91 4B 00 00 41 9E 00 10
+
+==== BB 88 (0x2547F0DC) approx BBs exec'd 0 ====
+
+	0x2547F0DC:  848A0004  lwzu r4,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R10
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x2547F0E0:  2F840000  cmpi cr7,r4,0
+	   6: GETL       	R4, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x2547F0E4:  409EFFF8  bc 4,30,0x2547F0DC
+	  10: Jc30o       	$0x2547F0DC
+
+
+. 0 2547F0DC 12
+. 84 8A 00 04 2F 84 00 00 40 9E FF F8
+
+==== BB 89 (0x2547F0E8) approx BBs exec'd 0 ====
+
+	0x2547F0E8:  3B6A0013  addi r27,r10,19
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x13, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x2547F0EC:  380A0004  addi r0,r10,4
+	   4: GETL       	R10, t2
+	   5: ADDL       	$0x4, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x2547F0F0:  576B0036  rlwinm r11,r27,0,0,27
+	   8: GETL       	R27, t4
+	   9: ANDL       	$0xFFFFFFF0, t4
+	  10: PUTL       	t4, R11
+	  11: INCEIPL       	$4
+
+	0x2547F0F4:  834B0000  lwz r26,0(r11)
+	  12: GETL       	R11, t6
+	  13: LDL       	(t6), t8
+	  14: PUTL       	t8, R26
+	  15: INCEIPL       	$4
+
+	0x2547F0F8:  2F9A0010  cmpi cr7,r26,16
+	  16: GETL       	R26, t10
+	  17: MOVL       	$0x10, t14
+	  18: CMPL       	t10, t14, t12  (-rSo)
+	  19: ICRFL       	t12, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x2547F0FC:  419D0008  bc 12,29,0x2547F104
+	  21: Js29o       	$0x2547F104
+
+
+. 0 2547F0E8 24
+. 3B 6A 00 13 38 0A 00 04 57 6B 00 36 83 4B 00 00 2F 9A 00 10 41 9D 00 08
+
+==== BB 90 (0x2547F104) approx BBs exec'd 0 ====
+
+	0x2547F104:  7C080378  or r8,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0x2547F108:  835E04F4  lwz r26,1268(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x4F4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547F10C:  81680000  lwz r11,0(r8)
+	   8: GETL       	R8, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x2547F110:  3B800000  li r28,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R28
+	  14: INCEIPL       	$4
+
+	0x2547F114:  813E03EC  lwz r9,1004(r30)
+	  15: GETL       	R30, t12
+	  16: ADDL       	$0x3EC, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x2547F118:  2C0B0000  cmpi cr0,r11,0
+	  20: GETL       	R11, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x2547F11C:  83BE04E0  lwz r29,1248(r30)
+	  24: GETL       	R30, t20
+	  25: ADDL       	$0x4E0, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R29
+	  28: INCEIPL       	$4
+
+	0x2547F120:  90090000  stw r0,0(r9)
+	  29: GETL       	R0, t24
+	  30: GETL       	R9, t26
+	  31: STL       	t24, (t26)
+	  32: INCEIPL       	$4
+
+	0x2547F124:  93A101E0  stw r29,480(r1)
+	  33: GETL       	R29, t28
+	  34: GETL       	R1, t30
+	  35: ADDL       	$0x1E0, t30
+	  36: STL       	t28, (t30)
+	  37: INCEIPL       	$4
+
+	0x2547F128:  939A000C  stw r28,12(r26)
+	  38: GETL       	R28, t32
+	  39: GETL       	R26, t34
+	  40: ADDL       	$0xC, t34
+	  41: STL       	t32, (t34)
+	  42: INCEIPL       	$4
+
+	0x2547F12C:  4182004C  bc 12,2,0x2547F178
+	  43: Js02o       	$0x2547F178
+
+
+. 0 2547F104 44
+. 7C 08 03 78 83 5E 04 F4 81 68 00 00 3B 80 00 00 81 3E 03 EC 2C 0B 00 00 83 BE 04 E0 90 09 00 00 93 A1 01 E0 93 9A 00 0C 41 82 00 4C
+
+==== BB 91 (0x2547F130) approx BBs exec'd 0 ====
+
+	0x2547F130:  7D6A5B78  or r10,r11,r11
+	   0: GETL       	R11, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x2547F134:  380AFFFD  addi r0,r10,-3
+	   3: GETL       	R10, t2
+	   4: ADDL       	$0xFFFFFFFD, t2
+	   5: PUTL       	t2, R0
+	   6: INCEIPL       	$4
+
+	0x2547F138:  28800014  cmpli cr1,r0,20
+	   7: GETL       	R0, t4
+	   8: MOVL       	$0x14, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x2547F13C:  41850024  bc 12,5,0x2547F160
+	  12: Js05o       	$0x2547F160
+
+
+. 0 2547F130 16
+. 7D 6A 5B 78 38 0A FF FD 28 80 00 14 41 85 00 24
+
+==== BB 92 (0x2547F140) approx BBs exec'd 0 ====
+
+	0x2547F140:  817E03F4  lwz r11,1012(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x3F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547F144:  5405103A  rlwinm r5,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547F148:  7C65582E  lwzx r3,r5,r11
+	   9: GETL       	R11, t6
+	  10: GETL       	R5, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x2547F14C:  7CE35A14  add r7,r3,r11
+	  15: GETL       	R3, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R7
+	  19: INCEIPL       	$4
+
+	0x2547F150:  7CE903A6  mtctr r7
+	  20: GETL       	R7, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x2547F154:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+. 0 2547F140 24
+. 81 7E 03 F4 54 05 10 3A 7C 65 58 2E 7C E3 5A 14 7C E9 03 A6 4E 80 04 20
+
+==== BB 93 (0x2547F160) approx BBs exec'd 0 ====
+
+	0x2547F160:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F164:  7CAB5030  slw r11,r5,r10
+	   3: GETL       	R5, t4
+	   4: GETL       	R10, t2
+	   5: SHLL       	t2, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547F168:  85480008  lwzu r10,8(r8)
+	   8: GETL       	R8, t6
+	   9: ADDL       	$0x8, t6
+	  10: PUTL       	t6, R8
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R10
+	  13: INCEIPL       	$4
+
+	0x2547F16C:  7FFF5B78  or r31,r31,r11
+	  14: GETL       	R31, t10
+	  15: GETL       	R11, t12
+	  16: ORL       	t12, t10
+	  17: PUTL       	t10, R31
+	  18: INCEIPL       	$4
+
+	0x2547F170:  2F8A0000  cmpi cr7,r10,0
+	  19: GETL       	R10, t14
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x2547F174:  409EFFC0  bc 4,30,0x2547F134
+	  23: Jc30o       	$0x2547F134
+
+
+. 0 2547F160 24
+. 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FF C0
+
+==== BB 94 (0x2547F134) approx BBs exec'd 0 ====
+
+	0x2547F134:  380AFFFD  addi r0,r10,-3
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0xFFFFFFFD, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x2547F138:  28800014  cmpli cr1,r0,20
+	   4: GETL       	R0, t2
+	   5: MOVL       	$0x14, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547F13C:  41850024  bc 12,5,0x2547F160
+	   9: Js05o       	$0x2547F160
+
+
+. 0 2547F134 12
+. 38 0A FF FD 28 80 00 14 41 85 00 24
+
+==== BB 95 (0x2547F3D4) approx BBs exec'd 0 ====
+
+	0x2547F3D4:  813E04F0  lwz r9,1264(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F3D8:  2F090000  cmpi cr6,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547F3DC:  41BAFD84  bc 13,26,0x2547F160
+	   9: Js26o       	$0x2547F160
+
+
+. 0 2547F3D4 12
+. 81 3E 04 F0 2F 09 00 00 41 BA FD 84
+
+==== BB 96 (0x2547F3E0) approx BBs exec'd 0 ====
+
+	0x2547F3E0:  80680004  lwz r3,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547F3E4:  90690000  stw r3,0(r9)
+	   5: GETL       	R3, t4
+	   6: GETL       	R9, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547F3E8:  81480000  lwz r10,0(r8)
+	   9: GETL       	R8, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R10
+	  12: INCEIPL       	$4
+
+	0x2547F3EC:  38A00001  li r5,1
+	  13: MOVL       	$0x1, t12
+	  14: PUTL       	t12, R5
+	  15: INCEIPL       	$4
+
+	0x2547F3F0:  7CAB5030  slw r11,r5,r10
+	  16: GETL       	R5, t16
+	  17: GETL       	R10, t14
+	  18: SHLL       	t14, t16
+	  19: PUTL       	t16, R11
+	  20: INCEIPL       	$4
+
+	0x2547F3F4:  85480008  lwzu r10,8(r8)
+	  21: GETL       	R8, t18
+	  22: ADDL       	$0x8, t18
+	  23: PUTL       	t18, R8
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R10
+	  26: INCEIPL       	$4
+
+	0x2547F3F8:  7FFF5B78  or r31,r31,r11
+	  27: GETL       	R31, t22
+	  28: GETL       	R11, t24
+	  29: ORL       	t24, t22
+	  30: PUTL       	t22, R31
+	  31: INCEIPL       	$4
+
+	0x2547F3FC:  2F8A0000  cmpi cr7,r10,0
+	  32: GETL       	R10, t26
+	  33: CMP0L       	t26, t28  (-rSo)
+	  34: ICRFL       	t28, $0x7, CR
+	  35: INCEIPL       	$4
+
+	0x2547F400:  409EFD34  bc 4,30,0x2547F134
+	  36: Jc30o       	$0x2547F134
+
+
+. 0 2547F3E0 36
+. 80 68 00 04 90 69 00 00 81 48 00 00 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 34
+
+==== BB 97 (0x2547F158) approx BBs exec'd 0 ====
+
+	0x2547F158:  83A80004  lwz r29,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547F15C:  93BA003C  stw r29,60(r26)
+	   5: GETL       	R29, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x3C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547F160:  38A00001  li r5,1
+	  10: MOVL       	$0x1, t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x2547F164:  7CAB5030  slw r11,r5,r10
+	  13: GETL       	R5, t12
+	  14: GETL       	R10, t10
+	  15: SHLL       	t10, t12
+	  16: PUTL       	t12, R11
+	  17: INCEIPL       	$4
+
+	0x2547F168:  85480008  lwzu r10,8(r8)
+	  18: GETL       	R8, t14
+	  19: ADDL       	$0x8, t14
+	  20: PUTL       	t14, R8
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R10
+	  23: INCEIPL       	$4
+
+	0x2547F16C:  7FFF5B78  or r31,r31,r11
+	  24: GETL       	R31, t18
+	  25: GETL       	R11, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R31
+	  28: INCEIPL       	$4
+
+	0x2547F170:  2F8A0000  cmpi cr7,r10,0
+	  29: GETL       	R10, t22
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x7, CR
+	  32: INCEIPL       	$4
+
+	0x2547F174:  409EFFC0  bc 4,30,0x2547F134
+	  33: Jc30o       	$0x2547F134
+
+
+. 0 2547F158 32
+. 83 A8 00 04 93 BA 00 3C 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FF C0
+
+==== BB 98 (0x2547F4BC) approx BBs exec'd 0 ====
+
+	0x2547F4BC:  81480004  lwz r10,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547F4C0:  915A0004  stw r10,4(r26)
+	   5: GETL       	R10, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x4, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547F4C4:  81480000  lwz r10,0(r8)
+	  10: GETL       	R8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0x2547F4C8:  4BFFFF24  b 0x2547F3EC
+	  14: JMPo       	$0x2547F3EC  ($4)
+
+
+. 0 2547F4BC 16
+. 81 48 00 04 91 5A 00 04 81 48 00 00 4B FF FF 24
+
+==== BB 99 (0x2547F3EC) approx BBs exec'd 0 ====
+
+	0x2547F3EC:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F3F0:  7CAB5030  slw r11,r5,r10
+	   3: GETL       	R5, t4
+	   4: GETL       	R10, t2
+	   5: SHLL       	t2, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547F3F4:  85480008  lwzu r10,8(r8)
+	   8: GETL       	R8, t6
+	   9: ADDL       	$0x8, t6
+	  10: PUTL       	t6, R8
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R10
+	  13: INCEIPL       	$4
+
+	0x2547F3F8:  7FFF5B78  or r31,r31,r11
+	  14: GETL       	R31, t10
+	  15: GETL       	R11, t12
+	  16: ORL       	t12, t10
+	  17: PUTL       	t10, R31
+	  18: INCEIPL       	$4
+
+	0x2547F3FC:  2F8A0000  cmpi cr7,r10,0
+	  19: GETL       	R10, t14
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x2547F400:  409EFD34  bc 4,30,0x2547F134
+	  23: Jc30o       	$0x2547F134
+
+
+. 0 2547F3EC 24
+. 38 A0 00 01 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 34
+
+==== BB 100 (0x2547F418) approx BBs exec'd 0 ====
+
+	0x2547F418:  81280004  lwz r9,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F41C:  913A001C  stw r9,28(r26)
+	   5: GETL       	R9, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x1C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547F420:  81480000  lwz r10,0(r8)
+	  10: GETL       	R8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0x2547F424:  4BFFFFC8  b 0x2547F3EC
+	  14: JMPo       	$0x2547F3EC  ($4)
+
+
+. 0 2547F418 16
+. 81 28 00 04 91 3A 00 1C 81 48 00 00 4B FF FF C8
+
+==== BB 101 (0x2547F4EC) approx BBs exec'd 0 ====
+
+	0x2547F4EC:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F4F0:  82C80004  lwz r22,4(r8)
+	   3: GETL       	R8, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0x2547F4F4:  7CAB5030  slw r11,r5,r10
+	   8: GETL       	R5, t8
+	   9: GETL       	R10, t6
+	  10: SHLL       	t6, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547F4F8:  85480008  lwzu r10,8(r8)
+	  13: GETL       	R8, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R8
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547F4FC:  7FFF5B78  or r31,r31,r11
+	  19: GETL       	R31, t14
+	  20: GETL       	R11, t16
+	  21: ORL       	t16, t14
+	  22: PUTL       	t14, R31
+	  23: INCEIPL       	$4
+
+	0x2547F500:  2F8A0000  cmpi cr7,r10,0
+	  24: GETL       	R10, t18
+	  25: CMP0L       	t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0x2547F504:  409EFC30  bc 4,30,0x2547F134
+	  28: Jc30o       	$0x2547F134
+
+
+. 0 2547F4EC 28
+. 38 A0 00 01 82 C8 00 04 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 30
+
+==== BB 102 (0x2547F4CC) approx BBs exec'd 0 ====
+
+	0x2547F4CC:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F4D0:  82A80004  lwz r21,4(r8)
+	   3: GETL       	R8, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R21
+	   7: INCEIPL       	$4
+
+	0x2547F4D4:  7CAB5030  slw r11,r5,r10
+	   8: GETL       	R5, t8
+	   9: GETL       	R10, t6
+	  10: SHLL       	t6, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547F4D8:  85480008  lwzu r10,8(r8)
+	  13: GETL       	R8, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R8
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547F4DC:  7FFF5B78  or r31,r31,r11
+	  19: GETL       	R31, t14
+	  20: GETL       	R11, t16
+	  21: ORL       	t16, t14
+	  22: PUTL       	t14, R31
+	  23: INCEIPL       	$4
+
+	0x2547F4E0:  2F8A0000  cmpi cr7,r10,0
+	  24: GETL       	R10, t18
+	  25: CMP0L       	t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0x2547F4E4:  409EFC50  bc 4,30,0x2547F134
+	  28: Jc30o       	$0x2547F134
+
+
+. 0 2547F4CC 28
+. 38 A0 00 01 82 A8 00 04 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 50
+
+==== BB 103 (0x2547F494) approx BBs exec'd 0 ====
+
+	0x2547F494:  80080004  lwz r0,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547F498:  38A00001  li r5,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547F49C:  81480000  lwz r10,0(r8)
+	   8: GETL       	R8, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R10
+	  11: INCEIPL       	$4
+
+	0x2547F4A0:  900101E0  stw r0,480(r1)
+	  12: GETL       	R0, t10
+	  13: GETL       	R1, t12
+	  14: ADDL       	$0x1E0, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0x2547F4A4:  7CAB5030  slw r11,r5,r10
+	  17: GETL       	R5, t16
+	  18: GETL       	R10, t14
+	  19: SHLL       	t14, t16
+	  20: PUTL       	t16, R11
+	  21: INCEIPL       	$4
+
+	0x2547F4A8:  85480008  lwzu r10,8(r8)
+	  22: GETL       	R8, t18
+	  23: ADDL       	$0x8, t18
+	  24: PUTL       	t18, R8
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R10
+	  27: INCEIPL       	$4
+
+	0x2547F4AC:  7FFF5B78  or r31,r31,r11
+	  28: GETL       	R31, t22
+	  29: GETL       	R11, t24
+	  30: ORL       	t24, t22
+	  31: PUTL       	t22, R31
+	  32: INCEIPL       	$4
+
+	0x2547F4B0:  2F8A0000  cmpi cr7,r10,0
+	  33: GETL       	R10, t26
+	  34: CMP0L       	t26, t28  (-rSo)
+	  35: ICRFL       	t28, $0x7, CR
+	  36: INCEIPL       	$4
+
+	0x2547F4B4:  409EFC80  bc 4,30,0x2547F134
+	  37: Jc30o       	$0x2547F134
+
+
+. 0 2547F494 36
+. 80 08 00 04 38 A0 00 01 81 48 00 00 90 01 01 E0 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FC 80
+
+==== BB 104 (0x2547F470) approx BBs exec'd 0 ====
+
+	0x2547F470:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F474:  80C80004  lwz r6,4(r8)
+	   3: GETL       	R8, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0x2547F478:  7CAB5030  slw r11,r5,r10
+	   8: GETL       	R5, t8
+	   9: GETL       	R10, t6
+	  10: SHLL       	t6, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547F47C:  85480008  lwzu r10,8(r8)
+	  13: GETL       	R8, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R8
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547F480:  7F393278  xor r25,r25,r6
+	  19: GETL       	R25, t14
+	  20: GETL       	R6, t16
+	  21: XORL       	t14, t16
+	  22: PUTL       	t16, R25
+	  23: INCEIPL       	$4
+
+	0x2547F484:  7FFF5B78  or r31,r31,r11
+	  24: GETL       	R31, t18
+	  25: GETL       	R11, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R31
+	  28: INCEIPL       	$4
+
+	0x2547F488:  2F8A0000  cmpi cr7,r10,0
+	  29: GETL       	R10, t22
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x7, CR
+	  32: INCEIPL       	$4
+
+	0x2547F48C:  409EFCA8  bc 4,30,0x2547F134
+	  33: Jc30o       	$0x2547F134
+
+
+. 0 2547F470 32
+. 38 A0 00 01 80 C8 00 04 7C AB 50 30 85 48 00 08 7F 39 32 78 7F FF 5B 78 2F 8A 00 00 40 9E FC A8
+
+==== BB 105 (0x2547F44C) approx BBs exec'd 0 ====
+
+	0x2547F44C:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F450:  81880004  lwz r12,4(r8)
+	   3: GETL       	R8, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x2547F454:  7CAB5030  slw r11,r5,r10
+	   8: GETL       	R5, t8
+	   9: GETL       	R10, t6
+	  10: SHLL       	t6, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547F458:  85480008  lwzu r10,8(r8)
+	  13: GETL       	R8, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R8
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547F45C:  7F186278  xor r24,r24,r12
+	  19: GETL       	R24, t14
+	  20: GETL       	R12, t16
+	  21: XORL       	t14, t16
+	  22: PUTL       	t16, R24
+	  23: INCEIPL       	$4
+
+	0x2547F460:  7FFF5B78  or r31,r31,r11
+	  24: GETL       	R31, t18
+	  25: GETL       	R11, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R31
+	  28: INCEIPL       	$4
+
+	0x2547F464:  2F8A0000  cmpi cr7,r10,0
+	  29: GETL       	R10, t22
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x7, CR
+	  32: INCEIPL       	$4
+
+	0x2547F468:  409EFCCC  bc 4,30,0x2547F134
+	  33: Jc30o       	$0x2547F134
+
+
+. 0 2547F44C 32
+. 38 A0 00 01 81 88 00 04 7C AB 50 30 85 48 00 08 7F 18 62 78 7F FF 5B 78 2F 8A 00 00 40 9E FC CC
+
+==== BB 106 (0x2547F3A4) approx BBs exec'd 0 ====
+
+	0x2547F3A4:  83680004  lwz r27,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x2547F3A8:  38A00001  li r5,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547F3AC:  809E04B4  lwz r4,1204(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x4B4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x2547F3B0:  3BE0FFFF  li r31,-1
+	  13: MOVL       	$0xFFFFFFFF, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0x2547F3B4:  81480000  lwz r10,0(r8)
+	  16: GETL       	R8, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R10
+	  19: INCEIPL       	$4
+
+	0x2547F3B8:  93640000  stw r27,0(r4)
+	  20: GETL       	R27, t16
+	  21: GETL       	R4, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0x2547F3BC:  7CAB5030  slw r11,r5,r10
+	  24: GETL       	R5, t22
+	  25: GETL       	R10, t20
+	  26: SHLL       	t20, t22
+	  27: PUTL       	t22, R11
+	  28: INCEIPL       	$4
+
+	0x2547F3C0:  85480008  lwzu r10,8(r8)
+	  29: GETL       	R8, t24
+	  30: ADDL       	$0x8, t24
+	  31: PUTL       	t24, R8
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R10
+	  34: INCEIPL       	$4
+
+	0x2547F3C4:  7FFF5B78  or r31,r31,r11
+	  35: GETL       	R31, t28
+	  36: GETL       	R11, t30
+	  37: ORL       	t30, t28
+	  38: PUTL       	t28, R31
+	  39: INCEIPL       	$4
+
+	0x2547F3C8:  2F8A0000  cmpi cr7,r10,0
+	  40: GETL       	R10, t32
+	  41: CMP0L       	t32, t34  (-rSo)
+	  42: ICRFL       	t34, $0x7, CR
+	  43: INCEIPL       	$4
+
+	0x2547F3CC:  409EFD68  bc 4,30,0x2547F134
+	  44: Jc30o       	$0x2547F134
+
+
+. 0 2547F3A4 44
+. 83 68 00 04 38 A0 00 01 80 9E 04 B4 3B E0 FF FF 81 48 00 00 93 64 00 00 7C AB 50 30 85 48 00 08 7F FF 5B 78 2F 8A 00 00 40 9E FD 68
+
+==== BB 107 (0x2547F3D0) approx BBs exec'd 0 ====
+
+	0x2547F3D0:  4BFFFDA8  b 0x2547F178
+	   0: JMPo       	$0x2547F178  ($4)
+
+
+. 0 2547F3D0 4
+. 4B FF FD A8
+
+==== BB 108 (0x2547F178) approx BBs exec'd 0 ====
+
+	0x2547F178:  38610050  addi r3,r1,80
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x50, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x2547F17C:  4800364D  bl 0x254827C8
+	   4: MOVL       	$0x2547F180, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0x254827C8  ($4)
+
+
+. 0 2547F178 8
+. 38 61 00 50 48 00 36 4D
+
+==== BB 109 uname(0x254827C8) approx BBs exec'd 0 ====
+
+	0x254827C8:  3800007A  li r0,122
+	   0: MOVL       	$0x7A, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254827CC:  44000002  sc
+	   3: JMPo-sys       	$0x254827D0  ($4)
+
+
+. 0 254827C8 8
+. 38 00 00 7A 44 00 00 02
+
+==== BB 110 (0x254827D0) approx BBs exec'd 0 ====
+
+	0x254827D0:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+. 0 254827D0 4
+. 4C A3 00 20
+
+==== BB 111 (0x2547F180) approx BBs exec'd 0 ====
+
+	0x2547F180:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547F184:  3BA10010  addi r29,r1,16
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x10, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x2547F188:  408201DC  bc 4,2,0x2547F364
+	   8: Jc02o       	$0x2547F364
+
+
+. 0 2547F180 12
+. 2C 03 00 00 3B A1 00 10 40 82 01 DC
+
+==== BB 112 (0x2547F18C) approx BBs exec'd 0 ====
+
+	0x2547F18C:  3BA100D2  addi r29,r1,210
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xD2, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x2547F190:  893D0000  lbz r9,0(r29)
+	   4: GETL       	R29, t2
+	   5: LDB       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x2547F194:  7FA3EB78  or r3,r29,r29
+	   8: GETL       	R29, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x2547F198:  38E00000  li r7,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x2547F19C:  39000000  li r8,0
+	  14: MOVL       	$0x0, t10
+	  15: PUTL       	t10, R8
+	  16: INCEIPL       	$4
+
+	0x2547F1A0:  3949FFD0  addi r10,r9,-48
+	  17: GETL       	R9, t12
+	  18: ADDL       	$0xFFFFFFD0, t12
+	  19: PUTL       	t12, R10
+	  20: INCEIPL       	$4
+
+	0x2547F1A4:  2B8A0009  cmpli cr7,r10,9
+	  21: GETL       	R10, t14
+	  22: MOVL       	$0x9, t18
+	  23: CMPUL       	t14, t18, t16  (-rSo)
+	  24: ICRFL       	t16, $0x7, CR
+	  25: INCEIPL       	$4
+
+	0x2547F1A8:  419D0068  bc 12,29,0x2547F210
+	  26: Js29o       	$0x2547F210
+
+
+. 0 2547F18C 32
+. 3B A1 00 D2 89 3D 00 00 7F A3 EB 78 38 E0 00 00 39 00 00 00 39 49 FF D0 2B 8A 00 09 41 9D 00 68
+
+==== BB 113 (0x2547F1AC) approx BBs exec'd 0 ====
+
+	0x2547F1AC:  89630001  lbz r11,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547F1B0:  3929FFD0  addi r9,r9,-48
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFD0, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547F1B4:  39430001  addi r10,r3,1
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x1, t6
+	  11: PUTL       	t6, R10
+	  12: INCEIPL       	$4
+
+	0x2547F1B8:  380BFFD0  addi r0,r11,-48
+	  13: GETL       	R11, t8
+	  14: ADDL       	$0xFFFFFFD0, t8
+	  15: PUTL       	t8, R0
+	  16: INCEIPL       	$4
+
+	0x2547F1BC:  2B800009  cmpli cr7,r0,9
+	  17: GETL       	R0, t10
+	  18: MOVL       	$0x9, t14
+	  19: CMPUL       	t10, t14, t12  (-rSo)
+	  20: ICRFL       	t12, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0x2547F1C0:  419D0020  bc 12,29,0x2547F1E0
+	  22: Js29o       	$0x2547F1E0
+
+
+. 0 2547F1AC 24
+. 89 63 00 01 39 29 FF D0 39 43 00 01 38 0B FF D0 2B 80 00 09 41 9D 00 20
+
+==== BB 114 (0x2547F1E0) approx BBs exec'd 0 ====
+
+	0x2547F1E0:  2C8B002E  cmpi cr1,r11,46
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x2E, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x2547F1E4:  54FB402E  rlwinm r27,r7,8,0,23
+	   5: GETL       	R7, t6
+	   6: SHLL       	$0x8, t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0x2547F1E8:  7F674B78  or r7,r27,r9
+	   9: GETL       	R27, t8
+	  10: GETL       	R9, t10
+	  11: ORL       	t10, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x2547F1EC:  39080001  addi r8,r8,1
+	  14: GETL       	R8, t12
+	  15: ADDL       	$0x1, t12
+	  16: PUTL       	t12, R8
+	  17: INCEIPL       	$4
+
+	0x2547F1F0:  386A0001  addi r3,r10,1
+	  18: GETL       	R10, t14
+	  19: ADDL       	$0x1, t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0x2547F1F4:  40860014  bc 4,6,0x2547F208
+	  22: Jc06o       	$0x2547F208
+
+
+. 0 2547F1E0 24
+. 2C 8B 00 2E 54 FB 40 2E 7F 67 4B 78 39 08 00 01 38 6A 00 01 40 86 00 14
+
+==== BB 115 (0x2547F1F8) approx BBs exec'd 0 ====
+
+	0x2547F1F8:  892A0001  lbz r9,1(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F1FC:  3B89FFD0  addi r28,r9,-48
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFD0, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x2547F200:  281C0009  cmpli cr0,r28,9
+	   9: GETL       	R28, t6
+	  10: MOVL       	$0x9, t10
+	  11: CMPUL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x2547F204:  4081FFA8  bc 4,1,0x2547F1AC
+	  14: Jc01o       	$0x2547F1AC
+
+
+. 0 2547F1F8 16
+. 89 2A 00 01 3B 89 FF D0 28 1C 00 09 40 81 FF A8
+
+==== BB 116 (0x2547F208) approx BBs exec'd 0 ====
+
+	0x2547F208:  2F080002  cmpi cr6,r8,2
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x2547F20C:  41990010  bc 12,25,0x2547F21C
+	   5: Js25o       	$0x2547F21C
+
+
+. 0 2547F208 8
+. 2F 08 00 02 41 99 00 10
+
+==== BB 117 (0x2547F21C) approx BBs exec'd 0 ====
+
+	0x2547F21C:  3CA00002  lis r5,2
+	   0: MOVL       	$0x20000, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547F220:  60AB0204  ori r11,r5,0x204
+	   3: MOVL       	$0x20204, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x2547F224:  7F875840  cmpl cr7,r7,r11
+	   6: GETL       	R7, t4
+	   7: GETL       	R11, t6
+	   8: CMPUL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x2547F228:  409D03B4  bc 4,29,0x2547F5DC
+	  11: Jc29o       	$0x2547F5DC
+
+
+. 0 2547F21C 16
+. 3C A0 00 02 60 AB 02 04 7F 87 58 40 40 9D 03 B4
+
+==== BB 118 (0x2547F22C) approx BBs exec'd 0 ====
+
+	0x2547F22C:  90FA0008  stw r7,8(r26)
+	   0: GETL       	R7, t0
+	   1: GETL       	R26, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547F230:  3880006E  li r4,110
+	   5: MOVL       	$0x6E, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x2547F234:  48003BCD  bl 0x25482E00
+	   8: MOVL       	$0x2547F238, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x25482E00  ($4)
+
+
+. 0 2547F22C 12
+. 90 FA 00 08 38 80 00 6E 48 00 3B CD
+
+==== BB 119 strchr(0x25482E00) approx BBs exec'd 0 ====
+
+	0x25482E00:  5084442E  rlwimi r4,r4,8,16,23
+	   0: GETL       	R4, t0
+	   1: GETL       	R4, t2
+	   2: ROLL       	$0x8, t2
+	   3: ANDL       	$0xFF00, t2
+	   4: ANDL       	$0xFFFF00FF, t0
+	   5: ORL       	t0, t2
+	   6: PUTL       	t2, R4
+	   7: INCEIPL       	$4
+
+	0x25482E04:  3960FFFF  li r11,-1
+	   8: MOVL       	$0xFFFFFFFF, t4
+	   9: PUTL       	t4, R11
+	  10: INCEIPL       	$4
+
+	0x25482E08:  5084801E  rlwimi r4,r4,16,0,15
+	  11: GETL       	R4, t6
+	  12: GETL       	R4, t8
+	  13: ROLL       	$0x10, t8
+	  14: ANDL       	$0xFFFF0000, t8
+	  15: ANDL       	$0xFFFF, t6
+	  16: ORL       	t6, t8
+	  17: PUTL       	t8, R4
+	  18: INCEIPL       	$4
+
+	0x25482E0C:  546A1EF8  rlwinm r10,r3,3,27,28
+	  19: GETL       	R3, t10
+	  20: ROLL       	$0x3, t10
+	  21: ANDL       	$0x18, t10
+	  22: PUTL       	t10, R10
+	  23: INCEIPL       	$4
+
+	0x25482E10:  3CC0FEFF  lis r6,-257
+	  24: MOVL       	$0xFEFF0000, t12
+	  25: PUTL       	t12, R6
+	  26: INCEIPL       	$4
+
+	0x25482E14:  3CE07F7F  lis r7,32639
+	  27: MOVL       	$0x7F7F0000, t14
+	  28: PUTL       	t14, R7
+	  29: INCEIPL       	$4
+
+	0x25482E18:  5468003A  rlwinm r8,r3,0,0,29
+	  30: GETL       	R3, t16
+	  31: ANDL       	$0xFFFFFFFC, t16
+	  32: PUTL       	t16, R8
+	  33: INCEIPL       	$4
+
+	0x25482E1C:  38C6FEFF  addi r6,r6,-257
+	  34: MOVL       	$0xFEFEFEFF, t18
+	  35: PUTL       	t18, R6
+	  36: INCEIPL       	$4
+
+	0x25482E20:  38E77F7F  addi r7,r7,32639
+	  37: MOVL       	$0x7F7F7F7F, t20
+	  38: PUTL       	t20, R7
+	  39: INCEIPL       	$4
+
+	0x25482E24:  80A80000  lwz r5,0(r8)
+	  40: GETL       	R8, t22
+	  41: LDL       	(t22), t24
+	  42: PUTL       	t24, R5
+	  43: INCEIPL       	$4
+
+	0x25482E28:  7D6B5430  srw r11,r11,r10
+	  44: GETL       	R11, t28
+	  45: GETL       	R10, t26
+	  46: SHRL       	t26, t28
+	  47: PUTL       	t28, R11
+	  48: INCEIPL       	$4
+
+	0x25482E2C:  7CA55B38  orc r5,r5,r11
+	  49: GETL       	R5, t30
+	  50: GETL       	R11, t32
+	  51: NOTL       	t32
+	  52: ORL       	t30, t32
+	  53: PUTL       	t32, R5
+	  54: INCEIPL       	$4
+
+	0x25482E30:  7C062A14  add r0,r6,r5
+	  55: GETL       	R6, t34
+	  56: GETL       	R5, t36
+	  57: ADDL       	t34, t36
+	  58: PUTL       	t36, R0
+	  59: INCEIPL       	$4
+
+	0x25482E34:  7CE928F8  nor r9,r7,r5
+	  60: GETL       	R7, t38
+	  61: GETL       	R5, t40
+	  62: ORL       	t40, t38
+	  63: NOTL       	t38
+	  64: PUTL       	t38, R9
+	  65: INCEIPL       	$4
+
+	0x25482E38:  7C004839  and. r0,r0,r9
+	  66: GETL       	R0, t42
+	  67: GETL       	R9, t44
+	  68: ANDL       	t42, t44
+	  69: PUTL       	t44, R0
+	  70: CMP0L       	t44, t46  (-rSo)
+	  71: ICRFL       	t46, $0x0, CR
+	  72: INCEIPL       	$4
+
+	0x25482E3C:  7C8C2A78  xor r12,r4,r5
+	  73: GETL       	R4, t48
+	  74: GETL       	R5, t50
+	  75: XORL       	t48, t50
+	  76: PUTL       	t50, R12
+	  77: INCEIPL       	$4
+
+	0x25482E40:  7D8C5B38  orc r12,r12,r11
+	  78: GETL       	R12, t52
+	  79: GETL       	R11, t54
+	  80: NOTL       	t54
+	  81: ORL       	t52, t54
+	  82: PUTL       	t54, R12
+	  83: INCEIPL       	$4
+
+	0x25482E44:  48000020  b 0x25482E64
+	  84: JMPo       	$0x25482E64  ($4)
+
+
+. 0 25482E00 72
+. 50 84 44 2E 39 60 FF FF 50 84 80 1E 54 6A 1E F8 3C C0 FE FF 3C E0 7F 7F 54 68 00 3A 38 C6 FE FF 38 E7 7F 7F 80 A8 00 00 7D 6B 54 30 7C A5 5B 38 7C 06 2A 14 7C E9 28 F8 7C 00 48 39 7C 8C 2A 78 7D 8C 5B 38 48 00 00 20
+
+==== BB 120 (0x25482E64) approx BBs exec'd 0 ====
+
+	0x25482E64:  7C066214  add r0,r6,r12
+	   0: GETL       	R6, t0
+	   1: GETL       	R12, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482E68:  7CE960F8  nor r9,r7,r12
+	   5: GETL       	R7, t4
+	   6: GETL       	R12, t6
+	   7: ORL       	t6, t4
+	   8: NOTL       	t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0x25482E6C:  4182FFDC  bc 12,2,0x25482E48
+	  11: Js02o       	$0x25482E48
+
+
+. 0 25482E64 12
+. 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+==== BB 121 (0x25482E48) approx BBs exec'd 0 ====
+
+	0x25482E48:  84A80004  lwzu r5,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R8
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25482E4C:  7C004839  and. r0,r0,r9
+	   6: GETL       	R0, t4
+	   7: GETL       	R9, t6
+	   8: ANDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25482E50:  7C062A14  add r0,r6,r5
+	  13: GETL       	R6, t10
+	  14: GETL       	R5, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R0
+	  17: INCEIPL       	$4
+
+	0x25482E54:  7CE928F8  nor r9,r7,r5
+	  18: GETL       	R7, t14
+	  19: GETL       	R5, t16
+	  20: ORL       	t16, t14
+	  21: NOTL       	t14
+	  22: PUTL       	t14, R9
+	  23: INCEIPL       	$4
+
+	0x25482E58:  4082005C  bc 4,2,0x25482EB4
+	  24: Jc02o       	$0x25482EB4
+
+
+. 0 25482E48 20
+. 84 A8 00 04 7C 00 48 39 7C 06 2A 14 7C E9 28 F8 40 82 00 5C
+
+==== BB 122 (0x25482E5C) approx BBs exec'd 0 ====
+
+	0x25482E5C:  7C004839  and. r0,r0,r9
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0x25482E60:  7C8C2A78  xor r12,r4,r5
+	   7: GETL       	R4, t6
+	   8: GETL       	R5, t8
+	   9: XORL       	t6, t8
+	  10: PUTL       	t8, R12
+	  11: INCEIPL       	$4
+
+	0x25482E64:  7C066214  add r0,r6,r12
+	  12: GETL       	R6, t10
+	  13: GETL       	R12, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0x25482E68:  7CE960F8  nor r9,r7,r12
+	  17: GETL       	R7, t14
+	  18: GETL       	R12, t16
+	  19: ORL       	t16, t14
+	  20: NOTL       	t14
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0x25482E6C:  4182FFDC  bc 12,2,0x25482E48
+	  23: Js02o       	$0x25482E48
+
+
+. 0 25482E5C 20
+. 7C 00 48 39 7C 8C 2A 78 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+==== BB 123 (0x25482E70) approx BBs exec'd 0 ====
+
+	0x25482E70:  7C004839  and. r0,r0,r9
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0x25482E74:  38600000  li r3,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25482E78:  4D820020  bclr 12,2
+	  10: GETL       	LR, t8
+	  11: Js02o-r       	t8
+
+
+. 0 25482E70 12
+. 7C 00 48 39 38 60 00 00 4D 82 00 20
+
+==== BB 124 (0x2547F238) approx BBs exec'd 0 ====
+
+	0x2547F238:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547F23C:  418602F4  bc 12,6,0x2547F530
+	   4: Js06o       	$0x2547F530
+
+
+. 0 2547F238 8
+. 2C 83 00 00 41 86 02 F4
+
+==== BB 125 (0x2547F530) approx BBs exec'd 0 ====
+
+	0x2547F530:  813A0008  lwz r9,8(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F534:  3C000002  lis r0,2
+	   5: MOVL       	$0x20000, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x2547F538:  60030544  ori r3,r0,0x544
+	   8: MOVL       	$0x20544, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x2547F53C:  7C891840  cmpl cr1,r9,r3
+	  11: GETL       	R9, t8
+	  12: GETL       	R3, t10
+	  13: CMPUL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0x2547F540:  41A5FD24  bc 13,5,0x2547F264
+	  16: Js05o       	$0x2547F264
+
+
+. 0 2547F530 20
+. 81 3A 00 08 3C 00 00 02 60 03 05 44 7C 89 18 40 41 A5 FD 24
+
+==== BB 126 (0x2547F264) approx BBs exec'd 0 ====
+
+	0x2547F264:  2F1FFFFF  cmpi cr6,r31,-1
+	   0: GETL       	R31, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x2547F268:  83BE04B4  lwz r29,1204(r30)
+	   5: GETL       	R30, t6
+	   6: ADDL       	$0x4B4, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R29
+	   9: INCEIPL       	$4
+
+	0x2547F26C:  419A0030  bc 12,26,0x2547F29C
+	  10: Js26o       	$0x2547F29C
+
+
+. 0 2547F264 12
+. 2F 1F FF FF 83 BE 04 B4 41 9A 00 30
+
+==== BB 127 (0x2547F29C) approx BBs exec'd 0 ====
+
+	0x2547F29C:  831A0004  lwz r24,4(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0x2547F2A0:  2F980000  cmpi cr7,r24,0
+	   5: GETL       	R24, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547F2A4:  409E000C  bc 4,30,0x2547F2B0
+	   9: Jc30o       	$0x2547F2B0
+
+
+. 0 2547F29C 12
+. 83 1A 00 04 2F 98 00 00 40 9E 00 0C
+
+==== BB 128 (0x2547F2B0) approx BBs exec'd 0 ====
+
+	0x2547F2B0:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547F2B4:  839E0500  lwz r28,1280(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x500, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2547F2B8:  48001119  bl 0x254803D0
+	   8: MOVL       	$0x2547F2BC, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x254803D0  ($4)
+
+
+. 0 2547F2B0 12
+. 38 60 00 00 83 9E 05 00 48 00 11 19
+
+==== BB 129 brk(0x254803D0) approx BBs exec'd 0 ====
+
+	0x254803D0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254803D4:  90610008  stw r3,8(r1)
+	   6: GETL       	R3, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x8, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x254803D8:  3800002D  li r0,45
+	  11: MOVL       	$0x2D, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x254803DC:  44000002  sc
+	  14: JMPo-sys       	$0x254803E0  ($4)
+
+
+. 0 254803D0 16
+. 94 21 FF F0 90 61 00 08 38 00 00 2D 44 00 00 02
+
+==== BB 130 (0x254803E0) approx BBs exec'd 0 ====
+
+	0x254803E0:  80C10008  lwz r6,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254803E4:  7C8802A6  mflr r4
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x254803E8:  48016C19  bl 0x25497000
+	   8: MOVL       	$0x254803EC, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254803E0 12
+. 80 C1 00 08 7C 88 02 A6 48 01 6C 19
+
+==== BB 131 (0x254803EC) approx BBs exec'd 0 ====
+
+	0x254803EC:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x254803F0:  80A50504  lwz r5,1284(r5)
+	   3: GETL       	R5, t2
+	   4: ADDL       	$0x504, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x254803F4:  7C8803A6  mtlr r4
+	   8: GETL       	R4, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x254803F8:  90650000  stw r3,0(r5)
+	  11: GETL       	R3, t8
+	  12: GETL       	R5, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x254803FC:  7C061840  cmpl cr0,r6,r3
+	  15: GETL       	R6, t12
+	  16: GETL       	R3, t14
+	  17: CMPUL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0x25480400:  38210010  addi r1,r1,16
+	  20: GETL       	R1, t18
+	  21: ADDL       	$0x10, t18
+	  22: PUTL       	t18, R1
+	  23: INCEIPL       	$4
+
+	0x25480404:  38600000  li r3,0
+	  24: MOVL       	$0x0, t20
+	  25: PUTL       	t20, R3
+	  26: INCEIPL       	$4
+
+	0x25480408:  4CA10020  bclr 5,1
+	  27: GETL       	LR, t22
+	  28: Jc01o-r       	t22
+
+
+. 0 254803EC 32
+. 7C A8 02 A6 80 A5 05 04 7C 88 03 A6 90 65 00 00 7C 06 18 40 38 21 00 10 38 60 00 00 4C A1 00 20
+
+==== BB 132 (0x2547F2BC) approx BBs exec'd 0 ====
+
+	0x2547F2BC:  813E0504  lwz r9,1284(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x504, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547F2C0:  809A0004  lwz r4,4(r26)
+	   5: GETL       	R26, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547F2C4:  81690000  lwz r11,0(r9)
+	  10: GETL       	R9, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0x2547F2C8:  7F2400D0  neg r25,r4
+	  14: GETL       	R4, t12
+	  15: NEGL       	t12
+	  16: PUTL       	t12, R25
+	  17: INCEIPL       	$4
+
+	0x2547F2CC:  7C8BE040  cmpl cr1,r11,r28
+	  18: GETL       	R11, t14
+	  19: GETL       	R28, t16
+	  20: CMPUL       	t14, t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0x2547F2D0:  7F645A14  add r27,r4,r11
+	  23: GETL       	R4, t20
+	  24: GETL       	R11, t22
+	  25: ADDL       	t20, t22
+	  26: PUTL       	t22, R27
+	  27: INCEIPL       	$4
+
+	0x2547F2D4:  399BFFFF  addi r12,r27,-1
+	  28: GETL       	R27, t24
+	  29: ADDL       	$0xFFFFFFFF, t24
+	  30: PUTL       	t24, R12
+	  31: INCEIPL       	$4
+
+	0x2547F2D8:  7D83C838  and r3,r12,r25
+	  32: GETL       	R12, t26
+	  33: GETL       	R25, t28
+	  34: ANDL       	t26, t28
+	  35: PUTL       	t28, R3
+	  36: INCEIPL       	$4
+
+	0x2547F2DC:  408402D8  bc 4,4,0x2547F5B4
+	  37: Jc04o       	$0x2547F5B4
+
+
+. 0 2547F2BC 36
+. 81 3E 05 04 80 9A 00 04 81 69 00 00 7F 24 00 D0 7C 8B E0 40 7F 64 5A 14 39 9B FF FF 7D 83 C8 38 40 84 02 D8
+
+==== BB 133 (0x2547F2E0) approx BBs exec'd 0 ====
+
+	0x2547F2E0:  807A000C  lwz r3,12(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547F2E4:  2C030000  cmpi cr0,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547F2E8:  4182000C  bc 12,2,0x2547F2F4
+	   9: Js02o       	$0x2547F2F4
+
+
+. 0 2547F2E0 12
+. 80 7A 00 0C 2C 03 00 00 41 82 00 0C
+
+==== BB 134 (0x2547F2F4) approx BBs exec'd 0 ====
+
+	0x2547F2F4:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547F2F8:  4800111D  bl 0x25480414
+	   3: MOVL       	$0x2547F2FC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25480414  ($4)
+
+
+. 0 2547F2F4 8
+. 38 60 00 00 48 00 11 1D
+
+==== BB 135 sbrk(0x25480414) approx BBs exec'd 0 ====
+
+	0x25480414:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25480418:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2548041C:  48016BE5  bl 0x25497000
+	   9: MOVL       	$0x25480420, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25480414 12
+. 94 21 FF E0 7C 88 02 A6 48 01 6B E5
+
+==== BB 136 (0x25480420) approx BBs exec'd 0 ====
+
+	0x25480420:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25480424:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25480428:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2548042C:  90810024  stw r4,36(r1)
+	  13: GETL       	R4, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25480430:  93A10014  stw r29,20(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25480434:  7C7D1B78  or r29,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x25480438:  83FE0504  lwz r31,1284(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x504, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0x2548043C:  38600000  li r3,0
+	  31: MOVL       	$0x0, t24
+	  32: PUTL       	t24, R3
+	  33: INCEIPL       	$4
+
+	0x25480440:  801F0000  lwz r0,0(r31)
+	  34: GETL       	R31, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R0
+	  37: INCEIPL       	$4
+
+	0x25480444:  2F800000  cmpi cr7,r0,0
+	  38: GETL       	R0, t30
+	  39: CMP0L       	t30, t32  (-rSo)
+	  40: ICRFL       	t32, $0x7, CR
+	  41: INCEIPL       	$4
+
+	0x25480448:  419E003C  bc 12,30,0x25480484
+	  42: Js30o       	$0x25480484
+
+
+. 0 25480420 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 81 00 24 93 A1 00 14 7C 7D 1B 78 83 FE 05 04 38 60 00 00 80 1F 00 00 2F 80 00 00 41 9E 00 3C
+
+==== BB 137 (0x2548044C) approx BBs exec'd 0 ====
+
+	0x2548044C:  813E049C  lwz r9,1180(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x49C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25480450:  80A90000  lwz r5,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R5
+	   8: INCEIPL       	$4
+
+	0x25480454:  2C050000  cmpi cr0,r5,0
+	   9: GETL       	R5, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25480458:  4082002C  bc 4,2,0x25480484
+	  13: Jc02o       	$0x25480484
+
+
+. 0 2548044C 16
+. 81 3E 04 9C 80 A9 00 00 2C 05 00 00 40 82 00 2C
+
+==== BB 138 (0x2548045C) approx BBs exec'd 0 ====
+
+	0x2548045C:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25480460:  409A003C  bc 4,26,0x2548049C
+	   4: Jc26o       	$0x2548049C
+
+
+. 0 2548045C 8
+. 2F 1D 00 00 40 9A 00 3C
+
+==== BB 139 (0x25480464) approx BBs exec'd 0 ====
+
+	0x25480464:  807F0000  lwz r3,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25480468:  80C10024  lwz r6,36(r1)
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x24, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x2548046C:  83A10014  lwz r29,20(r1)
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R29
+	  13: INCEIPL       	$4
+
+	0x25480470:  83C10018  lwz r30,24(r1)
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x18, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R30
+	  18: INCEIPL       	$4
+
+	0x25480474:  7CC803A6  mtlr r6
+	  19: GETL       	R6, t16
+	  20: PUTL       	t16, LR
+	  21: INCEIPL       	$4
+
+	0x25480478:  83E1001C  lwz r31,28(r1)
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R31
+	  26: INCEIPL       	$4
+
+	0x2548047C:  38210020  addi r1,r1,32
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x20, t22
+	  29: PUTL       	t22, R1
+	  30: INCEIPL       	$4
+
+	0x25480480:  4E800020  blr
+	  31: GETL       	LR, t24
+	  32: JMPo-r       	t24  ($4)
+
+
+. 0 25480464 32
+. 80 7F 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 140 (0x2547F2FC) approx BBs exec'd 0 ====
+
+	0x2547F2FC:  7F83E000  cmp cr7,r3,r28
+	   0: GETL       	R3, t0
+	   1: GETL       	R28, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x2547F300:  419E02C4  bc 12,30,0x2547F5C4
+	   5: Js30o       	$0x2547F5C4
+
+
+. 0 2547F2FC 8
+. 7F 83 E0 00 41 9E 02 C4
+
+==== BB 141 (0x2547F304) approx BBs exec'd 0 ====
+
+	0x2547F304:  80BD0000  lwz r5,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x2547F308:  2C850000  cmpi cr1,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x2547F30C:  408602A0  bc 4,6,0x2547F5AC
+	   8: Jc06o       	$0x2547F5AC
+
+
+. 0 2547F304 12
+. 80 BD 00 00 2C 85 00 00 40 86 02 A0
+
+==== BB 142 (0x2547F310) approx BBs exec'd 0 ====
+
+	0x2547F310:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547F314:  7EE803A6  mtlr r23
+	   3: GETL       	R23, t2
+	   4: PUTL       	t2, LR
+	   5: INCEIPL       	$4
+
+	0x2547F318:  7EA4AB78  or r4,r21,r21
+	   6: GETL       	R21, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2547F31C:  38A101E0  addi r5,r1,480
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x1E0, t6
+	  11: PUTL       	t6, R5
+	  12: INCEIPL       	$4
+
+	0x2547F320:  4E800021  blrl
+	  13: GETL       	LR, t8
+	  14: MOVL       	$0x2547F324, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-r       	t8  ($4)
+
+
+. 0 2547F310 20
+. 7E C3 B3 78 7E E8 03 A6 7E A4 AB 78 38 A1 01 E0 4E 80 00 21
+
+==== BB 143 dl_main(0x254721F0) approx BBs exec'd 0 ====
+
+	0x254721F0:  9421FF30  stwu r1,-208(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF30, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254721F4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254721F8:  48024E09  bl 0x25497000
+	   9: MOVL       	$0x254721FC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254721F0 12
+. 94 21 FF 30 7C 08 02 A6 48 02 4E 09
+
+==== BB 144 (0x254721FC) approx BBs exec'd 0 ====
+
+	0x254721FC:  93C100C8  stw r30,200(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xC8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472200:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25472204:  92010090  stw r16,144(r1)
+	   8: GETL       	R16, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x90, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25472208:  900100D4  stw r0,212(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xD4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547220C:  7D600026  mfcr r11
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0x25472210:  91C10088  stw r14,136(r1)
+	  21: GETL       	R14, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x88, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25472214:  821E04B4  lwz r16,1204(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4B4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R16
+	  30: INCEIPL       	$4
+
+	0x25472218:  819E04B8  lwz r12,1208(r30)
+	  31: GETL       	R30, t24
+	  32: ADDL       	$0x4B8, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R12
+	  35: INCEIPL       	$4
+
+	0x2547221C:  81500000  lwz r10,0(r16)
+	  36: GETL       	R16, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R10
+	  39: INCEIPL       	$4
+
+	0x25472220:  81DE04C8  lwz r14,1224(r30)
+	  40: GETL       	R30, t32
+	  41: ADDL       	$0x4C8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R14
+	  44: INCEIPL       	$4
+
+	0x25472224:  2F8A0000  cmpi cr7,r10,0
+	  45: GETL       	R10, t36
+	  46: CMP0L       	t36, t38  (-rSo)
+	  47: ICRFL       	t38, $0x7, CR
+	  48: INCEIPL       	$4
+
+	0x25472228:  813E0020  lwz r9,32(r30)
+	  49: GETL       	R30, t40
+	  50: ADDL       	$0x20, t40
+	  51: LDL       	(t40), t42
+	  52: PUTL       	t42, R9
+	  53: INCEIPL       	$4
+
+	0x2547222C:  91610084  stw r11,132(r1)
+	  54: GETL       	R11, t44
+	  55: GETL       	R1, t46
+	  56: ADDL       	$0x84, t46
+	  57: STL       	t44, (t46)
+	  58: INCEIPL       	$4
+
+	0x25472230:  811E04D8  lwz r8,1240(r30)
+	  59: GETL       	R30, t48
+	  60: ADDL       	$0x4D8, t48
+	  61: LDL       	(t48), t50
+	  62: PUTL       	t50, R8
+	  63: INCEIPL       	$4
+
+	0x25472234:  816C0000  lwz r11,0(r12)
+	  64: GETL       	R12, t52
+	  65: LDL       	(t52), t54
+	  66: PUTL       	t54, R11
+	  67: INCEIPL       	$4
+
+	0x25472238:  80FE04B0  lwz r7,1200(r30)
+	  68: GETL       	R30, t56
+	  69: ADDL       	$0x4B0, t56
+	  70: LDL       	(t56), t58
+	  71: PUTL       	t58, R7
+	  72: INCEIPL       	$4
+
+	0x2547223C:  80DE001C  lwz r6,28(r30)
+	  73: GETL       	R30, t60
+	  74: ADDL       	$0x1C, t60
+	  75: LDL       	(t60), t62
+	  76: PUTL       	t62, R6
+	  77: INCEIPL       	$4
+
+	0x25472240:  801E04A8  lwz r0,1192(r30)
+	  78: GETL       	R30, t64
+	  79: ADDL       	$0x4A8, t64
+	  80: LDL       	(t64), t66
+	  81: PUTL       	t66, R0
+	  82: INCEIPL       	$4
+
+	0x25472244:  93E100CC  stw r31,204(r1)
+	  83: GETL       	R31, t68
+	  84: GETL       	R1, t70
+	  85: ADDL       	$0xCC, t70
+	  86: STL       	t68, (t70)
+	  87: INCEIPL       	$4
+
+	0x25472248:  7C3F0B78  or r31,r1,r1
+	  88: GETL       	R1, t72
+	  89: PUTL       	t72, R31
+	  90: INCEIPL       	$4
+
+	0x2547224C:  91E1008C  stw r15,140(r1)
+	  91: GETL       	R15, t74
+	  92: GETL       	R1, t76
+	  93: ADDL       	$0x8C, t76
+	  94: STL       	t74, (t76)
+	  95: INCEIPL       	$4
+
+	0x25472250:  92410098  stw r18,152(r1)
+	  96: GETL       	R18, t78
+	  97: GETL       	R1, t80
+	  98: ADDL       	$0x98, t80
+	  99: STL       	t78, (t80)
+	 100: INCEIPL       	$4
+
+	0x25472254:  3A400000  li r18,0
+	 101: MOVL       	$0x0, t82
+	 102: PUTL       	t82, R18
+	 103: INCEIPL       	$4
+
+	0x25472258:  9261009C  stw r19,156(r1)
+	 104: GETL       	R19, t84
+	 105: GETL       	R1, t86
+	 106: ADDL       	$0x9C, t86
+	 107: STL       	t84, (t86)
+	 108: INCEIPL       	$4
+
+	0x2547225C:  3A600000  li r19,0
+	 109: MOVL       	$0x0, t88
+	 110: PUTL       	t88, R19
+	 111: INCEIPL       	$4
+
+	0x25472260:  928100A0  stw r20,160(r1)
+	 112: GETL       	R20, t90
+	 113: GETL       	R1, t92
+	 114: ADDL       	$0xA0, t92
+	 115: STL       	t90, (t92)
+	 116: INCEIPL       	$4
+
+	0x25472264:  3A800000  li r20,0
+	 117: MOVL       	$0x0, t94
+	 118: PUTL       	t94, R20
+	 119: INCEIPL       	$4
+
+	0x25472268:  92A100A4  stw r21,164(r1)
+	 120: GETL       	R21, t96
+	 121: GETL       	R1, t98
+	 122: ADDL       	$0xA4, t98
+	 123: STL       	t96, (t98)
+	 124: INCEIPL       	$4
+
+	0x2547226C:  3AA00000  li r21,0
+	 125: MOVL       	$0x0, t100
+	 126: PUTL       	t100, R21
+	 127: INCEIPL       	$4
+
+	0x25472270:  92C100A8  stw r22,168(r1)
+	 128: GETL       	R22, t102
+	 129: GETL       	R1, t104
+	 130: ADDL       	$0xA8, t104
+	 131: STL       	t102, (t104)
+	 132: INCEIPL       	$4
+
+	0x25472274:  7CB62B78  or r22,r5,r5
+	 133: GETL       	R5, t106
+	 134: PUTL       	t106, R22
+	 135: INCEIPL       	$4
+
+	0x25472278:  92E100AC  stw r23,172(r1)
+	 136: GETL       	R23, t108
+	 137: GETL       	R1, t110
+	 138: ADDL       	$0xAC, t110
+	 139: STL       	t108, (t110)
+	 140: INCEIPL       	$4
+
+	0x2547227C:  7C972378  or r23,r4,r4
+	 141: GETL       	R4, t112
+	 142: PUTL       	t112, R23
+	 143: INCEIPL       	$4
+
+	0x25472280:  930100B0  stw r24,176(r1)
+	 144: GETL       	R24, t114
+	 145: GETL       	R1, t116
+	 146: ADDL       	$0xB0, t116
+	 147: STL       	t114, (t116)
+	 148: INCEIPL       	$4
+
+	0x25472284:  7C781B78  or r24,r3,r3
+	 149: GETL       	R3, t118
+	 150: PUTL       	t118, R24
+	 151: INCEIPL       	$4
+
+	0x25472288:  932100B4  stw r25,180(r1)
+	 152: GETL       	R25, t120
+	 153: GETL       	R1, t122
+	 154: ADDL       	$0xB4, t122
+	 155: STL       	t120, (t122)
+	 156: INCEIPL       	$4
+
+	0x2547228C:  3B200000  li r25,0
+	 157: MOVL       	$0x0, t124
+	 158: PUTL       	t124, R25
+	 159: INCEIPL       	$4
+
+	0x25472290:  912E03FC  stw r9,1020(r14)
+	 160: GETL       	R9, t126
+	 161: GETL       	R14, t128
+	 162: ADDL       	$0x3FC, t128
+	 163: STL       	t126, (t128)
+	 164: INCEIPL       	$4
+
+	0x25472294:  92210094  stw r17,148(r1)
+	 165: GETL       	R17, t130
+	 166: GETL       	R1, t132
+	 167: ADDL       	$0x94, t132
+	 168: STL       	t130, (t132)
+	 169: INCEIPL       	$4
+
+	0x25472298:  934100B8  stw r26,184(r1)
+	 170: GETL       	R26, t134
+	 171: GETL       	R1, t136
+	 172: ADDL       	$0xB8, t136
+	 173: STL       	t134, (t136)
+	 174: INCEIPL       	$4
+
+	0x2547229C:  936100BC  stw r27,188(r1)
+	 175: GETL       	R27, t138
+	 176: GETL       	R1, t140
+	 177: ADDL       	$0xBC, t140
+	 178: STL       	t138, (t140)
+	 179: INCEIPL       	$4
+
+	0x254722A0:  938100C0  stw r28,192(r1)
+	 180: GETL       	R28, t142
+	 181: GETL       	R1, t144
+	 182: ADDL       	$0xC0, t144
+	 183: STL       	t142, (t144)
+	 184: INCEIPL       	$4
+
+	0x254722A4:  93A100C4  stw r29,196(r1)
+	 185: GETL       	R29, t146
+	 186: GETL       	R1, t148
+	 187: ADDL       	$0xC4, t148
+	 188: STL       	t146, (t148)
+	 189: INCEIPL       	$4
+
+	0x254722A8:  910E01B4  stw r8,436(r14)
+	 190: GETL       	R8, t150
+	 191: GETL       	R14, t152
+	 192: ADDL       	$0x1B4, t152
+	 193: STL       	t150, (t152)
+	 194: INCEIPL       	$4
+
+	0x254722AC:  90EE042C  stw r7,1068(r14)
+	 195: GETL       	R7, t154
+	 196: GETL       	R14, t156
+	 197: ADDL       	$0x42C, t156
+	 198: STL       	t154, (t156)
+	 199: INCEIPL       	$4
+
+	0x254722B0:  90CE03F8  stw r6,1016(r14)
+	 200: GETL       	R6, t158
+	 201: GETL       	R14, t160
+	 202: ADDL       	$0x3F8, t160
+	 203: STL       	t158, (t160)
+	 204: INCEIPL       	$4
+
+	0x254722B4:  900E0404  stw r0,1028(r14)
+	 205: GETL       	R0, t162
+	 206: GETL       	R14, t164
+	 207: ADDL       	$0x404, t164
+	 208: STL       	t162, (t164)
+	 209: INCEIPL       	$4
+
+	0x254722B8:  917F0038  stw r11,56(r31)
+	 210: GETL       	R11, t166
+	 211: GETL       	R31, t168
+	 212: ADDL       	$0x38, t168
+	 213: STL       	t166, (t168)
+	 214: INCEIPL       	$4
+
+	0x254722BC:  81FE04F4  lwz r15,1268(r30)
+	 215: GETL       	R30, t170
+	 216: ADDL       	$0x4F4, t170
+	 217: LDL       	(t170), t172
+	 218: PUTL       	t172, R15
+	 219: INCEIPL       	$4
+
+	0x254722C0:  813E007C  lwz r9,124(r30)
+	 220: GETL       	R30, t174
+	 221: ADDL       	$0x7C, t174
+	 222: LDL       	(t174), t176
+	 223: PUTL       	t176, R9
+	 224: INCEIPL       	$4
+
+	0x254722C4:  419E0008  bc 12,30,0x254722CC
+	 225: Js30o       	$0x254722CC
+
+
+. 0 254721FC 204
+. 93 C1 00 C8 7F C8 02 A6 92 01 00 90 90 01 00 D4 7D 60 00 26 91 C1 00 88 82 1E 04 B4 81 9E 04 B8 81 50 00 00 81 DE 04 C8 2F 8A 00 00 81 3E 00 20 91 61 00 84 81 1E 04 D8 81 6C 00 00 80 FE 04 B0 80 DE 00 1C 80 1E 04 A8 93 E1 00 CC 7C 3F 0B 78 91 E1 00 8C 92 41 00 98 3A 40 00 00 92 61 00 9C 3A 60 00 00 92 81 00 A0 3A 80 00 00 92 A1 00 A4 3A A0 00 00 92 C1 00 A8 7C B6 2B 78 92 E1 00 AC 7C 97 23 78 93 01 00 B0 7C 78 1B 78 93 21 00 B4 3B 20 00 00 91 2E 03 FC 92 21 00 94 93 41 00 B8 93 61 00 BC 93 81 00 C0 93 A1 00 C4 91 0E 01 B4 90 EE 04 2C 90 CE 03 F8 90 0E 04 04 91 7F 00 38 81 FE 04 F4 81 3E 00 7C 41 9E 00 08
+
+==== BB 145 (0x254722CC) approx BBs exec'd 0 ====
+
+	0x254722CC:  823E0040  lwz r17,64(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x40, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0x254722D0:  912F0054  stw r9,84(r15)
+	   5: GETL       	R9, t4
+	   6: GETL       	R15, t6
+	   7: ADDL       	$0x54, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x254722D4:  387F0038  addi r3,r31,56
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x38, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x254722D8:  4800D951  bl 0x2547FC28
+	  14: MOVL       	$0x254722DC, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x2547FC28  ($4)
+
+
+. 0 254722CC 16
+. 82 3E 00 40 91 2F 00 54 38 7F 00 38 48 00 D9 51
+
+==== BB 146 _dl_next_ld_env_entry(0x2547FC28) approx BBs exec'd 0 ====
+
+	0x2547FC28:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547FC2C:  39400000  li r10,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547FC30:  81630000  lwz r11,0(r3)
+	   9: GETL       	R3, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x2547FC34:  812B0000  lwz r9,0(r11)
+	  13: GETL       	R11, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R9
+	  16: INCEIPL       	$4
+
+	0x2547FC38:  2F890000  cmpi cr7,r9,0
+	  17: GETL       	R9, t14
+	  18: CMP0L       	t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x2547FC3C:  419E001C  bc 12,30,0x2547FC58
+	  21: Js30o       	$0x2547FC58
+
+
+. 0 2547FC28 24
+. 94 21 FF F0 39 40 00 00 81 63 00 00 81 2B 00 00 2F 89 00 00 41 9E 00 1C
+
+==== BB 147 (0x2547FC40) approx BBs exec'd 0 ====
+
+	0x2547FC40:  88090000  lbz r0,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547FC44:  2C00004C  cmpi cr0,r0,76
+	   4: GETL       	R0, t4
+	   5: MOVL       	$0x4C, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547FC48:  4182001C  bc 12,2,0x2547FC64
+	   9: Js02o       	$0x2547FC64
+
+
+. 0 2547FC40 12
+. 88 09 00 00 2C 00 00 4C 41 82 00 1C
+
+==== BB 148 (0x2547FC4C) approx BBs exec'd 0 ====
+
+	0x2547FC4C:  852B0004  lwzu r9,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R11
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x2547FC50:  2F890000  cmpi cr7,r9,0
+	   6: GETL       	R9, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x2547FC54:  409EFFEC  bc 4,30,0x2547FC40
+	  10: Jc30o       	$0x2547FC40
+
+
+. 0 2547FC4C 12
+. 85 2B 00 04 2F 89 00 00 40 9E FF EC
+
+==== BB 149 (0x2547FC64) approx BBs exec'd 0 ====
+
+	0x2547FC64:  88890001  lbz r4,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547FC68:  2C840044  cmpi cr1,r4,68
+	   5: GETL       	R4, t4
+	   6: MOVL       	$0x44, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x2547FC6C:  4086FFE0  bc 4,6,0x2547FC4C
+	  10: Jc06o       	$0x2547FC4C
+
+
+. 0 2547FC64 12
+. 88 89 00 01 2C 84 00 44 40 86 FF E0
+
+==== BB 150 (0x2547FC70) approx BBs exec'd 0 ====
+
+	0x2547FC70:  88A90002  lbz r5,2(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x2, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547FC74:  2F05005F  cmpi cr6,r5,95
+	   5: GETL       	R5, t4
+	   6: MOVL       	$0x5F, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x2547FC78:  409AFFD4  bc 4,26,0x2547FC4C
+	  10: Jc26o       	$0x2547FC4C
+
+
+. 0 2547FC70 12
+. 88 A9 00 02 2F 05 00 5F 40 9A FF D4
+
+==== BB 151 (0x2547FC7C) approx BBs exec'd 0 ====
+
+	0x2547FC7C:  38CB0004  addi r6,r11,4
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x2547FC80:  39490003  addi r10,r9,3
+	   4: GETL       	R9, t2
+	   5: ADDL       	$0x3, t2
+	   6: PUTL       	t2, R10
+	   7: INCEIPL       	$4
+
+	0x2547FC84:  90C30000  stw r6,0(r3)
+	   8: GETL       	R6, t4
+	   9: GETL       	R3, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x2547FC88:  4BFFFFD0  b 0x2547FC58
+	  12: JMPo       	$0x2547FC58  ($4)
+
+
+. 0 2547FC7C 16
+. 38 CB 00 04 39 49 00 03 90 C3 00 00 4B FF FF D0
+
+==== BB 152 (0x2547FC58) approx BBs exec'd 0 ====
+
+	0x2547FC58:  7D435378  or r3,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547FC5C:  38210010  addi r1,r1,16
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x10, t2
+	   5: PUTL       	t2, R1
+	   6: INCEIPL       	$4
+
+	0x2547FC60:  4E800020  blr
+	   7: GETL       	LR, t4
+	   8: JMPo-r       	t4  ($4)
+
+
+. 0 2547FC58 12
+. 7D 43 53 78 38 21 00 10 4E 80 00 20
+
+==== BB 153 (0x254722DC) approx BBs exec'd 0 ====
+
+	0x254722DC:  7C7D1B79  or. r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254722E0:  41820064  bc 12,2,0x25472344
+	   5: Js02o       	$0x25472344
+
+
+. 0 254722DC 8
+. 7C 7D 1B 79 41 82 00 64
+
+==== BB 154 (0x254722E4) approx BBs exec'd 0 ====
+
+	0x254722E4:  881D0000  lbz r0,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x254722E8:  39600000  li r11,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R11
+	   6: INCEIPL       	$4
+
+	0x254722EC:  7C090379  or. r9,r0,r0
+	   7: GETL       	R0, t6
+	   8: PUTL       	t6, R9
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x254722F0:  40A20018  bc 5,2,0x25472308
+	  12: Jc02o       	$0x25472308
+
+
+. 0 254722E4 16
+. 88 1D 00 00 39 60 00 00 7C 09 03 79 40 A2 00 18
+
+==== BB 155 (0x25472308) approx BBs exec'd 0 ====
+
+	0x25472308:  2C89003D  cmpi cr1,r9,61
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x3D, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x2547230C:  4086FFEC  bc 4,6,0x254722F8
+	   5: Jc06o       	$0x254722F8
+
+
+. 0 25472308 8
+. 2C 89 00 3D 40 86 FF EC
+
+==== BB 156 (0x254722F8) approx BBs exec'd 0 ====
+
+	0x254722F8:  396B0001  addi r11,r11,1
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0x254722FC:  7C1D58AE  lbzx r0,r29,r11
+	   4: GETL       	R11, t2
+	   5: GETL       	R29, t4
+	   6: ADDL       	t4, t2
+	   7: LDB       	(t2), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x25472300:  7C090379  or. r9,r0,r0
+	  10: GETL       	R0, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25472304:  4182000C  bc 12,2,0x25472310
+	  15: Js02o       	$0x25472310
+
+
+. 0 254722F8 16
+. 39 6B 00 01 7C 1D 58 AE 7C 09 03 79 41 82 00 0C
+
+==== BB 157 (0x25472310) approx BBs exec'd 0 ====
+
+	0x25472310:  2D80003D  cmpi cr3,r0,61
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x3D, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x3, CR
+	   4: INCEIPL       	$4
+
+	0x25472314:  408EFFC0  bc 4,14,0x254722D4
+	   5: Jc14o       	$0x254722D4
+
+
+. 0 25472310 8
+. 2D 80 00 3D 40 8E FF C0
+
+==== BB 158 (0x25472318) approx BBs exec'd 0 ====
+
+	0x25472318:  380BFFFC  addi r0,r11,-4
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x2547231C:  2A000010  cmpli cr4,r0,16
+	   4: GETL       	R0, t2
+	   5: MOVL       	$0x10, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x25472320:  41B1FFB4  bc 13,17,0x254722D4
+	   9: Js17o       	$0x254722D4
+
+
+. 0 25472318 12
+. 38 0B FF FC 2A 00 00 10 41 B1 FF B4
+
+==== BB 159 (0x25472324) approx BBs exec'd 0 ====
+
+	0x25472324:  80BE0080  lwz r5,128(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x80, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25472328:  541A103A  rlwinm r26,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R26
+	   8: INCEIPL       	$4
+
+	0x2547232C:  7C9A282E  lwzx r4,r26,r5
+	   9: GETL       	R5, t6
+	  10: GETL       	R26, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x25472330:  7C642A14  add r3,r4,r5
+	  15: GETL       	R4, t12
+	  16: GETL       	R5, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R3
+	  19: INCEIPL       	$4
+
+	0x25472334:  7C6903A6  mtctr r3
+	  20: GETL       	R3, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x25472338:  4E800420  bctr
+	  23: GETL       	CTR, t18
+	  24: JMPo       	t18  ($4)
+
+
+. 0 25472324 24
+. 80 BE 00 80 54 1A 10 3A 7C 9A 28 2E 7C 64 2A 14 7C 69 03 A6 4E 80 04 20
+
+==== BB 160 (0x25473248) approx BBs exec'd 0 ====
+
+	0x25473248:  809E00C4  lwz r4,196(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xC4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547324C:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25473250:  38A0000C  li r5,12
+	   8: MOVL       	$0xC, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25473254:  48010045  bl 0x25483298
+	  11: MOVL       	$0x25473258, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+. 0 25473248 16
+. 80 9E 00 C4 7F A3 EB 78 38 A0 00 0C 48 01 00 45
+
+==== BB 161 memcmp(0x25483298) approx BBs exec'd 0 ====
+
+	0x25483298:  2B85000F  cmpli cr7,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x2548329C:  9421FFE0  stwu r1,-32(r1)
+	   5: GETL       	R1, t6
+	   6: GETL       	R1, t8
+	   7: ADDL       	$0xFFFFFFE0, t8
+	   8: PUTL       	t8, R1
+	   9: STL       	t6, (t8)
+	  10: INCEIPL       	$4
+
+	0x254832A0:  93810010  stw r28,16(r1)
+	  11: GETL       	R28, t10
+	  12: GETL       	R1, t12
+	  13: ADDL       	$0x10, t12
+	  14: STL       	t10, (t12)
+	  15: INCEIPL       	$4
+
+	0x254832A4:  7C6B1B78  or r11,r3,r3
+	  16: GETL       	R3, t14
+	  17: PUTL       	t14, R11
+	  18: INCEIPL       	$4
+
+	0x254832A8:  93A10014  stw r29,20(r1)
+	  19: GETL       	R29, t16
+	  20: GETL       	R1, t18
+	  21: ADDL       	$0x14, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0x254832AC:  93C10018  stw r30,24(r1)
+	  24: GETL       	R30, t20
+	  25: GETL       	R1, t22
+	  26: ADDL       	$0x18, t22
+	  27: STL       	t20, (t22)
+	  28: INCEIPL       	$4
+
+	0x254832B0:  93E1001C  stw r31,28(r1)
+	  29: GETL       	R31, t24
+	  30: GETL       	R1, t26
+	  31: ADDL       	$0x1C, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0x254832B4:  409D00BC  bc 4,29,0x25483370
+	  34: Jc29o       	$0x25483370
+
+
+. 0 25483298 32
+. 2B 85 00 0F 94 21 FF E0 93 81 00 10 7C 6B 1B 78 93 A1 00 14 93 C1 00 18 93 E1 00 1C 40 9D 00 BC
+
+==== BB 162 (0x25483370) approx BBs exec'd 0 ====
+
+	0x25483370:  2C050000  cmpi cr0,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25483374:  41820024  bc 12,2,0x25483398
+	   4: Js02o       	$0x25483398
+
+
+. 0 25483370 8
+. 2C 05 00 00 41 82 00 24
+
+==== BB 163 (0x25483378) approx BBs exec'd 0 ====
+
+	0x25483378:  898B0000  lbz r12,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R12
+	   3: INCEIPL       	$4
+
+	0x2548337C:  396B0001  addi r11,r11,1
+	   4: GETL       	R11, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x25483380:  8BA40000  lbz r29,0(r4)
+	   8: GETL       	R4, t6
+	   9: LDB       	(t6), t8
+	  10: PUTL       	t8, R29
+	  11: INCEIPL       	$4
+
+	0x25483384:  38840001  addi r4,r4,1
+	  12: GETL       	R4, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0x25483388:  7C7D6051  subf. r3,r29,r12
+	  16: GETL       	R29, t12
+	  17: GETL       	R12, t14
+	  18: SUBL       	t12, t14
+	  19: PUTL       	t14, R3
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x0, CR
+	  22: INCEIPL       	$4
+
+	0x2548338C:  40820010  bc 4,2,0x2548339C
+	  23: Jc02o       	$0x2548339C
+
+
+. 0 25483378 24
+. 89 8B 00 00 39 6B 00 01 8B A4 00 00 38 84 00 01 7C 7D 60 51 40 82 00 10
+
+==== BB 164 (0x25483390) approx BBs exec'd 0 ====
+
+	0x25483390:  34A5FFFF  addic. r5,r5,-1
+	   0: GETL       	R5, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R5
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25483394:  4082FFE4  bc 4,2,0x25483378
+	   6: Jc02o       	$0x25483378
+
+
+. 0 25483390 8
+. 34 A5 FF FF 40 82 FF E4
+
+==== BB 165 (0x25483398) approx BBs exec'd 0 ====
+
+	0x25483398:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2548339C:  83810010  lwz r28,16(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x10, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x254833A0:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x254833A4:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x254833A8:  83E1001C  lwz r31,28(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0x254833AC:  38210020  addi r1,r1,32
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x20, t18
+	  25: PUTL       	t18, R1
+	  26: INCEIPL       	$4
+
+	0x254833B0:  4E800020  blr
+	  27: GETL       	LR, t20
+	  28: JMPo-r       	t20  ($4)
+
+
+. 0 25483398 28
+. 38 60 00 00 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 166 (0x25473258) approx BBs exec'd 0 ====
+
+	0x25473258:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547325C:  409E0428  bc 4,30,0x25473684
+	   4: Jc30o       	$0x25473684
+
+
+. 0 25473258 8
+. 2F 83 00 00 40 9E 04 28
+
+==== BB 167 (0x25473260) approx BBs exec'd 0 ====
+
+	0x25473260:  813E004C  lwz r9,76(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25473264:  381D000D  addi r0,r29,13
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0xD, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25473268:  90090000  stw r0,0(r9)
+	   9: GETL       	R0, t6
+	  10: GETL       	R9, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547326C:  4BFFF068  b 0x254722D4
+	  13: JMPo       	$0x254722D4  ($4)
+
+
+. 0 25473260 16
+. 81 3E 00 4C 38 1D 00 0D 90 09 00 00 4B FF F0 68
+
+==== BB 168 (0x254722D4) approx BBs exec'd 0 ====
+
+	0x254722D4:  387F0038  addi r3,r31,56
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x38, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x254722D8:  4800D951  bl 0x2547FC28
+	   4: MOVL       	$0x254722DC, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0x2547FC28  ($4)
+
+
+. 0 254722D4 8
+. 38 7F 00 38 48 00 D9 51
+
+==== BB 169 (0x25473218) approx BBs exec'd 0 ====
+
+	0x25473218:  809E00C0  lwz r4,192(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xC0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547321C:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25473220:  38A00007  li r5,7
+	   8: MOVL       	$0x7, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25473224:  48010075  bl 0x25483298
+	  11: MOVL       	$0x25473228, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+. 0 25473218 16
+. 80 9E 00 C0 7F A3 EB 78 38 A0 00 07 48 01 00 75
+
+==== BB 170 (0x2548339C) approx BBs exec'd 0 ====
+
+	0x2548339C:  83810010  lwz r28,16(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x254833A0:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x254833A4:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0x254833A8:  83E1001C  lwz r31,28(r1)
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R31
+	  19: INCEIPL       	$4
+
+	0x254833AC:  38210020  addi r1,r1,32
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x20, t16
+	  22: PUTL       	t16, R1
+	  23: INCEIPL       	$4
+
+	0x254833B0:  4E800020  blr
+	  24: GETL       	LR, t18
+	  25: JMPo-r       	t18  ($4)
+
+
+. 0 2548339C 24
+. 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 171 (0x25473228) approx BBs exec'd 0 ====
+
+	0x25473228:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547322C:  4086048C  bc 4,6,0x254736B8
+	   4: Jc06o       	$0x254736B8
+
+
+. 0 25473228 8
+. 2C 83 00 00 40 86 04 8C
+
+==== BB 172 (0x254736B8) approx BBs exec'd 0 ====
+
+	0x254736B8:  809E00E0  lwz r4,224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xE0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254736BC:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254736C0:  38A00007  li r5,7
+	   8: MOVL       	$0x7, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x254736C4:  4800FBD5  bl 0x25483298
+	  11: MOVL       	$0x254736C8, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+. 0 254736B8 16
+. 80 9E 00 E0 7F A3 EB 78 38 A0 00 07 48 00 FB D5
+
+==== BB 173 (0x254736C8) approx BBs exec'd 0 ====
+
+	0x254736C8:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x254736CC:  40820088  bc 4,2,0x25473754
+	   4: Jc02o       	$0x25473754
+
+
+. 0 254736C8 8
+. 2C 03 00 00 40 82 00 88
+
+==== BB 174 (0x254736D0) approx BBs exec'd 0 ====
+
+	0x254736D0:  38FD0008  addi r7,r29,8
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R7
+	   3: INCEIPL       	$4
+
+	0x254736D4:  90F10000  stw r7,0(r17)
+	   4: GETL       	R7, t2
+	   5: GETL       	R17, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254736D8:  4BFFEBFC  b 0x254722D4
+	   8: JMPo       	$0x254722D4  ($4)
+
+
+. 0 254736D0 12
+. 38 FD 00 08 90 F1 00 00 4B FF EB FC
+
+==== BB 175 (0x25472344) approx BBs exec'd 0 ====
+
+	0x25472344:  83700000  lwz r27,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R27
+	   3: INCEIPL       	$4
+
+	0x25472348:  933F0030  stw r25,48(r31)
+	   4: GETL       	R25, t4
+	   5: GETL       	R31, t6
+	   6: ADDL       	$0x30, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547234C:  2D9B0000  cmpi cr3,r27,0
+	   9: GETL       	R27, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x3, CR
+	  12: INCEIPL       	$4
+
+	0x25472350:  408E128C  bc 4,14,0x254735DC
+	  13: Jc14o       	$0x254735DC
+
+
+. 0 25472344 16
+. 83 70 00 00 93 3F 00 30 2D 9B 00 00 40 8E 12 8C
+
+==== BB 176 (0x25472354) approx BBs exec'd 0 ====
+
+	0x25472354:  813E0034  lwz r9,52(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25472358:  3014FFFF  addic r0,r20,-1
+	   5: GETL       	R20, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547235C:  7F40A110  subfe r26,r0,r20
+	   9: GETL       	R0, t6
+	  10: GETL       	R20, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R26
+	  13: INCEIPL       	$4
+
+	0x25472360:  83A90000  lwz r29,0(r9)
+	  14: GETL       	R9, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0x25472364:  313DFFFF  addic r9,r29,-1
+	  18: GETL       	R29, t14
+	  19: ADCL       	$0xFFFFFFFF, t14  (-wCa)
+	  20: PUTL       	t14, R9
+	  21: INCEIPL       	$4
+
+	0x25472368:  7F89E910  subfe r28,r9,r29
+	  22: GETL       	R9, t16
+	  23: GETL       	R29, t18
+	  24: SBBL       	t16, t18  (-rCa-wCa)
+	  25: PUTL       	t18, R28
+	  26: INCEIPL       	$4
+
+	0x2547236C:  7F80D039  and. r0,r28,r26
+	  27: GETL       	R28, t20
+	  28: GETL       	R26, t22
+	  29: ANDL       	t20, t22
+	  30: PUTL       	t22, R0
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x0, CR
+	  33: INCEIPL       	$4
+
+	0x25472370:  40820B58  bc 4,2,0x25472EC8
+	  34: Jc02o       	$0x25472EC8
+
+
+. 0 25472354 32
+. 81 3E 00 34 30 14 FF FF 7F 40 A1 10 83 A9 00 00 31 3D FF FF 7F 89 E9 10 7F 80 D0 39 40 82 0B 58
+
+==== BB 177 (0x25472374) approx BBs exec'd 0 ====
+
+	0x25472374:  83560000  lwz r26,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R26
+	   3: INCEIPL       	$4
+
+	0x25472378:  80BE04E0  lwz r5,1248(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x4E0, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R5
+	   8: INCEIPL       	$4
+
+	0x2547237C:  7D9A2800  cmp cr3,r26,r5
+	   9: GETL       	R26, t8
+	  10: GETL       	R5, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x3, CR
+	  13: INCEIPL       	$4
+
+	0x25472380:  418E0940  bc 12,14,0x25472CC0
+	  14: Js14o       	$0x25472CC0
+
+
+. 0 25472374 16
+. 83 56 00 00 80 BE 04 E0 7D 9A 28 00 41 8E 09 40
+
+==== BB 178 (0x25472384) approx BBs exec'd 0 ====
+
+	0x25472384:  807E0084  lwz r3,132(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25472388:  38A00000  li r5,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547238C:  38C00000  li r6,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x25472390:  38E00000  li r7,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x25472394:  7C641B78  or r4,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0x25472398:  39000000  li r8,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R8
+	  19: INCEIPL       	$4
+
+	0x2547239C:  48006D59  bl 0x254790F4
+	  20: MOVL       	$0x254723A0, t14
+	  21: PUTL       	t14, LR
+	  22: JMPo-c       	$0x254790F4  ($4)
+
+
+. 0 25472384 28
+. 80 7E 00 84 38 A0 00 00 38 C0 00 00 38 E0 00 00 7C 64 1B 78 39 00 00 00 48 00 6D 59
+
+==== BB 179 _dl_new_object(0x254790F4) approx BBs exec'd 0 ====
+
+	0x254790F4:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254790F8:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254790FC:  4801DF05  bl 0x25497000
+	   9: MOVL       	$0x25479100, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254790F4 12
+. 94 21 FF D0 7C 08 02 A6 48 01 DF 05
+
+==== BB 180 (0x25479100) approx BBs exec'd 0 ====
+
+	0x25479100:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25479104:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25479108:  92C10008  stw r22,8(r1)
+	   8: GETL       	R22, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x8, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547910C:  92E1000C  stw r23,12(r1)
+	  13: GETL       	R23, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xC, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25479110:  7C771B78  or r23,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R23
+	  20: INCEIPL       	$4
+
+	0x25479114:  93010010  stw r24,16(r1)
+	  21: GETL       	R24, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25479118:  7C832378  or r3,r4,r4
+	  26: GETL       	R4, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x2547911C:  93210014  stw r25,20(r1)
+	  29: GETL       	R25, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x14, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0x25479120:  7CB82B78  or r24,r5,r5
+	  34: GETL       	R5, t26
+	  35: PUTL       	t26, R24
+	  36: INCEIPL       	$4
+
+	0x25479124:  93410018  stw r26,24(r1)
+	  37: GETL       	R26, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x18, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x25479128:  7CF63B78  or r22,r7,r7
+	  42: GETL       	R7, t32
+	  43: PUTL       	t32, R22
+	  44: INCEIPL       	$4
+
+	0x2547912C:  9361001C  stw r27,28(r1)
+	  45: GETL       	R27, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x1C, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0x25479130:  7C9A2378  or r26,r4,r4
+	  50: GETL       	R4, t38
+	  51: PUTL       	t38, R26
+	  52: INCEIPL       	$4
+
+	0x25479134:  93810020  stw r28,32(r1)
+	  53: GETL       	R28, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x20, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x25479138:  7D194378  or r25,r8,r8
+	  58: GETL       	R8, t44
+	  59: PUTL       	t44, R25
+	  60: INCEIPL       	$4
+
+	0x2547913C:  93E1002C  stw r31,44(r1)
+	  61: GETL       	R31, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x2C, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0x25479140:  7CDC3378  or r28,r6,r6
+	  66: GETL       	R6, t50
+	  67: PUTL       	t50, R28
+	  68: INCEIPL       	$4
+
+	0x25479144:  90010034  stw r0,52(r1)
+	  69: GETL       	R0, t52
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x34, t54
+	  72: STL       	t52, (t54)
+	  73: INCEIPL       	$4
+
+	0x25479148:  93A10024  stw r29,36(r1)
+	  74: GETL       	R29, t56
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x24, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0x2547914C:  48009E85  bl 0x25482FD0
+	  79: MOVL       	$0x25479150, t60
+	  80: PUTL       	t60, LR
+	  81: JMPo-c       	$0x25482FD0  ($4)
+
+
+. 0 25479100 80
+. 93 C1 00 28 7F C8 02 A6 92 C1 00 08 92 E1 00 0C 7C 77 1B 78 93 01 00 10 7C 83 23 78 93 21 00 14 7C B8 2B 78 93 41 00 18 7C F6 3B 78 93 61 00 1C 7C 9A 23 78 93 81 00 20 7D 19 43 78 93 E1 00 2C 7C DC 33 78 90 01 00 34 93 A1 00 24 48 00 9E 85
+
+==== BB 181 strlen(0x25482FD0) approx BBs exec'd 0 ====
+
+	0x25482FD0:  5464003A  rlwinm r4,r3,0,0,29
+	   0: GETL       	R3, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25482FD4:  3CE07F7F  lis r7,32639
+	   4: MOVL       	$0x7F7F0000, t2
+	   5: PUTL       	t2, R7
+	   6: INCEIPL       	$4
+
+	0x25482FD8:  54651EF8  rlwinm r5,r3,3,27,28
+	   7: GETL       	R3, t4
+	   8: ROLL       	$0x3, t4
+	   9: ANDL       	$0x18, t4
+	  10: PUTL       	t4, R5
+	  11: INCEIPL       	$4
+
+	0x25482FDC:  81040000  lwz r8,0(r4)
+	  12: GETL       	R4, t6
+	  13: LDL       	(t6), t8
+	  14: PUTL       	t8, R8
+	  15: INCEIPL       	$4
+
+	0x25482FE0:  3920FFFF  li r9,-1
+	  16: MOVL       	$0xFFFFFFFF, t10
+	  17: PUTL       	t10, R9
+	  18: INCEIPL       	$4
+
+	0x25482FE4:  38E77F7F  addi r7,r7,32639
+	  19: MOVL       	$0x7F7F7F7F, t12
+	  20: PUTL       	t12, R7
+	  21: INCEIPL       	$4
+
+	0x25482FE8:  7D292C30  srw r9,r9,r5
+	  22: GETL       	R9, t16
+	  23: GETL       	R5, t14
+	  24: SHRL       	t14, t16
+	  25: PUTL       	t16, R9
+	  26: INCEIPL       	$4
+
+	0x25482FEC:  7CE04038  and r0,r7,r8
+	  27: GETL       	R7, t18
+	  28: GETL       	R8, t20
+	  29: ANDL       	t18, t20
+	  30: PUTL       	t20, R0
+	  31: INCEIPL       	$4
+
+	0x25482FF0:  7CEA4378  or r10,r7,r8
+	  32: GETL       	R7, t22
+	  33: GETL       	R8, t24
+	  34: ORL       	t24, t22
+	  35: PUTL       	t22, R10
+	  36: INCEIPL       	$4
+
+	0x25482FF4:  7C003A14  add r0,r0,r7
+	  37: GETL       	R0, t26
+	  38: GETL       	R7, t28
+	  39: ADDL       	t26, t28
+	  40: PUTL       	t28, R0
+	  41: INCEIPL       	$4
+
+	0x25482FF8:  7D4000F8  nor r0,r10,r0
+	  42: GETL       	R10, t30
+	  43: GETL       	R0, t32
+	  44: ORL       	t32, t30
+	  45: NOTL       	t30
+	  46: PUTL       	t30, R0
+	  47: INCEIPL       	$4
+
+	0x25482FFC:  7C084839  and. r8,r0,r9
+	  48: GETL       	R0, t34
+	  49: GETL       	R9, t36
+	  50: ANDL       	t34, t36
+	  51: PUTL       	t36, R8
+	  52: CMP0L       	t36, t38  (-rSo)
+	  53: ICRFL       	t38, $0x0, CR
+	  54: INCEIPL       	$4
+
+	0x25483000:  7C601120  mtcrf 0x1,r3
+	  55: GETL       	R3, t40
+	  56: ICRFL       	t40, $0x7, CR
+	  57: INCEIPL       	$4
+
+	0x25483004:  40820070  bc 4,2,0x25483074
+	  58: Jc02o       	$0x25483074
+
+
+. 0 25482FD0 56
+. 54 64 00 3A 3C E0 7F 7F 54 65 1E F8 81 04 00 00 39 20 FF FF 38 E7 7F 7F 7D 29 2C 30 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 40 00 F8 7C 08 48 39 7C 60 11 20 40 82 00 70
+
+==== BB 182 (0x25483074) approx BBs exec'd 0 ====
+
+	0x25483074:  7D0B0034  cntlzw r11,r8
+	   0: GETL       	R8, t0
+	   1: CNTLZL       	t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0x25483078:  7C032050  subf r0,r3,r4
+	   4: GETL       	R3, t2
+	   5: GETL       	R4, t4
+	   6: SUBL       	t2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2548307C:  556BE8FE  rlwinm r11,r11,29,3,31
+	   9: GETL       	R11, t6
+	  10: SHRL       	$0x3, t6
+	  11: PUTL       	t6, R11
+	  12: INCEIPL       	$4
+
+	0x25483080:  7C605A14  add r3,r0,r11
+	  13: GETL       	R0, t8
+	  14: GETL       	R11, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0x25483084:  4E800020  blr
+	  18: GETL       	LR, t12
+	  19: JMPo-r       	t12  ($4)
+
+
+. 0 25483074 20
+. 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+
+==== BB 183 (0x25479150) approx BBs exec'd 0 ====
+
+	0x25479150:  38800001  li r4,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25479154:  7C691B78  or r9,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x25479158:  3863024D  addi r3,r3,589
+	   6: GETL       	R3, t4
+	   7: ADDL       	$0x24D, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0x2547915C:  3B690001  addi r27,r9,1
+	  10: GETL       	R9, t6
+	  11: ADDL       	$0x1, t6
+	  12: PUTL       	t6, R27
+	  13: INCEIPL       	$4
+
+	0x25479160:  4801E8CD  bl 0x25497A2C
+	  14: MOVL       	$0x25479164, t8
+	  15: PUTL       	t8, LR
+	  16: JMPo-c       	$0x25497A2C  ($4)
+
+
+. 0 25479150 20
+. 38 80 00 01 7C 69 1B 78 38 63 02 4D 3B 69 00 01 48 01 E8 CD
+
+==== BB 184 (0x25497A2C) approx BBs exec'd 0 ====
+
+	0x25497A2C:  4BFE848C  b 0x2547FEB8
+	   0: JMPo       	$0x2547FEB8  ($4)
+
+
+. 0 25497A2C 4
+. 4B FE 84 8C
+
+==== BB 185 calloc(0x2547FEB8) approx BBs exec'd 0 ====
+
+	0x2547FEB8:  7C6321D6  mullw r3,r3,r4
+	   0: GETL       	R3, t0
+	   1: GETL       	R4, t2
+	   2: MULL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547FEBC:  7CA802A6  mflr r5
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547FEC0:  9421FFF0  stwu r1,-16(r1)
+	   8: GETL       	R1, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xFFFFFFF0, t8
+	  11: PUTL       	t8, R1
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547FEC4:  90A10014  stw r5,20(r1)
+	  14: GETL       	R5, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x2547FEC8:  48017B5D  bl 0x25497A24
+	  19: MOVL       	$0x2547FECC, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 2547FEB8 20
+. 7C 63 21 D6 7C A8 02 A6 94 21 FF F0 90 A1 00 14 48 01 7B 5D
+
+==== BB 186 (0x25497A24) approx BBs exec'd 0 ====
+
+	0x25497A24:  4BFE846C  b 0x2547FE90
+	   0: JMPo       	$0x2547FE90  ($4)
+
+
+. 0 25497A24 4
+. 4B FE 84 6C
+
+==== BB 187 malloc(0x2547FE90) approx BBs exec'd 0 ====
+
+	0x2547FE90:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547FE94:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547FE98:  90810014  stw r4,20(r1)
+	   9: GETL       	R4, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547FE9C:  7C641B78  or r4,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0x2547FEA0:  38600008  li r3,8
+	  17: MOVL       	$0x8, t12
+	  18: PUTL       	t12, R3
+	  19: INCEIPL       	$4
+
+	0x2547FEA4:  48017B79  bl 0x25497A1C
+	  20: MOVL       	$0x2547FEA8, t14
+	  21: PUTL       	t14, LR
+	  22: JMPo-c       	$0x25497A1C  ($4)
+
+
+. 0 2547FE90 24
+. 7C 88 02 A6 94 21 FF F0 90 81 00 14 7C 64 1B 78 38 60 00 08 48 01 7B 79
+
+==== BB 188 (0x25497A1C) approx BBs exec'd 0 ====
+
+	0x25497A1C:  4BFE832C  b 0x2547FD48
+	   0: JMPo       	$0x2547FD48  ($4)
+
+
+. 0 25497A1C 4
+. 4B FE 83 2C
+
+==== BB 189 __libc_memalign(0x2547FD48) approx BBs exec'd 0 ====
+
+	0x2547FD48:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547FD4C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547FD50:  480172B1  bl 0x25497000
+	   9: MOVL       	$0x2547FD54, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547FD48 12
+. 94 21 FF E0 7C 08 02 A6 48 01 72 B1
+
+==== BB 190 (0x2547FD54) approx BBs exec'd 0 ====
+
+	0x2547FD54:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547FD58:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547FD5C:  9361000C  stw r27,12(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547FD60:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547FD64:  7C6A1B78  or r10,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R10
+	  20: INCEIPL       	$4
+
+	0x2547FD68:  93810010  stw r28,16(r1)
+	  21: GETL       	R28, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547FD6C:  38A00003  li r5,3
+	  26: MOVL       	$0x3, t20
+	  27: PUTL       	t20, R5
+	  28: INCEIPL       	$4
+
+	0x2547FD70:  837E0418  lwz r27,1048(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x418, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R27
+	  33: INCEIPL       	$4
+
+	0x2547FD74:  38C00022  li r6,34
+	  34: MOVL       	$0x22, t26
+	  35: PUTL       	t26, R6
+	  36: INCEIPL       	$4
+
+	0x2547FD78:  93A10014  stw r29,20(r1)
+	  37: GETL       	R29, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x14, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x2547FD7C:  38E0FFFF  li r7,-1
+	  42: MOVL       	$0xFFFFFFFF, t32
+	  43: PUTL       	t32, R7
+	  44: INCEIPL       	$4
+
+	0x2547FD80:  817B0000  lwz r11,0(r27)
+	  45: GETL       	R27, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R11
+	  48: INCEIPL       	$4
+
+	0x2547FD84:  7FA300D0  neg r29,r3
+	  49: GETL       	R3, t38
+	  50: NEGL       	t38
+	  51: PUTL       	t38, R29
+	  52: INCEIPL       	$4
+
+	0x2547FD88:  93E1001C  stw r31,28(r1)
+	  53: GETL       	R31, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x1C, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x2547FD8C:  39000000  li r8,0
+	  58: MOVL       	$0x0, t44
+	  59: PUTL       	t44, R8
+	  60: INCEIPL       	$4
+
+	0x2547FD90:  2F8B0000  cmpi cr7,r11,0
+	  61: GETL       	R11, t46
+	  62: CMP0L       	t46, t48  (-rSo)
+	  63: ICRFL       	t48, $0x7, CR
+	  64: INCEIPL       	$4
+
+	0x2547FD94:  819E0500  lwz r12,1280(r30)
+	  65: GETL       	R30, t50
+	  66: ADDL       	$0x500, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R12
+	  69: INCEIPL       	$4
+
+	0x2547FD98:  38600000  li r3,0
+	  70: MOVL       	$0x0, t54
+	  71: PUTL       	t54, R3
+	  72: INCEIPL       	$4
+
+	0x2547FD9C:  7C9C2378  or r28,r4,r4
+	  73: GETL       	R4, t56
+	  74: PUTL       	t56, R28
+	  75: INCEIPL       	$4
+
+	0x2547FDA0:  83FE041C  lwz r31,1052(r30)
+	  76: GETL       	R30, t58
+	  77: ADDL       	$0x41C, t58
+	  78: LDL       	(t58), t60
+	  79: PUTL       	t60, R31
+	  80: INCEIPL       	$4
+
+	0x2547FDA4:  409E0024  bc 4,30,0x2547FDC8
+	  81: Jc30o       	$0x2547FDC8
+
+
+. 0 2547FD54 84
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 90 01 00 24 7C 6A 1B 78 93 81 00 10 38 A0 00 03 83 7E 04 18 38 C0 00 22 93 A1 00 14 38 E0 FF FF 81 7B 00 00 7F A3 00 D0 93 E1 00 1C 39 00 00 00 2F 8B 00 00 81 9E 05 00 38 60 00 00 7C 9C 23 78 83 FE 04 1C 40 9E 00 24
+
+==== BB 191 (0x2547FDA8) approx BBs exec'd 0 ====
+
+	0x2547FDA8:  813E04F4  lwz r9,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547FDAC:  919F0000  stw r12,0(r31)
+	   5: GETL       	R12, t4
+	   6: GETL       	R31, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547FDB0:  81690004  lwz r11,4(r9)
+	   9: GETL       	R9, t8
+	  10: ADDL       	$0x4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0x2547FDB4:  7C8B6214  add r4,r11,r12
+	  14: GETL       	R11, t12
+	  15: GETL       	R12, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R4
+	  18: INCEIPL       	$4
+
+	0x2547FDB8:  3924FFFF  addi r9,r4,-1
+	  19: GETL       	R4, t16
+	  20: ADDL       	$0xFFFFFFFF, t16
+	  21: PUTL       	t16, R9
+	  22: INCEIPL       	$4
+
+	0x2547FDBC:  7C8B00D0  neg r4,r11
+	  23: GETL       	R11, t18
+	  24: NEGL       	t18
+	  25: PUTL       	t18, R4
+	  26: INCEIPL       	$4
+
+	0x2547FDC0:  7D2B2038  and r11,r9,r4
+	  27: GETL       	R9, t20
+	  28: GETL       	R4, t22
+	  29: ANDL       	t20, t22
+	  30: PUTL       	t22, R11
+	  31: INCEIPL       	$4
+
+	0x2547FDC4:  917B0000  stw r11,0(r27)
+	  32: GETL       	R11, t24
+	  33: GETL       	R27, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547FDC8:  813F0000  lwz r9,0(r31)
+	  36: GETL       	R31, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R9
+	  39: INCEIPL       	$4
+
+	0x2547FDCC:  7C895214  add r4,r9,r10
+	  40: GETL       	R9, t32
+	  41: GETL       	R10, t34
+	  42: ADDL       	t32, t34
+	  43: PUTL       	t34, R4
+	  44: INCEIPL       	$4
+
+	0x2547FDD0:  3804FFFF  addi r0,r4,-1
+	  45: GETL       	R4, t36
+	  46: ADDL       	$0xFFFFFFFF, t36
+	  47: PUTL       	t36, R0
+	  48: INCEIPL       	$4
+
+	0x2547FDD4:  7C0AE838  and r10,r0,r29
+	  49: GETL       	R0, t38
+	  50: GETL       	R29, t40
+	  51: ANDL       	t38, t40
+	  52: PUTL       	t40, R10
+	  53: INCEIPL       	$4
+
+	0x2547FDD8:  7D8AE214  add r12,r10,r28
+	  54: GETL       	R10, t42
+	  55: GETL       	R28, t44
+	  56: ADDL       	t42, t44
+	  57: PUTL       	t44, R12
+	  58: INCEIPL       	$4
+
+	0x2547FDDC:  915F0000  stw r10,0(r31)
+	  59: GETL       	R10, t46
+	  60: GETL       	R31, t48
+	  61: STL       	t46, (t48)
+	  62: INCEIPL       	$4
+
+	0x2547FDE0:  7C0C5840  cmpl cr0,r12,r11
+	  63: GETL       	R12, t50
+	  64: GETL       	R11, t52
+	  65: CMPUL       	t50, t52, t54  (-rSo)
+	  66: ICRFL       	t54, $0x0, CR
+	  67: INCEIPL       	$4
+
+	0x2547FDE4:  4080003C  bc 4,0,0x2547FE20
+	  68: Jc00o       	$0x2547FE20
+
+
+. 0 2547FDA8 64
+. 81 3E 04 F4 91 9F 00 00 81 69 00 04 7C 8B 62 14 39 24 FF FF 7C 8B 00 D0 7D 2B 20 38 91 7B 00 00 81 3F 00 00 7C 89 52 14 38 04 FF FF 7C 0A E8 38 7D 8A E2 14 91 5F 00 00 7C 0C 58 40 40 80 00 3C
+
+==== BB 192 (0x2547FDE8) approx BBs exec'd 0 ====
+
+	0x2547FDE8:  807F0000  lwz r3,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x2547FDEC:  80C10024  lwz r6,36(r1)
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x24, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x2547FDF0:  7D03E214  add r8,r3,r28
+	   9: GETL       	R3, t8
+	  10: GETL       	R28, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R8
+	  13: INCEIPL       	$4
+
+	0x2547FDF4:  80FE0420  lwz r7,1056(r30)
+	  14: GETL       	R30, t12
+	  15: ADDL       	$0x420, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R7
+	  18: INCEIPL       	$4
+
+	0x2547FDF8:  911F0000  stw r8,0(r31)
+	  19: GETL       	R8, t16
+	  20: GETL       	R31, t18
+	  21: STL       	t16, (t18)
+	  22: INCEIPL       	$4
+
+	0x2547FDFC:  7CC803A6  mtlr r6
+	  23: GETL       	R6, t20
+	  24: PUTL       	t20, LR
+	  25: INCEIPL       	$4
+
+	0x2547FE00:  8361000C  lwz r27,12(r1)
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0xC, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R27
+	  30: INCEIPL       	$4
+
+	0x2547FE04:  83810010  lwz r28,16(r1)
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x10, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R28
+	  35: INCEIPL       	$4
+
+	0x2547FE08:  83A10014  lwz r29,20(r1)
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x14, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R29
+	  40: INCEIPL       	$4
+
+	0x2547FE0C:  83C10018  lwz r30,24(r1)
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x18, t34
+	  43: LDL       	(t34), t36
+	  44: PUTL       	t36, R30
+	  45: INCEIPL       	$4
+
+	0x2547FE10:  83E1001C  lwz r31,28(r1)
+	  46: GETL       	R1, t38
+	  47: ADDL       	$0x1C, t38
+	  48: LDL       	(t38), t40
+	  49: PUTL       	t40, R31
+	  50: INCEIPL       	$4
+
+	0x2547FE14:  38210020  addi r1,r1,32
+	  51: GETL       	R1, t42
+	  52: ADDL       	$0x20, t42
+	  53: PUTL       	t42, R1
+	  54: INCEIPL       	$4
+
+	0x2547FE18:  90670000  stw r3,0(r7)
+	  55: GETL       	R3, t44
+	  56: GETL       	R7, t46
+	  57: STL       	t44, (t46)
+	  58: INCEIPL       	$4
+
+	0x2547FE1C:  4E800020  blr
+	  59: GETL       	LR, t48
+	  60: JMPo-r       	t48  ($4)
+
+
+. 0 2547FDE8 56
+. 80 7F 00 00 80 C1 00 24 7D 03 E2 14 80 FE 04 20 91 1F 00 00 7C C8 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 90 67 00 00 4E 80 00 20
+
+==== BB 193 (0x2547FEA8) approx BBs exec'd 0 ====
+
+	0x2547FEA8:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547FEAC:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x2547FEB0:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x2547FEB4:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+. 0 2547FEA8 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+==== BB 194 (0x2547FECC) approx BBs exec'd 0 ====
+
+	0x2547FECC:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547FED0:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x2547FED4:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x2547FED8:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+. 0 2547FECC 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+==== BB 195 (0x25479164) approx BBs exec'd 0 ====
+
+	0x25479164:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25479168:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547916C:  41820174  bc 12,2,0x254792E0
+	   8: Js02o       	$0x254792E0
+
+
+. 0 25479164 12
+. 7C 7F 1B 79 38 60 00 00 41 82 01 74
+
+==== BB 196 (0x25479170) approx BBs exec'd 0 ====
+
+	0x25479170:  3BBF0240  addi r29,r31,576
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x240, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x25479174:  7F44D378  or r4,r26,r26
+	   4: GETL       	R26, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0x25479178:  7F65DB78  or r5,r27,r27
+	   7: GETL       	R27, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x2547917C:  93FF0014  stw r31,20(r31)
+	  10: GETL       	R31, t6
+	  11: GETL       	R31, t8
+	  12: ADDL       	$0x14, t8
+	  13: STL       	t6, (t8)
+	  14: INCEIPL       	$4
+
+	0x25479180:  93BF001C  stw r29,28(r31)
+	  15: GETL       	R29, t10
+	  16: GETL       	R31, t12
+	  17: ADDL       	$0x1C, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0x25479184:  387F024C  addi r3,r31,588
+	  20: GETL       	R31, t14
+	  21: ADDL       	$0x24C, t14
+	  22: PUTL       	t14, R3
+	  23: INCEIPL       	$4
+
+	0x25479188:  4800AA59  bl 0x25483BE0
+	  24: MOVL       	$0x2547918C, t16
+	  25: PUTL       	t16, LR
+	  26: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 25479170 28
+. 3B BF 02 40 7F 44 D3 78 7F 65 DB 78 93 FF 00 14 93 BF 00 1C 38 7F 02 4C 48 00 AA 59
+
+==== BB 197 (0x25483C68) approx BBs exec'd 0 ====
+
+	0x25483C68:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x25483C6C:  4186001C  bc 12,6,0x25483C88
+	   4: Js06o       	$0x25483C88
+
+
+. 0 25483C68 8
+. 2C 85 00 00 41 86 00 1C
+
+==== BB 198 (0x25483C70) approx BBs exec'd 0 ====
+
+	0x25483C70:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x25483C74:  88FD0000  lbz r7,0(r29)
+	   3: GETL       	R29, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R7
+	   6: INCEIPL       	$4
+
+	0x25483C78:  3BBD0001  addi r29,r29,1
+	   7: GETL       	R29, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R29
+	  10: INCEIPL       	$4
+
+	0x25483C7C:  98FF0000  stb r7,0(r31)
+	  11: GETL       	R7, t8
+	  12: GETL       	R31, t10
+	  13: STB       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25483C80:  3BFF0001  addi r31,r31,1
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R31
+	  18: INCEIPL       	$4
+
+	0x25483C84:  4200FFF0  bc 16,0,0x25483C74
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0x25483C88
+	  23: JMPo       	$0x25483C74  ($4)
+
+
+. 0 25483C70 24
+. 7C A9 03 A6 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+
+==== BB 199 (0x2547918C) approx BBs exec'd 0 ====
+
+	0x2547918C:  38A00001  li r5,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x25479190:  90BD0008  stw r5,8(r29)
+	   3: GETL       	R5, t2
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x25479194:  1D590018  mulli r10,r25,24
+	   8: GETL       	R25, t6
+	   9: MULL       	$0x18, t6
+	  10: PUTL       	t6, R10
+	  11: INCEIPL       	$4
+
+	0x25479198:  80DF0180  lwz r6,384(r31)
+	  12: GETL       	R31, t8
+	  13: ADDL       	$0x180, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R6
+	  16: INCEIPL       	$4
+
+	0x2547919C:  3880FFFF  li r4,-1
+	  17: MOVL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, R4
+	  19: INCEIPL       	$4
+
+	0x254791A0:  80BE04C8  lwz r5,1224(r30)
+	  20: GETL       	R30, t14
+	  21: ADDL       	$0x4C8, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R5
+	  24: INCEIPL       	$4
+
+	0x254791A4:  5306F002  rlwimi r6,r24,30,0,1
+	  25: GETL       	R6, t18
+	  26: GETL       	R24, t20
+	  27: ROLL       	$0x1E, t20
+	  28: ANDL       	$0xC0000000, t20
+	  29: ANDL       	$0x3FFFFFFF, t18
+	  30: ORL       	t18, t20
+	  31: PUTL       	t20, R6
+	  32: INCEIPL       	$4
+
+	0x254791A8:  939F0168  stw r28,360(r31)
+	  33: GETL       	R28, t22
+	  34: GETL       	R31, t24
+	  35: ADDL       	$0x168, t24
+	  36: STL       	t22, (t24)
+	  37: INCEIPL       	$4
+
+	0x254791AC:  90DF0180  stw r6,384(r31)
+	  38: GETL       	R6, t26
+	  39: GETL       	R31, t28
+	  40: ADDL       	$0x180, t28
+	  41: STL       	t26, (t28)
+	  42: INCEIPL       	$4
+
+	0x254791B0:  38DF01AC  addi r6,r31,428
+	  43: GETL       	R31, t30
+	  44: ADDL       	$0x1AC, t30
+	  45: PUTL       	t30, R6
+	  46: INCEIPL       	$4
+
+	0x254791B4:  7D6A282E  lwzx r11,r10,r5
+	  47: GETL       	R5, t32
+	  48: GETL       	R10, t34
+	  49: ADDL       	t34, t32
+	  50: LDL       	(t32), t36
+	  51: PUTL       	t36, R11
+	  52: INCEIPL       	$4
+
+	0x254791B8:  907F0240  stw r3,576(r31)
+	  53: GETL       	R3, t38
+	  54: GETL       	R31, t40
+	  55: ADDL       	$0x240, t40
+	  56: STL       	t38, (t40)
+	  57: INCEIPL       	$4
+
+	0x254791BC:  38600004  li r3,4
+	  58: MOVL       	$0x4, t42
+	  59: PUTL       	t42, R3
+	  60: INCEIPL       	$4
+
+	0x254791C0:  2F8B0000  cmpi cr7,r11,0
+	  61: GETL       	R11, t44
+	  62: CMP0L       	t44, t46  (-rSo)
+	  63: ICRFL       	t46, $0x7, CR
+	  64: INCEIPL       	$4
+
+	0x254791C4:  909F022C  stw r4,556(r31)
+	  65: GETL       	R4, t48
+	  66: GETL       	R31, t50
+	  67: ADDL       	$0x22C, t50
+	  68: STL       	t48, (t50)
+	  69: INCEIPL       	$4
+
+	0x254791C8:  907F01BC  stw r3,444(r31)
+	  70: GETL       	R3, t52
+	  71: GETL       	R31, t54
+	  72: ADDL       	$0x1BC, t54
+	  73: STL       	t52, (t54)
+	  74: INCEIPL       	$4
+
+	0x254791CC:  38800000  li r4,0
+	  75: MOVL       	$0x0, t56
+	  76: PUTL       	t56, R4
+	  77: INCEIPL       	$4
+
+	0x254791D0:  92FF0004  stw r23,4(r31)
+	  78: GETL       	R23, t58
+	  79: GETL       	R31, t60
+	  80: ADDL       	$0x4, t60
+	  81: STL       	t58, (t60)
+	  82: INCEIPL       	$4
+
+	0x254791D4:  933F0018  stw r25,24(r31)
+	  83: GETL       	R25, t62
+	  84: GETL       	R31, t64
+	  85: ADDL       	$0x18, t64
+	  86: STL       	t62, (t64)
+	  87: INCEIPL       	$4
+
+	0x254791D8:  90DF01C0  stw r6,448(r31)
+	  88: GETL       	R6, t66
+	  89: GETL       	R31, t68
+	  90: ADDL       	$0x1C0, t68
+	  91: STL       	t66, (t68)
+	  92: INCEIPL       	$4
+
+	0x254791DC:  409E013C  bc 4,30,0x25479318
+	  93: Jc30o       	$0x25479318
+
+
+. 0 2547918C 84
+. 38 A0 00 01 90 BD 00 08 1D 59 00 18 80 DF 01 80 38 80 FF FF 80 BE 04 C8 53 06 F0 02 93 9F 01 68 90 DF 01 80 38 DF 01 AC 7D 6A 28 2E 90 7F 02 40 38 60 00 04 2F 8B 00 00 90 9F 02 2C 90 7F 01 BC 38 80 00 00 92 FF 00 04 93 3F 00 18 90 DF 01 C0 40 9E 01 3C
+
+==== BB 200 (0x254791E0) approx BBs exec'd 0 ====
+
+	0x254791E0:  7FEA292E  stwx r31,r10,r5
+	   0: GETL       	R5, t0
+	   1: GETL       	R10, t2
+	   2: ADDL       	t2, t0
+	   3: GETL       	R31, t4
+	   4: STL       	t4, (t0)
+	   5: INCEIPL       	$4
+
+	0x254791E4:  2F1C0000  cmpi cr6,r28,0
+	   6: GETL       	R28, t6
+	   7: CMP0L       	t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x254791E8:  7D4A2A14  add r10,r10,r5
+	  10: GETL       	R10, t10
+	  11: GETL       	R5, t12
+	  12: ADDL       	t10, t12
+	  13: PUTL       	t12, R10
+	  14: INCEIPL       	$4
+
+	0x254791EC:  81650198  lwz r11,408(r5)
+	  15: GETL       	R5, t14
+	  16: ADDL       	$0x198, t14
+	  17: LDL       	(t14), t16
+	  18: PUTL       	t16, R11
+	  19: INCEIPL       	$4
+
+	0x254791F0:  832A0004  lwz r25,4(r10)
+	  20: GETL       	R10, t18
+	  21: ADDL       	$0x4, t18
+	  22: LDL       	(t18), t20
+	  23: PUTL       	t20, R25
+	  24: INCEIPL       	$4
+
+	0x254791F4:  8185019C  lwz r12,412(r5)
+	  25: GETL       	R5, t22
+	  26: ADDL       	$0x19C, t22
+	  27: LDL       	(t22), t24
+	  28: PUTL       	t24, R12
+	  29: INCEIPL       	$4
+
+	0x254791F8:  3B190001  addi r24,r25,1
+	  30: GETL       	R25, t26
+	  31: ADDL       	$0x1, t26
+	  32: PUTL       	t26, R24
+	  33: INCEIPL       	$4
+
+	0x254791FC:  310C0001  addic r8,r12,1
+	  34: GETL       	R12, t28
+	  35: ADCL       	$0x1, t28  (-wCa)
+	  36: PUTL       	t28, R8
+	  37: INCEIPL       	$4
+
+	0x25479200:  7CEB0194  addze r7,r11
+	  38: GETL       	R11, t30
+	  39: ADCL       	$0x0, t30  (-rCa-wCa)
+	  40: PUTL       	t30, R7
+	  41: INCEIPL       	$4
+
+	0x25479204:  930A0004  stw r24,4(r10)
+	  42: GETL       	R24, t32
+	  43: GETL       	R10, t34
+	  44: ADDL       	$0x4, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0x25479208:  90E50198  stw r7,408(r5)
+	  47: GETL       	R7, t36
+	  48: GETL       	R5, t38
+	  49: ADDL       	$0x198, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0x2547920C:  9105019C  stw r8,412(r5)
+	  52: GETL       	R8, t40
+	  53: GETL       	R5, t42
+	  54: ADDL       	$0x19C, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0x25479210:  40BA000C  bc 5,26,0x2547921C
+	  57: Jc26o       	$0x2547921C
+
+
+. 0 254791E0 52
+. 7F EA 29 2E 2F 1C 00 00 7D 4A 2A 14 81 65 01 98 83 2A 00 04 81 85 01 9C 3B 19 00 01 31 0C 00 01 7C EB 01 94 93 0A 00 04 90 E5 01 98 91 05 01 9C 40 BA 00 0C
+
+==== BB 201 (0x25479214) approx BBs exec'd 0 ====
+
+	0x25479214:  48000164  b 0x25479378
+	   0: JMPo       	$0x25479378  ($4)
+
+
+. 0 25479214 4
+. 48 00 01 64
+
+==== BB 202 (0x25479378) approx BBs exec'd 0 ====
+
+	0x25479378:  7FFCFB78  or r28,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0x2547937C:  4BFFFEAC  b 0x25479228
+	   3: JMPo       	$0x25479228  ($4)
+
+
+. 0 25479378 8
+. 7F FC FB 78 4B FF FE AC
+
+==== BB 203 (0x25479228) approx BBs exec'd 0 ====
+
+	0x25479228:  2F840000  cmpi cr7,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547922C:  393C0158  addi r9,r28,344
+	   4: GETL       	R28, t4
+	   5: ADDL       	$0x158, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25479230:  419E0010  bc 12,30,0x25479240
+	   8: Js30o       	$0x25479240
+
+
+. 0 25479228 12
+. 2F 84 00 00 39 3C 01 58 41 9E 00 10
+
+==== BB 204 (0x25479240) approx BBs exec'd 0 ====
+
+	0x25479240:  56CCEFFE  rlwinm r12,r22,29,31,31
+	   0: GETL       	R22, t0
+	   1: ROLL       	$0x1D, t0
+	   2: ANDL       	$0x1, t0
+	   3: PUTL       	t0, R12
+	   4: INCEIPL       	$4
+
+	0x25479244:  7D8B2039  and. r11,r12,r4
+	   5: GETL       	R12, t2
+	   6: GETL       	R4, t4
+	   7: ANDL       	t2, t4
+	   8: PUTL       	t4, R11
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25479248:  41820010  bc 12,2,0x25479258
+	  12: Js02o       	$0x25479258
+
+
+. 0 25479240 12
+. 56 CC EF FE 7D 8B 20 39 41 82 00 10
+
+==== BB 205 (0x25479258) approx BBs exec'd 0 ====
+
+	0x25479258:  549A103A  rlwinm r26,r4,2,0,29
+	   0: GETL       	R4, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547925C:  7D3A312E  stwx r9,r26,r6
+	   4: GETL       	R6, t2
+	   5: GETL       	R26, t4
+	   6: ADDL       	t4, t2
+	   7: GETL       	R9, t6
+	   8: STL       	t6, (t2)
+	   9: INCEIPL       	$4
+
+	0x25479260:  3B7F0158  addi r27,r31,344
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x158, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0x25479264:  937F01C4  stw r27,452(r31)
+	  14: GETL       	R27, t10
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x1C4, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x25479268:  8BB70000  lbz r29,0(r23)
+	  19: GETL       	R23, t14
+	  20: LDB       	(t14), t16
+	  21: PUTL       	t16, R29
+	  22: INCEIPL       	$4
+
+	0x2547926C:  2F1D0000  cmpi cr6,r29,0
+	  23: GETL       	R29, t18
+	  24: CMP0L       	t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x6, CR
+	  26: INCEIPL       	$4
+
+	0x25479270:  419A006C  bc 12,26,0x254792DC
+	  27: Js26o       	$0x254792DC
+
+
+. 0 25479258 28
+. 54 9A 10 3A 7D 3A 31 2E 3B 7F 01 58 93 7F 01 C4 8B B7 00 00 2F 1D 00 00 41 9A 00 6C
+
+==== BB 206 (0x254792DC) approx BBs exec'd 0 ====
+
+	0x254792DC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254792E0:  83810034  lwz r28,52(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x34, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x254792E4:  82C10008  lwz r22,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R22
+	  12: INCEIPL       	$4
+
+	0x254792E8:  7F8803A6  mtlr r28
+	  13: GETL       	R28, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x254792EC:  82E1000C  lwz r23,12(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0xC, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R23
+	  20: INCEIPL       	$4
+
+	0x254792F0:  83010010  lwz r24,16(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R24
+	  25: INCEIPL       	$4
+
+	0x254792F4:  83210014  lwz r25,20(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R25
+	  30: INCEIPL       	$4
+
+	0x254792F8:  83410018  lwz r26,24(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x18, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R26
+	  35: INCEIPL       	$4
+
+	0x254792FC:  8361001C  lwz r27,28(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R27
+	  40: INCEIPL       	$4
+
+	0x25479300:  83810020  lwz r28,32(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x20, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R28
+	  45: INCEIPL       	$4
+
+	0x25479304:  83A10024  lwz r29,36(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x24, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R29
+	  50: INCEIPL       	$4
+
+	0x25479308:  83C10028  lwz r30,40(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x28, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R30
+	  55: INCEIPL       	$4
+
+	0x2547930C:  83E1002C  lwz r31,44(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x2C, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R31
+	  60: INCEIPL       	$4
+
+	0x25479310:  38210030  addi r1,r1,48
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x30, t48
+	  63: PUTL       	t48, R1
+	  64: INCEIPL       	$4
+
+	0x25479314:  4E800020  blr
+	  65: GETL       	LR, t50
+	  66: JMPo-r       	t50  ($4)
+
+
+. 0 254792DC 60
+. 7F E3 FB 78 83 81 00 34 82 C1 00 08 7F 88 03 A6 82 E1 00 0C 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+==== BB 207 (0x254723A0) approx BBs exec'd 0 ====
+
+	0x254723A0:  828E0000  lwz r20,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R20
+	   3: INCEIPL       	$4
+
+	0x254723A4:  2D940000  cmpi cr3,r20,0
+	   4: GETL       	R20, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x3, CR
+	   7: INCEIPL       	$4
+
+	0x254723A8:  418E0C90  bc 12,14,0x25473038
+	   8: Js14o       	$0x25473038
+
+
+. 0 254723A0 12
+. 82 8E 00 00 2D 94 00 00 41 8E 0C 90
+
+==== BB 208 (0x254723AC) approx BBs exec'd 0 ====
+
+	0x254723AC:  80D60000  lwz r6,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x254723B0:  9314014C  stw r24,332(r20)
+	   4: GETL       	R24, t4
+	   5: GETL       	R20, t6
+	   6: ADDL       	$0x14C, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x254723B4:  90D40150  stw r6,336(r20)
+	   9: GETL       	R6, t8
+	  10: GETL       	R20, t10
+	  11: ADDL       	$0x150, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x254723B8:  B2F40154  sth r23,340(r20)
+	  14: GETL       	R23, t12
+	  15: GETL       	R20, t14
+	  16: ADDL       	$0x154, t14
+	  17: STW       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x254723BC:  56EC2834  rlwinm r12,r23,5,0,26
+	  19: GETL       	R23, t16
+	  20: SHLL       	$0x5, t16
+	  21: PUTL       	t16, R12
+	  22: INCEIPL       	$4
+
+	0x254723C0:  81140178  lwz r8,376(r20)
+	  23: GETL       	R20, t18
+	  24: ADDL       	$0x178, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R8
+	  27: INCEIPL       	$4
+
+	0x254723C4:  7C8CC214  add r4,r12,r24
+	  28: GETL       	R12, t22
+	  29: GETL       	R24, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R4
+	  32: INCEIPL       	$4
+
+	0x254723C8:  80F4017C  lwz r7,380(r20)
+	  33: GETL       	R20, t26
+	  34: ADDL       	$0x17C, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R7
+	  37: INCEIPL       	$4
+
+	0x254723CC:  7E04C040  cmpl cr4,r4,r24
+	  38: GETL       	R4, t30
+	  39: GETL       	R24, t32
+	  40: CMPUL       	t30, t32, t34  (-rSo)
+	  41: ICRFL       	t34, $0x4, CR
+	  42: INCEIPL       	$4
+
+	0x254723D0:  39280001  addi r9,r8,1
+	  43: GETL       	R8, t36
+	  44: ADDL       	$0x1, t36
+	  45: PUTL       	t36, R9
+	  46: INCEIPL       	$4
+
+	0x254723D4:  3B270001  addi r25,r7,1
+	  47: GETL       	R7, t38
+	  48: ADDL       	$0x1, t38
+	  49: PUTL       	t38, R25
+	  50: INCEIPL       	$4
+
+	0x254723D8:  39000000  li r8,0
+	  51: MOVL       	$0x0, t40
+	  52: PUTL       	t40, R8
+	  53: INCEIPL       	$4
+
+	0x254723DC:  38E00000  li r7,0
+	  54: MOVL       	$0x0, t42
+	  55: PUTL       	t42, R7
+	  56: INCEIPL       	$4
+
+	0x254723E0:  3B80FFFF  li r28,-1
+	  57: MOVL       	$0xFFFFFFFF, t44
+	  58: PUTL       	t44, R28
+	  59: INCEIPL       	$4
+
+	0x254723E4:  7F0AC378  or r10,r24,r24
+	  60: GETL       	R24, t46
+	  61: PUTL       	t46, R10
+	  62: INCEIPL       	$4
+
+	0x254723E8:  939401A0  stw r28,416(r20)
+	  63: GETL       	R28, t48
+	  64: GETL       	R20, t50
+	  65: ADDL       	$0x1A0, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x254723EC:  91340178  stw r9,376(r20)
+	  68: GETL       	R9, t52
+	  69: GETL       	R20, t54
+	  70: ADDL       	$0x178, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x254723F0:  9334017C  stw r25,380(r20)
+	  73: GETL       	R25, t56
+	  74: GETL       	R20, t58
+	  75: ADDL       	$0x17C, t58
+	  76: STL       	t56, (t58)
+	  77: INCEIPL       	$4
+
+	0x254723F4:  911401A4  stw r8,420(r20)
+	  78: GETL       	R8, t60
+	  79: GETL       	R20, t62
+	  80: ADDL       	$0x1A4, t62
+	  81: STL       	t60, (t62)
+	  82: INCEIPL       	$4
+
+	0x254723F8:  90F401A8  stw r7,424(r20)
+	  83: GETL       	R7, t64
+	  84: GETL       	R20, t66
+	  85: ADDL       	$0x1A8, t66
+	  86: STL       	t64, (t66)
+	  87: INCEIPL       	$4
+
+	0x254723FC:  409100C8  bc 4,17,0x254724C4
+	  88: Jc17o       	$0x254724C4
+
+
+. 0 254723AC 84
+. 80 D6 00 00 93 14 01 4C 90 D4 01 50 B2 F4 01 54 56 EC 28 34 81 14 01 78 7C 8C C2 14 80 F4 01 7C 7E 04 C0 40 39 28 00 01 3B 27 00 01 39 00 00 00 38 E0 00 00 3B 80 FF FF 7F 0A C3 78 93 94 01 A0 91 34 01 78 93 34 01 7C 91 14 01 A4 90 F4 01 A8 40 91 00 C8
+
+==== BB 209 (0x25472400) approx BBs exec'd 0 ====
+
+	0x25472400:  3F606474  lis r27,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0x25472404:  83BE0044  lwz r29,68(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x44, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25472408:  80DE0048  lwz r6,72(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x48, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R6
+	  12: INCEIPL       	$4
+
+	0x2547240C:  6363E552  ori r3,r27,0xE552
+	  13: MOVL       	$0x6474E552, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x25472410:  38A00001  li r5,1
+	  16: MOVL       	$0x1, t12
+	  17: PUTL       	t12, R5
+	  18: INCEIPL       	$4
+
+	0x25472414:  48000028  b 0x2547243C
+	  19: JMPo       	$0x2547243C  ($4)
+
+
+. 0 25472400 24
+. 3F 60 64 74 83 BE 00 44 80 DE 00 48 63 63 E5 52 38 A0 00 01 48 00 00 28
+
+==== BB 210 (0x2547243C) approx BBs exec'd 0 ====
+
+	0x2547243C:  800A0000  lwz r0,0(r10)
+	   0: GETL       	R10, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25472440:  2F000006  cmpi cr6,r0,6
+	   4: GETL       	R0, t4
+	   5: MOVL       	$0x6, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25472444:  419A0798  bc 12,26,0x25472BDC
+	   9: Js26o       	$0x25472BDC
+
+
+. 0 2547243C 12
+. 80 0A 00 00 2F 00 00 06 41 9A 07 98
+
+==== BB 211 (0x25472BDC) approx BBs exec'd 0 ====
+
+	0x25472BDC:  816A0008  lwz r11,8(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25472BE0:  7F4BC050  subf r26,r11,r24
+	   5: GETL       	R11, t4
+	   6: GETL       	R24, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0x25472BE4:  93540000  stw r26,0(r20)
+	  10: GETL       	R26, t8
+	  11: GETL       	R20, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25472BE8:  4BFFF848  b 0x25472430
+	  14: JMPo       	$0x25472430  ($4)
+
+
+. 0 25472BDC 16
+. 81 6A 00 08 7F 4B C0 50 93 54 00 00 4B FF F8 48
+
+==== BB 212 (0x25472430) approx BBs exec'd 0 ====
+
+	0x25472430:  394A0020  addi r10,r10,32
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x20, t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0x25472434:  7F845040  cmpl cr7,r4,r10
+	   4: GETL       	R4, t2
+	   5: GETL       	R10, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25472438:  409D0084  bc 4,29,0x254724BC
+	   9: Jc29o       	$0x254724BC
+
+
+. 0 25472430 12
+. 39 4A 00 20 7F 84 50 40 40 9D 00 84
+
+==== BB 213 (0x25472448) approx BBs exec'd 0 ====
+
+	0x25472448:  2B800006  cmpli cr7,r0,6
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x2547244C:  40BDFFCC  bc 5,29,0x25472418
+	   5: Jc29o       	$0x25472418
+
+
+. 0 25472448 8
+. 2B 80 00 06 40 BD FF CC
+
+==== BB 214 (0x25472418) approx BBs exec'd 0 ====
+
+	0x25472418:  2C000002  cmpi cr0,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547241C:  41820854  bc 12,2,0x25472C70
+	   5: Js02o       	$0x25472C70
+
+
+. 0 25472418 8
+. 2C 00 00 02 41 82 08 54
+
+==== BB 215 (0x25472420) approx BBs exec'd 0 ====
+
+	0x25472420:  28800002  cmpli cr1,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25472424:  418507E4  bc 12,5,0x25472C08
+	   5: Js05o       	$0x25472C08
+
+
+. 0 25472420 8
+. 28 80 00 02 41 85 07 E4
+
+==== BB 216 (0x25472C08) approx BBs exec'd 0 ====
+
+	0x25472C08:  2F000003  cmpi cr6,r0,3
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x3, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25472C0C:  409AF824  bc 4,26,0x25472430
+	   5: Jc26o       	$0x25472430
+
+
+. 0 25472C08 8
+. 2F 00 00 03 40 9A F8 24
+
+==== BB 217 (0x25472C10) approx BBs exec'd 0 ====
+
+	0x25472C10:  82AE01C0  lwz r21,448(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1C0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x25472C14:  7FA7EB78  or r7,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x25472C18:  81740000  lwz r11,0(r20)
+	   8: GETL       	R20, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x25472C1C:  2F150000  cmpi cr6,r21,0
+	  12: GETL       	R21, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x6, CR
+	  15: INCEIPL       	$4
+
+	0x25472C20:  812A0008  lwz r9,8(r10)
+	  16: GETL       	R10, t14
+	  17: ADDL       	$0x8, t14
+	  18: LDL       	(t14), t16
+	  19: PUTL       	t16, R9
+	  20: INCEIPL       	$4
+
+	0x25472C24:  93AE01D4  stw r29,468(r14)
+	  21: GETL       	R29, t18
+	  22: GETL       	R14, t20
+	  23: ADDL       	$0x1D4, t20
+	  24: STL       	t18, (t20)
+	  25: INCEIPL       	$4
+
+	0x25472C28:  7C0B4A14  add r0,r11,r9
+	  26: GETL       	R11, t22
+	  27: GETL       	R9, t24
+	  28: ADDL       	t22, t24
+	  29: PUTL       	t24, R0
+	  30: INCEIPL       	$4
+
+	0x25472C2C:  901D0000  stw r0,0(r29)
+	  31: GETL       	R0, t26
+	  32: GETL       	R29, t28
+	  33: STL       	t26, (t28)
+	  34: INCEIPL       	$4
+
+	0x25472C30:  409A0444  bc 4,26,0x25473074
+	  35: Jc26o       	$0x25473074
+
+
+. 0 25472C10 36
+. 82 AE 01 C0 7F A7 EB 78 81 74 00 00 2F 15 00 00 81 2A 00 08 93 AE 01 D4 7C 0B 4A 14 90 1D 00 00 40 9A 04 44
+
+==== BB 218 (0x25473074) approx BBs exec'd 0 ====
+
+	0x25473074:  3AA00001  li r21,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R21
+	   2: INCEIPL       	$4
+
+	0x25473078:  4BFFF3B8  b 0x25472430
+	   3: JMPo       	$0x25472430  ($4)
+
+
+. 0 25473074 8
+. 3A A0 00 01 4B FF F3 B8
+
+==== BB 219 (0x25472428) approx BBs exec'd 0 ====
+
+	0x25472428:  2E000001  cmpi cr4,r0,1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x2547242C:  41920B80  bc 12,18,0x25472FAC
+	   5: Js18o       	$0x25472FAC
+
+
+. 0 25472428 8
+. 2E 00 00 01 41 92 0B 80
+
+==== BB 220 (0x25472FAC) approx BBs exec'd 0 ====
+
+	0x25472FAC:  80EA001C  lwz r7,28(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25472FB0:  810A0008  lwz r8,8(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25472FB4:  7D8700D0  neg r12,r7
+	  10: GETL       	R7, t8
+	  11: NEGL       	t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0x25472FB8:  80F40000  lwz r7,0(r20)
+	  14: GETL       	R20, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R7
+	  17: INCEIPL       	$4
+
+	0x25472FBC:  7D1C6038  and r28,r8,r12
+	  18: GETL       	R8, t14
+	  19: GETL       	R12, t16
+	  20: ANDL       	t14, t16
+	  21: PUTL       	t16, R28
+	  22: INCEIPL       	$4
+
+	0x25472FC0:  813401A0  lwz r9,416(r20)
+	  23: GETL       	R20, t18
+	  24: ADDL       	$0x1A0, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R9
+	  27: INCEIPL       	$4
+
+	0x25472FC4:  7C07E214  add r0,r7,r28
+	  28: GETL       	R7, t22
+	  29: GETL       	R28, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R0
+	  32: INCEIPL       	$4
+
+	0x25472FC8:  7F090040  cmpl cr6,r9,r0
+	  33: GETL       	R9, t26
+	  34: GETL       	R0, t28
+	  35: CMPUL       	t26, t28, t30  (-rSo)
+	  36: ICRFL       	t30, $0x6, CR
+	  37: INCEIPL       	$4
+
+	0x25472FCC:  419900B0  bc 12,25,0x2547307C
+	  38: Js25o       	$0x2547307C
+
+
+. 0 25472FAC 36
+. 80 EA 00 1C 81 0A 00 08 7D 87 00 D0 80 F4 00 00 7D 1C 60 38 81 34 01 A0 7C 07 E2 14 7F 09 00 40 41 99 00 B0
+
+==== BB 221 (0x2547307C) approx BBs exec'd 0 ====
+
+	0x2547307C:  901401A0  stw r0,416(r20)
+	   0: GETL       	R0, t0
+	   1: GETL       	R20, t2
+	   2: ADDL       	$0x1A0, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25473080:  810A0008  lwz r8,8(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25473084:  4BFFFF4C  b 0x25472FD0
+	  10: JMPo       	$0x25472FD0  ($4)
+
+
+. 0 2547307C 12
+. 90 14 01 A0 81 0A 00 08 4B FF FF 4C
+
+==== BB 222 (0x25472FD0) approx BBs exec'd 0 ====
+
+	0x25472FD0:  836A0014  lwz r27,20(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25472FD4:  7EE74214  add r23,r7,r8
+	   5: GETL       	R7, t4
+	   6: GETL       	R8, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R23
+	   9: INCEIPL       	$4
+
+	0x25472FD8:  811401A4  lwz r8,420(r20)
+	  10: GETL       	R20, t8
+	  11: ADDL       	$0x1A4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0x25472FDC:  7D77DA14  add r11,r23,r27
+	  15: GETL       	R23, t12
+	  16: GETL       	R27, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R11
+	  19: INCEIPL       	$4
+
+	0x25472FE0:  7F885840  cmpl cr7,r8,r11
+	  20: GETL       	R8, t16
+	  21: GETL       	R11, t18
+	  22: CMPUL       	t16, t18, t20  (-rSo)
+	  23: ICRFL       	t20, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x25472FE4:  409C0008  bc 4,28,0x25472FEC
+	  25: Jc28o       	$0x25472FEC
+
+
+. 0 25472FD0 24
+. 83 6A 00 14 7E E7 42 14 81 14 01 A4 7D 77 DA 14 7F 88 58 40 40 9C 00 08
+
+==== BB 223 (0x25472FE8) approx BBs exec'd 0 ====
+
+	0x25472FE8:  917401A4  stw r11,420(r20)
+	   0: GETL       	R11, t0
+	   1: GETL       	R20, t2
+	   2: ADDL       	$0x1A4, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472FEC:  834A0018  lwz r26,24(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0x25472FF0:  73490001  andi. r9,r26,0x1
+	  10: GETL       	R26, t8
+	  11: ANDL       	$0x1, t8
+	  12: PUTL       	t8, R9
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x25472FF4:  41A2F43C  bc 13,2,0x25472430
+	  16: Js02o       	$0x25472430
+
+
+. 0 25472FE8 16
+. 91 74 01 A4 83 4A 00 18 73 49 00 01 41 A2 F4 3C
+
+==== BB 224 (0x25472FF8) approx BBs exec'd 0 ====
+
+	0x25472FF8:  801401A8  lwz r0,424(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x1A8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25472FFC:  7C805840  cmpl cr1,r0,r11
+	   5: GETL       	R0, t4
+	   6: GETL       	R11, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25473000:  40A4F430  bc 5,4,0x25472430
+	  10: Jc04o       	$0x25472430
+
+
+. 0 25472FF8 12
+. 80 14 01 A8 7C 80 58 40 40 A4 F4 30
+
+==== BB 225 (0x25473004) approx BBs exec'd 0 ====
+
+	0x25473004:  917401A8  stw r11,424(r20)
+	   0: GETL       	R11, t0
+	   1: GETL       	R20, t2
+	   2: ADDL       	$0x1A8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25473008:  4BFFF428  b 0x25472430
+	   5: JMPo       	$0x25472430  ($4)
+
+
+. 0 25473004 8
+. 91 74 01 A8 4B FF F4 28
+
+==== BB 226 (0x25472C70) approx BBs exec'd 0 ====
+
+	0x25472C70:  82D40000  lwz r22,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R22
+	   3: INCEIPL       	$4
+
+	0x25472C74:  832A0008  lwz r25,8(r10)
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0x8, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R25
+	   8: INCEIPL       	$4
+
+	0x25472C78:  7C16CA14  add r0,r22,r25
+	   9: GETL       	R22, t8
+	  10: GETL       	R25, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R0
+	  13: INCEIPL       	$4
+
+	0x25472C7C:  90140008  stw r0,8(r20)
+	  14: GETL       	R0, t12
+	  15: GETL       	R20, t14
+	  16: ADDL       	$0x8, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25472C80:  4BFFF7B0  b 0x25472430
+	  19: JMPo       	$0x25472430  ($4)
+
+
+. 0 25472C70 20
+. 82 D4 00 00 83 2A 00 08 7C 16 CA 14 90 14 00 08 4B FF F7 B0
+
+==== BB 227 (0x25472450) approx BBs exec'd 0 ====
+
+	0x25472450:  3EE06474  lis r23,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R23
+	   2: INCEIPL       	$4
+
+	0x25472454:  62E9E551  ori r9,r23,0xE551
+	   3: MOVL       	$0x6474E551, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x25472458:  7F804800  cmp cr7,r0,r9
+	   6: GETL       	R0, t4
+	   7: GETL       	R9, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x2547245C:  419E0828  bc 12,30,0x25472C84
+	  11: Js30o       	$0x25472C84
+
+
+. 0 25472450 16
+. 3E E0 64 74 62 E9 E5 51 7F 80 48 00 41 9E 08 28
+
+==== BB 228 (0x25472C84) approx BBs exec'd 0 ====
+
+	0x25472C84:  80EA0018  lwz r7,24(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25472C88:  90EE0400  stw r7,1024(r14)
+	   5: GETL       	R7, t4
+	   6: GETL       	R14, t6
+	   7: ADDL       	$0x400, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25472C8C:  4BFFF7A4  b 0x25472430
+	  10: JMPo       	$0x25472430  ($4)
+
+
+. 0 25472C84 12
+. 80 EA 00 18 90 EE 04 00 4B FF F7 A4
+
+==== BB 229 (0x254724BC) approx BBs exec'd 0 ====
+
+	0x254724BC:  811401A4  lwz r8,420(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x1A4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254724C0:  80F401A8  lwz r7,424(r20)
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0x1A8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x254724C4:  81340218  lwz r9,536(r20)
+	  10: GETL       	R20, t8
+	  11: ADDL       	$0x218, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x254724C8:  2C090000  cmpi cr0,r9,0
+	  15: GETL       	R9, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x254724CC:  41820010  bc 12,2,0x254724DC
+	  19: Js02o       	$0x254724DC
+
+
+. 0 254724BC 20
+. 81 14 01 A4 80 F4 01 A8 81 34 02 18 2C 09 00 00 41 82 00 10
+
+==== BB 230 (0x254724DC) approx BBs exec'd 0 ====
+
+	0x254724DC:  2C880000  cmpi cr1,r8,0
+	   0: GETL       	R8, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x254724E0:  4086000C  bc 4,6,0x254724EC
+	   4: Jc06o       	$0x254724EC
+
+
+. 0 254724DC 8
+. 2C 88 00 00 40 86 00 0C
+
+==== BB 231 (0x254724EC) approx BBs exec'd 0 ====
+
+	0x254724EC:  2E070000  cmpi cr4,r7,0
+	   0: GETL       	R7, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x254724F0:  4092000C  bc 4,18,0x254724FC
+	   4: Jc18o       	$0x254724FC
+
+
+. 0 254724EC 8
+. 2E 07 00 00 40 92 00 0C
+
+==== BB 232 (0x254724FC) approx BBs exec'd 0 ====
+
+	0x254724FC:  80AE01D4  lwz r5,468(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1D4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25472500:  2F050000  cmpi cr6,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25472504:  409A001C  bc 4,26,0x25472520
+	   9: Jc26o       	$0x25472520
+
+
+. 0 254724FC 12
+. 80 AE 01 D4 2F 05 00 00 40 9A 00 1C
+
+==== BB 233 (0x25472520) approx BBs exec'd 0 ====
+
+	0x25472520:  2E130000  cmpi cr4,r19,0
+	   0: GETL       	R19, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25472524:  409201A8  bc 4,18,0x254726CC
+	   4: Jc18o       	$0x254726CC
+
+
+. 0 25472520 8
+. 2E 13 00 00 40 92 01 A8
+
+==== BB 234 (0x25472528) approx BBs exec'd 0 ====
+
+	0x25472528:  81540008  lwz r10,8(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547252C:  2C0A0000  cmpi cr0,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25472530:  41820194  bc 12,2,0x254726C4
+	   9: Js02o       	$0x254726C4
+
+
+. 0 25472528 12
+. 81 54 00 08 2C 0A 00 00 41 82 01 94
+
+==== BB 235 (0x25472534) approx BBs exec'd 0 ====
+
+	0x25472534:  816A0000  lwz r11,0(r10)
+	   0: GETL       	R10, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25472538:  39140020  addi r8,r20,32
+	   4: GETL       	R20, t4
+	   5: ADDL       	$0x20, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0x2547253C:  2C8B0000  cmpi cr1,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x25472540:  41860064  bc 12,6,0x254725A4
+	  12: Js06o       	$0x254725A4
+
+
+. 0 25472534 16
+. 81 6A 00 00 39 14 00 20 2C 8B 00 00 41 86 00 64
+
+==== BB 236 (0x25472544) approx BBs exec'd 0 ====
+
+	0x25472544:  3EC06FFF  lis r22,28671
+	   0: MOVL       	$0x6FFF0000, t0
+	   1: PUTL       	t0, R22
+	   2: INCEIPL       	$4
+
+	0x25472548:  3CC07000  lis r6,28672
+	   3: MOVL       	$0x70000000, t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0x2547254C:  3C006FFF  lis r0,28671
+	   6: MOVL       	$0x6FFF0000, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25472550:  3F406FFF  lis r26,28671
+	   9: MOVL       	$0x6FFF0000, t6
+	  10: PUTL       	t6, R26
+	  11: INCEIPL       	$4
+
+	0x25472554:  3EE06FFF  lis r23,28671
+	  12: MOVL       	$0x6FFF0000, t8
+	  13: PUTL       	t8, R23
+	  14: INCEIPL       	$4
+
+	0x25472558:  3E606FFF  lis r19,28671
+	  15: MOVL       	$0x6FFF0000, t10
+	  16: PUTL       	t10, R19
+	  17: INCEIPL       	$4
+
+	0x2547255C:  62C7FFFF  ori r7,r22,0xFFFF
+	  18: MOVL       	$0x6FFFFFFF, t12
+	  19: PUTL       	t12, R7
+	  20: INCEIPL       	$4
+
+	0x25472560:  60C60021  ori r6,r6,0x21
+	  21: MOVL       	$0x70000021, t14
+	  22: PUTL       	t14, R6
+	  23: INCEIPL       	$4
+
+	0x25472564:  6005FDFF  ori r5,r0,0xFDFF
+	  24: MOVL       	$0x6FFFFDFF, t16
+	  25: PUTL       	t16, R5
+	  26: INCEIPL       	$4
+
+	0x25472568:  6344FE34  ori r4,r26,0xFE34
+	  27: MOVL       	$0x6FFFFE34, t18
+	  28: PUTL       	t18, R4
+	  29: INCEIPL       	$4
+
+	0x2547256C:  62E3FEFF  ori r3,r23,0xFEFF
+	  30: MOVL       	$0x6FFFFEFF, t20
+	  31: PUTL       	t20, R3
+	  32: INCEIPL       	$4
+
+	0x25472570:  627DFF40  ori r29,r19,0xFF40
+	  33: MOVL       	$0x6FFFFF40, t22
+	  34: PUTL       	t22, R29
+	  35: INCEIPL       	$4
+
+	0x25472574:  2F0B0021  cmpi cr6,r11,33
+	  36: GETL       	R11, t24
+	  37: MOVL       	$0x21, t28
+	  38: CMPL       	t24, t28, t26  (-rSo)
+	  39: ICRFL       	t26, $0x6, CR
+	  40: INCEIPL       	$4
+
+	0x25472578:  5560103A  rlwinm r0,r11,2,0,29
+	  41: GETL       	R11, t30
+	  42: SHLL       	$0x2, t30
+	  43: PUTL       	t30, R0
+	  44: INCEIPL       	$4
+
+	0x2547257C:  40990018  bc 4,25,0x25472594
+	  45: Jc25o       	$0x25472594
+
+
+. 0 25472544 60
+. 3E C0 6F FF 3C C0 70 00 3C 00 6F FF 3F 40 6F FF 3E E0 6F FF 3E 60 6F FF 62 C7 FF FF 60 C6 00 21 60 05 FD FF 63 44 FE 34 62 E3 FE FF 62 7D FF 40 2F 0B 00 21 55 60 10 3A 40 99 00 18
+
+==== BB 237 (0x25472594) approx BBs exec'd 0 ====
+
+	0x25472594:  7D48012E  stwx r10,r8,r0
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t2, t0
+	   3: GETL       	R10, t4
+	   4: STL       	t4, (t0)
+	   5: INCEIPL       	$4
+
+	0x25472598:  856A0008  lwzu r11,8(r10)
+	   6: GETL       	R10, t6
+	   7: ADDL       	$0x8, t6
+	   8: PUTL       	t6, R10
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x2547259C:  2F8B0000  cmpi cr7,r11,0
+	  12: GETL       	R11, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0x254725A0:  409EFFD4  bc 4,30,0x25472574
+	  16: Jc30o       	$0x25472574
+
+
+. 0 25472594 16
+. 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+==== BB 238 (0x25472574) approx BBs exec'd 0 ====
+
+	0x25472574:  2F0B0021  cmpi cr6,r11,33
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x21, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25472578:  5560103A  rlwinm r0,r11,2,0,29
+	   5: GETL       	R11, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x2547257C:  40990018  bc 4,25,0x25472594
+	   9: Jc25o       	$0x25472594
+
+
+. 0 25472574 12
+. 2F 0B 00 21 55 60 10 3A 40 99 00 18
+
+==== BB 239 (0x25472580) approx BBs exec'd 0 ====
+
+	0x25472580:  7F2B3850  subf r25,r11,r7
+	   0: GETL       	R11, t0
+	   1: GETL       	R7, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x25472584:  2B99000F  cmpli cr7,r25,15
+	   5: GETL       	R25, t4
+	   6: MOVL       	$0xF, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x25472588:  419D0708  bc 12,29,0x25472C90
+	  10: Js29o       	$0x25472C90
+
+
+. 0 25472580 12
+. 7F 2B 38 50 2B 99 00 0F 41 9D 07 08
+
+==== BB 240 (0x2547258C) approx BBs exec'd 0 ====
+
+	0x2547258C:  7C0B3050  subf r0,r11,r6
+	   0: GETL       	R11, t0
+	   1: GETL       	R6, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25472590:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25472594:  7D48012E  stwx r10,r8,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R8, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R10, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x25472598:  856A0008  lwzu r11,8(r10)
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x8, t12
+	  17: PUTL       	t12, R10
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0x2547259C:  2F8B0000  cmpi cr7,r11,0
+	  21: GETL       	R11, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x254725A0:  409EFFD4  bc 4,30,0x25472574
+	  25: Jc30o       	$0x25472574
+
+
+. 0 2547258C 24
+. 7C 0B 30 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+==== BB 241 (0x254725A4) approx BBs exec'd 0 ====
+
+	0x254725A4:  81740000  lwz r11,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x254725A8:  2C0B0000  cmpi cr0,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x254725AC:  418200AC  bc 12,2,0x25472658
+	   8: Js02o       	$0x25472658
+
+
+. 0 254725A4 12
+. 81 74 00 00 2C 0B 00 00 41 82 00 AC
+
+==== BB 242 (0x25472658) approx BBs exec'd 0 ====
+
+	0x25472658:  81280078  lwz r9,120(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x78, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547265C:  2C090000  cmpi cr0,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25472660:  41820030  bc 12,2,0x25472690
+	   9: Js02o       	$0x25472690
+
+
+. 0 25472658 12
+. 81 28 00 78 2C 09 00 00 41 82 00 30
+
+==== BB 243 (0x25472690) approx BBs exec'd 0 ====
+
+	0x25472690:  81280098  lwz r9,152(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x98, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25472694:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25472698:  41860018  bc 12,6,0x254726B0
+	   9: Js06o       	$0x254726B0
+
+
+. 0 25472690 12
+. 81 28 00 98 2C 89 00 00 41 86 00 18
+
+==== BB 244 (0x254726B0) approx BBs exec'd 0 ====
+
+	0x254726B0:  81680074  lwz r11,116(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254726B4:  2F0B0000  cmpi cr6,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x254726B8:  419A000C  bc 12,26,0x254726C4
+	   9: Js26o       	$0x254726C4
+
+
+. 0 254726B0 12
+. 81 68 00 74 2F 0B 00 00 41 9A 00 0C
+
+==== BB 245 (0x254726C4) approx BBs exec'd 0 ====
+
+	0x254726C4:  7E83A378  or r3,r20,r20
+	   0: GETL       	R20, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254726C8:  48005EA5  bl 0x2547856C
+	   3: MOVL       	$0x254726CC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547856C  ($4)
+
+
+. 0 254726C4 8
+. 7E 83 A3 78 48 00 5E A5
+
+==== BB 246 (0x254726CC) approx BBs exec'd 0 ====
+
+	0x254726CC:  811F0030  lwz r8,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254726D0:  2F880002  cmpi cr7,r8,2
+	   5: GETL       	R8, t4
+	   6: MOVL       	$0x2, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x254726D4:  419E0A04  bc 12,30,0x254730D8
+	  10: Js30o       	$0x254730D8
+
+
+. 0 254726CC 12
+. 81 1F 00 30 2F 88 00 02 41 9E 0A 04
+
+==== BB 247 (0x254726D8) approx BBs exec'd 0 ====
+
+	0x254726D8:  4192088C  bc 12,18,0x25472F64
+	   0: Js18o       	$0x25472F64
+
+
+. 0 254726D8 4
+. 41 92 08 8C
+
+==== BB 248 (0x25472F64) approx BBs exec'd 0 ====
+
+	0x25472F64:  82BE004C  lwz r21,76(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x25472F68:  80750000  lwz r3,0(r21)
+	   5: GETL       	R21, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0x25472F6C:  48002169  bl 0x254750D4
+	   9: MOVL       	$0x25472F70, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0x254750D4  ($4)
+
+
+. 0 25472F64 12
+. 82 BE 00 4C 80 75 00 00 48 00 21 69
+
+==== BB 249 _dl_init_paths(0x254750D4) approx BBs exec'd 0 ====
+
+	0x254750D4:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254750D8:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254750DC:  48021F25  bl 0x25497000
+	   9: MOVL       	$0x254750E0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254750D4 12
+. 94 21 FF D0 7C 08 02 A6 48 02 1F 25
+
+==== BB 250 (0x254750E0) approx BBs exec'd 0 ====
+
+	0x254750E0:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254750E4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x254750E8:  93010010  stw r24,16(r1)
+	   8: GETL       	R24, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254750EC:  90010034  stw r0,52(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x34, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254750F0:  93210014  stw r25,20(r1)
+	  18: GETL       	R25, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x254750F4:  7C791B78  or r25,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0x254750F8:  831E04F4  lwz r24,1268(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4F4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R24
+	  30: INCEIPL       	$4
+
+	0x254750FC:  80BE0154  lwz r5,340(r30)
+	  31: GETL       	R30, t24
+	  32: ADDL       	$0x154, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R5
+	  35: INCEIPL       	$4
+
+	0x25475100:  80980010  lwz r4,16(r24)
+	  36: GETL       	R24, t28
+	  37: ADDL       	$0x10, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R4
+	  40: INCEIPL       	$4
+
+	0x25475104:  80DE015C  lwz r6,348(r30)
+	  41: GETL       	R30, t32
+	  42: ADDL       	$0x15C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R6
+	  45: INCEIPL       	$4
+
+	0x25475108:  8078000C  lwz r3,12(r24)
+	  46: GETL       	R24, t36
+	  47: ADDL       	$0xC, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R3
+	  50: INCEIPL       	$4
+
+	0x2547510C:  9361001C  stw r27,28(r1)
+	  51: GETL       	R27, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x1C, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0x25475110:  93810020  stw r28,32(r1)
+	  56: GETL       	R28, t44
+	  57: GETL       	R1, t46
+	  58: ADDL       	$0x20, t46
+	  59: STL       	t44, (t46)
+	  60: INCEIPL       	$4
+
+	0x25475114:  93E1002C  stw r31,44(r1)
+	  61: GETL       	R31, t48
+	  62: GETL       	R1, t50
+	  63: ADDL       	$0x2C, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0x25475118:  7C3F0B78  or r31,r1,r1
+	  66: GETL       	R1, t52
+	  67: PUTL       	t52, R31
+	  68: INCEIPL       	$4
+
+	0x2547511C:  92E1000C  stw r23,12(r1)
+	  69: GETL       	R23, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0xC, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0x25475120:  93410018  stw r26,24(r1)
+	  74: GETL       	R26, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x18, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0x25475124:  93A10024  stw r29,36(r1)
+	  79: GETL       	R29, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x24, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x25475128:  4800A629  bl 0x2547F750
+	  84: MOVL       	$0x2547512C, t66
+	  85: PUTL       	t66, LR
+	  86: JMPo-c       	$0x2547F750  ($4)
+
+
+. 0 254750E0 76
+. 93 C1 00 28 7F C8 02 A6 93 01 00 10 90 01 00 34 93 21 00 14 7C 79 1B 78 83 1E 04 F4 80 BE 01 54 80 98 00 10 80 DE 01 5C 80 78 00 0C 93 61 00 1C 93 81 00 20 93 E1 00 2C 7C 3F 0B 78 92 E1 00 0C 93 41 00 18 93 A1 00 24 48 00 A6 29
+
+==== BB 251 _dl_important_hwcaps(0x2547F750) approx BBs exec'd 0 ====
+
+	0x2547F750:  9421FFB0  stwu r1,-80(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFB0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547F754:  7CE802A6  mflr r7
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x2547F758:  480178A9  bl 0x25497000
+	   9: MOVL       	$0x2547F75C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547F750 12
+. 94 21 FF B0 7C E8 02 A6 48 01 78 A9
+
+==== BB 252 (0x2547F75C) approx BBs exec'd 0 ====
+
+	0x2547F75C:  93C10048  stw r30,72(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x48, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547F760:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547F764:  93810040  stw r28,64(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x40, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547F768:  90E10054  stw r7,84(r1)
+	  13: GETL       	R7, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x54, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547F76C:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0x2547F770:  92010010  stw r16,16(r1)
+	  21: GETL       	R16, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547F774:  7CD03378  or r16,r6,r6
+	  26: GETL       	R6, t20
+	  27: PUTL       	t20, R16
+	  28: INCEIPL       	$4
+
+	0x2547F778:  813E04F4  lwz r9,1268(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4F4, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0x2547F77C:  9261001C  stw r19,28(r1)
+	  34: GETL       	R19, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x1C, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x2547F780:  7C932378  or r19,r4,r4
+	  39: GETL       	R4, t30
+	  40: PUTL       	t30, R19
+	  41: INCEIPL       	$4
+
+	0x2547F784:  81690040  lwz r11,64(r9)
+	  42: GETL       	R9, t32
+	  43: ADDL       	$0x40, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R11
+	  46: INCEIPL       	$4
+
+	0x2547F788:  8009003C  lwz r0,60(r9)
+	  47: GETL       	R9, t36
+	  48: ADDL       	$0x3C, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R0
+	  51: INCEIPL       	$4
+
+	0x2547F78C:  92810020  stw r20,32(r1)
+	  52: GETL       	R20, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x20, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0x2547F790:  7CB42B78  or r20,r5,r5
+	  57: GETL       	R5, t44
+	  58: PUTL       	t44, R20
+	  59: INCEIPL       	$4
+
+	0x2547F794:  7C1C5838  and r28,r0,r11
+	  60: GETL       	R0, t46
+	  61: GETL       	R11, t48
+	  62: ANDL       	t46, t48
+	  63: PUTL       	t48, R28
+	  64: INCEIPL       	$4
+
+	0x2547F798:  92C10028  stw r22,40(r1)
+	  65: GETL       	R22, t50
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x28, t52
+	  68: STL       	t50, (t52)
+	  69: INCEIPL       	$4
+
+	0x2547F79C:  2F9C0000  cmpi cr7,r28,0
+	  70: GETL       	R28, t54
+	  71: CMP0L       	t54, t56  (-rSo)
+	  72: ICRFL       	t56, $0x7, CR
+	  73: INCEIPL       	$4
+
+	0x2547F7A0:  93E1004C  stw r31,76(r1)
+	  74: GETL       	R31, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x4C, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0x2547F7A4:  92210014  stw r17,20(r1)
+	  79: GETL       	R17, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x14, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x2547F7A8:  7C3F0B78  or r31,r1,r1
+	  84: GETL       	R1, t66
+	  85: PUTL       	t66, R31
+	  86: INCEIPL       	$4
+
+	0x2547F7AC:  92410018  stw r18,24(r1)
+	  87: GETL       	R18, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x18, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0x2547F7B0:  7C761B78  or r22,r3,r3
+	  92: GETL       	R3, t72
+	  93: PUTL       	t72, R22
+	  94: INCEIPL       	$4
+
+	0x2547F7B4:  92A10024  stw r21,36(r1)
+	  95: GETL       	R21, t74
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x24, t76
+	  98: STL       	t74, (t76)
+	  99: INCEIPL       	$4
+
+	0x2547F7B8:  3003FFFF  addic r0,r3,-1
+	 100: GETL       	R3, t78
+	 101: ADCL       	$0xFFFFFFFF, t78  (-wCa)
+	 102: PUTL       	t78, R0
+	 103: INCEIPL       	$4
+
+	0x2547F7BC:  7D401910  subfe r10,r0,r3
+	 104: GETL       	R0, t80
+	 105: GETL       	R3, t82
+	 106: SBBL       	t80, t82  (-rCa-wCa)
+	 107: PUTL       	t82, R10
+	 108: INCEIPL       	$4
+
+	0x2547F7C0:  92E1002C  stw r23,44(r1)
+	 109: GETL       	R23, t84
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x2C, t86
+	 112: STL       	t84, (t86)
+	 113: INCEIPL       	$4
+
+	0x2547F7C4:  39600000  li r11,0
+	 114: MOVL       	$0x0, t88
+	 115: PUTL       	t88, R11
+	 116: INCEIPL       	$4
+
+	0x2547F7C8:  93010030  stw r24,48(r1)
+	 117: GETL       	R24, t90
+	 118: GETL       	R1, t92
+	 119: ADDL       	$0x30, t92
+	 120: STL       	t90, (t92)
+	 121: INCEIPL       	$4
+
+	0x2547F7CC:  93210034  stw r25,52(r1)
+	 122: GETL       	R25, t94
+	 123: GETL       	R1, t96
+	 124: ADDL       	$0x34, t96
+	 125: STL       	t94, (t96)
+	 126: INCEIPL       	$4
+
+	0x2547F7D0:  93410038  stw r26,56(r1)
+	 127: GETL       	R26, t98
+	 128: GETL       	R1, t100
+	 129: ADDL       	$0x38, t100
+	 130: STL       	t98, (t100)
+	 131: INCEIPL       	$4
+
+	0x2547F7D4:  9361003C  stw r27,60(r1)
+	 132: GETL       	R27, t102
+	 133: GETL       	R1, t104
+	 134: ADDL       	$0x3C, t104
+	 135: STL       	t102, (t104)
+	 136: INCEIPL       	$4
+
+	0x2547F7D8:  93A10044  stw r29,68(r1)
+	 137: GETL       	R29, t106
+	 138: GETL       	R1, t108
+	 139: ADDL       	$0x44, t108
+	 140: STL       	t106, (t108)
+	 141: INCEIPL       	$4
+
+	0x2547F7DC:  9181000C  stw r12,12(r1)
+	 142: GETL       	R12, t110
+	 143: GETL       	R1, t112
+	 144: ADDL       	$0xC, t112
+	 145: STL       	t110, (t112)
+	 146: INCEIPL       	$4
+
+	0x2547F7E0:  419E0028  bc 12,30,0x2547F808
+	 147: Js30o       	$0x2547F808
+
+
+. 0 2547F75C 136
+. 93 C1 00 48 7F C8 02 A6 93 81 00 40 90 E1 00 54 7D 80 00 26 92 01 00 10 7C D0 33 78 81 3E 04 F4 92 61 00 1C 7C 93 23 78 81 69 00 40 80 09 00 3C 92 81 00 20 7C B4 2B 78 7C 1C 58 38 92 C1 00 28 2F 9C 00 00 93 E1 00 4C 92 21 00 14 7C 3F 0B 78 92 41 00 18 7C 76 1B 78 92 A1 00 24 30 03 FF FF 7D 40 19 10 92 E1 00 2C 39 60 00 00 93 01 00 30 93 21 00 34 93 41 00 38 93 61 00 3C 93 A1 00 44 91 81 00 0C 41 9E 00 28
+
+==== BB 253 (0x2547F808) approx BBs exec'd 0 ====
+
+	0x2547F808:  3B0A0001  addi r24,r10,1
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0x2547F80C:  82210000  lwz r17,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R17
+	   7: INCEIPL       	$4
+
+	0x2547F810:  57121838  rlwinm r18,r24,3,0,28
+	   8: GETL       	R24, t6
+	   9: SHLL       	$0x3, t6
+	  10: PUTL       	t6, R18
+	  11: INCEIPL       	$4
+
+	0x2547F814:  3B200000  li r25,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R25
+	  14: INCEIPL       	$4
+
+	0x2547F818:  3AF2001E  addi r23,r18,30
+	  15: GETL       	R18, t10
+	  16: ADDL       	$0x1E, t10
+	  17: PUTL       	t10, R23
+	  18: INCEIPL       	$4
+
+	0x2547F81C:  3B400000  li r26,0
+	  19: MOVL       	$0x0, t12
+	  20: PUTL       	t12, R26
+	  21: INCEIPL       	$4
+
+	0x2547F820:  56F50036  rlwinm r21,r23,0,0,27
+	  22: GETL       	R23, t14
+	  23: ANDL       	$0xFFFFFFF0, t14
+	  24: PUTL       	t14, R21
+	  25: INCEIPL       	$4
+
+	0x2547F824:  7D5500D0  neg r10,r21
+	  26: GETL       	R21, t16
+	  27: NEGL       	t16
+	  28: PUTL       	t16, R10
+	  29: INCEIPL       	$4
+
+	0x2547F828:  7E21516E  stwux r17,r1,r10
+	  30: GETL       	R10, t18
+	  31: GETL       	R1, t20
+	  32: ADDL       	t20, t18
+	  33: PUTL       	t18, R1
+	  34: GETL       	R17, t22
+	  35: STL       	t22, (t18)
+	  36: INCEIPL       	$4
+
+	0x2547F82C:  39010017  addi r8,r1,23
+	  37: GETL       	R1, t24
+	  38: ADDL       	$0x17, t24
+	  39: PUTL       	t24, R8
+	  40: INCEIPL       	$4
+
+	0x2547F830:  551B0036  rlwinm r27,r8,0,0,27
+	  41: GETL       	R8, t26
+	  42: ANDL       	$0xFFFFFFF0, t26
+	  43: PUTL       	t26, R27
+	  44: INCEIPL       	$4
+
+	0x2547F834:  419E0048  bc 12,30,0x2547F87C
+	  45: Js30o       	$0x2547F87C
+
+
+. 0 2547F808 48
+. 3B 0A 00 01 82 21 00 00 57 12 18 38 3B 20 00 00 3A F2 00 1E 3B 40 00 00 56 F5 00 36 7D 55 00 D0 7E 21 51 6E 39 01 00 17 55 1B 00 36 41 9E 00 48
+
+==== BB 254 (0x2547F87C) approx BBs exec'd 0 ====
+
+	0x2547F87C:  2F960000  cmpi cr7,r22,0
+	   0: GETL       	R22, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547F880:  419E0018  bc 12,30,0x2547F898
+	   4: Js30o       	$0x2547F898
+
+
+. 0 2547F87C 8
+. 2F 96 00 00 41 9E 00 18
+
+==== BB 255 (0x2547F898) approx BBs exec'd 0 ====
+
+	0x2547F898:  2E180001  cmpi cr4,r24,1
+	   0: GETL       	R24, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x2547F89C:  57291838  rlwinm r9,r25,3,0,28
+	   5: GETL       	R25, t6
+	   6: SHLL       	$0x3, t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x2547F8A0:  833E0410  lwz r25,1040(r30)
+	   9: GETL       	R30, t8
+	  10: ADDL       	$0x410, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R25
+	  13: INCEIPL       	$4
+
+	0x2547F8A4:  7E69DA14  add r19,r9,r27
+	  14: GETL       	R9, t12
+	  15: GETL       	R27, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R19
+	  18: INCEIPL       	$4
+
+	0x2547F8A8:  3AC00003  li r22,3
+	  19: MOVL       	$0x3, t16
+	  20: PUTL       	t16, R22
+	  21: INCEIPL       	$4
+
+	0x2547F8AC:  7F29D92E  stwx r25,r9,r27
+	  22: GETL       	R27, t18
+	  23: GETL       	R9, t20
+	  24: ADDL       	t20, t18
+	  25: GETL       	R25, t22
+	  26: STL       	t22, (t18)
+	  27: INCEIPL       	$4
+
+	0x2547F8B0:  92D30004  stw r22,4(r19)
+	  28: GETL       	R22, t24
+	  29: GETL       	R19, t26
+	  30: ADDL       	$0x4, t26
+	  31: STL       	t24, (t26)
+	  32: INCEIPL       	$4
+
+	0x2547F8B4:  41920324  bc 12,18,0x2547FBD8
+	  33: Js18o       	$0x2547FBD8
+
+
+. 0 2547F898 32
+. 2E 18 00 01 57 29 18 38 83 3E 04 10 7E 69 DA 14 3A C0 00 03 7F 29 D9 2E 92 D3 00 04 41 92 03 24
+
+==== BB 256 (0x2547FBD8) approx BBs exec'd 0 ====
+
+	0x2547FBD8:  819B0004  lwz r12,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x2547FBDC:  390C0001  addi r8,r12,1
+	   5: GETL       	R12, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x2547FBE0:  4BFFFD30  b 0x2547F910
+	   9: JMPo       	$0x2547F910  ($4)
+
+
+. 0 2547FBD8 12
+. 81 9B 00 04 39 0C 00 01 4B FF FD 30
+
+==== BB 257 (0x2547F910) approx BBs exec'd 0 ====
+
+	0x2547F910:  3BA00001  li r29,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0x2547F914:  7FAAC030  slw r10,r29,r24
+	   3: GETL       	R29, t4
+	   4: GETL       	R24, t2
+	   5: SHLL       	t2, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0x2547F918:  555A1838  rlwinm r26,r10,3,0,28
+	   8: GETL       	R10, t6
+	   9: SHLL       	$0x3, t6
+	  10: PUTL       	t6, R26
+	  11: INCEIPL       	$4
+
+	0x2547F91C:  91540000  stw r10,0(r20)
+	  12: GETL       	R10, t8
+	  13: GETL       	R20, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547F920:  7C7A4214  add r3,r26,r8
+	  16: GETL       	R26, t12
+	  17: GETL       	R8, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R3
+	  20: INCEIPL       	$4
+
+	0x2547F924:  48018101  bl 0x25497A24
+	  21: MOVL       	$0x2547F928, t16
+	  22: PUTL       	t16, LR
+	  23: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 2547F910 24
+. 3B A0 00 01 7F AA C0 30 55 5A 18 38 91 54 00 00 7C 7A 42 14 48 01 81 01
+
+==== BB 258 (0x2547FDC8) approx BBs exec'd 0 ====
+
+	0x2547FDC8:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547FDCC:  7C895214  add r4,r9,r10
+	   4: GETL       	R9, t4
+	   5: GETL       	R10, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x2547FDD0:  3804FFFF  addi r0,r4,-1
+	   9: GETL       	R4, t8
+	  10: ADDL       	$0xFFFFFFFF, t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x2547FDD4:  7C0AE838  and r10,r0,r29
+	  13: GETL       	R0, t10
+	  14: GETL       	R29, t12
+	  15: ANDL       	t10, t12
+	  16: PUTL       	t12, R10
+	  17: INCEIPL       	$4
+
+	0x2547FDD8:  7D8AE214  add r12,r10,r28
+	  18: GETL       	R10, t14
+	  19: GETL       	R28, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R12
+	  22: INCEIPL       	$4
+
+	0x2547FDDC:  915F0000  stw r10,0(r31)
+	  23: GETL       	R10, t18
+	  24: GETL       	R31, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0x2547FDE0:  7C0C5840  cmpl cr0,r12,r11
+	  27: GETL       	R12, t22
+	  28: GETL       	R11, t24
+	  29: CMPUL       	t22, t24, t26  (-rSo)
+	  30: ICRFL       	t26, $0x0, CR
+	  31: INCEIPL       	$4
+
+	0x2547FDE4:  4080003C  bc 4,0,0x2547FE20
+	  32: Jc00o       	$0x2547FE20
+
+
+. 0 2547FDC8 32
+. 81 3F 00 00 7C 89 52 14 38 04 FF FF 7C 0A E8 38 7D 8A E2 14 91 5F 00 00 7C 0C 58 40 40 80 00 3C
+
+==== BB 259 (0x2547F928) approx BBs exec'd 0 ====
+
+	0x2547F928:  7C761B79  or. r22,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R22
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547F92C:  418202E8  bc 12,2,0x2547FC14
+	   5: Js02o       	$0x2547FC14
+
+
+. 0 2547F928 8
+. 7C 76 1B 79 41 82 02 E8
+
+==== BB 260 (0x2547F930) approx BBs exec'd 0 ====
+
+	0x2547F930:  419201FC  bc 12,18,0x2547FB2C
+	   0: Js18o       	$0x2547FB2C
+
+
+. 0 2547F930 4
+. 41 92 01 FC
+
+==== BB 261 (0x2547FB2C) approx BBs exec'd 0 ====
+
+	0x2547FB2C:  83340000  lwz r25,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R25
+	   3: INCEIPL       	$4
+
+	0x2547FB30:  39000000  li r8,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R8
+	   6: INCEIPL       	$4
+
+	0x2547FB34:  827B0004  lwz r19,4(r27)
+	   7: GETL       	R27, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R19
+	  11: INCEIPL       	$4
+
+	0x2547FB38:  3B00002F  li r24,47
+	  12: MOVL       	$0x2F, t10
+	  13: PUTL       	t10, R24
+	  14: INCEIPL       	$4
+
+	0x2547FB3C:  57291838  rlwinm r9,r25,3,0,28
+	  15: GETL       	R25, t12
+	  16: SHLL       	$0x3, t12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0x2547FB40:  9116000C  stw r8,12(r22)
+	  19: GETL       	R8, t14
+	  20: GETL       	R22, t16
+	  21: ADDL       	$0xC, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547FB44:  7C69B214  add r3,r9,r22
+	  24: GETL       	R9, t18
+	  25: GETL       	R22, t20
+	  26: ADDL       	t18, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x2547FB48:  3B930001  addi r28,r19,1
+	  29: GETL       	R19, t22
+	  30: ADDL       	$0x1, t22
+	  31: PUTL       	t22, R28
+	  32: INCEIPL       	$4
+
+	0x2547FB4C:  93960004  stw r28,4(r22)
+	  33: GETL       	R28, t24
+	  34: GETL       	R22, t26
+	  35: ADDL       	$0x4, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x2547FB50:  3A400002  li r18,2
+	  38: MOVL       	$0x2, t28
+	  39: PUTL       	t28, R18
+	  40: INCEIPL       	$4
+
+	0x2547FB54:  90760000  stw r3,0(r22)
+	  41: GETL       	R3, t30
+	  42: GETL       	R22, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x2547FB58:  90760008  stw r3,8(r22)
+	  45: GETL       	R3, t34
+	  46: GETL       	R22, t36
+	  47: ADDL       	$0x8, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0x2547FB5C:  80BB0004  lwz r5,4(r27)
+	  50: GETL       	R27, t38
+	  51: ADDL       	$0x4, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R5
+	  54: INCEIPL       	$4
+
+	0x2547FB60:  809B0000  lwz r4,0(r27)
+	  55: GETL       	R27, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R4
+	  58: INCEIPL       	$4
+
+	0x2547FB64:  48003EBD  bl 0x25483A20
+	  59: MOVL       	$0x2547FB68, t46
+	  60: PUTL       	t46, LR
+	  61: JMPo-c       	$0x25483A20  ($4)
+
+
+. 0 2547FB2C 60
+. 83 34 00 00 39 00 00 00 82 7B 00 04 3B 00 00 2F 57 29 18 38 91 16 00 0C 7C 69 B2 14 3B 93 00 01 93 96 00 04 3A 40 00 02 90 76 00 00 90 76 00 08 80 BB 00 04 80 9B 00 00 48 00 3E BD
+
+==== BB 262 mempcpy(0x25483A20) approx BBs exec'd 0 ====
+
+	0x25483A20:  2B85000F  cmpli cr7,r5,15
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0xF, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25483A24:  7C0802A6  mflr r0
+	   5: GETL       	LR, t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0x25483A28:  9421FFE0  stwu r1,-32(r1)
+	   8: GETL       	R1, t8
+	   9: GETL       	R1, t10
+	  10: ADDL       	$0xFFFFFFE0, t10
+	  11: PUTL       	t10, R1
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25483A2C:  93A10014  stw r29,20(r1)
+	  14: GETL       	R29, t12
+	  15: GETL       	R1, t14
+	  16: ADDL       	$0x14, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25483A30:  7C7D1B78  or r29,r3,r3
+	  19: GETL       	R3, t16
+	  20: PUTL       	t16, R29
+	  21: INCEIPL       	$4
+
+	0x25483A34:  93E1001C  stw r31,28(r1)
+	  22: GETL       	R31, t18
+	  23: GETL       	R1, t20
+	  24: ADDL       	$0x1C, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0x25483A38:  7C9F2378  or r31,r4,r4
+	  27: GETL       	R4, t22
+	  28: PUTL       	t22, R31
+	  29: INCEIPL       	$4
+
+	0x25483A3C:  93810010  stw r28,16(r1)
+	  30: GETL       	R28, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x10, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25483A40:  93C10018  stw r30,24(r1)
+	  35: GETL       	R30, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x18, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25483A44:  90010024  stw r0,36(r1)
+	  40: GETL       	R0, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x24, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0x25483A48:  409D0058  bc 4,29,0x25483AA0
+	  45: Jc29o       	$0x25483AA0
+
+
+. 0 25483A20 44
+. 2B 85 00 0F 7C 08 02 A6 94 21 FF E0 93 A1 00 14 7C 7D 1B 78 93 E1 00 1C 7C 9F 23 78 93 81 00 10 93 C1 00 18 90 01 00 24 40 9D 00 58
+
+==== BB 263 (0x25483AA0) approx BBs exec'd 0 ====
+
+	0x25483AA0:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x25483AA4:  4186001C  bc 12,6,0x25483AC0
+	   4: Js06o       	$0x25483AC0
+
+
+. 0 25483AA0 8
+. 2C 85 00 00 41 86 00 1C
+
+==== BB 264 (0x25483AA8) approx BBs exec'd 0 ====
+
+	0x25483AA8:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x25483AAC:  88FF0000  lbz r7,0(r31)
+	   3: GETL       	R31, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R7
+	   6: INCEIPL       	$4
+
+	0x25483AB0:  3BFF0001  addi r31,r31,1
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0x1, t6
+	   9: PUTL       	t6, R31
+	  10: INCEIPL       	$4
+
+	0x25483AB4:  98FD0000  stb r7,0(r29)
+	  11: GETL       	R7, t8
+	  12: GETL       	R29, t10
+	  13: STB       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25483AB8:  3BBD0001  addi r29,r29,1
+	  15: GETL       	R29, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x25483ABC:  4200FFF0  bc 16,0,0x25483AAC
+	  19: GETL       	CTR, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, CTR
+	  22: JIFZL       	t14, $0x25483AC0
+	  23: JMPo       	$0x25483AAC  ($4)
+
+
+. 0 25483AA8 24
+. 7C A9 03 A6 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+
+==== BB 265 (0x25483AAC) approx BBs exec'd 0 ====
+
+	0x25483AAC:  88FF0000  lbz r7,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25483AB0:  3BFF0001  addi r31,r31,1
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0x25483AB4:  98FD0000  stb r7,0(r29)
+	   8: GETL       	R7, t6
+	   9: GETL       	R29, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25483AB8:  3BBD0001  addi r29,r29,1
+	  12: GETL       	R29, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0x25483ABC:  4200FFF0  bc 16,0,0x25483AAC
+	  16: GETL       	CTR, t12
+	  17: ADDL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, CTR
+	  19: JIFZL       	t12, $0x25483AC0
+	  20: JMPo       	$0x25483AAC  ($4)
+
+
+. 0 25483AAC 20
+. 88 FF 00 00 3B FF 00 01 98 FD 00 00 3B BD 00 01 42 00 FF F0
+
+==== BB 266 (0x25483AC0) approx BBs exec'd 0 ====
+
+	0x25483AC0:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25483AC4:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483AC8:  83810010  lwz r28,16(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x25483ACC:  83A10014  lwz r29,20(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x14, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0x25483AD0:  7D0803A6  mtlr r8
+	  18: GETL       	R8, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x25483AD4:  83C10018  lwz r30,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R30
+	  25: INCEIPL       	$4
+
+	0x25483AD8:  83E1001C  lwz r31,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0x25483ADC:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0x25483AE0:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+. 0 25483AC0 36
+. 81 01 00 24 7F A3 EB 78 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 267 (0x2547FB68) approx BBs exec'd 0 ====
+
+	0x2547FB68:  9B030000  stb r24,0(r3)
+	   0: GETL       	R24, t0
+	   1: GETL       	R3, t2
+	   2: STB       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x2547FB6C:  92540000  stw r18,0(r20)
+	   4: GETL       	R18, t4
+	   5: GETL       	R20, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0x2547FB70:  83760004  lwz r27,4(r22)
+	   8: GETL       	R22, t8
+	   9: ADDL       	$0x4, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R27
+	  12: INCEIPL       	$4
+
+	0x2547FB74:  7EC3B378  or r3,r22,r22
+	  13: GETL       	R22, t12
+	  14: PUTL       	t12, R3
+	  15: INCEIPL       	$4
+
+	0x2547FB78:  93700000  stw r27,0(r16)
+	  16: GETL       	R27, t14
+	  17: GETL       	R16, t16
+	  18: STL       	t14, (t16)
+	  19: INCEIPL       	$4
+
+	0x2547FB7C:  81010000  lwz r8,0(r1)
+	  20: GETL       	R1, t18
+	  21: LDL       	(t18), t20
+	  22: PUTL       	t20, R8
+	  23: INCEIPL       	$4
+
+	0x2547FB80:  82080004  lwz r16,4(r8)
+	  24: GETL       	R8, t22
+	  25: ADDL       	$0x4, t22
+	  26: LDL       	(t22), t24
+	  27: PUTL       	t24, R16
+	  28: INCEIPL       	$4
+
+	0x2547FB84:  80E8FFBC  lwz r7,-68(r8)
+	  29: GETL       	R8, t26
+	  30: ADDL       	$0xFFFFFFBC, t26
+	  31: LDL       	(t26), t28
+	  32: PUTL       	t28, R7
+	  33: INCEIPL       	$4
+
+	0x2547FB88:  7E0803A6  mtlr r16
+	  34: GETL       	R16, t30
+	  35: PUTL       	t30, LR
+	  36: INCEIPL       	$4
+
+	0x2547FB8C:  8228FFC4  lwz r17,-60(r8)
+	  37: GETL       	R8, t32
+	  38: ADDL       	$0xFFFFFFC4, t32
+	  39: LDL       	(t32), t34
+	  40: PUTL       	t34, R17
+	  41: INCEIPL       	$4
+
+	0x2547FB90:  8208FFC0  lwz r16,-64(r8)
+	  42: GETL       	R8, t36
+	  43: ADDL       	$0xFFFFFFC0, t36
+	  44: LDL       	(t36), t38
+	  45: PUTL       	t38, R16
+	  46: INCEIPL       	$4
+
+	0x2547FB94:  7CE08120  mtcrf 0x8,r7
+	  47: GETL       	R7, t40
+	  48: ICRFL       	t40, $0x4, CR
+	  49: INCEIPL       	$4
+
+	0x2547FB98:  8248FFC8  lwz r18,-56(r8)
+	  50: GETL       	R8, t42
+	  51: ADDL       	$0xFFFFFFC8, t42
+	  52: LDL       	(t42), t44
+	  53: PUTL       	t44, R18
+	  54: INCEIPL       	$4
+
+	0x2547FB9C:  8268FFCC  lwz r19,-52(r8)
+	  55: GETL       	R8, t46
+	  56: ADDL       	$0xFFFFFFCC, t46
+	  57: LDL       	(t46), t48
+	  58: PUTL       	t48, R19
+	  59: INCEIPL       	$4
+
+	0x2547FBA0:  8288FFD0  lwz r20,-48(r8)
+	  60: GETL       	R8, t50
+	  61: ADDL       	$0xFFFFFFD0, t50
+	  62: LDL       	(t50), t52
+	  63: PUTL       	t52, R20
+	  64: INCEIPL       	$4
+
+	0x2547FBA4:  82A8FFD4  lwz r21,-44(r8)
+	  65: GETL       	R8, t54
+	  66: ADDL       	$0xFFFFFFD4, t54
+	  67: LDL       	(t54), t56
+	  68: PUTL       	t56, R21
+	  69: INCEIPL       	$4
+
+	0x2547FBA8:  82C8FFD8  lwz r22,-40(r8)
+	  70: GETL       	R8, t58
+	  71: ADDL       	$0xFFFFFFD8, t58
+	  72: LDL       	(t58), t60
+	  73: PUTL       	t60, R22
+	  74: INCEIPL       	$4
+
+	0x2547FBAC:  82E8FFDC  lwz r23,-36(r8)
+	  75: GETL       	R8, t62
+	  76: ADDL       	$0xFFFFFFDC, t62
+	  77: LDL       	(t62), t64
+	  78: PUTL       	t64, R23
+	  79: INCEIPL       	$4
+
+	0x2547FBB0:  8308FFE0  lwz r24,-32(r8)
+	  80: GETL       	R8, t66
+	  81: ADDL       	$0xFFFFFFE0, t66
+	  82: LDL       	(t66), t68
+	  83: PUTL       	t68, R24
+	  84: INCEIPL       	$4
+
+	0x2547FBB4:  8328FFE4  lwz r25,-28(r8)
+	  85: GETL       	R8, t70
+	  86: ADDL       	$0xFFFFFFE4, t70
+	  87: LDL       	(t70), t72
+	  88: PUTL       	t72, R25
+	  89: INCEIPL       	$4
+
+	0x2547FBB8:  8348FFE8  lwz r26,-24(r8)
+	  90: GETL       	R8, t74
+	  91: ADDL       	$0xFFFFFFE8, t74
+	  92: LDL       	(t74), t76
+	  93: PUTL       	t76, R26
+	  94: INCEIPL       	$4
+
+	0x2547FBBC:  8368FFEC  lwz r27,-20(r8)
+	  95: GETL       	R8, t78
+	  96: ADDL       	$0xFFFFFFEC, t78
+	  97: LDL       	(t78), t80
+	  98: PUTL       	t80, R27
+	  99: INCEIPL       	$4
+
+	0x2547FBC0:  8388FFF0  lwz r28,-16(r8)
+	 100: GETL       	R8, t82
+	 101: ADDL       	$0xFFFFFFF0, t82
+	 102: LDL       	(t82), t84
+	 103: PUTL       	t84, R28
+	 104: INCEIPL       	$4
+
+	0x2547FBC4:  83A8FFF4  lwz r29,-12(r8)
+	 105: GETL       	R8, t86
+	 106: ADDL       	$0xFFFFFFF4, t86
+	 107: LDL       	(t86), t88
+	 108: PUTL       	t88, R29
+	 109: INCEIPL       	$4
+
+	0x2547FBC8:  83C8FFF8  lwz r30,-8(r8)
+	 110: GETL       	R8, t90
+	 111: ADDL       	$0xFFFFFFF8, t90
+	 112: LDL       	(t90), t92
+	 113: PUTL       	t92, R30
+	 114: INCEIPL       	$4
+
+	0x2547FBCC:  83E8FFFC  lwz r31,-4(r8)
+	 115: GETL       	R8, t94
+	 116: ADDL       	$0xFFFFFFFC, t94
+	 117: LDL       	(t94), t96
+	 118: PUTL       	t96, R31
+	 119: INCEIPL       	$4
+
+	0x2547FBD0:  7D014378  or r1,r8,r8
+	 120: GETL       	R8, t98
+	 121: PUTL       	t98, R1
+	 122: INCEIPL       	$4
+
+	0x2547FBD4:  4E800020  blr
+	 123: GETL       	LR, t100
+	 124: JMPo-r       	t100  ($4)
+
+
+. 0 2547FB68 112
+. 9B 03 00 00 92 54 00 00 83 76 00 04 7E C3 B3 78 93 70 00 00 81 01 00 00 82 08 00 04 80 E8 FF BC 7E 08 03 A6 82 28 FF C4 82 08 FF C0 7C E0 81 20 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+==== BB 268 (0x2547512C) approx BBs exec'd 0 ====
+
+	0x2547512C:  813E0160  lwz r9,352(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x160, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475130:  837E0164  lwz r27,356(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x164, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R27
+	   9: INCEIPL       	$4
+
+	0x25475134:  90690000  stw r3,0(r9)
+	  10: GETL       	R3, t8
+	  11: GETL       	R9, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25475138:  3860000C  li r3,12
+	  14: MOVL       	$0xC, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0x2547513C:  480228E9  bl 0x25497A24
+	  17: MOVL       	$0x25475140, t14
+	  18: PUTL       	t14, LR
+	  19: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 2547512C 20
+. 81 3E 01 60 83 7E 01 64 90 69 00 00 38 60 00 0C 48 02 28 E9
+
+==== BB 269 (0x25475140) approx BBs exec'd 0 ====
+
+	0x25475140:  7C7C1B79  or. r28,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R28
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25475144:  907B0000  stw r3,0(r27)
+	   5: GETL       	R3, t4
+	   6: GETL       	R27, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x25475148:  41820188  bc 12,2,0x254752D0
+	   9: Js02o       	$0x254752D0
+
+
+. 0 25475140 12
+. 7C 7C 1B 79 90 7B 00 00 41 82 01 88
+
+==== BB 270 (0x2547514C) approx BBs exec'd 0 ====
+
+	0x2547514C:  835E0154  lwz r26,340(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x154, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25475150:  3D40CCCC  lis r10,-13108
+	   5: MOVL       	$0xCCCC0000, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0x25475154:  6146CCCD  ori r6,r10,0xCCCD
+	   8: MOVL       	$0xCCCCCCCD, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x25475158:  811A0000  lwz r8,0(r26)
+	  11: GETL       	R26, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0x2547515C:  5507103A  rlwinm r7,r8,2,0,29
+	  15: GETL       	R8, t12
+	  16: SHLL       	$0x2, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x25475160:  38A70027  addi r5,r7,39
+	  19: GETL       	R7, t14
+	  20: ADDL       	$0x27, t14
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0x25475164:  7C653016  mulhwu r3,r5,r6
+	  23: GETL       	R5, t16
+	  24: GETL       	R6, t18
+	  25: UMULHL       	t16, t18
+	  26: PUTL       	t18, R3
+	  27: INCEIPL       	$4
+
+	0x25475168:  547DE13E  rlwinm r29,r3,28,4,31
+	  28: GETL       	R3, t20
+	  29: SHRL       	$0x4, t20
+	  30: PUTL       	t20, R29
+	  31: INCEIPL       	$4
+
+	0x2547516C:  1C7D0140  mulli r3,r29,320
+	  32: GETL       	R29, t22
+	  33: MULL       	$0x140, t22
+	  34: PUTL       	t22, R3
+	  35: INCEIPL       	$4
+
+	0x25475170:  480228B5  bl 0x25497A24
+	  36: MOVL       	$0x25475174, t24
+	  37: PUTL       	t24, LR
+	  38: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 2547514C 40
+. 83 5E 01 54 3D 40 CC CC 61 46 CC CD 81 1A 00 00 55 07 10 3A 38 A7 00 27 7C 65 30 16 54 7D E1 3E 1C 7D 01 40 48 02 28 B5
+
+==== BB 271 (0x25475174) approx BBs exec'd 0 ====
+
+	0x25475174:  809B0000  lwz r4,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25475178:  907C0000  stw r3,0(r28)
+	   4: GETL       	R3, t4
+	   5: GETL       	R28, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0x2547517C:  81240000  lwz r9,0(r4)
+	   8: GETL       	R4, t8
+	   9: LDL       	(t8), t10
+	  10: PUTL       	t10, R9
+	  11: INCEIPL       	$4
+
+	0x25475180:  80DE0188  lwz r6,392(r30)
+	  12: GETL       	R30, t12
+	  13: ADDL       	$0x188, t12
+	  14: LDL       	(t12), t14
+	  15: PUTL       	t14, R6
+	  16: INCEIPL       	$4
+
+	0x25475184:  2F890000  cmpi cr7,r9,0
+	  17: GETL       	R9, t16
+	  18: CMP0L       	t16, t18  (-rSo)
+	  19: ICRFL       	t18, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x25475188:  419E014C  bc 12,30,0x254752D4
+	  21: Js30o       	$0x254752D4
+
+
+. 0 25475174 24
+. 80 9B 00 00 90 7C 00 00 81 24 00 00 80 DE 01 88 2F 89 00 00 41 9E 01 4C
+
+==== BB 272 (0x2547518C) approx BBs exec'd 0 ====
+
+	0x2547518C:  82FE04C8  lwz r23,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x25475190:  39600000  li r11,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x25475194:  1C7D0014  mulli r3,r29,20
+	   8: GETL       	R29, t6
+	   9: MULL       	$0x14, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x25475198:  80FA0000  lwz r7,0(r26)
+	  12: GETL       	R26, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R7
+	  15: INCEIPL       	$4
+
+	0x2547519C:  811E014C  lwz r8,332(r30)
+	  16: GETL       	R30, t12
+	  17: ADDL       	$0x14C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R8
+	  20: INCEIPL       	$4
+
+	0x254751A0:  39400000  li r10,0
+	  21: MOVL       	$0x0, t16
+	  22: PUTL       	t16, R10
+	  23: INCEIPL       	$4
+
+	0x254751A4:  809E0194  lwz r4,404(r30)
+	  24: GETL       	R30, t18
+	  25: ADDL       	$0x194, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R4
+	  28: INCEIPL       	$4
+
+	0x254751A8:  38C00000  li r6,0
+	  29: MOVL       	$0x0, t22
+	  30: PUTL       	t22, R6
+	  31: INCEIPL       	$4
+
+	0x254751AC:  80BE0148  lwz r5,328(r30)
+	  32: GETL       	R30, t24
+	  33: ADDL       	$0x148, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R5
+	  36: INCEIPL       	$4
+
+	0x254751B0:  917B0004  stw r11,4(r27)
+	  37: GETL       	R11, t28
+	  38: GETL       	R27, t30
+	  39: ADDL       	$0x4, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x254751B4:  7D2B4B78  or r11,r9,r9
+	  42: GETL       	R9, t32
+	  43: PUTL       	t32, R11
+	  44: INCEIPL       	$4
+
+	0x254751B8:  913701B0  stw r9,432(r23)
+	  45: GETL       	R9, t34
+	  46: GETL       	R23, t36
+	  47: ADDL       	$0x1B0, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0x254751BC:  555B103A  rlwinm r27,r10,2,0,29
+	  50: GETL       	R10, t38
+	  51: SHLL       	$0x2, t38
+	  52: PUTL       	t38, R27
+	  53: INCEIPL       	$4
+
+	0x254751C0:  7C863840  cmpl cr1,r6,r7
+	  54: GETL       	R6, t40
+	  55: GETL       	R7, t42
+	  56: CMPUL       	t40, t42, t44  (-rSo)
+	  57: ICRFL       	t44, $0x1, CR
+	  58: INCEIPL       	$4
+
+	0x254751C4:  7F5B282E  lwzx r26,r27,r5
+	  59: GETL       	R5, t46
+	  60: GETL       	R27, t48
+	  61: ADDL       	t48, t46
+	  62: LDL       	(t46), t50
+	  63: PUTL       	t50, R26
+	  64: INCEIPL       	$4
+
+	0x254751C8:  917C0000  stw r11,0(r28)
+	  65: GETL       	R11, t52
+	  66: GETL       	R28, t54
+	  67: STL       	t52, (t54)
+	  68: INCEIPL       	$4
+
+	0x254751CC:  3B9C0004  addi r28,r28,4
+	  69: GETL       	R28, t56
+	  70: ADDL       	$0x4, t56
+	  71: PUTL       	t56, R28
+	  72: INCEIPL       	$4
+
+	0x254751D0:  7D9A4214  add r12,r26,r8
+	  73: GETL       	R26, t58
+	  74: GETL       	R8, t60
+	  75: ADDL       	t58, t60
+	  76: PUTL       	t60, R12
+	  77: INCEIPL       	$4
+
+	0x254751D4:  910B000C  stw r8,12(r11)
+	  78: GETL       	R8, t62
+	  79: GETL       	R11, t64
+	  80: ADDL       	$0xC, t64
+	  81: STL       	t62, (t64)
+	  82: INCEIPL       	$4
+
+	0x254751D8:  908B0004  stw r4,4(r11)
+	  83: GETL       	R4, t66
+	  84: GETL       	R11, t68
+	  85: ADDL       	$0x4, t68
+	  86: STL       	t66, (t68)
+	  87: INCEIPL       	$4
+
+	0x254751DC:  390C0001  addi r8,r12,1
+	  88: GETL       	R12, t70
+	  89: ADDL       	$0x1, t70
+	  90: PUTL       	t70, R8
+	  91: INCEIPL       	$4
+
+	0x254751E0:  90CB0008  stw r6,8(r11)
+	  92: GETL       	R6, t72
+	  93: GETL       	R11, t74
+	  94: ADDL       	$0x8, t74
+	  95: STL       	t72, (t74)
+	  96: INCEIPL       	$4
+
+	0x254751E4:  934B0010  stw r26,16(r11)
+	  97: GETL       	R26, t76
+	  98: GETL       	R11, t78
+	  99: ADDL       	$0x10, t78
+	 100: STL       	t76, (t78)
+	 101: INCEIPL       	$4
+
+	0x254751E8:  4084001C  bc 4,4,0x25475204
+	 102: Jc04o       	$0x25475204
+
+
+. 0 2547518C 96
+. 82 FE 04 C8 39 60 00 00 1C 7D 00 14 80 FA 00 00 81 1E 01 4C 39 40 00 00 80 9E 01 94 38 C0 00 00 80 BE 01 48 91 7B 00 04 7D 2B 4B 78 91 37 01 B0 55 5B 10 3A 7C 86 38 40 7F 5B 28 2E 91 7C 00 00 3B 9C 00 04 7D 9A 42 14 91 0B 00 0C 90 8B 00 04 39 0C 00 01 90 CB 00 08 93 4B 00 10 40 84 00 1C
+
+==== BB 273 (0x254751EC) approx BBs exec'd 0 ====
+
+	0x254751EC:  7CE903A6  mtctr r7
+	   0: GETL       	R7, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x254751F0:  38000000  li r0,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x254751F4:  392B0014  addi r9,r11,20
+	   6: GETL       	R11, t4
+	   7: ADDL       	$0x14, t4
+	   8: PUTL       	t4, R9
+	   9: INCEIPL       	$4
+
+	0x254751F8:  90090000  stw r0,0(r9)
+	  10: GETL       	R0, t6
+	  11: GETL       	R9, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x254751FC:  39290004  addi r9,r9,4
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x4, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x25475200:  4200FFF8  bc 16,0,0x254751F8
+	  18: GETL       	CTR, t12
+	  19: ADDL       	$0xFFFFFFFF, t12
+	  20: PUTL       	t12, CTR
+	  21: JIFZL       	t12, $0x25475204
+	  22: JMPo       	$0x254751F8  ($4)
+
+
+. 0 254751EC 24
+. 7C E9 03 A6 38 00 00 00 39 2B 00 14 90 09 00 00 39 29 00 04 42 00 FF F8
+
+==== BB 274 (0x254751F8) approx BBs exec'd 0 ====
+
+	0x254751F8:  90090000  stw r0,0(r9)
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x254751FC:  39290004  addi r9,r9,4
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25475200:  4200FFF8  bc 16,0,0x254751F8
+	   8: GETL       	CTR, t6
+	   9: ADDL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, CTR
+	  11: JIFZL       	t6, $0x25475204
+	  12: JMPo       	$0x254751F8  ($4)
+
+
+. 0 254751F8 12
+. 90 09 00 00 39 29 00 04 42 00 FF F8
+
+==== BB 275 (0x25475204) approx BBs exec'd 0 ====
+
+	0x25475204:  2F0A0001  cmpi cr6,r10,1
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25475208:  394A0001  addi r10,r10,1
+	   5: GETL       	R10, t6
+	   6: ADDL       	$0x1, t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x2547520C:  419A00BC  bc 12,26,0x254752C8
+	   9: Js26o       	$0x254752C8
+
+
+. 0 25475204 12
+. 2F 0A 00 01 39 4A 00 01 41 9A 00 BC
+
+==== BB 276 (0x25475210) approx BBs exec'd 0 ====
+
+	0x25475210:  7C035A14  add r0,r3,r11
+	   0: GETL       	R3, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475214:  280A0001  cmpli cr0,r10,1
+	   5: GETL       	R10, t4
+	   6: MOVL       	$0x1, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x25475218:  900B0000  stw r0,0(r11)
+	  10: GETL       	R0, t10
+	  11: GETL       	R11, t12
+	  12: STL       	t10, (t12)
+	  13: INCEIPL       	$4
+
+	0x2547521C:  7D6B1A14  add r11,r11,r3
+	  14: GETL       	R11, t14
+	  15: GETL       	R3, t16
+	  16: ADDL       	t14, t16
+	  17: PUTL       	t16, R11
+	  18: INCEIPL       	$4
+
+	0x25475220:  40A1FF9C  bc 5,1,0x254751BC
+	  19: Jc01o       	$0x254751BC
+
+
+. 0 25475210 20
+. 7C 03 5A 14 28 0A 00 01 90 0B 00 00 7D 6B 1A 14 40 A1 FF 9C
+
+==== BB 277 (0x254751BC) approx BBs exec'd 0 ====
+
+	0x254751BC:  555B103A  rlwinm r27,r10,2,0,29
+	   0: GETL       	R10, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x254751C0:  7C863840  cmpl cr1,r6,r7
+	   4: GETL       	R6, t2
+	   5: GETL       	R7, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254751C4:  7F5B282E  lwzx r26,r27,r5
+	   9: GETL       	R5, t8
+	  10: GETL       	R27, t10
+	  11: ADDL       	t10, t8
+	  12: LDL       	(t8), t12
+	  13: PUTL       	t12, R26
+	  14: INCEIPL       	$4
+
+	0x254751C8:  917C0000  stw r11,0(r28)
+	  15: GETL       	R11, t14
+	  16: GETL       	R28, t16
+	  17: STL       	t14, (t16)
+	  18: INCEIPL       	$4
+
+	0x254751CC:  3B9C0004  addi r28,r28,4
+	  19: GETL       	R28, t18
+	  20: ADDL       	$0x4, t18
+	  21: PUTL       	t18, R28
+	  22: INCEIPL       	$4
+
+	0x254751D0:  7D9A4214  add r12,r26,r8
+	  23: GETL       	R26, t20
+	  24: GETL       	R8, t22
+	  25: ADDL       	t20, t22
+	  26: PUTL       	t22, R12
+	  27: INCEIPL       	$4
+
+	0x254751D4:  910B000C  stw r8,12(r11)
+	  28: GETL       	R8, t24
+	  29: GETL       	R11, t26
+	  30: ADDL       	$0xC, t26
+	  31: STL       	t24, (t26)
+	  32: INCEIPL       	$4
+
+	0x254751D8:  908B0004  stw r4,4(r11)
+	  33: GETL       	R4, t28
+	  34: GETL       	R11, t30
+	  35: ADDL       	$0x4, t30
+	  36: STL       	t28, (t30)
+	  37: INCEIPL       	$4
+
+	0x254751DC:  390C0001  addi r8,r12,1
+	  38: GETL       	R12, t32
+	  39: ADDL       	$0x1, t32
+	  40: PUTL       	t32, R8
+	  41: INCEIPL       	$4
+
+	0x254751E0:  90CB0008  stw r6,8(r11)
+	  42: GETL       	R6, t34
+	  43: GETL       	R11, t36
+	  44: ADDL       	$0x8, t36
+	  45: STL       	t34, (t36)
+	  46: INCEIPL       	$4
+
+	0x254751E4:  934B0010  stw r26,16(r11)
+	  47: GETL       	R26, t38
+	  48: GETL       	R11, t40
+	  49: ADDL       	$0x10, t40
+	  50: STL       	t38, (t40)
+	  51: INCEIPL       	$4
+
+	0x254751E8:  4084001C  bc 4,4,0x25475204
+	  52: Jc04o       	$0x25475204
+
+
+. 0 254751BC 48
+. 55 5B 10 3A 7C 86 38 40 7F 5B 28 2E 91 7C 00 00 3B 9C 00 04 7D 9A 42 14 91 0B 00 0C 90 8B 00 04 39 0C 00 01 90 CB 00 08 93 4B 00 10 40 84 00 1C
+
+==== BB 278 (0x254752C8) approx BBs exec'd 0 ====
+
+	0x254752C8:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254752CC:  4BFFFF48  b 0x25475214
+	   3: JMPo       	$0x25475214  ($4)
+
+
+. 0 254752C8 8
+. 38 00 00 00 4B FF FF 48
+
+==== BB 279 (0x25475214) approx BBs exec'd 0 ====
+
+	0x25475214:  280A0001  cmpli cr0,r10,1
+	   0: GETL       	R10, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25475218:  900B0000  stw r0,0(r11)
+	   5: GETL       	R0, t6
+	   6: GETL       	R11, t8
+	   7: STL       	t6, (t8)
+	   8: INCEIPL       	$4
+
+	0x2547521C:  7D6B1A14  add r11,r11,r3
+	   9: GETL       	R11, t10
+	  10: GETL       	R3, t12
+	  11: ADDL       	t10, t12
+	  12: PUTL       	t12, R11
+	  13: INCEIPL       	$4
+
+	0x25475220:  40A1FF9C  bc 5,1,0x254751BC
+	  14: Jc01o       	$0x254751BC
+
+
+. 0 25475214 16
+. 28 0A 00 01 90 0B 00 00 7D 6B 1A 14 40 A1 FF 9C
+
+==== BB 280 (0x25475224) approx BBs exec'd 0 ====
+
+	0x25475224:  83B70000  lwz r29,0(r23)
+	   0: GETL       	R23, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0x25475228:  38000009  li r0,9
+	   4: MOVL       	$0x9, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0x2547522C:  809E0158  lwz r4,344(r30)
+	   7: GETL       	R30, t6
+	   8: ADDL       	$0x158, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0x25475230:  39200000  li r9,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x25475234:  2F9D0000  cmpi cr7,r29,0
+	  15: GETL       	R29, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25475238:  913C0000  stw r9,0(r28)
+	  19: GETL       	R9, t16
+	  20: GETL       	R28, t18
+	  21: STL       	t16, (t18)
+	  22: INCEIPL       	$4
+
+	0x2547523C:  90040000  stw r0,0(r4)
+	  23: GETL       	R0, t20
+	  24: GETL       	R4, t22
+	  25: STL       	t20, (t22)
+	  26: INCEIPL       	$4
+
+	0x25475240:  419E0028  bc 12,30,0x25475268
+	  27: Js30o       	$0x25475268
+
+
+. 0 25475224 32
+. 83 B7 00 00 38 00 00 09 80 9E 01 58 39 20 00 00 2F 9D 00 00 91 3C 00 00 90 04 00 00 41 9E 00 28
+
+==== BB 281 (0x25475244) approx BBs exec'd 0 ====
+
+	0x25475244:  839D0094  lwz r28,148(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x94, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25475248:  2C9C0000  cmpi cr1,r28,0
+	   5: GETL       	R28, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547524C:  408601BC  bc 4,6,0x25475408
+	   9: Jc06o       	$0x25475408
+
+
+. 0 25475244 12
+. 83 9D 00 94 2C 9C 00 00 40 86 01 BC
+
+==== BB 282 (0x25475250) approx BBs exec'd 0 ====
+
+	0x25475250:  817D005C  lwz r11,92(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x5C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25475254:  3800FFFF  li r0,-1
+	   5: MOVL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25475258:  901D01E0  stw r0,480(r29)
+	   8: GETL       	R0, t6
+	   9: GETL       	R29, t8
+	  10: ADDL       	$0x1E0, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547525C:  2F0B0000  cmpi cr6,r11,0
+	  13: GETL       	R11, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0x25475260:  409A0084  bc 4,26,0x254752E4
+	  17: Jc26o       	$0x254752E4
+
+
+. 0 25475250 20
+. 81 7D 00 5C 38 00 FF FF 90 1D 01 E0 2F 0B 00 00 40 9A 00 84
+
+==== BB 283 (0x25475264) approx BBs exec'd 0 ====
+
+	0x25475264:  901D018C  stw r0,396(r29)
+	   0: GETL       	R0, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	$0x18C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25475268:  2C190000  cmpi cr0,r25,0
+	   5: GETL       	R25, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547526C:  41820010  bc 12,2,0x2547527C
+	   9: Js02o       	$0x2547527C
+
+
+. 0 25475264 12
+. 90 1D 01 8C 2C 19 00 00 41 82 00 10
+
+==== BB 284 (0x25475270) approx BBs exec'd 0 ====
+
+	0x25475270:  8BB90000  lbz r29,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0x25475274:  2F9D0000  cmpi cr7,r29,0
+	   4: GETL       	R29, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x25475278:  409E0094  bc 4,30,0x2547530C
+	   8: Jc30o       	$0x2547530C
+
+
+. 0 25475270 12
+. 8B B9 00 00 2F 9D 00 00 40 9E 00 94
+
+==== BB 285 (0x2547530C) approx BBs exec'd 0 ====
+
+	0x2547530C:  7F23CB78  or r3,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475310:  7F3DCB78  or r29,r25,r25
+	   3: GETL       	R25, t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0x25475314:  4800DCBD  bl 0x25482FD0
+	   6: MOVL       	$0x25475318, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25482FD0  ($4)
+
+
+. 0 2547530C 12
+. 7F 23 CB 78 7F 3D CB 78 48 00 DC BD
+
+==== BB 286 (0x25483008) approx BBs exec'd 0 ====
+
+	0x25483008:  3CC0FEFF  lis r6,-257
+	   0: MOVL       	$0xFEFF0000, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x2548300C:  38C6FEFF  addi r6,r6,-257
+	   3: MOVL       	$0xFEFEFEFF, t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0x25483010:  419D001C  bc 12,29,0x2548302C
+	   6: Js29o       	$0x2548302C
+
+
+. 0 25483008 12
+. 3C C0 FE FF 38 C6 FE FF 41 9D 00 1C
+
+==== BB 287 (0x25483014) approx BBs exec'd 0 ====
+
+	0x25483014:  85040004  lwzu r8,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0x25483018:  7CE04038  and r0,r7,r8
+	   6: GETL       	R7, t4
+	   7: GETL       	R8, t6
+	   8: ANDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x2548301C:  7CEA4378  or r10,r7,r8
+	  11: GETL       	R7, t8
+	  12: GETL       	R8, t10
+	  13: ORL       	t10, t8
+	  14: PUTL       	t8, R10
+	  15: INCEIPL       	$4
+
+	0x25483020:  7C003A14  add r0,r0,r7
+	  16: GETL       	R0, t12
+	  17: GETL       	R7, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x25483024:  7D4800F9  nor. r8,r10,r0
+	  21: GETL       	R10, t16
+	  22: GETL       	R0, t18
+	  23: ORL       	t18, t16
+	  24: NOTL       	t16
+	  25: PUTL       	t16, R8
+	  26: CMP0L       	t16, t20  (-rSo)
+	  27: ICRFL       	t20, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0x25483028:  4082004C  bc 4,2,0x25483074
+	  29: Jc02o       	$0x25483074
+
+
+. 0 25483014 24
+. 85 04 00 04 7C E0 40 38 7C EA 43 78 7C 00 3A 14 7D 48 00 F9 40 82 00 4C
+
+==== BB 288 (0x2548302C) approx BBs exec'd 0 ====
+
+	0x2548302C:  81040004  lwz r8,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25483030:  85240008  lwzu r9,8(r4)
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0x25483034:  7C064214  add r0,r6,r8
+	  11: GETL       	R6, t8
+	  12: GETL       	R8, t10
+	  13: ADDL       	t8, t10
+	  14: PUTL       	t10, R0
+	  15: INCEIPL       	$4
+
+	0x25483038:  7CEA40F8  nor r10,r7,r8
+	  16: GETL       	R7, t12
+	  17: GETL       	R8, t14
+	  18: ORL       	t14, t12
+	  19: NOTL       	t12
+	  20: PUTL       	t12, R10
+	  21: INCEIPL       	$4
+
+	0x2548303C:  7C005039  and. r0,r0,r10
+	  22: GETL       	R0, t16
+	  23: GETL       	R10, t18
+	  24: ANDL       	t16, t18
+	  25: PUTL       	t18, R0
+	  26: CMP0L       	t18, t20  (-rSo)
+	  27: ICRFL       	t20, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0x25483040:  7D664A14  add r11,r6,r9
+	  29: GETL       	R6, t22
+	  30: GETL       	R9, t24
+	  31: ADDL       	t22, t24
+	  32: PUTL       	t24, R11
+	  33: INCEIPL       	$4
+
+	0x25483044:  7CEC48F8  nor r12,r7,r9
+	  34: GETL       	R7, t26
+	  35: GETL       	R9, t28
+	  36: ORL       	t28, t26
+	  37: NOTL       	t26
+	  38: PUTL       	t26, R12
+	  39: INCEIPL       	$4
+
+	0x25483048:  4082001C  bc 4,2,0x25483064
+	  40: Jc02o       	$0x25483064
+
+
+. 0 2548302C 32
+. 81 04 00 04 85 24 00 08 7C 06 42 14 7C EA 40 F8 7C 00 50 39 7D 66 4A 14 7C EC 48 F8 40 82 00 1C
+
+==== BB 289 (0x2548304C) approx BBs exec'd 0 ====
+
+	0x2548304C:  7D606039  and. r0,r11,r12
+	   0: GETL       	R11, t0
+	   1: GETL       	R12, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0x25483050:  4182FFDC  bc 12,2,0x2548302C
+	   7: Js02o       	$0x2548302C
+
+
+. 0 2548304C 8
+. 7D 60 60 39 41 82 FF DC
+
+==== BB 290 (0x25483064) approx BBs exec'd 0 ====
+
+	0x25483064:  7CE04038  and r0,r7,r8
+	   0: GETL       	R7, t0
+	   1: GETL       	R8, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483068:  3884FFFC  addi r4,r4,-4
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0xFFFFFFFC, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2548306C:  7C003A14  add r0,r0,r7
+	   9: GETL       	R0, t6
+	  10: GETL       	R7, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x25483070:  7D480078  andc r8,r10,r0
+	  14: GETL       	R10, t10
+	  15: GETL       	R0, t12
+	  16: NOTL       	t12
+	  17: ANDL       	t10, t12
+	  18: PUTL       	t12, R8
+	  19: INCEIPL       	$4
+
+	0x25483074:  7D0B0034  cntlzw r11,r8
+	  20: GETL       	R8, t14
+	  21: CNTLZL       	t14
+	  22: PUTL       	t14, R11
+	  23: INCEIPL       	$4
+
+	0x25483078:  7C032050  subf r0,r3,r4
+	  24: GETL       	R3, t16
+	  25: GETL       	R4, t18
+	  26: SUBL       	t16, t18
+	  27: PUTL       	t18, R0
+	  28: INCEIPL       	$4
+
+	0x2548307C:  556BE8FE  rlwinm r11,r11,29,3,31
+	  29: GETL       	R11, t20
+	  30: SHRL       	$0x3, t20
+	  31: PUTL       	t20, R11
+	  32: INCEIPL       	$4
+
+	0x25483080:  7C605A14  add r3,r0,r11
+	  33: GETL       	R0, t22
+	  34: GETL       	R11, t24
+	  35: ADDL       	t22, t24
+	  36: PUTL       	t24, R3
+	  37: INCEIPL       	$4
+
+	0x25483084:  4E800020  blr
+	  38: GETL       	LR, t26
+	  39: JMPo-r       	t26  ($4)
+
+
+. 0 25483064 36
+. 7C E0 40 38 38 84 FF FC 7C 00 3A 14 7D 48 00 78 7D 0B 00 34 7C 03 20 50 55 6B E8 FE 7C 60 5A 14 4E 80 00 20
+
+==== BB 291 (0x25475318) approx BBs exec'd 0 ====
+
+	0x25475318:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547531C:  3B63001F  addi r27,r3,31
+	   3: GETL       	R3, t2
+	   4: ADDL       	$0x1F, t2
+	   5: PUTL       	t2, R27
+	   6: INCEIPL       	$4
+
+	0x25475320:  81810000  lwz r12,0(r1)
+	   7: GETL       	R1, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0x25475324:  577A0036  rlwinm r26,r27,0,0,27
+	  11: GETL       	R27, t8
+	  12: ANDL       	$0xFFFFFFF0, t8
+	  13: PUTL       	t8, R26
+	  14: INCEIPL       	$4
+
+	0x25475328:  7C791B78  or r25,r3,r3
+	  15: GETL       	R3, t10
+	  16: PUTL       	t10, R25
+	  17: INCEIPL       	$4
+
+	0x2547532C:  7D7A00D0  neg r11,r26
+	  18: GETL       	R26, t12
+	  19: NEGL       	t12
+	  20: PUTL       	t12, R11
+	  21: INCEIPL       	$4
+
+	0x25475330:  38B90001  addi r5,r25,1
+	  22: GETL       	R25, t14
+	  23: ADDL       	$0x1, t14
+	  24: PUTL       	t14, R5
+	  25: INCEIPL       	$4
+
+	0x25475334:  7D81596E  stwux r12,r1,r11
+	  26: GETL       	R11, t16
+	  27: GETL       	R1, t18
+	  28: ADDL       	t18, t16
+	  29: PUTL       	t16, R1
+	  30: GETL       	R12, t20
+	  31: STL       	t20, (t16)
+	  32: INCEIPL       	$4
+
+	0x25475338:  39410017  addi r10,r1,23
+	  33: GETL       	R1, t22
+	  34: ADDL       	$0x17, t22
+	  35: PUTL       	t22, R10
+	  36: INCEIPL       	$4
+
+	0x2547533C:  55430036  rlwinm r3,r10,0,0,27
+	  37: GETL       	R10, t24
+	  38: ANDL       	$0xFFFFFFF0, t24
+	  39: PUTL       	t24, R3
+	  40: INCEIPL       	$4
+
+	0x25475340:  4800E8A1  bl 0x25483BE0
+	  41: MOVL       	$0x25475344, t26
+	  42: PUTL       	t26, LR
+	  43: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 25475318 44
+. 7F A4 EB 78 3B 63 00 1F 81 81 00 00 57 7A 00 36 7C 79 1B 78 7D 7A 00 D0 38 B9 00 01 7D 81 59 6E 39 41 00 17 55 43 00 36 48 00 E8 A1
+
+==== BB 292 (0x25483D00) approx BBs exec'd 0 ====
+
+	0x25483D00:  81240000  lwz r9,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25483D04:  38A5FFFF  addi r5,r5,-1
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25483D08:  38840004  addi r4,r4,4
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0x25483D0C:  80040000  lwz r0,0(r4)
+	  12: GETL       	R4, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R0
+	  15: INCEIPL       	$4
+
+	0x25483D10:  91230000  stw r9,0(r3)
+	  16: GETL       	R9, t12
+	  17: GETL       	R3, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25483D14:  81240004  lwz r9,4(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0x4, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R9
+	  24: INCEIPL       	$4
+
+	0x25483D18:  90030004  stw r0,4(r3)
+	  25: GETL       	R0, t20
+	  26: GETL       	R3, t22
+	  27: ADDL       	$0x4, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0x25483D1C:  80040008  lwz r0,8(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0x8, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R0
+	  34: INCEIPL       	$4
+
+	0x25483D20:  91230008  stw r9,8(r3)
+	  35: GETL       	R9, t28
+	  36: GETL       	R3, t30
+	  37: ADDL       	$0x8, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25483D24:  8124000C  lwz r9,12(r4)
+	  40: GETL       	R4, t32
+	  41: ADDL       	$0xC, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R9
+	  44: INCEIPL       	$4
+
+	0x25483D28:  9003000C  stw r0,12(r3)
+	  45: GETL       	R0, t36
+	  46: GETL       	R3, t38
+	  47: ADDL       	$0xC, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0x25483D2C:  80040010  lwz r0,16(r4)
+	  50: GETL       	R4, t40
+	  51: ADDL       	$0x10, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R0
+	  54: INCEIPL       	$4
+
+	0x25483D30:  91230010  stw r9,16(r3)
+	  55: GETL       	R9, t44
+	  56: GETL       	R3, t46
+	  57: ADDL       	$0x10, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0x25483D34:  81240014  lwz r9,20(r4)
+	  60: GETL       	R4, t48
+	  61: ADDL       	$0x14, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R9
+	  64: INCEIPL       	$4
+
+	0x25483D38:  90030014  stw r0,20(r3)
+	  65: GETL       	R0, t52
+	  66: GETL       	R3, t54
+	  67: ADDL       	$0x14, t54
+	  68: STL       	t52, (t54)
+	  69: INCEIPL       	$4
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	  70: GETL       	R4, t56
+	  71: ADDL       	$0x18, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R0
+	  74: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	  75: GETL       	R9, t60
+	  76: GETL       	R3, t62
+	  77: ADDL       	$0x18, t62
+	  78: STL       	t60, (t62)
+	  79: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  80: GETL       	R5, t64
+	  81: ADCL       	$0xFFFFFFF8, t64  (-wCa)
+	  82: PUTL       	t64, R5
+	  83: CMP0L       	t64, t66  (-rSo)
+	  84: ICRFL       	t66, $0x0, CR
+	  85: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  86: GETL       	R4, t68
+	  87: ADDL       	$0x1C, t68
+	  88: LDL       	(t68), t70
+	  89: PUTL       	t70, R9
+	  90: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  91: GETL       	R0, t72
+	  92: GETL       	R3, t74
+	  93: ADDL       	$0x1C, t74
+	  94: STL       	t72, (t74)
+	  95: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  96: GETL       	R4, t76
+	  97: ADDL       	$0x20, t76
+	  98: PUTL       	t76, R4
+	  99: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	 100: GETL       	R3, t78
+	 101: ADDL       	$0x20, t78
+	 102: PUTL       	t78, R3
+	 103: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	 104: Jc02o       	$0x25483D0C
+
+
+. 0 25483D00 92
+. 81 24 00 00 38 A5 FF FF 38 84 00 04 80 04 00 00 91 23 00 00 81 24 00 04 90 03 00 04 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+==== BB 293 (0x25475344) approx BBs exec'd 0 ====
+
+	0x25475344:  893D0000  lbz r9,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25475348:  7C7C1B78  or r28,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R28
+	   6: INCEIPL       	$4
+
+	0x2547534C:  38600001  li r3,1
+	   7: MOVL       	$0x1, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25475350:  48000018  b 0x25475368
+	  10: JMPo       	$0x25475368  ($4)
+
+
+. 0 25475344 16
+. 89 3D 00 00 7C 7C 1B 78 38 60 00 01 48 00 00 18
+
+==== BB 294 (0x25475368) approx BBs exec'd 0 ====
+
+	0x25475368:  2C890000  cmpi cr1,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547536C:  4086FFE8  bc 4,6,0x25475354
+	   4: Jc06o       	$0x25475354
+
+
+. 0 25475368 8
+. 2C 89 00 00 40 86 FF E8
+
+==== BB 295 (0x25475354) approx BBs exec'd 0 ====
+
+	0x25475354:  3809FFC6  addi r0,r9,-58
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xFFFFFFC6, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25475358:  8D3D0001  lbzu r9,1(r29)
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x1, t2
+	   6: PUTL       	t2, R29
+	   7: LDB       	(t2), t4
+	   8: PUTL       	t4, R9
+	   9: INCEIPL       	$4
+
+	0x2547535C:  20800001  subfic r4,r0,1
+	  10: GETL       	R0, t6
+	  11: MOVL       	$0x1, t8
+	  12: SBBL       	t6, t8  (-wCa)
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0x25475360:  7C830194  addze r4,r3
+	  15: GETL       	R3, t10
+	  16: ADCL       	$0x0, t10  (-rCa-wCa)
+	  17: PUTL       	t10, R4
+	  18: INCEIPL       	$4
+
+	0x25475364:  7C832378  or r3,r4,r4
+	  19: GETL       	R4, t12
+	  20: PUTL       	t12, R3
+	  21: INCEIPL       	$4
+
+	0x25475368:  2C890000  cmpi cr1,r9,0
+	  22: GETL       	R9, t14
+	  23: CMP0L       	t14, t16  (-rSo)
+	  24: ICRFL       	t16, $0x1, CR
+	  25: INCEIPL       	$4
+
+	0x2547536C:  4086FFE8  bc 4,6,0x25475354
+	  26: Jc06o       	$0x25475354
+
+
+. 0 25475354 28
+. 38 09 FF C6 8D 3D 00 01 20 80 00 01 7C 83 01 94 7C 83 23 78 2C 89 00 00 40 86 FF E8
+
+==== BB 296 (0x25475370) approx BBs exec'd 0 ====
+
+	0x25475370:  5469103A  rlwinm r9,r3,2,0,29
+	   0: GETL       	R3, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25475374:  83BE0168  lwz r29,360(r30)
+	   4: GETL       	R30, t2
+	   5: ADDL       	$0x168, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0x25475378:  38690004  addi r3,r9,4
+	   9: GETL       	R9, t6
+	  10: ADDL       	$0x4, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x2547537C:  480226A9  bl 0x25497A24
+	  13: MOVL       	$0x25475380, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 25475370 16
+. 54 69 10 3A 83 BE 01 68 38 69 00 04 48 02 26 A9
+
+==== BB 297 (0x25475380) approx BBs exec'd 0 ====
+
+	0x25475380:  80DE0188  lwz r6,392(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x188, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25475384:  2F030000  cmpi cr6,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475388:  7C641B78  or r4,r3,r3
+	   9: GETL       	R3, t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0x2547538C:  907D0000  stw r3,0(r29)
+	  12: GETL       	R3, t10
+	  13: GETL       	R29, t12
+	  14: STL       	t10, (t12)
+	  15: INCEIPL       	$4
+
+	0x25475390:  41BAFF44  bc 13,26,0x254752D4
+	  16: Js26o       	$0x254752D4
+
+
+. 0 25475380 20
+. 80 DE 01 88 2F 03 00 00 7C 64 1B 78 90 7D 00 00 41 BA FF 44
+
+==== BB 298 (0x25475394) approx BBs exec'd 0 ====
+
+	0x25475394:  807E04B4  lwz r3,1204(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4B4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475398:  39000000  li r8,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0x2547539C:  80BE01A0  lwz r5,416(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x1A0, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x254753A0:  80C30000  lwz r6,0(r3)
+	  13: GETL       	R3, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R6
+	  16: INCEIPL       	$4
+
+	0x254753A4:  7F83E378  or r3,r28,r28
+	  17: GETL       	R28, t14
+	  18: PUTL       	t14, R3
+	  19: INCEIPL       	$4
+
+	0x254753A8:  80FE01A4  lwz r7,420(r30)
+	  20: GETL       	R30, t16
+	  21: ADDL       	$0x1A4, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R7
+	  24: INCEIPL       	$4
+
+	0x254753AC:  4BFFF79D  bl 0x25474B48
+	  25: MOVL       	$0x254753B0, t20
+	  26: PUTL       	t20, LR
+	  27: JMPo-c       	$0x25474B48  ($4)
+
+
+. 0 25475394 28
+. 80 7E 04 B4 39 00 00 00 80 BE 01 A0 80 C3 00 00 7F 83 E3 78 80 FE 01 A4 4B FF F7 9D
+
+==== BB 299 fillin_rpath(0x25474B48) approx BBs exec'd 0 ====
+
+	0x25474B48:  9421FFA0  stwu r1,-96(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFA0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25474B4C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25474B50:  480224B1  bl 0x25497000
+	   9: MOVL       	$0x25474B54, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25474B48 12
+. 94 21 FF A0 7C 08 02 A6 48 02 24 B1
+
+==== BB 300 (0x25474B54) approx BBs exec'd 0 ====
+
+	0x25474B54:  93C10058  stw r30,88(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x58, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25474B58:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25474B5C:  7D800026  mfcr r12
+	   8: GETL       	CR, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0x25474B60:  91E1001C  stw r15,28(r1)
+	  11: GETL       	R15, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x1C, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25474B64:  2D880000  cmpi cr3,r8,0
+	  16: GETL       	R8, t12
+	  17: CMP0L       	t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x3, CR
+	  19: INCEIPL       	$4
+
+	0x25474B68:  92010020  stw r16,32(r1)
+	  20: GETL       	R16, t16
+	  21: GETL       	R1, t18
+	  22: ADDL       	$0x20, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25474B6C:  92210024  stw r17,36(r1)
+	  25: GETL       	R17, t20
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0x24, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0x25474B70:  92410028  stw r18,40(r1)
+	  30: GETL       	R18, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x28, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x25474B74:  81FE0148  lwz r15,328(r30)
+	  35: GETL       	R30, t28
+	  36: ADDL       	$0x148, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R15
+	  39: INCEIPL       	$4
+
+	0x25474B78:  823E04C8  lwz r17,1224(r30)
+	  40: GETL       	R30, t32
+	  41: ADDL       	$0x4C8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R17
+	  44: INCEIPL       	$4
+
+	0x25474B7C:  821E0154  lwz r16,340(r30)
+	  45: GETL       	R30, t36
+	  46: ADDL       	$0x154, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R16
+	  49: INCEIPL       	$4
+
+	0x25474B80:  825E0158  lwz r18,344(r30)
+	  50: GETL       	R30, t40
+	  51: ADDL       	$0x158, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R18
+	  54: INCEIPL       	$4
+
+	0x25474B84:  9261002C  stw r19,44(r1)
+	  55: GETL       	R19, t44
+	  56: GETL       	R1, t46
+	  57: ADDL       	$0x2C, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0x25474B88:  7CF33B78  or r19,r7,r7
+	  60: GETL       	R7, t48
+	  61: PUTL       	t48, R19
+	  62: INCEIPL       	$4
+
+	0x25474B8C:  92810030  stw r20,48(r1)
+	  63: GETL       	R20, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x30, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x25474B90:  7CD43378  or r20,r6,r6
+	  68: GETL       	R6, t54
+	  69: PUTL       	t54, R20
+	  70: INCEIPL       	$4
+
+	0x25474B94:  92A10034  stw r21,52(r1)
+	  71: GETL       	R21, t56
+	  72: GETL       	R1, t58
+	  73: ADDL       	$0x34, t58
+	  74: STL       	t56, (t58)
+	  75: INCEIPL       	$4
+
+	0x25474B98:  7CB52B78  or r21,r5,r5
+	  76: GETL       	R5, t60
+	  77: PUTL       	t60, R21
+	  78: INCEIPL       	$4
+
+	0x25474B9C:  92C10038  stw r22,56(r1)
+	  79: GETL       	R22, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x38, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x25474BA0:  7C962378  or r22,r4,r4
+	  84: GETL       	R4, t66
+	  85: PUTL       	t66, R22
+	  86: INCEIPL       	$4
+
+	0x25474BA4:  92E1003C  stw r23,60(r1)
+	  87: GETL       	R23, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x3C, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0x25474BA8:  7D174378  or r23,r8,r8
+	  92: GETL       	R8, t72
+	  93: PUTL       	t72, R23
+	  94: INCEIPL       	$4
+
+	0x25474BAC:  93010040  stw r24,64(r1)
+	  95: GETL       	R24, t74
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x40, t76
+	  98: STL       	t74, (t76)
+	  99: INCEIPL       	$4
+
+	0x25474BB0:  3B000000  li r24,0
+	 100: MOVL       	$0x0, t78
+	 101: PUTL       	t78, R24
+	 102: INCEIPL       	$4
+
+	0x25474BB4:  93210044  stw r25,68(r1)
+	 103: GETL       	R25, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x44, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x25474BB8:  93410048  stw r26,72(r1)
+	 108: GETL       	R26, t84
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0x48, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x25474BBC:  9361004C  stw r27,76(r1)
+	 113: GETL       	R27, t88
+	 114: GETL       	R1, t90
+	 115: ADDL       	$0x4C, t90
+	 116: STL       	t88, (t90)
+	 117: INCEIPL       	$4
+
+	0x25474BC0:  93810050  stw r28,80(r1)
+	 118: GETL       	R28, t92
+	 119: GETL       	R1, t94
+	 120: ADDL       	$0x50, t94
+	 121: STL       	t92, (t94)
+	 122: INCEIPL       	$4
+
+	0x25474BC4:  93A10054  stw r29,84(r1)
+	 123: GETL       	R29, t96
+	 124: GETL       	R1, t98
+	 125: ADDL       	$0x54, t98
+	 126: STL       	t96, (t98)
+	 127: INCEIPL       	$4
+
+	0x25474BC8:  93E1005C  stw r31,92(r1)
+	 128: GETL       	R31, t100
+	 129: GETL       	R1, t102
+	 130: ADDL       	$0x5C, t102
+	 131: STL       	t100, (t102)
+	 132: INCEIPL       	$4
+
+	0x25474BCC:  90010064  stw r0,100(r1)
+	 133: GETL       	R0, t104
+	 134: GETL       	R1, t106
+	 135: ADDL       	$0x64, t106
+	 136: STL       	t104, (t106)
+	 137: INCEIPL       	$4
+
+	0x25474BD0:  91810018  stw r12,24(r1)
+	 138: GETL       	R12, t108
+	 139: GETL       	R1, t110
+	 140: ADDL       	$0x18, t110
+	 141: STL       	t108, (t110)
+	 142: INCEIPL       	$4
+
+	0x25474BD4:  90610008  stw r3,8(r1)
+	 143: GETL       	R3, t112
+	 144: GETL       	R1, t114
+	 145: ADDL       	$0x8, t114
+	 146: STL       	t112, (t114)
+	 147: INCEIPL       	$4
+
+	0x25474BD8:  38610008  addi r3,r1,8
+	 148: GETL       	R1, t116
+	 149: ADDL       	$0x8, t116
+	 150: PUTL       	t116, R3
+	 151: INCEIPL       	$4
+
+	0x25474BDC:  7EA4AB78  or r4,r21,r21
+	 152: GETL       	R21, t118
+	 153: PUTL       	t118, R4
+	 154: INCEIPL       	$4
+
+	0x25474BE0:  4800B75D  bl 0x2548033C
+	 155: MOVL       	$0x25474BE4, t120
+	 156: PUTL       	t120, LR
+	 157: JMPo-c       	$0x2548033C  ($4)
+
+
+. 0 25474B54 144
+. 93 C1 00 58 7F C8 02 A6 7D 80 00 26 91 E1 00 1C 2D 88 00 00 92 01 00 20 92 21 00 24 92 41 00 28 81 FE 01 48 82 3E 04 C8 82 1E 01 54 82 5E 01 58 92 61 00 2C 7C F3 3B 78 92 81 00 30 7C D4 33 78 92 A1 00 34 7C B5 2B 78 92 C1 00 38 7C 96 23 78 92 E1 00 3C 7D 17 43 78 93 01 00 40 3B 00 00 00 93 21 00 44 93 41 00 48 93 61 00 4C 93 81 00 50 93 A1 00 54 93 E1 00 5C 90 01 00 64 91 81 00 18 90 61 00 08 38 61 00 08 7E A4 AB 78 48 00 B7 5D
+
+==== BB 301 __strsep_g(0x2548033C) approx BBs exec'd 0 ====
+
+	0x2548033C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25480340:  7C681B78  or r8,r3,r3
+	   6: GETL       	R3, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x25480344:  80630000  lwz r3,0(r3)
+	   9: GETL       	R3, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x25480348:  2F830000  cmpi cr7,r3,0
+	  13: GETL       	R3, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x2548034C:  419E0054  bc 12,30,0x254803A0
+	  17: Js30o       	$0x254803A0
+
+
+. 0 2548033C 20
+. 94 21 FF F0 7C 68 1B 78 80 63 00 00 2F 83 00 00 41 9E 00 54
+
+==== BB 302 (0x25480350) approx BBs exec'd 0 ====
+
+	0x25480350:  88030000  lbz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25480354:  7C6A1B78  or r10,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R10
+	   6: INCEIPL       	$4
+
+	0x25480358:  39200000  li r9,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x2548035C:  2C000000  cmpi cr0,r0,0
+	  10: GETL       	R0, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x25480360:  4182003C  bc 12,2,0x2548039C
+	  14: Js02o       	$0x2548039C
+
+
+. 0 25480350 20
+. 88 03 00 00 7C 6A 1B 78 39 20 00 00 2C 00 00 00 41 82 00 3C
+
+==== BB 303 (0x25480364) approx BBs exec'd 0 ====
+
+	0x25480364:  7C0B0378  or r11,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x25480368:  88040000  lbz r0,0(r4)
+	   3: GETL       	R4, t2
+	   4: LDB       	(t2), t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0x2548036C:  7C892378  or r9,r4,r4
+	   7: GETL       	R4, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25480370:  7C805800  cmp cr1,r0,r11
+	  10: GETL       	R0, t8
+	  11: GETL       	R11, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x25480374:  41860010  bc 12,6,0x25480384
+	  15: Js06o       	$0x25480384
+
+
+. 0 25480364 20
+. 7C 0B 03 78 88 04 00 00 7C 89 23 78 7C 80 58 00 41 86 00 10
+
+==== BB 304 (0x25480378) approx BBs exec'd 0 ====
+
+	0x25480378:  8C090001  lbzu r0,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x2548037C:  2F000000  cmpi cr6,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x25480380:  409AFFF0  bc 4,26,0x25480370
+	  10: Jc26o       	$0x25480370
+
+
+. 0 25480378 12
+. 8C 09 00 01 2F 00 00 00 40 9A FF F0
+
+==== BB 305 (0x25480370) approx BBs exec'd 0 ====
+
+	0x25480370:  7C805800  cmp cr1,r0,r11
+	   0: GETL       	R0, t0
+	   1: GETL       	R11, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25480374:  41860010  bc 12,6,0x25480384
+	   5: Js06o       	$0x25480384
+
+
+. 0 25480370 8
+. 7C 80 58 00 41 86 00 10
+
+==== BB 306 (0x25480384) approx BBs exec'd 0 ====
+
+	0x25480384:  2F800000  cmpi cr7,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25480388:  409E0020  bc 4,30,0x254803A8
+	   4: Jc30o       	$0x254803A8
+
+
+. 0 25480384 8
+. 2F 80 00 00 40 9E 00 20
+
+==== BB 307 (0x2548038C) approx BBs exec'd 0 ====
+
+	0x2548038C:  8D6A0001  lbzu r11,1(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R10
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x25480390:  2C0B0000  cmpi cr0,r11,0
+	   6: GETL       	R11, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x25480394:  4082FFD4  bc 4,2,0x25480368
+	  10: Jc02o       	$0x25480368
+
+
+. 0 2548038C 12
+. 8D 6A 00 01 2C 0B 00 00 40 82 FF D4
+
+==== BB 308 (0x25480368) approx BBs exec'd 0 ====
+
+	0x25480368:  88040000  lbz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2548036C:  7C892378  or r9,r4,r4
+	   4: GETL       	R4, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0x25480370:  7C805800  cmp cr1,r0,r11
+	   7: GETL       	R0, t6
+	   8: GETL       	R11, t8
+	   9: CMPL       	t6, t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x25480374:  41860010  bc 12,6,0x25480384
+	  12: Js06o       	$0x25480384
+
+
+. 0 25480368 16
+. 88 04 00 00 7C 89 23 78 7C 80 58 00 41 86 00 10
+
+==== BB 309 (0x254803A8) approx BBs exec'd 0 ====
+
+	0x254803A8:  392A0001  addi r9,r10,1
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x254803AC:  38000000  li r0,0
+	   4: MOVL       	$0x0, t2
+	   5: PUTL       	t2, R0
+	   6: INCEIPL       	$4
+
+	0x254803B0:  980A0000  stb r0,0(r10)
+	   7: GETL       	R0, t4
+	   8: GETL       	R10, t6
+	   9: STB       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x254803B4:  91280000  stw r9,0(r8)
+	  11: GETL       	R9, t8
+	  12: GETL       	R8, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x254803B8:  4BFFFFE8  b 0x254803A0
+	  15: JMPo       	$0x254803A0  ($4)
+
+
+. 0 254803A8 20
+. 39 2A 00 01 38 00 00 00 98 0A 00 00 91 28 00 00 4B FF FF E8
+
+==== BB 310 (0x254803A0) approx BBs exec'd 0 ====
+
+	0x254803A0:  38210010  addi r1,r1,16
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R1
+	   3: INCEIPL       	$4
+
+	0x254803A4:  4E800020  blr
+	   4: GETL       	LR, t2
+	   5: JMPo-r       	t2  ($4)
+
+
+. 0 254803A0 8
+. 38 21 00 10 4E 80 00 20
+
+==== BB 311 (0x25474BE4) approx BBs exec'd 0 ====
+
+	0x25474BE4:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25474BE8:  7C7F1B78  or r31,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R31
+	   6: INCEIPL       	$4
+
+	0x25474BEC:  7C7C1B78  or r28,r3,r3
+	   7: GETL       	R3, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25474BF0:  4192014C  bc 12,18,0x25474D3C
+	  10: Js18o       	$0x25474D3C
+
+
+. 0 25474BE4 16
+. 2E 03 00 00 7C 7F 1B 78 7C 7C 1B 78 41 92 01 4C
+
+==== BB 312 (0x25474BF4) approx BBs exec'd 0 ====
+
+	0x25474BF4:  4800E3DD  bl 0x25482FD0
+	   0: MOVL       	$0x25474BF8, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25482FD0  ($4)
+
+
+. 0 25474BF4 4
+. 48 00 E3 DD
+
+==== BB 313 (0x25474BF8) approx BBs exec'd 0 ====
+
+	0x25474BF8:  7C7D1B79  or. r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25474BFC:  408201A8  bc 4,2,0x25474DA4
+	   5: Jc02o       	$0x25474DA4
+
+
+. 0 25474BF8 8
+. 7C 7D 1B 79 40 82 01 A8
+
+==== BB 314 (0x25474DA4) approx BBs exec'd 0 ====
+
+	0x25474DA4:  2B9D0001  cmpli cr7,r29,1
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25474DA8:  409D0024  bc 4,29,0x25474DCC
+	   5: Jc29o       	$0x25474DCC
+
+
+. 0 25474DA4 8
+. 2B 9D 00 01 40 9D 00 24
+
+==== BB 315 (0x25474DAC) approx BBs exec'd 0 ====
+
+	0x25474DAC:  7D3FEA14  add r9,r31,r29
+	   0: GETL       	R31, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25474DB0:  8869FFFF  lbz r3,-1(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: LDB       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25474DB4:  2C83002F  cmpi cr1,r3,47
+	  10: GETL       	R3, t8
+	  11: MOVL       	$0x2F, t12
+	  12: CMPL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x25474DB8:  40860014  bc 4,6,0x25474DCC
+	  15: Jc06o       	$0x25474DCC
+
+
+. 0 25474DAC 16
+. 7D 3F EA 14 88 69 FF FF 2C 83 00 2F 40 86 00 14
+
+==== BB 316 (0x25474DCC) approx BBs exec'd 0 ====
+
+	0x25474DCC:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25474DD0:  41BAFE34  bc 13,26,0x25474C04
+	   4: Js26o       	$0x25474C04
+
+
+. 0 25474DCC 8
+. 2F 1D 00 00 41 BA FE 34
+
+==== BB 317 (0x25474DD4) approx BBs exec'd 0 ====
+
+	0x25474DD4:  7D3CEA14  add r9,r28,r29
+	   0: GETL       	R28, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25474DD8:  8889FFFF  lbz r4,-1(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: LDB       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25474DDC:  2C04002F  cmpi cr0,r4,47
+	  10: GETL       	R4, t8
+	  11: MOVL       	$0x2F, t12
+	  12: CMPL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25474DE0:  41A2FE24  bc 13,2,0x25474C04
+	  15: Js02o       	$0x25474C04
+
+
+. 0 25474DD4 16
+. 7D 3C EA 14 88 89 FF FF 2C 04 00 2F 41 A2 FE 24
+
+==== BB 318 (0x25474DE4) approx BBs exec'd 0 ====
+
+	0x25474DE4:  2F940000  cmpi cr7,r20,0
+	   0: GETL       	R20, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25474DE8:  38A0002F  li r5,47
+	   4: MOVL       	$0x2F, t4
+	   5: PUTL       	t4, R5
+	   6: INCEIPL       	$4
+
+	0x25474DEC:  7CBCE9AE  stbx r5,r28,r29
+	   7: GETL       	R29, t6
+	   8: GETL       	R28, t8
+	   9: ADDL       	t8, t6
+	  10: GETL       	R5, t10
+	  11: STB       	t10, (t6)
+	  12: INCEIPL       	$4
+
+	0x25474DF0:  3BBD0001  addi r29,r29,1
+	  13: GETL       	R29, t12
+	  14: ADDL       	$0x1, t12
+	  15: PUTL       	t12, R29
+	  16: INCEIPL       	$4
+
+	0x25474DF4:  419EFE18  bc 12,30,0x25474C0C
+	  17: Js30o       	$0x25474C0C
+
+
+. 0 25474DE4 20
+. 2F 94 00 00 38 A0 00 2F 7C BC E9 AE 3B BD 00 01 41 9E FE 18
+
+==== BB 319 (0x25474C0C) approx BBs exec'd 0 ====
+
+	0x25474C0C:  83F101B0  lwz r31,432(r17)
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x1B0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x25474C10:  7E398B78  or r25,r17,r17
+	   5: GETL       	R17, t4
+	   6: PUTL       	t4, R25
+	   7: INCEIPL       	$4
+
+	0x25474C14:  2E1F0000  cmpi cr4,r31,0
+	   8: GETL       	R31, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x4, CR
+	  11: INCEIPL       	$4
+
+	0x25474C18:  41920024  bc 12,18,0x25474C3C
+	  12: Js18o       	$0x25474C3C
+
+
+. 0 25474C0C 16
+. 83 F1 01 B0 7E 39 8B 78 2E 1F 00 00 41 92 00 24
+
+==== BB 320 (0x25474C1C) approx BBs exec'd 0 ====
+
+	0x25474C1C:  815F0010  lwz r10,16(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25474C20:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25474C24:  7FA5EB78  or r5,r29,r29
+	   8: GETL       	R29, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25474C28:  7F8AE800  cmp cr7,r10,r29
+	  11: GETL       	R10, t8
+	  12: GETL       	R29, t10
+	  13: CMPL       	t8, t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0x25474C2C:  419E0240  bc 12,30,0x25474E6C
+	  16: Js30o       	$0x25474E6C
+
+
+. 0 25474C1C 20
+. 81 5F 00 10 7F 83 E3 78 7F A5 EB 78 7F 8A E8 00 41 9E 02 40
+
+==== BB 321 (0x25474C30) approx BBs exec'd 0 ====
+
+	0x25474C30:  83FF0000  lwz r31,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0x25474C34:  2E1F0000  cmpi cr4,r31,0
+	   4: GETL       	R31, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0x25474C38:  4092FFE4  bc 4,18,0x25474C1C
+	   8: Jc18o       	$0x25474C1C
+
+
+. 0 25474C30 12
+. 83 FF 00 00 2E 1F 00 00 40 92 FF E4
+
+==== BB 322 (0x25474C3C) approx BBs exec'd 0 ====
+
+	0x25474C3C:  418E0284  bc 12,14,0x25474EC0
+	   0: Js14o       	$0x25474EC0
+
+
+. 0 25474C3C 4
+. 41 8E 02 84
+
+==== BB 323 (0x25474EC0) approx BBs exec'd 0 ====
+
+	0x25474EC0:  80700000  lwz r3,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25474EC4:  3B600000  li r27,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R27
+	   6: INCEIPL       	$4
+
+	0x25474EC8:  7E1A8378  or r26,r16,r16
+	   7: GETL       	R16, t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0x25474ECC:  5460103A  rlwinm r0,r3,2,0,29
+	  10: GETL       	R3, t8
+	  11: SHLL       	$0x2, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x25474ED0:  7D80DA14  add r12,r0,r27
+	  14: GETL       	R0, t10
+	  15: GETL       	R27, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R12
+	  18: INCEIPL       	$4
+
+	0x25474ED4:  7FECEA14  add r31,r12,r29
+	  19: GETL       	R12, t14
+	  20: GETL       	R29, t16
+	  21: ADDL       	t14, t16
+	  22: PUTL       	t16, R31
+	  23: INCEIPL       	$4
+
+	0x25474ED8:  387F0015  addi r3,r31,21
+	  24: GETL       	R31, t18
+	  25: ADDL       	$0x15, t18
+	  26: PUTL       	t18, R3
+	  27: INCEIPL       	$4
+
+	0x25474EDC:  48022B49  bl 0x25497A24
+	  28: MOVL       	$0x25474EE0, t20
+	  29: PUTL       	t20, LR
+	  30: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 25474EC0 32
+. 80 70 00 00 3B 60 00 00 7E 1A 83 78 54 60 10 3A 7D 80 DA 14 7F EC EA 14 38 7F 00 15 48 02 2B 49
+
+==== BB 324 (0x25474EE0) approx BBs exec'd 0 ====
+
+	0x25474EE0:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25474EE4:  4082FD8C  bc 4,2,0x25474C70
+	   5: Jc02o       	$0x25474C70
+
+
+. 0 25474EE0 8
+. 7C 7F 1B 79 40 82 FD 8C
+
+==== BB 325 (0x25474C70) approx BBs exec'd 0 ====
+
+	0x25474C70:  80F00000  lwz r7,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25474C74:  7F84E378  or r4,r28,r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0x25474C78:  54E6103A  rlwinm r6,r7,2,0,29
+	   7: GETL       	R7, t6
+	   8: SHLL       	$0x2, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x25474C7C:  7CA6FA14  add r5,r6,r31
+	  11: GETL       	R6, t8
+	  12: GETL       	R31, t10
+	  13: ADDL       	t8, t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x25474C80:  38650014  addi r3,r5,20
+	  16: GETL       	R5, t12
+	  17: ADDL       	$0x14, t12
+	  18: PUTL       	t12, R3
+	  19: INCEIPL       	$4
+
+	0x25474C84:  7FA5EB78  or r5,r29,r29
+	  20: GETL       	R29, t14
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0x25474C88:  907F000C  stw r3,12(r31)
+	  23: GETL       	R3, t16
+	  24: GETL       	R31, t18
+	  25: ADDL       	$0xC, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0x25474C8C:  4800ED95  bl 0x25483A20
+	  28: MOVL       	$0x25474C90, t20
+	  29: PUTL       	t20, LR
+	  30: JMPo-c       	$0x25483A20  ($4)
+
+
+. 0 25474C70 32
+. 80 F0 00 00 7F 84 E3 78 54 E6 10 3A 7C A6 FA 14 38 65 00 14 7F A5 EB 78 90 7F 00 0C 48 00 ED 95
+
+==== BB 326 (0x25483A4C) approx BBs exec'd 0 ====
+
+	0x25483A4C:  7C8300D0  neg r4,r3
+	   0: GETL       	R3, t0
+	   1: NEGL       	t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25483A50:  548307BE  rlwinm r3,r4,0,30,31
+	   4: GETL       	R4, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0x25483A54:  7C691B79  or. r9,r3,r3
+	   8: GETL       	R3, t4
+	   9: PUTL       	t4, R9
+	  10: CMP0L       	t4, t6  (-rSo)
+	  11: ICRFL       	t6, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25483A58:  7F832850  subf r28,r3,r5
+	  13: GETL       	R3, t8
+	  14: GETL       	R5, t10
+	  15: SUBL       	t8, t10
+	  16: PUTL       	t10, R28
+	  17: INCEIPL       	$4
+
+	0x25483A5C:  4182001C  bc 12,2,0x25483A78
+	  18: Js02o       	$0x25483A78
+
+
+. 0 25483A4C 20
+. 7C 83 00 D0 54 83 07 BE 7C 69 1B 79 7F 83 28 50 41 82 00 1C
+
+==== BB 327 (0x25483A78) approx BBs exec'd 0 ====
+
+	0x25483A78:  73E00003  andi. r0,r31,0x3
+	   0: GETL       	R31, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25483A7C:  40820068  bc 4,2,0x25483AE4
+	   6: Jc02o       	$0x25483AE4
+
+
+. 0 25483A78 8
+. 73 E0 00 03 40 82 00 68
+
+==== BB 328 (0x25483A80) approx BBs exec'd 0 ====
+
+	0x25483A80:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25483A84:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25483A88:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x25483A8C:  48000239  bl 0x25483CC4
+	  10: MOVL       	$0x25483A90, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0x25483CC4  ($4)
+
+
+. 0 25483A80 16
+. 7F A3 EB 78 7F E4 FB 78 57 85 F0 BE 48 00 02 39
+
+==== BB 329 (0x25483DA8) approx BBs exec'd 0 ====
+
+	0x25483DA8:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25483DAC:  3884FFF0  addi r4,r4,-16
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25483DB0:  81240014  lwz r9,20(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0x25483DB4:  3863FFEC  addi r3,r3,-20
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0xFFFFFFEC, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x25483DB8:  38A50004  addi r5,r5,4
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x4, t12
+	  19: PUTL       	t12, R5
+	  20: INCEIPL       	$4
+
+	0x25483DBC:  90030014  stw r0,20(r3)
+	  21: GETL       	R0, t14
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x14, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0x25483DC0:  4BFFFF7C  b 0x25483D3C
+	  26: JMPo       	$0x25483D3C  ($4)
+
+
+. 0 25483DA8 28
+. 80 04 00 00 38 84 FF F0 81 24 00 14 38 63 FF EC 38 A5 00 04 90 03 00 14 4B FF FF 7C
+
+==== BB 330 (0x25483A90) approx BBs exec'd 0 ====
+
+	0x25483A90:  5786003A  rlwinm r6,r28,0,0,29
+	   0: GETL       	R28, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x25483A94:  578507BE  rlwinm r5,r28,0,30,31
+	   4: GETL       	R28, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0x25483A98:  7FBD3214  add r29,r29,r6
+	   8: GETL       	R29, t4
+	   9: GETL       	R6, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R29
+	  12: INCEIPL       	$4
+
+	0x25483A9C:  7FFF3214  add r31,r31,r6
+	  13: GETL       	R31, t8
+	  14: GETL       	R6, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R31
+	  17: INCEIPL       	$4
+
+	0x25483AA0:  2C850000  cmpi cr1,r5,0
+	  18: GETL       	R5, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0x25483AA4:  4186001C  bc 12,6,0x25483AC0
+	  22: Js06o       	$0x25483AC0
+
+
+. 0 25483A90 24
+. 57 86 00 3A 57 85 07 BE 7F BD 32 14 7F FF 32 14 2C 85 00 00 41 86 00 1C
+
+==== BB 331 (0x25474C90) approx BBs exec'd 0 ====
+
+	0x25474C90:  39200000  li r9,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25474C94:  99230000  stb r9,0(r3)
+	   3: GETL       	R9, t2
+	   4: GETL       	R3, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0x25474C98:  80920000  lwz r4,0(r18)
+	   7: GETL       	R18, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R4
+	  10: INCEIPL       	$4
+
+	0x25474C9C:  93BF0010  stw r29,16(r31)
+	  11: GETL       	R29, t10
+	  12: GETL       	R31, t12
+	  13: ADDL       	$0x10, t12
+	  14: STL       	t10, (t12)
+	  15: INCEIPL       	$4
+
+	0x25474CA0:  7F9D2040  cmpl cr7,r29,r4
+	  16: GETL       	R29, t14
+	  17: GETL       	R4, t16
+	  18: CMPUL       	t14, t16, t18  (-rSo)
+	  19: ICRFL       	t18, $0x7, CR
+	  20: INCEIPL       	$4
+
+	0x25474CA4:  409D0008  bc 4,29,0x25474CAC
+	  21: Jc29o       	$0x25474CAC
+
+
+. 0 25474C90 24
+. 39 20 00 00 99 23 00 00 80 92 00 00 93 BF 00 10 7F 9D 20 40 40 9D 00 08
+
+==== BB 332 (0x25474CA8) approx BBs exec'd 0 ====
+
+	0x25474CA8:  93B20000  stw r29,0(r18)
+	   0: GETL       	R29, t0
+	   1: GETL       	R18, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25474CAC:  813A0000  lwz r9,0(r26)
+	   4: GETL       	R26, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R9
+	   7: INCEIPL       	$4
+
+	0x25474CB0:  895C0000  lbz r10,0(r28)
+	   8: GETL       	R28, t8
+	   9: LDB       	(t8), t10
+	  10: PUTL       	t10, R10
+	  11: INCEIPL       	$4
+
+	0x25474CB4:  2C890000  cmpi cr1,r9,0
+	  12: GETL       	R9, t12
+	  13: CMP0L       	t12, t14  (-rSo)
+	  14: ICRFL       	t14, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0x25474CB8:  6948002F  xori r8,r10,0x2F
+	  16: GETL       	R10, t16
+	  17: XORL       	$0x2F, t16
+	  18: PUTL       	t16, R8
+	  19: INCEIPL       	$4
+
+	0x25474CBC:  7F8800D0  neg r28,r8
+	  20: GETL       	R8, t18
+	  21: NEGL       	t18
+	  22: PUTL       	t18, R28
+	  23: INCEIPL       	$4
+
+	0x25474CC0:  578317BC  rlwinm r3,r28,2,30,30
+	  24: GETL       	R28, t20
+	  25: ROLL       	$0x2, t20
+	  26: ANDL       	$0x2, t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x25474CC4:  41860018  bc 12,6,0x25474CDC
+	  29: Js06o       	$0x25474CDC
+
+
+. 0 25474CA8 32
+. 93 B2 00 00 81 3A 00 00 89 5C 00 00 2C 89 00 00 69 48 00 2F 7F 88 00 D0 57 83 17 BC 41 86 00 18
+
+==== BB 333 (0x25474CC8) approx BBs exec'd 0 ====
+
+	0x25474CC8:  7D2903A6  mtctr r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x25474CCC:  397F0014  addi r11,r31,20
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x14, t2
+	   5: PUTL       	t2, R11
+	   6: INCEIPL       	$4
+
+	0x25474CD0:  906B0000  stw r3,0(r11)
+	   7: GETL       	R3, t4
+	   8: GETL       	R11, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x25474CD4:  396B0004  addi r11,r11,4
+	  11: GETL       	R11, t8
+	  12: ADDL       	$0x4, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0x25474CD8:  4200FFF8  bc 16,0,0x25474CD0
+	  15: GETL       	CTR, t10
+	  16: ADDL       	$0xFFFFFFFF, t10
+	  17: PUTL       	t10, CTR
+	  18: JIFZL       	t10, $0x25474CDC
+	  19: JMPo       	$0x25474CD0  ($4)
+
+
+. 0 25474CC8 20
+. 7D 29 03 A6 39 7F 00 14 90 6B 00 00 39 6B 00 04 42 00 FF F8
+
+==== BB 334 (0x25474CD0) approx BBs exec'd 0 ====
+
+	0x25474CD0:  906B0000  stw r3,0(r11)
+	   0: GETL       	R3, t0
+	   1: GETL       	R11, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25474CD4:  396B0004  addi r11,r11,4
+	   4: GETL       	R11, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x25474CD8:  4200FFF8  bc 16,0,0x25474CD0
+	   8: GETL       	CTR, t6
+	   9: ADDL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, CTR
+	  11: JIFZL       	t6, $0x25474CDC
+	  12: JMPo       	$0x25474CD0  ($4)
+
+
+. 0 25474CD0 12
+. 90 6B 00 00 39 6B 00 04 42 00 FF F8
+
+==== BB 335 (0x25474CDC) approx BBs exec'd 0 ====
+
+	0x25474CDC:  927F0004  stw r19,4(r31)
+	   0: GETL       	R19, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x4, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25474CE0:  418E021C  bc 12,14,0x25474EFC
+	   5: Js14o       	$0x25474EFC
+
+
+. 0 25474CDC 8
+. 92 7F 00 04 41 8E 02 1C
+
+==== BB 336 (0x25474EFC) approx BBs exec'd 0 ====
+
+	0x25474EFC:  92FF0008  stw r23,8(r31)
+	   0: GETL       	R23, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25474F00:  4BFFFE08  b 0x25474D08
+	   5: JMPo       	$0x25474D08  ($4)
+
+
+. 0 25474EFC 8
+. 92 FF 00 08 4B FF FE 08
+
+==== BB 337 (0x25474D08) approx BBs exec'd 0 ====
+
+	0x25474D08:  801901B0  lwz r0,432(r25)
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1B0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25474D0C:  571D103A  rlwinm r29,r24,2,0,29
+	   5: GETL       	R24, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0x25474D10:  38610008  addi r3,r1,8
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x8, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x25474D14:  7EA4AB78  or r4,r21,r21
+	  13: GETL       	R21, t8
+	  14: PUTL       	t8, R4
+	  15: INCEIPL       	$4
+
+	0x25474D18:  901F0000  stw r0,0(r31)
+	  16: GETL       	R0, t10
+	  17: GETL       	R31, t12
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0x25474D1C:  3B180001  addi r24,r24,1
+	  20: GETL       	R24, t14
+	  21: ADDL       	$0x1, t14
+	  22: PUTL       	t14, R24
+	  23: INCEIPL       	$4
+
+	0x25474D20:  93F901B0  stw r31,432(r25)
+	  24: GETL       	R31, t16
+	  25: GETL       	R25, t18
+	  26: ADDL       	$0x1B0, t18
+	  27: STL       	t16, (t18)
+	  28: INCEIPL       	$4
+
+	0x25474D24:  7FFDB12E  stwx r31,r29,r22
+	  29: GETL       	R22, t20
+	  30: GETL       	R29, t22
+	  31: ADDL       	t22, t20
+	  32: GETL       	R31, t24
+	  33: STL       	t24, (t20)
+	  34: INCEIPL       	$4
+
+	0x25474D28:  4800B615  bl 0x2548033C
+	  35: MOVL       	$0x25474D2C, t26
+	  36: PUTL       	t26, LR
+	  37: JMPo-c       	$0x2548033C  ($4)
+
+
+. 0 25474D08 36
+. 80 19 01 B0 57 1D 10 3A 38 61 00 08 7E A4 AB 78 90 1F 00 00 3B 18 00 01 93 F9 01 B0 7F FD B1 2E 48 00 B6 15
+
+==== BB 338 (0x25474D2C) approx BBs exec'd 0 ====
+
+	0x25474D2C:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25474D30:  7C7F1B78  or r31,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R31
+	   6: INCEIPL       	$4
+
+	0x25474D34:  7C7C1B78  or r28,r3,r3
+	   7: GETL       	R3, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25474D38:  4092FEBC  bc 4,18,0x25474BF4
+	  10: Jc18o       	$0x25474BF4
+
+
+. 0 25474D2C 16
+. 2E 03 00 00 7C 7F 1B 78 7C 7C 1B 78 40 92 FE BC
+
+==== BB 339 (0x25483054) approx BBs exec'd 0 ====
+
+	0x25483054:  7CE04838  and r0,r7,r9
+	   0: GETL       	R7, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483058:  7C003A14  add r0,r0,r7
+	   5: GETL       	R0, t4
+	   6: GETL       	R7, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x2548305C:  7D880078  andc r8,r12,r0
+	  10: GETL       	R12, t8
+	  11: GETL       	R0, t10
+	  12: NOTL       	t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R8
+	  15: INCEIPL       	$4
+
+	0x25483060:  48000014  b 0x25483074
+	  16: JMPo       	$0x25483074  ($4)
+
+
+. 0 25483054 16
+. 7C E0 48 38 7C 00 3A 14 7D 88 00 78 48 00 00 14
+
+==== BB 340 (0x2547FE20) approx BBs exec'd 0 ====
+
+	0x2547FE20:  813E04F4  lwz r9,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547FE24:  81890004  lwz r12,4(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x2547FE28:  7C8CE214  add r4,r12,r28
+	  10: GETL       	R12, t8
+	  11: GETL       	R28, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x2547FE2C:  7D4C00D0  neg r10,r12
+	  15: GETL       	R12, t12
+	  16: NEGL       	t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547FE30:  3964FFFF  addi r11,r4,-1
+	  19: GETL       	R4, t14
+	  20: ADDL       	$0xFFFFFFFF, t14
+	  21: PUTL       	t14, R11
+	  22: INCEIPL       	$4
+
+	0x2547FE34:  7D7D5038  and r29,r11,r10
+	  23: GETL       	R11, t16
+	  24: GETL       	R10, t18
+	  25: ANDL       	t16, t18
+	  26: PUTL       	t18, R29
+	  27: INCEIPL       	$4
+
+	0x2547FE38:  7FA4EB78  or r4,r29,r29
+	  28: GETL       	R29, t20
+	  29: PUTL       	t20, R4
+	  30: INCEIPL       	$4
+
+	0x2547FE3C:  4800295D  bl 0x25482798
+	  31: MOVL       	$0x2547FE40, t22
+	  32: PUTL       	t22, LR
+	  33: JMPo-c       	$0x25482798  ($4)
+
+
+. 0 2547FE20 32
+. 81 3E 04 F4 81 89 00 04 7C 8C E2 14 7D 4C 00 D0 39 64 FF FF 7D 7D 50 38 7F A4 EB 78 48 00 29 5D
+
+==== BB 341 mmap(0x25482798) approx BBs exec'd 0 ====
+
+	0x25482798:  3800005A  li r0,90
+	   0: MOVL       	$0x5A, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x2548279C:  44000002  sc
+	   3: JMPo-sys       	$0x254827A0  ($4)
+
+
+. 0 25482798 8
+. 38 00 00 5A 44 00 00 02
+
+==== BB 342 (0x254827A0) approx BBs exec'd 0 ====
+
+	0x254827A0:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+. 0 254827A0 4
+. 4C A3 00 20
+
+==== BB 343 (0x2547FE40) approx BBs exec'd 0 ====
+
+	0x2547FE40:  80BB0000  lwz r5,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x2547FE44:  7D23EA14  add r9,r3,r29
+	   4: GETL       	R3, t4
+	   5: GETL       	R29, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x2547FE48:  7C832800  cmp cr1,r3,r5
+	   9: GETL       	R3, t8
+	  10: GETL       	R5, t10
+	  11: CMPL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x2547FE4C:  41860008  bc 12,6,0x2547FE54
+	  14: Js06o       	$0x2547FE54
+
+
+. 0 2547FE40 16
+. 80 BB 00 00 7D 23 EA 14 7C 83 28 00 41 86 00 08
+
+==== BB 344 (0x2547FE50) approx BBs exec'd 0 ====
+
+	0x2547FE50:  907F0000  stw r3,0(r31)
+	   0: GETL       	R3, t0
+	   1: GETL       	R31, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x2547FE54:  807F0000  lwz r3,0(r31)
+	   4: GETL       	R31, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R3
+	   7: INCEIPL       	$4
+
+	0x2547FE58:  80C10024  lwz r6,36(r1)
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0x24, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R6
+	  12: INCEIPL       	$4
+
+	0x2547FE5C:  7D03E214  add r8,r3,r28
+	  13: GETL       	R3, t12
+	  14: GETL       	R28, t14
+	  15: ADDL       	t12, t14
+	  16: PUTL       	t14, R8
+	  17: INCEIPL       	$4
+
+	0x2547FE60:  80FE0420  lwz r7,1056(r30)
+	  18: GETL       	R30, t16
+	  19: ADDL       	$0x420, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R7
+	  22: INCEIPL       	$4
+
+	0x2547FE64:  913B0000  stw r9,0(r27)
+	  23: GETL       	R9, t20
+	  24: GETL       	R27, t22
+	  25: STL       	t20, (t22)
+	  26: INCEIPL       	$4
+
+	0x2547FE68:  7CC803A6  mtlr r6
+	  27: GETL       	R6, t24
+	  28: PUTL       	t24, LR
+	  29: INCEIPL       	$4
+
+	0x2547FE6C:  911F0000  stw r8,0(r31)
+	  30: GETL       	R8, t26
+	  31: GETL       	R31, t28
+	  32: STL       	t26, (t28)
+	  33: INCEIPL       	$4
+
+	0x2547FE70:  8361000C  lwz r27,12(r1)
+	  34: GETL       	R1, t30
+	  35: ADDL       	$0xC, t30
+	  36: LDL       	(t30), t32
+	  37: PUTL       	t32, R27
+	  38: INCEIPL       	$4
+
+	0x2547FE74:  83810010  lwz r28,16(r1)
+	  39: GETL       	R1, t34
+	  40: ADDL       	$0x10, t34
+	  41: LDL       	(t34), t36
+	  42: PUTL       	t36, R28
+	  43: INCEIPL       	$4
+
+	0x2547FE78:  83A10014  lwz r29,20(r1)
+	  44: GETL       	R1, t38
+	  45: ADDL       	$0x14, t38
+	  46: LDL       	(t38), t40
+	  47: PUTL       	t40, R29
+	  48: INCEIPL       	$4
+
+	0x2547FE7C:  83C10018  lwz r30,24(r1)
+	  49: GETL       	R1, t42
+	  50: ADDL       	$0x18, t42
+	  51: LDL       	(t42), t44
+	  52: PUTL       	t44, R30
+	  53: INCEIPL       	$4
+
+	0x2547FE80:  83E1001C  lwz r31,28(r1)
+	  54: GETL       	R1, t46
+	  55: ADDL       	$0x1C, t46
+	  56: LDL       	(t46), t48
+	  57: PUTL       	t48, R31
+	  58: INCEIPL       	$4
+
+	0x2547FE84:  38210020  addi r1,r1,32
+	  59: GETL       	R1, t50
+	  60: ADDL       	$0x20, t50
+	  61: PUTL       	t50, R1
+	  62: INCEIPL       	$4
+
+	0x2547FE88:  90670000  stw r3,0(r7)
+	  63: GETL       	R3, t52
+	  64: GETL       	R7, t54
+	  65: STL       	t52, (t54)
+	  66: INCEIPL       	$4
+
+	0x2547FE8C:  4E800020  blr
+	  67: GETL       	LR, t56
+	  68: JMPo-r       	t56  ($4)
+
+
+. 0 2547FE50 64
+. 90 7F 00 00 80 7F 00 00 80 C1 00 24 7D 03 E2 14 80 FE 04 20 91 3B 00 00 7C C8 03 A6 91 1F 00 00 83 61 00 0C 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 90 67 00 00 4E 80 00 20
+
+==== BB 345 (0x25483AE4) approx BBs exec'd 0 ====
+
+	0x25483AE4:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25483AE8:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25483AEC:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x25483AF0:  48000329  bl 0x25483E18
+	  10: MOVL       	$0x25483AF4, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0x25483E18  ($4)
+
+
+. 0 25483AE4 16
+. 7F A3 EB 78 7F E4 FB 78 57 85 F0 BE 48 00 03 29
+
+==== BB 346 _wordcopy_fwd_dest_aligned(0x25483E18) approx BBs exec'd 0 ====
+
+	0x25483E18:  54A007BE  rlwinm r0,r5,0,30,31
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25483E1C:  54881EF8  rlwinm r8,r4,3,27,28
+	   4: GETL       	R4, t2
+	   5: ROLL       	$0x3, t2
+	   6: ANDL       	$0x18, t2
+	   7: PUTL       	t2, R8
+	   8: INCEIPL       	$4
+
+	0x25483E20:  2F800001  cmpi cr7,r0,1
+	   9: GETL       	R0, t4
+	  10: MOVL       	$0x1, t8
+	  11: CMPL       	t4, t8, t6  (-rSo)
+	  12: ICRFL       	t6, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25483E24:  9421FFF0  stwu r1,-16(r1)
+	  14: GETL       	R1, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0xFFFFFFF0, t12
+	  17: PUTL       	t12, R1
+	  18: STL       	t10, (t12)
+	  19: INCEIPL       	$4
+
+	0x25483E28:  5484003A  rlwinm r4,r4,0,0,29
+	  20: GETL       	R4, t14
+	  21: ANDL       	$0xFFFFFFFC, t14
+	  22: PUTL       	t14, R4
+	  23: INCEIPL       	$4
+
+	0x25483E2C:  20E80020  subfic r7,r8,32
+	  24: GETL       	R8, t16
+	  25: MOVL       	$0x20, t18
+	  26: SBBL       	t16, t18  (-wCa)
+	  27: PUTL       	t18, R7
+	  28: INCEIPL       	$4
+
+	0x25483E30:  419E00B8  bc 12,30,0x25483EE8
+	  29: Js30o       	$0x25483EE8
+
+
+. 0 25483E18 28
+. 54 A0 07 BE 54 88 1E F8 2F 80 00 01 94 21 FF F0 54 84 00 3A 20 E8 00 20 41 9E 00 B8
+
+==== BB 347 (0x25483E34) approx BBs exec'd 0 ====
+
+	0x25483E34:  28000001  cmpli cr0,r0,1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25483E38:  4180008C  bc 12,0,0x25483EC4
+	   5: Js00o       	$0x25483EC4
+
+
+. 0 25483E34 8
+. 28 00 00 01 41 80 00 8C
+
+==== BB 348 (0x25483E3C) approx BBs exec'd 0 ====
+
+	0x25483E3C:  2C800002  cmpi cr1,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25483E40:  418600BC  bc 12,6,0x25483EFC
+	   5: Js06o       	$0x25483EFC
+
+
+. 0 25483E3C 8
+. 2C 80 00 02 41 86 00 BC
+
+==== BB 349 (0x25483EFC) approx BBs exec'd 0 ====
+
+	0x25483EFC:  81440000  lwz r10,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0x25483F00:  3863FFF4  addi r3,r3,-12
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFF4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483F04:  81640004  lwz r11,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x25483F08:  38A50002  addi r5,r5,2
+	  13: GETL       	R5, t10
+	  14: ADDL       	$0x2, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0x25483F0C:  3884FFFC  addi r4,r4,-4
+	  17: GETL       	R4, t12
+	  18: ADDL       	$0xFFFFFFFC, t12
+	  19: PUTL       	t12, R4
+	  20: INCEIPL       	$4
+
+	0x25483F10:  4BFFFF78  b 0x25483E88
+	  21: JMPo       	$0x25483E88  ($4)
+
+
+. 0 25483EFC 24
+. 81 44 00 00 38 63 FF F4 81 64 00 04 38 A5 00 02 38 84 FF FC 4B FF FF 78
+
+==== BB 350 (0x25483E88) approx BBs exec'd 0 ====
+
+	0x25483E88:  34A5FFFC  addic. r5,r5,-4
+	   0: GETL       	R5, t0
+	   1: ADCL       	$0xFFFFFFFC, t0  (-wCa)
+	   2: PUTL       	t0, R5
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25483E8C:  7D464030  slw r6,r10,r8
+	   6: GETL       	R10, t6
+	   7: GETL       	R8, t4
+	   8: SHLL       	t4, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x25483E90:  7D693C30  srw r9,r11,r7
+	  11: GETL       	R11, t10
+	  12: GETL       	R7, t8
+	  13: SHRL       	t8, t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0x25483E94:  8144000C  lwz r10,12(r4)
+	  16: GETL       	R4, t12
+	  17: ADDL       	$0xC, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R10
+	  20: INCEIPL       	$4
+
+	0x25483E98:  7CC04B78  or r0,r6,r9
+	  21: GETL       	R6, t16
+	  22: GETL       	R9, t18
+	  23: ORL       	t18, t16
+	  24: PUTL       	t16, R0
+	  25: INCEIPL       	$4
+
+	0x25483E9C:  38840010  addi r4,r4,16
+	  26: GETL       	R4, t20
+	  27: ADDL       	$0x10, t20
+	  28: PUTL       	t20, R4
+	  29: INCEIPL       	$4
+
+	0x25483EA0:  9003000C  stw r0,12(r3)
+	  30: GETL       	R0, t22
+	  31: GETL       	R3, t24
+	  32: ADDL       	$0xC, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0x25483EA4:  38630010  addi r3,r3,16
+	  35: GETL       	R3, t26
+	  36: ADDL       	$0x10, t26
+	  37: PUTL       	t26, R3
+	  38: INCEIPL       	$4
+
+	0x25483EA8:  40A2FFA4  bc 5,2,0x25483E4C
+	  39: Jc02o       	$0x25483E4C
+
+
+. 0 25483E88 36
+. 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+==== BB 351 (0x25483E4C) approx BBs exec'd 0 ====
+
+	0x25483E4C:  7D664030  slw r6,r11,r8
+	   0: GETL       	R11, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25483E50:  7D493C30  srw r9,r10,r7
+	   5: GETL       	R10, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25483E54:  7CC04B78  or r0,r6,r9
+	  10: GETL       	R6, t8
+	  11: GETL       	R9, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R0
+	  14: INCEIPL       	$4
+
+	0x25483E58:  81640000  lwz r11,0(r4)
+	  15: GETL       	R4, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R11
+	  18: INCEIPL       	$4
+
+	0x25483E5C:  90030000  stw r0,0(r3)
+	  19: GETL       	R0, t16
+	  20: GETL       	R3, t18
+	  21: STL       	t16, (t18)
+	  22: INCEIPL       	$4
+
+	0x25483E60:  7D404030  slw r0,r10,r8
+	  23: GETL       	R10, t22
+	  24: GETL       	R8, t20
+	  25: SHLL       	t20, t22
+	  26: PUTL       	t22, R0
+	  27: INCEIPL       	$4
+
+	0x25483E64:  7D6A3C30  srw r10,r11,r7
+	  28: GETL       	R11, t26
+	  29: GETL       	R7, t24
+	  30: SHRL       	t24, t26
+	  31: PUTL       	t26, R10
+	  32: INCEIPL       	$4
+
+	0x25483E68:  7C0C5378  or r12,r0,r10
+	  33: GETL       	R0, t28
+	  34: GETL       	R10, t30
+	  35: ORL       	t30, t28
+	  36: PUTL       	t28, R12
+	  37: INCEIPL       	$4
+
+	0x25483E6C:  81440004  lwz r10,4(r4)
+	  38: GETL       	R4, t32
+	  39: ADDL       	$0x4, t32
+	  40: LDL       	(t32), t34
+	  41: PUTL       	t34, R10
+	  42: INCEIPL       	$4
+
+	0x25483E70:  91830004  stw r12,4(r3)
+	  43: GETL       	R12, t36
+	  44: GETL       	R3, t38
+	  45: ADDL       	$0x4, t38
+	  46: STL       	t36, (t38)
+	  47: INCEIPL       	$4
+
+	0x25483E74:  7D6C4030  slw r12,r11,r8
+	  48: GETL       	R11, t42
+	  49: GETL       	R8, t40
+	  50: SHLL       	t40, t42
+	  51: PUTL       	t42, R12
+	  52: INCEIPL       	$4
+
+	0x25483E78:  7D4B3C30  srw r11,r10,r7
+	  53: GETL       	R10, t46
+	  54: GETL       	R7, t44
+	  55: SHRL       	t44, t46
+	  56: PUTL       	t46, R11
+	  57: INCEIPL       	$4
+
+	0x25483E7C:  7D865B78  or r6,r12,r11
+	  58: GETL       	R12, t48
+	  59: GETL       	R11, t50
+	  60: ORL       	t50, t48
+	  61: PUTL       	t48, R6
+	  62: INCEIPL       	$4
+
+	0x25483E80:  81640008  lwz r11,8(r4)
+	  63: GETL       	R4, t52
+	  64: ADDL       	$0x8, t52
+	  65: LDL       	(t52), t54
+	  66: PUTL       	t54, R11
+	  67: INCEIPL       	$4
+
+	0x25483E84:  90C30008  stw r6,8(r3)
+	  68: GETL       	R6, t56
+	  69: GETL       	R3, t58
+	  70: ADDL       	$0x8, t58
+	  71: STL       	t56, (t58)
+	  72: INCEIPL       	$4
+
+	0x25483E88:  34A5FFFC  addic. r5,r5,-4
+	  73: GETL       	R5, t60
+	  74: ADCL       	$0xFFFFFFFC, t60  (-wCa)
+	  75: PUTL       	t60, R5
+	  76: CMP0L       	t60, t62  (-rSo)
+	  77: ICRFL       	t62, $0x0, CR
+	  78: INCEIPL       	$4
+
+	0x25483E8C:  7D464030  slw r6,r10,r8
+	  79: GETL       	R10, t66
+	  80: GETL       	R8, t64
+	  81: SHLL       	t64, t66
+	  82: PUTL       	t66, R6
+	  83: INCEIPL       	$4
+
+	0x25483E90:  7D693C30  srw r9,r11,r7
+	  84: GETL       	R11, t70
+	  85: GETL       	R7, t68
+	  86: SHRL       	t68, t70
+	  87: PUTL       	t70, R9
+	  88: INCEIPL       	$4
+
+	0x25483E94:  8144000C  lwz r10,12(r4)
+	  89: GETL       	R4, t72
+	  90: ADDL       	$0xC, t72
+	  91: LDL       	(t72), t74
+	  92: PUTL       	t74, R10
+	  93: INCEIPL       	$4
+
+	0x25483E98:  7CC04B78  or r0,r6,r9
+	  94: GETL       	R6, t76
+	  95: GETL       	R9, t78
+	  96: ORL       	t78, t76
+	  97: PUTL       	t76, R0
+	  98: INCEIPL       	$4
+
+	0x25483E9C:  38840010  addi r4,r4,16
+	  99: GETL       	R4, t80
+	 100: ADDL       	$0x10, t80
+	 101: PUTL       	t80, R4
+	 102: INCEIPL       	$4
+
+	0x25483EA0:  9003000C  stw r0,12(r3)
+	 103: GETL       	R0, t82
+	 104: GETL       	R3, t84
+	 105: ADDL       	$0xC, t84
+	 106: STL       	t82, (t84)
+	 107: INCEIPL       	$4
+
+	0x25483EA4:  38630010  addi r3,r3,16
+	 108: GETL       	R3, t86
+	 109: ADDL       	$0x10, t86
+	 110: PUTL       	t86, R3
+	 111: INCEIPL       	$4
+
+	0x25483EA8:  40A2FFA4  bc 5,2,0x25483E4C
+	 112: Jc02o       	$0x25483E4C
+
+
+. 0 25483E4C 96
+. 7D 66 40 30 7D 49 3C 30 7C C0 4B 78 81 64 00 00 90 03 00 00 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+==== BB 352 (0x25483EAC) approx BBs exec'd 0 ====
+
+	0x25483EAC:  7D654030  slw r5,r11,r8
+	   0: GETL       	R11, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25483EB0:  7D483C30  srw r8,r10,r7
+	   5: GETL       	R10, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25483EB4:  7CA44378  or r4,r5,r8
+	  10: GETL       	R5, t8
+	  11: GETL       	R8, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0x25483EB8:  38210010  addi r1,r1,16
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x10, t12
+	  17: PUTL       	t12, R1
+	  18: INCEIPL       	$4
+
+	0x25483EBC:  90830000  stw r4,0(r3)
+	  19: GETL       	R4, t14
+	  20: GETL       	R3, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25483EC0:  4E800020  blr
+	  23: GETL       	LR, t18
+	  24: JMPo-r       	t18  ($4)
+
+
+. 0 25483EAC 24
+. 7D 65 40 30 7D 48 3C 30 7C A4 43 78 38 21 00 10 90 83 00 00 4E 80 00 20
+
+==== BB 353 (0x25483AF4) approx BBs exec'd 0 ====
+
+	0x25483AF4:  4BFFFF9C  b 0x25483A90
+	   0: JMPo       	$0x25483A90  ($4)
+
+
+. 0 25483AF4 4
+. 4B FF FF 9C
+
+==== BB 354 (0x25474CAC) approx BBs exec'd 0 ====
+
+	0x25474CAC:  813A0000  lwz r9,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25474CB0:  895C0000  lbz r10,0(r28)
+	   4: GETL       	R28, t4
+	   5: LDB       	(t4), t6
+	   6: PUTL       	t6, R10
+	   7: INCEIPL       	$4
+
+	0x25474CB4:  2C890000  cmpi cr1,r9,0
+	   8: GETL       	R9, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x25474CB8:  6948002F  xori r8,r10,0x2F
+	  12: GETL       	R10, t12
+	  13: XORL       	$0x2F, t12
+	  14: PUTL       	t12, R8
+	  15: INCEIPL       	$4
+
+	0x25474CBC:  7F8800D0  neg r28,r8
+	  16: GETL       	R8, t14
+	  17: NEGL       	t14
+	  18: PUTL       	t14, R28
+	  19: INCEIPL       	$4
+
+	0x25474CC0:  578317BC  rlwinm r3,r28,2,30,30
+	  20: GETL       	R28, t16
+	  21: ROLL       	$0x2, t16
+	  22: ANDL       	$0x2, t16
+	  23: PUTL       	t16, R3
+	  24: INCEIPL       	$4
+
+	0x25474CC4:  41860018  bc 12,6,0x25474CDC
+	  25: Js06o       	$0x25474CDC
+
+
+. 0 25474CAC 28
+. 81 3A 00 00 89 5C 00 00 2C 89 00 00 69 48 00 2F 7F 88 00 D0 57 83 17 BC 41 86 00 18
+
+==== BB 355 (0x25483EE8) approx BBs exec'd 0 ====
+
+	0x25483EE8:  81640000  lwz r11,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25483EEC:  38A5FFFF  addi r5,r5,-1
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25483EF0:  81440004  lwz r10,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x25483EF4:  38840008  addi r4,r4,8
+	  13: GETL       	R4, t10
+	  14: ADDL       	$0x8, t10
+	  15: PUTL       	t10, R4
+	  16: INCEIPL       	$4
+
+	0x25483EF8:  4BFFFF54  b 0x25483E4C
+	  17: JMPo       	$0x25483E4C  ($4)
+
+
+. 0 25483EE8 20
+. 81 64 00 00 38 A5 FF FF 81 44 00 04 38 84 00 08 4B FF FF 54
+
+==== BB 356 (0x2548039C) approx BBs exec'd 0 ====
+
+	0x2548039C:  91280000  stw r9,0(r8)
+	   0: GETL       	R9, t0
+	   1: GETL       	R8, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x254803A0:  38210010  addi r1,r1,16
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x10, t4
+	   6: PUTL       	t4, R1
+	   7: INCEIPL       	$4
+
+	0x254803A4:  4E800020  blr
+	   8: GETL       	LR, t6
+	   9: JMPo-r       	t6  ($4)
+
+
+. 0 2548039C 12
+. 91 28 00 00 38 21 00 10 4E 80 00 20
+
+==== BB 357 (0x25474C00) approx BBs exec'd 0 ====
+
+	0x25474C00:  839E0150  lwz r28,336(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x150, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25474C04:  2F940000  cmpi cr7,r20,0
+	   5: GETL       	R20, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25474C08:  409E01F0  bc 4,30,0x25474DF8
+	   9: Jc30o       	$0x25474DF8
+
+
+. 0 25474C00 12
+. 83 9E 01 50 2F 94 00 00 40 9E 01 F0
+
+==== BB 358 (0x25474D3C) approx BBs exec'd 0 ====
+
+	0x25474D3C:  81E10064  lwz r15,100(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x64, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0x25474D40:  5710103A  rlwinm r16,r24,2,0,29
+	   5: GETL       	R24, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R16
+	   8: INCEIPL       	$4
+
+	0x25474D44:  81810018  lwz r12,24(r1)
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x18, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0x25474D48:  7C70B12E  stwx r3,r16,r22
+	  14: GETL       	R22, t10
+	  15: GETL       	R16, t12
+	  16: ADDL       	t12, t10
+	  17: GETL       	R3, t14
+	  18: STL       	t14, (t10)
+	  19: INCEIPL       	$4
+
+	0x25474D4C:  7DE803A6  mtlr r15
+	  20: GETL       	R15, t16
+	  21: PUTL       	t16, LR
+	  22: INCEIPL       	$4
+
+	0x25474D50:  7EC3B378  or r3,r22,r22
+	  23: GETL       	R22, t18
+	  24: PUTL       	t18, R3
+	  25: INCEIPL       	$4
+
+	0x25474D54:  81E1001C  lwz r15,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R15
+	  30: INCEIPL       	$4
+
+	0x25474D58:  82010020  lwz r16,32(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R16
+	  35: INCEIPL       	$4
+
+	0x25474D5C:  7D818120  mtcrf 0x18,r12
+	  36: GETL       	R12, t28
+	  37: ICRFL       	t28, $0x3, CR
+	  38: ICRFL       	t28, $0x4, CR
+	  39: INCEIPL       	$4
+
+	0x25474D60:  82210024  lwz r17,36(r1)
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x24, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R17
+	  44: INCEIPL       	$4
+
+	0x25474D64:  82410028  lwz r18,40(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x28, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R18
+	  49: INCEIPL       	$4
+
+	0x25474D68:  8261002C  lwz r19,44(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x2C, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R19
+	  54: INCEIPL       	$4
+
+	0x25474D6C:  82810030  lwz r20,48(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x30, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R20
+	  59: INCEIPL       	$4
+
+	0x25474D70:  82A10034  lwz r21,52(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x34, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R21
+	  64: INCEIPL       	$4
+
+	0x25474D74:  82C10038  lwz r22,56(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x38, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R22
+	  69: INCEIPL       	$4
+
+	0x25474D78:  82E1003C  lwz r23,60(r1)
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x3C, t54
+	  72: LDL       	(t54), t56
+	  73: PUTL       	t56, R23
+	  74: INCEIPL       	$4
+
+	0x25474D7C:  83010040  lwz r24,64(r1)
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x40, t58
+	  77: LDL       	(t58), t60
+	  78: PUTL       	t60, R24
+	  79: INCEIPL       	$4
+
+	0x25474D80:  83210044  lwz r25,68(r1)
+	  80: GETL       	R1, t62
+	  81: ADDL       	$0x44, t62
+	  82: LDL       	(t62), t64
+	  83: PUTL       	t64, R25
+	  84: INCEIPL       	$4
+
+	0x25474D84:  83410048  lwz r26,72(r1)
+	  85: GETL       	R1, t66
+	  86: ADDL       	$0x48, t66
+	  87: LDL       	(t66), t68
+	  88: PUTL       	t68, R26
+	  89: INCEIPL       	$4
+
+	0x25474D88:  8361004C  lwz r27,76(r1)
+	  90: GETL       	R1, t70
+	  91: ADDL       	$0x4C, t70
+	  92: LDL       	(t70), t72
+	  93: PUTL       	t72, R27
+	  94: INCEIPL       	$4
+
+	0x25474D8C:  83810050  lwz r28,80(r1)
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0x50, t74
+	  97: LDL       	(t74), t76
+	  98: PUTL       	t76, R28
+	  99: INCEIPL       	$4
+
+	0x25474D90:  83A10054  lwz r29,84(r1)
+	 100: GETL       	R1, t78
+	 101: ADDL       	$0x54, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R29
+	 104: INCEIPL       	$4
+
+	0x25474D94:  83C10058  lwz r30,88(r1)
+	 105: GETL       	R1, t82
+	 106: ADDL       	$0x58, t82
+	 107: LDL       	(t82), t84
+	 108: PUTL       	t84, R30
+	 109: INCEIPL       	$4
+
+	0x25474D98:  83E1005C  lwz r31,92(r1)
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x5C, t86
+	 112: LDL       	(t86), t88
+	 113: PUTL       	t88, R31
+	 114: INCEIPL       	$4
+
+	0x25474D9C:  38210060  addi r1,r1,96
+	 115: GETL       	R1, t90
+	 116: ADDL       	$0x60, t90
+	 117: PUTL       	t90, R1
+	 118: INCEIPL       	$4
+
+	0x25474DA0:  4E800020  blr
+	 119: GETL       	LR, t92
+	 120: JMPo-r       	t92  ($4)
+
+
+. 0 25474D3C 104
+. 81 E1 00 64 57 10 10 3A 81 81 00 18 7C 70 B1 2E 7D E8 03 A6 7E C3 B3 78 81 E1 00 1C 82 01 00 20 7D 81 81 20 82 21 00 24 82 41 00 28 82 61 00 2C 82 81 00 30 82 A1 00 34 82 C1 00 38 82 E1 00 3C 83 01 00 40 83 21 00 44 83 41 00 48 83 61 00 4C 83 81 00 50 83 A1 00 54 83 C1 00 58 83 E1 00 5C 38 21 00 60 4E 80 00 20
+
+==== BB 359 (0x254753B0) approx BBs exec'd 0 ====
+
+	0x254753B0:  807D0000  lwz r3,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x254753B4:  83830000  lwz r28,0(r3)
+	   4: GETL       	R3, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R28
+	   7: INCEIPL       	$4
+
+	0x254753B8:  2C1C0000  cmpi cr0,r28,0
+	   8: GETL       	R28, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x254753BC:  41820074  bc 12,2,0x25475430
+	  12: Js02o       	$0x25475430
+
+
+. 0 254753B0 16
+. 80 7D 00 00 83 83 00 00 2C 1C 00 00 41 82 00 74
+
+==== BB 360 (0x254753C0) approx BBs exec'd 0 ====
+
+	0x254753C0:  38E00000  li r7,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x254753C4:  90FD0004  stw r7,4(r29)
+	   3: GETL       	R7, t2
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x4, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254753C8:  83B701B0  lwz r29,432(r23)
+	   8: GETL       	R23, t6
+	   9: ADDL       	$0x1B0, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x254753CC:  93B80060  stw r29,96(r24)
+	  13: GETL       	R29, t10
+	  14: GETL       	R24, t12
+	  15: ADDL       	$0x60, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254753D0:  81410000  lwz r10,0(r1)
+	  18: GETL       	R1, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R10
+	  21: INCEIPL       	$4
+
+	0x254753D4:  82EA0004  lwz r23,4(r10)
+	  22: GETL       	R10, t18
+	  23: ADDL       	$0x4, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R23
+	  26: INCEIPL       	$4
+
+	0x254753D8:  830AFFE0  lwz r24,-32(r10)
+	  27: GETL       	R10, t22
+	  28: ADDL       	$0xFFFFFFE0, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R24
+	  31: INCEIPL       	$4
+
+	0x254753DC:  7EE803A6  mtlr r23
+	  32: GETL       	R23, t26
+	  33: PUTL       	t26, LR
+	  34: INCEIPL       	$4
+
+	0x254753E0:  832AFFE4  lwz r25,-28(r10)
+	  35: GETL       	R10, t28
+	  36: ADDL       	$0xFFFFFFE4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R25
+	  39: INCEIPL       	$4
+
+	0x254753E4:  82EAFFDC  lwz r23,-36(r10)
+	  40: GETL       	R10, t32
+	  41: ADDL       	$0xFFFFFFDC, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R23
+	  44: INCEIPL       	$4
+
+	0x254753E8:  834AFFE8  lwz r26,-24(r10)
+	  45: GETL       	R10, t36
+	  46: ADDL       	$0xFFFFFFE8, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R26
+	  49: INCEIPL       	$4
+
+	0x254753EC:  836AFFEC  lwz r27,-20(r10)
+	  50: GETL       	R10, t40
+	  51: ADDL       	$0xFFFFFFEC, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R27
+	  54: INCEIPL       	$4
+
+	0x254753F0:  838AFFF0  lwz r28,-16(r10)
+	  55: GETL       	R10, t44
+	  56: ADDL       	$0xFFFFFFF0, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R28
+	  59: INCEIPL       	$4
+
+	0x254753F4:  83AAFFF4  lwz r29,-12(r10)
+	  60: GETL       	R10, t48
+	  61: ADDL       	$0xFFFFFFF4, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R29
+	  64: INCEIPL       	$4
+
+	0x254753F8:  83CAFFF8  lwz r30,-8(r10)
+	  65: GETL       	R10, t52
+	  66: ADDL       	$0xFFFFFFF8, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R30
+	  69: INCEIPL       	$4
+
+	0x254753FC:  83EAFFFC  lwz r31,-4(r10)
+	  70: GETL       	R10, t56
+	  71: ADDL       	$0xFFFFFFFC, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R31
+	  74: INCEIPL       	$4
+
+	0x25475400:  7D415378  or r1,r10,r10
+	  75: GETL       	R10, t60
+	  76: PUTL       	t60, R1
+	  77: INCEIPL       	$4
+
+	0x25475404:  4E800020  blr
+	  78: GETL       	LR, t62
+	  79: JMPo-r       	t62  ($4)
+
+
+. 0 254753C0 72
+. 38 E0 00 00 90 FD 00 04 83 B7 01 B0 93 B8 00 60 81 41 00 00 82 EA 00 04 83 0A FF E0 7E E8 03 A6 83 2A FF E4 82 EA FF DC 83 4A FF E8 83 6A FF EC 83 8A FF F0 83 AA FF F4 83 CA FF F8 83 EA FF FC 7D 41 53 78 4E 80 00 20
+
+==== BB 361 (0x25472F70) approx BBs exec'd 0 ====
+
+	0x25472F70:  4BFFF76C  b 0x254726DC
+	   0: JMPo       	$0x254726DC  ($4)
+
+
+. 0 25472F70 4
+. 4B FF F7 6C
+
+==== BB 362 (0x254726DC) approx BBs exec'd 0 ====
+
+	0x254726DC:  80EE01BC  lwz r7,444(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1BC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254726E0:  2E070000  cmpi cr4,r7,0
+	   5: GETL       	R7, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254726E4:  40920010  bc 4,18,0x254726F4
+	   9: Jc18o       	$0x254726F4
+
+
+. 0 254726DC 12
+. 80 EE 01 BC 2E 07 00 00 40 92 00 10
+
+==== BB 363 (0x254726E8) approx BBs exec'd 0 ====
+
+	0x254726E8:  83AE01D4  lwz r29,468(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1D4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254726EC:  815D0000  lwz r10,0(r29)
+	   5: GETL       	R29, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x254726F0:  914E01BC  stw r10,444(r14)
+	   9: GETL       	R10, t8
+	  10: GETL       	R14, t10
+	  11: ADDL       	$0x1BC, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x254726F4:  834F004C  lwz r26,76(r15)
+	  14: GETL       	R15, t12
+	  15: ADDL       	$0x4C, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R26
+	  18: INCEIPL       	$4
+
+	0x254726F8:  3AE00001  li r23,1
+	  19: MOVL       	$0x1, t16
+	  20: PUTL       	t16, R23
+	  21: INCEIPL       	$4
+
+	0x254726FC:  808E0338  lwz r4,824(r14)
+	  22: GETL       	R14, t18
+	  23: ADDL       	$0x338, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R4
+	  26: INCEIPL       	$4
+
+	0x25472700:  386E01B8  addi r3,r14,440
+	  27: GETL       	R14, t22
+	  28: ADDL       	$0x1B8, t22
+	  29: PUTL       	t22, R3
+	  30: INCEIPL       	$4
+
+	0x25472704:  2C1AFFFE  cmpi cr0,r26,-2
+	  31: GETL       	R26, t24
+	  32: MOVL       	$0xFFFFFFFE, t28
+	  33: CMPL       	t24, t28, t26  (-rSo)
+	  34: ICRFL       	t26, $0x0, CR
+	  35: INCEIPL       	$4
+
+	0x25472708:  826E0004  lwz r19,4(r14)
+	  36: GETL       	R14, t30
+	  37: ADDL       	$0x4, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R19
+	  40: INCEIPL       	$4
+
+	0x2547270C:  816E0198  lwz r11,408(r14)
+	  41: GETL       	R14, t34
+	  42: ADDL       	$0x198, t34
+	  43: LDL       	(t34), t36
+	  44: PUTL       	t36, R11
+	  45: INCEIPL       	$4
+
+	0x25472710:  52E4F002  rlwimi r4,r23,30,0,1
+	  46: GETL       	R4, t38
+	  47: GETL       	R23, t40
+	  48: ROLL       	$0x1E, t40
+	  49: ANDL       	$0xC0000000, t40
+	  50: ANDL       	$0x3FFFFFFF, t38
+	  51: ORL       	t38, t40
+	  52: PUTL       	t40, R4
+	  53: INCEIPL       	$4
+
+	0x25472714:  818E019C  lwz r12,412(r14)
+	  54: GETL       	R14, t42
+	  55: ADDL       	$0x19C, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R12
+	  58: INCEIPL       	$4
+
+	0x25472718:  38B30001  addi r5,r19,1
+	  59: GETL       	R19, t46
+	  60: ADDL       	$0x1, t46
+	  61: PUTL       	t46, R5
+	  62: INCEIPL       	$4
+
+	0x2547271C:  908E0338  stw r4,824(r14)
+	  63: GETL       	R4, t48
+	  64: GETL       	R14, t50
+	  65: ADDL       	$0x338, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x25472720:  310C0001  addic r8,r12,1
+	  68: GETL       	R12, t52
+	  69: ADCL       	$0x1, t52  (-wCa)
+	  70: PUTL       	t52, R8
+	  71: INCEIPL       	$4
+
+	0x25472724:  7CEB0194  addze r7,r11
+	  72: GETL       	R11, t54
+	  73: ADCL       	$0x0, t54  (-rCa-wCa)
+	  74: PUTL       	t54, R7
+	  75: INCEIPL       	$4
+
+	0x25472728:  90AE0004  stw r5,4(r14)
+	  76: GETL       	R5, t56
+	  77: GETL       	R14, t58
+	  78: ADDL       	$0x4, t58
+	  79: STL       	t56, (t58)
+	  80: INCEIPL       	$4
+
+	0x2547272C:  9074000C  stw r3,12(r20)
+	  81: GETL       	R3, t60
+	  82: GETL       	R20, t62
+	  83: ADDL       	$0xC, t62
+	  84: STL       	t60, (t62)
+	  85: INCEIPL       	$4
+
+	0x25472730:  90EE0198  stw r7,408(r14)
+	  86: GETL       	R7, t64
+	  87: GETL       	R14, t66
+	  88: ADDL       	$0x198, t66
+	  89: STL       	t64, (t66)
+	  90: INCEIPL       	$4
+
+	0x25472734:  910E019C  stw r8,412(r14)
+	  91: GETL       	R8, t68
+	  92: GETL       	R14, t70
+	  93: ADDL       	$0x19C, t70
+	  94: STL       	t68, (t70)
+	  95: INCEIPL       	$4
+
+	0x25472738:  928E01C8  stw r20,456(r14)
+	  96: GETL       	R20, t72
+	  97: GETL       	R14, t74
+	  98: ADDL       	$0x1C8, t74
+	  99: STL       	t72, (t74)
+	 100: INCEIPL       	$4
+
+	0x2547273C:  41820838  bc 12,2,0x25472F74
+	 101: Js02o       	$0x25472F74
+
+
+. 0 254726E8 88
+. 83 AE 01 D4 81 5D 00 00 91 4E 01 BC 83 4F 00 4C 3A E0 00 01 80 8E 03 38 38 6E 01 B8 2C 1A FF FE 82 6E 00 04 81 6E 01 98 52 E4 F0 02 81 8E 01 9C 38 B3 00 01 90 8E 03 38 31 0C 00 01 7C EB 01 94 90 AE 00 04 90 74 00 0C 90 EE 01 98 91 0E 01 9C 92 8E 01 C8 41 82 08 38
+
+==== BB 364 (0x25472F74) approx BBs exec'd 0 ====
+
+	0x25472F74:  80140000  lwz r0,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25472F78:  7DE97B78  or r9,r15,r15
+	   4: GETL       	R15, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0x25472F7C:  2F000000  cmpi cr6,r0,0
+	   7: GETL       	R0, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x6, CR
+	  10: INCEIPL       	$4
+
+	0x25472F80:  3800FFFF  li r0,-1
+	  11: MOVL       	$0xFFFFFFFF, t10
+	  12: PUTL       	t10, R0
+	  13: INCEIPL       	$4
+
+	0x25472F84:  419A0008  bc 12,26,0x25472F8C
+	  14: Js26o       	$0x25472F8C
+
+
+. 0 25472F74 20
+. 80 14 00 00 7D E9 7B 78 2F 00 00 00 38 00 FF FF 41 9A 00 08
+
+==== BB 365 (0x25472F8C) approx BBs exec'd 0 ====
+
+	0x25472F8C:  9009004C  stw r0,76(r9)
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0x4C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472F90:  4BFFF7B0  b 0x25472740
+	   5: JMPo       	$0x25472740  ($4)
+
+
+. 0 25472F8C 8
+. 90 09 00 4C 4B FF F7 B0
+
+==== BB 366 (0x25472740) approx BBs exec'd 0 ====
+
+	0x25472740:  818E0358  lwz r12,856(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x358, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x25472744:  A16C002C  lhz r11,44(r12)
+	   5: GETL       	R12, t4
+	   6: ADDL       	$0x2C, t4
+	   7: LDW       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25472748:  838C001C  lwz r28,28(r12)
+	  10: GETL       	R12, t8
+	  11: ADDL       	$0x1C, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R28
+	  14: INCEIPL       	$4
+
+	0x2547274C:  B16E030C  sth r11,780(r14)
+	  15: GETL       	R11, t12
+	  16: GETL       	R14, t14
+	  17: ADDL       	$0x30C, t14
+	  18: STW       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25472750:  A32C002C  lhz r25,44(r12)
+	  20: GETL       	R12, t16
+	  21: ADDL       	$0x2C, t16
+	  22: LDW       	(t16), t18
+	  23: PUTL       	t18, R25
+	  24: INCEIPL       	$4
+
+	0x25472754:  7D6CE214  add r11,r12,r28
+	  25: GETL       	R12, t20
+	  26: GETL       	R28, t22
+	  27: ADDL       	t20, t22
+	  28: PUTL       	t22, R11
+	  29: INCEIPL       	$4
+
+	0x25472758:  916E0304  stw r11,772(r14)
+	  30: GETL       	R11, t24
+	  31: GETL       	R14, t26
+	  32: ADDL       	$0x304, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x2547275C:  2F990000  cmpi cr7,r25,0
+	  35: GETL       	R25, t28
+	  36: CMP0L       	t28, t30  (-rSo)
+	  37: ICRFL       	t30, $0x7, CR
+	  38: INCEIPL       	$4
+
+	0x25472760:  3959FFFF  addi r10,r25,-1
+	  39: GETL       	R25, t32
+	  40: ADDL       	$0xFFFFFFFF, t32
+	  41: PUTL       	t32, R10
+	  42: INCEIPL       	$4
+
+	0x25472764:  419E002C  bc 12,30,0x25472790
+	  43: Js30o       	$0x25472790
+
+
+. 0 25472740 40
+. 81 8E 03 58 A1 6C 00 2C 83 8C 00 1C B1 6E 03 0C A3 2C 00 2C 7D 6C E2 14 91 6E 03 04 2F 99 00 00 39 59 FF FF 41 9E 00 2C
+
+==== BB 367 (0x25472768) approx BBs exec'd 0 ====
+
+	0x25472768:  3D206474  lis r9,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x2547276C:  6127E552  ori r7,r9,0xE552
+	   3: MOVL       	$0x6474E552, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x25472770:  55582834  rlwinm r24,r10,5,0,26
+	   6: GETL       	R10, t4
+	   7: SHLL       	$0x5, t4
+	   8: PUTL       	t4, R24
+	   9: INCEIPL       	$4
+
+	0x25472774:  7F78582E  lwzx r27,r24,r11
+	  10: GETL       	R11, t6
+	  11: GETL       	R24, t8
+	  12: ADDL       	t8, t6
+	  13: LDL       	(t6), t10
+	  14: PUTL       	t10, R27
+	  15: INCEIPL       	$4
+
+	0x25472778:  7D185A14  add r8,r24,r11
+	  16: GETL       	R24, t12
+	  17: GETL       	R11, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R8
+	  20: INCEIPL       	$4
+
+	0x2547277C:  7C9B3800  cmp cr1,r27,r7
+	  21: GETL       	R27, t16
+	  22: GETL       	R7, t18
+	  23: CMPL       	t16, t18, t20  (-rSo)
+	  24: ICRFL       	t20, $0x1, CR
+	  25: INCEIPL       	$4
+
+	0x25472780:  41860C80  bc 12,6,0x25473400
+	  26: Js06o       	$0x25473400
+
+
+. 0 25472768 28
+. 3D 20 64 74 61 27 E5 52 55 58 28 34 7F 78 58 2E 7D 18 5A 14 7C 9B 38 00 41 86 0C 80
+
+==== BB 368 (0x25473400) approx BBs exec'd 0 ====
+
+	0x25473400:  83B10000  lwz r29,0(r17)
+	   0: GETL       	R17, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R29
+	   3: INCEIPL       	$4
+
+	0x25473404:  3B400000  li r26,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R26
+	   6: INCEIPL       	$4
+
+	0x25473408:  82C80008  lwz r22,8(r8)
+	   7: GETL       	R8, t6
+	   8: ADDL       	$0x8, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R22
+	  11: INCEIPL       	$4
+
+	0x2547340C:  3B200000  li r25,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R25
+	  14: INCEIPL       	$4
+
+	0x25473410:  2C1D0000  cmpi cr0,r29,0
+	  15: GETL       	R29, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x25473414:  92CE03EC  stw r22,1004(r14)
+	  19: GETL       	R22, t16
+	  20: GETL       	R14, t18
+	  21: ADDL       	$0x3EC, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0x25473418:  80C80014  lwz r6,20(r8)
+	  24: GETL       	R8, t20
+	  25: ADDL       	$0x14, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R6
+	  28: INCEIPL       	$4
+
+	0x2547341C:  90CE03F0  stw r6,1008(r14)
+	  29: GETL       	R6, t24
+	  30: GETL       	R14, t26
+	  31: ADDL       	$0x3F0, t26
+	  32: STL       	t24, (t26)
+	  33: INCEIPL       	$4
+
+	0x25473420:  4182F384  bc 12,2,0x254727A4
+	  34: Js02o       	$0x254727A4
+
+
+. 0 25473400 36
+. 83 B1 00 00 3B 40 00 00 82 C8 00 08 3B 20 00 00 2C 1D 00 00 92 CE 03 EC 80 C8 00 14 90 CE 03 F0 41 82 F3 84
+
+==== BB 369 (0x25473424) approx BBs exec'd 0 ====
+
+	0x25473424:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25473428:  4800FBA9  bl 0x25482FD0
+	   3: MOVL       	$0x2547342C, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+. 0 25473424 8
+. 7F A3 EB 78 48 00 FB A9
+
+==== BB 370 (0x2547342C) approx BBs exec'd 0 ====
+
+	0x2547342C:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25473430:  7C711B78  or r17,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R17
+	   5: INCEIPL       	$4
+
+	0x25473434:  3863001F  addi r3,r3,31
+	   6: GETL       	R3, t4
+	   7: ADDL       	$0x1F, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0x25473438:  546A0036  rlwinm r10,r3,0,0,27
+	  10: GETL       	R3, t6
+	  11: ANDL       	$0xFFFFFFF0, t6
+	  12: PUTL       	t6, R10
+	  13: INCEIPL       	$4
+
+	0x2547343C:  80E10000  lwz r7,0(r1)
+	  14: GETL       	R1, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R7
+	  17: INCEIPL       	$4
+
+	0x25473440:  7EAA00D0  neg r21,r10
+	  18: GETL       	R10, t12
+	  19: NEGL       	t12
+	  20: PUTL       	t12, R21
+	  21: INCEIPL       	$4
+
+	0x25473444:  38B10001  addi r5,r17,1
+	  22: GETL       	R17, t14
+	  23: ADDL       	$0x1, t14
+	  24: PUTL       	t14, R5
+	  25: INCEIPL       	$4
+
+	0x25473448:  7CE1A96E  stwux r7,r1,r21
+	  26: GETL       	R21, t16
+	  27: GETL       	R1, t18
+	  28: ADDL       	t18, t16
+	  29: PUTL       	t16, R1
+	  30: GETL       	R7, t20
+	  31: STL       	t20, (t16)
+	  32: INCEIPL       	$4
+
+	0x2547344C:  39010017  addi r8,r1,23
+	  33: GETL       	R1, t22
+	  34: ADDL       	$0x17, t22
+	  35: PUTL       	t22, R8
+	  36: INCEIPL       	$4
+
+	0x25473450:  55030036  rlwinm r3,r8,0,0,27
+	  37: GETL       	R8, t24
+	  38: ANDL       	$0xFFFFFFF0, t24
+	  39: PUTL       	t24, R3
+	  40: INCEIPL       	$4
+
+	0x25473454:  4801078D  bl 0x25483BE0
+	  41: MOVL       	$0x25473458, t26
+	  42: PUTL       	t26, LR
+	  43: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 2547342C 44
+. 7F A4 EB 78 7C 71 1B 78 38 63 00 1F 54 6A 00 36 80 E1 00 00 7E AA 00 D0 38 B1 00 01 7C E1 A9 6E 39 01 00 17 55 03 00 36 48 01 07 8D
+
+==== BB 371 (0x25483CB0) approx BBs exec'd 0 ====
+
+	0x25483CB0:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25483CB4:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25483CB8:  5785F0BE  rlwinm r5,r28,30,2,31
+	   6: GETL       	R28, t4
+	   7: SHRL       	$0x2, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x25483CBC:  4800015D  bl 0x25483E18
+	  10: MOVL       	$0x25483CC0, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0x25483E18  ($4)
+
+
+. 0 25483CB0 16
+. 7F E3 FB 78 7F A4 EB 78 57 85 F0 BE 48 00 01 5D
+
+==== BB 372 (0x25483EC4) approx BBs exec'd 0 ====
+
+	0x25483EC4:  81440000  lwz r10,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0x25483EC8:  3863FFFC  addi r3,r3,-4
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFFC, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483ECC:  85640004  lwzu r11,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: PUTL       	t6, R4
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0x25483ED0:  4BFFFF90  b 0x25483E60
+	  14: JMPo       	$0x25483E60  ($4)
+
+
+. 0 25483EC4 16
+. 81 44 00 00 38 63 FF FC 85 64 00 04 4B FF FF 90
+
+==== BB 373 (0x25483E60) approx BBs exec'd 0 ====
+
+	0x25483E60:  7D404030  slw r0,r10,r8
+	   0: GETL       	R10, t2
+	   1: GETL       	R8, t0
+	   2: SHLL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483E64:  7D6A3C30  srw r10,r11,r7
+	   5: GETL       	R11, t6
+	   6: GETL       	R7, t4
+	   7: SHRL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25483E68:  7C0C5378  or r12,r0,r10
+	  10: GETL       	R0, t8
+	  11: GETL       	R10, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0x25483E6C:  81440004  lwz r10,4(r4)
+	  15: GETL       	R4, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R10
+	  19: INCEIPL       	$4
+
+	0x25483E70:  91830004  stw r12,4(r3)
+	  20: GETL       	R12, t16
+	  21: GETL       	R3, t18
+	  22: ADDL       	$0x4, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25483E74:  7D6C4030  slw r12,r11,r8
+	  25: GETL       	R11, t22
+	  26: GETL       	R8, t20
+	  27: SHLL       	t20, t22
+	  28: PUTL       	t22, R12
+	  29: INCEIPL       	$4
+
+	0x25483E78:  7D4B3C30  srw r11,r10,r7
+	  30: GETL       	R10, t26
+	  31: GETL       	R7, t24
+	  32: SHRL       	t24, t26
+	  33: PUTL       	t26, R11
+	  34: INCEIPL       	$4
+
+	0x25483E7C:  7D865B78  or r6,r12,r11
+	  35: GETL       	R12, t28
+	  36: GETL       	R11, t30
+	  37: ORL       	t30, t28
+	  38: PUTL       	t28, R6
+	  39: INCEIPL       	$4
+
+	0x25483E80:  81640008  lwz r11,8(r4)
+	  40: GETL       	R4, t32
+	  41: ADDL       	$0x8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R11
+	  44: INCEIPL       	$4
+
+	0x25483E84:  90C30008  stw r6,8(r3)
+	  45: GETL       	R6, t36
+	  46: GETL       	R3, t38
+	  47: ADDL       	$0x8, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0x25483E88:  34A5FFFC  addic. r5,r5,-4
+	  50: GETL       	R5, t40
+	  51: ADCL       	$0xFFFFFFFC, t40  (-wCa)
+	  52: PUTL       	t40, R5
+	  53: CMP0L       	t40, t42  (-rSo)
+	  54: ICRFL       	t42, $0x0, CR
+	  55: INCEIPL       	$4
+
+	0x25483E8C:  7D464030  slw r6,r10,r8
+	  56: GETL       	R10, t46
+	  57: GETL       	R8, t44
+	  58: SHLL       	t44, t46
+	  59: PUTL       	t46, R6
+	  60: INCEIPL       	$4
+
+	0x25483E90:  7D693C30  srw r9,r11,r7
+	  61: GETL       	R11, t50
+	  62: GETL       	R7, t48
+	  63: SHRL       	t48, t50
+	  64: PUTL       	t50, R9
+	  65: INCEIPL       	$4
+
+	0x25483E94:  8144000C  lwz r10,12(r4)
+	  66: GETL       	R4, t52
+	  67: ADDL       	$0xC, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R10
+	  70: INCEIPL       	$4
+
+	0x25483E98:  7CC04B78  or r0,r6,r9
+	  71: GETL       	R6, t56
+	  72: GETL       	R9, t58
+	  73: ORL       	t58, t56
+	  74: PUTL       	t56, R0
+	  75: INCEIPL       	$4
+
+	0x25483E9C:  38840010  addi r4,r4,16
+	  76: GETL       	R4, t60
+	  77: ADDL       	$0x10, t60
+	  78: PUTL       	t60, R4
+	  79: INCEIPL       	$4
+
+	0x25483EA0:  9003000C  stw r0,12(r3)
+	  80: GETL       	R0, t62
+	  81: GETL       	R3, t64
+	  82: ADDL       	$0xC, t64
+	  83: STL       	t62, (t64)
+	  84: INCEIPL       	$4
+
+	0x25483EA4:  38630010  addi r3,r3,16
+	  85: GETL       	R3, t66
+	  86: ADDL       	$0x10, t66
+	  87: PUTL       	t66, R3
+	  88: INCEIPL       	$4
+
+	0x25483EA8:  40A2FFA4  bc 5,2,0x25483E4C
+	  89: Jc02o       	$0x25483E4C
+
+
+. 0 25483E60 76
+. 7D 40 40 30 7D 6A 3C 30 7C 0C 53 78 81 44 00 04 91 83 00 04 7D 6C 40 30 7D 4B 3C 30 7D 86 5B 78 81 64 00 08 90 C3 00 08 34 A5 FF FC 7D 46 40 30 7D 69 3C 30 81 44 00 0C 7C C0 4B 78 38 84 00 10 90 03 00 0C 38 63 00 10 40 A2 FF A4
+
+==== BB 374 (0x25483CC0) approx BBs exec'd 0 ====
+
+	0x25483CC0:  4BFFFF98  b 0x25483C58
+	   0: JMPo       	$0x25483C58  ($4)
+
+
+. 0 25483CC0 4
+. 4B FF FF 98
+
+==== BB 375 (0x25473458) approx BBs exec'd 0 ====
+
+	0x25473458:  907F0044  stw r3,68(r31)
+	   0: GETL       	R3, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x44, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547345C:  809E00D0  lwz r4,208(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0xD0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25473460:  387F0044  addi r3,r31,68
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x44, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x25473464:  4800CED9  bl 0x2548033C
+	  14: MOVL       	$0x25473468, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x2548033C  ($4)
+
+
+. 0 25473458 16
+. 90 7F 00 44 80 9E 00 D0 38 7F 00 44 48 00 CE D9
+
+==== BB 376 (0x25480398) approx BBs exec'd 0 ====
+
+	0x25480398:  39200000  li r9,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x2548039C:  91280000  stw r9,0(r8)
+	   3: GETL       	R9, t2
+	   4: GETL       	R8, t4
+	   5: STL       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0x254803A0:  38210010  addi r1,r1,16
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x10, t6
+	   9: PUTL       	t6, R1
+	  10: INCEIPL       	$4
+
+	0x254803A4:  4E800020  blr
+	  11: GETL       	LR, t8
+	  12: JMPo-r       	t8  ($4)
+
+
+. 0 25480398 16
+. 39 20 00 00 91 28 00 00 38 21 00 10 4E 80 00 20
+
+==== BB 377 (0x25473468) approx BBs exec'd 0 ====
+
+	0x25473468:  7C7D1B79  or. r29,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R29
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547346C:  41A2F338  bc 13,2,0x254727A4
+	   5: Js02o       	$0x254727A4
+
+
+. 0 25473468 8
+. 7C 7D 1B 79 41 A2 F3 38
+
+==== BB 378 (0x25473470) approx BBs exec'd 0 ====
+
+	0x25473470:  88BD0000  lbz r5,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x25473474:  2F050000  cmpi cr6,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x25473478:  41BAFFE4  bc 13,26,0x2547345C
+	   8: Js26o       	$0x2547345C
+
+
+. 0 25473470 12
+. 88 BD 00 00 2F 05 00 00 41 BA FF E4
+
+==== BB 379 (0x2547347C) approx BBs exec'd 0 ====
+
+	0x2547347C:  80900000  lwz r4,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25473480:  2F840000  cmpi cr7,r4,0
+	   4: GETL       	R4, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x25473484:  409E0774  bc 4,30,0x25473BF8
+	   8: Jc30o       	$0x25473BF8
+
+
+. 0 2547347C 12
+. 80 90 00 00 2F 84 00 00 40 9E 07 74
+
+==== BB 380 (0x25473488) approx BBs exec'd 0 ====
+
+	0x25473488:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547348C:  38A00001  li r5,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25473490:  38C00001  li r6,1
+	   6: MOVL       	$0x1, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x25473494:  38E00000  li r7,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0x25473498:  39000000  li r8,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R8
+	  14: INCEIPL       	$4
+
+	0x2547349C:  39200000  li r9,0
+	  15: MOVL       	$0x0, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x254734A0:  7E83A378  or r3,r20,r20
+	  18: GETL       	R20, t12
+	  19: PUTL       	t12, R3
+	  20: INCEIPL       	$4
+
+	0x254734A4:  48003845  bl 0x25476CE8
+	  21: MOVL       	$0x254734A8, t14
+	  22: PUTL       	t14, LR
+	  23: JMPo-c       	$0x25476CE8  ($4)
+
+
+. 0 25473488 32
+. 7F A4 EB 78 38 A0 00 01 38 C0 00 01 38 E0 00 00 39 00 00 00 39 20 00 00 7E 83 A3 78 48 00 38 45
+
+==== BB 381 _dl_map_object(0x25476CE8) approx BBs exec'd 0 ====
+
+	0x25476CE8:  9421FD70  stwu r1,-656(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFD70, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25476CEC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25476CF0:  48020311  bl 0x25497000
+	   9: MOVL       	$0x25476CF4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25476CE8 12
+. 94 21 FD 70 7C 08 02 A6 48 02 03 11
+
+==== BB 382 (0x25476CF4) approx BBs exec'd 0 ====
+
+	0x25476CF4:  93C10288  stw r30,648(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x288, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25476CF8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25476CFC:  9261025C  stw r19,604(r1)
+	   8: GETL       	R19, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x25C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25476D00:  92A10264  stw r21,612(r1)
+	  13: GETL       	R21, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x264, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25476D04:  1E690018  mulli r19,r9,24
+	  18: GETL       	R9, t14
+	  19: MULL       	$0x18, t14
+	  20: PUTL       	t14, R19
+	  21: INCEIPL       	$4
+
+	0x25476D08:  90010294  stw r0,660(r1)
+	  22: GETL       	R0, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x294, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x25476D0C:  7D800026  mfcr r12
+	  27: GETL       	CR, t20
+	  28: PUTL       	t20, R12
+	  29: INCEIPL       	$4
+
+	0x25476D10:  82BE04C8  lwz r21,1224(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0x4C8, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R21
+	  34: INCEIPL       	$4
+
+	0x25476D14:  93A10284  stw r29,644(r1)
+	  35: GETL       	R29, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x284, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x25476D18:  7FB3A82E  lwzx r29,r19,r21
+	  40: GETL       	R21, t30
+	  41: GETL       	R19, t32
+	  42: ADDL       	t32, t30
+	  43: LDL       	(t30), t34
+	  44: PUTL       	t34, R29
+	  45: INCEIPL       	$4
+
+	0x25476D1C:  92010250  stw r16,592(r1)
+	  46: GETL       	R16, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x250, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0x25476D20:  7CD03378  or r16,r6,r6
+	  51: GETL       	R6, t40
+	  52: PUTL       	t40, R16
+	  53: INCEIPL       	$4
+
+	0x25476D24:  2F9D0000  cmpi cr7,r29,0
+	  54: GETL       	R29, t42
+	  55: CMP0L       	t42, t44  (-rSo)
+	  56: ICRFL       	t44, $0x7, CR
+	  57: INCEIPL       	$4
+
+	0x25476D28:  92210254  stw r17,596(r1)
+	  58: GETL       	R17, t46
+	  59: GETL       	R1, t48
+	  60: ADDL       	$0x254, t48
+	  61: STL       	t46, (t48)
+	  62: INCEIPL       	$4
+
+	0x25476D2C:  92E1026C  stw r23,620(r1)
+	  63: GETL       	R23, t50
+	  64: GETL       	R1, t52
+	  65: ADDL       	$0x26C, t52
+	  66: STL       	t50, (t52)
+	  67: INCEIPL       	$4
+
+	0x25476D30:  7CF13B78  or r17,r7,r7
+	  68: GETL       	R7, t54
+	  69: PUTL       	t54, R17
+	  70: INCEIPL       	$4
+
+	0x25476D34:  93010270  stw r24,624(r1)
+	  71: GETL       	R24, t56
+	  72: GETL       	R1, t58
+	  73: ADDL       	$0x270, t58
+	  74: STL       	t56, (t58)
+	  75: INCEIPL       	$4
+
+	0x25476D38:  7CB72B78  or r23,r5,r5
+	  76: GETL       	R5, t60
+	  77: PUTL       	t60, R23
+	  78: INCEIPL       	$4
+
+	0x25476D3C:  93210274  stw r25,628(r1)
+	  79: GETL       	R25, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x274, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x25476D40:  7D184378  or r24,r8,r8
+	  84: GETL       	R8, t66
+	  85: PUTL       	t66, R24
+	  86: INCEIPL       	$4
+
+	0x25476D44:  9361027C  stw r27,636(r1)
+	  87: GETL       	R27, t68
+	  88: GETL       	R1, t70
+	  89: ADDL       	$0x27C, t70
+	  90: STL       	t68, (t70)
+	  91: INCEIPL       	$4
+
+	0x25476D48:  7D394B78  or r25,r9,r9
+	  92: GETL       	R9, t72
+	  93: PUTL       	t72, R25
+	  94: INCEIPL       	$4
+
+	0x25476D4C:  93E1028C  stw r31,652(r1)
+	  95: GETL       	R31, t74
+	  96: GETL       	R1, t76
+	  97: ADDL       	$0x28C, t76
+	  98: STL       	t74, (t76)
+	  99: INCEIPL       	$4
+
+	0x25476D50:  7C7B1B78  or r27,r3,r3
+	 100: GETL       	R3, t78
+	 101: PUTL       	t78, R27
+	 102: INCEIPL       	$4
+
+	0x25476D54:  91C10248  stw r14,584(r1)
+	 103: GETL       	R14, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x248, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x25476D58:  7C9F2378  or r31,r4,r4
+	 108: GETL       	R4, t84
+	 109: PUTL       	t84, R31
+	 110: INCEIPL       	$4
+
+	0x25476D5C:  91E1024C  stw r15,588(r1)
+	 111: GETL       	R15, t86
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x24C, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0x25476D60:  92410258  stw r18,600(r1)
+	 116: GETL       	R18, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0x258, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x25476D64:  92810260  stw r20,608(r1)
+	 121: GETL       	R20, t94
+	 122: GETL       	R1, t96
+	 123: ADDL       	$0x260, t96
+	 124: STL       	t94, (t96)
+	 125: INCEIPL       	$4
+
+	0x25476D68:  92C10268  stw r22,616(r1)
+	 126: GETL       	R22, t98
+	 127: GETL       	R1, t100
+	 128: ADDL       	$0x268, t100
+	 129: STL       	t98, (t100)
+	 130: INCEIPL       	$4
+
+	0x25476D6C:  93410278  stw r26,632(r1)
+	 131: GETL       	R26, t102
+	 132: GETL       	R1, t104
+	 133: ADDL       	$0x278, t104
+	 134: STL       	t102, (t104)
+	 135: INCEIPL       	$4
+
+	0x25476D70:  93810280  stw r28,640(r1)
+	 136: GETL       	R28, t106
+	 137: GETL       	R1, t108
+	 138: ADDL       	$0x280, t108
+	 139: STL       	t106, (t108)
+	 140: INCEIPL       	$4
+
+	0x25476D74:  91810244  stw r12,580(r1)
+	 141: GETL       	R12, t110
+	 142: GETL       	R1, t112
+	 143: ADDL       	$0x244, t112
+	 144: STL       	t110, (t112)
+	 145: INCEIPL       	$4
+
+	0x25476D78:  419E0040  bc 12,30,0x25476DB8
+	 146: Js30o       	$0x25476DB8
+
+
+. 0 25476CF4 136
+. 93 C1 02 88 7F C8 02 A6 92 61 02 5C 92 A1 02 64 1E 69 00 18 90 01 02 94 7D 80 00 26 82 BE 04 C8 93 A1 02 84 7F B3 A8 2E 92 01 02 50 7C D0 33 78 2F 9D 00 00 92 21 02 54 92 E1 02 6C 7C F1 3B 78 93 01 02 70 7C B7 2B 78 93 21 02 74 7D 18 43 78 93 61 02 7C 7D 39 4B 78 93 E1 02 8C 7C 7B 1B 78 91 C1 02 48 7C 9F 23 78 91 E1 02 4C 92 41 02 58 92 81 02 60 92 C1 02 68 93 41 02 78 93 81 02 80 91 81 02 44 41 9E 00 40
+
+==== BB 383 (0x25476D7C) approx BBs exec'd 0 ====
+
+	0x25476D7C:  3AC00000  li r22,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R22
+	   2: INCEIPL       	$4
+
+	0x25476D80:  80BD0180  lwz r5,384(r29)
+	   3: GETL       	R29, t2
+	   4: ADDL       	$0x180, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25476D84:  7FA4EB78  or r4,r29,r29
+	   8: GETL       	R29, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x25476D88:  7FE3FB78  or r3,r31,r31
+	  11: GETL       	R31, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x25476D8C:  74A90040  andis. r9,r5,0x40
+	  14: GETL       	R5, t10
+	  15: ANDL       	$0x400000, t10
+	  16: PUTL       	t10, R9
+	  17: CMP0L       	t10, t12  (-rSo)
+	  18: ICRFL       	t12, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0x25476D90:  4082001C  bc 4,2,0x25476DAC
+	  20: Jc02o       	$0x25476DAC
+
+
+. 0 25476D7C 24
+. 3A C0 00 00 80 BD 01 80 7F A4 EB 78 7F E3 FB 78 74 A9 00 40 40 82 00 1C
+
+==== BB 384 (0x25476D94) approx BBs exec'd 0 ====
+
+	0x25476D94:  48005941  bl 0x2547C6D4
+	   0: MOVL       	$0x25476D98, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547C6D4  ($4)
+
+
+. 0 25476D94 4
+. 48 00 59 41
+
+==== BB 385 _dl_name_match_p(0x2547C6D4) approx BBs exec'd 0 ====
+
+	0x2547C6D4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x2547C6D8:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547C6DC:  93E1001C  stw r31,28(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547C6E0:  7C9F2378  or r31,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0x2547C6E4:  93A10014  stw r29,20(r1)
+	  17: GETL       	R29, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0x2547C6E8:  7C7D1B78  or r29,r3,r3
+	  22: GETL       	R3, t16
+	  23: PUTL       	t16, R29
+	  24: INCEIPL       	$4
+
+	0x2547C6EC:  90010024  stw r0,36(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x24, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0x2547C6F0:  80840004  lwz r4,4(r4)
+	  30: GETL       	R4, t22
+	  31: ADDL       	$0x4, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R4
+	  34: INCEIPL       	$4
+
+	0x2547C6F4:  93C10018  stw r30,24(r1)
+	  35: GETL       	R30, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x18, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x2547C6F8:  480067E9  bl 0x25482EE0
+	  40: MOVL       	$0x2547C6FC, t30
+	  41: PUTL       	t30, LR
+	  42: JMPo-c       	$0x25482EE0  ($4)
+
+
+. 0 2547C6D4 40
+. 7C 08 02 A6 94 21 FF E0 93 E1 00 1C 7C 9F 23 78 93 A1 00 14 7C 7D 1B 78 90 01 00 24 80 84 00 04 93 C1 00 18 48 00 67 E9
+
+==== BB 386 strcmp(0x25482EE0) approx BBs exec'd 0 ====
+
+	0x25482EE0:  7C801B78  or r0,r4,r3
+	   0: GETL       	R4, t0
+	   1: GETL       	R3, t2
+	   2: ORL       	t2, t0
+	   3: PUTL       	t0, R0
+	   4: INCEIPL       	$4
+
+	0x25482EE4:  540007BF  rlwinm. r0,r0,0,30,31
+	   5: GETL       	R0, t4
+	   6: ANDL       	$0x3, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25482EE8:  3CE0FEFF  lis r7,-257
+	  11: MOVL       	$0xFEFF0000, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x25482EEC:  40820094  bc 4,2,0x25482F80
+	  14: Jc02o       	$0x25482F80
+
+
+. 0 25482EE0 16
+. 7C 80 1B 78 54 00 07 BF 3C E0 FE FF 40 82 00 94
+
+==== BB 387 (0x25482EF0) approx BBs exec'd 0 ====
+
+	0x25482EF0:  80A30000  lwz r5,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x25482EF4:  80C40000  lwz r6,0(r4)
+	   4: GETL       	R4, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R6
+	   7: INCEIPL       	$4
+
+	0x25482EF8:  3D007F7F  lis r8,32639
+	   8: MOVL       	$0x7F7F0000, t8
+	   9: PUTL       	t8, R8
+	  10: INCEIPL       	$4
+
+	0x25482EFC:  38E7FEFF  addi r7,r7,-257
+	  11: GETL       	R7, t10
+	  12: ADDL       	$0xFFFFFEFF, t10
+	  13: PUTL       	t10, R7
+	  14: INCEIPL       	$4
+
+	0x25482F00:  39087F7F  addi r8,r8,32639
+	  15: MOVL       	$0x7F7F7F7F, t12
+	  16: PUTL       	t12, R8
+	  17: INCEIPL       	$4
+
+	0x25482F04:  48000010  b 0x25482F14
+	  18: JMPo       	$0x25482F14  ($4)
+
+
+. 0 25482EF0 24
+. 80 A3 00 00 80 C4 00 00 3D 00 7F 7F 38 E7 FE FF 39 08 7F 7F 48 00 00 10
+
+==== BB 388 (0x25482F14) approx BBs exec'd 0 ====
+
+	0x25482F14:  7C072A14  add r0,r7,r5
+	   0: GETL       	R7, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482F18:  7D0928F8  nor r9,r8,r5
+	   5: GETL       	R8, t4
+	   6: GETL       	R5, t6
+	   7: ORL       	t6, t4
+	   8: NOTL       	t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0x25482F1C:  7C004839  and. r0,r0,r9
+	  11: GETL       	R0, t8
+	  12: GETL       	R9, t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R0
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0x25482F20:  7C853000  cmp cr1,r5,r6
+	  18: GETL       	R5, t14
+	  19: GETL       	R6, t16
+	  20: CMPL       	t14, t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0x25482F24:  4182FFE4  bc 12,2,0x25482F08
+	  23: Js02o       	$0x25482F08
+
+
+. 0 25482F14 20
+. 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+
+==== BB 389 (0x25482F08) approx BBs exec'd 0 ====
+
+	0x25482F08:  84A30004  lwzu r5,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R3
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25482F0C:  40860054  bc 4,6,0x25482F60
+	   6: Jc06o       	$0x25482F60
+
+
+. 0 25482F08 8
+. 84 A3 00 04 40 86 00 54
+
+==== BB 390 (0x25482F60) approx BBs exec'd 0 ====
+
+	0x25482F60:  80A3FFFC  lwz r5,-4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25482F64:  7CAA3279  xor. r10,r5,r6
+	   5: GETL       	R5, t4
+	   6: GETL       	R6, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25482F68:  7C662850  subf r3,r6,r5
+	  12: GETL       	R6, t10
+	  13: GETL       	R5, t12
+	  14: SUBL       	t10, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0x25482F6C:  4CA00020  bclr 5,0
+	  17: GETL       	LR, t14
+	  18: Jc00o-r       	t14
+
+
+. 0 25482F60 16
+. 80 A3 FF FC 7C AA 32 79 7C 66 28 50 4C A0 00 20
+
+==== BB 391 (0x2547C6FC) approx BBs exec'd 0 ====
+
+	0x2547C6FC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547C700:  38600001  li r3,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x2547C704:  419E0030  bc 12,30,0x2547C734
+	   7: Js30o       	$0x2547C734
+
+
+. 0 2547C6FC 12
+. 2F 83 00 00 38 60 00 01 41 9E 00 30
+
+==== BB 392 (0x2547C708) approx BBs exec'd 0 ====
+
+	0x2547C708:  83FF001C  lwz r31,28(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x2547C70C:  4800001C  b 0x2547C728
+	   5: JMPo       	$0x2547C728  ($4)
+
+
+. 0 2547C708 8
+. 83 FF 00 1C 48 00 00 1C
+
+==== BB 393 (0x2547C728) approx BBs exec'd 0 ====
+
+	0x2547C728:  2C1F0000  cmpi cr0,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547C72C:  4082FFE4  bc 4,2,0x2547C710
+	   4: Jc02o       	$0x2547C710
+
+
+. 0 2547C728 8
+. 2C 1F 00 00 40 82 FF E4
+
+==== BB 394 (0x2547C710) approx BBs exec'd 0 ====
+
+	0x2547C710:  809F0000  lwz r4,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x2547C714:  7FA3EB78  or r3,r29,r29
+	   4: GETL       	R29, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x2547C718:  480067C9  bl 0x25482EE0
+	   7: MOVL       	$0x2547C71C, t6
+	   8: PUTL       	t6, LR
+	   9: JMPo-c       	$0x25482EE0  ($4)
+
+
+. 0 2547C710 12
+. 80 9F 00 00 7F A3 EB 78 48 00 67 C9
+
+==== BB 395 (0x2547C71C) approx BBs exec'd 0 ====
+
+	0x2547C71C:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547C720:  41860030  bc 12,6,0x2547C750
+	   4: Js06o       	$0x2547C750
+
+
+. 0 2547C71C 8
+. 2C 83 00 00 41 86 00 30
+
+==== BB 396 (0x2547C724) approx BBs exec'd 0 ====
+
+	0x2547C724:  83FF0004  lwz r31,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x2547C728:  2C1F0000  cmpi cr0,r31,0
+	   5: GETL       	R31, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547C72C:  4082FFE4  bc 4,2,0x2547C710
+	   9: Jc02o       	$0x2547C710
+
+
+. 0 2547C724 12
+. 83 FF 00 04 2C 1F 00 00 40 82 FF E4
+
+==== BB 397 (0x2547C730) approx BBs exec'd 0 ====
+
+	0x2547C730:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547C734:  80810024  lwz r4,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x2547C738:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x2547C73C:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x2547C740:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547C744:  83E1001C  lwz r31,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0x2547C748:  38210020  addi r1,r1,32
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0x2547C74C:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+. 0 2547C730 32
+. 38 60 00 00 80 81 00 24 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 398 (0x25476D98) approx BBs exec'd 0 ====
+
+	0x25476D98:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x25476D9C:  408601B8  bc 4,6,0x25476F54
+	   4: Jc06o       	$0x25476F54
+
+
+. 0 25476D98 8
+. 2C 83 00 00 40 86 01 B8
+
+==== BB 399 (0x25476DA0) approx BBs exec'd 0 ====
+
+	0x25476DA0:  807D0180  lwz r3,384(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25476DA4:  74690080  andis. r9,r3,0x80
+	   5: GETL       	R3, t4
+	   6: ANDL       	$0x800000, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25476DA8:  418200F8  bc 12,2,0x25476EA0
+	  11: Js02o       	$0x25476EA0
+
+
+. 0 25476DA0 12
+. 80 7D 01 80 74 69 00 80 41 82 00 F8
+
+==== BB 400 (0x25476EA0) approx BBs exec'd 0 ====
+
+	0x25476EA0:  817D0058  lwz r11,88(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x58, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25476EA4:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25476EA8:  2D8B0000  cmpi cr3,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x3, CR
+	  11: INCEIPL       	$4
+
+	0x25476EAC:  41AEFF00  bc 13,14,0x25476DAC
+	  12: Js14o       	$0x25476DAC
+
+
+. 0 25476EA0 16
+. 81 7D 00 58 7F E3 FB 78 2D 8B 00 00 41 AE FF 00
+
+==== BB 401 (0x25476DAC) approx BBs exec'd 0 ====
+
+	0x25476DAC:  83BD000C  lwz r29,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25476DB0:  2C9D0000  cmpi cr1,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25476DB4:  4086FFCC  bc 4,6,0x25476D80
+	   9: Jc06o       	$0x25476D80
+
+
+. 0 25476DAC 12
+. 83 BD 00 0C 2C 9D 00 00 40 86 FF CC
+
+==== BB 402 (0x25476D80) approx BBs exec'd 0 ====
+
+	0x25476D80:  80BD0180  lwz r5,384(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25476D84:  7FA4EB78  or r4,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25476D88:  7FE3FB78  or r3,r31,r31
+	   8: GETL       	R31, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x25476D8C:  74A90040  andis. r9,r5,0x40
+	  11: GETL       	R5, t8
+	  12: ANDL       	$0x400000, t8
+	  13: PUTL       	t8, R9
+	  14: CMP0L       	t8, t10  (-rSo)
+	  15: ICRFL       	t10, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0x25476D90:  4082001C  bc 4,2,0x25476DAC
+	  17: Jc02o       	$0x25476DAC
+
+
+. 0 25476D80 20
+. 80 BD 01 80 7F A4 EB 78 7F E3 FB 78 74 A9 00 40 40 82 00 1C
+
+==== BB 403 (0x25476EB0) approx BBs exec'd 0 ====
+
+	0x25476EB0:  813D0034  lwz r9,52(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25476EB4:  816B0004  lwz r11,4(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25476EB8:  80890004  lwz r4,4(r9)
+	  10: GETL       	R9, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x25476EBC:  7F845A14  add r28,r4,r11
+	  15: GETL       	R4, t12
+	  16: GETL       	R11, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R28
+	  19: INCEIPL       	$4
+
+	0x25476EC0:  7F84E378  or r4,r28,r28
+	  20: GETL       	R28, t16
+	  21: PUTL       	t16, R4
+	  22: INCEIPL       	$4
+
+	0x25476EC4:  4800C01D  bl 0x25482EE0
+	  23: MOVL       	$0x25476EC8, t18
+	  24: PUTL       	t18, LR
+	  25: JMPo-c       	$0x25482EE0  ($4)
+
+
+. 0 25476EB0 24
+. 81 3D 00 34 81 6B 00 04 80 89 00 04 7F 84 5A 14 7F 84 E3 78 48 00 C0 1D
+
+==== BB 404 (0x25482F80) approx BBs exec'd 0 ====
+
+	0x25482F80:  88A30000  lbz r5,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x25482F84:  88C40000  lbz r6,0(r4)
+	   4: GETL       	R4, t4
+	   5: LDB       	(t4), t6
+	   6: PUTL       	t6, R6
+	   7: INCEIPL       	$4
+
+	0x25482F88:  48000010  b 0x25482F98
+	   8: JMPo       	$0x25482F98  ($4)
+
+
+. 0 25482F80 12
+. 88 A3 00 00 88 C4 00 00 48 00 00 10
+
+==== BB 405 (0x25482F98) approx BBs exec'd 0 ====
+
+	0x25482F98:  2C850000  cmpi cr1,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x25482F9C:  41860020  bc 12,6,0x25482FBC
+	   4: Js06o       	$0x25482FBC
+
+
+. 0 25482F98 8
+. 2C 85 00 00 41 86 00 20
+
+==== BB 406 (0x25482FA0) approx BBs exec'd 0 ====
+
+	0x25482FA0:  7C053000  cmp cr0,r5,r6
+	   0: GETL       	R5, t0
+	   1: GETL       	R6, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25482FA4:  40820018  bc 4,2,0x25482FBC
+	   5: Jc02o       	$0x25482FBC
+
+
+. 0 25482FA0 8
+. 7C 05 30 00 40 82 00 18
+
+==== BB 407 (0x25482FBC) approx BBs exec'd 0 ====
+
+	0x25482FBC:  7C662850  subf r3,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25482FC0:  4E800020  blr
+	   5: GETL       	LR, t4
+	   6: JMPo-r       	t4  ($4)
+
+
+. 0 25482FBC 8
+. 7C 66 28 50 4E 80 00 20
+
+==== BB 408 (0x25476EC8) approx BBs exec'd 0 ====
+
+	0x25476EC8:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25476ECC:  4092FEE0  bc 4,18,0x25476DAC
+	   4: Jc18o       	$0x25476DAC
+
+
+. 0 25476EC8 8
+. 2E 03 00 00 40 92 FE E0
+
+==== BB 409 (0x25476DB8) approx BBs exec'd 0 ====
+
+	0x25476DB8:  829E04F4  lwz r20,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R20
+	   4: INCEIPL       	$4
+
+	0x25476DBC:  2D9B0000  cmpi cr3,r27,0
+	   5: GETL       	R27, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x3, CR
+	   8: INCEIPL       	$4
+
+	0x25476DC0:  81540000  lwz r10,0(r20)
+	   9: GETL       	R20, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R10
+	  12: INCEIPL       	$4
+
+	0x25476DC4:  71490040  andi. r9,r10,0x40
+	  13: GETL       	R10, t12
+	  14: ANDL       	$0x40, t12
+	  15: PUTL       	t12, R9
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x25476DC8:  40820390  bc 4,2,0x25477158
+	  19: Jc02o       	$0x25477158
+
+
+. 0 25476DB8 20
+. 82 9E 04 F4 2D 9B 00 00 81 54 00 00 71 49 00 40 40 82 03 90
+
+==== BB 410 (0x25476DCC) approx BBs exec'd 0 ====
+
+	0x25476DCC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476DD0:  3880002F  li r4,47
+	   3: MOVL       	$0x2F, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25476DD4:  4800C02D  bl 0x25482E00
+	   6: MOVL       	$0x25476DD8, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25482E00  ($4)
+
+
+. 0 25476DCC 12
+. 7F E3 FB 78 38 80 00 2F 48 00 C0 2D
+
+==== BB 411 (0x25482EB4) approx BBs exec'd 0 ====
+
+	0x25482EB4:  7CE06038  and r0,r7,r12
+	   0: GETL       	R7, t0
+	   1: GETL       	R12, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482EB8:  7CEA6378  or r10,r7,r12
+	   5: GETL       	R7, t4
+	   6: GETL       	R12, t6
+	   7: ORL       	t6, t4
+	   8: PUTL       	t4, R10
+	   9: INCEIPL       	$4
+
+	0x25482EBC:  7C003A14  add r0,r0,r7
+	  10: GETL       	R0, t8
+	  11: GETL       	R7, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x25482EC0:  7D4900F8  nor r9,r10,r0
+	  15: GETL       	R10, t12
+	  16: GETL       	R0, t14
+	  17: ORL       	t14, t12
+	  18: NOTL       	t12
+	  19: PUTL       	t12, R9
+	  20: INCEIPL       	$4
+
+	0x25482EC4:  7D240034  cntlzw r4,r9
+	  21: GETL       	R9, t16
+	  22: CNTLZL       	t16
+	  23: PUTL       	t16, R4
+	  24: INCEIPL       	$4
+
+	0x25482EC8:  3908FFFC  addi r8,r8,-4
+	  25: GETL       	R8, t18
+	  26: ADDL       	$0xFFFFFFFC, t18
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0x25482ECC:  5484E8FE  rlwinm r4,r4,29,3,31
+	  29: GETL       	R4, t20
+	  30: SHRL       	$0x3, t20
+	  31: PUTL       	t20, R4
+	  32: INCEIPL       	$4
+
+	0x25482ED0:  7C682214  add r3,r8,r4
+	  33: GETL       	R8, t22
+	  34: GETL       	R4, t24
+	  35: ADDL       	t22, t24
+	  36: PUTL       	t24, R3
+	  37: INCEIPL       	$4
+
+	0x25482ED4:  4E800020  blr
+	  38: GETL       	LR, t26
+	  39: JMPo-r       	t26  ($4)
+
+
+. 0 25482EB4 36
+. 7C E0 60 38 7C EA 63 78 7C 00 3A 14 7D 49 00 F8 7D 24 00 34 39 08 FF FC 54 84 E8 FE 7C 68 22 14 4E 80 00 20
+
+==== BB 412 (0x25476DD8) approx BBs exec'd 0 ====
+
+	0x25476DD8:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25476DDC:  419A0290  bc 12,26,0x2547706C
+	   4: Js26o       	$0x2547706C
+
+
+. 0 25476DD8 8
+. 2F 03 00 00 41 9A 02 90
+
+==== BB 413 (0x25476DE0) approx BBs exec'd 0 ====
+
+	0x25476DE0:  418E01D8  bc 12,14,0x25476FB8
+	   0: Js14o       	$0x25476FB8
+
+
+. 0 25476DE0 4
+. 41 8E 01 D8
+
+==== BB 414 (0x25476DE4) approx BBs exec'd 0 ====
+
+	0x25476DE4:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476DE8:  7FE4FB78  or r4,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25476DEC:  4BFFD941  bl 0x2547472C
+	   6: MOVL       	$0x25476DF0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x2547472C  ($4)
+
+
+. 0 25476DE4 12
+. 7F 63 DB 78 7F E4 FB 78 4B FF D9 41
+
+==== BB 415 expand_dynamic_string_token(0x2547472C) approx BBs exec'd 0 ====
+
+	0x2547472C:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25474730:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25474734:  480228CD  bl 0x25497000
+	   9: MOVL       	$0x25474738, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547472C 12
+. 94 21 FF D0 7C 08 02 A6 48 02 28 CD
+
+==== BB 416 (0x25474738) approx BBs exec'd 0 ====
+
+	0x25474738:  7D800026  mfcr r12
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0x2547473C:  93C10028  stw r30,40(r1)
+	   3: GETL       	R30, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x28, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x25474740:  7FC802A6  mflr r30
+	   8: GETL       	LR, t6
+	   9: PUTL       	t6, R30
+	  10: INCEIPL       	$4
+
+	0x25474744:  93A10024  stw r29,36(r1)
+	  11: GETL       	R29, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x24, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25474748:  7C9D2378  or r29,r4,r4
+	  16: GETL       	R4, t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x2547474C:  93410018  stw r26,24(r1)
+	  19: GETL       	R26, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x18, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x25474750:  38800024  li r4,36
+	  24: MOVL       	$0x24, t18
+	  25: PUTL       	t18, R4
+	  26: INCEIPL       	$4
+
+	0x25474754:  93810020  stw r28,32(r1)
+	  27: GETL       	R28, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x25474758:  7C7C1B78  or r28,r3,r3
+	  32: GETL       	R3, t24
+	  33: PUTL       	t24, R28
+	  34: INCEIPL       	$4
+
+	0x2547475C:  92E1000C  stw r23,12(r1)
+	  35: GETL       	R23, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0xC, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x25474760:  7FA3EB78  or r3,r29,r29
+	  40: GETL       	R29, t30
+	  41: PUTL       	t30, R3
+	  42: INCEIPL       	$4
+
+	0x25474764:  93010010  stw r24,16(r1)
+	  43: GETL       	R24, t32
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x10, t34
+	  46: STL       	t32, (t34)
+	  47: INCEIPL       	$4
+
+	0x25474768:  93210014  stw r25,20(r1)
+	  48: GETL       	R25, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x14, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x2547476C:  9361001C  stw r27,28(r1)
+	  53: GETL       	R27, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x1C, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x25474770:  93E1002C  stw r31,44(r1)
+	  58: GETL       	R31, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x2C, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x25474774:  90010034  stw r0,52(r1)
+	  63: GETL       	R0, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x34, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x25474778:  91810008  stw r12,8(r1)
+	  68: GETL       	R12, t52
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x8, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x2547477C:  4800E685  bl 0x25482E00
+	  73: MOVL       	$0x25474780, t56
+	  74: PUTL       	t56, LR
+	  75: JMPo-c       	$0x25482E00  ($4)
+
+
+. 0 25474738 72
+. 7D 80 00 26 93 C1 00 28 7F C8 02 A6 93 A1 00 24 7C 9D 23 78 93 41 00 18 38 80 00 24 93 81 00 20 7C 7C 1B 78 92 E1 00 0C 7F A3 EB 78 93 01 00 10 93 21 00 14 93 61 00 1C 93 E1 00 2C 90 01 00 34 91 81 00 08 48 00 E6 85
+
+==== BB 417 (0x25474780) approx BBs exec'd 0 ====
+
+	0x25474780:  7C7A1B79  or. r26,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R26
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25474784:  40820074  bc 4,2,0x254747F8
+	   5: Jc02o       	$0x254747F8
+
+
+. 0 25474780 8
+. 7C 7A 1B 79 40 82 00 74
+
+==== BB 418 (0x25474788) approx BBs exec'd 0 ====
+
+	0x25474788:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547478C:  4800E845  bl 0x25482FD0
+	   3: MOVL       	$0x25474790, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+. 0 25474788 8
+. 7F A3 EB 78 48 00 E8 45
+
+==== BB 419 (0x25474790) approx BBs exec'd 0 ====
+
+	0x25474790:  3BE30001  addi r31,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0x25474794:  7FE3FB78  or r3,r31,r31
+	   4: GETL       	R31, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0x25474798:  4802328D  bl 0x25497A24
+	   7: MOVL       	$0x2547479C, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 25474790 12
+. 3B E3 00 01 7F E3 FB 78 48 02 32 8D
+
+==== BB 420 (0x2547479C) approx BBs exec'd 0 ====
+
+	0x2547479C:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254747A0:  2C030000  cmpi cr0,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0x254747A4:  41820014  bc 12,2,0x254747B8
+	   7: Js02o       	$0x254747B8
+
+
+. 0 2547479C 12
+. 38 00 00 00 2C 03 00 00 41 82 00 14
+
+==== BB 421 (0x254747A8) approx BBs exec'd 0 ====
+
+	0x254747A8:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254747AC:  7FE5FB78  or r5,r31,r31
+	   3: GETL       	R31, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x254747B0:  4800F431  bl 0x25483BE0
+	   6: MOVL       	$0x254747B4, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 254747A8 12
+. 7F A4 EB 78 7F E5 FB 78 48 00 F4 31
+
+==== BB 422 (0x25483D6C) approx BBs exec'd 0 ====
+
+	0x25483D6C:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25483D70:  3863FFFC  addi r3,r3,-4
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xFFFFFFFC, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25483D74:  81240004  lwz r9,4(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0x25483D78:  90030004  stw r0,4(r3)
+	  13: GETL       	R0, t10
+	  14: GETL       	R3, t12
+	  15: ADDL       	$0x4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25483D7C:  4BFFFFA0  b 0x25483D1C
+	  18: JMPo       	$0x25483D1C  ($4)
+
+
+. 0 25483D6C 20
+. 80 04 00 00 38 63 FF FC 81 24 00 04 90 03 00 04 4B FF FF A0
+
+==== BB 423 (0x25483D1C) approx BBs exec'd 0 ====
+
+	0x25483D1C:  80040008  lwz r0,8(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483D20:  91230008  stw r9,8(r3)
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483D24:  8124000C  lwz r9,12(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x25483D28:  9003000C  stw r0,12(r3)
+	  15: GETL       	R0, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0xC, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25483D2C:  80040010  lwz r0,16(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0x10, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R0
+	  24: INCEIPL       	$4
+
+	0x25483D30:  91230010  stw r9,16(r3)
+	  25: GETL       	R9, t20
+	  26: GETL       	R3, t22
+	  27: ADDL       	$0x10, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0x25483D34:  81240014  lwz r9,20(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0x14, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R9
+	  34: INCEIPL       	$4
+
+	0x25483D38:  90030014  stw r0,20(r3)
+	  35: GETL       	R0, t28
+	  36: GETL       	R3, t30
+	  37: ADDL       	$0x14, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	  40: GETL       	R4, t32
+	  41: ADDL       	$0x18, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R0
+	  44: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	  45: GETL       	R9, t36
+	  46: GETL       	R3, t38
+	  47: ADDL       	$0x18, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  50: GETL       	R5, t40
+	  51: ADCL       	$0xFFFFFFF8, t40  (-wCa)
+	  52: PUTL       	t40, R5
+	  53: CMP0L       	t40, t42  (-rSo)
+	  54: ICRFL       	t42, $0x0, CR
+	  55: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  56: GETL       	R4, t44
+	  57: ADDL       	$0x1C, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R9
+	  60: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  61: GETL       	R0, t48
+	  62: GETL       	R3, t50
+	  63: ADDL       	$0x1C, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  66: GETL       	R4, t52
+	  67: ADDL       	$0x20, t52
+	  68: PUTL       	t52, R4
+	  69: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  70: GETL       	R3, t54
+	  71: ADDL       	$0x20, t54
+	  72: PUTL       	t54, R3
+	  73: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  74: Jc02o       	$0x25483D0C
+
+
+. 0 25483D1C 64
+. 80 04 00 08 91 23 00 08 81 24 00 0C 90 03 00 0C 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+==== BB 424 (0x254747B4) approx BBs exec'd 0 ====
+
+	0x254747B4:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254747B8:  7C030378  or r3,r0,r0
+	   3: GETL       	R0, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0x254747BC:  83810034  lwz r28,52(r1)
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0x34, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0x254747C0:  81410008  lwz r10,8(r1)
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0x8, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R10
+	  15: INCEIPL       	$4
+
+	0x254747C4:  7F8803A6  mtlr r28
+	  16: GETL       	R28, t12
+	  17: PUTL       	t12, LR
+	  18: INCEIPL       	$4
+
+	0x254747C8:  82E1000C  lwz r23,12(r1)
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0xC, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R23
+	  23: INCEIPL       	$4
+
+	0x254747CC:  83010010  lwz r24,16(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x10, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R24
+	  28: INCEIPL       	$4
+
+	0x254747D0:  7D408120  mtcrf 0x8,r10
+	  29: GETL       	R10, t22
+	  30: ICRFL       	t22, $0x4, CR
+	  31: INCEIPL       	$4
+
+	0x254747D4:  83210014  lwz r25,20(r1)
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x14, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R25
+	  36: INCEIPL       	$4
+
+	0x254747D8:  83410018  lwz r26,24(r1)
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x18, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R26
+	  41: INCEIPL       	$4
+
+	0x254747DC:  8361001C  lwz r27,28(r1)
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x1C, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R27
+	  46: INCEIPL       	$4
+
+	0x254747E0:  83810020  lwz r28,32(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x20, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R28
+	  51: INCEIPL       	$4
+
+	0x254747E4:  83A10024  lwz r29,36(r1)
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x24, t40
+	  54: LDL       	(t40), t42
+	  55: PUTL       	t42, R29
+	  56: INCEIPL       	$4
+
+	0x254747E8:  83C10028  lwz r30,40(r1)
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x28, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R30
+	  61: INCEIPL       	$4
+
+	0x254747EC:  83E1002C  lwz r31,44(r1)
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x2C, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R31
+	  66: INCEIPL       	$4
+
+	0x254747F0:  38210030  addi r1,r1,48
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x30, t52
+	  69: PUTL       	t52, R1
+	  70: INCEIPL       	$4
+
+	0x254747F4:  4E800020  blr
+	  71: GETL       	LR, t54
+	  72: JMPo-r       	t54  ($4)
+
+
+. 0 254747B4 68
+. 7C 60 1B 78 7C 03 03 78 83 81 00 34 81 41 00 08 7F 88 03 A6 82 E1 00 0C 83 01 00 10 7D 40 81 20 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+==== BB 425 (0x25476DF0) approx BBs exec'd 0 ====
+
+	0x25476DF0:  3B40FFFF  li r26,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R26
+	   2: INCEIPL       	$4
+
+	0x25476DF4:  2F030000  cmpi cr6,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x6, CR
+	   6: INCEIPL       	$4
+
+	0x25476DF8:  90610228  stw r3,552(r1)
+	   7: GETL       	R3, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0x228, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25476DFC:  2E1AFFFF  cmpi cr4,r26,-1
+	  12: GETL       	R26, t10
+	  13: MOVL       	$0xFFFFFFFF, t14
+	  14: CMPL       	t10, t14, t12  (-rSo)
+	  15: ICRFL       	t12, $0x4, CR
+	  16: INCEIPL       	$4
+
+	0x25476E00:  409A0200  bc 4,26,0x25477000
+	  17: Jc26o       	$0x25477000
+
+
+. 0 25476DF0 20
+. 3B 40 FF FF 2F 03 00 00 90 61 02 28 2E 1A FF FF 40 9A 02 00
+
+==== BB 426 (0x25477000) approx BBs exec'd 0 ====
+
+	0x25477000:  38810018  addi r4,r1,24
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x18, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25477004:  4BFFF441  bl 0x25476444
+	   4: MOVL       	$0x25477008, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0x25476444  ($4)
+
+
+. 0 25477000 8
+. 38 81 00 18 4B FF F4 41
+
+==== BB 427 open_verify(0x25476444) approx BBs exec'd 0 ====
+
+	0x25476444:  9421FFA0  stwu r1,-96(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFA0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25476448:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547644C:  48020BB5  bl 0x25497000
+	   9: MOVL       	$0x25476450, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25476444 12
+. 94 21 FF A0 7C 08 02 A6 48 02 0B B5
+
+==== BB 428 (0x25476450) approx BBs exec'd 0 ====
+
+	0x25476450:  93C10058  stw r30,88(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x58, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25476454:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25476458:  93010040  stw r24,64(r1)
+	   8: GETL       	R24, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x40, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547645C:  93E1005C  stw r31,92(r1)
+	  13: GETL       	R31, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x5C, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25476460:  7C982378  or r24,r4,r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, R24
+	  20: INCEIPL       	$4
+
+	0x25476464:  7C3F0B78  or r31,r1,r1
+	  21: GETL       	R1, t16
+	  22: PUTL       	t16, R31
+	  23: INCEIPL       	$4
+
+	0x25476468:  38800000  li r4,0
+	  24: MOVL       	$0x0, t18
+	  25: PUTL       	t18, R4
+	  26: INCEIPL       	$4
+
+	0x2547646C:  92C10038  stw r22,56(r1)
+	  27: GETL       	R22, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x38, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x25476470:  7C761B78  or r22,r3,r3
+	  32: GETL       	R3, t24
+	  33: PUTL       	t24, R22
+	  34: INCEIPL       	$4
+
+	0x25476474:  93210044  stw r25,68(r1)
+	  35: GETL       	R25, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x44, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x25476478:  9361004C  stw r27,76(r1)
+	  40: GETL       	R27, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x4C, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x2547647C:  3B600000  li r27,0
+	  45: MOVL       	$0x0, t34
+	  46: PUTL       	t34, R27
+	  47: INCEIPL       	$4
+
+	0x25476480:  92A10034  stw r21,52(r1)
+	  48: GETL       	R21, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x34, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25476484:  92E1003C  stw r23,60(r1)
+	  53: GETL       	R23, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x3C, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x25476488:  93410048  stw r26,72(r1)
+	  58: GETL       	R26, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x48, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x2547648C:  93810050  stw r28,80(r1)
+	  63: GETL       	R28, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x50, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x25476490:  93A10054  stw r29,84(r1)
+	  68: GETL       	R29, t52
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x54, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x25476494:  90010064  stw r0,100(r1)
+	  73: GETL       	R0, t56
+	  74: GETL       	R1, t58
+	  75: ADDL       	$0x64, t58
+	  76: STL       	t56, (t58)
+	  77: INCEIPL       	$4
+
+	0x25476498:  4800BB49  bl 0x25481FE0
+	  78: MOVL       	$0x2547649C, t60
+	  79: PUTL       	t60, LR
+	  80: JMPo-c       	$0x25481FE0  ($4)
+
+
+. 0 25476450 76
+. 93 C1 00 58 7F C8 02 A6 93 01 00 40 93 E1 00 5C 7C 98 23 78 7C 3F 0B 78 38 80 00 00 92 C1 00 38 7C 76 1B 78 93 21 00 44 93 61 00 4C 3B 60 00 00 92 A1 00 34 92 E1 00 3C 93 41 00 48 93 81 00 50 93 A1 00 54 90 01 00 64 48 00 BB 49
+
+==== BB 429 open(0x25481FE0) approx BBs exec'd 0 ====
+
+	0x25481FE0:  38000005  li r0,5
+	   0: MOVL       	$0x5, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25481FE4:  44000002  sc
+	   3: JMPo-sys       	$0x25481FE8  ($4)
+
+
+. 0 25481FE0 8
+. 38 00 00 05 44 00 00 02
+
+==== BB 430 (0x25481FE8) approx BBs exec'd 0 ====
+
+	0x25481FE8:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+. 0 25481FE8 4
+. 4C A3 00 20
+
+==== BB 431 (0x2547649C) approx BBs exec'd 0 ====
+
+	0x2547649C:  2F83FFFF  cmpi cr7,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254764A0:  7C791B78  or r25,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R25
+	   7: INCEIPL       	$4
+
+	0x254764A4:  419E00CC  bc 12,30,0x25476570
+	   8: Js30o       	$0x25476570
+
+
+. 0 2547649C 12
+. 2F 83 FF FF 7C 79 1B 78 41 9E 00 CC
+
+==== BB 432 (0x254764A8) approx BBs exec'd 0 ====
+
+	0x254764A8:  82FE0514  lwz r23,1300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x514, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x254764AC:  3B980004  addi r28,r24,4
+	   5: GETL       	R24, t4
+	   6: ADDL       	$0x4, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x254764B0:  7F84E378  or r4,r28,r28
+	   9: GETL       	R28, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0x254764B4:  38A00200  li r5,512
+	  12: MOVL       	$0x200, t8
+	  13: PUTL       	t8, R5
+	  14: INCEIPL       	$4
+
+	0x254764B8:  93770000  stw r27,0(r23)
+	  15: GETL       	R27, t10
+	  16: GETL       	R23, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x254764BC:  4800BB45  bl 0x25482000
+	  19: MOVL       	$0x254764C0, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25482000  ($4)
+
+
+. 0 254764A8 24
+. 82 FE 05 14 3B 98 00 04 7F 84 E3 78 38 A0 02 00 93 77 00 00 48 00 BB 45
+
+==== BB 433 read(0x25482000) approx BBs exec'd 0 ====
+
+	0x25482000:  38000003  li r0,3
+	   0: MOVL       	$0x3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25482004:  44000002  sc
+	   3: JMPo-sys       	$0x25482008  ($4)
+
+
+. 0 25482000 8
+. 38 00 00 03 44 00 00 02
+
+==== BB 434 (0x25482008) approx BBs exec'd 0 ====
+
+	0x25482008:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+. 0 25482008 4
+. 4C A3 00 20
+
+==== BB 435 (0x254764C0) approx BBs exec'd 0 ====
+
+	0x254764C0:  2C030033  cmpi cr0,r3,51
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0x33, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254764C4:  7C7D1B78  or r29,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R29
+	   7: INCEIPL       	$4
+
+	0x254764C8:  90780000  stw r3,0(r24)
+	   8: GETL       	R3, t8
+	   9: GETL       	R24, t10
+	  10: STL       	t8, (t10)
+	  11: INCEIPL       	$4
+
+	0x254764CC:  4081014C  bc 4,1,0x25476618
+	  12: Jc01o       	$0x25476618
+
+
+. 0 254764C0 16
+. 2C 03 00 33 7C 7D 1B 78 90 78 00 00 40 81 01 4C
+
+==== BB 436 (0x254764D0) approx BBs exec'd 0 ====
+
+	0x254764D0:  809E016C  lwz r4,364(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x16C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254764D4:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254764D8:  38A00009  li r5,9
+	   8: MOVL       	$0x9, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x254764DC:  4800CDBD  bl 0x25483298
+	  11: MOVL       	$0x254764E0, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+. 0 254764D0 16
+. 80 9E 01 6C 7F 83 E3 78 38 A0 00 09 48 00 CD BD
+
+==== BB 437 (0x254764E0) approx BBs exec'd 0 ====
+
+	0x254764E0:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x254764E4:  409A0234  bc 4,26,0x25476718
+	   4: Jc26o       	$0x25476718
+
+
+. 0 254764E0 8
+. 2F 03 00 00 40 9A 02 34
+
+==== BB 438 (0x254764E8) approx BBs exec'd 0 ====
+
+	0x254764E8:  819C0014  lwz r12,20(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x254764EC:  811E0200  lwz r8,512(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x200, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x254764F0:  2C8C0001  cmpi cr1,r12,1
+	  10: GETL       	R12, t8
+	  11: MOVL       	$0x1, t12
+	  12: CMPL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x254764F4:  40860138  bc 4,6,0x2547662C
+	  15: Jc06o       	$0x2547662C
+
+
+. 0 254764E8 16
+. 81 9C 00 14 81 1E 02 00 2C 8C 00 01 40 86 01 38
+
+==== BB 439 (0x254764F8) approx BBs exec'd 0 ====
+
+	0x254764F8:  A11C0012  lhz r8,18(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x12, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254764FC:  2F080014  cmpi cr6,r8,20
+	   5: GETL       	R8, t4
+	   6: MOVL       	$0x14, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x25476500:  409A01D0  bc 4,26,0x254766D0
+	  10: Jc26o       	$0x254766D0
+
+
+. 0 254764F8 12
+. A1 1C 00 12 2F 08 00 14 40 9A 01 D0
+
+==== BB 440 (0x25476504) approx BBs exec'd 0 ====
+
+	0x25476504:  A01C0010  lhz r0,16(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25476508:  2F800003  cmpi cr7,r0,3
+	   5: GETL       	R0, t4
+	   6: MOVL       	$0x3, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x2547650C:  409E0278  bc 4,30,0x25476784
+	  10: Jc30o       	$0x25476784
+
+
+. 0 25476504 12
+. A0 1C 00 10 2F 80 00 03 40 9E 02 78
+
+==== BB 441 (0x25476510) approx BBs exec'd 0 ====
+
+	0x25476510:  A2BC002A  lhz r21,42(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x2A, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x25476514:  811E0204  lwz r8,516(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x204, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x25476518:  2C950020  cmpi cr1,r21,32
+	  10: GETL       	R21, t8
+	  11: MOVL       	$0x20, t12
+	  12: CMPL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x2547651C:  40860110  bc 4,6,0x2547662C
+	  15: Jc06o       	$0x2547662C
+
+
+. 0 25476510 16
+. A2 BC 00 2A 81 1E 02 04 2C 95 00 20 40 86 01 10
+
+==== BB 442 (0x25476520) approx BBs exec'd 0 ====
+
+	0x25476520:  A17C002C  lhz r11,44(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x2C, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25476524:  809C001C  lwz r4,28(r28)
+	   5: GETL       	R28, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25476528:  557B2834  rlwinm r27,r11,5,0,26
+	  10: GETL       	R11, t8
+	  11: SHLL       	$0x5, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0x2547652C:  7F44DA14  add r26,r4,r27
+	  14: GETL       	R4, t10
+	  15: GETL       	R27, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R26
+	  18: INCEIPL       	$4
+
+	0x25476530:  7F1AE840  cmpl cr6,r26,r29
+	  19: GETL       	R26, t14
+	  20: GETL       	R29, t16
+	  21: CMPUL       	t14, t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x6, CR
+	  23: INCEIPL       	$4
+
+	0x25476534:  7FA4C214  add r29,r4,r24
+	  24: GETL       	R4, t20
+	  25: GETL       	R24, t22
+	  26: ADDL       	t20, t22
+	  27: PUTL       	t22, R29
+	  28: INCEIPL       	$4
+
+	0x25476538:  3B5D0004  addi r26,r29,4
+	  29: GETL       	R29, t24
+	  30: ADDL       	$0x4, t24
+	  31: PUTL       	t24, R26
+	  32: INCEIPL       	$4
+
+	0x2547653C:  41990108  bc 12,25,0x25476644
+	  33: Js25o       	$0x25476644
+
+
+. 0 25476520 32
+. A1 7C 00 2C 80 9C 00 1C 55 7B 28 34 7F 44 DA 14 7F 1A E8 40 7F A4 C2 14 3B 5D 00 04 41 99 01 08
+
+==== BB 443 (0x25476540) approx BBs exec'd 0 ====
+
+	0x25476540:  557B2834  rlwinm r27,r11,5,0,26
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x5, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25476544:  7F5DD378  or r29,r26,r26
+	   4: GETL       	R26, t2
+	   5: PUTL       	t2, R29
+	   6: INCEIPL       	$4
+
+	0x25476548:  7C1BD214  add r0,r27,r26
+	   7: GETL       	R27, t4
+	   8: GETL       	R26, t6
+	   9: ADDL       	t4, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0x2547654C:  4800001C  b 0x25476568
+	  12: JMPo       	$0x25476568  ($4)
+
+
+. 0 25476540 16
+. 55 7B 28 34 7F 5D D3 78 7C 1B D2 14 48 00 00 1C
+
+==== BB 444 (0x25476568) approx BBs exec'd 0 ====
+
+	0x25476568:  7C00E840  cmpl cr0,r0,r29
+	   0: GETL       	R0, t0
+	   1: GETL       	R29, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547656C:  4181FFE4  bc 12,1,0x25476550
+	   5: Js01o       	$0x25476550
+
+
+. 0 25476568 8
+. 7C 00 E8 40 41 81 FF E4
+
+==== BB 445 (0x25476550) approx BBs exec'd 0 ====
+
+	0x25476550:  809D0000  lwz r4,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25476554:  2C840004  cmpi cr1,r4,4
+	   4: GETL       	R4, t4
+	   5: MOVL       	$0x4, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25476558:  4186005C  bc 12,6,0x254765B4
+	   9: Js06o       	$0x254765B4
+
+
+. 0 25476550 12
+. 80 9D 00 00 2C 84 00 04 41 86 00 5C
+
+==== BB 446 (0x2547655C) approx BBs exec'd 0 ====
+
+	0x2547655C:  55662834  rlwinm r6,r11,5,0,26
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x5, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x25476560:  3BBD0020  addi r29,r29,32
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x20, t2
+	   6: PUTL       	t2, R29
+	   7: INCEIPL       	$4
+
+	0x25476564:  7C06D214  add r0,r6,r26
+	   8: GETL       	R6, t4
+	   9: GETL       	R26, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R0
+	  12: INCEIPL       	$4
+
+	0x25476568:  7C00E840  cmpl cr0,r0,r29
+	  13: GETL       	R0, t8
+	  14: GETL       	R29, t10
+	  15: CMPUL       	t8, t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x0, CR
+	  17: INCEIPL       	$4
+
+	0x2547656C:  4181FFE4  bc 12,1,0x25476550
+	  18: Js01o       	$0x25476550
+
+
+. 0 2547655C 20
+. 55 66 28 34 3B BD 00 20 7C 06 D2 14 7C 00 E8 40 41 81 FF E4
+
+==== BB 447 (0x25476570) approx BBs exec'd 0 ====
+
+	0x25476570:  80810000  lwz r4,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25476574:  7F23CB78  or r3,r25,r25
+	   4: GETL       	R25, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x25476578:  82E40004  lwz r23,4(r4)
+	   7: GETL       	R4, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R23
+	  11: INCEIPL       	$4
+
+	0x2547657C:  82A4FFD4  lwz r21,-44(r4)
+	  12: GETL       	R4, t10
+	  13: ADDL       	$0xFFFFFFD4, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R21
+	  16: INCEIPL       	$4
+
+	0x25476580:  7EE803A6  mtlr r23
+	  17: GETL       	R23, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0x25476584:  82C4FFD8  lwz r22,-40(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0xFFFFFFD8, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R22
+	  24: INCEIPL       	$4
+
+	0x25476588:  82E4FFDC  lwz r23,-36(r4)
+	  25: GETL       	R4, t20
+	  26: ADDL       	$0xFFFFFFDC, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R23
+	  29: INCEIPL       	$4
+
+	0x2547658C:  8304FFE0  lwz r24,-32(r4)
+	  30: GETL       	R4, t24
+	  31: ADDL       	$0xFFFFFFE0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R24
+	  34: INCEIPL       	$4
+
+	0x25476590:  8324FFE4  lwz r25,-28(r4)
+	  35: GETL       	R4, t28
+	  36: ADDL       	$0xFFFFFFE4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R25
+	  39: INCEIPL       	$4
+
+	0x25476594:  8344FFE8  lwz r26,-24(r4)
+	  40: GETL       	R4, t32
+	  41: ADDL       	$0xFFFFFFE8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R26
+	  44: INCEIPL       	$4
+
+	0x25476598:  8364FFEC  lwz r27,-20(r4)
+	  45: GETL       	R4, t36
+	  46: ADDL       	$0xFFFFFFEC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R27
+	  49: INCEIPL       	$4
+
+	0x2547659C:  8384FFF0  lwz r28,-16(r4)
+	  50: GETL       	R4, t40
+	  51: ADDL       	$0xFFFFFFF0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R28
+	  54: INCEIPL       	$4
+
+	0x254765A0:  83A4FFF4  lwz r29,-12(r4)
+	  55: GETL       	R4, t44
+	  56: ADDL       	$0xFFFFFFF4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R29
+	  59: INCEIPL       	$4
+
+	0x254765A4:  83C4FFF8  lwz r30,-8(r4)
+	  60: GETL       	R4, t48
+	  61: ADDL       	$0xFFFFFFF8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R30
+	  64: INCEIPL       	$4
+
+	0x254765A8:  83E4FFFC  lwz r31,-4(r4)
+	  65: GETL       	R4, t52
+	  66: ADDL       	$0xFFFFFFFC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R31
+	  69: INCEIPL       	$4
+
+	0x254765AC:  7C812378  or r1,r4,r4
+	  70: GETL       	R4, t56
+	  71: PUTL       	t56, R1
+	  72: INCEIPL       	$4
+
+	0x254765B0:  4E800020  blr
+	  73: GETL       	LR, t58
+	  74: JMPo-r       	t58  ($4)
+
+
+. 0 25476570 68
+. 80 81 00 00 7F 23 CB 78 82 E4 00 04 82 A4 FF D4 7E E8 03 A6 82 C4 FF D8 82 E4 FF DC 83 04 FF E0 83 24 FF E4 83 44 FF E8 83 64 FF EC 83 84 FF F0 83 A4 FF F4 83 C4 FF F8 83 E4 FF FC 7C 81 23 78 4E 80 00 20
+
+==== BB 448 (0x25477008) approx BBs exec'd 0 ====
+
+	0x25477008:  2E03FFFF  cmpi cr4,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x2547700C:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R26
+	   7: INCEIPL       	$4
+
+	0x25477010:  4092FDF4  bc 4,18,0x25476E04
+	   8: Jc18o       	$0x25476E04
+
+
+. 0 25477008 12
+. 2E 03 FF FF 7C 7A 1B 78 40 92 FD F4
+
+==== BB 449 (0x25476E04) approx BBs exec'd 0 ====
+
+	0x25476E04:  571227FE  rlwinm r18,r24,4,31,31
+	   0: GETL       	R24, t0
+	   1: ROLL       	$0x4, t0
+	   2: ANDL       	$0x1, t0
+	   3: PUTL       	t0, R18
+	   4: INCEIPL       	$4
+
+	0x25476E08:  32D2FFFF  addic r22,r18,-1
+	   5: GETL       	R18, t2
+	   6: ADCL       	$0xFFFFFFFF, t2  (-wCa)
+	   7: PUTL       	t2, R22
+	   8: INCEIPL       	$4
+
+	0x25476E0C:  7ED6B110  subfe r22,r22,r22
+	   9: GETL       	R22, t4
+	  10: GETL       	R22, t6
+	  11: SBBL       	t4, t6  (-rCa-wCa)
+	  12: PUTL       	t6, R22
+	  13: INCEIPL       	$4
+
+	0x25476E10:  7F7CB038  and r28,r27,r22
+	  14: GETL       	R27, t8
+	  15: GETL       	R22, t10
+	  16: ANDL       	t8, t10
+	  17: PUTL       	t10, R28
+	  18: INCEIPL       	$4
+
+	0x25476E14:  4092020C  bc 4,18,0x25477020
+	  19: Jc18o       	$0x25477020
+
+
+. 0 25476E04 20
+. 57 12 27 FE 32 D2 FF FF 7E D6 B1 10 7F 7C B0 38 40 92 02 0C
+
+==== BB 450 (0x25477020) approx BBs exec'd 0 ====
+
+	0x25477020:  80FE04C0  lwz r7,1216(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25477024:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25477028:  80C10228  lwz r6,552(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x228, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R6
+	  12: INCEIPL       	$4
+
+	0x2547702C:  7F44D378  or r4,r26,r26
+	  13: GETL       	R26, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0x25477030:  81C70000  lwz r14,0(r7)
+	  16: GETL       	R7, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R14
+	  19: INCEIPL       	$4
+
+	0x25477034:  7E088378  or r8,r16,r16
+	  20: GETL       	R16, t16
+	  21: PUTL       	t16, R8
+	  22: INCEIPL       	$4
+
+	0x25477038:  7F87E378  or r7,r28,r28
+	  23: GETL       	R28, t18
+	  24: PUTL       	t18, R7
+	  25: INCEIPL       	$4
+
+	0x2547703C:  7F09C378  or r9,r24,r24
+	  26: GETL       	R24, t20
+	  27: PUTL       	t20, R9
+	  28: INCEIPL       	$4
+
+	0x25477040:  38A10018  addi r5,r1,24
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x18, t22
+	  31: PUTL       	t22, R5
+	  32: INCEIPL       	$4
+
+	0x25477044:  3941022C  addi r10,r1,556
+	  33: GETL       	R1, t24
+	  34: ADDL       	$0x22C, t24
+	  35: PUTL       	t24, R10
+	  36: INCEIPL       	$4
+
+	0x25477048:  91C1022C  stw r14,556(r1)
+	  37: GETL       	R14, t26
+	  38: GETL       	R1, t28
+	  39: ADDL       	$0x22C, t28
+	  40: STL       	t26, (t28)
+	  41: INCEIPL       	$4
+
+	0x2547704C:  93210008  stw r25,8(r1)
+	  42: GETL       	R25, t30
+	  43: GETL       	R1, t32
+	  44: ADDL       	$0x8, t32
+	  45: STL       	t30, (t32)
+	  46: INCEIPL       	$4
+
+	0x25477050:  4BFFE4B9  bl 0x25475508
+	  47: MOVL       	$0x25477054, t34
+	  48: PUTL       	t34, LR
+	  49: JMPo-c       	$0x25475508  ($4)
+
+
+. 0 25477020 52
+. 80 FE 04 C0 7F E3 FB 78 80 C1 02 28 7F 44 D3 78 81 C7 00 00 7E 08 83 78 7F 87 E3 78 7F 09 C3 78 38 A1 00 18 39 41 02 2C 91 C1 02 2C 93 21 00 08 4B FF E4 B9
+
+==== BB 451 _dl_map_object_from_fd(0x25475508) approx BBs exec'd 0 ====
+
+	0x25475508:  9421FF00  stwu r1,-256(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF00, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547550C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25475510:  48021AF1  bl 0x25497000
+	   9: MOVL       	$0x25475514, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25475508 12
+. 94 21 FF 00 7C 08 02 A6 48 02 1A F1
+
+==== BB 452 (0x25475514) approx BBs exec'd 0 ====
+
+	0x25475514:  7D800026  mfcr r12
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0x25475518:  93C100F8  stw r30,248(r1)
+	   3: GETL       	R30, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xF8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x2547551C:  7FC802A6  mflr r30
+	   8: GETL       	LR, t6
+	   9: PUTL       	t6, R30
+	  10: INCEIPL       	$4
+
+	0x25475520:  93E100FC  stw r31,252(r1)
+	  11: GETL       	R31, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0xFC, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25475524:  7C3F0B78  or r31,r1,r1
+	  16: GETL       	R1, t12
+	  17: PUTL       	t12, R31
+	  18: INCEIPL       	$4
+
+	0x25475528:  90010104  stw r0,260(r1)
+	  19: GETL       	R0, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x104, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547552C:  920100C0  stw r16,192(r1)
+	  24: GETL       	R16, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0xC0, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x25475530:  7CD03378  or r16,r6,r6
+	  29: GETL       	R6, t22
+	  30: PUTL       	t22, R16
+	  31: INCEIPL       	$4
+
+	0x25475534:  926100CC  stw r19,204(r1)
+	  32: GETL       	R19, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0xCC, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x25475538:  7C932378  or r19,r4,r4
+	  37: GETL       	R4, t28
+	  38: PUTL       	t28, R19
+	  39: INCEIPL       	$4
+
+	0x2547553C:  928100D0  stw r20,208(r1)
+	  40: GETL       	R20, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0xD0, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x25475540:  7C741B78  or r20,r3,r3
+	  45: GETL       	R3, t34
+	  46: PUTL       	t34, R20
+	  47: INCEIPL       	$4
+
+	0x25475544:  92C100D8  stw r22,216(r1)
+	  48: GETL       	R22, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0xD8, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25475548:  38600003  li r3,3
+	  53: MOVL       	$0x3, t40
+	  54: PUTL       	t40, R3
+	  55: INCEIPL       	$4
+
+	0x2547554C:  92E100DC  stw r23,220(r1)
+	  56: GETL       	R23, t42
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0xDC, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x25475550:  7D364B78  or r22,r9,r9
+	  61: GETL       	R9, t46
+	  62: PUTL       	t46, R22
+	  63: INCEIPL       	$4
+
+	0x25475554:  932100E4  stw r25,228(r1)
+	  64: GETL       	R25, t48
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0xE4, t50
+	  67: STL       	t48, (t50)
+	  68: INCEIPL       	$4
+
+	0x25475558:  7CF73B78  or r23,r7,r7
+	  69: GETL       	R7, t52
+	  70: PUTL       	t52, R23
+	  71: INCEIPL       	$4
+
+	0x2547555C:  934100E8  stw r26,232(r1)
+	  72: GETL       	R26, t54
+	  73: GETL       	R1, t56
+	  74: ADDL       	$0xE8, t56
+	  75: STL       	t54, (t56)
+	  76: INCEIPL       	$4
+
+	0x25475560:  7D194378  or r25,r8,r8
+	  77: GETL       	R8, t58
+	  78: PUTL       	t58, R25
+	  79: INCEIPL       	$4
+
+	0x25475564:  936100EC  stw r27,236(r1)
+	  80: GETL       	R27, t60
+	  81: GETL       	R1, t62
+	  82: ADDL       	$0xEC, t62
+	  83: STL       	t60, (t62)
+	  84: INCEIPL       	$4
+
+	0x25475568:  3B400000  li r26,0
+	  85: MOVL       	$0x0, t64
+	  86: PUTL       	t64, R26
+	  87: INCEIPL       	$4
+
+	0x2547556C:  938100F0  stw r28,240(r1)
+	  88: GETL       	R28, t66
+	  89: GETL       	R1, t68
+	  90: ADDL       	$0xF0, t68
+	  91: STL       	t66, (t68)
+	  92: INCEIPL       	$4
+
+	0x25475570:  7CBC2B78  or r28,r5,r5
+	  93: GETL       	R5, t70
+	  94: PUTL       	t70, R28
+	  95: INCEIPL       	$4
+
+	0x25475574:  91C100B8  stw r14,184(r1)
+	  96: GETL       	R14, t72
+	  97: GETL       	R1, t74
+	  98: ADDL       	$0xB8, t74
+	  99: STL       	t72, (t74)
+	 100: INCEIPL       	$4
+
+	0x25475578:  38BF0028  addi r5,r31,40
+	 101: GETL       	R31, t76
+	 102: ADDL       	$0x28, t76
+	 103: PUTL       	t76, R5
+	 104: INCEIPL       	$4
+
+	0x2547557C:  91E100BC  stw r15,188(r1)
+	 105: GETL       	R15, t78
+	 106: GETL       	R1, t80
+	 107: ADDL       	$0xBC, t80
+	 108: STL       	t78, (t80)
+	 109: INCEIPL       	$4
+
+	0x25475580:  922100C4  stw r17,196(r1)
+	 110: GETL       	R17, t82
+	 111: GETL       	R1, t84
+	 112: ADDL       	$0xC4, t84
+	 113: STL       	t82, (t84)
+	 114: INCEIPL       	$4
+
+	0x25475584:  924100C8  stw r18,200(r1)
+	 115: GETL       	R18, t86
+	 116: GETL       	R1, t88
+	 117: ADDL       	$0xC8, t88
+	 118: STL       	t86, (t88)
+	 119: INCEIPL       	$4
+
+	0x25475588:  92A100D4  stw r21,212(r1)
+	 120: GETL       	R21, t90
+	 121: GETL       	R1, t92
+	 122: ADDL       	$0xD4, t92
+	 123: STL       	t90, (t92)
+	 124: INCEIPL       	$4
+
+	0x2547558C:  930100E0  stw r24,224(r1)
+	 125: GETL       	R24, t94
+	 126: GETL       	R1, t96
+	 127: ADDL       	$0xE0, t96
+	 128: STL       	t94, (t96)
+	 129: INCEIPL       	$4
+
+	0x25475590:  93A100F4  stw r29,244(r1)
+	 130: GETL       	R29, t98
+	 131: GETL       	R1, t100
+	 132: ADDL       	$0xF4, t100
+	 133: STL       	t98, (t100)
+	 134: INCEIPL       	$4
+
+	0x25475594:  918100B4  stw r12,180(r1)
+	 135: GETL       	R12, t102
+	 136: GETL       	R1, t104
+	 137: ADDL       	$0xB4, t104
+	 138: STL       	t102, (t104)
+	 139: INCEIPL       	$4
+
+	0x25475598:  915F0098  stw r10,152(r31)
+	 140: GETL       	R10, t106
+	 141: GETL       	R31, t108
+	 142: ADDL       	$0x98, t108
+	 143: STL       	t106, (t108)
+	 144: INCEIPL       	$4
+
+	0x2547559C:  837F0108  lwz r27,264(r31)
+	 145: GETL       	R31, t110
+	 146: ADDL       	$0x108, t110
+	 147: LDL       	(t110), t112
+	 148: PUTL       	t112, R27
+	 149: INCEIPL       	$4
+
+	0x254755A0:  4800C905  bl 0x25481EA4
+	 150: MOVL       	$0x254755A4, t114
+	 151: PUTL       	t114, LR
+	 152: JMPo-c       	$0x25481EA4  ($4)
+
+
+. 0 25475514 144
+. 7D 80 00 26 93 C1 00 F8 7F C8 02 A6 93 E1 00 FC 7C 3F 0B 78 90 01 01 04 92 01 00 C0 7C D0 33 78 92 61 00 CC 7C 93 23 78 92 81 00 D0 7C 74 1B 78 92 C1 00 D8 38 60 00 03 92 E1 00 DC 7D 36 4B 78 93 21 00 E4 7C F7 3B 78 93 41 00 E8 7D 19 43 78 93 61 00 EC 3B 40 00 00 93 81 00 F0 7C BC 2B 78 91 C1 00 B8 38 BF 00 28 91 E1 00 BC 92 21 00 C4 92 41 00 C8 92 A1 00 D4 93 01 00 E0 93 A1 00 F4 91 81 00 B4 91 5F 00 98 83 7F 01 08 48 00 C9 05
+
+==== BB 453 __GI___fxstat64(0x25481EA4) approx BBs exec'd 0 ====
+
+	0x25481EA4:  9421FF80  stwu r1,-128(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF80, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25481EA8:  7CC802A6  mflr r6
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x25481EAC:  48015155  bl 0x25497000
+	   9: MOVL       	$0x25481EB0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25481EA4 12
+. 94 21 FF 80 7C C8 02 A6 48 01 51 55
+
+==== BB 454 (0x25481EB0) approx BBs exec'd 0 ====
+
+	0x25481EB0:  93C10078  stw r30,120(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x78, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25481EB4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25481EB8:  9361006C  stw r27,108(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x6C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25481EBC:  90C10084  stw r6,132(r1)
+	  13: GETL       	R6, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x84, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25481EC0:  93210064  stw r25,100(r1)
+	  18: GETL       	R25, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x64, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25481EC4:  7C791B78  or r25,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0x25481EC8:  837E04EC  lwz r27,1260(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4EC, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0x25481ECC:  93810070  stw r28,112(r1)
+	  31: GETL       	R28, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x70, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x25481ED0:  7CBC2B78  or r28,r5,r5
+	  36: GETL       	R5, t28
+	  37: PUTL       	t28, R28
+	  38: INCEIPL       	$4
+
+	0x25481ED4:  801B0000  lwz r0,0(r27)
+	  39: GETL       	R27, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0x25481ED8:  93E1007C  stw r31,124(r1)
+	  43: GETL       	R31, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x7C, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x25481EDC:  7C9F2378  or r31,r4,r4
+	  48: GETL       	R4, t38
+	  49: PUTL       	t38, R31
+	  50: INCEIPL       	$4
+
+	0x25481EE0:  2F800000  cmpi cr7,r0,0
+	  51: GETL       	R0, t40
+	  52: CMP0L       	t40, t42  (-rSo)
+	  53: ICRFL       	t42, $0x7, CR
+	  54: INCEIPL       	$4
+
+	0x25481EE4:  93410068  stw r26,104(r1)
+	  55: GETL       	R26, t44
+	  56: GETL       	R1, t46
+	  57: ADDL       	$0x68, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0x25481EE8:  93A10074  stw r29,116(r1)
+	  60: GETL       	R29, t48
+	  61: GETL       	R1, t50
+	  62: ADDL       	$0x74, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0x25481EEC:  409E0048  bc 4,30,0x25481F34
+	  65: Jc30o       	$0x25481F34
+
+
+. 0 25481EB0 64
+. 93 C1 00 78 7F C8 02 A6 93 61 00 6C 90 C1 00 84 93 21 00 64 7C 79 1B 78 83 7E 04 EC 93 81 00 70 7C BC 2B 78 80 1B 00 00 93 E1 00 7C 7C 9F 23 78 2F 80 00 00 93 41 00 68 93 A1 00 74 40 9E 00 48
+
+==== BB 455 (0x25481EF0) approx BBs exec'd 0 ====
+
+	0x25481EF0:  83BE0514  lwz r29,1300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x514, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25481EF4:  7C832378  or r3,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25481EF8:  380000C5  li r0,197
+	   8: MOVL       	$0xC5, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x25481EFC:  7CA42B78  or r4,r5,r5
+	  11: GETL       	R5, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x25481F00:  835D0000  lwz r26,0(r29)
+	  14: GETL       	R29, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R26
+	  17: INCEIPL       	$4
+
+	0x25481F04:  44000002  sc
+	  18: JMPo-sys       	$0x25481F08  ($4)
+
+
+. 0 25481EF0 24
+. 83 BE 05 14 7C 83 23 78 38 00 00 C5 7C A4 2B 78 83 5D 00 00 44 00 00 02
+
+==== BB 456 (0x25481F08) approx BBs exec'd 0 ====
+
+	0x25481F08:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25481F0C:  74091000  andis. r9,r0,0x1000
+	   3: GETL       	R0, t2
+	   4: ANDL       	$0x10000000, t2
+	   5: PUTL       	t2, R9
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25481F10:  408200C4  bc 4,2,0x25481FD4
+	   9: Jc02o       	$0x25481FD4
+
+
+. 0 25481F08 12
+. 7C 00 00 26 74 09 10 00 40 82 00 C4
+
+==== BB 457 (0x25481F14) approx BBs exec'd 0 ====
+
+	0x25481F14:  2C83FFFF  cmpi cr1,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25481F18:  40A60044  bc 5,6,0x25481F5C
+	   5: Jc06o       	$0x25481F5C
+
+
+. 0 25481F14 8
+. 2C 83 FF FF 40 A6 00 44
+
+==== BB 458 (0x25481F5C) approx BBs exec'd 0 ====
+
+	0x25481F5C:  80A10084  lwz r5,132(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25481F60:  83210064  lwz r25,100(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x64, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x25481F64:  83410068  lwz r26,104(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x68, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0x25481F68:  7CA803A6  mtlr r5
+	  15: GETL       	R5, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x25481F6C:  8361006C  lwz r27,108(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x6C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R27
+	  22: INCEIPL       	$4
+
+	0x25481F70:  83810070  lwz r28,112(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x70, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R28
+	  27: INCEIPL       	$4
+
+	0x25481F74:  83A10074  lwz r29,116(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x74, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R29
+	  32: INCEIPL       	$4
+
+	0x25481F78:  83C10078  lwz r30,120(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x78, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R30
+	  37: INCEIPL       	$4
+
+	0x25481F7C:  83E1007C  lwz r31,124(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x7C, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R31
+	  42: INCEIPL       	$4
+
+	0x25481F80:  38210080  addi r1,r1,128
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x80, t34
+	  45: PUTL       	t34, R1
+	  46: INCEIPL       	$4
+
+	0x25481F84:  4E800020  blr
+	  47: GETL       	LR, t36
+	  48: JMPo-r       	t36  ($4)
+
+
+. 0 25481F5C 44
+. 80 A1 00 84 83 21 00 64 83 41 00 68 7C A8 03 A6 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C 38 21 00 80 4E 80 00 20
+
+==== BB 459 (0x254755A4) approx BBs exec'd 0 ====
+
+	0x254755A4:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x254755A8:  935F00A0  stw r26,160(r31)
+	   4: GETL       	R26, t4
+	   5: GETL       	R31, t6
+	   6: ADDL       	$0xA0, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x254755AC:  419C082C  bc 12,28,0x25475DD8
+	   9: Js28o       	$0x25475DD8
+
+
+. 0 254755A4 12
+. 2F 83 00 00 93 5F 00 A0 41 9C 08 2C
+
+==== BB 460 (0x254755B0) approx BBs exec'd 0 ====
+
+	0x254755B0:  1CBB0018  mulli r5,r27,24
+	   0: GETL       	R27, t0
+	   1: MULL       	$0x18, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x254755B4:  809E04C8  lwz r4,1224(r30)
+	   4: GETL       	R30, t2
+	   5: ADDL       	$0x4C8, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x254755B8:  7FA5202E  lwzx r29,r5,r4
+	   9: GETL       	R4, t6
+	  10: GETL       	R5, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0x254755BC:  2C1D0000  cmpi cr0,r29,0
+	  15: GETL       	R29, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x254755C0:  41820148  bc 12,2,0x25475708
+	  19: Js02o       	$0x25475708
+
+
+. 0 254755B0 20
+. 1C BB 00 18 80 9E 04 C8 7F A5 20 2E 2C 1D 00 00 41 82 01 48
+
+==== BB 461 (0x254755C4) approx BBs exec'd 0 ====
+
+	0x254755C4:  817F0030  lwz r11,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254755C8:  3B400000  li r26,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x254755CC:  48000010  b 0x254755DC
+	   8: JMPo       	$0x254755DC  ($4)
+
+
+. 0 254755C4 12
+. 81 7F 00 30 3B 40 00 00 48 00 00 10
+
+==== BB 462 (0x254755DC) approx BBs exec'd 0 ====
+
+	0x254755DC:  80DD01D8  lwz r6,472(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1D8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254755E0:  7C865800  cmp cr1,r6,r11
+	   5: GETL       	R6, t4
+	   6: GETL       	R11, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x254755E4:  4086FFEC  bc 4,6,0x254755D0
+	  10: Jc06o       	$0x254755D0
+
+
+. 0 254755DC 12
+. 80 DD 01 D8 7C 86 58 00 40 86 FF EC
+
+==== BB 463 (0x254755E8) approx BBs exec'd 0 ====
+
+	0x254755E8:  811D01DC  lwz r8,476(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1DC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254755EC:  80FF0034  lwz r7,52(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x34, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x254755F0:  7E083800  cmp cr4,r8,r7
+	  10: GETL       	R8, t8
+	  11: GETL       	R7, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x4, CR
+	  14: INCEIPL       	$4
+
+	0x254755F4:  4092FFDC  bc 4,18,0x254755D0
+	  15: Jc18o       	$0x254755D0
+
+
+. 0 254755E8 16
+. 81 1D 01 DC 80 FF 00 34 7E 08 38 00 40 92 FF DC
+
+==== BB 464 (0x254755D0) approx BBs exec'd 0 ====
+
+	0x254755D0:  83BD000C  lwz r29,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254755D4:  2E1D0000  cmpi cr4,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254755D8:  41920130  bc 12,18,0x25475708
+	   9: Js18o       	$0x25475708
+
+
+. 0 254755D0 12
+. 83 BD 00 0C 2E 1D 00 00 41 92 01 30
+
+==== BB 465 (0x25475708) approx BBs exec'd 0 ====
+
+	0x25475708:  2F1B0000  cmpi cr6,r27,0
+	   0: GETL       	R27, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547570C:  409A06E0  bc 4,26,0x25475DEC
+	   4: Jc26o       	$0x25475DEC
+
+
+. 0 25475708 8
+. 2F 1B 00 00 40 9A 06 E0
+
+==== BB 466 (0x25475710) approx BBs exec'd 0 ====
+
+	0x25475710:  72C00004  andi. r0,r22,0x4
+	   0: GETL       	R22, t0
+	   1: ANDL       	$0x4, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475714:  38600000  li r3,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x25475718:  40A2FF8C  bc 5,2,0x254756A4
+	   9: Jc02o       	$0x254756A4
+
+
+. 0 25475710 12
+. 72 C0 00 04 38 60 00 00 40 A2 FF 8C
+
+==== BB 467 (0x2547571C) approx BBs exec'd 0 ====
+
+	0x2547571C:  823E04F4  lwz r17,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0x25475720:  80910000  lwz r4,0(r17)
+	   5: GETL       	R17, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x25475724:  70890040  andi. r9,r4,0x40
+	   9: GETL       	R4, t8
+	  10: ANDL       	$0x40, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25475728:  40820738  bc 4,2,0x25475E60
+	  15: Jc02o       	$0x25475E60
+
+
+. 0 2547571C 16
+. 82 3E 04 F4 80 91 00 00 70 89 00 40 40 82 07 38
+
+==== BB 468 (0x2547572C) approx BBs exec'd 0 ====
+
+	0x2547572C:  7F25CB78  or r5,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x25475730:  7F68DB78  or r8,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0x25475734:  7E038378  or r3,r16,r16
+	   6: GETL       	R16, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x25475738:  7E84A378  or r4,r20,r20
+	   9: GETL       	R20, t6
+	  10: PUTL       	t6, R4
+	  11: INCEIPL       	$4
+
+	0x2547573C:  7EE6BB78  or r6,r23,r23
+	  12: GETL       	R23, t8
+	  13: PUTL       	t8, R6
+	  14: INCEIPL       	$4
+
+	0x25475740:  7EC7B378  or r7,r22,r22
+	  15: GETL       	R22, t10
+	  16: PUTL       	t10, R7
+	  17: INCEIPL       	$4
+
+	0x25475744:  480039B1  bl 0x254790F4
+	  18: MOVL       	$0x25475748, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0x254790F4  ($4)
+
+
+. 0 2547572C 28
+. 7F 25 CB 78 7F 68 DB 78 7E 03 83 78 7E 84 A3 78 7E E6 BB 78 7E C7 B3 78 48 00 39 B1
+
+==== BB 469 (0x25479318) approx BBs exec'd 0 ====
+
+	0x25479318:  7D695B78  or r9,r11,r11
+	   0: GETL       	R11, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x2547931C:  48000008  b 0x25479324
+	   3: JMPo       	$0x25479324  ($4)
+
+
+. 0 25479318 8
+. 7D 69 5B 78 48 00 00 08
+
+==== BB 470 (0x25479324) approx BBs exec'd 0 ====
+
+	0x25479324:  8009000C  lwz r0,12(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25479328:  2C800000  cmpi cr1,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547932C:  4086FFF4  bc 4,6,0x25479320
+	   9: Jc06o       	$0x25479320
+
+
+. 0 25479324 12
+. 80 09 00 0C 2C 80 00 00 40 86 FF F4
+
+==== BB 471 (0x25479320) approx BBs exec'd 0 ====
+
+	0x25479320:  7C090378  or r9,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25479324:  8009000C  lwz r0,12(r9)
+	   3: GETL       	R9, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25479328:  2C800000  cmpi cr1,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x2547932C:  4086FFF4  bc 4,6,0x25479320
+	  12: Jc06o       	$0x25479320
+
+
+. 0 25479320 16
+. 7C 09 03 78 80 09 00 0C 2C 80 00 00 40 86 FF F4
+
+==== BB 472 (0x25479330) approx BBs exec'd 0 ====
+
+	0x25479330:  93E9000C  stw r31,12(r9)
+	   0: GETL       	R31, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25479334:  2F1C0000  cmpi cr6,r28,0
+	   5: GETL       	R28, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25479338:  913F0010  stw r9,16(r31)
+	   9: GETL       	R9, t8
+	  10: GETL       	R31, t10
+	  11: ADDL       	$0x10, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x2547933C:  38800001  li r4,1
+	  14: MOVL       	$0x1, t12
+	  15: PUTL       	t12, R4
+	  16: INCEIPL       	$4
+
+	0x25479340:  7D0A282E  lwzx r8,r10,r5
+	  17: GETL       	R5, t14
+	  18: GETL       	R10, t16
+	  19: ADDL       	t16, t14
+	  20: LDL       	(t14), t18
+	  21: PUTL       	t18, R8
+	  22: INCEIPL       	$4
+
+	0x25479344:  7D4A2A14  add r10,r10,r5
+	  23: GETL       	R10, t20
+	  24: GETL       	R5, t22
+	  25: ADDL       	t20, t22
+	  26: PUTL       	t22, R10
+	  27: INCEIPL       	$4
+
+	0x25479348:  832A0004  lwz r25,4(r10)
+	  28: GETL       	R10, t24
+	  29: ADDL       	$0x4, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R25
+	  32: INCEIPL       	$4
+
+	0x2547934C:  81650198  lwz r11,408(r5)
+	  33: GETL       	R5, t28
+	  34: ADDL       	$0x198, t28
+	  35: LDL       	(t28), t30
+	  36: PUTL       	t30, R11
+	  37: INCEIPL       	$4
+
+	0x25479350:  38E80158  addi r7,r8,344
+	  38: GETL       	R8, t32
+	  39: ADDL       	$0x158, t32
+	  40: PUTL       	t32, R7
+	  41: INCEIPL       	$4
+
+	0x25479354:  8185019C  lwz r12,412(r5)
+	  42: GETL       	R5, t34
+	  43: ADDL       	$0x19C, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R12
+	  46: INCEIPL       	$4
+
+	0x25479358:  3B190001  addi r24,r25,1
+	  47: GETL       	R25, t38
+	  48: ADDL       	$0x1, t38
+	  49: PUTL       	t38, R24
+	  50: INCEIPL       	$4
+
+	0x2547935C:  90E60000  stw r7,0(r6)
+	  51: GETL       	R7, t40
+	  52: GETL       	R6, t42
+	  53: STL       	t40, (t42)
+	  54: INCEIPL       	$4
+
+	0x25479360:  310C0001  addic r8,r12,1
+	  55: GETL       	R12, t44
+	  56: ADCL       	$0x1, t44  (-wCa)
+	  57: PUTL       	t44, R8
+	  58: INCEIPL       	$4
+
+	0x25479364:  7CEB0194  addze r7,r11
+	  59: GETL       	R11, t46
+	  60: ADCL       	$0x0, t46  (-rCa-wCa)
+	  61: PUTL       	t46, R7
+	  62: INCEIPL       	$4
+
+	0x25479368:  930A0004  stw r24,4(r10)
+	  63: GETL       	R24, t48
+	  64: GETL       	R10, t50
+	  65: ADDL       	$0x4, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x2547936C:  90E50198  stw r7,408(r5)
+	  68: GETL       	R7, t52
+	  69: GETL       	R5, t54
+	  70: ADDL       	$0x198, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x25479370:  9105019C  stw r8,412(r5)
+	  73: GETL       	R8, t56
+	  74: GETL       	R5, t58
+	  75: ADDL       	$0x19C, t58
+	  76: STL       	t56, (t58)
+	  77: INCEIPL       	$4
+
+	0x25479374:  409AFEA8  bc 4,26,0x2547921C
+	  78: Jc26o       	$0x2547921C
+
+
+. 0 25479330 72
+. 93 E9 00 0C 2F 1C 00 00 91 3F 00 10 38 80 00 01 7D 0A 28 2E 7D 4A 2A 14 83 2A 00 04 81 65 01 98 38 E8 01 58 81 85 01 9C 3B 19 00 01 90 E6 00 00 31 0C 00 01 7C EB 01 94 93 0A 00 04 90 E5 01 98 91 05 01 9C 40 9A FE A8
+
+==== BB 473 (0x2547921C) approx BBs exec'd 0 ====
+
+	0x2547921C:  801C0168  lwz r0,360(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x168, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25479220:  2C000000  cmpi cr0,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25479224:  4082FFF4  bc 4,2,0x25479218
+	   9: Jc02o       	$0x25479218
+
+
+. 0 2547921C 12
+. 80 1C 01 68 2C 00 00 00 40 82 FF F4
+
+==== BB 474 (0x25479234) approx BBs exec'd 0 ====
+
+	0x25479234:  81660000  lwz r11,0(r6)
+	   0: GETL       	R6, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25479238:  7C895800  cmp cr1,r9,r11
+	   4: GETL       	R9, t4
+	   5: GETL       	R11, t6
+	   6: CMPL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547923C:  41860024  bc 12,6,0x25479260
+	   9: Js06o       	$0x25479260
+
+
+. 0 25479234 12
+. 81 66 00 00 7C 89 58 00 41 86 00 24
+
+==== BB 475 (0x25479260) approx BBs exec'd 0 ====
+
+	0x25479260:  3B7F0158  addi r27,r31,344
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x158, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25479264:  937F01C4  stw r27,452(r31)
+	   4: GETL       	R27, t2
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x1C4, t4
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x25479268:  8BB70000  lbz r29,0(r23)
+	   9: GETL       	R23, t6
+	  10: LDB       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x2547926C:  2F1D0000  cmpi cr6,r29,0
+	  13: GETL       	R29, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0x25479270:  419A006C  bc 12,26,0x254792DC
+	  17: Js26o       	$0x254792DC
+
+
+. 0 25479260 20
+. 3B 7F 01 58 93 7F 01 C4 8B B7 00 00 2F 1D 00 00 41 9A 00 6C
+
+==== BB 476 (0x25479274) approx BBs exec'd 0 ====
+
+	0x25479274:  7EE3BB78  or r3,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25479278:  3B800000  li r28,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R28
+	   5: INCEIPL       	$4
+
+	0x2547927C:  48009D55  bl 0x25482FD0
+	   6: MOVL       	$0x25479280, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25482FD0  ($4)
+
+
+. 0 25479274 12
+. 7E E3 BB 78 3B 80 00 00 48 00 9D 55
+
+==== BB 477 (0x25479280) approx BBs exec'd 0 ====
+
+	0x25479280:  2F9D002F  cmpi cr7,r29,47
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x2F, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25479284:  3B630001  addi r27,r3,1
+	   5: GETL       	R3, t6
+	   6: ADDL       	$0x1, t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0x25479288:  835E0514  lwz r26,1300(r30)
+	   9: GETL       	R30, t8
+	  10: ADDL       	$0x514, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R26
+	  13: INCEIPL       	$4
+
+	0x2547928C:  7F7DDB78  or r29,r27,r27
+	  14: GETL       	R27, t12
+	  15: PUTL       	t12, R29
+	  16: INCEIPL       	$4
+
+	0x25479290:  419E0154  bc 12,30,0x254793E4
+	  17: Js30o       	$0x254793E4
+
+
+. 0 25479280 20
+. 2F 9D 00 2F 3B 63 00 01 83 5E 05 14 7F 7D DB 78 41 9E 01 54
+
+==== BB 478 (0x254793E4) approx BBs exec'd 0 ====
+
+	0x254793E4:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254793E8:  4801E63D  bl 0x25497A24
+	   3: MOVL       	$0x254793EC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 254793E4 8
+. 7F 63 DB 78 48 01 E6 3D
+
+==== BB 479 (0x254793EC) approx BBs exec'd 0 ====
+
+	0x254793EC:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x254793F0:  7C7C1B78  or r28,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R28
+	   6: INCEIPL       	$4
+
+	0x254793F4:  4086FFB0  bc 4,6,0x254793A4
+	   7: Jc06o       	$0x254793A4
+
+
+. 0 254793EC 12
+. 2C 83 00 00 7C 7C 1B 78 40 86 FF B0
+
+==== BB 480 (0x254793A4) approx BBs exec'd 0 ====
+
+	0x254793A4:  7EE4BB78  or r4,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254793A8:  7F65DB78  or r5,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x254793AC:  4800A675  bl 0x25483A20
+	   6: MOVL       	$0x254793B0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483A20  ($4)
+
+
+. 0 254793A4 12
+. 7E E4 BB 78 7F 65 DB 78 48 00 A6 75
+
+==== BB 481 (0x254793B0) approx BBs exec'd 0 ====
+
+	0x254793B0:  8EE3FFFF  lbzu r23,-1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R23
+	   5: INCEIPL       	$4
+
+	0x254793B4:  2C17002F  cmpi cr0,r23,47
+	   6: GETL       	R23, t4
+	   7: MOVL       	$0x2F, t8
+	   8: CMPL       	t4, t8, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x254793B8:  4082FFF8  bc 4,2,0x254793B0
+	  11: Jc02o       	$0x254793B0
+
+
+. 0 254793B0 12
+. 8E E3 FF FF 2C 17 00 2F 40 82 FF F8
+
+==== BB 482 (0x254793BC) approx BBs exec'd 0 ====
+
+	0x254793BC:  7F03E000  cmp cr6,r3,r28
+	   0: GETL       	R3, t0
+	   1: GETL       	R28, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x254793C0:  419A0014  bc 12,26,0x254793D4
+	   5: Js26o       	$0x254793D4
+
+
+. 0 254793BC 8
+. 7F 03 E0 00 41 9A 00 14
+
+==== BB 483 (0x254793C4) approx BBs exec'd 0 ====
+
+	0x254793C4:  38C00000  li r6,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x254793C8:  98C30000  stb r6,0(r3)
+	   3: GETL       	R6, t2
+	   4: GETL       	R3, t4
+	   5: STB       	t2, (t4)
+	   6: INCEIPL       	$4
+
+	0x254793CC:  939F019C  stw r28,412(r31)
+	   7: GETL       	R28, t6
+	   8: GETL       	R31, t8
+	   9: ADDL       	$0x19C, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x254793D0:  4BFFFF0C  b 0x254792DC
+	  12: JMPo       	$0x254792DC  ($4)
+
+
+. 0 254793C4 16
+. 38 C0 00 00 98 C3 00 00 93 9F 01 9C 4B FF FF 0C
+
+==== BB 484 (0x25475748) approx BBs exec'd 0 ====
+
+	0x25475748:  3B1C0004  addi r24,r28,4
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0x2547574C:  7C7A1B79  or. r26,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R26
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25475750:  41820708  bc 12,2,0x25475E58
+	   9: Js02o       	$0x25475E58
+
+
+. 0 25475748 12
+. 3B 1C 00 04 7C 7A 1B 79 41 82 07 08
+
+==== BB 485 (0x25475754) approx BBs exec'd 0 ====
+
+	0x25475754:  A1F8002C  lhz r15,44(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x2C, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0x25475758:  81D80018  lwz r14,24(r24)
+	   5: GETL       	R24, t4
+	   6: ADDL       	$0x18, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R14
+	   9: INCEIPL       	$4
+
+	0x2547575C:  A1580010  lhz r10,16(r24)
+	  10: GETL       	R24, t8
+	  11: ADDL       	$0x10, t8
+	  12: LDW       	(t8), t10
+	  13: PUTL       	t10, R10
+	  14: INCEIPL       	$4
+
+	0x25475760:  B1FA0154  sth r15,340(r26)
+	  15: GETL       	R15, t12
+	  16: GETL       	R26, t14
+	  17: ADDL       	$0x154, t14
+	  18: STW       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25475764:  91DA0150  stw r14,336(r26)
+	  20: GETL       	R14, t16
+	  21: GETL       	R26, t18
+	  22: ADDL       	$0x150, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25475768:  A118002C  lhz r8,44(r24)
+	  25: GETL       	R24, t20
+	  26: ADDL       	$0x2C, t20
+	  27: LDW       	(t20), t22
+	  28: PUTL       	t22, R8
+	  29: INCEIPL       	$4
+
+	0x2547576C:  8378001C  lwz r27,28(r24)
+	  30: GETL       	R24, t24
+	  31: ADDL       	$0x1C, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R27
+	  34: INCEIPL       	$4
+
+	0x25475770:  551D2834  rlwinm r29,r8,5,0,26
+	  35: GETL       	R8, t28
+	  36: SHLL       	$0x5, t28
+	  37: PUTL       	t28, R29
+	  38: INCEIPL       	$4
+
+	0x25475774:  80DC0000  lwz r6,0(r28)
+	  39: GETL       	R28, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R6
+	  42: INCEIPL       	$4
+
+	0x25475778:  7CFBEA14  add r7,r27,r29
+	  43: GETL       	R27, t34
+	  44: GETL       	R29, t36
+	  45: ADDL       	t34, t36
+	  46: PUTL       	t36, R7
+	  47: INCEIPL       	$4
+
+	0x2547577C:  7F3BE214  add r25,r27,r28
+	  48: GETL       	R27, t38
+	  49: GETL       	R28, t40
+	  50: ADDL       	t38, t40
+	  51: PUTL       	t40, R25
+	  52: INCEIPL       	$4
+
+	0x25475780:  7F873040  cmpl cr7,r7,r6
+	  53: GETL       	R7, t42
+	  54: GETL       	R6, t44
+	  55: CMPUL       	t42, t44, t46  (-rSo)
+	  56: ICRFL       	t46, $0x7, CR
+	  57: INCEIPL       	$4
+
+	0x25475784:  915F009C  stw r10,156(r31)
+	  58: GETL       	R10, t48
+	  59: GETL       	R31, t50
+	  60: ADDL       	$0x9C, t50
+	  61: STL       	t48, (t50)
+	  62: INCEIPL       	$4
+
+	0x25475788:  3B790004  addi r27,r25,4
+	  63: GETL       	R25, t52
+	  64: ADDL       	$0x4, t52
+	  65: PUTL       	t52, R27
+	  66: INCEIPL       	$4
+
+	0x2547578C:  419D0438  bc 12,29,0x25475BC4
+	  67: Js29o       	$0x25475BC4
+
+
+. 0 25475754 60
+. A1 F8 00 2C 81 D8 00 18 A1 58 00 10 B1 FA 01 54 91 DA 01 50 A1 18 00 2C 83 78 00 1C 55 1D 28 34 80 DC 00 00 7C FB EA 14 7F 3B E2 14 7F 87 30 40 91 5F 00 9C 3B 79 00 04 41 9D 04 38
+
+==== BB 486 (0x25475790) approx BBs exec'd 0 ====
+
+	0x25475790:  A11A0154  lhz r8,340(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x154, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25475794:  7C320B78  or r18,r1,r1
+	   5: GETL       	R1, t4
+	   6: PUTL       	t4, R18
+	   7: INCEIPL       	$4
+
+	0x25475798:  80810000  lwz r4,0(r1)
+	   8: GETL       	R1, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R4
+	  11: INCEIPL       	$4
+
+	0x2547579C:  3BA00007  li r29,7
+	  12: MOVL       	$0x7, t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0x254757A0:  1CE80018  mulli r7,r8,24
+	  15: GETL       	R8, t12
+	  16: MULL       	$0x18, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x254757A4:  3B800000  li r28,0
+	  19: MOVL       	$0x0, t14
+	  20: PUTL       	t14, R28
+	  21: INCEIPL       	$4
+
+	0x254757A8:  38C7001E  addi r6,r7,30
+	  22: GETL       	R7, t16
+	  23: ADDL       	$0x1E, t16
+	  24: PUTL       	t16, R6
+	  25: INCEIPL       	$4
+
+	0x254757AC:  54D902B6  rlwinm r25,r6,0,10,27
+	  26: GETL       	R6, t18
+	  27: ANDL       	$0x3FFFF0, t18
+	  28: PUTL       	t18, R25
+	  29: INCEIPL       	$4
+
+	0x254757B0:  7CB900D0  neg r5,r25
+	  30: GETL       	R25, t20
+	  31: NEGL       	t20
+	  32: PUTL       	t20, R5
+	  33: INCEIPL       	$4
+
+	0x254757B4:  7C81296E  stwux r4,r1,r5
+	  34: GETL       	R5, t22
+	  35: GETL       	R1, t24
+	  36: ADDL       	t24, t22
+	  37: PUTL       	t22, R1
+	  38: GETL       	R4, t26
+	  39: STL       	t26, (t22)
+	  40: INCEIPL       	$4
+
+	0x254757B8:  A0FA0154  lhz r7,340(r26)
+	  41: GETL       	R26, t28
+	  42: ADDL       	$0x154, t28
+	  43: LDW       	(t28), t30
+	  44: PUTL       	t30, R7
+	  45: INCEIPL       	$4
+
+	0x254757BC:  3861002F  addi r3,r1,47
+	  46: GETL       	R1, t32
+	  47: ADDL       	$0x2F, t32
+	  48: PUTL       	t32, R3
+	  49: INCEIPL       	$4
+
+	0x254757C0:  93BF00A4  stw r29,164(r31)
+	  50: GETL       	R29, t34
+	  51: GETL       	R31, t36
+	  52: ADDL       	$0xA4, t36
+	  53: STL       	t34, (t36)
+	  54: INCEIPL       	$4
+
+	0x254757C4:  54750036  rlwinm r21,r3,0,0,27
+	  55: GETL       	R3, t38
+	  56: ANDL       	$0xFFFFFFF0, t38
+	  57: PUTL       	t38, R21
+	  58: INCEIPL       	$4
+
+	0x254757C8:  54E92834  rlwinm r9,r7,5,0,26
+	  59: GETL       	R7, t40
+	  60: SHLL       	$0x5, t40
+	  61: PUTL       	t40, R9
+	  62: INCEIPL       	$4
+
+	0x254757CC:  939F00A8  stw r28,168(r31)
+	  63: GETL       	R28, t42
+	  64: GETL       	R31, t44
+	  65: ADDL       	$0xA8, t44
+	  66: STL       	t42, (t44)
+	  67: INCEIPL       	$4
+
+	0x254757D0:  7C09DA14  add r0,r9,r27
+	  68: GETL       	R9, t46
+	  69: GETL       	R27, t48
+	  70: ADDL       	t46, t48
+	  71: PUTL       	t48, R0
+	  72: INCEIPL       	$4
+
+	0x254757D4:  7F7DDB78  or r29,r27,r27
+	  73: GETL       	R27, t50
+	  74: PUTL       	t50, R29
+	  75: INCEIPL       	$4
+
+	0x254757D8:  7E00D840  cmpl cr4,r0,r27
+	  76: GETL       	R0, t52
+	  77: GETL       	R27, t54
+	  78: CMPUL       	t52, t54, t56  (-rSo)
+	  79: ICRFL       	t56, $0x4, CR
+	  80: INCEIPL       	$4
+
+	0x254757DC:  409103A8  bc 4,17,0x25475B84
+	  81: Jc17o       	$0x25475B84
+
+
+. 0 25475790 80
+. A1 1A 01 54 7C 32 0B 78 80 81 00 00 3B A0 00 07 1C E8 00 18 3B 80 00 00 38 C7 00 1E 54 D9 02 B6 7C B9 00 D0 7C 81 29 6E A0 FA 01 54 38 61 00 2F 93 BF 00 A4 54 75 00 36 54 E9 28 34 93 9F 00 A8 7C 09 DA 14 7F 7D DB 78 7E 00 D8 40 40 91 03 A8
+
+==== BB 487 (0x254757E0) approx BBs exec'd 0 ====
+
+	0x254757E0:  3B200000  li r25,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0x254757E4:  48000028  b 0x2547580C
+	   3: JMPo       	$0x2547580C  ($4)
+
+
+. 0 254757E0 8
+. 3B 20 00 00 48 00 00 28
+
+==== BB 488 (0x2547580C) approx BBs exec'd 0 ====
+
+	0x2547580C:  813D0000  lwz r9,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25475810:  2F090006  cmpi cr6,r9,6
+	   4: GETL       	R9, t4
+	   5: MOVL       	$0x6, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475814:  419A0094  bc 12,26,0x254758A8
+	   9: Js26o       	$0x254758A8
+
+
+. 0 2547580C 12
+. 81 3D 00 00 2F 09 00 06 41 9A 00 94
+
+==== BB 489 (0x25475818) approx BBs exec'd 0 ====
+
+	0x25475818:  28090006  cmpli cr0,r9,6
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x6, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547581C:  40A1FFCC  bc 5,1,0x254757E8
+	   5: Jc01o       	$0x254757E8
+
+
+. 0 25475818 8
+. 28 09 00 06 40 A1 FF CC
+
+==== BB 490 (0x254757E8) approx BBs exec'd 0 ====
+
+	0x254757E8:  2F890001  cmpi cr7,r9,1
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254757EC:  419E02C4  bc 12,30,0x25475AB0
+	   5: Js30o       	$0x25475AB0
+
+
+. 0 254757E8 8
+. 2F 89 00 01 41 9E 02 C4
+
+==== BB 491 (0x25475AB0) approx BBs exec'd 0 ====
+
+	0x25475AB0:  81310004  lwz r9,4(r17)
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475AB4:  817D001C  lwz r11,28(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25475AB8:  3909FFFF  addi r8,r9,-1
+	  10: GETL       	R9, t8
+	  11: ADDL       	$0xFFFFFFFF, t8
+	  12: PUTL       	t8, R8
+	  13: INCEIPL       	$4
+
+	0x25475ABC:  7D694039  and. r9,r11,r8
+	  14: GETL       	R11, t10
+	  15: GETL       	R8, t12
+	  16: ANDL       	t10, t12
+	  17: PUTL       	t12, R9
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0x25475AC0:  4082062C  bc 4,2,0x254760EC
+	  21: Jc02o       	$0x254760EC
+
+
+. 0 25475AB0 20
+. 81 31 00 04 81 7D 00 1C 39 09 FF FF 7D 69 40 39 40 82 06 2C
+
+==== BB 492 (0x25475AC4) approx BBs exec'd 0 ====
+
+	0x25475AC4:  815D0008  lwz r10,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25475AC8:  38ABFFFF  addi r5,r11,-1
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25475ACC:  80DD0004  lwz r6,4(r29)
+	   9: GETL       	R29, t6
+	  10: ADDL       	$0x4, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R6
+	  13: INCEIPL       	$4
+
+	0x25475AD0:  7C865050  subf r4,r6,r10
+	  14: GETL       	R6, t10
+	  15: GETL       	R10, t12
+	  16: SUBL       	t10, t12
+	  17: PUTL       	t12, R4
+	  18: INCEIPL       	$4
+
+	0x25475AD4:  7C8B2839  and. r11,r4,r5
+	  19: GETL       	R4, t14
+	  20: GETL       	R5, t16
+	  21: ANDL       	t14, t16
+	  22: PUTL       	t16, R11
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x25475AD8:  40820650  bc 4,2,0x25476128
+	  26: Jc02o       	$0x25476128
+
+
+. 0 25475AC4 24
+. 81 5D 00 08 38 AB FF FF 80 DD 00 04 7C 86 50 50 7C 8B 28 39 40 82 06 50
+
+==== BB 493 (0x25475ADC) approx BBs exec'd 0 ====
+
+	0x25475ADC:  7D4A4078  andc r10,r10,r8
+	   0: GETL       	R10, t0
+	   1: GETL       	R8, t2
+	   2: NOTL       	t2
+	   3: ANDL       	t0, t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0x25475AE0:  3B9C0001  addi r28,r28,1
+	   6: GETL       	R28, t4
+	   7: ADDL       	$0x1, t4
+	   8: PUTL       	t4, R28
+	   9: INCEIPL       	$4
+
+	0x25475AE4:  7D55C92E  stwx r10,r21,r25
+	  10: GETL       	R25, t6
+	  11: GETL       	R21, t8
+	  12: ADDL       	t8, t6
+	  13: GETL       	R10, t10
+	  14: STL       	t10, (t6)
+	  15: INCEIPL       	$4
+
+	0x25475AE8:  289C0001  cmpli cr1,r28,1
+	  16: GETL       	R28, t12
+	  17: MOVL       	$0x1, t16
+	  18: CMPUL       	t12, t16, t14  (-rSo)
+	  19: ICRFL       	t14, $0x1, CR
+	  20: INCEIPL       	$4
+
+	0x25475AEC:  813D0008  lwz r9,8(r29)
+	  21: GETL       	R29, t18
+	  22: ADDL       	$0x8, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R9
+	  25: INCEIPL       	$4
+
+	0x25475AF0:  801D0010  lwz r0,16(r29)
+	  26: GETL       	R29, t22
+	  27: ADDL       	$0x10, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R0
+	  30: INCEIPL       	$4
+
+	0x25475AF4:  81F10004  lwz r15,4(r17)
+	  31: GETL       	R17, t26
+	  32: ADDL       	$0x4, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R15
+	  35: INCEIPL       	$4
+
+	0x25475AF8:  7C690214  add r3,r9,r0
+	  36: GETL       	R9, t30
+	  37: GETL       	R0, t32
+	  38: ADDL       	t30, t32
+	  39: PUTL       	t32, R3
+	  40: INCEIPL       	$4
+
+	0x25475AFC:  7D837A14  add r12,r3,r15
+	  41: GETL       	R3, t34
+	  42: GETL       	R15, t36
+	  43: ADDL       	t34, t36
+	  44: PUTL       	t36, R12
+	  45: INCEIPL       	$4
+
+	0x25475B00:  7DCF00D0  neg r14,r15
+	  46: GETL       	R15, t38
+	  47: NEGL       	t38
+	  48: PUTL       	t38, R14
+	  49: INCEIPL       	$4
+
+	0x25475B04:  396CFFFF  addi r11,r12,-1
+	  50: GETL       	R12, t40
+	  51: ADDL       	$0xFFFFFFFF, t40
+	  52: PUTL       	t40, R11
+	  53: INCEIPL       	$4
+
+	0x25475B08:  7D687038  and r8,r11,r14
+	  54: GETL       	R11, t42
+	  55: GETL       	R14, t44
+	  56: ANDL       	t42, t44
+	  57: PUTL       	t44, R8
+	  58: INCEIPL       	$4
+
+	0x25475B0C:  7D75CA14  add r11,r21,r25
+	  59: GETL       	R21, t46
+	  60: GETL       	R25, t48
+	  61: ADDL       	t46, t48
+	  62: PUTL       	t48, R11
+	  63: INCEIPL       	$4
+
+	0x25475B10:  910B0004  stw r8,4(r11)
+	  64: GETL       	R8, t50
+	  65: GETL       	R11, t52
+	  66: ADDL       	$0x4, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0x25475B14:  3B390018  addi r25,r25,24
+	  69: GETL       	R25, t54
+	  70: ADDL       	$0x18, t54
+	  71: PUTL       	t54, R25
+	  72: INCEIPL       	$4
+
+	0x25475B18:  809D0008  lwz r4,8(r29)
+	  73: GETL       	R29, t56
+	  74: ADDL       	$0x8, t56
+	  75: LDL       	(t56), t58
+	  76: PUTL       	t58, R4
+	  77: INCEIPL       	$4
+
+	0x25475B1C:  80DD0010  lwz r6,16(r29)
+	  78: GETL       	R29, t60
+	  79: ADDL       	$0x10, t60
+	  80: LDL       	(t60), t62
+	  81: PUTL       	t62, R6
+	  82: INCEIPL       	$4
+
+	0x25475B20:  7CA43214  add r5,r4,r6
+	  83: GETL       	R4, t64
+	  84: GETL       	R6, t66
+	  85: ADDL       	t64, t66
+	  86: PUTL       	t66, R5
+	  87: INCEIPL       	$4
+
+	0x25475B24:  90AB0008  stw r5,8(r11)
+	  88: GETL       	R5, t68
+	  89: GETL       	R11, t70
+	  90: ADDL       	$0x8, t70
+	  91: STL       	t68, (t70)
+	  92: INCEIPL       	$4
+
+	0x25475B28:  813D0008  lwz r9,8(r29)
+	  93: GETL       	R29, t72
+	  94: ADDL       	$0x8, t72
+	  95: LDL       	(t72), t74
+	  96: PUTL       	t74, R9
+	  97: INCEIPL       	$4
+
+	0x25475B2C:  801D0014  lwz r0,20(r29)
+	  98: GETL       	R29, t76
+	  99: ADDL       	$0x14, t76
+	 100: LDL       	(t76), t78
+	 101: PUTL       	t78, R0
+	 102: INCEIPL       	$4
+
+	0x25475B30:  7C690214  add r3,r9,r0
+	 103: GETL       	R9, t80
+	 104: GETL       	R0, t82
+	 105: ADDL       	t80, t82
+	 106: PUTL       	t82, R3
+	 107: INCEIPL       	$4
+
+	0x25475B34:  906B000C  stw r3,12(r11)
+	 108: GETL       	R3, t84
+	 109: GETL       	R11, t86
+	 110: ADDL       	$0xC, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x25475B38:  81910004  lwz r12,4(r17)
+	 113: GETL       	R17, t88
+	 114: ADDL       	$0x4, t88
+	 115: LDL       	(t88), t90
+	 116: PUTL       	t90, R12
+	 117: INCEIPL       	$4
+
+	0x25475B3C:  81DD0004  lwz r14,4(r29)
+	 118: GETL       	R29, t92
+	 119: ADDL       	$0x4, t92
+	 120: LDL       	(t92), t94
+	 121: PUTL       	t94, R14
+	 122: INCEIPL       	$4
+
+	0x25475B40:  7DEC00D0  neg r15,r12
+	 123: GETL       	R12, t96
+	 124: NEGL       	t96
+	 125: PUTL       	t96, R15
+	 126: INCEIPL       	$4
+
+	0x25475B44:  7DC87838  and r8,r14,r15
+	 127: GETL       	R14, t98
+	 128: GETL       	R15, t100
+	 129: ANDL       	t98, t100
+	 130: PUTL       	t100, R8
+	 131: INCEIPL       	$4
+
+	0x25475B48:  910B0010  stw r8,16(r11)
+	 132: GETL       	R8, t102
+	 133: GETL       	R11, t104
+	 134: ADDL       	$0x10, t104
+	 135: STL       	t102, (t104)
+	 136: INCEIPL       	$4
+
+	0x25475B4C:  40850018  bc 4,5,0x25475B64
+	 137: Jc05o       	$0x25475B64
+
+
+. 0 25475ADC 116
+. 7D 4A 40 78 3B 9C 00 01 7D 55 C9 2E 28 9C 00 01 81 3D 00 08 80 1D 00 10 81 F1 00 04 7C 69 02 14 7D 83 7A 14 7D CF 00 D0 39 6C FF FF 7D 68 70 38 7D 75 CA 14 91 0B 00 04 3B 39 00 18 80 9D 00 08 80 DD 00 10 7C A4 32 14 90 AB 00 08 81 3D 00 08 80 1D 00 14 7C 69 02 14 90 6B 00 0C 81 91 00 04 81 DD 00 04 7D EC 00 D0 7D C8 78 38 91 0B 00 10 40 85 00 18
+
+==== BB 494 (0x25475B64) approx BBs exec'd 0 ====
+
+	0x25475B64:  807D0018  lwz r3,24(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475B68:  3D807351  lis r12,29521
+	   5: MOVL       	$0x73510000, t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x25475B6C:  618E6240  ori r14,r12,0x6240
+	   8: MOVL       	$0x73516240, t6
+	   9: PUTL       	t6, R14
+	  10: INCEIPL       	$4
+
+	0x25475B70:  546F16FA  rlwinm r15,r3,2,27,29
+	  11: GETL       	R3, t8
+	  12: ROLL       	$0x2, t8
+	  13: ANDL       	$0x1C, t8
+	  14: PUTL       	t8, R15
+	  15: INCEIPL       	$4
+
+	0x25475B74:  7DC87E30  sraw r8,r14,r15
+	  16: GETL       	R14, t12
+	  17: GETL       	R15, t10
+	  18: SARL       	t10, t12  (-wCa)
+	  19: PUTL       	t12, R8
+	  20: INCEIPL       	$4
+
+	0x25475B78:  5506073E  rlwinm r6,r8,0,28,31
+	  21: GETL       	R8, t14
+	  22: ANDL       	$0xF, t14
+	  23: PUTL       	t14, R6
+	  24: INCEIPL       	$4
+
+	0x25475B7C:  90CB0014  stw r6,20(r11)
+	  25: GETL       	R6, t16
+	  26: GETL       	R11, t18
+	  27: ADDL       	$0x14, t18
+	  28: STL       	t16, (t18)
+	  29: INCEIPL       	$4
+
+	0x25475B80:  4BFFFC78  b 0x254757F8
+	  30: JMPo       	$0x254757F8  ($4)
+
+
+. 0 25475B64 32
+. 80 7D 00 18 3D 80 73 51 61 8E 62 40 54 6F 16 FA 7D C8 7E 30 55 06 07 3E 90 CB 00 14 4B FF FC 78
+
+==== BB 495 (0x254757F8) approx BBs exec'd 0 ====
+
+	0x254757F8:  54EF2834  rlwinm r15,r7,5,0,26
+	   0: GETL       	R7, t0
+	   1: SHLL       	$0x5, t0
+	   2: PUTL       	t0, R15
+	   3: INCEIPL       	$4
+
+	0x254757FC:  3BBD0020  addi r29,r29,32
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x20, t2
+	   6: PUTL       	t2, R29
+	   7: INCEIPL       	$4
+
+	0x25475800:  7DCFDA14  add r14,r15,r27
+	   8: GETL       	R15, t4
+	   9: GETL       	R27, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R14
+	  12: INCEIPL       	$4
+
+	0x25475804:  7C8EE840  cmpl cr1,r14,r29
+	  13: GETL       	R14, t8
+	  14: GETL       	R29, t10
+	  15: CMPUL       	t8, t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0x25475808:  408500BC  bc 4,5,0x254758C4
+	  18: Jc05o       	$0x254758C4
+
+
+. 0 254757F8 20
+. 54 EF 28 34 3B BD 00 20 7D CF DA 14 7C 8E E8 40 40 85 00 BC
+
+==== BB 496 (0x25475B50) approx BBs exec'd 0 ====
+
+	0x25475B50:  808BFFEC  lwz r4,-20(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0xFFFFFFEC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25475B54:  7E045000  cmp cr4,r4,r10
+	   5: GETL       	R4, t4
+	   6: GETL       	R10, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x4, CR
+	   9: INCEIPL       	$4
+
+	0x25475B58:  4192000C  bc 12,18,0x25475B64
+	  10: Js18o       	$0x25475B64
+
+
+. 0 25475B50 12
+. 80 8B FF EC 7E 04 50 00 41 92 00 0C
+
+==== BB 497 (0x25475B5C) approx BBs exec'd 0 ====
+
+	0x25475B5C:  39400001  li r10,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x25475B60:  915F00A8  stw r10,168(r31)
+	   3: GETL       	R10, t2
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0xA8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x25475B64:  807D0018  lwz r3,24(r29)
+	   8: GETL       	R29, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x25475B68:  3D807351  lis r12,29521
+	  13: MOVL       	$0x73510000, t10
+	  14: PUTL       	t10, R12
+	  15: INCEIPL       	$4
+
+	0x25475B6C:  618E6240  ori r14,r12,0x6240
+	  16: MOVL       	$0x73516240, t12
+	  17: PUTL       	t12, R14
+	  18: INCEIPL       	$4
+
+	0x25475B70:  546F16FA  rlwinm r15,r3,2,27,29
+	  19: GETL       	R3, t14
+	  20: ROLL       	$0x2, t14
+	  21: ANDL       	$0x1C, t14
+	  22: PUTL       	t14, R15
+	  23: INCEIPL       	$4
+
+	0x25475B74:  7DC87E30  sraw r8,r14,r15
+	  24: GETL       	R14, t18
+	  25: GETL       	R15, t16
+	  26: SARL       	t16, t18  (-wCa)
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0x25475B78:  5506073E  rlwinm r6,r8,0,28,31
+	  29: GETL       	R8, t20
+	  30: ANDL       	$0xF, t20
+	  31: PUTL       	t20, R6
+	  32: INCEIPL       	$4
+
+	0x25475B7C:  90CB0014  stw r6,20(r11)
+	  33: GETL       	R6, t22
+	  34: GETL       	R11, t24
+	  35: ADDL       	$0x14, t24
+	  36: STL       	t22, (t24)
+	  37: INCEIPL       	$4
+
+	0x25475B80:  4BFFFC78  b 0x254757F8
+	  38: JMPo       	$0x254757F8  ($4)
+
+
+. 0 25475B5C 40
+. 39 40 00 01 91 5F 00 A8 80 7D 00 18 3D 80 73 51 61 8E 62 40 54 6F 16 FA 7D C8 7E 30 55 06 07 3E 90 CB 00 14 4B FF FC 78
+
+==== BB 498 (0x254757F0) approx BBs exec'd 0 ====
+
+	0x254757F0:  2C890002  cmpi cr1,r9,2
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254757F4:  418603B8  bc 12,6,0x25475BAC
+	   5: Js06o       	$0x25475BAC
+
+
+. 0 254757F0 8
+. 2C 89 00 02 41 86 03 B8
+
+==== BB 499 (0x25475BAC) approx BBs exec'd 0 ====
+
+	0x25475BAC:  807D0014  lwz r3,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475BB0:  819D0008  lwz r12,8(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25475BB4:  546BE8FE  rlwinm r11,r3,29,3,31
+	  10: GETL       	R3, t8
+	  11: SHRL       	$0x3, t8
+	  12: PUTL       	t8, R11
+	  13: INCEIPL       	$4
+
+	0x25475BB8:  919A0008  stw r12,8(r26)
+	  14: GETL       	R12, t10
+	  15: GETL       	R26, t12
+	  16: ADDL       	$0x8, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x25475BBC:  B17A0156  sth r11,342(r26)
+	  19: GETL       	R11, t14
+	  20: GETL       	R26, t16
+	  21: ADDL       	$0x156, t16
+	  22: STW       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x25475BC0:  4BFFFC38  b 0x254757F8
+	  24: JMPo       	$0x254757F8  ($4)
+
+
+. 0 25475BAC 24
+. 80 7D 00 14 81 9D 00 08 54 6B E8 FE 91 9A 00 08 B1 7A 01 56 4B FF FC 38
+
+==== BB 500 (0x25475820) approx BBs exec'd 0 ====
+
+	0x25475820:  3DC06474  lis r14,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R14
+	   2: INCEIPL       	$4
+
+	0x25475824:  61C0E551  ori r0,r14,0xE551
+	   3: MOVL       	$0x6474E551, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x25475828:  7E090000  cmp cr4,r9,r0
+	   6: GETL       	R9, t4
+	   7: GETL       	R0, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x4, CR
+	  10: INCEIPL       	$4
+
+	0x2547582C:  41920278  bc 12,18,0x25475AA4
+	  11: Js18o       	$0x25475AA4
+
+
+. 0 25475820 16
+. 3D C0 64 74 61 C0 E5 51 7E 09 00 00 41 92 02 78
+
+==== BB 501 (0x25475AA4) approx BBs exec'd 0 ====
+
+	0x25475AA4:  815D0018  lwz r10,24(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25475AA8:  915F00A4  stw r10,164(r31)
+	   5: GETL       	R10, t4
+	   6: GETL       	R31, t6
+	   7: ADDL       	$0xA4, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25475AAC:  4BFFFD4C  b 0x254757F8
+	  10: JMPo       	$0x254757F8  ($4)
+
+
+. 0 25475AA4 12
+. 81 5D 00 18 91 5F 00 A4 4B FF FD 4C
+
+==== BB 502 (0x254758C4) approx BBs exec'd 0 ====
+
+	0x254758C4:  2E1C0000  cmpi cr4,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x254758C8:  419202BC  bc 12,18,0x25475B84
+	   4: Js18o       	$0x25475B84
+
+
+. 0 254758C4 8
+. 2E 1C 00 00 41 92 02 BC
+
+==== BB 503 (0x254758CC) approx BBs exec'd 0 ====
+
+	0x254758CC:  1DFC0018  mulli r15,r28,24
+	   0: GETL       	R28, t0
+	   1: MULL       	$0x18, t0
+	   2: PUTL       	t0, R15
+	   3: INCEIPL       	$4
+
+	0x254758D0:  839F009C  lwz r28,156(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x9C, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x254758D4:  81750000  lwz r11,0(r21)
+	   9: GETL       	R21, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x254758D8:  7EBDAB78  or r29,r21,r21
+	  13: GETL       	R21, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0x254758DC:  2E1C0003  cmpi cr4,r28,3
+	  16: GETL       	R28, t12
+	  17: MOVL       	$0x3, t16
+	  18: CMPL       	t12, t16, t14  (-rSo)
+	  19: ICRFL       	t14, $0x4, CR
+	  20: INCEIPL       	$4
+
+	0x254758E0:  7F8FAA14  add r28,r15,r21
+	  21: GETL       	R15, t18
+	  22: GETL       	R21, t20
+	  23: ADDL       	t18, t20
+	  24: PUTL       	t20, R28
+	  25: INCEIPL       	$4
+
+	0x254758E4:  833CFFF4  lwz r25,-12(r28)
+	  26: GETL       	R28, t22
+	  27: ADDL       	$0xFFFFFFF4, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R25
+	  30: INCEIPL       	$4
+
+	0x254758E8:  7DCBC850  subf r14,r11,r25
+	  31: GETL       	R11, t26
+	  32: GETL       	R25, t28
+	  33: SUBL       	t26, t28
+	  34: PUTL       	t28, R14
+	  35: INCEIPL       	$4
+
+	0x254758EC:  40920818  bc 4,18,0x25476104
+	  36: Jc18o       	$0x25476104
+
+
+. 0 254758CC 36
+. 1D FC 00 18 83 9F 00 9C 81 75 00 00 7E BD AB 78 2E 1C 00 03 7F 8F AA 14 83 3C FF F4 7D CB C8 50 40 92 08 18
+
+==== BB 504 (0x254758F0) approx BBs exec'd 0 ====
+
+	0x254758F0:  8191004C  lwz r12,76(r17)
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x4C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x254758F4:  7DC47378  or r4,r14,r14
+	   5: GETL       	R14, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x254758F8:  7EE3BB78  or r3,r23,r23
+	   8: GETL       	R23, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x254758FC:  7D656038  and r5,r11,r12
+	  11: GETL       	R11, t8
+	  12: GETL       	R12, t10
+	  13: ANDL       	t8, t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x25475900:  4800ADE9  bl 0x254806E8
+	  16: MOVL       	$0x25475904, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0x254806E8  ($4)
+
+
+. 0 254758F0 20
+. 81 91 00 4C 7D C4 73 78 7E E3 BB 78 7D 65 60 38 48 00 AD E9
+
+==== BB 505 __elf_preferred_address(0x254806E8) approx BBs exec'd 0 ====
+
+	0x254806E8:  7CA32B79  or. r3,r5,r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, R3
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254806EC:  7D8802A6  mflr r12
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x254806F0:  9421FFE0  stwu r1,-32(r1)
+	   8: GETL       	R1, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xFFFFFFE0, t8
+	  11: PUTL       	t8, R1
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x254806F4:  4801690D  bl 0x25497000
+	  14: MOVL       	$0x254806F8, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254806E8 16
+. 7C A3 2B 79 7D 88 02 A6 94 21 FF E0 48 01 69 0D
+
+==== BB 506 (0x254806F8) approx BBs exec'd 0 ====
+
+	0x254806F8:  93A10014  stw r29,20(r1)
+	   0: GETL       	R29, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254806FC:  7C9D2378  or r29,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25480700:  93C10018  stw r30,24(r1)
+	   8: GETL       	R30, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x18, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25480704:  7FC802A6  mflr r30
+	  13: GETL       	LR, t10
+	  14: PUTL       	t10, R30
+	  15: INCEIPL       	$4
+
+	0x25480708:  93810010  stw r28,16(r1)
+	  16: GETL       	R28, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x10, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2548070C:  93E1001C  stw r31,28(r1)
+	  21: GETL       	R31, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25480710:  7D8803A6  mtlr r12
+	  26: GETL       	R12, t20
+	  27: PUTL       	t20, LR
+	  28: INCEIPL       	$4
+
+	0x25480714:  408200FC  bc 4,2,0x25480810
+	  29: Jc02o       	$0x25480810
+
+
+. 0 254806F8 32
+. 93 A1 00 14 7C 9D 23 78 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 E1 00 1C 7D 88 03 A6 40 82 00 FC
+
+==== BB 507 (0x25480718) approx BBs exec'd 0 ====
+
+	0x25480718:  3CC00003  lis r6,3
+	   0: MOVL       	$0x30000, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x2548071C:  83FE04F4  lwz r31,1268(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0x4F4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0x25480720:  809E04C8  lwz r4,1224(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x4C8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25480724:  60C6FFFF  ori r6,r6,0xFFFF
+	  13: MOVL       	$0x3FFFF, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x25480728:  3CE07000  lis r7,28672
+	  16: MOVL       	$0x70000000, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x2548072C:  3980000F  li r12,15
+	  19: MOVL       	$0xF, t14
+	  20: PUTL       	t14, R12
+	  21: INCEIPL       	$4
+
+	0x25480730:  81640000  lwz r11,0(r4)
+	  22: GETL       	R4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R11
+	  25: INCEIPL       	$4
+
+	0x25480734:  38840018  addi r4,r4,24
+	  26: GETL       	R4, t20
+	  27: ADDL       	$0x18, t20
+	  28: PUTL       	t20, R4
+	  29: INCEIPL       	$4
+
+	0x25480738:  2F8B0000  cmpi cr7,r11,0
+	  30: GETL       	R11, t22
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0x2548073C:  419E009C  bc 12,30,0x254807D8
+	  34: Js30o       	$0x254807D8
+
+
+. 0 25480718 40
+. 3C C0 00 03 83 FE 04 F4 80 9E 04 C8 60 C6 FF FF 3C E0 70 00 39 80 00 0F 81 64 00 00 38 84 00 18 2F 8B 00 00 41 9E 00 9C
+
+==== BB 508 (0x25480740) approx BBs exec'd 0 ====
+
+	0x25480740:  813F0004  lwz r9,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25480744:  38A9FFFF  addi r5,r9,-1
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25480748:  7CA328F8  nor r3,r5,r5
+	   9: GETL       	R5, t6
+	  10: NOTL       	t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x2548074C:  800B01A4  lwz r0,420(r11)
+	  13: GETL       	R11, t8
+	  14: ADDL       	$0x1A4, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R0
+	  17: INCEIPL       	$4
+
+	0x25480750:  838B01A0  lwz r28,416(r11)
+	  18: GETL       	R11, t12
+	  19: ADDL       	$0x1A0, t12
+	  20: LDL       	(t12), t14
+	  21: PUTL       	t14, R28
+	  22: INCEIPL       	$4
+
+	0x25480754:  7C082B78  or r8,r0,r5
+	  23: GETL       	R0, t16
+	  24: GETL       	R5, t18
+	  25: ORL       	t18, t16
+	  26: PUTL       	t16, R8
+	  27: INCEIPL       	$4
+
+	0x25480758:  7C883840  cmpl cr1,r8,r7
+	  28: GETL       	R8, t20
+	  29: GETL       	R7, t22
+	  30: CMPUL       	t20, t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x1, CR
+	  32: INCEIPL       	$4
+
+	0x2548075C:  7F8A1838  and r10,r28,r3
+	  33: GETL       	R28, t26
+	  34: GETL       	R3, t28
+	  35: ANDL       	t26, t28
+	  36: PUTL       	t28, R10
+	  37: INCEIPL       	$4
+
+	0x25480760:  40840010  bc 4,4,0x25480770
+	  38: Jc04o       	$0x25480770
+
+
+. 0 25480740 36
+. 81 3F 00 04 38 A9 FF FF 7C A3 28 F8 80 0B 01 A4 83 8B 01 A0 7C 08 2B 78 7C 88 38 40 7F 8A 18 38 40 84 00 10
+
+==== BB 509 (0x25480764) approx BBs exec'd 0 ====
+
+	0x25480764:  838B0180  lwz r28,384(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25480768:  7789C000  andis. r9,r28,0xC000
+	   5: GETL       	R28, t4
+	   6: ANDL       	$0xC0000000, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2548076C:  4082000C  bc 4,2,0x25480778
+	  11: Jc02o       	$0x25480778
+
+
+. 0 25480764 12
+. 83 8B 01 80 77 89 C0 00 40 82 00 0C
+
+==== BB 510 (0x25480770) approx BBs exec'd 0 ====
+
+	0x25480770:  7F075040  cmpl cr6,r7,r10
+	   0: GETL       	R7, t0
+	   1: GETL       	R10, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25480774:  40980054  bc 4,24,0x254807C8
+	   5: Jc24o       	$0x254807C8
+
+
+. 0 25480770 8
+. 7F 07 50 40 40 98 00 54
+
+==== BB 511 (0x254807C8) approx BBs exec'd 0 ====
+
+	0x254807C8:  7D475378  or r7,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x254807CC:  816B000C  lwz r11,12(r11)
+	   3: GETL       	R11, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x254807D0:  2C8B0000  cmpi cr1,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x254807D4:  4086FF78  bc 4,6,0x2548074C
+	  12: Jc06o       	$0x2548074C
+
+
+. 0 254807C8 16
+. 7D 47 53 78 81 6B 00 0C 2C 8B 00 00 40 86 FF 78
+
+==== BB 512 (0x2548074C) approx BBs exec'd 0 ====
+
+	0x2548074C:  800B01A4  lwz r0,420(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x1A4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25480750:  838B01A0  lwz r28,416(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x1A0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25480754:  7C082B78  or r8,r0,r5
+	  10: GETL       	R0, t8
+	  11: GETL       	R5, t10
+	  12: ORL       	t10, t8
+	  13: PUTL       	t8, R8
+	  14: INCEIPL       	$4
+
+	0x25480758:  7C883840  cmpl cr1,r8,r7
+	  15: GETL       	R8, t12
+	  16: GETL       	R7, t14
+	  17: CMPUL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0x2548075C:  7F8A1838  and r10,r28,r3
+	  20: GETL       	R28, t18
+	  21: GETL       	R3, t20
+	  22: ANDL       	t18, t20
+	  23: PUTL       	t20, R10
+	  24: INCEIPL       	$4
+
+	0x25480760:  40840010  bc 4,4,0x25480770
+	  25: Jc04o       	$0x25480770
+
+
+. 0 2548074C 24
+. 80 0B 01 A4 83 8B 01 A0 7C 08 2B 78 7C 88 38 40 7F 8A 18 38 40 84 00 10
+
+==== BB 513 (0x25480778) approx BBs exec'd 0 ====
+
+	0x25480778:  7C064010  subfc r0,r6,r8
+	   0: GETL       	R6, t0
+	   1: GETL       	R8, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2548077C:  38000000  li r0,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25480780:  7C000114  adde r0,r0,r0
+	   8: GETL       	R0, t6
+	   9: GETL       	R0, t8
+	  10: ADCL       	t6, t8  (-rCa-wCa)
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x25480784:  7D2A3010  subfc r9,r10,r6
+	  13: GETL       	R10, t10
+	  14: GETL       	R6, t12
+	  15: SBBL       	t10, t12  (-wCa)
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x25480788:  39200000  li r9,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R9
+	  20: INCEIPL       	$4
+
+	0x2548078C:  7D294914  adde r9,r9,r9
+	  21: GETL       	R9, t16
+	  22: GETL       	R9, t18
+	  23: ADCL       	t16, t18  (-rCa-wCa)
+	  24: PUTL       	t18, R9
+	  25: INCEIPL       	$4
+
+	0x25480790:  7C1C4839  and. r28,r0,r9
+	  26: GETL       	R0, t20
+	  27: GETL       	R9, t22
+	  28: ANDL       	t20, t22
+	  29: PUTL       	t22, R28
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0x25480794:  40820094  bc 4,2,0x25480828
+	  33: Jc02o       	$0x25480828
+
+
+. 0 25480778 32
+. 7C 06 40 10 38 00 00 00 7C 00 01 14 7D 2A 30 10 39 20 00 00 7D 29 49 14 7C 1C 48 39 40 82 00 94
+
+==== BB 514 (0x25480798) approx BBs exec'd 0 ====
+
+	0x25480798:  7C083810  subfc r0,r8,r7
+	   0: GETL       	R8, t0
+	   1: GETL       	R7, t2
+	   2: SBBL       	t0, t2  (-wCa)
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2548079C:  38000000  li r0,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x254807A0:  7C000114  adde r0,r0,r0
+	   8: GETL       	R0, t6
+	   9: GETL       	R0, t8
+	  10: ADCL       	t6, t8  (-rCa-wCa)
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x254807A4:  7D265010  subfc r9,r6,r10
+	  13: GETL       	R6, t10
+	  14: GETL       	R10, t12
+	  15: SBBL       	t10, t12  (-wCa)
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x254807A8:  39200000  li r9,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R9
+	  20: INCEIPL       	$4
+
+	0x254807AC:  7D294914  adde r9,r9,r9
+	  21: GETL       	R9, t16
+	  22: GETL       	R9, t18
+	  23: ADCL       	t16, t18  (-rCa-wCa)
+	  24: PUTL       	t18, R9
+	  25: INCEIPL       	$4
+
+	0x254807B0:  7C1C4839  and. r28,r0,r9
+	  26: GETL       	R0, t20
+	  27: GETL       	R9, t22
+	  28: ANDL       	t20, t22
+	  29: PUTL       	t22, R28
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0x254807B4:  41820018  bc 12,2,0x254807CC
+	  33: Js02o       	$0x254807CC
+
+
+. 0 25480798 32
+. 7C 08 38 10 38 00 00 00 7C 00 01 14 7D 26 50 10 39 20 00 00 7D 29 49 14 7C 1C 48 39 41 82 00 18
+
+==== BB 515 (0x254807CC) approx BBs exec'd 0 ====
+
+	0x254807CC:  816B000C  lwz r11,12(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254807D0:  2C8B0000  cmpi cr1,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254807D4:  4086FF78  bc 4,6,0x2548074C
+	   9: Jc06o       	$0x2548074C
+
+
+. 0 254807CC 12
+. 81 6B 00 0C 2C 8B 00 00 40 86 FF 78
+
+==== BB 516 (0x254807D8) approx BBs exec'd 0 ====
+
+	0x254807D8:  358CFFFF  addic. r12,r12,-1
+	   0: GETL       	R12, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R12
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x254807DC:  4080FF54  bc 4,0,0x25480730
+	   6: Jc00o       	$0x25480730
+
+
+. 0 254807D8 8
+. 35 8C FF FF 40 80 FF 54
+
+==== BB 517 (0x25480730) approx BBs exec'd 0 ====
+
+	0x25480730:  81640000  lwz r11,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25480734:  38840018  addi r4,r4,24
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0x18, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25480738:  2F8B0000  cmpi cr7,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x2548073C:  419E009C  bc 12,30,0x254807D8
+	  12: Js30o       	$0x254807D8
+
+
+. 0 25480730 16
+. 81 64 00 00 38 84 00 18 2F 8B 00 00 41 9E 00 9C
+
+==== BB 518 (0x254807E0) approx BBs exec'd 0 ====
+
+	0x254807E0:  3C67FFFF  addis r3,r7,-1
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0xFFFF0000, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x254807E4:  80FF0004  lwz r7,4(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x254807E8:  7F033040  cmpl cr6,r3,r6
+	   9: GETL       	R3, t6
+	  10: GETL       	R6, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0x254807EC:  3887FFFF  addi r4,r7,-1
+	  14: GETL       	R7, t12
+	  15: ADDL       	$0xFFFFFFFF, t12
+	  16: PUTL       	t12, R4
+	  17: INCEIPL       	$4
+
+	0x254807F0:  7C85EB78  or r5,r4,r29
+	  18: GETL       	R4, t14
+	  19: GETL       	R29, t16
+	  20: ORL       	t16, t14
+	  21: PUTL       	t14, R5
+	  22: INCEIPL       	$4
+
+	0x254807F4:  38850001  addi r4,r5,1
+	  23: GETL       	R5, t18
+	  24: ADDL       	$0x1, t18
+	  25: PUTL       	t18, R4
+	  26: INCEIPL       	$4
+
+	0x254807F8:  40990014  bc 4,25,0x2548080C
+	  27: Jc25o       	$0x2548080C
+
+
+. 0 254807E0 28
+. 3C 67 FF FF 80 FF 00 04 7F 03 30 40 38 87 FF FF 7C 85 EB 78 38 85 00 01 40 99 00 14
+
+==== BB 519 (0x254807FC) approx BBs exec'd 0 ====
+
+	0x254807FC:  7D061850  subf r8,r6,r3
+	   0: GETL       	R6, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25480800:  7C641850  subf r3,r4,r3
+	   5: GETL       	R4, t4
+	   6: GETL       	R3, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25480804:  7F882040  cmpl cr7,r8,r4
+	  10: GETL       	R8, t8
+	  11: GETL       	R4, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x25480808:  409C0008  bc 4,28,0x25480810
+	  15: Jc28o       	$0x25480810
+
+
+. 0 254807FC 16
+. 7D 06 18 50 7C 64 18 50 7F 88 20 40 40 9C 00 08
+
+==== BB 520 (0x25480810) approx BBs exec'd 0 ====
+
+	0x25480810:  83810010  lwz r28,16(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25480814:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x25480818:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0x2548081C:  83E1001C  lwz r31,28(r1)
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R31
+	  19: INCEIPL       	$4
+
+	0x25480820:  38210020  addi r1,r1,32
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x20, t16
+	  22: PUTL       	t16, R1
+	  23: INCEIPL       	$4
+
+	0x25480824:  4E800020  blr
+	  24: GETL       	LR, t18
+	  25: JMPo-r       	t18  ($4)
+
+
+. 0 25480810 24
+. 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 521 (0x25475904) approx BBs exec'd 0 ====
+
+	0x25475904:  80B50014  lwz r5,20(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25475908:  81150010  lwz r8,16(r21)
+	   5: GETL       	R21, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x2547590C:  7DC47378  or r4,r14,r14
+	  10: GETL       	R14, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25475910:  38C00802  li r6,2050
+	  13: MOVL       	$0x802, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x25475914:  7E679B78  or r7,r19,r19
+	  16: GETL       	R19, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x25475918:  4800CE81  bl 0x25482798
+	  19: MOVL       	$0x2547591C, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25482798  ($4)
+
+
+. 0 25475904 24
+. 80 B5 00 14 81 15 00 10 7D C4 73 78 38 C0 08 02 7E 67 9B 78 48 00 CE 81
+
+==9225== Reading syms from /home/sewardj/valgrind-2.2.0-ppc/Inst/lib/valgrind/vg_inject.so (0xFFDD000)
+==== BB 522 (0x2547591C) approx BBs exec'd 0 ====
+
+	0x2547591C:  2F03FFFF  cmpi cr6,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25475920:  907A01A0  stw r3,416(r26)
+	   5: GETL       	R3, t6
+	   6: GETL       	R26, t8
+	   7: ADDL       	$0x1A0, t8
+	   8: STL       	t6, (t8)
+	   9: INCEIPL       	$4
+
+	0x25475924:  419A013C  bc 12,26,0x25475A60
+	  10: Js26o       	$0x25475A60
+
+
+. 0 2547591C 12
+. 2F 03 FF FF 90 7A 01 A0 41 9A 01 3C
+
+==== BB 523 (0x25475928) approx BBs exec'd 0 ====
+
+	0x25475928:  817F00A8  lwz r11,168(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xA8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547592C:  7CA37214  add r5,r3,r14
+	   5: GETL       	R3, t4
+	   6: GETL       	R14, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R5
+	   9: INCEIPL       	$4
+
+	0x25475930:  90BA01A4  stw r5,420(r26)
+	  10: GETL       	R5, t8
+	  11: GETL       	R26, t10
+	  12: ADDL       	$0x1A4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475934:  2F8B0000  cmpi cr7,r11,0
+	  15: GETL       	R11, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25475938:  80F50000  lwz r7,0(r21)
+	  19: GETL       	R21, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R7
+	  22: INCEIPL       	$4
+
+	0x2547593C:  7C071850  subf r0,r7,r3
+	  23: GETL       	R7, t20
+	  24: GETL       	R3, t22
+	  25: SUBL       	t20, t22
+	  26: PUTL       	t22, R0
+	  27: INCEIPL       	$4
+
+	0x25475940:  901A0000  stw r0,0(r26)
+	  28: GETL       	R0, t24
+	  29: GETL       	R26, t26
+	  30: STL       	t24, (t26)
+	  31: INCEIPL       	$4
+
+	0x25475944:  409E0534  bc 4,30,0x25475E78
+	  32: Jc30o       	$0x25475E78
+
+
+. 0 25475928 32
+. 81 7F 00 A8 7C A3 72 14 90 BA 01 A4 2F 8B 00 00 80 F5 00 00 7C 07 18 50 90 1A 00 00 40 9E 05 34
+
+==== BB 524 (0x25475E78) approx BBs exec'd 0 ====
+
+	0x25475E78:  80750004  lwz r3,4(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475E7C:  38A00000  li r5,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25475E80:  809CFFF4  lwz r4,-12(r28)
+	   8: GETL       	R28, t6
+	   9: ADDL       	$0xFFFFFFF4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25475E84:  7C832050  subf r4,r3,r4
+	  13: GETL       	R3, t10
+	  14: GETL       	R4, t12
+	  15: SUBL       	t10, t12
+	  16: PUTL       	t12, R4
+	  17: INCEIPL       	$4
+
+	0x25475E88:  7C601A14  add r3,r0,r3
+	  18: GETL       	R0, t14
+	  19: GETL       	R3, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R3
+	  22: INCEIPL       	$4
+
+	0x25475E8C:  4800C92D  bl 0x254827B8
+	  23: MOVL       	$0x25475E90, t18
+	  24: PUTL       	t18, LR
+	  25: JMPo-c       	$0x254827B8  ($4)
+
+
+. 0 25475E78 24
+. 80 75 00 04 38 A0 00 00 80 9C FF F4 7C 83 20 50 7C 60 1A 14 48 00 C9 2D
+
+==== BB 525 mprotect(0x254827B8) approx BBs exec'd 0 ====
+
+	0x254827B8:  3800007D  li r0,125
+	   0: MOVL       	$0x7D, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254827BC:  44000002  sc
+	   3: JMPo-sys       	$0x254827C0  ($4)
+
+
+. 0 254827B8 8
+. 38 00 00 7D 44 00 00 02
+
+==== BB 526 (0x254827C0) approx BBs exec'd 0 ====
+
+	0x254827C0:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+. 0 254827C0 4
+. 4C A3 00 20
+
+==== BB 527 (0x25475E90) approx BBs exec'd 0 ====
+
+	0x25475E90:  4BFFFAB8  b 0x25475948
+	   0: JMPo       	$0x25475948  ($4)
+
+
+. 0 25475E90 4
+. 4B FF FA B8
+
+==== BB 528 (0x25475948) approx BBs exec'd 0 ====
+
+	0x25475948:  80DD0014  lwz r6,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547594C:  70C90004  andi. r9,r6,0x4
+	   5: GETL       	R6, t4
+	   6: ANDL       	$0x4, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475950:  41820014  bc 12,2,0x25475964
+	  11: Js02o       	$0x25475964
+
+
+. 0 25475948 12
+. 80 DD 00 14 70 C9 00 04 41 82 00 14
+
+==== BB 529 (0x25475954) approx BBs exec'd 0 ====
+
+	0x25475954:  839D0004  lwz r28,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25475958:  833A0000  lwz r25,0(r26)
+	   5: GETL       	R26, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R25
+	   8: INCEIPL       	$4
+
+	0x2547595C:  7D19E214  add r8,r25,r28
+	   9: GETL       	R25, t8
+	  10: GETL       	R28, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R8
+	  13: INCEIPL       	$4
+
+	0x25475960:  911A01A8  stw r8,424(r26)
+	  14: GETL       	R8, t12
+	  15: GETL       	R26, t14
+	  16: ADDL       	$0x1A8, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25475964:  819A014C  lwz r12,332(r26)
+	  19: GETL       	R26, t16
+	  20: ADDL       	$0x14C, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R12
+	  23: INCEIPL       	$4
+
+	0x25475968:  2F8C0000  cmpi cr7,r12,0
+	  24: GETL       	R12, t20
+	  25: CMP0L       	t20, t22  (-rSo)
+	  26: ICRFL       	t22, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0x2547596C:  409E0038  bc 4,30,0x254759A4
+	  28: Jc30o       	$0x254759A4
+
+
+. 0 25475954 28
+. 83 9D 00 04 83 3A 00 00 7D 19 E2 14 91 1A 01 A8 81 9A 01 4C 2F 8C 00 00 40 9E 00 38
+
+==== BB 530 (0x25475970) approx BBs exec'd 0 ====
+
+	0x25475970:  815D0010  lwz r10,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25475974:  8178001C  lwz r11,28(r24)
+	   5: GETL       	R24, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25475978:  7C8A5840  cmpl cr1,r10,r11
+	  10: GETL       	R10, t8
+	  11: GETL       	R11, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x2547597C:  41850028  bc 12,5,0x254759A4
+	  15: Js05o       	$0x254759A4
+
+
+. 0 25475970 16
+. 81 5D 00 10 81 78 00 1C 7C 8A 58 40 41 85 00 28
+
+==== BB 531 (0x25475980) approx BBs exec'd 0 ====
+
+	0x25475980:  801D0004  lwz r0,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475984:  811D0000  lwz r8,0(r29)
+	   5: GETL       	R29, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R8
+	   8: INCEIPL       	$4
+
+	0x25475988:  A138002C  lhz r9,44(r24)
+	   9: GETL       	R24, t8
+	  10: ADDL       	$0x2C, t8
+	  11: LDW       	(t8), t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x2547598C:  7C880050  subf r4,r8,r0
+	  14: GETL       	R8, t12
+	  15: GETL       	R0, t14
+	  16: SUBL       	t12, t14
+	  17: PUTL       	t14, R4
+	  18: INCEIPL       	$4
+
+	0x25475990:  55232834  rlwinm r3,r9,5,0,26
+	  19: GETL       	R9, t16
+	  20: SHLL       	$0x5, t16
+	  21: PUTL       	t16, R3
+	  22: INCEIPL       	$4
+
+	0x25475994:  7CA45214  add r5,r4,r10
+	  23: GETL       	R4, t18
+	  24: GETL       	R10, t20
+	  25: ADDL       	t18, t20
+	  26: PUTL       	t20, R5
+	  27: INCEIPL       	$4
+
+	0x25475998:  7CEB1A14  add r7,r11,r3
+	  28: GETL       	R11, t22
+	  29: GETL       	R3, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R7
+	  32: INCEIPL       	$4
+
+	0x2547599C:  7F053840  cmpl cr6,r5,r7
+	  33: GETL       	R5, t26
+	  34: GETL       	R7, t28
+	  35: CMPUL       	t26, t28, t30  (-rSo)
+	  36: ICRFL       	t30, $0x6, CR
+	  37: INCEIPL       	$4
+
+	0x254759A0:  4098069C  bc 4,24,0x2547603C
+	  38: Jc24o       	$0x2547603C
+
+
+. 0 25475980 36
+. 80 1D 00 04 81 1D 00 00 A1 38 00 2C 7C 88 00 50 55 23 28 34 7C A4 52 14 7C EB 1A 14 7F 05 38 40 40 98 06 9C
+
+==== BB 532 (0x2547603C) approx BBs exec'd 0 ====
+
+	0x2547603C:  7EE85A14  add r23,r8,r11
+	   0: GETL       	R8, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x25476040:  7D6AB850  subf r11,r10,r23
+	   5: GETL       	R10, t4
+	   6: GETL       	R23, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25476044:  917A014C  stw r11,332(r26)
+	  10: GETL       	R11, t8
+	  11: GETL       	R26, t10
+	  12: ADDL       	$0x14C, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25476048:  4BFFF95C  b 0x254759A4
+	  15: JMPo       	$0x254759A4  ($4)
+
+
+. 0 2547603C 16
+. 7E E8 5A 14 7D 6A B8 50 91 7A 01 4C 4B FF F9 5C
+
+==== BB 533 (0x254759A4) approx BBs exec'd 0 ====
+
+	0x254759A4:  809D000C  lwz r4,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254759A8:  807D0008  lwz r3,8(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x254759AC:  7C041840  cmpl cr0,r4,r3
+	  10: GETL       	R4, t8
+	  11: GETL       	R3, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x254759B0:  40810068  bc 4,1,0x25475A18
+	  15: Jc01o       	$0x25475A18
+
+
+. 0 254759A4 16
+. 80 9D 00 0C 80 7D 00 08 7C 04 18 40 40 81 00 68
+
+==== BB 534 (0x25475A18) approx BBs exec'd 0 ====
+
+	0x25475A18:  3BBD0018  addi r29,r29,24
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x18, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x25475A1C:  7F357A14  add r25,r21,r15
+	   4: GETL       	R21, t2
+	   5: GETL       	R15, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R25
+	   8: INCEIPL       	$4
+
+	0x25475A20:  7F99E840  cmpl cr7,r25,r29
+	   9: GETL       	R25, t6
+	  10: GETL       	R29, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25475A24:  409D01F8  bc 4,29,0x25475C1C
+	  14: Jc29o       	$0x25475C1C
+
+
+. 0 25475A18 16
+. 3B BD 00 18 7F 35 7A 14 7F 99 E8 40 40 9D 01 F8
+
+==== BB 535 (0x25475A28) approx BBs exec'd 0 ====
+
+	0x25475A28:  809D0004  lwz r4,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25475A2C:  801D0000  lwz r0,0(r29)
+	   5: GETL       	R29, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x25475A30:  7C840040  cmpl cr1,r4,r0
+	   9: GETL       	R4, t8
+	  10: GETL       	R0, t10
+	  11: CMPUL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25475A34:  4085FF14  bc 4,5,0x25475948
+	  14: Jc05o       	$0x25475948
+
+
+. 0 25475A28 16
+. 80 9D 00 04 80 1D 00 00 7C 84 00 40 40 85 FF 14
+
+==== BB 536 (0x25475A38) approx BBs exec'd 0 ====
+
+	0x25475A38:  815A0000  lwz r10,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0x25475A3C:  7C802050  subf r4,r0,r4
+	   4: GETL       	R0, t4
+	   5: GETL       	R4, t6
+	   6: SUBL       	t4, t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x25475A40:  80BD0014  lwz r5,20(r29)
+	   9: GETL       	R29, t8
+	  10: ADDL       	$0x14, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R5
+	  13: INCEIPL       	$4
+
+	0x25475A44:  38C00812  li r6,2066
+	  14: MOVL       	$0x812, t12
+	  15: PUTL       	t12, R6
+	  16: INCEIPL       	$4
+
+	0x25475A48:  811D0010  lwz r8,16(r29)
+	  17: GETL       	R29, t14
+	  18: ADDL       	$0x10, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R8
+	  21: INCEIPL       	$4
+
+	0x25475A4C:  7C6A0214  add r3,r10,r0
+	  22: GETL       	R10, t18
+	  23: GETL       	R0, t20
+	  24: ADDL       	t18, t20
+	  25: PUTL       	t20, R3
+	  26: INCEIPL       	$4
+
+	0x25475A50:  7E679B78  or r7,r19,r19
+	  27: GETL       	R19, t22
+	  28: PUTL       	t22, R7
+	  29: INCEIPL       	$4
+
+	0x25475A54:  4800CD45  bl 0x25482798
+	  30: MOVL       	$0x25475A58, t24
+	  31: PUTL       	t24, LR
+	  32: JMPo-c       	$0x25482798  ($4)
+
+
+. 0 25475A38 32
+. 81 5A 00 00 7C 80 20 50 80 BD 00 14 38 C0 08 12 81 1D 00 10 7C 6A 02 14 7E 67 9B 78 48 00 CD 45
+
+==== BB 537 (0x25475A58) approx BBs exec'd 0 ====
+
+	0x25475A58:  2F03FFFF  cmpi cr6,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25475A5C:  409AFEEC  bc 4,26,0x25475948
+	   5: Jc26o       	$0x25475948
+
+
+. 0 25475A58 8
+. 2F 03 FF FF 40 9A FE EC
+
+==== BB 538 (0x254759B4) approx BBs exec'd 0 ====
+
+	0x254759B4:  80DA0000  lwz r6,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x254759B8:  81710004  lwz r11,4(r17)
+	   4: GETL       	R17, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0x254759BC:  7F261A14  add r25,r6,r3
+	   9: GETL       	R6, t8
+	  10: GETL       	R3, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R25
+	  13: INCEIPL       	$4
+
+	0x254759C0:  7EE62214  add r23,r6,r4
+	  14: GETL       	R6, t12
+	  15: GETL       	R4, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R23
+	  18: INCEIPL       	$4
+
+	0x254759C4:  7D0BCA14  add r8,r11,r25
+	  19: GETL       	R11, t16
+	  20: GETL       	R25, t18
+	  21: ADDL       	t16, t18
+	  22: PUTL       	t18, R8
+	  23: INCEIPL       	$4
+
+	0x254759C8:  7C6B00D0  neg r3,r11
+	  24: GETL       	R11, t20
+	  25: NEGL       	t20
+	  26: PUTL       	t20, R3
+	  27: INCEIPL       	$4
+
+	0x254759CC:  3948FFFF  addi r10,r8,-1
+	  28: GETL       	R8, t22
+	  29: ADDL       	$0xFFFFFFFF, t22
+	  30: PUTL       	t22, R10
+	  31: INCEIPL       	$4
+
+	0x254759D0:  7D5C1838  and r28,r10,r3
+	  32: GETL       	R10, t24
+	  33: GETL       	R3, t26
+	  34: ANDL       	t24, t26
+	  35: PUTL       	t26, R28
+	  36: INCEIPL       	$4
+
+	0x254759D4:  7F97E040  cmpl cr7,r23,r28
+	  37: GETL       	R23, t28
+	  38: GETL       	R28, t30
+	  39: CMPUL       	t28, t30, t32  (-rSo)
+	  40: ICRFL       	t32, $0x7, CR
+	  41: INCEIPL       	$4
+
+	0x254759D8:  409C0008  bc 4,28,0x254759E0
+	  42: Jc28o       	$0x254759E0
+
+
+. 0 254759B4 40
+. 80 DA 00 00 81 71 00 04 7F 26 1A 14 7E E6 22 14 7D 0B CA 14 7C 6B 00 D0 39 48 FF FF 7D 5C 18 38 7F 97 E0 40 40 9C 00 08
+
+==== BB 539 (0x254759DC) approx BBs exec'd 0 ====
+
+	0x254759DC:  7EFCBB78  or r28,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0x254759E0:  7C9CC840  cmpl cr1,r28,r25
+	   3: GETL       	R28, t2
+	   4: GETL       	R25, t4
+	   5: CMPUL       	t2, t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x254759E4:  4085002C  bc 4,5,0x25475A10
+	   8: Jc05o       	$0x25475A10
+
+
+. 0 254759DC 12
+. 7E FC BB 78 7C 9C C8 40 40 85 00 2C
+
+==== BB 540 (0x254759E8) approx BBs exec'd 0 ====
+
+	0x254759E8:  80BD0014  lwz r5,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254759EC:  70AA0002  andi. r10,r5,0x2
+	   5: GETL       	R5, t4
+	   6: ANDL       	$0x2, t4
+	   7: PUTL       	t4, R10
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x254759F0:  41820838  bc 12,2,0x25476228
+	  11: Js02o       	$0x25476228
+
+
+. 0 254759E8 12
+. 80 BD 00 14 70 AA 00 02 41 82 08 38
+
+==== BB 541 (0x254759F4) approx BBs exec'd 0 ====
+
+	0x254759F4:  7CB9E050  subf r5,r25,r28
+	   0: GETL       	R25, t0
+	   1: GETL       	R28, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254759F8:  7F23CB78  or r3,r25,r25
+	   5: GETL       	R25, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254759FC:  38800000  li r4,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x25475A00:  4800DD45  bl 0x25483744
+	  11: MOVL       	$0x25475A04, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483744  ($4)
+
+
+. 0 254759F4 16
+. 7C B9 E0 50 7F 23 CB 78 38 80 00 00 48 00 DD 45
+
+==== BB 542 memset(0x25483744) approx BBs exec'd 0 ====
+
+	0x25483744:  28850004  cmpli cr1,r5,4
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x4, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25483748:  70670003  andi. r7,r3,0x3
+	   5: GETL       	R3, t6
+	   6: ANDL       	$0x3, t6
+	   7: PUTL       	t6, R7
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2548374C:  7C661B78  or r6,r3,r3
+	  11: GETL       	R3, t10
+	  12: PUTL       	t10, R6
+	  13: INCEIPL       	$4
+
+	0x25483750:  40850190  bc 4,5,0x254838E0
+	  14: Jc05o       	$0x254838E0
+
+
+. 0 25483744 16
+. 28 85 00 04 70 67 00 03 7C 66 1B 78 40 85 01 90
+
+==== BB 543 (0x25483754) approx BBs exec'd 0 ====
+
+	0x25483754:  2A85001F  cmpli cr5,r5,31
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x1F, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x5, CR
+	   4: INCEIPL       	$4
+
+	0x25483758:  5084442E  rlwimi r4,r4,8,16,23
+	   5: GETL       	R4, t6
+	   6: GETL       	R4, t8
+	   7: ROLL       	$0x8, t8
+	   8: ANDL       	$0xFF00, t8
+	   9: ANDL       	$0xFFFF00FF, t6
+	  10: ORL       	t6, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x2548375C:  41A20024  bc 13,2,0x25483780
+	  13: Js02o       	$0x25483780
+
+
+. 0 25483754 12
+. 2A 85 00 1F 50 84 44 2E 41 A2 00 24
+
+==== BB 544 (0x25483780) approx BBs exec'd 0 ====
+
+	0x25483780:  7CA01120  mtcrf 0x1,r5
+	   0: GETL       	R5, t0
+	   1: ICRFL       	t0, $0x7, CR
+	   2: INCEIPL       	$4
+
+	0x25483784:  5084801E  rlwimi r4,r4,16,0,15
+	   3: GETL       	R4, t2
+	   4: GETL       	R4, t4
+	   5: ROLL       	$0x10, t4
+	   6: ANDL       	$0xFFFF0000, t4
+	   7: ANDL       	$0xFFFF, t2
+	   8: ORL       	t2, t4
+	   9: PUTL       	t4, R4
+	  10: INCEIPL       	$4
+
+	0x25483788:  40950198  bc 4,21,0x25483920
+	  11: Jc21o       	$0x25483920
+
+
+. 0 25483780 12
+. 7C A0 11 20 50 84 80 1E 40 95 01 98
+
+==== BB 545 (0x2548378C) approx BBs exec'd 0 ====
+
+	0x2548378C:  70C7001C  andi. r7,r6,0x1C
+	   0: GETL       	R6, t0
+	   1: ANDL       	$0x1C, t0
+	   2: PUTL       	t0, R7
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25483790:  20E70020  subfic r7,r7,32
+	   6: GETL       	R7, t4
+	   7: MOVL       	$0x20, t6
+	   8: SBBL       	t4, t6  (-wCa)
+	   9: PUTL       	t6, R7
+	  10: INCEIPL       	$4
+
+	0x25483794:  41820040  bc 12,2,0x254837D4
+	  11: Js02o       	$0x254837D4
+
+
+. 0 2548378C 12
+. 70 C7 00 1C 20 E7 00 20 41 82 00 40
+
+==== BB 546 (0x25483798) approx BBs exec'd 0 ====
+
+	0x25483798:  7CE01120  mtcrf 0x1,r7
+	   0: GETL       	R7, t0
+	   1: ICRFL       	t0, $0x7, CR
+	   2: INCEIPL       	$4
+
+	0x2548379C:  7CC63A14  add r6,r6,r7
+	   3: GETL       	R6, t2
+	   4: GETL       	R7, t4
+	   5: ADDL       	t2, t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0x254837A0:  7CA72850  subf r5,r7,r5
+	   8: GETL       	R7, t6
+	   9: GETL       	R5, t8
+	  10: SUBL       	t6, t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x254837A4:  28870010  cmpli cr1,r7,16
+	  13: GETL       	R7, t10
+	  14: MOVL       	$0x10, t14
+	  15: CMPUL       	t10, t14, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0x254837A8:  7CC83378  or r8,r6,r6
+	  18: GETL       	R6, t16
+	  19: PUTL       	t16, R8
+	  20: INCEIPL       	$4
+
+	0x254837AC:  409C000C  bc 4,28,0x254837B8
+	  21: Jc28o       	$0x254837B8
+
+
+. 0 25483798 24
+. 7C E0 11 20 7C C6 3A 14 7C A7 28 50 28 87 00 10 7C C8 33 78 40 9C 00 0C
+
+==== BB 547 (0x254837B8) approx BBs exec'd 0 ====
+
+	0x254837B8:  41840014  bc 12,4,0x254837CC
+	   0: Js04o       	$0x254837CC
+
+
+. 0 254837B8 4
+. 41 84 00 14
+
+==== BB 548 (0x254837CC) approx BBs exec'd 0 ====
+
+	0x254837CC:  409D0008  bc 4,29,0x254837D4
+	   0: Jc29o       	$0x254837D4
+
+
+. 0 254837CC 4
+. 40 9D 00 08
+
+==== BB 549 (0x254837D0) approx BBs exec'd 0 ====
+
+	0x254837D0:  9088FFFC  stw r4,-4(r8)
+	   0: GETL       	R4, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254837D4:  28840000  cmpli cr1,r4,0
+	   5: GETL       	R4, t4
+	   6: MOVL       	$0x0, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x254837D8:  54A70035  rlwinm. r7,r5,0,0,26
+	  10: GETL       	R5, t10
+	  11: ANDL       	$0xFFFFFFE0, t10
+	  12: PUTL       	t10, R7
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x254837DC:  7CA01120  mtcrf 0x1,r5
+	  16: GETL       	R5, t14
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x254837E0:  418601A0  bc 12,6,0x25483980
+	  19: Js06o       	$0x25483980
+
+
+. 0 254837D0 20
+. 90 88 FF FC 28 84 00 00 54 A7 00 35 7C A0 11 20 41 86 01 A0
+
+==== BB 550 (0x25483980) approx BBs exec'd 0 ====
+
+	0x25483980:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25483984:  4182FF9C  bc 12,2,0x25483920
+	   3: Js02o       	$0x25483920
+
+
+. 0 25483980 8
+. 7C 08 02 A6 41 82 FF 9C
+
+==== BB 551 (0x25483988) approx BBs exec'd 0 ====
+
+	0x25483988:  48013679  bl 0x25497000
+	   0: MOVL       	$0x2548398C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25483988 4
+. 48 01 36 79
+
+==== BB 552 (0x2548398C) approx BBs exec'd 0 ====
+
+	0x2548398C:  7D2802A6  mflr r9
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25483990:  812904F0  lwz r9,1264(r9)
+	   3: GETL       	R9, t2
+	   4: ADDL       	$0x4F0, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25483994:  81090000  lwz r8,0(r9)
+	   8: GETL       	R9, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R8
+	  11: INCEIPL       	$4
+
+	0x25483998:  7C0803A6  mtlr r0
+	  12: GETL       	R0, t10
+	  13: PUTL       	t10, LR
+	  14: INCEIPL       	$4
+
+	0x2548399C:  28880000  cmpli cr1,r8,0
+	  15: GETL       	R8, t12
+	  16: MOVL       	$0x0, t16
+	  17: CMPUL       	t12, t16, t14  (-rSo)
+	  18: ICRFL       	t14, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0x254839A0:  4186FE44  bc 12,6,0x254837E4
+	  20: Js06o       	$0x254837E4
+
+
+. 0 2548398C 24
+. 7D 28 02 A6 81 29 04 F0 81 09 00 00 7C 08 03 A6 28 88 00 00 41 86 FE 44
+
+==== BB 553 (0x254839A4) approx BBs exec'd 0 ====
+
+	0x254839A4:  28880020  cmpli cr1,r8,32
+	   0: GETL       	R8, t0
+	   1: MOVL       	$0x20, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254839A8:  4186FEBC  bc 12,6,0x25483864
+	   5: Js06o       	$0x25483864
+
+
+. 0 254839A4 8
+. 28 88 00 20 41 86 FE BC
+
+==== BB 554 (0x25483864) approx BBs exec'd 0 ====
+
+	0x25483864:  54A506FE  rlwinm r5,r5,0,27,31
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0x1F, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x25483868:  7CE02120  mtcrf 0x2,r7
+	   4: GETL       	R7, t2
+	   5: ICRFL       	t2, $0x6, CR
+	   6: INCEIPL       	$4
+
+	0x2548386C:  54E0C9FF  rlwinm. r0,r7,25,7,31
+	   7: GETL       	R7, t4
+	   8: SHRL       	$0x7, t4
+	   9: PUTL       	t4, R0
+	  10: CMP0L       	t4, t6  (-rSo)
+	  11: ICRFL       	t6, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0x25483870:  7C0903A6  mtctr r0
+	  13: GETL       	R0, t8
+	  14: PUTL       	t8, CTR
+	  15: INCEIPL       	$4
+
+	0x25483874:  38E00020  li r7,32
+	  16: MOVL       	$0x20, t10
+	  17: PUTL       	t10, R7
+	  18: INCEIPL       	$4
+
+	0x25483878:  3900FFC0  li r8,-64
+	  19: MOVL       	$0xFFFFFFC0, t12
+	  20: PUTL       	t12, R8
+	  21: INCEIPL       	$4
+
+	0x2548387C:  28850010  cmpli cr1,r5,16
+	  22: GETL       	R5, t14
+	  23: MOVL       	$0x10, t18
+	  24: CMPUL       	t14, t18, t16  (-rSo)
+	  25: ICRFL       	t16, $0x1, CR
+	  26: INCEIPL       	$4
+
+	0x25483880:  409A000C  bc 4,26,0x2548388C
+	  27: Jc26o       	$0x2548388C
+
+
+. 0 25483864 32
+. 54 A5 06 FE 7C E0 21 20 54 E0 C9 FF 7C 09 03 A6 38 E0 00 20 39 00 FF C0 28 85 00 10 40 9A 00 0C
+
+==== BB 555 (0x2548388C) approx BBs exec'd 0 ====
+
+	0x2548388C:  3920FFE0  li r9,-32
+	   0: MOVL       	$0xFFFFFFE0, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25483890:  40990010  bc 4,25,0x254838A0
+	   3: Jc25o       	$0x254838A0
+
+
+. 0 2548388C 8
+. 39 20 FF E0 40 99 00 10
+
+==== BB 556 (0x254838A0) approx BBs exec'd 0 ====
+
+	0x254838A0:  2A850000  cmpli cr5,r5,0
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x0, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x5, CR
+	   4: INCEIPL       	$4
+
+	0x254838A4:  4182007C  bc 12,2,0x25483920
+	   5: Js02o       	$0x25483920
+
+
+. 0 254838A0 8
+. 2A 85 00 00 41 82 00 7C
+
+==== BB 557 (0x254838A8) approx BBs exec'd 0 ====
+
+	0x254838A8:  7C0037EC  dcbz r0,r6
+	   0: GETL       	R6, t0
+	   1: ANDL       	$0xFFFFFFE0, t0
+	   2: MOVL       	$0x0, t2
+	   3: STL       	t2, (t0)
+	   4: ADDL       	$0x4, t0
+	   5: STL       	t2, (t0)
+	   6: ADDL       	$0x4, t0
+	   7: STL       	t2, (t0)
+	   8: ADDL       	$0x4, t0
+	   9: STL       	t2, (t0)
+	  10: ADDL       	$0x4, t0
+	  11: STL       	t2, (t0)
+	  12: ADDL       	$0x4, t0
+	  13: STL       	t2, (t0)
+	  14: ADDL       	$0x4, t0
+	  15: STL       	t2, (t0)
+	  16: ADDL       	$0x4, t0
+	  17: STL       	t2, (t0)
+	  18: INCEIPL       	$4
+
+	0x254838AC:  7C0737EC  dcbz r7,r6
+	  19: GETL       	R6, t4
+	  20: GETL       	R7, t6
+	  21: ADDL       	t6, t4
+	  22: ANDL       	$0xFFFFFFE0, t4
+	  23: MOVL       	$0x0, t8
+	  24: STL       	t8, (t4)
+	  25: ADDL       	$0x4, t4
+	  26: STL       	t8, (t4)
+	  27: ADDL       	$0x4, t4
+	  28: STL       	t8, (t4)
+	  29: ADDL       	$0x4, t4
+	  30: STL       	t8, (t4)
+	  31: ADDL       	$0x4, t4
+	  32: STL       	t8, (t4)
+	  33: ADDL       	$0x4, t4
+	  34: STL       	t8, (t4)
+	  35: ADDL       	$0x4, t4
+	  36: STL       	t8, (t4)
+	  37: ADDL       	$0x4, t4
+	  38: STL       	t8, (t4)
+	  39: INCEIPL       	$4
+
+	0x254838B0:  38C60080  addi r6,r6,128
+	  40: GETL       	R6, t10
+	  41: ADDL       	$0x80, t10
+	  42: PUTL       	t10, R6
+	  43: INCEIPL       	$4
+
+	0x254838B4:  7C0837EC  dcbz r8,r6
+	  44: GETL       	R6, t12
+	  45: GETL       	R8, t14
+	  46: ADDL       	t14, t12
+	  47: ANDL       	$0xFFFFFFE0, t12
+	  48: MOVL       	$0x0, t16
+	  49: STL       	t16, (t12)
+	  50: ADDL       	$0x4, t12
+	  51: STL       	t16, (t12)
+	  52: ADDL       	$0x4, t12
+	  53: STL       	t16, (t12)
+	  54: ADDL       	$0x4, t12
+	  55: STL       	t16, (t12)
+	  56: ADDL       	$0x4, t12
+	  57: STL       	t16, (t12)
+	  58: ADDL       	$0x4, t12
+	  59: STL       	t16, (t12)
+	  60: ADDL       	$0x4, t12
+	  61: STL       	t16, (t12)
+	  62: ADDL       	$0x4, t12
+	  63: STL       	t16, (t12)
+	  64: INCEIPL       	$4
+
+	0x254838B8:  7C0937EC  dcbz r9,r6
+	  65: GETL       	R6, t18
+	  66: GETL       	R9, t20
+	  67: ADDL       	t20, t18
+	  68: ANDL       	$0xFFFFFFE0, t18
+	  69: MOVL       	$0x0, t22
+	  70: STL       	t22, (t18)
+	  71: ADDL       	$0x4, t18
+	  72: STL       	t22, (t18)
+	  73: ADDL       	$0x4, t18
+	  74: STL       	t22, (t18)
+	  75: ADDL       	$0x4, t18
+	  76: STL       	t22, (t18)
+	  77: ADDL       	$0x4, t18
+	  78: STL       	t22, (t18)
+	  79: ADDL       	$0x4, t18
+	  80: STL       	t22, (t18)
+	  81: ADDL       	$0x4, t18
+	  82: STL       	t22, (t18)
+	  83: ADDL       	$0x4, t18
+	  84: STL       	t22, (t18)
+	  85: INCEIPL       	$4
+
+	0x254838BC:  4200FFEC  bc 16,0,0x254838A8
+	  86: GETL       	CTR, t24
+	  87: ADDL       	$0xFFFFFFFF, t24
+	  88: PUTL       	t24, CTR
+	  89: JIFZL       	t24, $0x254838C0
+	  90: JMPo       	$0x254838A8  ($4)
+
+
+. 0 254838A8 24
+. 7C 00 37 EC 7C 07 37 EC 38 C6 00 80 7C 08 37 EC 7C 09 37 EC 42 00 FF EC
+
+==== BB 558 (0x254838C0) approx BBs exec'd 0 ====
+
+	0x254838C0:  4D960020  bclr 12,22
+	   0: GETL       	LR, t0
+	   1: Js22o-r       	t0
+
+
+. 0 254838C0 4
+. 4D 96 00 20
+
+==== BB 559 (0x254838C4) approx BBs exec'd 0 ====
+
+	0x254838C4:  48000060  b 0x25483924
+	   0: JMPo       	$0x25483924  ($4)
+
+
+. 0 254838C4 4
+. 48 00 00 60
+
+==== BB 560 (0x25483924) approx BBs exec'd 0 ====
+
+	0x25483924:  7CC62A14  add r6,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25483928:  419F0020  bc 12,31,0x25483948
+	   5: Js31o       	$0x25483948
+
+
+. 0 25483924 8
+. 7C C6 2A 14 41 9F 00 20
+
+==== BB 561 (0x2548392C) approx BBs exec'd 0 ====
+
+	0x2548392C:  419E0024  bc 12,30,0x25483950
+	   0: Js30o       	$0x25483950
+
+
+. 0 2548392C 4
+. 41 9E 00 24
+
+==== BB 562 (0x25483930) approx BBs exec'd 0 ====
+
+	0x25483930:  419D0028  bc 12,29,0x25483958
+	   0: Js29o       	$0x25483958
+
+
+. 0 25483930 4
+. 41 9D 00 28
+
+==== BB 563 (0x25483958) approx BBs exec'd 0 ====
+
+	0x25483958:  9486FFFC  stwu r4,-4(r6)
+	   0: GETL       	R4, t0
+	   1: GETL       	R6, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: PUTL       	t2, R6
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2548395C:  41840014  bc 12,4,0x25483970
+	   6: Js04o       	$0x25483970
+
+
+. 0 25483958 8
+. 94 86 FF FC 41 84 00 14
+
+==== BB 564 (0x25483970) approx BBs exec'd 0 ====
+
+	0x25483970:  4C9C0020  bclr 4,28
+	   0: GETL       	LR, t0
+	   1: Jc28o-r       	t0
+
+
+. 0 25483970 4
+. 4C 9C 00 20
+
+==== BB 565 (0x25475A04) approx BBs exec'd 0 ====
+
+	0x25475A04:  80BD0014  lwz r5,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25475A08:  70A00002  andi. r0,r5,0x2
+	   5: GETL       	R5, t4
+	   6: ANDL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475A0C:  41820808  bc 12,2,0x25476214
+	  11: Js02o       	$0x25476214
+
+
+. 0 25475A04 12
+. 80 BD 00 14 70 A0 00 02 41 82 08 08
+
+==== BB 566 (0x25475A10) approx BBs exec'd 0 ====
+
+	0x25475A10:  7F97E040  cmpl cr7,r23,r28
+	   0: GETL       	R23, t0
+	   1: GETL       	R28, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25475A14:  419D06AC  bc 12,29,0x254760C0
+	   5: Js29o       	$0x254760C0
+
+
+. 0 25475A10 8
+. 7F 97 E0 40 41 9D 06 AC
+
+==== BB 567 (0x25475C1C) approx BBs exec'd 0 ====
+
+	0x25475C1C:  82A10000  lwz r21,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R21
+	   3: INCEIPL       	$4
+
+	0x25475C20:  7E419378  or r1,r18,r18
+	   4: GETL       	R18, t4
+	   5: PUTL       	t4, R1
+	   6: INCEIPL       	$4
+
+	0x25475C24:  92A10000  stw r21,0(r1)
+	   7: GETL       	R21, t6
+	   8: GETL       	R1, t8
+	   9: STL       	t6, (t8)
+	  10: INCEIPL       	$4
+
+	0x25475C28:  813A0008  lwz r9,8(r26)
+	  11: GETL       	R26, t10
+	  12: ADDL       	$0x8, t10
+	  13: LDL       	(t10), t12
+	  14: PUTL       	t12, R9
+	  15: INCEIPL       	$4
+
+	0x25475C2C:  2F090000  cmpi cr6,r9,0
+	  16: GETL       	R9, t14
+	  17: CMP0L       	t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x6, CR
+	  19: INCEIPL       	$4
+
+	0x25475C30:  409A0264  bc 4,26,0x25475E94
+	  20: Jc26o       	$0x25475E94
+
+
+. 0 25475C1C 24
+. 82 A1 00 00 7E 41 93 78 92 A1 00 00 81 3A 00 08 2F 09 00 00 40 9A 02 64
+
+==== BB 568 (0x25475E94) approx BBs exec'd 0 ====
+
+	0x25475E94:  825A0000  lwz r18,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R18
+	   3: INCEIPL       	$4
+
+	0x25475E98:  7D299214  add r9,r9,r18
+	   4: GETL       	R9, t4
+	   5: GETL       	R18, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x25475E9C:  913A0008  stw r9,8(r26)
+	   9: GETL       	R9, t8
+	  10: GETL       	R26, t10
+	  11: ADDL       	$0x8, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25475EA0:  4BFFFD98  b 0x25475C38
+	  14: JMPo       	$0x25475C38  ($4)
+
+
+. 0 25475E94 16
+. 82 5A 00 00 7D 29 92 14 91 3A 00 08 4B FF FD 98
+
+==== BB 569 (0x25475C38) approx BBs exec'd 0 ====
+
+	0x25475C38:  2E090000  cmpi cr4,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25475C3C:  7D2A4B78  or r10,r9,r9
+	   4: GETL       	R9, t4
+	   5: PUTL       	t4, R10
+	   6: INCEIPL       	$4
+
+	0x25475C40:  41920264  bc 12,18,0x25475EA4
+	   7: Js18o       	$0x25475EA4
+
+
+. 0 25475C38 12
+. 2E 09 00 00 7D 2A 4B 78 41 92 02 64
+
+==== BB 570 (0x25475C44) approx BBs exec'd 0 ====
+
+	0x25475C44:  81690000  lwz r11,0(r9)
+	   0: GETL       	R9, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25475C48:  391A0020  addi r8,r26,32
+	   4: GETL       	R26, t4
+	   5: ADDL       	$0x20, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0x25475C4C:  2C0B0000  cmpi cr0,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25475C50:  41820064  bc 12,2,0x25475CB4
+	  12: Js02o       	$0x25475CB4
+
+
+. 0 25475C44 16
+. 81 69 00 00 39 1A 00 20 2C 0B 00 00 41 82 00 64
+
+==== BB 571 (0x25475C54) approx BBs exec'd 0 ====
+
+	0x25475C54:  3C607000  lis r3,28672
+	   0: MOVL       	$0x70000000, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475C58:  3CE06FFF  lis r7,28671
+	   3: MOVL       	$0x6FFF0000, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x25475C5C:  3CA06FFF  lis r5,28671
+	   6: MOVL       	$0x6FFF0000, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x25475C60:  3F806FFF  lis r28,28671
+	   9: MOVL       	$0x6FFF0000, t6
+	  10: PUTL       	t6, R28
+	  11: INCEIPL       	$4
+
+	0x25475C64:  3DE06FFF  lis r15,28671
+	  12: MOVL       	$0x6FFF0000, t8
+	  13: PUTL       	t8, R15
+	  14: INCEIPL       	$4
+
+	0x25475C68:  3FA06FFF  lis r29,28671
+	  15: MOVL       	$0x6FFF0000, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0x25475C6C:  60660021  ori r6,r3,0x21
+	  18: MOVL       	$0x70000021, t12
+	  19: PUTL       	t12, R6
+	  20: INCEIPL       	$4
+
+	0x25475C70:  60E7FFFF  ori r7,r7,0xFFFF
+	  21: MOVL       	$0x6FFFFFFF, t14
+	  22: PUTL       	t14, R7
+	  23: INCEIPL       	$4
+
+	0x25475C74:  60A5FDFF  ori r5,r5,0xFDFF
+	  24: MOVL       	$0x6FFFFDFF, t16
+	  25: PUTL       	t16, R5
+	  26: INCEIPL       	$4
+
+	0x25475C78:  6384FE34  ori r4,r28,0xFE34
+	  27: MOVL       	$0x6FFFFE34, t18
+	  28: PUTL       	t18, R4
+	  29: INCEIPL       	$4
+
+	0x25475C7C:  61E3FEFF  ori r3,r15,0xFEFF
+	  30: MOVL       	$0x6FFFFEFF, t20
+	  31: PUTL       	t20, R3
+	  32: INCEIPL       	$4
+
+	0x25475C80:  63BDFF40  ori r29,r29,0xFF40
+	  33: MOVL       	$0x6FFFFF40, t22
+	  34: PUTL       	t22, R29
+	  35: INCEIPL       	$4
+
+	0x25475C84:  2F8B0021  cmpi cr7,r11,33
+	  36: GETL       	R11, t24
+	  37: MOVL       	$0x21, t28
+	  38: CMPL       	t24, t28, t26  (-rSo)
+	  39: ICRFL       	t26, $0x7, CR
+	  40: INCEIPL       	$4
+
+	0x25475C88:  5560103A  rlwinm r0,r11,2,0,29
+	  41: GETL       	R11, t30
+	  42: SHLL       	$0x2, t30
+	  43: PUTL       	t30, R0
+	  44: INCEIPL       	$4
+
+	0x25475C8C:  409D0018  bc 4,29,0x25475CA4
+	  45: Jc29o       	$0x25475CA4
+
+
+. 0 25475C54 60
+. 3C 60 70 00 3C E0 6F FF 3C A0 6F FF 3F 80 6F FF 3D E0 6F FF 3F A0 6F FF 60 66 00 21 60 E7 FF FF 60 A5 FD FF 63 84 FE 34 61 E3 FE FF 63 BD FF 40 2F 8B 00 21 55 60 10 3A 40 9D 00 18
+
+==== BB 572 (0x25475CA4) approx BBs exec'd 0 ====
+
+	0x25475CA4:  7D48012E  stwx r10,r8,r0
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t2, t0
+	   3: GETL       	R10, t4
+	   4: STL       	t4, (t0)
+	   5: INCEIPL       	$4
+
+	0x25475CA8:  856A0008  lwzu r11,8(r10)
+	   6: GETL       	R10, t6
+	   7: ADDL       	$0x8, t6
+	   8: PUTL       	t6, R10
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x25475CAC:  2F8B0000  cmpi cr7,r11,0
+	  12: GETL       	R11, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0x25475CB0:  409EFFD4  bc 4,30,0x25475C84
+	  16: Jc30o       	$0x25475C84
+
+
+. 0 25475CA4 16
+. 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+==== BB 573 (0x25475C84) approx BBs exec'd 0 ====
+
+	0x25475C84:  2F8B0021  cmpi cr7,r11,33
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x21, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25475C88:  5560103A  rlwinm r0,r11,2,0,29
+	   5: GETL       	R11, t6
+	   6: SHLL       	$0x2, t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x25475C8C:  409D0018  bc 4,29,0x25475CA4
+	   9: Jc29o       	$0x25475CA4
+
+
+. 0 25475C84 12
+. 2F 8B 00 21 55 60 10 3A 40 9D 00 18
+
+==== BB 574 (0x25475C90) approx BBs exec'd 0 ====
+
+	0x25475C90:  7D2B3850  subf r9,r11,r7
+	   0: GETL       	R11, t0
+	   1: GETL       	R7, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475C94:  2889000F  cmpli cr1,r9,15
+	   5: GETL       	R9, t4
+	   6: MOVL       	$0xF, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25475C98:  418503BC  bc 12,5,0x25476054
+	  10: Js05o       	$0x25476054
+
+
+. 0 25475C90 12
+. 7D 2B 38 50 28 89 00 0F 41 85 03 BC
+
+==== BB 575 (0x25475C9C) approx BBs exec'd 0 ====
+
+	0x25475C9C:  7C0B3050  subf r0,r11,r6
+	   0: GETL       	R11, t0
+	   1: GETL       	R6, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475CA0:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25475CA4:  7D48012E  stwx r10,r8,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R8, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R10, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x25475CA8:  856A0008  lwzu r11,8(r10)
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x8, t12
+	  17: PUTL       	t12, R10
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R11
+	  20: INCEIPL       	$4
+
+	0x25475CAC:  2F8B0000  cmpi cr7,r11,0
+	  21: GETL       	R11, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x25475CB0:  409EFFD4  bc 4,30,0x25475C84
+	  25: Jc30o       	$0x25475C84
+
+
+. 0 25475C9C 24
+. 7C 0B 30 50 54 00 10 3A 7D 48 01 2E 85 6A 00 08 2F 8B 00 00 40 9E FF D4
+
+==== BB 576 (0x25475CB4) approx BBs exec'd 0 ====
+
+	0x25475CB4:  817A0000  lwz r11,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25475CB8:  2C8B0000  cmpi cr1,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x25475CBC:  418600AC  bc 12,6,0x25475D68
+	   8: Js06o       	$0x25475D68
+
+
+. 0 25475CB4 12
+. 81 7A 00 00 2C 8B 00 00 41 86 00 AC
+
+==== BB 577 (0x25475CC0) approx BBs exec'd 0 ====
+
+	0x25475CC0:  81280010  lwz r9,16(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475CC4:  2F090000  cmpi cr6,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475CC8:  419A0010  bc 12,26,0x25475CD8
+	   9: Js26o       	$0x25475CD8
+
+
+. 0 25475CC0 12
+. 81 28 00 10 2F 09 00 00 41 9A 00 10
+
+==== BB 578 (0x25475CCC) approx BBs exec'd 0 ====
+
+	0x25475CCC:  81490004  lwz r10,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25475CD0:  7C8A5A14  add r4,r10,r11
+	   5: GETL       	R10, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25475CD4:  90890004  stw r4,4(r9)
+	  10: GETL       	R4, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475CD8:  8128000C  lwz r9,12(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0xC, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475CDC:  2E090000  cmpi cr4,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x4, CR
+	  23: INCEIPL       	$4
+
+	0x25475CE0:  41920010  bc 12,18,0x25475CF0
+	  24: Js18o       	$0x25475CF0
+
+
+. 0 25475CCC 24
+. 81 49 00 04 7C 8A 5A 14 90 89 00 04 81 28 00 0C 2E 09 00 00 41 92 00 10
+
+==== BB 579 (0x25475CE4) approx BBs exec'd 0 ====
+
+	0x25475CE4:  82490004  lwz r18,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x25475CE8:  7CD25A14  add r6,r18,r11
+	   5: GETL       	R18, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25475CEC:  90C90004  stw r6,4(r9)
+	  10: GETL       	R6, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475CF0:  81280014  lwz r9,20(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x14, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475CF4:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25475CF8:  41820010  bc 12,2,0x25475D08
+	  24: Js02o       	$0x25475D08
+
+
+. 0 25475CE4 24
+. 82 49 00 04 7C D2 5A 14 90 C9 00 04 81 28 00 14 2C 09 00 00 41 82 00 10
+
+==== BB 580 (0x25475CFC) approx BBs exec'd 0 ====
+
+	0x25475CFC:  81E90004  lwz r15,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0x25475D00:  7FAF5A14  add r29,r15,r11
+	   5: GETL       	R15, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x25475D04:  93A90004  stw r29,4(r9)
+	  10: GETL       	R29, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D08:  81280018  lwz r9,24(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x18, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D0C:  2F890000  cmpi cr7,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0x25475D10:  419E0010  bc 12,30,0x25475D20
+	  24: Js30o       	$0x25475D20
+
+
+. 0 25475CFC 24
+. 81 E9 00 04 7F AF 5A 14 93 A9 00 04 81 28 00 18 2F 89 00 00 41 9E 00 10
+
+==== BB 581 (0x25475D14) approx BBs exec'd 0 ====
+
+	0x25475D14:  80A90004  lwz r5,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25475D18:  7F855A14  add r28,r5,r11
+	   5: GETL       	R5, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25475D1C:  93890004  stw r28,4(r9)
+	  10: GETL       	R28, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D20:  8128001C  lwz r9,28(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D24:  2C890000  cmpi cr1,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x1, CR
+	  23: INCEIPL       	$4
+
+	0x25475D28:  41860010  bc 12,6,0x25475D38
+	  24: Js06o       	$0x25475D38
+
+
+. 0 25475D14 24
+. 80 A9 00 04 7F 85 5A 14 93 89 00 04 81 28 00 1C 2C 89 00 00 41 86 00 10
+
+==== BB 582 (0x25475D2C) approx BBs exec'd 0 ====
+
+	0x25475D2C:  80E90004  lwz r7,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25475D30:  7C675A14  add r3,r7,r11
+	   5: GETL       	R7, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25475D34:  90690004  stw r3,4(r9)
+	  10: GETL       	R3, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D38:  8128005C  lwz r9,92(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x5C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D3C:  2F090000  cmpi cr6,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x6, CR
+	  23: INCEIPL       	$4
+
+	0x25475D40:  419A0010  bc 12,26,0x25475D50
+	  24: Js26o       	$0x25475D50
+
+
+. 0 25475D2C 24
+. 80 E9 00 04 7C 67 5A 14 90 69 00 04 81 28 00 5C 2F 09 00 00 41 9A 00 10
+
+==== BB 583 (0x25475D44) approx BBs exec'd 0 ====
+
+	0x25475D44:  80090004  lwz r0,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475D48:  7EE05A14  add r23,r0,r11
+	   5: GETL       	R0, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R23
+	   9: INCEIPL       	$4
+
+	0x25475D4C:  92E90004  stw r23,4(r9)
+	  10: GETL       	R23, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D50:  812800C4  lwz r9,196(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0xC4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D54:  2E090000  cmpi cr4,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x4, CR
+	  23: INCEIPL       	$4
+
+	0x25475D58:  41920010  bc 12,18,0x25475D68
+	  24: Js18o       	$0x25475D68
+
+
+. 0 25475D44 24
+. 80 09 00 04 7E E0 5A 14 92 E9 00 04 81 28 00 C4 2E 09 00 00 41 92 00 10
+
+==== BB 584 (0x25475D5C) approx BBs exec'd 0 ====
+
+	0x25475D5C:  83290004  lwz r25,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x25475D60:  7D995A14  add r12,r25,r11
+	   5: GETL       	R25, t4
+	   6: GETL       	R11, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25475D64:  91890004  stw r12,4(r9)
+	  10: GETL       	R12, t8
+	  11: GETL       	R9, t10
+	  12: ADDL       	$0x4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475D68:  81280078  lwz r9,120(r8)
+	  15: GETL       	R8, t12
+	  16: ADDL       	$0x78, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25475D6C:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25475D70:  41820030  bc 12,2,0x25475DA0
+	  24: Js02o       	$0x25475DA0
+
+
+. 0 25475D5C 24
+. 83 29 00 04 7D 99 5A 14 91 89 00 04 81 28 00 78 2C 09 00 00 41 82 00 30
+
+==== BB 585 (0x25475DA0) approx BBs exec'd 0 ====
+
+	0x25475DA0:  81280098  lwz r9,152(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x98, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475DA4:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25475DA8:  419E0354  bc 12,30,0x254760FC
+	   9: Js30o       	$0x254760FC
+
+
+. 0 25475DA0 12
+. 81 28 00 98 2F 89 00 00 41 9E 03 54
+
+==== BB 586 (0x25475DAC) approx BBs exec'd 0 ====
+
+	0x25475DAC:  81690004  lwz r11,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25475DB0:  71600001  andi. r0,r11,0x1
+	   5: GETL       	R11, t4
+	   6: ANDL       	$0x1, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475DB4:  917A01FC  stw r11,508(r26)
+	  11: GETL       	R11, t8
+	  12: GETL       	R26, t10
+	  13: ADDL       	$0x1FC, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25475DB8:  41820008  bc 12,2,0x25475DC0
+	  16: Js02o       	$0x25475DC0
+
+
+. 0 25475DAC 16
+. 81 69 00 04 71 60 00 01 91 7A 01 FC 41 82 00 08
+
+==== BB 587 (0x25475DC0) approx BBs exec'd 0 ====
+
+	0x25475DC0:  81280074  lwz r9,116(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475DC4:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25475DC8:  418600E0  bc 12,6,0x25475EA8
+	   9: Js06o       	$0x25475EA8
+
+
+. 0 25475DC0 12
+. 81 28 00 74 2C 89 00 00 41 86 00 E0
+
+==== BB 588 (0x25475EA8) approx BBs exec'd 0 ====
+
+	0x25475EA8:  71690040  andi. r9,r11,0x40
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x40, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475EAC:  408202A4  bc 4,2,0x25476150
+	   6: Jc02o       	$0x25476150
+
+
+. 0 25475EA8 8
+. 71 69 00 40 40 82 02 A4
+
+==== BB 589 (0x25475EB0) approx BBs exec'd 0 ====
+
+	0x25475EB0:  813A014C  lwz r9,332(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x14C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475EB4:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25475EB8:  419E01CC  bc 12,30,0x25476084
+	   9: Js30o       	$0x25476084
+
+
+. 0 25475EB0 12
+. 81 3A 01 4C 2F 89 00 00 41 9E 01 CC
+
+==== BB 590 (0x25475EBC) approx BBs exec'd 0 ====
+
+	0x25475EBC:  825A0000  lwz r18,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R18
+	   3: INCEIPL       	$4
+
+	0x25475EC0:  7CC99214  add r6,r9,r18
+	   4: GETL       	R9, t4
+	   5: GETL       	R18, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x25475EC4:  90DA014C  stw r6,332(r26)
+	   9: GETL       	R6, t8
+	  10: GETL       	R26, t10
+	  11: ADDL       	$0x14C, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25475EC8:  80BE04C8  lwz r5,1224(r30)
+	  14: GETL       	R30, t12
+	  15: ADDL       	$0x4C8, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R5
+	  18: INCEIPL       	$4
+
+	0x25475ECC:  839F00A4  lwz r28,164(r31)
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0xA4, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R28
+	  23: INCEIPL       	$4
+
+	0x25475ED0:  81E50400  lwz r15,1024(r5)
+	  24: GETL       	R5, t20
+	  25: ADDL       	$0x400, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R15
+	  28: INCEIPL       	$4
+
+	0x25475ED4:  7F9D7878  andc r29,r28,r15
+	  29: GETL       	R28, t24
+	  30: GETL       	R15, t26
+	  31: NOTL       	t26
+	  32: ANDL       	t24, t26
+	  33: PUTL       	t26, R29
+	  34: INCEIPL       	$4
+
+	0x25475ED8:  73AB0001  andi. r11,r29,0x1
+	  35: GETL       	R29, t28
+	  36: ANDL       	$0x1, t28
+	  37: PUTL       	t28, R11
+	  38: CMP0L       	t28, t30  (-rSo)
+	  39: ICRFL       	t30, $0x0, CR
+	  40: INCEIPL       	$4
+
+	0x25475EDC:  408202B0  bc 4,2,0x2547618C
+	  41: Jc02o       	$0x2547618C
+
+
+. 0 25475EBC 36
+. 82 5A 00 00 7C C9 92 14 90 DA 01 4C 80 BE 04 C8 83 9F 00 A4 81 E5 04 00 7F 9D 78 78 73 AB 00 01 40 82 02 B0
+
+==== BB 591 (0x25475EE0) approx BBs exec'd 0 ====
+
+	0x25475EE0:  813A0218  lwz r9,536(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x218, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475EE4:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25475EE8:  419E0010  bc 12,30,0x25475EF8
+	   9: Js30o       	$0x25475EF8
+
+
+. 0 25475EE0 12
+. 81 3A 02 18 2F 89 00 00 41 9E 00 10
+
+==== BB 592 (0x25475EF8) approx BBs exec'd 0 ====
+
+	0x25475EF8:  7E639B78  or r3,r19,r19
+	   0: GETL       	R19, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475EFC:  4800C0F5  bl 0x25481FF0
+	   3: MOVL       	$0x25475F00, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25481FF0  ($4)
+
+
+. 0 25475EF8 8
+. 7E 63 9B 78 48 00 C0 F5
+
+==== BB 593 close(0x25481FF0) approx BBs exec'd 0 ====
+
+	0x25481FF0:  38000006  li r0,6
+	   0: MOVL       	$0x6, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25481FF4:  44000002  sc
+	   3: JMPo-sys       	$0x25481FF8  ($4)
+
+
+. 0 25481FF0 8
+. 38 00 00 06 44 00 00 02
+
+==== BB 594 (0x25481FF8) approx BBs exec'd 0 ====
+
+	0x25481FF8:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+. 0 25481FF8 4
+. 4C A3 00 20
+
+==== BB 595 (0x25475F00) approx BBs exec'd 0 ====
+
+	0x25475F00:  811E01C8  lwz r8,456(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25475F04:  2C830000  cmpi cr1,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x25475F08:  40A6FB68  bc 5,6,0x25475A70
+	   9: Jc06o       	$0x25475A70
+
+
+. 0 25475F00 12
+. 81 1E 01 C8 2C 83 00 00 40 A6 FB 68
+
+==== BB 596 (0x25475F0C) approx BBs exec'd 0 ====
+
+	0x25475F0C:  817A0180  lwz r11,384(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25475F10:  81FF009C  lwz r15,156(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x9C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R15
+	   9: INCEIPL       	$4
+
+	0x25475F14:  557D0002  rlwinm r29,r11,0,0,1
+	  10: GETL       	R11, t8
+	  11: ANDL       	$0xC0000000, t8
+	  12: PUTL       	t8, R29
+	  13: INCEIPL       	$4
+
+	0x25475F18:  69F30002  xori r19,r15,0x2
+	  14: GETL       	R15, t10
+	  15: XORL       	$0x2, t10
+	  16: PUTL       	t10, R19
+	  17: INCEIPL       	$4
+
+	0x25475F1C:  21330000  subfic r9,r19,0
+	  18: GETL       	R19, t12
+	  19: MOVL       	$0x0, t14
+	  20: SBBL       	t12, t14  (-wCa)
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0x25475F20:  7E699914  adde r19,r9,r19
+	  23: GETL       	R9, t16
+	  24: GETL       	R19, t18
+	  25: ADCL       	t16, t18  (-rCa-wCa)
+	  26: PUTL       	t18, R19
+	  27: INCEIPL       	$4
+
+	0x25475F24:  6FB24000  xoris r18,r29,0x4000
+	  28: GETL       	R29, t20
+	  29: XORL       	$0x40000000, t20
+	  30: PUTL       	t20, R18
+	  31: INCEIPL       	$4
+
+	0x25475F28:  21520000  subfic r10,r18,0
+	  32: GETL       	R18, t22
+	  33: MOVL       	$0x0, t24
+	  34: SBBL       	t22, t24  (-wCa)
+	  35: PUTL       	t24, R10
+	  36: INCEIPL       	$4
+
+	0x25475F2C:  7E4A9114  adde r18,r10,r18
+	  37: GETL       	R10, t26
+	  38: GETL       	R18, t28
+	  39: ADCL       	t26, t28  (-rCa-wCa)
+	  40: PUTL       	t28, R18
+	  41: INCEIPL       	$4
+
+	0x25475F30:  7E4A9839  and. r10,r18,r19
+	  42: GETL       	R18, t30
+	  43: GETL       	R19, t32
+	  44: ANDL       	t30, t32
+	  45: PUTL       	t32, R10
+	  46: CMP0L       	t32, t34  (-rSo)
+	  47: ICRFL       	t34, $0x0, CR
+	  48: INCEIPL       	$4
+
+	0x25475F34:  3A60FFFF  li r19,-1
+	  49: MOVL       	$0xFFFFFFFF, t36
+	  50: PUTL       	t36, R19
+	  51: INCEIPL       	$4
+
+	0x25475F38:  408200BC  bc 4,2,0x25475FF4
+	  52: Jc02o       	$0x25475FF4
+
+
+. 0 25475F0C 48
+. 81 7A 01 80 81 FF 00 9C 55 7D 00 02 69 F3 00 02 21 33 00 00 7E 69 99 14 6F B2 40 00 21 52 00 00 7E 4A 91 14 7E 4A 98 39 3A 60 FF FF 40 82 00 BC
+
+==== BB 597 (0x25475F3C) approx BBs exec'd 0 ====
+
+	0x25475F3C:  807A0150  lwz r3,336(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x150, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25475F40:  80FA0000  lwz r7,0(r26)
+	   5: GETL       	R26, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R7
+	   8: INCEIPL       	$4
+
+	0x25475F44:  7D633A14  add r11,r3,r7
+	   9: GETL       	R3, t8
+	  10: GETL       	R7, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0x25475F48:  917A0150  stw r11,336(r26)
+	  14: GETL       	R11, t12
+	  15: GETL       	R26, t14
+	  16: ADDL       	$0x150, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25475F4C:  80B10000  lwz r5,0(r17)
+	  19: GETL       	R17, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R5
+	  22: INCEIPL       	$4
+
+	0x25475F50:  70A90040  andi. r9,r5,0x40
+	  23: GETL       	R5, t20
+	  24: ANDL       	$0x40, t20
+	  25: PUTL       	t20, R9
+	  26: CMP0L       	t20, t22  (-rSo)
+	  27: ICRFL       	t22, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0x25475F54:  40820278  bc 4,2,0x254761CC
+	  29: Jc02o       	$0x254761CC
+
+
+. 0 25475F3C 28
+. 80 7A 01 50 80 FA 00 00 7D 63 3A 14 91 7A 01 50 80 B1 00 00 70 A9 00 40 40 82 02 78
+
+==== BB 598 (0x25475F58) approx BBs exec'd 0 ====
+
+	0x25475F58:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475F5C:  48002611  bl 0x2547856C
+	   3: MOVL       	$0x25475F60, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547856C  ($4)
+
+
+. 0 25475F58 8
+. 7F 43 D3 78 48 00 26 11
+
+==== BB 599 (0x25475F60) approx BBs exec'd 0 ====
+
+	0x25475F60:  72C00008  andi. r0,r22,0x8
+	   0: GETL       	R22, t0
+	   1: ANDL       	$0x8, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475F64:  40820010  bc 4,2,0x25475F74
+	   6: Jc02o       	$0x25475F74
+
+
+. 0 25475F60 8
+. 72 C0 00 08 40 82 00 10
+
+==== BB 600 (0x25475F68) approx BBs exec'd 0 ====
+
+	0x25475F68:  81DA0060  lwz r14,96(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R14
+	   4: INCEIPL       	$4
+
+	0x25475F6C:  2F0E0000  cmpi cr6,r14,0
+	   5: GETL       	R14, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475F70:  409A02E0  bc 4,26,0x25476250
+	   9: Jc26o       	$0x25476250
+
+
+. 0 25475F68 12
+. 81 DA 00 60 2F 0E 00 00 40 9A 02 E0
+
+==== BB 601 (0x25475F74) approx BBs exec'd 0 ====
+
+	0x25475F74:  811A01FC  lwz r8,508(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25475F78:  71090020  andi. r9,r8,0x20
+	   5: GETL       	R8, t4
+	   6: ANDL       	$0x20, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475F7C:  4182000C  bc 12,2,0x25475F88
+	  11: Js02o       	$0x25475F88
+
+
+. 0 25475F74 12
+. 81 1A 01 FC 71 09 00 20 41 82 00 0C
+
+==== BB 602 (0x25475F80) approx BBs exec'd 0 ====
+
+	0x25475F80:  813E04C8  lwz r9,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475F84:  934901A0  stw r26,416(r9)
+	   5: GETL       	R26, t4
+	   6: GETL       	R9, t6
+	   7: ADDL       	$0x1A0, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25475F88:  80910050  lwz r4,80(r17)
+	  10: GETL       	R17, t8
+	  11: ADDL       	$0x50, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x25475F8C:  C81F0028  lfd f0,40(r31)
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x28, t12
+	  17: FPU_RQ       	(t12), 0x0:0x0
+	  18: INCEIPL       	$4
+
+	0x25475F90:  2C840000  cmpi cr1,r4,0
+	  19: GETL       	R4, t14
+	  20: CMP0L       	t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0x25475F94:  D81A01D0  stfd f0,464(r26)
+	  23: GETL       	R26, t18
+	  24: ADDL       	$0x1D0, t18
+	  25: FPU_WQ       	0x0:0x0, (t18)
+	  26: INCEIPL       	$4
+
+	0x25475F98:  C9BF0030  lfd f13,48(r31)
+	  27: GETL       	R31, t20
+	  28: ADDL       	$0x30, t20
+	  29: FPU_RQ       	(t20), 0x0:0xD
+	  30: INCEIPL       	$4
+
+	0x25475F9C:  D9BA01D8  stfd f13,472(r26)
+	  31: GETL       	R26, t22
+	  32: ADDL       	$0x1D8, t22
+	  33: FPU_WQ       	0x0:0xD, (t22)
+	  34: INCEIPL       	$4
+
+	0x25475FA0:  4186FEB0  bc 12,6,0x25475E50
+	  35: Js06o       	$0x25475E50
+
+
+. 0 25475F80 36
+. 81 3E 04 C8 93 49 01 A0 80 91 00 50 C8 1F 00 28 2C 84 00 00 D8 1A 01 D0 C9 BF 00 30 D9 BA 01 D8 41 86 FE B0
+
+==== BB 603 (0x25475E50) approx BBs exec'd 0 ====
+
+	0x25475E50:  7F43D378  or r3,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25475E54:  4BFFF850  b 0x254756A4
+	   3: JMPo       	$0x254756A4  ($4)
+
+
+. 0 25475E50 8
+. 7F 43 D3 78 4B FF F8 50
+
+==== BB 604 (0x254756A4) approx BBs exec'd 0 ====
+
+	0x254756A4:  80A10000  lwz r5,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x254756A8:  83450004  lwz r26,4(r5)
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R26
+	   8: INCEIPL       	$4
+
+	0x254756AC:  8185FFB4  lwz r12,-76(r5)
+	   9: GETL       	R5, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x254756B0:  7F4803A6  mtlr r26
+	  14: GETL       	R26, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x254756B4:  81C5FFB8  lwz r14,-72(r5)
+	  17: GETL       	R5, t14
+	  18: ADDL       	$0xFFFFFFB8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R14
+	  21: INCEIPL       	$4
+
+	0x254756B8:  81E5FFBC  lwz r15,-68(r5)
+	  22: GETL       	R5, t18
+	  23: ADDL       	$0xFFFFFFBC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R15
+	  26: INCEIPL       	$4
+
+	0x254756BC:  7D808120  mtcrf 0x8,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x254756C0:  8205FFC0  lwz r16,-64(r5)
+	  30: GETL       	R5, t24
+	  31: ADDL       	$0xFFFFFFC0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R16
+	  34: INCEIPL       	$4
+
+	0x254756C4:  8225FFC4  lwz r17,-60(r5)
+	  35: GETL       	R5, t28
+	  36: ADDL       	$0xFFFFFFC4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R17
+	  39: INCEIPL       	$4
+
+	0x254756C8:  8245FFC8  lwz r18,-56(r5)
+	  40: GETL       	R5, t32
+	  41: ADDL       	$0xFFFFFFC8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R18
+	  44: INCEIPL       	$4
+
+	0x254756CC:  8265FFCC  lwz r19,-52(r5)
+	  45: GETL       	R5, t36
+	  46: ADDL       	$0xFFFFFFCC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R19
+	  49: INCEIPL       	$4
+
+	0x254756D0:  8285FFD0  lwz r20,-48(r5)
+	  50: GETL       	R5, t40
+	  51: ADDL       	$0xFFFFFFD0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R20
+	  54: INCEIPL       	$4
+
+	0x254756D4:  82A5FFD4  lwz r21,-44(r5)
+	  55: GETL       	R5, t44
+	  56: ADDL       	$0xFFFFFFD4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R21
+	  59: INCEIPL       	$4
+
+	0x254756D8:  82C5FFD8  lwz r22,-40(r5)
+	  60: GETL       	R5, t48
+	  61: ADDL       	$0xFFFFFFD8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R22
+	  64: INCEIPL       	$4
+
+	0x254756DC:  82E5FFDC  lwz r23,-36(r5)
+	  65: GETL       	R5, t52
+	  66: ADDL       	$0xFFFFFFDC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R23
+	  69: INCEIPL       	$4
+
+	0x254756E0:  8305FFE0  lwz r24,-32(r5)
+	  70: GETL       	R5, t56
+	  71: ADDL       	$0xFFFFFFE0, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R24
+	  74: INCEIPL       	$4
+
+	0x254756E4:  8325FFE4  lwz r25,-28(r5)
+	  75: GETL       	R5, t60
+	  76: ADDL       	$0xFFFFFFE4, t60
+	  77: LDL       	(t60), t62
+	  78: PUTL       	t62, R25
+	  79: INCEIPL       	$4
+
+	0x254756E8:  8345FFE8  lwz r26,-24(r5)
+	  80: GETL       	R5, t64
+	  81: ADDL       	$0xFFFFFFE8, t64
+	  82: LDL       	(t64), t66
+	  83: PUTL       	t66, R26
+	  84: INCEIPL       	$4
+
+	0x254756EC:  8365FFEC  lwz r27,-20(r5)
+	  85: GETL       	R5, t68
+	  86: ADDL       	$0xFFFFFFEC, t68
+	  87: LDL       	(t68), t70
+	  88: PUTL       	t70, R27
+	  89: INCEIPL       	$4
+
+	0x254756F0:  8385FFF0  lwz r28,-16(r5)
+	  90: GETL       	R5, t72
+	  91: ADDL       	$0xFFFFFFF0, t72
+	  92: LDL       	(t72), t74
+	  93: PUTL       	t74, R28
+	  94: INCEIPL       	$4
+
+	0x254756F4:  83A5FFF4  lwz r29,-12(r5)
+	  95: GETL       	R5, t76
+	  96: ADDL       	$0xFFFFFFF4, t76
+	  97: LDL       	(t76), t78
+	  98: PUTL       	t78, R29
+	  99: INCEIPL       	$4
+
+	0x254756F8:  83C5FFF8  lwz r30,-8(r5)
+	 100: GETL       	R5, t80
+	 101: ADDL       	$0xFFFFFFF8, t80
+	 102: LDL       	(t80), t82
+	 103: PUTL       	t82, R30
+	 104: INCEIPL       	$4
+
+	0x254756FC:  83E5FFFC  lwz r31,-4(r5)
+	 105: GETL       	R5, t84
+	 106: ADDL       	$0xFFFFFFFC, t84
+	 107: LDL       	(t84), t86
+	 108: PUTL       	t86, R31
+	 109: INCEIPL       	$4
+
+	0x25475700:  7CA12B78  or r1,r5,r5
+	 110: GETL       	R5, t88
+	 111: PUTL       	t88, R1
+	 112: INCEIPL       	$4
+
+	0x25475704:  4E800020  blr
+	 113: GETL       	LR, t90
+	 114: JMPo-r       	t90  ($4)
+
+
+. 0 254756A4 100
+. 80 A1 00 00 83 45 00 04 81 85 FF B4 7F 48 03 A6 81 C5 FF B8 81 E5 FF BC 7D 80 81 20 82 05 FF C0 82 25 FF C4 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+
+==== BB 605 (0x25477054) approx BBs exec'd 0 ====
+
+	0x25477054:  4BFFFF04  b 0x25476F58
+	   0: JMPo       	$0x25476F58  ($4)
+
+
+. 0 25477054 4
+. 4B FF FF 04
+
+==== BB 606 (0x25476F58) approx BBs exec'd 0 ====
+
+	0x25476F58:  83E10294  lwz r31,660(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x294, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x25476F5C:  81810244  lwz r12,580(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x244, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25476F60:  7FE803A6  mtlr r31
+	  10: GETL       	R31, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x25476F64:  81C10248  lwz r14,584(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x248, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R14
+	  17: INCEIPL       	$4
+
+	0x25476F68:  81E1024C  lwz r15,588(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x24C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R15
+	  22: INCEIPL       	$4
+
+	0x25476F6C:  7D818120  mtcrf 0x18,r12
+	  23: GETL       	R12, t18
+	  24: ICRFL       	t18, $0x3, CR
+	  25: ICRFL       	t18, $0x4, CR
+	  26: INCEIPL       	$4
+
+	0x25476F70:  82010250  lwz r16,592(r1)
+	  27: GETL       	R1, t20
+	  28: ADDL       	$0x250, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R16
+	  31: INCEIPL       	$4
+
+	0x25476F74:  82210254  lwz r17,596(r1)
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x254, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R17
+	  36: INCEIPL       	$4
+
+	0x25476F78:  82410258  lwz r18,600(r1)
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x258, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R18
+	  41: INCEIPL       	$4
+
+	0x25476F7C:  8261025C  lwz r19,604(r1)
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x25C, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R19
+	  46: INCEIPL       	$4
+
+	0x25476F80:  82810260  lwz r20,608(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x260, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R20
+	  51: INCEIPL       	$4
+
+	0x25476F84:  82A10264  lwz r21,612(r1)
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x264, t40
+	  54: LDL       	(t40), t42
+	  55: PUTL       	t42, R21
+	  56: INCEIPL       	$4
+
+	0x25476F88:  82C10268  lwz r22,616(r1)
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x268, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R22
+	  61: INCEIPL       	$4
+
+	0x25476F8C:  82E1026C  lwz r23,620(r1)
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x26C, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R23
+	  66: INCEIPL       	$4
+
+	0x25476F90:  83010270  lwz r24,624(r1)
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x270, t52
+	  69: LDL       	(t52), t54
+	  70: PUTL       	t54, R24
+	  71: INCEIPL       	$4
+
+	0x25476F94:  83210274  lwz r25,628(r1)
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x274, t56
+	  74: LDL       	(t56), t58
+	  75: PUTL       	t58, R25
+	  76: INCEIPL       	$4
+
+	0x25476F98:  83410278  lwz r26,632(r1)
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x278, t60
+	  79: LDL       	(t60), t62
+	  80: PUTL       	t62, R26
+	  81: INCEIPL       	$4
+
+	0x25476F9C:  8361027C  lwz r27,636(r1)
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x27C, t64
+	  84: LDL       	(t64), t66
+	  85: PUTL       	t66, R27
+	  86: INCEIPL       	$4
+
+	0x25476FA0:  83810280  lwz r28,640(r1)
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x280, t68
+	  89: LDL       	(t68), t70
+	  90: PUTL       	t70, R28
+	  91: INCEIPL       	$4
+
+	0x25476FA4:  83A10284  lwz r29,644(r1)
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x284, t72
+	  94: LDL       	(t72), t74
+	  95: PUTL       	t74, R29
+	  96: INCEIPL       	$4
+
+	0x25476FA8:  83C10288  lwz r30,648(r1)
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x288, t76
+	  99: LDL       	(t76), t78
+	 100: PUTL       	t78, R30
+	 101: INCEIPL       	$4
+
+	0x25476FAC:  83E1028C  lwz r31,652(r1)
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x28C, t80
+	 104: LDL       	(t80), t82
+	 105: PUTL       	t82, R31
+	 106: INCEIPL       	$4
+
+	0x25476FB0:  38210290  addi r1,r1,656
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x290, t84
+	 109: PUTL       	t84, R1
+	 110: INCEIPL       	$4
+
+	0x25476FB4:  4E800020  blr
+	 111: GETL       	LR, t86
+	 112: JMPo-r       	t86  ($4)
+
+
+. 0 25476F58 96
+. 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+
+==== BB 607 (0x254734A8) approx BBs exec'd 0 ====
+
+	0x254734A8:  809E00D0  lwz r4,208(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xD0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254734AC:  82E30178  lwz r23,376(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x178, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R23
+	   9: INCEIPL       	$4
+
+	0x254734B0:  3A770001  addi r19,r23,1
+	  10: GETL       	R23, t8
+	  11: ADDL       	$0x1, t8
+	  12: PUTL       	t8, R19
+	  13: INCEIPL       	$4
+
+	0x254734B4:  92630178  stw r19,376(r3)
+	  14: GETL       	R19, t10
+	  15: GETL       	R3, t12
+	  16: ADDL       	$0x178, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x254734B8:  387F0044  addi r3,r31,68
+	  19: GETL       	R31, t14
+	  20: ADDL       	$0x44, t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0x254734BC:  6A7D0001  xori r29,r19,0x1
+	  23: GETL       	R19, t16
+	  24: XORL       	$0x1, t16
+	  25: PUTL       	t16, R29
+	  26: INCEIPL       	$4
+
+	0x254734C0:  23BD0000  subfic r29,r29,0
+	  27: GETL       	R29, t18
+	  28: MOVL       	$0x0, t20
+	  29: SBBL       	t18, t20  (-wCa)
+	  30: PUTL       	t20, R29
+	  31: INCEIPL       	$4
+
+	0x254734C4:  7FB90194  addze r29,r25
+	  32: GETL       	R25, t22
+	  33: ADCL       	$0x0, t22  (-rCa-wCa)
+	  34: PUTL       	t22, R29
+	  35: INCEIPL       	$4
+
+	0x254734C8:  4800CE75  bl 0x2548033C
+	  36: MOVL       	$0x254734CC, t24
+	  37: PUTL       	t24, LR
+	  38: JMPo-c       	$0x2548033C  ($4)
+
+
+. 0 254734A8 36
+. 80 9E 00 D0 82 E3 01 78 3A 77 00 01 92 63 01 78 38 7F 00 44 6A 7D 00 01 23 BD 00 00 7F B9 01 94 48 00 CE 75
+
+==== BB 608 (0x254734CC) approx BBs exec'd 0 ====
+
+	0x254734CC:  7FB9EB78  or r25,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0x254734D0:  7C7D1B79  or. r29,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R29
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x254734D4:  4082FF9C  bc 4,2,0x25473470
+	   8: Jc02o       	$0x25473470
+
+
+. 0 254734CC 12
+. 7F B9 EB 78 7C 7D 1B 79 40 82 FF 9C
+
+==== BB 609 (0x254734D8) approx BBs exec'd 0 ====
+
+	0x254734D8:  807E003C  lwz r3,60(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x3C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254734DC:  38800004  li r4,4
+	   5: MOVL       	$0x4, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x254734E0:  4800EB51  bl 0x25482030
+	   8: MOVL       	$0x254734E4, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x25482030  ($4)
+
+
+. 0 254734D8 12
+. 80 7E 00 3C 38 80 00 04 48 00 EB 51
+
+==== BB 610 access(0x25482030) approx BBs exec'd 0 ====
+
+	0x25482030:  38000021  li r0,33
+	   0: MOVL       	$0x21, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25482034:  44000002  sc
+	   3: JMPo-sys       	$0x25482038  ($4)
+
+
+. 0 25482030 8
+. 38 00 00 21 44 00 00 02
+
+==== BB 611 (0x25482038) approx BBs exec'd 0 ====
+
+	0x25482038:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+. 0 25482038 4
+. 4C A3 00 20
+
+==== BB 612 (0x2548203C) approx BBs exec'd 0 ====
+
+	0x2548203C:  4BFFF2F4  b 0x25481330
+	   0: JMPo       	$0x25481330  ($4)
+
+
+. 0 2548203C 4
+. 4B FF F2 F4
+
+==== BB 613 __syscall_error(0x25481330) approx BBs exec'd 0 ====
+
+	0x25481330:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25481334:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25481338:  48015CC9  bl 0x25497000
+	   9: MOVL       	$0x2548133C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25481330 12
+. 94 21 FF F0 7D 88 02 A6 48 01 5C C9
+
+==== BB 614 (0x2548133C) approx BBs exec'd 0 ====
+
+	0x2548133C:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25481340:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25481344:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x25481348:  813E0514  lwz r9,1300(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x514, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0x2548134C:  83C10008  lwz r30,8(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x8, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0x25481350:  38210010  addi r1,r1,16
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: PUTL       	t16, R1
+	  24: INCEIPL       	$4
+
+	0x25481354:  90690000  stw r3,0(r9)
+	  25: GETL       	R3, t18
+	  26: GETL       	R9, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x25481358:  3860FFFF  li r3,-1
+	  29: MOVL       	$0xFFFFFFFF, t22
+	  30: PUTL       	t22, R3
+	  31: INCEIPL       	$4
+
+	0x2548135C:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+. 0 2548133C 36
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 81 3E 05 14 83 C1 00 08 38 21 00 10 90 69 00 00 38 60 FF FF 4E 80 00 20
+
+==== BB 615 (0x254734E4) approx BBs exec'd 0 ====
+
+	0x254734E4:  2E030000  cmpi cr4,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x254734E8:  4092F2D0  bc 4,18,0x254727B8
+	   4: Jc18o       	$0x254727B8
+
+
+. 0 254734E4 8
+. 2E 03 00 00 40 92 F2 D0
+
+==== BB 616 (0x254727B8) approx BBs exec'd 0 ====
+
+	0x254727B8:  2C190000  cmpi cr0,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x254727BC:  408213F0  bc 4,2,0x25473BAC
+	   4: Jc02o       	$0x25473BAC
+
+
+. 0 254727B8 8
+. 2C 19 00 00 40 82 13 F0
+
+==== BB 617 (0x25473BAC) approx BBs exec'd 0 ====
+
+	0x25473BAC:  5736103A  rlwinm r22,r25,2,0,29
+	   0: GETL       	R25, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R22
+	   3: INCEIPL       	$4
+
+	0x25473BB0:  83A10000  lwz r29,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25473BB4:  38D6001E  addi r6,r22,30
+	   8: GETL       	R22, t6
+	   9: ADDL       	$0x1E, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0x25473BB8:  39600000  li r11,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0x25473BBC:  54C00036  rlwinm r0,r6,0,0,27
+	  15: GETL       	R6, t10
+	  16: ANDL       	$0xFFFFFFF0, t10
+	  17: PUTL       	t10, R0
+	  18: INCEIPL       	$4
+
+	0x25473BC0:  7F4000D0  neg r26,r0
+	  19: GETL       	R0, t12
+	  20: NEGL       	t12
+	  21: PUTL       	t12, R26
+	  22: INCEIPL       	$4
+
+	0x25473BC4:  7FA1D16E  stwux r29,r1,r26
+	  23: GETL       	R26, t14
+	  24: GETL       	R1, t16
+	  25: ADDL       	t16, t14
+	  26: PUTL       	t14, R1
+	  27: GETL       	R29, t18
+	  28: STL       	t18, (t14)
+	  29: INCEIPL       	$4
+
+	0x25473BC8:  3B610017  addi r27,r1,23
+	  30: GETL       	R1, t20
+	  31: ADDL       	$0x17, t20
+	  32: PUTL       	t20, R27
+	  33: INCEIPL       	$4
+
+	0x25473BCC:  812E01C4  lwz r9,452(r14)
+	  34: GETL       	R14, t22
+	  35: ADDL       	$0x1C4, t22
+	  36: LDL       	(t22), t24
+	  37: PUTL       	t24, R9
+	  38: INCEIPL       	$4
+
+	0x25473BD0:  577A0036  rlwinm r26,r27,0,0,27
+	  39: GETL       	R27, t26
+	  40: ANDL       	$0xFFFFFFF0, t26
+	  41: PUTL       	t26, R26
+	  42: INCEIPL       	$4
+
+	0x25473BD4:  556C103A  rlwinm r12,r11,2,0,29
+	  43: GETL       	R11, t28
+	  44: SHLL       	$0x2, t28
+	  45: PUTL       	t28, R12
+	  46: INCEIPL       	$4
+
+	0x25473BD8:  396B0001  addi r11,r11,1
+	  47: MOVL       	$0x1, t30
+	  48: PUTL       	t30, R11
+	  49: INCEIPL       	$4
+
+	0x25473BDC:  7D2CD12E  stwx r9,r12,r26
+	  50: GETL       	R26, t32
+	  51: GETL       	R12, t34
+	  52: ADDL       	t34, t32
+	  53: GETL       	R9, t36
+	  54: STL       	t36, (t32)
+	  55: INCEIPL       	$4
+
+	0x25473BE0:  8129000C  lwz r9,12(r9)
+	  56: GETL       	R9, t38
+	  57: ADDL       	$0xC, t38
+	  58: LDL       	(t38), t40
+	  59: PUTL       	t40, R9
+	  60: INCEIPL       	$4
+
+	0x25473BE4:  2F890000  cmpi cr7,r9,0
+	  61: GETL       	R9, t42
+	  62: CMP0L       	t42, t44  (-rSo)
+	  63: ICRFL       	t44, $0x7, CR
+	  64: INCEIPL       	$4
+
+	0x25473BE8:  409EFFEC  bc 4,30,0x25473BD4
+	  65: Jc30o       	$0x25473BD4
+
+
+. 0 25473BAC 64
+. 57 36 10 3A 83 A1 00 00 38 D6 00 1E 39 60 00 00 54 C0 00 36 7F 40 00 D0 7F A1 D1 6E 3B 61 00 17 81 2E 01 C4 57 7A 00 36 55 6C 10 3A 39 6B 00 01 7D 2C D1 2E 81 29 00 0C 2F 89 00 00 40 9E FF EC
+
+==== BB 618 (0x25473BEC) approx BBs exec'd 0 ====
+
+	0x25473BEC:  4BFFEBD4  b 0x254727C0
+	   0: JMPo       	$0x254727C0  ($4)
+
+
+. 0 25473BEC 4
+. 4B FF EB D4
+
+==== BB 619 (0x254727C0) approx BBs exec'd 0 ====
+
+	0x254727C0:  817F0030  lwz r11,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254727C4:  38E00000  li r7,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x254727C8:  7F44D378  or r4,r26,r26
+	   8: GETL       	R26, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x254727CC:  7F25CB78  or r5,r25,r25
+	  11: GETL       	R25, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x254727D0:  69660003  xori r6,r11,0x3
+	  14: GETL       	R11, t10
+	  15: XORL       	$0x3, t10
+	  16: PUTL       	t10, R6
+	  17: INCEIPL       	$4
+
+	0x254727D4:  20060000  subfic r0,r6,0
+	  18: GETL       	R6, t12
+	  19: MOVL       	$0x0, t14
+	  20: SBBL       	t12, t14  (-wCa)
+	  21: PUTL       	t14, R0
+	  22: INCEIPL       	$4
+
+	0x254727D8:  7CC03114  adde r6,r0,r6
+	  23: GETL       	R0, t16
+	  24: GETL       	R6, t18
+	  25: ADCL       	t16, t18  (-rCa-wCa)
+	  26: PUTL       	t18, R6
+	  27: INCEIPL       	$4
+
+	0x254727DC:  7E83A378  or r3,r20,r20
+	  28: GETL       	R20, t20
+	  29: PUTL       	t20, R3
+	  30: INCEIPL       	$4
+
+	0x254727E0:  480079E5  bl 0x2547A1C4
+	  31: MOVL       	$0x254727E4, t22
+	  32: PUTL       	t22, LR
+	  33: JMPo-c       	$0x2547A1C4  ($4)
+
+
+. 0 254727C0 36
+. 81 7F 00 30 38 E0 00 00 7F 44 D3 78 7F 25 CB 78 69 66 00 03 20 06 00 00 7C C0 31 14 7E 83 A3 78 48 00 79 E5
+
+==== BB 620 _dl_map_object_deps(0x2547A1C4) approx BBs exec'd 0 ====
+
+	0x2547A1C4:  9421FF60  stwu r1,-160(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF60, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547A1C8:  7D2802A6  mflr r9
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547A1CC:  91E1005C  stw r15,92(r1)
+	   9: GETL       	R15, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x5C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547A1D0:  1DE5000C  mulli r15,r5,12
+	  14: GETL       	R5, t10
+	  15: MULL       	$0xC, t10
+	  16: PUTL       	t10, R15
+	  17: INCEIPL       	$4
+
+	0x2547A1D4:  91C10058  stw r14,88(r1)
+	  18: GETL       	R14, t12
+	  19: GETL       	R1, t14
+	  20: ADDL       	$0x58, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0x2547A1D8:  4801CE29  bl 0x25497000
+	  23: MOVL       	$0x2547A1DC, t16
+	  24: PUTL       	t16, LR
+	  25: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547A1C4 24
+. 94 21 FF 60 7D 28 02 A6 91 E1 00 5C 1D E5 00 0C 91 C1 00 58 48 01 CE 29
+
+==== BB 621 (0x2547A1DC) approx BBs exec'd 0 ====
+
+	0x2547A1DC:  39CF0036  addi r14,r15,54
+	   0: GETL       	R15, t0
+	   1: ADDL       	$0x36, t0
+	   2: PUTL       	t0, R14
+	   3: INCEIPL       	$4
+
+	0x2547A1E0:  81410000  lwz r10,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0x2547A1E4:  55CC0036  rlwinm r12,r14,0,0,27
+	   8: GETL       	R14, t6
+	   9: ANDL       	$0xFFFFFFF0, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x2547A1E8:  93A10094  stw r29,148(r1)
+	  12: GETL       	R29, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x94, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x2547A1EC:  7D0C00D0  neg r8,r12
+	  17: GETL       	R12, t12
+	  18: NEGL       	t12
+	  19: PUTL       	t12, R8
+	  20: INCEIPL       	$4
+
+	0x2547A1F0:  3BA00000  li r29,0
+	  21: MOVL       	$0x0, t14
+	  22: PUTL       	t14, R29
+	  23: INCEIPL       	$4
+
+	0x2547A1F4:  912100A4  stw r9,164(r1)
+	  24: GETL       	R9, t16
+	  25: GETL       	R1, t18
+	  26: ADDL       	$0xA4, t18
+	  27: STL       	t16, (t18)
+	  28: INCEIPL       	$4
+
+	0x2547A1F8:  7F9D2840  cmpl cr7,r29,r5
+	  29: GETL       	R29, t20
+	  30: GETL       	R5, t22
+	  31: CMPUL       	t20, t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0x2547A1FC:  92E1007C  stw r23,124(r1)
+	  34: GETL       	R23, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x7C, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x2547A200:  7C771B78  or r23,r3,r3
+	  39: GETL       	R3, t30
+	  40: PUTL       	t30, R23
+	  41: INCEIPL       	$4
+
+	0x2547A204:  93C10098  stw r30,152(r1)
+	  42: GETL       	R30, t32
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x98, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0x2547A208:  38000000  li r0,0
+	  47: MOVL       	$0x0, t36
+	  48: PUTL       	t36, R0
+	  49: INCEIPL       	$4
+
+	0x2547A20C:  93E1009C  stw r31,156(r1)
+	  50: GETL       	R31, t38
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x9C, t40
+	  53: STL       	t38, (t40)
+	  54: INCEIPL       	$4
+
+	0x2547A210:  7C3F0B78  or r31,r1,r1
+	  55: GETL       	R1, t42
+	  56: PUTL       	t42, R31
+	  57: INCEIPL       	$4
+
+	0x2547A214:  92010060  stw r16,96(r1)
+	  58: GETL       	R16, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x60, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x2547A218:  7FC802A6  mflr r30
+	  63: GETL       	LR, t48
+	  64: PUTL       	t48, R30
+	  65: INCEIPL       	$4
+
+	0x2547A21C:  92210064  stw r17,100(r1)
+	  66: GETL       	R17, t50
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x64, t52
+	  69: STL       	t50, (t52)
+	  70: INCEIPL       	$4
+
+	0x2547A220:  7CCF3378  or r15,r6,r6
+	  71: GETL       	R6, t54
+	  72: PUTL       	t54, R15
+	  73: INCEIPL       	$4
+
+	0x2547A224:  92410068  stw r18,104(r1)
+	  74: GETL       	R18, t56
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x68, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0x2547A228:  7CEE3B78  or r14,r7,r7
+	  79: GETL       	R7, t60
+	  80: PUTL       	t60, R14
+	  81: INCEIPL       	$4
+
+	0x2547A22C:  9261006C  stw r19,108(r1)
+	  82: GETL       	R19, t62
+	  83: GETL       	R1, t64
+	  84: ADDL       	$0x6C, t64
+	  85: STL       	t62, (t64)
+	  86: INCEIPL       	$4
+
+	0x2547A230:  92810070  stw r20,112(r1)
+	  87: GETL       	R20, t66
+	  88: GETL       	R1, t68
+	  89: ADDL       	$0x70, t68
+	  90: STL       	t66, (t68)
+	  91: INCEIPL       	$4
+
+	0x2547A234:  92A10074  stw r21,116(r1)
+	  92: GETL       	R21, t70
+	  93: GETL       	R1, t72
+	  94: ADDL       	$0x74, t72
+	  95: STL       	t70, (t72)
+	  96: INCEIPL       	$4
+
+	0x2547A238:  92C10078  stw r22,120(r1)
+	  97: GETL       	R22, t74
+	  98: GETL       	R1, t76
+	  99: ADDL       	$0x78, t76
+	 100: STL       	t74, (t76)
+	 101: INCEIPL       	$4
+
+	0x2547A23C:  93010080  stw r24,128(r1)
+	 102: GETL       	R24, t78
+	 103: GETL       	R1, t80
+	 104: ADDL       	$0x80, t80
+	 105: STL       	t78, (t80)
+	 106: INCEIPL       	$4
+
+	0x2547A240:  93210084  stw r25,132(r1)
+	 107: GETL       	R25, t82
+	 108: GETL       	R1, t84
+	 109: ADDL       	$0x84, t84
+	 110: STL       	t82, (t84)
+	 111: INCEIPL       	$4
+
+	0x2547A244:  93410088  stw r26,136(r1)
+	 112: GETL       	R26, t86
+	 113: GETL       	R1, t88
+	 114: ADDL       	$0x88, t88
+	 115: STL       	t86, (t88)
+	 116: INCEIPL       	$4
+
+	0x2547A248:  9361008C  stw r27,140(r1)
+	 117: GETL       	R27, t90
+	 118: GETL       	R1, t92
+	 119: ADDL       	$0x8C, t92
+	 120: STL       	t90, (t92)
+	 121: INCEIPL       	$4
+
+	0x2547A24C:  93810090  stw r28,144(r1)
+	 122: GETL       	R28, t94
+	 123: GETL       	R1, t96
+	 124: ADDL       	$0x90, t96
+	 125: STL       	t94, (t96)
+	 126: INCEIPL       	$4
+
+	0x2547A250:  7D41416E  stwux r10,r1,r8
+	 127: GETL       	R8, t98
+	 128: GETL       	R1, t100
+	 129: ADDL       	t100, t98
+	 130: PUTL       	t98, R1
+	 131: GETL       	R10, t102
+	 132: STL       	t102, (t98)
+	 133: INCEIPL       	$4
+
+	0x2547A254:  39000001  li r8,1
+	 134: MOVL       	$0x1, t104
+	 135: PUTL       	t104, R8
+	 136: INCEIPL       	$4
+
+	0x2547A258:  81230180  lwz r9,384(r3)
+	 137: GETL       	R3, t106
+	 138: ADDL       	$0x180, t106
+	 139: LDL       	(t106), t108
+	 140: PUTL       	t108, R9
+	 141: INCEIPL       	$4
+
+	0x2547A25C:  38610017  addi r3,r1,23
+	 142: GETL       	R1, t110
+	 143: ADDL       	$0x17, t110
+	 144: PUTL       	t110, R3
+	 145: INCEIPL       	$4
+
+	0x2547A260:  546A0036  rlwinm r10,r3,0,0,27
+	 146: GETL       	R3, t112
+	 147: ANDL       	$0xFFFFFFF0, t112
+	 148: PUTL       	t112, R10
+	 149: INCEIPL       	$4
+
+	0x2547A264:  5109C94C  rlwimi r9,r8,25,5,6
+	 150: GETL       	R9, t114
+	 151: GETL       	R8, t116
+	 152: ROLL       	$0x19, t116
+	 153: ANDL       	$0x6000000, t116
+	 154: ANDL       	$0xF9FFFFFF, t114
+	 155: ORL       	t114, t116
+	 156: PUTL       	t116, R9
+	 157: INCEIPL       	$4
+
+	0x2547A268:  396A000C  addi r11,r10,12
+	 158: GETL       	R10, t118
+	 159: ADDL       	$0xC, t118
+	 160: PUTL       	t118, R11
+	 161: INCEIPL       	$4
+
+	0x2547A26C:  900A0000  stw r0,0(r10)
+	 162: GETL       	R0, t120
+	 163: GETL       	R10, t122
+	 164: STL       	t120, (t122)
+	 165: INCEIPL       	$4
+
+	0x2547A270:  916A0008  stw r11,8(r10)
+	 166: GETL       	R11, t124
+	 167: GETL       	R10, t126
+	 168: ADDL       	$0x8, t126
+	 169: STL       	t124, (t126)
+	 170: INCEIPL       	$4
+
+	0x2547A274:  91370180  stw r9,384(r23)
+	 171: GETL       	R9, t128
+	 172: GETL       	R23, t130
+	 173: ADDL       	$0x180, t130
+	 174: STL       	t128, (t130)
+	 175: INCEIPL       	$4
+
+	0x2547A278:  915F0008  stw r10,8(r31)
+	 176: GETL       	R10, t132
+	 177: GETL       	R31, t134
+	 178: ADDL       	$0x8, t134
+	 179: STL       	t132, (t134)
+	 180: INCEIPL       	$4
+
+	0x2547A27C:  92EA0004  stw r23,4(r10)
+	 181: GETL       	R23, t136
+	 182: GETL       	R10, t138
+	 183: ADDL       	$0x4, t138
+	 184: STL       	t136, (t138)
+	 185: INCEIPL       	$4
+
+	0x2547A280:  911F000C  stw r8,12(r31)
+	 186: GETL       	R8, t140
+	 187: GETL       	R31, t142
+	 188: ADDL       	$0xC, t142
+	 189: STL       	t140, (t142)
+	 190: INCEIPL       	$4
+
+	0x2547A284:  409C005C  bc 4,28,0x2547A2E0
+	 191: Jc28o       	$0x2547A2E0
+
+
+. 0 2547A1DC 172
+. 39 CF 00 36 81 41 00 00 55 CC 00 36 93 A1 00 94 7D 0C 00 D0 3B A0 00 00 91 21 00 A4 7F 9D 28 40 92 E1 00 7C 7C 77 1B 78 93 C1 00 98 38 00 00 00 93 E1 00 9C 7C 3F 0B 78 92 01 00 60 7F C8 02 A6 92 21 00 64 7C CF 33 78 92 41 00 68 7C EE 3B 78 92 61 00 6C 92 81 00 70 92 A1 00 74 92 C1 00 78 93 01 00 80 93 21 00 84 93 41 00 88 93 61 00 8C 93 81 00 90 7D 41 41 6E 39 00 00 01 81 23 01 80 38 61 00 17 54 6A 00 36 51 09 C9 4C 39 6A 00 0C 90 0A 00 00 91 6A 00 08 91 37 01 80 91 5F 00 08 92 EA 00 04 91 1F 00 0C 40 9C 00 5C
+
+==== BB 622 (0x2547A288) approx BBs exec'd 0 ====
+
+	0x2547A288:  7CA903A6  mtctr r5
+	   0: GETL       	R5, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x2547A28C:  7D475378  or r7,r10,r10
+	   3: GETL       	R10, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x2547A290:  38A00000  li r5,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547A294:  38C00001  li r6,1
+	   9: MOVL       	$0x1, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0x2547A298:  48000008  b 0x2547A2A0
+	  12: JMPo       	$0x2547A2A0  ($4)
+
+
+. 0 2547A288 20
+. 7C A9 03 A6 7D 47 53 78 38 A0 00 00 38 C0 00 01 48 00 00 08
+
+==== BB 623 (0x2547A2A0) approx BBs exec'd 0 ====
+
+	0x2547A2A0:  1E68000C  mulli r19,r8,12
+	   0: GETL       	R8, t0
+	   1: MULL       	$0xC, t0
+	   2: PUTL       	t0, R19
+	   3: INCEIPL       	$4
+
+	0x2547A2A4:  57B5103A  rlwinm r21,r29,2,0,29
+	   4: GETL       	R29, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R21
+	   7: INCEIPL       	$4
+
+	0x2547A2A8:  7E15202E  lwzx r16,r21,r4
+	   8: GETL       	R4, t4
+	   9: GETL       	R21, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R16
+	  13: INCEIPL       	$4
+
+	0x2547A2AC:  39280001  addi r9,r8,1
+	  14: GETL       	R8, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x2547A2B0:  7E533A14  add r18,r19,r7
+	  18: GETL       	R19, t12
+	  19: GETL       	R7, t14
+	  20: ADDL       	t12, t14
+	  21: PUTL       	t14, R18
+	  22: INCEIPL       	$4
+
+	0x2547A2B4:  7CB3392E  stwx r5,r19,r7
+	  23: GETL       	R7, t16
+	  24: GETL       	R19, t18
+	  25: ADDL       	t18, t16
+	  26: GETL       	R5, t20
+	  27: STL       	t20, (t16)
+	  28: INCEIPL       	$4
+
+	0x2547A2B8:  3A92000C  addi r20,r18,12
+	  29: GETL       	R18, t22
+	  30: ADDL       	$0xC, t22
+	  31: PUTL       	t22, R20
+	  32: INCEIPL       	$4
+
+	0x2547A2BC:  92120004  stw r16,4(r18)
+	  33: GETL       	R16, t24
+	  34: GETL       	R18, t26
+	  35: ADDL       	$0x4, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x2547A2C0:  92920008  stw r20,8(r18)
+	  38: GETL       	R20, t28
+	  39: GETL       	R18, t30
+	  40: ADDL       	$0x8, t30
+	  41: STL       	t28, (t30)
+	  42: INCEIPL       	$4
+
+	0x2547A2C4:  3BBD0001  addi r29,r29,1
+	  43: GETL       	R29, t32
+	  44: ADDL       	$0x1, t32
+	  45: PUTL       	t32, R29
+	  46: INCEIPL       	$4
+
+	0x2547A2C8:  82300180  lwz r17,384(r16)
+	  47: GETL       	R16, t34
+	  48: ADDL       	$0x180, t34
+	  49: LDL       	(t34), t36
+	  50: PUTL       	t36, R17
+	  51: INCEIPL       	$4
+
+	0x2547A2CC:  913F000C  stw r9,12(r31)
+	  52: GETL       	R9, t38
+	  53: GETL       	R31, t40
+	  54: ADDL       	$0xC, t40
+	  55: STL       	t38, (t40)
+	  56: INCEIPL       	$4
+
+	0x2547A2D0:  50D1C94C  rlwimi r17,r6,25,5,6
+	  57: GETL       	R17, t42
+	  58: GETL       	R6, t44
+	  59: ROLL       	$0x19, t44
+	  60: ANDL       	$0x6000000, t44
+	  61: ANDL       	$0xF9FFFFFF, t42
+	  62: ORL       	t42, t44
+	  63: PUTL       	t44, R17
+	  64: INCEIPL       	$4
+
+	0x2547A2D4:  92300180  stw r17,384(r16)
+	  65: GETL       	R17, t46
+	  66: GETL       	R16, t48
+	  67: ADDL       	$0x180, t48
+	  68: STL       	t46, (t48)
+	  69: INCEIPL       	$4
+
+	0x2547A2D8:  4200FFC4  bc 16,0,0x2547A29C
+	  70: GETL       	CTR, t50
+	  71: ADDL       	$0xFFFFFFFF, t50
+	  72: PUTL       	t50, CTR
+	  73: JIFZL       	t50, $0x2547A2DC
+	  74: JMPo       	$0x2547A29C  ($4)
+
+
+. 0 2547A2A0 60
+. 1E 68 00 0C 57 B5 10 3A 7E 15 20 2E 39 28 00 01 7E 53 3A 14 7C B3 39 2E 3A 92 00 0C 92 12 00 04 92 92 00 08 3B BD 00 01 82 30 01 80 91 3F 00 0C 50 D1 C9 4C 92 30 01 80 42 00 FF C4
+
+==== BB 624 (0x2547A2DC) approx BBs exec'd 0 ====
+
+	0x2547A2DC:  7D284B78  or r8,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0x2547A2E0:  831F0008  lwz r24,8(r31)
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R24
+	   7: INCEIPL       	$4
+
+	0x2547A2E4:  1EC8000C  mulli r22,r8,12
+	   8: GETL       	R8, t6
+	   9: MULL       	$0xC, t6
+	  10: PUTL       	t6, R22
+	  11: INCEIPL       	$4
+
+	0x2547A2E8:  821E0514  lwz r16,1300(r30)
+	  12: GETL       	R30, t8
+	  13: ADDL       	$0x514, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R16
+	  16: INCEIPL       	$4
+
+	0x2547A2EC:  3A200000  li r17,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R17
+	  19: INCEIPL       	$4
+
+	0x2547A2F0:  7F1BC379  or. r27,r24,r24
+	  20: GETL       	R24, t14
+	  21: PUTL       	t14, R27
+	  22: CMP0L       	t14, t16  (-rSo)
+	  23: ICRFL       	t16, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0x2547A2F4:  7CF6C214  add r7,r22,r24
+	  25: GETL       	R22, t18
+	  26: GETL       	R24, t20
+	  27: ADDL       	t18, t20
+	  28: PUTL       	t20, R7
+	  29: INCEIPL       	$4
+
+	0x2547A2F8:  80900000  lwz r4,0(r16)
+	  30: GETL       	R16, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R4
+	  33: INCEIPL       	$4
+
+	0x2547A2FC:  38A7FFF4  addi r5,r7,-12
+	  34: GETL       	R7, t26
+	  35: ADDL       	$0xFFFFFFF4, t26
+	  36: PUTL       	t26, R5
+	  37: INCEIPL       	$4
+
+	0x2547A300:  38C00000  li r6,0
+	  38: MOVL       	$0x0, t28
+	  39: PUTL       	t28, R6
+	  40: INCEIPL       	$4
+
+	0x2547A304:  7CB32B78  or r19,r5,r5
+	  41: GETL       	R5, t30
+	  42: PUTL       	t30, R19
+	  43: INCEIPL       	$4
+
+	0x2547A308:  90C50008  stw r6,8(r5)
+	  44: GETL       	R6, t32
+	  45: GETL       	R5, t34
+	  46: ADDL       	$0x8, t34
+	  47: STL       	t32, (t34)
+	  48: INCEIPL       	$4
+
+	0x2547A30C:  923F0034  stw r17,52(r31)
+	  49: GETL       	R17, t36
+	  50: GETL       	R31, t38
+	  51: ADDL       	$0x34, t38
+	  52: STL       	t36, (t38)
+	  53: INCEIPL       	$4
+
+	0x2547A310:  909F0038  stw r4,56(r31)
+	  54: GETL       	R4, t40
+	  55: GETL       	R31, t42
+	  56: ADDL       	$0x38, t42
+	  57: STL       	t40, (t42)
+	  58: INCEIPL       	$4
+
+	0x2547A314:  92300000  stw r17,0(r16)
+	  59: GETL       	R17, t44
+	  60: GETL       	R16, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x2547A318:  418202AC  bc 12,2,0x2547A5C4
+	  63: Js02o       	$0x2547A5C4
+
+
+. 0 2547A2DC 64
+. 7D 28 4B 78 83 1F 00 08 1E C8 00 0C 82 1E 05 14 3A 20 00 00 7F 1B C3 79 7C F6 C2 14 80 90 00 00 38 A7 FF F4 38 C0 00 00 7C B3 2B 78 90 C5 00 08 92 3F 00 34 90 9F 00 38 92 30 00 00 41 82 02 AC
+
+==== BB 625 (0x2547A31C) approx BBs exec'd 0 ====
+
+	0x2547A31C:  835B0004  lwz r26,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547A320:  3B800001  li r28,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2547A324:  939B0000  stw r28,0(r27)
+	   8: GETL       	R28, t6
+	   9: GETL       	R27, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x2547A328:  3B000000  li r24,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R24
+	  14: INCEIPL       	$4
+
+	0x2547A32C:  833A0158  lwz r25,344(r26)
+	  15: GETL       	R26, t12
+	  16: ADDL       	$0x158, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R25
+	  19: INCEIPL       	$4
+
+	0x2547A330:  3AC00000  li r22,0
+	  20: MOVL       	$0x0, t16
+	  21: PUTL       	t16, R22
+	  22: INCEIPL       	$4
+
+	0x2547A334:  2C990000  cmpi cr1,r25,0
+	  23: GETL       	R25, t18
+	  24: CMP0L       	t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x1, CR
+	  26: INCEIPL       	$4
+
+	0x2547A338:  40860030  bc 4,6,0x2547A368
+	  27: Jc06o       	$0x2547A368
+
+
+. 0 2547A31C 32
+. 83 5B 00 04 3B 80 00 01 93 9B 00 00 3B 00 00 00 83 3A 01 58 3A C0 00 00 2C 99 00 00 40 86 00 30
+
+==== BB 626 (0x2547A33C) approx BBs exec'd 0 ====
+
+	0x2547A33C:  807A01E8  lwz r3,488(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547A340:  7F4BBA78  xor r11,r26,r23
+	   5: GETL       	R26, t4
+	   6: GETL       	R23, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x2547A344:  314BFFFF  addic r10,r11,-1
+	  10: GETL       	R11, t8
+	  11: ADCL       	$0xFFFFFFFF, t8  (-wCa)
+	  12: PUTL       	t8, R10
+	  13: INCEIPL       	$4
+
+	0x2547A348:  7FAA5910  subfe r29,r10,r11
+	  14: GETL       	R10, t10
+	  15: GETL       	R11, t12
+	  16: SBBL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x2547A34C:  21230000  subfic r9,r3,0
+	  19: GETL       	R3, t14
+	  20: MOVL       	$0x0, t16
+	  21: SBBL       	t14, t16  (-wCa)
+	  22: PUTL       	t16, R9
+	  23: INCEIPL       	$4
+
+	0x2547A350:  7C091914  adde r0,r9,r3
+	  24: GETL       	R9, t18
+	  25: GETL       	R3, t20
+	  26: ADCL       	t18, t20  (-rCa-wCa)
+	  27: PUTL       	t20, R0
+	  28: INCEIPL       	$4
+
+	0x2547A354:  7C0BE839  and. r11,r0,r29
+	  29: GETL       	R0, t22
+	  30: GETL       	R29, t24
+	  31: ANDL       	t22, t24
+	  32: PUTL       	t24, R11
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x0, CR
+	  35: INCEIPL       	$4
+
+	0x2547A358:  41820010  bc 12,2,0x2547A368
+	  36: Js02o       	$0x2547A368
+
+
+. 0 2547A33C 32
+. 80 7A 01 E8 7F 4B BA 78 31 4B FF FF 7F AA 59 10 21 23 00 00 7C 09 19 14 7C 0B E8 39 41 82 00 10
+
+==== BB 627 (0x2547A368) approx BBs exec'd 0 ====
+
+	0x2547A368:  82BA0024  lwz r21,36(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547A36C:  2F950000  cmpi cr7,r21,0
+	   5: GETL       	R21, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547A370:  409E001C  bc 4,30,0x2547A38C
+	   9: Jc30o       	$0x2547A38C
+
+
+. 0 2547A368 12
+. 82 BA 00 24 2F 95 00 00 40 9E 00 1C
+
+==== BB 628 (0x2547A38C) approx BBs exec'd 0 ====
+
+	0x2547A38C:  80DA0034  lwz r6,52(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547A390:  7F72DB78  or r18,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R18
+	   7: INCEIPL       	$4
+
+	0x2547A394:  839A0008  lwz r28,8(r26)
+	   8: GETL       	R26, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x2547A398:  82A60004  lwz r21,4(r6)
+	  13: GETL       	R6, t10
+	  14: ADDL       	$0x4, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R21
+	  17: INCEIPL       	$4
+
+	0x2547A39C:  91FF0014  stw r15,20(r31)
+	  18: GETL       	R15, t14
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547A3A0:  91DF0018  stw r14,24(r31)
+	  23: GETL       	R14, t18
+	  24: GETL       	R31, t20
+	  25: ADDL       	$0x18, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0x2547A3A4:  813C0000  lwz r9,0(r28)
+	  28: GETL       	R28, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R9
+	  31: INCEIPL       	$4
+
+	0x2547A3A8:  92BF001C  stw r21,28(r31)
+	  32: GETL       	R21, t26
+	  33: GETL       	R31, t28
+	  34: ADDL       	$0x1C, t28
+	  35: STL       	t26, (t28)
+	  36: INCEIPL       	$4
+
+	0x2547A3AC:  2F090000  cmpi cr6,r9,0
+	  37: GETL       	R9, t30
+	  38: CMP0L       	t30, t32  (-rSo)
+	  39: ICRFL       	t32, $0x6, CR
+	  40: INCEIPL       	$4
+
+	0x2547A3B0:  935F0010  stw r26,16(r31)
+	  41: GETL       	R26, t34
+	  42: GETL       	R31, t36
+	  43: ADDL       	$0x10, t36
+	  44: STL       	t34, (t36)
+	  45: INCEIPL       	$4
+
+	0x2547A3B4:  419A0504  bc 12,26,0x2547A8B8
+	  46: Js26o       	$0x2547A8B8
+
+
+. 0 2547A38C 44
+. 80 DA 00 34 7F 72 DB 78 83 9A 00 08 82 A6 00 04 91 FF 00 14 91 DF 00 18 81 3C 00 00 92 BF 00 1C 2F 09 00 00 93 5F 00 10 41 9A 05 04
+
+==== BB 629 (0x2547A3B8) approx BBs exec'd 0 ====
+
+	0x2547A3B8:  3F207FFF  lis r25,32767
+	   0: MOVL       	$0x7FFF0000, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0x2547A3BC:  6327FFFD  ori r7,r25,0xFFFD
+	   3: MOVL       	$0x7FFFFFFD, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x2547A3C0:  90FF0040  stw r7,64(r31)
+	   6: GETL       	R7, t4
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0x40, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x2547A3C4:  480000BC  b 0x2547A480
+	  11: JMPo       	$0x2547A480  ($4)
+
+
+. 0 2547A3B8 16
+. 3F 20 7F FF 63 27 FF FD 90 FF 00 40 48 00 00 BC
+
+==== BB 630 (0x2547A480) approx BBs exec'd 0 ====
+
+	0x2547A480:  2F890001  cmpi cr7,r9,1
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x2547A484:  419EFF44  bc 12,30,0x2547A3C8
+	   5: Js30o       	$0x2547A3C8
+
+
+. 0 2547A480 8
+. 2F 89 00 01 41 9E FF 44
+
+==== BB 631 (0x2547A3C8) approx BBs exec'd 0 ====
+
+	0x2547A3C8:  801C0004  lwz r0,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547A3CC:  38800024  li r4,36
+	   5: MOVL       	$0x24, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x2547A3D0:  7FA0AA14  add r29,r0,r21
+	   8: GETL       	R0, t6
+	   9: GETL       	R21, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x2547A3D4:  7FA3EB78  or r3,r29,r29
+	  13: GETL       	R29, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x2547A3D8:  7FB9EB78  or r25,r29,r29
+	  16: GETL       	R29, t12
+	  17: PUTL       	t12, R25
+	  18: INCEIPL       	$4
+
+	0x2547A3DC:  48008A25  bl 0x25482E00
+	  19: MOVL       	$0x2547A3E0, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25482E00  ($4)
+
+
+. 0 2547A3C8 24
+. 80 1C 00 04 38 80 00 24 7F A0 AA 14 7F A3 EB 78 7F B9 EB 78 48 00 8A 25
+
+==== BB 632 (0x2547A3E0) approx BBs exec'd 0 ====
+
+	0x2547A3E0:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547A3E4:  40860588  bc 4,6,0x2547A96C
+	   4: Jc06o       	$0x2547A96C
+
+
+. 0 2547A3E0 8
+. 2C 83 00 00 40 86 05 88
+
+==== BB 633 (0x2547A3E8) approx BBs exec'd 0 ====
+
+	0x2547A3E8:  80BE02F8  lwz r5,760(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x2F8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547A3EC:  387F0030  addi r3,r31,48
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x30, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x2547A3F0:  389F0034  addi r4,r31,52
+	   9: GETL       	R31, t6
+	  10: ADDL       	$0x34, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0x2547A3F4:  38DF0010  addi r6,r31,16
+	  13: GETL       	R31, t8
+	  14: ADDL       	$0x10, t8
+	  15: PUTL       	t8, R6
+	  16: INCEIPL       	$4
+
+	0x2547A3F8:  93BF0020  stw r29,32(r31)
+	  17: GETL       	R29, t10
+	  18: GETL       	R31, t12
+	  19: ADDL       	$0x20, t12
+	  20: STL       	t10, (t12)
+	  21: INCEIPL       	$4
+
+	0x2547A3FC:  480011A5  bl 0x2547B5A0
+	  22: MOVL       	$0x2547A400, t14
+	  23: PUTL       	t14, LR
+	  24: JMPo-c       	$0x2547B5A0  ($4)
+
+
+. 0 2547A3E8 24
+. 80 BE 02 F8 38 7F 00 30 38 9F 00 34 38 DF 00 10 93 BF 00 20 48 00 11 A5
+
+==== BB 634 _dl_catch_error(0x2547B5A0) approx BBs exec'd 0 ====
+
+	0x2547B5A0:  9421FD60  stwu r1,-672(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFD60, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547B5A4:  7D4802A6  mflr r10
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547B5A8:  4801BA59  bl 0x25497000
+	   9: MOVL       	$0x2547B5AC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547B5A0 12
+. 94 21 FD 60 7D 48 02 A6 48 01 BA 59
+
+==== BB 635 (0x2547B5AC) approx BBs exec'd 0 ====
+
+	0x2547B5AC:  93C10298  stw r30,664(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x298, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B5B0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547B5B4:  39000000  li r8,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R8
+	  10: INCEIPL       	$4
+
+	0x2547B5B8:  914102A4  stw r10,676(r1)
+	  11: GETL       	R10, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x2A4, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547B5BC:  93E1029C  stw r31,668(r1)
+	  16: GETL       	R31, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x29C, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2547B5C0:  80FE04C8  lwz r7,1224(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x4C8, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R7
+	  25: INCEIPL       	$4
+
+	0x2547B5C4:  91010014  stw r8,20(r1)
+	  26: GETL       	R8, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x14, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0x2547B5C8:  812701B4  lwz r9,436(r7)
+	  31: GETL       	R7, t24
+	  32: ADDL       	$0x1B4, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R9
+	  35: INCEIPL       	$4
+
+	0x2547B5CC:  90810274  stw r4,628(r1)
+	  36: GETL       	R4, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x274, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0x2547B5D0:  7D2903A6  mtctr r9
+	  41: GETL       	R9, t32
+	  42: PUTL       	t32, CTR
+	  43: INCEIPL       	$4
+
+	0x2547B5D4:  90A10278  stw r5,632(r1)
+	  44: GETL       	R5, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0x278, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0x2547B5D8:  90C1027C  stw r6,636(r1)
+	  49: GETL       	R6, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x27C, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0x2547B5DC:  90610270  stw r3,624(r1)
+	  54: GETL       	R3, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x270, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0x2547B5E0:  4E800421  bctrl
+	  59: MOVL       	$0x2547B5E4, t46
+	  60: PUTL       	t46, LR
+	  61: GETL       	CTR, t48
+	  62: JMPo-c       	t48  ($4)
+
+
+. 0 2547B5AC 56
+. 93 C1 02 98 7F C8 02 A6 39 00 00 00 91 41 02 A4 93 E1 02 9C 80 FE 04 C8 91 01 00 14 81 27 01 B4 90 81 02 74 7D 29 03 A6 90 A1 02 78 90 C1 02 7C 90 61 02 70 4E 80 04 21
+
+==== BB 636 _dl_initial_error_catch_tsd(0x2547185C) approx BBs exec'd 0 ====
+
+	0x2547185C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25471860:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25471864:  4802579D  bl 0x25497000
+	   9: MOVL       	$0x25471868, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547185C 12
+. 94 21 FF F0 7D 88 02 A6 48 02 57 9D
+
+==== BB 637 (0x25471868) approx BBs exec'd 0 ====
+
+	0x25471868:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547186C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25471870:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x25471874:  807E0018  lwz r3,24(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x18, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x25471878:  83C10008  lwz r30,8(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x8, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R30
+	  20: INCEIPL       	$4
+
+	0x2547187C:  38210010  addi r1,r1,16
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: PUTL       	t16, R1
+	  24: INCEIPL       	$4
+
+	0x25471880:  4E800020  blr
+	  25: GETL       	LR, t18
+	  26: JMPo-r       	t18  ($4)
+
+
+. 0 25471868 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 7E 00 18 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+==== BB 638 (0x2547B5E4) approx BBs exec'd 0 ====
+
+	0x2547B5E4:  80030000  lwz r0,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547B5E8:  90610280  stw r3,640(r1)
+	   4: GETL       	R3, t4
+	   5: GETL       	R1, t6
+	   6: ADDL       	$0x280, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547B5EC:  38610020  addi r3,r1,32
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x20, t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x2547B5F0:  90010008  stw r0,8(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x8, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547B5F4:  480072BD  bl 0x254828B0
+	  18: MOVL       	$0x2547B5F8, t14
+	  19: PUTL       	t14, LR
+	  20: JMPo-c       	$0x254828B0  ($4)
+
+
+. 0 2547B5E4 20
+. 80 03 00 00 90 61 02 80 38 61 00 20 90 01 00 08 48 00 72 BD
+
+==== BB 639 _setjmp(0x254828B0) approx BBs exec'd 0 ====
+
+	0x254828B0:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254828B4:  480001DC  b 0x25482A90
+	   3: JMPo       	$0x25482A90  ($4)
+
+
+. 0 254828B0 8
+. 38 80 00 00 48 00 01 DC
+
+==== BB 640 __sigsetjmp(0x25482A90) approx BBs exec'd 0 ====
+
+	0x25482A90:  90230000  stw r1,0(r3)
+	   0: GETL       	R1, t0
+	   1: GETL       	R3, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25482A94:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0x25482A98:  91C3000C  stw r14,12(r3)
+	   7: GETL       	R14, t6
+	   8: GETL       	R3, t8
+	   9: ADDL       	$0xC, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25482A9C:  D9C30058  stfd f14,88(r3)
+	  12: GETL       	R3, t10
+	  13: ADDL       	$0x58, t10
+	  14: FPU_WQ       	0x0:0xE, (t10)
+	  15: INCEIPL       	$4
+
+	0x25482AA0:  90030008  stw r0,8(r3)
+	  16: GETL       	R0, t12
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0x8, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x25482AA4:  91E30010  stw r15,16(r3)
+	  21: GETL       	R15, t16
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25482AA8:  D9E30060  stfd f15,96(r3)
+	  26: GETL       	R3, t20
+	  27: ADDL       	$0x60, t20
+	  28: FPU_WQ       	0x0:0xF, (t20)
+	  29: INCEIPL       	$4
+
+	0x25482AAC:  7C000026  mfcr r0
+	  30: GETL       	CR, t22
+	  31: PUTL       	t22, R0
+	  32: INCEIPL       	$4
+
+	0x25482AB0:  92030014  stw r16,20(r3)
+	  33: GETL       	R16, t24
+	  34: GETL       	R3, t26
+	  35: ADDL       	$0x14, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x25482AB4:  DA030068  stfd f16,104(r3)
+	  38: GETL       	R3, t28
+	  39: ADDL       	$0x68, t28
+	  40: FPU_WQ       	0x0:0x10, (t28)
+	  41: INCEIPL       	$4
+
+	0x25482AB8:  90030054  stw r0,84(r3)
+	  42: GETL       	R0, t30
+	  43: GETL       	R3, t32
+	  44: ADDL       	$0x54, t32
+	  45: STL       	t30, (t32)
+	  46: INCEIPL       	$4
+
+	0x25482ABC:  92230018  stw r17,24(r3)
+	  47: GETL       	R17, t34
+	  48: GETL       	R3, t36
+	  49: ADDL       	$0x18, t36
+	  50: STL       	t34, (t36)
+	  51: INCEIPL       	$4
+
+	0x25482AC0:  DA230070  stfd f17,112(r3)
+	  52: GETL       	R3, t38
+	  53: ADDL       	$0x70, t38
+	  54: FPU_WQ       	0x0:0x11, (t38)
+	  55: INCEIPL       	$4
+
+	0x25482AC4:  9243001C  stw r18,28(r3)
+	  56: GETL       	R18, t40
+	  57: GETL       	R3, t42
+	  58: ADDL       	$0x1C, t42
+	  59: STL       	t40, (t42)
+	  60: INCEIPL       	$4
+
+	0x25482AC8:  DA430078  stfd f18,120(r3)
+	  61: GETL       	R3, t44
+	  62: ADDL       	$0x78, t44
+	  63: FPU_WQ       	0x0:0x12, (t44)
+	  64: INCEIPL       	$4
+
+	0x25482ACC:  92630020  stw r19,32(r3)
+	  65: GETL       	R19, t46
+	  66: GETL       	R3, t48
+	  67: ADDL       	$0x20, t48
+	  68: STL       	t46, (t48)
+	  69: INCEIPL       	$4
+
+	0x25482AD0:  DA630080  stfd f19,128(r3)
+	  70: GETL       	R3, t50
+	  71: ADDL       	$0x80, t50
+	  72: FPU_WQ       	0x0:0x13, (t50)
+	  73: INCEIPL       	$4
+
+	0x25482AD4:  92830024  stw r20,36(r3)
+	  74: GETL       	R20, t52
+	  75: GETL       	R3, t54
+	  76: ADDL       	$0x24, t54
+	  77: STL       	t52, (t54)
+	  78: INCEIPL       	$4
+
+	0x25482AD8:  DA830088  stfd f20,136(r3)
+	  79: GETL       	R3, t56
+	  80: ADDL       	$0x88, t56
+	  81: FPU_WQ       	0x0:0x14, (t56)
+	  82: INCEIPL       	$4
+
+	0x25482ADC:  92A30028  stw r21,40(r3)
+	  83: GETL       	R21, t58
+	  84: GETL       	R3, t60
+	  85: ADDL       	$0x28, t60
+	  86: STL       	t58, (t60)
+	  87: INCEIPL       	$4
+
+	0x25482AE0:  DAA30090  stfd f21,144(r3)
+	  88: GETL       	R3, t62
+	  89: ADDL       	$0x90, t62
+	  90: FPU_WQ       	0x0:0x15, (t62)
+	  91: INCEIPL       	$4
+
+	0x25482AE4:  92C3002C  stw r22,44(r3)
+	  92: GETL       	R22, t64
+	  93: GETL       	R3, t66
+	  94: ADDL       	$0x2C, t66
+	  95: STL       	t64, (t66)
+	  96: INCEIPL       	$4
+
+	0x25482AE8:  DAC30098  stfd f22,152(r3)
+	  97: GETL       	R3, t68
+	  98: ADDL       	$0x98, t68
+	  99: FPU_WQ       	0x0:0x16, (t68)
+	 100: INCEIPL       	$4
+
+	0x25482AEC:  92E30030  stw r23,48(r3)
+	 101: GETL       	R23, t70
+	 102: GETL       	R3, t72
+	 103: ADDL       	$0x30, t72
+	 104: STL       	t70, (t72)
+	 105: INCEIPL       	$4
+
+	0x25482AF0:  DAE300A0  stfd f23,160(r3)
+	 106: GETL       	R3, t74
+	 107: ADDL       	$0xA0, t74
+	 108: FPU_WQ       	0x0:0x17, (t74)
+	 109: INCEIPL       	$4
+
+	0x25482AF4:  93030034  stw r24,52(r3)
+	 110: GETL       	R24, t76
+	 111: GETL       	R3, t78
+	 112: ADDL       	$0x34, t78
+	 113: STL       	t76, (t78)
+	 114: INCEIPL       	$4
+
+	0x25482AF8:  DB0300A8  stfd f24,168(r3)
+	 115: GETL       	R3, t80
+	 116: ADDL       	$0xA8, t80
+	 117: FPU_WQ       	0x0:0x18, (t80)
+	 118: INCEIPL       	$4
+
+	0x25482AFC:  93230038  stw r25,56(r3)
+	 119: GETL       	R25, t82
+	 120: GETL       	R3, t84
+	 121: ADDL       	$0x38, t84
+	 122: STL       	t82, (t84)
+	 123: INCEIPL       	$4
+
+	0x25482B00:  DB2300B0  stfd f25,176(r3)
+	 124: GETL       	R3, t86
+	 125: ADDL       	$0xB0, t86
+	 126: FPU_WQ       	0x0:0x19, (t86)
+	 127: INCEIPL       	$4
+
+	0x25482B04:  9343003C  stw r26,60(r3)
+	 128: GETL       	R26, t88
+	 129: GETL       	R3, t90
+	 130: ADDL       	$0x3C, t90
+	 131: STL       	t88, (t90)
+	 132: INCEIPL       	$4
+
+	0x25482B08:  DB4300B8  stfd f26,184(r3)
+	 133: GETL       	R3, t92
+	 134: ADDL       	$0xB8, t92
+	 135: FPU_WQ       	0x0:0x1A, (t92)
+	 136: INCEIPL       	$4
+
+	0x25482B0C:  93630040  stw r27,64(r3)
+	 137: GETL       	R27, t94
+	 138: GETL       	R3, t96
+	 139: ADDL       	$0x40, t96
+	 140: STL       	t94, (t96)
+	 141: INCEIPL       	$4
+
+	0x25482B10:  DB6300C0  stfd f27,192(r3)
+	 142: GETL       	R3, t98
+	 143: ADDL       	$0xC0, t98
+	 144: FPU_WQ       	0x0:0x1B, (t98)
+	 145: INCEIPL       	$4
+
+	0x25482B14:  93830044  stw r28,68(r3)
+	 146: GETL       	R28, t100
+	 147: GETL       	R3, t102
+	 148: ADDL       	$0x44, t102
+	 149: STL       	t100, (t102)
+	 150: INCEIPL       	$4
+
+	0x25482B18:  DB8300C8  stfd f28,200(r3)
+	 151: GETL       	R3, t104
+	 152: ADDL       	$0xC8, t104
+	 153: FPU_WQ       	0x0:0x1C, (t104)
+	 154: INCEIPL       	$4
+
+	0x25482B1C:  93A30048  stw r29,72(r3)
+	 155: GETL       	R29, t106
+	 156: GETL       	R3, t108
+	 157: ADDL       	$0x48, t108
+	 158: STL       	t106, (t108)
+	 159: INCEIPL       	$4
+
+	0x25482B20:  DBA300D0  stfd f29,208(r3)
+	 160: GETL       	R3, t110
+	 161: ADDL       	$0xD0, t110
+	 162: FPU_WQ       	0x0:0x1D, (t110)
+	 163: INCEIPL       	$4
+
+	0x25482B24:  93C3004C  stw r30,76(r3)
+	 164: GETL       	R30, t112
+	 165: GETL       	R3, t114
+	 166: ADDL       	$0x4C, t114
+	 167: STL       	t112, (t114)
+	 168: INCEIPL       	$4
+
+	0x25482B28:  DBC300D8  stfd f30,216(r3)
+	 169: GETL       	R3, t116
+	 170: ADDL       	$0xD8, t116
+	 171: FPU_WQ       	0x0:0x1E, (t116)
+	 172: INCEIPL       	$4
+
+	0x25482B2C:  93E30050  stw r31,80(r3)
+	 173: GETL       	R31, t118
+	 174: GETL       	R3, t120
+	 175: ADDL       	$0x50, t120
+	 176: STL       	t118, (t120)
+	 177: INCEIPL       	$4
+
+	0x25482B30:  DBE300E0  stfd f31,224(r3)
+	 178: GETL       	R3, t122
+	 179: ADDL       	$0xE0, t122
+	 180: FPU_WQ       	0x0:0x1F, (t122)
+	 181: INCEIPL       	$4
+
+	0x25482B34:  7CC802A6  mflr r6
+	 182: GETL       	LR, t124
+	 183: PUTL       	t124, R6
+	 184: INCEIPL       	$4
+
+	0x25482B38:  480144C9  bl 0x25497000
+	 185: MOVL       	$0x25482B3C, t126
+	 186: PUTL       	t126, LR
+	 187: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25482A90 172
+. 90 23 00 00 7C 08 02 A6 91 C3 00 0C D9 C3 00 58 90 03 00 08 91 E3 00 10 D9 E3 00 60 7C 00 00 26 92 03 00 14 DA 03 00 68 90 03 00 54 92 23 00 18 DA 23 00 70 92 43 00 1C DA 43 00 78 92 63 00 20 DA 63 00 80 92 83 00 24 DA 83 00 88 92 A3 00 28 DA A3 00 90 92 C3 00 2C DA C3 00 98 92 E3 00 30 DA E3 00 A0 93 03 00 34 DB 03 00 A8 93 23 00 38 DB 23 00 B0 93 43 00 3C DB 43 00 B8 93 63 00 40 DB 63 00 C0 93 83 00 44 DB 83 00 C8 93 A3 00 48 DB A3 00 D0 93 C3 00 4C DB C3 00 D8 93 E3 00 50 DB E3 00 E0 7C C8 02 A6 48 01 44 C9
+
+==== BB 641 (0x25482B3C) approx BBs exec'd 0 ====
+
+	0x25482B3C:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x25482B40:  80A504E4  lwz r5,1252(r5)
+	   3: GETL       	R5, t2
+	   4: ADDL       	$0x4E4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25482B44:  7CC803A6  mtlr r6
+	   8: GETL       	R6, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x25482B48:  80A5003C  lwz r5,60(r5)
+	  11: GETL       	R5, t8
+	  12: ADDL       	$0x3C, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x25482B4C:  74A51000  andis. r5,r5,0x1000
+	  16: GETL       	R5, t12
+	  17: ANDL       	$0x10000000, t12
+	  18: PUTL       	t12, R5
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0x25482B50:  41820158  bc 12,2,0x25482CA8
+	  22: Js02o       	$0x25482CA8
+
+
+. 0 25482B3C 24
+. 7C A8 02 A6 80 A5 04 E4 7C C8 03 A6 80 A5 00 3C 74 A5 10 00 41 82 01 58
+
+==== BB 642 (0x25482CA8) approx BBs exec'd 0 ====
+
+	0x25482CA8:  4BFFD2F0  b 0x2547FF98
+	   0: JMPo       	$0x2547FF98  ($4)
+
+
+. 0 25482CA8 4
+. 4B FF D2 F0
+
+==== BB 643 __sigjmp_save(0x2547FF98) approx BBs exec'd 0 ====
+
+	0x2547FF98:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547FF9C:  38000000  li r0,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547FFA0:  900301C0  stw r0,448(r3)
+	   9: GETL       	R0, t6
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x1C0, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547FFA4:  38210010  addi r1,r1,16
+	  14: GETL       	R1, t10
+	  15: ADDL       	$0x10, t10
+	  16: PUTL       	t10, R1
+	  17: INCEIPL       	$4
+
+	0x2547FFA8:  38600000  li r3,0
+	  18: MOVL       	$0x0, t12
+	  19: PUTL       	t12, R3
+	  20: INCEIPL       	$4
+
+	0x2547FFAC:  4E800020  blr
+	  21: GETL       	LR, t14
+	  22: JMPo-r       	t14  ($4)
+
+
+. 0 2547FF98 24
+. 94 21 FF F0 38 00 00 00 90 03 01 C0 38 21 00 10 38 60 00 00 4E 80 00 20
+
+==== BB 644 (0x2547B5F8) approx BBs exec'd 0 ====
+
+	0x2547B5F8:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547B5FC:  8061027C  lwz r3,636(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x27C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547B600:  7FEAFB78  or r10,r31,r31
+	  10: GETL       	R31, t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x2547B604:  2F9FFFFF  cmpi cr7,r31,-1
+	  13: GETL       	R31, t10
+	  14: MOVL       	$0xFFFFFFFF, t14
+	  15: CMPL       	t10, t14, t12  (-rSo)
+	  16: ICRFL       	t12, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0x2547B608:  40820054  bc 4,2,0x2547B65C
+	  18: Jc02o       	$0x2547B65C
+
+
+. 0 2547B5F8 20
+. 7C 7F 1B 79 80 61 02 7C 7F EA FB 78 2F 9F FF FF 40 82 00 54
+
+==== BB 645 (0x2547B60C) approx BBs exec'd 0 ====
+
+	0x2547B60C:  81210280  lwz r9,640(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x280, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547B610:  38010010  addi r0,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547B614:  81810278  lwz r12,632(r1)
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x278, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0x2547B618:  90090000  stw r0,0(r9)
+	  14: GETL       	R0, t10
+	  15: GETL       	R9, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547B61C:  7D8903A6  mtctr r12
+	  18: GETL       	R12, t14
+	  19: PUTL       	t14, CTR
+	  20: INCEIPL       	$4
+
+	0x2547B620:  4E800421  bctrl
+	  21: MOVL       	$0x2547B624, t16
+	  22: PUTL       	t16, LR
+	  23: GETL       	CTR, t18
+	  24: JMPo-c       	t18  ($4)
+
+
+. 0 2547B60C 24
+. 81 21 02 80 38 01 00 10 81 81 02 78 90 09 00 00 7D 89 03 A6 4E 80 04 21
+
+==== BB 646 openaux(0x2547A0BC) approx BBs exec'd 0 ====
+
+	0x2547A0BC:  7C8802A6  mflr r4
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547A0C0:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547A0C4:  93E10008  stw r31,8(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547A0C8:  7C7F1B78  or r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0x2547A0CC:  38A00000  li r5,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R5
+	  19: INCEIPL       	$4
+
+	0x2547A0D0:  90810014  stw r4,20(r1)
+	  20: GETL       	R4, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x2547A0D4:  80630000  lwz r3,0(r3)
+	  25: GETL       	R3, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R3
+	  28: INCEIPL       	$4
+
+	0x2547A0D8:  809F0010  lwz r4,16(r31)
+	  29: GETL       	R31, t22
+	  30: ADDL       	$0x10, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R4
+	  33: INCEIPL       	$4
+
+	0x2547A0DC:  80030180  lwz r0,384(r3)
+	  34: GETL       	R3, t26
+	  35: ADDL       	$0x180, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R0
+	  38: INCEIPL       	$4
+
+	0x2547A0E0:  7409C000  andis. r9,r0,0xC000
+	  39: GETL       	R0, t30
+	  40: ANDL       	$0xC0000000, t30
+	  41: PUTL       	t30, R9
+	  42: CMP0L       	t30, t32  (-rSo)
+	  43: ICRFL       	t32, $0x0, CR
+	  44: INCEIPL       	$4
+
+	0x2547A0E4:  540617BE  rlwinm r6,r0,2,30,31
+	  45: GETL       	R0, t34
+	  46: SHRL       	$0x1E, t34
+	  47: PUTL       	t34, R6
+	  48: INCEIPL       	$4
+
+	0x2547A0E8:  40820008  bc 4,2,0x2547A0F0
+	  49: Jc02o       	$0x2547A0F0
+
+
+. 0 2547A0BC 48
+. 7C 88 02 A6 94 21 FF F0 93 E1 00 08 7C 7F 1B 78 38 A0 00 00 90 81 00 14 80 63 00 00 80 9F 00 10 80 03 01 80 74 09 C0 00 54 06 17 BE 40 82 00 08
+
+==== BB 647 (0x2547A0EC) approx BBs exec'd 0 ====
+
+	0x2547A0EC:  38C00001  li r6,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x2547A0F0:  80FF0004  lwz r7,4(r31)
+	   3: GETL       	R31, t2
+	   4: ADDL       	$0x4, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x2547A0F4:  811F0008  lwz r8,8(r31)
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0x2547A0F8:  81230018  lwz r9,24(r3)
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x2547A0FC:  4BFFCBED  bl 0x25476CE8
+	  18: MOVL       	$0x2547A100, t14
+	  19: PUTL       	t14, LR
+	  20: JMPo-c       	$0x25476CE8  ($4)
+
+
+. 0 2547A0EC 20
+. 38 C0 00 01 80 FF 00 04 81 1F 00 08 81 23 00 18 4B FF CB ED
+
+==== BB 648 (0x25482FA8) approx BBs exec'd 0 ====
+
+	0x25482FA8:  8CA30001  lbzu r5,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25482FAC:  8CC40001  lbzu r6,1(r4)
+	   6: GETL       	R4, t4
+	   7: ADDL       	$0x1, t4
+	   8: PUTL       	t4, R4
+	   9: LDB       	(t4), t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0x25482FB0:  2C850000  cmpi cr1,r5,0
+	  12: GETL       	R5, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0x25482FB4:  7C053000  cmp cr0,r5,r6
+	  16: GETL       	R5, t12
+	  17: GETL       	R6, t14
+	  18: CMPL       	t12, t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0x25482FB8:  4086FFD4  bc 4,6,0x25482F8C
+	  21: Jc06o       	$0x25482F8C
+
+
+. 0 25482FA8 20
+. 8C A3 00 01 8C C4 00 01 2C 85 00 00 7C 05 30 00 40 86 FF D4
+
+==== BB 649 (0x25482F8C) approx BBs exec'd 0 ====
+
+	0x25482F8C:  8CA30001  lbzu r5,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R3
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25482F90:  40820034  bc 4,2,0x25482FC4
+	   6: Jc02o       	$0x25482FC4
+
+
+. 0 25482F8C 8
+. 8C A3 00 01 40 82 00 34
+
+==== BB 650 (0x25482FC4) approx BBs exec'd 0 ====
+
+	0x25482FC4:  88A3FFFF  lbz r5,-1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25482FC8:  7C662850  subf r3,r6,r5
+	   5: GETL       	R6, t4
+	   6: GETL       	R5, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25482FCC:  4E800020  blr
+	  10: GETL       	LR, t8
+	  11: JMPo-r       	t8  ($4)
+
+
+. 0 25482FC4 12
+. 88 A3 FF FF 7C 66 28 50 4E 80 00 20
+
+==== BB 651 (0x2547706C) approx BBs exec'd 0 ====
+
+	0x2547706C:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25477070:  4800BF61  bl 0x25482FD0
+	   3: MOVL       	$0x25477074, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+. 0 2547706C 8
+. 7F E3 FB 78 48 00 BF 61
+
+==== BB 652 (0x25477074) approx BBs exec'd 0 ====
+
+	0x25477074:  83540000  lwz r26,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R26
+	   3: INCEIPL       	$4
+
+	0x25477078:  3AC30001  addi r22,r3,1
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0x2547707C:  73490001  andi. r9,r26,0x1
+	   8: GETL       	R26, t6
+	   9: ANDL       	$0x1, t6
+	  10: PUTL       	t6, R9
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x25477080:  408203C4  bc 4,2,0x25477444
+	  14: Jc02o       	$0x25477444
+
+
+. 0 25477074 16
+. 83 54 00 00 3A C3 00 01 73 49 00 01 40 82 03 C4
+
+==== BB 653 (0x25477084) approx BBs exec'd 0 ====
+
+	0x25477084:  3B40FFFF  li r26,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R26
+	   2: INCEIPL       	$4
+
+	0x25477088:  418E0020  bc 12,14,0x254770A8
+	   3: Js14o       	$0x254770A8
+
+
+. 0 25477084 8
+. 3B 40 FF FF 41 8E 00 20
+
+==== BB 654 (0x2547708C) approx BBs exec'd 0 ====
+
+	0x2547708C:  839B0094  lwz r28,148(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x94, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x25477090:  7F6CFE70  srawi r12,r27,31
+	   5: GETL       	R27, t4
+	   6: SARL       	$0x1F, t4  (-wCa)
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25477094:  7D9DDA78  xor r29,r12,r27
+	   9: GETL       	R12, t6
+	  10: GETL       	R27, t8
+	  11: XORL       	t6, t8
+	  12: PUTL       	t8, R29
+	  13: INCEIPL       	$4
+
+	0x25477098:  2E1AFFFF  cmpi cr4,r26,-1
+	  14: GETL       	R26, t10
+	  15: MOVL       	$0xFFFFFFFF, t14
+	  16: CMPL       	t10, t14, t12  (-rSo)
+	  17: ICRFL       	t12, $0x4, CR
+	  18: INCEIPL       	$4
+
+	0x2547709C:  2F9C0000  cmpi cr7,r28,0
+	  19: GETL       	R28, t16
+	  20: CMP0L       	t16, t18  (-rSo)
+	  21: ICRFL       	t18, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x254770A0:  7DCCE850  subf r14,r12,r29
+	  23: GETL       	R12, t20
+	  24: GETL       	R29, t22
+	  25: SUBL       	t20, t22
+	  26: PUTL       	t22, R14
+	  27: INCEIPL       	$4
+
+	0x254770A4:  409E016C  bc 4,30,0x25477210
+	  28: Jc30o       	$0x25477210
+
+
+. 0 2547708C 28
+. 83 9B 00 94 7F 6C FE 70 7D 9D DA 78 2E 1A FF FF 2F 9C 00 00 7D CC E8 50 40 9E 01 6C
+
+==== BB 655 (0x254770A8) approx BBs exec'd 0 ====
+
+	0x254770A8:  2C9B0000  cmpi cr1,r27,0
+	   0: GETL       	R27, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x254770AC:  7F65FE70  srawi r5,r27,31
+	   4: GETL       	R27, t4
+	   5: SARL       	$0x1F, t4  (-wCa)
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x254770B0:  7CA0DA78  xor r0,r5,r27
+	   8: GETL       	R5, t6
+	   9: GETL       	R27, t8
+	  10: XORL       	t6, t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x254770B4:  7F7DDB78  or r29,r27,r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0x254770B8:  7DC50050  subf r14,r5,r0
+	  16: GETL       	R5, t12
+	  17: GETL       	R0, t14
+	  18: SUBL       	t12, t14
+	  19: PUTL       	t14, R14
+	  20: INCEIPL       	$4
+
+	0x254770BC:  41860370  bc 12,6,0x2547742C
+	  21: Js06o       	$0x2547742C
+
+
+. 0 254770A8 24
+. 2C 9B 00 00 7F 65 FE 70 7C A0 DA 78 7F 7D DB 78 7D C5 00 50 41 86 03 70
+
+==== BB 656 (0x254770C0) approx BBs exec'd 0 ====
+
+	0x254770C0:  825E019C  lwz r18,412(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x19C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x254770C4:  39E0FFFF  li r15,-1
+	   5: MOVL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R15
+	   7: INCEIPL       	$4
+
+	0x254770C8:  48000024  b 0x254770EC
+	   8: JMPo       	$0x254770EC  ($4)
+
+
+. 0 254770C0 12
+. 82 5E 01 9C 39 E0 FF FF 48 00 00 24
+
+==== BB 657 (0x254770EC) approx BBs exec'd 0 ====
+
+	0x254770EC:  807D018C  lwz r3,396(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x18C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254770F0:  3B9D018C  addi r28,r29,396
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x18C, t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x254770F4:  38000000  li r0,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0x254770F8:  2E03FFFF  cmpi cr4,r3,-1
+	  12: GETL       	R3, t8
+	  13: MOVL       	$0xFFFFFFFF, t12
+	  14: CMPL       	t8, t12, t10  (-rSo)
+	  15: ICRFL       	t10, $0x4, CR
+	  16: INCEIPL       	$4
+
+	0x254770FC:  2F030000  cmpi cr6,r3,0
+	  17: GETL       	R3, t14
+	  18: CMP0L       	t14, t16  (-rSo)
+	  19: ICRFL       	t16, $0x6, CR
+	  20: INCEIPL       	$4
+
+	0x25477100:  4192002C  bc 12,18,0x2547712C
+	  21: Js18o       	$0x2547712C
+
+
+. 0 254770EC 24
+. 80 7D 01 8C 3B 9D 01 8C 38 00 00 00 2E 03 FF FF 2F 03 00 00 41 92 00 2C
+
+==== BB 658 (0x2547712C) approx BBs exec'd 0 ====
+
+	0x2547712C:  2C000000  cmpi cr0,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25477130:  7F86E378  or r6,r28,r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0x25477134:  7FE3FB78  or r3,r31,r31
+	   7: GETL       	R31, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25477138:  7EC4B378  or r4,r22,r22
+	  10: GETL       	R22, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x2547713C:  7EE5BB78  or r5,r23,r23
+	  13: GETL       	R23, t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x25477140:  38E10228  addi r7,r1,552
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x228, t12
+	  18: PUTL       	t12, R7
+	  19: INCEIPL       	$4
+
+	0x25477144:  39010018  addi r8,r1,24
+	  20: GETL       	R1, t14
+	  21: ADDL       	$0x18, t14
+	  22: PUTL       	t14, R8
+	  23: INCEIPL       	$4
+
+	0x25477148:  4182FF84  bc 12,2,0x254770CC
+	  24: Js02o       	$0x254770CC
+
+
+. 0 2547712C 32
+. 2C 00 00 00 7F 86 E3 78 7F E3 FB 78 7E C4 B3 78 7E E5 BB 78 38 E1 02 28 39 01 00 18 41 82 FF 84
+
+==== BB 659 (0x254770CC) approx BBs exec'd 0 ====
+
+	0x254770CC:  2E1AFFFF  cmpi cr4,r26,-1
+	   0: GETL       	R26, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x254770D0:  83BD0168  lwz r29,360(r29)
+	   5: GETL       	R29, t6
+	   6: ADDL       	$0x168, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R29
+	   9: INCEIPL       	$4
+
+	0x254770D4:  313DFFFF  addic r9,r29,-1
+	  10: GETL       	R29, t10
+	  11: ADCL       	$0xFFFFFFFF, t10  (-wCa)
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x254770D8:  7CC9E910  subfe r6,r9,r29
+	  14: GETL       	R9, t12
+	  15: GETL       	R29, t14
+	  16: SBBL       	t12, t14  (-rCa-wCa)
+	  17: PUTL       	t14, R6
+	  18: INCEIPL       	$4
+
+	0x254770DC:  7D200026  mfcr r9
+	  19: GETL       	CR, t16
+	  20: PUTL       	t16, R9
+	  21: INCEIPL       	$4
+
+	0x254770E0:  55299FFE  rlwinm r9,r9,19,31,31
+	  22: GETL       	R9, t18
+	  23: ROLL       	$0x13, t18
+	  24: ANDL       	$0x1, t18
+	  25: PUTL       	t18, R9
+	  26: INCEIPL       	$4
+
+	0x254770E4:  7D2B3039  and. r11,r9,r6
+	  27: GETL       	R9, t20
+	  28: GETL       	R6, t22
+	  29: ANDL       	t20, t22
+	  30: PUTL       	t22, R11
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x0, CR
+	  33: INCEIPL       	$4
+
+	0x254770E8:  418200B0  bc 12,2,0x25477198
+	  34: Js02o       	$0x25477198
+
+
+. 0 254770CC 32
+. 2E 1A FF FF 83 BD 01 68 31 3D FF FF 7C C9 E9 10 7D 20 00 26 55 29 9F FE 7D 2B 30 39 41 82 00 B0
+
+==== BB 660 (0x25477198) approx BBs exec'd 0 ====
+
+	0x25477198:  409200A8  bc 4,18,0x25477240
+	   0: Jc18o       	$0x25477240
+
+
+. 0 25477198 4
+. 40 92 00 A8
+
+==== BB 661 (0x2547719C) approx BBs exec'd 0 ====
+
+	0x2547719C:  80B50000  lwz r5,0(r21)
+	   0: GETL       	R21, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x254771A0:  2F850000  cmpi cr7,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x254771A4:  419E0068  bc 12,30,0x2547720C
+	   8: Js30o       	$0x2547720C
+
+
+. 0 2547719C 12
+. 80 B5 00 00 2F 85 00 00 41 9E 00 68
+
+==== BB 662 (0x254771A8) approx BBs exec'd 0 ====
+
+	0x254771A8:  83A50180  lwz r29,384(r5)
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254771AC:  7CAADA78  xor r10,r5,r27
+	   5: GETL       	R5, t4
+	   6: GETL       	R27, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x254771B0:  300AFFFF  addic r0,r10,-1
+	  10: GETL       	R10, t8
+	  11: ADCL       	$0xFFFFFFFF, t8  (-wCa)
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x254771B4:  7D005110  subfe r8,r0,r10
+	  14: GETL       	R0, t10
+	  15: GETL       	R10, t12
+	  16: SBBL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R8
+	  18: INCEIPL       	$4
+
+	0x254771B8:  57BC0002  rlwinm r28,r29,0,0,1
+	  19: GETL       	R29, t14
+	  20: ANDL       	$0xC0000000, t14
+	  21: PUTL       	t14, R28
+	  22: INCEIPL       	$4
+
+	0x254771BC:  6F928000  xoris r18,r28,0x8000
+	  23: GETL       	R28, t16
+	  24: XORL       	$0x80000000, t16
+	  25: PUTL       	t16, R18
+	  26: INCEIPL       	$4
+
+	0x254771C0:  3132FFFF  addic r9,r18,-1
+	  27: GETL       	R18, t18
+	  28: ADCL       	$0xFFFFFFFF, t18  (-wCa)
+	  29: PUTL       	t18, R9
+	  30: INCEIPL       	$4
+
+	0x254771C4:  7DE99110  subfe r15,r9,r18
+	  31: GETL       	R9, t20
+	  32: GETL       	R18, t22
+	  33: SBBL       	t20, t22  (-rCa-wCa)
+	  34: PUTL       	t22, R15
+	  35: INCEIPL       	$4
+
+	0x254771C8:  7DE04039  and. r0,r15,r8
+	  36: GETL       	R15, t24
+	  37: GETL       	R8, t26
+	  38: ANDL       	t24, t26
+	  39: PUTL       	t26, R0
+	  40: CMP0L       	t26, t28  (-rSo)
+	  41: ICRFL       	t28, $0x0, CR
+	  42: INCEIPL       	$4
+
+	0x254771CC:  41820040  bc 12,2,0x2547720C
+	  43: Js02o       	$0x2547720C
+
+
+. 0 254771A8 40
+. 83 A5 01 80 7C AA DA 78 30 0A FF FF 7D 00 51 10 57 BC 00 02 6F 92 80 00 31 32 FF FF 7D E9 91 10 7D E0 40 39 41 82 00 40
+
+==== BB 663 (0x2547720C) approx BBs exec'd 0 ====
+
+	0x2547720C:  40920034  bc 4,18,0x25477240
+	   0: Jc18o       	$0x25477240
+
+
+. 0 2547720C 4
+. 40 92 00 34
+
+==== BB 664 (0x25477210) approx BBs exec'd 0 ====
+
+	0x25477210:  80DE0168  lwz r6,360(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x168, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25477214:  80A60000  lwz r5,0(r6)
+	   5: GETL       	R6, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R5
+	   8: INCEIPL       	$4
+
+	0x25477218:  2C85FFFF  cmpi cr1,r5,-1
+	   9: GETL       	R5, t8
+	  10: MOVL       	$0xFFFFFFFF, t12
+	  11: CMPL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x2547721C:  41860024  bc 12,6,0x25477240
+	  14: Js06o       	$0x25477240
+
+
+. 0 25477210 16
+. 80 DE 01 68 80 A6 00 00 2C 85 FF FF 41 86 00 24
+
+==== BB 665 (0x25477220) approx BBs exec'd 0 ====
+
+	0x25477220:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25477224:  7EC4B378  or r4,r22,r22
+	   3: GETL       	R22, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25477228:  7EE5BB78  or r5,r23,r23
+	   6: GETL       	R23, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547722C:  38E10228  addi r7,r1,552
+	   9: GETL       	R1, t6
+	  10: ADDL       	$0x228, t6
+	  11: PUTL       	t6, R7
+	  12: INCEIPL       	$4
+
+	0x25477230:  39010018  addi r8,r1,24
+	  13: GETL       	R1, t8
+	  14: ADDL       	$0x18, t8
+	  15: PUTL       	t8, R8
+	  16: INCEIPL       	$4
+
+	0x25477234:  4BFFF561  bl 0x25476794
+	  17: MOVL       	$0x25477238, t10
+	  18: PUTL       	t10, LR
+	  19: JMPo-c       	$0x25476794  ($4)
+
+
+. 0 25477220 24
+. 7F E3 FB 78 7E C4 B3 78 7E E5 BB 78 38 E1 02 28 39 01 00 18 4B FF F5 61
+
+==== BB 666 open_path(0x25476794) approx BBs exec'd 0 ====
+
+	0x25476794:  9421FF00  stwu r1,-256(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF00, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25476798:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547679C:  48020865  bl 0x25497000
+	   9: MOVL       	$0x254767A0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25476794 12
+. 94 21 FF 00 7C 08 02 A6 48 02 08 65
+
+==== BB 667 (0x254767A0) approx BBs exec'd 0 ====
+
+	0x254767A0:  93C100F8  stw r30,248(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xF8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254767A4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x254767A8:  924100C8  stw r18,200(r1)
+	   8: GETL       	R18, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC8, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254767AC:  926100CC  stw r19,204(r1)
+	  13: GETL       	R19, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xCC, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254767B0:  7D200026  mfcr r9
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R9
+	  20: INCEIPL       	$4
+
+	0x254767B4:  90010104  stw r0,260(r1)
+	  21: GETL       	R0, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x104, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x254767B8:  38000000  li r0,0
+	  26: MOVL       	$0x0, t20
+	  27: PUTL       	t20, R0
+	  28: INCEIPL       	$4
+
+	0x254767BC:  825E015C  lwz r18,348(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x15C, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R18
+	  33: INCEIPL       	$4
+
+	0x254767C0:  827E0158  lwz r19,344(r30)
+	  34: GETL       	R30, t26
+	  35: ADDL       	$0x158, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R19
+	  38: INCEIPL       	$4
+
+	0x254767C4:  920100C0  stw r16,192(r1)
+	  39: GETL       	R16, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0xC0, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x254767C8:  922100C4  stw r17,196(r1)
+	  44: GETL       	R17, t34
+	  45: GETL       	R1, t36
+	  46: ADDL       	$0xC4, t36
+	  47: STL       	t34, (t36)
+	  48: INCEIPL       	$4
+
+	0x254767CC:  82130000  lwz r16,0(r19)
+	  49: GETL       	R19, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R16
+	  52: INCEIPL       	$4
+
+	0x254767D0:  82320000  lwz r17,0(r18)
+	  53: GETL       	R18, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R17
+	  56: INCEIPL       	$4
+
+	0x254767D4:  7D124378  or r18,r8,r8
+	  57: GETL       	R8, t46
+	  58: PUTL       	t46, R18
+	  59: INCEIPL       	$4
+
+	0x254767D8:  91E100BC  stw r15,188(r1)
+	  60: GETL       	R15, t48
+	  61: GETL       	R1, t50
+	  62: ADDL       	$0xBC, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0x254767DC:  7DF08A14  add r15,r16,r17
+	  65: GETL       	R16, t52
+	  66: GETL       	R17, t54
+	  67: ADDL       	t52, t54
+	  68: PUTL       	t54, R15
+	  69: INCEIPL       	$4
+
+	0x254767E0:  91C100B8  stw r14,184(r1)
+	  70: GETL       	R14, t56
+	  71: GETL       	R1, t58
+	  72: ADDL       	$0xB8, t58
+	  73: STL       	t56, (t58)
+	  74: INCEIPL       	$4
+
+	0x254767E4:  93E100FC  stw r31,252(r1)
+	  75: GETL       	R31, t60
+	  76: GETL       	R1, t62
+	  77: ADDL       	$0xFC, t62
+	  78: STL       	t60, (t62)
+	  79: INCEIPL       	$4
+
+	0x254767E8:  7DCF2214  add r14,r15,r4
+	  80: GETL       	R15, t64
+	  81: GETL       	R4, t66
+	  82: ADDL       	t64, t66
+	  83: PUTL       	t66, R14
+	  84: INCEIPL       	$4
+
+	0x254767EC:  7C3F0B78  or r31,r1,r1
+	  85: GETL       	R1, t68
+	  86: PUTL       	t68, R31
+	  87: INCEIPL       	$4
+
+	0x254767F0:  398E001E  addi r12,r14,30
+	  88: GETL       	R14, t70
+	  89: ADDL       	$0x1E, t70
+	  90: PUTL       	t70, R12
+	  91: INCEIPL       	$4
+
+	0x254767F4:  90DF0084  stw r6,132(r31)
+	  92: GETL       	R6, t72
+	  93: GETL       	R31, t74
+	  94: ADDL       	$0x84, t74
+	  95: STL       	t72, (t74)
+	  96: INCEIPL       	$4
+
+	0x254767F8:  558B0036  rlwinm r11,r12,0,0,27
+	  97: GETL       	R12, t76
+	  98: ANDL       	$0xFFFFFFF0, t76
+	  99: PUTL       	t76, R11
+	 100: INCEIPL       	$4
+
+	0x254767FC:  81410000  lwz r10,0(r1)
+	 101: GETL       	R1, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R10
+	 104: INCEIPL       	$4
+
+	0x25476800:  7C711B78  or r17,r3,r3
+	 105: GETL       	R3, t82
+	 106: PUTL       	t82, R17
+	 107: INCEIPL       	$4
+
+	0x25476804:  92A100D4  stw r21,212(r1)
+	 108: GETL       	R21, t84
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0xD4, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x25476808:  39E00000  li r15,0
+	 113: MOVL       	$0x0, t88
+	 114: PUTL       	t88, R15
+	 115: INCEIPL       	$4
+
+	0x2547680C:  912100B4  stw r9,180(r1)
+	 116: GETL       	R9, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0xB4, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x25476810:  7D2B00D0  neg r9,r11
+	 121: GETL       	R11, t94
+	 122: NEGL       	t94
+	 123: PUTL       	t94, R9
+	 124: INCEIPL       	$4
+
+	0x25476814:  82A60000  lwz r21,0(r6)
+	 125: GETL       	R6, t96
+	 126: LDL       	(t96), t98
+	 127: PUTL       	t98, R21
+	 128: INCEIPL       	$4
+
+	0x25476818:  92C100D8  stw r22,216(r1)
+	 129: GETL       	R22, t100
+	 130: GETL       	R1, t102
+	 131: ADDL       	$0xD8, t102
+	 132: STL       	t100, (t102)
+	 133: INCEIPL       	$4
+
+	0x2547681C:  7C962378  or r22,r4,r4
+	 134: GETL       	R4, t104
+	 135: PUTL       	t104, R22
+	 136: INCEIPL       	$4
+
+	0x25476820:  934100E8  stw r26,232(r1)
+	 137: GETL       	R26, t106
+	 138: GETL       	R1, t108
+	 139: ADDL       	$0xE8, t108
+	 140: STL       	t106, (t108)
+	 141: INCEIPL       	$4
+
+	0x25476824:  3B40FFFF  li r26,-1
+	 142: MOVL       	$0xFFFFFFFF, t110
+	 143: PUTL       	t110, R26
+	 144: INCEIPL       	$4
+
+	0x25476828:  936100EC  stw r27,236(r1)
+	 145: GETL       	R27, t112
+	 146: GETL       	R1, t114
+	 147: ADDL       	$0xEC, t114
+	 148: STL       	t112, (t114)
+	 149: INCEIPL       	$4
+
+	0x2547682C:  2E1AFFFF  cmpi cr4,r26,-1
+	 150: GETL       	R26, t116
+	 151: MOVL       	$0xFFFFFFFF, t120
+	 152: CMPL       	t116, t120, t118  (-rSo)
+	 153: ICRFL       	t118, $0x4, CR
+	 154: INCEIPL       	$4
+
+	0x25476830:  93A100F4  stw r29,244(r1)
+	 155: GETL       	R29, t122
+	 156: GETL       	R1, t124
+	 157: ADDL       	$0xF4, t124
+	 158: STL       	t122, (t124)
+	 159: INCEIPL       	$4
+
+	0x25476834:  928100D0  stw r20,208(r1)
+	 160: GETL       	R20, t126
+	 161: GETL       	R1, t128
+	 162: ADDL       	$0xD0, t128
+	 163: STL       	t126, (t128)
+	 164: INCEIPL       	$4
+
+	0x25476838:  92E100DC  stw r23,220(r1)
+	 165: GETL       	R23, t130
+	 166: GETL       	R1, t132
+	 167: ADDL       	$0xDC, t132
+	 168: STL       	t130, (t132)
+	 169: INCEIPL       	$4
+
+	0x2547683C:  930100E0  stw r24,224(r1)
+	 170: GETL       	R24, t134
+	 171: GETL       	R1, t136
+	 172: ADDL       	$0xE0, t136
+	 173: STL       	t134, (t136)
+	 174: INCEIPL       	$4
+
+	0x25476840:  932100E4  stw r25,228(r1)
+	 175: GETL       	R25, t138
+	 176: GETL       	R1, t140
+	 177: ADDL       	$0xE4, t140
+	 178: STL       	t138, (t140)
+	 179: INCEIPL       	$4
+
+	0x25476844:  938100F0  stw r28,240(r1)
+	 180: GETL       	R28, t142
+	 181: GETL       	R1, t144
+	 182: ADDL       	$0xF0, t144
+	 183: STL       	t142, (t144)
+	 184: INCEIPL       	$4
+
+	0x25476848:  7D41496E  stwux r10,r1,r9
+	 185: GETL       	R9, t146
+	 186: GETL       	R1, t148
+	 187: ADDL       	t148, t146
+	 188: PUTL       	t146, R1
+	 189: GETL       	R10, t150
+	 190: STL       	t150, (t146)
+	 191: INCEIPL       	$4
+
+	0x2547684C:  38810017  addi r4,r1,23
+	 192: GETL       	R1, t152
+	 193: ADDL       	$0x17, t152
+	 194: PUTL       	t152, R4
+	 195: INCEIPL       	$4
+
+	0x25476850:  83B50000  lwz r29,0(r21)
+	 196: GETL       	R21, t154
+	 197: LDL       	(t154), t156
+	 198: PUTL       	t156, R29
+	 199: INCEIPL       	$4
+
+	0x25476854:  81DE04F4  lwz r14,1268(r30)
+	 200: GETL       	R30, t158
+	 201: ADDL       	$0x4F4, t158
+	 202: LDL       	(t158), t160
+	 203: PUTL       	t160, R14
+	 204: INCEIPL       	$4
+
+	0x25476858:  549B0036  rlwinm r27,r4,0,0,27
+	 205: GETL       	R4, t162
+	 206: ANDL       	$0xFFFFFFF0, t162
+	 207: PUTL       	t162, R27
+	 208: INCEIPL       	$4
+
+	0x2547685C:  90BF0080  stw r5,128(r31)
+	 209: GETL       	R5, t164
+	 210: GETL       	R31, t166
+	 211: ADDL       	$0x80, t166
+	 212: STL       	t164, (t166)
+	 213: INCEIPL       	$4
+
+	0x25476860:  90FF0088  stw r7,136(r31)
+	 214: GETL       	R7, t168
+	 215: GETL       	R31, t170
+	 216: ADDL       	$0x88, t170
+	 217: STL       	t168, (t170)
+	 218: INCEIPL       	$4
+
+	0x25476864:  901F008C  stw r0,140(r31)
+	 219: GETL       	R0, t172
+	 220: GETL       	R31, t174
+	 221: ADDL       	$0x8C, t174
+	 222: STL       	t172, (t174)
+	 223: INCEIPL       	$4
+
+	0x25476868:  806E0000  lwz r3,0(r14)
+	 224: GETL       	R14, t176
+	 225: LDL       	(t176), t178
+	 226: PUTL       	t178, R3
+	 227: INCEIPL       	$4
+
+	0x2547686C:  3A600000  li r19,0
+	 228: MOVL       	$0x0, t180
+	 229: PUTL       	t180, R19
+	 230: INCEIPL       	$4
+
+	0x25476870:  3AE00000  li r23,0
+	 231: MOVL       	$0x0, t182
+	 232: PUTL       	t182, R23
+	 233: INCEIPL       	$4
+
+	0x25476874:  70690001  andi. r9,r3,0x1
+	 234: GETL       	R3, t184
+	 235: ANDL       	$0x1, t184
+	 236: PUTL       	t184, R9
+	 237: CMP0L       	t184, t186  (-rSo)
+	 238: ICRFL       	t186, $0x0, CR
+	 239: INCEIPL       	$4
+
+	0x25476878:  4082025C  bc 4,2,0x25476AD4
+	 240: Jc02o       	$0x25476AD4
+
+
+. 0 254767A0 220
+. 93 C1 00 F8 7F C8 02 A6 92 41 00 C8 92 61 00 CC 7D 20 00 26 90 01 01 04 38 00 00 00 82 5E 01 5C 82 7E 01 58 92 01 00 C0 92 21 00 C4 82 13 00 00 82 32 00 00 7D 12 43 78 91 E1 00 BC 7D F0 8A 14 91 C1 00 B8 93 E1 00 FC 7D CF 22 14 7C 3F 0B 78 39 8E 00 1E 90 DF 00 84 55 8B 00 36 81 41 00 00 7C 71 1B 78 92 A1 00 D4 39 E0 00 00 91 21 00 B4 7D 2B 00 D0 82 A6 00 00 92 C1 00 D8 7C 96 23 78 93 41 00 E8 3B 40 FF FF 93 61 00 EC 2E 1A FF FF 93 A1 00 F4 92 81 00 D0 92 E1 00 DC 93 01 00 E0 93 21 00 E4 93 81 00 F0 7D 41 49 6E 38 81 00 17 83 B5 00 00 81 DE 04 F4 54 9B 00 36 90 BF 00 80 90 FF 00 88 90 1F 00 8C 80 6E 00 00 3A 60 00 00 3A E0 00 00 70 69 00 01 40 82 02 5C
+
+==== BB 668 (0x2547687C) approx BBs exec'd 0 ====
+
+	0x2547687C:  809D000C  lwz r4,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25476880:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25476884:  80BD0010  lwz r5,16(r29)
+	   8: GETL       	R29, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x25476888:  3B800000  li r28,0
+	  13: MOVL       	$0x0, t10
+	  14: PUTL       	t10, R28
+	  15: INCEIPL       	$4
+
+	0x2547688C:  4800D195  bl 0x25483A20
+	  16: MOVL       	$0x25476890, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0x25483A20  ($4)
+
+
+. 0 2547687C 20
+. 80 9D 00 0C 7F 63 DB 78 80 BD 00 10 3B 80 00 00 48 00 D1 95
+
+==== BB 669 (0x25476890) approx BBs exec'd 0 ====
+
+	0x25476890:  7C781B78  or r24,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R24
+	   2: INCEIPL       	$4
+
+	0x25476894:  409201A0  bc 4,18,0x25476A34
+	   3: Jc18o       	$0x25476A34
+
+
+. 0 25476890 8
+. 7C 78 1B 78 40 92 01 A0
+
+==== BB 670 (0x25476898) approx BBs exec'd 0 ====
+
+	0x25476898:  833E0154  lwz r25,340(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x154, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x2547689C:  81990000  lwz r12,0(r25)
+	   5: GETL       	R25, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R12
+	   8: INCEIPL       	$4
+
+	0x254768A0:  7F9C6040  cmpl cr7,r28,r12
+	   9: GETL       	R28, t8
+	  10: GETL       	R12, t10
+	  11: CMPUL       	t8, t10, t12  (-rSo)
+	  12: ICRFL       	t12, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x254768A4:  409C014C  bc 4,28,0x254769F0
+	  14: Jc28o       	$0x254769F0
+
+
+. 0 25476898 16
+. 83 3E 01 54 81 99 00 00 7F 9C 60 40 40 9C 01 4C
+
+==== BB 671 (0x254768A8) approx BBs exec'd 0 ====
+
+	0x254768A8:  817E0228  lwz r11,552(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x228, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254768AC:  3A800002  li r20,2
+	   5: MOVL       	$0x2, t4
+	   6: PUTL       	t4, R20
+	   7: INCEIPL       	$4
+
+	0x254768B0:  820B0000  lwz r16,0(r11)
+	   8: GETL       	R11, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R16
+	  11: INCEIPL       	$4
+
+	0x254768B4:  4800001C  b 0x254768D0
+	  12: JMPo       	$0x254768D0  ($4)
+
+
+. 0 254768A8 16
+. 81 7E 02 28 3A 80 00 02 82 0B 00 00 48 00 00 1C
+
+==== BB 672 (0x254768D0) approx BBs exec'd 0 ====
+
+	0x254768D0:  80DD0014  lwz r6,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254768D4:  2C860001  cmpi cr1,r6,1
+	   5: GETL       	R6, t4
+	   6: MOVL       	$0x1, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x254768D8:  41A6FFE0  bc 13,6,0x254768B8
+	  10: Js06o       	$0x254768B8
+
+
+. 0 254768D0 12
+. 80 DD 00 14 2C 86 00 01 41 A6 FF E0
+
+==== BB 673 (0x254768DC) approx BBs exec'd 0 ====
+
+	0x254768DC:  813E0160  lwz r9,352(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x160, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x254768E0:  578A1838  rlwinm r10,r28,3,0,28
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x3, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x254768E4:  7F03C378  or r3,r24,r24
+	   9: GETL       	R24, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x254768E8:  80A90000  lwz r5,0(r9)
+	  12: GETL       	R9, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0x254768EC:  7E6A2A14  add r19,r10,r5
+	  16: GETL       	R10, t12
+	  17: GETL       	R5, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R19
+	  20: INCEIPL       	$4
+
+	0x254768F0:  7C8A282E  lwzx r4,r10,r5
+	  21: GETL       	R5, t16
+	  22: GETL       	R10, t18
+	  23: ADDL       	t18, t16
+	  24: LDL       	(t16), t20
+	  25: PUTL       	t20, R4
+	  26: INCEIPL       	$4
+
+	0x254768F4:  80B30004  lwz r5,4(r19)
+	  27: GETL       	R19, t22
+	  28: ADDL       	$0x4, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R5
+	  31: INCEIPL       	$4
+
+	0x254768F8:  4800D129  bl 0x25483A20
+	  32: MOVL       	$0x254768FC, t26
+	  33: PUTL       	t26, LR
+	  34: JMPo-c       	$0x25483A20  ($4)
+
+
+. 0 254768DC 32
+. 81 3E 01 60 57 8A 18 38 7F 03 C3 78 80 A9 00 00 7E 6A 2A 14 7C 8A 28 2E 80 B3 00 04 48 00 D1 29
+
+==== BB 674 (0x254768FC) approx BBs exec'd 0 ====
+
+	0x254768FC:  7E248B78  or r4,r17,r17
+	   0: GETL       	R17, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25476900:  7EC5B378  or r5,r22,r22
+	   3: GETL       	R22, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25476904:  4800D11D  bl 0x25483A20
+	   6: MOVL       	$0x25476908, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483A20  ($4)
+
+
+. 0 254768FC 12
+. 7E 24 8B 78 7E C5 B3 78 48 00 D1 1D
+
+==== BB 675 (0x25476908) approx BBs exec'd 0 ====
+
+	0x25476908:  834E0000  lwz r26,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R26
+	   3: INCEIPL       	$4
+
+	0x2547690C:  7E7B1850  subf r19,r27,r3
+	   4: GETL       	R27, t4
+	   5: GETL       	R3, t6
+	   6: SUBL       	t4, t6
+	   7: PUTL       	t6, R19
+	   8: INCEIPL       	$4
+
+	0x25476910:  73490001  andi. r9,r26,0x1
+	   9: GETL       	R26, t8
+	  10: ANDL       	$0x1, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25476914:  408201AC  bc 4,2,0x25476AC0
+	  15: Jc02o       	$0x25476AC0
+
+
+. 0 25476908 16
+. 83 4E 00 00 7E 7B 18 50 73 49 00 01 40 82 01 AC
+
+==== BB 676 (0x25476918) approx BBs exec'd 0 ====
+
+	0x25476918:  7F63DB78  or r3,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547691C:  7E449378  or r4,r18,r18
+	   3: GETL       	R18, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x25476920:  4BFFFB25  bl 0x25476444
+	   6: MOVL       	$0x25476924, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25476444  ($4)
+
+
+. 0 25476918 12
+. 7F 63 DB 78 7E 44 93 78 4B FF FB 25
+
+==== BB 677 (0x25481FEC) approx BBs exec'd 0 ====
+
+	0x25481FEC:  4BFFF344  b 0x25481330
+	   0: JMPo       	$0x25481330  ($4)
+
+
+. 0 25481FEC 4
+. 4B FF F3 44
+
+==== BB 678 (0x25476924) approx BBs exec'd 0 ====
+
+	0x25476924:  801D0014  lwz r0,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25476928:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547692C:  2E03FFFF  cmpi cr4,r3,-1
+	   8: GETL       	R3, t6
+	   9: MOVL       	$0xFFFFFFFF, t10
+	  10: CMPL       	t6, t10, t8  (-rSo)
+	  11: ICRFL       	t8, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0x25476930:  2F000000  cmpi cr6,r0,0
+	  13: GETL       	R0, t12
+	  14: CMP0L       	t12, t14  (-rSo)
+	  15: ICRFL       	t14, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0x25476934:  409A000C  bc 4,26,0x25476940
+	  17: Jc26o       	$0x25476940
+
+
+. 0 25476924 20
+. 80 1D 00 14 7C 7A 1B 78 2E 03 FF FF 2F 00 00 00 40 9A 00 0C
+
+==== BB 679 (0x25476938) approx BBs exec'd 0 ====
+
+	0x25476938:  41920078  bc 12,18,0x254769B0
+	   0: Js18o       	$0x254769B0
+
+
+. 0 25476938 4
+. 41 92 00 78
+
+==== BB 680 (0x254769B0) approx BBs exec'd 0 ====
+
+	0x254769B0:  7C769850  subf r3,r22,r19
+	   0: GETL       	R22, t0
+	   1: GETL       	R19, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254769B4:  7F64DB78  or r4,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x254769B8:  7CFB1A14  add r7,r27,r3
+	   8: GETL       	R27, t6
+	   9: GETL       	R3, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0x254769BC:  38BF0010  addi r5,r31,16
+	  13: GETL       	R31, t10
+	  14: ADDL       	$0x10, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0x254769C0:  9807FFFF  stb r0,-1(r7)
+	  17: GETL       	R0, t12
+	  18: GETL       	R7, t14
+	  19: ADDL       	$0xFFFFFFFF, t14
+	  20: STB       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0x254769C4:  38600003  li r3,3
+	  22: MOVL       	$0x3, t16
+	  23: PUTL       	t16, R3
+	  24: INCEIPL       	$4
+
+	0x254769C8:  4800B3A1  bl 0x25481D68
+	  25: MOVL       	$0x254769CC, t18
+	  26: PUTL       	t18, LR
+	  27: JMPo-c       	$0x25481D68  ($4)
+
+
+. 0 254769B0 28
+. 7C 76 98 50 7F 64 DB 78 7C FB 1A 14 38 BF 00 10 98 07 FF FF 38 60 00 03 48 00 B3 A1
+
+==== BB 681 __GI___xstat64(0x25481D68) approx BBs exec'd 0 ====
+
+	0x25481D68:  9421FF80  stwu r1,-128(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF80, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25481D6C:  7CC802A6  mflr r6
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x25481D70:  48015291  bl 0x25497000
+	   9: MOVL       	$0x25481D74, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25481D68 12
+. 94 21 FF 80 7C C8 02 A6 48 01 52 91
+
+==== BB 682 (0x25481D74) approx BBs exec'd 0 ====
+
+	0x25481D74:  93C10078  stw r30,120(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x78, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25481D78:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25481D7C:  9361006C  stw r27,108(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x6C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25481D80:  90C10084  stw r6,132(r1)
+	  13: GETL       	R6, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x84, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25481D84:  93210064  stw r25,100(r1)
+	  18: GETL       	R25, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x64, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25481D88:  7C791B78  or r25,r3,r3
+	  23: GETL       	R3, t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0x25481D8C:  837E04EC  lwz r27,1260(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4EC, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0x25481D90:  93810070  stw r28,112(r1)
+	  31: GETL       	R28, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x70, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x25481D94:  7CBC2B78  or r28,r5,r5
+	  36: GETL       	R5, t28
+	  37: PUTL       	t28, R28
+	  38: INCEIPL       	$4
+
+	0x25481D98:  801B0000  lwz r0,0(r27)
+	  39: GETL       	R27, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0x25481D9C:  93E1007C  stw r31,124(r1)
+	  43: GETL       	R31, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x7C, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x25481DA0:  7C9F2378  or r31,r4,r4
+	  48: GETL       	R4, t38
+	  49: PUTL       	t38, R31
+	  50: INCEIPL       	$4
+
+	0x25481DA4:  2F800000  cmpi cr7,r0,0
+	  51: GETL       	R0, t40
+	  52: CMP0L       	t40, t42  (-rSo)
+	  53: ICRFL       	t42, $0x7, CR
+	  54: INCEIPL       	$4
+
+	0x25481DA8:  93410068  stw r26,104(r1)
+	  55: GETL       	R26, t44
+	  56: GETL       	R1, t46
+	  57: ADDL       	$0x68, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0x25481DAC:  93A10074  stw r29,116(r1)
+	  60: GETL       	R29, t48
+	  61: GETL       	R1, t50
+	  62: ADDL       	$0x74, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0x25481DB0:  409E0048  bc 4,30,0x25481DF8
+	  65: Jc30o       	$0x25481DF8
+
+
+. 0 25481D74 64
+. 93 C1 00 78 7F C8 02 A6 93 61 00 6C 90 C1 00 84 93 21 00 64 7C 79 1B 78 83 7E 04 EC 93 81 00 70 7C BC 2B 78 80 1B 00 00 93 E1 00 7C 7C 9F 23 78 2F 80 00 00 93 41 00 68 93 A1 00 74 40 9E 00 48
+
+==== BB 683 (0x25481DB4) approx BBs exec'd 0 ====
+
+	0x25481DB4:  83BE0514  lwz r29,1300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x514, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25481DB8:  7C832378  or r3,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25481DBC:  380000C3  li r0,195
+	   8: MOVL       	$0xC3, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x25481DC0:  7CA42B78  or r4,r5,r5
+	  11: GETL       	R5, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x25481DC4:  835D0000  lwz r26,0(r29)
+	  14: GETL       	R29, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R26
+	  17: INCEIPL       	$4
+
+	0x25481DC8:  44000002  sc
+	  18: JMPo-sys       	$0x25481DCC  ($4)
+
+
+. 0 25481DB4 24
+. 83 BE 05 14 7C 83 23 78 38 00 00 C3 7C A4 2B 78 83 5D 00 00 44 00 00 02
+
+==== BB 684 (0x25481DCC) approx BBs exec'd 0 ====
+
+	0x25481DCC:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25481DD0:  74091000  andis. r9,r0,0x1000
+	   3: GETL       	R0, t2
+	   4: ANDL       	$0x10000000, t2
+	   5: PUTL       	t2, R9
+	   6: CMP0L       	t2, t4  (-rSo)
+	   7: ICRFL       	t4, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25481DD4:  408200C4  bc 4,2,0x25481E98
+	   9: Jc02o       	$0x25481E98
+
+
+. 0 25481DCC 12
+. 7C 00 00 26 74 09 10 00 40 82 00 C4
+
+==== BB 685 (0x25481E98) approx BBs exec'd 0 ====
+
+	0x25481E98:  907D0000  stw r3,0(r29)
+	   0: GETL       	R3, t0
+	   1: GETL       	R29, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25481E9C:  3860FFFF  li r3,-1
+	   4: MOVL       	$0xFFFFFFFF, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x25481EA0:  4BFFFF38  b 0x25481DD8
+	   7: JMPo       	$0x25481DD8  ($4)
+
+
+. 0 25481E98 12
+. 90 7D 00 00 38 60 FF FF 4B FF FF 38
+
+==== BB 686 (0x25481DD8) approx BBs exec'd 0 ====
+
+	0x25481DD8:  2C83FFFF  cmpi cr1,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25481DDC:  40A60044  bc 5,6,0x25481E20
+	   5: Jc06o       	$0x25481E20
+
+
+. 0 25481DD8 8
+. 2C 83 FF FF 40 A6 00 44
+
+==== BB 687 (0x25481DE0) approx BBs exec'd 0 ====
+
+	0x25481DE0:  809D0000  lwz r4,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25481DE4:  2F040026  cmpi cr6,r4,38
+	   4: GETL       	R4, t4
+	   5: MOVL       	$0x26, t8
+	   6: CMPL       	t4, t8, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25481DE8:  40BA0038  bc 5,26,0x25481E20
+	   9: Jc26o       	$0x25481E20
+
+
+. 0 25481DE0 12
+. 80 9D 00 00 2F 04 00 26 40 BA 00 38
+
+==== BB 688 (0x25481E20) approx BBs exec'd 0 ====
+
+	0x25481E20:  80A10084  lwz r5,132(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25481E24:  83210064  lwz r25,100(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x64, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x25481E28:  83410068  lwz r26,104(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x68, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0x25481E2C:  7CA803A6  mtlr r5
+	  15: GETL       	R5, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x25481E30:  8361006C  lwz r27,108(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x6C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R27
+	  22: INCEIPL       	$4
+
+	0x25481E34:  83810070  lwz r28,112(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x70, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R28
+	  27: INCEIPL       	$4
+
+	0x25481E38:  83A10074  lwz r29,116(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x74, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R29
+	  32: INCEIPL       	$4
+
+	0x25481E3C:  83C10078  lwz r30,120(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x78, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R30
+	  37: INCEIPL       	$4
+
+	0x25481E40:  83E1007C  lwz r31,124(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x7C, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R31
+	  42: INCEIPL       	$4
+
+	0x25481E44:  38210080  addi r1,r1,128
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x80, t34
+	  45: PUTL       	t34, R1
+	  46: INCEIPL       	$4
+
+	0x25481E48:  4E800020  blr
+	  47: GETL       	LR, t36
+	  48: JMPo-r       	t36  ($4)
+
+
+. 0 25481E20 44
+. 80 A1 00 84 83 21 00 64 83 41 00 68 7C A8 03 A6 83 61 00 6C 83 81 00 70 83 A1 00 74 83 C1 00 78 83 E1 00 7C 38 21 00 80 4E 80 00 20
+
+==== BB 689 (0x254769CC) approx BBs exec'd 0 ====
+
+	0x254769CC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x254769D0:  409E0014  bc 4,30,0x254769E4
+	   4: Jc30o       	$0x254769E4
+
+
+. 0 254769CC 8
+. 2F 83 00 00 40 9E 00 14
+
+==== BB 690 (0x254769E4) approx BBs exec'd 0 ====
+
+	0x254769E4:  38800001  li r4,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254769E8:  909D0014  stw r4,20(r29)
+	   3: GETL       	R4, t2
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x14, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254769EC:  4BFFFF54  b 0x25476940
+	   8: JMPo       	$0x25476940  ($4)
+
+
+. 0 254769E4 12
+. 38 80 00 01 90 9D 00 14 4B FF FF 54
+
+==== BB 691 (0x25476940) approx BBs exec'd 0 ====
+
+	0x25476940:  817D0014  lwz r11,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25476944:  696C0002  xori r12,r11,0x2
+	   5: GETL       	R11, t4
+	   6: XORL       	$0x2, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25476948:  212C0000  subfic r9,r12,0
+	   9: GETL       	R12, t6
+	  10: MOVL       	$0x0, t8
+	  11: SBBL       	t6, t8  (-wCa)
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0x2547694C:  7D896114  adde r12,r9,r12
+	  14: GETL       	R9, t10
+	  15: GETL       	R12, t12
+	  16: ADCL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R12
+	  18: INCEIPL       	$4
+
+	0x25476950:  7EF76378  or r23,r23,r12
+	  19: GETL       	R23, t14
+	  20: GETL       	R12, t16
+	  21: ORL       	t16, t14
+	  22: PUTL       	t14, R23
+	  23: INCEIPL       	$4
+
+	0x25476954:  41B2FF64  bc 13,18,0x254768B8
+	  24: Js18o       	$0x254768B8
+
+
+. 0 25476940 24
+. 81 7D 00 14 69 6C 00 02 21 2C 00 00 7D 89 61 14 7E F7 63 78 41 B2 FF 64
+
+==== BB 692 (0x254768B8) approx BBs exec'd 0 ====
+
+	0x254768B8:  3B9C0001  addi r28,r28,1
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x254768BC:  3BBD0004  addi r29,r29,4
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x4, t2
+	   6: PUTL       	t2, R29
+	   7: INCEIPL       	$4
+
+	0x254768C0:  40920174  bc 4,18,0x25476A34
+	   8: Jc18o       	$0x25476A34
+
+
+. 0 254768B8 12
+. 3B 9C 00 01 3B BD 00 04 40 92 01 74
+
+==== BB 693 (0x254768C4) approx BBs exec'd 0 ====
+
+	0x254768C4:  81390000  lwz r9,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x254768C8:  7C9C4840  cmpl cr1,r28,r9
+	   4: GETL       	R28, t4
+	   5: GETL       	R9, t6
+	   6: CMPUL       	t4, t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254768CC:  40840124  bc 4,4,0x254769F0
+	   9: Jc04o       	$0x254769F0
+
+
+. 0 254768C4 12
+. 81 39 00 00 7C 9C 48 40 40 84 01 24
+
+==== BB 694 (0x254769D4) approx BBs exec'd 0 ====
+
+	0x254769D4:  811F0020  lwz r8,32(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254769D8:  55000426  rlwinm r0,r8,0,16,19
+	   5: GETL       	R8, t4
+	   6: ANDL       	$0xF000, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254769DC:  2C804000  cmpi cr1,r0,16384
+	   9: GETL       	R0, t6
+	  10: MOVL       	$0x4000, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x254769E0:  41A6FF5C  bc 13,6,0x2547693C
+	  14: Js06o       	$0x2547693C
+
+
+. 0 254769D4 16
+. 81 1F 00 20 55 00 04 26 2C 80 40 00 41 A6 FF 5C
+
+==== BB 695 (0x2547693C) approx BBs exec'd 0 ====
+
+	0x2547693C:  929D0014  stw r20,20(r29)
+	   0: GETL       	R20, t0
+	   1: GETL       	R29, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25476940:  817D0014  lwz r11,20(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25476944:  696C0002  xori r12,r11,0x2
+	  10: GETL       	R11, t8
+	  11: XORL       	$0x2, t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0x25476948:  212C0000  subfic r9,r12,0
+	  14: GETL       	R12, t10
+	  15: MOVL       	$0x0, t12
+	  16: SBBL       	t10, t12  (-wCa)
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0x2547694C:  7D896114  adde r12,r9,r12
+	  19: GETL       	R9, t14
+	  20: GETL       	R12, t16
+	  21: ADCL       	t14, t16  (-rCa-wCa)
+	  22: PUTL       	t16, R12
+	  23: INCEIPL       	$4
+
+	0x25476950:  7EF76378  or r23,r23,r12
+	  24: GETL       	R23, t18
+	  25: GETL       	R12, t20
+	  26: ORL       	t20, t18
+	  27: PUTL       	t18, R23
+	  28: INCEIPL       	$4
+
+	0x25476954:  41B2FF64  bc 13,18,0x254768B8
+	  29: Js18o       	$0x254768B8
+
+
+. 0 2547693C 28
+. 92 9D 00 14 81 7D 00 14 69 6C 00 02 21 2C 00 00 7D 89 61 14 7E F7 63 78 41 B2 FF 64
+
+==== BB 696 (0x254769F0) approx BBs exec'd 0 ====
+
+	0x254769F0:  40920044  bc 4,18,0x25476A34
+	   0: Jc18o       	$0x25476A34
+
+
+. 0 254769F0 4
+. 40 92 00 44
+
+==== BB 697 (0x254769F4) approx BBs exec'd 0 ====
+
+	0x254769F4:  2F170000  cmpi cr6,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x254769F8:  419A001C  bc 12,26,0x25476A14
+	   4: Js26o       	$0x25476A14
+
+
+. 0 254769F4 8
+. 2F 17 00 00 41 9A 00 1C
+
+==== BB 698 (0x254769FC) approx BBs exec'd 0 ====
+
+	0x254769FC:  83BE0514  lwz r29,1300(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x514, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25476A00:  813D0000  lwz r9,0(r29)
+	   5: GETL       	R29, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x25476A04:  2F890002  cmpi cr7,r9,2
+	   9: GETL       	R9, t8
+	  10: MOVL       	$0x2, t12
+	  11: CMPL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25476A08:  419E000C  bc 12,30,0x25476A14
+	  14: Js30o       	$0x25476A14
+
+
+. 0 254769FC 16
+. 83 BE 05 14 81 3D 00 00 2F 89 00 02 41 9E 00 0C
+
+==== BB 699 (0x25476A14) approx BBs exec'd 0 ====
+
+	0x25476A14:  87B50004  lwzu r29,4(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R21
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0x25476A18:  7DEFBB78  or r15,r15,r23
+	   6: GETL       	R15, t4
+	   7: GETL       	R23, t6
+	   8: ORL       	t6, t4
+	   9: PUTL       	t4, R15
+	  10: INCEIPL       	$4
+
+	0x25476A1C:  2C9D0000  cmpi cr1,r29,0
+	  11: GETL       	R29, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x25476A20:  4086FE48  bc 4,6,0x25476868
+	  15: Jc06o       	$0x25476868
+
+
+. 0 25476A14 16
+. 87 B5 00 04 7D EF BB 78 2C 9D 00 00 40 86 FE 48
+
+==== BB 700 (0x25476868) approx BBs exec'd 0 ====
+
+	0x25476868:  806E0000  lwz r3,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x2547686C:  3A600000  li r19,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R19
+	   6: INCEIPL       	$4
+
+	0x25476870:  3AE00000  li r23,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R23
+	   9: INCEIPL       	$4
+
+	0x25476874:  70690001  andi. r9,r3,0x1
+	  10: GETL       	R3, t8
+	  11: ANDL       	$0x1, t8
+	  12: PUTL       	t8, R9
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x25476878:  4082025C  bc 4,2,0x25476AD4
+	  16: Jc02o       	$0x25476AD4
+
+
+. 0 25476868 20
+. 80 6E 00 00 3A 60 00 00 3A E0 00 00 70 69 00 01 40 82 02 5C
+
+==== BB 701 (0x25483DE0) approx BBs exec'd 0 ====
+
+	0x25483DE0:  80040000  lwz r0,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25483DE4:  3884FFF8  addi r4,r4,-8
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0xFFFFFFF8, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25483DE8:  8124000C  lwz r9,12(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0x25483DEC:  3863FFF4  addi r3,r3,-12
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0xFFFFFFF4, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x25483DF0:  38A50002  addi r5,r5,2
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x2, t12
+	  19: PUTL       	t12, R5
+	  20: INCEIPL       	$4
+
+	0x25483DF4:  9003000C  stw r0,12(r3)
+	  21: GETL       	R0, t14
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0xC, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0x25483DF8:  4BFFFF34  b 0x25483D2C
+	  26: JMPo       	$0x25483D2C  ($4)
+
+
+. 0 25483DE0 28
+. 80 04 00 00 38 84 FF F8 81 24 00 0C 38 63 FF F4 38 A5 00 02 90 03 00 0C 4B FF FF 34
+
+==== BB 702 (0x25483D2C) approx BBs exec'd 0 ====
+
+	0x25483D2C:  80040010  lwz r0,16(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25483D30:  91230010  stw r9,16(r3)
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x10, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483D34:  81240014  lwz r9,20(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x25483D38:  90030014  stw r0,20(r3)
+	  15: GETL       	R0, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0x14, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	  20: GETL       	R4, t16
+	  21: ADDL       	$0x18, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R0
+	  24: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	  25: GETL       	R9, t20
+	  26: GETL       	R3, t22
+	  27: ADDL       	$0x18, t22
+	  28: STL       	t20, (t22)
+	  29: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  30: GETL       	R5, t24
+	  31: ADCL       	$0xFFFFFFF8, t24  (-wCa)
+	  32: PUTL       	t24, R5
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x0, CR
+	  35: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  36: GETL       	R4, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R9
+	  40: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  41: GETL       	R0, t32
+	  42: GETL       	R3, t34
+	  43: ADDL       	$0x1C, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  46: GETL       	R4, t36
+	  47: ADDL       	$0x20, t36
+	  48: PUTL       	t36, R4
+	  49: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  50: GETL       	R3, t38
+	  51: ADDL       	$0x20, t38
+	  52: PUTL       	t38, R3
+	  53: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  54: Jc02o       	$0x25483D0C
+
+
+. 0 25483D2C 48
+. 80 04 00 10 91 23 00 10 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+==== BB 703 (0x25483DC4) approx BBs exec'd 0 ====
+
+	0x25483DC4:  81240000  lwz r9,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25483DC8:  3884FFF4  addi r4,r4,-12
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0xFFFFFFF4, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25483DCC:  80040010  lwz r0,16(r4)
+	   8: GETL       	R4, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x25483DD0:  3863FFF0  addi r3,r3,-16
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0xFFFFFFF0, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x25483DD4:  38A50003  addi r5,r5,3
+	  17: GETL       	R5, t12
+	  18: ADDL       	$0x3, t12
+	  19: PUTL       	t12, R5
+	  20: INCEIPL       	$4
+
+	0x25483DD8:  91230010  stw r9,16(r3)
+	  21: GETL       	R9, t14
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x10, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0x25483DDC:  4BFFFF58  b 0x25483D34
+	  26: JMPo       	$0x25483D34  ($4)
+
+
+. 0 25483DC4 28
+. 81 24 00 00 38 84 FF F4 80 04 00 10 38 63 FF F0 38 A5 00 03 91 23 00 10 4B FF FF 58
+
+==== BB 704 (0x25483D34) approx BBs exec'd 0 ====
+
+	0x25483D34:  81240014  lwz r9,20(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25483D38:  90030014  stw r0,20(r3)
+	   5: GETL       	R0, t4
+	   6: GETL       	R3, t6
+	   7: ADDL       	$0x14, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483D3C:  80040018  lwz r0,24(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x25483D40:  91230018  stw r9,24(r3)
+	  15: GETL       	R9, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0x18, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25483D44:  34A5FFF8  addic. r5,r5,-8
+	  20: GETL       	R5, t16
+	  21: ADCL       	$0xFFFFFFF8, t16  (-wCa)
+	  22: PUTL       	t16, R5
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x25483D48:  8124001C  lwz r9,28(r4)
+	  26: GETL       	R4, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0x25483D4C:  9003001C  stw r0,28(r3)
+	  31: GETL       	R0, t24
+	  32: GETL       	R3, t26
+	  33: ADDL       	$0x1C, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x25483D50:  38840020  addi r4,r4,32
+	  36: GETL       	R4, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R4
+	  39: INCEIPL       	$4
+
+	0x25483D54:  38630020  addi r3,r3,32
+	  40: GETL       	R3, t30
+	  41: ADDL       	$0x20, t30
+	  42: PUTL       	t30, R3
+	  43: INCEIPL       	$4
+
+	0x25483D58:  40A2FFB4  bc 5,2,0x25483D0C
+	  44: Jc02o       	$0x25483D0C
+
+
+. 0 25483D34 40
+. 81 24 00 14 90 03 00 14 80 04 00 18 91 23 00 18 34 A5 FF F8 81 24 00 1C 90 03 00 1C 38 84 00 20 38 63 00 20 40 A2 FF B4
+
+==== BB 705 (0x25476A24) approx BBs exec'd 0 ====
+
+	0x25476A24:  2E0F0000  cmpi cr4,r15,0
+	   0: GETL       	R15, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25476A28:  4192026C  bc 12,18,0x25476C94
+	   4: Js18o       	$0x25476C94
+
+
+. 0 25476A24 8
+. 2E 0F 00 00 41 92 02 6C
+
+==== BB 706 (0x25476A2C) approx BBs exec'd 0 ====
+
+	0x25476A2C:  3860FFFF  li r3,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476A30:  4800002C  b 0x25476A5C
+	   3: JMPo       	$0x25476A5C  ($4)
+
+
+. 0 25476A2C 8
+. 38 60 FF FF 48 00 00 2C
+
+==== BB 707 (0x25476A5C) approx BBs exec'd 0 ====
+
+	0x25476A5C:  81010000  lwz r8,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0x25476A60:  81E80004  lwz r15,4(r8)
+	   4: GETL       	R8, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R15
+	   8: INCEIPL       	$4
+
+	0x25476A64:  8088FFB4  lwz r4,-76(r8)
+	   9: GETL       	R8, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R4
+	  13: INCEIPL       	$4
+
+	0x25476A68:  7DE803A6  mtlr r15
+	  14: GETL       	R15, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x25476A6C:  81C8FFB8  lwz r14,-72(r8)
+	  17: GETL       	R8, t14
+	  18: ADDL       	$0xFFFFFFB8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R14
+	  21: INCEIPL       	$4
+
+	0x25476A70:  81E8FFBC  lwz r15,-68(r8)
+	  22: GETL       	R8, t18
+	  23: ADDL       	$0xFFFFFFBC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R15
+	  26: INCEIPL       	$4
+
+	0x25476A74:  7C808120  mtcrf 0x8,r4
+	  27: GETL       	R4, t22
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x25476A78:  8208FFC0  lwz r16,-64(r8)
+	  30: GETL       	R8, t24
+	  31: ADDL       	$0xFFFFFFC0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R16
+	  34: INCEIPL       	$4
+
+	0x25476A7C:  8228FFC4  lwz r17,-60(r8)
+	  35: GETL       	R8, t28
+	  36: ADDL       	$0xFFFFFFC4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R17
+	  39: INCEIPL       	$4
+
+	0x25476A80:  8248FFC8  lwz r18,-56(r8)
+	  40: GETL       	R8, t32
+	  41: ADDL       	$0xFFFFFFC8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R18
+	  44: INCEIPL       	$4
+
+	0x25476A84:  8268FFCC  lwz r19,-52(r8)
+	  45: GETL       	R8, t36
+	  46: ADDL       	$0xFFFFFFCC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R19
+	  49: INCEIPL       	$4
+
+	0x25476A88:  8288FFD0  lwz r20,-48(r8)
+	  50: GETL       	R8, t40
+	  51: ADDL       	$0xFFFFFFD0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R20
+	  54: INCEIPL       	$4
+
+	0x25476A8C:  82A8FFD4  lwz r21,-44(r8)
+	  55: GETL       	R8, t44
+	  56: ADDL       	$0xFFFFFFD4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R21
+	  59: INCEIPL       	$4
+
+	0x25476A90:  82C8FFD8  lwz r22,-40(r8)
+	  60: GETL       	R8, t48
+	  61: ADDL       	$0xFFFFFFD8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R22
+	  64: INCEIPL       	$4
+
+	0x25476A94:  82E8FFDC  lwz r23,-36(r8)
+	  65: GETL       	R8, t52
+	  66: ADDL       	$0xFFFFFFDC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R23
+	  69: INCEIPL       	$4
+
+	0x25476A98:  8308FFE0  lwz r24,-32(r8)
+	  70: GETL       	R8, t56
+	  71: ADDL       	$0xFFFFFFE0, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R24
+	  74: INCEIPL       	$4
+
+	0x25476A9C:  8328FFE4  lwz r25,-28(r8)
+	  75: GETL       	R8, t60
+	  76: ADDL       	$0xFFFFFFE4, t60
+	  77: LDL       	(t60), t62
+	  78: PUTL       	t62, R25
+	  79: INCEIPL       	$4
+
+	0x25476AA0:  8348FFE8  lwz r26,-24(r8)
+	  80: GETL       	R8, t64
+	  81: ADDL       	$0xFFFFFFE8, t64
+	  82: LDL       	(t64), t66
+	  83: PUTL       	t66, R26
+	  84: INCEIPL       	$4
+
+	0x25476AA4:  8368FFEC  lwz r27,-20(r8)
+	  85: GETL       	R8, t68
+	  86: ADDL       	$0xFFFFFFEC, t68
+	  87: LDL       	(t68), t70
+	  88: PUTL       	t70, R27
+	  89: INCEIPL       	$4
+
+	0x25476AA8:  8388FFF0  lwz r28,-16(r8)
+	  90: GETL       	R8, t72
+	  91: ADDL       	$0xFFFFFFF0, t72
+	  92: LDL       	(t72), t74
+	  93: PUTL       	t74, R28
+	  94: INCEIPL       	$4
+
+	0x25476AAC:  83A8FFF4  lwz r29,-12(r8)
+	  95: GETL       	R8, t76
+	  96: ADDL       	$0xFFFFFFF4, t76
+	  97: LDL       	(t76), t78
+	  98: PUTL       	t78, R29
+	  99: INCEIPL       	$4
+
+	0x25476AB0:  83C8FFF8  lwz r30,-8(r8)
+	 100: GETL       	R8, t80
+	 101: ADDL       	$0xFFFFFFF8, t80
+	 102: LDL       	(t80), t82
+	 103: PUTL       	t82, R30
+	 104: INCEIPL       	$4
+
+	0x25476AB4:  83E8FFFC  lwz r31,-4(r8)
+	 105: GETL       	R8, t84
+	 106: ADDL       	$0xFFFFFFFC, t84
+	 107: LDL       	(t84), t86
+	 108: PUTL       	t86, R31
+	 109: INCEIPL       	$4
+
+	0x25476AB8:  7D014378  or r1,r8,r8
+	 110: GETL       	R8, t88
+	 111: PUTL       	t88, R1
+	 112: INCEIPL       	$4
+
+	0x25476ABC:  4E800020  blr
+	 113: GETL       	LR, t90
+	 114: JMPo-r       	t90  ($4)
+
+
+. 0 25476A5C 100
+. 81 01 00 00 81 E8 00 04 80 88 FF B4 7D E8 03 A6 81 C8 FF B8 81 E8 FF BC 7C 80 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+==== BB 708 (0x25477238) approx BBs exec'd 0 ====
+
+	0x25477238:  2E03FFFF  cmpi cr4,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x2547723C:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R26
+	   7: INCEIPL       	$4
+
+	0x25477240:  7D6E00D0  neg r11,r14
+	   8: GETL       	R14, t8
+	   9: NEGL       	t8
+	  10: PUTL       	t8, R11
+	  11: INCEIPL       	$4
+
+	0x25477244:  7CE00026  mfcr r7
+	  12: GETL       	CR, t10
+	  13: PUTL       	t10, R7
+	  14: INCEIPL       	$4
+
+	0x25477248:  54E79FFE  rlwinm r7,r7,19,31,31
+	  15: GETL       	R7, t12
+	  16: ROLL       	$0x13, t12
+	  17: ANDL       	$0x1, t12
+	  18: PUTL       	t12, R7
+	  19: INCEIPL       	$4
+
+	0x2547724C:  556E0FFE  rlwinm r14,r11,1,31,31
+	  20: GETL       	R11, t14
+	  21: SHRL       	$0x1F, t14
+	  22: PUTL       	t14, R14
+	  23: INCEIPL       	$4
+
+	0x25477250:  7CEB7039  and. r11,r7,r14
+	  24: GETL       	R7, t16
+	  25: GETL       	R14, t18
+	  26: ANDL       	t16, t18
+	  27: PUTL       	t18, R11
+	  28: CMP0L       	t18, t20  (-rSo)
+	  29: ICRFL       	t20, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0x25477254:  41820044  bc 12,2,0x25477298
+	  31: Js02o       	$0x25477298
+
+
+. 0 25477238 32
+. 2E 03 FF FF 7C 7A 1B 78 7D 6E 00 D0 7C E0 00 26 54 E7 9F FE 55 6E 0F FE 7C EB 70 39 41 82 00 44
+
+==== BB 709 (0x25477258) approx BBs exec'd 0 ====
+
+	0x25477258:  801B01E0  lwz r0,480(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x1E0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547725C:  3BBB01E0  addi r29,r27,480
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x1E0, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0x25477260:  39200000  li r9,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0x25477264:  2F00FFFF  cmpi cr6,r0,-1
+	  12: GETL       	R0, t8
+	  13: MOVL       	$0xFFFFFFFF, t12
+	  14: CMPL       	t8, t12, t10  (-rSo)
+	  15: ICRFL       	t10, $0x6, CR
+	  16: INCEIPL       	$4
+
+	0x25477268:  419A0028  bc 12,26,0x25477290
+	  17: Js26o       	$0x25477290
+
+
+. 0 25477258 20
+. 80 1B 01 E0 3B BB 01 E0 39 20 00 00 2F 00 FF FF 41 9A 00 28
+
+==== BB 710 (0x25477290) approx BBs exec'd 0 ====
+
+	0x25477290:  2C090000  cmpi cr0,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25477294:  408200DC  bc 4,2,0x25477370
+	   4: Jc02o       	$0x25477370
+
+
+. 0 25477290 8
+. 2C 09 00 00 40 82 00 DC
+
+==== BB 711 (0x25477298) approx BBs exec'd 0 ====
+
+	0x25477298:  4092008C  bc 4,18,0x25477324
+	   0: Jc18o       	$0x25477324
+
+
+. 0 25477298 4
+. 40 92 00 8C
+
+==== BB 712 (0x2547729C) approx BBs exec'd 0 ====
+
+	0x2547729C:  2F170000  cmpi cr6,r23,0
+	   0: GETL       	R23, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x254772A0:  409A01E4  bc 4,26,0x25477484
+	   4: Jc26o       	$0x25477484
+
+
+. 0 2547729C 8
+. 2F 17 00 00 40 9A 01 E4
+
+==== BB 713 (0x254772A4) approx BBs exec'd 0 ====
+
+	0x254772A4:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254772A8:  48000949  bl 0x25477BF0
+	   3: MOVL       	$0x254772AC, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25477BF0  ($4)
+
+
+. 0 254772A4 8
+. 7F E3 FB 78 48 00 09 49
+
+==== BB 714 _dl_load_cache_lookup(0x25477BF0) approx BBs exec'd 0 ====
+
+	0x25477BF0:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25477BF4:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x25477BF8:  4801F409  bl 0x25497000
+	   9: MOVL       	$0x25477BFC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25477BF0 12
+. 94 21 FF C0 7C 88 02 A6 48 01 F4 09
+
+==== BB 715 (0x25477BFC) approx BBs exec'd 0 ====
+
+	0x25477BFC:  93C10038  stw r30,56(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x38, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25477C00:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25477C04:  9261000C  stw r19,12(r1)
+	   8: GETL       	R19, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25477C08:  90810044  stw r4,68(r1)
+	  13: GETL       	R4, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x44, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25477C0C:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0x25477C10:  92E1001C  stw r23,28(r1)
+	  21: GETL       	R23, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25477C14:  7C771B78  or r23,r3,r3
+	  26: GETL       	R3, t20
+	  27: PUTL       	t20, R23
+	  28: INCEIPL       	$4
+
+	0x25477C18:  827E04F4  lwz r19,1268(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4F4, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R19
+	  33: INCEIPL       	$4
+
+	0x25477C1C:  92810010  stw r20,16(r1)
+	  34: GETL       	R20, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x10, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x25477C20:  80130000  lwz r0,0(r19)
+	  39: GETL       	R19, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0x25477C24:  92A10014  stw r21,20(r1)
+	  43: GETL       	R21, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x14, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x25477C28:  70090001  andi. r9,r0,0x1
+	  48: GETL       	R0, t38
+	  49: ANDL       	$0x1, t38
+	  50: PUTL       	t38, R9
+	  51: CMP0L       	t38, t40  (-rSo)
+	  52: ICRFL       	t40, $0x0, CR
+	  53: INCEIPL       	$4
+
+	0x25477C2C:  92C10018  stw r22,24(r1)
+	  54: GETL       	R22, t42
+	  55: GETL       	R1, t44
+	  56: ADDL       	$0x18, t44
+	  57: STL       	t42, (t44)
+	  58: INCEIPL       	$4
+
+	0x25477C30:  93010020  stw r24,32(r1)
+	  59: GETL       	R24, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x20, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0x25477C34:  93210024  stw r25,36(r1)
+	  64: GETL       	R25, t50
+	  65: GETL       	R1, t52
+	  66: ADDL       	$0x24, t52
+	  67: STL       	t50, (t52)
+	  68: INCEIPL       	$4
+
+	0x25477C38:  93410028  stw r26,40(r1)
+	  69: GETL       	R26, t54
+	  70: GETL       	R1, t56
+	  71: ADDL       	$0x28, t56
+	  72: STL       	t54, (t56)
+	  73: INCEIPL       	$4
+
+	0x25477C3C:  9361002C  stw r27,44(r1)
+	  74: GETL       	R27, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x2C, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0x25477C40:  93810030  stw r28,48(r1)
+	  79: GETL       	R28, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x30, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x25477C44:  93A10034  stw r29,52(r1)
+	  84: GETL       	R29, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x34, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0x25477C48:  93E1003C  stw r31,60(r1)
+	  89: GETL       	R31, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	$0x3C, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0x25477C4C:  91810008  stw r12,8(r1)
+	  94: GETL       	R12, t74
+	  95: GETL       	R1, t76
+	  96: ADDL       	$0x8, t76
+	  97: STL       	t74, (t76)
+	  98: INCEIPL       	$4
+
+	0x25477C50:  4082037C  bc 4,2,0x25477FCC
+	  99: Jc02o       	$0x25477FCC
+
+
+. 0 25477BFC 88
+. 93 C1 00 38 7F C8 02 A6 92 61 00 0C 90 81 00 44 7D 80 00 26 92 E1 00 1C 7C 77 1B 78 82 7E 04 F4 92 81 00 10 80 13 00 00 92 A1 00 14 70 09 00 01 92 C1 00 18 93 01 00 20 93 21 00 24 93 41 00 28 93 61 00 2C 93 81 00 30 93 A1 00 34 93 E1 00 3C 91 81 00 08 40 82 03 7C
+
+==== BB 716 (0x25477C54) approx BBs exec'd 0 ====
+
+	0x25477C54:  82DE0254  lwz r22,596(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x254, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R22
+	   4: INCEIPL       	$4
+
+	0x25477C58:  80760000  lwz r3,0(r22)
+	   5: GETL       	R22, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0x25477C5C:  2F830000  cmpi cr7,r3,0
+	   9: GETL       	R3, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x25477C60:  419E00FC  bc 12,30,0x25477D5C
+	  13: Js30o       	$0x25477D5C
+
+
+. 0 25477C54 16
+. 82 DE 02 54 80 76 00 00 2F 83 00 00 41 9E 00 FC
+
+==== BB 717 (0x25477D5C) approx BBs exec'd 0 ====
+
+	0x25477D5C:  807E0260  lwz r3,608(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x260, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25477D60:  38A00001  li r5,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25477D64:  809E025C  lwz r4,604(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x25C, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25477D68:  48004245  bl 0x2547BFAC
+	  13: MOVL       	$0x25477D6C, t10
+	  14: PUTL       	t10, LR
+	  15: JMPo-c       	$0x2547BFAC  ($4)
+
+
+. 0 25477D5C 16
+. 80 7E 02 60 38 A0 00 01 80 9E 02 5C 48 00 42 45
+
+==== BB 718 _dl_sysdep_read_whole_file(0x2547BFAC) approx BBs exec'd 0 ====
+
+	0x2547BFAC:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x2547BFB0:  9421FF60  stwu r1,-160(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF60, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547BFB4:  93810090  stw r28,144(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x90, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547BFB8:  7C9C2378  or r28,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R28
+	  16: INCEIPL       	$4
+
+	0x2547BFBC:  38800000  li r4,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R4
+	  19: INCEIPL       	$4
+
+	0x2547BFC0:  9361008C  stw r27,140(r1)
+	  20: GETL       	R27, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x8C, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x2547BFC4:  93A10094  stw r29,148(r1)
+	  25: GETL       	R29, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x94, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0x2547BFC8:  7CBD2B78  or r29,r5,r5
+	  30: GETL       	R5, t22
+	  31: PUTL       	t22, R29
+	  32: INCEIPL       	$4
+
+	0x2547BFCC:  93E1009C  stw r31,156(r1)
+	  33: GETL       	R31, t24
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x9C, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x2547BFD0:  3B60FFFF  li r27,-1
+	  38: MOVL       	$0xFFFFFFFF, t28
+	  39: PUTL       	t28, R27
+	  40: INCEIPL       	$4
+
+	0x2547BFD4:  93C10098  stw r30,152(r1)
+	  41: GETL       	R30, t30
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x98, t32
+	  44: STL       	t30, (t32)
+	  45: INCEIPL       	$4
+
+	0x2547BFD8:  900100A4  stw r0,164(r1)
+	  46: GETL       	R0, t34
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0xA4, t36
+	  49: STL       	t34, (t36)
+	  50: INCEIPL       	$4
+
+	0x2547BFDC:  48006005  bl 0x25481FE0
+	  51: MOVL       	$0x2547BFE0, t38
+	  52: PUTL       	t38, LR
+	  53: JMPo-c       	$0x25481FE0  ($4)
+
+
+. 0 2547BFAC 52
+. 7C 08 02 A6 94 21 FF 60 93 81 00 90 7C 9C 23 78 38 80 00 00 93 61 00 8C 93 A1 00 94 7C BD 2B 78 93 E1 00 9C 3B 60 FF FF 93 C1 00 98 90 01 00 A4 48 00 60 05
+
+==== BB 719 (0x2547BFE0) approx BBs exec'd 0 ====
+
+	0x2547BFE0:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547BFE4:  38A10010  addi r5,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547BFE8:  38600003  li r3,3
+	   9: MOVL       	$0x3, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x2547BFEC:  7FE4FB78  or r4,r31,r31
+	  12: GETL       	R31, t8
+	  13: PUTL       	t8, R4
+	  14: INCEIPL       	$4
+
+	0x2547BFF0:  41800040  bc 12,0,0x2547C030
+	  15: Js00o       	$0x2547C030
+
+
+. 0 2547BFE0 20
+. 7C 7F 1B 79 38 A1 00 10 38 60 00 03 7F E4 FB 78 41 80 00 40
+
+==== BB 720 (0x2547BFF4) approx BBs exec'd 0 ====
+
+	0x2547BFF4:  48005EB1  bl 0x25481EA4
+	   0: MOVL       	$0x2547BFF8, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25481EA4  ($4)
+
+
+. 0 2547BFF4 4
+. 48 00 5E B1
+
+==== BB 721 (0x2547BFF8) approx BBs exec'd 0 ====
+
+	0x2547BFF8:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547BFFC:  419C002C  bc 12,28,0x2547C028
+	   4: Js28o       	$0x2547C028
+
+
+. 0 2547BFF8 8
+. 2F 83 00 00 41 9C 00 2C
+
+==== BB 722 (0x2547C000) approx BBs exec'd 0 ====
+
+	0x2547C000:  81210044  lwz r9,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547C004:  7FA5EB78  or r5,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547C008:  38C00002  li r6,2
+	   8: MOVL       	$0x2, t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x2547C00C:  7FE7FB78  or r7,r31,r31
+	  11: GETL       	R31, t8
+	  12: PUTL       	t8, R7
+	  13: INCEIPL       	$4
+
+	0x2547C010:  2C890000  cmpi cr1,r9,0
+	  14: GETL       	R9, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0x2547C014:  7D244B78  or r4,r9,r9
+	  18: GETL       	R9, t14
+	  19: PUTL       	t14, R4
+	  20: INCEIPL       	$4
+
+	0x2547C018:  39000000  li r8,0
+	  21: MOVL       	$0x0, t16
+	  22: PUTL       	t16, R8
+	  23: INCEIPL       	$4
+
+	0x2547C01C:  38600000  li r3,0
+	  24: MOVL       	$0x0, t18
+	  25: PUTL       	t18, R3
+	  26: INCEIPL       	$4
+
+	0x2547C020:  913C0000  stw r9,0(r28)
+	  27: GETL       	R9, t20
+	  28: GETL       	R28, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0x2547C024:  40860034  bc 4,6,0x2547C058
+	  31: Jc06o       	$0x2547C058
+
+
+. 0 2547C000 40
+. 81 21 00 44 7F A5 EB 78 38 C0 00 02 7F E7 FB 78 2C 89 00 00 7D 24 4B 78 39 00 00 00 38 60 00 00 91 3C 00 00 40 86 00 34
+
+==== BB 723 (0x2547C058) approx BBs exec'd 0 ====
+
+	0x2547C058:  48006741  bl 0x25482798
+	   0: MOVL       	$0x2547C05C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25482798  ($4)
+
+
+. 0 2547C058 4
+. 48 00 67 41
+
+==== BB 724 (0x2547C05C) approx BBs exec'd 0 ====
+
+	0x2547C05C:  7C7B1B78  or r27,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0x2547C060:  4BFFFFC8  b 0x2547C028
+	   3: JMPo       	$0x2547C028  ($4)
+
+
+. 0 2547C05C 8
+. 7C 7B 1B 78 4B FF FF C8
+
+==== BB 725 (0x2547C028) approx BBs exec'd 0 ====
+
+	0x2547C028:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547C02C:  48005FC5  bl 0x25481FF0
+	   3: MOVL       	$0x2547C030, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25481FF0  ($4)
+
+
+. 0 2547C028 8
+. 7F E3 FB 78 48 00 5F C5
+
+==== BB 726 (0x2547C030) approx BBs exec'd 0 ====
+
+	0x2547C030:  808100A4  lwz r4,164(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xA4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547C034:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547C038:  83810090  lwz r28,144(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x90, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x2547C03C:  8361008C  lwz r27,140(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0x2547C040:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547C044:  83A10094  lwz r29,148(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x94, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x2547C048:  83C10098  lwz r30,152(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x98, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x2547C04C:  83E1009C  lwz r31,156(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x9C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0x2547C050:  382100A0  addi r1,r1,160
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0xA0, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0x2547C054:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+. 0 2547C030 40
+. 80 81 00 A4 7F 63 DB 78 83 81 00 90 83 61 00 8C 7C 88 03 A6 83 A1 00 94 83 C1 00 98 83 E1 00 9C 38 21 00 A0 4E 80 00 20
+
+==== BB 727 (0x25477D6C) approx BBs exec'd 0 ====
+
+	0x25477D6C:  2E03FFFF  cmpi cr4,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x25477D70:  7C7F1B78  or r31,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R31
+	   7: INCEIPL       	$4
+
+	0x25477D74:  41920154  bc 12,18,0x25477EC8
+	   8: Js18o       	$0x25477EC8
+
+
+. 0 25477D6C 12
+. 2E 03 FF FF 7C 7F 1B 78 41 92 01 54
+
+==== BB 728 (0x25477D78) approx BBs exec'd 0 ====
+
+	0x25477D78:  813E025C  lwz r9,604(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x25C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25477D7C:  83A90000  lwz r29,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R29
+	   8: INCEIPL       	$4
+
+	0x25477D80:  289D0010  cmpli cr1,r29,16
+	   9: GETL       	R29, t8
+	  10: MOVL       	$0x10, t12
+	  11: CMPUL       	t8, t12, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25477D84:  408500E8  bc 4,5,0x25477E6C
+	  14: Jc05o       	$0x25477E6C
+
+
+. 0 25477D78 16
+. 81 3E 02 5C 83 A9 00 00 28 9D 00 10 40 85 00 E8
+
+==== BB 729 (0x25477D88) approx BBs exec'd 0 ====
+
+	0x25477D88:  809E0264  lwz r4,612(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x264, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25477D8C:  38A0000B  li r5,11
+	   5: MOVL       	$0xB, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25477D90:  4800B509  bl 0x25483298
+	   8: MOVL       	$0x25477D94, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x25483298  ($4)
+
+
+. 0 25477D88 12
+. 80 9E 02 64 38 A0 00 0B 48 00 B5 09
+
+==== BB 730 (0x25477D94) approx BBs exec'd 0 ====
+
+	0x25477D94:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25477D98:  409A00D4  bc 4,26,0x25477E6C
+	   4: Jc26o       	$0x25477E6C
+
+
+. 0 25477D94 8
+. 2F 03 00 00 40 9A 00 D4
+
+==== BB 731 (0x25477D9C) approx BBs exec'd 0 ====
+
+	0x25477D9C:  815F000C  lwz r10,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25477DA0:  93F60000  stw r31,0(r22)
+	   5: GETL       	R31, t4
+	   6: GETL       	R22, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x25477DA4:  1D0A000C  mulli r8,r10,12
+	   9: GETL       	R10, t8
+	  10: MULL       	$0xC, t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0x25477DA8:  839E0258  lwz r28,600(r30)
+	  13: GETL       	R30, t10
+	  14: ADDL       	$0x258, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0x25477DAC:  38E80017  addi r7,r8,23
+	  18: GETL       	R8, t14
+	  19: ADDL       	$0x17, t14
+	  20: PUTL       	t14, R7
+	  21: INCEIPL       	$4
+
+	0x25477DB0:  54E50038  rlwinm r5,r7,0,0,28
+	  22: GETL       	R7, t16
+	  23: ANDL       	$0xFFFFFFF8, t16
+	  24: PUTL       	t16, R5
+	  25: INCEIPL       	$4
+
+	0x25477DB4:  38C50030  addi r6,r5,48
+	  26: GETL       	R5, t18
+	  27: ADDL       	$0x30, t18
+	  28: PUTL       	t18, R6
+	  29: INCEIPL       	$4
+
+	0x25477DB8:  7C7F2A14  add r3,r31,r5
+	  30: GETL       	R31, t20
+	  31: GETL       	R5, t22
+	  32: ADDL       	t20, t22
+	  33: PUTL       	t22, R3
+	  34: INCEIPL       	$4
+
+	0x25477DBC:  7E06E840  cmpl cr4,r6,r29
+	  35: GETL       	R6, t24
+	  36: GETL       	R29, t26
+	  37: CMPUL       	t24, t26, t28  (-rSo)
+	  38: ICRFL       	t28, $0x4, CR
+	  39: INCEIPL       	$4
+
+	0x25477DC0:  4191001C  bc 12,17,0x25477DDC
+	  40: Js17o       	$0x25477DDC
+
+
+. 0 25477D9C 40
+. 81 5F 00 0C 93 F6 00 00 1D 0A 00 0C 83 9E 02 58 38 E8 00 17 54 E5 00 38 38 C5 00 30 7C 7F 2A 14 7E 06 E8 40 41 91 00 1C
+
+==== BB 732 (0x25477DC4) approx BBs exec'd 0 ====
+
+	0x25477DC4:  809E0268  lwz r4,616(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x268, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25477DC8:  38A00014  li r5,20
+	   5: MOVL       	$0x14, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x25477DCC:  907C0000  stw r3,0(r28)
+	   8: GETL       	R3, t6
+	   9: GETL       	R28, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25477DD0:  4800B4C9  bl 0x25483298
+	  12: MOVL       	$0x25477DD4, t10
+	  13: PUTL       	t10, LR
+	  14: JMPo-c       	$0x25483298  ($4)
+
+
+. 0 25477DC4 16
+. 80 9E 02 68 38 A0 00 14 90 7C 00 00 48 00 B4 C9
+
+==== BB 733 (0x254832B8) approx BBs exec'd 0 ====
+
+	0x254832B8:  548007BE  rlwinm r0,r4,0,30,31
+	   0: GETL       	R4, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x254832BC:  2C000000  cmpi cr0,r0,0
+	   4: GETL       	R0, t2
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x254832C0:  4182002C  bc 12,2,0x254832EC
+	   8: Js02o       	$0x254832EC
+
+
+. 0 254832B8 12
+. 54 80 07 BE 2C 00 00 00 41 82 00 2C
+
+==== BB 734 (0x254832EC) approx BBs exec'd 0 ====
+
+	0x254832EC:  556907BE  rlwinm r9,r11,0,30,31
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0x3, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x254832F0:  2F090000  cmpi cr6,r9,0
+	   4: GETL       	R9, t2
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x254832F4:  409A00C0  bc 4,26,0x254833B4
+	   8: Jc26o       	$0x254833B4
+
+
+. 0 254832EC 12
+. 55 69 07 BE 2F 09 00 00 40 9A 00 C0
+
+==== BB 735 (0x254832F8) approx BBs exec'd 0 ====
+
+	0x254832F8:  54A8F0BE  rlwinm r8,r5,30,2,31
+	   0: GETL       	R5, t0
+	   1: SHRL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x254832FC:  7D6A5B78  or r10,r11,r11
+	   4: GETL       	R11, t2
+	   5: PUTL       	t2, R10
+	   6: INCEIPL       	$4
+
+	0x25483300:  550007BE  rlwinm r0,r8,0,30,31
+	   7: GETL       	R8, t4
+	   8: ANDL       	$0x3, t4
+	   9: PUTL       	t4, R0
+	  10: INCEIPL       	$4
+
+	0x25483304:  7C892378  or r9,r4,r4
+	  11: GETL       	R4, t6
+	  12: PUTL       	t6, R9
+	  13: INCEIPL       	$4
+
+	0x25483308:  2F800001  cmpi cr7,r0,1
+	  14: GETL       	R0, t8
+	  15: MOVL       	$0x1, t12
+	  16: CMPL       	t8, t12, t10  (-rSo)
+	  17: ICRFL       	t10, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2548330C:  419E0150  bc 12,30,0x2548345C
+	  19: Js30o       	$0x2548345C
+
+
+. 0 254832F8 24
+. 54 A8 F0 BE 7D 6A 5B 78 55 00 07 BE 7C 89 23 78 2F 80 00 01 41 9E 01 50
+
+==== BB 736 (0x2548345C) approx BBs exec'd 0 ====
+
+	0x2548345C:  806B0000  lwz r3,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25483460:  3908FFFF  addi r8,r8,-1
+	   4: GETL       	R8, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R8
+	   7: INCEIPL       	$4
+
+	0x25483464:  80C40000  lwz r6,0(r4)
+	   8: GETL       	R4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R6
+	  11: INCEIPL       	$4
+
+	0x25483468:  394B0004  addi r10,r11,4
+	  12: GETL       	R11, t10
+	  13: ADDL       	$0x4, t10
+	  14: PUTL       	t10, R10
+	  15: INCEIPL       	$4
+
+	0x2548346C:  39240004  addi r9,r4,4
+	  16: GETL       	R4, t12
+	  17: ADDL       	$0x4, t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0x25483470:  7F033000  cmp cr6,r3,r6
+	  20: GETL       	R3, t14
+	  21: GETL       	R6, t16
+	  22: CMPL       	t14, t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x6, CR
+	  24: INCEIPL       	$4
+
+	0x25483474:  80EA0000  lwz r7,0(r10)
+	  25: GETL       	R10, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R7
+	  28: INCEIPL       	$4
+
+	0x25483478:  80090000  lwz r0,0(r9)
+	  29: GETL       	R9, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R0
+	  32: INCEIPL       	$4
+
+	0x2548347C:  419A00C8  bc 12,26,0x25483544
+	  33: Js26o       	$0x25483544
+
+
+. 0 2548345C 36
+. 80 6B 00 00 39 08 FF FF 80 C4 00 00 39 4B 00 04 39 24 00 04 7F 03 30 00 80 EA 00 00 80 09 00 00 41 9A 00 C8
+
+==== BB 737 (0x25483544) approx BBs exec'd 0 ====
+
+	0x25483544:  7F870000  cmp cr7,r7,r0
+	   0: GETL       	R7, t0
+	   1: GETL       	R0, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25483548:  806A0004  lwz r3,4(r10)
+	   5: GETL       	R10, t6
+	   6: ADDL       	$0x4, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R3
+	   9: INCEIPL       	$4
+
+	0x2548354C:  80C90004  lwz r6,4(r9)
+	  10: GETL       	R9, t10
+	  11: ADDL       	$0x4, t10
+	  12: LDL       	(t10), t12
+	  13: PUTL       	t12, R6
+	  14: INCEIPL       	$4
+
+	0x25483550:  409EFDF4  bc 4,30,0x25483344
+	  15: Jc30o       	$0x25483344
+
+
+. 0 25483544 16
+. 7F 87 00 00 80 6A 00 04 80 C9 00 04 40 9E FD F4
+
+==== BB 738 (0x25483554) approx BBs exec'd 0 ====
+
+	0x25483554:  7C033000  cmp cr0,r3,r6
+	   0: GETL       	R3, t0
+	   1: GETL       	R6, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25483558:  80EA0008  lwz r7,8(r10)
+	   5: GETL       	R10, t6
+	   6: ADDL       	$0x8, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R7
+	   9: INCEIPL       	$4
+
+	0x2548355C:  80090008  lwz r0,8(r9)
+	  10: GETL       	R9, t10
+	  11: ADDL       	$0x8, t10
+	  12: LDL       	(t10), t12
+	  13: PUTL       	t12, R0
+	  14: INCEIPL       	$4
+
+	0x25483560:  41A2FDD4  bc 13,2,0x25483334
+	  15: Js02o       	$0x25483334
+
+
+. 0 25483554 16
+. 7C 03 30 00 80 EA 00 08 80 09 00 08 41 A2 FD D4
+
+==== BB 739 (0x25483334) approx BBs exec'd 0 ====
+
+	0x25483334:  7C870000  cmp cr1,r7,r0
+	   0: GETL       	R7, t0
+	   1: GETL       	R0, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25483338:  806A000C  lwz r3,12(r10)
+	   5: GETL       	R10, t6
+	   6: ADDL       	$0xC, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R3
+	   9: INCEIPL       	$4
+
+	0x2548333C:  80C9000C  lwz r6,12(r9)
+	  10: GETL       	R9, t10
+	  11: ADDL       	$0xC, t10
+	  12: LDL       	(t10), t12
+	  13: PUTL       	t12, R6
+	  14: INCEIPL       	$4
+
+	0x25483340:  41860148  bc 12,6,0x25483488
+	  15: Js06o       	$0x25483488
+
+
+. 0 25483334 16
+. 7C 87 00 00 80 6A 00 0C 80 C9 00 0C 41 86 01 48
+
+==== BB 740 (0x25483488) approx BBs exec'd 0 ====
+
+	0x25483488:  3908FFFC  addi r8,r8,-4
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x2548348C:  394A0010  addi r10,r10,16
+	   4: GETL       	R10, t2
+	   5: ADDL       	$0x10, t2
+	   6: PUTL       	t2, R10
+	   7: INCEIPL       	$4
+
+	0x25483490:  2F080000  cmpi cr6,r8,0
+	   8: GETL       	R8, t4
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x25483494:  39290010  addi r9,r9,16
+	  12: GETL       	R9, t8
+	  13: ADDL       	$0x10, t8
+	  14: PUTL       	t8, R9
+	  15: INCEIPL       	$4
+
+	0x25483498:  40BAFFD8  bc 5,26,0x25483470
+	  16: Jc26o       	$0x25483470
+
+
+. 0 25483488 20
+. 39 08 FF FC 39 4A 00 10 2F 08 00 00 39 29 00 10 40 BA FF D8
+
+==== BB 741 (0x2548349C) approx BBs exec'd 0 ====
+
+	0x2548349C:  7F833000  cmp cr7,r3,r6
+	   0: GETL       	R3, t0
+	   1: GETL       	R6, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254834A0:  7C033010  subfc r0,r3,r6
+	   5: GETL       	R3, t6
+	   6: GETL       	R6, t8
+	   7: SBBL       	t6, t8  (-wCa)
+	   8: PUTL       	t8, R0
+	   9: INCEIPL       	$4
+
+	0x254834A4:  7C000110  subfe r0,r0,r0
+	  10: GETL       	R0, t10
+	  11: GETL       	R0, t12
+	  12: SBBL       	t10, t12  (-rCa-wCa)
+	  13: PUTL       	t12, R0
+	  14: INCEIPL       	$4
+
+	0x254834A8:  7C0003B8  nand r0,r0,r0
+	  15: GETL       	R0, t14
+	  16: GETL       	R0, t16
+	  17: ANDL       	t14, t16
+	  18: NOTL       	t16
+	  19: PUTL       	t16, R0
+	  20: INCEIPL       	$4
+
+	0x254834AC:  60000001  ori r0,r0,0x1
+	  21: GETL       	R0, t18
+	  22: ORL       	$0x1, t18
+	  23: PUTL       	t18, R0
+	  24: INCEIPL       	$4
+
+	0x254834B0:  409EFEA4  bc 4,30,0x25483354
+	  25: Jc30o       	$0x25483354
+
+
+. 0 2548349C 24
+. 7F 83 30 00 7C 03 30 10 7C 00 01 10 7C 00 03 B8 60 00 00 01 40 9E FE A4
+
+==== BB 742 (0x254834B4) approx BBs exec'd 0 ====
+
+	0x254834B4:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254834B8:  4BFFFE9C  b 0x25483354
+	   3: JMPo       	$0x25483354  ($4)
+
+
+. 0 254834B4 8
+. 38 00 00 00 4B FF FE 9C
+
+==== BB 743 (0x25483354) approx BBs exec'd 0 ====
+
+	0x25483354:  2F800000  cmpi cr7,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25483358:  7C030378  or r3,r0,r0
+	   4: GETL       	R0, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x2548335C:  409E0040  bc 4,30,0x2548339C
+	   7: Jc30o       	$0x2548339C
+
+
+. 0 25483354 12
+. 2F 80 00 00 7C 03 03 78 40 9E 00 40
+
+==== BB 744 (0x25483360) approx BBs exec'd 0 ====
+
+	0x25483360:  54BC003A  rlwinm r28,r5,0,0,29
+	   0: GETL       	R5, t0
+	   1: ANDL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x25483364:  54A507BE  rlwinm r5,r5,0,30,31
+	   4: GETL       	R5, t2
+	   5: ANDL       	$0x3, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0x25483368:  7C84E214  add r4,r4,r28
+	   8: GETL       	R4, t4
+	   9: GETL       	R28, t6
+	  10: ADDL       	t4, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0x2548336C:  7D6BE214  add r11,r11,r28
+	  13: GETL       	R11, t8
+	  14: GETL       	R28, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R11
+	  17: INCEIPL       	$4
+
+	0x25483370:  2C050000  cmpi cr0,r5,0
+	  18: GETL       	R5, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0x25483374:  41820024  bc 12,2,0x25483398
+	  22: Js02o       	$0x25483398
+
+
+. 0 25483360 24
+. 54 BC 00 3A 54 A5 07 BE 7C 84 E2 14 7D 6B E2 14 2C 05 00 00 41 82 00 24
+
+==== BB 745 (0x25477DD4) approx BBs exec'd 0 ====
+
+	0x25477DD4:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25477DD8:  41A2FE8C  bc 13,2,0x25477C64
+	   4: Js02o       	$0x25477C64
+
+
+. 0 25477DD4 8
+. 2C 03 00 00 41 A2 FE 8C
+
+==== BB 746 (0x25477C64) approx BBs exec'd 0 ====
+
+	0x25477C64:  81560000  lwz r10,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0x25477C68:  38600000  li r3,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x25477C6C:  2F0AFFFF  cmpi cr6,r10,-1
+	   7: GETL       	R10, t6
+	   8: MOVL       	$0xFFFFFFFF, t10
+	   9: CMPL       	t6, t10, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x25477C70:  419A0094  bc 12,26,0x25477D04
+	  12: Js26o       	$0x25477D04
+
+
+. 0 25477C64 16
+. 81 56 00 00 38 60 00 00 2F 0A FF FF 41 9A 00 94
+
+==== BB 747 (0x25477C74) approx BBs exec'd 0 ====
+
+	0x25477C74:  831E0258  lwz r24,600(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x258, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0x25477C78:  3AA00000  li r21,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R21
+	   7: INCEIPL       	$4
+
+	0x25477C7C:  83780000  lwz r27,0(r24)
+	   8: GETL       	R24, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R27
+	  11: INCEIPL       	$4
+
+	0x25477C80:  2E1BFFFF  cmpi cr4,r27,-1
+	  12: GETL       	R27, t10
+	  13: MOVL       	$0xFFFFFFFF, t14
+	  14: CMPL       	t10, t14, t12  (-rSo)
+	  15: ICRFL       	t12, $0x4, CR
+	  16: INCEIPL       	$4
+
+	0x25477C84:  41920164  bc 12,18,0x25477DE8
+	  17: Js18o       	$0x25477DE8
+
+
+. 0 25477C74 20
+. 83 1E 02 58 3A A0 00 00 83 78 00 00 2E 1B FF FF 41 92 01 64
+
+==== BB 748 (0x25477C88) approx BBs exec'd 0 ====
+
+	0x25477C88:  83FB0014  lwz r31,20(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x25477C8C:  839E025C  lwz r28,604(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x25C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25477C90:  3B5FFFFF  addi r26,r31,-1
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0xFFFFFFFF, t8
+	  12: PUTL       	t8, R26
+	  13: INCEIPL       	$4
+
+	0x25477C94:  7F95D000  cmp cr7,r21,r26
+	  14: GETL       	R21, t10
+	  15: GETL       	R26, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25477C98:  83BC0000  lwz r29,0(r28)
+	  19: GETL       	R28, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R29
+	  22: INCEIPL       	$4
+
+	0x25477C9C:  3B800000  li r28,0
+	  23: MOVL       	$0x0, t20
+	  24: PUTL       	t20, R28
+	  25: INCEIPL       	$4
+
+	0x25477CA0:  7ECAEA14  add r22,r10,r29
+	  26: GETL       	R10, t22
+	  27: GETL       	R29, t24
+	  28: ADDL       	t22, t24
+	  29: PUTL       	t24, R22
+	  30: INCEIPL       	$4
+
+	0x25477CA4:  7F3BB050  subf r25,r27,r22
+	  31: GETL       	R27, t26
+	  32: GETL       	R22, t28
+	  33: SUBL       	t26, t28
+	  34: PUTL       	t28, R25
+	  35: INCEIPL       	$4
+
+	0x25477CA8:  419D004C  bc 12,29,0x25477CF4
+	  36: Js29o       	$0x25477CF4
+
+
+. 0 25477C88 36
+. 83 FB 00 14 83 9E 02 5C 3B 5F FF FF 7F 95 D0 00 83 BC 00 00 3B 80 00 00 7E CA EA 14 7F 3B B0 50 41 9D 00 4C
+
+==== BB 749 (0x25477CAC) approx BBs exec'd 0 ====
+
+	0x25477CAC:  7C7CD214  add r3,r28,r26
+	   0: GETL       	R28, t0
+	   1: GETL       	R26, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25477CB0:  80980000  lwz r4,0(r24)
+	   5: GETL       	R24, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x25477CB4:  7C7D0E70  srawi r29,r3,1
+	   9: GETL       	R3, t8
+	  10: SARL       	$0x1, t8  (-wCa)
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x25477CB8:  7FBD0194  addze r29,r29
+	  13: GETL       	R29, t10
+	  14: ADCL       	$0x0, t10  (-rCa-wCa)
+	  15: PUTL       	t10, R29
+	  16: INCEIPL       	$4
+
+	0x25477CBC:  7EE3BB78  or r3,r23,r23
+	  17: GETL       	R23, t12
+	  18: PUTL       	t12, R3
+	  19: INCEIPL       	$4
+
+	0x25477CC0:  1FFD0018  mulli r31,r29,24
+	  20: GETL       	R29, t14
+	  21: MULL       	$0x18, t14
+	  22: PUTL       	t14, R31
+	  23: INCEIPL       	$4
+
+	0x25477CC4:  7D9F2214  add r12,r31,r4
+	  24: GETL       	R31, t16
+	  25: GETL       	R4, t18
+	  26: ADDL       	t16, t18
+	  27: PUTL       	t18, R12
+	  28: INCEIPL       	$4
+
+	0x25477CC8:  800C0034  lwz r0,52(r12)
+	  29: GETL       	R12, t20
+	  30: ADDL       	$0x34, t20
+	  31: LDL       	(t20), t22
+	  32: PUTL       	t22, R0
+	  33: INCEIPL       	$4
+
+	0x25477CCC:  7C00C840  cmpl cr0,r0,r25
+	  34: GETL       	R0, t24
+	  35: GETL       	R25, t26
+	  36: CMPUL       	t24, t26, t28  (-rSo)
+	  37: ICRFL       	t28, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0x25477CD0:  7C9B0214  add r4,r27,r0
+	  39: GETL       	R27, t30
+	  40: GETL       	R0, t32
+	  41: ADDL       	t30, t32
+	  42: PUTL       	t32, R4
+	  43: INCEIPL       	$4
+
+	0x25477CD4:  40800020  bc 4,0,0x25477CF4
+	  44: Jc00o       	$0x25477CF4
+
+
+. 0 25477CAC 44
+. 7C 7C D2 14 80 98 00 00 7C 7D 0E 70 7F BD 01 94 7E E3 BB 78 1F FD 00 18 7D 9F 22 14 80 0C 00 34 7C 00 C8 40 7C 9B 02 14 40 80 00 20
+
+==== BB 750 (0x25477CD8) approx BBs exec'd 0 ====
+
+	0x25477CD8:  4BFFFDFD  bl 0x25477AD4
+	   0: MOVL       	$0x25477CDC, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25477AD4  ($4)
+
+
+. 0 25477CD8 4
+. 4B FF FD FD
+
+==== BB 751 _dl_cache_libcmp(0x25477AD4) approx BBs exec'd 0 ====
+
+	0x25477AD4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25477AD8:  7C671B78  or r7,r3,r3
+	   6: GETL       	R3, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x25477ADC:  89430000  lbz r10,0(r3)
+	   9: GETL       	R3, t6
+	  10: LDB       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x25477AE0:  2F8A0000  cmpi cr7,r10,0
+	  13: GETL       	R10, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x25477AE4:  419E00DC  bc 12,30,0x25477BC0
+	  17: Js30o       	$0x25477BC0
+
+
+. 0 25477AD4 20
+. 94 21 FF F0 7C 67 1B 78 89 43 00 00 2F 8A 00 00 41 9E 00 DC
+
+==== BB 752 (0x25477AE8) approx BBs exec'd 0 ====
+
+	0x25477AE8:  88640000  lbz r3,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25477AEC:  380AFFD0  addi r0,r10,-48
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0xFFFFFFD0, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25477AF0:  7D4B5378  or r11,r10,r10
+	   8: GETL       	R10, t6
+	   9: PUTL       	t6, R11
+	  10: INCEIPL       	$4
+
+	0x25477AF4:  28000009  cmpli cr0,r0,9
+	  11: GETL       	R0, t8
+	  12: MOVL       	$0x9, t12
+	  13: CMPUL       	t8, t12, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x25477AF8:  4181008C  bc 12,1,0x25477B84
+	  16: Js01o       	$0x25477B84
+
+
+. 0 25477AE8 20
+. 88 64 00 00 38 0A FF D0 7D 4B 53 78 28 00 00 09 41 81 00 8C
+
+==== BB 753 (0x25477B84) approx BBs exec'd 0 ====
+
+	0x25477B84:  3903FFD0  addi r8,r3,-48
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFD0, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x25477B88:  7C601B78  or r0,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R0
+	   6: INCEIPL       	$4
+
+	0x25477B8C:  28080009  cmpli cr0,r8,9
+	   7: GETL       	R8, t4
+	   8: MOVL       	$0x9, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25477B90:  40810048  bc 4,1,0x25477BD8
+	  12: Jc01o       	$0x25477BD8
+
+
+. 0 25477B84 16
+. 39 03 FF D0 7C 60 1B 78 28 08 00 09 40 81 00 48
+
+==== BB 754 (0x25477B94) approx BBs exec'd 0 ====
+
+	0x25477B94:  5543063E  rlwinm r3,r10,0,24,31
+	   0: GETL       	R10, t0
+	   1: ANDL       	$0xFF, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x25477B98:  5400063E  rlwinm r0,r0,0,24,31
+	   4: GETL       	R0, t2
+	   5: ANDL       	$0xFF, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x25477B9C:  7C830000  cmp cr1,r3,r0
+	   8: GETL       	R3, t4
+	   9: GETL       	R0, t6
+	  10: CMPL       	t4, t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0x25477BA0:  40860044  bc 4,6,0x25477BE4
+	  13: Jc06o       	$0x25477BE4
+
+
+. 0 25477B94 16
+. 55 43 06 3E 54 00 06 3E 7C 83 00 00 40 86 00 44
+
+==== BB 755 (0x25477BA4) approx BBs exec'd 0 ====
+
+	0x25477BA4:  8D470001  lbzu r10,1(r7)
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R7
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0x25477BA8:  8C640001  lbzu r3,1(r4)
+	   6: GETL       	R4, t4
+	   7: ADDL       	$0x1, t4
+	   8: PUTL       	t4, R4
+	   9: LDB       	(t4), t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x25477BAC:  2F0A0000  cmpi cr6,r10,0
+	  12: GETL       	R10, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x6, CR
+	  15: INCEIPL       	$4
+
+	0x25477BB0:  409AFF3C  bc 4,26,0x25477AEC
+	  16: Jc26o       	$0x25477AEC
+
+
+. 0 25477BA4 16
+. 8D 47 00 01 8C 64 00 01 2F 0A 00 00 40 9A FF 3C
+
+==== BB 756 (0x25477AEC) approx BBs exec'd 0 ====
+
+	0x25477AEC:  380AFFD0  addi r0,r10,-48
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0xFFFFFFD0, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25477AF0:  7D4B5378  or r11,r10,r10
+	   4: GETL       	R10, t2
+	   5: PUTL       	t2, R11
+	   6: INCEIPL       	$4
+
+	0x25477AF4:  28000009  cmpli cr0,r0,9
+	   7: GETL       	R0, t4
+	   8: MOVL       	$0x9, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25477AF8:  4181008C  bc 12,1,0x25477B84
+	  12: Js01o       	$0x25477B84
+
+
+. 0 25477AEC 16
+. 38 0A FF D0 7D 4B 53 78 28 00 00 09 41 81 00 8C
+
+==== BB 757 (0x25477BE4) approx BBs exec'd 0 ====
+
+	0x25477BE4:  7C601850  subf r3,r0,r3
+	   0: GETL       	R0, t0
+	   1: GETL       	R3, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25477BE8:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x25477BEC:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+. 0 25477BE4 12
+. 7C 60 18 50 38 21 00 10 4E 80 00 20
+
+==== BB 758 (0x25477CDC) approx BBs exec'd 0 ====
+
+	0x25477CDC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25477CE0:  419E0214  bc 12,30,0x25477EF4
+	   4: Js30o       	$0x25477EF4
+
+
+. 0 25477CDC 8
+. 2F 83 00 00 41 9E 02 14
+
+==== BB 759 (0x25477CE4) approx BBs exec'd 0 ====
+
+	0x25477CE4:  409C006C  bc 4,28,0x25477D50
+	   0: Jc28o       	$0x25477D50
+
+
+. 0 25477CE4 4
+. 40 9C 00 6C
+
+==== BB 760 (0x25477CE8) approx BBs exec'd 0 ====
+
+	0x25477CE8:  3B9D0001  addi r28,r29,1
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x25477CEC:  7F9CD000  cmp cr7,r28,r26
+	   4: GETL       	R28, t2
+	   5: GETL       	R26, t4
+	   6: CMPL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25477CF0:  409DFFBC  bc 4,29,0x25477CAC
+	   9: Jc29o       	$0x25477CAC
+
+
+. 0 25477CE8 12
+. 3B 9D 00 01 7F 9C D0 00 40 9D FF BC
+
+==== BB 761 (0x25477D50) approx BBs exec'd 0 ====
+
+	0x25477D50:  3B5DFFFF  addi r26,r29,-1
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x25477D54:  7F9CD000  cmp cr7,r28,r26
+	   4: GETL       	R28, t2
+	   5: GETL       	R26, t4
+	   6: CMPL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25477D58:  4BFFFF98  b 0x25477CF0
+	   9: JMPo       	$0x25477CF0  ($4)
+
+
+. 0 25477D50 12
+. 3B 5D FF FF 7F 9C D0 00 4B FF FF 98
+
+==== BB 762 (0x25477CF0) approx BBs exec'd 0 ====
+
+	0x25477CF0:  409DFFBC  bc 4,29,0x25477CAC
+	   0: Jc29o       	$0x25477CAC
+
+
+. 0 25477CF0 4
+. 40 9D FF BC
+
+==== BB 763 (0x25477AFC) approx BBs exec'd 0 ====
+
+	0x25477AFC:  38A3FFD0  addi r5,r3,-48
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFD0, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x25477B00:  7C691B78  or r9,r3,r3
+	   4: GETL       	R3, t2
+	   5: PUTL       	t2, R9
+	   6: INCEIPL       	$4
+
+	0x25477B04:  28850009  cmpli cr1,r5,9
+	   7: GETL       	R5, t4
+	   8: MOVL       	$0x9, t8
+	   9: CMPUL       	t4, t8, t6  (-rSo)
+	  10: ICRFL       	t6, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x25477B08:  418500C4  bc 12,5,0x25477BCC
+	  12: Js05o       	$0x25477BCC
+
+
+. 0 25477AFC 16
+. 38 A3 FF D0 7C 69 1B 78 28 85 00 09 41 85 00 C4
+
+==== BB 764 (0x25477B0C) approx BBs exec'd 0 ====
+
+	0x25477B0C:  8D470001  lbzu r10,1(r7)
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R7
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R10
+	   5: INCEIPL       	$4
+
+	0x25477B10:  556B063E  rlwinm r11,r11,0,24,31
+	   6: GETL       	R11, t4
+	   7: ANDL       	$0xFF, t4
+	   8: PUTL       	t4, R11
+	   9: INCEIPL       	$4
+
+	0x25477B14:  5529063E  rlwinm r9,r9,0,24,31
+	  10: GETL       	R9, t6
+	  11: ANDL       	$0xFF, t6
+	  12: PUTL       	t6, R9
+	  13: INCEIPL       	$4
+
+	0x25477B18:  390BFFD0  addi r8,r11,-48
+	  14: GETL       	R11, t8
+	  15: ADDL       	$0xFFFFFFD0, t8
+	  16: PUTL       	t8, R8
+	  17: INCEIPL       	$4
+
+	0x25477B1C:  386AFFD0  addi r3,r10,-48
+	  18: GETL       	R10, t10
+	  19: ADDL       	$0xFFFFFFD0, t10
+	  20: PUTL       	t10, R3
+	  21: INCEIPL       	$4
+
+	0x25477B20:  3969FFD0  addi r11,r9,-48
+	  22: GETL       	R9, t12
+	  23: ADDL       	$0xFFFFFFD0, t12
+	  24: PUTL       	t12, R11
+	  25: INCEIPL       	$4
+
+	0x25477B24:  2B830009  cmpli cr7,r3,9
+	  26: GETL       	R3, t14
+	  27: MOVL       	$0x9, t18
+	  28: CMPUL       	t14, t18, t16  (-rSo)
+	  29: ICRFL       	t16, $0x7, CR
+	  30: INCEIPL       	$4
+
+	0x25477B28:  38840001  addi r4,r4,1
+	  31: GETL       	R4, t20
+	  32: ADDL       	$0x1, t20
+	  33: PUTL       	t20, R4
+	  34: INCEIPL       	$4
+
+	0x25477B2C:  419D0020  bc 12,29,0x25477B4C
+	  35: Js29o       	$0x25477B4C
+
+
+. 0 25477B0C 36
+. 8D 47 00 01 55 6B 06 3E 55 29 06 3E 39 0B FF D0 38 6A FF D0 39 69 FF D0 2B 83 00 09 38 84 00 01 41 9D 00 20
+
+==== BB 765 (0x25477B4C) approx BBs exec'd 0 ====
+
+	0x25477B4C:  88640000  lbz r3,0(r4)
+	   0: GETL       	R4, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25477B50:  48000014  b 0x25477B64
+	   4: JMPo       	$0x25477B64  ($4)
+
+
+. 0 25477B4C 8
+. 88 64 00 00 48 00 00 14
+
+==== BB 766 (0x25477B64) approx BBs exec'd 0 ====
+
+	0x25477B64:  38A3FFD0  addi r5,r3,-48
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0xFFFFFFD0, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x25477B68:  2B050009  cmpli cr6,r5,9
+	   4: GETL       	R5, t2
+	   5: MOVL       	$0x9, t6
+	   6: CMPUL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25477B6C:  4099FFE8  bc 4,25,0x25477B54
+	   9: Jc25o       	$0x25477B54
+
+
+. 0 25477B64 12
+. 38 A3 FF D0 2B 05 00 09 40 99 FF E8
+
+==== BB 767 (0x25477B70) approx BBs exec'd 0 ====
+
+	0x25477B70:  7F885800  cmp cr7,r8,r11
+	   0: GETL       	R8, t0
+	   1: GETL       	R11, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25477B74:  41BE0038  bc 13,30,0x25477BAC
+	   5: Js30o       	$0x25477BAC
+
+
+. 0 25477B70 8
+. 7F 88 58 00 41 BE 00 38
+
+==== BB 768 (0x25477BAC) approx BBs exec'd 0 ====
+
+	0x25477BAC:  2F0A0000  cmpi cr6,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25477BB0:  409AFF3C  bc 4,26,0x25477AEC
+	   4: Jc26o       	$0x25477AEC
+
+
+. 0 25477BAC 8
+. 2F 0A 00 00 40 9A FF 3C
+
+==== BB 769 (0x25477BB4) approx BBs exec'd 0 ====
+
+	0x25477BB4:  7C635050  subf r3,r3,r10
+	   0: GETL       	R3, t0
+	   1: GETL       	R10, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25477BB8:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x25477BBC:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+. 0 25477BB4 12
+. 7C 63 50 50 38 21 00 10 4E 80 00 20
+
+==== BB 770 (0x25477EF4) approx BBs exec'd 0 ====
+
+	0x25477EF4:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25477EF8:  7FB6EB78  or r22,r29,r29
+	   4: GETL       	R29, t4
+	   5: PUTL       	t4, R22
+	   6: INCEIPL       	$4
+
+	0x25477EFC:  7FFCFB78  or r28,r31,r31
+	   7: GETL       	R31, t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x25477F00:  41BD0020  bc 13,29,0x25477F20
+	  10: Js29o       	$0x25477F20
+
+
+. 0 25477EF4 16
+. 2F 9D 00 00 7F B6 EB 78 7F FC FB 78 41 BD 00 20
+
+==== BB 771 (0x25477F20) approx BBs exec'd 0 ====
+
+	0x25477F20:  80D80000  lwz r6,0(r24)
+	   0: GETL       	R24, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x25477F24:  7EE3BB78  or r3,r23,r23
+	   4: GETL       	R23, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x25477F28:  3B9CFFE8  addi r28,r28,-24
+	   7: GETL       	R28, t6
+	   8: ADDL       	$0xFFFFFFE8, t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0x25477F2C:  7D3F3214  add r9,r31,r6
+	  11: GETL       	R31, t8
+	  12: GETL       	R6, t10
+	  13: ADDL       	t8, t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0x25477F30:  80A9001C  lwz r5,28(r9)
+	  16: GETL       	R9, t12
+	  17: ADDL       	$0x1C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R5
+	  20: INCEIPL       	$4
+
+	0x25477F34:  7C85C840  cmpl cr1,r5,r25
+	  21: GETL       	R5, t16
+	  22: GETL       	R25, t18
+	  23: CMPUL       	t16, t18, t20  (-rSo)
+	  24: ICRFL       	t20, $0x1, CR
+	  25: INCEIPL       	$4
+
+	0x25477F38:  7C9B2A14  add r4,r27,r5
+	  26: GETL       	R27, t22
+	  27: GETL       	R5, t24
+	  28: ADDL       	t22, t24
+	  29: PUTL       	t24, R4
+	  30: INCEIPL       	$4
+
+	0x25477F3C:  4184FFCC  bc 12,4,0x25477F08
+	  31: Js04o       	$0x25477F08
+
+
+. 0 25477F20 32
+. 80 D8 00 00 7E E3 BB 78 3B 9C FF E8 7D 3F 32 14 80 A9 00 1C 7C 85 C8 40 7C 9B 2A 14 41 84 FF CC
+
+==== BB 772 (0x25477F08) approx BBs exec'd 0 ====
+
+	0x25477F08:  4BFFFBCD  bl 0x25477AD4
+	   0: MOVL       	$0x25477F0C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25477AD4  ($4)
+
+
+. 0 25477F08 4
+. 4B FF FB CD
+
+==== BB 773 (0x25477F0C) approx BBs exec'd 0 ====
+
+	0x25477F0C:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25477F10:  409A0030  bc 4,26,0x25477F40
+	   4: Jc26o       	$0x25477F40
+
+
+. 0 25477F0C 8
+. 2F 03 00 00 40 9A 00 30
+
+==== BB 774 (0x25477F40) approx BBs exec'd 0 ====
+
+	0x25477F40:  7E1DB000  cmp cr4,r29,r22
+	   0: GETL       	R29, t0
+	   1: GETL       	R22, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x25477F44:  80F80000  lwz r7,0(r24)
+	   5: GETL       	R24, t6
+	   6: LDL       	(t6), t8
+	   7: PUTL       	t8, R7
+	   8: INCEIPL       	$4
+
+	0x25477F48:  7FFF3A14  add r31,r31,r7
+	   9: GETL       	R31, t10
+	  10: GETL       	R7, t12
+	  11: ADDL       	t10, t12
+	  12: PUTL       	t12, R31
+	  13: INCEIPL       	$4
+
+	0x25477F4C:  3B9F0030  addi r28,r31,48
+	  14: GETL       	R31, t14
+	  15: ADDL       	$0x30, t14
+	  16: PUTL       	t14, R28
+	  17: INCEIPL       	$4
+
+	0x25477F50:  40910024  bc 4,17,0x25477F74
+	  18: Jc17o       	$0x25477F74
+
+
+. 0 25477F40 20
+. 7E 1D B0 00 80 F8 00 00 7F FF 3A 14 3B 9F 00 30 40 91 00 24
+
+==== BB 775 (0x25477F74) approx BBs exec'd 0 ====
+
+	0x25477F74:  811F0030  lwz r8,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25477F78:  691F0001  xori r31,r8,0x1
+	   5: GETL       	R8, t4
+	   6: XORL       	$0x1, t4
+	   7: PUTL       	t4, R31
+	   8: INCEIPL       	$4
+
+	0x25477F7C:  213F0000  subfic r9,r31,0
+	   9: GETL       	R31, t6
+	  10: MOVL       	$0x0, t8
+	  11: SBBL       	t6, t8  (-wCa)
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0x25477F80:  7FE9F914  adde r31,r9,r31
+	  14: GETL       	R9, t10
+	  15: GETL       	R31, t12
+	  16: ADCL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R31
+	  18: INCEIPL       	$4
+
+	0x25477F84:  69140003  xori r20,r8,0x3
+	  19: GETL       	R8, t14
+	  20: XORL       	$0x3, t14
+	  21: PUTL       	t14, R20
+	  22: INCEIPL       	$4
+
+	0x25477F88:  21540000  subfic r10,r20,0
+	  23: GETL       	R20, t16
+	  24: MOVL       	$0x0, t18
+	  25: SBBL       	t16, t18  (-wCa)
+	  26: PUTL       	t18, R10
+	  27: INCEIPL       	$4
+
+	0x25477F8C:  7E8AA114  adde r20,r10,r20
+	  28: GETL       	R10, t20
+	  29: GETL       	R20, t22
+	  30: ADCL       	t20, t22  (-rCa-wCa)
+	  31: PUTL       	t22, R20
+	  32: INCEIPL       	$4
+
+	0x25477F90:  7FEBA379  or. r11,r31,r20
+	  33: GETL       	R31, t24
+	  34: GETL       	R20, t26
+	  35: ORL       	t26, t24
+	  36: PUTL       	t24, R11
+	  37: CMP0L       	t24, t28  (-rSo)
+	  38: ICRFL       	t28, $0x0, CR
+	  39: INCEIPL       	$4
+
+	0x25477F94:  41820024  bc 12,2,0x25477FB8
+	  40: Js02o       	$0x25477FB8
+
+
+. 0 25477F74 36
+. 81 1F 00 30 69 1F 00 01 21 3F 00 00 7F E9 F9 14 69 14 00 03 21 54 00 00 7E 8A A1 14 7F EB A3 79 41 82 00 24
+
+==== BB 776 (0x25477F98) approx BBs exec'd 0 ====
+
+	0x25477F98:  80FC0008  lwz r7,8(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x25477F9C:  7F07C840  cmpl cr6,r7,r25
+	   5: GETL       	R7, t4
+	   6: GETL       	R25, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x25477FA0:  40980018  bc 4,24,0x25477FB8
+	  10: Jc24o       	$0x25477FB8
+
+
+. 0 25477F98 12
+. 80 FC 00 08 7F 07 C8 40 40 98 00 18
+
+==== BB 777 (0x25477FA4) approx BBs exec'd 0 ====
+
+	0x25477FA4:  2E150000  cmpi cr4,r21,0
+	   0: GETL       	R21, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x25477FA8:  41920128  bc 12,18,0x254780D0
+	   4: Js18o       	$0x254780D0
+
+
+. 0 25477FA4 8
+. 2E 15 00 00 41 92 01 28
+
+==== BB 778 (0x254780D0) approx BBs exec'd 0 ====
+
+	0x254780D0:  81330008  lwz r9,8(r19)
+	   0: GETL       	R19, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x254780D4:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254780D8:  41860010  bc 12,6,0x254780E8
+	   9: Js06o       	$0x254780E8
+
+
+. 0 254780D0 12
+. 81 33 00 08 2C 89 00 00 41 86 00 10
+
+==== BB 779 (0x254780DC) approx BBs exec'd 0 ====
+
+	0x254780DC:  817C000C  lwz r11,12(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254780E0:  7C0B4840  cmpl cr0,r11,r9
+	   5: GETL       	R11, t4
+	   6: GETL       	R9, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x254780E4:  4181FED4  bc 12,1,0x25477FB8
+	  10: Js01o       	$0x25477FB8
+
+
+. 0 254780DC 12
+. 81 7C 00 0C 7C 0B 48 40 41 81 FE D4
+
+==== BB 780 (0x254780E8) approx BBs exec'd 0 ====
+
+	0x254780E8:  80D3003C  lwz r6,60(r19)
+	   0: GETL       	R19, t0
+	   1: ADDL       	$0x3C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254780EC:  3A800000  li r20,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R20
+	   7: INCEIPL       	$4
+
+	0x254780F0:  807C0010  lwz r3,16(r28)
+	   8: GETL       	R28, t6
+	   9: ADDL       	$0x10, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x254780F4:  66898000  oris r9,r20,0x8000
+	  13: MOVL       	$0x80000000, t10
+	  14: PUTL       	t10, R9
+	  15: INCEIPL       	$4
+
+	0x254780F8:  801C0014  lwz r0,20(r28)
+	  16: GETL       	R28, t12
+	  17: ADDL       	$0x14, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x254780FC:  7D2548F8  nor r5,r9,r9
+	  21: GETL       	R9, t16
+	  22: NOTL       	t16
+	  23: PUTL       	t16, R5
+	  24: INCEIPL       	$4
+
+	0x25478100:  7CCC30F8  nor r12,r6,r6
+	  25: GETL       	R6, t18
+	  26: NOTL       	t18
+	  27: PUTL       	t18, R12
+	  28: INCEIPL       	$4
+
+	0x25478104:  7C642838  and r4,r3,r5
+	  29: GETL       	R3, t20
+	  30: GETL       	R5, t22
+	  31: ANDL       	t20, t22
+	  32: PUTL       	t22, R4
+	  33: INCEIPL       	$4
+
+	0x25478108:  7C1C6038  and r28,r0,r12
+	  34: GETL       	R0, t24
+	  35: GETL       	R12, t26
+	  36: ANDL       	t24, t26
+	  37: PUTL       	t26, R28
+	  38: INCEIPL       	$4
+
+	0x2547810C:  7C80E379  or. r0,r4,r28
+	  39: GETL       	R4, t28
+	  40: GETL       	R28, t30
+	  41: ORL       	t30, t28
+	  42: PUTL       	t28, R0
+	  43: CMP0L       	t28, t32  (-rSo)
+	  44: ICRFL       	t32, $0x0, CR
+	  45: INCEIPL       	$4
+
+	0x25478110:  4082FEA8  bc 4,2,0x25477FB8
+	  46: Jc02o       	$0x25477FB8
+
+
+. 0 254780E8 44
+. 80 D3 00 3C 3A 80 00 00 80 7C 00 10 66 89 80 00 80 1C 00 14 7D 25 48 F8 7C CC 30 F8 7C 64 28 38 7C 1C 60 38 7C 80 E3 79 40 82 FE A8
+
+==== BB 781 (0x25478114) approx BBs exec'd 0 ====
+
+	0x25478114:  83F30038  lwz r31,56(r19)
+	   0: GETL       	R19, t0
+	   1: ADDL       	$0x38, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x25478118:  7EA7DA14  add r21,r7,r27
+	   5: GETL       	R7, t4
+	   6: GETL       	R27, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R21
+	   9: INCEIPL       	$4
+
+	0x2547811C:  7F1F4000  cmp cr6,r31,r8
+	  10: GETL       	R31, t8
+	  11: GETL       	R8, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0x25478120:  409AFE98  bc 4,26,0x25477FB8
+	  15: Jc26o       	$0x25477FB8
+
+
+. 0 25478114 16
+. 83 F3 00 38 7E A7 DA 14 7F 1F 40 00 40 9A FE 98
+
+==== BB 782 (0x25478124) approx BBs exec'd 0 ====
+
+	0x25478124:  4BFFFBD0  b 0x25477CF4
+	   0: JMPo       	$0x25477CF4  ($4)
+
+
+. 0 25478124 4
+. 4B FF FB D0
+
+==== BB 783 (0x25477CF4) approx BBs exec'd 0 ====
+
+	0x25477CF4:  82F30000  lwz r23,0(r19)
+	   0: GETL       	R19, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R23
+	   3: INCEIPL       	$4
+
+	0x25477CF8:  72EB0001  andi. r11,r23,0x1
+	   4: GETL       	R23, t4
+	   5: ANDL       	$0x1, t4
+	   6: PUTL       	t4, R11
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x25477CFC:  408201AC  bc 4,2,0x25477EA8
+	  10: Jc02o       	$0x25477EA8
+
+
+. 0 25477CF4 12
+. 82 F3 00 00 72 EB 00 01 40 82 01 AC
+
+==== BB 784 (0x25477D00) approx BBs exec'd 0 ====
+
+	0x25477D00:  7EA3AB78  or r3,r21,r21
+	   0: GETL       	R21, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25477D04:  82610044  lwz r19,68(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x44, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R19
+	   7: INCEIPL       	$4
+
+	0x25477D08:  80E10008  lwz r7,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R7
+	  12: INCEIPL       	$4
+
+	0x25477D0C:  7E6803A6  mtlr r19
+	  13: GETL       	R19, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x25477D10:  82810010  lwz r20,16(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x10, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R20
+	  20: INCEIPL       	$4
+
+	0x25477D14:  8261000C  lwz r19,12(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R19
+	  25: INCEIPL       	$4
+
+	0x25477D18:  7CE08120  mtcrf 0x8,r7
+	  26: GETL       	R7, t20
+	  27: ICRFL       	t20, $0x4, CR
+	  28: INCEIPL       	$4
+
+	0x25477D1C:  82A10014  lwz r21,20(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x14, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R21
+	  33: INCEIPL       	$4
+
+	0x25477D20:  82C10018  lwz r22,24(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x18, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R22
+	  38: INCEIPL       	$4
+
+	0x25477D24:  82E1001C  lwz r23,28(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x1C, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R23
+	  43: INCEIPL       	$4
+
+	0x25477D28:  83010020  lwz r24,32(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x20, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R24
+	  48: INCEIPL       	$4
+
+	0x25477D2C:  83210024  lwz r25,36(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x24, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R25
+	  53: INCEIPL       	$4
+
+	0x25477D30:  83410028  lwz r26,40(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x28, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R26
+	  58: INCEIPL       	$4
+
+	0x25477D34:  8361002C  lwz r27,44(r1)
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x2C, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R27
+	  63: INCEIPL       	$4
+
+	0x25477D38:  83810030  lwz r28,48(r1)
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x30, t50
+	  66: LDL       	(t50), t52
+	  67: PUTL       	t52, R28
+	  68: INCEIPL       	$4
+
+	0x25477D3C:  83A10034  lwz r29,52(r1)
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x34, t54
+	  71: LDL       	(t54), t56
+	  72: PUTL       	t56, R29
+	  73: INCEIPL       	$4
+
+	0x25477D40:  83C10038  lwz r30,56(r1)
+	  74: GETL       	R1, t58
+	  75: ADDL       	$0x38, t58
+	  76: LDL       	(t58), t60
+	  77: PUTL       	t60, R30
+	  78: INCEIPL       	$4
+
+	0x25477D44:  83E1003C  lwz r31,60(r1)
+	  79: GETL       	R1, t62
+	  80: ADDL       	$0x3C, t62
+	  81: LDL       	(t62), t64
+	  82: PUTL       	t64, R31
+	  83: INCEIPL       	$4
+
+	0x25477D48:  38210040  addi r1,r1,64
+	  84: GETL       	R1, t66
+	  85: ADDL       	$0x40, t66
+	  86: PUTL       	t66, R1
+	  87: INCEIPL       	$4
+
+	0x25477D4C:  4E800020  blr
+	  88: GETL       	LR, t68
+	  89: JMPo-r       	t68  ($4)
+
+
+. 0 25477D00 80
+. 7E A3 AB 78 82 61 00 44 80 E1 00 08 7E 68 03 A6 82 81 00 10 82 61 00 0C 7C E0 81 20 82 A1 00 14 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 83 C1 00 38 83 E1 00 3C 38 21 00 40 4E 80 00 20
+
+==== BB 785 (0x254772AC) approx BBs exec'd 0 ====
+
+	0x254772AC:  7C721B79  or. r18,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R18
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254772B0:  7F800026  mfcr r28
+	   5: GETL       	CR, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x254772B4:  41820020  bc 12,2,0x254772D4
+	   8: Js02o       	$0x254772D4
+
+
+. 0 254772AC 12
+. 7C 72 1B 79 7F 80 00 26 41 82 00 20
+
+==== BB 786 (0x254772B8) approx BBs exec'd 0 ====
+
+	0x254772B8:  418E017C  bc 12,14,0x25477434
+	   0: Js14o       	$0x25477434
+
+
+. 0 254772B8 4
+. 41 8E 01 7C
+
+==== BB 787 (0x254772BC) approx BBs exec'd 0 ====
+
+	0x254772BC:  7F69DB78  or r9,r27,r27
+	   0: GETL       	R27, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x254772C0:  83A901FC  lwz r29,508(r9)
+	   3: GETL       	R9, t2
+	   4: ADDL       	$0x1FC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x254772C4:  73A90800  andi. r9,r29,0x800
+	   8: GETL       	R29, t6
+	   9: ANDL       	$0x800, t6
+	  10: PUTL       	t6, R9
+	  11: CMP0L       	t6, t8  (-rSo)
+	  12: ICRFL       	t8, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x254772C8:  408201D4  bc 4,2,0x2547749C
+	  14: Jc02o       	$0x2547749C
+
+
+. 0 254772BC 16
+. 7F 69 DB 78 83 A9 01 FC 73 A9 08 00 40 82 01 D4
+
+==== BB 788 (0x254772CC) approx BBs exec'd 0 ====
+
+	0x254772CC:  7F880120  mtcrf 0x80,r28
+	   0: GETL       	R28, t0
+	   1: ICRFL       	t0, $0x0, CR
+	   2: INCEIPL       	$4
+
+	0x254772D0:  408200C8  bc 4,2,0x25477398
+	   3: Jc02o       	$0x25477398
+
+
+. 0 254772CC 8
+. 7F 88 01 20 40 82 00 C8
+
+==== BB 789 (0x25477398) approx BBs exec'd 0 ====
+
+	0x25477398:  7E439378  or r3,r18,r18
+	   0: GETL       	R18, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547739C:  38810018  addi r4,r1,24
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x18, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0x254773A0:  4BFFF0A5  bl 0x25476444
+	   7: MOVL       	$0x254773A4, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0x25476444  ($4)
+
+
+. 0 25477398 12
+. 7E 43 93 78 38 81 00 18 4B FF F0 A5
+
+==== BB 790 (0x254765B4) approx BBs exec'd 0 ====
+
+	0x254765B4:  80FD0010  lwz r7,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254765B8:  2F070020  cmpi cr6,r7,32
+	   5: GETL       	R7, t4
+	   6: MOVL       	$0x20, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x254765BC:  409AFFA0  bc 4,26,0x2547655C
+	  10: Jc26o       	$0x2547655C
+
+
+. 0 254765B4 12
+. 80 FD 00 10 2F 07 00 20 40 9A FF A0
+
+==== BB 791 (0x254765C0) approx BBs exec'd 0 ====
+
+	0x254765C0:  815D001C  lwz r10,28(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254765C4:  2B8A0003  cmpli cr7,r10,3
+	   5: GETL       	R10, t4
+	   6: MOVL       	$0x3, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0x254765C8:  40BDFF94  bc 5,29,0x2547655C
+	  10: Jc29o       	$0x2547655C
+
+
+. 0 254765C0 12
+. 81 5D 00 1C 2B 8A 00 03 40 BD FF 94
+
+==== BB 792 (0x254765CC) approx BBs exec'd 0 ====
+
+	0x254765CC:  819D0004  lwz r12,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x254765D0:  38A00000  li r5,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x254765D4:  81180000  lwz r8,0(r24)
+	   8: GETL       	R24, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R8
+	  11: INCEIPL       	$4
+
+	0x254765D8:  3ABF0010  addi r21,r31,16
+	  12: GETL       	R31, t10
+	  13: ADDL       	$0x10, t10
+	  14: PUTL       	t10, R21
+	  15: INCEIPL       	$4
+
+	0x254765DC:  380C0020  addi r0,r12,32
+	  16: GETL       	R12, t12
+	  17: ADDL       	$0x20, t12
+	  18: PUTL       	t12, R0
+	  19: INCEIPL       	$4
+
+	0x254765E0:  7D6CC214  add r11,r12,r24
+	  20: GETL       	R12, t14
+	  21: GETL       	R24, t16
+	  22: ADDL       	t14, t16
+	  23: PUTL       	t16, R11
+	  24: INCEIPL       	$4
+
+	0x254765E4:  7C004040  cmpl cr0,r0,r8
+	  25: GETL       	R0, t18
+	  26: GETL       	R8, t20
+	  27: CMPUL       	t18, t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x0, CR
+	  29: INCEIPL       	$4
+
+	0x254765E8:  7F23CB78  or r3,r25,r25
+	  30: GETL       	R25, t24
+	  31: PUTL       	t24, R3
+	  32: INCEIPL       	$4
+
+	0x254765EC:  7D846378  or r4,r12,r12
+	  33: GETL       	R12, t26
+	  34: PUTL       	t26, R4
+	  35: INCEIPL       	$4
+
+	0x254765F0:  3B6B0004  addi r27,r11,4
+	  36: GETL       	R11, t28
+	  37: ADDL       	$0x4, t28
+	  38: PUTL       	t28, R27
+	  39: INCEIPL       	$4
+
+	0x254765F4:  418100FC  bc 12,1,0x254766F0
+	  40: Js01o       	$0x254766F0
+
+
+. 0 254765CC 44
+. 81 9D 00 04 38 A0 00 00 81 18 00 00 3A BF 00 10 38 0C 00 20 7D 6C C2 14 7C 00 40 40 7F 23 CB 78 7D 84 63 78 3B 6B 00 04 41 81 00 FC
+
+==== BB 793 (0x254765F8) approx BBs exec'd 0 ====
+
+	0x254765F8:  809E0170  lwz r4,368(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x170, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254765FC:  7F63DB78  or r3,r27,r27
+	   5: GETL       	R27, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25476600:  38A00010  li r5,16
+	   8: MOVL       	$0x10, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25476604:  4800CC95  bl 0x25483298
+	  11: MOVL       	$0x25476608, t8
+	  12: PUTL       	t8, LR
+	  13: JMPo-c       	$0x25483298  ($4)
+
+
+. 0 254765F8 16
+. 80 9E 01 70 7F 63 DB 78 38 A0 00 10 48 00 CC 95
+
+==== BB 794 (0x25483310) approx BBs exec'd 0 ====
+
+	0x25483310:  28000001  cmpli cr0,r0,1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25483314:  41800228  bc 12,0,0x2548353C
+	   5: Js00o       	$0x2548353C
+
+
+. 0 25483310 8
+. 28 00 00 01 41 80 02 28
+
+==== BB 795 (0x2548353C) approx BBs exec'd 0 ====
+
+	0x2548353C:  80EB0000  lwz r7,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25483540:  80040000  lwz r0,0(r4)
+	   4: GETL       	R4, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0x25483544:  7F870000  cmp cr7,r7,r0
+	   8: GETL       	R7, t8
+	   9: GETL       	R0, t10
+	  10: CMPL       	t8, t10, t12  (-rSo)
+	  11: ICRFL       	t12, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x25483548:  806A0004  lwz r3,4(r10)
+	  13: GETL       	R10, t14
+	  14: ADDL       	$0x4, t14
+	  15: LDL       	(t14), t16
+	  16: PUTL       	t16, R3
+	  17: INCEIPL       	$4
+
+	0x2548354C:  80C90004  lwz r6,4(r9)
+	  18: GETL       	R9, t18
+	  19: ADDL       	$0x4, t18
+	  20: LDL       	(t18), t20
+	  21: PUTL       	t20, R6
+	  22: INCEIPL       	$4
+
+	0x25483550:  409EFDF4  bc 4,30,0x25483344
+	  23: Jc30o       	$0x25483344
+
+
+. 0 2548353C 24
+. 80 EB 00 00 80 04 00 00 7F 87 00 00 80 6A 00 04 80 C9 00 04 40 9E FD F4
+
+==== BB 796 (0x25476608) approx BBs exec'd 0 ====
+
+	0x25476608:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547660C:  419A0084  bc 12,26,0x25476690
+	   4: Js26o       	$0x25476690
+
+
+. 0 25476608 8
+. 2F 03 00 00 41 9A 00 84
+
+==== BB 797 (0x25476690) approx BBs exec'd 0 ====
+
+	0x25476690:  813B0010  lwz r9,16(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25476694:  8B5B0017  lbz r26,23(r27)
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x17, t4
+	   7: LDB       	(t4), t6
+	   8: PUTL       	t6, R26
+	   9: INCEIPL       	$4
+
+	0x25476698:  8BBB001B  lbz r29,27(r27)
+	  10: GETL       	R27, t8
+	  11: ADDL       	$0x1B, t8
+	  12: LDB       	(t8), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0x2547669C:  2F890000  cmpi cr7,r9,0
+	  15: GETL       	R9, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x254766A0:  5755402E  rlwinm r21,r26,8,0,23
+	  19: GETL       	R26, t16
+	  20: SHLL       	$0x8, t16
+	  21: PUTL       	t16, R21
+	  22: INCEIPL       	$4
+
+	0x254766A4:  8B1B001F  lbz r24,31(r27)
+	  23: GETL       	R27, t18
+	  24: ADDL       	$0x1F, t18
+	  25: LDB       	(t18), t20
+	  26: PUTL       	t20, R24
+	  27: INCEIPL       	$4
+
+	0x254766A8:  7F95EA14  add r28,r21,r29
+	  28: GETL       	R21, t22
+	  29: GETL       	R29, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R28
+	  32: INCEIPL       	$4
+
+	0x254766AC:  5796402E  rlwinm r22,r28,8,0,23
+	  33: GETL       	R28, t26
+	  34: SHLL       	$0x8, t26
+	  35: PUTL       	t26, R22
+	  36: INCEIPL       	$4
+
+	0x254766B0:  7C16C214  add r0,r22,r24
+	  37: GETL       	R22, t28
+	  38: GETL       	R24, t30
+	  39: ADDL       	t28, t30
+	  40: PUTL       	t30, R0
+	  41: INCEIPL       	$4
+
+	0x254766B4:  409E001C  bc 4,30,0x254766D0
+	  42: Jc30o       	$0x254766D0
+
+
+. 0 25476690 40
+. 81 3B 00 10 8B 5B 00 17 8B BB 00 1B 2F 89 00 00 57 55 40 2E 8B 1B 00 1F 7F 95 EA 14 57 96 40 2E 7C 16 C2 14 40 9E 00 1C
+
+==== BB 798 (0x254766B8) approx BBs exec'd 0 ====
+
+	0x254766B8:  807E04F4  lwz r3,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254766BC:  81230008  lwz r9,8(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x254766C0:  2C090000  cmpi cr0,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x0, CR
+	  13: INCEIPL       	$4
+
+	0x254766C4:  41A2FEAC  bc 13,2,0x25476570
+	  14: Js02o       	$0x25476570
+
+
+. 0 254766B8 16
+. 80 7E 04 F4 81 23 00 08 2C 09 00 00 41 A2 FE AC
+
+==== BB 799 (0x254766C8) approx BBs exec'd 0 ====
+
+	0x254766C8:  7C890040  cmpl cr1,r9,r0
+	   0: GETL       	R9, t0
+	   1: GETL       	R0, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254766CC:  40A4FEA4  bc 5,4,0x25476570
+	   5: Jc04o       	$0x25476570
+
+
+. 0 254766C8 8
+. 7C 89 00 40 40 A4 FE A4
+
+==== BB 800 (0x254773A4) approx BBs exec'd 0 ====
+
+	0x254773A4:  2E03FFFF  cmpi cr4,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x4, CR
+	   4: INCEIPL       	$4
+
+	0x254773A8:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t6
+	   6: PUTL       	t6, R26
+	   7: INCEIPL       	$4
+
+	0x254773AC:  41B2FF2C  bc 13,18,0x254772D8
+	   8: Js18o       	$0x254772D8
+
+
+. 0 254773A4 12
+. 2E 03 FF FF 7C 7A 1B 78 41 B2 FF 2C
+
+==== BB 801 (0x254773B0) approx BBs exec'd 0 ====
+
+	0x254773B0:  7E439378  or r3,r18,r18
+	   0: GETL       	R18, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x254773B4:  4800BC1D  bl 0x25482FD0
+	   3: MOVL       	$0x254773B8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+. 0 254773B0 8
+. 7E 43 93 78 48 00 BC 1D
+
+==== BB 802 (0x254773B8) approx BBs exec'd 0 ====
+
+	0x254773B8:  3BA30001  addi r29,r3,1
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x254773BC:  7FA3EB78  or r3,r29,r29
+	   4: GETL       	R29, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0x254773C0:  48020665  bl 0x25497A24
+	   7: MOVL       	$0x254773C4, t4
+	   8: PUTL       	t4, LR
+	   9: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 254773B8 12
+. 3B A3 00 01 7F A3 EB 78 48 02 06 65
+
+==== BB 803 (0x254773C4) approx BBs exec'd 0 ====
+
+	0x254773C4:  38000000  li r0,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254773C8:  2F030000  cmpi cr6,r3,0
+	   3: GETL       	R3, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x6, CR
+	   6: INCEIPL       	$4
+
+	0x254773CC:  419A0014  bc 12,26,0x254773E0
+	   7: Js26o       	$0x254773E0
+
+
+. 0 254773C4 12
+. 38 00 00 00 2F 03 00 00 41 9A 00 14
+
+==== BB 804 (0x254773D0) approx BBs exec'd 0 ====
+
+	0x254773D0:  7E449378  or r4,r18,r18
+	   0: GETL       	R18, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254773D4:  7FA5EB78  or r5,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x254773D8:  4800C809  bl 0x25483BE0
+	   6: MOVL       	$0x254773DC, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 254773D0 12
+. 7E 44 93 78 7F A5 EB 78 48 00 C8 09
+
+==== BB 805 (0x25483C74) approx BBs exec'd 0 ====
+
+	0x25483C74:  88FD0000  lbz r7,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDB       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25483C78:  3BBD0001  addi r29,r29,1
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25483C7C:  98FF0000  stb r7,0(r31)
+	   8: GETL       	R7, t6
+	   9: GETL       	R31, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25483C80:  3BFF0001  addi r31,r31,1
+	  12: GETL       	R31, t10
+	  13: ADDL       	$0x1, t10
+	  14: PUTL       	t10, R31
+	  15: INCEIPL       	$4
+
+	0x25483C84:  4200FFF0  bc 16,0,0x25483C74
+	  16: GETL       	CTR, t12
+	  17: ADDL       	$0xFFFFFFFF, t12
+	  18: PUTL       	t12, CTR
+	  19: JIFZL       	t12, $0x25483C88
+	  20: JMPo       	$0x25483C74  ($4)
+
+
+. 0 25483C74 20
+. 88 FD 00 00 3B BD 00 01 98 FF 00 00 3B FF 00 01 42 00 FF F0
+
+==== BB 806 (0x254773DC) approx BBs exec'd 0 ====
+
+	0x254773DC:  7C601B78  or r0,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254773E0:  2F800000  cmpi cr7,r0,0
+	   3: GETL       	R0, t2
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x7, CR
+	   6: INCEIPL       	$4
+
+	0x254773E4:  90010228  stw r0,552(r1)
+	   7: GETL       	R0, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0x228, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x254773E8:  409EFEEC  bc 4,30,0x254772D4
+	  12: Jc30o       	$0x254772D4
+
+
+. 0 254773DC 16
+. 7C 60 1B 78 2F 80 00 00 90 01 02 28 40 9E FE EC
+
+==== BB 807 (0x254772D4) approx BBs exec'd 0 ====
+
+	0x254772D4:  40920050  bc 4,18,0x25477324
+	   0: Jc18o       	$0x25477324
+
+
+. 0 254772D4 4
+. 40 92 00 50
+
+==== BB 808 (0x25477324) approx BBs exec'd 0 ====
+
+	0x25477324:  82F40000  lwz r23,0(r20)
+	   0: GETL       	R20, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R23
+	   3: INCEIPL       	$4
+
+	0x25477328:  72E90001  andi. r9,r23,0x1
+	   4: GETL       	R23, t4
+	   5: ANDL       	$0x1, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547732C:  4182FAD8  bc 12,2,0x25476E04
+	  10: Js02o       	$0x25476E04
+
+
+. 0 25477324 12
+. 82 F4 00 00 72 E9 00 01 41 82 FA D8
+
+==== BB 809 (0x254758A8) approx BBs exec'd 0 ====
+
+	0x254758A8:  54EF2834  rlwinm r15,r7,5,0,26
+	   0: GETL       	R7, t0
+	   1: SHLL       	$0x5, t0
+	   2: PUTL       	t0, R15
+	   3: INCEIPL       	$4
+
+	0x254758AC:  801D0008  lwz r0,8(r29)
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x8, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254758B0:  7DCFDA14  add r14,r15,r27
+	   9: GETL       	R15, t6
+	  10: GETL       	R27, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R14
+	  13: INCEIPL       	$4
+
+	0x254758B4:  3BBD0020  addi r29,r29,32
+	  14: GETL       	R29, t10
+	  15: ADDL       	$0x20, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0x254758B8:  7C8EE840  cmpl cr1,r14,r29
+	  18: GETL       	R14, t12
+	  19: GETL       	R29, t14
+	  20: CMPUL       	t12, t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x1, CR
+	  22: INCEIPL       	$4
+
+	0x254758BC:  901A014C  stw r0,332(r26)
+	  23: GETL       	R0, t18
+	  24: GETL       	R26, t20
+	  25: ADDL       	$0x14C, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0x254758C0:  4185FF4C  bc 12,5,0x2547580C
+	  28: Js05o       	$0x2547580C
+
+
+. 0 254758A8 28
+. 54 EF 28 34 80 1D 00 08 7D CF DA 14 3B BD 00 20 7C 8E E8 40 90 1A 01 4C 41 85 FF 4C
+
+==== BB 810 (0x25475830) approx BBs exec'd 0 ====
+
+	0x25475830:  7F090040  cmpl cr6,r9,r0
+	   0: GETL       	R9, t0
+	   1: GETL       	R0, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25475834:  4199024C  bc 12,25,0x25475A80
+	   5: Js25o       	$0x25475A80
+
+
+. 0 25475830 8
+. 7F 09 00 40 41 99 02 4C
+
+==== BB 811 (0x25475838) approx BBs exec'd 0 ====
+
+	0x25475838:  2C090007  cmpi cr0,r9,7
+	   0: GETL       	R9, t0
+	   1: MOVL       	$0x7, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547583C:  4082FFBC  bc 4,2,0x254757F8
+	   5: Jc02o       	$0x254757F8
+
+
+. 0 25475838 8
+. 2C 09 00 07 40 82 FF BC
+
+==== BB 812 (0x25475840) approx BBs exec'd 0 ====
+
+	0x25475840:  801D0014  lwz r0,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475844:  2F000000  cmpi cr6,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25475848:  41BAFFB0  bc 13,26,0x254757F8
+	   9: Js26o       	$0x254757F8
+
+
+. 0 25475840 12
+. 80 1D 00 14 2F 00 00 00 41 BA FF B0
+
+==== BB 813 (0x2547584C) approx BBs exec'd 0 ====
+
+	0x2547584C:  901A0220  stw r0,544(r26)
+	   0: GETL       	R0, t0
+	   1: GETL       	R26, t2
+	   2: ADDL       	$0x220, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25475850:  817D001C  lwz r11,28(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x1C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0x25475854:  917A0224  stw r11,548(r26)
+	  10: GETL       	R11, t8
+	  11: GETL       	R26, t10
+	  12: ADDL       	$0x224, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25475858:  813D001C  lwz r9,28(r29)
+	  15: GETL       	R29, t12
+	  16: ADDL       	$0x1C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x2547585C:  2F890000  cmpi cr7,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x7, CR
+	  23: INCEIPL       	$4
+
+	0x25475860:  419E07EC  bc 12,30,0x2547604C
+	  24: Js30o       	$0x2547604C
+
+
+. 0 2547584C 24
+. 90 1A 02 20 81 7D 00 1C 91 7A 02 24 81 3D 00 1C 2F 89 00 00 41 9E 07 EC
+
+==== BB 814 (0x25475864) approx BBs exec'd 0 ====
+
+	0x25475864:  80BD0008  lwz r5,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25475868:  3929FFFF  addi r9,r9,-1
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547586C:  7CA04838  and r0,r5,r9
+	   9: GETL       	R5, t6
+	  10: GETL       	R9, t8
+	  11: ANDL       	t6, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x25475870:  901A0228  stw r0,552(r26)
+	  14: GETL       	R0, t10
+	  15: GETL       	R26, t12
+	  16: ADDL       	$0x228, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x25475874:  81DA0180  lwz r14,384(r26)
+	  19: GETL       	R26, t14
+	  20: ADDL       	$0x180, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R14
+	  23: INCEIPL       	$4
+
+	0x25475878:  3CC04000  lis r6,16384
+	  24: MOVL       	$0x40000000, t18
+	  25: PUTL       	t18, R6
+	  26: INCEIPL       	$4
+
+	0x2547587C:  815D0010  lwz r10,16(r29)
+	  27: GETL       	R29, t20
+	  28: ADDL       	$0x10, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R10
+	  31: INCEIPL       	$4
+
+	0x25475880:  55C80002  rlwinm r8,r14,0,0,1
+	  32: GETL       	R14, t24
+	  33: ANDL       	$0xC0000000, t24
+	  34: PUTL       	t24, R8
+	  35: INCEIPL       	$4
+
+	0x25475884:  7C083000  cmp cr0,r8,r6
+	  36: GETL       	R8, t26
+	  37: GETL       	R6, t28
+	  38: CMPL       	t26, t28, t30  (-rSo)
+	  39: ICRFL       	t30, $0x0, CR
+	  40: INCEIPL       	$4
+
+	0x25475888:  915A021C  stw r10,540(r26)
+	  41: GETL       	R10, t32
+	  42: GETL       	R26, t34
+	  43: ADDL       	$0x21C, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0x2547588C:  809D0008  lwz r4,8(r29)
+	  46: GETL       	R29, t36
+	  47: ADDL       	$0x8, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R4
+	  50: INCEIPL       	$4
+
+	0x25475890:  909A0218  stw r4,536(r26)
+	  51: GETL       	R4, t40
+	  52: GETL       	R26, t42
+	  53: ADDL       	$0x218, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0x25475894:  40820A24  bc 4,2,0x254762B8
+	  56: Jc02o       	$0x254762B8
+
+
+. 0 25475864 52
+. 80 BD 00 08 39 29 FF FF 7C A0 48 38 90 1A 02 28 81 DA 01 80 3C C0 40 00 81 5D 00 10 55 C8 00 02 7C 08 30 00 91 5A 02 1C 80 9D 00 08 90 9A 02 18 40 82 0A 24
+
+==== BB 815 (0x25475898) approx BBs exec'd 0 ====
+
+	0x25475898:  4800837D  bl 0x2547DC14
+	   0: MOVL       	$0x2547589C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547DC14  ($4)
+
+
+. 0 25475898 4
+. 48 00 83 7D
+
+==== BB 816 _dl_next_tls_modid(0x2547DC14) approx BBs exec'd 0 ====
+
+	0x2547DC14:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547DC18:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x2547DC1C:  480193E5  bl 0x25497000
+	   9: MOVL       	$0x2547DC20, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547DC14 12
+. 94 21 FF F0 7D 88 02 A6 48 01 93 E5
+
+==== BB 817 (0x2547DC20) approx BBs exec'd 0 ====
+
+	0x2547DC20:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547DC24:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547DC28:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0x2547DC2C:  80DE04C8  lwz r6,1224(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x4C8, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x2547DC30:  8806040C  lbz r0,1036(r6)
+	  16: GETL       	R6, t12
+	  17: ADDL       	$0x40C, t12
+	  18: LDB       	(t12), t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x2547DC34:  2F800000  cmpi cr7,r0,0
+	  21: GETL       	R0, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x2547DC38:  409E0020  bc 4,30,0x2547DC58
+	  25: Jc30o       	$0x2547DC58
+
+
+. 0 2547DC20 28
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 DE 04 C8 88 06 04 0C 2F 80 00 00 40 9E 00 20
+
+==== BB 818 (0x2547DC3C) approx BBs exec'd 0 ====
+
+	0x2547DC3C:  81460408  lwz r10,1032(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547DC40:  390A0001  addi r8,r10,1
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x2547DC44:  91060408  stw r8,1032(r6)
+	   9: GETL       	R8, t6
+	  10: GETL       	R6, t8
+	  11: ADDL       	$0x408, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547DC48:  7D034378  or r3,r8,r8
+	  14: GETL       	R8, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x2547DC4C:  83C10008  lwz r30,8(r1)
+	  17: GETL       	R1, t12
+	  18: ADDL       	$0x8, t12
+	  19: LDL       	(t12), t14
+	  20: PUTL       	t14, R30
+	  21: INCEIPL       	$4
+
+	0x2547DC50:  38210010  addi r1,r1,16
+	  22: GETL       	R1, t16
+	  23: ADDL       	$0x10, t16
+	  24: PUTL       	t16, R1
+	  25: INCEIPL       	$4
+
+	0x2547DC54:  4E800020  blr
+	  26: GETL       	LR, t18
+	  27: JMPo-r       	t18  ($4)
+
+
+. 0 2547DC3C 28
+. 81 46 04 08 39 0A 00 01 91 06 04 08 7D 03 43 78 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+==== BB 819 (0x2547589C) approx BBs exec'd 0 ====
+
+	0x2547589C:  A0FA0154  lhz r7,340(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x154, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254758A0:  907A0230  stw r3,560(r26)
+	   5: GETL       	R3, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x230, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x254758A4:  4BFFFF54  b 0x254757F8
+	  10: JMPo       	$0x254757F8  ($4)
+
+
+. 0 2547589C 12
+. A0 FA 01 54 90 7A 02 30 4B FF FF 54
+
+==== BB 820 (0x25475A80) approx BBs exec'd 0 ====
+
+	0x25475A80:  3D406474  lis r10,25716
+	   0: MOVL       	$0x64740000, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x25475A84:  614FE552  ori r15,r10,0xE552
+	   3: MOVL       	$0x6474E552, t2
+	   4: PUTL       	t2, R15
+	   5: INCEIPL       	$4
+
+	0x25475A88:  7F897800  cmp cr7,r9,r15
+	   6: GETL       	R9, t4
+	   7: GETL       	R15, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x25475A8C:  409EFD6C  bc 4,30,0x254757F8
+	  11: Jc30o       	$0x254757F8
+
+
+. 0 25475A80 16
+. 3D 40 64 74 61 4F E5 52 7F 89 78 00 40 9E FD 6C
+
+==== BB 821 (0x25475A90) approx BBs exec'd 0 ====
+
+	0x25475A90:  811D0008  lwz r8,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25475A94:  911A0234  stw r8,564(r26)
+	   5: GETL       	R8, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	$0x234, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25475A98:  80DD0014  lwz r6,20(r29)
+	  10: GETL       	R29, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R6
+	  14: INCEIPL       	$4
+
+	0x25475A9C:  90DA0238  stw r6,568(r26)
+	  15: GETL       	R6, t12
+	  16: GETL       	R26, t14
+	  17: ADDL       	$0x238, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25475AA0:  4BFFFD58  b 0x254757F8
+	  20: JMPo       	$0x254757F8  ($4)
+
+
+. 0 25475A90 20
+. 81 1D 00 08 91 1A 02 34 80 DD 00 14 90 DA 02 38 4B FF FD 58
+
+==9225== Reading syms from /lib/tls/libc-2.3.4.so (0xFE80000)
+==9225==    object doesn't have any debug info
+==== BB 822 (0x254759E0) approx BBs exec'd 0 ====
+
+	0x254759E0:  7C9CC840  cmpl cr1,r28,r25
+	   0: GETL       	R28, t0
+	   1: GETL       	R25, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254759E4:  4085002C  bc 4,5,0x25475A10
+	   5: Jc05o       	$0x25475A10
+
+
+. 0 254759E0 8
+. 7C 9C C8 40 40 85 00 2C
+
+==== BB 823 (0x254837B0) approx BBs exec'd 0 ====
+
+	0x254837B0:  9088FFFC  stw r4,-4(r8)
+	   0: GETL       	R4, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254837B4:  9488FFF8  stwu r4,-8(r8)
+	   5: GETL       	R4, t4
+	   6: GETL       	R8, t6
+	   7: ADDL       	$0xFFFFFFF8, t6
+	   8: PUTL       	t6, R8
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x254837B8:  41840014  bc 12,4,0x254837CC
+	  11: Js04o       	$0x254837CC
+
+
+. 0 254837B0 12
+. 90 88 FF FC 94 88 FF F8 41 84 00 14
+
+==== BB 824 (0x254837D4) approx BBs exec'd 0 ====
+
+	0x254837D4:  28840000  cmpli cr1,r4,0
+	   0: GETL       	R4, t0
+	   1: MOVL       	$0x0, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254837D8:  54A70035  rlwinm. r7,r5,0,0,26
+	   5: GETL       	R5, t6
+	   6: ANDL       	$0xFFFFFFE0, t6
+	   7: PUTL       	t6, R7
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x254837DC:  7CA01120  mtcrf 0x1,r5
+	  11: GETL       	R5, t10
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x254837E0:  418601A0  bc 12,6,0x25483980
+	  14: Js06o       	$0x25483980
+
+
+. 0 254837D4 16
+. 28 84 00 00 54 A7 00 35 7C A0 11 20 41 86 01 A0
+
+==== BB 825 (0x25483884) approx BBs exec'd 0 ====
+
+	0x25483884:  7C0037EC  dcbz r0,r6
+	   0: GETL       	R6, t0
+	   1: ANDL       	$0xFFFFFFE0, t0
+	   2: MOVL       	$0x0, t2
+	   3: STL       	t2, (t0)
+	   4: ADDL       	$0x4, t0
+	   5: STL       	t2, (t0)
+	   6: ADDL       	$0x4, t0
+	   7: STL       	t2, (t0)
+	   8: ADDL       	$0x4, t0
+	   9: STL       	t2, (t0)
+	  10: ADDL       	$0x4, t0
+	  11: STL       	t2, (t0)
+	  12: ADDL       	$0x4, t0
+	  13: STL       	t2, (t0)
+	  14: ADDL       	$0x4, t0
+	  15: STL       	t2, (t0)
+	  16: ADDL       	$0x4, t0
+	  17: STL       	t2, (t0)
+	  18: INCEIPL       	$4
+
+	0x25483888:  38C60020  addi r6,r6,32
+	  19: GETL       	R6, t4
+	  20: ADDL       	$0x20, t4
+	  21: PUTL       	t4, R6
+	  22: INCEIPL       	$4
+
+	0x2548388C:  3920FFE0  li r9,-32
+	  23: MOVL       	$0xFFFFFFE0, t6
+	  24: PUTL       	t6, R9
+	  25: INCEIPL       	$4
+
+	0x25483890:  40990010  bc 4,25,0x254838A0
+	  26: Jc25o       	$0x254838A0
+
+
+. 0 25483884 16
+. 7C 00 37 EC 38 C6 00 20 39 20 FF E0 40 99 00 10
+
+==== BB 826 (0x254760C0) approx BBs exec'd 0 ====
+
+	0x254760C0:  80BD0014  lwz r5,20(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254760C4:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254760C8:  7C9CB850  subf r4,r28,r23
+	   8: GETL       	R28, t6
+	   9: GETL       	R23, t8
+	  10: SUBL       	t6, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x254760CC:  38C00032  li r6,50
+	  13: MOVL       	$0x32, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x254760D0:  38E0FFFF  li r7,-1
+	  16: MOVL       	$0xFFFFFFFF, t12
+	  17: PUTL       	t12, R7
+	  18: INCEIPL       	$4
+
+	0x254760D4:  39000000  li r8,0
+	  19: MOVL       	$0x0, t14
+	  20: PUTL       	t14, R8
+	  21: INCEIPL       	$4
+
+	0x254760D8:  4800C6C1  bl 0x25482798
+	  22: MOVL       	$0x254760DC, t16
+	  23: PUTL       	t16, LR
+	  24: JMPo-c       	$0x25482798  ($4)
+
+
+. 0 254760C0 28
+. 80 BD 00 14 7F 83 E3 78 7C 9C B8 50 38 C0 00 32 38 E0 FF FF 39 00 00 00 48 00 C6 C1
+
+==== BB 827 (0x254760DC) approx BBs exec'd 0 ====
+
+	0x254760DC:  2C83FFFF  cmpi cr1,r3,-1
+	   0: GETL       	R3, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x254760E0:  4086F938  bc 4,6,0x25475A18
+	   5: Jc06o       	$0x25475A18
+
+
+. 0 254760DC 8
+. 2C 83 FF FF 40 86 F9 38
+
+==== BB 828 (0x25476054) approx BBs exec'd 0 ====
+
+	0x25476054:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0x25476058:  7D800E70  srawi r0,r12,1
+	   4: GETL       	R12, t2
+	   5: SARL       	$0x1, t2  (-wCa)
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x2547605C:  7C1700F8  nor r23,r0,r0
+	   8: GETL       	R0, t4
+	   9: NOTL       	t4
+	  10: PUTL       	t4, R23
+	  11: INCEIPL       	$4
+
+	0x25476060:  20000031  subfic r0,r0,49
+	  12: GETL       	R0, t6
+	  13: MOVL       	$0x31, t8
+	  14: SBBL       	t6, t8  (-wCa)
+	  15: PUTL       	t8, R0
+	  16: INCEIPL       	$4
+
+	0x25476064:  2B170002  cmpli cr6,r23,2
+	  17: GETL       	R23, t10
+	  18: MOVL       	$0x2, t14
+	  19: CMPUL       	t10, t14, t12  (-rSo)
+	  20: ICRFL       	t12, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x25476068:  40B9FC38  bc 5,25,0x25475CA0
+	  22: Jc25o       	$0x25475CA0
+
+
+. 0 25476054 24
+. 55 6C 08 3C 7D 80 0E 70 7C 17 00 F8 20 00 00 31 2B 17 00 02 40 B9 FC 38
+
+==== BB 829 (0x2547606C) approx BBs exec'd 0 ====
+
+	0x2547606C:  7F2B2850  subf r25,r11,r5
+	   0: GETL       	R11, t0
+	   1: GETL       	R5, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x25476070:  2A19000B  cmpli cr4,r25,11
+	   5: GETL       	R25, t4
+	   6: MOVL       	$0xB, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x4, CR
+	   9: INCEIPL       	$4
+
+	0x25476074:  419100C4  bc 12,17,0x25476138
+	  10: Js17o       	$0x25476138
+
+
+. 0 2547606C 12
+. 7F 2B 28 50 2A 19 00 0B 41 91 00 C4
+
+==== BB 830 (0x25476078) approx BBs exec'd 0 ====
+
+	0x25476078:  7C0B2050  subf r0,r11,r4
+	   0: GETL       	R11, t0
+	   1: GETL       	R4, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547607C:  5400103A  rlwinm r0,r0,2,0,29
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25476080:  4BFFFC24  b 0x25475CA4
+	   9: JMPo       	$0x25475CA4  ($4)
+
+
+. 0 25476078 12
+. 7C 0B 20 50 54 00 10 3A 4B FF FC 24
+
+==== BB 831 (0x25475D68) approx BBs exec'd 0 ====
+
+	0x25475D68:  81280078  lwz r9,120(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x78, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25475D6C:  2C090000  cmpi cr0,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25475D70:  41820030  bc 12,2,0x25475DA0
+	   9: Js02o       	$0x25475DA0
+
+
+. 0 25475D68 12
+. 81 28 00 78 2C 09 00 00 41 82 00 30
+
+==== BB 832 (0x25475D74) approx BBs exec'd 0 ====
+
+	0x25475D74:  80090004  lwz r0,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25475D78:  700A0002  andi. r10,r0,0x2
+	   5: GETL       	R0, t4
+	   6: ANDL       	$0x2, t4
+	   7: PUTL       	t4, R10
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25475D7C:  901A0200  stw r0,512(r26)
+	  11: GETL       	R0, t8
+	  12: GETL       	R26, t10
+	  13: ADDL       	$0x200, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x25475D80:  41820008  bc 12,2,0x25475D88
+	  16: Js02o       	$0x25475D88
+
+
+. 0 25475D74 16
+. 80 09 00 04 70 0A 00 02 90 1A 02 00 41 82 00 08
+
+==== BB 833 (0x25475D88) approx BBs exec'd 0 ====
+
+	0x25475D88:  700B0004  andi. r11,r0,0x4
+	   0: GETL       	R0, t0
+	   1: ANDL       	$0x4, t0
+	   2: PUTL       	t0, R11
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475D8C:  41820008  bc 12,2,0x25475D94
+	   6: Js02o       	$0x25475D94
+
+
+. 0 25475D88 8
+. 70 0B 00 04 41 82 00 08
+
+==== BB 834 (0x25475D94) approx BBs exec'd 0 ====
+
+	0x25475D94:  700A0008  andi. r10,r0,0x8
+	   0: GETL       	R0, t0
+	   1: ANDL       	$0x8, t0
+	   2: PUTL       	t0, R10
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25475D98:  41820008  bc 12,2,0x25475DA0
+	   6: Js02o       	$0x25475DA0
+
+
+. 0 25475D94 8
+. 70 0A 00 08 41 82 00 08
+
+==== BB 835 (0x25475D9C) approx BBs exec'd 0 ====
+
+	0x25475D9C:  91280060  stw r9,96(r8)
+	   0: GETL       	R9, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0x60, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25475DA0:  81280098  lwz r9,152(r8)
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0x98, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25475DA4:  2F890000  cmpi cr7,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25475DA8:  419E0354  bc 12,30,0x254760FC
+	  14: Js30o       	$0x254760FC
+
+
+. 0 25475D9C 16
+. 91 28 00 60 81 28 00 98 2F 89 00 00 41 9E 03 54
+
+==== BB 836 (0x25475DBC) approx BBs exec'd 0 ====
+
+	0x25475DBC:  91280060  stw r9,96(r8)
+	   0: GETL       	R9, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0x60, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25475DC0:  81280074  lwz r9,116(r8)
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0x74, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25475DC4:  2C890000  cmpi cr1,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25475DC8:  418600E0  bc 12,6,0x25475EA8
+	  14: Js06o       	$0x25475EA8
+
+
+. 0 25475DBC 16
+. 91 28 00 60 81 28 00 74 2C 89 00 00 41 86 00 E0
+
+==== BB 837 (0x25475EEC) approx BBs exec'd 0 ====
+
+	0x25475EEC:  80DA0000  lwz r6,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x25475EF0:  7D493214  add r10,r9,r6
+	   4: GETL       	R9, t4
+	   5: GETL       	R6, t6
+	   6: ADDL       	t4, t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x25475EF4:  915A0218  stw r10,536(r26)
+	   9: GETL       	R10, t8
+	  10: GETL       	R26, t10
+	  11: ADDL       	$0x218, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25475EF8:  7E639B78  or r3,r19,r19
+	  14: GETL       	R19, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0x25475EFC:  4800C0F5  bl 0x25481FF0
+	  17: MOVL       	$0x25475F00, t14
+	  18: PUTL       	t14, LR
+	  19: JMPo-c       	$0x25481FF0  ($4)
+
+
+. 0 25475EEC 20
+. 80 DA 00 00 7D 49 32 14 91 5A 02 18 7E 63 9B 78 48 00 C0 F5
+
+==== BB 838 (0x25475F88) approx BBs exec'd 0 ====
+
+	0x25475F88:  80910050  lwz r4,80(r17)
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25475F8C:  C81F0028  lfd f0,40(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x28, t4
+	   7: FPU_RQ       	(t4), 0x0:0x0
+	   8: INCEIPL       	$4
+
+	0x25475F90:  2C840000  cmpi cr1,r4,0
+	   9: GETL       	R4, t6
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0x25475F94:  D81A01D0  stfd f0,464(r26)
+	  13: GETL       	R26, t10
+	  14: ADDL       	$0x1D0, t10
+	  15: FPU_WQ       	0x0:0x0, (t10)
+	  16: INCEIPL       	$4
+
+	0x25475F98:  C9BF0030  lfd f13,48(r31)
+	  17: GETL       	R31, t12
+	  18: ADDL       	$0x30, t12
+	  19: FPU_RQ       	(t12), 0x0:0xD
+	  20: INCEIPL       	$4
+
+	0x25475F9C:  D9BA01D8  stfd f13,472(r26)
+	  21: GETL       	R26, t14
+	  22: ADDL       	$0x1D8, t14
+	  23: FPU_WQ       	0x0:0xD, (t14)
+	  24: INCEIPL       	$4
+
+	0x25475FA0:  4186FEB0  bc 12,6,0x25475E50
+	  25: Js06o       	$0x25475E50
+
+
+. 0 25475F88 28
+. 80 91 00 50 C8 1F 00 28 2C 84 00 00 D8 1A 01 D0 C9 BF 00 30 D9 BA 01 D8 41 86 FE B0
+
+==== BB 839 (0x2547A100) approx BBs exec'd 0 ====
+
+	0x2547A100:  907F0014  stw r3,20(r31)
+	   0: GETL       	R3, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547A104:  80610014  lwz r3,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547A108:  83E10008  lwz r31,8(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0x2547A10C:  38210010  addi r1,r1,16
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x10, t12
+	  17: PUTL       	t12, R1
+	  18: INCEIPL       	$4
+
+	0x2547A110:  7C6803A6  mtlr r3
+	  19: GETL       	R3, t14
+	  20: PUTL       	t14, LR
+	  21: INCEIPL       	$4
+
+	0x2547A114:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+. 0 2547A100 24
+. 90 7F 00 14 80 61 00 14 83 E1 00 08 38 21 00 10 7C 68 03 A6 4E 80 00 20
+
+==== BB 840 (0x2547B624) approx BBs exec'd 0 ====
+
+	0x2547B624:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547B628:  81610008  lwz r11,8(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547B62C:  80C10280  lwz r6,640(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x280, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R6
+	  12: INCEIPL       	$4
+
+	0x2547B630:  80A10270  lwz r5,624(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x270, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0x2547B634:  80810274  lwz r4,628(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x274, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R4
+	  22: INCEIPL       	$4
+
+	0x2547B638:  93E50000  stw r31,0(r5)
+	  23: GETL       	R31, t18
+	  24: GETL       	R5, t20
+	  25: STL       	t18, (t20)
+	  26: INCEIPL       	$4
+
+	0x2547B63C:  91660000  stw r11,0(r6)
+	  27: GETL       	R11, t22
+	  28: GETL       	R6, t24
+	  29: STL       	t22, (t24)
+	  30: INCEIPL       	$4
+
+	0x2547B640:  93E40000  stw r31,0(r4)
+	  31: GETL       	R31, t26
+	  32: GETL       	R4, t28
+	  33: STL       	t26, (t28)
+	  34: INCEIPL       	$4
+
+	0x2547B644:  814102A4  lwz r10,676(r1)
+	  35: GETL       	R1, t30
+	  36: ADDL       	$0x2A4, t30
+	  37: LDL       	(t30), t32
+	  38: PUTL       	t32, R10
+	  39: INCEIPL       	$4
+
+	0x2547B648:  83C10298  lwz r30,664(r1)
+	  40: GETL       	R1, t34
+	  41: ADDL       	$0x298, t34
+	  42: LDL       	(t34), t36
+	  43: PUTL       	t36, R30
+	  44: INCEIPL       	$4
+
+	0x2547B64C:  83E1029C  lwz r31,668(r1)
+	  45: GETL       	R1, t38
+	  46: ADDL       	$0x29C, t38
+	  47: LDL       	(t38), t40
+	  48: PUTL       	t40, R31
+	  49: INCEIPL       	$4
+
+	0x2547B650:  7D4803A6  mtlr r10
+	  50: GETL       	R10, t42
+	  51: PUTL       	t42, LR
+	  52: INCEIPL       	$4
+
+	0x2547B654:  382102A0  addi r1,r1,672
+	  53: GETL       	R1, t44
+	  54: ADDL       	$0x2A0, t44
+	  55: PUTL       	t44, R1
+	  56: INCEIPL       	$4
+
+	0x2547B658:  4E800020  blr
+	  57: GETL       	LR, t46
+	  58: JMPo-r       	t46  ($4)
+
+
+. 0 2547B624 56
+. 38 60 00 00 81 61 00 08 80 C1 02 80 80 A1 02 70 80 81 02 74 93 E5 00 00 91 66 00 00 93 E4 00 00 81 41 02 A4 83 C1 02 98 83 E1 02 9C 7D 48 03 A6 38 21 02 A0 4E 80 00 20
+
+==== BB 841 (0x2547A400) approx BBs exec'd 0 ====
+
+	0x2547A400:  833F0034  lwz r25,52(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R25
+	   4: INCEIPL       	$4
+
+	0x2547A404:  2F190000  cmpi cr6,r25,0
+	   5: GETL       	R25, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547A408:  409A01AC  bc 4,26,0x2547A5B4
+	   9: Jc26o       	$0x2547A5B4
+
+
+. 0 2547A400 12
+. 83 3F 00 34 2F 19 00 00 40 9A 01 AC
+
+==== BB 842 (0x2547A40C) approx BBs exec'd 0 ====
+
+	0x2547A40C:  815F0024  lwz r10,36(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547A410:  808A0180  lwz r4,384(r10)
+	   5: GETL       	R10, t4
+	   6: ADDL       	$0x180, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547A414:  54893FBE  rlwinm r9,r4,7,30,31
+	  10: GETL       	R4, t8
+	  11: ROLL       	$0x7, t8
+	  12: ANDL       	$0x3, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x2547A418:  2F890000  cmpi cr7,r9,0
+	  15: GETL       	R9, t10
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547A41C:  409E0044  bc 4,30,0x2547A460
+	  19: Jc30o       	$0x2547A460
+
+
+. 0 2547A40C 20
+. 81 5F 00 24 80 8A 01 80 54 89 3F BE 2F 89 00 00 40 9E 00 44
+
+==== BB 843 (0x2547A420) approx BBs exec'd 0 ====
+
+	0x2547A420:  80010000  lwz r0,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547A424:  38C00001  li r6,1
+	   4: MOVL       	$0x1, t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0x2547A428:  80FF000C  lwz r7,12(r31)
+	   7: GETL       	R31, t6
+	   8: ADDL       	$0xC, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R7
+	  11: INCEIPL       	$4
+
+	0x2547A42C:  9401FFE0  stwu r0,-32(r1)
+	  12: GETL       	R0, t10
+	  13: GETL       	R1, t12
+	  14: ADDL       	$0xFFFFFFE0, t12
+	  15: PUTL       	t12, R1
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547A430:  3A810017  addi r20,r1,23
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x17, t14
+	  20: PUTL       	t14, R20
+	  21: INCEIPL       	$4
+
+	0x2547A434:  56850036  rlwinm r5,r20,0,0,27
+	  22: GETL       	R20, t16
+	  23: ANDL       	$0xFFFFFFF0, t16
+	  24: PUTL       	t16, R5
+	  25: INCEIPL       	$4
+
+	0x2547A438:  91250008  stw r9,8(r5)
+	  26: GETL       	R9, t18
+	  27: GETL       	R5, t20
+	  28: ADDL       	$0x8, t20
+	  29: STL       	t18, (t20)
+	  30: INCEIPL       	$4
+
+	0x2547A43C:  91250000  stw r9,0(r5)
+	  31: GETL       	R9, t22
+	  32: GETL       	R5, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0x2547A440:  39270001  addi r9,r7,1
+	  35: GETL       	R7, t26
+	  36: ADDL       	$0x1, t26
+	  37: PUTL       	t26, R9
+	  38: INCEIPL       	$4
+
+	0x2547A444:  90B30008  stw r5,8(r19)
+	  39: GETL       	R5, t28
+	  40: GETL       	R19, t30
+	  41: ADDL       	$0x8, t30
+	  42: STL       	t28, (t30)
+	  43: INCEIPL       	$4
+
+	0x2547A448:  91450004  stw r10,4(r5)
+	  44: GETL       	R10, t32
+	  45: GETL       	R5, t34
+	  46: ADDL       	$0x4, t34
+	  47: STL       	t32, (t34)
+	  48: INCEIPL       	$4
+
+	0x2547A44C:  826A0180  lwz r19,384(r10)
+	  49: GETL       	R10, t36
+	  50: ADDL       	$0x180, t36
+	  51: LDL       	(t36), t38
+	  52: PUTL       	t38, R19
+	  53: INCEIPL       	$4
+
+	0x2547A450:  913F000C  stw r9,12(r31)
+	  54: GETL       	R9, t40
+	  55: GETL       	R31, t42
+	  56: ADDL       	$0xC, t42
+	  57: STL       	t40, (t42)
+	  58: INCEIPL       	$4
+
+	0x2547A454:  50D3C94C  rlwimi r19,r6,25,5,6
+	  59: GETL       	R19, t44
+	  60: GETL       	R6, t46
+	  61: ROLL       	$0x19, t46
+	  62: ANDL       	$0x6000000, t46
+	  63: ANDL       	$0xF9FFFFFF, t44
+	  64: ORL       	t44, t46
+	  65: PUTL       	t46, R19
+	  66: INCEIPL       	$4
+
+	0x2547A458:  926A0180  stw r19,384(r10)
+	  67: GETL       	R19, t48
+	  68: GETL       	R10, t50
+	  69: ADDL       	$0x180, t50
+	  70: STL       	t48, (t50)
+	  71: INCEIPL       	$4
+
+	0x2547A45C:  7CB32B78  or r19,r5,r5
+	  72: GETL       	R5, t52
+	  73: PUTL       	t52, R19
+	  74: INCEIPL       	$4
+
+	0x2547A460:  2C980000  cmpi cr1,r24,0
+	  75: GETL       	R24, t54
+	  76: CMP0L       	t54, t56  (-rSo)
+	  77: ICRFL       	t56, $0x1, CR
+	  78: INCEIPL       	$4
+
+	0x2547A464:  41860010  bc 12,6,0x2547A474
+	  79: Js06o       	$0x2547A474
+
+
+. 0 2547A420 72
+. 80 01 00 00 38 C0 00 01 80 FF 00 0C 94 01 FF E0 3A 81 00 17 56 85 00 36 91 25 00 08 91 25 00 00 39 27 00 01 90 B3 00 08 91 45 00 04 82 6A 01 80 91 3F 00 0C 50 D3 C9 4C 92 6A 01 80 7C B3 2B 78 2C 98 00 00 41 86 00 10
+
+==== BB 844 (0x2547A474) approx BBs exec'd 0 ====
+
+	0x2547A474:  853C0008  lwzu r9,8(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R28
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x2547A478:  2C090000  cmpi cr0,r9,0
+	   6: GETL       	R9, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547A47C:  4182043C  bc 12,2,0x2547A8B8
+	  10: Js02o       	$0x2547A8B8
+
+
+. 0 2547A474 12
+. 85 3C 00 08 2C 09 00 00 41 82 04 3C
+
+==== BB 845 (0x2547A488) approx BBs exec'd 0 ====
+
+	0x2547A488:  807F0040  lwz r3,64(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x40, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547A48C:  7C091800  cmp cr0,r9,r3
+	   5: GETL       	R9, t4
+	   6: GETL       	R3, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547A490:  41820014  bc 12,2,0x2547A4A4
+	  10: Js02o       	$0x2547A4A4
+
+
+. 0 2547A488 12
+. 80 7F 00 40 7C 09 18 00 41 82 00 14
+
+==== BB 846 (0x2547A494) approx BBs exec'd 0 ====
+
+	0x2547A494:  3D407FFF  lis r10,32767
+	   0: MOVL       	$0x7FFF0000, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x2547A498:  6148FFFF  ori r8,r10,0xFFFF
+	   3: MOVL       	$0x7FFFFFFF, t2
+	   4: PUTL       	t2, R8
+	   5: INCEIPL       	$4
+
+	0x2547A49C:  7F094000  cmp cr6,r9,r8
+	   6: GETL       	R9, t4
+	   7: GETL       	R8, t6
+	   8: CMPL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x6, CR
+	  10: INCEIPL       	$4
+
+	0x2547A4A0:  409AFFD4  bc 4,26,0x2547A474
+	  11: Jc26o       	$0x2547A474
+
+
+. 0 2547A494 16
+. 3D 40 7F FF 61 48 FF FF 7F 09 40 00 40 9A FF D4
+
+==== BB 847 (0x2547A8B8) approx BBs exec'd 0 ====
+
+	0x2547A8B8:  2F980000  cmpi cr7,r24,0
+	   0: GETL       	R24, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547A8BC:  419E005C  bc 12,30,0x2547A918
+	   4: Js30o       	$0x2547A918
+
+
+. 0 2547A8B8 8
+. 2F 98 00 00 41 9E 00 5C
+
+==== BB 848 (0x2547A918) approx BBs exec'd 0 ====
+
+	0x2547A918:  801B0000  lwz r0,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547A91C:  2F1B0000  cmpi cr6,r27,0
+	   4: GETL       	R27, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x2547A920:  2C000000  cmpi cr0,r0,0
+	   8: GETL       	R0, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x2547A924:  4182001C  bc 12,2,0x2547A940
+	  12: Js02o       	$0x2547A940
+
+
+. 0 2547A918 16
+. 80 1B 00 00 2F 1B 00 00 2C 00 00 00 41 82 00 1C
+
+==== BB 849 (0x2547A928) approx BBs exec'd 0 ====
+
+	0x2547A928:  837B0008  lwz r27,8(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x2547A92C:  2F1B0000  cmpi cr6,r27,0
+	   5: GETL       	R27, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547A930:  41BAFC94  bc 13,26,0x2547A5C4
+	   9: Js26o       	$0x2547A5C4
+
+
+. 0 2547A928 12
+. 83 7B 00 08 2F 1B 00 00 41 BA FC 94
+
+==== BB 850 (0x2547A934) approx BBs exec'd 0 ====
+
+	0x2547A934:  801B0000  lwz r0,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x2547A938:  2C000000  cmpi cr0,r0,0
+	   4: GETL       	R0, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x2547A93C:  4082FFEC  bc 4,2,0x2547A928
+	   8: Jc02o       	$0x2547A928
+
+
+. 0 2547A934 12
+. 80 1B 00 00 2C 00 00 00 40 82 FF EC
+
+==== BB 851 (0x2547A940) approx BBs exec'd 0 ====
+
+	0x2547A940:  409AF9DC  bc 4,26,0x2547A31C
+	   0: Jc26o       	$0x2547A31C
+
+
+. 0 2547A940 4
+. 40 9A F9 DC
+
+==== BB 852 (0x2547A35C) approx BBs exec'd 0 ====
+
+	0x2547A35C:  A13A0156  lhz r9,342(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x156, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547A360:  2F090000  cmpi cr6,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547A364:  409A05E4  bc 4,26,0x2547A948
+	   9: Jc26o       	$0x2547A948
+
+
+. 0 2547A35C 12
+. A1 3A 01 56 2F 09 00 00 40 9A 05 E4
+
+==== BB 853 (0x2547A948) approx BBs exec'd 0 ====
+
+	0x2547A948:  5534103A  rlwinm r20,r9,2,0,29
+	   0: GETL       	R9, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R20
+	   3: INCEIPL       	$4
+
+	0x2547A94C:  81810000  lwz r12,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x2547A950:  3A54001E  addi r18,r20,30
+	   8: GETL       	R20, t6
+	   9: ADDL       	$0x1E, t6
+	  10: PUTL       	t6, R18
+	  11: INCEIPL       	$4
+
+	0x2547A954:  56490376  rlwinm r9,r18,0,13,27
+	  12: GETL       	R18, t8
+	  13: ANDL       	$0x7FFF0, t8
+	  14: PUTL       	t8, R9
+	  15: INCEIPL       	$4
+
+	0x2547A958:  7D4900D0  neg r10,r9
+	  16: GETL       	R9, t10
+	  17: NEGL       	t10
+	  18: PUTL       	t10, R10
+	  19: INCEIPL       	$4
+
+	0x2547A95C:  7D81516E  stwux r12,r1,r10
+	  20: GETL       	R10, t12
+	  21: GETL       	R1, t14
+	  22: ADDL       	t14, t12
+	  23: PUTL       	t12, R1
+	  24: GETL       	R12, t16
+	  25: STL       	t16, (t12)
+	  26: INCEIPL       	$4
+
+	0x2547A960:  39010017  addi r8,r1,23
+	  27: GETL       	R1, t18
+	  28: ADDL       	$0x17, t18
+	  29: PUTL       	t18, R8
+	  30: INCEIPL       	$4
+
+	0x2547A964:  55180036  rlwinm r24,r8,0,0,27
+	  31: GETL       	R8, t20
+	  32: ANDL       	$0xFFFFFFF0, t20
+	  33: PUTL       	t20, R24
+	  34: INCEIPL       	$4
+
+	0x2547A968:  4BFFFA00  b 0x2547A368
+	  35: JMPo       	$0x2547A368  ($4)
+
+
+. 0 2547A948 36
+. 55 34 10 3A 81 81 00 00 3A 54 00 1E 56 49 03 76 7D 49 00 D0 7D 81 51 6E 39 01 00 17 55 18 00 36 4B FF FA 00
+
+==== BB 854 (0x2547A0F0) approx BBs exec'd 0 ====
+
+	0x2547A0F0:  80FF0004  lwz r7,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547A0F4:  811F0008  lwz r8,8(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x2547A0F8:  81230018  lwz r9,24(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x2547A0FC:  4BFFCBED  bl 0x25476CE8
+	  15: MOVL       	$0x2547A100, t12
+	  16: PUTL       	t12, LR
+	  17: JMPo-c       	$0x25476CE8  ($4)
+
+
+. 0 2547A0F0 16
+. 80 FF 00 04 81 1F 00 08 81 23 00 18 4B FF CB ED
+
+==== BB 855 (0x25482F10) approx BBs exec'd 0 ====
+
+	0x25482F10:  84C40004  lwzu r6,4(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R4
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0x25482F14:  7C072A14  add r0,r7,r5
+	   6: GETL       	R7, t4
+	   7: GETL       	R5, t6
+	   8: ADDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x25482F18:  7D0928F8  nor r9,r8,r5
+	  11: GETL       	R8, t8
+	  12: GETL       	R5, t10
+	  13: ORL       	t10, t8
+	  14: NOTL       	t8
+	  15: PUTL       	t8, R9
+	  16: INCEIPL       	$4
+
+	0x25482F1C:  7C004839  and. r0,r0,r9
+	  17: GETL       	R0, t12
+	  18: GETL       	R9, t14
+	  19: ANDL       	t12, t14
+	  20: PUTL       	t14, R0
+	  21: CMP0L       	t14, t16  (-rSo)
+	  22: ICRFL       	t16, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x25482F20:  7C853000  cmp cr1,r5,r6
+	  24: GETL       	R5, t18
+	  25: GETL       	R6, t20
+	  26: CMPL       	t18, t20, t22  (-rSo)
+	  27: ICRFL       	t22, $0x1, CR
+	  28: INCEIPL       	$4
+
+	0x25482F24:  4182FFE4  bc 12,2,0x25482F08
+	  29: Js02o       	$0x25482F08
+
+
+. 0 25482F10 24
+. 84 C4 00 04 7C 07 2A 14 7D 09 28 F8 7C 00 48 39 7C 85 30 00 41 82 FF E4
+
+==== BB 856 (0x25482F28) approx BBs exec'd 0 ====
+
+	0x25482F28:  7D002838  and r0,r8,r5
+	   0: GETL       	R8, t0
+	   1: GETL       	R5, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482F2C:  4186002C  bc 12,6,0x25482F58
+	   5: Js06o       	$0x25482F58
+
+
+. 0 25482F28 8
+. 7D 00 28 38 41 86 00 2C
+
+==== BB 857 (0x25482F30) approx BBs exec'd 0 ====
+
+	0x25482F30:  7C004214  add r0,r0,r8
+	   0: GETL       	R0, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x25482F34:  7CAA3279  xor. r10,r5,r6
+	   5: GETL       	R5, t4
+	   6: GETL       	R6, t6
+	   7: XORL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25482F38:  7D290078  andc r9,r9,r0
+	  12: GETL       	R9, t10
+	  13: GETL       	R0, t12
+	  14: NOTL       	t12
+	  15: ANDL       	t10, t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x25482F3C:  41800034  bc 12,0,0x25482F70
+	  18: Js00o       	$0x25482F70
+
+
+. 0 25482F30 16
+. 7C 00 42 14 7C AA 32 79 7D 29 00 78 41 80 00 34
+
+==== BB 858 (0x25482F40) approx BBs exec'd 0 ====
+
+	0x25482F40:  7D4A0034  cntlzw r10,r10
+	   0: GETL       	R10, t0
+	   1: CNTLZL       	t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0x25482F44:  7D290034  cntlzw r9,r9
+	   4: GETL       	R9, t2
+	   5: CNTLZL       	t2
+	   6: PUTL       	t2, R9
+	   7: INCEIPL       	$4
+
+	0x25482F48:  39290007  addi r9,r9,7
+	   8: GETL       	R9, t4
+	   9: ADDL       	$0x7, t4
+	  10: PUTL       	t4, R9
+	  11: INCEIPL       	$4
+
+	0x25482F4C:  7C895000  cmp cr1,r9,r10
+	  12: GETL       	R9, t6
+	  13: GETL       	R10, t8
+	  14: CMPL       	t6, t8, t10  (-rSo)
+	  15: ICRFL       	t10, $0x1, CR
+	  16: INCEIPL       	$4
+
+	0x25482F50:  7C662850  subf r3,r6,r5
+	  17: GETL       	R6, t12
+	  18: GETL       	R5, t14
+	  19: SUBL       	t12, t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0x25482F54:  4CA40020  bclr 5,4
+	  22: GETL       	LR, t16
+	  23: Jc04o-r       	t16
+
+
+. 0 25482F40 24
+. 7D 4A 00 34 7D 29 00 34 39 29 00 07 7C 89 50 00 7C 66 28 50 4C A4 00 20
+
+==== BB 859 (0x25482F58) approx BBs exec'd 0 ====
+
+	0x25482F58:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25482F5C:  4E800020  blr
+	   3: GETL       	LR, t2
+	   4: JMPo-r       	t2  ($4)
+
+
+. 0 25482F58 8
+. 38 60 00 00 4E 80 00 20
+
+==== BB 860 (0x2547C750) approx BBs exec'd 0 ====
+
+	0x2547C750:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547C754:  38600001  li r3,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547C758:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0x2547C75C:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x2547C760:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547C764:  83E1001C  lwz r31,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0x2547C768:  38210020  addi r1,r1,32
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0x2547C76C:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+. 0 2547C750 32
+. 80 81 00 24 38 60 00 01 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 861 (0x25476F54) approx BBs exec'd 0 ====
+
+	0x25476F54:  7FA3EB78  or r3,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476F58:  83E10294  lwz r31,660(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x294, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0x25476F5C:  81810244  lwz r12,580(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x244, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R12
+	  12: INCEIPL       	$4
+
+	0x25476F60:  7FE803A6  mtlr r31
+	  13: GETL       	R31, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x25476F64:  81C10248  lwz r14,584(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x248, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R14
+	  20: INCEIPL       	$4
+
+	0x25476F68:  81E1024C  lwz r15,588(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x24C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R15
+	  25: INCEIPL       	$4
+
+	0x25476F6C:  7D818120  mtcrf 0x18,r12
+	  26: GETL       	R12, t20
+	  27: ICRFL       	t20, $0x3, CR
+	  28: ICRFL       	t20, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x25476F70:  82010250  lwz r16,592(r1)
+	  30: GETL       	R1, t22
+	  31: ADDL       	$0x250, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R16
+	  34: INCEIPL       	$4
+
+	0x25476F74:  82210254  lwz r17,596(r1)
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x254, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R17
+	  39: INCEIPL       	$4
+
+	0x25476F78:  82410258  lwz r18,600(r1)
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x258, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R18
+	  44: INCEIPL       	$4
+
+	0x25476F7C:  8261025C  lwz r19,604(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x25C, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R19
+	  49: INCEIPL       	$4
+
+	0x25476F80:  82810260  lwz r20,608(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x260, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R20
+	  54: INCEIPL       	$4
+
+	0x25476F84:  82A10264  lwz r21,612(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x264, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R21
+	  59: INCEIPL       	$4
+
+	0x25476F88:  82C10268  lwz r22,616(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x268, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R22
+	  64: INCEIPL       	$4
+
+	0x25476F8C:  82E1026C  lwz r23,620(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x26C, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R23
+	  69: INCEIPL       	$4
+
+	0x25476F90:  83010270  lwz r24,624(r1)
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x270, t54
+	  72: LDL       	(t54), t56
+	  73: PUTL       	t56, R24
+	  74: INCEIPL       	$4
+
+	0x25476F94:  83210274  lwz r25,628(r1)
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x274, t58
+	  77: LDL       	(t58), t60
+	  78: PUTL       	t60, R25
+	  79: INCEIPL       	$4
+
+	0x25476F98:  83410278  lwz r26,632(r1)
+	  80: GETL       	R1, t62
+	  81: ADDL       	$0x278, t62
+	  82: LDL       	(t62), t64
+	  83: PUTL       	t64, R26
+	  84: INCEIPL       	$4
+
+	0x25476F9C:  8361027C  lwz r27,636(r1)
+	  85: GETL       	R1, t66
+	  86: ADDL       	$0x27C, t66
+	  87: LDL       	(t66), t68
+	  88: PUTL       	t68, R27
+	  89: INCEIPL       	$4
+
+	0x25476FA0:  83810280  lwz r28,640(r1)
+	  90: GETL       	R1, t70
+	  91: ADDL       	$0x280, t70
+	  92: LDL       	(t70), t72
+	  93: PUTL       	t72, R28
+	  94: INCEIPL       	$4
+
+	0x25476FA4:  83A10284  lwz r29,644(r1)
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0x284, t74
+	  97: LDL       	(t74), t76
+	  98: PUTL       	t76, R29
+	  99: INCEIPL       	$4
+
+	0x25476FA8:  83C10288  lwz r30,648(r1)
+	 100: GETL       	R1, t78
+	 101: ADDL       	$0x288, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R30
+	 104: INCEIPL       	$4
+
+	0x25476FAC:  83E1028C  lwz r31,652(r1)
+	 105: GETL       	R1, t82
+	 106: ADDL       	$0x28C, t82
+	 107: LDL       	(t82), t84
+	 108: PUTL       	t84, R31
+	 109: INCEIPL       	$4
+
+	0x25476FB0:  38210290  addi r1,r1,656
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x290, t86
+	 112: PUTL       	t86, R1
+	 113: INCEIPL       	$4
+
+	0x25476FB4:  4E800020  blr
+	 114: GETL       	LR, t88
+	 115: JMPo-r       	t88  ($4)
+
+
+. 0 25476F54 100
+. 7F A3 EB 78 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+
+==== BB 862 (0x2547A460) approx BBs exec'd 0 ====
+
+	0x2547A460:  2C980000  cmpi cr1,r24,0
+	   0: GETL       	R24, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547A464:  41860010  bc 12,6,0x2547A474
+	   4: Js06o       	$0x2547A474
+
+
+. 0 2547A460 8
+. 2C 98 00 00 41 86 00 10
+
+==== BB 863 (0x2547A468) approx BBs exec'd 0 ====
+
+	0x2547A468:  56CB103A  rlwinm r11,r22,2,0,29
+	   0: GETL       	R22, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R11
+	   3: INCEIPL       	$4
+
+	0x2547A46C:  3AD60001  addi r22,r22,1
+	   4: GETL       	R22, t2
+	   5: ADDL       	$0x1, t2
+	   6: PUTL       	t2, R22
+	   7: INCEIPL       	$4
+
+	0x2547A470:  7D4BC12E  stwx r10,r11,r24
+	   8: GETL       	R24, t4
+	   9: GETL       	R11, t6
+	  10: ADDL       	t6, t4
+	  11: GETL       	R10, t8
+	  12: STL       	t8, (t4)
+	  13: INCEIPL       	$4
+
+	0x2547A474:  853C0008  lwzu r9,8(r28)
+	  14: GETL       	R28, t10
+	  15: ADDL       	$0x8, t10
+	  16: PUTL       	t10, R28
+	  17: LDL       	(t10), t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0x2547A478:  2C090000  cmpi cr0,r9,0
+	  20: GETL       	R9, t14
+	  21: CMP0L       	t14, t16  (-rSo)
+	  22: ICRFL       	t16, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x2547A47C:  4182043C  bc 12,2,0x2547A8B8
+	  24: Js02o       	$0x2547A8B8
+
+
+. 0 2547A468 24
+. 56 CB 10 3A 3A D6 00 01 7D 4B C1 2E 85 3C 00 08 2C 09 00 00 41 82 04 3C
+
+==== BB 864 (0x2547A8C0) approx BBs exec'd 0 ====
+
+	0x2547A8C0:  3BB60001  addi r29,r22,1
+	   0: GETL       	R22, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x2547A8C4:  56DC103A  rlwinm r28,r22,2,0,29
+	   4: GETL       	R22, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R28
+	   7: INCEIPL       	$4
+
+	0x2547A8C8:  3A400000  li r18,0
+	   8: MOVL       	$0x0, t4
+	   9: PUTL       	t4, R18
+	  10: INCEIPL       	$4
+
+	0x2547A8CC:  57B61838  rlwinm r22,r29,3,0,28
+	  11: GETL       	R29, t6
+	  12: SHLL       	$0x3, t6
+	  13: PUTL       	t6, R22
+	  14: INCEIPL       	$4
+
+	0x2547A8D0:  7E5CC12E  stwx r18,r28,r24
+	  15: GETL       	R24, t8
+	  16: GETL       	R28, t10
+	  17: ADDL       	t10, t8
+	  18: GETL       	R18, t12
+	  19: STL       	t12, (t8)
+	  20: INCEIPL       	$4
+
+	0x2547A8D4:  38760004  addi r3,r22,4
+	  21: GETL       	R22, t14
+	  22: ADDL       	$0x4, t14
+	  23: PUTL       	t14, R3
+	  24: INCEIPL       	$4
+
+	0x2547A8D8:  4801D14D  bl 0x25497A24
+	  25: MOVL       	$0x2547A8DC, t16
+	  26: PUTL       	t16, LR
+	  27: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 2547A8C0 28
+. 3B B6 00 01 56 DC 10 3A 3A 40 00 00 57 B6 18 38 7E 5C C1 2E 38 76 00 04 48 01 D1 4D
+
+==== BB 865 (0x2547A8DC) approx BBs exec'd 0 ====
+
+	0x2547A8DC:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547A8E0:  7C691B78  or r9,r3,r3
+	   4: GETL       	R3, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0x2547A8E4:  907A01E8  stw r3,488(r26)
+	   7: GETL       	R3, t6
+	   8: GETL       	R26, t8
+	   9: ADDL       	$0x1E8, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x2547A8E8:  418605C4  bc 12,6,0x2547AEAC
+	  12: Js06o       	$0x2547AEAC
+
+
+. 0 2547A8DC 16
+. 2C 83 00 00 7C 69 1B 78 90 7A 01 E8 41 86 05 C4
+
+==== BB 866 (0x2547A8EC) approx BBs exec'd 0 ====
+
+	0x2547A8EC:  57B5103A  rlwinm r21,r29,2,0,29
+	   0: GETL       	R29, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R21
+	   3: INCEIPL       	$4
+
+	0x2547A8F0:  7F04C378  or r4,r24,r24
+	   4: GETL       	R24, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0x2547A8F4:  7EA5AB78  or r5,r21,r21
+	   7: GETL       	R21, t4
+	   8: PUTL       	t4, R5
+	   9: INCEIPL       	$4
+
+	0x2547A8F8:  93490000  stw r26,0(r9)
+	  10: GETL       	R26, t6
+	  11: GETL       	R9, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547A8FC:  38630004  addi r3,r3,4
+	  14: GETL       	R3, t10
+	  15: ADDL       	$0x4, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0x2547A900:  480092E1  bl 0x25483BE0
+	  18: MOVL       	$0x2547A904, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 2547A8EC 24
+. 57 B5 10 3A 7F 04 C3 78 7E A5 AB 78 93 49 00 00 38 63 00 04 48 00 92 E1
+
+==== BB 867 (0x2547A904) approx BBs exec'd 0 ====
+
+	0x2547A904:  809A01E8  lwz r4,488(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547A908:  7EA5AB78  or r5,r21,r21
+	   5: GETL       	R21, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547A90C:  7F552214  add r26,r21,r4
+	   8: GETL       	R21, t6
+	   9: GETL       	R4, t8
+	  10: ADDL       	t6, t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x2547A910:  387A0004  addi r3,r26,4
+	  13: GETL       	R26, t10
+	  14: ADDL       	$0x4, t10
+	  15: PUTL       	t10, R3
+	  16: INCEIPL       	$4
+
+	0x2547A914:  480092CD  bl 0x25483BE0
+	  17: MOVL       	$0x2547A918, t12
+	  18: PUTL       	t12, LR
+	  19: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 2547A904 20
+. 80 9A 01 E8 7E A5 AB 78 7F 55 22 14 38 7A 00 04 48 00 92 CD
+
+==== BB 868 (0x25482F94) approx BBs exec'd 0 ====
+
+	0x25482F94:  8CC40001  lbzu r6,1(r4)
+	   0: GETL       	R4, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R4
+	   3: LDB       	(t0), t2
+	   4: PUTL       	t2, R6
+	   5: INCEIPL       	$4
+
+	0x25482F98:  2C850000  cmpi cr1,r5,0
+	   6: GETL       	R5, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25482F9C:  41860020  bc 12,6,0x25482FBC
+	  10: Js06o       	$0x25482FBC
+
+
+. 0 25482F94 12
+. 8C C4 00 01 2C 85 00 00 41 86 00 20
+
+==== BB 869 (0x25476ED0) approx BBs exec'd 0 ====
+
+	0x25476ED0:  809D001C  lwz r4,28(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25476ED4:  3B600000  li r27,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R27
+	   7: INCEIPL       	$4
+
+	0x25476ED8:  48000020  b 0x25476EF8
+	   8: JMPo       	$0x25476EF8  ($4)
+
+
+. 0 25476ED0 12
+. 80 9D 00 1C 3B 60 00 00 48 00 00 20
+
+==== BB 870 (0x25476EF8) approx BBs exec'd 0 ====
+
+	0x25476EF8:  2F040000  cmpi cr6,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25476EFC:  409AFFE0  bc 4,26,0x25476EDC
+	   4: Jc26o       	$0x25476EDC
+
+
+. 0 25476EF8 8
+. 2F 04 00 00 40 9A FF E0
+
+==== BB 871 (0x25476EDC) approx BBs exec'd 0 ====
+
+	0x25476EDC:  7C9B2378  or r27,r4,r4
+	   0: GETL       	R4, t0
+	   1: PUTL       	t0, R27
+	   2: INCEIPL       	$4
+
+	0x25476EE0:  80840000  lwz r4,0(r4)
+	   3: GETL       	R4, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0x25476EE4:  7F83E378  or r3,r28,r28
+	   7: GETL       	R28, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x25476EE8:  4800BFF9  bl 0x25482EE0
+	  10: MOVL       	$0x25476EEC, t8
+	  11: PUTL       	t8, LR
+	  12: JMPo-c       	$0x25482EE0  ($4)
+
+
+. 0 25476EDC 16
+. 7C 9B 23 78 80 84 00 00 7F 83 E3 78 48 00 BF F9
+
+==== BB 872 (0x25476EEC) approx BBs exec'd 0 ====
+
+	0x25476EEC:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25476EF0:  419E0058  bc 12,30,0x25476F48
+	   4: Js30o       	$0x25476F48
+
+
+. 0 25476EEC 8
+. 2F 83 00 00 41 9E 00 58
+
+==== BB 873 (0x25476EF4) approx BBs exec'd 0 ====
+
+	0x25476EF4:  809B0004  lwz r4,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25476EF8:  2F040000  cmpi cr6,r4,0
+	   5: GETL       	R4, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25476EFC:  409AFFE0  bc 4,26,0x25476EDC
+	   9: Jc26o       	$0x25476EDC
+
+
+. 0 25476EF4 12
+. 80 9B 00 04 2F 04 00 00 40 9A FF E0
+
+==== BB 874 (0x25476F00) approx BBs exec'd 0 ====
+
+	0x25476F00:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25476F04:  4800C0CD  bl 0x25482FD0
+	   3: MOVL       	$0x25476F08, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x25482FD0  ($4)
+
+
+. 0 25476F00 8
+. 7F 83 E3 78 48 00 C0 CD
+
+==== BB 875 (0x25476F08) approx BBs exec'd 0 ====
+
+	0x25476F08:  7C661B78  or r6,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x25476F0C:  3863000D  addi r3,r3,13
+	   3: GETL       	R3, t2
+	   4: ADDL       	$0xD, t2
+	   5: PUTL       	t2, R3
+	   6: INCEIPL       	$4
+
+	0x25476F10:  3B460001  addi r26,r6,1
+	   7: GETL       	R6, t4
+	   8: ADDL       	$0x1, t4
+	   9: PUTL       	t4, R26
+	  10: INCEIPL       	$4
+
+	0x25476F14:  48020B11  bl 0x25497A24
+	  11: MOVL       	$0x25476F18, t6
+	  12: PUTL       	t6, LR
+	  13: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 25476F08 16
+. 7C 66 1B 78 38 63 00 0D 3B 46 00 01 48 02 0B 11
+
+==== BB 876 (0x25476F18) approx BBs exec'd 0 ====
+
+	0x25476F18:  7C7F1B79  or. r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25476F1C:  80DE01AC  lwz r6,428(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1AC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25476F20:  7F84E378  or r4,r28,r28
+	  10: GETL       	R28, t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x25476F24:  4182013C  bc 12,2,0x25477060
+	  13: Js02o       	$0x25477060
+
+
+. 0 25476F18 16
+. 7C 7F 1B 79 80 DE 01 AC 7F 84 E3 78 41 82 01 3C
+
+==== BB 877 (0x25476F28) approx BBs exec'd 0 ====
+
+	0x25476F28:  7F84E378  or r4,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x25476F2C:  7F45D378  or r5,r26,r26
+	   3: GETL       	R26, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0x25476F30:  387F000C  addi r3,r31,12
+	   6: GETL       	R31, t4
+	   7: ADDL       	$0xC, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0x25476F34:  4800CCAD  bl 0x25483BE0
+	  10: MOVL       	$0x25476F38, t6
+	  11: PUTL       	t6, LR
+	  12: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 25476F28 16
+. 7F 84 E3 78 7F 45 D3 78 38 7F 00 0C 48 00 CC AD
+
+==== BB 878 (0x25476F38) approx BBs exec'd 0 ====
+
+	0x25476F38:  92DF0004  stw r22,4(r31)
+	   0: GETL       	R22, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x4, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25476F3C:  907F0000  stw r3,0(r31)
+	   5: GETL       	R3, t4
+	   6: GETL       	R31, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x25476F40:  92DF0008  stw r22,8(r31)
+	   9: GETL       	R22, t8
+	  10: GETL       	R31, t10
+	  11: ADDL       	$0x8, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25476F44:  93FB0004  stw r31,4(r27)
+	  14: GETL       	R31, t12
+	  15: GETL       	R27, t14
+	  16: ADDL       	$0x4, t14
+	  17: STL       	t12, (t14)
+	  18: INCEIPL       	$4
+
+	0x25476F48:  811D0180  lwz r8,384(r29)
+	  19: GETL       	R29, t16
+	  20: ADDL       	$0x180, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R8
+	  23: INCEIPL       	$4
+
+	0x25476F4C:  65070080  oris r7,r8,0x80
+	  24: GETL       	R8, t20
+	  25: ORL       	$0x800000, t20
+	  26: PUTL       	t20, R7
+	  27: INCEIPL       	$4
+
+	0x25476F50:  90FD0180  stw r7,384(r29)
+	  28: GETL       	R7, t22
+	  29: GETL       	R29, t24
+	  30: ADDL       	$0x180, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0x25476F54:  7FA3EB78  or r3,r29,r29
+	  33: GETL       	R29, t26
+	  34: PUTL       	t26, R3
+	  35: INCEIPL       	$4
+
+	0x25476F58:  83E10294  lwz r31,660(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x294, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0x25476F5C:  81810244  lwz r12,580(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x244, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R12
+	  45: INCEIPL       	$4
+
+	0x25476F60:  7FE803A6  mtlr r31
+	  46: GETL       	R31, t36
+	  47: PUTL       	t36, LR
+	  48: INCEIPL       	$4
+
+	0x25476F64:  81C10248  lwz r14,584(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x248, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R14
+	  53: INCEIPL       	$4
+
+	0x25476F68:  81E1024C  lwz r15,588(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x24C, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R15
+	  58: INCEIPL       	$4
+
+	0x25476F6C:  7D818120  mtcrf 0x18,r12
+	  59: GETL       	R12, t46
+	  60: ICRFL       	t46, $0x3, CR
+	  61: ICRFL       	t46, $0x4, CR
+	  62: INCEIPL       	$4
+
+	0x25476F70:  82010250  lwz r16,592(r1)
+	  63: GETL       	R1, t48
+	  64: ADDL       	$0x250, t48
+	  65: LDL       	(t48), t50
+	  66: PUTL       	t50, R16
+	  67: INCEIPL       	$4
+
+	0x25476F74:  82210254  lwz r17,596(r1)
+	  68: GETL       	R1, t52
+	  69: ADDL       	$0x254, t52
+	  70: LDL       	(t52), t54
+	  71: PUTL       	t54, R17
+	  72: INCEIPL       	$4
+
+	0x25476F78:  82410258  lwz r18,600(r1)
+	  73: GETL       	R1, t56
+	  74: ADDL       	$0x258, t56
+	  75: LDL       	(t56), t58
+	  76: PUTL       	t58, R18
+	  77: INCEIPL       	$4
+
+	0x25476F7C:  8261025C  lwz r19,604(r1)
+	  78: GETL       	R1, t60
+	  79: ADDL       	$0x25C, t60
+	  80: LDL       	(t60), t62
+	  81: PUTL       	t62, R19
+	  82: INCEIPL       	$4
+
+	0x25476F80:  82810260  lwz r20,608(r1)
+	  83: GETL       	R1, t64
+	  84: ADDL       	$0x260, t64
+	  85: LDL       	(t64), t66
+	  86: PUTL       	t66, R20
+	  87: INCEIPL       	$4
+
+	0x25476F84:  82A10264  lwz r21,612(r1)
+	  88: GETL       	R1, t68
+	  89: ADDL       	$0x264, t68
+	  90: LDL       	(t68), t70
+	  91: PUTL       	t70, R21
+	  92: INCEIPL       	$4
+
+	0x25476F88:  82C10268  lwz r22,616(r1)
+	  93: GETL       	R1, t72
+	  94: ADDL       	$0x268, t72
+	  95: LDL       	(t72), t74
+	  96: PUTL       	t74, R22
+	  97: INCEIPL       	$4
+
+	0x25476F8C:  82E1026C  lwz r23,620(r1)
+	  98: GETL       	R1, t76
+	  99: ADDL       	$0x26C, t76
+	 100: LDL       	(t76), t78
+	 101: PUTL       	t78, R23
+	 102: INCEIPL       	$4
+
+	0x25476F90:  83010270  lwz r24,624(r1)
+	 103: GETL       	R1, t80
+	 104: ADDL       	$0x270, t80
+	 105: LDL       	(t80), t82
+	 106: PUTL       	t82, R24
+	 107: INCEIPL       	$4
+
+	0x25476F94:  83210274  lwz r25,628(r1)
+	 108: GETL       	R1, t84
+	 109: ADDL       	$0x274, t84
+	 110: LDL       	(t84), t86
+	 111: PUTL       	t86, R25
+	 112: INCEIPL       	$4
+
+	0x25476F98:  83410278  lwz r26,632(r1)
+	 113: GETL       	R1, t88
+	 114: ADDL       	$0x278, t88
+	 115: LDL       	(t88), t90
+	 116: PUTL       	t90, R26
+	 117: INCEIPL       	$4
+
+	0x25476F9C:  8361027C  lwz r27,636(r1)
+	 118: GETL       	R1, t92
+	 119: ADDL       	$0x27C, t92
+	 120: LDL       	(t92), t94
+	 121: PUTL       	t94, R27
+	 122: INCEIPL       	$4
+
+	0x25476FA0:  83810280  lwz r28,640(r1)
+	 123: GETL       	R1, t96
+	 124: ADDL       	$0x280, t96
+	 125: LDL       	(t96), t98
+	 126: PUTL       	t98, R28
+	 127: INCEIPL       	$4
+
+	0x25476FA4:  83A10284  lwz r29,644(r1)
+	 128: GETL       	R1, t100
+	 129: ADDL       	$0x284, t100
+	 130: LDL       	(t100), t102
+	 131: PUTL       	t102, R29
+	 132: INCEIPL       	$4
+
+	0x25476FA8:  83C10288  lwz r30,648(r1)
+	 133: GETL       	R1, t104
+	 134: ADDL       	$0x288, t104
+	 135: LDL       	(t104), t106
+	 136: PUTL       	t106, R30
+	 137: INCEIPL       	$4
+
+	0x25476FAC:  83E1028C  lwz r31,652(r1)
+	 138: GETL       	R1, t108
+	 139: ADDL       	$0x28C, t108
+	 140: LDL       	(t108), t110
+	 141: PUTL       	t110, R31
+	 142: INCEIPL       	$4
+
+	0x25476FB0:  38210290  addi r1,r1,656
+	 143: GETL       	R1, t112
+	 144: ADDL       	$0x290, t112
+	 145: PUTL       	t112, R1
+	 146: INCEIPL       	$4
+
+	0x25476FB4:  4E800020  blr
+	 147: GETL       	LR, t114
+	 148: JMPo-r       	t114  ($4)
+
+
+. 0 25476F38 128
+. 92 DF 00 04 90 7F 00 00 92 DF 00 08 93 FB 00 04 81 1D 01 80 65 07 00 80 90 FD 01 80 7F A3 EB 78 83 E1 02 94 81 81 02 44 7F E8 03 A6 81 C1 02 48 81 E1 02 4C 7D 81 81 20 82 01 02 50 82 21 02 54 82 41 02 58 82 61 02 5C 82 81 02 60 82 A1 02 64 82 C1 02 68 82 E1 02 6C 83 01 02 70 83 21 02 74 83 41 02 78 83 61 02 7C 83 81 02 80 83 A1 02 84 83 C1 02 88 83 E1 02 8C 38 21 02 90 4E 80 00 20
+
+==== BB 879 (0x2547A374) approx BBs exec'd 0 ====
+
+	0x2547A374:  809A00F0  lwz r4,240(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0xF0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547A378:  2C840000  cmpi cr1,r4,0
+	   5: GETL       	R4, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547A37C:  40860010  bc 4,6,0x2547A38C
+	   9: Jc06o       	$0x2547A38C
+
+
+. 0 2547A374 12
+. 80 9A 00 F0 2C 84 00 00 40 86 00 10
+
+==== BB 880 (0x2547A380) approx BBs exec'd 0 ====
+
+	0x2547A380:  80BA00E8  lwz r5,232(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0xE8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547A384:  2C050000  cmpi cr0,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547A388:  41820530  bc 12,2,0x2547A8B8
+	   9: Js02o       	$0x2547A8B8
+
+
+. 0 2547A380 12
+. 80 BA 00 E8 2C 05 00 00 41 82 05 30
+
+==== BB 881 (0x2547A5C4) approx BBs exec'd 0 ====
+
+	0x2547A5C4:  83100000  lwz r24,0(r16)
+	   0: GETL       	R16, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R24
+	   3: INCEIPL       	$4
+
+	0x2547A5C8:  827F0038  lwz r19,56(r31)
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x38, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R19
+	   8: INCEIPL       	$4
+
+	0x2547A5CC:  21780000  subfic r11,r24,0
+	   9: GETL       	R24, t8
+	  10: MOVL       	$0x0, t10
+	  11: SBBL       	t8, t10  (-wCa)
+	  12: PUTL       	t10, R11
+	  13: INCEIPL       	$4
+
+	0x2547A5D0:  7DCBC114  adde r14,r11,r24
+	  14: GETL       	R11, t12
+	  15: GETL       	R24, t14
+	  16: ADCL       	t12, t14  (-rCa-wCa)
+	  17: PUTL       	t14, R14
+	  18: INCEIPL       	$4
+
+	0x2547A5D4:  3153FFFF  addic r10,r19,-1
+	  19: GETL       	R19, t16
+	  20: ADCL       	$0xFFFFFFFF, t16  (-wCa)
+	  21: PUTL       	t16, R10
+	  22: INCEIPL       	$4
+
+	0x2547A5D8:  7F6A9910  subfe r27,r10,r19
+	  23: GETL       	R10, t18
+	  24: GETL       	R19, t20
+	  25: SBBL       	t18, t20  (-rCa-wCa)
+	  26: PUTL       	t20, R27
+	  27: INCEIPL       	$4
+
+	0x2547A5DC:  7DC8D839  and. r8,r14,r27
+	  28: GETL       	R14, t22
+	  29: GETL       	R27, t24
+	  30: ANDL       	t22, t24
+	  31: PUTL       	t24, R8
+	  32: CMP0L       	t24, t26  (-rSo)
+	  33: ICRFL       	t26, $0x0, CR
+	  34: INCEIPL       	$4
+
+	0x2547A5E0:  4182000C  bc 12,2,0x2547A5EC
+	  35: Js02o       	$0x2547A5EC
+
+
+. 0 2547A5C4 32
+. 83 10 00 00 82 7F 00 38 21 78 00 00 7D CB C1 14 31 53 FF FF 7F 6A 99 10 7D C8 D8 39 41 82 00 0C
+
+==== BB 882 (0x2547A5E4) approx BBs exec'd 0 ====
+
+	0x2547A5E4:  811F0038  lwz r8,56(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x38, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547A5E8:  91100000  stw r8,0(r16)
+	   5: GETL       	R8, t4
+	   6: GETL       	R16, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547A5EC:  807701E8  lwz r3,488(r23)
+	   9: GETL       	R23, t8
+	  10: ADDL       	$0x1E8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R3
+	  13: INCEIPL       	$4
+
+	0x2547A5F0:  2F030000  cmpi cr6,r3,0
+	  14: GETL       	R3, t12
+	  15: CMP0L       	t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x6, CR
+	  17: INCEIPL       	$4
+
+	0x2547A5F4:  419A0018  bc 12,26,0x2547A60C
+	  18: Js26o       	$0x2547A60C
+
+
+. 0 2547A5E4 20
+. 81 1F 00 38 91 10 00 00 80 77 01 E8 2F 03 00 00 41 9A 00 18
+
+==== BB 883 (0x2547A60C) approx BBs exec'd 0 ====
+
+	0x2547A60C:  80DF000C  lwz r6,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547A610:  54C51838  rlwinm r5,r6,3,0,28
+	   5: GETL       	R6, t4
+	   6: SHLL       	$0x3, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547A614:  38650004  addi r3,r5,4
+	   9: GETL       	R5, t6
+	  10: ADDL       	$0x4, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x2547A618:  4801D40D  bl 0x25497A24
+	  13: MOVL       	$0x2547A61C, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 2547A60C 16
+. 80 DF 00 0C 54 C5 18 38 38 65 00 04 48 01 D4 0D
+
+==== BB 884 (0x2547A61C) approx BBs exec'd 0 ====
+
+	0x2547A61C:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547A620:  907701E8  stw r3,488(r23)
+	   4: GETL       	R3, t4
+	   5: GETL       	R23, t6
+	   6: ADDL       	$0x1E8, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547A624:  4186089C  bc 12,6,0x2547AEC0
+	   9: Js06o       	$0x2547AEC0
+
+
+. 0 2547A61C 12
+. 2C 83 00 00 90 77 01 E8 41 86 08 9C
+
+==== BB 885 (0x2547A628) approx BBs exec'd 0 ====
+
+	0x2547A628:  80FF0008  lwz r7,8(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547A62C:  38A00000  li r5,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547A630:  829F000C  lwz r20,12(r31)
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R20
+	  12: INCEIPL       	$4
+
+	0x2547A634:  2C070000  cmpi cr0,r7,0
+	  13: GETL       	R7, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0x2547A638:  569D103A  rlwinm r29,r20,2,0,29
+	  17: GETL       	R20, t14
+	  18: SHLL       	$0x2, t14
+	  19: PUTL       	t14, R29
+	  20: INCEIPL       	$4
+
+	0x2547A63C:  9297015C  stw r20,348(r23)
+	  21: GETL       	R20, t16
+	  22: GETL       	R23, t18
+	  23: ADDL       	$0x15C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547A640:  7D23EA14  add r9,r3,r29
+	  26: GETL       	R3, t20
+	  27: GETL       	R29, t22
+	  28: ADDL       	t20, t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0x2547A644:  90BF000C  stw r5,12(r31)
+	  31: GETL       	R5, t24
+	  32: GETL       	R31, t26
+	  33: ADDL       	$0xC, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547A648:  38890004  addi r4,r9,4
+	  36: GETL       	R9, t28
+	  37: ADDL       	$0x4, t28
+	  38: PUTL       	t28, R4
+	  39: INCEIPL       	$4
+
+	0x2547A64C:  90970158  stw r4,344(r23)
+	  40: GETL       	R4, t30
+	  41: GETL       	R23, t32
+	  42: ADDL       	$0x158, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x2547A650:  41820050  bc 12,2,0x2547A6A0
+	  45: Js02o       	$0x2547A6A0
+
+
+. 0 2547A628 44
+. 80 FF 00 08 38 A0 00 00 82 9F 00 0C 2C 07 00 00 56 9D 10 3A 92 97 01 5C 7D 23 EA 14 90 BF 00 0C 38 89 00 04 90 97 01 58 41 82 00 50
+
+==== BB 886 (0x2547A654) approx BBs exec'd 0 ====
+
+	0x2547A654:  2F0F0000  cmpi cr6,r15,0
+	   0: GETL       	R15, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547A658:  38C00000  li r6,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0x2547A65C:  409A01F8  bc 4,26,0x2547A854
+	   7: Jc26o       	$0x2547A854
+
+
+. 0 2547A654 12
+. 2F 0F 00 00 38 C0 00 00 40 9A 01 F8
+
+==== BB 887 (0x2547A660) approx BBs exec'd 0 ====
+
+	0x2547A660:  817F000C  lwz r11,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547A664:  80070004  lwz r0,4(r7)
+	   5: GETL       	R7, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x2547A668:  83370158  lwz r25,344(r23)
+	  10: GETL       	R23, t8
+	  11: ADDL       	$0x158, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R25
+	  14: INCEIPL       	$4
+
+	0x2547A66C:  5576103A  rlwinm r22,r11,2,0,29
+	  15: GETL       	R11, t12
+	  16: SHLL       	$0x2, t12
+	  17: PUTL       	t12, R22
+	  18: INCEIPL       	$4
+
+	0x2547A670:  398B0001  addi r12,r11,1
+	  19: GETL       	R11, t14
+	  20: ADDL       	$0x1, t14
+	  21: PUTL       	t14, R12
+	  22: INCEIPL       	$4
+
+	0x2547A674:  7C16C92E  stwx r0,r22,r25
+	  23: GETL       	R25, t16
+	  24: GETL       	R22, t18
+	  25: ADDL       	t18, t16
+	  26: GETL       	R0, t20
+	  27: STL       	t20, (t16)
+	  28: INCEIPL       	$4
+
+	0x2547A678:  81E70004  lwz r15,4(r7)
+	  29: GETL       	R7, t22
+	  30: ADDL       	$0x4, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R15
+	  33: INCEIPL       	$4
+
+	0x2547A67C:  806F0180  lwz r3,384(r15)
+	  34: GETL       	R15, t26
+	  35: ADDL       	$0x180, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R3
+	  38: INCEIPL       	$4
+
+	0x2547A680:  919F000C  stw r12,12(r31)
+	  39: GETL       	R12, t30
+	  40: GETL       	R31, t32
+	  41: ADDL       	$0xC, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x2547A684:  50C3C94C  rlwimi r3,r6,25,5,6
+	  44: GETL       	R3, t34
+	  45: GETL       	R6, t36
+	  46: ROLL       	$0x19, t36
+	  47: ANDL       	$0x6000000, t36
+	  48: ANDL       	$0xF9FFFFFF, t34
+	  49: ORL       	t34, t36
+	  50: PUTL       	t36, R3
+	  51: INCEIPL       	$4
+
+	0x2547A688:  906F0180  stw r3,384(r15)
+	  52: GETL       	R3, t38
+	  53: GETL       	R15, t40
+	  54: ADDL       	$0x180, t40
+	  55: STL       	t38, (t40)
+	  56: INCEIPL       	$4
+
+	0x2547A68C:  80E70008  lwz r7,8(r7)
+	  57: GETL       	R7, t42
+	  58: ADDL       	$0x8, t42
+	  59: LDL       	(t42), t44
+	  60: PUTL       	t44, R7
+	  61: INCEIPL       	$4
+
+	0x2547A690:  2F070000  cmpi cr6,r7,0
+	  62: GETL       	R7, t46
+	  63: CMP0L       	t46, t48  (-rSo)
+	  64: ICRFL       	t48, $0x6, CR
+	  65: INCEIPL       	$4
+
+	0x2547A694:  409AFFCC  bc 4,26,0x2547A660
+	  66: Jc26o       	$0x2547A660
+
+
+. 0 2547A660 56
+. 81 7F 00 0C 80 07 00 04 83 37 01 58 55 76 10 3A 39 8B 00 01 7C 16 C9 2E 81 E7 00 04 80 6F 01 80 91 9F 00 0C 50 C3 C9 4C 90 6F 01 80 80 E7 00 08 2F 07 00 00 40 9A FF CC
+
+==== BB 888 (0x2547A698) approx BBs exec'd 0 ====
+
+	0x2547A698:  80BF000C  lwz r5,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547A69C:  80970158  lwz r4,344(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x158, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547A6A0:  839E04F4  lwz r28,1268(r30)
+	  10: GETL       	R30, t8
+	  11: ADDL       	$0x4F4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R28
+	  14: INCEIPL       	$4
+
+	0x2547A6A4:  80FC0000  lwz r7,0(r28)
+	  15: GETL       	R28, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R7
+	  18: INCEIPL       	$4
+
+	0x2547A6A8:  70E80400  andi. r8,r7,0x400
+	  19: GETL       	R7, t16
+	  20: ANDL       	$0x400, t16
+	  21: PUTL       	t16, R8
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0x2547A6AC:  408204E0  bc 4,2,0x2547AB8C
+	  25: Jc02o       	$0x2547AB8C
+
+
+. 0 2547A698 24
+. 80 BF 00 0C 80 97 01 58 83 9E 04 F4 80 FC 00 00 70 E8 04 00 40 82 04 E0
+
+==== BB 889 (0x2547A6B0) approx BBs exec'd 0 ====
+
+	0x2547A6B0:  80D701F0  lwz r6,496(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x1F0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547A6B4:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547A6B8:  2B860000  cmpli cr7,r6,0
+	   8: GETL       	R6, t6
+	   9: MOVL       	$0x0, t10
+	  10: CMPUL       	t6, t10, t8  (-rSo)
+	  11: ICRFL       	t8, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x2547A6BC:  409D0048  bc 4,29,0x2547A704
+	  13: Jc29o       	$0x2547A704
+
+
+. 0 2547A6B0 16
+. 80 D7 01 F0 38 60 00 00 2B 86 00 00 40 9D 00 48
+
+==== BB 890 (0x2547A704) approx BBs exec'd 0 ====
+
+	0x2547A704:  807701E8  lwz r3,488(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547A708:  54A5103A  rlwinm r5,r5,2,0,29
+	   5: GETL       	R5, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547A70C:  3B000001  li r24,1
+	   9: MOVL       	$0x1, t6
+	  10: PUTL       	t6, R24
+	  11: INCEIPL       	$4
+
+	0x2547A710:  480094D1  bl 0x25483BE0
+	  12: MOVL       	$0x2547A714, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25483BE0  ($4)
+
+
+. 0 2547A704 16
+. 80 77 01 E8 54 A5 10 3A 3B 00 00 01 48 00 94 D1
+
+==== BB 891 (0x2547A714) approx BBs exec'd 0 ====
+
+	0x2547A714:  80FF000C  lwz r7,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547A718:  7C183840  cmpl cr0,r24,r7
+	   5: GETL       	R24, t4
+	   6: GETL       	R7, t6
+	   7: CMPUL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547A71C:  4080032C  bc 4,0,0x2547AA48
+	  10: Jc00o       	$0x2547AA48
+
+
+. 0 2547A714 12
+. 80 FF 00 0C 7C 18 38 40 40 80 03 2C
+
+==== BB 892 (0x2547A720) approx BBs exec'd 0 ====
+
+	0x2547A720:  815701E8  lwz r10,488(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547A724:  82570158  lwz r18,344(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x158, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R18
+	   9: INCEIPL       	$4
+
+	0x2547A728:  5715103A  rlwinm r21,r24,2,0,29
+	  10: GETL       	R24, t8
+	  11: SHLL       	$0x2, t8
+	  12: PUTL       	t8, R21
+	  13: INCEIPL       	$4
+
+	0x2547A72C:  806A0004  lwz r3,4(r10)
+	  14: GETL       	R10, t10
+	  15: ADDL       	$0x4, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R3
+	  18: INCEIPL       	$4
+
+	0x2547A730:  3B200001  li r25,1
+	  19: MOVL       	$0x1, t14
+	  20: PUTL       	t14, R25
+	  21: INCEIPL       	$4
+
+	0x2547A734:  7F55902E  lwzx r26,r21,r18
+	  22: GETL       	R18, t16
+	  23: GETL       	R21, t18
+	  24: ADDL       	t18, t16
+	  25: LDL       	(t16), t20
+	  26: PUTL       	t20, R26
+	  27: INCEIPL       	$4
+
+	0x2547A738:  7F83D000  cmp cr7,r3,r26
+	  28: GETL       	R3, t22
+	  29: GETL       	R26, t24
+	  30: CMPL       	t22, t24, t26  (-rSo)
+	  31: ICRFL       	t26, $0x7, CR
+	  32: INCEIPL       	$4
+
+	0x2547A73C:  419E0018  bc 12,30,0x2547A754
+	  33: Js30o       	$0x2547A754
+
+
+. 0 2547A720 32
+. 81 57 01 E8 82 57 01 58 57 15 10 3A 80 6A 00 04 3B 20 00 01 7F 55 90 2E 7F 83 D0 00 41 9E 00 18
+
+==== BB 893 (0x2547A754) approx BBs exec'd 0 ====
+
+	0x2547A754:  3B790001  addi r27,r25,1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x2547A758:  7C9B3840  cmpl cr1,r27,r7
+	   4: GETL       	R27, t2
+	   5: GETL       	R7, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547A75C:  4084007C  bc 4,4,0x2547A7D8
+	   9: Jc04o       	$0x2547A7D8
+
+
+. 0 2547A754 12
+. 3B 79 00 01 7C 9B 38 40 40 84 00 7C
+
+==== BB 894 (0x2547A760) approx BBs exec'd 0 ====
+
+	0x2547A760:  5768103A  rlwinm r8,r27,2,0,29
+	   0: GETL       	R27, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x2547A764:  7E68502E  lwzx r19,r8,r10
+	   4: GETL       	R10, t2
+	   5: GETL       	R8, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R19
+	   9: INCEIPL       	$4
+
+	0x2547A768:  817301E8  lwz r11,488(r19)
+	  10: GETL       	R19, t8
+	  11: ADDL       	$0x1E8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547A76C:  2F0B0000  cmpi cr6,r11,0
+	  15: GETL       	R11, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0x2547A770:  419A005C  bc 12,26,0x2547A7CC
+	  19: Js26o       	$0x2547A7CC
+
+
+. 0 2547A760 20
+. 57 68 10 3A 7E 68 50 2E 81 73 01 E8 2F 0B 00 00 41 9A 00 5C
+
+==== BB 895 (0x2547A774) approx BBs exec'd 0 ====
+
+	0x2547A774:  812B0000  lwz r9,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547A778:  2F890000  cmpi cr7,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x2547A77C:  419E0050  bc 12,30,0x2547A7CC
+	   8: Js30o       	$0x2547A7CC
+
+
+. 0 2547A774 12
+. 81 2B 00 00 2F 89 00 00 41 9E 00 50
+
+==== BB 896 (0x2547A780) approx BBs exec'd 0 ====
+
+	0x2547A780:  7E19D850  subf r16,r25,r27
+	   0: GETL       	R25, t0
+	   1: GETL       	R27, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R16
+	   4: INCEIPL       	$4
+
+	0x2547A784:  5605103A  rlwinm r5,r16,2,0,29
+	   5: GETL       	R16, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547A788:  48000010  b 0x2547A798
+	   9: JMPo       	$0x2547A798  ($4)
+
+
+. 0 2547A780 12
+. 7E 19 D8 50 56 05 10 3A 48 00 00 10
+
+==== BB 897 (0x2547A798) approx BBs exec'd 0 ====
+
+	0x2547A798:  7C09D000  cmp cr0,r9,r26
+	   0: GETL       	R9, t0
+	   1: GETL       	R26, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547A79C:  396B0004  addi r11,r11,4
+	   5: GETL       	R11, t6
+	   6: ADDL       	$0x4, t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0x2547A7A0:  4082FFEC  bc 4,2,0x2547A78C
+	   9: Jc02o       	$0x2547A78C
+
+
+. 0 2547A798 12
+. 7C 09 D0 00 39 6B 00 04 40 82 FF EC
+
+==== BB 898 (0x2547A78C) approx BBs exec'd 0 ====
+
+	0x2547A78C:  812B0000  lwz r9,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547A790:  2C890000  cmpi cr1,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x2547A794:  41860038  bc 12,6,0x2547A7CC
+	   8: Js06o       	$0x2547A7CC
+
+
+. 0 2547A78C 12
+. 81 2B 00 00 2C 89 00 00 41 86 00 38
+
+==== BB 899 (0x2547A7CC) approx BBs exec'd 0 ====
+
+	0x2547A7CC:  3B7B0001  addi r27,r27,1
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x2547A7D0:  7C9B3840  cmpl cr1,r27,r7
+	   4: GETL       	R27, t2
+	   5: GETL       	R7, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547A7D4:  4184FF8C  bc 12,4,0x2547A760
+	   9: Js04o       	$0x2547A760
+
+
+. 0 2547A7CC 12
+. 3B 7B 00 01 7C 9B 38 40 41 84 FF 8C
+
+==== BB 900 (0x2547A7D8) approx BBs exec'd 0 ====
+
+	0x2547A7D8:  3B180001  addi r24,r24,1
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0x2547A7DC:  7F183840  cmpl cr6,r24,r7
+	   4: GETL       	R24, t2
+	   5: GETL       	R7, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547A7E0:  4198FF44  bc 12,24,0x2547A724
+	   9: Js24o       	$0x2547A724
+
+
+. 0 2547A7D8 12
+. 3B 18 00 01 7F 18 38 40 41 98 FF 44
+
+==== BB 901 (0x2547A724) approx BBs exec'd 0 ====
+
+	0x2547A724:  82570158  lwz r18,344(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x158, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x2547A728:  5715103A  rlwinm r21,r24,2,0,29
+	   5: GETL       	R24, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R21
+	   8: INCEIPL       	$4
+
+	0x2547A72C:  806A0004  lwz r3,4(r10)
+	   9: GETL       	R10, t6
+	  10: ADDL       	$0x4, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x2547A730:  3B200001  li r25,1
+	  14: MOVL       	$0x1, t10
+	  15: PUTL       	t10, R25
+	  16: INCEIPL       	$4
+
+	0x2547A734:  7F55902E  lwzx r26,r21,r18
+	  17: GETL       	R18, t12
+	  18: GETL       	R21, t14
+	  19: ADDL       	t14, t12
+	  20: LDL       	(t12), t16
+	  21: PUTL       	t16, R26
+	  22: INCEIPL       	$4
+
+	0x2547A738:  7F83D000  cmp cr7,r3,r26
+	  23: GETL       	R3, t18
+	  24: GETL       	R26, t20
+	  25: CMPL       	t18, t20, t22  (-rSo)
+	  26: ICRFL       	t22, $0x7, CR
+	  27: INCEIPL       	$4
+
+	0x2547A73C:  419E0018  bc 12,30,0x2547A754
+	  28: Js30o       	$0x2547A754
+
+
+. 0 2547A724 28
+. 82 57 01 58 57 15 10 3A 80 6A 00 04 3B 20 00 01 7F 55 90 2E 7F 83 D0 00 41 9E 00 18
+
+==== BB 902 (0x2547A740) approx BBs exec'd 0 ====
+
+	0x2547A740:  3B390001  addi r25,r25,1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0x2547A744:  572E103A  rlwinm r14,r25,2,0,29
+	   4: GETL       	R25, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R14
+	   7: INCEIPL       	$4
+
+	0x2547A748:  7F6E502E  lwzx r27,r14,r10
+	   8: GETL       	R10, t4
+	   9: GETL       	R14, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0x2547A74C:  7F9BD000  cmp cr7,r27,r26
+	  14: GETL       	R27, t10
+	  15: GETL       	R26, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547A750:  409EFFF0  bc 4,30,0x2547A740
+	  19: Jc30o       	$0x2547A740
+
+
+. 0 2547A740 20
+. 3B 39 00 01 57 2E 10 3A 7F 6E 50 2E 7F 9B D0 00 40 9E FF F0
+
+==== BB 903 (0x2547A7E4) approx BBs exec'd 0 ====
+
+	0x2547A7E4:  2F910000  cmpi cr7,r17,0
+	   0: GETL       	R17, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547A7E8:  54F7103A  rlwinm r23,r7,2,0,29
+	   4: GETL       	R7, t4
+	   5: SHLL       	$0x2, t4
+	   6: PUTL       	t4, R23
+	   7: INCEIPL       	$4
+
+	0x2547A7EC:  3B400000  li r26,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R26
+	  10: INCEIPL       	$4
+
+	0x2547A7F0:  7F57512E  stwx r26,r23,r10
+	  11: GETL       	R10, t8
+	  12: GETL       	R23, t10
+	  13: ADDL       	t10, t8
+	  14: GETL       	R26, t12
+	  15: STL       	t12, (t8)
+	  16: INCEIPL       	$4
+
+	0x2547A7F4:  409E06DC  bc 4,30,0x2547AED0
+	  17: Jc30o       	$0x2547AED0
+
+
+. 0 2547A7E4 20
+. 2F 91 00 00 54 F7 10 3A 3B 40 00 00 7F 57 51 2E 40 9E 06 DC
+
+==== BB 904 (0x2547A7F8) approx BBs exec'd 0 ====
+
+	0x2547A7F8:  81010000  lwz r8,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0x2547A7FC:  82280004  lwz r17,4(r8)
+	   4: GETL       	R8, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R17
+	   8: INCEIPL       	$4
+
+	0x2547A800:  81C8FFB8  lwz r14,-72(r8)
+	   9: GETL       	R8, t8
+	  10: ADDL       	$0xFFFFFFB8, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R14
+	  13: INCEIPL       	$4
+
+	0x2547A804:  7E2803A6  mtlr r17
+	  14: GETL       	R17, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x2547A808:  81E8FFBC  lwz r15,-68(r8)
+	  17: GETL       	R8, t14
+	  18: ADDL       	$0xFFFFFFBC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R15
+	  21: INCEIPL       	$4
+
+	0x2547A80C:  8208FFC0  lwz r16,-64(r8)
+	  22: GETL       	R8, t18
+	  23: ADDL       	$0xFFFFFFC0, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R16
+	  26: INCEIPL       	$4
+
+	0x2547A810:  8228FFC4  lwz r17,-60(r8)
+	  27: GETL       	R8, t22
+	  28: ADDL       	$0xFFFFFFC4, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R17
+	  31: INCEIPL       	$4
+
+	0x2547A814:  8248FFC8  lwz r18,-56(r8)
+	  32: GETL       	R8, t26
+	  33: ADDL       	$0xFFFFFFC8, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R18
+	  36: INCEIPL       	$4
+
+	0x2547A818:  8268FFCC  lwz r19,-52(r8)
+	  37: GETL       	R8, t30
+	  38: ADDL       	$0xFFFFFFCC, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R19
+	  41: INCEIPL       	$4
+
+	0x2547A81C:  8288FFD0  lwz r20,-48(r8)
+	  42: GETL       	R8, t34
+	  43: ADDL       	$0xFFFFFFD0, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R20
+	  46: INCEIPL       	$4
+
+	0x2547A820:  82A8FFD4  lwz r21,-44(r8)
+	  47: GETL       	R8, t38
+	  48: ADDL       	$0xFFFFFFD4, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R21
+	  51: INCEIPL       	$4
+
+	0x2547A824:  82C8FFD8  lwz r22,-40(r8)
+	  52: GETL       	R8, t42
+	  53: ADDL       	$0xFFFFFFD8, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R22
+	  56: INCEIPL       	$4
+
+	0x2547A828:  82E8FFDC  lwz r23,-36(r8)
+	  57: GETL       	R8, t46
+	  58: ADDL       	$0xFFFFFFDC, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R23
+	  61: INCEIPL       	$4
+
+	0x2547A82C:  8308FFE0  lwz r24,-32(r8)
+	  62: GETL       	R8, t50
+	  63: ADDL       	$0xFFFFFFE0, t50
+	  64: LDL       	(t50), t52
+	  65: PUTL       	t52, R24
+	  66: INCEIPL       	$4
+
+	0x2547A830:  8328FFE4  lwz r25,-28(r8)
+	  67: GETL       	R8, t54
+	  68: ADDL       	$0xFFFFFFE4, t54
+	  69: LDL       	(t54), t56
+	  70: PUTL       	t56, R25
+	  71: INCEIPL       	$4
+
+	0x2547A834:  8348FFE8  lwz r26,-24(r8)
+	  72: GETL       	R8, t58
+	  73: ADDL       	$0xFFFFFFE8, t58
+	  74: LDL       	(t58), t60
+	  75: PUTL       	t60, R26
+	  76: INCEIPL       	$4
+
+	0x2547A838:  8368FFEC  lwz r27,-20(r8)
+	  77: GETL       	R8, t62
+	  78: ADDL       	$0xFFFFFFEC, t62
+	  79: LDL       	(t62), t64
+	  80: PUTL       	t64, R27
+	  81: INCEIPL       	$4
+
+	0x2547A83C:  8388FFF0  lwz r28,-16(r8)
+	  82: GETL       	R8, t66
+	  83: ADDL       	$0xFFFFFFF0, t66
+	  84: LDL       	(t66), t68
+	  85: PUTL       	t68, R28
+	  86: INCEIPL       	$4
+
+	0x2547A840:  83A8FFF4  lwz r29,-12(r8)
+	  87: GETL       	R8, t70
+	  88: ADDL       	$0xFFFFFFF4, t70
+	  89: LDL       	(t70), t72
+	  90: PUTL       	t72, R29
+	  91: INCEIPL       	$4
+
+	0x2547A844:  83C8FFF8  lwz r30,-8(r8)
+	  92: GETL       	R8, t74
+	  93: ADDL       	$0xFFFFFFF8, t74
+	  94: LDL       	(t74), t76
+	  95: PUTL       	t76, R30
+	  96: INCEIPL       	$4
+
+	0x2547A848:  83E8FFFC  lwz r31,-4(r8)
+	  97: GETL       	R8, t78
+	  98: ADDL       	$0xFFFFFFFC, t78
+	  99: LDL       	(t78), t80
+	 100: PUTL       	t80, R31
+	 101: INCEIPL       	$4
+
+	0x2547A84C:  7D014378  or r1,r8,r8
+	 102: GETL       	R8, t82
+	 103: PUTL       	t82, R1
+	 104: INCEIPL       	$4
+
+	0x2547A850:  4E800020  blr
+	 105: GETL       	LR, t84
+	 106: JMPo-r       	t84  ($4)
+
+
+. 0 2547A7F8 92
+. 81 01 00 00 82 28 00 04 81 C8 FF B8 7E 28 03 A6 81 E8 FF BC 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+==== BB 905 (0x254727E4) approx BBs exec'd 0 ====
+
+	0x254727E4:  80F4015C  lwz r7,348(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x15C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254727E8:  2C870000  cmpi cr1,r7,0
+	   5: GETL       	R7, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x254727EC:  4186003C  bc 12,6,0x25472828
+	   9: Js06o       	$0x25472828
+
+
+. 0 254727E4 12
+. 80 F4 01 5C 2C 87 00 00 41 86 00 3C
+
+==== BB 906 (0x254727F0) approx BBs exec'd 0 ====
+
+	0x254727F0:  81140158  lwz r8,344(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x158, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254727F4:  7CE903A6  mtctr r7
+	   5: GETL       	R7, t4
+	   6: PUTL       	t4, CTR
+	   7: INCEIPL       	$4
+
+	0x254727F8:  38E7FFFF  addi r7,r7,-1
+	   8: GETL       	R7, t6
+	   9: ADDL       	$0xFFFFFFFF, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0x254727FC:  54F1103A  rlwinm r17,r7,2,0,29
+	  12: GETL       	R7, t8
+	  13: SHLL       	$0x2, t8
+	  14: PUTL       	t8, R17
+	  15: INCEIPL       	$4
+
+	0x25472800:  7F11402E  lwzx r24,r17,r8
+	  16: GETL       	R8, t10
+	  17: GETL       	R17, t12
+	  18: ADDL       	t12, t10
+	  19: LDL       	(t10), t14
+	  20: PUTL       	t14, R24
+	  21: INCEIPL       	$4
+
+	0x25472804:  82B80180  lwz r21,384(r24)
+	  22: GETL       	R24, t16
+	  23: ADDL       	$0x180, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R21
+	  26: INCEIPL       	$4
+
+	0x25472808:  66A80800  oris r8,r21,0x800
+	  27: GETL       	R21, t20
+	  28: ORL       	$0x8000000, t20
+	  29: PUTL       	t20, R8
+	  30: INCEIPL       	$4
+
+	0x2547280C:  91180180  stw r8,384(r24)
+	  31: GETL       	R8, t22
+	  32: GETL       	R24, t24
+	  33: ADDL       	$0x180, t24
+	  34: STL       	t22, (t24)
+	  35: INCEIPL       	$4
+
+	0x25472810:  81140158  lwz r8,344(r20)
+	  36: GETL       	R20, t26
+	  37: ADDL       	$0x158, t26
+	  38: LDL       	(t26), t28
+	  39: PUTL       	t28, R8
+	  40: INCEIPL       	$4
+
+	0x25472814:  7F31402E  lwzx r25,r17,r8
+	  41: GETL       	R8, t30
+	  42: GETL       	R17, t32
+	  43: ADDL       	t32, t30
+	  44: LDL       	(t30), t34
+	  45: PUTL       	t34, R25
+	  46: INCEIPL       	$4
+
+	0x25472818:  81390178  lwz r9,376(r25)
+	  47: GETL       	R25, t36
+	  48: ADDL       	$0x178, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R9
+	  51: INCEIPL       	$4
+
+	0x2547281C:  3B890001  addi r28,r9,1
+	  52: GETL       	R9, t40
+	  53: ADDL       	$0x1, t40
+	  54: PUTL       	t40, R28
+	  55: INCEIPL       	$4
+
+	0x25472820:  93990178  stw r28,376(r25)
+	  56: GETL       	R28, t42
+	  57: GETL       	R25, t44
+	  58: ADDL       	$0x178, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x25472824:  4200FFD4  bc 16,0,0x254727F8
+	  61: GETL       	CTR, t46
+	  62: ADDL       	$0xFFFFFFFF, t46
+	  63: PUTL       	t46, CTR
+	  64: JIFZL       	t46, $0x25472828
+	  65: JMPo       	$0x254727F8  ($4)
+
+
+. 0 254727F0 56
+. 81 14 01 58 7C E9 03 A6 38 E7 FF FF 54 F1 10 3A 7F 11 40 2E 82 B8 01 80 66 A8 08 00 91 18 01 80 81 14 01 58 7F 31 40 2E 81 39 01 78 3B 89 00 01 93 99 01 78 42 00 FF D4
+
+==== BB 907 (0x254727F8) approx BBs exec'd 0 ====
+
+	0x254727F8:  38E7FFFF  addi r7,r7,-1
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0xFFFFFFFF, t0
+	   2: PUTL       	t0, R7
+	   3: INCEIPL       	$4
+
+	0x254727FC:  54F1103A  rlwinm r17,r7,2,0,29
+	   4: GETL       	R7, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R17
+	   7: INCEIPL       	$4
+
+	0x25472800:  7F11402E  lwzx r24,r17,r8
+	   8: GETL       	R8, t4
+	   9: GETL       	R17, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R24
+	  13: INCEIPL       	$4
+
+	0x25472804:  82B80180  lwz r21,384(r24)
+	  14: GETL       	R24, t10
+	  15: ADDL       	$0x180, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R21
+	  18: INCEIPL       	$4
+
+	0x25472808:  66A80800  oris r8,r21,0x800
+	  19: GETL       	R21, t14
+	  20: ORL       	$0x8000000, t14
+	  21: PUTL       	t14, R8
+	  22: INCEIPL       	$4
+
+	0x2547280C:  91180180  stw r8,384(r24)
+	  23: GETL       	R8, t16
+	  24: GETL       	R24, t18
+	  25: ADDL       	$0x180, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0x25472810:  81140158  lwz r8,344(r20)
+	  28: GETL       	R20, t20
+	  29: ADDL       	$0x158, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R8
+	  32: INCEIPL       	$4
+
+	0x25472814:  7F31402E  lwzx r25,r17,r8
+	  33: GETL       	R8, t24
+	  34: GETL       	R17, t26
+	  35: ADDL       	t26, t24
+	  36: LDL       	(t24), t28
+	  37: PUTL       	t28, R25
+	  38: INCEIPL       	$4
+
+	0x25472818:  81390178  lwz r9,376(r25)
+	  39: GETL       	R25, t30
+	  40: ADDL       	$0x178, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R9
+	  43: INCEIPL       	$4
+
+	0x2547281C:  3B890001  addi r28,r9,1
+	  44: GETL       	R9, t34
+	  45: ADDL       	$0x1, t34
+	  46: PUTL       	t34, R28
+	  47: INCEIPL       	$4
+
+	0x25472820:  93990178  stw r28,376(r25)
+	  48: GETL       	R28, t36
+	  49: GETL       	R25, t38
+	  50: ADDL       	$0x178, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25472824:  4200FFD4  bc 16,0,0x254727F8
+	  53: GETL       	CTR, t40
+	  54: ADDL       	$0xFFFFFFFF, t40
+	  55: PUTL       	t40, CTR
+	  56: JIFZL       	t40, $0x25472828
+	  57: JMPo       	$0x254727F8  ($4)
+
+
+. 0 254727F8 48
+. 38 E7 FF FF 54 F1 10 3A 7F 11 40 2E 82 B8 01 80 66 A8 08 00 91 18 01 80 81 14 01 58 7F 31 40 2E 81 39 01 78 3B 89 00 01 93 99 01 78 42 00 FF D4
+
+==== BB 908 (0x25472828) approx BBs exec'd 0 ====
+
+	0x25472828:  816E01C8  lwz r11,456(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547282C:  80EE01C4  lwz r7,452(r14)
+	   5: GETL       	R14, t4
+	   6: ADDL       	$0x1C4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x25472830:  90EB000C  stw r7,12(r11)
+	  10: GETL       	R7, t8
+	  11: GETL       	R11, t10
+	  12: ADDL       	$0xC, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x25472834:  812E01C4  lwz r9,452(r14)
+	  15: GETL       	R14, t12
+	  16: ADDL       	$0x1C4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25472838:  2E090000  cmpi cr4,r9,0
+	  20: GETL       	R9, t16
+	  21: CMP0L       	t16, t18  (-rSo)
+	  22: ICRFL       	t18, $0x4, CR
+	  23: INCEIPL       	$4
+
+	0x2547283C:  41920008  bc 12,18,0x25472844
+	  24: Js18o       	$0x25472844
+
+
+. 0 25472828 24
+. 81 6E 01 C8 80 EE 01 C4 90 EB 00 0C 81 2E 01 C4 2E 09 00 00 41 92 00 08
+
+==== BB 909 (0x25472840) approx BBs exec'd 0 ====
+
+	0x25472840:  91690010  stw r11,16(r9)
+	   0: GETL       	R11, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0x10, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472844:  814E0330  lwz r10,816(r14)
+	   5: GETL       	R14, t4
+	   6: ADDL       	$0x330, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25472848:  2B0A0001  cmpli cr6,r10,1
+	  10: GETL       	R10, t8
+	  11: MOVL       	$0x1, t12
+	  12: CMPUL       	t8, t12, t10  (-rSo)
+	  13: ICRFL       	t10, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0x2547284C:  40991358  bc 4,25,0x25473BA4
+	  15: Jc25o       	$0x25473BA4
+
+
+. 0 25472840 16
+. 91 69 00 10 81 4E 03 30 2B 0A 00 01 40 99 13 58
+
+==== BB 910 (0x25472850) approx BBs exec'd 0 ====
+
+	0x25472850:  81540158  lwz r10,344(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x158, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25472854:  390E01B8  addi r8,r14,440
+	   5: GETL       	R14, t4
+	   6: ADDL       	$0x1B8, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x25472858:  39200001  li r9,1
+	   9: MOVL       	$0x1, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0x2547285C:  39600004  li r11,4
+	  12: MOVL       	$0x4, t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0x25472860:  806A0004  lwz r3,4(r10)
+	  15: GETL       	R10, t10
+	  16: ADDL       	$0x4, t10
+	  17: LDL       	(t10), t12
+	  18: PUTL       	t12, R3
+	  19: INCEIPL       	$4
+
+	0x25472864:  7F834000  cmp cr7,r3,r8
+	  20: GETL       	R3, t14
+	  21: GETL       	R8, t16
+	  22: CMPL       	t14, t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0x25472868:  419E0018  bc 12,30,0x25472880
+	  25: Js30o       	$0x25472880
+
+
+. 0 25472850 28
+. 81 54 01 58 39 0E 01 B8 39 20 00 01 39 60 00 04 80 6A 00 04 7F 83 40 00 41 9E 00 18
+
+==== BB 911 (0x2547286C) approx BBs exec'd 0 ====
+
+	0x2547286C:  39290001  addi r9,r9,1
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25472870:  552B103A  rlwinm r11,r9,2,0,29
+	   4: GETL       	R9, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R11
+	   7: INCEIPL       	$4
+
+	0x25472874:  7C8B502E  lwzx r4,r11,r10
+	   8: GETL       	R10, t4
+	   9: GETL       	R11, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x25472878:  7F844000  cmp cr7,r4,r8
+	  14: GETL       	R4, t10
+	  15: GETL       	R8, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547287C:  409EFFF0  bc 4,30,0x2547286C
+	  19: Jc30o       	$0x2547286C
+
+
+. 0 2547286C 20
+. 39 29 00 01 55 2B 10 3A 7C 8B 50 2E 7F 84 40 00 40 9E FF F0
+
+==== BB 912 (0x25472880) approx BBs exec'd 0 ====
+
+	0x25472880:  811F0030  lwz r8,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25472884:  7D4B5214  add r10,r11,r10
+	   5: GETL       	R11, t4
+	   6: GETL       	R10, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25472888:  816AFFFC  lwz r11,-4(r10)
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0xFFFFFFFC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547288C:  2C080000  cmpi cr0,r8,0
+	  15: GETL       	R8, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x25472890:  916E01C8  stw r11,456(r14)
+	  19: GETL       	R11, t16
+	  20: GETL       	R14, t18
+	  21: ADDL       	$0x1C8, t18
+	  22: STL       	t16, (t18)
+	  23: INCEIPL       	$4
+
+	0x25472894:  40820F34  bc 4,2,0x254737C8
+	  24: Jc02o       	$0x254737C8
+
+
+. 0 25472880 24
+. 81 1F 00 30 7D 4B 52 14 81 6A FF FC 2C 08 00 00 91 6E 01 C8 40 82 0F 34
+
+==== BB 913 (0x25472898) approx BBs exec'd 0 ====
+
+	0x25472898:  8274015C  lwz r19,348(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x15C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0x2547289C:  3AE90001  addi r23,r9,1
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R23
+	   8: INCEIPL       	$4
+
+	0x254728A0:  38000000  li r0,0
+	   9: MOVL       	$0x0, t6
+	  10: PUTL       	t6, R0
+	  11: INCEIPL       	$4
+
+	0x254728A4:  7DC97378  or r9,r14,r14
+	  12: GETL       	R14, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x254728A8:  7F979840  cmpl cr7,r23,r19
+	  15: GETL       	R23, t10
+	  16: GETL       	R19, t12
+	  17: CMPUL       	t10, t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x7, CR
+	  19: INCEIPL       	$4
+
+	0x254728AC:  409C0008  bc 4,28,0x254728B4
+	  20: Jc28o       	$0x254728B4
+
+
+. 0 25472898 24
+. 82 74 01 5C 3A E9 00 01 38 00 00 00 7D C9 73 78 7F 97 98 40 40 9C 00 08
+
+==== BB 914 (0x254728B4) approx BBs exec'd 0 ====
+
+	0x254728B4:  900901C4  stw r0,452(r9)
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0x1C4, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254728B8:  820E01C8  lwz r16,456(r14)
+	   5: GETL       	R14, t4
+	   6: ADDL       	$0x1C8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R16
+	   9: INCEIPL       	$4
+
+	0x254728BC:  380E01B8  addi r0,r14,440
+	  10: GETL       	R14, t8
+	  11: ADDL       	$0x1B8, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x254728C0:  9010000C  stw r0,12(r16)
+	  14: GETL       	R0, t10
+	  15: GETL       	R16, t12
+	  16: ADDL       	$0xC, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x254728C4:  812E01C4  lwz r9,452(r14)
+	  19: GETL       	R14, t14
+	  20: ADDL       	$0x1C4, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R9
+	  23: INCEIPL       	$4
+
+	0x254728C8:  2C890000  cmpi cr1,r9,0
+	  24: GETL       	R9, t18
+	  25: CMP0L       	t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x1, CR
+	  27: INCEIPL       	$4
+
+	0x254728CC:  41860008  bc 12,6,0x254728D4
+	  28: Js06o       	$0x254728D4
+
+
+. 0 254728B4 28
+. 90 09 01 C4 82 0E 01 C8 38 0E 01 B8 90 10 00 0C 81 2E 01 C4 2C 89 00 00 41 86 00 08
+
+==== BB 915 (0x254728D4) approx BBs exec'd 0 ====
+
+	0x254728D4:  807E002C  lwz r3,44(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x2C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254728D8:  691A0003  xori r26,r8,0x3
+	   5: GETL       	R8, t4
+	   6: XORL       	$0x3, t4
+	   7: PUTL       	t4, R26
+	   8: INCEIPL       	$4
+
+	0x254728DC:  201A0000  subfic r0,r26,0
+	   9: GETL       	R26, t6
+	  10: MOVL       	$0x0, t8
+	  11: SBBL       	t6, t8  (-wCa)
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x254728E0:  7F40D114  adde r26,r0,r26
+	  14: GETL       	R0, t10
+	  15: GETL       	R26, t12
+	  16: ADCL       	t10, t12  (-rCa-wCa)
+	  17: PUTL       	t12, R26
+	  18: INCEIPL       	$4
+
+	0x254728E4:  809E0014  lwz r4,20(r30)
+	  19: GETL       	R30, t14
+	  20: ADDL       	$0x14, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R4
+	  23: INCEIPL       	$4
+
+	0x254728E8:  21480000  subfic r10,r8,0
+	  24: GETL       	R8, t18
+	  25: MOVL       	$0x0, t20
+	  26: SBBL       	t18, t20  (-wCa)
+	  27: PUTL       	t20, R10
+	  28: INCEIPL       	$4
+
+	0x254728EC:  7FAA4114  adde r29,r10,r8
+	  29: GETL       	R10, t22
+	  30: GETL       	R8, t24
+	  31: ADCL       	t22, t24  (-rCa-wCa)
+	  32: PUTL       	t24, R29
+	  33: INCEIPL       	$4
+
+	0x254728F0:  38BF0058  addi r5,r31,88
+	  34: GETL       	R31, t26
+	  35: ADDL       	$0x58, t26
+	  36: PUTL       	t26, R5
+	  37: INCEIPL       	$4
+
+	0x254728F4:  93BF0058  stw r29,88(r31)
+	  38: GETL       	R29, t28
+	  39: GETL       	R31, t30
+	  40: ADDL       	$0x58, t30
+	  41: STL       	t28, (t30)
+	  42: INCEIPL       	$4
+
+	0x254728F8:  935F005C  stw r26,92(r31)
+	  43: GETL       	R26, t32
+	  44: GETL       	R31, t34
+	  45: ADDL       	$0x5C, t34
+	  46: STL       	t32, (t34)
+	  47: INCEIPL       	$4
+
+	0x254728FC:  48008D9D  bl 0x2547B698
+	  48: MOVL       	$0x25472900, t36
+	  49: PUTL       	t36, LR
+	  50: JMPo-c       	$0x2547B698  ($4)
+
+
+. 0 254728D4 44
+. 80 7E 00 2C 69 1A 00 03 20 1A 00 00 7F 40 D1 14 80 9E 00 14 21 48 00 00 7F AA 41 14 38 BF 00 58 93 BF 00 58 93 5F 00 5C 48 00 8D 9D
+
+==== BB 916 _dl_receive_error(0x2547B698) approx BBs exec'd 0 ====
+
+	0x2547B698:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547B69C:  7CC802A6  mflr r6
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x2547B6A0:  4801B961  bl 0x25497000
+	   9: MOVL       	$0x2547B6A4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547B698 12
+. 94 21 FF D0 7C C8 02 A6 48 01 B9 61
+
+==== BB 917 (0x2547B6A4) approx BBs exec'd 0 ====
+
+	0x2547B6A4:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B6A8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547B6AC:  9361001C  stw r27,28(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547B6B0:  7CBB2B78  or r27,r5,r5
+	  13: GETL       	R5, t10
+	  14: PUTL       	t10, R27
+	  15: INCEIPL       	$4
+
+	0x2547B6B4:  90C10034  stw r6,52(r1)
+	  16: GETL       	R6, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x34, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2547B6B8:  92E1000C  stw r23,12(r1)
+	  21: GETL       	R23, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547B6BC:  7C972378  or r23,r4,r4
+	  26: GETL       	R4, t20
+	  27: PUTL       	t20, R23
+	  28: INCEIPL       	$4
+
+	0x2547B6C0:  80BE04C8  lwz r5,1224(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4C8, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R5
+	  33: INCEIPL       	$4
+
+	0x2547B6C4:  93210014  stw r25,20(r1)
+	  34: GETL       	R25, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x14, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x2547B6C8:  7C791B78  or r25,r3,r3
+	  39: GETL       	R3, t30
+	  40: PUTL       	t30, R25
+	  41: INCEIPL       	$4
+
+	0x2547B6CC:  812501B4  lwz r9,436(r5)
+	  42: GETL       	R5, t32
+	  43: ADDL       	$0x1B4, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R9
+	  46: INCEIPL       	$4
+
+	0x2547B6D0:  93410018  stw r26,24(r1)
+	  47: GETL       	R26, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x18, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0x2547B6D4:  93810020  stw r28,32(r1)
+	  52: GETL       	R28, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x20, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0x2547B6D8:  7D2903A6  mtctr r9
+	  57: GETL       	R9, t44
+	  58: PUTL       	t44, CTR
+	  59: INCEIPL       	$4
+
+	0x2547B6DC:  93A10024  stw r29,36(r1)
+	  60: GETL       	R29, t46
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x24, t48
+	  63: STL       	t46, (t48)
+	  64: INCEIPL       	$4
+
+	0x2547B6E0:  839E031C  lwz r28,796(r30)
+	  65: GETL       	R30, t50
+	  66: ADDL       	$0x31C, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R28
+	  69: INCEIPL       	$4
+
+	0x2547B6E4:  4E800421  bctrl
+	  70: MOVL       	$0x2547B6E8, t54
+	  71: PUTL       	t54, LR
+	  72: GETL       	CTR, t56
+	  73: JMPo-c       	t56  ($4)
+
+
+. 0 2547B6A4 68
+. 93 C1 00 28 7F C8 02 A6 93 61 00 1C 7C BB 2B 78 90 C1 00 34 92 E1 00 0C 7C 97 23 78 80 BE 04 C8 93 21 00 14 7C 79 1B 78 81 25 01 B4 93 41 00 18 93 81 00 20 7D 29 03 A6 93 A1 00 24 83 9E 03 1C 4E 80 04 21
+
+==== BB 918 (0x2547B6E8) approx BBs exec'd 0 ====
+
+	0x2547B6E8:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547B6EC:  7C7D1B78  or r29,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0x2547B6F0:  7EE803A6  mtlr r23
+	   6: GETL       	R23, t4
+	   7: PUTL       	t4, LR
+	   8: INCEIPL       	$4
+
+	0x2547B6F4:  835D0000  lwz r26,0(r29)
+	   9: GETL       	R29, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x2547B6F8:  7F63DB78  or r3,r27,r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, R3
+	  15: INCEIPL       	$4
+
+	0x2547B6FC:  909D0000  stw r4,0(r29)
+	  16: GETL       	R4, t12
+	  17: GETL       	R29, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x2547B700:  837C0000  lwz r27,0(r28)
+	  20: GETL       	R28, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R27
+	  23: INCEIPL       	$4
+
+	0x2547B704:  933C0000  stw r25,0(r28)
+	  24: GETL       	R25, t20
+	  25: GETL       	R28, t22
+	  26: STL       	t20, (t22)
+	  27: INCEIPL       	$4
+
+	0x2547B708:  4E800021  blrl
+	  28: GETL       	LR, t24
+	  29: MOVL       	$0x2547B70C, t26
+	  30: PUTL       	t26, LR
+	  31: JMPo-r       	t24  ($4)
+
+
+. 0 2547B6E8 36
+. 38 80 00 00 7C 7D 1B 78 7E E8 03 A6 83 5D 00 00 7F 63 DB 78 90 9D 00 00 83 7C 00 00 93 3C 00 00 4E 80 00 21
+
+==== BB 919 version_check_doit(0x254717D4) approx BBs exec'd 0 ====
+
+	0x254717D4:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x254717D8:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x254717DC:  48025825  bl 0x25497000
+	   9: MOVL       	$0x254717E0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254717D4 12
+. 94 21 FF E0 7C 08 02 A6 48 02 58 25
+
+==== BB 920 (0x254717E0) approx BBs exec'd 0 ====
+
+	0x254717E0:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254717E4:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x254717E8:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254717EC:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254717F0:  7C7F1B78  or r31,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0x254717F4:  80BF0004  lwz r5,4(r31)
+	  21: GETL       	R31, t16
+	  22: ADDL       	$0x4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R5
+	  25: INCEIPL       	$4
+
+	0x254717F8:  38800001  li r4,1
+	  26: MOVL       	$0x1, t20
+	  27: PUTL       	t20, R4
+	  28: INCEIPL       	$4
+
+	0x254717FC:  813E04C8  lwz r9,1224(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4C8, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0x25471800:  93A10014  stw r29,20(r1)
+	  34: GETL       	R29, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x14, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0x25471804:  80690000  lwz r3,0(r9)
+	  39: GETL       	R9, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R3
+	  42: INCEIPL       	$4
+
+	0x25471808:  4800B7C1  bl 0x2547CFC8
+	  43: MOVL       	$0x2547180C, t34
+	  44: PUTL       	t34, LR
+	  45: JMPo-c       	$0x2547CFC8  ($4)
+
+
+. 0 254717E0 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 90 01 00 24 7C 7F 1B 78 80 BF 00 04 38 80 00 01 81 3E 04 C8 93 A1 00 14 80 69 00 00 48 00 B7 C1
+
+==== BB 921 _dl_check_all_versions(0x2547CFC8) approx BBs exec'd 0 ====
+
+	0x2547CFC8:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547CFCC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547CFD0:  93E1001C  stw r31,28(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547CFD4:  7C7F1B79  or. r31,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R31
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x2547CFD8:  93410008  stw r26,8(r1)
+	  19: GETL       	R26, t14
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0x8, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547CFDC:  7C9A2378  or r26,r4,r4
+	  24: GETL       	R4, t18
+	  25: PUTL       	t18, R26
+	  26: INCEIPL       	$4
+
+	0x2547CFE0:  9361000C  stw r27,12(r1)
+	  27: GETL       	R27, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0xC, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x2547CFE4:  7CBB2B78  or r27,r5,r5
+	  32: GETL       	R5, t24
+	  33: PUTL       	t24, R27
+	  34: INCEIPL       	$4
+
+	0x2547CFE8:  93810010  stw r28,16(r1)
+	  35: GETL       	R28, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x10, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0x2547CFEC:  3B800000  li r28,0
+	  40: MOVL       	$0x0, t30
+	  41: PUTL       	t30, R28
+	  42: INCEIPL       	$4
+
+	0x2547CFF0:  93A10014  stw r29,20(r1)
+	  43: GETL       	R29, t32
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x14, t34
+	  46: STL       	t32, (t34)
+	  47: INCEIPL       	$4
+
+	0x2547CFF4:  93C10018  stw r30,24(r1)
+	  48: GETL       	R30, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x18, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x2547CFF8:  90010024  stw r0,36(r1)
+	  53: GETL       	R0, t40
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x24, t42
+	  56: STL       	t40, (t42)
+	  57: INCEIPL       	$4
+
+	0x2547CFFC:  40A20018  bc 5,2,0x2547D014
+	  58: Jc02o       	$0x2547D014
+
+
+. 0 2547CFC8 56
+. 94 21 FF E0 7C 08 02 A6 93 E1 00 1C 7C 7F 1B 79 93 41 00 08 7C 9A 23 78 93 61 00 0C 7C BB 2B 78 93 81 00 10 3B 80 00 00 93 A1 00 14 93 C1 00 18 90 01 00 24 40 A2 00 18
+
+==== BB 922 (0x2547D014) approx BBs exec'd 0 ====
+
+	0x2547D014:  80DF0180  lwz r6,384(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547D018:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547D01C:  7F44D378  or r4,r26,r26
+	   8: GETL       	R26, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x2547D020:  7F65DB78  or r5,r27,r27
+	  11: GETL       	R27, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x2547D024:  74C90040  andis. r9,r6,0x40
+	  14: GETL       	R6, t10
+	  15: ANDL       	$0x400000, t10
+	  16: PUTL       	t10, R9
+	  17: CMP0L       	t10, t12  (-rSo)
+	  18: ICRFL       	t12, $0x0, CR
+	  19: INCEIPL       	$4
+
+	0x2547D028:  3BA00000  li r29,0
+	  20: MOVL       	$0x0, t14
+	  21: PUTL       	t14, R29
+	  22: INCEIPL       	$4
+
+	0x2547D02C:  4082FFD8  bc 4,2,0x2547D004
+	  23: Jc02o       	$0x2547D004
+
+
+. 0 2547D014 28
+. 80 DF 01 80 7F E3 FB 78 7F 44 D3 78 7F 65 DB 78 74 C9 00 40 3B A0 00 00 40 82 FF D8
+
+==== BB 923 (0x2547D030) approx BBs exec'd 0 ====
+
+	0x2547D030:  4BFFFB41  bl 0x2547CB70
+	   0: MOVL       	$0x2547D034, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547CB70  ($4)
+
+
+. 0 2547D030 4
+. 4B FF FB 41
+
+==== BB 924 _dl_check_map_versions(0x2547CB70) approx BBs exec'd 0 ====
+
+	0x2547CB70:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x2547CB74:  9421FF60  stwu r1,-160(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF60, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547CB78:  7D800026  mfcr r12
+	   9: GETL       	CR, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x2547CB7C:  4801A485  bl 0x25497000
+	  12: MOVL       	$0x2547CB80, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547CB70 16
+. 7C 08 02 A6 94 21 FF 60 7D 80 00 26 48 01 A4 85
+
+==== BB 925 (0x2547CB80) approx BBs exec'd 0 ====
+
+	0x2547CB80:  92010060  stw r16,96(r1)
+	   0: GETL       	R16, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x60, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547CB84:  3A000000  li r16,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R16
+	   7: INCEIPL       	$4
+
+	0x2547CB88:  900100A4  stw r0,164(r1)
+	   8: GETL       	R0, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xA4, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547CB8C:  81230034  lwz r9,52(r3)
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0x34, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x2547CB90:  92A10074  stw r21,116(r1)
+	  18: GETL       	R21, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x74, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547CB94:  7C952378  or r21,r4,r4
+	  23: GETL       	R4, t18
+	  24: PUTL       	t18, R21
+	  25: INCEIPL       	$4
+
+	0x2547CB98:  2F890000  cmpi cr7,r9,0
+	  26: GETL       	R9, t20
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x7, CR
+	  29: INCEIPL       	$4
+
+	0x2547CB9C:  92C10078  stw r22,120(r1)
+	  30: GETL       	R22, t24
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x78, t26
+	  33: STL       	t24, (t26)
+	  34: INCEIPL       	$4
+
+	0x2547CBA0:  93010080  stw r24,128(r1)
+	  35: GETL       	R24, t28
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x80, t30
+	  38: STL       	t28, (t30)
+	  39: INCEIPL       	$4
+
+	0x2547CBA4:  3AC00000  li r22,0
+	  40: MOVL       	$0x0, t32
+	  41: PUTL       	t32, R22
+	  42: INCEIPL       	$4
+
+	0x2547CBA8:  9361008C  stw r27,140(r1)
+	  43: GETL       	R27, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0x8C, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x2547CBAC:  3B000000  li r24,0
+	  48: MOVL       	$0x0, t38
+	  49: PUTL       	t38, R24
+	  50: INCEIPL       	$4
+
+	0x2547CBB0:  93C10098  stw r30,152(r1)
+	  51: GETL       	R30, t40
+	  52: GETL       	R1, t42
+	  53: ADDL       	$0x98, t42
+	  54: STL       	t40, (t42)
+	  55: INCEIPL       	$4
+
+	0x2547CBB4:  7C7B1B78  or r27,r3,r3
+	  56: GETL       	R3, t44
+	  57: PUTL       	t44, R27
+	  58: INCEIPL       	$4
+
+	0x2547CBB8:  93E1009C  stw r31,156(r1)
+	  59: GETL       	R31, t46
+	  60: GETL       	R1, t48
+	  61: ADDL       	$0x9C, t48
+	  62: STL       	t46, (t48)
+	  63: INCEIPL       	$4
+
+	0x2547CBBC:  7FC802A6  mflr r30
+	  64: GETL       	LR, t50
+	  65: PUTL       	t50, R30
+	  66: INCEIPL       	$4
+
+	0x2547CBC0:  92210064  stw r17,100(r1)
+	  67: GETL       	R17, t52
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x64, t54
+	  70: STL       	t52, (t54)
+	  71: INCEIPL       	$4
+
+	0x2547CBC4:  7C3F0B78  or r31,r1,r1
+	  72: GETL       	R1, t56
+	  73: PUTL       	t56, R31
+	  74: INCEIPL       	$4
+
+	0x2547CBC8:  92410068  stw r18,104(r1)
+	  75: GETL       	R18, t58
+	  76: GETL       	R1, t60
+	  77: ADDL       	$0x68, t60
+	  78: STL       	t58, (t60)
+	  79: INCEIPL       	$4
+
+	0x2547CBCC:  38600000  li r3,0
+	  80: MOVL       	$0x0, t62
+	  81: PUTL       	t62, R3
+	  82: INCEIPL       	$4
+
+	0x2547CBD0:  9261006C  stw r19,108(r1)
+	  83: GETL       	R19, t64
+	  84: GETL       	R1, t66
+	  85: ADDL       	$0x6C, t66
+	  86: STL       	t64, (t66)
+	  87: INCEIPL       	$4
+
+	0x2547CBD4:  92810070  stw r20,112(r1)
+	  88: GETL       	R20, t68
+	  89: GETL       	R1, t70
+	  90: ADDL       	$0x70, t70
+	  91: STL       	t68, (t70)
+	  92: INCEIPL       	$4
+
+	0x2547CBD8:  92E1007C  stw r23,124(r1)
+	  93: GETL       	R23, t72
+	  94: GETL       	R1, t74
+	  95: ADDL       	$0x7C, t74
+	  96: STL       	t72, (t74)
+	  97: INCEIPL       	$4
+
+	0x2547CBDC:  93210084  stw r25,132(r1)
+	  98: GETL       	R25, t76
+	  99: GETL       	R1, t78
+	 100: ADDL       	$0x84, t78
+	 101: STL       	t76, (t78)
+	 102: INCEIPL       	$4
+
+	0x2547CBE0:  93410088  stw r26,136(r1)
+	 103: GETL       	R26, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x88, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x2547CBE4:  93810090  stw r28,144(r1)
+	 108: GETL       	R28, t84
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0x90, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x2547CBE8:  93A10094  stw r29,148(r1)
+	 113: GETL       	R29, t88
+	 114: GETL       	R1, t90
+	 115: ADDL       	$0x94, t90
+	 116: STL       	t88, (t90)
+	 117: INCEIPL       	$4
+
+	0x2547CBEC:  9181005C  stw r12,92(r1)
+	 118: GETL       	R12, t92
+	 119: GETL       	R1, t94
+	 120: ADDL       	$0x5C, t94
+	 121: STL       	t92, (t94)
+	 122: INCEIPL       	$4
+
+	0x2547CBF0:  419E0180  bc 12,30,0x2547CD70
+	 123: Js30o       	$0x2547CD70
+
+
+. 0 2547CB80 116
+. 92 01 00 60 3A 00 00 00 90 01 00 A4 81 23 00 34 92 A1 00 74 7C 95 23 78 2F 89 00 00 92 C1 00 78 93 01 00 80 3A C0 00 00 93 61 00 8C 3B 00 00 00 93 C1 00 98 7C 7B 1B 78 93 E1 00 9C 7F C8 02 A6 92 21 00 64 7C 3F 0B 78 92 41 00 68 38 60 00 00 92 61 00 6C 92 81 00 70 92 E1 00 7C 93 21 00 84 93 41 00 88 93 81 00 90 93 A1 00 94 91 81 00 5C 41 9E 01 80
+
+==== BB 926 (0x2547CBF4) approx BBs exec'd 0 ====
+
+	0x2547CBF4:  825B00AC  lwz r18,172(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xAC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x2547CBF8:  83290004  lwz r25,4(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x2547CBFC:  2D920000  cmpi cr3,r18,0
+	  10: GETL       	R18, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x3, CR
+	  13: INCEIPL       	$4
+
+	0x2547CC00:  823B00B4  lwz r17,180(r27)
+	  14: GETL       	R27, t12
+	  15: ADDL       	$0xB4, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R17
+	  18: INCEIPL       	$4
+
+	0x2547CC04:  418E0128  bc 12,14,0x2547CD2C
+	  19: Js14o       	$0x2547CD2C
+
+
+. 0 2547CBF4 20
+. 82 5B 00 AC 83 29 00 04 2D 92 00 00 82 3B 00 B4 41 8E 01 28
+
+==== BB 927 (0x2547CC08) approx BBs exec'd 0 ====
+
+	0x2547CC08:  817B0000  lwz r11,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x2547CC0C:  2E050000  cmpi cr4,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x4, CR
+	   7: INCEIPL       	$4
+
+	0x2547CC10:  81320004  lwz r9,4(r18)
+	   8: GETL       	R18, t8
+	   9: ADDL       	$0x4, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R9
+	  12: INCEIPL       	$4
+
+	0x2547CC14:  829E04C8  lwz r20,1224(r30)
+	  13: GETL       	R30, t12
+	  14: ADDL       	$0x4C8, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R20
+	  17: INCEIPL       	$4
+
+	0x2547CC18:  7C8B4A2E  lhzx r4,r11,r9
+	  18: GETL       	R9, t16
+	  19: GETL       	R11, t18
+	  20: ADDL       	t18, t16
+	  21: LDW       	(t16), t20
+	  22: PUTL       	t20, R4
+	  23: INCEIPL       	$4
+
+	0x2547CC1C:  7EEB4A14  add r23,r11,r9
+	  24: GETL       	R11, t22
+	  25: GETL       	R9, t24
+	  26: ADDL       	t22, t24
+	  27: PUTL       	t24, R23
+	  28: INCEIPL       	$4
+
+	0x2547CC20:  827E04D4  lwz r19,1236(r30)
+	  29: GETL       	R30, t26
+	  30: ADDL       	$0x4D4, t26
+	  31: LDL       	(t26), t28
+	  32: PUTL       	t28, R19
+	  33: INCEIPL       	$4
+
+	0x2547CC24:  2C040001  cmpi cr0,r4,1
+	  34: GETL       	R4, t30
+	  35: MOVL       	$0x1, t34
+	  36: CMPL       	t30, t34, t32  (-rSo)
+	  37: ICRFL       	t32, $0x0, CR
+	  38: INCEIPL       	$4
+
+	0x2547CC28:  408202D0  bc 4,2,0x2547CEF8
+	  39: Jc02o       	$0x2547CEF8
+
+
+. 0 2547CC08 36
+. 81 7B 00 00 2E 05 00 00 81 32 00 04 82 9E 04 C8 7C 8B 4A 2E 7E EB 4A 14 82 7E 04 D4 2C 04 00 01 40 82 02 D0
+
+==== BB 928 (0x2547CC2C) approx BBs exec'd 0 ====
+
+	0x2547CC2C:  835B0018  lwz r26,24(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x18, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547CC30:  83970004  lwz r28,4(r23)
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x2547CC34:  1E1A0018  mulli r16,r26,24
+	  10: GETL       	R26, t8
+	  11: MULL       	$0x18, t8
+	  12: PUTL       	t8, R16
+	  13: INCEIPL       	$4
+
+	0x2547CC38:  7F5CCA14  add r26,r28,r25
+	  14: GETL       	R28, t10
+	  15: GETL       	R25, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R26
+	  18: INCEIPL       	$4
+
+	0x2547CC3C:  7FB0A02E  lwzx r29,r16,r20
+	  19: GETL       	R20, t14
+	  20: GETL       	R16, t16
+	  21: ADDL       	t16, t14
+	  22: LDL       	(t14), t18
+	  23: PUTL       	t18, R29
+	  24: INCEIPL       	$4
+
+	0x2547CC40:  4800001C  b 0x2547CC5C
+	  25: JMPo       	$0x2547CC5C  ($4)
+
+
+. 0 2547CC2C 24
+. 83 5B 00 18 83 97 00 04 1E 1A 00 18 7F 5C CA 14 7F B0 A0 2E 48 00 00 1C
+
+==== BB 929 (0x2547CC5C) approx BBs exec'd 0 ====
+
+	0x2547CC5C:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547CC60:  409AFFE4  bc 4,26,0x2547CC44
+	   4: Jc26o       	$0x2547CC44
+
+
+. 0 2547CC5C 8
+. 2F 1D 00 00 40 9A FF E4
+
+==== BB 930 (0x2547CC44) approx BBs exec'd 0 ====
+
+	0x2547CC44:  7FA4EB78  or r4,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547CC48:  7F43D378  or r3,r26,r26
+	   3: GETL       	R26, t2
+	   4: PUTL       	t2, R3
+	   5: INCEIPL       	$4
+
+	0x2547CC4C:  4BFFFA89  bl 0x2547C6D4
+	   6: MOVL       	$0x2547CC50, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x2547C6D4  ($4)
+
+
+. 0 2547CC44 12
+. 7F A4 EB 78 7F 43 D3 78 4B FF FA 89
+
+==== BB 931 (0x2547CC50) approx BBs exec'd 0 ====
+
+	0x2547CC50:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547CC54:  409E0220  bc 4,30,0x2547CE74
+	   4: Jc30o       	$0x2547CE74
+
+
+. 0 2547CC50 8
+. 2F 83 00 00 40 9E 02 20
+
+==== BB 932 (0x2547CC58) approx BBs exec'd 0 ====
+
+	0x2547CC58:  83BD000C  lwz r29,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547CC5C:  2F1D0000  cmpi cr6,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547CC60:  409AFFE4  bc 4,26,0x2547CC44
+	   9: Jc26o       	$0x2547CC44
+
+
+. 0 2547CC58 12
+. 83 BD 00 0C 2F 1D 00 00 40 9A FF E4
+
+==== BB 933 (0x2547CE74) approx BBs exec'd 0 ====
+
+	0x2547CE74:  7FBCEB78  or r28,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0x2547CE78:  4192FE30  bc 12,18,0x2547CCA8
+	   3: Js18o       	$0x2547CCA8
+
+
+. 0 2547CE74 8
+. 7F BC EB 78 41 92 FE 30
+
+==== BB 934 (0x2547CCA8) approx BBs exec'd 0 ====
+
+	0x2547CCA8:  80970008  lwz r4,8(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547CCAC:  7FA4BA14  add r29,r4,r23
+	   5: GETL       	R4, t4
+	   6: GETL       	R23, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547CCB0:  7E7A9B78  or r26,r19,r19
+	  10: GETL       	R19, t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x2547CCB4:  811B0004  lwz r8,4(r27)
+	  13: GETL       	R27, t10
+	  14: ADDL       	$0x4, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R8
+	  17: INCEIPL       	$4
+
+	0x2547CCB8:  7EA7AB78  or r7,r21,r21
+	  18: GETL       	R21, t14
+	  19: PUTL       	t14, R7
+	  20: INCEIPL       	$4
+
+	0x2547CCBC:  88A80000  lbz r5,0(r8)
+	  21: GETL       	R8, t16
+	  22: LDB       	(t16), t18
+	  23: PUTL       	t18, R5
+	  24: INCEIPL       	$4
+
+	0x2547CCC0:  7D034378  or r3,r8,r8
+	  25: GETL       	R8, t20
+	  26: PUTL       	t20, R3
+	  27: INCEIPL       	$4
+
+	0x2547CCC4:  2C850000  cmpi cr1,r5,0
+	  28: GETL       	R5, t22
+	  29: CMP0L       	t22, t24  (-rSo)
+	  30: ICRFL       	t24, $0x1, CR
+	  31: INCEIPL       	$4
+
+	0x2547CCC8:  4086000C  bc 4,6,0x2547CCD4
+	  32: Jc06o       	$0x2547CCD4
+
+
+. 0 2547CCA8 36
+. 80 97 00 08 7F A4 BA 14 7E 7A 9B 78 81 1B 00 04 7E A7 AB 78 88 A8 00 00 7D 03 43 78 2C 85 00 00 40 86 00 0C
+
+==== BB 935 (0x2547CCCC) approx BBs exec'd 0 ====
+
+	0x2547CCCC:  80DA0000  lwz r6,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x2547CCD0:  80660000  lwz r3,0(r6)
+	   4: GETL       	R6, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R3
+	   7: INCEIPL       	$4
+
+	0x2547CCD4:  815D0008  lwz r10,8(r29)
+	   8: GETL       	R29, t8
+	   9: ADDL       	$0x8, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R10
+	  12: INCEIPL       	$4
+
+	0x2547CCD8:  A21D0004  lhz r16,4(r29)
+	  13: GETL       	R29, t12
+	  14: ADDL       	$0x4, t12
+	  15: LDW       	(t12), t14
+	  16: PUTL       	t14, R16
+	  17: INCEIPL       	$4
+
+	0x2547CCDC:  809D0000  lwz r4,0(r29)
+	  18: GETL       	R29, t16
+	  19: LDL       	(t16), t18
+	  20: PUTL       	t18, R4
+	  21: INCEIPL       	$4
+
+	0x2547CCE0:  7CAACA14  add r5,r10,r25
+	  22: GETL       	R10, t20
+	  23: GETL       	R25, t22
+	  24: ADDL       	t20, t22
+	  25: PUTL       	t22, R5
+	  26: INCEIPL       	$4
+
+	0x2547CCE4:  80DC0014  lwz r6,20(r28)
+	  27: GETL       	R28, t24
+	  28: ADDL       	$0x14, t24
+	  29: LDL       	(t24), t26
+	  30: PUTL       	t26, R6
+	  31: INCEIPL       	$4
+
+	0x2547CCE8:  560807BC  rlwinm r8,r16,0,30,30
+	  32: GETL       	R16, t28
+	  33: ANDL       	$0x2, t28
+	  34: PUTL       	t28, R8
+	  35: INCEIPL       	$4
+
+	0x2547CCEC:  4BFFFA85  bl 0x2547C770
+	  36: MOVL       	$0x2547CCF0, t30
+	  37: PUTL       	t30, LR
+	  38: JMPo-c       	$0x2547C770  ($4)
+
+
+. 0 2547CCCC 36
+. 80 DA 00 00 80 66 00 00 81 5D 00 08 A2 1D 00 04 80 9D 00 00 7C AA CA 14 80 DC 00 14 56 08 07 BC 4B FF FA 85
+
+==== BB 936 match_symbol(0x2547C770) approx BBs exec'd 0 ====
+
+	0x2547C770:  9421FF20  stwu r1,-224(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFF20, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547C774:  7D4802A6  mflr r10
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547C778:  4801A889  bl 0x25497000
+	   9: MOVL       	$0x2547C77C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547C770 12
+. 94 21 FF 20 7D 48 02 A6 48 01 A8 89
+
+==== BB 937 (0x2547C77C) approx BBs exec'd 0 ====
+
+	0x2547C77C:  93C100D8  stw r30,216(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xD8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547C780:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547C784:  926100AC  stw r19,172(r1)
+	   8: GETL       	R19, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xAC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547C788:  914100E4  stw r10,228(r1)
+	  13: GETL       	R10, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0xE4, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547C78C:  3A600000  li r19,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R19
+	  20: INCEIPL       	$4
+
+	0x2547C790:  81660034  lwz r11,52(r6)
+	  21: GETL       	R6, t16
+	  22: ADDL       	$0x34, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R11
+	  25: INCEIPL       	$4
+
+	0x2547C794:  813E04F4  lwz r9,1268(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4F4, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R9
+	  30: INCEIPL       	$4
+
+	0x2547C798:  92C100B8  stw r22,184(r1)
+	  31: GETL       	R22, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0xB8, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547C79C:  7CF63B78  or r22,r7,r7
+	  36: GETL       	R7, t28
+	  37: PUTL       	t28, R22
+	  38: INCEIPL       	$4
+
+	0x2547C7A0:  80090000  lwz r0,0(r9)
+	  39: GETL       	R9, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R0
+	  42: INCEIPL       	$4
+
+	0x2547C7A4:  92E100BC  stw r23,188(r1)
+	  43: GETL       	R23, t34
+	  44: GETL       	R1, t36
+	  45: ADDL       	$0xBC, t36
+	  46: STL       	t34, (t36)
+	  47: INCEIPL       	$4
+
+	0x2547C7A8:  7D174378  or r23,r8,r8
+	  48: GETL       	R8, t38
+	  49: PUTL       	t38, R23
+	  50: INCEIPL       	$4
+
+	0x2547C7AC:  70090010  andi. r9,r0,0x10
+	  51: GETL       	R0, t40
+	  52: ANDL       	$0x10, t40
+	  53: PUTL       	t40, R9
+	  54: CMP0L       	t40, t42  (-rSo)
+	  55: ICRFL       	t42, $0x0, CR
+	  56: INCEIPL       	$4
+
+	0x2547C7B0:  930100C0  stw r24,192(r1)
+	  57: GETL       	R24, t44
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0xC0, t46
+	  60: STL       	t44, (t46)
+	  61: INCEIPL       	$4
+
+	0x2547C7B4:  932100C4  stw r25,196(r1)
+	  62: GETL       	R25, t48
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0xC4, t50
+	  65: STL       	t48, (t50)
+	  66: INCEIPL       	$4
+
+	0x2547C7B8:  7C781B78  or r24,r3,r3
+	  67: GETL       	R3, t52
+	  68: PUTL       	t52, R24
+	  69: INCEIPL       	$4
+
+	0x2547C7BC:  936100CC  stw r27,204(r1)
+	  70: GETL       	R27, t54
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0xCC, t56
+	  73: STL       	t54, (t56)
+	  74: INCEIPL       	$4
+
+	0x2547C7C0:  7CD93378  or r25,r6,r6
+	  75: GETL       	R6, t58
+	  76: PUTL       	t58, R25
+	  77: INCEIPL       	$4
+
+	0x2547C7C4:  938100D0  stw r28,208(r1)
+	  78: GETL       	R28, t60
+	  79: GETL       	R1, t62
+	  80: ADDL       	$0xD0, t62
+	  81: STL       	t60, (t62)
+	  82: INCEIPL       	$4
+
+	0x2547C7C8:  7C9B2378  or r27,r4,r4
+	  83: GETL       	R4, t64
+	  84: PUTL       	t64, R27
+	  85: INCEIPL       	$4
+
+	0x2547C7CC:  93E100DC  stw r31,220(r1)
+	  86: GETL       	R31, t66
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0xDC, t68
+	  89: STL       	t66, (t68)
+	  90: INCEIPL       	$4
+
+	0x2547C7D0:  7CBC2B78  or r28,r5,r5
+	  91: GETL       	R5, t70
+	  92: PUTL       	t70, R28
+	  93: INCEIPL       	$4
+
+	0x2547C7D4:  928100B0  stw r20,176(r1)
+	  94: GETL       	R20, t72
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0xB0, t74
+	  97: STL       	t72, (t74)
+	  98: INCEIPL       	$4
+
+	0x2547C7D8:  7C3F0B78  or r31,r1,r1
+	  99: GETL       	R1, t76
+	 100: PUTL       	t76, R31
+	 101: INCEIPL       	$4
+
+	0x2547C7DC:  92A100B4  stw r21,180(r1)
+	 102: GETL       	R21, t78
+	 103: GETL       	R1, t80
+	 104: ADDL       	$0xB4, t80
+	 105: STL       	t78, (t80)
+	 106: INCEIPL       	$4
+
+	0x2547C7E0:  934100C8  stw r26,200(r1)
+	 107: GETL       	R26, t82
+	 108: GETL       	R1, t84
+	 109: ADDL       	$0xC8, t84
+	 110: STL       	t82, (t84)
+	 111: INCEIPL       	$4
+
+	0x2547C7E4:  93A100D4  stw r29,212(r1)
+	 112: GETL       	R29, t86
+	 113: GETL       	R1, t88
+	 114: ADDL       	$0xD4, t88
+	 115: STL       	t86, (t88)
+	 116: INCEIPL       	$4
+
+	0x2547C7E8:  834B0004  lwz r26,4(r11)
+	 117: GETL       	R11, t90
+	 118: ADDL       	$0x4, t90
+	 119: LDL       	(t90), t92
+	 120: PUTL       	t92, R26
+	 121: INCEIPL       	$4
+
+	0x2547C7EC:  40820230  bc 4,2,0x2547CA1C
+	 122: Jc02o       	$0x2547CA1C
+
+
+. 0 2547C77C 116
+. 93 C1 00 D8 7F C8 02 A6 92 61 00 AC 91 41 00 E4 3A 60 00 00 81 66 00 34 81 3E 04 F4 92 C1 00 B8 7C F6 3B 78 80 09 00 00 92 E1 00 BC 7D 17 43 78 70 09 00 10 93 01 00 C0 93 21 00 C4 7C 78 1B 78 93 61 00 CC 7C D9 33 78 93 81 00 D0 7C 9B 23 78 93 E1 00 DC 7C BC 2B 78 92 81 00 B0 7C 3F 0B 78 92 A1 00 B4 93 41 00 C8 93 A1 00 D4 83 4B 00 04 40 82 02 30
+
+==== BB 938 (0x2547C7F0) approx BBs exec'd 0 ====
+
+	0x2547C7F0:  813900B4  lwz r9,180(r25)
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0xB4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547C7F4:  2C890000  cmpi cr1,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547C7F8:  41860264  bc 12,6,0x2547CA5C
+	   9: Js06o       	$0x2547CA5C
+
+
+. 0 2547C7F0 12
+. 81 39 00 B4 2C 89 00 00 41 86 02 64
+
+==== BB 939 (0x2547C7FC) approx BBs exec'd 0 ====
+
+	0x2547C7FC:  81690004  lwz r11,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547C800:  3A9F0008  addi r20,r31,8
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R20
+	   8: INCEIPL       	$4
+
+	0x2547C804:  80190000  lwz r0,0(r25)
+	   9: GETL       	R25, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x2547C808:  82BE0358  lwz r21,856(r30)
+	  13: GETL       	R30, t10
+	  14: ADDL       	$0x358, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R21
+	  17: INCEIPL       	$4
+
+	0x2547C80C:  7FA05A14  add r29,r0,r11
+	  18: GETL       	R0, t14
+	  19: GETL       	R11, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R29
+	  22: INCEIPL       	$4
+
+	0x2547C810:  A13D0000  lhz r9,0(r29)
+	  23: GETL       	R29, t18
+	  24: LDW       	(t18), t20
+	  25: PUTL       	t20, R9
+	  26: INCEIPL       	$4
+
+	0x2547C814:  7F83E378  or r3,r28,r28
+	  27: GETL       	R28, t22
+	  28: PUTL       	t22, R3
+	  29: INCEIPL       	$4
+
+	0x2547C818:  2F890001  cmpi cr7,r9,1
+	  30: GETL       	R9, t24
+	  31: MOVL       	$0x1, t28
+	  32: CMPL       	t24, t28, t26  (-rSo)
+	  33: ICRFL       	t26, $0x7, CR
+	  34: INCEIPL       	$4
+
+	0x2547C81C:  409E0124  bc 4,30,0x2547C940
+	  35: Jc30o       	$0x2547C940
+
+
+. 0 2547C7FC 36
+. 81 69 00 04 3A 9F 00 08 80 19 00 00 82 BE 03 58 7F A0 5A 14 A1 3D 00 00 7F 83 E3 78 2F 89 00 01 40 9E 01 24
+
+==== BB 940 (0x2547C820) approx BBs exec'd 0 ====
+
+	0x2547C820:  809D0008  lwz r4,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547C824:  7C84D800  cmp cr1,r4,r27
+	   5: GETL       	R4, t4
+	   6: GETL       	R27, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x2547C828:  418600B4  bc 12,6,0x2547C8DC
+	  10: Js06o       	$0x2547C8DC
+
+
+. 0 2547C820 12
+. 80 9D 00 08 7C 84 D8 00 41 86 00 B4
+
+==== BB 941 (0x2547C82C) approx BBs exec'd 0 ====
+
+	0x2547C82C:  80FD0010  lwz r7,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547C830:  2F870000  cmpi cr7,r7,0
+	   5: GETL       	R7, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547C834:  7FBD3A14  add r29,r29,r7
+	   9: GETL       	R29, t8
+	  10: GETL       	R7, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R29
+	  13: INCEIPL       	$4
+
+	0x2547C838:  409EFFD8  bc 4,30,0x2547C810
+	  14: Jc30o       	$0x2547C810
+
+
+. 0 2547C82C 16
+. 80 FD 00 10 2F 87 00 00 7F BD 3A 14 40 9E FF D8
+
+==== BB 942 (0x2547C810) approx BBs exec'd 0 ====
+
+	0x2547C810:  A13D0000  lhz r9,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDW       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547C814:  7F83E378  or r3,r28,r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, R3
+	   6: INCEIPL       	$4
+
+	0x2547C818:  2F890001  cmpi cr7,r9,1
+	   7: GETL       	R9, t6
+	   8: MOVL       	$0x1, t10
+	   9: CMPL       	t6, t10, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x2547C81C:  409E0124  bc 4,30,0x2547C940
+	  12: Jc30o       	$0x2547C940
+
+
+. 0 2547C810 16
+. A1 3D 00 00 7F 83 E3 78 2F 89 00 01 40 9E 01 24
+
+==== BB 943 (0x2547C8DC) approx BBs exec'd 0 ====
+
+	0x2547C8DC:  80BD000C  lwz r5,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547C8E0:  7CC5E82E  lwzx r6,r5,r29
+	   5: GETL       	R29, t4
+	   6: GETL       	R5, t6
+	   7: ADDL       	t6, t4
+	   8: LDL       	(t4), t8
+	   9: PUTL       	t8, R6
+	  10: INCEIPL       	$4
+
+	0x2547C8E4:  7C86D214  add r4,r6,r26
+	  11: GETL       	R6, t10
+	  12: GETL       	R26, t12
+	  13: ADDL       	t10, t12
+	  14: PUTL       	t12, R4
+	  15: INCEIPL       	$4
+
+	0x2547C8E8:  480065F9  bl 0x25482EE0
+	  16: MOVL       	$0x2547C8EC, t14
+	  17: PUTL       	t14, LR
+	  18: JMPo-c       	$0x25482EE0  ($4)
+
+
+. 0 2547C8DC 16
+. 80 BD 00 0C 7C C5 E8 2E 7C 86 D2 14 48 00 65 F9
+
+==== BB 944 (0x2547C8EC) approx BBs exec'd 0 ====
+
+	0x2547C8EC:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547C8F0:  40BAFF3C  bc 5,26,0x2547C82C
+	   4: Jc26o       	$0x2547C82C
+
+
+. 0 2547C8EC 8
+. 2F 03 00 00 40 BA FF 3C
+
+==== BB 945 (0x2547C8F4) approx BBs exec'd 0 ====
+
+	0x2547C8F4:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547C8F8:  80C10000  lwz r6,0(r1)
+	   3: GETL       	R1, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R6
+	   6: INCEIPL       	$4
+
+	0x2547C8FC:  80E60004  lwz r7,4(r6)
+	   7: GETL       	R6, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R7
+	  11: INCEIPL       	$4
+
+	0x2547C900:  8266FFCC  lwz r19,-52(r6)
+	  12: GETL       	R6, t10
+	  13: ADDL       	$0xFFFFFFCC, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R19
+	  16: INCEIPL       	$4
+
+	0x2547C904:  8286FFD0  lwz r20,-48(r6)
+	  17: GETL       	R6, t14
+	  18: ADDL       	$0xFFFFFFD0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R20
+	  21: INCEIPL       	$4
+
+	0x2547C908:  7CE803A6  mtlr r7
+	  22: GETL       	R7, t18
+	  23: PUTL       	t18, LR
+	  24: INCEIPL       	$4
+
+	0x2547C90C:  82A6FFD4  lwz r21,-44(r6)
+	  25: GETL       	R6, t20
+	  26: ADDL       	$0xFFFFFFD4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R21
+	  29: INCEIPL       	$4
+
+	0x2547C910:  82C6FFD8  lwz r22,-40(r6)
+	  30: GETL       	R6, t24
+	  31: ADDL       	$0xFFFFFFD8, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R22
+	  34: INCEIPL       	$4
+
+	0x2547C914:  82E6FFDC  lwz r23,-36(r6)
+	  35: GETL       	R6, t28
+	  36: ADDL       	$0xFFFFFFDC, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R23
+	  39: INCEIPL       	$4
+
+	0x2547C918:  8306FFE0  lwz r24,-32(r6)
+	  40: GETL       	R6, t32
+	  41: ADDL       	$0xFFFFFFE0, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R24
+	  44: INCEIPL       	$4
+
+	0x2547C91C:  8326FFE4  lwz r25,-28(r6)
+	  45: GETL       	R6, t36
+	  46: ADDL       	$0xFFFFFFE4, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R25
+	  49: INCEIPL       	$4
+
+	0x2547C920:  8346FFE8  lwz r26,-24(r6)
+	  50: GETL       	R6, t40
+	  51: ADDL       	$0xFFFFFFE8, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R26
+	  54: INCEIPL       	$4
+
+	0x2547C924:  8366FFEC  lwz r27,-20(r6)
+	  55: GETL       	R6, t44
+	  56: ADDL       	$0xFFFFFFEC, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R27
+	  59: INCEIPL       	$4
+
+	0x2547C928:  8386FFF0  lwz r28,-16(r6)
+	  60: GETL       	R6, t48
+	  61: ADDL       	$0xFFFFFFF0, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R28
+	  64: INCEIPL       	$4
+
+	0x2547C92C:  83A6FFF4  lwz r29,-12(r6)
+	  65: GETL       	R6, t52
+	  66: ADDL       	$0xFFFFFFF4, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R29
+	  69: INCEIPL       	$4
+
+	0x2547C930:  83C6FFF8  lwz r30,-8(r6)
+	  70: GETL       	R6, t56
+	  71: ADDL       	$0xFFFFFFF8, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R30
+	  74: INCEIPL       	$4
+
+	0x2547C934:  83E6FFFC  lwz r31,-4(r6)
+	  75: GETL       	R6, t60
+	  76: ADDL       	$0xFFFFFFFC, t60
+	  77: LDL       	(t60), t62
+	  78: PUTL       	t62, R31
+	  79: INCEIPL       	$4
+
+	0x2547C938:  7CC13378  or r1,r6,r6
+	  80: GETL       	R6, t64
+	  81: PUTL       	t64, R1
+	  82: INCEIPL       	$4
+
+	0x2547C93C:  4E800020  blr
+	  83: GETL       	LR, t66
+	  84: JMPo-r       	t66  ($4)
+
+
+. 0 2547C8F4 76
+. 38 60 00 00 80 C1 00 00 80 E6 00 04 82 66 FF CC 82 86 FF D0 7C E8 03 A6 82 A6 FF D4 82 C6 FF D8 82 E6 FF DC 83 06 FF E0 83 26 FF E4 83 46 FF E8 83 66 FF EC 83 86 FF F0 83 A6 FF F4 83 C6 FF F8 83 E6 FF FC 7C C1 33 78 4E 80 00 20
+
+==== BB 946 (0x2547CCF0) approx BBs exec'd 0 ====
+
+	0x2547CCF0:  A17D0006  lhz r11,6(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x6, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547CCF4:  7ED61B78  or r22,r22,r3
+	   5: GETL       	R22, t4
+	   6: GETL       	R3, t6
+	   7: ORL       	t6, t4
+	   8: PUTL       	t4, R22
+	   9: INCEIPL       	$4
+
+	0x2547CCF8:  5560047E  rlwinm r0,r11,0,17,31
+	  10: GETL       	R11, t8
+	  11: ANDL       	$0x7FFF, t8
+	  12: PUTL       	t8, R0
+	  13: INCEIPL       	$4
+
+	0x2547CCFC:  7F00C040  cmpl cr6,r0,r24
+	  14: GETL       	R0, t10
+	  15: GETL       	R24, t12
+	  16: CMPUL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0x2547CD00:  40990008  bc 4,25,0x2547CD08
+	  19: Jc25o       	$0x2547CD08
+
+
+. 0 2547CCF0 20
+. A1 7D 00 06 7E D6 1B 78 55 60 04 7E 7F 00 C0 40 40 99 00 08
+
+==== BB 947 (0x2547CD04) approx BBs exec'd 0 ====
+
+	0x2547CD04:  7C180378  or r24,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R24
+	   2: INCEIPL       	$4
+
+	0x2547CD08:  807D000C  lwz r3,12(r29)
+	   3: GETL       	R29, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547CD0C:  2F830000  cmpi cr7,r3,0
+	   8: GETL       	R3, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x2547CD10:  7FBD1A14  add r29,r29,r3
+	  12: GETL       	R29, t10
+	  13: GETL       	R3, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R29
+	  16: INCEIPL       	$4
+
+	0x2547CD14:  409EFFA0  bc 4,30,0x2547CCB4
+	  17: Jc30o       	$0x2547CCB4
+
+
+. 0 2547CD04 20
+. 7C 18 03 78 80 7D 00 0C 2F 83 00 00 7F BD 1A 14 40 9E FF A0
+
+==== BB 948 (0x2547CD18) approx BBs exec'd 0 ====
+
+	0x2547CD18:  8017000C  lwz r0,12(r23)
+	   0: GETL       	R23, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547CD1C:  2C000000  cmpi cr0,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547CD20:  4182000C  bc 12,2,0x2547CD2C
+	   9: Js02o       	$0x2547CD2C
+
+
+. 0 2547CD18 12
+. 80 17 00 0C 2C 00 00 00 41 82 00 0C
+
+==== BB 949 (0x2547CD2C) approx BBs exec'd 0 ====
+
+	0x2547CD2C:  2E110000  cmpi cr4,r17,0
+	   0: GETL       	R17, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x2547CD30:  41920034  bc 12,18,0x2547CD64
+	   4: Js18o       	$0x2547CD64
+
+
+. 0 2547CD2C 8
+. 2E 11 00 00 41 92 00 34
+
+==== BB 950 (0x2547CD64) approx BBs exec'd 0 ====
+
+	0x2547CD64:  2F980000  cmpi cr7,r24,0
+	   0: GETL       	R24, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547CD68:  409E0064  bc 4,30,0x2547CDCC
+	   4: Jc30o       	$0x2547CDCC
+
+
+. 0 2547CD64 8
+. 2F 98 00 00 40 9E 00 64
+
+==== BB 951 (0x2547CDCC) approx BBs exec'd 0 ====
+
+	0x2547CDCC:  3BB80001  addi r29,r24,1
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x2547CDD0:  38800010  li r4,16
+	   4: MOVL       	$0x10, t2
+	   5: PUTL       	t2, R4
+	   6: INCEIPL       	$4
+
+	0x2547CDD4:  7FA3EB78  or r3,r29,r29
+	   7: GETL       	R29, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0x2547CDD8:  3A00000C  li r16,12
+	  10: MOVL       	$0xC, t6
+	  11: PUTL       	t6, R16
+	  12: INCEIPL       	$4
+
+	0x2547CDDC:  4801AC51  bl 0x25497A2C
+	  13: MOVL       	$0x2547CDE0, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0x25497A2C  ($4)
+
+
+. 0 2547CDCC 20
+. 3B B8 00 01 38 80 00 10 7F A3 EB 78 3A 00 00 0C 48 01 AC 51
+
+==== BB 952 (0x2547CDE0) approx BBs exec'd 0 ====
+
+	0x2547CDE0:  80DE0378  lwz r6,888(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x378, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547CDE4:  2C030000  cmpi cr0,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547CDE8:  7C651B78  or r5,r3,r3
+	   9: GETL       	R3, t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x2547CDEC:  907B0188  stw r3,392(r27)
+	  12: GETL       	R3, t10
+	  13: GETL       	R27, t12
+	  14: ADDL       	$0x188, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0x2547CDF0:  418201AC  bc 12,2,0x2547CF9C
+	  17: Js02o       	$0x2547CF9C
+
+
+. 0 2547CDE0 20
+. 80 DE 03 78 2C 03 00 00 7C 65 1B 78 90 7B 01 88 41 82 01 AC
+
+==== BB 953 (0x2547CDF4) approx BBs exec'd 0 ====
+
+	0x2547CDF4:  839B00E4  lwz r28,228(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xE4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x2547CDF8:  93BB0184  stw r29,388(r27)
+	   5: GETL       	R29, t4
+	   6: GETL       	R27, t6
+	   7: ADDL       	$0x184, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547CDFC:  831C0004  lwz r24,4(r28)
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R24
+	  14: INCEIPL       	$4
+
+	0x2547CE00:  931B0198  stw r24,408(r27)
+	  15: GETL       	R24, t12
+	  16: GETL       	R27, t14
+	  17: ADDL       	$0x198, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x2547CE04:  418E0088  bc 12,14,0x2547CE8C
+	  20: Js14o       	$0x2547CE8C
+
+
+. 0 2547CDF4 20
+. 83 9B 00 E4 93 BB 01 84 83 1C 00 04 93 1B 01 98 41 8E 00 88
+
+==== BB 954 (0x2547CE08) approx BBs exec'd 0 ====
+
+	0x2547CE08:  83520004  lwz r26,4(r18)
+	   0: GETL       	R18, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547CE0C:  825B0000  lwz r18,0(r27)
+	   5: GETL       	R27, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R18
+	   8: INCEIPL       	$4
+
+	0x2547CE10:  7CD2D214  add r6,r18,r26
+	   9: GETL       	R18, t8
+	  10: GETL       	R26, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R6
+	  13: INCEIPL       	$4
+
+	0x2547CE14:  81860008  lwz r12,8(r6)
+	  14: GETL       	R6, t12
+	  15: ADDL       	$0x8, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R12
+	  18: INCEIPL       	$4
+
+	0x2547CE18:  7CEC3214  add r7,r12,r6
+	  19: GETL       	R12, t16
+	  20: GETL       	R6, t18
+	  21: ADDL       	t16, t18
+	  22: PUTL       	t18, R7
+	  23: INCEIPL       	$4
+
+	0x2547CE1C:  A2A70006  lhz r21,6(r7)
+	  24: GETL       	R7, t20
+	  25: ADDL       	$0x6, t20
+	  26: LDW       	(t20), t22
+	  27: PUTL       	t22, R21
+	  28: INCEIPL       	$4
+
+	0x2547CE20:  80670000  lwz r3,0(r7)
+	  29: GETL       	R7, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R3
+	  32: INCEIPL       	$4
+
+	0x2547CE24:  56A42376  rlwinm r4,r21,4,13,27
+	  33: GETL       	R21, t28
+	  34: ROLL       	$0x4, t28
+	  35: ANDL       	$0x7FFF0, t28
+	  36: PUTL       	t28, R4
+	  37: INCEIPL       	$4
+
+	0x2547CE28:  56B00420  rlwinm r16,r21,0,16,16
+	  38: GETL       	R21, t30
+	  39: ANDL       	$0x8000, t30
+	  40: PUTL       	t30, R16
+	  41: INCEIPL       	$4
+
+	0x2547CE2C:  7FA42A14  add r29,r4,r5
+	  42: GETL       	R4, t32
+	  43: GETL       	R5, t34
+	  44: ADDL       	t32, t34
+	  45: PUTL       	t34, R29
+	  46: INCEIPL       	$4
+
+	0x2547CE30:  907D0004  stw r3,4(r29)
+	  47: GETL       	R3, t36
+	  48: GETL       	R29, t38
+	  49: ADDL       	$0x4, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0x2547CE34:  921D0008  stw r16,8(r29)
+	  52: GETL       	R16, t40
+	  53: GETL       	R29, t42
+	  54: ADDL       	$0x8, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0x2547CE38:  8167000C  lwz r11,12(r7)
+	  57: GETL       	R7, t44
+	  58: ADDL       	$0xC, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R11
+	  61: INCEIPL       	$4
+
+	0x2547CE3C:  81470008  lwz r10,8(r7)
+	  62: GETL       	R7, t48
+	  63: ADDL       	$0x8, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R10
+	  66: INCEIPL       	$4
+
+	0x2547CE40:  2D8B0000  cmpi cr3,r11,0
+	  67: GETL       	R11, t52
+	  68: CMP0L       	t52, t54  (-rSo)
+	  69: ICRFL       	t54, $0x3, CR
+	  70: INCEIPL       	$4
+
+	0x2547CE44:  81060004  lwz r8,4(r6)
+	  71: GETL       	R6, t56
+	  72: ADDL       	$0x4, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R8
+	  75: INCEIPL       	$4
+
+	0x2547CE48:  7D2ACA14  add r9,r10,r25
+	  76: GETL       	R10, t60
+	  77: GETL       	R25, t62
+	  78: ADDL       	t60, t62
+	  79: PUTL       	t62, R9
+	  80: INCEIPL       	$4
+
+	0x2547CE4C:  7CE75A14  add r7,r7,r11
+	  81: GETL       	R7, t64
+	  82: GETL       	R11, t66
+	  83: ADDL       	t64, t66
+	  84: PUTL       	t66, R7
+	  85: INCEIPL       	$4
+
+	0x2547CE50:  7C08CA14  add r0,r8,r25
+	  86: GETL       	R8, t68
+	  87: GETL       	R25, t70
+	  88: ADDL       	t68, t70
+	  89: PUTL       	t70, R0
+	  90: INCEIPL       	$4
+
+	0x2547CE54:  7D24292E  stwx r9,r4,r5
+	  91: GETL       	R5, t72
+	  92: GETL       	R4, t74
+	  93: ADDL       	t74, t72
+	  94: GETL       	R9, t76
+	  95: STL       	t76, (t72)
+	  96: INCEIPL       	$4
+
+	0x2547CE58:  901D000C  stw r0,12(r29)
+	  97: GETL       	R0, t78
+	  98: GETL       	R29, t80
+	  99: ADDL       	$0xC, t80
+	 100: STL       	t78, (t80)
+	 101: INCEIPL       	$4
+
+	0x2547CE5C:  408EFFC0  bc 4,14,0x2547CE1C
+	 102: Jc14o       	$0x2547CE1C
+
+
+. 0 2547CE08 88
+. 83 52 00 04 82 5B 00 00 7C D2 D2 14 81 86 00 08 7C EC 32 14 A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+
+==== BB 955 (0x2547CE60) approx BBs exec'd 0 ====
+
+	0x2547CE60:  8006000C  lwz r0,12(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547CE64:  2C800000  cmpi cr1,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547CE68:  41860024  bc 12,6,0x2547CE8C
+	   9: Js06o       	$0x2547CE8C
+
+
+. 0 2547CE60 12
+. 80 06 00 0C 2C 80 00 00 41 86 00 24
+
+==== BB 956 (0x2547CE8C) approx BBs exec'd 0 ====
+
+	0x2547CE8C:  41B2FEE0  bc 13,18,0x2547CD6C
+	   0: Js18o       	$0x2547CD6C
+
+
+. 0 2547CE8C 4
+. 41 B2 FE E0
+
+==== BB 957 (0x2547CD6C) approx BBs exec'd 0 ====
+
+	0x2547CD6C:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547CD70:  80A10000  lwz r5,0(r1)
+	   3: GETL       	R1, t2
+	   4: LDL       	(t2), t4
+	   5: PUTL       	t4, R5
+	   6: INCEIPL       	$4
+
+	0x2547CD74:  82C50004  lwz r22,4(r5)
+	   7: GETL       	R5, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R22
+	  11: INCEIPL       	$4
+
+	0x2547CD78:  8185FFBC  lwz r12,-68(r5)
+	  12: GETL       	R5, t10
+	  13: ADDL       	$0xFFFFFFBC, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R12
+	  16: INCEIPL       	$4
+
+	0x2547CD7C:  7EC803A6  mtlr r22
+	  17: GETL       	R22, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0x2547CD80:  8205FFC0  lwz r16,-64(r5)
+	  20: GETL       	R5, t16
+	  21: ADDL       	$0xFFFFFFC0, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R16
+	  24: INCEIPL       	$4
+
+	0x2547CD84:  8225FFC4  lwz r17,-60(r5)
+	  25: GETL       	R5, t20
+	  26: ADDL       	$0xFFFFFFC4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R17
+	  29: INCEIPL       	$4
+
+	0x2547CD88:  7D818120  mtcrf 0x18,r12
+	  30: GETL       	R12, t24
+	  31: ICRFL       	t24, $0x3, CR
+	  32: ICRFL       	t24, $0x4, CR
+	  33: INCEIPL       	$4
+
+	0x2547CD8C:  8245FFC8  lwz r18,-56(r5)
+	  34: GETL       	R5, t26
+	  35: ADDL       	$0xFFFFFFC8, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R18
+	  38: INCEIPL       	$4
+
+	0x2547CD90:  8265FFCC  lwz r19,-52(r5)
+	  39: GETL       	R5, t30
+	  40: ADDL       	$0xFFFFFFCC, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R19
+	  43: INCEIPL       	$4
+
+	0x2547CD94:  8285FFD0  lwz r20,-48(r5)
+	  44: GETL       	R5, t34
+	  45: ADDL       	$0xFFFFFFD0, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R20
+	  48: INCEIPL       	$4
+
+	0x2547CD98:  82A5FFD4  lwz r21,-44(r5)
+	  49: GETL       	R5, t38
+	  50: ADDL       	$0xFFFFFFD4, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R21
+	  53: INCEIPL       	$4
+
+	0x2547CD9C:  82C5FFD8  lwz r22,-40(r5)
+	  54: GETL       	R5, t42
+	  55: ADDL       	$0xFFFFFFD8, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R22
+	  58: INCEIPL       	$4
+
+	0x2547CDA0:  82E5FFDC  lwz r23,-36(r5)
+	  59: GETL       	R5, t46
+	  60: ADDL       	$0xFFFFFFDC, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R23
+	  63: INCEIPL       	$4
+
+	0x2547CDA4:  8305FFE0  lwz r24,-32(r5)
+	  64: GETL       	R5, t50
+	  65: ADDL       	$0xFFFFFFE0, t50
+	  66: LDL       	(t50), t52
+	  67: PUTL       	t52, R24
+	  68: INCEIPL       	$4
+
+	0x2547CDA8:  8325FFE4  lwz r25,-28(r5)
+	  69: GETL       	R5, t54
+	  70: ADDL       	$0xFFFFFFE4, t54
+	  71: LDL       	(t54), t56
+	  72: PUTL       	t56, R25
+	  73: INCEIPL       	$4
+
+	0x2547CDAC:  8345FFE8  lwz r26,-24(r5)
+	  74: GETL       	R5, t58
+	  75: ADDL       	$0xFFFFFFE8, t58
+	  76: LDL       	(t58), t60
+	  77: PUTL       	t60, R26
+	  78: INCEIPL       	$4
+
+	0x2547CDB0:  8365FFEC  lwz r27,-20(r5)
+	  79: GETL       	R5, t62
+	  80: ADDL       	$0xFFFFFFEC, t62
+	  81: LDL       	(t62), t64
+	  82: PUTL       	t64, R27
+	  83: INCEIPL       	$4
+
+	0x2547CDB4:  8385FFF0  lwz r28,-16(r5)
+	  84: GETL       	R5, t66
+	  85: ADDL       	$0xFFFFFFF0, t66
+	  86: LDL       	(t66), t68
+	  87: PUTL       	t68, R28
+	  88: INCEIPL       	$4
+
+	0x2547CDB8:  83A5FFF4  lwz r29,-12(r5)
+	  89: GETL       	R5, t70
+	  90: ADDL       	$0xFFFFFFF4, t70
+	  91: LDL       	(t70), t72
+	  92: PUTL       	t72, R29
+	  93: INCEIPL       	$4
+
+	0x2547CDBC:  83C5FFF8  lwz r30,-8(r5)
+	  94: GETL       	R5, t74
+	  95: ADDL       	$0xFFFFFFF8, t74
+	  96: LDL       	(t74), t76
+	  97: PUTL       	t76, R30
+	  98: INCEIPL       	$4
+
+	0x2547CDC0:  83E5FFFC  lwz r31,-4(r5)
+	  99: GETL       	R5, t78
+	 100: ADDL       	$0xFFFFFFFC, t78
+	 101: LDL       	(t78), t80
+	 102: PUTL       	t80, R31
+	 103: INCEIPL       	$4
+
+	0x2547CDC4:  7CA12B78  or r1,r5,r5
+	 104: GETL       	R5, t82
+	 105: PUTL       	t82, R1
+	 106: INCEIPL       	$4
+
+	0x2547CDC8:  4E800020  blr
+	 107: GETL       	LR, t84
+	 108: JMPo-r       	t84  ($4)
+
+
+. 0 2547CD6C 96
+. 7E C3 B3 78 80 A1 00 00 82 C5 00 04 81 85 FF BC 7E C8 03 A6 82 05 FF C0 82 25 FF C4 7D 81 81 20 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+
+==== BB 958 (0x2547D034) approx BBs exec'd 0 ====
+
+	0x2547D034:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547D038:  41BEFFCC  bc 13,30,0x2547D004
+	   4: Js30o       	$0x2547D004
+
+
+. 0 2547D034 8
+. 2F 83 00 00 41 BE FF CC
+
+==== BB 959 (0x2547D004) approx BBs exec'd 0 ====
+
+	0x2547D004:  83FF000C  lwz r31,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0x2547D008:  7F9CEB78  or r28,r28,r29
+	   5: GETL       	R28, t4
+	   6: GETL       	R29, t6
+	   7: ORL       	t6, t4
+	   8: PUTL       	t4, R28
+	   9: INCEIPL       	$4
+
+	0x2547D00C:  2C9F0000  cmpi cr1,r31,0
+	  10: GETL       	R31, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x2547D010:  41860040  bc 12,6,0x2547D050
+	  14: Js06o       	$0x2547D050
+
+
+. 0 2547D004 16
+. 83 FF 00 0C 7F 9C EB 78 2C 9F 00 00 41 86 00 40
+
+==== BB 960 (0x2547CCD4) approx BBs exec'd 0 ====
+
+	0x2547CCD4:  815D0008  lwz r10,8(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547CCD8:  A21D0004  lhz r16,4(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDW       	(t4), t6
+	   8: PUTL       	t6, R16
+	   9: INCEIPL       	$4
+
+	0x2547CCDC:  809D0000  lwz r4,0(r29)
+	  10: GETL       	R29, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R4
+	  13: INCEIPL       	$4
+
+	0x2547CCE0:  7CAACA14  add r5,r10,r25
+	  14: GETL       	R10, t12
+	  15: GETL       	R25, t14
+	  16: ADDL       	t12, t14
+	  17: PUTL       	t14, R5
+	  18: INCEIPL       	$4
+
+	0x2547CCE4:  80DC0014  lwz r6,20(r28)
+	  19: GETL       	R28, t16
+	  20: ADDL       	$0x14, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R6
+	  23: INCEIPL       	$4
+
+	0x2547CCE8:  560807BC  rlwinm r8,r16,0,30,30
+	  24: GETL       	R16, t20
+	  25: ANDL       	$0x2, t20
+	  26: PUTL       	t20, R8
+	  27: INCEIPL       	$4
+
+	0x2547CCEC:  4BFFFA85  bl 0x2547C770
+	  28: MOVL       	$0x2547CCF0, t22
+	  29: PUTL       	t22, LR
+	  30: JMPo-c       	$0x2547C770  ($4)
+
+
+. 0 2547CCD4 28
+. 81 5D 00 08 A2 1D 00 04 80 9D 00 00 7C AA CA 14 80 DC 00 14 56 08 07 BC 4B FF FA 85
+
+==== BB 961 (0x2547CCB4) approx BBs exec'd 0 ====
+
+	0x2547CCB4:  811B0004  lwz r8,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547CCB8:  7EA7AB78  or r7,r21,r21
+	   5: GETL       	R21, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x2547CCBC:  88A80000  lbz r5,0(r8)
+	   8: GETL       	R8, t6
+	   9: LDB       	(t6), t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x2547CCC0:  7D034378  or r3,r8,r8
+	  12: GETL       	R8, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x2547CCC4:  2C850000  cmpi cr1,r5,0
+	  15: GETL       	R5, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x1, CR
+	  18: INCEIPL       	$4
+
+	0x2547CCC8:  4086000C  bc 4,6,0x2547CCD4
+	  19: Jc06o       	$0x2547CCD4
+
+
+. 0 2547CCB4 24
+. 81 1B 00 04 7E A7 AB 78 88 A8 00 00 7D 03 43 78 2C 85 00 00 40 86 00 0C
+
+==== BB 962 (0x2547CD08) approx BBs exec'd 0 ====
+
+	0x2547CD08:  807D000C  lwz r3,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547CD0C:  2F830000  cmpi cr7,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547CD10:  7FBD1A14  add r29,r29,r3
+	   9: GETL       	R29, t8
+	  10: GETL       	R3, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R29
+	  13: INCEIPL       	$4
+
+	0x2547CD14:  409EFFA0  bc 4,30,0x2547CCB4
+	  14: Jc30o       	$0x2547CCB4
+
+
+. 0 2547CD08 16
+. 80 7D 00 0C 2F 83 00 00 7F BD 1A 14 40 9E FF A0
+
+==== BB 963 (0x2547CE1C) approx BBs exec'd 0 ====
+
+	0x2547CE1C:  A2A70006  lhz r21,6(r7)
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0x6, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547CE20:  80670000  lwz r3,0(r7)
+	   5: GETL       	R7, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0x2547CE24:  56A42376  rlwinm r4,r21,4,13,27
+	   9: GETL       	R21, t8
+	  10: ROLL       	$0x4, t8
+	  11: ANDL       	$0x7FFF0, t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x2547CE28:  56B00420  rlwinm r16,r21,0,16,16
+	  14: GETL       	R21, t10
+	  15: ANDL       	$0x8000, t10
+	  16: PUTL       	t10, R16
+	  17: INCEIPL       	$4
+
+	0x2547CE2C:  7FA42A14  add r29,r4,r5
+	  18: GETL       	R4, t12
+	  19: GETL       	R5, t14
+	  20: ADDL       	t12, t14
+	  21: PUTL       	t14, R29
+	  22: INCEIPL       	$4
+
+	0x2547CE30:  907D0004  stw r3,4(r29)
+	  23: GETL       	R3, t16
+	  24: GETL       	R29, t18
+	  25: ADDL       	$0x4, t18
+	  26: STL       	t16, (t18)
+	  27: INCEIPL       	$4
+
+	0x2547CE34:  921D0008  stw r16,8(r29)
+	  28: GETL       	R16, t20
+	  29: GETL       	R29, t22
+	  30: ADDL       	$0x8, t22
+	  31: STL       	t20, (t22)
+	  32: INCEIPL       	$4
+
+	0x2547CE38:  8167000C  lwz r11,12(r7)
+	  33: GETL       	R7, t24
+	  34: ADDL       	$0xC, t24
+	  35: LDL       	(t24), t26
+	  36: PUTL       	t26, R11
+	  37: INCEIPL       	$4
+
+	0x2547CE3C:  81470008  lwz r10,8(r7)
+	  38: GETL       	R7, t28
+	  39: ADDL       	$0x8, t28
+	  40: LDL       	(t28), t30
+	  41: PUTL       	t30, R10
+	  42: INCEIPL       	$4
+
+	0x2547CE40:  2D8B0000  cmpi cr3,r11,0
+	  43: GETL       	R11, t32
+	  44: CMP0L       	t32, t34  (-rSo)
+	  45: ICRFL       	t34, $0x3, CR
+	  46: INCEIPL       	$4
+
+	0x2547CE44:  81060004  lwz r8,4(r6)
+	  47: GETL       	R6, t36
+	  48: ADDL       	$0x4, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R8
+	  51: INCEIPL       	$4
+
+	0x2547CE48:  7D2ACA14  add r9,r10,r25
+	  52: GETL       	R10, t40
+	  53: GETL       	R25, t42
+	  54: ADDL       	t40, t42
+	  55: PUTL       	t42, R9
+	  56: INCEIPL       	$4
+
+	0x2547CE4C:  7CE75A14  add r7,r7,r11
+	  57: GETL       	R7, t44
+	  58: GETL       	R11, t46
+	  59: ADDL       	t44, t46
+	  60: PUTL       	t46, R7
+	  61: INCEIPL       	$4
+
+	0x2547CE50:  7C08CA14  add r0,r8,r25
+	  62: GETL       	R8, t48
+	  63: GETL       	R25, t50
+	  64: ADDL       	t48, t50
+	  65: PUTL       	t50, R0
+	  66: INCEIPL       	$4
+
+	0x2547CE54:  7D24292E  stwx r9,r4,r5
+	  67: GETL       	R5, t52
+	  68: GETL       	R4, t54
+	  69: ADDL       	t54, t52
+	  70: GETL       	R9, t56
+	  71: STL       	t56, (t52)
+	  72: INCEIPL       	$4
+
+	0x2547CE58:  901D000C  stw r0,12(r29)
+	  73: GETL       	R0, t58
+	  74: GETL       	R29, t60
+	  75: ADDL       	$0xC, t60
+	  76: STL       	t58, (t60)
+	  77: INCEIPL       	$4
+
+	0x2547CE5C:  408EFFC0  bc 4,14,0x2547CE1C
+	  78: Jc14o       	$0x2547CE1C
+
+
+. 0 2547CE1C 68
+. A2 A7 00 06 80 67 00 00 56 A4 23 76 56 B0 04 20 7F A4 2A 14 90 7D 00 04 92 1D 00 08 81 67 00 0C 81 47 00 08 2D 8B 00 00 81 06 00 04 7D 2A CA 14 7C E7 5A 14 7C 08 CA 14 7D 24 29 2E 90 1D 00 0C 40 8E FF C0
+
+==== BB 964 (0x2547CD34) approx BBs exec'd 0 ====
+
+	0x2547CD34:  82BB0000  lwz r21,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R21
+	   3: INCEIPL       	$4
+
+	0x2547CD38:  82F10004  lwz r23,4(r17)
+	   4: GETL       	R17, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R23
+	   8: INCEIPL       	$4
+
+	0x2547CD3C:  7D35BA14  add r9,r21,r23
+	   9: GETL       	R21, t8
+	  10: GETL       	R23, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x2547CD40:  A2690004  lhz r19,4(r9)
+	  14: GETL       	R9, t12
+	  15: ADDL       	$0x4, t12
+	  16: LDW       	(t12), t14
+	  17: PUTL       	t14, R19
+	  18: INCEIPL       	$4
+
+	0x2547CD44:  5660047E  rlwinm r0,r19,0,17,31
+	  19: GETL       	R19, t16
+	  20: ANDL       	$0x7FFF, t16
+	  21: PUTL       	t16, R0
+	  22: INCEIPL       	$4
+
+	0x2547CD48:  7C80C040  cmpl cr1,r0,r24
+	  23: GETL       	R0, t18
+	  24: GETL       	R24, t20
+	  25: CMPUL       	t18, t20, t22  (-rSo)
+	  26: ICRFL       	t22, $0x1, CR
+	  27: INCEIPL       	$4
+
+	0x2547CD4C:  40850008  bc 4,5,0x2547CD54
+	  28: Jc05o       	$0x2547CD54
+
+
+. 0 2547CD34 28
+. 82 BB 00 00 82 F1 00 04 7D 35 BA 14 A2 69 00 04 56 60 04 7E 7C 80 C0 40 40 85 00 08
+
+==== BB 965 (0x2547CD54) approx BBs exec'd 0 ====
+
+	0x2547CD54:  82890010  lwz r20,16(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R20
+	   4: INCEIPL       	$4
+
+	0x2547CD58:  2F140000  cmpi cr6,r20,0
+	   5: GETL       	R20, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547CD5C:  7D29A214  add r9,r9,r20
+	   9: GETL       	R9, t8
+	  10: GETL       	R20, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x2547CD60:  409AFFE0  bc 4,26,0x2547CD40
+	  14: Jc26o       	$0x2547CD40
+
+
+. 0 2547CD54 16
+. 82 89 00 10 2F 14 00 00 7D 29 A2 14 40 9A FF E0
+
+==== BB 966 (0x2547CD40) approx BBs exec'd 0 ====
+
+	0x2547CD40:  A2690004  lhz r19,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0x2547CD44:  5660047E  rlwinm r0,r19,0,17,31
+	   5: GETL       	R19, t4
+	   6: ANDL       	$0x7FFF, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547CD48:  7C80C040  cmpl cr1,r0,r24
+	   9: GETL       	R0, t6
+	  10: GETL       	R24, t8
+	  11: CMPUL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x2547CD4C:  40850008  bc 4,5,0x2547CD54
+	  14: Jc05o       	$0x2547CD54
+
+
+. 0 2547CD40 16
+. A2 69 00 04 56 60 04 7E 7C 80 C0 40 40 85 00 08
+
+==== BB 967 (0x2547CE90) approx BBs exec'd 0 ====
+
+	0x2547CE90:  80DB0000  lwz r6,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R6
+	   3: INCEIPL       	$4
+
+	0x2547CE94:  38E00000  li r7,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R7
+	   6: INCEIPL       	$4
+
+	0x2547CE98:  83710004  lwz r27,4(r17)
+	   7: GETL       	R17, t6
+	   8: ADDL       	$0x4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R27
+	  11: INCEIPL       	$4
+
+	0x2547CE9C:  7C66DA14  add r3,r6,r27
+	  12: GETL       	R6, t10
+	  13: GETL       	R27, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R3
+	  16: INCEIPL       	$4
+
+	0x2547CEA0:  A2230002  lhz r17,2(r3)
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0x2, t14
+	  19: LDW       	(t14), t16
+	  20: PUTL       	t16, R17
+	  21: INCEIPL       	$4
+
+	0x2547CEA4:  8103000C  lwz r8,12(r3)
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0xC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R8
+	  26: INCEIPL       	$4
+
+	0x2547CEA8:  72290001  andi. r9,r17,0x1
+	  27: GETL       	R17, t22
+	  28: ANDL       	$0x1, t22
+	  29: PUTL       	t22, R9
+	  30: CMP0L       	t22, t24  (-rSo)
+	  31: ICRFL       	t24, $0x0, CR
+	  32: INCEIPL       	$4
+
+	0x2547CEAC:  40820028  bc 4,2,0x2547CED4
+	  33: Jc02o       	$0x2547CED4
+
+
+. 0 2547CE90 32
+. 80 DB 00 00 38 E0 00 00 83 71 00 04 7C 66 DA 14 A2 23 00 02 81 03 00 0C 72 29 00 01 40 82 00 28
+
+==== BB 968 (0x2547CED4) approx BBs exec'd 0 ====
+
+	0x2547CED4:  82430010  lwz r18,16(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x2547CED8:  2E120000  cmpi cr4,r18,0
+	   5: GETL       	R18, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x2547CEDC:  7C639214  add r3,r3,r18
+	   9: GETL       	R3, t8
+	  10: GETL       	R18, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R3
+	  13: INCEIPL       	$4
+
+	0x2547CEE0:  4092FFC0  bc 4,18,0x2547CEA0
+	  14: Jc18o       	$0x2547CEA0
+
+
+. 0 2547CED4 16
+. 82 43 00 10 2E 12 00 00 7C 63 92 14 40 92 FF C0
+
+==== BB 969 (0x2547CEA0) approx BBs exec'd 0 ====
+
+	0x2547CEA0:  A2230002  lhz r17,2(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x2, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R17
+	   4: INCEIPL       	$4
+
+	0x2547CEA4:  8103000C  lwz r8,12(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R8
+	   9: INCEIPL       	$4
+
+	0x2547CEA8:  72290001  andi. r9,r17,0x1
+	  10: GETL       	R17, t8
+	  11: ANDL       	$0x1, t8
+	  12: PUTL       	t8, R9
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x2547CEAC:  40820028  bc 4,2,0x2547CED4
+	  16: Jc02o       	$0x2547CED4
+
+
+. 0 2547CEA0 16
+. A2 23 00 02 81 03 00 0C 72 29 00 01 40 82 00 28
+
+==== BB 970 (0x2547CEB0) approx BBs exec'd 0 ====
+
+	0x2547CEB0:  A3430004  lhz r26,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547CEB4:  83830008  lwz r28,8(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x2547CEB8:  57572376  rlwinm r23,r26,4,13,27
+	  10: GETL       	R26, t8
+	  11: ROLL       	$0x4, t8
+	  12: ANDL       	$0x7FFF0, t8
+	  13: PUTL       	t8, R23
+	  14: INCEIPL       	$4
+
+	0x2547CEBC:  7F172A14  add r24,r23,r5
+	  15: GETL       	R23, t10
+	  16: GETL       	R5, t12
+	  17: ADDL       	t10, t12
+	  18: PUTL       	t12, R24
+	  19: INCEIPL       	$4
+
+	0x2547CEC0:  93980004  stw r28,4(r24)
+	  20: GETL       	R28, t14
+	  21: GETL       	R24, t16
+	  22: ADDL       	$0x4, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x2547CEC4:  7E88182E  lwzx r20,r8,r3
+	  25: GETL       	R3, t18
+	  26: GETL       	R8, t20
+	  27: ADDL       	t20, t18
+	  28: LDL       	(t18), t22
+	  29: PUTL       	t22, R20
+	  30: INCEIPL       	$4
+
+	0x2547CEC8:  90F8000C  stw r7,12(r24)
+	  31: GETL       	R7, t24
+	  32: GETL       	R24, t26
+	  33: ADDL       	$0xC, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547CECC:  7E74CA14  add r19,r20,r25
+	  36: GETL       	R20, t28
+	  37: GETL       	R25, t30
+	  38: ADDL       	t28, t30
+	  39: PUTL       	t30, R19
+	  40: INCEIPL       	$4
+
+	0x2547CED0:  7E77292E  stwx r19,r23,r5
+	  41: GETL       	R5, t32
+	  42: GETL       	R23, t34
+	  43: ADDL       	t34, t32
+	  44: GETL       	R19, t36
+	  45: STL       	t36, (t32)
+	  46: INCEIPL       	$4
+
+	0x2547CED4:  82430010  lwz r18,16(r3)
+	  47: GETL       	R3, t38
+	  48: ADDL       	$0x10, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R18
+	  51: INCEIPL       	$4
+
+	0x2547CED8:  2E120000  cmpi cr4,r18,0
+	  52: GETL       	R18, t42
+	  53: CMP0L       	t42, t44  (-rSo)
+	  54: ICRFL       	t44, $0x4, CR
+	  55: INCEIPL       	$4
+
+	0x2547CEDC:  7C639214  add r3,r3,r18
+	  56: GETL       	R3, t46
+	  57: GETL       	R18, t48
+	  58: ADDL       	t46, t48
+	  59: PUTL       	t48, R3
+	  60: INCEIPL       	$4
+
+	0x2547CEE0:  4092FFC0  bc 4,18,0x2547CEA0
+	  61: Jc18o       	$0x2547CEA0
+
+
+. 0 2547CEB0 52
+. A3 43 00 04 83 83 00 08 57 57 23 76 7F 17 2A 14 93 98 00 04 7E 88 18 2E 90 F8 00 0C 7E 74 CA 14 7E 77 29 2E 82 43 00 10 2E 12 00 00 7C 63 92 14 40 92 FF C0
+
+==== BB 971 (0x2547CEE4) approx BBs exec'd 0 ====
+
+	0x2547CEE4:  7EC3B378  or r3,r22,r22
+	   0: GETL       	R22, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547CEE8:  4BFFFE88  b 0x2547CD70
+	   3: JMPo       	$0x2547CD70  ($4)
+
+
+. 0 2547CEE4 8
+. 7E C3 B3 78 4B FF FE 88
+
+==== BB 972 (0x2547CD70) approx BBs exec'd 0 ====
+
+	0x2547CD70:  80A10000  lwz r5,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x2547CD74:  82C50004  lwz r22,4(r5)
+	   4: GETL       	R5, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R22
+	   8: INCEIPL       	$4
+
+	0x2547CD78:  8185FFBC  lwz r12,-68(r5)
+	   9: GETL       	R5, t8
+	  10: ADDL       	$0xFFFFFFBC, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x2547CD7C:  7EC803A6  mtlr r22
+	  14: GETL       	R22, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x2547CD80:  8205FFC0  lwz r16,-64(r5)
+	  17: GETL       	R5, t14
+	  18: ADDL       	$0xFFFFFFC0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R16
+	  21: INCEIPL       	$4
+
+	0x2547CD84:  8225FFC4  lwz r17,-60(r5)
+	  22: GETL       	R5, t18
+	  23: ADDL       	$0xFFFFFFC4, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R17
+	  26: INCEIPL       	$4
+
+	0x2547CD88:  7D818120  mtcrf 0x18,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x3, CR
+	  29: ICRFL       	t22, $0x4, CR
+	  30: INCEIPL       	$4
+
+	0x2547CD8C:  8245FFC8  lwz r18,-56(r5)
+	  31: GETL       	R5, t24
+	  32: ADDL       	$0xFFFFFFC8, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R18
+	  35: INCEIPL       	$4
+
+	0x2547CD90:  8265FFCC  lwz r19,-52(r5)
+	  36: GETL       	R5, t28
+	  37: ADDL       	$0xFFFFFFCC, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R19
+	  40: INCEIPL       	$4
+
+	0x2547CD94:  8285FFD0  lwz r20,-48(r5)
+	  41: GETL       	R5, t32
+	  42: ADDL       	$0xFFFFFFD0, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R20
+	  45: INCEIPL       	$4
+
+	0x2547CD98:  82A5FFD4  lwz r21,-44(r5)
+	  46: GETL       	R5, t36
+	  47: ADDL       	$0xFFFFFFD4, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R21
+	  50: INCEIPL       	$4
+
+	0x2547CD9C:  82C5FFD8  lwz r22,-40(r5)
+	  51: GETL       	R5, t40
+	  52: ADDL       	$0xFFFFFFD8, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R22
+	  55: INCEIPL       	$4
+
+	0x2547CDA0:  82E5FFDC  lwz r23,-36(r5)
+	  56: GETL       	R5, t44
+	  57: ADDL       	$0xFFFFFFDC, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R23
+	  60: INCEIPL       	$4
+
+	0x2547CDA4:  8305FFE0  lwz r24,-32(r5)
+	  61: GETL       	R5, t48
+	  62: ADDL       	$0xFFFFFFE0, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R24
+	  65: INCEIPL       	$4
+
+	0x2547CDA8:  8325FFE4  lwz r25,-28(r5)
+	  66: GETL       	R5, t52
+	  67: ADDL       	$0xFFFFFFE4, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R25
+	  70: INCEIPL       	$4
+
+	0x2547CDAC:  8345FFE8  lwz r26,-24(r5)
+	  71: GETL       	R5, t56
+	  72: ADDL       	$0xFFFFFFE8, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R26
+	  75: INCEIPL       	$4
+
+	0x2547CDB0:  8365FFEC  lwz r27,-20(r5)
+	  76: GETL       	R5, t60
+	  77: ADDL       	$0xFFFFFFEC, t60
+	  78: LDL       	(t60), t62
+	  79: PUTL       	t62, R27
+	  80: INCEIPL       	$4
+
+	0x2547CDB4:  8385FFF0  lwz r28,-16(r5)
+	  81: GETL       	R5, t64
+	  82: ADDL       	$0xFFFFFFF0, t64
+	  83: LDL       	(t64), t66
+	  84: PUTL       	t66, R28
+	  85: INCEIPL       	$4
+
+	0x2547CDB8:  83A5FFF4  lwz r29,-12(r5)
+	  86: GETL       	R5, t68
+	  87: ADDL       	$0xFFFFFFF4, t68
+	  88: LDL       	(t68), t70
+	  89: PUTL       	t70, R29
+	  90: INCEIPL       	$4
+
+	0x2547CDBC:  83C5FFF8  lwz r30,-8(r5)
+	  91: GETL       	R5, t72
+	  92: ADDL       	$0xFFFFFFF8, t72
+	  93: LDL       	(t72), t74
+	  94: PUTL       	t74, R30
+	  95: INCEIPL       	$4
+
+	0x2547CDC0:  83E5FFFC  lwz r31,-4(r5)
+	  96: GETL       	R5, t76
+	  97: ADDL       	$0xFFFFFFFC, t76
+	  98: LDL       	(t76), t78
+	  99: PUTL       	t78, R31
+	 100: INCEIPL       	$4
+
+	0x2547CDC4:  7CA12B78  or r1,r5,r5
+	 101: GETL       	R5, t80
+	 102: PUTL       	t80, R1
+	 103: INCEIPL       	$4
+
+	0x2547CDC8:  4E800020  blr
+	 104: GETL       	LR, t82
+	 105: JMPo-r       	t82  ($4)
+
+
+. 0 2547CD70 92
+. 80 A1 00 00 82 C5 00 04 81 85 FF BC 7E C8 03 A6 82 05 FF C0 82 25 FF C4 7D 81 81 20 82 45 FF C8 82 65 FF CC 82 85 FF D0 82 A5 FF D4 82 C5 FF D8 82 E5 FF DC 83 05 FF E0 83 25 FF E4 83 45 FF E8 83 65 FF EC 83 85 FF F0 83 A5 FF F4 83 C5 FF F8 83 E5 FF FC 7C A1 2B 78 4E 80 00 20
+
+==== BB 973 (0x2547CD50) approx BBs exec'd 0 ====
+
+	0x2547CD50:  7C180378  or r24,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R24
+	   2: INCEIPL       	$4
+
+	0x2547CD54:  82890010  lwz r20,16(r9)
+	   3: GETL       	R9, t2
+	   4: ADDL       	$0x10, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R20
+	   7: INCEIPL       	$4
+
+	0x2547CD58:  2F140000  cmpi cr6,r20,0
+	   8: GETL       	R20, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x2547CD5C:  7D29A214  add r9,r9,r20
+	  12: GETL       	R9, t10
+	  13: GETL       	R20, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R9
+	  16: INCEIPL       	$4
+
+	0x2547CD60:  409AFFE0  bc 4,26,0x2547CD40
+	  17: Jc26o       	$0x2547CD40
+
+
+. 0 2547CD50 20
+. 7C 18 03 78 82 89 00 10 2F 14 00 00 7D 29 A2 14 40 9A FF E0
+
+==== BB 974 (0x2547D050) approx BBs exec'd 0 ====
+
+	0x2547D050:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547D054:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547D058:  83410008  lwz r26,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x2547D05C:  8361000C  lwz r27,12(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xC, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0x2547D060:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547D064:  83810010  lwz r28,16(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R28
+	  25: INCEIPL       	$4
+
+	0x2547D068:  83A10014  lwz r29,20(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R29
+	  30: INCEIPL       	$4
+
+	0x2547D06C:  83C10018  lwz r30,24(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x18, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R30
+	  35: INCEIPL       	$4
+
+	0x2547D070:  83E1001C  lwz r31,28(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0x2547D074:  38210020  addi r1,r1,32
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x20, t32
+	  43: PUTL       	t32, R1
+	  44: INCEIPL       	$4
+
+	0x2547D078:  4E800020  blr
+	  45: GETL       	LR, t34
+	  46: JMPo-r       	t34  ($4)
+
+
+. 0 2547D050 44
+. 80 81 00 24 7F 83 E3 78 83 41 00 08 83 61 00 0C 7C 88 03 A6 83 81 00 10 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 975 (0x2547180C) approx BBs exec'd 0 ====
+
+	0x2547180C:  2F830000  cmpi cr7,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25471810:  419E0014  bc 12,30,0x25471824
+	   4: Js30o       	$0x25471824
+
+
+. 0 2547180C 8
+. 2F 83 00 00 41 9E 00 14
+
+==== BB 976 (0x25471824) approx BBs exec'd 0 ====
+
+	0x25471824:  80610024  lwz r3,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25471828:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547182C:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0x25471830:  7C6803A6  mtlr r3
+	  15: GETL       	R3, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x25471834:  83E1001C  lwz r31,28(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0x25471838:  38210020  addi r1,r1,32
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x20, t18
+	  25: PUTL       	t18, R1
+	  26: INCEIPL       	$4
+
+	0x2547183C:  4E800020  blr
+	  27: GETL       	LR, t20
+	  28: JMPo-r       	t20  ($4)
+
+
+. 0 25471824 28
+. 80 61 00 24 83 A1 00 14 83 C1 00 18 7C 68 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 977 (0x2547B70C) approx BBs exec'd 0 ====
+
+	0x2547B70C:  935D0000  stw r26,0(r29)
+	   0: GETL       	R26, t0
+	   1: GETL       	R29, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x2547B710:  80010034  lwz r0,52(r1)
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0x34, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R0
+	   8: INCEIPL       	$4
+
+	0x2547B714:  937C0000  stw r27,0(r28)
+	   9: GETL       	R27, t8
+	  10: GETL       	R28, t10
+	  11: STL       	t8, (t10)
+	  12: INCEIPL       	$4
+
+	0x2547B718:  82E1000C  lwz r23,12(r1)
+	  13: GETL       	R1, t12
+	  14: ADDL       	$0xC, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R23
+	  17: INCEIPL       	$4
+
+	0x2547B71C:  7C0803A6  mtlr r0
+	  18: GETL       	R0, t16
+	  19: PUTL       	t16, LR
+	  20: INCEIPL       	$4
+
+	0x2547B720:  83210014  lwz r25,20(r1)
+	  21: GETL       	R1, t18
+	  22: ADDL       	$0x14, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R25
+	  25: INCEIPL       	$4
+
+	0x2547B724:  83410018  lwz r26,24(r1)
+	  26: GETL       	R1, t22
+	  27: ADDL       	$0x18, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R26
+	  30: INCEIPL       	$4
+
+	0x2547B728:  8361001C  lwz r27,28(r1)
+	  31: GETL       	R1, t26
+	  32: ADDL       	$0x1C, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R27
+	  35: INCEIPL       	$4
+
+	0x2547B72C:  83810020  lwz r28,32(r1)
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x20, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R28
+	  40: INCEIPL       	$4
+
+	0x2547B730:  83A10024  lwz r29,36(r1)
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x24, t34
+	  43: LDL       	(t34), t36
+	  44: PUTL       	t36, R29
+	  45: INCEIPL       	$4
+
+	0x2547B734:  83C10028  lwz r30,40(r1)
+	  46: GETL       	R1, t38
+	  47: ADDL       	$0x28, t38
+	  48: LDL       	(t38), t40
+	  49: PUTL       	t40, R30
+	  50: INCEIPL       	$4
+
+	0x2547B738:  38210030  addi r1,r1,48
+	  51: GETL       	R1, t42
+	  52: ADDL       	$0x30, t42
+	  53: PUTL       	t42, R1
+	  54: INCEIPL       	$4
+
+	0x2547B73C:  4E800020  blr
+	  55: GETL       	LR, t44
+	  56: JMPo-r       	t44  ($4)
+
+
+. 0 2547B70C 52
+. 93 5D 00 00 80 01 00 34 93 7C 00 00 82 E1 00 0C 7C 08 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 38 21 00 30 4E 80 00 20
+
+==== BB 978 (0x25472900) approx BBs exec'd 0 ====
+
+	0x25472900:  836E03D8  lwz r27,984(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x3D8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25472904:  2E1B0000  cmpi cr4,r27,0
+	   5: GETL       	R27, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x25472908:  40920AEC  bc 4,18,0x254733F4
+	   9: Jc18o       	$0x254733F4
+
+
+. 0 25472900 12
+. 83 6E 03 D8 2E 1B 00 00 40 92 0A EC
+
+==== BB 979 (0x2547290C) approx BBs exec'd 0 ====
+
+	0x2547290C:  816E0408  lwz r11,1032(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25472910:  3ACB003F  addi r22,r11,63
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x3F, t4
+	   7: PUTL       	t4, R22
+	   8: INCEIPL       	$4
+
+	0x25472914:  916E0414  stw r11,1044(r14)
+	   9: GETL       	R11, t6
+	  10: GETL       	R14, t8
+	  11: ADDL       	$0x414, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x25472918:  56D91838  rlwinm r25,r22,3,0,28
+	  14: GETL       	R22, t10
+	  15: SHLL       	$0x3, t10
+	  16: PUTL       	t10, R25
+	  17: INCEIPL       	$4
+
+	0x2547291C:  38790008  addi r3,r25,8
+	  18: GETL       	R25, t12
+	  19: ADDL       	$0x8, t12
+	  20: PUTL       	t12, R3
+	  21: INCEIPL       	$4
+
+	0x25472920:  48025105  bl 0x25497A24
+	  22: MOVL       	$0x25472924, t14
+	  23: PUTL       	t14, LR
+	  24: JMPo-c       	$0x25497A24  ($4)
+
+
+. 0 2547290C 24
+. 81 6E 04 08 3A CB 00 3F 91 6E 04 14 56 D9 18 38 38 79 00 08 48 02 51 05
+
+==== BB 980 (0x25472924) approx BBs exec'd 0 ====
+
+	0x25472924:  7F25CB78  or r5,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x25472928:  7C6C1B78  or r12,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0x2547292C:  38800000  li r4,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x25472930:  38630008  addi r3,r3,8
+	   9: GETL       	R3, t6
+	  10: ADDL       	$0x8, t6
+	  11: PUTL       	t6, R3
+	  12: INCEIPL       	$4
+
+	0x25472934:  918E0410  stw r12,1040(r14)
+	  13: GETL       	R12, t8
+	  14: GETL       	R14, t10
+	  15: ADDL       	$0x410, t10
+	  16: STL       	t8, (t10)
+	  17: INCEIPL       	$4
+
+	0x25472938:  48010E0D  bl 0x25483744
+	  18: MOVL       	$0x2547293C, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0x25483744  ($4)
+
+
+. 0 25472924 24
+. 7F 25 CB 78 7C 6C 1B 78 38 80 00 00 38 63 00 08 91 8E 04 10 48 01 0E 0D
+
+==== BB 981 (0x2547293C) approx BBs exec'd 0 ====
+
+	0x2547293C:  80CE0410  lwz r6,1040(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x410, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x25472940:  38000000  li r0,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x25472944:  7E89A378  or r9,r20,r20
+	   8: GETL       	R20, t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0x25472948:  90060004  stw r0,4(r6)
+	  11: GETL       	R0, t8
+	  12: GETL       	R6, t10
+	  13: ADDL       	$0x4, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547294C:  92C60000  stw r22,0(r6)
+	  16: GETL       	R22, t12
+	  17: GETL       	R6, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x25472950:  418E0024  bc 12,14,0x25472974
+	  20: Js14o       	$0x25472974
+
+
+. 0 2547293C 24
+. 80 CE 04 10 38 00 00 00 7E 89 A3 78 90 06 00 04 92 C6 00 00 41 8E 00 24
+
+==== BB 982 (0x25472954) approx BBs exec'd 0 ====
+
+	0x25472954:  38630004  addi r3,r3,4
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x25472958:  83890220  lwz r28,544(r9)
+	   4: GETL       	R9, t2
+	   5: ADDL       	$0x220, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x2547295C:  2F1C0000  cmpi cr6,r28,0
+	   9: GETL       	R28, t6
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0x25472960:  419A0008  bc 12,26,0x25472968
+	  13: Js26o       	$0x25472968
+
+
+. 0 25472954 16
+. 38 63 00 04 83 89 02 20 2F 1C 00 00 41 9A 00 08
+
+==== BB 983 (0x25472968) approx BBs exec'd 0 ====
+
+	0x25472968:  8129000C  lwz r9,12(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547296C:  2C090000  cmpi cr0,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25472970:  4082FFE8  bc 4,2,0x25472958
+	   9: Jc02o       	$0x25472958
+
+
+. 0 25472968 12
+. 81 29 00 0C 2C 09 00 00 40 82 FF E8
+
+==== BB 984 (0x25472958) approx BBs exec'd 0 ====
+
+	0x25472958:  83890220  lwz r28,544(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x220, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x2547295C:  2F1C0000  cmpi cr6,r28,0
+	   5: GETL       	R28, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x25472960:  419A0008  bc 12,26,0x25472968
+	   9: Js26o       	$0x25472968
+
+
+. 0 25472958 12
+. 83 89 02 20 2F 1C 00 00 41 9A 00 08
+
+==== BB 985 (0x25472964) approx BBs exec'd 0 ====
+
+	0x25472964:  95230008  stwu r9,8(r3)
+	   0: GETL       	R9, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0x8, t2
+	   3: PUTL       	t2, R3
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25472968:  8129000C  lwz r9,12(r9)
+	   6: GETL       	R9, t4
+	   7: ADDL       	$0xC, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R9
+	  10: INCEIPL       	$4
+
+	0x2547296C:  2C090000  cmpi cr0,r9,0
+	  11: GETL       	R9, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25472970:  4082FFE8  bc 4,2,0x25472958
+	  15: Jc02o       	$0x25472958
+
+
+. 0 25472964 16
+. 95 23 00 08 81 29 00 0C 2C 09 00 00 40 82 FF E8
+
+==== BB 986 (0x25472974) approx BBs exec'd 0 ====
+
+	0x25472974:  4800B359  bl 0x2547DCCC
+	   0: MOVL       	$0x25472978, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547DCCC  ($4)
+
+
+. 0 25472974 4
+. 48 00 B3 59
+
+==== BB 987 _dl_determine_tlsoffset(0x2547DCCC) approx BBs exec'd 0 ====
+
+	0x2547DCCC:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547DCD0:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x2547DCD4:  4801932D  bl 0x25497000
+	   9: MOVL       	$0x2547DCD8, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547DCCC 12
+. 94 21 FF E0 7D 88 02 A6 48 01 93 2D
+
+==== BB 988 (0x2547DCD8) approx BBs exec'd 0 ====
+
+	0x2547DCD8:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547DCDC:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547DCE0:  9361000C  stw r27,12(r1)
+	   8: GETL       	R27, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547DCE4:  93810010  stw r28,16(r1)
+	  13: GETL       	R28, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x10, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547DCE8:  38600000  li r3,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R3
+	  20: INCEIPL       	$4
+
+	0x2547DCEC:  93A10014  stw r29,20(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547DCF0:  7D8803A6  mtlr r12
+	  26: GETL       	R12, t20
+	  27: PUTL       	t20, LR
+	  28: INCEIPL       	$4
+
+	0x2547DCF4:  837E04C8  lwz r27,1224(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x4C8, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R27
+	  33: INCEIPL       	$4
+
+	0x2547DCF8:  3B800020  li r28,32
+	  34: MOVL       	$0x20, t26
+	  35: PUTL       	t26, R28
+	  36: INCEIPL       	$4
+
+	0x2547DCFC:  93E1001C  stw r31,28(r1)
+	  37: GETL       	R31, t28
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x1C, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x2547DD00:  3BA00000  li r29,0
+	  42: MOVL       	$0x0, t32
+	  43: PUTL       	t32, R29
+	  44: INCEIPL       	$4
+
+	0x2547DD04:  813B0410  lwz r9,1040(r27)
+	  45: GETL       	R27, t34
+	  46: ADDL       	$0x410, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R9
+	  49: INCEIPL       	$4
+
+	0x2547DD08:  3BE00000  li r31,0
+	  50: MOVL       	$0x0, t38
+	  51: PUTL       	t38, R31
+	  52: INCEIPL       	$4
+
+	0x2547DD0C:  39800001  li r12,1
+	  53: MOVL       	$0x1, t40
+	  54: PUTL       	t40, R12
+	  55: INCEIPL       	$4
+
+	0x2547DD10:  38A90008  addi r5,r9,8
+	  56: GETL       	R9, t42
+	  57: ADDL       	$0x8, t42
+	  58: PUTL       	t42, R5
+	  59: INCEIPL       	$4
+
+	0x2547DD14:  8005000C  lwz r0,12(r5)
+	  60: GETL       	R5, t44
+	  61: ADDL       	$0xC, t44
+	  62: LDL       	(t44), t46
+	  63: PUTL       	t46, R0
+	  64: INCEIPL       	$4
+
+	0x2547DD18:  2F800000  cmpi cr7,r0,0
+	  65: GETL       	R0, t48
+	  66: CMP0L       	t48, t50  (-rSo)
+	  67: ICRFL       	t50, $0x7, CR
+	  68: INCEIPL       	$4
+
+	0x2547DD1C:  419E0104  bc 12,30,0x2547DE20
+	  69: Js30o       	$0x2547DE20
+
+
+. 0 2547DCD8 72
+. 93 C1 00 18 7F C8 02 A6 93 61 00 0C 93 81 00 10 38 60 00 00 93 A1 00 14 7D 88 03 A6 83 7E 04 C8 3B 80 00 20 93 E1 00 1C 3B A0 00 00 81 3B 04 10 3B E0 00 00 39 80 00 01 38 A9 00 08 80 05 00 0C 2F 80 00 00 41 9E 01 04
+
+==== BB 989 (0x2547DD20) approx BBs exec'd 0 ====
+
+	0x2547DD20:  38C00008  li r6,8
+	   0: MOVL       	$0x8, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x2547DD24:  48000058  b 0x2547DD7C
+	   3: JMPo       	$0x2547DD7C  ($4)
+
+
+. 0 2547DD20 8
+. 38 C0 00 08 48 00 00 58
+
+==== BB 990 (0x2547DD7C) approx BBs exec'd 0 ====
+
+	0x2547DD7C:  7D662A14  add r11,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547DD80:  814B0004  lwz r10,4(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x2547DD84:  816A0224  lwz r11,548(r10)
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0x224, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547DD88:  810A0228  lwz r8,552(r10)
+	  15: GETL       	R10, t12
+	  16: ADDL       	$0x228, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R8
+	  19: INCEIPL       	$4
+
+	0x2547DD8C:  7C1C5840  cmpl cr0,r28,r11
+	  20: GETL       	R28, t16
+	  21: GETL       	R11, t18
+	  22: CMPUL       	t16, t18, t20  (-rSo)
+	  23: ICRFL       	t20, $0x0, CR
+	  24: INCEIPL       	$4
+
+	0x2547DD90:  388BFFFF  addi r4,r11,-1
+	  25: GETL       	R11, t22
+	  26: ADDL       	$0xFFFFFFFF, t22
+	  27: PUTL       	t22, R4
+	  28: INCEIPL       	$4
+
+	0x2547DD94:  7CE800D0  neg r7,r8
+	  29: GETL       	R8, t24
+	  30: NEGL       	t24
+	  31: PUTL       	t24, R7
+	  32: INCEIPL       	$4
+
+	0x2547DD98:  7CE82038  and r8,r7,r4
+	  33: GETL       	R7, t26
+	  34: GETL       	R4, t28
+	  35: ANDL       	t26, t28
+	  36: PUTL       	t28, R8
+	  37: INCEIPL       	$4
+
+	0x2547DD9C:  40800008  bc 4,0,0x2547DDA4
+	  38: Jc00o       	$0x2547DDA4
+
+
+. 0 2547DD7C 36
+. 7D 66 2A 14 81 4B 00 04 81 6A 02 24 81 0A 02 28 7C 1C 58 40 38 8B FF FF 7C E8 00 D0 7C E8 20 38 40 80 00 08
+
+==== BB 991 (0x2547DDA4) approx BBs exec'd 0 ====
+
+	0x2547DDA4:  80EA0220  lwz r7,544(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x220, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547DDA8:  7C9FE850  subf r4,r31,r29
+	   5: GETL       	R31, t4
+	   6: GETL       	R29, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x2547DDAC:  7C872040  cmpl cr1,r7,r4
+	  10: GETL       	R7, t8
+	  11: GETL       	R4, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x2547DDB0:  40A5FF78  bc 5,5,0x2547DD28
+	  15: Jc05o       	$0x2547DD28
+
+
+. 0 2547DDA4 16
+. 80 EA 02 20 7C 9F E8 50 7C 87 20 40 40 A5 FF 78
+
+==== BB 992 (0x2547DDB4) approx BBs exec'd 0 ====
+
+	0x2547DDB4:  7D662A14  add r11,r6,r5
+	   0: GETL       	R6, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547DDB8:  814B0004  lwz r10,4(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x2547DDBC:  816A0224  lwz r11,548(r10)
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0x224, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547DDC0:  7CCB1A14  add r6,r11,r3
+	  15: GETL       	R11, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R6
+	  19: INCEIPL       	$4
+
+	0x2547DDC4:  3806FFFF  addi r0,r6,-1
+	  20: GETL       	R6, t16
+	  21: ADDL       	$0xFFFFFFFF, t16
+	  22: PUTL       	t16, R0
+	  23: INCEIPL       	$4
+
+	0x2547DDC8:  7D205B96  divwu r9, r0, r11
+	  24: GETL       	R0, t20
+	  25: GETL       	R11, t18
+	  26: UDIVL       	t18, t20
+	  27: PUTL       	t20, R9
+	  28: INCEIPL       	$4
+
+	0x2547DDCC:  7D2959D6  mullw r9,r9,r11
+	  29: GETL       	R9, t22
+	  30: GETL       	R11, t24
+	  31: MULL       	t22, t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0x2547DDD0:  7CE34850  subf r7,r3,r9
+	  34: GETL       	R3, t26
+	  35: GETL       	R9, t28
+	  36: SUBL       	t26, t28
+	  37: PUTL       	t28, R7
+	  38: INCEIPL       	$4
+
+	0x2547DDD4:  7C074040  cmpl cr0,r7,r8
+	  39: GETL       	R7, t30
+	  40: GETL       	R8, t32
+	  41: CMPUL       	t30, t32, t34  (-rSo)
+	  42: ICRFL       	t34, $0x0, CR
+	  43: INCEIPL       	$4
+
+	0x2547DDD8:  40800008  bc 4,0,0x2547DDE0
+	  44: Jc00o       	$0x2547DDE0
+
+
+. 0 2547DDB4 40
+. 7D 66 2A 14 81 4B 00 04 81 6A 02 24 7C CB 1A 14 38 06 FF FF 7D 20 5B 96 7D 29 59 D6 7C E3 48 50 7C 07 40 40 40 80 00 08
+
+==== BB 993 (0x2547DDE0) approx BBs exec'd 0 ====
+
+	0x2547DDE0:  7D684850  subf r11,r8,r9
+	   0: GETL       	R8, t0
+	   1: GETL       	R9, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547DDE4:  7CE35850  subf r7,r3,r11
+	   5: GETL       	R3, t4
+	   6: GETL       	R11, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R7
+	   9: INCEIPL       	$4
+
+	0x2547DDE8:  916A022C  stw r11,556(r10)
+	  10: GETL       	R11, t8
+	  11: GETL       	R10, t10
+	  12: ADDL       	$0x22C, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x2547DDEC:  7C872040  cmpl cr1,r7,r4
+	  15: GETL       	R7, t12
+	  16: GETL       	R4, t14
+	  17: CMPUL       	t12, t14, t16  (-rSo)
+	  18: ICRFL       	t16, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0x2547DDF0:  4085000C  bc 4,5,0x2547DDFC
+	  20: Jc05o       	$0x2547DDFC
+
+
+. 0 2547DDE0 20
+. 7D 68 48 50 7C E3 58 50 91 6A 02 2C 7C 87 20 40 40 85 00 0C
+
+==== BB 994 (0x2547DDFC) approx BBs exec'd 0 ====
+
+	0x2547DDFC:  806A0220  lwz r3,544(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x220, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547DE00:  398C0001  addi r12,r12,1
+	   5: GETL       	R12, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x2547DE04:  55861838  rlwinm r6,r12,3,0,28
+	   9: GETL       	R12, t6
+	  10: SHLL       	$0x3, t6
+	  11: PUTL       	t6, R6
+	  12: INCEIPL       	$4
+
+	0x2547DE08:  7D434A14  add r10,r3,r9
+	  13: GETL       	R3, t8
+	  14: GETL       	R9, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R10
+	  17: INCEIPL       	$4
+
+	0x2547DE0C:  7C685050  subf r3,r8,r10
+	  18: GETL       	R8, t12
+	  19: GETL       	R10, t14
+	  20: SUBL       	t12, t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0x2547DE10:  7D062A14  add r8,r6,r5
+	  23: GETL       	R6, t16
+	  24: GETL       	R5, t18
+	  25: ADDL       	t16, t18
+	  26: PUTL       	t18, R8
+	  27: INCEIPL       	$4
+
+	0x2547DE14:  80880004  lwz r4,4(r8)
+	  28: GETL       	R8, t20
+	  29: ADDL       	$0x4, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R4
+	  32: INCEIPL       	$4
+
+	0x2547DE18:  2F040000  cmpi cr6,r4,0
+	  33: GETL       	R4, t24
+	  34: CMP0L       	t24, t26  (-rSo)
+	  35: ICRFL       	t26, $0x6, CR
+	  36: INCEIPL       	$4
+
+	0x2547DE1C:  409AFF60  bc 4,26,0x2547DD7C
+	  37: Jc26o       	$0x2547DD7C
+
+
+. 0 2547DDFC 36
+. 80 6A 02 20 39 8C 00 01 55 86 18 38 7D 43 4A 14 7C 68 50 50 7D 06 2A 14 80 88 00 04 2F 04 00 00 40 9A FF 60
+
+==== BB 995 (0x2547DE20) approx BBs exec'd 0 ====
+
+	0x2547DE20:  3BA3069F  addi r29,r3,1695
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x69F, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0x2547DE24:  939B0420  stw r28,1056(r27)
+	   4: GETL       	R28, t2
+	   5: GETL       	R27, t4
+	   6: ADDL       	$0x420, t4
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547DE28:  57A50034  rlwinm r5,r29,0,0,26
+	   9: GETL       	R29, t6
+	  10: ANDL       	$0xFFFFFFE0, t6
+	  11: PUTL       	t6, R5
+	  12: INCEIPL       	$4
+
+	0x2547DE2C:  907B041C  stw r3,1052(r27)
+	  13: GETL       	R3, t8
+	  14: GETL       	R27, t10
+	  15: ADDL       	$0x41C, t10
+	  16: STL       	t8, (t10)
+	  17: INCEIPL       	$4
+
+	0x2547DE30:  90BB0418  stw r5,1048(r27)
+	  18: GETL       	R5, t12
+	  19: GETL       	R27, t14
+	  20: ADDL       	$0x418, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0x2547DE34:  83810010  lwz r28,16(r1)
+	  23: GETL       	R1, t16
+	  24: ADDL       	$0x10, t16
+	  25: LDL       	(t16), t18
+	  26: PUTL       	t18, R28
+	  27: INCEIPL       	$4
+
+	0x2547DE38:  8361000C  lwz r27,12(r1)
+	  28: GETL       	R1, t20
+	  29: ADDL       	$0xC, t20
+	  30: LDL       	(t20), t22
+	  31: PUTL       	t22, R27
+	  32: INCEIPL       	$4
+
+	0x2547DE3C:  83A10014  lwz r29,20(r1)
+	  33: GETL       	R1, t24
+	  34: ADDL       	$0x14, t24
+	  35: LDL       	(t24), t26
+	  36: PUTL       	t26, R29
+	  37: INCEIPL       	$4
+
+	0x2547DE40:  83C10018  lwz r30,24(r1)
+	  38: GETL       	R1, t28
+	  39: ADDL       	$0x18, t28
+	  40: LDL       	(t28), t30
+	  41: PUTL       	t30, R30
+	  42: INCEIPL       	$4
+
+	0x2547DE44:  83E1001C  lwz r31,28(r1)
+	  43: GETL       	R1, t32
+	  44: ADDL       	$0x1C, t32
+	  45: LDL       	(t32), t34
+	  46: PUTL       	t34, R31
+	  47: INCEIPL       	$4
+
+	0x2547DE48:  38210020  addi r1,r1,32
+	  48: GETL       	R1, t36
+	  49: ADDL       	$0x20, t36
+	  50: PUTL       	t36, R1
+	  51: INCEIPL       	$4
+
+	0x2547DE4C:  4E800020  blr
+	  52: GETL       	LR, t38
+	  53: JMPo-r       	t38  ($4)
+
+
+. 0 2547DE20 48
+. 3B A3 06 9F 93 9B 04 20 57 A5 00 34 90 7B 04 1C 90 BB 04 18 83 81 00 10 83 61 00 0C 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 996 (0x25472978) approx BBs exec'd 0 ====
+
+	0x25472978:  4800B57D  bl 0x2547DEF4
+	   0: MOVL       	$0x2547297C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547DEF4  ($4)
+
+
+. 0 25472978 4
+. 48 00 B5 7D
+
+==== BB 997 _dl_allocate_tls_storage(0x2547DEF4) approx BBs exec'd 0 ====
+
+	0x2547DEF4:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547DEF8:  7C6802A6  mflr r3
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x2547DEFC:  48019105  bl 0x25497000
+	   9: MOVL       	$0x2547DF00, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547DEF4 12
+. 94 21 FF E0 7C 68 02 A6 48 01 91 05
+
+==== BB 998 (0x2547DF00) approx BBs exec'd 0 ====
+
+	0x2547DF00:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547DF04:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547DF08:  93810010  stw r28,16(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547DF0C:  90610024  stw r3,36(r1)
+	  13: GETL       	R3, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547DF10:  93A10014  stw r29,20(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x14, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547DF14:  839E04C8  lwz r28,1224(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x4C8, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R28
+	  27: INCEIPL       	$4
+
+	0x2547DF18:  9361000C  stw r27,12(r1)
+	  28: GETL       	R27, t22
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0xC, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0x2547DF1C:  807C0420  lwz r3,1056(r28)
+	  33: GETL       	R28, t26
+	  34: ADDL       	$0x420, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R3
+	  37: INCEIPL       	$4
+
+	0x2547DF20:  809C0418  lwz r4,1048(r28)
+	  38: GETL       	R28, t30
+	  39: ADDL       	$0x418, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R4
+	  42: INCEIPL       	$4
+
+	0x2547DF24:  38A3046F  addi r5,r3,1135
+	  43: GETL       	R3, t34
+	  44: ADDL       	$0x46F, t34
+	  45: PUTL       	t34, R5
+	  46: INCEIPL       	$4
+
+	0x2547DF28:  7D2300D0  neg r9,r3
+	  47: GETL       	R3, t36
+	  48: NEGL       	t36
+	  49: PUTL       	t36, R9
+	  50: INCEIPL       	$4
+
+	0x2547DF2C:  7CA04838  and r0,r5,r9
+	  51: GETL       	R5, t38
+	  52: GETL       	R9, t40
+	  53: ANDL       	t38, t40
+	  54: PUTL       	t40, R0
+	  55: INCEIPL       	$4
+
+	0x2547DF30:  93E1001C  stw r31,28(r1)
+	  56: GETL       	R31, t42
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x1C, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x2547DF34:  7FA40214  add r29,r4,r0
+	  61: GETL       	R4, t46
+	  62: GETL       	R0, t48
+	  63: ADDL       	t46, t48
+	  64: PUTL       	t48, R29
+	  65: INCEIPL       	$4
+
+	0x2547DF38:  7FA4EB78  or r4,r29,r29
+	  66: GETL       	R29, t50
+	  67: PUTL       	t50, R4
+	  68: INCEIPL       	$4
+
+	0x2547DF3C:  48019AE1  bl 0x25497A1C
+	  69: MOVL       	$0x2547DF40, t52
+	  70: PUTL       	t52, LR
+	  71: JMPo-c       	$0x25497A1C  ($4)
+
+
+. 0 2547DF00 64
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 90 61 00 24 93 A1 00 14 83 9E 04 C8 93 61 00 0C 80 7C 04 20 80 9C 04 18 38 A3 04 6F 7D 23 00 D0 7C A0 48 38 93 E1 00 1C 7F A4 02 14 7F A4 EB 78 48 01 9A E1
+
+==== BB 999 (0x2547DF40) approx BBs exec'd 0 ====
+
+	0x2547DF40:  38A00470  li r5,1136
+	   0: MOVL       	$0x470, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547DF44:  7C7F1B79  or. r31,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R31
+	   5: CMP0L       	t2, t4  (-rSo)
+	   6: ICRFL       	t4, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x2547DF48:  38800000  li r4,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0x2547DF4C:  7FFBFB78  or r27,r31,r31
+	  11: GETL       	R31, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0x2547DF50:  7C1FEA14  add r0,r31,r29
+	  14: GETL       	R31, t10
+	  15: GETL       	R29, t12
+	  16: ADDL       	t10, t12
+	  17: PUTL       	t12, R0
+	  18: INCEIPL       	$4
+
+	0x2547DF54:  4182004C  bc 12,2,0x2547DFA0
+	  19: Js02o       	$0x2547DFA0
+
+
+. 0 2547DF40 24
+. 38 A0 04 70 7C 7F 1B 79 38 80 00 00 7F FB FB 78 7C 1F EA 14 41 82 00 4C
+
+==== BB 1000 (0x2547DF58) approx BBs exec'd 0 ====
+
+	0x2547DF58:  80FC0418  lwz r7,1048(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x418, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547DF5C:  7FA70050  subf r29,r7,r0
+	   5: GETL       	R7, t4
+	   6: GETL       	R0, t6
+	   7: SUBL       	t4, t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547DF60:  387DFB90  addi r3,r29,-1136
+	  10: GETL       	R29, t8
+	  11: ADDL       	$0xFFFFFB90, t8
+	  12: PUTL       	t8, R3
+	  13: INCEIPL       	$4
+
+	0x2547DF64:  480057E1  bl 0x25483744
+	  14: MOVL       	$0x2547DF68, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x25483744  ($4)
+
+
+. 0 2547DF58 16
+. 80 FC 04 18 7F A7 00 50 38 7D FB 90 48 00 57 E1
+
+==== BB 1001 (0x254837BC) approx BBs exec'd 0 ====
+
+	0x254837BC:  9088FFFC  stw r4,-4(r8)
+	   0: GETL       	R4, t0
+	   1: GETL       	R8, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254837C0:  9088FFF8  stw r4,-8(r8)
+	   5: GETL       	R4, t4
+	   6: GETL       	R8, t6
+	   7: ADDL       	$0xFFFFFFF8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x254837C4:  9088FFF4  stw r4,-12(r8)
+	  10: GETL       	R4, t8
+	  11: GETL       	R8, t10
+	  12: ADDL       	$0xFFFFFFF4, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x254837C8:  9488FFF0  stwu r4,-16(r8)
+	  15: GETL       	R4, t12
+	  16: GETL       	R8, t14
+	  17: ADDL       	$0xFFFFFFF0, t14
+	  18: PUTL       	t14, R8
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x254837CC:  409D0008  bc 4,29,0x254837D4
+	  21: Jc29o       	$0x254837D4
+
+
+. 0 254837BC 20
+. 90 88 FF FC 90 88 FF F8 90 88 FF F4 94 88 FF F0 40 9D 00 08
+
+==== BB 1002 (0x25483894) approx BBs exec'd 0 ====
+
+	0x25483894:  7C0037EC  dcbz r0,r6
+	   0: GETL       	R6, t0
+	   1: ANDL       	$0xFFFFFFE0, t0
+	   2: MOVL       	$0x0, t2
+	   3: STL       	t2, (t0)
+	   4: ADDL       	$0x4, t0
+	   5: STL       	t2, (t0)
+	   6: ADDL       	$0x4, t0
+	   7: STL       	t2, (t0)
+	   8: ADDL       	$0x4, t0
+	   9: STL       	t2, (t0)
+	  10: ADDL       	$0x4, t0
+	  11: STL       	t2, (t0)
+	  12: ADDL       	$0x4, t0
+	  13: STL       	t2, (t0)
+	  14: ADDL       	$0x4, t0
+	  15: STL       	t2, (t0)
+	  16: ADDL       	$0x4, t0
+	  17: STL       	t2, (t0)
+	  18: INCEIPL       	$4
+
+	0x25483898:  7C0737EC  dcbz r7,r6
+	  19: GETL       	R6, t4
+	  20: GETL       	R7, t6
+	  21: ADDL       	t6, t4
+	  22: ANDL       	$0xFFFFFFE0, t4
+	  23: MOVL       	$0x0, t8
+	  24: STL       	t8, (t4)
+	  25: ADDL       	$0x4, t4
+	  26: STL       	t8, (t4)
+	  27: ADDL       	$0x4, t4
+	  28: STL       	t8, (t4)
+	  29: ADDL       	$0x4, t4
+	  30: STL       	t8, (t4)
+	  31: ADDL       	$0x4, t4
+	  32: STL       	t8, (t4)
+	  33: ADDL       	$0x4, t4
+	  34: STL       	t8, (t4)
+	  35: ADDL       	$0x4, t4
+	  36: STL       	t8, (t4)
+	  37: ADDL       	$0x4, t4
+	  38: STL       	t8, (t4)
+	  39: INCEIPL       	$4
+
+	0x2548389C:  38C60040  addi r6,r6,64
+	  40: GETL       	R6, t10
+	  41: ADDL       	$0x40, t10
+	  42: PUTL       	t10, R6
+	  43: INCEIPL       	$4
+
+	0x254838A0:  2A850000  cmpli cr5,r5,0
+	  44: GETL       	R5, t12
+	  45: MOVL       	$0x0, t16
+	  46: CMPUL       	t12, t16, t14  (-rSo)
+	  47: ICRFL       	t14, $0x5, CR
+	  48: INCEIPL       	$4
+
+	0x254838A4:  4182007C  bc 12,2,0x25483920
+	  49: Js02o       	$0x25483920
+
+
+. 0 25483894 20
+. 7C 00 37 EC 7C 07 37 EC 38 C6 00 40 2A 85 00 00 41 82 00 7C
+
+==== BB 1003 (0x2547DF68) approx BBs exec'd 0 ====
+
+	0x2547DF68:  80DC0408  lwz r6,1032(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547DF6C:  38800004  li r4,4
+	   5: MOVL       	$0x4, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x2547DF70:  38660010  addi r3,r6,16
+	   8: GETL       	R6, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x2547DF74:  3BE6000E  addi r31,r6,14
+	  12: GETL       	R6, t8
+	  13: ADDL       	$0xE, t8
+	  14: PUTL       	t8, R31
+	  15: INCEIPL       	$4
+
+	0x2547DF78:  48019AB5  bl 0x25497A2C
+	  16: MOVL       	$0x2547DF7C, t10
+	  17: PUTL       	t10, LR
+	  18: JMPo-c       	$0x25497A2C  ($4)
+
+
+. 0 2547DF68 20
+. 80 DC 04 08 38 80 00 04 38 66 00 10 3B E6 00 0E 48 01 9A B5
+
+==== BB 1004 (0x2547DF7C) approx BBs exec'd 0 ====
+
+	0x2547DF7C:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0x2547DF80:  38030004  addi r0,r3,4
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x2547DF84:  41860044  bc 12,6,0x2547DFC8
+	   8: Js06o       	$0x2547DFC8
+
+
+. 0 2547DF7C 12
+. 2C 83 00 00 38 03 00 04 41 86 00 44
+
+==== BB 1005 (0x2547DF88) approx BBs exec'd 0 ====
+
+	0x2547DF88:  93E30000  stw r31,0(r3)
+	   0: GETL       	R31, t0
+	   1: GETL       	R3, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x2547DF8C:  901DFFFC  stw r0,-4(r29)
+	   4: GETL       	R0, t4
+	   5: GETL       	R29, t6
+	   6: ADDL       	$0xFFFFFFFC, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x2547DF90:  2F9D0000  cmpi cr7,r29,0
+	   9: GETL       	R29, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x2547DF94:  7FBFEB78  or r31,r29,r29
+	  13: GETL       	R29, t12
+	  14: PUTL       	t12, R31
+	  15: INCEIPL       	$4
+
+	0x2547DF98:  7F63DB78  or r3,r27,r27
+	  16: GETL       	R27, t14
+	  17: PUTL       	t14, R3
+	  18: INCEIPL       	$4
+
+	0x2547DF9C:  419E0034  bc 12,30,0x2547DFD0
+	  19: Js30o       	$0x2547DFD0
+
+
+. 0 2547DF88 24
+. 93 E3 00 00 90 1D FF FC 2F 9D 00 00 7F BF EB 78 7F 63 DB 78 41 9E 00 34
+
+==== BB 1006 (0x2547DFA0) approx BBs exec'd 0 ====
+
+	0x2547DFA0:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547DFA4:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547DFA8:  8361000C  lwz r27,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R27
+	  12: INCEIPL       	$4
+
+	0x2547DFAC:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0x2547DFB0:  7D0803A6  mtlr r8
+	  18: GETL       	R8, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547DFB4:  83A10014  lwz r29,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x2547DFB8:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x2547DFBC:  83E1001C  lwz r31,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R31
+	  35: INCEIPL       	$4
+
+	0x2547DFC0:  38210020  addi r1,r1,32
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: PUTL       	t28, R1
+	  39: INCEIPL       	$4
+
+	0x2547DFC4:  4E800020  blr
+	  40: GETL       	LR, t30
+	  41: JMPo-r       	t30  ($4)
+
+
+. 0 2547DFA0 40
+. 81 01 00 24 7F E3 FB 78 83 61 00 0C 83 81 00 10 7D 08 03 A6 83 A1 00 14 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1007 (0x2547297C) approx BBs exec'd 0 ====
+
+	0x2547297C:  7C711B79  or. r17,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R17
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x25472980:  41820E04  bc 12,2,0x25473784
+	   5: Js02o       	$0x25473784
+
+
+. 0 2547297C 8
+. 7C 71 1B 79 41 82 0E 04
+
+==== BB 1008 (0x25472984) approx BBs exec'd 0 ====
+
+	0x25472984:  813F0030  lwz r9,48(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x30, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25472988:  39540158  addi r10,r20,344
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0x158, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547298C:  8311FFFC  lwz r24,-4(r17)
+	   9: GETL       	R17, t6
+	  10: ADDL       	$0xFFFFFFFC, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R24
+	  13: INCEIPL       	$4
+
+	0x25472990:  2F890000  cmpi cr7,r9,0
+	  14: GETL       	R9, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0x25472994:  915F0078  stw r10,120(r31)
+	  18: GETL       	R10, t14
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0x78, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25472998:  930E0424  stw r24,1060(r14)
+	  23: GETL       	R24, t18
+	  24: GETL       	R14, t20
+	  25: ADDL       	$0x424, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0x2547299C:  409E0E38  bc 4,30,0x254737D4
+	  28: Jc30o       	$0x254737D4
+
+
+. 0 25472984 28
+. 81 3F 00 30 39 54 01 58 83 11 FF FC 2F 89 00 00 91 5F 00 78 93 0E 04 24 40 9E 0E 38
+
+==== BB 1009 (0x254729A0) approx BBs exec'd 0 ====
+
+	0x254729A0:  8074013C  lwz r3,316(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x13C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254729A4:  2E120000  cmpi cr4,r18,0
+	   5: GETL       	R18, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254729A8:  2F830000  cmpi cr7,r3,0
+	   9: GETL       	R3, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x254729AC:  419E0118  bc 12,30,0x25472AC4
+	  13: Js30o       	$0x25472AC4
+
+
+. 0 254729A0 16
+. 80 74 01 3C 2E 12 00 00 2F 83 00 00 41 9E 01 18
+
+==== BB 1010 (0x25472AC4) approx BBs exec'd 0 ====
+
+	0x25472AC4:  806E01B8  lwz r3,440(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1B8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25472AC8:  4800947D  bl 0x2547BF44
+	   5: MOVL       	$0x25472ACC, t4
+	   6: PUTL       	t4, LR
+	   7: JMPo-c       	$0x2547BF44  ($4)
+
+
+. 0 25472AC4 8
+. 80 6E 01 B8 48 00 94 7D
+
+==== BB 1011 _dl_debug_initialize(0x2547BF44) approx BBs exec'd 0 ====
+
+	0x2547BF44:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547BF48:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x2547BF4C:  4801B0B5  bl 0x25497000
+	   9: MOVL       	$0x2547BF50, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547BF44 12
+. 94 21 FF F0 7D 88 02 A6 48 01 B0 B5
+
+==== BB 1012 (0x2547BF50) approx BBs exec'd 0 ====
+
+	0x2547BF50:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547BF54:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547BF58:  7C681B78  or r8,r3,r3
+	   8: GETL       	R3, t6
+	   9: PUTL       	t6, R8
+	  10: INCEIPL       	$4
+
+	0x2547BF5C:  7D8803A6  mtlr r12
+	  11: GETL       	R12, t8
+	  12: PUTL       	t8, LR
+	  13: INCEIPL       	$4
+
+	0x2547BF60:  815E0518  lwz r10,1304(r30)
+	  14: GETL       	R30, t10
+	  15: ADDL       	$0x518, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547BF64:  800A0008  lwz r0,8(r10)
+	  19: GETL       	R10, t14
+	  20: ADDL       	$0x8, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R0
+	  23: INCEIPL       	$4
+
+	0x2547BF68:  7D435378  or r3,r10,r10
+	  24: GETL       	R10, t18
+	  25: PUTL       	t18, R3
+	  26: INCEIPL       	$4
+
+	0x2547BF6C:  2F800000  cmpi cr7,r0,0
+	  27: GETL       	R0, t20
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x7, CR
+	  30: INCEIPL       	$4
+
+	0x2547BF70:  409E0024  bc 4,30,0x2547BF94
+	  31: Jc30o       	$0x2547BF94
+
+
+. 0 2547BF50 36
+. 93 C1 00 08 7F C8 02 A6 7C 68 1B 78 7D 88 03 A6 81 5E 05 18 80 0A 00 08 7D 43 53 78 2F 80 00 00 40 9E 00 24
+
+==== BB 1013 (0x2547BF74) approx BBs exec'd 0 ====
+
+	0x2547BF74:  813E04C8  lwz r9,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547BF78:  38A00001  li r5,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0x2547BF7C:  809E0508  lwz r4,1288(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x508, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R4
+	  12: INCEIPL       	$4
+
+	0x2547BF80:  81690000  lwz r11,0(r9)
+	  13: GETL       	R9, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R11
+	  16: INCEIPL       	$4
+
+	0x2547BF84:  90AA0000  stw r5,0(r10)
+	  17: GETL       	R5, t14
+	  18: GETL       	R10, t16
+	  19: STL       	t14, (t16)
+	  20: INCEIPL       	$4
+
+	0x2547BF88:  910A0010  stw r8,16(r10)
+	  21: GETL       	R8, t18
+	  22: GETL       	R10, t20
+	  23: ADDL       	$0x10, t20
+	  24: STL       	t18, (t20)
+	  25: INCEIPL       	$4
+
+	0x2547BF8C:  916A0004  stw r11,4(r10)
+	  26: GETL       	R11, t22
+	  27: GETL       	R10, t24
+	  28: ADDL       	$0x4, t24
+	  29: STL       	t22, (t24)
+	  30: INCEIPL       	$4
+
+	0x2547BF90:  908A0008  stw r4,8(r10)
+	  31: GETL       	R4, t26
+	  32: GETL       	R10, t28
+	  33: ADDL       	$0x8, t28
+	  34: STL       	t26, (t28)
+	  35: INCEIPL       	$4
+
+	0x2547BF94:  83C10008  lwz r30,8(r1)
+	  36: GETL       	R1, t30
+	  37: ADDL       	$0x8, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R30
+	  40: INCEIPL       	$4
+
+	0x2547BF98:  38210010  addi r1,r1,16
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x10, t34
+	  43: PUTL       	t34, R1
+	  44: INCEIPL       	$4
+
+	0x2547BF9C:  4E800020  blr
+	  45: GETL       	LR, t36
+	  46: JMPo-r       	t36  ($4)
+
+
+. 0 2547BF74 44
+. 81 3E 04 C8 38 A0 00 01 80 9E 05 08 81 69 00 00 90 AA 00 00 91 0A 00 10 91 6A 00 04 90 8A 00 08 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+==== BB 1014 (0x25472ACC) approx BBs exec'd 0 ====
+
+	0x25472ACC:  81340074  lwz r9,116(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25472AD0:  7C7A1B78  or r26,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x25472AD4:  2F090000  cmpi cr6,r9,0
+	   8: GETL       	R9, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x25472AD8:  419A0008  bc 12,26,0x25472AE0
+	  12: Js26o       	$0x25472AE0
+
+
+. 0 25472ACC 16
+. 81 34 00 74 7C 7A 1B 78 2F 09 00 00 41 9A 00 08
+
+==== BB 1015 (0x25472ADC) approx BBs exec'd 0 ====
+
+	0x25472ADC:  90690004  stw r3,4(r9)
+	   0: GETL       	R3, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	$0x4, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25472AE0:  812E022C  lwz r9,556(r14)
+	   5: GETL       	R14, t4
+	   6: ADDL       	$0x22C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x25472AE4:  2F890000  cmpi cr7,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25472AE8:  419E0008  bc 12,30,0x25472AF0
+	  14: Js30o       	$0x25472AF0
+
+
+. 0 25472ADC 16
+. 90 69 00 04 81 2E 02 2C 2F 89 00 00 41 9E 00 08
+
+==== BB 1016 (0x25472AF0) approx BBs exec'd 0 ====
+
+	0x25472AF0:  825F0078  lwz r18,120(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x78, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x25472AF4:  C8140158  lfd f0,344(r20)
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0x158, t4
+	   7: FPU_RQ       	(t4), 0x0:0x0
+	   8: INCEIPL       	$4
+
+	0x25472AF8:  924E0008  stw r18,8(r14)
+	   9: GETL       	R18, t6
+	  10: GETL       	R14, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x25472AFC:  D80F0014  stfd f0,20(r15)
+	  14: GETL       	R15, t10
+	  15: ADDL       	$0x14, t10
+	  16: FPU_WQ       	0x0:0x0, (t10)
+	  17: INCEIPL       	$4
+
+	0x25472B00:  924E0010  stw r18,16(r14)
+	  18: GETL       	R18, t12
+	  19: GETL       	R14, t14
+	  20: ADDL       	$0x10, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0x25472B04:  41920844  bc 12,18,0x25473348
+	  23: Js18o       	$0x25473348
+
+
+. 0 25472AF0 24
+. 82 5F 00 78 C8 14 01 58 92 4E 00 08 D8 0F 00 14 92 4E 00 10 41 92 08 44
+
+==== BB 1017 (0x25473348) approx BBs exec'd 0 ====
+
+	0x25473348:  8174000C  lwz r11,12(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547334C:  7E9DA378  or r29,r20,r20
+	   5: GETL       	R20, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25473350:  820F0050  lwz r16,80(r15)
+	   8: GETL       	R15, t6
+	   9: ADDL       	$0x50, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R16
+	  12: INCEIPL       	$4
+
+	0x25473354:  2C0B0000  cmpi cr0,r11,0
+	  13: GETL       	R11, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0x25473358:  82EF0028  lwz r23,40(r15)
+	  17: GETL       	R15, t14
+	  18: ADDL       	$0x28, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R23
+	  21: INCEIPL       	$4
+
+	0x2547335C:  3150FFFF  addic r10,r16,-1
+	  22: GETL       	R16, t18
+	  23: ADCL       	$0xFFFFFFFF, t18  (-wCa)
+	  24: PUTL       	t18, R10
+	  25: INCEIPL       	$4
+
+	0x25473360:  7F6A8110  subfe r27,r10,r16
+	  26: GETL       	R10, t20
+	  27: GETL       	R16, t22
+	  28: SBBL       	t20, t22  (-rCa-wCa)
+	  29: PUTL       	t22, R27
+	  30: INCEIPL       	$4
+
+	0x25473364:  7EF3DB78  or r19,r23,r27
+	  31: GETL       	R23, t24
+	  32: GETL       	R27, t26
+	  33: ORL       	t26, t24
+	  34: PUTL       	t24, R19
+	  35: INCEIPL       	$4
+
+	0x25473368:  926F0028  stw r19,40(r15)
+	  36: GETL       	R19, t28
+	  37: GETL       	R15, t30
+	  38: ADDL       	$0x28, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0x2547336C:  41820018  bc 12,2,0x25473384
+	  41: Js02o       	$0x25473384
+
+
+. 0 25473348 40
+. 81 74 00 0C 7E 9D A3 78 82 0F 00 50 2C 0B 00 00 82 EF 00 28 31 50 FF FF 7F 6A 81 10 7E F3 DB 78 92 6F 00 28 41 82 00 18
+
+==== BB 1018 (0x25473370) approx BBs exec'd 0 ====
+
+	0x25473370:  7D605B78  or r0,r11,r11
+	   0: GETL       	R11, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25473374:  7C1D0378  or r29,r0,r0
+	   3: GETL       	R0, t2
+	   4: PUTL       	t2, R29
+	   5: INCEIPL       	$4
+
+	0x25473378:  801D000C  lwz r0,12(r29)
+	   6: GETL       	R29, t4
+	   7: ADDL       	$0xC, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x2547337C:  2F000000  cmpi cr6,r0,0
+	  11: GETL       	R0, t8
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0x25473380:  409AFFF4  bc 4,26,0x25473374
+	  15: Jc26o       	$0x25473374
+
+
+. 0 25473370 20
+. 7D 60 5B 78 7C 1D 03 78 80 1D 00 0C 2F 00 00 00 40 9A FF F4
+
+==== BB 1019 (0x25473374) approx BBs exec'd 0 ====
+
+	0x25473374:  7C1D0378  or r29,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0x25473378:  801D000C  lwz r0,12(r29)
+	   3: GETL       	R29, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x2547337C:  2F000000  cmpi cr6,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0x25473380:  409AFFF4  bc 4,26,0x25473374
+	  12: Jc26o       	$0x25473374
+
+
+. 0 25473374 16
+. 7C 1D 03 78 80 1D 00 0C 2F 00 00 00 40 9A FF F4
+
+==== BB 1020 (0x25473384) approx BBs exec'd 0 ====
+
+	0x25473384:  82DD001C  lwz r22,28(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R22
+	   4: INCEIPL       	$4
+
+	0x25473388:  81360004  lwz r9,4(r22)
+	   5: GETL       	R22, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x2547338C:  2F890000  cmpi cr7,r9,0
+	  10: GETL       	R9, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25473390:  409E0420  bc 4,30,0x254737B0
+	  14: Jc30o       	$0x254737B0
+
+
+. 0 25473384 16
+. 82 DD 00 1C 81 36 00 04 2F 89 00 00 40 9E 04 20
+
+==== BB 1021 (0x254737B0) approx BBs exec'd 0 ====
+
+	0x254737B0:  38000001  li r0,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254737B4:  90090008  stw r0,8(r9)
+	   3: GETL       	R0, t2
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x8, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254737B8:  81290004  lwz r9,4(r9)
+	   8: GETL       	R9, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R9
+	  12: INCEIPL       	$4
+
+	0x254737BC:  2C890000  cmpi cr1,r9,0
+	  13: GETL       	R9, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x1, CR
+	  16: INCEIPL       	$4
+
+	0x254737C0:  4186FBD4  bc 12,6,0x25473394
+	  17: Js06o       	$0x25473394
+
+
+. 0 254737B0 20
+. 38 00 00 01 90 09 00 08 81 29 00 04 2C 89 00 00 41 86 FB D4
+
+==== BB 1022 (0x25473394) approx BBs exec'd 0 ====
+
+	0x25473394:  3B8E01B8  addi r28,r14,440
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1B8, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x25473398:  7D9CE800  cmp cr3,r28,r29
+	   4: GETL       	R28, t2
+	   5: GETL       	R29, t4
+	   6: CMPL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x3, CR
+	   8: INCEIPL       	$4
+
+	0x2547339C:  418E0018  bc 12,14,0x254733B4
+	   9: Js14o       	$0x254733B4
+
+
+. 0 25473394 12
+. 3B 8E 01 B8 7D 9C E8 00 41 8E 00 18
+
+==== BB 1023 (0x254733B4) approx BBs exec'd 0 ====
+
+	0x254733B4:  83BD0010  lwz r29,16(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254733B8:  2E1D0000  cmpi cr4,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254733BC:  4092FFC8  bc 4,18,0x25473384
+	   9: Jc18o       	$0x25473384
+
+
+. 0 254733B4 12
+. 83 BD 00 10 2E 1D 00 00 40 92 FF C8
+
+==== BB 1024 (0x254733A0) approx BBs exec'd 0 ====
+
+	0x254733A0:  809D01C0  lwz r4,448(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1C0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254733A4:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254733A8:  80AF0028  lwz r5,40(r15)
+	   8: GETL       	R15, t6
+	   9: ADDL       	$0x28, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R5
+	  12: INCEIPL       	$4
+
+	0x254733AC:  7F66DB78  or r6,r27,r27
+	  13: GETL       	R27, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x254733B0:  48006211  bl 0x254795C0
+	  16: MOVL       	$0x254733B4, t12
+	  17: PUTL       	t12, LR
+	  18: JMPo-c       	$0x254795C0  ($4)
+
+
+. 0 254733A0 20
+. 80 9D 01 C0 7F A3 EB 78 80 AF 00 28 7F 66 DB 78 48 00 62 11
+
+==== BB 1025 _dl_relocate_object(0x254795C0) approx BBs exec'd 0 ====
+
+	0x254795C0:  7CE802A6  mflr r7
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x254795C4:  9421FF50  stwu r1,-176(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF50, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x254795C8:  7D800026  mfcr r12
+	   9: GETL       	CR, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x254795CC:  93E100AC  stw r31,172(r1)
+	  12: GETL       	R31, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xAC, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x254795D0:  4801DA31  bl 0x25497000
+	  17: MOVL       	$0x254795D4, t12
+	  18: PUTL       	t12, LR
+	  19: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254795C0 20
+. 7C E8 02 A6 94 21 FF 50 7D 80 00 26 93 E1 00 AC 48 01 DA 31
+
+==== BB 1026 (0x254795D4) approx BBs exec'd 0 ====
+
+	0x254795D4:  7C3F0B78  or r31,r1,r1
+	   0: GETL       	R1, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0x254795D8:  90E100B4  stw r7,180(r1)
+	   3: GETL       	R7, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xB4, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x254795DC:  80030180  lwz r0,384(r3)
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x180, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x254795E0:  92210074  stw r17,116(r1)
+	  13: GETL       	R17, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x74, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254795E4:  3A200000  li r17,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R17
+	  20: INCEIPL       	$4
+
+	0x254795E8:  74082000  andis. r8,r0,0x2000
+	  21: GETL       	R0, t16
+	  22: ANDL       	$0x20000000, t16
+	  23: PUTL       	t16, R8
+	  24: CMP0L       	t16, t18  (-rSo)
+	  25: ICRFL       	t18, $0x0, CR
+	  26: INCEIPL       	$4
+
+	0x254795EC:  9361009C  stw r27,156(r1)
+	  27: GETL       	R27, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x9C, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x254795F0:  938100A0  stw r28,160(r1)
+	  32: GETL       	R28, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0xA0, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x254795F4:  7CDB3378  or r27,r6,r6
+	  37: GETL       	R6, t28
+	  38: PUTL       	t28, R27
+	  39: INCEIPL       	$4
+
+	0x254795F8:  93C100A8  stw r30,168(r1)
+	  40: GETL       	R30, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0xA8, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x254795FC:  7CBC2B78  or r28,r5,r5
+	  45: GETL       	R5, t34
+	  46: PUTL       	t34, R28
+	  47: INCEIPL       	$4
+
+	0x25479600:  92010070  stw r16,112(r1)
+	  48: GETL       	R16, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x70, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x25479604:  7FC802A6  mflr r30
+	  53: GETL       	LR, t40
+	  54: PUTL       	t40, R30
+	  55: INCEIPL       	$4
+
+	0x25479608:  92410078  stw r18,120(r1)
+	  56: GETL       	R18, t42
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x78, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x2547960C:  9261007C  stw r19,124(r1)
+	  61: GETL       	R19, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x7C, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0x25479610:  92810080  stw r20,128(r1)
+	  66: GETL       	R20, t50
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x80, t52
+	  69: STL       	t50, (t52)
+	  70: INCEIPL       	$4
+
+	0x25479614:  92A10084  stw r21,132(r1)
+	  71: GETL       	R21, t54
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x84, t56
+	  74: STL       	t54, (t56)
+	  75: INCEIPL       	$4
+
+	0x25479618:  92C10088  stw r22,136(r1)
+	  76: GETL       	R22, t58
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x88, t60
+	  79: STL       	t58, (t60)
+	  80: INCEIPL       	$4
+
+	0x2547961C:  92E1008C  stw r23,140(r1)
+	  81: GETL       	R23, t62
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x8C, t64
+	  84: STL       	t62, (t64)
+	  85: INCEIPL       	$4
+
+	0x25479620:  93010090  stw r24,144(r1)
+	  86: GETL       	R24, t66
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x90, t68
+	  89: STL       	t66, (t68)
+	  90: INCEIPL       	$4
+
+	0x25479624:  93210094  stw r25,148(r1)
+	  91: GETL       	R25, t70
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x94, t72
+	  94: STL       	t70, (t72)
+	  95: INCEIPL       	$4
+
+	0x25479628:  93410098  stw r26,152(r1)
+	  96: GETL       	R26, t74
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x98, t76
+	  99: STL       	t74, (t76)
+	 100: INCEIPL       	$4
+
+	0x2547962C:  93A100A4  stw r29,164(r1)
+	 101: GETL       	R29, t78
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0xA4, t80
+	 104: STL       	t78, (t80)
+	 105: INCEIPL       	$4
+
+	0x25479630:  9181006C  stw r12,108(r1)
+	 106: GETL       	R12, t82
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x6C, t84
+	 109: STL       	t82, (t84)
+	 110: INCEIPL       	$4
+
+	0x25479634:  909F0010  stw r4,16(r31)
+	 111: GETL       	R4, t86
+	 112: GETL       	R31, t88
+	 113: ADDL       	$0x10, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0x25479638:  907F000C  stw r3,12(r31)
+	 116: GETL       	R3, t90
+	 117: GETL       	R31, t92
+	 118: ADDL       	$0xC, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x2547963C:  40820118  bc 4,2,0x25479754
+	 121: Jc02o       	$0x25479754
+
+
+. 0 254795D4 108
+. 7C 3F 0B 78 90 E1 00 B4 80 03 01 80 92 21 00 74 3A 20 00 00 74 08 20 00 93 61 00 9C 93 81 00 A0 7C DB 33 78 93 C1 00 A8 7C BC 2B 78 92 01 00 70 7F C8 02 A6 92 41 00 78 92 61 00 7C 92 81 00 80 92 A1 00 84 92 C1 00 88 92 E1 00 8C 93 01 00 90 93 21 00 94 93 41 00 98 93 A1 00 A4 91 81 00 6C 90 9F 00 10 90 7F 00 0C 40 82 01 18
+
+==== BB 1027 (0x25479640) approx BBs exec'd 0 ====
+
+	0x25479640:  2D860000  cmpi cr3,r6,0
+	   0: GETL       	R6, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x3, CR
+	   3: INCEIPL       	$4
+
+	0x25479644:  418E016C  bc 12,14,0x254797B0
+	   4: Js14o       	$0x254797B0
+
+
+. 0 25479640 8
+. 2D 86 00 00 41 8E 01 6C
+
+==== BB 1028 (0x254797B0) approx BBs exec'd 0 ====
+
+	0x254797B0:  80A30080  lwz r5,128(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x80, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254797B4:  3085FFFF  addic r4,r5,-1
+	   5: GETL       	R5, t4
+	   6: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x254797B8:  7C842110  subfe r4,r4,r4
+	   9: GETL       	R4, t6
+	  10: GETL       	R4, t8
+	  11: SBBL       	t6, t8  (-rCa-wCa)
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x254797BC:  7F9C2038  and r28,r28,r4
+	  14: GETL       	R28, t10
+	  15: GETL       	R4, t12
+	  16: ANDL       	t10, t12
+	  17: PUTL       	t12, R28
+	  18: INCEIPL       	$4
+
+	0x254797C0:  4BFFFE88  b 0x25479648
+	  19: JMPo       	$0x25479648  ($4)
+
+
+. 0 254797B0 20
+. 80 A3 00 80 30 85 FF FF 7C 84 21 10 7F 9C 20 38 4B FF FE 88
+
+==== BB 1029 (0x25479648) approx BBs exec'd 0 ====
+
+	0x25479648:  835E04F4  lwz r26,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547964C:  80DA0000  lwz r6,0(r26)
+	   5: GETL       	R26, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x25479650:  70C90020  andi. r9,r6,0x20
+	   9: GETL       	R6, t8
+	  10: ANDL       	$0x20, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x25479654:  408207B4  bc 4,2,0x25479E08
+	  15: Jc02o       	$0x25479E08
+
+
+. 0 25479648 16
+. 83 5E 04 F4 80 DA 00 00 70 C9 00 20 40 82 07 B4
+
+==== BB 1030 (0x25479658) approx BBs exec'd 0 ====
+
+	0x25479658:  807F000C  lwz r3,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547965C:  81430078  lwz r10,120(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x78, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25479660:  2E0A0000  cmpi cr4,r10,0
+	  10: GETL       	R10, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x4, CR
+	  13: INCEIPL       	$4
+
+	0x25479664:  409206C0  bc 4,18,0x25479D24
+	  14: Jc18o       	$0x25479D24
+
+
+. 0 25479658 16
+. 80 7F 00 0C 81 43 00 78 2E 0A 00 00 40 92 06 C0
+
+==== BB 1031 (0x25479668) approx BBs exec'd 0 ====
+
+	0x25479668:  80C30034  lwz r6,52(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547966C:  7F84E378  or r4,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0x25479670:  7F65DB78  or r5,r27,r27
+	   8: GETL       	R27, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x25479674:  3B800001  li r28,1
+	  11: MOVL       	$0x1, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x25479678:  83A60004  lwz r29,4(r6)
+	  14: GETL       	R6, t10
+	  15: ADDL       	$0x4, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R29
+	  18: INCEIPL       	$4
+
+	0x2547967C:  3B600000  li r27,0
+	  19: MOVL       	$0x0, t14
+	  20: PUTL       	t14, R27
+	  21: INCEIPL       	$4
+
+	0x25479680:  93BF0008  stw r29,8(r31)
+	  22: GETL       	R29, t16
+	  23: GETL       	R31, t18
+	  24: ADDL       	$0x8, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x25479684:  480071AD  bl 0x25480830
+	  27: MOVL       	$0x25479688, t20
+	  28: PUTL       	t20, LR
+	  29: JMPo-c       	$0x25480830  ($4)
+
+
+. 0 25479668 32
+. 80 C3 00 34 7F 84 E3 78 7F 65 DB 78 3B 80 00 01 83 A6 00 04 3B 60 00 00 93 BF 00 08 48 00 71 AD
+
+==== BB 1032 (0x25479688) approx BBs exec'd 0 ====
+
+	0x25479688:  83BF000C  lwz r29,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547968C:  38C00000  li r6,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0x25479690:  939F0034  stw r28,52(r31)
+	   8: GETL       	R28, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0x34, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25479694:  817D003C  lwz r11,60(r29)
+	  13: GETL       	R29, t10
+	  14: ADDL       	$0x3C, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R11
+	  17: INCEIPL       	$4
+
+	0x25479698:  937F0030  stw r27,48(r31)
+	  18: GETL       	R27, t14
+	  19: GETL       	R31, t16
+	  20: ADDL       	$0x30, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547969C:  2C8B0000  cmpi cr1,r11,0
+	  23: GETL       	R11, t18
+	  24: CMP0L       	t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x1, CR
+	  26: INCEIPL       	$4
+
+	0x254796A0:  937F0040  stw r27,64(r31)
+	  27: GETL       	R27, t22
+	  28: GETL       	R31, t24
+	  29: ADDL       	$0x40, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0x254796A4:  937F0028  stw r27,40(r31)
+	  32: GETL       	R27, t26
+	  33: GETL       	R31, t28
+	  34: ADDL       	$0x28, t28
+	  35: STL       	t26, (t28)
+	  36: INCEIPL       	$4
+
+	0x254796A8:  937F003C  stw r27,60(r31)
+	  37: GETL       	R27, t30
+	  38: GETL       	R31, t32
+	  39: ADDL       	$0x3C, t32
+	  40: STL       	t30, (t32)
+	  41: INCEIPL       	$4
+
+	0x254796AC:  90DF0024  stw r6,36(r31)
+	  42: GETL       	R6, t34
+	  43: GETL       	R31, t36
+	  44: ADDL       	$0x24, t36
+	  45: STL       	t34, (t36)
+	  46: INCEIPL       	$4
+
+	0x254796B0:  41860018  bc 12,6,0x254796C8
+	  47: Js06o       	$0x254796C8
+
+
+. 0 25479688 44
+. 83 BF 00 0C 38 C0 00 00 93 9F 00 34 81 7D 00 3C 93 7F 00 30 2C 8B 00 00 93 7F 00 40 93 7F 00 28 93 7F 00 3C 90 DF 00 24 41 86 00 18
+
+==== BB 1033 (0x254796B4) approx BBs exec'd 0 ====
+
+	0x254796B4:  810B0004  lwz r8,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x254796B8:  809D0040  lwz r4,64(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x40, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x254796BC:  911F0020  stw r8,32(r31)
+	  10: GETL       	R8, t8
+	  11: GETL       	R31, t10
+	  12: ADDL       	$0x20, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0x254796C0:  80C40004  lwz r6,4(r4)
+	  15: GETL       	R4, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R6
+	  19: INCEIPL       	$4
+
+	0x254796C4:  90DF0024  stw r6,36(r31)
+	  20: GETL       	R6, t16
+	  21: GETL       	R31, t18
+	  22: ADDL       	$0x24, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x254796C8:  2E030000  cmpi cr4,r3,0
+	  25: GETL       	R3, t20
+	  26: CMP0L       	t20, t22  (-rSo)
+	  27: ICRFL       	t22, $0x4, CR
+	  28: INCEIPL       	$4
+
+	0x254796CC:  41920010  bc 12,18,0x254796DC
+	  29: Js18o       	$0x254796DC
+
+
+. 0 254796B4 28
+. 81 0B 00 04 80 9D 00 40 91 1F 00 20 80 C4 00 04 90 DF 00 24 2E 03 00 00 41 92 00 10
+
+==== BB 1034 (0x254796DC) approx BBs exec'd 0 ====
+
+	0x254796DC:  821E04C8  lwz r16,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R16
+	   4: INCEIPL       	$4
+
+	0x254796E0:  3ABF0008  addi r21,r31,8
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R21
+	   8: INCEIPL       	$4
+
+	0x254796E4:  3A400002  li r18,2
+	   9: MOVL       	$0x2, t6
+	  10: PUTL       	t6, R18
+	  11: INCEIPL       	$4
+
+	0x254796E8:  80B50020  lwz r5,32(r21)
+	  12: GETL       	R21, t8
+	  13: ADDL       	$0x20, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0x254796EC:  7FB6EB78  or r22,r29,r29
+	  17: GETL       	R29, t12
+	  18: PUTL       	t12, R22
+	  19: INCEIPL       	$4
+
+	0x254796F0:  81750018  lwz r11,24(r21)
+	  20: GETL       	R21, t14
+	  21: ADDL       	$0x18, t14
+	  22: LDL       	(t14), t16
+	  23: PUTL       	t16, R11
+	  24: INCEIPL       	$4
+
+	0x254796F4:  2C050000  cmpi cr0,r5,0
+	  25: GETL       	R5, t18
+	  26: CMP0L       	t18, t20  (-rSo)
+	  27: ICRFL       	t20, $0x0, CR
+	  28: INCEIPL       	$4
+
+	0x254796F8:  8115001C  lwz r8,28(r21)
+	  29: GETL       	R21, t22
+	  30: ADDL       	$0x1C, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R8
+	  33: INCEIPL       	$4
+
+	0x254796FC:  7D695B78  or r9,r11,r11
+	  34: GETL       	R11, t26
+	  35: PUTL       	t26, R9
+	  36: INCEIPL       	$4
+
+	0x25479700:  833D0000  lwz r25,0(r29)
+	  37: GETL       	R29, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R25
+	  40: INCEIPL       	$4
+
+	0x25479704:  7F0B4214  add r24,r11,r8
+	  41: GETL       	R11, t32
+	  42: GETL       	R8, t34
+	  43: ADDL       	t32, t34
+	  44: PUTL       	t34, R24
+	  45: INCEIPL       	$4
+
+	0x25479708:  418200BC  bc 12,2,0x254797C4
+	  46: Js02o       	$0x254797C4
+
+
+. 0 254796DC 48
+. 82 1E 04 C8 3A BF 00 08 3A 40 00 02 80 B5 00 20 7F B6 EB 78 81 75 00 18 2C 05 00 00 81 15 00 1C 7D 69 5B 78 83 3D 00 00 7F 0B 42 14 41 82 00 BC
+
+==== BB 1035 (0x254797C4) approx BBs exec'd 0 ====
+
+	0x254797C4:  815D00C0  lwz r10,192(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254797C8:  38E00000  li r7,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x254797CC:  807D0038  lwz r3,56(r29)
+	   8: GETL       	R29, t6
+	   9: ADDL       	$0x38, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0x254797D0:  2F8A0000  cmpi cr7,r10,0
+	  13: GETL       	R10, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x254797D4:  82630004  lwz r19,4(r3)
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0x4, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R19
+	  21: INCEIPL       	$4
+
+	0x254797D8:  419E0008  bc 12,30,0x254797E0
+	  22: Js30o       	$0x254797E0
+
+
+. 0 254797C4 24
+. 81 5D 00 C0 38 E0 00 00 80 7D 00 38 2F 8A 00 00 82 63 00 04 41 9E 00 08
+
+==== BB 1036 (0x254797DC) approx BBs exec'd 0 ====
+
+	0x254797DC:  80EA0004  lwz r7,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254797E0:  3C00AAAA  lis r0,-21846
+	   5: MOVL       	$0xAAAA0000, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x254797E4:  600CAAAB  ori r12,r0,0xAAAB
+	   8: MOVL       	$0xAAAAAAAB, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0x254797E8:  7EE86016  mulhwu r23,r8,r12
+	  11: GETL       	R8, t8
+	  12: GETL       	R12, t10
+	  13: UMULHL       	t8, t10
+	  14: PUTL       	t10, R23
+	  15: INCEIPL       	$4
+
+	0x254797EC:  56E9E8FE  rlwinm r9,r23,29,3,31
+	  16: GETL       	R23, t12
+	  17: SHRL       	$0x3, t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0x254797F0:  7C893840  cmpl cr1,r9,r7
+	  20: GETL       	R9, t14
+	  21: GETL       	R7, t16
+	  22: CMPUL       	t14, t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x1, CR
+	  24: INCEIPL       	$4
+
+	0x254797F4:  40850008  bc 4,5,0x254797FC
+	  25: Jc05o       	$0x254797FC
+
+
+. 0 254797DC 28
+. 80 EA 00 04 3C 00 AA AA 60 0C AA AB 7E E8 60 16 56 E9 E8 FE 7C 89 38 40 40 85 00 08
+
+==== BB 1037 (0x254797F8) approx BBs exec'd 0 ====
+
+	0x254797F8:  7CE93B78  or r9,r7,r7
+	   0: GETL       	R7, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x254797FC:  3B7001B8  addi r27,r16,440
+	   3: GETL       	R16, t2
+	   4: ADDL       	$0x1B8, t2
+	   5: PUTL       	t2, R27
+	   6: INCEIPL       	$4
+
+	0x25479800:  1CE9000C  mulli r7,r9,12
+	   7: GETL       	R9, t4
+	   8: MULL       	$0xC, t4
+	   9: PUTL       	t4, R7
+	  10: INCEIPL       	$4
+
+	0x25479804:  7E1DD800  cmp cr4,r29,r27
+	  11: GETL       	R29, t6
+	  12: GETL       	R27, t8
+	  13: CMPL       	t6, t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x4, CR
+	  15: INCEIPL       	$4
+
+	0x25479808:  7F8B3A14  add r28,r11,r7
+	  16: GETL       	R11, t12
+	  17: GETL       	R7, t14
+	  18: ADDL       	t12, t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0x2547980C:  41920018  bc 12,18,0x25479824
+	  21: Js18o       	$0x25479824
+
+
+. 0 254797F8 24
+. 7C E9 3B 78 3B 70 01 B8 1C E9 00 0C 7E 1D D8 00 7F 8B 3A 14 41 92 00 18
+
+==== BB 1038 (0x25479810) approx BBs exec'd 0 ====
+
+	0x25479810:  2F190000  cmpi cr6,r25,0
+	   0: GETL       	R25, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25479814:  409A01CC  bc 4,26,0x254799E0
+	   4: Jc26o       	$0x254799E0
+
+
+. 0 25479810 8
+. 2F 19 00 00 40 9A 01 CC
+
+==== BB 1039 (0x25479818) approx BBs exec'd 0 ====
+
+	0x25479818:  80DD011C  lwz r6,284(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x11C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547981C:  2C060000  cmpi cr0,r6,0
+	   5: GETL       	R6, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25479820:  418201C0  bc 12,2,0x254799E0
+	   9: Js02o       	$0x254799E0
+
+
+. 0 25479818 12
+. 80 DD 01 1C 2C 06 00 00 41 82 01 C0
+
+==== BB 1040 (0x25479824) approx BBs exec'd 0 ====
+
+	0x25479824:  813D00E4  lwz r9,228(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xE4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25479828:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547982C:  419E0280  bc 12,30,0x25479AAC
+	   9: Js30o       	$0x25479AAC
+
+
+. 0 25479824 12
+. 81 3D 00 E4 2F 89 00 00 41 9E 02 80
+
+==== BB 1041 (0x25479830) approx BBs exec'd 0 ====
+
+	0x25479830:  7C9CC040  cmpl cr1,r28,r24
+	   0: GETL       	R28, t0
+	   1: GETL       	R24, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25479834:  82890004  lwz r20,4(r9)
+	   5: GETL       	R9, t6
+	   6: ADDL       	$0x4, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R20
+	   9: INCEIPL       	$4
+
+	0x25479838:  40A4FEE8  bc 5,4,0x25479720
+	  10: Jc04o       	$0x25479720
+
+
+. 0 25479830 12
+. 7C 9C C0 40 82 89 00 04 40 A4 FE E8
+
+==== BB 1042 (0x2547983C) approx BBs exec'd 0 ====
+
+	0x2547983C:  819C0004  lwz r12,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x25479840:  81560188  lwz r10,392(r22)
+	   5: GETL       	R22, t4
+	   6: ADDL       	$0x188, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x25479844:  5597C23E  rlwinm r23,r12,24,8,31
+	  10: GETL       	R12, t8
+	  11: SHRL       	$0x8, t8
+	  12: PUTL       	t8, R23
+	  13: INCEIPL       	$4
+
+	0x25479848:  559D063E  rlwinm r29,r12,0,24,31
+	  14: GETL       	R12, t10
+	  15: ANDL       	$0xFF, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0x2547984C:  56EB083C  rlwinm r11,r23,1,0,30
+	  18: GETL       	R23, t12
+	  19: SHLL       	$0x1, t12
+	  20: PUTL       	t12, R11
+	  21: INCEIPL       	$4
+
+	0x25479850:  2E1D0016  cmpi cr4,r29,22
+	  22: GETL       	R29, t14
+	  23: MOVL       	$0x16, t18
+	  24: CMPL       	t14, t18, t16  (-rSo)
+	  25: ICRFL       	t16, $0x4, CR
+	  26: INCEIPL       	$4
+
+	0x25479854:  7CABA22E  lhzx r5,r11,r20
+	  27: GETL       	R20, t20
+	  28: GETL       	R11, t22
+	  29: ADDL       	t22, t20
+	  30: LDW       	(t20), t24
+	  31: PUTL       	t24, R5
+	  32: INCEIPL       	$4
+
+	0x25479858:  56E32036  rlwinm r3,r23,4,0,27
+	  33: GETL       	R23, t26
+	  34: SHLL       	$0x4, t26
+	  35: PUTL       	t26, R3
+	  36: INCEIPL       	$4
+
+	0x2547985C:  817C0000  lwz r11,0(r28)
+	  37: GETL       	R28, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R11
+	  40: INCEIPL       	$4
+
+	0x25479860:  7F639A14  add r27,r3,r19
+	  41: GETL       	R3, t32
+	  42: GETL       	R19, t34
+	  43: ADDL       	t32, t34
+	  44: PUTL       	t34, R27
+	  45: INCEIPL       	$4
+
+	0x25479864:  54BA2376  rlwinm r26,r5,4,13,27
+	  46: GETL       	R5, t36
+	  47: ROLL       	$0x4, t36
+	  48: ANDL       	$0x7FFF0, t36
+	  49: PUTL       	t36, R26
+	  50: INCEIPL       	$4
+
+	0x25479868:  937F0050  stw r27,80(r31)
+	  51: GETL       	R27, t38
+	  52: GETL       	R31, t40
+	  53: ADDL       	$0x50, t40
+	  54: STL       	t38, (t40)
+	  55: INCEIPL       	$4
+
+	0x2547986C:  7D4AD214  add r10,r10,r26
+	  56: GETL       	R10, t42
+	  57: GETL       	R26, t44
+	  58: ADDL       	t42, t44
+	  59: PUTL       	t44, R10
+	  60: INCEIPL       	$4
+
+	0x25479870:  7EEBCA14  add r23,r11,r25
+	  61: GETL       	R11, t46
+	  62: GETL       	R25, t48
+	  63: ADDL       	t46, t48
+	  64: PUTL       	t48, R23
+	  65: INCEIPL       	$4
+
+	0x25479874:  7F66DB78  or r6,r27,r27
+	  66: GETL       	R27, t50
+	  67: PUTL       	t50, R6
+	  68: INCEIPL       	$4
+
+	0x25479878:  3B400000  li r26,0
+	  69: MOVL       	$0x0, t52
+	  70: PUTL       	t52, R26
+	  71: INCEIPL       	$4
+
+	0x2547987C:  419201DC  bc 12,18,0x25479A58
+	  72: Js18o       	$0x25479A58
+
+
+. 0 2547983C 68
+. 81 9C 00 04 81 56 01 88 55 97 C2 3E 55 9D 06 3E 56 EB 08 3C 2E 1D 00 16 7C AB A2 2E 56 E3 20 36 81 7C 00 00 7F 63 9A 14 54 BA 23 76 93 7F 00 50 7D 4A D2 14 7E EB CA 14 7F 66 DB 78 3B 40 00 00 41 92 01 DC
+
+==== BB 1043 (0x25479880) approx BBs exec'd 0 ====
+
+	0x25479880:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25479884:  419A0148  bc 12,26,0x254799CC
+	   4: Js26o       	$0x254799CC
+
+
+. 0 25479880 8
+. 2F 1D 00 00 41 9A 01 48
+
+==== BB 1044 (0x25479888) approx BBs exec'd 0 ====
+
+	0x25479888:  889B000C  lbz r4,12(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547988C:  5488E13F  rlwinm. r8,r4,28,4,31
+	   5: GETL       	R4, t4
+	   6: SHRL       	$0x4, t4
+	   7: PUTL       	t4, R8
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x25479890:  4182062C  bc 12,2,0x25479EBC
+	  11: Js02o       	$0x25479EBC
+
+
+. 0 25479888 12
+. 88 9B 00 0C 54 88 E1 3F 41 82 06 2C
+
+==== BB 1045 (0x25479894) approx BBs exec'd 0 ====
+
+	0x25479894:  8B5B000C  lbz r26,12(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479898:  5749E13F  rlwinm. r9,r26,28,4,31
+	   5: GETL       	R26, t4
+	   6: SHRL       	$0x4, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2547989C:  418201B4  bc 12,2,0x25479A50
+	  11: Js02o       	$0x25479A50
+
+
+. 0 25479894 12
+. 8B 5B 00 0C 57 49 E1 3F 41 82 01 B4
+
+==== BB 1046 (0x254798A0) approx BBs exec'd 0 ====
+
+	0x254798A0:  809F000C  lwz r4,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254798A4:  2E1D0015  cmpi cr4,r29,21
+	   5: GETL       	R29, t4
+	   6: MOVL       	$0x15, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x4, CR
+	   9: INCEIPL       	$4
+
+	0x254798A8:  2C9D000A  cmpi cr1,r29,10
+	  10: GETL       	R29, t10
+	  11: MOVL       	$0xA, t14
+	  12: CMPL       	t10, t14, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x254798AC:  2F1D0013  cmpi cr6,r29,19
+	  15: GETL       	R29, t16
+	  16: MOVL       	$0x13, t20
+	  17: CMPL       	t16, t20, t18  (-rSo)
+	  18: ICRFL       	t18, $0x6, CR
+	  19: INCEIPL       	$4
+
+	0x254798B0:  80C40208  lwz r6,520(r4)
+	  20: GETL       	R4, t22
+	  21: ADDL       	$0x208, t22
+	  22: LDL       	(t22), t24
+	  23: PUTL       	t24, R6
+	  24: INCEIPL       	$4
+
+	0x254798B4:  7F86D800  cmp cr7,r6,r27
+	  25: GETL       	R6, t26
+	  26: GETL       	R27, t28
+	  27: CMPL       	t26, t28, t30  (-rSo)
+	  28: ICRFL       	t30, $0x7, CR
+	  29: INCEIPL       	$4
+
+	0x254798B8:  419E062C  bc 12,30,0x25479EE4
+	  30: Js30o       	$0x25479EE4
+
+
+. 0 254798A0 28
+. 80 9F 00 0C 2E 1D 00 15 2C 9D 00 0A 2F 1D 00 13 80 C4 02 08 7F 86 D8 00 41 9E 06 2C
+
+==== BB 1047 (0x254798BC) approx BBs exec'd 0 ====
+
+	0x254798BC:  7CC00026  mfcr r6
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R6
+	   2: INCEIPL       	$4
+
+	0x254798C0:  54C69FFE  rlwinm r6,r6,19,31,31
+	   3: GETL       	R6, t2
+	   4: ROLL       	$0x13, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R6
+	   7: INCEIPL       	$4
+
+	0x254798C4:  39000000  li r8,0
+	   8: MOVL       	$0x0, t4
+	   9: PUTL       	t4, R8
+	  10: INCEIPL       	$4
+
+	0x254798C8:  7F400026  mfcr r26
+	  11: GETL       	CR, t6
+	  12: PUTL       	t6, R26
+	  13: INCEIPL       	$4
+
+	0x254798CC:  575A3FFE  rlwinm r26,r26,7,31,31
+	  14: GETL       	R26, t8
+	  15: ROLL       	$0x7, t8
+	  16: ANDL       	$0x1, t8
+	  17: PUTL       	t8, R26
+	  18: INCEIPL       	$4
+
+	0x254798D0:  7CCBD379  or. r11,r6,r26
+	  19: GETL       	R6, t10
+	  20: GETL       	R26, t12
+	  21: ORL       	t12, t10
+	  22: PUTL       	t10, R11
+	  23: CMP0L       	t10, t14  (-rSo)
+	  24: ICRFL       	t14, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x254798D4:  40820130  bc 4,2,0x25479A04
+	  26: Jc02o       	$0x25479A04
+
+
+. 0 254798BC 28
+. 7C C0 00 26 54 C6 9F FE 39 00 00 00 7F 40 00 26 57 5A 3F FE 7C CB D3 79 40 82 01 30
+
+==== BB 1048 (0x254798D8) approx BBs exec'd 0 ====
+
+	0x254798D8:  387DFFBC  addi r3,r29,-68
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xFFFFFFBC, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x254798DC:  6BA50002  xori r5,r29,0x2
+	   4: GETL       	R29, t2
+	   5: XORL       	$0x2, t2
+	   6: PUTL       	t2, R5
+	   7: INCEIPL       	$4
+
+	0x254798E0:  21650000  subfic r11,r5,0
+	   8: GETL       	R5, t4
+	   9: MOVL       	$0x0, t6
+	  10: SBBL       	t4, t6  (-wCa)
+	  11: PUTL       	t6, R11
+	  12: INCEIPL       	$4
+
+	0x254798E4:  7CAB2914  adde r5,r11,r5
+	  13: GETL       	R11, t8
+	  14: GETL       	R5, t10
+	  15: ADCL       	t8, t10  (-rCa-wCa)
+	  16: PUTL       	t10, R5
+	  17: INCEIPL       	$4
+
+	0x254798E8:  2123000A  subfic r9,r3,10
+	  18: GETL       	R3, t12
+	  19: MOVL       	$0xA, t14
+	  20: SBBL       	t12, t14  (-wCa)
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0x254798EC:  39200000  li r9,0
+	  23: MOVL       	$0x0, t16
+	  24: PUTL       	t16, R9
+	  25: INCEIPL       	$4
+
+	0x254798F0:  7D294914  adde r9,r9,r9
+	  26: GETL       	R9, t18
+	  27: GETL       	R9, t20
+	  28: ADCL       	t18, t20  (-rCa-wCa)
+	  29: PUTL       	t20, R9
+	  30: INCEIPL       	$4
+
+	0x254798F4:  7D2B2B79  or. r11,r9,r5
+	  31: GETL       	R9, t22
+	  32: GETL       	R5, t24
+	  33: ORL       	t24, t22
+	  34: PUTL       	t22, R11
+	  35: CMP0L       	t22, t26  (-rSo)
+	  36: ICRFL       	t26, $0x0, CR
+	  37: INCEIPL       	$4
+
+	0x254798F8:  4082010C  bc 4,2,0x25479A04
+	  38: Jc02o       	$0x25479A04
+
+
+. 0 254798D8 36
+. 38 7D FF BC 6B A5 00 02 21 65 00 00 7C AB 29 14 21 23 00 0A 39 20 00 00 7D 29 49 14 7D 2B 2B 79 40 82 01 0C
+
+==== BB 1049 (0x254798FC) approx BBs exec'd 0 ====
+
+	0x254798FC:  7C000026  mfcr r0
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x25479900:  5400DFFE  rlwinm r0,r0,27,31,31
+	   3: GETL       	R0, t2
+	   4: ROLL       	$0x1B, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R0
+	   7: INCEIPL       	$4
+
+	0x25479904:  2C8A0000  cmpi cr1,r10,0
+	   8: GETL       	R10, t4
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x25479908:  93640208  stw r27,520(r4)
+	  12: GETL       	R27, t8
+	  13: GETL       	R4, t10
+	  14: ADDL       	$0x208, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x2547990C:  38E00000  li r7,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R7
+	  19: INCEIPL       	$4
+
+	0x25479910:  540C083C  rlwinm r12,r0,1,0,30
+	  20: GETL       	R0, t14
+	  21: SHLL       	$0x1, t14
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0x25479914:  39600001  li r11,1
+	  24: MOVL       	$0x1, t16
+	  25: PUTL       	t16, R11
+	  26: INCEIPL       	$4
+
+	0x25479918:  7D884378  or r8,r12,r8
+	  27: GETL       	R12, t18
+	  28: GETL       	R8, t20
+	  29: ORL       	t20, t18
+	  30: PUTL       	t18, R8
+	  31: INCEIPL       	$4
+
+	0x2547991C:  9104020C  stw r8,524(r4)
+	  32: GETL       	R8, t22
+	  33: GETL       	R4, t24
+	  34: ADDL       	$0x20C, t24
+	  35: STL       	t22, (t24)
+	  36: INCEIPL       	$4
+
+	0x25479920:  41860018  bc 12,6,0x25479938
+	  37: Js06o       	$0x25479938
+
+
+. 0 254798FC 40
+. 7C 00 00 26 54 00 DF FE 2C 8A 00 00 93 64 02 08 38 E0 00 00 54 0C 08 3C 39 60 00 01 7D 88 43 78 91 04 02 0C 41 86 00 18
+
+==== BB 1050 (0x25479924) approx BBs exec'd 0 ====
+
+	0x25479924:  834A0004  lwz r26,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479928:  2E1A0000  cmpi cr4,r26,0
+	   5: GETL       	R26, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x2547992C:  4192000C  bc 12,18,0x25479938
+	   9: Js18o       	$0x25479938
+
+
+. 0 25479924 12
+. 83 4A 00 04 2E 1A 00 00 41 92 00 0C
+
+==== BB 1051 (0x25479930) approx BBs exec'd 0 ====
+
+	0x25479930:  7D475378  or r7,r10,r10
+	   0: GETL       	R10, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x25479934:  39600000  li r11,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x25479938:  80DF0050  lwz r6,80(r31)
+	   6: GETL       	R31, t4
+	   7: ADDL       	$0x50, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R6
+	  10: INCEIPL       	$4
+
+	0x2547993C:  7D695B78  or r9,r11,r11
+	  11: GETL       	R11, t8
+	  12: PUTL       	t8, R9
+	  13: INCEIPL       	$4
+
+	0x25479940:  815F0008  lwz r10,8(r31)
+	  14: GETL       	R31, t10
+	  15: ADDL       	$0x8, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x25479944:  80A60000  lwz r5,0(r6)
+	  19: GETL       	R6, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R5
+	  22: INCEIPL       	$4
+
+	0x25479948:  80DF0010  lwz r6,16(r31)
+	  23: GETL       	R31, t18
+	  24: ADDL       	$0x10, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R6
+	  27: INCEIPL       	$4
+
+	0x2547994C:  7C655214  add r3,r5,r10
+	  28: GETL       	R5, t22
+	  29: GETL       	R10, t24
+	  30: ADDL       	t22, t24
+	  31: PUTL       	t24, R3
+	  32: INCEIPL       	$4
+
+	0x25479950:  38BF0050  addi r5,r31,80
+	  33: GETL       	R31, t26
+	  34: ADDL       	$0x50, t26
+	  35: PUTL       	t26, R5
+	  36: INCEIPL       	$4
+
+	0x25479954:  39400000  li r10,0
+	  37: MOVL       	$0x0, t28
+	  38: PUTL       	t28, R10
+	  39: INCEIPL       	$4
+
+	0x25479958:  4BFFEC4D  bl 0x254785A4
+	  40: MOVL       	$0x2547995C, t30
+	  41: PUTL       	t30, LR
+	  42: JMPo-c       	$0x254785A4  ($4)
+
+
+. 0 25479930 44
+. 7D 47 53 78 39 60 00 00 80 DF 00 50 7D 69 5B 78 81 5F 00 08 80 A6 00 00 80 DF 00 10 7C 65 52 14 38 BF 00 50 39 40 00 00 4B FF EC 4D
+
+==== BB 1052 _dl_lookup_symbol_x(0x254785A4) approx BBs exec'd 0 ====
+
+	0x254785A4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254785A8:  7D800026  mfcr r12
+	   3: GETL       	CR, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0x254785AC:  9421FF60  stwu r1,-160(r1)
+	   6: GETL       	R1, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xFFFFFF60, t6
+	   9: PUTL       	t6, R1
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x254785B0:  4801EA51  bl 0x25497000
+	  12: MOVL       	$0x254785B4, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254785A4 16
+. 7C 08 02 A6 7D 80 00 26 94 21 FF 60 48 01 EA 51
+
+==== BB 1053 (0x254785B4) approx BBs exec'd 0 ====
+
+	0x254785B4:  9261006C  stw r19,108(r1)
+	   0: GETL       	R19, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x6C, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254785B8:  7C932378  or r19,r4,r4
+	   5: GETL       	R4, t4
+	   6: PUTL       	t4, R19
+	   7: INCEIPL       	$4
+
+	0x254785BC:  92810070  stw r20,112(r1)
+	   8: GETL       	R20, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x70, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x254785C0:  7D144378  or r20,r8,r8
+	  13: GETL       	R8, t10
+	  14: PUTL       	t10, R20
+	  15: INCEIPL       	$4
+
+	0x254785C4:  92A10074  stw r21,116(r1)
+	  16: GETL       	R21, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x74, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x254785C8:  7CD53378  or r21,r6,r6
+	  21: GETL       	R6, t16
+	  22: PUTL       	t16, R21
+	  23: INCEIPL       	$4
+
+	0x254785CC:  92C10078  stw r22,120(r1)
+	  24: GETL       	R22, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x78, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x254785D0:  7CB62B78  or r22,r5,r5
+	  29: GETL       	R5, t22
+	  30: PUTL       	t22, R22
+	  31: INCEIPL       	$4
+
+	0x254785D4:  92E1007C  stw r23,124(r1)
+	  32: GETL       	R23, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x7C, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x254785D8:  7D374B78  or r23,r9,r9
+	  37: GETL       	R9, t28
+	  38: PUTL       	t28, R23
+	  39: INCEIPL       	$4
+
+	0x254785DC:  93010080  stw r24,128(r1)
+	  40: GETL       	R24, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x80, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x254785E0:  7CF83B78  or r24,r7,r7
+	  45: GETL       	R7, t34
+	  46: PUTL       	t34, R24
+	  47: INCEIPL       	$4
+
+	0x254785E4:  93210084  stw r25,132(r1)
+	  48: GETL       	R25, t36
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x84, t38
+	  51: STL       	t36, (t38)
+	  52: INCEIPL       	$4
+
+	0x254785E8:  7C791B78  or r25,r3,r3
+	  53: GETL       	R3, t40
+	  54: PUTL       	t40, R25
+	  55: INCEIPL       	$4
+
+	0x254785EC:  93410088  stw r26,136(r1)
+	  56: GETL       	R26, t42
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x88, t44
+	  59: STL       	t42, (t44)
+	  60: INCEIPL       	$4
+
+	0x254785F0:  7D5A5378  or r26,r10,r10
+	  61: GETL       	R10, t46
+	  62: PUTL       	t46, R26
+	  63: INCEIPL       	$4
+
+	0x254785F4:  93810090  stw r28,144(r1)
+	  64: GETL       	R28, t48
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x90, t50
+	  67: STL       	t48, (t50)
+	  68: INCEIPL       	$4
+
+	0x254785F8:  3B800000  li r28,0
+	  69: MOVL       	$0x0, t52
+	  70: PUTL       	t52, R28
+	  71: INCEIPL       	$4
+
+	0x254785FC:  93C10098  stw r30,152(r1)
+	  72: GETL       	R30, t54
+	  73: GETL       	R1, t56
+	  74: ADDL       	$0x98, t56
+	  75: STL       	t54, (t56)
+	  76: INCEIPL       	$4
+
+	0x25478600:  7FC802A6  mflr r30
+	  77: GETL       	LR, t58
+	  78: PUTL       	t58, R30
+	  79: INCEIPL       	$4
+
+	0x25478604:  93E1009C  stw r31,156(r1)
+	  80: GETL       	R31, t60
+	  81: GETL       	R1, t62
+	  82: ADDL       	$0x9C, t62
+	  83: STL       	t60, (t62)
+	  84: INCEIPL       	$4
+
+	0x25478608:  7C3F0B78  or r31,r1,r1
+	  85: GETL       	R1, t64
+	  86: PUTL       	t64, R31
+	  87: INCEIPL       	$4
+
+	0x2547860C:  92210064  stw r17,100(r1)
+	  88: GETL       	R17, t66
+	  89: GETL       	R1, t68
+	  90: ADDL       	$0x64, t68
+	  91: STL       	t66, (t68)
+	  92: INCEIPL       	$4
+
+	0x25478610:  92410068  stw r18,104(r1)
+	  93: GETL       	R18, t70
+	  94: GETL       	R1, t72
+	  95: ADDL       	$0x68, t72
+	  96: STL       	t70, (t72)
+	  97: INCEIPL       	$4
+
+	0x25478614:  9361008C  stw r27,140(r1)
+	  98: GETL       	R27, t74
+	  99: GETL       	R1, t76
+	 100: ADDL       	$0x8C, t76
+	 101: STL       	t74, (t76)
+	 102: INCEIPL       	$4
+
+	0x25478618:  93A10094  stw r29,148(r1)
+	 103: GETL       	R29, t78
+	 104: GETL       	R1, t80
+	 105: ADDL       	$0x94, t80
+	 106: STL       	t78, (t80)
+	 107: INCEIPL       	$4
+
+	0x2547861C:  900100A4  stw r0,164(r1)
+	 108: GETL       	R0, t82
+	 109: GETL       	R1, t84
+	 110: ADDL       	$0xA4, t84
+	 111: STL       	t82, (t84)
+	 112: INCEIPL       	$4
+
+	0x25478620:  91810060  stw r12,96(r1)
+	 113: GETL       	R12, t86
+	 114: GETL       	R1, t88
+	 115: ADDL       	$0x60, t88
+	 116: STL       	t86, (t88)
+	 117: INCEIPL       	$4
+
+	0x25478624:  88030000  lbz r0,0(r3)
+	 118: GETL       	R3, t90
+	 119: LDB       	(t90), t92
+	 120: PUTL       	t92, R0
+	 121: INCEIPL       	$4
+
+	0x25478628:  2F800000  cmpi cr7,r0,0
+	 122: GETL       	R0, t94
+	 123: CMP0L       	t94, t96  (-rSo)
+	 124: ICRFL       	t96, $0x7, CR
+	 125: INCEIPL       	$4
+
+	0x2547862C:  419E0018  bc 12,30,0x25478644
+	 126: Js30o       	$0x25478644
+
+
+. 0 254785B4 124
+. 92 61 00 6C 7C 93 23 78 92 81 00 70 7D 14 43 78 92 A1 00 74 7C D5 33 78 92 C1 00 78 7C B6 2B 78 92 E1 00 7C 7D 37 4B 78 93 01 00 80 7C F8 3B 78 93 21 00 84 7C 79 1B 78 93 41 00 88 7D 5A 53 78 93 81 00 90 3B 80 00 00 93 C1 00 98 7F C8 02 A6 93 E1 00 9C 7C 3F 0B 78 92 21 00 64 92 41 00 68 93 61 00 8C 93 A1 00 94 90 01 00 A4 91 81 00 60 88 03 00 00 2F 80 00 00 41 9E 00 18
+
+==== BB 1054 (0x25478630) approx BBs exec'd 0 ====
+
+	0x25478630:  89630001  lbz r11,1(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25478634:  7C1C0378  or r28,r0,r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x25478638:  39230001  addi r9,r3,1
+	   8: GETL       	R3, t6
+	   9: ADDL       	$0x1, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0x2547863C:  2C0B0000  cmpi cr0,r11,0
+	  12: GETL       	R11, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x0, CR
+	  15: INCEIPL       	$4
+
+	0x25478640:  4082015C  bc 4,2,0x2547879C
+	  16: Jc02o       	$0x2547879C
+
+
+. 0 25478630 20
+. 89 63 00 01 7C 1C 03 78 39 23 00 01 2C 0B 00 00 40 82 01 5C
+
+==== BB 1055 (0x2547879C) approx BBs exec'd 0 ====
+
+	0x2547879C:  89490001  lbz r10,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254787A0:  54032036  rlwinm r3,r0,4,0,27
+	   5: GETL       	R0, t4
+	   6: SHLL       	$0x4, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x254787A4:  7F835A14  add r28,r3,r11
+	   9: GETL       	R3, t6
+	  10: GETL       	R11, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x254787A8:  39290001  addi r9,r9,1
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x254787AC:  2C8A0000  cmpi cr1,r10,0
+	  18: GETL       	R10, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0x254787B0:  41A6FE94  bc 13,6,0x25478644
+	  22: Js06o       	$0x25478644
+
+
+. 0 2547879C 24
+. 89 49 00 01 54 03 20 36 7F 83 5A 14 39 29 00 01 2C 8A 00 00 41 A6 FE 94
+
+==== BB 1056 (0x254787B4) approx BBs exec'd 0 ====
+
+	0x254787B4:  89690001  lbz r11,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254787B8:  57842036  rlwinm r4,r28,4,0,27
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x4, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x254787BC:  7F845214  add r28,r4,r10
+	   9: GETL       	R4, t6
+	  10: GETL       	R10, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x254787C0:  39290001  addi r9,r9,1
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x254787C4:  2E0B0000  cmpi cr4,r11,0
+	  18: GETL       	R11, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x4, CR
+	  21: INCEIPL       	$4
+
+	0x254787C8:  41B2FE7C  bc 13,18,0x25478644
+	  22: Js18o       	$0x25478644
+
+
+. 0 254787B4 24
+. 89 69 00 01 57 84 20 36 7F 84 52 14 39 29 00 01 2E 0B 00 00 41 B2 FE 7C
+
+==== BB 1057 (0x254787CC) approx BBs exec'd 0 ====
+
+	0x254787CC:  89490001  lbz r10,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254787D0:  57852036  rlwinm r5,r28,4,0,27
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x4, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x254787D4:  7F855A14  add r28,r5,r11
+	   9: GETL       	R5, t6
+	  10: GETL       	R11, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x254787D8:  39290001  addi r9,r9,1
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x254787DC:  2F0A0000  cmpi cr6,r10,0
+	  18: GETL       	R10, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x254787E0:  41BAFE64  bc 13,26,0x25478644
+	  22: Js26o       	$0x25478644
+
+
+. 0 254787CC 24
+. 89 49 00 01 57 85 20 36 7F 85 5A 14 39 29 00 01 2F 0A 00 00 41 BA FE 64
+
+==== BB 1058 (0x25478644) approx BBs exec'd 0 ====
+
+	0x25478644:  825E04C8  lwz r18,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R18
+	   4: INCEIPL       	$4
+
+	0x25478648:  2E1A0000  cmpi cr4,r26,0
+	   5: GETL       	R26, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x2547864C:  3B600000  li r27,0
+	   9: MOVL       	$0x0, t8
+	  10: PUTL       	t8, R27
+	  11: INCEIPL       	$4
+
+	0x25478650:  39400000  li r10,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R10
+	  14: INCEIPL       	$4
+
+	0x25478654:  83B201A8  lwz r29,424(r18)
+	  15: GETL       	R18, t12
+	  16: ADDL       	$0x1A8, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R29
+	  19: INCEIPL       	$4
+
+	0x25478658:  937F0048  stw r27,72(r31)
+	  20: GETL       	R27, t16
+	  21: GETL       	R31, t18
+	  22: ADDL       	$0x48, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x2547865C:  3B600000  li r27,0
+	  25: MOVL       	$0x0, t20
+	  26: PUTL       	t20, R27
+	  27: INCEIPL       	$4
+
+	0x25478660:  397D0001  addi r11,r29,1
+	  28: GETL       	R29, t22
+	  29: ADDL       	$0x1, t22
+	  30: PUTL       	t22, R11
+	  31: INCEIPL       	$4
+
+	0x25478664:  915F004C  stw r10,76(r31)
+	  32: GETL       	R10, t24
+	  33: GETL       	R31, t26
+	  34: ADDL       	$0x4C, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x25478668:  917201A8  stw r11,424(r18)
+	  37: GETL       	R11, t28
+	  38: GETL       	R18, t30
+	  39: ADDL       	$0x1A8, t30
+	  40: STL       	t28, (t30)
+	  41: INCEIPL       	$4
+
+	0x2547866C:  7EBDAB78  or r29,r21,r21
+	  42: GETL       	R21, t32
+	  43: PUTL       	t32, R29
+	  44: INCEIPL       	$4
+
+	0x25478670:  40920538  bc 4,18,0x25478BA8
+	  45: Jc18o       	$0x25478BA8
+
+
+. 0 25478644 48
+. 82 5E 04 C8 2E 1A 00 00 3B 60 00 00 39 40 00 00 83 B2 01 A8 93 7F 00 48 3B 60 00 00 39 7D 00 01 91 5F 00 4C 91 72 01 A8 7E BD AB 78 40 92 05 38
+
+==== BB 1059 (0x25478674) approx BBs exec'd 0 ====
+
+	0x25478674:  81750000  lwz r11,0(r21)
+	   0: GETL       	R21, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x25478678:  2C8B0000  cmpi cr1,r11,0
+	   4: GETL       	R11, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x2547867C:  7F68DB78  or r8,r27,r27
+	   8: GETL       	R27, t8
+	   9: PUTL       	t8, R8
+	  10: INCEIPL       	$4
+
+	0x25478680:  4186004C  bc 12,6,0x254786CC
+	  11: Js06o       	$0x254786CC
+
+
+. 0 25478674 16
+. 81 75 00 00 2C 8B 00 00 7F 68 DB 78 41 86 00 4C
+
+==== BB 1060 (0x25478684) approx BBs exec'd 0 ====
+
+	0x25478684:  80F50000  lwz r7,0(r21)
+	   0: GETL       	R21, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25478688:  3A3F0018  addi r17,r31,24
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x18, t4
+	   6: PUTL       	t4, R17
+	   7: INCEIPL       	$4
+
+	0x2547868C:  80B60000  lwz r5,0(r22)
+	   8: GETL       	R22, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x25478690:  7F23CB78  or r3,r25,r25
+	  12: GETL       	R25, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x25478694:  7F84E378  or r4,r28,r28
+	  15: GETL       	R28, t12
+	  16: PUTL       	t12, R4
+	  17: INCEIPL       	$4
+
+	0x25478698:  38DF0048  addi r6,r31,72
+	  18: GETL       	R31, t14
+	  19: ADDL       	$0x48, t14
+	  20: PUTL       	t14, R6
+	  21: INCEIPL       	$4
+
+	0x2547869C:  7F09C378  or r9,r24,r24
+	  22: GETL       	R24, t16
+	  23: PUTL       	t16, R9
+	  24: INCEIPL       	$4
+
+	0x254786A0:  7EEABB78  or r10,r23,r23
+	  25: GETL       	R23, t18
+	  26: PUTL       	t18, R10
+	  27: INCEIPL       	$4
+
+	0x254786A4:  93410008  stw r26,8(r1)
+	  28: GETL       	R26, t20
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x8, t22
+	  31: STL       	t20, (t22)
+	  32: INCEIPL       	$4
+
+	0x254786A8:  9281000C  stw r20,12(r1)
+	  33: GETL       	R20, t24
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0xC, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0x254786AC:  4BFFFAFD  bl 0x254781A8
+	  38: MOVL       	$0x254786B0, t28
+	  39: PUTL       	t28, LR
+	  40: JMPo-c       	$0x254781A8  ($4)
+
+
+. 0 25478684 44
+. 80 F5 00 00 3A 3F 00 18 80 B6 00 00 7F 23 CB 78 7F 84 E3 78 38 DF 00 48 7F 09 C3 78 7E EA BB 78 93 41 00 08 92 81 00 0C 4B FF FA FD
+
+==== BB 1061 do_lookup_x(0x254781A8) approx BBs exec'd 0 ====
+
+	0x254781A8:  7D6802A6  mflr r11
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x254781AC:  9421FF90  stwu r1,-112(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFF90, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x254781B0:  4801EE51  bl 0x25497000
+	   9: MOVL       	$0x254781B4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254781A8 12
+. 7D 68 02 A6 94 21 FF 90 48 01 EE 51
+
+==== BB 1062 (0x254781B4) approx BBs exec'd 0 ====
+
+	0x254781B4:  93C10068  stw r30,104(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x68, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x254781B8:  80010078  lwz r0,120(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x78, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x254781BC:  7FC802A6  mflr r30
+	  10: GETL       	LR, t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0x254781C0:  91610074  stw r11,116(r1)
+	  13: GETL       	R11, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x74, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x254781C4:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0x254781C8:  91E1002C  stw r15,44(r1)
+	  21: GETL       	R15, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x2C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x254781CC:  92010030  stw r16,48(r1)
+	  26: GETL       	R16, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x30, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0x254781D0:  81E70004  lwz r15,4(r7)
+	  31: GETL       	R7, t24
+	  32: ADDL       	$0x4, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R15
+	  35: INCEIPL       	$4
+
+	0x254781D4:  82070000  lwz r16,0(r7)
+	  36: GETL       	R7, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R16
+	  39: INCEIPL       	$4
+
+	0x254781D8:  91C10028  stw r14,40(r1)
+	  40: GETL       	R14, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x28, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0x254781DC:  7C8E2378  or r14,r4,r4
+	  45: GETL       	R4, t36
+	  46: PUTL       	t36, R14
+	  47: INCEIPL       	$4
+
+	0x254781E0:  92210034  stw r17,52(r1)
+	  48: GETL       	R17, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x34, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0x254781E4:  92A10044  stw r21,68(r1)
+	  53: GETL       	R21, t42
+	  54: GETL       	R1, t44
+	  55: ADDL       	$0x44, t44
+	  56: STL       	t42, (t44)
+	  57: INCEIPL       	$4
+
+	0x254781E8:  7D354B78  or r21,r9,r9
+	  58: GETL       	R9, t46
+	  59: PUTL       	t46, R21
+	  60: INCEIPL       	$4
+
+	0x254781EC:  93410058  stw r26,88(r1)
+	  61: GETL       	R26, t48
+	  62: GETL       	R1, t50
+	  63: ADDL       	$0x58, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0x254781F0:  3120FFFF  addic r9,r0,-1
+	  66: GETL       	R0, t52
+	  67: ADCL       	$0xFFFFFFFF, t52  (-wCa)
+	  68: PUTL       	t52, R9
+	  69: INCEIPL       	$4
+
+	0x254781F4:  7E290110  subfe r17,r9,r0
+	  70: GETL       	R9, t54
+	  71: GETL       	R0, t56
+	  72: SBBL       	t54, t56  (-rCa-wCa)
+	  73: PUTL       	t56, R17
+	  74: INCEIPL       	$4
+
+	0x254781F8:  7D1A4378  or r26,r8,r8
+	  75: GETL       	R8, t58
+	  76: PUTL       	t58, R26
+	  77: INCEIPL       	$4
+
+	0x254781FC:  92410038  stw r18,56(r1)
+	  78: GETL       	R18, t60
+	  79: GETL       	R1, t62
+	  80: ADDL       	$0x38, t62
+	  81: STL       	t60, (t62)
+	  82: INCEIPL       	$4
+
+	0x25478200:  9261003C  stw r19,60(r1)
+	  83: GETL       	R19, t64
+	  84: GETL       	R1, t66
+	  85: ADDL       	$0x3C, t66
+	  86: STL       	t64, (t66)
+	  87: INCEIPL       	$4
+
+	0x25478204:  92810040  stw r20,64(r1)
+	  88: GETL       	R20, t68
+	  89: GETL       	R1, t70
+	  90: ADDL       	$0x40, t70
+	  91: STL       	t68, (t70)
+	  92: INCEIPL       	$4
+
+	0x25478208:  92C10048  stw r22,72(r1)
+	  93: GETL       	R22, t72
+	  94: GETL       	R1, t74
+	  95: ADDL       	$0x48, t74
+	  96: STL       	t72, (t74)
+	  97: INCEIPL       	$4
+
+	0x2547820C:  92E1004C  stw r23,76(r1)
+	  98: GETL       	R23, t76
+	  99: GETL       	R1, t78
+	 100: ADDL       	$0x4C, t78
+	 101: STL       	t76, (t78)
+	 102: INCEIPL       	$4
+
+	0x25478210:  93010050  stw r24,80(r1)
+	 103: GETL       	R24, t80
+	 104: GETL       	R1, t82
+	 105: ADDL       	$0x50, t82
+	 106: STL       	t80, (t82)
+	 107: INCEIPL       	$4
+
+	0x25478214:  93210054  stw r25,84(r1)
+	 108: GETL       	R25, t84
+	 109: GETL       	R1, t86
+	 110: ADDL       	$0x54, t86
+	 111: STL       	t84, (t86)
+	 112: INCEIPL       	$4
+
+	0x25478218:  9361005C  stw r27,92(r1)
+	 113: GETL       	R27, t88
+	 114: GETL       	R1, t90
+	 115: ADDL       	$0x5C, t90
+	 116: STL       	t88, (t90)
+	 117: INCEIPL       	$4
+
+	0x2547821C:  93810060  stw r28,96(r1)
+	 118: GETL       	R28, t92
+	 119: GETL       	R1, t94
+	 120: ADDL       	$0x60, t94
+	 121: STL       	t92, (t94)
+	 122: INCEIPL       	$4
+
+	0x25478220:  93A10064  stw r29,100(r1)
+	 123: GETL       	R29, t96
+	 124: GETL       	R1, t98
+	 125: ADDL       	$0x64, t98
+	 126: STL       	t96, (t98)
+	 127: INCEIPL       	$4
+
+	0x25478224:  93E1006C  stw r31,108(r1)
+	 128: GETL       	R31, t100
+	 129: GETL       	R1, t102
+	 130: ADDL       	$0x6C, t102
+	 131: STL       	t100, (t102)
+	 132: INCEIPL       	$4
+
+	0x25478228:  91810024  stw r12,36(r1)
+	 133: GETL       	R12, t104
+	 134: GETL       	R1, t106
+	 135: ADDL       	$0x24, t106
+	 136: STL       	t104, (t106)
+	 137: INCEIPL       	$4
+
+	0x2547822C:  90610008  stw r3,8(r1)
+	 138: GETL       	R3, t108
+	 139: GETL       	R1, t110
+	 140: ADDL       	$0x8, t110
+	 141: STL       	t108, (t110)
+	 142: INCEIPL       	$4
+
+	0x25478230:  90A1000C  stw r5,12(r1)
+	 143: GETL       	R5, t112
+	 144: GETL       	R1, t114
+	 145: ADDL       	$0xC, t114
+	 146: STL       	t112, (t114)
+	 147: INCEIPL       	$4
+
+	0x25478234:  90C10010  stw r6,16(r1)
+	 148: GETL       	R6, t116
+	 149: GETL       	R1, t118
+	 150: ADDL       	$0x10, t118
+	 151: STL       	t116, (t118)
+	 152: INCEIPL       	$4
+
+	0x25478238:  91410014  stw r10,20(r1)
+	 153: GETL       	R10, t120
+	 154: GETL       	R1, t122
+	 155: ADDL       	$0x14, t122
+	 156: STL       	t120, (t122)
+	 157: INCEIPL       	$4
+
+	0x2547823C:  48000010  b 0x2547824C
+	 158: JMPo       	$0x2547824C  ($4)
+
+
+. 0 254781B4 140
+. 93 C1 00 68 80 01 00 78 7F C8 02 A6 91 61 00 74 7D 80 00 26 91 E1 00 2C 92 01 00 30 81 E7 00 04 82 07 00 00 91 C1 00 28 7C 8E 23 78 92 21 00 34 92 A1 00 44 7D 35 4B 78 93 41 00 58 31 20 FF FF 7E 29 01 10 7D 1A 43 78 92 41 00 38 92 61 00 3C 92 81 00 40 92 C1 00 48 92 E1 00 4C 93 01 00 50 93 21 00 54 93 61 00 5C 93 81 00 60 93 A1 00 64 93 E1 00 6C 91 81 00 24 90 61 00 08 90 A1 00 0C 90 C1 00 10 91 41 00 14 48 00 00 10
+
+==== BB 1063 (0x2547824C) approx BBs exec'd 0 ====
+
+	0x2547824C:  5745103A  rlwinm r5,r26,2,0,29
+	   0: GETL       	R26, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R5
+	   3: INCEIPL       	$4
+
+	0x25478250:  80810078  lwz r4,120(r1)
+	   4: GETL       	R1, t2
+	   5: ADDL       	$0x78, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x25478254:  7D25802E  lwzx r9,r5,r16
+	   9: GETL       	R16, t6
+	  10: GETL       	R5, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R9
+	  14: INCEIPL       	$4
+
+	0x25478258:  3A800000  li r20,0
+	  15: MOVL       	$0x0, t12
+	  16: PUTL       	t12, R20
+	  17: INCEIPL       	$4
+
+	0x2547825C:  3A600000  li r19,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R19
+	  20: INCEIPL       	$4
+
+	0x25478260:  83890014  lwz r28,20(r9)
+	  21: GETL       	R9, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R28
+	  25: INCEIPL       	$4
+
+	0x25478264:  7F832278  xor r3,r28,r4
+	  26: GETL       	R28, t20
+	  27: GETL       	R4, t22
+	  28: XORL       	t20, t22
+	  29: PUTL       	t22, R3
+	  30: INCEIPL       	$4
+
+	0x25478268:  21630000  subfic r11,r3,0
+	  31: GETL       	R3, t24
+	  32: MOVL       	$0x0, t26
+	  33: SBBL       	t24, t26  (-wCa)
+	  34: PUTL       	t26, R11
+	  35: INCEIPL       	$4
+
+	0x2547826C:  7C6B1914  adde r3,r11,r3
+	  36: GETL       	R11, t28
+	  37: GETL       	R3, t30
+	  38: ADCL       	t28, t30  (-rCa-wCa)
+	  39: PUTL       	t30, R3
+	  40: INCEIPL       	$4
+
+	0x25478270:  7E291839  and. r9,r17,r3
+	  41: GETL       	R17, t32
+	  42: GETL       	R3, t34
+	  43: ANDL       	t32, t34
+	  44: PUTL       	t34, R9
+	  45: CMP0L       	t34, t36  (-rSo)
+	  46: ICRFL       	t36, $0x0, CR
+	  47: INCEIPL       	$4
+
+	0x25478274:  4082FFCC  bc 4,2,0x25478240
+	  48: Jc02o       	$0x25478240
+
+
+. 0 2547824C 44
+. 57 45 10 3A 80 81 00 78 7D 25 80 2E 3A 80 00 00 3A 60 00 00 83 89 00 14 7F 83 22 78 21 63 00 00 7C 6B 19 14 7E 29 18 39 40 82 FF CC
+
+==== BB 1064 (0x25478278) approx BBs exec'd 0 ====
+
+	0x25478278:  80C1007C  lwz r6,124(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x7C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547827C:  54CBFFFF  rlwinm. r11,r6,31,31,31
+	   5: GETL       	R6, t4
+	   6: ROLL       	$0x1F, t4
+	   7: ANDL       	$0x1, t4
+	   8: PUTL       	t4, R11
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x25478280:  41820010  bc 12,2,0x25478290
+	  12: Js02o       	$0x25478290
+
+
+. 0 25478278 12
+. 80 C1 00 7C 54 CB FF FF 41 82 00 10
+
+==== BB 1065 (0x25478290) approx BBs exec'd 0 ====
+
+	0x25478290:  815E04F4  lwz r10,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25478294:  810A0000  lwz r8,0(r10)
+	   5: GETL       	R10, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R8
+	   8: INCEIPL       	$4
+
+	0x25478298:  71090008  andi. r9,r8,0x8
+	   9: GETL       	R8, t8
+	  10: ANDL       	$0x8, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x2547829C:  40820248  bc 4,2,0x254784E4
+	  15: Jc02o       	$0x254784E4
+
+
+. 0 25478290 16
+. 81 5E 04 F4 81 0A 00 00 71 09 00 08 40 82 02 48
+
+==== BB 1066 (0x254782A0) approx BBs exec'd 0 ====
+
+	0x254782A0:  801C016C  lwz r0,364(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x16C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x254782A4:  833C0170  lwz r25,368(r28)
+	   5: GETL       	R28, t4
+	   6: ADDL       	$0x170, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x254782A8:  7D8E0396  divwu r12, r14, r0
+	  10: GETL       	R14, t10
+	  11: GETL       	R0, t8
+	  12: UDIVL       	t8, t10
+	  13: PUTL       	t10, R12
+	  14: INCEIPL       	$4
+
+	0x254782AC:  817C0038  lwz r11,56(r28)
+	  15: GETL       	R28, t12
+	  16: ADDL       	$0x38, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R11
+	  19: INCEIPL       	$4
+
+	0x254782B0:  807C0034  lwz r3,52(r28)
+	  20: GETL       	R28, t16
+	  21: ADDL       	$0x34, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R3
+	  24: INCEIPL       	$4
+
+	0x254782B4:  830B0004  lwz r24,4(r11)
+	  25: GETL       	R11, t20
+	  26: ADDL       	$0x4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R24
+	  29: INCEIPL       	$4
+
+	0x254782B8:  82430004  lwz r18,4(r3)
+	  30: GETL       	R3, t24
+	  31: ADDL       	$0x4, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R18
+	  34: INCEIPL       	$4
+
+	0x254782BC:  82DC0198  lwz r22,408(r28)
+	  35: GETL       	R28, t28
+	  36: ADDL       	$0x198, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R22
+	  39: INCEIPL       	$4
+
+	0x254782C0:  7FEC01D6  mullw r31,r12,r0
+	  40: GETL       	R12, t32
+	  41: GETL       	R0, t34
+	  42: MULL       	t32, t34
+	  43: PUTL       	t34, R31
+	  44: INCEIPL       	$4
+
+	0x254782C4:  7FBF7050  subf r29,r31,r14
+	  45: GETL       	R31, t36
+	  46: GETL       	R14, t38
+	  47: SUBL       	t36, t38
+	  48: PUTL       	t38, R29
+	  49: INCEIPL       	$4
+
+	0x254782C8:  57BB103A  rlwinm r27,r29,2,0,29
+	  50: GETL       	R29, t40
+	  51: SHLL       	$0x2, t40
+	  52: PUTL       	t40, R27
+	  53: INCEIPL       	$4
+
+	0x254782CC:  7FFBC82E  lwzx r31,r27,r25
+	  54: GETL       	R25, t42
+	  55: GETL       	R27, t44
+	  56: ADDL       	t44, t42
+	  57: LDL       	(t42), t46
+	  58: PUTL       	t46, R31
+	  59: INCEIPL       	$4
+
+	0x254782D0:  2E1F0000  cmpi cr4,r31,0
+	  60: GETL       	R31, t48
+	  61: CMP0L       	t48, t50  (-rSo)
+	  62: ICRFL       	t50, $0x4, CR
+	  63: INCEIPL       	$4
+
+	0x254782D4:  419200F8  bc 12,18,0x254783CC
+	  64: Js18o       	$0x254783CC
+
+
+. 0 254782A0 56
+. 80 1C 01 6C 83 3C 01 70 7D 8E 03 96 81 7C 00 38 80 7C 00 34 83 0B 00 04 82 43 00 04 82 DC 01 98 7F EC 01 D6 7F BF 70 50 57 BB 10 3A 7F FB C8 2E 2E 1F 00 00 41 92 00 F8
+
+==== BB 1067 (0x254782D8) approx BBs exec'd 0 ====
+
+	0x254782D8:  2D960000  cmpi cr3,r22,0
+	   0: GETL       	R22, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x3, CR
+	   3: INCEIPL       	$4
+
+	0x254782DC:  48000028  b 0x25478304
+	   4: JMPo       	$0x25478304  ($4)
+
+
+. 0 254782D8 8
+. 2D 96 00 00 48 00 00 28
+
+==== BB 1068 (0x25478304) approx BBs exec'd 0 ====
+
+	0x25478304:  57E92036  rlwinm r9,r31,4,0,27
+	   0: GETL       	R31, t0
+	   1: SHLL       	$0x4, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25478308:  7FA9C214  add r29,r9,r24
+	   4: GETL       	R9, t2
+	   5: GETL       	R24, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0x2547830C:  809D0004  lwz r4,4(r29)
+	   9: GETL       	R29, t6
+	  10: ADDL       	$0x4, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R4
+	  13: INCEIPL       	$4
+
+	0x25478310:  2C840000  cmpi cr1,r4,0
+	  14: GETL       	R4, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0x25478314:  41A6FFCC  bc 13,6,0x254782E0
+	  18: Js06o       	$0x254782E0
+
+
+. 0 25478304 20
+. 57 E9 20 36 7F A9 C2 14 80 9D 00 04 2C 84 00 00 41 A6 FF CC
+
+==== BB 1069 (0x25478318) approx BBs exec'd 0 ====
+
+	0x25478318:  A0FD000E  lhz r7,14(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xE, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x2547831C:  2C070000  cmpi cr0,r7,0
+	   5: GETL       	R7, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x25478320:  40820010  bc 4,2,0x25478330
+	   9: Jc02o       	$0x25478330
+
+
+. 0 25478318 12
+. A0 FD 00 0E 2C 07 00 00 40 82 00 10
+
+==== BB 1070 (0x25478330) approx BBs exec'd 0 ====
+
+	0x25478330:  8B7D000C  lbz r27,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25478334:  5760073E  rlwinm r0,r27,0,28,31
+	   5: GETL       	R27, t4
+	   6: ANDL       	$0xF, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x25478338:  2F800002  cmpi cr7,r0,2
+	   9: GETL       	R0, t6
+	  10: MOVL       	$0x2, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x2547833C:  409D000C  bc 4,29,0x25478348
+	  14: Jc29o       	$0x25478348
+
+
+. 0 25478330 16
+. 8B 7D 00 0C 57 60 07 3E 2F 80 00 02 40 9D 00 0C
+
+==== BB 1071 (0x25478348) approx BBs exec'd 0 ====
+
+	0x25478348:  8141000C  lwz r10,12(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547834C:  7F1D5000  cmp cr6,r29,r10
+	   5: GETL       	R29, t4
+	   6: GETL       	R10, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x25478350:  419A001C  bc 12,26,0x2547836C
+	  10: Js26o       	$0x2547836C
+
+
+. 0 25478348 12
+. 81 41 00 0C 7F 1D 50 00 41 9A 00 1C
+
+==== BB 1072 (0x25478354) approx BBs exec'd 0 ====
+
+	0x25478354:  7EE9C02E  lwzx r23,r9,r24
+	   0: GETL       	R24, t0
+	   1: GETL       	R9, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R23
+	   5: INCEIPL       	$4
+
+	0x25478358:  80810008  lwz r4,8(r1)
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x8, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R4
+	  10: INCEIPL       	$4
+
+	0x2547835C:  7C779214  add r3,r23,r18
+	  11: GETL       	R23, t10
+	  12: GETL       	R18, t12
+	  13: ADDL       	t10, t12
+	  14: PUTL       	t12, R3
+	  15: INCEIPL       	$4
+
+	0x25478360:  4800AB81  bl 0x25482EE0
+	  16: MOVL       	$0x25478364, t14
+	  17: PUTL       	t14, LR
+	  18: JMPo-c       	$0x25482EE0  ($4)
+
+
+. 0 25478354 16
+. 7E E9 C0 2E 80 81 00 08 7C 77 92 14 48 00 AB 81
+
+==== BB 1073 (0x25478364) approx BBs exec'd 0 ====
+
+	0x25478364:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25478368:  4082FF88  bc 4,2,0x254782F0
+	   4: Jc02o       	$0x254782F0
+
+
+. 0 25478364 8
+. 2C 03 00 00 40 82 FF 88
+
+==== BB 1074 (0x254782F0) approx BBs exec'd 0 ====
+
+	0x254782F0:  83BC0174  lwz r29,372(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x174, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254782F4:  57E7103A  rlwinm r7,r31,2,0,29
+	   5: GETL       	R31, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x254782F8:  7FE7E82E  lwzx r31,r7,r29
+	   9: GETL       	R29, t6
+	  10: GETL       	R7, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0x254782FC:  2E1F0000  cmpi cr4,r31,0
+	  15: GETL       	R31, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x4, CR
+	  18: INCEIPL       	$4
+
+	0x25478300:  41920108  bc 12,18,0x25478408
+	  19: Js18o       	$0x25478408
+
+
+. 0 254782F0 20
+. 83 BC 01 74 57 E7 10 3A 7F E7 E8 2E 2E 1F 00 00 41 92 01 08
+
+==== BB 1075 (0x25478408) approx BBs exec'd 0 ====
+
+	0x25478408:  2D940001  cmpi cr3,r20,1
+	   0: GETL       	R20, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x3, CR
+	   4: INCEIPL       	$4
+
+	0x2547840C:  408EFFC0  bc 4,14,0x254783CC
+	   5: Jc14o       	$0x254783CC
+
+
+. 0 25478408 8
+. 2D 94 00 01 40 8E FF C0
+
+==== BB 1076 (0x254783CC) approx BBs exec'd 0 ====
+
+	0x254783CC:  7F000026  mfcr r24
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R24
+	   2: INCEIPL       	$4
+
+	0x254783D0:  57189FFE  rlwinm r24,r24,19,31,31
+	   3: GETL       	R24, t2
+	   4: ROLL       	$0x13, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R24
+	   7: INCEIPL       	$4
+
+	0x254783D4:  3175FFFF  addic r11,r21,-1
+	   8: GETL       	R21, t4
+	   9: ADCL       	$0xFFFFFFFF, t4  (-wCa)
+	  10: PUTL       	t4, R11
+	  11: INCEIPL       	$4
+
+	0x254783D8:  7ECBA910  subfe r22,r11,r21
+	  12: GETL       	R11, t6
+	  13: GETL       	R21, t8
+	  14: SBBL       	t6, t8  (-rCa-wCa)
+	  15: PUTL       	t8, R22
+	  16: INCEIPL       	$4
+
+	0x254783DC:  7F0BB039  and. r11,r24,r22
+	  17: GETL       	R24, t10
+	  18: GETL       	R22, t12
+	  19: ANDL       	t10, t12
+	  20: PUTL       	t12, R11
+	  21: CMP0L       	t12, t14  (-rSo)
+	  22: ICRFL       	t14, $0x0, CR
+	  23: INCEIPL       	$4
+
+	0x254783E0:  41A2FE60  bc 13,2,0x25478240
+	  24: Js02o       	$0x25478240
+
+
+. 0 254783CC 24
+. 7F 00 00 26 57 18 9F FE 31 75 FF FF 7E CB A9 10 7F 0B B0 39 41 A2 FE 60
+
+==== BB 1077 (0x254783E4) approx BBs exec'd 0 ====
+
+	0x254783E4:  8075000C  lwz r3,12(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254783E8:  2E030000  cmpi cr4,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254783EC:  4192FE54  bc 12,18,0x25478240
+	   9: Js18o       	$0x25478240
+
+
+. 0 254783E4 12
+. 80 75 00 0C 2E 03 00 00 41 92 FE 54
+
+==== BB 1078 (0x25478240) approx BBs exec'd 0 ====
+
+	0x25478240:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x25478244:  7F9A7840  cmpl cr7,r26,r15
+	   4: GETL       	R26, t2
+	   5: GETL       	R15, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25478248:  409C01DC  bc 4,28,0x25478424
+	   9: Jc28o       	$0x25478424
+
+
+. 0 25478240 12
+. 3B 5A 00 01 7F 9A 78 40 40 9C 01 DC
+
+==== BB 1079 (0x2547836C) approx BBs exec'd 0 ====
+
+	0x2547836C:  2F950000  cmpi cr7,r21,0
+	   0: GETL       	R21, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25478370:  419E0130  bc 12,30,0x254784A0
+	   4: Js30o       	$0x254784A0
+
+
+. 0 2547836C 8
+. 2F 95 00 00 41 9E 01 30
+
+==== BB 1080 (0x25478374) approx BBs exec'd 0 ====
+
+	0x25478374:  418E0044  bc 12,14,0x254783B8
+	   0: Js14o       	$0x254783B8
+
+
+. 0 25478374 4
+. 41 8E 00 44
+
+==== BB 1081 (0x25478378) approx BBs exec'd 0 ====
+
+	0x25478378:  57F9083C  rlwinm r25,r31,1,0,30
+	   0: GETL       	R31, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0x2547837C:  815C0188  lwz r10,392(r28)
+	   4: GETL       	R28, t2
+	   5: ADDL       	$0x188, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x25478380:  7EF9B22E  lhzx r23,r25,r22
+	   9: GETL       	R22, t6
+	  10: GETL       	R25, t8
+	  11: ADDL       	t8, t6
+	  12: LDW       	(t6), t10
+	  13: PUTL       	t10, R23
+	  14: INCEIPL       	$4
+
+	0x25478384:  81350004  lwz r9,4(r21)
+	  15: GETL       	R21, t12
+	  16: ADDL       	$0x4, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R9
+	  19: INCEIPL       	$4
+
+	0x25478388:  56EB2376  rlwinm r11,r23,4,13,27
+	  20: GETL       	R23, t16
+	  21: ROLL       	$0x4, t16
+	  22: ANDL       	$0x7FFF0, t16
+	  23: PUTL       	t16, R11
+	  24: INCEIPL       	$4
+
+	0x2547838C:  7D8B5214  add r12,r11,r10
+	  25: GETL       	R11, t18
+	  26: GETL       	R10, t20
+	  27: ADDL       	t18, t20
+	  28: PUTL       	t20, R12
+	  29: INCEIPL       	$4
+
+	0x25478390:  832C0004  lwz r25,4(r12)
+	  30: GETL       	R12, t22
+	  31: ADDL       	$0x4, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R25
+	  34: INCEIPL       	$4
+
+	0x25478394:  7C994800  cmp cr1,r25,r9
+	  35: GETL       	R25, t26
+	  36: GETL       	R9, t28
+	  37: CMPL       	t26, t28, t30  (-rSo)
+	  38: ICRFL       	t30, $0x1, CR
+	  39: INCEIPL       	$4
+
+	0x25478398:  418600F0  bc 12,6,0x25478488
+	  40: Js06o       	$0x25478488
+
+
+. 0 25478378 36
+. 57 F9 08 3C 81 5C 01 88 7E F9 B2 2E 81 35 00 04 56 EB 23 76 7D 8B 52 14 83 2C 00 04 7C 99 48 00 41 86 00 F0
+
+==== BB 1082 (0x25478488) approx BBs exec'd 0 ====
+
+	0x25478488:  7C6B502E  lwzx r3,r11,r10
+	   0: GETL       	R10, t0
+	   1: GETL       	R11, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R3
+	   5: INCEIPL       	$4
+
+	0x2547848C:  80950000  lwz r4,0(r21)
+	   6: GETL       	R21, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R4
+	   9: INCEIPL       	$4
+
+	0x25478490:  4800AA51  bl 0x25482EE0
+	  10: MOVL       	$0x25478494, t10
+	  11: PUTL       	t10, LR
+	  12: JMPo-c       	$0x25482EE0  ($4)
+
+
+. 0 25478488 12
+. 7C 6B 50 2E 80 95 00 00 48 00 AA 51
+
+==== BB 1083 (0x25478494) approx BBs exec'd 0 ====
+
+	0x25478494:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25478498:  409AFF04  bc 4,26,0x2547839C
+	   4: Jc26o       	$0x2547839C
+
+
+. 0 25478494 8
+. 2F 03 00 00 40 9A FF 04
+
+==== BB 1084 (0x2547849C) approx BBs exec'd 0 ====
+
+	0x2547849C:  4BFFFF1C  b 0x254783B8
+	   0: JMPo       	$0x254783B8  ($4)
+
+
+. 0 2547849C 4
+. 4B FF FF 1C
+
+==== BB 1085 (0x254783B8) approx BBs exec'd 0 ====
+
+	0x254783B8:  5760E13E  rlwinm r0,r27,28,4,31
+	   0: GETL       	R27, t0
+	   1: SHRL       	$0x4, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x254783BC:  2F800001  cmpi cr7,r0,1
+	   4: GETL       	R0, t2
+	   5: MOVL       	$0x1, t6
+	   6: CMPL       	t2, t6, t4  (-rSo)
+	   7: ICRFL       	t4, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x254783C0:  419E017C  bc 12,30,0x2547853C
+	   9: Js30o       	$0x2547853C
+
+
+. 0 254783B8 12
+. 57 60 E1 3E 2F 80 00 01 41 9E 01 7C
+
+==== BB 1086 (0x2547853C) approx BBs exec'd 0 ====
+
+	0x2547853C:  81C10010  lwz r14,16(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R14
+	   4: INCEIPL       	$4
+
+	0x25478540:  38600001  li r3,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25478544:  938E0004  stw r28,4(r14)
+	   8: GETL       	R28, t6
+	   9: GETL       	R14, t8
+	  10: ADDL       	$0x4, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25478548:  93AE0000  stw r29,0(r14)
+	  13: GETL       	R29, t10
+	  14: GETL       	R14, t12
+	  15: STL       	t10, (t12)
+	  16: INCEIPL       	$4
+
+	0x2547854C:  4BFFFEDC  b 0x25478428
+	  17: JMPo       	$0x25478428  ($4)
+
+
+. 0 2547853C 20
+. 81 C1 00 10 38 60 00 01 93 8E 00 04 93 AE 00 00 4B FF FE DC
+
+==== BB 1087 (0x25478428) approx BBs exec'd 0 ====
+
+	0x25478428:  81E10074  lwz r15,116(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x74, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R15
+	   4: INCEIPL       	$4
+
+	0x2547842C:  81810024  lwz r12,36(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x24, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25478430:  7DE803A6  mtlr r15
+	  10: GETL       	R15, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x25478434:  81C10028  lwz r14,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R14
+	  17: INCEIPL       	$4
+
+	0x25478438:  81E1002C  lwz r15,44(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x2C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R15
+	  22: INCEIPL       	$4
+
+	0x2547843C:  7D818120  mtcrf 0x18,r12
+	  23: GETL       	R12, t18
+	  24: ICRFL       	t18, $0x3, CR
+	  25: ICRFL       	t18, $0x4, CR
+	  26: INCEIPL       	$4
+
+	0x25478440:  82010030  lwz r16,48(r1)
+	  27: GETL       	R1, t20
+	  28: ADDL       	$0x30, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R16
+	  31: INCEIPL       	$4
+
+	0x25478444:  82210034  lwz r17,52(r1)
+	  32: GETL       	R1, t24
+	  33: ADDL       	$0x34, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R17
+	  36: INCEIPL       	$4
+
+	0x25478448:  82410038  lwz r18,56(r1)
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x38, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R18
+	  41: INCEIPL       	$4
+
+	0x2547844C:  8261003C  lwz r19,60(r1)
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x3C, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R19
+	  46: INCEIPL       	$4
+
+	0x25478450:  82810040  lwz r20,64(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x40, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R20
+	  51: INCEIPL       	$4
+
+	0x25478454:  82A10044  lwz r21,68(r1)
+	  52: GETL       	R1, t40
+	  53: ADDL       	$0x44, t40
+	  54: LDL       	(t40), t42
+	  55: PUTL       	t42, R21
+	  56: INCEIPL       	$4
+
+	0x25478458:  82C10048  lwz r22,72(r1)
+	  57: GETL       	R1, t44
+	  58: ADDL       	$0x48, t44
+	  59: LDL       	(t44), t46
+	  60: PUTL       	t46, R22
+	  61: INCEIPL       	$4
+
+	0x2547845C:  82E1004C  lwz r23,76(r1)
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x4C, t48
+	  64: LDL       	(t48), t50
+	  65: PUTL       	t50, R23
+	  66: INCEIPL       	$4
+
+	0x25478460:  83010050  lwz r24,80(r1)
+	  67: GETL       	R1, t52
+	  68: ADDL       	$0x50, t52
+	  69: LDL       	(t52), t54
+	  70: PUTL       	t54, R24
+	  71: INCEIPL       	$4
+
+	0x25478464:  83210054  lwz r25,84(r1)
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x54, t56
+	  74: LDL       	(t56), t58
+	  75: PUTL       	t58, R25
+	  76: INCEIPL       	$4
+
+	0x25478468:  83410058  lwz r26,88(r1)
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x58, t60
+	  79: LDL       	(t60), t62
+	  80: PUTL       	t62, R26
+	  81: INCEIPL       	$4
+
+	0x2547846C:  8361005C  lwz r27,92(r1)
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x5C, t64
+	  84: LDL       	(t64), t66
+	  85: PUTL       	t66, R27
+	  86: INCEIPL       	$4
+
+	0x25478470:  83810060  lwz r28,96(r1)
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x60, t68
+	  89: LDL       	(t68), t70
+	  90: PUTL       	t70, R28
+	  91: INCEIPL       	$4
+
+	0x25478474:  83A10064  lwz r29,100(r1)
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x64, t72
+	  94: LDL       	(t72), t74
+	  95: PUTL       	t74, R29
+	  96: INCEIPL       	$4
+
+	0x25478478:  83C10068  lwz r30,104(r1)
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x68, t76
+	  99: LDL       	(t76), t78
+	 100: PUTL       	t78, R30
+	 101: INCEIPL       	$4
+
+	0x2547847C:  83E1006C  lwz r31,108(r1)
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x6C, t80
+	 104: LDL       	(t80), t82
+	 105: PUTL       	t82, R31
+	 106: INCEIPL       	$4
+
+	0x25478480:  38210070  addi r1,r1,112
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x70, t84
+	 109: PUTL       	t84, R1
+	 110: INCEIPL       	$4
+
+	0x25478484:  4E800020  blr
+	 111: GETL       	LR, t86
+	 112: JMPo-r       	t86  ($4)
+
+
+. 0 25478428 96
+. 81 E1 00 74 81 81 00 24 7D E8 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+==== BB 1088 (0x254786B0) approx BBs exec'd 0 ====
+
+	0x254786B0:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x254786B4:  39000000  li r8,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R8
+	   6: INCEIPL       	$4
+
+	0x254786B8:  41810014  bc 12,1,0x254786CC
+	   7: Js01o       	$0x254786CC
+
+
+. 0 254786B0 12
+. 2C 03 00 00 39 00 00 00 41 81 00 14
+
+==== BB 1089 (0x254786CC) approx BBs exec'd 0 ====
+
+	0x254786CC:  815F0048  lwz r10,72(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x254786D0:  2F8A0000  cmpi cr7,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x254786D4:  419E0504  bc 12,30,0x25478BD8
+	   9: Js30o       	$0x25478BD8
+
+
+. 0 254786CC 12
+. 81 5F 00 48 2F 8A 00 00 41 9E 05 04
+
+==== BB 1090 (0x254786D8) approx BBs exec'd 0 ====
+
+	0x254786D8:  80B60000  lwz r5,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0x254786DC:  2F850000  cmpi cr7,r5,0
+	   4: GETL       	R5, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0x254786E0:  419E0140  bc 12,30,0x25478820
+	   8: Js30o       	$0x25478820
+
+
+. 0 254786D8 12
+. 80 B6 00 00 2F 85 00 00 41 9E 01 40
+
+==== BB 1091 (0x254786E4) approx BBs exec'd 0 ====
+
+	0x254786E4:  88E5000D  lbz r7,13(r5)
+	   0: GETL       	R5, t0
+	   1: ADDL       	$0xD, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R7
+	   4: INCEIPL       	$4
+
+	0x254786E8:  39200000  li r9,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x254786EC:  2E090000  cmpi cr4,r9,0
+	   8: GETL       	R9, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x4, CR
+	  11: INCEIPL       	$4
+
+	0x254786F0:  54E607BE  rlwinm r6,r7,0,30,31
+	  12: GETL       	R7, t10
+	  13: ANDL       	$0x3, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x254786F4:  2F060003  cmpi cr6,r6,3
+	  16: GETL       	R6, t12
+	  17: MOVL       	$0x3, t16
+	  18: CMPL       	t12, t16, t14  (-rSo)
+	  19: ICRFL       	t14, $0x6, CR
+	  20: INCEIPL       	$4
+
+	0x254786F8:  419A0234  bc 12,26,0x2547892C
+	  21: Js26o       	$0x2547892C
+
+
+. 0 254786E4 24
+. 88 E5 00 0D 39 20 00 00 2E 09 00 00 54 E6 07 BE 2F 06 00 03 41 9A 02 34
+
+==== BB 1092 (0x254786FC) approx BBs exec'd 0 ====
+
+	0x254786FC:  3B7F0048  addi r27,r31,72
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x48, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25478700:  839B0004  lwz r28,4(r27)
+	   4: GETL       	R27, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R28
+	   8: INCEIPL       	$4
+
+	0x25478704:  3D808000  lis r12,-32768
+	   9: MOVL       	$0x80000000, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x25478708:  801C0180  lwz r0,384(r28)
+	  12: GETL       	R28, t8
+	  13: ADDL       	$0x180, t8
+	  14: LDL       	(t8), t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0x2547870C:  541D0002  rlwinm r29,r0,0,0,1
+	  17: GETL       	R0, t12
+	  18: ANDL       	$0xC0000000, t12
+	  19: PUTL       	t12, R29
+	  20: INCEIPL       	$4
+
+	0x25478710:  7F1D6000  cmp cr6,r29,r12
+	  21: GETL       	R29, t14
+	  22: GETL       	R12, t16
+	  23: CMPL       	t14, t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x6, CR
+	  25: INCEIPL       	$4
+
+	0x25478714:  419A05BC  bc 12,26,0x25478CD0
+	  26: Js26o       	$0x25478CD0
+
+
+. 0 254786FC 28
+. 3B 7F 00 48 83 9B 00 04 3D 80 80 00 80 1C 01 80 54 1D 00 02 7F 1D 60 00 41 9A 05 BC
+
+==== BB 1093 (0x25478718) approx BBs exec'd 0 ====
+
+	0x25478718:  82BB0004  lwz r21,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547871C:  83BE04F4  lwz r29,1268(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x4F4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x25478720:  83550180  lwz r26,384(r21)
+	  10: GETL       	R21, t8
+	  11: ADDL       	$0x180, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0x25478724:  67570010  oris r23,r26,0x10
+	  15: GETL       	R26, t12
+	  16: ORL       	$0x100000, t12
+	  17: PUTL       	t12, R23
+	  18: INCEIPL       	$4
+
+	0x25478728:  92F50180  stw r23,384(r21)
+	  19: GETL       	R23, t14
+	  20: GETL       	R21, t16
+	  21: ADDL       	$0x180, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547872C:  801D0000  lwz r0,0(r29)
+	  24: GETL       	R29, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R0
+	  27: INCEIPL       	$4
+
+	0x25478730:  70090404  andi. r9,r0,0x404
+	  28: GETL       	R0, t22
+	  29: ANDL       	$0x404, t22
+	  30: PUTL       	t22, R9
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x0, CR
+	  33: INCEIPL       	$4
+
+	0x25478734:  408202A0  bc 4,2,0x254789D4
+	  34: Jc02o       	$0x254789D4
+
+
+. 0 25478718 32
+. 82 BB 00 04 83 BE 04 F4 83 55 01 80 67 57 00 10 92 F5 01 80 80 1D 00 00 70 09 04 04 40 82 02 A0
+
+==== BB 1094 (0x25478738) approx BBs exec'd 0 ====
+
+	0x25478738:  807B0004  lwz r3,4(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x2547873C:  801F0048  lwz r0,72(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x48, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R0
+	   9: INCEIPL       	$4
+
+	0x25478740:  90160000  stw r0,0(r22)
+	  10: GETL       	R0, t8
+	  11: GETL       	R22, t10
+	  12: STL       	t8, (t10)
+	  13: INCEIPL       	$4
+
+	0x25478744:  80E10000  lwz r7,0(r1)
+	  14: GETL       	R1, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R7
+	  17: INCEIPL       	$4
+
+	0x25478748:  82C70004  lwz r22,4(r7)
+	  18: GETL       	R7, t16
+	  19: ADDL       	$0x4, t16
+	  20: LDL       	(t16), t18
+	  21: PUTL       	t18, R22
+	  22: INCEIPL       	$4
+
+	0x2547874C:  8107FFC0  lwz r8,-64(r7)
+	  23: GETL       	R7, t20
+	  24: ADDL       	$0xFFFFFFC0, t20
+	  25: LDL       	(t20), t22
+	  26: PUTL       	t22, R8
+	  27: INCEIPL       	$4
+
+	0x25478750:  7EC803A6  mtlr r22
+	  28: GETL       	R22, t24
+	  29: PUTL       	t24, LR
+	  30: INCEIPL       	$4
+
+	0x25478754:  8227FFC4  lwz r17,-60(r7)
+	  31: GETL       	R7, t26
+	  32: ADDL       	$0xFFFFFFC4, t26
+	  33: LDL       	(t26), t28
+	  34: PUTL       	t28, R17
+	  35: INCEIPL       	$4
+
+	0x25478758:  8247FFC8  lwz r18,-56(r7)
+	  36: GETL       	R7, t30
+	  37: ADDL       	$0xFFFFFFC8, t30
+	  38: LDL       	(t30), t32
+	  39: PUTL       	t32, R18
+	  40: INCEIPL       	$4
+
+	0x2547875C:  7D008120  mtcrf 0x8,r8
+	  41: GETL       	R8, t34
+	  42: ICRFL       	t34, $0x4, CR
+	  43: INCEIPL       	$4
+
+	0x25478760:  8267FFCC  lwz r19,-52(r7)
+	  44: GETL       	R7, t36
+	  45: ADDL       	$0xFFFFFFCC, t36
+	  46: LDL       	(t36), t38
+	  47: PUTL       	t38, R19
+	  48: INCEIPL       	$4
+
+	0x25478764:  8287FFD0  lwz r20,-48(r7)
+	  49: GETL       	R7, t40
+	  50: ADDL       	$0xFFFFFFD0, t40
+	  51: LDL       	(t40), t42
+	  52: PUTL       	t42, R20
+	  53: INCEIPL       	$4
+
+	0x25478768:  82A7FFD4  lwz r21,-44(r7)
+	  54: GETL       	R7, t44
+	  55: ADDL       	$0xFFFFFFD4, t44
+	  56: LDL       	(t44), t46
+	  57: PUTL       	t46, R21
+	  58: INCEIPL       	$4
+
+	0x2547876C:  82C7FFD8  lwz r22,-40(r7)
+	  59: GETL       	R7, t48
+	  60: ADDL       	$0xFFFFFFD8, t48
+	  61: LDL       	(t48), t50
+	  62: PUTL       	t50, R22
+	  63: INCEIPL       	$4
+
+	0x25478770:  82E7FFDC  lwz r23,-36(r7)
+	  64: GETL       	R7, t52
+	  65: ADDL       	$0xFFFFFFDC, t52
+	  66: LDL       	(t52), t54
+	  67: PUTL       	t54, R23
+	  68: INCEIPL       	$4
+
+	0x25478774:  8307FFE0  lwz r24,-32(r7)
+	  69: GETL       	R7, t56
+	  70: ADDL       	$0xFFFFFFE0, t56
+	  71: LDL       	(t56), t58
+	  72: PUTL       	t58, R24
+	  73: INCEIPL       	$4
+
+	0x25478778:  8327FFE4  lwz r25,-28(r7)
+	  74: GETL       	R7, t60
+	  75: ADDL       	$0xFFFFFFE4, t60
+	  76: LDL       	(t60), t62
+	  77: PUTL       	t62, R25
+	  78: INCEIPL       	$4
+
+	0x2547877C:  8347FFE8  lwz r26,-24(r7)
+	  79: GETL       	R7, t64
+	  80: ADDL       	$0xFFFFFFE8, t64
+	  81: LDL       	(t64), t66
+	  82: PUTL       	t66, R26
+	  83: INCEIPL       	$4
+
+	0x25478780:  8367FFEC  lwz r27,-20(r7)
+	  84: GETL       	R7, t68
+	  85: ADDL       	$0xFFFFFFEC, t68
+	  86: LDL       	(t68), t70
+	  87: PUTL       	t70, R27
+	  88: INCEIPL       	$4
+
+	0x25478784:  8387FFF0  lwz r28,-16(r7)
+	  89: GETL       	R7, t72
+	  90: ADDL       	$0xFFFFFFF0, t72
+	  91: LDL       	(t72), t74
+	  92: PUTL       	t74, R28
+	  93: INCEIPL       	$4
+
+	0x25478788:  83A7FFF4  lwz r29,-12(r7)
+	  94: GETL       	R7, t76
+	  95: ADDL       	$0xFFFFFFF4, t76
+	  96: LDL       	(t76), t78
+	  97: PUTL       	t78, R29
+	  98: INCEIPL       	$4
+
+	0x2547878C:  83C7FFF8  lwz r30,-8(r7)
+	  99: GETL       	R7, t80
+	 100: ADDL       	$0xFFFFFFF8, t80
+	 101: LDL       	(t80), t82
+	 102: PUTL       	t82, R30
+	 103: INCEIPL       	$4
+
+	0x25478790:  83E7FFFC  lwz r31,-4(r7)
+	 104: GETL       	R7, t84
+	 105: ADDL       	$0xFFFFFFFC, t84
+	 106: LDL       	(t84), t86
+	 107: PUTL       	t86, R31
+	 108: INCEIPL       	$4
+
+	0x25478794:  7CE13B78  or r1,r7,r7
+	 109: GETL       	R7, t88
+	 110: PUTL       	t88, R1
+	 111: INCEIPL       	$4
+
+	0x25478798:  4E800020  blr
+	 112: GETL       	LR, t90
+	 113: JMPo-r       	t90  ($4)
+
+
+. 0 25478738 100
+. 80 7B 00 04 80 1F 00 48 90 16 00 00 80 E1 00 00 82 C7 00 04 81 07 FF C0 7E C8 03 A6 82 27 FF C4 82 47 FF C8 7D 00 81 20 82 67 FF CC 82 87 FF D0 82 A7 FF D4 82 C7 FF D8 82 E7 FF DC 83 07 FF E0 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+
+==== BB 1095 (0x2547995C) approx BBs exec'd 0 ====
+
+	0x2547995C:  811F0050  lwz r8,80(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25479960:  809F000C  lwz r4,12(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R4
+	   9: INCEIPL       	$4
+
+	0x25479964:  7C7A1B78  or r26,r3,r3
+	  10: GETL       	R3, t8
+	  11: PUTL       	t8, R26
+	  12: INCEIPL       	$4
+
+	0x25479968:  7D064378  or r6,r8,r8
+	  13: GETL       	R8, t10
+	  14: PUTL       	t10, R6
+	  15: INCEIPL       	$4
+
+	0x2547996C:  90640210  stw r3,528(r4)
+	  16: GETL       	R3, t12
+	  17: GETL       	R4, t14
+	  18: ADDL       	$0x210, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x25479970:  91040214  stw r8,532(r4)
+	  21: GETL       	R8, t16
+	  22: GETL       	R4, t18
+	  23: ADDL       	$0x214, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x25479974:  2F060000  cmpi cr6,r6,0
+	  26: GETL       	R6, t20
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x6, CR
+	  29: INCEIPL       	$4
+
+	0x25479978:  39200000  li r9,0
+	  30: MOVL       	$0x0, t24
+	  31: PUTL       	t24, R9
+	  32: INCEIPL       	$4
+
+	0x2547997C:  419A0010  bc 12,26,0x2547998C
+	  33: Js26o       	$0x2547998C
+
+
+. 0 2547995C 36
+. 81 1F 00 50 80 9F 00 0C 7C 7A 1B 78 7D 06 43 78 90 64 02 10 91 04 02 14 2F 06 00 00 39 20 00 00 41 9A 00 10
+
+==== BB 1096 (0x25479980) approx BBs exec'd 0 ====
+
+	0x25479980:  80FA0000  lwz r7,0(r26)
+	   0: GETL       	R26, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R7
+	   3: INCEIPL       	$4
+
+	0x25479984:  81660004  lwz r11,4(r6)
+	   4: GETL       	R6, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R11
+	   8: INCEIPL       	$4
+
+	0x25479988:  7D275A14  add r9,r7,r11
+	   9: GETL       	R7, t8
+	  10: GETL       	R11, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0x2547998C:  2F9D0044  cmpi cr7,r29,68
+	  14: GETL       	R29, t12
+	  15: MOVL       	$0x44, t16
+	  16: CMPL       	t12, t16, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25479990:  801C0008  lwz r0,8(r28)
+	  19: GETL       	R28, t18
+	  20: ADDL       	$0x8, t18
+	  21: LDL       	(t18), t20
+	  22: PUTL       	t20, R0
+	  23: INCEIPL       	$4
+
+	0x25479994:  7D290214  add r9,r9,r0
+	  24: GETL       	R9, t22
+	  25: GETL       	R0, t24
+	  26: ADDL       	t22, t24
+	  27: PUTL       	t24, R9
+	  28: INCEIPL       	$4
+
+	0x25479998:  419E0100  bc 12,30,0x25479A98
+	  29: Js30o       	$0x25479A98
+
+
+. 0 25479980 28
+. 80 FA 00 00 81 66 00 04 7D 27 5A 14 2F 9D 00 44 80 1C 00 08 7D 29 02 14 41 9E 01 00
+
+==== BB 1097 (0x2547999C) approx BBs exec'd 0 ====
+
+	0x2547999C:  419D00D0  bc 12,29,0x25479A6C
+	   0: Js29o       	$0x25479A6C
+
+
+. 0 2547999C 4
+. 41 9D 00 D0
+
+==== BB 1098 (0x254799A0) approx BBs exec'd 0 ====
+
+	0x254799A0:  2F9D0001  cmpi cr7,r29,1
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254799A4:  419E00EC  bc 12,30,0x25479A90
+	   5: Js30o       	$0x25479A90
+
+
+. 0 254799A0 8
+. 2F 9D 00 01 41 9E 00 EC
+
+==== BB 1099 (0x25479A90) approx BBs exec'd 0 ====
+
+	0x25479A90:  91370000  stw r9,0(r23)
+	   0: GETL       	R9, t0
+	   1: GETL       	R23, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25479A94:  4BFFFF38  b 0x254799CC
+	   4: JMPo       	$0x254799CC  ($4)
+
+
+. 0 25479A90 8
+. 91 37 00 00 4B FF FF 38
+
+==== BB 1100 (0x254799CC) approx BBs exec'd 0 ====
+
+	0x254799CC:  3B9C000C  addi r28,r28,12
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0xC, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0x254799D0:  7E1CC040  cmpl cr4,r28,r24
+	   4: GETL       	R28, t2
+	   5: GETL       	R24, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x254799D4:  4190FE68  bc 12,16,0x2547983C
+	   9: Js16o       	$0x2547983C
+
+
+. 0 254799CC 12
+. 3B 9C 00 0C 7E 1C C0 40 41 90 FE 68
+
+==== BB 1101 (0x254787E4) approx BBs exec'd 0 ====
+
+	0x254787E4:  89690001  lbz r11,1(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x1, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254787E8:  57862036  rlwinm r6,r28,4,0,27
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x4, t4
+	   7: PUTL       	t4, R6
+	   8: INCEIPL       	$4
+
+	0x254787EC:  7F865214  add r28,r6,r10
+	   9: GETL       	R6, t6
+	  10: GETL       	R10, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R28
+	  13: INCEIPL       	$4
+
+	0x254787F0:  39490001  addi r10,r9,1
+	  14: GETL       	R9, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R10
+	  17: INCEIPL       	$4
+
+	0x254787F4:  2F8B0000  cmpi cr7,r11,0
+	  18: GETL       	R11, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0x254787F8:  41BEFE4C  bc 13,30,0x25478644
+	  22: Js30o       	$0x25478644
+
+
+. 0 254787E4 24
+. 89 69 00 01 57 86 20 36 7F 86 52 14 39 49 00 01 2F 8B 00 00 41 BE FE 4C
+
+==== BB 1102 (0x254787FC) approx BBs exec'd 0 ====
+
+	0x254787FC:  57922036  rlwinm r18,r28,4,0,27
+	   0: GETL       	R28, t0
+	   1: SHLL       	$0x4, t0
+	   2: PUTL       	t0, R18
+	   3: INCEIPL       	$4
+
+	0x25478800:  7E325A14  add r17,r18,r11
+	   4: GETL       	R18, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R17
+	   8: INCEIPL       	$4
+
+	0x25478804:  8D6A0001  lbzu r11,1(r10)
+	   9: GETL       	R10, t6
+	  10: ADDL       	$0x1, t6
+	  11: PUTL       	t6, R10
+	  12: LDB       	(t6), t8
+	  13: PUTL       	t8, R11
+	  14: INCEIPL       	$4
+
+	0x25478808:  56280006  rlwinm r8,r17,0,0,3
+	  15: GETL       	R17, t10
+	  16: ANDL       	$0xF0000000, t10
+	  17: PUTL       	t10, R8
+	  18: INCEIPL       	$4
+
+	0x2547880C:  2F8B0000  cmpi cr7,r11,0
+	  19: GETL       	R11, t12
+	  20: CMP0L       	t12, t14  (-rSo)
+	  21: ICRFL       	t14, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x25478810:  7E274278  xor r7,r17,r8
+	  23: GETL       	R17, t16
+	  24: GETL       	R8, t18
+	  25: XORL       	t16, t18
+	  26: PUTL       	t18, R7
+	  27: INCEIPL       	$4
+
+	0x25478814:  5509463E  rlwinm r9,r8,8,24,31
+	  28: GETL       	R8, t20
+	  29: SHRL       	$0x18, t20
+	  30: PUTL       	t20, R9
+	  31: INCEIPL       	$4
+
+	0x25478818:  7CFC4A78  xor r28,r7,r9
+	  32: GETL       	R7, t22
+	  33: GETL       	R9, t24
+	  34: XORL       	t22, t24
+	  35: PUTL       	t24, R28
+	  36: INCEIPL       	$4
+
+	0x2547881C:  4BFFFFDC  b 0x254787F8
+	  37: JMPo       	$0x254787F8  ($4)
+
+
+. 0 254787FC 36
+. 57 92 20 36 7E 32 5A 14 8D 6A 00 01 56 28 00 06 2F 8B 00 00 7E 27 42 78 55 09 46 3E 7C FC 4A 78 4B FF FF DC
+
+==== BB 1103 (0x254787F8) approx BBs exec'd 0 ====
+
+	0x254787F8:  41BEFE4C  bc 13,30,0x25478644
+	   0: Js30o       	$0x25478644
+
+
+. 0 254787F8 4
+. 41 BE FE 4C
+
+==== BB 1104 (0x25478324) approx BBs exec'd 0 ====
+
+	0x25478324:  8101007C  lwz r8,124(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x7C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25478328:  71000001  andi. r0,r8,0x1
+	   5: GETL       	R8, t4
+	   6: ANDL       	$0x1, t4
+	   7: PUTL       	t4, R0
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2547832C:  40A2FFC4  bc 5,2,0x254782F0
+	  11: Jc02o       	$0x254782F0
+
+
+. 0 25478324 12
+. 81 01 00 7C 71 00 00 01 40 A2 FF C4
+
+==== BB 1105 (0x254782E0) approx BBs exec'd 0 ====
+
+	0x254782E0:  88DD000C  lbz r6,12(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254782E4:  54C5073E  rlwinm r5,r6,0,28,31
+	   5: GETL       	R6, t4
+	   6: ANDL       	$0xF, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x254782E8:  2F050006  cmpi cr6,r5,6
+	   9: GETL       	R5, t6
+	  10: MOVL       	$0x6, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0x254782EC:  419A002C  bc 12,26,0x25478318
+	  14: Js26o       	$0x25478318
+
+
+. 0 254782E0 16
+. 88 DD 00 0C 54 C5 07 3E 2F 05 00 06 41 9A 00 2C
+
+==== BB 1106 (0x25479EE4) approx BBs exec'd 0 ====
+
+	0x25479EE4:  7D200026  mfcr r9
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x25479EE8:  55299FFE  rlwinm r9,r9,19,31,31
+	   3: GETL       	R9, t2
+	   4: ROLL       	$0x13, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R9
+	   7: INCEIPL       	$4
+
+	0x25479EEC:  39600000  li r11,0
+	   8: MOVL       	$0x0, t4
+	   9: PUTL       	t4, R11
+	  10: INCEIPL       	$4
+
+	0x25479EF0:  7CA00026  mfcr r5
+	  11: GETL       	CR, t6
+	  12: PUTL       	t6, R5
+	  13: INCEIPL       	$4
+
+	0x25479EF4:  54A53FFE  rlwinm r5,r5,7,31,31
+	  14: GETL       	R5, t8
+	  15: ROLL       	$0x7, t8
+	  16: ANDL       	$0x1, t8
+	  17: PUTL       	t8, R5
+	  18: INCEIPL       	$4
+
+	0x25479EF8:  7D282B79  or. r8,r9,r5
+	  19: GETL       	R9, t10
+	  20: GETL       	R5, t12
+	  21: ORL       	t12, t10
+	  22: PUTL       	t10, R8
+	  23: CMP0L       	t10, t14  (-rSo)
+	  24: ICRFL       	t14, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x25479EFC:  40820058  bc 4,2,0x25479F54
+	  26: Jc02o       	$0x25479F54
+
+
+. 0 25479EE4 28
+. 7D 20 00 26 55 29 9F FE 39 60 00 00 7C A0 00 26 54 A5 3F FE 7D 28 2B 79 40 82 00 58
+
+==== BB 1107 (0x25479F00) approx BBs exec'd 0 ====
+
+	0x25479F00:  381DFFBC  addi r0,r29,-68
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0xFFFFFFBC, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x25479F04:  6BA30002  xori r3,r29,0x2
+	   4: GETL       	R29, t2
+	   5: XORL       	$0x2, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0x25479F08:  21030000  subfic r8,r3,0
+	   8: GETL       	R3, t4
+	   9: MOVL       	$0x0, t6
+	  10: SBBL       	t4, t6  (-wCa)
+	  11: PUTL       	t6, R8
+	  12: INCEIPL       	$4
+
+	0x25479F0C:  7C681914  adde r3,r8,r3
+	  13: GETL       	R8, t8
+	  14: GETL       	R3, t10
+	  15: ADCL       	t8, t10  (-rCa-wCa)
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0x25479F10:  2180000A  subfic r12,r0,10
+	  18: GETL       	R0, t12
+	  19: MOVL       	$0xA, t14
+	  20: SBBL       	t12, t14  (-wCa)
+	  21: PUTL       	t14, R12
+	  22: INCEIPL       	$4
+
+	0x25479F14:  39800000  li r12,0
+	  23: MOVL       	$0x0, t16
+	  24: PUTL       	t16, R12
+	  25: INCEIPL       	$4
+
+	0x25479F18:  7D8C6114  adde r12,r12,r12
+	  26: GETL       	R12, t18
+	  27: GETL       	R12, t20
+	  28: ADCL       	t18, t20  (-rCa-wCa)
+	  29: PUTL       	t20, R12
+	  30: INCEIPL       	$4
+
+	0x25479F1C:  7D881B79  or. r8,r12,r3
+	  31: GETL       	R12, t22
+	  32: GETL       	R3, t24
+	  33: ORL       	t24, t22
+	  34: PUTL       	t22, R8
+	  35: CMP0L       	t22, t26  (-rSo)
+	  36: ICRFL       	t26, $0x0, CR
+	  37: INCEIPL       	$4
+
+	0x25479F20:  40820034  bc 4,2,0x25479F54
+	  38: Jc02o       	$0x25479F54
+
+
+. 0 25479F00 36
+. 38 1D FF BC 6B A3 00 02 21 03 00 00 7C 68 19 14 21 80 00 0A 39 80 00 00 7D 8C 61 14 7D 88 1B 79 40 82 00 34
+
+==== BB 1108 (0x25479F24) approx BBs exec'd 0 ====
+
+	0x25479F24:  2F1D0013  cmpi cr6,r29,19
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x13, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x25479F28:  8124020C  lwz r9,524(r4)
+	   5: GETL       	R4, t6
+	   6: ADDL       	$0x20C, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R9
+	   9: INCEIPL       	$4
+
+	0x25479F2C:  419A00B4  bc 12,26,0x25479FE0
+	  10: Js26o       	$0x25479FE0
+
+
+. 0 25479F24 12
+. 2F 1D 00 13 81 24 02 0C 41 9A 00 B4
+
+==== BB 1109 (0x25479F30) approx BBs exec'd 0 ====
+
+	0x25479F30:  7F8B4800  cmp cr7,r11,r9
+	   0: GETL       	R11, t0
+	   1: GETL       	R9, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25479F34:  409EF988  bc 4,30,0x254798BC
+	   5: Jc30o       	$0x254798BC
+
+
+. 0 25479F30 8
+. 7F 8B 48 00 40 9E F9 88
+
+==== BB 1110 (0x25479F38) approx BBs exec'd 0 ====
+
+	0x25479F38:  817001AC  lwz r11,428(r16)
+	   0: GETL       	R16, t0
+	   1: ADDL       	$0x1AC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25479F3C:  80C40214  lwz r6,532(r4)
+	   5: GETL       	R4, t4
+	   6: ADDL       	$0x214, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25479F40:  83440210  lwz r26,528(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x210, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R26
+	  14: INCEIPL       	$4
+
+	0x25479F44:  394B0001  addi r10,r11,1
+	  15: GETL       	R11, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x25479F48:  915001AC  stw r10,428(r16)
+	  19: GETL       	R10, t14
+	  20: GETL       	R16, t16
+	  21: ADDL       	$0x1AC, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x25479F4C:  90DF0050  stw r6,80(r31)
+	  24: GETL       	R6, t18
+	  25: GETL       	R31, t20
+	  26: ADDL       	$0x50, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x25479F50:  4BFFFA24  b 0x25479974
+	  29: JMPo       	$0x25479974  ($4)
+
+
+. 0 25479F38 28
+. 81 70 01 AC 80 C4 02 14 83 44 02 10 39 4B 00 01 91 50 01 AC 90 DF 00 50 4B FF FA 24
+
+==== BB 1111 (0x25479974) approx BBs exec'd 0 ====
+
+	0x25479974:  2F060000  cmpi cr6,r6,0
+	   0: GETL       	R6, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25479978:  39200000  li r9,0
+	   4: MOVL       	$0x0, t4
+	   5: PUTL       	t4, R9
+	   6: INCEIPL       	$4
+
+	0x2547997C:  419A0010  bc 12,26,0x2547998C
+	   7: Js26o       	$0x2547998C
+
+
+. 0 25479974 12
+. 2F 06 00 00 39 20 00 00 41 9A 00 10
+
+==== BB 1112 (0x254799A8) approx BBs exec'd 0 ====
+
+	0x254799A8:  2C1D0014  cmpi cr0,r29,20
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x14, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254799AC:  418200E4  bc 12,2,0x25479A90
+	   5: Js02o       	$0x25479A90
+
+
+. 0 254799A8 8
+. 2C 1D 00 14 41 82 00 E4
+
+==== BB 1113 (0x25479EBC) approx BBs exec'd 0 ====
+
+	0x25479EBC:  A11B000E  lhz r8,14(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xE, t0
+	   2: LDW       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x25479EC0:  2F880000  cmpi cr7,r8,0
+	   5: GETL       	R8, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25479EC4:  41BEF9D0  bc 13,30,0x25479894
+	   9: Js30o       	$0x25479894
+
+
+. 0 25479EBC 12
+. A1 1B 00 0E 2F 88 00 00 41 BE F9 D0
+
+==== BB 1114 (0x25479A50) approx BBs exec'd 0 ====
+
+	0x25479A50:  835F000C  lwz r26,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479A54:  4BFFFF20  b 0x25479974
+	   5: JMPo       	$0x25479974  ($4)
+
+
+. 0 25479A50 8
+. 83 5F 00 0C 4B FF FF 20
+
+==== BB 1115 (0x25479A98) approx BBs exec'd 0 ====
+
+	0x25479A98:  2F1A0000  cmpi cr6,r26,0
+	   0: GETL       	R26, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x25479A9C:  41BAFF30  bc 13,26,0x254799CC
+	   4: Js26o       	$0x254799CC
+
+
+. 0 25479A98 8
+. 2F 1A 00 00 41 BA FF 30
+
+==== BB 1116 (0x25479AA0) approx BBs exec'd 0 ====
+
+	0x25479AA0:  837A0230  lwz r27,560(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x230, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x25479AA4:  93770000  stw r27,0(r23)
+	   5: GETL       	R27, t4
+	   6: GETL       	R23, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0x25479AA8:  4BFFFF24  b 0x254799CC
+	   9: JMPo       	$0x254799CC  ($4)
+
+
+. 0 25479AA0 12
+. 83 7A 02 30 93 77 00 00 4B FF FF 24
+
+==== BB 1117 (0x25479A6C) approx BBs exec'd 0 ====
+
+	0x25479A6C:  2C9D0049  cmpi cr1,r29,73
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x49, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25479A70:  41860238  bc 12,6,0x25479CA8
+	   5: Js06o       	$0x25479CA8
+
+
+. 0 25479A6C 8
+. 2C 9D 00 49 41 86 02 38
+
+==== BB 1118 (0x25479CA8) approx BBs exec'd 0 ====
+
+	0x25479CA8:  2C1A0000  cmpi cr0,r26,0
+	   0: GETL       	R26, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x25479CAC:  41A2FD20  bc 13,2,0x254799CC
+	   4: Js02o       	$0x254799CC
+
+
+. 0 25479CA8 8
+. 2C 1A 00 00 41 A2 FD 20
+
+==== BB 1119 (0x25479CB0) approx BBs exec'd 0 ====
+
+	0x25479CB0:  817A022C  lwz r11,556(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x22C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25479CB4:  2C8BFFFF  cmpi cr1,r11,-1
+	   5: GETL       	R11, t4
+	   6: MOVL       	$0xFFFFFFFF, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x25479CB8:  4186034C  bc 12,6,0x2547A004
+	  10: Js06o       	$0x2547A004
+
+
+. 0 25479CB0 12
+. 81 7A 02 2C 2C 8B FF FF 41 86 03 4C
+
+==== BB 1120 (0x25479CBC) approx BBs exec'd 0 ====
+
+	0x25479CBC:  83460004  lwz r26,4(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479CC0:  7D8BD214  add r12,r11,r26
+	   5: GETL       	R11, t4
+	   6: GETL       	R26, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R12
+	   9: INCEIPL       	$4
+
+	0x25479CC4:  7C6C0214  add r3,r12,r0
+	  10: GETL       	R12, t8
+	  11: GETL       	R0, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R3
+	  14: INCEIPL       	$4
+
+	0x25479CC8:  39239000  addi r9,r3,-28672
+	  15: GETL       	R3, t12
+	  16: ADDL       	$0xFFFF9000, t12
+	  17: PUTL       	t12, R9
+	  18: INCEIPL       	$4
+
+	0x25479CCC:  91370000  stw r9,0(r23)
+	  19: GETL       	R9, t14
+	  20: GETL       	R23, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x25479CD0:  4BFFFCFC  b 0x254799CC
+	  23: JMPo       	$0x254799CC  ($4)
+
+
+. 0 25479CBC 24
+. 83 46 00 04 7D 8B D2 14 7C 6C 02 14 39 23 90 00 91 37 00 00 4B FF FC FC
+
+==== BB 1121 (0x254783C4) approx BBs exec'd 0 ====
+
+	0x254783C4:  2C000002  cmpi cr0,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x254783C8:  41820164  bc 12,2,0x2547852C
+	   5: Js02o       	$0x2547852C
+
+
+. 0 254783C4 8
+. 2C 00 00 02 41 82 01 64
+
+==== BB 1122 (0x2547852C) approx BBs exec'd 0 ====
+
+	0x2547852C:  829E04F4  lwz r20,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R20
+	   4: INCEIPL       	$4
+
+	0x25478530:  82740030  lwz r19,48(r20)
+	   5: GETL       	R20, t4
+	   6: ADDL       	$0x30, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R19
+	   9: INCEIPL       	$4
+
+	0x25478534:  2C930000  cmpi cr1,r19,0
+	  10: GETL       	R19, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25478538:  40860018  bc 4,6,0x25478550
+	  14: Jc06o       	$0x25478550
+
+
+. 0 2547852C 16
+. 82 9E 04 F4 82 74 00 30 2C 93 00 00 40 86 00 18
+
+==== BB 1123 (0x254783F0) approx BBs exec'd 0 ====
+
+	0x254783F0:  7F84E378  or r4,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x254783F4:  480042E1  bl 0x2547C6D4
+	   3: MOVL       	$0x254783F8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547C6D4  ($4)
+
+
+. 0 254783F0 8
+. 7F 84 E3 78 48 00 42 E1
+
+==== BB 1124 (0x254783F8) approx BBs exec'd 0 ====
+
+	0x254783F8:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x254783FC:  419AFE44  bc 12,26,0x25478240
+	   4: Js26o       	$0x25478240
+
+
+. 0 254783F8 8
+. 2F 03 00 00 41 9A FE 44
+
+==== BB 1125 (0x25479938) approx BBs exec'd 0 ====
+
+	0x25479938:  80DF0050  lwz r6,80(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547993C:  7D695B78  or r9,r11,r11
+	   5: GETL       	R11, t4
+	   6: PUTL       	t4, R9
+	   7: INCEIPL       	$4
+
+	0x25479940:  815F0008  lwz r10,8(r31)
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x25479944:  80A60000  lwz r5,0(r6)
+	  13: GETL       	R6, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R5
+	  16: INCEIPL       	$4
+
+	0x25479948:  80DF0010  lwz r6,16(r31)
+	  17: GETL       	R31, t14
+	  18: ADDL       	$0x10, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R6
+	  21: INCEIPL       	$4
+
+	0x2547994C:  7C655214  add r3,r5,r10
+	  22: GETL       	R5, t18
+	  23: GETL       	R10, t20
+	  24: ADDL       	t18, t20
+	  25: PUTL       	t20, R3
+	  26: INCEIPL       	$4
+
+	0x25479950:  38BF0050  addi r5,r31,80
+	  27: GETL       	R31, t22
+	  28: ADDL       	$0x50, t22
+	  29: PUTL       	t22, R5
+	  30: INCEIPL       	$4
+
+	0x25479954:  39400000  li r10,0
+	  31: MOVL       	$0x0, t24
+	  32: PUTL       	t24, R10
+	  33: INCEIPL       	$4
+
+	0x25479958:  4BFFEC4D  bl 0x254785A4
+	  34: MOVL       	$0x2547995C, t26
+	  35: PUTL       	t26, LR
+	  36: JMPo-c       	$0x254785A4  ($4)
+
+
+. 0 25479938 36
+. 80 DF 00 50 7D 69 5B 78 81 5F 00 08 80 A6 00 00 80 DF 00 10 7C 65 52 14 38 BF 00 50 39 40 00 00 4B FF EC 4D
+
+==== BB 1126 (0x25478424) approx BBs exec'd 0 ====
+
+	0x25478424:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25478428:  81E10074  lwz r15,116(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x74, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R15
+	   7: INCEIPL       	$4
+
+	0x2547842C:  81810024  lwz r12,36(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x24, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R12
+	  12: INCEIPL       	$4
+
+	0x25478430:  7DE803A6  mtlr r15
+	  13: GETL       	R15, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x25478434:  81C10028  lwz r14,40(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x28, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R14
+	  20: INCEIPL       	$4
+
+	0x25478438:  81E1002C  lwz r15,44(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x2C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R15
+	  25: INCEIPL       	$4
+
+	0x2547843C:  7D818120  mtcrf 0x18,r12
+	  26: GETL       	R12, t20
+	  27: ICRFL       	t20, $0x3, CR
+	  28: ICRFL       	t20, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x25478440:  82010030  lwz r16,48(r1)
+	  30: GETL       	R1, t22
+	  31: ADDL       	$0x30, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R16
+	  34: INCEIPL       	$4
+
+	0x25478444:  82210034  lwz r17,52(r1)
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x34, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R17
+	  39: INCEIPL       	$4
+
+	0x25478448:  82410038  lwz r18,56(r1)
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x38, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R18
+	  44: INCEIPL       	$4
+
+	0x2547844C:  8261003C  lwz r19,60(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x3C, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R19
+	  49: INCEIPL       	$4
+
+	0x25478450:  82810040  lwz r20,64(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x40, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R20
+	  54: INCEIPL       	$4
+
+	0x25478454:  82A10044  lwz r21,68(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x44, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R21
+	  59: INCEIPL       	$4
+
+	0x25478458:  82C10048  lwz r22,72(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x48, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R22
+	  64: INCEIPL       	$4
+
+	0x2547845C:  82E1004C  lwz r23,76(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x4C, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R23
+	  69: INCEIPL       	$4
+
+	0x25478460:  83010050  lwz r24,80(r1)
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x50, t54
+	  72: LDL       	(t54), t56
+	  73: PUTL       	t56, R24
+	  74: INCEIPL       	$4
+
+	0x25478464:  83210054  lwz r25,84(r1)
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x54, t58
+	  77: LDL       	(t58), t60
+	  78: PUTL       	t60, R25
+	  79: INCEIPL       	$4
+
+	0x25478468:  83410058  lwz r26,88(r1)
+	  80: GETL       	R1, t62
+	  81: ADDL       	$0x58, t62
+	  82: LDL       	(t62), t64
+	  83: PUTL       	t64, R26
+	  84: INCEIPL       	$4
+
+	0x2547846C:  8361005C  lwz r27,92(r1)
+	  85: GETL       	R1, t66
+	  86: ADDL       	$0x5C, t66
+	  87: LDL       	(t66), t68
+	  88: PUTL       	t68, R27
+	  89: INCEIPL       	$4
+
+	0x25478470:  83810060  lwz r28,96(r1)
+	  90: GETL       	R1, t70
+	  91: ADDL       	$0x60, t70
+	  92: LDL       	(t70), t72
+	  93: PUTL       	t72, R28
+	  94: INCEIPL       	$4
+
+	0x25478474:  83A10064  lwz r29,100(r1)
+	  95: GETL       	R1, t74
+	  96: ADDL       	$0x64, t74
+	  97: LDL       	(t74), t76
+	  98: PUTL       	t76, R29
+	  99: INCEIPL       	$4
+
+	0x25478478:  83C10068  lwz r30,104(r1)
+	 100: GETL       	R1, t78
+	 101: ADDL       	$0x68, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R30
+	 104: INCEIPL       	$4
+
+	0x2547847C:  83E1006C  lwz r31,108(r1)
+	 105: GETL       	R1, t82
+	 106: ADDL       	$0x6C, t82
+	 107: LDL       	(t82), t84
+	 108: PUTL       	t84, R31
+	 109: INCEIPL       	$4
+
+	0x25478480:  38210070  addi r1,r1,112
+	 110: GETL       	R1, t86
+	 111: ADDL       	$0x70, t86
+	 112: PUTL       	t86, R1
+	 113: INCEIPL       	$4
+
+	0x25478484:  4E800020  blr
+	 114: GETL       	LR, t88
+	 115: JMPo-r       	t88  ($4)
+
+
+. 0 25478424 100
+. 38 60 00 00 81 E1 00 74 81 81 00 24 7D E8 03 A6 81 C1 00 28 81 E1 00 2C 7D 81 81 20 82 01 00 30 82 21 00 34 82 41 00 38 82 61 00 3C 82 81 00 40 82 A1 00 44 82 C1 00 48 82 E1 00 4C 83 01 00 50 83 21 00 54 83 41 00 58 83 61 00 5C 83 81 00 60 83 A1 00 64 83 C1 00 68 83 E1 00 6C 38 21 00 70 4E 80 00 20
+
+==== BB 1127 (0x254786BC) approx BBs exec'd 0 ====
+
+	0x254786BC:  4180016C  bc 12,0,0x25478828
+	   0: Js00o       	$0x25478828
+
+
+. 0 254786BC 4
+. 41 80 01 6C
+
+==== BB 1128 (0x254786C0) approx BBs exec'd 0 ====
+
+	0x254786C0:  84FD0004  lwzu r7,4(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R29
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x254786C4:  2F070000  cmpi cr6,r7,0
+	   6: GETL       	R7, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x254786C8:  409AFFC4  bc 4,26,0x2547868C
+	  10: Jc26o       	$0x2547868C
+
+
+. 0 254786C0 12
+. 84 FD 00 04 2F 07 00 00 40 9A FF C4
+
+==== BB 1129 (0x25478BD8) approx BBs exec'd 0 ====
+
+	0x25478BD8:  81360000  lwz r9,0(r22)
+	   0: GETL       	R22, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x25478BDC:  2C090000  cmpi cr0,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x0, CR
+	   7: INCEIPL       	$4
+
+	0x25478BE0:  408201B8  bc 4,2,0x25478D98
+	   8: Jc02o       	$0x25478D98
+
+
+. 0 25478BD8 12
+. 81 36 00 00 2C 09 00 00 40 82 01 B8
+
+==== BB 1130 (0x25478D98) approx BBs exec'd 0 ====
+
+	0x25478D98:  8BA9000C  lbz r29,12(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x25478D9C:  57BBE13E  rlwinm r27,r29,28,4,31
+	   5: GETL       	R29, t4
+	   6: SHRL       	$0x4, t4
+	   7: PUTL       	t4, R27
+	   8: INCEIPL       	$4
+
+	0x25478DA0:  2C9B0002  cmpi cr1,r27,2
+	   9: GETL       	R27, t6
+	  10: MOVL       	$0x2, t10
+	  11: CMPL       	t6, t10, t8  (-rSo)
+	  12: ICRFL       	t8, $0x1, CR
+	  13: INCEIPL       	$4
+
+	0x25478DA4:  4086FE40  bc 4,6,0x25478BE4
+	  14: Jc06o       	$0x25478BE4
+
+
+. 0 25478D98 16
+. 8B A9 00 0C 57 BB E1 3E 2C 9B 00 02 40 86 FE 40
+
+==== BB 1131 (0x25478DA8) approx BBs exec'd 0 ====
+
+	0x25478DA8:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25478DAC:  38000000  li r0,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x25478DB0:  4BFFF990  b 0x25478740
+	   6: JMPo       	$0x25478740  ($4)
+
+
+. 0 25478DA8 12
+. 38 60 00 00 38 00 00 00 4B FF F9 90
+
+==== BB 1132 (0x25478740) approx BBs exec'd 0 ====
+
+	0x25478740:  90160000  stw r0,0(r22)
+	   0: GETL       	R0, t0
+	   1: GETL       	R22, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25478744:  80E10000  lwz r7,0(r1)
+	   4: GETL       	R1, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R7
+	   7: INCEIPL       	$4
+
+	0x25478748:  82C70004  lwz r22,4(r7)
+	   8: GETL       	R7, t8
+	   9: ADDL       	$0x4, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R22
+	  12: INCEIPL       	$4
+
+	0x2547874C:  8107FFC0  lwz r8,-64(r7)
+	  13: GETL       	R7, t12
+	  14: ADDL       	$0xFFFFFFC0, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R8
+	  17: INCEIPL       	$4
+
+	0x25478750:  7EC803A6  mtlr r22
+	  18: GETL       	R22, t16
+	  19: PUTL       	t16, LR
+	  20: INCEIPL       	$4
+
+	0x25478754:  8227FFC4  lwz r17,-60(r7)
+	  21: GETL       	R7, t18
+	  22: ADDL       	$0xFFFFFFC4, t18
+	  23: LDL       	(t18), t20
+	  24: PUTL       	t20, R17
+	  25: INCEIPL       	$4
+
+	0x25478758:  8247FFC8  lwz r18,-56(r7)
+	  26: GETL       	R7, t22
+	  27: ADDL       	$0xFFFFFFC8, t22
+	  28: LDL       	(t22), t24
+	  29: PUTL       	t24, R18
+	  30: INCEIPL       	$4
+
+	0x2547875C:  7D008120  mtcrf 0x8,r8
+	  31: GETL       	R8, t26
+	  32: ICRFL       	t26, $0x4, CR
+	  33: INCEIPL       	$4
+
+	0x25478760:  8267FFCC  lwz r19,-52(r7)
+	  34: GETL       	R7, t28
+	  35: ADDL       	$0xFFFFFFCC, t28
+	  36: LDL       	(t28), t30
+	  37: PUTL       	t30, R19
+	  38: INCEIPL       	$4
+
+	0x25478764:  8287FFD0  lwz r20,-48(r7)
+	  39: GETL       	R7, t32
+	  40: ADDL       	$0xFFFFFFD0, t32
+	  41: LDL       	(t32), t34
+	  42: PUTL       	t34, R20
+	  43: INCEIPL       	$4
+
+	0x25478768:  82A7FFD4  lwz r21,-44(r7)
+	  44: GETL       	R7, t36
+	  45: ADDL       	$0xFFFFFFD4, t36
+	  46: LDL       	(t36), t38
+	  47: PUTL       	t38, R21
+	  48: INCEIPL       	$4
+
+	0x2547876C:  82C7FFD8  lwz r22,-40(r7)
+	  49: GETL       	R7, t40
+	  50: ADDL       	$0xFFFFFFD8, t40
+	  51: LDL       	(t40), t42
+	  52: PUTL       	t42, R22
+	  53: INCEIPL       	$4
+
+	0x25478770:  82E7FFDC  lwz r23,-36(r7)
+	  54: GETL       	R7, t44
+	  55: ADDL       	$0xFFFFFFDC, t44
+	  56: LDL       	(t44), t46
+	  57: PUTL       	t46, R23
+	  58: INCEIPL       	$4
+
+	0x25478774:  8307FFE0  lwz r24,-32(r7)
+	  59: GETL       	R7, t48
+	  60: ADDL       	$0xFFFFFFE0, t48
+	  61: LDL       	(t48), t50
+	  62: PUTL       	t50, R24
+	  63: INCEIPL       	$4
+
+	0x25478778:  8327FFE4  lwz r25,-28(r7)
+	  64: GETL       	R7, t52
+	  65: ADDL       	$0xFFFFFFE4, t52
+	  66: LDL       	(t52), t54
+	  67: PUTL       	t54, R25
+	  68: INCEIPL       	$4
+
+	0x2547877C:  8347FFE8  lwz r26,-24(r7)
+	  69: GETL       	R7, t56
+	  70: ADDL       	$0xFFFFFFE8, t56
+	  71: LDL       	(t56), t58
+	  72: PUTL       	t58, R26
+	  73: INCEIPL       	$4
+
+	0x25478780:  8367FFEC  lwz r27,-20(r7)
+	  74: GETL       	R7, t60
+	  75: ADDL       	$0xFFFFFFEC, t60
+	  76: LDL       	(t60), t62
+	  77: PUTL       	t62, R27
+	  78: INCEIPL       	$4
+
+	0x25478784:  8387FFF0  lwz r28,-16(r7)
+	  79: GETL       	R7, t64
+	  80: ADDL       	$0xFFFFFFF0, t64
+	  81: LDL       	(t64), t66
+	  82: PUTL       	t66, R28
+	  83: INCEIPL       	$4
+
+	0x25478788:  83A7FFF4  lwz r29,-12(r7)
+	  84: GETL       	R7, t68
+	  85: ADDL       	$0xFFFFFFF4, t68
+	  86: LDL       	(t68), t70
+	  87: PUTL       	t70, R29
+	  88: INCEIPL       	$4
+
+	0x2547878C:  83C7FFF8  lwz r30,-8(r7)
+	  89: GETL       	R7, t72
+	  90: ADDL       	$0xFFFFFFF8, t72
+	  91: LDL       	(t72), t74
+	  92: PUTL       	t74, R30
+	  93: INCEIPL       	$4
+
+	0x25478790:  83E7FFFC  lwz r31,-4(r7)
+	  94: GETL       	R7, t76
+	  95: ADDL       	$0xFFFFFFFC, t76
+	  96: LDL       	(t76), t78
+	  97: PUTL       	t78, R31
+	  98: INCEIPL       	$4
+
+	0x25478794:  7CE13B78  or r1,r7,r7
+	  99: GETL       	R7, t80
+	 100: PUTL       	t80, R1
+	 101: INCEIPL       	$4
+
+	0x25478798:  4E800020  blr
+	 102: GETL       	LR, t82
+	 103: JMPo-r       	t82  ($4)
+
+
+. 0 25478740 92
+. 90 16 00 00 80 E1 00 00 82 C7 00 04 81 07 FF C0 7E C8 03 A6 82 27 FF C4 82 47 FF C8 7D 00 81 20 82 67 FF CC 82 87 FF D0 82 A7 FF D4 82 C7 FF D8 82 E7 FF DC 83 07 FF E0 83 27 FF E4 83 47 FF E8 83 67 FF EC 83 87 FF F0 83 A7 FF F4 83 C7 FF F8 83 E7 FF FC 7C E1 3B 78 4E 80 00 20
+
+==== BB 1133 (0x2547998C) approx BBs exec'd 0 ====
+
+	0x2547998C:  2F9D0044  cmpi cr7,r29,68
+	   0: GETL       	R29, t0
+	   1: MOVL       	$0x44, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25479990:  801C0008  lwz r0,8(r28)
+	   5: GETL       	R28, t6
+	   6: ADDL       	$0x8, t6
+	   7: LDL       	(t6), t8
+	   8: PUTL       	t8, R0
+	   9: INCEIPL       	$4
+
+	0x25479994:  7D290214  add r9,r9,r0
+	  10: GETL       	R9, t10
+	  11: GETL       	R0, t12
+	  12: ADDL       	t10, t12
+	  13: PUTL       	t12, R9
+	  14: INCEIPL       	$4
+
+	0x25479998:  419E0100  bc 12,30,0x25479A98
+	  15: Js30o       	$0x25479A98
+
+
+. 0 2547998C 16
+. 2F 9D 00 44 80 1C 00 08 7D 29 02 14 41 9E 01 00
+
+==== BB 1134 (0x254784A0) approx BBs exec'd 0 ====
+
+	0x254784A0:  41AEFF18  bc 13,14,0x254783B8
+	   0: Js14o       	$0x254783B8
+
+
+. 0 254784A0 4
+. 41 AE FF 18
+
+==== BB 1135 (0x254784A4) approx BBs exec'd 0 ====
+
+	0x254784A4:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x254784A8:  5469FFFF  rlwinm. r9,r3,31,31,31
+	   5: GETL       	R3, t4
+	   6: ROLL       	$0x1F, t4
+	   7: ANDL       	$0x1, t4
+	   8: PUTL       	t4, R9
+	   9: CMP0L       	t4, t6  (-rSo)
+	  10: ICRFL       	t6, $0x0, CR
+	  11: INCEIPL       	$4
+
+	0x254784AC:  4182006C  bc 12,2,0x25478518
+	  12: Js02o       	$0x25478518
+
+
+. 0 254784A4 12
+. 80 61 00 14 54 69 FF FF 41 82 00 6C
+
+==== BB 1136 (0x25478518) approx BBs exec'd 0 ====
+
+	0x25478518:  57E9083C  rlwinm r9,r31,1,0,30
+	   0: GETL       	R31, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x2547851C:  7CC9B22E  lhzx r6,r9,r22
+	   4: GETL       	R22, t2
+	   5: GETL       	R9, t4
+	   6: ADDL       	t4, t2
+	   7: LDW       	(t2), t6
+	   8: PUTL       	t6, R6
+	   9: INCEIPL       	$4
+
+	0x25478520:  54C5047E  rlwinm r5,r6,0,17,31
+	  10: GETL       	R6, t8
+	  11: ANDL       	$0x7FFF, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0x25478524:  2F850002  cmpi cr7,r5,2
+	  14: GETL       	R5, t10
+	  15: MOVL       	$0x2, t14
+	  16: CMPL       	t10, t14, t12  (-rSo)
+	  17: ICRFL       	t12, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x25478528:  4BFFFF98  b 0x254784C0
+	  19: JMPo       	$0x254784C0  ($4)
+
+
+. 0 25478518 20
+. 57 E9 08 3C 7C C9 B2 2E 54 C5 04 7E 2F 85 00 02 4B FF FF 98
+
+==== BB 1137 (0x254784C0) approx BBs exec'd 0 ====
+
+	0x254784C0:  40BDFEF8  bc 5,29,0x254783B8
+	   0: Jc29o       	$0x254783B8
+
+
+. 0 254784C0 4
+. 40 BD FE F8
+
+==== BB 1138 (0x25479A04) approx BBs exec'd 0 ====
+
+	0x25479A04:  39000001  li r8,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0x25479A08:  4BFFFEF4  b 0x254798FC
+	   3: JMPo       	$0x254798FC  ($4)
+
+
+. 0 25479A04 8
+. 39 00 00 01 4B FF FE F4
+
+==== BB 1139 (0x254799B0) approx BBs exec'd 0 ====
+
+	0x254799B0:  7F45D378  or r5,r26,r26
+	   0: GETL       	R26, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x254799B4:  7F67DB78  or r7,r27,r27
+	   3: GETL       	R27, t2
+	   4: PUTL       	t2, R7
+	   5: INCEIPL       	$4
+
+	0x254799B8:  7EE8BB78  or r8,r23,r23
+	   6: GETL       	R23, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x254799BC:  7FAAEB78  or r10,r29,r29
+	   9: GETL       	R29, t6
+	  10: PUTL       	t6, R10
+	  11: INCEIPL       	$4
+
+	0x254799C0:  7EC3B378  or r3,r22,r22
+	  12: GETL       	R22, t8
+	  13: PUTL       	t8, R3
+	  14: INCEIPL       	$4
+
+	0x254799C4:  7F84E378  or r4,r28,r28
+	  15: GETL       	R28, t10
+	  16: PUTL       	t10, R4
+	  17: INCEIPL       	$4
+
+	0x254799C8:  480072E1  bl 0x25480CA8
+	  18: MOVL       	$0x254799CC, t12
+	  19: PUTL       	t12, LR
+	  20: JMPo-c       	$0x25480CA8  ($4)
+
+
+. 0 254799B0 28
+. 7F 45 D3 78 7F 67 DB 78 7E E8 BB 78 7F AA EB 78 7E C3 B3 78 7F 84 E3 78 48 00 72 E1
+
+==== BB 1140 (0x25480FF0) approx BBs exec'd 0 ====
+
+	0x25480FF0:  3D3AFE00  addis r9,r26,-512
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0xFE000000, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25480FF4:  3D60FC00  lis r11,-1024
+	   4: MOVL       	$0xFC000000, t2
+	   5: PUTL       	t2, R11
+	   6: INCEIPL       	$4
+
+	0x25480FF8:  38690003  addi r3,r9,3
+	   7: GETL       	R9, t4
+	   8: ADDL       	$0x3, t4
+	   9: PUTL       	t4, R3
+	  10: INCEIPL       	$4
+
+	0x25480FFC:  61600002  ori r0,r11,0x2
+	  11: MOVL       	$0xFC000002, t6
+	  12: PUTL       	t6, R0
+	  13: INCEIPL       	$4
+
+	0x25481000:  7C030040  cmpl cr0,r3,r0
+	  14: GETL       	R3, t8
+	  15: GETL       	R0, t10
+	  16: CMPUL       	t8, t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x25481004:  40810194  bc 4,1,0x25481198
+	  19: Jc01o       	$0x25481198
+
+
+. 0 25480FF0 24
+. 3D 3A FE 00 3D 60 FC 00 38 69 00 03 61 60 00 02 7C 03 00 40 40 81 01 94
+
+==== BB 1141 (0x25481198) approx BBs exec'd 0 ====
+
+	0x25481198:  81610008  lwz r11,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2548119C:  814B002C  lwz r10,44(r11)
+	   5: GETL       	R11, t4
+	   6: ADDL       	$0x2C, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R10
+	   9: INCEIPL       	$4
+
+	0x254811A0:  810A0004  lwz r8,4(r10)
+	  10: GETL       	R10, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R8
+	  14: INCEIPL       	$4
+
+	0x254811A4:  7CE82850  subf r7,r8,r5
+	  15: GETL       	R8, t12
+	  16: GETL       	R5, t14
+	  17: SUBL       	t12, t14
+	  18: PUTL       	t14, R7
+	  19: INCEIPL       	$4
+
+	0x254811A8:  7CEA1670  srawi r10,r7,2
+	  20: GETL       	R7, t16
+	  21: SARL       	$0x2, t16  (-wCa)
+	  22: PUTL       	t16, R10
+	  23: INCEIPL       	$4
+
+	0x254811AC:  288A4011  cmpli cr1,r10,16401
+	  24: GETL       	R10, t18
+	  25: MOVL       	$0x4011, t22
+	  26: CMPUL       	t18, t22, t20  (-rSo)
+	  27: ICRFL       	t20, $0x1, CR
+	  28: INCEIPL       	$4
+
+	0x254811B0:  418500A0  bc 12,5,0x25481250
+	  29: Js05o       	$0x25481250
+
+
+. 0 25481198 28
+. 81 61 00 08 81 4B 00 2C 81 0A 00 04 7C E8 28 50 7C EA 16 70 28 8A 40 11 41 85 00 A0
+
+==== BB 1142 (0x254811B4) approx BBs exec'd 0 ====
+
+	0x254811B4:  816B0028  lwz r11,40(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x28, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254811B8:  3C60AAAA  lis r3,-21846
+	   5: MOVL       	$0xAAAA0000, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254811BC:  6060AAAB  ori r0,r3,0xAAAB
+	   8: MOVL       	$0xAAAAAAAB, t6
+	   9: PUTL       	t6, R0
+	  10: INCEIPL       	$4
+
+	0x254811C0:  3BAAFFEE  addi r29,r10,-18
+	  11: GETL       	R10, t8
+	  12: ADDL       	$0xFFFFFFEE, t8
+	  13: PUTL       	t8, R29
+	  14: INCEIPL       	$4
+
+	0x254811C4:  83EB0004  lwz r31,4(r11)
+	  15: GETL       	R11, t10
+	  16: ADDL       	$0x4, t10
+	  17: LDL       	(t10), t12
+	  18: PUTL       	t12, R31
+	  19: INCEIPL       	$4
+
+	0x254811C8:  57ABF87E  rlwinm r11,r29,31,1,31
+	  20: GETL       	R29, t14
+	  21: SHRL       	$0x1, t14
+	  22: PUTL       	t14, R11
+	  23: INCEIPL       	$4
+
+	0x254811CC:  7F7F0016  mulhwu r27,r31,r0
+	  24: GETL       	R31, t16
+	  25: GETL       	R0, t18
+	  26: UMULHL       	t16, t18
+	  27: PUTL       	t18, R27
+	  28: INCEIPL       	$4
+
+	0x254811D0:  577CE8FE  rlwinm r28,r27,29,3,31
+	  29: GETL       	R27, t20
+	  30: SHRL       	$0x3, t20
+	  31: PUTL       	t20, R28
+	  32: INCEIPL       	$4
+
+	0x254811D4:  2B1C2000  cmpli cr6,r28,8192
+	  33: GETL       	R28, t22
+	  34: MOVL       	$0x2000, t26
+	  35: CMPUL       	t22, t26, t24  (-rSo)
+	  36: ICRFL       	t24, $0x6, CR
+	  37: INCEIPL       	$4
+
+	0x254811D8:  5787083C  rlwinm r7,r28,1,0,30
+	  38: GETL       	R28, t28
+	  39: SHLL       	$0x1, t28
+	  40: PUTL       	t28, R7
+	  41: INCEIPL       	$4
+
+	0x254811DC:  39270012  addi r9,r7,18
+	  42: GETL       	R7, t30
+	  43: ADDL       	$0x12, t30
+	  44: PUTL       	t30, R9
+	  45: INCEIPL       	$4
+
+	0x254811E0:  552C103A  rlwinm r12,r9,2,0,29
+	  46: GETL       	R9, t32
+	  47: SHLL       	$0x2, t32
+	  48: PUTL       	t32, R12
+	  49: INCEIPL       	$4
+
+	0x254811E4:  7CCC4214  add r6,r12,r8
+	  50: GETL       	R12, t34
+	  51: GETL       	R8, t36
+	  52: ADDL       	t34, t36
+	  53: PUTL       	t36, R6
+	  54: INCEIPL       	$4
+
+	0x254811E8:  40990014  bc 4,25,0x254811FC
+	  55: Jc25o       	$0x254811FC
+
+
+. 0 254811B4 56
+. 81 6B 00 28 3C 60 AA AA 60 60 AA AB 3B AA FF EE 83 EB 00 04 57 AB F8 7E 7F 7F 00 16 57 7C E8 FE 2B 1C 20 00 57 87 08 3C 39 27 00 12 55 2C 10 3A 7C CC 42 14 40 99 00 14
+
+==== BB 1143 (0x254811FC) approx BBs exec'd 0 ====
+
+	0x254811FC:  1FEAFFFC  mulli r31,r10,-4
+	   0: GETL       	R10, t0
+	   1: MULL       	$0xFFFFFFFC, t0
+	   2: PUTL       	t0, R31
+	   3: INCEIPL       	$4
+
+	0x25481200:  556C103A  rlwinm r12,r11,2,0,29
+	   4: GETL       	R11, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R12
+	   7: INCEIPL       	$4
+
+	0x25481204:  559B043E  rlwinm r27,r12,0,16,31
+	   8: GETL       	R12, t4
+	   9: ANDL       	$0xFFFF, t4
+	  10: PUTL       	t4, R27
+	  11: INCEIPL       	$4
+
+	0x25481208:  7F4C312E  stwx r26,r12,r6
+	  12: GETL       	R6, t6
+	  13: GETL       	R12, t8
+	  14: ADDL       	t8, t6
+	  15: GETL       	R26, t10
+	  16: STL       	t10, (t6)
+	  17: INCEIPL       	$4
+
+	0x2548120C:  3BBFFFFC  addi r29,r31,-4
+	  18: GETL       	R31, t12
+	  19: ADDL       	$0xFFFFFFFC, t12
+	  20: PUTL       	t12, R29
+	  21: INCEIPL       	$4
+
+	0x25481210:  676A3960  oris r10,r27,0x3960
+	  22: GETL       	R27, t14
+	  23: ORL       	$0x39600000, t14
+	  24: PUTL       	t14, R10
+	  25: INCEIPL       	$4
+
+	0x25481214:  57BC01BA  rlwinm r28,r29,0,6,29
+	  26: GETL       	R29, t16
+	  27: ANDL       	$0x3FFFFFC, t16
+	  28: PUTL       	t16, R28
+	  29: INCEIPL       	$4
+
+	0x25481218:  91450000  stw r10,0(r5)
+	  30: GETL       	R10, t18
+	  31: GETL       	R5, t20
+	  32: STL       	t18, (t20)
+	  33: INCEIPL       	$4
+
+	0x2548121C:  67874800  oris r7,r28,0x4800
+	  34: GETL       	R28, t22
+	  35: ORL       	$0x48000000, t22
+	  36: PUTL       	t22, R7
+	  37: INCEIPL       	$4
+
+	0x25481220:  94E50004  stwu r7,4(r5)
+	  38: GETL       	R7, t24
+	  39: GETL       	R5, t26
+	  40: ADDL       	$0x4, t26
+	  41: PUTL       	t26, R5
+	  42: STL       	t24, (t26)
+	  43: INCEIPL       	$4
+
+	0x25481224:  7C00286C  dcbst r0,r5
+	  44: INCEIPL       	$4
+
+	0x25481228:  7C0004AC  sync
+	  45: INCEIPL       	$4
+
+	0x2548122C:  80A1000C  lwz r5,12(r1)
+	  46: GETL       	R1, t28
+	  47: ADDL       	$0xC, t28
+	  48: LDL       	(t28), t30
+	  49: PUTL       	t30, R5
+	  50: INCEIPL       	$4
+
+	0x25481230:  39050004  addi r8,r5,4
+	  51: GETL       	R5, t32
+	  52: ADDL       	$0x4, t32
+	  53: PUTL       	t32, R8
+	  54: INCEIPL       	$4
+
+	0x25481234:  7C0047AC  icbi r0,r8
+	  55: GETL       	R8, t34
+	  56: CALLM_So       	
+	  57: PUSHL       	t34
+	  58: CALLMo       	$0x68
+	  59: CALLM_Eo       	
+	  60: INCEIPL       	$4
+
+	0x25481238:  80A1000C  lwz r5,12(r1)
+	  61: GETL       	R1, t36
+	  62: ADDL       	$0xC, t36
+	  63: LDL       	(t36), t38
+	  64: PUTL       	t38, R5
+	  65: INCEIPL       	$4
+
+	0x2548123C:  4BFFFBC4  b 0x25480E00
+	  66: JMPo       	$0x25480E00  ($4)
+
+
+. 0 254811FC 68
+. 1F EA FF FC 55 6C 10 3A 55 9B 04 3E 7F 4C 31 2E 3B BF FF FC 67 6A 39 60 57 BC 01 BA 91 45 00 00 67 87 48 00 94 E5 00 04 7C 00 28 6C 7C 00 04 AC 80 A1 00 0C 39 05 00 04 7C 00 47 AC 80 A1 00 0C 4B FF FB C4
+
+==== BB 1144 (0x254799D8) approx BBs exec'd 0 ====
+
+	0x254799D8:  83BF000C  lwz r29,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x254799DC:  4BFFFD44  b 0x25479720
+	   5: JMPo       	$0x25479720  ($4)
+
+
+. 0 254799D8 8
+. 83 BF 00 0C 4B FF FD 44
+
+==== BB 1145 (0x25479720) approx BBs exec'd 0 ====
+
+	0x25479720:  3652FFFF  addic. r18,r18,-1
+	   0: GETL       	R18, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R18
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x25479724:  3AB5000C  addi r21,r21,12
+	   6: GETL       	R21, t4
+	   7: ADDL       	$0xC, t4
+	   8: PUTL       	t4, R21
+	   9: INCEIPL       	$4
+
+	0x25479728:  4080FFC0  bc 4,0,0x254796E8
+	  10: Jc00o       	$0x254796E8
+
+
+. 0 25479720 12
+. 36 52 FF FF 3A B5 00 0C 40 80 FF C0
+
+==== BB 1146 (0x254796E8) approx BBs exec'd 0 ====
+
+	0x254796E8:  80B50020  lwz r5,32(r21)
+	   0: GETL       	R21, t0
+	   1: ADDL       	$0x20, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x254796EC:  7FB6EB78  or r22,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0x254796F0:  81750018  lwz r11,24(r21)
+	   8: GETL       	R21, t6
+	   9: ADDL       	$0x18, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x254796F4:  2C050000  cmpi cr0,r5,0
+	  13: GETL       	R5, t10
+	  14: CMP0L       	t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x0, CR
+	  16: INCEIPL       	$4
+
+	0x254796F8:  8115001C  lwz r8,28(r21)
+	  17: GETL       	R21, t14
+	  18: ADDL       	$0x1C, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R8
+	  21: INCEIPL       	$4
+
+	0x254796FC:  7D695B78  or r9,r11,r11
+	  22: GETL       	R11, t18
+	  23: PUTL       	t18, R9
+	  24: INCEIPL       	$4
+
+	0x25479700:  833D0000  lwz r25,0(r29)
+	  25: GETL       	R29, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R25
+	  28: INCEIPL       	$4
+
+	0x25479704:  7F0B4214  add r24,r11,r8
+	  29: GETL       	R11, t24
+	  30: GETL       	R8, t26
+	  31: ADDL       	t24, t26
+	  32: PUTL       	t26, R24
+	  33: INCEIPL       	$4
+
+	0x25479708:  418200BC  bc 12,2,0x254797C4
+	  34: Js02o       	$0x254797C4
+
+
+. 0 254796E8 36
+. 80 B5 00 20 7F B6 EB 78 81 75 00 18 2C 05 00 00 81 15 00 1C 7D 69 5B 78 83 3D 00 00 7F 0B 42 14 41 82 00 BC
+
+==== BB 1147 (0x2547970C) approx BBs exec'd 0 ====
+
+	0x2547970C:  7F8BC040  cmpl cr7,r11,r24
+	   0: GETL       	R11, t0
+	   1: GETL       	R24, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x25479710:  409C0010  bc 4,28,0x25479720
+	   5: Jc28o       	$0x25479720
+
+
+. 0 2547970C 8
+. 7F 8B C0 40 40 9C 00 10
+
+==== BB 1148 (0x254797FC) approx BBs exec'd 0 ====
+
+	0x254797FC:  3B7001B8  addi r27,r16,440
+	   0: GETL       	R16, t0
+	   1: ADDL       	$0x1B8, t0
+	   2: PUTL       	t0, R27
+	   3: INCEIPL       	$4
+
+	0x25479800:  1CE9000C  mulli r7,r9,12
+	   4: GETL       	R9, t2
+	   5: MULL       	$0xC, t2
+	   6: PUTL       	t2, R7
+	   7: INCEIPL       	$4
+
+	0x25479804:  7E1DD800  cmp cr4,r29,r27
+	   8: GETL       	R29, t4
+	   9: GETL       	R27, t6
+	  10: CMPL       	t4, t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0x25479808:  7F8B3A14  add r28,r11,r7
+	  13: GETL       	R11, t10
+	  14: GETL       	R7, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0x2547980C:  41920018  bc 12,18,0x25479824
+	  18: Js18o       	$0x25479824
+
+
+. 0 254797FC 20
+. 3B 70 01 B8 1C E9 00 0C 7E 1D D8 00 7F 8B 3A 14 41 92 00 18
+
+==== BB 1149 (0x2547972C) approx BBs exec'd 0 ====
+
+	0x2547972C:  408E0710  bc 4,14,0x25479E3C
+	   0: Jc14o       	$0x25479E3C
+
+
+. 0 2547972C 4
+. 40 8E 07 10
+
+==== BB 1150 (0x25479730) approx BBs exec'd 0 ====
+
+	0x25479730:  827D0180  lwz r19,384(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R19
+	   4: INCEIPL       	$4
+
+	0x25479734:  2F910000  cmpi cr7,r17,0
+	   5: GETL       	R17, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x25479738:  66722000  oris r18,r19,0x2000
+	   9: GETL       	R19, t8
+	  10: ORL       	$0x20000000, t8
+	  11: PUTL       	t8, R18
+	  12: INCEIPL       	$4
+
+	0x2547973C:  925D0180  stw r18,384(r29)
+	  13: GETL       	R18, t10
+	  14: GETL       	R29, t12
+	  15: ADDL       	$0x180, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25479740:  409E05C0  bc 4,30,0x25479D00
+	  18: Jc30o       	$0x25479D00
+
+
+. 0 25479730 20
+. 82 7D 01 80 2F 91 00 00 66 72 20 00 92 5D 01 80 40 9E 05 C0
+
+==== BB 1151 (0x25479744) approx BBs exec'd 0 ====
+
+	0x25479744:  807F000C  lwz r3,12(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0x25479748:  82A30238  lwz r21,568(r3)
+	   5: GETL       	R3, t4
+	   6: ADDL       	$0x238, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R21
+	   9: INCEIPL       	$4
+
+	0x2547974C:  2F950000  cmpi cr7,r21,0
+	  10: GETL       	R21, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x7, CR
+	  13: INCEIPL       	$4
+
+	0x25479750:  409E02BC  bc 4,30,0x25479A0C
+	  14: Jc30o       	$0x25479A0C
+
+
+. 0 25479744 16
+. 80 7F 00 0C 82 A3 02 38 2F 95 00 00 40 9E 02 BC
+
+==== BB 1152 (0x25479A0C) approx BBs exec'd 0 ====
+
+	0x25479A0C:  4BFFFB1D  bl 0x25479528
+	   0: MOVL       	$0x25479A10, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25479528  ($4)
+
+
+. 0 25479A0C 4
+. 4B FF FB 1D
+
+==== BB 1153 _dl_protect_relro(0x25479528) approx BBs exec'd 0 ====
+
+	0x25479528:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547952C:  7D8802A6  mflr r12
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R12
+	   8: INCEIPL       	$4
+
+	0x25479530:  4801DAD1  bl 0x25497000
+	   9: MOVL       	$0x25479534, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25479528 12
+. 94 21 FF F0 7D 88 02 A6 48 01 DA D1
+
+==== BB 1154 (0x25479534) approx BBs exec'd 0 ====
+
+	0x25479534:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25479538:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547953C:  93E1000C  stw r31,12(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25479540:  91810014  stw r12,20(r1)
+	  13: GETL       	R12, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x25479544:  7C7F1B78  or r31,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0x25479548:  80A30000  lwz r5,0(r3)
+	  21: GETL       	R3, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R5
+	  24: INCEIPL       	$4
+
+	0x2547954C:  813E04F4  lwz r9,1268(r30)
+	  25: GETL       	R30, t20
+	  26: ADDL       	$0x4F4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R9
+	  29: INCEIPL       	$4
+
+	0x25479550:  81030234  lwz r8,564(r3)
+	  30: GETL       	R3, t24
+	  31: ADDL       	$0x234, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R8
+	  34: INCEIPL       	$4
+
+	0x25479554:  80E30238  lwz r7,568(r3)
+	  35: GETL       	R3, t28
+	  36: ADDL       	$0x238, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R7
+	  39: INCEIPL       	$4
+
+	0x25479558:  80C90004  lwz r6,4(r9)
+	  40: GETL       	R9, t32
+	  41: ADDL       	$0x4, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R6
+	  44: INCEIPL       	$4
+
+	0x2547955C:  7C654214  add r3,r5,r8
+	  45: GETL       	R5, t36
+	  46: GETL       	R8, t38
+	  47: ADDL       	t36, t38
+	  48: PUTL       	t38, R3
+	  49: INCEIPL       	$4
+
+	0x25479560:  7C833A14  add r4,r3,r7
+	  50: GETL       	R3, t40
+	  51: GETL       	R7, t42
+	  52: ADDL       	t40, t42
+	  53: PUTL       	t42, R4
+	  54: INCEIPL       	$4
+
+	0x25479564:  38A00001  li r5,1
+	  55: MOVL       	$0x1, t44
+	  56: PUTL       	t44, R5
+	  57: INCEIPL       	$4
+
+	0x25479568:  7D4600D0  neg r10,r6
+	  58: GETL       	R6, t46
+	  59: NEGL       	t46
+	  60: PUTL       	t46, R10
+	  61: INCEIPL       	$4
+
+	0x2547956C:  7C605038  and r0,r3,r10
+	  62: GETL       	R3, t48
+	  63: GETL       	R10, t50
+	  64: ANDL       	t48, t50
+	  65: PUTL       	t50, R0
+	  66: INCEIPL       	$4
+
+	0x25479570:  7C8B5038  and r11,r4,r10
+	  67: GETL       	R4, t52
+	  68: GETL       	R10, t54
+	  69: ANDL       	t52, t54
+	  70: PUTL       	t54, R11
+	  71: INCEIPL       	$4
+
+	0x25479574:  7F805800  cmp cr7,r0,r11
+	  72: GETL       	R0, t56
+	  73: GETL       	R11, t58
+	  74: CMPL       	t56, t58, t60  (-rSo)
+	  75: ICRFL       	t60, $0x7, CR
+	  76: INCEIPL       	$4
+
+	0x25479578:  7C030378  or r3,r0,r0
+	  77: GETL       	R0, t62
+	  78: PUTL       	t62, R3
+	  79: INCEIPL       	$4
+
+	0x2547957C:  7C805850  subf r4,r0,r11
+	  80: GETL       	R0, t64
+	  81: GETL       	R11, t66
+	  82: SUBL       	t64, t66
+	  83: PUTL       	t66, R4
+	  84: INCEIPL       	$4
+
+	0x25479580:  419E0010  bc 12,30,0x25479590
+	  85: Js30o       	$0x25479590
+
+
+. 0 25479534 80
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 91 81 00 14 7C 7F 1B 78 80 A3 00 00 81 3E 04 F4 81 03 02 34 80 E3 02 38 80 C9 00 04 7C 65 42 14 7C 83 3A 14 38 A0 00 01 7D 46 00 D0 7C 60 50 38 7C 8B 50 38 7F 80 58 00 7C 03 03 78 7C 80 58 50 41 9E 00 10
+
+==== BB 1155 (0x25479584) approx BBs exec'd 0 ====
+
+	0x25479584:  48009235  bl 0x254827B8
+	   0: MOVL       	$0x25479588, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x254827B8  ($4)
+
+
+. 0 25479584 4
+. 48 00 92 35
+
+==== BB 1156 (0x25479588) approx BBs exec'd 0 ====
+
+	0x25479588:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547958C:  4180001C  bc 12,0,0x254795A8
+	   4: Js00o       	$0x254795A8
+
+
+. 0 25479588 8
+. 2C 03 00 00 41 80 00 1C
+
+==== BB 1157 (0x25479590) approx BBs exec'd 0 ====
+
+	0x25479590:  81610014  lwz r11,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x25479594:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0x25479598:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0x2547959C:  7D6803A6  mtlr r11
+	  15: GETL       	R11, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x254795A0:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0x254795A4:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+. 0 25479590 24
+. 81 61 00 14 83 C1 00 08 83 E1 00 0C 7D 68 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1158 (0x25479A10) approx BBs exec'd 0 ====
+
+	0x25479A10:  4BFFFD44  b 0x25479754
+	   0: JMPo       	$0x25479754  ($4)
+
+
+. 0 25479A10 4
+. 4B FF FD 44
+
+==== BB 1159 (0x25479754) approx BBs exec'd 0 ====
+
+	0x25479754:  80810000  lwz r4,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R4
+	   3: INCEIPL       	$4
+
+	0x25479758:  83240004  lwz r25,4(r4)
+	   4: GETL       	R4, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R25
+	   8: INCEIPL       	$4
+
+	0x2547975C:  8184FFBC  lwz r12,-68(r4)
+	   9: GETL       	R4, t8
+	  10: ADDL       	$0xFFFFFFBC, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x25479760:  7F2803A6  mtlr r25
+	  14: GETL       	R25, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x25479764:  8204FFC0  lwz r16,-64(r4)
+	  17: GETL       	R4, t14
+	  18: ADDL       	$0xFFFFFFC0, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R16
+	  21: INCEIPL       	$4
+
+	0x25479768:  8224FFC4  lwz r17,-60(r4)
+	  22: GETL       	R4, t18
+	  23: ADDL       	$0xFFFFFFC4, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R17
+	  26: INCEIPL       	$4
+
+	0x2547976C:  7D818120  mtcrf 0x18,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x3, CR
+	  29: ICRFL       	t22, $0x4, CR
+	  30: INCEIPL       	$4
+
+	0x25479770:  8244FFC8  lwz r18,-56(r4)
+	  31: GETL       	R4, t24
+	  32: ADDL       	$0xFFFFFFC8, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R18
+	  35: INCEIPL       	$4
+
+	0x25479774:  8264FFCC  lwz r19,-52(r4)
+	  36: GETL       	R4, t28
+	  37: ADDL       	$0xFFFFFFCC, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R19
+	  40: INCEIPL       	$4
+
+	0x25479778:  8284FFD0  lwz r20,-48(r4)
+	  41: GETL       	R4, t32
+	  42: ADDL       	$0xFFFFFFD0, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R20
+	  45: INCEIPL       	$4
+
+	0x2547977C:  82A4FFD4  lwz r21,-44(r4)
+	  46: GETL       	R4, t36
+	  47: ADDL       	$0xFFFFFFD4, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R21
+	  50: INCEIPL       	$4
+
+	0x25479780:  82C4FFD8  lwz r22,-40(r4)
+	  51: GETL       	R4, t40
+	  52: ADDL       	$0xFFFFFFD8, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R22
+	  55: INCEIPL       	$4
+
+	0x25479784:  82E4FFDC  lwz r23,-36(r4)
+	  56: GETL       	R4, t44
+	  57: ADDL       	$0xFFFFFFDC, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R23
+	  60: INCEIPL       	$4
+
+	0x25479788:  8304FFE0  lwz r24,-32(r4)
+	  61: GETL       	R4, t48
+	  62: ADDL       	$0xFFFFFFE0, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R24
+	  65: INCEIPL       	$4
+
+	0x2547978C:  8324FFE4  lwz r25,-28(r4)
+	  66: GETL       	R4, t52
+	  67: ADDL       	$0xFFFFFFE4, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R25
+	  70: INCEIPL       	$4
+
+	0x25479790:  8344FFE8  lwz r26,-24(r4)
+	  71: GETL       	R4, t56
+	  72: ADDL       	$0xFFFFFFE8, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R26
+	  75: INCEIPL       	$4
+
+	0x25479794:  8364FFEC  lwz r27,-20(r4)
+	  76: GETL       	R4, t60
+	  77: ADDL       	$0xFFFFFFEC, t60
+	  78: LDL       	(t60), t62
+	  79: PUTL       	t62, R27
+	  80: INCEIPL       	$4
+
+	0x25479798:  8384FFF0  lwz r28,-16(r4)
+	  81: GETL       	R4, t64
+	  82: ADDL       	$0xFFFFFFF0, t64
+	  83: LDL       	(t64), t66
+	  84: PUTL       	t66, R28
+	  85: INCEIPL       	$4
+
+	0x2547979C:  83A4FFF4  lwz r29,-12(r4)
+	  86: GETL       	R4, t68
+	  87: ADDL       	$0xFFFFFFF4, t68
+	  88: LDL       	(t68), t70
+	  89: PUTL       	t70, R29
+	  90: INCEIPL       	$4
+
+	0x254797A0:  83C4FFF8  lwz r30,-8(r4)
+	  91: GETL       	R4, t72
+	  92: ADDL       	$0xFFFFFFF8, t72
+	  93: LDL       	(t72), t74
+	  94: PUTL       	t74, R30
+	  95: INCEIPL       	$4
+
+	0x254797A4:  83E4FFFC  lwz r31,-4(r4)
+	  96: GETL       	R4, t76
+	  97: ADDL       	$0xFFFFFFFC, t76
+	  98: LDL       	(t76), t78
+	  99: PUTL       	t78, R31
+	 100: INCEIPL       	$4
+
+	0x254797A8:  7C812378  or r1,r4,r4
+	 101: GETL       	R4, t80
+	 102: PUTL       	t80, R1
+	 103: INCEIPL       	$4
+
+	0x254797AC:  4E800020  blr
+	 104: GETL       	LR, t82
+	 105: JMPo-r       	t82  ($4)
+
+
+. 0 25479754 92
+. 80 81 00 00 83 24 00 04 81 84 FF BC 7F 28 03 A6 82 04 FF C0 82 24 FF C4 7D 81 81 20 82 44 FF C8 82 64 FF CC 82 84 FF D0 82 A4 FF D4 82 C4 FF D8 82 E4 FF DC 83 04 FF E0 83 24 FF E4 83 44 FF E8 83 64 FF EC 83 84 FF F0 83 A4 FF F4 83 C4 FF F8 83 E4 FF FC 7C 81 23 78 4E 80 00 20
+
+==== BB 1160 (0x25480904) approx BBs exec'd 0 ====
+
+	0x25480904:  2F850000  cmpi cr7,r5,0
+	   0: GETL       	R5, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x25480908:  3B9F0018  addi r28,r31,24
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x18, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2548090C:  419E0264  bc 12,30,0x25480B70
+	   8: Js30o       	$0x25480B70
+
+
+. 0 25480904 12
+. 2F 85 00 00 3B 9F 00 18 41 9E 02 64
+
+==== BB 1161 (0x25480B70) approx BBs exec'd 0 ====
+
+	0x25480B70:  831E04CC  lwz r24,1228(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4CC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0x25480B74:  4BFFFDA0  b 0x25480914
+	   5: JMPo       	$0x25480914  ($4)
+
+
+. 0 25480B70 8
+. 83 1E 04 CC 4B FF FD A0
+
+==== BB 1162 (0x25480914) approx BBs exec'd 0 ====
+
+	0x25480914:  409E02C0  bc 4,30,0x25480BD4
+	   0: Jc30o       	$0x25480BD4
+
+
+. 0 25480914 4
+. 40 9E 02 C0
+
+==== BB 1163 (0x25480918) approx BBs exec'd 0 ====
+
+	0x25480918:  3C98FE00  addis r4,r24,-512
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0xFE000000, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x2548091C:  3C00FC00  lis r0,-1024
+	   4: MOVL       	$0xFC000000, t2
+	   5: PUTL       	t2, R0
+	   6: INCEIPL       	$4
+
+	0x25480920:  38640003  addi r3,r4,3
+	   7: GETL       	R4, t4
+	   8: ADDL       	$0x3, t4
+	   9: PUTL       	t4, R3
+	  10: INCEIPL       	$4
+
+	0x25480924:  60060002  ori r6,r0,0x2
+	  11: MOVL       	$0xFC000002, t6
+	  12: PUTL       	t6, R6
+	  13: INCEIPL       	$4
+
+	0x25480928:  39800000  li r12,0
+	  14: MOVL       	$0x0, t8
+	  15: PUTL       	t8, R12
+	  16: INCEIPL       	$4
+
+	0x2548092C:  7F033040  cmpl cr6,r3,r6
+	  17: GETL       	R3, t10
+	  18: GETL       	R6, t12
+	  19: CMPUL       	t10, t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x25480930:  618A8000  ori r10,r12,0x8000
+	  22: MOVL       	$0x8000, t16
+	  23: PUTL       	t16, R10
+	  24: INCEIPL       	$4
+
+	0x25480934:  7CBA00D0  neg r5,r26
+	  25: GETL       	R26, t18
+	  26: NEGL       	t18
+	  27: PUTL       	t18, R5
+	  28: INCEIPL       	$4
+
+	0x25480938:  7D1A5050  subf r8,r26,r10
+	  29: GETL       	R26, t20
+	  30: GETL       	R10, t22
+	  31: SUBL       	t20, t22
+	  32: PUTL       	t22, R8
+	  33: INCEIPL       	$4
+
+	0x2548093C:  54A9043E  rlwinm r9,r5,0,16,31
+	  34: GETL       	R5, t24
+	  35: ANDL       	$0xFFFF, t24
+	  36: PUTL       	t24, R9
+	  37: INCEIPL       	$4
+
+	0x25480940:  5507843E  rlwinm r7,r8,16,16,31
+	  38: GETL       	R8, t26
+	  39: SHRL       	$0x10, t26
+	  40: PUTL       	t26, R7
+	  41: INCEIPL       	$4
+
+	0x25480944:  3D60556C  lis r11,21868
+	  42: MOVL       	$0x556C0000, t28
+	  43: PUTL       	t28, R11
+	  44: INCEIPL       	$4
+
+	0x25480948:  3C807D6C  lis r4,32108
+	  45: MOVL       	$0x7D6C0000, t30
+	  46: PUTL       	t30, R4
+	  47: INCEIPL       	$4
+
+	0x2548094C:  64E03D6B  oris r0,r7,0x3D6B
+	  48: GETL       	R7, t32
+	  49: ORL       	$0x3D6B0000, t32
+	  50: PUTL       	t32, R0
+	  51: INCEIPL       	$4
+
+	0x25480950:  6523396B  oris r3,r9,0x396B
+	  52: GETL       	R9, t34
+	  53: ORL       	$0x396B0000, t34
+	  54: PUTL       	t34, R3
+	  55: INCEIPL       	$4
+
+	0x25480954:  6166083C  ori r6,r11,0x83C
+	  56: MOVL       	$0x556C083C, t36
+	  57: PUTL       	t36, R6
+	  58: INCEIPL       	$4
+
+	0x25480958:  608C5A14  ori r12,r4,0x5A14
+	  59: MOVL       	$0x7D6C5A14, t38
+	  60: PUTL       	t38, R12
+	  61: INCEIPL       	$4
+
+	0x2548095C:  901C0000  stw r0,0(r28)
+	  62: GETL       	R0, t40
+	  63: GETL       	R28, t42
+	  64: STL       	t40, (t42)
+	  65: INCEIPL       	$4
+
+	0x25480960:  907C0004  stw r3,4(r28)
+	  66: GETL       	R3, t44
+	  67: GETL       	R28, t46
+	  68: ADDL       	$0x4, t46
+	  69: STL       	t44, (t46)
+	  70: INCEIPL       	$4
+
+	0x25480964:  90DC0008  stw r6,8(r28)
+	  71: GETL       	R6, t48
+	  72: GETL       	R28, t50
+	  73: ADDL       	$0x8, t50
+	  74: STL       	t48, (t50)
+	  75: INCEIPL       	$4
+
+	0x25480968:  919C000C  stw r12,12(r28)
+	  76: GETL       	R12, t52
+	  77: GETL       	R28, t54
+	  78: ADDL       	$0xC, t54
+	  79: STL       	t52, (t54)
+	  80: INCEIPL       	$4
+
+	0x2548096C:  4099020C  bc 4,25,0x25480B78
+	  81: Jc25o       	$0x25480B78
+
+
+. 0 25480918 88
+. 3C 98 FE 00 3C 00 FC 00 38 64 00 03 60 06 00 02 39 80 00 00 7F 03 30 40 61 8A 80 00 7C BA 00 D0 7D 1A 50 50 54 A9 04 3E 55 07 84 3E 3D 60 55 6C 3C 80 7D 6C 64 E0 3D 6B 65 23 39 6B 61 66 08 3C 60 8C 5A 14 90 1C 00 00 90 7C 00 04 90 DC 00 08 91 9C 00 0C 40 99 02 0C
+
+==== BB 1164 (0x25480B78) approx BBs exec'd 0 ====
+
+	0x25480B78:  3C980001  addis r4,r24,1
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x10000, t0
+	   2: PUTL       	t0, R4
+	   3: INCEIPL       	$4
+
+	0x25480B7C:  3C7D0001  addis r3,r29,1
+	   4: GETL       	R29, t2
+	   5: ADDL       	$0x10000, t2
+	   6: PUTL       	t2, R3
+	   7: INCEIPL       	$4
+
+	0x25480B80:  38C48000  addi r6,r4,-32768
+	   8: GETL       	R4, t4
+	   9: ADDL       	$0xFFFF8000, t4
+	  10: PUTL       	t4, R6
+	  11: INCEIPL       	$4
+
+	0x25480B84:  39838000  addi r12,r3,-32768
+	  12: GETL       	R3, t6
+	  13: ADDL       	$0xFFFF8000, t6
+	  14: PUTL       	t6, R12
+	  15: INCEIPL       	$4
+
+	0x25480B88:  54C9843E  rlwinm r9,r6,16,16,31
+	  16: GETL       	R6, t8
+	  17: SHRL       	$0x10, t8
+	  18: PUTL       	t8, R9
+	  19: INCEIPL       	$4
+
+	0x25480B8C:  57A7043E  rlwinm r7,r29,0,16,31
+	  20: GETL       	R29, t10
+	  21: ANDL       	$0xFFFF, t10
+	  22: PUTL       	t10, R7
+	  23: INCEIPL       	$4
+
+	0x25480B90:  5708043E  rlwinm r8,r24,0,16,31
+	  24: GETL       	R24, t12
+	  25: ANDL       	$0xFFFF, t12
+	  26: PUTL       	t12, R8
+	  27: INCEIPL       	$4
+
+	0x25480B94:  558B843E  rlwinm r11,r12,16,16,31
+	  28: GETL       	R12, t14
+	  29: SHRL       	$0x10, t14
+	  30: PUTL       	t14, R11
+	  31: INCEIPL       	$4
+
+	0x25480B98:  3D407D89  lis r10,32137
+	  32: MOVL       	$0x7D890000, t16
+	  33: PUTL       	t16, R10
+	  34: INCEIPL       	$4
+
+	0x25480B9C:  3CA04E80  lis r5,20096
+	  35: MOVL       	$0x4E800000, t18
+	  36: PUTL       	t18, R5
+	  37: INCEIPL       	$4
+
+	0x25480BA0:  65183980  oris r24,r8,0x3980
+	  38: GETL       	R8, t20
+	  39: ORL       	$0x39800000, t20
+	  40: PUTL       	t20, R24
+	  41: INCEIPL       	$4
+
+	0x25480BA4:  653D3D8C  oris r29,r9,0x3D8C
+	  42: GETL       	R9, t22
+	  43: ORL       	$0x3D8C0000, t22
+	  44: PUTL       	t22, R29
+	  45: INCEIPL       	$4
+
+	0x25480BA8:  614403A6  ori r4,r10,0x3A6
+	  46: MOVL       	$0x7D8903A6, t24
+	  47: PUTL       	t24, R4
+	  48: INCEIPL       	$4
+
+	0x25480BAC:  64E33980  oris r3,r7,0x3980
+	  49: GETL       	R7, t26
+	  50: ORL       	$0x39800000, t26
+	  51: PUTL       	t26, R3
+	  52: INCEIPL       	$4
+
+	0x25480BB0:  65663D8C  oris r6,r11,0x3D8C
+	  53: GETL       	R11, t28
+	  54: ORL       	$0x3D8C0000, t28
+	  55: PUTL       	t28, R6
+	  56: INCEIPL       	$4
+
+	0x25480BB4:  60A00420  ori r0,r5,0x420
+	  57: MOVL       	$0x4E800420, t30
+	  58: PUTL       	t30, R0
+	  59: INCEIPL       	$4
+
+	0x25480BB8:  901C0024  stw r0,36(r28)
+	  60: GETL       	R0, t32
+	  61: GETL       	R28, t34
+	  62: ADDL       	$0x24, t34
+	  63: STL       	t32, (t34)
+	  64: INCEIPL       	$4
+
+	0x25480BBC:  931C0010  stw r24,16(r28)
+	  65: GETL       	R24, t36
+	  66: GETL       	R28, t38
+	  67: ADDL       	$0x10, t38
+	  68: STL       	t36, (t38)
+	  69: INCEIPL       	$4
+
+	0x25480BC0:  93BC0014  stw r29,20(r28)
+	  70: GETL       	R29, t40
+	  71: GETL       	R28, t42
+	  72: ADDL       	$0x14, t42
+	  73: STL       	t40, (t42)
+	  74: INCEIPL       	$4
+
+	0x25480BC4:  909C0018  stw r4,24(r28)
+	  75: GETL       	R4, t44
+	  76: GETL       	R28, t46
+	  77: ADDL       	$0x18, t46
+	  78: STL       	t44, (t46)
+	  79: INCEIPL       	$4
+
+	0x25480BC8:  907C001C  stw r3,28(r28)
+	  80: GETL       	R3, t48
+	  81: GETL       	R28, t50
+	  82: ADDL       	$0x1C, t50
+	  83: STL       	t48, (t50)
+	  84: INCEIPL       	$4
+
+	0x25480BCC:  90DC0020  stw r6,32(r28)
+	  85: GETL       	R6, t52
+	  86: GETL       	R28, t54
+	  87: ADDL       	$0x20, t54
+	  88: STL       	t52, (t54)
+	  89: INCEIPL       	$4
+
+	0x25480BD0:  4BFFFDD0  b 0x254809A0
+	  90: JMPo       	$0x254809A0  ($4)
+
+
+. 0 25480B78 92
+. 3C 98 00 01 3C 7D 00 01 38 C4 80 00 39 83 80 00 54 C9 84 3E 57 A7 04 3E 57 08 04 3E 55 8B 84 3E 3D 40 7D 89 3C A0 4E 80 65 18 39 80 65 3D 3D 8C 61 44 03 A6 64 E3 39 80 65 66 3D 8C 60 A0 04 20 90 1C 00 24 93 1C 00 10 93 BC 00 14 90 9C 00 18 90 7C 00 1C 90 DC 00 20 4B FF FD D0
+
+==== BB 1165 (0x254809A0) approx BBs exec'd 0 ====
+
+	0x254809A0:  39000000  li r8,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R8
+	   2: INCEIPL       	$4
+
+	0x254809A4:  38800012  li r4,18
+	   3: MOVL       	$0x12, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x254809A8:  7F88D840  cmpl cr7,r8,r27
+	   6: GETL       	R8, t4
+	   7: GETL       	R27, t6
+	   8: CMPUL       	t4, t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0x254809AC:  409C0054  bc 4,28,0x25480A00
+	  11: Jc28o       	$0x25480A00
+
+
+. 0 254809A0 16
+. 39 00 00 00 38 80 00 12 7F 88 D8 40 40 9C 00 54
+
+==== BB 1166 (0x254809B0) approx BBs exec'd 0 ====
+
+	0x254809B0:  38E0FFD4  li r7,-44
+	   0: MOVL       	$0xFFFFFFD4, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x254809B4:  550A143A  rlwinm r10,r8,2,16,29
+	   3: GETL       	R8, t2
+	   4: ROLL       	$0x2, t2
+	   5: ANDL       	$0xFFFC, t2
+	   6: PUTL       	t2, R10
+	   7: INCEIPL       	$4
+
+	0x254809B8:  39080001  addi r8,r8,1
+	   8: GETL       	R8, t4
+	   9: ADDL       	$0x1, t4
+	  10: PUTL       	t4, R8
+	  11: INCEIPL       	$4
+
+	0x254809BC:  7D7B4010  subfc r11,r27,r8
+	  12: GETL       	R27, t6
+	  13: GETL       	R8, t8
+	  14: SBBL       	t6, t8  (-wCa)
+	  15: PUTL       	t8, R11
+	  16: INCEIPL       	$4
+
+	0x254809C0:  7D6B5910  subfe r11,r11,r11
+	  17: GETL       	R11, t10
+	  18: GETL       	R11, t12
+	  19: SBBL       	t10, t12  (-rCa-wCa)
+	  20: PUTL       	t12, R11
+	  21: INCEIPL       	$4
+
+	0x254809C4:  7D6B00D0  neg r11,r11
+	  22: GETL       	R11, t14
+	  23: NEGL       	t14
+	  24: PUTL       	t14, R11
+	  25: INCEIPL       	$4
+
+	0x254809C8:  21881FFF  subfic r12,r8,8191
+	  26: GETL       	R8, t16
+	  27: MOVL       	$0x1FFF, t18
+	  28: SBBL       	t16, t18  (-wCa)
+	  29: PUTL       	t18, R12
+	  30: INCEIPL       	$4
+
+	0x254809CC:  39800000  li r12,0
+	  31: MOVL       	$0x0, t20
+	  32: PUTL       	t20, R12
+	  33: INCEIPL       	$4
+
+	0x254809D0:  7D8C6114  adde r12,r12,r12
+	  34: GETL       	R12, t22
+	  35: GETL       	R12, t24
+	  36: ADCL       	t22, t24  (-rCa-wCa)
+	  37: PUTL       	t24, R12
+	  38: INCEIPL       	$4
+
+	0x254809D4:  7D696039  and. r9,r11,r12
+	  39: GETL       	R11, t26
+	  40: GETL       	R12, t28
+	  41: ANDL       	t26, t28
+	  42: PUTL       	t28, R9
+	  43: CMP0L       	t28, t30  (-rSo)
+	  44: ICRFL       	t30, $0x0, CR
+	  45: INCEIPL       	$4
+
+	0x254809D8:  5498103A  rlwinm r24,r4,2,0,29
+	  46: GETL       	R4, t32
+	  47: SHLL       	$0x2, t32
+	  48: PUTL       	t32, R24
+	  49: INCEIPL       	$4
+
+	0x254809DC:  54E501BA  rlwinm r5,r7,0,6,29
+	  50: GETL       	R7, t34
+	  51: ANDL       	$0x3FFFFFC, t34
+	  52: PUTL       	t34, R5
+	  53: INCEIPL       	$4
+
+	0x254809E0:  65403960  oris r0,r10,0x3960
+	  54: GETL       	R10, t36
+	  55: ORL       	$0x39600000, t36
+	  56: PUTL       	t36, R0
+	  57: INCEIPL       	$4
+
+	0x254809E4:  64BD4800  oris r29,r5,0x4800
+	  58: GETL       	R5, t38
+	  59: ORL       	$0x48000000, t38
+	  60: PUTL       	t38, R29
+	  61: INCEIPL       	$4
+
+	0x254809E8:  7F98FA14  add r28,r24,r31
+	  62: GETL       	R24, t40
+	  63: GETL       	R31, t42
+	  64: ADDL       	t40, t42
+	  65: PUTL       	t42, R28
+	  66: INCEIPL       	$4
+
+	0x254809EC:  7C18F92E  stwx r0,r24,r31
+	  67: GETL       	R31, t44
+	  68: GETL       	R24, t46
+	  69: ADDL       	t46, t44
+	  70: GETL       	R0, t48
+	  71: STL       	t48, (t44)
+	  72: INCEIPL       	$4
+
+	0x254809F0:  38840002  addi r4,r4,2
+	  73: GETL       	R4, t50
+	  74: ADDL       	$0x2, t50
+	  75: PUTL       	t50, R4
+	  76: INCEIPL       	$4
+
+	0x254809F4:  93BC0004  stw r29,4(r28)
+	  77: GETL       	R29, t52
+	  78: GETL       	R28, t54
+	  79: ADDL       	$0x4, t54
+	  80: STL       	t52, (t54)
+	  81: INCEIPL       	$4
+
+	0x254809F8:  38E7FFF8  addi r7,r7,-8
+	  82: MOVL       	$0xFFFFFFCC, t56
+	  83: PUTL       	t56, R7
+	  84: INCEIPL       	$4
+
+	0x254809FC:  4082FFB8  bc 4,2,0x254809B4
+	  85: Jc02o       	$0x254809B4
+
+
+. 0 254809B0 80
+. 38 E0 FF D4 55 0A 14 3A 39 08 00 01 7D 7B 40 10 7D 6B 59 10 7D 6B 00 D0 21 88 1F FF 39 80 00 00 7D 8C 61 14 7D 69 60 39 54 98 10 3A 54 E5 01 BA 65 40 39 60 64 BD 48 00 7F 98 FA 14 7C 18 F9 2E 38 84 00 02 93 BC 00 04 38 E7 FF F8 40 82 FF B8
+
+==== BB 1167 (0x254809B4) approx BBs exec'd 0 ====
+
+	0x254809B4:  550A143A  rlwinm r10,r8,2,16,29
+	   0: GETL       	R8, t0
+	   1: ROLL       	$0x2, t0
+	   2: ANDL       	$0xFFFC, t0
+	   3: PUTL       	t0, R10
+	   4: INCEIPL       	$4
+
+	0x254809B8:  39080001  addi r8,r8,1
+	   5: GETL       	R8, t2
+	   6: ADDL       	$0x1, t2
+	   7: PUTL       	t2, R8
+	   8: INCEIPL       	$4
+
+	0x254809BC:  7D7B4010  subfc r11,r27,r8
+	   9: GETL       	R27, t4
+	  10: GETL       	R8, t6
+	  11: SBBL       	t4, t6  (-wCa)
+	  12: PUTL       	t6, R11
+	  13: INCEIPL       	$4
+
+	0x254809C0:  7D6B5910  subfe r11,r11,r11
+	  14: GETL       	R11, t8
+	  15: GETL       	R11, t10
+	  16: SBBL       	t8, t10  (-rCa-wCa)
+	  17: PUTL       	t10, R11
+	  18: INCEIPL       	$4
+
+	0x254809C4:  7D6B00D0  neg r11,r11
+	  19: GETL       	R11, t12
+	  20: NEGL       	t12
+	  21: PUTL       	t12, R11
+	  22: INCEIPL       	$4
+
+	0x254809C8:  21881FFF  subfic r12,r8,8191
+	  23: GETL       	R8, t14
+	  24: MOVL       	$0x1FFF, t16
+	  25: SBBL       	t14, t16  (-wCa)
+	  26: PUTL       	t16, R12
+	  27: INCEIPL       	$4
+
+	0x254809CC:  39800000  li r12,0
+	  28: MOVL       	$0x0, t18
+	  29: PUTL       	t18, R12
+	  30: INCEIPL       	$4
+
+	0x254809D0:  7D8C6114  adde r12,r12,r12
+	  31: GETL       	R12, t20
+	  32: GETL       	R12, t22
+	  33: ADCL       	t20, t22  (-rCa-wCa)
+	  34: PUTL       	t22, R12
+	  35: INCEIPL       	$4
+
+	0x254809D4:  7D696039  and. r9,r11,r12
+	  36: GETL       	R11, t24
+	  37: GETL       	R12, t26
+	  38: ANDL       	t24, t26
+	  39: PUTL       	t26, R9
+	  40: CMP0L       	t26, t28  (-rSo)
+	  41: ICRFL       	t28, $0x0, CR
+	  42: INCEIPL       	$4
+
+	0x254809D8:  5498103A  rlwinm r24,r4,2,0,29
+	  43: GETL       	R4, t30
+	  44: SHLL       	$0x2, t30
+	  45: PUTL       	t30, R24
+	  46: INCEIPL       	$4
+
+	0x254809DC:  54E501BA  rlwinm r5,r7,0,6,29
+	  47: GETL       	R7, t32
+	  48: ANDL       	$0x3FFFFFC, t32
+	  49: PUTL       	t32, R5
+	  50: INCEIPL       	$4
+
+	0x254809E0:  65403960  oris r0,r10,0x3960
+	  51: GETL       	R10, t34
+	  52: ORL       	$0x39600000, t34
+	  53: PUTL       	t34, R0
+	  54: INCEIPL       	$4
+
+	0x254809E4:  64BD4800  oris r29,r5,0x4800
+	  55: GETL       	R5, t36
+	  56: ORL       	$0x48000000, t36
+	  57: PUTL       	t36, R29
+	  58: INCEIPL       	$4
+
+	0x254809E8:  7F98FA14  add r28,r24,r31
+	  59: GETL       	R24, t38
+	  60: GETL       	R31, t40
+	  61: ADDL       	t38, t40
+	  62: PUTL       	t40, R28
+	  63: INCEIPL       	$4
+
+	0x254809EC:  7C18F92E  stwx r0,r24,r31
+	  64: GETL       	R31, t42
+	  65: GETL       	R24, t44
+	  66: ADDL       	t44, t42
+	  67: GETL       	R0, t46
+	  68: STL       	t46, (t42)
+	  69: INCEIPL       	$4
+
+	0x254809F0:  38840002  addi r4,r4,2
+	  70: GETL       	R4, t48
+	  71: ADDL       	$0x2, t48
+	  72: PUTL       	t48, R4
+	  73: INCEIPL       	$4
+
+	0x254809F4:  93BC0004  stw r29,4(r28)
+	  74: GETL       	R29, t50
+	  75: GETL       	R28, t52
+	  76: ADDL       	$0x4, t52
+	  77: STL       	t50, (t52)
+	  78: INCEIPL       	$4
+
+	0x254809F8:  38E7FFF8  addi r7,r7,-8
+	  79: GETL       	R7, t54
+	  80: ADDL       	$0xFFFFFFF8, t54
+	  81: PUTL       	t54, R7
+	  82: INCEIPL       	$4
+
+	0x254809FC:  4082FFB8  bc 4,2,0x254809B4
+	  83: Jc02o       	$0x254809B4
+
+
+. 0 254809B4 76
+. 55 0A 14 3A 39 08 00 01 7D 7B 40 10 7D 6B 59 10 7D 6B 00 D0 21 88 1F FF 39 80 00 00 7D 8C 61 14 7D 69 60 39 54 98 10 3A 54 E5 01 BA 65 40 39 60 64 BD 48 00 7F 98 FA 14 7C 18 F9 2E 38 84 00 02 93 BC 00 04 38 E7 FF F8 40 82 FF B8
+
+==== BB 1168 (0x25480A00) approx BBs exec'd 0 ====
+
+	0x25480A00:  7C88D840  cmpl cr1,r8,r27
+	   0: GETL       	R8, t0
+	   1: GETL       	R27, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25480A04:  40840074  bc 4,4,0x25480A78
+	   5: Jc04o       	$0x25480A78
+
+
+. 0 25480A00 8
+. 7C 88 D8 40 40 84 00 74
+
+==== BB 1169 (0x25480A98) approx BBs exec'd 0 ====
+
+	0x25480A98:  800B0000  lwz r0,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0x25480A9C:  2F000000  cmpi cr6,r0,0
+	   4: GETL       	R0, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x25480AA0:  409A00C4  bc 4,26,0x25480B64
+	   8: Jc26o       	$0x25480B64
+
+
+. 0 25480A98 12
+. 80 0B 00 00 2F 00 00 00 40 9A 00 C4
+
+==== BB 1170 (0x25480B64) approx BBs exec'd 0 ====
+
+	0x25480B64:  7C0A1670  srawi r10,r0,2
+	   0: GETL       	R0, t0
+	   1: SARL       	$0x2, t0  (-wCa)
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0x25480B68:  7D4A0194  addze r10,r10
+	   4: GETL       	R10, t2
+	   5: ADCL       	$0x0, t2  (-rCa-wCa)
+	   6: PUTL       	t2, R10
+	   7: INCEIPL       	$4
+
+	0x25480B6C:  4BFFFF38  b 0x25480AA4
+	   8: JMPo       	$0x25480AA4  ($4)
+
+
+. 0 25480B64 12
+. 7C 0A 16 70 7D 4A 01 94 4B FF FF 38
+
+==== BB 1171 (0x25480AA8) approx BBs exec'd 0 ====
+
+	0x25480AA8:  7EEBBB78  or r11,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x25480AAC:  39200000  li r9,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x25480AB0:  48000014  b 0x25480AC4
+	   6: JMPo       	$0x25480AC4  ($4)
+
+
+. 0 25480AA8 12
+. 7E EB BB 78 39 20 00 00 48 00 00 14
+
+==== BB 1172 (0x254796D0) approx BBs exec'd 0 ====
+
+	0x254796D0:  817D0070  lwz r11,112(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x70, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254796D4:  2F0B0000  cmpi cr6,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x254796D8:  409A033C  bc 4,26,0x25479A14
+	   9: Jc26o       	$0x25479A14
+
+
+. 0 254796D0 12
+. 81 7D 00 70 2F 0B 00 00 40 9A 03 3C
+
+==== BB 1173 (0x25479A14) approx BBs exec'd 0 ====
+
+	0x25479A14:  835D007C  lwz r26,124(r29)
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x7C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x25479A18:  833D0028  lwz r25,40(r29)
+	   5: GETL       	R29, t4
+	   6: ADDL       	$0x28, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0x25479A1C:  829A0004  lwz r20,4(r26)
+	  10: GETL       	R26, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R20
+	  14: INCEIPL       	$4
+
+	0x25479A20:  82BF0020  lwz r21,32(r31)
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x20, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R21
+	  19: INCEIPL       	$4
+
+	0x25479A24:  929F002C  stw r20,44(r31)
+	  20: GETL       	R20, t16
+	  21: GETL       	R31, t18
+	  22: ADDL       	$0x2C, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0x25479A28:  82590004  lwz r18,4(r25)
+	  25: GETL       	R25, t20
+	  26: ADDL       	$0x4, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R18
+	  29: INCEIPL       	$4
+
+	0x25479A2C:  7D553214  add r10,r21,r6
+	  30: GETL       	R21, t24
+	  31: GETL       	R6, t26
+	  32: ADDL       	t24, t26
+	  33: PUTL       	t26, R10
+	  34: INCEIPL       	$4
+
+	0x25479A30:  7D35A050  subf r9,r21,r20
+	  35: GETL       	R21, t28
+	  36: GETL       	R20, t30
+	  37: SUBL       	t28, t30
+	  38: PUTL       	t30, R9
+	  39: INCEIPL       	$4
+
+	0x25479A34:  7E149214  add r16,r20,r18
+	  40: GETL       	R20, t32
+	  41: GETL       	R18, t34
+	  42: ADDL       	t32, t34
+	  43: PUTL       	t34, R16
+	  44: INCEIPL       	$4
+
+	0x25479A38:  913F0024  stw r9,36(r31)
+	  45: GETL       	R9, t36
+	  46: GETL       	R31, t38
+	  47: ADDL       	$0x24, t38
+	  48: STL       	t36, (t38)
+	  49: INCEIPL       	$4
+
+	0x25479A3C:  7E705050  subf r19,r16,r10
+	  50: GETL       	R16, t40
+	  51: GETL       	R10, t42
+	  52: SUBL       	t40, t42
+	  53: PUTL       	t42, R19
+	  54: INCEIPL       	$4
+
+	0x25479A40:  925F0030  stw r18,48(r31)
+	  55: GETL       	R18, t44
+	  56: GETL       	R31, t46
+	  57: ADDL       	$0x30, t46
+	  58: STL       	t44, (t46)
+	  59: INCEIPL       	$4
+
+	0x25479A44:  927F003C  stw r19,60(r31)
+	  60: GETL       	R19, t48
+	  61: GETL       	R31, t50
+	  62: ADDL       	$0x3C, t50
+	  63: STL       	t48, (t50)
+	  64: INCEIPL       	$4
+
+	0x25479A48:  921F0038  stw r16,56(r31)
+	  65: GETL       	R16, t52
+	  66: GETL       	R31, t54
+	  67: ADDL       	$0x38, t54
+	  68: STL       	t52, (t54)
+	  69: INCEIPL       	$4
+
+	0x25479A4C:  4BFFFC90  b 0x254796DC
+	  70: JMPo       	$0x254796DC  ($4)
+
+
+. 0 25479A14 60
+. 83 5D 00 7C 83 3D 00 28 82 9A 00 04 82 BF 00 20 92 9F 00 2C 82 59 00 04 7D 55 32 14 7D 35 A0 50 7E 14 92 14 91 3F 00 24 7E 70 50 50 92 5F 00 30 92 7F 00 3C 92 1F 00 38 4B FF FC 90
+
+==== BB 1174 (0x254799E0) approx BBs exec'd 0 ====
+
+	0x254799E0:  7F8BE040  cmpl cr7,r11,r28
+	   0: GETL       	R11, t0
+	   1: GETL       	R28, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x254799E4:  40BCFE40  bc 5,28,0x25479824
+	   5: Jc28o       	$0x25479824
+
+
+. 0 254799E0 8
+. 7F 8B E0 40 40 BC FE 40
+
+==== BB 1175 (0x254799E8) approx BBs exec'd 0 ====
+
+	0x254799E8:  812B0008  lwz r9,8(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x254799EC:  808B0000  lwz r4,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x254799F0:  396B000C  addi r11,r11,12
+	   9: GETL       	R11, t8
+	  10: ADDL       	$0xC, t8
+	  11: PUTL       	t8, R11
+	  12: INCEIPL       	$4
+
+	0x254799F4:  7F8BE040  cmpl cr7,r11,r28
+	  13: GETL       	R11, t10
+	  14: GETL       	R28, t12
+	  15: CMPUL       	t10, t12, t14  (-rSo)
+	  16: ICRFL       	t14, $0x7, CR
+	  17: INCEIPL       	$4
+
+	0x254799F8:  7D09CA14  add r8,r9,r25
+	  18: GETL       	R9, t16
+	  19: GETL       	R25, t18
+	  20: ADDL       	t16, t18
+	  21: PUTL       	t18, R8
+	  22: INCEIPL       	$4
+
+	0x254799FC:  7D04C92E  stwx r8,r4,r25
+	  23: GETL       	R25, t20
+	  24: GETL       	R4, t22
+	  25: ADDL       	t22, t20
+	  26: GETL       	R8, t24
+	  27: STL       	t24, (t20)
+	  28: INCEIPL       	$4
+
+	0x25479A00:  4BFFFFE4  b 0x254799E4
+	  29: JMPo       	$0x254799E4  ($4)
+
+
+. 0 254799E8 28
+. 81 2B 00 08 80 8B 00 00 39 6B 00 0C 7F 8B E0 40 7D 09 CA 14 7D 04 C9 2E 4B FF FF E4
+
+==== BB 1176 (0x254799E4) approx BBs exec'd 0 ====
+
+	0x254799E4:  40BCFE40  bc 5,28,0x25479824
+	   0: Jc28o       	$0x25479824
+
+
+. 0 254799E4 4
+. 40 BC FE 40
+
+==== BB 1177 (0x25479714) approx BBs exec'd 0 ====
+
+	0x25479714:  3929000C  addi r9,r9,12
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0xC, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x25479718:  7F89C040  cmpl cr7,r9,r24
+	   4: GETL       	R9, t2
+	   5: GETL       	R24, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547971C:  419CFFF8  bc 12,28,0x25479714
+	   9: Js28o       	$0x25479714
+
+
+. 0 25479714 12
+. 39 29 00 0C 7F 89 C0 40 41 9C FF F8
+
+==== BB 1178 (0x254797E0) approx BBs exec'd 0 ====
+
+	0x254797E0:  3C00AAAA  lis r0,-21846
+	   0: MOVL       	$0xAAAA0000, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254797E4:  600CAAAB  ori r12,r0,0xAAAB
+	   3: MOVL       	$0xAAAAAAAB, t2
+	   4: PUTL       	t2, R12
+	   5: INCEIPL       	$4
+
+	0x254797E8:  7EE86016  mulhwu r23,r8,r12
+	   6: GETL       	R8, t4
+	   7: GETL       	R12, t6
+	   8: UMULHL       	t4, t6
+	   9: PUTL       	t6, R23
+	  10: INCEIPL       	$4
+
+	0x254797EC:  56E9E8FE  rlwinm r9,r23,29,3,31
+	  11: GETL       	R23, t8
+	  12: SHRL       	$0x3, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x254797F0:  7C893840  cmpl cr1,r9,r7
+	  15: GETL       	R9, t10
+	  16: GETL       	R7, t12
+	  17: CMPUL       	t10, t12, t14  (-rSo)
+	  18: ICRFL       	t14, $0x1, CR
+	  19: INCEIPL       	$4
+
+	0x254797F4:  40850008  bc 4,5,0x254797FC
+	  20: Jc05o       	$0x254797FC
+
+
+. 0 254797E0 24
+. 3C 00 AA AA 60 0C AA AB 7E E8 60 16 56 E9 E8 FE 7C 89 38 40 40 85 00 08
+
+==== BB 1179 (0x254733C0) approx BBs exec'd 0 ====
+
+	0x254733C0:  4800C225  bl 0x2547F5E4
+	   0: MOVL       	$0x254733C4, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x2547F5E4  ($4)
+
+
+. 0 254733C0 4
+. 48 00 C2 25
+
+==== BB 1180 _dl_sysdep_start_cleanup(0x2547F5E4) approx BBs exec'd 0 ====
+
+	0x2547F5E4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547F5E8:  38210010  addi r1,r1,16
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0x10, t4
+	   8: PUTL       	t4, R1
+	   9: INCEIPL       	$4
+
+	0x2547F5EC:  4E800020  blr
+	  10: GETL       	LR, t6
+	  11: JMPo-r       	t6  ($4)
+
+
+. 0 2547F5E4 12
+. 94 21 FF F0 38 21 00 10 4E 80 00 20
+
+==== BB 1181 (0x254733C4) approx BBs exec'd 0 ====
+
+	0x254733C4:  80CE01A4  lwz r6,420(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x1A4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x254733C8:  2C060000  cmpi cr0,r6,0
+	   5: GETL       	R6, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x254733CC:  4082094C  bc 4,2,0x25473D18
+	   9: Jc02o       	$0x25473D18
+
+
+. 0 254733C4 12
+. 80 CE 01 A4 2C 06 00 00 40 82 09 4C
+
+==== BB 1182 (0x254733D0) approx BBs exec'd 0 ====
+
+	0x254733D0:  816E0330  lwz r11,816(r14)
+	   0: GETL       	R14, t0
+	   1: ADDL       	$0x330, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x254733D4:  2B0B0001  cmpli cr6,r11,1
+	   5: GETL       	R11, t4
+	   6: MOVL       	$0x1, t8
+	   7: CMPUL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0x254733D8:  4099F784  bc 4,25,0x25472B5C
+	  10: Jc25o       	$0x25472B5C
+
+
+. 0 254733D0 12
+. 81 6E 03 30 2B 0B 00 01 40 99 F7 84
+
+==== BB 1183 (0x254733DC) approx BBs exec'd 0 ====
+
+	0x254733DC:  809401C0  lwz r4,448(r20)
+	   0: GETL       	R20, t0
+	   1: ADDL       	$0x1C0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254733E0:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x254733E4:  38A00000  li r5,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R5
+	  10: INCEIPL       	$4
+
+	0x254733E8:  38C00000  li r6,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R6
+	  13: INCEIPL       	$4
+
+	0x254733EC:  480061D5  bl 0x254795C0
+	  14: MOVL       	$0x254733F0, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x254795C0  ($4)
+
+
+. 0 254733DC 20
+. 80 94 01 C0 7F 83 E3 78 38 A0 00 00 38 C0 00 00 48 00 61 D5
+
+==== BB 1184 (0x254733F0) approx BBs exec'd 0 ====
+
+	0x254733F0:  4BFFF76C  b 0x25472B5C
+	   0: JMPo       	$0x25472B5C  ($4)
+
+
+. 0 254733F0 4
+. 4B FF F7 6C
+
+==== BB 1185 (0x25472B5C) approx BBs exec'd 0 ====
+
+	0x25472B5C:  7E238B78  or r3,r17,r17
+	   0: GETL       	R17, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x25472B60:  4800B479  bl 0x2547DFD8
+	   3: MOVL       	$0x25472B64, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0x2547DFD8  ($4)
+
+
+. 0 25472B5C 8
+. 7E 23 8B 78 48 00 B4 79
+
+==== BB 1186 __GI__dl_allocate_tls_init(0x2547DFD8) approx BBs exec'd 0 ====
+
+	0x2547DFD8:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547DFDC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547DFE0:  93210014  stw r25,20(r1)
+	   9: GETL       	R25, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547DFE4:  7C791B79  or. r25,r3,r3
+	  14: GETL       	R3, t10
+	  15: PUTL       	t10, R25
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0x2547DFE8:  48019019  bl 0x25497000
+	  19: MOVL       	$0x2547DFEC, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547DFD8 20
+. 94 21 FF D0 7C 08 02 A6 93 21 00 14 7C 79 1B 79 48 01 90 19
+
+==== BB 1187 (0x2547DFEC) approx BBs exec'd 0 ====
+
+	0x2547DFEC:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547DFF0:  92E1000C  stw r23,12(r1)
+	   5: GETL       	R23, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0xC, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547DFF4:  7FC802A6  mflr r30
+	  10: GETL       	LR, t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0x2547DFF8:  93010010  stw r24,16(r1)
+	  13: GETL       	R24, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x10, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547DFFC:  38600000  li r3,0
+	  18: MOVL       	$0x0, t14
+	  19: PUTL       	t14, R3
+	  20: INCEIPL       	$4
+
+	0x2547E000:  93410018  stw r26,24(r1)
+	  21: GETL       	R26, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x18, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547E004:  9361001C  stw r27,28(r1)
+	  26: GETL       	R27, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x1C, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0x2547E008:  93810020  stw r28,32(r1)
+	  31: GETL       	R28, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x20, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547E00C:  93A10024  stw r29,36(r1)
+	  36: GETL       	R29, t28
+	  37: GETL       	R1, t30
+	  38: ADDL       	$0x24, t30
+	  39: STL       	t28, (t30)
+	  40: INCEIPL       	$4
+
+	0x2547E010:  93E1002C  stw r31,44(r1)
+	  41: GETL       	R31, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x2C, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0x2547E014:  90010034  stw r0,52(r1)
+	  46: GETL       	R0, t36
+	  47: GETL       	R1, t38
+	  48: ADDL       	$0x34, t38
+	  49: STL       	t36, (t38)
+	  50: INCEIPL       	$4
+
+	0x2547E018:  418200D8  bc 12,2,0x2547E0F0
+	  51: Js02o       	$0x2547E0F0
+
+
+. 0 2547DFEC 48
+. 93 C1 00 28 92 E1 00 0C 7F C8 02 A6 93 01 00 10 38 60 00 00 93 41 00 18 93 61 00 1C 93 81 00 20 93 A1 00 24 93 E1 00 2C 90 01 00 34 41 82 00 D8
+
+==== BB 1188 (0x2547E01C) approx BBs exec'd 0 ====
+
+	0x2547E01C:  831E04C8  lwz r24,1224(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4C8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0x2547E020:  3B400000  li r26,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547E024:  82F9FFFC  lwz r23,-4(r25)
+	   8: GETL       	R25, t6
+	   9: ADDL       	$0xFFFFFFFC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0x2547E028:  83780410  lwz r27,1040(r24)
+	  13: GETL       	R24, t10
+	  14: ADDL       	$0x410, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R27
+	  17: INCEIPL       	$4
+
+	0x2547E02C:  817B0000  lwz r11,0(r27)
+	  18: GETL       	R27, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R11
+	  21: INCEIPL       	$4
+
+	0x2547E030:  201A0000  subfic r0,r26,0
+	  22: GETL       	R26, t18
+	  23: MOVL       	$0x0, t20
+	  24: SBBL       	t18, t20  (-wCa)
+	  25: PUTL       	t20, R0
+	  26: INCEIPL       	$4
+
+	0x2547E034:  7FE0D114  adde r31,r0,r26
+	  27: GETL       	R0, t22
+	  28: GETL       	R26, t24
+	  29: ADCL       	t22, t24  (-rCa-wCa)
+	  30: PUTL       	t24, R31
+	  31: INCEIPL       	$4
+
+	0x2547E038:  7D3AFA14  add r9,r26,r31
+	  32: GETL       	R26, t26
+	  33: GETL       	R31, t28
+	  34: ADDL       	t26, t28
+	  35: PUTL       	t28, R9
+	  36: INCEIPL       	$4
+
+	0x2547E03C:  7F8BF840  cmpl cr7,r11,r31
+	  37: GETL       	R11, t30
+	  38: GETL       	R31, t32
+	  39: CMPUL       	t30, t32, t34  (-rSo)
+	  40: ICRFL       	t34, $0x7, CR
+	  41: INCEIPL       	$4
+
+	0x2547E040:  409D0064  bc 4,29,0x2547E0A4
+	  42: Jc29o       	$0x2547E0A4
+
+
+. 0 2547E01C 40
+. 83 1E 04 C8 3B 40 00 00 82 F9 FF FC 83 78 04 10 81 7B 00 00 20 1A 00 00 7F E0 D1 14 7D 3A FA 14 7F 8B F8 40 40 9D 00 64
+
+==== BB 1189 (0x2547E044) approx BBs exec'd 0 ====
+
+	0x2547E044:  57E31838  rlwinm r3,r31,3,0,28
+	   0: GETL       	R31, t0
+	   1: SHLL       	$0x3, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0x2547E048:  7D23DA14  add r9,r3,r27
+	   4: GETL       	R3, t2
+	   5: GETL       	R27, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547E04C:  3B89000C  addi r28,r9,12
+	   9: GETL       	R9, t6
+	  10: ADDL       	$0xC, t6
+	  11: PUTL       	t6, R28
+	  12: INCEIPL       	$4
+
+	0x2547E050:  80980408  lwz r4,1032(r24)
+	  13: GETL       	R24, t8
+	  14: ADDL       	$0x408, t8
+	  15: LDL       	(t8), t10
+	  16: PUTL       	t10, R4
+	  17: INCEIPL       	$4
+
+	0x2547E054:  7D3AFA14  add r9,r26,r31
+	  18: GETL       	R26, t12
+	  19: GETL       	R31, t14
+	  20: ADDL       	t12, t14
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0x2547E058:  7C892040  cmpl cr1,r9,r4
+	  23: GETL       	R9, t16
+	  24: GETL       	R4, t18
+	  25: CMPUL       	t16, t18, t20  (-rSo)
+	  26: ICRFL       	t20, $0x1, CR
+	  27: INCEIPL       	$4
+
+	0x2547E05C:  41850048  bc 12,5,0x2547E0A4
+	  28: Js05o       	$0x2547E0A4
+
+
+. 0 2547E044 28
+. 57 E3 18 38 7D 23 DA 14 3B 89 00 0C 80 98 04 08 7D 3A FA 14 7C 89 20 40 41 85 00 48
+
+==== BB 1190 (0x2547E060) approx BBs exec'd 0 ====
+
+	0x2547E060:  813C0000  lwz r9,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547E064:  2F090000  cmpi cr6,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x6, CR
+	   7: INCEIPL       	$4
+
+	0x2547E068:  419A0028  bc 12,26,0x2547E090
+	   8: Js26o       	$0x2547E090
+
+
+. 0 2547E060 12
+. 81 3C 00 00 2F 09 00 00 41 9A 00 28
+
+==== BB 1191 (0x2547E06C) approx BBs exec'd 0 ====
+
+	0x2547E06C:  8169022C  lwz r11,556(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x22C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547E070:  2C0BFFFF  cmpi cr0,r11,-1
+	   5: GETL       	R11, t4
+	   6: MOVL       	$0xFFFFFFFF, t8
+	   7: CMPL       	t4, t8, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547E074:  7D4BCA14  add r10,r11,r25
+	  10: GETL       	R11, t10
+	  11: GETL       	R25, t12
+	  12: ADDL       	t10, t12
+	  13: PUTL       	t12, R10
+	  14: INCEIPL       	$4
+
+	0x2547E078:  7D435378  or r3,r10,r10
+	  15: GETL       	R10, t14
+	  16: PUTL       	t14, R3
+	  17: INCEIPL       	$4
+
+	0x2547E07C:  40820040  bc 4,2,0x2547E0BC
+	  18: Jc02o       	$0x2547E0BC
+
+
+. 0 2547E06C 20
+. 81 69 02 2C 2C 0B FF FF 7D 4B CA 14 7D 43 53 78 40 82 00 40
+
+==== BB 1192 (0x2547E0BC) approx BBs exec'd 0 ====
+
+	0x2547E0BC:  81690230  lwz r11,560(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x230, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547E0C0:  5568103A  rlwinm r8,r11,2,0,29
+	   5: GETL       	R11, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x2547E0C4:  7D48B92E  stwx r10,r8,r23
+	   9: GETL       	R23, t6
+	  10: GETL       	R8, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R10, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x2547E0C8:  80A9021C  lwz r5,540(r9)
+	  15: GETL       	R9, t12
+	  16: ADDL       	$0x21C, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R5
+	  19: INCEIPL       	$4
+
+	0x2547E0CC:  80E90220  lwz r7,544(r9)
+	  20: GETL       	R9, t16
+	  21: ADDL       	$0x220, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R7
+	  24: INCEIPL       	$4
+
+	0x2547E0D0:  80890218  lwz r4,536(r9)
+	  25: GETL       	R9, t20
+	  26: ADDL       	$0x218, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R4
+	  29: INCEIPL       	$4
+
+	0x2547E0D4:  7FA53850  subf r29,r5,r7
+	  30: GETL       	R5, t24
+	  31: GETL       	R7, t26
+	  32: SUBL       	t24, t26
+	  33: PUTL       	t26, R29
+	  34: INCEIPL       	$4
+
+	0x2547E0D8:  48005949  bl 0x25483A20
+	  35: MOVL       	$0x2547E0DC, t28
+	  36: PUTL       	t28, LR
+	  37: JMPo-c       	$0x25483A20  ($4)
+
+
+. 0 2547E0BC 32
+. 81 69 02 30 55 68 10 3A 7D 48 B9 2E 80 A9 02 1C 80 E9 02 20 80 89 02 18 7F A5 38 50 48 00 59 49
+
+==== BB 1193 (0x2547E0DC) approx BBs exec'd 0 ====
+
+	0x2547E0DC:  7FA5EB78  or r5,r29,r29
+	   0: GETL       	R29, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0x2547E0E0:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x2547E0E4:  48005661  bl 0x25483744
+	   6: MOVL       	$0x2547E0E8, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x25483744  ($4)
+
+
+. 0 2547E0DC 12
+. 7F A5 EB 78 38 80 00 00 48 00 56 61
+
+==== BB 1194 (0x25483920) approx BBs exec'd 0 ====
+
+	0x25483920:  28850010  cmpli cr1,r5,16
+	   0: GETL       	R5, t0
+	   1: MOVL       	$0x10, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0x25483924:  7CC62A14  add r6,r6,r5
+	   5: GETL       	R6, t6
+	   6: GETL       	R5, t8
+	   7: ADDL       	t6, t8
+	   8: PUTL       	t8, R6
+	   9: INCEIPL       	$4
+
+	0x25483928:  419F0020  bc 12,31,0x25483948
+	  10: Js31o       	$0x25483948
+
+
+. 0 25483920 12
+. 28 85 00 10 7C C6 2A 14 41 9F 00 20
+
+==== BB 1195 (0x25483934) approx BBs exec'd 0 ====
+
+	0x25483934:  4084002C  bc 4,4,0x25483960
+	   0: Jc04o       	$0x25483960
+
+
+. 0 25483934 4
+. 40 84 00 2C
+
+==== BB 1196 (0x25483938) approx BBs exec'd 0 ====
+
+	0x25483938:  4C9C0020  bclr 4,28
+	   0: GETL       	LR, t0
+	   1: Jc28o-r       	t0
+
+
+. 0 25483938 4
+. 4C 9C 00 20
+
+==== BB 1197 (0x2548393C) approx BBs exec'd 0 ====
+
+	0x2548393C:  9086FFFC  stw r4,-4(r6)
+	   0: GETL       	R4, t0
+	   1: GETL       	R6, t2
+	   2: ADDL       	$0xFFFFFFFC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25483940:  9086FFF8  stw r4,-8(r6)
+	   5: GETL       	R4, t4
+	   6: GETL       	R6, t6
+	   7: ADDL       	$0xFFFFFFF8, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x25483944:  4E800020  blr
+	  10: GETL       	LR, t8
+	  11: JMPo-r       	t8  ($4)
+
+
+. 0 2548393C 12
+. 90 86 FF FC 90 86 FF F8 4E 80 00 20
+
+==== BB 1198 (0x2547E0E8) approx BBs exec'd 0 ====
+
+	0x2547E0E8:  4BFFFFA4  b 0x2547E08C
+	   0: JMPo       	$0x2547E08C  ($4)
+
+
+. 0 2547E0E8 4
+. 4B FF FF A4
+
+==== BB 1199 (0x2547E08C) approx BBs exec'd 0 ====
+
+	0x2547E08C:  817B0000  lwz r11,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R11
+	   3: INCEIPL       	$4
+
+	0x2547E090:  3BFF0001  addi r31,r31,1
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0x2547E094:  3B9C0008  addi r28,r28,8
+	   8: GETL       	R28, t6
+	   9: ADDL       	$0x8, t6
+	  10: PUTL       	t6, R28
+	  11: INCEIPL       	$4
+
+	0x2547E098:  7F8BF840  cmpl cr7,r11,r31
+	  12: GETL       	R11, t8
+	  13: GETL       	R31, t10
+	  14: CMPUL       	t8, t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x7, CR
+	  16: INCEIPL       	$4
+
+	0x2547E09C:  419DFFB4  bc 12,29,0x2547E050
+	  17: Js29o       	$0x2547E050
+
+
+. 0 2547E08C 20
+. 81 7B 00 00 3B FF 00 01 3B 9C 00 08 7F 8B F8 40 41 9D FF B4
+
+==== BB 1200 (0x2547E050) approx BBs exec'd 0 ====
+
+	0x2547E050:  80980408  lwz r4,1032(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547E054:  7D3AFA14  add r9,r26,r31
+	   5: GETL       	R26, t4
+	   6: GETL       	R31, t6
+	   7: ADDL       	t4, t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x2547E058:  7C892040  cmpl cr1,r9,r4
+	  10: GETL       	R9, t8
+	  11: GETL       	R4, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0x2547E05C:  41850048  bc 12,5,0x2547E0A4
+	  15: Js05o       	$0x2547E0A4
+
+
+. 0 2547E050 16
+. 80 98 04 08 7D 3A FA 14 7C 89 20 40 41 85 00 48
+
+==== BB 1201 (0x2547E0A4) approx BBs exec'd 0 ====
+
+	0x2547E0A4:  81580408  lwz r10,1032(r24)
+	   0: GETL       	R24, t0
+	   1: ADDL       	$0x408, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547E0A8:  7D3A4B78  or r26,r9,r9
+	   5: GETL       	R9, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547E0AC:  7C8A4840  cmpl cr1,r10,r9
+	   8: GETL       	R10, t6
+	   9: GETL       	R9, t8
+	  10: CMPUL       	t6, t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0x2547E0B0:  4085003C  bc 4,5,0x2547E0EC
+	  13: Jc05o       	$0x2547E0EC
+
+
+. 0 2547E0A4 16
+. 81 58 04 08 7D 3A 4B 78 7C 8A 48 40 40 85 00 3C
+
+==== BB 1202 (0x2547E0EC) approx BBs exec'd 0 ====
+
+	0x2547E0EC:  7F23CB78  or r3,r25,r25
+	   0: GETL       	R25, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547E0F0:  81810034  lwz r12,52(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x34, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0x2547E0F4:  82E1000C  lwz r23,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0x2547E0F8:  83010010  lwz r24,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R24
+	  17: INCEIPL       	$4
+
+	0x2547E0FC:  7D8803A6  mtlr r12
+	  18: GETL       	R12, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547E100:  83210014  lwz r25,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R25
+	  25: INCEIPL       	$4
+
+	0x2547E104:  83410018  lwz r26,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R26
+	  30: INCEIPL       	$4
+
+	0x2547E108:  8361001C  lwz r27,28(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x1C, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R27
+	  35: INCEIPL       	$4
+
+	0x2547E10C:  83810020  lwz r28,32(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x20, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R28
+	  40: INCEIPL       	$4
+
+	0x2547E110:  83A10024  lwz r29,36(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x24, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R29
+	  45: INCEIPL       	$4
+
+	0x2547E114:  83C10028  lwz r30,40(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x28, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R30
+	  50: INCEIPL       	$4
+
+	0x2547E118:  83E1002C  lwz r31,44(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x2C, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R31
+	  55: INCEIPL       	$4
+
+	0x2547E11C:  38210030  addi r1,r1,48
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x30, t44
+	  58: PUTL       	t44, R1
+	  59: INCEIPL       	$4
+
+	0x2547E120:  4E800020  blr
+	  60: GETL       	LR, t46
+	  61: JMPo-r       	t46  ($4)
+
+
+. 0 2547E0EC 56
+. 7F 23 CB 78 81 81 00 34 82 E1 00 0C 83 01 00 10 7D 88 03 A6 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 83 C1 00 28 83 E1 00 2C 38 21 00 30 4E 80 00 20
+
+==== BB 1203 (0x25472B64) approx BBs exec'd 0 ====
+
+	0x25472B64:  38517000  addi r2,r17,28672
+	   0: GETL       	R17, t0
+	   1: ADDL       	$0x7000, t0
+	   2: PUTL       	t0, R2
+	   3: INCEIPL       	$4
+
+	0x25472B68:  3B000001  li r24,1
+	   4: MOVL       	$0x1, t2
+	   5: PUTL       	t2, R24
+	   6: INCEIPL       	$4
+
+	0x25472B6C:  931A000C  stw r24,12(r26)
+	   7: GETL       	R24, t4
+	   8: GETL       	R26, t6
+	   9: ADDL       	$0xC, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x25472B70:  48009431  bl 0x2547BFA0
+	  12: MOVL       	$0x25472B74, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x2547BFA0  ($4)
+
+
+. 0 25472B64 16
+. 38 51 70 00 3B 00 00 01 93 1A 00 0C 48 00 94 31
+
+==== BB 1204 __GI__dl_debug_state(0x2547BFA0) approx BBs exec'd 0 ====
+
+	0x2547BFA0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547BFA4:  38210010  addi r1,r1,16
+	   6: GETL       	R1, t4
+	   7: ADDL       	$0x10, t4
+	   8: PUTL       	t4, R1
+	   9: INCEIPL       	$4
+
+	0x2547BFA8:  4E800020  blr
+	  10: GETL       	LR, t6
+	  11: JMPo-r       	t6  ($4)
+
+
+. 0 2547BFA0 12
+. 94 21 FF F0 38 21 00 10 4E 80 00 20
+
+==== BB 1205 (0x25472B74) approx BBs exec'd 0 ====
+
+	0x25472B74:  480055BD  bl 0x25478130
+	   0: MOVL       	$0x25472B78, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25478130  ($4)
+
+
+. 0 25472B74 4
+. 48 00 55 BD
+
+==== BB 1206 _dl_unload_cache(0x25478130) approx BBs exec'd 0 ====
+
+	0x25478130:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x25478134:  7C6802A6  mflr r3
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x25478138:  4801EEC9  bl 0x25497000
+	   9: MOVL       	$0x2547813C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 25478130 12
+. 94 21 FF F0 7C 68 02 A6 48 01 EE C9
+
+==== BB 1207 (0x2547813C) approx BBs exec'd 0 ====
+
+	0x2547813C:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x25478140:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x25478144:  93E1000C  stw r31,12(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x25478148:  90610014  stw r3,20(r1)
+	  13: GETL       	R3, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547814C:  3800FFFD  li r0,-3
+	  18: MOVL       	$0xFFFFFFFD, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0x25478150:  83FE0254  lwz r31,596(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x254, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0x25478154:  807F0000  lwz r3,0(r31)
+	  26: GETL       	R31, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R3
+	  29: INCEIPL       	$4
+
+	0x25478158:  3923FFFF  addi r9,r3,-1
+	  30: GETL       	R3, t24
+	  31: ADDL       	$0xFFFFFFFF, t24
+	  32: PUTL       	t24, R9
+	  33: INCEIPL       	$4
+
+	0x2547815C:  7F890040  cmpl cr7,r9,r0
+	  34: GETL       	R9, t26
+	  35: GETL       	R0, t28
+	  36: CMPUL       	t26, t28, t30  (-rSo)
+	  37: ICRFL       	t30, $0x7, CR
+	  38: INCEIPL       	$4
+
+	0x25478160:  409D001C  bc 4,29,0x2547817C
+	  39: Jc29o       	$0x2547817C
+
+
+. 0 2547813C 40
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 61 00 14 38 00 FF FD 83 FE 02 54 80 7F 00 00 39 23 FF FF 7F 89 00 40 40 9D 00 1C
+
+==== BB 1208 (0x2547817C) approx BBs exec'd 0 ====
+
+	0x2547817C:  80BE025C  lwz r5,604(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x25C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x25478180:  80850000  lwz r4,0(r5)
+	   5: GETL       	R5, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x25478184:  4800A625  bl 0x254827A8
+	   9: MOVL       	$0x25478188, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0x254827A8  ($4)
+
+
+. 0 2547817C 12
+. 80 BE 02 5C 80 85 00 00 48 00 A6 25
+
+==== BB 1209 munmap(0x254827A8) approx BBs exec'd 0 ====
+
+	0x254827A8:  3800005B  li r0,91
+	   0: MOVL       	$0x5B, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x254827AC:  44000002  sc
+	   3: JMPo-sys       	$0x254827B0  ($4)
+
+
+. 0 254827A8 8
+. 38 00 00 5B 44 00 00 02
+
+==== BB 1210 (0x254827B0) approx BBs exec'd 0 ====
+
+	0x254827B0:  4CA30020  bclr 5,3
+	   0: GETL       	LR, t0
+	   1: Jc03o-r       	t0
+
+
+. 0 254827B0 4
+. 4C A3 00 20
+
+==== BB 1211 (0x25478188) approx BBs exec'd 0 ====
+
+	0x25478188:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0x2547818C:  80C10014  lwz r6,20(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x14, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R6
+	   7: INCEIPL       	$4
+
+	0x25478190:  909F0000  stw r4,0(r31)
+	   8: GETL       	R4, t6
+	   9: GETL       	R31, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0x25478194:  83C10008  lwz r30,8(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x8, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R30
+	  16: INCEIPL       	$4
+
+	0x25478198:  7CC803A6  mtlr r6
+	  17: GETL       	R6, t14
+	  18: PUTL       	t14, LR
+	  19: INCEIPL       	$4
+
+	0x2547819C:  83E1000C  lwz r31,12(r1)
+	  20: GETL       	R1, t16
+	  21: ADDL       	$0xC, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R31
+	  24: INCEIPL       	$4
+
+	0x254781A0:  38210010  addi r1,r1,16
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x10, t20
+	  27: PUTL       	t20, R1
+	  28: INCEIPL       	$4
+
+	0x254781A4:  4E800020  blr
+	  29: GETL       	LR, t22
+	  30: JMPo-r       	t22  ($4)
+
+
+. 0 25478188 32
+. 38 80 00 00 80 C1 00 14 90 9F 00 00 83 C1 00 08 7C C8 03 A6 83 E1 00 0C 38 21 00 10 4E 80 00 20
+
+==== BB 1212 (0x25472B78) approx BBs exec'd 0 ====
+
+	0x25472B78:  81010000  lwz r8,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R8
+	   3: INCEIPL       	$4
+
+	0x25472B7C:  82280004  lwz r17,4(r8)
+	   4: GETL       	R8, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R17
+	   8: INCEIPL       	$4
+
+	0x25472B80:  8188FFB4  lwz r12,-76(r8)
+	   9: GETL       	R8, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x25472B84:  7E2803A6  mtlr r17
+	  14: GETL       	R17, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x25472B88:  81C8FFB8  lwz r14,-72(r8)
+	  17: GETL       	R8, t14
+	  18: ADDL       	$0xFFFFFFB8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R14
+	  21: INCEIPL       	$4
+
+	0x25472B8C:  81E8FFBC  lwz r15,-68(r8)
+	  22: GETL       	R8, t18
+	  23: ADDL       	$0xFFFFFFBC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R15
+	  26: INCEIPL       	$4
+
+	0x25472B90:  7D818120  mtcrf 0x18,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x3, CR
+	  29: ICRFL       	t22, $0x4, CR
+	  30: INCEIPL       	$4
+
+	0x25472B94:  8208FFC0  lwz r16,-64(r8)
+	  31: GETL       	R8, t24
+	  32: ADDL       	$0xFFFFFFC0, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R16
+	  35: INCEIPL       	$4
+
+	0x25472B98:  8228FFC4  lwz r17,-60(r8)
+	  36: GETL       	R8, t28
+	  37: ADDL       	$0xFFFFFFC4, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R17
+	  40: INCEIPL       	$4
+
+	0x25472B9C:  8248FFC8  lwz r18,-56(r8)
+	  41: GETL       	R8, t32
+	  42: ADDL       	$0xFFFFFFC8, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R18
+	  45: INCEIPL       	$4
+
+	0x25472BA0:  8268FFCC  lwz r19,-52(r8)
+	  46: GETL       	R8, t36
+	  47: ADDL       	$0xFFFFFFCC, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R19
+	  50: INCEIPL       	$4
+
+	0x25472BA4:  8288FFD0  lwz r20,-48(r8)
+	  51: GETL       	R8, t40
+	  52: ADDL       	$0xFFFFFFD0, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R20
+	  55: INCEIPL       	$4
+
+	0x25472BA8:  82A8FFD4  lwz r21,-44(r8)
+	  56: GETL       	R8, t44
+	  57: ADDL       	$0xFFFFFFD4, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R21
+	  60: INCEIPL       	$4
+
+	0x25472BAC:  82C8FFD8  lwz r22,-40(r8)
+	  61: GETL       	R8, t48
+	  62: ADDL       	$0xFFFFFFD8, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R22
+	  65: INCEIPL       	$4
+
+	0x25472BB0:  82E8FFDC  lwz r23,-36(r8)
+	  66: GETL       	R8, t52
+	  67: ADDL       	$0xFFFFFFDC, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R23
+	  70: INCEIPL       	$4
+
+	0x25472BB4:  8308FFE0  lwz r24,-32(r8)
+	  71: GETL       	R8, t56
+	  72: ADDL       	$0xFFFFFFE0, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R24
+	  75: INCEIPL       	$4
+
+	0x25472BB8:  8328FFE4  lwz r25,-28(r8)
+	  76: GETL       	R8, t60
+	  77: ADDL       	$0xFFFFFFE4, t60
+	  78: LDL       	(t60), t62
+	  79: PUTL       	t62, R25
+	  80: INCEIPL       	$4
+
+	0x25472BBC:  8348FFE8  lwz r26,-24(r8)
+	  81: GETL       	R8, t64
+	  82: ADDL       	$0xFFFFFFE8, t64
+	  83: LDL       	(t64), t66
+	  84: PUTL       	t66, R26
+	  85: INCEIPL       	$4
+
+	0x25472BC0:  8368FFEC  lwz r27,-20(r8)
+	  86: GETL       	R8, t68
+	  87: ADDL       	$0xFFFFFFEC, t68
+	  88: LDL       	(t68), t70
+	  89: PUTL       	t70, R27
+	  90: INCEIPL       	$4
+
+	0x25472BC4:  8388FFF0  lwz r28,-16(r8)
+	  91: GETL       	R8, t72
+	  92: ADDL       	$0xFFFFFFF0, t72
+	  93: LDL       	(t72), t74
+	  94: PUTL       	t74, R28
+	  95: INCEIPL       	$4
+
+	0x25472BC8:  83A8FFF4  lwz r29,-12(r8)
+	  96: GETL       	R8, t76
+	  97: ADDL       	$0xFFFFFFF4, t76
+	  98: LDL       	(t76), t78
+	  99: PUTL       	t78, R29
+	 100: INCEIPL       	$4
+
+	0x25472BCC:  83C8FFF8  lwz r30,-8(r8)
+	 101: GETL       	R8, t80
+	 102: ADDL       	$0xFFFFFFF8, t80
+	 103: LDL       	(t80), t82
+	 104: PUTL       	t82, R30
+	 105: INCEIPL       	$4
+
+	0x25472BD0:  83E8FFFC  lwz r31,-4(r8)
+	 106: GETL       	R8, t84
+	 107: ADDL       	$0xFFFFFFFC, t84
+	 108: LDL       	(t84), t86
+	 109: PUTL       	t86, R31
+	 110: INCEIPL       	$4
+
+	0x25472BD4:  7D014378  or r1,r8,r8
+	 111: GETL       	R8, t88
+	 112: PUTL       	t88, R1
+	 113: INCEIPL       	$4
+
+	0x25472BD8:  4E800020  blr
+	 114: GETL       	LR, t90
+	 115: JMPo-r       	t90  ($4)
+
+
+. 0 25472B78 100
+. 81 01 00 00 82 28 00 04 81 88 FF B4 7E 28 03 A6 81 C8 FF B8 81 E8 FF BC 7D 81 81 20 82 08 FF C0 82 28 FF C4 82 48 FF C8 82 68 FF CC 82 88 FF D0 82 A8 FF D4 82 C8 FF D8 82 E8 FF DC 83 08 FF E0 83 28 FF E4 83 48 FF E8 83 68 FF EC 83 88 FF F0 83 A8 FF F4 83 C8 FF F8 83 E8 FF FC 7D 01 43 78 4E 80 00 20
+
+==== BB 1213 (0x2547F324) approx BBs exec'd 0 ====
+
+	0x2547F324:  82A10224  lwz r21,548(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x224, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547F328:  806101E0  lwz r3,480(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x1E0, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547F32C:  7EA803A6  mtlr r21
+	  10: GETL       	R21, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x2547F330:  82C101F8  lwz r22,504(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x1F8, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R22
+	  17: INCEIPL       	$4
+
+	0x2547F334:  82A101F4  lwz r21,500(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1F4, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R21
+	  22: INCEIPL       	$4
+
+	0x2547F338:  82E101FC  lwz r23,508(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1FC, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R23
+	  27: INCEIPL       	$4
+
+	0x2547F33C:  83010200  lwz r24,512(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x200, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R24
+	  32: INCEIPL       	$4
+
+	0x2547F340:  83210204  lwz r25,516(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x204, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R25
+	  37: INCEIPL       	$4
+
+	0x2547F344:  83410208  lwz r26,520(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x208, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R26
+	  42: INCEIPL       	$4
+
+	0x2547F348:  8361020C  lwz r27,524(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x20C, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R27
+	  47: INCEIPL       	$4
+
+	0x2547F34C:  83810210  lwz r28,528(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x210, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R28
+	  52: INCEIPL       	$4
+
+	0x2547F350:  83A10214  lwz r29,532(r1)
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x214, t42
+	  55: LDL       	(t42), t44
+	  56: PUTL       	t44, R29
+	  57: INCEIPL       	$4
+
+	0x2547F354:  83C10218  lwz r30,536(r1)
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x218, t46
+	  60: LDL       	(t46), t48
+	  61: PUTL       	t48, R30
+	  62: INCEIPL       	$4
+
+	0x2547F358:  83E1021C  lwz r31,540(r1)
+	  63: GETL       	R1, t50
+	  64: ADDL       	$0x21C, t50
+	  65: LDL       	(t50), t52
+	  66: PUTL       	t52, R31
+	  67: INCEIPL       	$4
+
+	0x2547F35C:  38210220  addi r1,r1,544
+	  68: GETL       	R1, t54
+	  69: ADDL       	$0x220, t54
+	  70: PUTL       	t54, R1
+	  71: INCEIPL       	$4
+
+	0x2547F360:  4E800020  blr
+	  72: GETL       	LR, t56
+	  73: JMPo-r       	t56  ($4)
+
+
+. 0 2547F324 64
+. 82 A1 02 24 80 61 01 E0 7E A8 03 A6 82 C1 01 F8 82 A1 01 F4 82 E1 01 FC 83 01 02 00 83 21 02 04 83 41 02 08 83 61 02 0C 83 81 02 10 83 A1 02 14 83 C1 02 18 83 E1 02 1C 38 21 02 20 4E 80 00 20
+
+==== BB 1214 (0x25471A20) approx BBs exec'd 0 ====
+
+	0x25471A20:  813E04F4  lwz r9,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x25471A24:  7C7D1B78  or r29,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x25471A28:  38600000  li r3,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0x25471A2C:  80090000  lwz r0,0(r9)
+	  11: GETL       	R9, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0x25471A30:  70090080  andi. r9,r0,0x80
+	  15: GETL       	R0, t12
+	  16: ANDL       	$0x80, t12
+	  17: PUTL       	t12, R9
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0x25471A34:  40820028  bc 4,2,0x25471A5C
+	  21: Jc02o       	$0x25471A5C
+
+
+. 0 25471A20 24
+. 81 3E 04 F4 7C 7D 1B 78 38 60 00 00 80 09 00 00 70 09 00 80 40 82 00 28
+
+==== BB 1215 (0x25471A38) approx BBs exec'd 0 ====
+
+	0x25471A38:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25471A3C:  7FA3EB78  or r3,r29,r29
+	   5: GETL       	R29, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x25471A40:  8361000C  lwz r27,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R27
+	  12: INCEIPL       	$4
+
+	0x25471A44:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0x25471A48:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x25471A4C:  83A10014  lwz r29,20(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R29
+	  25: INCEIPL       	$4
+
+	0x25471A50:  83C10018  lwz r30,24(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x18, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R30
+	  30: INCEIPL       	$4
+
+	0x25471A54:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0x25471A58:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+. 0 25471A38 36
+. 80 81 00 24 7F A3 EB 78 83 61 00 0C 83 81 00 10 7C 88 03 A6 83 A1 00 14 83 C1 00 18 38 21 00 20 4E 80 00 20
+
+==== BB 1216 (0x25471DD0) approx BBs exec'd 0 ====
+
+	0x25471DD0:  82E102B4  lwz r23,692(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x2B4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x25471DD4:  83010290  lwz r24,656(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x290, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R24
+	   9: INCEIPL       	$4
+
+	0x25471DD8:  7EE803A6  mtlr r23
+	  10: GETL       	R23, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x25471DDC:  83210294  lwz r25,660(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x294, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R25
+	  17: INCEIPL       	$4
+
+	0x25471DE0:  82E1028C  lwz r23,652(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x28C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R23
+	  22: INCEIPL       	$4
+
+	0x25471DE4:  83410298  lwz r26,664(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x298, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R26
+	  27: INCEIPL       	$4
+
+	0x25471DE8:  8361029C  lwz r27,668(r1)
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x29C, t22
+	  30: LDL       	(t22), t24
+	  31: PUTL       	t24, R27
+	  32: INCEIPL       	$4
+
+	0x25471DEC:  838102A0  lwz r28,672(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x2A0, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R28
+	  37: INCEIPL       	$4
+
+	0x25471DF0:  83A102A4  lwz r29,676(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x2A4, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R29
+	  42: INCEIPL       	$4
+
+	0x25471DF4:  83C102A8  lwz r30,680(r1)
+	  43: GETL       	R1, t34
+	  44: ADDL       	$0x2A8, t34
+	  45: LDL       	(t34), t36
+	  46: PUTL       	t36, R30
+	  47: INCEIPL       	$4
+
+	0x25471DF8:  83E102AC  lwz r31,684(r1)
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x2AC, t38
+	  50: LDL       	(t38), t40
+	  51: PUTL       	t40, R31
+	  52: INCEIPL       	$4
+
+	0x25471DFC:  382102B0  addi r1,r1,688
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x2B0, t42
+	  55: PUTL       	t42, R1
+	  56: INCEIPL       	$4
+
+	0x25471E00:  4E800020  blr
+	  57: GETL       	LR, t44
+	  58: JMPo-r       	t44  ($4)
+
+
+. 0 25471DD0 52
+. 82 E1 02 B4 83 01 02 90 7E E8 03 A6 83 21 02 94 82 E1 02 8C 83 41 02 98 83 61 02 9C 83 81 02 A0 83 A1 02 A4 83 C1 02 A8 83 E1 02 AC 38 21 02 B0 4E 80 00 20
+
+==== BB 1217 (0x254804E8) approx BBs exec'd 0 ====
+
+	0x254804E8:  48016B19  bl 0x25497000
+	   0: MOVL       	$0x254804EC, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 254804E8 4
+. 48 01 6B 19
+
+==== BB 1218 (0x254804EC) approx BBs exec'd 0 ====
+
+	0x254804EC:  7FE802A6  mflr r31
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0x254804F0:  7C7E1B78  or r30,r3,r3
+	   3: GETL       	R3, t2
+	   4: PUTL       	t2, R30
+	   5: INCEIPL       	$4
+
+	0x254804F4:  839F0498  lwz r28,1176(r31)
+	   6: GETL       	R31, t4
+	   7: ADDL       	$0x498, t4
+	   8: LDL       	(t4), t6
+	   9: PUTL       	t6, R28
+	  10: INCEIPL       	$4
+
+	0x254804F8:  83BF04D0  lwz r29,1232(r31)
+	  11: GETL       	R31, t8
+	  12: ADDL       	$0x4D0, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R29
+	  15: INCEIPL       	$4
+
+	0x254804FC:  837F04DC  lwz r27,1244(r31)
+	  16: GETL       	R31, t12
+	  17: ADDL       	$0x4DC, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R27
+	  20: INCEIPL       	$4
+
+	0x25480500:  807C0000  lwz r3,0(r28)
+	  21: GETL       	R28, t16
+	  22: LDL       	(t16), t18
+	  23: PUTL       	t18, R3
+	  24: INCEIPL       	$4
+
+	0x25480504:  809D0000  lwz r4,0(r29)
+	  25: GETL       	R29, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R4
+	  28: INCEIPL       	$4
+
+	0x25480508:  80BB0000  lwz r5,0(r27)
+	  29: GETL       	R27, t24
+	  30: LDL       	(t24), t26
+	  31: PUTL       	t26, R5
+	  32: INCEIPL       	$4
+
+	0x2548050C:  5486103A  rlwinm r6,r4,2,0,29
+	  33: GETL       	R4, t28
+	  34: SHLL       	$0x2, t28
+	  35: PUTL       	t28, R6
+	  36: INCEIPL       	$4
+
+	0x25480510:  7CC53214  add r6,r5,r6
+	  37: GETL       	R5, t30
+	  38: GETL       	R6, t32
+	  39: ADDL       	t30, t32
+	  40: PUTL       	t32, R6
+	  41: INCEIPL       	$4
+
+	0x25480514:  38C60004  addi r6,r6,4
+	  42: GETL       	R6, t34
+	  43: ADDL       	$0x4, t34
+	  44: PUTL       	t34, R6
+	  45: INCEIPL       	$4
+
+	0x25480518:  4BFFB229  bl 0x2547B740
+	  46: MOVL       	$0x2548051C, t36
+	  47: PUTL       	t36, LR
+	  48: JMPo-c       	$0x2547B740  ($4)
+
+
+. 0 254804EC 48
+. 7F E8 02 A6 7C 7E 1B 78 83 9F 04 98 83 BF 04 D0 83 7F 04 DC 80 7C 00 00 80 9D 00 00 80 BB 00 00 54 86 10 3A 7C C5 32 14 38 C6 00 04 4B FF B2 29
+
+==== BB 1219 _dl_init_internal(0x2547B740) approx BBs exec'd 0 ====
+
+	0x2547B740:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547B744:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547B748:  4801B8B9  bl 0x25497000
+	   9: MOVL       	$0x2547B74C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547B740 12
+. 94 21 FF C0 7C 08 02 A6 48 01 B8 B9
+
+==== BB 1220 (0x2547B74C) approx BBs exec'd 0 ====
+
+	0x2547B74C:  93C10038  stw r30,56(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x38, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B750:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547B754:  92C10018  stw r22,24(r1)
+	   8: GETL       	R22, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x18, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0x2547B758:  90010044  stw r0,68(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x44, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0x2547B75C:  93E1003C  stw r31,60(r1)
+	  18: GETL       	R31, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x3C, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0x2547B760:  82DE04C8  lwz r22,1224(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x4C8, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R22
+	  27: INCEIPL       	$4
+
+	0x2547B764:  92E1001C  stw r23,28(r1)
+	  28: GETL       	R23, t22
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0x1C, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0x2547B768:  7C972378  or r23,r4,r4
+	  33: GETL       	R4, t26
+	  34: PUTL       	t26, R23
+	  35: INCEIPL       	$4
+
+	0x2547B76C:  83F601A0  lwz r31,416(r22)
+	  36: GETL       	R22, t28
+	  37: ADDL       	$0x1A0, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R31
+	  40: INCEIPL       	$4
+
+	0x2547B770:  93010020  stw r24,32(r1)
+	  41: GETL       	R24, t32
+	  42: GETL       	R1, t34
+	  43: ADDL       	$0x20, t34
+	  44: STL       	t32, (t34)
+	  45: INCEIPL       	$4
+
+	0x2547B774:  7CB82B78  or r24,r5,r5
+	  46: GETL       	R5, t36
+	  47: PUTL       	t36, R24
+	  48: INCEIPL       	$4
+
+	0x2547B778:  2F9F0000  cmpi cr7,r31,0
+	  49: GETL       	R31, t38
+	  50: CMP0L       	t38, t40  (-rSo)
+	  51: ICRFL       	t40, $0x7, CR
+	  52: INCEIPL       	$4
+
+	0x2547B77C:  93210024  stw r25,36(r1)
+	  53: GETL       	R25, t42
+	  54: GETL       	R1, t44
+	  55: ADDL       	$0x24, t44
+	  56: STL       	t42, (t44)
+	  57: INCEIPL       	$4
+
+	0x2547B780:  93410028  stw r26,40(r1)
+	  58: GETL       	R26, t46
+	  59: GETL       	R1, t48
+	  60: ADDL       	$0x28, t48
+	  61: STL       	t46, (t48)
+	  62: INCEIPL       	$4
+
+	0x2547B784:  7CD93378  or r25,r6,r6
+	  63: GETL       	R6, t50
+	  64: PUTL       	t50, R25
+	  65: INCEIPL       	$4
+
+	0x2547B788:  9361002C  stw r27,44(r1)
+	  66: GETL       	R27, t52
+	  67: GETL       	R1, t54
+	  68: ADDL       	$0x2C, t54
+	  69: STL       	t52, (t54)
+	  70: INCEIPL       	$4
+
+	0x2547B78C:  7C7A1B78  or r26,r3,r3
+	  71: GETL       	R3, t56
+	  72: PUTL       	t56, R26
+	  73: INCEIPL       	$4
+
+	0x2547B790:  93A10034  stw r29,52(r1)
+	  74: GETL       	R29, t58
+	  75: GETL       	R1, t60
+	  76: ADDL       	$0x34, t60
+	  77: STL       	t58, (t60)
+	  78: INCEIPL       	$4
+
+	0x2547B794:  92810010  stw r20,16(r1)
+	  79: GETL       	R20, t62
+	  80: GETL       	R1, t64
+	  81: ADDL       	$0x10, t64
+	  82: STL       	t62, (t64)
+	  83: INCEIPL       	$4
+
+	0x2547B798:  92A10014  stw r21,20(r1)
+	  84: GETL       	R21, t66
+	  85: GETL       	R1, t68
+	  86: ADDL       	$0x14, t68
+	  87: STL       	t66, (t68)
+	  88: INCEIPL       	$4
+
+	0x2547B79C:  93810030  stw r28,48(r1)
+	  89: GETL       	R28, t70
+	  90: GETL       	R1, t72
+	  91: ADDL       	$0x30, t72
+	  92: STL       	t70, (t72)
+	  93: INCEIPL       	$4
+
+	0x2547B7A0:  83A300A0  lwz r29,160(r3)
+	  94: GETL       	R3, t74
+	  95: ADDL       	$0xA0, t74
+	  96: LDL       	(t74), t76
+	  97: PUTL       	t76, R29
+	  98: INCEIPL       	$4
+
+	0x2547B7A4:  836300A4  lwz r27,164(r3)
+	  99: GETL       	R3, t78
+	 100: ADDL       	$0xA4, t78
+	 101: LDL       	(t78), t80
+	 102: PUTL       	t80, R27
+	 103: INCEIPL       	$4
+
+	0x2547B7A8:  409E019C  bc 4,30,0x2547B944
+	 104: Jc30o       	$0x2547B944
+
+
+. 0 2547B74C 96
+. 93 C1 00 38 7F C8 02 A6 92 C1 00 18 90 01 00 44 93 E1 00 3C 82 DE 04 C8 92 E1 00 1C 7C 97 23 78 83 F6 01 A0 93 01 00 20 7C B8 2B 78 2F 9F 00 00 93 21 00 24 93 41 00 28 7C D9 33 78 93 61 00 2C 7C 7A 1B 78 93 A1 00 34 92 81 00 10 92 A1 00 14 93 81 00 30 83 A3 00 A0 83 63 00 A4 40 9E 01 9C
+
+==== BB 1221 (0x2547B944) approx BBs exec'd 0 ====
+
+	0x2547B944:  801F0180  lwz r0,384(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x180, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x2547B948:  74091000  andis. r9,r0,0x1000
+	   5: GETL       	R0, t4
+	   6: ANDL       	$0x10000000, t4
+	   7: PUTL       	t4, R9
+	   8: CMP0L       	t4, t6  (-rSo)
+	   9: ICRFL       	t6, $0x0, CR
+	  10: INCEIPL       	$4
+
+	0x2547B94C:  40820110  bc 4,2,0x2547BA5C
+	  11: Jc02o       	$0x2547BA5C
+
+
+. 0 2547B944 12
+. 80 1F 01 80 74 09 10 00 40 82 01 10
+
+==== BB 1222 (0x2547B950) approx BBs exec'd 0 ====
+
+	0x2547B950:  64091000  oris r9,r0,0x1000
+	   0: GETL       	R0, t0
+	   1: ORL       	$0x10000000, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x2547B954:  809F0004  lwz r4,4(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2547B958:  913F0180  stw r9,384(r31)
+	   9: GETL       	R9, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x180, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547B95C:  88640000  lbz r3,0(r4)
+	  14: GETL       	R4, t10
+	  15: LDB       	(t10), t12
+	  16: PUTL       	t12, R3
+	  17: INCEIPL       	$4
+
+	0x2547B960:  2F030000  cmpi cr6,r3,0
+	  18: GETL       	R3, t14
+	  19: CMP0L       	t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x2547B964:  419A00F0  bc 12,26,0x2547BA54
+	  22: Js26o       	$0x2547BA54
+
+
+. 0 2547B950 24
+. 64 09 10 00 80 9F 00 04 91 3F 01 80 88 64 00 00 2F 03 00 00 41 9A 00 F0
+
+==== BB 1223 (0x2547B968) approx BBs exec'd 0 ====
+
+	0x2547B968:  815F0050  lwz r10,80(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547B96C:  2C8A0000  cmpi cr1,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547B970:  40860010  bc 4,6,0x2547B980
+	   9: Jc06o       	$0x2547B980
+
+
+. 0 2547B968 12
+. 81 5F 00 50 2C 8A 00 00 40 86 00 10
+
+==== BB 1224 (0x2547B980) approx BBs exec'd 0 ====
+
+	0x2547B980:  813E04F4  lwz r9,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547B984:  80C90000  lwz r6,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R6
+	   8: INCEIPL       	$4
+
+	0x2547B988:  70C90002  andi. r9,r6,0x2
+	   9: GETL       	R6, t8
+	  10: ANDL       	$0x2, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x2547B98C:  40820130  bc 4,2,0x2547BABC
+	  15: Jc02o       	$0x2547BABC
+
+
+. 0 2547B980 16
+. 81 3E 04 F4 80 C9 00 00 70 C9 00 02 40 82 01 30
+
+==== BB 1225 (0x2547B990) approx BBs exec'd 0 ====
+
+	0x2547B990:  2F0A0000  cmpi cr6,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547B994:  409A00D4  bc 4,26,0x2547BA68
+	   4: Jc26o       	$0x2547BA68
+
+
+. 0 2547B990 8
+. 2F 0A 00 00 40 9A 00 D4
+
+==== BB 1226 (0x2547BA68) approx BBs exec'd 0 ====
+
+	0x2547BA68:  818A0004  lwz r12,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0x2547BA6C:  7EE3BB78  or r3,r23,r23
+	   5: GETL       	R23, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547BA70:  811F0000  lwz r8,0(r31)
+	   8: GETL       	R31, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R8
+	  11: INCEIPL       	$4
+
+	0x2547BA74:  7F04C378  or r4,r24,r24
+	  12: GETL       	R24, t10
+	  13: PUTL       	t10, R4
+	  14: INCEIPL       	$4
+
+	0x2547BA78:  7F25CB78  or r5,r25,r25
+	  15: GETL       	R25, t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0x2547BA7C:  7CE86214  add r7,r8,r12
+	  18: GETL       	R8, t14
+	  19: GETL       	R12, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R7
+	  22: INCEIPL       	$4
+
+	0x2547BA80:  7CE903A6  mtctr r7
+	  23: GETL       	R7, t18
+	  24: PUTL       	t18, CTR
+	  25: INCEIPL       	$4
+
+	0x2547BA84:  4E800421  bctrl
+	  26: MOVL       	$0x2547BA88, t20
+	  27: PUTL       	t20, LR
+	  28: GETL       	CTR, t22
+	  29: JMPo-c       	t22  ($4)
+
+
+. 0 2547BA68 32
+. 81 8A 00 04 7E E3 BB 78 81 1F 00 00 7F 04 C3 78 7F 25 CB 78 7C E8 62 14 7C E9 03 A6 4E 80 04 21
+
+==== BB 1227 (0xFFDE898) approx BBs exec'd 0 ====
+
+	0xFFDE898:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDE89C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDE8A0:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFFDE8A4:  4800001D  bl 0xFFDE8C0
+	  14: MOVL       	$0xFFDE8A8, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFDE8C0  ($4)
+
+
+. 0 FFDE898 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 1D
+
+==== BB 1228 (0xFFDE8C0) approx BBs exec'd 0 ====
+
+	0xFFDE8C0:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDE8C4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDE8C8:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFFDE8CC:  90010014  stw r0,20(r1)
+	  14: GETL       	R0, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFFDE8D0:  48010839  bl 0xFFEF108
+	  19: MOVL       	$0xFFDE8D4, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xFFEF108  ($4)
+
+
+. 0 FFDE8C0 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 01 08 39
+
+==== BB 1229 (0xFFEF108) approx BBs exec'd 0 ====
+
+	0xFFEF108:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0xFFEF10C, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+. 0 FFEF108 4
+. 4E 80 00 21
+
+==== BB 1230 (0xFFDE8D4) approx BBs exec'd 0 ====
+
+	0xFFDE8D4:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0xFFDE8D8:  801E000C  lwz r0,12(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFFDE8DC:  2F800000  cmpi cr7,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFFDE8E0:  419E000C  bc 12,30,0xFFDE8EC
+	  12: Js30o       	$0xFFDE8EC
+
+
+. 0 FFDE8D4 16
+. 7F C8 02 A6 80 1E 00 0C 2F 80 00 00 41 9E 00 0C
+
+==== BB 1231 (0xFFDE8EC) approx BBs exec'd 0 ====
+
+	0xFFDE8EC:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDE8F0:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFFDE8F4:  83C10008  lwz r30,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xFFDE8F8:  38210010  addi r1,r1,16
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: PUTL       	t10, R1
+	  16: INCEIPL       	$4
+
+	0xFFDE8FC:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+. 0 FFDE8EC 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+==== BB 1232 (0xFFDE8A8) approx BBs exec'd 0 ====
+
+	0xFFDE8A8:  48000115  bl 0xFFDE9BC
+	   0: MOVL       	$0xFFDE8AC, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFFDE9BC  ($4)
+
+
+. 0 FFDE8A8 4
+. 48 00 01 15
+
+==== BB 1233 (0xFFDE9BC) approx BBs exec'd 0 ====
+
+	0xFFDE9BC:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDE9C0:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDE9C4:  429F0005  bcl 20,31,0xFFDE9C8
+	   9: MOVL       	$0xFFDE9C8, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFFDE9C8:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFFDE9CC:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xFFDE9D0:  90010014  stw r0,20(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFFDE9D4:  801EFFF0  lwz r0,-16(r30)
+	  25: GETL       	R30, t18
+	  26: ADDL       	$0xFFFFFFF0, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R0
+	  29: INCEIPL       	$4
+
+	0xFFDE9D8:  7FC0F214  add r30,r0,r30
+	  30: GETL       	R0, t22
+	  31: GETL       	R30, t24
+	  32: ADDL       	t22, t24
+	  33: PUTL       	t24, R30
+	  34: INCEIPL       	$4
+
+	0xFFDE9DC:  807E8010  lwz r3,-32752(r30)
+	  35: GETL       	R30, t26
+	  36: ADDL       	$0xFFFF8010, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R3
+	  39: INCEIPL       	$4
+
+	0xFFDE9E0:  80030000  lwz r0,0(r3)
+	  40: GETL       	R3, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R0
+	  43: INCEIPL       	$4
+
+	0xFFDE9E4:  2F800000  cmpi cr7,r0,0
+	  44: GETL       	R0, t34
+	  45: CMP0L       	t34, t36  (-rSo)
+	  46: ICRFL       	t36, $0x7, CR
+	  47: INCEIPL       	$4
+
+	0xFFDE9E8:  419E0018  bc 12,30,0xFFDEA00
+	  48: Js30o       	$0xFFDEA00
+
+
+. 0 FFDE9BC 48
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 90 01 00 14 80 1E FF F0 7F C0 F2 14 80 7E 80 10 80 03 00 00 2F 80 00 00 41 9E 00 18
+
+==== BB 1234 (0xFFDEA00) approx BBs exec'd 0 ====
+
+	0xFFDEA00:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDEA04:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFFDEA08:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0xFFDEA0C:  7C0803A6  mtlr r0
+	  14: GETL       	R0, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0xFFDEA10:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+. 0 FFDEA00 20
+. 80 01 00 14 83 C1 00 08 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+==== BB 1235 (0xFFDE8AC) approx BBs exec'd 0 ====
+
+	0xFFDE8AC:  48000659  bl 0xFFDEF04
+	   0: MOVL       	$0xFFDE8B0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFFDEF04  ($4)
+
+
+. 0 FFDE8AC 4
+. 48 00 06 59
+
+==== BB 1236 (0xFFDEF04) approx BBs exec'd 0 ====
+
+	0xFFDEF04:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDEF08:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDEF0C:  429F0005  bcl 20,31,0xFFDEF10
+	   9: MOVL       	$0xFFDEF10, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFFDEF10:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFFDEF14:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xFFDEF18:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFFDEF1C:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFFDEF20:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFFDEF24:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xFFDEF28:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xFFDEF2C:  8009FFFC  lwz r0,-4(r9)
+	  45: GETL       	R9, t34
+	  46: ADDL       	$0xFFFFFFFC, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R0
+	  49: INCEIPL       	$4
+
+	0xFFDEF30:  3BE9FFFC  addi r31,r9,-4
+	  50: GETL       	R9, t38
+	  51: ADDL       	$0xFFFFFFFC, t38
+	  52: PUTL       	t38, R31
+	  53: INCEIPL       	$4
+
+	0xFFDEF34:  48000010  b 0xFFDEF44
+	  54: JMPo       	$0xFFDEF44  ($4)
+
+
+. 0 FFDEF04 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 80 09 FF FC 3B E9 FF FC 48 00 00 10
+
+==== BB 1237 (0xFFDEF44) approx BBs exec'd 0 ====
+
+	0xFFDEF44:  2F80FFFF  cmpi cr7,r0,-1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFFDEF48:  409EFFF0  bc 4,30,0xFFDEF38
+	   5: Jc30o       	$0xFFDEF38
+
+
+. 0 FFDEF44 8
+. 2F 80 FF FF 40 9E FF F0
+
+==== BB 1238 (0xFFDEF4C) approx BBs exec'd 0 ====
+
+	0xFFDEF4C:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDEF50:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFFDEF54:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFFDEF58:  7C0803A6  mtlr r0
+	  15: GETL       	R0, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFFDEF5C:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xFFDEF60:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+. 0 FFDEF4C 24
+. 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1239 (0xFFDE8B0) approx BBs exec'd 0 ====
+
+	0xFFDE8B0:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDE8B4:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFFDE8B8:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xFFDE8BC:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+. 0 FFDE8B0 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1240 (0x2547BA88) approx BBs exec'd 0 ====
+
+	0x2547BA88:  4BFFFF10  b 0x2547B998
+	   0: JMPo       	$0x2547B998  ($4)
+
+
+. 0 2547BA88 4
+. 4B FF FF 10
+
+==== BB 1241 (0x2547B998) approx BBs exec'd 0 ====
+
+	0x2547B998:  817F0084  lwz r11,132(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547B99C:  2C8B0000  cmpi cr1,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547B9A0:  418600BC  bc 12,6,0x2547BA5C
+	   9: Js06o       	$0x2547BA5C
+
+
+. 0 2547B998 12
+. 81 7F 00 84 2C 8B 00 00 41 86 00 BC
+
+==== BB 1242 (0x2547BA5C) approx BBs exec'd 0 ====
+
+	0x2547BA5C:  3B800000  li r28,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R28
+	   2: INCEIPL       	$4
+
+	0x2547BA60:  939601A0  stw r28,416(r22)
+	   3: GETL       	R28, t2
+	   4: GETL       	R22, t4
+	   5: ADDL       	$0x1A0, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x2547BA64:  4BFFFD48  b 0x2547B7AC
+	   8: JMPo       	$0x2547B7AC  ($4)
+
+
+. 0 2547BA5C 12
+. 3B 80 00 00 93 96 01 A0 4B FF FD 48
+
+==== BB 1243 (0x2547B7AC) approx BBs exec'd 0 ====
+
+	0x2547B7AC:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547B7B0:  409E0240  bc 4,30,0x2547B9F0
+	   4: Jc30o       	$0x2547B9F0
+
+
+. 0 2547B7AC 8
+. 2F 9D 00 00 40 9E 02 40
+
+==== BB 1244 (0x2547B7B4) approx BBs exec'd 0 ====
+
+	0x2547B7B4:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547B7B8:  3A800001  li r20,1
+	   3: MOVL       	$0x1, t2
+	   4: PUTL       	t2, R20
+	   5: INCEIPL       	$4
+
+	0x2547B7BC:  48000789  bl 0x2547BF44
+	   6: MOVL       	$0x2547B7C0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0x2547BF44  ($4)
+
+
+. 0 2547B7B4 12
+. 38 60 00 00 3A 80 00 01 48 00 07 89
+
+==== BB 1245 (0x2547BF94) approx BBs exec'd 0 ====
+
+	0x2547BF94:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0x2547BF98:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x2547BF9C:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+. 0 2547BF94 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+==== BB 1246 (0x2547B7C0) approx BBs exec'd 0 ====
+
+	0x2547B7C0:  9283000C  stw r20,12(r3)
+	   0: GETL       	R20, t0
+	   1: GETL       	R3, t2
+	   2: ADDL       	$0xC, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B7C4:  7C761B78  or r22,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R22
+	   7: INCEIPL       	$4
+
+	0x2547B7C8:  480007D9  bl 0x2547BFA0
+	   8: MOVL       	$0x2547B7CC, t6
+	   9: PUTL       	t6, LR
+	  10: JMPo-c       	$0x2547BFA0  ($4)
+
+
+. 0 2547B7C0 12
+. 92 83 00 0C 7C 76 1B 78 48 00 07 D9
+
+==== BB 1247 (0x2547B7CC) approx BBs exec'd 0 ====
+
+	0x2547B7CC:  83BA015C  lwz r29,348(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x15C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0x2547B7D0:  2F9D0000  cmpi cr7,r29,0
+	   5: GETL       	R29, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547B7D4:  3B9DFFFF  addi r28,r29,-1
+	   9: GETL       	R29, t8
+	  10: ADDL       	$0xFFFFFFFF, t8
+	  11: PUTL       	t8, R28
+	  12: INCEIPL       	$4
+
+	0x2547B7D8:  419E0058  bc 12,30,0x2547B830
+	  13: Js30o       	$0x2547B830
+
+
+. 0 2547B7CC 16
+. 83 BA 01 5C 2F 9D 00 00 3B 9D FF FF 41 9E 00 58
+
+==== BB 1248 (0x2547B7DC) approx BBs exec'd 0 ====
+
+	0x2547B7DC:  82BA01E8  lwz r21,488(r26)
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R21
+	   4: INCEIPL       	$4
+
+	0x2547B7E0:  5780103A  rlwinm r0,r28,2,0,29
+	   5: GETL       	R28, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547B7E4:  7FF5002E  lwzx r31,r21,r0
+	   9: GETL       	R0, t6
+	  10: GETL       	R21, t8
+	  11: ADDL       	t8, t6
+	  12: LDL       	(t6), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0x2547B7E8:  801F0180  lwz r0,384(r31)
+	  15: GETL       	R31, t12
+	  16: ADDL       	$0x180, t12
+	  17: LDL       	(t12), t14
+	  18: PUTL       	t14, R0
+	  19: INCEIPL       	$4
+
+	0x2547B7EC:  74091000  andis. r9,r0,0x1000
+	  20: GETL       	R0, t16
+	  21: ANDL       	$0x10000000, t16
+	  22: PUTL       	t16, R9
+	  23: CMP0L       	t16, t18  (-rSo)
+	  24: ICRFL       	t18, $0x0, CR
+	  25: INCEIPL       	$4
+
+	0x2547B7F0:  40820034  bc 4,2,0x2547B824
+	  26: Jc02o       	$0x2547B824
+
+
+. 0 2547B7DC 24
+. 82 BA 01 E8 57 80 10 3A 7F F5 00 2E 80 1F 01 80 74 09 10 00 40 82 00 34
+
+==== BB 1249 (0x2547B7F4) approx BBs exec'd 0 ====
+
+	0x2547B7F4:  64091000  oris r9,r0,0x1000
+	   0: GETL       	R0, t0
+	   1: ORL       	$0x10000000, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0x2547B7F8:  809F0004  lwz r4,4(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2547B7FC:  913F0180  stw r9,384(r31)
+	   9: GETL       	R9, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	$0x180, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547B800:  88640000  lbz r3,0(r4)
+	  14: GETL       	R4, t10
+	  15: LDB       	(t10), t12
+	  16: PUTL       	t12, R3
+	  17: INCEIPL       	$4
+
+	0x2547B804:  2F030000  cmpi cr6,r3,0
+	  18: GETL       	R3, t14
+	  19: CMP0L       	t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x2547B808:  419A0104  bc 12,26,0x2547B90C
+	  22: Js26o       	$0x2547B90C
+
+
+. 0 2547B7F4 24
+. 64 09 10 00 80 9F 00 04 91 3F 01 80 88 64 00 00 2F 03 00 00 41 9A 01 04
+
+==== BB 1250 (0x2547B80C) approx BBs exec'd 0 ====
+
+	0x2547B80C:  815F0050  lwz r10,80(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x50, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547B810:  2C8A0000  cmpi cr1,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547B814:  40860064  bc 4,6,0x2547B878
+	   9: Jc06o       	$0x2547B878
+
+
+. 0 2547B80C 12
+. 81 5F 00 50 2C 8A 00 00 40 86 00 64
+
+==== BB 1251 (0x2547B818) approx BBs exec'd 0 ====
+
+	0x2547B818:  80BF0084  lwz r5,132(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547B81C:  2F850000  cmpi cr7,r5,0
+	   5: GETL       	R5, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547B820:  409E0058  bc 4,30,0x2547B878
+	   9: Jc30o       	$0x2547B878
+
+
+. 0 2547B818 12
+. 80 BF 00 84 2F 85 00 00 40 9E 00 58
+
+==== BB 1252 (0x2547B824) approx BBs exec'd 0 ====
+
+	0x2547B824:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547B828:  3B9CFFFF  addi r28,r28,-1
+	   4: GETL       	R28, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2547B82C:  409EFFB0  bc 4,30,0x2547B7DC
+	   8: Jc30o       	$0x2547B7DC
+
+
+. 0 2547B824 12
+. 2F 9C 00 00 3B 9C FF FF 40 9E FF B0
+
+==== BB 1253 (0x2547B878) approx BBs exec'd 0 ====
+
+	0x2547B878:  80DE04F4  lwz r6,1268(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x4F4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0x2547B87C:  83660000  lwz r27,0(r6)
+	   5: GETL       	R6, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0x2547B880:  73690002  andi. r9,r27,0x2
+	   9: GETL       	R27, t8
+	  10: ANDL       	$0x2, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0x2547B884:  4082009C  bc 4,2,0x2547B920
+	  15: Jc02o       	$0x2547B920
+
+
+. 0 2547B878 16
+. 80 DE 04 F4 83 66 00 00 73 69 00 02 40 82 00 9C
+
+==== BB 1254 (0x2547B888) approx BBs exec'd 0 ====
+
+	0x2547B888:  2F0A0000  cmpi cr6,r10,0
+	   0: GETL       	R10, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547B88C:  409A005C  bc 4,26,0x2547B8E8
+	   4: Jc26o       	$0x2547B8E8
+
+
+. 0 2547B888 8
+. 2F 0A 00 00 40 9A 00 5C
+
+==== BB 1255 (0x2547B8E8) approx BBs exec'd 0 ====
+
+	0x2547B8E8:  810A0004  lwz r8,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547B8EC:  7EE3BB78  or r3,r23,r23
+	   5: GETL       	R23, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0x2547B8F0:  80FF0000  lwz r7,0(r31)
+	   8: GETL       	R31, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R7
+	  11: INCEIPL       	$4
+
+	0x2547B8F4:  7F25CB78  or r5,r25,r25
+	  12: GETL       	R25, t10
+	  13: PUTL       	t10, R5
+	  14: INCEIPL       	$4
+
+	0x2547B8F8:  7C874214  add r4,r7,r8
+	  15: GETL       	R7, t12
+	  16: GETL       	R8, t14
+	  17: ADDL       	t12, t14
+	  18: PUTL       	t14, R4
+	  19: INCEIPL       	$4
+
+	0x2547B8FC:  7C8903A6  mtctr r4
+	  20: GETL       	R4, t16
+	  21: PUTL       	t16, CTR
+	  22: INCEIPL       	$4
+
+	0x2547B900:  7F04C378  or r4,r24,r24
+	  23: GETL       	R24, t18
+	  24: PUTL       	t18, R4
+	  25: INCEIPL       	$4
+
+	0x2547B904:  4E800421  bctrl
+	  26: MOVL       	$0x2547B908, t20
+	  27: PUTL       	t20, LR
+	  28: GETL       	CTR, t22
+	  29: JMPo-c       	t22  ($4)
+
+
+. 0 2547B8E8 32
+. 81 0A 00 04 7E E3 BB 78 80 FF 00 00 7F 25 CB 78 7C 87 42 14 7C 89 03 A6 7F 04 C3 78 4E 80 04 21
+
+==== BB 1256 _init(0xFE9B620) approx BBs exec'd 0 ====
+
+	0xFE9B620:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE9B624:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE9B628:  4812C829  bl 0xFFC7E50
+	   9: MOVL       	$0xFE9B62C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FE9B620 12
+. 94 21 FF E0 7C 08 02 A6 48 12 C8 29
+
+==== BB 1257 (0xFFC7E50) approx BBs exec'd 0 ====
+
+	0xFFC7E50:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0xFFC7E54, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+. 0 FFC7E50 4
+. 4E 80 00 21
+
+==== BB 1258 (0xFE9B62C) approx BBs exec'd 0 ====
+
+	0xFE9B62C:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE9B630:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE9B634:  93810010  stw r28,16(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE9B638:  93A10014  stw r29,20(r1)
+	  13: GETL       	R29, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE9B63C:  7CBC2B78  or r28,r5,r5
+	  18: GETL       	R5, t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFE9B640:  93E1001C  stw r31,28(r1)
+	  21: GETL       	R31, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x1C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFE9B644:  7C9D2378  or r29,r4,r4
+	  26: GETL       	R4, t20
+	  27: PUTL       	t20, R29
+	  28: INCEIPL       	$4
+
+	0xFE9B648:  817E1CE4  lwz r11,7396(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x1CE4, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R11
+	  33: INCEIPL       	$4
+
+	0xFE9B64C:  7C7F1B78  or r31,r3,r3
+	  34: GETL       	R3, t26
+	  35: PUTL       	t26, R31
+	  36: INCEIPL       	$4
+
+	0xFE9B650:  811E1AA4  lwz r8,6820(r30)
+	  37: GETL       	R30, t28
+	  38: ADDL       	$0x1AA4, t28
+	  39: LDL       	(t28), t30
+	  40: PUTL       	t30, R8
+	  41: INCEIPL       	$4
+
+	0xFE9B654:  39200000  li r9,0
+	  42: MOVL       	$0x0, t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xFE9B658:  2F8B0000  cmpi cr7,r11,0
+	  45: GETL       	R11, t34
+	  46: CMP0L       	t34, t36  (-rSo)
+	  47: ICRFL       	t36, $0x7, CR
+	  48: INCEIPL       	$4
+
+	0xFE9B65C:  90010024  stw r0,36(r1)
+	  49: GETL       	R0, t38
+	  50: GETL       	R1, t40
+	  51: ADDL       	$0x24, t40
+	  52: STL       	t38, (t40)
+	  53: INCEIPL       	$4
+
+	0xFE9B660:  7D0A4378  or r10,r8,r8
+	  54: GETL       	R8, t42
+	  55: PUTL       	t42, R10
+	  56: INCEIPL       	$4
+
+	0xFE9B664:  419E0010  bc 12,30,0xFE9B674
+	  57: Js30o       	$0xFE9B674
+
+
+. 0 FE9B62C 60
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 A1 00 14 7C BC 2B 78 93 E1 00 1C 7C 9D 23 78 81 7E 1C E4 7C 7F 1B 78 81 1E 1A A4 39 20 00 00 2F 8B 00 00 90 01 00 24 7D 0A 43 78 41 9E 00 10
+
+==== BB 1259 (0xFE9B674) approx BBs exec'd 0 ====
+
+	0xFE9B674:  912A0000  stw r9,0(r10)
+	   0: GETL       	R9, t0
+	   1: GETL       	R10, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFE9B678:  80880000  lwz r4,0(r8)
+	   4: GETL       	R8, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R4
+	   7: INCEIPL       	$4
+
+	0xFE9B67C:  2C840000  cmpi cr1,r4,0
+	   8: GETL       	R4, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0xFE9B680:  40860024  bc 4,6,0xFE9B6A4
+	  12: Jc06o       	$0xFE9B6A4
+
+
+. 0 FE9B674 16
+. 91 2A 00 00 80 88 00 00 2C 84 00 00 40 86 00 24
+
+==== BB 1260 (0xFE9B684) approx BBs exec'd 0 ====
+
+	0xFE9B684:  813E1BF4  lwz r9,7156(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BF4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFE9B688:  817E1BC8  lwz r11,7112(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1BC8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R11
+	   9: INCEIPL       	$4
+
+	0xFE9B68C:  81490000  lwz r10,0(r9)
+	  10: GETL       	R9, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0xFE9B690:  80AB0034  lwz r5,52(r11)
+	  14: GETL       	R11, t12
+	  15: ADDL       	$0x34, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R5
+	  18: INCEIPL       	$4
+
+	0xFE9B694:  7D435378  or r3,r10,r10
+	  19: GETL       	R10, t16
+	  20: PUTL       	t16, R3
+	  21: INCEIPL       	$4
+
+	0xFE9B698:  7F055000  cmp cr6,r5,r10
+	  22: GETL       	R5, t18
+	  23: GETL       	R10, t20
+	  24: CMPL       	t18, t20, t22  (-rSo)
+	  25: ICRFL       	t22, $0x6, CR
+	  26: INCEIPL       	$4
+
+	0xFE9B69C:  419A0008  bc 12,26,0xFE9B6A4
+	  27: Js26o       	$0xFE9B6A4
+
+
+. 0 FE9B684 28
+. 81 3E 1B F4 81 7E 1B C8 81 49 00 00 80 AB 00 34 7D 43 53 78 7F 05 50 00 41 9A 00 08
+
+==== BB 1261 (0xFE9B6A4) approx BBs exec'd 0 ====
+
+	0xFE9B6A4:  819E1A8C  lwz r12,6796(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1A8C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R12
+	   4: INCEIPL       	$4
+
+	0xFE9B6A8:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFE9B6AC:  811E1DD4  lwz r8,7636(r30)
+	   8: GETL       	R30, t6
+	   9: ADDL       	$0x1DD4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0xFE9B6B0:  7FA4EB78  or r4,r29,r29
+	  13: GETL       	R29, t10
+	  14: PUTL       	t10, R4
+	  15: INCEIPL       	$4
+
+	0xFE9B6B4:  80FE1B84  lwz r7,7044(r30)
+	  16: GETL       	R30, t12
+	  17: ADDL       	$0x1B84, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R7
+	  20: INCEIPL       	$4
+
+	0xFE9B6B8:  7F85E378  or r5,r28,r28
+	  21: GETL       	R28, t16
+	  22: PUTL       	t16, R5
+	  23: INCEIPL       	$4
+
+	0xFE9B6BC:  93EC0000  stw r31,0(r12)
+	  24: GETL       	R31, t18
+	  25: GETL       	R12, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0xFE9B6C0:  93A80000  stw r29,0(r8)
+	  28: GETL       	R29, t22
+	  29: GETL       	R8, t24
+	  30: STL       	t22, (t24)
+	  31: INCEIPL       	$4
+
+	0xFE9B6C4:  93870000  stw r28,0(r7)
+	  32: GETL       	R28, t26
+	  33: GETL       	R7, t28
+	  34: STL       	t26, (t28)
+	  35: INCEIPL       	$4
+
+	0xFE9B6C8:  480B6421  bl 0xFF51AE8
+	  36: MOVL       	$0xFE9B6CC, t30
+	  37: PUTL       	t30, LR
+	  38: JMPo-c       	$0xFF51AE8  ($4)
+
+
+. 0 FE9B6A4 40
+. 81 9E 1A 8C 7F E3 FB 78 81 1E 1D D4 7F A4 EB 78 80 FE 1B 84 7F 85 E3 78 93 EC 00 00 93 A8 00 00 93 87 00 00 48 0B 64 21
+
+==== BB 1262 __init_misc(0xFF51AE8) approx BBs exec'd 0 ====
+
+	0xFF51AE8:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF51AEC:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFF51AF0:  93A10014  stw r29,20(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFF51AF4:  7C9D2379  or. r29,r4,r4
+	  14: GETL       	R4, t10
+	  15: PUTL       	t10, R29
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x0, CR
+	  18: INCEIPL       	$4
+
+	0xFF51AF8:  48076359  bl 0xFFC7E50
+	  19: MOVL       	$0xFF51AFC, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FF51AE8 20
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 7C 9D 23 79 48 07 63 59
+
+==== BB 1263 (0xFF51AFC) approx BBs exec'd 0 ====
+
+	0xFF51AFC:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF51B00:  93E1001C  stw r31,28(r1)
+	   5: GETL       	R31, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x1C, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFF51B04:  7FC802A6  mflr r30
+	  10: GETL       	LR, t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0xFF51B08:  90010024  stw r0,36(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x24, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF51B0C:  4182003C  bc 12,2,0xFF51B48
+	  18: Js02o       	$0xFF51B48
+
+
+. 0 FF51AFC 20
+. 93 C1 00 18 93 E1 00 1C 7F C8 02 A6 90 01 00 24 41 82 00 3C
+
+==== BB 1264 (0xFF51B10) approx BBs exec'd 0 ====
+
+	0xFF51B10:  83FD0000  lwz r31,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R31
+	   3: INCEIPL       	$4
+
+	0xFF51B14:  3880002F  li r4,47
+	   4: MOVL       	$0x2F, t4
+	   5: PUTL       	t4, R4
+	   6: INCEIPL       	$4
+
+	0xFF51B18:  2F9F0000  cmpi cr7,r31,0
+	   7: GETL       	R31, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x7, CR
+	  10: INCEIPL       	$4
+
+	0xFF51B1C:  7FE3FB78  or r3,r31,r31
+	  11: GETL       	R31, t10
+	  12: PUTL       	t10, R3
+	  13: INCEIPL       	$4
+
+	0xFF51B20:  419E0028  bc 12,30,0xFF51B48
+	  14: Js30o       	$0xFF51B48
+
+
+. 0 FF51B10 20
+. 83 FD 00 00 38 80 00 2F 2F 9F 00 00 7F E3 FB 78 41 9E 00 28
+
+==== BB 1265 (0xFF51B24) approx BBs exec'd 0 ====
+
+	0xFF51B24:  4BFA5459  bl 0xFEF6F7C
+	   0: MOVL       	$0xFF51B28, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFEF6F7C  ($4)
+
+
+. 0 FF51B24 4
+. 4B FA 54 59
+
+==== BB 1266 strrchr(0xFEF6F7C) approx BBs exec'd 0 ====
+
+	0xFEF6F7C:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEF6F80:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEF6F84:  93A10014  stw r29,20(r1)
+	   9: GETL       	R29, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEF6F88:  549D063E  rlwinm r29,r4,0,24,31
+	  14: GETL       	R4, t10
+	  15: ANDL       	$0xFF, t10
+	  16: PUTL       	t10, R29
+	  17: INCEIPL       	$4
+
+	0xFEF6F8C:  2F9D0000  cmpi cr7,r29,0
+	  18: GETL       	R29, t12
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x7, CR
+	  21: INCEIPL       	$4
+
+	0xFEF6F90:  93C10018  stw r30,24(r1)
+	  22: GETL       	R30, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x18, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0xFEF6F94:  90010024  stw r0,36(r1)
+	  27: GETL       	R0, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x24, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFEF6F98:  7C601B78  or r0,r3,r3
+	  32: GETL       	R3, t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFEF6F9C:  93E1001C  stw r31,28(r1)
+	  35: GETL       	R31, t26
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: STL       	t26, (t28)
+	  39: INCEIPL       	$4
+
+	0xFEF6FA0:  409E0024  bc 4,30,0xFEF6FC4
+	  40: Jc30o       	$0xFEF6FC4
+
+
+. 0 FEF6F7C 40
+. 94 21 FF E0 7C 08 02 A6 93 A1 00 14 54 9D 06 3E 2F 9D 00 00 93 C1 00 18 90 01 00 24 7C 60 1B 78 93 E1 00 1C 40 9E 00 24
+
+==== BB 1267 (0xFEF6FC4) approx BBs exec'd 0 ====
+
+	0xFEF6FC4:  3BE00000  li r31,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFEF6FC8:  4800000C  b 0xFEF6FD4
+	   3: JMPo       	$0xFEF6FD4  ($4)
+
+
+. 0 FEF6FC4 8
+. 3B E0 00 00 48 00 00 0C
+
+==== BB 1268 (0xFEF6FD4) approx BBs exec'd 0 ====
+
+	0xFEF6FD4:  7C030378  or r3,r0,r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEF6FD8:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFEF6FDC:  4BFFF2F1  bl 0xFEF62CC
+	   6: MOVL       	$0xFEF6FE0, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFEF62CC  ($4)
+
+
+. 0 FEF6FD4 12
+. 7C 03 03 78 7F A4 EB 78 4B FF F2 F1
+
+==== BB 1269 strchr(0xFEF62CC) approx BBs exec'd 0 ====
+
+	0xFEF62CC:  5084442E  rlwimi r4,r4,8,16,23
+	   0: GETL       	R4, t0
+	   1: GETL       	R4, t2
+	   2: ROLL       	$0x8, t2
+	   3: ANDL       	$0xFF00, t2
+	   4: ANDL       	$0xFFFF00FF, t0
+	   5: ORL       	t0, t2
+	   6: PUTL       	t2, R4
+	   7: INCEIPL       	$4
+
+	0xFEF62D0:  3960FFFF  li r11,-1
+	   8: MOVL       	$0xFFFFFFFF, t4
+	   9: PUTL       	t4, R11
+	  10: INCEIPL       	$4
+
+	0xFEF62D4:  5084801E  rlwimi r4,r4,16,0,15
+	  11: GETL       	R4, t6
+	  12: GETL       	R4, t8
+	  13: ROLL       	$0x10, t8
+	  14: ANDL       	$0xFFFF0000, t8
+	  15: ANDL       	$0xFFFF, t6
+	  16: ORL       	t6, t8
+	  17: PUTL       	t8, R4
+	  18: INCEIPL       	$4
+
+	0xFEF62D8:  546A1EF8  rlwinm r10,r3,3,27,28
+	  19: GETL       	R3, t10
+	  20: ROLL       	$0x3, t10
+	  21: ANDL       	$0x18, t10
+	  22: PUTL       	t10, R10
+	  23: INCEIPL       	$4
+
+	0xFEF62DC:  3CC0FEFF  lis r6,-257
+	  24: MOVL       	$0xFEFF0000, t12
+	  25: PUTL       	t12, R6
+	  26: INCEIPL       	$4
+
+	0xFEF62E0:  3CE07F7F  lis r7,32639
+	  27: MOVL       	$0x7F7F0000, t14
+	  28: PUTL       	t14, R7
+	  29: INCEIPL       	$4
+
+	0xFEF62E4:  5468003A  rlwinm r8,r3,0,0,29
+	  30: GETL       	R3, t16
+	  31: ANDL       	$0xFFFFFFFC, t16
+	  32: PUTL       	t16, R8
+	  33: INCEIPL       	$4
+
+	0xFEF62E8:  38C6FEFF  addi r6,r6,-257
+	  34: MOVL       	$0xFEFEFEFF, t18
+	  35: PUTL       	t18, R6
+	  36: INCEIPL       	$4
+
+	0xFEF62EC:  38E77F7F  addi r7,r7,32639
+	  37: MOVL       	$0x7F7F7F7F, t20
+	  38: PUTL       	t20, R7
+	  39: INCEIPL       	$4
+
+	0xFEF62F0:  80A80000  lwz r5,0(r8)
+	  40: GETL       	R8, t22
+	  41: LDL       	(t22), t24
+	  42: PUTL       	t24, R5
+	  43: INCEIPL       	$4
+
+	0xFEF62F4:  7D6B5430  srw r11,r11,r10
+	  44: GETL       	R11, t28
+	  45: GETL       	R10, t26
+	  46: SHRL       	t26, t28
+	  47: PUTL       	t28, R11
+	  48: INCEIPL       	$4
+
+	0xFEF62F8:  7CA55B38  orc r5,r5,r11
+	  49: GETL       	R5, t30
+	  50: GETL       	R11, t32
+	  51: NOTL       	t32
+	  52: ORL       	t30, t32
+	  53: PUTL       	t32, R5
+	  54: INCEIPL       	$4
+
+	0xFEF62FC:  7C062A14  add r0,r6,r5
+	  55: GETL       	R6, t34
+	  56: GETL       	R5, t36
+	  57: ADDL       	t34, t36
+	  58: PUTL       	t36, R0
+	  59: INCEIPL       	$4
+
+	0xFEF6300:  7CE928F8  nor r9,r7,r5
+	  60: GETL       	R7, t38
+	  61: GETL       	R5, t40
+	  62: ORL       	t40, t38
+	  63: NOTL       	t38
+	  64: PUTL       	t38, R9
+	  65: INCEIPL       	$4
+
+	0xFEF6304:  7C004839  and. r0,r0,r9
+	  66: GETL       	R0, t42
+	  67: GETL       	R9, t44
+	  68: ANDL       	t42, t44
+	  69: PUTL       	t44, R0
+	  70: CMP0L       	t44, t46  (-rSo)
+	  71: ICRFL       	t46, $0x0, CR
+	  72: INCEIPL       	$4
+
+	0xFEF6308:  7C8C2A78  xor r12,r4,r5
+	  73: GETL       	R4, t48
+	  74: GETL       	R5, t50
+	  75: XORL       	t48, t50
+	  76: PUTL       	t50, R12
+	  77: INCEIPL       	$4
+
+	0xFEF630C:  7D8C5B38  orc r12,r12,r11
+	  78: GETL       	R12, t52
+	  79: GETL       	R11, t54
+	  80: NOTL       	t54
+	  81: ORL       	t52, t54
+	  82: PUTL       	t54, R12
+	  83: INCEIPL       	$4
+
+	0xFEF6310:  48000020  b 0xFEF6330
+	  84: JMPo       	$0xFEF6330  ($4)
+
+
+. 0 FEF62CC 72
+. 50 84 44 2E 39 60 FF FF 50 84 80 1E 54 6A 1E F8 3C C0 FE FF 3C E0 7F 7F 54 68 00 3A 38 C6 FE FF 38 E7 7F 7F 80 A8 00 00 7D 6B 54 30 7C A5 5B 38 7C 06 2A 14 7C E9 28 F8 7C 00 48 39 7C 8C 2A 78 7D 8C 5B 38 48 00 00 20
+
+==== BB 1270 (0xFEF6330) approx BBs exec'd 0 ====
+
+	0xFEF6330:  7C066214  add r0,r6,r12
+	   0: GETL       	R6, t0
+	   1: GETL       	R12, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEF6334:  7CE960F8  nor r9,r7,r12
+	   5: GETL       	R7, t4
+	   6: GETL       	R12, t6
+	   7: ORL       	t6, t4
+	   8: NOTL       	t4
+	   9: PUTL       	t4, R9
+	  10: INCEIPL       	$4
+
+	0xFEF6338:  4182FFDC  bc 12,2,0xFEF6314
+	  11: Js02o       	$0xFEF6314
+
+
+. 0 FEF6330 12
+. 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+==== BB 1271 (0xFEF6314) approx BBs exec'd 0 ====
+
+	0xFEF6314:  84A80004  lwzu r5,4(r8)
+	   0: GETL       	R8, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R8
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFEF6318:  7C004839  and. r0,r0,r9
+	   6: GETL       	R0, t4
+	   7: GETL       	R9, t6
+	   8: ANDL       	t4, t6
+	   9: PUTL       	t6, R0
+	  10: CMP0L       	t6, t8  (-rSo)
+	  11: ICRFL       	t8, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFEF631C:  7C062A14  add r0,r6,r5
+	  13: GETL       	R6, t10
+	  14: GETL       	R5, t12
+	  15: ADDL       	t10, t12
+	  16: PUTL       	t12, R0
+	  17: INCEIPL       	$4
+
+	0xFEF6320:  7CE928F8  nor r9,r7,r5
+	  18: GETL       	R7, t14
+	  19: GETL       	R5, t16
+	  20: ORL       	t16, t14
+	  21: NOTL       	t14
+	  22: PUTL       	t14, R9
+	  23: INCEIPL       	$4
+
+	0xFEF6324:  4082005C  bc 4,2,0xFEF6380
+	  24: Jc02o       	$0xFEF6380
+
+
+. 0 FEF6314 20
+. 84 A8 00 04 7C 00 48 39 7C 06 2A 14 7C E9 28 F8 40 82 00 5C
+
+==== BB 1272 (0xFEF6380) approx BBs exec'd 0 ====
+
+	0xFEF6380:  7CE06038  and r0,r7,r12
+	   0: GETL       	R7, t0
+	   1: GETL       	R12, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFEF6384:  7CEA6378  or r10,r7,r12
+	   5: GETL       	R7, t4
+	   6: GETL       	R12, t6
+	   7: ORL       	t6, t4
+	   8: PUTL       	t4, R10
+	   9: INCEIPL       	$4
+
+	0xFEF6388:  7C003A14  add r0,r0,r7
+	  10: GETL       	R0, t8
+	  11: GETL       	R7, t10
+	  12: ADDL       	t8, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFEF638C:  7D4900F8  nor r9,r10,r0
+	  15: GETL       	R10, t12
+	  16: GETL       	R0, t14
+	  17: ORL       	t14, t12
+	  18: NOTL       	t12
+	  19: PUTL       	t12, R9
+	  20: INCEIPL       	$4
+
+	0xFEF6390:  7D240034  cntlzw r4,r9
+	  21: GETL       	R9, t16
+	  22: CNTLZL       	t16
+	  23: PUTL       	t16, R4
+	  24: INCEIPL       	$4
+
+	0xFEF6394:  3908FFFC  addi r8,r8,-4
+	  25: GETL       	R8, t18
+	  26: ADDL       	$0xFFFFFFFC, t18
+	  27: PUTL       	t18, R8
+	  28: INCEIPL       	$4
+
+	0xFEF6398:  5484E8FE  rlwinm r4,r4,29,3,31
+	  29: GETL       	R4, t20
+	  30: SHRL       	$0x3, t20
+	  31: PUTL       	t20, R4
+	  32: INCEIPL       	$4
+
+	0xFEF639C:  7C682214  add r3,r8,r4
+	  33: GETL       	R8, t22
+	  34: GETL       	R4, t24
+	  35: ADDL       	t22, t24
+	  36: PUTL       	t24, R3
+	  37: INCEIPL       	$4
+
+	0xFEF63A0:  4E800020  blr
+	  38: GETL       	LR, t26
+	  39: JMPo-r       	t26  ($4)
+
+
+. 0 FEF6380 36
+. 7C E0 60 38 7C EA 63 78 7C 00 3A 14 7D 49 00 F8 7D 24 00 34 39 08 FF FC 54 84 E8 FE 7C 68 22 14 4E 80 00 20
+
+==== BB 1273 (0xFEF6FE0) approx BBs exec'd 0 ====
+
+	0xFEF6FE0:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEF6FE4:  4082FFE8  bc 4,2,0xFEF6FCC
+	   4: Jc02o       	$0xFEF6FCC
+
+
+. 0 FEF6FE0 8
+. 2C 03 00 00 40 82 FF E8
+
+==== BB 1274 (0xFEF6FCC) approx BBs exec'd 0 ====
+
+	0xFEF6FCC:  7C7F1B78  or r31,r3,r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0xFEF6FD0:  38030001  addi r0,r3,1
+	   3: GETL       	R3, t2
+	   4: ADDL       	$0x1, t2
+	   5: PUTL       	t2, R0
+	   6: INCEIPL       	$4
+
+	0xFEF6FD4:  7C030378  or r3,r0,r0
+	   7: GETL       	R0, t4
+	   8: PUTL       	t4, R3
+	   9: INCEIPL       	$4
+
+	0xFEF6FD8:  7FA4EB78  or r4,r29,r29
+	  10: GETL       	R29, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0xFEF6FDC:  4BFFF2F1  bl 0xFEF62CC
+	  13: MOVL       	$0xFEF6FE0, t8
+	  14: PUTL       	t8, LR
+	  15: JMPo-c       	$0xFEF62CC  ($4)
+
+
+. 0 FEF6FCC 20
+. 7C 7F 1B 78 38 03 00 01 7C 03 03 78 7F A4 EB 78 4B FF F2 F1
+
+==== BB 1275 (0xFEF6328) approx BBs exec'd 0 ====
+
+	0xFEF6328:  7C004839  and. r0,r0,r9
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFEF632C:  7C8C2A78  xor r12,r4,r5
+	   7: GETL       	R4, t6
+	   8: GETL       	R5, t8
+	   9: XORL       	t6, t8
+	  10: PUTL       	t8, R12
+	  11: INCEIPL       	$4
+
+	0xFEF6330:  7C066214  add r0,r6,r12
+	  12: GETL       	R6, t10
+	  13: GETL       	R12, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0xFEF6334:  7CE960F8  nor r9,r7,r12
+	  17: GETL       	R7, t14
+	  18: GETL       	R12, t16
+	  19: ORL       	t16, t14
+	  20: NOTL       	t14
+	  21: PUTL       	t14, R9
+	  22: INCEIPL       	$4
+
+	0xFEF6338:  4182FFDC  bc 12,2,0xFEF6314
+	  23: Js02o       	$0xFEF6314
+
+
+. 0 FEF6328 20
+. 7C 00 48 39 7C 8C 2A 78 7C 06 62 14 7C E9 60 F8 41 82 FF DC
+
+==== BB 1276 (0xFEF633C) approx BBs exec'd 0 ====
+
+	0xFEF633C:  7C004839  and. r0,r0,r9
+	   0: GETL       	R0, t0
+	   1: GETL       	R9, t2
+	   2: ANDL       	t0, t2
+	   3: PUTL       	t2, R0
+	   4: CMP0L       	t2, t4  (-rSo)
+	   5: ICRFL       	t4, $0x0, CR
+	   6: INCEIPL       	$4
+
+	0xFEF6340:  38600000  li r3,0
+	   7: MOVL       	$0x0, t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0xFEF6344:  4D820020  bclr 12,2
+	  10: GETL       	LR, t8
+	  11: Js02o-r       	t8
+
+
+. 0 FEF633C 12
+. 7C 00 48 39 38 60 00 00 4D 82 00 20
+
+==== BB 1277 (0xFEF6FE8) approx BBs exec'd 0 ====
+
+	0xFEF6FE8:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEF6FEC:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEF6FF0:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xFEF6FF4:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0xFEF6FF8:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFEF6FFC:  83E1001C  lwz r31,28(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0xFEF7000:  38210020  addi r1,r1,32
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x20, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0xFEF7004:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+. 0 FEF6FE8 32
+. 80 81 00 24 7F E3 FB 78 83 A1 00 14 83 C1 00 18 7C 88 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1278 (0xFF51B28) approx BBs exec'd 0 ====
+
+	0xFF51B28:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFF51B2C:  38630001  addi r3,r3,1
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x1, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFF51B30:  41860034  bc 12,6,0xFF51B64
+	   8: Js06o       	$0xFF51B64
+
+
+. 0 FF51B28 12
+. 2C 83 00 00 38 63 00 01 41 86 00 34
+
+==== BB 1279 (0xFF51B34) approx BBs exec'd 0 ====
+
+	0xFF51B34:  809E1C34  lwz r4,7220(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1C34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFF51B38:  90640000  stw r3,0(r4)
+	   5: GETL       	R3, t4
+	   6: GETL       	R4, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFF51B3C:  80BD0000  lwz r5,0(r29)
+	   9: GETL       	R29, t8
+	  10: LDL       	(t8), t10
+	  11: PUTL       	t10, R5
+	  12: INCEIPL       	$4
+
+	0xFF51B40:  807E1E24  lwz r3,7716(r30)
+	  13: GETL       	R30, t12
+	  14: ADDL       	$0x1E24, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R3
+	  17: INCEIPL       	$4
+
+	0xFF51B44:  90A30000  stw r5,0(r3)
+	  18: GETL       	R5, t16
+	  19: GETL       	R3, t18
+	  20: STL       	t16, (t18)
+	  21: INCEIPL       	$4
+
+	0xFF51B48:  80C10024  lwz r6,36(r1)
+	  22: GETL       	R1, t20
+	  23: ADDL       	$0x24, t20
+	  24: LDL       	(t20), t22
+	  25: PUTL       	t22, R6
+	  26: INCEIPL       	$4
+
+	0xFF51B4C:  83A10014  lwz r29,20(r1)
+	  27: GETL       	R1, t24
+	  28: ADDL       	$0x14, t24
+	  29: LDL       	(t24), t26
+	  30: PUTL       	t26, R29
+	  31: INCEIPL       	$4
+
+	0xFF51B50:  83C10018  lwz r30,24(r1)
+	  32: GETL       	R1, t28
+	  33: ADDL       	$0x18, t28
+	  34: LDL       	(t28), t30
+	  35: PUTL       	t30, R30
+	  36: INCEIPL       	$4
+
+	0xFF51B54:  7CC803A6  mtlr r6
+	  37: GETL       	R6, t32
+	  38: PUTL       	t32, LR
+	  39: INCEIPL       	$4
+
+	0xFF51B58:  83E1001C  lwz r31,28(r1)
+	  40: GETL       	R1, t34
+	  41: ADDL       	$0x1C, t34
+	  42: LDL       	(t34), t36
+	  43: PUTL       	t36, R31
+	  44: INCEIPL       	$4
+
+	0xFF51B5C:  38210020  addi r1,r1,32
+	  45: GETL       	R1, t38
+	  46: ADDL       	$0x20, t38
+	  47: PUTL       	t38, R1
+	  48: INCEIPL       	$4
+
+	0xFF51B60:  4E800020  blr
+	  49: GETL       	LR, t40
+	  50: JMPo-r       	t40  ($4)
+
+
+. 0 FF51B34 48
+. 80 9E 1C 34 90 64 00 00 80 BD 00 00 80 7E 1E 24 90 A3 00 00 80 C1 00 24 83 A1 00 14 83 C1 00 18 7C C8 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1280 (0xFE9B6CC) approx BBs exec'd 0 ====
+
+	0xFE9B6CC:  4BFFFE95  bl 0xFE9B560
+	   0: MOVL       	$0xFE9B6D0, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFE9B560  ($4)
+
+
+. 0 FE9B6CC 4
+. 4B FF FE 95
+
+==== BB 1281 __libc_global_ctors(0xFE9B560) approx BBs exec'd 0 ====
+
+	0xFE9B560:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE9B564:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE9B568:  4812C8E9  bl 0xFFC7E50
+	   9: MOVL       	$0xFE9B56C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FE9B560 12
+. 94 21 FF F0 7C 08 02 A6 48 12 C8 E9
+
+==== BB 1282 (0xFE9B56C) approx BBs exec'd 0 ====
+
+	0xFE9B56C:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE9B570:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE9B574:  93E1000C  stw r31,12(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE9B578:  90010014  stw r0,20(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE9B57C:  813E0018  lwz r9,24(r30)
+	  18: GETL       	R30, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R9
+	  22: INCEIPL       	$4
+
+	0xFE9B580:  80090004  lwz r0,4(r9)
+	  23: GETL       	R9, t18
+	  24: ADDL       	$0x4, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R0
+	  27: INCEIPL       	$4
+
+	0xFE9B584:  3BE90004  addi r31,r9,4
+	  28: GETL       	R9, t22
+	  29: ADDL       	$0x4, t22
+	  30: PUTL       	t22, R31
+	  31: INCEIPL       	$4
+
+	0xFE9B588:  2F800000  cmpi cr7,r0,0
+	  32: GETL       	R0, t24
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x7, CR
+	  35: INCEIPL       	$4
+
+	0xFE9B58C:  419E0018  bc 12,30,0xFE9B5A4
+	  36: Js30o       	$0xFE9B5A4
+
+
+. 0 FE9B56C 36
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 81 3E 00 18 80 09 00 04 3B E9 00 04 2F 80 00 00 41 9E 00 18
+
+==== BB 1283 (0xFE9B590) approx BBs exec'd 0 ====
+
+	0xFE9B590:  7C0903A6  mtctr r0
+	   0: GETL       	R0, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0xFE9B594:  4E800421  bctrl
+	   3: MOVL       	$0xFE9B598, t2
+	   4: PUTL       	t2, LR
+	   5: GETL       	CTR, t4
+	   6: JMPo-c       	t4  ($4)
+
+
+. 0 FE9B590 8
+. 7C 09 03 A6 4E 80 04 21
+
+==== BB 1284 _IO_check_libio(0xFEED850) approx BBs exec'd 0 ====
+
+	0xFEED850:  7D8802A6  mflr r12
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R12
+	   2: INCEIPL       	$4
+
+	0xFEED854:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEED858:  480DA5F9  bl 0xFFC7E50
+	   9: MOVL       	$0xFEED85C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FEED850 12
+. 7D 88 02 A6 94 21 FF F0 48 0D A5 F9
+
+==== BB 1285 (0xFEED85C) approx BBs exec'd 0 ====
+
+	0xFEED85C:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEED860:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEED864:  7D8803A6  mtlr r12
+	   8: GETL       	R12, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0xFEED868:  801E1DC8  lwz r0,7624(r30)
+	  11: GETL       	R30, t8
+	  12: ADDL       	$0x1DC8, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R0
+	  15: INCEIPL       	$4
+
+	0xFEED86C:  811E1B00  lwz r8,6912(r30)
+	  16: GETL       	R30, t12
+	  17: ADDL       	$0x1B00, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R8
+	  20: INCEIPL       	$4
+
+	0xFEED870:  2F800000  cmpi cr7,r0,0
+	  21: GETL       	R0, t16
+	  22: CMP0L       	t16, t18  (-rSo)
+	  23: ICRFL       	t18, $0x7, CR
+	  24: INCEIPL       	$4
+
+	0xFEED874:  815E1BCC  lwz r10,7116(r30)
+	  25: GETL       	R30, t20
+	  26: ADDL       	$0x1BCC, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R10
+	  29: INCEIPL       	$4
+
+	0xFEED878:  80FE1AE8  lwz r7,6888(r30)
+	  30: GETL       	R30, t24
+	  31: ADDL       	$0x1AE8, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R7
+	  34: INCEIPL       	$4
+
+	0xFEED87C:  3800FFB4  li r0,-76
+	  35: MOVL       	$0xFFFFFFB4, t28
+	  36: PUTL       	t28, R0
+	  37: INCEIPL       	$4
+
+	0xFEED880:  419E0010  bc 12,30,0xFEED890
+	  38: Js30o       	$0xFEED890
+
+
+. 0 FEED85C 40
+. 93 C1 00 08 7F C8 02 A6 7D 88 03 A6 80 1E 1D C8 81 1E 1B 00 2F 80 00 00 81 5E 1B CC 80 FE 1A E8 38 00 FF B4 41 9E 00 10
+
+==== BB 1286 (0xFEED884) approx BBs exec'd 0 ====
+
+	0xFEED884:  83C10008  lwz r30,8(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R30
+	   4: INCEIPL       	$4
+
+	0xFEED888:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0xFEED88C:  4E800020  blr
+	   9: GETL       	LR, t6
+	  10: JMPo-r       	t6  ($4)
+
+
+. 0 FEED884 12
+. 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+==== BB 1287 (0xFE9B598) approx BBs exec'd 0 ====
+
+	0xFE9B598:  841F0004  lwzu r0,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R31
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFE9B59C:  2F800000  cmpi cr7,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x7, CR
+	   9: INCEIPL       	$4
+
+	0xFE9B5A0:  409EFFF0  bc 4,30,0xFE9B590
+	  10: Jc30o       	$0xFE9B590
+
+
+. 0 FE9B598 12
+. 84 1F 00 04 2F 80 00 00 40 9E FF F0
+
+==== BB 1288 (0xFE9B5A4) approx BBs exec'd 0 ====
+
+	0xFE9B5A4:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE9B5A8:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFE9B5AC:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFE9B5B0:  7C6803A6  mtlr r3
+	  15: GETL       	R3, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFE9B5B4:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xFE9B5B8:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+. 0 FE9B5A4 24
+. 80 61 00 14 83 C1 00 08 83 E1 00 0C 7C 68 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1289 (0xFE9B6D0) approx BBs exec'd 0 ====
+
+	0xFE9B6D0:  80C10024  lwz r6,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFE9B6D4:  83810010  lwz r28,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0xFE9B6D8:  83A10014  lwz r29,20(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0xFE9B6DC:  7CC803A6  mtlr r6
+	  15: GETL       	R6, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFE9B6E0:  83C10018  lwz r30,24(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R30
+	  22: INCEIPL       	$4
+
+	0xFE9B6E4:  83E1001C  lwz r31,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0xFE9B6E8:  38210020  addi r1,r1,32
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0xFE9B6EC:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+. 0 FE9B6D0 32
+. 80 C1 00 24 83 81 00 10 83 A1 00 14 7C C8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1290 (0x2547B908) approx BBs exec'd 0 ====
+
+	0x2547B908:  4BFFFF88  b 0x2547B890
+	   0: JMPo       	$0x2547B890  ($4)
+
+
+. 0 2547B908 4
+. 4B FF FF 88
+
+==== BB 1291 (0x2547B890) approx BBs exec'd 0 ====
+
+	0x2547B890:  817F0084  lwz r11,132(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x84, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547B894:  2C8B0000  cmpi cr1,r11,0
+	   5: GETL       	R11, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547B898:  41A6FF8C  bc 13,6,0x2547B824
+	   9: Js06o       	$0x2547B824
+
+
+. 0 2547B890 12
+. 81 7F 00 84 2C 8B 00 00 41 A6 FF 8C
+
+==== BB 1292 (0x2547B90C) approx BBs exec'd 0 ====
+
+	0x2547B90C:  7520C000  andis. r0,r9,0xC000
+	   0: GETL       	R9, t0
+	   1: ANDL       	$0xC0000000, t0
+	   2: PUTL       	t0, R0
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547B910:  40A2FEFC  bc 5,2,0x2547B80C
+	   6: Jc02o       	$0x2547B80C
+
+
+. 0 2547B90C 8
+. 75 20 C0 00 40 A2 FE FC
+
+==== BB 1293 (0x2547B914) approx BBs exec'd 0 ====
+
+	0x2547B914:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547B918:  3B9CFFFF  addi r28,r28,-1
+	   4: GETL       	R28, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0x2547B91C:  4BFFFF10  b 0x2547B82C
+	   8: JMPo       	$0x2547B82C  ($4)
+
+
+. 0 2547B914 12
+. 2F 9C 00 00 3B 9C FF FF 4B FF FF 10
+
+==== BB 1294 (0x2547B82C) approx BBs exec'd 0 ====
+
+	0x2547B82C:  409EFFB0  bc 4,30,0x2547B7DC
+	   0: Jc30o       	$0x2547B7DC
+
+
+. 0 2547B82C 4
+. 40 9E FF B0
+
+==== BB 1295 (0x2547B830) approx BBs exec'd 0 ====
+
+	0x2547B830:  82E10044  lwz r23,68(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x44, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x2547B834:  3B000000  li r24,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R24
+	   7: INCEIPL       	$4
+
+	0x2547B838:  83C10038  lwz r30,56(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x38, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0x2547B83C:  7EE803A6  mtlr r23
+	  13: GETL       	R23, t10
+	  14: PUTL       	t10, LR
+	  15: INCEIPL       	$4
+
+	0x2547B840:  83E1003C  lwz r31,60(r1)
+	  16: GETL       	R1, t12
+	  17: ADDL       	$0x3C, t12
+	  18: LDL       	(t12), t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0x2547B844:  9316000C  stw r24,12(r22)
+	  21: GETL       	R24, t16
+	  22: GETL       	R22, t18
+	  23: ADDL       	$0xC, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0x2547B848:  82810010  lwz r20,16(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x10, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R20
+	  30: INCEIPL       	$4
+
+	0x2547B84C:  82A10014  lwz r21,20(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x14, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R21
+	  35: INCEIPL       	$4
+
+	0x2547B850:  82C10018  lwz r22,24(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x18, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R22
+	  40: INCEIPL       	$4
+
+	0x2547B854:  82E1001C  lwz r23,28(r1)
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x1C, t32
+	  43: LDL       	(t32), t34
+	  44: PUTL       	t34, R23
+	  45: INCEIPL       	$4
+
+	0x2547B858:  83010020  lwz r24,32(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x20, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R24
+	  50: INCEIPL       	$4
+
+	0x2547B85C:  83210024  lwz r25,36(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x24, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R25
+	  55: INCEIPL       	$4
+
+	0x2547B860:  83410028  lwz r26,40(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x28, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R26
+	  60: INCEIPL       	$4
+
+	0x2547B864:  8361002C  lwz r27,44(r1)
+	  61: GETL       	R1, t48
+	  62: ADDL       	$0x2C, t48
+	  63: LDL       	(t48), t50
+	  64: PUTL       	t50, R27
+	  65: INCEIPL       	$4
+
+	0x2547B868:  83810030  lwz r28,48(r1)
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x30, t52
+	  68: LDL       	(t52), t54
+	  69: PUTL       	t54, R28
+	  70: INCEIPL       	$4
+
+	0x2547B86C:  83A10034  lwz r29,52(r1)
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0x34, t56
+	  73: LDL       	(t56), t58
+	  74: PUTL       	t58, R29
+	  75: INCEIPL       	$4
+
+	0x2547B870:  38210040  addi r1,r1,64
+	  76: GETL       	R1, t60
+	  77: ADDL       	$0x40, t60
+	  78: PUTL       	t60, R1
+	  79: INCEIPL       	$4
+
+	0x2547B874:  4800072C  b 0x2547BFA0
+	  80: JMPo       	$0x2547BFA0  ($4)
+
+
+. 0 2547B830 72
+. 82 E1 00 44 3B 00 00 00 83 C1 00 38 7E E8 03 A6 83 E1 00 3C 93 16 00 0C 82 81 00 10 82 A1 00 14 82 C1 00 18 82 E1 00 1C 83 01 00 20 83 21 00 24 83 41 00 28 83 61 00 2C 83 81 00 30 83 A1 00 34 38 21 00 40 48 00 07 2C
+
+==== BB 1296 (0x2548051C) approx BBs exec'd 0 ====
+
+	0x2548051C:  807D0000  lwz r3,0(r29)
+	   0: GETL       	R29, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0x25480520:  809B0000  lwz r4,0(r27)
+	   4: GETL       	R27, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R4
+	   7: INCEIPL       	$4
+
+	0x25480524:  5465103A  rlwinm r5,r3,2,0,29
+	   8: GETL       	R3, t8
+	   9: SHLL       	$0x2, t8
+	  10: PUTL       	t8, R5
+	  11: INCEIPL       	$4
+
+	0x25480528:  7CC42A14  add r6,r4,r5
+	  12: GETL       	R4, t10
+	  13: GETL       	R5, t12
+	  14: ADDL       	t10, t12
+	  15: PUTL       	t12, R6
+	  16: INCEIPL       	$4
+
+	0x2548052C:  38A60004  addi r5,r6,4
+	  17: GETL       	R6, t14
+	  18: ADDL       	$0x4, t14
+	  19: PUTL       	t14, R5
+	  20: INCEIPL       	$4
+
+	0x25480530:  84060004  lwzu r0,4(r6)
+	  21: GETL       	R6, t16
+	  22: ADDL       	$0x4, t16
+	  23: PUTL       	t16, R6
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R0
+	  26: INCEIPL       	$4
+
+	0x25480534:  2C000000  cmpi cr0,r0,0
+	  27: GETL       	R0, t20
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x0, CR
+	  30: INCEIPL       	$4
+
+	0x25480538:  4082FFF8  bc 4,2,0x25480530
+	  31: Jc02o       	$0x25480530
+
+
+. 0 2548051C 32
+. 80 7D 00 00 80 9B 00 00 54 65 10 3A 7C C4 2A 14 38 A6 00 04 84 06 00 04 2C 00 00 00 40 82 FF F8
+
+==== BB 1297 (0x25480530) approx BBs exec'd 0 ====
+
+	0x25480530:  84060004  lwzu r0,4(r6)
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R6
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0x25480534:  2C000000  cmpi cr0,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x25480538:  4082FFF8  bc 4,2,0x25480530
+	  10: Jc02o       	$0x25480530
+
+
+. 0 25480530 12
+. 84 06 00 04 2C 00 00 00 40 82 FF F8
+
+==== BB 1298 (0x2548053C) approx BBs exec'd 0 ====
+
+	0x2548053C:  38C60004  addi r6,r6,4
+	   0: GETL       	R6, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x25480540:  80FF04AC  lwz r7,1196(r31)
+	   4: GETL       	R31, t2
+	   5: ADDL       	$0x4AC, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R7
+	   8: INCEIPL       	$4
+
+	0x25480544:  7FC903A6  mtctr r30
+	   9: GETL       	R30, t6
+	  10: PUTL       	t6, CTR
+	  11: INCEIPL       	$4
+
+	0x25480548:  3BE00000  li r31,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R31
+	  14: INCEIPL       	$4
+
+	0x2548054C:  93E10000  stw r31,0(r1)
+	  15: GETL       	R31, t10
+	  16: GETL       	R1, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x25480550:  7FE803A6  mtlr r31
+	  19: GETL       	R31, t14
+	  20: PUTL       	t14, LR
+	  21: INCEIPL       	$4
+
+	0x25480554:  93E10004  stw r31,4(r1)
+	  22: GETL       	R31, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x4, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x25480558:  93E10008  stw r31,8(r1)
+	  27: GETL       	R31, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x8, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0x2548055C:  93E1000C  stw r31,12(r1)
+	  32: GETL       	R31, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0xC, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x25480560:  4E800420  bctr
+	  37: GETL       	CTR, t28
+	  38: JMPo       	t28  ($4)
+
+
+. 0 2548053C 40
+. 38 C6 00 04 80 FF 04 AC 7F C9 03 A6 3B E0 00 00 93 E1 00 00 7F E8 03 A6 93 E1 00 04 93 E1 00 08 93 E1 00 0C 4E 80 04 20
+
+==== BB 1299 _start(0x100002A0) approx BBs exec'd 0 ====
+
+	0x100002A0:  7C290B78  or r9,r1,r1
+	   0: GETL       	R1, t0
+	   1: PUTL       	t0, R9
+	   2: INCEIPL       	$4
+
+	0x100002A4:  54210036  rlwinm r1,r1,0,0,27
+	   3: GETL       	R1, t2
+	   4: ANDL       	$0xFFFFFFF0, t2
+	   5: PUTL       	t2, R1
+	   6: INCEIPL       	$4
+
+	0x100002A8:  38000000  li r0,0
+	   7: MOVL       	$0x0, t4
+	   8: PUTL       	t4, R0
+	   9: INCEIPL       	$4
+
+	0x100002AC:  9421FFF0  stwu r1,-16(r1)
+	  10: GETL       	R1, t6
+	  11: GETL       	R1, t8
+	  12: ADDL       	$0xFFFFFFF0, t8
+	  13: PUTL       	t8, R1
+	  14: STL       	t6, (t8)
+	  15: INCEIPL       	$4
+
+	0x100002B0:  7C0803A6  mtlr r0
+	  16: GETL       	R0, t10
+	  17: PUTL       	t10, LR
+	  18: INCEIPL       	$4
+
+	0x100002B4:  90010000  stw r0,0(r1)
+	  19: GETL       	R0, t12
+	  20: GETL       	R1, t14
+	  21: STL       	t12, (t14)
+	  22: INCEIPL       	$4
+
+	0x100002B8:  3D001000  lis r8,4096
+	  23: MOVL       	$0x10000000, t16
+	  24: PUTL       	t16, R8
+	  25: INCEIPL       	$4
+
+	0x100002BC:  85A8077C  lwzu r13,1916(r8)
+	  26: MOVL       	$0x1000077C, t18
+	  27: PUTL       	t18, R8
+	  28: LDL       	(t18), t20
+	  29: PUTL       	t20, R13
+	  30: INCEIPL       	$4
+
+	0x100002C0:  48010624  b 0x100108E4
+	  31: JMPo       	$0x100108E4  ($4)
+
+
+. 0 100002A0 36
+. 7C 29 0B 78 54 21 00 36 38 00 00 00 94 21 FF F0 7C 08 03 A6 90 01 00 00 3D 00 10 00 85 A8 07 7C 48 01 06 24
+
+==== BB 1300 (0x100108E4) approx BBs exec'd 0 ====
+
+	0x100108E4:  39600000  li r11,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0x100108E8:  4BFFFFD4  b 0x100108BC
+	   3: JMPo       	$0x100108BC  ($4)
+
+
+. 0 100108E4 8
+. 39 60 00 00 4B FF FF D4
+
+==== BB 1301 (0x100108BC) approx BBs exec'd 0 ====
+
+	0x100108BC:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0x100108C0:  7D6C5A14  add r11,r12,r11
+	   4: GETL       	R12, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0x100108C4:  3980AF5C  li r12,-20644
+	   9: MOVL       	$0xFFFFAF5C, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0x100108C8:  3D8C2548  addis r12,r12,9544
+	  12: MOVL       	$0x2547AF5C, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0x100108CC:  7D8903A6  mtctr r12
+	  15: GETL       	R12, t10
+	  16: PUTL       	t10, CTR
+	  17: INCEIPL       	$4
+
+	0x100108D0:  39807A88  li r12,31368
+	  18: MOVL       	$0x7A88, t12
+	  19: PUTL       	t12, R12
+	  20: INCEIPL       	$4
+
+	0x100108D4:  3D8C2549  addis r12,r12,9545
+	  21: MOVL       	$0x25497A88, t14
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0x100108D8:  4E800420  bctr
+	  24: GETL       	CTR, t16
+	  25: JMPo       	t16  ($4)
+
+
+. 0 100108BC 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 7A 88 3D 8C 25 49 4E 80 04 20
+
+==== BB 1302 _dl_runtime_resolve(0x2547AF5C) approx BBs exec'd 0 ====
+
+	0x2547AF5C:  9421FFC0  stwu r1,-64(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFC0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547AF60:  9001000C  stw r0,12(r1)
+	   6: GETL       	R0, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xC, t6
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x2547AF64:  90610010  stw r3,16(r1)
+	  11: GETL       	R3, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x10, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547AF68:  90810014  stw r4,20(r1)
+	  16: GETL       	R4, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x14, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2547AF6C:  7D836378  or r3,r12,r12
+	  21: GETL       	R12, t16
+	  22: PUTL       	t16, R3
+	  23: INCEIPL       	$4
+
+	0x2547AF70:  90A10018  stw r5,24(r1)
+	  24: GETL       	R5, t18
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x18, t20
+	  27: STL       	t18, (t20)
+	  28: INCEIPL       	$4
+
+	0x2547AF74:  7D645B78  or r4,r11,r11
+	  29: GETL       	R11, t22
+	  30: PUTL       	t22, R4
+	  31: INCEIPL       	$4
+
+	0x2547AF78:  90C1001C  stw r6,28(r1)
+	  32: GETL       	R6, t24
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x1C, t26
+	  35: STL       	t24, (t26)
+	  36: INCEIPL       	$4
+
+	0x2547AF7C:  7C0802A6  mflr r0
+	  37: GETL       	LR, t28
+	  38: PUTL       	t28, R0
+	  39: INCEIPL       	$4
+
+	0x2547AF80:  90E10020  stw r7,32(r1)
+	  40: GETL       	R7, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x20, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x2547AF84:  90010030  stw r0,48(r1)
+	  45: GETL       	R0, t34
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x30, t36
+	  48: STL       	t34, (t36)
+	  49: INCEIPL       	$4
+
+	0x2547AF88:  91010024  stw r8,36(r1)
+	  50: GETL       	R8, t38
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x24, t40
+	  53: STL       	t38, (t40)
+	  54: INCEIPL       	$4
+
+	0x2547AF8C:  7C000026  mfcr r0
+	  55: GETL       	CR, t42
+	  56: PUTL       	t42, R0
+	  57: INCEIPL       	$4
+
+	0x2547AF90:  91210028  stw r9,40(r1)
+	  58: GETL       	R9, t44
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x28, t46
+	  61: STL       	t44, (t46)
+	  62: INCEIPL       	$4
+
+	0x2547AF94:  9141002C  stw r10,44(r1)
+	  63: GETL       	R10, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x2C, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x2547AF98:  90010008  stw r0,8(r1)
+	  68: GETL       	R0, t52
+	  69: GETL       	R1, t54
+	  70: ADDL       	$0x8, t54
+	  71: STL       	t52, (t54)
+	  72: INCEIPL       	$4
+
+	0x2547AF9C:  480000C9  bl 0x2547B064
+	  73: MOVL       	$0x2547AFA0, t56
+	  74: PUTL       	t56, LR
+	  75: JMPo-c       	$0x2547B064  ($4)
+
+
+. 0 2547AF5C 68
+. 94 21 FF C0 90 01 00 0C 90 61 00 10 90 81 00 14 7D 83 63 78 90 A1 00 18 7D 64 5B 78 90 C1 00 1C 7C 08 02 A6 90 E1 00 20 90 01 00 30 91 01 00 24 7C 00 00 26 91 21 00 28 91 41 00 2C 90 01 00 08 48 00 00 C9
+
+==== BB 1303 fixup(0x2547B064) approx BBs exec'd 0 ====
+
+	0x2547B064:  7CE802A6  mflr r7
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R7
+	   2: INCEIPL       	$4
+
+	0x2547B068:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x2547B06C:  93810010  stw r28,16(r1)
+	   9: GETL       	R28, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547B070:  4801BF91  bl 0x25497000
+	  14: MOVL       	$0x2547B074, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547B064 16
+. 7C E8 02 A6 94 21 FF E0 93 81 00 10 48 01 BF 91
+
+==== BB 1304 (0x2547B074) approx BBs exec'd 0 ====
+
+	0x2547B074:  93A10014  stw r29,20(r1)
+	   0: GETL       	R29, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x14, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547B078:  90E10024  stw r7,36(r1)
+	   5: GETL       	R7, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x24, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0x2547B07C:  80C3007C  lwz r6,124(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x7C, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R6
+	  14: INCEIPL       	$4
+
+	0x2547B080:  93C10018  stw r30,24(r1)
+	  15: GETL       	R30, t12
+	  16: GETL       	R1, t14
+	  17: ADDL       	$0x18, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0x2547B084:  7FC802A6  mflr r30
+	  20: GETL       	LR, t16
+	  21: PUTL       	t16, R30
+	  22: INCEIPL       	$4
+
+	0x2547B088:  93E1001C  stw r31,28(r1)
+	  23: GETL       	R31, t18
+	  24: GETL       	R1, t20
+	  25: ADDL       	$0x1C, t20
+	  26: STL       	t18, (t20)
+	  27: INCEIPL       	$4
+
+	0x2547B08C:  7C7F1B78  or r31,r3,r3
+	  28: GETL       	R3, t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0x2547B090:  81460004  lwz r10,4(r6)
+	  31: GETL       	R6, t24
+	  32: ADDL       	$0x4, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R10
+	  35: INCEIPL       	$4
+
+	0x2547B094:  80A30038  lwz r5,56(r3)
+	  36: GETL       	R3, t28
+	  37: ADDL       	$0x38, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R5
+	  40: INCEIPL       	$4
+
+	0x2547B098:  7FAA2214  add r29,r10,r4
+	  41: GETL       	R10, t32
+	  42: GETL       	R4, t34
+	  43: ADDL       	t32, t34
+	  44: PUTL       	t34, R29
+	  45: INCEIPL       	$4
+
+	0x2547B09C:  7D6A202E  lwzx r11,r10,r4
+	  46: GETL       	R4, t36
+	  47: GETL       	R10, t38
+	  48: ADDL       	t38, t36
+	  49: LDL       	(t36), t40
+	  50: PUTL       	t40, R11
+	  51: INCEIPL       	$4
+
+	0x2547B0A0:  807D0004  lwz r3,4(r29)
+	  52: GETL       	R29, t42
+	  53: ADDL       	$0x4, t42
+	  54: LDL       	(t42), t44
+	  55: PUTL       	t44, R3
+	  56: INCEIPL       	$4
+
+	0x2547B0A4:  81850004  lwz r12,4(r5)
+	  57: GETL       	R5, t46
+	  58: ADDL       	$0x4, t46
+	  59: LDL       	(t46), t48
+	  60: PUTL       	t48, R12
+	  61: INCEIPL       	$4
+
+	0x2547B0A8:  5466C23E  rlwinm r6,r3,24,8,31
+	  62: GETL       	R3, t50
+	  63: SHRL       	$0x8, t50
+	  64: PUTL       	t50, R6
+	  65: INCEIPL       	$4
+
+	0x2547B0AC:  807F0000  lwz r3,0(r31)
+	  66: GETL       	R31, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R3
+	  69: INCEIPL       	$4
+
+	0x2547B0B0:  54C52036  rlwinm r5,r6,4,0,27
+	  70: GETL       	R6, t56
+	  71: SHLL       	$0x4, t56
+	  72: PUTL       	t56, R5
+	  73: INCEIPL       	$4
+
+	0x2547B0B4:  7D056214  add r8,r5,r12
+	  74: GETL       	R5, t58
+	  75: GETL       	R12, t60
+	  76: ADDL       	t58, t60
+	  77: PUTL       	t60, R8
+	  78: INCEIPL       	$4
+
+	0x2547B0B8:  7F835A14  add r28,r3,r11
+	  79: GETL       	R3, t62
+	  80: GETL       	R11, t64
+	  81: ADDL       	t62, t64
+	  82: PUTL       	t64, R28
+	  83: INCEIPL       	$4
+
+	0x2547B0BC:  8808000D  lbz r0,13(r8)
+	  84: GETL       	R8, t66
+	  85: ADDL       	$0xD, t66
+	  86: LDB       	(t66), t68
+	  87: PUTL       	t68, R0
+	  88: INCEIPL       	$4
+
+	0x2547B0C0:  70090003  andi. r9,r0,0x3
+	  89: GETL       	R0, t70
+	  90: ANDL       	$0x3, t70
+	  91: PUTL       	t70, R9
+	  92: CMP0L       	t70, t72  (-rSo)
+	  93: ICRFL       	t72, $0x0, CR
+	  94: INCEIPL       	$4
+
+	0x2547B0C4:  813F0034  lwz r9,52(r31)
+	  95: GETL       	R31, t74
+	  96: ADDL       	$0x34, t74
+	  97: LDL       	(t74), t76
+	  98: PUTL       	t76, R9
+	  99: INCEIPL       	$4
+
+	0x2547B0C8:  80890004  lwz r4,4(r9)
+	 100: GETL       	R9, t78
+	 101: ADDL       	$0x4, t78
+	 102: LDL       	(t78), t80
+	 103: PUTL       	t80, R4
+	 104: INCEIPL       	$4
+
+	0x2547B0CC:  91010008  stw r8,8(r1)
+	 105: GETL       	R8, t82
+	 106: GETL       	R1, t84
+	 107: ADDL       	$0x8, t84
+	 108: STL       	t82, (t84)
+	 109: INCEIPL       	$4
+
+	0x2547B0D0:  408200E0  bc 4,2,0x2547B1B0
+	 110: Jc02o       	$0x2547B1B0
+
+
+. 0 2547B074 96
+. 93 A1 00 14 90 E1 00 24 80 C3 00 7C 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 7C 7F 1B 78 81 46 00 04 80 A3 00 38 7F AA 22 14 7D 6A 20 2E 80 7D 00 04 81 85 00 04 54 66 C2 3E 80 7F 00 00 54 C5 20 36 7D 05 62 14 7F 83 5A 14 88 08 00 0D 70 09 00 03 81 3F 00 34 80 89 00 04 91 01 00 08 40 82 00 E0
+
+==== BB 1305 (0x2547B0D4) approx BBs exec'd 0 ====
+
+	0x2547B0D4:  815F00E4  lwz r10,228(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0xE4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x2547B0D8:  38E00000  li r7,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R7
+	   7: INCEIPL       	$4
+
+	0x2547B0DC:  2F8A0000  cmpi cr7,r10,0
+	   8: GETL       	R10, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x2547B0E0:  409E009C  bc 4,30,0x2547B17C
+	  12: Jc30o       	$0x2547B17C
+
+
+. 0 2547B0D4 16
+. 81 5F 00 E4 38 E0 00 00 2F 8A 00 00 40 9E 00 9C
+
+==== BB 1306 (0x2547B17C) approx BBs exec'd 0 ====
+
+	0x2547B17C:  816A0004  lwz r11,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547B180:  54C9083C  rlwinm r9,r6,1,0,30
+	   5: GETL       	R6, t4
+	   6: SHLL       	$0x1, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0x2547B184:  80DF0188  lwz r6,392(r31)
+	   9: GETL       	R31, t6
+	  10: ADDL       	$0x188, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R6
+	  13: INCEIPL       	$4
+
+	0x2547B188:  7D095A2E  lhzx r8,r9,r11
+	  14: GETL       	R11, t10
+	  15: GETL       	R9, t12
+	  16: ADDL       	t12, t10
+	  17: LDW       	(t10), t14
+	  18: PUTL       	t14, R8
+	  19: INCEIPL       	$4
+
+	0x2547B18C:  55072376  rlwinm r7,r8,4,13,27
+	  20: GETL       	R8, t16
+	  21: ROLL       	$0x4, t16
+	  22: ANDL       	$0x7FFF0, t16
+	  23: PUTL       	t16, R7
+	  24: INCEIPL       	$4
+
+	0x2547B190:  7D663A14  add r11,r6,r7
+	  25: GETL       	R6, t18
+	  26: GETL       	R7, t20
+	  27: ADDL       	t18, t20
+	  28: PUTL       	t20, R11
+	  29: INCEIPL       	$4
+
+	0x2547B194:  806B0004  lwz r3,4(r11)
+	  30: GETL       	R11, t22
+	  31: ADDL       	$0x4, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R3
+	  34: INCEIPL       	$4
+
+	0x2547B198:  7C6AFE70  srawi r10,r3,31
+	  35: GETL       	R3, t26
+	  36: SARL       	$0x1F, t26  (-wCa)
+	  37: PUTL       	t26, R10
+	  38: INCEIPL       	$4
+
+	0x2547B19C:  7D401A78  xor r0,r10,r3
+	  39: GETL       	R10, t28
+	  40: GETL       	R3, t30
+	  41: XORL       	t28, t30
+	  42: PUTL       	t30, R0
+	  43: INCEIPL       	$4
+
+	0x2547B1A0:  7D205050  subf r9,r0,r10
+	  44: GETL       	R0, t32
+	  45: GETL       	R10, t34
+	  46: SUBL       	t32, t34
+	  47: PUTL       	t34, R9
+	  48: INCEIPL       	$4
+
+	0x2547B1A4:  7D28FE70  srawi r8,r9,31
+	  49: GETL       	R9, t36
+	  50: SARL       	$0x1F, t36  (-wCa)
+	  51: PUTL       	t36, R8
+	  52: INCEIPL       	$4
+
+	0x2547B1A8:  7D674038  and r7,r11,r8
+	  53: GETL       	R11, t38
+	  54: GETL       	R8, t40
+	  55: ANDL       	t38, t40
+	  56: PUTL       	t40, R7
+	  57: INCEIPL       	$4
+
+	0x2547B1AC:  4BFFFF38  b 0x2547B0E4
+	  58: JMPo       	$0x2547B0E4  ($4)
+
+
+. 0 2547B17C 52
+. 81 6A 00 04 54 C9 08 3C 80 DF 01 88 7D 09 5A 2E 55 07 23 76 7D 66 3A 14 80 6B 00 04 7C 6A FE 70 7D 40 1A 78 7D 20 50 50 7D 28 FE 70 7D 67 40 38 4B FF FF 38
+
+==== BB 1307 (0x2547B0E4) approx BBs exec'd 0 ====
+
+	0x2547B0E4:  7C05602E  lwzx r0,r5,r12
+	   0: GETL       	R12, t0
+	   1: GETL       	R5, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R0
+	   5: INCEIPL       	$4
+
+	0x2547B0E8:  39400000  li r10,0
+	   6: MOVL       	$0x0, t6
+	   7: PUTL       	t6, R10
+	   8: INCEIPL       	$4
+
+	0x2547B0EC:  80DF01C0  lwz r6,448(r31)
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0x1C0, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R6
+	  13: INCEIPL       	$4
+
+	0x2547B0F0:  38A10008  addi r5,r1,8
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x8, t12
+	  16: PUTL       	t12, R5
+	  17: INCEIPL       	$4
+
+	0x2547B0F4:  7C602214  add r3,r0,r4
+	  18: GETL       	R0, t14
+	  19: GETL       	R4, t16
+	  20: ADDL       	t14, t16
+	  21: PUTL       	t16, R3
+	  22: INCEIPL       	$4
+
+	0x2547B0F8:  39000001  li r8,1
+	  23: MOVL       	$0x1, t18
+	  24: PUTL       	t18, R8
+	  25: INCEIPL       	$4
+
+	0x2547B0FC:  7FE4FB78  or r4,r31,r31
+	  26: GETL       	R31, t20
+	  27: PUTL       	t20, R4
+	  28: INCEIPL       	$4
+
+	0x2547B100:  39200001  li r9,1
+	  29: MOVL       	$0x1, t22
+	  30: PUTL       	t22, R9
+	  31: INCEIPL       	$4
+
+	0x2547B104:  4BFFD4A1  bl 0x254785A4
+	  32: MOVL       	$0x2547B108, t24
+	  33: PUTL       	t24, LR
+	  34: JMPo-c       	$0x254785A4  ($4)
+
+
+. 0 2547B0E4 36
+. 7C 05 60 2E 39 40 00 00 80 DF 01 C0 38 A1 00 08 7C 60 22 14 39 00 00 01 7F E4 FB 78 39 20 00 01 4B FF D4 A1
+
+==== BB 1308 (0x2547B108) approx BBs exec'd 0 ====
+
+	0x2547B108:  39400000  li r10,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0x2547B10C:  81610008  lwz r11,8(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547B110:  2C8B0000  cmpi cr1,r11,0
+	   8: GETL       	R11, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x1, CR
+	  11: INCEIPL       	$4
+
+	0x2547B114:  41860018  bc 12,6,0x2547B12C
+	  12: Js06o       	$0x2547B12C
+
+
+. 0 2547B108 16
+. 39 40 00 00 81 61 00 08 2C 8B 00 00 41 86 00 18
+
+==== BB 1309 (0x2547B118) approx BBs exec'd 0 ====
+
+	0x2547B118:  2F030000  cmpi cr6,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547B11C:  419A00A0  bc 12,26,0x2547B1BC
+	   4: Js26o       	$0x2547B1BC
+
+
+. 0 2547B118 8
+. 2F 03 00 00 41 9A 00 A0
+
+==== BB 1310 (0x2547B120) approx BBs exec'd 0 ====
+
+	0x2547B120:  81830000  lwz r12,0(r3)
+	   0: GETL       	R3, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R12
+	   3: INCEIPL       	$4
+
+	0x2547B124:  808B0004  lwz r4,4(r11)
+	   4: GETL       	R11, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R4
+	   8: INCEIPL       	$4
+
+	0x2547B128:  7D4C2214  add r10,r12,r4
+	   9: GETL       	R12, t8
+	  10: GETL       	R4, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R10
+	  13: INCEIPL       	$4
+
+	0x2547B12C:  80FE04F4  lwz r7,1268(r30)
+	  14: GETL       	R30, t12
+	  15: ADDL       	$0x4F4, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R7
+	  18: INCEIPL       	$4
+
+	0x2547B130:  80DD0008  lwz r6,8(r29)
+	  19: GETL       	R29, t16
+	  20: ADDL       	$0x8, t16
+	  21: LDL       	(t16), t18
+	  22: PUTL       	t18, R6
+	  23: INCEIPL       	$4
+
+	0x2547B134:  8067002C  lwz r3,44(r7)
+	  24: GETL       	R7, t20
+	  25: ADDL       	$0x2C, t20
+	  26: LDL       	(t20), t22
+	  27: PUTL       	t22, R3
+	  28: INCEIPL       	$4
+
+	0x2547B138:  7D665214  add r11,r6,r10
+	  29: GETL       	R6, t24
+	  30: GETL       	R10, t26
+	  31: ADDL       	t24, t26
+	  32: PUTL       	t26, R11
+	  33: INCEIPL       	$4
+
+	0x2547B13C:  2C030000  cmpi cr0,r3,0
+	  34: GETL       	R3, t28
+	  35: CMP0L       	t28, t30  (-rSo)
+	  36: ICRFL       	t30, $0x0, CR
+	  37: INCEIPL       	$4
+
+	0x2547B140:  7D635B78  or r3,r11,r11
+	  38: GETL       	R11, t32
+	  39: PUTL       	t32, R3
+	  40: INCEIPL       	$4
+
+	0x2547B144:  40820018  bc 4,2,0x2547B15C
+	  41: Jc02o       	$0x2547B15C
+
+
+. 0 2547B120 40
+. 81 83 00 00 80 8B 00 04 7D 4C 22 14 80 FE 04 F4 80 DD 00 08 80 67 00 2C 7D 66 52 14 2C 03 00 00 7D 63 5B 78 40 82 00 18
+
+==== BB 1311 (0x2547B148) approx BBs exec'd 0 ====
+
+	0x2547B148:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x2547B14C:  7FA4EB78  or r4,r29,r29
+	   3: GETL       	R29, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0x2547B150:  7F85E378  or r5,r28,r28
+	   6: GETL       	R28, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547B154:  7D665B78  or r6,r11,r11
+	   9: GETL       	R11, t6
+	  10: PUTL       	t6, R6
+	  11: INCEIPL       	$4
+
+	0x2547B158:  4800540D  bl 0x25480564
+	  12: MOVL       	$0x2547B15C, t8
+	  13: PUTL       	t8, LR
+	  14: JMPo-c       	$0x25480564  ($4)
+
+
+. 0 2547B148 20
+. 7F E3 FB 78 7F A4 EB 78 7F 85 E3 78 7D 66 5B 78 48 00 54 0D
+
+==== BB 1312 __elf_machine_fixup_plt(0x25480564) approx BBs exec'd 0 ====
+
+	0x25480564:  7D453050  subf r10,r5,r6
+	   0: GETL       	R5, t0
+	   1: GETL       	R6, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0x25480568:  3D06FE00  addis r8,r6,-512
+	   5: GETL       	R6, t4
+	   6: ADDL       	$0xFE000000, t4
+	   7: PUTL       	t4, R8
+	   8: INCEIPL       	$4
+
+	0x2548056C:  554C3032  rlwinm r12,r10,6,0,25
+	   9: GETL       	R10, t6
+	  10: SHLL       	$0x6, t6
+	  11: PUTL       	t6, R12
+	  12: INCEIPL       	$4
+
+	0x25480570:  3CE0FC00  lis r7,-1024
+	  13: MOVL       	$0xFC000000, t8
+	  14: PUTL       	t8, R7
+	  15: INCEIPL       	$4
+
+	0x25480574:  7D843670  srawi r4,r12,6
+	  16: GETL       	R12, t10
+	  17: SARL       	$0x6, t10  (-wCa)
+	  18: PUTL       	t10, R4
+	  19: INCEIPL       	$4
+
+	0x25480578:  39280003  addi r9,r8,3
+	  20: GETL       	R8, t12
+	  21: ADDL       	$0x3, t12
+	  22: PUTL       	t12, R9
+	  23: INCEIPL       	$4
+
+	0x2548057C:  7F845000  cmp cr7,r4,r10
+	  24: GETL       	R4, t14
+	  25: GETL       	R10, t16
+	  26: CMPL       	t14, t16, t18  (-rSo)
+	  27: ICRFL       	t18, $0x7, CR
+	  28: INCEIPL       	$4
+
+	0x25480580:  60E00002  ori r0,r7,0x2
+	  29: MOVL       	$0xFC000002, t20
+	  30: PUTL       	t20, R0
+	  31: INCEIPL       	$4
+
+	0x25480584:  548B01BA  rlwinm r11,r4,0,6,29
+	  32: GETL       	R4, t22
+	  33: ANDL       	$0x3FFFFFC, t22
+	  34: PUTL       	t22, R11
+	  35: INCEIPL       	$4
+
+	0x25480588:  9421FFF0  stwu r1,-16(r1)
+	  36: GETL       	R1, t24
+	  37: GETL       	R1, t26
+	  38: ADDL       	$0xFFFFFFF0, t26
+	  39: PUTL       	t26, R1
+	  40: STL       	t24, (t26)
+	  41: INCEIPL       	$4
+
+	0x2548058C:  7F090040  cmpl cr6,r9,r0
+	  42: GETL       	R9, t28
+	  43: GETL       	R0, t30
+	  44: CMPUL       	t28, t30, t32  (-rSo)
+	  45: ICRFL       	t32, $0x6, CR
+	  46: INCEIPL       	$4
+
+	0x25480590:  656B4800  oris r11,r11,0x4800
+	  47: GETL       	R11, t34
+	  48: ORL       	$0x48000000, t34
+	  49: PUTL       	t34, R11
+	  50: INCEIPL       	$4
+
+	0x25480594:  419E00EC  bc 12,30,0x25480680
+	  51: Js30o       	$0x25480680
+
+
+. 0 25480564 52
+. 7D 45 30 50 3D 06 FE 00 55 4C 30 32 3C E0 FC 00 7D 84 36 70 39 28 00 03 7F 84 50 00 60 E0 00 02 54 8B 01 BA 94 21 FF F0 7F 09 00 40 65 6B 48 00 41 9E 00 EC
+
+==== BB 1313 (0x25480680) approx BBs exec'd 0 ====
+
+	0x25480680:  91650000  stw r11,0(r5)
+	   0: GETL       	R11, t0
+	   1: GETL       	R5, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0x25480684:  7C00286C  dcbst r0,r5
+	   4: INCEIPL       	$4
+
+	0x25480688:  7C0004AC  sync
+	   5: INCEIPL       	$4
+
+	0x2548068C:  7C002FAC  icbi r0,r5
+	   6: GETL       	R5, t4
+	   7: CALLM_So       	
+	   8: PUSHL       	t4
+	   9: CALLMo       	$0x68
+	  10: CALLM_Eo       	
+	  11: INCEIPL       	$4
+
+	0x25480690:  7C0004AC  sync
+	  12: INCEIPL       	$4
+
+	0x25480694:  4C00012C  	  13: INCEIPL       	$4
+
+	0x25480698:  7CC33378  or r3,r6,r6
+	  14: GETL       	R6, t6
+	  15: PUTL       	t6, R3
+	  16: INCEIPL       	$4
+
+	0x2548069C:  38210010  addi r1,r1,16
+	  17: GETL       	R1, t8
+	  18: ADDL       	$0x10, t8
+	  19: PUTL       	t8, R1
+	  20: INCEIPL       	$4
+
+	0x254806A0:  4E800020  blr
+	  21: GETL       	LR, t10
+	  22: JMPo-r       	t10  ($4)
+
+
+. 0 25480680 36
+. 91 65 00 00 7C 00 28 6C 7C 00 04 AC 7C 00 2F AC 7C 00 04 AC 4C 00 01 2C 7C C3 33 78 38 21 00 10 4E 80 00 20
+
+==== BB 1314 (0x2547B15C) approx BBs exec'd 0 ====
+
+	0x2547B15C:  83810024  lwz r28,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x2547B160:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547B164:  7F8803A6  mtlr r28
+	  10: GETL       	R28, t8
+	  11: PUTL       	t8, LR
+	  12: INCEIPL       	$4
+
+	0x2547B168:  83C10018  lwz r30,24(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x2547B16C:  83810010  lwz r28,16(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R28
+	  22: INCEIPL       	$4
+
+	0x2547B170:  83E1001C  lwz r31,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0x2547B174:  38210020  addi r1,r1,32
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0x2547B178:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+. 0 2547B15C 32
+. 83 81 00 24 83 A1 00 14 7F 88 03 A6 83 C1 00 18 83 81 00 10 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1315 (0x2547AFA0) approx BBs exec'd 0 ====
+
+	0x2547AFA0:  7C6903A6  mtctr r3
+	   0: GETL       	R3, t0
+	   1: PUTL       	t0, CTR
+	   2: INCEIPL       	$4
+
+	0x2547AFA4:  80010030  lwz r0,48(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x30, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x2547AFA8:  8141002C  lwz r10,44(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x2C, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R10
+	  12: INCEIPL       	$4
+
+	0x2547AFAC:  81210028  lwz r9,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R9
+	  17: INCEIPL       	$4
+
+	0x2547AFB0:  7C0803A6  mtlr r0
+	  18: GETL       	R0, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0x2547AFB4:  81010024  lwz r8,36(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x24, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R8
+	  25: INCEIPL       	$4
+
+	0x2547AFB8:  80010008  lwz r0,8(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x8, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R0
+	  30: INCEIPL       	$4
+
+	0x2547AFBC:  80E10020  lwz r7,32(r1)
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: LDL       	(t24), t26
+	  34: PUTL       	t26, R7
+	  35: INCEIPL       	$4
+
+	0x2547AFC0:  80C1001C  lwz r6,28(r1)
+	  36: GETL       	R1, t28
+	  37: ADDL       	$0x1C, t28
+	  38: LDL       	(t28), t30
+	  39: PUTL       	t30, R6
+	  40: INCEIPL       	$4
+
+	0x2547AFC4:  7C0FF120  mtcr r0
+	  41: GETL       	R0, t32
+	  42: PUTL       	t32, CR
+	  43: INCEIPL       	$4
+
+	0x2547AFC8:  80A10018  lwz r5,24(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x18, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R5
+	  48: INCEIPL       	$4
+
+	0x2547AFCC:  80810014  lwz r4,20(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x14, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R4
+	  53: INCEIPL       	$4
+
+	0x2547AFD0:  80610010  lwz r3,16(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x10, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R3
+	  58: INCEIPL       	$4
+
+	0x2547AFD4:  8001000C  lwz r0,12(r1)
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0xC, t46
+	  61: LDL       	(t46), t48
+	  62: PUTL       	t48, R0
+	  63: INCEIPL       	$4
+
+	0x2547AFD8:  38210040  addi r1,r1,64
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x40, t50
+	  66: PUTL       	t50, R1
+	  67: INCEIPL       	$4
+
+	0x2547AFDC:  4E800420  bctr
+	  68: GETL       	CTR, t52
+	  69: JMPo       	t52  ($4)
+
+
+. 0 2547AFA0 64
+. 7C 69 03 A6 80 01 00 30 81 41 00 2C 81 21 00 28 7C 08 03 A6 81 01 00 24 80 01 00 08 80 E1 00 20 80 C1 00 1C 7C 0F F1 20 80 A1 00 18 80 81 00 14 80 61 00 10 80 01 00 0C 38 21 00 40 4E 80 04 20
+
+==== BB 1316 __libc_start_main(0xFE9B8C4) approx BBs exec'd 0 ====
+
+	0xFE9B8C4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFE9B8C8:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFE9B8CC:  4812C585  bl 0xFFC7E50
+	   9: MOVL       	$0xFE9B8D0, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FE9B8C4 12
+. 7C 08 02 A6 94 21 FF F0 48 12 C5 85
+
+==== BB 1317 (0xFE9B8D0) approx BBs exec'd 0 ====
+
+	0xFE9B8D0:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE9B8D4:  7D2A4B78  or r10,r9,r9
+	   5: GETL       	R9, t4
+	   6: PUTL       	t4, R10
+	   7: INCEIPL       	$4
+
+	0xFE9B8D8:  7FC802A6  mflr r30
+	   8: GETL       	LR, t6
+	   9: PUTL       	t6, R30
+	  10: INCEIPL       	$4
+
+	0xFE9B8DC:  90010014  stw r0,20(r1)
+	  11: GETL       	R0, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFE9B8E0:  7C6C1B78  or r12,r3,r3
+	  16: GETL       	R3, t12
+	  17: PUTL       	t12, R12
+	  18: INCEIPL       	$4
+
+	0xFE9B8E4:  80090000  lwz r0,0(r9)
+	  19: GETL       	R9, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R0
+	  22: INCEIPL       	$4
+
+	0xFE9B8E8:  7C852378  or r5,r4,r4
+	  23: GETL       	R4, t18
+	  24: PUTL       	t18, R5
+	  25: INCEIPL       	$4
+
+	0xFE9B8EC:  7CE93B78  or r9,r7,r7
+	  26: GETL       	R7, t20
+	  27: PUTL       	t20, R9
+	  28: INCEIPL       	$4
+
+	0xFE9B8F0:  7D074378  or r7,r8,r8
+	  29: GETL       	R8, t22
+	  30: PUTL       	t22, R7
+	  31: INCEIPL       	$4
+
+	0xFE9B8F4:  2F800000  cmpi cr7,r0,0
+	  32: GETL       	R0, t24
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x7, CR
+	  35: INCEIPL       	$4
+
+	0xFE9B8F8:  419E0034  bc 12,30,0xFE9B92C
+	  36: Js30o       	$0xFE9B92C
+
+
+. 0 FE9B8D0 44
+. 93 C1 00 08 7D 2A 4B 78 7F C8 02 A6 90 01 00 14 7C 6C 1B 78 80 09 00 00 7C 85 23 78 7C E9 3B 78 7D 07 43 78 2F 80 00 00 41 9E 00 34
+
+==== BB 1318 (0xFE9B92C) approx BBs exec'd 0 ====
+
+	0xFE9B92C:  80060000  lwz r0,0(r6)
+	   0: GETL       	R6, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFE9B930:  7CCB3378  or r11,r6,r6
+	   4: GETL       	R6, t4
+	   5: PUTL       	t4, R11
+	   6: INCEIPL       	$4
+
+	0xFE9B934:  2C800000  cmpi cr1,r0,0
+	   7: GETL       	R0, t6
+	   8: CMP0L       	t6, t8  (-rSo)
+	   9: ICRFL       	t8, $0x1, CR
+	  10: INCEIPL       	$4
+
+	0xFE9B938:  4186003C  bc 12,6,0xFE9B974
+	  11: Js06o       	$0xFE9B974
+
+
+. 0 FE9B92C 16
+. 80 06 00 00 7C CB 33 78 2C 80 00 00 41 86 00 3C
+
+==== BB 1319 (0xFE9B93C) approx BBs exec'd 0 ====
+
+	0xFE9B93C:  811E1CB0  lwz r8,7344(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1CB0, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFE9B940:  48000010  b 0xFE9B950
+	   5: JMPo       	$0xFE9B950  ($4)
+
+
+. 0 FE9B93C 8
+. 81 1E 1C B0 48 00 00 10
+
+==== BB 1320 (0xFE9B950) approx BBs exec'd 0 ====
+
+	0xFE9B950:  2F800013  cmpi cr7,r0,19
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x13, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFE9B954:  2F080000  cmpi cr6,r8,0
+	   5: GETL       	R8, t6
+	   6: CMP0L       	t6, t8  (-rSo)
+	   7: ICRFL       	t8, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFE9B958:  409EFFEC  bc 4,30,0xFE9B944
+	   9: Jc30o       	$0xFE9B944
+
+
+. 0 FE9B950 12
+. 2F 80 00 13 2F 08 00 00 40 9E FF EC
+
+==== BB 1321 (0xFE9B944) approx BBs exec'd 0 ====
+
+	0xFE9B944:  840B0008  lwzu r0,8(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R11
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R0
+	   5: INCEIPL       	$4
+
+	0xFE9B948:  2F000000  cmpi cr6,r0,0
+	   6: GETL       	R0, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFE9B94C:  419A0028  bc 12,26,0xFE9B974
+	  10: Js26o       	$0xFE9B974
+
+
+. 0 FE9B944 12
+. 84 0B 00 08 2F 00 00 00 41 9A 00 28
+
+==== BB 1322 (0xFE9B95C) approx BBs exec'd 0 ====
+
+	0xFE9B95C:  41BAFFE8  bc 13,26,0xFE9B944
+	   0: Js26o       	$0xFE9B944
+
+
+. 0 FE9B95C 4
+. 41 BA FF E8
+
+==== BB 1323 (0xFE9B960) approx BBs exec'd 0 ====
+
+	0xFE9B960:  808B0004  lwz r4,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFE9B964:  90880000  stw r4,0(r8)
+	   5: GETL       	R4, t4
+	   6: GETL       	R8, t6
+	   7: STL       	t4, (t6)
+	   8: INCEIPL       	$4
+
+	0xFE9B968:  840B0008  lwzu r0,8(r11)
+	   9: GETL       	R11, t8
+	  10: ADDL       	$0x8, t8
+	  11: PUTL       	t8, R11
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFE9B96C:  2F000000  cmpi cr6,r0,0
+	  15: GETL       	R0, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0xFE9B970:  409AFFE0  bc 4,26,0xFE9B950
+	  19: Jc26o       	$0xFE9B950
+
+
+. 0 FE9B960 20
+. 80 8B 00 04 90 88 00 00 84 0B 00 08 2F 00 00 00 40 9A FF E0
+
+==== BB 1324 (0xFE9B974) approx BBs exec'd 0 ====
+
+	0xFE9B974:  8107000C  lwz r8,12(r7)
+	   0: GETL       	R7, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFE9B978:  7D846378  or r4,r12,r12
+	   5: GETL       	R12, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFE9B97C:  80670004  lwz r3,4(r7)
+	   8: GETL       	R7, t6
+	   9: ADDL       	$0x4, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFE9B980:  80E70008  lwz r7,8(r7)
+	  13: GETL       	R7, t10
+	  14: ADDL       	$0x8, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R7
+	  17: INCEIPL       	$4
+
+	0xFE9B984:  4BFFFD91  bl 0xFE9B714
+	  18: MOVL       	$0xFE9B988, t14
+	  19: PUTL       	t14, LR
+	  20: JMPo-c       	$0xFE9B714  ($4)
+
+
+. 0 FE9B974 20
+. 81 07 00 0C 7D 84 63 78 80 67 00 04 80 E7 00 08 4B FF FD 91
+
+==== BB 1325 generic_start_main(0xFE9B714) approx BBs exec'd 0 ====
+
+	0xFE9B714:  9421FDE0  stwu r1,-544(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFDE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE9B718:  7D4802A6  mflr r10
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0xFE9B71C:  4812C735  bl 0xFFC7E50
+	   9: MOVL       	$0xFE9B720, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FE9B714 12
+. 94 21 FD E0 7D 48 02 A6 48 12 C7 35
+
+==== BB 1326 (0xFE9B720) approx BBs exec'd 0 ====
+
+	0xFE9B720:  93C10218  stw r30,536(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x218, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE9B724:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE9B728:  93A10214  stw r29,532(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x214, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE9B72C:  5480103A  rlwinm r0,r4,2,0,29
+	  13: GETL       	R4, t10
+	  14: SHLL       	$0x2, t10
+	  15: PUTL       	t10, R0
+	  16: INCEIPL       	$4
+
+	0xFE9B730:  93810210  stw r28,528(r1)
+	  17: GETL       	R28, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x210, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFE9B734:  7D602A14  add r11,r0,r5
+	  22: GETL       	R0, t16
+	  23: GETL       	R5, t18
+	  24: ADDL       	t16, t18
+	  25: PUTL       	t18, R11
+	  26: INCEIPL       	$4
+
+	0xFE9B738:  93E1021C  stw r31,540(r1)
+	  27: GETL       	R31, t20
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x21C, t22
+	  30: STL       	t20, (t22)
+	  31: INCEIPL       	$4
+
+	0xFE9B73C:  83BE1CE4  lwz r29,7396(r30)
+	  32: GETL       	R30, t24
+	  33: ADDL       	$0x1CE4, t24
+	  34: LDL       	(t24), t26
+	  35: PUTL       	t26, R29
+	  36: INCEIPL       	$4
+
+	0xFE9B740:  7CFC3B78  or r28,r7,r7
+	  37: GETL       	R7, t28
+	  38: PUTL       	t28, R28
+	  39: INCEIPL       	$4
+
+	0xFE9B744:  91410224  stw r10,548(r1)
+	  40: GETL       	R10, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x224, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0xFE9B748:  7D1F4378  or r31,r8,r8
+	  45: GETL       	R8, t34
+	  46: PUTL       	t34, R31
+	  47: INCEIPL       	$4
+
+	0xFE9B74C:  2F9D0000  cmpi cr7,r29,0
+	  48: GETL       	R29, t36
+	  49: CMP0L       	t36, t38  (-rSo)
+	  50: ICRFL       	t38, $0x7, CR
+	  51: INCEIPL       	$4
+
+	0xFE9B750:  906101F0  stw r3,496(r1)
+	  52: GETL       	R3, t40
+	  53: GETL       	R1, t42
+	  54: ADDL       	$0x1F0, t42
+	  55: STL       	t40, (t42)
+	  56: INCEIPL       	$4
+
+	0xFE9B754:  9361020C  stw r27,524(r1)
+	  57: GETL       	R27, t44
+	  58: GETL       	R1, t46
+	  59: ADDL       	$0x20C, t46
+	  60: STL       	t44, (t46)
+	  61: INCEIPL       	$4
+
+	0xFE9B758:  7D234B78  or r3,r9,r9
+	  62: GETL       	R9, t48
+	  63: PUTL       	t48, R3
+	  64: INCEIPL       	$4
+
+	0xFE9B75C:  908101F4  stw r4,500(r1)
+	  65: GETL       	R4, t50
+	  66: GETL       	R1, t52
+	  67: ADDL       	$0x1F4, t52
+	  68: STL       	t50, (t52)
+	  69: INCEIPL       	$4
+
+	0xFE9B760:  396B0004  addi r11,r11,4
+	  70: GETL       	R11, t54
+	  71: ADDL       	$0x4, t54
+	  72: PUTL       	t54, R11
+	  73: INCEIPL       	$4
+
+	0xFE9B764:  90A101F8  stw r5,504(r1)
+	  74: GETL       	R5, t56
+	  75: GETL       	R1, t58
+	  76: ADDL       	$0x1F8, t58
+	  77: STL       	t56, (t58)
+	  78: INCEIPL       	$4
+
+	0xFE9B768:  39400000  li r10,0
+	  79: MOVL       	$0x0, t60
+	  80: PUTL       	t60, R10
+	  81: INCEIPL       	$4
+
+	0xFE9B76C:  90C101FC  stw r6,508(r1)
+	  82: GETL       	R6, t62
+	  83: GETL       	R1, t64
+	  84: ADDL       	$0x1FC, t64
+	  85: STL       	t62, (t64)
+	  86: INCEIPL       	$4
+
+	0xFE9B770:  419E0010  bc 12,30,0xFE9B780
+	  87: Js30o       	$0xFE9B780
+
+
+. 0 FE9B720 84
+. 93 C1 02 18 7F C8 02 A6 93 A1 02 14 54 80 10 3A 93 81 02 10 7D 60 2A 14 93 E1 02 1C 83 BE 1C E4 7C FC 3B 78 91 41 02 24 7D 1F 43 78 2F 9D 00 00 90 61 01 F0 93 61 02 0C 7D 23 4B 78 90 81 01 F4 39 6B 00 04 90 A1 01 F8 39 40 00 00 90 C1 01 FC 41 9E 00 10
+
+==== BB 1327 (0xFE9B780) approx BBs exec'd 0 ====
+
+	0xFE9B780:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE9B784:  837E1B84  lwz r27,7044(r30)
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x1B84, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R27
+	   8: INCEIPL       	$4
+
+	0xFE9B788:  813E1AA4  lwz r9,6820(r30)
+	   9: GETL       	R30, t8
+	  10: ADDL       	$0x1AA4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R9
+	  13: INCEIPL       	$4
+
+	0xFE9B78C:  917B0000  stw r11,0(r27)
+	  14: GETL       	R11, t12
+	  15: GETL       	R27, t14
+	  16: STL       	t12, (t14)
+	  17: INCEIPL       	$4
+
+	0xFE9B790:  91490000  stw r10,0(r9)
+	  18: GETL       	R10, t16
+	  19: GETL       	R9, t18
+	  20: STL       	t16, (t18)
+	  21: INCEIPL       	$4
+
+	0xFE9B794:  41860010  bc 12,6,0xFE9B7A4
+	  22: Js06o       	$0xFE9B7A4
+
+
+. 0 FE9B780 24
+. 2C 83 00 00 83 7E 1B 84 81 3E 1A A4 91 7B 00 00 91 49 00 00 41 86 00 10
+
+==== BB 1328 (0xFE9B798) approx BBs exec'd 0 ====
+
+	0xFE9B798:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFE9B79C:  38A00000  li r5,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R5
+	   5: INCEIPL       	$4
+
+	0xFE9B7A0:  48018731  bl 0xFEB3ED0
+	   6: MOVL       	$0xFE9B7A4, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFEB3ED0  ($4)
+
+
+. 0 FE9B798 12
+. 38 80 00 00 38 A0 00 00 48 01 87 31
+
+==== BB 1329 __cxa_atexit_internal(0xFEB3ED0) approx BBs exec'd 0 ====
+
+	0xFEB3ED0:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0xFEB3ED4:  9421FFE0  stwu r1,-32(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFE0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0xFEB3ED8:  93E1001C  stw r31,28(r1)
+	   9: GETL       	R31, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x1C, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFEB3EDC:  7CBF2B78  or r31,r5,r5
+	  14: GETL       	R5, t10
+	  15: PUTL       	t10, R31
+	  16: INCEIPL       	$4
+
+	0xFEB3EE0:  93810010  stw r28,16(r1)
+	  17: GETL       	R28, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFEB3EE4:  7C9C2378  or r28,r4,r4
+	  22: GETL       	R4, t16
+	  23: PUTL       	t16, R28
+	  24: INCEIPL       	$4
+
+	0xFEB3EE8:  93A10014  stw r29,20(r1)
+	  25: GETL       	R29, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFEB3EEC:  7C7D1B78  or r29,r3,r3
+	  30: GETL       	R3, t22
+	  31: PUTL       	t22, R29
+	  32: INCEIPL       	$4
+
+	0xFEB3EF0:  90010024  stw r0,36(r1)
+	  33: GETL       	R0, t24
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x24, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0xFEB3EF4:  93C10018  stw r30,24(r1)
+	  38: GETL       	R30, t28
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x18, t30
+	  41: STL       	t28, (t30)
+	  42: INCEIPL       	$4
+
+	0xFEB3EF8:  4BFFFE35  bl 0xFEB3D2C
+	  43: MOVL       	$0xFEB3EFC, t32
+	  44: PUTL       	t32, LR
+	  45: JMPo-c       	$0xFEB3D2C  ($4)
+
+
+. 0 FEB3ED0 44
+. 7C 08 02 A6 94 21 FF E0 93 E1 00 1C 7C BF 2B 78 93 81 00 10 7C 9C 23 78 93 A1 00 14 7C 7D 1B 78 90 01 00 24 93 C1 00 18 4B FF FE 35
+
+==== BB 1330 __new_exitfn(0xFEB3D2C) approx BBs exec'd 0 ====
+
+	0xFEB3D2C:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEB3D30:  7C6802A6  mflr r3
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0xFEB3D34:  4811411D  bl 0xFFC7E50
+	   9: MOVL       	$0xFEB3D38, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FEB3D2C 12
+. 94 21 FF E0 7C 68 02 A6 48 11 41 1D
+
+==== BB 1331 (0xFEB3D38) approx BBs exec'd 0 ====
+
+	0xFEB3D38:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEB3D3C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEB3D40:  93810010  stw r28,16(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEB3D44:  93A10014  stw r29,20(r1)
+	  13: GETL       	R29, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEB3D48:  38000001  li r0,1
+	  18: MOVL       	$0x1, t14
+	  19: PUTL       	t14, R0
+	  20: INCEIPL       	$4
+
+	0xFEB3D4C:  9361000C  stw r27,12(r1)
+	  21: GETL       	R27, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEB3D50:  3BA00000  li r29,0
+	  26: MOVL       	$0x0, t20
+	  27: PUTL       	t20, R29
+	  28: INCEIPL       	$4
+
+	0xFEB3D54:  93E1001C  stw r31,28(r1)
+	  29: GETL       	R31, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x1C, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFEB3D58:  90610024  stw r3,36(r1)
+	  34: GETL       	R3, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x24, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFEB3D5C:  839E02B4  lwz r28,692(r30)
+	  39: GETL       	R30, t30
+	  40: ADDL       	$0x2B4, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R28
+	  43: INCEIPL       	$4
+
+	0xFEB3D60:  7D20E028  lwarx r9,r0,r28
+	  44: GETL       	R28, t34
+	  45: LOCKo       	
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R9
+	  48: INCEIPL       	$4
+
+	0xFEB3D64:  7C09E800  cmp cr0,r9,r29
+	  49: GETL       	R9, t38
+	  50: GETL       	R29, t40
+	  51: CMPL       	t38, t40, t42  (-rSo)
+	  52: ICRFL       	t42, $0x0, CR
+	  53: INCEIPL       	$4
+
+	0xFEB3D68:  4082000C  bc 4,2,0xFEB3D74
+	  54: Jc02o       	$0xFEB3D74
+
+
+. 0 FEB3D38 52
+. 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 A1 00 14 38 00 00 01 93 61 00 0C 3B A0 00 00 93 E1 00 1C 90 61 00 24 83 9E 02 B4 7D 20 E0 28 7C 09 E8 00 40 82 00 0C
+
+==== BB 1332 (0xFEB3D6C) approx BBs exec'd 0 ====
+
+	0xFEB3D6C:  7C00E12D  stwcx. r0,r0,r28
+	   0: GETL       	R28, t0
+	   1: GETL       	R0, t2
+	   2: LOCKo       	
+	   3: STL       	t2, (t0)  (-rSo)
+	   4: ICRFL       	cr, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEB3D70:  40A2FFF0  bc 5,2,0xFEB3D60
+	   6: Jc02o       	$0xFEB3D60
+
+
+. 0 FEB3D6C 8
+. 7C 00 E1 2D 40 A2 FF F0
+
+==== BB 1333 (0xFEB3D60) approx BBs exec'd 0 ====
+
+	0xFEB3D60:  7D20E028  lwarx r9,r0,r28
+	   0: GETL       	R28, t0
+	   1: LOCKo       	
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEB3D64:  7C09E800  cmp cr0,r9,r29
+	   5: GETL       	R9, t4
+	   6: GETL       	R29, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEB3D68:  4082000C  bc 4,2,0xFEB3D74
+	  10: Jc02o       	$0xFEB3D74
+
+
+. 0 FEB3D60 12
+. 7D 20 E0 28 7C 09 E8 00 40 82 00 0C
+
+==== BB 1334 (0xFEB3D74) approx BBs exec'd 0 ====
+
+	0xFEB3D74:  4C00012C  	   0: INCEIPL       	$4
+
+	0xFEB3D78:  2F890000  cmpi cr7,r9,0
+	   1: GETL       	R9, t0
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEB3D7C:  409E0134  bc 4,30,0xFEB3EB0
+	   5: Jc30o       	$0xFEB3EB0
+
+
+. 0 FEB3D74 12
+. 4C 00 01 2C 2F 89 00 00 40 9E 01 34
+
+==== BB 1335 (0xFEB3D80) approx BBs exec'd 0 ====
+
+	0xFEB3D80:  837E1AFC  lwz r27,6908(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1AFC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0xFEB3D84:  83FB0000  lwz r31,0(r27)
+	   5: GETL       	R27, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R31
+	   8: INCEIPL       	$4
+
+	0xFEB3D88:  2F1F0000  cmpi cr6,r31,0
+	   9: GETL       	R31, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x6, CR
+	  12: INCEIPL       	$4
+
+	0xFEB3D8C:  419A0050  bc 12,26,0xFEB3DDC
+	  13: Js26o       	$0xFEB3DDC
+
+
+. 0 FEB3D80 16
+. 83 7E 1A FC 83 FB 00 00 2F 1F 00 00 41 9A 00 50
+
+==== BB 1336 (0xFEB3D90) approx BBs exec'd 0 ====
+
+	0xFEB3D90:  817F0004  lwz r11,4(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFEB3D94:  3BA00000  li r29,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0xFEB3D98:  280B0000  cmpli cr0,r11,0
+	   8: GETL       	R11, t6
+	   9: MOVL       	$0x0, t10
+	  10: CMPUL       	t6, t10, t8  (-rSo)
+	  11: ICRFL       	t8, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFEB3D9C:  40810024  bc 4,1,0xFEB3DC0
+	  13: Jc01o       	$0xFEB3DC0
+
+
+. 0 FEB3D90 16
+. 81 7F 00 04 3B A0 00 00 28 0B 00 00 40 81 00 24
+
+==== BB 1337 (0xFEB3DC0) approx BBs exec'd 0 ====
+
+	0xFEB3DC0:  7C0BE840  cmpl cr0,r11,r29
+	   0: GETL       	R11, t0
+	   1: GETL       	R29, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0xFEB3DC4:  41810104  bc 12,1,0xFEB3EC8
+	   5: Js01o       	$0xFEB3EC8
+
+
+. 0 FEB3DC0 8
+. 7C 0B E8 40 41 81 01 04
+
+==== BB 1338 (0xFEB3DC8) approx BBs exec'd 0 ====
+
+	0xFEB3DC8:  288B001F  cmpli cr1,r11,31
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x1F, t4
+	   2: CMPUL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFEB3DCC:  408500F0  bc 4,5,0xFEB3EBC
+	   5: Jc05o       	$0xFEB3EBC
+
+
+. 0 FEB3DC8 8
+. 28 8B 00 1F 40 85 00 F0
+
+==== BB 1339 (0xFEB3EBC) approx BBs exec'd 0 ====
+
+	0xFEB3EBC:  392B0001  addi r9,r11,1
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFEB3EC0:  7D7D5B78  or r29,r11,r11
+	   4: GETL       	R11, t2
+	   5: PUTL       	t2, R29
+	   6: INCEIPL       	$4
+
+	0xFEB3EC4:  913F0004  stw r9,4(r31)
+	   7: GETL       	R9, t4
+	   8: GETL       	R31, t6
+	   9: ADDL       	$0x4, t6
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0xFEB3EC8:  40BAFF40  bc 5,26,0xFEB3E08
+	  12: Jc26o       	$0xFEB3E08
+
+
+. 0 FEB3EBC 16
+. 39 2B 00 01 7D 7D 5B 78 91 3F 00 04 40 BA FF 40
+
+==== BB 1340 (0xFEB3E08) approx BBs exec'd 0 ====
+
+	0xFEB3E08:  57AA2036  rlwinm r10,r29,4,0,27
+	   0: GETL       	R29, t0
+	   1: SHLL       	$0x4, t0
+	   2: PUTL       	t0, R10
+	   3: INCEIPL       	$4
+
+	0xFEB3E0C:  39000001  li r8,1
+	   4: MOVL       	$0x1, t2
+	   5: PUTL       	t2, R8
+	   6: INCEIPL       	$4
+
+	0xFEB3E10:  7CEAFA14  add r7,r10,r31
+	   7: GETL       	R10, t4
+	   8: GETL       	R31, t6
+	   9: ADDL       	t4, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0xFEB3E14:  91070008  stw r8,8(r7)
+	  12: GETL       	R8, t8
+	  13: GETL       	R7, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFEB3E18:  39800000  li r12,0
+	  17: MOVL       	$0x0, t12
+	  18: PUTL       	t12, R12
+	  19: INCEIPL       	$4
+
+	0xFEB3E1C:  7C0004AC  sync
+	  20: INCEIPL       	$4
+
+	0xFEB3E20:  7D60E028  lwarx r11,r0,r28
+	  21: GETL       	R28, t14
+	  22: LOCKo       	
+	  23: LDL       	(t14), t16
+	  24: PUTL       	t16, R11
+	  25: INCEIPL       	$4
+
+	0xFEB3E24:  7D80E12D  stwcx. r12,r0,r28
+	  26: GETL       	R28, t18
+	  27: GETL       	R12, t20
+	  28: LOCKo       	
+	  29: STL       	t20, (t18)  (-rSo)
+	  30: ICRFL       	cr, $0x0, CR
+	  31: INCEIPL       	$4
+
+	0xFEB3E28:  40A2FFF8  bc 5,2,0xFEB3E20
+	  32: Jc02o       	$0xFEB3E20
+
+
+. 0 FEB3E08 36
+. 57 AA 20 36 39 00 00 01 7C EA FA 14 91 07 00 08 39 80 00 00 7C 00 04 AC 7D 60 E0 28 7D 80 E1 2D 40 A2 FF F8
+
+==== BB 1341 (0xFEB3E2C) approx BBs exec'd 0 ====
+
+	0xFEB3E2C:  2F8B0001  cmpi cr7,r11,1
+	   0: GETL       	R11, t0
+	   1: MOVL       	$0x1, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEB3E30:  419D0060  bc 12,29,0xFEB3E90
+	   5: Js29o       	$0xFEB3E90
+
+
+. 0 FEB3E2C 8
+. 2F 8B 00 01 41 9D 00 60
+
+==== BB 1342 (0xFEB3E34) approx BBs exec'd 0 ====
+
+	0xFEB3E34:  419A0034  bc 12,26,0xFEB3E68
+	   0: Js26o       	$0xFEB3E68
+
+
+. 0 FEB3E34 4
+. 41 9A 00 34
+
+==== BB 1343 (0xFEB3E38) approx BBs exec'd 0 ====
+
+	0xFEB3E38:  57BC2036  rlwinm r28,r29,4,0,27
+	   0: GETL       	R29, t0
+	   1: SHLL       	$0x4, t0
+	   2: PUTL       	t0, R28
+	   3: INCEIPL       	$4
+
+	0xFEB3E3C:  83A10024  lwz r29,36(r1)
+	   4: GETL       	R1, t2
+	   5: ADDL       	$0x24, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0xFEB3E40:  7F7CFA14  add r27,r28,r31
+	   9: GETL       	R28, t6
+	  10: GETL       	R31, t8
+	  11: ADDL       	t6, t8
+	  12: PUTL       	t8, R27
+	  13: INCEIPL       	$4
+
+	0xFEB3E44:  83C10018  lwz r30,24(r1)
+	  14: GETL       	R1, t10
+	  15: ADDL       	$0x18, t10
+	  16: LDL       	(t10), t12
+	  17: PUTL       	t12, R30
+	  18: INCEIPL       	$4
+
+	0xFEB3E48:  387B0008  addi r3,r27,8
+	  19: GETL       	R27, t14
+	  20: ADDL       	$0x8, t14
+	  21: PUTL       	t14, R3
+	  22: INCEIPL       	$4
+
+	0xFEB3E4C:  7FA803A6  mtlr r29
+	  23: GETL       	R29, t16
+	  24: PUTL       	t16, LR
+	  25: INCEIPL       	$4
+
+	0xFEB3E50:  8361000C  lwz r27,12(r1)
+	  26: GETL       	R1, t18
+	  27: ADDL       	$0xC, t18
+	  28: LDL       	(t18), t20
+	  29: PUTL       	t20, R27
+	  30: INCEIPL       	$4
+
+	0xFEB3E54:  83810010  lwz r28,16(r1)
+	  31: GETL       	R1, t22
+	  32: ADDL       	$0x10, t22
+	  33: LDL       	(t22), t24
+	  34: PUTL       	t24, R28
+	  35: INCEIPL       	$4
+
+	0xFEB3E58:  83A10014  lwz r29,20(r1)
+	  36: GETL       	R1, t26
+	  37: ADDL       	$0x14, t26
+	  38: LDL       	(t26), t28
+	  39: PUTL       	t28, R29
+	  40: INCEIPL       	$4
+
+	0xFEB3E5C:  83E1001C  lwz r31,28(r1)
+	  41: GETL       	R1, t30
+	  42: ADDL       	$0x1C, t30
+	  43: LDL       	(t30), t32
+	  44: PUTL       	t32, R31
+	  45: INCEIPL       	$4
+
+	0xFEB3E60:  38210020  addi r1,r1,32
+	  46: GETL       	R1, t34
+	  47: ADDL       	$0x20, t34
+	  48: PUTL       	t34, R1
+	  49: INCEIPL       	$4
+
+	0xFEB3E64:  4E800020  blr
+	  50: GETL       	LR, t36
+	  51: JMPo-r       	t36  ($4)
+
+
+. 0 FEB3E38 48
+. 57 BC 20 36 83 A1 00 24 7F 7C FA 14 83 C1 00 18 38 7B 00 08 7F A8 03 A6 83 61 00 0C 83 81 00 10 83 A1 00 14 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1344 (0xFEB3EFC) approx BBs exec'd 0 ====
+
+	0xFEB3EFC:  2C030000  cmpi cr0,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEB3F00:  3800FFFF  li r0,-1
+	   4: MOVL       	$0xFFFFFFFF, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFEB3F04:  4182001C  bc 12,2,0xFEB3F20
+	   7: Js02o       	$0xFEB3F20
+
+
+. 0 FEB3EFC 12
+. 2C 03 00 00 38 00 FF FF 41 82 00 1C
+
+==== BB 1345 (0xFEB3F08) approx BBs exec'd 0 ====
+
+	0xFEB3F08:  38800004  li r4,4
+	   0: MOVL       	$0x4, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEB3F0C:  93E3000C  stw r31,12(r3)
+	   3: GETL       	R31, t2
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0xC, t4
+	   6: STL       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0xFEB3F10:  90830000  stw r4,0(r3)
+	   8: GETL       	R4, t6
+	   9: GETL       	R3, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFEB3F14:  38000000  li r0,0
+	  12: MOVL       	$0x0, t10
+	  13: PUTL       	t10, R0
+	  14: INCEIPL       	$4
+
+	0xFEB3F18:  93A30004  stw r29,4(r3)
+	  15: GETL       	R29, t12
+	  16: GETL       	R3, t14
+	  17: ADDL       	$0x4, t14
+	  18: STL       	t12, (t14)
+	  19: INCEIPL       	$4
+
+	0xFEB3F1C:  93830008  stw r28,8(r3)
+	  20: GETL       	R28, t16
+	  21: GETL       	R3, t18
+	  22: ADDL       	$0x8, t18
+	  23: STL       	t16, (t18)
+	  24: INCEIPL       	$4
+
+	0xFEB3F20:  80A10024  lwz r5,36(r1)
+	  25: GETL       	R1, t20
+	  26: ADDL       	$0x24, t20
+	  27: LDL       	(t20), t22
+	  28: PUTL       	t22, R5
+	  29: INCEIPL       	$4
+
+	0xFEB3F24:  7C030378  or r3,r0,r0
+	  30: GETL       	R0, t24
+	  31: PUTL       	t24, R3
+	  32: INCEIPL       	$4
+
+	0xFEB3F28:  83810010  lwz r28,16(r1)
+	  33: GETL       	R1, t26
+	  34: ADDL       	$0x10, t26
+	  35: LDL       	(t26), t28
+	  36: PUTL       	t28, R28
+	  37: INCEIPL       	$4
+
+	0xFEB3F2C:  83A10014  lwz r29,20(r1)
+	  38: GETL       	R1, t30
+	  39: ADDL       	$0x14, t30
+	  40: LDL       	(t30), t32
+	  41: PUTL       	t32, R29
+	  42: INCEIPL       	$4
+
+	0xFEB3F30:  7CA803A6  mtlr r5
+	  43: GETL       	R5, t34
+	  44: PUTL       	t34, LR
+	  45: INCEIPL       	$4
+
+	0xFEB3F34:  83C10018  lwz r30,24(r1)
+	  46: GETL       	R1, t36
+	  47: ADDL       	$0x18, t36
+	  48: LDL       	(t36), t38
+	  49: PUTL       	t38, R30
+	  50: INCEIPL       	$4
+
+	0xFEB3F38:  83E1001C  lwz r31,28(r1)
+	  51: GETL       	R1, t40
+	  52: ADDL       	$0x1C, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R31
+	  55: INCEIPL       	$4
+
+	0xFEB3F3C:  38210020  addi r1,r1,32
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x20, t44
+	  58: PUTL       	t44, R1
+	  59: INCEIPL       	$4
+
+	0xFEB3F40:  4E800020  blr
+	  60: GETL       	LR, t46
+	  61: JMPo-r       	t46  ($4)
+
+
+. 0 FEB3F08 60
+. 38 80 00 04 93 E3 00 0C 90 83 00 00 38 00 00 00 93 A3 00 04 93 83 00 08 80 A1 00 24 7C 03 03 78 83 81 00 10 83 A1 00 14 7C A8 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1346 (0xFE9B7A4) approx BBs exec'd 0 ====
+
+	0xFE9B7A4:  2F1F0000  cmpi cr6,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFE9B7A8:  419A0014  bc 12,26,0xFE9B7BC
+	   4: Js26o       	$0xFE9B7BC
+
+
+. 0 FE9B7A4 8
+. 2F 1F 00 00 41 9A 00 14
+
+==== BB 1347 (0xFE9B7AC) approx BBs exec'd 0 ====
+
+	0xFE9B7AC:  7FE3FB78  or r3,r31,r31
+	   0: GETL       	R31, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFE9B7B0:  38800000  li r4,0
+	   3: MOVL       	$0x0, t2
+	   4: PUTL       	t2, R4
+	   5: INCEIPL       	$4
+
+	0xFE9B7B4:  38A00000  li r5,0
+	   6: MOVL       	$0x0, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0xFE9B7B8:  48018719  bl 0xFEB3ED0
+	   9: MOVL       	$0xFE9B7BC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFEB3ED0  ($4)
+
+
+. 0 FE9B7AC 16
+. 7F E3 FB 78 38 80 00 00 38 A0 00 00 48 01 87 19
+
+==== BB 1348 (0xFEB3DA0) approx BBs exec'd 0 ====
+
+	0xFEB3DA0:  393F0008  addi r9,r31,8
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x8, t0
+	   2: PUTL       	t0, R9
+	   3: INCEIPL       	$4
+
+	0xFEB3DA4:  80890000  lwz r4,0(r9)
+	   4: GETL       	R9, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFEB3DA8:  39290010  addi r9,r9,16
+	   8: GETL       	R9, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0xFEB3DAC:  2C840000  cmpi cr1,r4,0
+	  12: GETL       	R4, t8
+	  13: CMP0L       	t8, t10  (-rSo)
+	  14: ICRFL       	t10, $0x1, CR
+	  15: INCEIPL       	$4
+
+	0xFEB3DB0:  41860010  bc 12,6,0xFEB3DC0
+	  16: Js06o       	$0xFEB3DC0
+
+
+. 0 FEB3DA0 20
+. 39 3F 00 08 80 89 00 00 39 29 00 10 2C 84 00 00 41 86 00 10
+
+==== BB 1349 (0xFEB3DB4) approx BBs exec'd 0 ====
+
+	0xFEB3DB4:  3BBD0001  addi r29,r29,1
+	   0: GETL       	R29, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R29
+	   3: INCEIPL       	$4
+
+	0xFEB3DB8:  7F8BE840  cmpl cr7,r11,r29
+	   4: GETL       	R11, t2
+	   5: GETL       	R29, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFEB3DBC:  419DFFE8  bc 12,29,0xFEB3DA4
+	   9: Js29o       	$0xFEB3DA4
+
+
+. 0 FEB3DB4 12
+. 3B BD 00 01 7F 8B E8 40 41 9D FF E8
+
+==== BB 1350 (0xFE9B7BC) approx BBs exec'd 0 ====
+
+	0xFE9B7BC:  83FE1BC8  lwz r31,7112(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BC8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFE9B7C0:  807F0000  lwz r3,0(r31)
+	   5: GETL       	R31, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFE9B7C4:  70690002  andi. r9,r3,0x2
+	   9: GETL       	R3, t8
+	  10: ANDL       	$0x2, t8
+	  11: PUTL       	t8, R9
+	  12: CMP0L       	t8, t10  (-rSo)
+	  13: ICRFL       	t10, $0x0, CR
+	  14: INCEIPL       	$4
+
+	0xFE9B7C8:  7FA00026  mfcr r29
+	  15: GETL       	CR, t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0xFE9B7CC:  40820080  bc 4,2,0xFE9B84C
+	  18: Jc02o       	$0xFE9B84C
+
+
+. 0 FE9B7BC 20
+. 83 FE 1B C8 80 7F 00 00 70 69 00 02 7F A0 00 26 40 82 00 80
+
+==== BB 1351 (0xFE9B7D0) approx BBs exec'd 0 ====
+
+	0xFE9B7D0:  2F9C0000  cmpi cr7,r28,0
+	   0: GETL       	R28, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFE9B7D4:  419E001C  bc 12,30,0xFE9B7F0
+	   4: Js30o       	$0xFE9B7F0
+
+
+. 0 FE9B7D0 8
+. 2F 9C 00 00 41 9E 00 1C
+
+==== BB 1352 (0xFE9B7D8) approx BBs exec'd 0 ====
+
+	0xFE9B7D8:  80BB0000  lwz r5,0(r27)
+	   0: GETL       	R27, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R5
+	   3: INCEIPL       	$4
+
+	0xFE9B7DC:  7F8803A6  mtlr r28
+	   4: GETL       	R28, t4
+	   5: PUTL       	t4, LR
+	   6: INCEIPL       	$4
+
+	0xFE9B7E0:  806101F4  lwz r3,500(r1)
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0x1F4, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R3
+	  11: INCEIPL       	$4
+
+	0xFE9B7E4:  808101F8  lwz r4,504(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x1F8, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R4
+	  16: INCEIPL       	$4
+
+	0xFE9B7E8:  80C101FC  lwz r6,508(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x1FC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R6
+	  21: INCEIPL       	$4
+
+	0xFE9B7EC:  4E800021  blrl
+	  22: GETL       	LR, t18
+	  23: MOVL       	$0xFE9B7F0, t20
+	  24: PUTL       	t20, LR
+	  25: JMPo-r       	t18  ($4)
+
+
+. 0 FE9B7D8 24
+. 80 BB 00 00 7F 88 03 A6 80 61 01 F4 80 81 01 F8 80 C1 01 FC 4E 80 00 21
+
+==== BB 1353 __libc_csu_init(0x100003FC) approx BBs exec'd 0 ====
+
+	0x100003FC:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x10000400:  7CA802A6  mflr r5
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x10000404:  429F0005  bcl 20,31,0x10000408
+	   9: MOVL       	$0x10000408, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x10000408:  93C10018  stw r30,24(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x1000040C:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0x10000410:  93810010  stw r28,16(r1)
+	  20: GETL       	R28, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x10, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x10000414:  93E1001C  stw r31,28(r1)
+	  25: GETL       	R31, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0x10000418:  93A10014  stw r29,20(r1)
+	  30: GETL       	R29, t22
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x14, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0x1000041C:  809EFFF0  lwz r4,-16(r30)
+	  35: GETL       	R30, t26
+	  36: ADDL       	$0xFFFFFFF0, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R4
+	  39: INCEIPL       	$4
+
+	0x10000420:  90A10024  stw r5,36(r1)
+	  40: GETL       	R5, t30
+	  41: GETL       	R1, t32
+	  42: ADDL       	$0x24, t32
+	  43: STL       	t30, (t32)
+	  44: INCEIPL       	$4
+
+	0x10000424:  7FC4F214  add r30,r4,r30
+	  45: GETL       	R4, t34
+	  46: GETL       	R30, t36
+	  47: ADDL       	t34, t36
+	  48: PUTL       	t36, R30
+	  49: INCEIPL       	$4
+
+	0x10000428:  4BFFFE51  bl 0x10000278
+	  50: MOVL       	$0x1000042C, t38
+	  51: PUTL       	t38, LR
+	  52: JMPo-c       	$0x10000278  ($4)
+
+
+. 0 100003FC 48
+. 94 21 FF E0 7C A8 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 81 00 10 93 E1 00 1C 93 A1 00 14 80 9E FF F0 90 A1 00 24 7F C4 F2 14 4B FF FE 51
+
+==== BB 1354 (0x10000278) approx BBs exec'd 0 ====
+
+	0x10000278:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x1000027C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10000280:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x10000284:  48000041  bl 0x100002C4
+	  14: MOVL       	$0x10000288, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x100002C4  ($4)
+
+
+. 0 10000278 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 48 00 00 41
+
+==== BB 1355 (0x100002C4) approx BBs exec'd 0 ====
+
+	0x100002C4:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x100002C8:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x100002CC:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x100002D0:  90010014  stw r0,20(r1)
+	  14: GETL       	R0, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x14, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0x100002D4:  480105B5  bl 0x10010888
+	  19: MOVL       	$0x100002D8, t14
+	  20: PUTL       	t14, LR
+	  21: JMPo-c       	$0x10010888  ($4)
+
+
+. 0 100002C4 20
+. 94 21 FF F0 7C 08 02 A6 93 C1 00 08 90 01 00 14 48 01 05 B5
+
+==== BB 1356 (0x10010888) approx BBs exec'd 0 ====
+
+	0x10010888:  4E800021  blrl
+	   0: GETL       	LR, t0
+	   1: MOVL       	$0x1001088C, t2
+	   2: PUTL       	t2, LR
+	   3: JMPo-r       	t0  ($4)
+
+
+. 0 10010888 4
+. 4E 80 00 21
+
+==== BB 1357 (0x100002D8) approx BBs exec'd 0 ====
+
+	0x100002D8:  7FC802A6  mflr r30
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R30
+	   2: INCEIPL       	$4
+
+	0x100002DC:  801E000C  lwz r0,12(r30)
+	   3: GETL       	R30, t2
+	   4: ADDL       	$0xC, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0x100002E0:  2F800000  cmpi cr7,r0,0
+	   8: GETL       	R0, t6
+	   9: CMP0L       	t6, t8  (-rSo)
+	  10: ICRFL       	t8, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0x100002E4:  419E000C  bc 12,30,0x100002F0
+	  12: Js30o       	$0x100002F0
+
+
+. 0 100002D8 16
+. 7F C8 02 A6 80 1E 00 0C 2F 80 00 00 41 9E 00 0C
+
+==== BB 1358 (0x100002F0) approx BBs exec'd 0 ====
+
+	0x100002F0:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x100002F4:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0x100002F8:  83C10008  lwz r30,8(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R30
+	  12: INCEIPL       	$4
+
+	0x100002FC:  38210010  addi r1,r1,16
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: PUTL       	t10, R1
+	  16: INCEIPL       	$4
+
+	0x10000300:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+. 0 100002F0 20
+. 80 01 00 14 7C 08 03 A6 83 C1 00 08 38 21 00 10 4E 80 00 20
+
+==== BB 1359 (0x10000288) approx BBs exec'd 0 ====
+
+	0x10000288:  48000105  bl 0x1000038C
+	   0: MOVL       	$0x1000028C, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x1000038C  ($4)
+
+
+. 0 10000288 4
+. 48 00 01 05
+
+==== BB 1360 (0x1000038C) approx BBs exec'd 0 ====
+
+	0x1000038C:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10000390:  3D601001  lis r11,4097
+	   3: MOVL       	$0x10010000, t2
+	   4: PUTL       	t2, R11
+	   5: INCEIPL       	$4
+
+	0x10000394:  9421FFF0  stwu r1,-16(r1)
+	   6: GETL       	R1, t4
+	   7: GETL       	R1, t6
+	   8: ADDL       	$0xFFFFFFF0, t6
+	   9: PUTL       	t6, R1
+	  10: STL       	t4, (t6)
+	  11: INCEIPL       	$4
+
+	0x10000398:  3D200000  lis r9,0
+	  12: MOVL       	$0x0, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x1000039C:  39290000  addi r9,r9,0
+	  15: MOVL       	$0x0, t10
+	  16: PUTL       	t10, R9
+	  17: INCEIPL       	$4
+
+	0x100003A0:  386B07A4  addi r3,r11,1956
+	  18: MOVL       	$0x100107A4, t12
+	  19: PUTL       	t12, R3
+	  20: INCEIPL       	$4
+
+	0x100003A4:  90010014  stw r0,20(r1)
+	  21: GETL       	R0, t14
+	  22: GETL       	R1, t16
+	  23: ADDL       	$0x14, t16
+	  24: STL       	t14, (t16)
+	  25: INCEIPL       	$4
+
+	0x100003A8:  800B07A4  lwz r0,1956(r11)
+	  26: MOVL       	$0x100107A4, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R0
+	  29: INCEIPL       	$4
+
+	0x100003AC:  2F800000  cmpi cr7,r0,0
+	  30: GETL       	R0, t22
+	  31: CMP0L       	t22, t24  (-rSo)
+	  32: ICRFL       	t24, $0x7, CR
+	  33: INCEIPL       	$4
+
+	0x100003B0:  419E0014  bc 12,30,0x100003C4
+	  34: Js30o       	$0x100003C4
+
+
+. 0 1000038C 40
+. 7C 08 02 A6 3D 60 10 01 94 21 FF F0 3D 20 00 00 39 29 00 00 38 6B 07 A4 90 01 00 14 80 0B 07 A4 2F 80 00 00 41 9E 00 14
+
+==== BB 1361 (0x100003C4) approx BBs exec'd 0 ====
+
+	0x100003C4:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x100003C8:  38210010  addi r1,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R1
+	   8: INCEIPL       	$4
+
+	0x100003CC:  7C0803A6  mtlr r0
+	   9: GETL       	R0, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x100003D0:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+. 0 100003C4 16
+. 80 01 00 14 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+==== BB 1362 (0x1000028C) approx BBs exec'd 0 ====
+
+	0x1000028C:  48000469  bl 0x100006F4
+	   0: MOVL       	$0x10000290, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x100006F4  ($4)
+
+
+. 0 1000028C 4
+. 48 00 04 69
+
+==== BB 1363 (0x100006F4) approx BBs exec'd 0 ====
+
+	0x100006F4:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x100006F8:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x100006FC:  3D201001  lis r9,4097
+	   9: MOVL       	$0x10010000, t6
+	  10: PUTL       	t6, R9
+	  11: INCEIPL       	$4
+
+	0x10000700:  93E1000C  stw r31,12(r1)
+	  12: GETL       	R31, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0xC, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x10000704:  39290798  addi r9,r9,1944
+	  17: MOVL       	$0x10010798, t12
+	  18: PUTL       	t12, R9
+	  19: INCEIPL       	$4
+
+	0x10000708:  90010014  stw r0,20(r1)
+	  20: GETL       	R0, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x14, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x1000070C:  3BE9FFFC  addi r31,r9,-4
+	  25: MOVL       	$0x10010794, t18
+	  26: PUTL       	t18, R31
+	  27: INCEIPL       	$4
+
+	0x10000710:  8009FFFC  lwz r0,-4(r9)
+	  28: MOVL       	$0x10010794, t20
+	  29: LDL       	(t20), t22
+	  30: PUTL       	t22, R0
+	  31: INCEIPL       	$4
+
+	0x10000714:  48000010  b 0x10000724
+	  32: JMPo       	$0x10000724  ($4)
+
+
+. 0 100006F4 36
+. 7C 08 02 A6 94 21 FF F0 3D 20 10 01 93 E1 00 0C 39 29 07 98 90 01 00 14 3B E9 FF FC 80 09 FF FC 48 00 00 10
+
+==== BB 1364 (0x10000724) approx BBs exec'd 0 ====
+
+	0x10000724:  2F80FFFF  cmpi cr7,r0,-1
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0xFFFFFFFF, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0x10000728:  409EFFF0  bc 4,30,0x10000718
+	   5: Jc30o       	$0x10000718
+
+
+. 0 10000724 8
+. 2F 80 FF FF 40 9E FF F0
+
+==== BB 1365 (0x1000072C) approx BBs exec'd 0 ====
+
+	0x1000072C:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10000730:  83E1000C  lwz r31,12(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0xC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0x10000734:  38210010  addi r1,r1,16
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x10, t8
+	  12: PUTL       	t8, R1
+	  13: INCEIPL       	$4
+
+	0x10000738:  7C0803A6  mtlr r0
+	  14: GETL       	R0, t10
+	  15: PUTL       	t10, LR
+	  16: INCEIPL       	$4
+
+	0x1000073C:  4E800020  blr
+	  17: GETL       	LR, t12
+	  18: JMPo-r       	t12  ($4)
+
+
+. 0 1000072C 20
+. 80 01 00 14 83 E1 00 0C 38 21 00 10 7C 08 03 A6 4E 80 00 20
+
+==== BB 1366 (0x10000290) approx BBs exec'd 0 ====
+
+	0x10000290:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10000294:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0x10000298:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0x1000029C:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+. 0 10000290 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1367 (0x1000042C) approx BBs exec'd 0 ====
+
+	0x1000042C:  839E8004  lwz r28,-32764(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8004, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x10000430:  807E8000  lwz r3,-32768(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0xFFFF8000, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x10000434:  3BE00000  li r31,0
+	  10: MOVL       	$0x0, t8
+	  11: PUTL       	t8, R31
+	  12: INCEIPL       	$4
+
+	0x10000438:  7C1C1850  subf r0,r28,r3
+	  13: GETL       	R28, t10
+	  14: GETL       	R3, t12
+	  15: SUBL       	t10, t12
+	  16: PUTL       	t12, R0
+	  17: INCEIPL       	$4
+
+	0x1000043C:  7C001670  srawi r0,r0,2
+	  18: GETL       	R0, t14
+	  19: SARL       	$0x2, t14  (-wCa)
+	  20: PUTL       	t14, R0
+	  21: INCEIPL       	$4
+
+	0x10000440:  7F9F0040  cmpl cr7,r31,r0
+	  22: GETL       	R31, t16
+	  23: GETL       	R0, t18
+	  24: CMPUL       	t16, t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x7, CR
+	  26: INCEIPL       	$4
+
+	0x10000444:  409C0024  bc 4,28,0x10000468
+	  27: Jc28o       	$0x10000468
+
+
+. 0 1000042C 28
+. 83 9E 80 04 80 7E 80 00 3B E0 00 00 7C 1C 18 50 7C 00 16 70 7F 9F 00 40 40 9C 00 24
+
+==== BB 1368 (0x10000468) approx BBs exec'd 0 ====
+
+	0x10000468:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x1000046C:  83810010  lwz r28,16(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x10000470:  83A10014  lwz r29,20(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R29
+	  14: INCEIPL       	$4
+
+	0x10000474:  7D0803A6  mtlr r8
+	  15: GETL       	R8, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x10000478:  83C10018  lwz r30,24(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x18, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R30
+	  22: INCEIPL       	$4
+
+	0x1000047C:  83E1001C  lwz r31,28(r1)
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x1C, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R31
+	  27: INCEIPL       	$4
+
+	0x10000480:  38210020  addi r1,r1,32
+	  28: GETL       	R1, t22
+	  29: ADDL       	$0x20, t22
+	  30: PUTL       	t22, R1
+	  31: INCEIPL       	$4
+
+	0x10000484:  4E800020  blr
+	  32: GETL       	LR, t24
+	  33: JMPo-r       	t24  ($4)
+
+
+. 0 10000468 32
+. 81 01 00 24 83 81 00 10 83 A1 00 14 7D 08 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1369 (0xFE9B7F0) approx BBs exec'd 0 ====
+
+	0xFE9B7F0:  7FA80120  mtcrf 0x80,r29
+	   0: GETL       	R29, t0
+	   1: ICRFL       	t0, $0x0, CR
+	   2: INCEIPL       	$4
+
+	0xFE9B7F4:  408200B0  bc 4,2,0xFE9B8A4
+	   3: Jc02o       	$0xFE9B8A4
+
+
+. 0 FE9B7F0 8
+. 7F A8 01 20 40 82 00 B0
+
+==== BB 1370 (0xFE9B7F8) approx BBs exec'd 0 ====
+
+	0xFE9B7F8:  38610010  addi r3,r1,16
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x10, t0
+	   2: PUTL       	t0, R3
+	   3: INCEIPL       	$4
+
+	0xFE9B7FC:  48014F9D  bl 0xFEB0798
+	   4: MOVL       	$0xFE9B800, t2
+	   5: PUTL       	t2, LR
+	   6: JMPo-c       	$0xFEB0798  ($4)
+
+
+. 0 FE9B7F8 8
+. 38 61 00 10 48 01 4F 9D
+
+==== BB 1371 __GI__setjmp(0xFEB0798) approx BBs exec'd 0 ====
+
+	0xFEB0798:  38800000  li r4,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R4
+	   2: INCEIPL       	$4
+
+	0xFEB079C:  4BFFFC94  b 0xFEB0430
+	   3: JMPo       	$0xFEB0430  ($4)
+
+
+. 0 FEB0798 8
+. 38 80 00 00 4B FF FC 94
+
+==== BB 1372 __sigsetjmp@@GLIBC_2.3.4(0xFEB0430) approx BBs exec'd 0 ====
+
+	0xFEB0430:  90230000  stw r1,0(r3)
+	   0: GETL       	R1, t0
+	   1: GETL       	R3, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFEB0434:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFEB0438:  91C3000C  stw r14,12(r3)
+	   7: GETL       	R14, t6
+	   8: GETL       	R3, t8
+	   9: ADDL       	$0xC, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFEB043C:  D9C30058  stfd f14,88(r3)
+	  12: GETL       	R3, t10
+	  13: ADDL       	$0x58, t10
+	  14: FPU_WQ       	0x0:0xE, (t10)
+	  15: INCEIPL       	$4
+
+	0xFEB0440:  90030008  stw r0,8(r3)
+	  16: GETL       	R0, t12
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0x8, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFEB0444:  91E30010  stw r15,16(r3)
+	  21: GETL       	R15, t16
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0x10, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEB0448:  D9E30060  stfd f15,96(r3)
+	  26: GETL       	R3, t20
+	  27: ADDL       	$0x60, t20
+	  28: FPU_WQ       	0x0:0xF, (t20)
+	  29: INCEIPL       	$4
+
+	0xFEB044C:  7C000026  mfcr r0
+	  30: GETL       	CR, t22
+	  31: PUTL       	t22, R0
+	  32: INCEIPL       	$4
+
+	0xFEB0450:  92030014  stw r16,20(r3)
+	  33: GETL       	R16, t24
+	  34: GETL       	R3, t26
+	  35: ADDL       	$0x14, t26
+	  36: STL       	t24, (t26)
+	  37: INCEIPL       	$4
+
+	0xFEB0454:  DA030068  stfd f16,104(r3)
+	  38: GETL       	R3, t28
+	  39: ADDL       	$0x68, t28
+	  40: FPU_WQ       	0x0:0x10, (t28)
+	  41: INCEIPL       	$4
+
+	0xFEB0458:  90030054  stw r0,84(r3)
+	  42: GETL       	R0, t30
+	  43: GETL       	R3, t32
+	  44: ADDL       	$0x54, t32
+	  45: STL       	t30, (t32)
+	  46: INCEIPL       	$4
+
+	0xFEB045C:  92230018  stw r17,24(r3)
+	  47: GETL       	R17, t34
+	  48: GETL       	R3, t36
+	  49: ADDL       	$0x18, t36
+	  50: STL       	t34, (t36)
+	  51: INCEIPL       	$4
+
+	0xFEB0460:  DA230070  stfd f17,112(r3)
+	  52: GETL       	R3, t38
+	  53: ADDL       	$0x70, t38
+	  54: FPU_WQ       	0x0:0x11, (t38)
+	  55: INCEIPL       	$4
+
+	0xFEB0464:  9243001C  stw r18,28(r3)
+	  56: GETL       	R18, t40
+	  57: GETL       	R3, t42
+	  58: ADDL       	$0x1C, t42
+	  59: STL       	t40, (t42)
+	  60: INCEIPL       	$4
+
+	0xFEB0468:  DA430078  stfd f18,120(r3)
+	  61: GETL       	R3, t44
+	  62: ADDL       	$0x78, t44
+	  63: FPU_WQ       	0x0:0x12, (t44)
+	  64: INCEIPL       	$4
+
+	0xFEB046C:  92630020  stw r19,32(r3)
+	  65: GETL       	R19, t46
+	  66: GETL       	R3, t48
+	  67: ADDL       	$0x20, t48
+	  68: STL       	t46, (t48)
+	  69: INCEIPL       	$4
+
+	0xFEB0470:  DA630080  stfd f19,128(r3)
+	  70: GETL       	R3, t50
+	  71: ADDL       	$0x80, t50
+	  72: FPU_WQ       	0x0:0x13, (t50)
+	  73: INCEIPL       	$4
+
+	0xFEB0474:  92830024  stw r20,36(r3)
+	  74: GETL       	R20, t52
+	  75: GETL       	R3, t54
+	  76: ADDL       	$0x24, t54
+	  77: STL       	t52, (t54)
+	  78: INCEIPL       	$4
+
+	0xFEB0478:  DA830088  stfd f20,136(r3)
+	  79: GETL       	R3, t56
+	  80: ADDL       	$0x88, t56
+	  81: FPU_WQ       	0x0:0x14, (t56)
+	  82: INCEIPL       	$4
+
+	0xFEB047C:  92A30028  stw r21,40(r3)
+	  83: GETL       	R21, t58
+	  84: GETL       	R3, t60
+	  85: ADDL       	$0x28, t60
+	  86: STL       	t58, (t60)
+	  87: INCEIPL       	$4
+
+	0xFEB0480:  DAA30090  stfd f21,144(r3)
+	  88: GETL       	R3, t62
+	  89: ADDL       	$0x90, t62
+	  90: FPU_WQ       	0x0:0x15, (t62)
+	  91: INCEIPL       	$4
+
+	0xFEB0484:  92C3002C  stw r22,44(r3)
+	  92: GETL       	R22, t64
+	  93: GETL       	R3, t66
+	  94: ADDL       	$0x2C, t66
+	  95: STL       	t64, (t66)
+	  96: INCEIPL       	$4
+
+	0xFEB0488:  DAC30098  stfd f22,152(r3)
+	  97: GETL       	R3, t68
+	  98: ADDL       	$0x98, t68
+	  99: FPU_WQ       	0x0:0x16, (t68)
+	 100: INCEIPL       	$4
+
+	0xFEB048C:  92E30030  stw r23,48(r3)
+	 101: GETL       	R23, t70
+	 102: GETL       	R3, t72
+	 103: ADDL       	$0x30, t72
+	 104: STL       	t70, (t72)
+	 105: INCEIPL       	$4
+
+	0xFEB0490:  DAE300A0  stfd f23,160(r3)
+	 106: GETL       	R3, t74
+	 107: ADDL       	$0xA0, t74
+	 108: FPU_WQ       	0x0:0x17, (t74)
+	 109: INCEIPL       	$4
+
+	0xFEB0494:  93030034  stw r24,52(r3)
+	 110: GETL       	R24, t76
+	 111: GETL       	R3, t78
+	 112: ADDL       	$0x34, t78
+	 113: STL       	t76, (t78)
+	 114: INCEIPL       	$4
+
+	0xFEB0498:  DB0300A8  stfd f24,168(r3)
+	 115: GETL       	R3, t80
+	 116: ADDL       	$0xA8, t80
+	 117: FPU_WQ       	0x0:0x18, (t80)
+	 118: INCEIPL       	$4
+
+	0xFEB049C:  93230038  stw r25,56(r3)
+	 119: GETL       	R25, t82
+	 120: GETL       	R3, t84
+	 121: ADDL       	$0x38, t84
+	 122: STL       	t82, (t84)
+	 123: INCEIPL       	$4
+
+	0xFEB04A0:  DB2300B0  stfd f25,176(r3)
+	 124: GETL       	R3, t86
+	 125: ADDL       	$0xB0, t86
+	 126: FPU_WQ       	0x0:0x19, (t86)
+	 127: INCEIPL       	$4
+
+	0xFEB04A4:  9343003C  stw r26,60(r3)
+	 128: GETL       	R26, t88
+	 129: GETL       	R3, t90
+	 130: ADDL       	$0x3C, t90
+	 131: STL       	t88, (t90)
+	 132: INCEIPL       	$4
+
+	0xFEB04A8:  DB4300B8  stfd f26,184(r3)
+	 133: GETL       	R3, t92
+	 134: ADDL       	$0xB8, t92
+	 135: FPU_WQ       	0x0:0x1A, (t92)
+	 136: INCEIPL       	$4
+
+	0xFEB04AC:  93630040  stw r27,64(r3)
+	 137: GETL       	R27, t94
+	 138: GETL       	R3, t96
+	 139: ADDL       	$0x40, t96
+	 140: STL       	t94, (t96)
+	 141: INCEIPL       	$4
+
+	0xFEB04B0:  DB6300C0  stfd f27,192(r3)
+	 142: GETL       	R3, t98
+	 143: ADDL       	$0xC0, t98
+	 144: FPU_WQ       	0x0:0x1B, (t98)
+	 145: INCEIPL       	$4
+
+	0xFEB04B4:  93830044  stw r28,68(r3)
+	 146: GETL       	R28, t100
+	 147: GETL       	R3, t102
+	 148: ADDL       	$0x44, t102
+	 149: STL       	t100, (t102)
+	 150: INCEIPL       	$4
+
+	0xFEB04B8:  DB8300C8  stfd f28,200(r3)
+	 151: GETL       	R3, t104
+	 152: ADDL       	$0xC8, t104
+	 153: FPU_WQ       	0x0:0x1C, (t104)
+	 154: INCEIPL       	$4
+
+	0xFEB04BC:  93A30048  stw r29,72(r3)
+	 155: GETL       	R29, t106
+	 156: GETL       	R3, t108
+	 157: ADDL       	$0x48, t108
+	 158: STL       	t106, (t108)
+	 159: INCEIPL       	$4
+
+	0xFEB04C0:  DBA300D0  stfd f29,208(r3)
+	 160: GETL       	R3, t110
+	 161: ADDL       	$0xD0, t110
+	 162: FPU_WQ       	0x0:0x1D, (t110)
+	 163: INCEIPL       	$4
+
+	0xFEB04C4:  93C3004C  stw r30,76(r3)
+	 164: GETL       	R30, t112
+	 165: GETL       	R3, t114
+	 166: ADDL       	$0x4C, t114
+	 167: STL       	t112, (t114)
+	 168: INCEIPL       	$4
+
+	0xFEB04C8:  DBC300D8  stfd f30,216(r3)
+	 169: GETL       	R3, t116
+	 170: ADDL       	$0xD8, t116
+	 171: FPU_WQ       	0x0:0x1E, (t116)
+	 172: INCEIPL       	$4
+
+	0xFEB04CC:  93E30050  stw r31,80(r3)
+	 173: GETL       	R31, t118
+	 174: GETL       	R3, t120
+	 175: ADDL       	$0x50, t120
+	 176: STL       	t118, (t120)
+	 177: INCEIPL       	$4
+
+	0xFEB04D0:  DBE300E0  stfd f31,224(r3)
+	 178: GETL       	R3, t122
+	 179: ADDL       	$0xE0, t122
+	 180: FPU_WQ       	0x0:0x1F, (t122)
+	 181: INCEIPL       	$4
+
+	0xFEB04D4:  7CC802A6  mflr r6
+	 182: GETL       	LR, t124
+	 183: PUTL       	t124, R6
+	 184: INCEIPL       	$4
+
+	0xFEB04D8:  48117979  bl 0xFFC7E50
+	 185: MOVL       	$0xFEB04DC, t126
+	 186: PUTL       	t126, LR
+	 187: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FEB0430 172
+. 90 23 00 00 7C 08 02 A6 91 C3 00 0C D9 C3 00 58 90 03 00 08 91 E3 00 10 D9 E3 00 60 7C 00 00 26 92 03 00 14 DA 03 00 68 90 03 00 54 92 23 00 18 DA 23 00 70 92 43 00 1C DA 43 00 78 92 63 00 20 DA 63 00 80 92 83 00 24 DA 83 00 88 92 A3 00 28 DA A3 00 90 92 C3 00 2C DA C3 00 98 92 E3 00 30 DA E3 00 A0 93 03 00 34 DB 03 00 A8 93 23 00 38 DB 23 00 B0 93 43 00 3C DB 43 00 B8 93 63 00 40 DB 63 00 C0 93 83 00 44 DB 83 00 C8 93 A3 00 48 DB A3 00 D0 93 C3 00 4C DB C3 00 D8 93 E3 00 50 DB E3 00 E0 7C C8 02 A6 48 11 79 79
+
+==== BB 1373 (0xFEB04DC) approx BBs exec'd 0 ====
+
+	0xFEB04DC:  7CA802A6  mflr r5
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R5
+	   2: INCEIPL       	$4
+
+	0xFEB04E0:  80A51BC8  lwz r5,7112(r5)
+	   3: GETL       	R5, t2
+	   4: ADDL       	$0x1BC8, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R5
+	   7: INCEIPL       	$4
+
+	0xFEB04E4:  7CC803A6  mtlr r6
+	   8: GETL       	R6, t6
+	   9: PUTL       	t6, LR
+	  10: INCEIPL       	$4
+
+	0xFEB04E8:  80A5003C  lwz r5,60(r5)
+	  11: GETL       	R5, t8
+	  12: ADDL       	$0x3C, t8
+	  13: LDL       	(t8), t10
+	  14: PUTL       	t10, R5
+	  15: INCEIPL       	$4
+
+	0xFEB04EC:  74A51000  andis. r5,r5,0x1000
+	  16: GETL       	R5, t12
+	  17: ANDL       	$0x10000000, t12
+	  18: PUTL       	t12, R5
+	  19: CMP0L       	t12, t14  (-rSo)
+	  20: ICRFL       	t14, $0x0, CR
+	  21: INCEIPL       	$4
+
+	0xFEB04F0:  41820158  bc 12,2,0xFEB0648
+	  22: Js02o       	$0xFEB0648
+
+
+. 0 FEB04DC 24
+. 7C A8 02 A6 80 A5 1B C8 7C C8 03 A6 80 A5 00 3C 74 A5 10 00 41 82 01 58
+
+==== BB 1374 (0xFEB0648) approx BBs exec'd 0 ====
+
+	0xFEB0648:  480000AC  b 0xFEB06F4
+	   0: JMPo       	$0xFEB06F4  ($4)
+
+
+. 0 FEB0648 4
+. 48 00 00 AC
+
+==== BB 1375 __vmx__sigjmp_save(0xFEB06F4) approx BBs exec'd 0 ====
+
+	0xFEB06F4:  2F840000  cmpi cr7,r4,0
+	   0: GETL       	R4, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFEB06F8:  7C0802A6  mflr r0
+	   4: GETL       	LR, t4
+	   5: PUTL       	t4, R0
+	   6: INCEIPL       	$4
+
+	0xFEB06FC:  9421FFE0  stwu r1,-32(r1)
+	   7: GETL       	R1, t6
+	   8: GETL       	R1, t8
+	   9: ADDL       	$0xFFFFFFE0, t8
+	  10: PUTL       	t8, R1
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEB0700:  38A301C4  addi r5,r3,452
+	  13: GETL       	R3, t10
+	  14: ADDL       	$0x1C4, t10
+	  15: PUTL       	t10, R5
+	  16: INCEIPL       	$4
+
+	0xFEB0704:  93A10014  stw r29,20(r1)
+	  17: GETL       	R29, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x14, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0xFEB0708:  38800000  li r4,0
+	  22: MOVL       	$0x0, t16
+	  23: PUTL       	t16, R4
+	  24: INCEIPL       	$4
+
+	0xFEB070C:  93E1001C  stw r31,28(r1)
+	  25: GETL       	R31, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFEB0710:  3BA00000  li r29,0
+	  30: MOVL       	$0x0, t22
+	  31: PUTL       	t22, R29
+	  32: INCEIPL       	$4
+
+	0xFEB0714:  7C7F1B78  or r31,r3,r3
+	  33: GETL       	R3, t24
+	  34: PUTL       	t24, R31
+	  35: INCEIPL       	$4
+
+	0xFEB0718:  93C10018  stw r30,24(r1)
+	  36: GETL       	R30, t26
+	  37: GETL       	R1, t28
+	  38: ADDL       	$0x18, t28
+	  39: STL       	t26, (t28)
+	  40: INCEIPL       	$4
+
+	0xFEB071C:  90010024  stw r0,36(r1)
+	  41: GETL       	R0, t30
+	  42: GETL       	R1, t32
+	  43: ADDL       	$0x24, t32
+	  44: STL       	t30, (t32)
+	  45: INCEIPL       	$4
+
+	0xFEB0720:  38600000  li r3,0
+	  46: MOVL       	$0x0, t34
+	  47: PUTL       	t34, R3
+	  48: INCEIPL       	$4
+
+	0xFEB0724:  409E0028  bc 4,30,0xFEB074C
+	  49: Jc30o       	$0xFEB074C
+
+
+. 0 FEB06F4 52
+. 2F 84 00 00 7C 08 02 A6 94 21 FF E0 38 A3 01 C4 93 A1 00 14 38 80 00 00 93 E1 00 1C 3B A0 00 00 7C 7F 1B 78 93 C1 00 18 90 01 00 24 38 60 00 00 40 9E 00 28
+
+==== BB 1376 (0xFEB0728) approx BBs exec'd 0 ====
+
+	0xFEB0728:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEB072C:  38600000  li r3,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEB0730:  93BF01C0  stw r29,448(r31)
+	   8: GETL       	R29, t6
+	   9: GETL       	R31, t8
+	  10: ADDL       	$0x1C0, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEB0734:  83A10014  lwz r29,20(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x14, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R29
+	  17: INCEIPL       	$4
+
+	0xFEB0738:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFEB073C:  83C10018  lwz r30,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R30
+	  25: INCEIPL       	$4
+
+	0xFEB0740:  83E1001C  lwz r31,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFEB0744:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0xFEB0748:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+. 0 FEB0728 36
+. 80 81 00 24 38 60 00 00 93 BF 01 C0 83 A1 00 14 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1377 (0xFE9B800) approx BBs exec'd 0 ====
+
+	0xFE9B800:  2C830000  cmpi cr1,r3,0
+	   0: GETL       	R3, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x1, CR
+	   3: INCEIPL       	$4
+
+	0xFE9B804:  40860068  bc 4,6,0xFE9B86C
+	   4: Jc06o       	$0xFE9B86C
+
+
+. 0 FE9B800 8
+. 2C 83 00 00 40 86 00 68
+
+==== BB 1378 (0xFE9B808) approx BBs exec'd 0 ====
+
+	0xFE9B808:  83A28BF4  lwz r29,-29708(r2)
+	   0: GETL       	R2, t0
+	   1: ADDL       	$0xFFFF8BF4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R29
+	   4: INCEIPL       	$4
+
+	0xFE9B80C:  3B610010  addi r27,r1,16
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x10, t4
+	   7: PUTL       	t4, R27
+	   8: INCEIPL       	$4
+
+	0xFE9B810:  819E1B84  lwz r12,7044(r30)
+	   9: GETL       	R30, t6
+	  10: ADDL       	$0x1B84, t6
+	  11: LDL       	(t6), t8
+	  12: PUTL       	t8, R12
+	  13: INCEIPL       	$4
+
+	0xFE9B814:  93A101E0  stw r29,480(r1)
+	  14: GETL       	R29, t10
+	  15: GETL       	R1, t12
+	  16: ADDL       	$0x1E0, t12
+	  17: STL       	t10, (t12)
+	  18: INCEIPL       	$4
+
+	0xFE9B818:  83828BF0  lwz r28,-29712(r2)
+	  19: GETL       	R2, t14
+	  20: ADDL       	$0xFFFF8BF0, t14
+	  21: LDL       	(t14), t16
+	  22: PUTL       	t16, R28
+	  23: INCEIPL       	$4
+
+	0xFE9B81C:  810101F0  lwz r8,496(r1)
+	  24: GETL       	R1, t18
+	  25: ADDL       	$0x1F0, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R8
+	  28: INCEIPL       	$4
+
+	0xFE9B820:  938101E4  stw r28,484(r1)
+	  29: GETL       	R28, t22
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x1E4, t24
+	  32: STL       	t22, (t24)
+	  33: INCEIPL       	$4
+
+	0xFE9B824:  806101F4  lwz r3,500(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x1F4, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R3
+	  38: INCEIPL       	$4
+
+	0xFE9B828:  7D0803A6  mtlr r8
+	  39: GETL       	R8, t30
+	  40: PUTL       	t30, LR
+	  41: INCEIPL       	$4
+
+	0xFE9B82C:  93628BF4  stw r27,-29708(r2)
+	  42: GETL       	R27, t32
+	  43: GETL       	R2, t34
+	  44: ADDL       	$0xFFFF8BF4, t34
+	  45: STL       	t32, (t34)
+	  46: INCEIPL       	$4
+
+	0xFE9B830:  808101F8  lwz r4,504(r1)
+	  47: GETL       	R1, t36
+	  48: ADDL       	$0x1F8, t36
+	  49: LDL       	(t36), t38
+	  50: PUTL       	t38, R4
+	  51: INCEIPL       	$4
+
+	0xFE9B834:  80AC0000  lwz r5,0(r12)
+	  52: GETL       	R12, t40
+	  53: LDL       	(t40), t42
+	  54: PUTL       	t42, R5
+	  55: INCEIPL       	$4
+
+	0xFE9B838:  80C101FC  lwz r6,508(r1)
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x1FC, t44
+	  58: LDL       	(t44), t46
+	  59: PUTL       	t46, R6
+	  60: INCEIPL       	$4
+
+	0xFE9B83C:  4E800021  blrl
+	  61: GETL       	LR, t48
+	  62: MOVL       	$0xFE9B840, t50
+	  63: PUTL       	t50, LR
+	  64: JMPo-r       	t48  ($4)
+
+
+. 0 FE9B808 56
+. 83 A2 8B F4 3B 61 00 10 81 9E 1B 84 93 A1 01 E0 83 82 8B F0 81 01 01 F0 93 81 01 E4 80 61 01 F4 7D 08 03 A6 93 62 8B F4 80 81 01 F8 80 AC 00 00 80 C1 01 FC 4E 80 00 21
+
+==== BB 1379 main(0x100003F0) approx BBs exec'd 0 ====
+
+	0x100003F0:  38600000  li r3,0
+	   0: MOVL       	$0x0, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0x100003F4:  4E800020  blr
+	   3: GETL       	LR, t2
+	   4: JMPo-r       	t2  ($4)
+
+
+. 0 100003F0 8
+. 38 60 00 00 4E 80 00 20
+
+==== BB 1380 (0xFE9B840) approx BBs exec'd 0 ====
+
+	0xFE9B840:  48018359  bl 0xFEB3B98
+	   0: MOVL       	$0xFE9B844, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0xFEB3B98  ($4)
+
+
+. 0 FE9B840 4
+. 48 01 83 59
+
+==== BB 1381 exit(0xFEB3B98) approx BBs exec'd 0 ====
+
+	0xFEB3B98:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEB3B9C:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEB3BA0:  481142B1  bl 0xFFC7E50
+	   9: MOVL       	$0xFEB3BA4, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FEB3B98 12
+. 94 21 FF E0 7C 08 02 A6 48 11 42 B1
+
+==== BB 1382 (0xFEB3BA4) approx BBs exec'd 0 ====
+
+	0xFEB3BA4:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEB3BA8:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEB3BAC:  93E1001C  stw r31,28(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x1C, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEB3BB0:  93810010  stw r28,16(r1)
+	  13: GETL       	R28, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x10, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEB3BB4:  7C7C1B78  or r28,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R28
+	  20: INCEIPL       	$4
+
+	0xFEB3BB8:  93A10014  stw r29,20(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEB3BBC:  83FE1AFC  lwz r31,6908(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x1AFC, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFEB3BC0:  90010024  stw r0,36(r1)
+	  31: GETL       	R0, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x24, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0xFEB3BC4:  801F0000  lwz r0,0(r31)
+	  36: GETL       	R31, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R0
+	  39: INCEIPL       	$4
+
+	0xFEB3BC8:  2F800000  cmpi cr7,r0,0
+	  40: GETL       	R0, t32
+	  41: CMP0L       	t32, t34  (-rSo)
+	  42: ICRFL       	t34, $0x7, CR
+	  43: INCEIPL       	$4
+
+	0xFEB3BCC:  419E00CC  bc 12,30,0xFEB3C98
+	  44: Js30o       	$0xFEB3C98
+
+
+. 0 FEB3BA4 44
+. 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 81 00 10 7C 7C 1B 78 93 A1 00 14 83 FE 1A FC 90 01 00 24 80 1F 00 00 2F 80 00 00 41 9E 00 CC
+
+==== BB 1383 (0xFEB3BD0) approx BBs exec'd 0 ====
+
+	0xFEB3BD0:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFEB3BD4:  80690004  lwz r3,4(r9)
+	   4: GETL       	R9, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFEB3BD8:  2C030000  cmpi cr0,r3,0
+	   9: GETL       	R3, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x0, CR
+	  12: INCEIPL       	$4
+
+	0xFEB3BDC:  41820078  bc 12,2,0xFEB3C54
+	  13: Js02o       	$0xFEB3C54
+
+
+. 0 FEB3BD0 16
+. 81 3F 00 00 80 69 00 04 2C 03 00 00 41 82 00 78
+
+==== BB 1384 (0xFEB3BE0) approx BBs exec'd 0 ====
+
+	0xFEB3BE0:  7D2A4B78  or r10,r9,r9
+	   0: GETL       	R9, t0
+	   1: PUTL       	t0, R10
+	   2: INCEIPL       	$4
+
+	0xFEB3BE4:  48000018  b 0xFEB3BFC
+	   3: JMPo       	$0xFEB3BFC  ($4)
+
+
+. 0 FEB3BE0 8
+. 7D 2A 4B 78 48 00 00 18
+
+==== BB 1385 (0xFEB3BFC) approx BBs exec'd 0 ====
+
+	0xFEB3BFC:  80AA0004  lwz r5,4(r10)
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFEB3C00:  3925FFFF  addi r9,r5,-1
+	   5: GETL       	R5, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R9
+	   8: INCEIPL       	$4
+
+	0xFEB3C04:  55242036  rlwinm r4,r9,4,0,27
+	   9: GETL       	R9, t6
+	  10: SHLL       	$0x4, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0xFEB3C08:  912A0004  stw r9,4(r10)
+	  13: GETL       	R9, t8
+	  14: GETL       	R10, t10
+	  15: ADDL       	$0x4, t10
+	  16: STL       	t8, (t10)
+	  17: INCEIPL       	$4
+
+	0xFEB3C0C:  7D645214  add r11,r4,r10
+	  18: GETL       	R4, t12
+	  19: GETL       	R10, t14
+	  20: ADDL       	t12, t14
+	  21: PUTL       	t14, R11
+	  22: INCEIPL       	$4
+
+	0xFEB3C10:  800B0008  lwz r0,8(r11)
+	  23: GETL       	R11, t16
+	  24: ADDL       	$0x8, t16
+	  25: LDL       	(t16), t18
+	  26: PUTL       	t18, R0
+	  27: INCEIPL       	$4
+
+	0xFEB3C14:  396B0008  addi r11,r11,8
+	  28: GETL       	R11, t20
+	  29: ADDL       	$0x8, t20
+	  30: PUTL       	t20, R11
+	  31: INCEIPL       	$4
+
+	0xFEB3C18:  2F800003  cmpi cr7,r0,3
+	  32: GETL       	R0, t22
+	  33: MOVL       	$0x3, t26
+	  34: CMPL       	t22, t26, t24  (-rSo)
+	  35: ICRFL       	t24, $0x7, CR
+	  36: INCEIPL       	$4
+
+	0xFEB3C1C:  419E0054  bc 12,30,0xFEB3C70
+	  37: Js30o       	$0xFEB3C70
+
+
+. 0 FEB3BFC 36
+. 80 AA 00 04 39 25 FF FF 55 24 20 36 91 2A 00 04 7D 64 52 14 80 0B 00 08 39 6B 00 08 2F 80 00 03 41 9E 00 54
+
+==== BB 1386 (0xFEB3C20) approx BBs exec'd 0 ====
+
+	0xFEB3C20:  2C800002  cmpi cr1,r0,2
+	   0: GETL       	R0, t0
+	   1: MOVL       	$0x2, t4
+	   2: CMPL       	t0, t4, t2  (-rSo)
+	   3: ICRFL       	t2, $0x1, CR
+	   4: INCEIPL       	$4
+
+	0xFEB3C24:  2F000004  cmpi cr6,r0,4
+	   5: GETL       	R0, t6
+	   6: MOVL       	$0x4, t10
+	   7: CMPL       	t6, t10, t8  (-rSo)
+	   8: ICRFL       	t8, $0x6, CR
+	   9: INCEIPL       	$4
+
+	0xFEB3C28:  40BDFFC0  bc 5,29,0xFEB3BE8
+	  10: Jc29o       	$0xFEB3BE8
+
+
+. 0 FEB3C20 12
+. 2C 80 00 02 2F 00 00 04 40 BD FF C0
+
+==== BB 1387 (0xFEB3C2C) approx BBs exec'd 0 ====
+
+	0xFEB3C2C:  409AFFC0  bc 4,26,0xFEB3BEC
+	   0: Jc26o       	$0xFEB3BEC
+
+
+. 0 FEB3C2C 4
+. 40 9A FF C0
+
+==== BB 1388 (0xFEB3C30) approx BBs exec'd 0 ====
+
+	0xFEB3C30:  810B0004  lwz r8,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0xFEB3C34:  7F84E378  or r4,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R4
+	   7: INCEIPL       	$4
+
+	0xFEB3C38:  806B0008  lwz r3,8(r11)
+	   8: GETL       	R11, t6
+	   9: ADDL       	$0x8, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R3
+	  12: INCEIPL       	$4
+
+	0xFEB3C3C:  7D0903A6  mtctr r8
+	  13: GETL       	R8, t10
+	  14: PUTL       	t10, CTR
+	  15: INCEIPL       	$4
+
+	0xFEB3C40:  4E800421  bctrl
+	  16: MOVL       	$0xFEB3C44, t12
+	  17: PUTL       	t12, LR
+	  18: GETL       	CTR, t14
+	  19: JMPo-c       	t14  ($4)
+
+
+. 0 FEB3C30 20
+. 81 0B 00 04 7F 84 E3 78 80 6B 00 08 7D 09 03 A6 4E 80 04 21
+
+==== BB 1389 __libc_csu_fini(0x1000048C) approx BBs exec'd 0 ====
+
+	0x1000048C:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x10000490:  7CA802A6  mflr r5
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x10000494:  429F0005  bcl 20,31,0x10000498
+	   9: MOVL       	$0x10000498, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0x10000498:  93C10018  stw r30,24(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x18, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0x1000049C:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0x100004A0:  93E1001C  stw r31,28(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x1C, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0x100004A4:  93A10014  stw r29,20(r1)
+	  25: GETL       	R29, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0x100004A8:  90A10024  stw r5,36(r1)
+	  30: GETL       	R5, t22
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x24, t24
+	  33: STL       	t22, (t24)
+	  34: INCEIPL       	$4
+
+	0x100004AC:  809EFFF0  lwz r4,-16(r30)
+	  35: GETL       	R30, t26
+	  36: ADDL       	$0xFFFFFFF0, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R4
+	  39: INCEIPL       	$4
+
+	0x100004B0:  7FC4F214  add r30,r4,r30
+	  40: GETL       	R4, t30
+	  41: GETL       	R30, t32
+	  42: ADDL       	t30, t32
+	  43: PUTL       	t32, R30
+	  44: INCEIPL       	$4
+
+	0x100004B4:  817E800C  lwz r11,-32756(r30)
+	  45: GETL       	R30, t34
+	  46: ADDL       	$0xFFFF800C, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R11
+	  49: INCEIPL       	$4
+
+	0x100004B8:  807E8008  lwz r3,-32760(r30)
+	  50: GETL       	R30, t38
+	  51: ADDL       	$0xFFFF8008, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R3
+	  54: INCEIPL       	$4
+
+	0x100004BC:  7C0B1850  subf r0,r11,r3
+	  55: GETL       	R11, t42
+	  56: GETL       	R3, t44
+	  57: SUBL       	t42, t44
+	  58: PUTL       	t44, R0
+	  59: INCEIPL       	$4
+
+	0x100004C0:  7C091670  srawi r9,r0,2
+	  60: GETL       	R0, t46
+	  61: SARL       	$0x2, t46  (-wCa)
+	  62: PUTL       	t46, R9
+	  63: INCEIPL       	$4
+
+	0x100004C4:  2F890000  cmpi cr7,r9,0
+	  64: GETL       	R9, t48
+	  65: CMP0L       	t48, t50  (-rSo)
+	  66: ICRFL       	t50, $0x7, CR
+	  67: INCEIPL       	$4
+
+	0x100004C8:  3BE9FFFF  addi r31,r9,-1
+	  68: GETL       	R9, t52
+	  69: ADDL       	$0xFFFFFFFF, t52
+	  70: PUTL       	t52, R31
+	  71: INCEIPL       	$4
+
+	0x100004CC:  419E0028  bc 12,30,0x100004F4
+	  72: Js30o       	$0x100004F4
+
+
+. 0 1000048C 68
+. 94 21 FF E0 7C A8 02 A6 42 9F 00 05 93 C1 00 18 7F C8 02 A6 93 E1 00 1C 93 A1 00 14 90 A1 00 24 80 9E FF F0 7F C4 F2 14 81 7E 80 0C 80 7E 80 08 7C 0B 18 50 7C 09 16 70 2F 89 00 00 3B E9 FF FF 41 9E 00 28
+
+==== BB 1390 (0x100004F4) approx BBs exec'd 0 ====
+
+	0x100004F4:  48000269  bl 0x1000075C
+	   0: MOVL       	$0x100004F8, t0
+	   1: PUTL       	t0, LR
+	   2: JMPo-c       	$0x1000075C  ($4)
+
+
+. 0 100004F4 4
+. 48 00 02 69
+
+==== BB 1391 (0x1000075C) approx BBs exec'd 0 ====
+
+	0x1000075C:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x10000760:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x10000764:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x10000768:  4BFFFB9D  bl 0x10000304
+	  14: MOVL       	$0x1000076C, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0x10000304  ($4)
+
+
+. 0 1000075C 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FF FB 9D
+
+==== BB 1392 (0x10000304) approx BBs exec'd 0 ====
+
+	0x10000304:  7C0802A6  mflr r0
+	   0: GETL       	LR, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10000308:  9421FFF0  stwu r1,-16(r1)
+	   3: GETL       	R1, t2
+	   4: GETL       	R1, t4
+	   5: ADDL       	$0xFFFFFFF0, t4
+	   6: PUTL       	t4, R1
+	   7: STL       	t2, (t4)
+	   8: INCEIPL       	$4
+
+	0x1000030C:  93C10008  stw r30,8(r1)
+	   9: GETL       	R30, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x8, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x10000310:  3FC01001  lis r30,4097
+	  14: MOVL       	$0x10010000, t10
+	  15: PUTL       	t10, R30
+	  16: INCEIPL       	$4
+
+	0x10000314:  93E1000C  stw r31,12(r1)
+	  17: GETL       	R31, t12
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0xC, t14
+	  20: STL       	t12, (t14)
+	  21: INCEIPL       	$4
+
+	0x10000318:  90010014  stw r0,20(r1)
+	  22: GETL       	R0, t16
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x14, t18
+	  25: STL       	t16, (t18)
+	  26: INCEIPL       	$4
+
+	0x1000031C:  881E08F0  lbz r0,2288(r30)
+	  27: MOVL       	$0x100108F0, t20
+	  28: LDB       	(t20), t22
+	  29: PUTL       	t22, R0
+	  30: INCEIPL       	$4
+
+	0x10000320:  2F800000  cmpi cr7,r0,0
+	  31: GETL       	R0, t24
+	  32: CMP0L       	t24, t26  (-rSo)
+	  33: ICRFL       	t26, $0x7, CR
+	  34: INCEIPL       	$4
+
+	0x10000324:  409E0034  bc 4,30,0x10000358
+	  35: Jc30o       	$0x10000358
+
+
+. 0 10000304 36
+. 7C 08 02 A6 94 21 FF F0 93 C1 00 08 3F C0 10 01 93 E1 00 0C 90 01 00 14 88 1E 08 F0 2F 80 00 00 40 9E 00 34
+
+==== BB 1393 (0x10000328) approx BBs exec'd 0 ====
+
+	0x10000328:  3FE01001  lis r31,4097
+	   0: MOVL       	$0x10010000, t0
+	   1: PUTL       	t0, R31
+	   2: INCEIPL       	$4
+
+	0x1000032C:  48000014  b 0x10000340
+	   3: JMPo       	$0x10000340  ($4)
+
+
+. 0 10000328 8
+. 3F E0 10 01 48 00 00 14
+
+==== BB 1394 (0x10000340) approx BBs exec'd 0 ====
+
+	0x10000340:  817F0884  lwz r11,2180(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x884, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x10000344:  812B0000  lwz r9,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R9
+	   8: INCEIPL       	$4
+
+	0x10000348:  2F890000  cmpi cr7,r9,0
+	   9: GETL       	R9, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0x1000034C:  409EFFE4  bc 4,30,0x10000330
+	  13: Jc30o       	$0x10000330
+
+
+. 0 10000340 16
+. 81 7F 08 84 81 2B 00 00 2F 89 00 00 40 9E FF E4
+
+==== BB 1395 (0x10000350) approx BBs exec'd 0 ====
+
+	0x10000350:  38000001  li r0,1
+	   0: MOVL       	$0x1, t0
+	   1: PUTL       	t0, R0
+	   2: INCEIPL       	$4
+
+	0x10000354:  981E08F0  stb r0,2288(r30)
+	   3: GETL       	R0, t2
+	   4: GETL       	R30, t4
+	   5: ADDL       	$0x8F0, t4
+	   6: STB       	t2, (t4)
+	   7: INCEIPL       	$4
+
+	0x10000358:  80010014  lwz r0,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R0
+	  12: INCEIPL       	$4
+
+	0x1000035C:  83C10008  lwz r30,8(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0x10000360:  83E1000C  lwz r31,12(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0xC, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0x10000364:  7C0803A6  mtlr r0
+	  23: GETL       	R0, t18
+	  24: PUTL       	t18, LR
+	  25: INCEIPL       	$4
+
+	0x10000368:  38210010  addi r1,r1,16
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x10, t20
+	  28: PUTL       	t20, R1
+	  29: INCEIPL       	$4
+
+	0x1000036C:  4E800020  blr
+	  30: GETL       	LR, t22
+	  31: JMPo-r       	t22  ($4)
+
+
+. 0 10000350 32
+. 38 00 00 01 98 1E 08 F0 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1396 (0x1000076C) approx BBs exec'd 0 ====
+
+	0x1000076C:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0x10000770:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0x10000774:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0x10000778:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+. 0 1000076C 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1397 (0x100004F8) approx BBs exec'd 0 ====
+
+	0x100004F8:  81010024  lwz r8,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x100004FC:  83A10014  lwz r29,20(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x14, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x10000500:  83C10018  lwz r30,24(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x18, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R30
+	  14: INCEIPL       	$4
+
+	0x10000504:  7D0803A6  mtlr r8
+	  15: GETL       	R8, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0x10000508:  83E1001C  lwz r31,28(r1)
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x1C, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R31
+	  22: INCEIPL       	$4
+
+	0x1000050C:  38210020  addi r1,r1,32
+	  23: GETL       	R1, t18
+	  24: ADDL       	$0x20, t18
+	  25: PUTL       	t18, R1
+	  26: INCEIPL       	$4
+
+	0x10000510:  4E800020  blr
+	  27: GETL       	LR, t20
+	  28: JMPo-r       	t20  ($4)
+
+
+. 0 100004F8 28
+. 81 01 00 24 83 A1 00 14 83 C1 00 18 7D 08 03 A6 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1398 (0xFEB3C44) approx BBs exec'd 0 ====
+
+	0xFEB3C44:  815F0000  lwz r10,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFEB3C48:  818A0004  lwz r12,4(r10)
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R12
+	   8: INCEIPL       	$4
+
+	0xFEB3C4C:  2C8C0000  cmpi cr1,r12,0
+	   9: GETL       	R12, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x1, CR
+	  12: INCEIPL       	$4
+
+	0xFEB3C50:  4086FFAC  bc 4,6,0xFEB3BFC
+	  13: Jc06o       	$0xFEB3BFC
+
+
+. 0 FEB3C44 16
+. 81 5F 00 00 81 8A 00 04 2C 8C 00 00 40 86 FF AC
+
+==== BB 1399 _dl_fini(0x2547BAE0) approx BBs exec'd 0 ====
+
+	0x2547BAE0:  9421FFA0  stwu r1,-96(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFA0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0x2547BAE4:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0x2547BAE8:  4801B519  bl 0x25497000
+	   9: MOVL       	$0x2547BAEC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0x25497000  ($4)
+
+
+. 0 2547BAE0 12
+. 94 21 FF A0 7C 08 02 A6 48 01 B5 19
+
+==== BB 1400 (0x2547BAEC) approx BBs exec'd 0 ====
+
+	0x2547BAEC:  93C10058  stw r30,88(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x58, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0x2547BAF0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0x2547BAF4:  7D800026  mfcr r12
+	   8: GETL       	CR, t6
+	   9: PUTL       	t6, R12
+	  10: INCEIPL       	$4
+
+	0x2547BAF8:  91C10018  stw r14,24(r1)
+	  11: GETL       	R14, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x18, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0x2547BAFC:  92410028  stw r18,40(r1)
+	  16: GETL       	R18, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x28, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0x2547BB00:  81DE04F4  lwz r14,1268(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x4F4, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R14
+	  25: INCEIPL       	$4
+
+	0x2547BB04:  825E04C8  lwz r18,1224(r30)
+	  26: GETL       	R30, t20
+	  27: ADDL       	$0x4C8, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R18
+	  30: INCEIPL       	$4
+
+	0x2547BB08:  91E1001C  stw r15,28(r1)
+	  31: GETL       	R15, t24
+	  32: GETL       	R1, t26
+	  33: ADDL       	$0x1C, t26
+	  34: STL       	t24, (t26)
+	  35: INCEIPL       	$4
+
+	0x2547BB0C:  39E0000F  li r15,15
+	  36: MOVL       	$0xF, t28
+	  37: PUTL       	t28, R15
+	  38: INCEIPL       	$4
+
+	0x2547BB10:  92010020  stw r16,32(r1)
+	  39: GETL       	R16, t30
+	  40: GETL       	R1, t32
+	  41: ADDL       	$0x20, t32
+	  42: STL       	t30, (t32)
+	  43: INCEIPL       	$4
+
+	0x2547BB14:  3A000000  li r16,0
+	  44: MOVL       	$0x0, t34
+	  45: PUTL       	t34, R16
+	  46: INCEIPL       	$4
+
+	0x2547BB18:  92210024  stw r17,36(r1)
+	  47: GETL       	R17, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x24, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0x2547BB1C:  3A200168  li r17,360
+	  52: MOVL       	$0x168, t40
+	  53: PUTL       	t40, R17
+	  54: INCEIPL       	$4
+
+	0x2547BB20:  92C10038  stw r22,56(r1)
+	  55: GETL       	R22, t42
+	  56: GETL       	R1, t44
+	  57: ADDL       	$0x38, t44
+	  58: STL       	t42, (t44)
+	  59: INCEIPL       	$4
+
+	0x2547BB24:  3AC00000  li r22,0
+	  60: MOVL       	$0x0, t46
+	  61: PUTL       	t46, R22
+	  62: INCEIPL       	$4
+
+	0x2547BB28:  93E1005C  stw r31,92(r1)
+	  63: GETL       	R31, t48
+	  64: GETL       	R1, t50
+	  65: ADDL       	$0x5C, t50
+	  66: STL       	t48, (t50)
+	  67: INCEIPL       	$4
+
+	0x2547BB2C:  7C3F0B78  or r31,r1,r1
+	  68: GETL       	R1, t52
+	  69: PUTL       	t52, R31
+	  70: INCEIPL       	$4
+
+	0x2547BB30:  9261002C  stw r19,44(r1)
+	  71: GETL       	R19, t54
+	  72: GETL       	R1, t56
+	  73: ADDL       	$0x2C, t56
+	  74: STL       	t54, (t56)
+	  75: INCEIPL       	$4
+
+	0x2547BB34:  92810030  stw r20,48(r1)
+	  76: GETL       	R20, t58
+	  77: GETL       	R1, t60
+	  78: ADDL       	$0x30, t60
+	  79: STL       	t58, (t60)
+	  80: INCEIPL       	$4
+
+	0x2547BB38:  92A10034  stw r21,52(r1)
+	  81: GETL       	R21, t62
+	  82: GETL       	R1, t64
+	  83: ADDL       	$0x34, t64
+	  84: STL       	t62, (t64)
+	  85: INCEIPL       	$4
+
+	0x2547BB3C:  92E1003C  stw r23,60(r1)
+	  86: GETL       	R23, t66
+	  87: GETL       	R1, t68
+	  88: ADDL       	$0x3C, t68
+	  89: STL       	t66, (t68)
+	  90: INCEIPL       	$4
+
+	0x2547BB40:  93010040  stw r24,64(r1)
+	  91: GETL       	R24, t70
+	  92: GETL       	R1, t72
+	  93: ADDL       	$0x40, t72
+	  94: STL       	t70, (t72)
+	  95: INCEIPL       	$4
+
+	0x2547BB44:  93210044  stw r25,68(r1)
+	  96: GETL       	R25, t74
+	  97: GETL       	R1, t76
+	  98: ADDL       	$0x44, t76
+	  99: STL       	t74, (t76)
+	 100: INCEIPL       	$4
+
+	0x2547BB48:  93410048  stw r26,72(r1)
+	 101: GETL       	R26, t78
+	 102: GETL       	R1, t80
+	 103: ADDL       	$0x48, t80
+	 104: STL       	t78, (t80)
+	 105: INCEIPL       	$4
+
+	0x2547BB4C:  9361004C  stw r27,76(r1)
+	 106: GETL       	R27, t82
+	 107: GETL       	R1, t84
+	 108: ADDL       	$0x4C, t84
+	 109: STL       	t82, (t84)
+	 110: INCEIPL       	$4
+
+	0x2547BB50:  93810050  stw r28,80(r1)
+	 111: GETL       	R28, t86
+	 112: GETL       	R1, t88
+	 113: ADDL       	$0x50, t88
+	 114: STL       	t86, (t88)
+	 115: INCEIPL       	$4
+
+	0x2547BB54:  93A10054  stw r29,84(r1)
+	 116: GETL       	R29, t90
+	 117: GETL       	R1, t92
+	 118: ADDL       	$0x54, t92
+	 119: STL       	t90, (t92)
+	 120: INCEIPL       	$4
+
+	0x2547BB58:  90010064  stw r0,100(r1)
+	 121: GETL       	R0, t94
+	 122: GETL       	R1, t96
+	 123: ADDL       	$0x64, t96
+	 124: STL       	t94, (t96)
+	 125: INCEIPL       	$4
+
+	0x2547BB5C:  91810014  stw r12,20(r1)
+	 126: GETL       	R12, t98
+	 127: GETL       	R1, t100
+	 128: ADDL       	$0x14, t100
+	 129: STL       	t98, (t100)
+	 130: INCEIPL       	$4
+
+	0x2547BB60:  809203F8  lwz r4,1016(r18)
+	 131: GETL       	R18, t102
+	 132: ADDL       	$0x3F8, t102
+	 133: LDL       	(t102), t104
+	 134: PUTL       	t104, R4
+	 135: INCEIPL       	$4
+
+	0x2547BB64:  38720180  addi r3,r18,384
+	 136: GETL       	R18, t106
+	 137: ADDL       	$0x180, t106
+	 138: PUTL       	t106, R3
+	 139: INCEIPL       	$4
+
+	0x2547BB68:  7C8903A6  mtctr r4
+	 140: GETL       	R4, t108
+	 141: PUTL       	t108, CTR
+	 142: INCEIPL       	$4
+
+	0x2547BB6C:  4E800421  bctrl
+	 143: MOVL       	$0x2547BB70, t110
+	 144: PUTL       	t110, LR
+	 145: GETL       	CTR, t112
+	 146: JMPo-c       	t112  ($4)
+
+
+. 0 2547BAEC 132
+. 93 C1 00 58 7F C8 02 A6 7D 80 00 26 91 C1 00 18 92 41 00 28 81 DE 04 F4 82 5E 04 C8 91 E1 00 1C 39 E0 00 0F 92 01 00 20 3A 00 00 00 92 21 00 24 3A 20 01 68 92 C1 00 38 3A C0 00 00 93 E1 00 5C 7C 3F 0B 78 92 61 00 2C 92 81 00 30 92 A1 00 34 92 E1 00 3C 93 01 00 40 93 21 00 44 93 41 00 48 93 61 00 4C 93 81 00 50 93 A1 00 54 90 01 00 64 91 81 00 14 80 92 03 F8 38 72 01 80 7C 89 03 A6 4E 80 04 21
+
+==== BB 1401 rtld_lock_default_lock_recursive(0x25471884) approx BBs exec'd 0 ====
+
+	0x25471884:  80830004  lwz r4,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x25471888:  9421FFF0  stwu r1,-16(r1)
+	   5: GETL       	R1, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0xFFFFFFF0, t6
+	   8: PUTL       	t6, R1
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x2547188C:  39240001  addi r9,r4,1
+	  11: GETL       	R4, t8
+	  12: ADDL       	$0x1, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x25471890:  38210010  addi r1,r1,16
+	  15: GETL       	R1, t10
+	  16: ADDL       	$0x10, t10
+	  17: PUTL       	t10, R1
+	  18: INCEIPL       	$4
+
+	0x25471894:  91230004  stw r9,4(r3)
+	  19: GETL       	R9, t12
+	  20: GETL       	R3, t14
+	  21: ADDL       	$0x4, t14
+	  22: STL       	t12, (t14)
+	  23: INCEIPL       	$4
+
+	0x25471898:  4E800020  blr
+	  24: GETL       	LR, t16
+	  25: JMPo-r       	t16  ($4)
+
+
+. 0 25471884 24
+. 80 83 00 04 94 21 FF F0 39 24 00 01 38 21 00 10 91 23 00 04 4E 80 00 20
+
+==== BB 1402 (0x2547BB70) approx BBs exec'd 0 ====
+
+	0x2547BB70:  7D319214  add r9,r17,r18
+	   0: GETL       	R17, t0
+	   1: GETL       	R18, t2
+	   2: ADDL       	t0, t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547BB74:  80690004  lwz r3,4(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547BB78:  546A103A  rlwinm r10,r3,2,0,29
+	  10: GETL       	R3, t8
+	  11: SHLL       	$0x2, t8
+	  12: PUTL       	t8, R10
+	  13: INCEIPL       	$4
+
+	0x2547BB7C:  7F8A8040  cmpl cr7,r10,r16
+	  14: GETL       	R10, t10
+	  15: GETL       	R16, t12
+	  16: CMPUL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547BB80:  409D002C  bc 4,29,0x2547BBAC
+	  19: Jc29o       	$0x2547BBAC
+
+
+. 0 2547BB70 20
+. 7D 31 92 14 80 69 00 04 54 6A 10 3A 7F 8A 80 40 40 9D 00 2C
+
+==== BB 1403 (0x2547BBAC) approx BBs exec'd 0 ====
+
+	0x2547BBAC:  7D71902E  lwzx r11,r17,r18
+	   0: GETL       	R18, t0
+	   1: GETL       	R17, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R11
+	   5: INCEIPL       	$4
+
+	0x2547BBB0:  3AE00000  li r23,0
+	   6: MOVL       	$0x0, t6
+	   7: PUTL       	t6, R23
+	   8: INCEIPL       	$4
+
+	0x2547BBB4:  2E0B0000  cmpi cr4,r11,0
+	   9: GETL       	R11, t8
+	  10: CMP0L       	t8, t10  (-rSo)
+	  11: ICRFL       	t10, $0x4, CR
+	  12: INCEIPL       	$4
+
+	0x2547BBB8:  41920020  bc 12,18,0x2547BBD8
+	  13: Js18o       	$0x2547BBD8
+
+
+. 0 2547BBAC 16
+. 7D 71 90 2E 3A E0 00 00 2E 0B 00 00 41 92 00 20
+
+==== BB 1404 (0x2547BBD8) approx BBs exec'd 0 ====
+
+	0x2547BBD8:  7EF3BB79  or. r19,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R19
+	   2: CMP0L       	t0, t2  (-rSo)
+	   3: ICRFL       	t2, $0x0, CR
+	   4: INCEIPL       	$4
+
+	0x2547BBDC:  41820110  bc 12,2,0x2547BCEC
+	   5: Js02o       	$0x2547BCEC
+
+
+. 0 2547BBD8 8
+. 7E F3 BB 79 41 82 01 10
+
+==== BB 1405 (0x2547BCEC) approx BBs exec'd 0 ====
+
+	0x2547BCEC:  80B203FC  lwz r5,1020(r18)
+	   0: GETL       	R18, t0
+	   1: ADDL       	$0x3FC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547BCF0:  3B400000  li r26,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R26
+	   7: INCEIPL       	$4
+
+	0x2547BCF4:  38720180  addi r3,r18,384
+	   8: GETL       	R18, t6
+	   9: ADDL       	$0x180, t6
+	  10: PUTL       	t6, R3
+	  11: INCEIPL       	$4
+
+	0x2547BCF8:  7E1A9840  cmpl cr4,r26,r19
+	  12: GETL       	R26, t8
+	  13: GETL       	R19, t10
+	  14: CMPUL       	t8, t10, t12  (-rSo)
+	  15: ICRFL       	t12, $0x4, CR
+	  16: INCEIPL       	$4
+
+	0x2547BCFC:  7CA903A6  mtctr r5
+	  17: GETL       	R5, t14
+	  18: PUTL       	t14, CTR
+	  19: INCEIPL       	$4
+
+	0x2547BD00:  4E800421  bctrl
+	  20: MOVL       	$0x2547BD04, t16
+	  21: PUTL       	t16, LR
+	  22: GETL       	CTR, t18
+	  23: JMPo-c       	t18  ($4)
+
+
+. 0 2547BCEC 24
+. 80 B2 03 FC 3B 40 00 00 38 72 01 80 7E 1A 98 40 7C A9 03 A6 4E 80 04 21
+
+==== BB 1406 rtld_lock_default_unlock_recursive(0x2547189C) approx BBs exec'd 0 ====
+
+	0x2547189C:  80830004  lwz r4,4(r3)
+	   0: GETL       	R3, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x254718A0:  9421FFF0  stwu r1,-16(r1)
+	   5: GETL       	R1, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0xFFFFFFF0, t6
+	   8: PUTL       	t6, R1
+	   9: STL       	t4, (t6)
+	  10: INCEIPL       	$4
+
+	0x254718A4:  3924FFFF  addi r9,r4,-1
+	  11: GETL       	R4, t8
+	  12: ADDL       	$0xFFFFFFFF, t8
+	  13: PUTL       	t8, R9
+	  14: INCEIPL       	$4
+
+	0x254718A8:  38210010  addi r1,r1,16
+	  15: GETL       	R1, t10
+	  16: ADDL       	$0x10, t10
+	  17: PUTL       	t10, R1
+	  18: INCEIPL       	$4
+
+	0x254718AC:  91230004  stw r9,4(r3)
+	  19: GETL       	R9, t12
+	  20: GETL       	R3, t14
+	  21: ADDL       	$0x4, t14
+	  22: STL       	t12, (t14)
+	  23: INCEIPL       	$4
+
+	0x254718B0:  4E800020  blr
+	  24: GETL       	LR, t16
+	  25: JMPo-r       	t16  ($4)
+
+
+. 0 2547189C 24
+. 80 83 00 04 94 21 FF F0 39 24 FF FF 38 21 00 10 91 23 00 04 4E 80 00 20
+
+==== BB 1407 (0x2547BD04) approx BBs exec'd 0 ====
+
+	0x2547BD04:  40900068  bc 4,16,0x2547BD6C
+	   0: Jc16o       	$0x2547BD6C
+
+
+. 0 2547BD04 4
+. 40 90 00 68
+
+==== BB 1408 (0x2547BD6C) approx BBs exec'd 0 ====
+
+	0x2547BD6C:  35EFFFFF  addic. r15,r15,-1
+	   0: GETL       	R15, t0
+	   1: ADCL       	$0xFFFFFFFF, t0  (-wCa)
+	   2: PUTL       	t0, R15
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547BD70:  3A31FFE8  addi r17,r17,-24
+	   6: GETL       	R17, t4
+	   7: ADDL       	$0xFFFFFFE8, t4
+	   8: PUTL       	t4, R17
+	   9: INCEIPL       	$4
+
+	0x2547BD74:  4080FDEC  bc 4,0,0x2547BB60
+	  10: Jc00o       	$0x2547BB60
+
+
+. 0 2547BD6C 12
+. 35 EF FF FF 3A 31 FF E8 40 80 FD EC
+
+==== BB 1409 (0x2547BB60) approx BBs exec'd 0 ====
+
+	0x2547BB60:  809203F8  lwz r4,1016(r18)
+	   0: GETL       	R18, t0
+	   1: ADDL       	$0x3F8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547BB64:  38720180  addi r3,r18,384
+	   5: GETL       	R18, t4
+	   6: ADDL       	$0x180, t4
+	   7: PUTL       	t4, R3
+	   8: INCEIPL       	$4
+
+	0x2547BB68:  7C8903A6  mtctr r4
+	   9: GETL       	R4, t6
+	  10: PUTL       	t6, CTR
+	  11: INCEIPL       	$4
+
+	0x2547BB6C:  4E800421  bctrl
+	  12: MOVL       	$0x2547BB70, t8
+	  13: PUTL       	t8, LR
+	  14: GETL       	CTR, t10
+	  15: JMPo-c       	t10  ($4)
+
+
+. 0 2547BB60 16
+. 80 92 03 F8 38 72 01 80 7C 89 03 A6 4E 80 04 21
+
+==== BB 1410 (0x2547BB84) approx BBs exec'd 0 ====
+
+	0x2547BB84:  2C100000  cmpi cr0,r16,0
+	   0: GETL       	R16, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0x2547BB88:  40820110  bc 4,2,0x2547BC98
+	   4: Jc02o       	$0x2547BC98
+
+
+. 0 2547BB84 8
+. 2C 10 00 00 40 82 01 10
+
+==== BB 1411 (0x2547BB8C) approx BBs exec'd 0 ====
+
+	0x2547BB8C:  390A001E  addi r8,r10,30
+	   0: GETL       	R10, t0
+	   1: ADDL       	$0x1E, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x2547BB90:  81610000  lwz r11,0(r1)
+	   4: GETL       	R1, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R11
+	   7: INCEIPL       	$4
+
+	0x2547BB94:  55070036  rlwinm r7,r8,0,0,27
+	   8: GETL       	R8, t6
+	   9: ANDL       	$0xFFFFFFF0, t6
+	  10: PUTL       	t6, R7
+	  11: INCEIPL       	$4
+
+	0x2547BB98:  7D505378  or r16,r10,r10
+	  12: GETL       	R10, t8
+	  13: PUTL       	t8, R16
+	  14: INCEIPL       	$4
+
+	0x2547BB9C:  7CC700D0  neg r6,r7
+	  15: GETL       	R7, t10
+	  16: NEGL       	t10
+	  17: PUTL       	t10, R6
+	  18: INCEIPL       	$4
+
+	0x2547BBA0:  7D61316E  stwux r11,r1,r6
+	  19: GETL       	R6, t12
+	  20: GETL       	R1, t14
+	  21: ADDL       	t14, t12
+	  22: PUTL       	t12, R1
+	  23: GETL       	R11, t16
+	  24: STL       	t16, (t12)
+	  25: INCEIPL       	$4
+
+	0x2547BBA4:  38A10017  addi r5,r1,23
+	  26: GETL       	R1, t18
+	  27: ADDL       	$0x17, t18
+	  28: PUTL       	t18, R5
+	  29: INCEIPL       	$4
+
+	0x2547BBA8:  54B60036  rlwinm r22,r5,0,0,27
+	  30: GETL       	R5, t20
+	  31: ANDL       	$0xFFFFFFF0, t20
+	  32: PUTL       	t20, R22
+	  33: INCEIPL       	$4
+
+	0x2547BBAC:  7D71902E  lwzx r11,r17,r18
+	  34: GETL       	R18, t22
+	  35: GETL       	R17, t24
+	  36: ADDL       	t24, t22
+	  37: LDL       	(t22), t26
+	  38: PUTL       	t26, R11
+	  39: INCEIPL       	$4
+
+	0x2547BBB0:  3AE00000  li r23,0
+	  40: MOVL       	$0x0, t28
+	  41: PUTL       	t28, R23
+	  42: INCEIPL       	$4
+
+	0x2547BBB4:  2E0B0000  cmpi cr4,r11,0
+	  43: GETL       	R11, t30
+	  44: CMP0L       	t30, t32  (-rSo)
+	  45: ICRFL       	t32, $0x4, CR
+	  46: INCEIPL       	$4
+
+	0x2547BBB8:  41920020  bc 12,18,0x2547BBD8
+	  47: Js18o       	$0x2547BBD8
+
+
+. 0 2547BB8C 48
+. 39 0A 00 1E 81 61 00 00 55 07 00 36 7D 50 53 78 7C C7 00 D0 7D 61 31 6E 38 A1 00 17 54 B6 00 36 7D 71 90 2E 3A E0 00 00 2E 0B 00 00 41 92 00 20
+
+==== BB 1412 (0x2547BBBC) approx BBs exec'd 0 ====
+
+	0x2547BBBC:  834B0014  lwz r26,20(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0x2547BBC0:  56EA103A  rlwinm r10,r23,2,0,29
+	   5: GETL       	R23, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R10
+	   8: INCEIPL       	$4
+
+	0x2547BBC4:  7F1A5800  cmp cr6,r26,r11
+	   9: GETL       	R26, t6
+	  10: GETL       	R11, t8
+	  11: CMPL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0x2547BBC8:  419A0108  bc 12,26,0x2547BCD0
+	  14: Js26o       	$0x2547BCD0
+
+
+. 0 2547BBBC 16
+. 83 4B 00 14 56 EA 10 3A 7F 1A 58 00 41 9A 01 08
+
+==== BB 1413 (0x2547BCD0) approx BBs exec'd 0 ====
+
+	0x2547BCD0:  836B0178  lwz r27,376(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x178, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x2547BCD4:  3AF70001  addi r23,r23,1
+	   5: GETL       	R23, t4
+	   6: ADDL       	$0x1, t4
+	   7: PUTL       	t4, R23
+	   8: INCEIPL       	$4
+
+	0x2547BCD8:  7D6AB12E  stwx r11,r10,r22
+	   9: GETL       	R22, t6
+	  10: GETL       	R10, t8
+	  11: ADDL       	t8, t6
+	  12: GETL       	R11, t10
+	  13: STL       	t10, (t6)
+	  14: INCEIPL       	$4
+
+	0x2547BCDC:  395B0001  addi r10,r27,1
+	  15: GETL       	R27, t12
+	  16: ADDL       	$0x1, t12
+	  17: PUTL       	t12, R10
+	  18: INCEIPL       	$4
+
+	0x2547BCE0:  914B0178  stw r10,376(r11)
+	  19: GETL       	R10, t14
+	  20: GETL       	R11, t16
+	  21: ADDL       	$0x178, t16
+	  22: STL       	t14, (t16)
+	  23: INCEIPL       	$4
+
+	0x2547BCE4:  816B000C  lwz r11,12(r11)
+	  24: GETL       	R11, t18
+	  25: ADDL       	$0xC, t18
+	  26: LDL       	(t18), t20
+	  27: PUTL       	t20, R11
+	  28: INCEIPL       	$4
+
+	0x2547BCE8:  4BFFFEE8  b 0x2547BBD0
+	  29: JMPo       	$0x2547BBD0  ($4)
+
+
+. 0 2547BCD0 28
+. 83 6B 01 78 3A F7 00 01 7D 6A B1 2E 39 5B 00 01 91 4B 01 78 81 6B 00 0C 4B FF FE E8
+
+==== BB 1414 (0x2547BBD0) approx BBs exec'd 0 ====
+
+	0x2547BBD0:  2E0B0000  cmpi cr4,r11,0
+	   0: GETL       	R11, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x2547BBD4:  4092FFE8  bc 4,18,0x2547BBBC
+	   4: Jc18o       	$0x2547BBBC
+
+
+. 0 2547BBD0 8
+. 2E 0B 00 00 40 92 FF E8
+
+==== BB 1415 (0x2547BBE0) approx BBs exec'd 0 ====
+
+	0x2547BBE0:  2E0F0000  cmpi cr4,r15,0
+	   0: GETL       	R15, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x4, CR
+	   3: INCEIPL       	$4
+
+	0x2547BBE4:  7F71902E  lwzx r27,r17,r18
+	   4: GETL       	R18, t4
+	   5: GETL       	R17, t6
+	   6: ADDL       	t6, t4
+	   7: LDL       	(t4), t8
+	   8: PUTL       	t8, R27
+	   9: INCEIPL       	$4
+
+	0x2547BBE8:  40920008  bc 4,18,0x2547BBF0
+	  10: Jc18o       	$0x2547BBF0
+
+
+. 0 2547BBE0 12
+. 2E 0F 00 00 7F 71 90 2E 40 92 00 08
+
+==== BB 1416 (0x2547BBEC) approx BBs exec'd 0 ====
+
+	0x2547BBEC:  837B000C  lwz r27,12(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0xC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R27
+	   4: INCEIPL       	$4
+
+	0x2547BBF0:  2F9B0000  cmpi cr7,r27,0
+	   5: GETL       	R27, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547BBF4:  419E00F8  bc 12,30,0x2547BCEC
+	   9: Js30o       	$0x2547BCEC
+
+
+. 0 2547BBEC 12
+. 83 7B 00 0C 2F 9B 00 00 41 9E 00 F8
+
+==== BB 1417 (0x2547BBF8) approx BBs exec'd 0 ====
+
+	0x2547BBF8:  839B0014  lwz r28,20(r27)
+	   0: GETL       	R27, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R28
+	   4: INCEIPL       	$4
+
+	0x2547BBFC:  7C9CD800  cmp cr1,r28,r27
+	   5: GETL       	R28, t4
+	   6: GETL       	R27, t6
+	   7: CMPL       	t4, t6, t8  (-rSo)
+	   8: ICRFL       	t8, $0x1, CR
+	   9: INCEIPL       	$4
+
+	0x2547BC00:  4086FFEC  bc 4,6,0x2547BBEC
+	  10: Jc06o       	$0x2547BBEC
+
+
+. 0 2547BBF8 12
+. 83 9B 00 14 7C 9C D8 00 40 86 FF EC
+
+==== BB 1418 (0x2547BC04) approx BBs exec'd 0 ====
+
+	0x2547BC04:  7F200026  mfcr r25
+	   0: GETL       	CR, t0
+	   1: PUTL       	t0, R25
+	   2: INCEIPL       	$4
+
+	0x2547BC08:  57399FFE  rlwinm r25,r25,19,31,31
+	   3: GETL       	R25, t2
+	   4: ROLL       	$0x13, t2
+	   5: ANDL       	$0x1, t2
+	   6: PUTL       	t2, R25
+	   7: INCEIPL       	$4
+
+	0x2547BC0C:  48000008  b 0x2547BC14
+	   8: JMPo       	$0x2547BC14  ($4)
+
+
+. 0 2547BC04 12
+. 7F 20 00 26 57 39 9F FE 48 00 00 08
+
+==== BB 1419 (0x2547BC14) approx BBs exec'd 0 ====
+
+	0x2547BC14:  572C103A  rlwinm r12,r25,2,0,29
+	   0: GETL       	R25, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0x2547BC18:  7FACB02E  lwzx r29,r12,r22
+	   4: GETL       	R22, t2
+	   5: GETL       	R12, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0x2547BC1C:  7F1DD800  cmp cr6,r29,r27
+	  10: GETL       	R29, t8
+	  11: GETL       	R27, t10
+	  12: CMPL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x6, CR
+	  14: INCEIPL       	$4
+
+	0x2547BC20:  409AFFF0  bc 4,26,0x2547BC10
+	  15: Jc26o       	$0x2547BC10
+
+
+. 0 2547BC14 16
+. 57 2C 10 3A 7F AC B0 2E 7F 1D D8 00 40 9A FF F0
+
+==== BB 1420 (0x2547BC24) approx BBs exec'd 0 ====
+
+	0x2547BC24:  3B590001  addi r26,r25,1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547BC28:  7C1AB840  cmpl cr0,r26,r23
+	   4: GETL       	R26, t2
+	   5: GETL       	R23, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0x2547BC2C:  40A0FFC0  bc 5,0,0x2547BBEC
+	   9: Jc00o       	$0x2547BBEC
+
+
+. 0 2547BC24 12
+. 3B 59 00 01 7C 1A B8 40 40 A0 FF C0
+
+==== BB 1421 (0x2547BC30) approx BBs exec'd 0 ====
+
+	0x2547BC30:  5720103A  rlwinm r0,r25,2,0,29
+	   0: GETL       	R25, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R0
+	   3: INCEIPL       	$4
+
+	0x2547BC34:  7C140378  or r20,r0,r0
+	   4: GETL       	R0, t2
+	   5: PUTL       	t2, R20
+	   6: INCEIPL       	$4
+
+	0x2547BC38:  7EA0B214  add r21,r0,r22
+	   7: GETL       	R0, t4
+	   8: GETL       	R22, t6
+	   9: ADDL       	t4, t6
+	  10: PUTL       	t6, R21
+	  11: INCEIPL       	$4
+
+	0x2547BC3C:  5758103A  rlwinm r24,r26,2,0,29
+	  12: GETL       	R26, t8
+	  13: SHLL       	$0x2, t8
+	  14: PUTL       	t8, R24
+	  15: INCEIPL       	$4
+
+	0x2547BC40:  7C78B02E  lwzx r3,r24,r22
+	  16: GETL       	R22, t10
+	  17: GETL       	R24, t12
+	  18: ADDL       	t12, t10
+	  19: LDL       	(t10), t14
+	  20: PUTL       	t14, R3
+	  21: INCEIPL       	$4
+
+	0x2547BC44:  816301E8  lwz r11,488(r3)
+	  22: GETL       	R3, t16
+	  23: ADDL       	$0x1E8, t16
+	  24: LDL       	(t16), t18
+	  25: PUTL       	t18, R11
+	  26: INCEIPL       	$4
+
+	0x2547BC48:  2F8B0000  cmpi cr7,r11,0
+	  27: GETL       	R11, t20
+	  28: CMP0L       	t20, t22  (-rSo)
+	  29: ICRFL       	t22, $0x7, CR
+	  30: INCEIPL       	$4
+
+	0x2547BC4C:  419E002C  bc 12,30,0x2547BC78
+	  31: Js30o       	$0x2547BC78
+
+
+. 0 2547BC30 32
+. 57 20 10 3A 7C 14 03 78 7E A0 B2 14 57 58 10 3A 7C 78 B0 2E 81 63 01 E8 2F 8B 00 00 41 9E 00 2C
+
+==== BB 1422 (0x2547BC50) approx BBs exec'd 0 ====
+
+	0x2547BC50:  812B0000  lwz r9,0(r11)
+	   0: GETL       	R11, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0x2547BC54:  2C890000  cmpi cr1,r9,0
+	   4: GETL       	R9, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x1, CR
+	   7: INCEIPL       	$4
+
+	0x2547BC58:  41860020  bc 12,6,0x2547BC78
+	   8: Js06o       	$0x2547BC78
+
+
+. 0 2547BC50 12
+. 81 2B 00 00 2C 89 00 00 41 86 00 20
+
+==== BB 1423 (0x2547BC5C) approx BBs exec'd 0 ====
+
+	0x2547BC5C:  7C99D050  subf r4,r25,r26
+	   0: GETL       	R25, t0
+	   1: GETL       	R26, t2
+	   2: SUBL       	t0, t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547BC60:  5485103A  rlwinm r5,r4,2,0,29
+	   5: GETL       	R4, t4
+	   6: SHLL       	$0x2, t4
+	   7: PUTL       	t4, R5
+	   8: INCEIPL       	$4
+
+	0x2547BC64:  7F09D800  cmp cr6,r9,r27
+	   9: GETL       	R9, t6
+	  10: GETL       	R27, t8
+	  11: CMPL       	t6, t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0x2547BC68:  419A0240  bc 12,26,0x2547BEA8
+	  14: Js26o       	$0x2547BEA8
+
+
+. 0 2547BC5C 16
+. 7C 99 D0 50 54 85 10 3A 7F 09 D8 00 41 9A 02 40
+
+==== BB 1424 (0x2547BC6C) approx BBs exec'd 0 ====
+
+	0x2547BC6C:  852B0004  lwzu r9,4(r11)
+	   0: GETL       	R11, t0
+	   1: ADDL       	$0x4, t0
+	   2: PUTL       	t0, R11
+	   3: LDL       	(t0), t2
+	   4: PUTL       	t2, R9
+	   5: INCEIPL       	$4
+
+	0x2547BC70:  2C090000  cmpi cr0,r9,0
+	   6: GETL       	R9, t4
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547BC74:  4082FFF0  bc 4,2,0x2547BC64
+	  10: Jc02o       	$0x2547BC64
+
+
+. 0 2547BC6C 12
+. 85 2B 00 04 2C 09 00 00 40 82 FF F0
+
+==== BB 1425 (0x2547BC64) approx BBs exec'd 0 ====
+
+	0x2547BC64:  7F09D800  cmp cr6,r9,r27
+	   0: GETL       	R9, t0
+	   1: GETL       	R27, t2
+	   2: CMPL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x6, CR
+	   4: INCEIPL       	$4
+
+	0x2547BC68:  419A0240  bc 12,26,0x2547BEA8
+	   5: Js26o       	$0x2547BEA8
+
+
+. 0 2547BC64 8
+. 7F 09 D8 00 41 9A 02 40
+
+==== BB 1426 (0x2547BC78) approx BBs exec'd 0 ====
+
+	0x2547BC78:  7D38B02E  lwzx r9,r24,r22
+	   0: GETL       	R22, t0
+	   1: GETL       	R24, t2
+	   2: ADDL       	t2, t0
+	   3: LDL       	(t0), t4
+	   4: PUTL       	t4, R9
+	   5: INCEIPL       	$4
+
+	0x2547BC7C:  814901F4  lwz r10,500(r9)
+	   6: GETL       	R9, t6
+	   7: ADDL       	$0x1F4, t6
+	   8: LDL       	(t6), t8
+	   9: PUTL       	t8, R10
+	  10: INCEIPL       	$4
+
+	0x2547BC80:  2F8A0000  cmpi cr7,r10,0
+	  11: GETL       	R10, t10
+	  12: CMP0L       	t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0x2547BC84:  409E0258  bc 4,30,0x2547BEDC
+	  15: Jc30o       	$0x2547BEDC
+
+
+. 0 2547BC78 16
+. 7D 38 B0 2E 81 49 01 F4 2F 8A 00 00 40 9E 02 58
+
+==== BB 1427 (0x2547BC88) approx BBs exec'd 0 ====
+
+	0x2547BC88:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547BC8C:  7F1AB840  cmpl cr6,r26,r23
+	   4: GETL       	R26, t2
+	   5: GETL       	R23, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0x2547BC90:  4198FFAC  bc 12,24,0x2547BC3C
+	   9: Js24o       	$0x2547BC3C
+
+
+. 0 2547BC88 12
+. 3B 5A 00 01 7F 1A B8 40 41 98 FF AC
+
+==== BB 1428 (0x2547BC3C) approx BBs exec'd 0 ====
+
+	0x2547BC3C:  5758103A  rlwinm r24,r26,2,0,29
+	   0: GETL       	R26, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R24
+	   3: INCEIPL       	$4
+
+	0x2547BC40:  7C78B02E  lwzx r3,r24,r22
+	   4: GETL       	R22, t2
+	   5: GETL       	R24, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547BC44:  816301E8  lwz r11,488(r3)
+	  10: GETL       	R3, t8
+	  11: ADDL       	$0x1E8, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547BC48:  2F8B0000  cmpi cr7,r11,0
+	  15: GETL       	R11, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0x2547BC4C:  419E002C  bc 12,30,0x2547BC78
+	  19: Js30o       	$0x2547BC78
+
+
+. 0 2547BC3C 20
+. 57 58 10 3A 7C 78 B0 2E 81 63 01 E8 2F 8B 00 00 41 9E 00 2C
+
+==== BB 1429 (0x2547BC94) approx BBs exec'd 0 ====
+
+	0x2547BC94:  4BFFFF58  b 0x2547BBEC
+	   0: JMPo       	$0x2547BBEC  ($4)
+
+
+. 0 2547BC94 4
+. 4B FF FF 58
+
+==== BB 1430 (0x2547BC10) approx BBs exec'd 0 ====
+
+	0x2547BC10:  3B390001  addi r25,r25,1
+	   0: GETL       	R25, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R25
+	   3: INCEIPL       	$4
+
+	0x2547BC14:  572C103A  rlwinm r12,r25,2,0,29
+	   4: GETL       	R25, t2
+	   5: SHLL       	$0x2, t2
+	   6: PUTL       	t2, R12
+	   7: INCEIPL       	$4
+
+	0x2547BC18:  7FACB02E  lwzx r29,r12,r22
+	   8: GETL       	R22, t4
+	   9: GETL       	R12, t6
+	  10: ADDL       	t6, t4
+	  11: LDL       	(t4), t8
+	  12: PUTL       	t8, R29
+	  13: INCEIPL       	$4
+
+	0x2547BC1C:  7F1DD800  cmp cr6,r29,r27
+	  14: GETL       	R29, t10
+	  15: GETL       	R27, t12
+	  16: CMPL       	t10, t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x6, CR
+	  18: INCEIPL       	$4
+
+	0x2547BC20:  409AFFF0  bc 4,26,0x2547BC10
+	  19: Jc26o       	$0x2547BC10
+
+
+. 0 2547BC10 20
+. 3B 39 00 01 57 2C 10 3A 7F AC B0 2E 7F 1D D8 00 40 9A FF F0
+
+==== BB 1431 (0x2547BD08) approx BBs exec'd 0 ====
+
+	0x2547BD08:  817E034C  lwz r11,844(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x34C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0x2547BD0C:  832B0000  lwz r25,0(r11)
+	   5: GETL       	R11, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R25
+	   8: INCEIPL       	$4
+
+	0x2547BD10:  48000030  b 0x2547BD40
+	   9: JMPo       	$0x2547BD40  ($4)
+
+
+. 0 2547BD08 12
+. 81 7E 03 4C 83 2B 00 00 48 00 00 30
+
+==== BB 1432 (0x2547BD40) approx BBs exec'd 0 ====
+
+	0x2547BD40:  5748103A  rlwinm r8,r26,2,0,29
+	   0: GETL       	R26, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R8
+	   3: INCEIPL       	$4
+
+	0x2547BD44:  7F88B02E  lwzx r28,r8,r22
+	   4: GETL       	R22, t2
+	   5: GETL       	R8, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R28
+	   9: INCEIPL       	$4
+
+	0x2547BD48:  817C0180  lwz r11,384(r28)
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x180, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R11
+	  14: INCEIPL       	$4
+
+	0x2547BD4C:  75601000  andis. r0,r11,0x1000
+	  15: GETL       	R11, t12
+	  16: ANDL       	$0x10000000, t12
+	  17: PUTL       	t12, R0
+	  18: CMP0L       	t12, t14  (-rSo)
+	  19: ICRFL       	t14, $0x0, CR
+	  20: INCEIPL       	$4
+
+	0x2547BD50:  40A2FFC4  bc 5,2,0x2547BD14
+	  21: Jc02o       	$0x2547BD14
+
+
+. 0 2547BD40 20
+. 57 48 10 3A 7F 88 B0 2E 81 7C 01 80 75 60 10 00 40 A2 FF C4
+
+==== BB 1433 (0x2547BD14) approx BBs exec'd 0 ====
+
+	0x2547BD14:  55750104  rlwinm r21,r11,0,4,2
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0xEFFFFFFF, t0
+	   2: PUTL       	t0, R21
+	   3: INCEIPL       	$4
+
+	0x2547BD18:  809C0004  lwz r4,4(r28)
+	   4: GETL       	R28, t2
+	   5: ADDL       	$0x4, t2
+	   6: LDL       	(t2), t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0x2547BD1C:  92BC0180  stw r21,384(r28)
+	   9: GETL       	R21, t6
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x180, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547BD20:  8A840000  lbz r20,0(r4)
+	  14: GETL       	R4, t10
+	  15: LDB       	(t10), t12
+	  16: PUTL       	t12, R20
+	  17: INCEIPL       	$4
+
+	0x2547BD24:  2F140000  cmpi cr6,r20,0
+	  18: GETL       	R20, t14
+	  19: CMP0L       	t14, t16  (-rSo)
+	  20: ICRFL       	t16, $0x6, CR
+	  21: INCEIPL       	$4
+
+	0x2547BD28:  409A00C0  bc 4,26,0x2547BDE8
+	  22: Jc26o       	$0x2547BDE8
+
+
+. 0 2547BD14 24
+. 55 75 01 04 80 9C 00 04 92 BC 01 80 8A 84 00 00 2F 14 00 00 40 9A 00 C0
+
+==== BB 1434 (0x2547BD2C) approx BBs exec'd 0 ====
+
+	0x2547BD2C:  7569C000  andis. r9,r11,0xC000
+	   0: GETL       	R11, t0
+	   1: ANDL       	$0xC0000000, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0x2547BD30:  408200B8  bc 4,2,0x2547BDE8
+	   6: Jc02o       	$0x2547BDE8
+
+
+. 0 2547BD2C 8
+. 75 69 C0 00 40 82 00 B8
+
+==== BB 1435 (0x2547BD34) approx BBs exec'd 0 ====
+
+	0x2547BD34:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547BD38:  7F9A9840  cmpl cr7,r26,r19
+	   4: GETL       	R26, t2
+	   5: GETL       	R19, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547BD3C:  409C0030  bc 4,28,0x2547BD6C
+	   9: Jc28o       	$0x2547BD6C
+
+
+. 0 2547BD34 12
+. 3B 5A 00 01 7F 9A 98 40 40 9C 00 30
+
+==== BB 1436 (0x2547BDE8) approx BBs exec'd 0 ====
+
+	0x2547BDE8:  813C0088  lwz r9,136(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x88, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547BDEC:  2F890000  cmpi cr7,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547BDF0:  409E0010  bc 4,30,0x2547BE00
+	   9: Jc30o       	$0x2547BE00
+
+
+. 0 2547BDE8 12
+. 81 3C 00 88 2F 89 00 00 40 9E 00 10
+
+==== BB 1437 (0x2547BDF4) approx BBs exec'd 0 ====
+
+	0x2547BDF4:  82FC0054  lwz r23,84(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x54, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R23
+	   4: INCEIPL       	$4
+
+	0x2547BDF8:  2C970000  cmpi cr1,r23,0
+	   5: GETL       	R23, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x1, CR
+	   8: INCEIPL       	$4
+
+	0x2547BDFC:  4186FF38  bc 12,6,0x2547BD34
+	   9: Js06o       	$0x2547BD34
+
+
+. 0 2547BDF4 12
+. 82 FC 00 54 2C 97 00 00 41 86 FF 38
+
+==== BB 1438 (0x2547BE00) approx BBs exec'd 0 ====
+
+	0x2547BE00:  830E0000  lwz r24,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R24
+	   3: INCEIPL       	$4
+
+	0x2547BE04:  730B0002  andi. r11,r24,0x2
+	   4: GETL       	R24, t4
+	   5: ANDL       	$0x2, t4
+	   6: PUTL       	t4, R11
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547BE08:  4082007C  bc 4,2,0x2547BE84
+	  10: Jc02o       	$0x2547BE84
+
+
+. 0 2547BE00 12
+. 83 0E 00 00 73 0B 00 02 40 82 00 7C
+
+==== BB 1439 (0x2547BE0C) approx BBs exec'd 0 ====
+
+	0x2547BE0C:  2F090000  cmpi cr6,r9,0
+	   0: GETL       	R9, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0x2547BE10:  419A0044  bc 12,26,0x2547BE54
+	   4: Js26o       	$0x2547BE54
+
+
+. 0 2547BE0C 8
+. 2F 09 00 00 41 9A 00 44
+
+==== BB 1440 (0x2547BE54) approx BBs exec'd 0 ====
+
+	0x2547BE54:  813C0054  lwz r9,84(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x54, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0x2547BE58:  2E090000  cmpi cr4,r9,0
+	   5: GETL       	R9, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x4, CR
+	   8: INCEIPL       	$4
+
+	0x2547BE5C:  4192FEF8  bc 12,18,0x2547BD54
+	   9: Js18o       	$0x2547BD54
+
+
+. 0 2547BE54 12
+. 81 3C 00 54 2E 09 00 00 41 92 FE F8
+
+==== BB 1441 (0x2547BE60) approx BBs exec'd 0 ====
+
+	0x2547BE60:  80A90004  lwz r5,4(r9)
+	   0: GETL       	R9, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0x2547BE64:  83BC0000  lwz r29,0(r28)
+	   5: GETL       	R28, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R29
+	   8: INCEIPL       	$4
+
+	0x2547BE68:  7CFD2A14  add r7,r29,r5
+	   9: GETL       	R29, t8
+	  10: GETL       	R5, t10
+	  11: ADDL       	t8, t10
+	  12: PUTL       	t10, R7
+	  13: INCEIPL       	$4
+
+	0x2547BE6C:  7CE903A6  mtctr r7
+	  14: GETL       	R7, t12
+	  15: PUTL       	t12, CTR
+	  16: INCEIPL       	$4
+
+	0x2547BE70:  4E800421  bctrl
+	  17: MOVL       	$0x2547BE74, t14
+	  18: PUTL       	t14, LR
+	  19: GETL       	CTR, t16
+	  20: JMPo-c       	t16  ($4)
+
+
+. 0 2547BE60 20
+. 80 A9 00 04 83 BC 00 00 7C FD 2A 14 7C E9 03 A6 4E 80 04 21
+
+==== BB 1442 (0xFFDEF80) approx BBs exec'd 0 ====
+
+	0xFFDEF80:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDEF84:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDEF88:  90010014  stw r0,20(r1)
+	   9: GETL       	R0, t6
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0x14, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0xFFDEF8C:  4BFFF979  bl 0xFFDE904
+	  14: MOVL       	$0xFFDEF90, t10
+	  15: PUTL       	t10, LR
+	  16: JMPo-c       	$0xFFDE904  ($4)
+
+
+. 0 FFDEF80 16
+. 94 21 FF F0 7C 08 02 A6 90 01 00 14 4B FF F9 79
+
+==== BB 1443 (0xFFDE904) approx BBs exec'd 0 ====
+
+	0xFFDE904:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFFDE908:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFFDE90C:  429F0005  bcl 20,31,0xFFDE910
+	   9: MOVL       	$0xFFDE910, t6
+	  10: PUTL       	t6, LR
+	  11: INCEIPL       	$4
+
+	0xFFDE910:  93C10008  stw r30,8(r1)
+	  12: GETL       	R30, t8
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x8, t10
+	  15: STL       	t8, (t10)
+	  16: INCEIPL       	$4
+
+	0xFFDE914:  7FC802A6  mflr r30
+	  17: GETL       	LR, t12
+	  18: PUTL       	t12, R30
+	  19: INCEIPL       	$4
+
+	0xFFDE918:  93E1000C  stw r31,12(r1)
+	  20: GETL       	R31, t14
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0xC, t16
+	  23: STL       	t14, (t16)
+	  24: INCEIPL       	$4
+
+	0xFFDE91C:  90010014  stw r0,20(r1)
+	  25: GETL       	R0, t18
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x14, t20
+	  28: STL       	t18, (t20)
+	  29: INCEIPL       	$4
+
+	0xFFDE920:  801EFFF0  lwz r0,-16(r30)
+	  30: GETL       	R30, t22
+	  31: ADDL       	$0xFFFFFFF0, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R0
+	  34: INCEIPL       	$4
+
+	0xFFDE924:  7FC0F214  add r30,r0,r30
+	  35: GETL       	R0, t26
+	  36: GETL       	R30, t28
+	  37: ADDL       	t26, t28
+	  38: PUTL       	t28, R30
+	  39: INCEIPL       	$4
+
+	0xFFDE928:  813E8000  lwz r9,-32768(r30)
+	  40: GETL       	R30, t30
+	  41: ADDL       	$0xFFFF8000, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R9
+	  44: INCEIPL       	$4
+
+	0xFFDE92C:  88090000  lbz r0,0(r9)
+	  45: GETL       	R9, t34
+	  46: LDB       	(t34), t36
+	  47: PUTL       	t36, R0
+	  48: INCEIPL       	$4
+
+	0xFFDE930:  2F800000  cmpi cr7,r0,0
+	  49: GETL       	R0, t38
+	  50: CMP0L       	t38, t40  (-rSo)
+	  51: ICRFL       	t40, $0x7, CR
+	  52: INCEIPL       	$4
+
+	0xFFDE934:  409E0050  bc 4,30,0xFFDE984
+	  53: Jc30o       	$0xFFDE984
+
+
+. 0 FFDE904 52
+. 94 21 FF F0 7C 08 02 A6 42 9F 00 05 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 80 1E FF F0 7F C0 F2 14 81 3E 80 00 88 09 00 00 2F 80 00 00 40 9E 00 50
+
+==== BB 1444 (0xFFDE938) approx BBs exec'd 0 ====
+
+	0xFFDE938:  801E8004  lwz r0,-32764(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8004, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDE93C:  2F800000  cmpi cr7,r0,0
+	   5: GETL       	R0, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0xFFDE940:  419E0010  bc 12,30,0xFFDE950
+	   9: Js30o       	$0xFFDE950
+
+
+. 0 FFDE938 12
+. 80 1E 80 04 2F 80 00 00 41 9E 00 10
+
+==== BB 1445 (0xFFDE944) approx BBs exec'd 0 ====
+
+	0xFFDE944:  813E8008  lwz r9,-32760(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8008, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFFDE948:  80690000  lwz r3,0(r9)
+	   5: GETL       	R9, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R3
+	   8: INCEIPL       	$4
+
+	0xFFDE94C:  48010831  bl 0xFFEF17C
+	   9: MOVL       	$0xFFDE950, t8
+	  10: PUTL       	t8, LR
+	  11: JMPo-c       	$0xFFEF17C  ($4)
+
+
+. 0 FFDE944 12
+. 81 3E 80 08 80 69 00 00 48 01 08 31
+
+==== BB 1446 (0xFFEF17C) approx BBs exec'd 0 ====
+
+	0xFFEF17C:  3960000C  li r11,12
+	   0: MOVL       	$0xC, t0
+	   1: PUTL       	t0, R11
+	   2: INCEIPL       	$4
+
+	0xFFEF180:  4BFFFFBC  b 0xFFEF13C
+	   3: JMPo       	$0xFFEF13C  ($4)
+
+
+. 0 FFEF17C 8
+. 39 60 00 0C 4B FF FF BC
+
+==== BB 1447 (0xFFEF13C) approx BBs exec'd 0 ====
+
+	0xFFEF13C:  556C083C  rlwinm r12,r11,1,0,30
+	   0: GETL       	R11, t0
+	   1: SHLL       	$0x1, t0
+	   2: PUTL       	t0, R12
+	   3: INCEIPL       	$4
+
+	0xFFEF140:  7D6C5A14  add r11,r12,r11
+	   4: GETL       	R12, t2
+	   5: GETL       	R11, t4
+	   6: ADDL       	t2, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0xFFEF144:  3980AF5C  li r12,-20644
+	   9: MOVL       	$0xFFFFAF5C, t6
+	  10: PUTL       	t6, R12
+	  11: INCEIPL       	$4
+
+	0xFFEF148:  3D8C2548  addis r12,r12,9544
+	  12: MOVL       	$0x2547AF5C, t8
+	  13: PUTL       	t8, R12
+	  14: INCEIPL       	$4
+
+	0xFFEF14C:  7D8903A6  mtctr r12
+	  15: GETL       	R12, t10
+	  16: PUTL       	t10, CTR
+	  17: INCEIPL       	$4
+
+	0xFFEF150:  398080D0  li r12,-32560
+	  18: MOVL       	$0xFFFF80D0, t12
+	  19: PUTL       	t12, R12
+	  20: INCEIPL       	$4
+
+	0xFFEF154:  3D8C2547  addis r12,r12,9543
+	  21: MOVL       	$0x254680D0, t14
+	  22: PUTL       	t14, R12
+	  23: INCEIPL       	$4
+
+	0xFFEF158:  4E800420  bctr
+	  24: GETL       	CTR, t16
+	  25: JMPo       	t16  ($4)
+
+
+. 0 FFEF13C 32
+. 55 6C 08 3C 7D 6C 5A 14 39 80 AF 5C 3D 8C 25 48 7D 89 03 A6 39 80 80 D0 3D 8C 25 47 4E 80 04 20
+
+==== BB 1448 __cxa_finalize(0xFEB3F44) approx BBs exec'd 0 ====
+
+	0xFEB3F44:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEB3F48:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEB3F4C:  48113F05  bl 0xFFC7E50
+	   9: MOVL       	$0xFEB3F50, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FEB3F44 12
+. 94 21 FF D0 7C 08 02 A6 48 11 3F 05
+
+==== BB 1449 (0xFEB3F50) approx BBs exec'd 0 ====
+
+	0xFEB3F50:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEB3F54:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEB3F58:  93810020  stw r28,32(r1)
+	   8: GETL       	R28, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x20, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEB3F5C:  90010034  stw r0,52(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x34, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEB3F60:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0xFEB3F64:  93210014  stw r25,20(r1)
+	  21: GETL       	R25, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEB3F68:  2E030000  cmpi cr4,r3,0
+	  26: GETL       	R3, t20
+	  27: CMP0L       	t20, t22  (-rSo)
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0xFEB3F6C:  813E1AFC  lwz r9,6908(r30)
+	  30: GETL       	R30, t24
+	  31: ADDL       	$0x1AFC, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R9
+	  34: INCEIPL       	$4
+
+	0xFEB3F70:  7C791B78  or r25,r3,r3
+	  35: GETL       	R3, t28
+	  36: PUTL       	t28, R25
+	  37: INCEIPL       	$4
+
+	0xFEB3F74:  93010010  stw r24,16(r1)
+	  38: GETL       	R24, t30
+	  39: GETL       	R1, t32
+	  40: ADDL       	$0x10, t32
+	  41: STL       	t30, (t32)
+	  42: INCEIPL       	$4
+
+	0xFEB3F78:  83890000  lwz r28,0(r9)
+	  43: GETL       	R9, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R28
+	  46: INCEIPL       	$4
+
+	0xFEB3F7C:  93410018  stw r26,24(r1)
+	  47: GETL       	R26, t38
+	  48: GETL       	R1, t40
+	  49: ADDL       	$0x18, t40
+	  50: STL       	t38, (t40)
+	  51: INCEIPL       	$4
+
+	0xFEB3F80:  2F9C0000  cmpi cr7,r28,0
+	  52: GETL       	R28, t42
+	  53: CMP0L       	t42, t44  (-rSo)
+	  54: ICRFL       	t44, $0x7, CR
+	  55: INCEIPL       	$4
+
+	0xFEB3F84:  9361001C  stw r27,28(r1)
+	  56: GETL       	R27, t46
+	  57: GETL       	R1, t48
+	  58: ADDL       	$0x1C, t48
+	  59: STL       	t46, (t48)
+	  60: INCEIPL       	$4
+
+	0xFEB3F88:  93A10024  stw r29,36(r1)
+	  61: GETL       	R29, t50
+	  62: GETL       	R1, t52
+	  63: ADDL       	$0x24, t52
+	  64: STL       	t50, (t52)
+	  65: INCEIPL       	$4
+
+	0xFEB3F8C:  93E1002C  stw r31,44(r1)
+	  66: GETL       	R31, t54
+	  67: GETL       	R1, t56
+	  68: ADDL       	$0x2C, t56
+	  69: STL       	t54, (t56)
+	  70: INCEIPL       	$4
+
+	0xFEB3F90:  9181000C  stw r12,12(r1)
+	  71: GETL       	R12, t58
+	  72: GETL       	R1, t60
+	  73: ADDL       	$0xC, t60
+	  74: STL       	t58, (t60)
+	  75: INCEIPL       	$4
+
+	0xFEB3F94:  419E0088  bc 12,30,0xFEB401C
+	  76: Js30o       	$0xFEB401C
+
+
+. 0 FEB3F50 72
+. 93 C1 00 28 7F C8 02 A6 93 81 00 20 90 01 00 34 7D 80 00 26 93 21 00 14 2E 03 00 00 81 3E 1A FC 7C 79 1B 78 93 01 00 10 83 89 00 00 93 41 00 18 2F 9C 00 00 93 61 00 1C 93 A1 00 24 93 E1 00 2C 91 81 00 0C 41 9E 00 88
+
+==== BB 1450 (0xFEB3F98) approx BBs exec'd 0 ====
+
+	0xFEB3F98:  80BC0004  lwz r5,4(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R5
+	   4: INCEIPL       	$4
+
+	0xFEB3F9C:  3BBC0008  addi r29,r28,8
+	   5: GETL       	R28, t4
+	   6: ADDL       	$0x8, t4
+	   7: PUTL       	t4, R29
+	   8: INCEIPL       	$4
+
+	0xFEB3FA0:  54A42036  rlwinm r4,r5,4,0,27
+	   9: GETL       	R5, t6
+	  10: SHLL       	$0x4, t6
+	  11: PUTL       	t6, R4
+	  12: INCEIPL       	$4
+
+	0xFEB3FA4:  7C64E214  add r3,r4,r28
+	  13: GETL       	R4, t8
+	  14: GETL       	R28, t10
+	  15: ADDL       	t8, t10
+	  16: PUTL       	t10, R3
+	  17: INCEIPL       	$4
+
+	0xFEB3FA8:  3BE3FFF8  addi r31,r3,-8
+	  18: GETL       	R3, t12
+	  19: ADDL       	$0xFFFFFFF8, t12
+	  20: PUTL       	t12, R31
+	  21: INCEIPL       	$4
+
+	0xFEB3FAC:  7C1DF840  cmpl cr0,r29,r31
+	  22: GETL       	R29, t14
+	  23: GETL       	R31, t16
+	  24: CMPUL       	t14, t16, t18  (-rSo)
+	  25: ICRFL       	t18, $0x0, CR
+	  26: INCEIPL       	$4
+
+	0xFEB3FB0:  41810060  bc 12,1,0xFEB4010
+	  27: Js01o       	$0xFEB4010
+
+
+. 0 FEB3F98 28
+. 80 BC 00 04 3B BC 00 08 54 A4 20 36 7C 64 E2 14 3B E3 FF F8 7C 1D F8 40 41 81 00 60
+
+==== BB 1451 (0xFEB4010) approx BBs exec'd 0 ====
+
+	0xFEB4010:  839C0000  lwz r28,0(r28)
+	   0: GETL       	R28, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R28
+	   3: INCEIPL       	$4
+
+	0xFEB4014:  2F9C0000  cmpi cr7,r28,0
+	   4: GETL       	R28, t4
+	   5: CMP0L       	t4, t6  (-rSo)
+	   6: ICRFL       	t6, $0x7, CR
+	   7: INCEIPL       	$4
+
+	0xFEB4018:  409EFF80  bc 4,30,0xFEB3F98
+	   8: Jc30o       	$0xFEB3F98
+
+
+. 0 FEB4010 12
+. 83 9C 00 00 2F 9C 00 00 40 9E FF 80
+
+==== BB 1452 (0xFEB401C) approx BBs exec'd 0 ====
+
+	0xFEB401C:  40920098  bc 4,18,0xFEB40B4
+	   0: Jc18o       	$0xFEB40B4
+
+
+. 0 FEB401C 4
+. 40 92 00 98
+
+==== BB 1453 (0xFEB40B4) approx BBs exec'd 0 ====
+
+	0xFEB40B4:  81410034  lwz r10,52(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFEB40B8:  7F23CB78  or r3,r25,r25
+	   5: GETL       	R25, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEB40BC:  8101000C  lwz r8,12(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0xC, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R8
+	  12: INCEIPL       	$4
+
+	0xFEB40C0:  83C10028  lwz r30,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R30
+	  17: INCEIPL       	$4
+
+	0xFEB40C4:  7D4803A6  mtlr r10
+	  18: GETL       	R10, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFEB40C8:  83E1002C  lwz r31,44(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x2C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R31
+	  25: INCEIPL       	$4
+
+	0xFEB40CC:  7D008120  mtcrf 0x8,r8
+	  26: GETL       	R8, t20
+	  27: ICRFL       	t20, $0x4, CR
+	  28: INCEIPL       	$4
+
+	0xFEB40D0:  83010010  lwz r24,16(r1)
+	  29: GETL       	R1, t22
+	  30: ADDL       	$0x10, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R24
+	  33: INCEIPL       	$4
+
+	0xFEB40D4:  83210014  lwz r25,20(r1)
+	  34: GETL       	R1, t26
+	  35: ADDL       	$0x14, t26
+	  36: LDL       	(t26), t28
+	  37: PUTL       	t28, R25
+	  38: INCEIPL       	$4
+
+	0xFEB40D8:  83410018  lwz r26,24(r1)
+	  39: GETL       	R1, t30
+	  40: ADDL       	$0x18, t30
+	  41: LDL       	(t30), t32
+	  42: PUTL       	t32, R26
+	  43: INCEIPL       	$4
+
+	0xFEB40DC:  8361001C  lwz r27,28(r1)
+	  44: GETL       	R1, t34
+	  45: ADDL       	$0x1C, t34
+	  46: LDL       	(t34), t36
+	  47: PUTL       	t36, R27
+	  48: INCEIPL       	$4
+
+	0xFEB40E0:  83810020  lwz r28,32(r1)
+	  49: GETL       	R1, t38
+	  50: ADDL       	$0x20, t38
+	  51: LDL       	(t38), t40
+	  52: PUTL       	t40, R28
+	  53: INCEIPL       	$4
+
+	0xFEB40E4:  83A10024  lwz r29,36(r1)
+	  54: GETL       	R1, t42
+	  55: ADDL       	$0x24, t42
+	  56: LDL       	(t42), t44
+	  57: PUTL       	t44, R29
+	  58: INCEIPL       	$4
+
+	0xFEB40E8:  38210030  addi r1,r1,48
+	  59: GETL       	R1, t46
+	  60: ADDL       	$0x30, t46
+	  61: PUTL       	t46, R1
+	  62: INCEIPL       	$4
+
+	0xFEB40EC:  480ABD24  b 0xFF5FE10
+	  63: JMPo       	$0xFF5FE10  ($4)
+
+
+. 0 FEB40B4 60
+. 81 41 00 34 7F 23 CB 78 81 01 00 0C 83 C1 00 28 7D 48 03 A6 83 E1 00 2C 7D 00 81 20 83 01 00 10 83 21 00 14 83 41 00 18 83 61 00 1C 83 81 00 20 83 A1 00 24 38 21 00 30 48 0A BD 24
+
+==== BB 1454 __unregister_atfork(0xFF5FE10) approx BBs exec'd 0 ====
+
+	0xFF5FE10:  9421FFD0  stwu r1,-48(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFD0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF5FE14:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFF5FE18:  48068039  bl 0xFFC7E50
+	   9: MOVL       	$0xFF5FE1C, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FF5FE10 12
+. 94 21 FF D0 7C 08 02 A6 48 06 80 39
+
+==== BB 1455 (0xFF5FE1C) approx BBs exec'd 0 ====
+
+	0xFF5FE1C:  93C10028  stw r30,40(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x28, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF5FE20:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFF5FE24:  93010010  stw r24,16(r1)
+	   8: GETL       	R24, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x10, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFF5FE28:  90010034  stw r0,52(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x34, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF5FE2C:  93A10024  stw r29,36(r1)
+	  18: GETL       	R29, t14
+	  19: GETL       	R1, t16
+	  20: ADDL       	$0x24, t16
+	  21: STL       	t14, (t16)
+	  22: INCEIPL       	$4
+
+	0xFF5FE30:  831E1CD4  lwz r24,7380(r30)
+	  23: GETL       	R30, t18
+	  24: ADDL       	$0x1CD4, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R24
+	  27: INCEIPL       	$4
+
+	0xFF5FE34:  93410018  stw r26,24(r1)
+	  28: GETL       	R26, t22
+	  29: GETL       	R1, t24
+	  30: ADDL       	$0x18, t24
+	  31: STL       	t22, (t24)
+	  32: INCEIPL       	$4
+
+	0xFF5FE38:  7C7A1B78  or r26,r3,r3
+	  33: GETL       	R3, t26
+	  34: PUTL       	t26, R26
+	  35: INCEIPL       	$4
+
+	0xFF5FE3C:  83B80000  lwz r29,0(r24)
+	  36: GETL       	R24, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R29
+	  39: INCEIPL       	$4
+
+	0xFF5FE40:  9361001C  stw r27,28(r1)
+	  40: GETL       	R27, t32
+	  41: GETL       	R1, t34
+	  42: ADDL       	$0x1C, t34
+	  43: STL       	t32, (t34)
+	  44: INCEIPL       	$4
+
+	0xFF5FE44:  3B600000  li r27,0
+	  45: MOVL       	$0x0, t36
+	  46: PUTL       	t36, R27
+	  47: INCEIPL       	$4
+
+	0xFF5FE48:  93E1002C  stw r31,44(r1)
+	  48: GETL       	R31, t38
+	  49: GETL       	R1, t40
+	  50: ADDL       	$0x2C, t40
+	  51: STL       	t38, (t40)
+	  52: INCEIPL       	$4
+
+	0xFF5FE4C:  7C3F0B78  or r31,r1,r1
+	  53: GETL       	R1, t42
+	  54: PUTL       	t42, R31
+	  55: INCEIPL       	$4
+
+	0xFF5FE50:  93210014  stw r25,20(r1)
+	  56: GETL       	R25, t44
+	  57: GETL       	R1, t46
+	  58: ADDL       	$0x14, t46
+	  59: STL       	t44, (t46)
+	  60: INCEIPL       	$4
+
+	0xFF5FE54:  93810020  stw r28,32(r1)
+	  61: GETL       	R28, t48
+	  62: GETL       	R1, t50
+	  63: ADDL       	$0x20, t50
+	  64: STL       	t48, (t50)
+	  65: INCEIPL       	$4
+
+	0xFF5FE58:  48000018  b 0xFF5FE70
+	  66: JMPo       	$0xFF5FE70  ($4)
+
+
+. 0 FF5FE1C 64
+. 93 C1 00 28 7F C8 02 A6 93 01 00 10 90 01 00 34 93 A1 00 24 83 1E 1C D4 93 41 00 18 7C 7A 1B 78 83 B8 00 00 93 61 00 1C 3B 60 00 00 93 E1 00 2C 7C 3F 0B 78 93 21 00 14 93 81 00 20 48 00 00 18
+
+==== BB 1456 (0xFF5FE70) approx BBs exec'd 0 ====
+
+	0xFF5FE70:  2F1D0000  cmpi cr6,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x6, CR
+	   3: INCEIPL       	$4
+
+	0xFF5FE74:  409AFFE8  bc 4,26,0xFF5FE5C
+	   4: Jc26o       	$0xFF5FE5C
+
+
+. 0 FF5FE70 8
+. 2F 1D 00 00 40 9A FF E8
+
+==== BB 1457 (0xFF5FE78) approx BBs exec'd 0 ====
+
+	0xFF5FE78:  80610000  lwz r3,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFF5FE7C:  83830004  lwz r28,4(r3)
+	   4: GETL       	R3, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R28
+	   8: INCEIPL       	$4
+
+	0xFF5FE80:  8303FFE0  lwz r24,-32(r3)
+	   9: GETL       	R3, t8
+	  10: ADDL       	$0xFFFFFFE0, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R24
+	  13: INCEIPL       	$4
+
+	0xFF5FE84:  7F8803A6  mtlr r28
+	  14: GETL       	R28, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0xFF5FE88:  8323FFE4  lwz r25,-28(r3)
+	  17: GETL       	R3, t14
+	  18: ADDL       	$0xFFFFFFE4, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R25
+	  21: INCEIPL       	$4
+
+	0xFF5FE8C:  8343FFE8  lwz r26,-24(r3)
+	  22: GETL       	R3, t18
+	  23: ADDL       	$0xFFFFFFE8, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R26
+	  26: INCEIPL       	$4
+
+	0xFF5FE90:  8363FFEC  lwz r27,-20(r3)
+	  27: GETL       	R3, t22
+	  28: ADDL       	$0xFFFFFFEC, t22
+	  29: LDL       	(t22), t24
+	  30: PUTL       	t24, R27
+	  31: INCEIPL       	$4
+
+	0xFF5FE94:  8383FFF0  lwz r28,-16(r3)
+	  32: GETL       	R3, t26
+	  33: ADDL       	$0xFFFFFFF0, t26
+	  34: LDL       	(t26), t28
+	  35: PUTL       	t28, R28
+	  36: INCEIPL       	$4
+
+	0xFF5FE98:  83A3FFF4  lwz r29,-12(r3)
+	  37: GETL       	R3, t30
+	  38: ADDL       	$0xFFFFFFF4, t30
+	  39: LDL       	(t30), t32
+	  40: PUTL       	t32, R29
+	  41: INCEIPL       	$4
+
+	0xFF5FE9C:  83C3FFF8  lwz r30,-8(r3)
+	  42: GETL       	R3, t34
+	  43: ADDL       	$0xFFFFFFF8, t34
+	  44: LDL       	(t34), t36
+	  45: PUTL       	t36, R30
+	  46: INCEIPL       	$4
+
+	0xFF5FEA0:  83E3FFFC  lwz r31,-4(r3)
+	  47: GETL       	R3, t38
+	  48: ADDL       	$0xFFFFFFFC, t38
+	  49: LDL       	(t38), t40
+	  50: PUTL       	t40, R31
+	  51: INCEIPL       	$4
+
+	0xFF5FEA4:  7C611B78  or r1,r3,r3
+	  52: GETL       	R3, t42
+	  53: PUTL       	t42, R1
+	  54: INCEIPL       	$4
+
+	0xFF5FEA8:  4E800020  blr
+	  55: GETL       	LR, t44
+	  56: JMPo-r       	t44  ($4)
+
+
+. 0 FF5FE78 52
+. 80 61 00 00 83 83 00 04 83 03 FF E0 7F 88 03 A6 83 23 FF E4 83 43 FF E8 83 63 FF EC 83 83 FF F0 83 A3 FF F4 83 C3 FF F8 83 E3 FF FC 7C 61 1B 78 4E 80 00 20
+
+==== BB 1458 (0xFFDE950) approx BBs exec'd 0 ====
+
+	0xFFDE950:  83FE800C  lwz r31,-32756(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF800C, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFFDE954:  48000014  b 0xFFDE968
+	   5: JMPo       	$0xFFDE968  ($4)
+
+
+. 0 FFDE950 8
+. 83 FE 80 0C 48 00 00 14
+
+==== BB 1459 (0xFFDE968) approx BBs exec'd 0 ====
+
+	0xFFDE968:  813F0000  lwz r9,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R9
+	   3: INCEIPL       	$4
+
+	0xFFDE96C:  81690000  lwz r11,0(r9)
+	   4: GETL       	R9, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R11
+	   7: INCEIPL       	$4
+
+	0xFFDE970:  2F8B0000  cmpi cr7,r11,0
+	   8: GETL       	R11, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x7, CR
+	  11: INCEIPL       	$4
+
+	0xFFDE974:  409EFFE4  bc 4,30,0xFFDE958
+	  12: Jc30o       	$0xFFDE958
+
+
+. 0 FFDE968 16
+. 81 3F 00 00 81 69 00 00 2F 8B 00 00 40 9E FF E4
+
+==== BB 1460 (0xFFDE978) approx BBs exec'd 0 ====
+
+	0xFFDE978:  813E8000  lwz r9,-32768(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0xFFFF8000, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFFDE97C:  38000001  li r0,1
+	   5: MOVL       	$0x1, t4
+	   6: PUTL       	t4, R0
+	   7: INCEIPL       	$4
+
+	0xFFDE980:  98090000  stb r0,0(r9)
+	   8: GETL       	R0, t6
+	   9: GETL       	R9, t8
+	  10: STB       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFFDE984:  80010014  lwz r0,20(r1)
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x14, t10
+	  14: LDL       	(t10), t12
+	  15: PUTL       	t12, R0
+	  16: INCEIPL       	$4
+
+	0xFFDE988:  83C10008  lwz r30,8(r1)
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x8, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R30
+	  21: INCEIPL       	$4
+
+	0xFFDE98C:  83E1000C  lwz r31,12(r1)
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0xC, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R31
+	  26: INCEIPL       	$4
+
+	0xFFDE990:  7C0803A6  mtlr r0
+	  27: GETL       	R0, t22
+	  28: PUTL       	t22, LR
+	  29: INCEIPL       	$4
+
+	0xFFDE994:  38210010  addi r1,r1,16
+	  30: GETL       	R1, t24
+	  31: ADDL       	$0x10, t24
+	  32: PUTL       	t24, R1
+	  33: INCEIPL       	$4
+
+	0xFFDE998:  4E800020  blr
+	  34: GETL       	LR, t26
+	  35: JMPo-r       	t26  ($4)
+
+
+. 0 FFDE978 36
+. 81 3E 80 00 38 00 00 01 98 09 00 00 80 01 00 14 83 C1 00 08 83 E1 00 0C 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1461 (0xFFDEF90) approx BBs exec'd 0 ====
+
+	0xFFDEF90:  80010014  lwz r0,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R0
+	   4: INCEIPL       	$4
+
+	0xFFDEF94:  7C0803A6  mtlr r0
+	   5: GETL       	R0, t4
+	   6: PUTL       	t4, LR
+	   7: INCEIPL       	$4
+
+	0xFFDEF98:  38210010  addi r1,r1,16
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x10, t6
+	  10: PUTL       	t6, R1
+	  11: INCEIPL       	$4
+
+	0xFFDEF9C:  4E800020  blr
+	  12: GETL       	LR, t8
+	  13: JMPo-r       	t8  ($4)
+
+
+. 0 FFDEF90 16
+. 80 01 00 14 7C 08 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1462 (0x2547BE74) approx BBs exec'd 0 ====
+
+	0x2547BE74:  811C0178  lwz r8,376(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x178, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547BE78:  3968FFFF  addi r11,r8,-1
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0x2547BE7C:  917C0178  stw r11,376(r28)
+	   9: GETL       	R11, t6
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x178, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547BE80:  4BFFFEE0  b 0x2547BD60
+	  14: JMPo       	$0x2547BD60  ($4)
+
+
+. 0 2547BE74 16
+. 81 1C 01 78 39 68 FF FF 91 7C 01 78 4B FF FE E0
+
+==== BB 1463 (0x2547BD60) approx BBs exec'd 0 ====
+
+	0x2547BD60:  3B5A0001  addi r26,r26,1
+	   0: GETL       	R26, t0
+	   1: ADDL       	$0x1, t0
+	   2: PUTL       	t0, R26
+	   3: INCEIPL       	$4
+
+	0x2547BD64:  7F9A9840  cmpl cr7,r26,r19
+	   4: GETL       	R26, t2
+	   5: GETL       	R19, t4
+	   6: CMPUL       	t2, t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x7, CR
+	   8: INCEIPL       	$4
+
+	0x2547BD68:  419CFFD8  bc 12,28,0x2547BD40
+	   9: Js28o       	$0x2547BD40
+
+
+. 0 2547BD60 12
+. 3B 5A 00 01 7F 9A 98 40 41 9C FF D8
+
+==== BB 1464 (0x2547BE14) approx BBs exec'd 0 ====
+
+	0x2547BE14:  809C0090  lwz r4,144(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x90, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0x2547BE18:  80690004  lwz r3,4(r9)
+	   5: GETL       	R9, t4
+	   6: ADDL       	$0x4, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R3
+	   9: INCEIPL       	$4
+
+	0x2547BE1C:  83640004  lwz r27,4(r4)
+	  10: GETL       	R4, t8
+	  11: ADDL       	$0x4, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R27
+	  14: INCEIPL       	$4
+
+	0x2547BE20:  801C0000  lwz r0,0(r28)
+	  15: GETL       	R28, t12
+	  16: LDL       	(t12), t14
+	  17: PUTL       	t14, R0
+	  18: INCEIPL       	$4
+
+	0x2547BE24:  576CF0BE  rlwinm r12,r27,30,2,31
+	  19: GETL       	R27, t16
+	  20: SHRL       	$0x2, t16
+	  21: PUTL       	t16, R12
+	  22: INCEIPL       	$4
+
+	0x2547BE28:  2F8C0000  cmpi cr7,r12,0
+	  23: GETL       	R12, t18
+	  24: CMP0L       	t18, t20  (-rSo)
+	  25: ICRFL       	t20, $0x7, CR
+	  26: INCEIPL       	$4
+
+	0x2547BE2C:  7F601A14  add r27,r0,r3
+	  27: GETL       	R0, t22
+	  28: GETL       	R3, t24
+	  29: ADDL       	t22, t24
+	  30: PUTL       	t24, R27
+	  31: INCEIPL       	$4
+
+	0x2547BE30:  3BACFFFF  addi r29,r12,-1
+	  32: GETL       	R12, t26
+	  33: ADDL       	$0xFFFFFFFF, t26
+	  34: PUTL       	t26, R29
+	  35: INCEIPL       	$4
+
+	0x2547BE34:  419E0020  bc 12,30,0x2547BE54
+	  36: Js30o       	$0x2547BE54
+
+
+. 0 2547BE14 36
+. 80 9C 00 90 80 69 00 04 83 64 00 04 80 1C 00 00 57 6C F0 BE 2F 8C 00 00 7F 60 1A 14 3B AC FF FF 41 9E 00 20
+
+==== BB 1465 (0x2547BE38) approx BBs exec'd 0 ====
+
+	0x2547BE38:  57A6103A  rlwinm r6,r29,2,0,29
+	   0: GETL       	R29, t0
+	   1: SHLL       	$0x2, t0
+	   2: PUTL       	t0, R6
+	   3: INCEIPL       	$4
+
+	0x2547BE3C:  7D26D82E  lwzx r9,r6,r27
+	   4: GETL       	R27, t2
+	   5: GETL       	R6, t4
+	   6: ADDL       	t4, t2
+	   7: LDL       	(t2), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0x2547BE40:  7D2903A6  mtctr r9
+	  10: GETL       	R9, t8
+	  11: PUTL       	t8, CTR
+	  12: INCEIPL       	$4
+
+	0x2547BE44:  4E800421  bctrl
+	  13: MOVL       	$0x2547BE48, t10
+	  14: PUTL       	t10, LR
+	  15: GETL       	CTR, t12
+	  16: JMPo-c       	t12  ($4)
+
+
+. 0 2547BE38 16
+. 57 A6 10 3A 7D 26 D8 2E 7D 29 03 A6 4E 80 04 21
+
+==== BB 1466 __libc_fini(0xFE9B5BC) approx BBs exec'd 0 ====
+
+	0xFE9B5BC:  9421FFF0  stwu r1,-16(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFF0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFE9B5C0:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFE9B5C4:  4812C88D  bl 0xFFC7E50
+	   9: MOVL       	$0xFE9B5C8, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FE9B5BC 12
+. 94 21 FF F0 7C 08 02 A6 48 12 C8 8D
+
+==== BB 1467 (0xFE9B5C8) approx BBs exec'd 0 ====
+
+	0xFE9B5C8:  93C10008  stw r30,8(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x8, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFE9B5CC:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFE9B5D0:  93E1000C  stw r31,12(r1)
+	   8: GETL       	R31, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0xC, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFE9B5D4:  90010014  stw r0,20(r1)
+	  13: GETL       	R0, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x14, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFE9B5D8:  813E0014  lwz r9,20(r30)
+	  18: GETL       	R30, t14
+	  19: ADDL       	$0x14, t14
+	  20: LDL       	(t14), t16
+	  21: PUTL       	t16, R9
+	  22: INCEIPL       	$4
+
+	0xFE9B5DC:  80090004  lwz r0,4(r9)
+	  23: GETL       	R9, t18
+	  24: ADDL       	$0x4, t18
+	  25: LDL       	(t18), t20
+	  26: PUTL       	t20, R0
+	  27: INCEIPL       	$4
+
+	0xFE9B5E0:  3BE90004  addi r31,r9,4
+	  28: GETL       	R9, t22
+	  29: ADDL       	$0x4, t22
+	  30: PUTL       	t22, R31
+	  31: INCEIPL       	$4
+
+	0xFE9B5E4:  2F800000  cmpi cr7,r0,0
+	  32: GETL       	R0, t24
+	  33: CMP0L       	t24, t26  (-rSo)
+	  34: ICRFL       	t26, $0x7, CR
+	  35: INCEIPL       	$4
+
+	0xFE9B5E8:  419E0018  bc 12,30,0xFE9B600
+	  36: Js30o       	$0xFE9B600
+
+
+. 0 FE9B5C8 36
+. 93 C1 00 08 7F C8 02 A6 93 E1 00 0C 90 01 00 14 81 3E 00 14 80 09 00 04 3B E9 00 04 2F 80 00 00 41 9E 00 18
+
+==== BB 1468 (0xFE9B600) approx BBs exec'd 0 ====
+
+	0xFE9B600:  80610014  lwz r3,20(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFE9B604:  83C10008  lwz r30,8(r1)
+	   5: GETL       	R1, t4
+	   6: ADDL       	$0x8, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R30
+	   9: INCEIPL       	$4
+
+	0xFE9B608:  83E1000C  lwz r31,12(r1)
+	  10: GETL       	R1, t8
+	  11: ADDL       	$0xC, t8
+	  12: LDL       	(t8), t10
+	  13: PUTL       	t10, R31
+	  14: INCEIPL       	$4
+
+	0xFE9B60C:  7C6803A6  mtlr r3
+	  15: GETL       	R3, t12
+	  16: PUTL       	t12, LR
+	  17: INCEIPL       	$4
+
+	0xFE9B610:  38210010  addi r1,r1,16
+	  18: GETL       	R1, t14
+	  19: ADDL       	$0x10, t14
+	  20: PUTL       	t14, R1
+	  21: INCEIPL       	$4
+
+	0xFE9B614:  4E800020  blr
+	  22: GETL       	LR, t16
+	  23: JMPo-r       	t16  ($4)
+
+
+. 0 FE9B600 24
+. 80 61 00 14 83 C1 00 08 83 E1 00 0C 7C 68 03 A6 38 21 00 10 4E 80 00 20
+
+==== BB 1469 (0x2547BE48) approx BBs exec'd 0 ====
+
+	0x2547BE48:  2F9D0000  cmpi cr7,r29,0
+	   0: GETL       	R29, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0x2547BE4C:  3BBDFFFF  addi r29,r29,-1
+	   4: GETL       	R29, t4
+	   5: ADDL       	$0xFFFFFFFF, t4
+	   6: PUTL       	t4, R29
+	   7: INCEIPL       	$4
+
+	0x2547BE50:  409EFFE8  bc 4,30,0x2547BE38
+	   8: Jc30o       	$0x2547BE38
+
+
+. 0 2547BE48 12
+. 2F 9D 00 00 3B BD FF FF 40 9E FF E8
+
+==== BB 1470 (0x2547BD54) approx BBs exec'd 0 ====
+
+	0x2547BD54:  811C0178  lwz r8,376(r28)
+	   0: GETL       	R28, t0
+	   1: ADDL       	$0x178, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R8
+	   4: INCEIPL       	$4
+
+	0x2547BD58:  3968FFFF  addi r11,r8,-1
+	   5: GETL       	R8, t4
+	   6: ADDL       	$0xFFFFFFFF, t4
+	   7: PUTL       	t4, R11
+	   8: INCEIPL       	$4
+
+	0x2547BD5C:  917C0178  stw r11,376(r28)
+	   9: GETL       	R11, t6
+	  10: GETL       	R28, t8
+	  11: ADDL       	$0x178, t8
+	  12: STL       	t6, (t8)
+	  13: INCEIPL       	$4
+
+	0x2547BD60:  3B5A0001  addi r26,r26,1
+	  14: GETL       	R26, t10
+	  15: ADDL       	$0x1, t10
+	  16: PUTL       	t10, R26
+	  17: INCEIPL       	$4
+
+	0x2547BD64:  7F9A9840  cmpl cr7,r26,r19
+	  18: GETL       	R26, t12
+	  19: GETL       	R19, t14
+	  20: CMPUL       	t12, t14, t16  (-rSo)
+	  21: ICRFL       	t16, $0x7, CR
+	  22: INCEIPL       	$4
+
+	0x2547BD68:  419CFFD8  bc 12,28,0x2547BD40
+	  23: Js28o       	$0x2547BD40
+
+
+. 0 2547BD54 24
+. 81 1C 01 78 39 68 FF FF 91 7C 01 78 3B 5A 00 01 7F 9A 98 40 41 9C FF D8
+
+==== BB 1471 (0x2547BD78) approx BBs exec'd 0 ====
+
+	0x2547BD78:  81EE0000  lwz r15,0(r14)
+	   0: GETL       	R14, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R15
+	   3: INCEIPL       	$4
+
+	0x2547BD7C:  71E90080  andi. r9,r15,0x80
+	   4: GETL       	R15, t4
+	   5: ANDL       	$0x80, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0x2547BD80:  408201AC  bc 4,2,0x2547BF2C
+	  10: Jc02o       	$0x2547BF2C
+
+
+. 0 2547BD78 12
+. 81 EE 00 00 71 E9 00 80 40 82 01 AC
+
+==== BB 1472 (0x2547BD84) approx BBs exec'd 0 ====
+
+	0x2547BD84:  81410000  lwz r10,0(r1)
+	   0: GETL       	R1, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0x2547BD88:  81CA0004  lwz r14,4(r10)
+	   4: GETL       	R10, t4
+	   5: ADDL       	$0x4, t4
+	   6: LDL       	(t4), t6
+	   7: PUTL       	t6, R14
+	   8: INCEIPL       	$4
+
+	0x2547BD8C:  818AFFB4  lwz r12,-76(r10)
+	   9: GETL       	R10, t8
+	  10: ADDL       	$0xFFFFFFB4, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R12
+	  13: INCEIPL       	$4
+
+	0x2547BD90:  7DC803A6  mtlr r14
+	  14: GETL       	R14, t12
+	  15: PUTL       	t12, LR
+	  16: INCEIPL       	$4
+
+	0x2547BD94:  81EAFFBC  lwz r15,-68(r10)
+	  17: GETL       	R10, t14
+	  18: ADDL       	$0xFFFFFFBC, t14
+	  19: LDL       	(t14), t16
+	  20: PUTL       	t16, R15
+	  21: INCEIPL       	$4
+
+	0x2547BD98:  81CAFFB8  lwz r14,-72(r10)
+	  22: GETL       	R10, t18
+	  23: ADDL       	$0xFFFFFFB8, t18
+	  24: LDL       	(t18), t20
+	  25: PUTL       	t20, R14
+	  26: INCEIPL       	$4
+
+	0x2547BD9C:  7D808120  mtcrf 0x8,r12
+	  27: GETL       	R12, t22
+	  28: ICRFL       	t22, $0x4, CR
+	  29: INCEIPL       	$4
+
+	0x2547BDA0:  820AFFC0  lwz r16,-64(r10)
+	  30: GETL       	R10, t24
+	  31: ADDL       	$0xFFFFFFC0, t24
+	  32: LDL       	(t24), t26
+	  33: PUTL       	t26, R16
+	  34: INCEIPL       	$4
+
+	0x2547BDA4:  822AFFC4  lwz r17,-60(r10)
+	  35: GETL       	R10, t28
+	  36: ADDL       	$0xFFFFFFC4, t28
+	  37: LDL       	(t28), t30
+	  38: PUTL       	t30, R17
+	  39: INCEIPL       	$4
+
+	0x2547BDA8:  824AFFC8  lwz r18,-56(r10)
+	  40: GETL       	R10, t32
+	  41: ADDL       	$0xFFFFFFC8, t32
+	  42: LDL       	(t32), t34
+	  43: PUTL       	t34, R18
+	  44: INCEIPL       	$4
+
+	0x2547BDAC:  826AFFCC  lwz r19,-52(r10)
+	  45: GETL       	R10, t36
+	  46: ADDL       	$0xFFFFFFCC, t36
+	  47: LDL       	(t36), t38
+	  48: PUTL       	t38, R19
+	  49: INCEIPL       	$4
+
+	0x2547BDB0:  828AFFD0  lwz r20,-48(r10)
+	  50: GETL       	R10, t40
+	  51: ADDL       	$0xFFFFFFD0, t40
+	  52: LDL       	(t40), t42
+	  53: PUTL       	t42, R20
+	  54: INCEIPL       	$4
+
+	0x2547BDB4:  82AAFFD4  lwz r21,-44(r10)
+	  55: GETL       	R10, t44
+	  56: ADDL       	$0xFFFFFFD4, t44
+	  57: LDL       	(t44), t46
+	  58: PUTL       	t46, R21
+	  59: INCEIPL       	$4
+
+	0x2547BDB8:  82CAFFD8  lwz r22,-40(r10)
+	  60: GETL       	R10, t48
+	  61: ADDL       	$0xFFFFFFD8, t48
+	  62: LDL       	(t48), t50
+	  63: PUTL       	t50, R22
+	  64: INCEIPL       	$4
+
+	0x2547BDBC:  82EAFFDC  lwz r23,-36(r10)
+	  65: GETL       	R10, t52
+	  66: ADDL       	$0xFFFFFFDC, t52
+	  67: LDL       	(t52), t54
+	  68: PUTL       	t54, R23
+	  69: INCEIPL       	$4
+
+	0x2547BDC0:  830AFFE0  lwz r24,-32(r10)
+	  70: GETL       	R10, t56
+	  71: ADDL       	$0xFFFFFFE0, t56
+	  72: LDL       	(t56), t58
+	  73: PUTL       	t58, R24
+	  74: INCEIPL       	$4
+
+	0x2547BDC4:  832AFFE4  lwz r25,-28(r10)
+	  75: GETL       	R10, t60
+	  76: ADDL       	$0xFFFFFFE4, t60
+	  77: LDL       	(t60), t62
+	  78: PUTL       	t62, R25
+	  79: INCEIPL       	$4
+
+	0x2547BDC8:  834AFFE8  lwz r26,-24(r10)
+	  80: GETL       	R10, t64
+	  81: ADDL       	$0xFFFFFFE8, t64
+	  82: LDL       	(t64), t66
+	  83: PUTL       	t66, R26
+	  84: INCEIPL       	$4
+
+	0x2547BDCC:  836AFFEC  lwz r27,-20(r10)
+	  85: GETL       	R10, t68
+	  86: ADDL       	$0xFFFFFFEC, t68
+	  87: LDL       	(t68), t70
+	  88: PUTL       	t70, R27
+	  89: INCEIPL       	$4
+
+	0x2547BDD0:  838AFFF0  lwz r28,-16(r10)
+	  90: GETL       	R10, t72
+	  91: ADDL       	$0xFFFFFFF0, t72
+	  92: LDL       	(t72), t74
+	  93: PUTL       	t74, R28
+	  94: INCEIPL       	$4
+
+	0x2547BDD4:  83AAFFF4  lwz r29,-12(r10)
+	  95: GETL       	R10, t76
+	  96: ADDL       	$0xFFFFFFF4, t76
+	  97: LDL       	(t76), t78
+	  98: PUTL       	t78, R29
+	  99: INCEIPL       	$4
+
+	0x2547BDD8:  83CAFFF8  lwz r30,-8(r10)
+	 100: GETL       	R10, t80
+	 101: ADDL       	$0xFFFFFFF8, t80
+	 102: LDL       	(t80), t82
+	 103: PUTL       	t82, R30
+	 104: INCEIPL       	$4
+
+	0x2547BDDC:  83EAFFFC  lwz r31,-4(r10)
+	 105: GETL       	R10, t84
+	 106: ADDL       	$0xFFFFFFFC, t84
+	 107: LDL       	(t84), t86
+	 108: PUTL       	t86, R31
+	 109: INCEIPL       	$4
+
+	0x2547BDE0:  7D415378  or r1,r10,r10
+	 110: GETL       	R10, t88
+	 111: PUTL       	t88, R1
+	 112: INCEIPL       	$4
+
+	0x2547BDE4:  4E800020  blr
+	 113: GETL       	LR, t90
+	 114: JMPo-r       	t90  ($4)
+
+
+. 0 2547BD84 100
+. 81 41 00 00 81 CA 00 04 81 8A FF B4 7D C8 03 A6 81 EA FF BC 81 CA FF B8 7D 80 81 20 82 0A FF C0 82 2A FF C4 82 4A FF C8 82 6A FF CC 82 8A FF D0 82 AA FF D4 82 CA FF D8 82 EA FF DC 83 0A FF E0 83 2A FF E4 83 4A FF E8 83 6A FF EC 83 8A FF F0 83 AA FF F4 83 CA FF F8 83 EA FF FC 7D 41 53 78 4E 80 00 20
+
+==== BB 1473 (0xFEB3C54) approx BBs exec'd 0 ====
+
+	0xFEB3C54:  807F0000  lwz r3,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R3
+	   3: INCEIPL       	$4
+
+	0xFEB3C58:  80030000  lwz r0,0(r3)
+	   4: GETL       	R3, t4
+	   5: LDL       	(t4), t6
+	   6: PUTL       	t6, R0
+	   7: INCEIPL       	$4
+
+	0xFEB3C5C:  2F000000  cmpi cr6,r0,0
+	   8: GETL       	R0, t8
+	   9: CMP0L       	t8, t10  (-rSo)
+	  10: ICRFL       	t10, $0x6, CR
+	  11: INCEIPL       	$4
+
+	0xFEB3C60:  901F0000  stw r0,0(r31)
+	  12: GETL       	R0, t12
+	  13: GETL       	R31, t14
+	  14: STL       	t12, (t14)
+	  15: INCEIPL       	$4
+
+	0xFEB3C64:  419AFF64  bc 12,26,0xFEB3BC8
+	  16: Js26o       	$0xFEB3BC8
+
+
+. 0 FEB3C54 20
+. 80 7F 00 00 80 03 00 00 2F 00 00 00 90 1F 00 00 41 9A FF 64
+
+==== BB 1474 (0xFEB3BC8) approx BBs exec'd 0 ====
+
+	0xFEB3BC8:  2F800000  cmpi cr7,r0,0
+	   0: GETL       	R0, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x7, CR
+	   3: INCEIPL       	$4
+
+	0xFEB3BCC:  419E00CC  bc 12,30,0xFEB3C98
+	   4: Js30o       	$0xFEB3C98
+
+
+. 0 FEB3BC8 8
+. 2F 80 00 00 41 9E 00 CC
+
+==== BB 1475 (0xFEB3C98) approx BBs exec'd 0 ====
+
+	0xFEB3C98:  83FE1BBC  lwz r31,7100(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BBC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFEB3C9C:  83BE1C38  lwz r29,7224(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x1C38, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R29
+	   9: INCEIPL       	$4
+
+	0xFEB3CA0:  7F9FE840  cmpl cr7,r31,r29
+	  10: GETL       	R31, t8
+	  11: GETL       	R29, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x7, CR
+	  14: INCEIPL       	$4
+
+	0xFEB3CA4:  409C001C  bc 4,28,0xFEB3CC0
+	  15: Jc28o       	$0xFEB3CC0
+
+
+. 0 FEB3C98 16
+. 83 FE 1B BC 83 BE 1C 38 7F 9F E8 40 40 9C 00 1C
+
+==== BB 1476 (0xFEB3CA8) approx BBs exec'd 0 ====
+
+	0xFEB3CA8:  815F0000  lwz r10,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R10
+	   3: INCEIPL       	$4
+
+	0xFEB3CAC:  3BFF0004  addi r31,r31,4
+	   4: GETL       	R31, t4
+	   5: ADDL       	$0x4, t4
+	   6: PUTL       	t4, R31
+	   7: INCEIPL       	$4
+
+	0xFEB3CB0:  7D4903A6  mtctr r10
+	   8: GETL       	R10, t6
+	   9: PUTL       	t6, CTR
+	  10: INCEIPL       	$4
+
+	0xFEB3CB4:  4E800421  bctrl
+	  11: MOVL       	$0xFEB3CB8, t8
+	  12: PUTL       	t8, LR
+	  13: GETL       	CTR, t10
+	  14: JMPo-c       	t10  ($4)
+
+
+. 0 FEB3CA8 16
+. 81 5F 00 00 3B FF 00 04 7D 49 03 A6 4E 80 04 21
+
+==== BB 1477 _IO_cleanup(0xFEECB0C) approx BBs exec'd 0 ====
+
+	0xFEECB0C:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEECB10:  7C0802A6  mflr r0
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R0
+	   8: INCEIPL       	$4
+
+	0xFEECB14:  480DB33D  bl 0xFFC7E50
+	   9: MOVL       	$0xFEECB18, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FEECB0C 12
+. 94 21 FF E0 7C 08 02 A6 48 0D B3 3D
+
+==== BB 1478 (0xFEECB18) approx BBs exec'd 0 ====
+
+	0xFEECB18:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEECB1C:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEECB20:  38600000  li r3,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R3
+	  10: INCEIPL       	$4
+
+	0xFEECB24:  93810010  stw r28,16(r1)
+	  11: GETL       	R28, t8
+	  12: GETL       	R1, t10
+	  13: ADDL       	$0x10, t10
+	  14: STL       	t8, (t10)
+	  15: INCEIPL       	$4
+
+	0xFEECB28:  93E1001C  stw r31,28(r1)
+	  16: GETL       	R31, t12
+	  17: GETL       	R1, t14
+	  18: ADDL       	$0x1C, t14
+	  19: STL       	t12, (t14)
+	  20: INCEIPL       	$4
+
+	0xFEECB2C:  93A10014  stw r29,20(r1)
+	  21: GETL       	R29, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x14, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEECB30:  90010024  stw r0,36(r1)
+	  26: GETL       	R0, t20
+	  27: GETL       	R1, t22
+	  28: ADDL       	$0x24, t22
+	  29: STL       	t20, (t22)
+	  30: INCEIPL       	$4
+
+	0xFEECB34:  4BFFF98D  bl 0xFEEC4C0
+	  31: MOVL       	$0xFEECB38, t24
+	  32: PUTL       	t24, LR
+	  33: JMPo-c       	$0xFEEC4C0  ($4)
+
+
+. 0 FEECB18 32
+. 93 C1 00 18 7F C8 02 A6 38 60 00 00 93 81 00 10 93 E1 00 1C 93 A1 00 14 90 01 00 24 4B FF F9 8D
+
+==== BB 1479 _IO_flush_all_lockp(0xFEEC4C0) approx BBs exec'd 0 ====
+
+	0xFEEC4C0:  9421FFB0  stwu r1,-80(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFB0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFEEC4C4:  7C8802A6  mflr r4
+	   6: GETL       	LR, t4
+	   7: PUTL       	t4, R4
+	   8: INCEIPL       	$4
+
+	0xFEEC4C8:  480DB989  bl 0xFFC7E50
+	   9: MOVL       	$0xFEEC4CC, t6
+	  10: PUTL       	t6, LR
+	  11: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FEEC4C0 12
+. 94 21 FF B0 7C 88 02 A6 48 0D B9 89
+
+==== BB 1480 (0xFEEC4CC) approx BBs exec'd 0 ====
+
+	0xFEEC4CC:  93C10048  stw r30,72(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x48, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEEC4D0:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFEEC4D4:  92C10028  stw r22,40(r1)
+	   8: GETL       	R22, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x28, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFEEC4D8:  90810054  stw r4,84(r1)
+	  13: GETL       	R4, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x54, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFEEC4DC:  7D800026  mfcr r12
+	  18: GETL       	CR, t14
+	  19: PUTL       	t14, R12
+	  20: INCEIPL       	$4
+
+	0xFEEC4E0:  92E1002C  stw r23,44(r1)
+	  21: GETL       	R23, t16
+	  22: GETL       	R1, t18
+	  23: ADDL       	$0x2C, t18
+	  24: STL       	t16, (t18)
+	  25: INCEIPL       	$4
+
+	0xFEEC4E4:  3AE00000  li r23,0
+	  26: MOVL       	$0x0, t20
+	  27: PUTL       	t20, R23
+	  28: INCEIPL       	$4
+
+	0xFEEC4E8:  82DE1B48  lwz r22,6984(r30)
+	  29: GETL       	R30, t22
+	  30: ADDL       	$0x1B48, t22
+	  31: LDL       	(t22), t24
+	  32: PUTL       	t24, R22
+	  33: INCEIPL       	$4
+
+	0xFEEC4EC:  93E1004C  stw r31,76(r1)
+	  34: GETL       	R31, t26
+	  35: GETL       	R1, t28
+	  36: ADDL       	$0x4C, t28
+	  37: STL       	t26, (t28)
+	  38: INCEIPL       	$4
+
+	0xFEEC4F0:  7C7F1B78  or r31,r3,r3
+	  39: GETL       	R3, t30
+	  40: PUTL       	t30, R31
+	  41: INCEIPL       	$4
+
+	0xFEEC4F4:  813600B8  lwz r9,184(r22)
+	  42: GETL       	R22, t32
+	  43: ADDL       	$0xB8, t32
+	  44: LDL       	(t32), t34
+	  45: PUTL       	t34, R9
+	  46: INCEIPL       	$4
+
+	0xFEEC4F8:  93010030  stw r24,48(r1)
+	  47: GETL       	R24, t36
+	  48: GETL       	R1, t38
+	  49: ADDL       	$0x30, t38
+	  50: STL       	t36, (t38)
+	  51: INCEIPL       	$4
+
+	0xFEEC4FC:  3169FFFF  addic r11,r9,-1
+	  52: GETL       	R9, t40
+	  53: ADCL       	$0xFFFFFFFF, t40  (-wCa)
+	  54: PUTL       	t40, R11
+	  55: INCEIPL       	$4
+
+	0xFEEC500:  7C0B4910  subfe r0,r11,r9
+	  56: GETL       	R11, t42
+	  57: GETL       	R9, t44
+	  58: SBBL       	t42, t44  (-rCa-wCa)
+	  59: PUTL       	t44, R0
+	  60: INCEIPL       	$4
+
+	0xFEEC504:  93210034  stw r25,52(r1)
+	  61: GETL       	R25, t46
+	  62: GETL       	R1, t48
+	  63: ADDL       	$0x34, t48
+	  64: STL       	t46, (t48)
+	  65: INCEIPL       	$4
+
+	0xFEEC508:  2D800000  cmpi cr3,r0,0
+	  66: GETL       	R0, t50
+	  67: CMP0L       	t50, t52  (-rSo)
+	  68: ICRFL       	t52, $0x3, CR
+	  69: INCEIPL       	$4
+
+	0xFEEC50C:  93410038  stw r26,56(r1)
+	  70: GETL       	R26, t54
+	  71: GETL       	R1, t56
+	  72: ADDL       	$0x38, t56
+	  73: STL       	t54, (t56)
+	  74: INCEIPL       	$4
+
+	0xFEEC510:  9361003C  stw r27,60(r1)
+	  75: GETL       	R27, t58
+	  76: GETL       	R1, t60
+	  77: ADDL       	$0x3C, t60
+	  78: STL       	t58, (t60)
+	  79: INCEIPL       	$4
+
+	0xFEEC514:  93810040  stw r28,64(r1)
+	  80: GETL       	R28, t62
+	  81: GETL       	R1, t64
+	  82: ADDL       	$0x40, t64
+	  83: STL       	t62, (t64)
+	  84: INCEIPL       	$4
+
+	0xFEEC518:  93A10044  stw r29,68(r1)
+	  85: GETL       	R29, t66
+	  86: GETL       	R1, t68
+	  87: ADDL       	$0x44, t68
+	  88: STL       	t66, (t68)
+	  89: INCEIPL       	$4
+
+	0xFEEC51C:  91810024  stw r12,36(r1)
+	  90: GETL       	R12, t70
+	  91: GETL       	R1, t72
+	  92: ADDL       	$0x24, t72
+	  93: STL       	t70, (t72)
+	  94: INCEIPL       	$4
+
+	0xFEEC520:  408E0248  bc 4,14,0xFEEC768
+	  95: Jc14o       	$0xFEEC768
+
+
+. 0 FEEC4CC 88
+. 93 C1 00 48 7F C8 02 A6 92 C1 00 28 90 81 00 54 7D 80 00 26 92 E1 00 2C 3A E0 00 00 82 DE 1B 48 93 E1 00 4C 7C 7F 1B 78 81 36 00 B8 93 01 00 30 31 69 FF FF 7C 0B 49 10 93 21 00 34 2D 80 00 00 93 41 00 38 93 61 00 3C 93 81 00 40 93 A1 00 44 91 81 00 24 40 8E 02 48
+
+==== BB 1481 (0xFEEC524) approx BBs exec'd 0 ====
+
+	0xFEEC524:  807E05E4  lwz r3,1508(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5E4, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEEC528:  92E10014  stw r23,20(r1)
+	   5: GETL       	R23, t4
+	   6: GETL       	R1, t6
+	   7: ADDL       	$0x14, t6
+	   8: STL       	t4, (t6)
+	   9: INCEIPL       	$4
+
+	0xFEEC52C:  90610010  stw r3,16(r1)
+	  10: GETL       	R3, t8
+	  11: GETL       	R1, t10
+	  12: ADDL       	$0x10, t10
+	  13: STL       	t8, (t10)
+	  14: INCEIPL       	$4
+
+	0xFEEC530:  2E1F0000  cmpi cr4,r31,0
+	  15: GETL       	R31, t12
+	  16: CMP0L       	t12, t14  (-rSo)
+	  17: ICRFL       	t14, $0x4, CR
+	  18: INCEIPL       	$4
+
+	0xFEEC534:  41920050  bc 12,18,0xFEEC584
+	  19: Js18o       	$0xFEEC584
+
+
+. 0 FEEC524 20
+. 80 7E 05 E4 92 E1 00 14 90 61 00 10 2E 1F 00 00 41 92 00 50
+
+==== BB 1482 (0xFEEC584) approx BBs exec'd 0 ====
+
+	0xFEEC584:  831E1BEC  lwz r24,7148(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BEC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R24
+	   4: INCEIPL       	$4
+
+	0xFEEC588:  833E05EC  lwz r25,1516(r30)
+	   5: GETL       	R30, t4
+	   6: ADDL       	$0x5EC, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R25
+	   9: INCEIPL       	$4
+
+	0xFEEC58C:  83F80000  lwz r31,0(r24)
+	  10: GETL       	R24, t8
+	  11: LDL       	(t8), t10
+	  12: PUTL       	t10, R31
+	  13: INCEIPL       	$4
+
+	0xFEEC590:  83990000  lwz r28,0(r25)
+	  14: GETL       	R25, t12
+	  15: LDL       	(t12), t14
+	  16: PUTL       	t14, R28
+	  17: INCEIPL       	$4
+
+	0xFEEC594:  2C9F0000  cmpi cr1,r31,0
+	  18: GETL       	R31, t16
+	  19: CMP0L       	t16, t18  (-rSo)
+	  20: ICRFL       	t18, $0x1, CR
+	  21: INCEIPL       	$4
+
+	0xFEEC598:  41860128  bc 12,6,0xFEEC6C0
+	  22: Js06o       	$0xFEEC6C0
+
+
+. 0 FEEC584 24
+. 83 1E 1B EC 83 3E 05 EC 83 F8 00 00 83 99 00 00 2C 9F 00 00 41 86 01 28
+
+==== BB 1483 (0xFEEC59C) approx BBs exec'd 0 ====
+
+	0xFEEC59C:  835E05E8  lwz r26,1512(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x5E8, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R26
+	   4: INCEIPL       	$4
+
+	0xFEEC5A0:  3B600000  li r27,0
+	   5: MOVL       	$0x0, t4
+	   6: PUTL       	t4, R27
+	   7: INCEIPL       	$4
+
+	0xFEEC5A4:  93FA0000  stw r31,0(r26)
+	   8: GETL       	R31, t6
+	   9: GETL       	R26, t8
+	  10: STL       	t6, (t8)
+	  11: INCEIPL       	$4
+
+	0xFEEC5A8:  4192005C  bc 12,18,0xFEEC604
+	  12: Js18o       	$0xFEEC604
+
+
+. 0 FEEC59C 16
+. 83 5E 05 E8 3B 60 00 00 93 FA 00 00 41 92 00 5C
+
+==== BB 1484 (0xFEEC604) approx BBs exec'd 0 ====
+
+	0xFEEC604:  807F0060  lwz r3,96(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R3
+	   4: INCEIPL       	$4
+
+	0xFEEC608:  2F030000  cmpi cr6,r3,0
+	   5: GETL       	R3, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x6, CR
+	   8: INCEIPL       	$4
+
+	0xFEEC60C:  40990144  bc 4,25,0xFEEC750
+	   9: Jc25o       	$0xFEEC750
+
+
+. 0 FEEC604 12
+. 80 7F 00 60 2F 03 00 00 40 99 01 44
+
+==== BB 1485 (0xFEEC750) approx BBs exec'd 0 ====
+
+	0xFEEC750:  817F0014  lwz r11,20(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x14, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R11
+	   4: INCEIPL       	$4
+
+	0xFEEC754:  813F0010  lwz r9,16(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x10, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R9
+	   9: INCEIPL       	$4
+
+	0xFEEC758:  7C8B4840  cmpl cr1,r11,r9
+	  10: GETL       	R11, t8
+	  11: GETL       	R9, t10
+	  12: CMPUL       	t8, t10, t12  (-rSo)
+	  13: ICRFL       	t12, $0x1, CR
+	  14: INCEIPL       	$4
+
+	0xFEEC75C:  40A5FEB4  bc 5,5,0xFEEC610
+	  15: Jc05o       	$0xFEEC610
+
+
+. 0 FEEC750 16
+. 81 7F 00 14 81 3F 00 10 7C 8B 48 40 40 A5 FE B4
+
+==== BB 1486 (0xFEEC610) approx BBs exec'd 0 ====
+
+	0xFEEC610:  895F0046  lbz r10,70(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x46, t0
+	   2: LDB       	(t0), t2
+	   3: PUTL       	t2, R10
+	   4: INCEIPL       	$4
+
+	0xFEEC614:  2C0A0000  cmpi cr0,r10,0
+	   5: GETL       	R10, t4
+	   6: CMP0L       	t4, t6  (-rSo)
+	   7: ICRFL       	t6, $0x0, CR
+	   8: INCEIPL       	$4
+
+	0xFEEC618:  40820044  bc 4,2,0xFEEC65C
+	   9: Jc02o       	$0xFEEC65C
+
+
+. 0 FEEC610 12
+. 89 5F 00 46 2C 0A 00 00 40 82 00 44
+
+==== BB 1487 (0xFEEC61C) approx BBs exec'd 0 ====
+
+	0xFEEC61C:  40990040  bc 4,25,0xFEEC65C
+	   0: Jc25o       	$0xFEEC65C
+
+
+. 0 FEEC61C 4
+. 40 99 00 40
+
+==== BB 1488 (0xFEEC65C) approx BBs exec'd 0 ====
+
+	0xFEEC65C:  41920044  bc 12,18,0xFEEC6A0
+	   0: Js18o       	$0xFEEC6A0
+
+
+. 0 FEEC65C 4
+. 41 92 00 44
+
+==== BB 1489 (0xFEEC6A0) approx BBs exec'd 0 ====
+
+	0xFEEC6A0:  80190000  lwz r0,0(r25)
+	   0: GETL       	R25, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEEC6A4:  937A0000  stw r27,0(r26)
+	   4: GETL       	R27, t4
+	   5: GETL       	R26, t6
+	   6: STL       	t4, (t6)
+	   7: INCEIPL       	$4
+
+	0xFEEC6A8:  7F9C0000  cmp cr7,r28,r0
+	   8: GETL       	R28, t8
+	   9: GETL       	R0, t10
+	  10: CMPL       	t8, t10, t12  (-rSo)
+	  11: ICRFL       	t12, $0x7, CR
+	  12: INCEIPL       	$4
+
+	0xFEEC6AC:  419E009C  bc 12,30,0xFEEC748
+	  13: Js30o       	$0xFEEC748
+
+
+. 0 FEEC6A0 16
+. 80 19 00 00 93 7A 00 00 7F 9C 00 00 41 9E 00 9C
+
+==== BB 1490 (0xFEEC748) approx BBs exec'd 0 ====
+
+	0xFEEC748:  83FF0034  lwz r31,52(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x34, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R31
+	   4: INCEIPL       	$4
+
+	0xFEEC74C:  4BFFFF6C  b 0xFEEC6B8
+	   5: JMPo       	$0xFEEC6B8  ($4)
+
+
+. 0 FEEC748 8
+. 83 FF 00 34 4B FF FF 6C
+
+==== BB 1491 (0xFEEC6B8) approx BBs exec'd 0 ====
+
+	0xFEEC6B8:  2C1F0000  cmpi cr0,r31,0
+	   0: GETL       	R31, t0
+	   1: CMP0L       	t0, t2  (-rSo)
+	   2: ICRFL       	t2, $0x0, CR
+	   3: INCEIPL       	$4
+
+	0xFEEC6BC:  4082FEE8  bc 4,2,0xFEEC5A4
+	   4: Jc02o       	$0xFEEC5A4
+
+
+. 0 FEEC6B8 8
+. 2C 1F 00 00 40 82 FE E8
+
+==== BB 1492 (0xFEEC5A4) approx BBs exec'd 0 ====
+
+	0xFEEC5A4:  93FA0000  stw r31,0(r26)
+	   0: GETL       	R31, t0
+	   1: GETL       	R26, t2
+	   2: STL       	t0, (t2)
+	   3: INCEIPL       	$4
+
+	0xFEEC5A8:  4192005C  bc 12,18,0xFEEC604
+	   4: Js18o       	$0xFEEC604
+
+
+. 0 FEEC5A4 8
+. 93 FA 00 00 41 92 00 5C
+
+==== BB 1493 (0xFEEC6C0) approx BBs exec'd 0 ====
+
+	0xFEEC6C0:  41920038  bc 12,18,0xFEEC6F8
+	   0: Js18o       	$0xFEEC6F8
+
+
+. 0 FEEC6C0 4
+. 41 92 00 38
+
+==== BB 1494 (0xFEEC6F8) approx BBs exec'd 0 ====
+
+	0xFEEC6F8:  408E0088  bc 4,14,0xFEEC780
+	   0: Jc14o       	$0xFEEC780
+
+
+. 0 FEEC6F8 4
+. 40 8E 00 88
+
+==== BB 1495 (0xFEEC6FC) approx BBs exec'd 0 ====
+
+	0xFEEC6FC:  7EE3BB78  or r3,r23,r23
+	   0: GETL       	R23, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEEC700:  81810024  lwz r12,36(r1)
+	   3: GETL       	R1, t2
+	   4: ADDL       	$0x24, t2
+	   5: LDL       	(t2), t4
+	   6: PUTL       	t4, R12
+	   7: INCEIPL       	$4
+
+	0xFEEC704:  82E10054  lwz r23,84(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x54, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R23
+	  12: INCEIPL       	$4
+
+	0xFEEC708:  82C10028  lwz r22,40(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x28, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R22
+	  17: INCEIPL       	$4
+
+	0xFEEC70C:  7D818120  mtcrf 0x18,r12
+	  18: GETL       	R12, t14
+	  19: ICRFL       	t14, $0x3, CR
+	  20: ICRFL       	t14, $0x4, CR
+	  21: INCEIPL       	$4
+
+	0xFEEC710:  7EE803A6  mtlr r23
+	  22: GETL       	R23, t16
+	  23: PUTL       	t16, LR
+	  24: INCEIPL       	$4
+
+	0xFEEC714:  83010030  lwz r24,48(r1)
+	  25: GETL       	R1, t18
+	  26: ADDL       	$0x30, t18
+	  27: LDL       	(t18), t20
+	  28: PUTL       	t20, R24
+	  29: INCEIPL       	$4
+
+	0xFEEC718:  82E1002C  lwz r23,44(r1)
+	  30: GETL       	R1, t22
+	  31: ADDL       	$0x2C, t22
+	  32: LDL       	(t22), t24
+	  33: PUTL       	t24, R23
+	  34: INCEIPL       	$4
+
+	0xFEEC71C:  83210034  lwz r25,52(r1)
+	  35: GETL       	R1, t26
+	  36: ADDL       	$0x34, t26
+	  37: LDL       	(t26), t28
+	  38: PUTL       	t28, R25
+	  39: INCEIPL       	$4
+
+	0xFEEC720:  83410038  lwz r26,56(r1)
+	  40: GETL       	R1, t30
+	  41: ADDL       	$0x38, t30
+	  42: LDL       	(t30), t32
+	  43: PUTL       	t32, R26
+	  44: INCEIPL       	$4
+
+	0xFEEC724:  8361003C  lwz r27,60(r1)
+	  45: GETL       	R1, t34
+	  46: ADDL       	$0x3C, t34
+	  47: LDL       	(t34), t36
+	  48: PUTL       	t36, R27
+	  49: INCEIPL       	$4
+
+	0xFEEC728:  83810040  lwz r28,64(r1)
+	  50: GETL       	R1, t38
+	  51: ADDL       	$0x40, t38
+	  52: LDL       	(t38), t40
+	  53: PUTL       	t40, R28
+	  54: INCEIPL       	$4
+
+	0xFEEC72C:  83A10044  lwz r29,68(r1)
+	  55: GETL       	R1, t42
+	  56: ADDL       	$0x44, t42
+	  57: LDL       	(t42), t44
+	  58: PUTL       	t44, R29
+	  59: INCEIPL       	$4
+
+	0xFEEC730:  83C10048  lwz r30,72(r1)
+	  60: GETL       	R1, t46
+	  61: ADDL       	$0x48, t46
+	  62: LDL       	(t46), t48
+	  63: PUTL       	t48, R30
+	  64: INCEIPL       	$4
+
+	0xFEEC734:  83E1004C  lwz r31,76(r1)
+	  65: GETL       	R1, t50
+	  66: ADDL       	$0x4C, t50
+	  67: LDL       	(t50), t52
+	  68: PUTL       	t52, R31
+	  69: INCEIPL       	$4
+
+	0xFEEC738:  38210050  addi r1,r1,80
+	  70: GETL       	R1, t54
+	  71: ADDL       	$0x50, t54
+	  72: PUTL       	t54, R1
+	  73: INCEIPL       	$4
+
+	0xFEEC73C:  4E800020  blr
+	  74: GETL       	LR, t56
+	  75: JMPo-r       	t56  ($4)
+
+
+. 0 FEEC6FC 68
+. 7E E3 BB 78 81 81 00 24 82 E1 00 54 82 C1 00 28 7D 81 81 20 7E E8 03 A6 83 01 00 30 82 E1 00 2C 83 21 00 34 83 41 00 38 83 61 00 3C 83 81 00 40 83 A1 00 44 83 C1 00 48 83 E1 00 4C 38 21 00 50 4E 80 00 20
+
+==== BB 1496 (0xFEECB38) approx BBs exec'd 0 ====
+
+	0xFEECB38:  813E1BEC  lwz r9,7148(r30)
+	   0: GETL       	R30, t0
+	   1: ADDL       	$0x1BEC, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R9
+	   4: INCEIPL       	$4
+
+	0xFEECB3C:  7C7C1B78  or r28,r3,r3
+	   5: GETL       	R3, t4
+	   6: PUTL       	t4, R28
+	   7: INCEIPL       	$4
+
+	0xFEECB40:  83E90000  lwz r31,0(r9)
+	   8: GETL       	R9, t6
+	   9: LDL       	(t6), t8
+	  10: PUTL       	t8, R31
+	  11: INCEIPL       	$4
+
+	0xFEECB44:  2F9F0000  cmpi cr7,r31,0
+	  12: GETL       	R31, t10
+	  13: CMP0L       	t10, t12  (-rSo)
+	  14: ICRFL       	t12, $0x7, CR
+	  15: INCEIPL       	$4
+
+	0xFEECB48:  419E0080  bc 12,30,0xFEECBC8
+	  16: Js30o       	$0xFEECBC8
+
+
+. 0 FEECB38 20
+. 81 3E 1B EC 7C 7C 1B 78 83 E9 00 00 2F 9F 00 00 41 9E 00 80
+
+==== BB 1497 (0xFEECB4C) approx BBs exec'd 0 ====
+
+	0xFEECB4C:  3BA0FFFF  li r29,-1
+	   0: MOVL       	$0xFFFFFFFF, t0
+	   1: PUTL       	t0, R29
+	   2: INCEIPL       	$4
+
+	0xFEECB50:  48000014  b 0xFEECB64
+	   3: JMPo       	$0xFEECB64  ($4)
+
+
+. 0 FEECB4C 8
+. 3B A0 FF FF 48 00 00 14
+
+==== BB 1498 (0xFEECB64) approx BBs exec'd 0 ====
+
+	0xFEECB64:  801F0000  lwz r0,0(r31)
+	   0: GETL       	R31, t0
+	   1: LDL       	(t0), t2
+	   2: PUTL       	t2, R0
+	   3: INCEIPL       	$4
+
+	0xFEECB68:  70090002  andi. r9,r0,0x2
+	   4: GETL       	R0, t4
+	   5: ANDL       	$0x2, t4
+	   6: PUTL       	t4, R9
+	   7: CMP0L       	t4, t6  (-rSo)
+	   8: ICRFL       	t6, $0x0, CR
+	   9: INCEIPL       	$4
+
+	0xFEECB6C:  5403EFFE  rlwinm r3,r0,29,31,31
+	  10: GETL       	R0, t8
+	  11: ROLL       	$0x1D, t8
+	  12: ANDL       	$0x1, t8
+	  13: PUTL       	t8, R3
+	  14: INCEIPL       	$4
+
+	0xFEECB70:  2F830000  cmpi cr7,r3,0
+	  15: GETL       	R3, t10
+	  16: CMP0L       	t10, t12  (-rSo)
+	  17: ICRFL       	t12, $0x7, CR
+	  18: INCEIPL       	$4
+
+	0xFEECB74:  40A2FFE0  bc 5,2,0xFEECB54
+	  19: Jc02o       	$0xFEECB54
+
+
+. 0 FEECB64 20
+. 80 1F 00 00 70 09 00 02 54 03 EF FE 2F 83 00 00 40 A2 FF E0
+
+==== BB 1499 (0xFEECB54) approx BBs exec'd 0 ====
+
+	0xFEECB54:  93BF0060  stw r29,96(r31)
+	   0: GETL       	R29, t0
+	   1: GETL       	R31, t2
+	   2: ADDL       	$0x60, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFEECB58:  83FF0034  lwz r31,52(r31)
+	   5: GETL       	R31, t4
+	   6: ADDL       	$0x34, t4
+	   7: LDL       	(t4), t6
+	   8: PUTL       	t6, R31
+	   9: INCEIPL       	$4
+
+	0xFEECB5C:  2F1F0000  cmpi cr6,r31,0
+	  10: GETL       	R31, t8
+	  11: CMP0L       	t8, t10  (-rSo)
+	  12: ICRFL       	t10, $0x6, CR
+	  13: INCEIPL       	$4
+
+	0xFEECB60:  419A0068  bc 12,26,0xFEECBC8
+	  14: Js26o       	$0xFEECBC8
+
+
+. 0 FEECB54 16
+. 93 BF 00 60 83 FF 00 34 2F 1F 00 00 41 9A 00 68
+
+==== BB 1500 (0xFEECB78) approx BBs exec'd 0 ====
+
+	0xFEECB78:  70091000  andi. r9,r0,0x1000
+	   0: GETL       	R0, t0
+	   1: ANDL       	$0x1000, t0
+	   2: PUTL       	t0, R9
+	   3: CMP0L       	t0, t2  (-rSo)
+	   4: ICRFL       	t2, $0x0, CR
+	   5: INCEIPL       	$4
+
+	0xFEECB7C:  419E0008  bc 12,30,0xFEECB84
+	   6: Js30o       	$0xFEECB84
+
+
+. 0 FEECB78 8
+. 70 09 10 00 41 9E 00 08
+
+==== BB 1501 (0xFEECB84) approx BBs exec'd 0 ====
+
+	0xFEECB84:  80DF0060  lwz r6,96(r31)
+	   0: GETL       	R31, t0
+	   1: ADDL       	$0x60, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R6
+	   4: INCEIPL       	$4
+
+	0xFEECB88:  7FE3FB78  or r3,r31,r31
+	   5: GETL       	R31, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEECB8C:  38800000  li r4,0
+	   8: MOVL       	$0x0, t6
+	   9: PUTL       	t6, R4
+	  10: INCEIPL       	$4
+
+	0xFEECB90:  38A00000  li r5,0
+	  11: MOVL       	$0x0, t8
+	  12: PUTL       	t8, R5
+	  13: INCEIPL       	$4
+
+	0xFEECB94:  2C860000  cmpi cr1,r6,0
+	  14: GETL       	R6, t10
+	  15: CMP0L       	t10, t12  (-rSo)
+	  16: ICRFL       	t12, $0x1, CR
+	  17: INCEIPL       	$4
+
+	0xFEECB98:  4186FFBC  bc 12,6,0xFEECB54
+	  18: Js06o       	$0xFEECB54
+
+
+. 0 FEECB84 24
+. 80 DF 00 60 7F E3 FB 78 38 80 00 00 38 A0 00 00 2C 86 00 00 41 86 FF BC
+
+==== BB 1502 (0xFEECB80) approx BBs exec'd 0 ====
+
+	0xFEECB80:  41A2FFD4  bc 13,2,0xFEECB54
+	   0: Js02o       	$0xFEECB54
+
+
+. 0 FEECB80 4
+. 41 A2 FF D4
+
+==== BB 1503 (0xFEECBC8) approx BBs exec'd 0 ====
+
+	0xFEECBC8:  80810024  lwz r4,36(r1)
+	   0: GETL       	R1, t0
+	   1: ADDL       	$0x24, t0
+	   2: LDL       	(t0), t2
+	   3: PUTL       	t2, R4
+	   4: INCEIPL       	$4
+
+	0xFEECBCC:  7F83E378  or r3,r28,r28
+	   5: GETL       	R28, t4
+	   6: PUTL       	t4, R3
+	   7: INCEIPL       	$4
+
+	0xFEECBD0:  83A10014  lwz r29,20(r1)
+	   8: GETL       	R1, t6
+	   9: ADDL       	$0x14, t6
+	  10: LDL       	(t6), t8
+	  11: PUTL       	t8, R29
+	  12: INCEIPL       	$4
+
+	0xFEECBD4:  83810010  lwz r28,16(r1)
+	  13: GETL       	R1, t10
+	  14: ADDL       	$0x10, t10
+	  15: LDL       	(t10), t12
+	  16: PUTL       	t12, R28
+	  17: INCEIPL       	$4
+
+	0xFEECBD8:  7C8803A6  mtlr r4
+	  18: GETL       	R4, t14
+	  19: PUTL       	t14, LR
+	  20: INCEIPL       	$4
+
+	0xFEECBDC:  83C10018  lwz r30,24(r1)
+	  21: GETL       	R1, t16
+	  22: ADDL       	$0x18, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R30
+	  25: INCEIPL       	$4
+
+	0xFEECBE0:  83E1001C  lwz r31,28(r1)
+	  26: GETL       	R1, t20
+	  27: ADDL       	$0x1C, t20
+	  28: LDL       	(t20), t22
+	  29: PUTL       	t22, R31
+	  30: INCEIPL       	$4
+
+	0xFEECBE4:  38210020  addi r1,r1,32
+	  31: GETL       	R1, t24
+	  32: ADDL       	$0x20, t24
+	  33: PUTL       	t24, R1
+	  34: INCEIPL       	$4
+
+	0xFEECBE8:  4E800020  blr
+	  35: GETL       	LR, t26
+	  36: JMPo-r       	t26  ($4)
+
+
+. 0 FEECBC8 36
+. 80 81 00 24 7F 83 E3 78 83 A1 00 14 83 81 00 10 7C 88 03 A6 83 C1 00 18 83 E1 00 1C 38 21 00 20 4E 80 00 20
+
+==== BB 1504 (0xFEB3CB8) approx BBs exec'd 0 ====
+
+	0xFEB3CB8:  7F9FE840  cmpl cr7,r31,r29
+	   0: GETL       	R31, t0
+	   1: GETL       	R29, t2
+	   2: CMPUL       	t0, t2, t4  (-rSo)
+	   3: ICRFL       	t4, $0x7, CR
+	   4: INCEIPL       	$4
+
+	0xFEB3CBC:  419CFFEC  bc 12,28,0xFEB3CA8
+	   5: Js28o       	$0xFEB3CA8
+
+
+. 0 FEB3CB8 8
+. 7F 9F E8 40 41 9C FF EC
+
+==== BB 1505 (0xFEB3CC0) approx BBs exec'd 0 ====
+
+	0xFEB3CC0:  7F83E378  or r3,r28,r28
+	   0: GETL       	R28, t0
+	   1: PUTL       	t0, R3
+	   2: INCEIPL       	$4
+
+	0xFEB3CC4:  48062141  bl 0xFF15E04
+	   3: MOVL       	$0xFEB3CC8, t2
+	   4: PUTL       	t2, LR
+	   5: JMPo-c       	$0xFF15E04  ($4)
+
+
+. 0 FEB3CC0 8
+. 7F 83 E3 78 48 06 21 41
+
+==== BB 1506 __GI__exit(0xFF15E04) approx BBs exec'd 0 ====
+
+	0xFF15E04:  9421FFE0  stwu r1,-32(r1)
+	   0: GETL       	R1, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0xFFFFFFE0, t2
+	   3: PUTL       	t2, R1
+	   4: STL       	t0, (t2)
+	   5: INCEIPL       	$4
+
+	0xFF15E08:  480B2049  bl 0xFFC7E50
+	   6: MOVL       	$0xFF15E0C, t4
+	   7: PUTL       	t4, LR
+	   8: JMPo-c       	$0xFFC7E50  ($4)
+
+
+. 0 FF15E04 8
+. 94 21 FF E0 48 0B 20 49
+
+==== BB 1507 (0xFF15E0C) approx BBs exec'd 0 ====
+
+	0xFF15E0C:  93C10018  stw r30,24(r1)
+	   0: GETL       	R30, t0
+	   1: GETL       	R1, t2
+	   2: ADDL       	$0x18, t2
+	   3: STL       	t0, (t2)
+	   4: INCEIPL       	$4
+
+	0xFF15E10:  7FC802A6  mflr r30
+	   5: GETL       	LR, t4
+	   6: PUTL       	t4, R30
+	   7: INCEIPL       	$4
+
+	0xFF15E14:  93A10014  stw r29,20(r1)
+	   8: GETL       	R29, t6
+	   9: GETL       	R1, t8
+	  10: ADDL       	$0x14, t8
+	  11: STL       	t6, (t8)
+	  12: INCEIPL       	$4
+
+	0xFF15E18:  93E1001C  stw r31,28(r1)
+	  13: GETL       	R31, t10
+	  14: GETL       	R1, t12
+	  15: ADDL       	$0x1C, t12
+	  16: STL       	t10, (t12)
+	  17: INCEIPL       	$4
+
+	0xFF15E1C:  7C7F1B78  or r31,r3,r3
+	  18: GETL       	R3, t14
+	  19: PUTL       	t14, R31
+	  20: INCEIPL       	$4
+
+	0xFF15E20:  813E1C4C  lwz r9,7244(r30)
+	  21: GETL       	R30, t16
+	  22: ADDL       	$0x1C4C, t16
+	  23: LDL       	(t16), t18
+	  24: PUTL       	t18, R9
+	  25: INCEIPL       	$4
+
+	0xFF15E24:  7FA91214  add r29,r9,r2
+	  26: GETL       	R9, t20
+	  27: GETL       	R2, t22
+	  28: ADDL       	t20, t22
+	  29: PUTL       	t22, R29
+	  30: INCEIPL       	$4
+
+	0xFF15E28:  380000EA  li r0,234
+	  31: MOVL       	$0xEA, t24
+	  32: PUTL       	t24, R0
+	  33: INCEIPL       	$4
+
+	0xFF15E2C:  7FE3FB78  or r3,r31,r31
+	  34: GETL       	R31, t26
+	  35: PUTL       	t26, R3
+	  36: INCEIPL       	$4
+
+	0xFF15E30:  44000002  sc
+	  37: JMPo-sys       	$0xFF15E34  ($4)
+
+
+. 0 FF15E0C 40
+. 93 C1 00 18 7F C8 02 A6 93 A1 00 14 93 E1 00 1C 7C 7F 1B 78 81 3E 1C 4C 7F A9 12 14 38 00 00 EA 7F E3 FB 78 44 00 00 02
+
diff --git a/VEX/orig_x86/exit42.orig b/VEX/orig_x86/exit42.orig
new file mode 100644
index 0000000..a2a3285
--- /dev/null
+++ b/VEX/orig_x86/exit42.orig
@@ -0,0 +1,12580 @@
+==6382== Nulgrind, a binary JIT-compiler for x86-linux.
+==6382== Copyright (C) 2002-2004, and GNU GPL'd, by Nicholas Nethercote.
+==6382== Using valgrind-2.1.2.CVS, a program supervision framework for x86-linux.
+==6382== Copyright (C) 2000-2004, and GNU GPL'd, by Julian Seward.
+==6382== For more details, rerun with: -v
+==6382== 
+==== BB 0 (0x3A965880) in 0B, out 0B, BBs exec'd 0 ====
+	0x3A965880:  movl %esp,%eax
+	0x3A965882:  call 0x3A965C80
+. 0 3A965880 7
+. 89 E0 E8 F9 03 00 00 
+
+==== BB 1 _dl_start(0x3A965C80) in 7B, out 55B, BBs exec'd 0 ====
+	0x3A965C80:  pushl %ebp
+	0x3A965C81:  movl %esp,%ebp
+	0x3A965C83:  pushl %edi
+	0x3A965C84:  pushl %esi
+	0x3A965C85:  pushl %ebx
+	0x3A965C86:  subl $0x54, %esp
+	0x3A965C89:  call 0x3A97592B
+. 1 3A965C80 14
+. 55 89 E5 57 56 53 83 EC 54 E8 9D FC 00 00 
+
+==== BB 2 (0x3A97592B) in 21B, out 188B, BBs exec'd 0 ====
+	0x3A97592B:  movl (%esp,,),%ebx
+	0x3A97592E:  ret
+. 2 3A97592B 4
+. 8B 1C 24 C3 
+
+==== BB 3 (0x3A965C8E) in 25B, out 228B, BBs exec'd 0 ====
+	0x3A965C8E:  addl $0x129DE, %ebx
+	0x3A965C94:  movl %eax,-48(%ebp)
+	0x3A965C97:  movl 0x0(%ebx),%edx
+	0x3A965C9D:  leal 0xFFFFFF50(%ebx), %eax
+	0x3A965CA3:  subl %edx,%eax
+	0x3A965CA5:  movl %eax,0xFFFFF9E4(%ebx)
+	0x3A965CAB:  addl %edx,%eax
+	0x3A965CAD:  movl %eax,0xFFFFF9EC(%ebx)
+	0x3A965CB3:  leal 0xFFFFF994(%ebx), %eax
+	0x3A965CB9:  movl %eax,-56(%ebp)
+	0x3A965CBC:  addl $0x50, %eax
+	0x3A965CBF:  movl -56(%ebp),%edi
+	0x3A965CC2:  movl %eax,-52(%ebp)
+	0x3A965CC5:  movl 8(%eax),%esi
+	0x3A965CC8:  addl $0x68, %edi
+	0x3A965CCB:  movl (%esi),%edx
+	0x3A965CCD:  testl %edx,%edx
+	0x3A965CCF:  jnz-8 0x3A965CE1
+. 3 3A965C8E 67
+. 81 C3 DE 29 01 00 89 45 D0 8B 93 00 00 00 00 8D 83 50 FF FF FF 29 D0 89 83 E4 F9 FF FF 01 D0 89 83 EC F9 FF FF 8D 83 94 F9 FF FF 89 45 C8 83 C0 50 8B 7D C8 89 45 CC 8B 70 08 83 C7 68 8B 16 85 D2 75 10 
+
+==== BB 4 (0x3A965CE1) in 92B, out 457B, BBs exec'd 0 ====
+	0x3A965CE1:  cmpl $0x21, %edx
+	0x3A965CE4:  jle-8 0x3A965CD3
+. 4 3A965CE1 5
+. 83 FA 21 7E ED 
+
+==== BB 5 (0x3A965CD3) in 97B, out 514B, BBs exec'd 0 ====
+	0x3A965CD3:  movl %esi,(%edi,%edx,4)
+	0x3A965CD6:  addl $0x8, %esi
+	0x3A965CD9:  movl (%esi),%eax
+	0x3A965CDB:  testl %eax,%eax
+	0x3A965CDD:  movl %eax,%edx
+	0x3A965CDF:  jz-8 0x3A965D10
+. 5 3A965CD3 14
+. 89 34 97 83 C6 08 8B 06 85 C0 89 C2 74 2F 
+
+==== BB 6 (0x3A965CE6) in 111B, out 617B, BBs exec'd 0 ====
+	0x3A965CE6:  movl $0x6FFFFFFF,%eax
+	0x3A965CEB:  subl %edx,%eax
+	0x3A965CED:  cmpl $0xF, %eax
+	0x3A965CF0:  jnbe-32 0x3A965EE0
+. 6 3A965CE6 16
+. B8 FF FF FF 6F 29 D0 83 F8 0F 0F 87 EA 01 00 00 
+
+==== BB 7 (0x3A965CF6) in 127B, out 690B, BBs exec'd 0 ====
+	0x3A965CF6:  movl $0x70000021,%eax
+	0x3A965CFB:  subl %edx,%eax
+	0x3A965CFD:  movl %esi,(%edi,%eax,4)
+	0x3A965D00:  addl $0x8, %esi
+	0x3A965D03:  movl (%esi),%eax
+	0x3A965D05:  testl %eax,%eax
+	0x3A965D07:  movl %eax,%edx
+	0x3A965D09:  jnz-8 0x3A965CE1
+. 7 3A965CF6 21
+. B8 21 00 00 70 29 D0 89 34 87 83 C6 08 8B 06 85 C0 89 C2 75 D6 
+
+==== BB 8 (0x3A965D0B) in 148B, out 809B, BBs exec'd 0 ====
+	0x3A965D0B:  nop
+	0x3A965D0C:  leal 0(%esi,,), %esi
+	0x3A965D10:  movl -52(%ebp),%ecx
+	0x3A965D13:  movl (%ecx),%edx
+	0x3A965D15:  testl %edx,%edx
+	0x3A965D17:  jz-8 0x3A965D70
+. 8 3A965D0B 14
+. 90 8D 74 26 00 8B 4D CC 8B 11 85 D2 74 57 
+
+==== BB 9 (0x3A965D19) in 162B, out 908B, BBs exec'd 0 ====
+	0x3A965D19:  movl 16(%edi),%eax
+	0x3A965D1C:  testl %eax,%eax
+	0x3A965D1E:  jz-8 0x3A965D23
+. 9 3A965D19 7
+. 8B 47 10 85 C0 74 03 
+
+==== BB 10 (0x3A965D20) in 169B, out 980B, BBs exec'd 0 ====
+	0x3A965D20:  addl %edx,4(%eax)
+	0x3A965D23:  movl 12(%edi),%eax
+	0x3A965D26:  testl %eax,%eax
+	0x3A965D28:  jz-8 0x3A965D2D
+. 10 3A965D20 10
+. 01 50 04 8B 47 0C 85 C0 74 03 
+
+==== BB 11 (0x3A965D2A) in 179B, out 1071B, BBs exec'd 0 ====
+	0x3A965D2A:  addl %edx,4(%eax)
+	0x3A965D2D:  movl 20(%edi),%eax
+	0x3A965D30:  testl %eax,%eax
+	0x3A965D32:  jz-8 0x3A965D37
+. 11 3A965D2A 10
+. 01 50 04 8B 47 14 85 C0 74 03 
+
+==== BB 12 (0x3A965D34) in 189B, out 1162B, BBs exec'd 0 ====
+	0x3A965D34:  addl %edx,4(%eax)
+	0x3A965D37:  movl 24(%edi),%eax
+	0x3A965D3A:  testl %eax,%eax
+	0x3A965D3C:  jz-8 0x3A965D41
+. 12 3A965D34 10
+. 01 50 04 8B 47 18 85 C0 74 03 
+
+==== BB 13 (0x3A965D3E) in 199B, out 1253B, BBs exec'd 0 ====
+	0x3A965D3E:  addl %edx,4(%eax)
+	0x3A965D41:  movl 68(%edi),%eax
+	0x3A965D44:  testl %eax,%eax
+	0x3A965D46:  jz-8 0x3A965D4B
+. 13 3A965D3E 10
+. 01 50 04 8B 47 44 85 C0 74 03 
+
+==== BB 14 (0x3A965D48) in 209B, out 1344B, BBs exec'd 0 ====
+	0x3A965D48:  addl %edx,4(%eax)
+	0x3A965D4B:  movl 92(%edi),%eax
+	0x3A965D4E:  testl %eax,%eax
+	0x3A965D50:  jz-8 0x3A965D55
+. 14 3A965D48 10
+. 01 50 04 8B 47 5C 85 C0 74 03 
+
+==== BB 15 (0x3A965D52) in 219B, out 1435B, BBs exec'd 0 ====
+	0x3A965D52:  addl %edx,4(%eax)
+	0x3A965D55:  movl 0xC4(%edi),%eax
+	0x3A965D5B:  testl %eax,%eax
+	0x3A965D5D:  jz-8 0x3A965D70
+. 15 3A965D52 13
+. 01 50 04 8B 87 C4 00 00 00 85 C0 74 11 
+
+==== BB 16 (0x3A965D5F) in 232B, out 1529B, BBs exec'd 0 ====
+	0x3A965D5F:  addl %edx,4(%eax)
+	0x3A965D62:  leal 0(%esi,,), %esi
+	0x3A965D69:  leal 0(%edi,,), %edi
+	0x3A965D70:  movl 0xFFFFF9E4(%ebx),%eax
+	0x3A965D76:  testl %eax,%eax
+	0x3A965D78:  jnz-8 0x3A965D88
+. 16 3A965D5F 27
+. 01 50 04 8D B4 26 00 00 00 00 8D BC 27 00 00 00 00 8B 83 E4 F9 FF FF 85 C0 75 0E 
+
+==== BB 17 (0x3A965D88) in 259B, out 1649B, BBs exec'd 0 ====
+	0x3A965D88:  movl $0x0, -36(%ebp)
+	0x3A965D8F:  movl -56(%ebp),%ecx
+	0x3A965D92:  movl 0xFFFFFA40(%ebx),%eax
+	0x3A965D98:  movl $0x0, -28(%ebp)
+	0x3A965D9F:  addl $0x50, %ecx
+	0x3A965DA2:  testl %eax,%eax
+	0x3A965DA4:  movl $0x0, -40(%ebp)
+	0x3A965DAB:  movl $0x0, -44(%ebp)
+	0x3A965DB2:  jz-8 0x3A965DC6
+. 17 3A965D88 44
+. C7 45 DC 00 00 00 00 8B 4D C8 8B 83 40 FA FF FF C7 45 E4 00 00 00 00 83 C1 50 85 C0 C7 45 D8 00 00 00 00 C7 45 D4 00 00 00 00 74 12 
+
+==== BB 18 (0x3A965DB4) in 303B, out 1807B, BBs exec'd 0 ====
+	0x3A965DB4:  movl 4(%eax),%eax
+	0x3A965DB7:  movl %eax,-44(%ebp)
+	0x3A965DBA:  movl 0xFFFFFA44(%ebx),%eax
+	0x3A965DC0:  movl 4(%eax),%eax
+	0x3A965DC3:  movl %eax,-40(%ebp)
+	0x3A965DC6:  movl 0xFFFFFA4C(%ebx),%edi
+	0x3A965DCC:  testl %edi,%edi
+	0x3A965DCE:  jz-8 0x3A965DDC
+. 18 3A965DB4 28
+. 8B 40 04 89 45 D4 8B 83 44 FA FF FF 8B 40 04 89 45 D8 8B BB 4C FA FF FF 85 FF 74 0C 
+
+==== BB 19 (0x3A965DD0) in 331B, out 1944B, BBs exec'd 0 ====
+	0x3A965DD0:  movl 0xFFFFFA04(%ebx),%eax
+	0x3A965DD6:  movl 4(%eax),%eax
+	0x3A965DD9:  addl %eax,-40(%ebp)
+	0x3A965DDC:  movl -44(%ebp),%edx
+	0x3A965DDF:  movl -40(%ebp),%esi
+	0x3A965DE2:  leal (%esi,%edx,1), %eax
+	0x3A965DE5:  movl %eax,-60(%ebp)
+	0x3A965DE8:  movl -56(%ebp),%eax
+	0x3A965DEB:  movl 80(%eax),%edi
+	0x3A965DEE:  movl 48(%ecx),%eax
+	0x3A965DF1:  movl 4(%eax),%eax
+	0x3A965DF4:  movl %eax,-64(%ebp)
+	0x3A965DF7:  movl 0xB4(%ecx),%eax
+	0x3A965DFD:  xorl %ecx, %ecx
+	0x3A965DFF:  testl %eax,%eax
+	0x3A965E01:  jz-8 0x3A965E06
+. 19 3A965DD0 51
+. 8B 83 04 FA FF FF 8B 40 04 01 45 D8 8B 55 D4 8B 75 D8 8D 04 16 89 45 C4 8B 45 C8 8B 78 50 8B 41 30 8B 40 04 89 45 C0 8B 81 B4 00 00 00 31 C9 85 C0 74 03 
+
+==== BB 20 (0x3A965E03) in 382B, out 2177B, BBs exec'd 0 ====
+	0x3A965E03:  movl 4(%eax),%ecx
+	0x3A965E06:  movl %esi,%eax
+	0x3A965E08:  shrl $0x3, %eax
+	0x3A965E0B:  cmpl %ecx,%eax
+	0x3A965E0D:  jbe-8 0x3A965E11
+. 20 3A965E03 12
+. 8B 48 04 89 F0 C1 E8 03 39 C8 76 02 
+
+==== BB 21 (0x3A965E0F) in 394B, out 2263B, BBs exec'd 0 ====
+	0x3A965E0F:  movl %ecx,%eax
+	0x3A965E11:  leal (%edx,%eax,8), %ecx
+	0x3A965E14:  cmpl %ecx,%edx
+	0x3A965E16:  jnb-8 0x3A965E2F
+. 21 3A965E0F 9
+. 89 C8 8D 0C C2 39 CA 73 17 
+
+==== BB 22 (0x3A965E18) in 403B, out 2340B, BBs exec'd 0 ====
+	0x3A965E18:  nop
+	0x3A965E19:  leal 0(%esi,,), %esi
+	0x3A965E20:  movl (%edx),%esi
+	0x3A965E22:  movl %edi,%eax
+	0x3A965E24:  addl $0x8, %edx
+	0x3A965E27:  addl %esi,%eax
+	0x3A965E29:  addl %edi,(%eax)
+	0x3A965E2B:  cmpl %ecx,%edx
+	0x3A965E2D:  jb-8 0x3A965E20
+. 22 3A965E18 23
+. 90 8D B4 26 00 00 00 00 8B 32 89 F8 83 C2 08 01 F0 01 38 39 CA 72 F1 
+
+==== BB 23 (0x3A965E20) in 426B, out 2460B, BBs exec'd 0 ====
+	0x3A965E20:  movl (%edx),%esi
+	0x3A965E22:  movl %edi,%eax
+	0x3A965E24:  addl $0x8, %edx
+	0x3A965E27:  addl %esi,%eax
+	0x3A965E29:  addl %edi,(%eax)
+	0x3A965E2B:  cmpl %ecx,%edx
+	0x3A965E2D:  jb-8 0x3A965E20
+. 23 3A965E20 15
+. 8B 32 89 F8 83 C2 08 01 F0 01 38 39 CA 72 F1 
+
+==== BB 24 (0x3A965E2F) in 441B, out 2566B, BBs exec'd 0 ====
+	0x3A965E2F:  cmpl -60(%ebp),%ecx
+	0x3A965E32:  jnb-8 0x3A965E81
+. 24 3A965E2F 5
+. 3B 4D C4 73 4D 
+
+==== BB 25 (0x3A965E34) in 446B, out 2631B, BBs exec'd 0 ====
+	0x3A965E34:  leal 0x0(%esi), %esi
+	0x3A965E3A:  leal 0x0(%edi), %edi
+	0x3A965E40:  movl 4(%ecx),%edx
+	0x3A965E43:  movl -64(%ebp),%esi
+	0x3A965E46:  movl %edx,%eax
+	0x3A965E48:  shrl $0x8, %eax
+	0x3A965E4B:  shll $0x4, %eax
+	0x3A965E4E:  addl %esi,%eax
+	0x3A965E50:  movl (%ecx),%esi
+	0x3A965E52:  addl %edi,%esi
+	0x3A965E54:  movl %esi,-68(%ebp)
+	0x3A965E57:  movzbl %dl,%esi
+	0x3A965E5A:  xorl %edx, %edx
+	0x3A965E5C:  cmpw $0x0, 14(%eax)
+	0x3A965E61:  jz-8 0x3A965E69
+. 25 3A965E34 47
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 51 04 8B 75 C0 89 D0 C1 E8 08 C1 E0 04 01 F0 8B 31 01 FE 89 75 BC 0F B6 F2 31 D2 66 83 78 0E 00 74 06 
+
+==== BB 26 (0x3A965E63) in 493B, out 2819B, BBs exec'd 0 ====
+	0x3A965E63:  movl 0xFFFFF9E4(%ebx),%edx
+	0x3A965E69:  addl 4(%eax),%edx
+	0x3A965E6C:  leal -6(%esi), %eax
+	0x3A965E6F:  cmpl $0x1, %eax
+	0x3A965E72:  jnbe-8 0x3A965E79
+. 26 3A965E63 17
+. 8B 93 E4 F9 FF FF 03 50 04 8D 46 FA 83 F8 01 77 05 
+
+==== BB 27 (0x3A965E74) in 510B, out 2920B, BBs exec'd 0 ====
+	0x3A965E74:  movl -68(%ebp),%eax
+	0x3A965E77:  movl %edx,(%eax)
+	0x3A965E79:  addl $0x8, %ecx
+	0x3A965E7C:  cmpl -60(%ebp),%ecx
+	0x3A965E7F:  jb-8 0x3A965E40
+. 27 3A965E74 13
+. 8B 45 BC 89 10 83 C1 08 3B 4D C4 72 BF 
+
+==== BB 28 (0x3A965E40) in 523B, out 3018B, BBs exec'd 0 ====
+	0x3A965E40:  movl 4(%ecx),%edx
+	0x3A965E43:  movl -64(%ebp),%esi
+	0x3A965E46:  movl %edx,%eax
+	0x3A965E48:  shrl $0x8, %eax
+	0x3A965E4B:  shll $0x4, %eax
+	0x3A965E4E:  addl %esi,%eax
+	0x3A965E50:  movl (%ecx),%esi
+	0x3A965E52:  addl %edi,%esi
+	0x3A965E54:  movl %esi,-68(%ebp)
+	0x3A965E57:  movzbl %dl,%esi
+	0x3A965E5A:  xorl %edx, %edx
+	0x3A965E5C:  cmpw $0x0, 14(%eax)
+	0x3A965E61:  jz-8 0x3A965E69
+. 28 3A965E40 35
+. 8B 51 04 8B 75 C0 89 D0 C1 E8 08 C1 E0 04 01 F0 8B 31 01 FE 89 75 BC 0F B6 F2 31 D2 66 83 78 0E 00 74 06 
+
+==== BB 29 (0x3A965E81) in 558B, out 3184B, BBs exec'd 0 ====
+	0x3A965E81:  movl -56(%ebp),%eax
+	0x3A965E84:  movl $0x1,%esi
+	0x3A965E89:  addl $0x50, %eax
+	0x3A965E8C:  call 0x3A96BD80
+. 29 3A965E81 16
+. 8B 45 C8 BE 01 00 00 00 83 C0 50 E8 EF 5E 00 00 
+
+==== BB 30 _dl_setup_hash(0x3A96BD80) in 574B, out 3268B, BBs exec'd 0 ====
+	0x3A96BD80:  pushl %ebp
+	0x3A96BD81:  movl %eax,%ecx
+	0x3A96BD83:  movl 40(%eax),%eax
+	0x3A96BD86:  movl %esp,%ebp
+	0x3A96BD88:  testl %eax,%eax
+	0x3A96BD8A:  jz-8 0x3A96BDA9
+. 30 3A96BD80 12
+. 55 89 C1 8B 40 28 89 E5 85 C0 74 1D 
+
+==== BB 31 (0x3A96BD8C) in 586B, out 3376B, BBs exec'd 0 ====
+	0x3A96BD8C:  movl 4(%eax),%eax
+	0x3A96BD8F:  movl (%eax),%edx
+	0x3A96BD91:  addl $0x8, %eax
+	0x3A96BD94:  movl %eax,0x168(%ecx)
+	0x3A96BD9A:  leal (%eax,%edx,4), %eax
+	0x3A96BD9D:  movl %edx,0x164(%ecx)
+	0x3A96BDA3:  movl %eax,0x16C(%ecx)
+	0x3A96BDA9:  popl %ebp
+	0x3A96BDAA:  ret
+. 31 3A96BD8C 31
+. 8B 40 04 8B 10 83 C0 08 89 81 68 01 00 00 8D 04 90 89 91 64 01 00 00 89 81 6C 01 00 00 5D C3 
+
+==== BB 32 (0x3A965E91) in 617B, out 3509B, BBs exec'd 0 ====
+	0x3A965E91:  movl %esi,0xFFFFFB54(%ebx)
+	0x3A965E97:  leal 0xFFFEC994(%ebx), %eax
+	0x3A965E9D:  movl %eax,0xFFFFFB78(%ebx)
+	0x3A965EA3:  leal 0x228(%ebx), %eax
+	0x3A965EA9:  movl %eax,0xFFFFFB7C(%ebx)
+	0x3A965EAF:  leal 0xFFFEDD24(%ebx), %eax
+	0x3A965EB5:  movl %eax,4(%esp,,)
+	0x3A965EB9:  movl -48(%ebp),%edx
+	0x3A965EBC:  movl %ebp,0x128(%ebx)
+	0x3A965EC2:  movl %edx,(%esp,,)
+	0x3A965EC5:  call 0x3A972DC0
+. 32 3A965E91 57
+. 89 B3 54 FB FF FF 8D 83 94 C9 FE FF 89 83 78 FB FF FF 8D 83 28 02 00 00 89 83 7C FB FF FF 8D 83 24 DD FE FF 89 44 24 04 8B 55 D0 89 AB 28 01 00 00 89 14 24 E8 F6 CE 00 00 
+
+==== BB 33 _dl_sysdep_start(0x3A972DC0) in 674B, out 3678B, BBs exec'd 0 ====
+	0x3A972DC0:  pushl %ebp
+	0x3A972DC1:  xorl %edx, %edx
+	0x3A972DC3:  movl %esp,%ebp
+	0x3A972DC5:  pushl %edi
+	0x3A972DC6:  xorl %edi, %edi
+	0x3A972DC8:  xorl %ecx, %ecx
+	0x3A972DCA:  pushl %esi
+	0x3A972DCB:  xorl %esi, %esi
+	0x3A972DCD:  pushl %ebx
+	0x3A972DCE:  subl $0x204, %esp
+	0x3A972DD4:  movl 8(%ebp),%eax
+	0x3A972DD7:  movl %edx,0xFFFFFE1C(%ebp)
+	0x3A972DDD:  xorl %edx, %edx
+	0x3A972DDF:  movl %edx,0xFFFFFE0C(%ebp)
+	0x3A972DE5:  movl (%eax),%edx
+	0x3A972DE7:  call 0x3A97592B
+. 33 3A972DC0 44
+. 55 31 D2 89 E5 57 31 FF 31 C9 56 31 F6 53 81 EC 04 02 00 00 8B 45 08 89 95 1C FE FF FF 31 D2 89 95 0C FE FF FF 8B 10 E8 3F 2B 00 00 
+
+==== BB 34 (0x3A972DEC) in 718B, out 3927B, BBs exec'd 0 ====
+	0x3A972DEC:  addl $0x5880, %ebx
+	0x3A972DF2:  movl %eax,0x128(%ebx)
+	0x3A972DF8:  addl $0x4, %eax
+	0x3A972DFB:  movl %edx,0x20C(%ebx)
+	0x3A972E01:  leal (%eax,%edx,4), %edx
+	0x3A972E04:  movl %esi,0xFFFFFE14(%ebp)
+	0x3A972E0A:  movl 4(%edx),%esi
+	0x3A972E0D:  movl %edi,0xFFFFFE18(%ebp)
+	0x3A972E13:  xorl %edi, %edi
+	0x3A972E15:  testl %esi,%esi
+	0x3A972E17:  movl %eax,0x38(%ebx)
+	0x3A972E1D:  leal 4(%edx), %eax
+	0x3A972E20:  movl %ecx,0xFFFFFE10(%ebp)
+	0x3A972E26:  movl %eax,%ecx
+	0x3A972E28:  movl %edi,0xFFFFFE08(%ebp)
+	0x3A972E2E:  movl %eax,0x200(%ebx)
+	0x3A972E34:  jz-8 0x3A972E3F
+. 34 3A972DEC 74
+. 81 C3 80 58 00 00 89 83 28 01 00 00 83 C0 04 89 93 0C 02 00 00 8D 14 90 89 B5 14 FE FF FF 8B 72 04 89 BD 18 FE FF FF 31 FF 85 F6 89 83 38 00 00 00 8D 42 04 89 8D 10 FE FF FF 89 C1 89 BD 08 FE FF FF 89 83 00 02 00 00 74 09 
+
+==== BB 35 (0x3A972E36) in 792B, out 4192B, BBs exec'd 0 ====
+	0x3A972E36:  addl $0x4, %ecx
+	0x3A972E39:  movl (%ecx),%edx
+	0x3A972E3B:  testl %edx,%edx
+	0x3A972E3D:  jnz-8 0x3A972E36
+. 35 3A972E36 9
+. 83 C1 04 8B 11 85 D2 75 F7 
+
+==== BB 36 (0x3A972E3F) in 801B, out 4271B, BBs exec'd 0 ====
+	0x3A972E3F:  addl $0x4, %ecx
+	0x3A972E42:  leal 0xFFFED214(%ebx), %eax
+	0x3A972E48:  movl %ecx,%edx
+	0x3A972E4A:  movl %ecx,0x134(%ebx)
+	0x3A972E50:  movl (%ecx),%ecx
+	0x3A972E52:  movl %eax,0xFFFFFE20(%ebp)
+	0x3A972E58:  xorl %eax, %eax
+	0x3A972E5A:  testl %ecx,%ecx
+	0x3A972E5C:  movl %eax,0xFFFFFC20(%ebx)
+	0x3A972E62:  jz-8 0x3A972EB6
+. 36 3A972E3F 37
+. 83 C1 04 8D 83 14 D2 FE FF 89 CA 89 8B 34 01 00 00 8B 09 89 85 20 FE FF FF 31 C0 85 C9 89 83 20 FC FF FF 74 52 
+
+==== BB 37 (0x3A972E64) in 838B, out 4429B, BBs exec'd 0 ====
+	0x3A972E64:  leal 0x0(%esi), %esi
+	0x3A972E6A:  leal 0x0(%edi), %edi
+	0x3A972E70:  leal -3(%ecx), %eax
+	0x3A972E73:  cmpl $0x1E, %eax
+	0x3A972E76:  jnbe-8 0x3A972EA0
+. 37 3A972E64 20
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8D 41 FD 83 F8 1E 77 28 
+
+==== BB 38 (0x3A972EA0) in 858B, out 4522B, BBs exec'd 0 ====
+	0x3A972EA0:  addl $0x8, %edx
+	0x3A972EA3:  movl $0x1,%eax
+	0x3A972EA8:  shll %cl, %eax
+	0x3A972EAA:  orl %eax,0xFFFFFE0C(%ebp)
+	0x3A972EB0:  movl (%edx),%ecx
+	0x3A972EB2:  testl %ecx,%ecx
+	0x3A972EB4:  jnz-8 0x3A972E70
+. 38 3A972EA0 22
+. 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA 
+
+==== BB 39 (0x3A972E70) in 880B, out 4648B, BBs exec'd 0 ====
+	0x3A972E70:  leal -3(%ecx), %eax
+	0x3A972E73:  cmpl $0x1E, %eax
+	0x3A972E76:  jnbe-8 0x3A972EA0
+. 39 3A972E70 8
+. 8D 41 FD 83 F8 1E 77 28 
+
+==== BB 40 (0x3A972E78) in 888B, out 4715B, BBs exec'd 0 ====
+	0x3A972E78:  movl -9388(%ebx,%eax,4),%eax
+	0x3A972E7F:  addl %ebx,%eax
+	0x3A972E81:  jmp*l %eax
+. 40 3A972E78 11
+. 8B 84 83 54 DB FF FF 01 D8 FF E0 
+
+==== BB 41 (0x3A9731AC) in 899B, out 4766B, BBs exec'd 0 ====
+	0x3A9731AC:  movl 4(%edx),%eax
+	0x3A9731AF:  movl %eax,0xFFFFFC54(%ebx)
+	0x3A9731B5:  jmp 0x3A972EA0
+. 41 3A9731AC 14
+. 8B 42 04 89 83 54 FC FF FF E9 E6 FC FF FF 
+
+==== BB 42 (0x3A9731F2) in 913B, out 4826B, BBs exec'd 0 ====
+	0x3A9731F2:  movl 4(%edx),%eax
+	0x3A9731F5:  movl %eax,0xFFFFFC18(%ebx)
+	0x3A9731FB:  jmp 0x3A972E97
+. 42 3A9731F2 14
+. 8B 42 04 89 83 18 FC FF FF E9 97 FC FF FF 
+
+==== BB 43 (0x3A972E97) in 927B, out 4886B, BBs exec'd 0 ====
+	0x3A972E97:  movl (%edx),%ecx
+	0x3A972E99:  leal 0(%esi,,), %esi
+	0x3A972EA0:  addl $0x8, %edx
+	0x3A972EA3:  movl $0x1,%eax
+	0x3A972EA8:  shll %cl, %eax
+	0x3A972EAA:  orl %eax,0xFFFFFE0C(%ebp)
+	0x3A972EB0:  movl (%edx),%ecx
+	0x3A972EB2:  testl %ecx,%ecx
+	0x3A972EB4:  jnz-8 0x3A972E70
+. 43 3A972E97 31
+. 8B 0A 8D B4 26 00 00 00 00 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA 
+
+==== BB 44 (0x3A97319E) in 958B, out 5039B, BBs exec'd 0 ====
+	0x3A97319E:  movl 4(%edx),%eax
+	0x3A9731A1:  movl %eax,0xFFFFFC30(%ebx)
+	0x3A9731A7:  jmp 0x3A972E97
+. 44 3A97319E 14
+. 8B 42 04 89 83 30 FC FF FF E9 EB FC FF FF 
+
+==== BB 45 (0x3A97320E) in 972B, out 5099B, BBs exec'd 0 ====
+	0x3A97320E:  movl 4(%edx),%eax
+	0x3A973211:  movl %eax,0xFFFFFE1C(%ebp)
+	0x3A973217:  jmp 0x3A972EA0
+. 45 3A97320E 14
+. 8B 42 04 89 85 1C FE FF FF E9 84 FC FF FF 
+
+==== BB 46 (0x3A973200) in 986B, out 5159B, BBs exec'd 0 ====
+	0x3A973200:  movl 4(%edx),%eax
+	0x3A973203:  movl %eax,0xFFFFFE18(%ebp)
+	0x3A973209:  jmp 0x3A972EA0
+. 46 3A973200 14
+. 8B 42 04 89 85 18 FE FF FF E9 92 FC FF FF 
+
+==== BB 47 (0x3A9731E4) in 1000B, out 5219B, BBs exec'd 0 ====
+	0x3A9731E4:  movl 4(%edx),%eax
+	0x3A9731E7:  movl %eax,0xFFFFFE20(%ebp)
+	0x3A9731ED:  jmp 0x3A972E97
+. 47 3A9731E4 14
+. 8B 42 04 89 85 20 FE FF FF E9 A5 FC FF FF 
+
+==== BB 48 (0x3A9731D6) in 1014B, out 5279B, BBs exec'd 0 ====
+	0x3A9731D6:  movl 4(%edx),%eax
+	0x3A9731D9:  xorl %eax,0xFFFFFE14(%ebp)
+	0x3A9731DF:  jmp 0x3A972EA0
+. 48 3A9731D6 14
+. 8B 42 04 31 85 14 FE FF FF E9 BC FC FF FF 
+
+==== BB 49 (0x3A9731C8) in 1028B, out 5348B, BBs exec'd 0 ====
+	0x3A9731C8:  movl 4(%edx),%eax
+	0x3A9731CB:  xorl %eax,0xFFFFFE10(%ebp)
+	0x3A9731D1:  jmp 0x3A972EA0
+. 49 3A9731C8 14
+. 8B 42 04 31 85 10 FE FF FF E9 CA FC FF FF 
+
+==== BB 50 (0x3A972E83) in 1042B, out 5417B, BBs exec'd 0 ====
+	0x3A972E83:  movl $0xFFFFFFFF,%eax
+	0x3A972E88:  movl %eax,0xFFFFFE0C(%ebp)
+	0x3A972E8E:  movl 4(%edx),%eax
+	0x3A972E91:  movl %eax,0x130(%ebx)
+	0x3A972E97:  movl (%edx),%ecx
+	0x3A972E99:  leal 0(%esi,,), %esi
+	0x3A972EA0:  addl $0x8, %edx
+	0x3A972EA3:  movl $0x1,%eax
+	0x3A972EA8:  shll %cl, %eax
+	0x3A972EAA:  orl %eax,0xFFFFFE0C(%ebp)
+	0x3A972EB0:  movl (%edx),%ecx
+	0x3A972EB2:  testl %ecx,%ecx
+	0x3A972EB4:  jnz-8 0x3A972E70
+. 50 3A972E83 51
+. B8 FF FF FF FF 89 85 0C FE FF FF 8B 42 04 89 83 30 01 00 00 8B 0A 8D B4 26 00 00 00 00 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA 
+
+==== BB 51 (0x3A9731BA) in 1093B, out 5614B, BBs exec'd 0 ====
+	0x3A9731BA:  movl 4(%edx),%eax
+	0x3A9731BD:  movl %eax,0xFFFFFC20(%ebx)
+	0x3A9731C3:  jmp 0x3A972EA0
+. 51 3A9731BA 14
+. 8B 42 04 89 83 20 FC FF FF E9 D8 FC FF FF 
+
+==== BB 52 (0x3A972EB6) in 1107B, out 5674B, BBs exec'd 0 ====
+	0x3A972EB6:  leal -76(%ebp), %edx
+	0x3A972EB9:  leal 0xFFFFFE24(%ebp), %eax
+	0x3A972EBF:  movl %edx,0xFFFFFE04(%ebp)
+	0x3A972EC5:  movl %eax,(%esp,,)
+	0x3A972EC8:  call 0x3A974D50
+. 52 3A972EB6 23
+. 8D 55 B4 8D 85 24 FE FF FF 89 95 04 FE FF FF 89 04 24 E8 83 1E 00 00 
+
+==== BB 53 uname(0x3A974D50) in 1130B, out 5765B, BBs exec'd 0 ====
+	0x3A974D50:  movl %ebx,%edx
+	0x3A974D52:  movl 4(%esp,,),%ebx
+	0x3A974D56:  movl $0x7A,%eax
+	0x3A974D5B:  int $0x80
+. 53 3A974D50 13
+. 89 DA 8B 5C 24 04 B8 7A 00 00 00 CD 80 
+
+==== BB 54 (0x3A974D5D) in 1143B, out 5829B, BBs exec'd 0 ====
+	0x3A974D5D:  movl %edx,%ebx
+	0x3A974D5F:  cmpl $0xFFFFF001, %eax
+	0x3A974D64:  jnb-8 0x3A974D67
+. 54 3A974D5D 9
+. 89 D3 3D 01 F0 FF FF 73 01 
+
+==== BB 55 (0x3A974D66) in 1152B, out 5899B, BBs exec'd 0 ====
+	0x3A974D66:  ret
+. 55 3A974D66 1
+. C3 
+
+==== BB 56 (0x3A972ECD) in 1153B, out 5929B, BBs exec'd 0 ====
+	0x3A972ECD:  testl %eax,%eax
+	0x3A972ECF:  jnz-32 0x3A9730B1
+. 56 3A972ECD 8
+. 85 C0 0F 85 DC 01 00 00 
+
+==== BB 57 (0x3A972ED5) in 1161B, out 5988B, BBs exec'd 0 ====
+	0x3A972ED5:  leal 0xFFFFFEA6(%ebp), %eax
+	0x3A972EDB:  movl %eax,0xFFFFFE04(%ebp)
+	0x3A972EE1:  movl 0xFFFFFE04(%ebp),%esi
+	0x3A972EE7:  xorl %eax, %eax
+	0x3A972EE9:  xorl %edi, %edi
+	0x3A972EEB:  movl %eax,0xFFFFFE00(%ebp)
+	0x3A972EF1:  movzbl (%esi),%edx
+	0x3A972EF4:  movb %dl,%al
+	0x3A972EF6:  subb $0x30, %al
+	0x3A972EF8:  cmpb $0x9, %al
+	0x3A972EFA:  jnbe-8 0x3A972F65
+. 57 3A972ED5 39
+. 8D 85 A6 FE FF FF 89 85 04 FE FF FF 8B B5 04 FE FF FF 31 C0 31 FF 89 85 00 FE FF FF 0F B6 16 88 D0 2C 30 3C 09 77 69 
+
+==== BB 58 (0x3A972EFC) in 1200B, out 6164B, BBs exec'd 0 ====
+	0x3A972EFC:  leal 0(%esi,,), %esi
+	0x3A972F00:  incl %esi
+	0x3A972F01:  movsbl %dl,%eax
+	0x3A972F04:  leal -48(%eax), %edx
+	0x3A972F07:  movzbl (%esi),%ecx
+	0x3A972F0A:  movb %cl,%al
+	0x3A972F0C:  subb $0x30, %al
+	0x3A972F0E:  movb %cl,0xFFFFFDFF(%ebp)
+	0x3A972F14:  cmpb $0x9, %al
+	0x3A972F16:  jnbe-8 0x3A972F40
+. 58 3A972EFC 28
+. 8D 74 26 00 46 0F BE C2 8D 50 D0 0F B6 0E 88 C8 2C 30 88 8D FF FD FF FF 3C 09 77 28 
+
+==== BB 59 (0x3A972F40) in 1228B, out 6330B, BBs exec'd 0 ====
+	0x3A972F40:  incl 0xFFFFFE00(%ebp)
+	0x3A972F46:  shll $0x8, %edi
+	0x3A972F49:  orl %edx,%edi
+	0x3A972F4B:  incl %esi
+	0x3A972F4C:  cmpb $0x2E, %cl
+	0x3A972F4F:  jnz-8 0x3A972F5C
+. 59 3A972F40 17
+. FF 85 00 FE FF FF C1 E7 08 09 D7 46 80 F9 2E 75 0B 
+
+==== BB 60 (0x3A972F51) in 1245B, out 6441B, BBs exec'd 0 ====
+	0x3A972F51:  movzbl (%esi),%edx
+	0x3A972F54:  movb %dl,%al
+	0x3A972F56:  subb $0x30, %al
+	0x3A972F58:  cmpb $0x9, %al
+	0x3A972F5A:  jbe-8 0x3A972F00
+. 60 3A972F51 11
+. 0F B6 16 88 D0 2C 30 3C 09 76 A4 
+
+==== BB 61 (0x3A972F00) in 1256B, out 6537B, BBs exec'd 0 ====
+	0x3A972F00:  incl %esi
+	0x3A972F01:  movsbl %dl,%eax
+	0x3A972F04:  leal -48(%eax), %edx
+	0x3A972F07:  movzbl (%esi),%ecx
+	0x3A972F0A:  movb %cl,%al
+	0x3A972F0C:  subb $0x30, %al
+	0x3A972F0E:  movb %cl,0xFFFFFDFF(%ebp)
+	0x3A972F14:  cmpb $0x9, %al
+	0x3A972F16:  jnbe-8 0x3A972F40
+. 61 3A972F00 24
+. 46 0F BE C2 8D 50 D0 0F B6 0E 88 C8 2C 30 88 8D FF FD FF FF 3C 09 77 28 
+
+==== BB 62 (0x3A972F5C) in 1280B, out 6691B, BBs exec'd 0 ====
+	0x3A972F5C:  cmpl $0x2, 0xFFFFFE00(%ebp)
+	0x3A972F63:  jnle-8 0x3A972F77
+. 62 3A972F5C 9
+. 83 BD 00 FE FF FF 02 7F 12 
+
+==== BB 63 (0x3A972F77) in 1289B, out 6757B, BBs exec'd 0 ====
+	0x3A972F77:  cmpl $0x20204, %edi
+	0x3A972F7D:  jbe-32 0x3A973240
+. 63 3A972F77 12
+. 81 FF 04 02 02 00 0F 86 BD 02 00 00 
+
+==== BB 64 (0x3A972F83) in 1301B, out 6817B, BBs exec'd 0 ====
+	0x3A972F83:  cmpl $0xFFFFFFFF, 0xFFFFFE0C(%ebp)
+	0x3A972F8A:  movl %edi,0xFFFFFC1C(%ebx)
+	0x3A972F90:  jz-8 0x3A972FEA
+. 64 3A972F83 15
+. 83 BD 0C FE FF FF FF 89 BB 1C FC FF FF 74 58 
+
+==== BB 65 (0x3A972FEA) in 1316B, out 6902B, BBs exec'd 0 ====
+	0x3A972FEA:  movl 0xFFFFFC18(%ebx),%edi
+	0x3A972FF0:  testl %edi,%edi
+	0x3A972FF2:  jnz-8 0x3A972FFF
+. 65 3A972FEA 10
+. 8B BB 18 FC FF FF 85 FF 75 0B 
+
+==== BB 66 (0x3A972FFF) in 1326B, out 6977B, BBs exec'd 0 ====
+	0x3A972FFF:  movl 0xFFFFFD94(%ebx),%esi
+	0x3A973005:  testl %esi,%esi
+	0x3A973007:  jz-8 0x3A97301F
+. 66 3A972FFF 10
+. 8B B3 94 FD FF FF 85 F6 74 16 
+
+==== BB 67 (0x3A97301F) in 1336B, out 7055B, BBs exec'd 0 ====
+	0x3A97301F:  movl $0x0, (%esp,,)
+	0x3A973026:  call 0x3A9738A0
+. 67 3A97301F 12
+. C7 04 24 00 00 00 00 E8 75 08 00 00 
+
+==== BB 68 brk(0x3A9738A0) in 1348B, out 7112B, BBs exec'd 0 ====
+	0x3A9738A0:  pushl %ebp
+	0x3A9738A1:  movl %esp,%ebp
+	0x3A9738A3:  pushl %ebx
+	0x3A9738A4:  movl 8(%ebp),%ecx
+	0x3A9738A7:  call 0x3A97592B
+. 68 3A9738A0 12
+. 55 89 E5 53 8B 4D 08 E8 7F 20 00 00 
+
+==== BB 69 (0x3A9738AC) in 1360B, out 7212B, BBs exec'd 0 ====
+	0x3A9738AC:  addl $0x4DC0, %ebx
+	0x3A9738B2:  xchgl %ecx, %ebx
+	0x3A9738B4:  movl $0x2D,%eax
+	0x3A9738B9:  int $0x80
+. 69 3A9738AC 15
+. 81 C3 C0 4D 00 00 87 CB B8 2D 00 00 00 CD 80 
+
+==== BB 70 (0x3A9738BB) in 1375B, out 7280B, BBs exec'd 0 ====
+	0x3A9738BB:  xchgl %ecx, %ebx
+	0x3A9738BD:  movl %eax,0x144(%ebx)
+	0x3A9738C3:  xorl %edx, %edx
+	0x3A9738C5:  cmpl %ecx,%eax
+	0x3A9738C7:  jb-8 0x3A9738CE
+. 70 3A9738BB 14
+. 87 CB 89 83 44 01 00 00 31 D2 39 C8 72 05 
+
+==== BB 71 (0x3A9738C9) in 1389B, out 7379B, BBs exec'd 0 ====
+	0x3A9738C9:  popl %ebx
+	0x3A9738CA:  movl %edx,%eax
+	0x3A9738CC:  popl %ebp
+	0x3A9738CD:  ret
+. 71 3A9738C9 5
+. 5B 89 D0 5D C3 
+
+==== BB 72 (0x3A97302B) in 1394B, out 7451B, BBs exec'd 0 ====
+	0x3A97302B:  movl 0xFFFFFC20(%ebx),%eax
+	0x3A973031:  testl %eax,%eax
+	0x3A973033:  jz-8 0x3A973056
+. 72 3A97302B 10
+. 8B 83 20 FC FF FF 85 C0 74 21 
+
+==== BB 73 (0x3A973035) in 1404B, out 7526B, BBs exec'd 0 ====
+	0x3A973035:  cmpb $0x0, (%eax)
+	0x3A973038:  jnz-8 0x3A973044
+. 73 3A973035 5
+. 80 38 00 75 0A 
+
+==== BB 74 (0x3A973044) in 1409B, out 7586B, BBs exec'd 0 ====
+	0x3A973044:  testl %eax,%eax
+	0x3A973046:  jz-8 0x3A973056
+. 74 3A973044 4
+. 85 C0 74 0E 
+
+==== BB 75 (0x3A973048) in 1413B, out 7645B, BBs exec'd 0 ====
+	0x3A973048:  movl %eax,(%esp,,)
+	0x3A97304B:  call 0x3A9752D0
+. 75 3A973048 8
+. 89 04 24 E8 80 22 00 00 
+
+==== BB 76 strlen(0x3A9752D0) in 1421B, out 7700B, BBs exec'd 0 ====
+	0x3A9752D0:  movl 4(%esp,,),%eax
+	0x3A9752D4:  movl $0x3,%edx
+	0x3A9752D9:  andl %eax,%edx
+	0x3A9752DB:  jz-8 0x3A975301
+. 76 3A9752D0 13
+. 8B 44 24 04 BA 03 00 00 00 21 C2 74 24 
+
+==== BB 77 (0x3A9752DD) in 1434B, out 7781B, BBs exec'd 0 ====
+	0x3A9752DD:  jp-8 0x3A9752F6
+. 77 3A9752DD 2
+. 7A 17 
+
+==== BB 78 (0x3A9752F6) in 1436B, out 7828B, BBs exec'd 0 ====
+	0x3A9752F6:  cmpb %dh{si},(%eax)
+	0x3A9752F8:  jz-32 0x3A975386
+. 78 3A9752F6 8
+. 38 30 0F 84 88 00 00 00 
+
+==== BB 79 (0x3A9752FE) in 1444B, out 7888B, BBs exec'd 0 ====
+	0x3A9752FE:  incl %eax
+	0x3A9752FF:  xorl %edx, %edx
+	0x3A975301:  movl (%eax),%ecx
+	0x3A975303:  addl $0x4, %eax
+	0x3A975306:  subl %ecx,%edx
+	0x3A975308:  addl $0xFEFEFEFF, %ecx
+	0x3A97530E:  decl %edx
+	0x3A97530F:  jnb-8 0x3A975369
+. 79 3A9752FE 19
+. 40 31 D2 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 58 
+
+==== BB 80 (0x3A975369) in 1463B, out 8001B, BBs exec'd 0 ====
+	0x3A975369:  subl $0x4, %eax
+	0x3A97536C:  subl $0xFEFEFEFF, %ecx
+	0x3A975372:  cmpb $0x0, %cl
+	0x3A975375:  jz-8 0x3A975386
+. 80 3A975369 14
+. 83 E8 04 81 E9 FF FE FE FE 80 F9 00 74 0F 
+
+==== BB 81 (0x3A975377) in 1477B, out 8088B, BBs exec'd 0 ====
+	0x3A975377:  incl %eax
+	0x3A975378:  testb %ch{bp},%ch{bp}
+	0x3A97537A:  jz-8 0x3A975386
+. 81 3A975377 5
+. 40 84 ED 74 0A 
+
+==== BB 82 (0x3A97537C) in 1482B, out 8160B, BBs exec'd 0 ====
+	0x3A97537C:  shrl $0x10, %ecx
+	0x3A97537F:  incl %eax
+	0x3A975380:  cmpb $0x0, %cl
+	0x3A975383:  jz-8 0x3A975386
+. 82 3A97537C 9
+. C1 E9 10 40 80 F9 00 74 01 
+
+==== BB 83 (0x3A975385) in 1491B, out 8242B, BBs exec'd 0 ====
+	0x3A975385:  incl %eax
+	0x3A975386:  subl 4(%esp,,),%eax
+	0x3A97538A:  ret
+. 83 3A975385 6
+. 40 2B 44 24 04 C3 
+
+==== BB 84 (0x3A973050) in 1497B, out 8299B, BBs exec'd 0 ====
+	0x3A973050:  movl %eax,0xFFFFFC24(%ebx)
+	0x3A973056:  movl $0x0, (%esp,,)
+	0x3A97305D:  call 0x3A9738E0
+. 84 3A973050 18
+. 89 83 24 FC FF FF C7 04 24 00 00 00 00 E8 7E 08 00 00 
+
+==== BB 85 sbrk(0x3A9738E0) in 1515B, out 8375B, BBs exec'd 0 ====
+	0x3A9738E0:  pushl %ebp
+	0x3A9738E1:  movl %esp,%ebp
+	0x3A9738E3:  subl $0x10, %esp
+	0x3A9738E6:  movl %ebx,-12(%ebp)
+	0x3A9738E9:  movl %edi,-4(%ebp)
+	0x3A9738EC:  movl 8(%ebp),%edi
+	0x3A9738EF:  call 0x3A97592B
+. 85 3A9738E0 20
+. 55 89 E5 83 EC 10 89 5D F4 89 7D FC 8B 7D 08 E8 37 20 00 00 
+
+==== BB 86 (0x3A9738F4) in 1535B, out 8499B, BBs exec'd 0 ====
+	0x3A9738F4:  addl $0x4D78, %ebx
+	0x3A9738FA:  movl %esi,-8(%ebp)
+	0x3A9738FD:  movl 0x144(%ebx),%esi
+	0x3A973903:  testl %esi,%esi
+	0x3A973905:  jz-8 0x3A973911
+. 86 3A9738F4 19
+. 81 C3 78 4D 00 00 89 75 F8 8B B3 44 01 00 00 85 F6 74 0A 
+
+==== BB 87 (0x3A973907) in 1554B, out 8606B, BBs exec'd 0 ====
+	0x3A973907:  movl 0x12C(%ebx),%eax
+	0x3A97390D:  testl %eax,%eax
+	0x3A97390F:  jz-8 0x3A97392C
+. 87 3A973907 10
+. 8B 83 2C 01 00 00 85 C0 74 1B 
+
+==== BB 88 (0x3A97392C) in 1564B, out 8681B, BBs exec'd 0 ====
+	0x3A97392C:  testl %edi,%edi
+	0x3A97392E:  movl %esi,%edx
+	0x3A973930:  jnz-8 0x3A973941
+. 88 3A97392C 6
+. 85 FF 89 F2 75 0F 
+
+==== BB 89 (0x3A973932) in 1570B, out 8750B, BBs exec'd 0 ====
+	0x3A973932:  movl -12(%ebp),%ebx
+	0x3A973935:  movl %edx,%eax
+	0x3A973937:  movl -8(%ebp),%esi
+	0x3A97393A:  movl -4(%ebp),%edi
+	0x3A97393D:  movl %ebp,%esp
+	0x3A97393F:  popl %ebp
+	0x3A973940:  ret
+. 89 3A973932 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 90 (0x3A973062) in 1585B, out 8852B, BBs exec'd 0 ====
+	0x3A973062:  movl %eax,%ecx
+	0x3A973064:  leal 0x228(%ebx), %eax
+	0x3A97306A:  cmpl %eax,%ecx
+	0x3A97306C:  jz-32 0x3A973226
+. 90 3A973062 16
+. 89 C1 8D 83 28 02 00 00 39 C1 0F 84 B4 01 00 00 
+
+==== BB 91 (0x3A973072) in 1601B, out 8931B, BBs exec'd 0 ====
+	0x3A973072:  movl 0x130(%ebx),%eax
+	0x3A973078:  testl %eax,%eax
+	0x3A97307A:  jnz-32 0x3A97321C
+. 91 3A973072 14
+. 8B 83 30 01 00 00 85 C0 0F 85 9C 01 00 00 
+
+==== BB 92 (0x3A973080) in 1615B, out 9006B, BBs exec'd 0 ====
+	0x3A973080:  leal 0xFFFFFE20(%ebp), %eax
+	0x3A973086:  movl %eax,8(%esp,,)
+	0x3A97308A:  movl 0xFFFFFE18(%ebp),%eax
+	0x3A973090:  movl %eax,4(%esp,,)
+	0x3A973094:  movl 0xFFFFFE1C(%ebp),%edx
+	0x3A97309A:  movl %edx,(%esp,,)
+	0x3A97309D:  call*l 12(%ebp)
+. 92 3A973080 32
+. 8D 85 20 FE FF FF 89 44 24 08 8B 85 18 FE FF FF 89 44 24 04 8B 95 1C FE FF FF 89 14 24 FF 55 0C 
+
+==== BB 93 dl_main(0x3A966390) in 1647B, out 9117B, BBs exec'd 0 ====
+	0x3A966390:  pushl %ebp
+	0x3A966391:  movl %esp,%ebp
+	0x3A966393:  pushl %edi
+	0x3A966394:  pushl %esi
+	0x3A966395:  pushl %ebx
+	0x3A966396:  subl $0x1C8, %esp
+	0x3A96639C:  call 0x3A97592B
+. 93 3A966390 17
+. 55 89 E5 57 56 53 81 EC C8 01 00 00 E8 8A F5 00 00 
+
+==== BB 94 (0x3A9663A1) in 1664B, out 9253B, BBs exec'd 0 ====
+	0x3A9663A1:  addl $0x122CB, %ebx
+	0x3A9663A7:  movb $0x0, 0xFFFFFF0B(%ebp)
+	0x3A9663AE:  leal 0xFFFED254(%ebx), %eax
+	0x3A9663B4:  movl 0x130(%ebx),%edi
+	0x3A9663BA:  movl %eax,0xFFFFF9E0(%ebx)
+	0x3A9663C0:  leal 0xFFFED274(%ebx), %eax
+	0x3A9663C6:  movl %eax,0xFFFFFBF8(%ebx)
+	0x3A9663CC:  leal 0xFFFED284(%ebx), %eax
+	0x3A9663D2:  movl %eax,0xFFFFFBFC(%ebx)
+	0x3A9663D8:  leal 0xFFFF9FD4(%ebx), %eax
+	0x3A9663DE:  movl %eax,0xFFFFFC04(%ebx)
+	0x3A9663E4:  movl 0x200(%ebx),%eax
+	0x3A9663EA:  movb $0x0, 0xFFFFFF03(%ebp)
+	0x3A9663F1:  movb $0x0, 0xFFFFFF02(%ebp)
+	0x3A9663F8:  movl %eax,0xFFFFFF48(%ebp)
+	0x3A9663FE:  xorl %eax, %eax
+	0x3A966400:  movl %eax,0xFFFFFEF8(%ebp)
+	0x3A966406:  xorl %eax, %eax
+	0x3A966408:  testl %edi,%edi
+	0x3A96640A:  movl %eax,0xFFFFFEF4(%ebp)
+	0x3A966410:  leal 0xFFFFD5BA(%ebx), %eax
+	0x3A966416:  jz-8 0x3A96641B
+. 94 3A9663A1 119
+. 81 C3 CB 22 01 00 C6 85 0B FF FF FF 00 8D 83 54 D2 FE FF 8B BB 30 01 00 00 89 83 E0 F9 FF FF 8D 83 74 D2 FE FF 89 83 F8 FB FF FF 8D 83 84 D2 FE FF 89 83 FC FB FF FF 8D 83 D4 9F FF FF 89 83 04 FC FF FF 8B 83 00 02 00 00 C6 85 03 FF FF FF 00 C6 85 02 FF FF FF 00 89 85 48 FF FF FF 31 C0 89 85 F8 FE FF FF 31 C0 85 FF 89 85 F4 FE FF FF 8D 83 BA D5 FF FF 74 03 
+
+==== BB 95 (0x3A96641B) in 1783B, out 9585B, BBs exec'd 0 ====
+	0x3A96641B:  movl %eax,0xFFFFFD80(%ebx)
+	0x3A966421:  leal 0xFFFFFF48(%ebp), %eax
+	0x3A966427:  call 0x3A9732E0
+. 95 3A96641B 17
+. 89 83 80 FD FF FF 8D 85 48 FF FF FF E8 B4 CE 00 00 
+
+==== BB 96 _dl_next_ld_env_entry(0x3A9732E0) in 1800B, out 9665B, BBs exec'd 0 ====
+	0x3A9732E0:  pushl %ebp
+	0x3A9732E1:  movl %eax,%ecx
+	0x3A9732E3:  movl %esp,%ebp
+	0x3A9732E5:  pushl %esi
+	0x3A9732E6:  movl (%eax),%edx
+	0x3A9732E8:  xorl %esi, %esi
+	0x3A9732EA:  movl (%edx),%eax
+	0x3A9732EC:  testl %eax,%eax
+	0x3A9732EE:  jz-8 0x3A9732FE
+. 96 3A9732E0 16
+. 55 89 C1 89 E5 56 8B 10 31 F6 8B 02 85 C0 74 0E 
+
+==== BB 97 (0x3A9732F0) in 1816B, out 9813B, BBs exec'd 0 ====
+	0x3A9732F0:  cmpb $0x4C, (%eax)
+	0x3A9732F3:  jz-8 0x3A973303
+. 97 3A9732F0 5
+. 80 38 4C 74 0E 
+
+==== BB 98 (0x3A973303) in 1821B, out 9873B, BBs exec'd 0 ====
+	0x3A973303:  cmpb $0x44, 1(%eax)
+	0x3A973307:  jnz-8 0x3A9732F5
+. 98 3A973303 6
+. 80 78 01 44 75 EC 
+
+==== BB 99 (0x3A9732F5) in 1827B, out 9936B, BBs exec'd 0 ====
+	0x3A9732F5:  addl $0x4, %edx
+	0x3A9732F8:  movl (%edx),%eax
+	0x3A9732FA:  testl %eax,%eax
+	0x3A9732FC:  jnz-8 0x3A9732F0
+. 99 3A9732F5 9
+. 83 C2 04 8B 02 85 C0 75 F2 
+
+==== BB 100 (0x3A973309) in 1836B, out 10015B, BBs exec'd 0 ====
+	0x3A973309:  cmpb $0x5F, 2(%eax)
+	0x3A97330D:  leal 0(%esi), %esi
+	0x3A973310:  jnz-8 0x3A9732F5
+. 100 3A973309 9
+. 80 78 02 5F 8D 76 00 75 E3 
+
+==== BB 101 (0x3A973312) in 1845B, out 10091B, BBs exec'd 0 ====
+	0x3A973312:  addl $0x4, %edx
+	0x3A973315:  leal 3(%eax), %esi
+	0x3A973318:  movl %edx,(%ecx)
+	0x3A97331A:  jmp-8 0x3A9732FE
+. 101 3A973312 10
+. 83 C2 04 8D 70 03 89 11 EB E2 
+
+==== BB 102 (0x3A9732FE) in 1855B, out 10159B, BBs exec'd 0 ====
+	0x3A9732FE:  movl %esi,%eax
+	0x3A973300:  popl %esi
+	0x3A973301:  popl %ebp
+	0x3A973302:  ret
+. 102 3A9732FE 5
+. 89 F0 5E 5D C3 
+
+==== BB 103 (0x3A96642C) in 1860B, out 10234B, BBs exec'd 0 ====
+	0x3A96642C:  movl %eax,0xFFFFFEFC(%ebp)
+	0x3A966432:  testl %eax,%eax
+	0x3A966434:  jz-8 0x3A9664A8
+. 103 3A96642C 10
+. 89 85 FC FE FF FF 85 C0 74 72 
+
+==== BB 104 (0x3A966436) in 1870B, out 10309B, BBs exec'd 0 ====
+	0x3A966436:  movl 0xFFFFFEFC(%ebp),%edx
+	0x3A96643C:  xorl %esi, %esi
+	0x3A96643E:  movzbl (%edx),%eax
+	0x3A966441:  testb %al,%al
+	0x3A966443:  setnz %dl
+	0x3A966446:  cmpb $0x3D, %al
+	0x3A966448:  setnz %al
+	0x3A96644B:  andl %edx,%eax
+	0x3A96644D:  testb $0x1, %al
+	0x3A96644F:  jz-8 0x3A966485
+. 104 3A966436 27
+. 8B 95 FC FE FF FF 31 F6 0F B6 02 84 C0 0F 95 C2 3C 3D 0F 95 C0 21 D0 A8 01 74 34 
+
+==== BB 105 (0x3A966451) in 1897B, out 10486B, BBs exec'd 0 ====
+	0x3A966451:  movl 0xFFFFFEFC(%ebp),%eax
+	0x3A966457:  incl %esi
+	0x3A966458:  movzbl (%esi,%eax,1),%ecx
+	0x3A96645C:  testb %cl,%cl
+	0x3A96645E:  setnz %al
+	0x3A966461:  cmpb $0x3D, %cl
+	0x3A966464:  setnz %dl
+	0x3A966467:  andl %edx,%eax
+	0x3A966469:  testb $0x1, %al
+	0x3A96646B:  jnz-8 0x3A966451
+. 105 3A966451 28
+. 8B 85 FC FE FF FF 46 0F B6 0C 06 84 C9 0F 95 C0 80 F9 3D 0F 95 C2 21 D0 A8 01 75 E4 
+
+==== BB 106 (0x3A96646D) in 1925B, out 10660B, BBs exec'd 0 ====
+	0x3A96646D:  cmpb $0x3D, %cl
+	0x3A966470:  jnz-8 0x3A966421
+. 106 3A96646D 5
+. 80 F9 3D 75 AF 
+
+==== BB 107 (0x3A966472) in 1930B, out 10718B, BBs exec'd 0 ====
+	0x3A966472:  leal -4(%esi), %eax
+	0x3A966475:  cmpl $0x10, %eax
+	0x3A966478:  jnbe-8 0x3A966421
+. 107 3A966472 8
+. 8D 46 FC 83 F8 10 77 A7 
+
+==== BB 108 (0x3A96647A) in 1938B, out 10785B, BBs exec'd 0 ====
+	0x3A96647A:  movl -10800(%ebx,%eax,4),%eax
+	0x3A966481:  addl %ebx,%eax
+	0x3A966483:  jmp*l %eax
+. 108 3A96647A 11
+. 8B 84 83 D0 D5 FF FF 01 D8 FF E0 
+
+==== BB 109 (0x3A9677CA) in 1949B, out 10836B, BBs exec'd 0 ====
+	0x3A9677CA:  cld
+	0x3A9677CB:  movl 0xFFFFFEFC(%ebp),%esi
+	0x3A9677D1:  movl $0xA,%ecx
+	0x3A9677D6:  leal 0xFFFFED45(%ebx), %edi
+	0x3A9677DC:  repe cmpsb
+. 109 3A9677CA 20
+. FC 8B B5 FC FE FF FF B9 0A 00 00 00 8D BB 45 ED FF FF F3 A6 
+
+==== BB 110 (0x3A9677DC) in 1969B, out 11005B, BBs exec'd 0 ====
+	0x3A9677DC:  repe cmpsb
+. 110 3A9677DC 2
+. F3 A6 
+
+==== BB 111 (0x3A9677DE) in 1971B, out 11130B, BBs exec'd 0 ====
+	0x3A9677DE:  jnz-32 0x3A966421
+. 111 3A9677DE 6
+. 0F 85 3D EC FF FF 
+
+==== BB 112 (0x3A9677E4) in 1977B, out 11177B, BBs exec'd 0 ====
+	0x3A9677E4:  xorl %ecx, %ecx
+	0x3A9677E6:  xorl %edx, %edx
+	0x3A9677E8:  xorl %esi, %esi
+	0x3A9677EA:  movl %esi,12(%esp,,)
+	0x3A9677EE:  movl %ecx,8(%esp,,)
+	0x3A9677F2:  movl %edx,4(%esp,,)
+	0x3A9677F6:  movl 0xFFFFFEFC(%ebp),%eax
+	0x3A9677FC:  addl $0xB, %eax
+	0x3A9677FF:  movl %eax,(%esp,,)
+	0x3A967802:  call 0x3A973340
+. 112 3A9677E4 35
+. 31 C9 31 D2 31 F6 89 74 24 0C 89 4C 24 08 89 54 24 04 8B 85 FC FE FF FF 83 C0 0B 89 04 24 E8 39 BB 00 00 
+
+==== BB 113 __strtoul_internal(0x3A973340) in 2012B, out 11334B, BBs exec'd 0 ====
+	0x3A973340:  pushl %ebp
+	0x3A973341:  movl %esp,%ebp
+	0x3A973343:  pushl %edi
+	0x3A973344:  xorl %edi, %edi
+	0x3A973346:  pushl %esi
+	0x3A973347:  pushl %ebx
+	0x3A973348:  subl $0x8, %esp
+	0x3A97334B:  movl 8(%ebp),%esi
+	0x3A97334E:  movl $0x1, -20(%ebp)
+	0x3A973355:  call 0x3A97592B
+. 113 3A973340 26
+. 55 89 E5 57 31 FF 56 53 83 EC 08 8B 75 08 C7 45 EC 01 00 00 00 E8 D1 25 00 00 
+
+==== BB 114 (0x3A97335A) in 2038B, out 11509B, BBs exec'd 0 ====
+	0x3A97335A:  addl $0x5312, %ebx
+	0x3A973360:  movzbl (%esi),%ecx
+	0x3A973363:  cmpb $0x20, %cl
+	0x3A973366:  setz %al
+	0x3A973369:  cmpb $0x9, %cl
+	0x3A97336C:  setz %dl
+	0x3A97336F:  orl %edx,%eax
+	0x3A973371:  testb $0x1, %al
+	0x3A973373:  jz-8 0x3A973396
+. 114 3A97335A 27
+. 81 C3 12 53 00 00 0F B6 0E 80 F9 20 0F 94 C0 80 F9 09 0F 94 C2 09 D0 A8 01 74 21 
+
+==== BB 115 (0x3A973396) in 2065B, out 11667B, BBs exec'd 0 ====
+	0x3A973396:  cmpb $0x2D, %cl
+	0x3A973399:  jz-32 0x3A973435
+. 115 3A973396 9
+. 80 F9 2D 0F 84 96 00 00 00 
+
+==== BB 116 (0x3A97339F) in 2074B, out 11725B, BBs exec'd 0 ====
+	0x3A97339F:  cmpb $0x2B, %cl
+	0x3A9733A2:  jz-32 0x3A97343C
+. 116 3A97339F 9
+. 80 F9 2B 0F 84 94 00 00 00 
+
+==== BB 117 (0x3A9733A8) in 2083B, out 11783B, BBs exec'd 0 ====
+	0x3A9733A8:  movb %cl,%al
+	0x3A9733AA:  subb $0x30, %al
+	0x3A9733AC:  cmpb $0x9, %al
+	0x3A9733AE:  jbe-8 0x3A9733C6
+. 117 3A9733A8 8
+. 88 C8 2C 30 3C 09 76 16 
+
+==== BB 118 (0x3A9733C6) in 2091B, out 11866B, BBs exec'd 0 ====
+	0x3A9733C6:  movl $0xA, -16(%ebp)
+	0x3A9733CD:  cmpb $0x30, %cl
+	0x3A9733D0:  jz-8 0x3A973445
+. 118 3A9733C6 12
+. C7 45 F0 0A 00 00 00 80 F9 30 74 73 
+
+==== BB 119 (0x3A973445) in 2103B, out 11942B, BBs exec'd 0 ====
+	0x3A973445:  movzbl 1(%esi),%eax
+	0x3A973449:  movl $0x8, -16(%ebp)
+	0x3A973450:  cmpb $0x78, %al
+	0x3A973452:  setz %dl
+	0x3A973455:  cmpb $0x58, %al
+	0x3A973457:  setz %al
+	0x3A97345A:  orl %edx,%eax
+	0x3A97345C:  testb $0x1, %al
+	0x3A97345E:  jz-32 0x3A9733D2
+. 119 3A973445 31
+. 0F B6 46 01 C7 45 F0 08 00 00 00 3C 78 0F 94 C2 3C 58 0F 94 C0 09 D0 A8 01 0F 84 6E FF FF FF 
+
+==== BB 120 (0x3A973464) in 2134B, out 12101B, BBs exec'd 0 ====
+	0x3A973464:  movl $0x10, -16(%ebp)
+	0x3A97346B:  addl $0x2, %esi
+	0x3A97346E:  movzbl (%esi),%ecx
+	0x3A973471:  jmp 0x3A9733D2
+. 120 3A973464 18
+. C7 45 F0 10 00 00 00 83 C6 02 0F B6 0E E9 5C FF FF FF 
+
+==== BB 121 (0x3A9733D2) in 2152B, out 12174B, BBs exec'd 0 ====
+	0x3A9733D2:  movb %cl,%al
+	0x3A9733D4:  movb %cl,%dl
+	0x3A9733D6:  subb $0x30, %al
+	0x3A9733D8:  cmpb $0x9, %al
+	0x3A9733DA:  jnbe-8 0x3A97341B
+. 121 3A9733D2 10
+. 88 C8 88 CA 2C 30 3C 09 77 3F 
+
+==== BB 122 (0x3A9733DC) in 2162B, out 12268B, BBs exec'd 0 ====
+	0x3A9733DC:  leal 0(%esi,,), %esi
+	0x3A9733E0:  movsbl %dl,%eax
+	0x3A9733E3:  cmpl $0xCCCCCCC, %edi
+	0x3A9733E9:  leal -48(%eax), %ecx
+	0x3A9733EC:  jnbe-32 0x3A973476
+. 122 3A9733DC 22
+. 8D 74 26 00 0F BE C2 81 FF CC CC CC 0C 8D 48 D0 0F 87 84 00 00 00 
+
+==== BB 123 (0x3A9733F2) in 2184B, out 12368B, BBs exec'd 0 ====
+	0x3A9733F2:  cmpl $0x19999999, %edi
+	0x3A9733F8:  setz %al
+	0x3A9733FB:  cmpl $0x5, %ecx
+	0x3A9733FE:  setnbe %dl
+	0x3A973401:  andl %edx,%eax
+	0x3A973403:  testb $0x1, %al
+	0x3A973405:  jnz-8 0x3A973476
+. 123 3A9733F2 21
+. 81 FF 99 99 99 19 0F 94 C0 83 F9 05 0F 97 C2 21 D0 A8 01 75 6F 
+
+==== BB 124 (0x3A973407) in 2205B, out 12495B, BBs exec'd 0 ====
+	0x3A973407:  movl -16(%ebp),%edx
+	0x3A97340A:  incl %esi
+	0x3A97340B:  imull %edx, %edi
+	0x3A97340E:  movzbl (%esi),%edx
+	0x3A973411:  addl %ecx,%edi
+	0x3A973413:  movb %dl,%al
+	0x3A973415:  subb $0x30, %al
+	0x3A973417:  cmpb $0x9, %al
+	0x3A973419:  jbe-8 0x3A9733E0
+. 124 3A973407 20
+. 8B 55 F0 46 0F AF FA 0F B6 16 01 CF 88 D0 2C 30 3C 09 76 C5 
+
+==== BB 125 (0x3A9733E0) in 2225B, out 12634B, BBs exec'd 0 ====
+	0x3A9733E0:  movsbl %dl,%eax
+	0x3A9733E3:  cmpl $0xCCCCCCC, %edi
+	0x3A9733E9:  leal -48(%eax), %ecx
+	0x3A9733EC:  jnbe-32 0x3A973476
+. 125 3A9733E0 18
+. 0F BE C2 81 FF CC CC CC 0C 8D 48 D0 0F 87 84 00 00 00 
+
+==== BB 126 (0x3A97341B) in 2243B, out 12721B, BBs exec'd 0 ====
+	0x3A97341B:  movl 12(%ebp),%eax
+	0x3A97341E:  testl %eax,%eax
+	0x3A973420:  jz-8 0x3A973427
+. 126 3A97341B 7
+. 8B 45 0C 85 C0 74 05 
+
+==== BB 127 (0x3A973427) in 2250B, out 12793B, BBs exec'd 0 ====
+	0x3A973427:  movl -20(%ebp),%eax
+	0x3A97342A:  imull %edi, %eax
+	0x3A97342D:  addl $0x8, %esp
+	0x3A973430:  popl %ebx
+	0x3A973431:  popl %esi
+	0x3A973432:  popl %edi
+	0x3A973433:  popl %ebp
+	0x3A973434:  ret
+. 127 3A973427 14
+. 8B 45 EC 0F AF C7 83 C4 08 5B 5E 5F 5D C3 
+
+==== BB 128 (0x3A967807) in 2264B, out 12923B, BBs exec'd 0 ====
+	0x3A967807:  movl %eax,0xFFFFFC58(%ebx)
+	0x3A96780D:  jmp 0x3A966421
+. 128 3A967807 11
+. 89 83 58 FC FF FF E9 0F EC FF FF 
+
+==== BB 129 (0x3A966421) in 2275B, out 12970B, BBs exec'd 0 ====
+	0x3A966421:  leal 0xFFFFFF48(%ebp), %eax
+	0x3A966427:  call 0x3A9732E0
+. 129 3A966421 11
+. 8D 85 48 FF FF FF E8 B4 CE 00 00 
+
+==== BB 130 (0x3A967760) in 2286B, out 13031B, BBs exec'd 0 ====
+	0x3A967760:  cld
+	0x3A967761:  movl 0xFFFFFEFC(%ebp),%esi
+	0x3A967767:  movl $0xC,%ecx
+	0x3A96776C:  leal 0xFFFFF02D(%ebx), %edi
+	0x3A967772:  repe cmpsb
+. 130 3A967760 20
+. FC 8B B5 FC FE FF FF B9 0C 00 00 00 8D BB 2D F0 FF FF F3 A6 
+
+==== BB 131 (0x3A967772) in 2306B, out 13200B, BBs exec'd 0 ====
+	0x3A967772:  repe cmpsb
+. 131 3A967772 2
+. F3 A6 
+
+==== BB 132 (0x3A967774) in 2308B, out 13325B, BBs exec'd 0 ====
+	0x3A967774:  jnz-32 0x3A967A85
+. 132 3A967774 6
+. 0F 85 0B 03 00 00 
+
+==== BB 133 (0x3A96777A) in 2314B, out 13372B, BBs exec'd 0 ====
+	0x3A96777A:  movl 0xFFFFFEFC(%ebp),%eax
+	0x3A967780:  addl $0xD, %eax
+	0x3A967783:  movl %eax,0xAC(%ebx)
+	0x3A967789:  jmp 0x3A966421
+. 133 3A96777A 20
+. 8B 85 FC FE FF FF 83 C0 0D 89 83 AC 00 00 00 E9 93 EC FF FF 
+
+==== BB 134 (0x3A9678A7) in 2334B, out 13446B, BBs exec'd 0 ====
+	0x3A9678A7:  cld
+	0x3A9678A8:  movl 0xFFFFFEFC(%ebp),%esi
+	0x3A9678AE:  movl $0x7,%ecx
+	0x3A9678B3:  leal 0xFFFFED69(%ebx), %edi
+	0x3A9678B9:  repe cmpsb
+. 134 3A9678A7 20
+. FC 8B B5 FC FE FF FF B9 07 00 00 00 8D BB 69 ED FF FF F3 A6 
+
+==== BB 135 (0x3A9678BB) in 2354B, out 13615B, BBs exec'd 0 ====
+	0x3A9678BB:  jnz-32 0x3A967B31
+. 135 3A9678BB 6
+. 0F 85 70 02 00 00 
+
+==== BB 136 (0x3A967B31) in 2360B, out 13662B, BBs exec'd 0 ====
+	0x3A967B31:  cld
+	0x3A967B32:  movl 0xFFFFFEFC(%ebp),%esi
+	0x3A967B38:  movl $0x7,%ecx
+	0x3A967B3D:  leal 0xFFFFEDCF(%ebx), %edi
+	0x3A967B43:  repe cmpsb
+. 136 3A967B31 20
+. FC 8B B5 FC FE FF FF B9 07 00 00 00 8D BB CF ED FF FF F3 A6 
+
+==== BB 137 (0x3A967B43) in 2380B, out 13831B, BBs exec'd 0 ====
+	0x3A967B43:  repe cmpsb
+. 137 3A967B43 2
+. F3 A6 
+
+==== BB 138 (0x3A967B45) in 2382B, out 13956B, BBs exec'd 0 ====
+	0x3A967B45:  jnz-32 0x3A967EDB
+. 138 3A967B45 6
+. 0F 85 90 03 00 00 
+
+==== BB 139 (0x3A967B4B) in 2388B, out 14003B, BBs exec'd 0 ====
+	0x3A967B4B:  movl 0xFFFFFEFC(%ebp),%eax
+	0x3A967B51:  addl $0x8, %eax
+	0x3A967B54:  movl %eax,0xB0(%ebx)
+	0x3A967B5A:  jmp 0x3A966421
+. 139 3A967B4B 20
+. 8B 85 FC FE FF FF 83 C0 08 89 83 B0 00 00 00 E9 C2 E8 FF FF 
+
+==== BB 140 (0x3A9664A8) in 2408B, out 14077B, BBs exec'd 0 ====
+	0x3A9664A8:  movl 0xFFFFFEF8(%ebp),%eax
+	0x3A9664AE:  movl %eax,0xFFFFFF50(%ebp)
+	0x3A9664B4:  movl 0x130(%ebx),%eax
+	0x3A9664BA:  testl %eax,%eax
+	0x3A9664BC:  jnz-32 0x3A967A31
+. 140 3A9664A8 26
+. 8B 85 F8 FE FF FF 89 85 50 FF FF FF 8B 83 30 01 00 00 85 C0 0F 85 6F 15 00 00 
+
+==== BB 141 (0x3A9664C2) in 2434B, out 14181B, BBs exec'd 0 ====
+	0x3A9664C2:  movl 0xB8(%ebx),%edi
+	0x3A9664C8:  testl %edi,%edi
+	0x3A9664CA:  jz-32 0x3A966580
+. 141 3A9664C2 14
+. 8B BB B8 00 00 00 85 FF 0F 84 B0 00 00 00 
+
+==== BB 142 (0x3A966580) in 2448B, out 14256B, BBs exec'd 0 ====
+	0x3A966580:  movl 16(%ebp),%edx
+	0x3A966583:  movl $0x1,%eax
+	0x3A966588:  movl %eax,0x34(%ebx)
+	0x3A96658E:  leal 0xFFFED214(%ebx), %eax
+	0x3A966594:  cmpl %eax,(%edx)
+	0x3A966596:  jz-32 0x3A966A99
+. 142 3A966580 28
+. 8B 55 10 B8 01 00 00 00 89 83 34 00 00 00 8D 83 14 D2 FE FF 39 02 0F 84 FD 04 00 00 
+
+==== BB 143 (0x3A96659C) in 2476B, out 14363B, BBs exec'd 0 ====
+	0x3A96659C:  movl $0x0, (%esp,,)
+	0x3A9665A3:  leal 0xFFFFEDF6(%ebx), %edx
+	0x3A9665A9:  xorl %ecx, %ecx
+	0x3A9665AB:  movl %edx,%eax
+	0x3A9665AD:  call 0x3A96C9D0
+. 143 3A96659C 22
+. C7 04 24 00 00 00 00 8D 93 F6 ED FF FF 31 C9 89 D0 E8 1E 64 00 00 
+
+==== BB 144 _dl_new_object(0x3A96C9D0) in 2498B, out 14461B, BBs exec'd 0 ====
+	0x3A96C9D0:  pushl %ebp
+	0x3A96C9D1:  movl %esp,%ebp
+	0x3A96C9D3:  pushl %edi
+	0x3A96C9D4:  pushl %esi
+	0x3A96C9D5:  pushl %ebx
+	0x3A96C9D6:  subl $0x2C, %esp
+	0x3A96C9D9:  movl 8(%ebp),%edi
+	0x3A96C9DC:  call 0x3A97592B
+. 144 3A96C9D0 17
+. 55 89 E5 57 56 53 83 EC 2C 8B 7D 08 E8 4A 8F 00 00 
+
+==== BB 145 (0x3A96C9E1) in 2515B, out 14607B, BBs exec'd 0 ====
+	0x3A96C9E1:  addl $0xBC8B, %ebx
+	0x3A96C9E7:  movl %edx,-20(%ebp)
+	0x3A96C9EA:  movl %ecx,-24(%ebp)
+	0x3A96C9ED:  movl %eax,-16(%ebp)
+	0x3A96C9F0:  movl %edx,(%esp,,)
+	0x3A96C9F3:  call 0x3A9752D0
+. 145 3A96C9E1 23
+. 81 C3 8B BC 00 00 89 55 EC 89 4D E8 89 45 F0 89 14 24 E8 D8 88 00 00 
+
+==== BB 146 (0x3A9752DF) in 2538B, out 14721B, BBs exec'd 0 ====
+	0x3A9752DF:  cmpb %dh{si},(%eax)
+	0x3A9752E1:  jz-32 0x3A975386
+. 146 3A9752DF 8
+. 38 30 0F 84 9F 00 00 00 
+
+==== BB 147 (0x3A975386) in 2546B, out 14781B, BBs exec'd 0 ====
+	0x3A975386:  subl 4(%esp,,),%eax
+	0x3A97538A:  ret
+. 147 3A975386 5
+. 2B 44 24 04 C3 
+
+==== BB 148 (0x3A96C9F8) in 2551B, out 14833B, BBs exec'd 0 ====
+	0x3A96C9F8:  leal 1(%eax), %edx
+	0x3A96C9FB:  movl $0x1,%ecx
+	0x3A96CA00:  addl $0x221, %eax
+	0x3A96CA05:  movl %edx,-28(%ebp)
+	0x3A96CA08:  movl %ecx,4(%esp,,)
+	0x3A96CA0C:  movl %eax,(%esp,,)
+	0x3A96CA0F:  call 0x3A96582C
+. 148 3A96C9F8 28
+. 8D 50 01 B9 01 00 00 00 05 21 02 00 00 89 55 E4 89 4C 24 04 89 04 24 E8 18 8E FF FF 
+
+==== BB 149 (0x3A96582C) in 2579B, out 14953B, BBs exec'd 0 ====
+	0x3A96582C:  jmp*l 0x14(%ebx)
+. 149 3A96582C 6
+. FF A3 14 00 00 00 
+
+==== BB 150 calloc(0x3A973870) in 2585B, out 14980B, BBs exec'd 0 ====
+	0x3A973870:  pushl %ebp
+	0x3A973871:  movl %esp,%ebp
+	0x3A973873:  pushl %ebx
+	0x3A973874:  subl $0x4, %esp
+	0x3A973877:  movl 12(%ebp),%eax
+	0x3A97387A:  movl 8(%ebp),%ecx
+	0x3A97387D:  call 0x3A97592B
+. 150 3A973870 18
+. 55 89 E5 53 83 EC 04 8B 45 0C 8B 4D 08 E8 A9 20 00 00 
+
+==== BB 151 (0x3A973882) in 2603B, out 15107B, BBs exec'd 0 ====
+	0x3A973882:  addl $0x4DEA, %ebx
+	0x3A973888:  imull %ecx, %eax
+	0x3A97388B:  movl %eax,(%esp,,)
+	0x3A97388E:  call 0x3A96581C
+. 151 3A973882 17
+. 81 C3 EA 4D 00 00 0F AF C1 89 04 24 E8 89 1F FF FF 
+
+==== BB 152 (0x3A96581C) in 2620B, out 15193B, BBs exec'd 0 ====
+	0x3A96581C:  jmp*l 0x10(%ebx)
+. 152 3A96581C 6
+. FF A3 10 00 00 00 
+
+==== BB 153 malloc(0x3A973800) in 2626B, out 15220B, BBs exec'd 0 ====
+	0x3A973800:  pushl %ebp
+	0x3A973801:  movl %esp,%ebp
+	0x3A973803:  pushl %ebx
+	0x3A973804:  subl $0x8, %esp
+	0x3A973807:  movl 8(%ebp),%eax
+	0x3A97380A:  call 0x3A97592B
+. 153 3A973800 15
+. 55 89 E5 53 83 EC 08 8B 45 08 E8 1C 21 00 00 
+
+==== BB 154 (0x3A97380F) in 2641B, out 15334B, BBs exec'd 0 ====
+	0x3A97380F:  addl $0x4E5D, %ebx
+	0x3A973815:  movl $0x8, (%esp,,)
+	0x3A97381C:  movl %eax,4(%esp,,)
+	0x3A973820:  call 0x3A96580C
+. 154 3A97380F 22
+. 81 C3 5D 4E 00 00 C7 04 24 08 00 00 00 89 44 24 04 E8 E7 1F FF FF 
+
+==== BB 155 (0x3A96580C) in 2663B, out 15424B, BBs exec'd 0 ====
+	0x3A96580C:  jmp*l 0xC(%ebx)
+. 155 3A96580C 6
+. FF A3 0C 00 00 00 
+
+==== BB 156 __libc_memalign(0x3A973700) in 2669B, out 15451B, BBs exec'd 0 ====
+	0x3A973700:  pushl %ebp
+	0x3A973701:  movl %esp,%ebp
+	0x3A973703:  subl $0x24, %esp
+	0x3A973706:  movl %ebx,-12(%ebp)
+	0x3A973709:  movl %esi,-8(%ebp)
+	0x3A97370C:  movl 8(%ebp),%esi
+	0x3A97370F:  movl %edi,-4(%ebp)
+	0x3A973712:  movl 12(%ebp),%edi
+	0x3A973715:  call 0x3A97592B
+. 156 3A973700 26
+. 55 89 E5 83 EC 24 89 5D F4 89 75 F8 8B 75 08 89 7D FC 8B 7D 0C E8 11 22 00 00 
+
+==== BB 157 (0x3A97371A) in 2695B, out 15601B, BBs exec'd 0 ====
+	0x3A97371A:  addl $0x4F52, %ebx
+	0x3A973720:  movl 0x13C(%ebx),%ecx
+	0x3A973726:  testl %ecx,%ecx
+	0x3A973728:  jnz-8 0x3A97374A
+. 157 3A97371A 16
+. 81 C3 52 4F 00 00 8B 8B 3C 01 00 00 85 C9 75 20 
+
+==== BB 158 (0x3A97372A) in 2711B, out 15689B, BBs exec'd 0 ====
+	0x3A97372A:  movl 0xFFFFFC18(%ebx),%eax
+	0x3A973730:  leal 0x228(%ebx), %edx
+	0x3A973736:  movl %edx,0x138(%ebx)
+	0x3A97373C:  leal -1(%edx,%eax,1), %ecx
+	0x3A973740:  negl %eax
+	0x3A973742:  andl %eax,%ecx
+	0x3A973744:  movl %ecx,0x13C(%ebx)
+	0x3A97374A:  movl 0x138(%ebx),%eax
+	0x3A973750:  addl %esi,%eax
+	0x3A973752:  leal -1(%eax), %edx
+	0x3A973755:  negl %esi
+	0x3A973757:  andl %esi,%edx
+	0x3A973759:  leal (%edi,%edx,1), %eax
+	0x3A97375C:  cmpl %ecx,%eax
+	0x3A97375E:  jnb-8 0x3A973780
+. 158 3A97372A 54
+. 8B 83 18 FC FF FF 8D 93 28 02 00 00 89 93 38 01 00 00 8D 4C 02 FF F7 D8 21 C1 89 8B 3C 01 00 00 8B 83 38 01 00 00 01 F0 8D 50 FF F7 DE 21 F2 8D 04 17 39 C8 73 20 
+
+==== BB 159 (0x3A973760) in 2765B, out 15879B, BBs exec'd 0 ====
+	0x3A973760:  movl %edx,0x140(%ebx)
+	0x3A973766:  movl %edx,%eax
+	0x3A973768:  leal (%edi,%edx,1), %edx
+	0x3A97376B:  movl %edx,0x138(%ebx)
+	0x3A973771:  movl -12(%ebp),%ebx
+	0x3A973774:  movl -8(%ebp),%esi
+	0x3A973777:  movl -4(%ebp),%edi
+	0x3A97377A:  movl %ebp,%esp
+	0x3A97377C:  popl %ebp
+	0x3A97377D:  ret
+. 159 3A973760 30
+. 89 93 40 01 00 00 89 D0 8D 14 17 89 93 38 01 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 160 (0x3A973825) in 2795B, out 16027B, BBs exec'd 0 ====
+	0x3A973825:  addl $0x8, %esp
+	0x3A973828:  popl %ebx
+	0x3A973829:  popl %ebp
+	0x3A97382A:  ret
+. 160 3A973825 6
+. 83 C4 08 5B 5D C3 
+
+==== BB 161 (0x3A973893) in 2801B, out 16103B, BBs exec'd 0 ====
+	0x3A973893:  popl %edx
+	0x3A973894:  popl %ebx
+	0x3A973895:  popl %ebp
+	0x3A973896:  ret
+. 161 3A973893 4
+. 5A 5B 5D C3 
+
+==== BB 162 (0x3A96CA14) in 2805B, out 16181B, BBs exec'd 0 ====
+	0x3A96CA14:  movl %eax,-32(%ebp)
+	0x3A96CA17:  movl -32(%ebp),%edx
+	0x3A96CA1A:  xorl %eax, %eax
+	0x3A96CA1C:  testl %edx,%edx
+	0x3A96CA1E:  jz-32 0x3A96CC29
+. 162 3A96CA14 16
+. 89 45 E0 8B 55 E0 31 C0 85 D2 0F 84 05 02 00 00 
+
+==== BB 163 (0x3A96CA24) in 2821B, out 16280B, BBs exec'd 0 ====
+	0x3A96CA24:  movl -32(%ebp),%esi
+	0x3A96CA27:  movl -32(%ebp),%ecx
+	0x3A96CA2A:  movl -28(%ebp),%edx
+	0x3A96CA2D:  addl $0x214, %esi
+	0x3A96CA33:  movl %ecx,%eax
+	0x3A96CA35:  addl $0x220, %eax
+	0x3A96CA3A:  movl %esi,20(%ecx)
+	0x3A96CA3D:  movl %edx,8(%esp,,)
+	0x3A96CA41:  movl -20(%ebp),%ecx
+	0x3A96CA44:  movl %eax,(%esp,,)
+	0x3A96CA47:  movl %ecx,4(%esp,,)
+	0x3A96CA4B:  call 0x3A975870
+. 163 3A96CA24 44
+. 8B 75 E0 8B 4D E0 8B 55 E4 81 C6 14 02 00 00 89 C8 05 20 02 00 00 89 71 14 89 54 24 08 8B 4D EC 89 04 24 89 4C 24 04 E8 20 8E 00 00 
+
+==== BB 164 memcpy(0x3A975870) in 2865B, out 16451B, BBs exec'd 0 ====
+	0x3A975870:  pushl %edi
+	0x3A975871:  pushl %esi
+	0x3A975872:  movl 12(%esp,,),%edi
+	0x3A975876:  movl 16(%esp,,),%esi
+	0x3A97587A:  movl 20(%esp,,),%ecx
+	0x3A97587E:  movl %edi,%eax
+	0x3A975880:  cld
+	0x3A975881:  cmpl $0x20, %ecx
+	0x3A975884:  jbe-8 0x3A9758DC
+. 164 3A975870 22
+. 57 56 8B 7C 24 0C 8B 74 24 10 8B 4C 24 14 89 F8 FC 83 F9 20 76 56 
+
+==== BB 165 (0x3A9758DC) in 2887B, out 16600B, BBs exec'd 0 ====
+	0x3A9758DC:  rep movsb
+. 165 3A9758DC 2
+. F3 A4 
+
+==== BB 166 (0x3A9758DE) in 2889B, out 16695B, BBs exec'd 0 ====
+	0x3A9758DE:  popl %esi
+	0x3A9758DF:  popl %edi
+	0x3A9758E0:  ret
+. 166 3A9758DE 3
+. 5E 5F C3 
+
+==== BB 167 (0x3A96CA50) in 2892B, out 16757B, BBs exec'd 0 ====
+	0x3A96CA50:  movl $0x1, 8(%esi)
+	0x3A96CA57:  movl -32(%ebp),%edx
+	0x3A96CA5A:  movl $0x4,%esi
+	0x3A96CA5F:  movl -16(%ebp),%ecx
+	0x3A96CA62:  movl %eax,0x214(%edx)
+	0x3A96CA68:  movl %ecx,4(%edx)
+	0x3A96CA6B:  movl -32(%ebp),%ecx
+	0x3A96CA6E:  movzbl -24(%ebp),%edx
+	0x3A96CA72:  movzbl 0x174(%ecx),%eax
+	0x3A96CA79:  movl %edi,0x160(%ecx)
+	0x3A96CA7F:  andb $0x3, %dl
+	0x3A96CA82:  movl %esi,0x1AC(%ecx)
+	0x3A96CA88:  andb $0xFC, %al
+	0x3A96CA8A:  orb %dl,%al
+	0x3A96CA8C:  movb %al,0x174(%ecx)
+	0x3A96CA92:  movl %ecx,%edx
+	0x3A96CA94:  movl 0xFFFFF994(%ebx),%eax
+	0x3A96CA9A:  addl $0x19C, %edx
+	0x3A96CAA0:  movl %edx,0x1B0(%ecx)
+	0x3A96CAA6:  xorl %ecx, %ecx
+	0x3A96CAA8:  testl %eax,%eax
+	0x3A96CAAA:  jz-32 0x3A96CC33
+. 167 3A96CA50 96
+. C7 46 08 01 00 00 00 8B 55 E0 BE 04 00 00 00 8B 4D F0 89 82 14 02 00 00 89 4A 04 8B 4D E0 0F B6 55 E8 0F B6 81 74 01 00 00 89 B9 60 01 00 00 80 E2 03 89 B1 AC 01 00 00 24 FC 08 D0 88 81 74 01 00 00 89 CA 8B 83 94 F9 FF FF 81 C2 9C 01 00 00 89 91 B0 01 00 00 31 C9 85 C0 0F 84 83 01 00 00 
+
+==== BB 168 (0x3A96CC33) in 2988B, out 17098B, BBs exec'd 0 ====
+	0x3A96CC33:  movl -32(%ebp),%eax
+	0x3A96CC36:  movl %eax,0xFFFFF994(%ebx)
+	0x3A96CC3C:  incl 0xFFFFF998(%ebx)
+	0x3A96CC42:  addl $0x1, 0xFFFFF9C4(%ebx)
+	0x3A96CC49:  adcl $0x0, 0xFFFFF9C8(%ebx)
+	0x3A96CC50:  testl %edi,%edi
+	0x3A96CC52:  jnz-32 0x3A96CB07
+. 168 3A96CC33 37
+. 8B 45 E0 89 83 94 F9 FF FF FF 83 98 F9 FF FF 83 83 C4 F9 FF FF 01 83 93 C8 F9 FF FF 00 85 FF 0F 85 AF FE FF FF 
+
+==== BB 169 (0x3A96CC58) in 3025B, out 17244B, BBs exec'd 0 ====
+	0x3A96CC58:  movl -32(%ebp),%edi
+	0x3A96CC5B:  jmp 0x3A96CB2C
+. 169 3A96CC58 8
+. 8B 7D E0 E9 CC FE FF FF 
+
+==== BB 170 (0x3A96CB2C) in 3033B, out 17288B, BBs exec'd 0 ====
+	0x3A96CB2C:  testl %ecx,%ecx
+	0x3A96CB2E:  leal 0x150(%edi), %eax
+	0x3A96CB34:  jz-8 0x3A96CB3A
+. 170 3A96CB2C 10
+. 85 C9 8D 87 50 01 00 00 74 04 
+
+==== BB 171 (0x3A96CB3A) in 3043B, out 17363B, BBs exec'd 0 ====
+	0x3A96CB3A:  movl %eax,(%edx,%ecx,4)
+	0x3A96CB3D:  movl -32(%ebp),%eax
+	0x3A96CB40:  movl -32(%ebp),%edx
+	0x3A96CB43:  movl -16(%ebp),%ecx
+	0x3A96CB46:  addl $0x150, %eax
+	0x3A96CB4B:  movl %eax,0x1B4(%edx)
+	0x3A96CB51:  movzbl (%ecx),%ecx
+	0x3A96CB54:  testb %cl,%cl
+	0x3A96CB56:  movb %cl,-33(%ebp)
+	0x3A96CB59:  jz-32 0x3A96CC26
+. 171 3A96CB3A 37
+. 89 04 8A 8B 45 E0 8B 55 E0 8B 4D F0 05 50 01 00 00 89 82 B4 01 00 00 0F B6 09 84 C9 88 4D DF 0F 84 C7 00 00 00 
+
+==== BB 172 (0x3A96CC26) in 3080B, out 17529B, BBs exec'd 0 ====
+	0x3A96CC26:  movl -32(%ebp),%eax
+	0x3A96CC29:  addl $0x2C, %esp
+	0x3A96CC2C:  popl %ebx
+	0x3A96CC2D:  popl %esi
+	0x3A96CC2E:  popl %edi
+	0x3A96CC2F:  popl %ebp
+	0x3A96CC30:  ret 4
+. 172 3A96CC26 13
+. 8B 45 E0 83 C4 2C 5B 5E 5F 5D C2 04 00 
+
+==== BB 173 (0x3A9665B2) in 3093B, out 17651B, BBs exec'd 0 ====
+	0x3A9665B2:  movl 0xFFFFF994(%ebx),%ecx
+	0x3A9665B8:  subl $0x4, %esp
+	0x3A9665BB:  testl %ecx,%ecx
+	0x3A9665BD:  jz-32 0x3A9679C8
+. 173 3A9665B2 17
+. 8B 8B 94 F9 FF FF 83 EC 04 85 C9 0F 84 05 14 00 00 
+
+==== BB 174 (0x3A9665C3) in 3110B, out 17739B, BBs exec'd 0 ====
+	0x3A9665C3:  movl 8(%ebp),%esi
+	0x3A9665C6:  movl 12(%ebp),%eax
+	0x3A9665C9:  movl %esi,0x144(%ecx)
+	0x3A9665CF:  movl 16(%ebp),%esi
+	0x3A9665D2:  movw %ax,0x14C(%ecx)
+	0x3A9665D9:  movl (%esi),%eax
+	0x3A9665DB:  movl %eax,0x148(%ecx)
+	0x3A9665E1:  incl 0x170(%ecx)
+	0x3A9665E7:  xorl %eax, %eax
+	0x3A9665E9:  movl 8(%ebp),%esi
+	0x3A9665EC:  movl %eax,0x198(%ecx)
+	0x3A9665F2:  movl $0xFFFFFFFF,%eax
+	0x3A9665F7:  movl %eax,0x194(%ecx)
+	0x3A9665FD:  shll $0x5, 12(%ebp)
+	0x3A966601:  movl 12(%ebp),%edx
+	0x3A966604:  leal (%edx,%esi,1), %eax
+	0x3A966607:  cmpl %eax,%esi
+	0x3A966609:  jnb-32 0x3A9666B9
+. 174 3A9665C3 76
+. 8B 75 08 8B 45 0C 89 B1 44 01 00 00 8B 75 10 66 89 81 4C 01 00 00 8B 06 89 81 48 01 00 00 FF 81 70 01 00 00 31 C0 8B 75 08 89 81 98 01 00 00 B8 FF FF FF FF 89 81 94 01 00 00 C1 65 0C 05 8B 55 0C 8D 04 32 39 C6 0F 83 AA 00 00 00 
+
+==== BB 175 (0x3A96660F) in 3186B, out 18001B, BBs exec'd 0 ====
+	0x3A96660F:  movl %eax,0xFFFFFEE4(%ebp)
+	0x3A966615:  jmp-8 0x3A96663A
+. 175 3A96660F 8
+. 89 85 E4 FE FF FF EB 23 
+
+==== BB 176 (0x3A96663A) in 3194B, out 18048B, BBs exec'd 0 ====
+	0x3A96663A:  movl (%esi),%eax
+	0x3A96663C:  cmpl $0x3, %eax
+	0x3A96663F:  jz-8 0x3A966670
+. 176 3A96663A 7
+. 8B 06 83 F8 03 74 2F 
+
+==== BB 177 (0x3A966641) in 3201B, out 18115B, BBs exec'd 0 ====
+	0x3A966641:  cmpl $0x3, %eax
+	0x3A966644:  jbe-8 0x3A966617
+. 177 3A966641 5
+. 83 F8 03 76 D1 
+
+==== BB 178 (0x3A966646) in 3206B, out 18172B, BBs exec'd 0 ====
+	0x3A966646:  cmpl $0x6474E551, %eax
+	0x3A96664B:  jz-32 0x3A966CC0
+. 178 3A966646 11
+. 3D 51 E5 74 64 0F 84 6F 06 00 00 
+
+==== BB 179 (0x3A966651) in 3217B, out 18232B, BBs exec'd 0 ====
+	0x3A966651:  cmpl $0x6474E551, %eax
+	0x3A966656:  jnbe-32 0x3A966C9B
+. 179 3A966651 11
+. 3D 51 E5 74 64 0F 87 3F 06 00 00 
+
+==== BB 180 (0x3A96665C) in 3228B, out 18292B, BBs exec'd 0 ====
+	0x3A96665C:  cmpl $0x6, %eax
+	0x3A96665F:  nop
+	0x3A966660:  jnz-8 0x3A96662F
+. 180 3A96665C 6
+. 83 F8 06 90 75 CD 
+
+==== BB 181 (0x3A966662) in 3234B, out 18353B, BBs exec'd 0 ====
+	0x3A966662:  movl 8(%esi),%edi
+	0x3A966665:  movl 8(%ebp),%eax
+	0x3A966668:  subl %edi,%eax
+	0x3A96666A:  movl %eax,(%ecx)
+	0x3A96666C:  jmp-8 0x3A96662F
+. 181 3A966662 12
+. 8B 7E 08 8B 45 08 29 F8 89 01 EB C1 
+
+==== BB 182 (0x3A96662F) in 3246B, out 18433B, BBs exec'd 0 ====
+	0x3A96662F:  addl $0x20, %esi
+	0x3A966632:  cmpl 0xFFFFFEE4(%ebp),%esi
+	0x3A966638:  jnb-8 0x3A9666B9
+. 182 3A96662F 11
+. 83 C6 20 3B B5 E4 FE FF FF 73 7F 
+
+==== BB 183 (0x3A966670) in 3257B, out 18511B, BBs exec'd 0 ====
+	0x3A966670:  movl (%ecx),%eax
+	0x3A966672:  movl 8(%esi),%edx
+	0x3A966675:  addl %eax,%edx
+	0x3A966677:  movl %edx,0x94(%ebx)
+	0x3A96667D:  leal 0x94(%ebx), %eax
+	0x3A966683:  movl %eax,0xFFFFF9F8(%ebx)
+	0x3A966689:  movl 0xFFFFF9EC(%ebx),%eax
+	0x3A96668F:  testl %eax,%eax
+	0x3A966691:  jnz-32 0x3A967586
+. 183 3A966670 39
+. 8B 01 8B 56 08 01 C2 89 93 94 00 00 00 8D 83 94 00 00 00 89 83 F8 F9 FF FF 8B 83 EC F9 FF FF 85 C0 0F 85 EF 0E 00 00 
+
+==== BB 184 (0x3A967586) in 3296B, out 18654B, BBs exec'd 0 ====
+	0x3A967586:  movb $0x1, 0xFFFFFF0B(%ebp)
+	0x3A96758D:  jmp 0x3A96662F
+. 184 3A967586 12
+. C6 85 0B FF FF FF 01 E9 9D F0 FF FF 
+
+==== BB 185 (0x3A966617) in 3308B, out 18702B, BBs exec'd 0 ====
+	0x3A966617:  cmpl $0x1, %eax
+	0x3A96661A:  jz-32 0x3A966B96
+. 185 3A966617 9
+. 83 F8 01 0F 84 76 05 00 00 
+
+==== BB 186 (0x3A966B96) in 3317B, out 18759B, BBs exec'd 0 ====
+	0x3A966B96:  movl 28(%esi),%eax
+	0x3A966B99:  movl 8(%esi),%edx
+	0x3A966B9C:  movl (%ecx),%edi
+	0x3A966B9E:  decl %eax
+	0x3A966B9F:  notl %eax
+	0x3A966BA1:  andl %edx,%eax
+	0x3A966BA3:  leal (%eax,%edi,1), %eax
+	0x3A966BA6:  cmpl %eax,0x194(%ecx)
+	0x3A966BAC:  jbe-8 0x3A966BB7
+. 186 3A966B96 24
+. 8B 46 1C 8B 56 08 8B 39 48 F7 D0 21 D0 8D 04 38 39 81 94 01 00 00 76 09 
+
+==== BB 187 (0x3A966BAE) in 3341B, out 18888B, BBs exec'd 0 ====
+	0x3A966BAE:  movl %eax,0x194(%ecx)
+	0x3A966BB4:  movl 8(%esi),%edx
+	0x3A966BB7:  leal (%edx,%edi,1), %eax
+	0x3A966BBA:  movl 20(%esi),%edi
+	0x3A966BBD:  addl %edi,%eax
+	0x3A966BBF:  cmpl %eax,0x198(%ecx)
+	0x3A966BC5:  jnb-32 0x3A96662F
+. 187 3A966BAE 29
+. 89 81 94 01 00 00 8B 56 08 8D 04 3A 8B 7E 14 01 F8 39 81 98 01 00 00 0F 83 64 FA FF FF 
+
+==== BB 188 (0x3A966BCB) in 3370B, out 19018B, BBs exec'd 0 ====
+	0x3A966BCB:  movl %eax,0x198(%ecx)
+	0x3A966BD1:  jmp 0x3A96662F
+. 188 3A966BCB 11
+. 89 81 98 01 00 00 E9 59 FA FF FF 
+
+==== BB 189 (0x3A966BB7) in 3381B, out 19065B, BBs exec'd 0 ====
+	0x3A966BB7:  leal (%edx,%edi,1), %eax
+	0x3A966BBA:  movl 20(%esi),%edi
+	0x3A966BBD:  addl %edi,%eax
+	0x3A966BBF:  cmpl %eax,0x198(%ecx)
+	0x3A966BC5:  jnb-32 0x3A96662F
+. 189 3A966BB7 20
+. 8D 04 3A 8B 7E 14 01 F8 39 81 98 01 00 00 0F 83 64 FA FF FF 
+
+==== BB 190 (0x3A966620) in 3401B, out 19169B, BBs exec'd 0 ====
+	0x3A966620:  cmpl $0x2, %eax
+	0x3A966623:  jnz-8 0x3A96662F
+. 190 3A966620 5
+. 83 F8 02 75 0A 
+
+==== BB 191 (0x3A966625) in 3406B, out 19226B, BBs exec'd 0 ====
+	0x3A966625:  movl 8(%esi),%eax
+	0x3A966628:  movl (%ecx),%edx
+	0x3A96662A:  addl %edx,%eax
+	0x3A96662C:  movl %eax,8(%ecx)
+	0x3A96662F:  addl $0x20, %esi
+	0x3A966632:  cmpl 0xFFFFFEE4(%ebp),%esi
+	0x3A966638:  jnb-8 0x3A9666B9
+. 191 3A966625 21
+. 8B 46 08 8B 11 01 D0 89 41 08 83 C6 20 3B B5 E4 FE FF FF 73 7F 
+
+==== BB 192 (0x3A966CC0) in 3427B, out 19346B, BBs exec'd 0 ====
+	0x3A966CC0:  movl 24(%esi),%eax
+	0x3A966CC3:  movl %eax,0xFFFFFC00(%ebx)
+	0x3A966CC9:  jmp 0x3A96662F
+. 192 3A966CC0 14
+. 8B 46 18 89 83 00 FC FF FF E9 61 F9 FF FF 
+
+==== BB 193 (0x3A9666B9) in 3441B, out 19406B, BBs exec'd 0 ====
+	0x3A9666B9:  movl 0x198(%ecx),%edx
+	0x3A9666BF:  testl %edx,%edx
+	0x3A9666C1:  jnz-8 0x3A9666CE
+. 193 3A9666B9 10
+. 8B 91 98 01 00 00 85 D2 75 0B 
+
+==== BB 194 (0x3A9666CE) in 3451B, out 19481B, BBs exec'd 0 ====
+	0x3A9666CE:  movl 0xFFFFF9F8(%ebx),%eax
+	0x3A9666D4:  testl %eax,%eax
+	0x3A9666D6:  jnz-8 0x3A9666F4
+. 194 3A9666CE 10
+. 8B 83 F8 F9 FF FF 85 C0 75 1C 
+
+==== BB 195 (0x3A9666F4) in 3461B, out 19556B, BBs exec'd 0 ====
+	0x3A9666F4:  cmpb $0x0, 0xFFFFFF02(%ebp)
+	0x3A9666FB:  jnz-32 0x3A966837
+. 195 3A9666F4 13
+. 80 BD 02 FF FF FF 00 0F 85 36 01 00 00 
+
+==== BB 196 (0x3A966701) in 3474B, out 19622B, BBs exec'd 0 ====
+	0x3A966701:  movl %ecx,0xFFFFFEE0(%ebp)
+	0x3A966707:  movl 8(%ecx),%esi
+	0x3A96670A:  testl %esi,%esi
+	0x3A96670C:  movl %esi,0xFFFFFEDC(%ebp)
+	0x3A966712:  jz-32 0x3A96682C
+. 196 3A966701 23
+. 89 8D E0 FE FF FF 8B 71 08 85 F6 89 B5 DC FE FF FF 0F 84 14 01 00 00 
+
+==== BB 197 (0x3A966718) in 3497B, out 19723B, BBs exec'd 0 ====
+	0x3A966718:  movl (%esi),%esi
+	0x3A96671A:  leal 24(%ecx), %edi
+	0x3A96671D:  testl %esi,%esi
+	0x3A96671F:  jnz-8 0x3A966741
+. 197 3A966718 9
+. 8B 36 8D 79 18 85 F6 75 20 
+
+==== BB 198 (0x3A966741) in 3506B, out 19805B, BBs exec'd 0 ====
+	0x3A966741:  cmpl $0x21, %esi
+	0x3A966744:  jle-8 0x3A966723
+. 198 3A966741 5
+. 83 FE 21 7E DD 
+
+==== BB 199 (0x3A966723) in 3511B, out 19862B, BBs exec'd 0 ====
+	0x3A966723:  movl 0xFFFFFEDC(%ebp),%edx
+	0x3A966729:  movl %edx,(%edi,%esi,4)
+	0x3A96672C:  addl $0x8, 0xFFFFFEDC(%ebp)
+	0x3A966733:  movl 0xFFFFFEDC(%ebp),%esi
+	0x3A966739:  movl (%esi),%eax
+	0x3A96673B:  testl %eax,%eax
+	0x3A96673D:  movl %eax,%esi
+	0x3A96673F:  jz-8 0x3A966768
+. 199 3A966723 30
+. 8B 95 DC FE FF FF 89 14 B7 83 85 DC FE FF FF 08 8B B5 DC FE FF FF 8B 06 85 C0 89 C6 74 27 
+
+==== BB 200 (0x3A966746) in 3541B, out 20003B, BBs exec'd 0 ====
+	0x3A966746:  movl $0x6FFFFFFF,%eax
+	0x3A96674B:  subl %esi,%eax
+	0x3A96674D:  cmpl $0xF, %eax
+	0x3A966750:  jnbe-32 0x3A966BD6
+. 200 3A966746 16
+. B8 FF FF FF 6F 29 F0 83 F8 0F 0F 87 80 04 00 00 
+
+==== BB 201 (0x3A966756) in 3557B, out 20076B, BBs exec'd 0 ====
+	0x3A966756:  movl $0x70000021,%eax
+	0x3A96675B:  subl %esi,%eax
+	0x3A96675D:  movl 0xFFFFFEDC(%ebp),%esi
+	0x3A966763:  movl %esi,(%edi,%eax,4)
+	0x3A966766:  jmp-8 0x3A96672C
+. 201 3A966756 18
+. B8 21 00 00 70 29 F0 8B B5 DC FE FF FF 89 34 87 EB C4 
+
+==== BB 202 (0x3A96672C) in 3575B, out 20160B, BBs exec'd 0 ====
+	0x3A96672C:  addl $0x8, 0xFFFFFEDC(%ebp)
+	0x3A966733:  movl 0xFFFFFEDC(%ebp),%esi
+	0x3A966739:  movl (%esi),%eax
+	0x3A96673B:  testl %eax,%eax
+	0x3A96673D:  movl %eax,%esi
+	0x3A96673F:  jz-8 0x3A966768
+. 202 3A96672C 21
+. 83 85 DC FE FF FF 08 8B B5 DC FE FF FF 8B 06 85 C0 89 C6 74 27 
+
+==== BB 203 (0x3A966768) in 3596B, out 20268B, BBs exec'd 0 ====
+	0x3A966768:  movl (%ecx),%edx
+	0x3A96676A:  testl %edx,%edx
+	0x3A96676C:  jz-8 0x3A9667C7
+. 203 3A966768 6
+. 8B 11 85 D2 74 59 
+
+==== BB 204 (0x3A9667C7) in 3602B, out 20337B, BBs exec'd 0 ====
+	0x3A9667C7:  movl 120(%edi),%edx
+	0x3A9667CA:  testl %edx,%edx
+	0x3A9667CC:  jz-8 0x3A9667FE
+. 204 3A9667C7 7
+. 8B 57 78 85 D2 74 30 
+
+==== BB 205 (0x3A9667FE) in 3609B, out 20409B, BBs exec'd 0 ====
+	0x3A9667FE:  movl 0x98(%edi),%edx
+	0x3A966804:  testl %edx,%edx
+	0x3A966806:  jz-8 0x3A96681E
+. 205 3A9667FE 10
+. 8B 97 98 00 00 00 85 D2 74 16 
+
+==== BB 206 (0x3A96681E) in 3619B, out 20487B, BBs exec'd 0 ====
+	0x3A96681E:  movl 116(%edi),%eax
+	0x3A966821:  testl %eax,%eax
+	0x3A966823:  jz-8 0x3A96682C
+. 206 3A96681E 7
+. 8B 47 74 85 C0 74 07 
+
+==== BB 207 (0x3A96682C) in 3626B, out 20559B, BBs exec'd 0 ====
+	0x3A96682C:  movl 40(%ecx),%eax
+	0x3A96682F:  testl %eax,%eax
+	0x3A966831:  jnz-32 0x3A966D00
+. 207 3A96682C 11
+. 8B 41 28 85 C0 0F 85 C9 04 00 00 
+
+==== BB 208 (0x3A966D00) in 3637B, out 20631B, BBs exec'd 0 ====
+	0x3A966D00:  movl %ecx,%eax
+	0x3A966D02:  call 0x3A96BD80
+. 208 3A966D00 7
+. 89 C8 E8 79 50 00 00 
+
+==== BB 209 (0x3A966D07) in 3644B, out 20686B, BBs exec'd 0 ====
+	0x3A966D07:  jmp 0x3A966837
+. 209 3A966D07 5
+. E9 2B FB FF FF 
+
+==== BB 210 (0x3A966837) in 3649B, out 20714B, BBs exec'd 0 ====
+	0x3A966837:  cmpl $0x2, 0xFFFFFF50(%ebp)
+	0x3A96683E:  jz-32 0x3A967D63
+. 210 3A966837 13
+. 83 BD 50 FF FF FF 02 0F 84 1F 15 00 00 
+
+==== BB 211 (0x3A966844) in 3662B, out 20780B, BBs exec'd 0 ====
+	0x3A966844:  cmpb $0x0, 0xFFFFFF02(%ebp)
+	0x3A96684B:  jz-32 0x3A966CF0
+. 211 3A966844 13
+. 80 BD 02 FF FF FF 00 0F 84 9F 04 00 00 
+
+==== BB 212 (0x3A966CF0) in 3675B, out 20846B, BBs exec'd 0 ====
+	0x3A966CF0:  movl 0xAC(%ebx),%eax
+	0x3A966CF6:  call 0x3A96AB30
+. 212 3A966CF0 11
+. 8B 83 AC 00 00 00 E8 35 3E 00 00 
+
+==== BB 213 _dl_init_paths(0x3A96AB30) in 3686B, out 20910B, BBs exec'd 0 ====
+	0x3A96AB30:  pushl %ebp
+	0x3A96AB31:  movl %esp,%ebp
+	0x3A96AB33:  pushl %edi
+	0x3A96AB34:  pushl %esi
+	0x3A96AB35:  pushl %ebx
+	0x3A96AB36:  subl $0x2C, %esp
+	0x3A96AB39:  call 0x3A97592B
+. 213 3A96AB30 14
+. 55 89 E5 57 56 53 83 EC 2C E8 ED AD 00 00 
+
+==== BB 214 (0x3A96AB3E) in 3700B, out 21043B, BBs exec'd 0 ====
+	0x3A96AB3E:  addl $0xDB2E, %ebx
+	0x3A96AB44:  movl %eax,-16(%ebp)
+	0x3A96AB47:  movl 0xFFFFFC24(%ebx),%edx
+	0x3A96AB4D:  leal 0xCC(%ebx), %ecx
+	0x3A96AB53:  movl 0xFFFFFC20(%ebx),%eax
+	0x3A96AB59:  movl %ecx,(%esp,,)
+	0x3A96AB5C:  leal 0xC8(%ebx), %ecx
+	0x3A96AB62:  call 0x3A972740
+. 214 3A96AB3E 41
+. 81 C3 2E DB 00 00 89 45 F0 8B 93 24 FC FF FF 8D 8B CC 00 00 00 8B 83 20 FC FF FF 89 0C 24 8D 8B C8 00 00 00 E8 D9 7B 00 00 
+
+==== BB 215 _dl_important_hwcaps(0x3A972740) in 3741B, out 21186B, BBs exec'd 0 ====
+	0x3A972740:  pushl %ebp
+	0x3A972741:  movl %esp,%ebp
+	0x3A972743:  pushl %edi
+	0x3A972744:  pushl %esi
+	0x3A972745:  pushl %ebx
+	0x3A972746:  subl $0x38, %esp
+	0x3A972749:  call 0x3A97592B
+. 215 3A972740 14
+. 55 89 E5 57 56 53 83 EC 38 E8 DD 31 00 00 
+
+==== BB 216 (0x3A97274E) in 3755B, out 21319B, BBs exec'd 0 ====
+	0x3A97274E:  addl $0x5F1E, %ebx
+	0x3A972754:  movl %edx,-20(%ebp)
+	0x3A972757:  testl %eax,%eax
+	0x3A972759:  movl 0xFFFFFC58(%ebx),%edx
+	0x3A97275F:  movl %eax,-16(%ebp)
+	0x3A972762:  setnz %al
+	0x3A972765:  xorl %edi, %edi
+	0x3A972767:  andl 0xFFFFFC54(%ebx),%edx
+	0x3A97276D:  movzbl %al,%eax
+	0x3A972770:  movl %ecx,-24(%ebp)
+	0x3A972773:  movl %eax,-32(%ebp)
+	0x3A972776:  movl %edx,-28(%ebp)
+	0x3A972779:  jz-8 0x3A9727A0
+. 216 3A97274E 45
+. 81 C3 1E 5F 00 00 89 55 EC 85 C0 8B 93 58 FC FF FF 89 45 F0 0F 95 C0 31 FF 23 93 54 FC FF FF 0F B6 C0 89 4D E8 89 45 E0 89 55 E4 74 25 
+
+==== BB 217 (0x3A9727A0) in 3800B, out 21523B, BBs exec'd 0 ====
+	0x3A9727A0:  movl -32(%ebp),%eax
+	0x3A9727A3:  testl %eax,%eax
+	0x3A9727A5:  jz-32 0x3A9728CB
+. 217 3A9727A0 11
+. 8B 45 E0 85 C0 0F 84 20 01 00 00 
+
+==== BB 218 (0x3A9727AB) in 3811B, out 21595B, BBs exec'd 0 ====
+	0x3A9727AB:  movl -32(%ebp),%edx
+	0x3A9727AE:  xorl %esi, %esi
+	0x3A9727B0:  xorl %edi, %edi
+	0x3A9727B2:  leal 0x10(,%edx,8), %eax
+	0x3A9727B9:  subl %eax,%esp
+	0x3A9727BB:  movl -28(%ebp),%eax
+	0x3A9727BE:  leal 27(%esp,,), %ecx
+	0x3A9727C2:  andl $0xFFFFFFF0, %ecx
+	0x3A9727C5:  testl %eax,%eax
+	0x3A9727C7:  movl %ecx,-40(%ebp)
+	0x3A9727CA:  jz-8 0x3A97280C
+. 218 3A9727AB 33
+. 8B 55 E0 31 F6 31 FF 8D 04 D5 10 00 00 00 29 C4 8B 45 E4 8D 4C 24 1B 83 E1 F0 85 C0 89 4D D8 74 40 
+
+==== BB 219 (0x3A97280C) in 3844B, out 21764B, BBs exec'd 0 ====
+	0x3A97280C:  movl -16(%ebp),%edi
+	0x3A97280F:  testl %edi,%edi
+	0x3A972811:  jz-8 0x3A972823
+. 219 3A97280C 7
+. 8B 7D F0 85 FF 74 10 
+
+==== BB 220 (0x3A972813) in 3851B, out 21836B, BBs exec'd 0 ====
+	0x3A972813:  movl -40(%ebp),%eax
+	0x3A972816:  movl -16(%ebp),%edx
+	0x3A972819:  movl -20(%ebp),%ecx
+	0x3A97281C:  movl %edx,(%eax,%esi,8)
+	0x3A97281F:  movl %ecx,4(%eax,%esi,8)
+	0x3A972823:  cmpl $0x1, -32(%ebp)
+	0x3A972827:  jz-32 0x3A9728FD
+. 220 3A972813 26
+. 8B 45 D8 8B 55 F0 8B 4D EC 89 14 F0 89 4C F0 04 83 7D E0 01 0F 84 D0 00 00 00 
+
+==== BB 221 (0x3A9728FD) in 3877B, out 21963B, BBs exec'd 0 ====
+	0x3A9728FD:  movl -40(%ebp),%esi
+	0x3A972900:  movl 4(%esi),%edx
+	0x3A972903:  incl %edx
+	0x3A972904:  movzbl -32(%ebp),%ecx
+	0x3A972908:  movl $0x1,%esi
+	0x3A97290D:  movl %esi,%eax
+	0x3A97290F:  shll %cl, %eax
+	0x3A972911:  movl -24(%ebp),%ecx
+	0x3A972914:  movl %eax,(%ecx)
+	0x3A972916:  leal (%edx,%eax,8), %eax
+	0x3A972919:  movl %eax,(%esp,,)
+	0x3A97291C:  call 0x3A96581C
+. 221 3A9728FD 36
+. 8B 75 D8 8B 56 04 42 0F B6 4D E0 BE 01 00 00 00 89 F0 D3 E0 8B 4D E8 89 01 8D 04 C2 89 04 24 E8 FB 2E FF FF 
+
+==== BB 222 (0x3A97374A) in 3913B, out 22127B, BBs exec'd 0 ====
+	0x3A97374A:  movl 0x138(%ebx),%eax
+	0x3A973750:  addl %esi,%eax
+	0x3A973752:  leal -1(%eax), %edx
+	0x3A973755:  negl %esi
+	0x3A973757:  andl %esi,%edx
+	0x3A973759:  leal (%edi,%edx,1), %eax
+	0x3A97375C:  cmpl %ecx,%eax
+	0x3A97375E:  jnb-8 0x3A973780
+. 222 3A97374A 22
+. 8B 83 38 01 00 00 01 F0 8D 50 FF F7 DE 21 F2 8D 04 17 39 C8 73 20 
+
+==== BB 223 (0x3A972921) in 3935B, out 22246B, BBs exec'd 0 ====
+	0x3A972921:  movl %eax,-36(%ebp)
+	0x3A972924:  testl %eax,%eax
+	0x3A972926:  jz-8 0x3A9728B4
+. 223 3A972921 7
+. 89 45 DC 85 C0 74 8C 
+
+==== BB 224 (0x3A972928) in 3942B, out 22318B, BBs exec'd 0 ====
+	0x3A972928:  cmpl $0x1, -32(%ebp)
+	0x3A97292C:  jz-32 0x3A972A0B
+. 224 3A972928 10
+. 83 7D E0 01 0F 84 D9 00 00 00 
+
+==== BB 225 (0x3A972A0B) in 3952B, out 22381B, BBs exec'd 0 ====
+	0x3A972A0B:  movl -24(%ebp),%esi
+	0x3A972A0E:  movl -36(%ebp),%edx
+	0x3A972A11:  movl -40(%ebp),%ecx
+	0x3A972A14:  movl (%esi),%eax
+	0x3A972A16:  leal (%edx,%eax,8), %eax
+	0x3A972A19:  movl %eax,(%edx)
+	0x3A972A1B:  movl 4(%ecx),%eax
+	0x3A972A1E:  incl %eax
+	0x3A972A1F:  movl %eax,4(%edx)
+	0x3A972A22:  movl (%esi),%eax
+	0x3A972A24:  movl $0x0, 12(%edx)
+	0x3A972A2B:  leal (%edx,%eax,8), %eax
+	0x3A972A2E:  movl %eax,8(%edx)
+	0x3A972A31:  movl 4(%ecx),%eax
+	0x3A972A34:  movl %eax,8(%esp,,)
+	0x3A972A38:  movl (%ecx),%eax
+	0x3A972A3A:  movl %eax,4(%esp,,)
+	0x3A972A3E:  movl (%esi),%eax
+	0x3A972A40:  leal (%edx,%eax,8), %eax
+	0x3A972A43:  movl %eax,(%esp,,)
+	0x3A972A46:  call 0x3A975770
+. 225 3A972A0B 64
+. 8B 75 E8 8B 55 DC 8B 4D D8 8B 06 8D 04 C2 89 02 8B 41 04 40 89 42 04 8B 06 C7 42 0C 00 00 00 00 8D 04 C2 89 42 08 8B 41 04 89 44 24 08 8B 01 89 44 24 04 8B 06 8D 04 C2 89 04 24 E8 25 2D 00 00 
+
+==== BB 226 mempcpy(0x3A975770) in 4016B, out 22625B, BBs exec'd 0 ====
+	0x3A975770:  pushl %edi
+	0x3A975771:  pushl %esi
+	0x3A975772:  movl 12(%esp,,),%edi
+	0x3A975776:  movl 16(%esp,,),%esi
+	0x3A97577A:  movl 20(%esp,,),%ecx
+	0x3A97577E:  movl %edi,%eax
+	0x3A975780:  cld
+	0x3A975781:  cmpl $0x20, %ecx
+	0x3A975784:  jbe-8 0x3A9757D8
+. 226 3A975770 22
+. 57 56 8B 7C 24 0C 8B 74 24 10 8B 4C 24 14 89 F8 FC 83 F9 20 76 52 
+
+==== BB 227 (0x3A9757D8) in 4038B, out 22774B, BBs exec'd 0 ====
+	0x3A9757D8:  rep movsb
+. 227 3A9757D8 2
+. F3 A4 
+
+==== BB 228 (0x3A9757DA) in 4040B, out 22869B, BBs exec'd 0 ====
+	0x3A9757DA:  movl %edi,%eax
+	0x3A9757DC:  popl %esi
+	0x3A9757DD:  popl %edi
+	0x3A9757DE:  ret
+. 228 3A9757DA 5
+. 89 F8 5E 5F C3 
+
+==== BB 229 (0x3A972A4B) in 4045B, out 22941B, BBs exec'd 0 ====
+	0x3A972A4B:  movb $0x2F, (%eax)
+	0x3A972A4E:  movl 8(%ebp),%edx
+	0x3A972A51:  movl $0x2, (%esi)
+	0x3A972A57:  movl -36(%ebp),%esi
+	0x3A972A5A:  movl 4(%esi),%eax
+	0x3A972A5D:  movl %eax,(%edx)
+	0x3A972A5F:  jmp 0x3A9728F0
+. 229 3A972A4B 25
+. C6 00 2F 8B 55 08 C7 06 02 00 00 00 8B 75 DC 8B 46 04 89 02 E9 8C FE FF FF 
+
+==== BB 230 (0x3A9728F0) in 4070B, out 23047B, BBs exec'd 0 ====
+	0x3A9728F0:  movl -36(%ebp),%eax
+	0x3A9728F3:  leal -12(%ebp), %esp
+	0x3A9728F6:  popl %ebx
+	0x3A9728F7:  popl %esi
+	0x3A9728F8:  popl %edi
+	0x3A9728F9:  popl %ebp
+	0x3A9728FA:  ret 4
+. 230 3A9728F0 13
+. 8B 45 DC 8D 65 F4 5B 5E 5F 5D C2 04 00 
+
+==== BB 231 (0x3A96AB67) in 4083B, out 23162B, BBs exec'd 0 ====
+	0x3A96AB67:  movl %eax,0xC4(%ebx)
+	0x3A96AB6D:  subl $0x4, %esp
+	0x3A96AB70:  movl $0xC, (%esp,,)
+	0x3A96AB77:  call 0x3A96581C
+. 231 3A96AB67 21
+. 89 83 C4 00 00 00 83 EC 04 C7 04 24 0C 00 00 00 E8 A0 AC FF FF 
+
+==== BB 232 (0x3A96AB7C) in 4104B, out 23252B, BBs exec'd 0 ====
+	0x3A96AB7C:  movl %eax,0xD0(%ebx)
+	0x3A96AB82:  testl %eax,%eax
+	0x3A96AB84:  movl %eax,%esi
+	0x3A96AB86:  jz-32 0x3A96AE1F
+. 232 3A96AB7C 16
+. 89 83 D0 00 00 00 85 C0 89 C6 0F 84 93 02 00 00 
+
+==== BB 233 (0x3A96AB8C) in 4120B, out 23334B, BBs exec'd 0 ====
+	0x3A96AB8C:  movl 0xC8(%ebx),%edx
+	0x3A96AB92:  movl $0xCCCCCCCD,%eax
+	0x3A96AB97:  leal 0x27(,%edx,4), %edx
+	0x3A96AB9E:  mull %edx
+	0x3A96ABA0:  shrl $0x4, %edx
+	0x3A96ABA3:  leal (%edx,%edx,4), %edx
+	0x3A96ABA6:  movl %edx,-24(%ebp)
+	0x3A96ABA9:  movl %edx,%eax
+	0x3A96ABAB:  shll $0x6, %eax
+	0x3A96ABAE:  movl %eax,(%esp,,)
+	0x3A96ABB1:  call 0x3A96581C
+. 233 3A96AB8C 42
+. 8B 93 C8 00 00 00 B8 CD CC CC CC 8D 14 95 27 00 00 00 F7 E2 C1 EA 04 8D 14 92 89 55 E8 89 D0 C1 E0 06 89 04 24 E8 66 AC FF FF 
+
+==== BB 234 (0x3A96ABB6) in 4162B, out 23505B, BBs exec'd 0 ====
+	0x3A96ABB6:  movl %eax,(%esi)
+	0x3A96ABB8:  movl 0xD0(%ebx),%eax
+	0x3A96ABBE:  leal 0xFFFFE390(%ebx), %edx
+	0x3A96ABC4:  movl (%eax),%eax
+	0x3A96ABC6:  testl %eax,%eax
+	0x3A96ABC8:  jz-32 0x3A96AE25
+. 234 3A96ABB6 24
+. 89 06 8B 83 D0 00 00 00 8D 93 90 E3 FF FF 8B 00 85 C0 0F 84 57 02 00 00 
+
+==== BB 235 (0x3A96ABCE) in 4186B, out 23613B, BBs exec'd 0 ====
+	0x3A96ABCE:  movl %eax,0xFFFFF9DC(%ebx)
+	0x3A96ABD4:  xorl %edx, %edx
+	0x3A96ABD6:  movl 0xC8(%ebx),%ecx
+	0x3A96ABDC:  movl %edx,0xD4(%ebx)
+	0x3A96ABE2:  movl %eax,%edx
+	0x3A96ABE4:  leal 0xFFFFD61C(%ebx), %eax
+	0x3A96ABEA:  movl %eax,-20(%ebp)
+	0x3A96ABED:  movl -24(%ebp),%eax
+	0x3A96ABF0:  leal 0xFFFFF017(%ebx), %edi
+	0x3A96ABF6:  movl $0x0, -40(%ebp)
+	0x3A96ABFD:  shll $0x2, %eax
+	0x3A96AC00:  movl %edi,-32(%ebp)
+	0x3A96AC03:  movl %eax,-28(%ebp)
+	0x3A96AC06:  movl %edx,(%esi)
+	0x3A96AC08:  movl -32(%ebp),%edi
+	0x3A96AC0B:  addl $0x4, %esi
+	0x3A96AC0E:  movl $0x0, 8(%edx)
+	0x3A96AC15:  movl -20(%ebp),%eax
+	0x3A96AC18:  movl %edi,4(%edx)
+	0x3A96AC1B:  movl -40(%ebp),%edi
+	0x3A96AC1E:  movl %eax,12(%edx)
+	0x3A96AC21:  movl -10732(%ebx,%edi,4),%edi
+	0x3A96AC28:  leal 1(%edi,%eax,1), %eax
+	0x3A96AC2C:  movl %eax,-20(%ebp)
+	0x3A96AC2F:  xorl %eax, %eax
+	0x3A96AC31:  cmpl %ecx,%eax
+	0x3A96AC33:  movl %edi,16(%edx)
+	0x3A96AC36:  jnb-8 0x3A96AC4B
+. 235 3A96ABCE 106
+. 89 83 DC F9 FF FF 31 D2 8B 8B C8 00 00 00 89 93 D4 00 00 00 89 C2 8D 83 1C D6 FF FF 89 45 EC 8B 45 E8 8D BB 17 F0 FF FF C7 45 D8 00 00 00 00 C1 E0 02 89 7D E0 89 45 E4 89 16 8B 7D E0 83 C6 04 C7 42 08 00 00 00 00 8B 45 EC 89 7A 04 8B 7D D8 89 42 0C 8B BC BB 14 D6 FF FF 8D 44 07 01 89 45 EC 31 C0 39 C8 89 7A 10 73 13 
+
+==== BB 236 (0x3A96AC38) in 4292B, out 23976B, BBs exec'd 0 ====
+	0x3A96AC38:  nop
+	0x3A96AC39:  leal 0(%esi,,), %esi
+	0x3A96AC40:  xorl %edi, %edi
+	0x3A96AC42:  movl %edi,20(%edx,%eax,4)
+	0x3A96AC46:  incl %eax
+	0x3A96AC47:  cmpl %ecx,%eax
+	0x3A96AC49:  jb-8 0x3A96AC40
+. 236 3A96AC38 19
+. 90 8D B4 26 00 00 00 00 31 FF 89 7C 82 14 40 39 C8 72 F5 
+
+==== BB 237 (0x3A96AC40) in 4311B, out 24086B, BBs exec'd 0 ====
+	0x3A96AC40:  xorl %edi, %edi
+	0x3A96AC42:  movl %edi,20(%edx,%eax,4)
+	0x3A96AC46:  incl %eax
+	0x3A96AC47:  cmpl %ecx,%eax
+	0x3A96AC49:  jb-8 0x3A96AC40
+. 237 3A96AC40 11
+. 31 FF 89 7C 82 14 40 39 C8 72 F5 
+
+==== BB 238 (0x3A96AC4B) in 4322B, out 24179B, BBs exec'd 0 ====
+	0x3A96AC4B:  incl -40(%ebp)
+	0x3A96AC4E:  movl $0x0, -36(%ebp)
+	0x3A96AC55:  cmpl $0x2, -40(%ebp)
+	0x3A96AC59:  jz-8 0x3A96AC63
+. 238 3A96AC4B 16
+. FF 45 D8 C7 45 DC 00 00 00 00 83 7D D8 02 74 08 
+
+==== BB 239 (0x3A96AC5B) in 4338B, out 24271B, BBs exec'd 0 ====
+	0x3A96AC5B:  movl -28(%ebp),%edi
+	0x3A96AC5E:  addl %edx,%edi
+	0x3A96AC60:  movl %edi,-36(%ebp)
+	0x3A96AC63:  movl -36(%ebp),%eax
+	0x3A96AC66:  movl %eax,(%edx)
+	0x3A96AC68:  movl -28(%ebp),%eax
+	0x3A96AC6B:  addl %eax,%edx
+	0x3A96AC6D:  cmpl $0x1, -40(%ebp)
+	0x3A96AC71:  jbe-8 0x3A96AC06
+. 239 3A96AC5B 24
+. 8B 7D E4 01 D7 89 7D DC 8B 45 DC 89 02 8B 45 E4 01 C2 83 7D D8 01 76 93 
+
+==== BB 240 (0x3A96AC06) in 4362B, out 24406B, BBs exec'd 0 ====
+	0x3A96AC06:  movl %edx,(%esi)
+	0x3A96AC08:  movl -32(%ebp),%edi
+	0x3A96AC0B:  addl $0x4, %esi
+	0x3A96AC0E:  movl $0x0, 8(%edx)
+	0x3A96AC15:  movl -20(%ebp),%eax
+	0x3A96AC18:  movl %edi,4(%edx)
+	0x3A96AC1B:  movl -40(%ebp),%edi
+	0x3A96AC1E:  movl %eax,12(%edx)
+	0x3A96AC21:  movl -10732(%ebx,%edi,4),%edi
+	0x3A96AC28:  leal 1(%edi,%eax,1), %eax
+	0x3A96AC2C:  movl %eax,-20(%ebp)
+	0x3A96AC2F:  xorl %eax, %eax
+	0x3A96AC31:  cmpl %ecx,%eax
+	0x3A96AC33:  movl %edi,16(%edx)
+	0x3A96AC36:  jnb-8 0x3A96AC4B
+. 240 3A96AC06 50
+. 89 16 8B 7D E0 83 C6 04 C7 42 08 00 00 00 00 8B 45 EC 89 7A 04 8B 7D D8 89 42 0C 8B BC BB 14 D6 FF FF 8D 44 07 01 89 45 EC 31 C0 39 C8 89 7A 10 73 13 
+
+==== BB 241 (0x3A96AC63) in 4412B, out 24613B, BBs exec'd 0 ====
+	0x3A96AC63:  movl -36(%ebp),%eax
+	0x3A96AC66:  movl %eax,(%edx)
+	0x3A96AC68:  movl -28(%ebp),%eax
+	0x3A96AC6B:  addl %eax,%edx
+	0x3A96AC6D:  cmpl $0x1, -40(%ebp)
+	0x3A96AC71:  jbe-8 0x3A96AC06
+. 241 3A96AC63 16
+. 8B 45 DC 89 02 8B 45 E4 01 C2 83 7D D8 01 76 93 
+
+==== BB 242 (0x3A96AC73) in 4428B, out 24718B, BBs exec'd 0 ====
+	0x3A96AC73:  movl $0x0, (%esi)
+	0x3A96AC79:  movl 0xFFFFF994(%ebx),%esi
+	0x3A96AC7F:  movl $0x9,%edi
+	0x3A96AC84:  movl %edi,0xD8(%ebx)
+	0x3A96AC8A:  testl %esi,%esi
+	0x3A96AC8C:  jz-8 0x3A96ACB9
+. 242 3A96AC73 27
+. C7 06 00 00 00 00 8B B3 94 F9 FF FF BF 09 00 00 00 89 BB D8 00 00 00 85 F6 74 2B 
+
+==== BB 243 (0x3A96AC8E) in 4455B, out 24833B, BBs exec'd 0 ====
+	0x3A96AC8E:  movl 0x8C(%esi),%edx
+	0x3A96AC94:  testl %edx,%edx
+	0x3A96AC96:  jnz-8 0x3A96ACE7
+. 243 3A96AC8E 10
+. 8B 96 8C 00 00 00 85 D2 75 4F 
+
+==== BB 244 (0x3A96AC98) in 4465B, out 24908B, BBs exec'd 0 ====
+	0x3A96AC98:  movl 84(%esi),%edx
+	0x3A96AC9B:  movl $0xFFFFFFFF,%edi
+	0x3A96ACA0:  movl %edi,0x1CC(%esi)
+	0x3A96ACA6:  testl %edx,%edx
+	0x3A96ACA8:  jnz-32 0x3A96ADD9
+. 244 3A96AC98 22
+. 8B 56 54 BF FF FF FF FF 89 BE CC 01 00 00 85 D2 0F 85 2B 01 00 00 
+
+==== BB 245 (0x3A96ACAE) in 4487B, out 25005B, BBs exec'd 0 ====
+	0x3A96ACAE:  movl $0xFFFFFFFF,%edi
+	0x3A96ACB3:  movl %edi,0x180(%esi)
+	0x3A96ACB9:  movl -16(%ebp),%esi
+	0x3A96ACBC:  testl %esi,%esi
+	0x3A96ACBE:  jz-8 0x3A96ACC8
+. 245 3A96ACAE 18
+. BF FF FF FF FF 89 BE 80 01 00 00 8B 75 F0 85 F6 74 08 
+
+==== BB 246 (0x3A96ACC0) in 4505B, out 25105B, BBs exec'd 0 ====
+	0x3A96ACC0:  movl -16(%ebp),%edi
+	0x3A96ACC3:  cmpb $0x0, (%edi)
+	0x3A96ACC6:  jnz-8 0x3A96AD0D
+. 246 3A96ACC0 8
+. 8B 7D F0 80 3F 00 75 45 
+
+==== BB 247 (0x3A96AD0D) in 4513B, out 25178B, BBs exec'd 0 ====
+	0x3A96AD0D:  movl %edi,(%esp,,)
+	0x3A96AD10:  movl %edi,%esi
+	0x3A96AD12:  call 0x3A9752D0
+. 247 3A96AD0D 10
+. 89 3C 24 89 FE E8 B9 A5 00 00 
+
+==== BB 248 (0x3A975311) in 4523B, out 25240B, BBs exec'd 0 ====
+	0x3A975311:  xorl %ecx,%edx
+	0x3A975313:  andl $0x1010100, %edx
+	0x3A975319:  jnz-8 0x3A975369
+. 248 3A975311 10
+. 31 CA 81 E2 00 01 01 01 75 4E 
+
+==== BB 249 (0x3A97531B) in 4533B, out 25311B, BBs exec'd 0 ====
+	0x3A97531B:  movl (%eax),%ecx
+	0x3A97531D:  addl $0x4, %eax
+	0x3A975320:  subl %ecx,%edx
+	0x3A975322:  addl $0xFEFEFEFF, %ecx
+	0x3A975328:  decl %edx
+	0x3A975329:  jnb-8 0x3A975369
+. 249 3A97531B 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 3E 
+
+==== BB 250 (0x3A97532B) in 4549B, out 25408B, BBs exec'd 0 ====
+	0x3A97532B:  xorl %ecx,%edx
+	0x3A97532D:  andl $0x1010100, %edx
+	0x3A975333:  jnz-8 0x3A975369
+. 250 3A97532B 10
+. 31 CA 81 E2 00 01 01 01 75 34 
+
+==== BB 251 (0x3A975335) in 4559B, out 25479B, BBs exec'd 0 ====
+	0x3A975335:  movl (%eax),%ecx
+	0x3A975337:  addl $0x4, %eax
+	0x3A97533A:  subl %ecx,%edx
+	0x3A97533C:  addl $0xFEFEFEFF, %ecx
+	0x3A975342:  decl %edx
+	0x3A975343:  jnb-8 0x3A975369
+. 251 3A975335 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 24 
+
+==== BB 252 (0x3A975345) in 4575B, out 25576B, BBs exec'd 0 ====
+	0x3A975345:  xorl %ecx,%edx
+	0x3A975347:  andl $0x1010100, %edx
+	0x3A97534D:  jnz-8 0x3A975369
+. 252 3A975345 10
+. 31 CA 81 E2 00 01 01 01 75 1A 
+
+==== BB 253 (0x3A97534F) in 4585B, out 25647B, BBs exec'd 0 ====
+	0x3A97534F:  movl (%eax),%ecx
+	0x3A975351:  addl $0x4, %eax
+	0x3A975354:  subl %ecx,%edx
+	0x3A975356:  addl $0xFEFEFEFF, %ecx
+	0x3A97535C:  decl %edx
+	0x3A97535D:  jnb-8 0x3A975369
+. 253 3A97534F 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 0A 
+
+==== BB 254 (0x3A97535F) in 4601B, out 25744B, BBs exec'd 0 ====
+	0x3A97535F:  xorl %ecx,%edx
+	0x3A975361:  andl $0x1010100, %edx
+	0x3A975367:  jz-8 0x3A975301
+. 254 3A97535F 10
+. 31 CA 81 E2 00 01 01 01 74 98 
+
+==== BB 255 (0x3A975301) in 4611B, out 25815B, BBs exec'd 0 ====
+	0x3A975301:  movl (%eax),%ecx
+	0x3A975303:  addl $0x4, %eax
+	0x3A975306:  subl %ecx,%edx
+	0x3A975308:  addl $0xFEFEFEFF, %ecx
+	0x3A97530E:  decl %edx
+	0x3A97530F:  jnb-8 0x3A975369
+. 255 3A975301 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 58 
+
+==== BB 256 (0x3A96AD17) in 4627B, out 25912B, BBs exec'd 0 ====
+	0x3A96AD17:  leal 1(%eax), %edx
+	0x3A96AD1A:  addl $0x13, %eax
+	0x3A96AD1D:  andl $0xFFFFFFFC, %eax
+	0x3A96AD20:  subl %eax,%esp
+	0x3A96AD22:  leal 27(%esp,,), %eax
+	0x3A96AD26:  andl $0xFFFFFFF0, %eax
+	0x3A96AD29:  movl %edx,8(%esp,,)
+	0x3A96AD2D:  movl %edi,4(%esp,,)
+	0x3A96AD31:  movl %eax,(%esp,,)
+	0x3A96AD34:  call 0x3A975870
+. 256 3A96AD17 34
+. 8D 50 01 83 C0 13 83 E0 FC 29 C4 8D 44 24 1B 83 E0 F0 89 54 24 08 89 7C 24 04 89 04 24 E8 37 AB 00 00 
+
+==== BB 257 (0x3A975886) in 4661B, out 26052B, BBs exec'd 0 ====
+	0x3A975886:  negl %eax
+	0x3A975888:  andl $0x3, %eax
+	0x3A97588B:  subl %eax,%ecx
+	0x3A97588D:  xchgl %eax, %ecx
+	0x3A97588E:  rep movsb
+. 257 3A975886 10
+. F7 D8 83 E0 03 29 C1 91 F3 A4 
+
+==== BB 258 (0x3A975890) in 4671B, out 26179B, BBs exec'd 0 ====
+	0x3A975890:  movl %eax,%ecx
+	0x3A975892:  subl $0x20, %ecx
+	0x3A975895:  js-8 0x3A9758D5
+. 258 3A975890 7
+. 89 C1 83 E9 20 78 3E 
+
+==== BB 259 (0x3A975897) in 4678B, out 26243B, BBs exec'd 0 ====
+	0x3A975897:  movl (%edi),%eax
+	0x3A975899:  movl 28(%edi),%edx
+	0x3A97589C:  subl $0x20, %ecx
+	0x3A97589F:  movl (%esi),%eax
+	0x3A9758A1:  movl 4(%esi),%edx
+	0x3A9758A4:  movl %eax,(%edi)
+	0x3A9758A6:  movl %edx,4(%edi)
+	0x3A9758A9:  movl 8(%esi),%eax
+	0x3A9758AC:  movl 12(%esi),%edx
+	0x3A9758AF:  movl %eax,8(%edi)
+	0x3A9758B2:  movl %edx,12(%edi)
+	0x3A9758B5:  movl 16(%esi),%eax
+	0x3A9758B8:  movl 20(%esi),%edx
+	0x3A9758BB:  movl %eax,16(%edi)
+	0x3A9758BE:  movl %edx,20(%edi)
+	0x3A9758C1:  movl 24(%esi),%eax
+	0x3A9758C4:  movl 28(%esi),%edx
+	0x3A9758C7:  movl %eax,24(%edi)
+	0x3A9758CA:  movl %edx,28(%edi)
+	0x3A9758CD:  leal 32(%esi), %esi
+	0x3A9758D0:  leal 32(%edi), %edi
+	0x3A9758D3:  jns-8 0x3A975899
+. 259 3A975897 62
+. 8B 07 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4 
+
+==== BB 260 (0x3A9758D5) in 4740B, out 26506B, BBs exec'd 0 ====
+	0x3A9758D5:  addl $0x20, %ecx
+	0x3A9758D8:  movl 12(%esp,,),%eax
+	0x3A9758DC:  rep movsb
+. 260 3A9758D5 9
+. 83 C1 20 8B 44 24 0C F3 A4 
+
+==== BB 261 (0x3A96AD39) in 4749B, out 26623B, BBs exec'd 0 ====
+	0x3A96AD39:  movl %eax,%edi
+	0x3A96AD3B:  movzbl (%esi),%eax
+	0x3A96AD3E:  movl $0x1,%edx
+	0x3A96AD43:  testb %al,%al
+	0x3A96AD45:  jz-8 0x3A96AD64
+. 261 3A96AD39 14
+. 89 C7 0F B6 06 BA 01 00 00 00 84 C0 74 1D 
+
+==== BB 262 (0x3A96AD47) in 4763B, out 26723B, BBs exec'd 0 ====
+	0x3A96AD47:  movl %esi,%esi
+	0x3A96AD49:  leal 0(%edi,,), %edi
+	0x3A96AD50:  subb $0x3A, %al
+	0x3A96AD52:  cmpb $0x1, %al
+	0x3A96AD54:  setbe %al
+	0x3A96AD57:  movzbl %al,%eax
+	0x3A96AD5A:  incl %esi
+	0x3A96AD5B:  addl %eax,%edx
+	0x3A96AD5D:  movzbl (%esi),%eax
+	0x3A96AD60:  testb %al,%al
+	0x3A96AD62:  jnz-8 0x3A96AD50
+. 262 3A96AD47 29
+. 89 F6 8D BC 27 00 00 00 00 2C 3A 3C 01 0F 96 C0 0F B6 C0 46 01 C2 0F B6 06 84 C0 75 EC 
+
+==== BB 263 (0x3A96AD50) in 4792B, out 26887B, BBs exec'd 0 ====
+	0x3A96AD50:  subb $0x3A, %al
+	0x3A96AD52:  cmpb $0x1, %al
+	0x3A96AD54:  setbe %al
+	0x3A96AD57:  movzbl %al,%eax
+	0x3A96AD5A:  incl %esi
+	0x3A96AD5B:  addl %eax,%edx
+	0x3A96AD5D:  movzbl (%esi),%eax
+	0x3A96AD60:  testb %al,%al
+	0x3A96AD62:  jnz-8 0x3A96AD50
+. 263 3A96AD50 20
+. 2C 3A 3C 01 0F 96 C0 0F B6 C0 46 01 C2 0F B6 06 84 C0 75 EC 
+
+==== BB 264 (0x3A96AD64) in 4812B, out 27028B, BBs exec'd 0 ====
+	0x3A96AD64:  leal 0x4(,%edx,4), %eax
+	0x3A96AD6B:  movl %eax,(%esp,,)
+	0x3A96AD6E:  call 0x3A96581C
+. 264 3A96AD64 15
+. 8D 04 95 04 00 00 00 89 04 24 E8 A9 AA FF FF 
+
+==== BB 265 (0x3A96AD73) in 4827B, out 27099B, BBs exec'd 0 ====
+	0x3A96AD73:  movl %eax,0xBC(%ebx)
+	0x3A96AD79:  testl %eax,%eax
+	0x3A96AD7B:  movl %eax,%esi
+	0x3A96AD7D:  leal 0xFFFFE390(%ebx), %edx
+	0x3A96AD83:  jz-32 0x3A96AE25
+. 265 3A96AD73 22
+. 89 83 BC 00 00 00 85 C0 89 C6 8D 93 90 E3 FF FF 0F 84 9C 00 00 00 
+
+==== BB 266 (0x3A96AD89) in 4849B, out 27194B, BBs exec'd 0 ====
+	0x3A96AD89:  xorl %ecx, %ecx
+	0x3A96AD8B:  leal 0xFFFFF02A(%ebx), %eax
+	0x3A96AD91:  movl %esi,%edx
+	0x3A96AD93:  movl %ecx,8(%esp,,)
+	0x3A96AD97:  leal 0xFFFFF03A(%ebx), %ecx
+	0x3A96AD9D:  movl %eax,4(%esp,,)
+	0x3A96ADA1:  movl 0x130(%ebx),%eax
+	0x3A96ADA7:  movl %eax,(%esp,,)
+	0x3A96ADAA:  movl %edi,%eax
+	0x3A96ADAC:  call 0x3A968AC0
+. 266 3A96AD89 40
+. 31 C9 8D 83 2A F0 FF FF 89 F2 89 4C 24 08 8D 8B 3A F0 FF FF 89 44 24 04 8B 83 30 01 00 00 89 04 24 89 F8 E8 0F DD FF FF 
+
+==== BB 267 fillin_rpath(0x3A968AC0) in 4889B, out 27340B, BBs exec'd 0 ====
+	0x3A968AC0:  pushl %ebp
+	0x3A968AC1:  movl %esp,%ebp
+	0x3A968AC3:  pushl %edi
+	0x3A968AC4:  pushl %esi
+	0x3A968AC5:  pushl %ebx
+	0x3A968AC6:  subl $0x3C, %esp
+	0x3A968AC9:  call 0x3A97592B
+. 267 3A968AC0 14
+. 55 89 E5 57 56 53 83 EC 3C E8 5D CE 00 00 
+
+==== BB 268 (0x3A968ACE) in 4903B, out 27473B, BBs exec'd 0 ====
+	0x3A968ACE:  addl $0xFB9E, %ebx
+	0x3A968AD4:  movl %eax,-16(%ebp)
+	0x3A968AD7:  leal -16(%ebp), %eax
+	0x3A968ADA:  movl %edx,-20(%ebp)
+	0x3A968ADD:  movl %ecx,-24(%ebp)
+	0x3A968AE0:  movl $0x0, -32(%ebp)
+	0x3A968AE7:  movl %eax,-60(%ebp)
+	0x3A968AEA:  leal 0x0(%esi), %esi
+	0x3A968AF0:  movl -24(%ebp),%eax
+	0x3A968AF3:  movl %eax,4(%esp,,)
+	0x3A968AF7:  movl -60(%ebp),%edx
+	0x3A968AFA:  movl %edx,(%esp,,)
+	0x3A968AFD:  call 0x3A973530
+. 268 3A968ACE 52
+. 81 C3 9E FB 00 00 89 45 F0 8D 45 F0 89 55 EC 89 4D E8 C7 45 E0 00 00 00 00 89 45 C4 8D B6 00 00 00 00 8B 45 E8 89 44 24 04 8B 55 C4 89 14 24 E8 2E AA 00 00 
+
+==== BB 269 __strsep_g(0x3A973530) in 4955B, out 27668B, BBs exec'd 0 ====
+	0x3A973530:  pushl %ebp
+	0x3A973531:  movl %esp,%ebp
+	0x3A973533:  pushl %edi
+	0x3A973534:  pushl %esi
+	0x3A973535:  movl 8(%ebp),%eax
+	0x3A973538:  movl (%eax),%edi
+	0x3A97353A:  testl %edi,%edi
+	0x3A97353C:  jz-8 0x3A97358F
+. 269 3A973530 14
+. 55 89 E5 57 56 8B 45 08 8B 38 85 FF 74 51 
+
+==== BB 270 (0x3A97353E) in 4969B, out 27808B, BBs exec'd 0 ====
+	0x3A97353E:  movzbl (%edi),%eax
+	0x3A973541:  movl %edi,%esi
+	0x3A973543:  testb %al,%al
+	0x3A973545:  jz-8 0x3A973588
+. 270 3A97353E 9
+. 0F B6 07 89 FE 84 C0 74 41 
+
+==== BB 271 (0x3A973547) in 4978B, out 27892B, BBs exec'd 0 ====
+	0x3A973547:  movl %esi,%esi
+	0x3A973549:  leal 0(%edi,,), %edi
+	0x3A973550:  movl 12(%ebp),%ecx
+	0x3A973553:  movzbl (%ecx),%edx
+	0x3A973556:  cmpb %al,%dl
+	0x3A973558:  jz-8 0x3A97356C
+. 271 3A973547 19
+. 89 F6 8D BC 27 00 00 00 00 8B 4D 0C 0F B6 11 38 C2 74 12 
+
+==== BB 272 (0x3A97355A) in 4997B, out 27999B, BBs exec'd 0 ====
+	0x3A97355A:  leal 0x0(%esi), %esi
+	0x3A973560:  incl %ecx
+	0x3A973561:  movzbl (%ecx),%edx
+	0x3A973564:  testb %dl,%dl
+	0x3A973566:  jz-8 0x3A973580
+. 272 3A97355A 14
+. 8D B6 00 00 00 00 41 0F B6 11 84 D2 74 18 
+
+==== BB 273 (0x3A973568) in 5011B, out 28098B, BBs exec'd 0 ====
+	0x3A973568:  cmpb %al,%dl
+	0x3A97356A:  jnz-8 0x3A973560
+. 273 3A973568 4
+. 38 C2 75 F4 
+
+==== BB 274 (0x3A973560) in 5015B, out 28156B, BBs exec'd 0 ====
+	0x3A973560:  incl %ecx
+	0x3A973561:  movzbl (%ecx),%edx
+	0x3A973564:  testb %dl,%dl
+	0x3A973566:  jz-8 0x3A973580
+. 274 3A973560 8
+. 41 0F B6 11 84 D2 74 18 
+
+==== BB 275 (0x3A973580) in 5023B, out 28238B, BBs exec'd 0 ====
+	0x3A973580:  incl %esi
+	0x3A973581:  movzbl (%esi),%eax
+	0x3A973584:  testb %al,%al
+	0x3A973586:  jnz-8 0x3A973550
+. 275 3A973580 8
+. 46 0F B6 06 84 C0 75 C8 
+
+==== BB 276 (0x3A973550) in 5031B, out 28320B, BBs exec'd 0 ====
+	0x3A973550:  movl 12(%ebp),%ecx
+	0x3A973553:  movzbl (%ecx),%edx
+	0x3A973556:  cmpb %al,%dl
+	0x3A973558:  jz-8 0x3A97356C
+. 276 3A973550 10
+. 8B 4D 0C 0F B6 11 38 C2 74 12 
+
+==== BB 277 (0x3A973588) in 5041B, out 28404B, BBs exec'd 0 ====
+	0x3A973588:  xorl %esi, %esi
+	0x3A97358A:  movl 8(%ebp),%eax
+	0x3A97358D:  movl %esi,(%eax)
+	0x3A97358F:  popl %esi
+	0x3A973590:  movl %edi,%eax
+	0x3A973592:  popl %edi
+	0x3A973593:  popl %ebp
+	0x3A973594:  ret
+. 277 3A973588 13
+. 31 F6 8B 45 08 89 30 5E 89 F8 5F 5D C3 
+
+==== BB 278 (0x3A968B02) in 5054B, out 28525B, BBs exec'd 0 ====
+	0x3A968B02:  movl %eax,-28(%ebp)
+	0x3A968B05:  testl %eax,%eax
+	0x3A968B07:  jz-32 0x3A968BFB
+. 278 3A968B02 11
+. 89 45 E4 85 C0 0F 84 EE 00 00 00 
+
+==== BB 279 (0x3A968B0D) in 5065B, out 28597B, BBs exec'd 0 ====
+	0x3A968B0D:  movl %eax,(%esp,,)
+	0x3A968B10:  call 0x3A9752D0
+. 279 3A968B0D 8
+. 89 04 24 E8 BB C7 00 00 
+
+==== BB 280 (0x3A968B15) in 5073B, out 28652B, BBs exec'd 0 ====
+	0x3A968B15:  movl %eax,-40(%ebp)
+	0x3A968B18:  testl %eax,%eax
+	0x3A968B1A:  jnz-8 0x3A968B25
+. 280 3A968B15 7
+. 89 45 D8 85 C0 75 09 
+
+==== BB 281 (0x3A968B25) in 5080B, out 28724B, BBs exec'd 0 ====
+	0x3A968B25:  cmpl $0x1, -40(%ebp)
+	0x3A968B29:  jbe-8 0x3A968B3C
+. 281 3A968B25 6
+. 83 7D D8 01 76 11 
+
+==== BB 282 (0x3A968B2B) in 5086B, out 28787B, BBs exec'd 0 ====
+	0x3A968B2B:  movl -28(%ebp),%ecx
+	0x3A968B2E:  movl -40(%ebp),%edi
+	0x3A968B31:  cmpb $0x2F, -1(%edi,%ecx,1)
+	0x3A968B36:  jz-32 0x3A968C12
+. 282 3A968B2B 17
+. 8B 4D E4 8B 7D D8 80 7C 0F FF 2F 0F 84 D6 00 00 00 
+
+==== BB 283 (0x3A968B3C) in 5103B, out 28877B, BBs exec'd 0 ====
+	0x3A968B3C:  movl -40(%ebp),%eax
+	0x3A968B3F:  testl %eax,%eax
+	0x3A968B41:  jz-8 0x3A968B58
+. 283 3A968B3C 7
+. 8B 45 D8 85 C0 74 15 
+
+==== BB 284 (0x3A968B43) in 5110B, out 28949B, BBs exec'd 0 ====
+	0x3A968B43:  movl -28(%ebp),%ecx
+	0x3A968B46:  movl -40(%ebp),%edi
+	0x3A968B49:  cmpb $0x2F, -1(%edi,%ecx,1)
+	0x3A968B4E:  jz-8 0x3A968B58
+. 284 3A968B43 13
+. 8B 4D E4 8B 7D D8 80 7C 0F FF 2F 74 08 
+
+==== BB 285 (0x3A968B50) in 5123B, out 29039B, BBs exec'd 0 ====
+	0x3A968B50:  movb $0x2F, (%edi,%ecx,1)
+	0x3A968B54:  incl %edi
+	0x3A968B55:  movl %edi,-40(%ebp)
+	0x3A968B58:  movl 8(%ebp),%eax
+	0x3A968B5B:  testl %eax,%eax
+	0x3A968B5D:  jnz-32 0x3A968D35
+. 285 3A968B50 19
+. C6 04 0F 2F 47 89 7D D8 8B 45 08 85 C0 0F 85 D2 01 00 00 
+
+==== BB 286 (0x3A968B63) in 5142B, out 29150B, BBs exec'd 0 ====
+	0x3A968B63:  movl 0xFFFFF9DC(%ebx),%edx
+	0x3A968B69:  testl %edx,%edx
+	0x3A968B6B:  movl %edx,-36(%ebp)
+	0x3A968B6E:  jnz-8 0x3A968B85
+. 286 3A968B63 13
+. 8B 93 DC F9 FF FF 85 D2 89 55 DC 75 15 
+
+==== BB 287 (0x3A968B85) in 5155B, out 29238B, BBs exec'd 0 ====
+	0x3A968B85:  movl -36(%ebp),%ecx
+	0x3A968B88:  movl -40(%ebp),%edi
+	0x3A968B8B:  cmpl %edi,16(%ecx)
+	0x3A968B8E:  jnz-8 0x3A968B75
+. 287 3A968B85 11
+. 8B 4D DC 8B 7D D8 39 79 10 75 E5 
+
+==== BB 288 (0x3A968B75) in 5166B, out 29326B, BBs exec'd 0 ====
+	0x3A968B75:  movl -36(%ebp),%edx
+	0x3A968B78:  movl (%edx),%edx
+	0x3A968B7A:  testl %edx,%edx
+	0x3A968B7C:  movl %edx,-36(%ebp)
+	0x3A968B7F:  jz-32 0x3A968C31
+. 288 3A968B75 16
+. 8B 55 DC 8B 12 85 D2 89 55 DC 0F 84 AC 00 00 00 
+
+==== BB 289 (0x3A968C31) in 5182B, out 29415B, BBs exec'd 0 ====
+	0x3A968C31:  movl 16(%ebp),%ecx
+	0x3A968C34:  xorl %esi, %esi
+	0x3A968C36:  testl %ecx,%ecx
+	0x3A968C38:  jz-8 0x3A968C48
+. 289 3A968C31 9
+. 8B 4D 10 31 F6 85 C9 74 0E 
+
+==== BB 290 (0x3A968C48) in 5191B, out 29501B, BBs exec'd 0 ====
+	0x3A968C48:  movl 0xC8(%ebx),%eax
+	0x3A968C4E:  movl -40(%ebp),%edx
+	0x3A968C51:  leal (%esi,%eax,4), %eax
+	0x3A968C54:  leal 21(%edx,%eax,1), %eax
+	0x3A968C58:  movl %eax,(%esp,,)
+	0x3A968C5B:  call 0x3A96581C
+. 290 3A968C48 24
+. 8B 83 C8 00 00 00 8B 55 D8 8D 04 86 8D 44 02 15 89 04 24 E8 BC CB FF FF 
+
+==== BB 291 (0x3A968C60) in 5215B, out 29607B, BBs exec'd 0 ====
+	0x3A968C60:  movl %eax,-36(%ebp)
+	0x3A968C63:  testl %eax,%eax
+	0x3A968C65:  jz-32 0x3A968DBF
+. 291 3A968C60 11
+. 89 45 DC 85 C0 0F 84 54 01 00 00 
+
+==== BB 292 (0x3A968C6B) in 5226B, out 29679B, BBs exec'd 0 ====
+	0x3A968C6B:  movl -36(%ebp),%ecx
+	0x3A968C6E:  movl 0xC8(%ebx),%eax
+	0x3A968C74:  movl -40(%ebp),%edi
+	0x3A968C77:  leal 20(%ecx,%eax,4), %eax
+	0x3A968C7B:  movl %eax,12(%ecx)
+	0x3A968C7E:  movl %edi,8(%esp,,)
+	0x3A968C82:  movl -28(%ebp),%eax
+	0x3A968C85:  movl %eax,4(%esp,,)
+	0x3A968C89:  movl 12(%ecx),%eax
+	0x3A968C8C:  movl %eax,(%esp,,)
+	0x3A968C8F:  call 0x3A975770
+. 292 3A968C6B 41
+. 8B 4D DC 8B 83 C8 00 00 00 8B 7D D8 8D 44 81 14 89 41 0C 89 7C 24 08 8B 45 E4 89 44 24 04 8B 41 0C 89 04 24 E8 DC CA 00 00 
+
+==== BB 293 (0x3A975786) in 5267B, out 29837B, BBs exec'd 0 ====
+	0x3A975786:  negl %eax
+	0x3A975788:  andl $0x3, %eax
+	0x3A97578B:  subl %eax,%ecx
+	0x3A97578D:  xchgl %eax, %ecx
+	0x3A97578E:  rep movsb
+. 293 3A975786 10
+. F7 D8 83 E0 03 29 C1 91 F3 A4 
+
+==== BB 294 (0x3A975790) in 5277B, out 29964B, BBs exec'd 0 ====
+	0x3A975790:  movl %eax,%ecx
+	0x3A975792:  subl $0x20, %ecx
+	0x3A975795:  js-8 0x3A9757D5
+. 294 3A975790 7
+. 89 C1 83 E9 20 78 3E 
+
+==== BB 295 (0x3A975797) in 5284B, out 30028B, BBs exec'd 0 ====
+	0x3A975797:  movl (%edi),%eax
+	0x3A975799:  movl 28(%edi),%edx
+	0x3A97579C:  subl $0x20, %ecx
+	0x3A97579F:  movl (%esi),%eax
+	0x3A9757A1:  movl 4(%esi),%edx
+	0x3A9757A4:  movl %eax,(%edi)
+	0x3A9757A6:  movl %edx,4(%edi)
+	0x3A9757A9:  movl 8(%esi),%eax
+	0x3A9757AC:  movl 12(%esi),%edx
+	0x3A9757AF:  movl %eax,8(%edi)
+	0x3A9757B2:  movl %edx,12(%edi)
+	0x3A9757B5:  movl 16(%esi),%eax
+	0x3A9757B8:  movl 20(%esi),%edx
+	0x3A9757BB:  movl %eax,16(%edi)
+	0x3A9757BE:  movl %edx,20(%edi)
+	0x3A9757C1:  movl 24(%esi),%eax
+	0x3A9757C4:  movl 28(%esi),%edx
+	0x3A9757C7:  movl %eax,24(%edi)
+	0x3A9757CA:  movl %edx,28(%edi)
+	0x3A9757CD:  leal 32(%esi), %esi
+	0x3A9757D0:  leal 32(%edi), %edi
+	0x3A9757D3:  jns-8 0x3A975799
+. 295 3A975797 62
+. 8B 07 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4 
+
+==== BB 296 (0x3A9757D5) in 5346B, out 30291B, BBs exec'd 0 ====
+	0x3A9757D5:  addl $0x20, %ecx
+	0x3A9757D8:  rep movsb
+. 296 3A9757D5 5
+. 83 C1 20 F3 A4 
+
+==== BB 297 (0x3A968C94) in 5351B, out 30396B, BBs exec'd 0 ====
+	0x3A968C94:  movb $0x0, (%eax)
+	0x3A968C97:  movl -36(%ebp),%edx
+	0x3A968C9A:  cmpl 0xD8(%ebx),%edi
+	0x3A968CA0:  movl %edi,16(%edx)
+	0x3A968CA3:  jbe-8 0x3A968CAB
+. 297 3A968C94 17
+. C6 00 00 8B 55 DC 3B BB D8 00 00 00 89 7A 10 76 06 
+
+==== BB 298 (0x3A968CA5) in 5368B, out 30507B, BBs exec'd 0 ====
+	0x3A968CA5:  movl %edi,0xD8(%ebx)
+	0x3A968CAB:  movl -28(%ebp),%ecx
+	0x3A968CAE:  xorl %eax, %eax
+	0x3A968CB0:  cmpb $0x2F, (%ecx)
+	0x3A968CB3:  movl 0xC8(%ebx),%ecx
+	0x3A968CB9:  setnz %al
+	0x3A968CBC:  xorl %edx, %edx
+	0x3A968CBE:  addl %eax,%eax
+	0x3A968CC0:  cmpl %ecx,%edx
+	0x3A968CC2:  jnb-8 0x3A968CDC
+. 298 3A968CA5 31
+. 89 BB D8 00 00 00 8B 4D E4 31 C0 80 39 2F 8B 8B C8 00 00 00 0F 95 C0 31 D2 01 C0 39 CA 73 18 
+
+==== BB 299 (0x3A968CC4) in 5399B, out 30672B, BBs exec'd 0 ====
+	0x3A968CC4:  leal 0x0(%esi), %esi
+	0x3A968CCA:  leal 0x0(%edi), %edi
+	0x3A968CD0:  movl -36(%ebp),%edi
+	0x3A968CD3:  movl %eax,20(%edi,%edx,4)
+	0x3A968CD7:  incl %edx
+	0x3A968CD8:  cmpl %ecx,%edx
+	0x3A968CDA:  jb-8 0x3A968CD0
+. 299 3A968CC4 24
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 7D DC 89 44 97 14 42 39 CA 72 F4 
+
+==== BB 300 (0x3A968CD0) in 5423B, out 30790B, BBs exec'd 0 ====
+	0x3A968CD0:  movl -36(%ebp),%edi
+	0x3A968CD3:  movl %eax,20(%edi,%edx,4)
+	0x3A968CD7:  incl %edx
+	0x3A968CD8:  cmpl %ecx,%edx
+	0x3A968CDA:  jb-8 0x3A968CD0
+. 300 3A968CD0 12
+. 8B 7D DC 89 44 97 14 42 39 CA 72 F4 
+
+==== BB 301 (0x3A968CDC) in 5435B, out 30885B, BBs exec'd 0 ====
+	0x3A968CDC:  movl 12(%ebp),%edx
+	0x3A968CDF:  movl -36(%ebp),%eax
+	0x3A968CE2:  movl %edx,4(%eax)
+	0x3A968CE5:  movl 16(%ebp),%edx
+	0x3A968CE8:  testl %edx,%edx
+	0x3A968CEA:  jz-32 0x3A968DB0
+. 301 3A968CDC 20
+. 8B 55 0C 8B 45 DC 89 50 04 8B 55 10 85 D2 0F 84 C0 00 00 00 
+
+==== BB 302 (0x3A968DB0) in 5455B, out 30990B, BBs exec'd 0 ====
+	0x3A968DB0:  movl -36(%ebp),%edi
+	0x3A968DB3:  movl $0x0, 8(%edi)
+	0x3A968DBA:  jmp 0x3A968D12
+. 302 3A968DB0 15
+. 8B 7D DC C7 47 08 00 00 00 00 E9 53 FF FF FF 
+
+==== BB 303 (0x3A968D12) in 5470B, out 31049B, BBs exec'd 0 ====
+	0x3A968D12:  movl 0xFFFFF9DC(%ebx),%eax
+	0x3A968D18:  movl -36(%ebp),%edx
+	0x3A968D1B:  movl -32(%ebp),%ecx
+	0x3A968D1E:  movl -20(%ebp),%edi
+	0x3A968D21:  movl %eax,(%edx)
+	0x3A968D23:  movl %edx,0xFFFFF9DC(%ebx)
+	0x3A968D29:  movl %edx,(%edi,%ecx,4)
+	0x3A968D2C:  incl %ecx
+	0x3A968D2D:  movl %ecx,-32(%ebp)
+	0x3A968D30:  jmp 0x3A968AF0
+. 303 3A968D12 35
+. 8B 83 DC F9 FF FF 8B 55 DC 8B 4D E0 8B 7D EC 89 02 89 93 DC F9 FF FF 89 14 8F 41 89 4D E0 E9 BB FD FF FF 
+
+==== BB 304 (0x3A968AF0) in 5505B, out 31198B, BBs exec'd 0 ====
+	0x3A968AF0:  movl -24(%ebp),%eax
+	0x3A968AF3:  movl %eax,4(%esp,,)
+	0x3A968AF7:  movl -60(%ebp),%edx
+	0x3A968AFA:  movl %edx,(%esp,,)
+	0x3A968AFD:  call 0x3A973530
+. 304 3A968AF0 18
+. 8B 45 E8 89 44 24 04 8B 55 C4 89 14 24 E8 2E AA 00 00 
+
+==== BB 305 (0x3A97358F) in 5523B, out 31289B, BBs exec'd 0 ====
+	0x3A97358F:  popl %esi
+	0x3A973590:  movl %edi,%eax
+	0x3A973592:  popl %edi
+	0x3A973593:  popl %ebp
+	0x3A973594:  ret
+. 305 3A97358F 6
+. 5E 89 F8 5F 5D C3 
+
+==== BB 306 (0x3A968BFB) in 5529B, out 31377B, BBs exec'd 0 ====
+	0x3A968BFB:  movl -20(%ebp),%edi
+	0x3A968BFE:  movl -32(%ebp),%ecx
+	0x3A968C01:  movl %edi,%eax
+	0x3A968C03:  movl $0x0, (%edi,%ecx,4)
+	0x3A968C0A:  leal -12(%ebp), %esp
+	0x3A968C0D:  popl %ebx
+	0x3A968C0E:  popl %esi
+	0x3A968C0F:  popl %edi
+	0x3A968C10:  popl %ebp
+	0x3A968C11:  ret
+. 306 3A968BFB 23
+. 8B 7D EC 8B 4D E0 89 F8 C7 04 8F 00 00 00 00 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 307 (0x3A96ADB1) in 5552B, out 31534B, BBs exec'd 0 ====
+	0x3A96ADB1:  movl 0xBC(%ebx),%eax
+	0x3A96ADB7:  movl (%eax),%edx
+	0x3A96ADB9:  testl %edx,%edx
+	0x3A96ADBB:  jz-8 0x3A96AE0A
+. 307 3A96ADB1 12
+. 8B 83 BC 00 00 00 8B 10 85 D2 74 4D 
+
+==== BB 308 (0x3A96ADBD) in 5564B, out 31619B, BBs exec'd 0 ====
+	0x3A96ADBD:  xorl %eax, %eax
+	0x3A96ADBF:  movl %eax,0xC0(%ebx)
+	0x3A96ADC5:  movl 0xFFFFF9DC(%ebx),%eax
+	0x3A96ADCB:  movl %eax,0xFFFFFD8C(%ebx)
+	0x3A96ADD1:  leal -12(%ebp), %esp
+	0x3A96ADD4:  popl %ebx
+	0x3A96ADD5:  popl %esi
+	0x3A96ADD6:  popl %edi
+	0x3A96ADD7:  popl %ebp
+	0x3A96ADD8:  ret
+. 308 3A96ADBD 28
+. 31 C0 89 83 C0 00 00 00 8B 83 DC F9 FF FF 89 83 8C FD FF FF 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 309 (0x3A966CFB) in 5592B, out 31781B, BBs exec'd 0 ====
+	0x3A966CFB:  jmp 0x3A966851
+. 309 3A966CFB 5
+. E9 51 FB FF FF 
+
+==== BB 310 (0x3A966851) in 5597B, out 31809B, BBs exec'd 0 ====
+	0x3A966851:  movl 0xFFFFF9E8(%ebx),%esi
+	0x3A966857:  testl %esi,%esi
+	0x3A966859:  jnz-8 0x3A966869
+. 310 3A966851 10
+. 8B B3 E8 F9 FF FF 85 F6 75 0E 
+
+==== BB 311 (0x3A96685B) in 5607B, out 31884B, BBs exec'd 0 ====
+	0x3A96685B:  movl 0xFFFFF9F8(%ebx),%eax
+	0x3A966861:  movl (%eax),%eax
+	0x3A966863:  movl %eax,0xFFFFF9E8(%ebx)
+	0x3A966869:  movzbl 0xFFFFFB58(%ebx),%eax
+	0x3A966870:  incl 0xFFFFF998(%ebx)
+	0x3A966876:  andb $0xFC, %al
+	0x3A966878:  orb $0x1, %al
+	0x3A96687A:  movb %al,0xFFFFFB58(%ebx)
+	0x3A966880:  leal 0xFFFFF994(%ebx), %eax
+	0x3A966886:  movl %eax,%edx
+	0x3A966888:  movl %eax,0xFFFFFED0(%ebp)
+	0x3A96688E:  addl $0x50, %edx
+	0x3A966891:  movl 0xFFFFF994(%ebx),%eax
+	0x3A966897:  addl $0x1, 0xFFFFF9C4(%ebx)
+	0x3A96689E:  movl %edx,12(%eax)
+	0x3A9668A1:  movl 0xFFFFF994(%ebx),%ecx
+	0x3A9668A7:  adcl $0x0, 0xFFFFF9C8(%ebx)
+	0x3A9668AE:  cmpl $0xFFFFFFFE, 0xFFFFFD78(%ebx)
+	0x3A9668B5:  movl %ecx,0xFFFFF9F4(%ebx)
+	0x3A9668BB:  jz-32 0x3A966CD8
+. 311 3A96685B 102
+. 8B 83 F8 F9 FF FF 8B 00 89 83 E8 F9 FF FF 0F B6 83 58 FB FF FF FF 83 98 F9 FF FF 24 FC 0C 01 88 83 58 FB FF FF 8D 83 94 F9 FF FF 89 C2 89 85 D0 FE FF FF 83 C2 50 8B 83 94 F9 FF FF 83 83 C4 F9 FF FF 01 89 50 0C 8B 8B 94 F9 FF FF 83 93 C8 F9 FF FF 00 83 BB 78 FD FF FF FE 89 8B F4 F9 FF FF 0F 84 17 04 00 00 
+
+==== BB 312 (0x3A966CD8) in 5709B, out 32207B, BBs exec'd 0 ====
+	0x3A966CD8:  movl (%ecx),%ecx
+	0x3A966CDA:  movl $0xFFFFFFFF,%eax
+	0x3A966CDF:  testl %ecx,%ecx
+	0x3A966CE1:  jz-8 0x3A966CE5
+. 312 3A966CD8 11
+. 8B 09 B8 FF FF FF FF 85 C9 74 02 
+
+==== BB 313 (0x3A966CE5) in 5720B, out 32288B, BBs exec'd 0 ====
+	0x3A966CE5:  movl %eax,0xFFFFFD78(%ebx)
+	0x3A966CEB:  jmp 0x3A9668C1
+. 313 3A966CE5 11
+. 89 83 78 FD FF FF E9 D1 FB FF FF 
+
+==== BB 314 (0x3A9668C1) in 5731B, out 32335B, BBs exec'd 0 ====
+	0x3A9668C1:  movl 0xFFFFFB78(%ebx),%eax
+	0x3A9668C7:  movl 28(%eax),%edx
+	0x3A9668CA:  movl %eax,%esi
+	0x3A9668CC:  addl %edx,%esi
+	0x3A9668CE:  movzwl 44(%eax),%edx
+	0x3A9668D2:  movl %esi,0xFFFFFB28(%ebx)
+	0x3A9668D8:  movw %dx,0xFFFFFB30(%ebx)
+	0x3A9668DF:  movzwl 44(%eax),%edx
+	0x3A9668E3:  movl %edx,%eax
+	0x3A9668E5:  decl %edx
+	0x3A9668E6:  testl %eax,%eax
+	0x3A9668E8:  jz-8 0x3A96690C
+. 314 3A9668C1 41
+. 8B 83 78 FB FF FF 8B 50 1C 89 C6 01 D6 0F B7 50 2C 89 B3 28 FB FF FF 66 89 93 30 FB FF FF 0F B7 50 2C 89 D0 4A 85 C0 74 22 
+
+==== BB 315 (0x3A9668EA) in 5772B, out 32511B, BBs exec'd 0 ====
+	0x3A9668EA:  movl %edx,%eax
+	0x3A9668EC:  shll $0x5, %eax
+	0x3A9668EF:  addl %esi,%eax
+	0x3A9668F1:  movl %edx,%ecx
+	0x3A9668F3:  movl %edx,%edi
+	0x3A9668F5:  shll $0x5, %ecx
+	0x3A9668F8:  cmpl $0x6474E552, (%eax)
+	0x3A9668FE:  jz-32 0x3A967D4A
+. 315 3A9668EA 26
+. 89 D0 C1 E0 05 01 F0 89 D1 89 D7 C1 E1 05 81 38 52 E5 74 64 0F 84 46 14 00 00 
+
+==== BB 316 (0x3A966904) in 5798B, out 32622B, BBs exec'd 0 ====
+	0x3A966904:  decl %edx
+	0x3A966905:  subl $0x20, %eax
+	0x3A966908:  testl %edi,%edi
+	0x3A96690A:  jnz-8 0x3A9668F1
+. 316 3A966904 8
+. 4A 83 E8 20 85 FF 75 E5 
+
+==== BB 317 (0x3A9668F1) in 5806B, out 32705B, BBs exec'd 0 ====
+	0x3A9668F1:  movl %edx,%ecx
+	0x3A9668F3:  movl %edx,%edi
+	0x3A9668F5:  shll $0x5, %ecx
+	0x3A9668F8:  cmpl $0x6474E552, (%eax)
+	0x3A9668FE:  jz-32 0x3A967D4A
+. 317 3A9668F1 19
+. 89 D1 89 D7 C1 E1 05 81 38 52 E5 74 64 0F 84 46 14 00 00 
+
+==== BB 318 (0x3A96690C) in 5825B, out 32795B, BBs exec'd 0 ====
+	0x3A96690C:  movl 0xB0(%ebx),%esi
+	0x3A966912:  xorl %eax, %eax
+	0x3A966914:  movl %eax,0xFFFFFF14(%ebp)
+	0x3A96691A:  xorl %eax, %eax
+	0x3A96691C:  testl %esi,%esi
+	0x3A96691E:  movl %eax,0xFFFFFF10(%ebp)
+	0x3A966924:  jnz-32 0x3A967BDA
+. 318 3A96690C 30
+. 8B B3 B0 00 00 00 31 C0 89 85 14 FF FF FF 31 C0 85 F6 89 85 10 FF FF FF 0F 85 B0 12 00 00 
+
+==== BB 319 (0x3A967BDA) in 5855B, out 32924B, BBs exec'd 0 ====
+	0x3A967BDA:  movl %esi,(%esp,,)
+	0x3A967BDD:  leal 0xFFFFFF3C(%ebp), %edi
+	0x3A967BE3:  call 0x3A9752D0
+. 319 3A967BDA 14
+. 89 34 24 8D BD 3C FF FF FF E8 E8 D6 00 00 
+
+==== BB 320 (0x3A9752E7) in 5869B, out 32995B, BBs exec'd 0 ====
+	0x3A9752E7:  incl %eax
+	0x3A9752E8:  cmpb %dh{si},(%eax)
+	0x3A9752EA:  jz-32 0x3A975386
+. 320 3A9752E7 9
+. 40 38 30 0F 84 96 00 00 00 
+
+==== BB 321 (0x3A9752F0) in 5878B, out 33063B, BBs exec'd 0 ====
+	0x3A9752F0:  incl %eax
+	0x3A9752F1:  xorl $0x2, %edx
+	0x3A9752F4:  jz-8 0x3A975301
+. 321 3A9752F0 6
+. 40 83 F2 02 74 0B 
+
+==== BB 322 (0x3A967BE8) in 5884B, out 33134B, BBs exec'd 0 ====
+	0x3A967BE8:  leal 1(%eax), %edx
+	0x3A967BEB:  addl $0x13, %eax
+	0x3A967BEE:  andl $0xFFFFFFFC, %eax
+	0x3A967BF1:  subl %eax,%esp
+	0x3A967BF3:  leal 47(%esp,,), %eax
+	0x3A967BF7:  andl $0xFFFFFFF0, %eax
+	0x3A967BFA:  movl %edx,8(%esp,,)
+	0x3A967BFE:  movl %esi,4(%esp,,)
+	0x3A967C02:  movl %eax,(%esp,,)
+	0x3A967C05:  call 0x3A975870
+. 322 3A967BE8 34
+. 8D 50 01 83 C0 13 83 E0 FC 29 C4 8D 44 24 2F 83 E0 F0 89 54 24 08 89 74 24 04 89 04 24 E8 66 DC 00 00 
+
+==== BB 323 (0x3A967C0A) in 5918B, out 33277B, BBs exec'd 0 ====
+	0x3A967C0A:  movl %eax,0xFFFFFF3C(%ebp)
+	0x3A967C10:  leal 0xFFFFEDD7(%ebx), %edx
+	0x3A967C16:  movl %edx,0xFFFFFE60(%ebp)
+	0x3A967C1C:  movl 0xFFFFFE60(%ebp),%ecx
+	0x3A967C22:  movl %edi,(%esp,,)
+	0x3A967C25:  movl %ecx,4(%esp,,)
+	0x3A967C29:  call 0x3A973530
+. 323 3A967C0A 36
+. 89 85 3C FF FF FF 8D 93 D7 ED FF FF 89 95 60 FE FF FF 8B 8D 60 FE FF FF 89 3C 24 89 4C 24 04 E8 02 B9 00 00 
+
+==== BB 324 (0x3A967C2E) in 5954B, out 33406B, BBs exec'd 0 ====
+	0x3A967C2E:  testl %eax,%eax
+	0x3A967C30:  movl %eax,%esi
+	0x3A967C32:  jz-32 0x3A96692A
+. 324 3A967C2E 10
+. 85 C0 89 C6 0F 84 F2 EC FF FF 
+
+==== BB 325 (0x3A967C38) in 5964B, out 33472B, BBs exec'd 0 ====
+	0x3A967C38:  cmpb $0x0, (%esi)
+	0x3A967C3B:  jz-8 0x3A967C1C
+. 325 3A967C38 5
+. 80 3E 00 74 DF 
+
+==== BB 326 (0x3A967C3D) in 5969B, out 33532B, BBs exec'd 0 ====
+	0x3A967C3D:  movl 0x130(%ebx),%eax
+	0x3A967C43:  testl %eax,%eax
+	0x3A967C45:  jnz-32 0x3A968175
+. 326 3A967C3D 14
+. 8B 83 30 01 00 00 85 C0 0F 85 2A 05 00 00 
+
+==== BB 327 (0x3A967C4B) in 5983B, out 33607B, BBs exec'd 0 ====
+	0x3A967C4B:  movl 0xFFFFF994(%ebx),%eax
+	0x3A967C51:  xorl %ecx, %ecx
+	0x3A967C53:  xorl %edx, %edx
+	0x3A967C55:  movl %ecx,8(%esp,,)
+	0x3A967C59:  movl $0x1,%ecx
+	0x3A967C5E:  movl %edx,4(%esp,,)
+	0x3A967C62:  movl %esi,%edx
+	0x3A967C64:  movl $0x1, (%esp,,)
+	0x3A967C6B:  call 0x3A96AE40
+. 327 3A967C4B 37
+. 8B 83 94 F9 FF FF 31 C9 31 D2 89 4C 24 08 B9 01 00 00 00 89 54 24 04 89 F2 C7 04 24 01 00 00 00 E8 D0 31 00 00 
+
+==== BB 328 _dl_map_object(0x3A96AE40) in 6020B, out 33751B, BBs exec'd 0 ====
+	0x3A96AE40:  pushl %ebp
+	0x3A96AE41:  movl %esp,%ebp
+	0x3A96AE43:  pushl %edi
+	0x3A96AE44:  pushl %esi
+	0x3A96AE45:  pushl %ebx
+	0x3A96AE46:  subl $0x258, %esp
+	0x3A96AE4C:  call 0x3A97592B
+. 328 3A96AE40 17
+. 55 89 E5 57 56 53 81 EC 58 02 00 00 E8 DA AA 00 00 
+
+==== BB 329 (0x3A96AE51) in 6037B, out 33887B, BBs exec'd 0 ====
+	0x3A96AE51:  addl $0xD81B, %ebx
+	0x3A96AE57:  movl %eax,0xFFFFFDD8(%ebp)
+	0x3A96AE5D:  movl 0xFFFFF994(%ebx),%esi
+	0x3A96AE63:  movl %edx,0xFFFFFDD4(%ebp)
+	0x3A96AE69:  movl %ecx,0xFFFFFDD0(%ebp)
+	0x3A96AE6F:  testl %esi,%esi
+	0x3A96AE71:  jz-8 0x3A96AEB2
+. 329 3A96AE51 34
+. 81 C3 1B D8 00 00 89 85 D8 FD FF FF 8B B3 94 F9 FF FF 89 95 D4 FD FF FF 89 8D D0 FD FF FF 85 F6 74 3F 
+
+==== BB 330 (0x3A96AE73) in 6071B, out 34026B, BBs exec'd 0 ====
+	0x3A96AE73:  leal 0x0(%esi), %esi
+	0x3A96AE79:  leal 0(%edi,,), %edi
+	0x3A96AE80:  testb $0x2, 0x175(%esi)
+	0x3A96AE87:  jnz-8 0x3A96AEAB
+. 330 3A96AE73 22
+. 8D B6 00 00 00 00 8D BC 27 00 00 00 00 F6 86 75 01 00 00 02 75 22 
+
+==== BB 331 (0x3A96AE89) in 6093B, out 34123B, BBs exec'd 0 ====
+	0x3A96AE89:  movl 0xFFFFFDD4(%ebp),%eax
+	0x3A96AE8F:  movl %esi,%edx
+	0x3A96AE91:  call 0x3A970B80
+. 331 3A96AE89 13
+. 8B 85 D4 FD FF FF 89 F2 E8 EA 5C 00 00 
+
+==== BB 332 _dl_name_match_p(0x3A970B80) in 6106B, out 34197B, BBs exec'd 0 ====
+	0x3A970B80:  pushl %ebp
+	0x3A970B81:  movl %esp,%ebp
+	0x3A970B83:  pushl %edi
+	0x3A970B84:  movl %eax,%edi
+	0x3A970B86:  pushl %esi
+	0x3A970B87:  movl %edx,%esi
+	0x3A970B89:  pushl %ebx
+	0x3A970B8A:  subl $0x8, %esp
+	0x3A970B8D:  movl 4(%edx),%eax
+	0x3A970B90:  call 0x3A97592B
+. 332 3A970B80 21
+. 55 89 E5 57 89 C7 56 89 D6 53 83 EC 08 8B 42 04 E8 96 4D 00 00 
+
+==== BB 333 (0x3A970B95) in 6127B, out 34366B, BBs exec'd 0 ====
+	0x3A970B95:  addl $0x7AD7, %ebx
+	0x3A970B9B:  movl %edi,(%esp,,)
+	0x3A970B9E:  movl %eax,4(%esp,,)
+	0x3A970BA2:  call 0x3A975280
+. 333 3A970B95 18
+. 81 C3 D7 7A 00 00 89 3C 24 89 44 24 04 E8 D9 46 00 00 
+
+==== BB 334 strcmp(0x3A975280) in 6145B, out 34454B, BBs exec'd 0 ====
+	0x3A975280:  pushl %ebp
+	0x3A975281:  movl %esp,%ebp
+	0x3A975283:  pushl %esi
+	0x3A975284:  subl $0x4, %esp
+	0x3A975287:  movl 8(%ebp),%ecx
+	0x3A97528A:  movl 12(%ebp),%edx
+	0x3A97528D:  leal 0(%esi), %esi
+	0x3A975290:  movzbl (%ecx),%esi
+	0x3A975293:  incl %ecx
+	0x3A975294:  movzbl (%edx),%eax
+	0x3A975297:  incl %edx
+	0x3A975298:  movb %al,-5(%ebp)
+	0x3A97529B:  movl %esi,%eax
+	0x3A97529D:  testb %al,%al
+	0x3A97529F:  jz-8 0x3A9752B7
+. 334 3A975280 33
+. 55 89 E5 56 83 EC 04 8B 4D 08 8B 55 0C 8D 76 00 0F B6 31 41 0F B6 02 42 88 45 FB 89 F0 84 C0 74 16 
+
+==== BB 335 (0x3A9752A1) in 6178B, out 34665B, BBs exec'd 0 ====
+	0x3A9752A1:  movl %esi,%eax
+	0x3A9752A3:  cmpb -5(%ebp),%al
+	0x3A9752A6:  jz-8 0x3A975290
+. 335 3A9752A1 7
+. 89 F0 3A 45 FB 74 E8 
+
+==== BB 336 (0x3A9752A8) in 6185B, out 34741B, BBs exec'd 0 ====
+	0x3A9752A8:  movzbl %al,%edx
+	0x3A9752AB:  movzbl -5(%ebp),%eax
+	0x3A9752AF:  subl %eax,%edx
+	0x3A9752B1:  movl %edx,%eax
+	0x3A9752B3:  popl %edx
+	0x3A9752B4:  popl %esi
+	0x3A9752B5:  popl %ebp
+	0x3A9752B6:  ret
+. 336 3A9752A8 15
+. 0F B6 D0 0F B6 45 FB 29 C2 89 D0 5A 5E 5D C3 
+
+==== BB 337 (0x3A970BA7) in 6200B, out 34855B, BBs exec'd 0 ====
+	0x3A970BA7:  testl %eax,%eax
+	0x3A970BA9:  movl $0x1,%edx
+	0x3A970BAE:  jz-8 0x3A970BE0
+. 337 3A970BA7 9
+. 85 C0 BA 01 00 00 00 74 30 
+
+==== BB 338 (0x3A970BB0) in 6209B, out 34926B, BBs exec'd 0 ====
+	0x3A970BB0:  movl 20(%esi),%esi
+	0x3A970BB3:  testl %esi,%esi
+	0x3A970BB5:  jz-8 0x3A970BDE
+. 338 3A970BB0 7
+. 8B 76 14 85 F6 74 27 
+
+==== BB 339 (0x3A970BB7) in 6216B, out 34998B, BBs exec'd 0 ====
+	0x3A970BB7:  movl %esi,%esi
+	0x3A970BB9:  leal 0(%edi,,), %edi
+	0x3A970BC0:  movl (%esi),%eax
+	0x3A970BC2:  movl %edi,(%esp,,)
+	0x3A970BC5:  movl %eax,4(%esp,,)
+	0x3A970BC9:  call 0x3A975280
+. 339 3A970BB7 23
+. 89 F6 8D BC 27 00 00 00 00 8B 06 89 3C 24 89 44 24 04 E8 B2 46 00 00 
+
+==== BB 340 (0x3A970BCE) in 6239B, out 35093B, BBs exec'd 0 ====
+	0x3A970BCE:  testl %eax,%eax
+	0x3A970BD0:  movl $0x1,%edx
+	0x3A970BD5:  jz-8 0x3A970BE0
+. 340 3A970BCE 9
+. 85 C0 BA 01 00 00 00 74 09 
+
+==== BB 341 (0x3A970BD7) in 6248B, out 35164B, BBs exec'd 0 ====
+	0x3A970BD7:  movl 4(%esi),%esi
+	0x3A970BDA:  testl %esi,%esi
+	0x3A970BDC:  jnz-8 0x3A970BC0
+. 341 3A970BD7 7
+. 8B 76 04 85 F6 75 E2 
+
+==== BB 342 (0x3A970BDE) in 6255B, out 35236B, BBs exec'd 0 ====
+	0x3A970BDE:  xorl %edx, %edx
+	0x3A970BE0:  addl $0x8, %esp
+	0x3A970BE3:  movl %edx,%eax
+	0x3A970BE5:  popl %ebx
+	0x3A970BE6:  popl %esi
+	0x3A970BE7:  popl %edi
+	0x3A970BE8:  popl %ebp
+	0x3A970BE9:  ret
+. 342 3A970BDE 12
+. 31 D2 83 C4 08 89 D0 5B 5E 5F 5D C3 
+
+==== BB 343 (0x3A96AE96) in 6267B, out 35363B, BBs exec'd 0 ====
+	0x3A96AE96:  testl %eax,%eax
+	0x3A96AE98:  jnz-32 0x3A96AFA7
+. 343 3A96AE96 8
+. 85 C0 0F 85 09 01 00 00 
+
+==== BB 344 (0x3A96AE9E) in 6275B, out 35422B, BBs exec'd 0 ====
+	0x3A96AE9E:  testb $0x1, 0x175(%esi)
+	0x3A96AEA5:  jz-32 0x3A96AFB3
+. 344 3A96AE9E 13
+. F6 86 75 01 00 00 01 0F 84 08 01 00 00 
+
+==== BB 345 (0x3A96AFB3) in 6288B, out 35492B, BBs exec'd 0 ====
+	0x3A96AFB3:  movl 80(%esi),%edx
+	0x3A96AFB6:  testl %edx,%edx
+	0x3A96AFB8:  jz-32 0x3A96AEAB
+. 345 3A96AFB3 11
+. 8B 56 50 85 D2 0F 84 ED FE FF FF 
+
+==== BB 346 (0x3A96AEAB) in 6299B, out 35564B, BBs exec'd 0 ====
+	0x3A96AEAB:  movl 12(%esi),%esi
+	0x3A96AEAE:  testl %esi,%esi
+	0x3A96AEB0:  jnz-8 0x3A96AE80
+. 346 3A96AEAB 7
+. 8B 76 0C 85 F6 75 CE 
+
+==== BB 347 (0x3A96AE80) in 6306B, out 35636B, BBs exec'd 0 ====
+	0x3A96AE80:  testb $0x2, 0x175(%esi)
+	0x3A96AE87:  jnz-8 0x3A96AEAB
+. 347 3A96AE80 9
+. F6 86 75 01 00 00 02 75 22 
+
+==== BB 348 (0x3A975290) in 6315B, out 35706B, BBs exec'd 0 ====
+	0x3A975290:  movzbl (%ecx),%esi
+	0x3A975293:  incl %ecx
+	0x3A975294:  movzbl (%edx),%eax
+	0x3A975297:  incl %edx
+	0x3A975298:  movb %al,-5(%ebp)
+	0x3A97529B:  movl %esi,%eax
+	0x3A97529D:  testb %al,%al
+	0x3A97529F:  jz-8 0x3A9752B7
+. 348 3A975290 17
+. 0F B6 31 41 0F B6 02 42 88 45 FB 89 F0 84 C0 74 16 
+
+==== BB 349 (0x3A96AFBE) in 6332B, out 35838B, BBs exec'd 0 ====
+	0x3A96AFBE:  movl 44(%esi),%eax
+	0x3A96AFC1:  movl 4(%edx),%edi
+	0x3A96AFC4:  movl 4(%eax),%edx
+	0x3A96AFC7:  addl %edx,%edi
+	0x3A96AFC9:  movl %edi,4(%esp,,)
+	0x3A96AFCD:  movl 0xFFFFFDD4(%ebp),%eax
+	0x3A96AFD3:  movl %eax,(%esp,,)
+	0x3A96AFD6:  call 0x3A975280
+. 349 3A96AFBE 29
+. 8B 46 2C 8B 7A 04 8B 50 04 01 D7 89 7C 24 04 8B 85 D4 FD FF FF 89 04 24 E8 A5 A2 00 00 
+
+==== BB 350 (0x3A96AFDB) in 6361B, out 35971B, BBs exec'd 0 ====
+	0x3A96AFDB:  testl %eax,%eax
+	0x3A96AFDD:  jnz-32 0x3A96AEAB
+. 350 3A96AFDB 8
+. 85 C0 0F 85 C8 FE FF FF 
+
+==== BB 351 (0x3A96AEB2) in 6369B, out 36030B, BBs exec'd 0 ====
+	0x3A96AEB2:  testb $0x40, 0xFFFFFC14(%ebx)
+	0x3A96AEB9:  jnz-32 0x3A96B2BE
+. 351 3A96AEB2 13
+. F6 83 14 FC FF FF 40 0F 85 FF 03 00 00 
+
+==== BB 352 (0x3A96AEBF) in 6382B, out 36100B, BBs exec'd 0 ====
+	0x3A96AEBF:  movl $0x2F,%eax
+	0x3A96AEC4:  movl %eax,4(%esp,,)
+	0x3A96AEC8:  movl 0xFFFFFDD4(%ebp),%eax
+	0x3A96AECE:  movl %eax,(%esp,,)
+	0x3A96AED1:  call 0x3A9750B0
+. 352 3A96AEBF 23
+. B8 2F 00 00 00 89 44 24 04 8B 85 D4 FD FF FF 89 04 24 E8 DA A1 00 00 
+
+==== BB 353 strchr(0x3A9750B0) in 6405B, out 36190B, BBs exec'd 0 ====
+	0x3A9750B0:  pushl %edi
+	0x3A9750B1:  pushl %esi
+	0x3A9750B2:  pushl %ebx
+	0x3A9750B3:  pushl %ebp
+	0x3A9750B4:  movl 20(%esp,,),%eax
+	0x3A9750B8:  movl 24(%esp,,),%edx
+	0x3A9750BC:  movl %eax,%edi
+	0x3A9750BE:  xorl %ecx, %ecx
+	0x3A9750C0:  movb %dl,%dh{si}
+	0x3A9750C2:  movb %dl,%cl
+	0x3A9750C4:  shll $0x10, %edx
+	0x3A9750C7:  movb %cl,%ch{bp}
+	0x3A9750C9:  orl %ecx,%edx
+	0x3A9750CB:  andl $0x3, %edi
+	0x3A9750CE:  jz-8 0x3A975111
+. 353 3A9750B0 32
+. 57 56 53 55 8B 44 24 14 8B 54 24 18 89 C7 31 C9 88 D6 88 D1 C1 E2 10 88 CD 09 CA 83 E7 03 74 41 
+
+==== BB 354 (0x3A975111) in 6437B, out 36417B, BBs exec'd 0 ====
+	0x3A975111:  movl (%eax),%ecx
+	0x3A975113:  movl $0xFEFEFEFF,%ebp
+	0x3A975118:  movl $0xFEFEFEFF,%edi
+	0x3A97511D:  addl %ecx,%ebp
+	0x3A97511F:  xorl %ecx,%ebp
+	0x3A975121:  addl %ecx,%edi
+	0x3A975123:  leal 4(%eax), %eax
+	0x3A975126:  jnb-32 0x3A975242
+. 354 3A975111 27
+. 8B 08 BD FF FE FE FE BF FF FE FE FE 01 CD 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00 
+
+==== BB 355 (0x3A97512C) in 6464B, out 36529B, BBs exec'd 0 ====
+	0x3A97512C:  movl %ecx,%ebx
+	0x3A97512E:  orl $0xFEFEFEFF, %ebp
+	0x3A975134:  addl $0x1, %ebp
+	0x3A975137:  jnz-32 0x3A975242
+. 355 3A97512C 17
+. 89 CB 81 CD FF FE FE FE 83 C5 01 0F 85 05 01 00 00 
+
+==== BB 356 (0x3A97513D) in 6481B, out 36610B, BBs exec'd 0 ====
+	0x3A97513D:  movl $0xFEFEFEFF,%esi
+	0x3A975142:  xorl %edx,%ebx
+	0x3A975144:  movl (%eax),%ecx
+	0x3A975146:  addl %ebx,%esi
+	0x3A975148:  movl $0xFEFEFEFF,%edi
+	0x3A97514D:  jnb-32 0x3A975227
+. 356 3A97513D 22
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 0F 83 D4 00 00 00 
+
+==== BB 357 (0x3A975153) in 6503B, out 36713B, BBs exec'd 0 ====
+	0x3A975153:  movl %edi,%ebp
+	0x3A975155:  xorl %ebx,%esi
+	0x3A975157:  addl %ecx,%ebp
+	0x3A975159:  orl $0xFEFEFEFF, %esi
+	0x3A97515F:  addl $0x1, %esi
+	0x3A975162:  jnz-32 0x3A975227
+. 357 3A975153 21
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 0F 85 BF 00 00 00 
+
+==== BB 358 (0x3A975227) in 6524B, out 36808B, BBs exec'd 0 ====
+	0x3A975227:  subl $0x4, %eax
+	0x3A97522A:  testb %bl,%bl
+	0x3A97522C:  jz-8 0x3A97523D
+. 358 3A975227 7
+. 83 E8 04 84 DB 74 0F 
+
+==== BB 359 (0x3A97523D) in 6531B, out 36882B, BBs exec'd 0 ====
+	0x3A97523D:  popl %ebp
+	0x3A97523E:  popl %ebx
+	0x3A97523F:  popl %esi
+	0x3A975240:  popl %edi
+	0x3A975241:  ret
+. 359 3A97523D 5
+. 5D 5B 5E 5F C3 
+
+==== BB 360 (0x3A96AED6) in 6536B, out 36976B, BBs exec'd 0 ====
+	0x3A96AED6:  testl %eax,%eax
+	0x3A96AED8:  jz-32 0x3A96B088
+. 360 3A96AED6 8
+. 85 C0 0F 84 AA 01 00 00 
+
+==== BB 361 (0x3A96AEDE) in 6544B, out 37035B, BBs exec'd 0 ====
+	0x3A96AEDE:  movl 0xFFFFFDD8(%ebp),%edi
+	0x3A96AEE4:  testl %edi,%edi
+	0x3A96AEE6:  jz-32 0x3A96B165
+. 361 3A96AEDE 14
+. 8B BD D8 FD FF FF 85 FF 0F 84 79 02 00 00 
+
+==== BB 362 (0x3A96AEEC) in 6558B, out 37110B, BBs exec'd 0 ====
+	0x3A96AEEC:  movl 0xFFFFFDD4(%ebp),%edx
+	0x3A96AEF2:  movl 0xFFFFFDD8(%ebp),%eax
+	0x3A96AEF8:  call 0x3A96A5D0
+. 362 3A96AEEC 17
+. 8B 95 D4 FD FF FF 8B 85 D8 FD FF FF E8 D3 F6 FF FF 
+
+==== BB 363 expand_dynamic_string_token(0x3A96A5D0) in 6575B, out 37190B, BBs exec'd 0 ====
+	0x3A96A5D0:  pushl %ebp
+	0x3A96A5D1:  movl $0x24,%ecx
+	0x3A96A5D6:  movl %esp,%ebp
+	0x3A96A5D8:  subl $0x24, %esp
+	0x3A96A5DB:  movl %ebx,-12(%ebp)
+	0x3A96A5DE:  movl %esi,-8(%ebp)
+	0x3A96A5E1:  xorl %esi, %esi
+	0x3A96A5E3:  movl %edi,-4(%ebp)
+	0x3A96A5E6:  movl %edx,%edi
+	0x3A96A5E8:  movl %eax,-16(%ebp)
+	0x3A96A5EB:  call 0x3A97592B
+. 363 3A96A5D0 32
+. 55 B9 24 00 00 00 89 E5 83 EC 24 89 5D F4 89 75 F8 31 F6 89 7D FC 89 D7 89 45 F0 E8 3B B3 00 00 
+
+==== BB 364 (0x3A96A5F0) in 6607B, out 37363B, BBs exec'd 0 ====
+	0x3A96A5F0:  addl $0xE07C, %ebx
+	0x3A96A5F6:  movl %ecx,4(%esp,,)
+	0x3A96A5FA:  movl %edx,(%esp,,)
+	0x3A96A5FD:  call 0x3A9750B0
+. 364 3A96A5F0 18
+. 81 C3 7C E0 00 00 89 4C 24 04 89 14 24 E8 AE AA 00 00 
+
+==== BB 365 (0x3A975168) in 6625B, out 37451B, BBs exec'd 0 ====
+	0x3A975168:  xorl %ecx,%ebp
+	0x3A97516A:  addl %ecx,%edi
+	0x3A97516C:  leal 4(%eax), %eax
+	0x3A97516F:  jnb-32 0x3A975242
+. 365 3A975168 13
+. 31 CD 01 CF 8D 40 04 0F 83 CD 00 00 00 
+
+==== BB 366 (0x3A975175) in 6638B, out 37537B, BBs exec'd 0 ====
+	0x3A975175:  movl %ecx,%ebx
+	0x3A975177:  orl $0xFEFEFEFF, %ebp
+	0x3A97517D:  addl $0x1, %ebp
+	0x3A975180:  jnz-32 0x3A975242
+. 366 3A975175 17
+. 89 CB 81 CD FF FE FE FE 83 C5 01 0F 85 BC 00 00 00 
+
+==== BB 367 (0x3A975186) in 6655B, out 37618B, BBs exec'd 0 ====
+	0x3A975186:  movl $0xFEFEFEFF,%esi
+	0x3A97518B:  xorl %edx,%ebx
+	0x3A97518D:  movl (%eax),%ecx
+	0x3A97518F:  addl %ebx,%esi
+	0x3A975191:  movl $0xFEFEFEFF,%edi
+	0x3A975196:  jnb-32 0x3A975227
+. 367 3A975186 22
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 0F 83 8B 00 00 00 
+
+==== BB 368 (0x3A97519C) in 6677B, out 37721B, BBs exec'd 0 ====
+	0x3A97519C:  movl %edi,%ebp
+	0x3A97519E:  xorl %ebx,%esi
+	0x3A9751A0:  addl %ecx,%ebp
+	0x3A9751A2:  orl $0xFEFEFEFF, %esi
+	0x3A9751A8:  addl $0x1, %esi
+	0x3A9751AB:  jnz-8 0x3A975227
+. 368 3A97519C 17
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 75 7A 
+
+==== BB 369 (0x3A9751AD) in 6694B, out 37816B, BBs exec'd 0 ====
+	0x3A9751AD:  xorl %ecx,%ebp
+	0x3A9751AF:  addl %ecx,%edi
+	0x3A9751B1:  leal 4(%eax), %eax
+	0x3A9751B4:  jnb-32 0x3A975242
+. 369 3A9751AD 13
+. 31 CD 01 CF 8D 40 04 0F 83 88 00 00 00 
+
+==== BB 370 (0x3A9751BA) in 6707B, out 37902B, BBs exec'd 0 ====
+	0x3A9751BA:  movl %ecx,%ebx
+	0x3A9751BC:  orl $0xFEFEFEFF, %ebp
+	0x3A9751C2:  addl $0x1, %ebp
+	0x3A9751C5:  jnz-8 0x3A975242
+. 370 3A9751BA 13
+. 89 CB 81 CD FF FE FE FE 83 C5 01 75 7B 
+
+==== BB 371 (0x3A9751C7) in 6720B, out 37983B, BBs exec'd 0 ====
+	0x3A9751C7:  movl $0xFEFEFEFF,%esi
+	0x3A9751CC:  xorl %edx,%ebx
+	0x3A9751CE:  movl (%eax),%ecx
+	0x3A9751D0:  addl %ebx,%esi
+	0x3A9751D2:  movl $0xFEFEFEFF,%edi
+	0x3A9751D7:  jnb-8 0x3A975227
+. 371 3A9751C7 18
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 73 4E 
+
+==== BB 372 (0x3A9751D9) in 6738B, out 38086B, BBs exec'd 0 ====
+	0x3A9751D9:  movl %edi,%ebp
+	0x3A9751DB:  xorl %ebx,%esi
+	0x3A9751DD:  addl %ecx,%ebp
+	0x3A9751DF:  orl $0xFEFEFEFF, %esi
+	0x3A9751E5:  addl $0x1, %esi
+	0x3A9751E8:  jnz-8 0x3A975227
+. 372 3A9751D9 17
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 75 3D 
+
+==== BB 373 (0x3A9751EA) in 6755B, out 38181B, BBs exec'd 0 ====
+	0x3A9751EA:  xorl %ecx,%ebp
+	0x3A9751EC:  addl %ecx,%edi
+	0x3A9751EE:  leal 4(%eax), %eax
+	0x3A9751F1:  jnb-8 0x3A975242
+. 373 3A9751EA 9
+. 31 CD 01 CF 8D 40 04 73 4F 
+
+==== BB 374 (0x3A9751F3) in 6764B, out 38267B, BBs exec'd 0 ====
+	0x3A9751F3:  movl %ecx,%ebx
+	0x3A9751F5:  orl $0xFEFEFEFF, %ebp
+	0x3A9751FB:  addl $0x1, %ebp
+	0x3A9751FE:  jnz-8 0x3A975242
+. 374 3A9751F3 13
+. 89 CB 81 CD FF FE FE FE 83 C5 01 75 42 
+
+==== BB 375 (0x3A975200) in 6777B, out 38348B, BBs exec'd 0 ====
+	0x3A975200:  movl $0xFEFEFEFF,%esi
+	0x3A975205:  xorl %edx,%ebx
+	0x3A975207:  movl (%eax),%ecx
+	0x3A975209:  addl %ebx,%esi
+	0x3A97520B:  movl $0xFEFEFEFF,%edi
+	0x3A975210:  jnb-8 0x3A975227
+. 375 3A975200 18
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 73 15 
+
+==== BB 376 (0x3A975212) in 6795B, out 38451B, BBs exec'd 0 ====
+	0x3A975212:  movl %edi,%ebp
+	0x3A975214:  xorl %ebx,%esi
+	0x3A975216:  addl %ecx,%ebp
+	0x3A975218:  orl $0xFEFEFEFF, %esi
+	0x3A97521E:  addl $0x1, %esi
+	0x3A975221:  jz-32 0x3A97511F
+. 376 3A975212 21
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 0F 84 F8 FE FF FF 
+
+==== BB 377 (0x3A97511F) in 6816B, out 38546B, BBs exec'd 0 ====
+	0x3A97511F:  xorl %ecx,%ebp
+	0x3A975121:  addl %ecx,%edi
+	0x3A975123:  leal 4(%eax), %eax
+	0x3A975126:  jnb-32 0x3A975242
+. 377 3A97511F 13
+. 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00 
+
+==== BB 378 (0x3A975242) in 6829B, out 38632B, BBs exec'd 0 ====
+	0x3A975242:  subl $0x4, %eax
+	0x3A975245:  cmpb %dl,%cl
+	0x3A975247:  jz-8 0x3A97523D
+. 378 3A975242 7
+. 83 E8 04 38 D1 74 F4 
+
+==== BB 379 (0x3A975249) in 6836B, out 38703B, BBs exec'd 0 ====
+	0x3A975249:  cmpb $0x0, %cl
+	0x3A97524C:  jz-8 0x3A97526A
+. 379 3A975249 5
+. 80 F9 00 74 1C 
+
+==== BB 380 (0x3A97526A) in 6841B, out 38761B, BBs exec'd 0 ====
+	0x3A97526A:  xorl %eax, %eax
+	0x3A97526C:  popl %ebp
+	0x3A97526D:  popl %ebx
+	0x3A97526E:  popl %esi
+	0x3A97526F:  popl %edi
+	0x3A975270:  ret
+. 380 3A97526A 7
+. 31 C0 5D 5B 5E 5F C3 
+
+==== BB 381 (0x3A96A602) in 6848B, out 38871B, BBs exec'd 0 ====
+	0x3A96A602:  testl %eax,%eax
+	0x3A96A604:  jnz-8 0x3A96A647
+. 381 3A96A602 4
+. 85 C0 75 41 
+
+==== BB 382 (0x3A96A606) in 6852B, out 38930B, BBs exec'd 0 ====
+	0x3A96A606:  testl %esi,%esi
+	0x3A96A608:  jnz-8 0x3A96A65C
+. 382 3A96A606 4
+. 85 F6 75 52 
+
+==== BB 383 (0x3A96A60A) in 6856B, out 38989B, BBs exec'd 0 ====
+	0x3A96A60A:  movl %edi,(%esp,,)
+	0x3A96A60D:  leal 0(%esi), %esi
+	0x3A96A610:  call 0x3A9752D0
+. 383 3A96A60A 11
+. 89 3C 24 8D 76 00 E8 BB AC 00 00 
+
+==== BB 384 (0x3A96A615) in 6867B, out 39057B, BBs exec'd 0 ====
+	0x3A96A615:  leal 1(%eax), %esi
+	0x3A96A618:  movl %esi,(%esp,,)
+	0x3A96A61B:  call 0x3A96581C
+. 384 3A96A615 11
+. 8D 70 01 89 34 24 E8 FC B1 FF FF 
+
+==== BB 385 (0x3A96A620) in 6878B, out 39122B, BBs exec'd 0 ====
+	0x3A96A620:  xorl %edx, %edx
+	0x3A96A622:  testl %eax,%eax
+	0x3A96A624:  jz-8 0x3A96A638
+. 385 3A96A620 6
+. 31 D2 85 C0 74 12 
+
+==== BB 386 (0x3A96A626) in 6884B, out 39195B, BBs exec'd 0 ====
+	0x3A96A626:  movl %esi,8(%esp,,)
+	0x3A96A62A:  movl %edi,4(%esp,,)
+	0x3A96A62E:  movl %eax,(%esp,,)
+	0x3A96A631:  call 0x3A975870
+. 386 3A96A626 16
+. 89 74 24 08 89 7C 24 04 89 04 24 E8 3A B2 00 00 
+
+==== BB 387 (0x3A96A636) in 6900B, out 39276B, BBs exec'd 0 ====
+	0x3A96A636:  movl %eax,%edx
+	0x3A96A638:  movl %edx,%eax
+	0x3A96A63A:  movl -12(%ebp),%ebx
+	0x3A96A63D:  movl -8(%ebp),%esi
+	0x3A96A640:  movl -4(%ebp),%edi
+	0x3A96A643:  movl %ebp,%esp
+	0x3A96A645:  popl %ebp
+	0x3A96A646:  ret
+. 387 3A96A636 17
+. 89 C2 89 D0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 388 (0x3A96AEFD) in 6917B, out 39383B, BBs exec'd 0 ====
+	0x3A96AEFD:  movl %eax,0xFFFFFDE0(%ebp)
+	0x3A96AF03:  testl %eax,%eax
+	0x3A96AF05:  jnz-32 0x3A96B010
+. 388 3A96AEFD 14
+. 89 85 E0 FD FF FF 85 C0 0F 85 05 01 00 00 
+
+==== BB 389 (0x3A96B010) in 6931B, out 39461B, BBs exec'd 0 ====
+	0x3A96B010:  leal 0xFFFFFDE4(%ebp), %ecx
+	0x3A96B016:  movl %ecx,%edx
+	0x3A96B018:  movl %ecx,0xFFFFFDB8(%ebp)
+	0x3A96B01E:  call 0x3A968F20
+. 389 3A96B010 19
+. 8D 8D E4 FD FF FF 89 CA 89 8D B8 FD FF FF E8 FD DE FF FF 
+
+==== BB 390 open_verify(0x3A968F20) in 6950B, out 39545B, BBs exec'd 0 ====
+	0x3A968F20:  pushl %ebp
+	0x3A968F21:  movl %esp,%ebp
+	0x3A968F23:  pushl %edi
+	0x3A968F24:  pushl %esi
+	0x3A968F25:  xorl %esi, %esi
+	0x3A968F27:  pushl %ebx
+	0x3A968F28:  subl $0x4C, %esp
+	0x3A968F2B:  call 0x3A97592B
+. 390 3A968F20 16
+. 55 89 E5 57 56 31 F6 53 83 EC 4C E8 FB C9 00 00 
+
+==== BB 391 (0x3A968F30) in 6966B, out 39692B, BBs exec'd 0 ====
+	0x3A968F30:  addl $0xF73C, %ebx
+	0x3A968F36:  movl %eax,-48(%ebp)
+	0x3A968F39:  movl %edx,-52(%ebp)
+	0x3A968F3C:  movl $0x0, -60(%ebp)
+	0x3A968F43:  movl %esi,4(%esp,,)
+	0x3A968F47:  movl %eax,(%esp,,)
+	0x3A968F4A:  call 0x3A974430
+. 391 3A968F30 31
+. 81 C3 3C F7 00 00 89 45 D0 89 55 CC C7 45 C4 00 00 00 00 89 74 24 04 89 04 24 E8 E1 B4 00 00 
+
+==== BB 392 open(0x3A974430) in 6997B, out 39821B, BBs exec'd 0 ====
+	0x3A974430:  pushl %ebx
+	0x3A974431:  movl 16(%esp,,),%edx
+	0x3A974435:  movl 12(%esp,,),%ecx
+	0x3A974439:  movl 8(%esp,,),%ebx
+	0x3A97443D:  movl $0x5,%eax
+	0x3A974442:  int $0x80
+. 392 3A974430 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 05 00 00 00 CD 80 
+
+==== BB 393 (0x3A974444) in 7017B, out 39917B, BBs exec'd 0 ====
+	0x3A974444:  popl %ebx
+	0x3A974445:  cmpl $0xFFFFF001, %eax
+	0x3A97444A:  jnb-8 0x3A97444D
+. 393 3A974444 8
+. 5B 3D 01 F0 FF FF 73 01 
+
+==== BB 394 (0x3A97444C) in 7025B, out 39996B, BBs exec'd 0 ====
+	0x3A97444C:  ret
+. 394 3A97444C 1
+. C3 
+
+==== BB 395 (0x3A968F4F) in 7026B, out 40026B, BBs exec'd 0 ====
+	0x3A968F4F:  movl %eax,-56(%ebp)
+	0x3A968F52:  cmpl $0xFFFFFFFF, %eax
+	0x3A968F55:  jz-32 0x3A969150
+. 395 3A968F4F 12
+. 89 45 C8 83 F8 FF 0F 84 F5 01 00 00 
+
+==== BB 396 (0x3A968F5B) in 7038B, out 40096B, BBs exec'd 0 ====
+	0x3A968F5B:  xorl %ecx, %ecx
+	0x3A968F5D:  movl $0x200,%edx
+	0x3A968F62:  movl %ecx,0x148(%ebx)
+	0x3A968F68:  movl %edx,8(%esp,,)
+	0x3A968F6C:  movl -52(%ebp),%edx
+	0x3A968F6F:  addl $0x4, %edx
+	0x3A968F72:  movl %edx,-72(%ebp)
+	0x3A968F75:  movl %edx,4(%esp,,)
+	0x3A968F79:  movl %eax,(%esp,,)
+	0x3A968F7C:  call 0x3A9744B0
+. 396 3A968F5B 38
+. 31 C9 BA 00 02 00 00 89 8B 48 01 00 00 89 54 24 08 8B 55 CC 83 C2 04 89 55 B8 89 54 24 04 89 04 24 E8 2F B5 00 00 
+
+==== BB 397 read(0x3A9744B0) in 7076B, out 40247B, BBs exec'd 0 ====
+	0x3A9744B0:  pushl %ebx
+	0x3A9744B1:  movl 16(%esp,,),%edx
+	0x3A9744B5:  movl 12(%esp,,),%ecx
+	0x3A9744B9:  movl 8(%esp,,),%ebx
+	0x3A9744BD:  movl $0x3,%eax
+	0x3A9744C2:  int $0x80
+. 397 3A9744B0 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 03 00 00 00 CD 80 
+
+==== BB 398 (0x3A9744C4) in 7096B, out 40343B, BBs exec'd 0 ====
+	0x3A9744C4:  popl %ebx
+	0x3A9744C5:  cmpl $0xFFFFF001, %eax
+	0x3A9744CA:  jnb-8 0x3A9744CD
+. 398 3A9744C4 8
+. 5B 3D 01 F0 FF FF 73 01 
+
+==== BB 399 (0x3A9744CC) in 7104B, out 40422B, BBs exec'd 0 ====
+	0x3A9744CC:  ret
+. 399 3A9744CC 1
+. C3 
+
+==== BB 400 (0x3A968F81) in 7105B, out 40452B, BBs exec'd 0 ====
+	0x3A968F81:  movl -52(%ebp),%esi
+	0x3A968F84:  cmpl $0x33, %eax
+	0x3A968F87:  movl %eax,(%esi)
+	0x3A968F89:  jle-32 0x3A96919F
+. 400 3A968F81 14
+. 8B 75 CC 83 F8 33 89 06 0F 8E 10 02 00 00 
+
+==== BB 401 (0x3A968F8F) in 7119B, out 40535B, BBs exec'd 0 ====
+	0x3A968F8F:  cld
+	0x3A968F90:  movl -72(%ebp),%esi
+	0x3A968F93:  movl $0x9,%ecx
+	0x3A968F98:  leal 0xFFFFD62F(%ebx), %edi
+	0x3A968F9E:  repe cmpsb
+. 401 3A968F8F 17
+. FC 8B 75 B8 B9 09 00 00 00 8D BB 2F D6 FF FF F3 A6 
+
+==== BB 402 (0x3A968F9E) in 7136B, out 40701B, BBs exec'd 0 ====
+	0x3A968F9E:  repe cmpsb
+. 402 3A968F9E 2
+. F3 A6 
+
+==== BB 403 (0x3A968FA0) in 7138B, out 40826B, BBs exec'd 0 ====
+	0x3A968FA0:  jnz-32 0x3A9691BB
+. 403 3A968FA0 6
+. 0F 85 15 02 00 00 
+
+==== BB 404 (0x3A968FA6) in 7144B, out 40873B, BBs exec'd 0 ====
+	0x3A968FA6:  movl -72(%ebp),%esi
+	0x3A968FA9:  leal 0xFFFFE3B4(%ebx), %edx
+	0x3A968FAF:  cmpl $0x1, 20(%esi)
+	0x3A968FB3:  jnz-32 0x3A9690D9
+. 404 3A968FA6 19
+. 8B 75 B8 8D 93 B4 E3 FF FF 83 7E 14 01 0F 85 20 01 00 00 
+
+==== BB 405 (0x3A968FB9) in 7163B, out 40965B, BBs exec'd 0 ====
+	0x3A968FB9:  cmpw $0x3, 18(%esi)
+	0x3A968FBE:  jnz-32 0x3A969132
+. 405 3A968FB9 11
+. 66 83 7E 12 03 0F 85 6E 01 00 00 
+
+==== BB 406 (0x3A968FC4) in 7174B, out 41029B, BBs exec'd 0 ====
+	0x3A968FC4:  cmpw $0x20, 42(%esi)
+	0x3A968FC9:  leal 0xFFFFE3E0(%ebx), %edx
+	0x3A968FCF:  jnz-32 0x3A9690D9
+. 406 3A968FC4 17
+. 66 83 7E 2A 20 8D 93 E0 E3 FF FF 0F 85 04 01 00 00 
+
+==== BB 407 (0x3A968FD5) in 7191B, out 41109B, BBs exec'd 0 ====
+	0x3A968FD5:  movzwl 16(%esi),%eax
+	0x3A968FD9:  cmpl $0x3, %eax
+	0x3A968FDC:  jnz-32 0x3A969230
+. 407 3A968FD5 13
+. 0F B7 46 10 83 F8 03 0F 85 4E 02 00 00 
+
+==== BB 408 (0x3A968FE2) in 7204B, out 41179B, BBs exec'd 0 ====
+	0x3A968FE2:  movl -72(%ebp),%edi
+	0x3A968FE5:  movl 28(%edi),%edx
+	0x3A968FE8:  movzwl 44(%edi),%ecx
+	0x3A968FEC:  movl -52(%ebp),%edi
+	0x3A968FEF:  leal 4(%edx,%edi,1), %edi
+	0x3A968FF3:  movl %edi,-64(%ebp)
+	0x3A968FF6:  movzwl %cx,%esi
+	0x3A968FF9:  movl -52(%ebp),%edi
+	0x3A968FFC:  shll $0x5, %esi
+	0x3A968FFF:  leal (%esi,%edx,1), %eax
+	0x3A969002:  cmpl (%edi),%eax
+	0x3A969004:  jnbe-32 0x3A969087
+. 408 3A968FE2 40
+. 8B 7D B8 8B 57 1C 0F B7 4F 2C 8B 7D CC 8D 7C 3A 04 89 7D C0 0F B7 F1 8B 7D CC C1 E6 05 8D 04 16 3B 07 0F 87 7D 00 00 00 
+
+==== BB 409 (0x3A96900A) in 7244B, out 41347B, BBs exec'd 0 ====
+	0x3A96900A:  movl -64(%ebp),%esi
+	0x3A96900D:  movzwl %cx,%eax
+	0x3A969010:  shll $0x5, %eax
+	0x3A969013:  movl %esi,-68(%ebp)
+	0x3A969016:  addl %esi,%eax
+	0x3A969018:  cmpl %eax,%esi
+	0x3A96901A:  jb-8 0x3A969039
+. 409 3A96900A 18
+. 8B 75 C0 0F B7 C1 C1 E0 05 89 75 BC 01 F0 39 C6 72 1D 
+
+==== BB 410 (0x3A969039) in 7262B, out 41450B, BBs exec'd 0 ====
+	0x3A969039:  movl -68(%ebp),%edi
+	0x3A96903C:  cmpl $0x4, (%edi)
+	0x3A96903F:  jnz-8 0x3A969021
+. 410 3A969039 8
+. 8B 7D BC 83 3F 04 75 E0 
+
+==== BB 411 (0x3A969021) in 7270B, out 41523B, BBs exec'd 0 ====
+	0x3A969021:  addl $0x20, -68(%ebp)
+	0x3A969025:  movzwl %cx,%eax
+	0x3A969028:  movl -64(%ebp),%edx
+	0x3A96902B:  shll $0x5, %eax
+	0x3A96902E:  addl %edx,%eax
+	0x3A969030:  cmpl %eax,-68(%ebp)
+	0x3A969033:  jnb-32 0x3A969150
+. 411 3A969021 24
+. 83 45 BC 20 0F B7 C1 8B 55 C0 C1 E0 05 01 D0 39 45 BC 0F 83 17 01 00 00 
+
+==== BB 412 (0x3A969150) in 7294B, out 41638B, BBs exec'd 0 ====
+	0x3A969150:  movl -56(%ebp),%eax
+	0x3A969153:  leal -12(%ebp), %esp
+	0x3A969156:  popl %ebx
+	0x3A969157:  popl %esi
+	0x3A969158:  popl %edi
+	0x3A969159:  popl %ebp
+	0x3A96915A:  ret
+. 412 3A969150 11
+. 8B 45 C8 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 413 (0x3A96B023) in 7305B, out 41753B, BBs exec'd 0 ====
+	0x3A96B023:  movl %eax,0xFFFFFDCC(%ebp)
+	0x3A96B029:  incl %eax
+	0x3A96B02A:  jz-32 0x3A96B4B0
+. 413 3A96B023 13
+. 89 85 CC FD FF FF 40 0F 84 80 04 00 00 
+
+==== BB 414 (0x3A96B030) in 7318B, out 41831B, BBs exec'd 0 ====
+	0x3A96B030:  movl 0x128(%ebx),%eax
+	0x3A96B036:  movl %eax,0xFFFFFDDC(%ebp)
+	0x3A96B03C:  leal 0xFFFFFDDC(%ebp), %eax
+	0x3A96B042:  movl %eax,16(%esp,,)
+	0x3A96B046:  movl 16(%ebp),%eax
+	0x3A96B049:  movl %eax,12(%esp,,)
+	0x3A96B04D:  movl 8(%ebp),%ecx
+	0x3A96B050:  movl %ecx,8(%esp,,)
+	0x3A96B054:  movl 0xFFFFFDD8(%ebp),%eax
+	0x3A96B05A:  movl %eax,4(%esp,,)
+	0x3A96B05E:  movl 0xFFFFFDE0(%ebp),%eax
+	0x3A96B064:  movl %eax,(%esp,,)
+	0x3A96B067:  movl 0xFFFFFDB8(%ebp),%ecx
+	0x3A96B06D:  movl 0xFFFFFDCC(%ebp),%edx
+	0x3A96B073:  movl 0xFFFFFDD4(%ebp),%eax
+	0x3A96B079:  call 0x3A969790
+. 414 3A96B030 78
+. 8B 83 28 01 00 00 89 85 DC FD FF FF 8D 85 DC FD FF FF 89 44 24 10 8B 45 10 89 44 24 0C 8B 4D 08 89 4C 24 08 8B 85 D8 FD FF FF 89 44 24 04 8B 85 E0 FD FF FF 89 04 24 8B 8D B8 FD FF FF 8B 95 CC FD FF FF 8B 85 D4 FD FF FF E8 12 E7 FF FF 
+
+==== BB 415 _dl_map_object_from_fd(0x3A969790) in 7396B, out 42059B, BBs exec'd 0 ====
+	0x3A969790:  pushl %ebp
+	0x3A969791:  movl %esp,%ebp
+	0x3A969793:  pushl %edi
+	0x3A969794:  pushl %esi
+	0x3A969795:  pushl %ebx
+	0x3A969796:  subl $0x108, %esp
+	0x3A96979C:  movl %eax,-112(%ebp)
+	0x3A96979F:  leal -108(%ebp), %eax
+	0x3A9697A2:  movl %ecx,-120(%ebp)
+	0x3A9697A5:  xorl %ecx, %ecx
+	0x3A9697A7:  movl %edx,-116(%ebp)
+	0x3A9697AA:  movl $0x0, -124(%ebp)
+	0x3A9697B1:  movl %ecx,0xFFFFFF74(%ebp)
+	0x3A9697B7:  movl %eax,8(%esp,,)
+	0x3A9697BB:  movl -116(%ebp),%eax
+	0x3A9697BE:  call 0x3A97592B
+. 415 3A969790 51
+. 55 89 E5 57 56 53 81 EC 08 01 00 00 89 45 90 8D 45 94 89 4D 88 31 C9 89 55 8C C7 45 84 00 00 00 00 89 8D 74 FF FF FF 89 44 24 08 8B 45 8C E8 68 C1 00 00 
+
+==== BB 416 (0x3A9697C3) in 7447B, out 42306B, BBs exec'd 0 ====
+	0x3A9697C3:  addl $0xEEA9, %ebx
+	0x3A9697C9:  movl $0x3, (%esp,,)
+	0x3A9697D0:  movl %eax,4(%esp,,)
+	0x3A9697D4:  call 0x3A974320
+. 416 3A9697C3 22
+. 81 C3 A9 EE 00 00 C7 04 24 03 00 00 00 89 44 24 04 E8 47 AB 00 00 
+
+==== BB 417 __GI___fxstat64(0x3A974320) in 7469B, out 42396B, BBs exec'd 0 ====
+	0x3A974320:  pushl %ebp
+	0x3A974321:  movl %esp,%ebp
+	0x3A974323:  subl $0x58, %esp
+	0x3A974326:  movl %ebx,-12(%ebp)
+	0x3A974329:  call 0x3A97592B
+. 417 3A974320 14
+. 55 89 E5 83 EC 58 89 5D F4 E8 FD 15 00 00 
+
+==== BB 418 (0x3A97432E) in 7483B, out 42494B, BBs exec'd 0 ====
+	0x3A97432E:  addl $0x433E, %ebx
+	0x3A974334:  movl %esi,-8(%ebp)
+	0x3A974337:  movl 0x24(%ebx),%esi
+	0x3A97433D:  movl %edi,-4(%ebp)
+	0x3A974340:  movl (%esi),%eax
+	0x3A974342:  testl %eax,%eax
+	0x3A974344:  jnz-8 0x3A9743C0
+. 418 3A97432E 24
+. 81 C3 3E 43 00 00 89 75 F8 8B B3 24 00 00 00 89 7D FC 8B 06 85 C0 75 7A 
+
+==== BB 419 (0x3A974346) in 7507B, out 42618B, BBs exec'd 0 ====
+	0x3A974346:  movl 0x148(%ebx),%edi
+	0x3A97434C:  movl 12(%ebp),%edx
+	0x3A97434F:  movl 16(%ebp),%ecx
+	0x3A974352:  xchgl %edx, %ebx
+	0x3A974354:  movl $0xC5,%eax
+	0x3A974359:  int $0x80
+. 419 3A974346 21
+. 8B BB 48 01 00 00 8B 55 0C 8B 4D 10 87 D3 B8 C5 00 00 00 CD 80 
+
+==== BB 420 (0x3A97435B) in 7528B, out 42711B, BBs exec'd 0 ====
+	0x3A97435B:  xchgl %edx, %ebx
+	0x3A97435D:  cmpl $0xFFFFF000, %eax
+	0x3A974362:  jnbe-32 0x3A974416
+. 420 3A97435B 13
+. 87 D3 3D 00 F0 FF FF 0F 87 AE 00 00 00 
+
+==== BB 421 (0x3A974368) in 7541B, out 42787B, BBs exec'd 0 ====
+	0x3A974368:  cmpl $0xFFFFFFFF, %eax
+	0x3A97436B:  movl %eax,%edx
+	0x3A97436D:  jz-8 0x3A9743A0
+. 421 3A974368 7
+. 83 F8 FF 89 C2 74 31 
+
+==== BB 422 (0x3A97436F) in 7548B, out 42854B, BBs exec'd 0 ====
+	0x3A97436F:  testl %edx,%edx
+	0x3A974371:  jnz-8 0x3A974390
+. 422 3A97436F 4
+. 85 D2 75 1D 
+
+==== BB 423 (0x3A974373) in 7552B, out 42913B, BBs exec'd 0 ====
+	0x3A974373:  movl 16(%ebp),%ecx
+	0x3A974376:  movl 88(%ecx),%eax
+	0x3A974379:  cmpl %eax,12(%ecx)
+	0x3A97437C:  jz-8 0x3A974390
+. 423 3A974373 11
+. 8B 4D 10 8B 41 58 39 41 0C 74 12 
+
+==== BB 424 (0x3A974390) in 7563B, out 43001B, BBs exec'd 0 ====
+	0x3A974390:  movl -12(%ebp),%ebx
+	0x3A974393:  movl %edx,%eax
+	0x3A974395:  movl -8(%ebp),%esi
+	0x3A974398:  movl -4(%ebp),%edi
+	0x3A97439B:  movl %ebp,%esp
+	0x3A97439D:  popl %ebp
+	0x3A97439E:  ret
+. 424 3A974390 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 425 (0x3A9697D9) in 7578B, out 43103B, BBs exec'd 0 ====
+	0x3A9697D9:  testl %eax,%eax
+	0x3A9697DB:  js-32 0x3A969DCE
+. 425 3A9697D9 8
+. 85 C0 0F 88 ED 05 00 00 
+
+==== BB 426 (0x3A9697E1) in 7586B, out 43162B, BBs exec'd 0 ====
+	0x3A9697E1:  movl 0xFFFFF994(%ebx),%edx
+	0x3A9697E7:  testl %edx,%edx
+	0x3A9697E9:  movl %edx,-124(%ebp)
+	0x3A9697EC:  jz-8 0x3A96985D
+. 426 3A9697E1 13
+. 8B 93 94 F9 FF FF 85 D2 89 55 84 74 6F 
+
+==== BB 427 (0x3A9697EE) in 7599B, out 43250B, BBs exec'd 0 ====
+	0x3A9697EE:  movl -108(%ebp),%eax
+	0x3A9697F1:  movl -104(%ebp),%edx
+	0x3A9697F4:  movl -20(%ebp),%esi
+	0x3A9697F7:  movl %eax,0xFFFFFF6C(%ebp)
+	0x3A9697FD:  movl -16(%ebp),%edi
+	0x3A969800:  movl %edx,0xFFFFFF70(%ebp)
+	0x3A969806:  leal 0(%esi), %esi
+	0x3A969809:  leal 0(%edi,,), %edi
+	0x3A969810:  movl -124(%ebp),%ecx
+	0x3A969813:  movl 0x1C8(%ecx),%edx
+	0x3A969819:  movl 0x1C4(%ecx),%eax
+	0x3A96981F:  movl %edx,%ecx
+	0x3A969821:  xorl %edi,%ecx
+	0x3A969823:  xorl %esi,%eax
+	0x3A969825:  orl %eax,%ecx
+	0x3A969827:  jnz-8 0x3A969850
+. 427 3A9697EE 59
+. 8B 45 94 8B 55 98 8B 75 EC 89 85 6C FF FF FF 8B 7D F0 89 95 70 FF FF FF 8D 76 00 8D BC 27 00 00 00 00 8B 4D 84 8B 91 C8 01 00 00 8B 81 C4 01 00 00 89 D1 31 F9 31 F0 09 C1 75 27 
+
+==== BB 428 (0x3A969850) in 7658B, out 43456B, BBs exec'd 0 ====
+	0x3A969850:  movl -124(%ebp),%eax
+	0x3A969853:  movl 12(%eax),%eax
+	0x3A969856:  testl %eax,%eax
+	0x3A969858:  movl %eax,-124(%ebp)
+	0x3A96985B:  jnz-8 0x3A969810
+. 428 3A969850 13
+. 8B 45 84 8B 40 0C 85 C0 89 45 84 75 B3 
+
+==== BB 429 (0x3A969810) in 7671B, out 43548B, BBs exec'd 0 ====
+	0x3A969810:  movl -124(%ebp),%ecx
+	0x3A969813:  movl 0x1C8(%ecx),%edx
+	0x3A969819:  movl 0x1C4(%ecx),%eax
+	0x3A96981F:  movl %edx,%ecx
+	0x3A969821:  xorl %edi,%ecx
+	0x3A969823:  xorl %esi,%eax
+	0x3A969825:  orl %eax,%ecx
+	0x3A969827:  jnz-8 0x3A969850
+. 429 3A969810 25
+. 8B 4D 84 8B 91 C8 01 00 00 8B 81 C4 01 00 00 89 D1 31 F9 31 F0 09 C1 75 27 
+
+==== BB 430 (0x3A96985D) in 7696B, out 43667B, BBs exec'd 0 ====
+	0x3A96985D:  xorl %eax, %eax
+	0x3A96985F:  testb $0x4, 20(%ebp)
+	0x3A969863:  jnz-32 0x3A969DC6
+. 430 3A96985D 12
+. 31 C0 F6 45 14 04 0F 85 5D 05 00 00 
+
+==== BB 431 (0x3A969869) in 7708B, out 43752B, BBs exec'd 0 ====
+	0x3A969869:  testb $0x40, 0xFFFFFC14(%ebx)
+	0x3A969870:  jnz-32 0x3A96A050
+. 431 3A969869 13
+. F6 83 14 FC FF FF 40 0F 85 DA 07 00 00 
+
+==== BB 432 (0x3A969876) in 7721B, out 43822B, BBs exec'd 0 ====
+	0x3A969876:  movl -120(%ebp),%ecx
+	0x3A969879:  movl 12(%ebp),%eax
+	0x3A96987C:  addl $0x4, %ecx
+	0x3A96987F:  movl %ecx,-128(%ebp)
+	0x3A969882:  movl %eax,(%esp,,)
+	0x3A969885:  movl -112(%ebp),%edx
+	0x3A969888:  movl 16(%ebp),%ecx
+	0x3A96988B:  movl 8(%ebp),%eax
+	0x3A96988E:  call 0x3A96C9D0
+. 432 3A969876 29
+. 8B 4D 88 8B 45 0C 83 C1 04 89 4D 80 89 04 24 8B 55 90 8B 4D 10 8B 45 08 E8 3D 31 00 00 
+
+==== BB 433 (0x3A973780) in 7750B, out 43957B, BBs exec'd 0 ====
+	0x3A973780:  movl %edx,0x138(%ebx)
+	0x3A973786:  movl 0xFFFFFC18(%ebx),%eax
+	0x3A97378C:  xorl %ecx, %ecx
+	0x3A97378E:  movl %ecx,20(%esp,,)
+	0x3A973792:  movl $0xFFFFFFFF,%edx
+	0x3A973797:  movl %edx,16(%esp,,)
+	0x3A97379B:  leal -1(%eax,%edi,1), %esi
+	0x3A97379F:  negl %eax
+	0x3A9737A1:  movl $0x0, (%esp,,)
+	0x3A9737A8:  andl %eax,%esi
+	0x3A9737AA:  movl $0x22,%eax
+	0x3A9737AF:  movl %eax,12(%esp,,)
+	0x3A9737B3:  movl $0x3,%eax
+	0x3A9737B8:  movl %eax,8(%esp,,)
+	0x3A9737BC:  movl %esi,4(%esp,,)
+	0x3A9737C0:  call 0x3A974C50
+. 433 3A973780 69
+. 89 93 38 01 00 00 8B 83 18 FC FF FF 31 C9 89 4C 24 14 BA FF FF FF FF 89 54 24 10 8D 74 38 FF F7 D8 C7 04 24 00 00 00 00 21 C6 B8 22 00 00 00 89 44 24 0C B8 03 00 00 00 89 44 24 08 89 74 24 04 E8 8B 14 00 00 
+
+==== BB 434 mmap(0x3A974C50) in 7819B, out 44173B, BBs exec'd 0 ====
+	0x3A974C50:  movl %ebx,%edx
+	0x3A974C52:  movl $0x5A,%eax
+	0x3A974C57:  leal 4(%esp,,), %ebx
+	0x3A974C5B:  int $0x80
+. 434 3A974C50 13
+. 89 DA B8 5A 00 00 00 8D 5C 24 04 CD 80 
+
+==== BB 435 (0x3A974C5D) in 7832B, out 44234B, BBs exec'd 0 ====
+	0x3A974C5D:  movl %edx,%ebx
+	0x3A974C5F:  cmpl $0xFFFFF000, %eax
+	0x3A974C64:  jnbe-8 0x3A974C67
+. 435 3A974C5D 9
+. 89 D3 3D 00 F0 FF FF 77 01 
+
+==== BB 436 (0x3A974C66) in 7841B, out 44304B, BBs exec'd 0 ====
+	0x3A974C66:  ret
+. 436 3A974C66 1
+. C3 
+
+==== BB 437 (0x3A9737C5) in 7842B, out 44334B, BBs exec'd 0 ====
+	0x3A9737C5:  cmpl 0x13C(%ebx),%eax
+	0x3A9737CB:  jz-8 0x3A9737D3
+. 437 3A9737C5 8
+. 3B 83 3C 01 00 00 74 06 
+
+==== BB 438 (0x3A9737CD) in 7850B, out 44402B, BBs exec'd 0 ====
+	0x3A9737CD:  movl %eax,0x138(%ebx)
+	0x3A9737D3:  movl 0x138(%ebx),%edx
+	0x3A9737D9:  addl %esi,%eax
+	0x3A9737DB:  movl %eax,0x13C(%ebx)
+	0x3A9737E1:  movl %edx,0x140(%ebx)
+	0x3A9737E7:  movl %edx,%eax
+	0x3A9737E9:  leal (%edi,%edx,1), %edx
+	0x3A9737EC:  movl %edx,0x138(%ebx)
+	0x3A9737F2:  movl -12(%ebp),%ebx
+	0x3A9737F5:  movl -8(%ebp),%esi
+	0x3A9737F8:  movl -4(%ebp),%edi
+	0x3A9737FB:  movl %ebp,%esp
+	0x3A9737FD:  popl %ebp
+	0x3A9737FE:  ret
+. 438 3A9737CD 50
+. 89 83 38 01 00 00 8B 93 38 01 00 00 01 F0 89 83 3C 01 00 00 89 93 40 01 00 00 89 D0 8D 14 17 89 93 38 01 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 439 (0x3A96CAB0) in 7900B, out 44603B, BBs exec'd 0 ====
+	0x3A96CAB0:  movl %eax,%edx
+	0x3A96CAB2:  movl 12(%eax),%eax
+	0x3A96CAB5:  testl %eax,%eax
+	0x3A96CAB7:  jz-8 0x3A96CAC9
+. 439 3A96CAB0 9
+. 89 C2 8B 40 0C 85 C0 74 10 
+
+==== BB 440 (0x3A96CAB9) in 7909B, out 44685B, BBs exec'd 0 ====
+	0x3A96CAB9:  leal 0(%esi,,), %esi
+	0x3A96CAC0:  movl %eax,%edx
+	0x3A96CAC2:  movl 12(%eax),%eax
+	0x3A96CAC5:  testl %eax,%eax
+	0x3A96CAC7:  jnz-8 0x3A96CAC0
+. 440 3A96CAB9 16
+. 8D B4 26 00 00 00 00 89 C2 8B 40 0C 85 C0 75 F7 
+
+==== BB 441 (0x3A96CAC9) in 7925B, out 44780B, BBs exec'd 0 ====
+	0x3A96CAC9:  movl -32(%ebp),%eax
+	0x3A96CACC:  movl %edx,16(%eax)
+	0x3A96CACF:  movl %eax,12(%edx)
+	0x3A96CAD2:  movl 0x1B0(%eax),%edx
+	0x3A96CAD8:  movl 0xFFFFF994(%ebx),%eax
+	0x3A96CADE:  addl $0x150, %eax
+	0x3A96CAE3:  movl %eax,(%edx,%ecx,4)
+	0x3A96CAE6:  movl $0x1,%ecx
+	0x3A96CAEB:  incl 0xFFFFF998(%ebx)
+	0x3A96CAF1:  addl $0x1, 0xFFFFF9C4(%ebx)
+	0x3A96CAF8:  adcl $0x0, 0xFFFFF9C8(%ebx)
+	0x3A96CAFF:  testl %edi,%edi
+	0x3A96CB01:  jz-32 0x3A96CC58
+. 441 3A96CAC9 62
+. 8B 45 E0 89 50 10 89 42 0C 8B 90 B0 01 00 00 8B 83 94 F9 FF FF 05 50 01 00 00 89 04 8A B9 01 00 00 00 FF 83 98 F9 FF FF 83 83 C4 F9 FF FF 01 83 93 C8 F9 FF FF 00 85 FF 0F 84 51 01 00 00 
+
+==== BB 442 (0x3A96CB07) in 7987B, out 45004B, BBs exec'd 0 ====
+	0x3A96CB07:  movl 0x160(%edi),%eax
+	0x3A96CB0D:  testl %eax,%eax
+	0x3A96CB0F:  jz-8 0x3A96CB2C
+. 442 3A96CB07 10
+. 8B 87 60 01 00 00 85 C0 74 1B 
+
+==== BB 443 (0x3A96CB36) in 7997B, out 45079B, BBs exec'd 0 ====
+	0x3A96CB36:  cmpl (%edx),%eax
+	0x3A96CB38:  jz-8 0x3A96CB3D
+. 443 3A96CB36 4
+. 3B 02 74 03 
+
+==== BB 444 (0x3A96CB3D) in 8001B, out 45141B, BBs exec'd 0 ====
+	0x3A96CB3D:  movl -32(%ebp),%eax
+	0x3A96CB40:  movl -32(%ebp),%edx
+	0x3A96CB43:  movl -16(%ebp),%ecx
+	0x3A96CB46:  addl $0x150, %eax
+	0x3A96CB4B:  movl %eax,0x1B4(%edx)
+	0x3A96CB51:  movzbl (%ecx),%ecx
+	0x3A96CB54:  testb %cl,%cl
+	0x3A96CB56:  movb %cl,-33(%ebp)
+	0x3A96CB59:  jz-32 0x3A96CC26
+. 444 3A96CB3D 34
+. 8B 45 E0 8B 55 E0 8B 4D F0 05 50 01 00 00 89 82 B4 01 00 00 0F B6 09 84 C9 88 4D DF 0F 84 C7 00 00 00 
+
+==== BB 445 (0x3A96CB5F) in 8035B, out 45287B, BBs exec'd 0 ====
+	0x3A96CB5F:  movl -16(%ebp),%eax
+	0x3A96CB62:  movl %eax,(%esp,,)
+	0x3A96CB65:  call 0x3A9752D0
+. 445 3A96CB5F 11
+. 8B 45 F0 89 04 24 E8 66 87 00 00 
+
+==== BB 446 (0x3A96CB6A) in 8046B, out 45355B, BBs exec'd 0 ====
+	0x3A96CB6A:  incl %eax
+	0x3A96CB6B:  cmpb $0x2F, -33(%ebp)
+	0x3A96CB6F:  movl %eax,-40(%ebp)
+	0x3A96CB72:  jz-32 0x3A96CC60
+. 446 3A96CB6A 14
+. 40 80 7D DF 2F 89 45 D8 0F 84 E8 00 00 00 
+
+==== BB 447 (0x3A96CC60) in 8060B, out 45439B, BBs exec'd 0 ====
+	0x3A96CC60:  movl %eax,(%esp,,)
+	0x3A96CC63:  call 0x3A96581C
+. 447 3A96CC60 8
+. 89 04 24 E8 B4 8B FF FF 
+
+==== BB 448 (0x3A96CC68) in 8068B, out 45494B, BBs exec'd 0 ====
+	0x3A96CC68:  testl %eax,%eax
+	0x3A96CC6A:  movl %eax,%edi
+	0x3A96CC6C:  jnz-32 0x3A96CBEB
+. 448 3A96CC68 10
+. 85 C0 89 C7 0F 85 79 FF FF FF 
+
+==== BB 449 (0x3A96CBEB) in 8078B, out 45560B, BBs exec'd 0 ====
+	0x3A96CBEB:  movl -40(%ebp),%edx
+	0x3A96CBEE:  movl %edx,8(%esp,,)
+	0x3A96CBF2:  movl -16(%ebp),%ecx
+	0x3A96CBF5:  movl %eax,(%esp,,)
+	0x3A96CBF8:  movl %ecx,4(%esp,,)
+	0x3A96CBFC:  call 0x3A975770
+. 449 3A96CBEB 22
+. 8B 55 D8 89 54 24 08 8B 4D F0 89 04 24 89 4C 24 04 E8 6F 8B 00 00 
+
+==== BB 450 (0x3A96CC01) in 8100B, out 45664B, BBs exec'd 0 ====
+	0x3A96CC01:  jmp-8 0x3A96CC10
+. 450 3A96CC01 2
+. EB 0D 
+
+==== BB 451 (0x3A96CC10) in 8102B, out 45692B, BBs exec'd 0 ====
+	0x3A96CC10:  decl %eax
+	0x3A96CC11:  cmpb $0x2F, (%eax)
+	0x3A96CC14:  jnz-8 0x3A96CC10
+. 451 3A96CC10 6
+. 48 80 38 2F 75 FA 
+
+==== BB 452 (0x3A96CC16) in 8108B, out 45760B, BBs exec'd 0 ====
+	0x3A96CC16:  cmpl %edi,%eax
+	0x3A96CC18:  jz-8 0x3A96CC83
+. 452 3A96CC16 4
+. 39 F8 74 69 
+
+==== BB 453 (0x3A96CC1A) in 8112B, out 45817B, BBs exec'd 0 ====
+	0x3A96CC1A:  movb $0x0, (%eax)
+	0x3A96CC1D:  movl -32(%ebp),%eax
+	0x3A96CC20:  movl %edi,0x190(%eax)
+	0x3A96CC26:  movl -32(%ebp),%eax
+	0x3A96CC29:  addl $0x2C, %esp
+	0x3A96CC2C:  popl %ebx
+	0x3A96CC2D:  popl %esi
+	0x3A96CC2E:  popl %edi
+	0x3A96CC2F:  popl %ebp
+	0x3A96CC30:  ret 4
+. 453 3A96CC1A 25
+. C6 00 00 8B 45 E0 89 B8 90 01 00 00 8B 45 E0 83 C4 2C 5B 5E 5F 5D C2 04 00 
+
+==== BB 454 (0x3A969893) in 8137B, out 45981B, BBs exec'd 0 ====
+	0x3A969893:  movl %eax,-124(%ebp)
+	0x3A969896:  subl $0x4, %esp
+	0x3A969899:  testl %eax,%eax
+	0x3A96989B:  leal 0xFFFFE494(%ebx), %edx
+	0x3A9698A1:  jz-32 0x3A969D74
+. 454 3A969893 20
+. 89 45 84 83 EC 04 85 C0 8D 93 94 E4 FF FF 0F 84 CD 04 00 00 
+
+==== BB 455 (0x3A9698A7) in 8157B, out 46082B, BBs exec'd 0 ====
+	0x3A9698A7:  movl -128(%ebp),%esi
+	0x3A9698AA:  movl -124(%ebp),%edx
+	0x3A9698AD:  movl 24(%esi),%eax
+	0x3A9698B0:  movzwl 16(%esi),%ecx
+	0x3A9698B4:  movl %eax,0x148(%edx)
+	0x3A9698BA:  movzwl 44(%esi),%eax
+	0x3A9698BE:  movl %ecx,0xFFFFFF78(%ebp)
+	0x3A9698C4:  movl -120(%ebp),%ecx
+	0x3A9698C7:  movw %ax,0x14C(%edx)
+	0x3A9698CE:  movl -128(%ebp),%edx
+	0x3A9698D1:  movzwl 44(%esi),%esi
+	0x3A9698D5:  movl 28(%edx),%eax
+	0x3A9698D8:  shll $0x5, %esi
+	0x3A9698DB:  movl %esi,0xFFFFFF28(%ebp)
+	0x3A9698E1:  leal (%esi,%eax,1), %edx
+	0x3A9698E4:  leal 4(%eax,%ecx,1), %eax
+	0x3A9698E8:  cmpl (%ecx),%edx
+	0x3A9698EA:  movl %eax,0xFFFFFF7C(%ebp)
+	0x3A9698F0:  jnbe-32 0x3A969D15
+. 455 3A9698A7 79
+. 8B 75 80 8B 55 84 8B 46 18 0F B7 4E 10 89 82 48 01 00 00 0F B7 46 2C 89 8D 78 FF FF FF 8B 4D 88 66 89 82 4C 01 00 00 8B 55 80 0F B7 76 2C 8B 42 1C C1 E6 05 89 B5 28 FF FF FF 8D 14 06 8D 44 08 04 3B 11 89 85 7C FF FF FF 0F 87 1F 04 00 00 
+
+==== BB 456 (0x3A9698F6) in 8236B, out 46339B, BBs exec'd 0 ====
+	0x3A9698F6:  movl %esp,0xFFFFFF64(%ebp)
+	0x3A9698FC:  movl -124(%ebp),%ecx
+	0x3A9698FF:  movl $0x7,%edi
+	0x3A969904:  movl %edi,0xFFFFFF68(%ebp)
+	0x3A96990A:  movb $0x0, 0xFFFFFF5B(%ebp)
+	0x3A969911:  movzwl 0x14C(%ecx),%edx
+	0x3A969918:  leal -3(%edx,%edx,2), %eax
+	0x3A96991C:  leal 0x28(,%eax,8), %eax
+	0x3A969923:  subl %eax,%esp
+	0x3A969925:  shll $0x5, %edx
+	0x3A969928:  leal 67(%esp,,), %esi
+	0x3A96992C:  andl $0xFFFFFFF0, %esi
+	0x3A96992F:  movl %esi,0xFFFFFF60(%ebp)
+	0x3A969935:  xorl %esi, %esi
+	0x3A969937:  movl %esi,0xFFFFFF5C(%ebp)
+	0x3A96993D:  movl 0xFFFFFF7C(%ebp),%esi
+	0x3A969943:  movl %esi,%eax
+	0x3A969945:  addl %edx,%eax
+	0x3A969947:  cmpl %eax,%esi
+	0x3A969949:  jnb-32 0x3A969DD6
+. 456 3A9698F6 89
+. 89 A5 64 FF FF FF 8B 4D 84 BF 07 00 00 00 89 BD 68 FF FF FF C6 85 5B FF FF FF 00 0F B7 91 4C 01 00 00 8D 44 52 FD 8D 04 C5 28 00 00 00 29 C4 C1 E2 05 8D 74 24 43 83 E6 F0 89 B5 60 FF FF FF 31 F6 89 B5 5C FF FF FF 8B B5 7C FF FF FF 89 F0 01 D0 39 C6 0F 83 87 04 00 00 
+
+==== BB 457 (0x3A96994F) in 8325B, out 46615B, BBs exec'd 0 ====
+	0x3A96994F:  movl %eax,0xFFFFFF54(%ebp)
+	0x3A969955:  movl 0xFFFFFF60(%ebp),%eax
+	0x3A96995B:  movl %eax,0xFFFFFF2C(%ebp)
+	0x3A969961:  jmp-8 0x3A96998B
+. 457 3A96994F 20
+. 89 85 54 FF FF FF 8B 85 60 FF FF FF 89 85 2C FF FF FF EB 28 
+
+==== BB 458 (0x3A96998B) in 8345B, out 46691B, BBs exec'd 0 ====
+	0x3A96998B:  movl (%esi),%eax
+	0x3A96998D:  cmpl $0x6, %eax
+	0x3A969990:  jz-8 0x3A9699C5
+. 458 3A96998B 7
+. 8B 06 83 F8 06 74 33 
+
+==== BB 459 (0x3A969992) in 8352B, out 46758B, BBs exec'd 0 ====
+	0x3A969992:  cmpl $0x6, %eax
+	0x3A969995:  jbe-8 0x3A969963
+. 459 3A969992 5
+. 83 F8 06 76 CC 
+
+==== BB 460 (0x3A969963) in 8357B, out 46815B, BBs exec'd 0 ====
+	0x3A969963:  cmpl $0x1, %eax
+	0x3A969966:  jz-32 0x3A969C3A
+. 460 3A969963 9
+. 83 F8 01 0F 84 CE 02 00 00 
+
+==== BB 461 (0x3A969C3A) in 8366B, out 46872B, BBs exec'd 0 ====
+	0x3A969C3A:  movl 0xFFFFFC18(%ebx),%eax
+	0x3A969C40:  movl 28(%esi),%edx
+	0x3A969C43:  decl %eax
+	0x3A969C44:  testl %eax,%edx
+	0x3A969C46:  jnz-32 0x3A96A1E5
+. 461 3A969C3A 18
+. 8B 83 18 FC FF FF 8B 56 1C 48 85 D0 0F 85 99 05 00 00 
+
+==== BB 462 (0x3A969C4C) in 8384B, out 46965B, BBs exec'd 0 ====
+	0x3A969C4C:  movl 8(%esi),%eax
+	0x3A969C4F:  decl %edx
+	0x3A969C50:  movl 4(%esi),%ecx
+	0x3A969C53:  movl %eax,0xFFFFFF50(%ebp)
+	0x3A969C59:  subl %ecx,%eax
+	0x3A969C5B:  testl %edx,%eax
+	0x3A969C5D:  jnz-32 0x3A96A1F0
+. 462 3A969C4C 23
+. 8B 46 08 4A 8B 4E 04 89 85 50 FF FF FF 29 C8 85 C2 0F 85 8D 05 00 00 
+
+==== BB 463 (0x3A969C63) in 8407B, out 47080B, BBs exec'd 0 ====
+	0x3A969C63:  incl 0xFFFFFF5C(%ebp)
+	0x3A969C69:  movl 0xFFFFFF2C(%ebp),%edi
+	0x3A969C6F:  movl 0xFFFFFF50(%ebp),%eax
+	0x3A969C75:  leal 24(%edi), %ecx
+	0x3A969C78:  movl %ecx,0xFFFFFF2C(%ebp)
+	0x3A969C7E:  movl %edx,%ecx
+	0x3A969C80:  notl %ecx
+	0x3A969C82:  andl %eax,%ecx
+	0x3A969C84:  movl 0xFFFFFF2C(%ebp),%eax
+	0x3A969C8A:  movl %ecx,-24(%eax)
+	0x3A969C8D:  movl 8(%esi),%edx
+	0x3A969C90:  movl 16(%esi),%eax
+	0x3A969C93:  addl %edx,%eax
+	0x3A969C95:  movl 0xFFFFFC18(%ebx),%edx
+	0x3A969C9B:  leal -1(%edx,%eax,1), %eax
+	0x3A969C9F:  negl %edx
+	0x3A969CA1:  andl %edx,%eax
+	0x3A969CA3:  movl 0xFFFFFF2C(%ebp),%edx
+	0x3A969CA9:  movl %eax,-20(%edx)
+	0x3A969CAC:  movl 16(%esi),%eax
+	0x3A969CAF:  addl 8(%esi),%eax
+	0x3A969CB2:  movl %eax,-16(%edx)
+	0x3A969CB5:  movl 20(%esi),%eax
+	0x3A969CB8:  addl 8(%esi),%eax
+	0x3A969CBB:  movl %eax,-12(%edx)
+	0x3A969CBE:  movl 28(%esi),%eax
+	0x3A969CC1:  decl %eax
+	0x3A969CC2:  notl %eax
+	0x3A969CC4:  andl 4(%esi),%eax
+	0x3A969CC7:  cmpl $0x1, 0xFFFFFF5C(%ebp)
+	0x3A969CCE:  movl %eax,-8(%edx)
+	0x3A969CD1:  jbe-8 0x3A969CDF
+. 463 3A969C63 112
+. FF 85 5C FF FF FF 8B BD 2C FF FF FF 8B 85 50 FF FF FF 8D 4F 18 89 8D 2C FF FF FF 89 D1 F7 D1 21 C1 8B 85 2C FF FF FF 89 48 E8 8B 56 08 8B 46 10 01 D0 8B 93 18 FC FF FF 8D 44 02 FF F7 DA 21 D0 8B 95 2C FF FF FF 89 42 EC 8B 46 10 03 46 08 89 42 F0 8B 46 14 03 46 08 89 42 F4 8B 46 1C 48 F7 D0 23 46 04 83 BD 5C FF FF FF 01 89 42 F8 76 0C 
+
+==== BB 464 (0x3A969CDF) in 8519B, out 47458B, BBs exec'd 0 ====
+	0x3A969CDF:  movl 24(%esi),%ecx
+	0x3A969CE2:  movl $0x73516240,%eax
+	0x3A969CE7:  andl $0x7, %ecx
+	0x3A969CEA:  shll $0x2, %ecx
+	0x3A969CED:  sarl %cl, %eax
+	0x3A969CEF:  andl $0xF, %eax
+	0x3A969CF2:  movl %eax,20(%edi)
+	0x3A969CF5:  jmp 0x3A969980
+. 464 3A969CDF 27
+. 8B 4E 18 B8 40 62 51 73 83 E1 07 C1 E1 02 D3 F8 83 E0 0F 89 47 14 E9 86 FC FF FF 
+
+==== BB 465 (0x3A969980) in 8546B, out 47578B, BBs exec'd 0 ====
+	0x3A969980:  addl $0x20, %esi
+	0x3A969983:  cmpl 0xFFFFFF54(%ebp),%esi
+	0x3A969989:  jnb-8 0x3A9699DC
+. 465 3A969980 11
+. 83 C6 20 3B B5 54 FF FF FF 73 51 
+
+==== BB 466 (0x3A969CD3) in 8557B, out 47656B, BBs exec'd 0 ====
+	0x3A969CD3:  cmpl %ecx,-20(%edi)
+	0x3A969CD6:  jz-8 0x3A969CDF
+. 466 3A969CD3 5
+. 39 4F EC 74 07 
+
+==== BB 467 (0x3A96996C) in 8562B, out 47719B, BBs exec'd 0 ====
+	0x3A96996C:  cmpl $0x2, %eax
+	0x3A96996F:  jz-32 0x3A969CFA
+. 467 3A96996C 9
+. 83 F8 02 0F 84 85 03 00 00 
+
+==== BB 468 (0x3A969CFA) in 8571B, out 47776B, BBs exec'd 0 ====
+	0x3A969CFA:  movl 8(%esi),%eax
+	0x3A969CFD:  movl -124(%ebp),%edx
+	0x3A969D00:  movl %eax,8(%edx)
+	0x3A969D03:  movl 20(%esi),%eax
+	0x3A969D06:  shrl $0x3, %eax
+	0x3A969D09:  movw %ax,0x14E(%edx)
+	0x3A969D10:  jmp 0x3A969980
+. 468 3A969CFA 27
+. 8B 46 08 8B 55 84 89 42 08 8B 46 14 C1 E8 03 66 89 82 4E 01 00 00 E9 6B FC FF FF 
+
+==== BB 469 (0x3A969997) in 8598B, out 47888B, BBs exec'd 0 ====
+	0x3A969997:  cmpl $0x6474E551, %eax
+	0x3A96999C:  jz-32 0x3A969C2C
+. 469 3A969997 11
+. 3D 51 E5 74 64 0F 84 8A 02 00 00 
+
+==== BB 470 (0x3A969C2C) in 8609B, out 47948B, BBs exec'd 0 ====
+	0x3A969C2C:  movl 24(%esi),%ecx
+	0x3A969C2F:  movl %ecx,0xFFFFFF68(%ebp)
+	0x3A969C35:  jmp 0x3A969980
+. 470 3A969C2C 14
+. 8B 4E 18 89 8D 68 FF FF FF E9 46 FD FF FF 
+
+==== BB 471 (0x3A9699DC) in 8623B, out 48008B, BBs exec'd 0 ====
+	0x3A9699DC:  movl 0xFFFFFF5C(%ebp),%eax
+	0x3A9699E2:  testl %eax,%eax
+	0x3A9699E4:  jz-32 0x3A969DD6
+. 471 3A9699DC 14
+. 8B 85 5C FF FF FF 85 C0 0F 84 EC 03 00 00 
+
+==== BB 472 (0x3A9699EA) in 8637B, out 48083B, BBs exec'd 0 ====
+	0x3A9699EA:  movl 0xFFFFFF5C(%ebp),%ecx
+	0x3A9699F0:  movl 0xFFFFFF60(%ebp),%edi
+	0x3A9699F6:  leal (%ecx,%ecx,2), %eax
+	0x3A9699F9:  leal (%edi,%eax,8), %eax
+	0x3A9699FC:  movl (%edi),%edx
+	0x3A9699FE:  movl -12(%eax),%esi
+	0x3A969A01:  movl %eax,0xFFFFFF4C(%ebp)
+	0x3A969A07:  subl %edx,%esi
+	0x3A969A09:  cmpl $0x3, 0xFFFFFF78(%ebp)
+	0x3A969A10:  movl %esi,0xFFFFFF28(%ebp)
+	0x3A969A16:  jnz-32 0x3A96A1FB
+. 472 3A9699EA 50
+. 8B 8D 5C FF FF FF 8B BD 60 FF FF FF 8D 04 49 8D 04 C7 8B 17 8B 70 F4 89 85 4C FF FF FF 29 D6 83 BD 78 FF FF FF 03 89 B5 28 FF FF FF 0F 85 DF 07 00 00 
+
+==== BB 473 (0x3A969A1C) in 8687B, out 48261B, BBs exec'd 0 ====
+	0x3A969A1C:  movl 0xFFFFFD78(%ebx),%eax
+	0x3A969A22:  andl %eax,%edx
+	0x3A969A24:  movl 16(%edi),%eax
+	0x3A969A27:  movl %eax,20(%esp,,)
+	0x3A969A2B:  movl -116(%ebp),%eax
+	0x3A969A2E:  movl %eax,16(%esp,,)
+	0x3A969A32:  movl $0x2,%eax
+	0x3A969A37:  movl %eax,12(%esp,,)
+	0x3A969A3B:  movl 20(%edi),%eax
+	0x3A969A3E:  movl %edx,(%esp,,)
+	0x3A969A41:  movl %esi,4(%esp,,)
+	0x3A969A45:  movl %eax,8(%esp,,)
+	0x3A969A49:  call 0x3A974C50
+. 473 3A969A1C 50
+. 8B 83 78 FD FF FF 21 C2 8B 47 10 89 44 24 14 8B 45 8C 89 44 24 10 B8 02 00 00 00 89 44 24 0C 8B 47 14 89 14 24 89 74 24 04 89 44 24 08 E8 02 B2 00 00 
+
+==== BB 474 (0x3A969A4E) in 8737B, out 48446B, BBs exec'd 0 ====
+	0x3A969A4E:  movl -124(%ebp),%edx
+	0x3A969A51:  movl %eax,0x194(%edx)
+	0x3A969A57:  incl %eax
+	0x3A969A58:  jz-32 0x3A969BF6
+. 474 3A969A4E 16
+. 8B 55 84 89 82 94 01 00 00 40 0F 84 98 01 00 00 
+
+==== BB 475 (0x3A969A5E) in 8753B, out 48537B, BBs exec'd 0 ====
+	0x3A969A5E:  movl 0xFFFFFC40(%ebx),%esi
+	0x3A969A64:  testl %esi,%esi
+	0x3A969A66:  jnz-32 0x3A96A06A
+. 475 3A969A5E 14
+. 8B B3 40 FC FF FF 85 F6 0F 85 FE 05 00 00 
+
+==== BB 476 (0x3A96A06A) in 8767B, out 48612B, BBs exec'd 0 ====
+	0x3A96A06A:  movl $0x3,%ecx
+	0x3A96A06F:  movl %ecx,8(%esp,,)
+	0x3A96A073:  movl 0xFFFFFF28(%ebp),%ecx
+	0x3A96A079:  movl %ecx,4(%esp,,)
+	0x3A96A07D:  movl -124(%ebp),%esi
+	0x3A96A080:  movl 0x194(%esi),%eax
+	0x3A96A086:  movl %eax,(%esp,,)
+	0x3A96A089:  call 0x3A974D10
+. 476 3A96A06A 36
+. B9 03 00 00 00 89 4C 24 08 8B 8D 28 FF FF FF 89 4C 24 04 8B 75 84 8B 86 94 01 00 00 89 04 24 E8 82 AC 00 00 
+
+==== BB 477 madvise(0x3A974D10) in 8803B, out 48741B, BBs exec'd 0 ====
+	0x3A974D10:  pushl %ebx
+	0x3A974D11:  movl 16(%esp,,),%edx
+	0x3A974D15:  movl 12(%esp,,),%ecx
+	0x3A974D19:  movl 8(%esp,,),%ebx
+	0x3A974D1D:  movl $0xDB,%eax
+	0x3A974D22:  int $0x80
+. 477 3A974D10 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 DB 00 00 00 CD 80 
+
+==== BB 478 (0x3A974D24) in 8823B, out 48837B, BBs exec'd 0 ====
+	0x3A974D24:  popl %ebx
+	0x3A974D25:  cmpl $0xFFFFF001, %eax
+	0x3A974D2A:  jnb-8 0x3A974D2D
+. 478 3A974D24 8
+. 5B 3D 01 F0 FF FF 73 01 
+
+==== BB 479 (0x3A974D2C) in 8831B, out 48916B, BBs exec'd 0 ====
+	0x3A974D2C:  ret
+. 479 3A974D2C 1
+. C3 
+
+==== BB 480 (0x3A96A08E) in 8832B, out 48946B, BBs exec'd 0 ====
+	0x3A96A08E:  jmp 0x3A969A6C
+. 480 3A96A08E 5
+. E9 D9 F9 FF FF 
+
+==== BB 481 (0x3A969A6C) in 8837B, out 48974B, BBs exec'd 0 ====
+	0x3A969A6C:  movl -124(%ebp),%edx
+	0x3A969A6F:  movl 0xFFFFFF28(%ebp),%ecx
+	0x3A969A75:  movl -124(%ebp),%esi
+	0x3A969A78:  movl 0x194(%edx),%eax
+	0x3A969A7E:  leal (%ecx,%eax,1), %edx
+	0x3A969A81:  movl %edx,0x198(%esi)
+	0x3A969A87:  movl (%edi),%edx
+	0x3A969A89:  subl %edx,%eax
+	0x3A969A8B:  cmpb $0x0, 0xFFFFFF5B(%ebp)
+	0x3A969A92:  movl %eax,(%esi)
+	0x3A969A94:  jnz-32 0x3A96A093
+. 481 3A969A6C 46
+. 8B 55 84 8B 8D 28 FF FF FF 8B 75 84 8B 82 94 01 00 00 8D 14 01 89 96 98 01 00 00 8B 17 29 D0 80 BD 5B FF FF FF 00 89 06 0F 85 F9 05 00 00 
+
+==== BB 482 (0x3A969A9A) in 8883B, out 49142B, BBs exec'd 0 ====
+	0x3A969A9A:  movl -124(%ebp),%ecx
+	0x3A969A9D:  movl 0x144(%ecx),%eax
+	0x3A969AA3:  testl %eax,%eax
+	0x3A969AA5:  jnz-8 0x3A969AF0
+. 482 3A969A9A 13
+. 8B 4D 84 8B 81 44 01 00 00 85 C0 75 49 
+
+==== BB 483 (0x3A969AA7) in 8896B, out 49230B, BBs exec'd 0 ====
+	0x3A969AA7:  movl -128(%ebp),%eax
+	0x3A969AAA:  movl 16(%edi),%esi
+	0x3A969AAD:  movl 28(%eax),%ecx
+	0x3A969AB0:  movl %esi,0xFFFFFF48(%ebp)
+	0x3A969AB6:  cmpl %ecx,%esi
+	0x3A969AB8:  jnbe-8 0x3A969AF0
+. 483 3A969AA7 19
+. 8B 45 80 8B 77 10 8B 48 1C 89 B5 48 FF FF FF 39 CE 77 36 
+
+==== BB 484 (0x3A969ABA) in 8915B, out 49341B, BBs exec'd 0 ====
+	0x3A969ABA:  movzwl 44(%eax),%eax
+	0x3A969ABE:  movl (%edi),%esi
+	0x3A969AC0:  movl 4(%edi),%edx
+	0x3A969AC3:  movl %esi,0xFFFFFF44(%ebp)
+	0x3A969AC9:  shll $0x5, %eax
+	0x3A969ACC:  subl %esi,%edx
+	0x3A969ACE:  addl 0xFFFFFF48(%ebp),%edx
+	0x3A969AD4:  leal (%eax,%ecx,1), %eax
+	0x3A969AD7:  cmpl %eax,%edx
+	0x3A969AD9:  jb-8 0x3A969AF0
+. 484 3A969ABA 33
+. 0F B7 40 2C 8B 37 8B 57 04 89 B5 44 FF FF FF C1 E0 05 29 F2 03 95 48 FF FF FF 8D 04 08 39 C2 72 15 
+
+==== BB 485 (0x3A969ADB) in 8948B, out 49491B, BBs exec'd 0 ====
+	0x3A969ADB:  movl %esi,%eax
+	0x3A969ADD:  movl -124(%ebp),%edx
+	0x3A969AE0:  addl %ecx,%eax
+	0x3A969AE2:  movl 0xFFFFFF48(%ebp),%ecx
+	0x3A969AE8:  subl %ecx,%eax
+	0x3A969AEA:  movl %eax,0x144(%edx)
+	0x3A969AF0:  movl 12(%edi),%ecx
+	0x3A969AF3:  movl 8(%edi),%edx
+	0x3A969AF6:  cmpl %edx,%ecx
+	0x3A969AF8:  jbe-32 0x3A969B8D
+. 485 3A969ADB 35
+. 89 F0 8B 55 84 01 C8 8B 8D 48 FF FF FF 29 C8 89 82 44 01 00 00 8B 4F 0C 8B 57 08 39 D1 0F 86 8F 00 00 00 
+
+==== BB 486 (0x3A969B8D) in 8983B, out 49635B, BBs exec'd 0 ====
+	0x3A969B8D:  movl 0xFFFFFF5C(%ebp),%esi
+	0x3A969B93:  addl $0x18, %edi
+	0x3A969B96:  movl 0xFFFFFF60(%ebp),%edx
+	0x3A969B9C:  leal (%esi,%esi,2), %eax
+	0x3A969B9F:  leal (%edx,%eax,8), %eax
+	0x3A969BA2:  cmpl %eax,%edi
+	0x3A969BA4:  jnb-32 0x3A969DE4
+. 486 3A969B8D 29
+. 8B B5 5C FF FF FF 83 C7 18 8B 95 60 FF FF FF 8D 04 76 8D 04 C2 39 C7 0F 83 3A 02 00 00 
+
+==== BB 487 (0x3A969BAA) in 9012B, out 49758B, BBs exec'd 0 ====
+	0x3A969BAA:  movl (%edi),%eax
+	0x3A969BAC:  cmpl %eax,4(%edi)
+	0x3A969BAF:  jbe-32 0x3A969A9A
+. 487 3A969BAA 11
+. 8B 07 39 47 04 0F 86 E5 FE FF FF 
+
+==== BB 488 (0x3A969BB5) in 9023B, out 49830B, BBs exec'd 0 ====
+	0x3A969BB5:  movl 16(%edi),%eax
+	0x3A969BB8:  movl %eax,20(%esp,,)
+	0x3A969BBC:  movl -116(%ebp),%esi
+	0x3A969BBF:  movl $0x12,%eax
+	0x3A969BC4:  movl %eax,12(%esp,,)
+	0x3A969BC8:  movl %esi,16(%esp,,)
+	0x3A969BCC:  movl 20(%edi),%eax
+	0x3A969BCF:  movl %eax,8(%esp,,)
+	0x3A969BD3:  movl 4(%edi),%eax
+	0x3A969BD6:  movl (%edi),%esi
+	0x3A969BD8:  subl %esi,%eax
+	0x3A969BDA:  movl %eax,4(%esp,,)
+	0x3A969BDE:  movl -124(%ebp),%edx
+	0x3A969BE1:  movl (%edi),%eax
+	0x3A969BE3:  movl (%edx),%ecx
+	0x3A969BE5:  addl %ecx,%eax
+	0x3A969BE7:  movl %eax,(%esp,,)
+	0x3A969BEA:  call 0x3A974C50
+. 488 3A969BB5 58
+. 8B 47 10 89 44 24 14 8B 75 8C B8 12 00 00 00 89 44 24 0C 89 74 24 10 8B 47 14 89 44 24 08 8B 47 04 8B 37 29 F0 89 44 24 04 8B 55 84 8B 07 8B 0A 01 C8 89 04 24 E8 61 B0 00 00 
+
+==== BB 489 (0x3A969BEF) in 9081B, out 50046B, BBs exec'd 0 ====
+	0x3A969BEF:  incl %eax
+	0x3A969BF0:  jnz-32 0x3A969A9A
+. 489 3A969BEF 7
+. 40 0F 85 A4 FE FF FF 
+
+==== BB 490 (0x3A969AF0) in 9088B, out 50108B, BBs exec'd 0 ====
+	0x3A969AF0:  movl 12(%edi),%ecx
+	0x3A969AF3:  movl 8(%edi),%edx
+	0x3A969AF6:  cmpl %edx,%ecx
+	0x3A969AF8:  jbe-32 0x3A969B8D
+. 490 3A969AF0 14
+. 8B 4F 0C 8B 57 08 39 D1 0F 86 8F 00 00 00 
+
+==== BB 491 (0x3A969AFE) in 9102B, out 50190B, BBs exec'd 0 ====
+	0x3A969AFE:  movl -124(%ebp),%esi
+	0x3A969B01:  movl 0xFFFFFC18(%ebx),%eax
+	0x3A969B07:  movl (%esi),%esi
+	0x3A969B09:  addl %esi,%edx
+	0x3A969B0B:  addl %esi,%ecx
+	0x3A969B0D:  movl %edx,0xFFFFFF40(%ebp)
+	0x3A969B13:  leal -1(%eax,%edx,1), %edx
+	0x3A969B17:  negl %eax
+	0x3A969B19:  movl %ecx,0xFFFFFF3C(%ebp)
+	0x3A969B1F:  andl %eax,%edx
+	0x3A969B21:  cmpl %edx,%ecx
+	0x3A969B23:  movl %edx,0xFFFFFF38(%ebp)
+	0x3A969B29:  jnb-8 0x3A969B31
+. 491 3A969AFE 45
+. 8B 75 84 8B 83 18 FC FF FF 8B 36 01 F2 01 F1 89 95 40 FF FF FF 8D 54 10 FF F7 D8 89 8D 3C FF FF FF 21 C2 39 D1 89 95 38 FF FF FF 73 06 
+
+==== BB 492 (0x3A969B2B) in 9147B, out 50371B, BBs exec'd 0 ====
+	0x3A969B2B:  movl %ecx,0xFFFFFF38(%ebp)
+	0x3A969B31:  movl 0xFFFFFF40(%ebp),%ecx
+	0x3A969B37:  cmpl %ecx,0xFFFFFF38(%ebp)
+	0x3A969B3D:  jbe-8 0x3A969B7B
+. 492 3A969B2B 20
+. 89 8D 38 FF FF FF 8B 8D 40 FF FF FF 39 8D 38 FF FF FF 76 3C 
+
+==== BB 493 (0x3A969B3F) in 9167B, out 50468B, BBs exec'd 0 ====
+	0x3A969B3F:  movl 20(%edi),%eax
+	0x3A969B42:  testb $0x2, %al
+	0x3A969B44:  jz-32 0x3A96A3D8
+. 493 3A969B3F 11
+. 8B 47 14 A8 02 0F 84 8E 08 00 00 
+
+==== BB 494 (0x3A969B4A) in 9178B, out 50550B, BBs exec'd 0 ====
+	0x3A969B4A:  movl 0xFFFFFF38(%ebp),%eax
+	0x3A969B50:  movl 0xFFFFFF40(%ebp),%edx
+	0x3A969B56:  subl %edx,%eax
+	0x3A969B58:  movl %eax,8(%esp,,)
+	0x3A969B5C:  xorl %eax, %eax
+	0x3A969B5E:  movl %eax,4(%esp,,)
+	0x3A969B62:  movl 0xFFFFFF40(%ebp),%esi
+	0x3A969B68:  movl %esi,(%esp,,)
+	0x3A969B6B:  call 0x3A975700
+. 494 3A969B4A 38
+. 8B 85 38 FF FF FF 8B 95 40 FF FF FF 29 D0 89 44 24 08 31 C0 89 44 24 04 8B B5 40 FF FF FF 89 34 24 E8 90 BB 00 00 
+
+==== BB 495 memset(0x3A975700) in 9216B, out 50694B, BBs exec'd 0 ====
+	0x3A975700:  pushl %edi
+	0x3A975701:  movl 8(%esp,,),%edi
+	0x3A975705:  movl 16(%esp,,),%edx
+	0x3A975709:  movb 12(%esp,,),%al
+	0x3A97570D:  movb %al,%ah{sp}
+	0x3A97570F:  movl %eax,%ecx
+	0x3A975711:  shll $0x10, %eax
+	0x3A975714:  movw %cx,%ax
+	0x3A975717:  cld
+	0x3A975718:  cmpl $0x24, %edx
+	0x3A97571B:  movl %edx,%ecx
+	0x3A97571D:  jl-8 0x3A975756
+. 495 3A975700 31
+. 57 8B 7C 24 08 8B 54 24 10 8A 44 24 0C 88 C4 89 C1 C1 E0 10 66 89 C8 FC 83 FA 24 89 D1 7C 37 
+
+==== BB 496 (0x3A975756) in 9247B, out 50876B, BBs exec'd 0 ====
+	0x3A975756:  shrl $0x2, %ecx
+	0x3A975759:  rep stosl
+. 496 3A975756 5
+. C1 E9 02 F3 AB 
+
+==== BB 497 (0x3A975759) in 9252B, out 50973B, BBs exec'd 0 ====
+	0x3A975759:  rep stosl
+. 497 3A975759 2
+. F3 AB 
+
+==== BB 498 (0x3A97575B) in 9254B, out 51060B, BBs exec'd 0 ====
+	0x3A97575B:  movl %edx,%ecx
+	0x3A97575D:  andl $0x3, %ecx
+	0x3A975760:  rep stosb
+. 498 3A97575B 7
+. 89 D1 83 E1 03 F3 AA 
+
+==== BB 499 (0x3A975762) in 9261B, out 51162B, BBs exec'd 0 ====
+	0x3A975762:  movl 8(%esp,,),%eax
+	0x3A975766:  popl %edi
+	0x3A975767:  ret
+. 499 3A975762 6
+. 8B 44 24 08 5F C3 
+
+==== BB 500 (0x3A969B70) in 9267B, out 51221B, BBs exec'd 0 ====
+	0x3A969B70:  movl 20(%edi),%eax
+	0x3A969B73:  testb $0x2, %al
+	0x3A969B75:  jz-32 0x3A96A3B2
+. 500 3A969B70 11
+. 8B 47 14 A8 02 0F 84 37 08 00 00 
+
+==== BB 501 (0x3A969B7B) in 9278B, out 51303B, BBs exec'd 0 ====
+	0x3A969B7B:  movl 0xFFFFFF38(%ebp),%eax
+	0x3A969B81:  cmpl %eax,0xFFFFFF3C(%ebp)
+	0x3A969B87:  jnbe-32 0x3A96A171
+. 501 3A969B7B 18
+. 8B 85 38 FF FF FF 39 85 3C FF FF FF 0F 87 E4 05 00 00 
+
+==== BB 502 (0x3A969DE4) in 9296B, out 51384B, BBs exec'd 0 ====
+	0x3A969DE4:  movl -124(%ebp),%ecx
+	0x3A969DE7:  movl 0x144(%ecx),%eax
+	0x3A969DED:  testl %eax,%eax
+	0x3A969DEF:  jz-32 0x3A96A0E8
+. 502 3A969DE4 17
+. 8B 4D 84 8B 81 44 01 00 00 85 C0 0F 84 F3 02 00 00 
+
+==== BB 503 (0x3A969DF5) in 9313B, out 51472B, BBs exec'd 0 ====
+	0x3A969DF5:  movl -124(%ebp),%ecx
+	0x3A969DF8:  movl (%ecx),%esi
+	0x3A969DFA:  addl %esi,%eax
+	0x3A969DFC:  movl %eax,0x144(%ecx)
+	0x3A969E02:  movl 0xFFFFFF64(%ebp),%esp
+	0x3A969E08:  movl -116(%ebp),%esi
+	0x3A969E0B:  movl %esi,(%esp,,)
+	0x3A969E0E:  call 0x3A974470
+. 503 3A969DF5 30
+. 8B 4D 84 8B 31 01 F0 89 81 44 01 00 00 8B A5 64 FF FF FF 8B 75 8C 89 34 24 E8 5D A6 00 00 
+
+==== BB 504 close(0x3A974470) in 9343B, out 51605B, BBs exec'd 0 ====
+	0x3A974470:  movl %ebx,%edx
+	0x3A974472:  movl 4(%esp,,),%ebx
+	0x3A974476:  movl $0x6,%eax
+	0x3A97447B:  int $0x80
+. 504 3A974470 13
+. 89 DA 8B 5C 24 04 B8 06 00 00 00 CD 80 
+
+==== BB 505 (0x3A97447D) in 9356B, out 51669B, BBs exec'd 0 ====
+	0x3A97447D:  movl %edx,%ebx
+	0x3A97447F:  cmpl $0xFFFFF001, %eax
+	0x3A974484:  jnb-8 0x3A974487
+. 505 3A97447D 9
+. 89 D3 3D 01 F0 FF FF 73 01 
+
+==== BB 506 (0x3A974486) in 9365B, out 51739B, BBs exec'd 0 ====
+	0x3A974486:  ret
+. 506 3A974486 1
+. C3 
+
+==== BB 507 (0x3A969E13) in 9366B, out 51769B, BBs exec'd 0 ====
+	0x3A969E13:  movl $0xFFFFFFFF, -116(%ebp)
+	0x3A969E1A:  movl -124(%ebp),%eax
+	0x3A969E1D:  movzbl 0x174(%eax),%ecx
+	0x3A969E24:  movb %cl,%al
+	0x3A969E26:  andb $0x3, %al
+	0x3A969E28:  decb %al
+	0x3A969E2A:  setz %dl
+	0x3A969E2D:  cmpl $0x2, 0xFFFFFF78(%ebp)
+	0x3A969E34:  setz %al
+	0x3A969E37:  andl %edx,%eax
+	0x3A969E39:  testb $0x1, %al
+	0x3A969E3B:  jz-8 0x3A969E49
+. 507 3A969E13 42
+. C7 45 8C FF FF FF FF 8B 45 84 0F B6 88 74 01 00 00 88 C8 24 03 FE C8 0F 94 C2 83 BD 78 FF FF FF 02 0F 94 C0 21 D0 A8 01 74 0C 
+
+==== BB 508 (0x3A969E49) in 9408B, out 51975B, BBs exec'd 0 ====
+	0x3A969E49:  movl -124(%ebp),%ecx
+	0x3A969E4C:  movl 8(%ecx),%eax
+	0x3A969E4F:  testl %eax,%eax
+	0x3A969E51:  jnz-32 0x3A96A0D9
+. 508 3A969E49 14
+. 8B 4D 84 8B 41 08 85 C0 0F 85 82 02 00 00 
+
+==== BB 509 (0x3A96A0D9) in 9422B, out 52060B, BBs exec'd 0 ====
+	0x3A96A0D9:  movl -124(%ebp),%esi
+	0x3A96A0DC:  movl (%esi),%edx
+	0x3A96A0DE:  addl %edx,%eax
+	0x3A96A0E0:  movl %eax,8(%esi)
+	0x3A96A0E3:  jmp 0x3A969E6F
+. 509 3A96A0D9 15
+. 8B 75 84 8B 16 01 D0 89 46 08 E9 87 FD FF FF 
+
+==== BB 510 (0x3A969E6F) in 9437B, out 52140B, BBs exec'd 0 ====
+	0x3A969E6F:  movl -124(%ebp),%ecx
+	0x3A969E72:  addl %edx,0x148(%ecx)
+	0x3A969E78:  testb $0x40, 0xFFFFFC14(%ebx)
+	0x3A969E7F:  jnz-32 0x3A96A230
+. 510 3A969E6F 22
+. 8B 4D 84 01 91 48 01 00 00 F6 83 14 FC FF FF 40 0F 85 AB 03 00 00 
+
+==== BB 511 (0x3A969E85) in 9459B, out 52245B, BBs exec'd 0 ====
+	0x3A969E85:  movl -124(%ebp),%eax
+	0x3A969E88:  movl 8(%eax),%esi
+	0x3A969E8B:  testl %esi,%esi
+	0x3A969E8D:  jz-32 0x3A969F9E
+. 511 3A969E85 14
+. 8B 45 84 8B 70 08 85 F6 0F 84 0B 01 00 00 
+
+==== BB 512 (0x3A969E93) in 9473B, out 52330B, BBs exec'd 0 ====
+	0x3A969E93:  movl (%esi),%edx
+	0x3A969E95:  movl %eax,%edi
+	0x3A969E97:  addl $0x18, %edi
+	0x3A969E9A:  testl %edx,%edx
+	0x3A969E9C:  jnz-8 0x3A969EAE
+. 512 3A969E93 11
+. 8B 16 89 C7 83 C7 18 85 D2 75 10 
+
+==== BB 513 (0x3A969EAE) in 9484B, out 52416B, BBs exec'd 0 ====
+	0x3A969EAE:  cmpl $0x21, %edx
+	0x3A969EB1:  jle-8 0x3A969EA0
+. 513 3A969EAE 5
+. 83 FA 21 7E ED 
+
+==== BB 514 (0x3A969EA0) in 9489B, out 52473B, BBs exec'd 0 ====
+	0x3A969EA0:  movl %esi,(%edi,%edx,4)
+	0x3A969EA3:  addl $0x8, %esi
+	0x3A969EA6:  movl (%esi),%eax
+	0x3A969EA8:  testl %eax,%eax
+	0x3A969EAA:  movl %eax,%edx
+	0x3A969EAC:  jz-8 0x3A969EE6
+. 514 3A969EA0 14
+. 89 34 97 83 C6 08 8B 06 85 C0 89 C2 74 38 
+
+==== BB 515 (0x3A969EB3) in 9503B, out 52576B, BBs exec'd 0 ====
+	0x3A969EB3:  movl $0x6FFFFFFF,%eax
+	0x3A969EB8:  subl %edx,%eax
+	0x3A969EBA:  cmpl $0xF, %eax
+	0x3A969EBD:  jnbe-8 0x3A969ECB
+. 515 3A969EB3 12
+. B8 FF FF FF 6F 29 D0 83 F8 0F 77 0C 
+
+==== BB 516 (0x3A969EBF) in 9515B, out 52649B, BBs exec'd 0 ====
+	0x3A969EBF:  movl $0x70000021,%eax
+	0x3A969EC4:  subl %edx,%eax
+	0x3A969EC6:  movl %esi,(%edi,%eax,4)
+	0x3A969EC9:  jmp-8 0x3A969EA3
+. 516 3A969EBF 12
+. B8 21 00 00 70 29 D0 89 34 87 EB D8 
+
+==== BB 517 (0x3A969EA3) in 9527B, out 52717B, BBs exec'd 0 ====
+	0x3A969EA3:  addl $0x8, %esi
+	0x3A969EA6:  movl (%esi),%eax
+	0x3A969EA8:  testl %eax,%eax
+	0x3A969EAA:  movl %eax,%edx
+	0x3A969EAC:  jz-8 0x3A969EE6
+. 517 3A969EA3 11
+. 83 C6 08 8B 06 85 C0 89 C2 74 38 
+
+==== BB 518 (0x3A969EE6) in 9538B, out 52803B, BBs exec'd 0 ====
+	0x3A969EE6:  movl -124(%ebp),%ecx
+	0x3A969EE9:  movl (%ecx),%edx
+	0x3A969EEB:  testl %edx,%edx
+	0x3A969EED:  jz-8 0x3A969F42
+. 518 3A969EE6 9
+. 8B 4D 84 8B 11 85 D2 74 53 
+
+==== BB 519 (0x3A969EEF) in 9547B, out 52885B, BBs exec'd 0 ====
+	0x3A969EEF:  movl 16(%edi),%eax
+	0x3A969EF2:  testl %eax,%eax
+	0x3A969EF4:  jz-8 0x3A969EF9
+. 519 3A969EEF 7
+. 8B 47 10 85 C0 74 03 
+
+==== BB 520 (0x3A969EF6) in 9554B, out 52957B, BBs exec'd 0 ====
+	0x3A969EF6:  addl %edx,4(%eax)
+	0x3A969EF9:  movl 12(%edi),%eax
+	0x3A969EFC:  testl %eax,%eax
+	0x3A969EFE:  jz-8 0x3A969F03
+. 520 3A969EF6 10
+. 01 50 04 8B 47 0C 85 C0 74 03 
+
+==== BB 521 (0x3A969F00) in 9564B, out 53048B, BBs exec'd 0 ====
+	0x3A969F00:  addl %edx,4(%eax)
+	0x3A969F03:  movl 20(%edi),%eax
+	0x3A969F06:  testl %eax,%eax
+	0x3A969F08:  jz-8 0x3A969F0D
+. 521 3A969F00 10
+. 01 50 04 8B 47 14 85 C0 74 03 
+
+==== BB 522 (0x3A969F0A) in 9574B, out 53139B, BBs exec'd 0 ====
+	0x3A969F0A:  addl %edx,4(%eax)
+	0x3A969F0D:  movl 24(%edi),%eax
+	0x3A969F10:  testl %eax,%eax
+	0x3A969F12:  jz-8 0x3A969F17
+. 522 3A969F0A 10
+. 01 50 04 8B 47 18 85 C0 74 03 
+
+==== BB 523 (0x3A969F14) in 9584B, out 53230B, BBs exec'd 0 ====
+	0x3A969F14:  addl %edx,4(%eax)
+	0x3A969F17:  movl 28(%edi),%eax
+	0x3A969F1A:  testl %eax,%eax
+	0x3A969F1C:  jz-8 0x3A969F21
+. 523 3A969F14 10
+. 01 50 04 8B 47 1C 85 C0 74 03 
+
+==== BB 524 (0x3A969F21) in 9594B, out 53321B, BBs exec'd 0 ====
+	0x3A969F21:  movl 68(%edi),%eax
+	0x3A969F24:  testl %eax,%eax
+	0x3A969F26:  jz-8 0x3A969F2B
+. 524 3A969F21 7
+. 8B 47 44 85 C0 74 03 
+
+==== BB 525 (0x3A969F28) in 9601B, out 53393B, BBs exec'd 0 ====
+	0x3A969F28:  addl %edx,4(%eax)
+	0x3A969F2B:  movl 92(%edi),%eax
+	0x3A969F2E:  testl %eax,%eax
+	0x3A969F30:  jz-8 0x3A969F35
+. 525 3A969F28 10
+. 01 50 04 8B 47 5C 85 C0 74 03 
+
+==== BB 526 (0x3A969F32) in 9611B, out 53484B, BBs exec'd 0 ====
+	0x3A969F32:  addl %edx,4(%eax)
+	0x3A969F35:  movl 0xC4(%edi),%eax
+	0x3A969F3B:  testl %eax,%eax
+	0x3A969F3D:  jz-8 0x3A969F42
+. 526 3A969F32 13
+. 01 50 04 8B 87 C4 00 00 00 85 C0 74 03 
+
+==== BB 527 (0x3A969F3F) in 9624B, out 53578B, BBs exec'd 0 ====
+	0x3A969F3F:  addl %edx,4(%eax)
+	0x3A969F42:  movl 120(%edi),%edx
+	0x3A969F45:  testl %edx,%edx
+	0x3A969F47:  jz-8 0x3A969F73
+. 527 3A969F3F 10
+. 01 50 04 8B 57 78 85 D2 74 2A 
+
+==== BB 528 (0x3A969F73) in 9634B, out 53669B, BBs exec'd 0 ====
+	0x3A969F73:  movl 0x98(%edi),%edx
+	0x3A969F79:  testl %edx,%edx
+	0x3A969F7B:  jz-8 0x3A969F90
+. 528 3A969F73 10
+. 8B 97 98 00 00 00 85 D2 74 13 
+
+==== BB 529 (0x3A969F7D) in 9644B, out 53744B, BBs exec'd 0 ====
+	0x3A969F7D:  movl 4(%edx),%eax
+	0x3A969F80:  movl -124(%ebp),%esi
+	0x3A969F83:  testb $0x1, %al
+	0x3A969F85:  movl %eax,0x1E8(%esi)
+	0x3A969F8B:  jz-8 0x3A969F90
+. 529 3A969F7D 16
+. 8B 42 04 8B 75 84 A8 01 89 86 E8 01 00 00 74 03 
+
+==== BB 530 (0x3A969F90) in 9660B, out 53854B, BBs exec'd 0 ====
+	0x3A969F90:  movl 116(%edi),%eax
+	0x3A969F93:  testl %eax,%eax
+	0x3A969F95:  jz-8 0x3A969F9E
+. 530 3A969F90 7
+. 8B 47 74 85 C0 74 07 
+
+==== BB 531 (0x3A969F9E) in 9667B, out 53926B, BBs exec'd 0 ====
+	0x3A969F9E:  movl -124(%ebp),%eax
+	0x3A969FA1:  testb $0x40, 0x1E8(%eax)
+	0x3A969FA8:  jnz-32 0x3A96A336
+. 531 3A969F9E 16
+. 8B 45 84 F6 80 E8 01 00 00 40 0F 85 88 03 00 00 
+
+==== BB 532 (0x3A969FAE) in 9683B, out 54017B, BBs exec'd 0 ====
+	0x3A969FAE:  movl -124(%ebp),%eax
+	0x3A969FB1:  movl 40(%eax),%edx
+	0x3A969FB4:  testl %edx,%edx
+	0x3A969FB6:  jnz-32 0x3A96A1DA
+. 532 3A969FAE 14
+. 8B 45 84 8B 50 28 85 D2 0F 85 1E 02 00 00 
+
+==== BB 533 (0x3A96A1DA) in 9697B, out 54102B, BBs exec'd 0 ====
+	0x3A96A1DA:  call 0x3A96BD80
+. 533 3A96A1DA 5
+. E8 A1 1B 00 00 
+
+==== BB 534 (0x3A96A1DF) in 9702B, out 54147B, BBs exec'd 0 ====
+	0x3A96A1DF:  nop
+	0x3A96A1E0:  jmp 0x3A969FBC
+. 534 3A96A1DF 6
+. 90 E9 D7 FD FF FF 
+
+==== BB 535 (0x3A969FBC) in 9708B, out 54179B, BBs exec'd 0 ====
+	0x3A969FBC:  movl -124(%ebp),%edx
+	0x3A969FBF:  movl 88(%edx),%eax
+	0x3A969FC2:  testl %eax,%eax
+	0x3A969FC4:  jnz-32 0x3A96A2AF
+. 535 3A969FBC 14
+. 8B 55 84 8B 42 58 85 C0 0F 85 E5 02 00 00 
+
+==== BB 536 (0x3A969FCA) in 9722B, out 54264B, BBs exec'd 0 ====
+	0x3A969FCA:  movl -124(%ebp),%eax
+	0x3A969FCD:  testb $0x20, 0x1E8(%eax)
+	0x3A969FD4:  jz-8 0x3A969FDC
+. 536 3A969FCA 12
+. 8B 45 84 F6 80 E8 01 00 00 20 74 06 
+
+==== BB 537 (0x3A969FD6) in 9734B, out 54355B, BBs exec'd 0 ====
+	0x3A969FD6:  movl %eax,0xFFFFF9CC(%ebx)
+	0x3A969FDC:  movl -108(%ebp),%eax
+	0x3A969FDF:  movl -104(%ebp),%edx
+	0x3A969FE2:  movl -124(%ebp),%ecx
+	0x3A969FE5:  movl %eax,0x1BC(%ecx)
+	0x3A969FEB:  movl %edx,0x1C0(%ecx)
+	0x3A969FF1:  movl -20(%ebp),%eax
+	0x3A969FF4:  movl -16(%ebp),%edx
+	0x3A969FF7:  movl %eax,0x1C4(%ecx)
+	0x3A969FFD:  movl 0xFFFFFC00(%ebx),%eax
+	0x3A96A003:  movl %edx,0x1C8(%ecx)
+	0x3A96A009:  notl %eax
+	0x3A96A00B:  andl %eax,0xFFFFFF68(%ebp)
+	0x3A96A011:  testb $0x1, 0xFFFFFF68(%ebp)
+	0x3A96A018:  jnz-32 0x3A96A390
+. 537 3A969FD6 72
+. 89 83 CC F9 FF FF 8B 45 94 8B 55 98 8B 4D 84 89 81 BC 01 00 00 89 91 C0 01 00 00 8B 45 EC 8B 55 F0 89 81 C4 01 00 00 8B 83 00 FC FF FF 89 91 C8 01 00 00 F7 D0 21 85 68 FF FF FF F6 85 68 FF FF FF 01 0F 85 72 03 00 00 
+
+==== BB 538 (0x3A96A01E) in 9806B, out 54595B, BBs exec'd 0 ====
+	0x3A96A01E:  movl 0xFFFFFD7C(%ebx),%edi
+	0x3A96A024:  testl %edi,%edi
+	0x3A96A026:  jz-32 0x3A969DC3
+. 538 3A96A01E 14
+. 8B BB 7C FD FF FF 85 FF 0F 84 97 FD FF FF 
+
+==== BB 539 (0x3A969DC3) in 9820B, out 54670B, BBs exec'd 0 ====
+	0x3A969DC3:  movl -124(%ebp),%eax
+	0x3A969DC6:  leal -12(%ebp), %esp
+	0x3A969DC9:  popl %ebx
+	0x3A969DCA:  popl %esi
+	0x3A969DCB:  popl %edi
+	0x3A969DCC:  popl %ebp
+	0x3A969DCD:  ret
+. 539 3A969DC3 11
+. 8B 45 84 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 540 (0x3A96B07E) in 9831B, out 54785B, BBs exec'd 0 ====
+	0x3A96B07E:  leal -12(%ebp), %esp
+	0x3A96B081:  popl %ebx
+	0x3A96B082:  popl %esi
+	0x3A96B083:  popl %edi
+	0x3A96B084:  popl %ebp
+	0x3A96B085:  ret 12
+. 540 3A96B07E 10
+. 8D 65 F4 5B 5E 5F 5D C2 0C 00 
+
+==== BB 541 (0x3A967C70) in 9841B, out 54887B, BBs exec'd 0 ====
+	0x3A967C70:  movl 0x170(%eax),%edx
+	0x3A967C76:  subl $0xC, %esp
+	0x3A967C79:  incl %edx
+	0x3A967C7A:  movl %edx,0x170(%eax)
+	0x3A967C80:  xorl %eax, %eax
+	0x3A967C82:  cmpl $0x1, %edx
+	0x3A967C85:  setz %al
+	0x3A967C88:  addl %eax,0xFFFFFF10(%ebp)
+	0x3A967C8E:  jmp-8 0x3A967C1C
+. 541 3A967C70 32
+. 8B 90 70 01 00 00 83 EC 0C 42 89 90 70 01 00 00 31 C0 83 FA 01 0F 94 C0 01 85 10 FF FF FF EB 8C 
+
+==== BB 542 (0x3A967C1C) in 9873B, out 55022B, BBs exec'd 0 ====
+	0x3A967C1C:  movl 0xFFFFFE60(%ebp),%ecx
+	0x3A967C22:  movl %edi,(%esp,,)
+	0x3A967C25:  movl %ecx,4(%esp,,)
+	0x3A967C29:  call 0x3A973530
+. 542 3A967C1C 18
+. 8B 8D 60 FE FF FF 89 3C 24 89 4C 24 04 E8 02 B9 00 00 
+
+==== BB 543 (0x3A96692A) in 9891B, out 55106B, BBs exec'd 0 ====
+	0x3A96692A:  movl 0xFFFFEC7E(%ebx),%eax
+	0x3A966930:  leal -76(%ebp), %esi
+	0x3A966933:  movl $0x3,%ecx
+	0x3A966938:  movl %esi,0xFFFFFECC(%ebp)
+	0x3A96693E:  leal 0xFFFFFF38(%ebp), %edx
+	0x3A966944:  movl %eax,-76(%ebp)
+	0x3A966947:  movl 0xFFFFEC82(%ebx),%eax
+	0x3A96694D:  movl %eax,-72(%ebp)
+	0x3A966950:  movl 0xFFFFEC86(%ebx),%eax
+	0x3A966956:  movl %eax,-68(%ebp)
+	0x3A966959:  movl 0xFFFFEC8A(%ebx),%eax
+	0x3A96695F:  movl %eax,-64(%ebp)
+	0x3A966962:  movzwl 0xFFFFEC8E(%ebx),%eax
+	0x3A966969:  movw %ax,-60(%ebp)
+	0x3A96696D:  movzbl 0xFFFFEC90(%ebx),%eax
+	0x3A966974:  movb %al,-58(%ebp)
+	0x3A966977:  movl %esi,%eax
+	0x3A966979:  call 0x3A971070
+. 543 3A96692A 84
+. 8B 83 7E EC FF FF 8D 75 B4 B9 03 00 00 00 89 B5 CC FE FF FF 8D 95 38 FF FF FF 89 45 B4 8B 83 82 EC FF FF 89 45 B8 8B 83 86 EC FF FF 89 45 BC 8B 83 8A EC FF FF 89 45 C0 0F B7 83 8E EC FF FF 66 89 45 C4 0F B6 83 90 EC FF FF 88 45 C6 89 F0 E8 F2 A6 00 00 
+
+==== BB 544 _dl_sysdep_read_whole_file(0x3A971070) in 9975B, out 55364B, BBs exec'd 0 ====
+	0x3A971070:  pushl %ebp
+	0x3A971071:  movl %esp,%ebp
+	0x3A971073:  subl $0x8C, %esp
+	0x3A971079:  movl %esi,-8(%ebp)
+	0x3A97107C:  xorl %esi, %esi
+	0x3A97107E:  movl %ebx,-12(%ebp)
+	0x3A971081:  movl %edi,-4(%ebp)
+	0x3A971084:  movl %edx,%edi
+	0x3A971086:  movl %ecx,-112(%ebp)
+	0x3A971089:  movl $0xFFFFFFFF, -116(%ebp)
+	0x3A971090:  call 0x3A97592B
+. 544 3A971070 37
+. 55 89 E5 81 EC 8C 00 00 00 89 75 F8 31 F6 89 5D F4 89 7D FC 89 D7 89 4D 90 C7 45 8C FF FF FF FF E8 96 48 00 00 
+
+==== BB 545 (0x3A971095) in 10012B, out 55543B, BBs exec'd 0 ====
+	0x3A971095:  addl $0x75D7, %ebx
+	0x3A97109B:  movl %esi,4(%esp,,)
+	0x3A97109F:  movl %eax,(%esp,,)
+	0x3A9710A2:  call 0x3A974430
+. 545 3A971095 18
+. 81 C3 D7 75 00 00 89 74 24 04 89 04 24 E8 89 33 00 00 
+
+==== BB 546 (0x3A97444D) in 10030B, out 55631B, BBs exec'd 0 ====
+	0x3A97444D:  call 0x3A975927
+. 546 3A97444D 5
+. E8 D5 14 00 00 
+
+==== BB 547 __i686.get_pc_thunk.cx(0x3A975927) in 10035B, out 55676B, BBs exec'd 0 ====
+	0x3A975927:  movl (%esp,,),%ecx
+	0x3A97592A:  ret
+. 547 3A975927 4
+. 8B 0C 24 C3 
+
+==== BB 548 (0x3A974452) in 10039B, out 55716B, BBs exec'd 0 ====
+	0x3A974452:  addl $0x421A, %ecx
+	0x3A974458:  xorl %edx, %edx
+	0x3A97445A:  subl %eax,%edx
+	0x3A97445C:  movl %edx,0x148(%ecx)
+	0x3A974462:  orl $0xFFFFFFFF, %eax
+	0x3A974465:  jmp-8 0x3A97444C
+. 548 3A974452 21
+. 81 C1 1A 42 00 00 31 D2 29 C2 89 91 48 01 00 00 83 C8 FF EB E5 
+
+==== BB 549 (0x3A9710A7) in 10060B, out 55815B, BBs exec'd 0 ====
+	0x3A9710A7:  testl %eax,%eax
+	0x3A9710A9:  movl %eax,%esi
+	0x3A9710AB:  js-8 0x3A9710D9
+. 549 3A9710A7 6
+. 85 C0 89 C6 78 2C 
+
+==== BB 550 (0x3A9710D9) in 10066B, out 55881B, BBs exec'd 0 ====
+	0x3A9710D9:  movl -116(%ebp),%eax
+	0x3A9710DC:  movl -12(%ebp),%ebx
+	0x3A9710DF:  movl -8(%ebp),%esi
+	0x3A9710E2:  movl -4(%ebp),%edi
+	0x3A9710E5:  movl %ebp,%esp
+	0x3A9710E7:  popl %ebp
+	0x3A9710E8:  ret
+. 550 3A9710D9 16
+. 8B 45 8C 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 551 (0x3A96697E) in 10082B, out 55986B, BBs exec'd 0 ====
+	0x3A96697E:  movl %eax,0xFFFFFF0C(%ebp)
+	0x3A966984:  cmpl $0xFFFFFFFF, %eax
+	0x3A966987:  jnz-32 0x3A967E6E
+. 551 3A96697E 15
+. 89 85 0C FF FF FF 83 F8 FF 0F 85 E1 14 00 00 
+
+==== BB 552 (0x3A96698D) in 10097B, out 56059B, BBs exec'd 0 ====
+	0x3A96698D:  movl 0xFFFFF994(%ebx),%eax
+	0x3A966993:  movl 0xA4(%eax),%edi
+	0x3A966999:  testl %edi,%edi
+	0x3A96699B:  jz-32 0x3A9674A5
+. 552 3A96698D 20
+. 8B 83 94 F9 FF FF 8B B8 A4 00 00 00 85 FF 0F 84 04 0B 00 00 
+
+==== BB 553 (0x3A9669A1) in 10117B, out 56150B, BBs exec'd 0 ====
+	0x3A9669A1:  movl 0xFFFFFF10(%ebp),%esi
+	0x3A9669A7:  testl %esi,%esi
+	0x3A9669A9:  jnz-32 0x3A967E28
+. 553 3A9669A1 14
+. 8B B5 10 FF FF FF 85 F6 0F 85 79 14 00 00 
+
+==== BB 554 (0x3A967E28) in 10131B, out 56225B, BBs exec'd 0 ====
+	0x3A967E28:  movl 0xFFFFFF10(%ebp),%esi
+	0x3A967E2E:  xorl %ecx, %ecx
+	0x3A967E30:  movl %ecx,0xFFFFFF04(%ebp)
+	0x3A967E36:  leal 0x10(,%esi,4), %eax
+	0x3A967E3D:  subl %eax,%esp
+	0x3A967E3F:  leal 47(%esp,,), %edx
+	0x3A967E43:  movl 0xFFFFF9F0(%ebx),%eax
+	0x3A967E49:  andl $0xFFFFFFF0, %edx
+	0x3A967E4C:  movl %edx,0xFFFFFF14(%ebp)
+	0x3A967E52:  movl 0xFFFFFF04(%ebp),%ecx
+	0x3A967E58:  movl %eax,(%edx,%ecx,4)
+	0x3A967E5B:  movl 12(%eax),%eax
+	0x3A967E5E:  incl %ecx
+	0x3A967E5F:  movl %ecx,0xFFFFFF04(%ebp)
+	0x3A967E65:  testl %eax,%eax
+	0x3A967E67:  jnz-8 0x3A967E52
+. 554 3A967E28 65
+. 8B B5 10 FF FF FF 31 C9 89 8D 04 FF FF FF 8D 04 B5 10 00 00 00 29 C4 8D 54 24 2F 8B 83 F0 F9 FF FF 83 E2 F0 89 95 14 FF FF FF 8B 8D 04 FF FF FF 89 04 8A 8B 40 0C 41 89 8D 04 FF FF FF 85 C0 75 E9 
+
+==== BB 555 (0x3A967E69) in 10196B, out 56457B, BBs exec'd 0 ====
+	0x3A967E69:  jmp 0x3A9669AF
+. 555 3A967E69 5
+. E9 41 EB FF FF 
+
+==== BB 556 (0x3A9669AF) in 10201B, out 56485B, BBs exec'd 0 ====
+	0x3A9669AF:  movl 0xFFFFFD94(%ebx),%esi
+	0x3A9669B5:  xorl %edi, %edi
+	0x3A9669B7:  movl %edi,0xFFFFFEC4(%ebp)
+	0x3A9669BD:  testl %esi,%esi
+	0x3A9669BF:  jz-32 0x3A967064
+. 556 3A9669AF 22
+. 8B B3 94 FD FF FF 31 FF 89 BD C4 FE FF FF 85 F6 0F 84 9F 06 00 00 
+
+==== BB 557 (0x3A967064) in 10223B, out 56590B, BBs exec'd 0 ====
+	0x3A967064:  xorl %edx, %edx
+	0x3A967066:  movl 0xFFFFF994(%ebx),%eax
+	0x3A96706C:  movl %edx,4(%esp,,)
+	0x3A967070:  xorl %edx, %edx
+	0x3A967072:  cmpl $0x3, 0xFFFFFF50(%ebp)
+	0x3A967079:  setz %dl
+	0x3A96707C:  movl %edx,(%esp,,)
+	0x3A96707F:  movl 0xFFFFFF10(%ebp),%ecx
+	0x3A967085:  movl 0xFFFFFF14(%ebp),%edx
+	0x3A96708B:  call 0x3A96F1A0
+. 557 3A967064 44
+. 31 D2 8B 83 94 F9 FF FF 89 54 24 04 31 D2 83 BD 50 FF FF FF 03 0F 94 C2 89 14 24 8B 8D 10 FF FF FF 8B 95 14 FF FF FF E8 10 81 00 00 
+
+==== BB 558 _dl_map_object_deps(0x3A96F1A0) in 10267B, out 56764B, BBs exec'd 0 ====
+	0x3A96F1A0:  pushl %ebp
+	0x3A96F1A1:  movl %esp,%ebp
+	0x3A96F1A3:  pushl %edi
+	0x3A96F1A4:  pushl %esi
+	0x3A96F1A5:  pushl %ebx
+	0x3A96F1A6:  subl $0xA0, %esp
+	0x3A96F1AC:  movl %eax,-64(%ebp)
+	0x3A96F1AF:  leal (%ecx,%ecx,2), %eax
+	0x3A96F1B2:  leal 0x28(,%eax,4), %eax
+	0x3A96F1B9:  movl %edx,-68(%ebp)
+	0x3A96F1BC:  subl %eax,%esp
+	0x3A96F1BE:  movl -64(%ebp),%edx
+	0x3A96F1C1:  movl %ecx,-72(%ebp)
+	0x3A96F1C4:  leal 31(%esp,,), %eax
+	0x3A96F1C8:  andl $0xFFFFFFF0, %eax
+	0x3A96F1CB:  movl %edx,4(%eax)
+	0x3A96F1CE:  movl -64(%ebp),%ecx
+	0x3A96F1D1:  leal 12(%eax), %edx
+	0x3A96F1D4:  movl %eax,-16(%ebp)
+	0x3A96F1D7:  movl $0x0, (%eax)
+	0x3A96F1DD:  movl %edx,8(%eax)
+	0x3A96F1E0:  movzbl 0x174(%ecx),%eax
+	0x3A96F1E7:  movl $0x1, -20(%ebp)
+	0x3A96F1EE:  call 0x3A97592B
+. 558 3A96F1A0 83
+. 55 89 E5 57 56 53 81 EC A0 00 00 00 89 45 C0 8D 04 49 8D 04 85 28 00 00 00 89 55 BC 29 C4 8B 55 C0 89 4D B8 8D 44 24 1F 83 E0 F0 89 50 04 8B 4D C0 8D 50 0C 89 45 F0 C7 00 00 00 00 00 89 50 08 0F B6 81 74 01 00 00 C7 45 EC 01 00 00 00 E8 38 67 00 00 
+
+==== BB 559 (0x3A96F1F3) in 10350B, out 57096B, BBs exec'd 0 ====
+	0x3A96F1F3:  addl $0x9479, %ebx
+	0x3A96F1F9:  andb $0x9F, %al
+	0x3A96F1FB:  orb $0x20, %al
+	0x3A96F1FD:  movb %al,0x174(%ecx)
+	0x3A96F203:  movl -72(%ebp),%esi
+	0x3A96F206:  movl $0x0, -84(%ebp)
+	0x3A96F20D:  cmpl %esi,-84(%ebp)
+	0x3A96F210:  jnb-8 0x3A96F25A
+. 559 3A96F1F3 31
+. 81 C3 79 94 00 00 24 9F 0C 20 88 81 74 01 00 00 8B 75 B8 C7 45 AC 00 00 00 00 39 75 AC 73 48 
+
+==== BB 560 (0x3A96F212) in 10381B, out 57264B, BBs exec'd 0 ====
+	0x3A96F212:  movl -84(%ebp),%eax
+	0x3A96F215:  movl -68(%ebp),%edx
+	0x3A96F218:  movl -20(%ebp),%esi
+	0x3A96F21B:  movl (%edx,%eax,4),%edi
+	0x3A96F21E:  movl -16(%ebp),%eax
+	0x3A96F221:  leal (%esi,%esi,2), %edx
+	0x3A96F224:  shll $0x2, %edx
+	0x3A96F227:  incl %esi
+	0x3A96F228:  leal 12(%edx,%eax,1), %ecx
+	0x3A96F22C:  movl $0x0, (%eax,%edx,1)
+	0x3A96F233:  movl %ecx,8(%eax,%edx,1)
+	0x3A96F237:  movl %edi,4(%eax,%edx,1)
+	0x3A96F23B:  movzbl 0x174(%edi),%eax
+	0x3A96F242:  movl %esi,-20(%ebp)
+	0x3A96F245:  andb $0x9F, %al
+	0x3A96F247:  orb $0x20, %al
+	0x3A96F249:  movb %al,0x174(%edi)
+	0x3A96F24F:  movl -72(%ebp),%ecx
+	0x3A96F252:  incl -84(%ebp)
+	0x3A96F255:  cmpl %ecx,-84(%ebp)
+	0x3A96F258:  jb-8 0x3A96F212
+. 560 3A96F212 72
+. 8B 45 AC 8B 55 BC 8B 75 EC 8B 3C 82 8B 45 F0 8D 14 76 C1 E2 02 46 8D 4C 02 0C C7 04 10 00 00 00 00 89 4C 10 08 89 7C 10 04 0F B6 87 74 01 00 00 89 75 EC 24 9F 0C 20 88 87 74 01 00 00 8B 4D B8 FF 45 AC 39 4D AC 72 B8 
+
+==== BB 561 (0x3A96F25A) in 10453B, out 57575B, BBs exec'd 0 ====
+	0x3A96F25A:  movl $0x0, -92(%ebp)
+	0x3A96F261:  movl -20(%ebp),%eax
+	0x3A96F264:  xorl %ecx, %ecx
+	0x3A96F266:  movl $0x0, -56(%ebp)
+	0x3A96F26D:  movl -16(%ebp),%edx
+	0x3A96F270:  movl 0x148(%ebx),%esi
+	0x3A96F276:  movl %ecx,0x148(%ebx)
+	0x3A96F27C:  leal (%eax,%eax,2), %eax
+	0x3A96F27F:  leal -12(%edx,%eax,4), %eax
+	0x3A96F283:  movl $0x0, 8(%eax)
+	0x3A96F28A:  testl %edx,%edx
+	0x3A96F28C:  movl %eax,-80(%ebp)
+	0x3A96F28F:  movl %esi,-88(%ebp)
+	0x3A96F292:  movl %edx,-76(%ebp)
+	0x3A96F295:  jz-32 0x3A96F605
+. 561 3A96F25A 65
+. C7 45 A4 00 00 00 00 8B 45 EC 31 C9 C7 45 C8 00 00 00 00 8B 55 F0 8B B3 48 01 00 00 89 8B 48 01 00 00 8D 04 40 8D 44 82 F4 C7 40 08 00 00 00 00 85 D2 89 45 B0 89 75 A8 89 55 B4 0F 84 6A 03 00 00 
+
+==== BB 562 (0x3A96F29B) in 10518B, out 57809B, BBs exec'd 0 ====
+	0x3A96F29B:  movl $0x0, -100(%ebp)
+	0x3A96F2A2:  movl -76(%ebp),%eax
+	0x3A96F2A5:  movl -76(%ebp),%edx
+	0x3A96F2A8:  movl $0x0, -104(%ebp)
+	0x3A96F2AF:  movl 4(%eax),%eax
+	0x3A96F2B2:  movl $0x1, (%edx)
+	0x3A96F2B8:  movl 0x150(%eax),%edx
+	0x3A96F2BE:  movl %eax,-96(%ebp)
+	0x3A96F2C1:  testl %edx,%edx
+	0x3A96F2C3:  jnz-8 0x3A96F309
+. 562 3A96F29B 42
+. C7 45 9C 00 00 00 00 8B 45 B4 8B 55 B4 C7 45 98 00 00 00 00 8B 40 04 C7 02 01 00 00 00 8B 90 50 01 00 00 89 45 A0 85 D2 75 44 
+
+==== BB 563 (0x3A96F2C5) in 10560B, out 57969B, BBs exec'd 0 ====
+	0x3A96F2C5:  movl 0x1D4(%eax),%eax
+	0x3A96F2CB:  movl -64(%ebp),%ecx
+	0x3A96F2CE:  testl %eax,%eax
+	0x3A96F2D0:  setz %al
+	0x3A96F2D3:  cmpl %ecx,-96(%ebp)
+	0x3A96F2D6:  setnz %dl
+	0x3A96F2D9:  andl %edx,%eax
+	0x3A96F2DB:  testb $0x1, %al
+	0x3A96F2DD:  jz-8 0x3A96F309
+. 563 3A96F2C5 26
+. 8B 80 D4 01 00 00 8B 4D C0 85 C0 0F 94 C0 39 4D A0 0F 95 C2 21 D0 A8 01 74 2A 
+
+==== BB 564 (0x3A96F309) in 10586B, out 58126B, BBs exec'd 0 ====
+	0x3A96F309:  movl -96(%ebp),%edx
+	0x3A96F30C:  movl 28(%edx),%eax
+	0x3A96F30F:  testl %eax,%eax
+	0x3A96F311:  jnz-8 0x3A96F32B
+. 564 3A96F309 10
+. 8B 55 A0 8B 42 1C 85 C0 75 18 
+
+==== BB 565 (0x3A96F32B) in 10596B, out 58211B, BBs exec'd 0 ====
+	0x3A96F32B:  movl -96(%ebp),%ecx
+	0x3A96F32E:  movl 8(%ebp),%esi
+	0x3A96F331:  movl -76(%ebp),%edx
+	0x3A96F334:  movl 44(%ecx),%eax
+	0x3A96F337:  movl 4(%eax),%eax
+	0x3A96F33A:  movl %ecx,-52(%ebp)
+	0x3A96F33D:  movl 8(%ecx),%ecx
+	0x3A96F340:  movl %eax,-108(%ebp)
+	0x3A96F343:  movl %eax,-40(%ebp)
+	0x3A96F346:  movl 12(%ebp),%eax
+	0x3A96F349:  movl %esi,-48(%ebp)
+	0x3A96F34C:  movl %edx,-112(%ebp)
+	0x3A96F34F:  movl %eax,-44(%ebp)
+	0x3A96F352:  movl (%ecx),%edx
+	0x3A96F354:  movl %ecx,-116(%ebp)
+	0x3A96F357:  testl %edx,%edx
+	0x3A96F359:  jnz-32 0x3A96F41C
+. 565 3A96F32B 52
+. 8B 4D A0 8B 75 08 8B 55 B4 8B 41 2C 8B 40 04 89 4D CC 8B 49 08 89 45 94 89 45 D8 8B 45 0C 89 75 D0 89 55 90 89 45 D4 8B 11 89 4D 8C 85 D2 0F 85 BD 00 00 00 
+
+==== BB 566 (0x3A96F41C) in 10648B, out 58429B, BBs exec'd 0 ====
+	0x3A96F41C:  cmpl $0x1, %edx
+	0x3A96F41F:  jz-32 0x3A96F364
+. 566 3A96F41C 9
+. 83 FA 01 0F 84 3F FF FF FF 
+
+==== BB 567 (0x3A96F364) in 10657B, out 58486B, BBs exec'd 0 ====
+	0x3A96F364:  movl -116(%ebp),%esi
+	0x3A96F367:  movl $0x24,%ecx
+	0x3A96F36C:  movl -108(%ebp),%edi
+	0x3A96F36F:  movl 4(%esi),%eax
+	0x3A96F372:  movl %ecx,4(%esp,,)
+	0x3A96F376:  addl %eax,%edi
+	0x3A96F378:  movl %edi,%esi
+	0x3A96F37A:  movl %edi,(%esp,,)
+	0x3A96F37D:  call 0x3A9750B0
+. 567 3A96F364 30
+. 8B 75 8C B9 24 00 00 00 8B 7D 94 8B 46 04 89 4C 24 04 01 C7 89 FE 89 3C 24 E8 2E 5D 00 00 
+
+==== BB 568 (0x3A9750D0) in 10687B, out 58619B, BBs exec'd 0 ====
+	0x3A9750D0:  movb %dl,%cl
+	0x3A9750D2:  jp-8 0x3A9750FD
+. 568 3A9750D0 4
+. 88 D1 7A 29 
+
+==== BB 569 (0x3A9750D4) in 10691B, out 58677B, BBs exec'd 0 ====
+	0x3A9750D4:  xorb (%eax),%cl
+	0x3A9750D6:  jz-32 0x3A97523D
+. 569 3A9750D4 8
+. 32 08 0F 84 61 01 00 00 
+
+==== BB 570 (0x3A9750DC) in 10699B, out 58740B, BBs exec'd 0 ====
+	0x3A9750DC:  xorb %dl,%cl
+	0x3A9750DE:  jz-32 0x3A97526A
+. 570 3A9750DC 8
+. 30 D1 0F 84 86 01 00 00 
+
+==== BB 571 (0x3A9750E4) in 10707B, out 58801B, BBs exec'd 0 ====
+	0x3A9750E4:  movb 1(%eax),%cl
+	0x3A9750E7:  incl %eax
+	0x3A9750E8:  cmpb %cl,%dl
+	0x3A9750EA:  jz-32 0x3A97523D
+. 571 3A9750E4 12
+. 8A 48 01 40 38 CA 0F 84 4D 01 00 00 
+
+==== BB 572 (0x3A9750F0) in 10719B, out 58883B, BBs exec'd 0 ====
+	0x3A9750F0:  cmpb $0x0, %cl
+	0x3A9750F3:  jz-32 0x3A97526A
+. 572 3A9750F0 9
+. 80 F9 00 0F 84 71 01 00 00 
+
+==== BB 573 (0x3A9750F9) in 10728B, out 58941B, BBs exec'd 0 ====
+	0x3A9750F9:  incl %eax
+	0x3A9750FA:  decl %edi
+	0x3A9750FB:  jnz-8 0x3A975111
+. 573 3A9750F9 4
+. 40 4F 75 14 
+
+==== BB 574 (0x3A9750FD) in 10732B, out 59014B, BBs exec'd 0 ====
+	0x3A9750FD:  movb (%eax),%cl
+	0x3A9750FF:  cmpb %cl,%dl
+	0x3A975101:  jz-32 0x3A97523D
+. 574 3A9750FD 10
+. 8A 08 38 CA 0F 84 36 01 00 00 
+
+==== BB 575 (0x3A975107) in 10742B, out 59088B, BBs exec'd 0 ====
+	0x3A975107:  cmpb $0x0, %cl
+	0x3A97510A:  jz-32 0x3A97526A
+. 575 3A975107 9
+. 80 F9 00 0F 84 5A 01 00 00 
+
+==== BB 576 (0x3A975110) in 10751B, out 59146B, BBs exec'd 0 ====
+	0x3A975110:  incl %eax
+	0x3A975111:  movl (%eax),%ecx
+	0x3A975113:  movl $0xFEFEFEFF,%ebp
+	0x3A975118:  movl $0xFEFEFEFF,%edi
+	0x3A97511D:  addl %ecx,%ebp
+	0x3A97511F:  xorl %ecx,%ebp
+	0x3A975121:  addl %ecx,%edi
+	0x3A975123:  leal 4(%eax), %eax
+	0x3A975126:  jnb-32 0x3A975242
+. 576 3A975110 28
+. 40 8B 08 BD FF FE FE FE BF FF FE FE FE 01 CD 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00 
+
+==== BB 577 (0x3A97524E) in 10779B, out 59263B, BBs exec'd 0 ====
+	0x3A97524E:  incl %eax
+	0x3A97524F:  cmpb %dl,%ch{bp}
+	0x3A975251:  jz-8 0x3A97523D
+. 577 3A97524E 5
+. 40 38 D5 74 EA 
+
+==== BB 578 (0x3A975253) in 10784B, out 59332B, BBs exec'd 0 ====
+	0x3A975253:  cmpb $0x0, %ch{bp}
+	0x3A975256:  jz-8 0x3A97526A
+. 578 3A975253 5
+. 80 FD 00 74 12 
+
+==== BB 579 (0x3A975258) in 10789B, out 59390B, BBs exec'd 0 ====
+	0x3A975258:  shrl $0x10, %ecx
+	0x3A97525B:  incl %eax
+	0x3A97525C:  cmpb %dl,%cl
+	0x3A97525E:  jz-8 0x3A97523D
+. 579 3A975258 8
+. C1 E9 10 40 38 D1 74 DD 
+
+==== BB 580 (0x3A975260) in 10797B, out 59472B, BBs exec'd 0 ====
+	0x3A975260:  cmpb $0x0, %cl
+	0x3A975263:  jz-8 0x3A97526A
+. 580 3A975260 5
+. 80 F9 00 74 05 
+
+==== BB 581 (0x3A96F382) in 10802B, out 59530B, BBs exec'd 0 ====
+	0x3A96F382:  testl %eax,%eax
+	0x3A96F384:  jnz-32 0x3A96F8B4
+. 581 3A96F382 8
+. 85 C0 0F 85 2A 05 00 00 
+
+==== BB 582 (0x3A96F38A) in 10810B, out 59589B, BBs exec'd 0 ====
+	0x3A96F38A:  movl %esi,-36(%ebp)
+	0x3A96F38D:  leal -52(%ebp), %eax
+	0x3A96F390:  leal -56(%ebp), %edx
+	0x3A96F393:  movl %eax,(%esp,,)
+	0x3A96F396:  leal 0xFFFF7834(%ebx), %ecx
+	0x3A96F39C:  leal -60(%ebp), %eax
+	0x3A96F39F:  call 0x3A9701D0
+. 582 3A96F38A 26
+. 89 75 DC 8D 45 CC 8D 55 C8 89 04 24 8D 8B 34 78 FF FF 8D 45 C4 E8 2C 0E 00 00 
+
+==== BB 583 _dl_catch_error(0x3A9701D0) in 10836B, out 59700B, BBs exec'd 0 ====
+	0x3A9701D0:  pushl %ebp
+	0x3A9701D1:  movl %esp,%ebp
+	0x3A9701D3:  subl $0xF8, %esp
+	0x3A9701D9:  movl %eax,0xFFFFFF24(%ebp)
+	0x3A9701DF:  xorl %eax, %eax
+	0x3A9701E1:  movl %ebx,-12(%ebp)
+	0x3A9701E4:  movl %esi,-8(%ebp)
+	0x3A9701E7:  movl %edi,-4(%ebp)
+	0x3A9701EA:  call 0x3A97592B
+. 583 3A9701D0 31
+. 55 89 E5 81 EC F8 00 00 00 89 85 24 FF FF FF 31 C0 89 5D F4 89 75 F8 89 7D FC E8 3C 57 00 00 
+
+==== BB 584 (0x3A9701EF) in 10867B, out 59857B, BBs exec'd 0 ====
+	0x3A9701EF:  addl $0x847D, %ebx
+	0x3A9701F5:  movl %edx,0xFFFFFF20(%ebp)
+	0x3A9701FB:  movl %ecx,0xFFFFFF1C(%ebp)
+	0x3A970201:  movl %eax,0xFFFFFF2C(%ebp)
+	0x3A970207:  call*l 0xFFFFF9E0(%ebx)
+. 584 3A9701EF 30
+. 81 C3 7D 84 00 00 89 95 20 FF FF FF 89 8D 1C FF FF FF 89 85 2C FF FF FF FF 93 E0 F9 FF FF 
+
+==== BB 585 _dl_initial_error_catch_tsd(0x3A9658C0) in 10897B, out 59973B, BBs exec'd 0 ====
+	0x3A9658C0:  pushl %ebp
+	0x3A9658C1:  movl %esp,%ebp
+	0x3A9658C3:  popl %ebp
+	0x3A9658C4:  call 0x3A975927
+. 585 3A9658C0 9
+. 55 89 E5 5D E8 5E 00 01 00 
+
+==== BB 586 (0x3A9658C9) in 10906B, out 60057B, BBs exec'd 0 ====
+	0x3A9658C9:  addl $0x12DA3, %ecx
+	0x3A9658CF:  leal 0x3C(%ecx), %eax
+	0x3A9658D5:  ret
+. 586 3A9658C9 13
+. 81 C1 A3 2D 01 00 8D 81 3C 00 00 00 C3 
+
+==== BB 587 (0x3A97020D) in 10919B, out 60117B, BBs exec'd 0 ====
+	0x3A97020D:  movl %eax,0xFFFFFF18(%ebp)
+	0x3A970213:  movl (%eax),%eax
+	0x3A970215:  movl %eax,-28(%ebp)
+	0x3A970218:  leal 0xFFFFFF30(%ebp), %eax
+	0x3A97021E:  movl %eax,(%esp,,)
+	0x3A970221:  call 0x3A974F30
+. 587 3A97020D 25
+. 89 85 18 FF FF FF 8B 00 89 45 E4 8D 85 30 FF FF FF 89 04 24 E8 0A 4D 00 00 
+
+==== BB 588 _setjmp(0x3A974F30) in 10944B, out 60218B, BBs exec'd 0 ====
+	0x3A974F30:  xorl %eax, %eax
+	0x3A974F32:  movl 4(%esp,,),%edx
+	0x3A974F36:  movl %ebx,0(%edx)
+	0x3A974F39:  movl %esi,4(%edx)
+	0x3A974F3C:  movl %edi,8(%edx)
+	0x3A974F3F:  leal 4(%esp,,), %ecx
+	0x3A974F43:  movl %ecx,16(%edx)
+	0x3A974F46:  movl 0(%esp,,),%ecx
+	0x3A974F4A:  movl %ecx,20(%edx)
+	0x3A974F4D:  movl %ebp,12(%edx)
+	0x3A974F50:  movl %eax,24(%edx)
+	0x3A974F53:  ret
+. 588 3A974F30 36
+. 31 C0 8B 54 24 04 89 5A 00 89 72 04 89 7A 08 8D 4C 24 04 89 4A 10 8B 4C 24 00 89 4A 14 89 6A 0C 89 42 18 C3 
+
+==== BB 589 (0x3A970226) in 10980B, out 60379B, BBs exec'd 0 ====
+	0x3A970226:  testl %eax,%eax
+	0x3A970228:  movl %eax,%edx
+	0x3A97022A:  jnz-8 0x3A97027A
+. 589 3A970226 6
+. 85 C0 89 C2 75 4E 
+
+==== BB 590 (0x3A97022C) in 10986B, out 60445B, BBs exec'd 0 ====
+	0x3A97022C:  movl 0xFFFFFF18(%ebp),%ecx
+	0x3A970232:  leal 0xFFFFFF28(%ebp), %eax
+	0x3A970238:  movl %eax,(%ecx)
+	0x3A97023A:  movl 8(%ebp),%eax
+	0x3A97023D:  movl %eax,(%esp,,)
+	0x3A970240:  call*l 0xFFFFFF1C(%ebp)
+. 590 3A97022C 26
+. 8B 8D 18 FF FF FF 8D 85 28 FF FF FF 89 01 8B 45 08 89 04 24 FF 95 1C FF FF FF 
+
+==== BB 591 openaux(0x3A96FEA0) in 11012B, out 60543B, BBs exec'd 0 ====
+	0x3A96FEA0:  pushl %ebp
+	0x3A96FEA1:  movl $0x1,%ecx
+	0x3A96FEA6:  movl %esp,%ebp
+	0x3A96FEA8:  pushl %esi
+	0x3A96FEA9:  subl $0xC, %esp
+	0x3A96FEAC:  movl 8(%ebp),%esi
+	0x3A96FEAF:  movl 8(%esi),%eax
+	0x3A96FEB2:  movl 16(%esi),%edx
+	0x3A96FEB5:  movl %eax,8(%esp,,)
+	0x3A96FEB9:  movl 4(%esi),%eax
+	0x3A96FEBC:  movl %eax,4(%esp,,)
+	0x3A96FEC0:  movl (%esi),%eax
+	0x3A96FEC2:  movzbl 0x174(%eax),%eax
+	0x3A96FEC9:  testb $0x3, %al
+	0x3A96FECB:  jz-8 0x3A96FED2
+. 591 3A96FEA0 45
+. 55 B9 01 00 00 00 89 E5 56 83 EC 0C 8B 75 08 8B 46 08 8B 56 10 89 44 24 08 8B 46 04 89 44 24 04 8B 06 0F B6 80 74 01 00 00 A8 03 74 05 
+
+==== BB 592 (0x3A96FED2) in 11057B, out 60761B, BBs exec'd 0 ====
+	0x3A96FED2:  movl %ecx,(%esp,,)
+	0x3A96FED5:  movl (%esi),%eax
+	0x3A96FED7:  xorl %ecx, %ecx
+	0x3A96FED9:  call 0x3A96AE40
+. 592 3A96FED2 12
+. 89 0C 24 8B 06 31 C9 E8 62 AF FF FF 
+
+==== BB 593 (0x3A96B088) in 11069B, out 60847B, BBs exec'd 0 ====
+	0x3A96B088:  movl 0xFFFFFDD4(%ebp),%edx
+	0x3A96B08E:  movl %edx,(%esp,,)
+	0x3A96B091:  call 0x3A9752D0
+. 593 3A96B088 14
+. 8B 95 D4 FD FF FF 89 14 24 E8 3A A2 00 00 
+
+==== BB 594 (0x3A96B096) in 11083B, out 60918B, BBs exec'd 0 ====
+	0x3A96B096:  incl %eax
+	0x3A96B097:  testb $0x1, 0xFFFFFC14(%ebx)
+	0x3A96B09E:  movl %eax,0xFFFFFDC8(%ebp)
+	0x3A96B0A4:  jnz-32 0x3A96B4D6
+. 594 3A96B096 20
+. 40 F6 83 14 FC FF FF 01 89 85 C8 FD FF FF 0F 85 2C 04 00 00 
+
+==== BB 595 (0x3A96B0AA) in 11103B, out 61019B, BBs exec'd 0 ====
+	0x3A96B0AA:  movl 0xFFFFFDD8(%ebp),%esi
+	0x3A96B0B0:  movl $0xFFFFFFFF,%edi
+	0x3A96B0B5:  movl %edi,0xFFFFFDCC(%ebp)
+	0x3A96B0BB:  testl %esi,%esi
+	0x3A96B0BD:  jz-8 0x3A96B0D3
+. 595 3A96B0AA 21
+. 8B B5 D8 FD FF FF BF FF FF FF FF 89 BD CC FD FF FF 85 F6 74 14 
+
+==== BB 596 (0x3A96B0BF) in 11124B, out 61119B, BBs exec'd 0 ====
+	0x3A96B0BF:  movl 0xFFFFFDD8(%ebp),%eax
+	0x3A96B0C5:  movl 0x8C(%eax),%ecx
+	0x3A96B0CB:  testl %ecx,%ecx
+	0x3A96B0CD:  jnz-32 0x3A96B276
+. 596 3A96B0BF 20
+. 8B 85 D8 FD FF FF 8B 88 8C 00 00 00 85 C9 0F 85 A3 01 00 00 
+
+==== BB 597 (0x3A96B0D3) in 11144B, out 61210B, BBs exec'd 0 ====
+	0x3A96B0D3:  movl 0xFFFFFDD8(%ebp),%esi
+	0x3A96B0D9:  testl %esi,%esi
+	0x3A96B0DB:  jz-32 0x3A96B1A3
+. 597 3A96B0D3 14
+. 8B B5 D8 FD FF FF 85 F6 0F 84 C2 00 00 00 
+
+==== BB 598 (0x3A96B0E1) in 11158B, out 61285B, BBs exec'd 0 ====
+	0x3A96B0E1:  leal 0xFFFFF009(%ebx), %edx
+	0x3A96B0E7:  movl %edx,0xFFFFFDB4(%ebp)
+	0x3A96B0ED:  jmp-8 0x3A96B10E
+. 598 3A96B0E1 14
+. 8D 93 09 F0 FF FF 89 95 B4 FD FF FF EB 1F 
+
+==== BB 599 (0x3A96B10E) in 11172B, out 61345B, BBs exec'd 0 ====
+	0x3A96B10E:  movl 0xFFFFFDB4(%ebp),%ecx
+	0x3A96B114:  leal 0x180(%esi), %edi
+	0x3A96B11A:  movl %esi,%eax
+	0x3A96B11C:  movl %edi,%edx
+	0x3A96B11E:  movl %ecx,(%esp,,)
+	0x3A96B121:  movl $0xF,%ecx
+	0x3A96B126:  call 0x3A96A890
+. 599 3A96B10E 29
+. 8B 8D B4 FD FF FF 8D BE 80 01 00 00 89 F0 89 FA 89 0C 24 B9 0F 00 00 00 E8 65 F7 FF FF 
+
+==== BB 600 cache_rpath(0x3A96A890) in 11201B, out 61455B, BBs exec'd 0 ====
+	0x3A96A890:  pushl %ebp
+	0x3A96A891:  movl %esp,%ebp
+	0x3A96A893:  subl $0xC, %esp
+	0x3A96A896:  movl %esi,-8(%ebp)
+	0x3A96A899:  movl %edx,%esi
+	0x3A96A89B:  movl %edi,-4(%ebp)
+	0x3A96A89E:  movl (%edx),%edx
+	0x3A96A8A0:  movl %eax,%edi
+	0x3A96A8A2:  xorl %eax, %eax
+	0x3A96A8A4:  cmpl $0xFFFFFFFF, %edx
+	0x3A96A8A7:  jz-8 0x3A96A8D0
+. 600 3A96A890 25
+. 55 89 E5 83 EC 0C 89 75 F8 89 D6 89 7D FC 8B 12 89 C7 31 C0 83 FA FF 74 27 
+
+==== BB 601 (0x3A96A8D0) in 11226B, out 61621B, BBs exec'd 0 ====
+	0x3A96A8D0:  movl -8(%ebp),%esi
+	0x3A96A8D3:  movl -4(%ebp),%edi
+	0x3A96A8D6:  movl %ebp,%esp
+	0x3A96A8D8:  popl %ebp
+	0x3A96A8D9:  ret
+. 601 3A96A8D0 10
+. 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 602 (0x3A96B12B) in 11236B, out 61700B, BBs exec'd 0 ====
+	0x3A96B12B:  testb %al,%al
+	0x3A96B12D:  jz-8 0x3A96B0EF
+. 602 3A96B12B 4
+. 84 C0 74 C0 
+
+==== BB 603 (0x3A96B0EF) in 11240B, out 61761B, BBs exec'd 0 ====
+	0x3A96B0EF:  cmpl $0xFFFFFFFF, 0xFFFFFDCC(%ebp)
+	0x3A96B0F6:  movl 0x160(%esi),%esi
+	0x3A96B0FC:  setz %al
+	0x3A96B0FF:  testl %esi,%esi
+	0x3A96B101:  setnz %dl
+	0x3A96B104:  andl %edx,%eax
+	0x3A96B106:  testb $0x1, %al
+	0x3A96B108:  jz-32 0x3A96B1A3
+. 603 3A96B0EF 31
+. 83 BD CC FD FF FF FF 8B B6 60 01 00 00 0F 94 C0 85 F6 0F 95 C2 21 D0 A8 01 0F 84 95 00 00 00 
+
+==== BB 604 (0x3A96B1A3) in 11271B, out 61912B, BBs exec'd 0 ====
+	0x3A96B1A3:  cmpl $0xFFFFFFFF, 0xFFFFFDCC(%ebp)
+	0x3A96B1AA:  movl 0xFFFFF994(%ebx),%esi
+	0x3A96B1B0:  setz %al
+	0x3A96B1B3:  testl %esi,%esi
+	0x3A96B1B5:  setnz %dl
+	0x3A96B1B8:  andl %edx,%eax
+	0x3A96B1BA:  testb $0x1, %al
+	0x3A96B1BC:  jz-8 0x3A96B216
+. 604 3A96B1A3 27
+. 83 BD CC FD FF FF FF 8B B3 94 F9 FF FF 0F 94 C0 85 F6 0F 95 C2 21 D0 A8 01 74 58 
+
+==== BB 605 (0x3A96B1BE) in 11298B, out 62060B, BBs exec'd 0 ====
+	0x3A96B1BE:  movzbl 0x174(%esi),%eax
+	0x3A96B1C5:  andb $0x3, %al
+	0x3A96B1C7:  cmpb $0x2, %al
+	0x3A96B1C9:  jz-8 0x3A96B216
+. 605 3A96B1BE 13
+. 0F B6 86 74 01 00 00 24 03 3C 02 74 4B 
+
+==== BB 606 (0x3A96B1CB) in 11311B, out 62157B, BBs exec'd 0 ====
+	0x3A96B1CB:  cmpl 0xFFFFFDD8(%ebp),%esi
+	0x3A96B1D1:  jz-8 0x3A96B216
+. 606 3A96B1CB 8
+. 3B B5 D8 FD FF FF 74 43 
+
+==== BB 607 (0x3A96B216) in 11319B, out 62225B, BBs exec'd 0 ====
+	0x3A96B216:  cmpl $0xFFFFFFFF, 0xFFFFFDCC(%ebp)
+	0x3A96B21D:  jz-8 0x3A96B276
+. 607 3A96B216 9
+. 83 BD CC FD FF FF FF 74 57 
+
+==== BB 608 (0x3A96B276) in 11328B, out 62291B, BBs exec'd 0 ====
+	0x3A96B276:  cmpl $0xFFFFFFFF, 0xBC(%ebx)
+	0x3A96B27D:  jz-8 0x3A96B21F
+. 608 3A96B276 9
+. 83 BB BC 00 00 00 FF 74 A0 
+
+==== BB 609 (0x3A96B27F) in 11337B, out 62357B, BBs exec'd 0 ====
+	0x3A96B27F:  leal 0xFFFFFDE0(%ebp), %eax
+	0x3A96B285:  leal 0xFFFFFDE4(%ebp), %ecx
+	0x3A96B28B:  movl %eax,4(%esp,,)
+	0x3A96B28F:  leal 0xBC(%ebx), %eax
+	0x3A96B295:  movl %ecx,8(%esp,,)
+	0x3A96B299:  movl %eax,(%esp,,)
+	0x3A96B29C:  movl 0xFFFFFDD0(%ebp),%ecx
+	0x3A96B2A2:  movl 0xFFFFFDC8(%ebp),%edx
+	0x3A96B2A8:  movl 0xFFFFFDD4(%ebp),%eax
+	0x3A96B2AE:  call 0x3A969250
+. 609 3A96B27F 52
+. 8D 85 E0 FD FF FF 8D 8D E4 FD FF FF 89 44 24 04 8D 83 BC 00 00 00 89 4C 24 08 89 04 24 8B 8D D0 FD FF FF 8B 95 C8 FD FF FF 8B 85 D4 FD FF FF E8 9D DF FF FF 
+
+==== BB 610 open_path(0x3A969250) in 11389B, out 62513B, BBs exec'd 0 ====
+	0x3A969250:  pushl %ebp
+	0x3A969251:  movl %esp,%ebp
+	0x3A969253:  pushl %edi
+	0x3A969254:  xorl %edi, %edi
+	0x3A969256:  pushl %esi
+	0x3A969257:  xorl %esi, %esi
+	0x3A969259:  pushl %ebx
+	0x3A96925A:  subl $0xB4, %esp
+	0x3A969260:  movl %eax,-112(%ebp)
+	0x3A969263:  movl 8(%ebp),%eax
+	0x3A969266:  call 0x3A97592B
+. 610 3A969250 27
+. 55 89 E5 57 31 FF 56 31 F6 53 81 EC B4 00 00 00 89 45 90 8B 45 08 E8 C0 C6 00 00 
+
+==== BB 611 (0x3A96926B) in 11416B, out 62703B, BBs exec'd 0 ====
+	0x3A96926B:  addl $0xF401, %ebx
+	0x3A969271:  movl %ecx,-120(%ebp)
+	0x3A969274:  movl (%eax),%eax
+	0x3A969276:  movl %edx,-116(%ebp)
+	0x3A969279:  movl 0xD8(%ebx),%ecx
+	0x3A96927F:  movl %edi,0xFFFFFF7C(%ebp)
+	0x3A969285:  movl %eax,-124(%ebp)
+	0x3A969288:  movl 0xCC(%ebx),%eax
+	0x3A96928E:  movl $0xFFFFFFFF, -128(%ebp)
+	0x3A969295:  movl %esi,0xFFFFFF78(%ebp)
+	0x3A96929B:  addl %ecx,%eax
+	0x3A96929D:  leal 18(%edx,%eax,1), %eax
+	0x3A9692A1:  movl -124(%ebp),%ecx
+	0x3A9692A4:  andl $0xFFFFFFFC, %eax
+	0x3A9692A7:  subl %eax,%esp
+	0x3A9692A9:  leal 27(%esp,,), %edx
+	0x3A9692AD:  movl (%ecx),%edi
+	0x3A9692AF:  andl $0xFFFFFFF0, %edx
+	0x3A9692B2:  movl %edx,0xFFFFFF74(%ebp)
+	0x3A9692B8:  nop
+	0x3A9692B9:  leal 0(%esi,,), %esi
+	0x3A9692C0:  xorl %eax, %eax
+	0x3A9692C2:  movl %eax,0xFFFFFF70(%ebp)
+	0x3A9692C8:  xorl %eax, %eax
+	0x3A9692CA:  testb $0x1, 0xFFFFFC14(%ebx)
+	0x3A9692D1:  movl %eax,0xFFFFFF68(%ebp)
+	0x3A9692D7:  jnz-32 0x3A969555
+. 611 3A96926B 114
+. 81 C3 01 F4 00 00 89 4D 88 8B 00 89 55 8C 8B 8B D8 00 00 00 89 BD 7C FF FF FF 89 45 84 8B 83 CC 00 00 00 C7 45 80 FF FF FF FF 89 B5 78 FF FF FF 01 C8 8D 44 02 12 8B 4D 84 83 E0 FC 29 C4 8D 54 24 1B 8B 39 83 E2 F0 89 95 74 FF FF FF 90 8D B4 26 00 00 00 00 31 C0 89 85 70 FF FF FF 31 C0 F6 83 14 FC FF FF 01 89 85 68 FF FF FF 0F 85 78 02 00 00 
+
+==== BB 612 (0x3A9692DD) in 11530B, out 63073B, BBs exec'd 0 ====
+	0x3A9692DD:  movl 16(%edi),%eax
+	0x3A9692E0:  xorl %esi, %esi
+	0x3A9692E2:  movl %eax,8(%esp,,)
+	0x3A9692E6:  movl 12(%edi),%eax
+	0x3A9692E9:  movl %eax,4(%esp,,)
+	0x3A9692ED:  movl 0xFFFFFF74(%ebp),%ecx
+	0x3A9692F3:  movl %ecx,(%esp,,)
+	0x3A9692F6:  call 0x3A975770
+. 612 3A9692DD 30
+. 8B 47 10 31 F6 89 44 24 08 8B 47 0C 89 44 24 04 8B 8D 74 FF FF FF 89 0C 24 E8 75 C4 00 00 
+
+==== BB 613 (0x3A9692FB) in 11560B, out 63208B, BBs exec'd 0 ====
+	0x3A9692FB:  cmpl $0xFFFFFFFF, -128(%ebp)
+	0x3A9692FF:  movl %eax,0xFFFFFF6C(%ebp)
+	0x3A969305:  jnz-32 0x3A9694A3
+. 613 3A9692FB 16
+. 83 7D 80 FF 89 85 6C FF FF FF 0F 85 98 01 00 00 
+
+==== BB 614 (0x3A96930B) in 11576B, out 63290B, BBs exec'd 0 ====
+	0x3A96930B:  cmpl 0xC8(%ebx),%esi
+	0x3A969311:  jb-8 0x3A969332
+. 614 3A96930B 8
+. 3B B3 C8 00 00 00 72 1F 
+
+==== BB 615 (0x3A969332) in 11584B, out 63358B, BBs exec'd 0 ====
+	0x3A969332:  cmpl $0x1, 20(%edi,%esi,4)
+	0x3A969337:  jz-8 0x3A969318
+. 615 3A969332 7
+. 83 7C B7 14 01 74 DF 
+
+==== BB 616 (0x3A969339) in 11591B, out 63425B, BBs exec'd 0 ====
+	0x3A969339:  movl 0xC4(%ebx),%eax
+	0x3A96933F:  movl 4(%eax,%esi,8),%eax
+	0x3A969343:  movl %eax,8(%esp,,)
+	0x3A969347:  movl 0xC4(%ebx),%eax
+	0x3A96934D:  movl (%eax,%esi,8),%eax
+	0x3A969350:  movl %eax,4(%esp,,)
+	0x3A969354:  movl 0xFFFFFF6C(%ebp),%eax
+	0x3A96935A:  movl %eax,(%esp,,)
+	0x3A96935D:  call 0x3A975770
+. 616 3A969339 41
+. 8B 83 C4 00 00 00 8B 44 F0 04 89 44 24 08 8B 83 C4 00 00 00 8B 04 F0 89 44 24 04 8B 85 6C FF FF FF 89 04 24 E8 0E C4 00 00 
+
+==== BB 617 (0x3A969362) in 11632B, out 63570B, BBs exec'd 0 ====
+	0x3A969362:  movl -116(%ebp),%edx
+	0x3A969365:  movl %edx,8(%esp,,)
+	0x3A969369:  movl -112(%ebp),%ecx
+	0x3A96936C:  movl %eax,(%esp,,)
+	0x3A96936F:  movl %ecx,4(%esp,,)
+	0x3A969373:  call 0x3A975770
+. 617 3A969362 22
+. 8B 55 8C 89 54 24 08 8B 4D 90 89 04 24 89 4C 24 04 E8 F8 C3 00 00 
+
+==== BB 618 (0x3A969378) in 11654B, out 63674B, BBs exec'd 0 ====
+	0x3A969378:  movl %eax,0xFFFFFF70(%ebp)
+	0x3A96937E:  movl 0xFFFFFF74(%ebp),%eax
+	0x3A969384:  subl %eax,0xFFFFFF70(%ebp)
+	0x3A96938A:  testb $0x1, 0xFFFFFC14(%ebx)
+	0x3A969391:  jnz-32 0x3A9694E7
+. 618 3A969378 31
+. 89 85 70 FF FF FF 8B 85 74 FF FF FF 29 85 70 FF FF FF F6 83 14 FC FF FF 01 0F 85 50 01 00 00 
+
+==== BB 619 (0x3A969397) in 11685B, out 63797B, BBs exec'd 0 ====
+	0x3A969397:  movl 16(%ebp),%edx
+	0x3A96939A:  movl 0xFFFFFF74(%ebp),%eax
+	0x3A9693A0:  call 0x3A968F20
+. 619 3A969397 14
+. 8B 55 10 8B 85 74 FF FF FF E8 7B FB FF FF 
+
+==== BB 620 (0x3A9693A5) in 11699B, out 63874B, BBs exec'd 0 ====
+	0x3A9693A5:  movl %eax,-128(%ebp)
+	0x3A9693A8:  movl 20(%edi,%esi,4),%eax
+	0x3A9693AC:  testl %eax,%eax
+	0x3A9693AE:  jnz-8 0x3A9693C8
+. 620 3A9693A5 11
+. 89 45 80 8B 44 B7 14 85 C0 75 18 
+
+==== BB 621 (0x3A9693B0) in 11710B, out 63966B, BBs exec'd 0 ====
+	0x3A9693B0:  cmpl $0xFFFFFFFF, -128(%ebp)
+	0x3A9693B4:  jz-32 0x3A9694FE
+. 621 3A9693B0 10
+. 83 7D 80 FF 0F 84 44 01 00 00 
+
+==== BB 622 (0x3A9694FE) in 11720B, out 64029B, BBs exec'd 0 ====
+	0x3A9694FE:  movl 0xFFFFFF70(%ebp),%eax
+	0x3A969504:  movl -116(%ebp),%ecx
+	0x3A969507:  movl 0xFFFFFF74(%ebp),%edx
+	0x3A96950D:  subl %ecx,%eax
+	0x3A96950F:  movb $0x0, -1(%eax,%edx,1)
+	0x3A969514:  leal -108(%ebp), %eax
+	0x3A969517:  movl %eax,8(%esp,,)
+	0x3A96951B:  movl %edx,4(%esp,,)
+	0x3A96951F:  movl $0x3, (%esp,,)
+	0x3A969526:  call 0x3A974210
+. 622 3A9694FE 45
+. 8B 85 70 FF FF FF 8B 4D 8C 8B 95 74 FF FF FF 29 C8 C6 44 10 FF 00 8D 45 94 89 44 24 08 89 54 24 04 C7 04 24 03 00 00 00 E8 E5 AC 00 00 
+
+==== BB 623 __GI___xstat64(0x3A974210) in 11765B, out 64189B, BBs exec'd 0 ====
+	0x3A974210:  pushl %ebp
+	0x3A974211:  movl %esp,%ebp
+	0x3A974213:  subl $0x58, %esp
+	0x3A974216:  movl %ebx,-12(%ebp)
+	0x3A974219:  call 0x3A97592B
+. 623 3A974210 14
+. 55 89 E5 83 EC 58 89 5D F4 E8 0D 17 00 00 
+
+==== BB 624 (0x3A97421E) in 11779B, out 64287B, BBs exec'd 0 ====
+	0x3A97421E:  addl $0x444E, %ebx
+	0x3A974224:  movl %esi,-8(%ebp)
+	0x3A974227:  movl 0x24(%ebx),%esi
+	0x3A97422D:  movl %edi,-4(%ebp)
+	0x3A974230:  movl (%esi),%eax
+	0x3A974232:  testl %eax,%eax
+	0x3A974234:  jnz-8 0x3A9742B0
+. 624 3A97421E 24
+. 81 C3 4E 44 00 00 89 75 F8 8B B3 24 00 00 00 89 7D FC 8B 06 85 C0 75 7A 
+
+==== BB 625 (0x3A974236) in 11803B, out 64411B, BBs exec'd 0 ====
+	0x3A974236:  movl 0x148(%ebx),%edi
+	0x3A97423C:  movl 12(%ebp),%edx
+	0x3A97423F:  movl 16(%ebp),%ecx
+	0x3A974242:  xchgl %edx, %ebx
+	0x3A974244:  movl $0xC3,%eax
+	0x3A974249:  int $0x80
+. 625 3A974236 21
+. 8B BB 48 01 00 00 8B 55 0C 8B 4D 10 87 D3 B8 C3 00 00 00 CD 80 
+
+==== BB 626 (0x3A97424B) in 11824B, out 64504B, BBs exec'd 0 ====
+	0x3A97424B:  xchgl %edx, %ebx
+	0x3A97424D:  cmpl $0xFFFFF000, %eax
+	0x3A974252:  jnbe-32 0x3A974306
+. 626 3A97424B 13
+. 87 D3 3D 00 F0 FF FF 0F 87 AE 00 00 00 
+
+==== BB 627 (0x3A974306) in 11837B, out 64580B, BBs exec'd 0 ====
+	0x3A974306:  negl %eax
+	0x3A974308:  movl %eax,0x148(%ebx)
+	0x3A97430E:  movl $0xFFFFFFFF,%eax
+	0x3A974313:  jmp 0x3A974258
+. 627 3A974306 18
+. F7 D8 89 83 48 01 00 00 B8 FF FF FF FF E9 40 FF FF FF 
+
+==== BB 628 (0x3A974258) in 11855B, out 64649B, BBs exec'd 0 ====
+	0x3A974258:  cmpl $0xFFFFFFFF, %eax
+	0x3A97425B:  movl %eax,%edx
+	0x3A97425D:  jz-8 0x3A974290
+. 628 3A974258 7
+. 83 F8 FF 89 C2 74 31 
+
+==== BB 629 (0x3A974290) in 11862B, out 64716B, BBs exec'd 0 ====
+	0x3A974290:  cmpl $0x26, 0x148(%ebx)
+	0x3A974297:  jnz-8 0x3A97425F
+. 629 3A974290 9
+. 83 BB 48 01 00 00 26 75 C6 
+
+==== BB 630 (0x3A97425F) in 11871B, out 64782B, BBs exec'd 0 ====
+	0x3A97425F:  testl %edx,%edx
+	0x3A974261:  jnz-8 0x3A974280
+. 630 3A97425F 4
+. 85 D2 75 1D 
+
+==== BB 631 (0x3A974280) in 11875B, out 64841B, BBs exec'd 0 ====
+	0x3A974280:  movl -12(%ebp),%ebx
+	0x3A974283:  movl %edx,%eax
+	0x3A974285:  movl -8(%ebp),%esi
+	0x3A974288:  movl -4(%ebp),%edi
+	0x3A97428B:  movl %ebp,%esp
+	0x3A97428D:  popl %ebp
+	0x3A97428E:  ret
+. 631 3A974280 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 632 (0x3A96952B) in 11890B, out 64943B, BBs exec'd 0 ====
+	0x3A96952B:  testl %eax,%eax
+	0x3A96952D:  jnz-8 0x3A969542
+. 632 3A96952B 4
+. 85 C0 75 13 
+
+==== BB 633 (0x3A969542) in 11894B, out 65002B, BBs exec'd 0 ====
+	0x3A969542:  movl $0x1,%edx
+	0x3A969547:  movl $0x1,%eax
+	0x3A96954C:  movl %edx,20(%edi,%esi,4)
+	0x3A969550:  jmp 0x3A9693C8
+. 633 3A969542 19
+. BA 01 00 00 00 B8 01 00 00 00 89 54 B7 14 E9 73 FE FF FF 
+
+==== BB 634 (0x3A9693C8) in 11913B, out 65071B, BBs exec'd 0 ====
+	0x3A9693C8:  cmpl $0x2, %eax
+	0x3A9693CB:  setz %al
+	0x3A9693CE:  movzbl %al,%eax
+	0x3A9693D1:  orl %eax,0xFFFFFF68(%ebp)
+	0x3A9693D7:  cmpl $0xFFFFFFFF, -128(%ebp)
+	0x3A9693DB:  jz-32 0x3A969318
+. 634 3A9693C8 25
+. 83 F8 02 0F 94 C0 0F B6 C0 09 85 68 FF FF FF 83 7D 80 FF 0F 84 37 FF FF FF 
+
+==== BB 635 (0x3A969318) in 11938B, out 65187B, BBs exec'd 0 ====
+	0x3A969318:  incl %esi
+	0x3A969319:  cmpl $0xFFFFFFFF, -128(%ebp)
+	0x3A96931D:  leal 0(%esi), %esi
+	0x3A969320:  jnz-32 0x3A9694A3
+. 635 3A969318 14
+. 46 83 7D 80 FF 8D 76 00 0F 85 7D 01 00 00 
+
+==== BB 636 (0x3A969326) in 11952B, out 65268B, BBs exec'd 0 ====
+	0x3A969326:  cmpl 0xC8(%ebx),%esi
+	0x3A96932C:  jnb-32 0x3A969444
+. 636 3A969326 12
+. 3B B3 C8 00 00 00 0F 83 12 01 00 00 
+
+==== BB 637 (0x3A974263) in 11964B, out 65336B, BBs exec'd 0 ====
+	0x3A974263:  movl 16(%ebp),%ecx
+	0x3A974266:  movl 88(%ecx),%eax
+	0x3A974269:  cmpl %eax,12(%ecx)
+	0x3A97426C:  jz-8 0x3A974280
+. 637 3A974263 11
+. 8B 4D 10 8B 41 58 39 41 0C 74 12 
+
+==== BB 638 (0x3A96952F) in 11975B, out 65424B, BBs exec'd 0 ====
+	0x3A96952F:  movl -92(%ebp),%eax
+	0x3A969532:  andl $0xF000, %eax
+	0x3A969537:  cmpl $0x4000, %eax
+	0x3A96953C:  jz-32 0x3A9693BA
+. 638 3A96952F 19
+. 8B 45 A4 25 00 F0 00 00 3D 00 40 00 00 0F 84 78 FE FF FF 
+
+==== BB 639 (0x3A9693BA) in 11994B, out 65508B, BBs exec'd 0 ====
+	0x3A9693BA:  movl $0x2,%eax
+	0x3A9693BF:  movl %eax,20(%edi,%esi,4)
+	0x3A9693C3:  movl $0x2,%eax
+	0x3A9693C8:  cmpl $0x2, %eax
+	0x3A9693CB:  setz %al
+	0x3A9693CE:  movzbl %al,%eax
+	0x3A9693D1:  orl %eax,0xFFFFFF68(%ebp)
+	0x3A9693D7:  cmpl $0xFFFFFFFF, -128(%ebp)
+	0x3A9693DB:  jz-32 0x3A969318
+. 639 3A9693BA 39
+. B8 02 00 00 00 89 44 B7 14 B8 02 00 00 00 83 F8 02 0F 94 C0 0F B6 C0 09 85 68 FF FF FF 83 7D 80 FF 0F 84 37 FF FF FF 
+
+==== BB 640 (0x3A969444) in 12033B, out 65663B, BBs exec'd 0 ====
+	0x3A969444:  cmpl $0xFFFFFFFF, -128(%ebp)
+	0x3A969448:  jnz-8 0x3A9694A3
+. 640 3A969444 6
+. 83 7D 80 FF 75 59 
+
+==== BB 641 (0x3A96944A) in 12039B, out 65726B, BBs exec'd 0 ====
+	0x3A96944A:  movl 0xFFFFFF68(%ebp),%eax
+	0x3A969450:  testl %eax,%eax
+	0x3A969452:  jz-8 0x3A969469
+. 641 3A96944A 10
+. 8B 85 68 FF FF FF 85 C0 74 15 
+
+==== BB 642 (0x3A969454) in 12049B, out 65801B, BBs exec'd 0 ====
+	0x3A969454:  movl 0x148(%ebx),%edx
+	0x3A96945A:  cmpl $0x2, %edx
+	0x3A96945D:  jz-8 0x3A969469
+. 642 3A969454 11
+. 8B 93 48 01 00 00 83 FA 02 74 0A 
+
+==== BB 643 (0x3A969469) in 12060B, out 65874B, BBs exec'd 0 ====
+	0x3A969469:  addl $0x4, -124(%ebp)
+	0x3A96946D:  movl 0xFFFFFF68(%ebp),%ecx
+	0x3A969473:  orl %ecx,0xFFFFFF78(%ebp)
+	0x3A969479:  movl -124(%ebp),%edx
+	0x3A96947C:  movl (%edx),%eax
+	0x3A96947E:  testl %eax,%eax
+	0x3A969480:  movl %eax,%edi
+	0x3A969482:  jnz-32 0x3A9692C0
+. 643 3A969469 31
+. 83 45 84 04 8B 8D 68 FF FF FF 09 8D 78 FF FF FF 8B 55 84 8B 02 85 C0 89 C7 0F 85 38 FE FF FF 
+
+==== BB 644 (0x3A969488) in 12091B, out 66013B, BBs exec'd 0 ====
+	0x3A969488:  movl 0xFFFFFF78(%ebp),%eax
+	0x3A96948E:  testl %eax,%eax
+	0x3A969490:  jz-32 0x3A969732
+. 644 3A969488 14
+. 8B 85 78 FF FF FF 85 C0 0F 84 9C 02 00 00 
+
+==== BB 645 (0x3A969496) in 12105B, out 66088B, BBs exec'd 0 ====
+	0x3A969496:  movl $0xFFFFFFFF,%eax
+	0x3A96949B:  leal -12(%ebp), %esp
+	0x3A96949E:  popl %ebx
+	0x3A96949F:  popl %esi
+	0x3A9694A0:  popl %edi
+	0x3A9694A1:  popl %ebp
+	0x3A9694A2:  ret
+. 645 3A969496 13
+. B8 FF FF FF FF 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 646 (0x3A96B2B3) in 12118B, out 66202B, BBs exec'd 0 ====
+	0x3A96B2B3:  movl %eax,0xFFFFFDCC(%ebp)
+	0x3A96B2B9:  jmp 0x3A96B21F
+. 646 3A96B2B3 11
+. 89 85 CC FD FF FF E9 61 FF FF FF 
+
+==== BB 647 (0x3A96B21F) in 12129B, out 66249B, BBs exec'd 0 ====
+	0x3A96B21F:  cmpl $0xFFFFFFFF, 0xFFFFFDCC(%ebp)
+	0x3A96B226:  movl 0xFFFFFDD8(%ebp),%edx
+	0x3A96B22C:  setz %al
+	0x3A96B22F:  testl %edx,%edx
+	0x3A96B231:  setnz %dl
+	0x3A96B234:  andl %edx,%eax
+	0x3A96B236:  testb $0x1, %al
+	0x3A96B238:  jnz-32 0x3A96B31F
+. 647 3A96B21F 31
+. 83 BD CC FD FF FF FF 8B 95 D8 FD FF FF 0F 94 C0 85 D2 0F 95 C2 21 D0 A8 01 0F 85 E1 00 00 00 
+
+==== BB 648 (0x3A96B31F) in 12160B, out 66398B, BBs exec'd 0 ====
+	0x3A96B31F:  movl 0xFFFFFDD8(%ebp),%esi
+	0x3A96B325:  leal 0xFFFFF00F(%ebx), %eax
+	0x3A96B32B:  movl $0x1D,%ecx
+	0x3A96B330:  movl %eax,(%esp,,)
+	0x3A96B333:  movl 0xFFFFFDD8(%ebp),%eax
+	0x3A96B339:  addl $0x1CC, %esi
+	0x3A96B33F:  movl %esi,%edx
+	0x3A96B341:  call 0x3A96A890
+. 648 3A96B31F 39
+. 8B B5 D8 FD FF FF 8D 83 0F F0 FF FF B9 1D 00 00 00 89 04 24 8B 85 D8 FD FF FF 81 C6 CC 01 00 00 89 F2 E8 4A F5 FF FF 
+
+==== BB 649 (0x3A96B346) in 12199B, out 66531B, BBs exec'd 0 ====
+	0x3A96B346:  testb %al,%al
+	0x3A96B348:  jz-32 0x3A96B23E
+. 649 3A96B346 8
+. 84 C0 0F 84 F0 FE FF FF 
+
+==== BB 650 (0x3A96B23E) in 12207B, out 66592B, BBs exec'd 0 ====
+	0x3A96B23E:  cmpl $0xFFFFFFFF, 0xFFFFFDCC(%ebp)
+	0x3A96B245:  jz-32 0x3A96B387
+. 650 3A96B23E 13
+. 83 BD CC FD FF FF FF 0F 84 3C 01 00 00 
+
+==== BB 651 (0x3A96B387) in 12220B, out 66658B, BBs exec'd 0 ====
+	0x3A96B387:  movl 0xFFFFFDD0(%ebp),%eax
+	0x3A96B38D:  testl %eax,%eax
+	0x3A96B38F:  jnz-32 0x3A96B4FE
+. 651 3A96B387 14
+. 8B 85 D0 FD FF FF 85 C0 0F 85 69 01 00 00 
+
+==== BB 652 (0x3A96B395) in 12234B, out 66733B, BBs exec'd 0 ====
+	0x3A96B395:  movl 0xFFFFFDD4(%ebp),%eax
+	0x3A96B39B:  call 0x3A96B720
+. 652 3A96B395 11
+. 8B 85 D4 FD FF FF E8 80 03 00 00 
+
+==== BB 653 _dl_load_cache_lookup(0x3A96B720) in 12245B, out 66797B, BBs exec'd 0 ====
+	0x3A96B720:  pushl %ebp
+	0x3A96B721:  movl %esp,%ebp
+	0x3A96B723:  pushl %edi
+	0x3A96B724:  pushl %esi
+	0x3A96B725:  pushl %ebx
+	0x3A96B726:  subl $0x64, %esp
+	0x3A96B729:  call 0x3A97592B
+. 653 3A96B720 14
+. 55 89 E5 57 56 53 83 EC 64 E8 FD A1 00 00 
+
+==== BB 654 (0x3A96B72E) in 12259B, out 66930B, BBs exec'd 0 ====
+	0x3A96B72E:  addl $0xCF3E, %ebx
+	0x3A96B734:  testb $0x1, 0xFFFFFC14(%ebx)
+	0x3A96B73B:  movl %eax,-16(%ebp)
+	0x3A96B73E:  jnz-32 0x3A96BBA2
+. 654 3A96B72E 22
+. 81 C3 3E CF 00 00 F6 83 14 FC FF FF 01 89 45 F0 0F 85 5E 04 00 00 
+
+==== BB 655 (0x3A96B744) in 12281B, out 67029B, BBs exec'd 0 ====
+	0x3A96B744:  movl 0xDC(%ebx),%ecx
+	0x3A96B74A:  testl %ecx,%ecx
+	0x3A96B74C:  jz-32 0x3A96B872
+. 655 3A96B744 14
+. 8B 8B DC 00 00 00 85 C9 0F 84 20 01 00 00 
+
+==== BB 656 (0x3A96B872) in 12295B, out 67104B, BBs exec'd 0 ====
+	0x3A96B872:  movl $0x1,%ecx
+	0x3A96B877:  leal 0xE4(%ebx), %edx
+	0x3A96B87D:  leal 0xFFFFF072(%ebx), %eax
+	0x3A96B883:  call 0x3A971070
+. 656 3A96B872 22
+. B9 01 00 00 00 8D 93 E4 00 00 00 8D 83 72 F0 FF FF E8 E8 57 00 00 
+
+==== BB 657 (0x3A9710AD) in 12317B, out 67190B, BBs exec'd 0 ====
+	0x3A9710AD:  movl %esi,4(%esp,,)
+	0x3A9710B1:  leal -108(%ebp), %eax
+	0x3A9710B4:  movl %eax,8(%esp,,)
+	0x3A9710B8:  movl $0x3, (%esp,,)
+	0x3A9710BF:  call 0x3A974320
+. 657 3A9710AD 23
+. 89 74 24 04 8D 45 94 89 44 24 08 C7 04 24 03 00 00 00 E8 5C 32 00 00 
+
+==== BB 658 (0x3A9710C4) in 12340B, out 67283B, BBs exec'd 0 ====
+	0x3A9710C4:  testl %eax,%eax
+	0x3A9710C6:  js-8 0x3A9710D1
+. 658 3A9710C4 4
+. 85 C0 78 09 
+
+==== BB 659 (0x3A9710C8) in 12344B, out 67342B, BBs exec'd 0 ====
+	0x3A9710C8:  movl -64(%ebp),%eax
+	0x3A9710CB:  testl %eax,%eax
+	0x3A9710CD:  movl %eax,(%edi)
+	0x3A9710CF:  jnz-8 0x3A9710F0
+. 659 3A9710C8 9
+. 8B 45 C0 85 C0 89 07 75 1F 
+
+==== BB 660 (0x3A9710F0) in 12353B, out 67424B, BBs exec'd 0 ====
+	0x3A9710F0:  movl %esi,16(%esp,,)
+	0x3A9710F4:  xorl %ecx, %ecx
+	0x3A9710F6:  movl $0x2,%edx
+	0x3A9710FB:  movl %ecx,20(%esp,,)
+	0x3A9710FF:  movl %edx,12(%esp,,)
+	0x3A971103:  movl -112(%ebp),%edx
+	0x3A971106:  movl %eax,4(%esp,,)
+	0x3A97110A:  movl $0x0, (%esp,,)
+	0x3A971111:  movl %edx,8(%esp,,)
+	0x3A971115:  call 0x3A974C50
+. 660 3A9710F0 42
+. 89 74 24 10 31 C9 BA 02 00 00 00 89 4C 24 14 89 54 24 0C 8B 55 90 89 44 24 04 C7 04 24 00 00 00 00 89 54 24 08 E8 36 3B 00 00 
+
+==== BB 661 (0x3A97111A) in 12395B, out 67583B, BBs exec'd 0 ====
+	0x3A97111A:  movl %eax,-116(%ebp)
+	0x3A97111D:  jmp-8 0x3A9710D1
+. 661 3A97111A 5
+. 89 45 8C EB B2 
+
+==== BB 662 (0x3A9710D1) in 12400B, out 67627B, BBs exec'd 0 ====
+	0x3A9710D1:  movl %esi,(%esp,,)
+	0x3A9710D4:  call 0x3A974470
+. 662 3A9710D1 8
+. 89 34 24 E8 97 33 00 00 
+
+==== BB 663 (0x3A96B888) in 12408B, out 67682B, BBs exec'd 0 ====
+	0x3A96B888:  movl %eax,-40(%ebp)
+	0x3A96B88B:  incl %eax
+	0x3A96B88C:  jz-32 0x3A96BA05
+. 663 3A96B888 10
+. 89 45 D8 40 0F 84 73 01 00 00 
+
+==== BB 664 (0x3A96B892) in 12418B, out 67757B, BBs exec'd 0 ====
+	0x3A96B892:  movl 0xE4(%ebx),%eax
+	0x3A96B898:  cmpl $0x10, %eax
+	0x3A96B89B:  movl %eax,-96(%ebp)
+	0x3A96B89E:  jbe-32 0x3A96B9B2
+. 664 3A96B892 18
+. 8B 83 E4 00 00 00 83 F8 10 89 45 A0 0F 86 0E 01 00 00 
+
+==== BB 665 (0x3A96B8A4) in 12436B, out 67846B, BBs exec'd 0 ====
+	0x3A96B8A4:  cld
+	0x3A96B8A5:  movl -40(%ebp),%esi
+	0x3A96B8A8:  movl $0xB,%edx
+	0x3A96B8AD:  leal 0xFFFFF083(%ebx), %edi
+	0x3A96B8B3:  movl %edx,%ecx
+	0x3A96B8B5:  repe cmpsb
+. 665 3A96B8A4 19
+. FC 8B 75 D8 BA 0B 00 00 00 8D BB 83 F0 FF FF 89 D1 F3 A6 
+
+==== BB 666 (0x3A96B8B5) in 12455B, out 68019B, BBs exec'd 0 ====
+	0x3A96B8B5:  repe cmpsb
+. 666 3A96B8B5 2
+. F3 A6 
+
+==== BB 667 (0x3A96B8B7) in 12457B, out 68144B, BBs exec'd 0 ====
+	0x3A96B8B7:  jnz-32 0x3A96B9B2
+. 667 3A96B8B7 6
+. 0F 85 F5 00 00 00 
+
+==== BB 668 (0x3A96B8BD) in 12463B, out 68191B, BBs exec'd 0 ====
+	0x3A96B8BD:  movl -40(%ebp),%eax
+	0x3A96B8C0:  movl %eax,%edx
+	0x3A96B8C2:  movl %eax,0xDC(%ebx)
+	0x3A96B8C8:  movl 12(%eax),%eax
+	0x3A96B8CB:  leal (%eax,%eax,2), %eax
+	0x3A96B8CE:  leal 0x10(,%eax,4), %eax
+	0x3A96B8D5:  addl %eax,%edx
+	0x3A96B8D7:  addl $0x30, %eax
+	0x3A96B8DA:  cmpl %eax,-96(%ebp)
+	0x3A96B8DD:  jb-8 0x3A96B905
+. 668 3A96B8BD 34
+. 8B 45 D8 89 C2 89 83 DC 00 00 00 8B 40 0C 8D 04 40 8D 04 85 10 00 00 00 01 C2 83 C0 30 39 45 A0 72 26 
+
+==== BB 669 (0x3A96B8DF) in 12497B, out 68345B, BBs exec'd 0 ====
+	0x3A96B8DF:  movl %edx,0xE0(%ebx)
+	0x3A96B8E5:  movl $0x14,%ecx
+	0x3A96B8EA:  movl %edx,%esi
+	0x3A96B8EC:  leal 0xFFFFF08F(%ebx), %edi
+	0x3A96B8F2:  repe cmpsb
+. 669 3A96B8DF 21
+. 89 93 E0 00 00 00 B9 14 00 00 00 89 D6 8D BB 8F F0 FF FF F3 A6 
+
+==== BB 670 (0x3A96B8F2) in 12518B, out 68504B, BBs exec'd 0 ====
+	0x3A96B8F2:  repe cmpsb
+. 670 3A96B8F2 2
+. F3 A6 
+
+==== BB 671 (0x3A96B8F4) in 12520B, out 68629B, BBs exec'd 0 ====
+	0x3A96B8F4:  setnbe %dl
+	0x3A96B8F7:  setb %al
+	0x3A96B8FA:  movl -40(%ebp),%ecx
+	0x3A96B8FD:  cmpb %al,%dl
+	0x3A96B8FF:  jz-32 0x3A96B752
+. 671 3A96B8F4 17
+. 0F 97 C2 0F 92 C0 8B 4D D8 38 C2 0F 84 4D FE FF FF 
+
+==== BB 672 (0x3A96B752) in 12537B, out 68729B, BBs exec'd 0 ====
+	0x3A96B752:  xorl %eax, %eax
+	0x3A96B754:  cmpl $0xFFFFFFFF, %ecx
+	0x3A96B757:  jz-32 0x3A96BB5C
+. 672 3A96B752 11
+. 31 C0 83 F9 FF 0F 84 FF 03 00 00 
+
+==== BB 673 (0x3A96B75D) in 12548B, out 68800B, BBs exec'd 0 ====
+	0x3A96B75D:  movl $0x0, -36(%ebp)
+	0x3A96B764:  movl 0xE0(%ebx),%eax
+	0x3A96B76A:  cmpl $0xFFFFFFFF, %eax
+	0x3A96B76D:  movl %eax,-44(%ebp)
+	0x3A96B770:  movl %eax,%edi
+	0x3A96B772:  jz-32 0x3A96B918
+. 673 3A96B75D 27
+. C7 45 DC 00 00 00 00 8B 83 E0 00 00 00 83 F8 FF 89 45 D4 89 C7 0F 84 A0 01 00 00 
+
+==== BB 674 (0x3A96B778) in 12575B, out 68911B, BBs exec'd 0 ====
+	0x3A96B778:  movl 0xE4(%ebx),%edx
+	0x3A96B77E:  addl %edx,%ecx
+	0x3A96B780:  movl 0xFFFFFC20(%ebx),%edx
+	0x3A96B786:  subl %eax,%ecx
+	0x3A96B788:  movl %ecx,-32(%ebp)
+	0x3A96B78B:  testl %edx,%edx
+	0x3A96B78D:  movl %edx,-56(%ebp)
+	0x3A96B790:  jz-8 0x3A96B7C3
+. 674 3A96B778 26
+. 8B 93 E4 00 00 00 01 D1 8B 93 20 FC FF FF 29 C1 89 4D E0 85 D2 89 55 C8 74 31 
+
+==== BB 675 (0x3A96B792) in 12601B, out 69041B, BBs exec'd 0 ====
+	0x3A96B792:  movl $0x0, -60(%ebp)
+	0x3A96B799:  leal 0xFFFFFD5C(%ebx), %esi
+	0x3A96B79F:  nop
+	0x3A96B7A0:  movl %esi,4(%esp,,)
+	0x3A96B7A4:  movl -56(%ebp),%ecx
+	0x3A96B7A7:  movl %ecx,(%esp,,)
+	0x3A96B7AA:  call 0x3A975280
+. 675 3A96B792 29
+. C7 45 C4 00 00 00 00 8D B3 5C FD FF FF 90 89 74 24 04 8B 4D C8 89 0C 24 E8 D1 9A 00 00 
+
+==== BB 676 (0x3A96B7AF) in 12630B, out 69154B, BBs exec'd 0 ====
+	0x3A96B7AF:  testl %eax,%eax
+	0x3A96B7B1:  jz-32 0x3A96BBE0
+. 676 3A96B7AF 8
+. 85 C0 0F 84 29 04 00 00 
+
+==== BB 677 (0x3A96B7B7) in 12638B, out 69213B, BBs exec'd 0 ====
+	0x3A96B7B7:  incl -60(%ebp)
+	0x3A96B7BA:  addl $0x5, %esi
+	0x3A96B7BD:  cmpl $0x3, -60(%ebp)
+	0x3A96B7C1:  jle-8 0x3A96B7A0
+. 677 3A96B7B7 12
+. FF 45 C4 83 C6 05 83 7D C4 03 7E DD 
+
+==== BB 678 (0x3A96B7A0) in 12650B, out 69303B, BBs exec'd 0 ====
+	0x3A96B7A0:  movl %esi,4(%esp,,)
+	0x3A96B7A4:  movl -56(%ebp),%ecx
+	0x3A96B7A7:  movl %ecx,(%esp,,)
+	0x3A96B7AA:  call 0x3A975280
+. 678 3A96B7A0 15
+. 89 74 24 04 8B 4D C8 89 0C 24 E8 D1 9A 00 00 
+
+==== BB 679 (0x3A9752B7) in 12665B, out 69384B, BBs exec'd 0 ====
+	0x3A9752B7:  movzbl -5(%ebp),%edx
+	0x3A9752BB:  negl %edx
+	0x3A9752BD:  movl %edx,%eax
+	0x3A9752BF:  popl %edx
+	0x3A9752C0:  popl %esi
+	0x3A9752C1:  popl %ebp
+	0x3A9752C2:  ret
+. 679 3A9752B7 12
+. 0F B6 55 FB F7 DA 89 D0 5A 5E 5D C3 
+
+==== BB 680 (0x3A96BBE0) in 12677B, out 69490B, BBs exec'd 0 ====
+	0x3A96BBE0:  movl -60(%ebp),%eax
+	0x3A96BBE3:  addl $0x30, %eax
+	0x3A96BBE6:  jmp 0x3A96B7C8
+. 680 3A96BBE0 11
+. 8B 45 C4 83 C0 30 E9 DD FB FF FF 
+
+==== BB 681 (0x3A96B7C8) in 12688B, out 69545B, BBs exec'd 0 ====
+	0x3A96B7C8:  movl %eax,-52(%ebp)
+	0x3A96B7CB:  cdqq
+	0x3A96B7CC:  movl -52(%ebp),%eax
+	0x3A96B7CF:  movl %edx,-48(%ebp)
+	0x3A96B7D2:  movl -48(%ebp),%esi
+	0x3A96B7D5:  andl %esi,%eax
+	0x3A96B7D7:  incl %eax
+	0x3A96B7D8:  jz-8 0x3A96B7F9
+. 681 3A96B7C8 18
+. 89 45 CC 99 8B 45 CC 89 55 D0 8B 75 D0 21 F0 40 74 1F 
+
+==== BB 682 (0x3A96B7DA) in 12706B, out 69665B, BBs exec'd 0 ====
+	0x3A96B7DA:  movzbl -52(%ebp),%ecx
+	0x3A96B7DE:  movl $0x1,%eax
+	0x3A96B7E3:  xorl %edx, %edx
+	0x3A96B7E5:  shldl %cl, %eax, %edx
+	0x3A96B7E8:  shll %cl, %eax
+	0x3A96B7EA:  testb $0x20, %cl
+	0x3A96B7ED:  jz-8 0x3A96B7F3
+. 682 3A96B7DA 21
+. 0F B6 4D CC B8 01 00 00 00 31 D2 0F A5 C2 D3 E0 F6 C1 20 74 04 
+
+==== BB 683 (0x3A96B7EF) in 12727B, out 69815B, BBs exec'd 0 ====
+	0x3A96B7EF:  movl %eax,%edx
+	0x3A96B7F1:  xorl %eax, %eax
+	0x3A96B7F3:  movl %eax,-52(%ebp)
+	0x3A96B7F6:  movl %edx,-48(%ebp)
+	0x3A96B7F9:  movl $0x0, -20(%ebp)
+	0x3A96B800:  movl -44(%ebp),%edx
+	0x3A96B803:  movl 20(%edx),%ecx
+	0x3A96B806:  decl %ecx
+	0x3A96B807:  cmpl %ecx,-20(%ebp)
+	0x3A96B80A:  movl %ecx,-24(%ebp)
+	0x3A96B80D:  jle-8 0x3A96B82A
+. 683 3A96B7EF 32
+. 89 C2 31 C0 89 45 CC 89 55 D0 C7 45 EC 00 00 00 00 8B 55 D4 8B 4A 14 49 39 4D EC 89 4D E8 7E 1B 
+
+==== BB 684 (0x3A96B82A) in 12759B, out 69977B, BBs exec'd 0 ====
+	0x3A96B82A:  movl -20(%ebp),%eax
+	0x3A96B82D:  movl -24(%ebp),%ecx
+	0x3A96B830:  addl %ecx,%eax
+	0x3A96B832:  movl %eax,%edx
+	0x3A96B834:  shrl $0x1F, %edx
+	0x3A96B837:  leal (%edx,%eax,1), %esi
+	0x3A96B83A:  sarl $0x1, %esi
+	0x3A96B83C:  leal (%esi,%esi,2), %eax
+	0x3A96B83F:  shll $0x3, %eax
+	0x3A96B842:  movl %eax,-64(%ebp)
+	0x3A96B845:  movl 52(%edi,%eax,1),%eax
+	0x3A96B849:  cmpl -32(%ebp),%eax
+	0x3A96B84C:  jnb-32 0x3A96BB50
+. 684 3A96B82A 40
+. 8B 45 EC 8B 4D E8 01 C8 89 C2 C1 EA 1F 8D 34 02 D1 FE 8D 04 76 C1 E0 03 89 45 C0 8B 44 07 34 3B 45 E0 0F 83 FE 02 00 00 
+
+==== BB 685 (0x3A96B852) in 12799B, out 70151B, BBs exec'd 0 ====
+	0x3A96B852:  movl -44(%ebp),%edx
+	0x3A96B855:  addl %eax,%edx
+	0x3A96B857:  movl -16(%ebp),%eax
+	0x3A96B85A:  call 0x3A96B5B0
+. 685 3A96B852 13
+. 8B 55 D4 01 C2 8B 45 F0 E8 51 FD FF FF 
+
+==== BB 686 _dl_cache_libcmp(0x3A96B5B0) in 12812B, out 70236B, BBs exec'd 0 ====
+	0x3A96B5B0:  pushl %ebp
+	0x3A96B5B1:  movl %esp,%ebp
+	0x3A96B5B3:  pushl %edi
+	0x3A96B5B4:  movl %eax,%edi
+	0x3A96B5B6:  pushl %esi
+	0x3A96B5B7:  subl $0xC, %esp
+	0x3A96B5BA:  movl %edx,-12(%ebp)
+	0x3A96B5BD:  movzbl (%eax),%eax
+	0x3A96B5C0:  testb %al,%al
+	0x3A96B5C2:  movb %al,-17(%ebp)
+	0x3A96B5C5:  jz-32 0x3A96B6AA
+. 686 3A96B5B0 27
+. 55 89 E5 57 89 C7 56 83 EC 0C 89 55 F4 0F B6 00 84 C0 88 45 EF 0F 84 DF 00 00 00 
+
+==== BB 687 (0x3A96B5CB) in 12839B, out 70417B, BBs exec'd 0 ====
+	0x3A96B5CB:  movzbl (%edx),%ecx
+	0x3A96B5CE:  movzbl -17(%ebp),%eax
+	0x3A96B5D2:  subb $0x30, %al
+	0x3A96B5D4:  cmpb $0x9, %al
+	0x3A96B5D6:  jnbe-32 0x3A96B686
+. 687 3A96B5CB 17
+. 0F B6 0A 0F B6 45 EF 2C 30 3C 09 0F 87 AA 00 00 00 
+
+==== BB 688 (0x3A96B686) in 12856B, out 70520B, BBs exec'd 0 ====
+	0x3A96B686:  movb %cl,%al
+	0x3A96B688:  movl $0xFFFFFFFF,%edx
+	0x3A96B68D:  subb $0x30, %al
+	0x3A96B68F:  cmpb $0x9, %al
+	0x3A96B691:  jbe-8 0x3A96B6B9
+. 688 3A96B686 13
+. 88 C8 BA FF FF FF FF 2C 30 3C 09 76 26 
+
+==== BB 689 (0x3A96B693) in 12869B, out 70615B, BBs exec'd 0 ====
+	0x3A96B693:  cmpb %cl,-17(%ebp)
+	0x3A96B696:  jnz-8 0x3A96B6B0
+. 689 3A96B693 5
+. 38 4D EF 75 18 
+
+==== BB 690 (0x3A96B698) in 12874B, out 70678B, BBs exec'd 0 ====
+	0x3A96B698:  incl -12(%ebp)
+	0x3A96B69B:  incl %edi
+	0x3A96B69C:  movl -12(%ebp),%eax
+	0x3A96B69F:  movzbl (%edi),%ecx
+	0x3A96B6A2:  movb %cl,-17(%ebp)
+	0x3A96B6A5:  movzbl (%eax),%ecx
+	0x3A96B6A8:  jmp-8 0x3A96B672
+. 690 3A96B698 18
+. FF 45 F4 47 8B 45 F4 0F B6 0F 88 4D EF 0F B6 08 EB C8 
+
+==== BB 691 (0x3A96B672) in 12892B, out 70788B, BBs exec'd 0 ====
+	0x3A96B672:  cmpb $0x0, -17(%ebp)
+	0x3A96B676:  jz-8 0x3A96B6B0
+. 691 3A96B672 6
+. 80 7D EF 00 74 38 
+
+==== BB 692 (0x3A96B678) in 12898B, out 70851B, BBs exec'd 0 ====
+	0x3A96B678:  movzbl -17(%ebp),%eax
+	0x3A96B67C:  subb $0x30, %al
+	0x3A96B67E:  cmpb $0x9, %al
+	0x3A96B680:  jbe-32 0x3A96B5E0
+. 692 3A96B678 14
+. 0F B6 45 EF 2C 30 3C 09 0F 86 5A FF FF FF 
+
+==== BB 693 (0x3A96B6B0) in 12912B, out 70939B, BBs exec'd 0 ====
+	0x3A96B6B0:  movsbl -17(%ebp),%edx
+	0x3A96B6B4:  movsbl %cl,%eax
+	0x3A96B6B7:  subl %eax,%edx
+	0x3A96B6B9:  addl $0xC, %esp
+	0x3A96B6BC:  movl %edx,%eax
+	0x3A96B6BE:  popl %esi
+	0x3A96B6BF:  popl %edi
+	0x3A96B6C0:  popl %ebp
+	0x3A96B6C1:  ret
+. 693 3A96B6B0 18
+. 0F BE 55 EF 0F BE C1 29 C2 83 C4 0C 89 D0 5E 5F 5D C3 
+
+==== BB 694 (0x3A96B85F) in 12930B, out 71078B, BBs exec'd 0 ====
+	0x3A96B85F:  testl %eax,%eax
+	0x3A96B861:  jz-32 0x3A96BC8C
+. 694 3A96B85F 8
+. 85 C0 0F 84 25 04 00 00 
+
+==== BB 695 (0x3A96B867) in 12938B, out 71137B, BBs exec'd 0 ====
+	0x3A96B867:  testl %eax,%eax
+	0x3A96B869:  jns-8 0x3A96B814
+. 695 3A96B867 4
+. 85 C0 79 A9 
+
+==== BB 696 (0x3A96B86B) in 12942B, out 71196B, BBs exec'd 0 ====
+	0x3A96B86B:  incl %esi
+	0x3A96B86C:  movl %esi,-20(%ebp)
+	0x3A96B86F:  nop
+	0x3A96B870:  jmp-8 0x3A96B818
+. 696 3A96B86B 7
+. 46 89 75 EC 90 EB A6 
+
+==== BB 697 (0x3A96B818) in 12949B, out 71260B, BBs exec'd 0 ====
+	0x3A96B818:  movl -24(%ebp),%ecx
+	0x3A96B81B:  cmpl %ecx,-20(%ebp)
+	0x3A96B81E:  jnle-32 0x3A96BB50
+. 697 3A96B818 12
+. 8B 4D E8 39 4D EC 0F 8F 2C 03 00 00 
+
+==== BB 698 (0x3A96B824) in 12961B, out 71335B, BBs exec'd 0 ====
+	0x3A96B824:  movl 0xE0(%ebx),%edi
+	0x3A96B82A:  movl -20(%ebp),%eax
+	0x3A96B82D:  movl -24(%ebp),%ecx
+	0x3A96B830:  addl %ecx,%eax
+	0x3A96B832:  movl %eax,%edx
+	0x3A96B834:  shrl $0x1F, %edx
+	0x3A96B837:  leal (%edx,%eax,1), %esi
+	0x3A96B83A:  sarl $0x1, %esi
+	0x3A96B83C:  leal (%esi,%esi,2), %eax
+	0x3A96B83F:  shll $0x3, %eax
+	0x3A96B842:  movl %eax,-64(%ebp)
+	0x3A96B845:  movl 52(%edi,%eax,1),%eax
+	0x3A96B849:  cmpl -32(%ebp),%eax
+	0x3A96B84C:  jnb-32 0x3A96BB50
+. 698 3A96B824 46
+. 8B BB E0 00 00 00 8B 45 EC 8B 4D E8 01 C8 89 C2 C1 EA 1F 8D 34 02 D1 FE 8D 04 76 C1 E0 03 89 45 C0 8B 44 07 34 3B 45 E0 0F 83 FE 02 00 00 
+
+==== BB 699 (0x3A96B814) in 13007B, out 71525B, BBs exec'd 0 ====
+	0x3A96B814:  decl %esi
+	0x3A96B815:  movl %esi,-24(%ebp)
+	0x3A96B818:  movl -24(%ebp),%ecx
+	0x3A96B81B:  cmpl %ecx,-20(%ebp)
+	0x3A96B81E:  jnle-32 0x3A96BB50
+. 699 3A96B814 16
+. 4E 89 75 E8 8B 4D E8 39 4D EC 0F 8F 2C 03 00 00 
+
+==== BB 700 (0x3A96B5E0) in 13023B, out 71621B, BBs exec'd 0 ====
+	0x3A96B5E0:  movb %cl,%al
+	0x3A96B5E2:  movl $0x1,%edx
+	0x3A96B5E7:  subb $0x30, %al
+	0x3A96B5E9:  cmpb $0x9, %al
+	0x3A96B5EB:  jnbe-32 0x3A96B6B9
+. 700 3A96B5E0 17
+. 88 C8 BA 01 00 00 00 2C 30 3C 09 0F 87 C8 00 00 00 
+
+==== BB 701 (0x3A96B5F1) in 13040B, out 71716B, BBs exec'd 0 ====
+	0x3A96B5F1:  movsbl -17(%ebp),%eax
+	0x3A96B5F5:  incl %edi
+	0x3A96B5F6:  incl -12(%ebp)
+	0x3A96B5F9:  subl $0x30, %eax
+	0x3A96B5FC:  movl %eax,-16(%ebp)
+	0x3A96B5FF:  movsbl %cl,%eax
+	0x3A96B602:  leal -48(%eax), %esi
+	0x3A96B605:  movzbl (%edi),%ecx
+	0x3A96B608:  movb %cl,%al
+	0x3A96B60A:  movb %cl,-17(%ebp)
+	0x3A96B60D:  subb $0x30, %al
+	0x3A96B60F:  movb %cl,%dl
+	0x3A96B611:  cmpb $0x9, %al
+	0x3A96B613:  jnbe-8 0x3A96B63F
+. 701 3A96B5F1 36
+. 0F BE 45 EF 47 FF 45 F4 83 E8 30 89 45 F0 0F BE C1 8D 70 D0 0F B6 0F 88 C8 88 4D EF 2C 30 88 CA 3C 09 77 2A 
+
+==== BB 702 (0x3A96B63F) in 13076B, out 71924B, BBs exec'd 0 ====
+	0x3A96B63F:  movl -12(%ebp),%eax
+	0x3A96B642:  movzbl (%eax),%ecx
+	0x3A96B645:  movb %cl,%al
+	0x3A96B647:  movb %cl,%dl
+	0x3A96B649:  subb $0x30, %al
+	0x3A96B64B:  cmpb $0x9, %al
+	0x3A96B64D:  jnbe-8 0x3A96B66D
+. 702 3A96B63F 16
+. 8B 45 F4 0F B6 08 88 C8 88 CA 2C 30 3C 09 77 1E 
+
+==== BB 703 (0x3A96B66D) in 13092B, out 72050B, BBs exec'd 0 ====
+	0x3A96B66D:  cmpl %esi,-16(%ebp)
+	0x3A96B670:  jnz-8 0x3A96B6C2
+. 703 3A96B66D 5
+. 39 75 F0 75 50 
+
+==== BB 704 (0x3A96BC8C) in 13097B, out 72113B, BBs exec'd 0 ====
+	0x3A96BC8C:  movl %esi,-20(%ebp)
+	0x3A96BC8F:  testl %esi,%esi
+	0x3A96BC91:  jle-32 0x3A96BA2A
+. 704 3A96BC8C 11
+. 89 75 EC 85 F6 0F 8E 93 FD FF FF 
+
+==== BB 705 (0x3A96BC97) in 13108B, out 72185B, BBs exec'd 0 ====
+	0x3A96BC97:  movl -64(%ebp),%ecx
+	0x3A96BC9A:  movl %ecx,-92(%ebp)
+	0x3A96BC9D:  leal 0(%esi), %esi
+	0x3A96BCA0:  movl 0xE0(%ebx),%edi
+	0x3A96BCA6:  movl -92(%ebp),%edx
+	0x3A96BCA9:  movl 28(%edi,%edx,1),%eax
+	0x3A96BCAD:  cmpl -32(%ebp),%eax
+	0x3A96BCB0:  jnb-32 0x3A96BA30
+. 705 3A96BC97 31
+. 8B 4D C0 89 4D A4 8D 76 00 8B BB E0 00 00 00 8B 55 A4 8B 44 17 1C 3B 45 E0 0F 83 7A FD FF FF 
+
+==== BB 706 (0x3A96BCB6) in 13139B, out 72329B, BBs exec'd 0 ====
+	0x3A96BCB6:  movl -44(%ebp),%edx
+	0x3A96BCB9:  addl %eax,%edx
+	0x3A96BCBB:  movl -16(%ebp),%eax
+	0x3A96BCBE:  call 0x3A96B5B0
+. 706 3A96BCB6 13
+. 8B 55 D4 01 C2 8B 45 F0 E8 ED F8 FF FF 
+
+==== BB 707 (0x3A96BCC3) in 13152B, out 72414B, BBs exec'd 0 ====
+	0x3A96BCC3:  testl %eax,%eax
+	0x3A96BCC5:  jnz-32 0x3A96BA2A
+. 707 3A96BCC3 8
+. 85 C0 0F 85 5F FD FF FF 
+
+==== BB 708 (0x3A96BA2A) in 13160B, out 72473B, BBs exec'd 0 ====
+	0x3A96BA2A:  movl 0xE0(%ebx),%edi
+	0x3A96BA30:  cmpl -20(%ebp),%esi
+	0x3A96BA33:  leal (%esi,%esi,2), %eax
+	0x3A96BA36:  leal (%edi,%eax,8), %edi
+	0x3A96BA39:  leal 48(%edi), %ecx
+	0x3A96BA3C:  movl %ecx,-68(%ebp)
+	0x3A96BA3F:  jle-8 0x3A96BA62
+. 708 3A96BA2A 23
+. 8B BB E0 00 00 00 3B 75 EC 8D 04 76 8D 3C C7 8D 4F 30 89 4D BC 7E 21 
+
+==== BB 709 (0x3A96BA62) in 13183B, out 72602B, BBs exec'd 0 ====
+	0x3A96BA62:  movl 48(%edi),%ecx
+	0x3A96BA65:  cmpl $0x1, %ecx
+	0x3A96BA68:  setz %al
+	0x3A96BA6B:  cmpl $0x3, %ecx
+	0x3A96BA6E:  setz %dl
+	0x3A96BA71:  orl %edx,%eax
+	0x3A96BA73:  testb $0x1, %al
+	0x3A96BA75:  jz-8 0x3A96BA20
+. 709 3A96BA62 21
+. 8B 4F 30 83 F9 01 0F 94 C0 83 F9 03 0F 94 C2 09 D0 A8 01 74 A9 
+
+==== BB 710 (0x3A96BA77) in 13204B, out 72740B, BBs exec'd 0 ====
+	0x3A96BA77:  movl -68(%ebp),%eax
+	0x3A96BA7A:  movl 8(%eax),%edi
+	0x3A96BA7D:  cmpl -32(%ebp),%edi
+	0x3A96BA80:  jnb-8 0x3A96BA20
+. 710 3A96BA77 11
+. 8B 45 BC 8B 78 08 3B 7D E0 73 9E 
+
+==== BB 711 (0x3A96BA82) in 13215B, out 72828B, BBs exec'd 0 ====
+	0x3A96BA82:  movl -36(%ebp),%edx
+	0x3A96BA85:  testl %edx,%edx
+	0x3A96BA87:  jz-8 0x3A96BA91
+. 711 3A96BA82 7
+. 8B 55 DC 85 D2 74 08 
+
+==== BB 712 (0x3A96BA91) in 13222B, out 72900B, BBs exec'd 0 ====
+	0x3A96BA91:  movl 0xFFFFFC1C(%ebx),%eax
+	0x3A96BA97:  testl %eax,%eax
+	0x3A96BA99:  jz-8 0x3A96BAA7
+. 712 3A96BA91 10
+. 8B 83 1C FC FF FF 85 C0 74 0C 
+
+==== BB 713 (0x3A96BA9B) in 13232B, out 72975B, BBs exec'd 0 ====
+	0x3A96BA9B:  movl -68(%ebp),%edx
+	0x3A96BA9E:  cmpl %eax,12(%edx)
+	0x3A96BAA1:  jnbe-32 0x3A96BA20
+. 713 3A96BA9B 12
+. 8B 55 BC 39 42 0C 0F 87 79 FF FF FF 
+
+==== BB 714 (0x3A96BAA7) in 13244B, out 73051B, BBs exec'd 0 ====
+	0x3A96BAA7:  movl -48(%ebp),%edx
+	0x3A96BAAA:  movl -52(%ebp),%eax
+	0x3A96BAAD:  notl %edx
+	0x3A96BAAF:  notl %eax
+	0x3A96BAB1:  orl %eax,%edx
+	0x3A96BAB3:  jz-32 0x3A96BCD9
+. 714 3A96BAA7 18
+. 8B 55 D0 8B 45 CC F7 D2 F7 D0 09 C2 0F 84 20 02 00 00 
+
+==== BB 715 (0x3A96BAB9) in 13262B, out 73145B, BBs exec'd 0 ====
+	0x3A96BAB9:  movl $0x0, -76(%ebp)
+	0x3A96BAC0:  movl -68(%ebp),%edx
+	0x3A96BAC3:  movl 16(%edx),%eax
+	0x3A96BAC6:  movl 20(%edx),%edx
+	0x3A96BAC9:  movl %eax,-88(%ebp)
+	0x3A96BACC:  movl %edx,-84(%ebp)
+	0x3A96BACF:  andl $0xF0000, %edx
+	0x3A96BAD5:  movl %edx,%eax
+	0x3A96BAD7:  orl -76(%ebp),%eax
+	0x3A96BADA:  movl %edx,-72(%ebp)
+	0x3A96BADD:  jz-8 0x3A96BAF1
+. 715 3A96BAB9 38
+. C7 45 B4 00 00 00 00 8B 55 BC 8B 42 10 8B 52 14 89 45 A8 89 55 AC 81 E2 00 00 0F 00 89 D0 0B 45 B4 89 55 B8 74 12 
+
+==== BB 716 (0x3A96BAF1) in 13300B, out 73306B, BBs exec'd 0 ====
+	0x3A96BAF1:  movl 0xFFFFFC54(%ebx),%eax
+	0x3A96BAF7:  xorl %edx, %edx
+	0x3A96BAF9:  orl $0xF0000, %edx
+	0x3A96BAFF:  movl %edx,-100(%ebp)
+	0x3A96BB02:  movl %eax,%edx
+	0x3A96BB04:  movl -100(%ebp),%eax
+	0x3A96BB07:  notl %edx
+	0x3A96BB09:  movl %edx,-104(%ebp)
+	0x3A96BB0C:  notl %eax
+	0x3A96BB0E:  movl -88(%ebp),%edx
+	0x3A96BB11:  movl %eax,-100(%ebp)
+	0x3A96BB14:  movl -104(%ebp),%eax
+	0x3A96BB17:  andl %eax,%edx
+	0x3A96BB19:  movl %edx,-104(%ebp)
+	0x3A96BB1C:  movl -84(%ebp),%eax
+	0x3A96BB1F:  movl -100(%ebp),%edx
+	0x3A96BB22:  andl %edx,%eax
+	0x3A96BB24:  movl %eax,%edx
+	0x3A96BB26:  orl -104(%ebp),%edx
+	0x3A96BB29:  movl %eax,-100(%ebp)
+	0x3A96BB2C:  jnz-32 0x3A96BA20
+. 716 3A96BAF1 65
+. 8B 83 54 FC FF FF 31 D2 81 CA 00 00 0F 00 89 55 9C 89 C2 8B 45 9C F7 D2 89 55 98 F7 D0 8B 55 A8 89 45 9C 8B 45 98 21 C2 89 55 98 8B 45 AC 8B 55 9C 21 D0 89 C2 0B 55 98 89 45 9C 0F 85 EE FE FF FF 
+
+==== BB 717 (0x3A96BB32) in 13365B, out 73550B, BBs exec'd 0 ====
+	0x3A96BB32:  movl -44(%ebp),%eax
+	0x3A96BB35:  addl %eax,%edi
+	0x3A96BB37:  cmpl 0xFFFFFC50(%ebx),%ecx
+	0x3A96BB3D:  movl %edi,-36(%ebp)
+	0x3A96BB40:  jnz-32 0x3A96BA20
+. 717 3A96BB32 20
+. 8B 45 D4 01 C7 3B 8B 50 FC FF FF 89 7D DC 0F 85 DA FE FF FF 
+
+==== BB 718 (0x3A96BB46) in 13385B, out 73656B, BBs exec'd 0 ====
+	0x3A96BB46:  leal 0(%esi), %esi
+	0x3A96BB49:  leal 0(%edi,,), %edi
+	0x3A96BB50:  testb $0x1, 0xFFFFFC14(%ebx)
+	0x3A96BB57:  jnz-8 0x3A96BBBF
+. 718 3A96BB46 19
+. 8D 76 00 8D BC 27 00 00 00 00 F6 83 14 FC FF FF 01 75 66 
+
+==== BB 719 (0x3A96BB59) in 13404B, out 73752B, BBs exec'd 0 ====
+	0x3A96BB59:  movl -36(%ebp),%eax
+	0x3A96BB5C:  addl $0x64, %esp
+	0x3A96BB5F:  popl %ebx
+	0x3A96BB60:  popl %esi
+	0x3A96BB61:  popl %edi
+	0x3A96BB62:  popl %ebp
+	0x3A96BB63:  ret
+. 719 3A96BB59 11
+. 8B 45 DC 83 C4 64 5B 5E 5F 5D C3 
+
+==== BB 720 (0x3A96B3A0) in 13415B, out 73874B, BBs exec'd 0 ====
+	0x3A96B3A0:  movl %eax,0xFFFFFDC4(%ebp)
+	0x3A96B3A6:  testl %eax,%eax
+	0x3A96B3A8:  jz-32 0x3A96B438
+. 720 3A96B3A0 14
+. 89 85 C4 FD FF FF 85 C0 0F 84 8A 00 00 00 
+
+==== BB 721 (0x3A96B3AE) in 13429B, out 73949B, BBs exec'd 0 ====
+	0x3A96B3AE:  movl 0xFFFFFDD8(%ebp),%esi
+	0x3A96B3B4:  testl %esi,%esi
+	0x3A96B3B6:  jz-32 0x3A96B4F3
+. 721 3A96B3AE 14
+. 8B B5 D8 FD FF FF 85 F6 0F 84 37 01 00 00 
+
+==== BB 722 (0x3A96B3BC) in 13443B, out 74024B, BBs exec'd 0 ====
+	0x3A96B3BC:  testb $0x8, 0x1E9(%esi)
+	0x3A96B3C3:  jnz-32 0x3A96B511
+. 722 3A96B3BC 13
+. F6 86 E9 01 00 00 08 0F 85 48 01 00 00 
+
+==== BB 723 (0x3A96B3C9) in 13456B, out 74094B, BBs exec'd 0 ====
+	0x3A96B3C9:  movl 0xFFFFFDC4(%ebp),%eax
+	0x3A96B3CF:  testl %eax,%eax
+	0x3A96B3D1:  jz-8 0x3A96B438
+. 723 3A96B3C9 10
+. 8B 85 C4 FD FF FF 85 C0 74 65 
+
+==== BB 724 (0x3A96B3D3) in 13466B, out 74169B, BBs exec'd 0 ====
+	0x3A96B3D3:  movl 0xFFFFFDC4(%ebp),%eax
+	0x3A96B3D9:  leal 0xFFFFFDE4(%ebp), %edx
+	0x3A96B3DF:  movl %edx,0xFFFFFDB8(%ebp)
+	0x3A96B3E5:  call 0x3A968F20
+. 724 3A96B3D3 23
+. 8B 85 C4 FD FF FF 8D 95 E4 FD FF FF 89 95 B8 FD FF FF E8 36 DB FF FF 
+
+==== BB 725 (0x3A969041) in 13489B, out 74259B, BBs exec'd 0 ====
+	0x3A969041:  cmpl $0x20, 16(%edi)
+	0x3A969045:  jnz-8 0x3A969021
+. 725 3A969041 6
+. 83 7F 10 20 75 DA 
+
+==== BB 726 (0x3A969047) in 13495B, out 74322B, BBs exec'd 0 ====
+	0x3A969047:  cmpl $0x3, 28(%edi)
+	0x3A96904B:  jbe-8 0x3A969021
+. 726 3A969047 6
+. 83 7F 1C 03 76 D4 
+
+==== BB 727 (0x3A96904D) in 13501B, out 74385B, BBs exec'd 0 ====
+	0x3A96904D:  movl 4(%edi),%eax
+	0x3A969050:  movl -52(%ebp),%ecx
+	0x3A969053:  leal 32(%eax), %edx
+	0x3A969056:  cmpl (%ecx),%edx
+	0x3A969058:  leal 4(%eax,%ecx,1), %eax
+	0x3A96905C:  movl %eax,-76(%ebp)
+	0x3A96905F:  jnbe-32 0x3A96915B
+. 727 3A96904D 24
+. 8B 47 04 8B 4D CC 8D 50 20 3B 11 8D 44 08 04 89 45 B4 0F 87 F6 00 00 00 
+
+==== BB 728 (0x3A969065) in 13525B, out 74504B, BBs exec'd 0 ====
+	0x3A969065:  cld
+	0x3A969066:  movl -76(%ebp),%esi
+	0x3A969069:  movl $0x10,%edx
+	0x3A96906E:  leal 0xFFFFD638(%ebx), %edi
+	0x3A969074:  movl %edx,%ecx
+	0x3A969076:  repe cmpsb
+. 728 3A969065 19
+. FC 8B 75 B4 BA 10 00 00 00 8D BB 38 D6 FF FF 89 D1 F3 A6 
+
+==== BB 729 (0x3A969076) in 13544B, out 74677B, BBs exec'd 0 ====
+	0x3A969076:  repe cmpsb
+. 729 3A969076 2
+. F3 A6 
+
+==== BB 730 (0x3A969078) in 13546B, out 74802B, BBs exec'd 0 ====
+	0x3A969078:  jz-32 0x3A969104
+. 730 3A969078 6
+. 0F 84 86 00 00 00 
+
+==== BB 731 (0x3A969104) in 13552B, out 74849B, BBs exec'd 0 ====
+	0x3A969104:  movl -76(%ebp),%esi
+	0x3A969107:  movzbl 20(%esi),%eax
+	0x3A96910B:  movzbl 24(%esi),%edx
+	0x3A96910F:  shll $0x8, %eax
+	0x3A969112:  addl %edx,%eax
+	0x3A969114:  movzbl 28(%esi),%edx
+	0x3A969118:  shll $0x8, %eax
+	0x3A96911B:  movl 16(%esi),%esi
+	0x3A96911E:  addl %edx,%eax
+	0x3A969120:  testl %esi,%esi
+	0x3A969122:  jnz-8 0x3A969132
+. 731 3A969104 32
+. 8B 75 B4 0F B6 46 14 0F B6 56 18 C1 E0 08 01 D0 0F B6 56 1C C1 E0 08 8B 76 10 01 D0 85 F6 75 0E 
+
+==== BB 732 (0x3A969124) in 13584B, out 74993B, BBs exec'd 0 ====
+	0x3A969124:  movl 0xFFFFFC1C(%ebx),%edx
+	0x3A96912A:  testl %edx,%edx
+	0x3A96912C:  jz-8 0x3A969150
+. 732 3A969124 10
+. 8B 93 1C FC FF FF 85 D2 74 22 
+
+==== BB 733 (0x3A96912E) in 13594B, out 75068B, BBs exec'd 0 ====
+	0x3A96912E:  cmpl %eax,%edx
+	0x3A969130:  jnb-8 0x3A969150
+. 733 3A96912E 4
+. 39 C2 73 1E 
+
+==== BB 734 (0x3A96B3EA) in 13598B, out 75125B, BBs exec'd 0 ====
+	0x3A96B3EA:  movl %eax,0xFFFFFDCC(%ebp)
+	0x3A96B3F0:  incl %eax
+	0x3A96B3F1:  jz-8 0x3A96B445
+. 734 3A96B3EA 9
+. 89 85 CC FD FF FF 40 74 52 
+
+==== BB 735 (0x3A96B3F3) in 13607B, out 75203B, BBs exec'd 0 ====
+	0x3A96B3F3:  movl 0xFFFFFDC4(%ebp),%ecx
+	0x3A96B3F9:  movl %ecx,(%esp,,)
+	0x3A96B3FC:  call 0x3A9752D0
+. 735 3A96B3F3 14
+. 8B 8D C4 FD FF FF 89 0C 24 E8 CF 9E 00 00 
+
+==== BB 736 (0x3A96B401) in 13621B, out 75274B, BBs exec'd 0 ====
+	0x3A96B401:  leal 1(%eax), %esi
+	0x3A96B404:  movl %esi,(%esp,,)
+	0x3A96B407:  call 0x3A96581C
+. 736 3A96B401 11
+. 8D 70 01 89 34 24 E8 10 A4 FF FF 
+
+==== BB 737 (0x3A96B40C) in 13632B, out 75339B, BBs exec'd 0 ====
+	0x3A96B40C:  movl %eax,%edx
+	0x3A96B40E:  xorl %eax, %eax
+	0x3A96B410:  testl %edx,%edx
+	0x3A96B412:  jz-8 0x3A96B42A
+. 737 3A96B40C 8
+. 89 C2 31 C0 85 D2 74 16 
+
+==== BB 738 (0x3A96B414) in 13640B, out 75419B, BBs exec'd 0 ====
+	0x3A96B414:  movl %esi,8(%esp,,)
+	0x3A96B418:  movl 0xFFFFFDC4(%ebp),%eax
+	0x3A96B41E:  movl %edx,(%esp,,)
+	0x3A96B421:  movl %eax,4(%esp,,)
+	0x3A96B425:  call 0x3A975870
+. 738 3A96B414 22
+. 89 74 24 08 8B 85 C4 FD FF FF 89 14 24 89 44 24 04 E8 46 A4 00 00 
+
+==== BB 739 (0x3A96B42A) in 13662B, out 75516B, BBs exec'd 0 ====
+	0x3A96B42A:  movl %eax,0xFFFFFDE0(%ebp)
+	0x3A96B430:  testl %eax,%eax
+	0x3A96B432:  jz-32 0x3A96B580
+. 739 3A96B42A 14
+. 89 85 E0 FD FF FF 85 C0 0F 84 48 01 00 00 
+
+==== BB 740 (0x3A96B438) in 13676B, out 75591B, BBs exec'd 0 ====
+	0x3A96B438:  cmpl $0xFFFFFFFF, 0xFFFFFDCC(%ebp)
+	0x3A96B43F:  jnz-32 0x3A96B24B
+. 740 3A96B438 13
+. 83 BD CC FD FF FF FF 0F 85 06 FE FF FF 
+
+==== BB 741 (0x3A96B24B) in 13689B, out 75657B, BBs exec'd 0 ====
+	0x3A96B24B:  testb $0x1, 0xFFFFFC14(%ebx)
+	0x3A96B252:  jnz-32 0x3A96B4C3
+. 741 3A96B24B 13
+. F6 83 14 FC FF FF 01 0F 85 6B 02 00 00 
+
+==== BB 742 (0x3A96B258) in 13702B, out 75727B, BBs exec'd 0 ====
+	0x3A96B258:  cmpl $0xFFFFFFFF, 0xFFFFFDCC(%ebp)
+	0x3A96B25F:  leal 0xFFFFFDE4(%ebp), %eax
+	0x3A96B265:  movl %eax,0xFFFFFDB8(%ebp)
+	0x3A96B26B:  jnz-32 0x3A96B030
+. 742 3A96B258 25
+. 83 BD CC FD FF FF FF 8D 85 E4 FD FF FF 89 85 B8 FD FF FF 0F 85 BF FD FF FF 
+
+==== BB 743 (0x3A96CAC0) in 13727B, out 75819B, BBs exec'd 0 ====
+	0x3A96CAC0:  movl %eax,%edx
+	0x3A96CAC2:  movl 12(%eax),%eax
+	0x3A96CAC5:  testl %eax,%eax
+	0x3A96CAC7:  jnz-8 0x3A96CAC0
+. 743 3A96CAC0 9
+. 89 C2 8B 40 0C 85 C0 75 F7 
+
+==== BB 744 (0x3A9699C5) in 13736B, out 75901B, BBs exec'd 0 ====
+	0x3A9699C5:  movl 8(%esi),%eax
+	0x3A9699C8:  movl -124(%ebp),%ecx
+	0x3A9699CB:  movl %eax,0x144(%ecx)
+	0x3A9699D1:  addl $0x20, %esi
+	0x3A9699D4:  cmpl 0xFFFFFF54(%ebp),%esi
+	0x3A9699DA:  jb-8 0x3A96998B
+. 744 3A9699C5 23
+. 8B 46 08 8B 4D 84 89 81 44 01 00 00 83 C6 20 3B B5 54 FF FF FF 72 AF 
+
+==== BB 745 (0x3A969975) in 13759B, out 76018B, BBs exec'd 0 ====
+	0x3A969975:  leal 0(%esi,,), %esi
+	0x3A969979:  leal 0(%edi,,), %edi
+	0x3A969980:  addl $0x20, %esi
+	0x3A969983:  cmpl 0xFFFFFF54(%ebp),%esi
+	0x3A969989:  jnb-8 0x3A9699DC
+. 745 3A969975 22
+. 8D 74 26 00 8D BC 27 00 00 00 00 83 C6 20 3B B5 54 FF FF FF 73 51 
+
+==== BB 746 (0x3A9699A2) in 13781B, out 76116B, BBs exec'd 0 ====
+	0x3A9699A2:  cmpl $0x6474E551, %eax
+	0x3A9699A7:  jnbe-32 0x3A969C07
+. 746 3A9699A2 11
+. 3D 51 E5 74 64 0F 87 5A 02 00 00 
+
+==== BB 747 (0x3A9699AD) in 13792B, out 76176B, BBs exec'd 0 ====
+	0x3A9699AD:  cmpl $0x7, %eax
+	0x3A9699B0:  jnz-8 0x3A969980
+. 747 3A9699AD 5
+. 83 F8 07 75 CE 
+
+==== BB 748 (0x3A969B31) in 13797B, out 76233B, BBs exec'd 0 ====
+	0x3A969B31:  movl 0xFFFFFF40(%ebp),%ecx
+	0x3A969B37:  cmpl %ecx,0xFFFFFF38(%ebp)
+	0x3A969B3D:  jbe-8 0x3A969B7B
+. 748 3A969B31 14
+. 8B 8D 40 FF FF FF 39 8D 38 FF FF FF 76 3C 
+
+==== BB 749 (0x3A97571F) in 13811B, out 76314B, BBs exec'd 0 ====
+	0x3A97571F:  movl %edi,%ecx
+	0x3A975721:  negl %ecx
+	0x3A975723:  andl $0x3, %ecx
+	0x3A975726:  subl %ecx,%edx
+	0x3A975728:  rep stosb
+. 749 3A97571F 11
+. 89 F9 F7 D9 83 E1 03 29 CA F3 AA 
+
+==== BB 750 (0x3A97572A) in 13822B, out 76438B, BBs exec'd 0 ====
+	0x3A97572A:  subl $0x20, %edx
+	0x3A97572D:  movl (%edi),%ecx
+	0x3A97572F:  nop
+	0x3A975730:  movl 28(%edi),%ecx
+	0x3A975733:  subl $0x20, %edx
+	0x3A975736:  movl %eax,0(%edi)
+	0x3A975739:  movl %eax,4(%edi)
+	0x3A97573C:  movl %eax,8(%edi)
+	0x3A97573F:  movl %eax,12(%edi)
+	0x3A975742:  movl %eax,16(%edi)
+	0x3A975745:  movl %eax,20(%edi)
+	0x3A975748:  movl %eax,24(%edi)
+	0x3A97574B:  movl %eax,28(%edi)
+	0x3A97574E:  leal 32(%edi), %edi
+	0x3A975751:  jnl-8 0x3A975730
+. 750 3A97572A 41
+. 83 EA 20 8B 0F 90 8B 4F 1C 83 EA 20 89 47 00 89 47 04 89 47 08 89 47 0C 89 47 10 89 47 14 89 47 18 89 47 1C 8D 7F 20 7D DD 
+
+==== BB 751 (0x3A975730) in 13863B, out 76625B, BBs exec'd 0 ====
+	0x3A975730:  movl 28(%edi),%ecx
+	0x3A975733:  subl $0x20, %edx
+	0x3A975736:  movl %eax,0(%edi)
+	0x3A975739:  movl %eax,4(%edi)
+	0x3A97573C:  movl %eax,8(%edi)
+	0x3A97573F:  movl %eax,12(%edi)
+	0x3A975742:  movl %eax,16(%edi)
+	0x3A975745:  movl %eax,20(%edi)
+	0x3A975748:  movl %eax,24(%edi)
+	0x3A97574B:  movl %eax,28(%edi)
+	0x3A97574E:  leal 32(%edi), %edi
+	0x3A975751:  jnl-8 0x3A975730
+. 751 3A975730 35
+. 8B 4F 1C 83 EA 20 89 47 00 89 47 04 89 47 08 89 47 0C 89 47 10 89 47 14 89 47 18 89 47 1C 8D 7F 20 7D DD 
+
+==== BB 752 (0x3A975753) in 13898B, out 76794B, BBs exec'd 0 ====
+	0x3A975753:  leal 32(%edx), %ecx
+	0x3A975756:  shrl $0x2, %ecx
+	0x3A975759:  rep stosl
+. 752 3A975753 8
+. 8D 4A 20 C1 E9 02 F3 AB 
+
+==== BB 753 (0x3A96A171) in 13906B, out 76898B, BBs exec'd 0 ====
+	0x3A96A171:  xorl %eax, %eax
+	0x3A96A173:  movl %eax,20(%esp,,)
+	0x3A96A177:  movl $0xFFFFFFFF,%eax
+	0x3A96A17C:  movl %eax,16(%esp,,)
+	0x3A96A180:  movl $0x32,%eax
+	0x3A96A185:  movl %eax,12(%esp,,)
+	0x3A96A189:  movl 20(%edi),%eax
+	0x3A96A18C:  movl %eax,8(%esp,,)
+	0x3A96A190:  movl 0xFFFFFF38(%ebp),%edx
+	0x3A96A196:  subl %edx,0xFFFFFF3C(%ebp)
+	0x3A96A19C:  movl 0xFFFFFF3C(%ebp),%ecx
+	0x3A96A1A2:  movl %edx,(%esp,,)
+	0x3A96A1A5:  movl %ecx,4(%esp,,)
+	0x3A96A1A9:  call 0x3A974C50
+. 753 3A96A171 61
+. 31 C0 89 44 24 14 B8 FF FF FF FF 89 44 24 10 B8 32 00 00 00 89 44 24 0C 8B 47 14 89 44 24 08 8B 95 38 FF FF FF 29 95 3C FF FF FF 8B 8D 3C FF FF FF 89 14 24 89 4C 24 04 E8 A2 AA 00 00 
+
+==== BB 754 (0x3A96A1AE) in 13967B, out 77102B, BBs exec'd 0 ====
+	0x3A96A1AE:  incl %eax
+	0x3A96A1AF:  jnz-32 0x3A969B8D
+. 754 3A96A1AE 7
+. 40 0F 85 D8 F9 FF FF 
+
+==== BB 755 (0x3A969FDC) in 13974B, out 77164B, BBs exec'd 0 ====
+	0x3A969FDC:  movl -108(%ebp),%eax
+	0x3A969FDF:  movl -104(%ebp),%edx
+	0x3A969FE2:  movl -124(%ebp),%ecx
+	0x3A969FE5:  movl %eax,0x1BC(%ecx)
+	0x3A969FEB:  movl %edx,0x1C0(%ecx)
+	0x3A969FF1:  movl -20(%ebp),%eax
+	0x3A969FF4:  movl -16(%ebp),%edx
+	0x3A969FF7:  movl %eax,0x1C4(%ecx)
+	0x3A969FFD:  movl 0xFFFFFC00(%ebx),%eax
+	0x3A96A003:  movl %edx,0x1C8(%ecx)
+	0x3A96A009:  notl %eax
+	0x3A96A00B:  andl %eax,0xFFFFFF68(%ebp)
+	0x3A96A011:  testb $0x1, 0xFFFFFF68(%ebp)
+	0x3A96A018:  jnz-32 0x3A96A390
+. 755 3A969FDC 66
+. 8B 45 94 8B 55 98 8B 4D 84 89 81 BC 01 00 00 89 91 C0 01 00 00 8B 45 EC 8B 55 F0 89 81 C4 01 00 00 8B 83 00 FC FF FF 89 91 C8 01 00 00 F7 D0 21 85 68 FF FF FF F6 85 68 FF FF FF 01 0F 85 72 03 00 00 
+
+==== BB 756 (0x3A96FEDE) in 14040B, out 77388B, BBs exec'd 0 ====
+	0x3A96FEDE:  movl %eax,20(%esi)
+	0x3A96FEE1:  subl $0xC, %esp
+	0x3A96FEE4:  movl -4(%ebp),%esi
+	0x3A96FEE7:  leave	0x3A96FEE8:  ret
+. 756 3A96FEDE 11
+. 89 46 14 83 EC 0C 8B 75 FC C9 C3 
+
+==== BB 757 (0x3A970246) in 14051B, out 77481B, BBs exec'd 0 ====
+	0x3A970246:  movl -28(%ebp),%eax
+	0x3A970249:  movl 0xFFFFFF18(%ebp),%edx
+	0x3A97024F:  movl 0xFFFFFF24(%ebp),%ecx
+	0x3A970255:  movl %eax,(%edx)
+	0x3A970257:  movl 0xFFFFFF20(%ebp),%eax
+	0x3A97025D:  movl $0x0, (%ecx)
+	0x3A970263:  movl $0x0, (%eax)
+	0x3A970269:  xorl %eax, %eax
+	0x3A97026B:  movl -12(%ebp),%ebx
+	0x3A97026E:  movl -8(%ebp),%esi
+	0x3A970271:  movl -4(%ebp),%edi
+	0x3A970274:  movl %ebp,%esp
+	0x3A970276:  popl %ebp
+	0x3A970277:  ret 4
+. 757 3A970246 52
+. 8B 45 E4 8B 95 18 FF FF FF 8B 8D 24 FF FF FF 89 02 8B 85 20 FF FF FF C7 01 00 00 00 00 C7 00 00 00 00 00 31 C0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C2 04 00 
+
+==== BB 758 (0x3A96F3A4) in 14103B, out 77677B, BBs exec'd 0 ====
+	0x3A96F3A4:  movl -56(%ebp),%edx
+	0x3A96F3A7:  subl $0x4, %esp
+	0x3A96F3AA:  testl %edx,%edx
+	0x3A96F3AC:  jnz-32 0x3A96F5F6
+. 758 3A96F3A4 14
+. 8B 55 C8 83 EC 04 85 D2 0F 85 44 02 00 00 
+
+==== BB 759 (0x3A96F3B2) in 14117B, out 77762B, BBs exec'd 0 ====
+	0x3A96F3B2:  movl -32(%ebp),%edx
+	0x3A96F3B5:  testb $0x60, 0x174(%edx)
+	0x3A96F3BC:  jnz-8 0x3A96F3F5
+. 759 3A96F3B2 12
+. 8B 55 E0 F6 82 74 01 00 00 60 75 37 
+
+==== BB 760 (0x3A96F3BE) in 14129B, out 77853B, BBs exec'd 0 ====
+	0x3A96F3BE:  movl -80(%ebp),%ecx
+	0x3A96F3C1:  subl $0x1C, %esp
+	0x3A96F3C4:  leal 31(%esp,,), %eax
+	0x3A96F3C8:  andl $0xFFFFFFF0, %eax
+	0x3A96F3CB:  movl $0x0, (%eax)
+	0x3A96F3D1:  movl $0x0, 8(%eax)
+	0x3A96F3D8:  movl %edx,4(%eax)
+	0x3A96F3DB:  movl %eax,8(%ecx)
+	0x3A96F3DE:  movl %eax,-80(%ebp)
+	0x3A96F3E1:  movzbl 0x174(%edx),%eax
+	0x3A96F3E8:  incl -20(%ebp)
+	0x3A96F3EB:  andb $0x9F, %al
+	0x3A96F3ED:  orb $0x20, %al
+	0x3A96F3EF:  movb %al,0x174(%edx)
+	0x3A96F3F5:  movl -100(%ebp),%edi
+	0x3A96F3F8:  testl %edi,%edi
+	0x3A96F3FA:  jz-8 0x3A96F409
+. 760 3A96F3BE 62
+. 8B 4D B0 83 EC 1C 8D 44 24 1F 83 E0 F0 C7 00 00 00 00 00 C7 40 08 00 00 00 00 89 50 04 89 41 08 89 45 B0 0F B6 82 74 01 00 00 FF 45 EC 24 9F 0C 20 88 82 74 01 00 00 8B 7D 9C 85 FF 74 0D 
+
+==== BB 761 (0x3A96F409) in 14191B, out 78111B, BBs exec'd 0 ====
+	0x3A96F409:  addl $0x8, -116(%ebp)
+	0x3A96F40D:  movl -116(%ebp),%esi
+	0x3A96F410:  movl (%esi),%eax
+	0x3A96F412:  testl %eax,%eax
+	0x3A96F414:  movl %eax,%edx
+	0x3A96F416:  jz-32 0x3A96F523
+. 761 3A96F409 19
+. 83 45 8C 08 8B 75 8C 8B 06 85 C0 89 C2 0F 84 07 01 00 00 
+
+==== BB 762 (0x3A96F425) in 14210B, out 78216B, BBs exec'd 0 ====
+	0x3A96F425:  cmpl $0x7FFFFFFD, %edx
+	0x3A96F42B:  setz %al
+	0x3A96F42E:  cmpl $0x7FFFFFFF, %edx
+	0x3A96F434:  setz %dl
+	0x3A96F437:  orl %edx,%eax
+	0x3A96F439:  testb $0x1, %al
+	0x3A96F43B:  jz-8 0x3A96F409
+. 762 3A96F425 24
+. 81 FA FD FF FF 7F 0F 94 C0 81 FA FF FF FF 7F 0F 94 C2 09 D0 A8 01 74 CC 
+
+==== BB 763 (0x3A96F523) in 14234B, out 78343B, BBs exec'd 0 ====
+	0x3A96F523:  movl -100(%ebp),%eax
+	0x3A96F526:  testl %eax,%eax
+	0x3A96F528:  jz-32 0x3A96F5CD
+. 763 3A96F523 11
+. 8B 45 9C 85 C0 0F 84 9F 00 00 00 
+
+==== BB 764 (0x3A96F5CD) in 14245B, out 78415B, BBs exec'd 0 ====
+	0x3A96F5CD:  movl -76(%ebp),%ecx
+	0x3A96F5D0:  movl (%ecx),%eax
+	0x3A96F5D2:  testl %eax,%eax
+	0x3A96F5D4:  jz-8 0x3A96F5E9
+. 764 3A96F5CD 9
+. 8B 4D B4 8B 01 85 C0 74 13 
+
+==== BB 765 (0x3A96F5D6) in 14254B, out 78497B, BBs exec'd 0 ====
+	0x3A96F5D6:  movl -76(%ebp),%esi
+	0x3A96F5D9:  movl 8(%esi),%esi
+	0x3A96F5DC:  testl %esi,%esi
+	0x3A96F5DE:  movl %esi,-76(%ebp)
+	0x3A96F5E1:  jz-8 0x3A96F605
+. 765 3A96F5D6 13
+. 8B 75 B4 8B 76 08 85 F6 89 75 B4 74 22 
+
+==== BB 766 (0x3A96F5E3) in 14267B, out 78589B, BBs exec'd 0 ====
+	0x3A96F5E3:  movl (%esi),%eax
+	0x3A96F5E5:  testl %eax,%eax
+	0x3A96F5E7:  jnz-8 0x3A96F5D6
+. 766 3A96F5E3 6
+. 8B 06 85 C0 75 ED 
+
+==== BB 767 (0x3A96F5E9) in 14273B, out 78658B, BBs exec'd 0 ====
+	0x3A96F5E9:  movl -76(%ebp),%eax
+	0x3A96F5EC:  testl %eax,%eax
+	0x3A96F5EE:  jnz-32 0x3A96F29B
+. 767 3A96F5E9 11
+. 8B 45 B4 85 C0 0F 85 A7 FC FF FF 
+
+==== BB 768 (0x3A96F2DF) in 14284B, out 78730B, BBs exec'd 0 ====
+	0x3A96F2DF:  movl -96(%ebp),%esi
+	0x3A96F2E2:  movzwl 0x14E(%esi),%eax
+	0x3A96F2E9:  testw %ax,%ax
+	0x3A96F2EC:  jz-8 0x3A96F309
+. 768 3A96F2DF 15
+. 8B 75 A0 0F B7 86 4E 01 00 00 66 85 C0 74 1B 
+
+==== BB 769 (0x3A96F2EE) in 14299B, out 78824B, BBs exec'd 0 ====
+	0x3A96F2EE:  movzwl %ax,%eax
+	0x3A96F2F1:  leal 0x12(,%eax,4), %eax
+	0x3A96F2F8:  andl $0xFFFFC, %eax
+	0x3A96F2FD:  subl %eax,%esp
+	0x3A96F2FF:  leal 31(%esp,,), %eax
+	0x3A96F303:  andl $0xFFFFFFF0, %eax
+	0x3A96F306:  movl %eax,-100(%ebp)
+	0x3A96F309:  movl -96(%ebp),%edx
+	0x3A96F30C:  movl 28(%edx),%eax
+	0x3A96F30F:  testl %eax,%eax
+	0x3A96F311:  jnz-8 0x3A96F32B
+. 769 3A96F2EE 37
+. 0F B7 C0 8D 04 85 12 00 00 00 25 FC FF 0F 00 29 C4 8D 44 24 1F 83 E0 F0 89 45 9C 8B 55 A0 8B 42 1C 85 C0 75 18 
+
+==== BB 770 (0x3A96FECD) in 14336B, out 78984B, BBs exec'd 0 ====
+	0x3A96FECD:  movl %eax,%ecx
+	0x3A96FECF:  andl $0x3, %ecx
+	0x3A96FED2:  movl %ecx,(%esp,,)
+	0x3A96FED5:  movl (%esi),%eax
+	0x3A96FED7:  xorl %ecx, %ecx
+	0x3A96FED9:  call 0x3A96AE40
+. 770 3A96FECD 17
+. 89 C1 83 E1 03 89 0C 24 8B 06 31 C9 E8 62 AF FF FF 
+
+==== BB 771 (0x3A970BE0) in 14353B, out 79085B, BBs exec'd 0 ====
+	0x3A970BE0:  addl $0x8, %esp
+	0x3A970BE3:  movl %edx,%eax
+	0x3A970BE5:  popl %ebx
+	0x3A970BE6:  popl %esi
+	0x3A970BE7:  popl %edi
+	0x3A970BE8:  popl %ebp
+	0x3A970BE9:  ret
+. 771 3A970BE0 10
+. 83 C4 08 89 D0 5B 5E 5F 5D C3 
+
+==== BB 772 (0x3A96AFA7) in 14363B, out 79203B, BBs exec'd 0 ====
+	0x3A96AFA7:  movl %esi,%eax
+	0x3A96AFA9:  leal -12(%ebp), %esp
+	0x3A96AFAC:  popl %ebx
+	0x3A96AFAD:  popl %esi
+	0x3A96AFAE:  popl %edi
+	0x3A96AFAF:  popl %ebp
+	0x3A96AFB0:  ret 12
+. 772 3A96AFA7 12
+. 89 F0 8D 65 F4 5B 5E 5F 5D C2 0C 00 
+
+==== BB 773 (0x3A96F3F5) in 14375B, out 79315B, BBs exec'd 0 ====
+	0x3A96F3F5:  movl -100(%ebp),%edi
+	0x3A96F3F8:  testl %edi,%edi
+	0x3A96F3FA:  jz-8 0x3A96F409
+. 773 3A96F3F5 7
+. 8B 7D 9C 85 FF 74 0D 
+
+==== BB 774 (0x3A96F3FC) in 14382B, out 79387B, BBs exec'd 0 ====
+	0x3A96F3FC:  movl -104(%ebp),%esi
+	0x3A96F3FF:  movl -100(%ebp),%eax
+	0x3A96F402:  movl %edx,(%eax,%esi,4)
+	0x3A96F405:  incl %esi
+	0x3A96F406:  movl %esi,-104(%ebp)
+	0x3A96F409:  addl $0x8, -116(%ebp)
+	0x3A96F40D:  movl -116(%ebp),%esi
+	0x3A96F410:  movl (%esi),%eax
+	0x3A96F412:  testl %eax,%eax
+	0x3A96F414:  movl %eax,%edx
+	0x3A96F416:  jz-32 0x3A96F523
+. 774 3A96F3FC 32
+. 8B 75 98 8B 45 9C 89 14 B0 46 89 75 98 83 45 8C 08 8B 75 8C 8B 06 85 C0 89 C2 0F 84 07 01 00 00 
+
+==== BB 775 (0x3A96F52E) in 14414B, out 79544B, BBs exec'd 0 ====
+	0x3A96F52E:  movl -104(%ebp),%eax
+	0x3A96F531:  movl -100(%ebp),%edx
+	0x3A96F534:  movl $0x0, (%edx,%eax,4)
+	0x3A96F53B:  incl %eax
+	0x3A96F53C:  movl %eax,-104(%ebp)
+	0x3A96F53F:  leal 0x4(,%eax,8), %eax
+	0x3A96F546:  movl %eax,(%esp,,)
+	0x3A96F549:  call 0x3A96581C
+. 775 3A96F52E 32
+. 8B 45 98 8B 55 9C C7 04 82 00 00 00 00 40 89 45 98 8D 04 C5 04 00 00 00 89 04 24 E8 CE 62 FF FF 
+
+==== BB 776 (0x3A96F54E) in 14446B, out 79677B, BBs exec'd 0 ====
+	0x3A96F54E:  movl -96(%ebp),%ecx
+	0x3A96F551:  testl %eax,%eax
+	0x3A96F553:  movl %eax,%edx
+	0x3A96F555:  movl %eax,0x1D4(%ecx)
+	0x3A96F55B:  jz-32 0x3A96FAA9
+. 776 3A96F54E 19
+. 8B 4D A0 85 C0 89 C2 89 81 D4 01 00 00 0F 84 48 05 00 00 
+
+==== BB 777 (0x3A96F561) in 14465B, out 79775B, BBs exec'd 0 ====
+	0x3A96F561:  movl -104(%ebp),%ecx
+	0x3A96F564:  leal 4(%edx), %edi
+	0x3A96F567:  movl -96(%ebp),%eax
+	0x3A96F56A:  movl -100(%ebp),%esi
+	0x3A96F56D:  shll $0x2, %ecx
+	0x3A96F570:  cmpl $0x7, %ecx
+	0x3A96F573:  movl %eax,(%edx)
+	0x3A96F575:  jbe-8 0x3A96F58D
+. 777 3A96F561 22
+. 8B 4D 98 8D 7A 04 8B 45 A0 8B 75 9C C1 E1 02 83 F9 07 89 02 76 16 
+
+==== BB 778 (0x3A96F577) in 14487B, out 79898B, BBs exec'd 0 ====
+	0x3A96F577:  testl $0x4, %edi
+	0x3A96F57D:  jz-8 0x3A96F58D
+. 778 3A96F577 8
+. F7 C7 04 00 00 00 74 0E 
+
+==== BB 779 (0x3A96F57F) in 14495B, out 79959B, BBs exec'd 0 ====
+	0x3A96F57F:  movl (%esi),%eax
+	0x3A96F581:  leal 8(%edx), %edi
+	0x3A96F584:  addl $0x4, %esi
+	0x3A96F587:  subl $0x4, %ecx
+	0x3A96F58A:  movl %eax,4(%edx)
+	0x3A96F58D:  cld
+	0x3A96F58E:  shrl $0x2, %ecx
+	0x3A96F591:  rep movsl
+. 779 3A96F57F 20
+. 8B 06 8D 7A 08 83 C6 04 83 E9 04 89 42 04 FC C1 E9 02 F3 A5 
+
+==== BB 780 (0x3A96F591) in 14515B, out 80128B, BBs exec'd 0 ====
+	0x3A96F591:  rep movsl
+. 780 3A96F591 2
+. F3 A5 
+
+==== BB 781 (0x3A96F593) in 14517B, out 80223B, BBs exec'd 0 ====
+	0x3A96F593:  movl -96(%ebp),%edx
+	0x3A96F596:  movl -104(%ebp),%eax
+	0x3A96F599:  movl 0x1D4(%edx),%esi
+	0x3A96F59F:  shll $0x2, %eax
+	0x3A96F5A2:  cmpl $0x7, %eax
+	0x3A96F5A5:  movl %eax,%edx
+	0x3A96F5A7:  leal (%esi,%eax,1), %ecx
+	0x3A96F5AA:  leal 4(%ecx), %edi
+	0x3A96F5AD:  jbe-8 0x3A96F5C5
+. 781 3A96F593 28
+. 8B 55 A0 8B 45 98 8B B2 D4 01 00 00 C1 E0 02 83 F8 07 89 C2 8D 0C 06 8D 79 04 76 16 
+
+==== BB 782 (0x3A96F5AF) in 14545B, out 80360B, BBs exec'd 0 ====
+	0x3A96F5AF:  testl $0x4, %edi
+	0x3A96F5B5:  jz-8 0x3A96F5C5
+. 782 3A96F5AF 8
+. F7 C7 04 00 00 00 74 0E 
+
+==== BB 783 (0x3A96F5B7) in 14553B, out 80421B, BBs exec'd 0 ====
+	0x3A96F5B7:  movl (%esi),%eax
+	0x3A96F5B9:  leal 8(%ecx), %edi
+	0x3A96F5BC:  addl $0x4, %esi
+	0x3A96F5BF:  subl $0x4, %edx
+	0x3A96F5C2:  movl %eax,4(%ecx)
+	0x3A96F5C5:  cld
+	0x3A96F5C6:  movl %edx,%ecx
+	0x3A96F5C8:  shrl $0x2, %ecx
+	0x3A96F5CB:  rep movsl
+. 783 3A96F5B7 22
+. 8B 06 8D 79 08 83 C6 04 83 EA 04 89 41 04 FC 89 D1 C1 E9 02 F3 A5 
+
+==== BB 784 (0x3A96F5CB) in 14575B, out 80594B, BBs exec'd 0 ====
+	0x3A96F5CB:  rep movsl
+. 784 3A96F5CB 2
+. F3 A5 
+
+==== BB 785 (0x3A96AFE3) in 14577B, out 80689B, BBs exec'd 0 ====
+	0x3A96AFE3:  movl %edi,%edx
+	0x3A96AFE5:  movl %esi,%eax
+	0x3A96AFE7:  call 0x3A968DE0
+. 785 3A96AFE3 9
+. 89 FA 89 F0 E8 F4 DD FF FF 
+
+==== BB 786 add_name_to_object(0x3A968DE0) in 14586B, out 80754B, BBs exec'd 0 ====
+	0x3A968DE0:  pushl %ebp
+	0x3A968DE1:  movl %esp,%ebp
+	0x3A968DE3:  pushl %edi
+	0x3A968DE4:  movl %edx,%edi
+	0x3A968DE6:  pushl %esi
+	0x3A968DE7:  pushl %ebx
+	0x3A968DE8:  subl $0x14, %esp
+	0x3A968DEB:  movl 20(%eax),%esi
+	0x3A968DEE:  call 0x3A97592B
+. 786 3A968DE0 19
+. 55 89 E5 57 89 D7 56 53 83 EC 14 8B 70 14 E8 38 CB 00 00 
+
+==== BB 787 (0x3A968DF3) in 14605B, out 80913B, BBs exec'd 0 ====
+	0x3A968DF3:  addl $0xF879, %ebx
+	0x3A968DF9:  movl $0x0, -16(%ebp)
+	0x3A968E00:  testl %esi,%esi
+	0x3A968E02:  jz-8 0x3A968E2C
+. 787 3A968DF3 17
+. 81 C3 79 F8 00 00 C7 45 F0 00 00 00 00 85 F6 74 28 
+
+==== BB 788 (0x3A968E04) in 14622B, out 81009B, BBs exec'd 0 ====
+	0x3A968E04:  leal 0x0(%esi), %esi
+	0x3A968E0A:  leal 0x0(%edi), %edi
+	0x3A968E10:  movl (%esi),%eax
+	0x3A968E12:  movl %edi,(%esp,,)
+	0x3A968E15:  movl %eax,4(%esp,,)
+	0x3A968E19:  call 0x3A975280
+. 788 3A968E04 26
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 06 89 3C 24 89 44 24 04 E8 62 C4 00 00 
+
+==== BB 789 (0x3A968E1E) in 14648B, out 81107B, BBs exec'd 0 ====
+	0x3A968E1E:  testl %eax,%eax
+	0x3A968E20:  jz-8 0x3A968E77
+. 789 3A968E1E 4
+. 85 C0 74 55 
+
+==== BB 790 (0x3A968E22) in 14652B, out 81166B, BBs exec'd 0 ====
+	0x3A968E22:  movl %esi,-16(%ebp)
+	0x3A968E25:  movl 4(%esi),%esi
+	0x3A968E28:  testl %esi,%esi
+	0x3A968E2A:  jnz-8 0x3A968E10
+. 790 3A968E22 10
+. 89 75 F0 8B 76 04 85 F6 75 E4 
+
+==== BB 791 (0x3A968E2C) in 14662B, out 81251B, BBs exec'd 0 ====
+	0x3A968E2C:  movl %edi,(%esp,,)
+	0x3A968E2F:  call 0x3A9752D0
+. 791 3A968E2C 8
+. 89 3C 24 E8 9C C4 00 00 
+
+==== BB 792 (0x3A968E34) in 14670B, out 81306B, BBs exec'd 0 ====
+	0x3A968E34:  leal 1(%eax), %edx
+	0x3A968E37:  addl $0xD, %eax
+	0x3A968E3A:  movl %edx,-20(%ebp)
+	0x3A968E3D:  movl %eax,(%esp,,)
+	0x3A968E40:  call 0x3A96581C
+. 792 3A968E34 17
+. 8D 50 01 83 C0 0D 89 55 EC 89 04 24 E8 D7 C9 FF FF 
+
+==== BB 793 (0x3A968E45) in 14687B, out 81398B, BBs exec'd 0 ====
+	0x3A968E45:  testl %eax,%eax
+	0x3A968E47:  movl %eax,%esi
+	0x3A968E49:  jz-8 0x3A968E7F
+. 793 3A968E45 6
+. 85 C0 89 C6 74 34 
+
+==== BB 794 (0x3A968E4B) in 14693B, out 81464B, BBs exec'd 0 ====
+	0x3A968E4B:  movl -20(%ebp),%edx
+	0x3A968E4E:  leal 12(%eax), %eax
+	0x3A968E51:  movl %edi,4(%esp,,)
+	0x3A968E55:  movl %eax,(%esp,,)
+	0x3A968E58:  movl %edx,8(%esp,,)
+	0x3A968E5C:  call 0x3A975870
+. 794 3A968E4B 22
+. 8B 55 EC 8D 40 0C 89 7C 24 04 89 04 24 89 54 24 08 E8 0F CA 00 00 
+
+==== BB 795 (0x3A968E61) in 14715B, out 81568B, BBs exec'd 0 ====
+	0x3A968E61:  movl %eax,(%esi)
+	0x3A968E63:  movl -16(%ebp),%eax
+	0x3A968E66:  movl $0x0, 4(%esi)
+	0x3A968E6D:  movl $0x0, 8(%esi)
+	0x3A968E74:  movl %esi,4(%eax)
+	0x3A968E77:  leal -12(%ebp), %esp
+	0x3A968E7A:  popl %ebx
+	0x3A968E7B:  popl %esi
+	0x3A968E7C:  popl %edi
+	0x3A968E7D:  popl %ebp
+	0x3A968E7E:  ret
+. 795 3A968E61 30
+. 89 06 8B 45 F0 C7 46 04 00 00 00 00 C7 46 08 00 00 00 00 89 70 04 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 796 (0x3A96AFEC) in 14745B, out 81738B, BBs exec'd 0 ====
+	0x3A96AFEC:  orb $0x1, 0x175(%esi)
+	0x3A96AFF3:  jmp-8 0x3A96AFA7
+. 796 3A96AFEC 9
+. 80 8E 75 01 00 00 01 EB B2 
+
+==== BB 797 (0x3A96F313) in 14754B, out 81795B, BBs exec'd 0 ====
+	0x3A96F313:  movl 0xE8(%edx),%eax
+	0x3A96F319:  testl %eax,%eax
+	0x3A96F31B:  jnz-8 0x3A96F32B
+. 797 3A96F313 10
+. 8B 82 E8 00 00 00 85 C0 75 0E 
+
+==== BB 798 (0x3A96F31D) in 14764B, out 81870B, BBs exec'd 0 ====
+	0x3A96F31D:  movl 0xE0(%edx),%eax
+	0x3A96F323:  testl %eax,%eax
+	0x3A96F325:  jz-32 0x3A96F523
+. 798 3A96F31D 14
+. 8B 82 E0 00 00 00 85 C0 0F 84 F8 01 00 00 
+
+==== BB 799 (0x3A96F605) in 14778B, out 81945B, BBs exec'd 0 ====
+	0x3A96F605:  movl 0x148(%ebx),%eax
+	0x3A96F60B:  movl -88(%ebp),%edi
+	0x3A96F60E:  testl %eax,%eax
+	0x3A96F610:  setz %al
+	0x3A96F613:  testl %edi,%edi
+	0x3A96F615:  setnz %dl
+	0x3A96F618:  andl %edx,%eax
+	0x3A96F61A:  testb $0x1, %al
+	0x3A96F61C:  jz-8 0x3A96F627
+. 799 3A96F605 25
+. 8B 83 48 01 00 00 8B 7D A8 85 C0 0F 94 C0 85 FF 0F 95 C2 21 D0 A8 01 74 09 
+
+==== BB 800 (0x3A96F61E) in 14803B, out 82099B, BBs exec'd 0 ====
+	0x3A96F61E:  movl -88(%ebp),%eax
+	0x3A96F621:  movl %eax,0x148(%ebx)
+	0x3A96F627:  movl -64(%ebp),%ecx
+	0x3A96F62A:  movl 0x1D4(%ecx),%edx
+	0x3A96F630:  testl %edx,%edx
+	0x3A96F632:  jz-8 0x3A96F645
+. 800 3A96F61E 22
+. 8B 45 A8 89 83 48 01 00 00 8B 4D C0 8B 91 D4 01 00 00 85 D2 74 11 
+
+==== BB 801 (0x3A96F645) in 14825B, out 82216B, BBs exec'd 0 ====
+	0x3A96F645:  movl -20(%ebp),%eax
+	0x3A96F648:  leal 0x4(,%eax,8), %eax
+	0x3A96F64F:  movl %eax,(%esp,,)
+	0x3A96F652:  call 0x3A96581C
+. 801 3A96F645 18
+. 8B 45 EC 8D 04 C5 04 00 00 00 89 04 24 E8 C5 61 FF FF 
+
+==== BB 802 (0x3A96F657) in 14843B, out 82297B, BBs exec'd 0 ====
+	0x3A96F657:  movl -64(%ebp),%esi
+	0x3A96F65A:  testl %eax,%eax
+	0x3A96F65C:  movl %eax,%ecx
+	0x3A96F65E:  movl %eax,0x1D4(%esi)
+	0x3A96F664:  jz-32 0x3A96FC0E
+. 802 3A96F657 19
+. 8B 75 C0 85 C0 89 C1 89 86 D4 01 00 00 0F 84 A4 05 00 00 
+
+==== BB 803 (0x3A96F66A) in 14862B, out 82395B, BBs exec'd 0 ====
+	0x3A96F66A:  movl -20(%ebp),%eax
+	0x3A96F66D:  movl -64(%ebp),%edx
+	0x3A96F670:  leal 4(%ecx,%eax,4), %ecx
+	0x3A96F674:  movl %ecx,0x150(%edx)
+	0x3A96F67A:  movl -16(%ebp),%ecx
+	0x3A96F67D:  movl %eax,0x154(%edx)
+	0x3A96F683:  movl $0x0, -20(%ebp)
+	0x3A96F68A:  testl %ecx,%ecx
+	0x3A96F68C:  movl %ecx,-76(%ebp)
+	0x3A96F68F:  jz-8 0x3A96F6CF
+. 803 3A96F66A 39
+. 8B 45 EC 8B 55 C0 8D 4C 81 04 89 8A 50 01 00 00 8B 4D F0 89 82 54 01 00 00 C7 45 EC 00 00 00 00 85 C9 89 4D B4 74 3E 
+
+==== BB 804 (0x3A96F691) in 14901B, out 82555B, BBs exec'd 0 ====
+	0x3A96F691:  movl 8(%ebp),%esi
+	0x3A96F694:  testl %esi,%esi
+	0x3A96F696:  jnz-32 0x3A96FAC4
+. 804 3A96F691 11
+. 8B 75 08 85 F6 0F 85 28 04 00 00 
+
+==== BB 805 (0x3A96F69C) in 14912B, out 82627B, BBs exec'd 0 ====
+	0x3A96F69C:  movl -76(%ebp),%edx
+	0x3A96F69F:  movl 4(%edx),%ecx
+	0x3A96F6A2:  movl -64(%ebp),%esi
+	0x3A96F6A5:  movl -20(%ebp),%eax
+	0x3A96F6A8:  movl 0x150(%esi),%edx
+	0x3A96F6AE:  movl %ecx,(%edx,%eax,4)
+	0x3A96F6B1:  incl %eax
+	0x3A96F6B2:  movl %eax,-20(%ebp)
+	0x3A96F6B5:  movl -76(%ebp),%eax
+	0x3A96F6B8:  movl 4(%eax),%ecx
+	0x3A96F6BB:  andb $0xFFFFFF9F, 0x174(%ecx)
+	0x3A96F6C2:  movl -76(%ebp),%edx
+	0x3A96F6C5:  movl 8(%edx),%edx
+	0x3A96F6C8:  movl %edx,-76(%ebp)
+	0x3A96F6CB:  testl %edx,%edx
+	0x3A96F6CD:  jnz-8 0x3A96F691
+. 805 3A96F69C 51
+. 8B 55 B4 8B 4A 04 8B 75 C0 8B 45 EC 8B 96 50 01 00 00 89 0C 82 40 89 45 EC 8B 45 B4 8B 48 04 80 A1 74 01 00 00 9F 8B 55 B4 8B 52 08 89 55 B4 85 D2 75 C2 
+
+==== BB 806 (0x3A96F6CF) in 14963B, out 82849B, BBs exec'd 0 ====
+	0x3A96F6CF:  testb $0x2, 0xFFFFFC15(%ebx)
+	0x3A96F6D6:  jnz-32 0x3A96FCFC
+. 806 3A96F6CF 13
+. F6 83 15 FC FF FF 02 0F 85 20 06 00 00 
+
+==== BB 807 (0x3A96F6DC) in 14976B, out 82919B, BBs exec'd 0 ====
+	0x3A96F6DC:  movl -64(%ebp),%edx
+	0x3A96F6DF:  movl -20(%ebp),%eax
+	0x3A96F6E2:  movl 0x150(%edx),%edx
+	0x3A96F6E8:  movl %eax,0xFFFFFF78(%ebp)
+	0x3A96F6EE:  movl %edx,0xFFFFFF74(%ebp)
+	0x3A96F6F4:  movl $0x0, -84(%ebp)
+	0x3A96F6FB:  movl -64(%ebp),%edx
+	0x3A96F6FE:  movl 0x1DC(%edx),%ecx
+	0x3A96F704:  cmpl %ecx,-84(%ebp)
+	0x3A96F707:  jnb-8 0x3A96F752
+. 807 3A96F6DC 45
+. 8B 55 C0 8B 45 EC 8B 92 50 01 00 00 89 85 78 FF FF FF 89 95 74 FF FF FF C7 45 AC 00 00 00 00 8B 55 C0 8B 8A DC 01 00 00 39 4D AC 73 49 
+
+==== BB 808 (0x3A96F752) in 15021B, out 83090B, BBs exec'd 0 ====
+	0x3A96F752:  movl 0xFFFFFF78(%ebp),%edx
+	0x3A96F758:  movl -64(%ebp),%eax
+	0x3A96F75B:  movl 0xFFFFFF74(%ebp),%esi
+	0x3A96F761:  shll $0x2, %edx
+	0x3A96F764:  cmpl $0x7, %edx
+	0x3A96F767:  movl 0x1D4(%eax),%edi
+	0x3A96F76D:  jbe-8 0x3A96F784
+. 808 3A96F752 29
+. 8B 95 78 FF FF FF 8B 45 C0 8B B5 74 FF FF FF C1 E2 02 83 FA 07 8B B8 D4 01 00 00 76 15 
+
+==== BB 809 (0x3A96F76F) in 15050B, out 83215B, BBs exec'd 0 ====
+	0x3A96F76F:  testl $0x4, %edi
+	0x3A96F775:  jz-8 0x3A96F784
+. 809 3A96F76F 8
+. F7 C7 04 00 00 00 74 0D 
+
+==== BB 810 (0x3A96F784) in 15058B, out 83276B, BBs exec'd 0 ====
+	0x3A96F784:  cld
+	0x3A96F785:  movl %edx,%ecx
+	0x3A96F787:  shrl $0x2, %ecx
+	0x3A96F78A:  rep movsl
+. 810 3A96F784 8
+. FC 89 D1 C1 E9 02 F3 A5 
+
+==== BB 811 (0x3A96F78A) in 15066B, out 83399B, BBs exec'd 0 ====
+	0x3A96F78A:  rep movsl
+. 811 3A96F78A 2
+. F3 A5 
+
+==== BB 812 (0x3A96F78C) in 15068B, out 83494B, BBs exec'd 0 ====
+	0x3A96F78C:  movl $0x1, -84(%ebp)
+	0x3A96F793:  movl -20(%ebp),%edx
+	0x3A96F796:  cmpl %edx,-84(%ebp)
+	0x3A96F799:  movl %edx,0xFFFFFF78(%ebp)
+	0x3A96F79F:  jnb-32 0x3A96FC92
+. 812 3A96F78C 25
+. C7 45 AC 01 00 00 00 8B 55 EC 39 55 AC 89 95 78 FF FF FF 0F 83 ED 04 00 00 
+
+==== BB 813 (0x3A96F7A5) in 15093B, out 83597B, BBs exec'd 0 ====
+	0x3A96F7A5:  movl -64(%ebp),%esi
+	0x3A96F7A8:  movl 0x1D4(%esi),%ecx
+	0x3A96F7AE:  movl -64(%ebp),%edx
+	0x3A96F7B1:  movl -84(%ebp),%esi
+	0x3A96F7B4:  movl 0x150(%edx),%eax
+	0x3A96F7BA:  movl $0x1,%edx
+	0x3A96F7BF:  movl (%eax,%esi,4),%eax
+	0x3A96F7C2:  cmpl %eax,4(%ecx)
+	0x3A96F7C5:  movl %eax,0xFFFFFF7C(%ebp)
+	0x3A96F7CB:  jz-8 0x3A96F7DE
+. 813 3A96F7A5 40
+. 8B 75 C0 8B 8E D4 01 00 00 8B 55 C0 8B 75 AC 8B 82 50 01 00 00 BA 01 00 00 00 8B 04 B0 39 41 04 89 85 7C FF FF FF 74 11 
+
+==== BB 814 (0x3A96F7DE) in 15133B, out 83760B, BBs exec'd 0 ====
+	0x3A96F7DE:  leal 1(%edx), %edi
+	0x3A96F7E1:  cmpl 0xFFFFFF78(%ebp),%edi
+	0x3A96F7E7:  jnb-32 0x3A96F89D
+. 814 3A96F7DE 15
+. 8D 7A 01 3B BD 78 FF FF FF 0F 83 B0 00 00 00 
+
+==== BB 815 (0x3A96F7ED) in 15148B, out 83838B, BBs exec'd 0 ====
+	0x3A96F7ED:  shll $0x2, %edx
+	0x3A96F7F0:  movl %edx,0xFFFFFF6C(%ebp)
+	0x3A96F7F6:  negl %edx
+	0x3A96F7F8:  movl %edx,0xFFFFFF68(%ebp)
+	0x3A96F7FE:  movl %esi,%esi
+	0x3A96F800:  leal 0x0(,%edi,4), %esi
+	0x3A96F807:  movl (%ecx,%esi,1),%eax
+	0x3A96F80A:  movl 0x1D4(%eax),%edx
+	0x3A96F810:  testl %edx,%edx
+	0x3A96F812:  jz-8 0x3A96F890
+. 815 3A96F7ED 39
+. C1 E2 02 89 95 6C FF FF FF F7 DA 89 95 68 FF FF FF 89 F6 8D 34 BD 00 00 00 00 8B 04 31 8B 90 D4 01 00 00 85 D2 74 7C 
+
+==== BB 816 (0x3A96F814) in 15187B, out 84001B, BBs exec'd 0 ====
+	0x3A96F814:  movl (%edx),%eax
+	0x3A96F816:  testl %eax,%eax
+	0x3A96F818:  jz-8 0x3A96F890
+. 816 3A96F814 6
+. 8B 02 85 C0 74 76 
+
+==== BB 817 (0x3A96F81A) in 15193B, out 84070B, BBs exec'd 0 ====
+	0x3A96F81A:  addl 0xFFFFFF68(%ebp),%esi
+	0x3A96F820:  movl %esi,0xFFFFFF70(%ebp)
+	0x3A96F826:  jmp-8 0x3A96F82E
+. 817 3A96F81A 14
+. 03 B5 68 FF FF FF 89 B5 70 FF FF FF EB 06 
+
+==== BB 818 (0x3A96F82E) in 15207B, out 84137B, BBs exec'd 0 ====
+	0x3A96F82E:  addl $0x4, %edx
+	0x3A96F831:  cmpl 0xFFFFFF7C(%ebp),%eax
+	0x3A96F837:  jnz-8 0x3A96F828
+. 818 3A96F82E 11
+. 83 C2 04 3B 85 7C FF FF FF 75 EF 
+
+==== BB 819 (0x3A96F828) in 15218B, out 84218B, BBs exec'd 0 ====
+	0x3A96F828:  movl (%edx),%eax
+	0x3A96F82A:  testl %eax,%eax
+	0x3A96F82C:  jz-8 0x3A96F890
+. 819 3A96F828 6
+. 8B 02 85 C0 74 62 
+
+==== BB 820 (0x3A96F890) in 15224B, out 84287B, BBs exec'd 0 ====
+	0x3A96F890:  incl %edi
+	0x3A96F891:  cmpl 0xFFFFFF78(%ebp),%edi
+	0x3A96F897:  jb-32 0x3A96F800
+. 820 3A96F890 13
+. 47 3B BD 78 FF FF FF 0F 82 63 FF FF FF 
+
+==== BB 821 (0x3A96F800) in 15237B, out 84363B, BBs exec'd 0 ====
+	0x3A96F800:  leal 0x0(,%edi,4), %esi
+	0x3A96F807:  movl (%ecx,%esi,1),%eax
+	0x3A96F80A:  movl 0x1D4(%eax),%edx
+	0x3A96F810:  testl %edx,%edx
+	0x3A96F812:  jz-8 0x3A96F890
+. 821 3A96F800 20
+. 8D 34 BD 00 00 00 00 8B 04 31 8B 90 D4 01 00 00 85 D2 74 7C 
+
+==== BB 822 (0x3A96F89D) in 15257B, out 84471B, BBs exec'd 0 ====
+	0x3A96F89D:  incl -84(%ebp)
+	0x3A96F8A0:  movl 0xFFFFFF78(%ebp),%eax
+	0x3A96F8A6:  cmpl %eax,-84(%ebp)
+	0x3A96F8A9:  jb-32 0x3A96F7AE
+. 822 3A96F89D 18
+. FF 45 AC 8B 85 78 FF FF FF 39 45 AC 0F 82 FF FE FF FF 
+
+==== BB 823 (0x3A96F7AE) in 15275B, out 84563B, BBs exec'd 0 ====
+	0x3A96F7AE:  movl -64(%ebp),%edx
+	0x3A96F7B1:  movl -84(%ebp),%esi
+	0x3A96F7B4:  movl 0x150(%edx),%eax
+	0x3A96F7BA:  movl $0x1,%edx
+	0x3A96F7BF:  movl (%eax,%esi,4),%eax
+	0x3A96F7C2:  cmpl %eax,4(%ecx)
+	0x3A96F7C5:  movl %eax,0xFFFFFF7C(%ebp)
+	0x3A96F7CB:  jz-8 0x3A96F7DE
+. 823 3A96F7AE 31
+. 8B 55 C0 8B 75 AC 8B 82 50 01 00 00 BA 01 00 00 00 8B 04 B0 39 41 04 89 85 7C FF FF FF 74 11 
+
+==== BB 824 (0x3A96F7CD) in 15306B, out 84703B, BBs exec'd 0 ====
+	0x3A96F7CD:  leal 4(%ecx), %eax
+	0x3A96F7D0:  addl $0x4, %eax
+	0x3A96F7D3:  movl 0xFFFFFF7C(%ebp),%esi
+	0x3A96F7D9:  incl %edx
+	0x3A96F7DA:  cmpl %esi,(%eax)
+	0x3A96F7DC:  jnz-8 0x3A96F7D0
+. 824 3A96F7CD 17
+. 8D 41 04 83 C0 04 8B B5 7C FF FF FF 42 39 30 75 F2 
+
+==== BB 825 (0x3A96F7D0) in 15323B, out 84809B, BBs exec'd 0 ====
+	0x3A96F7D0:  addl $0x4, %eax
+	0x3A96F7D3:  movl 0xFFFFFF7C(%ebp),%esi
+	0x3A96F7D9:  incl %edx
+	0x3A96F7DA:  cmpl %esi,(%eax)
+	0x3A96F7DC:  jnz-8 0x3A96F7D0
+. 825 3A96F7D0 14
+. 83 C0 04 8B B5 7C FF FF FF 42 39 30 75 F2 
+
+==== BB 826 (0x3A96F8AF) in 15337B, out 84908B, BBs exec'd 0 ====
+	0x3A96F8AF:  jmp 0x3A96FC9B
+. 826 3A96F8AF 5
+. E9 E7 03 00 00 
+
+==== BB 827 (0x3A96FC9B) in 15342B, out 84936B, BBs exec'd 0 ====
+	0x3A96FC9B:  movl 0xFFFFFF78(%ebp),%esi
+	0x3A96FCA1:  movl $0x0, (%ecx,%esi,4)
+	0x3A96FCA8:  movl -92(%ebp),%esi
+	0x3A96FCAB:  testl %esi,%esi
+	0x3A96FCAD:  jnz-32 0x3A96FE5C
+. 827 3A96FC9B 24
+. 8B B5 78 FF FF FF C7 04 B1 00 00 00 00 8B 75 A4 85 F6 0F 85 A9 01 00 00 
+
+==== BB 828 (0x3A96FCB3) in 15366B, out 85040B, BBs exec'd 0 ====
+	0x3A96FCB3:  leal -12(%ebp), %esp
+	0x3A96FCB6:  popl %ebx
+	0x3A96FCB7:  popl %esi
+	0x3A96FCB8:  popl %edi
+	0x3A96FCB9:  popl %ebp
+	0x3A96FCBA:  ret 8
+. 828 3A96FCB3 10
+. 8D 65 F4 5B 5E 5F 5D C2 08 00 
+
+==== BB 829 (0x3A967090) in 15376B, out 85142B, BBs exec'd 0 ====
+	0x3A967090:  movl 0xFFFFF994(%ebx),%ecx
+	0x3A967096:  subl $0x8, %esp
+	0x3A967099:  movl 0x154(%ecx),%edx
+	0x3A96709F:  movl %edx,0xFFFFFF04(%ebp)
+	0x3A9670A5:  testl %edx,%edx
+	0x3A9670A7:  jz-8 0x3A9670E4
+. 829 3A967090 25
+. 8B 8B 94 F9 FF FF 83 EC 08 8B 91 54 01 00 00 89 95 04 FF FF FF 85 D2 74 3B 
+
+==== BB 830 (0x3A9670A9) in 15401B, out 85262B, BBs exec'd 0 ====
+	0x3A9670A9:  decl 0xFFFFFF04(%ebp)
+	0x3A9670AF:  movl 0x150(%ecx),%eax
+	0x3A9670B5:  movl 0xFFFFFF04(%ebp),%ecx
+	0x3A9670BB:  movl (%eax,%ecx,4),%eax
+	0x3A9670BE:  orb $0x10, 0x174(%eax)
+	0x3A9670C5:  movl 0xFFFFF994(%ebx),%ecx
+	0x3A9670CB:  movl 0xFFFFFF04(%ebp),%esi
+	0x3A9670D1:  movl 0x150(%ecx),%eax
+	0x3A9670D7:  movl (%eax,%esi,4),%eax
+	0x3A9670DA:  incl 0x170(%eax)
+	0x3A9670E0:  testl %esi,%esi
+	0x3A9670E2:  jnz-8 0x3A9670A9
+. 830 3A9670A9 59
+. FF 8D 04 FF FF FF 8B 81 50 01 00 00 8B 8D 04 FF FF FF 8B 04 88 80 88 74 01 00 00 10 8B 8B 94 F9 FF FF 8B B5 04 FF FF FF 8B 81 50 01 00 00 8B 04 B0 FF 80 70 01 00 00 85 F6 75 C5 
+
+==== BB 831 (0x3A9670E4) in 15460B, out 85479B, BBs exec'd 0 ====
+	0x3A9670E4:  movl 0xFFFFF9F0(%ebx),%eax
+	0x3A9670EA:  movl 0xFFFFF9F4(%ebx),%edx
+	0x3A9670F0:  movl %eax,12(%edx)
+	0x3A9670F3:  movl 0xFFFFF9F0(%ebx),%eax
+	0x3A9670F9:  testl %eax,%eax
+	0x3A9670FB:  jz-8 0x3A967100
+. 831 3A9670E4 25
+. 8B 83 F0 F9 FF FF 8B 93 F4 F9 FF FF 89 42 0C 8B 83 F0 F9 FF FF 85 C0 74 03 
+
+==== BB 832 (0x3A9670FD) in 15485B, out 85593B, BBs exec'd 0 ====
+	0x3A9670FD:  movl %edx,16(%eax)
+	0x3A967100:  cmpl $0x1, 0xFFFFFB54(%ebx)
+	0x3A967107:  jbe-32 0x3A967B5F
+. 832 3A9670FD 16
+. 89 50 10 83 BB 54 FB FF FF 01 0F 86 52 0A 00 00 
+
+==== BB 833 (0x3A96710D) in 15501B, out 85678B, BBs exec'd 0 ====
+	0x3A96710D:  movl 0xFFFFF994(%ebx),%ecx
+	0x3A967113:  movl $0x1,%eax
+	0x3A967118:  movl 0xFFFFFED0(%ebp),%edx
+	0x3A96711E:  movl %eax,0xFFFFFF04(%ebp)
+	0x3A967124:  movl 0x150(%ecx),%eax
+	0x3A96712A:  addl $0x50, %edx
+	0x3A96712D:  cmpl %edx,4(%eax)
+	0x3A967130:  jz-8 0x3A967143
+. 833 3A96710D 37
+. 8B 8B 94 F9 FF FF B8 01 00 00 00 8B 95 D0 FE FF FF 89 85 04 FF FF FF 8B 81 50 01 00 00 83 C2 50 39 50 04 74 11 
+
+==== BB 834 (0x3A967132) in 15538B, out 85820B, BBs exec'd 0 ====
+	0x3A967132:  incl 0xFFFFFF04(%ebp)
+	0x3A967138:  movl 0xFFFFFF04(%ebp),%esi
+	0x3A96713E:  cmpl %edx,(%eax,%esi,4)
+	0x3A967141:  jnz-8 0x3A967132
+. 834 3A967132 17
+. FF 85 04 FF FF FF 8B B5 04 FF FF FF 39 14 B0 75 EF 
+
+==== BB 835 (0x3A967143) in 15555B, out 85920B, BBs exec'd 0 ====
+	0x3A967143:  movl 0x150(%ecx),%eax
+	0x3A967149:  movl 0xFFFFFF04(%ebp),%esi
+	0x3A96714F:  movl 0xFFFFFF50(%ebp),%edi
+	0x3A967155:  movl %eax,0xFFFFFEA8(%ebp)
+	0x3A96715B:  movl -4(%eax,%esi,4),%edx
+	0x3A96715F:  testl %edi,%edi
+	0x3A967161:  movl %edx,0xFFFFF9F4(%ebx)
+	0x3A967167:  jnz-32 0x3A968345
+. 835 3A967143 42
+. 8B 81 50 01 00 00 8B B5 04 FF FF FF 8B BD 50 FF FF FF 89 85 A8 FE FF FF 8B 54 B0 FC 85 FF 89 93 F4 F9 FF FF 0F 85 D8 11 00 00 
+
+==== BB 836 (0x3A96716D) in 15597B, out 86073B, BBs exec'd 0 ====
+	0x3A96716D:  movl %esi,%eax
+	0x3A96716F:  incl %eax
+	0x3A967170:  xorl %esi, %esi
+	0x3A967172:  cmpl 0x154(%ecx),%eax
+	0x3A967178:  jnb-8 0x3A96718A
+. 836 3A96716D 13
+. 89 F0 40 31 F6 3B 81 54 01 00 00 73 10 
+
+==== BB 837 (0x3A96718A) in 15610B, out 86167B, BBs exec'd 0 ====
+	0x3A96718A:  movl %esi,0xFFFFF9F0(%ebx)
+	0x3A967190:  movl 0xFFFFFEC4(%ebp),%eax
+	0x3A967196:  testl %eax,%eax
+	0x3A967198:  jz-32 0x3A96833A
+. 837 3A96718A 20
+. 89 B3 F0 F9 FF FF 8B 85 C4 FE FF FF 85 C0 0F 84 9C 11 00 00 
+
+==== BB 838 (0x3A96833A) in 15630B, out 86261B, BBs exec'd 0 ====
+	0x3A96833A:  movl 0xFFFFF9F4(%ebx),%edx
+	0x3A968340:  jmp 0x3A9671B3
+. 838 3A96833A 11
+. 8B 93 F4 F9 FF FF E9 6E EE FF FF 
+
+==== BB 839 (0x3A9671B3) in 15641B, out 86308B, BBs exec'd 0 ====
+	0x3A9671B3:  movl 0xFFFFFED0(%ebp),%ecx
+	0x3A9671B9:  addl $0x50, %ecx
+	0x3A9671BC:  movl %ecx,12(%edx)
+	0x3A9671BF:  movl 0xFFFFF9F0(%ebx),%eax
+	0x3A9671C5:  testl %eax,%eax
+	0x3A9671C7:  jz-8 0x3A9671CC
+. 839 3A9671B3 22
+. 8B 8D D0 FE FF FF 83 C1 50 89 4A 0C 8B 83 F0 F9 FF FF 85 C0 74 03 
+
+==== BB 840 (0x3A9671CC) in 15663B, out 86422B, BBs exec'd 0 ====
+	0x3A9671CC:  xorl %eax, %eax
+	0x3A9671CE:  testl %edi,%edi
+	0x3A9671D0:  setz %al
+	0x3A9671D3:  movl %eax,0xFFFFFF24(%ebp)
+	0x3A9671D9:  xorl %eax, %eax
+	0x3A9671DB:  cmpl $0x3, %edi
+	0x3A9671DE:  setz %al
+	0x3A9671E1:  leal 0xFFFFFF24(%ebp), %ecx
+	0x3A9671E7:  leal 0xFFFF0244(%ebx), %edx
+	0x3A9671ED:  movl %eax,0xFFFFFF28(%ebp)
+	0x3A9671F3:  leal 0xFFFED9B4(%ebx), %eax
+	0x3A9671F9:  call 0x3A970160
+. 840 3A9671CC 50
+. 31 C0 85 FF 0F 94 C0 89 85 24 FF FF FF 31 C0 83 FF 03 0F 94 C0 8D 8D 24 FF FF FF 8D 93 44 02 FF FF 89 85 28 FF FF FF 8D 83 B4 D9 FE FF E8 62 8F 00 00 
+
+==== BB 841 _dl_receive_error(0x3A970160) in 15713B, out 86619B, BBs exec'd 0 ====
+	0x3A970160:  pushl %ebp
+	0x3A970161:  movl %esp,%ebp
+	0x3A970163:  subl $0x28, %esp
+	0x3A970166:  movl %ebx,-12(%ebp)
+	0x3A970169:  movl %esi,-8(%ebp)
+	0x3A97016C:  call 0x3A97592B
+. 841 3A970160 17
+. 55 89 E5 83 EC 28 89 5D F4 89 75 F8 E8 BA 57 00 00 
+
+==== BB 842 (0x3A970171) in 15730B, out 86730B, BBs exec'd 0 ====
+	0x3A970171:  addl $0x84FB, %ebx
+	0x3A970177:  movl %edi,-4(%ebp)
+	0x3A97017A:  movl %edx,-20(%ebp)
+	0x3A97017D:  movl %ecx,-24(%ebp)
+	0x3A970180:  movl %eax,-16(%ebp)
+	0x3A970183:  call*l 0xFFFFF9E0(%ebx)
+. 842 3A970171 24
+. 81 C3 FB 84 00 00 89 7D FC 89 55 EC 89 4D E8 89 45 F0 FF 93 E0 F9 FF FF 
+
+==== BB 843 (0x3A970189) in 15754B, out 86847B, BBs exec'd 0 ====
+	0x3A970189:  movl (%eax),%edi
+	0x3A97018B:  movl %eax,%esi
+	0x3A97018D:  movl 0xE8(%ebx),%eax
+	0x3A970193:  movl $0x0, (%esi)
+	0x3A970199:  movl %eax,-28(%ebp)
+	0x3A97019C:  movl -16(%ebp),%eax
+	0x3A97019F:  movl %eax,0xE8(%ebx)
+	0x3A9701A5:  movl -24(%ebp),%eax
+	0x3A9701A8:  movl %eax,(%esp,,)
+	0x3A9701AB:  call*l -20(%ebp)
+. 843 3A970189 37
+. 8B 38 89 C6 8B 83 E8 00 00 00 C7 06 00 00 00 00 89 45 E4 8B 45 F0 89 83 E8 00 00 00 8B 45 E8 89 04 24 FF 55 EC 
+
+==== BB 844 version_check_doit(0x3A9688B0) in 15791B, out 86990B, BBs exec'd 0 ====
+	0x3A9688B0:  pushl %ebp
+	0x3A9688B1:  movl $0x1,%edx
+	0x3A9688B6:  movl %esp,%ebp
+	0x3A9688B8:  subl $0xC, %esp
+	0x3A9688BB:  movl %esi,-4(%ebp)
+	0x3A9688BE:  movl 8(%ebp),%esi
+	0x3A9688C1:  movl %ebx,-8(%ebp)
+	0x3A9688C4:  call 0x3A97592B
+. 844 3A9688B0 25
+. 55 BA 01 00 00 00 89 E5 83 EC 0C 89 75 FC 8B 75 08 89 5D F8 E8 62 D0 00 00 
+
+==== BB 845 (0x3A9688C9) in 15816B, out 87126B, BBs exec'd 0 ====
+	0x3A9688C9:  addl $0xFDA3, %ebx
+	0x3A9688CF:  movl 4(%esi),%ecx
+	0x3A9688D2:  movl 0xFFFFF994(%ebx),%eax
+	0x3A9688D8:  call 0x3A971860
+. 845 3A9688C9 20
+. 81 C3 A3 FD 00 00 8B 4E 04 8B 83 94 F9 FF FF E8 83 8F 00 00 
+
+==== BB 846 _dl_check_all_versions(0x3A971860) in 15836B, out 87223B, BBs exec'd 0 ====
+	0x3A971860:  pushl %ebp
+	0x3A971861:  movl %esp,%ebp
+	0x3A971863:  pushl %edi
+	0x3A971864:  xorl %edi, %edi
+	0x3A971866:  pushl %esi
+	0x3A971867:  movl %eax,%esi
+	0x3A971869:  pushl %ebx
+	0x3A97186A:  subl $0xC, %esp
+	0x3A97186D:  call 0x3A97592B
+. 846 3A971860 18
+. 55 89 E5 57 31 FF 56 89 C6 53 83 EC 0C E8 B9 40 00 00 
+
+==== BB 847 (0x3A971872) in 15854B, out 87380B, BBs exec'd 0 ====
+	0x3A971872:  addl $0x6DFA, %ebx
+	0x3A971878:  movl %edx,-16(%ebp)
+	0x3A97187B:  testl %eax,%eax
+	0x3A97187D:  movl %ecx,-20(%ebp)
+	0x3A971880:  jnz-8 0x3A971890
+. 847 3A971872 16
+. 81 C3 FA 6D 00 00 89 55 F0 85 C0 89 4D EC 75 0E 
+
+==== BB 848 (0x3A971890) in 15870B, out 87484B, BBs exec'd 0 ====
+	0x3A971890:  testb $0x2, 0x175(%esi)
+	0x3A971897:  movl $0x0, -24(%ebp)
+	0x3A97189E:  jnz-8 0x3A971884
+. 848 3A971890 16
+. F6 86 75 01 00 00 02 C7 45 E8 00 00 00 00 75 E4 
+
+==== BB 849 (0x3A9718A0) in 15886B, out 87572B, BBs exec'd 0 ====
+	0x3A9718A0:  movl -20(%ebp),%ecx
+	0x3A9718A3:  movl %esi,%eax
+	0x3A9718A5:  movl -16(%ebp),%edx
+	0x3A9718A8:  call 0x3A9714A0
+. 849 3A9718A0 13
+. 8B 4D EC 89 F0 8B 55 F0 E8 F3 FB FF FF 
+
+==== BB 850 _dl_check_map_versions(0x3A9714A0) in 15899B, out 87656B, BBs exec'd 0 ====
+	0x3A9714A0:  pushl %ebp
+	0x3A9714A1:  movl %esp,%ebp
+	0x3A9714A3:  pushl %edi
+	0x3A9714A4:  pushl %esi
+	0x3A9714A5:  pushl %ebx
+	0x3A9714A6:  subl $0x84, %esp
+	0x3A9714AC:  movl %eax,-80(%ebp)
+	0x3A9714AF:  movl 44(%eax),%eax
+	0x3A9714B2:  call 0x3A97592B
+. 850 3A9714A0 23
+. 55 89 E5 57 56 53 81 EC 84 00 00 00 89 45 B0 8B 40 2C E8 74 44 00 00 
+
+==== BB 851 (0x3A9714B7) in 15922B, out 87818B, BBs exec'd 0 ====
+	0x3A9714B7:  addl $0x71B5, %ebx
+	0x3A9714BD:  movl %edx,-84(%ebp)
+	0x3A9714C0:  xorl %edx, %edx
+	0x3A9714C2:  testl %eax,%eax
+	0x3A9714C4:  movl %ecx,-88(%ebp)
+	0x3A9714C7:  movl $0x0, -92(%ebp)
+	0x3A9714CE:  movl $0x0, -108(%ebp)
+	0x3A9714D5:  movl $0x0, -112(%ebp)
+	0x3A9714DC:  jz-32 0x3A971630
+. 851 3A9714B7 43
+. 81 C3 B5 71 00 00 89 55 AC 31 D2 85 C0 89 4D A8 C7 45 A4 00 00 00 00 C7 45 94 00 00 00 00 C7 45 90 00 00 00 00 0F 84 4E 01 00 00 
+
+==== BB 852 (0x3A9714E2) in 15965B, out 87981B, BBs exec'd 0 ====
+	0x3A9714E2:  movl 4(%eax),%eax
+	0x3A9714E5:  movl -80(%ebp),%ecx
+	0x3A9714E8:  movl %eax,-96(%ebp)
+	0x3A9714EB:  movl -80(%ebp),%eax
+	0x3A9714EE:  movl 0xA4(%ecx),%ecx
+	0x3A9714F4:  movl 0xAC(%eax),%eax
+	0x3A9714FA:  testl %ecx,%ecx
+	0x3A9714FC:  movl %ecx,-100(%ebp)
+	0x3A9714FF:  movl %eax,-104(%ebp)
+	0x3A971502:  jz-32 0x3A9715F2
+. 852 3A9714E2 38
+. 8B 40 04 8B 4D B0 89 45 A0 8B 45 B0 8B 89 A4 00 00 00 8B 80 AC 00 00 00 85 C9 89 4D 9C 89 45 98 0F 84 EA 00 00 00 
+
+==== BB 853 (0x3A971508) in 16003B, out 88138B, BBs exec'd 0 ====
+	0x3A971508:  movl 4(%ecx),%edx
+	0x3A97150B:  movl -80(%ebp),%ecx
+	0x3A97150E:  movl (%ecx),%ecx
+	0x3A971510:  addl %ecx,%edx
+	0x3A971512:  cmpw $0x1, (%edx)
+	0x3A971516:  movl %edx,-116(%ebp)
+	0x3A971519:  jnz-32 0x3A971696
+. 853 3A971508 23
+. 8B 51 04 8B 4D B0 8B 09 01 CA 66 83 3A 01 89 55 8C 0F 85 77 01 00 00 
+
+==== BB 854 (0x3A97151F) in 16026B, out 88251B, BBs exec'd 0 ====
+	0x3A97151F:  nop
+	0x3A971520:  movl -116(%ebp),%edx
+	0x3A971523:  movl -96(%ebp),%edi
+	0x3A971526:  movl 0xFFFFF994(%ebx),%esi
+	0x3A97152C:  movl 4(%edx),%eax
+	0x3A97152F:  addl %eax,%edi
+	0x3A971531:  testl %esi,%esi
+	0x3A971533:  jz-8 0x3A971557
+. 854 3A97151F 22
+. 90 8B 55 8C 8B 7D A0 8B B3 94 F9 FF FF 8B 42 04 01 C7 85 F6 74 22 
+
+==== BB 855 (0x3A971535) in 16048B, out 88378B, BBs exec'd 0 ====
+	0x3A971535:  leal 0(%esi,,), %esi
+	0x3A971539:  leal 0(%edi,,), %edi
+	0x3A971540:  movl %esi,%edx
+	0x3A971542:  movl %edi,%eax
+	0x3A971544:  call 0x3A970B80
+. 855 3A971535 20
+. 8D 74 26 00 8D BC 27 00 00 00 00 89 F2 89 F8 E8 37 F6 FF FF 
+
+==== BB 856 (0x3A971549) in 16068B, out 88463B, BBs exec'd 0 ====
+	0x3A971549:  movl %esi,-120(%ebp)
+	0x3A97154C:  testl %eax,%eax
+	0x3A97154E:  jnz-8 0x3A97156F
+. 856 3A971549 7
+. 89 75 88 85 C0 75 1F 
+
+==== BB 857 (0x3A971550) in 16075B, out 88538B, BBs exec'd 0 ====
+	0x3A971550:  movl 12(%esi),%esi
+	0x3A971553:  testl %esi,%esi
+	0x3A971555:  jnz-8 0x3A971540
+. 857 3A971550 7
+. 8B 76 0C 85 F6 75 E9 
+
+==== BB 858 (0x3A971540) in 16082B, out 88610B, BBs exec'd 0 ====
+	0x3A971540:  movl %esi,%edx
+	0x3A971542:  movl %edi,%eax
+	0x3A971544:  call 0x3A970B80
+. 858 3A971540 9
+. 89 F2 89 F8 E8 37 F6 FF FF 
+
+==== BB 859 (0x3A97156F) in 16091B, out 88675B, BBs exec'd 0 ====
+	0x3A97156F:  movl -88(%ebp),%eax
+	0x3A971572:  testl %eax,%eax
+	0x3A971574:  jnz-32 0x3A971681
+. 859 3A97156F 11
+. 8B 45 A8 85 C0 0F 85 07 01 00 00 
+
+==== BB 860 (0x3A97157A) in 16102B, out 88747B, BBs exec'd 0 ====
+	0x3A97157A:  movl -116(%ebp),%esi
+	0x3A97157D:  movl 8(%esi),%eax
+	0x3A971580:  addl %eax,%esi
+	0x3A971582:  movl -80(%ebp),%edx
+	0x3A971585:  movl 8(%esi),%edi
+	0x3A971588:  movl -96(%ebp),%ecx
+	0x3A97158B:  movl 4(%edx),%eax
+	0x3A97158E:  addl %edi,%ecx
+	0x3A971590:  movl %eax,%edi
+	0x3A971592:  cmpb $0x0, (%eax)
+	0x3A971595:  jnz-8 0x3A97159F
+. 860 3A97157A 29
+. 8B 75 8C 8B 46 08 01 C6 8B 55 B0 8B 7E 08 8B 4D A0 8B 42 04 01 F9 89 C7 80 38 00 75 08 
+
+==== BB 861 (0x3A971597) in 16131B, out 88901B, BBs exec'd 0 ====
+	0x3A971597:  movl 0x38(%ebx),%eax
+	0x3A97159D:  movl (%eax),%edi
+	0x3A97159F:  movzwl 4(%esi),%eax
+	0x3A9715A3:  andl $0x2, %eax
+	0x3A9715A6:  movl %eax,8(%esp,,)
+	0x3A9715AA:  movl -84(%ebp),%eax
+	0x3A9715AD:  movl %eax,4(%esp,,)
+	0x3A9715B1:  movl -120(%ebp),%edx
+	0x3A9715B4:  movl %edi,%eax
+	0x3A9715B6:  movl %edx,(%esp,,)
+	0x3A9715B9:  movl (%esi),%edx
+	0x3A9715BB:  call 0x3A971120
+. 861 3A971597 41
+. 8B 83 38 00 00 00 8B 38 0F B7 46 04 83 E0 02 89 44 24 08 8B 45 AC 89 44 24 04 8B 55 88 89 F8 89 14 24 8B 16 E8 60 FB FF FF 
+
+==== BB 862 match_symbol(0x3A971120) in 16172B, out 89064B, BBs exec'd 0 ====
+	0x3A971120:  pushl %ebp
+	0x3A971121:  movl %esp,%ebp
+	0x3A971123:  pushl %edi
+	0x3A971124:  movl %edx,%edi
+	0x3A971126:  pushl %esi
+	0x3A971127:  xorl %esi, %esi
+	0x3A971129:  pushl %ebx
+	0x3A97112A:  subl $0xB4, %esp
+	0x3A971130:  movl 8(%ebp),%edx
+	0x3A971133:  movl %eax,0xFFFFFF60(%ebp)
+	0x3A971139:  call 0x3A97592B
+. 862 3A971120 30
+. 55 89 E5 57 89 D7 56 31 F6 53 81 EC B4 00 00 00 8B 55 08 89 85 60 FF FF FF E8 ED 47 00 00 
+
+==== BB 863 (0x3A97113E) in 16202B, out 89253B, BBs exec'd 0 ====
+	0x3A97113E:  addl $0x752E, %ebx
+	0x3A971144:  movl %ecx,0xFFFFFF5C(%ebp)
+	0x3A97114A:  movl 44(%edx),%eax
+	0x3A97114D:  testb $0x10, 0xFFFFFC14(%ebx)
+	0x3A971154:  movl 4(%eax),%eax
+	0x3A971157:  movl %esi,0xFFFFFF54(%ebp)
+	0x3A97115D:  movl %eax,0xFFFFFF58(%ebp)
+	0x3A971163:  jnz-32 0x3A97132A
+. 863 3A97113E 43
+. 81 C3 2E 75 00 00 89 8D 5C FF FF FF 8B 42 2C F6 83 14 FC FF FF 10 8B 40 04 89 B5 54 FF FF FF 89 85 58 FF FF FF 0F 85 C1 01 00 00 
+
+==== BB 864 (0x3A971169) in 16245B, out 89410B, BBs exec'd 0 ====
+	0x3A971169:  movl 8(%ebp),%edx
+	0x3A97116C:  movl 0xAC(%edx),%eax
+	0x3A971172:  testl %eax,%eax
+	0x3A971174:  jz-32 0x3A9712B9
+. 864 3A971169 17
+. 8B 55 08 8B 82 AC 00 00 00 85 C0 0F 84 3F 01 00 00 
+
+==== BB 865 (0x3A97117A) in 16262B, out 89498B, BBs exec'd 0 ====
+	0x3A97117A:  movl 8(%ebp),%edx
+	0x3A97117D:  movl (%edx),%esi
+	0x3A97117F:  movl 4(%eax),%edx
+	0x3A971182:  addl %edx,%esi
+	0x3A971184:  cmpw $0x1, (%esi)
+	0x3A971188:  jnz-8 0x3A9711A6
+. 865 3A97117A 16
+. 8B 55 08 8B 32 8B 50 04 01 D6 66 83 3E 01 75 1C 
+
+==== BB 866 (0x3A97118A) in 16278B, out 89601B, BBs exec'd 0 ====
+	0x3A97118A:  cmpl 8(%esi),%edi
+	0x3A97118D:  jz-32 0x3A971280
+. 866 3A97118A 9
+. 3B 7E 08 0F 84 ED 00 00 00 
+
+==== BB 867 (0x3A971193) in 16287B, out 89666B, BBs exec'd 0 ====
+	0x3A971193:  movl 16(%esi),%eax
+	0x3A971196:  testl %eax,%eax
+	0x3A971198:  jz-32 0x3A971365
+. 867 3A971193 11
+. 8B 46 10 85 C0 0F 84 C7 01 00 00 
+
+==== BB 868 (0x3A97119E) in 16298B, out 89738B, BBs exec'd 0 ====
+	0x3A97119E:  addl %eax,%esi
+	0x3A9711A0:  cmpw $0x1, (%esi)
+	0x3A9711A4:  jz-8 0x3A97118A
+. 868 3A97119E 8
+. 01 C6 66 83 3E 01 74 E4 
+
+==== BB 869 (0x3A971280) in 16306B, out 89809B, BBs exec'd 0 ====
+	0x3A971280:  movl 12(%esi),%edx
+	0x3A971283:  movl %esi,%eax
+	0x3A971285:  addl %edx,%eax
+	0x3A971287:  movl (%eax),%ecx
+	0x3A971289:  movl 0xFFFFFF58(%ebp),%edx
+	0x3A97128F:  addl %ecx,%edx
+	0x3A971291:  movl %edx,4(%esp,,)
+	0x3A971295:  movl 0xFFFFFF5C(%ebp),%eax
+	0x3A97129B:  movl %eax,(%esp,,)
+	0x3A97129E:  call 0x3A975280
+. 869 3A971280 35
+. 8B 56 0C 89 F0 01 D0 8B 08 8B 95 58 FF FF FF 01 CA 89 54 24 04 8B 85 5C FF FF FF 89 04 24 E8 DD 3F 00 00 
+
+==== BB 870 (0x3A9712A3) in 16341B, out 89949B, BBs exec'd 0 ====
+	0x3A9712A3:  xorl %edx, %edx
+	0x3A9712A5:  testl %eax,%eax
+	0x3A9712A7:  jnz-32 0x3A971193
+. 870 3A9712A3 10
+. 31 D2 85 C0 0F 85 E6 FE FF FF 
+
+==== BB 871 (0x3A9712AD) in 16351B, out 90022B, BBs exec'd 0 ====
+	0x3A9712AD:  leal -12(%ebp), %esp
+	0x3A9712B0:  movl %edx,%eax
+	0x3A9712B2:  popl %ebx
+	0x3A9712B3:  popl %esi
+	0x3A9712B4:  popl %edi
+	0x3A9712B5:  popl %ebp
+	0x3A9712B6:  ret 12
+. 871 3A9712AD 12
+. 8D 65 F4 89 D0 5B 5E 5F 5D C2 0C 00 
+
+==== BB 872 (0x3A9715C0) in 16363B, out 90134B, BBs exec'd 0 ====
+	0x3A9715C0:  orl %eax,-92(%ebp)
+	0x3A9715C3:  subl $0xC, %esp
+	0x3A9715C6:  movzwl 6(%esi),%eax
+	0x3A9715CA:  andl $0x7FFF, %eax
+	0x3A9715CF:  cmpl -108(%ebp),%eax
+	0x3A9715D2:  jbe-8 0x3A9715D7
+. 872 3A9715C0 20
+. 09 45 A4 83 EC 0C 0F B7 46 06 25 FF 7F 00 00 3B 45 94 76 03 
+
+==== BB 873 (0x3A9715D4) in 16383B, out 90254B, BBs exec'd 0 ====
+	0x3A9715D4:  movl %eax,-108(%ebp)
+	0x3A9715D7:  movl 12(%esi),%eax
+	0x3A9715DA:  testl %eax,%eax
+	0x3A9715DC:  jnz-8 0x3A971580
+. 873 3A9715D4 10
+. 89 45 94 8B 46 0C 85 C0 75 A2 
+
+==== BB 874 (0x3A9715DE) in 16393B, out 90342B, BBs exec'd 0 ====
+	0x3A9715DE:  movl -116(%ebp),%ecx
+	0x3A9715E1:  movl 12(%ecx),%eax
+	0x3A9715E4:  testl %eax,%eax
+	0x3A9715E6:  jz-8 0x3A9715F2
+. 874 3A9715DE 10
+. 8B 4D 8C 8B 41 0C 85 C0 74 0A 
+
+==== BB 875 (0x3A9715F2) in 16403B, out 90427B, BBs exec'd 0 ====
+	0x3A9715F2:  movl -104(%ebp),%esi
+	0x3A9715F5:  testl %esi,%esi
+	0x3A9715F7:  jz-8 0x3A971622
+. 875 3A9715F2 7
+. 8B 75 98 85 F6 74 29 
+
+==== BB 876 (0x3A971622) in 16410B, out 90499B, BBs exec'd 0 ====
+	0x3A971622:  movl -108(%ebp),%eax
+	0x3A971625:  testl %eax,%eax
+	0x3A971627:  jnz-32 0x3A97174A
+. 876 3A971622 11
+. 8B 45 94 85 C0 0F 85 1D 01 00 00 
+
+==== BB 877 (0x3A97174A) in 16421B, out 90571B, BBs exec'd 0 ====
+	0x3A97174A:  movl $0x10,%edi
+	0x3A97174F:  movl %edi,4(%esp,,)
+	0x3A971753:  movl -108(%ebp),%esi
+	0x3A971756:  incl %esi
+	0x3A971757:  movl %esi,(%esp,,)
+	0x3A97175A:  call 0x3A96582C
+. 877 3A97174A 21
+. BF 10 00 00 00 89 7C 24 04 8B 75 94 46 89 34 24 E8 CD 40 FF FF 
+
+==== BB 878 (0x3A97175F) in 16442B, out 90674B, BBs exec'd 0 ====
+	0x3A97175F:  movl $0xC, -112(%ebp)
+	0x3A971766:  movl -80(%ebp),%edx
+	0x3A971769:  testl %eax,%eax
+	0x3A97176B:  movl %eax,%edi
+	0x3A97176D:  leal 0xFFFFEB7C(%ebx), %ecx
+	0x3A971773:  movl %eax,0x17C(%edx)
+	0x3A971779:  jz-8 0x3A97172A
+. 878 3A97175F 28
+. C7 45 90 0C 00 00 00 8B 55 B0 85 C0 89 C7 8D 8B 7C EB FF FF 89 82 7C 01 00 00 74 AF 
+
+==== BB 879 (0x3A97177B) in 16470B, out 90803B, BBs exec'd 0 ====
+	0x3A97177B:  movl %esi,0x178(%edx)
+	0x3A971781:  movl 0xDC(%edx),%eax
+	0x3A971787:  movl -100(%ebp),%esi
+	0x3A97178A:  movl 4(%eax),%eax
+	0x3A97178D:  testl %esi,%esi
+	0x3A97178F:  movl %eax,0x18C(%edx)
+	0x3A971795:  jz-8 0x3A9717FA
+. 879 3A97177B 28
+. 89 B2 78 01 00 00 8B 82 DC 00 00 00 8B 75 9C 8B 40 04 85 F6 89 82 8C 01 00 00 74 63 
+
+==== BB 880 (0x3A971797) in 16498B, out 90933B, BBs exec'd 0 ====
+	0x3A971797:  movl -100(%ebp),%ecx
+	0x3A97179A:  movl (%edx),%eax
+	0x3A97179C:  movl 4(%ecx),%ecx
+	0x3A97179F:  addl %eax,%ecx
+	0x3A9717A1:  movl %ecx,-124(%ebp)
+	0x3A9717A4:  movl -124(%ebp),%esi
+	0x3A9717A7:  movl 8(%esi),%ecx
+	0x3A9717AA:  addl %ecx,%esi
+	0x3A9717AC:  jmp-8 0x3A9717B2
+. 880 3A971797 23
+. 8B 4D 9C 8B 02 8B 49 04 01 C1 89 4D 84 8B 75 84 8B 4E 08 01 CE EB 04 
+
+==== BB 881 (0x3A9717B2) in 16521B, out 91049B, BBs exec'd 0 ====
+	0x3A9717B2:  movzwl 6(%esi),%edx
+	0x3A9717B6:  movl (%esi),%eax
+	0x3A9717B8:  movl %edx,%ecx
+	0x3A9717BA:  andl $0x7FFF, %ecx
+	0x3A9717C0:  andl $0x8000, %edx
+	0x3A9717C6:  shll $0x4, %ecx
+	0x3A9717C9:  movl %eax,4(%edi,%ecx,1)
+	0x3A9717CD:  movl -96(%ebp),%eax
+	0x3A9717D0:  movl %edx,8(%edi,%ecx,1)
+	0x3A9717D4:  movl 8(%esi),%edx
+	0x3A9717D7:  addl %edx,%eax
+	0x3A9717D9:  movl %eax,(%edi,%ecx,1)
+	0x3A9717DC:  movl -124(%ebp),%edx
+	0x3A9717DF:  movl -96(%ebp),%eax
+	0x3A9717E2:  addl 4(%edx),%eax
+	0x3A9717E5:  movl %eax,12(%edi,%ecx,1)
+	0x3A9717E9:  movl 12(%esi),%eax
+	0x3A9717EC:  testl %eax,%eax
+	0x3A9717EE:  jnz-8 0x3A9717B0
+. 881 3A9717B2 62
+. 0F B7 56 06 8B 06 89 D1 81 E1 FF 7F 00 00 81 E2 00 80 00 00 C1 E1 04 89 44 0F 04 8B 45 A0 89 54 0F 08 8B 56 08 01 D0 89 04 0F 8B 55 84 8B 45 A0 03 42 04 89 44 0F 0C 8B 46 0C 85 C0 75 C0 
+
+==== BB 882 (0x3A9717F0) in 16583B, out 91297B, BBs exec'd 0 ====
+	0x3A9717F0:  movl -124(%ebp),%ecx
+	0x3A9717F3:  movl 12(%ecx),%eax
+	0x3A9717F6:  testl %eax,%eax
+	0x3A9717F8:  jnz-8 0x3A97179F
+. 882 3A9717F0 10
+. 8B 4D 84 8B 41 0C 85 C0 75 A5 
+
+==== BB 883 (0x3A9717FA) in 16593B, out 91382B, BBs exec'd 0 ====
+	0x3A9717FA:  movl -104(%ebp),%esi
+	0x3A9717FD:  testl %esi,%esi
+	0x3A9717FF:  jz-32 0x3A97162D
+. 883 3A9717FA 11
+. 8B 75 98 85 F6 0F 84 28 FE FF FF 
+
+==== BB 884 (0x3A97162D) in 16604B, out 91454B, BBs exec'd 0 ====
+	0x3A97162D:  movl -92(%ebp),%edx
+	0x3A971630:  leal -12(%ebp), %esp
+	0x3A971633:  movl %edx,%eax
+	0x3A971635:  popl %ebx
+	0x3A971636:  popl %esi
+	0x3A971637:  popl %edi
+	0x3A971638:  popl %ebp
+	0x3A971639:  ret
+. 884 3A97162D 13
+. 8B 55 A4 8D 65 F4 89 D0 5B 5E 5F 5D C3 
+
+==== BB 885 (0x3A9718AD) in 16617B, out 91576B, BBs exec'd 0 ====
+	0x3A9718AD:  testl %eax,%eax
+	0x3A9718AF:  jz-8 0x3A971884
+. 885 3A9718AD 4
+. 85 C0 74 D3 
+
+==== BB 886 (0x3A971884) in 16621B, out 91635B, BBs exec'd 0 ====
+	0x3A971884:  movl -24(%ebp),%edx
+	0x3A971887:  movl 12(%esi),%esi
+	0x3A97188A:  orl %edx,%edi
+	0x3A97188C:  testl %esi,%esi
+	0x3A97188E:  jz-8 0x3A9718D0
+. 886 3A971884 12
+. 8B 55 E8 8B 76 0C 09 D7 85 F6 74 40 
+
+==== BB 887 (0x3A97159F) in 16633B, out 91735B, BBs exec'd 0 ====
+	0x3A97159F:  movzwl 4(%esi),%eax
+	0x3A9715A3:  andl $0x2, %eax
+	0x3A9715A6:  movl %eax,8(%esp,,)
+	0x3A9715AA:  movl -84(%ebp),%eax
+	0x3A9715AD:  movl %eax,4(%esp,,)
+	0x3A9715B1:  movl -120(%ebp),%edx
+	0x3A9715B4:  movl %edi,%eax
+	0x3A9715B6:  movl %edx,(%esp,,)
+	0x3A9715B9:  movl (%esi),%edx
+	0x3A9715BB:  call 0x3A971120
+. 887 3A97159F 33
+. 0F B7 46 04 83 E0 02 89 44 24 08 8B 45 AC 89 44 24 04 8B 55 88 89 F8 89 14 24 8B 16 E8 60 FB FF FF 
+
+==== BB 888 (0x3A971580) in 16666B, out 91878B, BBs exec'd 0 ====
+	0x3A971580:  addl %eax,%esi
+	0x3A971582:  movl -80(%ebp),%edx
+	0x3A971585:  movl 8(%esi),%edi
+	0x3A971588:  movl -96(%ebp),%ecx
+	0x3A97158B:  movl 4(%edx),%eax
+	0x3A97158E:  addl %edi,%ecx
+	0x3A971590:  movl %eax,%edi
+	0x3A971592:  cmpb $0x0, (%eax)
+	0x3A971595:  jnz-8 0x3A97159F
+. 888 3A971580 23
+. 01 C6 8B 55 B0 8B 7E 08 8B 4D A0 8B 42 04 01 F9 89 C7 80 38 00 75 08 
+
+==== BB 889 (0x3A9715D7) in 16689B, out 92016B, BBs exec'd 0 ====
+	0x3A9715D7:  movl 12(%esi),%eax
+	0x3A9715DA:  testl %eax,%eax
+	0x3A9715DC:  jnz-8 0x3A971580
+. 889 3A9715D7 7
+. 8B 46 0C 85 C0 75 A2 
+
+==== BB 890 (0x3A9717B0) in 16696B, out 92088B, BBs exec'd 0 ====
+	0x3A9717B0:  addl %eax,%esi
+	0x3A9717B2:  movzwl 6(%esi),%edx
+	0x3A9717B6:  movl (%esi),%eax
+	0x3A9717B8:  movl %edx,%ecx
+	0x3A9717BA:  andl $0x7FFF, %ecx
+	0x3A9717C0:  andl $0x8000, %edx
+	0x3A9717C6:  shll $0x4, %ecx
+	0x3A9717C9:  movl %eax,4(%edi,%ecx,1)
+	0x3A9717CD:  movl -96(%ebp),%eax
+	0x3A9717D0:  movl %edx,8(%edi,%ecx,1)
+	0x3A9717D4:  movl 8(%esi),%edx
+	0x3A9717D7:  addl %edx,%eax
+	0x3A9717D9:  movl %eax,(%edi,%ecx,1)
+	0x3A9717DC:  movl -124(%ebp),%edx
+	0x3A9717DF:  movl -96(%ebp),%eax
+	0x3A9717E2:  addl 4(%edx),%eax
+	0x3A9717E5:  movl %eax,12(%edi,%ecx,1)
+	0x3A9717E9:  movl 12(%esi),%eax
+	0x3A9717EC:  testl %eax,%eax
+	0x3A9717EE:  jnz-8 0x3A9717B0
+. 890 3A9717B0 64
+. 01 C6 0F B7 56 06 8B 06 89 D1 81 E1 FF 7F 00 00 81 E2 00 80 00 00 C1 E1 04 89 44 0F 04 8B 45 A0 89 54 0F 08 8B 56 08 01 D0 89 04 0F 8B 55 84 8B 45 A0 03 42 04 89 44 0F 0C 8B 46 0C 85 C0 75 C0 
+
+==== BB 891 (0x3A970BC0) in 16760B, out 92346B, BBs exec'd 0 ====
+	0x3A970BC0:  movl (%esi),%eax
+	0x3A970BC2:  movl %edi,(%esp,,)
+	0x3A970BC5:  movl %eax,4(%esp,,)
+	0x3A970BC9:  call 0x3A975280
+. 891 3A970BC0 14
+. 8B 06 89 3C 24 89 44 24 04 E8 B2 46 00 00 
+
+==== BB 892 (0x3A9715F9) in 16774B, out 92424B, BBs exec'd 0 ====
+	0x3A9715F9:  movl -104(%ebp),%ecx
+	0x3A9715FC:  movl -80(%ebp),%eax
+	0x3A9715FF:  movl 4(%ecx),%edx
+	0x3A971602:  movl (%eax),%ecx
+	0x3A971604:  addl %ecx,%edx
+	0x3A971606:  jmp-8 0x3A97160A
+. 892 3A9715F9 15
+. 8B 4D 98 8B 45 B0 8B 51 04 8B 08 01 CA EB 02 
+
+==== BB 893 (0x3A97160A) in 16789B, out 92514B, BBs exec'd 0 ====
+	0x3A97160A:  movzwl 4(%edx),%eax
+	0x3A97160E:  andl $0x7FFF, %eax
+	0x3A971613:  cmpl -108(%ebp),%eax
+	0x3A971616:  jbe-8 0x3A97161B
+. 893 3A97160A 14
+. 0F B7 42 04 25 FF 7F 00 00 3B 45 94 76 03 
+
+==== BB 894 (0x3A97161B) in 16803B, out 92603B, BBs exec'd 0 ====
+	0x3A97161B:  movl 16(%edx),%eax
+	0x3A97161E:  testl %eax,%eax
+	0x3A971620:  jnz-8 0x3A971608
+. 894 3A97161B 7
+. 8B 42 10 85 C0 75 E6 
+
+==== BB 895 (0x3A971608) in 16810B, out 92675B, BBs exec'd 0 ====
+	0x3A971608:  addl %eax,%edx
+	0x3A97160A:  movzwl 4(%edx),%eax
+	0x3A97160E:  andl $0x7FFF, %eax
+	0x3A971613:  cmpl -108(%ebp),%eax
+	0x3A971616:  jbe-8 0x3A97161B
+. 895 3A971608 16
+. 01 C2 0F B7 42 04 25 FF 7F 00 00 3B 45 94 76 03 
+
+==== BB 896 (0x3A971805) in 16826B, out 92774B, BBs exec'd 0 ====
+	0x3A971805:  movl -104(%ebp),%eax
+	0x3A971808:  movl -80(%ebp),%edx
+	0x3A97180B:  movl 4(%eax),%esi
+	0x3A97180E:  movl (%edx),%ecx
+	0x3A971810:  addl %ecx,%esi
+	0x3A971812:  jmp-8 0x3A971816
+. 896 3A971805 15
+. 8B 45 98 8B 55 B0 8B 70 04 8B 0A 01 CE EB 02 
+
+==== BB 897 (0x3A971816) in 16841B, out 92864B, BBs exec'd 0 ====
+	0x3A971816:  movl 12(%esi),%eax
+	0x3A971819:  movl %esi,%edi
+	0x3A97181B:  addl %eax,%edi
+	0x3A97181D:  testb $0x1, 2(%esi)
+	0x3A971821:  jnz-8 0x3A97184D
+. 897 3A971816 13
+. 8B 46 0C 89 F7 01 C7 F6 46 02 01 75 2A 
+
+==== BB 898 (0x3A97184D) in 16854B, out 92964B, BBs exec'd 0 ====
+	0x3A97184D:  movl 16(%esi),%eax
+	0x3A971850:  testl %eax,%eax
+	0x3A971852:  jnz-8 0x3A971814
+. 898 3A97184D 7
+. 8B 46 10 85 C0 75 C0 
+
+==== BB 899 (0x3A971814) in 16861B, out 93036B, BBs exec'd 0 ====
+	0x3A971814:  addl %eax,%esi
+	0x3A971816:  movl 12(%esi),%eax
+	0x3A971819:  movl %esi,%edi
+	0x3A97181B:  addl %eax,%edi
+	0x3A97181D:  testb $0x1, 2(%esi)
+	0x3A971821:  jnz-8 0x3A97184D
+. 899 3A971814 15
+. 01 C6 8B 46 0C 89 F7 01 C7 F6 46 02 01 75 2A 
+
+==== BB 900 (0x3A971823) in 16876B, out 93146B, BBs exec'd 0 ====
+	0x3A971823:  movzwl 4(%esi),%eax
+	0x3A971827:  movl -80(%ebp),%edx
+	0x3A97182A:  movl 0x17C(%edx),%ecx
+	0x3A971830:  movl 8(%esi),%edx
+	0x3A971833:  andl $0x7FFF, %eax
+	0x3A971838:  shll $0x4, %eax
+	0x3A97183B:  movl %edx,4(%ecx,%eax,1)
+	0x3A97183F:  movl -96(%ebp),%edx
+	0x3A971842:  addl (%edi),%edx
+	0x3A971844:  xorl %edi, %edi
+	0x3A971846:  movl %edi,12(%ecx,%eax,1)
+	0x3A97184A:  movl %edx,(%ecx,%eax,1)
+	0x3A97184D:  movl 16(%esi),%eax
+	0x3A971850:  testl %eax,%eax
+	0x3A971852:  jnz-8 0x3A971814
+. 900 3A971823 49
+. 0F B7 46 04 8B 55 B0 8B 8A 7C 01 00 00 8B 56 08 25 FF 7F 00 00 C1 E0 04 89 54 01 04 8B 55 A0 03 17 31 FF 89 7C 01 0C 89 14 01 8B 46 10 85 C0 75 C0 
+
+==== BB 901 (0x3A971854) in 16925B, out 93357B, BBs exec'd 0 ====
+	0x3A971854:  jmp 0x3A97162D
+. 901 3A971854 5
+. E9 D4 FD FF FF 
+
+==== BB 902 (0x3A971618) in 16930B, out 93385B, BBs exec'd 0 ====
+	0x3A971618:  movl %eax,-108(%ebp)
+	0x3A97161B:  movl 16(%edx),%eax
+	0x3A97161E:  testl %eax,%eax
+	0x3A971620:  jnz-8 0x3A971608
+. 902 3A971618 10
+. 89 45 94 8B 42 10 85 C0 75 E6 
+
+==== BB 903 (0x3A9718D0) in 16940B, out 93473B, BBs exec'd 0 ====
+	0x3A9718D0:  addl $0xC, %esp
+	0x3A9718D3:  movl %edi,%eax
+	0x3A9718D5:  popl %ebx
+	0x3A9718D6:  popl %esi
+	0x3A9718D7:  popl %edi
+	0x3A9718D8:  popl %ebp
+	0x3A9718D9:  ret
+. 903 3A9718D0 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3 
+
+==== BB 904 (0x3A9688DD) in 16950B, out 93591B, BBs exec'd 0 ====
+	0x3A9688DD:  testl %eax,%eax
+	0x3A9688DF:  jz-8 0x3A9688E7
+. 904 3A9688DD 4
+. 85 C0 74 06 
+
+==== BB 905 (0x3A9688E7) in 16954B, out 93650B, BBs exec'd 0 ====
+	0x3A9688E7:  movl -8(%ebp),%ebx
+	0x3A9688EA:  movl -4(%ebp),%esi
+	0x3A9688ED:  movl %ebp,%esp
+	0x3A9688EF:  popl %ebp
+	0x3A9688F0:  ret
+. 905 3A9688E7 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3 
+
+==== BB 906 (0x3A9701AE) in 16964B, out 93729B, BBs exec'd 0 ====
+	0x3A9701AE:  movl %edi,(%esi)
+	0x3A9701B0:  movl -28(%ebp),%eax
+	0x3A9701B3:  movl %eax,0xE8(%ebx)
+	0x3A9701B9:  movl -12(%ebp),%ebx
+	0x3A9701BC:  movl -8(%ebp),%esi
+	0x3A9701BF:  movl -4(%ebp),%edi
+	0x3A9701C2:  movl %ebp,%esp
+	0x3A9701C4:  popl %ebp
+	0x3A9701C5:  ret
+. 906 3A9701AE 24
+. 89 3E 8B 45 E4 89 83 E8 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 907 (0x3A9671FE) in 16988B, out 93861B, BBs exec'd 0 ====
+	0x3A9671FE:  movl 0xFFFFFF50(%ebp),%edi
+	0x3A967204:  testl %edi,%edi
+	0x3A967206:  jnz-32 0x3A9681A2
+. 907 3A9671FE 14
+. 8B BD 50 FF FF FF 85 FF 0F 85 96 0F 00 00 
+
+==== BB 908 (0x3A96720C) in 17002B, out 93939B, BBs exec'd 0 ====
+	0x3A96720C:  movl 0xFFFFF994(%ebx),%ecx
+	0x3A967212:  movl 0x134(%ecx),%edx
+	0x3A967218:  testl %edx,%edx
+	0x3A96721A:  jz-32 0x3A967313
+. 908 3A96720C 20
+. 8B 8B 94 F9 FF FF 8B 91 34 01 00 00 85 D2 0F 84 F3 00 00 00 
+
+==== BB 909 (0x3A967313) in 17022B, out 94030B, BBs exec'd 0 ====
+	0x3A967313:  movl 0xFFFFF9E4(%ebx),%eax
+	0x3A967319:  call 0x3A970B30
+. 909 3A967313 11
+. 8B 83 E4 F9 FF FF E8 12 98 00 00 
+
+==== BB 910 _dl_debug_initialize(0x3A970B30) in 17033B, out 94094B, BBs exec'd 0 ====
+	0x3A970B30:  call 0x3A975927
+. 910 3A970B30 5
+. E8 F2 4D 00 00 
+
+==== BB 911 (0x3A970B35) in 17038B, out 94139B, BBs exec'd 0 ====
+	0x3A970B35:  addl $0x7B37, %ecx
+	0x3A970B3B:  pushl %ebp
+	0x3A970B3C:  movl 0x28(%ecx),%edx
+	0x3A970B42:  movl %esp,%ebp
+	0x3A970B44:  cmpl $0x0, 8(%edx)
+	0x3A970B48:  jnz-8 0x3A970B65
+. 911 3A970B35 21
+. 81 C1 37 7B 00 00 55 8B 91 28 00 00 00 89 E5 83 7A 08 00 75 1B 
+
+==== BB 912 (0x3A970B4A) in 17059B, out 94254B, BBs exec'd 0 ====
+	0x3A970B4A:  movl %eax,16(%edx)
+	0x3A970B4D:  movl 0xFFFFF994(%ecx),%eax
+	0x3A970B53:  movl $0x1, (%edx)
+	0x3A970B59:  movl %eax,4(%edx)
+	0x3A970B5C:  leal 0xFFFF8504(%ecx), %eax
+	0x3A970B62:  movl %eax,8(%edx)
+	0x3A970B65:  popl %ebp
+	0x3A970B66:  movl %edx,%eax
+	0x3A970B68:  ret
+. 912 3A970B4A 31
+. 89 42 10 8B 81 94 F9 FF FF C7 02 01 00 00 00 89 42 04 8D 81 04 85 FF FF 89 42 08 5D 89 D0 C3 
+
+==== BB 913 (0x3A96731E) in 17090B, out 94379B, BBs exec'd 0 ====
+	0x3A96731E:  movl %eax,0xFFFFFE78(%ebp)
+	0x3A967324:  movl 0xFFFFF994(%ebx),%eax
+	0x3A96732A:  movl 108(%eax),%eax
+	0x3A96732D:  testl %eax,%eax
+	0x3A96732F:  jz-8 0x3A96733A
+. 913 3A96731E 19
+. 89 85 78 FE FF FF 8B 83 94 F9 FF FF 8B 40 6C 85 C0 74 09 
+
+==== BB 914 (0x3A967331) in 17109B, out 94483B, BBs exec'd 0 ====
+	0x3A967331:  movl 0xFFFFFE78(%ebp),%edx
+	0x3A967337:  movl %edx,4(%eax)
+	0x3A96733A:  movl 0xFFFFFA50(%ebx),%eax
+	0x3A967340:  testl %eax,%eax
+	0x3A967342:  jz-8 0x3A96734D
+. 914 3A967331 19
+. 8B 95 78 FE FF FF 89 50 04 8B 83 50 FA FF FF 85 C0 74 09 
+
+==== BB 915 (0x3A96734D) in 17128B, out 94590B, BBs exec'd 0 ====
+	0x3A96734D:  cmpb $0x0, 0xFFFFFF03(%ebp)
+	0x3A967354:  movl 0xFFFFF994(%ebx),%esi
+	0x3A96735A:  leal 0x150(%esi), %eax
+	0x3A967360:  movl %eax,0xFFFFF9A4(%ebx)
+	0x3A967366:  movl 0x154(%esi),%edx
+	0x3A96736C:  movl %esi,%ecx
+	0x3A96736E:  movl %eax,0xFFFFF99C(%ebx)
+	0x3A967374:  movl 0x150(%esi),%eax
+	0x3A96737A:  movl %edx,0xFFFFFC2C(%ebx)
+	0x3A967380:  movl %eax,0xFFFFFC28(%ebx)
+	0x3A967386:  jz-32 0x3A967910
+. 915 3A96734D 63
+. 80 BD 03 FF FF FF 00 8B B3 94 F9 FF FF 8D 86 50 01 00 00 89 83 A4 F9 FF FF 8B 96 54 01 00 00 89 F1 89 83 9C F9 FF FF 8B 86 50 01 00 00 89 93 2C FC FF FF 89 83 28 FC FF FF 0F 84 84 05 00 00 
+
+==== BB 916 (0x3A967910) in 17191B, out 94779B, BBs exec'd 0 ====
+	0x3A967910:  xorl %eax, %eax
+	0x3A967912:  cmpl $0x0, 0xFFFFFD7C(%ebx)
+	0x3A967919:  setnz %al
+	0x3A96791C:  orl %eax,0xFFFFFC3C(%ebx)
+	0x3A967922:  movl %eax,0xFFFFFE74(%ebp)
+	0x3A967928:  movl 12(%esi),%eax
+	0x3A96792B:  testl %eax,%eax
+	0x3A96792D:  jz-8 0x3A96798C
+. 916 3A967910 31
+. 31 C0 83 BB 7C FD FF FF 00 0F 95 C0 09 83 3C FC FF FF 89 85 74 FE FF FF 8B 46 0C 85 C0 74 5D 
+
+==== BB 917 (0x3A96792F) in 17222B, out 94939B, BBs exec'd 0 ====
+	0x3A96792F:  movl %eax,%esi
+	0x3A967931:  movl 12(%eax),%eax
+	0x3A967934:  testl %eax,%eax
+	0x3A967936:  jnz-8 0x3A96792F
+. 917 3A96792F 9
+. 89 C6 8B 40 0C 85 C0 75 F7 
+
+==== BB 918 (0x3A967938) in 17231B, out 95021B, BBs exec'd 0 ====
+	0x3A967938:  jmp-8 0x3A96798C
+. 918 3A967938 2
+. EB 52 
+
+==== BB 919 (0x3A96798C) in 17233B, out 95049B, BBs exec'd 0 ====
+	0x3A96798C:  movl 20(%esi),%eax
+	0x3A96798F:  movl 4(%eax),%eax
+	0x3A967992:  testl %eax,%eax
+	0x3A967994:  jz-8 0x3A96794E
+. 919 3A96798C 10
+. 8B 46 14 8B 40 04 85 C0 74 B8 
+
+==== BB 920 (0x3A967996) in 17243B, out 95131B, BBs exec'd 0 ====
+	0x3A967996:  jmp-8 0x3A967940
+. 920 3A967996 2
+. EB A8 
+
+==== BB 921 (0x3A967940) in 17245B, out 95159B, BBs exec'd 0 ====
+	0x3A967940:  movl $0x1, 8(%eax)
+	0x3A967947:  movl 4(%eax),%eax
+	0x3A96794A:  testl %eax,%eax
+	0x3A96794C:  jnz-8 0x3A967940
+. 921 3A967940 14
+. C7 40 08 01 00 00 00 8B 40 04 85 C0 75 F2 
+
+==== BB 922 (0x3A96794E) in 17259B, out 95246B, BBs exec'd 0 ====
+	0x3A96794E:  movl 0xFFFFFED0(%ebp),%edi
+	0x3A967954:  addl $0x50, %edi
+	0x3A967957:  cmpl %edi,%esi
+	0x3A967959:  jz-8 0x3A967981
+. 922 3A96794E 13
+. 8B BD D0 FE FF FF 83 C7 50 39 FE 74 26 
+
+==== BB 923 (0x3A967981) in 17272B, out 95328B, BBs exec'd 0 ====
+	0x3A967981:  movl 16(%esi),%esi
+	0x3A967984:  testl %esi,%esi
+	0x3A967986:  jz-32 0x3A968122
+. 923 3A967981 11
+. 8B 76 10 85 F6 0F 84 96 07 00 00 
+
+==== BB 924 (0x3A96795B) in 17283B, out 95400B, BBs exec'd 0 ====
+	0x3A96795B:  movl 0xFFFFFE74(%ebp),%eax
+	0x3A967961:  movl %eax,12(%esp,,)
+	0x3A967965:  movl 0xFFFFFC3C(%ebx),%eax
+	0x3A96796B:  movl %eax,8(%esp,,)
+	0x3A96796F:  movl 0x1B0(%esi),%eax
+	0x3A967975:  movl %esi,(%esp,,)
+	0x3A967978:  movl %eax,4(%esp,,)
+	0x3A96797C:  call 0x3A96DEB0
+. 924 3A96795B 38
+. 8B 85 74 FE FF FF 89 44 24 0C 8B 83 3C FC FF FF 89 44 24 08 8B 86 B0 01 00 00 89 34 24 89 44 24 04 E8 2F 65 00 00 
+
+==== BB 925 _dl_relocate_object(0x3A96DEB0) in 17321B, out 95533B, BBs exec'd 0 ====
+	0x3A96DEB0:  pushl %ebp
+	0x3A96DEB1:  movl %esp,%ebp
+	0x3A96DEB3:  leal 8(%ebp), %eax
+	0x3A96DEB6:  pushl %edi
+	0x3A96DEB7:  pushl %esi
+	0x3A96DEB8:  pushl %ebx
+	0x3A96DEB9:  subl $0xFC, %esp
+	0x3A96DEBF:  movl 8(%ebp),%esi
+	0x3A96DEC2:  movl %eax,-20(%ebp)
+	0x3A96DEC5:  call 0x3A97592B
+. 925 3A96DEB0 26
+. 55 89 E5 8D 45 08 57 56 53 81 EC FC 00 00 00 8B 75 08 89 45 EC E8 61 7A 00 00 
+
+==== BB 926 (0x3A96DECA) in 17347B, out 95702B, BBs exec'd 0 ====
+	0x3A96DECA:  addl $0xA7A2, %ebx
+	0x3A96DED0:  testb $0x4, 0x174(%esi)
+	0x3A96DED7:  movl $0x0, -104(%ebp)
+	0x3A96DEDE:  movl %esi,%edx
+	0x3A96DEE0:  jnz-32 0x3A96E300
+. 926 3A96DECA 28
+. 81 C3 A2 A7 00 00 F6 86 74 01 00 00 04 C7 45 98 00 00 00 00 89 F2 0F 85 1A 04 00 00 
+
+==== BB 927 (0x3A96DEE6) in 17375B, out 95817B, BBs exec'd 0 ====
+	0x3A96DEE6:  movl 20(%ebp),%edi
+	0x3A96DEE9:  testl %edi,%edi
+	0x3A96DEEB:  jnz-8 0x3A96DEFB
+. 927 3A96DEE6 7
+. 8B 7D 14 85 FF 75 0E 
+
+==== BB 928 (0x3A96DEED) in 17382B, out 95889B, BBs exec'd 0 ====
+	0x3A96DEED:  xorl %eax, %eax
+	0x3A96DEEF:  cmpl $0x0, 120(%esi)
+	0x3A96DEF3:  setz %al
+	0x3A96DEF6:  negl %eax
+	0x3A96DEF8:  andl %eax,16(%ebp)
+	0x3A96DEFB:  testb $0x20, 0xFFFFFC14(%ebx)
+	0x3A96DF02:  jnz-32 0x3A96E90C
+. 928 3A96DEED 27
+. 31 C0 83 7E 78 00 0F 94 C0 F7 D8 21 45 10 F6 83 14 FC FF FF 20 0F 85 04 0A 00 00 
+
+==== BB 929 (0x3A96DF08) in 17409B, out 96040B, BBs exec'd 0 ====
+	0x3A96DF08:  movl 112(%esi),%ecx
+	0x3A96DF0B:  testl %ecx,%ecx
+	0x3A96DF0D:  jnz-32 0x3A96E94D
+. 929 3A96DF08 11
+. 8B 4E 70 85 C9 0F 85 3A 0A 00 00 
+
+==== BB 930 (0x3A96DF13) in 17420B, out 96112B, BBs exec'd 0 ====
+	0x3A96DF13:  movl 44(%esi),%eax
+	0x3A96DF16:  movl 4(%eax),%eax
+	0x3A96DF19:  movl %eax,-16(%ebp)
+	0x3A96DF1C:  movl 116(%esi),%eax
+	0x3A96DF1F:  testl %eax,%eax
+	0x3A96DF21:  jz-8 0x3A96DF70
+. 930 3A96DF13 16
+. 8B 46 2C 8B 40 04 89 45 F0 8B 46 74 85 C0 74 4D 
+
+==== BB 931 (0x3A96DF23) in 17436B, out 96217B, BBs exec'd 0 ====
+	0x3A96DF23:  movl 16(%ebp),%edi
+	0x3A96DF26:  testl %edi,%edi
+	0x3A96DF28:  jz-8 0x3A96DF70
+. 931 3A96DF23 7
+. 8B 7D 10 85 FF 74 46 
+
+==== BB 932 (0x3A96DF2A) in 17443B, out 96289B, BBs exec'd 0 ====
+	0x3A96DF2A:  movl 36(%esi),%eax
+	0x3A96DF2D:  movl 4(%eax),%edx
+	0x3A96DF30:  movl 4(%edx),%eax
+	0x3A96DF33:  testl %eax,%eax
+	0x3A96DF35:  jz-8 0x3A96DF4A
+. 932 3A96DF2A 13
+. 8B 46 24 8B 50 04 8B 42 04 85 C0 74 13 
+
+==== BB 933 (0x3A96DF4A) in 17456B, out 96384B, BBs exec'd 0 ====
+	0x3A96DF4A:  movl %esi,4(%edx)
+	0x3A96DF4D:  movl 20(%ebp),%eax
+	0x3A96DF50:  testl %eax,%eax
+	0x3A96DF52:  jnz-32 0x3A96EA37
+. 933 3A96DF4A 14
+. 89 72 04 8B 45 14 85 C0 0F 85 DF 0A 00 00 
+
+==== BB 934 (0x3A96DF58) in 17470B, out 96472B, BBs exec'd 0 ====
+	0x3A96DF58:  leal 0xFFFF7884(%ebx), %eax
+	0x3A96DF5E:  movl %eax,8(%edx)
+	0x3A96DF61:  jmp-8 0x3A96DF70
+. 934 3A96DF58 11
+. 8D 83 84 78 FF FF 89 42 08 EB 0D 
+
+==== BB 935 (0x3A96DF70) in 17481B, out 96529B, BBs exec'd 0 ====
+	0x3A96DF70:  movl $0x0, -44(%ebp)
+	0x3A96DF77:  movl 92(%esi),%eax
+	0x3A96DF7A:  movl $0x0, -36(%ebp)
+	0x3A96DF81:  movl $0x0, -48(%ebp)
+	0x3A96DF88:  testl %eax,%eax
+	0x3A96DF8A:  movl $0x0, -52(%ebp)
+	0x3A96DF91:  jz-8 0x3A96DFA2
+. 935 3A96DF70 35
+. C7 45 D4 00 00 00 00 8B 46 5C C7 45 DC 00 00 00 00 C7 45 D0 00 00 00 00 85 C0 C7 45 CC 00 00 00 00 74 0F 
+
+==== BB 936 (0x3A96DF93) in 17516B, out 96664B, BBs exec'd 0 ====
+	0x3A96DF93:  movl 4(%eax),%eax
+	0x3A96DF96:  movl %eax,-52(%ebp)
+	0x3A96DF99:  movl 96(%esi),%eax
+	0x3A96DF9C:  movl 4(%eax),%eax
+	0x3A96DF9F:  movl %eax,-48(%ebp)
+	0x3A96DFA2:  movl 104(%esi),%eax
+	0x3A96DFA5:  testl %eax,%eax
+	0x3A96DFA7:  jz-8 0x3A96DFB3
+. 936 3A96DF93 22
+. 8B 40 04 89 45 CC 8B 46 60 8B 40 04 89 45 D0 8B 46 68 85 C0 74 0A 
+
+==== BB 937 (0x3A96DFA9) in 17538B, out 96792B, BBs exec'd 0 ====
+	0x3A96DFA9:  cmpl $0x11, 4(%eax)
+	0x3A96DFAD:  jz-32 0x3A96E226
+. 937 3A96DFA9 10
+. 83 78 04 11 0F 84 73 02 00 00 
+
+==== BB 938 (0x3A96E226) in 17548B, out 96855B, BBs exec'd 0 ====
+	0x3A96E226:  movl 16(%ebp),%edi
+	0x3A96E229:  movl 116(%esi),%eax
+	0x3A96E22C:  testl %edi,%edi
+	0x3A96E22E:  movl 4(%eax),%ecx
+	0x3A96E231:  jnz-8 0x3A96E23F
+. 938 3A96E226 13
+. 8B 7D 10 8B 46 74 85 FF 8B 48 04 75 0C 
+
+==== BB 939 (0x3A96E23F) in 17561B, out 96956B, BBs exec'd 0 ====
+	0x3A96E23F:  movl %ecx,-40(%ebp)
+	0x3A96E242:  movl 32(%esi),%eax
+	0x3A96E245:  movl 16(%ebp),%edx
+	0x3A96E248:  movl 4(%eax),%eax
+	0x3A96E24B:  movl %edx,-32(%ebp)
+	0x3A96E24E:  movl %eax,-36(%ebp)
+	0x3A96E251:  jmp 0x3A96DFB3
+. 939 3A96E23F 23
+. 89 4D D8 8B 46 20 8B 55 10 8B 40 04 89 55 E0 89 45 DC E9 5D FD FF FF 
+
+==== BB 940 (0x3A96DFB3) in 17584B, out 97059B, BBs exec'd 0 ====
+	0x3A96DFB3:  movl $0x0, -112(%ebp)
+	0x3A96DFBA:  movl %esi,-116(%ebp)
+	0x3A96DFBD:  movl -112(%ebp),%ecx
+	0x3A96DFC0:  leal (%ecx,%ecx,2), %eax
+	0x3A96DFC3:  leal -12(%ebp,%eax,4), %eax
+	0x3A96DFC7:  leal -40(%eax), %edx
+	0x3A96DFCA:  movl -40(%eax),%ecx
+	0x3A96DFCD:  movl 4(%edx),%edi
+	0x3A96DFD0:  movl %ecx,%eax
+	0x3A96DFD2:  addl %edi,%eax
+	0x3A96DFD4:  movl %eax,-124(%ebp)
+	0x3A96DFD7:  movl (%esi),%eax
+	0x3A96DFD9:  movl %ecx,-120(%ebp)
+	0x3A96DFDC:  movl %eax,-128(%ebp)
+	0x3A96DFDF:  movl 8(%edx),%eax
+	0x3A96DFE2:  testl %eax,%eax
+	0x3A96DFE4:  jz-8 0x3A96E035
+. 940 3A96DFB3 51
+. C7 45 90 00 00 00 00 89 75 8C 8B 4D 90 8D 04 49 8D 44 85 F4 8D 50 D8 8B 48 D8 8B 7A 04 89 C8 01 F8 89 45 84 8B 06 89 4D 88 89 45 80 8B 42 08 85 C0 74 4F 
+
+==== BB 941 (0x3A96E035) in 17635B, out 97277B, BBs exec'd 0 ====
+	0x3A96E035:  movl 48(%esi),%eax
+	0x3A96E038:  xorl %edx, %edx
+	0x3A96E03A:  movl 4(%eax),%eax
+	0x3A96E03D:  movl %edx,0xFFFFFF78(%ebp)
+	0x3A96E043:  movl %eax,0xFFFFFF7C(%ebp)
+	0x3A96E049:  movl 0xB4(%esi),%eax
+	0x3A96E04F:  testl %eax,%eax
+	0x3A96E051:  jz-8 0x3A96E05C
+. 941 3A96E035 30
+. 8B 46 30 31 D2 8B 40 04 89 95 78 FF FF FF 89 85 7C FF FF FF 8B 86 B4 00 00 00 85 C0 74 09 
+
+==== BB 942 (0x3A96E053) in 17665B, out 97415B, BBs exec'd 0 ====
+	0x3A96E053:  movl 4(%eax),%eax
+	0x3A96E056:  movl %eax,0xFFFFFF78(%ebp)
+	0x3A96E05C:  movl %edi,%eax
+	0x3A96E05E:  movl %ecx,%edx
+	0x3A96E060:  shrl $0x3, %eax
+	0x3A96E063:  cmpl 0xFFFFFF78(%ebp),%eax
+	0x3A96E069:  jbe-8 0x3A96E071
+. 942 3A96E053 24
+. 8B 40 04 89 85 78 FF FF FF 89 F8 89 CA C1 E8 03 3B 85 78 FF FF FF 76 06 
+
+==== BB 943 (0x3A96E06B) in 17689B, out 97533B, BBs exec'd 0 ====
+	0x3A96E06B:  movl 0xFFFFFF78(%ebp),%eax
+	0x3A96E071:  leal 0xFFFFF9E4(%ebx), %ecx
+	0x3A96E077:  leal (%edx,%eax,8), %eax
+	0x3A96E07A:  cmpl %ecx,%esi
+	0x3A96E07C:  movl %eax,-120(%ebp)
+	0x3A96E07F:  jz-8 0x3A96E0B0
+. 943 3A96E06B 22
+. 8B 85 78 FF FF FF 8D 8B E4 F9 FF FF 8D 04 C2 39 CE 89 45 88 74 2F 
+
+==== BB 944 (0x3A96E081) in 17711B, out 97645B, BBs exec'd 0 ====
+	0x3A96E081:  movl -128(%ebp),%edi
+	0x3A96E084:  testl %edi,%edi
+	0x3A96E086:  jz-8 0x3A96E0B0
+. 944 3A96E081 7
+. 8B 7D 80 85 FF 74 28 
+
+==== BB 945 (0x3A96E088) in 17718B, out 97717B, BBs exec'd 0 ====
+	0x3A96E088:  cmpl %eax,%edx
+	0x3A96E08A:  jnb-8 0x3A96E0B0
+. 945 3A96E088 4
+. 39 C2 73 24 
+
+==== BB 946 (0x3A96E08C) in 17722B, out 97774B, BBs exec'd 0 ====
+	0x3A96E08C:  leal 0(%esi,,), %esi
+	0x3A96E090:  movl (%edx),%ecx
+	0x3A96E092:  addl $0x8, %edx
+	0x3A96E095:  movl -128(%ebp),%eax
+	0x3A96E098:  movl -128(%ebp),%edi
+	0x3A96E09B:  addl %ecx,%eax
+	0x3A96E09D:  addl %edi,(%eax)
+	0x3A96E09F:  cmpl -120(%ebp),%edx
+	0x3A96E0A2:  jb-8 0x3A96E090
+. 946 3A96E08C 24
+. 8D 74 26 00 8B 0A 83 C2 08 8B 45 80 8B 7D 80 01 C8 01 38 3B 55 88 72 EC 
+
+==== BB 947 (0x3A96E090) in 17746B, out 97916B, BBs exec'd 0 ====
+	0x3A96E090:  movl (%edx),%ecx
+	0x3A96E092:  addl $0x8, %edx
+	0x3A96E095:  movl -128(%ebp),%eax
+	0x3A96E098:  movl -128(%ebp),%edi
+	0x3A96E09B:  addl %ecx,%eax
+	0x3A96E09D:  addl %edi,(%eax)
+	0x3A96E09F:  cmpl -120(%ebp),%edx
+	0x3A96E0A2:  jb-8 0x3A96E090
+. 947 3A96E090 20
+. 8B 0A 83 C2 08 8B 45 80 8B 7D 80 01 C8 01 38 3B 55 88 72 EC 
+
+==== BB 948 (0x3A96E0A4) in 17766B, out 98045B, BBs exec'd 0 ====
+	0x3A96E0A4:  leal 0x0(%esi), %esi
+	0x3A96E0AA:  leal 0x0(%edi), %edi
+	0x3A96E0B0:  movl 0xDC(%esi),%eax
+	0x3A96E0B6:  testl %eax,%eax
+	0x3A96E0B8:  jz-32 0x3A96E631
+. 948 3A96E0A4 26
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 86 DC 00 00 00 85 C0 0F 84 73 05 00 00 
+
+==== BB 949 (0x3A96E0BE) in 17792B, out 98143B, BBs exec'd 0 ====
+	0x3A96E0BE:  movl 4(%eax),%eax
+	0x3A96E0C1:  movl %eax,0xFFFFFF74(%ebp)
+	0x3A96E0C7:  movl -124(%ebp),%eax
+	0x3A96E0CA:  cmpl %eax,-120(%ebp)
+	0x3A96E0CD:  jnb-32 0x3A96E330
+. 949 3A96E0BE 21
+. 8B 40 04 89 85 74 FF FF FF 8B 45 84 39 45 88 0F 83 5D 02 00 00 
+
+==== BB 950 (0x3A96E0D3) in 17813B, out 98244B, BBs exec'd 0 ====
+	0x3A96E0D3:  leal 0x0(%esi), %esi
+	0x3A96E0D9:  leal 0(%edi,,), %edi
+	0x3A96E0E0:  movl -120(%ebp),%edx
+	0x3A96E0E3:  movl 0xFFFFFF74(%ebp),%edi
+	0x3A96E0E9:  movl 4(%edx),%edx
+	0x3A96E0EC:  movl %edx,0xFFFFFF0C(%ebp)
+	0x3A96E0F2:  movl %edx,%eax
+	0x3A96E0F4:  shrl $0x8, %eax
+	0x3A96E0F7:  movzwl (%edi,%eax,2),%ecx
+	0x3A96E0FB:  movl 0xFFFFFF7C(%ebp),%edi
+	0x3A96E101:  shll $0x4, %eax
+	0x3A96E104:  addl %eax,%edi
+	0x3A96E106:  movl -116(%ebp),%eax
+	0x3A96E109:  andl $0x7FFF, %ecx
+	0x3A96E10F:  movl %edi,-88(%ebp)
+	0x3A96E112:  shll $0x4, %ecx
+	0x3A96E115:  movl 0x17C(%eax),%edx
+	0x3A96E11B:  movl -128(%ebp),%eax
+	0x3A96E11E:  addl %edx,%ecx
+	0x3A96E120:  movl -120(%ebp),%edx
+	0x3A96E123:  addl (%edx),%eax
+	0x3A96E125:  movzbl 0xFFFFFF0C(%ebp),%edx
+	0x3A96E12C:  movl %eax,0xFFFFFF70(%ebp)
+	0x3A96E132:  cmpl $0x8, %edx
+	0x3A96E135:  movl %edx,0xFFFFFF6C(%ebp)
+	0x3A96E13B:  jz-32 0x3A96ECE5
+. 950 3A96E0D3 110
+. 8D B6 00 00 00 00 8D BC 27 00 00 00 00 8B 55 88 8B BD 74 FF FF FF 8B 52 04 89 95 0C FF FF FF 89 D0 C1 E8 08 0F B7 0C 47 8B BD 7C FF FF FF C1 E0 04 01 C7 8B 45 8C 81 E1 FF 7F 00 00 89 7D A8 C1 E1 04 8B 90 7C 01 00 00 8B 45 80 01 D1 8B 55 88 03 02 0F B6 95 0C FF FF FF 89 85 70 FF FF FF 83 FA 08 89 95 6C FF FF FF 0F 84 A4 0B 00 00 
+
+==== BB 951 (0x3A96E141) in 17923B, out 98561B, BBs exec'd 0 ====
+	0x3A96E141:  movl 0xFFFFFF6C(%ebp),%eax
+	0x3A96E147:  testl %eax,%eax
+	0x3A96E149:  jz-32 0x3A96E320
+. 951 3A96E141 14
+. 8B 85 6C FF FF FF 85 C0 0F 84 D1 01 00 00 
+
+==== BB 952 (0x3A96E14F) in 17937B, out 98636B, BBs exec'd 0 ====
+	0x3A96E14F:  movl %edi,0xFFFFFF68(%ebp)
+	0x3A96E155:  movzbl 12(%edi),%eax
+	0x3A96E159:  shrb $0x4, %al
+	0x3A96E15C:  testb %al,%al
+	0x3A96E15E:  jz-32 0x3A96E740
+. 952 3A96E14F 21
+. 89 BD 68 FF FF FF 0F B6 47 0C C0 E8 04 84 C0 0F 84 DC 05 00 00 
+
+==== BB 953 (0x3A96E164) in 17958B, out 98751B, BBs exec'd 0 ====
+	0x3A96E164:  cmpl 0x1FC(%esi),%edi
+	0x3A96E16A:  jz-32 0x3A96ED40
+. 953 3A96E164 12
+. 3B BE FC 01 00 00 0F 84 D0 0B 00 00 
+
+==== BB 954 (0x3A96E170) in 17970B, out 98819B, BBs exec'd 0 ====
+	0x3A96E170:  xorl %edx, %edx
+	0x3A96E172:  cmpl $0x7, 0xFFFFFF6C(%ebp)
+	0x3A96E179:  setz %dl
+	0x3A96E17C:  cmpl $0x5, 0xFFFFFF6C(%ebp)
+	0x3A96E183:  jz-32 0x3A96E904
+. 954 3A96E170 25
+. 31 D2 83 BD 6C FF FF FF 07 0F 94 C2 83 BD 6C FF FF FF 05 0F 84 7B 07 00 00 
+
+==== BB 955 (0x3A96E189) in 17995B, out 98929B, BBs exec'd 0 ====
+	0x3A96E189:  movl %edx,0x200(%esi)
+	0x3A96E18F:  movl 0xFFFFFF68(%ebp),%edi
+	0x3A96E195:  movl %edi,0x1FC(%esi)
+	0x3A96E19B:  xorl %edi, %edi
+	0x3A96E19D:  testl %ecx,%ecx
+	0x3A96E19F:  movl $0x1,%esi
+	0x3A96E1A4:  jz-8 0x3A96E1B1
+. 955 3A96E189 29
+. 89 96 00 02 00 00 8B BD 68 FF FF FF 89 BE FC 01 00 00 31 FF 85 C9 BE 01 00 00 00 74 0B 
+
+==== BB 956 (0x3A96E1A6) in 18024B, out 99062B, BBs exec'd 0 ====
+	0x3A96E1A6:  movl 4(%ecx),%eax
+	0x3A96E1A9:  testl %eax,%eax
+	0x3A96E1AB:  jz-8 0x3A96E1B1
+. 956 3A96E1A6 7
+. 8B 41 04 85 C0 74 04 
+
+==== BB 957 (0x3A96E1AD) in 18031B, out 99134B, BBs exec'd 0 ====
+	0x3A96E1AD:  movl %ecx,%edi
+	0x3A96E1AF:  xorl %esi, %esi
+	0x3A96E1B1:  movl -88(%ebp),%eax
+	0x3A96E1B4:  movl -16(%ebp),%ecx
+	0x3A96E1B7:  movl (%eax),%eax
+	0x3A96E1B9:  movl %esi,12(%esp,,)
+	0x3A96E1BD:  movl %edi,4(%esp,,)
+	0x3A96E1C1:  addl %ecx,%eax
+	0x3A96E1C3:  xorl %ecx, %ecx
+	0x3A96E1C5:  movl %ecx,16(%esp,,)
+	0x3A96E1C9:  leal -88(%ebp), %ecx
+	0x3A96E1CC:  movl %edx,8(%esp,,)
+	0x3A96E1D0:  movl 12(%ebp),%edx
+	0x3A96E1D3:  movl %edx,(%esp,,)
+	0x3A96E1D6:  movl 8(%ebp),%edx
+	0x3A96E1D9:  call 0x3A96C0E0
+. 957 3A96E1AD 49
+. 89 CF 31 F6 8B 45 A8 8B 4D F0 8B 00 89 74 24 0C 89 7C 24 04 01 C8 31 C9 89 4C 24 10 8D 4D A8 89 54 24 08 8B 55 0C 89 14 24 8B 55 08 E8 02 DF FF FF 
+
+==== BB 958 _dl_lookup_symbol_x(0x3A96C0E0) in 18080B, out 99346B, BBs exec'd 0 ====
+	0x3A96C0E0:  pushl %ebp
+	0x3A96C0E1:  movl %esp,%ebp
+	0x3A96C0E3:  pushl %edi
+	0x3A96C0E4:  xorl %edi, %edi
+	0x3A96C0E6:  pushl %esi
+	0x3A96C0E7:  pushl %ebx
+	0x3A96C0E8:  subl $0xA0, %esp
+	0x3A96C0EE:  call 0x3A97592B
+. 958 3A96C0E0 19
+. 55 89 E5 57 31 FF 56 53 81 EC A0 00 00 00 E8 38 98 00 00 
+
+==== BB 959 (0x3A96C0F3) in 18099B, out 99496B, BBs exec'd 0 ====
+	0x3A96C0F3:  addl $0xC579, %ebx
+	0x3A96C0F9:  movl %eax,-88(%ebp)
+	0x3A96C0FC:  movl %edx,-92(%ebp)
+	0x3A96C0FF:  movl %ecx,-96(%ebp)
+	0x3A96C102:  call 0x3A96BCF0
+. 959 3A96C0F3 20
+. 81 C3 79 C5 00 00 89 45 A8 89 55 A4 89 4D A0 E8 E9 FB FF FF 
+
+==== BB 960 _dl_elf_hash(0x3A96BCF0) in 18119B, out 99606B, BBs exec'd 0 ====
+	0x3A96BCF0:  pushl %ebp
+	0x3A96BCF1:  movl %eax,%ecx
+	0x3A96BCF3:  xorl %edx, %edx
+	0x3A96BCF5:  movzbl (%eax),%eax
+	0x3A96BCF8:  movl %esp,%ebp
+	0x3A96BCFA:  testb %al,%al
+	0x3A96BCFC:  jz-8 0x3A96BD70
+. 960 3A96BCF0 14
+. 55 89 C1 31 D2 0F B6 00 89 E5 84 C0 74 72 
+
+==== BB 961 (0x3A96BCFE) in 18133B, out 99734B, BBs exec'd 0 ====
+	0x3A96BCFE:  incl %ecx
+	0x3A96BCFF:  movzbl %al,%edx
+	0x3A96BD02:  movzbl (%ecx),%eax
+	0x3A96BD05:  testb %al,%al
+	0x3A96BD07:  jz-8 0x3A96BD70
+. 961 3A96BCFE 11
+. 41 0F B6 D0 0F B6 01 84 C0 74 67 
+
+==== BB 962 (0x3A96BD09) in 18144B, out 99830B, BBs exec'd 0 ====
+	0x3A96BD09:  shll $0x4, %edx
+	0x3A96BD0C:  movzbl %al,%eax
+	0x3A96BD0F:  incl %ecx
+	0x3A96BD10:  addl %eax,%edx
+	0x3A96BD12:  movzbl (%ecx),%eax
+	0x3A96BD15:  testb %al,%al
+	0x3A96BD17:  jz-8 0x3A96BD70
+. 962 3A96BD09 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 57 
+
+==== BB 963 (0x3A96BD19) in 18160B, out 99943B, BBs exec'd 0 ====
+	0x3A96BD19:  shll $0x4, %edx
+	0x3A96BD1C:  movzbl %al,%eax
+	0x3A96BD1F:  incl %ecx
+	0x3A96BD20:  addl %eax,%edx
+	0x3A96BD22:  movzbl (%ecx),%eax
+	0x3A96BD25:  testb %al,%al
+	0x3A96BD27:  jz-8 0x3A96BD70
+. 963 3A96BD19 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 47 
+
+==== BB 964 (0x3A96BD29) in 18176B, out 100056B, BBs exec'd 0 ====
+	0x3A96BD29:  shll $0x4, %edx
+	0x3A96BD2C:  movzbl %al,%eax
+	0x3A96BD2F:  incl %ecx
+	0x3A96BD30:  addl %eax,%edx
+	0x3A96BD32:  movzbl (%ecx),%eax
+	0x3A96BD35:  testb %al,%al
+	0x3A96BD37:  jz-8 0x3A96BD70
+. 964 3A96BD29 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 37 
+
+==== BB 965 (0x3A96BD39) in 18192B, out 100169B, BBs exec'd 0 ====
+	0x3A96BD39:  shll $0x4, %edx
+	0x3A96BD3C:  movzbl %al,%eax
+	0x3A96BD3F:  incl %ecx
+	0x3A96BD40:  addl %eax,%edx
+	0x3A96BD42:  movzbl (%ecx),%eax
+	0x3A96BD45:  testb %al,%al
+	0x3A96BD47:  jz-8 0x3A96BD70
+. 965 3A96BD39 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 27 
+
+==== BB 966 (0x3A96BD49) in 18208B, out 100282B, BBs exec'd 0 ====
+	0x3A96BD49:  leal 0(%esi,,), %esi
+	0x3A96BD50:  shll $0x4, %edx
+	0x3A96BD53:  movzbl %al,%eax
+	0x3A96BD56:  addl %eax,%edx
+	0x3A96BD58:  movl %edx,%eax
+	0x3A96BD5A:  incl %ecx
+	0x3A96BD5B:  andl $0xF0000000, %eax
+	0x3A96BD60:  xorl %eax,%edx
+	0x3A96BD62:  shrl $0x18, %eax
+	0x3A96BD65:  xorl %eax,%edx
+	0x3A96BD67:  movzbl (%ecx),%eax
+	0x3A96BD6A:  testb %al,%al
+	0x3A96BD6C:  jnz-8 0x3A96BD50
+. 966 3A96BD49 37
+. 8D B4 26 00 00 00 00 C1 E2 04 0F B6 C0 01 C2 89 D0 41 25 00 00 00 F0 31 C2 C1 E8 18 31 C2 0F B6 01 84 C0 75 E2 
+
+==== BB 967 (0x3A96BD50) in 18245B, out 100444B, BBs exec'd 0 ====
+	0x3A96BD50:  shll $0x4, %edx
+	0x3A96BD53:  movzbl %al,%eax
+	0x3A96BD56:  addl %eax,%edx
+	0x3A96BD58:  movl %edx,%eax
+	0x3A96BD5A:  incl %ecx
+	0x3A96BD5B:  andl $0xF0000000, %eax
+	0x3A96BD60:  xorl %eax,%edx
+	0x3A96BD62:  shrl $0x18, %eax
+	0x3A96BD65:  xorl %eax,%edx
+	0x3A96BD67:  movzbl (%ecx),%eax
+	0x3A96BD6A:  testb %al,%al
+	0x3A96BD6C:  jnz-8 0x3A96BD50
+. 967 3A96BD50 30
+. C1 E2 04 0F B6 C0 01 C2 89 D0 41 25 00 00 00 F0 31 C2 C1 E8 18 31 C2 0F B6 01 84 C0 75 E2 
+
+==== BB 968 (0x3A96BD6E) in 18275B, out 100597B, BBs exec'd 0 ====
+	0x3A96BD6E:  movl %esi,%esi
+	0x3A96BD70:  popl %ebp
+	0x3A96BD71:  movl %edx,%eax
+	0x3A96BD73:  ret
+. 968 3A96BD6E 6
+. 89 F6 5D 89 D0 C3 
+
+==== BB 969 (0x3A96C107) in 18281B, out 100663B, BBs exec'd 0 ====
+	0x3A96C107:  movl %eax,-100(%ebp)
+	0x3A96C10A:  movl 24(%ebp),%eax
+	0x3A96C10D:  movl 8(%ebp),%esi
+	0x3A96C110:  incl 0xFFFFF9D4(%ebx)
+	0x3A96C116:  testl %eax,%eax
+	0x3A96C118:  movl $0x0, -68(%ebp)
+	0x3A96C11F:  movl $0x0, -64(%ebp)
+	0x3A96C126:  jnz-32 0x3A96C3BC
+. 969 3A96C107 37
+. 89 45 9C 8B 45 18 8B 75 08 FF 83 D4 F9 FF FF 85 C0 C7 45 BC 00 00 00 00 C7 45 C0 00 00 00 00 0F 85 90 02 00 00 
+
+==== BB 970 (0x3A96C12C) in 18318B, out 100811B, BBs exec'd 0 ====
+	0x3A96C12C:  movl 8(%ebp),%eax
+	0x3A96C12F:  movl (%eax),%edx
+	0x3A96C131:  testl %edx,%edx
+	0x3A96C133:  movl %edi,%eax
+	0x3A96C135:  jz-8 0x3A96C195
+. 970 3A96C12C 11
+. 8B 45 08 8B 10 85 D2 89 F8 74 5E 
+
+==== BB 971 (0x3A96C137) in 18329B, out 100900B, BBs exec'd 0 ====
+	0x3A96C137:  leal -68(%ebp), %edx
+	0x3A96C13A:  movl %edx,-124(%ebp)
+	0x3A96C13D:  leal 0(%esi), %esi
+	0x3A96C140:  movl 16(%ebp),%ecx
+	0x3A96C143:  movl %ecx,24(%esp,,)
+	0x3A96C147:  movl 24(%ebp),%edx
+	0x3A96C14A:  movl %edx,20(%esp,,)
+	0x3A96C14E:  movl 20(%ebp),%ecx
+	0x3A96C151:  movl %ecx,16(%esp,,)
+	0x3A96C155:  movl 12(%ebp),%edx
+	0x3A96C158:  movl %eax,8(%esp,,)
+	0x3A96C15C:  movl %edx,12(%esp,,)
+	0x3A96C160:  movl (%esi),%eax
+	0x3A96C162:  movl %eax,4(%esp,,)
+	0x3A96C166:  movl -124(%ebp),%ecx
+	0x3A96C169:  movl %ecx,(%esp,,)
+	0x3A96C16C:  movl -96(%ebp),%eax
+	0x3A96C16F:  movl -100(%ebp),%edx
+	0x3A96C172:  movl (%eax),%ecx
+	0x3A96C174:  movl -88(%ebp),%eax
+	0x3A96C177:  call 0x3A96BDB0
+. 971 3A96C137 69
+. 8D 55 BC 89 55 84 8D 76 00 8B 4D 10 89 4C 24 18 8B 55 18 89 54 24 14 8B 4D 14 89 4C 24 10 8B 55 0C 89 44 24 08 89 54 24 0C 8B 06 89 44 24 04 8B 4D 84 89 0C 24 8B 45 A0 8B 55 9C 8B 08 8B 45 A8 E8 34 FC FF FF 
+
+==== BB 972 do_lookup_x(0x3A96BDB0) in 18398B, out 101151B, BBs exec'd 0 ====
+	0x3A96BDB0:  pushl %ebp
+	0x3A96BDB1:  movl %esp,%ebp
+	0x3A96BDB3:  pushl %edi
+	0x3A96BDB4:  pushl %esi
+	0x3A96BDB5:  pushl %ebx
+	0x3A96BDB6:  subl $0x3C, %esp
+	0x3A96BDB9:  movl %eax,-16(%ebp)
+	0x3A96BDBC:  movl 12(%ebp),%eax
+	0x3A96BDBF:  movl %edx,-20(%ebp)
+	0x3A96BDC2:  call 0x3A97592B
+. 972 3A96BDB0 23
+. 55 89 E5 57 56 53 83 EC 3C 89 45 F0 8B 45 0C 89 55 EC E8 64 9B 00 00 
+
+==== BB 973 (0x3A96BDC7) in 18421B, out 101323B, BBs exec'd 0 ====
+	0x3A96BDC7:  addl $0xC8A5, %ebx
+	0x3A96BDCD:  movl %ecx,-24(%ebp)
+	0x3A96BDD0:  movl (%eax),%edx
+	0x3A96BDD2:  movl 4(%eax),%eax
+	0x3A96BDD5:  movl %edx,-28(%ebp)
+	0x3A96BDD8:  movl %eax,-32(%ebp)
+	0x3A96BDDB:  jmp-8 0x3A96BDEF
+. 973 3A96BDC7 22
+. 81 C3 A5 C8 00 00 89 4D E8 8B 10 8B 40 04 89 55 E4 89 45 E0 EB 12 
+
+==== BB 974 (0x3A96BDEF) in 18443B, out 101433B, BBs exec'd 0 ====
+	0x3A96BDEF:  movl $0x0, -52(%ebp)
+	0x3A96BDF6:  movl -28(%ebp),%eax
+	0x3A96BDF9:  movl 16(%ebp),%ecx
+	0x3A96BDFC:  movl $0x0, -56(%ebp)
+	0x3A96BE03:  movl 28(%ebp),%edx
+	0x3A96BE06:  movl (%eax,%ecx,4),%ecx
+	0x3A96BE09:  movl 28(%ebp),%eax
+	0x3A96BE0C:  movl %ecx,-36(%ebp)
+	0x3A96BE0F:  testl %eax,%eax
+	0x3A96BE11:  setnz %al
+	0x3A96BE14:  cmpl %edx,%ecx
+	0x3A96BE16:  setz %dl
+	0x3A96BE19:  andl %edx,%eax
+	0x3A96BE1B:  testb $0x1, %al
+	0x3A96BE1D:  jnz-8 0x3A96BDE0
+. 974 3A96BDEF 48
+. C7 45 CC 00 00 00 00 8B 45 E4 8B 4D 10 C7 45 C8 00 00 00 00 8B 55 1C 8B 0C 88 8B 45 1C 89 4D DC 85 C0 0F 95 C0 39 D1 0F 94 C2 21 D0 A8 01 75 C1 
+
+==== BB 975 (0x3A96BE1F) in 18491B, out 101655B, BBs exec'd 0 ====
+	0x3A96BE1F:  testb $0x2, 32(%ebp)
+	0x3A96BE23:  jz-8 0x3A96BE30
+. 975 3A96BE1F 6
+. F6 45 20 02 74 0B 
+
+==== BB 976 (0x3A96BE30) in 18497B, out 101722B, BBs exec'd 0 ====
+	0x3A96BE30:  testb $0x8, 0xFFFFFC14(%ebx)
+	0x3A96BE37:  jnz-32 0x3A96C038
+. 976 3A96BE30 13
+. F6 83 14 FC FF FF 08 0F 85 FB 01 00 00 
+
+==== BB 977 (0x3A96BE3D) in 18510B, out 101792B, BBs exec'd 0 ====
+	0x3A96BE3D:  movl -36(%ebp),%edx
+	0x3A96BE40:  movl 48(%edx),%eax
+	0x3A96BE43:  movl 0x18C(%edx),%ecx
+	0x3A96BE49:  movl 4(%eax),%eax
+	0x3A96BE4C:  movl %eax,-40(%ebp)
+	0x3A96BE4F:  movl 44(%edx),%eax
+	0x3A96BE52:  movl 4(%eax),%eax
+	0x3A96BE55:  movl %ecx,-48(%ebp)
+	0x3A96BE58:  movl %edx,%ecx
+	0x3A96BE5A:  xorl %edx, %edx
+	0x3A96BE5C:  movl %eax,-44(%ebp)
+	0x3A96BE5F:  movl -20(%ebp),%eax
+	0x3A96BE62:  divl 0x164(%ecx)
+	0x3A96BE68:  movl 0x168(%ecx),%eax
+	0x3A96BE6E:  movl (%eax,%edx,4),%esi
+	0x3A96BE71:  testl %esi,%esi
+	0x3A96BE73:  jnz-8 0x3A96BE94
+. 977 3A96BE3D 56
+. 8B 55 DC 8B 42 30 8B 8A 8C 01 00 00 8B 40 04 89 45 D8 8B 42 2C 8B 40 04 89 4D D0 89 D1 31 D2 89 45 D4 8B 45 EC F7 B1 64 01 00 00 8B 81 68 01 00 00 8B 34 90 85 F6 75 1F 
+
+==== BB 978 (0x3A96BE94) in 18566B, out 102042B, BBs exec'd 0 ====
+	0x3A96BE94:  movl -40(%ebp),%edi
+	0x3A96BE97:  movl %esi,%eax
+	0x3A96BE99:  shll $0x4, %eax
+	0x3A96BE9C:  addl %eax,%edi
+	0x3A96BE9E:  movl 4(%edi),%eax
+	0x3A96BEA1:  testl %eax,%eax
+	0x3A96BEA3:  jz-8 0x3A96BE80
+. 978 3A96BE94 17
+. 8B 7D D8 89 F0 C1 E0 04 01 C7 8B 47 04 85 C0 74 DB 
+
+==== BB 979 (0x3A96BE80) in 18583B, out 102147B, BBs exec'd 0 ====
+	0x3A96BE80:  movl -36(%ebp),%edx
+	0x3A96BE83:  movl 0x16C(%edx),%eax
+	0x3A96BE89:  movl (%eax,%esi,4),%esi
+	0x3A96BE8C:  testl %esi,%esi
+	0x3A96BE8E:  jz-32 0x3A96BF70
+. 979 3A96BE80 20
+. 8B 55 DC 8B 82 6C 01 00 00 8B 34 B0 85 F6 0F 84 DC 00 00 00 
+
+==== BB 980 (0x3A96BF70) in 18603B, out 102252B, BBs exec'd 0 ====
+	0x3A96BF70:  cmpl $0x1, -52(%ebp)
+	0x3A96BF74:  movl -56(%ebp),%edi
+	0x3A96BF77:  jz-8 0x3A96BF7B
+. 980 3A96BF70 9
+. 83 7D CC 01 8B 7D C8 74 02 
+
+==== BB 981 (0x3A96BF79) in 18612B, out 102328B, BBs exec'd 0 ====
+	0x3A96BF79:  xorl %edi, %edi
+	0x3A96BF7B:  testl %edi,%edi
+	0x3A96BF7D:  jz-8 0x3A96BF97
+. 981 3A96BF79 6
+. 31 FF 85 FF 74 18 
+
+==== BB 982 (0x3A96BF97) in 18618B, out 102398B, BBs exec'd 0 ====
+	0x3A96BF97:  movl 20(%ebp),%edx
+	0x3A96BF9A:  testl %esi,%esi
+	0x3A96BF9C:  setz %al
+	0x3A96BF9F:  testl %edx,%edx
+	0x3A96BFA1:  setnz %dl
+	0x3A96BFA4:  andl %edx,%eax
+	0x3A96BFA6:  testb $0x1, %al
+	0x3A96BFA8:  jz-32 0x3A96BDE0
+. 982 3A96BF97 23
+. 8B 55 14 85 F6 0F 94 C0 85 D2 0F 95 C2 21 D0 A8 01 0F 84 32 FE FF FF 
+
+==== BB 983 (0x3A96BFAE) in 18641B, out 102540B, BBs exec'd 0 ====
+	0x3A96BFAE:  movl 20(%ebp),%edx
+	0x3A96BFB1:  movl 12(%edx),%eax
+	0x3A96BFB4:  testl %eax,%eax
+	0x3A96BFB6:  jz-32 0x3A96BDE0
+. 983 3A96BFAE 14
+. 8B 55 14 8B 42 0C 85 C0 0F 84 24 FE FF FF 
+
+==== BB 984 (0x3A96BDE0) in 18655B, out 102625B, BBs exec'd 0 ====
+	0x3A96BDE0:  incl 16(%ebp)
+	0x3A96BDE3:  movl -32(%ebp),%ecx
+	0x3A96BDE6:  cmpl %ecx,16(%ebp)
+	0x3A96BDE9:  jnb-32 0x3A96BFDC
+. 984 3A96BDE0 15
+. FF 45 10 8B 4D E0 39 4D 10 0F 83 ED 01 00 00 
+
+==== BB 985 (0x3A96BEA5) in 18670B, out 102714B, BBs exec'd 0 ====
+	0x3A96BEA5:  cmpw $0x0, 14(%edi)
+	0x3A96BEAA:  jnz-8 0x3A96BEB2
+. 985 3A96BEA5 7
+. 66 83 7F 0E 00 75 06 
+
+==== BB 986 (0x3A96BEB2) in 18677B, out 102778B, BBs exec'd 0 ====
+	0x3A96BEB2:  movzbl 12(%edi),%eax
+	0x3A96BEB6:  andl $0xF, %eax
+	0x3A96BEB9:  cmpl $0x2, %eax
+	0x3A96BEBC:  jnle-8 0x3A96BE80
+. 986 3A96BEB2 12
+. 0F B6 47 0C 83 E0 0F 83 F8 02 7F C2 
+
+==== BB 987 (0x3A96BEBE) in 18689B, out 102859B, BBs exec'd 0 ====
+	0x3A96BEBE:  cmpl -24(%ebp),%edi
+	0x3A96BEC1:  jz-8 0x3A96BEDD
+. 987 3A96BEBE 5
+. 3B 7D E8 74 1A 
+
+==== BB 988 (0x3A96BEC3) in 18694B, out 102924B, BBs exec'd 0 ====
+	0x3A96BEC3:  movl (%edi),%ecx
+	0x3A96BEC5:  movl -44(%ebp),%eax
+	0x3A96BEC8:  movl -16(%ebp),%edx
+	0x3A96BECB:  addl %ecx,%eax
+	0x3A96BECD:  movl %edx,4(%esp,,)
+	0x3A96BED1:  movl %eax,(%esp,,)
+	0x3A96BED4:  call 0x3A975280
+. 988 3A96BEC3 22
+. 8B 0F 8B 45 D4 8B 55 F0 01 C8 89 54 24 04 89 04 24 E8 A7 93 00 00 
+
+==== BB 989 (0x3A96BED9) in 18716B, out 103038B, BBs exec'd 0 ====
+	0x3A96BED9:  testl %eax,%eax
+	0x3A96BEDB:  jnz-8 0x3A96BE80
+. 989 3A96BED9 4
+. 85 C0 75 A3 
+
+==== BB 990 (0x3A96BEDD) in 18720B, out 103097B, BBs exec'd 0 ====
+	0x3A96BEDD:  movl 20(%ebp),%edx
+	0x3A96BEE0:  testl %edx,%edx
+	0x3A96BEE2:  jz-32 0x3A96C069
+. 990 3A96BEDD 11
+. 8B 55 14 85 D2 0F 84 81 01 00 00 
+
+==== BB 991 (0x3A96BEE8) in 18731B, out 103169B, BBs exec'd 0 ====
+	0x3A96BEE8:  movl -48(%ebp),%eax
+	0x3A96BEEB:  testl %eax,%eax
+	0x3A96BEED:  jz-32 0x3A96BF7F
+. 991 3A96BEE8 11
+. 8B 45 D0 85 C0 0F 84 8C 00 00 00 
+
+==== BB 992 (0x3A96BEF3) in 18742B, out 103241B, BBs exec'd 0 ====
+	0x3A96BEF3:  movl -48(%ebp),%ecx
+	0x3A96BEF6:  movl -36(%ebp),%eax
+	0x3A96BEF9:  movzwl (%ecx,%esi,2),%ecx
+	0x3A96BEFD:  movl 0x17C(%eax),%edx
+	0x3A96BF03:  andl $0x7FFF, %ecx
+	0x3A96BF09:  shll $0x4, %ecx
+	0x3A96BF0C:  movl %ecx,-60(%ebp)
+	0x3A96BF0F:  movl 20(%ebp),%ecx
+	0x3A96BF12:  movl 4(%ecx),%eax
+	0x3A96BF15:  movl -60(%ebp),%ecx
+	0x3A96BF18:  cmpl %eax,4(%edx,%ecx,1)
+	0x3A96BF1C:  jz-32 0x3A96BFE8
+. 992 3A96BEF3 47
+. 8B 4D D0 8B 45 DC 0F B7 0C 71 8B 90 7C 01 00 00 81 E1 FF 7F 00 00 C1 E1 04 89 4D C4 8B 4D 14 8B 41 04 8B 4D C4 39 44 0A 04 0F 84 C6 00 00 00 
+
+==== BB 993 (0x3A96BFE8) in 18789B, out 103421B, BBs exec'd 0 ====
+	0x3A96BFE8:  movl 20(%ebp),%ecx
+	0x3A96BFEB:  movl (%ecx),%eax
+	0x3A96BFED:  movl %eax,4(%esp,,)
+	0x3A96BFF1:  movl -60(%ebp),%ecx
+	0x3A96BFF4:  movl (%edx,%ecx,1),%eax
+	0x3A96BFF7:  movl %eax,(%esp,,)
+	0x3A96BFFA:  call 0x3A975280
+. 993 3A96BFE8 23
+. 8B 4D 14 8B 01 89 44 24 04 8B 4D C4 8B 04 0A 89 04 24 E8 81 92 00 00 
+
+==== BB 994 (0x3A96BFFF) in 18812B, out 103533B, BBs exec'd 0 ====
+	0x3A96BFFF:  testl %eax,%eax
+	0x3A96C001:  jnz-32 0x3A96BF22
+. 994 3A96BFFF 8
+. 85 C0 0F 85 1B FF FF FF 
+
+==== BB 995 (0x3A96C007) in 18820B, out 103595B, BBs exec'd 0 ====
+	0x3A96C007:  jmp 0x3A96BF7F
+. 995 3A96C007 5
+. E9 73 FF FF FF 
+
+==== BB 996 (0x3A96BF7F) in 18825B, out 103623B, BBs exec'd 0 ====
+	0x3A96BF7F:  movzbl 12(%edi),%eax
+	0x3A96BF83:  shrb $0x4, %al
+	0x3A96BF86:  movzbl %al,%eax
+	0x3A96BF89:  cmpl $0x1, %eax
+	0x3A96BF8C:  jz-32 0x3A96C01E
+. 996 3A96BF7F 19
+. 0F B6 47 0C C0 E8 04 0F B6 C0 83 F8 01 0F 84 8C 00 00 00 
+
+==== BB 997 (0x3A96C01E) in 18844B, out 103718B, BBs exec'd 0 ====
+	0x3A96C01E:  movl 8(%ebp),%edx
+	0x3A96C021:  movl -36(%ebp),%ecx
+	0x3A96C024:  movl %edi,(%edx)
+	0x3A96C026:  movl %ecx,4(%edx)
+	0x3A96C029:  movl $0x1,%edx
+	0x3A96C02E:  addl $0x3C, %esp
+	0x3A96C031:  movl %edx,%eax
+	0x3A96C033:  popl %ebx
+	0x3A96C034:  popl %esi
+	0x3A96C035:  popl %edi
+	0x3A96C036:  popl %ebp
+	0x3A96C037:  ret
+. 997 3A96C01E 26
+. 8B 55 08 8B 4D DC 89 3A 89 4A 04 BA 01 00 00 00 83 C4 3C 89 D0 5B 5E 5F 5D C3 
+
+==== BB 998 (0x3A96C17C) in 18870B, out 103889B, BBs exec'd 0 ====
+	0x3A96C17C:  testl %eax,%eax
+	0x3A96C17E:  movl %eax,%edx
+	0x3A96C180:  jnle-8 0x3A96C195
+. 998 3A96C17C 6
+. 85 C0 89 C2 7F 13 
+
+==== BB 999 (0x3A96C195) in 18876B, out 103955B, BBs exec'd 0 ====
+	0x3A96C195:  movl -68(%ebp),%esi
+	0x3A96C198:  testl %esi,%esi
+	0x3A96C19A:  jz-32 0x3A96C3DE
+. 999 3A96C195 11
+. 8B 75 BC 85 F6 0F 84 3E 02 00 00 
+
+==== BB 1000 (0x3A96C1A0) in 18887B, out 104027B, BBs exec'd 0 ====
+	0x3A96C1A0:  movl $0x0, -112(%ebp)
+	0x3A96C1A7:  movl -96(%ebp),%ecx
+	0x3A96C1AA:  movl (%ecx),%edx
+	0x3A96C1AC:  testl %edx,%edx
+	0x3A96C1AE:  jz-8 0x3A96C1BC
+. 1000 3A96C1A0 16
+. C7 45 90 00 00 00 00 8B 4D A0 8B 11 85 D2 74 0C 
+
+==== BB 1001 (0x3A96C1B0) in 18903B, out 104124B, BBs exec'd 0 ====
+	0x3A96C1B0:  movzbl 13(%edx),%eax
+	0x3A96C1B4:  andl $0x3, %eax
+	0x3A96C1B7:  cmpl $0x3, %eax
+	0x3A96C1BA:  jz-8 0x3A96C204
+. 1001 3A96C1B0 12
+. 0F B6 42 0D 83 E0 03 83 F8 03 74 48 
+
+==== BB 1002 (0x3A96C1BC) in 18915B, out 104205B, BBs exec'd 0 ====
+	0x3A96C1BC:  leal -68(%ebp), %edx
+	0x3A96C1BF:  movl %edx,-124(%ebp)
+	0x3A96C1C2:  movl -124(%ebp),%ecx
+	0x3A96C1C5:  movl 4(%ecx),%esi
+	0x3A96C1C8:  movzbl 0x174(%esi),%eax
+	0x3A96C1CF:  andb $0x3, %al
+	0x3A96C1D1:  cmpb $0x2, %al
+	0x3A96C1D3:  jz-32 0x3A96C607
+. 1002 3A96C1BC 29
+. 8D 55 BC 89 55 84 8B 4D 84 8B 71 04 0F B6 86 74 01 00 00 24 03 3C 02 0F 84 2E 04 00 00 
+
+==== BB 1003 (0x3A96C1D9) in 18944B, out 104346B, BBs exec'd 0 ====
+	0x3A96C1D9:  movl 0xFFFFFC14(%ebx),%eax
+	0x3A96C1DF:  testl $0x204, %eax
+	0x3A96C1E4:  jnz-32 0x3A96C412
+. 1003 3A96C1D9 17
+. 8B 83 14 FC FF FF A9 04 02 00 00 0F 85 28 02 00 00 
+
+==== BB 1004 (0x3A96C1EA) in 18961B, out 104423B, BBs exec'd 0 ====
+	0x3A96C1EA:  movl -68(%ebp),%eax
+	0x3A96C1ED:  movl -96(%ebp),%ecx
+	0x3A96C1F0:  movl -124(%ebp),%edx
+	0x3A96C1F3:  movl %eax,(%ecx)
+	0x3A96C1F5:  movl 4(%edx),%eax
+	0x3A96C1F8:  movl (%eax),%eax
+	0x3A96C1FA:  leal -12(%ebp), %esp
+	0x3A96C1FD:  popl %ebx
+	0x3A96C1FE:  popl %esi
+	0x3A96C1FF:  popl %edi
+	0x3A96C200:  popl %ebp
+	0x3A96C201:  ret 20
+. 1004 3A96C1EA 26
+. 8B 45 BC 8B 4D A0 8B 55 84 89 01 8B 42 04 8B 00 8D 65 F4 5B 5E 5F 5D C2 14 00 
+
+==== BB 1005 (0x3A96E1DE) in 18987B, out 104591B, BBs exec'd 0 ====
+	0x3A96E1DE:  movl %eax,0xFFFFFF64(%ebp)
+	0x3A96E1E4:  movl -88(%ebp),%edx
+	0x3A96E1E7:  subl $0x14, %esp
+	0x3A96E1EA:  movl 8(%ebp),%esi
+	0x3A96E1ED:  movl %edx,%edi
+	0x3A96E1EF:  movl %edx,0x208(%esi)
+	0x3A96E1F5:  movl %eax,0x204(%esi)
+	0x3A96E1FB:  testl %edi,%edi
+	0x3A96E1FD:  jz-8 0x3A96E208
+. 1005 3A96E1DE 33
+. 89 85 64 FF FF FF 8B 55 A8 83 EC 14 8B 75 08 89 D7 89 96 08 02 00 00 89 86 04 02 00 00 85 FF 74 09 
+
+==== BB 1006 (0x3A96E1FF) in 19020B, out 104741B, BBs exec'd 0 ====
+	0x3A96E1FF:  movl 4(%edi),%edx
+	0x3A96E202:  addl %edx,0xFFFFFF64(%ebp)
+	0x3A96E208:  cmpl $0x7, 0xFFFFFF6C(%ebp)
+	0x3A96E20F:  jnbe-32 0x3A96EA6B
+. 1006 3A96E1FF 22
+. 8B 57 04 01 95 64 FF FF FF 83 BD 6C FF FF FF 07 0F 87 56 08 00 00 
+
+==== BB 1007 (0x3A96E215) in 19042B, out 104844B, BBs exec'd 0 ====
+	0x3A96E215:  movl 0xFFFFFF6C(%ebp),%ecx
+	0x3A96E21B:  movl -10320(%ebx,%ecx,4),%eax
+	0x3A96E222:  addl %ebx,%eax
+	0x3A96E224:  jmp*l %eax
+. 1007 3A96E215 17
+. 8B 8D 6C FF FF FF 8B 84 8B B0 D7 FF FF 01 D8 FF E0 
+
+==== BB 1008 (0x3A96EADD) in 19059B, out 104911B, BBs exec'd 0 ====
+	0x3A96EADD:  movl 0xFFFFFF70(%ebp),%edx
+	0x3A96EAE3:  movl 0xFFFFFF64(%ebp),%ecx
+	0x3A96EAE9:  addl %ecx,(%edx)
+	0x3A96EAEB:  jmp 0x3A96E320
+. 1008 3A96EADD 19
+. 8B 95 70 FF FF FF 8B 8D 64 FF FF FF 01 0A E9 30 F8 FF FF 
+
+==== BB 1009 (0x3A96E320) in 19078B, out 104990B, BBs exec'd 0 ====
+	0x3A96E320:  addl $0x8, -120(%ebp)
+	0x3A96E324:  movl -124(%ebp),%edi
+	0x3A96E327:  cmpl %edi,-120(%ebp)
+	0x3A96E32A:  jb-32 0x3A96E0E0
+. 1009 3A96E320 16
+. 83 45 88 08 8B 7D 84 39 7D 88 0F 82 B0 FD FF FF 
+
+==== BB 1010 (0x3A96E0E0) in 19094B, out 105081B, BBs exec'd 0 ====
+	0x3A96E0E0:  movl -120(%ebp),%edx
+	0x3A96E0E3:  movl 0xFFFFFF74(%ebp),%edi
+	0x3A96E0E9:  movl 4(%edx),%edx
+	0x3A96E0EC:  movl %edx,0xFFFFFF0C(%ebp)
+	0x3A96E0F2:  movl %edx,%eax
+	0x3A96E0F4:  shrl $0x8, %eax
+	0x3A96E0F7:  movzwl (%edi,%eax,2),%ecx
+	0x3A96E0FB:  movl 0xFFFFFF7C(%ebp),%edi
+	0x3A96E101:  shll $0x4, %eax
+	0x3A96E104:  addl %eax,%edi
+	0x3A96E106:  movl -116(%ebp),%eax
+	0x3A96E109:  andl $0x7FFF, %ecx
+	0x3A96E10F:  movl %edi,-88(%ebp)
+	0x3A96E112:  shll $0x4, %ecx
+	0x3A96E115:  movl 0x17C(%eax),%edx
+	0x3A96E11B:  movl -128(%ebp),%eax
+	0x3A96E11E:  addl %edx,%ecx
+	0x3A96E120:  movl -120(%ebp),%edx
+	0x3A96E123:  addl (%edx),%eax
+	0x3A96E125:  movzbl 0xFFFFFF0C(%ebp),%edx
+	0x3A96E12C:  movl %eax,0xFFFFFF70(%ebp)
+	0x3A96E132:  cmpl $0x8, %edx
+	0x3A96E135:  movl %edx,0xFFFFFF6C(%ebp)
+	0x3A96E13B:  jz-32 0x3A96ECE5
+. 1010 3A96E0E0 97
+. 8B 55 88 8B BD 74 FF FF FF 8B 52 04 89 95 0C FF FF FF 89 D0 C1 E8 08 0F B7 0C 47 8B BD 7C FF FF FF C1 E0 04 01 C7 8B 45 8C 81 E1 FF 7F 00 00 89 7D A8 C1 E1 04 8B 90 7C 01 00 00 8B 45 80 01 D1 8B 55 88 03 02 0F B6 95 0C FF FF FF 89 85 70 FF FF FF 83 FA 08 89 95 6C FF FF FF 0F 84 A4 0B 00 00 
+
+==== BB 1011 (0x3A96ED40) in 19191B, out 105375B, BBs exec'd 0 ====
+	0x3A96ED40:  xorl %eax, %eax
+	0x3A96ED42:  cmpl $0x7, 0xFFFFFF6C(%ebp)
+	0x3A96ED49:  movl 0x200(%esi),%edx
+	0x3A96ED4F:  setz %al
+	0x3A96ED52:  cmpl $0x5, 0xFFFFFF6C(%ebp)
+	0x3A96ED59:  jz-32 0x3A96EEBE
+. 1011 3A96ED40 31
+. 31 C0 83 BD 6C FF FF FF 07 8B 96 00 02 00 00 0F 94 C0 83 BD 6C FF FF FF 05 0F 84 5F 01 00 00 
+
+==== BB 1012 (0x3A96ED5F) in 19222B, out 105500B, BBs exec'd 0 ====
+	0x3A96ED5F:  cmpl %edx,%eax
+	0x3A96ED61:  jnz-32 0x3A96E170
+. 1012 3A96ED5F 8
+. 39 D0 0F 85 09 F4 FF FF 
+
+==== BB 1013 (0x3A96ED67) in 19230B, out 105557B, BBs exec'd 0 ====
+	0x3A96ED67:  incl 0xFFFFF9D8(%ebx)
+	0x3A96ED6D:  movl 0x208(%esi),%edi
+	0x3A96ED73:  movl 0x204(%esi),%eax
+	0x3A96ED79:  movl %edi,-88(%ebp)
+	0x3A96ED7C:  jmp 0x3A96E742
+. 1013 3A96ED67 26
+. FF 83 D8 F9 FF FF 8B BE 08 02 00 00 8B 86 04 02 00 00 89 7D A8 E9 C1 F9 FF FF 
+
+==== BB 1014 (0x3A96E742) in 19256B, out 105661B, BBs exec'd 0 ====
+	0x3A96E742:  movl %eax,0xFFFFFF64(%ebp)
+	0x3A96E748:  jmp 0x3A96E1FB
+. 1014 3A96E742 11
+. 89 85 64 FF FF FF E9 AE FA FF FF 
+
+==== BB 1015 (0x3A96E1FB) in 19267B, out 105708B, BBs exec'd 0 ====
+	0x3A96E1FB:  testl %edi,%edi
+	0x3A96E1FD:  jz-8 0x3A96E208
+. 1015 3A96E1FB 4
+. 85 FF 74 09 
+
+==== BB 1016 (0x3A96E308) in 19271B, out 105767B, BBs exec'd 0 ====
+	0x3A96E308:  movl 0xFFFFFF64(%ebp),%eax
+	0x3A96E30E:  movl 0xFFFFFF70(%ebp),%edi
+	0x3A96E314:  movl %eax,(%edi)
+	0x3A96E316:  leal 0(%esi), %esi
+	0x3A96E319:  leal 0(%edi,,), %edi
+	0x3A96E320:  addl $0x8, -120(%ebp)
+	0x3A96E324:  movl -124(%ebp),%edi
+	0x3A96E327:  cmpl %edi,-120(%ebp)
+	0x3A96E32A:  jb-32 0x3A96E0E0
+. 1016 3A96E308 40
+. 8B 85 64 FF FF FF 8B BD 70 FF FF FF 89 07 8D 76 00 8D BC 27 00 00 00 00 83 45 88 08 8B 7D 84 39 7D 88 0F 82 B0 FD FF FF 
+
+==== BB 1017 (0x3A96BF92) in 19311B, out 105914B, BBs exec'd 0 ====
+	0x3A96BF92:  cmpl $0x2, %eax
+	0x3A96BF95:  jz-8 0x3A96C010
+. 1017 3A96BF92 5
+. 83 F8 02 74 79 
+
+==== BB 1018 (0x3A96C010) in 19316B, out 105971B, BBs exec'd 0 ====
+	0x3A96C010:  movl 0xFFFFFC48(%ebx),%eax
+	0x3A96C016:  testl %eax,%eax
+	0x3A96C018:  jnz-32 0x3A96C0C0
+. 1018 3A96C010 14
+. 8B 83 48 FC FF FF 85 C0 0F 85 A2 00 00 00 
+
+==== BB 1019 (0x3A96BE75) in 19330B, out 106046B, BBs exec'd 0 ====
+	0x3A96BE75:  jmp 0x3A96BF79
+. 1019 3A96BE75 5
+. E9 FF 00 00 00 
+
+==== BB 1020 (0x3A96BFBC) in 19335B, out 106074B, BBs exec'd 0 ====
+	0x3A96BFBC:  movl -36(%ebp),%edx
+	0x3A96BFBF:  call 0x3A970B80
+. 1020 3A96BFBC 8
+. 8B 55 DC E8 BC 4B 00 00 
+
+==== BB 1021 (0x3A96BFC4) in 19343B, out 106135B, BBs exec'd 0 ====
+	0x3A96BFC4:  testl %eax,%eax
+	0x3A96BFC6:  movl $0xFFFFFFFF,%edx
+	0x3A96BFCB:  jnz-8 0x3A96C02E
+. 1021 3A96BFC4 9
+. 85 C0 BA FF FF FF FF 75 61 
+
+==== BB 1022 (0x3A96BFCD) in 19352B, out 106206B, BBs exec'd 0 ====
+	0x3A96BFCD:  incl 16(%ebp)
+	0x3A96BFD0:  movl -32(%ebp),%ecx
+	0x3A96BFD3:  cmpl %ecx,16(%ebp)
+	0x3A96BFD6:  jb-32 0x3A96BDEF
+. 1022 3A96BFCD 15
+. FF 45 10 8B 4D E0 39 4D 10 0F 82 13 FE FF FF 
+
+==== BB 1023 (0x3A96E1B1) in 19367B, out 106295B, BBs exec'd 0 ====
+	0x3A96E1B1:  movl -88(%ebp),%eax
+	0x3A96E1B4:  movl -16(%ebp),%ecx
+	0x3A96E1B7:  movl (%eax),%eax
+	0x3A96E1B9:  movl %esi,12(%esp,,)
+	0x3A96E1BD:  movl %edi,4(%esp,,)
+	0x3A96E1C1:  addl %ecx,%eax
+	0x3A96E1C3:  xorl %ecx, %ecx
+	0x3A96E1C5:  movl %ecx,16(%esp,,)
+	0x3A96E1C9:  leal -88(%ebp), %ecx
+	0x3A96E1CC:  movl %edx,8(%esp,,)
+	0x3A96E1D0:  movl 12(%ebp),%edx
+	0x3A96E1D3:  movl %edx,(%esp,,)
+	0x3A96E1D6:  movl 8(%ebp),%edx
+	0x3A96E1D9:  call 0x3A96C0E0
+. 1023 3A96E1B1 45
+. 8B 45 A8 8B 4D F0 8B 00 89 74 24 0C 89 7C 24 04 01 C8 31 C9 89 4C 24 10 8D 4D A8 89 54 24 08 8B 55 0C 89 14 24 8B 55 08 E8 02 DF FF FF 
+
+==== BB 1024 (0x3A96BFDC) in 19412B, out 106483B, BBs exec'd 0 ====
+	0x3A96BFDC:  xorl %edx, %edx
+	0x3A96BFDE:  addl $0x3C, %esp
+	0x3A96BFE1:  movl %edx,%eax
+	0x3A96BFE3:  popl %ebx
+	0x3A96BFE4:  popl %esi
+	0x3A96BFE5:  popl %edi
+	0x3A96BFE6:  popl %ebp
+	0x3A96BFE7:  ret
+. 1024 3A96BFDC 12
+. 31 D2 83 C4 3C 89 D0 5B 5E 5F 5D C3 
+
+==== BB 1025 (0x3A96C182) in 19424B, out 106610B, BBs exec'd 0 ====
+	0x3A96C182:  testl %eax,%eax
+	0x3A96C184:  js-32 0x3A96C2D0
+. 1025 3A96C182 8
+. 85 C0 0F 88 46 01 00 00 
+
+==== BB 1026 (0x3A96C18A) in 19432B, out 106669B, BBs exec'd 0 ====
+	0x3A96C18A:  addl $0x4, %esi
+	0x3A96C18D:  xorl %eax, %eax
+	0x3A96C18F:  movl (%esi),%edx
+	0x3A96C191:  testl %edx,%edx
+	0x3A96C193:  jnz-8 0x3A96C140
+. 1026 3A96C18A 11
+. 83 C6 04 31 C0 8B 16 85 D2 75 AB 
+
+==== BB 1027 (0x3A96C3DE) in 19443B, out 106762B, BBs exec'd 0 ====
+	0x3A96C3DE:  movl -96(%ebp),%edx
+	0x3A96C3E1:  movl (%edx),%eax
+	0x3A96C3E3:  testl %eax,%eax
+	0x3A96C3E5:  jz-8 0x3A96C3F2
+. 1027 3A96C3DE 9
+. 8B 55 A0 8B 02 85 C0 74 0B 
+
+==== BB 1028 (0x3A96C3E7) in 19452B, out 106844B, BBs exec'd 0 ====
+	0x3A96C3E7:  movzbl 12(%eax),%eax
+	0x3A96C3EB:  shrb $0x4, %al
+	0x3A96C3EE:  cmpb $0x2, %al
+	0x3A96C3F0:  jz-8 0x3A96C3FD
+. 1028 3A96C3E7 11
+. 0F B6 40 0C C0 E8 04 3C 02 74 0B 
+
+==== BB 1029 (0x3A96C3FD) in 19463B, out 106932B, BBs exec'd 0 ====
+	0x3A96C3FD:  movl -96(%ebp),%edx
+	0x3A96C400:  movl $0x0, (%edx)
+	0x3A96C406:  xorl %eax, %eax
+	0x3A96C408:  leal -12(%ebp), %esp
+	0x3A96C40B:  popl %ebx
+	0x3A96C40C:  popl %esi
+	0x3A96C40D:  popl %edi
+	0x3A96C40E:  popl %ebp
+	0x3A96C40F:  ret 20
+. 1029 3A96C3FD 21
+. 8B 55 A0 C7 02 00 00 00 00 31 C0 8D 65 F4 5B 5E 5F 5D C2 14 00 
+
+==== BB 1030 (0x3A96E208) in 19484B, out 107080B, BBs exec'd 0 ====
+	0x3A96E208:  cmpl $0x7, 0xFFFFFF6C(%ebp)
+	0x3A96E20F:  jnbe-32 0x3A96EA6B
+. 1030 3A96E208 13
+. 83 BD 6C FF FF FF 07 0F 87 56 08 00 00 
+
+==== BB 1031 (0x3A96BD70) in 19497B, out 107146B, BBs exec'd 0 ====
+	0x3A96BD70:  popl %ebp
+	0x3A96BD71:  movl %edx,%eax
+	0x3A96BD73:  ret
+. 1031 3A96BD70 4
+. 5D 89 D0 C3 
+
+==== BB 1032 (0x3A96C069) in 19501B, out 107202B, BBs exec'd 0 ====
+	0x3A96C069:  movl -48(%ebp),%eax
+	0x3A96C06C:  testl %eax,%eax
+	0x3A96C06E:  jz-32 0x3A96BF7F
+. 1032 3A96C069 11
+. 8B 45 D0 85 C0 0F 84 0B FF FF FF 
+
+==== BB 1033 (0x3A96C074) in 19512B, out 107274B, BBs exec'd 0 ====
+	0x3A96C074:  testb $0x2, 24(%ebp)
+	0x3A96C078:  jz-8 0x3A96C0AD
+. 1033 3A96C074 6
+. F6 45 18 02 74 33 
+
+==== BB 1034 (0x3A96C0AD) in 19518B, out 107341B, BBs exec'd 0 ====
+	0x3A96C0AD:  movl -48(%ebp),%eax
+	0x3A96C0B0:  movzwl (%eax,%esi,2),%edx
+	0x3A96C0B4:  movl %edx,%eax
+	0x3A96C0B6:  andl $0x7FFF, %eax
+	0x3A96C0BB:  cmpl $0x2, %eax
+	0x3A96C0BE:  jmp-8 0x3A96C089
+. 1034 3A96C0AD 19
+. 8B 45 D0 0F B7 14 70 89 D0 25 FF 7F 00 00 83 F8 02 EB C9 
+
+==== BB 1035 (0x3A96C089) in 19537B, out 107428B, BBs exec'd 0 ====
+	0x3A96C089:  jle-32 0x3A96BF7F
+. 1035 3A96C089 6
+. 0F 8E F0 FE FF FF 
+
+==== BB 1036 (0x3A96E330) in 19543B, out 107487B, BBs exec'd 0 ====
+	0x3A96E330:  incl -112(%ebp)
+	0x3A96E333:  cmpl $0x1, -112(%ebp)
+	0x3A96E337:  jle-32 0x3A96DFBA
+. 1036 3A96E330 13
+. FF 45 90 83 7D 90 01 0F 8E 7D FC FF FF 
+
+==== BB 1037 (0x3A96DFBA) in 19556B, out 107564B, BBs exec'd 0 ====
+	0x3A96DFBA:  movl %esi,-116(%ebp)
+	0x3A96DFBD:  movl -112(%ebp),%ecx
+	0x3A96DFC0:  leal (%ecx,%ecx,2), %eax
+	0x3A96DFC3:  leal -12(%ebp,%eax,4), %eax
+	0x3A96DFC7:  leal -40(%eax), %edx
+	0x3A96DFCA:  movl -40(%eax),%ecx
+	0x3A96DFCD:  movl 4(%edx),%edi
+	0x3A96DFD0:  movl %ecx,%eax
+	0x3A96DFD2:  addl %edi,%eax
+	0x3A96DFD4:  movl %eax,-124(%ebp)
+	0x3A96DFD7:  movl (%esi),%eax
+	0x3A96DFD9:  movl %ecx,-120(%ebp)
+	0x3A96DFDC:  movl %eax,-128(%ebp)
+	0x3A96DFDF:  movl 8(%edx),%eax
+	0x3A96DFE2:  testl %eax,%eax
+	0x3A96DFE4:  jz-8 0x3A96E035
+. 1037 3A96DFBA 44
+. 89 75 8C 8B 4D 90 8D 04 49 8D 44 85 F4 8D 50 D8 8B 48 D8 8B 7A 04 89 C8 01 F8 89 45 84 8B 06 89 4D 88 89 45 80 8B 42 08 85 C0 74 4F 
+
+==== BB 1038 (0x3A96DFE6) in 19600B, out 107767B, BBs exec'd 0 ====
+	0x3A96DFE6:  movl -124(%ebp),%edx
+	0x3A96DFE9:  cmpl %edx,%ecx
+	0x3A96DFEB:  jb-8 0x3A96E015
+. 1038 3A96DFE6 7
+. 8B 55 84 39 D1 72 28 
+
+==== BB 1039 (0x3A96E015) in 19607B, out 107839B, BBs exec'd 0 ====
+	0x3A96E015:  movl -120(%ebp),%edi
+	0x3A96E018:  movl -128(%ebp),%ecx
+	0x3A96E01B:  movl (%edi),%eax
+	0x3A96E01D:  movzbl 4(%edi),%edx
+	0x3A96E021:  addl %eax,%ecx
+	0x3A96E023:  cmpl $0x7, %edx
+	0x3A96E026:  jz-8 0x3A96DFF2
+. 1039 3A96E015 19
+. 8B 7D 88 8B 4D 80 8B 07 0F B6 57 04 01 C1 83 FA 07 74 CA 
+
+==== BB 1040 (0x3A96DFF2) in 19626B, out 107951B, BBs exec'd 0 ====
+	0x3A96DFF2:  movl 0x1F4(%esi),%edx
+	0x3A96DFF8:  testl %edx,%edx
+	0x3A96DFFA:  jnz-32 0x3A96E8CE
+. 1040 3A96DFF2 14
+. 8B 96 F4 01 00 00 85 D2 0F 85 CE 08 00 00 
+
+==== BB 1041 (0x3A96E000) in 19640B, out 108026B, BBs exec'd 0 ====
+	0x3A96E000:  movl -128(%ebp),%eax
+	0x3A96E003:  addl %eax,(%ecx)
+	0x3A96E005:  addl $0x8, -120(%ebp)
+	0x3A96E009:  movl -124(%ebp),%edx
+	0x3A96E00C:  cmpl %edx,-120(%ebp)
+	0x3A96E00F:  jnb-32 0x3A96E330
+. 1041 3A96E000 21
+. 8B 45 80 01 01 83 45 88 08 8B 55 84 39 55 88 0F 83 1B 03 00 00 
+
+==== BB 1042 (0x3A96E33D) in 19661B, out 108145B, BBs exec'd 0 ====
+	0x3A96E33D:  movl $0x0, -76(%ebp)
+	0x3A96E344:  movl 52(%esi),%eax
+	0x3A96E347:  movl $0x0, -68(%ebp)
+	0x3A96E34E:  movl $0x0, -80(%ebp)
+	0x3A96E355:  testl %eax,%eax
+	0x3A96E357:  movl $0x0, -84(%ebp)
+	0x3A96E35E:  jz-8 0x3A96E36F
+. 1042 3A96E33D 35
+. C7 45 B4 00 00 00 00 8B 46 34 C7 45 BC 00 00 00 00 C7 45 B0 00 00 00 00 85 C0 C7 45 AC 00 00 00 00 74 0F 
+
+==== BB 1043 (0x3A96E36F) in 19696B, out 108280B, BBs exec'd 0 ====
+	0x3A96E36F:  movl 104(%esi),%eax
+	0x3A96E372:  testl %eax,%eax
+	0x3A96E374:  jz-8 0x3A96E380
+. 1043 3A96E36F 7
+. 8B 46 68 85 C0 74 0A 
+
+==== BB 1044 (0x3A96E376) in 19703B, out 108352B, BBs exec'd 0 ====
+	0x3A96E376:  cmpl $0x7, 4(%eax)
+	0x3A96E37A:  jz-32 0x3A96E5F1
+. 1044 3A96E376 10
+. 83 78 04 07 0F 84 71 02 00 00 
+
+==== BB 1045 (0x3A96E380) in 19713B, out 108415B, BBs exec'd 0 ====
+	0x3A96E380:  xorl %eax, %eax
+	0x3A96E382:  leal 0xFFFFF9E4(%ebx), %edi
+	0x3A96E388:  movl %eax,0xFFFFFF54(%ebp)
+	0x3A96E38E:  movl %edi,0xFFFFFF14(%ebp)
+	0x3A96E394:  movl %esi,0xFFFFFF50(%ebp)
+	0x3A96E39A:  movl 0xFFFFFF54(%ebp),%edx
+	0x3A96E3A0:  xorl %edi, %edi
+	0x3A96E3A2:  movl (%esi),%ecx
+	0x3A96E3A4:  leal (%edx,%edx,2), %eax
+	0x3A96E3A7:  leal -12(%ebp,%eax,4), %eax
+	0x3A96E3AB:  movl %ecx,0xFFFFFF40(%ebp)
+	0x3A96E3B1:  movl -72(%eax),%edx
+	0x3A96E3B4:  movl -68(%eax),%eax
+	0x3A96E3B7:  movl %eax,0xFFFFFF4C(%ebp)
+	0x3A96E3BD:  addl %edx,%eax
+	0x3A96E3BF:  movl %eax,0xFFFFFF44(%ebp)
+	0x3A96E3C5:  movl 48(%esi),%eax
+	0x3A96E3C8:  movl 4(%eax),%eax
+	0x3A96E3CB:  movl %eax,0xFFFFFF3C(%ebp)
+	0x3A96E3D1:  movl 0xB8(%esi),%eax
+	0x3A96E3D7:  testl %eax,%eax
+	0x3A96E3D9:  jz-8 0x3A96E3DE
+. 1045 3A96E380 91
+. 31 C0 8D BB E4 F9 FF FF 89 85 54 FF FF FF 89 BD 14 FF FF FF 89 B5 50 FF FF FF 8B 95 54 FF FF FF 31 FF 8B 0E 8D 04 52 8D 44 85 F4 89 8D 40 FF FF FF 8B 50 B8 8B 40 BC 89 85 4C FF FF FF 01 D0 89 85 44 FF FF FF 8B 46 30 8B 40 04 89 85 3C FF FF FF 8B 86 B8 00 00 00 85 C0 74 03 
+
+==== BB 1046 (0x3A96E3DE) in 19804B, out 108716B, BBs exec'd 0 ====
+	0x3A96E3DE:  movl 0xFFFFFF4C(%ebp),%eax
+	0x3A96E3E4:  movl %edx,%ecx
+	0x3A96E3E6:  movl $0xAAAAAAAB,%edx
+	0x3A96E3EB:  mull %edx
+	0x3A96E3ED:  shrl $0x3, %edx
+	0x3A96E3F0:  cmpl %edi,%edx
+	0x3A96E3F2:  jbe-8 0x3A96E3F6
+. 1046 3A96E3DE 22
+. 8B 85 4C FF FF FF 89 D1 BA AB AA AA AA F7 E2 C1 EA 03 39 FA 76 02 
+
+==== BB 1047 (0x3A96E3F6) in 19826B, out 108844B, BBs exec'd 0 ====
+	0x3A96E3F6:  cmpl 0xFFFFFF14(%ebp),%esi
+	0x3A96E3FC:  leal (%edx,%edx,2), %eax
+	0x3A96E3FF:  leal (%ecx,%eax,4), %eax
+	0x3A96E402:  movl %eax,0xFFFFFF48(%ebp)
+	0x3A96E408:  jz-8 0x3A96E452
+. 1047 3A96E3F6 20
+. 3B B5 14 FF FF FF 8D 04 52 8D 04 81 89 85 48 FF FF FF 74 48 
+
+==== BB 1048 (0x3A96E40A) in 19846B, out 108956B, BBs exec'd 0 ====
+	0x3A96E40A:  movl 0xFFFFFF40(%ebp),%eax
+	0x3A96E410:  testl %eax,%eax
+	0x3A96E412:  jnz-8 0x3A96E41E
+. 1048 3A96E40A 10
+. 8B 85 40 FF FF FF 85 C0 75 0A 
+
+==== BB 1049 (0x3A96E41E) in 19856B, out 109031B, BBs exec'd 0 ====
+	0x3A96E41E:  cmpl 0xFFFFFF48(%ebp),%ecx
+	0x3A96E424:  jnb-8 0x3A96E452
+. 1049 3A96E41E 8
+. 3B 8D 48 FF FF FF 73 2C 
+
+==== BB 1050 (0x3A96E452) in 19864B, out 109099B, BBs exec'd 0 ====
+	0x3A96E452:  movl 0xDC(%esi),%eax
+	0x3A96E458:  testl %eax,%eax
+	0x3A96E45A:  jz-32 0x3A96E750
+. 1050 3A96E452 14
+. 8B 86 DC 00 00 00 85 C0 0F 84 F0 02 00 00 
+
+==== BB 1051 (0x3A96E460) in 19878B, out 109174B, BBs exec'd 0 ====
+	0x3A96E460:  movl 0xFFFFFF44(%ebp),%edx
+	0x3A96E466:  movl 4(%eax),%eax
+	0x3A96E469:  cmpl %edx,0xFFFFFF48(%ebp)
+	0x3A96E46F:  movl %eax,0xFFFFFF38(%ebp)
+	0x3A96E475:  jnb-32 0x3A96E2B0
+. 1051 3A96E460 27
+. 8B 95 44 FF FF FF 8B 40 04 39 95 48 FF FF FF 89 85 38 FF FF FF 0F 83 35 FE FF FF 
+
+==== BB 1052 (0x3A96E2B0) in 19905B, out 109284B, BBs exec'd 0 ====
+	0x3A96E2B0:  incl 0xFFFFFF54(%ebp)
+	0x3A96E2B6:  cmpl $0x1, 0xFFFFFF54(%ebp)
+	0x3A96E2BD:  jle-32 0x3A96E394
+. 1052 3A96E2B0 19
+. FF 85 54 FF FF FF 83 BD 54 FF FF FF 01 0F 8E D1 00 00 00 
+
+==== BB 1053 (0x3A96E394) in 19924B, out 109367B, BBs exec'd 0 ====
+	0x3A96E394:  movl %esi,0xFFFFFF50(%ebp)
+	0x3A96E39A:  movl 0xFFFFFF54(%ebp),%edx
+	0x3A96E3A0:  xorl %edi, %edi
+	0x3A96E3A2:  movl (%esi),%ecx
+	0x3A96E3A4:  leal (%edx,%edx,2), %eax
+	0x3A96E3A7:  leal -12(%ebp,%eax,4), %eax
+	0x3A96E3AB:  movl %ecx,0xFFFFFF40(%ebp)
+	0x3A96E3B1:  movl -72(%eax),%edx
+	0x3A96E3B4:  movl -68(%eax),%eax
+	0x3A96E3B7:  movl %eax,0xFFFFFF4C(%ebp)
+	0x3A96E3BD:  addl %edx,%eax
+	0x3A96E3BF:  movl %eax,0xFFFFFF44(%ebp)
+	0x3A96E3C5:  movl 48(%esi),%eax
+	0x3A96E3C8:  movl 4(%eax),%eax
+	0x3A96E3CB:  movl %eax,0xFFFFFF3C(%ebp)
+	0x3A96E3D1:  movl 0xB8(%esi),%eax
+	0x3A96E3D7:  testl %eax,%eax
+	0x3A96E3D9:  jz-8 0x3A96E3DE
+. 1053 3A96E394 71
+. 89 B5 50 FF FF FF 8B 95 54 FF FF FF 31 FF 8B 0E 8D 04 52 8D 44 85 F4 89 8D 40 FF FF FF 8B 50 B8 8B 40 BC 89 85 4C FF FF FF 01 D0 89 85 44 FF FF FF 8B 46 30 8B 40 04 89 85 3C FF FF FF 8B 86 B8 00 00 00 85 C0 74 03 
+
+==== BB 1054 (0x3A96E2C3) in 19995B, out 109618B, BBs exec'd 0 ====
+	0x3A96E2C3:  movl 20(%ebp),%eax
+	0x3A96E2C6:  testl %eax,%eax
+	0x3A96E2C8:  jnz-32 0x3A96EDB7
+. 1054 3A96E2C3 11
+. 8B 45 14 85 C0 0F 85 E9 0A 00 00 
+
+==== BB 1055 (0x3A96E2CE) in 20006B, out 109690B, BBs exec'd 0 ====
+	0x3A96E2CE:  orb $0x4, 0x174(%esi)
+	0x3A96E2D5:  movl -104(%ebp),%ecx
+	0x3A96E2D8:  testl %ecx,%ecx
+	0x3A96E2DA:  jnz-32 0x3A96ED81
+. 1055 3A96E2CE 18
+. 80 8E 74 01 00 00 04 8B 4D 98 85 C9 0F 85 A1 0A 00 00 
+
+==== BB 1056 (0x3A96E2E0) in 20024B, out 109787B, BBs exec'd 0 ====
+	0x3A96E2E0:  movl 8(%ebp),%eax
+	0x3A96E2E3:  movl 0x210(%eax),%edx
+	0x3A96E2E9:  testl %edx,%edx
+	0x3A96E2EB:  jz-8 0x3A96E300
+. 1056 3A96E2E0 13
+. 8B 45 08 8B 90 10 02 00 00 85 D2 74 13 
+
+==== BB 1057 (0x3A96E300) in 20037B, out 109875B, BBs exec'd 0 ====
+	0x3A96E300:  leal -12(%ebp), %esp
+	0x3A96E303:  popl %ebx
+	0x3A96E304:  popl %esi
+	0x3A96E305:  popl %edi
+	0x3A96E306:  popl %ebp
+	0x3A96E307:  ret
+. 1057 3A96E300 8
+. 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 1058 (0x3A96E05C) in 20045B, out 109977B, BBs exec'd 0 ====
+	0x3A96E05C:  movl %edi,%eax
+	0x3A96E05E:  movl %ecx,%edx
+	0x3A96E060:  shrl $0x3, %eax
+	0x3A96E063:  cmpl 0xFFFFFF78(%ebp),%eax
+	0x3A96E069:  jbe-8 0x3A96E071
+. 1058 3A96E05C 15
+. 89 F8 89 CA C1 E8 03 3B 85 78 FF FF FF 76 06 
+
+==== BB 1059 (0x3A96E0B0) in 20060B, out 110069B, BBs exec'd 0 ====
+	0x3A96E0B0:  movl 0xDC(%esi),%eax
+	0x3A96E0B6:  testl %eax,%eax
+	0x3A96E0B8:  jz-32 0x3A96E631
+. 1059 3A96E0B0 14
+. 8B 86 DC 00 00 00 85 C0 0F 84 73 05 00 00 
+
+==== BB 1060 (0x3A96E414) in 20074B, out 110144B, BBs exec'd 0 ====
+	0x3A96E414:  movl 0x114(%esi),%eax
+	0x3A96E41A:  testl %eax,%eax
+	0x3A96E41C:  jnz-8 0x3A96E452
+. 1060 3A96E414 10
+. 8B 86 14 01 00 00 85 C0 75 34 
+
+==== BB 1061 (0x3A968122) in 20084B, out 110219B, BBs exec'd 0 ====
+	0x3A968122:  call 0x3A972730
+. 1061 3A968122 5
+. E8 09 A6 00 00 
+
+==== BB 1062 _dl_sysdep_start_cleanup(0x3A972730) in 20089B, out 110264B, BBs exec'd 0 ====
+	0x3A972730:  pushl %ebp
+	0x3A972731:  movl %esp,%ebp
+	0x3A972733:  popl %ebp
+	0x3A972734:  ret
+. 1062 3A972730 5
+. 55 89 E5 5D C3 
+
+==== BB 1063 (0x3A968127) in 20094B, out 110333B, BBs exec'd 0 ====
+	0x3A968127:  movl 0xFFFFF9D0(%ebx),%eax
+	0x3A96812D:  testl %eax,%eax
+	0x3A96812F:  jnz-8 0x3A968193
+. 1063 3A968127 10
+. 8B 83 D0 F9 FF FF 85 C0 75 62 
+
+==== BB 1064 (0x3A968131) in 20104B, out 110408B, BBs exec'd 0 ====
+	0x3A968131:  cmpl $0x1, 0xFFFFFB54(%ebx)
+	0x3A968138:  jbe-32 0x3A9673C4
+. 1064 3A968131 13
+. 83 BB 54 FB FF FF 01 0F 86 86 F2 FF FF 
+
+==== BB 1065 (0x3A96813E) in 20117B, out 110474B, BBs exec'd 0 ====
+	0x3A96813E:  xorl %eax, %eax
+	0x3A968140:  movl %eax,12(%esp,,)
+	0x3A968144:  xorl %eax, %eax
+	0x3A968146:  movl %eax,8(%esp,,)
+	0x3A96814A:  movl 0xFFFFF994(%ebx),%eax
+	0x3A968150:  movl 0x1B0(%eax),%eax
+	0x3A968156:  movl %edi,(%esp,,)
+	0x3A968159:  movl %eax,4(%esp,,)
+	0x3A96815D:  call 0x3A96DEB0
+. 1065 3A96813E 36
+. 31 C0 89 44 24 0C 31 C0 89 44 24 08 8B 83 94 F9 FF FF 8B 80 B0 01 00 00 89 3C 24 89 44 24 04 E8 4E 5D 00 00 
+
+==== BB 1066 (0x3A96E233) in 20153B, out 110617B, BBs exec'd 0 ====
+	0x3A96E233:  movl -48(%ebp),%edx
+	0x3A96E236:  movl -52(%ebp),%eax
+	0x3A96E239:  addl %edx,%eax
+	0x3A96E23B:  cmpl %ecx,%eax
+	0x3A96E23D:  jz-8 0x3A96E256
+. 1066 3A96E233 12
+. 8B 55 D0 8B 45 CC 01 D0 39 C8 74 17 
+
+==== BB 1067 (0x3A96E256) in 20165B, out 110706B, BBs exec'd 0 ====
+	0x3A96E256:  movl 32(%esi),%eax
+	0x3A96E259:  movl 4(%eax),%ecx
+	0x3A96E25C:  addl %ecx,%edx
+	0x3A96E25E:  movl %edx,-48(%ebp)
+	0x3A96E261:  jmp 0x3A96DFB3
+. 1067 3A96E256 16
+. 8B 46 20 8B 48 04 01 CA 89 55 D0 E9 4D FD FF FF 
+
+==== BB 1068 (0x3A96DFED) in 20181B, out 110792B, BBs exec'd 0 ====
+	0x3A96DFED:  jmp 0x3A96E330
+. 1068 3A96DFED 5
+. E9 3E 03 00 00 
+
+==== BB 1069 (0x3A968162) in 20186B, out 110820B, BBs exec'd 0 ====
+	0x3A968162:  jmp 0x3A9673C4
+. 1069 3A968162 5
+. E9 5D F2 FF FF 
+
+==== BB 1070 (0x3A9673C4) in 20191B, out 110848B, BBs exec'd 0 ====
+	0x3A9673C4:  movl 0xFFFFFE78(%ebp),%edx
+	0x3A9673CA:  movl $0x1, 12(%edx)
+	0x3A9673D1:  call 0x3A970B70
+. 1070 3A9673C4 18
+. 8B 95 78 FE FF FF C7 42 0C 01 00 00 00 E8 9A 97 00 00 
+
+==== BB 1071 _dl_debug_state(0x3A970B70) in 20209B, out 110927B, BBs exec'd 0 ====
+	0x3A970B70:  pushl %ebp
+	0x3A970B71:  movl %esp,%ebp
+	0x3A970B73:  popl %ebp
+	0x3A970B74:  ret
+. 1071 3A970B70 5
+. 55 89 E5 5D C3 
+
+==== BB 1072 (0x3A9673D6) in 20214B, out 110996B, BBs exec'd 0 ====
+	0x3A9673D6:  call 0x3A96B6D0
+. 1072 3A9673D6 5
+. E8 F5 42 00 00 
+
+==== BB 1073 _dl_unload_cache(0x3A96B6D0) in 20219B, out 111041B, BBs exec'd 0 ====
+	0x3A96B6D0:  pushl %ebp
+	0x3A96B6D1:  movl %esp,%ebp
+	0x3A96B6D3:  pushl %ebx
+	0x3A96B6D4:  subl $0x8, %esp
+	0x3A96B6D7:  call 0x3A97592B
+. 1073 3A96B6D0 12
+. 55 89 E5 53 83 EC 08 E8 4F A2 00 00 
+
+==== BB 1074 (0x3A96B6DC) in 20231B, out 111142B, BBs exec'd 0 ====
+	0x3A96B6DC:  addl $0xCF90, %ebx
+	0x3A96B6E2:  movl 0xDC(%ebx),%ecx
+	0x3A96B6E8:  testl %ecx,%ecx
+	0x3A96B6EA:  setnz %al
+	0x3A96B6ED:  cmpl $0xFFFFFFFF, %ecx
+	0x3A96B6F0:  setnz %dl
+	0x3A96B6F3:  andl %edx,%eax
+	0x3A96B6F5:  testb $0x1, %al
+	0x3A96B6F7:  jnz-8 0x3A96B700
+. 1074 3A96B6DC 29
+. 81 C3 90 CF 00 00 8B 8B DC 00 00 00 85 C9 0F 95 C0 83 F9 FF 0F 95 C2 21 D0 A8 01 75 07 
+
+==== BB 1075 (0x3A96B700) in 20260B, out 111299B, BBs exec'd 0 ====
+	0x3A96B700:  movl %ecx,(%esp,,)
+	0x3A96B703:  movl 0xE4(%ebx),%eax
+	0x3A96B709:  movl %eax,4(%esp,,)
+	0x3A96B70D:  call 0x3A974C90
+. 1075 3A96B700 18
+. 89 0C 24 8B 83 E4 00 00 00 89 44 24 04 E8 7E 95 00 00 
+
+==== BB 1076 munmap(0x3A974C90) in 20278B, out 111383B, BBs exec'd 0 ====
+	0x3A974C90:  movl %ebx,%edx
+	0x3A974C92:  movl 8(%esp,,),%ecx
+	0x3A974C96:  movl 4(%esp,,),%ebx
+	0x3A974C9A:  movl $0x5B,%eax
+	0x3A974C9F:  int $0x80
+. 1076 3A974C90 17
+. 89 DA 8B 4C 24 08 8B 5C 24 04 B8 5B 00 00 00 CD 80 
+
+==== BB 1077 (0x3A974CA1) in 20295B, out 111460B, BBs exec'd 0 ====
+	0x3A974CA1:  movl %edx,%ebx
+	0x3A974CA3:  cmpl $0xFFFFF001, %eax
+	0x3A974CA8:  jnb-8 0x3A974CAB
+. 1077 3A974CA1 9
+. 89 D3 3D 01 F0 FF FF 73 01 
+
+==== BB 1078 (0x3A974CAA) in 20304B, out 111530B, BBs exec'd 0 ====
+	0x3A974CAA:  ret
+. 1078 3A974CAA 1
+. C3 
+
+==== BB 1079 (0x3A96B712) in 20305B, out 111560B, BBs exec'd 0 ====
+	0x3A96B712:  xorl %eax, %eax
+	0x3A96B714:  movl %eax,0xDC(%ebx)
+	0x3A96B71A:  addl $0x8, %esp
+	0x3A96B71D:  popl %ebx
+	0x3A96B71E:  popl %ebp
+	0x3A96B71F:  ret
+. 1079 3A96B712 14
+. 31 C0 89 83 DC 00 00 00 83 C4 08 5B 5D C3 
+
+==== BB 1080 (0x3A9673DB) in 20319B, out 111664B, BBs exec'd 0 ====
+	0x3A9673DB:  leal -12(%ebp), %esp
+	0x3A9673DE:  popl %ebx
+	0x3A9673DF:  popl %esi
+	0x3A9673E0:  popl %edi
+	0x3A9673E1:  popl %ebp
+	0x3A9673E2:  ret
+. 1080 3A9673DB 8
+. 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 1081 (0x3A9730A0) in 20327B, out 111766B, BBs exec'd 0 ====
+	0x3A9730A0:  movl 0xFFFFFE20(%ebp),%eax
+	0x3A9730A6:  addl $0x204, %esp
+	0x3A9730AC:  popl %ebx
+	0x3A9730AD:  popl %esi
+	0x3A9730AE:  popl %edi
+	0x3A9730AF:  popl %ebp
+	0x3A9730B0:  ret
+. 1081 3A9730A0 17
+. 8B 85 20 FE FF FF 81 C4 04 02 00 00 5B 5E 5F 5D C3 
+
+==== BB 1082 (0x3A965ECA) in 20344B, out 111894B, BBs exec'd 0 ====
+	0x3A965ECA:  cmpb $0x0, 0xFFFFFC14(%ebx)
+	0x3A965ED1:  movl %eax,%edi
+	0x3A965ED3:  js-8 0x3A965F30
+. 1082 3A965ECA 11
+. 80 BB 14 FC FF FF 00 89 C7 78 5B 
+
+==== BB 1083 (0x3A965ED5) in 20355B, out 111970B, BBs exec'd 0 ====
+	0x3A965ED5:  addl $0x54, %esp
+	0x3A965ED8:  movl %edi,%eax
+	0x3A965EDA:  popl %ebx
+	0x3A965EDB:  popl %esi
+	0x3A965EDC:  popl %edi
+	0x3A965EDD:  popl %ebp
+	0x3A965EDE:  ret
+. 1083 3A965ED5 10
+. 83 C4 54 89 F8 5B 5E 5F 5D C3 
+
+==== BB 1084 (0x3A965887) in 20365B, out 112088B, BBs exec'd 0 ====
+	0x3A965887:  movl %eax,%edi
+	0x3A965889:  call 0x3A965870
+. 1084 3A965887 7
+. 89 C7 E8 E2 FF FF FF 
+
+==== BB 1085 (0x3A965870) in 20372B, out 112143B, BBs exec'd 0 ====
+	0x3A965870:  movl (%esp,,),%ebx
+	0x3A965873:  ret
+. 1085 3A965870 4
+. 8B 1C 24 C3 
+
+==== BB 1086 (0x3A96588E) in 20376B, out 112183B, BBs exec'd 0 ====
+	0x3A96588E:  addl $0x12DDE, %ebx
+	0x3A965894:  movl 0x208(%ebx),%eax
+	0x3A96589A:  popl %edx
+	0x3A96589B:  leal (%esp,%eax,4), %esp
+	0x3A96589E:  subl %eax,%edx
+	0x3A9658A0:  pushl %edx
+	0x3A9658A1:  movl 0xFFFFF994(%ebx),%eax
+	0x3A9658A7:  leal 8(%esp,%edx,4), %esi
+	0x3A9658AB:  leal 4(%esp,,), %ecx
+	0x3A9658AF:  pushl %esi
+	0x3A9658B0:  call 0x3A970680
+. 1086 3A96588E 39
+. 81 C3 DE 2D 01 00 8B 83 08 02 00 00 5A 8D 24 84 29 C2 52 8B 83 94 F9 FF FF 8D 74 94 08 8D 4C 24 04 56 E8 CB AD 00 00 
+
+==== BB 1087 _dl_init_internal(0x3A970680) in 20415B, out 112357B, BBs exec'd 0 ====
+	0x3A970680:  pushl %ebp
+	0x3A970681:  movl %esp,%ebp
+	0x3A970683:  pushl %edi
+	0x3A970684:  movl %eax,%edi
+	0x3A970686:  pushl %esi
+	0x3A970687:  pushl %ebx
+	0x3A970688:  subl $0x2C, %esp
+	0x3A97068B:  movl 0x98(%eax),%eax
+	0x3A970691:  call 0x3A97592B
+. 1087 3A970680 22
+. 55 89 E5 57 89 C7 56 53 83 EC 2C 8B 80 98 00 00 00 E8 95 52 00 00 
+
+==== BB 1088 (0x3A970696) in 20437B, out 112519B, BBs exec'd 0 ====
+	0x3A970696:  addl $0x7FD6, %ebx
+	0x3A97069C:  movl %edx,-16(%ebp)
+	0x3A97069F:  movl 0x9C(%edi),%esi
+	0x3A9706A5:  movl %eax,-24(%ebp)
+	0x3A9706A8:  movl 0xFFFFF9CC(%ebx),%eax
+	0x3A9706AE:  movl %ecx,-20(%ebp)
+	0x3A9706B1:  testl %eax,%eax
+	0x3A9706B3:  jnz-32 0x3A97074A
+. 1088 3A970696 35
+. 81 C3 D6 7F 00 00 89 55 F0 8B B7 9C 00 00 00 89 45 E8 8B 83 CC F9 FF FF 89 4D EC 85 C0 0F 85 91 00 00 00 
+
+==== BB 1089 (0x3A97074A) in 20472B, out 112668B, BBs exec'd 0 ====
+	0x3A97074A:  movl 8(%ebp),%edx
+	0x3A97074D:  movl %edx,(%esp,,)
+	0x3A970750:  movl -16(%ebp),%edx
+	0x3A970753:  call 0x3A970560
+. 1089 3A97074A 14
+. 8B 55 08 89 14 24 8B 55 F0 E8 08 FE FF FF 
+
+==== BB 1090 call_init(0x3A970560) in 20486B, out 112746B, BBs exec'd 0 ====
+	0x3A970560:  pushl %ebp
+	0x3A970561:  movl %esp,%ebp
+	0x3A970563:  subl $0x28, %esp
+	0x3A970566:  movl %ebx,-12(%ebp)
+	0x3A970569:  movl %esi,-8(%ebp)
+	0x3A97056C:  movl %eax,%esi
+	0x3A97056E:  movl %edi,-4(%ebp)
+	0x3A970571:  movzbl 0x174(%eax),%eax
+	0x3A970578:  call 0x3A97592B
+. 1090 3A970560 29
+. 55 89 E5 83 EC 28 89 5D F4 89 75 F8 89 C6 89 7D FC 0F B6 80 74 01 00 00 E8 AE 53 00 00 
+
+==== BB 1091 (0x3A97057D) in 20515B, out 112899B, BBs exec'd 0 ====
+	0x3A97057D:  addl $0x80EF, %ebx
+	0x3A970583:  movl %edx,-16(%ebp)
+	0x3A970586:  movl %ecx,-20(%ebp)
+	0x3A970589:  testb $0x8, %al
+	0x3A97058B:  jnz-8 0x3A9705C0
+. 1091 3A97057D 16
+. 81 C3 EF 80 00 00 89 55 F0 89 4D EC A8 08 75 33 
+
+==== BB 1092 (0x3A97058D) in 20531B, out 113006B, BBs exec'd 0 ====
+	0x3A97058D:  movb %al,%dl
+	0x3A97058F:  movl 4(%esi),%eax
+	0x3A970592:  orb $0x8, %dl
+	0x3A970595:  movb %dl,0x174(%esi)
+	0x3A97059B:  movzbl (%eax),%ecx
+	0x3A97059E:  testb %cl,%cl
+	0x3A9705A0:  jz-32 0x3A97063E
+. 1092 3A97058D 25
+. 88 C2 8B 46 04 80 CA 08 88 96 74 01 00 00 0F B6 08 84 C9 0F 84 98 00 00 00 
+
+==== BB 1093 (0x3A9705A6) in 20556B, out 113148B, BBs exec'd 0 ====
+	0x3A9705A6:  movl 72(%esi),%edx
+	0x3A9705A9:  testl %edx,%edx
+	0x3A9705AB:  jnz-8 0x3A9705D0
+. 1093 3A9705A6 7
+. 8B 56 48 85 D2 75 23 
+
+==== BB 1094 (0x3A9705D0) in 20563B, out 113220B, BBs exec'd 0 ====
+	0x3A9705D0:  testb $0x2, 0xFFFFFC14(%ebx)
+	0x3A9705D7:  jnz-8 0x3A970655
+. 1094 3A9705D0 9
+. F6 83 14 FC FF FF 02 75 7C 
+
+==== BB 1095 (0x3A9705D9) in 20572B, out 113290B, BBs exec'd 0 ====
+	0x3A9705D9:  testl %edx,%edx
+	0x3A9705DB:  jnz-8 0x3A97061F
+. 1095 3A9705D9 4
+. 85 D2 75 42 
+
+==== BB 1096 (0x3A97061F) in 20576B, out 113349B, BBs exec'd 0 ====
+	0x3A97061F:  movl 4(%edx),%eax
+	0x3A970622:  movl 8(%ebp),%edx
+	0x3A970625:  movl (%esi),%edi
+	0x3A970627:  movl %edx,8(%esp,,)
+	0x3A97062B:  movl -20(%ebp),%edx
+	0x3A97062E:  addl %edi,%eax
+	0x3A970630:  movl %edx,4(%esp,,)
+	0x3A970634:  movl -16(%ebp),%edx
+	0x3A970637:  movl %edx,(%esp,,)
+	0x3A97063A:  call*l %eax
+. 1096 3A97061F 29
+. 8B 42 04 8B 55 08 8B 3E 89 54 24 08 8B 55 EC 01 F8 89 54 24 04 8B 55 F0 89 14 24 FF D0 
+
+==== BB 1097 (0x3A97C92C) in 20605B, out 113486B, BBs exec'd 0 ====
+	0x3A97C92C:  pushl %ebp
+	0x3A97C92D:  movl %esp,%ebp
+	0x3A97C92F:  subl $0x8, %esp
+	0x3A97C932:  call 0x3A97C9C0
+. 1097 3A97C92C 11
+. 55 89 E5 83 EC 08 E8 89 00 00 00 
+
+==== BB 1098 (0x3A97C9C0) in 20616B, out 113571B, BBs exec'd 0 ====
+	0x3A97C9C0:  pushl %ebp
+	0x3A97C9C1:  movl %esp,%ebp
+	0x3A97C9C3:  pushl %ebx
+	0x3A97C9C4:  call 0x3A97C9C9 ; popl %ebx
+	0x3A97C9CA:  addl $0x1417, %ebx
+	0x3A97C9D0:  pushl %edx
+	0x3A97C9D1:  movl 0x2C(%ebx),%eax
+	0x3A97C9D7:  testl %eax,%eax
+	0x3A97C9D9:  jz-8 0x3A97C9DD
+. 1098 3A97C9C0 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 17 14 00 00 52 8B 83 2C 00 00 00 85 C0 74 02 
+
+==== BB 1099 (0x3A97C9DD) in 20643B, out 113723B, BBs exec'd 0 ====
+	0x3A97C9DD:  popl %eax
+	0x3A97C9DE:  popl %ebx
+	0x3A97C9DF:  leave	0x3A97C9E0:  ret
+. 1099 3A97C9DD 4
+. 58 5B C9 C3 
+
+==== BB 1100 (0x3A97C937) in 20647B, out 113807B, BBs exec'd 0 ====
+	0x3A97C937:  call 0x3A97CA60
+. 1100 3A97C937 5
+. E8 24 01 00 00 
+
+==== BB 1101 (0x3A97CA60) in 20652B, out 113852B, BBs exec'd 0 ====
+	0x3A97CA60:  pushl %ebp
+	0x3A97CA61:  movl %esp,%ebp
+	0x3A97CA63:  pushl %ebx
+	0x3A97CA64:  call 0x3A97CA69 ; popl %ebx
+	0x3A97CA6A:  addl $0x1377, %ebx
+	0x3A97CA70:  pushl %eax
+	0x3A97CA71:  movl 0xFFFFFFFC(%ebx),%eax
+	0x3A97CA77:  testl %eax,%eax
+	0x3A97CA79:  jz-8 0x3A97CA85
+. 1101 3A97CA60 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 77 13 00 00 50 8B 83 FC FF FF FF 85 C0 74 0A 
+
+==== BB 1102 (0x3A97CA85) in 20679B, out 114004B, BBs exec'd 0 ====
+	0x3A97CA85:  movl -4(%ebp),%ebx
+	0x3A97CA88:  leave	0x3A97CA89:  ret
+. 1102 3A97CA85 5
+. 8B 5D FC C9 C3 
+
+==== BB 1103 (0x3A97C93C) in 20684B, out 114066B, BBs exec'd 0 ====
+	0x3A97C93C:  call 0x3A97CCA0
+. 1103 3A97C93C 5
+. E8 5F 03 00 00 
+
+==== BB 1104 (0x3A97CCA0) in 20689B, out 114111B, BBs exec'd 0 ====
+	0x3A97CCA0:  pushl %ebp
+	0x3A97CCA1:  movl %esp,%ebp
+	0x3A97CCA3:  pushl %esi
+	0x3A97CCA4:  pushl %ebx
+	0x3A97CCA5:  call 0x3A97CCAA ; popl %ebx
+	0x3A97CCAB:  addl $0x1136, %ebx
+	0x3A97CCB1:  leal 0xFFFFFFF0(%ebx), %eax
+	0x3A97CCB7:  leal -4(%eax), %esi
+	0x3A97CCBA:  movl -4(%eax),%eax
+	0x3A97CCBD:  cmpl $0xFFFFFFFF, %eax
+	0x3A97CCC0:  jz-8 0x3A97CCCE
+. 1104 3A97CCA0 34
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 36 11 00 00 8D 83 F0 FF FF FF 8D 70 FC 8B 40 FC 83 F8 FF 74 0C 
+
+==== BB 1105 (0x3A97CCCE) in 20723B, out 114278B, BBs exec'd 0 ====
+	0x3A97CCCE:  popl %ebx
+	0x3A97CCCF:  popl %esi
+	0x3A97CCD0:  popl %ebp
+	0x3A97CCD1:  ret
+. 1105 3A97CCCE 4
+. 5B 5E 5D C3 
+
+==== BB 1106 (0x3A97C941) in 20727B, out 114356B, BBs exec'd 0 ====
+	0x3A97C941:  leave	0x3A97C942:  ret
+. 1106 3A97C941 2
+. C9 C3 
+
+==== BB 1107 (0x3A97063C) in 20729B, out 114405B, BBs exec'd 0 ====
+	0x3A97063C:  jmp-8 0x3A9705DD
+. 1107 3A97063C 2
+. EB 9F 
+
+==== BB 1108 (0x3A9705DD) in 20731B, out 114433B, BBs exec'd 0 ====
+	0x3A9705DD:  movl 124(%esi),%edx
+	0x3A9705E0:  testl %edx,%edx
+	0x3A9705E2:  jz-8 0x3A9705C0
+. 1108 3A9705DD 7
+. 8B 56 7C 85 D2 74 DC 
+
+==== BB 1109 (0x3A9705C0) in 20738B, out 114505B, BBs exec'd 0 ====
+	0x3A9705C0:  movl -12(%ebp),%ebx
+	0x3A9705C3:  movl -8(%ebp),%esi
+	0x3A9705C6:  movl -4(%ebp),%edi
+	0x3A9705C9:  movl %ebp,%esp
+	0x3A9705CB:  popl %ebp
+	0x3A9705CC:  ret
+. 1109 3A9705C0 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 1110 (0x3A970758) in 20751B, out 114597B, BBs exec'd 0 ====
+	0x3A970758:  xorl %eax, %eax
+	0x3A97075A:  movl %eax,0xFFFFF9CC(%ebx)
+	0x3A970760:  movl -24(%ebp),%eax
+	0x3A970763:  testl %eax,%eax
+	0x3A970765:  jz-32 0x3A9706C4
+. 1110 3A970758 19
+. 31 C0 89 83 CC F9 FF FF 8B 45 E8 85 C0 0F 84 59 FF FF FF 
+
+==== BB 1111 (0x3A9706C4) in 20770B, out 114696B, BBs exec'd 0 ====
+	0x3A9706C4:  xorl %eax, %eax
+	0x3A9706C6:  call 0x3A970B30
+. 1111 3A9706C4 7
+. 31 C0 E8 65 04 00 00 
+
+==== BB 1112 (0x3A970B65) in 20777B, out 114759B, BBs exec'd 0 ====
+	0x3A970B65:  popl %ebp
+	0x3A970B66:  movl %edx,%eax
+	0x3A970B68:  ret
+. 1112 3A970B65 4
+. 5D 89 D0 C3 
+
+==== BB 1113 (0x3A9706CB) in 20781B, out 114815B, BBs exec'd 0 ====
+	0x3A9706CB:  movl %eax,-28(%ebp)
+	0x3A9706CE:  movl $0x1, 12(%eax)
+	0x3A9706D5:  call 0x3A970B70
+. 1113 3A9706CB 15
+. 89 45 E4 C7 40 0C 01 00 00 00 E8 96 04 00 00 
+
+==== BB 1114 (0x3A9706DA) in 20796B, out 114891B, BBs exec'd 0 ====
+	0x3A9706DA:  movl 0x154(%edi),%esi
+	0x3A9706E0:  movl %esi,%eax
+	0x3A9706E2:  decl %esi
+	0x3A9706E3:  testl %eax,%eax
+	0x3A9706E5:  jnz-8 0x3A970708
+. 1114 3A9706DA 13
+. 8B B7 54 01 00 00 89 F0 4E 85 C0 75 21 
+
+==== BB 1115 (0x3A970708) in 20809B, out 114984B, BBs exec'd 0 ====
+	0x3A970708:  movl 0x1D4(%edi),%eax
+	0x3A97070E:  movl 8(%ebp),%edx
+	0x3A970711:  movl (%eax,%esi,4),%eax
+	0x3A970714:  movl %edx,(%esp,,)
+	0x3A970717:  movl -20(%ebp),%ecx
+	0x3A97071A:  movl -16(%ebp),%edx
+	0x3A97071D:  call 0x3A970560
+. 1115 3A970708 26
+. 8B 87 D4 01 00 00 8B 55 08 8B 04 B0 89 14 24 8B 4D EC 8B 55 F0 E8 3E FE FF FF 
+
+==== BB 1116 (0x3A9705AD) in 20835B, out 115108B, BBs exec'd 0 ====
+	0x3A9705AD:  movl 124(%esi),%edi
+	0x3A9705B0:  testl %edi,%edi
+	0x3A9705B2:  jnz-8 0x3A9705D0
+. 1116 3A9705AD 7
+. 8B 7E 7C 85 FF 75 1C 
+
+==== BB 1117 (0x3A9705B4) in 20842B, out 115180B, BBs exec'd 0 ====
+	0x3A9705B4:  leal 0x0(%esi), %esi
+	0x3A9705BA:  leal 0x0(%edi), %edi
+	0x3A9705C0:  movl -12(%ebp),%ebx
+	0x3A9705C3:  movl -8(%ebp),%esi
+	0x3A9705C6:  movl -4(%ebp),%edi
+	0x3A9705C9:  movl %ebp,%esp
+	0x3A9705CB:  popl %ebp
+	0x3A9705CC:  ret
+. 1117 3A9705B4 25
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 1118 (0x3A970722) in 20867B, out 115290B, BBs exec'd 0 ====
+	0x3A970722:  movl %esi,%eax
+	0x3A970724:  decl %esi
+	0x3A970725:  testl %eax,%eax
+	0x3A970727:  jnz-8 0x3A970708
+. 1118 3A970722 7
+. 89 F0 4E 85 C0 75 DF 
+
+==== BB 1119 _init(0x3A9B37C0) in 20874B, out 115367B, BBs exec'd 0 ====
+	0x3A9B37C0:  pushl %ebp
+	0x3A9B37C1:  xorl %eax, %eax
+	0x3A9B37C3:  movl %esp,%ebp
+	0x3A9B37C5:  subl $0x18, %esp
+	0x3A9B37C8:  movl %ebx,-12(%ebp)
+	0x3A9B37CB:  call 0x3A9B3791
+. 1119 3A9B37C0 16
+. 55 31 C0 89 E5 83 EC 18 89 5D F4 E8 C1 FF FF FF 
+
+==== BB 1120 __i686.get_pc_thunk.bx(0x3A9B3791) in 20890B, out 115479B, BBs exec'd 0 ====
+	0x3A9B3791:  movl (%esp,,),%ebx
+	0x3A9B3794:  ret
+. 1120 3A9B3791 4
+. 8B 1C 24 C3 
+
+==== BB 1121 (0x3A9B37D0) in 20894B, out 115519B, BBs exec'd 0 ====
+	0x3A9B37D0:  addl $0xFC840, %ebx
+	0x3A9B37D6:  movl %esi,-8(%ebp)
+	0x3A9B37D9:  movl 0xA4(%ebx),%edx
+	0x3A9B37DF:  movl 16(%ebp),%esi
+	0x3A9B37E2:  movl %edi,-4(%ebp)
+	0x3A9B37E5:  testl %edx,%edx
+	0x3A9B37E7:  movl 12(%ebp),%edi
+	0x3A9B37EA:  jz-8 0x3A9B3800
+. 1121 3A9B37D0 28
+. 81 C3 40 C8 0F 00 89 75 F8 8B 93 A4 00 00 00 8B 75 10 89 7D FC 85 D2 8B 7D 0C 74 14 
+
+==== BB 1122 (0x3A9B37EC) in 20922B, out 115662B, BBs exec'd 0 ====
+	0x3A9B37EC:  movl (%edx),%edx
+	0x3A9B37EE:  testl %edx,%edx
+	0x3A9B37F0:  jnz-8 0x3A9B3800
+. 1122 3A9B37EC 6
+. 8B 12 85 D2 75 0E 
+
+==== BB 1123 (0x3A9B3800) in 20928B, out 115731B, BBs exec'd 0 ====
+	0x3A9B3800:  movl %eax,0xFFFF9010(%ebx)
+	0x3A9B3806:  testl %eax,%eax
+	0x3A9B3808:  jnz-8 0x3A9B3830
+. 1123 3A9B3800 10
+. 89 83 10 90 FF FF 85 C0 75 26 
+
+==== BB 1124 (0x3A9B380A) in 20938B, out 115806B, BBs exec'd 0 ====
+	0x3A9B380A:  movl 0x180(%ebx),%eax
+	0x3A9B3810:  movzwl (%eax),%edx
+	0x3A9B3813:  movl 0x168(%ebx),%eax
+	0x3A9B3819:  cmpw 56(%eax),%dx
+	0x3A9B381D:  jz-8 0x3A9B3830
+. 1124 3A9B380A 21
+. 8B 83 80 01 00 00 0F B7 10 8B 83 68 01 00 00 66 3B 50 38 74 11 
+
+==== BB 1125 (0x3A9B3830) in 20959B, out 115912B, BBs exec'd 0 ====
+	0x3A9B3830:  movl %edi,0x28D8(%ebx)
+	0x3A9B3836:  movl 8(%ebp),%eax
+	0x3A9B3839:  movl %eax,0x28D4(%ebx)
+	0x3A9B383F:  movl 0x13C(%ebx),%eax
+	0x3A9B3845:  movl %esi,(%eax)
+	0x3A9B3847:  movl %esi,8(%esp,,)
+	0x3A9B384B:  movl %edi,4(%esp,,)
+	0x3A9B384F:  movl 8(%ebp),%eax
+	0x3A9B3852:  movl %eax,(%esp,,)
+	0x3A9B3855:  call 0x3AA57E00
+. 1125 3A9B3830 42
+. 89 BB D8 28 00 00 8B 45 08 89 83 D4 28 00 00 8B 83 3C 01 00 00 89 30 89 74 24 08 89 7C 24 04 8B 45 08 89 04 24 E8 A6 45 0A 00 
+
+==== BB 1126 __init_misc(0x3AA57E00) in 21001B, out 116065B, BBs exec'd 0 ====
+	0x3AA57E00:  pushl %ebp
+	0x3AA57E01:  movl %esp,%ebp
+	0x3AA57E03:  subl $0x14, %esp
+	0x3AA57E06:  movl %edi,-4(%ebp)
+	0x3AA57E09:  movl 12(%ebp),%edi
+	0x3AA57E0C:  movl %ebx,-12(%ebp)
+	0x3AA57E0F:  call 0x3A9B3791
+. 1126 3AA57E00 20
+. 55 89 E5 83 EC 14 89 7D FC 8B 7D 0C 89 5D F4 E8 7D B9 F5 FF 
+
+==== BB 1127 (0x3AA57E14) in 21021B, out 116189B, BBs exec'd 0 ====
+	0x3AA57E14:  addl $0x581FC, %ebx
+	0x3AA57E1A:  movl %esi,-8(%ebp)
+	0x3AA57E1D:  testl %edi,%edi
+	0x3AA57E1F:  jz-8 0x3AA57E51
+. 1127 3AA57E14 13
+. 81 C3 FC 81 05 00 89 75 F8 85 FF 74 30 
+
+==== BB 1128 (0x3AA57E21) in 21034B, out 116280B, BBs exec'd 0 ====
+	0x3AA57E21:  movl (%edi),%esi
+	0x3AA57E23:  testl %esi,%esi
+	0x3AA57E25:  jz-8 0x3AA57E51
+. 1128 3AA57E21 6
+. 8B 37 85 F6 74 2A 
+
+==== BB 1129 (0x3AA57E27) in 21040B, out 116349B, BBs exec'd 0 ====
+	0x3AA57E27:  movl %esi,(%esp,,)
+	0x3AA57E2A:  movl $0x2F,%eax
+	0x3AA57E2F:  movl %eax,4(%esp,,)
+	0x3AA57E33:  call 0x3A9B35E0
+. 1129 3AA57E27 17
+. 89 34 24 B8 2F 00 00 00 89 44 24 04 E8 A8 B7 F5 FF 
+
+==== BB 1130 (0x3A9B35E0) in 21057B, out 116426B, BBs exec'd 0 ====
+	0x3A9B35E0:  jmp*l 0x40(%ebx)
+. 1130 3A9B35E0 6
+. FF A3 40 00 00 00 
+
+==== BB 1131 (0x3A9B35E6) in 21063B, out 116453B, BBs exec'd 0 ====
+	0x3A9B35E6:  pushl $0x68
+	0x3A9B35EB:  jmp 0x3A9B3500
+. 1131 3A9B35E6 10
+. 68 68 00 00 00 E9 10 FF FF FF 
+
+==== BB 1132 (0x3A9B3500) in 21073B, out 116502B, BBs exec'd 0 ====
+	0x3A9B3500:  pushl 0x4(%ebx)
+	0x3A9B3506:  jmp*l 0x8(%ebx)
+. 1132 3A9B3500 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00 
+
+==== BB 1133 _dl_runtime_resolve(0x3A96FEF0) in 21085B, out 116551B, BBs exec'd 0 ====
+	0x3A96FEF0:  pushl %eax
+	0x3A96FEF1:  pushl %ecx
+	0x3A96FEF2:  pushl %edx
+	0x3A96FEF3:  movl 16(%esp,,),%edx
+	0x3A96FEF7:  movl 12(%esp,,),%eax
+	0x3A96FEFB:  call 0x3A970060
+. 1133 3A96FEF0 16
+. 50 51 52 8B 54 24 10 8B 44 24 0C E8 60 01 00 00 
+
+==== BB 1134 fixup(0x3A970060) in 21101B, out 116670B, BBs exec'd 0 ====
+	0x3A970060:  pushl %ebp
+	0x3A970061:  movl %esp,%ebp
+	0x3A970063:  pushl %edi
+	0x3A970064:  pushl %esi
+	0x3A970065:  movl %eax,%esi
+	0x3A970067:  pushl %ebx
+	0x3A970068:  subl $0x24, %esp
+	0x3A97006B:  movl 48(%eax),%ecx
+	0x3A97006E:  movl 44(%eax),%eax
+	0x3A970071:  call 0x3A97592B
+. 1134 3A970060 22
+. 55 89 E5 57 56 89 C6 53 83 EC 24 8B 48 30 8B 40 2C E8 B5 58 00 00 
+
+==== BB 1135 (0x3A970076) in 21123B, out 116842B, BBs exec'd 0 ====
+	0x3A970076:  addl $0x85F6, %ebx
+	0x3A97007C:  movl 4(%eax),%eax
+	0x3A97007F:  movl %eax,-20(%ebp)
+	0x3A970082:  movl 116(%esi),%eax
+	0x3A970085:  movl 4(%eax),%edi
+	0x3A970088:  addl %edi,%edx
+	0x3A97008A:  movl 4(%edx),%edi
+	0x3A97008D:  movl (%edx),%edx
+	0x3A97008F:  shrl $0x8, %edi
+	0x3A970092:  movl %edi,%eax
+	0x3A970094:  shll $0x4, %eax
+	0x3A970097:  addl 4(%ecx),%eax
+	0x3A97009A:  movl (%esi),%ecx
+	0x3A97009C:  addl %ecx,%edx
+	0x3A97009E:  movl %edx,-24(%ebp)
+	0x3A9700A1:  movl %eax,-28(%ebp)
+	0x3A9700A4:  testb $0x3, 13(%eax)
+	0x3A9700A8:  movl %eax,-16(%ebp)
+	0x3A9700AB:  jnz-32 0x3A970148
+. 1135 3A970076 59
+. 81 C3 F6 85 00 00 8B 40 04 89 45 EC 8B 46 74 8B 78 04 01 FA 8B 7A 04 8B 12 C1 EF 08 89 F8 C1 E0 04 03 41 04 8B 0E 01 CA 89 55 E8 89 45 E4 F6 40 0D 03 89 45 F0 0F 85 97 00 00 00 
+
+==== BB 1136 (0x3A9700B1) in 21182B, out 117085B, BBs exec'd 0 ====
+	0x3A9700B1:  movl 0xDC(%esi),%eax
+	0x3A9700B7:  xorl %edx, %edx
+	0x3A9700B9:  testl %eax,%eax
+	0x3A9700BB:  jz-8 0x3A9700E2
+. 1136 3A9700B1 12
+. 8B 86 DC 00 00 00 31 D2 85 C0 74 25 
+
+==== BB 1137 (0x3A9700BD) in 21194B, out 117174B, BBs exec'd 0 ====
+	0x3A9700BD:  movl 4(%eax),%eax
+	0x3A9700C0:  movzwl (%eax,%edi,2),%edx
+	0x3A9700C4:  movl 0x17C(%esi),%eax
+	0x3A9700CA:  andl $0x7FFF, %edx
+	0x3A9700D0:  shll $0x4, %edx
+	0x3A9700D3:  addl %eax,%edx
+	0x3A9700D5:  xorl %eax, %eax
+	0x3A9700D7:  cmpl $0x0, 4(%edx)
+	0x3A9700DB:  setnz %al
+	0x3A9700DE:  negl %eax
+	0x3A9700E0:  andl %eax,%edx
+	0x3A9700E2:  movl -28(%ebp),%eax
+	0x3A9700E5:  movl $0x1,%edi
+	0x3A9700EA:  leal -16(%ebp), %ecx
+	0x3A9700ED:  movl (%eax),%eax
+	0x3A9700EF:  addl %eax,-20(%ebp)
+	0x3A9700F2:  xorl %eax, %eax
+	0x3A9700F4:  movl %eax,16(%esp,,)
+	0x3A9700F8:  movl $0x1,%eax
+	0x3A9700FD:  movl %edx,4(%esp,,)
+	0x3A970101:  movl %esi,%edx
+	0x3A970103:  movl %eax,12(%esp,,)
+	0x3A970107:  movl %edi,8(%esp,,)
+	0x3A97010B:  movl 0x1B0(%esi),%eax
+	0x3A970111:  movl %eax,(%esp,,)
+	0x3A970114:  movl -20(%ebp),%eax
+	0x3A970117:  call 0x3A96C0E0
+. 1137 3A9700BD 95
+. 8B 40 04 0F B7 14 78 8B 86 7C 01 00 00 81 E2 FF 7F 00 00 C1 E2 04 01 C2 31 C0 83 7A 04 00 0F 95 C0 F7 D8 21 C2 8B 45 E4 BF 01 00 00 00 8D 4D F0 8B 00 01 45 EC 31 C0 89 44 24 10 B8 01 00 00 00 89 54 24 04 89 F2 89 44 24 0C 89 7C 24 08 8B 86 B0 01 00 00 89 04 24 8B 45 EC E8 C4 BF FF FF 
+
+==== BB 1138 (0x3A97011C) in 21289B, out 117515B, BBs exec'd 0 ====
+	0x3A97011C:  movl -16(%ebp),%edx
+	0x3A97011F:  subl $0x14, %esp
+	0x3A970122:  movl %eax,%ecx
+	0x3A970124:  xorl %eax, %eax
+	0x3A970126:  testl %edx,%edx
+	0x3A970128:  jz-8 0x3A970131
+. 1138 3A97011C 14
+. 8B 55 F0 83 EC 14 89 C1 31 C0 85 D2 74 07 
+
+==== BB 1139 (0x3A97012A) in 21303B, out 117624B, BBs exec'd 0 ====
+	0x3A97012A:  movl %ecx,%eax
+	0x3A97012C:  movl 4(%edx),%ecx
+	0x3A97012F:  addl %ecx,%eax
+	0x3A970131:  movl 0xFFFFFC44(%ebx),%esi
+	0x3A970137:  testl %esi,%esi
+	0x3A970139:  jnz-8 0x3A970140
+. 1139 3A97012A 17
+. 89 C8 8B 4A 04 01 C8 8B B3 44 FC FF FF 85 F6 75 05 
+
+==== BB 1140 (0x3A97013B) in 21320B, out 117731B, BBs exec'd 0 ====
+	0x3A97013B:  movl -24(%ebp),%edx
+	0x3A97013E:  movl %eax,(%edx)
+	0x3A970140:  leal -12(%ebp), %esp
+	0x3A970143:  popl %ebx
+	0x3A970144:  popl %esi
+	0x3A970145:  popl %edi
+	0x3A970146:  popl %ebp
+	0x3A970147:  ret
+. 1140 3A97013B 13
+. 8B 55 E8 89 02 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 1141 (0x3A96FF00) in 21333B, out 117856B, BBs exec'd 0 ====
+	0x3A96FF00:  popl %edx
+	0x3A96FF01:  popl %ecx
+	0x3A96FF02:  xchgl %eax, (%esp,,)
+	0x3A96FF05:  ret 8
+. 1141 3A96FF00 8
+. 5A 59 87 04 24 C2 08 00 
+
+==== BB 1142 strrchr(0x3AA05860) in 21341B, out 117934B, BBs exec'd 0 ====
+	0x3AA05860:  pushl %edi
+	0x3AA05861:  pushl %esi
+	0x3AA05862:  xorl %eax, %eax
+	0x3AA05864:  movl 12(%esp,,),%esi
+	0x3AA05868:  movl 16(%esp,,),%ecx
+	0x3AA0586C:  movb %cl,%ch{bp}
+	0x3AA0586E:  movl %ecx,%edx
+	0x3AA05870:  shll $0x10, %ecx
+	0x3AA05873:  movw %dx,%cx
+	0x3AA05876:  testl $0x3, %esi
+	0x3AA0587C:  jz-32 0x3AA05903
+. 1142 3AA05860 34
+. 57 56 31 C0 8B 74 24 0C 8B 4C 24 10 88 CD 89 CA C1 E1 10 66 89 D1 F7 C6 03 00 00 00 0F 84 81 00 00 00 
+
+==== BB 1143 (0x3AA05903) in 21375B, out 118113B, BBs exec'd 0 ====
+	0x3AA05903:  movl (%esi),%edx
+	0x3AA05905:  movl $0xFEFEFEFF,%edi
+	0x3AA0590A:  addl %edx,%edi
+	0x3AA0590C:  jnb-32 0x3AA059EC
+. 1143 3AA05903 15
+. 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00 
+
+==== BB 1144 (0x3AA05912) in 21390B, out 118191B, BBs exec'd 0 ====
+	0x3AA05912:  xorl %edx,%edi
+	0x3AA05914:  orl $0xFEFEFEFF, %edi
+	0x3AA0591A:  incl %edi
+	0x3AA0591B:  jnz-32 0x3AA059EC
+. 1144 3AA05912 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 CB 00 00 00 
+
+==== BB 1145 (0x3AA05921) in 21405B, out 118267B, BBs exec'd 0 ====
+	0x3AA05921:  xorl %ecx,%edx
+	0x3AA05923:  movl $0xFEFEFEFF,%edi
+	0x3AA05928:  addl %edx,%edi
+	0x3AA0592A:  jnb-8 0x3AA058CA
+. 1145 3AA05921 11
+. 31 CA BF FF FE FE FE 01 D7 73 9E 
+
+==== BB 1146 (0x3AA0592C) in 21416B, out 118345B, BBs exec'd 0 ====
+	0x3AA0592C:  xorl %edx,%edi
+	0x3AA0592E:  orl $0xFEFEFEFF, %edi
+	0x3AA05934:  incl %edi
+	0x3AA05935:  jnz-8 0x3AA058E0
+. 1146 3AA0592C 11
+. 31 D7 81 CF FF FE FE FE 47 75 A9 
+
+==== BB 1147 (0x3AA058E0) in 21427B, out 118421B, BBs exec'd 0 ====
+	0x3AA058E0:  subl $0x4, %esi
+	0x3AA058E3:  subl $0x4, %esi
+	0x3AA058E6:  subl $0x4, %esi
+	0x3AA058E9:  testl $0xFF0000, %edx
+	0x3AA058EF:  jnz-8 0x3AA058F6
+. 1147 3AA058E0 17
+. 83 EE 04 83 EE 04 83 EE 04 F7 C2 00 00 FF 00 75 05 
+
+==== BB 1148 (0x3AA058F6) in 21444B, out 118509B, BBs exec'd 0 ====
+	0x3AA058F6:  leal 12(%esi), %eax
+	0x3AA058F9:  testb %dh{si},%dh{si}
+	0x3AA058FB:  jnz-8 0x3AA05900
+. 1148 3AA058F6 7
+. 8D 46 0C 84 F6 75 03 
+
+==== BB 1149 (0x3AA058FD) in 21451B, out 118583B, BBs exec'd 0 ====
+	0x3AA058FD:  leal 13(%esi), %eax
+	0x3AA05900:  addl $0x10, %esi
+	0x3AA05903:  movl (%esi),%edx
+	0x3AA05905:  movl $0xFEFEFEFF,%edi
+	0x3AA0590A:  addl %edx,%edi
+	0x3AA0590C:  jnb-32 0x3AA059EC
+. 1149 3AA058FD 21
+. 8D 46 0D 83 C6 10 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00 
+
+==== BB 1150 (0x3AA05937) in 21472B, out 118684B, BBs exec'd 0 ====
+	0x3AA05937:  movl 4(%esi),%edx
+	0x3AA0593A:  movl $0xFEFEFEFF,%edi
+	0x3AA0593F:  addl %edx,%edi
+	0x3AA05941:  jnb-32 0x3AA059E9
+. 1150 3AA05937 16
+. 8B 56 04 BF FF FE FE FE 01 D7 0F 83 A2 00 00 00 
+
+==== BB 1151 (0x3AA05947) in 21488B, out 118765B, BBs exec'd 0 ====
+	0x3AA05947:  xorl %edx,%edi
+	0x3AA05949:  orl $0xFEFEFEFF, %edi
+	0x3AA0594F:  incl %edi
+	0x3AA05950:  jnz-32 0x3AA059E9
+. 1151 3AA05947 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 93 00 00 00 
+
+==== BB 1152 (0x3AA059E9) in 21503B, out 118841B, BBs exec'd 0 ====
+	0x3AA059E9:  addl $0x4, %esi
+	0x3AA059EC:  cmpb %cl,%dl
+	0x3AA059EE:  jnz-8 0x3AA059F2
+. 1152 3AA059E9 7
+. 83 C6 04 38 CA 75 02 
+
+==== BB 1153 (0x3AA059F2) in 21510B, out 118912B, BBs exec'd 0 ====
+	0x3AA059F2:  testb %dl,%dl
+	0x3AA059F4:  jz-8 0x3AA05A16
+. 1153 3AA059F2 4
+. 84 D2 74 20 
+
+==== BB 1154 (0x3AA05A16) in 21514B, out 118973B, BBs exec'd 0 ====
+	0x3AA05A16:  popl %esi
+	0x3AA05A17:  popl %edi
+	0x3AA05A18:  ret
+. 1154 3AA05A16 3
+. 5E 5F C3 
+
+==== BB 1155 (0x3AA57E38) in 21517B, out 119035B, BBs exec'd 0 ====
+	0x3AA57E38:  testl %eax,%eax
+	0x3AA57E3A:  jz-8 0x3AA57E5E
+. 1155 3AA57E38 4
+. 85 C0 74 22 
+
+==== BB 1156 (0x3AA57E3C) in 21521B, out 119094B, BBs exec'd 0 ====
+	0x3AA57E3C:  leal 1(%eax), %edx
+	0x3AA57E3F:  movl 0x198(%ebx),%eax
+	0x3AA57E45:  movl %edx,(%eax)
+	0x3AA57E47:  movl (%edi),%edx
+	0x3AA57E49:  movl 0x270(%ebx),%eax
+	0x3AA57E4F:  movl %edx,(%eax)
+	0x3AA57E51:  movl -12(%ebp),%ebx
+	0x3AA57E54:  movl -8(%ebp),%esi
+	0x3AA57E57:  movl -4(%ebp),%edi
+	0x3AA57E5A:  movl %ebp,%esp
+	0x3AA57E5C:  popl %ebp
+	0x3AA57E5D:  ret
+. 1156 3AA57E3C 34
+. 8D 50 01 8B 83 98 01 00 00 89 10 8B 17 8B 83 70 02 00 00 89 10 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 1157 (0x3A9B385A) in 21555B, out 119255B, BBs exec'd 0 ====
+	0x3A9B385A:  call 0x3A9B3720
+. 1157 3A9B385A 5
+. E8 C1 FE FF FF 
+
+==== BB 1158 __libc_global_ctors(0x3A9B3720) in 21560B, out 119300B, BBs exec'd 0 ====
+	0x3A9B3720:  pushl %ebp
+	0x3A9B3721:  movl %esp,%ebp
+	0x3A9B3723:  pushl %esi
+	0x3A9B3724:  pushl %ebx
+	0x3A9B3725:  call 0x3A9B3791
+. 1158 3A9B3720 10
+. 55 89 E5 56 53 E8 67 00 00 00 
+
+==== BB 1159 (0x3A9B372A) in 21570B, out 119403B, BBs exec'd 0 ====
+	0x3A9B372A:  addl $0xFC8E6, %ebx
+	0x3A9B3730:  leal 0xFFFFFF94(%ebx), %esi
+	0x3A9B3736:  movl (%esi),%eax
+	0x3A9B3738:  testl %eax,%eax
+	0x3A9B373A:  jnz-8 0x3A9B3740
+. 1159 3A9B372A 18
+. 81 C3 E6 C8 0F 00 8D B3 94 FF FF FF 8B 06 85 C0 75 04 
+
+==== BB 1160 (0x3A9B3740) in 21588B, out 119495B, BBs exec'd 0 ====
+	0x3A9B3740:  addl $0x4, %esi
+	0x3A9B3743:  call*l %eax
+. 1160 3A9B3740 5
+. 83 C6 04 FF D0 
+
+==== BB 1161 _IO_check_libio(0x3A9FE7B0) in 21593B, out 119550B, BBs exec'd 0 ====
+	0x3A9FE7B0:  pushl %ebp
+	0x3A9FE7B1:  movl %esp,%ebp
+	0x3A9FE7B3:  subl $0x8, %esp
+	0x3A9FE7B6:  movl %ebx,(%esp,,)
+	0x3A9FE7B9:  call 0x3A9B3791
+. 1161 3A9FE7B0 14
+. 55 89 E5 83 EC 08 89 1C 24 E8 D3 4F FB FF 
+
+==== BB 1162 (0x3A9FE7BE) in 21607B, out 119645B, BBs exec'd 0 ====
+	0x3A9FE7BE:  addl $0xB1852, %ebx
+	0x3A9FE7C4:  movl %esi,4(%esp,,)
+	0x3A9FE7C8:  movl 0x238(%ebx),%eax
+	0x3A9FE7CE:  testl %eax,%eax
+	0x3A9FE7D0:  jnz-8 0x3A9FE820
+. 1162 3A9FE7BE 20
+. 81 C3 52 18 0B 00 89 74 24 04 8B 83 38 02 00 00 85 C0 75 4E 
+
+==== BB 1163 (0x3A9FE820) in 21627B, out 119749B, BBs exec'd 0 ====
+	0x3A9FE820:  movl (%esp,,),%ebx
+	0x3A9FE823:  movl 4(%esp,,),%esi
+	0x3A9FE827:  movl %ebp,%esp
+	0x3A9FE829:  popl %ebp
+	0x3A9FE82A:  ret
+. 1163 3A9FE820 11
+. 8B 1C 24 8B 74 24 04 89 EC 5D C3 
+
+==== BB 1164 (0x3A9B3745) in 21638B, out 119826B, BBs exec'd 0 ====
+	0x3A9B3745:  movl (%esi),%eax
+	0x3A9B3747:  testl %eax,%eax
+	0x3A9B3749:  jnz-8 0x3A9B3740
+. 1164 3A9B3745 6
+. 8B 06 85 C0 75 F5 
+
+==== BB 1165 (0x3A9B374B) in 21644B, out 119895B, BBs exec'd 0 ====
+	0x3A9B374B:  popl %ebx
+	0x3A9B374C:  popl %esi
+	0x3A9B374D:  popl %ebp
+	0x3A9B374E:  movl %esi,%esi
+	0x3A9B3750:  ret
+. 1165 3A9B374B 6
+. 5B 5E 5D 89 F6 C3 
+
+==== BB 1166 (0x3A9B385F) in 21650B, out 119977B, BBs exec'd 0 ====
+	0x3A9B385F:  movl -12(%ebp),%ebx
+	0x3A9B3862:  movl -8(%ebp),%esi
+	0x3A9B3865:  movl -4(%ebp),%edi
+	0x3A9B3868:  movl %ebp,%esp
+	0x3A9B386A:  popl %ebp
+	0x3A9B386B:  ret
+. 1166 3A9B385F 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 1167 (0x3A97063E) in 21663B, out 120069B, BBs exec'd 0 ====
+	0x3A97063E:  testb $0x3, %dl
+	0x3A970641:  jz-32 0x3A9705C0
+. 1167 3A97063E 9
+. F6 C2 03 0F 84 79 FF FF FF 
+
+==== BB 1168 (0x3A970729) in 21672B, out 120131B, BBs exec'd 0 ====
+	0x3A970729:  movl -28(%ebp),%eax
+	0x3A97072C:  movl $0x0, 12(%eax)
+	0x3A970733:  call 0x3A970B70
+. 1168 3A970729 15
+. 8B 45 E4 C7 40 0C 00 00 00 00 E8 38 04 00 00 
+
+==== BB 1169 (0x3A970738) in 21687B, out 120207B, BBs exec'd 0 ====
+	0x3A970738:  xorl %eax, %eax
+	0x3A97073A:  movl %eax,0x34(%ebx)
+	0x3A970740:  addl $0x2C, %esp
+	0x3A970743:  popl %ebx
+	0x3A970744:  popl %esi
+	0x3A970745:  popl %edi
+	0x3A970746:  popl %ebp
+	0x3A970747:  ret 4
+. 1169 3A970738 18
+. 31 C0 89 83 34 00 00 00 83 C4 2C 5B 5E 5F 5D C2 04 00 
+
+==== BB 1170 (0x3A9658B5) in 21705B, out 120340B, BBs exec'd 0 ====
+	0x3A9658B5:  leal 0xFFFF8194(%ebx), %edx
+	0x3A9658BB:  jmp*l %edi
+. 1170 3A9658B5 8
+. 8D 93 94 81 FF FF FF E7 
+
+==== BB 1171 (0x80482A0) in 21713B, out 120377B, BBs exec'd 0 ====
+	0x80482A0:  xorl %ebp, %ebp
+	0x80482A2:  popl %esi
+	0x80482A3:  movl %esp,%ecx
+	0x80482A5:  andl $0xFFFFFFF0, %esp
+	0x80482A8:  pushl %eax
+	0x80482A9:  pushl %esp
+	0x80482AA:  pushl %edx
+	0x80482AB:  pushl $0x8048370
+	0x80482B0:  pushl $0x80483E0
+	0x80482B5:  pushl %ecx
+	0x80482B6:  pushl %esi
+	0x80482B7:  pushl $0x804835C
+	0x80482BC:  call 0x804828C
+. 1171 80482A0 33
+. 31 ED 5E 89 E1 83 E4 F0 50 54 52 68 70 83 04 08 68 E0 83 04 08 51 56 68 5C 83 04 08 E8 CB FF FF FF 
+
+==== BB 1172 (0x804828C) in 21746B, out 120608B, BBs exec'd 0 ====
+	0x804828C:  jmp*l (0x8049580)
+. 1172 804828C 6
+. FF 25 80 95 04 08 
+
+==== BB 1173 (0x8048292) in 21752B, out 120634B, BBs exec'd 0 ====
+	0x8048292:  pushl $0x0
+	0x8048297:  jmp 0x804827C
+. 1173 8048292 10
+. 68 00 00 00 00 E9 E0 FF FF FF 
+
+==== BB 1174 (0x804827C) in 21762B, out 120683B, BBs exec'd 0 ====
+	0x804827C:  pushl (0x8049578)
+	0x8048282:  jmp*l (0x804957C)
+. 1174 804827C 12
+. FF 35 78 95 04 08 FF 25 7C 95 04 08 
+
+==== BB 1175 __libc_start_main(0x3A9B3870) in 21774B, out 120733B, BBs exec'd 0 ====
+	0x3A9B3870:  pushl %ebp
+	0x3A9B3871:  xorl %ecx, %ecx
+	0x3A9B3873:  movl %esp,%ebp
+	0x3A9B3875:  pushl %edi
+	0x3A9B3876:  pushl %esi
+	0x3A9B3877:  pushl %ebx
+	0x3A9B3878:  subl $0xC, %esp
+	0x3A9B387B:  movl 12(%ebp),%eax
+	0x3A9B387E:  movl 16(%ebp),%edi
+	0x3A9B3881:  call 0x3A9B3791
+. 1175 3A9B3870 22
+. 55 31 C9 89 E5 57 56 53 83 EC 0C 8B 45 0C 8B 7D 10 E8 0B FF FF FF 
+
+==== BB 1176 (0x3A9B3886) in 21796B, out 120906B, BBs exec'd 0 ====
+	0x3A9B3886:  addl $0xFC78A, %ebx
+	0x3A9B388C:  movl 28(%ebp),%esi
+	0x3A9B388F:  leal 4(%edi,%eax,4), %edx
+	0x3A9B3893:  movl 0xA4(%ebx),%eax
+	0x3A9B3899:  testl %eax,%eax
+	0x3A9B389B:  jz-8 0x3A9B38B0
+. 1176 3A9B3886 23
+. 81 C3 8A C7 0F 00 8B 75 1C 8D 54 87 04 8B 83 A4 00 00 00 85 C0 74 13 
+
+==== BB 1177 (0x3A9B389D) in 21819B, out 121027B, BBs exec'd 0 ====
+	0x3A9B389D:  movl (%eax),%eax
+	0x3A9B389F:  testl %eax,%eax
+	0x3A9B38A1:  jnz-8 0x3A9B38B0
+. 1177 3A9B389D 6
+. 8B 00 85 C0 75 0D 
+
+==== BB 1178 (0x3A9B38A3) in 21825B, out 121096B, BBs exec'd 0 ====
+	0x3A9B38A3:  movl $0x1,%ecx
+	0x3A9B38A8:  nop
+	0x3A9B38A9:  leal 0(%esi,,), %esi
+	0x3A9B38B0:  movl 0xB4(%ebx),%eax
+	0x3A9B38B6:  testl %esi,%esi
+	0x3A9B38B8:  movl %ecx,(%eax)
+	0x3A9B38BA:  movl 0x13C(%ebx),%eax
+	0x3A9B38C0:  movl %edx,(%eax)
+	0x3A9B38C2:  jz-8 0x3A9B38D8
+. 1178 3A9B38A3 33
+. B9 01 00 00 00 90 8D B4 26 00 00 00 00 8B 83 B4 00 00 00 85 F6 89 08 8B 83 3C 01 00 00 89 10 74 14 
+
+==== BB 1179 (0x3A9B38C4) in 21858B, out 121230B, BBs exec'd 0 ====
+	0x3A9B38C4:  movl %esi,(%esp,,)
+	0x3A9B38C7:  xorl %eax, %eax
+	0x3A9B38C9:  movl %eax,8(%esp,,)
+	0x3A9B38CD:  xorl %eax, %eax
+	0x3A9B38CF:  movl %eax,4(%esp,,)
+	0x3A9B38D3:  call 0x3A9C8E60
+. 1179 3A9B38C4 20
+. 89 34 24 31 C0 89 44 24 08 31 C0 89 44 24 04 E8 88 55 01 00 
+
+==== BB 1180 __cxa_atexit_internal(0x3A9C8E60) in 21878B, out 121334B, BBs exec'd 0 ====
+	0x3A9C8E60:  pushl %ebp
+	0x3A9C8E61:  movl %esp,%ebp
+	0x3A9C8E63:  pushl %ebx
+	0x3A9C8E64:  call 0x3A9B3791
+. 1180 3A9C8E60 9
+. 55 89 E5 53 E8 28 A9 FE FF 
+
+==== BB 1181 (0x3A9C8E69) in 21887B, out 121421B, BBs exec'd 0 ====
+	0x3A9C8E69:  addl $0xE71A7, %ebx
+	0x3A9C8E6F:  call 0x3A9C8D70
+. 1181 3A9C8E69 11
+. 81 C3 A7 71 0E 00 E8 FC FE FF FF 
+
+==== BB 1182 __new_exitfn(0x3A9C8D70) in 21898B, out 121486B, BBs exec'd 0 ====
+	0x3A9C8D70:  pushl %ebp
+	0x3A9C8D71:  movl %esp,%ebp
+	0x3A9C8D73:  pushl %edi
+	0x3A9C8D74:  xorl %edi, %edi
+	0x3A9C8D76:  pushl %esi
+	0x3A9C8D77:  pushl %ebx
+	0x3A9C8D78:  subl $0x4, %esp
+	0x3A9C8D7B:  call 0x3A9B3791
+. 1182 3A9C8D70 16
+. 55 89 E5 57 31 FF 56 53 83 EC 04 E8 11 AA FE FF 
+
+==== BB 1183 (0x3A9C8D80) in 21914B, out 121633B, BBs exec'd 0 ====
+	0x3A9C8D80:  addl $0xE7290, %ebx
+	0x3A9C8D86:  movl 0x2B3C(%ebx),%edx
+	0x3A9C8D8C:  testl %edx,%edx
+	0x3A9C8D8E:  jnz-32 0x3A9C8E34
+. 1183 3A9C8D80 20
+. 81 C3 90 72 0E 00 8B 93 3C 2B 00 00 85 D2 0F 85 A0 00 00 00 
+
+==== BB 1184 (0x3A9C8D94) in 21934B, out 121721B, BBs exec'd 0 ====
+	0x3A9C8D94:  movl 0xFFFF9EBC(%ebx),%esi
+	0x3A9C8D9A:  testl %esi,%esi
+	0x3A9C8D9C:  jz-8 0x3A9C8DC9
+. 1184 3A9C8D94 10
+. 8B B3 BC 9E FF FF 85 F6 74 2B 
+
+==== BB 1185 (0x3A9C8D9E) in 21944B, out 121796B, BBs exec'd 0 ====
+	0x3A9C8D9E:  movl %esi,%esi
+	0x3A9C8DA0:  movl 4(%esi),%edx
+	0x3A9C8DA3:  xorl %edi, %edi
+	0x3A9C8DA5:  cmpl %edx,%edi
+	0x3A9C8DA7:  jnb-8 0x3A9C8DBE
+. 1185 3A9C8D9E 11
+. 89 F6 8B 56 04 31 FF 39 D7 73 15 
+
+==== BB 1186 (0x3A9C8DBE) in 21955B, out 121886B, BBs exec'd 0 ====
+	0x3A9C8DBE:  cmpl $0x1F, %edx
+	0x3A9C8DC1:  jbe-8 0x3A9C8DFD
+. 1186 3A9C8DBE 5
+. 83 FA 1F 76 3A 
+
+==== BB 1187 (0x3A9C8DFD) in 21960B, out 121943B, BBs exec'd 0 ====
+	0x3A9C8DFD:  leal 1(%edx), %eax
+	0x3A9C8E00:  movl %edx,%edi
+	0x3A9C8E02:  movl %eax,4(%esi)
+	0x3A9C8E05:  testl %esi,%esi
+	0x3A9C8E07:  jz-8 0x3A9C8DC9
+. 1187 3A9C8DFD 12
+. 8D 42 01 89 D7 89 46 04 85 F6 74 C0 
+
+==== BB 1188 (0x3A9C8E09) in 21972B, out 122035B, BBs exec'd 0 ====
+	0x3A9C8E09:  movl %edi,%eax
+	0x3A9C8E0B:  movl $0x1,%ecx
+	0x3A9C8E10:  shll $0x4, %eax
+	0x3A9C8E13:  movl %ecx,8(%esi,%eax,1)
+	0x3A9C8E17:  movl 0x2B44(%ebx),%edx
+	0x3A9C8E1D:  testl %edx,%edx
+	0x3A9C8E1F:  jnz-8 0x3A9C8E44
+. 1188 3A9C8E09 24
+. 89 F8 B9 01 00 00 00 C1 E0 04 89 4C 06 08 8B 93 44 2B 00 00 85 D2 75 23 
+
+==== BB 1189 (0x3A9C8E21) in 21996B, out 122153B, BBs exec'd 0 ====
+	0x3A9C8E21:  xorl %eax, %eax
+	0x3A9C8E23:  testl %esi,%esi
+	0x3A9C8E25:  jz-8 0x3A9C8E2E
+. 1189 3A9C8E21 6
+. 31 C0 85 F6 74 07 
+
+==== BB 1190 (0x3A9C8E27) in 22002B, out 122226B, BBs exec'd 0 ====
+	0x3A9C8E27:  shll $0x4, %edi
+	0x3A9C8E2A:  leal 8(%edi,%esi,1), %eax
+	0x3A9C8E2E:  popl %edx
+	0x3A9C8E2F:  popl %ebx
+	0x3A9C8E30:  popl %esi
+	0x3A9C8E31:  popl %edi
+	0x3A9C8E32:  popl %ebp
+	0x3A9C8E33:  ret
+. 1190 3A9C8E27 13
+. C1 E7 04 8D 44 37 08 5A 5B 5E 5F 5D C3 
+
+==== BB 1191 (0x3A9C8E74) in 22015B, out 122362B, BBs exec'd 0 ====
+	0x3A9C8E74:  movl %eax,%edx
+	0x3A9C8E76:  testl %edx,%edx
+	0x3A9C8E78:  movl $0xFFFFFFFF,%eax
+	0x3A9C8E7D:  jz-8 0x3A9C8E99
+. 1191 3A9C8E74 11
+. 89 C2 85 D2 B8 FF FF FF FF 74 1A 
+
+==== BB 1192 (0x3A9C8E7F) in 22026B, out 122440B, BBs exec'd 0 ====
+	0x3A9C8E7F:  movl $0x4, (%edx)
+	0x3A9C8E85:  movl 8(%ebp),%eax
+	0x3A9C8E88:  movl %eax,4(%edx)
+	0x3A9C8E8B:  movl 12(%ebp),%eax
+	0x3A9C8E8E:  movl %eax,8(%edx)
+	0x3A9C8E91:  movl 16(%ebp),%eax
+	0x3A9C8E94:  movl %eax,12(%edx)
+	0x3A9C8E97:  xorl %eax, %eax
+	0x3A9C8E99:  popl %ebx
+	0x3A9C8E9A:  popl %ebp
+	0x3A9C8E9B:  ret
+. 1192 3A9C8E7F 29
+. C7 02 04 00 00 00 8B 45 08 89 42 04 8B 45 0C 89 42 08 8B 45 10 89 42 0C 31 C0 5B 5D C3 
+
+==== BB 1193 (0x3A9B38D8) in 22055B, out 122598B, BBs exec'd 0 ====
+	0x3A9B38D8:  movl 24(%ebp),%esi
+	0x3A9B38DB:  testl %esi,%esi
+	0x3A9B38DD:  jz-8 0x3A9B38F6
+. 1193 3A9B38D8 7
+. 8B 75 18 85 F6 74 17 
+
+==== BB 1194 (0x3A9B38DF) in 22062B, out 122670B, BBs exec'd 0 ====
+	0x3A9B38DF:  xorl %edx, %edx
+	0x3A9B38E1:  xorl %ecx, %ecx
+	0x3A9B38E3:  movl %ecx,8(%esp,,)
+	0x3A9B38E7:  movl %edx,4(%esp,,)
+	0x3A9B38EB:  movl 24(%ebp),%edx
+	0x3A9B38EE:  movl %edx,(%esp,,)
+	0x3A9B38F1:  call 0x3A9C8E60
+. 1194 3A9B38DF 23
+. 31 D2 31 C9 89 4C 24 08 89 54 24 04 8B 55 18 89 14 24 E8 6A 55 01 00 
+
+==== BB 1195 (0x3A9C8DA9) in 22085B, out 122787B, BBs exec'd 0 ====
+	0x3A9C8DA9:  leal 8(%esi), %eax
+	0x3A9C8DAC:  leal 0(%esi,,), %esi
+	0x3A9C8DB0:  movl (%eax),%ecx
+	0x3A9C8DB2:  testl %ecx,%ecx
+	0x3A9C8DB4:  jz-8 0x3A9C8DF4
+. 1195 3A9C8DA9 13
+. 8D 46 08 8D 74 26 00 8B 08 85 C9 74 3E 
+
+==== BB 1196 (0x3A9C8DB6) in 22098B, out 122876B, BBs exec'd 0 ====
+	0x3A9C8DB6:  incl %edi
+	0x3A9C8DB7:  addl $0x10, %eax
+	0x3A9C8DBA:  cmpl %edx,%edi
+	0x3A9C8DBC:  jb-8 0x3A9C8DB0
+. 1196 3A9C8DB6 8
+. 47 83 C0 10 39 D7 72 F2 
+
+==== BB 1197 (0x3A9B38F6) in 22106B, out 122954B, BBs exec'd 0 ====
+	0x3A9B38F6:  movl 0x168(%ebx),%esi
+	0x3A9B38FC:  testb $0x2, (%esi)
+	0x3A9B38FF:  jnz-8 0x3A9B3933
+. 1197 3A9B38F6 11
+. 8B B3 68 01 00 00 F6 06 02 75 32 
+
+==== BB 1198 (0x3A9B3901) in 22117B, out 123038B, BBs exec'd 0 ====
+	0x3A9B3901:  movl 20(%ebp),%eax
+	0x3A9B3904:  testl %eax,%eax
+	0x3A9B3906:  jz-8 0x3A9B390B
+. 1198 3A9B3901 7
+. 8B 45 14 85 C0 74 03 
+
+==== BB 1199 (0x3A9B3908) in 22124B, out 123110B, BBs exec'd 0 ====
+	0x3A9B3908:  call*l 20(%ebp)
+. 1199 3A9B3908 3
+. FF 55 14 
+
+==== BB 1200 __libc_csu_init(0x80483E0) in 22127B, out 123154B, BBs exec'd 0 ====
+	0x80483E0:  pushl %ebp
+	0x80483E1:  movl %esp,%ebp
+	0x80483E3:  subl $0x18, %esp
+	0x80483E6:  movl %ebx,-12(%ebp)
+	0x80483E9:  movl %esi,-8(%ebp)
+	0x80483EC:  xorl %esi, %esi
+	0x80483EE:  call 0x8048438
+. 1200 80483E0 19
+. 55 89 E5 83 EC 18 89 5D F4 89 75 F8 31 F6 E8 45 00 00 00 
+
+==== BB 1201 (0x8048438) in 22146B, out 123279B, BBs exec'd 0 ====
+	0x8048438:  movl (%esp,,),%ebx
+	0x804843B:  ret
+. 1201 8048438 4
+. 8B 1C 24 C3 
+
+==== BB 1202 (0x80483F3) in 22150B, out 123319B, BBs exec'd 0 ====
+	0x80483F3:  addl $0x1181, %ebx
+	0x80483F9:  movl %edi,-4(%ebp)
+	0x80483FC:  call 0x8048264
+. 1202 80483F3 14
+. 81 C3 81 11 00 00 89 7D FC E8 63 FE FF FF 
+
+==== BB 1203 (0x8048264) in 22164B, out 123400B, BBs exec'd 0 ====
+	0x8048264:  pushl %ebp
+	0x8048265:  movl %esp,%ebp
+	0x8048267:  subl $0x8, %esp
+	0x804826A:  call 0x80482C4
+. 1203 8048264 11
+. 55 89 E5 83 EC 08 E8 55 00 00 00 
+
+==== BB 1204 (0x80482C4) in 22175B, out 123485B, BBs exec'd 0 ====
+	0x80482C4:  pushl %ebp
+	0x80482C5:  movl %esp,%ebp
+	0x80482C7:  pushl %ebx
+	0x80482C8:  call 0x80482CD ; popl %ebx
+	0x80482CE:  addl $0x12A7, %ebx
+	0x80482D4:  pushl %edx
+	0x80482D5:  movl 0x10(%ebx),%eax
+	0x80482DB:  testl %eax,%eax
+	0x80482DD:  jz-8 0x80482E1
+. 1204 80482C4 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 A7 12 00 00 52 8B 83 10 00 00 00 85 C0 74 02 
+
+==== BB 1205 (0x80482E1) in 22202B, out 123637B, BBs exec'd 0 ====
+	0x80482E1:  popl %eax
+	0x80482E2:  popl %ebx
+	0x80482E3:  leave	0x80482E4:  ret
+. 1205 80482E1 4
+. 58 5B C9 C3 
+
+==== BB 1206 (0x804826F) in 22206B, out 123721B, BBs exec'd 0 ====
+	0x804826F:  call 0x8048330
+. 1206 804826F 5
+. E8 BC 00 00 00 
+
+==== BB 1207 (0x8048330) in 22211B, out 123766B, BBs exec'd 0 ====
+	0x8048330:  pushl %ebp
+	0x8048331:  movl %esp,%ebp
+	0x8048333:  pushl %ecx
+	0x8048334:  pushl %ecx
+	0x8048335:  movl (0x8049570),%edx
+	0x804833B:  testl %edx,%edx
+	0x804833D:  jz-8 0x8048358
+. 1207 8048330 15
+. 55 89 E5 51 51 8B 15 70 95 04 08 85 D2 74 19 
+
+==== BB 1208 (0x8048358) in 22226B, out 123895B, BBs exec'd 0 ====
+	0x8048358:  leave	0x8048359:  ret
+. 1208 8048358 2
+. C9 C3 
+
+==== BB 1209 (0x8048274) in 22228B, out 123944B, BBs exec'd 0 ====
+	0x8048274:  call 0x8048440
+. 1209 8048274 5
+. E8 C7 01 00 00 
+
+==== BB 1210 (0x8048440) in 22233B, out 123989B, BBs exec'd 0 ====
+	0x8048440:  pushl %ebp
+	0x8048441:  movl %esp,%ebp
+	0x8048443:  pushl %ebx
+	0x8048444:  pushl %edx
+	0x8048445:  movl $0x8049560,%ebx
+	0x804844A:  movl 0x8049560, %eax
+	0x804844F:  cmpl $0xFFFFFFFF, %eax
+	0x8048452:  jz-8 0x8048460
+. 1210 8048440 20
+. 55 89 E5 53 52 BB 60 95 04 08 A1 60 95 04 08 83 F8 FF 74 0C 
+
+==== BB 1211 (0x8048460) in 22253B, out 124131B, BBs exec'd 0 ====
+	0x8048460:  popl %eax
+	0x8048461:  popl %ebx
+	0x8048462:  popl %ebp
+	0x8048463:  ret
+. 1211 8048460 4
+. 58 5B 5D C3 
+
+==== BB 1212 (0x8048279) in 22257B, out 124209B, BBs exec'd 0 ====
+	0x8048279:  leave	0x804827A:  ret
+. 1212 8048279 2
+. C9 C3 
+
+==== BB 1213 (0x8048401) in 22259B, out 124258B, BBs exec'd 0 ====
+	0x8048401:  leal 0xFFFFFF14(%ebx), %edx
+	0x8048407:  leal 0xFFFFFF14(%ebx), %eax
+	0x804840D:  subl %eax,%edx
+	0x804840F:  sarl $0x2, %edx
+	0x8048412:  cmpl %edx,%esi
+	0x8048414:  jnb-8 0x804842B
+. 1213 8048401 21
+. 8D 93 14 FF FF FF 8D 83 14 FF FF FF 29 C2 C1 FA 02 39 D6 73 15 
+
+==== BB 1214 (0x804842B) in 22280B, out 124356B, BBs exec'd 0 ====
+	0x804842B:  movl -12(%ebp),%ebx
+	0x804842E:  movl -8(%ebp),%esi
+	0x8048431:  movl -4(%ebp),%edi
+	0x8048434:  movl %ebp,%esp
+	0x8048436:  popl %ebp
+	0x8048437:  ret
+. 1214 804842B 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 1215 (0x3A9B390B) in 22293B, out 124448B, BBs exec'd 0 ====
+	0x3A9B390B:  testb $0x2, (%esi)
+	0x3A9B390E:  movl %esi,%esi
+	0x3A9B3910:  jnz-8 0x3A9B394A
+. 1215 3A9B390B 7
+. F6 06 02 89 F6 75 38 
+
+==== BB 1216 (0x3A9B3912) in 22300B, out 124519B, BBs exec'd 0 ====
+	0x3A9B3912:  movl 0x13C(%ebx),%edx
+	0x3A9B3918:  movl (%edx),%eax
+	0x3A9B391A:  movl %edi,4(%esp,,)
+	0x3A9B391E:  movl %eax,8(%esp,,)
+	0x3A9B3922:  movl 12(%ebp),%eax
+	0x3A9B3925:  movl %eax,(%esp,,)
+	0x3A9B3928:  call*l 8(%ebp)
+. 1216 3A9B3912 25
+. 8B 93 3C 01 00 00 8B 02 89 7C 24 04 89 44 24 08 8B 45 0C 89 04 24 FF 55 08 
+
+==== BB 1217 main(0x804835C) in 22325B, out 124630B, BBs exec'd 0 ====
+	0x804835C:  pushl %ebp
+	0x804835D:  movl %esp,%ebp
+	0x804835F:  subl $0x8, %esp
+	0x8048362:  andl $0xFFFFFFF0, %esp
+	0x8048365:  movl $0x2A,%eax
+	0x804836A:  leave	0x804836B:  ret
+. 1217 804835C 16
+. 55 89 E5 83 EC 08 83 E4 F0 B8 2A 00 00 00 C9 C3 
+
+==== BB 1218 (0x3A9B392B) in 22341B, out 124740B, BBs exec'd 0 ====
+	0x3A9B392B:  movl %eax,(%esp,,)
+	0x3A9B392E:  call 0x3A9C8C50
+. 1218 3A9B392B 8
+. 89 04 24 E8 1D 53 01 00 
+
+==== BB 1219 exit(0x3A9C8C50) in 22349B, out 124795B, BBs exec'd 0 ====
+	0x3A9C8C50:  pushl %ebp
+	0x3A9C8C51:  movl %esp,%ebp
+	0x3A9C8C53:  pushl %edi
+	0x3A9C8C54:  pushl %esi
+	0x3A9C8C55:  pushl %ebx
+	0x3A9C8C56:  subl $0xC, %esp
+	0x3A9C8C59:  call 0x3A9B3791
+. 1219 3A9C8C50 14
+. 55 89 E5 57 56 53 83 EC 0C E8 33 AB FE FF 
+
+==== BB 1220 (0x3A9C8C5E) in 22363B, out 124928B, BBs exec'd 0 ====
+	0x3A9C8C5E:  addl $0xE73B2, %ebx
+	0x3A9C8C64:  movl 0xFFFF9EBC(%ebx),%edx
+	0x3A9C8C6A:  testl %edx,%edx
+	0x3A9C8C6C:  jz-8 0x3A9C8CDD
+. 1220 3A9C8C5E 16
+. 81 C3 B2 73 0E 00 8B 93 BC 9E FF FF 85 D2 74 6F 
+
+==== BB 1221 (0x3A9C8C6E) in 22379B, out 125016B, BBs exec'd 0 ====
+	0x3A9C8C6E:  movl %esi,%esi
+	0x3A9C8C70:  movl 4(%edx),%eax
+	0x3A9C8C73:  testl %eax,%eax
+	0x3A9C8C75:  jz-8 0x3A9C8CBD
+. 1221 3A9C8C6E 9
+. 89 F6 8B 42 04 85 C0 74 46 
+
+==== BB 1222 (0x3A9C8C77) in 22388B, out 125098B, BBs exec'd 0 ====
+	0x3A9C8C77:  movl %esi,%esi
+	0x3A9C8C79:  leal 0(%edi,,), %edi
+	0x3A9C8C80:  decl %eax
+	0x3A9C8C81:  movl %eax,4(%edx)
+	0x3A9C8C84:  shll $0x4, %eax
+	0x3A9C8C87:  leal (%eax,%edx,1), %eax
+	0x3A9C8C8A:  leal 8(%eax), %ecx
+	0x3A9C8C8D:  movl 8(%eax),%eax
+	0x3A9C8C90:  cmpl $0x4, %eax
+	0x3A9C8C93:  jnbe-8 0x3A9C8CB6
+. 1222 3A9C8C77 30
+. 89 F6 8D BC 27 00 00 00 00 48 89 42 04 C1 E0 04 8D 04 10 8D 48 08 8B 40 08 83 F8 04 77 21 
+
+==== BB 1223 (0x3A9C8C95) in 22418B, out 125234B, BBs exec'd 0 ====
+	0x3A9C8C95:  movl -75896(%ebx,%eax,4),%eax
+	0x3A9C8C9C:  addl %ebx,%eax
+	0x3A9C8C9E:  jmp*l %eax
+. 1223 3A9C8C95 11
+. 8B 84 83 88 D7 FE FF 01 D8 FF E0 
+
+==== BB 1224 (0x3A9C8CA0) in 22429B, out 125285B, BBs exec'd 0 ====
+	0x3A9C8CA0:  movl 8(%ebp),%eax
+	0x3A9C8CA3:  movl %eax,4(%esp,,)
+	0x3A9C8CA7:  movl 8(%ecx),%eax
+	0x3A9C8CAA:  movl %eax,(%esp,,)
+	0x3A9C8CAD:  call*l 4(%ecx)
+. 1224 3A9C8CA0 16
+. 8B 45 08 89 44 24 04 8B 41 08 89 04 24 FF 51 04 
+
+==== BB 1225 __libc_csu_fini(0x8048370) in 22445B, out 125372B, BBs exec'd 0 ====
+	0x8048370:  pushl %ebp
+	0x8048371:  movl %esp,%ebp
+	0x8048373:  subl $0x18, %esp
+	0x8048376:  movl %ebx,-12(%ebp)
+	0x8048379:  call 0x8048438
+. 1225 8048370 14
+. 55 89 E5 83 EC 18 89 5D F4 E8 BA 00 00 00 
+
+==== BB 1226 (0x804837E) in 22459B, out 125470B, BBs exec'd 0 ====
+	0x804837E:  addl $0x11F6, %ebx
+	0x8048384:  movl %edi,-4(%ebp)
+	0x8048387:  leal 0xFFFFFF14(%ebx), %eax
+	0x804838D:  leal 0xFFFFFF14(%ebx), %edi
+	0x8048393:  movl %esi,-8(%ebp)
+	0x8048396:  subl %edi,%eax
+	0x8048398:  sarl $0x2, %eax
+	0x804839B:  testl %eax,%eax
+	0x804839D:  leal -1(%eax), %esi
+	0x80483A0:  jnz-8 0x80483B4
+. 1226 804837E 36
+. 81 C3 F6 11 00 00 89 7D FC 8D 83 14 FF FF FF 8D BB 14 FF FF FF 89 75 F8 29 F8 C1 F8 02 85 C0 8D 70 FF 75 12 
+
+==== BB 1227 (0x80483A2) in 22495B, out 125620B, BBs exec'd 0 ====
+	0x80483A2:  call 0x8048464
+. 1227 80483A2 5
+. E8 BD 00 00 00 
+
+==== BB 1228 (0x8048464) in 22500B, out 125665B, BBs exec'd 0 ====
+	0x8048464:  pushl %ebp
+	0x8048465:  movl %esp,%ebp
+	0x8048467:  pushl %ebx
+	0x8048468:  call 0x804846D ; popl %ebx
+	0x804846E:  addl $0x1107, %ebx
+	0x8048474:  pushl %eax
+	0x8048475:  call 0x80482F0
+. 1228 8048464 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 07 11 00 00 50 E8 76 FE FF FF 
+
+==== BB 1229 (0x80482F0) in 22522B, out 125794B, BBs exec'd 0 ====
+	0x80482F0:  pushl %ebp
+	0x80482F1:  movl %esp,%ebp
+	0x80482F3:  pushl %eax
+	0x80482F4:  pushl %eax
+	0x80482F5:  cmpb $0x0, (0x8049588)
+	0x80482FC:  jnz-8 0x804832C
+. 1229 80482F0 14
+. 55 89 E5 50 50 80 3D 88 95 04 08 00 75 2E 
+
+==== BB 1230 (0x80482FE) in 22536B, out 125916B, BBs exec'd 0 ====
+	0x80482FE:  movl 0x8049490, %eax
+	0x8048303:  movl (%eax),%edx
+	0x8048305:  testl %edx,%edx
+	0x8048307:  jz-8 0x8048325
+. 1230 80482FE 11
+. A1 90 94 04 08 8B 10 85 D2 74 1C 
+
+==== BB 1231 (0x8048325) in 22547B, out 126000B, BBs exec'd 0 ====
+	0x8048325:  movb $0x1, (0x8049588)
+	0x804832C:  leave	0x804832D:  ret
+. 1231 8048325 9
+. C6 05 88 95 04 08 01 C9 C3 
+
+==== BB 1232 (0x804847A) in 22556B, out 126065B, BBs exec'd 0 ====
+	0x804847A:  popl %ecx
+	0x804847B:  popl %ebx
+	0x804847C:  leave	0x804847D:  ret
+. 1232 804847A 4
+. 59 5B C9 C3 
+
+==== BB 1233 (0x80483A7) in 22560B, out 126149B, BBs exec'd 0 ====
+	0x80483A7:  movl -12(%ebp),%ebx
+	0x80483AA:  movl -8(%ebp),%esi
+	0x80483AD:  movl -4(%ebp),%edi
+	0x80483B0:  movl %ebp,%esp
+	0x80483B2:  popl %ebp
+	0x80483B3:  ret
+. 1233 80483A7 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3 
+
+==== BB 1234 (0x3A9C8CB0) in 22573B, out 126241B, BBs exec'd 0 ====
+	0x3A9C8CB0:  movl 0xFFFF9EBC(%ebx),%edx
+	0x3A9C8CB6:  movl 4(%edx),%eax
+	0x3A9C8CB9:  testl %eax,%eax
+	0x3A9C8CBB:  jnz-8 0x3A9C8C80
+. 1234 3A9C8CB0 13
+. 8B 93 BC 9E FF FF 8B 42 04 85 C0 75 C3 
+
+==== BB 1235 (0x3A9C8C80) in 22586B, out 126329B, BBs exec'd 0 ====
+	0x3A9C8C80:  decl %eax
+	0x3A9C8C81:  movl %eax,4(%edx)
+	0x3A9C8C84:  shll $0x4, %eax
+	0x3A9C8C87:  leal (%eax,%edx,1), %eax
+	0x3A9C8C8A:  leal 8(%eax), %ecx
+	0x3A9C8C8D:  movl 8(%eax),%eax
+	0x3A9C8C90:  cmpl $0x4, %eax
+	0x3A9C8C93:  jnbe-8 0x3A9C8CB6
+. 1235 3A9C8C80 21
+. 48 89 42 04 C1 E0 04 8D 04 10 8D 48 08 8B 40 08 83 F8 04 77 21 
+
+==== BB 1236 _dl_fini(0x3A970800) in 22607B, out 126442B, BBs exec'd 0 ====
+	0x3A970800:  pushl %ebp
+	0x3A970801:  movl %esp,%ebp
+	0x3A970803:  pushl %edi
+	0x3A970804:  pushl %esi
+	0x3A970805:  pushl %ebx
+	0x3A970806:  subl $0x4C, %esp
+	0x3A970809:  call 0x3A97592B
+. 1236 3A970800 14
+. 55 89 E5 57 56 53 83 EC 4C E8 1D 51 00 00 
+
+==== BB 1237 (0x3A97080E) in 22621B, out 126575B, BBs exec'd 0 ====
+	0x3A97080E:  addl $0x7E5E, %ebx
+	0x3A970814:  leal 0xFFFFF9AC(%ebx), %eax
+	0x3A97081A:  movl %eax,-52(%ebp)
+	0x3A97081D:  movl %eax,(%esp,,)
+	0x3A970820:  call*l 0xFFFFFBF8(%ebx)
+. 1237 3A97080E 24
+. 81 C3 5E 7E 00 00 8D 83 AC F9 FF FF 89 45 CC 89 04 24 FF 93 F8 FB FF FF 
+
+==== BB 1238 rtld_lock_default_lock_recursive(0x3A9658E0) in 22645B, out 126670B, BBs exec'd 0 ====
+	0x3A9658E0:  pushl %ebp
+	0x3A9658E1:  movl %esp,%ebp
+	0x3A9658E3:  movl 8(%ebp),%eax
+	0x3A9658E6:  incl 4(%eax)
+	0x3A9658E9:  popl %ebp
+	0x3A9658EA:  ret
+. 1238 3A9658E0 11
+. 55 89 E5 8B 45 08 FF 40 04 5D C3 
+
+==== BB 1239 (0x3A970826) in 22656B, out 126774B, BBs exec'd 0 ====
+	0x3A970826:  movl $0x0, -16(%ebp)
+	0x3A97082D:  movl 0xFFFFF998(%ebx),%edx
+	0x3A970833:  movl 0xFFFFF994(%ebx),%edi
+	0x3A970839:  leal 0xF(,%edx,4), %eax
+	0x3A970840:  andl $0xFFFFFFF0, %eax
+	0x3A970843:  subl %eax,%esp
+	0x3A970845:  leal 12(%esp,,), %ecx
+	0x3A970849:  testl %edi,%edi
+	0x3A97084B:  movl %edi,%eax
+	0x3A97084D:  movl %ecx,-20(%ebp)
+	0x3A970850:  jz-8 0x3A970872
+. 1239 3A970826 44
+. C7 45 F0 00 00 00 00 8B 93 98 F9 FF FF 8B BB 94 F9 FF FF 8D 04 95 0F 00 00 00 83 E0 F0 29 C4 8D 4C 24 0C 85 FF 89 F8 89 4D EC 74 20 
+
+==== BB 1240 (0x3A970852) in 22700B, out 126946B, BBs exec'd 0 ====
+	0x3A970852:  incl 0x170(%edi)
+	0x3A970858:  movl -16(%ebp),%esi
+	0x3A97085B:  movl -20(%ebp),%edx
+	0x3A97085E:  movl %edi,(%edx,%esi,4)
+	0x3A970861:  movl 12(%edi),%edi
+	0x3A970864:  incl %esi
+	0x3A970865:  movl %esi,-16(%ebp)
+	0x3A970868:  testl %edi,%edi
+	0x3A97086A:  jnz-8 0x3A970852
+. 1240 3A970852 26
+. FF 87 70 01 00 00 8B 75 F0 8B 55 EC 89 3C B2 8B 7F 0C 46 89 75 F0 85 FF 75 E6 
+
+==== BB 1241 (0x3A97086C) in 22726B, out 127090B, BBs exec'd 0 ====
+	0x3A97086C:  movl 0xFFFFF998(%ebx),%edx
+	0x3A970872:  movl 12(%eax),%edi
+	0x3A970875:  testl %edi,%edi
+	0x3A970877:  jz-32 0x3A97098E
+. 1241 3A97086C 17
+. 8B 93 98 F9 FF FF 8B 78 0C 85 FF 0F 84 11 01 00 00 
+
+==== BB 1242 (0x3A97087D) in 22743B, out 127181B, BBs exec'd 0 ====
+	0x3A97087D:  movl $0x1, -24(%ebp)
+	0x3A970884:  movl -20(%ebp),%ecx
+	0x3A970887:  cmpl %edi,4(%ecx)
+	0x3A97088A:  jz-8 0x3A97089E
+. 1242 3A97087D 15
+. C7 45 E8 01 00 00 00 8B 4D EC 39 79 04 74 12 
+
+==== BB 1243 (0x3A97089E) in 22758B, out 127272B, BBs exec'd 0 ====
+	0x3A97089E:  movl -24(%ebp),%ecx
+	0x3A9708A1:  incl %ecx
+	0x3A9708A2:  cmpl %edx,%ecx
+	0x3A9708A4:  movl %ecx,-68(%ebp)
+	0x3A9708A7:  jnb-32 0x3A970983
+. 1243 3A97089E 15
+. 8B 4D E8 41 39 D1 89 4D BC 0F 83 D6 00 00 00 
+
+==== BB 1244 (0x3A9708AD) in 22773B, out 127360B, BBs exec'd 0 ====
+	0x3A9708AD:  movl -24(%ebp),%eax
+	0x3A9708B0:  movl -20(%ebp),%esi
+	0x3A9708B3:  shll $0x2, %eax
+	0x3A9708B6:  movl %eax,-64(%ebp)
+	0x3A9708B9:  addl %eax,%esi
+	0x3A9708BB:  negl -64(%ebp)
+	0x3A9708BE:  movl %esi,-60(%ebp)
+	0x3A9708C1:  jmp-8 0x3A9708D0
+. 1244 3A9708AD 22
+. 8B 45 E8 8B 75 EC C1 E0 02 89 45 C0 01 C6 F7 5D C0 89 75 C4 EB 0D 
+
+==== BB 1245 (0x3A9708D0) in 22795B, out 127469B, BBs exec'd 0 ====
+	0x3A9708D0:  movl -20(%ebp),%edx
+	0x3A9708D3:  movl -68(%ebp),%eax
+	0x3A9708D6:  movl (%edx,%eax,4),%eax
+	0x3A9708D9:  movl %eax,-48(%ebp)
+	0x3A9708DC:  movl 0x1D4(%eax),%edx
+	0x3A9708E2:  movl %eax,-28(%ebp)
+	0x3A9708E5:  testl %edx,%edx
+	0x3A9708E7:  jz-8 0x3A970960
+. 1245 3A9708D0 25
+. 8B 55 EC 8B 45 BC 8B 04 82 89 45 D0 8B 90 D4 01 00 00 89 45 E4 85 D2 74 77 
+
+==== BB 1246 (0x3A9708E9) in 22820B, out 127598B, BBs exec'd 0 ====
+	0x3A9708E9:  movl (%edx),%eax
+	0x3A9708EB:  testl %eax,%eax
+	0x3A9708ED:  jz-8 0x3A970960
+. 1246 3A9708E9 6
+. 8B 02 85 C0 74 71 
+
+==== BB 1247 (0x3A9708EF) in 22826B, out 127667B, BBs exec'd 0 ====
+	0x3A9708EF:  movl -20(%ebp),%esi
+	0x3A9708F2:  movl -24(%ebp),%ecx
+	0x3A9708F5:  leal (%esi,%ecx,4), %ecx
+	0x3A9708F8:  movl %ecx,-56(%ebp)
+	0x3A9708FB:  movl -68(%ebp),%esi
+	0x3A9708FE:  movl -64(%ebp),%ecx
+	0x3A970901:  leal (%ecx,%esi,4), %esi
+	0x3A970904:  movl %esi,-72(%ebp)
+	0x3A970907:  cmpl %edi,%eax
+	0x3A970909:  jz-8 0x3A97091D
+. 1247 3A9708EF 28
+. 8B 75 EC 8B 4D E8 8D 0C 8E 89 4D C8 8B 75 BC 8B 4D C0 8D 34 B1 89 75 B8 39 F8 74 12 
+
+==== BB 1248 (0x3A97090B) in 22854B, out 127812B, BBs exec'd 0 ====
+	0x3A97090B:  nop
+	0x3A97090C:  leal 0(%esi,,), %esi
+	0x3A970910:  addl $0x4, %edx
+	0x3A970913:  movl (%edx),%eax
+	0x3A970915:  testl %eax,%eax
+	0x3A970917:  jz-8 0x3A970960
+. 1248 3A97090B 14
+. 90 8D 74 26 00 83 C2 04 8B 02 85 C0 74 47 
+
+==== BB 1249 (0x3A970919) in 22868B, out 127908B, BBs exec'd 0 ====
+	0x3A970919:  cmpl %edi,%eax
+	0x3A97091B:  jnz-8 0x3A970910
+. 1249 3A970919 4
+. 39 F8 75 F3 
+
+==== BB 1250 (0x3A970910) in 22872B, out 127965B, BBs exec'd 0 ====
+	0x3A970910:  addl $0x4, %edx
+	0x3A970913:  movl (%edx),%eax
+	0x3A970915:  testl %eax,%eax
+	0x3A970917:  jz-8 0x3A970960
+. 1250 3A970910 9
+. 83 C2 04 8B 02 85 C0 74 47 
+
+==== BB 1251 (0x3A970960) in 22881B, out 128044B, BBs exec'd 0 ====
+	0x3A970960:  movl -48(%ebp),%edx
+	0x3A970963:  movl 0x1E0(%edx),%ecx
+	0x3A970969:  testl %ecx,%ecx
+	0x3A97096B:  jnz-32 0x3A970A77
+. 1251 3A970960 17
+. 8B 55 D0 8B 8A E0 01 00 00 85 C9 0F 85 06 01 00 00 
+
+==== BB 1252 (0x3A970971) in 22898B, out 128132B, BBs exec'd 0 ====
+	0x3A970971:  incl -68(%ebp)
+	0x3A970974:  movl 0xFFFFF998(%ebx),%edx
+	0x3A97097A:  cmpl %edx,-68(%ebp)
+	0x3A97097D:  jb-32 0x3A9708D0
+. 1252 3A970971 18
+. FF 45 BC 8B 93 98 F9 FF FF 39 55 BC 0F 82 4D FF FF FF 
+
+==== BB 1253 (0x3A970983) in 22916B, out 128227B, BBs exec'd 0 ====
+	0x3A970983:  movl 12(%edi),%edi
+	0x3A970986:  testl %edi,%edi
+	0x3A970988:  jnz-32 0x3A97087D
+. 1253 3A970983 11
+. 8B 7F 0C 85 FF 0F 85 EF FE FF FF 
+
+==== BB 1254 (0x3A97088C) in 22927B, out 128299B, BBs exec'd 0 ====
+	0x3A97088C:  leal 0(%esi,,), %esi
+	0x3A970890:  incl -24(%ebp)
+	0x3A970893:  movl -20(%ebp),%eax
+	0x3A970896:  movl -24(%ebp),%esi
+	0x3A970899:  cmpl %edi,(%eax,%esi,4)
+	0x3A97089C:  jnz-8 0x3A970890
+. 1254 3A97088C 18
+. 8D 74 26 00 FF 45 E8 8B 45 EC 8B 75 E8 39 3C B0 75 F2 
+
+==== BB 1255 (0x3A970890) in 22945B, out 128413B, BBs exec'd 0 ====
+	0x3A970890:  incl -24(%ebp)
+	0x3A970893:  movl -20(%ebp),%eax
+	0x3A970896:  movl -24(%ebp),%esi
+	0x3A970899:  cmpl %edi,(%eax,%esi,4)
+	0x3A97089C:  jnz-8 0x3A970890
+. 1255 3A970890 14
+. FF 45 E8 8B 45 EC 8B 75 E8 39 3C B0 75 F2 
+
+==== BB 1256 (0x3A97098E) in 22959B, out 128517B, BBs exec'd 0 ====
+	0x3A97098E:  movl $0x0, -16(%ebp)
+	0x3A970995:  cmpl %edx,-16(%ebp)
+	0x3A970998:  jb-8 0x3A9709C6
+. 1256 3A97098E 12
+. C7 45 F0 00 00 00 00 39 55 F0 72 2C 
+
+==== BB 1257 (0x3A9709C6) in 22971B, out 128595B, BBs exec'd 0 ====
+	0x3A9709C6:  movl -16(%ebp),%edx
+	0x3A9709C9:  movl -20(%ebp),%ecx
+	0x3A9709CC:  movl (%ecx,%edx,4),%edi
+	0x3A9709CF:  movzbl 0x174(%edi),%edx
+	0x3A9709D6:  testb $0x8, %dl
+	0x3A9709D9:  jz-8 0x3A970A23
+. 1257 3A9709C6 21
+. 8B 55 F0 8B 4D EC 8B 3C 91 0F B6 97 74 01 00 00 F6 C2 08 74 48 
+
+==== BB 1258 (0x3A9709DB) in 22992B, out 128713B, BBs exec'd 0 ====
+	0x3A9709DB:  jmp-8 0x3A97099F
+. 1258 3A9709DB 2
+. EB C2 
+
+==== BB 1259 (0x3A97099F) in 22994B, out 128741B, BBs exec'd 0 ====
+	0x3A97099F:  movb %dl,%al
+	0x3A9709A1:  andb $0xF7, %al
+	0x3A9709A3:  movb %al,0x174(%edi)
+	0x3A9709A9:  movl 4(%edi),%eax
+	0x3A9709AC:  movzbl (%eax),%ecx
+	0x3A9709AF:  testb %cl,%cl
+	0x3A9709B1:  jnz-8 0x3A9709DD
+. 1259 3A97099F 20
+. 88 D0 24 F7 88 87 74 01 00 00 8B 47 04 0F B6 08 84 C9 75 2A 
+
+==== BB 1260 (0x3A9709B3) in 23014B, out 128877B, BBs exec'd 0 ====
+	0x3A9709B3:  testb $0x3, %dl
+	0x3A9709B6:  jnz-8 0x3A9709DD
+. 1260 3A9709B3 5
+. F6 C2 03 75 25 
+
+==== BB 1261 (0x3A9709B8) in 23019B, out 128939B, BBs exec'd 0 ====
+	0x3A9709B8:  incl -16(%ebp)
+	0x3A9709BB:  movl -16(%ebp),%edx
+	0x3A9709BE:  cmpl 0xFFFFF998(%ebx),%edx
+	0x3A9709C4:  jnb-8 0x3A970A37
+. 1261 3A9709B8 14
+. FF 45 F0 8B 55 F0 3B 93 98 F9 FF FF 73 71 
+
+==== BB 1262 (0x3A9709DD) in 23033B, out 129034B, BBs exec'd 0 ====
+	0x3A9709DD:  movl 0x80(%edi),%edx
+	0x3A9709E3:  testl %edx,%edx
+	0x3A9709E5:  jnz-8 0x3A9709EE
+. 1262 3A9709DD 10
+. 8B 97 80 00 00 00 85 D2 75 07 
+
+==== BB 1263 (0x3A9709E7) in 23043B, out 129109B, BBs exec'd 0 ====
+	0x3A9709E7:  movl 76(%edi),%esi
+	0x3A9709EA:  testl %esi,%esi
+	0x3A9709EC:  jz-8 0x3A9709B8
+. 1263 3A9709E7 7
+. 8B 77 4C 85 F6 74 CA 
+
+==== BB 1264 (0x3A9709EE) in 23050B, out 129181B, BBs exec'd 0 ====
+	0x3A9709EE:  testb $0x2, 0xFFFFFC14(%ebx)
+	0x3A9709F5:  jnz-32 0x3A970ADC
+. 1264 3A9709EE 13
+. F6 83 14 FC FF FF 02 0F 85 E1 00 00 00 
+
+==== BB 1265 (0x3A9709FB) in 23063B, out 129251B, BBs exec'd 0 ====
+	0x3A9709FB:  testl %edx,%edx
+	0x3A9709FD:  jz-8 0x3A970A1C
+. 1265 3A9709FB 4
+. 85 D2 74 1D 
+
+==== BB 1266 (0x3A970A1C) in 23067B, out 129310B, BBs exec'd 0 ====
+	0x3A970A1C:  movl 76(%edi),%eax
+	0x3A970A1F:  testl %eax,%eax
+	0x3A970A21:  jnz-8 0x3A970A6C
+. 1266 3A970A1C 7
+. 8B 47 4C 85 C0 75 49 
+
+==== BB 1267 (0x3A970A6C) in 23074B, out 129382B, BBs exec'd 0 ====
+	0x3A970A6C:  movl 4(%eax),%eax
+	0x3A970A6F:  movl (%edi),%edx
+	0x3A970A71:  addl %edx,%eax
+	0x3A970A73:  call*l %eax
+. 1267 3A970A6C 9
+. 8B 40 04 8B 17 01 D0 FF D0 
+
+==== BB 1268 (0x3A97CCD4) in 23083B, out 129456B, BBs exec'd 0 ====
+	0x3A97CCD4:  pushl %ebp
+	0x3A97CCD5:  movl %esp,%ebp
+	0x3A97CCD7:  pushl %ebx
+	0x3A97CCD8:  call 0x3A97CCDD ; popl %ebx
+	0x3A97CCDE:  addl $0x1103, %ebx
+	0x3A97CCE4:  pushl %eax
+	0x3A97CCE5:  call 0x3A97C9F0
+. 1268 3A97CCD4 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 03 11 00 00 50 E8 06 FD FF FF 
+
+==== BB 1269 (0x3A97C9F0) in 23105B, out 129585B, BBs exec'd 0 ====
+	0x3A97C9F0:  pushl %ebp
+	0x3A97C9F1:  movl %esp,%ebp
+	0x3A97C9F3:  pushl %ebx
+	0x3A97C9F4:  call 0x3A97C9F9 ; popl %ebx
+	0x3A97C9FA:  addl $0x13E7, %ebx
+	0x3A97CA00:  pushl %ecx
+	0x3A97CA01:  cmpb $0x0, 0x30(%ebx)
+	0x3A97CA08:  jnz-8 0x3A97CA3E
+. 1269 3A97C9F0 26
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 E7 13 00 00 51 80 BB 30 00 00 00 00 75 34 
+
+==== BB 1270 (0x3A97CA0A) in 23131B, out 129731B, BBs exec'd 0 ====
+	0x3A97CA0A:  movl 0x24(%ebx),%edx
+	0x3A97CA10:  testl %edx,%edx
+	0x3A97CA12:  jnz-8 0x3A97CA43
+. 1270 3A97CA0A 10
+. 8B 93 24 00 00 00 85 D2 75 2F 
+
+==== BB 1271 (0x3A97CA43) in 23141B, out 129803B, BBs exec'd 0 ====
+	0x3A97CA43:  subl $0xC, %esp
+	0x3A97CA46:  movl 0xFFFFFF10(%ebx),%eax
+	0x3A97CA4C:  pushl %eax
+	0x3A97CA4D:  call 0x3A97C984
+. 1271 3A97CA43 15
+. 83 EC 0C 8B 83 10 FF FF FF 50 E8 32 FF FF FF 
+
+==== BB 1272 (0x3A97C984) in 23156B, out 129894B, BBs exec'd 0 ====
+	0x3A97C984:  jmp*l 0x18(%ebx)
+. 1272 3A97C984 6
+. FF A3 18 00 00 00 
+
+==== BB 1273 (0x3A97C98A) in 23162B, out 129921B, BBs exec'd 0 ====
+	0x3A97C98A:  pushl $0x18
+	0x3A97C98F:  jmp 0x3A97C944
+. 1273 3A97C98A 10
+. 68 18 00 00 00 E9 B0 FF FF FF 
+
+==== BB 1274 (0x3A97C944) in 23172B, out 129970B, BBs exec'd 0 ====
+	0x3A97C944:  pushl 0x4(%ebx)
+	0x3A97C94A:  jmp*l 0x8(%ebx)
+. 1274 3A97C944 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00 
+
+==== BB 1275 __cxa_finalize(0x3A9C8EA0) in 23184B, out 130019B, BBs exec'd 0 ====
+	0x3A9C8EA0:  pushl %ebp
+	0x3A9C8EA1:  movl %esp,%ebp
+	0x3A9C8EA3:  pushl %edi
+	0x3A9C8EA4:  pushl %esi
+	0x3A9C8EA5:  pushl %ebx
+	0x3A9C8EA6:  subl $0xC, %esp
+	0x3A9C8EA9:  call 0x3A9B3791
+. 1275 3A9C8EA0 14
+. 55 89 E5 57 56 53 83 EC 0C E8 E3 A8 FE FF 
+
+==== BB 1276 (0x3A9C8EAE) in 23198B, out 130152B, BBs exec'd 0 ====
+	0x3A9C8EAE:  addl $0xE7162, %ebx
+	0x3A9C8EB4:  movl 0xFFFF9EBC(%ebx),%edi
+	0x3A9C8EBA:  testl %edi,%edi
+	0x3A9C8EBC:  jz-8 0x3A9C8F26
+. 1276 3A9C8EAE 16
+. 81 C3 62 71 0E 00 8B BB BC 9E FF FF 85 FF 74 68 
+
+==== BB 1277 (0x3A9C8EBE) in 23214B, out 130240B, BBs exec'd 0 ====
+	0x3A9C8EBE:  movl %esi,%esi
+	0x3A9C8EC0:  movl 4(%edi),%eax
+	0x3A9C8EC3:  shll $0x4, %eax
+	0x3A9C8EC6:  leal -8(%eax,%edi,1), %esi
+	0x3A9C8ECA:  leal 8(%edi), %eax
+	0x3A9C8ECD:  cmpl %eax,%esi
+	0x3A9C8ECF:  movl %eax,-16(%ebp)
+	0x3A9C8ED2:  jnb-8 0x3A9C8EE6
+. 1277 3A9C8EBE 22
+. 89 F6 8B 47 04 C1 E0 04 8D 74 38 F8 8D 47 08 39 C6 89 45 F0 73 12 
+
+==== BB 1278 (0x3A9C8ED4) in 23236B, out 130354B, BBs exec'd 0 ====
+	0x3A9C8ED4:  jmp-8 0x3A9C8F20
+. 1278 3A9C8ED4 2
+. EB 4A 
+
+==== BB 1279 (0x3A9C8F20) in 23238B, out 130382B, BBs exec'd 0 ====
+	0x3A9C8F20:  movl (%edi),%edi
+	0x3A9C8F22:  testl %edi,%edi
+	0x3A9C8F24:  jnz-8 0x3A9C8EC0
+. 1279 3A9C8F20 6
+. 8B 3F 85 FF 75 9A 
+
+==== BB 1280 (0x3A9C8F26) in 23244B, out 130451B, BBs exec'd 0 ====
+	0x3A9C8F26:  movl 8(%ebp),%eax
+	0x3A9C8F29:  testl %eax,%eax
+	0x3A9C8F2B:  jz-8 0x3A9C8F38
+. 1280 3A9C8F26 7
+. 8B 45 08 85 C0 74 0B 
+
+==== BB 1281 (0x3A9C8F2D) in 23251B, out 130523B, BBs exec'd 0 ====
+	0x3A9C8F2D:  movl 8(%ebp),%eax
+	0x3A9C8F30:  movl %eax,(%esp,,)
+	0x3A9C8F33:  call 0x3AA648D0
+. 1281 3A9C8F2D 11
+. 8B 45 08 89 04 24 E8 98 B9 09 00 
+
+==== BB 1282 __unregister_atfork(0x3AA648D0) in 23262B, out 130591B, BBs exec'd 0 ====
+	0x3AA648D0:  pushl %ebp
+	0x3AA648D1:  movl %esp,%ebp
+	0x3AA648D3:  pushl %edi
+	0x3AA648D4:  pushl %esi
+	0x3AA648D5:  pushl %ebx
+	0x3AA648D6:  subl $0x10, %esp
+	0x3AA648D9:  movl 8(%ebp),%edi
+	0x3AA648DC:  call 0x3A9B3791
+. 1282 3AA648D0 17
+. 55 89 E5 57 56 53 83 EC 10 8B 7D 08 E8 B0 EE F4 FF 
+
+==== BB 1283 (0x3AA648E1) in 23279B, out 130737B, BBs exec'd 0 ====
+	0x3AA648E1:  addl $0x4B72F, %ebx
+	0x3AA648E7:  movl 0x2B3C(%ebx),%edx
+	0x3AA648ED:  testl %edx,%edx
+	0x3AA648EF:  jnz-32 0x3AA649D2
+. 1283 3AA648E1 20
+. 81 C3 2F B7 04 00 8B 93 3C 2B 00 00 85 D2 0F 85 DD 00 00 00 
+
+==== BB 1284 (0x3AA648F5) in 23299B, out 130825B, BBs exec'd 0 ====
+	0x3AA648F5:  movl 0xFFFF9B8C(%ebx),%ecx
+	0x3AA648FB:  leal 0xFFFF9B88(%ebx), %eax
+	0x3AA64901:  movl %eax,-16(%ebp)
+	0x3AA64904:  cmpl %eax,%ecx
+	0x3AA64906:  movl 4(%ecx),%esi
+	0x3AA64909:  jnz-8 0x3AA6491A
+. 1284 3AA648F5 22
+. 8B 8B 8C 9B FF FF 8D 83 88 9B FF FF 89 45 F0 39 C1 8B 71 04 75 0F 
+
+==== BB 1285 (0x3AA6490B) in 23321B, out 130942B, BBs exec'd 0 ====
+	0x3AA6490B:  jmp-8 0x3AA64940
+. 1285 3AA6490B 2
+. EB 33 
+
+==== BB 1286 (0x3AA64940) in 23323B, out 130970B, BBs exec'd 0 ====
+	0x3AA64940:  movl 0xFFFF9B94(%ebx),%ecx
+	0x3AA64946:  leal 0xFFFF9B90(%ebx), %eax
+	0x3AA6494C:  movl %eax,-20(%ebp)
+	0x3AA6494F:  cmpl %eax,%ecx
+	0x3AA64951:  movl 4(%ecx),%esi
+	0x3AA64954:  jnz-8 0x3AA64962
+. 1286 3AA64940 22
+. 8B 8B 94 9B FF FF 8D 83 90 9B FF FF 89 45 EC 39 C1 8B 71 04 75 0C 
+
+==== BB 1287 (0x3AA64956) in 23345B, out 131084B, BBs exec'd 0 ====
+	0x3AA64956:  jmp-8 0x3AA64980
+. 1287 3AA64956 2
+. EB 28 
+
+==== BB 1288 (0x3AA64980) in 23347B, out 131112B, BBs exec'd 0 ====
+	0x3AA64980:  movl 0xFFFF9B9C(%ebx),%ecx
+	0x3AA64986:  leal 0xFFFF9B98(%ebx), %eax
+	0x3AA6498C:  movl %eax,-24(%ebp)
+	0x3AA6498F:  cmpl %eax,%ecx
+	0x3AA64991:  movl 4(%ecx),%esi
+	0x3AA64994:  jnz-8 0x3AA649A2
+. 1288 3AA64980 22
+. 8B 8B 9C 9B FF FF 8D 83 98 9B FF FF 89 45 E8 39 C1 8B 71 04 75 0C 
+
+==== BB 1289 (0x3AA64996) in 23369B, out 131226B, BBs exec'd 0 ====
+	0x3AA64996:  jmp-8 0x3AA649C0
+. 1289 3AA64996 2
+. EB 28 
+
+==== BB 1290 (0x3AA649C0) in 23371B, out 131254B, BBs exec'd 0 ====
+	0x3AA649C0:  movl 0x2B44(%ebx),%edx
+	0x3AA649C6:  testl %edx,%edx
+	0x3AA649C8:  jnz-8 0x3AA649E2
+. 1290 3AA649C0 10
+. 8B 93 44 2B 00 00 85 D2 75 18 
+
+==== BB 1291 (0x3AA649CA) in 23381B, out 131329B, BBs exec'd 0 ====
+	0x3AA649CA:  addl $0x10, %esp
+	0x3AA649CD:  popl %ebx
+	0x3AA649CE:  popl %esi
+	0x3AA649CF:  popl %edi
+	0x3AA649D0:  popl %ebp
+	0x3AA649D1:  ret
+. 1291 3AA649CA 8
+. 83 C4 10 5B 5E 5F 5D C3 
+
+==== BB 1292 (0x3A9C8F38) in 23389B, out 131437B, BBs exec'd 0 ====
+	0x3A9C8F38:  addl $0xC, %esp
+	0x3A9C8F3B:  popl %ebx
+	0x3A9C8F3C:  popl %esi
+	0x3A9C8F3D:  popl %edi
+	0x3A9C8F3E:  popl %ebp
+	0x3A9C8F3F:  ret
+. 1292 3A9C8F38 8
+. 83 C4 0C 5B 5E 5F 5D C3 
+
+==== BB 1293 (0x3A97CA52) in 23397B, out 131545B, BBs exec'd 0 ====
+	0x3A97CA52:  addl $0x10, %esp
+	0x3A97CA55:  jmp-8 0x3A97CA14
+. 1293 3A97CA52 5
+. 83 C4 10 EB BD 
+
+==== BB 1294 (0x3A97CA14) in 23402B, out 131590B, BBs exec'd 0 ====
+	0x3A97CA14:  movl 0xFFFFFF14(%ebx),%eax
+	0x3A97CA1A:  movl (%eax),%edx
+	0x3A97CA1C:  testl %edx,%edx
+	0x3A97CA1E:  jz-8 0x3A97CA37
+. 1294 3A97CA14 12
+. 8B 83 14 FF FF FF 8B 10 85 D2 74 17 
+
+==== BB 1295 (0x3A97CA37) in 23414B, out 131675B, BBs exec'd 0 ====
+	0x3A97CA37:  movb $0x1, 0x30(%ebx)
+	0x3A97CA3E:  movl -4(%ebp),%ebx
+	0x3A97CA41:  leave	0x3A97CA42:  ret
+. 1295 3A97CA37 12
+. C6 83 30 00 00 00 01 8B 5D FC C9 C3 
+
+==== BB 1296 (0x3A97CCEA) in 23426B, out 131752B, BBs exec'd 0 ====
+	0x3A97CCEA:  popl %ecx
+	0x3A97CCEB:  popl %ebx
+	0x3A97CCEC:  leave	0x3A97CCED:  ret
+. 1296 3A97CCEA 4
+. 59 5B C9 C3 
+
+==== BB 1297 (0x3A970A75) in 23430B, out 131836B, BBs exec'd 0 ====
+	0x3A970A75:  jmp-8 0x3A970A23
+. 1297 3A970A75 2
+. EB AC 
+
+==== BB 1298 (0x3A970A23) in 23432B, out 131864B, BBs exec'd 0 ====
+	0x3A970A23:  decl 0x170(%edi)
+	0x3A970A29:  incl -16(%ebp)
+	0x3A970A2C:  movl -16(%ebp),%edx
+	0x3A970A2F:  cmpl 0xFFFFF998(%ebx),%edx
+	0x3A970A35:  jb-8 0x3A9709C6
+. 1298 3A970A23 20
+. FF 8F 70 01 00 00 FF 45 F0 8B 55 F0 3B 93 98 F9 FF FF 72 8F 
+
+==== BB 1299 (0x3A9709FF) in 23452B, out 131979B, BBs exec'd 0 ====
+	0x3A9709FF:  movl 4(%edx),%edx
+	0x3A970A02:  movl (%edi),%esi
+	0x3A970A04:  movl 0x88(%edi),%eax
+	0x3A970A0A:  addl %esi,%edx
+	0x3A970A0C:  movl %edx,-44(%ebp)
+	0x3A970A0F:  movl 4(%eax),%esi
+	0x3A970A12:  shrl $0x2, %esi
+	0x3A970A15:  movl %esi,%eax
+	0x3A970A17:  decl %esi
+	0x3A970A18:  testl %eax,%eax
+	0x3A970A1A:  jnz-8 0x3A970A58
+. 1299 3A9709FF 29
+. 8B 52 04 8B 37 8B 87 88 00 00 00 01 F2 89 55 D4 8B 70 04 C1 EE 02 89 F0 4E 85 C0 75 3C 
+
+==== BB 1300 (0x3A970A58) in 23481B, out 132134B, BBs exec'd 0 ====
+	0x3A970A58:  movl -44(%ebp),%eax
+	0x3A970A5B:  call*l (%eax,%esi,4)
+. 1300 3A970A58 6
+. 8B 45 D4 FF 14 B0 
+
+==== BB 1301 __libc_fini(0x3A9B3760) in 23487B, out 132195B, BBs exec'd 0 ====
+	0x3A9B3760:  pushl %ebp
+	0x3A9B3761:  movl %esp,%ebp
+	0x3A9B3763:  pushl %esi
+	0x3A9B3764:  pushl %ebx
+	0x3A9B3765:  call 0x3A9B3791
+. 1301 3A9B3760 10
+. 55 89 E5 56 53 E8 27 00 00 00 
+
+==== BB 1302 (0x3A9B376A) in 23497B, out 132298B, BBs exec'd 0 ====
+	0x3A9B376A:  addl $0xFC8A6, %ebx
+	0x3A9B3770:  leal 0xFFFFFFA0(%ebx), %esi
+	0x3A9B3776:  movl (%esi),%eax
+	0x3A9B3778:  testl %eax,%eax
+	0x3A9B377A:  jnz-8 0x3A9B3780
+. 1302 3A9B376A 18
+. 81 C3 A6 C8 0F 00 8D B3 A0 FF FF FF 8B 06 85 C0 75 04 
+
+==== BB 1303 (0x3A9B377C) in 23515B, out 132390B, BBs exec'd 0 ====
+	0x3A9B377C:  popl %ebx
+	0x3A9B377D:  popl %esi
+	0x3A9B377E:  popl %ebp
+	0x3A9B377F:  ret
+. 1303 3A9B377C 4
+. 5B 5E 5D C3 
+
+==== BB 1304 (0x3A970A5E) in 23519B, out 132468B, BBs exec'd 0 ====
+	0x3A970A5E:  movl %esi,%eax
+	0x3A970A60:  decl %esi
+	0x3A970A61:  testl %eax,%eax
+	0x3A970A63:  jnz-8 0x3A970A58
+. 1304 3A970A5E 7
+. 89 F0 4E 85 C0 75 F3 
+
+==== BB 1305 (0x3A970A65) in 23526B, out 132545B, BBs exec'd 0 ====
+	0x3A970A65:  movl 76(%edi),%eax
+	0x3A970A68:  testl %eax,%eax
+	0x3A970A6A:  jz-8 0x3A970A23
+. 1305 3A970A65 7
+. 8B 47 4C 85 C0 74 B7 
+
+==== BB 1306 (0x3A970A37) in 23533B, out 132617B, BBs exec'd 0 ====
+	0x3A970A37:  movl -52(%ebp),%ecx
+	0x3A970A3A:  movl %ecx,(%esp,,)
+	0x3A970A3D:  call*l 0xFFFFFBFC(%ebx)
+. 1306 3A970A37 12
+. 8B 4D CC 89 0C 24 FF 93 FC FB FF FF 
+
+==== BB 1307 rtld_lock_default_unlock_recursive(0x3A9658F0) in 23545B, out 132685B, BBs exec'd 0 ====
+	0x3A9658F0:  pushl %ebp
+	0x3A9658F1:  movl %esp,%ebp
+	0x3A9658F3:  movl 8(%ebp),%eax
+	0x3A9658F6:  decl 4(%eax)
+	0x3A9658F9:  popl %ebp
+	0x3A9658FA:  ret
+. 1307 3A9658F0 11
+. 55 89 E5 8B 45 08 FF 48 04 5D C3 
+
+==== BB 1308 (0x3A970A43) in 23556B, out 132789B, BBs exec'd 0 ====
+	0x3A970A43:  cmpb $0x0, 0xFFFFFC14(%ebx)
+	0x3A970A4A:  js-32 0x3A970B05
+. 1308 3A970A43 13
+. 80 BB 14 FC FF FF 00 0F 88 B5 00 00 00 
+
+==== BB 1309 (0x3A970A50) in 23569B, out 132855B, BBs exec'd 0 ====
+	0x3A970A50:  leal -12(%ebp), %esp
+	0x3A970A53:  popl %ebx
+	0x3A970A54:  popl %esi
+	0x3A970A55:  popl %edi
+	0x3A970A56:  popl %ebp
+	0x3A970A57:  ret
+. 1309 3A970A50 8
+. 8D 65 F4 5B 5E 5F 5D C3 
+
+==== BB 1310 (0x3A9C8CBD) in 23577B, out 132957B, BBs exec'd 0 ====
+	0x3A9C8CBD:  movl %edx,%eax
+	0x3A9C8CBF:  movl (%edx),%edx
+	0x3A9C8CC1:  testl %edx,%edx
+	0x3A9C8CC3:  movl %edx,0xFFFF9EBC(%ebx)
+	0x3A9C8CC9:  jz-8 0x3A9C8CDD
+. 1310 3A9C8CBD 14
+. 89 D0 8B 12 85 D2 89 93 BC 9E FF FF 74 12 
+
+==== BB 1311 (0x3A9C8CDD) in 23591B, out 133052B, BBs exec'd 0 ====
+	0x3A9C8CDD:  leal 0xFFFFFFF4(%ebx), %esi
+	0x3A9C8CE3:  leal 0xFFFFFFF8(%ebx), %edi
+	0x3A9C8CE9:  cmpl %edi,%esi
+	0x3A9C8CEB:  jb-8 0x3A9C8CF8
+. 1311 3A9C8CDD 16
+. 8D B3 F4 FF FF FF 8D BB F8 FF FF FF 39 FE 72 0B 
+
+==== BB 1312 (0x3A9C8CF8) in 23607B, out 133128B, BBs exec'd 0 ====
+	0x3A9C8CF8:  call*l (%esi)
+. 1312 3A9C8CF8 2
+. FF 16 
+
+==== BB 1313 _IO_cleanup(0x3A9FD870) in 23609B, out 133169B, BBs exec'd 0 ====
+	0x3A9FD870:  pushl %ebp
+	0x3A9FD871:  movl %esp,%ebp
+	0x3A9FD873:  pushl %edi
+	0x3A9FD874:  pushl %esi
+	0x3A9FD875:  pushl %ebx
+	0x3A9FD876:  subl $0xC, %esp
+	0x3A9FD879:  call 0x3A9B3791
+. 1313 3A9FD870 14
+. 55 89 E5 57 56 53 83 EC 0C E8 13 5F FB FF 
+
+==== BB 1314 (0x3A9FD87E) in 23623B, out 133302B, BBs exec'd 0 ====
+	0x3A9FD87E:  addl $0xB2792, %ebx
+	0x3A9FD884:  movl $0x0, (%esp,,)
+	0x3A9FD88B:  call 0x3A9FD660
+. 1314 3A9FD87E 18
+. 81 C3 92 27 0B 00 C7 04 24 00 00 00 00 E8 D0 FD FF FF 
+
+==== BB 1315 _IO_flush_all_lockp(0x3A9FD660) in 23641B, out 133379B, BBs exec'd 0 ====
+	0x3A9FD660:  pushl %ebp
+	0x3A9FD661:  xorl %eax, %eax
+	0x3A9FD663:  movl %esp,%ebp
+	0x3A9FD665:  pushl %edi
+	0x3A9FD666:  pushl %esi
+	0x3A9FD667:  pushl %ebx
+	0x3A9FD668:  subl $0x24, %esp
+	0x3A9FD66B:  call 0x3A9B3791
+. 1315 3A9FD660 16
+. 55 31 C0 89 E5 57 56 53 83 EC 24 E8 21 61 FB FF 
+
+==== BB 1316 (0x3A9FD670) in 23657B, out 133526B, BBs exec'd 0 ====
+	0x3A9FD670:  addl $0xB29A0, %ebx
+	0x3A9FD676:  cmpl $0x0, 0x1B8(%ebx)
+	0x3A9FD67D:  movl $0x0, -32(%ebp)
+	0x3A9FD684:  setnz %al
+	0x3A9FD687:  testl %eax,%eax
+	0x3A9FD689:  movl %eax,-36(%ebp)
+	0x3A9FD68C:  jnz-32 0x3A9FD80D
+. 1316 3A9FD670 34
+. 81 C3 A0 29 0B 00 83 BB B8 01 00 00 00 C7 45 E0 00 00 00 00 0F 95 C0 85 C0 89 45 DC 0F 85 7B 01 00 00 
+
+==== BB 1317 (0x3A9FD692) in 23691B, out 133655B, BBs exec'd 0 ====
+	0x3A9FD692:  movl 8(%ebp),%esi
+	0x3A9FD695:  testl %esi,%esi
+	0x3A9FD697:  jz-8 0x3A9FD6A7
+. 1317 3A9FD692 7
+. 8B 75 08 85 F6 74 0E 
+
+==== BB 1318 (0x3A9FD6A7) in 23698B, out 133727B, BBs exec'd 0 ====
+	0x3A9FD6A7:  movl 0xFFFF9570(%ebx),%esi
+	0x3A9FD6AD:  movl 0x9B0(%ebx),%edi
+	0x3A9FD6B3:  testl %esi,%esi
+	0x3A9FD6B5:  jnz-8 0x3A9FD6D0
+. 1318 3A9FD6A7 16
+. 8B B3 70 95 FF FF 8B BB B0 09 00 00 85 F6 75 19 
+
+==== BB 1319 (0x3A9FD6D0) in 23714B, out 133818B, BBs exec'd 0 ====
+	0x3A9FD6D0:  movl %esi,0x9B4(%ebx)
+	0x3A9FD6D6:  movl 8(%ebp),%ecx
+	0x3A9FD6D9:  testl %ecx,%ecx
+	0x3A9FD6DB:  jz-8 0x3A9FD6EE
+. 1319 3A9FD6D0 13
+. 89 B3 B4 09 00 00 8B 4D 08 85 C9 74 11 
+
+==== BB 1320 (0x3A9FD6EE) in 23727B, out 133909B, BBs exec'd 0 ====
+	0x3A9FD6EE:  movl 92(%esi),%edx
+	0x3A9FD6F1:  testl %edx,%edx
+	0x3A9FD6F3:  jle-32 0x3A9FD7C0
+. 1320 3A9FD6EE 11
+. 8B 56 5C 85 D2 0F 8E C7 00 00 00 
+
+==== BB 1321 (0x3A9FD7C0) in 23738B, out 133981B, BBs exec'd 0 ====
+	0x3A9FD7C0:  movl 16(%esi),%eax
+	0x3A9FD7C3:  cmpl %eax,20(%esi)
+	0x3A9FD7C6:  jbe-32 0x3A9FD6F9
+. 1321 3A9FD7C0 12
+. 8B 46 10 39 46 14 0F 86 2D FF FF FF 
+
+==== BB 1322 (0x3A9FD6F9) in 23750B, out 134056B, BBs exec'd 0 ====
+	0x3A9FD6F9:  movzbl 70(%esi),%ecx
+	0x3A9FD6FD:  testb %cl,%cl
+	0x3A9FD6FF:  jnz-8 0x3A9FD730
+. 1322 3A9FD6F9 8
+. 0F B6 4E 46 84 C9 75 2F 
+
+==== BB 1323 (0x3A9FD701) in 23758B, out 134133B, BBs exec'd 0 ====
+	0x3A9FD701:  testl %edx,%edx
+	0x3A9FD703:  jle-8 0x3A9FD730
+. 1323 3A9FD701 4
+. 85 D2 7E 2B 
+
+==== BB 1324 (0x3A9FD730) in 23762B, out 134192B, BBs exec'd 0 ====
+	0x3A9FD730:  movl 8(%ebp),%eax
+	0x3A9FD733:  testl %eax,%eax
+	0x3A9FD735:  jz-8 0x3A9FD748
+. 1324 3A9FD730 7
+. 8B 45 08 85 C0 74 11 
+
+==== BB 1325 (0x3A9FD748) in 23769B, out 134264B, BBs exec'd 0 ====
+	0x3A9FD748:  xorl %eax, %eax
+	0x3A9FD74A:  movl %eax,0x9B4(%ebx)
+	0x3A9FD750:  movl 0x9B0(%ebx),%eax
+	0x3A9FD756:  cmpl %eax,%edi
+	0x3A9FD758:  jnz-32 0x3A9FD6C0
+. 1325 3A9FD748 22
+. 31 C0 89 83 B4 09 00 00 8B 83 B0 09 00 00 39 C7 0F 85 62 FF FF FF 
+
+==== BB 1326 (0x3A9FD75E) in 23791B, out 134363B, BBs exec'd 0 ====
+	0x3A9FD75E:  movl 52(%esi),%esi
+	0x3A9FD761:  testl %esi,%esi
+	0x3A9FD763:  jnz-32 0x3A9FD6D0
+. 1326 3A9FD75E 11
+. 8B 76 34 85 F6 0F 85 67 FF FF FF 
+
+==== BB 1327 (0x3A9FD769) in 23802B, out 134435B, BBs exec'd 0 ====
+	0x3A9FD769:  leal 0(%esi,,), %esi
+	0x3A9FD770:  movl 8(%ebp),%eax
+	0x3A9FD773:  testl %eax,%eax
+	0x3A9FD775:  jz-8 0x3A9FD785
+. 1327 3A9FD769 14
+. 8D B4 26 00 00 00 00 8B 45 08 85 C0 74 0E 
+
+==== BB 1328 (0x3A9FD785) in 23816B, out 134520B, BBs exec'd 0 ====
+	0x3A9FD785:  movl -36(%ebp),%edi
+	0x3A9FD788:  testl %edi,%edi
+	0x3A9FD78A:  jnz-32 0x3A9FD82D
+. 1328 3A9FD785 11
+. 8B 7D DC 85 FF 0F 85 9D 00 00 00 
+
+==== BB 1329 (0x3A9FD790) in 23827B, out 134592B, BBs exec'd 0 ====
+	0x3A9FD790:  movl -32(%ebp),%eax
+	0x3A9FD793:  addl $0x24, %esp
+	0x3A9FD796:  popl %ebx
+	0x3A9FD797:  popl %esi
+	0x3A9FD798:  popl %edi
+	0x3A9FD799:  popl %ebp
+	0x3A9FD79A:  ret
+. 1329 3A9FD790 11
+. 8B 45 E0 83 C4 24 5B 5E 5F 5D C3 
+
+==== BB 1330 (0x3A9FD890) in 23838B, out 134714B, BBs exec'd 0 ====
+	0x3A9FD890:  movl 0xFFFF9570(%ebx),%esi
+	0x3A9FD896:  movl %eax,%edi
+	0x3A9FD898:  testl %esi,%esi
+	0x3A9FD89A:  jz-8 0x3A9FD8CE
+. 1330 3A9FD890 12
+. 8B B3 70 95 FF FF 89 C7 85 F6 74 32 
+
+==== BB 1331 (0x3A9FD89C) in 23850B, out 134799B, BBs exec'd 0 ====
+	0x3A9FD89C:  leal 0(%esi,,), %esi
+	0x3A9FD8A0:  movl (%esi),%eax
+	0x3A9FD8A2:  testb $0x2, %al
+	0x3A9FD8A4:  jnz-8 0x3A9FD8C0
+. 1331 3A9FD89C 10
+. 8D 74 26 00 8B 06 A8 02 75 1A 
+
+==== BB 1332 (0x3A9FD8C0) in 23860B, out 134888B, BBs exec'd 0 ====
+	0x3A9FD8C0:  movl $0xFFFFFFFF, 92(%esi)
+	0x3A9FD8C7:  movl 52(%esi),%esi
+	0x3A9FD8CA:  testl %esi,%esi
+	0x3A9FD8CC:  jnz-8 0x3A9FD8A0
+. 1332 3A9FD8C0 14
+. C7 46 5C FF FF FF FF 8B 76 34 85 F6 75 D2 
+
+==== BB 1333 (0x3A9FD8A0) in 23874B, out 134975B, BBs exec'd 0 ====
+	0x3A9FD8A0:  movl (%esi),%eax
+	0x3A9FD8A2:  testb $0x2, %al
+	0x3A9FD8A4:  jnz-8 0x3A9FD8C0
+. 1333 3A9FD8A0 6
+. 8B 06 A8 02 75 1A 
+
+==== BB 1334 (0x3A9FD8A6) in 23880B, out 135050B, BBs exec'd 0 ====
+	0x3A9FD8A6:  andl $0x1008, %eax
+	0x3A9FD8AB:  cmpl $0x8, %eax
+	0x3A9FD8AE:  jz-8 0x3A9FD8C0
+. 1334 3A9FD8A6 10
+. 25 08 10 00 00 83 F8 08 74 10 
+
+==== BB 1335 (0x3A9FD8B0) in 23890B, out 135121B, BBs exec'd 0 ====
+	0x3A9FD8B0:  movl 92(%esi),%eax
+	0x3A9FD8B3:  testl %eax,%eax
+	0x3A9FD8B5:  jnz-8 0x3A9FD8D8
+. 1335 3A9FD8B0 7
+. 8B 46 5C 85 C0 75 21 
+
+==== BB 1336 (0x3A9FD8B7) in 23897B, out 135193B, BBs exec'd 0 ====
+	0x3A9FD8B7:  movl %esi,%esi
+	0x3A9FD8B9:  leal 0(%edi,,), %edi
+	0x3A9FD8C0:  movl $0xFFFFFFFF, 92(%esi)
+	0x3A9FD8C7:  movl 52(%esi),%esi
+	0x3A9FD8CA:  testl %esi,%esi
+	0x3A9FD8CC:  jnz-8 0x3A9FD8A0
+. 1336 3A9FD8B7 23
+. 89 F6 8D BC 27 00 00 00 00 C7 46 5C FF FF FF FF 8B 76 34 85 F6 75 D2 
+
+==== BB 1337 (0x3A9FD8CE) in 23920B, out 135297B, BBs exec'd 0 ====
+	0x3A9FD8CE:  addl $0xC, %esp
+	0x3A9FD8D1:  movl %edi,%eax
+	0x3A9FD8D3:  popl %ebx
+	0x3A9FD8D4:  popl %esi
+	0x3A9FD8D5:  popl %edi
+	0x3A9FD8D6:  popl %ebp
+	0x3A9FD8D7:  ret
+. 1337 3A9FD8CE 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3 
+
+==== BB 1338 (0x3A9C8CFA) in 23930B, out 135415B, BBs exec'd 0 ====
+	0x3A9C8CFA:  addl $0x4, %esi
+	0x3A9C8CFD:  cmpl %edi,%esi
+	0x3A9C8CFF:  jb-8 0x3A9C8CF8
+. 1338 3A9C8CFA 7
+. 83 C6 04 39 FE 72 F7 
+
+==== BB 1339 (0x3A9C8D01) in 23937B, out 135482B, BBs exec'd 0 ====
+	0x3A9C8D01:  jmp-8 0x3A9C8CED
+. 1339 3A9C8D01 2
+. EB EA 
+
+==== BB 1340 (0x3A9C8CED) in 23939B, out 135510B, BBs exec'd 0 ====
+	0x3A9C8CED:  movl 8(%ebp),%eax
+	0x3A9C8CF0:  movl %eax,(%esp,,)
+	0x3A9C8CF3:  call 0x3AA2658C
+. 1340 3A9C8CED 11
+. 8B 45 08 89 04 24 E8 94 D8 05 00 
+
+==== BB 1341 __GI__exit(0x3AA2658C) in 23950B, out 135578B, BBs exec'd 0 ====
+	0x3AA2658C:  movl 4(%esp,,),%ebx
+	0x3AA26590:  movl $0xFC,%eax
+	0x3AA26595:  int $0x80
+. 1341 3AA2658C 11
+. 8B 5C 24 04 B8 FC 00 00 00 CD 80 
+
+==6382== 
diff --git a/VEX/orig_x86/fpu_mmx_sse.orig b/VEX/orig_x86/fpu_mmx_sse.orig
new file mode 100644
index 0000000..3ce2487
--- /dev/null
+++ b/VEX/orig_x86/fpu_mmx_sse.orig
@@ -0,0 +1,35448 @@
+
+. 0 3A965880 7
+. 89 E0 E8 F9 03 00 00
+
+. 0 3A965C80 14
+. 55 89 E5 57 56 53 83 EC 54 E8 9D FC 00 00
+
+. 0 3A97592B 4
+. 8B 1C 24 C3
+
+. 0 3A965C8E 67
+. 81 C3 DE 29 01 00 89 45 D0 8B 93 00 00 00 00 8D 83 50 FF FF FF 29 D0 89 83 E4 F9 FF FF 01 D0 89 83 EC F9 FF FF 8D 83 94 F9 FF FF 89 45 C8 83 C0 50 8B 7D C8 89 45 CC 8B 70 08 83 C7 68 8B 16 85 D2 75 10
+
+. 0 3A965CE1 5
+. 83 FA 21 7E ED
+
+. 0 3A965CD3 14
+. 89 34 97 83 C6 08 8B 06 85 C0 89 C2 74 2F
+
+. 0 3A965CE6 16
+. B8 FF FF FF 6F 29 D0 83 F8 0F 0F 87 EA 01 00 00
+
+. 0 3A965CF6 21
+. B8 21 00 00 70 29 D0 89 34 87 83 C6 08 8B 06 85 C0 89 C2 75 D6
+
+. 0 3A965D0B 14
+. 90 8D 74 26 00 8B 4D CC 8B 11 85 D2 74 57
+
+. 0 3A965D19 7
+. 8B 47 10 85 C0 74 03
+
+. 0 3A965D20 10
+. 01 50 04 8B 47 0C 85 C0 74 03
+
+. 0 3A965D2A 10
+. 01 50 04 8B 47 14 85 C0 74 03
+
+. 0 3A965D34 10
+. 01 50 04 8B 47 18 85 C0 74 03
+
+. 0 3A965D3E 10
+. 01 50 04 8B 47 44 85 C0 74 03
+
+. 0 3A965D48 10
+. 01 50 04 8B 47 5C 85 C0 74 03
+
+. 0 3A965D52 13
+. 01 50 04 8B 87 C4 00 00 00 85 C0 74 11
+
+. 0 3A965D5F 27
+. 01 50 04 8D B4 26 00 00 00 00 8D BC 27 00 00 00 00 8B 83 E4 F9 FF FF 85 C0 75 0E
+
+. 0 3A965D88 44
+. C7 45 DC 00 00 00 00 8B 4D C8 8B 83 40 FA FF FF C7 45 E4 00 00 00 00 83 C1 50 85 C0 C7 45 D8 00 00 00 00 C7 45 D4 00 00 00 00 74 12
+
+. 0 3A965DB4 28
+. 8B 40 04 89 45 D4 8B 83 44 FA FF FF 8B 40 04 89 45 D8 8B BB 4C FA FF FF 85 FF 74 0C
+
+. 0 3A965DD0 51
+. 8B 83 04 FA FF FF 8B 40 04 01 45 D8 8B 55 D4 8B 75 D8 8D 04 16 89 45 C4 8B 45 C8 8B 78 50 8B 41 30 8B 40 04 89 45 C0 8B 81 B4 00 00 00 31 C9 85 C0 74 03
+
+. 0 3A965E03 12
+. 8B 48 04 89 F0 C1 E8 03 39 C8 76 02
+
+. 0 3A965E0F 9
+. 89 C8 8D 0C C2 39 CA 73 17
+
+. 0 3A965E18 23
+. 90 8D B4 26 00 00 00 00 8B 32 89 F8 83 C2 08 01 F0 01 38 39 CA 72 F1
+
+. 0 3A965E20 15
+. 8B 32 89 F8 83 C2 08 01 F0 01 38 39 CA 72 F1
+
+. 0 3A965E2F 5
+. 3B 4D C4 73 4D
+
+. 0 3A965E34 47
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 51 04 8B 75 C0 89 D0 C1 E8 08 C1 E0 04 01 F0 8B 31 01 FE 89 75 BC 0F B6 F2 31 D2 66 83 78 0E 00 74 06
+
+. 0 3A965E63 17
+. 8B 93 E4 F9 FF FF 03 50 04 8D 46 FA 83 F8 01 77 05
+
+. 0 3A965E74 13
+. 8B 45 BC 89 10 83 C1 08 3B 4D C4 72 BF
+
+. 0 3A965E40 35
+. 8B 51 04 8B 75 C0 89 D0 C1 E8 08 C1 E0 04 01 F0 8B 31 01 FE 89 75 BC 0F B6 F2 31 D2 66 83 78 0E 00 74 06
+
+. 0 3A965E81 16
+. 8B 45 C8 BE 01 00 00 00 83 C0 50 E8 EF 5E 00 00
+
+. 0 3A96BD80 12
+. 55 89 C1 8B 40 28 89 E5 85 C0 74 1D
+
+. 0 3A96BD8C 31
+. 8B 40 04 8B 10 83 C0 08 89 81 68 01 00 00 8D 04 90 89 91 64 01 00 00 89 81 6C 01 00 00 5D C3
+
+. 0 3A965E91 57
+. 89 B3 54 FB FF FF 8D 83 94 C9 FE FF 89 83 78 FB FF FF 8D 83 28 02 00 00 89 83 7C FB FF FF 8D 83 24 DD FE FF 89 44 24 04 8B 55 D0 89 AB 28 01 00 00 89 14 24 E8 F6 CE 00 00
+
+. 0 3A972DC0 44
+. 55 31 D2 89 E5 57 31 FF 31 C9 56 31 F6 53 81 EC 04 02 00 00 8B 45 08 89 95 1C FE FF FF 31 D2 89 95 0C FE FF FF 8B 10 E8 3F 2B 00 00
+
+. 0 3A972DEC 74
+. 81 C3 80 58 00 00 89 83 28 01 00 00 83 C0 04 89 93 0C 02 00 00 8D 14 90 89 B5 14 FE FF FF 8B 72 04 89 BD 18 FE FF FF 31 FF 85 F6 89 83 38 00 00 00 8D 42 04 89 8D 10 FE FF FF 89 C1 89 BD 08 FE FF FF 89 83 00 02 00 00 74 09
+
+. 0 3A972E36 9
+. 83 C1 04 8B 11 85 D2 75 F7
+
+. 0 3A972E3F 37
+. 83 C1 04 8D 83 14 D2 FE FF 89 CA 89 8B 34 01 00 00 8B 09 89 85 20 FE FF FF 31 C0 85 C9 89 83 20 FC FF FF 74 52
+
+. 0 3A972E64 20
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8D 41 FD 83 F8 1E 77 28
+
+. 0 3A972EA0 22
+. 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA
+
+. 0 3A972E70 8
+. 8D 41 FD 83 F8 1E 77 28
+
+. 0 3A972E78 11
+. 8B 84 83 54 DB FF FF 01 D8 FF E0
+
+. 0 3A9731AC 14
+. 8B 42 04 89 83 54 FC FF FF E9 E6 FC FF FF
+
+. 0 3A9731F2 14
+. 8B 42 04 89 83 18 FC FF FF E9 97 FC FF FF
+
+. 0 3A972E97 31
+. 8B 0A 8D B4 26 00 00 00 00 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA
+
+. 0 3A97319E 14
+. 8B 42 04 89 83 30 FC FF FF E9 EB FC FF FF
+
+. 0 3A97320E 14
+. 8B 42 04 89 85 1C FE FF FF E9 84 FC FF FF
+
+. 0 3A973200 14
+. 8B 42 04 89 85 18 FE FF FF E9 92 FC FF FF
+
+. 0 3A9731E4 14
+. 8B 42 04 89 85 20 FE FF FF E9 A5 FC FF FF
+
+. 0 3A9731D6 14
+. 8B 42 04 31 85 14 FE FF FF E9 BC FC FF FF
+
+. 0 3A9731C8 14
+. 8B 42 04 31 85 10 FE FF FF E9 CA FC FF FF
+
+. 0 3A972E83 51
+. B8 FF FF FF FF 89 85 0C FE FF FF 8B 42 04 89 83 30 01 00 00 8B 0A 8D B4 26 00 00 00 00 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA
+
+. 0 3A9731BA 14
+. 8B 42 04 89 83 20 FC FF FF E9 D8 FC FF FF
+
+. 0 3A972EB6 23
+. 8D 55 B4 8D 85 24 FE FF FF 89 95 04 FE FF FF 89 04 24 E8 83 1E 00 00
+
+. 0 3A974D50 13
+. 89 DA 8B 5C 24 04 B8 7A 00 00 00 CD 80
+
+. 0 3A974D5D 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3A974D66 1
+. C3
+
+. 0 3A972ECD 8
+. 85 C0 0F 85 DC 01 00 00
+
+. 0 3A972ED5 39
+. 8D 85 A6 FE FF FF 89 85 04 FE FF FF 8B B5 04 FE FF FF 31 C0 31 FF 89 85 00 FE FF FF 0F B6 16 88 D0 2C 30 3C 09 77 69
+
+. 0 3A972EFC 28
+. 8D 74 26 00 46 0F BE C2 8D 50 D0 0F B6 0E 88 C8 2C 30 88 8D FF FD FF FF 3C 09 77 28
+
+. 0 3A972F40 17
+. FF 85 00 FE FF FF C1 E7 08 09 D7 46 80 F9 2E 75 0B
+
+. 0 3A972F51 11
+. 0F B6 16 88 D0 2C 30 3C 09 76 A4
+
+. 0 3A972F00 24
+. 46 0F BE C2 8D 50 D0 0F B6 0E 88 C8 2C 30 88 8D FF FD FF FF 3C 09 77 28
+
+. 0 3A972F5C 9
+. 83 BD 00 FE FF FF 02 7F 12
+
+. 0 3A972F77 12
+. 81 FF 04 02 02 00 0F 86 BD 02 00 00
+
+. 0 3A972F83 15
+. 83 BD 0C FE FF FF FF 89 BB 1C FC FF FF 74 58
+
+. 0 3A972FEA 10
+. 8B BB 18 FC FF FF 85 FF 75 0B
+
+. 0 3A972FFF 10
+. 8B B3 94 FD FF FF 85 F6 74 16
+
+. 0 3A97301F 12
+. C7 04 24 00 00 00 00 E8 75 08 00 00
+
+. 0 3A9738A0 12
+. 55 89 E5 53 8B 4D 08 E8 7F 20 00 00
+
+. 0 3A9738AC 15
+. 81 C3 C0 4D 00 00 87 CB B8 2D 00 00 00 CD 80
+
+. 0 3A9738BB 14
+. 87 CB 89 83 44 01 00 00 31 D2 39 C8 72 05
+
+. 0 3A9738C9 5
+. 5B 89 D0 5D C3
+
+. 0 3A97302B 10
+. 8B 83 20 FC FF FF 85 C0 74 21
+
+. 0 3A973035 5
+. 80 38 00 75 0A
+
+. 0 3A973044 4
+. 85 C0 74 0E
+
+. 0 3A973048 8
+. 89 04 24 E8 80 22 00 00
+
+. 0 3A9752D0 13
+. 8B 44 24 04 BA 03 00 00 00 21 C2 74 24
+
+. 0 3A9752DD 2
+. 7A 17
+
+. 0 3A9752DF 8
+. 38 30 0F 84 9F 00 00 00
+
+. 0 3A9752E7 9
+. 40 38 30 0F 84 96 00 00 00
+
+. 0 3A9752F0 6
+. 40 83 F2 02 74 0B
+
+. 0 3A975301 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 58
+
+. 0 3A975369 14
+. 83 E8 04 81 E9 FF FE FE FE 80 F9 00 74 0F
+
+. 0 3A975377 5
+. 40 84 ED 74 0A
+
+. 0 3A97537C 9
+. C1 E9 10 40 80 F9 00 74 01
+
+. 0 3A975386 5
+. 2B 44 24 04 C3
+
+. 0 3A973050 18
+. 89 83 24 FC FF FF C7 04 24 00 00 00 00 E8 7E 08 00 00
+
+. 0 3A9738E0 20
+. 55 89 E5 83 EC 10 89 5D F4 89 7D FC 8B 7D 08 E8 37 20 00 00
+
+. 0 3A9738F4 19
+. 81 C3 78 4D 00 00 89 75 F8 8B B3 44 01 00 00 85 F6 74 0A
+
+. 0 3A973907 10
+. 8B 83 2C 01 00 00 85 C0 74 1B
+
+. 0 3A97392C 6
+. 85 FF 89 F2 75 0F
+
+. 0 3A973932 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A973062 16
+. 89 C1 8D 83 28 02 00 00 39 C1 0F 84 B4 01 00 00
+
+. 0 3A973072 14
+. 8B 83 30 01 00 00 85 C0 0F 85 9C 01 00 00
+
+. 0 3A973080 32
+. 8D 85 20 FE FF FF 89 44 24 08 8B 85 18 FE FF FF 89 44 24 04 8B 95 1C FE FF FF 89 14 24 FF 55 0C
+
+. 0 3A966390 17
+. 55 89 E5 57 56 53 81 EC C8 01 00 00 E8 8A F5 00 00
+
+. 0 3A9663A1 119
+. 81 C3 CB 22 01 00 C6 85 0B FF FF FF 00 8D 83 54 D2 FE FF 8B BB 30 01 00 00 89 83 E0 F9 FF FF 8D 83 74 D2 FE FF 89 83 F8 FB FF FF 8D 83 84 D2 FE FF 89 83 FC FB FF FF 8D 83 D4 9F FF FF 89 83 04 FC FF FF 8B 83 00 02 00 00 C6 85 03 FF FF FF 00 C6 85 02 FF FF FF 00 89 85 48 FF FF FF 31 C0 89 85 F8 FE FF FF 31 C0 85 FF 89 85 F4 FE FF FF 8D 83 BA D5 FF FF 74 03
+
+. 0 3A96641B 17
+. 89 83 80 FD FF FF 8D 85 48 FF FF FF E8 B4 CE 00 00
+
+. 0 3A9732E0 16
+. 55 89 C1 89 E5 56 8B 10 31 F6 8B 02 85 C0 74 0E
+
+. 0 3A9732F0 5
+. 80 38 4C 74 0E
+
+. 0 3A973303 6
+. 80 78 01 44 75 EC
+
+. 0 3A9732F5 9
+. 83 C2 04 8B 02 85 C0 75 F2
+
+. 0 3A973309 9
+. 80 78 02 5F 8D 76 00 75 E3
+
+. 0 3A973312 10
+. 83 C2 04 8D 70 03 89 11 EB E2
+
+. 0 3A9732FE 5
+. 89 F0 5E 5D C3
+
+. 0 3A96642C 10
+. 89 85 FC FE FF FF 85 C0 74 72
+
+. 0 3A966436 27
+. 8B 95 FC FE FF FF 31 F6 0F B6 02 84 C0 0F 95 C2 3C 3D 0F 95 C0 21 D0 A8 01 74 34
+
+. 0 3A966451 28
+. 8B 85 FC FE FF FF 46 0F B6 0C 06 84 C9 0F 95 C0 80 F9 3D 0F 95 C2 21 D0 A8 01 75 E4
+
+. 0 3A96646D 5
+. 80 F9 3D 75 AF
+
+. 0 3A966472 8
+. 8D 46 FC 83 F8 10 77 A7
+
+. 0 3A96647A 11
+. 8B 84 83 D0 D5 FF FF 01 D8 FF E0
+
+. 0 3A967760 20
+. FC 8B B5 FC FE FF FF B9 0C 00 00 00 8D BB 2D F0 FF FF F3 A6
+
+. 0 3A967772 2
+. F3 A6
+
+. 0 3A967774 6
+. 0F 85 0B 03 00 00
+
+. 0 3A96777A 20
+. 8B 85 FC FE FF FF 83 C0 0D 89 83 AC 00 00 00 E9 93 EC FF FF
+
+. 0 3A966421 11
+. 8D 85 48 FF FF FF E8 B4 CE 00 00
+
+. 0 3A9678A7 20
+. FC 8B B5 FC FE FF FF B9 07 00 00 00 8D BB 69 ED FF FF F3 A6
+
+. 0 3A9678BB 6
+. 0F 85 70 02 00 00
+
+. 0 3A967B31 20
+. FC 8B B5 FC FE FF FF B9 07 00 00 00 8D BB CF ED FF FF F3 A6
+
+. 0 3A967B43 2
+. F3 A6
+
+. 0 3A967B45 6
+. 0F 85 90 03 00 00
+
+. 0 3A967B4B 20
+. 8B 85 FC FE FF FF 83 C0 08 89 83 B0 00 00 00 E9 C2 E8 FF FF
+
+. 0 3A9664A8 26
+. 8B 85 F8 FE FF FF 89 85 50 FF FF FF 8B 83 30 01 00 00 85 C0 0F 85 6F 15 00 00
+
+. 0 3A9664C2 14
+. 8B BB B8 00 00 00 85 FF 0F 84 B0 00 00 00
+
+. 0 3A966580 28
+. 8B 55 10 B8 01 00 00 00 89 83 34 00 00 00 8D 83 14 D2 FE FF 39 02 0F 84 FD 04 00 00
+
+. 0 3A96659C 22
+. C7 04 24 00 00 00 00 8D 93 F6 ED FF FF 31 C9 89 D0 E8 1E 64 00 00
+
+. 0 3A96C9D0 17
+. 55 89 E5 57 56 53 83 EC 2C 8B 7D 08 E8 4A 8F 00 00
+
+. 0 3A96C9E1 23
+. 81 C3 8B BC 00 00 89 55 EC 89 4D E8 89 45 F0 89 14 24 E8 D8 88 00 00
+
+. 0 3A96C9F8 28
+. 8D 50 01 B9 01 00 00 00 05 21 02 00 00 89 55 E4 89 4C 24 04 89 04 24 E8 18 8E FF FF
+
+. 0 3A96582C 6
+. FF A3 14 00 00 00
+
+. 0 3A973870 18
+. 55 89 E5 53 83 EC 04 8B 45 0C 8B 4D 08 E8 A9 20 00 00
+
+. 0 3A973882 17
+. 81 C3 EA 4D 00 00 0F AF C1 89 04 24 E8 89 1F FF FF
+
+. 0 3A96581C 6
+. FF A3 10 00 00 00
+
+. 0 3A973800 15
+. 55 89 E5 53 83 EC 08 8B 45 08 E8 1C 21 00 00
+
+. 0 3A97380F 22
+. 81 C3 5D 4E 00 00 C7 04 24 08 00 00 00 89 44 24 04 E8 E7 1F FF FF
+
+. 0 3A96580C 6
+. FF A3 0C 00 00 00
+
+. 0 3A973700 26
+. 55 89 E5 83 EC 24 89 5D F4 89 75 F8 8B 75 08 89 7D FC 8B 7D 0C E8 11 22 00 00
+
+. 0 3A97371A 16
+. 81 C3 52 4F 00 00 8B 8B 3C 01 00 00 85 C9 75 20
+
+. 0 3A97372A 54
+. 8B 83 18 FC FF FF 8D 93 28 02 00 00 89 93 38 01 00 00 8D 4C 02 FF F7 D8 21 C1 89 8B 3C 01 00 00 8B 83 38 01 00 00 01 F0 8D 50 FF F7 DE 21 F2 8D 04 17 39 C8 73 20
+
+. 0 3A973760 30
+. 89 93 40 01 00 00 89 D0 8D 14 17 89 93 38 01 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A973825 6
+. 83 C4 08 5B 5D C3
+
+. 0 3A973893 4
+. 5A 5B 5D C3
+
+. 0 3A96CA14 16
+. 89 45 E0 8B 55 E0 31 C0 85 D2 0F 84 05 02 00 00
+
+. 0 3A96CA24 44
+. 8B 75 E0 8B 4D E0 8B 55 E4 81 C6 14 02 00 00 89 C8 05 20 02 00 00 89 71 14 89 54 24 08 8B 4D EC 89 04 24 89 4C 24 04 E8 20 8E 00 00
+
+. 0 3A975870 22
+. 57 56 8B 7C 24 0C 8B 74 24 10 8B 4C 24 14 89 F8 FC 83 F9 20 76 56
+
+. 0 3A9758DC 2
+. F3 A4
+
+. 0 3A9758DE 3
+. 5E 5F C3
+
+. 0 3A96CA50 96
+. C7 46 08 01 00 00 00 8B 55 E0 BE 04 00 00 00 8B 4D F0 89 82 14 02 00 00 89 4A 04 8B 4D E0 0F B6 55 E8 0F B6 81 74 01 00 00 89 B9 60 01 00 00 80 E2 03 89 B1 AC 01 00 00 24 FC 08 D0 88 81 74 01 00 00 89 CA 8B 83 94 F9 FF FF 81 C2 9C 01 00 00 89 91 B0 01 00 00 31 C9 85 C0 0F 84 83 01 00 00
+
+. 0 3A96CC33 37
+. 8B 45 E0 89 83 94 F9 FF FF FF 83 98 F9 FF FF 83 83 C4 F9 FF FF 01 83 93 C8 F9 FF FF 00 85 FF 0F 85 AF FE FF FF
+
+. 0 3A96CC58 8
+. 8B 7D E0 E9 CC FE FF FF
+
+. 0 3A96CB2C 10
+. 85 C9 8D 87 50 01 00 00 74 04
+
+. 0 3A96CB3A 37
+. 89 04 8A 8B 45 E0 8B 55 E0 8B 4D F0 05 50 01 00 00 89 82 B4 01 00 00 0F B6 09 84 C9 88 4D DF 0F 84 C7 00 00 00
+
+. 0 3A96CC26 13
+. 8B 45 E0 83 C4 2C 5B 5E 5F 5D C2 04 00
+
+. 0 3A9665B2 17
+. 8B 8B 94 F9 FF FF 83 EC 04 85 C9 0F 84 05 14 00 00
+
+. 0 3A9665C3 76
+. 8B 75 08 8B 45 0C 89 B1 44 01 00 00 8B 75 10 66 89 81 4C 01 00 00 8B 06 89 81 48 01 00 00 FF 81 70 01 00 00 31 C0 8B 75 08 89 81 98 01 00 00 B8 FF FF FF FF 89 81 94 01 00 00 C1 65 0C 05 8B 55 0C 8D 04 32 39 C6 0F 83 AA 00 00 00
+
+. 0 3A96660F 8
+. 89 85 E4 FE FF FF EB 23
+
+. 0 3A96663A 7
+. 8B 06 83 F8 03 74 2F
+
+. 0 3A966641 5
+. 83 F8 03 76 D1
+
+. 0 3A966646 11
+. 3D 51 E5 74 64 0F 84 6F 06 00 00
+
+. 0 3A966651 11
+. 3D 51 E5 74 64 0F 87 3F 06 00 00
+
+. 0 3A96665C 6
+. 83 F8 06 90 75 CD
+
+. 0 3A966662 12
+. 8B 7E 08 8B 45 08 29 F8 89 01 EB C1
+
+. 0 3A96662F 11
+. 83 C6 20 3B B5 E4 FE FF FF 73 7F
+
+. 0 3A966670 39
+. 8B 01 8B 56 08 01 C2 89 93 94 00 00 00 8D 83 94 00 00 00 89 83 F8 F9 FF FF 8B 83 EC F9 FF FF 85 C0 0F 85 EF 0E 00 00
+
+. 0 3A967586 12
+. C6 85 0B FF FF FF 01 E9 9D F0 FF FF
+
+. 0 3A966617 9
+. 83 F8 01 0F 84 76 05 00 00
+
+. 0 3A966B96 24
+. 8B 46 1C 8B 56 08 8B 39 48 F7 D0 21 D0 8D 04 38 39 81 94 01 00 00 76 09
+
+. 0 3A966BAE 29
+. 89 81 94 01 00 00 8B 56 08 8D 04 3A 8B 7E 14 01 F8 39 81 98 01 00 00 0F 83 64 FA FF FF
+
+. 0 3A966BCB 11
+. 89 81 98 01 00 00 E9 59 FA FF FF
+
+. 0 3A966BB7 20
+. 8D 04 3A 8B 7E 14 01 F8 39 81 98 01 00 00 0F 83 64 FA FF FF
+
+. 0 3A966620 5
+. 83 F8 02 75 0A
+
+. 0 3A966625 21
+. 8B 46 08 8B 11 01 D0 89 41 08 83 C6 20 3B B5 E4 FE FF FF 73 7F
+
+. 0 3A966CC0 14
+. 8B 46 18 89 83 00 FC FF FF E9 61 F9 FF FF
+
+. 0 3A9666B9 10
+. 8B 91 98 01 00 00 85 D2 75 0B
+
+. 0 3A9666CE 10
+. 8B 83 F8 F9 FF FF 85 C0 75 1C
+
+. 0 3A9666F4 13
+. 80 BD 02 FF FF FF 00 0F 85 36 01 00 00
+
+. 0 3A966701 23
+. 89 8D E0 FE FF FF 8B 71 08 85 F6 89 B5 DC FE FF FF 0F 84 14 01 00 00
+
+. 0 3A966718 9
+. 8B 36 8D 79 18 85 F6 75 20
+
+. 0 3A966741 5
+. 83 FE 21 7E DD
+
+. 0 3A966723 30
+. 8B 95 DC FE FF FF 89 14 B7 83 85 DC FE FF FF 08 8B B5 DC FE FF FF 8B 06 85 C0 89 C6 74 27
+
+. 0 3A966746 16
+. B8 FF FF FF 6F 29 F0 83 F8 0F 0F 87 80 04 00 00
+
+. 0 3A966756 18
+. B8 21 00 00 70 29 F0 8B B5 DC FE FF FF 89 34 87 EB C4
+
+. 0 3A96672C 21
+. 83 85 DC FE FF FF 08 8B B5 DC FE FF FF 8B 06 85 C0 89 C6 74 27
+
+. 0 3A966768 6
+. 8B 11 85 D2 74 59
+
+. 0 3A9667C7 7
+. 8B 57 78 85 D2 74 30
+
+. 0 3A9667FE 10
+. 8B 97 98 00 00 00 85 D2 74 16
+
+. 0 3A96681E 7
+. 8B 47 74 85 C0 74 07
+
+. 0 3A96682C 11
+. 8B 41 28 85 C0 0F 85 C9 04 00 00
+
+. 0 3A966D00 7
+. 89 C8 E8 79 50 00 00
+
+. 0 3A966D07 5
+. E9 2B FB FF FF
+
+. 0 3A966837 13
+. 83 BD 50 FF FF FF 02 0F 84 1F 15 00 00
+
+. 0 3A966844 13
+. 80 BD 02 FF FF FF 00 0F 84 9F 04 00 00
+
+. 0 3A966CF0 11
+. 8B 83 AC 00 00 00 E8 35 3E 00 00
+
+. 0 3A96AB30 14
+. 55 89 E5 57 56 53 83 EC 2C E8 ED AD 00 00
+
+. 0 3A96AB3E 41
+. 81 C3 2E DB 00 00 89 45 F0 8B 93 24 FC FF FF 8D 8B CC 00 00 00 8B 83 20 FC FF FF 89 0C 24 8D 8B C8 00 00 00 E8 D9 7B 00 00
+
+. 0 3A972740 14
+. 55 89 E5 57 56 53 83 EC 38 E8 DD 31 00 00
+
+. 0 3A97274E 45
+. 81 C3 1E 5F 00 00 89 55 EC 85 C0 8B 93 58 FC FF FF 89 45 F0 0F 95 C0 31 FF 23 93 54 FC FF FF 0F B6 C0 89 4D E8 89 45 E0 89 55 E4 74 25
+
+. 0 3A9727A0 11
+. 8B 45 E0 85 C0 0F 84 20 01 00 00
+
+. 0 3A9727AB 33
+. 8B 55 E0 31 F6 31 FF 8D 04 D5 10 00 00 00 29 C4 8B 45 E4 8D 4C 24 1B 83 E1 F0 85 C0 89 4D D8 74 40
+
+. 0 3A97280C 7
+. 8B 7D F0 85 FF 74 10
+
+. 0 3A972813 26
+. 8B 45 D8 8B 55 F0 8B 4D EC 89 14 F0 89 4C F0 04 83 7D E0 01 0F 84 D0 00 00 00
+
+. 0 3A9728FD 36
+. 8B 75 D8 8B 56 04 42 0F B6 4D E0 BE 01 00 00 00 89 F0 D3 E0 8B 4D E8 89 01 8D 04 C2 89 04 24 E8 FB 2E FF FF
+
+. 0 3A97374A 22
+. 8B 83 38 01 00 00 01 F0 8D 50 FF F7 DE 21 F2 8D 04 17 39 C8 73 20
+
+. 0 3A972921 7
+. 89 45 DC 85 C0 74 8C
+
+. 0 3A972928 10
+. 83 7D E0 01 0F 84 D9 00 00 00
+
+. 0 3A972A0B 64
+. 8B 75 E8 8B 55 DC 8B 4D D8 8B 06 8D 04 C2 89 02 8B 41 04 40 89 42 04 8B 06 C7 42 0C 00 00 00 00 8D 04 C2 89 42 08 8B 41 04 89 44 24 08 8B 01 89 44 24 04 8B 06 8D 04 C2 89 04 24 E8 25 2D 00 00
+
+. 0 3A975770 22
+. 57 56 8B 7C 24 0C 8B 74 24 10 8B 4C 24 14 89 F8 FC 83 F9 20 76 52
+
+. 0 3A9757D8 2
+. F3 A4
+
+. 0 3A9757DA 5
+. 89 F8 5E 5F C3
+
+. 0 3A972A4B 25
+. C6 00 2F 8B 55 08 C7 06 02 00 00 00 8B 75 DC 8B 46 04 89 02 E9 8C FE FF FF
+
+. 0 3A9728F0 13
+. 8B 45 DC 8D 65 F4 5B 5E 5F 5D C2 04 00
+
+. 0 3A96AB67 21
+. 89 83 C4 00 00 00 83 EC 04 C7 04 24 0C 00 00 00 E8 A0 AC FF FF
+
+. 0 3A96AB7C 16
+. 89 83 D0 00 00 00 85 C0 89 C6 0F 84 93 02 00 00
+
+. 0 3A96AB8C 42
+. 8B 93 C8 00 00 00 B8 CD CC CC CC 8D 14 95 27 00 00 00 F7 E2 C1 EA 04 8D 14 92 89 55 E8 89 D0 C1 E0 06 89 04 24 E8 66 AC FF FF
+
+. 0 3A96ABB6 24
+. 89 06 8B 83 D0 00 00 00 8D 93 90 E3 FF FF 8B 00 85 C0 0F 84 57 02 00 00
+
+. 0 3A96ABCE 106
+. 89 83 DC F9 FF FF 31 D2 8B 8B C8 00 00 00 89 93 D4 00 00 00 89 C2 8D 83 1C D6 FF FF 89 45 EC 8B 45 E8 8D BB 17 F0 FF FF C7 45 D8 00 00 00 00 C1 E0 02 89 7D E0 89 45 E4 89 16 8B 7D E0 83 C6 04 C7 42 08 00 00 00 00 8B 45 EC 89 7A 04 8B 7D D8 89 42 0C 8B BC BB 14 D6 FF FF 8D 44 07 01 89 45 EC 31 C0 39 C8 89 7A 10 73 13
+
+. 0 3A96AC38 19
+. 90 8D B4 26 00 00 00 00 31 FF 89 7C 82 14 40 39 C8 72 F5
+
+. 0 3A96AC40 11
+. 31 FF 89 7C 82 14 40 39 C8 72 F5
+
+. 0 3A96AC4B 16
+. FF 45 D8 C7 45 DC 00 00 00 00 83 7D D8 02 74 08
+
+. 0 3A96AC5B 24
+. 8B 7D E4 01 D7 89 7D DC 8B 45 DC 89 02 8B 45 E4 01 C2 83 7D D8 01 76 93
+
+. 0 3A96AC06 50
+. 89 16 8B 7D E0 83 C6 04 C7 42 08 00 00 00 00 8B 45 EC 89 7A 04 8B 7D D8 89 42 0C 8B BC BB 14 D6 FF FF 8D 44 07 01 89 45 EC 31 C0 39 C8 89 7A 10 73 13
+
+. 0 3A96AC63 16
+. 8B 45 DC 89 02 8B 45 E4 01 C2 83 7D D8 01 76 93
+
+. 0 3A96AC73 27
+. C7 06 00 00 00 00 8B B3 94 F9 FF FF BF 09 00 00 00 89 BB D8 00 00 00 85 F6 74 2B
+
+. 0 3A96AC8E 10
+. 8B 96 8C 00 00 00 85 D2 75 4F
+
+. 0 3A96AC98 22
+. 8B 56 54 BF FF FF FF FF 89 BE CC 01 00 00 85 D2 0F 85 2B 01 00 00
+
+. 0 3A96ACAE 18
+. BF FF FF FF FF 89 BE 80 01 00 00 8B 75 F0 85 F6 74 08
+
+. 0 3A96ACC0 8
+. 8B 7D F0 80 3F 00 75 45
+
+. 0 3A96AD0D 10
+. 89 3C 24 89 FE E8 B9 A5 00 00
+
+. 0 3A975311 10
+. 31 CA 81 E2 00 01 01 01 75 4E
+
+. 0 3A97531B 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 3E
+
+. 0 3A97532B 10
+. 31 CA 81 E2 00 01 01 01 75 34
+
+. 0 3A975335 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 24
+
+. 0 3A975345 10
+. 31 CA 81 E2 00 01 01 01 75 1A
+
+. 0 3A97534F 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 0A
+
+. 0 3A97535F 10
+. 31 CA 81 E2 00 01 01 01 74 98
+
+. 0 3A96AD17 34
+. 8D 50 01 83 C0 13 83 E0 FC 29 C4 8D 44 24 1B 83 E0 F0 89 54 24 08 89 7C 24 04 89 04 24 E8 37 AB 00 00
+
+. 0 3A975886 10
+. F7 D8 83 E0 03 29 C1 91 F3 A4
+
+. 0 3A975890 7
+. 89 C1 83 E9 20 78 3E
+
+. 0 3A975897 62
+. 8B 07 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A975899 60
+. 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A9758D5 9
+. 83 C1 20 8B 44 24 0C F3 A4
+
+. 0 3A96AD39 14
+. 89 C7 0F B6 06 BA 01 00 00 00 84 C0 74 1D
+
+. 0 3A96AD47 29
+. 89 F6 8D BC 27 00 00 00 00 2C 3A 3C 01 0F 96 C0 0F B6 C0 46 01 C2 0F B6 06 84 C0 75 EC
+
+. 0 3A96AD50 20
+. 2C 3A 3C 01 0F 96 C0 0F B6 C0 46 01 C2 0F B6 06 84 C0 75 EC
+
+. 0 3A96AD64 15
+. 8D 04 95 04 00 00 00 89 04 24 E8 A9 AA FF FF
+
+. 0 3A96AD73 22
+. 89 83 BC 00 00 00 85 C0 89 C6 8D 93 90 E3 FF FF 0F 84 9C 00 00 00
+
+. 0 3A96AD89 40
+. 31 C9 8D 83 2A F0 FF FF 89 F2 89 4C 24 08 8D 8B 3A F0 FF FF 89 44 24 04 8B 83 30 01 00 00 89 04 24 89 F8 E8 0F DD FF FF
+
+. 0 3A968AC0 14
+. 55 89 E5 57 56 53 83 EC 3C E8 5D CE 00 00
+
+. 0 3A968ACE 52
+. 81 C3 9E FB 00 00 89 45 F0 8D 45 F0 89 55 EC 89 4D E8 C7 45 E0 00 00 00 00 89 45 C4 8D B6 00 00 00 00 8B 45 E8 89 44 24 04 8B 55 C4 89 14 24 E8 2E AA 00 00
+
+. 0 3A973530 14
+. 55 89 E5 57 56 8B 45 08 8B 38 85 FF 74 51
+
+. 0 3A97353E 9
+. 0F B6 07 89 FE 84 C0 74 41
+
+. 0 3A973547 19
+. 89 F6 8D BC 27 00 00 00 00 8B 4D 0C 0F B6 11 38 C2 74 12
+
+. 0 3A97355A 14
+. 8D B6 00 00 00 00 41 0F B6 11 84 D2 74 18
+
+. 0 3A973568 4
+. 38 C2 75 F4
+
+. 0 3A973560 8
+. 41 0F B6 11 84 D2 74 18
+
+. 0 3A973580 8
+. 46 0F B6 06 84 C0 75 C8
+
+. 0 3A973550 10
+. 8B 4D 0C 0F B6 11 38 C2 74 12
+
+. 0 3A97356C 6
+. 84 D2 89 F6 75 23
+
+. 0 3A973595 6
+. C6 06 00 46 EB EF
+
+. 0 3A97358A 11
+. 8B 45 08 89 30 5E 89 F8 5F 5D C3
+
+. 0 3A968B02 11
+. 89 45 E4 85 C0 0F 84 EE 00 00 00
+
+. 0 3A968B0D 8
+. 89 04 24 E8 BB C7 00 00
+
+. 0 3A968B15 7
+. 89 45 D8 85 C0 75 09
+
+. 0 3A968B25 6
+. 83 7D D8 01 76 11
+
+. 0 3A968B2B 17
+. 8B 4D E4 8B 7D D8 80 7C 0F FF 2F 0F 84 D6 00 00 00
+
+. 0 3A968B3C 7
+. 8B 45 D8 85 C0 74 15
+
+. 0 3A968B43 13
+. 8B 4D E4 8B 7D D8 80 7C 0F FF 2F 74 08
+
+. 0 3A968B50 19
+. C6 04 0F 2F 47 89 7D D8 8B 45 08 85 C0 0F 85 D2 01 00 00
+
+. 0 3A968B63 13
+. 8B 93 DC F9 FF FF 85 D2 89 55 DC 75 15
+
+. 0 3A968B85 11
+. 8B 4D DC 8B 7D D8 39 79 10 75 E5
+
+. 0 3A968B75 16
+. 8B 55 DC 8B 12 85 D2 89 55 DC 0F 84 AC 00 00 00
+
+. 0 3A968C31 9
+. 8B 4D 10 31 F6 85 C9 74 0E
+
+. 0 3A968C48 24
+. 8B 83 C8 00 00 00 8B 55 D8 8D 04 86 8D 44 02 15 89 04 24 E8 BC CB FF FF
+
+. 0 3A968C60 11
+. 89 45 DC 85 C0 0F 84 54 01 00 00
+
+. 0 3A968C6B 41
+. 8B 4D DC 8B 83 C8 00 00 00 8B 7D D8 8D 44 81 14 89 41 0C 89 7C 24 08 8B 45 E4 89 44 24 04 8B 41 0C 89 04 24 E8 DC CA 00 00
+
+. 0 3A975786 10
+. F7 D8 83 E0 03 29 C1 91 F3 A4
+
+. 0 3A975790 7
+. 89 C1 83 E9 20 78 3E
+
+. 0 3A975797 62
+. 8B 07 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A9757D5 5
+. 83 C1 20 F3 A4
+
+. 0 3A968C94 17
+. C6 00 00 8B 55 DC 3B BB D8 00 00 00 89 7A 10 76 06
+
+. 0 3A968CA5 31
+. 89 BB D8 00 00 00 8B 4D E4 31 C0 80 39 2F 8B 8B C8 00 00 00 0F 95 C0 31 D2 01 C0 39 CA 73 18
+
+. 0 3A968CC4 24
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 7D DC 89 44 97 14 42 39 CA 72 F4
+
+. 0 3A968CD0 12
+. 8B 7D DC 89 44 97 14 42 39 CA 72 F4
+
+. 0 3A968CDC 20
+. 8B 55 0C 8B 45 DC 89 50 04 8B 55 10 85 D2 0F 84 C0 00 00 00
+
+. 0 3A968DB0 15
+. 8B 7D DC C7 47 08 00 00 00 00 E9 53 FF FF FF
+
+. 0 3A968D12 35
+. 8B 83 DC F9 FF FF 8B 55 DC 8B 4D E0 8B 7D EC 89 02 89 93 DC F9 FF FF 89 14 8F 41 89 4D E0 E9 BB FD FF FF
+
+. 0 3A968AF0 18
+. 8B 45 E8 89 44 24 04 8B 55 C4 89 14 24 E8 2E AA 00 00
+
+. 0 3A9752F6 8
+. 38 30 0F 84 88 00 00 00
+
+. 0 3A9752FE 19
+. 40 31 D2 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 58
+
+. 0 3A975385 6
+. 40 2B 44 24 04 C3
+
+. 0 3A968CAB 25
+. 8B 4D E4 31 C0 80 39 2F 8B 8B C8 00 00 00 0F 95 C0 31 D2 01 C0 39 CA 73 18
+
+. 0 3A968B90 16
+. FC 8B 45 D8 8B 79 0C 8B 75 E4 39 C0 89 C1 F3 A6
+
+. 0 3A968B9E 2
+. F3 A6
+
+. 0 3A968BA0 2
+. 75 D3
+
+. 0 3A968BA2 11
+. 8B 75 DC 85 F6 0F 84 84 00 00 00
+
+. 0 3A968BAD 7
+. 31 C0 3B 45 E0 73 11
+
+. 0 3A968BB4 11
+. 8B 4D EC 8B 7D DC 39 3C 81 74 06
+
+. 0 3A968BBF 6
+. 40 3B 45 E0 72 EF
+
+. 0 3A968BC5 9
+. 3B 45 E0 0F 85 22 FF FF FF
+
+. 0 3A973588 13
+. 31 F6 8B 45 08 89 30 5E 89 F8 5F 5D C3
+
+. 0 3A968B1C 15
+. 8D 93 2C D6 FF FF 89 55 E4 83 7D D8 01 76 11
+
+. 0 3A968B58 11
+. 8B 45 08 85 C0 0F 85 D2 01 00 00
+
+. 0 3A97358F 6
+. 5E 89 F8 5F 5D C3
+
+. 0 3A968BFB 23
+. 8B 7D EC 8B 4D E0 89 F8 C7 04 8F 00 00 00 00 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96ADB1 12
+. 8B 83 BC 00 00 00 8B 10 85 D2 74 4D
+
+. 0 3A96ADBD 28
+. 31 C0 89 83 C0 00 00 00 8B 83 DC F9 FF FF 89 83 8C FD FF FF 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A966CFB 5
+. E9 51 FB FF FF
+
+. 0 3A966851 10
+. 8B B3 E8 F9 FF FF 85 F6 75 0E
+
+. 0 3A96685B 102
+. 8B 83 F8 F9 FF FF 8B 00 89 83 E8 F9 FF FF 0F B6 83 58 FB FF FF FF 83 98 F9 FF FF 24 FC 0C 01 88 83 58 FB FF FF 8D 83 94 F9 FF FF 89 C2 89 85 D0 FE FF FF 83 C2 50 8B 83 94 F9 FF FF 83 83 C4 F9 FF FF 01 89 50 0C 8B 8B 94 F9 FF FF 83 93 C8 F9 FF FF 00 83 BB 78 FD FF FF FE 89 8B F4 F9 FF FF 0F 84 17 04 00 00
+
+. 0 3A966CD8 11
+. 8B 09 B8 FF FF FF FF 85 C9 74 02
+
+. 0 3A966CE5 11
+. 89 83 78 FD FF FF E9 D1 FB FF FF
+
+. 0 3A9668C1 41
+. 8B 83 78 FB FF FF 8B 50 1C 89 C6 01 D6 0F B7 50 2C 89 B3 28 FB FF FF 66 89 93 30 FB FF FF 0F B7 50 2C 89 D0 4A 85 C0 74 22
+
+. 0 3A9668EA 26
+. 89 D0 C1 E0 05 01 F0 89 D1 89 D7 C1 E1 05 81 38 52 E5 74 64 0F 84 46 14 00 00
+
+. 0 3A966904 8
+. 4A 83 E8 20 85 FF 75 E5
+
+. 0 3A9668F1 19
+. 89 D1 89 D7 C1 E1 05 81 38 52 E5 74 64 0F 84 46 14 00 00
+
+. 0 3A96690C 30
+. 8B B3 B0 00 00 00 31 C0 89 85 14 FF FF FF 31 C0 85 F6 89 85 10 FF FF FF 0F 85 B0 12 00 00
+
+. 0 3A967BDA 14
+. 89 34 24 8D BD 3C FF FF FF E8 E8 D6 00 00
+
+. 0 3A967BE8 34
+. 8D 50 01 83 C0 13 83 E0 FC 29 C4 8D 44 24 2F 83 E0 F0 89 54 24 08 89 74 24 04 89 04 24 E8 66 DC 00 00
+
+. 0 3A967C0A 36
+. 89 85 3C FF FF FF 8D 93 D7 ED FF FF 89 95 60 FE FF FF 8B 8D 60 FE FF FF 89 3C 24 89 4C 24 04 E8 02 B9 00 00
+
+. 0 3A967C2E 10
+. 85 C0 89 C6 0F 84 F2 EC FF FF
+
+. 0 3A967C38 5
+. 80 3E 00 74 DF
+
+. 0 3A967C3D 14
+. 8B 83 30 01 00 00 85 C0 0F 85 2A 05 00 00
+
+. 0 3A967C4B 37
+. 8B 83 94 F9 FF FF 31 C9 31 D2 89 4C 24 08 B9 01 00 00 00 89 54 24 04 89 F2 C7 04 24 01 00 00 00 E8 D0 31 00 00
+
+. 0 3A96AE40 17
+. 55 89 E5 57 56 53 81 EC 58 02 00 00 E8 DA AA 00 00
+
+. 0 3A96AE51 34
+. 81 C3 1B D8 00 00 89 85 D8 FD FF FF 8B B3 94 F9 FF FF 89 95 D4 FD FF FF 89 8D D0 FD FF FF 85 F6 74 3F
+
+. 0 3A96AE73 22
+. 8D B6 00 00 00 00 8D BC 27 00 00 00 00 F6 86 75 01 00 00 02 75 22
+
+. 0 3A96AE89 13
+. 8B 85 D4 FD FF FF 89 F2 E8 EA 5C 00 00
+
+. 0 3A970B80 21
+. 55 89 E5 57 89 C7 56 89 D6 53 83 EC 08 8B 42 04 E8 96 4D 00 00
+
+. 0 3A970B95 18
+. 81 C3 D7 7A 00 00 89 3C 24 89 44 24 04 E8 D9 46 00 00
+
+. 0 3A975280 33
+. 55 89 E5 56 83 EC 04 8B 4D 08 8B 55 0C 8D 76 00 0F B6 31 41 0F B6 02 42 88 45 FB 89 F0 84 C0 74 16
+
+. 0 3A9752A1 7
+. 89 F0 3A 45 FB 74 E8
+
+. 0 3A9752A8 15
+. 0F B6 D0 0F B6 45 FB 29 C2 89 D0 5A 5E 5D C3
+
+. 0 3A970BA7 9
+. 85 C0 BA 01 00 00 00 74 30
+
+. 0 3A970BB0 7
+. 8B 76 14 85 F6 74 27
+
+. 0 3A970BB7 23
+. 89 F6 8D BC 27 00 00 00 00 8B 06 89 3C 24 89 44 24 04 E8 B2 46 00 00
+
+. 0 3A970BCE 9
+. 85 C0 BA 01 00 00 00 74 09
+
+. 0 3A970BD7 7
+. 8B 76 04 85 F6 75 E2
+
+. 0 3A970BDE 12
+. 31 D2 83 C4 08 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96AE96 8
+. 85 C0 0F 85 09 01 00 00
+
+. 0 3A96AE9E 13
+. F6 86 75 01 00 00 01 0F 84 08 01 00 00
+
+. 0 3A96AFB3 11
+. 8B 56 50 85 D2 0F 84 ED FE FF FF
+
+. 0 3A96AEAB 7
+. 8B 76 0C 85 F6 75 CE
+
+. 0 3A96AE80 9
+. F6 86 75 01 00 00 02 75 22
+
+. 0 3A975290 17
+. 0F B6 31 41 0F B6 02 42 88 45 FB 89 F0 84 C0 74 16
+
+. 0 3A96AFBE 29
+. 8B 46 2C 8B 7A 04 8B 50 04 01 D7 89 7C 24 04 8B 85 D4 FD FF FF 89 04 24 E8 A5 A2 00 00
+
+. 0 3A96AFDB 8
+. 85 C0 0F 85 C8 FE FF FF
+
+. 0 3A96AEB2 13
+. F6 83 14 FC FF FF 40 0F 85 FF 03 00 00
+
+. 0 3A96AEBF 23
+. B8 2F 00 00 00 89 44 24 04 8B 85 D4 FD FF FF 89 04 24 E8 DA A1 00 00
+
+. 0 3A9750B0 32
+. 57 56 53 55 8B 44 24 14 8B 54 24 18 89 C7 31 C9 88 D6 88 D1 C1 E2 10 88 CD 09 CA 83 E7 03 74 41
+
+. 0 3A975111 27
+. 8B 08 BD FF FE FE FE BF FF FE FE FE 01 CD 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00
+
+. 0 3A97512C 17
+. 89 CB 81 CD FF FE FE FE 83 C5 01 0F 85 05 01 00 00
+
+. 0 3A97513D 22
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 0F 83 D4 00 00 00
+
+. 0 3A975153 21
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 0F 85 BF 00 00 00
+
+. 0 3A975227 7
+. 83 E8 04 84 DB 74 0F
+
+. 0 3A97523D 5
+. 5D 5B 5E 5F C3
+
+. 0 3A96AED6 8
+. 85 C0 0F 84 AA 01 00 00
+
+. 0 3A96AEDE 14
+. 8B BD D8 FD FF FF 85 FF 0F 84 79 02 00 00
+
+. 0 3A96AEEC 17
+. 8B 95 D4 FD FF FF 8B 85 D8 FD FF FF E8 D3 F6 FF FF
+
+. 0 3A96A5D0 32
+. 55 B9 24 00 00 00 89 E5 83 EC 24 89 5D F4 89 75 F8 31 F6 89 7D FC 89 D7 89 45 F0 E8 3B B3 00 00
+
+. 0 3A96A5F0 18
+. 81 C3 7C E0 00 00 89 4C 24 04 89 14 24 E8 AE AA 00 00
+
+. 0 3A975168 13
+. 31 CD 01 CF 8D 40 04 0F 83 CD 00 00 00
+
+. 0 3A975175 17
+. 89 CB 81 CD FF FE FE FE 83 C5 01 0F 85 BC 00 00 00
+
+. 0 3A975186 22
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 0F 83 8B 00 00 00
+
+. 0 3A97519C 17
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 75 7A
+
+. 0 3A9751AD 13
+. 31 CD 01 CF 8D 40 04 0F 83 88 00 00 00
+
+. 0 3A9751BA 13
+. 89 CB 81 CD FF FE FE FE 83 C5 01 75 7B
+
+. 0 3A9751C7 18
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 73 4E
+
+. 0 3A9751D9 17
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 75 3D
+
+. 0 3A9751EA 9
+. 31 CD 01 CF 8D 40 04 73 4F
+
+. 0 3A9751F3 13
+. 89 CB 81 CD FF FE FE FE 83 C5 01 75 42
+
+. 0 3A975200 18
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 73 15
+
+. 0 3A975212 21
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 0F 84 F8 FE FF FF
+
+. 0 3A97511F 13
+. 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00
+
+. 0 3A975242 7
+. 83 E8 04 38 D1 74 F4
+
+. 0 3A975249 5
+. 80 F9 00 74 1C
+
+. 0 3A97524E 5
+. 40 38 D5 74 EA
+
+. 0 3A975253 5
+. 80 FD 00 74 12
+
+. 0 3A97526A 7
+. 31 C0 5D 5B 5E 5F C3
+
+. 0 3A96A602 4
+. 85 C0 75 41
+
+. 0 3A96A606 4
+. 85 F6 75 52
+
+. 0 3A96A60A 11
+. 89 3C 24 8D 76 00 E8 BB AC 00 00
+
+. 0 3A96A615 11
+. 8D 70 01 89 34 24 E8 FC B1 FF FF
+
+. 0 3A96A620 6
+. 31 D2 85 C0 74 12
+
+. 0 3A96A626 16
+. 89 74 24 08 89 7C 24 04 89 04 24 E8 3A B2 00 00
+
+. 0 3A96A636 17
+. 89 C2 89 D0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96AEFD 14
+. 89 85 E0 FD FF FF 85 C0 0F 85 05 01 00 00
+
+. 0 3A96B010 19
+. 8D 8D E4 FD FF FF 89 CA 89 8D B8 FD FF FF E8 FD DE FF FF
+
+. 0 3A968F20 16
+. 55 89 E5 57 56 31 F6 53 83 EC 4C E8 FB C9 00 00
+
+. 0 3A968F30 31
+. 81 C3 3C F7 00 00 89 45 D0 89 55 CC C7 45 C4 00 00 00 00 89 74 24 04 89 04 24 E8 E1 B4 00 00
+
+. 0 3A974430 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 05 00 00 00 CD 80
+
+. 0 3A974444 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 0 3A97444C 1
+. C3
+
+. 0 3A968F4F 12
+. 89 45 C8 83 F8 FF 0F 84 F5 01 00 00
+
+. 0 3A968F5B 38
+. 31 C9 BA 00 02 00 00 89 8B 48 01 00 00 89 54 24 08 8B 55 CC 83 C2 04 89 55 B8 89 54 24 04 89 04 24 E8 2F B5 00 00
+
+. 0 3A9744B0 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 03 00 00 00 CD 80
+
+. 0 3A9744C4 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 0 3A9744CC 1
+. C3
+
+. 0 3A968F81 14
+. 8B 75 CC 83 F8 33 89 06 0F 8E 10 02 00 00
+
+. 0 3A968F8F 17
+. FC 8B 75 B8 B9 09 00 00 00 8D BB 2F D6 FF FF F3 A6
+
+. 0 3A968F9E 2
+. F3 A6
+
+. 0 3A968FA0 6
+. 0F 85 15 02 00 00
+
+. 0 3A968FA6 19
+. 8B 75 B8 8D 93 B4 E3 FF FF 83 7E 14 01 0F 85 20 01 00 00
+
+. 0 3A968FB9 11
+. 66 83 7E 12 03 0F 85 6E 01 00 00
+
+. 0 3A968FC4 17
+. 66 83 7E 2A 20 8D 93 E0 E3 FF FF 0F 85 04 01 00 00
+
+. 0 3A968FD5 13
+. 0F B7 46 10 83 F8 03 0F 85 4E 02 00 00
+
+. 0 3A968FE2 40
+. 8B 7D B8 8B 57 1C 0F B7 4F 2C 8B 7D CC 8D 7C 3A 04 89 7D C0 0F B7 F1 8B 7D CC C1 E6 05 8D 04 16 3B 07 0F 87 7D 00 00 00
+
+. 0 3A96900A 18
+. 8B 75 C0 0F B7 C1 C1 E0 05 89 75 BC 01 F0 39 C6 72 1D
+
+. 0 3A969039 8
+. 8B 7D BC 83 3F 04 75 E0
+
+. 0 3A969021 24
+. 83 45 BC 20 0F B7 C1 8B 55 C0 C1 E0 05 01 D0 39 45 BC 0F 83 17 01 00 00
+
+. 0 3A969150 11
+. 8B 45 C8 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96B023 13
+. 89 85 CC FD FF FF 40 0F 84 80 04 00 00
+
+. 0 3A96B030 78
+. 8B 83 28 01 00 00 89 85 DC FD FF FF 8D 85 DC FD FF FF 89 44 24 10 8B 45 10 89 44 24 0C 8B 4D 08 89 4C 24 08 8B 85 D8 FD FF FF 89 44 24 04 8B 85 E0 FD FF FF 89 04 24 8B 8D B8 FD FF FF 8B 95 CC FD FF FF 8B 85 D4 FD FF FF E8 12 E7 FF FF
+
+. 0 3A969790 51
+. 55 89 E5 57 56 53 81 EC 08 01 00 00 89 45 90 8D 45 94 89 4D 88 31 C9 89 55 8C C7 45 84 00 00 00 00 89 8D 74 FF FF FF 89 44 24 08 8B 45 8C E8 68 C1 00 00
+
+. 0 3A9697C3 22
+. 81 C3 A9 EE 00 00 C7 04 24 03 00 00 00 89 44 24 04 E8 47 AB 00 00
+
+. 0 3A974320 14
+. 55 89 E5 83 EC 58 89 5D F4 E8 FD 15 00 00
+
+. 0 3A97432E 24
+. 81 C3 3E 43 00 00 89 75 F8 8B B3 24 00 00 00 89 7D FC 8B 06 85 C0 75 7A
+
+. 0 3A974346 21
+. 8B BB 48 01 00 00 8B 55 0C 8B 4D 10 87 D3 B8 C5 00 00 00 CD 80
+
+. 0 3A97435B 13
+. 87 D3 3D 00 F0 FF FF 0F 87 AE 00 00 00
+
+. 0 3A974368 7
+. 83 F8 FF 89 C2 74 31
+
+. 0 3A97436F 4
+. 85 D2 75 1D
+
+. 0 3A974373 11
+. 8B 4D 10 8B 41 58 39 41 0C 74 12
+
+. 0 3A974390 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9697D9 8
+. 85 C0 0F 88 ED 05 00 00
+
+. 0 3A9697E1 13
+. 8B 93 94 F9 FF FF 85 D2 89 55 84 74 6F
+
+. 0 3A9697EE 59
+. 8B 45 94 8B 55 98 8B 75 EC 89 85 6C FF FF FF 8B 7D F0 89 95 70 FF FF FF 8D 76 00 8D BC 27 00 00 00 00 8B 4D 84 8B 91 C8 01 00 00 8B 81 C4 01 00 00 89 D1 31 F9 31 F0 09 C1 75 27
+
+. 0 3A969850 13
+. 8B 45 84 8B 40 0C 85 C0 89 45 84 75 B3
+
+. 0 3A969810 25
+. 8B 4D 84 8B 91 C8 01 00 00 8B 81 C4 01 00 00 89 D1 31 F9 31 F0 09 C1 75 27
+
+. 0 3A96985D 12
+. 31 C0 F6 45 14 04 0F 85 5D 05 00 00
+
+. 0 3A969869 13
+. F6 83 14 FC FF FF 40 0F 85 DA 07 00 00
+
+. 0 3A969876 29
+. 8B 4D 88 8B 45 0C 83 C1 04 89 4D 80 89 04 24 8B 55 90 8B 4D 10 8B 45 08 E8 3D 31 00 00
+
+. 0 3A973780 69
+. 89 93 38 01 00 00 8B 83 18 FC FF FF 31 C9 89 4C 24 14 BA FF FF FF FF 89 54 24 10 8D 74 38 FF F7 D8 C7 04 24 00 00 00 00 21 C6 B8 22 00 00 00 89 44 24 0C B8 03 00 00 00 89 44 24 08 89 74 24 04 E8 8B 14 00 00
+
+. 0 3A974C50 13
+. 89 DA B8 5A 00 00 00 8D 5C 24 04 CD 80
+
+. 0 3A974C5D 9
+. 89 D3 3D 00 F0 FF FF 77 01
+
+. 0 3A974C66 1
+. C3
+
+. 0 3A9737C5 8
+. 3B 83 3C 01 00 00 74 06
+
+. 0 3A9737CD 50
+. 89 83 38 01 00 00 8B 93 38 01 00 00 01 F0 89 83 3C 01 00 00 89 93 40 01 00 00 89 D0 8D 14 17 89 93 38 01 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96CAB0 9
+. 89 C2 8B 40 0C 85 C0 74 10
+
+. 0 3A96CAB9 16
+. 8D B4 26 00 00 00 00 89 C2 8B 40 0C 85 C0 75 F7
+
+. 0 3A96CAC9 62
+. 8B 45 E0 89 50 10 89 42 0C 8B 90 B0 01 00 00 8B 83 94 F9 FF FF 05 50 01 00 00 89 04 8A B9 01 00 00 00 FF 83 98 F9 FF FF 83 83 C4 F9 FF FF 01 83 93 C8 F9 FF FF 00 85 FF 0F 84 51 01 00 00
+
+. 0 3A96CB07 10
+. 8B 87 60 01 00 00 85 C0 74 1B
+
+. 0 3A96CB36 4
+. 3B 02 74 03
+
+. 0 3A96CB3D 34
+. 8B 45 E0 8B 55 E0 8B 4D F0 05 50 01 00 00 89 82 B4 01 00 00 0F B6 09 84 C9 88 4D DF 0F 84 C7 00 00 00
+
+. 0 3A96CB5F 11
+. 8B 45 F0 89 04 24 E8 66 87 00 00
+
+. 0 3A96CB6A 14
+. 40 80 7D DF 2F 89 45 D8 0F 84 E8 00 00 00
+
+. 0 3A96CC60 8
+. 89 04 24 E8 B4 8B FF FF
+
+. 0 3A96CC68 10
+. 85 C0 89 C7 0F 85 79 FF FF FF
+
+. 0 3A96CBEB 22
+. 8B 55 D8 89 54 24 08 8B 4D F0 89 04 24 89 4C 24 04 E8 6F 8B 00 00
+
+. 0 3A975799 60
+. 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A96CC01 2
+. EB 0D
+
+. 0 3A96CC10 6
+. 48 80 38 2F 75 FA
+
+. 0 3A96CC16 4
+. 39 F8 74 69
+
+. 0 3A96CC1A 25
+. C6 00 00 8B 45 E0 89 B8 90 01 00 00 8B 45 E0 83 C4 2C 5B 5E 5F 5D C2 04 00
+
+. 0 3A969893 20
+. 89 45 84 83 EC 04 85 C0 8D 93 94 E4 FF FF 0F 84 CD 04 00 00
+
+. 0 3A9698A7 79
+. 8B 75 80 8B 55 84 8B 46 18 0F B7 4E 10 89 82 48 01 00 00 0F B7 46 2C 89 8D 78 FF FF FF 8B 4D 88 66 89 82 4C 01 00 00 8B 55 80 0F B7 76 2C 8B 42 1C C1 E6 05 89 B5 28 FF FF FF 8D 14 06 8D 44 08 04 3B 11 89 85 7C FF FF FF 0F 87 1F 04 00 00
+
+. 0 3A9698F6 89
+. 89 A5 64 FF FF FF 8B 4D 84 BF 07 00 00 00 89 BD 68 FF FF FF C6 85 5B FF FF FF 00 0F B7 91 4C 01 00 00 8D 44 52 FD 8D 04 C5 28 00 00 00 29 C4 C1 E2 05 8D 74 24 43 83 E6 F0 89 B5 60 FF FF FF 31 F6 89 B5 5C FF FF FF 8B B5 7C FF FF FF 89 F0 01 D0 39 C6 0F 83 87 04 00 00
+
+. 0 3A96994F 20
+. 89 85 54 FF FF FF 8B 85 60 FF FF FF 89 85 2C FF FF FF EB 28
+
+. 0 3A96998B 7
+. 8B 06 83 F8 06 74 33
+
+. 0 3A969992 5
+. 83 F8 06 76 CC
+
+. 0 3A969963 9
+. 83 F8 01 0F 84 CE 02 00 00
+
+. 0 3A969C3A 18
+. 8B 83 18 FC FF FF 8B 56 1C 48 85 D0 0F 85 99 05 00 00
+
+. 0 3A969C4C 23
+. 8B 46 08 4A 8B 4E 04 89 85 50 FF FF FF 29 C8 85 C2 0F 85 8D 05 00 00
+
+. 0 3A969C63 112
+. FF 85 5C FF FF FF 8B BD 2C FF FF FF 8B 85 50 FF FF FF 8D 4F 18 89 8D 2C FF FF FF 89 D1 F7 D1 21 C1 8B 85 2C FF FF FF 89 48 E8 8B 56 08 8B 46 10 01 D0 8B 93 18 FC FF FF 8D 44 02 FF F7 DA 21 D0 8B 95 2C FF FF FF 89 42 EC 8B 46 10 03 46 08 89 42 F0 8B 46 14 03 46 08 89 42 F4 8B 46 1C 48 F7 D0 23 46 04 83 BD 5C FF FF FF 01 89 42 F8 76 0C
+
+. 0 3A969CDF 27
+. 8B 4E 18 B8 40 62 51 73 83 E1 07 C1 E1 02 D3 F8 83 E0 0F 89 47 14 E9 86 FC FF FF
+
+. 0 3A969980 11
+. 83 C6 20 3B B5 54 FF FF FF 73 51
+
+. 0 3A969CD3 5
+. 39 4F EC 74 07
+
+. 0 3A96996C 9
+. 83 F8 02 0F 84 85 03 00 00
+
+. 0 3A969CFA 27
+. 8B 46 08 8B 55 84 89 42 08 8B 46 14 C1 E8 03 66 89 82 4E 01 00 00 E9 6B FC FF FF
+
+. 0 3A969997 11
+. 3D 51 E5 74 64 0F 84 8A 02 00 00
+
+. 0 3A969C2C 14
+. 8B 4E 18 89 8D 68 FF FF FF E9 46 FD FF FF
+
+. 0 3A9699DC 14
+. 8B 85 5C FF FF FF 85 C0 0F 84 EC 03 00 00
+
+. 0 3A9699EA 50
+. 8B 8D 5C FF FF FF 8B BD 60 FF FF FF 8D 04 49 8D 04 C7 8B 17 8B 70 F4 89 85 4C FF FF FF 29 D6 83 BD 78 FF FF FF 03 89 B5 28 FF FF FF 0F 85 DF 07 00 00
+
+. 0 3A969A1C 50
+. 8B 83 78 FD FF FF 21 C2 8B 47 10 89 44 24 14 8B 45 8C 89 44 24 10 B8 02 00 00 00 89 44 24 0C 8B 47 14 89 14 24 89 74 24 04 89 44 24 08 E8 02 B2 00 00
+
+. 0 3A969A4E 16
+. 8B 55 84 89 82 94 01 00 00 40 0F 84 98 01 00 00
+
+. 0 3A969A5E 14
+. 8B B3 40 FC FF FF 85 F6 0F 85 FE 05 00 00
+
+. 0 3A96A06A 36
+. B9 03 00 00 00 89 4C 24 08 8B 8D 28 FF FF FF 89 4C 24 04 8B 75 84 8B 86 94 01 00 00 89 04 24 E8 82 AC 00 00
+
+. 0 3A974D10 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 DB 00 00 00 CD 80
+
+. 0 3A974D24 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 0 3A974D2C 1
+. C3
+
+. 0 3A96A08E 5
+. E9 D9 F9 FF FF
+
+. 0 3A969A6C 46
+. 8B 55 84 8B 8D 28 FF FF FF 8B 75 84 8B 82 94 01 00 00 8D 14 01 89 96 98 01 00 00 8B 17 29 D0 80 BD 5B FF FF FF 00 89 06 0F 85 F9 05 00 00
+
+. 0 3A969A9A 13
+. 8B 4D 84 8B 81 44 01 00 00 85 C0 75 49
+
+. 0 3A969AA7 19
+. 8B 45 80 8B 77 10 8B 48 1C 89 B5 48 FF FF FF 39 CE 77 36
+
+. 0 3A969ABA 33
+. 0F B7 40 2C 8B 37 8B 57 04 89 B5 44 FF FF FF C1 E0 05 29 F2 03 95 48 FF FF FF 8D 04 08 39 C2 72 15
+
+. 0 3A969ADB 35
+. 89 F0 8B 55 84 01 C8 8B 8D 48 FF FF FF 29 C8 89 82 44 01 00 00 8B 4F 0C 8B 57 08 39 D1 0F 86 8F 00 00 00
+
+. 0 3A969B8D 29
+. 8B B5 5C FF FF FF 83 C7 18 8B 95 60 FF FF FF 8D 04 76 8D 04 C2 39 C7 0F 83 3A 02 00 00
+
+. 0 3A969BAA 11
+. 8B 07 39 47 04 0F 86 E5 FE FF FF
+
+. 0 3A969BB5 58
+. 8B 47 10 89 44 24 14 8B 75 8C B8 12 00 00 00 89 44 24 0C 89 74 24 10 8B 47 14 89 44 24 08 8B 47 04 8B 37 29 F0 89 44 24 04 8B 55 84 8B 07 8B 0A 01 C8 89 04 24 E8 61 B0 00 00
+
+. 0 3A969BEF 7
+. 40 0F 85 A4 FE FF FF
+
+. 0 3A969AF0 14
+. 8B 4F 0C 8B 57 08 39 D1 0F 86 8F 00 00 00
+
+. 0 3A969AFE 45
+. 8B 75 84 8B 83 18 FC FF FF 8B 36 01 F2 01 F1 89 95 40 FF FF FF 8D 54 10 FF F7 D8 89 8D 3C FF FF FF 21 C2 39 D1 89 95 38 FF FF FF 73 06
+
+. 0 3A969B2B 20
+. 89 8D 38 FF FF FF 8B 8D 40 FF FF FF 39 8D 38 FF FF FF 76 3C
+
+. 0 3A969B3F 11
+. 8B 47 14 A8 02 0F 84 8E 08 00 00
+
+. 0 3A969B4A 38
+. 8B 85 38 FF FF FF 8B 95 40 FF FF FF 29 D0 89 44 24 08 31 C0 89 44 24 04 8B B5 40 FF FF FF 89 34 24 E8 90 BB 00 00
+
+. 0 3A975700 31
+. 57 8B 7C 24 08 8B 54 24 10 8A 44 24 0C 88 C4 89 C1 C1 E0 10 66 89 C8 FC 83 FA 24 89 D1 7C 37
+
+. 0 3A975756 5
+. C1 E9 02 F3 AB
+
+. 0 3A975759 2
+. F3 AB
+
+. 0 3A97575B 7
+. 89 D1 83 E1 03 F3 AA
+
+. 0 3A975762 6
+. 8B 44 24 08 5F C3
+
+. 0 3A969B70 11
+. 8B 47 14 A8 02 0F 84 37 08 00 00
+
+. 0 3A969B7B 18
+. 8B 85 38 FF FF FF 39 85 3C FF FF FF 0F 87 E4 05 00 00
+
+. 0 3A969DE4 17
+. 8B 4D 84 8B 81 44 01 00 00 85 C0 0F 84 F3 02 00 00
+
+. 0 3A969DF5 30
+. 8B 4D 84 8B 31 01 F0 89 81 44 01 00 00 8B A5 64 FF FF FF 8B 75 8C 89 34 24 E8 5D A6 00 00
+
+. 0 3A974470 13
+. 89 DA 8B 5C 24 04 B8 06 00 00 00 CD 80
+
+. 0 3A97447D 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3A974486 1
+. C3
+
+. 0 3A969E13 42
+. C7 45 8C FF FF FF FF 8B 45 84 0F B6 88 74 01 00 00 88 C8 24 03 FE C8 0F 94 C2 83 BD 78 FF FF FF 02 0F 94 C0 21 D0 A8 01 74 0C
+
+. 0 3A969E49 14
+. 8B 4D 84 8B 41 08 85 C0 0F 85 82 02 00 00
+
+. 0 3A96A0D9 15
+. 8B 75 84 8B 16 01 D0 89 46 08 E9 87 FD FF FF
+
+. 0 3A969E6F 22
+. 8B 4D 84 01 91 48 01 00 00 F6 83 14 FC FF FF 40 0F 85 AB 03 00 00
+
+. 0 3A969E85 14
+. 8B 45 84 8B 70 08 85 F6 0F 84 0B 01 00 00
+
+. 0 3A969E93 11
+. 8B 16 89 C7 83 C7 18 85 D2 75 10
+
+. 0 3A969EAE 5
+. 83 FA 21 7E ED
+
+. 0 3A969EA0 14
+. 89 34 97 83 C6 08 8B 06 85 C0 89 C2 74 38
+
+. 0 3A969EB3 12
+. B8 FF FF FF 6F 29 D0 83 F8 0F 77 0C
+
+. 0 3A969EBF 12
+. B8 21 00 00 70 29 D0 89 34 87 EB D8
+
+. 0 3A969EA3 11
+. 83 C6 08 8B 06 85 C0 89 C2 74 38
+
+. 0 3A969EE6 9
+. 8B 4D 84 8B 11 85 D2 74 53
+
+. 0 3A969EEF 7
+. 8B 47 10 85 C0 74 03
+
+. 0 3A969EF6 10
+. 01 50 04 8B 47 0C 85 C0 74 03
+
+. 0 3A969F00 10
+. 01 50 04 8B 47 14 85 C0 74 03
+
+. 0 3A969F0A 10
+. 01 50 04 8B 47 18 85 C0 74 03
+
+. 0 3A969F14 10
+. 01 50 04 8B 47 1C 85 C0 74 03
+
+. 0 3A969F21 7
+. 8B 47 44 85 C0 74 03
+
+. 0 3A969F28 10
+. 01 50 04 8B 47 5C 85 C0 74 03
+
+. 0 3A969F32 13
+. 01 50 04 8B 87 C4 00 00 00 85 C0 74 03
+
+. 0 3A969F3F 10
+. 01 50 04 8B 57 78 85 D2 74 2A
+
+. 0 3A969F73 10
+. 8B 97 98 00 00 00 85 D2 74 13
+
+. 0 3A969F7D 16
+. 8B 42 04 8B 75 84 A8 01 89 86 E8 01 00 00 74 03
+
+. 0 3A969F90 7
+. 8B 47 74 85 C0 74 07
+
+. 0 3A969F9E 16
+. 8B 45 84 F6 80 E8 01 00 00 40 0F 85 88 03 00 00
+
+. 0 3A969FAE 14
+. 8B 45 84 8B 50 28 85 D2 0F 85 1E 02 00 00
+
+. 0 3A96A1DA 5
+. E8 A1 1B 00 00
+
+. 0 3A96A1DF 6
+. 90 E9 D7 FD FF FF
+
+. 0 3A969FBC 14
+. 8B 55 84 8B 42 58 85 C0 0F 85 E5 02 00 00
+
+. 0 3A969FCA 12
+. 8B 45 84 F6 80 E8 01 00 00 20 74 06
+
+. 0 3A969FD6 72
+. 89 83 CC F9 FF FF 8B 45 94 8B 55 98 8B 4D 84 89 81 BC 01 00 00 89 91 C0 01 00 00 8B 45 EC 8B 55 F0 89 81 C4 01 00 00 8B 83 00 FC FF FF 89 91 C8 01 00 00 F7 D0 21 85 68 FF FF FF F6 85 68 FF FF FF 01 0F 85 72 03 00 00
+
+. 0 3A96A01E 14
+. 8B BB 7C FD FF FF 85 FF 0F 84 97 FD FF FF
+
+. 0 3A969DC3 11
+. 8B 45 84 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96B07E 10
+. 8D 65 F4 5B 5E 5F 5D C2 0C 00
+
+. 0 3A967C70 32
+. 8B 90 70 01 00 00 83 EC 0C 42 89 90 70 01 00 00 31 C0 83 FA 01 0F 94 C0 01 85 10 FF FF FF EB 8C
+
+. 0 3A967C1C 18
+. 8B 8D 60 FE FF FF 89 3C 24 89 4C 24 04 E8 02 B9 00 00
+
+. 0 3A96692A 84
+. 8B 83 7E EC FF FF 8D 75 B4 B9 03 00 00 00 89 B5 CC FE FF FF 8D 95 38 FF FF FF 89 45 B4 8B 83 82 EC FF FF 89 45 B8 8B 83 86 EC FF FF 89 45 BC 8B 83 8A EC FF FF 89 45 C0 0F B7 83 8E EC FF FF 66 89 45 C4 0F B6 83 90 EC FF FF 88 45 C6 89 F0 E8 F2 A6 00 00
+
+. 0 3A971070 37
+. 55 89 E5 81 EC 8C 00 00 00 89 75 F8 31 F6 89 5D F4 89 7D FC 89 D7 89 4D 90 C7 45 8C FF FF FF FF E8 96 48 00 00
+
+. 0 3A971095 18
+. 81 C3 D7 75 00 00 89 74 24 04 89 04 24 E8 89 33 00 00
+
+. 0 3A97444D 5
+. E8 D5 14 00 00
+
+. 0 3A975927 4
+. 8B 0C 24 C3
+
+. 0 3A974452 21
+. 81 C1 1A 42 00 00 31 D2 29 C2 89 91 48 01 00 00 83 C8 FF EB E5
+
+. 0 3A9710A7 6
+. 85 C0 89 C6 78 2C
+
+. 0 3A9710D9 16
+. 8B 45 8C 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96697E 15
+. 89 85 0C FF FF FF 83 F8 FF 0F 85 E1 14 00 00
+
+. 0 3A96698D 20
+. 8B 83 94 F9 FF FF 8B B8 A4 00 00 00 85 FF 0F 84 04 0B 00 00
+
+. 0 3A9669A1 14
+. 8B B5 10 FF FF FF 85 F6 0F 85 79 14 00 00
+
+. 0 3A967E28 65
+. 8B B5 10 FF FF FF 31 C9 89 8D 04 FF FF FF 8D 04 B5 10 00 00 00 29 C4 8D 54 24 2F 8B 83 F0 F9 FF FF 83 E2 F0 89 95 14 FF FF FF 8B 8D 04 FF FF FF 89 04 8A 8B 40 0C 41 89 8D 04 FF FF FF 85 C0 75 E9
+
+. 0 3A967E69 5
+. E9 41 EB FF FF
+
+. 0 3A9669AF 22
+. 8B B3 94 FD FF FF 31 FF 89 BD C4 FE FF FF 85 F6 0F 84 9F 06 00 00
+
+. 0 3A967064 44
+. 31 D2 8B 83 94 F9 FF FF 89 54 24 04 31 D2 83 BD 50 FF FF FF 03 0F 94 C2 89 14 24 8B 8D 10 FF FF FF 8B 95 14 FF FF FF E8 10 81 00 00
+
+. 0 3A96F1A0 83
+. 55 89 E5 57 56 53 81 EC A0 00 00 00 89 45 C0 8D 04 49 8D 04 85 28 00 00 00 89 55 BC 29 C4 8B 55 C0 89 4D B8 8D 44 24 1F 83 E0 F0 89 50 04 8B 4D C0 8D 50 0C 89 45 F0 C7 00 00 00 00 00 89 50 08 0F B6 81 74 01 00 00 C7 45 EC 01 00 00 00 E8 38 67 00 00
+
+. 0 3A96F1F3 31
+. 81 C3 79 94 00 00 24 9F 0C 20 88 81 74 01 00 00 8B 75 B8 C7 45 AC 00 00 00 00 39 75 AC 73 48
+
+. 0 3A96F212 72
+. 8B 45 AC 8B 55 BC 8B 75 EC 8B 3C 82 8B 45 F0 8D 14 76 C1 E2 02 46 8D 4C 02 0C C7 04 10 00 00 00 00 89 4C 10 08 89 7C 10 04 0F B6 87 74 01 00 00 89 75 EC 24 9F 0C 20 88 87 74 01 00 00 8B 4D B8 FF 45 AC 39 4D AC 72 B8
+
+. 0 3A96F25A 65
+. C7 45 A4 00 00 00 00 8B 45 EC 31 C9 C7 45 C8 00 00 00 00 8B 55 F0 8B B3 48 01 00 00 89 8B 48 01 00 00 8D 04 40 8D 44 82 F4 C7 40 08 00 00 00 00 85 D2 89 45 B0 89 75 A8 89 55 B4 0F 84 6A 03 00 00
+
+. 0 3A96F29B 42
+. C7 45 9C 00 00 00 00 8B 45 B4 8B 55 B4 C7 45 98 00 00 00 00 8B 40 04 C7 02 01 00 00 00 8B 90 50 01 00 00 89 45 A0 85 D2 75 44
+
+. 0 3A96F2C5 26
+. 8B 80 D4 01 00 00 8B 4D C0 85 C0 0F 94 C0 39 4D A0 0F 95 C2 21 D0 A8 01 74 2A
+
+. 0 3A96F309 10
+. 8B 55 A0 8B 42 1C 85 C0 75 18
+
+. 0 3A96F32B 52
+. 8B 4D A0 8B 75 08 8B 55 B4 8B 41 2C 8B 40 04 89 4D CC 8B 49 08 89 45 94 89 45 D8 8B 45 0C 89 75 D0 89 55 90 89 45 D4 8B 11 89 4D 8C 85 D2 0F 85 BD 00 00 00
+
+. 0 3A96F41C 9
+. 83 FA 01 0F 84 3F FF FF FF
+
+. 0 3A96F364 30
+. 8B 75 8C B9 24 00 00 00 8B 7D 94 8B 46 04 89 4C 24 04 01 C7 89 FE 89 3C 24 E8 2E 5D 00 00
+
+. 0 3A9750D0 4
+. 88 D1 7A 29
+
+. 0 3A9750D4 8
+. 32 08 0F 84 61 01 00 00
+
+. 0 3A9750DC 8
+. 30 D1 0F 84 86 01 00 00
+
+. 0 3A9750E4 12
+. 8A 48 01 40 38 CA 0F 84 4D 01 00 00
+
+. 0 3A9750F0 9
+. 80 F9 00 0F 84 71 01 00 00
+
+. 0 3A9750F9 4
+. 40 4F 75 14
+
+. 0 3A9750FD 10
+. 8A 08 38 CA 0F 84 36 01 00 00
+
+. 0 3A975107 9
+. 80 F9 00 0F 84 5A 01 00 00
+
+. 0 3A975110 28
+. 40 8B 08 BD FF FE FE FE BF FF FE FE FE 01 CD 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00
+
+. 0 3A975258 8
+. C1 E9 10 40 38 D1 74 DD
+
+. 0 3A975260 5
+. 80 F9 00 74 05
+
+. 0 3A96F382 8
+. 85 C0 0F 85 2A 05 00 00
+
+. 0 3A96F38A 26
+. 89 75 DC 8D 45 CC 8D 55 C8 89 04 24 8D 8B 34 78 FF FF 8D 45 C4 E8 2C 0E 00 00
+
+. 0 3A9701D0 31
+. 55 89 E5 81 EC F8 00 00 00 89 85 24 FF FF FF 31 C0 89 5D F4 89 75 F8 89 7D FC E8 3C 57 00 00
+
+. 0 3A9701EF 30
+. 81 C3 7D 84 00 00 89 95 20 FF FF FF 89 8D 1C FF FF FF 89 85 2C FF FF FF FF 93 E0 F9 FF FF
+
+. 0 3A9658C0 9
+. 55 89 E5 5D E8 5E 00 01 00
+
+. 0 3A9658C9 13
+. 81 C1 A3 2D 01 00 8D 81 3C 00 00 00 C3
+
+. 0 3A97020D 25
+. 89 85 18 FF FF FF 8B 00 89 45 E4 8D 85 30 FF FF FF 89 04 24 E8 0A 4D 00 00
+
+. 0 3A974F30 36
+. 31 C0 8B 54 24 04 89 5A 00 89 72 04 89 7A 08 8D 4C 24 04 89 4A 10 8B 4C 24 00 89 4A 14 89 6A 0C 89 42 18 C3
+
+. 0 3A970226 6
+. 85 C0 89 C2 75 4E
+
+. 0 3A97022C 26
+. 8B 8D 18 FF FF FF 8D 85 28 FF FF FF 89 01 8B 45 08 89 04 24 FF 95 1C FF FF FF
+
+. 0 3A96FEA0 45
+. 55 B9 01 00 00 00 89 E5 56 83 EC 0C 8B 75 08 8B 46 08 8B 56 10 89 44 24 08 8B 46 04 89 44 24 04 8B 06 0F B6 80 74 01 00 00 A8 03 74 05
+
+. 0 3A96FED2 12
+. 89 0C 24 8B 06 31 C9 E8 62 AF FF FF
+
+. 0 3A96B088 14
+. 8B 95 D4 FD FF FF 89 14 24 E8 3A A2 00 00
+
+. 0 3A96B096 20
+. 40 F6 83 14 FC FF FF 01 89 85 C8 FD FF FF 0F 85 2C 04 00 00
+
+. 0 3A96B0AA 21
+. 8B B5 D8 FD FF FF BF FF FF FF FF 89 BD CC FD FF FF 85 F6 74 14
+
+. 0 3A96B0BF 20
+. 8B 85 D8 FD FF FF 8B 88 8C 00 00 00 85 C9 0F 85 A3 01 00 00
+
+. 0 3A96B0D3 14
+. 8B B5 D8 FD FF FF 85 F6 0F 84 C2 00 00 00
+
+. 0 3A96B0E1 14
+. 8D 93 09 F0 FF FF 89 95 B4 FD FF FF EB 1F
+
+. 0 3A96B10E 29
+. 8B 8D B4 FD FF FF 8D BE 80 01 00 00 89 F0 89 FA 89 0C 24 B9 0F 00 00 00 E8 65 F7 FF FF
+
+. 0 3A96A890 25
+. 55 89 E5 83 EC 0C 89 75 F8 89 D6 89 7D FC 8B 12 89 C7 31 C0 83 FA FF 74 27
+
+. 0 3A96A8D0 10
+. 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96B12B 4
+. 84 C0 74 C0
+
+. 0 3A96B0EF 31
+. 83 BD CC FD FF FF FF 8B B6 60 01 00 00 0F 94 C0 85 F6 0F 95 C2 21 D0 A8 01 0F 84 95 00 00 00
+
+. 0 3A96B1A3 27
+. 83 BD CC FD FF FF FF 8B B3 94 F9 FF FF 0F 94 C0 85 F6 0F 95 C2 21 D0 A8 01 74 58
+
+. 0 3A96B1BE 13
+. 0F B6 86 74 01 00 00 24 03 3C 02 74 4B
+
+. 0 3A96B1CB 8
+. 3B B5 D8 FD FF FF 74 43
+
+. 0 3A96B216 9
+. 83 BD CC FD FF FF FF 74 57
+
+. 0 3A96B276 9
+. 83 BB BC 00 00 00 FF 74 A0
+
+. 0 3A96B27F 52
+. 8D 85 E0 FD FF FF 8D 8D E4 FD FF FF 89 44 24 04 8D 83 BC 00 00 00 89 4C 24 08 89 04 24 8B 8D D0 FD FF FF 8B 95 C8 FD FF FF 8B 85 D4 FD FF FF E8 9D DF FF FF
+
+. 0 3A969250 27
+. 55 89 E5 57 31 FF 56 31 F6 53 81 EC B4 00 00 00 89 45 90 8B 45 08 E8 C0 C6 00 00
+
+. 0 3A96926B 114
+. 81 C3 01 F4 00 00 89 4D 88 8B 00 89 55 8C 8B 8B D8 00 00 00 89 BD 7C FF FF FF 89 45 84 8B 83 CC 00 00 00 C7 45 80 FF FF FF FF 89 B5 78 FF FF FF 01 C8 8D 44 02 12 8B 4D 84 83 E0 FC 29 C4 8D 54 24 1B 8B 39 83 E2 F0 89 95 74 FF FF FF 90 8D B4 26 00 00 00 00 31 C0 89 85 70 FF FF FF 31 C0 F6 83 14 FC FF FF 01 89 85 68 FF FF FF 0F 85 78 02 00 00
+
+. 0 3A9692DD 30
+. 8B 47 10 31 F6 89 44 24 08 8B 47 0C 89 44 24 04 8B 8D 74 FF FF FF 89 0C 24 E8 75 C4 00 00
+
+. 0 3A9692FB 16
+. 83 7D 80 FF 89 85 6C FF FF FF 0F 85 98 01 00 00
+
+. 0 3A96930B 8
+. 3B B3 C8 00 00 00 72 1F
+
+. 0 3A969332 7
+. 83 7C B7 14 01 74 DF
+
+. 0 3A969339 41
+. 8B 83 C4 00 00 00 8B 44 F0 04 89 44 24 08 8B 83 C4 00 00 00 8B 04 F0 89 44 24 04 8B 85 6C FF FF FF 89 04 24 E8 0E C4 00 00
+
+. 0 3A969362 22
+. 8B 55 8C 89 54 24 08 8B 4D 90 89 04 24 89 4C 24 04 E8 F8 C3 00 00
+
+. 0 3A969378 31
+. 89 85 70 FF FF FF 8B 85 74 FF FF FF 29 85 70 FF FF FF F6 83 14 FC FF FF 01 0F 85 50 01 00 00
+
+. 0 3A969397 14
+. 8B 55 10 8B 85 74 FF FF FF E8 7B FB FF FF
+
+. 0 3A9693A5 11
+. 89 45 80 8B 44 B7 14 85 C0 75 18
+
+. 0 3A9693B0 10
+. 83 7D 80 FF 0F 84 44 01 00 00
+
+. 0 3A9694FE 45
+. 8B 85 70 FF FF FF 8B 4D 8C 8B 95 74 FF FF FF 29 C8 C6 44 10 FF 00 8D 45 94 89 44 24 08 89 54 24 04 C7 04 24 03 00 00 00 E8 E5 AC 00 00
+
+. 0 3A974210 14
+. 55 89 E5 83 EC 58 89 5D F4 E8 0D 17 00 00
+
+. 0 3A97421E 24
+. 81 C3 4E 44 00 00 89 75 F8 8B B3 24 00 00 00 89 7D FC 8B 06 85 C0 75 7A
+
+. 0 3A974236 21
+. 8B BB 48 01 00 00 8B 55 0C 8B 4D 10 87 D3 B8 C3 00 00 00 CD 80
+
+. 0 3A97424B 13
+. 87 D3 3D 00 F0 FF FF 0F 87 AE 00 00 00
+
+. 0 3A974306 18
+. F7 D8 89 83 48 01 00 00 B8 FF FF FF FF E9 40 FF FF FF
+
+. 0 3A974258 7
+. 83 F8 FF 89 C2 74 31
+
+. 0 3A974290 9
+. 83 BB 48 01 00 00 26 75 C6
+
+. 0 3A97425F 4
+. 85 D2 75 1D
+
+. 0 3A974280 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96952B 4
+. 85 C0 75 13
+
+. 0 3A969542 19
+. BA 01 00 00 00 B8 01 00 00 00 89 54 B7 14 E9 73 FE FF FF
+
+. 0 3A9693C8 25
+. 83 F8 02 0F 94 C0 0F B6 C0 09 85 68 FF FF FF 83 7D 80 FF 0F 84 37 FF FF FF
+
+. 0 3A969318 14
+. 46 83 7D 80 FF 8D 76 00 0F 85 7D 01 00 00
+
+. 0 3A969326 12
+. 3B B3 C8 00 00 00 0F 83 12 01 00 00
+
+. 0 3A974263 11
+. 8B 4D 10 8B 41 58 39 41 0C 74 12
+
+. 0 3A96952F 19
+. 8B 45 A4 25 00 F0 00 00 3D 00 40 00 00 0F 84 78 FE FF FF
+
+. 0 3A9693BA 39
+. B8 02 00 00 00 89 44 B7 14 B8 02 00 00 00 83 F8 02 0F 94 C0 0F B6 C0 09 85 68 FF FF FF 83 7D 80 FF 0F 84 37 FF FF FF
+
+. 0 3A969444 6
+. 83 7D 80 FF 75 59
+
+. 0 3A96944A 10
+. 8B 85 68 FF FF FF 85 C0 74 15
+
+. 0 3A969454 11
+. 8B 93 48 01 00 00 83 FA 02 74 0A
+
+. 0 3A969469 31
+. 83 45 84 04 8B 8D 68 FF FF FF 09 8D 78 FF FF FF 8B 55 84 8B 02 85 C0 89 C7 0F 85 38 FE FF FF
+
+. 0 3A9692C0 29
+. 31 C0 89 85 70 FF FF FF 31 C0 F6 83 14 FC FF FF 01 89 85 68 FF FF FF 0F 85 78 02 00 00
+
+. 0 3A969488 14
+. 8B 85 78 FF FF FF 85 C0 0F 84 9C 02 00 00
+
+. 0 3A969496 13
+. B8 FF FF FF FF 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96B2B3 11
+. 89 85 CC FD FF FF E9 61 FF FF FF
+
+. 0 3A96B21F 31
+. 83 BD CC FD FF FF FF 8B 95 D8 FD FF FF 0F 94 C0 85 D2 0F 95 C2 21 D0 A8 01 0F 85 E1 00 00 00
+
+. 0 3A96B31F 39
+. 8B B5 D8 FD FF FF 8D 83 0F F0 FF FF B9 1D 00 00 00 89 04 24 8B 85 D8 FD FF FF 81 C6 CC 01 00 00 89 F2 E8 4A F5 FF FF
+
+. 0 3A96B346 8
+. 84 C0 0F 84 F0 FE FF FF
+
+. 0 3A96B23E 13
+. 83 BD CC FD FF FF FF 0F 84 3C 01 00 00
+
+. 0 3A96B387 14
+. 8B 85 D0 FD FF FF 85 C0 0F 85 69 01 00 00
+
+. 0 3A96B395 11
+. 8B 85 D4 FD FF FF E8 80 03 00 00
+
+. 0 3A96B720 14
+. 55 89 E5 57 56 53 83 EC 64 E8 FD A1 00 00
+
+. 0 3A96B72E 22
+. 81 C3 3E CF 00 00 F6 83 14 FC FF FF 01 89 45 F0 0F 85 5E 04 00 00
+
+. 0 3A96B744 14
+. 8B 8B DC 00 00 00 85 C9 0F 84 20 01 00 00
+
+. 0 3A96B872 22
+. B9 01 00 00 00 8D 93 E4 00 00 00 8D 83 72 F0 FF FF E8 E8 57 00 00
+
+. 0 3A9710AD 23
+. 89 74 24 04 8D 45 94 89 44 24 08 C7 04 24 03 00 00 00 E8 5C 32 00 00
+
+. 0 3A9710C4 4
+. 85 C0 78 09
+
+. 0 3A9710C8 9
+. 8B 45 C0 85 C0 89 07 75 1F
+
+. 0 3A9710F0 42
+. 89 74 24 10 31 C9 BA 02 00 00 00 89 4C 24 14 89 54 24 0C 8B 55 90 89 44 24 04 C7 04 24 00 00 00 00 89 54 24 08 E8 36 3B 00 00
+
+. 0 3A97111A 5
+. 89 45 8C EB B2
+
+. 0 3A9710D1 8
+. 89 34 24 E8 97 33 00 00
+
+. 0 3A96B888 10
+. 89 45 D8 40 0F 84 73 01 00 00
+
+. 0 3A96B892 18
+. 8B 83 E4 00 00 00 83 F8 10 89 45 A0 0F 86 0E 01 00 00
+
+. 0 3A96B8A4 19
+. FC 8B 75 D8 BA 0B 00 00 00 8D BB 83 F0 FF FF 89 D1 F3 A6
+
+. 0 3A96B8B5 2
+. F3 A6
+
+. 0 3A96B8B7 6
+. 0F 85 F5 00 00 00
+
+. 0 3A96B8BD 34
+. 8B 45 D8 89 C2 89 83 DC 00 00 00 8B 40 0C 8D 04 40 8D 04 85 10 00 00 00 01 C2 83 C0 30 39 45 A0 72 26
+
+. 0 3A96B8DF 21
+. 89 93 E0 00 00 00 B9 14 00 00 00 89 D6 8D BB 8F F0 FF FF F3 A6
+
+. 0 3A96B8F2 2
+. F3 A6
+
+. 0 3A96B8F4 17
+. 0F 97 C2 0F 92 C0 8B 4D D8 38 C2 0F 84 4D FE FF FF
+
+. 0 3A96B752 11
+. 31 C0 83 F9 FF 0F 84 FF 03 00 00
+
+. 0 3A96B75D 27
+. C7 45 DC 00 00 00 00 8B 83 E0 00 00 00 83 F8 FF 89 45 D4 89 C7 0F 84 A0 01 00 00
+
+. 0 3A96B778 26
+. 8B 93 E4 00 00 00 01 D1 8B 93 20 FC FF FF 29 C1 89 4D E0 85 D2 89 55 C8 74 31
+
+. 0 3A96B792 29
+. C7 45 C4 00 00 00 00 8D B3 5C FD FF FF 90 89 74 24 04 8B 4D C8 89 0C 24 E8 D1 9A 00 00
+
+. 0 3A96B7AF 8
+. 85 C0 0F 84 29 04 00 00
+
+. 0 3A96B7B7 12
+. FF 45 C4 83 C6 05 83 7D C4 03 7E DD
+
+. 0 3A96B7A0 15
+. 89 74 24 04 8B 4D C8 89 0C 24 E8 D1 9A 00 00
+
+. 0 3A9752B7 12
+. 0F B6 55 FB F7 DA 89 D0 5A 5E 5D C3
+
+. 0 3A96BBE0 11
+. 8B 45 C4 83 C0 30 E9 DD FB FF FF
+
+. 0 3A96B7C8 18
+. 89 45 CC 99 8B 45 CC 89 55 D0 8B 75 D0 21 F0 40 74 1F
+
+. 0 3A96B7DA 21
+. 0F B6 4D CC B8 01 00 00 00 31 D2 0F A5 C2 D3 E0 F6 C1 20 74 04
+
+. 0 3A96B7EF 32
+. 89 C2 31 C0 89 45 CC 89 55 D0 C7 45 EC 00 00 00 00 8B 55 D4 8B 4A 14 49 39 4D EC 89 4D E8 7E 1B
+
+. 0 3A96B82A 40
+. 8B 45 EC 8B 4D E8 01 C8 89 C2 C1 EA 1F 8D 34 02 D1 FE 8D 04 76 C1 E0 03 89 45 C0 8B 44 07 34 3B 45 E0 0F 83 FE 02 00 00
+
+. 0 3A96B852 13
+. 8B 55 D4 01 C2 8B 45 F0 E8 51 FD FF FF
+
+. 0 3A96B5B0 27
+. 55 89 E5 57 89 C7 56 83 EC 0C 89 55 F4 0F B6 00 84 C0 88 45 EF 0F 84 DF 00 00 00
+
+. 0 3A96B5CB 17
+. 0F B6 0A 0F B6 45 EF 2C 30 3C 09 0F 87 AA 00 00 00
+
+. 0 3A96B686 13
+. 88 C8 BA FF FF FF FF 2C 30 3C 09 76 26
+
+. 0 3A96B693 5
+. 38 4D EF 75 18
+
+. 0 3A96B698 18
+. FF 45 F4 47 8B 45 F4 0F B6 0F 88 4D EF 0F B6 08 EB C8
+
+. 0 3A96B672 6
+. 80 7D EF 00 74 38
+
+. 0 3A96B678 14
+. 0F B6 45 EF 2C 30 3C 09 0F 86 5A FF FF FF
+
+. 0 3A96B6B0 18
+. 0F BE 55 EF 0F BE C1 29 C2 83 C4 0C 89 D0 5E 5F 5D C3
+
+. 0 3A96B85F 8
+. 85 C0 0F 84 25 04 00 00
+
+. 0 3A96B867 4
+. 85 C0 79 A9
+
+. 0 3A96B814 16
+. 4E 89 75 E8 8B 4D E8 39 4D EC 0F 8F 2C 03 00 00
+
+. 0 3A96B824 46
+. 8B BB E0 00 00 00 8B 45 EC 8B 4D E8 01 C8 89 C2 C1 EA 1F 8D 34 02 D1 FE 8D 04 76 C1 E0 03 89 45 C0 8B 44 07 34 3B 45 E0 0F 83 FE 02 00 00
+
+. 0 3A96B86B 7
+. 46 89 75 EC 90 EB A6
+
+. 0 3A96B818 12
+. 8B 4D E8 39 4D EC 0F 8F 2C 03 00 00
+
+. 0 3A96B5E0 17
+. 88 C8 BA 01 00 00 00 2C 30 3C 09 0F 87 C8 00 00 00
+
+. 0 3A96B5F1 36
+. 0F BE 45 EF 47 FF 45 F4 83 E8 30 89 45 F0 0F BE C1 8D 70 D0 0F B6 0F 88 C8 88 4D EF 2C 30 88 CA 3C 09 77 2A
+
+. 0 3A96B63F 16
+. 8B 45 F4 0F B6 08 88 C8 88 CA 2C 30 3C 09 77 1E
+
+. 0 3A96B66D 5
+. 39 75 F0 75 50
+
+. 0 3A96BC8C 11
+. 89 75 EC 85 F6 0F 8E 93 FD FF FF
+
+. 0 3A96BC97 31
+. 8B 4D C0 89 4D A4 8D 76 00 8B BB E0 00 00 00 8B 55 A4 8B 44 17 1C 3B 45 E0 0F 83 7A FD FF FF
+
+. 0 3A96BCB6 13
+. 8B 55 D4 01 C2 8B 45 F0 E8 ED F8 FF FF
+
+. 0 3A96BCC3 8
+. 85 C0 0F 85 5F FD FF FF
+
+. 0 3A96BA2A 23
+. 8B BB E0 00 00 00 3B 75 EC 8D 04 76 8D 3C C7 8D 4F 30 89 4D BC 7E 21
+
+. 0 3A96BA62 21
+. 8B 4F 30 83 F9 01 0F 94 C0 83 F9 03 0F 94 C2 09 D0 A8 01 74 A9
+
+. 0 3A96BA77 11
+. 8B 45 BC 8B 78 08 3B 7D E0 73 9E
+
+. 0 3A96BA82 7
+. 8B 55 DC 85 D2 74 08
+
+. 0 3A96BA91 10
+. 8B 83 1C FC FF FF 85 C0 74 0C
+
+. 0 3A96BA9B 12
+. 8B 55 BC 39 42 0C 0F 87 79 FF FF FF
+
+. 0 3A96BAA7 18
+. 8B 55 D0 8B 45 CC F7 D2 F7 D0 09 C2 0F 84 20 02 00 00
+
+. 0 3A96BAB9 38
+. C7 45 B4 00 00 00 00 8B 55 BC 8B 42 10 8B 52 14 89 45 A8 89 55 AC 81 E2 00 00 0F 00 89 D0 0B 45 B4 89 55 B8 74 12
+
+. 0 3A96BAF1 65
+. 8B 83 54 FC FF FF 31 D2 81 CA 00 00 0F 00 89 55 9C 89 C2 8B 45 9C F7 D2 89 55 98 F7 D0 8B 55 A8 89 45 9C 8B 45 98 21 C2 89 55 98 8B 45 AC 8B 55 9C 21 D0 89 C2 0B 55 98 89 45 9C 0F 85 EE FE FF FF
+
+. 0 3A96BB32 20
+. 8B 45 D4 01 C7 3B 8B 50 FC FF FF 89 7D DC 0F 85 DA FE FF FF
+
+. 0 3A96BB46 19
+. 8D 76 00 8D BC 27 00 00 00 00 F6 83 14 FC FF FF 01 75 66
+
+. 0 3A96BB59 11
+. 8B 45 DC 83 C4 64 5B 5E 5F 5D C3
+
+. 0 3A96B3A0 14
+. 89 85 C4 FD FF FF 85 C0 0F 84 8A 00 00 00
+
+. 0 3A96B3AE 14
+. 8B B5 D8 FD FF FF 85 F6 0F 84 37 01 00 00
+
+. 0 3A96B3BC 13
+. F6 86 E9 01 00 00 08 0F 85 48 01 00 00
+
+. 0 3A96B3C9 10
+. 8B 85 C4 FD FF FF 85 C0 74 65
+
+. 0 3A96B3D3 23
+. 8B 85 C4 FD FF FF 8D 95 E4 FD FF FF 89 95 B8 FD FF FF E8 36 DB FF FF
+
+. 0 3A969041 6
+. 83 7F 10 20 75 DA
+
+. 0 3A969047 6
+. 83 7F 1C 03 76 D4
+
+. 0 3A96904D 24
+. 8B 47 04 8B 4D CC 8D 50 20 3B 11 8D 44 08 04 89 45 B4 0F 87 F6 00 00 00
+
+. 0 3A969065 19
+. FC 8B 75 B4 BA 10 00 00 00 8D BB 38 D6 FF FF 89 D1 F3 A6
+
+. 0 3A969076 2
+. F3 A6
+
+. 0 3A969078 6
+. 0F 84 86 00 00 00
+
+. 0 3A969104 32
+. 8B 75 B4 0F B6 46 14 0F B6 56 18 C1 E0 08 01 D0 0F B6 56 1C C1 E0 08 8B 76 10 01 D0 85 F6 75 0E
+
+. 0 3A969124 10
+. 8B 93 1C FC FF FF 85 D2 74 22
+
+. 0 3A96912E 4
+. 39 C2 73 1E
+
+. 0 3A96B3EA 9
+. 89 85 CC FD FF FF 40 74 52
+
+. 0 3A96B3F3 14
+. 8B 8D C4 FD FF FF 89 0C 24 E8 CF 9E 00 00
+
+. 0 3A96B401 11
+. 8D 70 01 89 34 24 E8 10 A4 FF FF
+
+. 0 3A96B40C 8
+. 89 C2 31 C0 85 D2 74 16
+
+. 0 3A96B414 22
+. 89 74 24 08 8B 85 C4 FD FF FF 89 14 24 89 44 24 04 E8 46 A4 00 00
+
+. 0 3A96B42A 14
+. 89 85 E0 FD FF FF 85 C0 0F 84 48 01 00 00
+
+. 0 3A96B438 13
+. 83 BD CC FD FF FF FF 0F 85 06 FE FF FF
+
+. 0 3A96B24B 13
+. F6 83 14 FC FF FF 01 0F 85 6B 02 00 00
+
+. 0 3A96B258 25
+. 83 BD CC FD FF FF FF 8D 85 E4 FD FF FF 89 85 B8 FD FF FF 0F 85 BF FD FF FF
+
+. 0 3A96CAC0 9
+. 89 C2 8B 40 0C 85 C0 75 F7
+
+. 0 3A9699C5 23
+. 8B 46 08 8B 4D 84 89 81 44 01 00 00 83 C6 20 3B B5 54 FF FF FF 72 AF
+
+. 0 3A969975 22
+. 8D 74 26 00 8D BC 27 00 00 00 00 83 C6 20 3B B5 54 FF FF FF 73 51
+
+. 0 3A97571F 11
+. 89 F9 F7 D9 83 E1 03 29 CA F3 AA
+
+. 0 3A97572A 41
+. 83 EA 20 8B 0F 90 8B 4F 1C 83 EA 20 89 47 00 89 47 04 89 47 08 89 47 0C 89 47 10 89 47 14 89 47 18 89 47 1C 8D 7F 20 7D DD
+
+. 0 3A975730 35
+. 8B 4F 1C 83 EA 20 89 47 00 89 47 04 89 47 08 89 47 0C 89 47 10 89 47 14 89 47 18 89 47 1C 8D 7F 20 7D DD
+
+. 0 3A975753 8
+. 8D 4A 20 C1 E9 02 F3 AB
+
+. 0 3A969FDC 66
+. 8B 45 94 8B 55 98 8B 4D 84 89 81 BC 01 00 00 89 91 C0 01 00 00 8B 45 EC 8B 55 F0 89 81 C4 01 00 00 8B 83 00 FC FF FF 89 91 C8 01 00 00 F7 D0 21 85 68 FF FF FF F6 85 68 FF FF FF 01 0F 85 72 03 00 00
+
+. 0 3A96FEDE 11
+. 89 46 14 83 EC 0C 8B 75 FC C9 C3
+
+. 0 3A970246 52
+. 8B 45 E4 8B 95 18 FF FF FF 8B 8D 24 FF FF FF 89 02 8B 85 20 FF FF FF C7 01 00 00 00 00 C7 00 00 00 00 00 31 C0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C2 04 00
+
+. 0 3A96F3A4 14
+. 8B 55 C8 83 EC 04 85 D2 0F 85 44 02 00 00
+
+. 0 3A96F3B2 12
+. 8B 55 E0 F6 82 74 01 00 00 60 75 37
+
+. 0 3A96F3BE 62
+. 8B 4D B0 83 EC 1C 8D 44 24 1F 83 E0 F0 C7 00 00 00 00 00 C7 40 08 00 00 00 00 89 50 04 89 41 08 89 45 B0 0F B6 82 74 01 00 00 FF 45 EC 24 9F 0C 20 88 82 74 01 00 00 8B 7D 9C 85 FF 74 0D
+
+. 0 3A96F409 19
+. 83 45 8C 08 8B 75 8C 8B 06 85 C0 89 C2 0F 84 07 01 00 00
+
+. 0 3A975265 5
+. 40 38 D5 74 D3
+
+. 0 3A9699A2 11
+. 3D 51 E5 74 64 0F 87 5A 02 00 00
+
+. 0 3A9699AD 5
+. 83 F8 07 75 CE
+
+. 0 3A969B31 14
+. 8B 8D 40 FF FF FF 39 8D 38 FF FF FF 76 3C
+
+. 0 3A96A171 61
+. 31 C0 89 44 24 14 B8 FF FF FF FF 89 44 24 10 B8 32 00 00 00 89 44 24 0C 8B 47 14 89 44 24 08 8B 95 38 FF FF FF 29 95 3C FF FF FF 8B 8D 3C FF FF FF 89 14 24 89 4C 24 04 E8 A2 AA 00 00
+
+. 0 3A96A1AE 7
+. 40 0F 85 D8 F9 FF FF
+
+. 0 3A96F425 24
+. 81 FA FD FF FF 7F 0F 94 C0 81 FA FF FF FF 7F 0F 94 C2 09 D0 A8 01 74 CC
+
+. 0 3A96F523 11
+. 8B 45 9C 85 C0 0F 84 9F 00 00 00
+
+. 0 3A96F5CD 9
+. 8B 4D B4 8B 01 85 C0 74 13
+
+. 0 3A96F5D6 13
+. 8B 75 B4 8B 76 08 85 F6 89 75 B4 74 22
+
+. 0 3A96F5E3 6
+. 8B 06 85 C0 75 ED
+
+. 0 3A96F5E9 11
+. 8B 45 B4 85 C0 0F 85 A7 FC FF FF
+
+. 0 3A96F2DF 15
+. 8B 75 A0 0F B7 86 4E 01 00 00 66 85 C0 74 1B
+
+. 0 3A96F2EE 37
+. 0F B7 C0 8D 04 85 12 00 00 00 25 FC FF 0F 00 29 C4 8D 44 24 1F 83 E0 F0 89 45 9C 8B 55 A0 8B 42 1C 85 C0 75 18
+
+. 0 3A96FECD 17
+. 89 C1 83 E1 03 89 0C 24 8B 06 31 C9 E8 62 AF FF FF
+
+. 0 3A970BE0 10
+. 83 C4 08 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96AFA7 12
+. 89 F0 8D 65 F4 5B 5E 5F 5D C2 0C 00
+
+. 0 3A96F3F5 7
+. 8B 7D 9C 85 FF 74 0D
+
+. 0 3A96F3FC 32
+. 8B 75 98 8B 45 9C 89 14 B0 46 89 75 98 83 45 8C 08 8B 75 8C 8B 06 85 C0 89 C2 0F 84 07 01 00 00
+
+. 0 3A96F52E 32
+. 8B 45 98 8B 55 9C C7 04 82 00 00 00 00 40 89 45 98 8D 04 C5 04 00 00 00 89 04 24 E8 CE 62 FF FF
+
+. 0 3A96F54E 19
+. 8B 4D A0 85 C0 89 C2 89 81 D4 01 00 00 0F 84 48 05 00 00
+
+. 0 3A96F561 22
+. 8B 4D 98 8D 7A 04 8B 45 A0 8B 75 9C C1 E1 02 83 F9 07 89 02 76 16
+
+. 0 3A96F577 8
+. F7 C7 04 00 00 00 74 0E
+
+. 0 3A96F57F 20
+. 8B 06 8D 7A 08 83 C6 04 83 E9 04 89 42 04 FC C1 E9 02 F3 A5
+
+. 0 3A96F591 2
+. F3 A5
+
+. 0 3A96F593 28
+. 8B 55 A0 8B 45 98 8B B2 D4 01 00 00 C1 E0 02 83 F8 07 89 C2 8D 0C 06 8D 79 04 76 16
+
+. 0 3A96F5AF 8
+. F7 C7 04 00 00 00 74 0E
+
+. 0 3A96F5B7 22
+. 8B 06 8D 79 08 83 C6 04 83 EA 04 89 41 04 FC 89 D1 C1 E9 02 F3 A5
+
+. 0 3A96F5CB 2
+. F3 A5
+
+. 0 3A96AFE3 9
+. 89 FA 89 F0 E8 F4 DD FF FF
+
+. 0 3A968DE0 19
+. 55 89 E5 57 89 D7 56 53 83 EC 14 8B 70 14 E8 38 CB 00 00
+
+. 0 3A968DF3 17
+. 81 C3 79 F8 00 00 C7 45 F0 00 00 00 00 85 F6 74 28
+
+. 0 3A968E04 26
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 06 89 3C 24 89 44 24 04 E8 62 C4 00 00
+
+. 0 3A968E1E 4
+. 85 C0 74 55
+
+. 0 3A968E22 10
+. 89 75 F0 8B 76 04 85 F6 75 E4
+
+. 0 3A968E2C 8
+. 89 3C 24 E8 9C C4 00 00
+
+. 0 3A968E34 17
+. 8D 50 01 83 C0 0D 89 55 EC 89 04 24 E8 D7 C9 FF FF
+
+. 0 3A968E45 6
+. 85 C0 89 C6 74 34
+
+. 0 3A968E4B 22
+. 8B 55 EC 8D 40 0C 89 7C 24 04 89 04 24 89 54 24 08 E8 0F CA 00 00
+
+. 0 3A968E61 30
+. 89 06 8B 45 F0 C7 46 04 00 00 00 00 C7 46 08 00 00 00 00 89 70 04 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96AFEC 9
+. 80 8E 75 01 00 00 01 EB B2
+
+. 0 3A970BC0 14
+. 8B 06 89 3C 24 89 44 24 04 E8 B2 46 00 00
+
+. 0 3A96F5C5 8
+. FC 89 D1 C1 E9 02 F3 A5
+
+. 0 3A96F313 10
+. 8B 82 E8 00 00 00 85 C0 75 0E
+
+. 0 3A96F31D 14
+. 8B 82 E0 00 00 00 85 C0 0F 84 F8 01 00 00
+
+. 0 3A96F605 25
+. 8B 83 48 01 00 00 8B 7D A8 85 C0 0F 94 C0 85 FF 0F 95 C2 21 D0 A8 01 74 09
+
+. 0 3A96F61E 22
+. 8B 45 A8 89 83 48 01 00 00 8B 4D C0 8B 91 D4 01 00 00 85 D2 74 11
+
+. 0 3A96F645 18
+. 8B 45 EC 8D 04 C5 04 00 00 00 89 04 24 E8 C5 61 FF FF
+
+. 0 3A96F657 19
+. 8B 75 C0 85 C0 89 C1 89 86 D4 01 00 00 0F 84 A4 05 00 00
+
+. 0 3A96F66A 39
+. 8B 45 EC 8B 55 C0 8D 4C 81 04 89 8A 50 01 00 00 8B 4D F0 89 82 54 01 00 00 C7 45 EC 00 00 00 00 85 C9 89 4D B4 74 3E
+
+. 0 3A96F691 11
+. 8B 75 08 85 F6 0F 85 28 04 00 00
+
+. 0 3A96F69C 51
+. 8B 55 B4 8B 4A 04 8B 75 C0 8B 45 EC 8B 96 50 01 00 00 89 0C 82 40 89 45 EC 8B 45 B4 8B 48 04 80 A1 74 01 00 00 9F 8B 55 B4 8B 52 08 89 55 B4 85 D2 75 C2
+
+. 0 3A96F6CF 13
+. F6 83 15 FC FF FF 02 0F 85 20 06 00 00
+
+. 0 3A96F6DC 45
+. 8B 55 C0 8B 45 EC 8B 92 50 01 00 00 89 85 78 FF FF FF 89 95 74 FF FF FF C7 45 AC 00 00 00 00 8B 55 C0 8B 8A DC 01 00 00 39 4D AC 73 49
+
+. 0 3A96F752 29
+. 8B 95 78 FF FF FF 8B 45 C0 8B B5 74 FF FF FF C1 E2 02 83 FA 07 8B B8 D4 01 00 00 76 15
+
+. 0 3A96F76F 8
+. F7 C7 04 00 00 00 74 0D
+
+. 0 3A96F784 8
+. FC 89 D1 C1 E9 02 F3 A5
+
+. 0 3A96F78A 2
+. F3 A5
+
+. 0 3A96F78C 25
+. C7 45 AC 01 00 00 00 8B 55 EC 39 55 AC 89 95 78 FF FF FF 0F 83 ED 04 00 00
+
+. 0 3A96F7A5 40
+. 8B 75 C0 8B 8E D4 01 00 00 8B 55 C0 8B 75 AC 8B 82 50 01 00 00 BA 01 00 00 00 8B 04 B0 39 41 04 89 85 7C FF FF FF 74 11
+
+. 0 3A96F7DE 15
+. 8D 7A 01 3B BD 78 FF FF FF 0F 83 B0 00 00 00
+
+. 0 3A96F7ED 39
+. C1 E2 02 89 95 6C FF FF FF F7 DA 89 95 68 FF FF FF 89 F6 8D 34 BD 00 00 00 00 8B 04 31 8B 90 D4 01 00 00 85 D2 74 7C
+
+. 0 3A96F814 6
+. 8B 02 85 C0 74 76
+
+. 0 3A96F81A 14
+. 03 B5 68 FF FF FF 89 B5 70 FF FF FF EB 06
+
+. 0 3A96F82E 11
+. 83 C2 04 3B 85 7C FF FF FF 75 EF
+
+. 0 3A96F828 6
+. 8B 02 85 C0 74 62
+
+. 0 3A96F890 13
+. 47 3B BD 78 FF FF FF 0F 82 63 FF FF FF
+
+. 0 3A96F800 20
+. 8D 34 BD 00 00 00 00 8B 04 31 8B 90 D4 01 00 00 85 D2 74 7C
+
+. 0 3A96F89D 18
+. FF 45 AC 8B 85 78 FF FF FF 39 45 AC 0F 82 FF FE FF FF
+
+. 0 3A96F7AE 31
+. 8B 55 C0 8B 75 AC 8B 82 50 01 00 00 BA 01 00 00 00 8B 04 B0 39 41 04 89 85 7C FF FF FF 74 11
+
+. 0 3A96F7CD 17
+. 8D 41 04 83 C0 04 8B B5 7C FF FF FF 42 39 30 75 F2
+
+. 0 3A96F7D0 14
+. 83 C0 04 8B B5 7C FF FF FF 42 39 30 75 F2
+
+. 0 3A96F8AF 5
+. E9 E7 03 00 00
+
+. 0 3A96FC9B 24
+. 8B B5 78 FF FF FF C7 04 B1 00 00 00 00 8B 75 A4 85 F6 0F 85 A9 01 00 00
+
+. 0 3A96FCB3 10
+. 8D 65 F4 5B 5E 5F 5D C2 08 00
+
+. 0 3A967090 25
+. 8B 8B 94 F9 FF FF 83 EC 08 8B 91 54 01 00 00 89 95 04 FF FF FF 85 D2 74 3B
+
+. 0 3A9670A9 59
+. FF 8D 04 FF FF FF 8B 81 50 01 00 00 8B 8D 04 FF FF FF 8B 04 88 80 88 74 01 00 00 10 8B 8B 94 F9 FF FF 8B B5 04 FF FF FF 8B 81 50 01 00 00 8B 04 B0 FF 80 70 01 00 00 85 F6 75 C5
+
+. 0 3A9670E4 25
+. 8B 83 F0 F9 FF FF 8B 93 F4 F9 FF FF 89 42 0C 8B 83 F0 F9 FF FF 85 C0 74 03
+
+. 0 3A9670FD 16
+. 89 50 10 83 BB 54 FB FF FF 01 0F 86 52 0A 00 00
+
+. 0 3A96710D 37
+. 8B 8B 94 F9 FF FF B8 01 00 00 00 8B 95 D0 FE FF FF 89 85 04 FF FF FF 8B 81 50 01 00 00 83 C2 50 39 50 04 74 11
+
+. 0 3A967132 17
+. FF 85 04 FF FF FF 8B B5 04 FF FF FF 39 14 B0 75 EF
+
+. 0 3A967143 42
+. 8B 81 50 01 00 00 8B B5 04 FF FF FF 8B BD 50 FF FF FF 89 85 A8 FE FF FF 8B 54 B0 FC 85 FF 89 93 F4 F9 FF FF 0F 85 D8 11 00 00
+
+. 0 3A96716D 13
+. 89 F0 40 31 F6 3B 81 54 01 00 00 73 10
+
+. 0 3A96718A 20
+. 89 B3 F0 F9 FF FF 8B 85 C4 FE FF FF 85 C0 0F 84 9C 11 00 00
+
+. 0 3A96833A 11
+. 8B 93 F4 F9 FF FF E9 6E EE FF FF
+
+. 0 3A9671B3 22
+. 8B 8D D0 FE FF FF 83 C1 50 89 4A 0C 8B 83 F0 F9 FF FF 85 C0 74 03
+
+. 0 3A9671CC 50
+. 31 C0 85 FF 0F 94 C0 89 85 24 FF FF FF 31 C0 83 FF 03 0F 94 C0 8D 8D 24 FF FF FF 8D 93 44 02 FF FF 89 85 28 FF FF FF 8D 83 B4 D9 FE FF E8 62 8F 00 00
+
+. 0 3A970160 17
+. 55 89 E5 83 EC 28 89 5D F4 89 75 F8 E8 BA 57 00 00
+
+. 0 3A970171 24
+. 81 C3 FB 84 00 00 89 7D FC 89 55 EC 89 4D E8 89 45 F0 FF 93 E0 F9 FF FF
+
+. 0 3A970189 37
+. 8B 38 89 C6 8B 83 E8 00 00 00 C7 06 00 00 00 00 89 45 E4 8B 45 F0 89 83 E8 00 00 00 8B 45 E8 89 04 24 FF 55 EC
+
+. 0 3A9688B0 25
+. 55 BA 01 00 00 00 89 E5 83 EC 0C 89 75 FC 8B 75 08 89 5D F8 E8 62 D0 00 00
+
+. 0 3A9688C9 20
+. 81 C3 A3 FD 00 00 8B 4E 04 8B 83 94 F9 FF FF E8 83 8F 00 00
+
+. 0 3A971860 18
+. 55 89 E5 57 31 FF 56 89 C6 53 83 EC 0C E8 B9 40 00 00
+
+. 0 3A971872 16
+. 81 C3 FA 6D 00 00 89 55 F0 85 C0 89 4D EC 75 0E
+
+. 0 3A971890 16
+. F6 86 75 01 00 00 02 C7 45 E8 00 00 00 00 75 E4
+
+. 0 3A9718A0 13
+. 8B 4D EC 89 F0 8B 55 F0 E8 F3 FB FF FF
+
+. 0 3A9714A0 23
+. 55 89 E5 57 56 53 81 EC 84 00 00 00 89 45 B0 8B 40 2C E8 74 44 00 00
+
+. 0 3A9714B7 43
+. 81 C3 B5 71 00 00 89 55 AC 31 D2 85 C0 89 4D A8 C7 45 A4 00 00 00 00 C7 45 94 00 00 00 00 C7 45 90 00 00 00 00 0F 84 4E 01 00 00
+
+. 0 3A9714E2 38
+. 8B 40 04 8B 4D B0 89 45 A0 8B 45 B0 8B 89 A4 00 00 00 8B 80 AC 00 00 00 85 C9 89 4D 9C 89 45 98 0F 84 EA 00 00 00
+
+. 0 3A971508 23
+. 8B 51 04 8B 4D B0 8B 09 01 CA 66 83 3A 01 89 55 8C 0F 85 77 01 00 00
+
+. 0 3A97151F 22
+. 90 8B 55 8C 8B 7D A0 8B B3 94 F9 FF FF 8B 42 04 01 C7 85 F6 74 22
+
+. 0 3A971535 20
+. 8D 74 26 00 8D BC 27 00 00 00 00 89 F2 89 F8 E8 37 F6 FF FF
+
+. 0 3A971549 7
+. 89 75 88 85 C0 75 1F
+
+. 0 3A971550 7
+. 8B 76 0C 85 F6 75 E9
+
+. 0 3A971540 9
+. 89 F2 89 F8 E8 37 F6 FF FF
+
+. 0 3A97156F 11
+. 8B 45 A8 85 C0 0F 85 07 01 00 00
+
+. 0 3A97157A 29
+. 8B 75 8C 8B 46 08 01 C6 8B 55 B0 8B 7E 08 8B 4D A0 8B 42 04 01 F9 89 C7 80 38 00 75 08
+
+. 0 3A971597 41
+. 8B 83 38 00 00 00 8B 38 0F B7 46 04 83 E0 02 89 44 24 08 8B 45 AC 89 44 24 04 8B 55 88 89 F8 89 14 24 8B 16 E8 60 FB FF FF
+
+. 0 3A971120 30
+. 55 89 E5 57 89 D7 56 31 F6 53 81 EC B4 00 00 00 8B 55 08 89 85 60 FF FF FF E8 ED 47 00 00
+
+. 0 3A97113E 43
+. 81 C3 2E 75 00 00 89 8D 5C FF FF FF 8B 42 2C F6 83 14 FC FF FF 10 8B 40 04 89 B5 54 FF FF FF 89 85 58 FF FF FF 0F 85 C1 01 00 00
+
+. 0 3A971169 17
+. 8B 55 08 8B 82 AC 00 00 00 85 C0 0F 84 3F 01 00 00
+
+. 0 3A97117A 16
+. 8B 55 08 8B 32 8B 50 04 01 D6 66 83 3E 01 75 1C
+
+. 0 3A97118A 9
+. 3B 7E 08 0F 84 ED 00 00 00
+
+. 0 3A971193 11
+. 8B 46 10 85 C0 0F 84 C7 01 00 00
+
+. 0 3A97119E 8
+. 01 C6 66 83 3E 01 74 E4
+
+. 0 3A971280 35
+. 8B 56 0C 89 F0 01 D0 8B 08 8B 95 58 FF FF FF 01 CA 89 54 24 04 8B 85 5C FF FF FF 89 04 24 E8 DD 3F 00 00
+
+. 0 3A9712A3 10
+. 31 D2 85 C0 0F 85 E6 FE FF FF
+
+. 0 3A9712AD 12
+. 8D 65 F4 89 D0 5B 5E 5F 5D C2 0C 00
+
+. 0 3A9715C0 20
+. 09 45 A4 83 EC 0C 0F B7 46 06 25 FF 7F 00 00 3B 45 94 76 03
+
+. 0 3A9715D4 10
+. 89 45 94 8B 46 0C 85 C0 75 A2
+
+. 0 3A9715DE 10
+. 8B 4D 8C 8B 41 0C 85 C0 74 0A
+
+. 0 3A9715E8 10
+. 01 C1 89 4D 8C E9 2E FF FF FF
+
+. 0 3A971520 21
+. 8B 55 8C 8B 7D A0 8B B3 94 F9 FF FF 8B 42 04 01 C7 85 F6 74 22
+
+. 0 3A9715D7 7
+. 8B 46 0C 85 C0 75 A2
+
+. 0 3A9715F2 7
+. 8B 75 98 85 F6 74 29
+
+. 0 3A971622 11
+. 8B 45 94 85 C0 0F 85 1D 01 00 00
+
+. 0 3A97174A 21
+. BF 10 00 00 00 89 7C 24 04 8B 75 94 46 89 34 24 E8 CD 40 FF FF
+
+. 0 3A97175F 28
+. C7 45 90 0C 00 00 00 8B 55 B0 85 C0 89 C7 8D 8B 7C EB FF FF 89 82 7C 01 00 00 74 AF
+
+. 0 3A97177B 28
+. 89 B2 78 01 00 00 8B 82 DC 00 00 00 8B 75 9C 8B 40 04 85 F6 89 82 8C 01 00 00 74 63
+
+. 0 3A971797 23
+. 8B 4D 9C 8B 02 8B 49 04 01 C1 89 4D 84 8B 75 84 8B 4E 08 01 CE EB 04
+
+. 0 3A9717B2 62
+. 0F B7 56 06 8B 06 89 D1 81 E1 FF 7F 00 00 81 E2 00 80 00 00 C1 E1 04 89 44 0F 04 8B 45 A0 89 54 0F 08 8B 56 08 01 D0 89 04 0F 8B 55 84 8B 45 A0 03 42 04 89 44 0F 0C 8B 46 0C 85 C0 75 C0
+
+. 0 3A9717F0 10
+. 8B 4D 84 8B 41 0C 85 C0 75 A5
+
+. 0 3A97179F 15
+. 01 C1 89 4D 84 8B 75 84 8B 4E 08 01 CE EB 04
+
+. 0 3A9717FA 11
+. 8B 75 98 85 F6 0F 84 28 FE FF FF
+
+. 0 3A97162D 13
+. 8B 55 A4 8D 65 F4 89 D0 5B 5E 5F 5D C3
+
+. 0 3A9718AD 4
+. 85 C0 74 D3
+
+. 0 3A971884 12
+. 8B 55 E8 8B 76 0C 09 D7 85 F6 74 40
+
+. 0 3A97159F 33
+. 0F B7 46 04 83 E0 02 89 44 24 08 8B 45 AC 89 44 24 04 8B 55 88 89 F8 89 14 24 8B 16 E8 60 FB FF FF
+
+. 0 3A971580 23
+. 01 C6 8B 55 B0 8B 7E 08 8B 4D A0 8B 42 04 01 F9 89 C7 80 38 00 75 08
+
+. 0 3A9717B0 64
+. 01 C6 0F B7 56 06 8B 06 89 D1 81 E1 FF 7F 00 00 81 E2 00 80 00 00 C1 E1 04 89 44 0F 04 8B 45 A0 89 54 0F 08 8B 56 08 01 D0 89 04 0F 8B 55 84 8B 45 A0 03 42 04 89 44 0F 0C 8B 46 0C 85 C0 75 C0
+
+. 0 3A9715F9 15
+. 8B 4D 98 8B 45 B0 8B 51 04 8B 08 01 CA EB 02
+
+. 0 3A97160A 14
+. 0F B7 42 04 25 FF 7F 00 00 3B 45 94 76 03
+
+. 0 3A97161B 7
+. 8B 42 10 85 C0 75 E6
+
+. 0 3A971608 16
+. 01 C2 0F B7 42 04 25 FF 7F 00 00 3B 45 94 76 03
+
+. 0 3A971805 15
+. 8B 45 98 8B 55 B0 8B 70 04 8B 0A 01 CE EB 02
+
+. 0 3A971816 13
+. 8B 46 0C 89 F7 01 C7 F6 46 02 01 75 2A
+
+. 0 3A97184D 7
+. 8B 46 10 85 C0 75 C0
+
+. 0 3A971814 15
+. 01 C6 8B 46 0C 89 F7 01 C7 F6 46 02 01 75 2A
+
+. 0 3A971823 49
+. 0F B7 46 04 8B 55 B0 8B 8A 7C 01 00 00 8B 56 08 25 FF 7F 00 00 C1 E0 04 89 54 01 04 8B 55 A0 03 17 31 FF 89 7C 01 0C 89 14 01 8B 46 10 85 C0 75 C0
+
+. 0 3A971854 5
+. E9 D4 FD FF FF
+
+. 0 3A971618 10
+. 89 45 94 8B 42 10 85 C0 75 E6
+
+. 0 3A9718D0 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3
+
+. 0 3A9688DD 4
+. 85 C0 74 06
+
+. 0 3A9688E7 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3A9701AE 24
+. 89 3E 8B 45 E4 89 83 E8 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9671FE 14
+. 8B BD 50 FF FF FF 85 FF 0F 85 96 0F 00 00
+
+. 0 3A96720C 20
+. 8B 8B 94 F9 FF FF 8B 91 34 01 00 00 85 D2 0F 84 F3 00 00 00
+
+. 0 3A967313 11
+. 8B 83 E4 F9 FF FF E8 12 98 00 00
+
+. 0 3A970B30 5
+. E8 F2 4D 00 00
+
+. 0 3A970B35 21
+. 81 C1 37 7B 00 00 55 8B 91 28 00 00 00 89 E5 83 7A 08 00 75 1B
+
+. 0 3A970B4A 31
+. 89 42 10 8B 81 94 F9 FF FF C7 02 01 00 00 00 89 42 04 8D 81 04 85 FF FF 89 42 08 5D 89 D0 C3
+
+. 0 3A96731E 19
+. 89 85 78 FE FF FF 8B 83 94 F9 FF FF 8B 40 6C 85 C0 74 09
+
+. 0 3A967331 19
+. 8B 95 78 FE FF FF 89 50 04 8B 83 50 FA FF FF 85 C0 74 09
+
+. 0 3A96734D 63
+. 80 BD 03 FF FF FF 00 8B B3 94 F9 FF FF 8D 86 50 01 00 00 89 83 A4 F9 FF FF 8B 96 54 01 00 00 89 F1 89 83 9C F9 FF FF 8B 86 50 01 00 00 89 93 2C FC FF FF 89 83 28 FC FF FF 0F 84 84 05 00 00
+
+. 0 3A967910 31
+. 31 C0 83 BB 7C FD FF FF 00 0F 95 C0 09 83 3C FC FF FF 89 85 74 FE FF FF 8B 46 0C 85 C0 74 5D
+
+. 0 3A96792F 9
+. 89 C6 8B 40 0C 85 C0 75 F7
+
+. 0 3A967938 2
+. EB 52
+
+. 0 3A96798C 10
+. 8B 46 14 8B 40 04 85 C0 74 B8
+
+. 0 3A967996 2
+. EB A8
+
+. 0 3A967940 14
+. C7 40 08 01 00 00 00 8B 40 04 85 C0 75 F2
+
+. 0 3A96794E 13
+. 8B BD D0 FE FF FF 83 C7 50 39 FE 74 26
+
+. 0 3A967981 11
+. 8B 76 10 85 F6 0F 84 96 07 00 00
+
+. 0 3A96795B 38
+. 8B 85 74 FE FF FF 89 44 24 0C 8B 83 3C FC FF FF 89 44 24 08 8B 86 B0 01 00 00 89 34 24 89 44 24 04 E8 2F 65 00 00
+
+. 0 3A96DEB0 26
+. 55 89 E5 8D 45 08 57 56 53 81 EC FC 00 00 00 8B 75 08 89 45 EC E8 61 7A 00 00
+
+. 0 3A96DECA 28
+. 81 C3 A2 A7 00 00 F6 86 74 01 00 00 04 C7 45 98 00 00 00 00 89 F2 0F 85 1A 04 00 00
+
+. 0 3A96DEE6 7
+. 8B 7D 14 85 FF 75 0E
+
+. 0 3A96DEED 27
+. 31 C0 83 7E 78 00 0F 94 C0 F7 D8 21 45 10 F6 83 14 FC FF FF 20 0F 85 04 0A 00 00
+
+. 0 3A96DF08 11
+. 8B 4E 70 85 C9 0F 85 3A 0A 00 00
+
+. 0 3A96DF13 16
+. 8B 46 2C 8B 40 04 89 45 F0 8B 46 74 85 C0 74 4D
+
+. 0 3A96DF23 7
+. 8B 7D 10 85 FF 74 46
+
+. 0 3A96DF2A 13
+. 8B 46 24 8B 50 04 8B 42 04 85 C0 74 13
+
+. 0 3A96DF4A 14
+. 89 72 04 8B 45 14 85 C0 0F 85 DF 0A 00 00
+
+. 0 3A96DF58 11
+. 8D 83 84 78 FF FF 89 42 08 EB 0D
+
+. 0 3A96DF70 35
+. C7 45 D4 00 00 00 00 8B 46 5C C7 45 DC 00 00 00 00 C7 45 D0 00 00 00 00 85 C0 C7 45 CC 00 00 00 00 74 0F
+
+. 0 3A96DF93 22
+. 8B 40 04 89 45 CC 8B 46 60 8B 40 04 89 45 D0 8B 46 68 85 C0 74 0A
+
+. 0 3A96DFA9 10
+. 83 78 04 11 0F 84 73 02 00 00
+
+. 0 3A96E226 13
+. 8B 7D 10 8B 46 74 85 FF 8B 48 04 75 0C
+
+. 0 3A96E23F 23
+. 89 4D D8 8B 46 20 8B 55 10 8B 40 04 89 55 E0 89 45 DC E9 5D FD FF FF
+
+. 0 3A96DFB3 51
+. C7 45 90 00 00 00 00 89 75 8C 8B 4D 90 8D 04 49 8D 44 85 F4 8D 50 D8 8B 48 D8 8B 7A 04 89 C8 01 F8 89 45 84 8B 06 89 4D 88 89 45 80 8B 42 08 85 C0 74 4F
+
+. 0 3A96E035 30
+. 8B 46 30 31 D2 8B 40 04 89 95 78 FF FF FF 89 85 7C FF FF FF 8B 86 B4 00 00 00 85 C0 74 09
+
+. 0 3A96E053 24
+. 8B 40 04 89 85 78 FF FF FF 89 F8 89 CA C1 E8 03 3B 85 78 FF FF FF 76 06
+
+. 0 3A96E06B 22
+. 8B 85 78 FF FF FF 8D 8B E4 F9 FF FF 8D 04 C2 39 CE 89 45 88 74 2F
+
+. 0 3A96E081 7
+. 8B 7D 80 85 FF 74 28
+
+. 0 3A96E088 4
+. 39 C2 73 24
+
+. 0 3A96E08C 24
+. 8D 74 26 00 8B 0A 83 C2 08 8B 45 80 8B 7D 80 01 C8 01 38 3B 55 88 72 EC
+
+. 0 3A96E090 20
+. 8B 0A 83 C2 08 8B 45 80 8B 7D 80 01 C8 01 38 3B 55 88 72 EC
+
+. 0 3A96E0A4 26
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 86 DC 00 00 00 85 C0 0F 84 73 05 00 00
+
+. 0 3A96E0BE 21
+. 8B 40 04 89 85 74 FF FF FF 8B 45 84 39 45 88 0F 83 5D 02 00 00
+
+. 0 3A96E0D3 110
+. 8D B6 00 00 00 00 8D BC 27 00 00 00 00 8B 55 88 8B BD 74 FF FF FF 8B 52 04 89 95 0C FF FF FF 89 D0 C1 E8 08 0F B7 0C 47 8B BD 7C FF FF FF C1 E0 04 01 C7 8B 45 8C 81 E1 FF 7F 00 00 89 7D A8 C1 E1 04 8B 90 7C 01 00 00 8B 45 80 01 D1 8B 55 88 03 02 0F B6 95 0C FF FF FF 89 85 70 FF FF FF 83 FA 08 89 95 6C FF FF FF 0F 84 A4 0B 00 00
+
+. 0 3A96E141 14
+. 8B 85 6C FF FF FF 85 C0 0F 84 D1 01 00 00
+
+. 0 3A96E14F 21
+. 89 BD 68 FF FF FF 0F B6 47 0C C0 E8 04 84 C0 0F 84 DC 05 00 00
+
+. 0 3A96E164 12
+. 3B BE FC 01 00 00 0F 84 D0 0B 00 00
+
+. 0 3A96E170 25
+. 31 D2 83 BD 6C FF FF FF 07 0F 94 C2 83 BD 6C FF FF FF 05 0F 84 7B 07 00 00
+
+. 0 3A96E189 29
+. 89 96 00 02 00 00 8B BD 68 FF FF FF 89 BE FC 01 00 00 31 FF 85 C9 BE 01 00 00 00 74 0B
+
+. 0 3A96E1A6 7
+. 8B 41 04 85 C0 74 04
+
+. 0 3A96E1AD 49
+. 89 CF 31 F6 8B 45 A8 8B 4D F0 8B 00 89 74 24 0C 89 7C 24 04 01 C8 31 C9 89 4C 24 10 8D 4D A8 89 54 24 08 8B 55 0C 89 14 24 8B 55 08 E8 02 DF FF FF
+
+. 0 3A96C0E0 19
+. 55 89 E5 57 31 FF 56 53 81 EC A0 00 00 00 E8 38 98 00 00
+
+. 0 3A96C0F3 20
+. 81 C3 79 C5 00 00 89 45 A8 89 55 A4 89 4D A0 E8 E9 FB FF FF
+
+. 0 3A96BCF0 14
+. 55 89 C1 31 D2 0F B6 00 89 E5 84 C0 74 72
+
+. 0 3A96BCFE 11
+. 41 0F B6 D0 0F B6 01 84 C0 74 67
+
+. 0 3A96BD09 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 57
+
+. 0 3A96BD19 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 47
+
+. 0 3A96BD29 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 37
+
+. 0 3A96BD39 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 27
+
+. 0 3A96BD49 37
+. 8D B4 26 00 00 00 00 C1 E2 04 0F B6 C0 01 C2 89 D0 41 25 00 00 00 F0 31 C2 C1 E8 18 31 C2 0F B6 01 84 C0 75 E2
+
+. 0 3A96BD50 30
+. C1 E2 04 0F B6 C0 01 C2 89 D0 41 25 00 00 00 F0 31 C2 C1 E8 18 31 C2 0F B6 01 84 C0 75 E2
+
+. 0 3A96BD6E 6
+. 89 F6 5D 89 D0 C3
+
+. 0 3A96C107 37
+. 89 45 9C 8B 45 18 8B 75 08 FF 83 D4 F9 FF FF 85 C0 C7 45 BC 00 00 00 00 C7 45 C0 00 00 00 00 0F 85 90 02 00 00
+
+. 0 3A96C12C 11
+. 8B 45 08 8B 10 85 D2 89 F8 74 5E
+
+. 0 3A96C137 69
+. 8D 55 BC 89 55 84 8D 76 00 8B 4D 10 89 4C 24 18 8B 55 18 89 54 24 14 8B 4D 14 89 4C 24 10 8B 55 0C 89 44 24 08 89 54 24 0C 8B 06 89 44 24 04 8B 4D 84 89 0C 24 8B 45 A0 8B 55 9C 8B 08 8B 45 A8 E8 34 FC FF FF
+
+. 0 3A96BDB0 23
+. 55 89 E5 57 56 53 83 EC 3C 89 45 F0 8B 45 0C 89 55 EC E8 64 9B 00 00
+
+. 0 3A96BDC7 22
+. 81 C3 A5 C8 00 00 89 4D E8 8B 10 8B 40 04 89 55 E4 89 45 E0 EB 12
+
+. 0 3A96BDEF 48
+. C7 45 CC 00 00 00 00 8B 45 E4 8B 4D 10 C7 45 C8 00 00 00 00 8B 55 1C 8B 0C 88 8B 45 1C 89 4D DC 85 C0 0F 95 C0 39 D1 0F 94 C2 21 D0 A8 01 75 C1
+
+. 0 3A96BE1F 6
+. F6 45 20 02 74 0B
+
+. 0 3A96BE30 13
+. F6 83 14 FC FF FF 08 0F 85 FB 01 00 00
+
+. 0 3A96BE3D 56
+. 8B 55 DC 8B 42 30 8B 8A 8C 01 00 00 8B 40 04 89 45 D8 8B 42 2C 8B 40 04 89 4D D0 89 D1 31 D2 89 45 D4 8B 45 EC F7 B1 64 01 00 00 8B 81 68 01 00 00 8B 34 90 85 F6 75 1F
+
+. 0 3A96BE94 17
+. 8B 7D D8 89 F0 C1 E0 04 01 C7 8B 47 04 85 C0 74 DB
+
+. 0 3A96BE80 20
+. 8B 55 DC 8B 82 6C 01 00 00 8B 34 B0 85 F6 0F 84 DC 00 00 00
+
+. 0 3A96BF70 9
+. 83 7D CC 01 8B 7D C8 74 02
+
+. 0 3A96BF79 6
+. 31 FF 85 FF 74 18
+
+. 0 3A96BF97 23
+. 8B 55 14 85 F6 0F 94 C0 85 D2 0F 95 C2 21 D0 A8 01 0F 84 32 FE FF FF
+
+. 0 3A96BFAE 14
+. 8B 55 14 8B 42 0C 85 C0 0F 84 24 FE FF FF
+
+. 0 3A96BDE0 15
+. FF 45 10 8B 4D E0 39 4D 10 0F 83 ED 01 00 00
+
+. 0 3A96BEA5 7
+. 66 83 7F 0E 00 75 06
+
+. 0 3A96BEB2 12
+. 0F B6 47 0C 83 E0 0F 83 F8 02 7F C2
+
+. 0 3A96BEBE 5
+. 3B 7D E8 74 1A
+
+. 0 3A96BEC3 22
+. 8B 0F 8B 45 D4 8B 55 F0 01 C8 89 54 24 04 89 04 24 E8 A7 93 00 00
+
+. 0 3A96BED9 4
+. 85 C0 75 A3
+
+. 0 3A96BE75 5
+. E9 FF 00 00 00
+
+. 0 3A96BEDD 11
+. 8B 55 14 85 D2 0F 84 81 01 00 00
+
+. 0 3A96BEE8 11
+. 8B 45 D0 85 C0 0F 84 8C 00 00 00
+
+. 0 3A96BEF3 47
+. 8B 4D D0 8B 45 DC 0F B7 0C 71 8B 90 7C 01 00 00 81 E1 FF 7F 00 00 C1 E1 04 89 4D C4 8B 4D 14 8B 41 04 8B 4D C4 39 44 0A 04 0F 84 C6 00 00 00
+
+. 0 3A96BFE8 23
+. 8B 4D 14 8B 01 89 44 24 04 8B 4D C4 8B 04 0A 89 04 24 E8 81 92 00 00
+
+. 0 3A96BFFF 8
+. 85 C0 0F 85 1B FF FF FF
+
+. 0 3A96C007 5
+. E9 73 FF FF FF
+
+. 0 3A96BF7F 19
+. 0F B6 47 0C C0 E8 04 0F B6 C0 83 F8 01 0F 84 8C 00 00 00
+
+. 0 3A96C01E 26
+. 8B 55 08 8B 4D DC 89 3A 89 4A 04 BA 01 00 00 00 83 C4 3C 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96C17C 6
+. 85 C0 89 C2 7F 13
+
+. 0 3A96C195 11
+. 8B 75 BC 85 F6 0F 84 3E 02 00 00
+
+. 0 3A96C1A0 16
+. C7 45 90 00 00 00 00 8B 4D A0 8B 11 85 D2 74 0C
+
+. 0 3A96C1B0 12
+. 0F B6 42 0D 83 E0 03 83 F8 03 74 48
+
+. 0 3A96C1BC 29
+. 8D 55 BC 89 55 84 8B 4D 84 8B 71 04 0F B6 86 74 01 00 00 24 03 3C 02 0F 84 2E 04 00 00
+
+. 0 3A96C1D9 17
+. 8B 83 14 FC FF FF A9 04 02 00 00 0F 85 28 02 00 00
+
+. 0 3A96C1EA 26
+. 8B 45 BC 8B 4D A0 8B 55 84 89 01 8B 42 04 8B 00 8D 65 F4 5B 5E 5F 5D C2 14 00
+
+. 0 3A96E1DE 33
+. 89 85 64 FF FF FF 8B 55 A8 83 EC 14 8B 75 08 89 D7 89 96 08 02 00 00 89 86 04 02 00 00 85 FF 74 09
+
+. 0 3A96E1FF 22
+. 8B 57 04 01 95 64 FF FF FF 83 BD 6C FF FF FF 07 0F 87 56 08 00 00
+
+. 0 3A96E215 17
+. 8B 8D 6C FF FF FF 8B 84 8B B0 D7 FF FF 01 D8 FF E0
+
+. 0 3A96EADD 19
+. 8B 95 70 FF FF FF 8B 8D 64 FF FF FF 01 0A E9 30 F8 FF FF
+
+. 0 3A96E320 16
+. 83 45 88 08 8B 7D 84 39 7D 88 0F 82 B0 FD FF FF
+
+. 0 3A96E0E0 97
+. 8B 55 88 8B BD 74 FF FF FF 8B 52 04 89 95 0C FF FF FF 89 D0 C1 E8 08 0F B7 0C 47 8B BD 7C FF FF FF C1 E0 04 01 C7 8B 45 8C 81 E1 FF 7F 00 00 89 7D A8 C1 E1 04 8B 90 7C 01 00 00 8B 45 80 01 D1 8B 55 88 03 02 0F B6 95 0C FF FF FF 89 85 70 FF FF FF 83 FA 08 89 95 6C FF FF FF 0F 84 A4 0B 00 00
+
+. 0 3A96ED40 31
+. 31 C0 83 BD 6C FF FF FF 07 8B 96 00 02 00 00 0F 94 C0 83 BD 6C FF FF FF 05 0F 84 5F 01 00 00
+
+. 0 3A96ED5F 8
+. 39 D0 0F 85 09 F4 FF FF
+
+. 0 3A96ED67 26
+. FF 83 D8 F9 FF FF 8B BE 08 02 00 00 8B 86 04 02 00 00 89 7D A8 E9 C1 F9 FF FF
+
+. 0 3A96E742 11
+. 89 85 64 FF FF FF E9 AE FA FF FF
+
+. 0 3A96E1FB 4
+. 85 FF 74 09
+
+. 0 3A96E308 40
+. 8B 85 64 FF FF FF 8B BD 70 FF FF FF 89 07 8D 76 00 8D BC 27 00 00 00 00 83 45 88 08 8B 7D 84 39 7D 88 0F 82 B0 FD FF FF
+
+. 0 3A96BF92 5
+. 83 F8 02 74 79
+
+. 0 3A96C010 14
+. 8B 83 48 FC FF FF 85 C0 0F 85 A2 00 00 00
+
+. 0 3A96BFBC 8
+. 8B 55 DC E8 BC 4B 00 00
+
+. 0 3A96BFC4 9
+. 85 C0 BA FF FF FF FF 75 61
+
+. 0 3A96BFCD 15
+. FF 45 10 8B 4D E0 39 4D 10 0F 82 13 FE FF FF
+
+. 0 3A96E1B1 45
+. 8B 45 A8 8B 4D F0 8B 00 89 74 24 0C 89 7C 24 04 01 C8 31 C9 89 4C 24 10 8D 4D A8 89 54 24 08 8B 55 0C 89 14 24 8B 55 08 E8 02 DF FF FF
+
+. 0 3A96BFDC 12
+. 31 D2 83 C4 3C 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96C182 8
+. 85 C0 0F 88 46 01 00 00
+
+. 0 3A96C18A 11
+. 83 C6 04 31 C0 8B 16 85 D2 75 AB
+
+. 0 3A96C3DE 9
+. 8B 55 A0 8B 02 85 C0 74 0B
+
+. 0 3A96C3E7 11
+. 0F B6 40 0C C0 E8 04 3C 02 74 0B
+
+. 0 3A96C3FD 21
+. 8B 55 A0 C7 02 00 00 00 00 31 C0 8D 65 F4 5B 5E 5F 5D C2 14 00
+
+. 0 3A96E208 13
+. 83 BD 6C FF FF FF 07 0F 87 56 08 00 00
+
+. 0 3A96BD70 4
+. 5D 89 D0 C3
+
+. 0 3A96C069 11
+. 8B 45 D0 85 C0 0F 84 0B FF FF FF
+
+. 0 3A96C074 6
+. F6 45 18 02 74 33
+
+. 0 3A96C0AD 19
+. 8B 45 D0 0F B7 14 70 89 D0 25 FF 7F 00 00 83 F8 02 EB C9
+
+. 0 3A96C089 6
+. 0F 8E F0 FE FF FF
+
+. 0 3A96E330 13
+. FF 45 90 83 7D 90 01 0F 8E 7D FC FF FF
+
+. 0 3A96DFBA 44
+. 89 75 8C 8B 4D 90 8D 04 49 8D 44 85 F4 8D 50 D8 8B 48 D8 8B 7A 04 89 C8 01 F8 89 45 84 8B 06 89 4D 88 89 45 80 8B 42 08 85 C0 74 4F
+
+. 0 3A96DFE6 7
+. 8B 55 84 39 D1 72 28
+
+. 0 3A96E015 19
+. 8B 7D 88 8B 4D 80 8B 07 0F B6 57 04 01 C1 83 FA 07 74 CA
+
+. 0 3A96DFF2 14
+. 8B 96 F4 01 00 00 85 D2 0F 85 CE 08 00 00
+
+. 0 3A96E000 21
+. 8B 45 80 01 01 83 45 88 08 8B 55 84 39 55 88 0F 83 1B 03 00 00
+
+. 0 3A96E33D 35
+. C7 45 B4 00 00 00 00 8B 46 34 C7 45 BC 00 00 00 00 C7 45 B0 00 00 00 00 85 C0 C7 45 AC 00 00 00 00 74 0F
+
+. 0 3A96E36F 7
+. 8B 46 68 85 C0 74 0A
+
+. 0 3A96E376 10
+. 83 78 04 07 0F 84 71 02 00 00
+
+. 0 3A96E380 91
+. 31 C0 8D BB E4 F9 FF FF 89 85 54 FF FF FF 89 BD 14 FF FF FF 89 B5 50 FF FF FF 8B 95 54 FF FF FF 31 FF 8B 0E 8D 04 52 8D 44 85 F4 89 8D 40 FF FF FF 8B 50 B8 8B 40 BC 89 85 4C FF FF FF 01 D0 89 85 44 FF FF FF 8B 46 30 8B 40 04 89 85 3C FF FF FF 8B 86 B8 00 00 00 85 C0 74 03
+
+. 0 3A96E3DE 22
+. 8B 85 4C FF FF FF 89 D1 BA AB AA AA AA F7 E2 C1 EA 03 39 FA 76 02
+
+. 0 3A96E3F6 20
+. 3B B5 14 FF FF FF 8D 04 52 8D 04 81 89 85 48 FF FF FF 74 48
+
+. 0 3A96E40A 10
+. 8B 85 40 FF FF FF 85 C0 75 0A
+
+. 0 3A96E41E 8
+. 3B 8D 48 FF FF FF 73 2C
+
+. 0 3A96E452 14
+. 8B 86 DC 00 00 00 85 C0 0F 84 F0 02 00 00
+
+. 0 3A96E460 27
+. 8B 95 44 FF FF FF 8B 40 04 39 95 48 FF FF FF 89 85 38 FF FF FF 0F 83 35 FE FF FF
+
+. 0 3A96E2B0 19
+. FF 85 54 FF FF FF 83 BD 54 FF FF FF 01 0F 8E D1 00 00 00
+
+. 0 3A96E394 71
+. 89 B5 50 FF FF FF 8B 95 54 FF FF FF 31 FF 8B 0E 8D 04 52 8D 44 85 F4 89 8D 40 FF FF FF 8B 50 B8 8B 40 BC 89 85 4C FF FF FF 01 D0 89 85 44 FF FF FF 8B 46 30 8B 40 04 89 85 3C FF FF FF 8B 86 B8 00 00 00 85 C0 74 03
+
+. 0 3A96E2C3 11
+. 8B 45 14 85 C0 0F 85 E9 0A 00 00
+
+. 0 3A96E2CE 18
+. 80 8E 74 01 00 00 04 8B 4D 98 85 C9 0F 85 A1 0A 00 00
+
+. 0 3A96E2E0 13
+. 8B 45 08 8B 90 10 02 00 00 85 D2 74 13
+
+. 0 3A96E300 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96E05C 15
+. 89 F8 89 CA C1 E8 03 3B 85 78 FF FF FF 76 06
+
+. 0 3A96E0B0 14
+. 8B 86 DC 00 00 00 85 C0 0F 84 73 05 00 00
+
+. 0 3A96E414 10
+. 8B 86 14 01 00 00 85 C0 75 34
+
+. 0 3A968122 5
+. E8 09 A6 00 00
+
+. 0 3A972730 5
+. 55 89 E5 5D C3
+
+. 0 3A968127 10
+. 8B 83 D0 F9 FF FF 85 C0 75 62
+
+. 0 3A968131 13
+. 83 BB 54 FB FF FF 01 0F 86 86 F2 FF FF
+
+. 0 3A96813E 36
+. 31 C0 89 44 24 0C 31 C0 89 44 24 08 8B 83 94 F9 FF FF 8B 80 B0 01 00 00 89 3C 24 89 44 24 04 E8 4E 5D 00 00
+
+. 0 3A96E233 12
+. 8B 55 D0 8B 45 CC 01 D0 39 C8 74 17
+
+. 0 3A96E256 16
+. 8B 46 20 8B 48 04 01 CA 89 55 D0 E9 4D FD FF FF
+
+. 0 3A96DFED 5
+. E9 3E 03 00 00
+
+. 0 3A968162 5
+. E9 5D F2 FF FF
+
+. 0 3A9673C4 18
+. 8B 95 78 FE FF FF C7 42 0C 01 00 00 00 E8 9A 97 00 00
+
+. 0 3A970B70 5
+. 55 89 E5 5D C3
+
+. 0 3A9673D6 5
+. E8 F5 42 00 00
+
+. 0 3A96B6D0 12
+. 55 89 E5 53 83 EC 08 E8 4F A2 00 00
+
+. 0 3A96B6DC 29
+. 81 C3 90 CF 00 00 8B 8B DC 00 00 00 85 C9 0F 95 C0 83 F9 FF 0F 95 C2 21 D0 A8 01 75 07
+
+. 0 3A96B700 18
+. 89 0C 24 8B 83 E4 00 00 00 89 44 24 04 E8 7E 95 00 00
+
+. 0 3A974C90 17
+. 89 DA 8B 4C 24 08 8B 5C 24 04 B8 5B 00 00 00 CD 80
+
+. 0 3A974CA1 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3A974CAA 1
+. C3
+
+. 0 3A96B712 14
+. 31 C0 89 83 DC 00 00 00 83 C4 08 5B 5D C3
+
+. 0 3A9673DB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A9730A0 17
+. 8B 85 20 FE FF FF 81 C4 04 02 00 00 5B 5E 5F 5D C3
+
+. 0 3A965ECA 11
+. 80 BB 14 FC FF FF 00 89 C7 78 5B
+
+. 0 3A965ED5 10
+. 83 C4 54 89 F8 5B 5E 5F 5D C3
+
+. 0 3A965887 7
+. 89 C7 E8 E2 FF FF FF
+
+. 0 3A965870 4
+. 8B 1C 24 C3
+
+. 0 3A96588E 39
+. 81 C3 DE 2D 01 00 8B 83 08 02 00 00 5A 8D 24 84 29 C2 52 8B 83 94 F9 FF FF 8D 74 94 08 8D 4C 24 04 56 E8 CB AD 00 00
+
+. 0 3A970680 22
+. 55 89 E5 57 89 C7 56 53 83 EC 2C 8B 80 98 00 00 00 E8 95 52 00 00
+
+. 0 3A970696 35
+. 81 C3 D6 7F 00 00 89 55 F0 8B B7 9C 00 00 00 89 45 E8 8B 83 CC F9 FF FF 89 4D EC 85 C0 0F 85 91 00 00 00
+
+. 0 3A97074A 14
+. 8B 55 08 89 14 24 8B 55 F0 E8 08 FE FF FF
+
+. 0 3A970560 29
+. 55 89 E5 83 EC 28 89 5D F4 89 75 F8 89 C6 89 7D FC 0F B6 80 74 01 00 00 E8 AE 53 00 00
+
+. 0 3A97057D 16
+. 81 C3 EF 80 00 00 89 55 F0 89 4D EC A8 08 75 33
+
+. 0 3A97058D 25
+. 88 C2 8B 46 04 80 CA 08 88 96 74 01 00 00 0F B6 08 84 C9 0F 84 98 00 00 00
+
+. 0 3A9705A6 7
+. 8B 56 48 85 D2 75 23
+
+. 0 3A9705D0 9
+. F6 83 14 FC FF FF 02 75 7C
+
+. 0 3A9705D9 4
+. 85 D2 75 42
+
+. 0 3A97061F 29
+. 8B 42 04 8B 55 08 8B 3E 89 54 24 08 8B 55 EC 01 F8 89 54 24 04 8B 55 F0 89 14 24 FF D0
+
+. 0 3A97C92C 11
+. 55 89 E5 83 EC 08 E8 89 00 00 00
+
+. 0 3A97C9C0 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 17 14 00 00 52 8B 83 2C 00 00 00 85 C0 74 02
+
+. 0 3A97C9DD 4
+. 58 5B C9 C3
+
+. 0 3A97C937 5
+. E8 24 01 00 00
+
+. 0 3A97CA60 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 77 13 00 00 50 8B 83 FC FF FF FF 85 C0 74 0A
+
+. 0 3A97CA85 5
+. 8B 5D FC C9 C3
+
+. 0 3A97C93C 5
+. E8 5F 03 00 00
+
+. 0 3A97CCA0 34
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 36 11 00 00 8D 83 F0 FF FF FF 8D 70 FC 8B 40 FC 83 F8 FF 74 0C
+
+. 0 3A97CCCE 4
+. 5B 5E 5D C3
+
+. 0 3A97C941 2
+. C9 C3
+
+. 0 3A97063C 2
+. EB 9F
+
+. 0 3A9705DD 7
+. 8B 56 7C 85 D2 74 DC
+
+. 0 3A9705C0 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A970758 19
+. 31 C0 89 83 CC F9 FF FF 8B 45 E8 85 C0 0F 84 59 FF FF FF
+
+. 0 3A9706C4 7
+. 31 C0 E8 65 04 00 00
+
+. 0 3A970B65 4
+. 5D 89 D0 C3
+
+. 0 3A9706CB 15
+. 89 45 E4 C7 40 0C 01 00 00 00 E8 96 04 00 00
+
+. 0 3A9706DA 13
+. 8B B7 54 01 00 00 89 F0 4E 85 C0 75 21
+
+. 0 3A970708 26
+. 8B 87 D4 01 00 00 8B 55 08 8B 04 B0 89 14 24 8B 4D EC 8B 55 F0 E8 3E FE FF FF
+
+. 0 3A9705AD 7
+. 8B 7E 7C 85 FF 75 1C
+
+. 0 3A9705B4 25
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A970722 7
+. 89 F0 4E 85 C0 75 DF
+
+. 0 3A9D67C0 16
+. 55 31 C0 89 E5 83 EC 18 89 5D F4 E8 C1 FF FF FF
+
+. 0 3A9D6791 4
+. 8B 1C 24 C3
+
+. 0 3A9D67D0 28
+. 81 C3 40 C8 0F 00 89 75 F8 8B 93 A4 00 00 00 8B 75 10 89 7D FC 85 D2 8B 7D 0C 74 14
+
+. 0 3A9D67EC 6
+. 8B 12 85 D2 75 0E
+
+. 0 3A9D6800 10
+. 89 83 10 90 FF FF 85 C0 75 26
+
+. 0 3A9D680A 21
+. 8B 83 80 01 00 00 0F B7 10 8B 83 68 01 00 00 66 3B 50 38 74 11
+
+. 0 3A9D6830 42
+. 89 BB D8 28 00 00 8B 45 08 89 83 D4 28 00 00 8B 83 3C 01 00 00 89 30 89 74 24 08 89 7C 24 04 8B 45 08 89 04 24 E8 A6 45 0A 00
+
+. 0 3AA7AE00 20
+. 55 89 E5 83 EC 14 89 7D FC 8B 7D 0C 89 5D F4 E8 7D B9 F5 FF
+
+. 0 3AA7AE14 13
+. 81 C3 FC 81 05 00 89 75 F8 85 FF 74 30
+
+. 0 3AA7AE21 6
+. 8B 37 85 F6 74 2A
+
+. 0 3AA7AE27 17
+. 89 34 24 B8 2F 00 00 00 89 44 24 04 E8 A8 B7 F5 FF
+
+. 0 3A9D65E0 6
+. FF A3 40 00 00 00
+
+. 0 3A9D65E6 10
+. 68 68 00 00 00 E9 10 FF FF FF
+
+. 0 3A9D6500 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 0 3A96FEF0 16
+. 50 51 52 8B 54 24 10 8B 44 24 0C E8 60 01 00 00
+
+. 0 3A970060 22
+. 55 89 E5 57 56 89 C6 53 83 EC 24 8B 48 30 8B 40 2C E8 B5 58 00 00
+
+. 0 3A970076 59
+. 81 C3 F6 85 00 00 8B 40 04 89 45 EC 8B 46 74 8B 78 04 01 FA 8B 7A 04 8B 12 C1 EF 08 89 F8 C1 E0 04 03 41 04 8B 0E 01 CA 89 55 E8 89 45 E4 F6 40 0D 03 89 45 F0 0F 85 97 00 00 00
+
+. 0 3A9700B1 12
+. 8B 86 DC 00 00 00 31 D2 85 C0 74 25
+
+. 0 3A9700BD 95
+. 8B 40 04 0F B7 14 78 8B 86 7C 01 00 00 81 E2 FF 7F 00 00 C1 E2 04 01 C2 31 C0 83 7A 04 00 0F 95 C0 F7 D8 21 C2 8B 45 E4 BF 01 00 00 00 8D 4D F0 8B 00 01 45 EC 31 C0 89 44 24 10 B8 01 00 00 00 89 54 24 04 89 F2 89 44 24 0C 89 7C 24 08 8B 86 B0 01 00 00 89 04 24 8B 45 EC E8 C4 BF FF FF
+
+. 0 3A97011C 14
+. 8B 55 F0 83 EC 14 89 C1 31 C0 85 D2 74 07
+
+. 0 3A97012A 17
+. 89 C8 8B 4A 04 01 C8 8B B3 44 FC FF FF 85 F6 75 05
+
+. 0 3A97013B 13
+. 8B 55 E8 89 02 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96FF00 8
+. 5A 59 87 04 24 C2 08 00
+
+. 0 3AA28860 34
+. 57 56 31 C0 8B 74 24 0C 8B 4C 24 10 88 CD 89 CA C1 E1 10 66 89 D1 F7 C6 03 00 00 00 0F 84 81 00 00 00
+
+. 0 3AA28903 15
+. 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00
+
+. 0 3AA28912 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 CB 00 00 00
+
+. 0 3AA28921 11
+. 31 CA BF FF FE FE FE 01 D7 73 9E
+
+. 0 3AA2892C 11
+. 31 D7 81 CF FF FE FE FE 47 75 A9
+
+. 0 3AA288E0 17
+. 83 EE 04 83 EE 04 83 EE 04 F7 C2 00 00 FF 00 75 05
+
+. 0 3AA288F6 7
+. 8D 46 0C 84 F6 75 03
+
+. 0 3AA288FD 21
+. 8D 46 0D 83 C6 10 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00
+
+. 0 3AA288F1 5
+. 8D 46 0E EB 0A
+
+. 0 3AA28900 18
+. 83 C6 10 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00
+
+. 0 3AA28937 16
+. 8B 56 04 BF FF FE FE FE 01 D7 0F 83 A2 00 00 00
+
+. 0 3AA28947 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 93 00 00 00
+
+. 0 3AA28956 15
+. 31 CA BF FF FE FE FE 01 D7 0F 83 68 FF FF FF
+
+. 0 3AA28965 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 6F FF FF FF
+
+. 0 3AA288E3 14
+. 83 EE 04 83 EE 04 F7 C2 00 00 FF 00 75 05
+
+. 0 3AA289E9 7
+. 83 C6 04 38 CA 75 02
+
+. 0 3AA289F2 4
+. 84 D2 74 20
+
+. 0 3AA289F6 4
+. 38 CE 75 03
+
+. 0 3AA289FD 4
+. 84 F6 74 15
+
+. 0 3AA28A16 3
+. 5E 5F C3
+
+. 0 3AA7AE38 4
+. 85 C0 74 22
+
+. 0 3AA7AE3C 34
+. 8D 50 01 8B 83 98 01 00 00 89 10 8B 17 8B 83 70 02 00 00 89 10 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9D685A 5
+. E8 C1 FE FF FF
+
+. 0 3A9D6720 10
+. 55 89 E5 56 53 E8 67 00 00 00
+
+. 0 3A9D672A 18
+. 81 C3 E6 C8 0F 00 8D B3 94 FF FF FF 8B 06 85 C0 75 04
+
+. 0 3A9D6740 5
+. 83 C6 04 FF D0
+
+. 0 3AA217B0 14
+. 55 89 E5 83 EC 08 89 1C 24 E8 D3 4F FB FF
+
+. 0 3AA217BE 20
+. 81 C3 52 18 0B 00 89 74 24 04 8B 83 38 02 00 00 85 C0 75 4E
+
+. 0 3AA21820 11
+. 8B 1C 24 8B 74 24 04 89 EC 5D C3
+
+. 0 3A9D6745 6
+. 8B 06 85 C0 75 F5
+
+. 0 3A9D674B 6
+. 5B 5E 5D 89 F6 C3
+
+. 0 3A9D685F 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9A148C 11
+. 55 89 E5 83 EC 08 E8 D9 00 00 00
+
+. 0 3A9A1570 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 CB E2 01 00 52 8B 83 50 00 00 00 85 C0 74 02
+
+. 0 3A9A158D 4
+. 58 5B C9 C3
+
+. 0 3A9A1497 5
+. E8 74 01 00 00
+
+. 0 3A9A1610 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 2B E2 01 00 50 8B 83 FC FF FF FF 85 C0 74 0A
+
+. 0 3A9A1635 5
+. 8B 5D FC C9 C3
+
+. 0 3A9A149C 5
+. E8 7F 8B 01 00
+
+. 0 3A9BA020 34
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 1A 58 00 00 8D 83 F0 FF FF FF 8D 70 FC 8B 40 FC 83 F8 FF 74 0C
+
+. 0 3A9BA04E 4
+. 5B 5E 5D C3
+
+. 0 3A9A14A1 2
+. C9 C3
+
+. 0 3A97063E 9
+. F6 C2 03 0F 84 79 FF FF FF
+
+. 0 3A970729 15
+. 8B 45 E4 C7 40 0C 00 00 00 00 E8 38 04 00 00
+
+. 0 3A970738 18
+. 31 C0 89 83 34 00 00 00 83 C4 2C 5B 5E 5F 5D C2 04 00
+
+. 0 3A9658B5 8
+. 8D 93 94 81 FF FF FF E7
+
+. 0 8048430 33
+. 31 ED 5E 89 E1 83 E4 F0 50 54 52 68 00 37 06 08 68 70 37 06 08 51 56 68 F5 2D 06 08 E8 AF FF FF FF
+
+. 0 8048400 6
+. FF 25 10 A1 06 08
+
+. 0 8048406 10
+. 68 20 00 00 00 E9 A0 FF FF FF
+
+. 0 80483B0 12
+. FF 35 F8 A0 06 08 FF 25 FC A0 06 08
+
+. 0 3A9D6870 22
+. 55 31 C9 89 E5 57 56 53 83 EC 0C 8B 45 0C 8B 7D 10 E8 0B FF FF FF
+
+. 0 3A9D6886 23
+. 81 C3 8A C7 0F 00 8B 75 1C 8D 54 87 04 8B 83 A4 00 00 00 85 C0 74 13
+
+. 0 3A9D689D 6
+. 8B 00 85 C0 75 0D
+
+. 0 3A9D68A3 33
+. B9 01 00 00 00 90 8D B4 26 00 00 00 00 8B 83 B4 00 00 00 85 F6 89 08 8B 83 3C 01 00 00 89 10 74 14
+
+. 0 3A9D68C4 20
+. 89 34 24 31 C0 89 44 24 08 31 C0 89 44 24 04 E8 88 55 01 00
+
+. 0 3A9EBE60 9
+. 55 89 E5 53 E8 28 A9 FE FF
+
+. 0 3A9EBE69 11
+. 81 C3 A7 71 0E 00 E8 FC FE FF FF
+
+. 0 3A9EBD70 16
+. 55 89 E5 57 31 FF 56 53 83 EC 04 E8 11 AA FE FF
+
+. 0 3A9EBD80 20
+. 81 C3 90 72 0E 00 8B 93 3C 2B 00 00 85 D2 0F 85 A0 00 00 00
+
+. 0 3A9EBD94 10
+. 8B B3 BC 9E FF FF 85 F6 74 2B
+
+. 0 3A9EBD9E 11
+. 89 F6 8B 56 04 31 FF 39 D7 73 15
+
+. 0 3A9EBDBE 5
+. 83 FA 1F 76 3A
+
+. 0 3A9EBDFD 12
+. 8D 42 01 89 D7 89 46 04 85 F6 74 C0
+
+. 0 3A9EBE09 24
+. 89 F8 B9 01 00 00 00 C1 E0 04 89 4C 06 08 8B 93 44 2B 00 00 85 D2 75 23
+
+. 0 3A9EBE21 6
+. 31 C0 85 F6 74 07
+
+. 0 3A9EBE27 13
+. C1 E7 04 8D 44 37 08 5A 5B 5E 5F 5D C3
+
+. 0 3A9EBE74 11
+. 89 C2 85 D2 B8 FF FF FF FF 74 1A
+
+. 0 3A9EBE7F 29
+. C7 02 04 00 00 00 8B 45 08 89 42 04 8B 45 0C 89 42 08 8B 45 10 89 42 0C 31 C0 5B 5D C3
+
+. 0 3A9D68D8 7
+. 8B 75 18 85 F6 74 17
+
+. 0 3A9D68DF 23
+. 31 D2 31 C9 89 4C 24 08 89 54 24 04 8B 55 18 89 14 24 E8 6A 55 01 00
+
+. 0 3A9EBDA9 13
+. 8D 46 08 8D 74 26 00 8B 08 85 C9 74 3E
+
+. 0 3A9EBDB6 8
+. 47 83 C0 10 39 D7 72 F2
+
+. 0 3A9D68F6 11
+. 8B B3 68 01 00 00 F6 06 02 75 32
+
+. 0 3A9D6901 7
+. 8B 45 14 85 C0 74 03
+
+. 0 3A9D6908 3
+. FF 55 14
+
+. 0 8063770 19
+. 55 89 E5 83 EC 18 89 5D F4 89 75 F8 31 F6 E8 45 00 00 00
+
+. 0 80637C8 4
+. 8B 1C 24 C3
+
+. 0 8063783 14
+. 81 C3 71 69 00 00 89 7D FC E8 07 4C FE FF
+
+. 0 8048398 11
+. 55 89 E5 83 EC 08 E8 B1 00 00 00
+
+. 0 8048454 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 97 1C 02 00 52 8B 83 28 00 00 00 85 C0 74 02
+
+. 0 8048471 4
+. 58 5B C9 C3
+
+. 0 80483A3 5
+. E8 18 01 00 00
+
+. 0 80484C0 15
+. 55 89 E5 51 51 8B 15 F0 A0 06 08 85 D2 74 19
+
+. 0 80484E8 2
+. C9 C3
+
+. 0 80483A8 5
+. E8 23 B4 01 00
+
+. 0 80637D0 20
+. 55 89 E5 53 52 BB E0 A0 06 08 A1 E0 A0 06 08 83 F8 FF 74 0C
+
+. 0 80637F0 4
+. 58 5B 5D C3
+
+. 0 80483AD 2
+. C9 C3
+
+. 0 8063791 21
+. 8D 93 0C FF FF FF 8D 83 0C FF FF FF 29 C2 C1 FA 02 39 D6 73 15
+
+. 0 80637BB 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9D690B 7
+. F6 06 02 89 F6 75 38
+
+. 0 3A9D6912 25
+. 8B 93 3C 01 00 00 8B 02 89 7C 24 04 89 44 24 08 8B 45 0C 89 04 24 FF 55 08
+
+. 0 8062DF5 31
+. 55 89 E5 83 EC 08 83 E4 F0 B8 00 00 00 00 29 C4 83 EC 08 68 EC 84 04 08 6A 04 E8 BC 55 FE FF
+
+. 0 80483D0 6
+. FF 25 04 A1 06 08
+
+. 0 80483D6 10
+. 68 08 00 00 00 E9 D0 FF FF FF
+
+. 0 3A9E94D0 23
+. 55 89 E5 57 56 53 81 EC 2C 01 00 00 8B 4D 0C 8B 7D 08 E8 AA D2 FE FF
+
+. 0 3A9E94E7 23
+. 81 C3 29 9B 0E 00 83 F9 FF 0F 94 C0 85 FF 0F 9E C2 09 D0 A8 01 75 05
+
+. 0 3A9E94FE 5
+. 83 FF 40 7E 1D
+
+. 0 3A9E9520 19
+. 89 8D 64 FF FF FF 8D B5 68 FF FF FF B8 1F 00 00 00 EB 0D
+
+. 0 3A9E9540 10
+. C7 04 86 00 00 00 00 48 79 F6
+
+. 0 3A9E954A 75
+. 8D 57 FF B8 01 00 00 00 89 D1 83 E1 1F C1 EA 05 D3 E0 09 04 96 85 84 93 90 29 00 00 0F 95 C0 0F B6 C0 83 F8 01 19 C0 25 00 00 00 10 89 45 E8 8D 85 D4 FE FF FF 89 44 24 08 8D 85 64 FF FF FF 89 44 24 04 89 3C 24 E8 1B 02 00 00
+
+. 0 3A9E97B0 14
+. 55 89 E5 83 EC 14 89 5D F8 E8 D3 CF FE FF
+
+. 0 3A9E97BE 28
+. 81 C3 52 98 0E 00 8B 4D 0C 8B 55 10 89 75 FC 8B 83 6C 2B 00 00 8B 75 08 85 C0 75 1A
+
+. 0 3A9E97DA 16
+. 89 54 24 08 89 4C 24 04 89 34 24 E8 86 FE FF FF
+
+. 0 3A9E9670 22
+. 55 31 C9 89 E5 57 56 53 81 EC 28 01 00 00 8B 55 0C E8 0B D1 FE FF
+
+. 0 3A9E9686 10
+. 81 C3 8A 99 0E 00 85 D2 74 66
+
+. 0 3A9E9690 49
+. FC 8B 02 B9 20 00 00 00 8D BD 70 FF FF FF 8D 72 04 89 85 64 FF FF FF 8B 82 84 00 00 00 89 85 D0 FE FF FF 89 85 68 FF FF FF 8B 83 68 01 00 00 F3 A5
+
+. 0 3A9E96BF 2
+. F3 A5
+
+. 0 3A9E96C1 10
+. 8B B8 80 01 00 00 85 FF 75 25
+
+. 0 3A9E96CB 25
+. 81 8D 68 FF FF FF 00 00 00 04 8D 83 50 66 F1 FF F6 85 D0 FE FF FF 04 75 06
+
+. 0 3A9E96E4 27
+. 8D 83 58 66 F1 FF 89 85 6C FF FF FF 8D 8D 64 FF FF FF 8B 75 10 31 D2 85 F6 74 06
+
+. 0 3A9E96FF 23
+. 8D 95 D4 FE FF FF BE 08 00 00 00 8B 7D 08 87 DF B8 AE 00 00 00 CD 80
+
+. 0 3A9E9716 15
+. 87 FB 89 85 CC FE FF FF 3D 00 F0 FF FF 77 63
+
+. 0 3A9E9725 24
+. 31 D2 8B 85 CC FE FF FF 83 7D 10 00 F7 D0 0F 95 C2 C1 E8 1F 85 D0 74 3A
+
+. 0 3A9E973D 31
+. FC 8B 85 D4 FE FF FF B9 20 00 00 00 8B 7D 10 8D B5 E0 FE FF FF 89 07 8B 7D 10 83 C7 04 F3 A5
+
+. 0 3A9E975A 2
+. F3 A5
+
+. 0 3A9E975C 44
+. 8B 85 D8 FE FF FF 8B 55 10 89 82 84 00 00 00 8B 85 DC FE FF FF 89 82 88 00 00 00 8B 85 CC FE FF FF 81 C4 28 01 00 00 5B 5E 5F 5D C3
+
+. 0 3A9E97EA 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3A9E9595 13
+. 85 C0 BA FF FF FF FF 0F 88 71 FF FF FF
+
+. 0 3A9E95A2 19
+. 8B 95 D4 FE FF FF 81 C4 2C 01 00 00 89 D0 5B 5E 5F 5D C3
+
+. 0 8062E14 8
+. 83 C4 10 E8 15 58 FE FF
+
+. 0 8048631 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 8E FD FF FF
+
+. 0 80483F0 6
+. FF 25 0C A1 06 08
+
+. 0 80483F6 10
+. 68 18 00 00 00 E9 B0 FF FF FF
+
+. 0 3A9E92F0 35
+. 8B 44 24 04 89 58 00 89 70 04 89 78 08 8D 4C 24 04 89 48 10 8B 4C 24 00 89 48 14 89 68 0C E8 7B 5C 0C 00
+
+. 0 3AAAEF8E 4
+. 8B 0C 24 C3
+
+. 0 3A9E9313 14
+. 81 C1 FD 9C 0E 00 8D 89 20 63 F1 FF FF E1
+
+. 0 3A9E9330 22
+. 55 89 E5 83 EC 18 89 5D F4 8B 55 0C 89 75 F8 31 F6 E8 4B D4 FE FF
+
+. 0 3A9E9346 16
+. 81 C3 CA 9C 0E 00 89 7D FC 85 D2 8B 7D 08 75 12
+
+. 0 3A9E9368 25
+. C7 04 24 00 00 00 00 8D 47 1C 89 44 24 08 31 C0 89 44 24 04 E8 8F 04 00 00
+
+. 0 3A9E9810 36
+. 55 89 E5 83 EC 08 89 34 24 BE 08 00 00 00 89 7C 24 04 8B 7D 08 8B 4D 0C 8B 55 10 87 DF B8 AF 00 00 00 CD 80
+
+. 0 3A9E9834 11
+. 87 FB 3D 00 F0 FF FF 89 C6 77 0D
+
+. 0 3A9E983F 13
+. 89 F0 8B 7C 24 04 8B 34 24 89 EC 5D C3
+
+. 0 3A9E9381 4
+. 85 C0 75 D1
+
+. 0 3A9E9385 23
+. BE 01 00 00 00 89 77 18 31 C0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 8048662 7
+. 83 C4 10 85 C0 75 77
+
+. 0 8048669 38
+. 9B DD B5 68 FF FF FF D9 45 E0 D9 E1 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D8 E8 72 FE FF FF
+
+. 0 8048501 33
+. 55 89 E5 83 EC 28 C7 45 FC 00 00 00 00 D9 45 08 D9 45 0C D9 C9 DA E9 DF E0 80 E4 45 80 FC 40 74 54
+
+. 0 8048576 12
+. C7 45 FC 01 00 00 00 8B 45 FC C9 C3
+
+. 0 804868F 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8048696 13
+. 83 EC 0C 68 28 38 06 08 E8 6D FD FF FF
+
+. 0 8048410 6
+. FF 25 14 A1 06 08
+
+. 0 8048416 10
+. 68 28 00 00 00 E9 90 FF FF FF
+
+. 0 3AA044D0 22
+. 55 89 E5 8D 45 0C 53 83 EC 0C 89 44 24 08 8B 45 08 E8 AB 22 FD FF
+
+. 0 3AA044E6 26
+. 81 C3 2A EB 0C 00 89 44 24 04 8B 83 B0 00 00 00 8B 00 89 04 24 E8 60 73 FF FF
+
+. 0 3A9FB860 22
+. 55 31 C9 89 E5 57 56 53 81 EC F0 05 00 00 8B 7D 0C E8 1B AF FD FF
+
+. 0 3A9FB876 17
+. 81 C3 9A 77 0D 00 89 8D 58 FB FF FF E8 A9 B2 FD FF
+
+. 0 3A9D6B30 12
+. 55 89 E5 53 83 EC 04 E8 55 FC FF FF
+
+. 0 3A9D6B3C 16
+. 81 C3 D4 C4 0F 00 8B 8B 14 1A 00 00 85 C9 75 0A
+
+. 0 3A9D6B4C 10
+. 8B 83 A4 01 00 00 5A 5B 5D C3
+
+. 0 3A9FB887 17
+. 8B 00 89 85 38 FB FF FF 8B 45 08 80 78 46 00 75 2B
+
+. 0 3A9FB898 14
+. 8B 93 38 02 00 00 85 D2 0F 84 F7 2A 00 00
+
+. 0 3A9FB8A6 9
+. 89 C2 8B 40 5C 85 C0 75 0C
+
+. 0 3A9FB8AF 20
+. C7 42 5C FF FF FF FF B8 FF FF FF FF 40 BA FF FF FF FF 75 5F
+
+. 0 3A9FB8C3 13
+. 8B 75 08 8B 06 A8 08 0F 85 23 2C 00 00
+
+. 0 3A9FB8D0 8
+. 85 FF 0F 84 FE 2F 00 00
+
+. 0 3A9FB8D8 9
+. 8B 55 08 80 7A 46 00 75 2E
+
+. 0 3A9FB8E1 14
+. 8B 8B 38 02 00 00 85 C9 0F 84 EB 2B 00 00
+
+. 0 3A9FB8EF 7
+. 8B 42 5C 85 C0 75 0C
+
+. 0 3A9FB902 8
+. 40 BA FF FF FF FF 75 18
+
+. 0 3A9FB90A 9
+. 8B 75 08 8B 06 A8 02 74 1D
+
+. 0 3A9FB930 68
+. B8 FF FF FF FF 8D 95 BC FB FF FF 89 85 54 FB FF FF 8B 45 10 89 95 34 FB FF FF 89 85 40 FB FF FF 31 C0 89 85 3C FB FF FF 31 C0 89 85 BC FB FF FF 31 C0 89 85 C0 FB FF FF 89 54 24 04 89 3C 24 E8 AC 76 01 00
+
+. 0 3AA13020 17
+. 55 89 E5 57 56 53 83 EC 0C 8B 75 08 E8 60 37 FC FF
+
+. 0 3AA13031 28
+. 81 C3 DF FF 0B 00 8B 7D 0C 0F B6 06 84 C0 0F 95 C2 3C 25 0F 95 C0 21 D0 A8 01 75 43
+
+. 0 3AA13090 11
+. C7 07 00 00 00 00 F6 06 80 75 B5
+
+. 0 3AA1309B 20
+. 46 0F B6 06 84 C0 0F 95 C2 3C 25 0F 95 C0 21 D0 A8 01 75 E1
+
+. 0 3AA130AF 10
+. 83 C4 0C 89 F0 5B 5E 5F 5D C3
+
+. 0 3A9FB974 38
+. 89 85 4C FB FF FF 8B 4D 08 89 85 B4 FB FF FF 31 C0 0F B7 11 89 85 30 FB FF FF 89 D0 25 00 80 FF FF 66 85 C0 75 0E
+
+. 0 3A9FB99A 14
+. 8B 83 B8 01 00 00 85 C0 0F 85 41 01 00 00
+
+. 0 3A9FB9A8 16
+. 89 D0 25 00 80 FF FF 66 85 C0 0F 84 21 01 00 00
+
+. 0 3A9FBAD9 11
+. 8B 45 08 89 04 24 E8 9C AA FD FF
+
+. 0 3A9D6580 6
+. FF A3 28 00 00 00
+
+. 0 3A9D6586 10
+. 68 38 00 00 00 E9 70 FF FF FF
+
+. 0 3AA12F20 12
+. 55 89 E5 53 83 EC 04 E8 65 38 FC FF
+
+. 0 3AA12F2C 16
+. 81 C3 E4 00 0C 00 8B 93 3C 2B 00 00 85 D2 75 04
+
+. 0 3AA12F3C 4
+. 58 5B 5D C3
+
+. 0 3A9FBAE4 5
+. E9 CF FE FF FF
+
+. 0 3A9FB9B8 36
+. 8B 55 08 8B B5 4C FB FF FF 0F BE 42 46 29 FE 8B 84 10 94 00 00 00 89 14 24 89 74 24 08 89 7C 24 04 FF 50 1C
+
+. 0 3AA1DFD0 28
+. 55 89 E5 83 EC 24 89 75 F8 8B 45 10 8B 75 10 89 5D F4 89 45 F0 31 C0 E8 A5 87 FB FF
+
+. 0 3AA1DFEC 27
+. 81 C3 24 50 0B 00 89 7D FC 85 F6 8B 7D 0C C7 45 EC 00 00 00 00 0F 84 C5 00 00 00
+
+. 0 3AA1E007 27
+. 8B 55 08 89 D1 8B 72 18 8B 01 8B 52 14 25 00 0A 00 00 29 D6 3D 00 0A 00 00 74 30
+
+. 0 3AA1E022 8
+. 85 F6 0F 84 8E 00 00 00
+
+. 0 3AA1E0B8 12
+. 8B 45 F0 8B 55 EC 01 D0 85 C0 75 1C
+
+. 0 3AA1E0E0 29
+. 8B 4D 08 BE FF FF FF FF 0F BE 41 46 8B 84 08 94 00 00 00 89 74 24 04 89 0C 24 FF 50 0C
+
+. 0 3AA1EBC0 17
+. 55 89 E5 57 56 53 83 EC 10 8B 75 08 E8 C0 7B FB FF
+
+. 0 3AA1EBD1 17
+. 81 C3 3F 44 0B 00 8B 0E F6 C1 08 0F 85 23 01 00 00
+
+. 0 3AA1EBE2 9
+. F6 C5 08 0F 84 C5 00 00 00
+
+. 0 3AA1ECB0 11
+. 8B 46 10 85 C0 0F 85 4D FF FF FF
+
+. 0 3AA1ECBB 5
+. E9 32 FF FF FF
+
+. 0 3AA1EBF2 8
+. 89 34 24 E8 36 22 00 00
+
+. 0 3AA20E30 20
+. 55 89 E5 83 EC 18 89 75 FC 8B 75 08 89 5D F8 E8 4D 59 FB FF
+
+. 0 3AA20E44 13
+. 81 C3 CC 21 0B 00 8B 46 1C 85 C0 75 20
+
+. 0 3AA20E51 5
+. F6 06 02 74 07
+
+. 0 3AA20E5D 17
+. 0F BE 46 46 8B 84 30 94 00 00 00 89 34 24 FF 50 34
+
+. 0 3AA13C10 29
+. 55 89 E5 81 EC 84 00 00 00 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 46 38 E8 64 2B FC FF
+
+. 0 3AA13C2D 15
+. 81 C3 E3 F3 0B 00 BF 00 20 00 00 85 C0 78 34
+
+. 0 3AA13C3C 24
+. 0F BE 46 46 8B 94 30 94 00 00 00 8D 45 94 89 44 24 04 89 34 24 FF 52 48
+
+. 0 3AA1E4F0 35
+. 55 89 E5 83 EC 0C 8B 45 0C 89 44 24 08 8B 45 08 8B 40 38 C7 04 24 03 00 00 00 89 44 24 04 E8 CD D7 04 00
+
+. 0 3AA6BCE0 14
+. 55 89 E5 83 EC 64 89 5D F4 E8 A3 AA F6 FF
+
+. 0 3AA6BCEE 31
+. 81 C3 22 73 06 00 89 75 F8 8B B3 BC 01 00 00 89 7D FC 8B 7D 10 8B 06 85 C0 0F 85 83 00 00 00
+
+. 0 3AA6BD0D 5
+. E8 1E AE F6 FF
+
+. 0 3AA6BD12 22
+. 89 45 A8 8B 00 89 F9 8B 55 0C 89 45 B0 87 D3 B8 C5 00 00 00 CD 80
+
+. 0 3AA6BD28 13
+. 87 D3 3D 00 F0 FF FF 0F 87 B7 00 00 00
+
+. 0 3AA6BD35 7
+. 83 F8 FF 89 C2 74 34
+
+. 0 3AA6BD3C 4
+. 85 D2 75 20
+
+. 0 3AA6BD40 8
+. 8B 47 58 39 47 0C 74 18
+
+. 0 3AA6BD60 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA1E513 2
+. C9 C3
+
+. 0 3AA13C54 4
+. 85 C0 78 18
+
+. 0 3AA13C58 15
+. 8B 45 A4 25 00 F0 00 00 3D 00 20 00 00 74 7F
+
+. 0 3AA13CE6 43
+. 8B 55 B8 8B 45 B4 0F AC D0 08 C1 EA 08 89 C1 0F AC D0 18 81 E1 FF 0F 00 00 25 00 F0 FF FF 09 C1 81 E9 88 00 00 00 83 F9 07 77 0B
+
+. 0 3AA13D11 11
+. 81 0E 00 02 00 00 E9 4B FF FF FF
+
+. 0 3AA13C67 7
+. 8B 45 C8 85 C0 7E 02
+
+. 0 3AA13C6E 62
+. 89 C7 C7 04 24 00 00 00 00 31 C0 BA 03 00 00 00 89 44 24 14 B8 FF FF FF FF B9 22 00 00 00 89 44 24 10 8D 87 FF 0F 00 00 25 00 F0 FF FF 89 54 24 08 89 4C 24 0C 89 44 24 04 E8 04 3F 06 00
+
+. 0 3AA77BB0 13
+. 89 DA B8 5A 00 00 00 8D 5C 24 04 CD 80
+
+. 0 3AA77BBD 9
+. 89 D3 3D 00 F0 FF FF 77 01
+
+. 0 3AA77BC6 1
+. C3
+
+. 0 3AA13CAC 12
+. 89 C2 83 FA FF B8 FF FF FF FF 74 21
+
+. 0 3AA13CB8 28
+. 89 54 24 04 B8 01 00 00 00 89 44 24 0C 8D 04 17 89 44 24 08 89 34 24 E8 CC CE 00 00
+
+. 0 3AA20BA0 20
+. 55 89 E5 83 EC 10 89 75 FC 8B 75 08 89 5D F8 E8 DD 5B FB FF
+
+. 0 3AA20BB4 13
+. 81 C3 5C 24 0B 00 8B 4E 1C 85 C9 74 5F
+
+. 0 3AA20C20 4
+. 8B 16 EB A4
+
+. 0 3AA20BC8 19
+. 8B 45 0C 89 46 1C 8B 45 10 89 46 20 8B 45 14 85 C0 74 15
+
+. 0 3AA20BDB 15
+. 83 E2 FE 89 16 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3AA13CD4 18
+. B8 01 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA20E6E 3
+. 40 74 0F
+
+. 0 3AA20E71 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3AA1EBFA 23
+. 8B 46 1C 8B 0E 89 46 0C 89 46 04 89 46 08 F6 C5 01 0F 85 4E 01 00 00
+
+. 0 3AA1EC11 14
+. 8B 46 04 8B 7E 20 39 F8 0F 84 D8 00 00 00
+
+. 0 3AA1EC1F 40
+. 89 46 14 89 C2 89 46 10 8B 46 08 89 7D F0 89 46 04 89 46 0C 89 C8 8B 4E 5C 0D 00 08 00 00 89 06 85 C9 0F 8E DD 00 00 00
+
+. 0 3AA1ED24 7
+. A9 02 02 00 00 74 2C
+
+. 0 3AA1ED2B 8
+. 89 56 18 E9 1A FF FF FF
+
+. 0 3AA1EC4D 10
+. 83 7D 0C FF 0F 84 DC 00 00 00
+
+. 0 3AA1ED33 24
+. 8B 56 10 8B 46 14 89 34 24 89 54 24 04 29 D0 89 44 24 08 E8 35 F4 FF FF
+
+. 0 3AA1E180 17
+. 55 89 E5 56 8B 75 10 8B 45 08 8B 55 0C 85 F6 75 07
+
+. 0 3AA1E191 7
+. 31 D2 5E 89 D0 5D C3
+
+. 0 3AA1ED4B 12
+. 89 C2 83 C4 10 89 D0 5B 5E 5F 5D C3
+
+. 0 3AA1E0FD 3
+. 40 74 C4
+
+. 0 3AA1E100 22
+. 8B 45 08 8B 50 1C 8B 48 20 29 D1 8B 55 F0 83 F9 7F 89 55 E8 76 0E
+
+. 0 3AA1E116 21
+. 89 D0 31 D2 F7 F1 8B 4D F0 29 D1 89 4D E8 8B 75 E8 85 F6 75 2B
+
+. 0 3AA1E12B 7
+. 8B 4D F0 85 C9 74 92
+
+. 0 3AA1E132 28
+. 8B 45 F0 89 44 24 08 8B 55 E8 8D 04 3A 89 44 24 04 8B 4D 08 89 0C 24 E8 72 1E 00 00
+
+. 0 3AA1FFC0 30
+. 55 89 E5 57 56 83 EC 10 8B 45 10 8B 55 10 8B 7D 0C 89 45 F4 31 C0 85 D2 0F 84 9D 00 00 00
+
+. 0 3AA1FFDE 17
+. 89 F6 8B 55 08 8B 72 18 8B 42 14 29 C6 85 F6 7E 54
+
+. 0 3AA20043 7
+. 8B 75 F4 85 F6 74 29
+
+. 0 3AA2004A 30
+. 8B 55 08 89 D1 0F BE 42 46 8B 94 10 94 00 00 00 0F B6 07 47 89 0C 24 89 44 24 04 FF 52 0C
+
+. 0 3AA1EBEB 7
+. 8B 46 10 85 C0 75 5B
+
+. 0 3AA1EC57 8
+. 8B 56 14 3B 56 20 74 61
+
+. 0 3AA1EC5F 20
+. 0F B6 45 0C 88 02 8B 4E 14 8B 16 41 F6 C2 02 89 4E 14 75 11
+
+. 0 3AA1EC73 17
+. C1 EA 09 83 7D 0C 0A 0F 94 C0 21 C2 F6 C2 01 74 1D
+
+. 0 3AA1ECA1 14
+. 0F B6 55 0C 83 C4 10 89 D0 5B 5E 5F 5D C3
+
+. 0 3AA20068 3
+. 40 74 08
+
+. 0 3AA2006B 8
+. FF 4D F4 E9 6D FF FF FF
+
+. 0 3AA1FFE0 15
+. 8B 55 08 8B 72 18 8B 42 14 29 C6 85 F6 7E 54
+
+. 0 3AA1EC84 21
+. 8B 46 10 89 34 24 29 C1 89 4C 24 08 89 44 24 04 E8 E7 F4 FF FF
+
+. 0 3AA1E198 7
+. 89 F1 E8 F1 FC FF FF
+
+. 0 3AA1DE90 22
+. 55 89 E5 83 EC 24 89 5D F4 89 75 F8 89 C6 89 7D FC E8 EB 88 FB FF
+
+. 0 3AA1DEA6 18
+. 81 C3 6A 51 0B 00 F6 40 01 10 89 55 F0 89 4D EC 74 6E
+
+. 0 3AA1DF26 10
+. 8B 48 08 8B 50 10 39 D1 74 96
+
+. 0 3AA1DEC6 31
+. 0F BE 46 46 8B 55 EC 8B 84 30 94 00 00 00 89 54 24 08 8B 4D F0 89 34 24 89 4C 24 04 FF 50 3C
+
+. 0 3AA1E430 15
+. 55 89 E5 57 56 83 EC 14 8B 75 10 85 F6 7E 71
+
+. 0 3AA1E43F 9
+. 8B 45 08 F6 40 3C 02 75 37
+
+. 0 3AA1E448 33
+. 90 8D B4 26 00 00 00 00 89 74 24 08 8B 45 0C 89 44 24 04 8B 55 08 8B 42 38 89 04 24 E8 B7 E9 04 00
+
+. 0 3AA6CE20 5
+. E8 69 21 04 00
+
+. 0 3AA6CE25 15
+. 81 C1 EB 61 06 00 83 B9 14 1A 00 00 00 75 1D
+
+. 0 3AA6CE34 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 04 00 00 00 CD 80
+
+fabs_1 ... ok
+. 0 3AA6CE48 8
+. 5B 3D 01 F0 FF FF 73 2D
+
+. 0 3AA6CE50 1
+. C3
+
+. 0 3AA1E469 4
+. 85 C0 78 34
+
+. 0 3AA1E46D 9
+. 01 45 0C 29 C6 85 F6 7E 3A
+
+. 0 3AA1E4B0 16
+. 29 75 10 8B 45 08 8B 78 50 8B 70 4C 85 FF 78 11
+
+. 0 3AA1E4D1 10
+. 8B 45 10 83 C4 14 5E 5F 5D C3
+
+. 0 3AA1DEE5 11
+. 89 C7 0F B7 46 44 66 85 C0 74 04
+
+. 0 3AA1DEF4 29
+. 8B 56 5C 8B 46 1C 85 D2 89 46 0C 89 46 04 89 46 08 89 46 14 89 46 10 0F 8E 9F 00 00 00
+
+. 0 3AA1DFB0 12
+. F7 06 02 02 00 00 0F 85 58 FF FF FF
+
+. 0 3AA1DF14 18
+. 89 46 18 89 F8 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA1E19F 9
+. 39 F0 BA FF FF FF FF 74 E9
+
+. 0 3AA1EC99 8
+. 40 BA FF FF FF FF 74 04
+
+. 0 3AA20073 15
+. 8B 45 10 8B 4D F4 29 C8 83 C4 10 5E 5F 5D C3
+
+. 0 3AA1E14E 8
+. 29 45 F0 E9 6E FF FF FF
+
+. 0 3AA1E0C4 21
+. 8B 45 10 8B 55 F0 29 D0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9FB9DC 19
+. BA FF FF FF FF 89 95 50 FB FF FF 39 F0 0F 85 41 01 00 00
+
+. 0 3A9FB9EF 21
+. 89 85 50 FB FF FF 8B 95 B4 FB FF FF 80 3A 00 0F 84 2C 01 00 00
+
+. 0 3A9FBB30 16
+. 8B 4D 08 0F B7 01 25 00 80 FF FF 66 85 C0 74 1A
+
+. 0 3A9FBB5A 8
+. 89 0C 24 E8 FE A9 FD FF
+
+. 0 3A9D6560 6
+. FF A3 20 00 00 00
+
+. 0 3A9D6566 10
+. 68 28 00 00 00 E9 90 FF FF FF
+
+. 0 3AA12F90 12
+. 55 89 E5 53 83 EC 04 E8 F5 37 FC FF
+
+. 0 3AA12F9C 16
+. 81 C3 74 00 0C 00 8B 93 44 2B 00 00 85 D2 75 04
+
+. 0 3AA12FAC 4
+. 58 5B 5D C3
+
+. 0 3A9FBB62 10
+. 8B 95 30 FB FF FF 85 D2 74 DE
+
+. 0 3A9FBB4A 16
+. 8B 95 50 FB FF FF 8D 65 F4 89 D0 5B 5E 5F 5D C3
+
+. 0 3AA04500 6
+. 83 C4 0C 5B 5D C3
+
+. 0 80486A3 5
+. 83 C4 10 EB 48
+
+. 0 80486F0 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E1C 5
+. E8 D7 58 FE FF
+
+. 0 80486F8 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 C7 FC FF FF
+
+. 0 8048729 7
+. 83 C4 10 85 C0 75 77
+
+. 0 8048730 38
+. 9B DD B5 68 FF FF FF D9 45 E0 D9 E1 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D8 E8 AB FD FF FF
+
+. 0 8048756 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804875D 13
+. 83 EC 0C 68 89 38 06 08 E8 A6 FC FF FF
+
+. 0 3A9FB8BB 8
+. 40 BA FF FF FF FF 75 5F
+
+. 0 3AA1E052 10
+. 8B 71 20 29 D6 3B 75 10 72 C6
+
+. 0 3AA1E05C 10
+. 8B 4D 10 8D 04 39 39 F8 77 07
+
+. 0 3AA1E06D 6
+. 48 80 38 0A 75 F5
+
+. 0 3AA1E073 17
+. C7 45 EC 01 00 00 00 8B 4D 0C 29 C8 8D 70 01 EB 9E
+
+. 0 3AA1E02A 5
+. 3B 75 10 76 03
+
+. 0 3AA1E032 5
+. 83 FE 14 77 4D
+
+. 0 3AA1E037 7
+. 89 D1 89 F2 4A 78 0C
+
+. 0 3AA1E03E 12
+. 89 F6 0F B6 07 47 88 01 41 4A 79 F6
+
+. 0 3AA1E040 10
+. 0F B6 07 47 88 01 41 4A 79 F6
+
+. 0 3AA1E04A 8
+. 8B 45 08 89 48 14 EB 5E
+
+. 0 3AA1E0B0 20
+. 8B 55 10 29 F2 89 55 F0 8B 45 F0 8B 55 EC 01 D0 85 C0 75 1C
+
+fabs_2 ... ok
+. 0 804876A 5
+. 83 C4 10 EB 48
+
+. 0 80487B7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E21 5
+. E8 99 59 FE FF
+
+. 0 80487BF 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 59 A4 0C DC BA 29 8C 67 41 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 F8 FB FF FF
+
+. 0 80487F8 7
+. 83 C4 10 85 C0 75 79
+
+. 0 80487FF 44
+. 9B DD B5 68 FF FF FF DD 45 E0 D9 E1 DD 5D D8 DD A5 68 FF FF FF B8 59 A4 0C DC BA 29 8C 67 41 52 50 FF 75 DC FF 75 D8 E8 57 FD FF FF
+
+. 0 8048582 57
+. 55 89 E5 83 EC 38 8B 45 08 8B 55 0C 89 45 F8 89 55 FC 8B 45 10 8B 55 14 89 45 F0 89 55 F4 C7 45 EC 00 00 00 00 DD 45 F8 DD 45 F0 D9 C9 DA E9 DF E0 80 E4 45 80 FC 40 74 6A
+
+. 0 8048625 12
+. C7 45 EC 01 00 00 00 8B 45 EC C9 C3
+
+. 0 804882B 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8048832 13
+. 83 EC 0C 68 BE 38 06 08 E8 D1 FB FF FF
+
+fabs_3 ... ok
+. 0 804883F 5
+. 83 C4 10 EB 44
+
+. 0 8048888 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E26 5
+. E8 65 5A FE FF
+
+. 0 8048890 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 59 A4 0C DC BA 29 8C 67 C1 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 27 FB FF FF
+
+. 0 80488C9 7
+. 83 C4 10 85 C0 75 79
+
+. 0 80488D0 44
+. 9B DD B5 68 FF FF FF DD 45 E0 D9 E1 DD 5D D8 DD A5 68 FF FF FF B8 59 A4 0C DC BA 29 8C 67 41 52 50 FF 75 DC FF 75 D8 E8 86 FC FF FF
+
+. 0 80488FC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8048903 13
+. 83 EC 0C 68 1D 39 06 08 E8 00 FB FF FF
+
+fabs_4 ... ok
+. 0 8048910 5
+. 83 C4 10 EB 44
+
+. 0 8048959 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E2B 5
+. E8 31 5B FE FF
+
+. 0 8048961 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 4F FA FF FF
+
+. 0 80489A1 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80489A8 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 45 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 32 FB FF FF
+
+. 0 8048522 62
+. D9 45 08 D8 65 0C D9 E1 DD 5D F0 8B 45 08 89 45 E4 D9 45 E4 D9 E1 DD 05 18 38 06 08 DE C9 DD 5D E8 B8 00 00 00 00 BA 00 00 28 C0 52 50 B8 00 00 00 00 BA 00 00 00 40 52 50 E8 80 FE FF FF
+
+. 0 80483E0 6
+. FF 25 08 A1 06 08
+
+. 0 80483E6 10
+. 68 10 00 00 00 E9 C0 FF FF FF
+
+. 0 3A9A8860 20
+. 55 89 E5 53 83 EC 34 DD 45 08 DD 45 10 D9 C9 E8 DC B2 FF FF
+
+. 0 3A9A3B50 4
+. 8B 1C 24 C3
+
+. 0 3A9A8874 26
+. 81 C3 D0 6F 01 00 DD 55 F0 D9 C9 DD 55 E8 DD 5C 24 08 DD 1C 24 E8 72 D6 FF FF
+
+. 0 3A9A5F00 34
+. DD 44 24 0C D9 E5 E8 00 00 00 00 59 81 C1 39 99 01 00 DF E0 88 E2 80 E4 45 80 FC 40 0F 84 CE 00 00 00
+
+. 0 3A9A5F22 9
+. 80 FC 05 0F 84 D5 00 00 00
+
+. 0 3A9A5F2B 5
+. 80 FC 01 74 6A
+
+. 0 3A9A5F30 25
+. DD 44 24 04 83 EC 08 D9 E5 DF E0 88 E6 80 E4 45 80 FC 40 0F 84 87 01 00 00
+
+. 0 3A9A5F49 9
+. 80 FC 05 0F 84 FE 00 00 00
+
+. 0 3A9A5F52 17
+. D9 C9 D9 C0 DF 3C 24 DF 2C 24 DD E9 DF E0 9E 75 4D
+
+. 0 3A9A5F63 9
+. 58 5A 83 CA 00 DD D8 79 0D
+
+. 0 3A9A5F6C 27
+. DC B9 DC BD FF FF F7 D8 83 D2 00 F7 DA DD 81 DC BD FF FF D9 C9 0F AC D0 01 73 06
+
+. 0 3A9A5F8D 10
+. D8 C8 D1 EA 89 C1 09 D1 75 EA
+
+. 0 3A9A5F81 6
+. 0F AC D0 01 73 06
+
+. 0 3A9A5F87 16
+. D9 C9 D8 C9 D9 C9 D8 C8 D1 EA 89 C1 09 D1 75 EA
+
+. 0 3A9A5F97 3
+. DD D8 C3
+
+. 0 3A9A888E 18
+. 8B 83 38 00 00 00 DD 5D E0 83 38 FF 0F 84 40 01 00 00
+
+. 0 3A9A88A0 11
+. DD 45 E8 DD 1C 24 E8 45 29 00 00
+
+. 0 3A9AB1F0 40
+. 55 89 E5 8B 45 08 8B 55 0C 5D 89 D1 89 C2 F7 DA 81 E1 FF FF FF 7F 09 D0 C1 E8 1F 09 C1 B8 00 00 F0 7F 29 C8 C1 E8 1F C3
+
+. 0 3A9A88AB 8
+. 85 C0 0F 85 2D 01 00 00
+
+. 0 3A9A88B3 11
+. DD 45 F0 DD 1C 24 E8 32 29 00 00
+
+. 0 3A9A88BE 4
+. 85 C0 74 23
+
+. 0 3A9A88E5 14
+. DD 45 F0 D9 EE D9 C9 DD E9 DF E0 9E 75 72
+
+. 0 3A9A8965 13
+. DD D8 DD 45 E0 DD 1C 24 E8 AE 28 00 00
+
+. 0 3A9AB220 17
+. 8B 44 24 08 B9 FF FF EF FF 29 C1 31 C8 C1 E8 1F C3
+
+. 0 3A9A8972 4
+. 85 C0 75 3A
+
+. 0 3A9A89B0 14
+. DD 45 E0 D9 EE D9 C9 DA E9 DF E0 9E 75 22
+
+. 0 3A9A89E0 9
+. DD 45 E0 83 C4 34 5B 5D C3
+
+. 0 8048560 20
+. 83 C4 10 DC 4D E8 DD 45 F0 D9 C9 DA E9 DF E0 F6 C4 45 74 02
+
+. 0 80489CF 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80489D6 13
+. 83 EC 0C 68 52 39 06 08 E8 2D FA FF FF
+
+fadds_1 ... ok
+. 0 80489E3 5
+. 83 C4 10 EB 48
+
+. 0 8048A30 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E30 5
+. E8 03 5C FE FF
+
+. 0 8048A38 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 78 F9 FF FF
+
+. 0 8048A78 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8048A7F 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 45 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 5B FA FF FF
+
+. 0 8048AA6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8048AAD 13
+. 83 EC 0C 68 8A 39 06 08 E8 56 F9 FF FF
+
+fadds_2 ... ok
+. 0 8048ABA 5
+. 83 C4 10 EB 48
+
+. 0 8048B07 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E35 5
+. E8 D5 5C FE FF
+
+. 0 8048B0F 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 A1 F8 FF FF
+
+. 0 8048B4F 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8048B56 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 45 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 84 F9 FF FF
+
+. 0 8048B7D 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8048B84 13
+. 83 EC 0C 68 C2 39 06 08 E8 7F F8 FF FF
+
+fadds_3 ... ok
+. 0 8048B91 5
+. 83 C4 10 EB 48
+
+. 0 8048BDE 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E3A 5
+. E8 A7 5D FE FF
+
+. 0 8048BE6 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 CA F7 FF FF
+
+. 0 8048C26 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8048C2D 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 45 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 AD F8 FF FF
+
+. 0 8048C54 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8048C5B 13
+. 83 EC 0C 68 FA 39 06 08 E8 A8 F7 FF FF
+
+fadds_4 ... ok
+. 0 8048C68 5
+. 83 C4 10 EB 48
+
+. 0 8048CB5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E3F 5
+. E8 79 5E FE FF
+
+. 0 8048CBD 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 DC F6 FF FF
+
+. 0 8048D14 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8048D1B 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 45 D8 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 3A F8 FF FF
+
+. 0 8048D48 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8048D4F 13
+. 83 EC 0C 68 32 3A 06 08 E8 B4 F6 FF FF
+
+faddl_1 ... ok
+. 0 8048D5C 5
+. 83 C4 10 EB 44
+
+. 0 8048DA5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E44 5
+. E8 64 5F FE FF
+
+. 0 8048DAD 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 EC F5 FF FF
+
+. 0 8048E04 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8048E0B 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 45 D8 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 4A F7 FF FF
+
+. 0 8048E38 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8048E3F 13
+. 83 EC 0C 68 6A 3A 06 08 E8 C4 F5 FF FF
+
+faddl_2 ... ok
+. 0 8048E4C 5
+. 83 C4 10 EB 44
+
+. 0 8048E95 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E49 5
+. E8 4F 60 FE FF
+
+. 0 8048E9D 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 FC F4 FF FF
+
+. 0 8048EF4 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8048EFB 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 45 D8 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 5A F6 FF FF
+
+. 0 8048F28 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8048F2F 13
+. 83 EC 0C 68 A2 3A 06 08 E8 D4 F4 FF FF
+
+faddl_3 ... ok
+. 0 8048F3C 5
+. 83 C4 10 EB 44
+
+. 0 8048F85 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E4E 5
+. E8 3A 61 FE FF
+
+. 0 8048F8D 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 0C F4 FF FF
+
+. 0 8048FE4 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8048FEB 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 45 D8 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 6A F5 FF FF
+
+. 0 8049018 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804901F 13
+. 83 EC 0C 68 DA 3A 06 08 E8 E4 F3 FF FF
+
+faddl_4 ... ok
+. 0 804902C 5
+. 83 C4 10 EB 44
+
+. 0 8049075 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E53 5
+. E8 25 62 FE FF
+
+. 0 804907D 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 2C F3 FF FF
+
+. 0 80490C4 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 80490CF 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC C2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 03 F4 FF FF
+
+. 0 80490FE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049105 13
+. 83 EC 0C 68 12 3B 06 08 E8 FE F2 FF FF
+
+fadd_1 ... ok
+. 0 8049112 5
+. 83 C4 10 EB 48
+
+. 0 804915F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E58 5
+. E8 0A 63 FE FF
+
+. 0 8049167 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 42 F2 FF FF
+
+. 0 80491AE 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 80491B9 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC C2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 19 F3 FF FF
+
+. 0 80491E8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80491EF 13
+. 83 EC 0C 68 47 3B 06 08 E8 14 F2 FF FF
+
+fadd_2 ... ok
+. 0 80491FC 5
+. 83 C4 10 EB 48
+
+. 0 8049249 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E5D 5
+. E8 EF 63 FE FF
+
+. 0 8049251 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 58 F1 FF FF
+
+. 0 8049298 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 80492A3 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC C2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 2F F2 FF FF
+
+. 0 80492D2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80492D9 13
+. 83 EC 0C 68 7C 3B 06 08 E8 2A F1 FF FF
+
+fadd_3 ... ok
+. 0 80492E6 5
+. 83 C4 10 EB 48
+
+. 0 8049333 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E62 5
+. E8 D4 64 FE FF
+
+. 0 804933B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 6E F0 FF FF
+
+. 0 8049382 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804938D 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC C2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 45 F1 FF FF
+
+. 0 80493BC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80493C3 13
+. 83 EC 0C 68 B1 3B 06 08 E8 40 F0 FF FF
+
+fadd_4 ... ok
+. 0 80493D0 5
+. 83 C4 10 EB 48
+
+. 0 804941D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E67 5
+. E8 B9 65 FE FF
+
+. 0 8049425 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 74 EF FF FF
+
+. 0 804947C 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8049487 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC C2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 C6 F0 FF FF
+
+. 0 80494BC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80494C3 13
+. 83 EC 0C 68 E6 3B 06 08 E8 40 EF FF FF
+
+fadd_5 ... ok
+. 0 80494D0 5
+. 83 C4 10 EB 44
+
+. 0 8049519 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E6C 5
+. E8 B0 66 FE FF
+
+. 0 8049521 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 78 EE FF FF
+
+. 0 8049578 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8049583 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC C2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 CA EF FF FF
+
+. 0 80495B8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80495BF 13
+. 83 EC 0C 68 1B 3C 06 08 E8 44 EE FF FF
+
+fadd_6 ... ok
+. 0 80495CC 5
+. 83 C4 10 EB 44
+
+. 0 8049615 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E71 5
+. E8 A7 67 FE FF
+
+. 0 804961D 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 7C ED FF FF
+
+. 0 8049674 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804967F 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC C2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 CE EE FF FF
+
+. 0 80496B4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80496BB 13
+. 83 EC 0C 68 50 3C 06 08 E8 48 ED FF FF
+
+fadd_7 ... ok
+. 0 80496C8 5
+. 83 C4 10 EB 44
+
+. 0 8049711 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E76 5
+. E8 9E 68 FE FF
+
+. 0 8049719 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 80 EC FF FF
+
+. 0 8049770 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804977B 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC C2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 D2 ED FF FF
+
+. 0 80497B0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80497B7 13
+. 83 EC 0C 68 85 3C 06 08 E8 4C EC FF FF
+
+fadd_8 ... ok
+. 0 80497C4 5
+. 83 C4 10 EB 44
+
+. 0 804980D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E7B 5
+. E8 95 69 FE FF
+
+. 0 8049815 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 94 EB FF FF
+
+. 0 804985C 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8049863 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 C2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 73 EC FF FF
+
+. 0 804988E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049895 13
+. 83 EC 0C 68 BA 3C 06 08 E8 6E EB FF FF
+
+fadd_9 ... ok
+. 0 80498A2 5
+. 83 C4 10 EB 48
+
+. 0 80498EF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E80 5
+. E8 72 6A FE FF
+
+. 0 80498F7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 B2 EA FF FF
+
+. 0 804993E 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8049945 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 C2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 91 EB FF FF
+
+. 0 8049970 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049977 13
+. 83 EC 0C 68 EF 3C 06 08 E8 8C EA FF FF
+
+fadd_10 ... ok
+. 0 8049984 5
+. 83 C4 10 EB 48
+
+. 0 80499D1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E85 5
+. E8 4F 6B FE FF
+
+. 0 80499D9 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 D0 E9 FF FF
+
+. 0 8049A20 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8049A27 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 C2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 AF EA FF FF
+
+. 0 8049A52 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049A59 13
+. 83 EC 0C 68 27 3D 06 08 E8 AA E9 FF FF
+
+fadd_11 ... ok
+. 0 8049A66 5
+. 83 C4 10 EB 48
+
+. 0 8049AB3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E8A 5
+. E8 2C 6C FE FF
+
+. 0 8049ABB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 EE E8 FF FF
+
+. 0 8049B02 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8049B09 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 C2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 CD E9 FF FF
+
+. 0 8049B34 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049B3B 13
+. 83 EC 0C 68 5F 3D 06 08 E8 C8 E8 FF FF
+
+fadd_12 ... ok
+. 0 8049B48 5
+. 83 C4 10 EB 48
+
+. 0 8049B95 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E8F 5
+. E8 09 6D FE FF
+
+. 0 8049B9D 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 FC E7 FF FF
+
+. 0 8049BF4 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8049BFB 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 C2 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 56 E9 FF FF
+
+. 0 8049C2C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049C33 13
+. 83 EC 0C 68 97 3D 06 08 E8 D0 E7 FF FF
+
+fadd_13 ... ok
+. 0 8049C40 5
+. 83 C4 10 EB 44
+
+. 0 8049C89 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E94 5
+. E8 F8 6D FE FF
+
+. 0 8049C91 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 08 E7 FF FF
+
+. 0 8049CE8 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8049CEF 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 C2 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 62 E8 FF FF
+
+. 0 8049D20 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049D27 13
+. 83 EC 0C 68 CF 3D 06 08 E8 DC E6 FF FF
+
+fadd_14 ... ok
+. 0 8049D34 5
+. 83 C4 10 EB 44
+
+. 0 8049D7D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E99 5
+. E8 E7 6E FE FF
+
+. 0 8049D85 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 14 E6 FF FF
+
+. 0 8049DDC 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8049DE3 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 C2 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 6E E7 FF FF
+
+. 0 8049E14 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049E1B 13
+. 83 EC 0C 68 07 3E 06 08 E8 E8 E5 FF FF
+
+fadd_15 ... ok
+. 0 8049E28 5
+. 83 C4 10 EB 44
+
+. 0 8049E71 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062E9E 5
+. E8 D6 6F FE FF
+
+. 0 8049E79 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 20 E5 FF FF
+
+. 0 8049ED0 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8049ED7 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 C2 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 7A E6 FF FF
+
+. 0 8049F08 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049F0F 13
+. 83 EC 0C 68 3F 3E 06 08 E8 F4 E4 FF FF
+
+fadd_16 ... ok
+. 0 8049F1C 5
+. 83 C4 10 EB 44
+
+. 0 8049F65 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EA3 5
+. E8 C5 70 FE FF
+
+. 0 8049F6D 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 3C E4 FF FF
+
+. 0 8049FB4 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8049FBB 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE C2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 19 E5 FF FF
+
+. 0 8049FE8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8049FEF 13
+. 83 EC 0C 68 77 3E 06 08 E8 14 E4 FF FF
+
+faddp_1 ... ok
+. 0 8049FFC 5
+. 83 C4 10 EB 48
+
+. 0 804A049 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EA8 5
+. E8 A4 71 FE FF
+
+. 0 804A051 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 58 E3 FF FF
+
+. 0 804A098 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804A09F 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE C2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 35 E4 FF FF
+
+. 0 804A0CC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A0D3 13
+. 83 EC 0C 68 AF 3E 06 08 E8 30 E3 FF FF
+
+faddp_2 ... ok
+. 0 804A0E0 5
+. 83 C4 10 EB 48
+
+. 0 804A12D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EAD 5
+. E8 83 72 FE FF
+
+. 0 804A135 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 74 E2 FF FF
+
+. 0 804A17C 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804A183 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE C2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 51 E3 FF FF
+
+. 0 804A1B0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A1B7 13
+. 83 EC 0C 68 E7 3E 06 08 E8 4C E2 FF FF
+
+faddp_3 ... ok
+. 0 804A1C4 5
+. 83 C4 10 EB 48
+
+. 0 804A211 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EB2 5
+. E8 62 73 FE FF
+
+. 0 804A219 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 90 E1 FF FF
+
+. 0 804A260 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804A267 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE C2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 6D E2 FF FF
+
+. 0 804A294 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A29B 13
+. 83 EC 0C 68 1F 3F 06 08 E8 68 E1 FF FF
+
+faddp_4 ... ok
+. 0 804A2A8 5
+. 83 C4 10 EB 48
+
+. 0 804A2F5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EB7 5
+. E8 41 74 FE FF
+
+. 0 804A2FD 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 9C E0 FF FF
+
+. 0 804A354 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804A35F 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE C2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 F0 E1 FF FF
+
+. 0 804A392 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A399 13
+. 83 EC 0C 68 57 3F 06 08 E8 6A E0 FF FF
+
+faddp_5 ... ok
+. 0 804A3A6 5
+. 83 C4 10 EB 44
+
+. 0 804A3EF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EBC 5
+. E8 36 75 FE FF
+
+. 0 804A3F7 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 A2 DF FF FF
+
+. 0 804A44E 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804A459 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE C2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 F6 E0 FF FF
+
+. 0 804A48C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A493 13
+. 83 EC 0C 68 8F 3F 06 08 E8 70 DF FF FF
+
+faddp_6 ... ok
+. 0 804A4A0 5
+. 83 C4 10 EB 44
+
+. 0 804A4E9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EC1 5
+. E8 2B 76 FE FF
+
+. 0 804A4F1 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 A8 DE FF FF
+
+. 0 804A548 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804A553 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE C2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 FC DF FF FF
+
+. 0 804A586 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A58D 13
+. 83 EC 0C 68 C7 3F 06 08 E8 76 DE FF FF
+
+faddp_7 ... ok
+. 0 804A59A 5
+. 83 C4 10 EB 44
+
+. 0 804A5E3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EC6 5
+. E8 20 77 FE FF
+
+. 0 804A5EB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 AE DD FF FF
+
+. 0 804A642 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804A64D 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE C2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 02 DF FF FF
+
+. 0 804A680 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A687 13
+. 83 EC 0C 68 FF 3F 06 08 E8 7C DD FF FF
+
+faddp_8 ... ok
+. 0 804A694 5
+. 83 C4 10 EB 44
+
+. 0 804A6DD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062ECB 5
+. E8 15 78 FE FF
+
+. 0 804A6E5 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 C4 DC FF FF
+
+. 0 804A72C 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804A733 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE C1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 A5 DD FF FF
+
+. 0 804A75C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A763 13
+. 83 EC 0C 68 37 40 06 08 E8 A0 DC FF FF
+
+faddp_9 ... ok
+. 0 804A770 5
+. 83 C4 10 EB 48
+
+. 0 804A7BD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062ED0 5
+. E8 F0 78 FE FF
+
+. 0 804A7C5 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 E4 DB FF FF
+
+. 0 804A80C 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804A813 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE C1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 C5 DC FF FF
+
+. 0 804A83C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A843 13
+. 83 EC 0C 68 6F 40 06 08 E8 C0 DB FF FF
+
+faddp_10 ... ok
+. 0 804A850 5
+. 83 C4 10 EB 48
+
+. 0 804A89D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062ED5 5
+. E8 CB 79 FE FF
+
+. 0 804A8A5 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 04 DB FF FF
+
+. 0 804A8EC 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804A8F3 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE C1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 E5 DB FF FF
+
+. 0 804A91C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804A923 13
+. 83 EC 0C 68 AA 40 06 08 E8 E0 DA FF FF
+
+faddp_11 ... ok
+. 0 804A930 5
+. 83 C4 10 EB 48
+
+. 0 804A97D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EDA 5
+. E8 A6 7A FE FF
+
+. 0 804A985 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 24 DA FF FF
+
+. 0 804A9CC 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804A9D3 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE C1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 05 DB FF FF
+
+. 0 804A9FC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804AA03 13
+. 83 EC 0C 68 E5 40 06 08 E8 00 DA FF FF
+
+faddp_12 ... ok
+. 0 804AA10 5
+. 83 C4 10 EB 48
+
+. 0 804AA5D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EDF 5
+. E8 81 7B FE FF
+
+. 0 804AA65 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 34 D9 FF FF
+
+. 0 804AABC 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804AAC3 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE C1 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 90 DA FF FF
+
+. 0 804AAF2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804AAF9 13
+. 83 EC 0C 68 20 41 06 08 E8 0A D9 FF FF
+
+faddp_13 ... ok
+. 0 804AB06 5
+. 83 C4 10 EB 44
+
+. 0 804AB4F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EE4 5
+. E8 6E 7C FE FF
+
+. 0 804AB57 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 42 D8 FF FF
+
+. 0 804ABAE 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804ABB5 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE C1 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 9E D9 FF FF
+
+. 0 804ABE4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804ABEB 13
+. 83 EC 0C 68 5B 41 06 08 E8 18 D8 FF FF
+
+faddp_14 ... ok
+. 0 804ABF8 5
+. 83 C4 10 EB 44
+
+. 0 804AC41 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EE9 5
+. E8 5B 7D FE FF
+
+. 0 804AC49 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 50 D7 FF FF
+
+. 0 804ACA0 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804ACA7 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE C1 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 AC D8 FF FF
+
+. 0 804ACD6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804ACDD 13
+. 83 EC 0C 68 96 41 06 08 E8 26 D7 FF FF
+
+faddp_15 ... ok
+. 0 804ACEA 5
+. 83 C4 10 EB 44
+
+. 0 804AD33 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EEE 5
+. E8 48 7E FE FF
+
+. 0 804AD3B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 5E D6 FF FF
+
+. 0 804AD92 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804AD99 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE C1 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 BA D7 FF FF
+
+. 0 804ADC8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804ADCF 13
+. 83 EC 0C 68 D1 41 06 08 E8 34 D6 FF FF
+
+faddp_16 ... ok
+. 0 804ADDC 5
+. 83 C4 10 EB 44
+
+. 0 804AE25 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EF3 5
+. E8 35 7F FE FF
+
+. 0 804AE2D 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 86 D5 FF FF
+
+. 0 804AE6A 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804AE71 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 45 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 8B 9C AD 45 50 FF 75 D0 E8 69 D6 FF FF
+
+. 0 804AE98 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804AE9F 13
+. 83 EC 0C 68 0C 42 06 08 E8 64 D5 FF FF
+
+fiadds_1 ... ok
+. 0 804AEAC 5
+. 83 C4 10 EB 48
+
+. 0 804AEF9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EF8 5
+. E8 04 80 FE FF
+
+. 0 804AF01 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 B2 D4 FF FF
+
+. 0 804AF3E 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804AF45 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 45 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA E6 40 45 50 FF 75 D0 E8 95 D5 FF FF
+
+. 0 804AF6C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804AF73 13
+. 83 EC 0C 68 47 42 06 08 E8 90 D4 FF FF
+
+fiadds_2 ... ok
+. 0 804AF80 5
+. 83 C4 10 EB 48
+
+. 0 804AFCD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062EFD 5
+. E8 D3 80 FE FF
+
+. 0 804AFD5 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 DE D3 FF FF
+
+. 0 804B012 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804B019 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 45 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA E6 40 C5 50 FF 75 D0 E8 C1 D4 FF FF
+
+. 0 804B040 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B047 13
+. 83 EC 0C 68 82 42 06 08 E8 BC D3 FF FF
+
+fiadds_3 ... ok
+. 0 804B054 5
+. 83 C4 10 EB 48
+
+. 0 804B0A1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F02 5
+. E8 A2 81 FE FF
+
+. 0 804B0A9 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 0A D3 FF FF
+
+. 0 804B0E6 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804B0ED 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 45 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 8B 9C AD C5 50 FF 75 D0 E8 ED D3 FF FF
+
+. 0 804B114 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B11B 13
+. 83 EC 0C 68 BD 42 06 08 E8 E8 D2 FF FF
+
+fiadds_4 ... ok
+. 0 804B128 5
+. 83 C4 10 EB 48
+
+. 0 804B175 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F07 5
+. E8 71 82 FE FF
+
+. 0 804B17D 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 2E D2 FF FF
+
+. 0 804B1C2 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804B1C9 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 45 DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 68 E7 32 41 52 50 FF 75 D4 FF 75 D0 E8 8C D3 FF FF
+
+. 0 804B1F6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B1FD 13
+. 83 EC 0C 68 F8 42 06 08 E8 06 D2 FF FF
+
+fiadds_5 ... ok
+. 0 804B20A 5
+. 83 C4 10 EB 44
+
+. 0 804B253 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F0C 5
+. E8 4A 83 FE FF
+
+. 0 804B25B 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 50 D1 FF FF
+
+. 0 804B2A0 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804B2A7 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 45 DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA A6 C5 32 C1 52 50 FF 75 D4 FF 75 D0 E8 AE D2 FF FF
+
+. 0 804B2D4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B2DB 13
+. 83 EC 0C 68 33 43 06 08 E8 28 D1 FF FF
+
+fiadds_6 ... ok
+. 0 804B2E8 5
+. 83 C4 10 EB 44
+
+. 0 804B331 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F11 5
+. E8 23 84 FE FF
+
+. 0 804B339 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 72 D0 FF FF
+
+. 0 804B37E 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804B385 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 45 DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA A6 C5 32 41 52 50 FF 75 D4 FF 75 D0 E8 D0 D1 FF FF
+
+. 0 804B3B2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B3B9 13
+. 83 EC 0C 68 6E 43 06 08 E8 4A D0 FF FF
+
+fiadds_7 ... ok
+. 0 804B3C6 5
+. 83 C4 10 EB 44
+
+. 0 804B40F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F16 5
+. E8 FC 84 FE FF
+
+. 0 804B417 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 94 CF FF FF
+
+. 0 804B45C 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804B463 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 45 DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 68 E7 32 C1 52 50 FF 75 D4 FF 75 D0 E8 F2 D0 FF FF
+
+. 0 804B490 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B497 13
+. 83 EC 0C 68 A9 43 06 08 E8 6C CF FF FF
+
+fiadds_8 ... ok
+. 0 804B4A4 5
+. 83 C4 10 EB 44
+
+. 0 804B4ED 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F1B 5
+. E8 D5 85 FE FF
+
+. 0 804B4F5 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 BC CE FF FF
+
+. 0 804B534 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804B53B 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 45 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 30 A7 4C 50 FF 75 D0 E8 9F CF FF FF
+
+. 0 804B562 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B569 13
+. 83 EC 0C 68 E4 43 06 08 E8 9A CE FF FF
+
+fiaddl_1 ... ok
+. 0 804B576 5
+. 83 C4 10 EB 48
+
+. 0 804B5C3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F20 5
+. E8 A6 86 FE FF
+
+. 0 804B5CB 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 E6 CD FF FF
+
+. 0 804B60A 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804B611 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 45 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 5C 2F A7 4C 50 FF 75 D0 E8 C9 CE FF FF
+
+. 0 804B638 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B63F 13
+. 83 EC 0C 68 1F 44 06 08 E8 C4 CD FF FF
+
+fiaddl_2 ... ok
+. 0 804B64C 5
+. 83 C4 10 EB 48
+
+. 0 804B699 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F25 5
+. E8 77 87 FE FF
+
+. 0 804B6A1 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 10 CD FF FF
+
+. 0 804B6E0 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804B6E7 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 45 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 5C 2F A7 CC 50 FF 75 D0 E8 F3 CD FF FF
+
+. 0 804B70E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B715 13
+. 83 EC 0C 68 5A 44 06 08 E8 EE CC FF FF
+
+fiaddl_3 ... ok
+. 0 804B722 5
+. 83 C4 10 EB 48
+
+. 0 804B76F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F2A 5
+. E8 48 88 FE FF
+
+. 0 804B777 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 3A CC FF FF
+
+. 0 804B7B6 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804B7BD 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 45 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 30 A7 CC 50 FF 75 D0 E8 1D CD FF FF
+
+. 0 804B7E4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B7EB 13
+. 83 EC 0C 68 95 44 06 08 E8 18 CC FF FF
+
+fiaddl_4 ... ok
+. 0 804B7F8 5
+. 83 C4 10 EB 48
+
+. 0 804B845 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F2F 5
+. E8 19 89 FE FF
+
+. 0 804B84D 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 5C CB FF FF
+
+. 0 804B894 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804B89B 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 45 DC DD 5D D0 DD A5 58 FF FF FF B8 6F CD 0F E3 BA 58 31 95 41 52 50 FF 75 D4 FF 75 D0 E8 BA CC FF FF
+
+. 0 804B8C8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B8CF 13
+. 83 EC 0C 68 D0 44 06 08 E8 34 CB FF FF
+
+fiaddl_5 ... ok
+. 0 804B8DC 5
+. 83 C4 10 EB 44
+
+. 0 804B925 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F34 5
+. E8 F4 89 FE FF
+
+. 0 804B92D 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 7C CA FF FF
+
+. 0 804B974 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804B97B 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 45 DC DD 5D D0 DD A5 58 FF FF FF B8 91 32 F0 A4 BA A4 9A 94 41 52 50 FF 75 D4 FF 75 D0 E8 DA CB FF FF
+
+. 0 804B9A8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804B9AF 13
+. 83 EC 0C 68 0B 45 06 08 E8 54 CA FF FF
+
+fiaddl_6 ... ok
+. 0 804B9BC 5
+. 83 C4 10 EB 44
+
+. 0 804BA05 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F39 5
+. E8 CF 8A FE FF
+
+. 0 804BA0D 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 9C C9 FF FF
+
+. 0 804BA54 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804BA5B 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 45 DC DD 5D D0 DD A5 58 FF FF FF B8 91 32 F0 A4 BA A4 9A 94 C1 52 50 FF 75 D4 FF 75 D0 E8 FA CA FF FF
+
+. 0 804BA88 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804BA8F 13
+. 83 EC 0C 68 46 45 06 08 E8 74 C9 FF FF
+
+fiaddl_7 ... ok
+. 0 804BA9C 5
+. 83 C4 10 EB 44
+
+. 0 804BAE5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F3E 5
+. E8 AA 8B FE FF
+
+. 0 804BAED 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 BC C8 FF FF
+
+. 0 804BB34 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804BB3B 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 45 DC DD 5D D0 DD A5 58 FF FF FF B8 6F CD 0F E3 BA 58 31 95 C1 52 50 FF 75 D4 FF 75 D0 E8 1A CA FF FF
+
+. 0 804BB68 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804BB6F 13
+. 83 EC 0C 68 81 45 06 08 E8 94 C8 FF FF
+
+fiaddl_8 ... ok
+. 0 804BB7C 5
+. 83 C4 10 EB 44
+
+. 0 804BBC5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F43 5
+. E8 85 8C FE FF
+
+. 0 804BBCD 71
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2C 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 DC C7 FF FF
+
+. 0 804BC14 11
+. 83 C4 10 85 C0 0F 85 F2 00 00 00
+
+. 0 804BC1F 62
+. 9B DD B5 48 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 45 9D DB F2 9C 8F 45 C4 D9 5D D0 D9 F7 D9 5D C8 DD A5 48 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 A4 C8 FF FF
+
+. 0 804BC5D 7
+. 83 C4 10 85 C0 74 37
+
+. 0 804BC64 17
+. 83 EC 08 B8 2C 52 9A 44 50 FF 75 C8 E8 8C C8 FF FF
+
+. 0 804BC75 7
+. 83 C4 10 85 C0 74 1F
+
+. 0 804BC7C 10
+. 8B 45 C4 83 E0 45 85 C0 75 15
+
+. 0 804BC86 13
+. 83 EC 0C 68 BC 45 06 08 E8 7D C7 FF FF
+
+fcomi_1 ... ok
+. 0 804BC93 8
+. 83 C4 10 E9 86 00 00 00
+
+. 0 804BD21 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F48 5
+. E8 DC 8D FE FF
+
+. 0 804BD29 71
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2A 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 80 C6 FF FF
+
+. 0 804BD70 11
+. 83 C4 10 85 C0 0F 85 F3 00 00 00
+
+. 0 804BD7B 62
+. 9B DD B5 48 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 44 9D DB F2 9C 8F 45 C4 D9 5D D0 D9 F7 D9 5D C8 DD A5 48 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 48 C7 FF FF
+
+. 0 804BDB9 7
+. 83 C4 10 85 C0 74 38
+
+. 0 804BDC0 17
+. 83 EC 08 B8 2A 52 9A 44 50 FF 75 C8 E8 30 C7 FF FF
+
+. 0 804BDD1 7
+. 83 C4 10 85 C0 74 20
+
+. 0 804BDD8 11
+. 8B 45 C4 83 E0 45 83 F8 01 75 15
+
+. 0 804BDE3 13
+. 83 EC 0C 68 4B 46 06 08 E8 20 C6 FF FF
+
+fcomi_2 ... ok
+. 0 804BDF0 8
+. 83 C4 10 E9 86 00 00 00
+
+. 0 804BE7E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F4D 5
+. E8 34 8F FE FF
+
+. 0 804BE86 71
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2B 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 23 C5 FF FF
+
+. 0 804BECD 11
+. 83 C4 10 85 C0 0F 85 F3 00 00 00
+
+. 0 804BED8 62
+. 9B DD B5 48 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 05 9D DB F2 9C 8F 45 C4 D9 5D D0 D9 F7 D9 5D C8 DD A5 48 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 EB C5 FF FF
+
+. 0 804BF16 7
+. 83 C4 10 85 C0 74 38
+
+. 0 804BF1D 17
+. 83 EC 08 B8 2B 52 9A 44 50 FF 75 C8 E8 D3 C5 FF FF
+
+. 0 804BF2E 7
+. 83 C4 10 85 C0 74 20
+
+. 0 804BF35 11
+. 8B 45 C4 83 E0 45 83 F8 40 75 15
+
+. 0 804BF40 13
+. 83 EC 0C 68 83 46 06 08 E8 C3 C4 FF FF
+
+fcomi_3 ... ok
+. 0 804BF4D 8
+. 83 C4 10 E9 86 00 00 00
+
+. 0 804BFDB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F52 5
+. E8 8C 90 FE FF
+
+. 0 804BFE3 87
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 5A 5D F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B6 C3 FF FF
+
+. 0 804C03A 11
+. 83 C4 10 85 C0 0F 85 F3 00 00 00
+
+. 0 804C045 68
+. 9B DD B5 48 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 45 9D DB F2 9C 8F 45 C4 DD 5D D0 D9 F7 DD 5D C8 DD A5 48 FF FF FF B8 5A 5D F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 F9 C4 FF FF
+
+. 0 804C089 7
+. 83 C4 10 85 C0 74 3A
+
+. 0 804C090 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 CC FF 75 C8 E8 DB C4 FF FF
+
+. 0 804C0A7 7
+. 83 C4 10 85 C0 74 1C
+
+. 0 804C0AE 10
+. 8B 45 C4 83 E0 45 85 C0 75 12
+
+. 0 804C0B8 13
+. 83 EC 0C 68 BB 46 06 08 E8 4B C3 FF FF
+
+fcomi_4 ... ok
+. 0 804C0C5 5
+. 83 C4 10 EB 7E
+
+. 0 804C148 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F57 5
+. E8 F4 91 FE FF
+
+. 0 804C150 87
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 FF 59 F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 49 C2 FF FF
+
+. 0 804C1A7 11
+. 83 C4 10 85 C0 0F 85 F4 00 00 00
+
+. 0 804C1B2 68
+. 9B DD B5 48 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 44 9D DB F2 9C 8F 45 C4 DD 5D D0 D9 F7 DD 5D C8 DD A5 48 FF FF FF B8 FF 59 F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 8C C3 FF FF
+
+. 0 804C1F6 7
+. 83 C4 10 85 C0 74 3B
+
+. 0 804C1FD 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 CC FF 75 C8 E8 6E C3 FF FF
+
+. 0 804C214 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804C21B 11
+. 8B 45 C4 83 E0 45 83 F8 01 75 12
+
+. 0 804C226 13
+. 83 EC 0C 68 1E 47 06 08 E8 DD C1 FF FF
+
+fcomi_5 ... ok
+. 0 804C233 5
+. 83 C4 10 EB 7E
+
+. 0 804C2B6 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F5C 5
+. E8 5D 93 FE FF
+
+. 0 804C2BE 87
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 DB C0 FF FF
+
+. 0 804C315 11
+. 83 C4 10 85 C0 0F 85 F4 00 00 00
+
+. 0 804C320 68
+. 9B DD B5 48 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 05 9D DB F2 9C 8F 45 C4 DD 5D D0 D9 F7 DD 5D C8 DD A5 48 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 1E C2 FF FF
+
+. 0 804C364 7
+. 83 C4 10 85 C0 74 3B
+
+. 0 804C36B 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 CC FF 75 C8 E8 00 C2 FF FF
+
+. 0 804C382 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804C389 11
+. 8B 45 C4 83 E0 45 83 F8 40 75 12
+
+. 0 804C394 13
+. 83 EC 0C 68 56 47 06 08 E8 6F C0 FF FF
+
+fcomi_6 ... ok
+. 0 804C3A1 5
+. 83 C4 10 EB 7E
+
+. 0 804C424 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F61 5
+. E8 C6 94 FE FF
+
+. 0 804C42C 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2C 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 7D BF FF FF
+
+. 0 804C473 11
+. 83 C4 10 85 C0 0F 85 AE 00 00 00
+
+. 0 804C47E 59
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 45 9D DF F2 9C 8F 45 CC D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2C 52 9A 44 50 FF 75 D0 E8 48 C0 FF FF
+
+. 0 804C4B9 7
+. 83 C4 10 85 C0 74 1C
+
+. 0 804C4C0 10
+. 8B 45 CC 83 E0 45 85 C0 75 12
+
+. 0 804C4CA 13
+. 83 EC 0C 68 8E 47 06 08 E8 39 BF FF FF
+
+fcomip_1 ... ok
+. 0 804C4D7 5
+. 83 C4 10 EB 60
+
+. 0 804C53C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F66 5
+. E8 D9 95 FE FF
+
+. 0 804C544 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2A 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 65 BE FF FF
+
+. 0 804C58B 11
+. 83 C4 10 85 C0 0F 85 AF 00 00 00
+
+. 0 804C596 59
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 44 9D DF F2 9C 8F 45 CC D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2A 52 9A 44 50 FF 75 D0 E8 30 BF FF FF
+
+. 0 804C5D1 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804C5D8 11
+. 8B 45 CC 83 E0 45 83 F8 01 75 12
+
+. 0 804C5E3 13
+. 83 EC 0C 68 C9 47 06 08 E8 20 BE FF FF
+
+fcomip_2 ... ok
+. 0 804C5F0 5
+. 83 C4 10 EB 60
+
+. 0 804C655 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F6B 5
+. E8 ED 96 FE FF
+
+. 0 804C65D 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2B 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 4C BD FF FF
+
+. 0 804C6A4 11
+. 83 C4 10 85 C0 0F 85 AF 00 00 00
+
+. 0 804C6AF 59
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 05 9D DF F2 9C 8F 45 CC D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 17 BE FF FF
+
+. 0 804C6EA 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804C6F1 11
+. 8B 45 CC 83 E0 45 83 F8 40 75 12
+
+. 0 804C6FC 13
+. 83 EC 0C 68 04 48 06 08 E8 07 BD FF FF
+
+fcomip_3 ... ok
+. 0 804C709 5
+. 83 C4 10 EB 60
+
+. 0 804C76E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F70 5
+. E8 01 98 FE FF
+
+. 0 804C776 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 5A 5D F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 23 BC FF FF
+
+. 0 804C7CD 11
+. 83 C4 10 85 C0 0F 85 B0 00 00 00
+
+. 0 804C7D8 65
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 45 9D DF F2 9C 8F 45 CC D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 69 BD FF FF
+
+. 0 804C819 7
+. 83 C4 10 85 C0 74 1C
+
+. 0 804C820 10
+. 8B 45 CC 83 E0 45 85 C0 75 12
+
+. 0 804C82A 13
+. 83 EC 0C 68 3F 48 06 08 E8 D9 BB FF FF
+
+fcomip_4 ... ok
+. 0 804C837 5
+. 83 C4 10 EB 5C
+
+. 0 804C898 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F75 5
+. E8 26 99 FE FF
+
+. 0 804C8A0 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 FF 59 F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 F9 BA FF FF
+
+. 0 804C8F7 11
+. 83 C4 10 85 C0 0F 85 B1 00 00 00
+
+. 0 804C902 65
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 44 9D DF F2 9C 8F 45 CC D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 3F BC FF FF
+
+. 0 804C943 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804C94A 11
+. 8B 45 CC 83 E0 45 83 F8 01 75 12
+
+. 0 804C955 13
+. 83 EC 0C 68 7A 48 06 08 E8 AE BA FF FF
+
+fcomip_5 ... ok
+. 0 804C962 5
+. 83 C4 10 EB 5C
+
+. 0 804C9C3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F7A 5
+. E8 4C 9A FE FF
+
+. 0 804C9CB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 CE B9 FF FF
+
+. 0 804CA22 11
+. 83 C4 10 85 C0 0F 85 B1 00 00 00
+
+. 0 804CA2D 65
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 05 9D DF F2 9C 8F 45 CC D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 14 BB FF FF
+
+. 0 804CA6E 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804CA75 11
+. 8B 45 CC 83 E0 45 83 F8 40 75 12
+
+. 0 804CA80 13
+. 83 EC 0C 68 B5 48 06 08 E8 83 B9 FF FF
+
+fcomip_6 ... ok
+. 0 804CA8D 5
+. 83 C4 10 EB 5C
+
+. 0 804CAEE 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F7F 5
+. E8 72 9B FE FF
+
+. 0 804CAF6 71
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2C 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 B3 B8 FF FF
+
+. 0 804CB3D 11
+. 83 C4 10 85 C0 0F 85 F2 00 00 00
+
+. 0 804CB48 62
+. 9B DD B5 48 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 45 9D DB EA 9C 8F 45 C4 D9 5D D0 D9 F7 D9 5D C8 DD A5 48 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 7B B9 FF FF
+
+. 0 804CB86 7
+. 83 C4 10 85 C0 74 37
+
+. 0 804CB8D 17
+. 83 EC 08 B8 2C 52 9A 44 50 FF 75 C8 E8 63 B9 FF FF
+
+. 0 804CB9E 7
+. 83 C4 10 85 C0 74 1F
+
+. 0 804CBA5 10
+. 8B 45 C4 83 E0 45 85 C0 75 15
+
+. 0 804CBAF 13
+. 83 EC 0C 68 F0 48 06 08 E8 54 B8 FF FF
+
+fucomi_1 ... ok
+. 0 804CBBC 8
+. 83 C4 10 E9 86 00 00 00
+
+. 0 804CC4A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F84 5
+. E8 C9 9C FE FF
+
+. 0 804CC52 71
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2A 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 57 B7 FF FF
+
+. 0 804CC99 11
+. 83 C4 10 85 C0 0F 85 F3 00 00 00
+
+. 0 804CCA4 62
+. 9B DD B5 48 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 44 9D DB EA 9C 8F 45 C4 D9 5D D0 D9 F7 D9 5D C8 DD A5 48 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 1F B8 FF FF
+
+. 0 804CCE2 7
+. 83 C4 10 85 C0 74 38
+
+. 0 804CCE9 17
+. 83 EC 08 B8 2A 52 9A 44 50 FF 75 C8 E8 07 B8 FF FF
+
+. 0 804CCFA 7
+. 83 C4 10 85 C0 74 20
+
+. 0 804CD01 11
+. 8B 45 C4 83 E0 45 83 F8 01 75 15
+
+. 0 804CD0C 13
+. 83 EC 0C 68 2B 49 06 08 E8 F7 B6 FF FF
+
+fucomi_2 ... ok
+. 0 804CD19 8
+. 83 C4 10 E9 86 00 00 00
+
+. 0 804CDA7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F89 5
+. E8 21 9E FE FF
+
+. 0 804CDAF 71
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2B 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 FA B5 FF FF
+
+. 0 804CDF6 11
+. 83 C4 10 85 C0 0F 85 F3 00 00 00
+
+. 0 804CE01 62
+. 9B DD B5 48 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 05 9D DB EA 9C 8F 45 C4 D9 5D D0 D9 F7 D9 5D C8 DD A5 48 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 C2 B6 FF FF
+
+. 0 804CE3F 7
+. 83 C4 10 85 C0 74 38
+
+. 0 804CE46 17
+. 83 EC 08 B8 2B 52 9A 44 50 FF 75 C8 E8 AA B6 FF FF
+
+. 0 804CE57 7
+. 83 C4 10 85 C0 74 20
+
+. 0 804CE5E 11
+. 8B 45 C4 83 E0 45 83 F8 40 75 15
+
+. 0 804CE69 13
+. 83 EC 0C 68 66 49 06 08 E8 9A B5 FF FF
+
+fucomi_3 ... ok
+. 0 804CE76 8
+. 83 C4 10 E9 86 00 00 00
+
+. 0 804CF04 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F8E 5
+. E8 79 9F FE FF
+
+. 0 804CF0C 87
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 5A 5D F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 8D B4 FF FF
+
+. 0 804CF63 11
+. 83 C4 10 85 C0 0F 85 F3 00 00 00
+
+. 0 804CF6E 68
+. 9B DD B5 48 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 45 9D DB EA 9C 8F 45 C4 DD 5D D0 D9 F7 DD 5D C8 DD A5 48 FF FF FF B8 5A 5D F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 D0 B5 FF FF
+
+. 0 804CFB2 7
+. 83 C4 10 85 C0 74 3A
+
+. 0 804CFB9 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 CC FF 75 C8 E8 B2 B5 FF FF
+
+. 0 804CFD0 7
+. 83 C4 10 85 C0 74 1C
+
+. 0 804CFD7 10
+. 8B 45 C4 83 E0 45 85 C0 75 12
+
+. 0 804CFE1 13
+. 83 EC 0C 68 A1 49 06 08 E8 22 B4 FF FF
+
+fucomi_4 ... ok
+. 0 804CFEE 5
+. 83 C4 10 EB 7E
+
+. 0 804D071 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F93 5
+. E8 E1 A0 FE FF
+
+. 0 804D079 87
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 FF 59 F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 20 B3 FF FF
+
+. 0 804D0D0 11
+. 83 C4 10 85 C0 0F 85 F4 00 00 00
+
+. 0 804D0DB 68
+. 9B DD B5 48 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 44 9D DB EA 9C 8F 45 C4 DD 5D D0 D9 F7 DD 5D C8 DD A5 48 FF FF FF B8 FF 59 F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 63 B4 FF FF
+
+. 0 804D11F 7
+. 83 C4 10 85 C0 74 3B
+
+. 0 804D126 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 CC FF 75 C8 E8 45 B4 FF FF
+
+. 0 804D13D 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804D144 11
+. 8B 45 C4 83 E0 45 83 F8 01 75 12
+
+. 0 804D14F 13
+. 83 EC 0C 68 DC 49 06 08 E8 B4 B2 FF FF
+
+fucomi_5 ... ok
+. 0 804D15C 5
+. 83 C4 10 EB 7E
+
+. 0 804D1DF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F98 5
+. E8 4A A2 FE FF
+
+. 0 804D1E7 87
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B2 B1 FF FF
+
+. 0 804D23E 11
+. 83 C4 10 85 C0 0F 85 F4 00 00 00
+
+. 0 804D249 68
+. 9B DD B5 48 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 05 9D DB EA 9C 8F 45 C4 DD 5D D0 D9 F7 DD 5D C8 DD A5 48 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 F5 B2 FF FF
+
+. 0 804D28D 7
+. 83 C4 10 85 C0 74 3B
+
+. 0 804D294 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 CC FF 75 C8 E8 D7 B2 FF FF
+
+. 0 804D2AB 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804D2B2 11
+. 8B 45 C4 83 E0 45 83 F8 40 75 12
+
+. 0 804D2BD 13
+. 83 EC 0C 68 17 4A 06 08 E8 46 B1 FF FF
+
+fucomi_6 ... ok
+. 0 804D2CA 5
+. 83 C4 10 EB 7E
+
+. 0 804D34D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062F9D 5
+. E8 B3 A3 FE FF
+
+. 0 804D355 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2C 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 54 B0 FF FF
+
+. 0 804D39C 11
+. 83 C4 10 85 C0 0F 85 AE 00 00 00
+
+. 0 804D3A7 59
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 45 9D DF EA 9C 8F 45 CC D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2C 52 9A 44 50 FF 75 D0 E8 1F B1 FF FF
+
+. 0 804D3E2 7
+. 83 C4 10 85 C0 74 1C
+
+. 0 804D3E9 10
+. 8B 45 CC 83 E0 45 85 C0 75 12
+
+. 0 804D3F3 13
+. 83 EC 0C 68 52 4A 06 08 E8 10 B0 FF FF
+
+fucomip_1 ... ok
+. 0 804D400 5
+. 83 C4 10 EB 60
+
+. 0 804D465 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FA2 5
+. E8 C6 A4 FE FF
+
+. 0 804D46D 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2A 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 3C AF FF FF
+
+. 0 804D4B4 11
+. 83 C4 10 85 C0 0F 85 AF 00 00 00
+
+. 0 804D4BF 59
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 44 9D DF EA 9C 8F 45 CC D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2A 52 9A 44 50 FF 75 D0 E8 07 B0 FF FF
+
+. 0 804D4FA 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804D501 11
+. 8B 45 CC 83 E0 45 83 F8 01 75 12
+
+. 0 804D50C 13
+. 83 EC 0C 68 90 4A 06 08 E8 F7 AE FF FF
+
+fucomip_2 ... ok
+. 0 804D519 5
+. 83 C4 10 EB 60
+
+. 0 804D57E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FA7 5
+. E8 DA A5 FE FF
+
+. 0 804D586 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 2B 52 9A 44 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 23 AE FF FF
+
+. 0 804D5CD 11
+. 83 C4 10 85 C0 0F 85 AF 00 00 00
+
+. 0 804D5D8 59
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 9C 83 24 24 BA 83 0C 24 05 9D DF EA 9C 8F 45 CC D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 EE AE FF FF
+
+. 0 804D613 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804D61A 11
+. 8B 45 CC 83 E0 45 83 F8 40 75 12
+
+. 0 804D625 13
+. 83 EC 0C 68 CE 4A 06 08 E8 DE AD FF FF
+
+fucomip_3 ... ok
+. 0 804D632 5
+. 83 C4 10 EB 60
+
+. 0 804D697 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FAC 5
+. E8 EE A6 FE FF
+
+. 0 804D69F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 5A 5D F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 FA AC FF FF
+
+. 0 804D6F6 11
+. 83 C4 10 85 C0 0F 85 B0 00 00 00
+
+. 0 804D701 65
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 45 9D DF EA 9C 8F 45 CC D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 40 AE FF FF
+
+. 0 804D742 7
+. 83 C4 10 85 C0 74 1C
+
+. 0 804D749 10
+. 8B 45 CC 83 E0 45 85 C0 75 12
+
+. 0 804D753 13
+. 83 EC 0C 68 0C 4B 06 08 E8 B0 AC FF FF
+
+fucomip_4 ... ok
+. 0 804D760 5
+. 83 C4 10 EB 5C
+
+. 0 804D7C1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FB1 5
+. E8 13 A8 FE FF
+
+. 0 804D7C9 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 FF 59 F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 D0 AB FF FF
+
+. 0 804D820 11
+. 83 C4 10 85 C0 0F 85 B1 00 00 00
+
+. 0 804D82B 65
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 44 9D DF EA 9C 8F 45 CC D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 16 AD FF FF
+
+. 0 804D86C 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804D873 11
+. 8B 45 CC 83 E0 45 83 F8 01 75 12
+
+. 0 804D87E 13
+. 83 EC 0C 68 4A 4B 06 08 E8 85 AB FF FF
+
+fucomip_5 ... ok
+. 0 804D88B 5
+. 83 C4 10 EB 5C
+
+. 0 804D8EC 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FB6 5
+. E8 39 A9 FE FF
+
+. 0 804D8F4 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 A5 AA FF FF
+
+. 0 804D94B 11
+. 83 C4 10 85 C0 0F 85 B1 00 00 00
+
+. 0 804D956 65
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 9C 83 24 24 BA 83 0C 24 05 9D DF EA 9C 8F 45 CC D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 EB AB FF FF
+
+. 0 804D997 7
+. 83 C4 10 85 C0 74 1D
+
+. 0 804D99E 11
+. 8B 45 CC 83 E0 45 83 F8 40 75 12
+
+. 0 804D9A9 13
+. 83 EC 0C 68 88 4B 06 08 E8 5A AA FF FF
+
+fucomip_6 ... ok
+. 0 804D9B6 5
+. 83 C4 10 EB 5C
+
+. 0 804DA17 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FBB 5
+. E8 5F AA FE FF
+
+. 0 804DA1F 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 A0 A9 FF FF
+
+. 0 804DA50 7
+. 83 C4 10 85 C0 75 77
+
+. 0 804DA57 38
+. 9B DD B5 68 FF FF FF D9 45 E0 D9 E0 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 2B 52 9A C4 50 FF 75 D8 E8 84 AA FF FF
+
+. 0 804DA7D 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804DA84 13
+. 83 EC 0C 68 C6 4B 06 08 E8 7F A9 FF FF
+
+fchs_1 ... ok
+. 0 804DA91 5
+. 83 C4 10 EB 48
+
+. 0 804DADE 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FC0 5
+. E8 21 AB FE FF
+
+. 0 804DAE6 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 D9 A8 FF FF
+
+. 0 804DB17 7
+. 83 C4 10 85 C0 75 77
+
+. 0 804DB1E 38
+. 9B DD B5 68 FF FF FF D9 45 E0 D9 E0 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D8 E8 BD A9 FF FF
+
+. 0 804DB44 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804DB4B 13
+. 83 EC 0C 68 FB 4B 06 08 E8 B8 A8 FF FF
+
+fchs_2 ... ok
+. 0 804DB58 5
+. 83 C4 10 EB 48
+
+. 0 804DBA5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FC5 5
+. E8 E3 AB FE FF
+
+. 0 804DBAD 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 59 A4 0C DC BA 29 8C 67 41 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 0A A8 FF FF
+
+. 0 804DBE6 7
+. 83 C4 10 85 C0 75 79
+
+. 0 804DBED 44
+. 9B DD B5 68 FF FF FF DD 45 E0 D9 E0 DD 5D D8 DD A5 68 FF FF FF B8 59 A4 0C DC BA 29 8C 67 C1 52 50 FF 75 DC FF 75 D8 E8 69 A9 FF FF
+
+. 0 804DC19 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804DC20 13
+. 83 EC 0C 68 30 4C 06 08 E8 E3 A7 FF FF
+
+fchs_3 ... ok
+. 0 804DC2D 5
+. 83 C4 10 EB 44
+
+. 0 804DC76 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FCA 5
+. E8 AF AC FE FF
+
+. 0 804DC7E 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 59 A4 0C DC BA 29 8C 67 C1 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 39 A7 FF FF
+
+. 0 804DCB7 7
+. 83 C4 10 85 C0 75 79
+
+. 0 804DCBE 44
+. 9B DD B5 68 FF FF FF DD 45 E0 D9 E0 DD 5D D8 DD A5 68 FF FF FF B8 59 A4 0C DC BA 29 8C 67 41 52 50 FF 75 DC FF 75 D8 E8 98 A8 FF FF
+
+. 0 804DCEA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804DCF1 13
+. 83 EC 0C 68 65 4C 06 08 E8 12 A7 FF FF
+
+fchs_4 ... ok
+. 0 804DCFE 5
+. 83 C4 10 EB 44
+
+. 0 804DD47 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FCF 5
+. E8 7B AD FE FF
+
+. 0 804DD4F 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 61 A6 FF FF
+
+. 0 804DD8F 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804DD96 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 75 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 44 A7 FF FF
+
+. 0 804DDBD 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804DDC4 13
+. 83 EC 0C 68 9A 4C 06 08 E8 3F A6 FF FF
+
+fdivs_1 ... ok
+. 0 804DDD1 5
+. 83 C4 10 EB 48
+
+. 0 804DE1E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FD4 5
+. E8 4D AE FE FF
+
+. 0 804DE26 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 8A A5 FF FF
+
+. 0 804DE66 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804DE6D 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 75 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 6D A6 FF FF
+
+. 0 804DE94 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804DE9B 13
+. 83 EC 0C 68 D2 4C 06 08 E8 68 A5 FF FF
+
+fdivs_2 ... ok
+. 0 804DEA8 5
+. 83 C4 10 EB 48
+
+. 0 804DEF5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FD9 5
+. E8 1F AF FE FF
+
+. 0 804DEFD 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B3 A4 FF FF
+
+. 0 804DF3D 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804DF44 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 75 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 96 A5 FF FF
+
+. 0 804DF6B 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804DF72 13
+. 83 EC 0C 68 0A 4D 06 08 E8 91 A4 FF FF
+
+fdivs_3 ... ok
+. 0 804DF7F 5
+. 83 C4 10 EB 48
+
+. 0 804DFCC 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FDE 5
+. E8 F1 AF FE FF
+
+. 0 804DFD4 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 DC A3 FF FF
+
+. 0 804E014 7
+. 83 C4 10 85 C0 75 78
+
+. 0 804E01B 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 75 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 BF A4 FF FF
+
+. 0 804E042 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E049 13
+. 83 EC 0C 68 42 4D 06 08 E8 BA A3 FF FF
+
+fdivs_4 ... ok
+. 0 804E056 5
+. 83 C4 10 EB 48
+
+. 0 804E0A3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FE3 5
+. E8 C3 B0 FE FF
+
+. 0 804E0AB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 EE A2 FF FF
+
+. 0 804E102 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804E109 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 75 D8 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 4C A4 FF FF
+
+. 0 80485BB 84
+. DD 45 F8 DC 65 F0 DD 5D D0 8B 45 D0 8B 55 D4 81 E2 FF FF FF 7F 89 45 E0 89 55 E4 8B 45 F8 8B 55 FC 89 45 D0 89 55 D4 DD 45 D0 D9 E1 DD 05 20 38 06 08 DE C9 DD 5D D8 B8 00 00 00 00 BA 00 00 28 C0 52 50 B8 00 00 00 00 BA 00 00 00 40 52 50 E8 D1 FD FF FF
+
+. 0 804860F 20
+. 83 C4 10 DC 4D D8 DD 45 E0 D9 C9 DA E9 DF E0 F6 C4 45 74 02
+
+. 0 804E136 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E13D 13
+. 83 EC 0C 68 7A 4D 06 08 E8 C6 A2 FF FF
+
+fdivl_1 ... ok
+. 0 804E14A 5
+. 83 C4 10 EB 44
+
+. 0 804E193 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FE8 5
+. E8 AE B1 FE FF
+
+. 0 804E19B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 FE A1 FF FF
+
+. 0 804E1F2 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804E1F9 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 75 D8 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 5C A3 FF FF
+
+. 0 804E226 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E22D 13
+. 83 EC 0C 68 B2 4D 06 08 E8 D6 A1 FF FF
+
+fdivl_2 ... ok
+. 0 804E23A 5
+. 83 C4 10 EB 44
+
+. 0 804E283 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FED 5
+. E8 99 B2 FE FF
+
+. 0 804E28B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 0E A1 FF FF
+
+. 0 804E2E2 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804E2E9 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 75 D8 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 6C A2 FF FF
+
+. 0 804E316 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E31D 13
+. 83 EC 0C 68 EA 4D 06 08 E8 E6 A0 FF FF
+
+fdivl_3 ... ok
+. 0 804E32A 5
+. 83 C4 10 EB 44
+
+. 0 804E373 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FF2 5
+. E8 84 B3 FE FF
+
+. 0 804E37B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1E A0 FF FF
+
+. 0 804E3D2 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804E3D9 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 75 D8 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 7C A1 FF FF
+
+. 0 804E406 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E40D 13
+. 83 EC 0C 68 22 4E 06 08 E8 F6 9F FF FF
+
+fdivl_4 ... ok
+. 0 804E41A 5
+. 83 C4 10 EB 44
+
+. 0 804E463 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FF7 5
+. E8 6F B4 FE FF
+
+. 0 804E46B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 3E 9F FF FF
+
+. 0 804E4B2 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804E4BD 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC F2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 15 A0 FF FF
+
+. 0 804E4EC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E4F3 13
+. 83 EC 0C 68 5A 4E 06 08 E8 10 9F FF FF
+
+fdiv_1 ... ok
+. 0 804E500 5
+. 83 C4 10 EB 48
+
+. 0 804E54D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8062FFC 5
+. E8 54 B5 FE FF
+
+. 0 804E555 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 54 9E FF FF
+
+. 0 804E59C 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804E5A7 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC F2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 2B 9F FF FF
+
+. 0 804E5D6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E5DD 13
+. 83 EC 0C 68 8F 4E 06 08 E8 26 9E FF FF
+
+fdiv_2 ... ok
+. 0 804E5EA 5
+. 83 C4 10 EB 48
+
+. 0 804E637 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063001 5
+. E8 39 B6 FE FF
+
+. 0 804E63F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 6A 9D FF FF
+
+. 0 804E686 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804E691 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC F2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 41 9E FF FF
+
+. 0 804E6C0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E6C7 13
+. 83 EC 0C 68 C4 4E 06 08 E8 3C 9D FF FF
+
+fdiv_3 ... ok
+. 0 804E6D4 5
+. 83 C4 10 EB 48
+
+. 0 804E721 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063006 5
+. E8 1E B7 FE FF
+
+. 0 804E729 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 80 9C FF FF
+
+. 0 804E770 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804E77B 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC F2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 57 9D FF FF
+
+. 0 804E7AA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E7B1 13
+. 83 EC 0C 68 F9 4E 06 08 E8 52 9C FF FF
+
+fdiv_4 ... ok
+. 0 804E7BE 5
+. 83 C4 10 EB 48
+
+. 0 804E80B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806300B 5
+. E8 03 B8 FE FF
+
+. 0 804E813 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 86 9B FF FF
+
+. 0 804E86A 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804E875 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC F2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 D8 9C FF FF
+
+. 0 804E8AA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E8B1 13
+. 83 EC 0C 68 2E 4F 06 08 E8 52 9B FF FF
+
+fdiv_5 ... ok
+. 0 804E8BE 5
+. 83 C4 10 EB 44
+
+. 0 804E907 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063010 5
+. E8 FA B8 FE FF
+
+. 0 804E90F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 8A 9A FF FF
+
+. 0 804E966 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804E971 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC F2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 DC 9B FF FF
+
+. 0 804E9A6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E9AD 13
+. 83 EC 0C 68 63 4F 06 08 E8 56 9A FF FF
+
+fdiv_6 ... ok
+. 0 804E9BA 5
+. 83 C4 10 EB 44
+
+. 0 804EA03 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063015 5
+. E8 F1 B9 FE FF
+
+. 0 804EA0B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 8E 99 FF FF
+
+. 0 804EA62 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804EA6D 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC F2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 E0 9A FF FF
+
+. 0 804EAA2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804EAA9 13
+. 83 EC 0C 68 98 4F 06 08 E8 5A 99 FF FF
+
+fdiv_7 ... ok
+. 0 804EAB6 5
+. 83 C4 10 EB 44
+
+. 0 804EAFF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806301A 5
+. E8 E8 BA FE FF
+
+. 0 804EB07 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 92 98 FF FF
+
+. 0 804EB5E 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804EB69 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC F2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 E4 99 FF FF
+
+. 0 804EB9E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804EBA5 13
+. 83 EC 0C 68 CD 4F 06 08 E8 5E 98 FF FF
+
+fdiv_8 ... ok
+. 0 804EBB2 5
+. 83 C4 10 EB 44
+
+. 0 804EBFB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806301F 5
+. E8 DF BB FE FF
+
+. 0 804EC03 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 A6 97 FF FF
+
+. 0 804EC4A 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804EC51 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 F2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 85 98 FF FF
+
+. 0 804EC7C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804EC83 13
+. 83 EC 0C 68 02 50 06 08 E8 80 97 FF FF
+
+fdiv_9 ... ok
+. 0 804EC90 5
+. 83 C4 10 EB 48
+
+. 0 804ECDD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063024 5
+. E8 BC BC FE FF
+
+. 0 804ECE5 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 C4 96 FF FF
+
+. 0 804ED2C 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804ED33 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 F2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 A3 97 FF FF
+
+. 0 804ED5E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804ED65 13
+. 83 EC 0C 68 37 50 06 08 E8 9E 96 FF FF
+
+fdiv_10 ... ok
+. 0 804ED72 5
+. 83 C4 10 EB 48
+
+. 0 804EDBF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063029 5
+. E8 99 BD FE FF
+
+. 0 804EDC7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 E2 95 FF FF
+
+. 0 804EE0E 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804EE15 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 F2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 C1 96 FF FF
+
+. 0 804EE40 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804EE47 13
+. 83 EC 0C 68 6F 50 06 08 E8 BC 95 FF FF
+
+fdiv_11 ... ok
+. 0 804EE54 5
+. 83 C4 10 EB 48
+
+. 0 804EEA1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806302E 5
+. E8 76 BE FE FF
+
+. 0 804EEA9 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 00 95 FF FF
+
+. 0 804EEF0 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804EEF7 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 F2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 DF 95 FF FF
+
+. 0 804EF22 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804EF29 13
+. 83 EC 0C 68 A7 50 06 08 E8 DA 94 FF FF
+
+fdiv_12 ... ok
+. 0 804EF36 5
+. 83 C4 10 EB 48
+
+. 0 804EF83 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063033 5
+. E8 53 BF FE FF
+
+. 0 804EF8B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 0E 94 FF FF
+
+. 0 804EFE2 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804EFE9 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 F2 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 68 95 FF FF
+
+. 0 804F01A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F021 13
+. 83 EC 0C 68 DF 50 06 08 E8 E2 93 FF FF
+
+fdiv_13 ... ok
+. 0 804F02E 5
+. 83 C4 10 EB 44
+
+. 0 804F077 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063038 5
+. E8 42 C0 FE FF
+
+. 0 804F07F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1A 93 FF FF
+
+. 0 804F0D6 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804F0DD 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 F2 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 74 94 FF FF
+
+. 0 804F10E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F115 13
+. 83 EC 0C 68 17 51 06 08 E8 EE 92 FF FF
+
+fdiv_14 ... ok
+. 0 804F122 5
+. 83 C4 10 EB 44
+
+. 0 804F16B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806303D 5
+. E8 31 C1 FE FF
+
+. 0 804F173 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 26 92 FF FF
+
+. 0 804F1CA 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804F1D1 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 F2 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 80 93 FF FF
+
+. 0 804F202 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F209 13
+. 83 EC 0C 68 4F 51 06 08 E8 FA 91 FF FF
+
+fdiv_15 ... ok
+. 0 804F216 5
+. 83 C4 10 EB 44
+
+. 0 804F25F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063042 5
+. E8 20 C2 FE FF
+
+. 0 804F267 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 32 91 FF FF
+
+. 0 804F2BE 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804F2C5 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 F2 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 8C 92 FF FF
+
+. 0 804F2F6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F2FD 13
+. 83 EC 0C 68 87 51 06 08 E8 06 91 FF FF
+
+fdiv_16 ... ok
+. 0 804F30A 5
+. 83 C4 10 EB 44
+
+. 0 804F353 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063047 5
+. E8 0F C3 FE FF
+
+. 0 804F35B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 4E 90 FF FF
+
+. 0 804F3A2 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804F3A9 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE F2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 2B 91 FF FF
+
+. 0 804F3D6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F3DD 13
+. 83 EC 0C 68 BF 51 06 08 E8 26 90 FF FF
+
+fdivp_1 ... ok
+. 0 804F3EA 5
+. 83 C4 10 EB 48
+
+. 0 804F437 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806304C 5
+. E8 EE C3 FE FF
+
+. 0 804F43F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 6A 8F FF FF
+
+. 0 804F486 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804F48D 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE F2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 47 90 FF FF
+
+. 0 804F4BA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F4C1 13
+. 83 EC 0C 68 F7 51 06 08 E8 42 8F FF FF
+
+fdivp_2 ... ok
+. 0 804F4CE 5
+. 83 C4 10 EB 48
+
+. 0 804F51B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063051 5
+. E8 CD C4 FE FF
+
+. 0 804F523 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 86 8E FF FF
+
+. 0 804F56A 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804F571 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE F2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 63 8F FF FF
+
+. 0 804F59E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F5A5 13
+. 83 EC 0C 68 2F 52 06 08 E8 5E 8E FF FF
+
+fdivp_3 ... ok
+. 0 804F5B2 5
+. 83 C4 10 EB 48
+
+. 0 804F5FF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063056 5
+. E8 AC C5 FE FF
+
+. 0 804F607 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 A2 8D FF FF
+
+. 0 804F64E 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 804F655 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE F2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 7F 8E FF FF
+
+. 0 804F682 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F689 13
+. 83 EC 0C 68 67 52 06 08 E8 7A 8D FF FF
+
+fdivp_4 ... ok
+. 0 804F696 5
+. 83 C4 10 EB 48
+
+. 0 804F6E3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806305B 5
+. E8 8B C6 FE FF
+
+. 0 804F6EB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 AE 8C FF FF
+
+. 0 804F742 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804F74D 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE F2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 02 8E FF FF
+
+. 0 804F780 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F787 13
+. 83 EC 0C 68 9F 52 06 08 E8 7C 8C FF FF
+
+fdivp_5 ... ok
+. 0 804F794 5
+. 83 C4 10 EB 44
+
+. 0 804F7DD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063060 5
+. E8 80 C7 FE FF
+
+. 0 804F7E5 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B4 8B FF FF
+
+. 0 804F83C 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804F847 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE F2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 08 8D FF FF
+
+. 0 804F87A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F881 13
+. 83 EC 0C 68 D7 52 06 08 E8 82 8B FF FF
+
+fdivp_6 ... ok
+. 0 804F88E 5
+. 83 C4 10 EB 44
+
+. 0 804F8D7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063065 5
+. E8 75 C8 FE FF
+
+. 0 804F8DF 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 BA 8A FF FF
+
+. 0 804F936 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804F941 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE F2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 0E 8C FF FF
+
+. 0 804F974 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F97B 13
+. 83 EC 0C 68 0F 53 06 08 E8 88 8A FF FF
+
+fdivp_7 ... ok
+. 0 804F988 5
+. 83 C4 10 EB 44
+
+. 0 804F9D1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806306A 5
+. E8 6A C9 FE FF
+
+. 0 804F9D9 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 C0 89 FF FF
+
+. 0 804FA30 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 804FA3B 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE F2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 14 8B FF FF
+
+. 0 804FA6E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804FA75 13
+. 83 EC 0C 68 47 53 06 08 E8 8E 89 FF FF
+
+fdivp_8 ... ok
+. 0 804FA82 5
+. 83 C4 10 EB 44
+
+. 0 804FACB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806306F 5
+. E8 5F CA FE FF
+
+. 0 804FAD3 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 D6 88 FF FF
+
+. 0 804FB1A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804FB21 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE F1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 B7 89 FF FF
+
+. 0 804FB4A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804FB51 13
+. 83 EC 0C 68 7F 53 06 08 E8 B2 88 FF FF
+
+fdivp_9 ... ok
+. 0 804FB5E 5
+. 83 C4 10 EB 48
+
+. 0 804FBAB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063074 5
+. E8 3A CB FE FF
+
+. 0 804FBB3 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 F6 87 FF FF
+
+. 0 804FBFA 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804FC01 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE F1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 D7 88 FF FF
+
+. 0 804FC2A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804FC31 13
+. 83 EC 0C 68 B7 53 06 08 E8 D2 87 FF FF
+
+fdivp_10 ... ok
+. 0 804FC3E 5
+. 83 C4 10 EB 48
+
+. 0 804FC8B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063079 5
+. E8 15 CC FE FF
+
+. 0 804FC93 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 16 87 FF FF
+
+. 0 804FCDA 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804FCE1 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE F1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 F7 87 FF FF
+
+. 0 804FD0A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804FD11 13
+. 83 EC 0C 68 F2 53 06 08 E8 F2 86 FF FF
+
+fdivp_11 ... ok
+. 0 804FD1E 5
+. 83 C4 10 EB 48
+
+. 0 804FD6B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806307E 5
+. E8 F0 CC FE FF
+
+. 0 804FD73 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 36 86 FF FF
+
+. 0 804FDBA 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804FDC1 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE F1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 17 87 FF FF
+
+. 0 804FDEA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804FDF1 13
+. 83 EC 0C 68 2D 54 06 08 E8 12 86 FF FF
+
+fdivp_12 ... ok
+. 0 804FDFE 5
+. 83 C4 10 EB 48
+
+. 0 804FE4B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063083 5
+. E8 CB CD FE FF
+
+. 0 804FE53 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 46 85 FF FF
+
+. 0 804FEAA 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804FEB1 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE F1 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 A2 86 FF FF
+
+. 0 804FEE0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804FEE7 13
+. 83 EC 0C 68 68 54 06 08 E8 1C 85 FF FF
+
+fdivp_13 ... ok
+. 0 804FEF4 5
+. 83 C4 10 EB 44
+
+. 0 804FF3D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063088 5
+. E8 B8 CE FE FF
+
+. 0 804FF45 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 54 84 FF FF
+
+. 0 804FF9C 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804FFA3 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE F1 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 B0 85 FF FF
+
+. 0 804FFD2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804FFD9 13
+. 83 EC 0C 68 A3 54 06 08 E8 2A 84 FF FF
+
+fdivp_14 ... ok
+. 0 804FFE6 5
+. 83 C4 10 EB 44
+
+. 0 805002F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806308D 5
+. E8 A5 CF FE FF
+
+. 0 8050037 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 62 83 FF FF
+
+. 0 805008E 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8050095 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE F1 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 BE 84 FF FF
+
+. 0 80500C4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80500CB 13
+. 83 EC 0C 68 DE 54 06 08 E8 38 83 FF FF
+
+fdivp_15 ... ok
+. 0 80500D8 5
+. 83 C4 10 EB 44
+
+. 0 8050121 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063092 5
+. E8 92 D0 FE FF
+
+. 0 8050129 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 70 82 FF FF
+
+. 0 8050180 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8050187 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE F1 DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 CC 83 FF FF
+
+. 0 80501B6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80501BD 13
+. 83 EC 0C 68 19 55 06 08 E8 46 82 FF FF
+
+fdivp_16 ... ok
+. 0 80501CA 5
+. 83 C4 10 EB 44
+
+. 0 8050213 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063097 5
+. E8 7F D1 FE FF
+
+. 0 805021B 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 98 81 FF FF
+
+. 0 8050258 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805025F 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 75 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 08 49 92 3E 50 FF 75 D0 E8 7B 82 FF FF
+
+. 0 8050286 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805028D 13
+. 83 EC 0C 68 54 55 06 08 E8 76 81 FF FF
+
+fidivs_1 ... ok
+. 0 805029A 5
+. 83 C4 10 EB 48
+
+. 0 80502E7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806309C 5
+. E8 4E D2 FE FF
+
+. 0 80502EF 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 C4 80 FF FF
+
+. 0 805032C 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8050333 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 75 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 08 49 92 BE 50 FF 75 D0 E8 A7 81 FF FF
+
+. 0 805035A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050361 13
+. 83 EC 0C 68 8F 55 06 08 E8 A2 80 FF FF
+
+fidivs_2 ... ok
+. 0 805036E 5
+. 83 C4 10 EB 48
+
+. 0 80503BB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630A1 5
+. E8 1D D3 FE FF
+
+. 0 80503C3 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 F0 7F FF FF
+
+. 0 8050400 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8050407 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 75 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 08 49 92 BE 50 FF 75 D0 E8 D3 80 FF FF
+
+. 0 805042E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050435 13
+. 83 EC 0C 68 CA 55 06 08 E8 CE 7F FF FF
+
+fidivs_3 ... ok
+. 0 8050442 5
+. 83 C4 10 EB 48
+
+. 0 805048F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630A6 5
+. E8 EC D3 FE FF
+
+. 0 8050497 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 1C 7F FF FF
+
+. 0 80504D4 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80504DB 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 75 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 08 49 92 3E 50 FF 75 D0 E8 FF 7F FF FF
+
+. 0 8050502 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050509 13
+. 83 EC 0C 68 05 56 06 08 E8 FA 7E FF FF
+
+fidivs_4 ... ok
+. 0 8050516 5
+. 83 C4 10 EB 48
+
+. 0 8050563 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630AB 5
+. E8 BB D4 FE FF
+
+. 0 805056B 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 40 7E FF FF
+
+. 0 80505B0 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 80505B7 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 75 DE DD 5D D0 DD A5 58 FF FF FF B8 2A 8D EC 3D BA 6A DB 71 40 52 50 FF 75 D4 FF 75 D0 E8 9E 7F FF FF
+
+. 0 80505E4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80505EB 13
+. 83 EC 0C 68 40 56 06 08 E8 18 7E FF FF
+
+fidivs_5 ... ok
+. 0 80505F8 5
+. 83 C4 10 EB 44
+
+. 0 8050641 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630B0 5
+. E8 94 D5 FE FF
+
+. 0 8050649 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 62 7D FF FF
+
+. 0 805068E 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8050695 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 75 DE DD 5D D0 DD A5 58 FF FF FF B8 2A 8D EC 3D BA 6A DB 71 C0 52 50 FF 75 D4 FF 75 D0 E8 C0 7E FF FF
+
+. 0 80506C2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80506C9 13
+. 83 EC 0C 68 7B 56 06 08 E8 3A 7D FF FF
+
+fidivs_6 ... ok
+. 0 80506D6 5
+. 83 C4 10 EB 44
+
+. 0 805071F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630B5 5
+. E8 6D D6 FE FF
+
+. 0 8050727 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 84 7C FF FF
+
+. 0 805076C 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8050773 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 75 DE DD 5D D0 DD A5 58 FF FF FF B8 2A 8D EC 3D BA 6A DB 71 C0 52 50 FF 75 D4 FF 75 D0 E8 E2 7D FF FF
+
+. 0 80507A0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80507A7 13
+. 83 EC 0C 68 B6 56 06 08 E8 5C 7C FF FF
+
+fidivs_7 ... ok
+. 0 80507B4 5
+. 83 C4 10 EB 44
+
+. 0 80507FD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630BA 5
+. E8 46 D7 FE FF
+
+. 0 8050805 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 A6 7B FF FF
+
+. 0 805084A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8050851 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 75 DE DD 5D D0 DD A5 58 FF FF FF B8 2A 8D EC 3D BA 6A DB 71 40 52 50 FF 75 D4 FF 75 D0 E8 04 7D FF FF
+
+. 0 805087E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050885 13
+. 83 EC 0C 68 F1 56 06 08 E8 7E 7B FF FF
+
+fidivs_8 ... ok
+. 0 8050892 5
+. 83 C4 10 EB 44
+
+. 0 80508DB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630BF 5
+. E8 1F D8 FE FF
+
+. 0 80508E3 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 CE 7A FF FF
+
+. 0 8050922 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8050929 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 75 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 7E 4C 6C 37 50 FF 75 D0 E8 B1 7B FF FF
+
+. 0 8050950 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050957 13
+. 83 EC 0C 68 2C 57 06 08 E8 AC 7A FF FF
+
+fidivl_1 ... ok
+. 0 8050964 5
+. 83 C4 10 EB 48
+
+. 0 80509B1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630C4 5
+. E8 F0 D8 FE FF
+
+. 0 80509B9 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 F8 79 FF FF
+
+. 0 80509F8 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80509FF 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 75 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 7E 4C 6C B7 50 FF 75 D0 E8 DB 7A FF FF
+
+. 0 8050A26 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050A2D 13
+. 83 EC 0C 68 67 57 06 08 E8 D6 79 FF FF
+
+fidivl_2 ... ok
+. 0 8050A3A 5
+. 83 C4 10 EB 48
+
+. 0 8050A87 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630C9 5
+. E8 C1 D9 FE FF
+
+. 0 8050A8F 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 22 79 FF FF
+
+. 0 8050ACE 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8050AD5 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 75 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 7E 4C 6C B7 50 FF 75 D0 E8 05 7A FF FF
+
+. 0 8050AFC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050B03 13
+. 83 EC 0C 68 A2 57 06 08 E8 00 79 FF FF
+
+fidivl_3 ... ok
+. 0 8050B10 5
+. 83 C4 10 EB 48
+
+. 0 8050B5D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630CE 5
+. E8 92 DA FE FF
+
+. 0 8050B65 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 4C 78 FF FF
+
+. 0 8050BA4 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8050BAB 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 75 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 7E 4C 6C 37 50 FF 75 D0 E8 2F 79 FF FF
+
+. 0 8050BD2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050BD9 13
+. 83 EC 0C 68 DD 57 06 08 E8 2A 78 FF FF
+
+fidivl_4 ... ok
+. 0 8050BE6 5
+. 83 C4 10 EB 48
+
+. 0 8050C33 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630D3 5
+. E8 63 DB FE FF
+
+. 0 8050C3B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC F1 FB 09 00 83 EC 08 6A 01 68 40 A1 06 08 E8 6E 77 FF FF
+
+. 0 8050C82 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8050C89 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 75 DC DD 5D D0 DD A5 58 FF FF FF B8 17 FD 2E 07 BA 4D 30 FE 3F 52 50 FF 75 D4 FF 75 D0 E8 CC 78 FF FF
+
+. 0 8050CB6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050CBD 13
+. 83 EC 0C 68 18 58 06 08 E8 46 77 FF FF
+
+fidivl_5 ... ok
+. 0 8050CCA 5
+. 83 C4 10 EB 44
+
+. 0 8050D13 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630D8 5
+. E8 3E DC FE FF
+
+. 0 8050D1B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC F1 FB 09 00 83 EC 08 6A 01 68 40 A1 06 08 E8 8E 76 FF FF
+
+. 0 8050D62 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8050D69 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 75 DC DD 5D D0 DD A5 58 FF FF FF B8 17 FD 2E 07 BA 4D 30 FE BF 52 50 FF 75 D4 FF 75 D0 E8 EC 77 FF FF
+
+. 0 8050D96 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050D9D 13
+. 83 EC 0C 68 53 58 06 08 E8 66 76 FF FF
+
+fidivl_6 ... ok
+. 0 8050DAA 5
+. 83 C4 10 EB 44
+
+. 0 8050DF3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630DD 5
+. E8 19 DD FE FF
+
+. 0 8050DFB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 0F 04 F6 FF 83 EC 08 6A 01 68 40 A1 06 08 E8 AE 75 FF FF
+
+. 0 8050E42 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8050E49 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 75 DC DD 5D D0 DD A5 58 FF FF FF B8 17 FD 2E 07 BA 4D 30 FE BF 52 50 FF 75 D4 FF 75 D0 E8 0C 77 FF FF
+
+. 0 8050E76 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050E7D 13
+. 83 EC 0C 68 8E 58 06 08 E8 86 75 FF FF
+
+fidivl_7 ... ok
+. 0 8050E8A 5
+. 83 C4 10 EB 44
+
+. 0 8050ED3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630E2 5
+. E8 F4 DD FE FF
+
+. 0 8050EDB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 0F 04 F6 FF 83 EC 08 6A 01 68 40 A1 06 08 E8 CE 74 FF FF
+
+. 0 8050F22 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8050F29 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 75 DC DD 5D D0 DD A5 58 FF FF FF B8 17 FD 2E 07 BA 4D 30 FE 3F 52 50 FF 75 D4 FF 75 D0 E8 2C 76 FF FF
+
+. 0 8050F56 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8050F5D 13
+. 83 EC 0C 68 C9 58 06 08 E8 A6 74 FF FF
+
+fidivl_8 ... ok
+. 0 8050F6A 5
+. 83 C4 10 EB 44
+
+. 0 8050FB3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630E7 5
+. E8 CF DE FE FF
+
+. 0 8050FBB 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 F5 73 FF FF
+
+. 0 8050FFB 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8051002 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 7D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 D8 74 FF FF
+
+. 0 8051029 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051030 13
+. 83 EC 0C 68 04 59 06 08 E8 D3 73 FF FF
+
+fdivrs_1 ... ok
+. 0 805103D 5
+. 83 C4 10 EB 48
+
+. 0 805108A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630EC 5
+. E8 A1 DF FE FF
+
+. 0 8051092 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1E 73 FF FF
+
+. 0 80510D2 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80510D9 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 7D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 01 74 FF FF
+
+. 0 8051100 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051107 13
+. 83 EC 0C 68 3F 59 06 08 E8 FC 72 FF FF
+
+fdivrs_2 ... ok
+. 0 8051114 5
+. 83 C4 10 EB 48
+
+. 0 8051161 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630F1 5
+. E8 73 E0 FE FF
+
+. 0 8051169 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 47 72 FF FF
+
+. 0 80511A9 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80511B0 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 7D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 2A 73 FF FF
+
+. 0 80511D7 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80511DE 13
+. 83 EC 0C 68 7A 59 06 08 E8 25 72 FF FF
+
+fdivrs_3 ... ok
+. 0 80511EB 5
+. 83 C4 10 EB 48
+
+. 0 8051238 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630F6 5
+. E8 45 E1 FE FF
+
+. 0 8051240 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 70 71 FF FF
+
+. 0 8051280 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8051287 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 7D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 53 72 FF FF
+
+. 0 80512AE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80512B5 13
+. 83 EC 0C 68 B5 59 06 08 E8 4E 71 FF FF
+
+fdivrs_4 ... ok
+. 0 80512C2 5
+. 83 C4 10 EB 48
+
+. 0 805130F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80630FB 5
+. E8 17 E2 FE FF
+
+. 0 8051317 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 82 70 FF FF
+
+. 0 805136E 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8051375 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 7D D8 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 E0 71 FF FF
+
+. 0 80513A2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80513A9 13
+. 83 EC 0C 68 F0 59 06 08 E8 5A 70 FF FF
+
+fdivrl_1 ... ok
+. 0 80513B6 5
+. 83 C4 10 EB 44
+
+. 0 80513FF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063100 5
+. E8 02 E3 FE FF
+
+. 0 8051407 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 92 6F FF FF
+
+. 0 805145E 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8051465 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 7D D8 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 F0 70 FF FF
+
+. 0 8051492 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051499 13
+. 83 EC 0C 68 2B 5A 06 08 E8 6A 6F FF FF
+
+fdivrl_2 ... ok
+. 0 80514A6 5
+. 83 C4 10 EB 44
+
+. 0 80514EF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063105 5
+. E8 ED E3 FE FF
+
+. 0 80514F7 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 A2 6E FF FF
+
+. 0 805154E 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8051555 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 7D D8 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 00 70 FF FF
+
+. 0 8051582 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051589 13
+. 83 EC 0C 68 66 5A 06 08 E8 7A 6E FF FF
+
+fdivrl_3 ... ok
+. 0 8051596 5
+. 83 C4 10 EB 44
+
+. 0 80515DF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806310A 5
+. E8 D8 E4 FE FF
+
+. 0 80515E7 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B2 6D FF FF
+
+. 0 805163E 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8051645 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 7D D8 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 10 6F FF FF
+
+. 0 8051672 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051679 13
+. 83 EC 0C 68 A1 5A 06 08 E8 8A 6D FF FF
+
+fdivrl_4 ... ok
+. 0 8051686 5
+. 83 C4 10 EB 44
+
+. 0 80516CF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806310F 5
+. E8 C3 E5 FE FF
+
+. 0 80516D7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 D2 6C FF FF
+
+. 0 805171E 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8051729 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC FA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 A9 6D FF FF
+
+. 0 8051758 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805175F 13
+. 83 EC 0C 68 DC 5A 06 08 E8 A4 6C FF FF
+
+fdivr_1 ... ok
+. 0 805176C 5
+. 83 C4 10 EB 48
+
+. 0 80517B9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063114 5
+. E8 A8 E6 FE FF
+
+. 0 80517C1 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 E8 6B FF FF
+
+. 0 8051808 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8051813 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC FA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 BF 6C FF FF
+
+. 0 8051842 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051849 13
+. 83 EC 0C 68 14 5B 06 08 E8 BA 6B FF FF
+
+fdivr_2 ... ok
+. 0 8051856 5
+. 83 C4 10 EB 48
+
+. 0 80518A3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063119 5
+. E8 8D E7 FE FF
+
+. 0 80518AB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 FE 6A FF FF
+
+. 0 80518F2 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 80518FD 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC FA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 D5 6B FF FF
+
+. 0 805192C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051933 13
+. 83 EC 0C 68 4C 5B 06 08 E8 D0 6A FF FF
+
+fdivr_3 ... ok
+. 0 8051940 5
+. 83 C4 10 EB 48
+
+. 0 805198D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806311E 5
+. E8 72 E8 FE FF
+
+. 0 8051995 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 14 6A FF FF
+
+. 0 80519DC 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 80519E7 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC FA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 EB 6A FF FF
+
+. 0 8051A16 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051A1D 13
+. 83 EC 0C 68 84 5B 06 08 E8 E6 69 FF FF
+
+fdivr_4 ... ok
+. 0 8051A2A 5
+. 83 C4 10 EB 48
+
+. 0 8051A77 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063123 5
+. E8 57 E9 FE FF
+
+. 0 8051A7F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1A 69 FF FF
+
+. 0 8051AD6 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8051AE1 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC FA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 6C 6A FF FF
+
+. 0 8051B16 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051B1D 13
+. 83 EC 0C 68 BC 5B 06 08 E8 E6 68 FF FF
+
+fdivr_5 ... ok
+. 0 8051B2A 5
+. 83 C4 10 EB 44
+
+. 0 8051B73 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063128 5
+. E8 4E EA FE FF
+
+. 0 8051B7B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1E 68 FF FF
+
+. 0 8051BD2 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8051BDD 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC FA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 70 69 FF FF
+
+. 0 8051C12 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051C19 13
+. 83 EC 0C 68 F4 5B 06 08 E8 EA 67 FF FF
+
+fdivr_6 ... ok
+. 0 8051C26 5
+. 83 C4 10 EB 44
+
+. 0 8051C6F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806312D 5
+. E8 45 EB FE FF
+
+. 0 8051C77 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 22 67 FF FF
+
+. 0 8051CCE 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8051CD9 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC FA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 74 68 FF FF
+
+. 0 8051D0E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051D15 13
+. 83 EC 0C 68 2C 5C 06 08 E8 EE 66 FF FF
+
+fdivr_7 ... ok
+. 0 8051D22 5
+. 83 C4 10 EB 44
+
+. 0 8051D6B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063132 5
+. E8 3C EC FE FF
+
+. 0 8051D73 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 26 66 FF FF
+
+. 0 8051DCA 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8051DD5 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC FA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 78 67 FF FF
+
+. 0 8051E0A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051E11 13
+. 83 EC 0C 68 64 5C 06 08 E8 F2 65 FF FF
+
+fdivr_8 ... ok
+. 0 8051E1E 5
+. 83 C4 10 EB 44
+
+. 0 8051E67 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063137 5
+. E8 33 ED FE FF
+
+. 0 8051E6F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 3A 65 FF FF
+
+. 0 8051EB6 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8051EBD 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 FA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 19 66 FF FF
+
+. 0 8051EE8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051EEF 13
+. 83 EC 0C 68 9C 5C 06 08 E8 14 65 FF FF
+
+fdivr_9 ... ok
+. 0 8051EFC 5
+. 83 C4 10 EB 48
+
+. 0 8051F49 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806313C 5
+. E8 10 EE FE FF
+
+. 0 8051F51 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 58 64 FF FF
+
+. 0 8051F98 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8051F9F 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 FA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 37 65 FF FF
+
+. 0 8051FCA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8051FD1 13
+. 83 EC 0C 68 D4 5C 06 08 E8 32 64 FF FF
+
+fdivr_10 ... ok
+. 0 8051FDE 5
+. 83 C4 10 EB 48
+
+. 0 805202B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063141 5
+. E8 ED EE FE FF
+
+. 0 8052033 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 76 63 FF FF
+
+. 0 805207A 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8052081 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 FA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 BE 50 FF 75 D0 E8 55 64 FF FF
+
+. 0 80520AC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80520B3 13
+. 83 EC 0C 68 0F 5D 06 08 E8 50 63 FF FF
+
+fdivr_11 ... ok
+. 0 80520C0 5
+. 83 C4 10 EB 48
+
+. 0 805210D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063146 5
+. E8 CA EF FE FF
+
+. 0 8052115 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 94 62 FF FF
+
+. 0 805215C 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8052163 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 FA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 B0 39 10 3E 50 FF 75 D0 E8 73 63 FF FF
+
+. 0 805218E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052195 13
+. 83 EC 0C 68 4A 5D 06 08 E8 6E 62 FF FF
+
+fdivr_12 ... ok
+. 0 80521A2 5
+. 83 C4 10 EB 48
+
+. 0 80521EF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806314B 5
+. E8 A7 F0 FE FF
+
+. 0 80521F7 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 A2 61 FF FF
+
+. 0 805224E 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8052255 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 FA DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 FC 62 FF FF
+
+. 0 8052286 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805228D 13
+. 83 EC 0C 68 85 5D 06 08 E8 76 61 FF FF
+
+fdivr_13 ... ok
+. 0 805229A 5
+. 83 C4 10 EB 44
+
+. 0 80522E3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063150 5
+. E8 96 F1 FE FF
+
+. 0 80522EB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 AE 60 FF FF
+
+. 0 8052342 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8052349 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 FA DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 08 62 FF FF
+
+. 0 805237A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052381 13
+. 83 EC 0C 68 C0 5D 06 08 E8 82 60 FF FF
+
+fdivr_14 ... ok
+. 0 805238E 5
+. 83 C4 10 EB 44
+
+. 0 80523D7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063155 5
+. E8 85 F2 FE FF
+
+. 0 80523DF 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 BA 5F FF FF
+
+. 0 8052436 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805243D 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 FA DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 BF 52 50 FF 75 D4 FF 75 D0 E8 14 61 FF FF
+
+. 0 805246E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052475 13
+. 83 EC 0C 68 FB 5D 06 08 E8 8E 5F FF FF
+
+fdivr_15 ... ok
+. 0 8052482 5
+. 83 C4 10 EB 44
+
+. 0 80524CB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806315A 5
+. E8 74 F3 FE FF
+
+. 0 80524D3 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 C6 5E FF FF
+
+. 0 805252A 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8052531 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 FA DD 5D D0 DD A5 58 FF FF FF B8 DB AA 13 1E BA 29 A5 C4 3F 52 50 FF 75 D4 FF 75 D0 E8 20 60 FF FF
+
+. 0 8052562 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052569 13
+. 83 EC 0C 68 36 5E 06 08 E8 9A 5E FF FF
+
+fdivr_16 ... ok
+. 0 8052576 5
+. 83 C4 10 EB 44
+
+. 0 80525BF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806315F 5
+. E8 63 F4 FE FF
+
+. 0 80525C7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 E2 5D FF FF
+
+. 0 805260E 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8052615 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE FA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 BF 5E FF FF
+
+. 0 8052642 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052649 13
+. 83 EC 0C 68 71 5E 06 08 E8 BA 5D FF FF
+
+fdivrp_1 ... ok
+. 0 8052656 5
+. 83 C4 10 EB 48
+
+. 0 80526A3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063164 5
+. E8 42 F5 FE FF
+
+. 0 80526AB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 FE 5C FF FF
+
+. 0 80526F2 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 80526F9 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE FA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 DB 5D FF FF
+
+. 0 8052726 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805272D 13
+. 83 EC 0C 68 AC 5E 06 08 E8 D6 5C FF FF
+
+fdivrp_2 ... ok
+. 0 805273A 5
+. 83 C4 10 EB 48
+
+. 0 8052787 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063169 5
+. E8 21 F6 FE FF
+
+. 0 805278F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 1A 5C FF FF
+
+. 0 80527D6 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 80527DD 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE FA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 F7 5C FF FF
+
+. 0 805280A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052811 13
+. 83 EC 0C 68 E7 5E 06 08 E8 F2 5B FF FF
+
+fdivrp_3 ... ok
+. 0 805281E 5
+. 83 C4 10 EB 48
+
+. 0 805286B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806316E 5
+. E8 00 F7 FE FF
+
+. 0 8052873 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 36 5B FF FF
+
+. 0 80528BA 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 80528C1 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE FA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 13 5C FF FF
+
+. 0 80528EE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80528F5 13
+. 83 EC 0C 68 22 5F 06 08 E8 0E 5B FF FF
+
+fdivrp_4 ... ok
+. 0 8052902 5
+. 83 C4 10 EB 48
+
+. 0 805294F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063173 5
+. E8 DF F7 FE FF
+
+. 0 8052957 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 42 5A FF FF
+
+. 0 80529AE 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 80529B9 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE FA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 96 5B FF FF
+
+. 0 80529EC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80529F3 13
+. 83 EC 0C 68 5D 5F 06 08 E8 10 5A FF FF
+
+fdivrp_5 ... ok
+. 0 8052A00 5
+. 83 C4 10 EB 44
+
+. 0 8052A49 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063178 5
+. E8 D4 F8 FE FF
+
+. 0 8052A51 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 48 59 FF FF
+
+. 0 8052AA8 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8052AB3 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE FA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 9C 5A FF FF
+
+. 0 8052AE6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052AED 13
+. 83 EC 0C 68 98 5F 06 08 E8 16 59 FF FF
+
+fdivrp_6 ... ok
+. 0 8052AFA 5
+. 83 C4 10 EB 44
+
+. 0 8052B43 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806317D 5
+. E8 C9 F9 FE FF
+
+. 0 8052B4B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 4E 58 FF FF
+
+. 0 8052BA2 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8052BAD 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE FA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 A2 59 FF FF
+
+. 0 8052BE0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052BE7 13
+. 83 EC 0C 68 D3 5F 06 08 E8 1C 58 FF FF
+
+fdivrp_7 ... ok
+. 0 8052BF4 5
+. 83 C4 10 EB 44
+
+. 0 8052C3D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063182 5
+. E8 BE FA FE FF
+
+. 0 8052C45 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 54 57 FF FF
+
+. 0 8052C9C 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8052CA7 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE FA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 A8 58 FF FF
+
+. 0 8052CDA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052CE1 13
+. 83 EC 0C 68 0E 60 06 08 E8 22 57 FF FF
+
+fdivrp_8 ... ok
+. 0 8052CEE 5
+. 83 C4 10 EB 44
+
+. 0 8052D37 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063187 5
+. E8 B3 FB FE FF
+
+. 0 8052D3F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 6A 56 FF FF
+
+. 0 8052D86 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8052D8D 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE F9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 4B 57 FF FF
+
+. 0 8052DB6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052DBD 13
+. 83 EC 0C 68 49 60 06 08 E8 46 56 FF FF
+
+fdivrp_9 ... ok
+. 0 8052DCA 5
+. 83 C4 10 EB 48
+
+. 0 8052E17 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806318C 5
+. E8 8E FC FE FF
+
+. 0 8052E1F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 8A 55 FF FF
+
+. 0 8052E66 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8052E6D 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE F9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 6B 56 FF FF
+
+. 0 8052E96 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052E9D 13
+. 83 EC 0C 68 84 60 06 08 E8 66 55 FF FF
+
+fdivrp_10 ... ok
+. 0 8052EAA 5
+. 83 C4 10 EB 48
+
+. 0 8052EF7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063191 5
+. E8 69 FD FE FF
+
+. 0 8052EFF 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 AA 54 FF FF
+
+. 0 8052F46 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8052F4D 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE F9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 C0 50 FF 75 D0 E8 8B 55 FF FF
+
+. 0 8052F76 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8052F7D 13
+. 83 EC 0C 68 C2 60 06 08 E8 86 54 FF FF
+
+fdivrp_11 ... ok
+. 0 8052F8A 5
+. 83 C4 10 EB 48
+
+. 0 8052FD7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063196 5
+. E8 44 FE FE FF
+
+. 0 8052FDF 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 CA 53 FF FF
+
+. 0 8053026 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805302D 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE F9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 34 33 E3 40 50 FF 75 D0 E8 AB 54 FF FF
+
+. 0 8053056 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805305D 13
+. 83 EC 0C 68 00 61 06 08 E8 A6 53 FF FF
+
+fdivrp_12 ... ok
+. 0 805306A 5
+. 83 C4 10 EB 48
+
+. 0 80530B7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806319B 5
+. E8 1F FF FE FF
+
+. 0 80530BF 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 DA 52 FF FF
+
+. 0 8053116 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805311D 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE F9 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 36 54 FF FF
+
+. 0 805314C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053153 13
+. 83 EC 0C 68 3E 61 06 08 E8 B0 52 FF FF
+
+fdivrp_13 ... ok
+. 0 8053160 5
+. 83 C4 10 EB 44
+
+. 0 80531A9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631A0 5
+. E8 0C 00 FF FF
+
+. 0 80531B1 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 E8 51 FF FF
+
+. 0 8053208 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805320F 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE F9 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 44 53 FF FF
+
+. 0 805323E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053245 13
+. 83 EC 0C 68 7C 61 06 08 E8 BE 51 FF FF
+
+fdivrp_14 ... ok
+. 0 8053252 5
+. 83 C4 10 EB 44
+
+. 0 805329B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631A5 5
+. E8 F9 00 FF FF
+
+. 0 80532A3 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 F6 50 FF FF
+
+. 0 80532FA 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8053301 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE F9 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 C0 52 50 FF 75 D4 FF 75 D0 E8 52 52 FF FF
+
+. 0 8053330 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053337 13
+. 83 EC 0C 68 BA 61 06 08 E8 CC 50 FF FF
+
+fdivrp_15 ... ok
+. 0 8053344 5
+. 83 C4 10 EB 44
+
+. 0 805338D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631AA 5
+. E8 E6 01 FF FF
+
+. 0 8053395 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 04 50 FF FF
+
+. 0 80533EC 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 80533F3 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE F9 DD 5D D0 DD A5 58 FF FF FF B8 AE 41 F3 01 BA CD CC 18 40 52 50 FF 75 D4 FF 75 D0 E8 60 51 FF FF
+
+. 0 8053422 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053429 13
+. 83 EC 0C 68 F8 61 06 08 E8 DA 4F FF FF
+
+fdivrp_16 ... ok
+. 0 8053436 5
+. 83 C4 10 EB 44
+
+. 0 805347F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631AF 5
+. E8 D3 02 FF FF
+
+. 0 8053487 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 2C 4F FF FF
+
+. 0 80534C4 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80534CB 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 7D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2B 00 60 40 50 FF 75 D0 E8 0F 50 FF FF
+
+. 0 80534F2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80534F9 13
+. 83 EC 0C 68 36 62 06 08 E8 0A 4F FF FF
+
+fidivrs_1 ... ok
+. 0 8053506 5
+. 83 C4 10 EB 48
+
+. 0 8053553 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631B4 5
+. E8 A2 03 FF FF
+
+. 0 805355B 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 58 4E FF FF
+
+. 0 8053598 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805359F 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 7D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2B 00 60 C0 50 FF 75 D0 E8 3B 4F FF FF
+
+. 0 80535C6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80535CD 13
+. 83 EC 0C 68 74 62 06 08 E8 36 4E FF FF
+
+fidivrs_2 ... ok
+. 0 80535DA 5
+. 83 C4 10 EB 48
+
+. 0 8053627 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631B9 5
+. E8 71 04 FF FF
+
+. 0 805362F 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 84 4D FF FF
+
+. 0 805366C 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8053673 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 7D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2B 00 60 C0 50 FF 75 D0 E8 67 4E FF FF
+
+. 0 805369A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80536A1 13
+. 83 EC 0C 68 B2 62 06 08 E8 62 4D FF FF
+
+fidivrs_3 ... ok
+. 0 80536AE 5
+. 83 C4 10 EB 48
+
+. 0 80536FB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631BE 5
+. E8 40 05 FF FF
+
+. 0 8053703 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 B0 4C FF FF
+
+. 0 8053740 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8053747 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 7D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2B 00 60 40 50 FF 75 D0 E8 93 4D FF FF
+
+. 0 805376E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053775 13
+. 83 EC 0C 68 F0 62 06 08 E8 8E 4C FF FF
+
+fidivrs_4 ... ok
+. 0 8053782 5
+. 83 C4 10 EB 48
+
+. 0 80537CF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631C3 5
+. E8 0F 06 FF FF
+
+. 0 80537D7 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 D4 4B FF FF
+
+. 0 805381C 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8053823 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 7D DE DD 5D D0 DD A5 58 FF FF FF B8 00 A8 74 C4 BA 0D AC 6C 3F 52 50 FF 75 D4 FF 75 D0 E8 32 4D FF FF
+
+. 0 8053850 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053857 13
+. 83 EC 0C 68 2E 63 06 08 E8 AC 4B FF FF
+
+fidivrs_5 ... ok
+. 0 8053864 5
+. 83 C4 10 EB 44
+
+. 0 80538AD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631C8 5
+. E8 E8 06 FF FF
+
+. 0 80538B5 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 F6 4A FF FF
+
+. 0 80538FA 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8053901 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 7D DE DD 5D D0 DD A5 58 FF FF FF B8 00 A8 74 C4 BA 0D AC 6C BF 52 50 FF 75 D4 FF 75 D0 E8 54 4C FF FF
+
+. 0 805392E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053935 13
+. 83 EC 0C 68 6C 63 06 08 E8 CE 4A FF FF
+
+fidivrs_6 ... ok
+. 0 8053942 5
+. 83 C4 10 EB 44
+
+. 0 805398B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631CD 5
+. E8 C1 07 FF FF
+
+. 0 8053993 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 18 4A FF FF
+
+. 0 80539D8 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 80539DF 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 7D DE DD 5D D0 DD A5 58 FF FF FF B8 00 A8 74 C4 BA 0D AC 6C BF 52 50 FF 75 D4 FF 75 D0 E8 76 4B FF FF
+
+. 0 8053A0C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053A13 13
+. 83 EC 0C 68 AA 63 06 08 E8 F0 49 FF FF
+
+fidivrs_7 ... ok
+. 0 8053A20 5
+. 83 C4 10 EB 44
+
+. 0 8053A69 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631D2 5
+. E8 9A 08 FF FF
+
+. 0 8053A71 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 3A 49 FF FF
+
+. 0 8053AB6 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8053ABD 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 7D DE DD 5D D0 DD A5 58 FF FF FF B8 00 A8 74 C4 BA 0D AC 6C 3F 52 50 FF 75 D4 FF 75 D0 E8 98 4A FF FF
+
+. 0 8053AEA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053AF1 13
+. 83 EC 0C 68 E8 63 06 08 E8 12 49 FF FF
+
+fidivrs_8 ... ok
+. 0 8053AFE 5
+. 83 C4 10 EB 44
+
+. 0 8053B47 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631D7 5
+. E8 73 09 FF FF
+
+. 0 8053B4F 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 62 48 FF FF
+
+. 0 8053B8E 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8053B95 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 7D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 01 AC 8A 47 50 FF 75 D0 E8 45 49 FF FF
+
+. 0 8053BBC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053BC3 13
+. 83 EC 0C 68 26 64 06 08 E8 40 48 FF FF
+
+fidivrl_1 ... ok
+. 0 8053BD0 5
+. 83 C4 10 EB 48
+
+. 0 8053C1D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631DC 5
+. E8 44 0A FF FF
+
+. 0 8053C25 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 8C 47 FF FF
+
+. 0 8053C64 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8053C6B 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 7D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 01 AC 8A C7 50 FF 75 D0 E8 6F 48 FF FF
+
+. 0 8053C92 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053C99 13
+. 83 EC 0C 68 64 64 06 08 E8 6A 47 FF FF
+
+fidivrl_2 ... ok
+. 0 8053CA6 5
+. 83 C4 10 EB 48
+
+. 0 8053CF3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631E1 5
+. E8 15 0B FF FF
+
+. 0 8053CFB 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 B6 46 FF FF
+
+. 0 8053D3A 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8053D41 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 7D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 01 AC 8A C7 50 FF 75 D0 E8 99 47 FF FF
+
+. 0 8053D68 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053D6F 13
+. 83 EC 0C 68 A2 64 06 08 E8 94 46 FF FF
+
+fidivrl_3 ... ok
+. 0 8053D7C 5
+. 83 C4 10 EB 48
+
+. 0 8053DC9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631E6 5
+. E8 E6 0B FF FF
+
+. 0 8053DD1 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 E0 45 FF FF
+
+. 0 8053E10 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8053E17 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 7D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 01 AC 8A 47 50 FF 75 D0 E8 C3 46 FF FF
+
+. 0 8053E3E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053E45 13
+. 83 EC 0C 68 E0 64 06 08 E8 BE 45 FF FF
+
+fidivrl_4 ... ok
+. 0 8053E52 5
+. 83 C4 10 EB 48
+
+. 0 8053E9F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631EB 5
+. E8 B7 0C FF FF
+
+. 0 8053EA7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC F1 FB 09 00 83 EC 08 6A 01 68 40 A1 06 08 E8 02 45 FF FF
+
+. 0 8053EEE 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8053EF5 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 7D DC DD 5D D0 DD A5 58 FF FF FF B8 D2 3D 07 B4 BA C2 F5 E0 3F 52 50 FF 75 D4 FF 75 D0 E8 60 46 FF FF
+
+. 0 8053F22 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8053F29 13
+. 83 EC 0C 68 1E 65 06 08 E8 DA 44 FF FF
+
+fidivrl_5 ... ok
+. 0 8053F36 5
+. 83 C4 10 EB 44
+
+. 0 8053F7F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631F0 5
+. E8 92 0D FF FF
+
+. 0 8053F87 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC F1 FB 09 00 83 EC 08 6A 01 68 40 A1 06 08 E8 22 44 FF FF
+
+. 0 8053FCE 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8053FD5 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 7D DC DD 5D D0 DD A5 58 FF FF FF B8 D2 3D 07 B4 BA C2 F5 E0 BF 52 50 FF 75 D4 FF 75 D0 E8 80 45 FF FF
+
+. 0 8054002 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054009 13
+. 83 EC 0C 68 5C 65 06 08 E8 FA 43 FF FF
+
+fidivrl_6 ... ok
+. 0 8054016 5
+. 83 C4 10 EB 44
+
+. 0 805405F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631F5 5
+. E8 6D 0E FF FF
+
+. 0 8054067 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 0F 04 F6 FF 83 EC 08 6A 01 68 40 A1 06 08 E8 42 43 FF FF
+
+. 0 80540AE 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 80540B5 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 7D DC DD 5D D0 DD A5 58 FF FF FF B8 D2 3D 07 B4 BA C2 F5 E0 BF 52 50 FF 75 D4 FF 75 D0 E8 A0 44 FF FF
+
+. 0 80540E2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80540E9 13
+. 83 EC 0C 68 9A 65 06 08 E8 1A 43 FF FF
+
+fidivrl_7 ... ok
+. 0 80540F6 5
+. 83 C4 10 EB 44
+
+. 0 805413F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631FA 5
+. E8 48 0F FF FF
+
+. 0 8054147 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 0F 04 F6 FF 83 EC 08 6A 01 68 40 A1 06 08 E8 62 42 FF FF
+
+. 0 805418E 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8054195 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 7D DC DD 5D D0 DD A5 58 FF FF FF B8 D2 3D 07 B4 BA C2 F5 E0 3F 52 50 FF 75 D4 FF 75 D0 E8 C0 43 FF FF
+
+. 0 80541C2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80541C9 13
+. 83 EC 0C 68 D8 65 06 08 E8 3A 42 FF FF
+
+fidivrl_8 ... ok
+. 0 80541D6 5
+. 83 C4 10 EB 44
+
+. 0 805421F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80631FF 5
+. E8 23 10 FF FF
+
+. 0 8054227 39
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 66 C7 45 E6 00 00 66 C7 45 E6 39 30 83 EC 08 6A 01 68 40 A1 06 08 E8 A2 41 FF FF
+
+. 0 805424E 7
+. 83 C4 10 85 C0 75 75
+
+. 0 8054255 36
+. 9B DD B5 68 FF FF FF DF 45 E6 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 E4 40 46 50 FF 75 D8 E8 88 42 FF FF
+
+. 0 8054279 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054280 13
+. 83 EC 0C 68 16 66 06 08 E8 83 41 FF FF
+
+filds_1 ... ok
+. 0 805428D 5
+. 83 C4 10 EB 48
+
+. 0 80542DA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063204 5
+. E8 D9 10 FF FF
+
+. 0 80542E2 39
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 66 C7 45 E6 00 00 66 C7 45 E6 C7 CF 83 EC 08 6A 01 68 40 A1 06 08 E8 E7 40 FF FF
+
+. 0 8054309 7
+. 83 C4 10 85 C0 75 75
+
+. 0 8054310 36
+. 9B DD B5 68 FF FF FF DF 45 E6 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 E4 40 C6 50 FF 75 D8 E8 CD 41 FF FF
+
+. 0 8054334 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805433B 13
+. 83 EC 0C 68 4E 66 06 08 E8 C8 40 FF FF
+
+filds_2 ... ok
+. 0 8054348 5
+. 83 C4 10 EB 48
+
+. 0 8054395 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063209 5
+. E8 8F 11 FF FF
+
+. 0 805439D 39
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 66 C7 45 E6 00 00 66 C7 45 E6 39 30 83 EC 08 6A 01 68 40 A1 06 08 E8 2C 40 FF FF
+
+. 0 80543C4 7
+. 83 C4 10 85 C0 75 77
+
+. 0 80543CB 42
+. 9B DD B5 68 FF FF FF DF 45 E6 DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 80 1C C8 40 52 50 FF 75 DC FF 75 D8 E8 8D 41 FF FF
+
+. 0 80543F5 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80543FC 13
+. 83 EC 0C 68 86 66 06 08 E8 07 40 FF FF
+
+filds_3 ... ok
+. 0 8054409 5
+. 83 C4 10 EB 44
+
+. 0 8054452 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806320E 5
+. E8 47 12 FF FF
+
+. 0 805445A 39
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 66 C7 45 E6 00 00 66 C7 45 E6 C7 CF 83 EC 08 6A 01 68 40 A1 06 08 E8 6F 3F FF FF
+
+. 0 8054481 7
+. 83 C4 10 85 C0 75 77
+
+. 0 8054488 42
+. 9B DD B5 68 FF FF FF DF 45 E6 DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 80 1C C8 C0 52 50 FF 75 DC FF 75 D8 E8 D0 40 FF FF
+
+. 0 80544B2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80544B9 13
+. 83 EC 0C 68 BE 66 06 08 E8 4A 3F FF FF
+
+filds_4 ... ok
+. 0 80544C6 5
+. 83 C4 10 EB 44
+
+. 0 805450F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063213 5
+. E8 FF 12 FF FF
+
+. 0 8054517 41
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 4E 61 BC 00 83 EC 08 6A 01 68 40 A1 06 08 E8 B0 3E FF FF
+
+. 0 8054540 7
+. 83 C4 10 85 C0 75 75
+
+. 0 8054547 36
+. 9B DD B5 68 FF FF FF DB 45 E4 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 4E 61 3C 4B 50 FF 75 D8 E8 96 3F FF FF
+
+. 0 805456B 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054572 13
+. 83 EC 0C 68 F6 66 06 08 E8 91 3E FF FF
+
+fildl_1 ... ok
+. 0 805457F 5
+. 83 C4 10 EB 48
+
+. 0 80545CC 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063218 5
+. E8 B7 13 FF FF
+
+. 0 80545D4 41
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 B2 9E 43 FF 83 EC 08 6A 01 68 40 A1 06 08 E8 F3 3D FF FF
+
+. 0 80545FD 7
+. 83 C4 10 85 C0 75 75
+
+. 0 8054604 36
+. 9B DD B5 68 FF FF FF DB 45 E4 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 4E 61 3C CB 50 FF 75 D8 E8 D9 3E FF FF
+
+. 0 8054628 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805462F 13
+. 83 EC 0C 68 2E 67 06 08 E8 D4 3D FF FF
+
+fildl_2 ... ok
+. 0 805463C 5
+. 83 C4 10 EB 48
+
+. 0 8054689 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806321D 5
+. E8 6F 14 FF FF
+
+. 0 8054691 41
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 4E 61 BC 00 83 EC 08 6A 01 68 40 A1 06 08 E8 36 3D FF FF
+
+. 0 80546BA 7
+. 83 C4 10 85 C0 75 77
+
+. 0 80546C1 42
+. 9B DD B5 68 FF FF FF DB 45 E4 DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 C0 BA 29 8C 67 41 52 50 FF 75 DC FF 75 D8 E8 97 3E FF FF
+
+. 0 80546EB 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80546F2 13
+. 83 EC 0C 68 66 67 06 08 E8 11 3D FF FF
+
+fildl_3 ... ok
+. 0 80546FF 5
+. 83 C4 10 EB 44
+
+. 0 8054748 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063222 5
+. E8 29 15 FF FF
+
+. 0 8054750 41
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 B2 9E 43 FF 83 EC 08 6A 01 68 40 A1 06 08 E8 77 3C FF FF
+
+. 0 8054779 7
+. 83 C4 10 85 C0 75 77
+
+. 0 8054780 42
+. 9B DD B5 68 FF FF FF DB 45 E4 DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 C0 BA 29 8C 67 C1 52 50 FF 75 DC FF 75 D8 E8 D8 3D FF FF
+
+. 0 80547AA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80547B1 13
+. 83 EC 0C 68 9E 67 06 08 E8 52 3C FF FF
+
+fildl_4 ... ok
+. 0 80547BE 5
+. 83 C4 10 EB 44
+
+. 0 8054807 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063227 5
+. E8 E3 15 FF FF
+
+. 0 805480F 55
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 B1 26 F9 85 C7 45 E4 48 70 00 00 83 EC 08 6A 01 68 40 A1 06 08 E8 AA 3B FF FF
+
+. 0 8054846 7
+. 83 C4 10 85 C0 75 75
+
+. 0 805484D 36
+. 9B DD B5 68 FF FF FF DF 6D E0 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 0C 91 E0 56 50 FF 75 D8 E8 90 3C FF FF
+
+. 0 8054871 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054878 13
+. 83 EC 0C 68 D6 67 06 08 E8 8B 3B FF FF
+
+fildq_1 ... ok
+. 0 8054885 5
+. 83 C4 10 EB 48
+
+. 0 80548D2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806322C 5
+. E8 A9 16 FF FF
+
+. 0 80548DA 55
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 4F D9 06 7A C7 45 E4 B7 8F FF FF 83 EC 08 6A 01 68 40 A1 06 08 E8 DF 3A FF FF
+
+. 0 8054911 7
+. 83 C4 10 85 C0 75 75
+
+. 0 8054918 36
+. 9B DD B5 68 FF FF FF DF 6D E0 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 0C 91 E0 D6 50 FF 75 D8 E8 C5 3B FF FF
+
+. 0 805493C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054943 13
+. 83 EC 0C 68 0E 68 06 08 E8 C0 3A FF FF
+
+fildq_2 ... ok
+. 0 8054950 5
+. 83 C4 10 EB 48
+
+. 0 805499D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063231 5
+. E8 6F 17 FF FF
+
+. 0 80549A5 55
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 B1 26 F9 85 C7 45 E4 48 70 00 00 83 EC 08 6A 01 68 40 A1 06 08 E8 14 3A FF FF
+
+. 0 80549DC 7
+. 83 C4 10 85 C0 75 77
+
+. 0 80549E3 42
+. 9B DD B5 68 FF FF FF DF 6D E0 DD 5D D8 DD A5 68 FF FF FF B8 40 AC 49 7E BA 21 12 DC 42 52 50 FF 75 DC FF 75 D8 E8 75 3B FF FF
+
+. 0 8054A0D 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054A14 13
+. 83 EC 0C 68 46 68 06 08 E8 EF 39 FF FF
+
+fildq_3 ... ok
+. 0 8054A21 5
+. 83 C4 10 EB 44
+
+. 0 8054A6A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063236 5
+. E8 37 18 FF FF
+
+. 0 8054A72 55
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 4F D9 06 7A C7 45 E4 B7 8F FF FF 83 EC 08 6A 01 68 40 A1 06 08 E8 47 39 FF FF
+
+. 0 8054AA9 7
+. 83 C4 10 85 C0 75 77
+
+. 0 8054AB0 42
+. 9B DD B5 68 FF FF FF DF 6D E0 DD 5D D8 DD A5 68 FF FF FF B8 40 AC 49 7E BA 21 12 DC C2 52 50 FF 75 DC FF 75 D8 E8 A8 3A FF FF
+
+. 0 8054ADA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054AE1 13
+. 83 EC 0C 68 7E 68 06 08 E8 22 39 FF FF
+
+fildq_4 ... ok
+. 0 8054AEE 5
+. 83 C4 10 EB 44
+
+. 0 8054B37 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806323B 5
+. E8 FF 18 FF FF
+
+. 0 8054B3F 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 8B 45 DE 66 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 72 38 FF FF
+
+. 0 8054B7E 11
+. 83 C4 10 85 C0 0F 85 B2 00 00 00
+
+. 0 8054B89 54
+. 9B DD B5 58 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DF 55 DC D9 5D D0 DD A5 58 FF FF FF 66 81 7D DC D3 04 75 2A
+
+. 0 8054BBF 17
+. 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 31 39 FF FF
+
+. 0 8054BD0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054BD7 13
+. 83 EC 0C 68 B6 68 06 08 E8 2C 38 FF FF
+
+fists_1 ... ok
+. 0 8054BE4 5
+. 83 C4 10 EB 62
+
+. 0 8054C4B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063240 5
+. E8 0E 1A FF FF
+
+. 0 8054C53 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 8B 45 DE 66 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 5E 37 FF FF
+
+. 0 8054C92 11
+. 83 C4 10 85 C0 0F 85 B2 00 00 00
+
+. 0 8054C9D 54
+. 9B DD B5 58 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DF 55 DC D9 5D D0 DD A5 58 FF FF FF 66 81 7D DC 2D FB 75 2A
+
+. 0 8054CD3 17
+. 83 EC 08 B8 2B 52 9A C4 50 FF 75 D0 E8 1D 38 FF FF
+
+. 0 8054CE4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054CEB 13
+. 83 EC 0C 68 14 69 06 08 E8 18 37 FF FF
+
+fists_2 ... ok
+. 0 8054CF8 5
+. 83 C4 10 EB 62
+
+. 0 8054D5F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063245 5
+. E8 1D 1B FF FF
+
+. 0 8054D67 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 8B 45 DE 66 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 4A 36 FF FF
+
+. 0 8054DA6 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8054DB1 55
+. 9B DD B5 58 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DF 55 DC D9 5D D0 DD A5 58 FF FF FF 66 81 7D DC D2 04 75 2A
+
+. 0 8054DE8 17
+. 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 08 37 FF FF
+
+. 0 8054DF9 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054E00 13
+. 83 EC 0C 68 4C 69 06 08 E8 03 36 FF FF
+
+fists_3 ... ok
+. 0 8054E0D 5
+. 83 C4 10 EB 62
+
+. 0 8054E74 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806324A 5
+. E8 2D 1C FF FF
+
+. 0 8054E7C 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 8B 45 DE 66 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 35 35 FF FF
+
+. 0 8054EBB 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8054EC6 55
+. 9B DD B5 58 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DF 55 DC D9 5D D0 DD A5 58 FF FF FF 66 81 7D DC 2D FB 75 2A
+
+. 0 8054EFD 17
+. 83 EC 08 B8 2B 52 9A C4 50 FF 75 D0 E8 F3 35 FF FF
+
+. 0 8054F0E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8054F15 13
+. 83 EC 0C 68 84 69 06 08 E8 EE 34 FF FF
+
+fists_4 ... ok
+. 0 8054F22 5
+. 83 C4 10 EB 62
+
+. 0 8054F89 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806324F 5
+. E8 3D 1D FF FF
+
+. 0 8054F91 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 8B 45 DE 66 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 20 34 FF FF
+
+. 0 8054FD0 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8054FDB 55
+. 9B DD B5 58 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DF 55 DC D9 5D D0 DD A5 58 FF FF FF 66 81 7D DC D3 04 75 2A
+
+. 0 8055012 17
+. 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 DE 34 FF FF
+
+. 0 8055023 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805502A 13
+. 83 EC 0C 68 BC 69 06 08 E8 D9 33 FF FF
+
+fists_5 ... ok
+. 0 8055037 5
+. 83 C4 10 EB 62
+
+. 0 805509E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063254 5
+. E8 4D 1E FF FF
+
+. 0 80550A6 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 8B 45 DE 66 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 0B 33 FF FF
+
+. 0 80550E5 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 80550F0 55
+. 9B DD B5 58 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DF 55 DC D9 5D D0 DD A5 58 FF FF FF 66 81 7D DC 2E FB 75 2A
+
+. 0 8055127 17
+. 83 EC 08 B8 2B 52 9A C4 50 FF 75 D0 E8 C9 33 FF FF
+
+. 0 8055138 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805513F 13
+. 83 EC 0C 68 F4 69 06 08 E8 C4 32 FF FF
+
+fists_6 ... ok
+. 0 805514C 5
+. 83 C4 10 EB 62
+
+. 0 80551B3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063259 5
+. E8 5D 1F FF FF
+
+. 0 80551BB 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 8B 45 DE 66 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 F6 31 FF FF
+
+. 0 80551FA 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8055205 55
+. 9B DD B5 58 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DF 55 DC D9 5D D0 DD A5 58 FF FF FF 66 81 7D DC D2 04 75 2A
+
+. 0 805523C 17
+. 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 B4 32 FF FF
+
+. 0 805524D 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8055254 13
+. 83 EC 0C 68 2C 6A 06 08 E8 AF 31 FF FF
+
+fists_7 ... ok
+. 0 8055261 5
+. 83 C4 10 EB 62
+
+. 0 80552C8 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806325E 5
+. E8 6D 20 FF FF
+
+. 0 80552D0 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 8B 45 DE 66 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 E1 30 FF FF
+
+. 0 805530F 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 805531A 55
+. 9B DD B5 58 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DF 55 DC D9 5D D0 DD A5 58 FF FF FF 66 81 7D DC 2E FB 75 2A
+
+. 0 8055351 17
+. 83 EC 08 B8 2B 52 9A C4 50 FF 75 D0 E8 9F 31 FF FF
+
+. 0 8055362 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8055369 13
+. 83 EC 0C 68 64 6A 06 08 E8 9A 30 FF FF
+
+fists_8 ... ok
+. 0 8055376 5
+. 83 C4 10 EB 62
+
+. 0 80553DD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063263 5
+. E8 7D 21 FF FF
+
+. 0 80553E5 70
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 8B 45 DC 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 C5 2F FF FF
+
+. 0 805542B 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8055436 55
+. 9B DD B5 58 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DB 55 D8 DD 5D D0 DD A5 58 FF FF FF 81 7D D8 88 D6 12 00 75 30
+
+. 0 805546D 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 FE 30 FF FF
+
+. 0 8055484 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805548B 13
+. 83 EC 0C 68 9C 6A 06 08 E8 78 2F FF FF
+
+fistl_1 ... ok
+. 0 8055498 5
+. 83 C4 10 EB 5C
+
+. 0 80554F9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063268 5
+. E8 94 22 FF FF
+
+. 0 8055501 70
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 8B 45 DC 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 A9 2E FF FF
+
+. 0 8055547 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8055552 55
+. 9B DD B5 58 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DB 55 D8 DD 5D D0 DD A5 58 FF FF FF 81 7D D8 78 29 ED FF 75 30
+
+. 0 8055589 23
+. B8 AD 5B F3 C3 BA 87 D6 32 C1 52 50 FF 75 D4 FF 75 D0 E8 E2 2F FF FF
+
+. 0 80555A0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80555A7 13
+. 83 EC 0C 68 FA 6A 06 08 E8 5C 2E FF FF
+
+fistl_2 ... ok
+. 0 80555B4 5
+. 83 C4 10 EB 5C
+
+. 0 8055615 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806326D 5
+. E8 AB 23 FF FF
+
+. 0 805561D 70
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 8B 45 DC 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 8D 2D FF FF
+
+. 0 8055663 11
+. 83 C4 10 85 C0 0F 85 B4 00 00 00
+
+. 0 805566E 56
+. 9B DD B5 58 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DB 55 D8 DD 5D D0 DD A5 58 FF FF FF 81 7D D8 87 D6 12 00 75 30
+
+. 0 80556A6 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 C5 2E FF FF
+
+. 0 80556BD 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80556C4 13
+. 83 EC 0C 68 32 6B 06 08 E8 3F 2D FF FF
+
+fistl_3 ... ok
+. 0 80556D1 5
+. 83 C4 10 EB 5C
+
+. 0 8055732 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063272 5
+. E8 C3 24 FF FF
+
+. 0 805573A 70
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 8B 45 DC 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 70 2C FF FF
+
+. 0 8055780 11
+. 83 C4 10 85 C0 0F 85 B4 00 00 00
+
+. 0 805578B 56
+. 9B DD B5 58 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DB 55 D8 DD 5D D0 DD A5 58 FF FF FF 81 7D D8 78 29 ED FF 75 30
+
+. 0 80557C3 23
+. B8 AD 5B F3 C3 BA 87 D6 32 C1 52 50 FF 75 D4 FF 75 D0 E8 A8 2D FF FF
+
+. 0 80557DA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80557E1 13
+. 83 EC 0C 68 6A 6B 06 08 E8 22 2C FF FF
+
+fistl_4 ... ok
+. 0 80557EE 5
+. 83 C4 10 EB 5C
+
+. 0 805584F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063277 5
+. E8 DB 25 FF FF
+
+. 0 8055857 70
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 8B 45 DC 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 53 2B FF FF
+
+. 0 805589D 11
+. 83 C4 10 85 C0 0F 85 B4 00 00 00
+
+. 0 80558A8 56
+. 9B DD B5 58 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DB 55 D8 DD 5D D0 DD A5 58 FF FF FF 81 7D D8 88 D6 12 00 75 30
+
+. 0 80558E0 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 8B 2C FF FF
+
+. 0 80558F7 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80558FE 13
+. 83 EC 0C 68 A2 6B 06 08 E8 05 2B FF FF
+
+fistl_5 ... ok
+. 0 805590B 5
+. 83 C4 10 EB 5C
+
+. 0 805596C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806327C 5
+. E8 F3 26 FF FF
+
+. 0 8055974 70
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 8B 45 DC 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 36 2A FF FF
+
+. 0 80559BA 11
+. 83 C4 10 85 C0 0F 85 B4 00 00 00
+
+. 0 80559C5 56
+. 9B DD B5 58 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DB 55 D8 DD 5D D0 DD A5 58 FF FF FF 81 7D D8 79 29 ED FF 75 30
+
+. 0 80559FD 23
+. B8 AD 5B F3 C3 BA 87 D6 32 C1 52 50 FF 75 D4 FF 75 D0 E8 6E 2B FF FF
+
+. 0 8055A14 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8055A1B 13
+. 83 EC 0C 68 DA 6B 06 08 E8 E8 29 FF FF
+
+fistl_6 ... ok
+. 0 8055A28 5
+. 83 C4 10 EB 5C
+
+. 0 8055A89 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063281 5
+. E8 0B 28 FF FF
+
+. 0 8055A91 70
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 8B 45 DC 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 19 29 FF FF
+
+. 0 8055AD7 11
+. 83 C4 10 85 C0 0F 85 B4 00 00 00
+
+. 0 8055AE2 56
+. 9B DD B5 58 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DB 55 D8 DD 5D D0 DD A5 58 FF FF FF 81 7D D8 87 D6 12 00 75 30
+
+. 0 8055B1A 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 51 2A FF FF
+
+. 0 8055B31 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8055B38 13
+. 83 EC 0C 68 12 6C 06 08 E8 CB 28 FF FF
+
+fistl_7 ... ok
+. 0 8055B45 5
+. 83 C4 10 EB 5C
+
+. 0 8055BA6 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063286 5
+. E8 23 29 FF FF
+
+. 0 8055BAE 70
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 8B 45 DC 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 FC 27 FF FF
+
+. 0 8055BF4 11
+. 83 C4 10 85 C0 0F 85 B4 00 00 00
+
+. 0 8055BFF 56
+. 9B DD B5 58 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DB 55 D8 DD 5D D0 DD A5 58 FF FF FF 81 7D D8 79 29 ED FF 75 30
+
+. 0 8055C37 23
+. B8 AD 5B F3 C3 BA 87 D6 32 C1 52 50 FF 75 D4 FF 75 D0 E8 34 29 FF FF
+
+. 0 8055C4E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8055C55 13
+. 83 EC 0C 68 4A 6C 06 08 E8 AE 27 FF FF
+
+fistl_8 ... ok
+. 0 8055C62 5
+. 83 C4 10 EB 5C
+
+. 0 8055CC3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806328B 5
+. E8 3B 2A FF FF
+
+. 0 8055CCB 85
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 66 C7 45 D6 00 00 66 8B 45 D6 66 89 45 D4 83 EC 08 6A 01 68 40 A1 06 08 E8 D0 26 FF FF
+
+. 0 8055D20 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 8055D2B 57
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DF 5D D4 D9 5D C8 DD A5 58 FF FF FF 66 81 7D D4 D3 04 75 2A
+
+. 0 8055D64 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 8C 27 FF FF
+
+. 0 8055D75 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8055D7C 13
+. 83 EC 0C 68 82 6C 06 08 E8 87 26 FF FF
+
+fistps_1 ... ok
+. 0 8055D89 5
+. 83 C4 10 EB 62
+
+. 0 8055DF0 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063290 5
+. E8 63 2B FF FF
+
+. 0 8055DF8 85
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 66 C7 45 D6 00 00 66 8B 45 D6 66 89 45 D4 83 EC 08 6A 01 68 40 A1 06 08 E8 A3 25 FF FF
+
+. 0 8055E4D 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 8055E58 57
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DF 5D D4 D9 5D C8 DD A5 58 FF FF FF 66 81 7D D4 2D FB 75 2A
+
+. 0 8055E91 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 5F 26 FF FF
+
+. 0 8055EA2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8055EA9 13
+. 83 EC 0C 68 BD 6C 06 08 E8 5A 25 FF FF
+
+fistps_2 ... ok
+. 0 8055EB6 5
+. 83 C4 10 EB 62
+
+. 0 8055F1D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063295 5
+. E8 8B 2C FF FF
+
+. 0 8055F25 85
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 66 C7 45 D6 00 00 66 8B 45 D6 66 89 45 D4 83 EC 08 6A 01 68 40 A1 06 08 E8 76 24 FF FF
+
+. 0 8055F7A 11
+. 83 C4 10 85 C0 0F 85 B6 00 00 00
+
+. 0 8055F85 58
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DF 5D D4 D9 5D C8 DD A5 58 FF FF FF 66 81 7D D4 D2 04 75 2A
+
+. 0 8055FBF 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 31 25 FF FF
+
+. 0 8055FD0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8055FD7 13
+. 83 EC 0C 68 F8 6C 06 08 E8 2C 24 FF FF
+
+fistps_3 ... ok
+. 0 8055FE4 5
+. 83 C4 10 EB 62
+
+. 0 805604B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806329A 5
+. E8 B4 2D FF FF
+
+. 0 8056053 85
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 66 C7 45 D6 00 00 66 8B 45 D6 66 89 45 D4 83 EC 08 6A 01 68 40 A1 06 08 E8 48 23 FF FF
+
+. 0 80560A8 11
+. 83 C4 10 85 C0 0F 85 B6 00 00 00
+
+. 0 80560B3 58
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DF 5D D4 D9 5D C8 DD A5 58 FF FF FF 66 81 7D D4 2D FB 75 2A
+
+. 0 80560ED 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 03 24 FF FF
+
+. 0 80560FE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056105 13
+. 83 EC 0C 68 33 6D 06 08 E8 FE 22 FF FF
+
+fistps_4 ... ok
+. 0 8056112 5
+. 83 C4 10 EB 62
+
+. 0 8056179 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806329F 5
+. E8 DD 2E FF FF
+
+. 0 8056181 85
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 66 C7 45 D6 00 00 66 8B 45 D6 66 89 45 D4 83 EC 08 6A 01 68 40 A1 06 08 E8 1A 22 FF FF
+
+. 0 80561D6 11
+. 83 C4 10 85 C0 0F 85 B6 00 00 00
+
+. 0 80561E1 58
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DF 5D D4 D9 5D C8 DD A5 58 FF FF FF 66 81 7D D4 D3 04 75 2A
+
+. 0 805621B 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 D5 22 FF FF
+
+. 0 805622C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056233 13
+. 83 EC 0C 68 6E 6D 06 08 E8 D0 21 FF FF
+
+fistps_5 ... ok
+. 0 8056240 5
+. 83 C4 10 EB 62
+
+. 0 80562A7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632A4 5
+. E8 06 30 FF FF
+
+. 0 80562AF 85
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 66 C7 45 D6 00 00 66 8B 45 D6 66 89 45 D4 83 EC 08 6A 01 68 40 A1 06 08 E8 EC 20 FF FF
+
+. 0 8056304 11
+. 83 C4 10 85 C0 0F 85 B6 00 00 00
+
+. 0 805630F 58
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DF 5D D4 D9 5D C8 DD A5 58 FF FF FF 66 81 7D D4 2E FB 75 2A
+
+. 0 8056349 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 A7 21 FF FF
+
+. 0 805635A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056361 13
+. 83 EC 0C 68 A9 6D 06 08 E8 A2 20 FF FF
+
+fistps_6 ... ok
+. 0 805636E 5
+. 83 C4 10 EB 62
+
+. 0 80563D5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632A9 5
+. E8 2F 31 FF FF
+
+. 0 80563DD 85
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 66 C7 45 D6 00 00 66 8B 45 D6 66 89 45 D4 83 EC 08 6A 01 68 40 A1 06 08 E8 BE 1F FF FF
+
+. 0 8056432 11
+. 83 C4 10 85 C0 0F 85 B6 00 00 00
+
+. 0 805643D 58
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DF 5D D4 D9 5D C8 DD A5 58 FF FF FF 66 81 7D D4 D2 04 75 2A
+
+. 0 8056477 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 79 20 FF FF
+
+. 0 8056488 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805648F 13
+. 83 EC 0C 68 E4 6D 06 08 E8 74 1F FF FF
+
+fistps_7 ... ok
+. 0 805649C 5
+. 83 C4 10 EB 62
+
+. 0 8056503 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632AE 5
+. E8 58 32 FF FF
+
+. 0 805650B 85
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 66 C7 45 D6 00 00 66 8B 45 D6 66 89 45 D4 83 EC 08 6A 01 68 40 A1 06 08 E8 90 1E FF FF
+
+. 0 8056560 11
+. 83 C4 10 85 C0 0F 85 B6 00 00 00
+
+. 0 805656B 58
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DF 5D D4 D9 5D C8 DD A5 58 FF FF FF 66 81 7D D4 2E FB 75 2A
+
+. 0 80565A5 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 4B 1F FF FF
+
+. 0 80565B6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80565BD 13
+. 83 EC 0C 68 1F 6E 06 08 E8 46 1E FF FF
+
+fistps_8 ... ok
+. 0 80565CA 5
+. 83 C4 10 EB 62
+
+. 0 8056631 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632B3 5
+. E8 81 33 FF FF
+
+. 0 8056639 92
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D4 00 00 00 00 8B 45 D4 89 45 D0 83 EC 08 6A 01 68 40 A1 06 08 E8 5B 1D FF FF
+
+. 0 8056695 11
+. 83 C4 10 85 C0 0F 85 B4 00 00 00
+
+. 0 80566A0 58
+. 9B DD B5 58 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DB 5D D0 D9 5D C8 DD A5 58 FF FF FF 81 7D D0 88 D6 12 00 75 2A
+
+. 0 80566DA 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 16 1E FF FF
+
+. 0 80566EB 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80566F2 13
+. 83 EC 0C 68 5A 6E 06 08 E8 11 1D FF FF
+
+fistpl_1 ... ok
+. 0 80566FF 5
+. 83 C4 10 EB 60
+
+. 0 8056764 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632B8 5
+. E8 AF 34 FF FF
+
+. 0 805676C 92
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D4 00 00 00 00 8B 45 D4 89 45 D0 83 EC 08 6A 01 68 40 A1 06 08 E8 28 1C FF FF
+
+. 0 80567C8 11
+. 83 C4 10 85 C0 0F 85 B4 00 00 00
+
+. 0 80567D3 58
+. 9B DD B5 58 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DB 5D D0 D9 5D C8 DD A5 58 FF FF FF 81 7D D0 78 29 ED FF 75 2A
+
+. 0 805680D 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 E3 1C FF FF
+
+. 0 805681E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056825 13
+. 83 EC 0C 68 95 6E 06 08 E8 DE 1B FF FF
+
+fistpl_2 ... ok
+. 0 8056832 5
+. 83 C4 10 EB 60
+
+. 0 8056897 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632BD 5
+. E8 DD 35 FF FF
+
+. 0 805689F 92
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D4 00 00 00 00 8B 45 D4 89 45 D0 83 EC 08 6A 01 68 40 A1 06 08 E8 F5 1A FF FF
+
+. 0 80568FB 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 8056906 59
+. 9B DD B5 58 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DB 5D D0 D9 5D C8 DD A5 58 FF FF FF 81 7D D0 87 D6 12 00 75 2A
+
+. 0 8056941 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 AF 1B FF FF
+
+. 0 8056952 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056959 13
+. 83 EC 0C 68 D0 6E 06 08 E8 AA 1A FF FF
+
+fistpl_3 ... ok
+. 0 8056966 5
+. 83 C4 10 EB 60
+
+. 0 80569CB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632C2 5
+. E8 0C 37 FF FF
+
+. 0 80569D3 92
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D4 00 00 00 00 8B 45 D4 89 45 D0 83 EC 08 6A 01 68 40 A1 06 08 E8 C1 19 FF FF
+
+. 0 8056A2F 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 8056A3A 59
+. 9B DD B5 58 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DB 5D D0 D9 5D C8 DD A5 58 FF FF FF 81 7D D0 78 29 ED FF 75 2A
+
+. 0 8056A75 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 7B 1A FF FF
+
+. 0 8056A86 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056A8D 13
+. 83 EC 0C 68 0B 6F 06 08 E8 76 19 FF FF
+
+fistpl_4 ... ok
+. 0 8056A9A 5
+. 83 C4 10 EB 60
+
+. 0 8056AFF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632C7 5
+. E8 3B 38 FF FF
+
+. 0 8056B07 92
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D4 00 00 00 00 8B 45 D4 89 45 D0 83 EC 08 6A 01 68 40 A1 06 08 E8 8D 18 FF FF
+
+. 0 8056B63 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 8056B6E 59
+. 9B DD B5 58 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DB 5D D0 D9 5D C8 DD A5 58 FF FF FF 81 7D D0 88 D6 12 00 75 2A
+
+. 0 8056BA9 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 47 19 FF FF
+
+. 0 8056BBA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056BC1 13
+. 83 EC 0C 68 46 6F 06 08 E8 42 18 FF FF
+
+fistpl_5 ... ok
+. 0 8056BCE 5
+. 83 C4 10 EB 60
+
+. 0 8056C33 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632CC 5
+. E8 6A 39 FF FF
+
+. 0 8056C3B 92
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D4 00 00 00 00 8B 45 D4 89 45 D0 83 EC 08 6A 01 68 40 A1 06 08 E8 59 17 FF FF
+
+. 0 8056C97 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 8056CA2 59
+. 9B DD B5 58 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DB 5D D0 D9 5D C8 DD A5 58 FF FF FF 81 7D D0 79 29 ED FF 75 2A
+
+. 0 8056CDD 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 13 18 FF FF
+
+. 0 8056CEE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056CF5 13
+. 83 EC 0C 68 81 6F 06 08 E8 0E 17 FF FF
+
+fistpl_6 ... ok
+. 0 8056D02 5
+. 83 C4 10 EB 60
+
+. 0 8056D67 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632D1 5
+. E8 99 3A FF FF
+
+. 0 8056D6F 92
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D4 00 00 00 00 8B 45 D4 89 45 D0 83 EC 08 6A 01 68 40 A1 06 08 E8 25 16 FF FF
+
+. 0 8056DCB 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 8056DD6 59
+. 9B DD B5 58 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DB 5D D0 D9 5D C8 DD A5 58 FF FF FF 81 7D D0 87 D6 12 00 75 2A
+
+. 0 8056E11 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 DF 16 FF FF
+
+. 0 8056E22 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056E29 13
+. 83 EC 0C 68 BC 6F 06 08 E8 DA 15 FF FF
+
+fistpl_7 ... ok
+. 0 8056E36 5
+. 83 C4 10 EB 60
+
+. 0 8056E9B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632D6 5
+. E8 C8 3B FF FF
+
+. 0 8056EA3 92
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D4 00 00 00 00 8B 45 D4 89 45 D0 83 EC 08 6A 01 68 40 A1 06 08 E8 F1 14 FF FF
+
+. 0 8056EFF 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 8056F0A 59
+. 9B DD B5 58 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DB 5D D0 D9 5D C8 DD A5 58 FF FF FF 81 7D D0 79 29 ED FF 75 2A
+
+. 0 8056F45 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C8 E8 AB 15 FF FF
+
+. 0 8056F56 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8056F5D 13
+. 83 EC 0C 68 F7 6F 06 08 E8 A6 14 FF FF
+
+fistpl_8 ... ok
+. 0 8056F6A 5
+. 83 C4 10 EB 60
+
+. 0 8056FCF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632DB 5
+. E8 F7 3C FF FF
+
+. 0 8056FD7 105
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 66 AC 49 7E BA 21 12 DC 42 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 40 A1 06 08 E8 B0 13 FF FF
+
+. 0 8057040 11
+. 83 C4 10 85 C0 0F 85 CA 00 00 00
+
+. 0 805704B 72
+. 9B DD B5 48 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DF 7D C8 D9 5D C0 DD A5 48 FF FF FF 8B 45 CC 35 48 70 00 00 8B 55 C8 81 F2 B2 26 F9 85 09 D0 85 C0 75 2A
+
+. 0 8057093 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C0 E8 5D 14 FF FF
+
+. 0 80570A4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80570AB 13
+. 83 EC 0C 68 32 70 06 08 E8 58 13 FF FF
+
+fistpq_1 ... ok
+. 0 80570B8 5
+. 83 C4 10 EB 68
+
+. 0 8057125 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632E0 5
+. E8 48 3E FF FF
+
+. 0 805712D 105
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 66 AC 49 7E BA 21 12 DC C2 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 40 A1 06 08 E8 5A 12 FF FF
+
+. 0 8057196 11
+. 83 C4 10 85 C0 0F 85 CA 00 00 00
+
+. 0 80571A1 72
+. 9B DD B5 48 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 DF 7D C8 D9 5D C0 DD A5 48 FF FF FF 8B 45 CC 35 B7 8F FF FF 8B 55 C8 81 F2 4E D9 06 7A 09 D0 85 C0 75 2A
+
+. 0 80571E9 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C0 E8 07 13 FF FF
+
+. 0 80571FA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057201 13
+. 83 EC 0C 68 95 70 06 08 E8 02 12 FF FF
+
+fistpq_2 ... ok
+. 0 805720E 5
+. 83 C4 10 EB 68
+
+. 0 805727B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632E5 5
+. E8 99 3F FF FF
+
+. 0 8057283 105
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 66 AC 49 7E BA 21 12 DC 42 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 40 A1 06 08 E8 04 11 FF FF
+
+. 0 80572EC 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 80572F7 73
+. 9B DD B5 48 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DF 7D C8 D9 5D C0 DD A5 48 FF FF FF 8B 45 CC 35 48 70 00 00 8B 55 C8 81 F2 B1 26 F9 85 09 D0 85 C0 75 2A
+
+. 0 8057340 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C0 E8 B0 11 FF FF
+
+. 0 8057351 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057358 13
+. 83 EC 0C 68 D0 70 06 08 E8 AB 10 FF FF
+
+fistpq_3 ... ok
+. 0 8057365 5
+. 83 C4 10 EB 68
+
+. 0 80573D2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632EA 5
+. E8 EB 40 FF FF
+
+. 0 80573DA 105
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 66 AC 49 7E BA 21 12 DC C2 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 40 A1 06 08 E8 AD 0F FF FF
+
+. 0 8057443 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 805744E 73
+. 9B DD B5 48 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 DF 7D C8 D9 5D C0 DD A5 48 FF FF FF 8B 45 CC 35 B7 8F FF FF 8B 55 C8 81 F2 4E D9 06 7A 09 D0 85 C0 75 2A
+
+. 0 8057497 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C0 E8 59 10 FF FF
+
+. 0 80574A8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80574AF 13
+. 83 EC 0C 68 0B 71 06 08 E8 54 0F FF FF
+
+fistpq_4 ... ok
+. 0 80574BC 5
+. 83 C4 10 EB 68
+
+. 0 8057529 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632EF 5
+. E8 3D 42 FF FF
+
+. 0 8057531 105
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 66 AC 49 7E BA 21 12 DC 42 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 40 A1 06 08 E8 56 0E FF FF
+
+. 0 805759A 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 80575A5 73
+. 9B DD B5 48 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DF 7D C8 D9 5D C0 DD A5 48 FF FF FF 8B 45 CC 35 48 70 00 00 8B 55 C8 81 F2 B2 26 F9 85 09 D0 85 C0 75 2A
+
+. 0 80575EE 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C0 E8 02 0F FF FF
+
+. 0 80575FF 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057606 13
+. 83 EC 0C 68 46 71 06 08 E8 FD 0D FF FF
+
+fistpq_5 ... ok
+. 0 8057613 5
+. 83 C4 10 EB 68
+
+. 0 8057680 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632F4 5
+. E8 8F 43 FF FF
+
+. 0 8057688 105
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 66 AC 49 7E BA 21 12 DC C2 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 40 A1 06 08 E8 FF 0C FF FF
+
+. 0 80576F1 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 80576FC 73
+. 9B DD B5 48 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 DF 7D C8 D9 5D C0 DD A5 48 FF FF FF 8B 45 CC 35 B7 8F FF FF 8B 55 C8 81 F2 4F D9 06 7A 09 D0 85 C0 75 2A
+
+. 0 8057745 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C0 E8 AB 0D FF FF
+
+. 0 8057756 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805775D 13
+. 83 EC 0C 68 81 71 06 08 E8 A6 0C FF FF
+
+fistpq_6 ... ok
+. 0 805776A 5
+. 83 C4 10 EB 68
+
+. 0 80577D7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632F9 5
+. E8 E1 44 FF FF
+
+. 0 80577DF 105
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 66 AC 49 7E BA 21 12 DC 42 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 40 A1 06 08 E8 A8 0B FF FF
+
+. 0 8057848 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 8057853 73
+. 9B DD B5 48 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DF 7D C8 D9 5D C0 DD A5 48 FF FF FF 8B 45 CC 35 48 70 00 00 8B 55 C8 81 F2 B1 26 F9 85 09 D0 85 C0 75 2A
+
+. 0 805789C 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C0 E8 54 0C FF FF
+
+. 0 80578AD 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80578B4 13
+. 83 EC 0C 68 BC 71 06 08 E8 4F 0B FF FF
+
+fistpq_7 ... ok
+. 0 80578C1 5
+. 83 C4 10 EB 68
+
+. 0 805792E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80632FE 5
+. E8 33 46 FF FF
+
+. 0 8057936 105
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 66 AC 49 7E BA 21 12 DC C2 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 8E E3 8A 44 89 45 D8 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 40 A1 06 08 E8 51 0A FF FF
+
+. 0 805799F 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 80579AA 73
+. 9B DD B5 48 FF FF FF D9 45 D8 DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 DF 7D C8 D9 5D C0 DD A5 48 FF FF FF 8B 45 CC 35 B7 8F FF FF 8B 55 C8 81 F2 4F D9 06 7A 09 D0 85 C0 75 2A
+
+. 0 80579F3 17
+. 83 EC 08 B8 8E E3 8A 44 50 FF 75 C0 E8 FD 0A FF FF
+
+. 0 8057A04 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057A0B 13
+. 83 EC 0C 68 F7 71 06 08 E8 F8 09 FF FF
+
+fistpq_8 ... ok
+. 0 8057A18 5
+. 83 C4 10 EB 68
+
+. 0 8057A85 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063303 5
+. E8 85 47 FF FF
+
+. 0 8057A8D 42
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 39 09 FF FF
+
+. 0 8057AB7 7
+. 83 C4 10 85 C0 75 75
+
+. 0 8057ABE 36
+. 9B DD B5 68 FF FF FF D9 45 E4 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D8 E8 1F 0A FF FF
+
+. 0 8057AE2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057AE9 13
+. 83 EC 0C 68 32 72 06 08 E8 1A 09 FF FF
+
+flds_1 ... ok
+. 0 8057AF6 5
+. 83 C4 10 EB 48
+
+. 0 8057B43 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063308 5
+. E8 3E 48 FF FF
+
+. 0 8057B4B 42
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 7B 08 FF FF
+
+. 0 8057B75 7
+. 83 C4 10 85 C0 75 75
+
+. 0 8057B7C 36
+. 9B DD B5 68 FF FF FF D9 45 E4 D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 2B 52 9A C4 50 FF 75 D8 E8 61 09 FF FF
+
+. 0 8057BA0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057BA7 13
+. 83 EC 0C 68 67 72 06 08 E8 5C 08 FF FF
+
+flds_2 ... ok
+. 0 8057BB4 5
+. 83 C4 10 EB 48
+
+. 0 8057C01 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806330D 5
+. E8 F7 48 FF FF
+
+. 0 8057C09 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 AE 07 FF FF
+
+. 0 8057C42 7
+. 83 C4 10 85 C0 75 77
+
+. 0 8057C49 42
+. 9B DD B5 68 FF FF FF DD 45 E0 DD 5D D8 DD A5 68 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 DC FF 75 D8 E8 0F 09 FF FF
+
+. 0 8057C73 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057C7A 13
+. 83 EC 0C 68 9C 72 06 08 E8 89 07 FF FF
+
+fldl_1 ... ok
+. 0 8057C87 5
+. 83 C4 10 EB 44
+
+. 0 8057CD0 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063312 5
+. E8 C1 49 FF FF
+
+. 0 8057CD8 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 DF 06 FF FF
+
+. 0 8057D11 7
+. 83 C4 10 85 C0 75 77
+
+. 0 8057D18 42
+. 9B DD B5 68 FF FF FF DD 45 E0 DD 5D D8 DD A5 68 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 C1 52 50 FF 75 DC FF 75 D8 E8 40 08 FF FF
+
+. 0 8057D42 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057D49 13
+. 83 EC 0C 68 D1 72 06 08 E8 BA 06 FF FF
+
+fldl_2 ... ok
+. 0 8057D56 5
+. 83 C4 10 EB 44
+
+. 0 8057D9F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063317 5
+. E8 8B 4A FF FF
+
+. 0 8057DA7 49
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 18 06 FF FF
+
+. 0 8057DD8 11
+. 83 C4 10 85 C0 0F 85 C0 00 00 00
+
+. 0 8057DE3 49
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 EE D9 C2 D9 5D D8 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2B 52 9A 44 50 FF 75 D8 E8 ED 06 FF FF
+
+. 0 8057E14 7
+. 83 C4 10 85 C0 74 2A
+
+. 0 8057E1B 17
+. 83 EC 08 B8 2B 52 9A 44 50 FF 75 D0 E8 D5 06 FF FF
+
+. 0 8057E2C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057E33 13
+. 83 EC 0C 68 06 73 06 08 E8 D0 05 FF FF
+
+fld_1 ... ok
+. 0 8057E40 5
+. 83 C4 10 EB 6E
+
+. 0 8057EB3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806331C 5
+. E8 9A 4B FF FF
+
+. 0 8057EBB 49
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 04 05 FF FF
+
+. 0 8057EEC 11
+. 83 C4 10 85 C0 0F 85 C0 00 00 00
+
+. 0 8057EF7 49
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 EE D9 C2 D9 5D D8 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 2B 52 9A C4 50 FF 75 D8 E8 D9 05 FF FF
+
+. 0 8057F28 7
+. 83 C4 10 85 C0 74 2A
+
+. 0 8057F2F 17
+. 83 EC 08 B8 2B 52 9A C4 50 FF 75 D0 E8 C1 05 FF FF
+
+. 0 8057F40 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8057F47 13
+. 83 EC 0C 68 38 73 06 08 E8 BC 04 FF FF
+
+fld_2 ... ok
+. 0 8057F54 5
+. 83 C4 10 EB 6E
+
+. 0 8057FC7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063321 5
+. E8 A9 4C FF FF
+
+. 0 8057FCF 57
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 E8 03 FF FF
+
+. 0 8058008 11
+. 83 C4 10 85 C0 0F 85 C4 00 00 00
+
+. 0 8058013 55
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE D9 EE D9 C2 DD 5D D8 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 DC FF 75 D8 E8 38 05 FF FF
+
+. 0 805804A 7
+. 83 C4 10 85 C0 74 30
+
+. 0 8058051 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 D4 FF 75 D0 E8 1A 05 FF FF
+
+. 0 8058068 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805806F 13
+. 83 EC 0C 68 6A 73 06 08 E8 94 03 FF FF
+
+fld_3 ... ok
+. 0 805807C 5
+. 83 C4 10 EB 66
+
+. 0 80580E7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063326 5
+. E8 C4 4D FF FF
+
+. 0 80580EF 27
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 83 EC 08 6A 01 68 40 A1 06 08 E8 E6 02 FF FF
+
+. 0 805810A 7
+. 83 C4 10 85 C0 75 76
+
+. 0 8058111 41
+. 9B DD B5 68 FF FF FF D9 E8 DD 5D E0 DD A5 68 FF FF FF B8 00 00 00 00 BA 00 00 F0 3F 52 50 FF 75 E4 FF 75 E0 E8 48 04 FF FF
+
+. 0 805813A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058141 13
+. 83 EC 0C 68 9C 73 06 08 E8 C2 02 FF FF
+
+fld1_1 ... ok
+. 0 805814E 5
+. 83 C4 10 EB 44
+
+. 0 8058197 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806332B 5
+. E8 6F 4E FF FF
+
+. 0 805819F 27
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 83 EC 08 6A 01 68 40 A1 06 08 E8 36 02 FF FF
+
+. 0 80581BA 7
+. 83 C4 10 85 C0 75 76
+
+. 0 80581C1 41
+. 9B DD B5 68 FF FF FF D9 E9 DD 5D E0 DD A5 68 FF FF FF B8 71 A3 79 09 BA 4F 93 0A 40 52 50 FF 75 E4 FF 75 E0 E8 98 03 FF FF
+
+. 0 80581EA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80581F1 13
+. 83 EC 0C 68 D1 73 06 08 E8 12 02 FF FF
+
+fldl2t_1 ... ok
+. 0 80581FE 5
+. 83 C4 10 EB 44
+
+. 0 8058247 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063330 5
+. E8 1A 4F FF FF
+
+. 0 805824F 27
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 83 EC 08 6A 01 68 40 A1 06 08 E8 86 01 FF FF
+
+. 0 805826A 7
+. 83 C4 10 85 C0 75 76
+
+. 0 8058271 41
+. 9B DD B5 68 FF FF FF D9 EA DD 5D E0 DD A5 68 FF FF FF B8 FC 82 2B 65 BA 47 15 F7 3F 52 50 FF 75 E4 FF 75 E0 E8 E8 02 FF FF
+
+. 0 805829A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80582A1 13
+. 83 EC 0C 68 0C 74 06 08 E8 62 01 FF FF
+
+fldl2e_1 ... ok
+. 0 80582AE 5
+. 83 C4 10 EB 44
+
+. 0 80582F7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063335 5
+. E8 C5 4F FF FF
+
+. 0 80582FF 27
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 83 EC 08 6A 01 68 40 A1 06 08 E8 D6 00 FF FF
+
+. 0 805831A 7
+. 83 C4 10 85 C0 75 76
+
+. 0 8058321 41
+. 9B DD B5 68 FF FF FF D9 EB DD 5D E0 DD A5 68 FF FF FF B8 18 2D 44 54 BA FB 21 09 40 52 50 FF 75 E4 FF 75 E0 E8 38 02 FF FF
+
+. 0 805834A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058351 13
+. 83 EC 0C 68 47 74 06 08 E8 B2 00 FF FF
+
+fldpi_1 ... ok
+. 0 805835E 5
+. 83 C4 10 EB 44
+
+. 0 80583A7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806333A 5
+. E8 70 50 FF FF
+
+. 0 80583AF 27
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 83 EC 08 6A 01 68 40 A1 06 08 E8 26 00 FF FF
+
+. 0 80583CA 7
+. 83 C4 10 85 C0 75 76
+
+. 0 80583D1 41
+. 9B DD B5 68 FF FF FF D9 EC DD 5D E0 DD A5 68 FF FF FF B8 FF 79 9F 50 BA 13 44 D3 3F 52 50 FF 75 E4 FF 75 E0 E8 88 01 FF FF
+
+. 0 80583FA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058401 13
+. 83 EC 0C 68 7F 74 06 08 E8 02 00 FF FF
+
+fldlg2_1 ... ok
+. 0 805840E 5
+. 83 C4 10 EB 44
+
+. 0 8058457 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806333F 5
+. E8 1B 51 FF FF
+
+. 0 805845F 27
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 83 EC 08 6A 01 68 40 A1 06 08 E8 76 FF FE FF
+
+. 0 805847A 7
+. 83 C4 10 85 C0 75 76
+
+. 0 8058481 41
+. 9B DD B5 68 FF FF FF D9 ED DD 5D E0 DD A5 68 FF FF FF B8 EF 39 FA FE BA 42 2E E6 3F 52 50 FF 75 E4 FF 75 E0 E8 D8 00 FF FF
+
+. 0 80584AA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80584B1 13
+. 83 EC 0C 68 BA 74 06 08 E8 52 FF FE FF
+
+fldln2_1 ... ok
+. 0 80584BE 5
+. 83 C4 10 EB 44
+
+. 0 8058507 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063344 5
+. E8 C6 51 FF FF
+
+. 0 805850F 27
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 83 EC 08 6A 01 68 40 A1 06 08 E8 C6 FE FE FF
+
+. 0 805852A 7
+. 83 C4 10 85 C0 75 76
+
+. 0 8058531 41
+. 9B DD B5 68 FF FF FF D9 EE DD 5D E0 DD A5 68 FF FF FF B8 00 00 00 00 BA 00 00 00 00 52 50 FF 75 E4 FF 75 E0 E8 28 00 FF FF
+
+. 0 805855A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058561 13
+. 83 EC 0C 68 F5 74 06 08 E8 A2 FE FE FF
+
+fldz_1 ... ok
+. 0 805856E 5
+. 83 C4 10 EB 44
+
+. 0 80585B7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063349 5
+. E8 71 52 FF FF
+
+. 0 80585BF 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 F1 FD FE FF
+
+. 0 80585FF 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8058606 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 4D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 D4 FE FE FF
+
+. 0 805862D 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058634 13
+. 83 EC 0C 68 2A 75 06 08 E8 CF FD FE FF
+
+fmuls_1 ... ok
+. 0 8058641 5
+. 83 C4 10 EB 48
+
+. 0 805868E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806334E 5
+. E8 43 53 FF FF
+
+. 0 8058696 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1A FD FE FF
+
+. 0 80586D6 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80586DD 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 4D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 FD FD FE FF
+
+. 0 8058704 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805870B 13
+. 83 EC 0C 68 62 75 06 08 E8 F8 FC FE FF
+
+fmuls_2 ... ok
+. 0 8058718 5
+. 83 C4 10 EB 48
+
+. 0 8058765 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063353 5
+. E8 15 54 FF FF
+
+. 0 805876D 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 43 FC FE FF
+
+. 0 80587AD 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80587B4 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 4D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 26 FD FE FF
+
+. 0 80587DB 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80587E2 13
+. 83 EC 0C 68 9A 75 06 08 E8 21 FC FE FF
+
+fmuls_3 ... ok
+. 0 80587EF 5
+. 83 C4 10 EB 48
+
+. 0 805883C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063358 5
+. E8 E7 54 FF FF
+
+. 0 8058844 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 6C FB FE FF
+
+. 0 8058884 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805888B 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 4D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 4F FC FE FF
+
+. 0 80588B2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80588B9 13
+. 83 EC 0C 68 D2 75 06 08 E8 4A FB FE FF
+
+fmuls_4 ... ok
+. 0 80588C6 5
+. 83 C4 10 EB 48
+
+. 0 8058913 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806335D 5
+. E8 B9 55 FF FF
+
+. 0 805891B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 7E FA FE FF
+
+. 0 8058972 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8058979 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 4D D8 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 DC FB FE FF
+
+. 0 80589A6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80589AD 13
+. 83 EC 0C 68 0A 76 06 08 E8 56 FA FE FF
+
+fmull_1 ... ok
+. 0 80589BA 5
+. 83 C4 10 EB 44
+
+. 0 8058A03 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063362 5
+. E8 A4 56 FF FF
+
+. 0 8058A0B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 8E F9 FE FF
+
+. 0 8058A62 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8058A69 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 4D D8 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 EC FA FE FF
+
+. 0 8058A96 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058A9D 13
+. 83 EC 0C 68 42 76 06 08 E8 66 F9 FE FF
+
+fmull_2 ... ok
+. 0 8058AAA 5
+. 83 C4 10 EB 44
+
+. 0 8058AF3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063367 5
+. E8 8F 57 FF FF
+
+. 0 8058AFB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 9E F8 FE FF
+
+. 0 8058B52 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8058B59 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 4D D8 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 FC F9 FE FF
+
+. 0 8058B86 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058B8D 13
+. 83 EC 0C 68 7A 76 06 08 E8 76 F8 FE FF
+
+fmull_3 ... ok
+. 0 8058B9A 5
+. 83 C4 10 EB 44
+
+. 0 8058BE3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806336C 5
+. E8 7A 58 FF FF
+
+. 0 8058BEB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 AE F7 FE FF
+
+. 0 8058C42 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8058C49 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 4D D8 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 0C F9 FE FF
+
+. 0 8058C76 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058C7D 13
+. 83 EC 0C 68 B2 76 06 08 E8 86 F7 FE FF
+
+fmull_4 ... ok
+. 0 8058C8A 5
+. 83 C4 10 EB 44
+
+. 0 8058CD3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063371 5
+. E8 65 59 FF FF
+
+. 0 8058CDB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 CE F6 FE FF
+
+. 0 8058D22 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8058D2D 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC CA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 A5 F7 FE FF
+
+. 0 8058D5C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058D63 13
+. 83 EC 0C 68 EA 76 06 08 E8 A0 F6 FE FF
+
+fmul_1 ... ok
+. 0 8058D70 5
+. 83 C4 10 EB 48
+
+. 0 8058DBD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063376 5
+. E8 4A 5A FF FF
+
+. 0 8058DC5 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 E4 F5 FE FF
+
+. 0 8058E0C 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8058E17 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC CA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 BB F6 FE FF
+
+. 0 8058E46 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058E4D 13
+. 83 EC 0C 68 1F 77 06 08 E8 B6 F5 FE FF
+
+fmul_2 ... ok
+. 0 8058E5A 5
+. 83 C4 10 EB 48
+
+. 0 8058EA7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806337B 5
+. E8 2F 5B FF FF
+
+. 0 8058EAF 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 FA F4 FE FF
+
+. 0 8058EF6 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8058F01 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC CA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 D1 F5 FE FF
+
+. 0 8058F30 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8058F37 13
+. 83 EC 0C 68 54 77 06 08 E8 CC F4 FE FF
+
+fmul_3 ... ok
+. 0 8058F44 5
+. 83 C4 10 EB 48
+
+. 0 8058F91 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063380 5
+. E8 14 5C FF FF
+
+. 0 8058F99 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 10 F4 FE FF
+
+. 0 8058FE0 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8058FEB 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC CA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 E7 F4 FE FF
+
+. 0 805901A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059021 13
+. 83 EC 0C 68 89 77 06 08 E8 E2 F3 FE FF
+
+fmul_4 ... ok
+. 0 805902E 5
+. 83 C4 10 EB 48
+
+. 0 805907B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063385 5
+. E8 F9 5C FF FF
+
+. 0 8059083 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 16 F3 FE FF
+
+. 0 80590DA 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 80590E5 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC CA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 68 F4 FE FF
+
+. 0 805911A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059121 13
+. 83 EC 0C 68 BE 77 06 08 E8 E2 F2 FE FF
+
+fmul_5 ... ok
+. 0 805912E 5
+. 83 C4 10 EB 44
+
+. 0 8059177 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806338A 5
+. E8 F0 5D FF FF
+
+. 0 805917F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1A F2 FE FF
+
+. 0 80591D6 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 80591E1 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC CA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 6C F3 FE FF
+
+. 0 8059216 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805921D 13
+. 83 EC 0C 68 F3 77 06 08 E8 E6 F1 FE FF
+
+fmul_6 ... ok
+. 0 805922A 5
+. 83 C4 10 EB 44
+
+. 0 8059273 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806338F 5
+. E8 E7 5E FF FF
+
+. 0 805927B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1E F1 FE FF
+
+. 0 80592D2 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 80592DD 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC CA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 70 F2 FE FF
+
+. 0 8059312 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059319 13
+. 83 EC 0C 68 28 78 06 08 E8 EA F0 FE FF
+
+fmul_7 ... ok
+. 0 8059326 5
+. 83 C4 10 EB 44
+
+. 0 805936F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063394 5
+. E8 DE 5F FF FF
+
+. 0 8059377 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 22 F0 FE FF
+
+. 0 80593CE 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 80593D9 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC CA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 74 F1 FE FF
+
+. 0 805940E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059415 13
+. 83 EC 0C 68 5D 78 06 08 E8 EE EF FE FF
+
+fmul_8 ... ok
+. 0 8059422 5
+. 83 C4 10 EB 44
+
+. 0 805946B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063399 5
+. E8 D5 60 FF FF
+
+. 0 8059473 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 36 EF FE FF
+
+. 0 80594BA 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 80594C1 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 CA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 15 F0 FE FF
+
+. 0 80594EC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80594F3 13
+. 83 EC 0C 68 92 78 06 08 E8 10 EF FE FF
+
+fmul_9 ... ok
+. 0 8059500 5
+. 83 C4 10 EB 48
+
+. 0 805954D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806339E 5
+. E8 B2 61 FF FF
+
+. 0 8059555 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 54 EE FE FF
+
+. 0 805959C 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 80595A3 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 CA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 33 EF FE FF
+
+. 0 80595CE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80595D5 13
+. 83 EC 0C 68 C7 78 06 08 E8 2E EE FE FF
+
+fmul_10 ... ok
+. 0 80595E2 5
+. 83 C4 10 EB 48
+
+. 0 805962F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633A3 5
+. E8 8F 62 FF FF
+
+. 0 8059637 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 72 ED FE FF
+
+. 0 805967E 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8059685 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 CA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 51 EE FE FF
+
+. 0 80596B0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80596B7 13
+. 83 EC 0C 68 FF 78 06 08 E8 4C ED FE FF
+
+fmul_11 ... ok
+. 0 80596C4 5
+. 83 C4 10 EB 48
+
+. 0 8059711 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633A8 5
+. E8 6C 63 FF FF
+
+. 0 8059719 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 90 EC FE FF
+
+. 0 8059760 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8059767 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 CA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 6F ED FE FF
+
+. 0 8059792 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059799 13
+. 83 EC 0C 68 37 79 06 08 E8 6A EC FE FF
+
+fmul_12 ... ok
+. 0 80597A6 5
+. 83 C4 10 EB 48
+
+. 0 80597F3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633AD 5
+. E8 49 64 FF FF
+
+. 0 80597FB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 9E EB FE FF
+
+. 0 8059852 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8059859 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 CA DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 F8 EC FE FF
+
+. 0 805988A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059891 13
+. 83 EC 0C 68 6F 79 06 08 E8 72 EB FE FF
+
+fmul_13 ... ok
+. 0 805989E 5
+. 83 C4 10 EB 44
+
+. 0 80598E7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633B2 5
+. E8 38 65 FF FF
+
+. 0 80598EF 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 AA EA FE FF
+
+. 0 8059946 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805994D 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 CA DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 04 EC FE FF
+
+. 0 805997E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059985 13
+. 83 EC 0C 68 A7 79 06 08 E8 7E EA FE FF
+
+fmul_14 ... ok
+. 0 8059992 5
+. 83 C4 10 EB 44
+
+. 0 80599DB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633B7 5
+. E8 27 66 FF FF
+
+. 0 80599E3 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B6 E9 FE FF
+
+. 0 8059A3A 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8059A41 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 CA DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 10 EB FE FF
+
+. 0 8059A72 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059A79 13
+. 83 EC 0C 68 DF 79 06 08 E8 8A E9 FE FF
+
+fmul_15 ... ok
+. 0 8059A86 5
+. 83 C4 10 EB 44
+
+. 0 8059ACF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633BC 5
+. E8 16 67 FF FF
+
+. 0 8059AD7 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 C2 E8 FE FF
+
+. 0 8059B2E 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8059B35 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 CA DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 1C EA FE FF
+
+. 0 8059B66 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059B6D 13
+. 83 EC 0C 68 17 7A 06 08 E8 96 E8 FE FF
+
+fmul_16 ... ok
+. 0 8059B7A 5
+. 83 C4 10 EB 44
+
+. 0 8059BC3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633C1 5
+. E8 05 68 FF FF
+
+. 0 8059BCB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 DE E7 FE FF
+
+. 0 8059C12 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8059C19 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE CA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 BB E8 FE FF
+
+. 0 8059C46 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059C4D 13
+. 83 EC 0C 68 4F 7A 06 08 E8 B6 E7 FE FF
+
+fmulp_1 ... ok
+. 0 8059C5A 5
+. 83 C4 10 EB 48
+
+. 0 8059CA7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633C6 5
+. E8 E4 68 FF FF
+
+. 0 8059CAF 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 FA E6 FE FF
+
+. 0 8059CF6 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8059CFD 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE CA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 D7 E7 FE FF
+
+. 0 8059D2A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059D31 13
+. 83 EC 0C 68 87 7A 06 08 E8 D2 E6 FE FF
+
+fmulp_2 ... ok
+. 0 8059D3E 5
+. 83 C4 10 EB 48
+
+. 0 8059D8B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633CB 5
+. E8 C3 69 FF FF
+
+. 0 8059D93 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 16 E6 FE FF
+
+. 0 8059DDA 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8059DE1 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE CA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 F3 E6 FE FF
+
+. 0 8059E0E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059E15 13
+. 83 EC 0C 68 BF 7A 06 08 E8 EE E5 FE FF
+
+fmulp_3 ... ok
+. 0 8059E22 5
+. 83 C4 10 EB 48
+
+. 0 8059E6F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633D0 5
+. E8 A2 6A FF FF
+
+. 0 8059E77 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 32 E5 FE FF
+
+. 0 8059EBE 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8059EC5 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE CA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 0F E6 FE FF
+
+. 0 8059EF2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059EF9 13
+. 83 EC 0C 68 F7 7A 06 08 E8 0A E5 FE FF
+
+fmulp_4 ... ok
+. 0 8059F06 5
+. 83 C4 10 EB 48
+
+. 0 8059F53 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633D5 5
+. E8 81 6B FF FF
+
+. 0 8059F5B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 3E E4 FE FF
+
+. 0 8059FB2 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8059FBD 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE CA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 92 E5 FE FF
+
+. 0 8059FF0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8059FF7 13
+. 83 EC 0C 68 2F 7B 06 08 E8 0C E4 FE FF
+
+fmulp_5 ... ok
+. 0 805A004 5
+. 83 C4 10 EB 44
+
+. 0 805A04D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633DA 5
+. E8 76 6C FF FF
+
+. 0 805A055 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 44 E3 FE FF
+
+. 0 805A0AC 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805A0B7 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE CA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 98 E4 FE FF
+
+. 0 805A0EA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A0F1 13
+. 83 EC 0C 68 67 7B 06 08 E8 12 E3 FE FF
+
+fmulp_6 ... ok
+. 0 805A0FE 5
+. 83 C4 10 EB 44
+
+. 0 805A147 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633DF 5
+. E8 6B 6D FF FF
+
+. 0 805A14F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 4A E2 FE FF
+
+. 0 805A1A6 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805A1B1 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE CA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 9E E3 FE FF
+
+. 0 805A1E4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A1EB 13
+. 83 EC 0C 68 9F 7B 06 08 E8 18 E2 FE FF
+
+fmulp_7 ... ok
+. 0 805A1F8 5
+. 83 C4 10 EB 44
+
+. 0 805A241 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633E4 5
+. E8 60 6E FF FF
+
+. 0 805A249 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 50 E1 FE FF
+
+. 0 805A2A0 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805A2AB 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE CA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 A4 E2 FE FF
+
+. 0 805A2DE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A2E5 13
+. 83 EC 0C 68 D7 7B 06 08 E8 1E E1 FE FF
+
+fmulp_8 ... ok
+. 0 805A2F2 5
+. 83 C4 10 EB 44
+
+. 0 805A33B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633E9 5
+. E8 55 6F FF FF
+
+. 0 805A343 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 66 E0 FE FF
+
+. 0 805A38A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805A391 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE C9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 47 E1 FE FF
+
+. 0 805A3BA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A3C1 13
+. 83 EC 0C 68 0F 7C 06 08 E8 42 E0 FE FF
+
+fmulp_9 ... ok
+. 0 805A3CE 5
+. 83 C4 10 EB 48
+
+. 0 805A41B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633EE 5
+. E8 30 70 FF FF
+
+. 0 805A423 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 86 DF FE FF
+
+. 0 805A46A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805A471 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE C9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 67 E0 FE FF
+
+. 0 805A49A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A4A1 13
+. 83 EC 0C 68 47 7C 06 08 E8 62 DF FE FF
+
+fmulp_10 ... ok
+. 0 805A4AE 5
+. 83 C4 10 EB 48
+
+. 0 805A4FB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633F3 5
+. E8 0B 71 FF FF
+
+. 0 805A503 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 A6 DE FE FF
+
+. 0 805A54A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805A551 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE C9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 CB 50 FF 75 D0 E8 87 DF FE FF
+
+. 0 805A57A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A581 13
+. 83 EC 0C 68 82 7C 06 08 E8 82 DE FE FF
+
+fmulp_11 ... ok
+. 0 805A58E 5
+. 83 C4 10 EB 48
+
+. 0 805A5DB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633F8 5
+. E8 E6 71 FF FF
+
+. 0 805A5E3 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 C6 DD FE FF
+
+. 0 805A62A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805A631 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE C9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 1F 25 4B 50 FF 75 D0 E8 A7 DE FE FF
+
+. 0 805A65A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A661 13
+. 83 EC 0C 68 BD 7C 06 08 E8 A2 DD FE FF
+
+fmulp_12 ... ok
+. 0 805A66E 5
+. 83 C4 10 EB 48
+
+. 0 805A6BB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80633FD 5
+. E8 C1 72 FF FF
+
+. 0 805A6C3 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 D6 DC FE FF
+
+. 0 805A71A 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805A721 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE C9 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 32 DE FE FF
+
+. 0 805A750 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A757 13
+. 83 EC 0C 68 F8 7C 06 08 E8 AC DC FE FF
+
+fmulp_13 ... ok
+. 0 805A764 5
+. 83 C4 10 EB 44
+
+. 0 805A7AD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063402 5
+. E8 AE 73 FF FF
+
+. 0 805A7B5 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 E4 DB FE FF
+
+. 0 805A80C 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805A813 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE C9 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 40 DD FE FF
+
+. 0 805A842 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A849 13
+. 83 EC 0C 68 33 7D 06 08 E8 BA DB FE FF
+
+fmulp_14 ... ok
+. 0 805A856 5
+. 83 C4 10 EB 44
+
+. 0 805A89F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063407 5
+. E8 9B 74 FF FF
+
+. 0 805A8A7 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 F2 DA FE FF
+
+. 0 805A8FE 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805A905 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE C9 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 C2 52 50 FF 75 D4 FF 75 D0 E8 4E DC FE FF
+
+. 0 805A934 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805A93B 13
+. 83 EC 0C 68 6E 7D 06 08 E8 C8 DA FE FF
+
+fmulp_15 ... ok
+. 0 805A948 5
+. 83 C4 10 EB 44
+
+. 0 805A991 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806340C 5
+. E8 88 75 FF FF
+
+. 0 805A999 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 00 DA FE FF
+
+. 0 805A9F0 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805A9F7 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE C9 DD 5D D0 DD A5 58 FF FF FF B8 52 CB 33 56 BA 65 30 A1 42 52 50 FF 75 D4 FF 75 D0 E8 5C DB FE FF
+
+. 0 805AA26 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805AA2D 13
+. 83 EC 0C 68 A9 7D 06 08 E8 D6 D9 FE FF
+
+fmulp_16 ... ok
+. 0 805AA3A 5
+. 83 C4 10 EB 44
+
+. 0 805AA83 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063411 5
+. E8 75 76 FF FF
+
+. 0 805AA8B 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 28 D9 FE FF
+
+. 0 805AAC8 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805AACF 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 4D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 4F CC A2 4A 50 FF 75 D0 E8 0B DA FE FF
+
+. 0 805AAF6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805AAFD 13
+. 83 EC 0C 68 E4 7D 06 08 E8 06 D9 FE FF
+
+fimuls_1 ... ok
+. 0 805AB0A 5
+. 83 C4 10 EB 48
+
+. 0 805AB57 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063416 5
+. E8 44 77 FF FF
+
+. 0 805AB5F 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 54 D8 FE FF
+
+. 0 805AB9C 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805ABA3 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 4D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 4F CC A2 CA 50 FF 75 D0 E8 37 D9 FE FF
+
+. 0 805ABCA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805ABD1 13
+. 83 EC 0C 68 1F 7E 06 08 E8 32 D8 FE FF
+
+fimuls_2 ... ok
+. 0 805ABDE 5
+. 83 C4 10 EB 48
+
+. 0 805AC2B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806341B 5
+. E8 13 78 FF FF
+
+. 0 805AC33 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 80 D7 FE FF
+
+. 0 805AC70 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805AC77 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 4D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 4F CC A2 CA 50 FF 75 D0 E8 63 D8 FE FF
+
+. 0 805AC9E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805ACA5 13
+. 83 EC 0C 68 5A 7E 06 08 E8 5E D7 FE FF
+
+fimuls_3 ... ok
+. 0 805ACB2 5
+. 83 C4 10 EB 48
+
+. 0 805ACFF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063420 5
+. E8 E2 78 FF FF
+
+. 0 805AD07 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 AC D6 FE FF
+
+. 0 805AD44 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805AD4B 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 4D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 4F CC A2 4A 50 FF 75 D0 E8 8F D7 FE FF
+
+. 0 805AD72 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805AD79 13
+. 83 EC 0C 68 95 7E 06 08 E8 8A D6 FE FF
+
+fimuls_4 ... ok
+. 0 805AD86 5
+. 83 C4 10 EB 48
+
+. 0 805ADD3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063425 5
+. E8 B1 79 FF FF
+
+. 0 805ADDB 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 D0 D5 FE FF
+
+. 0 805AE20 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805AE27 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 4D DE DD 5D D0 DD A5 58 FF FF FF B8 E2 E9 26 99 BA 70 DF F3 41 52 50 FF 75 D4 FF 75 D0 E8 2E D7 FE FF
+
+. 0 805AE54 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805AE5B 13
+. 83 EC 0C 68 D0 7E 06 08 E8 A8 D5 FE FF
+
+fimuls_5 ... ok
+. 0 805AE68 5
+. 83 C4 10 EB 44
+
+. 0 805AEB1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806342A 5
+. E8 8A 7A FF FF
+
+. 0 805AEB9 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 F2 D4 FE FF
+
+. 0 805AEFE 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805AF05 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 4D DE DD 5D D0 DD A5 58 FF FF FF B8 E2 E9 26 99 BA 70 DF F3 C1 52 50 FF 75 D4 FF 75 D0 E8 50 D6 FE FF
+
+. 0 805AF32 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805AF39 13
+. 83 EC 0C 68 0B 7F 06 08 E8 CA D4 FE FF
+
+fimuls_6 ... ok
+. 0 805AF46 5
+. 83 C4 10 EB 44
+
+. 0 805AF8F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806342F 5
+. E8 63 7B FF FF
+
+. 0 805AF97 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 14 D4 FE FF
+
+. 0 805AFDC 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805AFE3 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 4D DE DD 5D D0 DD A5 58 FF FF FF B8 E2 E9 26 99 BA 70 DF F3 C1 52 50 FF 75 D4 FF 75 D0 E8 72 D5 FE FF
+
+. 0 805B010 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B017 13
+. 83 EC 0C 68 46 7F 06 08 E8 EC D3 FE FF
+
+fimuls_7 ... ok
+. 0 805B024 5
+. 83 C4 10 EB 44
+
+. 0 805B06D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063434 5
+. E8 3C 7C FF FF
+
+. 0 805B075 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 36 D3 FE FF
+
+. 0 805B0BA 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805B0C1 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 4D DE DD 5D D0 DD A5 58 FF FF FF B8 E2 E9 26 99 BA 70 DF F3 41 52 50 FF 75 D4 FF 75 D0 E8 94 D4 FE FF
+
+. 0 805B0EE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B0F5 13
+. 83 EC 0C 68 81 7F 06 08 E8 0E D3 FE FF
+
+fimuls_8 ... ok
+. 0 805B102 5
+. 83 C4 10 EB 44
+
+. 0 805B14B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063439 5
+. E8 15 7D FF FF
+
+. 0 805B153 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 5E D2 FE FF
+
+. 0 805B192 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805B199 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 4D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 08 91 C9 51 50 FF 75 D0 E8 41 D3 FE FF
+
+. 0 805B1C0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B1C7 13
+. 83 EC 0C 68 BC 7F 06 08 E8 3C D2 FE FF
+
+fimull_1 ... ok
+. 0 805B1D4 5
+. 83 C4 10 EB 48
+
+. 0 805B221 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806343E 5
+. E8 E6 7D FF FF
+
+. 0 805B229 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 88 D1 FE FF
+
+. 0 805B268 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805B26F 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 4D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 08 91 C9 D1 50 FF 75 D0 E8 6B D2 FE FF
+
+. 0 805B296 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B29D 13
+. 83 EC 0C 68 F7 7F 06 08 E8 66 D1 FE FF
+
+fimull_2 ... ok
+. 0 805B2AA 5
+. 83 C4 10 EB 48
+
+. 0 805B2F7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063443 5
+. E8 B7 7E FF FF
+
+. 0 805B2FF 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 B2 D0 FE FF
+
+. 0 805B33E 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805B345 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 4D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 08 91 C9 D1 50 FF 75 D0 E8 95 D1 FE FF
+
+. 0 805B36C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B373 13
+. 83 EC 0C 68 32 80 06 08 E8 90 D0 FE FF
+
+fimull_3 ... ok
+. 0 805B380 5
+. 83 C4 10 EB 48
+
+. 0 805B3CD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063448 5
+. E8 88 7F FF FF
+
+. 0 805B3D5 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 DC CF FE FF
+
+. 0 805B414 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805B41B 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 4D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 08 91 C9 51 50 FF 75 D0 E8 BF D0 FE FF
+
+. 0 805B442 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B449 13
+. 83 EC 0C 68 6D 80 06 08 E8 BA CF FE FF
+
+fimull_4 ... ok
+. 0 805B456 5
+. 83 C4 10 EB 48
+
+. 0 805B4A3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806344D 5
+. E8 59 80 FF FF
+
+. 0 805B4AB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC F1 FB 09 00 83 EC 08 6A 01 68 40 A1 06 08 E8 FE CE FE FF
+
+. 0 805B4F2 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805B4F9 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 4D DC DD 5D D0 DD A5 58 FF FF FF B8 81 A9 CF 1E BA 9B 82 67 42 52 50 FF 75 D4 FF 75 D0 E8 5C D0 FE FF
+
+. 0 805B526 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B52D 13
+. 83 EC 0C 68 A8 80 06 08 E8 D6 CE FE FF
+
+fimull_5 ... ok
+. 0 805B53A 5
+. 83 C4 10 EB 44
+
+. 0 805B583 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063452 5
+. E8 34 81 FF FF
+
+. 0 805B58B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC F1 FB 09 00 83 EC 08 6A 01 68 40 A1 06 08 E8 1E CE FE FF
+
+. 0 805B5D2 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805B5D9 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 4D DC DD 5D D0 DD A5 58 FF FF FF B8 81 A9 CF 1E BA 9B 82 67 C2 52 50 FF 75 D4 FF 75 D0 E8 7C CF FE FF
+
+. 0 805B606 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B60D 13
+. 83 EC 0C 68 E3 80 06 08 E8 F6 CD FE FF
+
+fimull_6 ... ok
+. 0 805B61A 5
+. 83 C4 10 EB 44
+
+. 0 805B663 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063457 5
+. E8 0F 82 FF FF
+
+. 0 805B66B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 0F 04 F6 FF 83 EC 08 6A 01 68 40 A1 06 08 E8 3E CD FE FF
+
+. 0 805B6B2 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805B6B9 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 4D DC DD 5D D0 DD A5 58 FF FF FF B8 81 A9 CF 1E BA 9B 82 67 C2 52 50 FF 75 D4 FF 75 D0 E8 9C CE FE FF
+
+. 0 805B6E6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B6ED 13
+. 83 EC 0C 68 1E 81 06 08 E8 16 CD FE FF
+
+fimull_7 ... ok
+. 0 805B6FA 5
+. 83 C4 10 EB 44
+
+. 0 805B743 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806345C 5
+. E8 EA 82 FF FF
+
+. 0 805B74B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 0F 04 F6 FF 83 EC 08 6A 01 68 40 A1 06 08 E8 5E CC FE FF
+
+. 0 805B792 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805B799 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 4D DC DD 5D D0 DD A5 58 FF FF FF B8 81 A9 CF 1E BA 9B 82 67 42 52 50 FF 75 D4 FF 75 D0 E8 BC CD FE FF
+
+. 0 805B7C6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B7CD 13
+. 83 EC 0C 68 59 81 06 08 E8 36 CC FE FF
+
+fimull_8 ... ok
+. 0 805B7DA 5
+. 83 C4 10 EB 44
+
+. 0 805B823 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063461 5
+. E8 C5 83 FF FF
+
+. 0 805B82B 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 94 CB FE FF
+
+. 0 805B85C 11
+. 83 C4 10 85 C0 0F 85 8F 00 00 00
+
+. 0 805B867 62
+. 9B DD B5 68 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 D9 FC D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 60 9A 44 50 FF 75 D8 E8 5C CC FE FF
+
+. 0 805B8A5 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B8AC 13
+. 83 EC 0C 68 94 81 06 08 E8 57 CB FE FF
+
+frndint_1 ... ok
+. 0 805B8B9 5
+. 83 C4 10 EB 48
+
+. 0 805B906 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063466 5
+. E8 A3 84 FF FF
+
+. 0 805B90E 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 B1 CA FE FF
+
+. 0 805B93F 11
+. 83 C4 10 85 C0 0F 85 8F 00 00 00
+
+. 0 805B94A 62
+. 9B DD B5 68 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 D9 FC D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 60 9A C4 50 FF 75 D8 E8 79 CB FE FF
+
+. 0 805B988 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805B98F 13
+. 83 EC 0C 68 D2 81 06 08 E8 74 CA FE FF
+
+frndint_2 ... ok
+. 0 805B99C 5
+. 83 C4 10 EB 48
+
+. 0 805B9E9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806346B 5
+. E8 81 85 FF FF
+
+. 0 805B9F1 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 CE C9 FE FF
+
+. 0 805BA22 11
+. 83 C4 10 85 C0 0F 85 90 00 00 00
+
+. 0 805BA2D 63
+. 9B DD B5 68 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 D9 FC D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 40 9A 44 50 FF 75 D8 E8 95 CA FE FF
+
+. 0 805BA6C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805BA73 13
+. 83 EC 0C 68 10 82 06 08 E8 90 C9 FE FF
+
+frndint_3 ... ok
+. 0 805BA80 5
+. 83 C4 10 EB 48
+
+. 0 805BACD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063470 5
+. E8 60 86 FF FF
+
+. 0 805BAD5 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 EA C8 FE FF
+
+. 0 805BB06 11
+. 83 C4 10 85 C0 0F 85 90 00 00 00
+
+. 0 805BB11 63
+. 9B DD B5 68 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 D9 FC D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 60 9A C4 50 FF 75 D8 E8 B1 C9 FE FF
+
+. 0 805BB50 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805BB57 13
+. 83 EC 0C 68 4E 82 06 08 E8 AC C8 FE FF
+
+frndint_4 ... ok
+. 0 805BB64 5
+. 83 C4 10 EB 48
+
+. 0 805BBB1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063475 5
+. E8 3F 87 FF FF
+
+. 0 805BBB9 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 06 C8 FE FF
+
+. 0 805BBEA 11
+. 83 C4 10 85 C0 0F 85 90 00 00 00
+
+. 0 805BBF5 63
+. 9B DD B5 68 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 D9 FC D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 60 9A 44 50 FF 75 D8 E8 CD C8 FE FF
+
+. 0 805BC34 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805BC3B 13
+. 83 EC 0C 68 8C 82 06 08 E8 C8 C7 FE FF
+
+frndint_5 ... ok
+. 0 805BC48 5
+. 83 C4 10 EB 48
+
+. 0 805BC95 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806347A 5
+. E8 1E 88 FF FF
+
+. 0 805BC9D 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 22 C7 FE FF
+
+. 0 805BCCE 11
+. 83 C4 10 85 C0 0F 85 90 00 00 00
+
+. 0 805BCD9 63
+. 9B DD B5 68 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 D9 FC D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 40 9A C4 50 FF 75 D8 E8 E9 C7 FE FF
+
+. 0 805BD18 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805BD1F 13
+. 83 EC 0C 68 CA 82 06 08 E8 E4 C6 FE FF
+
+frndint_6 ... ok
+. 0 805BD2C 5
+. 83 C4 10 EB 48
+
+. 0 805BD79 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806347F 5
+. E8 FD 88 FF FF
+
+. 0 805BD81 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 3E C6 FE FF
+
+. 0 805BDB2 11
+. 83 C4 10 85 C0 0F 85 90 00 00 00
+
+. 0 805BDBD 63
+. 9B DD B5 68 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 D9 FC D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 40 9A 44 50 FF 75 D8 E8 05 C7 FE FF
+
+. 0 805BDFC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805BE03 13
+. 83 EC 0C 68 08 83 06 08 E8 00 C6 FE FF
+
+frndint_7 ... ok
+. 0 805BE10 5
+. 83 C4 10 EB 48
+
+. 0 805BE5D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063484 5
+. E8 DC 89 FF FF
+
+. 0 805BE65 49
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 83 EC 08 6A 01 68 40 A1 06 08 E8 5A C5 FE FF
+
+. 0 805BE96 11
+. 83 C4 10 85 C0 0F 85 90 00 00 00
+
+. 0 805BEA1 63
+. 9B DD B5 68 FF FF FF D9 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 D9 FC D9 5D D8 DD A5 68 FF FF FF 83 EC 08 B8 00 40 9A C4 50 FF 75 D8 E8 21 C6 FE FF
+
+. 0 805BEE0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805BEE7 13
+. 83 EC 0C 68 46 83 06 08 E8 1C C5 FE FF
+
+frndint_8 ... ok
+. 0 805BEF4 5
+. 83 C4 10 EB 48
+
+. 0 805BF41 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063489 5
+. E8 BB 8A FF FF
+
+. 0 805BF49 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 6E C4 FE FF
+
+. 0 805BF82 11
+. 83 C4 10 85 C0 0F 85 91 00 00 00
+
+. 0 805BF8D 68
+. 9B DD B5 68 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 D9 FC DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 88 D6 32 41 52 50 FF 75 DC FF 75 D8 E8 B1 C5 FE FF
+
+. 0 805BFD1 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805BFD8 13
+. 83 EC 0C 68 84 83 06 08 E8 2B C4 FE FF
+
+frndint_9 ... ok
+. 0 805BFE5 5
+. 83 C4 10 EB 44
+
+. 0 805C02E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806348E 5
+. E8 A3 8B FF FF
+
+. 0 805C036 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 81 C3 FE FF
+
+. 0 805C06F 11
+. 83 C4 10 85 C0 0F 85 91 00 00 00
+
+. 0 805C07A 68
+. 9B DD B5 68 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 83 0C 24 00 D9 2C 24 83 C4 02 D9 FC DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 88 D6 32 C1 52 50 FF 75 DC FF 75 D8 E8 C4 C4 FE FF
+
+. 0 805C0BE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C0C5 13
+. 83 EC 0C 68 C2 83 06 08 E8 3E C3 FE FF
+
+frndint_10 ... ok
+. 0 805C0D2 5
+. 83 C4 10 EB 44
+
+. 0 805C11B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063493 5
+. E8 8B 8C FF FF
+
+. 0 805C123 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 94 C2 FE FF
+
+. 0 805C15C 11
+. 83 C4 10 85 C0 0F 85 92 00 00 00
+
+. 0 805C167 69
+. 9B DD B5 68 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 D9 FC DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 87 D6 32 41 52 50 FF 75 DC FF 75 D8 E8 D6 C3 FE FF
+
+. 0 805C1AC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C1B3 13
+. 83 EC 0C 68 03 84 06 08 E8 50 C2 FE FF
+
+frndint_11 ... ok
+. 0 805C1C0 5
+. 83 C4 10 EB 44
+
+. 0 805C209 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063498 5
+. E8 74 8D FF FF
+
+. 0 805C211 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 A6 C1 FE FF
+
+. 0 805C24A 11
+. 83 C4 10 85 C0 0F 85 92 00 00 00
+
+. 0 805C255 69
+. 9B DD B5 68 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 04 D9 2C 24 83 C4 02 D9 FC DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 88 D6 32 C1 52 50 FF 75 DC FF 75 D8 E8 E8 C2 FE FF
+
+. 0 805C29A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C2A1 13
+. 83 EC 0C 68 44 84 06 08 E8 62 C1 FE FF
+
+frndint_12 ... ok
+. 0 805C2AE 5
+. 83 C4 10 EB 44
+
+. 0 805C2F7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806349D 5
+. E8 5D 8E FF FF
+
+. 0 805C2FF 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 B8 C0 FE FF
+
+. 0 805C338 11
+. 83 C4 10 85 C0 0F 85 92 00 00 00
+
+. 0 805C343 69
+. 9B DD B5 68 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 D9 FC DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 88 D6 32 41 52 50 FF 75 DC FF 75 D8 E8 FA C1 FE FF
+
+. 0 805C388 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C38F 13
+. 83 EC 0C 68 85 84 06 08 E8 74 C0 FE FF
+
+frndint_13 ... ok
+. 0 805C39C 5
+. 83 C4 10 EB 44
+
+. 0 805C3E5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634A2 5
+. E8 46 8F FF FF
+
+. 0 805C3ED 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 CA BF FE FF
+
+. 0 805C426 11
+. 83 C4 10 85 C0 0F 85 92 00 00 00
+
+. 0 805C431 69
+. 9B DD B5 68 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 08 D9 2C 24 83 C4 02 D9 FC DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 87 D6 32 C1 52 50 FF 75 DC FF 75 D8 E8 0C C1 FE FF
+
+. 0 805C476 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C47D 13
+. 83 EC 0C 68 C6 84 06 08 E8 86 BF FE FF
+
+frndint_14 ... ok
+. 0 805C48A 5
+. 83 C4 10 EB 44
+
+. 0 805C4D3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634A7 5
+. E8 2F 90 FF FF
+
+. 0 805C4DB 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 DC BE FE FF
+
+. 0 805C514 11
+. 83 C4 10 85 C0 0F 85 92 00 00 00
+
+. 0 805C51F 69
+. 9B DD B5 68 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 D9 FC DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 87 D6 32 41 52 50 FF 75 DC FF 75 D8 E8 1E C0 FE FF
+
+. 0 805C564 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C56B 13
+. 83 EC 0C 68 07 85 06 08 E8 98 BE FE FF
+
+frndint_15 ... ok
+. 0 805C578 5
+. 83 C4 10 EB 44
+
+. 0 805C5C1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634AC 5
+. E8 18 91 FF FF
+
+. 0 805C5C9 57
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 83 EC 08 6A 01 68 40 A1 06 08 E8 EE BD FE FF
+
+. 0 805C602 11
+. 83 C4 10 85 C0 0F 85 92 00 00 00
+
+. 0 805C60D 69
+. 9B DD B5 68 FF FF FF DD 45 E0 83 EC 02 9B D9 3C 24 66 81 24 24 FF F3 66 81 0C 24 00 0C D9 2C 24 83 C4 02 D9 FC DD 5D D8 DD A5 68 FF FF FF B8 00 00 00 00 BA 87 D6 32 C1 52 50 FF 75 DC FF 75 D8 E8 30 BF FE FF
+
+. 0 805C652 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C659 13
+. 83 EC 0C 68 48 85 06 08 E8 AA BD FE FF
+
+frndint_16 ... ok
+. 0 805C666 5
+. 83 C4 10 EB 44
+
+. 0 805C6AF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634B1 5
+. E8 01 92 FF FF
+
+. 0 805C6B7 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 F9 BC FE FF
+
+. 0 805C6F7 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805C6FE 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 65 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 DC BD FE FF
+
+. 0 805C725 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C72C 13
+. 83 EC 0C 68 89 85 06 08 E8 D7 BC FE FF
+
+fsubs_1 ... ok
+. 0 805C739 5
+. 83 C4 10 EB 48
+
+. 0 805C786 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634B6 5
+. E8 D3 92 FF FF
+
+. 0 805C78E 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 22 BC FE FF
+
+. 0 805C7CE 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805C7D5 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 65 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 FF 3F 1C C6 50 FF 75 D0 E8 05 BD FE FF
+
+. 0 805C7FC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C803 13
+. 83 EC 0C 68 C1 85 06 08 E8 00 BC FE FF
+
+fsubs_2 ... ok
+. 0 805C810 5
+. 83 C4 10 EB 48
+
+. 0 805C85D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634BB 5
+. E8 A5 93 FF FF
+
+. 0 805C865 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 4B BB FE FF
+
+. 0 805C8A5 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805C8AC 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 65 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 2E BC FE FF
+
+. 0 805C8D3 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C8DA 13
+. 83 EC 0C 68 F9 85 06 08 E8 29 BB FE FF
+
+fsubs_3 ... ok
+. 0 805C8E7 5
+. 83 C4 10 EB 48
+
+. 0 805C934 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634C0 5
+. E8 77 94 FF FF
+
+. 0 805C93C 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 74 BA FE FF
+
+. 0 805C97C 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805C983 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 65 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 57 BB FE FF
+
+. 0 805C9AA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805C9B1 13
+. 83 EC 0C 68 31 86 06 08 E8 52 BA FE FF
+
+fsubs_4 ... ok
+. 0 805C9BE 5
+. 83 C4 10 EB 48
+
+. 0 805CA0B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634C5 5
+. E8 49 95 FF FF
+
+. 0 805CA13 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 86 B9 FE FF
+
+. 0 805CA6A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805CA71 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 65 D8 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 E4 BA FE FF
+
+. 0 805CA9E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805CAA5 13
+. 83 EC 0C 68 69 86 06 08 E8 5E B9 FE FF
+
+fsubl_1 ... ok
+. 0 805CAB2 5
+. 83 C4 10 EB 44
+
+. 0 805CAFB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634CA 5
+. E8 34 96 FF FF
+
+. 0 805CB03 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 96 B8 FE FF
+
+. 0 805CB5A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805CB61 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 65 D8 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 F4 B9 FE FF
+
+. 0 805CB8E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805CB95 13
+. 83 EC 0C 68 A1 86 06 08 E8 6E B8 FE FF
+
+fsubl_2 ... ok
+. 0 805CBA2 5
+. 83 C4 10 EB 44
+
+. 0 805CBEB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634CF 5
+. E8 1F 97 FF FF
+
+. 0 805CBF3 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 A6 B7 FE FF
+
+. 0 805CC4A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805CC51 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 65 D8 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 04 B9 FE FF
+
+. 0 805CC7E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805CC85 13
+. 83 EC 0C 68 D9 86 06 08 E8 7E B7 FE FF
+
+fsubl_3 ... ok
+. 0 805CC92 5
+. 83 C4 10 EB 44
+
+. 0 805CCDB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634D4 5
+. E8 0A 98 FF FF
+
+. 0 805CCE3 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B6 B6 FE FF
+
+. 0 805CD3A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805CD41 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 65 D8 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 14 B8 FE FF
+
+. 0 805CD6E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805CD75 13
+. 83 EC 0C 68 11 87 06 08 E8 8E B6 FE FF
+
+fsubl_4 ... ok
+. 0 805CD82 5
+. 83 C4 10 EB 44
+
+. 0 805CDCB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634D9 5
+. E8 F5 98 FF FF
+
+. 0 805CDD3 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 D6 B5 FE FF
+
+. 0 805CE1A 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805CE25 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC E2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 AD B6 FE FF
+
+. 0 805CE54 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805CE5B 13
+. 83 EC 0C 68 49 87 06 08 E8 A8 B5 FE FF
+
+fsub_1 ... ok
+. 0 805CE68 5
+. 83 C4 10 EB 48
+
+. 0 805CEB5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634DE 5
+. E8 DA 99 FF FF
+
+. 0 805CEBD 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 EC B4 FE FF
+
+. 0 805CF04 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805CF0F 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC E2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 C3 B5 FE FF
+
+. 0 805CF3E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805CF45 13
+. 83 EC 0C 68 7E 87 06 08 E8 BE B4 FE FF
+
+fsub_2 ... ok
+. 0 805CF52 5
+. 83 C4 10 EB 48
+
+. 0 805CF9F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634E3 5
+. E8 BF 9A FF FF
+
+. 0 805CFA7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 02 B4 FE FF
+
+. 0 805CFEE 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805CFF9 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC E2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 D9 B4 FE FF
+
+. 0 805D028 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D02F 13
+. 83 EC 0C 68 B3 87 06 08 E8 D4 B3 FE FF
+
+fsub_3 ... ok
+. 0 805D03C 5
+. 83 C4 10 EB 48
+
+. 0 805D089 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634E8 5
+. E8 A4 9B FF FF
+
+. 0 805D091 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 18 B3 FE FF
+
+. 0 805D0D8 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805D0E3 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC E2 D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 EF B3 FE FF
+
+. 0 805D112 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D119 13
+. 83 EC 0C 68 E8 87 06 08 E8 EA B2 FE FF
+
+fsub_4 ... ok
+. 0 805D126 5
+. 83 C4 10 EB 48
+
+. 0 805D173 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634ED 5
+. E8 89 9C FF FF
+
+. 0 805D17B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1E B2 FE FF
+
+. 0 805D1D2 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 805D1DD 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC E2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 70 B3 FE FF
+
+. 0 805D212 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D219 13
+. 83 EC 0C 68 1D 88 06 08 E8 EA B1 FE FF
+
+fsub_5 ... ok
+. 0 805D226 5
+. 83 C4 10 EB 44
+
+. 0 805D26F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634F2 5
+. E8 80 9D FF FF
+
+. 0 805D277 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 22 B1 FE FF
+
+. 0 805D2CE 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 805D2D9 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC E2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 74 B2 FE FF
+
+. 0 805D30E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D315 13
+. 83 EC 0C 68 52 88 06 08 E8 EE B0 FE FF
+
+fsub_6 ... ok
+. 0 805D322 5
+. 83 C4 10 EB 44
+
+. 0 805D36B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634F7 5
+. E8 77 9E FF FF
+
+. 0 805D373 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 26 B0 FE FF
+
+. 0 805D3CA 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 805D3D5 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC E2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 78 B1 FE FF
+
+. 0 805D40A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D411 13
+. 83 EC 0C 68 87 88 06 08 E8 F2 AF FE FF
+
+fsub_7 ... ok
+. 0 805D41E 5
+. 83 C4 10 EB 44
+
+. 0 805D467 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80634FC 5
+. E8 6E 9F FF FF
+
+. 0 805D46F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 2A AF FE FF
+
+. 0 805D4C6 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 805D4D1 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC E2 D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 7C B0 FE FF
+
+. 0 805D506 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D50D 13
+. 83 EC 0C 68 BC 88 06 08 E8 F6 AE FE FF
+
+fsub_8 ... ok
+. 0 805D51A 5
+. 83 C4 10 EB 44
+
+. 0 805D563 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063501 5
+. E8 65 A0 FF FF
+
+. 0 805D56B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 3E AE FE FF
+
+. 0 805D5B2 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805D5B9 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 E2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 1D AF FE FF
+
+. 0 805D5E4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D5EB 13
+. 83 EC 0C 68 F1 88 06 08 E8 18 AE FE FF
+
+fsub_9 ... ok
+. 0 805D5F8 5
+. 83 C4 10 EB 48
+
+. 0 805D645 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063506 5
+. E8 42 A1 FF FF
+
+. 0 805D64D 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 5C AD FE FF
+
+. 0 805D694 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805D69B 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 E2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 3B AE FE FF
+
+. 0 805D6C6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D6CD 13
+. 83 EC 0C 68 26 89 06 08 E8 36 AD FE FF
+
+fsub_10 ... ok
+. 0 805D6DA 5
+. 83 C4 10 EB 48
+
+. 0 805D727 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806350B 5
+. E8 1F A2 FF FF
+
+. 0 805D72F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 7A AC FE FF
+
+. 0 805D776 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805D77D 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 E2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 59 AD FE FF
+
+. 0 805D7A8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D7AF 13
+. 83 EC 0C 68 5E 89 06 08 E8 54 AC FE FF
+
+fsub_11 ... ok
+. 0 805D7BC 5
+. 83 C4 10 EB 48
+
+. 0 805D809 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063510 5
+. E8 FC A2 FF FF
+
+. 0 805D811 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 98 AB FE FF
+
+. 0 805D858 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805D85F 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 E2 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 77 AC FE FF
+
+. 0 805D88A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D891 13
+. 83 EC 0C 68 96 89 06 08 E8 72 AB FE FF
+
+fsub_12 ... ok
+. 0 805D89E 5
+. 83 C4 10 EB 48
+
+. 0 805D8EB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063515 5
+. E8 D9 A3 FF FF
+
+. 0 805D8F3 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 A6 AA FE FF
+
+. 0 805D94A 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805D951 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 E2 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 00 AC FE FF
+
+. 0 805D982 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805D989 13
+. 83 EC 0C 68 CE 89 06 08 E8 7A AA FE FF
+
+fsub_13 ... ok
+. 0 805D996 5
+. 83 C4 10 EB 44
+
+. 0 805D9DF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806351A 5
+. E8 C8 A4 FF FF
+
+. 0 805D9E7 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B2 A9 FE FF
+
+. 0 805DA3E 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805DA45 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 E2 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 0C AB FE FF
+
+. 0 805DA76 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805DA7D 13
+. 83 EC 0C 68 06 8A 06 08 E8 86 A9 FE FF
+
+fsub_14 ... ok
+. 0 805DA8A 5
+. 83 C4 10 EB 44
+
+. 0 805DAD3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806351F 5
+. E8 B7 A5 FF FF
+
+. 0 805DADB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 BE A8 FE FF
+
+. 0 805DB32 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805DB39 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 E2 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 18 AA FE FF
+
+. 0 805DB6A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805DB71 13
+. 83 EC 0C 68 3E 8A 06 08 E8 92 A8 FE FF
+
+fsub_15 ... ok
+. 0 805DB7E 5
+. 83 C4 10 EB 44
+
+. 0 805DBC7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063524 5
+. E8 A6 A6 FF FF
+
+. 0 805DBCF 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 CA A7 FE FF
+
+. 0 805DC26 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805DC2D 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 E2 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 24 A9 FE FF
+
+. 0 805DC5E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805DC65 13
+. 83 EC 0C 68 76 8A 06 08 E8 9E A7 FE FF
+
+fsub_16 ... ok
+. 0 805DC72 5
+. 83 C4 10 EB 44
+
+. 0 805DCBB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063529 5
+. E8 95 A7 FF FF
+
+. 0 805DCC3 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 E6 A6 FE FF
+
+. 0 805DD0A 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805DD11 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE E2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 C3 A7 FE FF
+
+. 0 805DD3E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805DD45 13
+. 83 EC 0C 68 AE 8A 06 08 E8 BE A6 FE FF
+
+fsubp_1 ... ok
+. 0 805DD52 5
+. 83 C4 10 EB 48
+
+. 0 805DD9F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806352E 5
+. E8 74 A8 FF FF
+
+. 0 805DDA7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 02 A6 FE FF
+
+. 0 805DDEE 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805DDF5 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE E2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 DF A6 FE FF
+
+. 0 805DE22 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805DE29 13
+. 83 EC 0C 68 E6 8A 06 08 E8 DA A5 FE FF
+
+fsubp_2 ... ok
+. 0 805DE36 5
+. 83 C4 10 EB 48
+
+. 0 805DE83 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063533 5
+. E8 53 A9 FF FF
+
+. 0 805DE8B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 1E A5 FE FF
+
+. 0 805DED2 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805DED9 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE E2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 FB A5 FE FF
+
+. 0 805DF06 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805DF0D 13
+. 83 EC 0C 68 1E 8B 06 08 E8 F6 A4 FE FF
+
+fsubp_3 ... ok
+. 0 805DF1A 5
+. 83 C4 10 EB 48
+
+. 0 805DF67 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063538 5
+. E8 32 AA FF FF
+
+. 0 805DF6F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 3A A4 FE FF
+
+. 0 805DFB6 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 805DFBD 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE E2 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 17 A5 FE FF
+
+. 0 805DFEA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805DFF1 13
+. 83 EC 0C 68 56 8B 06 08 E8 12 A4 FE FF
+
+fsubp_4 ... ok
+. 0 805DFFE 5
+. 83 C4 10 EB 48
+
+. 0 805E04B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806353D 5
+. E8 11 AB FF FF
+
+. 0 805E053 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 46 A3 FE FF
+
+. 0 805E0AA 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805E0B5 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE E2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 9A A4 FE FF
+
+. 0 805E0E8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E0EF 13
+. 83 EC 0C 68 8E 8B 06 08 E8 14 A3 FE FF
+
+fsubp_5 ... ok
+. 0 805E0FC 5
+. 83 C4 10 EB 44
+
+. 0 805E145 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063542 5
+. E8 06 AC FF FF
+
+. 0 805E14D 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 4C A2 FE FF
+
+. 0 805E1A4 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805E1AF 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE E2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 A0 A3 FE FF
+
+. 0 805E1E2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E1E9 13
+. 83 EC 0C 68 C6 8B 06 08 E8 1A A2 FE FF
+
+fsubp_6 ... ok
+. 0 805E1F6 5
+. 83 C4 10 EB 44
+
+. 0 805E23F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063547 5
+. E8 FB AC FF FF
+
+. 0 805E247 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 52 A1 FE FF
+
+. 0 805E29E 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805E2A9 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE E2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 A6 A2 FE FF
+
+. 0 805E2DC 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E2E3 13
+. 83 EC 0C 68 FE 8B 06 08 E8 20 A1 FE FF
+
+fsubp_7 ... ok
+. 0 805E2F0 5
+. 83 C4 10 EB 44
+
+. 0 805E339 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806354C 5
+. E8 F0 AD FF FF
+
+. 0 805E341 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 58 A0 FE FF
+
+. 0 805E398 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 805E3A3 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE E2 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 AC A1 FE FF
+
+. 0 805E3D6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E3DD 13
+. 83 EC 0C 68 36 8C 06 08 E8 26 A0 FE FF
+
+fsubp_8 ... ok
+. 0 805E3EA 5
+. 83 C4 10 EB 44
+
+. 0 805E433 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063551 5
+. E8 E5 AE FF FF
+
+. 0 805E43B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 6E 9F FE FF
+
+. 0 805E482 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805E489 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE E1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 4F A0 FE FF
+
+. 0 805E4B2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E4B9 13
+. 83 EC 0C 68 6E 8C 06 08 E8 4A 9F FE FF
+
+fsubp_9 ... ok
+. 0 805E4C6 5
+. 83 C4 10 EB 48
+
+. 0 805E513 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063556 5
+. E8 C0 AF FF FF
+
+. 0 805E51B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 8E 9E FE FF
+
+. 0 805E562 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805E569 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE E1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 6F 9F FE FF
+
+. 0 805E592 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E599 13
+. 83 EC 0C 68 A6 8C 06 08 E8 6A 9E FE FF
+
+fsubp_10 ... ok
+. 0 805E5A6 5
+. 83 C4 10 EB 48
+
+. 0 805E5F3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806355B 5
+. E8 9B B0 FF FF
+
+. 0 805E5FB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 AE 9D FE FF
+
+. 0 805E642 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805E649 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE E1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 8F 9E FE FF
+
+. 0 805E672 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E679 13
+. 83 EC 0C 68 E1 8C 06 08 E8 8A 9D FE FF
+
+fsubp_11 ... ok
+. 0 805E686 5
+. 83 C4 10 EB 48
+
+. 0 805E6D3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063560 5
+. E8 76 B1 FF FF
+
+. 0 805E6DB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 CE 9C FE FF
+
+. 0 805E722 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805E729 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE E1 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 AF 9D FE FF
+
+. 0 805E752 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E759 13
+. 83 EC 0C 68 1C 8D 06 08 E8 AA 9C FE FF
+
+fsubp_12 ... ok
+. 0 805E766 5
+. 83 C4 10 EB 48
+
+. 0 805E7B3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063565 5
+. E8 51 B2 FF FF
+
+. 0 805E7BB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 DE 9B FE FF
+
+. 0 805E812 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805E819 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE E1 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 3A 9D FE FF
+
+. 0 805E848 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E84F 13
+. 83 EC 0C 68 57 8D 06 08 E8 B4 9B FE FF
+
+fsubp_13 ... ok
+. 0 805E85C 5
+. 83 C4 10 EB 44
+
+. 0 805E8A5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806356A 5
+. E8 3E B3 FF FF
+
+. 0 805E8AD 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 EC 9A FE FF
+
+. 0 805E904 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805E90B 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE E1 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 48 9C FE FF
+
+. 0 805E93A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805E941 13
+. 83 EC 0C 68 92 8D 06 08 E8 C2 9A FE FF
+
+fsubp_14 ... ok
+. 0 805E94E 5
+. 83 C4 10 EB 44
+
+. 0 805E997 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806356F 5
+. E8 2B B4 FF FF
+
+. 0 805E99F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 FA 99 FE FF
+
+. 0 805E9F6 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805E9FD 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE E1 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 56 9B FE FF
+
+. 0 805EA2C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805EA33 13
+. 83 EC 0C 68 CD 8D 06 08 E8 D0 99 FE FF
+
+fsubp_15 ... ok
+. 0 805EA40 5
+. 83 C4 10 EB 44
+
+. 0 805EA89 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063574 5
+. E8 18 B5 FF FF
+
+. 0 805EA91 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 08 99 FE FF
+
+. 0 805EAE8 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 805EAEF 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE E1 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 64 9A FE FF
+
+. 0 805EB1E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805EB25 13
+. 83 EC 0C 68 08 8E 06 08 E8 DE 98 FE FF
+
+fsubp_16 ... ok
+. 0 805EB32 5
+. 83 C4 10 EB 44
+
+. 0 805EB7B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063579 5
+. E8 05 B6 FF FF
+
+. 0 805EB83 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 30 98 FE FF
+
+. 0 805EBC0 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805EBC7 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 65 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA E6 40 C5 50 FF 75 D0 E8 13 99 FE FF
+
+. 0 805EBEE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805EBF5 13
+. 83 EC 0C 68 43 8E 06 08 E8 0E 98 FE FF
+
+fisubs_1 ... ok
+. 0 805EC02 5
+. 83 C4 10 EB 48
+
+. 0 805EC4F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806357E 5
+. E8 D4 B6 FF FF
+
+. 0 805EC57 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 5C 97 FE FF
+
+. 0 805EC94 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805EC9B 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 65 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 8B 9C AD C5 50 FF 75 D0 E8 3F 98 FE FF
+
+. 0 805ECC2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805ECC9 13
+. 83 EC 0C 68 7E 8E 06 08 E8 3A 97 FE FF
+
+fisubs_2 ... ok
+. 0 805ECD6 5
+. 83 C4 10 EB 48
+
+. 0 805ED23 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063583 5
+. E8 A3 B7 FF FF
+
+. 0 805ED2B 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 88 96 FE FF
+
+. 0 805ED68 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805ED6F 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 65 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 8B 9C AD 45 50 FF 75 D0 E8 6B 97 FE FF
+
+. 0 805ED96 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805ED9D 13
+. 83 EC 0C 68 B9 8E 06 08 E8 66 96 FE FF
+
+fisubs_3 ... ok
+. 0 805EDAA 5
+. 83 C4 10 EB 48
+
+. 0 805EDF7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063588 5
+. E8 72 B8 FF FF
+
+. 0 805EDFF 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 B4 95 FE FF
+
+. 0 805EE3C 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805EE43 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 65 DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA E6 40 45 50 FF 75 D0 E8 97 96 FE FF
+
+. 0 805EE6A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805EE71 13
+. 83 EC 0C 68 F4 8E 06 08 E8 92 95 FE FF
+
+fisubs_4 ... ok
+. 0 805EE7E 5
+. 83 C4 10 EB 48
+
+. 0 805EECB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806358D 5
+. E8 41 B9 FF FF
+
+. 0 805EED3 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 D8 94 FE FF
+
+. 0 805EF18 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805EF1F 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 65 DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA A6 C5 32 41 52 50 FF 75 D4 FF 75 D0 E8 36 96 FE FF
+
+. 0 805EF4C 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805EF53 13
+. 83 EC 0C 68 2F 8F 06 08 E8 B0 94 FE FF
+
+fisubs_5 ... ok
+. 0 805EF60 5
+. 83 C4 10 EB 44
+
+. 0 805EFA9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063592 5
+. E8 1A BA FF FF
+
+. 0 805EFB1 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 FA 93 FE FF
+
+. 0 805EFF6 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805EFFD 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 65 DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 68 E7 32 C1 52 50 FF 75 D4 FF 75 D0 E8 58 95 FE FF
+
+. 0 805F02A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F031 13
+. 83 EC 0C 68 6A 8F 06 08 E8 D2 93 FE FF
+
+fisubs_6 ... ok
+. 0 805F03E 5
+. 83 C4 10 EB 44
+
+. 0 805F087 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063597 5
+. E8 F3 BA FF FF
+
+. 0 805F08F 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 1C 93 FE FF
+
+. 0 805F0D4 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805F0DB 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 65 DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 68 E7 32 41 52 50 FF 75 D4 FF 75 D0 E8 7A 94 FE FF
+
+. 0 805F108 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F10F 13
+. 83 EC 0C 68 A5 8F 06 08 E8 F4 92 FE FF
+
+fisubs_7 ... ok
+. 0 805F11C 5
+. 83 C4 10 EB 44
+
+. 0 805F165 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806359C 5
+. E8 CC BB FF FF
+
+. 0 805F16D 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 3E 92 FE FF
+
+. 0 805F1B2 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805F1B9 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 65 DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA A6 C5 32 C1 52 50 FF 75 D4 FF 75 D0 E8 9C 93 FE FF
+
+. 0 805F1E6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F1ED 13
+. 83 EC 0C 68 E0 8F 06 08 E8 16 92 FE FF
+
+fisubs_8 ... ok
+. 0 805F1FA 5
+. 83 C4 10 EB 44
+
+. 0 805F243 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635A1 5
+. E8 A5 BC FF FF
+
+. 0 805F24B 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 66 91 FE FF
+
+. 0 805F28A 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805F291 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 65 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 5C 2F A7 CC 50 FF 75 D0 E8 49 92 FE FF
+
+. 0 805F2B8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F2BF 13
+. 83 EC 0C 68 1B 90 06 08 E8 44 91 FE FF
+
+fisubl_1 ... ok
+. 0 805F2CC 5
+. 83 C4 10 EB 48
+
+. 0 805F319 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635A6 5
+. E8 76 BD FF FF
+
+. 0 805F321 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 90 90 FE FF
+
+. 0 805F360 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805F367 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 65 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 30 A7 CC 50 FF 75 D0 E8 73 91 FE FF
+
+. 0 805F38E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F395 13
+. 83 EC 0C 68 56 90 06 08 E8 6E 90 FE FF
+
+fisubl_2 ... ok
+. 0 805F3A2 5
+. 83 C4 10 EB 48
+
+. 0 805F3EF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635AB 5
+. E8 47 BE FF FF
+
+. 0 805F3F7 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 BA 8F FE FF
+
+. 0 805F436 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805F43D 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 65 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 30 A7 4C 50 FF 75 D0 E8 9D 90 FE FF
+
+. 0 805F464 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F46B 13
+. 83 EC 0C 68 91 90 06 08 E8 98 8F FE FF
+
+fisubl_3 ... ok
+. 0 805F478 5
+. 83 C4 10 EB 48
+
+. 0 805F4C5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635B0 5
+. E8 18 BF FF FF
+
+. 0 805F4CD 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 E4 8E FE FF
+
+. 0 805F50C 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805F513 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 65 DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 5C 2F A7 4C 50 FF 75 D0 E8 C7 8F FE FF
+
+. 0 805F53A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F541 13
+. 83 EC 0C 68 CC 90 06 08 E8 C2 8E FE FF
+
+fisubl_4 ... ok
+. 0 805F54E 5
+. 83 C4 10 EB 48
+
+. 0 805F59B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635B5 5
+. E8 E9 BF FF FF
+
+. 0 805F5A3 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 06 8E FE FF
+
+. 0 805F5EA 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805F5F1 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 65 DC DD 5D D0 DD A5 58 FF FF FF B8 91 32 F0 A4 BA A4 9A 94 C1 52 50 FF 75 D4 FF 75 D0 E8 64 8F FE FF
+
+. 0 805F61E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F625 13
+. 83 EC 0C 68 07 91 06 08 E8 DE 8D FE FF
+
+fisubl_5 ... ok
+. 0 805F632 5
+. 83 C4 10 EB 44
+
+. 0 805F67B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635BA 5
+. E8 C4 C0 FF FF
+
+. 0 805F683 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 26 8D FE FF
+
+. 0 805F6CA 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805F6D1 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 65 DC DD 5D D0 DD A5 58 FF FF FF B8 6F CD 0F E3 BA 58 31 95 C1 52 50 FF 75 D4 FF 75 D0 E8 84 8E FE FF
+
+. 0 805F6FE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F705 13
+. 83 EC 0C 68 42 91 06 08 E8 FE 8C FE FF
+
+fisubl_6 ... ok
+. 0 805F712 5
+. 83 C4 10 EB 44
+
+. 0 805F75B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635BF 5
+. E8 9F C1 FF FF
+
+. 0 805F763 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 46 8C FE FF
+
+. 0 805F7AA 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805F7B1 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 65 DC DD 5D D0 DD A5 58 FF FF FF B8 6F CD 0F E3 BA 58 31 95 41 52 50 FF 75 D4 FF 75 D0 E8 A4 8D FE FF
+
+. 0 805F7DE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F7E5 13
+. 83 EC 0C 68 7D 91 06 08 E8 1E 8C FE FF
+
+fisubl_7 ... ok
+. 0 805F7F2 5
+. 83 C4 10 EB 44
+
+. 0 805F83B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635C4 5
+. E8 7A C2 FF FF
+
+. 0 805F843 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 66 8B FE FF
+
+. 0 805F88A 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805F891 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 65 DC DD 5D D0 DD A5 58 FF FF FF B8 91 32 F0 A4 BA A4 9A 94 41 52 50 FF 75 D4 FF 75 D0 E8 C4 8C FE FF
+
+. 0 805F8BE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F8C5 13
+. 83 EC 0C 68 B8 91 06 08 E8 3E 8B FE FF
+
+fisubl_8 ... ok
+. 0 805F8D2 5
+. 83 C4 10 EB 44
+
+. 0 805F91B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635C9 5
+. E8 55 C3 FF FF
+
+. 0 805F923 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 8D 8A FE FF
+
+. 0 805F963 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805F96A 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 6D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 70 8B FE FF
+
+. 0 805F991 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805F998 13
+. 83 EC 0C 68 F3 91 06 08 E8 6B 8A FE FF
+
+fsubrs_1 ... ok
+. 0 805F9A5 5
+. 83 C4 10 EB 48
+
+. 0 805F9F2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635CE 5
+. E8 27 C4 FF FF
+
+. 0 805F9FA 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B6 89 FE FF
+
+. 0 805FA3A 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805FA41 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 6D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 FF 3F 1C 46 50 FF 75 D0 E8 99 8A FE FF
+
+. 0 805FA68 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805FA6F 13
+. 83 EC 0C 68 2E 92 06 08 E8 94 89 FE FF
+
+fsubrs_2 ... ok
+. 0 805FA7C 5
+. 83 C4 10 EB 48
+
+. 0 805FAC9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635D3 5
+. E8 F9 C4 FF FF
+
+. 0 805FAD1 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 DF 88 FE FF
+
+. 0 805FB11 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805FB18 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 6D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 C2 89 FE FF
+
+. 0 805FB3F 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805FB46 13
+. 83 EC 0C 68 69 92 06 08 E8 BD 88 FE FF
+
+fsubrs_3 ... ok
+. 0 805FB53 5
+. 83 C4 10 EB 48
+
+. 0 805FBA0 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635D8 5
+. E8 CB C5 FF FF
+
+. 0 805FBA8 64
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 08 88 FE FF
+
+. 0 805FBE8 7
+. 83 C4 10 85 C0 75 78
+
+. 0 805FBEF 39
+. 9B DD B5 58 FF FF FF D9 45 E0 D8 6D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 EB 88 FE FF
+
+. 0 805FC16 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805FC1D 13
+. 83 EC 0C 68 A4 92 06 08 E8 E6 87 FE FF
+
+fsubrs_4 ... ok
+. 0 805FC2A 5
+. 83 C4 10 EB 48
+
+. 0 805FC77 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635DD 5
+. E8 9D C6 FF FF
+
+. 0 805FC7F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 1A 87 FE FF
+
+. 0 805FCD6 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805FCDD 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 6D D8 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 78 88 FE FF
+
+. 0 805FD0A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805FD11 13
+. 83 EC 0C 68 DF 92 06 08 E8 F2 86 FE FF
+
+fsubrl_1 ... ok
+. 0 805FD1E 5
+. 83 C4 10 EB 44
+
+. 0 805FD67 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635E2 5
+. E8 88 C7 FF FF
+
+. 0 805FD6F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 2A 86 FE FF
+
+. 0 805FDC6 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805FDCD 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 6D D8 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 88 87 FE FF
+
+. 0 805FDFA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805FE01 13
+. 83 EC 0C 68 1A 93 06 08 E8 02 86 FE FF
+
+fsubrl_2 ... ok
+. 0 805FE0E 5
+. 83 C4 10 EB 44
+
+. 0 805FE57 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635E7 5
+. E8 73 C8 FF FF
+
+. 0 805FE5F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 3A 85 FE FF
+
+. 0 805FEB6 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805FEBD 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 6D D8 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 98 86 FE FF
+
+. 0 805FEEA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805FEF1 13
+. 83 EC 0C 68 55 93 06 08 E8 12 85 FE FF
+
+fsubrl_3 ... ok
+. 0 805FEFE 5
+. 83 C4 10 EB 44
+
+. 0 805FF47 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635EC 5
+. E8 5E C9 FF FF
+
+. 0 805FF4F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 4A 84 FE FF
+
+. 0 805FFA6 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805FFAD 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DC 6D D8 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 A8 85 FE FF
+
+. 0 805FFDA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 805FFE1 13
+. 83 EC 0C 68 90 93 06 08 E8 22 84 FE FF
+
+fsubrl_4 ... ok
+. 0 805FFEE 5
+. 83 C4 10 EB 44
+
+. 0 8060037 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635F1 5
+. E8 49 CA FF FF
+
+. 0 806003F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 6A 83 FE FF
+
+. 0 8060086 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8060091 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC EA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 41 84 FE FF
+
+. 0 80600C0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80600C7 13
+. 83 EC 0C 68 CB 93 06 08 E8 3C 83 FE FF
+
+fsubr_1 ... ok
+. 0 80600D4 5
+. 83 C4 10 EB 48
+
+. 0 8060121 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635F6 5
+. E8 2E CB FF FF
+
+. 0 8060129 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 80 82 FE FF
+
+. 0 8060170 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 806017B 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC EA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 57 83 FE FF
+
+. 0 80601AA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80601B1 13
+. 83 EC 0C 68 03 94 06 08 E8 52 82 FE FF
+
+fsubr_2 ... ok
+. 0 80601BE 5
+. 83 C4 10 EB 48
+
+. 0 806020B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80635FB 5
+. E8 13 CC FF FF
+
+. 0 8060213 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 96 81 FE FF
+
+. 0 806025A 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8060265 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC EA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 6D 82 FE FF
+
+. 0 8060294 7
+. 83 C4 10 85 C0 74 12
+
+. 0 806029B 13
+. 83 EC 0C 68 3B 94 06 08 E8 68 81 FE FF
+
+fsubr_3 ... ok
+. 0 80602A8 5
+. 83 C4 10 EB 48
+
+. 0 80602F5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063600 5
+. E8 F8 CC FF FF
+
+. 0 80602FD 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 AC 80 FE FF
+
+. 0 8060344 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 806034F 47
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DC EA D9 F7 D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 83 81 FE FF
+
+. 0 806037E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060385 13
+. 83 EC 0C 68 73 94 06 08 E8 7E 80 FE FF
+
+fsubr_4 ... ok
+. 0 8060392 5
+. 83 C4 10 EB 48
+
+. 0 80603DF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063605 5
+. E8 DD CD FF FF
+
+. 0 80603E7 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B2 7F FE FF
+
+. 0 806043E 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8060449 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC EA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 04 81 FE FF
+
+. 0 806047E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060485 13
+. 83 EC 0C 68 AB 94 06 08 E8 7E 7F FE FF
+
+fsubr_5 ... ok
+. 0 8060492 5
+. 83 C4 10 EB 44
+
+. 0 80604DB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806360A 5
+. E8 D4 CE FF FF
+
+. 0 80604E3 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 B6 7E FE FF
+
+. 0 806053A 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8060545 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC EA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 08 80 FE FF
+
+. 0 806057A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060581 13
+. 83 EC 0C 68 E3 94 06 08 E8 82 7E FE FF
+
+fsubr_6 ... ok
+. 0 806058E 5
+. 83 C4 10 EB 44
+
+. 0 80605D7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806360F 5
+. E8 CB CF FF FF
+
+. 0 80605DF 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 BA 7D FE FF
+
+. 0 8060636 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8060641 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC EA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 0C 7F FE FF
+
+. 0 8060676 7
+. 83 C4 10 85 C0 74 12
+
+. 0 806067D 13
+. 83 EC 0C 68 1B 95 06 08 E8 86 7D FE FF
+
+fsubr_7 ... ok
+. 0 806068A 5
+. 83 C4 10 EB 44
+
+. 0 80606D3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063614 5
+. E8 C2 D0 FF FF
+
+. 0 80606DB 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 BE 7C FE FF
+
+. 0 8060732 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 806073D 53
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DC EA D9 F7 D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 10 7E FE FF
+
+. 0 8060772 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060779 13
+. 83 EC 0C 68 53 95 06 08 E8 8A 7C FE FF
+
+fsubr_8 ... ok
+. 0 8060786 5
+. 83 C4 10 EB 44
+
+. 0 80607CF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063619 5
+. E8 B9 D1 FF FF
+
+. 0 80607D7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 D2 7B FE FF
+
+. 0 806081E 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8060825 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 EA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 B1 7C FE FF
+
+. 0 8060850 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060857 13
+. 83 EC 0C 68 8B 95 06 08 E8 AC 7B FE FF
+
+fsubr_9 ... ok
+. 0 8060864 5
+. 83 C4 10 EB 48
+
+. 0 80608B1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806361E 5
+. E8 96 D2 FF FF
+
+. 0 80608B9 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 F0 7A FE FF
+
+. 0 8060900 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8060907 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 EA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 CF 7B FE FF
+
+. 0 8060932 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060939 13
+. 83 EC 0C 68 C3 95 06 08 E8 CA 7A FE FF
+
+fsubr_10 ... ok
+. 0 8060946 5
+. 83 C4 10 EB 48
+
+. 0 8060993 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063623 5
+. E8 73 D3 FF FF
+
+. 0 806099B 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 0E 7A FE FF
+
+. 0 80609E2 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 80609E9 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 EA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 ED 7A FE FF
+
+. 0 8060A14 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060A1B 13
+. 83 EC 0C 68 FE 95 06 08 E8 E8 79 FE FF
+
+fsubr_11 ... ok
+. 0 8060A28 5
+. 83 C4 10 EB 48
+
+. 0 8060A75 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063628 5
+. E8 50 D4 FF FF
+
+. 0 8060A7D 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 2C 79 FE FF
+
+. 0 8060AC4 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8060ACB 43
+. 9B DD B5 58 FF FF FF D9 45 E0 D9 EE D9 45 D8 D8 EA D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 0B 7A FE FF
+
+. 0 8060AF6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060AFD 13
+. 83 EC 0C 68 39 96 06 08 E8 06 79 FE FF
+
+fsubr_12 ... ok
+. 0 8060B0A 5
+. 83 C4 10 EB 48
+
+. 0 8060B57 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806362D 5
+. E8 2D D5 FF FF
+
+. 0 8060B5F 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 3A 78 FE FF
+
+. 0 8060BB6 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8060BBD 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 EA DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 94 79 FE FF
+
+. 0 8060BEE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060BF5 13
+. 83 EC 0C 68 74 96 06 08 E8 0E 78 FE FF
+
+fsubr_13 ... ok
+. 0 8060C02 5
+. 83 C4 10 EB 44
+
+. 0 8060C4B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063632 5
+. E8 1C D6 FF FF
+
+. 0 8060C53 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 46 77 FE FF
+
+. 0 8060CAA 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8060CB1 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 EA DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 A0 78 FE FF
+
+. 0 8060CE2 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060CE9 13
+. 83 EC 0C 68 AF 96 06 08 E8 1A 77 FE FF
+
+fsubr_14 ... ok
+. 0 8060CF6 5
+. 83 C4 10 EB 44
+
+. 0 8060D3F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063637 5
+. E8 0B D7 FF FF
+
+. 0 8060D47 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 52 76 FE FF
+
+. 0 8060D9E 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8060DA5 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 EA DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 AC 77 FE FF
+
+. 0 8060DD6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060DDD 13
+. 83 EC 0C 68 EA 96 06 08 E8 26 76 FE FF
+
+fsubr_15 ... ok
+. 0 8060DEA 5
+. 83 C4 10 EB 44
+
+. 0 8060E33 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806363C 5
+. E8 FA D7 FF FF
+
+. 0 8060E3B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 5E 75 FE FF
+
+. 0 8060E92 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8060E99 49
+. 9B DD B5 58 FF FF FF DD 45 E0 D9 EE DD 45 D8 D8 EA DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 B8 76 FE FF
+
+. 0 8060ECA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060ED1 13
+. 83 EC 0C 68 25 97 06 08 E8 32 75 FE FF
+
+fsubr_16 ... ok
+. 0 8060EDE 5
+. 83 C4 10 EB 44
+
+. 0 8060F27 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063641 5
+. E8 E9 D8 FF FF
+
+. 0 8060F2F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 7A 74 FE FF
+
+. 0 8060F76 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8060F7D 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE EA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 57 75 FE FF
+
+. 0 8060FAA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8060FB1 13
+. 83 EC 0C 68 60 97 06 08 E8 52 74 FE FF
+
+fsubrp_1 ... ok
+. 0 8060FBE 5
+. 83 C4 10 EB 48
+
+. 0 806100B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063646 5
+. E8 C8 D9 FF FF
+
+. 0 8061013 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 96 73 FE FF
+
+. 0 806105A 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8061061 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE EA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 73 74 FE FF
+
+. 0 806108E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061095 13
+. 83 EC 0C 68 9B 97 06 08 E8 6E 73 FE FF
+
+fsubrp_2 ... ok
+. 0 80610A2 5
+. 83 C4 10 EB 48
+
+. 0 80610EF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806364B 5
+. E8 A7 DA FF FF
+
+. 0 80610F7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 B2 72 FE FF
+
+. 0 806113E 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8061145 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE EA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 8F 73 FE FF
+
+. 0 8061172 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061179 13
+. 83 EC 0C 68 D6 97 06 08 E8 8A 72 FE FF
+
+fsubrp_3 ... ok
+. 0 8061186 5
+. 83 C4 10 EB 48
+
+. 0 80611D3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063650 5
+. E8 86 DB FF FF
+
+. 0 80611DB 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 CE 71 FE FF
+
+. 0 8061222 7
+. 83 C4 10 85 C0 75 7E
+
+. 0 8061229 45
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 DE EA D9 F7 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 AB 72 FE FF
+
+. 0 8061256 7
+. 83 C4 10 85 C0 74 12
+
+. 0 806125D 13
+. 83 EC 0C 68 11 98 06 08 E8 A6 71 FE FF
+
+fsubrp_4 ... ok
+. 0 806126A 5
+. 83 C4 10 EB 48
+
+. 0 80612B7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063655 5
+. E8 65 DC FF FF
+
+. 0 80612BF 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 DA 70 FE FF
+
+. 0 8061316 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8061321 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE EA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 2E 72 FE FF
+
+. 0 8061354 7
+. 83 C4 10 85 C0 74 12
+
+. 0 806135B 13
+. 83 EC 0C 68 4C 98 06 08 E8 A8 70 FE FF
+
+fsubrp_5 ... ok
+. 0 8061368 5
+. 83 C4 10 EB 44
+
+. 0 80613B1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806365A 5
+. E8 5A DD FF FF
+
+. 0 80613B9 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 E0 6F FE FF
+
+. 0 8061410 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 806141B 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE EA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 34 71 FE FF
+
+. 0 806144E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061455 13
+. 83 EC 0C 68 87 98 06 08 E8 AE 6F FE FF
+
+fsubrp_6 ... ok
+. 0 8061462 5
+. 83 C4 10 EB 44
+
+. 0 80614AB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806365F 5
+. E8 4F DE FF FF
+
+. 0 80614B3 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 E6 6E FE FF
+
+. 0 806150A 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 8061515 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE EA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 3A 70 FE FF
+
+. 0 8061548 7
+. 83 C4 10 85 C0 74 12
+
+. 0 806154F 13
+. 83 EC 0C 68 C2 98 06 08 E8 B4 6E FE FF
+
+fsubrp_7 ... ok
+. 0 806155C 5
+. 83 C4 10 EB 44
+
+. 0 80615A5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063664 5
+. E8 44 DF FF FF
+
+. 0 80615AD 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 EC 6D FE FF
+
+. 0 8061604 11
+. 83 C4 10 85 C0 0F 85 80 00 00 00
+
+. 0 806160F 51
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 DE EA D9 F7 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 40 6F FE FF
+
+. 0 8061642 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061649 13
+. 83 EC 0C 68 FD 98 06 08 E8 BA 6D FE FF
+
+fsubrp_8 ... ok
+. 0 8061656 5
+. 83 C4 10 EB 44
+
+. 0 806169F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063669 5
+. E8 39 E0 FF FF
+
+. 0 80616A7 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 02 6D FE FF
+
+. 0 80616EE 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 80616F5 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE E9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB 45 50 FF 75 D0 E8 E3 6D FE FF
+
+. 0 806171E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061725 13
+. 83 EC 0C 68 38 99 06 08 E8 DE 6C FE FF
+
+fsubrp_9 ... ok
+. 0 8061732 5
+. 83 C4 10 EB 48
+
+. 0 806177F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806366E 5
+. E8 14 E1 FF FF
+
+. 0 8061787 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 22 6C FE FF
+
+. 0 80617CE 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 80617D5 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE E9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C 46 50 FF 75 D0 E8 03 6D FE FF
+
+. 0 80617FE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061805 13
+. 83 EC 0C 68 73 99 06 08 E8 FE 6B FE FF
+
+fsubrp_10 ... ok
+. 0 8061812 5
+. 83 C4 10 EB 48
+
+. 0 806185F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063673 5
+. E8 EF E1 FF FF
+
+. 0 8061867 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 42 6B FE FF
+
+. 0 80618AE 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 80618B5 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE E9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 00 40 1C C6 50 FF 75 D0 E8 23 6C FE FF
+
+. 0 80618DE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80618E5 13
+. 83 EC 0C 68 B1 99 06 08 E8 1E 6B FE FF
+
+fsubrp_11 ... ok
+. 0 80618F2 5
+. 83 C4 10 EB 48
+
+. 0 806193F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063678 5
+. E8 CA E2 FF FF
+
+. 0 8061947 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 C6 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 62 6A FE FF
+
+. 0 806198E 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8061995 41
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 45 E0 DE E9 D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA 56 EB C5 50 FF 75 D0 E8 43 6B FE FF
+
+. 0 80619BE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80619C5 13
+. 83 EC 0C 68 EF 99 06 08 E8 3E 6A FE FF
+
+fsubrp_12 ... ok
+. 0 80619D2 5
+. 83 C4 10 EB 48
+
+. 0 8061A1F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806367D 5
+. E8 A5 E3 FF FF
+
+. 0 8061A27 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 72 69 FE FF
+
+. 0 8061A7E 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8061A85 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE E9 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 41 52 50 FF 75 D4 FF 75 D0 E8 CE 6A FE FF
+
+. 0 8061AB4 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061ABB 13
+. 83 EC 0C 68 2D 9A 06 08 E8 48 69 FE FF
+
+fsubrp_13 ... ok
+. 0 8061AC8 5
+. 83 C4 10 EB 44
+
+. 0 8061B11 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063682 5
+. E8 92 E4 FF FF
+
+. 0 8061B19 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 80 68 FE FF
+
+. 0 8061B70 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8061B77 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE E9 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 41 52 50 FF 75 D4 FF 75 D0 E8 DC 69 FE FF
+
+. 0 8061BA6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061BAD 13
+. 83 EC 0C 68 6B 9A 06 08 E8 56 68 FE FF
+
+fsubrp_14 ... ok
+. 0 8061BBA 5
+. 83 C4 10 EB 44
+
+. 0 8061C03 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063687 5
+. E8 7F E5 FF FF
+
+. 0 8061C0B 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 8E 67 FE FF
+
+. 0 8061C62 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8061C69 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE E9 DD 5D D0 DD A5 58 FF FF FF B8 ED C6 71 1C BA 47 F4 60 C1 52 50 FF 75 D4 FF 75 D0 E8 EA 68 FE FF
+
+. 0 8061C98 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061C9F 13
+. 83 EC 0C 68 A9 9A 06 08 E8 64 67 FE FF
+
+fsubrp_15 ... ok
+. 0 8061CAC 5
+. 83 C4 10 EB 44
+
+. 0 8061CF5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806368C 5
+. E8 6C E6 FF FF
+
+. 0 8061CFD 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D C1 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 9C 66 FE FF
+
+. 0 8061D54 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 8061D5B 47
+. 9B DD B5 58 FF FF FF DD 45 D8 DD 45 E0 DE E9 DD 5D D0 DD A5 58 FF FF FF B8 03 E0 E9 56 BA 4A 7D 58 C1 52 50 FF 75 D4 FF 75 D0 E8 F8 67 FE FF
+
+. 0 8061D8A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061D91 13
+. 83 EC 0C 68 E7 9A 06 08 E8 72 66 FE FF
+
+fsubrp_16 ... ok
+. 0 8061D9E 5
+. 83 C4 10 EB 44
+
+. 0 8061DE7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063691 5
+. E8 59 E7 FF FF
+
+. 0 8061DEF 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 C4 65 FE FF
+
+. 0 8061E2C 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8061E33 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 6D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA E6 40 45 50 FF 75 D0 E8 A7 66 FE FF
+
+. 0 8061E5A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061E61 13
+. 83 EC 0C 68 25 9B 06 08 E8 A2 65 FE FF
+
+fisubrs_1 ... ok
+. 0 8061E6E 5
+. 83 C4 10 EB 48
+
+. 0 8061EBB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8063696 5
+. E8 28 E8 FF FF
+
+. 0 8061EC3 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 F0 64 FE FF
+
+. 0 8061F00 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8061F07 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 6D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 8B 9C AD 45 50 FF 75 D0 E8 D3 65 FE FF
+
+. 0 8061F2E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8061F35 13
+. 83 EC 0C 68 63 9B 06 08 E8 CE 64 FE FF
+
+fisubrs_2 ... ok
+. 0 8061F42 5
+. 83 C4 10 EB 48
+
+. 0 8061F8F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 806369B 5
+. E8 F7 E8 FF FF
+
+. 0 8061F97 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 1C 64 FE FF
+
+. 0 8061FD4 7
+. 83 C4 10 85 C0 75 78
+
+. 0 8061FDB 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 6D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 8B 9C AD C5 50 FF 75 D0 E8 FF 64 FE FF
+
+. 0 8062002 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8062009 13
+. 83 EC 0C 68 A1 9B 06 08 E8 FA 63 FE FF
+
+fisubrs_3 ... ok
+. 0 8062016 5
+. 83 C4 10 EB 48
+
+. 0 8062063 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636A0 5
+. E8 C6 E9 FF FF
+
+. 0 806206B 61
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 48 63 FE FF
+
+. 0 80620A8 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80620AF 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DE 6D DE D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 EA E6 40 C5 50 FF 75 D0 E8 2B 64 FE FF
+
+. 0 80620D6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80620DD 13
+. 83 EC 0C 68 DF 9B 06 08 E8 26 63 FE FF
+
+fisubrs_4 ... ok
+. 0 80620EA 5
+. 83 C4 10 EB 48
+
+. 0 8062137 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636A5 5
+. E8 95 EA FF FF
+
+. 0 806213F 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 6C 62 FE FF
+
+. 0 8062184 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 806218B 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 6D DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA A6 C5 32 C1 52 50 FF 75 D4 FF 75 D0 E8 CA 63 FE FF
+
+. 0 80621B8 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80621BF 13
+. 83 EC 0C 68 1D 9C 06 08 E8 44 62 FE FF
+
+fisubrs_5 ... ok
+. 0 80621CC 5
+. 83 C4 10 EB 44
+
+. 0 8062215 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636AA 5
+. E8 6E EB FF FF
+
+. 0 806221D 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE E1 10 83 EC 08 6A 01 68 40 A1 06 08 E8 8E 61 FE FF
+
+. 0 8062262 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8062269 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 6D DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 68 E7 32 41 52 50 FF 75 D4 FF 75 D0 E8 EC 62 FE FF
+
+. 0 8062296 7
+. 83 C4 10 85 C0 74 12
+
+. 0 806229D 13
+. 83 EC 0C 68 5B 9C 06 08 E8 66 61 FE FF
+
+fisubrs_6 ... ok
+. 0 80622AA 5
+. 83 C4 10 EB 44
+
+. 0 80622F3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636AF 5
+. E8 47 EC FF FF
+
+. 0 80622FB 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 B0 60 FE FF
+
+. 0 8062340 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8062347 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 6D DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA 68 E7 32 C1 52 50 FF 75 D4 FF 75 D0 E8 0E 62 FE FF
+
+. 0 8062374 7
+. 83 C4 10 85 C0 74 12
+
+. 0 806237B 13
+. 83 EC 0C 68 99 9C 06 08 E8 88 60 FE FF
+
+fisubrs_7 ... ok
+. 0 8062388 5
+. 83 C4 10 EB 44
+
+. 0 80623D1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636B4 5
+. E8 20 ED FF FF
+
+. 0 80623D9 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 66 C7 45 DE 00 00 66 C7 45 DE 1F EF 83 EC 08 6A 01 68 40 A1 06 08 E8 D2 5F FE FF
+
+. 0 806241E 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8062425 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DE 6D DE DD 5D D0 DD A5 58 FF FF FF B8 AD 5B F3 C3 BA A6 C5 32 41 52 50 FF 75 D4 FF 75 D0 E8 30 61 FE FF
+
+. 0 8062452 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8062459 13
+. 83 EC 0C 68 D7 9C 06 08 E8 AA 5F FE FF
+
+fisubrs_8 ... ok
+. 0 8062466 5
+. 83 C4 10 EB 44
+
+. 0 80624AF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636B9 5
+. E8 F9 ED FF FF
+
+. 0 80624B7 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 FA 5E FE FF
+
+. 0 80624F6 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80624FD 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 6D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 30 A7 4C 50 FF 75 D0 E8 DD 5F FE FF
+
+. 0 8062524 7
+. 83 C4 10 85 C0 74 12
+
+. 0 806252B 13
+. 83 EC 0C 68 15 9D 06 08 E8 D8 5E FE FF
+
+fisubrl_1 ... ok
+. 0 8062538 5
+. 83 C4 10 EB 48
+
+. 0 8062585 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636BE 5
+. E8 CA EE FF FF
+
+. 0 806258D 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 24 5E FE FF
+
+. 0 80625CC 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80625D3 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 6D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 5C 2F A7 4C 50 FF 75 D0 E8 07 5F FE FF
+
+. 0 80625FA 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8062601 13
+. 83 EC 0C 68 53 9D 06 08 E8 02 5E FE FF
+
+fisubrl_2 ... ok
+. 0 806260E 5
+. 83 C4 10 EB 48
+
+. 0 806265B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636C3 5
+. E8 9B EF FF FF
+
+. 0 8062663 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 4E 5D FE FF
+
+. 0 80626A2 7
+. 83 C4 10 85 C0 75 78
+
+. 0 80626A9 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 6D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 5C 2F A7 CC 50 FF 75 D0 E8 31 5E FE FF
+
+. 0 80626D0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80626D7 13
+. 83 EC 0C 68 91 9D 06 08 E8 2C 5D FE FF
+
+fisubrl_3 ... ok
+. 0 80626E4 5
+. 83 C4 10 EB 48
+
+. 0 8062731 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636C8 5
+. E8 6C F0 FF FF
+
+. 0 8062739 63
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A C4 89 45 E0 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 78 5C FE FF
+
+. 0 8062778 7
+. 83 C4 10 85 C0 75 78
+
+. 0 806277F 39
+. 9B DD B5 58 FF FF FF D9 45 E0 DA 6D DC D9 5D D0 DD A5 58 FF FF FF 83 EC 08 B8 90 30 A7 CC 50 FF 75 D0 E8 5B 5D FE FF
+
+. 0 80627A6 7
+. 83 C4 10 85 C0 74 12
+
+. 0 80627AD 13
+. 83 EC 0C 68 CF 9D 06 08 E8 56 5C FE FF
+
+fisubrl_4 ... ok
+. 0 80627BA 5
+. 83 C4 10 EB 48
+
+. 0 8062807 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636CD 5
+. E8 3D F1 FF FF
+
+. 0 806280F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 9A 5B FE FF
+
+. 0 8062856 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 806285D 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 6D DC DD 5D D0 DD A5 58 FF FF FF B8 91 32 F0 A4 BA A4 9A 94 41 52 50 FF 75 D4 FF 75 D0 E8 F8 5C FE FF
+
+. 0 806288A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8062891 13
+. 83 EC 0C 68 0D 9E 06 08 E8 72 5B FE FF
+
+fisubrl_5 ... ok
+. 0 806289E 5
+. 83 C4 10 EB 44
+
+. 0 80628E7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636D2 5
+. E8 18 F2 FF FF
+
+. 0 80628EF 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 40 A1 06 08 E8 BA 5A FE FF
+
+. 0 8062936 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 806293D 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 6D DC DD 5D D0 DD A5 58 FF FF FF B8 6F CD 0F E3 BA 58 31 95 41 52 50 FF 75 D4 FF 75 D0 E8 18 5C FE FF
+
+. 0 806296A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8062971 13
+. 83 EC 0C 68 4B 9E 06 08 E8 92 5A FE FF
+
+fisubrl_6 ... ok
+. 0 806297E 5
+. 83 C4 10 EB 44
+
+. 0 80629C7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636D7 5
+. E8 F3 F2 FF FF
+
+. 0 80629CF 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 DA 59 FE FF
+
+. 0 8062A16 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8062A1D 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 6D DC DD 5D D0 DD A5 58 FF FF FF B8 6F CD 0F E3 BA 58 31 95 C1 52 50 FF 75 D4 FF 75 D0 E8 38 5B FE FF
+
+. 0 8062A4A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8062A51 13
+. 83 EC 0C 68 89 9E 06 08 E8 B2 59 FE FF
+
+fisubrl_7 ... ok
+. 0 8062A5E 5
+. 83 C4 10 EB 44
+
+. 0 8062AA7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636DC 5
+. E8 CE F3 FF FF
+
+. 0 8062AAF 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 C1 89 45 E0 89 55 E4 C7 45 DC 00 00 00 00 C7 45 DC 4F 80 C6 FA 83 EC 08 6A 01 68 40 A1 06 08 E8 FA 58 FE FF
+
+. 0 8062AF6 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 8062AFD 45
+. 9B DD B5 58 FF FF FF DD 45 E0 DA 6D DC DD 5D D0 DD A5 58 FF FF FF B8 91 32 F0 A4 BA A4 9A 94 C1 52 50 FF 75 D4 FF 75 D0 E8 58 5A FE FF
+
+. 0 8062B2A 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8062B31 13
+. 83 EC 0C 68 C7 9E 06 08 E8 D2 58 FE FF
+
+fisubrl_8 ... ok
+. 0 8062B3E 5
+. 83 C4 10 EB 44
+
+. 0 8062B87 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636E1 5
+. E8 A9 F4 FF FF
+
+. 0 8062B8F 71
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 2B 52 9A 44 89 45 E0 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 BA F5 08 46 89 45 D8 83 EC 08 6A 01 68 40 A1 06 08 E8 1A 58 FE FF
+
+. 0 8062BD6 11
+. 83 C4 10 85 C0 0F 85 BF 00 00 00
+
+. 0 8062BE1 48
+. 9B DD B5 58 FF FF FF D9 45 D8 D9 EE D9 45 E0 D9 CA D9 5D D0 D9 F7 D9 5D C8 DD A5 58 FF FF FF 83 EC 08 B8 BA F5 08 46 50 FF 75 D0 E8 F0 58 FE FF
+
+. 0 8062C11 7
+. 83 C4 10 85 C0 74 2A
+
+. 0 8062C18 17
+. 83 EC 08 B8 2B 52 9A 44 50 FF 75 C8 E8 D8 58 FE FF
+
+. 0 8062C29 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8062C30 13
+. 83 EC 0C 68 05 9F 06 08 E8 D3 57 FE FF
+
+fxch_1 ... ok
+. 0 8062C3D 5
+. 83 C4 10 EB 6E
+
+. 0 8062CB0 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636E6 5
+. E8 CD F5 FF FF
+
+. 0 8062CB8 87
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 AD 5B F3 C3 BA 87 D6 32 41 89 45 E0 89 55 E4 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 B8 EE B6 E6 47 BA EC 32 5D 41 89 45 D8 89 55 DC 83 EC 08 6A 01 68 40 A1 06 08 E8 E1 56 FE FF
+
+. 0 8062D0F 11
+. 83 C4 10 85 C0 0F 85 C3 00 00 00
+
+. 0 8062D1A 54
+. 9B DD B5 58 FF FF FF DD 45 D8 D9 EE DD 45 E0 D9 CA DD 5D D0 D9 F7 DD 5D C8 DD A5 58 FF FF FF B8 EE B6 E6 47 BA EC 32 5D 41 52 50 FF 75 D4 FF 75 D0 E8 32 58 FE FF
+
+. 0 8062D50 7
+. 83 C4 10 85 C0 74 30
+
+. 0 8062D57 23
+. B8 AD 5B F3 C3 BA 87 D6 32 41 52 50 FF 75 CC FF 75 C8 E8 14 58 FE FF
+
+. 0 8062D6E 7
+. 83 C4 10 85 C0 74 12
+
+. 0 8062D75 13
+. 83 EC 0C 68 3A 9F 06 08 E8 8E 56 FE FF
+
+fxch_2 ... ok
+. 0 8062D82 5
+. 83 C4 10 EB 66
+
+. 0 8062DED 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80636EB 10
+. 83 EC 0C 6A 00 E8 2B 4D FE FF
+
+. 0 8048420 6
+. FF 25 18 A1 06 08
+
+. 0 8048426 10
+. 68 30 00 00 00 E9 80 FF FF FF
+
+. 0 3A9EBC50 14
+. 55 89 E5 57 56 53 83 EC 0C E8 33 AB FE FF
+
+. 0 3A9EBC5E 16
+. 81 C3 B2 73 0E 00 8B 93 BC 9E FF FF 85 D2 74 6F
+
+. 0 3A9EBC6E 9
+. 89 F6 8B 42 04 85 C0 74 46
+
+. 0 3A9EBC77 30
+. 89 F6 8D BC 27 00 00 00 00 48 89 42 04 C1 E0 04 8D 04 10 8D 48 08 8B 40 08 83 F8 04 77 21
+
+. 0 3A9EBC95 11
+. 8B 84 83 88 D7 FE FF 01 D8 FF E0
+
+. 0 3A9EBCA0 16
+. 8B 45 08 89 44 24 04 8B 41 08 89 04 24 FF 51 04
+
+. 0 8063700 14
+. 55 89 E5 83 EC 18 89 5D F4 E8 BA 00 00 00
+
+. 0 806370E 36
+. 81 C3 E6 69 00 00 89 7D FC 8D 83 0C FF FF FF 8D BB 0C FF FF FF 89 75 F8 29 F8 C1 F8 02 85 C0 8D 70 FF 75 12
+
+. 0 8063732 5
+. E8 BD 00 00 00
+
+. 0 80637F4 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 F7 68 00 00 50 E8 76 4C FE FF
+
+. 0 8048480 14
+. 55 89 E5 50 50 80 3D 20 A1 06 08 00 75 2E
+
+. 0 804848E 11
+. A1 08 A0 06 08 8B 10 85 D2 74 1C
+
+. 0 80484B5 9
+. C6 05 20 A1 06 08 01 C9 C3
+
+. 0 806380A 4
+. 59 5B C9 C3
+
+. 0 8063737 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9EBCB0 13
+. 8B 93 BC 9E FF FF 8B 42 04 85 C0 75 C3
+
+. 0 3A9EBC80 21
+. 48 89 42 04 C1 E0 04 8D 04 10 8D 48 08 8B 40 08 83 F8 04 77 21
+
+. 0 3A970800 14
+. 55 89 E5 57 56 53 83 EC 4C E8 1D 51 00 00
+
+. 0 3A97080E 24
+. 81 C3 5E 7E 00 00 8D 83 AC F9 FF FF 89 45 CC 89 04 24 FF 93 F8 FB FF FF
+
+. 0 3A9658E0 11
+. 55 89 E5 8B 45 08 FF 40 04 5D C3
+
+. 0 3A970826 44
+. C7 45 F0 00 00 00 00 8B 93 98 F9 FF FF 8B BB 94 F9 FF FF 8D 04 95 0F 00 00 00 83 E0 F0 29 C4 8D 4C 24 0C 85 FF 89 F8 89 4D EC 74 20
+
+. 0 3A970852 26
+. FF 87 70 01 00 00 8B 75 F0 8B 55 EC 89 3C B2 8B 7F 0C 46 89 75 F0 85 FF 75 E6
+
+. 0 3A97086C 17
+. 8B 93 98 F9 FF FF 8B 78 0C 85 FF 0F 84 11 01 00 00
+
+. 0 3A97087D 15
+. C7 45 E8 01 00 00 00 8B 4D EC 39 79 04 74 12
+
+. 0 3A97089E 15
+. 8B 4D E8 41 39 D1 89 4D BC 0F 83 D6 00 00 00
+
+. 0 3A9708AD 22
+. 8B 45 E8 8B 75 EC C1 E0 02 89 45 C0 01 C6 F7 5D C0 89 75 C4 EB 0D
+
+. 0 3A9708D0 25
+. 8B 55 EC 8B 45 BC 8B 04 82 89 45 D0 8B 90 D4 01 00 00 89 45 E4 85 D2 74 77
+
+. 0 3A9708E9 6
+. 8B 02 85 C0 74 71
+
+. 0 3A9708EF 28
+. 8B 75 EC 8B 4D E8 8D 0C 8E 89 4D C8 8B 75 BC 8B 4D C0 8D 34 B1 89 75 B8 39 F8 74 12
+
+. 0 3A97090B 14
+. 90 8D 74 26 00 83 C2 04 8B 02 85 C0 74 47
+
+. 0 3A970919 4
+. 39 F8 75 F3
+
+. 0 3A970910 9
+. 83 C2 04 8B 02 85 C0 74 47
+
+. 0 3A970960 17
+. 8B 55 D0 8B 8A E0 01 00 00 85 C9 0F 85 06 01 00 00
+
+. 0 3A970971 18
+. FF 45 BC 8B 93 98 F9 FF FF 39 55 BC 0F 82 4D FF FF FF
+
+. 0 3A970983 11
+. 8B 7F 0C 85 FF 0F 85 EF FE FF FF
+
+. 0 3A97088C 18
+. 8D 74 26 00 FF 45 E8 8B 45 EC 8B 75 E8 39 3C B0 75 F2
+
+. 0 3A970890 14
+. FF 45 E8 8B 45 EC 8B 75 E8 39 3C B0 75 F2
+
+. 0 3A97098E 12
+. C7 45 F0 00 00 00 00 39 55 F0 72 2C
+
+. 0 3A9709C6 21
+. 8B 55 F0 8B 4D EC 8B 3C 91 0F B6 97 74 01 00 00 F6 C2 08 74 48
+
+. 0 3A9709DB 2
+. EB C2
+
+. 0 3A97099F 20
+. 88 D0 24 F7 88 87 74 01 00 00 8B 47 04 0F B6 08 84 C9 75 2A
+
+. 0 3A9709B3 5
+. F6 C2 03 75 25
+
+. 0 3A9709B8 14
+. FF 45 F0 8B 55 F0 3B 93 98 F9 FF FF 73 71
+
+. 0 3A9709DD 10
+. 8B 97 80 00 00 00 85 D2 75 07
+
+. 0 3A9709E7 7
+. 8B 77 4C 85 F6 74 CA
+
+. 0 3A9709EE 13
+. F6 83 14 FC FF FF 02 0F 85 E1 00 00 00
+
+. 0 3A9709FB 4
+. 85 D2 74 1D
+
+. 0 3A970A1C 7
+. 8B 47 4C 85 C0 75 49
+
+. 0 3A970A6C 9
+. 8B 40 04 8B 17 01 D0 FF D0
+
+. 0 3A97CCD4 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 03 11 00 00 50 E8 06 FD FF FF
+
+. 0 3A97C9F0 26
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 E7 13 00 00 51 80 BB 30 00 00 00 00 75 34
+
+. 0 3A97CA0A 10
+. 8B 93 24 00 00 00 85 D2 75 2F
+
+. 0 3A97CA43 15
+. 83 EC 0C 8B 83 10 FF FF FF 50 E8 32 FF FF FF
+
+. 0 3A97C984 6
+. FF A3 18 00 00 00
+
+. 0 3A97C98A 10
+. 68 18 00 00 00 E9 B0 FF FF FF
+
+. 0 3A97C944 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 0 3A9EBEA0 14
+. 55 89 E5 57 56 53 83 EC 0C E8 E3 A8 FE FF
+
+. 0 3A9EBEAE 16
+. 81 C3 62 71 0E 00 8B BB BC 9E FF FF 85 FF 74 68
+
+. 0 3A9EBEBE 22
+. 89 F6 8B 47 04 C1 E0 04 8D 74 38 F8 8D 47 08 39 C6 89 45 F0 73 12
+
+. 0 3A9EBED4 2
+. EB 4A
+
+. 0 3A9EBF20 6
+. 8B 3F 85 FF 75 9A
+
+. 0 3A9EBF26 7
+. 8B 45 08 85 C0 74 0B
+
+. 0 3A9EBF2D 11
+. 8B 45 08 89 04 24 E8 98 B9 09 00
+
+. 0 3AA878D0 17
+. 55 89 E5 57 56 53 83 EC 10 8B 7D 08 E8 B0 EE F4 FF
+
+. 0 3AA878E1 20
+. 81 C3 2F B7 04 00 8B 93 3C 2B 00 00 85 D2 0F 85 DD 00 00 00
+
+. 0 3AA878F5 22
+. 8B 8B 8C 9B FF FF 8D 83 88 9B FF FF 89 45 F0 39 C1 8B 71 04 75 0F
+
+. 0 3AA8790B 2
+. EB 33
+
+. 0 3AA87940 22
+. 8B 8B 94 9B FF FF 8D 83 90 9B FF FF 89 45 EC 39 C1 8B 71 04 75 0C
+
+. 0 3AA87956 2
+. EB 28
+
+. 0 3AA87980 22
+. 8B 8B 9C 9B FF FF 8D 83 98 9B FF FF 89 45 E8 39 C1 8B 71 04 75 0C
+
+. 0 3AA87996 2
+. EB 28
+
+. 0 3AA879C0 10
+. 8B 93 44 2B 00 00 85 D2 75 18
+
+. 0 3AA879CA 8
+. 83 C4 10 5B 5E 5F 5D C3
+
+. 0 3A9EBF38 8
+. 83 C4 0C 5B 5E 5F 5D C3
+
+. 0 3A97CA52 5
+. 83 C4 10 EB BD
+
+. 0 3A97CA14 12
+. 8B 83 14 FF FF FF 8B 10 85 D2 74 17
+
+. 0 3A97CA37 12
+. C6 83 30 00 00 00 01 8B 5D FC C9 C3
+
+. 0 3A97CCEA 4
+. 59 5B C9 C3
+
+. 0 3A970A75 2
+. EB AC
+
+. 0 3A970A23 20
+. FF 8F 70 01 00 00 FF 45 F0 8B 55 F0 3B 93 98 F9 FF FF 72 8F
+
+. 0 3A9BA054 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 E7 57 00 00 50 E8 36 75 FE FF
+
+. 0 3A9A15A0 26
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 9B E2 01 00 51 80 BB 5C 00 00 00 00 75 34
+
+. 0 3A9A15BA 10
+. 8B 93 44 00 00 00 85 D2 75 2F
+
+. 0 3A9A15F3 15
+. 83 EC 0C 8B 83 DC FE FF FF 50 E8 22 FF FF FF
+
+. 0 3A9A1524 6
+. FF A3 28 00 00 00
+
+. 0 3A9A152A 10
+. 68 38 00 00 00 E9 70 FF FF FF
+
+. 0 3A9A14A4 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 0 3A9A1602 5
+. 83 C4 10 EB BD
+
+. 0 3A9A15C4 12
+. 8B 83 E0 FE FF FF 8B 10 85 D2 74 17
+
+. 0 3A9A15E7 12
+. C6 83 5C 00 00 00 01 8B 5D FC C9 C3
+
+. 0 3A9BA06A 4
+. 59 5B C9 C3
+
+. 0 3A9709FF 29
+. 8B 52 04 8B 37 8B 87 88 00 00 00 01 F2 89 55 D4 8B 70 04 C1 EE 02 89 F0 4E 85 C0 75 3C
+
+. 0 3A970A58 6
+. 8B 45 D4 FF 14 B0
+
+. 0 3A9D6760 10
+. 55 89 E5 56 53 E8 27 00 00 00
+
+. 0 3A9D676A 18
+. 81 C3 A6 C8 0F 00 8D B3 A0 FF FF FF 8B 06 85 C0 75 04
+
+. 0 3A9D677C 4
+. 5B 5E 5D C3
+
+. 0 3A970A5E 7
+. 89 F0 4E 85 C0 75 F3
+
+. 0 3A970A65 7
+. 8B 47 4C 85 C0 74 B7
+
+. 0 3A970A37 12
+. 8B 4D CC 89 0C 24 FF 93 FC FB FF FF
+
+. 0 3A9658F0 11
+. 55 89 E5 8B 45 08 FF 48 04 5D C3
+
+. 0 3A970A43 13
+. 80 BB 14 FC FF FF 00 0F 88 B5 00 00 00
+
+. 0 3A970A50 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A9EBCBD 14
+. 89 D0 8B 12 85 D2 89 93 BC 9E FF FF 74 12
+
+. 0 3A9EBCDD 16
+. 8D B3 F4 FF FF FF 8D BB F8 FF FF FF 39 FE 72 0B
+
+. 0 3A9EBCF8 2
+. FF 16
+
+. 0 3AA20870 14
+. 55 89 E5 57 56 53 83 EC 0C E8 13 5F FB FF
+
+. 0 3AA2087E 18
+. 81 C3 92 27 0B 00 C7 04 24 00 00 00 00 E8 D0 FD FF FF
+
+. 0 3AA20660 16
+. 55 31 C0 89 E5 57 56 53 83 EC 24 E8 21 61 FB FF
+
+. 0 3AA20670 34
+. 81 C3 A0 29 0B 00 83 BB B8 01 00 00 00 C7 45 E0 00 00 00 00 0F 95 C0 85 C0 89 45 DC 0F 85 7B 01 00 00
+
+. 0 3AA20692 7
+. 8B 75 08 85 F6 74 0E
+
+. 0 3AA206A7 16
+. 8B B3 70 95 FF FF 8B BB B0 09 00 00 85 F6 75 19
+
+. 0 3AA206D0 13
+. 89 B3 B4 09 00 00 8B 4D 08 85 C9 74 11
+
+. 0 3AA206EE 11
+. 8B 56 5C 85 D2 0F 8E C7 00 00 00
+
+. 0 3AA207C0 12
+. 8B 46 10 39 46 14 0F 86 2D FF FF FF
+
+. 0 3AA206F9 8
+. 0F B6 4E 46 84 C9 75 2F
+
+. 0 3AA20701 4
+. 85 D2 7E 2B
+
+. 0 3AA20730 7
+. 8B 45 08 85 C0 74 11
+
+. 0 3AA20748 22
+. 31 C0 89 83 B4 09 00 00 8B 83 B0 09 00 00 39 C7 0F 85 62 FF FF FF
+
+. 0 3AA2075E 11
+. 8B 76 34 85 F6 0F 85 67 FF FF FF
+
+. 0 3AA20769 14
+. 8D B4 26 00 00 00 00 8B 45 08 85 C0 74 0E
+
+. 0 3AA20785 11
+. 8B 7D DC 85 FF 0F 85 9D 00 00 00
+
+. 0 3AA20790 11
+. 8B 45 E0 83 C4 24 5B 5E 5F 5D C3
+
+. 0 3AA20890 12
+. 8B B3 70 95 FF FF 89 C7 85 F6 74 32
+
+. 0 3AA2089C 10
+. 8D 74 26 00 8B 06 A8 02 75 1A
+
+. 0 3AA208C0 14
+. C7 46 5C FF FF FF FF 8B 76 34 85 F6 75 D2
+
+. 0 3AA208A0 6
+. 8B 06 A8 02 75 1A
+
+. 0 3AA208A6 10
+. 25 08 10 00 00 83 F8 08 74 10
+
+. 0 3AA208B0 7
+. 8B 46 5C 85 C0 75 21
+
+. 0 3AA208D8 29
+. 0F BE 46 46 31 C9 31 D2 8B 84 30 94 00 00 00 89 4C 24 08 89 54 24 04 89 34 24 FF 50 2C
+
+. 0 3AA1EFD0 19
+. 55 89 E5 56 53 83 EC 0C 8B 45 10 8B 75 08 E8 AE 77 FB FF
+
+. 0 3AA1EFE3 25
+. 81 C3 2D 40 0B 00 89 44 24 08 8B 45 0C 89 34 24 89 44 24 04 E8 74 1D 00 00
+
+. 0 3AA20D70 30
+. 55 89 E5 83 EC 1C 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 7D 0C 0F BE 46 46 E8 03 5A FB FF
+
+. 0 3AA20D8E 19
+. 81 C3 82 22 0B 00 8B 84 30 94 00 00 00 89 34 24 FF 50 30
+
+. 0 3AA1EAC0 26
+. 55 89 E5 83 EC 1C 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 56 14 E8 B7 7C FB FF
+
+. 0 3AA1EADA 15
+. 81 C3 36 45 0B 00 8B 46 10 31 FF 39 C2 76 47
+
+. 0 3AA1EB30 10
+. 8B 46 04 89 C2 2B 56 08 75 1D
+
+. 0 3AA1EB3A 29
+. C7 46 4C FF FF FF FF C7 46 50 FF FF FF FF 89 F8 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA20DA1 5
+. 31 D2 40 74 5E
+
+. 0 3AA20DA6 19
+. 8B 55 10 85 FF 0F 94 C0 85 D2 0F 94 C2 09 D0 A8 01 74 5A
+
+. 0 3AA20DB9 31
+. 83 0E 02 8D 46 48 31 FF 89 44 24 08 8D 46 47 89 7C 24 0C 89 44 24 04 89 34 24 E8 C8 FD FF FF
+
+. 0 3AA20BC1 7
+. 8B 16 F6 C2 01 74 38
+
+. 0 3AA20C00 27
+. 8B 46 20 89 0C 24 29 C8 05 FF 0F 00 00 25 00 F0 FF FF 89 44 24 04 E8 75 70 05 00
+
+. 0 3AA77C90 17
+. 89 DA 8B 4C 24 08 8B 5C 24 04 B8 5B 00 00 00 CD 80
+
+. 0 3AA77CA1 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3AA77CAA 1
+. C3
+
+. 0 3AA20C1B 9
+. 90 8D 74 26 00 8B 16 EB A4
+
+. 0 3AA20BF0 15
+. 83 CA 01 89 16 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3AA20DD8 59
+. C7 46 18 00 00 00 00 89 F2 C7 46 14 00 00 00 00 C7 46 10 00 00 00 00 C7 46 08 00 00 00 00 C7 46 04 00 00 00 00 C7 46 0C 00 00 00 00 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA1EFFC 6
+. 31 D2 85 C0 74 17
+
+. 0 3AA1F002 32
+. 8B 46 1C 89 F2 89 46 18 89 46 14 89 46 10 89 46 0C 89 46 04 89 46 08 83 C4 0C 89 D0 5B 5E 5D C3
+
+. 0 3AA208F5 2
+. EB C9
+
+. 0 3AA208CE 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3
+
+. 0 3A9EBCFA 7
+. 83 C6 04 39 FE 72 F7
+
+. 0 3A9EBD01 2
+. EB EA
+
+. 0 3A9EBCED 11
+. 8B 45 08 89 04 24 E8 94 D8 05 00
+
+. 0 3AA4958C 11
+. 8B 5C 24 04 B8 FC 00 00 00 CD 80
+
+==2983== 
+sewardj@phoenix:~/VgSVN/valgrind/trunk$ ./vsvn --tool=none --vex-guest-chase-thresh=0 ./none/testss/x86/insn_fpummx
+==2988== Nulgrind, a binary JIT-compiler.
+==2988== Copyright (C) 2002-2004, and GNU GPL'd, by Nicholas Nethercote.
+==2988== Using valgrind-2.3.0.CVS, a dynamic binary instrumentation framework.
+==2988== Copyright (C) 2000-2004, and GNU GPL'd, by Julian Seward et al.
+==2988== For more details, rerun with: -v
+==2988== 
+. 0 3A965880 7
+. 89 E0 E8 F9 03 00 00
+
+. 0 3A965C80 14
+. 55 89 E5 57 56 53 83 EC 54 E8 9D FC 00 00
+
+. 0 3A97592B 4
+. 8B 1C 24 C3
+
+. 0 3A965C8E 67
+. 81 C3 DE 29 01 00 89 45 D0 8B 93 00 00 00 00 8D 83 50 FF FF FF 29 D0 89 83 E4 F9 FF FF 01 D0 89 83 EC F9 FF FF 8D 83 94 F9 FF FF 89 45 C8 83 C0 50 8B 7D C8 89 45 CC 8B 70 08 83 C7 68 8B 16 85 D2 75 10
+
+. 0 3A965CE1 5
+. 83 FA 21 7E ED
+
+. 0 3A965CD3 14
+. 89 34 97 83 C6 08 8B 06 85 C0 89 C2 74 2F
+
+. 0 3A965CE6 16
+. B8 FF FF FF 6F 29 D0 83 F8 0F 0F 87 EA 01 00 00
+
+. 0 3A965CF6 21
+. B8 21 00 00 70 29 D0 89 34 87 83 C6 08 8B 06 85 C0 89 C2 75 D6
+
+. 0 3A965D0B 14
+. 90 8D 74 26 00 8B 4D CC 8B 11 85 D2 74 57
+
+. 0 3A965D19 7
+. 8B 47 10 85 C0 74 03
+
+. 0 3A965D20 10
+. 01 50 04 8B 47 0C 85 C0 74 03
+
+. 0 3A965D2A 10
+. 01 50 04 8B 47 14 85 C0 74 03
+
+. 0 3A965D34 10
+. 01 50 04 8B 47 18 85 C0 74 03
+
+. 0 3A965D3E 10
+. 01 50 04 8B 47 44 85 C0 74 03
+
+. 0 3A965D48 10
+. 01 50 04 8B 47 5C 85 C0 74 03
+
+. 0 3A965D52 13
+. 01 50 04 8B 87 C4 00 00 00 85 C0 74 11
+
+. 0 3A965D5F 27
+. 01 50 04 8D B4 26 00 00 00 00 8D BC 27 00 00 00 00 8B 83 E4 F9 FF FF 85 C0 75 0E
+
+. 0 3A965D88 44
+. C7 45 DC 00 00 00 00 8B 4D C8 8B 83 40 FA FF FF C7 45 E4 00 00 00 00 83 C1 50 85 C0 C7 45 D8 00 00 00 00 C7 45 D4 00 00 00 00 74 12
+
+. 0 3A965DB4 28
+. 8B 40 04 89 45 D4 8B 83 44 FA FF FF 8B 40 04 89 45 D8 8B BB 4C FA FF FF 85 FF 74 0C
+
+. 0 3A965DD0 51
+. 8B 83 04 FA FF FF 8B 40 04 01 45 D8 8B 55 D4 8B 75 D8 8D 04 16 89 45 C4 8B 45 C8 8B 78 50 8B 41 30 8B 40 04 89 45 C0 8B 81 B4 00 00 00 31 C9 85 C0 74 03
+
+. 0 3A965E03 12
+. 8B 48 04 89 F0 C1 E8 03 39 C8 76 02
+
+. 0 3A965E0F 9
+. 89 C8 8D 0C C2 39 CA 73 17
+
+. 0 3A965E18 23
+. 90 8D B4 26 00 00 00 00 8B 32 89 F8 83 C2 08 01 F0 01 38 39 CA 72 F1
+
+. 0 3A965E20 15
+. 8B 32 89 F8 83 C2 08 01 F0 01 38 39 CA 72 F1
+
+. 0 3A965E2F 5
+. 3B 4D C4 73 4D
+
+. 0 3A965E34 47
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 51 04 8B 75 C0 89 D0 C1 E8 08 C1 E0 04 01 F0 8B 31 01 FE 89 75 BC 0F B6 F2 31 D2 66 83 78 0E 00 74 06
+
+. 0 3A965E63 17
+. 8B 93 E4 F9 FF FF 03 50 04 8D 46 FA 83 F8 01 77 05
+
+. 0 3A965E74 13
+. 8B 45 BC 89 10 83 C1 08 3B 4D C4 72 BF
+
+. 0 3A965E40 35
+. 8B 51 04 8B 75 C0 89 D0 C1 E8 08 C1 E0 04 01 F0 8B 31 01 FE 89 75 BC 0F B6 F2 31 D2 66 83 78 0E 00 74 06
+
+. 0 3A965E81 16
+. 8B 45 C8 BE 01 00 00 00 83 C0 50 E8 EF 5E 00 00
+
+. 0 3A96BD80 12
+. 55 89 C1 8B 40 28 89 E5 85 C0 74 1D
+
+. 0 3A96BD8C 31
+. 8B 40 04 8B 10 83 C0 08 89 81 68 01 00 00 8D 04 90 89 91 64 01 00 00 89 81 6C 01 00 00 5D C3
+
+. 0 3A965E91 57
+. 89 B3 54 FB FF FF 8D 83 94 C9 FE FF 89 83 78 FB FF FF 8D 83 28 02 00 00 89 83 7C FB FF FF 8D 83 24 DD FE FF 89 44 24 04 8B 55 D0 89 AB 28 01 00 00 89 14 24 E8 F6 CE 00 00
+
+. 0 3A972DC0 44
+. 55 31 D2 89 E5 57 31 FF 31 C9 56 31 F6 53 81 EC 04 02 00 00 8B 45 08 89 95 1C FE FF FF 31 D2 89 95 0C FE FF FF 8B 10 E8 3F 2B 00 00
+
+. 0 3A972DEC 74
+. 81 C3 80 58 00 00 89 83 28 01 00 00 83 C0 04 89 93 0C 02 00 00 8D 14 90 89 B5 14 FE FF FF 8B 72 04 89 BD 18 FE FF FF 31 FF 85 F6 89 83 38 00 00 00 8D 42 04 89 8D 10 FE FF FF 89 C1 89 BD 08 FE FF FF 89 83 00 02 00 00 74 09
+
+. 0 3A972E36 9
+. 83 C1 04 8B 11 85 D2 75 F7
+
+. 0 3A972E3F 37
+. 83 C1 04 8D 83 14 D2 FE FF 89 CA 89 8B 34 01 00 00 8B 09 89 85 20 FE FF FF 31 C0 85 C9 89 83 20 FC FF FF 74 52
+
+. 0 3A972E64 20
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8D 41 FD 83 F8 1E 77 28
+
+. 0 3A972EA0 22
+. 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA
+
+. 0 3A972E70 8
+. 8D 41 FD 83 F8 1E 77 28
+
+. 0 3A972E78 11
+. 8B 84 83 54 DB FF FF 01 D8 FF E0
+
+. 0 3A9731AC 14
+. 8B 42 04 89 83 54 FC FF FF E9 E6 FC FF FF
+
+. 0 3A9731F2 14
+. 8B 42 04 89 83 18 FC FF FF E9 97 FC FF FF
+
+. 0 3A972E97 31
+. 8B 0A 8D B4 26 00 00 00 00 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA
+
+. 0 3A97319E 14
+. 8B 42 04 89 83 30 FC FF FF E9 EB FC FF FF
+
+. 0 3A97320E 14
+. 8B 42 04 89 85 1C FE FF FF E9 84 FC FF FF
+
+. 0 3A973200 14
+. 8B 42 04 89 85 18 FE FF FF E9 92 FC FF FF
+
+. 0 3A9731E4 14
+. 8B 42 04 89 85 20 FE FF FF E9 A5 FC FF FF
+
+. 0 3A9731D6 14
+. 8B 42 04 31 85 14 FE FF FF E9 BC FC FF FF
+
+. 0 3A9731C8 14
+. 8B 42 04 31 85 10 FE FF FF E9 CA FC FF FF
+
+. 0 3A972E83 51
+. B8 FF FF FF FF 89 85 0C FE FF FF 8B 42 04 89 83 30 01 00 00 8B 0A 8D B4 26 00 00 00 00 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA
+
+. 0 3A9731BA 14
+. 8B 42 04 89 83 20 FC FF FF E9 D8 FC FF FF
+
+. 0 3A972EB6 23
+. 8D 55 B4 8D 85 24 FE FF FF 89 95 04 FE FF FF 89 04 24 E8 83 1E 00 00
+
+. 0 3A974D50 13
+. 89 DA 8B 5C 24 04 B8 7A 00 00 00 CD 80
+
+. 0 3A974D5D 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3A974D66 1
+. C3
+
+. 0 3A972ECD 8
+. 85 C0 0F 85 DC 01 00 00
+
+. 0 3A972ED5 39
+. 8D 85 A6 FE FF FF 89 85 04 FE FF FF 8B B5 04 FE FF FF 31 C0 31 FF 89 85 00 FE FF FF 0F B6 16 88 D0 2C 30 3C 09 77 69
+
+. 0 3A972EFC 28
+. 8D 74 26 00 46 0F BE C2 8D 50 D0 0F B6 0E 88 C8 2C 30 88 8D FF FD FF FF 3C 09 77 28
+
+. 0 3A972F40 17
+. FF 85 00 FE FF FF C1 E7 08 09 D7 46 80 F9 2E 75 0B
+
+. 0 3A972F51 11
+. 0F B6 16 88 D0 2C 30 3C 09 76 A4
+
+. 0 3A972F00 24
+. 46 0F BE C2 8D 50 D0 0F B6 0E 88 C8 2C 30 88 8D FF FD FF FF 3C 09 77 28
+
+. 0 3A972F5C 9
+. 83 BD 00 FE FF FF 02 7F 12
+
+. 0 3A972F77 12
+. 81 FF 04 02 02 00 0F 86 BD 02 00 00
+
+. 0 3A972F83 15
+. 83 BD 0C FE FF FF FF 89 BB 1C FC FF FF 74 58
+
+. 0 3A972FEA 10
+. 8B BB 18 FC FF FF 85 FF 75 0B
+
+. 0 3A972FFF 10
+. 8B B3 94 FD FF FF 85 F6 74 16
+
+. 0 3A97301F 12
+. C7 04 24 00 00 00 00 E8 75 08 00 00
+
+. 0 3A9738A0 12
+. 55 89 E5 53 8B 4D 08 E8 7F 20 00 00
+
+. 0 3A9738AC 15
+. 81 C3 C0 4D 00 00 87 CB B8 2D 00 00 00 CD 80
+
+. 0 3A9738BB 14
+. 87 CB 89 83 44 01 00 00 31 D2 39 C8 72 05
+
+. 0 3A9738C9 5
+. 5B 89 D0 5D C3
+
+. 0 3A97302B 10
+. 8B 83 20 FC FF FF 85 C0 74 21
+
+. 0 3A973035 5
+. 80 38 00 75 0A
+
+. 0 3A973044 4
+. 85 C0 74 0E
+
+. 0 3A973048 8
+. 89 04 24 E8 80 22 00 00
+
+. 0 3A9752D0 13
+. 8B 44 24 04 BA 03 00 00 00 21 C2 74 24
+
+. 0 3A9752DD 2
+. 7A 17
+
+. 0 3A9752DF 8
+. 38 30 0F 84 9F 00 00 00
+
+. 0 3A9752E7 9
+. 40 38 30 0F 84 96 00 00 00
+
+. 0 3A9752F0 6
+. 40 83 F2 02 74 0B
+
+. 0 3A975301 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 58
+
+. 0 3A975369 14
+. 83 E8 04 81 E9 FF FE FE FE 80 F9 00 74 0F
+
+. 0 3A975377 5
+. 40 84 ED 74 0A
+
+. 0 3A97537C 9
+. C1 E9 10 40 80 F9 00 74 01
+
+. 0 3A975386 5
+. 2B 44 24 04 C3
+
+. 0 3A973050 18
+. 89 83 24 FC FF FF C7 04 24 00 00 00 00 E8 7E 08 00 00
+
+. 0 3A9738E0 20
+. 55 89 E5 83 EC 10 89 5D F4 89 7D FC 8B 7D 08 E8 37 20 00 00
+
+. 0 3A9738F4 19
+. 81 C3 78 4D 00 00 89 75 F8 8B B3 44 01 00 00 85 F6 74 0A
+
+. 0 3A973907 10
+. 8B 83 2C 01 00 00 85 C0 74 1B
+
+. 0 3A97392C 6
+. 85 FF 89 F2 75 0F
+
+. 0 3A973932 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A973062 16
+. 89 C1 8D 83 28 02 00 00 39 C1 0F 84 B4 01 00 00
+
+. 0 3A973072 14
+. 8B 83 30 01 00 00 85 C0 0F 85 9C 01 00 00
+
+. 0 3A973080 32
+. 8D 85 20 FE FF FF 89 44 24 08 8B 85 18 FE FF FF 89 44 24 04 8B 95 1C FE FF FF 89 14 24 FF 55 0C
+
+. 0 3A966390 17
+. 55 89 E5 57 56 53 81 EC C8 01 00 00 E8 8A F5 00 00
+
+. 0 3A9663A1 119
+. 81 C3 CB 22 01 00 C6 85 0B FF FF FF 00 8D 83 54 D2 FE FF 8B BB 30 01 00 00 89 83 E0 F9 FF FF 8D 83 74 D2 FE FF 89 83 F8 FB FF FF 8D 83 84 D2 FE FF 89 83 FC FB FF FF 8D 83 D4 9F FF FF 89 83 04 FC FF FF 8B 83 00 02 00 00 C6 85 03 FF FF FF 00 C6 85 02 FF FF FF 00 89 85 48 FF FF FF 31 C0 89 85 F8 FE FF FF 31 C0 85 FF 89 85 F4 FE FF FF 8D 83 BA D5 FF FF 74 03
+
+. 0 3A96641B 17
+. 89 83 80 FD FF FF 8D 85 48 FF FF FF E8 B4 CE 00 00
+
+. 0 3A9732E0 16
+. 55 89 C1 89 E5 56 8B 10 31 F6 8B 02 85 C0 74 0E
+
+. 0 3A9732F0 5
+. 80 38 4C 74 0E
+
+. 0 3A973303 6
+. 80 78 01 44 75 EC
+
+. 0 3A9732F5 9
+. 83 C2 04 8B 02 85 C0 75 F2
+
+. 0 3A973309 9
+. 80 78 02 5F 8D 76 00 75 E3
+
+. 0 3A973312 10
+. 83 C2 04 8D 70 03 89 11 EB E2
+
+. 0 3A9732FE 5
+. 89 F0 5E 5D C3
+
+. 0 3A96642C 10
+. 89 85 FC FE FF FF 85 C0 74 72
+
+. 0 3A966436 27
+. 8B 95 FC FE FF FF 31 F6 0F B6 02 84 C0 0F 95 C2 3C 3D 0F 95 C0 21 D0 A8 01 74 34
+
+. 0 3A966451 28
+. 8B 85 FC FE FF FF 46 0F B6 0C 06 84 C9 0F 95 C0 80 F9 3D 0F 95 C2 21 D0 A8 01 75 E4
+
+. 0 3A96646D 5
+. 80 F9 3D 75 AF
+
+. 0 3A966472 8
+. 8D 46 FC 83 F8 10 77 A7
+
+. 0 3A96647A 11
+. 8B 84 83 D0 D5 FF FF 01 D8 FF E0
+
+. 0 3A967760 20
+. FC 8B B5 FC FE FF FF B9 0C 00 00 00 8D BB 2D F0 FF FF F3 A6
+
+. 0 3A967772 2
+. F3 A6
+
+. 0 3A967774 6
+. 0F 85 0B 03 00 00
+
+. 0 3A96777A 20
+. 8B 85 FC FE FF FF 83 C0 0D 89 83 AC 00 00 00 E9 93 EC FF FF
+
+. 0 3A966421 11
+. 8D 85 48 FF FF FF E8 B4 CE 00 00
+
+. 0 3A9678A7 20
+. FC 8B B5 FC FE FF FF B9 07 00 00 00 8D BB 69 ED FF FF F3 A6
+
+. 0 3A9678BB 6
+. 0F 85 70 02 00 00
+
+. 0 3A967B31 20
+. FC 8B B5 FC FE FF FF B9 07 00 00 00 8D BB CF ED FF FF F3 A6
+
+. 0 3A967B43 2
+. F3 A6
+
+. 0 3A967B45 6
+. 0F 85 90 03 00 00
+
+. 0 3A967B4B 20
+. 8B 85 FC FE FF FF 83 C0 08 89 83 B0 00 00 00 E9 C2 E8 FF FF
+
+. 0 3A9664A8 26
+. 8B 85 F8 FE FF FF 89 85 50 FF FF FF 8B 83 30 01 00 00 85 C0 0F 85 6F 15 00 00
+
+. 0 3A9664C2 14
+. 8B BB B8 00 00 00 85 FF 0F 84 B0 00 00 00
+
+. 0 3A966580 28
+. 8B 55 10 B8 01 00 00 00 89 83 34 00 00 00 8D 83 14 D2 FE FF 39 02 0F 84 FD 04 00 00
+
+. 0 3A96659C 22
+. C7 04 24 00 00 00 00 8D 93 F6 ED FF FF 31 C9 89 D0 E8 1E 64 00 00
+
+. 0 3A96C9D0 17
+. 55 89 E5 57 56 53 83 EC 2C 8B 7D 08 E8 4A 8F 00 00
+
+. 0 3A96C9E1 23
+. 81 C3 8B BC 00 00 89 55 EC 89 4D E8 89 45 F0 89 14 24 E8 D8 88 00 00
+
+. 0 3A96C9F8 28
+. 8D 50 01 B9 01 00 00 00 05 21 02 00 00 89 55 E4 89 4C 24 04 89 04 24 E8 18 8E FF FF
+
+. 0 3A96582C 6
+. FF A3 14 00 00 00
+
+. 0 3A973870 18
+. 55 89 E5 53 83 EC 04 8B 45 0C 8B 4D 08 E8 A9 20 00 00
+
+. 0 3A973882 17
+. 81 C3 EA 4D 00 00 0F AF C1 89 04 24 E8 89 1F FF FF
+
+. 0 3A96581C 6
+. FF A3 10 00 00 00
+
+. 0 3A973800 15
+. 55 89 E5 53 83 EC 08 8B 45 08 E8 1C 21 00 00
+
+. 0 3A97380F 22
+. 81 C3 5D 4E 00 00 C7 04 24 08 00 00 00 89 44 24 04 E8 E7 1F FF FF
+
+. 0 3A96580C 6
+. FF A3 0C 00 00 00
+
+. 0 3A973700 26
+. 55 89 E5 83 EC 24 89 5D F4 89 75 F8 8B 75 08 89 7D FC 8B 7D 0C E8 11 22 00 00
+
+. 0 3A97371A 16
+. 81 C3 52 4F 00 00 8B 8B 3C 01 00 00 85 C9 75 20
+
+. 0 3A97372A 54
+. 8B 83 18 FC FF FF 8D 93 28 02 00 00 89 93 38 01 00 00 8D 4C 02 FF F7 D8 21 C1 89 8B 3C 01 00 00 8B 83 38 01 00 00 01 F0 8D 50 FF F7 DE 21 F2 8D 04 17 39 C8 73 20
+
+. 0 3A973760 30
+. 89 93 40 01 00 00 89 D0 8D 14 17 89 93 38 01 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A973825 6
+. 83 C4 08 5B 5D C3
+
+. 0 3A973893 4
+. 5A 5B 5D C3
+
+. 0 3A96CA14 16
+. 89 45 E0 8B 55 E0 31 C0 85 D2 0F 84 05 02 00 00
+
+. 0 3A96CA24 44
+. 8B 75 E0 8B 4D E0 8B 55 E4 81 C6 14 02 00 00 89 C8 05 20 02 00 00 89 71 14 89 54 24 08 8B 4D EC 89 04 24 89 4C 24 04 E8 20 8E 00 00
+
+. 0 3A975870 22
+. 57 56 8B 7C 24 0C 8B 74 24 10 8B 4C 24 14 89 F8 FC 83 F9 20 76 56
+
+. 0 3A9758DC 2
+. F3 A4
+
+. 0 3A9758DE 3
+. 5E 5F C3
+
+. 0 3A96CA50 96
+. C7 46 08 01 00 00 00 8B 55 E0 BE 04 00 00 00 8B 4D F0 89 82 14 02 00 00 89 4A 04 8B 4D E0 0F B6 55 E8 0F B6 81 74 01 00 00 89 B9 60 01 00 00 80 E2 03 89 B1 AC 01 00 00 24 FC 08 D0 88 81 74 01 00 00 89 CA 8B 83 94 F9 FF FF 81 C2 9C 01 00 00 89 91 B0 01 00 00 31 C9 85 C0 0F 84 83 01 00 00
+
+. 0 3A96CC33 37
+. 8B 45 E0 89 83 94 F9 FF FF FF 83 98 F9 FF FF 83 83 C4 F9 FF FF 01 83 93 C8 F9 FF FF 00 85 FF 0F 85 AF FE FF FF
+
+. 0 3A96CC58 8
+. 8B 7D E0 E9 CC FE FF FF
+
+. 0 3A96CB2C 10
+. 85 C9 8D 87 50 01 00 00 74 04
+
+. 0 3A96CB3A 37
+. 89 04 8A 8B 45 E0 8B 55 E0 8B 4D F0 05 50 01 00 00 89 82 B4 01 00 00 0F B6 09 84 C9 88 4D DF 0F 84 C7 00 00 00
+
+. 0 3A96CC26 13
+. 8B 45 E0 83 C4 2C 5B 5E 5F 5D C2 04 00
+
+. 0 3A9665B2 17
+. 8B 8B 94 F9 FF FF 83 EC 04 85 C9 0F 84 05 14 00 00
+
+. 0 3A9665C3 76
+. 8B 75 08 8B 45 0C 89 B1 44 01 00 00 8B 75 10 66 89 81 4C 01 00 00 8B 06 89 81 48 01 00 00 FF 81 70 01 00 00 31 C0 8B 75 08 89 81 98 01 00 00 B8 FF FF FF FF 89 81 94 01 00 00 C1 65 0C 05 8B 55 0C 8D 04 32 39 C6 0F 83 AA 00 00 00
+
+. 0 3A96660F 8
+. 89 85 E4 FE FF FF EB 23
+
+. 0 3A96663A 7
+. 8B 06 83 F8 03 74 2F
+
+. 0 3A966641 5
+. 83 F8 03 76 D1
+
+. 0 3A966646 11
+. 3D 51 E5 74 64 0F 84 6F 06 00 00
+
+. 0 3A966651 11
+. 3D 51 E5 74 64 0F 87 3F 06 00 00
+
+. 0 3A96665C 6
+. 83 F8 06 90 75 CD
+
+. 0 3A966662 12
+. 8B 7E 08 8B 45 08 29 F8 89 01 EB C1
+
+. 0 3A96662F 11
+. 83 C6 20 3B B5 E4 FE FF FF 73 7F
+
+. 0 3A966670 39
+. 8B 01 8B 56 08 01 C2 89 93 94 00 00 00 8D 83 94 00 00 00 89 83 F8 F9 FF FF 8B 83 EC F9 FF FF 85 C0 0F 85 EF 0E 00 00
+
+. 0 3A967586 12
+. C6 85 0B FF FF FF 01 E9 9D F0 FF FF
+
+. 0 3A966617 9
+. 83 F8 01 0F 84 76 05 00 00
+
+. 0 3A966B96 24
+. 8B 46 1C 8B 56 08 8B 39 48 F7 D0 21 D0 8D 04 38 39 81 94 01 00 00 76 09
+
+. 0 3A966BAE 29
+. 89 81 94 01 00 00 8B 56 08 8D 04 3A 8B 7E 14 01 F8 39 81 98 01 00 00 0F 83 64 FA FF FF
+
+. 0 3A966BCB 11
+. 89 81 98 01 00 00 E9 59 FA FF FF
+
+. 0 3A966BB7 20
+. 8D 04 3A 8B 7E 14 01 F8 39 81 98 01 00 00 0F 83 64 FA FF FF
+
+. 0 3A966620 5
+. 83 F8 02 75 0A
+
+. 0 3A966625 21
+. 8B 46 08 8B 11 01 D0 89 41 08 83 C6 20 3B B5 E4 FE FF FF 73 7F
+
+. 0 3A966CC0 14
+. 8B 46 18 89 83 00 FC FF FF E9 61 F9 FF FF
+
+. 0 3A9666B9 10
+. 8B 91 98 01 00 00 85 D2 75 0B
+
+. 0 3A9666CE 10
+. 8B 83 F8 F9 FF FF 85 C0 75 1C
+
+. 0 3A9666F4 13
+. 80 BD 02 FF FF FF 00 0F 85 36 01 00 00
+
+. 0 3A966701 23
+. 89 8D E0 FE FF FF 8B 71 08 85 F6 89 B5 DC FE FF FF 0F 84 14 01 00 00
+
+. 0 3A966718 9
+. 8B 36 8D 79 18 85 F6 75 20
+
+. 0 3A966741 5
+. 83 FE 21 7E DD
+
+. 0 3A966723 30
+. 8B 95 DC FE FF FF 89 14 B7 83 85 DC FE FF FF 08 8B B5 DC FE FF FF 8B 06 85 C0 89 C6 74 27
+
+. 0 3A966746 16
+. B8 FF FF FF 6F 29 F0 83 F8 0F 0F 87 80 04 00 00
+
+. 0 3A966756 18
+. B8 21 00 00 70 29 F0 8B B5 DC FE FF FF 89 34 87 EB C4
+
+. 0 3A96672C 21
+. 83 85 DC FE FF FF 08 8B B5 DC FE FF FF 8B 06 85 C0 89 C6 74 27
+
+. 0 3A966768 6
+. 8B 11 85 D2 74 59
+
+. 0 3A9667C7 7
+. 8B 57 78 85 D2 74 30
+
+. 0 3A9667FE 10
+. 8B 97 98 00 00 00 85 D2 74 16
+
+. 0 3A96681E 7
+. 8B 47 74 85 C0 74 07
+
+. 0 3A96682C 11
+. 8B 41 28 85 C0 0F 85 C9 04 00 00
+
+. 0 3A966D00 7
+. 89 C8 E8 79 50 00 00
+
+. 0 3A966D07 5
+. E9 2B FB FF FF
+
+. 0 3A966837 13
+. 83 BD 50 FF FF FF 02 0F 84 1F 15 00 00
+
+. 0 3A966844 13
+. 80 BD 02 FF FF FF 00 0F 84 9F 04 00 00
+
+. 0 3A966CF0 11
+. 8B 83 AC 00 00 00 E8 35 3E 00 00
+
+. 0 3A96AB30 14
+. 55 89 E5 57 56 53 83 EC 2C E8 ED AD 00 00
+
+. 0 3A96AB3E 41
+. 81 C3 2E DB 00 00 89 45 F0 8B 93 24 FC FF FF 8D 8B CC 00 00 00 8B 83 20 FC FF FF 89 0C 24 8D 8B C8 00 00 00 E8 D9 7B 00 00
+
+. 0 3A972740 14
+. 55 89 E5 57 56 53 83 EC 38 E8 DD 31 00 00
+
+. 0 3A97274E 45
+. 81 C3 1E 5F 00 00 89 55 EC 85 C0 8B 93 58 FC FF FF 89 45 F0 0F 95 C0 31 FF 23 93 54 FC FF FF 0F B6 C0 89 4D E8 89 45 E0 89 55 E4 74 25
+
+. 0 3A9727A0 11
+. 8B 45 E0 85 C0 0F 84 20 01 00 00
+
+. 0 3A9727AB 33
+. 8B 55 E0 31 F6 31 FF 8D 04 D5 10 00 00 00 29 C4 8B 45 E4 8D 4C 24 1B 83 E1 F0 85 C0 89 4D D8 74 40
+
+. 0 3A97280C 7
+. 8B 7D F0 85 FF 74 10
+
+. 0 3A972813 26
+. 8B 45 D8 8B 55 F0 8B 4D EC 89 14 F0 89 4C F0 04 83 7D E0 01 0F 84 D0 00 00 00
+
+. 0 3A9728FD 36
+. 8B 75 D8 8B 56 04 42 0F B6 4D E0 BE 01 00 00 00 89 F0 D3 E0 8B 4D E8 89 01 8D 04 C2 89 04 24 E8 FB 2E FF FF
+
+. 0 3A97374A 22
+. 8B 83 38 01 00 00 01 F0 8D 50 FF F7 DE 21 F2 8D 04 17 39 C8 73 20
+
+. 0 3A972921 7
+. 89 45 DC 85 C0 74 8C
+
+. 0 3A972928 10
+. 83 7D E0 01 0F 84 D9 00 00 00
+
+. 0 3A972A0B 64
+. 8B 75 E8 8B 55 DC 8B 4D D8 8B 06 8D 04 C2 89 02 8B 41 04 40 89 42 04 8B 06 C7 42 0C 00 00 00 00 8D 04 C2 89 42 08 8B 41 04 89 44 24 08 8B 01 89 44 24 04 8B 06 8D 04 C2 89 04 24 E8 25 2D 00 00
+
+. 0 3A975770 22
+. 57 56 8B 7C 24 0C 8B 74 24 10 8B 4C 24 14 89 F8 FC 83 F9 20 76 52
+
+. 0 3A9757D8 2
+. F3 A4
+
+. 0 3A9757DA 5
+. 89 F8 5E 5F C3
+
+. 0 3A972A4B 25
+. C6 00 2F 8B 55 08 C7 06 02 00 00 00 8B 75 DC 8B 46 04 89 02 E9 8C FE FF FF
+
+. 0 3A9728F0 13
+. 8B 45 DC 8D 65 F4 5B 5E 5F 5D C2 04 00
+
+. 0 3A96AB67 21
+. 89 83 C4 00 00 00 83 EC 04 C7 04 24 0C 00 00 00 E8 A0 AC FF FF
+
+. 0 3A96AB7C 16
+. 89 83 D0 00 00 00 85 C0 89 C6 0F 84 93 02 00 00
+
+. 0 3A96AB8C 42
+. 8B 93 C8 00 00 00 B8 CD CC CC CC 8D 14 95 27 00 00 00 F7 E2 C1 EA 04 8D 14 92 89 55 E8 89 D0 C1 E0 06 89 04 24 E8 66 AC FF FF
+
+. 0 3A96ABB6 24
+. 89 06 8B 83 D0 00 00 00 8D 93 90 E3 FF FF 8B 00 85 C0 0F 84 57 02 00 00
+
+. 0 3A96ABCE 106
+. 89 83 DC F9 FF FF 31 D2 8B 8B C8 00 00 00 89 93 D4 00 00 00 89 C2 8D 83 1C D6 FF FF 89 45 EC 8B 45 E8 8D BB 17 F0 FF FF C7 45 D8 00 00 00 00 C1 E0 02 89 7D E0 89 45 E4 89 16 8B 7D E0 83 C6 04 C7 42 08 00 00 00 00 8B 45 EC 89 7A 04 8B 7D D8 89 42 0C 8B BC BB 14 D6 FF FF 8D 44 07 01 89 45 EC 31 C0 39 C8 89 7A 10 73 13
+
+. 0 3A96AC38 19
+. 90 8D B4 26 00 00 00 00 31 FF 89 7C 82 14 40 39 C8 72 F5
+
+. 0 3A96AC40 11
+. 31 FF 89 7C 82 14 40 39 C8 72 F5
+
+. 0 3A96AC4B 16
+. FF 45 D8 C7 45 DC 00 00 00 00 83 7D D8 02 74 08
+
+. 0 3A96AC5B 24
+. 8B 7D E4 01 D7 89 7D DC 8B 45 DC 89 02 8B 45 E4 01 C2 83 7D D8 01 76 93
+
+. 0 3A96AC06 50
+. 89 16 8B 7D E0 83 C6 04 C7 42 08 00 00 00 00 8B 45 EC 89 7A 04 8B 7D D8 89 42 0C 8B BC BB 14 D6 FF FF 8D 44 07 01 89 45 EC 31 C0 39 C8 89 7A 10 73 13
+
+. 0 3A96AC63 16
+. 8B 45 DC 89 02 8B 45 E4 01 C2 83 7D D8 01 76 93
+
+. 0 3A96AC73 27
+. C7 06 00 00 00 00 8B B3 94 F9 FF FF BF 09 00 00 00 89 BB D8 00 00 00 85 F6 74 2B
+
+. 0 3A96AC8E 10
+. 8B 96 8C 00 00 00 85 D2 75 4F
+
+. 0 3A96AC98 22
+. 8B 56 54 BF FF FF FF FF 89 BE CC 01 00 00 85 D2 0F 85 2B 01 00 00
+
+. 0 3A96ACAE 18
+. BF FF FF FF FF 89 BE 80 01 00 00 8B 75 F0 85 F6 74 08
+
+. 0 3A96ACC0 8
+. 8B 7D F0 80 3F 00 75 45
+
+. 0 3A96AD0D 10
+. 89 3C 24 89 FE E8 B9 A5 00 00
+
+. 0 3A975311 10
+. 31 CA 81 E2 00 01 01 01 75 4E
+
+. 0 3A97531B 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 3E
+
+. 0 3A97532B 10
+. 31 CA 81 E2 00 01 01 01 75 34
+
+. 0 3A975335 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 24
+
+. 0 3A975345 10
+. 31 CA 81 E2 00 01 01 01 75 1A
+
+. 0 3A97534F 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 0A
+
+. 0 3A97535F 10
+. 31 CA 81 E2 00 01 01 01 74 98
+
+. 0 3A96AD17 34
+. 8D 50 01 83 C0 13 83 E0 FC 29 C4 8D 44 24 1B 83 E0 F0 89 54 24 08 89 7C 24 04 89 04 24 E8 37 AB 00 00
+
+. 0 3A975886 10
+. F7 D8 83 E0 03 29 C1 91 F3 A4
+
+. 0 3A975890 7
+. 89 C1 83 E9 20 78 3E
+
+. 0 3A975897 62
+. 8B 07 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A975899 60
+. 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A9758D5 9
+. 83 C1 20 8B 44 24 0C F3 A4
+
+. 0 3A96AD39 14
+. 89 C7 0F B6 06 BA 01 00 00 00 84 C0 74 1D
+
+. 0 3A96AD47 29
+. 89 F6 8D BC 27 00 00 00 00 2C 3A 3C 01 0F 96 C0 0F B6 C0 46 01 C2 0F B6 06 84 C0 75 EC
+
+. 0 3A96AD50 20
+. 2C 3A 3C 01 0F 96 C0 0F B6 C0 46 01 C2 0F B6 06 84 C0 75 EC
+
+. 0 3A96AD64 15
+. 8D 04 95 04 00 00 00 89 04 24 E8 A9 AA FF FF
+
+. 0 3A96AD73 22
+. 89 83 BC 00 00 00 85 C0 89 C6 8D 93 90 E3 FF FF 0F 84 9C 00 00 00
+
+. 0 3A96AD89 40
+. 31 C9 8D 83 2A F0 FF FF 89 F2 89 4C 24 08 8D 8B 3A F0 FF FF 89 44 24 04 8B 83 30 01 00 00 89 04 24 89 F8 E8 0F DD FF FF
+
+. 0 3A968AC0 14
+. 55 89 E5 57 56 53 83 EC 3C E8 5D CE 00 00
+
+. 0 3A968ACE 52
+. 81 C3 9E FB 00 00 89 45 F0 8D 45 F0 89 55 EC 89 4D E8 C7 45 E0 00 00 00 00 89 45 C4 8D B6 00 00 00 00 8B 45 E8 89 44 24 04 8B 55 C4 89 14 24 E8 2E AA 00 00
+
+. 0 3A973530 14
+. 55 89 E5 57 56 8B 45 08 8B 38 85 FF 74 51
+
+. 0 3A97353E 9
+. 0F B6 07 89 FE 84 C0 74 41
+
+. 0 3A973547 19
+. 89 F6 8D BC 27 00 00 00 00 8B 4D 0C 0F B6 11 38 C2 74 12
+
+. 0 3A97355A 14
+. 8D B6 00 00 00 00 41 0F B6 11 84 D2 74 18
+
+. 0 3A973568 4
+. 38 C2 75 F4
+
+. 0 3A973560 8
+. 41 0F B6 11 84 D2 74 18
+
+. 0 3A973580 8
+. 46 0F B6 06 84 C0 75 C8
+
+. 0 3A973550 10
+. 8B 4D 0C 0F B6 11 38 C2 74 12
+
+. 0 3A97356C 6
+. 84 D2 89 F6 75 23
+
+. 0 3A973595 6
+. C6 06 00 46 EB EF
+
+. 0 3A97358A 11
+. 8B 45 08 89 30 5E 89 F8 5F 5D C3
+
+. 0 3A968B02 11
+. 89 45 E4 85 C0 0F 84 EE 00 00 00
+
+. 0 3A968B0D 8
+. 89 04 24 E8 BB C7 00 00
+
+. 0 3A968B15 7
+. 89 45 D8 85 C0 75 09
+
+. 0 3A968B25 6
+. 83 7D D8 01 76 11
+
+. 0 3A968B2B 17
+. 8B 4D E4 8B 7D D8 80 7C 0F FF 2F 0F 84 D6 00 00 00
+
+. 0 3A968B3C 7
+. 8B 45 D8 85 C0 74 15
+
+. 0 3A968B43 13
+. 8B 4D E4 8B 7D D8 80 7C 0F FF 2F 74 08
+
+. 0 3A968B50 19
+. C6 04 0F 2F 47 89 7D D8 8B 45 08 85 C0 0F 85 D2 01 00 00
+
+. 0 3A968B63 13
+. 8B 93 DC F9 FF FF 85 D2 89 55 DC 75 15
+
+. 0 3A968B85 11
+. 8B 4D DC 8B 7D D8 39 79 10 75 E5
+
+. 0 3A968B75 16
+. 8B 55 DC 8B 12 85 D2 89 55 DC 0F 84 AC 00 00 00
+
+. 0 3A968C31 9
+. 8B 4D 10 31 F6 85 C9 74 0E
+
+. 0 3A968C48 24
+. 8B 83 C8 00 00 00 8B 55 D8 8D 04 86 8D 44 02 15 89 04 24 E8 BC CB FF FF
+
+. 0 3A968C60 11
+. 89 45 DC 85 C0 0F 84 54 01 00 00
+
+. 0 3A968C6B 41
+. 8B 4D DC 8B 83 C8 00 00 00 8B 7D D8 8D 44 81 14 89 41 0C 89 7C 24 08 8B 45 E4 89 44 24 04 8B 41 0C 89 04 24 E8 DC CA 00 00
+
+. 0 3A975786 10
+. F7 D8 83 E0 03 29 C1 91 F3 A4
+
+. 0 3A975790 7
+. 89 C1 83 E9 20 78 3E
+
+. 0 3A975797 62
+. 8B 07 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A9757D5 5
+. 83 C1 20 F3 A4
+
+. 0 3A968C94 17
+. C6 00 00 8B 55 DC 3B BB D8 00 00 00 89 7A 10 76 06
+
+. 0 3A968CA5 31
+. 89 BB D8 00 00 00 8B 4D E4 31 C0 80 39 2F 8B 8B C8 00 00 00 0F 95 C0 31 D2 01 C0 39 CA 73 18
+
+. 0 3A968CC4 24
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 7D DC 89 44 97 14 42 39 CA 72 F4
+
+. 0 3A968CD0 12
+. 8B 7D DC 89 44 97 14 42 39 CA 72 F4
+
+. 0 3A968CDC 20
+. 8B 55 0C 8B 45 DC 89 50 04 8B 55 10 85 D2 0F 84 C0 00 00 00
+
+. 0 3A968DB0 15
+. 8B 7D DC C7 47 08 00 00 00 00 E9 53 FF FF FF
+
+. 0 3A968D12 35
+. 8B 83 DC F9 FF FF 8B 55 DC 8B 4D E0 8B 7D EC 89 02 89 93 DC F9 FF FF 89 14 8F 41 89 4D E0 E9 BB FD FF FF
+
+. 0 3A968AF0 18
+. 8B 45 E8 89 44 24 04 8B 55 C4 89 14 24 E8 2E AA 00 00
+
+. 0 3A9752F6 8
+. 38 30 0F 84 88 00 00 00
+
+. 0 3A9752FE 19
+. 40 31 D2 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 58
+
+. 0 3A975385 6
+. 40 2B 44 24 04 C3
+
+. 0 3A968CAB 25
+. 8B 4D E4 31 C0 80 39 2F 8B 8B C8 00 00 00 0F 95 C0 31 D2 01 C0 39 CA 73 18
+
+. 0 3A968B90 16
+. FC 8B 45 D8 8B 79 0C 8B 75 E4 39 C0 89 C1 F3 A6
+
+. 0 3A968B9E 2
+. F3 A6
+
+. 0 3A968BA0 2
+. 75 D3
+
+. 0 3A968BA2 11
+. 8B 75 DC 85 F6 0F 84 84 00 00 00
+
+. 0 3A968BAD 7
+. 31 C0 3B 45 E0 73 11
+
+. 0 3A968BB4 11
+. 8B 4D EC 8B 7D DC 39 3C 81 74 06
+
+. 0 3A968BBF 6
+. 40 3B 45 E0 72 EF
+
+. 0 3A968BC5 9
+. 3B 45 E0 0F 85 22 FF FF FF
+
+. 0 3A973588 13
+. 31 F6 8B 45 08 89 30 5E 89 F8 5F 5D C3
+
+. 0 3A968B1C 15
+. 8D 93 2C D6 FF FF 89 55 E4 83 7D D8 01 76 11
+
+. 0 3A968B58 11
+. 8B 45 08 85 C0 0F 85 D2 01 00 00
+
+. 0 3A97358F 6
+. 5E 89 F8 5F 5D C3
+
+. 0 3A968BFB 23
+. 8B 7D EC 8B 4D E0 89 F8 C7 04 8F 00 00 00 00 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96ADB1 12
+. 8B 83 BC 00 00 00 8B 10 85 D2 74 4D
+
+. 0 3A96ADBD 28
+. 31 C0 89 83 C0 00 00 00 8B 83 DC F9 FF FF 89 83 8C FD FF FF 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A966CFB 5
+. E9 51 FB FF FF
+
+. 0 3A966851 10
+. 8B B3 E8 F9 FF FF 85 F6 75 0E
+
+. 0 3A96685B 102
+. 8B 83 F8 F9 FF FF 8B 00 89 83 E8 F9 FF FF 0F B6 83 58 FB FF FF FF 83 98 F9 FF FF 24 FC 0C 01 88 83 58 FB FF FF 8D 83 94 F9 FF FF 89 C2 89 85 D0 FE FF FF 83 C2 50 8B 83 94 F9 FF FF 83 83 C4 F9 FF FF 01 89 50 0C 8B 8B 94 F9 FF FF 83 93 C8 F9 FF FF 00 83 BB 78 FD FF FF FE 89 8B F4 F9 FF FF 0F 84 17 04 00 00
+
+. 0 3A966CD8 11
+. 8B 09 B8 FF FF FF FF 85 C9 74 02
+
+. 0 3A966CE5 11
+. 89 83 78 FD FF FF E9 D1 FB FF FF
+
+. 0 3A9668C1 41
+. 8B 83 78 FB FF FF 8B 50 1C 89 C6 01 D6 0F B7 50 2C 89 B3 28 FB FF FF 66 89 93 30 FB FF FF 0F B7 50 2C 89 D0 4A 85 C0 74 22
+
+. 0 3A9668EA 26
+. 89 D0 C1 E0 05 01 F0 89 D1 89 D7 C1 E1 05 81 38 52 E5 74 64 0F 84 46 14 00 00
+
+. 0 3A966904 8
+. 4A 83 E8 20 85 FF 75 E5
+
+. 0 3A9668F1 19
+. 89 D1 89 D7 C1 E1 05 81 38 52 E5 74 64 0F 84 46 14 00 00
+
+. 0 3A96690C 30
+. 8B B3 B0 00 00 00 31 C0 89 85 14 FF FF FF 31 C0 85 F6 89 85 10 FF FF FF 0F 85 B0 12 00 00
+
+. 0 3A967BDA 14
+. 89 34 24 8D BD 3C FF FF FF E8 E8 D6 00 00
+
+. 0 3A967BE8 34
+. 8D 50 01 83 C0 13 83 E0 FC 29 C4 8D 44 24 2F 83 E0 F0 89 54 24 08 89 74 24 04 89 04 24 E8 66 DC 00 00
+
+. 0 3A967C0A 36
+. 89 85 3C FF FF FF 8D 93 D7 ED FF FF 89 95 60 FE FF FF 8B 8D 60 FE FF FF 89 3C 24 89 4C 24 04 E8 02 B9 00 00
+
+. 0 3A967C2E 10
+. 85 C0 89 C6 0F 84 F2 EC FF FF
+
+. 0 3A967C38 5
+. 80 3E 00 74 DF
+
+. 0 3A967C3D 14
+. 8B 83 30 01 00 00 85 C0 0F 85 2A 05 00 00
+
+. 0 3A967C4B 37
+. 8B 83 94 F9 FF FF 31 C9 31 D2 89 4C 24 08 B9 01 00 00 00 89 54 24 04 89 F2 C7 04 24 01 00 00 00 E8 D0 31 00 00
+
+. 0 3A96AE40 17
+. 55 89 E5 57 56 53 81 EC 58 02 00 00 E8 DA AA 00 00
+
+. 0 3A96AE51 34
+. 81 C3 1B D8 00 00 89 85 D8 FD FF FF 8B B3 94 F9 FF FF 89 95 D4 FD FF FF 89 8D D0 FD FF FF 85 F6 74 3F
+
+. 0 3A96AE73 22
+. 8D B6 00 00 00 00 8D BC 27 00 00 00 00 F6 86 75 01 00 00 02 75 22
+
+. 0 3A96AE89 13
+. 8B 85 D4 FD FF FF 89 F2 E8 EA 5C 00 00
+
+. 0 3A970B80 21
+. 55 89 E5 57 89 C7 56 89 D6 53 83 EC 08 8B 42 04 E8 96 4D 00 00
+
+. 0 3A970B95 18
+. 81 C3 D7 7A 00 00 89 3C 24 89 44 24 04 E8 D9 46 00 00
+
+. 0 3A975280 33
+. 55 89 E5 56 83 EC 04 8B 4D 08 8B 55 0C 8D 76 00 0F B6 31 41 0F B6 02 42 88 45 FB 89 F0 84 C0 74 16
+
+. 0 3A9752A1 7
+. 89 F0 3A 45 FB 74 E8
+
+. 0 3A9752A8 15
+. 0F B6 D0 0F B6 45 FB 29 C2 89 D0 5A 5E 5D C3
+
+. 0 3A970BA7 9
+. 85 C0 BA 01 00 00 00 74 30
+
+. 0 3A970BB0 7
+. 8B 76 14 85 F6 74 27
+
+. 0 3A970BB7 23
+. 89 F6 8D BC 27 00 00 00 00 8B 06 89 3C 24 89 44 24 04 E8 B2 46 00 00
+
+. 0 3A970BCE 9
+. 85 C0 BA 01 00 00 00 74 09
+
+. 0 3A970BD7 7
+. 8B 76 04 85 F6 75 E2
+
+. 0 3A970BDE 12
+. 31 D2 83 C4 08 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96AE96 8
+. 85 C0 0F 85 09 01 00 00
+
+. 0 3A96AE9E 13
+. F6 86 75 01 00 00 01 0F 84 08 01 00 00
+
+. 0 3A96AFB3 11
+. 8B 56 50 85 D2 0F 84 ED FE FF FF
+
+. 0 3A96AEAB 7
+. 8B 76 0C 85 F6 75 CE
+
+. 0 3A96AE80 9
+. F6 86 75 01 00 00 02 75 22
+
+. 0 3A975290 17
+. 0F B6 31 41 0F B6 02 42 88 45 FB 89 F0 84 C0 74 16
+
+. 0 3A96AFBE 29
+. 8B 46 2C 8B 7A 04 8B 50 04 01 D7 89 7C 24 04 8B 85 D4 FD FF FF 89 04 24 E8 A5 A2 00 00
+
+. 0 3A96AFDB 8
+. 85 C0 0F 85 C8 FE FF FF
+
+. 0 3A96AEB2 13
+. F6 83 14 FC FF FF 40 0F 85 FF 03 00 00
+
+. 0 3A96AEBF 23
+. B8 2F 00 00 00 89 44 24 04 8B 85 D4 FD FF FF 89 04 24 E8 DA A1 00 00
+
+. 0 3A9750B0 32
+. 57 56 53 55 8B 44 24 14 8B 54 24 18 89 C7 31 C9 88 D6 88 D1 C1 E2 10 88 CD 09 CA 83 E7 03 74 41
+
+. 0 3A975111 27
+. 8B 08 BD FF FE FE FE BF FF FE FE FE 01 CD 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00
+
+. 0 3A97512C 17
+. 89 CB 81 CD FF FE FE FE 83 C5 01 0F 85 05 01 00 00
+
+. 0 3A97513D 22
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 0F 83 D4 00 00 00
+
+. 0 3A975153 21
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 0F 85 BF 00 00 00
+
+. 0 3A975227 7
+. 83 E8 04 84 DB 74 0F
+
+. 0 3A97523D 5
+. 5D 5B 5E 5F C3
+
+. 0 3A96AED6 8
+. 85 C0 0F 84 AA 01 00 00
+
+. 0 3A96AEDE 14
+. 8B BD D8 FD FF FF 85 FF 0F 84 79 02 00 00
+
+. 0 3A96AEEC 17
+. 8B 95 D4 FD FF FF 8B 85 D8 FD FF FF E8 D3 F6 FF FF
+
+. 0 3A96A5D0 32
+. 55 B9 24 00 00 00 89 E5 83 EC 24 89 5D F4 89 75 F8 31 F6 89 7D FC 89 D7 89 45 F0 E8 3B B3 00 00
+
+. 0 3A96A5F0 18
+. 81 C3 7C E0 00 00 89 4C 24 04 89 14 24 E8 AE AA 00 00
+
+. 0 3A975168 13
+. 31 CD 01 CF 8D 40 04 0F 83 CD 00 00 00
+
+. 0 3A975175 17
+. 89 CB 81 CD FF FE FE FE 83 C5 01 0F 85 BC 00 00 00
+
+. 0 3A975186 22
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 0F 83 8B 00 00 00
+
+. 0 3A97519C 17
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 75 7A
+
+. 0 3A9751AD 13
+. 31 CD 01 CF 8D 40 04 0F 83 88 00 00 00
+
+. 0 3A9751BA 13
+. 89 CB 81 CD FF FE FE FE 83 C5 01 75 7B
+
+. 0 3A9751C7 18
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 73 4E
+
+. 0 3A9751D9 17
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 75 3D
+
+. 0 3A9751EA 9
+. 31 CD 01 CF 8D 40 04 73 4F
+
+. 0 3A9751F3 13
+. 89 CB 81 CD FF FE FE FE 83 C5 01 75 42
+
+. 0 3A975200 18
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 73 15
+
+. 0 3A975212 21
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 0F 84 F8 FE FF FF
+
+. 0 3A97511F 13
+. 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00
+
+. 0 3A975242 7
+. 83 E8 04 38 D1 74 F4
+
+. 0 3A975249 5
+. 80 F9 00 74 1C
+
+. 0 3A97524E 5
+. 40 38 D5 74 EA
+
+. 0 3A975253 5
+. 80 FD 00 74 12
+
+. 0 3A97526A 7
+. 31 C0 5D 5B 5E 5F C3
+
+. 0 3A96A602 4
+. 85 C0 75 41
+
+. 0 3A96A606 4
+. 85 F6 75 52
+
+. 0 3A96A60A 11
+. 89 3C 24 8D 76 00 E8 BB AC 00 00
+
+. 0 3A96A615 11
+. 8D 70 01 89 34 24 E8 FC B1 FF FF
+
+. 0 3A96A620 6
+. 31 D2 85 C0 74 12
+
+. 0 3A96A626 16
+. 89 74 24 08 89 7C 24 04 89 04 24 E8 3A B2 00 00
+
+. 0 3A96A636 17
+. 89 C2 89 D0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96AEFD 14
+. 89 85 E0 FD FF FF 85 C0 0F 85 05 01 00 00
+
+. 0 3A96B010 19
+. 8D 8D E4 FD FF FF 89 CA 89 8D B8 FD FF FF E8 FD DE FF FF
+
+. 0 3A968F20 16
+. 55 89 E5 57 56 31 F6 53 83 EC 4C E8 FB C9 00 00
+
+. 0 3A968F30 31
+. 81 C3 3C F7 00 00 89 45 D0 89 55 CC C7 45 C4 00 00 00 00 89 74 24 04 89 04 24 E8 E1 B4 00 00
+
+. 0 3A974430 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 05 00 00 00 CD 80
+
+. 0 3A974444 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 0 3A97444C 1
+. C3
+
+. 0 3A968F4F 12
+. 89 45 C8 83 F8 FF 0F 84 F5 01 00 00
+
+. 0 3A968F5B 38
+. 31 C9 BA 00 02 00 00 89 8B 48 01 00 00 89 54 24 08 8B 55 CC 83 C2 04 89 55 B8 89 54 24 04 89 04 24 E8 2F B5 00 00
+
+. 0 3A9744B0 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 03 00 00 00 CD 80
+
+. 0 3A9744C4 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 0 3A9744CC 1
+. C3
+
+. 0 3A968F81 14
+. 8B 75 CC 83 F8 33 89 06 0F 8E 10 02 00 00
+
+. 0 3A968F8F 17
+. FC 8B 75 B8 B9 09 00 00 00 8D BB 2F D6 FF FF F3 A6
+
+. 0 3A968F9E 2
+. F3 A6
+
+. 0 3A968FA0 6
+. 0F 85 15 02 00 00
+
+. 0 3A968FA6 19
+. 8B 75 B8 8D 93 B4 E3 FF FF 83 7E 14 01 0F 85 20 01 00 00
+
+. 0 3A968FB9 11
+. 66 83 7E 12 03 0F 85 6E 01 00 00
+
+. 0 3A968FC4 17
+. 66 83 7E 2A 20 8D 93 E0 E3 FF FF 0F 85 04 01 00 00
+
+. 0 3A968FD5 13
+. 0F B7 46 10 83 F8 03 0F 85 4E 02 00 00
+
+. 0 3A968FE2 40
+. 8B 7D B8 8B 57 1C 0F B7 4F 2C 8B 7D CC 8D 7C 3A 04 89 7D C0 0F B7 F1 8B 7D CC C1 E6 05 8D 04 16 3B 07 0F 87 7D 00 00 00
+
+. 0 3A96900A 18
+. 8B 75 C0 0F B7 C1 C1 E0 05 89 75 BC 01 F0 39 C6 72 1D
+
+. 0 3A969039 8
+. 8B 7D BC 83 3F 04 75 E0
+
+. 0 3A969021 24
+. 83 45 BC 20 0F B7 C1 8B 55 C0 C1 E0 05 01 D0 39 45 BC 0F 83 17 01 00 00
+
+. 0 3A969150 11
+. 8B 45 C8 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96B023 13
+. 89 85 CC FD FF FF 40 0F 84 80 04 00 00
+
+. 0 3A96B030 78
+. 8B 83 28 01 00 00 89 85 DC FD FF FF 8D 85 DC FD FF FF 89 44 24 10 8B 45 10 89 44 24 0C 8B 4D 08 89 4C 24 08 8B 85 D8 FD FF FF 89 44 24 04 8B 85 E0 FD FF FF 89 04 24 8B 8D B8 FD FF FF 8B 95 CC FD FF FF 8B 85 D4 FD FF FF E8 12 E7 FF FF
+
+. 0 3A969790 51
+. 55 89 E5 57 56 53 81 EC 08 01 00 00 89 45 90 8D 45 94 89 4D 88 31 C9 89 55 8C C7 45 84 00 00 00 00 89 8D 74 FF FF FF 89 44 24 08 8B 45 8C E8 68 C1 00 00
+
+. 0 3A9697C3 22
+. 81 C3 A9 EE 00 00 C7 04 24 03 00 00 00 89 44 24 04 E8 47 AB 00 00
+
+. 0 3A974320 14
+. 55 89 E5 83 EC 58 89 5D F4 E8 FD 15 00 00
+
+. 0 3A97432E 24
+. 81 C3 3E 43 00 00 89 75 F8 8B B3 24 00 00 00 89 7D FC 8B 06 85 C0 75 7A
+
+. 0 3A974346 21
+. 8B BB 48 01 00 00 8B 55 0C 8B 4D 10 87 D3 B8 C5 00 00 00 CD 80
+
+. 0 3A97435B 13
+. 87 D3 3D 00 F0 FF FF 0F 87 AE 00 00 00
+
+. 0 3A974368 7
+. 83 F8 FF 89 C2 74 31
+
+. 0 3A97436F 4
+. 85 D2 75 1D
+
+. 0 3A974373 11
+. 8B 4D 10 8B 41 58 39 41 0C 74 12
+
+. 0 3A974390 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9697D9 8
+. 85 C0 0F 88 ED 05 00 00
+
+. 0 3A9697E1 13
+. 8B 93 94 F9 FF FF 85 D2 89 55 84 74 6F
+
+. 0 3A9697EE 59
+. 8B 45 94 8B 55 98 8B 75 EC 89 85 6C FF FF FF 8B 7D F0 89 95 70 FF FF FF 8D 76 00 8D BC 27 00 00 00 00 8B 4D 84 8B 91 C8 01 00 00 8B 81 C4 01 00 00 89 D1 31 F9 31 F0 09 C1 75 27
+
+. 0 3A969850 13
+. 8B 45 84 8B 40 0C 85 C0 89 45 84 75 B3
+
+. 0 3A969810 25
+. 8B 4D 84 8B 91 C8 01 00 00 8B 81 C4 01 00 00 89 D1 31 F9 31 F0 09 C1 75 27
+
+. 0 3A96985D 12
+. 31 C0 F6 45 14 04 0F 85 5D 05 00 00
+
+. 0 3A969869 13
+. F6 83 14 FC FF FF 40 0F 85 DA 07 00 00
+
+. 0 3A969876 29
+. 8B 4D 88 8B 45 0C 83 C1 04 89 4D 80 89 04 24 8B 55 90 8B 4D 10 8B 45 08 E8 3D 31 00 00
+
+. 0 3A973780 69
+. 89 93 38 01 00 00 8B 83 18 FC FF FF 31 C9 89 4C 24 14 BA FF FF FF FF 89 54 24 10 8D 74 38 FF F7 D8 C7 04 24 00 00 00 00 21 C6 B8 22 00 00 00 89 44 24 0C B8 03 00 00 00 89 44 24 08 89 74 24 04 E8 8B 14 00 00
+
+. 0 3A974C50 13
+. 89 DA B8 5A 00 00 00 8D 5C 24 04 CD 80
+
+. 0 3A974C5D 9
+. 89 D3 3D 00 F0 FF FF 77 01
+
+. 0 3A974C66 1
+. C3
+
+. 0 3A9737C5 8
+. 3B 83 3C 01 00 00 74 06
+
+. 0 3A9737CD 50
+. 89 83 38 01 00 00 8B 93 38 01 00 00 01 F0 89 83 3C 01 00 00 89 93 40 01 00 00 89 D0 8D 14 17 89 93 38 01 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96CAB0 9
+. 89 C2 8B 40 0C 85 C0 74 10
+
+. 0 3A96CAB9 16
+. 8D B4 26 00 00 00 00 89 C2 8B 40 0C 85 C0 75 F7
+
+. 0 3A96CAC9 62
+. 8B 45 E0 89 50 10 89 42 0C 8B 90 B0 01 00 00 8B 83 94 F9 FF FF 05 50 01 00 00 89 04 8A B9 01 00 00 00 FF 83 98 F9 FF FF 83 83 C4 F9 FF FF 01 83 93 C8 F9 FF FF 00 85 FF 0F 84 51 01 00 00
+
+. 0 3A96CB07 10
+. 8B 87 60 01 00 00 85 C0 74 1B
+
+. 0 3A96CB36 4
+. 3B 02 74 03
+
+. 0 3A96CB3D 34
+. 8B 45 E0 8B 55 E0 8B 4D F0 05 50 01 00 00 89 82 B4 01 00 00 0F B6 09 84 C9 88 4D DF 0F 84 C7 00 00 00
+
+. 0 3A96CB5F 11
+. 8B 45 F0 89 04 24 E8 66 87 00 00
+
+. 0 3A96CB6A 14
+. 40 80 7D DF 2F 89 45 D8 0F 84 E8 00 00 00
+
+. 0 3A96CC60 8
+. 89 04 24 E8 B4 8B FF FF
+
+. 0 3A96CC68 10
+. 85 C0 89 C7 0F 85 79 FF FF FF
+
+. 0 3A96CBEB 22
+. 8B 55 D8 89 54 24 08 8B 4D F0 89 04 24 89 4C 24 04 E8 6F 8B 00 00
+
+. 0 3A975799 60
+. 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A96CC01 2
+. EB 0D
+
+. 0 3A96CC10 6
+. 48 80 38 2F 75 FA
+
+. 0 3A96CC16 4
+. 39 F8 74 69
+
+. 0 3A96CC1A 25
+. C6 00 00 8B 45 E0 89 B8 90 01 00 00 8B 45 E0 83 C4 2C 5B 5E 5F 5D C2 04 00
+
+. 0 3A969893 20
+. 89 45 84 83 EC 04 85 C0 8D 93 94 E4 FF FF 0F 84 CD 04 00 00
+
+. 0 3A9698A7 79
+. 8B 75 80 8B 55 84 8B 46 18 0F B7 4E 10 89 82 48 01 00 00 0F B7 46 2C 89 8D 78 FF FF FF 8B 4D 88 66 89 82 4C 01 00 00 8B 55 80 0F B7 76 2C 8B 42 1C C1 E6 05 89 B5 28 FF FF FF 8D 14 06 8D 44 08 04 3B 11 89 85 7C FF FF FF 0F 87 1F 04 00 00
+
+. 0 3A9698F6 89
+. 89 A5 64 FF FF FF 8B 4D 84 BF 07 00 00 00 89 BD 68 FF FF FF C6 85 5B FF FF FF 00 0F B7 91 4C 01 00 00 8D 44 52 FD 8D 04 C5 28 00 00 00 29 C4 C1 E2 05 8D 74 24 43 83 E6 F0 89 B5 60 FF FF FF 31 F6 89 B5 5C FF FF FF 8B B5 7C FF FF FF 89 F0 01 D0 39 C6 0F 83 87 04 00 00
+
+. 0 3A96994F 20
+. 89 85 54 FF FF FF 8B 85 60 FF FF FF 89 85 2C FF FF FF EB 28
+
+. 0 3A96998B 7
+. 8B 06 83 F8 06 74 33
+
+. 0 3A969992 5
+. 83 F8 06 76 CC
+
+. 0 3A969963 9
+. 83 F8 01 0F 84 CE 02 00 00
+
+. 0 3A969C3A 18
+. 8B 83 18 FC FF FF 8B 56 1C 48 85 D0 0F 85 99 05 00 00
+
+. 0 3A969C4C 23
+. 8B 46 08 4A 8B 4E 04 89 85 50 FF FF FF 29 C8 85 C2 0F 85 8D 05 00 00
+
+. 0 3A969C63 112
+. FF 85 5C FF FF FF 8B BD 2C FF FF FF 8B 85 50 FF FF FF 8D 4F 18 89 8D 2C FF FF FF 89 D1 F7 D1 21 C1 8B 85 2C FF FF FF 89 48 E8 8B 56 08 8B 46 10 01 D0 8B 93 18 FC FF FF 8D 44 02 FF F7 DA 21 D0 8B 95 2C FF FF FF 89 42 EC 8B 46 10 03 46 08 89 42 F0 8B 46 14 03 46 08 89 42 F4 8B 46 1C 48 F7 D0 23 46 04 83 BD 5C FF FF FF 01 89 42 F8 76 0C
+
+. 0 3A969CDF 27
+. 8B 4E 18 B8 40 62 51 73 83 E1 07 C1 E1 02 D3 F8 83 E0 0F 89 47 14 E9 86 FC FF FF
+
+. 0 3A969980 11
+. 83 C6 20 3B B5 54 FF FF FF 73 51
+
+. 0 3A969CD3 5
+. 39 4F EC 74 07
+
+. 0 3A96996C 9
+. 83 F8 02 0F 84 85 03 00 00
+
+. 0 3A969CFA 27
+. 8B 46 08 8B 55 84 89 42 08 8B 46 14 C1 E8 03 66 89 82 4E 01 00 00 E9 6B FC FF FF
+
+. 0 3A969997 11
+. 3D 51 E5 74 64 0F 84 8A 02 00 00
+
+. 0 3A969C2C 14
+. 8B 4E 18 89 8D 68 FF FF FF E9 46 FD FF FF
+
+. 0 3A9699DC 14
+. 8B 85 5C FF FF FF 85 C0 0F 84 EC 03 00 00
+
+. 0 3A9699EA 50
+. 8B 8D 5C FF FF FF 8B BD 60 FF FF FF 8D 04 49 8D 04 C7 8B 17 8B 70 F4 89 85 4C FF FF FF 29 D6 83 BD 78 FF FF FF 03 89 B5 28 FF FF FF 0F 85 DF 07 00 00
+
+. 0 3A969A1C 50
+. 8B 83 78 FD FF FF 21 C2 8B 47 10 89 44 24 14 8B 45 8C 89 44 24 10 B8 02 00 00 00 89 44 24 0C 8B 47 14 89 14 24 89 74 24 04 89 44 24 08 E8 02 B2 00 00
+
+. 0 3A969A4E 16
+. 8B 55 84 89 82 94 01 00 00 40 0F 84 98 01 00 00
+
+. 0 3A969A5E 14
+. 8B B3 40 FC FF FF 85 F6 0F 85 FE 05 00 00
+
+. 0 3A96A06A 36
+. B9 03 00 00 00 89 4C 24 08 8B 8D 28 FF FF FF 89 4C 24 04 8B 75 84 8B 86 94 01 00 00 89 04 24 E8 82 AC 00 00
+
+. 0 3A974D10 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 DB 00 00 00 CD 80
+
+. 0 3A974D24 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 0 3A974D2C 1
+. C3
+
+. 0 3A96A08E 5
+. E9 D9 F9 FF FF
+
+. 0 3A969A6C 46
+. 8B 55 84 8B 8D 28 FF FF FF 8B 75 84 8B 82 94 01 00 00 8D 14 01 89 96 98 01 00 00 8B 17 29 D0 80 BD 5B FF FF FF 00 89 06 0F 85 F9 05 00 00
+
+. 0 3A969A9A 13
+. 8B 4D 84 8B 81 44 01 00 00 85 C0 75 49
+
+. 0 3A969AA7 19
+. 8B 45 80 8B 77 10 8B 48 1C 89 B5 48 FF FF FF 39 CE 77 36
+
+. 0 3A969ABA 33
+. 0F B7 40 2C 8B 37 8B 57 04 89 B5 44 FF FF FF C1 E0 05 29 F2 03 95 48 FF FF FF 8D 04 08 39 C2 72 15
+
+. 0 3A969ADB 35
+. 89 F0 8B 55 84 01 C8 8B 8D 48 FF FF FF 29 C8 89 82 44 01 00 00 8B 4F 0C 8B 57 08 39 D1 0F 86 8F 00 00 00
+
+. 0 3A969B8D 29
+. 8B B5 5C FF FF FF 83 C7 18 8B 95 60 FF FF FF 8D 04 76 8D 04 C2 39 C7 0F 83 3A 02 00 00
+
+. 0 3A969BAA 11
+. 8B 07 39 47 04 0F 86 E5 FE FF FF
+
+. 0 3A969BB5 58
+. 8B 47 10 89 44 24 14 8B 75 8C B8 12 00 00 00 89 44 24 0C 89 74 24 10 8B 47 14 89 44 24 08 8B 47 04 8B 37 29 F0 89 44 24 04 8B 55 84 8B 07 8B 0A 01 C8 89 04 24 E8 61 B0 00 00
+
+. 0 3A969BEF 7
+. 40 0F 85 A4 FE FF FF
+
+. 0 3A969AF0 14
+. 8B 4F 0C 8B 57 08 39 D1 0F 86 8F 00 00 00
+
+. 0 3A969AFE 45
+. 8B 75 84 8B 83 18 FC FF FF 8B 36 01 F2 01 F1 89 95 40 FF FF FF 8D 54 10 FF F7 D8 89 8D 3C FF FF FF 21 C2 39 D1 89 95 38 FF FF FF 73 06
+
+. 0 3A969B2B 20
+. 89 8D 38 FF FF FF 8B 8D 40 FF FF FF 39 8D 38 FF FF FF 76 3C
+
+. 0 3A969B3F 11
+. 8B 47 14 A8 02 0F 84 8E 08 00 00
+
+. 0 3A969B4A 38
+. 8B 85 38 FF FF FF 8B 95 40 FF FF FF 29 D0 89 44 24 08 31 C0 89 44 24 04 8B B5 40 FF FF FF 89 34 24 E8 90 BB 00 00
+
+. 0 3A975700 31
+. 57 8B 7C 24 08 8B 54 24 10 8A 44 24 0C 88 C4 89 C1 C1 E0 10 66 89 C8 FC 83 FA 24 89 D1 7C 37
+
+. 0 3A975756 5
+. C1 E9 02 F3 AB
+
+. 0 3A975759 2
+. F3 AB
+
+. 0 3A97575B 7
+. 89 D1 83 E1 03 F3 AA
+
+. 0 3A975762 6
+. 8B 44 24 08 5F C3
+
+. 0 3A969B70 11
+. 8B 47 14 A8 02 0F 84 37 08 00 00
+
+. 0 3A969B7B 18
+. 8B 85 38 FF FF FF 39 85 3C FF FF FF 0F 87 E4 05 00 00
+
+. 0 3A969DE4 17
+. 8B 4D 84 8B 81 44 01 00 00 85 C0 0F 84 F3 02 00 00
+
+. 0 3A969DF5 30
+. 8B 4D 84 8B 31 01 F0 89 81 44 01 00 00 8B A5 64 FF FF FF 8B 75 8C 89 34 24 E8 5D A6 00 00
+
+. 0 3A974470 13
+. 89 DA 8B 5C 24 04 B8 06 00 00 00 CD 80
+
+. 0 3A97447D 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3A974486 1
+. C3
+
+. 0 3A969E13 42
+. C7 45 8C FF FF FF FF 8B 45 84 0F B6 88 74 01 00 00 88 C8 24 03 FE C8 0F 94 C2 83 BD 78 FF FF FF 02 0F 94 C0 21 D0 A8 01 74 0C
+
+. 0 3A969E49 14
+. 8B 4D 84 8B 41 08 85 C0 0F 85 82 02 00 00
+
+. 0 3A96A0D9 15
+. 8B 75 84 8B 16 01 D0 89 46 08 E9 87 FD FF FF
+
+. 0 3A969E6F 22
+. 8B 4D 84 01 91 48 01 00 00 F6 83 14 FC FF FF 40 0F 85 AB 03 00 00
+
+. 0 3A969E85 14
+. 8B 45 84 8B 70 08 85 F6 0F 84 0B 01 00 00
+
+. 0 3A969E93 11
+. 8B 16 89 C7 83 C7 18 85 D2 75 10
+
+. 0 3A969EAE 5
+. 83 FA 21 7E ED
+
+. 0 3A969EA0 14
+. 89 34 97 83 C6 08 8B 06 85 C0 89 C2 74 38
+
+. 0 3A969EB3 12
+. B8 FF FF FF 6F 29 D0 83 F8 0F 77 0C
+
+. 0 3A969EBF 12
+. B8 21 00 00 70 29 D0 89 34 87 EB D8
+
+. 0 3A969EA3 11
+. 83 C6 08 8B 06 85 C0 89 C2 74 38
+
+. 0 3A969EE6 9
+. 8B 4D 84 8B 11 85 D2 74 53
+
+. 0 3A969EEF 7
+. 8B 47 10 85 C0 74 03
+
+. 0 3A969EF6 10
+. 01 50 04 8B 47 0C 85 C0 74 03
+
+. 0 3A969F00 10
+. 01 50 04 8B 47 14 85 C0 74 03
+
+. 0 3A969F0A 10
+. 01 50 04 8B 47 18 85 C0 74 03
+
+. 0 3A969F14 10
+. 01 50 04 8B 47 1C 85 C0 74 03
+
+. 0 3A969F21 7
+. 8B 47 44 85 C0 74 03
+
+. 0 3A969F28 10
+. 01 50 04 8B 47 5C 85 C0 74 03
+
+. 0 3A969F32 13
+. 01 50 04 8B 87 C4 00 00 00 85 C0 74 03
+
+. 0 3A969F3F 10
+. 01 50 04 8B 57 78 85 D2 74 2A
+
+. 0 3A969F73 10
+. 8B 97 98 00 00 00 85 D2 74 13
+
+. 0 3A969F7D 16
+. 8B 42 04 8B 75 84 A8 01 89 86 E8 01 00 00 74 03
+
+. 0 3A969F90 7
+. 8B 47 74 85 C0 74 07
+
+. 0 3A969F9E 16
+. 8B 45 84 F6 80 E8 01 00 00 40 0F 85 88 03 00 00
+
+. 0 3A969FAE 14
+. 8B 45 84 8B 50 28 85 D2 0F 85 1E 02 00 00
+
+. 0 3A96A1DA 5
+. E8 A1 1B 00 00
+
+. 0 3A96A1DF 6
+. 90 E9 D7 FD FF FF
+
+. 0 3A969FBC 14
+. 8B 55 84 8B 42 58 85 C0 0F 85 E5 02 00 00
+
+. 0 3A969FCA 12
+. 8B 45 84 F6 80 E8 01 00 00 20 74 06
+
+. 0 3A969FD6 72
+. 89 83 CC F9 FF FF 8B 45 94 8B 55 98 8B 4D 84 89 81 BC 01 00 00 89 91 C0 01 00 00 8B 45 EC 8B 55 F0 89 81 C4 01 00 00 8B 83 00 FC FF FF 89 91 C8 01 00 00 F7 D0 21 85 68 FF FF FF F6 85 68 FF FF FF 01 0F 85 72 03 00 00
+
+. 0 3A96A01E 14
+. 8B BB 7C FD FF FF 85 FF 0F 84 97 FD FF FF
+
+. 0 3A969DC3 11
+. 8B 45 84 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96B07E 10
+. 8D 65 F4 5B 5E 5F 5D C2 0C 00
+
+. 0 3A967C70 32
+. 8B 90 70 01 00 00 83 EC 0C 42 89 90 70 01 00 00 31 C0 83 FA 01 0F 94 C0 01 85 10 FF FF FF EB 8C
+
+. 0 3A967C1C 18
+. 8B 8D 60 FE FF FF 89 3C 24 89 4C 24 04 E8 02 B9 00 00
+
+. 0 3A96692A 84
+. 8B 83 7E EC FF FF 8D 75 B4 B9 03 00 00 00 89 B5 CC FE FF FF 8D 95 38 FF FF FF 89 45 B4 8B 83 82 EC FF FF 89 45 B8 8B 83 86 EC FF FF 89 45 BC 8B 83 8A EC FF FF 89 45 C0 0F B7 83 8E EC FF FF 66 89 45 C4 0F B6 83 90 EC FF FF 88 45 C6 89 F0 E8 F2 A6 00 00
+
+. 0 3A971070 37
+. 55 89 E5 81 EC 8C 00 00 00 89 75 F8 31 F6 89 5D F4 89 7D FC 89 D7 89 4D 90 C7 45 8C FF FF FF FF E8 96 48 00 00
+
+. 0 3A971095 18
+. 81 C3 D7 75 00 00 89 74 24 04 89 04 24 E8 89 33 00 00
+
+. 0 3A97444D 5
+. E8 D5 14 00 00
+
+. 0 3A975927 4
+. 8B 0C 24 C3
+
+. 0 3A974452 21
+. 81 C1 1A 42 00 00 31 D2 29 C2 89 91 48 01 00 00 83 C8 FF EB E5
+
+. 0 3A9710A7 6
+. 85 C0 89 C6 78 2C
+
+. 0 3A9710D9 16
+. 8B 45 8C 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96697E 15
+. 89 85 0C FF FF FF 83 F8 FF 0F 85 E1 14 00 00
+
+. 0 3A96698D 20
+. 8B 83 94 F9 FF FF 8B B8 A4 00 00 00 85 FF 0F 84 04 0B 00 00
+
+. 0 3A9669A1 14
+. 8B B5 10 FF FF FF 85 F6 0F 85 79 14 00 00
+
+. 0 3A967E28 65
+. 8B B5 10 FF FF FF 31 C9 89 8D 04 FF FF FF 8D 04 B5 10 00 00 00 29 C4 8D 54 24 2F 8B 83 F0 F9 FF FF 83 E2 F0 89 95 14 FF FF FF 8B 8D 04 FF FF FF 89 04 8A 8B 40 0C 41 89 8D 04 FF FF FF 85 C0 75 E9
+
+. 0 3A967E69 5
+. E9 41 EB FF FF
+
+. 0 3A9669AF 22
+. 8B B3 94 FD FF FF 31 FF 89 BD C4 FE FF FF 85 F6 0F 84 9F 06 00 00
+
+. 0 3A967064 44
+. 31 D2 8B 83 94 F9 FF FF 89 54 24 04 31 D2 83 BD 50 FF FF FF 03 0F 94 C2 89 14 24 8B 8D 10 FF FF FF 8B 95 14 FF FF FF E8 10 81 00 00
+
+. 0 3A96F1A0 83
+. 55 89 E5 57 56 53 81 EC A0 00 00 00 89 45 C0 8D 04 49 8D 04 85 28 00 00 00 89 55 BC 29 C4 8B 55 C0 89 4D B8 8D 44 24 1F 83 E0 F0 89 50 04 8B 4D C0 8D 50 0C 89 45 F0 C7 00 00 00 00 00 89 50 08 0F B6 81 74 01 00 00 C7 45 EC 01 00 00 00 E8 38 67 00 00
+
+. 0 3A96F1F3 31
+. 81 C3 79 94 00 00 24 9F 0C 20 88 81 74 01 00 00 8B 75 B8 C7 45 AC 00 00 00 00 39 75 AC 73 48
+
+. 0 3A96F212 72
+. 8B 45 AC 8B 55 BC 8B 75 EC 8B 3C 82 8B 45 F0 8D 14 76 C1 E2 02 46 8D 4C 02 0C C7 04 10 00 00 00 00 89 4C 10 08 89 7C 10 04 0F B6 87 74 01 00 00 89 75 EC 24 9F 0C 20 88 87 74 01 00 00 8B 4D B8 FF 45 AC 39 4D AC 72 B8
+
+. 0 3A96F25A 65
+. C7 45 A4 00 00 00 00 8B 45 EC 31 C9 C7 45 C8 00 00 00 00 8B 55 F0 8B B3 48 01 00 00 89 8B 48 01 00 00 8D 04 40 8D 44 82 F4 C7 40 08 00 00 00 00 85 D2 89 45 B0 89 75 A8 89 55 B4 0F 84 6A 03 00 00
+
+. 0 3A96F29B 42
+. C7 45 9C 00 00 00 00 8B 45 B4 8B 55 B4 C7 45 98 00 00 00 00 8B 40 04 C7 02 01 00 00 00 8B 90 50 01 00 00 89 45 A0 85 D2 75 44
+
+. 0 3A96F2C5 26
+. 8B 80 D4 01 00 00 8B 4D C0 85 C0 0F 94 C0 39 4D A0 0F 95 C2 21 D0 A8 01 74 2A
+
+. 0 3A96F309 10
+. 8B 55 A0 8B 42 1C 85 C0 75 18
+
+. 0 3A96F32B 52
+. 8B 4D A0 8B 75 08 8B 55 B4 8B 41 2C 8B 40 04 89 4D CC 8B 49 08 89 45 94 89 45 D8 8B 45 0C 89 75 D0 89 55 90 89 45 D4 8B 11 89 4D 8C 85 D2 0F 85 BD 00 00 00
+
+. 0 3A96F41C 9
+. 83 FA 01 0F 84 3F FF FF FF
+
+. 0 3A96F364 30
+. 8B 75 8C B9 24 00 00 00 8B 7D 94 8B 46 04 89 4C 24 04 01 C7 89 FE 89 3C 24 E8 2E 5D 00 00
+
+. 0 3A9750D0 4
+. 88 D1 7A 29
+
+. 0 3A9750D4 8
+. 32 08 0F 84 61 01 00 00
+
+. 0 3A9750DC 8
+. 30 D1 0F 84 86 01 00 00
+
+. 0 3A9750E4 12
+. 8A 48 01 40 38 CA 0F 84 4D 01 00 00
+
+. 0 3A9750F0 9
+. 80 F9 00 0F 84 71 01 00 00
+
+. 0 3A9750F9 4
+. 40 4F 75 14
+
+. 0 3A9750FD 10
+. 8A 08 38 CA 0F 84 36 01 00 00
+
+. 0 3A975107 9
+. 80 F9 00 0F 84 5A 01 00 00
+
+. 0 3A975110 28
+. 40 8B 08 BD FF FE FE FE BF FF FE FE FE 01 CD 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00
+
+. 0 3A975258 8
+. C1 E9 10 40 38 D1 74 DD
+
+. 0 3A975260 5
+. 80 F9 00 74 05
+
+. 0 3A96F382 8
+. 85 C0 0F 85 2A 05 00 00
+
+. 0 3A96F38A 26
+. 89 75 DC 8D 45 CC 8D 55 C8 89 04 24 8D 8B 34 78 FF FF 8D 45 C4 E8 2C 0E 00 00
+
+. 0 3A9701D0 31
+. 55 89 E5 81 EC F8 00 00 00 89 85 24 FF FF FF 31 C0 89 5D F4 89 75 F8 89 7D FC E8 3C 57 00 00
+
+. 0 3A9701EF 30
+. 81 C3 7D 84 00 00 89 95 20 FF FF FF 89 8D 1C FF FF FF 89 85 2C FF FF FF FF 93 E0 F9 FF FF
+
+. 0 3A9658C0 9
+. 55 89 E5 5D E8 5E 00 01 00
+
+. 0 3A9658C9 13
+. 81 C1 A3 2D 01 00 8D 81 3C 00 00 00 C3
+
+. 0 3A97020D 25
+. 89 85 18 FF FF FF 8B 00 89 45 E4 8D 85 30 FF FF FF 89 04 24 E8 0A 4D 00 00
+
+. 0 3A974F30 36
+. 31 C0 8B 54 24 04 89 5A 00 89 72 04 89 7A 08 8D 4C 24 04 89 4A 10 8B 4C 24 00 89 4A 14 89 6A 0C 89 42 18 C3
+
+. 0 3A970226 6
+. 85 C0 89 C2 75 4E
+
+. 0 3A97022C 26
+. 8B 8D 18 FF FF FF 8D 85 28 FF FF FF 89 01 8B 45 08 89 04 24 FF 95 1C FF FF FF
+
+. 0 3A96FEA0 45
+. 55 B9 01 00 00 00 89 E5 56 83 EC 0C 8B 75 08 8B 46 08 8B 56 10 89 44 24 08 8B 46 04 89 44 24 04 8B 06 0F B6 80 74 01 00 00 A8 03 74 05
+
+. 0 3A96FED2 12
+. 89 0C 24 8B 06 31 C9 E8 62 AF FF FF
+
+. 0 3A96B088 14
+. 8B 95 D4 FD FF FF 89 14 24 E8 3A A2 00 00
+
+. 0 3A96B096 20
+. 40 F6 83 14 FC FF FF 01 89 85 C8 FD FF FF 0F 85 2C 04 00 00
+
+. 0 3A96B0AA 21
+. 8B B5 D8 FD FF FF BF FF FF FF FF 89 BD CC FD FF FF 85 F6 74 14
+
+. 0 3A96B0BF 20
+. 8B 85 D8 FD FF FF 8B 88 8C 00 00 00 85 C9 0F 85 A3 01 00 00
+
+. 0 3A96B0D3 14
+. 8B B5 D8 FD FF FF 85 F6 0F 84 C2 00 00 00
+
+. 0 3A96B0E1 14
+. 8D 93 09 F0 FF FF 89 95 B4 FD FF FF EB 1F
+
+. 0 3A96B10E 29
+. 8B 8D B4 FD FF FF 8D BE 80 01 00 00 89 F0 89 FA 89 0C 24 B9 0F 00 00 00 E8 65 F7 FF FF
+
+. 0 3A96A890 25
+. 55 89 E5 83 EC 0C 89 75 F8 89 D6 89 7D FC 8B 12 89 C7 31 C0 83 FA FF 74 27
+
+. 0 3A96A8D0 10
+. 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96B12B 4
+. 84 C0 74 C0
+
+. 0 3A96B0EF 31
+. 83 BD CC FD FF FF FF 8B B6 60 01 00 00 0F 94 C0 85 F6 0F 95 C2 21 D0 A8 01 0F 84 95 00 00 00
+
+. 0 3A96B1A3 27
+. 83 BD CC FD FF FF FF 8B B3 94 F9 FF FF 0F 94 C0 85 F6 0F 95 C2 21 D0 A8 01 74 58
+
+. 0 3A96B1BE 13
+. 0F B6 86 74 01 00 00 24 03 3C 02 74 4B
+
+. 0 3A96B1CB 8
+. 3B B5 D8 FD FF FF 74 43
+
+. 0 3A96B216 9
+. 83 BD CC FD FF FF FF 74 57
+
+. 0 3A96B276 9
+. 83 BB BC 00 00 00 FF 74 A0
+
+. 0 3A96B27F 52
+. 8D 85 E0 FD FF FF 8D 8D E4 FD FF FF 89 44 24 04 8D 83 BC 00 00 00 89 4C 24 08 89 04 24 8B 8D D0 FD FF FF 8B 95 C8 FD FF FF 8B 85 D4 FD FF FF E8 9D DF FF FF
+
+. 0 3A969250 27
+. 55 89 E5 57 31 FF 56 31 F6 53 81 EC B4 00 00 00 89 45 90 8B 45 08 E8 C0 C6 00 00
+
+. 0 3A96926B 114
+. 81 C3 01 F4 00 00 89 4D 88 8B 00 89 55 8C 8B 8B D8 00 00 00 89 BD 7C FF FF FF 89 45 84 8B 83 CC 00 00 00 C7 45 80 FF FF FF FF 89 B5 78 FF FF FF 01 C8 8D 44 02 12 8B 4D 84 83 E0 FC 29 C4 8D 54 24 1B 8B 39 83 E2 F0 89 95 74 FF FF FF 90 8D B4 26 00 00 00 00 31 C0 89 85 70 FF FF FF 31 C0 F6 83 14 FC FF FF 01 89 85 68 FF FF FF 0F 85 78 02 00 00
+
+. 0 3A9692DD 30
+. 8B 47 10 31 F6 89 44 24 08 8B 47 0C 89 44 24 04 8B 8D 74 FF FF FF 89 0C 24 E8 75 C4 00 00
+
+. 0 3A9692FB 16
+. 83 7D 80 FF 89 85 6C FF FF FF 0F 85 98 01 00 00
+
+. 0 3A96930B 8
+. 3B B3 C8 00 00 00 72 1F
+
+. 0 3A969332 7
+. 83 7C B7 14 01 74 DF
+
+. 0 3A969339 41
+. 8B 83 C4 00 00 00 8B 44 F0 04 89 44 24 08 8B 83 C4 00 00 00 8B 04 F0 89 44 24 04 8B 85 6C FF FF FF 89 04 24 E8 0E C4 00 00
+
+. 0 3A969362 22
+. 8B 55 8C 89 54 24 08 8B 4D 90 89 04 24 89 4C 24 04 E8 F8 C3 00 00
+
+. 0 3A969378 31
+. 89 85 70 FF FF FF 8B 85 74 FF FF FF 29 85 70 FF FF FF F6 83 14 FC FF FF 01 0F 85 50 01 00 00
+
+. 0 3A969397 14
+. 8B 55 10 8B 85 74 FF FF FF E8 7B FB FF FF
+
+. 0 3A9693A5 11
+. 89 45 80 8B 44 B7 14 85 C0 75 18
+
+. 0 3A9693B0 10
+. 83 7D 80 FF 0F 84 44 01 00 00
+
+. 0 3A9694FE 45
+. 8B 85 70 FF FF FF 8B 4D 8C 8B 95 74 FF FF FF 29 C8 C6 44 10 FF 00 8D 45 94 89 44 24 08 89 54 24 04 C7 04 24 03 00 00 00 E8 E5 AC 00 00
+
+. 0 3A974210 14
+. 55 89 E5 83 EC 58 89 5D F4 E8 0D 17 00 00
+
+. 0 3A97421E 24
+. 81 C3 4E 44 00 00 89 75 F8 8B B3 24 00 00 00 89 7D FC 8B 06 85 C0 75 7A
+
+. 0 3A974236 21
+. 8B BB 48 01 00 00 8B 55 0C 8B 4D 10 87 D3 B8 C3 00 00 00 CD 80
+
+. 0 3A97424B 13
+. 87 D3 3D 00 F0 FF FF 0F 87 AE 00 00 00
+
+. 0 3A974306 18
+. F7 D8 89 83 48 01 00 00 B8 FF FF FF FF E9 40 FF FF FF
+
+. 0 3A974258 7
+. 83 F8 FF 89 C2 74 31
+
+. 0 3A974290 9
+. 83 BB 48 01 00 00 26 75 C6
+
+. 0 3A97425F 4
+. 85 D2 75 1D
+
+. 0 3A974280 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96952B 4
+. 85 C0 75 13
+
+. 0 3A969542 19
+. BA 01 00 00 00 B8 01 00 00 00 89 54 B7 14 E9 73 FE FF FF
+
+. 0 3A9693C8 25
+. 83 F8 02 0F 94 C0 0F B6 C0 09 85 68 FF FF FF 83 7D 80 FF 0F 84 37 FF FF FF
+
+. 0 3A969318 14
+. 46 83 7D 80 FF 8D 76 00 0F 85 7D 01 00 00
+
+. 0 3A969326 12
+. 3B B3 C8 00 00 00 0F 83 12 01 00 00
+
+. 0 3A974263 11
+. 8B 4D 10 8B 41 58 39 41 0C 74 12
+
+. 0 3A96952F 19
+. 8B 45 A4 25 00 F0 00 00 3D 00 40 00 00 0F 84 78 FE FF FF
+
+. 0 3A9693BA 39
+. B8 02 00 00 00 89 44 B7 14 B8 02 00 00 00 83 F8 02 0F 94 C0 0F B6 C0 09 85 68 FF FF FF 83 7D 80 FF 0F 84 37 FF FF FF
+
+. 0 3A969444 6
+. 83 7D 80 FF 75 59
+
+. 0 3A96944A 10
+. 8B 85 68 FF FF FF 85 C0 74 15
+
+. 0 3A969454 11
+. 8B 93 48 01 00 00 83 FA 02 74 0A
+
+. 0 3A969469 31
+. 83 45 84 04 8B 8D 68 FF FF FF 09 8D 78 FF FF FF 8B 55 84 8B 02 85 C0 89 C7 0F 85 38 FE FF FF
+
+. 0 3A9692C0 29
+. 31 C0 89 85 70 FF FF FF 31 C0 F6 83 14 FC FF FF 01 89 85 68 FF FF FF 0F 85 78 02 00 00
+
+. 0 3A969488 14
+. 8B 85 78 FF FF FF 85 C0 0F 84 9C 02 00 00
+
+. 0 3A969496 13
+. B8 FF FF FF FF 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96B2B3 11
+. 89 85 CC FD FF FF E9 61 FF FF FF
+
+. 0 3A96B21F 31
+. 83 BD CC FD FF FF FF 8B 95 D8 FD FF FF 0F 94 C0 85 D2 0F 95 C2 21 D0 A8 01 0F 85 E1 00 00 00
+
+. 0 3A96B31F 39
+. 8B B5 D8 FD FF FF 8D 83 0F F0 FF FF B9 1D 00 00 00 89 04 24 8B 85 D8 FD FF FF 81 C6 CC 01 00 00 89 F2 E8 4A F5 FF FF
+
+. 0 3A96B346 8
+. 84 C0 0F 84 F0 FE FF FF
+
+. 0 3A96B23E 13
+. 83 BD CC FD FF FF FF 0F 84 3C 01 00 00
+
+. 0 3A96B387 14
+. 8B 85 D0 FD FF FF 85 C0 0F 85 69 01 00 00
+
+. 0 3A96B395 11
+. 8B 85 D4 FD FF FF E8 80 03 00 00
+
+. 0 3A96B720 14
+. 55 89 E5 57 56 53 83 EC 64 E8 FD A1 00 00
+
+. 0 3A96B72E 22
+. 81 C3 3E CF 00 00 F6 83 14 FC FF FF 01 89 45 F0 0F 85 5E 04 00 00
+
+. 0 3A96B744 14
+. 8B 8B DC 00 00 00 85 C9 0F 84 20 01 00 00
+
+. 0 3A96B872 22
+. B9 01 00 00 00 8D 93 E4 00 00 00 8D 83 72 F0 FF FF E8 E8 57 00 00
+
+. 0 3A9710AD 23
+. 89 74 24 04 8D 45 94 89 44 24 08 C7 04 24 03 00 00 00 E8 5C 32 00 00
+
+. 0 3A9710C4 4
+. 85 C0 78 09
+
+. 0 3A9710C8 9
+. 8B 45 C0 85 C0 89 07 75 1F
+
+. 0 3A9710F0 42
+. 89 74 24 10 31 C9 BA 02 00 00 00 89 4C 24 14 89 54 24 0C 8B 55 90 89 44 24 04 C7 04 24 00 00 00 00 89 54 24 08 E8 36 3B 00 00
+
+. 0 3A97111A 5
+. 89 45 8C EB B2
+
+. 0 3A9710D1 8
+. 89 34 24 E8 97 33 00 00
+
+. 0 3A96B888 10
+. 89 45 D8 40 0F 84 73 01 00 00
+
+. 0 3A96B892 18
+. 8B 83 E4 00 00 00 83 F8 10 89 45 A0 0F 86 0E 01 00 00
+
+. 0 3A96B8A4 19
+. FC 8B 75 D8 BA 0B 00 00 00 8D BB 83 F0 FF FF 89 D1 F3 A6
+
+. 0 3A96B8B5 2
+. F3 A6
+
+. 0 3A96B8B7 6
+. 0F 85 F5 00 00 00
+
+. 0 3A96B8BD 34
+. 8B 45 D8 89 C2 89 83 DC 00 00 00 8B 40 0C 8D 04 40 8D 04 85 10 00 00 00 01 C2 83 C0 30 39 45 A0 72 26
+
+. 0 3A96B8DF 21
+. 89 93 E0 00 00 00 B9 14 00 00 00 89 D6 8D BB 8F F0 FF FF F3 A6
+
+. 0 3A96B8F2 2
+. F3 A6
+
+. 0 3A96B8F4 17
+. 0F 97 C2 0F 92 C0 8B 4D D8 38 C2 0F 84 4D FE FF FF
+
+. 0 3A96B752 11
+. 31 C0 83 F9 FF 0F 84 FF 03 00 00
+
+. 0 3A96B75D 27
+. C7 45 DC 00 00 00 00 8B 83 E0 00 00 00 83 F8 FF 89 45 D4 89 C7 0F 84 A0 01 00 00
+
+. 0 3A96B778 26
+. 8B 93 E4 00 00 00 01 D1 8B 93 20 FC FF FF 29 C1 89 4D E0 85 D2 89 55 C8 74 31
+
+. 0 3A96B792 29
+. C7 45 C4 00 00 00 00 8D B3 5C FD FF FF 90 89 74 24 04 8B 4D C8 89 0C 24 E8 D1 9A 00 00
+
+. 0 3A96B7AF 8
+. 85 C0 0F 84 29 04 00 00
+
+. 0 3A96B7B7 12
+. FF 45 C4 83 C6 05 83 7D C4 03 7E DD
+
+. 0 3A96B7A0 15
+. 89 74 24 04 8B 4D C8 89 0C 24 E8 D1 9A 00 00
+
+. 0 3A9752B7 12
+. 0F B6 55 FB F7 DA 89 D0 5A 5E 5D C3
+
+. 0 3A96BBE0 11
+. 8B 45 C4 83 C0 30 E9 DD FB FF FF
+
+. 0 3A96B7C8 18
+. 89 45 CC 99 8B 45 CC 89 55 D0 8B 75 D0 21 F0 40 74 1F
+
+. 0 3A96B7DA 21
+. 0F B6 4D CC B8 01 00 00 00 31 D2 0F A5 C2 D3 E0 F6 C1 20 74 04
+
+. 0 3A96B7EF 32
+. 89 C2 31 C0 89 45 CC 89 55 D0 C7 45 EC 00 00 00 00 8B 55 D4 8B 4A 14 49 39 4D EC 89 4D E8 7E 1B
+
+. 0 3A96B82A 40
+. 8B 45 EC 8B 4D E8 01 C8 89 C2 C1 EA 1F 8D 34 02 D1 FE 8D 04 76 C1 E0 03 89 45 C0 8B 44 07 34 3B 45 E0 0F 83 FE 02 00 00
+
+. 0 3A96B852 13
+. 8B 55 D4 01 C2 8B 45 F0 E8 51 FD FF FF
+
+. 0 3A96B5B0 27
+. 55 89 E5 57 89 C7 56 83 EC 0C 89 55 F4 0F B6 00 84 C0 88 45 EF 0F 84 DF 00 00 00
+
+. 0 3A96B5CB 17
+. 0F B6 0A 0F B6 45 EF 2C 30 3C 09 0F 87 AA 00 00 00
+
+. 0 3A96B686 13
+. 88 C8 BA FF FF FF FF 2C 30 3C 09 76 26
+
+. 0 3A96B693 5
+. 38 4D EF 75 18
+
+. 0 3A96B698 18
+. FF 45 F4 47 8B 45 F4 0F B6 0F 88 4D EF 0F B6 08 EB C8
+
+. 0 3A96B672 6
+. 80 7D EF 00 74 38
+
+. 0 3A96B678 14
+. 0F B6 45 EF 2C 30 3C 09 0F 86 5A FF FF FF
+
+. 0 3A96B6B0 18
+. 0F BE 55 EF 0F BE C1 29 C2 83 C4 0C 89 D0 5E 5F 5D C3
+
+. 0 3A96B85F 8
+. 85 C0 0F 84 25 04 00 00
+
+. 0 3A96B867 4
+. 85 C0 79 A9
+
+. 0 3A96B814 16
+. 4E 89 75 E8 8B 4D E8 39 4D EC 0F 8F 2C 03 00 00
+
+. 0 3A96B824 46
+. 8B BB E0 00 00 00 8B 45 EC 8B 4D E8 01 C8 89 C2 C1 EA 1F 8D 34 02 D1 FE 8D 04 76 C1 E0 03 89 45 C0 8B 44 07 34 3B 45 E0 0F 83 FE 02 00 00
+
+. 0 3A96B86B 7
+. 46 89 75 EC 90 EB A6
+
+. 0 3A96B818 12
+. 8B 4D E8 39 4D EC 0F 8F 2C 03 00 00
+
+. 0 3A96B5E0 17
+. 88 C8 BA 01 00 00 00 2C 30 3C 09 0F 87 C8 00 00 00
+
+. 0 3A96B5F1 36
+. 0F BE 45 EF 47 FF 45 F4 83 E8 30 89 45 F0 0F BE C1 8D 70 D0 0F B6 0F 88 C8 88 4D EF 2C 30 88 CA 3C 09 77 2A
+
+. 0 3A96B63F 16
+. 8B 45 F4 0F B6 08 88 C8 88 CA 2C 30 3C 09 77 1E
+
+. 0 3A96B66D 5
+. 39 75 F0 75 50
+
+. 0 3A96BC8C 11
+. 89 75 EC 85 F6 0F 8E 93 FD FF FF
+
+. 0 3A96BC97 31
+. 8B 4D C0 89 4D A4 8D 76 00 8B BB E0 00 00 00 8B 55 A4 8B 44 17 1C 3B 45 E0 0F 83 7A FD FF FF
+
+. 0 3A96BCB6 13
+. 8B 55 D4 01 C2 8B 45 F0 E8 ED F8 FF FF
+
+. 0 3A96BCC3 8
+. 85 C0 0F 85 5F FD FF FF
+
+. 0 3A96BA2A 23
+. 8B BB E0 00 00 00 3B 75 EC 8D 04 76 8D 3C C7 8D 4F 30 89 4D BC 7E 21
+
+. 0 3A96BA62 21
+. 8B 4F 30 83 F9 01 0F 94 C0 83 F9 03 0F 94 C2 09 D0 A8 01 74 A9
+
+. 0 3A96BA77 11
+. 8B 45 BC 8B 78 08 3B 7D E0 73 9E
+
+. 0 3A96BA82 7
+. 8B 55 DC 85 D2 74 08
+
+. 0 3A96BA91 10
+. 8B 83 1C FC FF FF 85 C0 74 0C
+
+. 0 3A96BA9B 12
+. 8B 55 BC 39 42 0C 0F 87 79 FF FF FF
+
+. 0 3A96BAA7 18
+. 8B 55 D0 8B 45 CC F7 D2 F7 D0 09 C2 0F 84 20 02 00 00
+
+. 0 3A96BAB9 38
+. C7 45 B4 00 00 00 00 8B 55 BC 8B 42 10 8B 52 14 89 45 A8 89 55 AC 81 E2 00 00 0F 00 89 D0 0B 45 B4 89 55 B8 74 12
+
+. 0 3A96BAF1 65
+. 8B 83 54 FC FF FF 31 D2 81 CA 00 00 0F 00 89 55 9C 89 C2 8B 45 9C F7 D2 89 55 98 F7 D0 8B 55 A8 89 45 9C 8B 45 98 21 C2 89 55 98 8B 45 AC 8B 55 9C 21 D0 89 C2 0B 55 98 89 45 9C 0F 85 EE FE FF FF
+
+. 0 3A96BB32 20
+. 8B 45 D4 01 C7 3B 8B 50 FC FF FF 89 7D DC 0F 85 DA FE FF FF
+
+. 0 3A96BB46 19
+. 8D 76 00 8D BC 27 00 00 00 00 F6 83 14 FC FF FF 01 75 66
+
+. 0 3A96BB59 11
+. 8B 45 DC 83 C4 64 5B 5E 5F 5D C3
+
+. 0 3A96B3A0 14
+. 89 85 C4 FD FF FF 85 C0 0F 84 8A 00 00 00
+
+. 0 3A96B3AE 14
+. 8B B5 D8 FD FF FF 85 F6 0F 84 37 01 00 00
+
+. 0 3A96B3BC 13
+. F6 86 E9 01 00 00 08 0F 85 48 01 00 00
+
+. 0 3A96B3C9 10
+. 8B 85 C4 FD FF FF 85 C0 74 65
+
+. 0 3A96B3D3 23
+. 8B 85 C4 FD FF FF 8D 95 E4 FD FF FF 89 95 B8 FD FF FF E8 36 DB FF FF
+
+. 0 3A969041 6
+. 83 7F 10 20 75 DA
+
+. 0 3A969047 6
+. 83 7F 1C 03 76 D4
+
+. 0 3A96904D 24
+. 8B 47 04 8B 4D CC 8D 50 20 3B 11 8D 44 08 04 89 45 B4 0F 87 F6 00 00 00
+
+. 0 3A969065 19
+. FC 8B 75 B4 BA 10 00 00 00 8D BB 38 D6 FF FF 89 D1 F3 A6
+
+. 0 3A969076 2
+. F3 A6
+
+. 0 3A969078 6
+. 0F 84 86 00 00 00
+
+. 0 3A969104 32
+. 8B 75 B4 0F B6 46 14 0F B6 56 18 C1 E0 08 01 D0 0F B6 56 1C C1 E0 08 8B 76 10 01 D0 85 F6 75 0E
+
+. 0 3A969124 10
+. 8B 93 1C FC FF FF 85 D2 74 22
+
+. 0 3A96912E 4
+. 39 C2 73 1E
+
+. 0 3A96B3EA 9
+. 89 85 CC FD FF FF 40 74 52
+
+. 0 3A96B3F3 14
+. 8B 8D C4 FD FF FF 89 0C 24 E8 CF 9E 00 00
+
+. 0 3A96B401 11
+. 8D 70 01 89 34 24 E8 10 A4 FF FF
+
+. 0 3A96B40C 8
+. 89 C2 31 C0 85 D2 74 16
+
+. 0 3A96B414 22
+. 89 74 24 08 8B 85 C4 FD FF FF 89 14 24 89 44 24 04 E8 46 A4 00 00
+
+. 0 3A96B42A 14
+. 89 85 E0 FD FF FF 85 C0 0F 84 48 01 00 00
+
+. 0 3A96B438 13
+. 83 BD CC FD FF FF FF 0F 85 06 FE FF FF
+
+. 0 3A96B24B 13
+. F6 83 14 FC FF FF 01 0F 85 6B 02 00 00
+
+. 0 3A96B258 25
+. 83 BD CC FD FF FF FF 8D 85 E4 FD FF FF 89 85 B8 FD FF FF 0F 85 BF FD FF FF
+
+. 0 3A96CAC0 9
+. 89 C2 8B 40 0C 85 C0 75 F7
+
+. 0 3A9699C5 23
+. 8B 46 08 8B 4D 84 89 81 44 01 00 00 83 C6 20 3B B5 54 FF FF FF 72 AF
+
+. 0 3A969975 22
+. 8D 74 26 00 8D BC 27 00 00 00 00 83 C6 20 3B B5 54 FF FF FF 73 51
+
+. 0 3A97571F 11
+. 89 F9 F7 D9 83 E1 03 29 CA F3 AA
+
+. 0 3A97572A 41
+. 83 EA 20 8B 0F 90 8B 4F 1C 83 EA 20 89 47 00 89 47 04 89 47 08 89 47 0C 89 47 10 89 47 14 89 47 18 89 47 1C 8D 7F 20 7D DD
+
+. 0 3A975730 35
+. 8B 4F 1C 83 EA 20 89 47 00 89 47 04 89 47 08 89 47 0C 89 47 10 89 47 14 89 47 18 89 47 1C 8D 7F 20 7D DD
+
+. 0 3A975753 8
+. 8D 4A 20 C1 E9 02 F3 AB
+
+. 0 3A969FDC 66
+. 8B 45 94 8B 55 98 8B 4D 84 89 81 BC 01 00 00 89 91 C0 01 00 00 8B 45 EC 8B 55 F0 89 81 C4 01 00 00 8B 83 00 FC FF FF 89 91 C8 01 00 00 F7 D0 21 85 68 FF FF FF F6 85 68 FF FF FF 01 0F 85 72 03 00 00
+
+. 0 3A96FEDE 11
+. 89 46 14 83 EC 0C 8B 75 FC C9 C3
+
+. 0 3A970246 52
+. 8B 45 E4 8B 95 18 FF FF FF 8B 8D 24 FF FF FF 89 02 8B 85 20 FF FF FF C7 01 00 00 00 00 C7 00 00 00 00 00 31 C0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C2 04 00
+
+. 0 3A96F3A4 14
+. 8B 55 C8 83 EC 04 85 D2 0F 85 44 02 00 00
+
+. 0 3A96F3B2 12
+. 8B 55 E0 F6 82 74 01 00 00 60 75 37
+
+. 0 3A96F3BE 62
+. 8B 4D B0 83 EC 1C 8D 44 24 1F 83 E0 F0 C7 00 00 00 00 00 C7 40 08 00 00 00 00 89 50 04 89 41 08 89 45 B0 0F B6 82 74 01 00 00 FF 45 EC 24 9F 0C 20 88 82 74 01 00 00 8B 7D 9C 85 FF 74 0D
+
+. 0 3A96F409 19
+. 83 45 8C 08 8B 75 8C 8B 06 85 C0 89 C2 0F 84 07 01 00 00
+
+. 0 3A975265 5
+. 40 38 D5 74 D3
+
+. 0 3A9699A2 11
+. 3D 51 E5 74 64 0F 87 5A 02 00 00
+
+. 0 3A9699AD 5
+. 83 F8 07 75 CE
+
+. 0 3A969B31 14
+. 8B 8D 40 FF FF FF 39 8D 38 FF FF FF 76 3C
+
+. 0 3A96A171 61
+. 31 C0 89 44 24 14 B8 FF FF FF FF 89 44 24 10 B8 32 00 00 00 89 44 24 0C 8B 47 14 89 44 24 08 8B 95 38 FF FF FF 29 95 3C FF FF FF 8B 8D 3C FF FF FF 89 14 24 89 4C 24 04 E8 A2 AA 00 00
+
+. 0 3A96A1AE 7
+. 40 0F 85 D8 F9 FF FF
+
+. 0 3A96F425 24
+. 81 FA FD FF FF 7F 0F 94 C0 81 FA FF FF FF 7F 0F 94 C2 09 D0 A8 01 74 CC
+
+. 0 3A96F523 11
+. 8B 45 9C 85 C0 0F 84 9F 00 00 00
+
+. 0 3A96F5CD 9
+. 8B 4D B4 8B 01 85 C0 74 13
+
+. 0 3A96F5D6 13
+. 8B 75 B4 8B 76 08 85 F6 89 75 B4 74 22
+
+. 0 3A96F5E3 6
+. 8B 06 85 C0 75 ED
+
+. 0 3A96F5E9 11
+. 8B 45 B4 85 C0 0F 85 A7 FC FF FF
+
+. 0 3A96F2DF 15
+. 8B 75 A0 0F B7 86 4E 01 00 00 66 85 C0 74 1B
+
+. 0 3A96F2EE 37
+. 0F B7 C0 8D 04 85 12 00 00 00 25 FC FF 0F 00 29 C4 8D 44 24 1F 83 E0 F0 89 45 9C 8B 55 A0 8B 42 1C 85 C0 75 18
+
+. 0 3A96FECD 17
+. 89 C1 83 E1 03 89 0C 24 8B 06 31 C9 E8 62 AF FF FF
+
+. 0 3A970BE0 10
+. 83 C4 08 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96AFA7 12
+. 89 F0 8D 65 F4 5B 5E 5F 5D C2 0C 00
+
+. 0 3A96F3F5 7
+. 8B 7D 9C 85 FF 74 0D
+
+. 0 3A96F3FC 32
+. 8B 75 98 8B 45 9C 89 14 B0 46 89 75 98 83 45 8C 08 8B 75 8C 8B 06 85 C0 89 C2 0F 84 07 01 00 00
+
+. 0 3A96F52E 32
+. 8B 45 98 8B 55 9C C7 04 82 00 00 00 00 40 89 45 98 8D 04 C5 04 00 00 00 89 04 24 E8 CE 62 FF FF
+
+. 0 3A96F54E 19
+. 8B 4D A0 85 C0 89 C2 89 81 D4 01 00 00 0F 84 48 05 00 00
+
+. 0 3A96F561 22
+. 8B 4D 98 8D 7A 04 8B 45 A0 8B 75 9C C1 E1 02 83 F9 07 89 02 76 16
+
+. 0 3A96F577 8
+. F7 C7 04 00 00 00 74 0E
+
+. 0 3A96F57F 20
+. 8B 06 8D 7A 08 83 C6 04 83 E9 04 89 42 04 FC C1 E9 02 F3 A5
+
+. 0 3A96F591 2
+. F3 A5
+
+. 0 3A96F593 28
+. 8B 55 A0 8B 45 98 8B B2 D4 01 00 00 C1 E0 02 83 F8 07 89 C2 8D 0C 06 8D 79 04 76 16
+
+. 0 3A96F5AF 8
+. F7 C7 04 00 00 00 74 0E
+
+. 0 3A96F5B7 22
+. 8B 06 8D 79 08 83 C6 04 83 EA 04 89 41 04 FC 89 D1 C1 E9 02 F3 A5
+
+. 0 3A96F5CB 2
+. F3 A5
+
+. 0 3A96AFE3 9
+. 89 FA 89 F0 E8 F4 DD FF FF
+
+. 0 3A968DE0 19
+. 55 89 E5 57 89 D7 56 53 83 EC 14 8B 70 14 E8 38 CB 00 00
+
+. 0 3A968DF3 17
+. 81 C3 79 F8 00 00 C7 45 F0 00 00 00 00 85 F6 74 28
+
+. 0 3A968E04 26
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 06 89 3C 24 89 44 24 04 E8 62 C4 00 00
+
+. 0 3A968E1E 4
+. 85 C0 74 55
+
+. 0 3A968E22 10
+. 89 75 F0 8B 76 04 85 F6 75 E4
+
+. 0 3A968E2C 8
+. 89 3C 24 E8 9C C4 00 00
+
+. 0 3A968E34 17
+. 8D 50 01 83 C0 0D 89 55 EC 89 04 24 E8 D7 C9 FF FF
+
+. 0 3A968E45 6
+. 85 C0 89 C6 74 34
+
+. 0 3A968E4B 22
+. 8B 55 EC 8D 40 0C 89 7C 24 04 89 04 24 89 54 24 08 E8 0F CA 00 00
+
+. 0 3A968E61 30
+. 89 06 8B 45 F0 C7 46 04 00 00 00 00 C7 46 08 00 00 00 00 89 70 04 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96AFEC 9
+. 80 8E 75 01 00 00 01 EB B2
+
+. 0 3A970BC0 14
+. 8B 06 89 3C 24 89 44 24 04 E8 B2 46 00 00
+
+. 0 3A96F5C5 8
+. FC 89 D1 C1 E9 02 F3 A5
+
+. 0 3A96F313 10
+. 8B 82 E8 00 00 00 85 C0 75 0E
+
+. 0 3A96F31D 14
+. 8B 82 E0 00 00 00 85 C0 0F 84 F8 01 00 00
+
+. 0 3A96F605 25
+. 8B 83 48 01 00 00 8B 7D A8 85 C0 0F 94 C0 85 FF 0F 95 C2 21 D0 A8 01 74 09
+
+. 0 3A96F61E 22
+. 8B 45 A8 89 83 48 01 00 00 8B 4D C0 8B 91 D4 01 00 00 85 D2 74 11
+
+. 0 3A96F645 18
+. 8B 45 EC 8D 04 C5 04 00 00 00 89 04 24 E8 C5 61 FF FF
+
+. 0 3A96F657 19
+. 8B 75 C0 85 C0 89 C1 89 86 D4 01 00 00 0F 84 A4 05 00 00
+
+. 0 3A96F66A 39
+. 8B 45 EC 8B 55 C0 8D 4C 81 04 89 8A 50 01 00 00 8B 4D F0 89 82 54 01 00 00 C7 45 EC 00 00 00 00 85 C9 89 4D B4 74 3E
+
+. 0 3A96F691 11
+. 8B 75 08 85 F6 0F 85 28 04 00 00
+
+. 0 3A96F69C 51
+. 8B 55 B4 8B 4A 04 8B 75 C0 8B 45 EC 8B 96 50 01 00 00 89 0C 82 40 89 45 EC 8B 45 B4 8B 48 04 80 A1 74 01 00 00 9F 8B 55 B4 8B 52 08 89 55 B4 85 D2 75 C2
+
+. 0 3A96F6CF 13
+. F6 83 15 FC FF FF 02 0F 85 20 06 00 00
+
+. 0 3A96F6DC 45
+. 8B 55 C0 8B 45 EC 8B 92 50 01 00 00 89 85 78 FF FF FF 89 95 74 FF FF FF C7 45 AC 00 00 00 00 8B 55 C0 8B 8A DC 01 00 00 39 4D AC 73 49
+
+. 0 3A96F752 29
+. 8B 95 78 FF FF FF 8B 45 C0 8B B5 74 FF FF FF C1 E2 02 83 FA 07 8B B8 D4 01 00 00 76 15
+
+. 0 3A96F76F 8
+. F7 C7 04 00 00 00 74 0D
+
+. 0 3A96F784 8
+. FC 89 D1 C1 E9 02 F3 A5
+
+. 0 3A96F78A 2
+. F3 A5
+
+. 0 3A96F78C 25
+. C7 45 AC 01 00 00 00 8B 55 EC 39 55 AC 89 95 78 FF FF FF 0F 83 ED 04 00 00
+
+. 0 3A96F7A5 40
+. 8B 75 C0 8B 8E D4 01 00 00 8B 55 C0 8B 75 AC 8B 82 50 01 00 00 BA 01 00 00 00 8B 04 B0 39 41 04 89 85 7C FF FF FF 74 11
+
+. 0 3A96F7DE 15
+. 8D 7A 01 3B BD 78 FF FF FF 0F 83 B0 00 00 00
+
+. 0 3A96F7ED 39
+. C1 E2 02 89 95 6C FF FF FF F7 DA 89 95 68 FF FF FF 89 F6 8D 34 BD 00 00 00 00 8B 04 31 8B 90 D4 01 00 00 85 D2 74 7C
+
+. 0 3A96F814 6
+. 8B 02 85 C0 74 76
+
+. 0 3A96F81A 14
+. 03 B5 68 FF FF FF 89 B5 70 FF FF FF EB 06
+
+. 0 3A96F82E 11
+. 83 C2 04 3B 85 7C FF FF FF 75 EF
+
+. 0 3A96F828 6
+. 8B 02 85 C0 74 62
+
+. 0 3A96F890 13
+. 47 3B BD 78 FF FF FF 0F 82 63 FF FF FF
+
+. 0 3A96F800 20
+. 8D 34 BD 00 00 00 00 8B 04 31 8B 90 D4 01 00 00 85 D2 74 7C
+
+. 0 3A96F89D 18
+. FF 45 AC 8B 85 78 FF FF FF 39 45 AC 0F 82 FF FE FF FF
+
+. 0 3A96F7AE 31
+. 8B 55 C0 8B 75 AC 8B 82 50 01 00 00 BA 01 00 00 00 8B 04 B0 39 41 04 89 85 7C FF FF FF 74 11
+
+. 0 3A96F7CD 17
+. 8D 41 04 83 C0 04 8B B5 7C FF FF FF 42 39 30 75 F2
+
+. 0 3A96F7D0 14
+. 83 C0 04 8B B5 7C FF FF FF 42 39 30 75 F2
+
+. 0 3A96F8AF 5
+. E9 E7 03 00 00
+
+. 0 3A96FC9B 24
+. 8B B5 78 FF FF FF C7 04 B1 00 00 00 00 8B 75 A4 85 F6 0F 85 A9 01 00 00
+
+. 0 3A96FCB3 10
+. 8D 65 F4 5B 5E 5F 5D C2 08 00
+
+. 0 3A967090 25
+. 8B 8B 94 F9 FF FF 83 EC 08 8B 91 54 01 00 00 89 95 04 FF FF FF 85 D2 74 3B
+
+. 0 3A9670A9 59
+. FF 8D 04 FF FF FF 8B 81 50 01 00 00 8B 8D 04 FF FF FF 8B 04 88 80 88 74 01 00 00 10 8B 8B 94 F9 FF FF 8B B5 04 FF FF FF 8B 81 50 01 00 00 8B 04 B0 FF 80 70 01 00 00 85 F6 75 C5
+
+. 0 3A9670E4 25
+. 8B 83 F0 F9 FF FF 8B 93 F4 F9 FF FF 89 42 0C 8B 83 F0 F9 FF FF 85 C0 74 03
+
+. 0 3A9670FD 16
+. 89 50 10 83 BB 54 FB FF FF 01 0F 86 52 0A 00 00
+
+. 0 3A96710D 37
+. 8B 8B 94 F9 FF FF B8 01 00 00 00 8B 95 D0 FE FF FF 89 85 04 FF FF FF 8B 81 50 01 00 00 83 C2 50 39 50 04 74 11
+
+. 0 3A967132 17
+. FF 85 04 FF FF FF 8B B5 04 FF FF FF 39 14 B0 75 EF
+
+. 0 3A967143 42
+. 8B 81 50 01 00 00 8B B5 04 FF FF FF 8B BD 50 FF FF FF 89 85 A8 FE FF FF 8B 54 B0 FC 85 FF 89 93 F4 F9 FF FF 0F 85 D8 11 00 00
+
+. 0 3A96716D 13
+. 89 F0 40 31 F6 3B 81 54 01 00 00 73 10
+
+. 0 3A96718A 20
+. 89 B3 F0 F9 FF FF 8B 85 C4 FE FF FF 85 C0 0F 84 9C 11 00 00
+
+. 0 3A96833A 11
+. 8B 93 F4 F9 FF FF E9 6E EE FF FF
+
+. 0 3A9671B3 22
+. 8B 8D D0 FE FF FF 83 C1 50 89 4A 0C 8B 83 F0 F9 FF FF 85 C0 74 03
+
+. 0 3A9671CC 50
+. 31 C0 85 FF 0F 94 C0 89 85 24 FF FF FF 31 C0 83 FF 03 0F 94 C0 8D 8D 24 FF FF FF 8D 93 44 02 FF FF 89 85 28 FF FF FF 8D 83 B4 D9 FE FF E8 62 8F 00 00
+
+. 0 3A970160 17
+. 55 89 E5 83 EC 28 89 5D F4 89 75 F8 E8 BA 57 00 00
+
+. 0 3A970171 24
+. 81 C3 FB 84 00 00 89 7D FC 89 55 EC 89 4D E8 89 45 F0 FF 93 E0 F9 FF FF
+
+. 0 3A970189 37
+. 8B 38 89 C6 8B 83 E8 00 00 00 C7 06 00 00 00 00 89 45 E4 8B 45 F0 89 83 E8 00 00 00 8B 45 E8 89 04 24 FF 55 EC
+
+. 0 3A9688B0 25
+. 55 BA 01 00 00 00 89 E5 83 EC 0C 89 75 FC 8B 75 08 89 5D F8 E8 62 D0 00 00
+
+. 0 3A9688C9 20
+. 81 C3 A3 FD 00 00 8B 4E 04 8B 83 94 F9 FF FF E8 83 8F 00 00
+
+. 0 3A971860 18
+. 55 89 E5 57 31 FF 56 89 C6 53 83 EC 0C E8 B9 40 00 00
+
+. 0 3A971872 16
+. 81 C3 FA 6D 00 00 89 55 F0 85 C0 89 4D EC 75 0E
+
+. 0 3A971890 16
+. F6 86 75 01 00 00 02 C7 45 E8 00 00 00 00 75 E4
+
+. 0 3A9718A0 13
+. 8B 4D EC 89 F0 8B 55 F0 E8 F3 FB FF FF
+
+. 0 3A9714A0 23
+. 55 89 E5 57 56 53 81 EC 84 00 00 00 89 45 B0 8B 40 2C E8 74 44 00 00
+
+. 0 3A9714B7 43
+. 81 C3 B5 71 00 00 89 55 AC 31 D2 85 C0 89 4D A8 C7 45 A4 00 00 00 00 C7 45 94 00 00 00 00 C7 45 90 00 00 00 00 0F 84 4E 01 00 00
+
+. 0 3A9714E2 38
+. 8B 40 04 8B 4D B0 89 45 A0 8B 45 B0 8B 89 A4 00 00 00 8B 80 AC 00 00 00 85 C9 89 4D 9C 89 45 98 0F 84 EA 00 00 00
+
+. 0 3A971508 23
+. 8B 51 04 8B 4D B0 8B 09 01 CA 66 83 3A 01 89 55 8C 0F 85 77 01 00 00
+
+. 0 3A97151F 22
+. 90 8B 55 8C 8B 7D A0 8B B3 94 F9 FF FF 8B 42 04 01 C7 85 F6 74 22
+
+. 0 3A971535 20
+. 8D 74 26 00 8D BC 27 00 00 00 00 89 F2 89 F8 E8 37 F6 FF FF
+
+. 0 3A971549 7
+. 89 75 88 85 C0 75 1F
+
+. 0 3A971550 7
+. 8B 76 0C 85 F6 75 E9
+
+. 0 3A971540 9
+. 89 F2 89 F8 E8 37 F6 FF FF
+
+. 0 3A97156F 11
+. 8B 45 A8 85 C0 0F 85 07 01 00 00
+
+. 0 3A97157A 29
+. 8B 75 8C 8B 46 08 01 C6 8B 55 B0 8B 7E 08 8B 4D A0 8B 42 04 01 F9 89 C7 80 38 00 75 08
+
+. 0 3A971597 41
+. 8B 83 38 00 00 00 8B 38 0F B7 46 04 83 E0 02 89 44 24 08 8B 45 AC 89 44 24 04 8B 55 88 89 F8 89 14 24 8B 16 E8 60 FB FF FF
+
+. 0 3A971120 30
+. 55 89 E5 57 89 D7 56 31 F6 53 81 EC B4 00 00 00 8B 55 08 89 85 60 FF FF FF E8 ED 47 00 00
+
+. 0 3A97113E 43
+. 81 C3 2E 75 00 00 89 8D 5C FF FF FF 8B 42 2C F6 83 14 FC FF FF 10 8B 40 04 89 B5 54 FF FF FF 89 85 58 FF FF FF 0F 85 C1 01 00 00
+
+. 0 3A971169 17
+. 8B 55 08 8B 82 AC 00 00 00 85 C0 0F 84 3F 01 00 00
+
+. 0 3A97117A 16
+. 8B 55 08 8B 32 8B 50 04 01 D6 66 83 3E 01 75 1C
+
+. 0 3A97118A 9
+. 3B 7E 08 0F 84 ED 00 00 00
+
+. 0 3A971193 11
+. 8B 46 10 85 C0 0F 84 C7 01 00 00
+
+. 0 3A97119E 8
+. 01 C6 66 83 3E 01 74 E4
+
+. 0 3A971280 35
+. 8B 56 0C 89 F0 01 D0 8B 08 8B 95 58 FF FF FF 01 CA 89 54 24 04 8B 85 5C FF FF FF 89 04 24 E8 DD 3F 00 00
+
+. 0 3A9712A3 10
+. 31 D2 85 C0 0F 85 E6 FE FF FF
+
+. 0 3A9712AD 12
+. 8D 65 F4 89 D0 5B 5E 5F 5D C2 0C 00
+
+. 0 3A9715C0 20
+. 09 45 A4 83 EC 0C 0F B7 46 06 25 FF 7F 00 00 3B 45 94 76 03
+
+. 0 3A9715D4 10
+. 89 45 94 8B 46 0C 85 C0 75 A2
+
+. 0 3A9715DE 10
+. 8B 4D 8C 8B 41 0C 85 C0 74 0A
+
+. 0 3A9715E8 10
+. 01 C1 89 4D 8C E9 2E FF FF FF
+
+. 0 3A971520 21
+. 8B 55 8C 8B 7D A0 8B B3 94 F9 FF FF 8B 42 04 01 C7 85 F6 74 22
+
+. 0 3A9715D7 7
+. 8B 46 0C 85 C0 75 A2
+
+. 0 3A9715F2 7
+. 8B 75 98 85 F6 74 29
+
+. 0 3A971622 11
+. 8B 45 94 85 C0 0F 85 1D 01 00 00
+
+. 0 3A97174A 21
+. BF 10 00 00 00 89 7C 24 04 8B 75 94 46 89 34 24 E8 CD 40 FF FF
+
+. 0 3A97175F 28
+. C7 45 90 0C 00 00 00 8B 55 B0 85 C0 89 C7 8D 8B 7C EB FF FF 89 82 7C 01 00 00 74 AF
+
+. 0 3A97177B 28
+. 89 B2 78 01 00 00 8B 82 DC 00 00 00 8B 75 9C 8B 40 04 85 F6 89 82 8C 01 00 00 74 63
+
+. 0 3A971797 23
+. 8B 4D 9C 8B 02 8B 49 04 01 C1 89 4D 84 8B 75 84 8B 4E 08 01 CE EB 04
+
+. 0 3A9717B2 62
+. 0F B7 56 06 8B 06 89 D1 81 E1 FF 7F 00 00 81 E2 00 80 00 00 C1 E1 04 89 44 0F 04 8B 45 A0 89 54 0F 08 8B 56 08 01 D0 89 04 0F 8B 55 84 8B 45 A0 03 42 04 89 44 0F 0C 8B 46 0C 85 C0 75 C0
+
+. 0 3A9717F0 10
+. 8B 4D 84 8B 41 0C 85 C0 75 A5
+
+. 0 3A97179F 15
+. 01 C1 89 4D 84 8B 75 84 8B 4E 08 01 CE EB 04
+
+. 0 3A9717FA 11
+. 8B 75 98 85 F6 0F 84 28 FE FF FF
+
+. 0 3A97162D 13
+. 8B 55 A4 8D 65 F4 89 D0 5B 5E 5F 5D C3
+
+. 0 3A9718AD 4
+. 85 C0 74 D3
+
+. 0 3A971884 12
+. 8B 55 E8 8B 76 0C 09 D7 85 F6 74 40
+
+. 0 3A97159F 33
+. 0F B7 46 04 83 E0 02 89 44 24 08 8B 45 AC 89 44 24 04 8B 55 88 89 F8 89 14 24 8B 16 E8 60 FB FF FF
+
+. 0 3A971580 23
+. 01 C6 8B 55 B0 8B 7E 08 8B 4D A0 8B 42 04 01 F9 89 C7 80 38 00 75 08
+
+. 0 3A9717B0 64
+. 01 C6 0F B7 56 06 8B 06 89 D1 81 E1 FF 7F 00 00 81 E2 00 80 00 00 C1 E1 04 89 44 0F 04 8B 45 A0 89 54 0F 08 8B 56 08 01 D0 89 04 0F 8B 55 84 8B 45 A0 03 42 04 89 44 0F 0C 8B 46 0C 85 C0 75 C0
+
+. 0 3A9715F9 15
+. 8B 4D 98 8B 45 B0 8B 51 04 8B 08 01 CA EB 02
+
+. 0 3A97160A 14
+. 0F B7 42 04 25 FF 7F 00 00 3B 45 94 76 03
+
+. 0 3A97161B 7
+. 8B 42 10 85 C0 75 E6
+
+. 0 3A971608 16
+. 01 C2 0F B7 42 04 25 FF 7F 00 00 3B 45 94 76 03
+
+. 0 3A971805 15
+. 8B 45 98 8B 55 B0 8B 70 04 8B 0A 01 CE EB 02
+
+. 0 3A971816 13
+. 8B 46 0C 89 F7 01 C7 F6 46 02 01 75 2A
+
+. 0 3A97184D 7
+. 8B 46 10 85 C0 75 C0
+
+. 0 3A971814 15
+. 01 C6 8B 46 0C 89 F7 01 C7 F6 46 02 01 75 2A
+
+. 0 3A971823 49
+. 0F B7 46 04 8B 55 B0 8B 8A 7C 01 00 00 8B 56 08 25 FF 7F 00 00 C1 E0 04 89 54 01 04 8B 55 A0 03 17 31 FF 89 7C 01 0C 89 14 01 8B 46 10 85 C0 75 C0
+
+. 0 3A971854 5
+. E9 D4 FD FF FF
+
+. 0 3A971618 10
+. 89 45 94 8B 42 10 85 C0 75 E6
+
+. 0 3A9718D0 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3
+
+. 0 3A9688DD 4
+. 85 C0 74 06
+
+. 0 3A9688E7 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3A9701AE 24
+. 89 3E 8B 45 E4 89 83 E8 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9671FE 14
+. 8B BD 50 FF FF FF 85 FF 0F 85 96 0F 00 00
+
+. 0 3A96720C 20
+. 8B 8B 94 F9 FF FF 8B 91 34 01 00 00 85 D2 0F 84 F3 00 00 00
+
+. 0 3A967313 11
+. 8B 83 E4 F9 FF FF E8 12 98 00 00
+
+. 0 3A970B30 5
+. E8 F2 4D 00 00
+
+. 0 3A970B35 21
+. 81 C1 37 7B 00 00 55 8B 91 28 00 00 00 89 E5 83 7A 08 00 75 1B
+
+. 0 3A970B4A 31
+. 89 42 10 8B 81 94 F9 FF FF C7 02 01 00 00 00 89 42 04 8D 81 04 85 FF FF 89 42 08 5D 89 D0 C3
+
+. 0 3A96731E 19
+. 89 85 78 FE FF FF 8B 83 94 F9 FF FF 8B 40 6C 85 C0 74 09
+
+. 0 3A967331 19
+. 8B 95 78 FE FF FF 89 50 04 8B 83 50 FA FF FF 85 C0 74 09
+
+. 0 3A96734D 63
+. 80 BD 03 FF FF FF 00 8B B3 94 F9 FF FF 8D 86 50 01 00 00 89 83 A4 F9 FF FF 8B 96 54 01 00 00 89 F1 89 83 9C F9 FF FF 8B 86 50 01 00 00 89 93 2C FC FF FF 89 83 28 FC FF FF 0F 84 84 05 00 00
+
+. 0 3A967910 31
+. 31 C0 83 BB 7C FD FF FF 00 0F 95 C0 09 83 3C FC FF FF 89 85 74 FE FF FF 8B 46 0C 85 C0 74 5D
+
+. 0 3A96792F 9
+. 89 C6 8B 40 0C 85 C0 75 F7
+
+. 0 3A967938 2
+. EB 52
+
+. 0 3A96798C 10
+. 8B 46 14 8B 40 04 85 C0 74 B8
+
+. 0 3A967996 2
+. EB A8
+
+. 0 3A967940 14
+. C7 40 08 01 00 00 00 8B 40 04 85 C0 75 F2
+
+. 0 3A96794E 13
+. 8B BD D0 FE FF FF 83 C7 50 39 FE 74 26
+
+. 0 3A967981 11
+. 8B 76 10 85 F6 0F 84 96 07 00 00
+
+. 0 3A96795B 38
+. 8B 85 74 FE FF FF 89 44 24 0C 8B 83 3C FC FF FF 89 44 24 08 8B 86 B0 01 00 00 89 34 24 89 44 24 04 E8 2F 65 00 00
+
+. 0 3A96DEB0 26
+. 55 89 E5 8D 45 08 57 56 53 81 EC FC 00 00 00 8B 75 08 89 45 EC E8 61 7A 00 00
+
+. 0 3A96DECA 28
+. 81 C3 A2 A7 00 00 F6 86 74 01 00 00 04 C7 45 98 00 00 00 00 89 F2 0F 85 1A 04 00 00
+
+. 0 3A96DEE6 7
+. 8B 7D 14 85 FF 75 0E
+
+. 0 3A96DEED 27
+. 31 C0 83 7E 78 00 0F 94 C0 F7 D8 21 45 10 F6 83 14 FC FF FF 20 0F 85 04 0A 00 00
+
+. 0 3A96DF08 11
+. 8B 4E 70 85 C9 0F 85 3A 0A 00 00
+
+. 0 3A96DF13 16
+. 8B 46 2C 8B 40 04 89 45 F0 8B 46 74 85 C0 74 4D
+
+. 0 3A96DF23 7
+. 8B 7D 10 85 FF 74 46
+
+. 0 3A96DF2A 13
+. 8B 46 24 8B 50 04 8B 42 04 85 C0 74 13
+
+. 0 3A96DF4A 14
+. 89 72 04 8B 45 14 85 C0 0F 85 DF 0A 00 00
+
+. 0 3A96DF58 11
+. 8D 83 84 78 FF FF 89 42 08 EB 0D
+
+. 0 3A96DF70 35
+. C7 45 D4 00 00 00 00 8B 46 5C C7 45 DC 00 00 00 00 C7 45 D0 00 00 00 00 85 C0 C7 45 CC 00 00 00 00 74 0F
+
+. 0 3A96DF93 22
+. 8B 40 04 89 45 CC 8B 46 60 8B 40 04 89 45 D0 8B 46 68 85 C0 74 0A
+
+. 0 3A96DFA9 10
+. 83 78 04 11 0F 84 73 02 00 00
+
+. 0 3A96E226 13
+. 8B 7D 10 8B 46 74 85 FF 8B 48 04 75 0C
+
+. 0 3A96E23F 23
+. 89 4D D8 8B 46 20 8B 55 10 8B 40 04 89 55 E0 89 45 DC E9 5D FD FF FF
+
+. 0 3A96DFB3 51
+. C7 45 90 00 00 00 00 89 75 8C 8B 4D 90 8D 04 49 8D 44 85 F4 8D 50 D8 8B 48 D8 8B 7A 04 89 C8 01 F8 89 45 84 8B 06 89 4D 88 89 45 80 8B 42 08 85 C0 74 4F
+
+. 0 3A96E035 30
+. 8B 46 30 31 D2 8B 40 04 89 95 78 FF FF FF 89 85 7C FF FF FF 8B 86 B4 00 00 00 85 C0 74 09
+
+. 0 3A96E053 24
+. 8B 40 04 89 85 78 FF FF FF 89 F8 89 CA C1 E8 03 3B 85 78 FF FF FF 76 06
+
+. 0 3A96E06B 22
+. 8B 85 78 FF FF FF 8D 8B E4 F9 FF FF 8D 04 C2 39 CE 89 45 88 74 2F
+
+. 0 3A96E081 7
+. 8B 7D 80 85 FF 74 28
+
+. 0 3A96E088 4
+. 39 C2 73 24
+
+. 0 3A96E08C 24
+. 8D 74 26 00 8B 0A 83 C2 08 8B 45 80 8B 7D 80 01 C8 01 38 3B 55 88 72 EC
+
+. 0 3A96E090 20
+. 8B 0A 83 C2 08 8B 45 80 8B 7D 80 01 C8 01 38 3B 55 88 72 EC
+
+. 0 3A96E0A4 26
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 86 DC 00 00 00 85 C0 0F 84 73 05 00 00
+
+. 0 3A96E0BE 21
+. 8B 40 04 89 85 74 FF FF FF 8B 45 84 39 45 88 0F 83 5D 02 00 00
+
+. 0 3A96E0D3 110
+. 8D B6 00 00 00 00 8D BC 27 00 00 00 00 8B 55 88 8B BD 74 FF FF FF 8B 52 04 89 95 0C FF FF FF 89 D0 C1 E8 08 0F B7 0C 47 8B BD 7C FF FF FF C1 E0 04 01 C7 8B 45 8C 81 E1 FF 7F 00 00 89 7D A8 C1 E1 04 8B 90 7C 01 00 00 8B 45 80 01 D1 8B 55 88 03 02 0F B6 95 0C FF FF FF 89 85 70 FF FF FF 83 FA 08 89 95 6C FF FF FF 0F 84 A4 0B 00 00
+
+. 0 3A96E141 14
+. 8B 85 6C FF FF FF 85 C0 0F 84 D1 01 00 00
+
+. 0 3A96E14F 21
+. 89 BD 68 FF FF FF 0F B6 47 0C C0 E8 04 84 C0 0F 84 DC 05 00 00
+
+. 0 3A96E164 12
+. 3B BE FC 01 00 00 0F 84 D0 0B 00 00
+
+. 0 3A96E170 25
+. 31 D2 83 BD 6C FF FF FF 07 0F 94 C2 83 BD 6C FF FF FF 05 0F 84 7B 07 00 00
+
+. 0 3A96E189 29
+. 89 96 00 02 00 00 8B BD 68 FF FF FF 89 BE FC 01 00 00 31 FF 85 C9 BE 01 00 00 00 74 0B
+
+. 0 3A96E1A6 7
+. 8B 41 04 85 C0 74 04
+
+. 0 3A96E1AD 49
+. 89 CF 31 F6 8B 45 A8 8B 4D F0 8B 00 89 74 24 0C 89 7C 24 04 01 C8 31 C9 89 4C 24 10 8D 4D A8 89 54 24 08 8B 55 0C 89 14 24 8B 55 08 E8 02 DF FF FF
+
+. 0 3A96C0E0 19
+. 55 89 E5 57 31 FF 56 53 81 EC A0 00 00 00 E8 38 98 00 00
+
+. 0 3A96C0F3 20
+. 81 C3 79 C5 00 00 89 45 A8 89 55 A4 89 4D A0 E8 E9 FB FF FF
+
+. 0 3A96BCF0 14
+. 55 89 C1 31 D2 0F B6 00 89 E5 84 C0 74 72
+
+. 0 3A96BCFE 11
+. 41 0F B6 D0 0F B6 01 84 C0 74 67
+
+. 0 3A96BD09 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 57
+
+. 0 3A96BD19 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 47
+
+. 0 3A96BD29 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 37
+
+. 0 3A96BD39 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 27
+
+. 0 3A96BD49 37
+. 8D B4 26 00 00 00 00 C1 E2 04 0F B6 C0 01 C2 89 D0 41 25 00 00 00 F0 31 C2 C1 E8 18 31 C2 0F B6 01 84 C0 75 E2
+
+. 0 3A96BD50 30
+. C1 E2 04 0F B6 C0 01 C2 89 D0 41 25 00 00 00 F0 31 C2 C1 E8 18 31 C2 0F B6 01 84 C0 75 E2
+
+. 0 3A96BD6E 6
+. 89 F6 5D 89 D0 C3
+
+. 0 3A96C107 37
+. 89 45 9C 8B 45 18 8B 75 08 FF 83 D4 F9 FF FF 85 C0 C7 45 BC 00 00 00 00 C7 45 C0 00 00 00 00 0F 85 90 02 00 00
+
+. 0 3A96C12C 11
+. 8B 45 08 8B 10 85 D2 89 F8 74 5E
+
+. 0 3A96C137 69
+. 8D 55 BC 89 55 84 8D 76 00 8B 4D 10 89 4C 24 18 8B 55 18 89 54 24 14 8B 4D 14 89 4C 24 10 8B 55 0C 89 44 24 08 89 54 24 0C 8B 06 89 44 24 04 8B 4D 84 89 0C 24 8B 45 A0 8B 55 9C 8B 08 8B 45 A8 E8 34 FC FF FF
+
+. 0 3A96BDB0 23
+. 55 89 E5 57 56 53 83 EC 3C 89 45 F0 8B 45 0C 89 55 EC E8 64 9B 00 00
+
+. 0 3A96BDC7 22
+. 81 C3 A5 C8 00 00 89 4D E8 8B 10 8B 40 04 89 55 E4 89 45 E0 EB 12
+
+. 0 3A96BDEF 48
+. C7 45 CC 00 00 00 00 8B 45 E4 8B 4D 10 C7 45 C8 00 00 00 00 8B 55 1C 8B 0C 88 8B 45 1C 89 4D DC 85 C0 0F 95 C0 39 D1 0F 94 C2 21 D0 A8 01 75 C1
+
+. 0 3A96BE1F 6
+. F6 45 20 02 74 0B
+
+. 0 3A96BE30 13
+. F6 83 14 FC FF FF 08 0F 85 FB 01 00 00
+
+. 0 3A96BE3D 56
+. 8B 55 DC 8B 42 30 8B 8A 8C 01 00 00 8B 40 04 89 45 D8 8B 42 2C 8B 40 04 89 4D D0 89 D1 31 D2 89 45 D4 8B 45 EC F7 B1 64 01 00 00 8B 81 68 01 00 00 8B 34 90 85 F6 75 1F
+
+. 0 3A96BE94 17
+. 8B 7D D8 89 F0 C1 E0 04 01 C7 8B 47 04 85 C0 74 DB
+
+. 0 3A96BE80 20
+. 8B 55 DC 8B 82 6C 01 00 00 8B 34 B0 85 F6 0F 84 DC 00 00 00
+
+. 0 3A96BF70 9
+. 83 7D CC 01 8B 7D C8 74 02
+
+. 0 3A96BF79 6
+. 31 FF 85 FF 74 18
+
+. 0 3A96BF97 23
+. 8B 55 14 85 F6 0F 94 C0 85 D2 0F 95 C2 21 D0 A8 01 0F 84 32 FE FF FF
+
+. 0 3A96BFAE 14
+. 8B 55 14 8B 42 0C 85 C0 0F 84 24 FE FF FF
+
+. 0 3A96BDE0 15
+. FF 45 10 8B 4D E0 39 4D 10 0F 83 ED 01 00 00
+
+. 0 3A96BEA5 7
+. 66 83 7F 0E 00 75 06
+
+. 0 3A96BEB2 12
+. 0F B6 47 0C 83 E0 0F 83 F8 02 7F C2
+
+. 0 3A96BEBE 5
+. 3B 7D E8 74 1A
+
+. 0 3A96BEC3 22
+. 8B 0F 8B 45 D4 8B 55 F0 01 C8 89 54 24 04 89 04 24 E8 A7 93 00 00
+
+. 0 3A96BED9 4
+. 85 C0 75 A3
+
+. 0 3A96BE75 5
+. E9 FF 00 00 00
+
+. 0 3A96BEDD 11
+. 8B 55 14 85 D2 0F 84 81 01 00 00
+
+. 0 3A96BEE8 11
+. 8B 45 D0 85 C0 0F 84 8C 00 00 00
+
+. 0 3A96BEF3 47
+. 8B 4D D0 8B 45 DC 0F B7 0C 71 8B 90 7C 01 00 00 81 E1 FF 7F 00 00 C1 E1 04 89 4D C4 8B 4D 14 8B 41 04 8B 4D C4 39 44 0A 04 0F 84 C6 00 00 00
+
+. 0 3A96BFE8 23
+. 8B 4D 14 8B 01 89 44 24 04 8B 4D C4 8B 04 0A 89 04 24 E8 81 92 00 00
+
+. 0 3A96BFFF 8
+. 85 C0 0F 85 1B FF FF FF
+
+. 0 3A96C007 5
+. E9 73 FF FF FF
+
+. 0 3A96BF7F 19
+. 0F B6 47 0C C0 E8 04 0F B6 C0 83 F8 01 0F 84 8C 00 00 00
+
+. 0 3A96C01E 26
+. 8B 55 08 8B 4D DC 89 3A 89 4A 04 BA 01 00 00 00 83 C4 3C 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96C17C 6
+. 85 C0 89 C2 7F 13
+
+. 0 3A96C195 11
+. 8B 75 BC 85 F6 0F 84 3E 02 00 00
+
+. 0 3A96C1A0 16
+. C7 45 90 00 00 00 00 8B 4D A0 8B 11 85 D2 74 0C
+
+. 0 3A96C1B0 12
+. 0F B6 42 0D 83 E0 03 83 F8 03 74 48
+
+. 0 3A96C1BC 29
+. 8D 55 BC 89 55 84 8B 4D 84 8B 71 04 0F B6 86 74 01 00 00 24 03 3C 02 0F 84 2E 04 00 00
+
+. 0 3A96C1D9 17
+. 8B 83 14 FC FF FF A9 04 02 00 00 0F 85 28 02 00 00
+
+. 0 3A96C1EA 26
+. 8B 45 BC 8B 4D A0 8B 55 84 89 01 8B 42 04 8B 00 8D 65 F4 5B 5E 5F 5D C2 14 00
+
+. 0 3A96E1DE 33
+. 89 85 64 FF FF FF 8B 55 A8 83 EC 14 8B 75 08 89 D7 89 96 08 02 00 00 89 86 04 02 00 00 85 FF 74 09
+
+. 0 3A96E1FF 22
+. 8B 57 04 01 95 64 FF FF FF 83 BD 6C FF FF FF 07 0F 87 56 08 00 00
+
+. 0 3A96E215 17
+. 8B 8D 6C FF FF FF 8B 84 8B B0 D7 FF FF 01 D8 FF E0
+
+. 0 3A96EADD 19
+. 8B 95 70 FF FF FF 8B 8D 64 FF FF FF 01 0A E9 30 F8 FF FF
+
+. 0 3A96E320 16
+. 83 45 88 08 8B 7D 84 39 7D 88 0F 82 B0 FD FF FF
+
+. 0 3A96E0E0 97
+. 8B 55 88 8B BD 74 FF FF FF 8B 52 04 89 95 0C FF FF FF 89 D0 C1 E8 08 0F B7 0C 47 8B BD 7C FF FF FF C1 E0 04 01 C7 8B 45 8C 81 E1 FF 7F 00 00 89 7D A8 C1 E1 04 8B 90 7C 01 00 00 8B 45 80 01 D1 8B 55 88 03 02 0F B6 95 0C FF FF FF 89 85 70 FF FF FF 83 FA 08 89 95 6C FF FF FF 0F 84 A4 0B 00 00
+
+. 0 3A96ED40 31
+. 31 C0 83 BD 6C FF FF FF 07 8B 96 00 02 00 00 0F 94 C0 83 BD 6C FF FF FF 05 0F 84 5F 01 00 00
+
+. 0 3A96ED5F 8
+. 39 D0 0F 85 09 F4 FF FF
+
+. 0 3A96ED67 26
+. FF 83 D8 F9 FF FF 8B BE 08 02 00 00 8B 86 04 02 00 00 89 7D A8 E9 C1 F9 FF FF
+
+. 0 3A96E742 11
+. 89 85 64 FF FF FF E9 AE FA FF FF
+
+. 0 3A96E1FB 4
+. 85 FF 74 09
+
+. 0 3A96E308 40
+. 8B 85 64 FF FF FF 8B BD 70 FF FF FF 89 07 8D 76 00 8D BC 27 00 00 00 00 83 45 88 08 8B 7D 84 39 7D 88 0F 82 B0 FD FF FF
+
+. 0 3A96BF92 5
+. 83 F8 02 74 79
+
+. 0 3A96C010 14
+. 8B 83 48 FC FF FF 85 C0 0F 85 A2 00 00 00
+
+. 0 3A96BFBC 8
+. 8B 55 DC E8 BC 4B 00 00
+
+. 0 3A96BFC4 9
+. 85 C0 BA FF FF FF FF 75 61
+
+. 0 3A96BFCD 15
+. FF 45 10 8B 4D E0 39 4D 10 0F 82 13 FE FF FF
+
+. 0 3A96E1B1 45
+. 8B 45 A8 8B 4D F0 8B 00 89 74 24 0C 89 7C 24 04 01 C8 31 C9 89 4C 24 10 8D 4D A8 89 54 24 08 8B 55 0C 89 14 24 8B 55 08 E8 02 DF FF FF
+
+. 0 3A96BFDC 12
+. 31 D2 83 C4 3C 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96C182 8
+. 85 C0 0F 88 46 01 00 00
+
+. 0 3A96C18A 11
+. 83 C6 04 31 C0 8B 16 85 D2 75 AB
+
+. 0 3A96C3DE 9
+. 8B 55 A0 8B 02 85 C0 74 0B
+
+. 0 3A96C3E7 11
+. 0F B6 40 0C C0 E8 04 3C 02 74 0B
+
+. 0 3A96C3FD 21
+. 8B 55 A0 C7 02 00 00 00 00 31 C0 8D 65 F4 5B 5E 5F 5D C2 14 00
+
+. 0 3A96E208 13
+. 83 BD 6C FF FF FF 07 0F 87 56 08 00 00
+
+. 0 3A96BD70 4
+. 5D 89 D0 C3
+
+. 0 3A96C069 11
+. 8B 45 D0 85 C0 0F 84 0B FF FF FF
+
+. 0 3A96C074 6
+. F6 45 18 02 74 33
+
+. 0 3A96C0AD 19
+. 8B 45 D0 0F B7 14 70 89 D0 25 FF 7F 00 00 83 F8 02 EB C9
+
+. 0 3A96C089 6
+. 0F 8E F0 FE FF FF
+
+. 0 3A96E330 13
+. FF 45 90 83 7D 90 01 0F 8E 7D FC FF FF
+
+. 0 3A96DFBA 44
+. 89 75 8C 8B 4D 90 8D 04 49 8D 44 85 F4 8D 50 D8 8B 48 D8 8B 7A 04 89 C8 01 F8 89 45 84 8B 06 89 4D 88 89 45 80 8B 42 08 85 C0 74 4F
+
+. 0 3A96DFE6 7
+. 8B 55 84 39 D1 72 28
+
+. 0 3A96E015 19
+. 8B 7D 88 8B 4D 80 8B 07 0F B6 57 04 01 C1 83 FA 07 74 CA
+
+. 0 3A96DFF2 14
+. 8B 96 F4 01 00 00 85 D2 0F 85 CE 08 00 00
+
+. 0 3A96E000 21
+. 8B 45 80 01 01 83 45 88 08 8B 55 84 39 55 88 0F 83 1B 03 00 00
+
+. 0 3A96E33D 35
+. C7 45 B4 00 00 00 00 8B 46 34 C7 45 BC 00 00 00 00 C7 45 B0 00 00 00 00 85 C0 C7 45 AC 00 00 00 00 74 0F
+
+. 0 3A96E36F 7
+. 8B 46 68 85 C0 74 0A
+
+. 0 3A96E376 10
+. 83 78 04 07 0F 84 71 02 00 00
+
+. 0 3A96E380 91
+. 31 C0 8D BB E4 F9 FF FF 89 85 54 FF FF FF 89 BD 14 FF FF FF 89 B5 50 FF FF FF 8B 95 54 FF FF FF 31 FF 8B 0E 8D 04 52 8D 44 85 F4 89 8D 40 FF FF FF 8B 50 B8 8B 40 BC 89 85 4C FF FF FF 01 D0 89 85 44 FF FF FF 8B 46 30 8B 40 04 89 85 3C FF FF FF 8B 86 B8 00 00 00 85 C0 74 03
+
+. 0 3A96E3DE 22
+. 8B 85 4C FF FF FF 89 D1 BA AB AA AA AA F7 E2 C1 EA 03 39 FA 76 02
+
+. 0 3A96E3F6 20
+. 3B B5 14 FF FF FF 8D 04 52 8D 04 81 89 85 48 FF FF FF 74 48
+
+. 0 3A96E40A 10
+. 8B 85 40 FF FF FF 85 C0 75 0A
+
+. 0 3A96E41E 8
+. 3B 8D 48 FF FF FF 73 2C
+
+. 0 3A96E452 14
+. 8B 86 DC 00 00 00 85 C0 0F 84 F0 02 00 00
+
+. 0 3A96E460 27
+. 8B 95 44 FF FF FF 8B 40 04 39 95 48 FF FF FF 89 85 38 FF FF FF 0F 83 35 FE FF FF
+
+. 0 3A96E2B0 19
+. FF 85 54 FF FF FF 83 BD 54 FF FF FF 01 0F 8E D1 00 00 00
+
+. 0 3A96E394 71
+. 89 B5 50 FF FF FF 8B 95 54 FF FF FF 31 FF 8B 0E 8D 04 52 8D 44 85 F4 89 8D 40 FF FF FF 8B 50 B8 8B 40 BC 89 85 4C FF FF FF 01 D0 89 85 44 FF FF FF 8B 46 30 8B 40 04 89 85 3C FF FF FF 8B 86 B8 00 00 00 85 C0 74 03
+
+. 0 3A96E2C3 11
+. 8B 45 14 85 C0 0F 85 E9 0A 00 00
+
+. 0 3A96E2CE 18
+. 80 8E 74 01 00 00 04 8B 4D 98 85 C9 0F 85 A1 0A 00 00
+
+. 0 3A96E2E0 13
+. 8B 45 08 8B 90 10 02 00 00 85 D2 74 13
+
+. 0 3A96E300 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96E05C 15
+. 89 F8 89 CA C1 E8 03 3B 85 78 FF FF FF 76 06
+
+. 0 3A96E0B0 14
+. 8B 86 DC 00 00 00 85 C0 0F 84 73 05 00 00
+
+. 0 3A96E414 10
+. 8B 86 14 01 00 00 85 C0 75 34
+
+. 0 3A968122 5
+. E8 09 A6 00 00
+
+. 0 3A972730 5
+. 55 89 E5 5D C3
+
+. 0 3A968127 10
+. 8B 83 D0 F9 FF FF 85 C0 75 62
+
+. 0 3A968131 13
+. 83 BB 54 FB FF FF 01 0F 86 86 F2 FF FF
+
+. 0 3A96813E 36
+. 31 C0 89 44 24 0C 31 C0 89 44 24 08 8B 83 94 F9 FF FF 8B 80 B0 01 00 00 89 3C 24 89 44 24 04 E8 4E 5D 00 00
+
+. 0 3A96E233 12
+. 8B 55 D0 8B 45 CC 01 D0 39 C8 74 17
+
+. 0 3A96E256 16
+. 8B 46 20 8B 48 04 01 CA 89 55 D0 E9 4D FD FF FF
+
+. 0 3A96DFED 5
+. E9 3E 03 00 00
+
+. 0 3A968162 5
+. E9 5D F2 FF FF
+
+. 0 3A9673C4 18
+. 8B 95 78 FE FF FF C7 42 0C 01 00 00 00 E8 9A 97 00 00
+
+. 0 3A970B70 5
+. 55 89 E5 5D C3
+
+. 0 3A9673D6 5
+. E8 F5 42 00 00
+
+. 0 3A96B6D0 12
+. 55 89 E5 53 83 EC 08 E8 4F A2 00 00
+
+. 0 3A96B6DC 29
+. 81 C3 90 CF 00 00 8B 8B DC 00 00 00 85 C9 0F 95 C0 83 F9 FF 0F 95 C2 21 D0 A8 01 75 07
+
+. 0 3A96B700 18
+. 89 0C 24 8B 83 E4 00 00 00 89 44 24 04 E8 7E 95 00 00
+
+. 0 3A974C90 17
+. 89 DA 8B 4C 24 08 8B 5C 24 04 B8 5B 00 00 00 CD 80
+
+. 0 3A974CA1 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3A974CAA 1
+. C3
+
+. 0 3A96B712 14
+. 31 C0 89 83 DC 00 00 00 83 C4 08 5B 5D C3
+
+. 0 3A9673DB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A9730A0 17
+. 8B 85 20 FE FF FF 81 C4 04 02 00 00 5B 5E 5F 5D C3
+
+. 0 3A965ECA 11
+. 80 BB 14 FC FF FF 00 89 C7 78 5B
+
+. 0 3A965ED5 10
+. 83 C4 54 89 F8 5B 5E 5F 5D C3
+
+. 0 3A965887 7
+. 89 C7 E8 E2 FF FF FF
+
+. 0 3A965870 4
+. 8B 1C 24 C3
+
+. 0 3A96588E 39
+. 81 C3 DE 2D 01 00 8B 83 08 02 00 00 5A 8D 24 84 29 C2 52 8B 83 94 F9 FF FF 8D 74 94 08 8D 4C 24 04 56 E8 CB AD 00 00
+
+. 0 3A970680 22
+. 55 89 E5 57 89 C7 56 53 83 EC 2C 8B 80 98 00 00 00 E8 95 52 00 00
+
+. 0 3A970696 35
+. 81 C3 D6 7F 00 00 89 55 F0 8B B7 9C 00 00 00 89 45 E8 8B 83 CC F9 FF FF 89 4D EC 85 C0 0F 85 91 00 00 00
+
+. 0 3A97074A 14
+. 8B 55 08 89 14 24 8B 55 F0 E8 08 FE FF FF
+
+. 0 3A970560 29
+. 55 89 E5 83 EC 28 89 5D F4 89 75 F8 89 C6 89 7D FC 0F B6 80 74 01 00 00 E8 AE 53 00 00
+
+. 0 3A97057D 16
+. 81 C3 EF 80 00 00 89 55 F0 89 4D EC A8 08 75 33
+
+. 0 3A97058D 25
+. 88 C2 8B 46 04 80 CA 08 88 96 74 01 00 00 0F B6 08 84 C9 0F 84 98 00 00 00
+
+. 0 3A9705A6 7
+. 8B 56 48 85 D2 75 23
+
+. 0 3A9705D0 9
+. F6 83 14 FC FF FF 02 75 7C
+
+. 0 3A9705D9 4
+. 85 D2 75 42
+
+. 0 3A97061F 29
+. 8B 42 04 8B 55 08 8B 3E 89 54 24 08 8B 55 EC 01 F8 89 54 24 04 8B 55 F0 89 14 24 FF D0
+
+. 0 3A97C92C 11
+. 55 89 E5 83 EC 08 E8 89 00 00 00
+
+. 0 3A97C9C0 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 17 14 00 00 52 8B 83 2C 00 00 00 85 C0 74 02
+
+. 0 3A97C9DD 4
+. 58 5B C9 C3
+
+. 0 3A97C937 5
+. E8 24 01 00 00
+
+. 0 3A97CA60 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 77 13 00 00 50 8B 83 FC FF FF FF 85 C0 74 0A
+
+. 0 3A97CA85 5
+. 8B 5D FC C9 C3
+
+. 0 3A97C93C 5
+. E8 5F 03 00 00
+
+. 0 3A97CCA0 34
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 36 11 00 00 8D 83 F0 FF FF FF 8D 70 FC 8B 40 FC 83 F8 FF 74 0C
+
+. 0 3A97CCCE 4
+. 5B 5E 5D C3
+
+. 0 3A97C941 2
+. C9 C3
+
+. 0 3A97063C 2
+. EB 9F
+
+. 0 3A9705DD 7
+. 8B 56 7C 85 D2 74 DC
+
+. 0 3A9705C0 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A970758 19
+. 31 C0 89 83 CC F9 FF FF 8B 45 E8 85 C0 0F 84 59 FF FF FF
+
+. 0 3A9706C4 7
+. 31 C0 E8 65 04 00 00
+
+. 0 3A970B65 4
+. 5D 89 D0 C3
+
+. 0 3A9706CB 15
+. 89 45 E4 C7 40 0C 01 00 00 00 E8 96 04 00 00
+
+. 0 3A9706DA 13
+. 8B B7 54 01 00 00 89 F0 4E 85 C0 75 21
+
+. 0 3A970708 26
+. 8B 87 D4 01 00 00 8B 55 08 8B 04 B0 89 14 24 8B 4D EC 8B 55 F0 E8 3E FE FF FF
+
+. 0 3A9705AD 7
+. 8B 7E 7C 85 FF 75 1C
+
+. 0 3A9705B4 25
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A970722 7
+. 89 F0 4E 85 C0 75 DF
+
+. 0 3A9D67C0 16
+. 55 31 C0 89 E5 83 EC 18 89 5D F4 E8 C1 FF FF FF
+
+. 0 3A9D6791 4
+. 8B 1C 24 C3
+
+. 0 3A9D67D0 28
+. 81 C3 40 C8 0F 00 89 75 F8 8B 93 A4 00 00 00 8B 75 10 89 7D FC 85 D2 8B 7D 0C 74 14
+
+. 0 3A9D67EC 6
+. 8B 12 85 D2 75 0E
+
+. 0 3A9D6800 10
+. 89 83 10 90 FF FF 85 C0 75 26
+
+. 0 3A9D680A 21
+. 8B 83 80 01 00 00 0F B7 10 8B 83 68 01 00 00 66 3B 50 38 74 11
+
+. 0 3A9D6830 42
+. 89 BB D8 28 00 00 8B 45 08 89 83 D4 28 00 00 8B 83 3C 01 00 00 89 30 89 74 24 08 89 7C 24 04 8B 45 08 89 04 24 E8 A6 45 0A 00
+
+. 0 3AA7AE00 20
+. 55 89 E5 83 EC 14 89 7D FC 8B 7D 0C 89 5D F4 E8 7D B9 F5 FF
+
+. 0 3AA7AE14 13
+. 81 C3 FC 81 05 00 89 75 F8 85 FF 74 30
+
+. 0 3AA7AE21 6
+. 8B 37 85 F6 74 2A
+
+. 0 3AA7AE27 17
+. 89 34 24 B8 2F 00 00 00 89 44 24 04 E8 A8 B7 F5 FF
+
+. 0 3A9D65E0 6
+. FF A3 40 00 00 00
+
+. 0 3A9D65E6 10
+. 68 68 00 00 00 E9 10 FF FF FF
+
+. 0 3A9D6500 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 0 3A96FEF0 16
+. 50 51 52 8B 54 24 10 8B 44 24 0C E8 60 01 00 00
+
+. 0 3A970060 22
+. 55 89 E5 57 56 89 C6 53 83 EC 24 8B 48 30 8B 40 2C E8 B5 58 00 00
+
+. 0 3A970076 59
+. 81 C3 F6 85 00 00 8B 40 04 89 45 EC 8B 46 74 8B 78 04 01 FA 8B 7A 04 8B 12 C1 EF 08 89 F8 C1 E0 04 03 41 04 8B 0E 01 CA 89 55 E8 89 45 E4 F6 40 0D 03 89 45 F0 0F 85 97 00 00 00
+
+. 0 3A9700B1 12
+. 8B 86 DC 00 00 00 31 D2 85 C0 74 25
+
+. 0 3A9700BD 95
+. 8B 40 04 0F B7 14 78 8B 86 7C 01 00 00 81 E2 FF 7F 00 00 C1 E2 04 01 C2 31 C0 83 7A 04 00 0F 95 C0 F7 D8 21 C2 8B 45 E4 BF 01 00 00 00 8D 4D F0 8B 00 01 45 EC 31 C0 89 44 24 10 B8 01 00 00 00 89 54 24 04 89 F2 89 44 24 0C 89 7C 24 08 8B 86 B0 01 00 00 89 04 24 8B 45 EC E8 C4 BF FF FF
+
+. 0 3A97011C 14
+. 8B 55 F0 83 EC 14 89 C1 31 C0 85 D2 74 07
+
+. 0 3A97012A 17
+. 89 C8 8B 4A 04 01 C8 8B B3 44 FC FF FF 85 F6 75 05
+
+. 0 3A97013B 13
+. 8B 55 E8 89 02 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96FF00 8
+. 5A 59 87 04 24 C2 08 00
+
+. 0 3AA28860 34
+. 57 56 31 C0 8B 74 24 0C 8B 4C 24 10 88 CD 89 CA C1 E1 10 66 89 D1 F7 C6 03 00 00 00 0F 84 81 00 00 00
+
+. 0 3AA28903 15
+. 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00
+
+. 0 3AA28912 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 CB 00 00 00
+
+. 0 3AA28921 11
+. 31 CA BF FF FE FE FE 01 D7 73 9E
+
+. 0 3AA2892C 11
+. 31 D7 81 CF FF FE FE FE 47 75 A9
+
+. 0 3AA288E0 17
+. 83 EE 04 83 EE 04 83 EE 04 F7 C2 00 00 FF 00 75 05
+
+. 0 3AA288F6 7
+. 8D 46 0C 84 F6 75 03
+
+. 0 3AA288FD 21
+. 8D 46 0D 83 C6 10 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00
+
+. 0 3AA288F1 5
+. 8D 46 0E EB 0A
+
+. 0 3AA28900 18
+. 83 C6 10 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00
+
+. 0 3AA28937 16
+. 8B 56 04 BF FF FE FE FE 01 D7 0F 83 A2 00 00 00
+
+. 0 3AA28947 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 93 00 00 00
+
+. 0 3AA28956 15
+. 31 CA BF FF FE FE FE 01 D7 0F 83 68 FF FF FF
+
+. 0 3AA28965 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 6F FF FF FF
+
+. 0 3AA288E3 14
+. 83 EE 04 83 EE 04 F7 C2 00 00 FF 00 75 05
+
+. 0 3AA289E9 7
+. 83 C6 04 38 CA 75 02
+
+. 0 3AA289F2 4
+. 84 D2 74 20
+
+. 0 3AA289F6 4
+. 38 CE 75 03
+
+. 0 3AA289FD 4
+. 84 F6 74 15
+
+. 0 3AA28A16 3
+. 5E 5F C3
+
+. 0 3AA7AE38 4
+. 85 C0 74 22
+
+. 0 3AA7AE3C 34
+. 8D 50 01 8B 83 98 01 00 00 89 10 8B 17 8B 83 70 02 00 00 89 10 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9D685A 5
+. E8 C1 FE FF FF
+
+. 0 3A9D6720 10
+. 55 89 E5 56 53 E8 67 00 00 00
+
+. 0 3A9D672A 18
+. 81 C3 E6 C8 0F 00 8D B3 94 FF FF FF 8B 06 85 C0 75 04
+
+. 0 3A9D6740 5
+. 83 C6 04 FF D0
+
+. 0 3AA217B0 14
+. 55 89 E5 83 EC 08 89 1C 24 E8 D3 4F FB FF
+
+. 0 3AA217BE 20
+. 81 C3 52 18 0B 00 89 74 24 04 8B 83 38 02 00 00 85 C0 75 4E
+
+. 0 3AA21820 11
+. 8B 1C 24 8B 74 24 04 89 EC 5D C3
+
+. 0 3A9D6745 6
+. 8B 06 85 C0 75 F5
+
+. 0 3A9D674B 6
+. 5B 5E 5D 89 F6 C3
+
+. 0 3A9D685F 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9A148C 11
+. 55 89 E5 83 EC 08 E8 D9 00 00 00
+
+. 0 3A9A1570 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 CB E2 01 00 52 8B 83 50 00 00 00 85 C0 74 02
+
+. 0 3A9A158D 4
+. 58 5B C9 C3
+
+. 0 3A9A1497 5
+. E8 74 01 00 00
+
+. 0 3A9A1610 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 2B E2 01 00 50 8B 83 FC FF FF FF 85 C0 74 0A
+
+. 0 3A9A1635 5
+. 8B 5D FC C9 C3
+
+. 0 3A9A149C 5
+. E8 7F 8B 01 00
+
+. 0 3A9BA020 34
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 1A 58 00 00 8D 83 F0 FF FF FF 8D 70 FC 8B 40 FC 83 F8 FF 74 0C
+
+. 0 3A9BA04E 4
+. 5B 5E 5D C3
+
+. 0 3A9A14A1 2
+. C9 C3
+
+. 0 3A97063E 9
+. F6 C2 03 0F 84 79 FF FF FF
+
+. 0 3A970729 15
+. 8B 45 E4 C7 40 0C 00 00 00 00 E8 38 04 00 00
+
+. 0 3A970738 18
+. 31 C0 89 83 34 00 00 00 83 C4 2C 5B 5E 5F 5D C2 04 00
+
+. 0 3A9658B5 8
+. 8D 93 94 81 FF FF FF E7
+
+. 0 8048430 33
+. 31 ED 5E 89 E1 83 E4 F0 50 54 52 68 80 0B 05 08 68 F0 0B 05 08 51 56 68 4C 09 05 08 E8 AF FF FF FF
+
+. 0 8048400 6
+. FF 25 B0 39 05 08
+
+. 0 8048406 10
+. 68 20 00 00 00 E9 A0 FF FF FF
+
+. 0 80483B0 12
+. FF 35 98 39 05 08 FF 25 9C 39 05 08
+
+. 0 3A9D6870 22
+. 55 31 C9 89 E5 57 56 53 83 EC 0C 8B 45 0C 8B 7D 10 E8 0B FF FF FF
+
+. 0 3A9D6886 23
+. 81 C3 8A C7 0F 00 8B 75 1C 8D 54 87 04 8B 83 A4 00 00 00 85 C0 74 13
+
+. 0 3A9D689D 6
+. 8B 00 85 C0 75 0D
+
+. 0 3A9D68A3 33
+. B9 01 00 00 00 90 8D B4 26 00 00 00 00 8B 83 B4 00 00 00 85 F6 89 08 8B 83 3C 01 00 00 89 10 74 14
+
+. 0 3A9D68C4 20
+. 89 34 24 31 C0 89 44 24 08 31 C0 89 44 24 04 E8 88 55 01 00
+
+. 0 3A9EBE60 9
+. 55 89 E5 53 E8 28 A9 FE FF
+
+. 0 3A9EBE69 11
+. 81 C3 A7 71 0E 00 E8 FC FE FF FF
+
+. 0 3A9EBD70 16
+. 55 89 E5 57 31 FF 56 53 83 EC 04 E8 11 AA FE FF
+
+. 0 3A9EBD80 20
+. 81 C3 90 72 0E 00 8B 93 3C 2B 00 00 85 D2 0F 85 A0 00 00 00
+
+. 0 3A9EBD94 10
+. 8B B3 BC 9E FF FF 85 F6 74 2B
+
+. 0 3A9EBD9E 11
+. 89 F6 8B 56 04 31 FF 39 D7 73 15
+
+. 0 3A9EBDBE 5
+. 83 FA 1F 76 3A
+
+. 0 3A9EBDFD 12
+. 8D 42 01 89 D7 89 46 04 85 F6 74 C0
+
+. 0 3A9EBE09 24
+. 89 F8 B9 01 00 00 00 C1 E0 04 89 4C 06 08 8B 93 44 2B 00 00 85 D2 75 23
+
+. 0 3A9EBE21 6
+. 31 C0 85 F6 74 07
+
+. 0 3A9EBE27 13
+. C1 E7 04 8D 44 37 08 5A 5B 5E 5F 5D C3
+
+. 0 3A9EBE74 11
+. 89 C2 85 D2 B8 FF FF FF FF 74 1A
+
+. 0 3A9EBE7F 29
+. C7 02 04 00 00 00 8B 45 08 89 42 04 8B 45 0C 89 42 08 8B 45 10 89 42 0C 31 C0 5B 5D C3
+
+. 0 3A9D68D8 7
+. 8B 75 18 85 F6 74 17
+
+. 0 3A9D68DF 23
+. 31 D2 31 C9 89 4C 24 08 89 54 24 04 8B 55 18 89 14 24 E8 6A 55 01 00
+
+. 0 3A9EBDA9 13
+. 8D 46 08 8D 74 26 00 8B 08 85 C9 74 3E
+
+. 0 3A9EBDB6 8
+. 47 83 C0 10 39 D7 72 F2
+
+. 0 3A9D68F6 11
+. 8B B3 68 01 00 00 F6 06 02 75 32
+
+. 0 3A9D6901 7
+. 8B 45 14 85 C0 74 03
+
+. 0 3A9D6908 3
+. FF 55 14
+
+. 0 8050BF0 19
+. 55 89 E5 83 EC 18 89 5D F4 89 75 F8 31 F6 E8 45 00 00 00
+
+. 0 8050C48 4
+. 8B 1C 24 C3
+
+. 0 8050C03 14
+. 81 C3 91 2D 00 00 89 7D FC E8 87 77 FF FF
+
+. 0 8048398 11
+. 55 89 E5 83 EC 08 E8 B1 00 00 00
+
+. 0 8048454 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 37 B5 00 00 52 8B 83 28 00 00 00 85 C0 74 02
+
+. 0 8048471 4
+. 58 5B C9 C3
+
+. 0 80483A3 5
+. E8 18 01 00 00
+
+. 0 80484C0 15
+. 55 89 E5 51 51 8B 15 90 39 05 08 85 D2 74 19
+
+. 0 80484E8 2
+. C9 C3
+
+. 0 80483A8 5
+. E8 A3 88 00 00
+
+. 0 8050C50 20
+. 55 89 E5 53 52 BB 80 39 05 08 A1 80 39 05 08 83 F8 FF 74 0C
+
+. 0 8050C70 4
+. 58 5B 5D C3
+
+. 0 80483AD 2
+. C9 C3
+
+. 0 8050C11 21
+. 8D 93 0C FF FF FF 8D 83 0C FF FF FF 29 C2 C1 FA 02 39 D6 73 15
+
+. 0 8050C3B 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9D690B 7
+. F6 06 02 89 F6 75 38
+
+. 0 3A9D6912 25
+. 8B 93 3C 01 00 00 8B 02 89 7C 24 04 89 44 24 08 8B 45 0C 89 04 24 FF 55 08
+
+. 0 805094C 31
+. 55 89 E5 83 EC 08 83 E4 F0 B8 00 00 00 00 29 C4 83 EC 08 68 EC 84 04 08 6A 04 E8 65 7A FF FF
+
+. 0 80483D0 6
+. FF 25 A4 39 05 08
+
+. 0 80483D6 10
+. 68 08 00 00 00 E9 D0 FF FF FF
+
+. 0 3A9E94D0 23
+. 55 89 E5 57 56 53 81 EC 2C 01 00 00 8B 4D 0C 8B 7D 08 E8 AA D2 FE FF
+
+. 0 3A9E94E7 23
+. 81 C3 29 9B 0E 00 83 F9 FF 0F 94 C0 85 FF 0F 9E C2 09 D0 A8 01 75 05
+
+. 0 3A9E94FE 5
+. 83 FF 40 7E 1D
+
+. 0 3A9E9520 19
+. 89 8D 64 FF FF FF 8D B5 68 FF FF FF B8 1F 00 00 00 EB 0D
+
+. 0 3A9E9540 10
+. C7 04 86 00 00 00 00 48 79 F6
+
+. 0 3A9E954A 75
+. 8D 57 FF B8 01 00 00 00 89 D1 83 E1 1F C1 EA 05 D3 E0 09 04 96 85 84 93 90 29 00 00 0F 95 C0 0F B6 C0 83 F8 01 19 C0 25 00 00 00 10 89 45 E8 8D 85 D4 FE FF FF 89 44 24 08 8D 85 64 FF FF FF 89 44 24 04 89 3C 24 E8 1B 02 00 00
+
+. 0 3A9E97B0 14
+. 55 89 E5 83 EC 14 89 5D F8 E8 D3 CF FE FF
+
+. 0 3A9E97BE 28
+. 81 C3 52 98 0E 00 8B 4D 0C 8B 55 10 89 75 FC 8B 83 6C 2B 00 00 8B 75 08 85 C0 75 1A
+
+. 0 3A9E97DA 16
+. 89 54 24 08 89 4C 24 04 89 34 24 E8 86 FE FF FF
+
+. 0 3A9E9670 22
+. 55 31 C9 89 E5 57 56 53 81 EC 28 01 00 00 8B 55 0C E8 0B D1 FE FF
+
+. 0 3A9E9686 10
+. 81 C3 8A 99 0E 00 85 D2 74 66
+
+. 0 3A9E9690 49
+. FC 8B 02 B9 20 00 00 00 8D BD 70 FF FF FF 8D 72 04 89 85 64 FF FF FF 8B 82 84 00 00 00 89 85 D0 FE FF FF 89 85 68 FF FF FF 8B 83 68 01 00 00 F3 A5
+
+. 0 3A9E96BF 2
+. F3 A5
+
+. 0 3A9E96C1 10
+. 8B B8 80 01 00 00 85 FF 75 25
+
+. 0 3A9E96CB 25
+. 81 8D 68 FF FF FF 00 00 00 04 8D 83 50 66 F1 FF F6 85 D0 FE FF FF 04 75 06
+
+. 0 3A9E96E4 27
+. 8D 83 58 66 F1 FF 89 85 6C FF FF FF 8D 8D 64 FF FF FF 8B 75 10 31 D2 85 F6 74 06
+
+. 0 3A9E96FF 23
+. 8D 95 D4 FE FF FF BE 08 00 00 00 8B 7D 08 87 DF B8 AE 00 00 00 CD 80
+
+. 0 3A9E9716 15
+. 87 FB 89 85 CC FE FF FF 3D 00 F0 FF FF 77 63
+
+. 0 3A9E9725 24
+. 31 D2 8B 85 CC FE FF FF 83 7D 10 00 F7 D0 0F 95 C2 C1 E8 1F 85 D0 74 3A
+
+. 0 3A9E973D 31
+. FC 8B 85 D4 FE FF FF B9 20 00 00 00 8B 7D 10 8D B5 E0 FE FF FF 89 07 8B 7D 10 83 C7 04 F3 A5
+
+. 0 3A9E975A 2
+. F3 A5
+
+. 0 3A9E975C 44
+. 8B 85 D8 FE FF FF 8B 55 10 89 82 84 00 00 00 8B 85 DC FE FF FF 89 82 88 00 00 00 8B 85 CC FE FF FF 81 C4 28 01 00 00 5B 5E 5F 5D C3
+
+. 0 3A9E97EA 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3A9E9595 13
+. 85 C0 BA FF FF FF FF 0F 88 71 FF FF FF
+
+. 0 3A9E95A2 19
+. 8B 95 D4 FE FF FF 81 C4 2C 01 00 00 89 D0 5B 5E 5F 5D C3
+
+. 0 805096B 8
+. 83 C4 10 E8 BE 7C FF FF
+
+. 0 8048631 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 D2 04 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 57 04 00 00 C7 45 DC AE 08 00 00 83 EC 08 6A 01 68 E0 39 05 08 E8 7A FD FF FF
+
+. 0 80483F0 6
+. FF 25 AC 39 05 08
+
+. 0 80483F6 10
+. 68 18 00 00 00 E9 B0 FF FF FF
+
+. 0 3A9E92F0 35
+. 8B 44 24 04 89 58 00 89 70 04 89 78 08 8D 4C 24 04 89 48 10 8B 4C 24 00 89 48 14 89 68 0C E8 7B 5C 0C 00
+
+. 0 3AAAEF8E 4
+. 8B 0C 24 C3
+
+. 0 3A9E9313 14
+. 81 C1 FD 9C 0E 00 8D 89 20 63 F1 FF FF E1
+
+. 0 3A9E9330 22
+. 55 89 E5 83 EC 18 89 5D F4 8B 55 0C 89 75 F8 31 F6 E8 4B D4 FE FF
+
+. 0 3A9E9346 16
+. 81 C3 CA 9C 0E 00 89 7D FC 85 D2 8B 7D 08 75 12
+
+. 0 3A9E9368 25
+. C7 04 24 00 00 00 00 8D 47 1C 89 44 24 08 31 C0 89 44 24 04 E8 8F 04 00 00
+
+. 0 3A9E9810 36
+. 55 89 E5 83 EC 08 89 34 24 BE 08 00 00 00 89 7C 24 04 8B 7D 08 8B 4D 0C 8B 55 10 87 DF B8 AF 00 00 00 CD 80
+
+. 0 3A9E9834 11
+. 87 FB 3D 00 F0 FF FF 89 C6 77 0D
+
+. 0 3A9E983F 13
+. 89 F0 8B 7C 24 04 8B 34 24 89 EC 5D C3
+
+. 0 3A9E9381 4
+. 85 C0 75 D1
+
+. 0 3A9E9385 23
+. BE 01 00 00 00 89 77 18 31 C0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 8048676 7
+. 83 C4 10 85 C0 75 7B
+
+. 0 804867D 36
+. 9B DD B5 58 FF FF FF 8B 45 E4 0F 6F 45 D8 0F 6E C0 0F 7F 45 D0 DD A5 58 FF FF FF 81 7D D0 D2 04 00 00 75 18
+
+. 0 80486A1 6
+. 83 7D D4 00 75 12
+
+. 0 80486A7 13
+. 83 EC 0C 68 A8 0C 05 08 E8 5C FD FF FF
+
+. 0 8048410 6
+. FF 25 B4 39 05 08
+
+. 0 8048416 10
+. 68 28 00 00 00 E9 90 FF FF FF
+
+. 0 3AA044D0 22
+. 55 89 E5 8D 45 0C 53 83 EC 0C 89 44 24 08 8B 45 08 E8 AB 22 FD FF
+
+. 0 3AA044E6 26
+. 81 C3 2A EB 0C 00 89 44 24 04 8B 83 B0 00 00 00 8B 00 89 04 24 E8 60 73 FF FF
+
+. 0 3A9FB860 22
+. 55 31 C9 89 E5 57 56 53 81 EC F0 05 00 00 8B 7D 0C E8 1B AF FD FF
+
+. 0 3A9FB876 17
+. 81 C3 9A 77 0D 00 89 8D 58 FB FF FF E8 A9 B2 FD FF
+
+. 0 3A9D6B30 12
+. 55 89 E5 53 83 EC 04 E8 55 FC FF FF
+
+. 0 3A9D6B3C 16
+. 81 C3 D4 C4 0F 00 8B 8B 14 1A 00 00 85 C9 75 0A
+
+. 0 3A9D6B4C 10
+. 8B 83 A4 01 00 00 5A 5B 5D C3
+
+. 0 3A9FB887 17
+. 8B 00 89 85 38 FB FF FF 8B 45 08 80 78 46 00 75 2B
+
+. 0 3A9FB898 14
+. 8B 93 38 02 00 00 85 D2 0F 84 F7 2A 00 00
+
+. 0 3A9FB8A6 9
+. 89 C2 8B 40 5C 85 C0 75 0C
+
+. 0 3A9FB8AF 20
+. C7 42 5C FF FF FF FF B8 FF FF FF FF 40 BA FF FF FF FF 75 5F
+
+. 0 3A9FB8C3 13
+. 8B 75 08 8B 06 A8 08 0F 85 23 2C 00 00
+
+. 0 3A9FB8D0 8
+. 85 FF 0F 84 FE 2F 00 00
+
+. 0 3A9FB8D8 9
+. 8B 55 08 80 7A 46 00 75 2E
+
+. 0 3A9FB8E1 14
+. 8B 8B 38 02 00 00 85 C9 0F 84 EB 2B 00 00
+
+. 0 3A9FB8EF 7
+. 8B 42 5C 85 C0 75 0C
+
+. 0 3A9FB902 8
+. 40 BA FF FF FF FF 75 18
+
+. 0 3A9FB90A 9
+. 8B 75 08 8B 06 A8 02 74 1D
+
+. 0 3A9FB930 68
+. B8 FF FF FF FF 8D 95 BC FB FF FF 89 85 54 FB FF FF 8B 45 10 89 95 34 FB FF FF 89 85 40 FB FF FF 31 C0 89 85 3C FB FF FF 31 C0 89 85 BC FB FF FF 31 C0 89 85 C0 FB FF FF 89 54 24 04 89 3C 24 E8 AC 76 01 00
+
+. 0 3AA13020 17
+. 55 89 E5 57 56 53 83 EC 0C 8B 75 08 E8 60 37 FC FF
+
+. 0 3AA13031 28
+. 81 C3 DF FF 0B 00 8B 7D 0C 0F B6 06 84 C0 0F 95 C2 3C 25 0F 95 C0 21 D0 A8 01 75 43
+
+. 0 3AA13090 11
+. C7 07 00 00 00 00 F6 06 80 75 B5
+
+. 0 3AA1309B 20
+. 46 0F B6 06 84 C0 0F 95 C2 3C 25 0F 95 C0 21 D0 A8 01 75 E1
+
+. 0 3AA130AF 10
+. 83 C4 0C 89 F0 5B 5E 5F 5D C3
+
+. 0 3A9FB974 38
+. 89 85 4C FB FF FF 8B 4D 08 89 85 B4 FB FF FF 31 C0 0F B7 11 89 85 30 FB FF FF 89 D0 25 00 80 FF FF 66 85 C0 75 0E
+
+. 0 3A9FB99A 14
+. 8B 83 B8 01 00 00 85 C0 0F 85 41 01 00 00
+
+. 0 3A9FB9A8 16
+. 89 D0 25 00 80 FF FF 66 85 C0 0F 84 21 01 00 00
+
+. 0 3A9FBAD9 11
+. 8B 45 08 89 04 24 E8 9C AA FD FF
+
+. 0 3A9D6580 6
+. FF A3 28 00 00 00
+
+. 0 3A9D6586 10
+. 68 38 00 00 00 E9 70 FF FF FF
+
+. 0 3AA12F20 12
+. 55 89 E5 53 83 EC 04 E8 65 38 FC FF
+
+. 0 3AA12F2C 16
+. 81 C3 E4 00 0C 00 8B 93 3C 2B 00 00 85 D2 75 04
+
+. 0 3AA12F3C 4
+. 58 5B 5D C3
+
+. 0 3A9FBAE4 5
+. E9 CF FE FF FF
+
+. 0 3A9FB9B8 36
+. 8B 55 08 8B B5 4C FB FF FF 0F BE 42 46 29 FE 8B 84 10 94 00 00 00 89 14 24 89 74 24 08 89 7C 24 04 FF 50 1C
+
+. 0 3AA1DFD0 28
+. 55 89 E5 83 EC 24 89 75 F8 8B 45 10 8B 75 10 89 5D F4 89 45 F0 31 C0 E8 A5 87 FB FF
+
+. 0 3AA1DFEC 27
+. 81 C3 24 50 0B 00 89 7D FC 85 F6 8B 7D 0C C7 45 EC 00 00 00 00 0F 84 C5 00 00 00
+
+. 0 3AA1E007 27
+. 8B 55 08 89 D1 8B 72 18 8B 01 8B 52 14 25 00 0A 00 00 29 D6 3D 00 0A 00 00 74 30
+
+. 0 3AA1E022 8
+. 85 F6 0F 84 8E 00 00 00
+
+. 0 3AA1E0B8 12
+. 8B 45 F0 8B 55 EC 01 D0 85 C0 75 1C
+
+. 0 3AA1E0E0 29
+. 8B 4D 08 BE FF FF FF FF 0F BE 41 46 8B 84 08 94 00 00 00 89 74 24 04 89 0C 24 FF 50 0C
+
+. 0 3AA1EBC0 17
+. 55 89 E5 57 56 53 83 EC 10 8B 75 08 E8 C0 7B FB FF
+
+. 0 3AA1EBD1 17
+. 81 C3 3F 44 0B 00 8B 0E F6 C1 08 0F 85 23 01 00 00
+
+. 0 3AA1EBE2 9
+. F6 C5 08 0F 84 C5 00 00 00
+
+. 0 3AA1ECB0 11
+. 8B 46 10 85 C0 0F 85 4D FF FF FF
+
+. 0 3AA1ECBB 5
+. E9 32 FF FF FF
+
+. 0 3AA1EBF2 8
+. 89 34 24 E8 36 22 00 00
+
+. 0 3AA20E30 20
+. 55 89 E5 83 EC 18 89 75 FC 8B 75 08 89 5D F8 E8 4D 59 FB FF
+
+. 0 3AA20E44 13
+. 81 C3 CC 21 0B 00 8B 46 1C 85 C0 75 20
+
+. 0 3AA20E51 5
+. F6 06 02 74 07
+
+. 0 3AA20E5D 17
+. 0F BE 46 46 8B 84 30 94 00 00 00 89 34 24 FF 50 34
+
+. 0 3AA13C10 29
+. 55 89 E5 81 EC 84 00 00 00 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 46 38 E8 64 2B FC FF
+
+. 0 3AA13C2D 15
+. 81 C3 E3 F3 0B 00 BF 00 20 00 00 85 C0 78 34
+
+. 0 3AA13C3C 24
+. 0F BE 46 46 8B 94 30 94 00 00 00 8D 45 94 89 44 24 04 89 34 24 FF 52 48
+
+. 0 3AA1E4F0 35
+. 55 89 E5 83 EC 0C 8B 45 0C 89 44 24 08 8B 45 08 8B 40 38 C7 04 24 03 00 00 00 89 44 24 04 E8 CD D7 04 00
+
+. 0 3AA6BCE0 14
+. 55 89 E5 83 EC 64 89 5D F4 E8 A3 AA F6 FF
+
+. 0 3AA6BCEE 31
+. 81 C3 22 73 06 00 89 75 F8 8B B3 BC 01 00 00 89 7D FC 8B 7D 10 8B 06 85 C0 0F 85 83 00 00 00
+
+. 0 3AA6BD0D 5
+. E8 1E AE F6 FF
+
+. 0 3AA6BD12 22
+. 89 45 A8 8B 00 89 F9 8B 55 0C 89 45 B0 87 D3 B8 C5 00 00 00 CD 80
+
+. 0 3AA6BD28 13
+. 87 D3 3D 00 F0 FF FF 0F 87 B7 00 00 00
+
+. 0 3AA6BD35 7
+. 83 F8 FF 89 C2 74 34
+
+. 0 3AA6BD3C 4
+. 85 D2 75 20
+
+. 0 3AA6BD40 8
+. 8B 47 58 39 47 0C 74 18
+
+. 0 3AA6BD60 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA1E513 2
+. C9 C3
+
+. 0 3AA13C54 4
+. 85 C0 78 18
+
+. 0 3AA13C58 15
+. 8B 45 A4 25 00 F0 00 00 3D 00 20 00 00 74 7F
+
+. 0 3AA13CE6 43
+. 8B 55 B8 8B 45 B4 0F AC D0 08 C1 EA 08 89 C1 0F AC D0 18 81 E1 FF 0F 00 00 25 00 F0 FF FF 09 C1 81 E9 88 00 00 00 83 F9 07 77 0B
+
+. 0 3AA13D11 11
+. 81 0E 00 02 00 00 E9 4B FF FF FF
+
+. 0 3AA13C67 7
+. 8B 45 C8 85 C0 7E 02
+
+. 0 3AA13C6E 62
+. 89 C7 C7 04 24 00 00 00 00 31 C0 BA 03 00 00 00 89 44 24 14 B8 FF FF FF FF B9 22 00 00 00 89 44 24 10 8D 87 FF 0F 00 00 25 00 F0 FF FF 89 54 24 08 89 4C 24 0C 89 44 24 04 E8 04 3F 06 00
+
+. 0 3AA77BB0 13
+. 89 DA B8 5A 00 00 00 8D 5C 24 04 CD 80
+
+. 0 3AA77BBD 9
+. 89 D3 3D 00 F0 FF FF 77 01
+
+. 0 3AA77BC6 1
+. C3
+
+. 0 3AA13CAC 12
+. 89 C2 83 FA FF B8 FF FF FF FF 74 21
+
+. 0 3AA13CB8 28
+. 89 54 24 04 B8 01 00 00 00 89 44 24 0C 8D 04 17 89 44 24 08 89 34 24 E8 CC CE 00 00
+
+. 0 3AA20BA0 20
+. 55 89 E5 83 EC 10 89 75 FC 8B 75 08 89 5D F8 E8 DD 5B FB FF
+
+. 0 3AA20BB4 13
+. 81 C3 5C 24 0B 00 8B 4E 1C 85 C9 74 5F
+
+. 0 3AA20C20 4
+. 8B 16 EB A4
+
+. 0 3AA20BC8 19
+. 8B 45 0C 89 46 1C 8B 45 10 89 46 20 8B 45 14 85 C0 74 15
+
+. 0 3AA20BDB 15
+. 83 E2 FE 89 16 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3AA13CD4 18
+. B8 01 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA20E6E 3
+. 40 74 0F
+
+. 0 3AA20E71 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3AA1EBFA 23
+. 8B 46 1C 8B 0E 89 46 0C 89 46 04 89 46 08 F6 C5 01 0F 85 4E 01 00 00
+
+. 0 3AA1EC11 14
+. 8B 46 04 8B 7E 20 39 F8 0F 84 D8 00 00 00
+
+. 0 3AA1EC1F 40
+. 89 46 14 89 C2 89 46 10 8B 46 08 89 7D F0 89 46 04 89 46 0C 89 C8 8B 4E 5C 0D 00 08 00 00 89 06 85 C9 0F 8E DD 00 00 00
+
+. 0 3AA1ED24 7
+. A9 02 02 00 00 74 2C
+
+. 0 3AA1ED2B 8
+. 89 56 18 E9 1A FF FF FF
+
+. 0 3AA1EC4D 10
+. 83 7D 0C FF 0F 84 DC 00 00 00
+
+. 0 3AA1ED33 24
+. 8B 56 10 8B 46 14 89 34 24 89 54 24 04 29 D0 89 44 24 08 E8 35 F4 FF FF
+
+. 0 3AA1E180 17
+. 55 89 E5 56 8B 75 10 8B 45 08 8B 55 0C 85 F6 75 07
+
+. 0 3AA1E191 7
+. 31 D2 5E 89 D0 5D C3
+
+. 0 3AA1ED4B 12
+. 89 C2 83 C4 10 89 D0 5B 5E 5F 5D C3
+
+. 0 3AA1E0FD 3
+. 40 74 C4
+
+. 0 3AA1E100 22
+. 8B 45 08 8B 50 1C 8B 48 20 29 D1 8B 55 F0 83 F9 7F 89 55 E8 76 0E
+
+. 0 3AA1E116 21
+. 89 D0 31 D2 F7 F1 8B 4D F0 29 D1 89 4D E8 8B 75 E8 85 F6 75 2B
+
+. 0 3AA1E12B 7
+. 8B 4D F0 85 C9 74 92
+
+. 0 3AA1E132 28
+. 8B 45 F0 89 44 24 08 8B 55 E8 8D 04 3A 89 44 24 04 8B 4D 08 89 0C 24 E8 72 1E 00 00
+
+. 0 3AA1FFC0 30
+. 55 89 E5 57 56 83 EC 10 8B 45 10 8B 55 10 8B 7D 0C 89 45 F4 31 C0 85 D2 0F 84 9D 00 00 00
+
+. 0 3AA1FFDE 17
+. 89 F6 8B 55 08 8B 72 18 8B 42 14 29 C6 85 F6 7E 54
+
+. 0 3AA20043 7
+. 8B 75 F4 85 F6 74 29
+
+. 0 3AA2004A 30
+. 8B 55 08 89 D1 0F BE 42 46 8B 94 10 94 00 00 00 0F B6 07 47 89 0C 24 89 44 24 04 FF 52 0C
+
+. 0 3AA1EBEB 7
+. 8B 46 10 85 C0 75 5B
+
+. 0 3AA1EC57 8
+. 8B 56 14 3B 56 20 74 61
+
+. 0 3AA1EC5F 20
+. 0F B6 45 0C 88 02 8B 4E 14 8B 16 41 F6 C2 02 89 4E 14 75 11
+
+. 0 3AA1EC73 17
+. C1 EA 09 83 7D 0C 0A 0F 94 C0 21 C2 F6 C2 01 74 1D
+
+. 0 3AA1ECA1 14
+. 0F B6 55 0C 83 C4 10 89 D0 5B 5E 5F 5D C3
+
+. 0 3AA20068 3
+. 40 74 08
+
+. 0 3AA2006B 8
+. FF 4D F4 E9 6D FF FF FF
+
+. 0 3AA1FFE0 15
+. 8B 55 08 8B 72 18 8B 42 14 29 C6 85 F6 7E 54
+
+. 0 3AA1EC84 21
+. 8B 46 10 89 34 24 29 C1 89 4C 24 08 89 44 24 04 E8 E7 F4 FF FF
+
+. 0 3AA1E198 7
+. 89 F1 E8 F1 FC FF FF
+
+. 0 3AA1DE90 22
+. 55 89 E5 83 EC 24 89 5D F4 89 75 F8 89 C6 89 7D FC E8 EB 88 FB FF
+
+. 0 3AA1DEA6 18
+. 81 C3 6A 51 0B 00 F6 40 01 10 89 55 F0 89 4D EC 74 6E
+
+. 0 3AA1DF26 10
+. 8B 48 08 8B 50 10 39 D1 74 96
+
+. 0 3AA1DEC6 31
+. 0F BE 46 46 8B 55 EC 8B 84 30 94 00 00 00 89 54 24 08 8B 4D F0 89 34 24 89 4C 24 04 FF 50 3C
+
+. 0 3AA1E430 15
+. 55 89 E5 57 56 83 EC 14 8B 75 10 85 F6 7E 71
+
+. 0 3AA1E43F 9
+. 8B 45 08 F6 40 3C 02 75 37
+
+. 0 3AA1E448 33
+. 90 8D B4 26 00 00 00 00 89 74 24 08 8B 45 0C 89 44 24 04 8B 55 08 8B 42 38 89 04 24 E8 B7 E9 04 00
+
+. 0 3AA6CE20 5
+. E8 69 21 04 00
+
+. 0 3AA6CE25 15
+. 81 C1 EB 61 06 00 83 B9 14 1A 00 00 00 75 1D
+
+. 0 3AA6CE34 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 04 00 00 00 CD 80
+
+movd_1 ... ok
+. 0 3AA6CE48 8
+. 5B 3D 01 F0 FF FF 73 2D
+
+. 0 3AA6CE50 1
+. C3
+
+. 0 3AA1E469 4
+. 85 C0 78 34
+
+. 0 3AA1E46D 9
+. 01 45 0C 29 C6 85 F6 7E 3A
+
+. 0 3AA1E4B0 16
+. 29 75 10 8B 45 08 8B 78 50 8B 70 4C 85 FF 78 11
+
+. 0 3AA1E4D1 10
+. 8B 45 10 83 C4 14 5E 5F 5D C3
+
+. 0 3AA1DEE5 11
+. 89 C7 0F B7 46 44 66 85 C0 74 04
+
+. 0 3AA1DEF4 29
+. 8B 56 5C 8B 46 1C 85 D2 89 46 0C 89 46 04 89 46 08 89 46 14 89 46 10 0F 8E 9F 00 00 00
+
+. 0 3AA1DFB0 12
+. F7 06 02 02 00 00 0F 85 58 FF FF FF
+
+. 0 3AA1DF14 18
+. 89 46 18 89 F8 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA1E19F 9
+. 39 F0 BA FF FF FF FF 74 E9
+
+. 0 3AA1EC99 8
+. 40 BA FF FF FF FF 74 04
+
+. 0 3AA20073 15
+. 8B 45 10 8B 4D F4 29 C8 83 C4 10 5E 5F 5D C3
+
+. 0 3AA1E14E 8
+. 29 45 F0 E9 6E FF FF FF
+
+. 0 3AA1E0C4 21
+. 8B 45 10 8B 55 F0 29 D0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9FB9DC 19
+. BA FF FF FF FF 89 95 50 FB FF FF 39 F0 0F 85 41 01 00 00
+
+. 0 3A9FB9EF 21
+. 89 85 50 FB FF FF 8B 95 B4 FB FF FF 80 3A 00 0F 84 2C 01 00 00
+
+. 0 3A9FBB30 16
+. 8B 4D 08 0F B7 01 25 00 80 FF FF 66 85 C0 74 1A
+
+. 0 3A9FBB5A 8
+. 89 0C 24 E8 FE A9 FD FF
+
+. 0 3A9D6560 6
+. FF A3 20 00 00 00
+
+. 0 3A9D6566 10
+. 68 28 00 00 00 E9 90 FF FF FF
+
+. 0 3AA12F90 12
+. 55 89 E5 53 83 EC 04 E8 F5 37 FC FF
+
+. 0 3AA12F9C 16
+. 81 C3 74 00 0C 00 8B 93 44 2B 00 00 85 D2 75 04
+
+. 0 3AA12FAC 4
+. 58 5B 5D C3
+
+. 0 3A9FBB62 10
+. 8B 95 30 FB FF FF 85 D2 74 DE
+
+. 0 3A9FBB4A 16
+. 8B 95 50 FB FF FF 8D 65 F4 89 D0 5B 5E 5F 5D C3
+
+. 0 3AA04500 6
+. 83 C4 0C 5B 5D C3
+
+. 0 80486B4 5
+. 83 C4 10 EB 4F
+
+. 0 8048708 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050973 5
+. E8 98 7D FF FF
+
+. 0 8048710 69
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 D2 04 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 57 04 00 00 C7 45 DC AE 08 00 00 83 EC 08 6A 01 68 E0 39 05 08 E8 9B FC FF FF
+
+. 0 8048755 7
+. 83 C4 10 85 C0 75 79
+
+. 0 804875C 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 D8 0F 6E 45 E4 0F 7F 45 D0 DD A5 58 FF FF FF 81 7D D0 D2 04 00 00 75 18
+
+. 0 804877E 6
+. 83 7D D4 00 75 12
+
+. 0 8048784 13
+. 83 EC 0C 68 2D 0D 05 08 E8 7F FC FF FF
+
+. 0 3A9FB8BB 8
+. 40 BA FF FF FF FF 75 5F
+
+. 0 3AA1E052 10
+. 8B 71 20 29 D6 3B 75 10 72 C6
+
+. 0 3AA1E05C 10
+. 8B 4D 10 8D 04 39 39 F8 77 07
+
+. 0 3AA1E06D 6
+. 48 80 38 0A 75 F5
+
+. 0 3AA1E073 17
+. C7 45 EC 01 00 00 00 8B 4D 0C 29 C8 8D 70 01 EB 9E
+
+. 0 3AA1E02A 5
+. 3B 75 10 76 03
+
+. 0 3AA1E032 5
+. 83 FE 14 77 4D
+
+. 0 3AA1E037 7
+. 89 D1 89 F2 4A 78 0C
+
+. 0 3AA1E03E 12
+. 89 F6 0F B6 07 47 88 01 41 4A 79 F6
+
+. 0 3AA1E040 10
+. 0F B6 07 47 88 01 41 4A 79 F6
+
+. 0 3AA1E04A 8
+. 8B 45 08 89 48 14 EB 5E
+
+. 0 3AA1E0B0 20
+. 8B 55 10 29 F2 89 55 F0 8B 45 F0 8B 55 EC 01 D0 85 C0 75 1C
+
+movd_2 ... ok
+. 0 8048791 5
+. 83 C4 10 EB 4F
+
+. 0 80487E5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050978 5
+. E8 70 7E FF FF
+
+. 0 80487ED 69
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 D2 04 00 00 C7 45 E4 AE 08 00 00 C7 45 DC 00 00 00 00 C7 45 DC 57 04 00 00 83 EC 08 6A 01 68 E0 39 05 08 E8 BE FB FF FF
+
+. 0 8048832 7
+. 83 C4 10 85 C0 75 5F
+
+. 0 8048839 35
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 8B 45 DC 0F 7E C0 89 45 D8 DD A5 68 FF FF FF 81 7D D8 D2 04 00 00 75 12
+
+. 0 804885C 13
+. 83 EC 0C 68 62 0D 05 08 E8 A7 FB FF FF
+
+movd_3 ... ok
+. 0 8048869 5
+. 83 C4 10 EB 3A
+
+. 0 80488A8 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805097D 5
+. E8 2E 7F FF FF
+
+. 0 80488B0 75
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 D2 04 00 00 C7 45 E4 AE 08 00 00 C7 45 DC 00 00 00 00 C7 45 DC 57 04 00 00 8B 45 DC 89 45 D8 83 EC 08 6A 01 68 E0 39 05 08 E8 F5 FA FF FF
+
+. 0 80488FB 7
+. 83 C4 10 85 C0 75 5A
+
+. 0 8048902 30
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 0F 7E 45 D8 DD A5 68 FF FF FF 81 7D D8 D2 04 00 00 75 12
+
+. 0 8048920 13
+. 83 EC 0C 68 97 0D 05 08 E8 E3 FA FF FF
+
+movd_4 ... ok
+. 0 804892D 5
+. 83 C4 10 EB 3A
+
+. 0 804896C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050982 5
+. E8 ED 7F FF FF
+
+. 0 8048974 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 EF CD AB 78 C7 45 E4 56 34 12 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 34 34 34 34 C7 45 DC 12 12 12 12 83 EC 08 6A 01 68 E0 39 05 08 E8 29 FA FF FF
+
+. 0 80489C7 7
+. 83 C4 10 85 C0 75 77
+
+. 0 80489CE 51
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 6F C8 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 56 34 12 00 8B 55 D0 81 F2 EF CD AB 78 09 D0 85 C0 75 12
+
+. 0 8048A01 13
+. 83 EC 0C 68 CC 0D 05 08 E8 02 FA FF FF
+
+movq_1 ... ok
+. 0 8048A0E 5
+. 83 C4 10 EB 42
+
+. 0 8048A55 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050987 5
+. E8 D1 80 FF FF
+
+. 0 8048A5D 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 EF CD AB 78 C7 45 E4 56 34 12 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 34 34 34 34 C7 45 DC 12 12 12 12 83 EC 08 6A 01 68 E0 39 05 08 E8 40 F9 FF FF
+
+. 0 8048AB0 7
+. 83 C4 10 85 C0 75 74
+
+. 0 8048AB7 48
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 6F 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 56 34 12 00 8B 55 D0 81 F2 EF CD AB 78 09 D0 85 C0 75 12
+
+. 0 8048AE7 13
+. 83 EC 0C 68 2B 0E 05 08 E8 1C F9 FF FF
+
+movq_2 ... ok
+. 0 8048AF4 5
+. 83 C4 10 EB 42
+
+. 0 8048B3B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805098C 5
+. E8 B2 81 FF FF
+
+. 0 8048B43 95
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 EF CD AB 78 C7 45 E4 56 34 12 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 34 34 34 34 C7 45 DC 12 12 12 12 8B 45 D8 8B 55 DC 89 45 D0 89 55 D4 83 EC 08 6A 01 68 E0 39 05 08 E8 4E F8 FF FF
+
+. 0 8048BA2 7
+. 83 C4 10 85 C0 75 70
+
+. 0 8048BA9 44
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 7F 45 D0 DD A5 58 FF FF FF 8B 45 D4 35 56 34 12 00 8B 55 D0 81 F2 EF CD AB 78 09 D0 85 C0 75 12
+
+. 0 8048BD5 13
+. 83 EC 0C 68 60 0E 05 08 E8 2E F8 FF FF
+
+movq_3 ... ok
+. 0 8048BE2 5
+. 83 C4 10 EB 42
+
+. 0 8048C29 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050991 5
+. E8 9B 82 FF FF
+
+. 0 8048C31 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 39 30 00 00 C7 45 E4 40 E2 01 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 C7 CF FF FF C7 45 DC C0 1D FE FF 83 EC 08 6A 01 68 E0 39 05 08 E8 6C F7 FF FF
+
+. 0 8048C84 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 8048C8F 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 6B C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 C7 CF 75 2D
+
+. 0 8048CB3 8
+. 66 81 7D D2 00 80 75 25
+
+. 0 8048CBB 8
+. 66 81 7D D4 39 30 75 1D
+
+. 0 8048CC3 8
+. 66 81 7D D6 FF 7F 75 15
+
+. 0 8048CCB 13
+. 83 EC 0C 68 95 0E 05 08 E8 38 F7 FF FF
+
+packssdw_1 ... ok
+. 0 8048CD8 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 8048D6A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050996 5
+. E8 D7 83 FF FF
+
+. 0 8048D72 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 39 30 00 00 C7 45 E4 40 E2 01 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 C7 CF FF FF C7 45 DC C0 1D FE FF 83 EC 08 6A 01 68 E0 39 05 08 E8 2B F6 FF FF
+
+. 0 8048DC5 11
+. 83 C4 10 85 C0 0F 85 C8 00 00 00
+
+. 0 8048DD0 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 6B 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 C7 CF 75 2D
+
+. 0 8048DF1 8
+. 66 81 7D D2 00 80 75 25
+
+. 0 8048DF9 8
+. 66 81 7D D4 39 30 75 1D
+
+. 0 8048E01 8
+. 66 81 7D D6 FF 7F 75 15
+
+. 0 8048E09 13
+. 83 EC 0C 68 67 0F 05 08 E8 FA F5 FF FF
+
+packssdw_2 ... ok
+. 0 8048E16 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 8048EA8 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805099B 5
+. E8 10 85 FF FF
+
+. 0 8048EB0 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 7B 00 66 C7 45 E2 85 FF 66 C7 45 E4 D2 04 66 C7 45 E6 2E FB C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 15 00 66 C7 45 DA EB FF 66 C7 45 DC 41 01 66 C7 45 DE BF FE 83 EC 08 6A 01 68 E0 39 05 08 E8 D9 F4 FF FF
+
+. 0 8048F17 11
+. 83 C4 10 85 C0 0F 85 2B 01 00 00
+
+. 0 8048F22 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 63 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 15 75 3F
+
+. 0 8048F44 6
+. 80 7D D1 EB 75 39
+
+. 0 8048F4A 6
+. 80 7D D2 7F 75 33
+
+. 0 8048F50 6
+. 80 7D D3 80 75 2D
+
+. 0 8048F56 6
+. 80 7D D4 7B 75 27
+
+. 0 8048F5C 6
+. 80 7D D5 85 75 21
+
+. 0 8048F62 6
+. 80 7D D6 7F 75 1B
+
+. 0 8048F68 6
+. 80 7D D7 80 75 15
+
+. 0 8048F6E 13
+. 83 EC 0C 68 A8 0F 05 08 E8 95 F4 FF FF
+
+packsswb_1 ... ok
+. 0 8048F7B 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 804905D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509A0 5
+. E8 C0 86 FF FF
+
+. 0 8049065 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 7B 00 66 C7 45 E2 85 FF 66 C7 45 E4 D2 04 66 C7 45 E6 2E FB C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 15 00 66 C7 45 DA EB FF 66 C7 45 DC 41 01 66 C7 45 DE BF FE 83 EC 08 6A 01 68 E0 39 05 08 E8 24 F3 FF FF
+
+. 0 80490CC 11
+. 83 C4 10 85 C0 0F 85 28 01 00 00
+
+. 0 80490D7 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 63 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 15 75 3F
+
+. 0 80490F6 6
+. 80 7D D1 EB 75 39
+
+. 0 80490FC 6
+. 80 7D D2 7F 75 33
+
+. 0 8049102 6
+. 80 7D D3 80 75 2D
+
+. 0 8049108 6
+. 80 7D D4 7B 75 27
+
+. 0 804910E 6
+. 80 7D D5 85 75 21
+
+. 0 8049114 6
+. 80 7D D6 7F 75 1B
+
+. 0 804911A 6
+. 80 7D D7 80 75 15
+
+. 0 8049120 13
+. 83 EC 0C 68 0B 11 05 08 E8 E3 F2 FF FF
+
+packsswb_2 ... ok
+. 0 804912D 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 804920F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509A5 5
+. E8 6D 88 FF FF
+
+. 0 8049217 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 7B 00 66 C7 45 E2 85 FF 66 C7 45 E4 D2 04 66 C7 45 E6 2E FB C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 15 00 66 C7 45 DA EB FF 66 C7 45 DC 41 01 66 C7 45 DE BF FE 83 EC 08 6A 01 68 E0 39 05 08 E8 72 F1 FF FF
+
+. 0 804927E 11
+. 83 C4 10 85 C0 0F 85 51 01 00 00
+
+. 0 8049289 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 67 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 15 75 3F
+
+. 0 80492AB 6
+. 80 7D D1 00 75 39
+
+. 0 80492B1 6
+. 80 7D D2 FF 75 33
+
+. 0 80492B7 6
+. 80 7D D3 00 75 2D
+
+. 0 80492BD 6
+. 80 7D D4 7B 75 27
+
+. 0 80492C3 6
+. 80 7D D5 00 75 21
+
+. 0 80492C9 6
+. 80 7D D6 FF 75 1B
+
+. 0 80492CF 6
+. 80 7D D7 00 75 15
+
+. 0 80492D5 13
+. 83 EC 0C 68 4C 11 05 08 E8 2E F1 FF FF
+
+packuswb_1 ... ok
+. 0 80492E2 8
+. 83 C4 10 E9 00 01 00 00
+
+. 0 80493EA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509AA 5
+. E8 43 8A FF FF
+
+. 0 80493F2 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 7B 00 66 C7 45 E2 85 FF 66 C7 45 E4 D2 04 66 C7 45 E6 2E FB C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 15 00 66 C7 45 DA EB FF 66 C7 45 DC 41 01 66 C7 45 DE BF FE 83 EC 08 6A 01 68 E0 39 05 08 E8 97 EF FF FF
+
+. 0 8049459 11
+. 83 C4 10 85 C0 0F 85 4E 01 00 00
+
+. 0 8049464 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 67 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 15 75 3F
+
+. 0 8049483 6
+. 80 7D D1 00 75 39
+
+. 0 8049489 6
+. 80 7D D2 FF 75 33
+
+. 0 804948F 6
+. 80 7D D3 00 75 2D
+
+. 0 8049495 6
+. 80 7D D4 7B 75 27
+
+. 0 804949B 6
+. 80 7D D5 00 75 21
+
+. 0 80494A1 6
+. 80 7D D6 FF 75 1B
+
+. 0 80494A7 6
+. 80 7D D7 00 75 15
+
+. 0 80494AD 13
+. 83 EC 0C 68 AF 12 05 08 E8 56 EF FF FF
+
+packuswb_2 ... ok
+. 0 80494BA 8
+. 83 C4 10 E9 00 01 00 00
+
+. 0 80495C2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509AF 5
+. E8 16 8C FF FF
+
+. 0 80495CA 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0C C6 45 E1 22 C6 45 E2 38 C6 45 E3 4E C6 45 E4 15 C6 45 E5 2B C6 45 E6 41 C6 45 E7 57 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 08 C6 45 D9 07 C6 45 DA 06 C6 45 DB 05 C6 45 DC 04 C6 45 DD 03 C6 45 DE 02 C6 45 DF 01 83 EC 08 6A 01 68 E0 39 05 08 E8 AF ED FF FF
+
+. 0 8049641 11
+. 83 C4 10 85 C0 0F 85 2B 01 00 00
+
+. 0 804964C 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F FC C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 14 75 3F
+
+. 0 804966E 6
+. 80 7D D1 29 75 39
+
+. 0 8049674 6
+. 80 7D D2 3E 75 33
+
+. 0 804967A 6
+. 80 7D D3 53 75 2D
+
+. 0 8049680 6
+. 80 7D D4 19 75 27
+
+. 0 8049686 6
+. 80 7D D5 2E 75 21
+
+. 0 804968C 6
+. 80 7D D6 43 75 1B
+
+. 0 8049692 6
+. 80 7D D7 58 75 15
+
+. 0 8049698 13
+. 83 EC 0C 68 F0 12 05 08 E8 6B ED FF FF
+
+paddb_1 ... ok
+. 0 80496A5 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 8049787 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509B4 5
+. E8 D6 8D FF FF
+
+. 0 804978F 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0C C6 45 E1 22 C6 45 E2 38 C6 45 E3 4E C6 45 E4 15 C6 45 E5 2B C6 45 E6 41 C6 45 E7 57 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 08 C6 45 D9 07 C6 45 DA 06 C6 45 DB 05 C6 45 DC 04 C6 45 DD 03 C6 45 DE 02 C6 45 DF 01 83 EC 08 6A 01 68 E0 39 05 08 E8 EA EB FF FF
+
+. 0 8049806 11
+. 83 C4 10 85 C0 0F 85 28 01 00 00
+
+. 0 8049811 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F FC 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 14 75 3F
+
+. 0 8049830 6
+. 80 7D D1 29 75 39
+
+. 0 8049836 6
+. 80 7D D2 3E 75 33
+
+. 0 804983C 6
+. 80 7D D3 53 75 2D
+
+. 0 8049842 6
+. 80 7D D4 19 75 27
+
+. 0 8049848 6
+. 80 7D D5 2E 75 21
+
+. 0 804984E 6
+. 80 7D D6 43 75 1B
+
+. 0 8049854 6
+. 80 7D D7 58 75 15
+
+. 0 804985A 13
+. 83 EC 0C 68 28 13 05 08 E8 A9 EB FF FF
+
+paddb_2 ... ok
+. 0 8049867 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 8049949 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509B9 5
+. E8 93 8F FF FF
+
+. 0 8049951 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 4E 61 BC 00 C7 45 E4 B1 7F 39 05 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 3D 22 00 00 C7 45 DC E1 10 00 00 83 EC 08 6A 01 68 E0 39 05 08 E8 4C EA FF FF
+
+. 0 80499A4 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 80499AF 37
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F FE C8 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 8B 83 BC 00 75 1B
+
+. 0 80499D4 9
+. 81 7D D4 92 90 39 05 75 12
+
+. 0 80499DD 13
+. 83 EC 0C 68 60 13 05 08 E8 26 EA FF FF
+
+paddd_1 ... ok
+. 0 80499EA 5
+. 83 C4 10 EB 52
+
+. 0 8049A41 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509BE 5
+. E8 86 90 FF FF
+
+. 0 8049A49 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 4E 61 BC 00 C7 45 E4 B1 7F 39 05 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 3D 22 00 00 C7 45 DC E1 10 00 00 83 EC 08 6A 01 68 E0 39 05 08 E8 54 E9 FF FF
+
+. 0 8049A9C 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 8049AA3 34
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F FE 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 8B 83 BC 00 75 1B
+
+. 0 8049AC5 9
+. 81 7D D4 92 90 39 05 75 12
+
+. 0 8049ACE 13
+. 83 EC 0C 68 98 13 05 08 E8 35 E9 FF FF
+
+paddd_2 ... ok
+. 0 8049ADB 5
+. 83 C4 10 EB 52
+
+. 0 8049B32 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509C3 5
+. E8 72 91 FF FF
+
+. 0 8049B3A 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 19 C6 45 E1 E7 C6 45 E2 32 C6 45 E3 CE C6 45 E4 64 C6 45 E5 9C C6 45 E6 7D C6 45 E7 83 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 28 C6 45 D9 D8 C6 45 DA 1E C6 45 DB E2 C6 45 DC 14 C6 45 DD EC C6 45 DE 0A C6 45 DF F6 83 EC 08 6A 01 68 E0 39 05 08 E8 3F E8 FF FF
+
+. 0 8049BB1 11
+. 83 C4 10 85 C0 0F 85 2B 01 00 00
+
+. 0 8049BBC 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F EC C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 41 75 3F
+
+. 0 8049BDE 6
+. 80 7D D1 BF 75 39
+
+. 0 8049BE4 6
+. 80 7D D2 50 75 33
+
+. 0 8049BEA 6
+. 80 7D D3 B0 75 2D
+
+. 0 8049BF0 6
+. 80 7D D4 78 75 27
+
+. 0 8049BF6 6
+. 80 7D D5 88 75 21
+
+. 0 8049BFC 6
+. 80 7D D6 7F 75 1B
+
+. 0 8049C02 6
+. 80 7D D7 80 75 15
+
+. 0 8049C08 13
+. 83 EC 0C 68 D0 13 05 08 E8 FB E7 FF FF
+
+paddsb_1 ... ok
+. 0 8049C15 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 8049CF7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509C8 5
+. E8 32 93 FF FF
+
+. 0 8049CFF 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 19 C6 45 E1 E7 C6 45 E2 32 C6 45 E3 CE C6 45 E4 64 C6 45 E5 9C C6 45 E6 7D C6 45 E7 83 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 28 C6 45 D9 D8 C6 45 DA 1E C6 45 DB E2 C6 45 DC 14 C6 45 DD EC C6 45 DE 0A C6 45 DF F6 83 EC 08 6A 01 68 E0 39 05 08 E8 7A E6 FF FF
+
+. 0 8049D76 11
+. 83 C4 10 85 C0 0F 85 28 01 00 00
+
+. 0 8049D81 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F EC 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 41 75 3F
+
+. 0 8049DA0 6
+. 80 7D D1 BF 75 39
+
+. 0 8049DA6 6
+. 80 7D D2 50 75 33
+
+. 0 8049DAC 6
+. 80 7D D3 B0 75 2D
+
+. 0 8049DB2 6
+. 80 7D D4 78 75 27
+
+. 0 8049DB8 6
+. 80 7D D5 88 75 21
+
+. 0 8049DBE 6
+. 80 7D D6 7F 75 1B
+
+. 0 8049DC4 6
+. 80 7D D7 80 75 15
+
+. 0 8049DCA 13
+. 83 EC 0C 68 0B 14 05 08 E8 39 E6 FF FF
+
+paddsb_2 ... ok
+. 0 8049DD7 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 8049EB9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509CD 5
+. E8 EF 94 FF FF
+
+. 0 8049EC1 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 39 30 66 C7 45 E2 C7 CF 66 C7 45 E4 91 7D 66 C7 45 E6 6F 82 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 91 7D 66 C7 45 DA 6F 82 66 C7 45 DC C7 CF 66 C7 45 DE 39 30 83 EC 08 6A 01 68 E0 39 05 08 E8 C8 E4 FF FF
+
+. 0 8049F28 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 8049F33 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F ED C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 FF 7F 75 2D
+
+. 0 8049F57 8
+. 66 81 7D D2 00 80 75 25
+
+. 0 8049F5F 8
+. 66 81 7D D4 58 4D 75 1D
+
+. 0 8049F67 8
+. 66 81 7D D6 A8 B2 75 15
+
+. 0 8049F6F 13
+. 83 EC 0C 68 46 14 05 08 E8 94 E4 FF FF
+
+paddsw_1 ... ok
+. 0 8049F7C 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 804A00E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509D2 5
+. E8 3F 96 FF FF
+
+. 0 804A016 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 39 30 66 C7 45 E2 C7 CF 66 C7 45 E4 91 7D 66 C7 45 E6 6F 82 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 91 7D 66 C7 45 DA 6F 82 66 C7 45 DC C7 CF 66 C7 45 DE 39 30 83 EC 08 6A 01 68 E0 39 05 08 E8 73 E3 FF FF
+
+. 0 804A07D 11
+. 83 C4 10 85 C0 0F 85 C8 00 00 00
+
+. 0 804A088 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F ED 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 FF 7F 75 2D
+
+. 0 804A0A9 8
+. 66 81 7D D2 00 80 75 25
+
+. 0 804A0B1 8
+. 66 81 7D D4 58 4D 75 1D
+
+. 0 804A0B9 8
+. 66 81 7D D6 A8 B2 75 15
+
+. 0 804A0C1 13
+. 83 EC 0C 68 81 14 05 08 E8 42 E3 FF FF
+
+paddsw_2 ... ok
+. 0 804A0CE 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 804A160 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509D7 5
+. E8 8C 97 FF FF
+
+. 0 804A168 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 19 C6 45 E1 32 C6 45 E2 4B C6 45 E3 64 C6 45 E4 7D C6 45 E5 96 C6 45 E6 AF C6 45 E7 C8 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0A C6 45 D9 14 C6 45 DA 1E C6 45 DB 28 C6 45 DC 32 C6 45 DD 3C C6 45 DE 46 C6 45 DF 50 83 EC 08 6A 01 68 E0 39 05 08 E8 11 E2 FF FF
+
+. 0 804A1DF 11
+. 83 C4 10 85 C0 0F 85 5A 01 00 00
+
+. 0 804A1EA 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F DC C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 23 75 3F
+
+. 0 804A20C 6
+. 80 7D D1 46 75 39
+
+. 0 804A212 6
+. 80 7D D2 69 75 33
+
+. 0 804A218 6
+. 80 7D D3 8C 75 2D
+
+. 0 804A21E 6
+. 80 7D D4 AF 75 27
+
+. 0 804A224 6
+. 80 7D D5 D2 75 21
+
+. 0 804A22A 6
+. 80 7D D6 F5 75 1B
+
+. 0 804A230 6
+. 80 7D D7 FF 75 15
+
+. 0 804A236 13
+. 83 EC 0C 68 BC 14 05 08 E8 CD E1 FF FF
+
+paddusb_1 ... ok
+. 0 804A243 8
+. 83 C4 10 E9 09 01 00 00
+
+. 0 804A354 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509DC 5
+. E8 7B 99 FF FF
+
+. 0 804A35C 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 19 C6 45 E1 32 C6 45 E2 4B C6 45 E3 64 C6 45 E4 7D C6 45 E5 96 C6 45 E6 AF C6 45 E7 C8 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0A C6 45 D9 14 C6 45 DA 1E C6 45 DB 28 C6 45 DC 32 C6 45 DD 3C C6 45 DE 46 C6 45 DF 50 83 EC 08 6A 01 68 E0 39 05 08 E8 1D E0 FF FF
+
+. 0 804A3D3 11
+. 83 C4 10 85 C0 0F 85 57 01 00 00
+
+. 0 804A3DE 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F DC 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 23 75 3F
+
+. 0 804A3FD 6
+. 80 7D D1 46 75 39
+
+. 0 804A403 6
+. 80 7D D2 69 75 33
+
+. 0 804A409 6
+. 80 7D D3 8C 75 2D
+
+. 0 804A40F 6
+. 80 7D D4 AF 75 27
+
+. 0 804A415 6
+. 80 7D D5 D2 75 21
+
+. 0 804A41B 6
+. 80 7D D6 F5 75 1B
+
+. 0 804A421 6
+. 80 7D D7 FF 75 15
+
+. 0 804A427 13
+. 83 EC 0C 68 FA 14 05 08 E8 DC DF FF FF
+
+paddusb_2 ... ok
+. 0 804A434 8
+. 83 C4 10 E9 09 01 00 00
+
+. 0 804A545 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509E1 5
+. E8 67 9B FF FF
+
+. 0 804A54D 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 CE 56 66 C7 45 E2 35 82 66 C7 45 E4 9C AD 66 C7 45 E6 03 D9 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 0A 1A 66 C7 45 DA 61 1E 66 C7 45 DC B8 22 66 C7 45 DE 0F 27 83 EC 08 6A 01 68 E0 39 05 08 E8 3C DE FF FF
+
+. 0 804A5B4 11
+. 83 C4 10 85 C0 0F 85 DC 00 00 00
+
+. 0 804A5BF 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F DD C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 D8 70 75 2C
+
+. 0 804A5E3 8
+. 66 81 7D D2 96 A0 75 24
+
+. 0 804A5EB 8
+. 66 81 7D D4 54 D0 75 1C
+
+. 0 804A5F3 7
+. 66 83 7D D6 FF 75 15
+
+. 0 804A5FA 13
+. 83 EC 0C 68 38 15 05 08 E8 09 DE FF FF
+
+paddusw_1 ... ok
+. 0 804A607 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 804A6AB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509E6 5
+. E8 C8 9C FF FF
+
+. 0 804A6B3 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 CE 56 66 C7 45 E2 35 82 66 C7 45 E4 9C AD 66 C7 45 E6 03 D9 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 0A 1A 66 C7 45 DA 61 1E 66 C7 45 DC B8 22 66 C7 45 DE 0F 27 83 EC 08 6A 01 68 E0 39 05 08 E8 D6 DC FF FF
+
+. 0 804A71A 11
+. 83 C4 10 85 C0 0F 85 D9 00 00 00
+
+. 0 804A725 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F DD 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 D8 70 75 2C
+
+. 0 804A746 8
+. 66 81 7D D2 96 A0 75 24
+
+. 0 804A74E 8
+. 66 81 7D D4 54 D0 75 1C
+
+. 0 804A756 7
+. 66 83 7D D6 FF 75 15
+
+. 0 804A75D 13
+. 83 EC 0C 68 06 16 05 08 E8 A6 DC FF FF
+
+paddusw_2 ... ok
+. 0 804A76A 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 804A80E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509EB 5
+. E8 26 9E FF FF
+
+. 0 804A816 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 E1 10 66 C7 45 E6 3D 22 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 57 00 66 C7 45 DA 41 00 66 C7 45 DC 2B 00 66 C7 45 DE 15 00 83 EC 08 6A 01 68 E0 39 05 08 E8 73 DB FF FF
+
+. 0 804A87D 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 804A888 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F FD C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 29 05 75 2D
+
+. 0 804A8AC 8
+. 66 81 7D D2 6F 16 75 25
+
+. 0 804A8B4 8
+. 66 81 7D D4 0C 11 75 1D
+
+. 0 804A8BC 8
+. 66 81 7D D6 52 22 75 15
+
+. 0 804A8C4 13
+. 83 EC 0C 68 44 16 05 08 E8 3F DB FF FF
+
+paddw_1 ... ok
+. 0 804A8D1 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 804A963 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509F0 5
+. E8 76 9F FF FF
+
+. 0 804A96B 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 E1 10 66 C7 45 E6 3D 22 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 57 00 66 C7 45 DA 41 00 66 C7 45 DC 2B 00 66 C7 45 DE 15 00 83 EC 08 6A 01 68 E0 39 05 08 E8 1E DA FF FF
+
+. 0 804A9D2 11
+. 83 C4 10 85 C0 0F 85 C8 00 00 00
+
+. 0 804A9DD 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F FD 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 29 05 75 2D
+
+. 0 804A9FE 8
+. 66 81 7D D2 6F 16 75 25
+
+. 0 804AA06 8
+. 66 81 7D D4 0C 11 75 1D
+
+. 0 804AA0E 8
+. 66 81 7D D6 52 22 75 15
+
+. 0 804AA16 13
+. 83 EC 0C 68 7C 16 05 08 E8 ED D9 FF FF
+
+paddw_2 ... ok
+. 0 804AA23 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 804AAB5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509F5 5
+. E8 C3 A0 FF FF
+
+. 0 804AABD 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 20 64 A8 EC C7 45 E4 31 75 B9 FD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 E0 D8 FF FF
+
+. 0 804AB10 7
+. 83 C4 10 85 C0 75 77
+
+. 0 804AB17 51
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F DB C8 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 21 45 21 01 8B 55 D0 81 F2 20 44 A8 88 09 D0 85 C0 75 12
+
+. 0 804AB4A 13
+. 83 EC 0C 68 B4 16 05 08 E8 B9 D8 FF FF
+
+pand_1 ... ok
+. 0 804AB57 5
+. 83 C4 10 EB 42
+
+. 0 804AB9E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509FA 5
+. E8 A7 A1 FF FF
+
+. 0 804ABA6 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 20 64 A8 EC C7 45 E4 31 75 B9 FD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 F7 D7 FF FF
+
+. 0 804ABF9 7
+. 83 C4 10 85 C0 75 74
+
+. 0 804AC00 48
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F DB 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 21 45 21 01 8B 55 D0 81 F2 20 44 A8 88 09 D0 85 C0 75 12
+
+. 0 804AC30 13
+. 83 EC 0C 68 E9 16 05 08 E8 D3 D7 FF FF
+
+pand_2 ... ok
+. 0 804AC3D 5
+. 83 C4 10 EB 42
+
+. 0 804AC84 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80509FF 5
+. E8 88 A2 FF FF
+
+. 0 804AC8C 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 20 64 A8 EC C7 45 E4 31 75 B9 FD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 11 D7 FF FF
+
+. 0 804ACDF 7
+. 83 C4 10 85 C0 75 77
+
+. 0 804ACE6 51
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F DF C8 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 10 30 98 FC 8B 55 D0 81 F2 00 20 00 64 09 D0 85 C0 75 12
+
+. 0 804AD19 13
+. 83 EC 0C 68 1E 17 05 08 E8 EA D6 FF FF
+
+pandn_1 ... ok
+. 0 804AD26 5
+. 83 C4 10 EB 42
+
+. 0 804AD6D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A04 5
+. E8 6C A3 FF FF
+
+. 0 804AD75 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 20 64 A8 EC C7 45 E4 31 75 B9 FD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 28 D6 FF FF
+
+. 0 804ADC8 7
+. 83 C4 10 85 C0 75 74
+
+. 0 804ADCF 48
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F DF 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 10 30 98 FC 8B 55 D0 81 F2 00 20 00 64 09 D0 85 C0 75 12
+
+. 0 804ADFF 13
+. 83 EC 0C 68 56 17 05 08 E8 04 D6 FF FF
+
+pandn_2 ... ok
+. 0 804AE0C 5
+. 83 C4 10 EB 42
+
+. 0 804AE53 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A09 5
+. E8 4D A4 FF FF
+
+. 0 804AE5B 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0B C6 45 E1 16 C6 45 E2 21 C6 45 E3 2C C6 45 E4 37 C6 45 E5 42 C6 45 E6 4D C6 45 E7 58 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0B C6 45 D9 0B C6 45 DA 21 C6 45 DB 21 C6 45 DC 37 C6 45 DD 37 C6 45 DE 4D C6 45 DF 4D 83 EC 08 6A 01 68 E0 39 05 08 E8 1E D5 FF FF
+
+. 0 804AED2 11
+. 83 C4 10 85 C0 0F 85 57 01 00 00
+
+. 0 804AEDD 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 74 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 FF 75 3F
+
+. 0 804AEFF 6
+. 80 7D D1 00 75 39
+
+. 0 804AF05 6
+. 80 7D D2 FF 75 33
+
+. 0 804AF0B 6
+. 80 7D D3 00 75 2D
+
+. 0 804AF11 6
+. 80 7D D4 FF 75 27
+
+. 0 804AF17 6
+. 80 7D D5 00 75 21
+
+. 0 804AF1D 6
+. 80 7D D6 FF 75 1B
+
+. 0 804AF23 6
+. 80 7D D7 00 75 15
+
+. 0 804AF29 13
+. 83 EC 0C 68 8E 17 05 08 E8 DA D4 FF FF
+
+pcmpeqb_1 ... ok
+. 0 804AF36 8
+. 83 C4 10 E9 06 01 00 00
+
+. 0 804B044 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A0E 5
+. E8 39 A6 FF FF
+
+. 0 804B04C 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0B C6 45 E1 16 C6 45 E2 21 C6 45 E3 2C C6 45 E4 37 C6 45 E5 42 C6 45 E6 4D C6 45 E7 58 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0B C6 45 D9 0B C6 45 DA 21 C6 45 DB 21 C6 45 DC 37 C6 45 DD 37 C6 45 DE 4D C6 45 DF 4D 83 EC 08 6A 01 68 E0 39 05 08 E8 2D D3 FF FF
+
+. 0 804B0C3 11
+. 83 C4 10 85 C0 0F 85 54 01 00 00
+
+. 0 804B0CE 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 74 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 FF 75 3F
+
+. 0 804B0ED 6
+. 80 7D D1 00 75 39
+
+. 0 804B0F3 6
+. 80 7D D2 FF 75 33
+
+. 0 804B0F9 6
+. 80 7D D3 00 75 2D
+
+. 0 804B0FF 6
+. 80 7D D4 FF 75 27
+
+. 0 804B105 6
+. 80 7D D5 00 75 21
+
+. 0 804B10B 6
+. 80 7D D6 FF 75 1B
+
+. 0 804B111 6
+. 80 7D D7 00 75 15
+
+. 0 804B117 13
+. 83 EC 0C 68 CC 17 05 08 E8 EC D2 FF FF
+
+pcmpeqb_2 ... ok
+. 0 804B124 8
+. 83 C4 10 E9 06 01 00 00
+
+. 0 804B232 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A13 5
+. E8 22 A8 FF FF
+
+. 0 804B23A 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 30 41 AB 00 C7 45 E4 4C 6C 51 03 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 30 41 AB 00 C7 45 DC 30 41 AB 00 83 EC 08 6A 01 68 E0 39 05 08 E8 63 D1 FF FF
+
+. 0 804B28D 7
+. 83 C4 10 85 C0 75 76
+
+. 0 804B294 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 76 C8 0F 7F 4D D0 DD A5 58 FF FF FF 83 7D D0 FF 75 18
+
+. 0 804B2B6 6
+. 83 7D D4 00 75 12
+
+. 0 804B2BC 13
+. 83 EC 0C 68 0A 18 05 08 E8 47 D1 FF FF
+
+pcmpeqd_1 ... ok
+. 0 804B2C9 5
+. 83 C4 10 EB 4C
+
+. 0 804B31A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A18 5
+. E8 05 A9 FF FF
+
+. 0 804B322 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 30 41 AB 00 C7 45 E4 4C 6C 51 03 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 30 41 AB 00 C7 45 DC 30 41 AB 00 83 EC 08 6A 01 68 E0 39 05 08 E8 7B D0 FF FF
+
+. 0 804B375 7
+. 83 C4 10 85 C0 75 73
+
+. 0 804B37C 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 76 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 83 7D D0 FF 75 18
+
+. 0 804B39B 6
+. 83 7D D4 00 75 12
+
+. 0 804B3A1 13
+. 83 EC 0C 68 98 18 05 08 E8 62 D0 FF FF
+
+pcmpeqd_2 ... ok
+. 0 804B3AE 5
+. 83 C4 10 EB 4C
+
+. 0 804B3FF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A1D 5
+. E8 E5 A9 FF FF
+
+. 0 804B407 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 62 04 66 C7 45 E2 10 0D 66 C7 45 E4 BE 15 66 C7 45 E6 6C 1E C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 62 04 66 C7 45 DA 62 04 66 C7 45 DC BE 15 66 C7 45 DE BE 15 83 EC 08 6A 01 68 E0 39 05 08 E8 82 CF FF FF
+
+. 0 804B46E 11
+. 83 C4 10 85 C0 0F 85 D3 00 00 00
+
+. 0 804B479 35
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 75 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 FF 75 2A
+
+. 0 804B49C 7
+. 66 83 7D D2 00 75 23
+
+. 0 804B4A3 7
+. 66 83 7D D4 FF 75 1C
+
+. 0 804B4AA 7
+. 66 83 7D D6 00 75 15
+
+. 0 804B4B1 13
+. 83 EC 0C 68 D6 18 05 08 E8 52 CF FF FF
+
+pcmpeqw_1 ... ok
+. 0 804B4BE 8
+. 83 C4 10 E9 96 00 00 00
+
+. 0 804B55C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A22 5
+. E8 3D AB FF FF
+
+. 0 804B564 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 62 04 66 C7 45 E2 10 0D 66 C7 45 E4 BE 15 66 C7 45 E6 6C 1E C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 62 04 66 C7 45 DA 62 04 66 C7 45 DC BE 15 66 C7 45 DE BE 15 83 EC 08 6A 01 68 E0 39 05 08 E8 25 CE FF FF
+
+. 0 804B5CB 11
+. 83 C4 10 85 C0 0F 85 D0 00 00 00
+
+. 0 804B5D6 32
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 75 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 FF 75 2A
+
+. 0 804B5F6 7
+. 66 83 7D D2 00 75 23
+
+. 0 804B5FD 7
+. 66 83 7D D4 FF 75 1C
+
+. 0 804B604 7
+. 66 83 7D D6 00 75 15
+
+. 0 804B60B 13
+. 83 EC 0C 68 14 19 05 08 E8 F8 CD FF FF
+
+pcmpeqw_2 ... ok
+. 0 804B618 8
+. 83 C4 10 E9 96 00 00 00
+
+. 0 804B6B6 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A27 5
+. E8 92 AC FF FF
+
+. 0 804B6BE 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 B3 C6 45 E1 C9 C6 45 E2 DF C6 45 E3 F5 C6 45 E4 0B C6 45 E5 21 C6 45 E6 37 C6 45 E7 4D C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 4D C6 45 D9 37 C6 45 DA 21 C6 45 DB 0B C6 45 DC F5 C6 45 DD DF C6 45 DE C9 C6 45 DF B3 83 EC 08 6A 01 68 E0 39 05 08 E8 BB CC FF FF
+
+. 0 804B735 11
+. 83 C4 10 85 C0 0F 85 57 01 00 00
+
+. 0 804B740 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 64 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 FF 75 3F
+
+. 0 804B762 6
+. 80 7D D1 FF 75 39
+
+. 0 804B768 6
+. 80 7D D2 FF 75 33
+
+. 0 804B76E 6
+. 80 7D D3 FF 75 2D
+
+. 0 804B774 6
+. 80 7D D4 00 75 27
+
+. 0 804B77A 6
+. 80 7D D5 00 75 21
+
+. 0 804B780 6
+. 80 7D D6 00 75 1B
+
+. 0 804B786 6
+. 80 7D D7 00 75 15
+
+. 0 804B78C 13
+. 83 EC 0C 68 52 19 05 08 E8 77 CC FF FF
+
+pcmpgtb_1 ... ok
+. 0 804B799 8
+. 83 C4 10 E9 06 01 00 00
+
+. 0 804B8A7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A2C 5
+. E8 7E AE FF FF
+
+. 0 804B8AF 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 B3 C6 45 E1 C9 C6 45 E2 DF C6 45 E3 F5 C6 45 E4 0B C6 45 E5 21 C6 45 E6 37 C6 45 E7 4D C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 4D C6 45 D9 37 C6 45 DA 21 C6 45 DB 0B C6 45 DC F5 C6 45 DD DF C6 45 DE C9 C6 45 DF B3 83 EC 08 6A 01 68 E0 39 05 08 E8 CA CA FF FF
+
+. 0 804B926 11
+. 83 C4 10 85 C0 0F 85 54 01 00 00
+
+. 0 804B931 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 64 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 FF 75 3F
+
+. 0 804B950 6
+. 80 7D D1 FF 75 39
+
+. 0 804B956 6
+. 80 7D D2 FF 75 33
+
+. 0 804B95C 6
+. 80 7D D3 FF 75 2D
+
+. 0 804B962 6
+. 80 7D D4 00 75 27
+
+. 0 804B968 6
+. 80 7D D5 00 75 21
+
+. 0 804B96E 6
+. 80 7D D6 00 75 1B
+
+. 0 804B974 6
+. 80 7D D7 00 75 15
+
+. 0 804B97A 13
+. 83 EC 0C 68 90 19 05 08 E8 89 CA FF FF
+
+pcmpgtb_2 ... ok
+. 0 804B987 8
+. 83 C4 10 E9 06 01 00 00
+
+. 0 804BA95 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A31 5
+. E8 67 B0 FF FF
+
+. 0 804BA9D 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 39 75 56 FF C7 45 E4 C7 8A A9 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 C7 8A A9 00 C7 45 DC 39 75 56 FF 83 EC 08 6A 01 68 E0 39 05 08 E8 00 C9 FF FF
+
+. 0 804BAF0 7
+. 83 C4 10 85 C0 75 76
+
+. 0 804BAF7 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 66 C8 0F 7F 4D D0 DD A5 58 FF FF FF 83 7D D0 FF 75 18
+
+. 0 804BB19 6
+. 83 7D D4 00 75 12
+
+. 0 804BB1F 13
+. 83 EC 0C 68 CE 19 05 08 E8 E4 C8 FF FF
+
+pcmpgtd_1 ... ok
+. 0 804BB2C 5
+. 83 C4 10 EB 4C
+
+. 0 804BB7D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A36 5
+. E8 4A B1 FF FF
+
+. 0 804BB85 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 39 75 56 FF C7 45 E4 C7 8A A9 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 C7 8A A9 00 C7 45 DC 39 75 56 FF 83 EC 08 6A 01 68 E0 39 05 08 E8 18 C8 FF FF
+
+. 0 804BBD8 7
+. 83 C4 10 85 C0 75 73
+
+. 0 804BBDF 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 66 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 83 7D D0 FF 75 18
+
+. 0 804BBFE 6
+. 83 7D D4 00 75 12
+
+. 0 804BC04 13
+. 83 EC 0C 68 0C 1A 05 08 E8 FF C7 FF FF
+
+pcmpgtd_2 ... ok
+. 0 804BC11 5
+. 83 C4 10 EB 4C
+
+. 0 804BC62 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A3B 5
+. E8 2A B2 FF FF
+
+. 0 804BC6A 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 FB F2 66 C7 45 E2 A9 FB 66 C7 45 E4 57 04 66 C7 45 E6 05 0D C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 05 0D 66 C7 45 DA 57 04 66 C7 45 DC A9 FB 66 C7 45 DE FB F2 83 EC 08 6A 01 68 E0 39 05 08 E8 1F C7 FF FF
+
+. 0 804BCD1 11
+. 83 C4 10 85 C0 0F 85 D3 00 00 00
+
+. 0 804BCDC 35
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 65 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 FF 75 2A
+
+. 0 804BCFF 7
+. 66 83 7D D2 FF 75 23
+
+. 0 804BD06 7
+. 66 83 7D D4 00 75 1C
+
+. 0 804BD0D 7
+. 66 83 7D D6 00 75 15
+
+. 0 804BD14 13
+. 83 EC 0C 68 4A 1A 05 08 E8 EF C6 FF FF
+
+pcmpgtw_1 ... ok
+. 0 804BD21 8
+. 83 C4 10 E9 96 00 00 00
+
+. 0 804BDBF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A40 5
+. E8 82 B3 FF FF
+
+. 0 804BDC7 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 FB F2 66 C7 45 E2 A9 FB 66 C7 45 E4 57 04 66 C7 45 E6 05 0D C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 05 0D 66 C7 45 DA 57 04 66 C7 45 DC A9 FB 66 C7 45 DE FB F2 83 EC 08 6A 01 68 E0 39 05 08 E8 C2 C5 FF FF
+
+. 0 804BE2E 11
+. 83 C4 10 85 C0 0F 85 D0 00 00 00
+
+. 0 804BE39 32
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 65 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 FF 75 2A
+
+. 0 804BE59 7
+. 66 83 7D D2 FF 75 23
+
+. 0 804BE60 7
+. 66 83 7D D4 00 75 1C
+
+. 0 804BE67 7
+. 66 83 7D D6 00 75 15
+
+. 0 804BE6E 13
+. 83 EC 0C 68 88 1A 05 08 E8 95 C5 FF FF
+
+pcmpgtw_2 ... ok
+. 0 804BE7B 8
+. 83 C4 10 E9 96 00 00 00
+
+. 0 804BF19 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A45 5
+. E8 D7 B4 FF FF
+
+. 0 804BF21 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 1F EF 66 C7 45 E6 C3 DD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 57 04 66 C7 45 DA 52 F7 66 C7 45 DC 05 0D 66 C7 45 DE A4 EE 83 EC 08 6A 01 68 E0 39 05 08 E8 68 C4 FF FF
+
+. 0 804BF88 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804BF93 37
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F F5 C8 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 1A 68 54 FF 75 1B
+
+. 0 804BFB8 9
+. 81 7D D4 87 99 76 01 75 12
+
+. 0 804BFC1 13
+. 83 EC 0C 68 C6 1A 05 08 E8 42 C4 FF FF
+
+pmaddwd_1 ... ok
+. 0 804BFCE 5
+. 83 C4 10 EB 52
+
+. 0 804C025 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A4A 5
+. E8 DE B5 FF FF
+
+. 0 804C02D 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 1F EF 66 C7 45 E6 C3 DD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 57 04 66 C7 45 DA 52 F7 66 C7 45 DC 05 0D 66 C7 45 DE A4 EE 83 EC 08 6A 01 68 E0 39 05 08 E8 5C C3 FF FF
+
+. 0 804C094 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 804C09B 34
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F F5 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 1A 68 54 FF 75 1B
+
+. 0 804C0BD 9
+. 81 7D D4 87 99 76 01 75 12
+
+. 0 804C0C6 13
+. 83 EC 0C 68 04 1B 05 08 E8 3D C3 FF FF
+
+pmaddwd_2 ... ok
+. 0 804C0D3 5
+. 83 C4 10 EB 52
+
+. 0 804C12A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A4F 5
+. E8 DE B6 FF FF
+
+. 0 804C132 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 57 04 66 C7 45 E2 AE 08 66 C7 45 E4 A9 FB 66 C7 45 E6 52 F7 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 05 0D 66 C7 45 DA A4 EE 66 C7 45 DC 05 0D 66 C7 45 DE A4 EE 83 EC 08 6A 01 68 E0 39 05 08 E8 57 C2 FF FF
+
+. 0 804C199 11
+. 83 C4 10 85 C0 0F 85 D8 00 00 00
+
+. 0 804C1A4 35
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F E5 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 38 75 2C
+
+. 0 804C1C7 8
+. 66 81 7D D2 69 FF 75 24
+
+. 0 804C1CF 7
+. 66 83 7D D4 C7 75 1D
+
+. 0 804C1D6 8
+. 66 81 7D D6 96 00 75 15
+
+. 0 804C1DE 13
+. 83 EC 0C 68 42 1B 05 08 E8 25 C2 FF FF
+
+pmulhw_1 ... ok
+. 0 804C1EB 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 804C28C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A54 5
+. E8 3B B8 FF FF
+
+. 0 804C294 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 57 04 66 C7 45 E2 AE 08 66 C7 45 E4 A9 FB 66 C7 45 E6 52 F7 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 05 0D 66 C7 45 DA A4 EE 66 C7 45 DC 05 0D 66 C7 45 DE A4 EE 83 EC 08 6A 01 68 E0 39 05 08 E8 F5 C0 FF FF
+
+. 0 804C2FB 11
+. 83 C4 10 85 C0 0F 85 D5 00 00 00
+
+. 0 804C306 32
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F E5 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 38 75 2C
+
+. 0 804C326 8
+. 66 81 7D D2 69 FF 75 24
+
+. 0 804C32E 7
+. 66 83 7D D4 C7 75 1D
+
+. 0 804C335 8
+. 66 81 7D D6 96 00 75 15
+
+. 0 804C33D 13
+. 83 EC 0C 68 7D 1B 05 08 E8 C6 C0 FF FF
+
+pmulhw_2 ... ok
+. 0 804C34A 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 804C3EB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A59 5
+. E8 95 B9 FF FF
+
+. 0 804C3F3 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 57 04 66 C7 45 E2 AE 08 66 C7 45 E4 A9 FB 66 C7 45 E6 52 F7 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 05 0D 66 C7 45 DA A4 EE 66 C7 45 DC 05 0D 66 C7 45 DE A4 EE 83 EC 08 6A 01 68 E0 39 05 08 E8 96 BF FF FF
+
+. 0 804C45A 11
+. 83 C4 10 85 C0 0F 85 DD 00 00 00
+
+. 0 804C465 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F D5 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 B3 80 75 2D
+
+. 0 804C489 8
+. 66 81 7D D2 78 53 75 25
+
+. 0 804C491 8
+. 66 81 7D D4 4D 7F 75 1D
+
+. 0 804C499 8
+. 66 81 7D D6 88 AC 75 15
+
+. 0 804C4A1 13
+. 83 EC 0C 68 B8 1B 05 08 E8 62 BF FF FF
+
+pmullw_1 ... ok
+. 0 804C4AE 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 804C552 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A5E 5
+. E8 F7 BA FF FF
+
+. 0 804C55A 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 57 04 66 C7 45 E2 AE 08 66 C7 45 E4 A9 FB 66 C7 45 E6 52 F7 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 05 0D 66 C7 45 DA A4 EE 66 C7 45 DC 05 0D 66 C7 45 DE A4 EE 83 EC 08 6A 01 68 E0 39 05 08 E8 2F BE FF FF
+
+. 0 804C5C1 11
+. 83 C4 10 85 C0 0F 85 DA 00 00 00
+
+. 0 804C5CC 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F D5 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 B3 80 75 2D
+
+. 0 804C5ED 8
+. 66 81 7D D2 78 53 75 25
+
+. 0 804C5F5 8
+. 66 81 7D D4 4D 7F 75 1D
+
+. 0 804C5FD 8
+. 66 81 7D D6 88 AC 75 15
+
+. 0 804C605 13
+. 83 EC 0C 68 F3 1B 05 08 E8 FE BD FF FF
+
+pmullw_2 ... ok
+. 0 804C612 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 804C6B6 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A63 5
+. E8 56 BC FF FF
+
+. 0 804C6BE 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 20 64 A8 EC C7 45 E4 31 75 B9 FD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 DF BC FF FF
+
+. 0 804C711 7
+. 83 C4 10 85 C0 75 77
+
+. 0 804C718 51
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F EB C8 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 77 75 BB FD 8B 55 D0 81 F2 EF ED AB ED 09 D0 85 C0 75 12
+
+. 0 804C74B 13
+. 83 EC 0C 68 2E 1C 05 08 E8 B8 BC FF FF
+
+por_1 ... ok
+. 0 804C758 5
+. 83 C4 10 EB 42
+
+. 0 804C79F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A68 5
+. E8 3A BD FF FF
+
+. 0 804C7A7 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 20 64 A8 EC C7 45 E4 31 75 B9 FD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 F6 BB FF FF
+
+. 0 804C7FA 7
+. 83 C4 10 85 C0 75 74
+
+. 0 804C801 48
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F EB 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 77 75 BB FD 8B 55 D0 81 F2 EF ED AB ED 09 D0 85 C0 75 12
+
+. 0 804C831 13
+. 83 EC 0C 68 60 1C 05 08 E8 D2 BB FF FF
+
+por_2 ... ok
+. 0 804C83E 5
+. 83 C4 10 EB 42
+
+. 0 804C885 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A6D 5
+. E8 1B BE FF FF
+
+. 0 804C88D 55
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 67 45 23 01 C7 45 E4 EF CD AB 89 83 EC 08 6A 01 68 E0 39 05 08 E8 2C BB FF FF
+
+. 0 804C8C4 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 804C8CB 34
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 0F 72 F0 04 0F 7F 45 D8 DD A5 68 FF FF FF 81 7D D8 70 56 34 12 75 1B
+
+. 0 804C8ED 9
+. 81 7D DC F0 DE BC 9A 75 12
+
+. 0 804C8F6 13
+. 83 EC 0C 68 92 1C 05 08 E8 0D BB FF FF
+
+pslld_1 ... ok
+. 0 804C903 5
+. 83 C4 10 EB 52
+
+. 0 804C95A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A72 5
+. E8 EB BE FF FF
+
+. 0 804C962 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 67 45 23 01 C7 45 DC EF CD AB 89 83 EC 08 6A 01 68 E0 39 05 08 E8 3B BA FF FF
+
+. 0 804C9B5 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804C9C0 37
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F F2 C8 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 70 56 34 12 75 1B
+
+. 0 804C9E5 9
+. 81 7D D4 F0 DE BC 9A 75 12
+
+. 0 804C9EE 13
+. 83 EC 0C 68 CA 1C 05 08 E8 15 BA FF FF
+
+pslld_2 ... ok
+. 0 804C9FB 5
+. 83 C4 10 EB 52
+
+. 0 804CA52 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A77 5
+. E8 DE BF FF FF
+
+. 0 804CA5A 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 67 45 23 01 C7 45 DC EF CD AB 89 83 EC 08 6A 01 68 E0 39 05 08 E8 43 B9 FF FF
+
+. 0 804CAAD 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 804CAB4 34
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F F2 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 70 56 34 12 75 1B
+
+. 0 804CAD6 9
+. 81 7D D4 F0 DE BC 9A 75 12
+
+. 0 804CADF 13
+. 83 EC 0C 68 02 1D 05 08 E8 24 B9 FF FF
+
+pslld_3 ... ok
+. 0 804CAEC 5
+. 83 C4 10 EB 52
+
+. 0 804CB43 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A7C 5
+. E8 CA C0 FF FF
+
+. 0 804CB4B 55
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 6E B8 FF FF
+
+. 0 804CB82 7
+. 83 C4 10 85 C0 75 74
+
+. 0 804CB89 48
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 0F 73 F0 04 0F 7F 45 D8 DD A5 68 FF FF FF 8B 45 DC 35 78 56 34 12 8B 55 D8 81 F2 F0 DE BC 9A 09 D0 85 C0 75 12
+
+. 0 804CBB9 13
+. 83 EC 0C 68 3A 1D 05 08 E8 4A B8 FF FF
+
+psllq_1 ... ok
+. 0 804CBC6 5
+. 83 C4 10 EB 42
+
+. 0 804CC0D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A81 5
+. E8 8F C1 FF FF
+
+. 0 804CC15 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 88 B7 FF FF
+
+. 0 804CC68 7
+. 83 C4 10 85 C0 75 77
+
+. 0 804CC6F 51
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F F3 C8 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 78 56 34 12 8B 55 D0 81 F2 F0 DE BC 9A 09 D0 85 C0 75 12
+
+. 0 804CCA2 13
+. 83 EC 0C 68 72 1D 05 08 E8 61 B7 FF FF
+
+psllq_2 ... ok
+. 0 804CCAF 5
+. 83 C4 10 EB 42
+
+. 0 804CCF6 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A86 5
+. E8 73 C2 FF FF
+
+. 0 804CCFE 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 9F B6 FF FF
+
+. 0 804CD51 7
+. 83 C4 10 85 C0 75 74
+
+. 0 804CD58 48
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F F3 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 78 56 34 12 8B 55 D0 81 F2 F0 DE BC 9A 09 D0 85 C0 75 12
+
+. 0 804CD88 13
+. 83 EC 0C 68 AA 1D 05 08 E8 7B B6 FF FF
+
+psllq_3 ... ok
+. 0 804CD95 5
+. 83 C4 10 EB 42
+
+. 0 804CDDC 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A8B 5
+. E8 54 C3 FF FF
+
+. 0 804CDE4 65
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 23 01 66 C7 45 E2 67 45 66 C7 45 E4 AB 89 66 C7 45 E6 EF CD 83 EC 08 6A 01 68 E0 39 05 08 E8 CB B5 FF FF
+
+. 0 804CE25 11
+. 83 C4 10 85 C0 0F 85 DA 00 00 00
+
+. 0 804CE30 33
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 0F 71 F0 04 0F 7F 45 D8 DD A5 68 FF FF FF 66 81 7D D8 30 12 75 2D
+
+. 0 804CE51 8
+. 66 81 7D DA 70 56 75 25
+
+. 0 804CE59 8
+. 66 81 7D DC B0 9A 75 1D
+
+. 0 804CE61 8
+. 66 81 7D DE F0 DE 75 15
+
+. 0 804CE69 13
+. 83 EC 0C 68 E2 1D 05 08 E8 9A B5 FF FF
+
+psllw_1 ... ok
+. 0 804CE76 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 804CF1A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A90 5
+. E8 8D C4 FF FF
+
+. 0 804CF22 93
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 23 01 66 C7 45 DA 67 45 66 C7 45 DC AB 89 66 C7 45 DE EF CD 83 EC 08 6A 01 68 E0 39 05 08 E8 71 B4 FF FF
+
+. 0 804CF7F 11
+. 83 C4 10 85 C0 0F 85 DD 00 00 00
+
+. 0 804CF8A 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F F1 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 30 12 75 2D
+
+. 0 804CFAE 8
+. 66 81 7D D2 70 56 75 25
+
+. 0 804CFB6 8
+. 66 81 7D D4 B0 9A 75 1D
+
+. 0 804CFBE 8
+. 66 81 7D D6 F0 DE 75 15
+
+. 0 804CFC6 13
+. 83 EC 0C 68 1A 1E 05 08 E8 3D B4 FF FF
+
+psllw_2 ... ok
+. 0 804CFD3 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 804D077 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A95 5
+. E8 E5 C5 FF FF
+
+. 0 804D07F 93
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 23 01 66 C7 45 DA 67 45 66 C7 45 DC AB 89 66 C7 45 DE EF CD 83 EC 08 6A 01 68 E0 39 05 08 E8 14 B3 FF FF
+
+. 0 804D0DC 11
+. 83 C4 10 85 C0 0F 85 DA 00 00 00
+
+. 0 804D0E7 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F F1 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 30 12 75 2D
+
+. 0 804D108 8
+. 66 81 7D D2 70 56 75 25
+
+. 0 804D110 8
+. 66 81 7D D4 B0 9A 75 1D
+
+. 0 804D118 8
+. 66 81 7D D6 F0 DE 75 15
+
+. 0 804D120 13
+. 83 EC 0C 68 52 1E 05 08 E8 E3 B2 FF FF
+
+psllw_3 ... ok
+. 0 804D12D 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 804D1D1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A9A 5
+. E8 3A C7 FF FF
+
+. 0 804D1D9 55
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 67 45 23 01 C7 45 E4 EF CD AB 89 83 EC 08 6A 01 68 E0 39 05 08 E8 E0 B1 FF FF
+
+. 0 804D210 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 804D217 34
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 0F 72 E0 04 0F 7F 45 D8 DD A5 68 FF FF FF 81 7D D8 56 34 12 00 75 1B
+
+. 0 804D239 9
+. 81 7D DC DE BC 9A F8 75 12
+
+. 0 804D242 13
+. 83 EC 0C 68 8A 1E 05 08 E8 C1 B1 FF FF
+
+psrad_1 ... ok
+. 0 804D24F 5
+. 83 C4 10 EB 52
+
+. 0 804D2A6 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050A9F 5
+. E8 0A C8 FF FF
+
+. 0 804D2AE 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 67 45 23 01 C7 45 DC EF CD AB 89 83 EC 08 6A 01 68 E0 39 05 08 E8 EF B0 FF FF
+
+. 0 804D301 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804D30C 37
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F E2 C8 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 56 34 12 00 75 1B
+
+. 0 804D331 9
+. 81 7D D4 DE BC 9A F8 75 12
+
+. 0 804D33A 13
+. 83 EC 0C 68 C2 1E 05 08 E8 C9 B0 FF FF
+
+psrad_2 ... ok
+. 0 804D347 5
+. 83 C4 10 EB 52
+
+. 0 804D39E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AA4 5
+. E8 FD C8 FF FF
+
+. 0 804D3A6 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 67 45 23 01 C7 45 DC EF CD AB 89 83 EC 08 6A 01 68 E0 39 05 08 E8 F7 AF FF FF
+
+. 0 804D3F9 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 804D400 34
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F E2 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 56 34 12 00 75 1B
+
+. 0 804D422 9
+. 81 7D D4 DE BC 9A F8 75 12
+
+. 0 804D42B 13
+. 83 EC 0C 68 FA 1E 05 08 E8 D8 AF FF FF
+
+psrad_3 ... ok
+. 0 804D438 5
+. 83 C4 10 EB 52
+
+. 0 804D48F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AA9 5
+. E8 E9 C9 FF FF
+
+. 0 804D497 65
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 23 01 66 C7 45 E2 67 45 66 C7 45 E4 AB 89 66 C7 45 E6 EF CD 83 EC 08 6A 01 68 E0 39 05 08 E8 18 AF FF FF
+
+. 0 804D4D8 11
+. 83 C4 10 85 C0 0F 85 D6 00 00 00
+
+. 0 804D4E3 32
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 0F 71 E0 04 0F 7F 45 D8 DD A5 68 FF FF FF 66 83 7D D8 12 75 2D
+
+. 0 804D503 8
+. 66 81 7D DA 56 04 75 25
+
+. 0 804D50B 8
+. 66 81 7D DC 9A F8 75 1D
+
+. 0 804D513 8
+. 66 81 7D DE DE FC 75 15
+
+. 0 804D51B 13
+. 83 EC 0C 68 32 1F 05 08 E8 E8 AE FF FF
+
+psraw_1 ... ok
+. 0 804D528 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 804D5C9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AAE 5
+. E8 1E CB FF FF
+
+. 0 804D5D1 93
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 23 01 66 C7 45 DA 67 45 66 C7 45 DC AB 89 66 C7 45 DE EF CD 83 EC 08 6A 01 68 E0 39 05 08 E8 C2 AD FF FF
+
+. 0 804D62E 11
+. 83 C4 10 85 C0 0F 85 D9 00 00 00
+
+. 0 804D639 35
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F E1 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 12 75 2D
+
+. 0 804D65C 8
+. 66 81 7D D2 56 04 75 25
+
+. 0 804D664 8
+. 66 81 7D D4 9A F8 75 1D
+
+. 0 804D66C 8
+. 66 81 7D D6 DE FC 75 15
+
+. 0 804D674 13
+. 83 EC 0C 68 6A 1F 05 08 E8 8F AD FF FF
+
+psraw_2 ... ok
+. 0 804D681 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 804D722 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AB3 5
+. E8 72 CC FF FF
+
+. 0 804D72A 93
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 23 01 66 C7 45 DA 67 45 66 C7 45 DC AB 89 66 C7 45 DE EF CD 83 EC 08 6A 01 68 E0 39 05 08 E8 69 AC FF FF
+
+. 0 804D787 11
+. 83 C4 10 85 C0 0F 85 D6 00 00 00
+
+. 0 804D792 32
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F E1 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 12 75 2D
+
+. 0 804D7B2 8
+. 66 81 7D D2 56 04 75 25
+
+. 0 804D7BA 8
+. 66 81 7D D4 9A F8 75 1D
+
+. 0 804D7C2 8
+. 66 81 7D D6 DE FC 75 15
+
+. 0 804D7CA 13
+. 83 EC 0C 68 A2 1F 05 08 E8 39 AC FF FF
+
+psraw_3 ... ok
+. 0 804D7D7 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 804D878 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AB8 5
+. E8 C3 CD FF FF
+
+. 0 804D880 55
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 67 45 23 01 C7 45 E4 EF CD AB 89 83 EC 08 6A 01 68 E0 39 05 08 E8 39 AB FF FF
+
+. 0 804D8B7 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 804D8BE 34
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 0F 72 D0 04 0F 7F 45 D8 DD A5 68 FF FF FF 81 7D D8 56 34 12 00 75 1B
+
+. 0 804D8E0 9
+. 81 7D DC DE BC 9A 08 75 12
+
+. 0 804D8E9 13
+. 83 EC 0C 68 DA 1F 05 08 E8 1A AB FF FF
+
+psrld_1 ... ok
+. 0 804D8F6 5
+. 83 C4 10 EB 52
+
+. 0 804D94D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050ABD 5
+. E8 93 CE FF FF
+
+. 0 804D955 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 67 45 23 01 C7 45 DC EF CD AB 89 83 EC 08 6A 01 68 E0 39 05 08 E8 48 AA FF FF
+
+. 0 804D9A8 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804D9B3 37
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F D2 C8 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 56 34 12 00 75 1B
+
+. 0 804D9D8 9
+. 81 7D D4 DE BC 9A 08 75 12
+
+. 0 804D9E1 13
+. 83 EC 0C 68 12 20 05 08 E8 22 AA FF FF
+
+psrld_2 ... ok
+. 0 804D9EE 5
+. 83 C4 10 EB 52
+
+. 0 804DA45 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AC2 5
+. E8 86 CF FF FF
+
+. 0 804DA4D 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 67 45 23 01 C7 45 DC EF CD AB 89 83 EC 08 6A 01 68 E0 39 05 08 E8 50 A9 FF FF
+
+. 0 804DAA0 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 804DAA7 34
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F D2 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 56 34 12 00 75 1B
+
+. 0 804DAC9 9
+. 81 7D D4 DE BC 9A 08 75 12
+
+. 0 804DAD2 13
+. 83 EC 0C 68 4A 20 05 08 E8 31 A9 FF FF
+
+psrld_3 ... ok
+. 0 804DADF 5
+. 83 C4 10 EB 52
+
+. 0 804DB36 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AC7 5
+. E8 72 D0 FF FF
+
+. 0 804DB3E 55
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 7B A8 FF FF
+
+. 0 804DB75 7
+. 83 C4 10 85 C0 75 74
+
+. 0 804DB7C 48
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 0F 73 D0 04 0F 7F 45 D8 DD A5 68 FF FF FF 8B 45 DC 35 56 34 12 00 8B 55 D8 81 F2 DE BC 9A 78 09 D0 85 C0 75 12
+
+. 0 804DBAC 13
+. 83 EC 0C 68 82 20 05 08 E8 57 A8 FF FF
+
+psrlq_1 ... ok
+. 0 804DBB9 5
+. 83 C4 10 EB 42
+
+. 0 804DC00 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050ACC 5
+. E8 37 D1 FF FF
+
+. 0 804DC08 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 95 A7 FF FF
+
+. 0 804DC5B 7
+. 83 C4 10 85 C0 75 77
+
+. 0 804DC62 51
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F D3 C8 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 56 34 12 00 8B 55 D0 81 F2 DE BC 9A 78 09 D0 85 C0 75 12
+
+. 0 804DC95 13
+. 83 EC 0C 68 BA 20 05 08 E8 6E A7 FF FF
+
+psrlq_2 ... ok
+. 0 804DCA2 5
+. 83 C4 10 EB 42
+
+. 0 804DCE9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AD1 5
+. E8 1B D2 FF FF
+
+. 0 804DCF1 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 AC A6 FF FF
+
+. 0 804DD44 7
+. 83 C4 10 85 C0 75 74
+
+. 0 804DD4B 48
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F D3 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 56 34 12 00 8B 55 D0 81 F2 DE BC 9A 78 09 D0 85 C0 75 12
+
+. 0 804DD7B 13
+. 83 EC 0C 68 F2 20 05 08 E8 88 A6 FF FF
+
+psrlq_3 ... ok
+. 0 804DD88 5
+. 83 C4 10 EB 42
+
+. 0 804DDCF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AD6 5
+. E8 FC D2 FF FF
+
+. 0 804DDD7 65
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 23 01 66 C7 45 E2 67 45 66 C7 45 E4 AB 89 66 C7 45 E6 EF CD 83 EC 08 6A 01 68 E0 39 05 08 E8 D8 A5 FF FF
+
+. 0 804DE18 11
+. 83 C4 10 85 C0 0F 85 D6 00 00 00
+
+. 0 804DE23 32
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 0F 71 D0 04 0F 7F 45 D8 DD A5 68 FF FF FF 66 83 7D D8 12 75 2D
+
+. 0 804DE43 8
+. 66 81 7D DA 56 04 75 25
+
+. 0 804DE4B 8
+. 66 81 7D DC 9A 08 75 1D
+
+. 0 804DE53 8
+. 66 81 7D DE DE 0C 75 15
+
+. 0 804DE5B 13
+. 83 EC 0C 68 2A 21 05 08 E8 A8 A5 FF FF
+
+psrlw_1 ... ok
+. 0 804DE68 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 804DF09 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050ADB 5
+. E8 31 D4 FF FF
+
+. 0 804DF11 93
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 23 01 66 C7 45 DA 67 45 66 C7 45 DC AB 89 66 C7 45 DE EF CD 83 EC 08 6A 01 68 E0 39 05 08 E8 82 A4 FF FF
+
+. 0 804DF6E 11
+. 83 C4 10 85 C0 0F 85 D9 00 00 00
+
+. 0 804DF79 35
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F D1 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 12 75 2D
+
+. 0 804DF9C 8
+. 66 81 7D D2 56 04 75 25
+
+. 0 804DFA4 8
+. 66 81 7D D4 9A 08 75 1D
+
+. 0 804DFAC 8
+. 66 81 7D D6 DE 0C 75 15
+
+. 0 804DFB4 13
+. 83 EC 0C 68 62 21 05 08 E8 4F A4 FF FF
+
+psrlw_2 ... ok
+. 0 804DFC1 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 804E062 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AE0 5
+. E8 85 D5 FF FF
+
+. 0 804E06A 93
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 04 00 00 00 C7 45 E4 00 00 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 23 01 66 C7 45 DA 67 45 66 C7 45 DC AB 89 66 C7 45 DE EF CD 83 EC 08 6A 01 68 E0 39 05 08 E8 29 A3 FF FF
+
+. 0 804E0C7 11
+. 83 C4 10 85 C0 0F 85 D6 00 00 00
+
+. 0 804E0D2 32
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F D1 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 12 75 2D
+
+. 0 804E0F2 8
+. 66 81 7D D2 56 04 75 25
+
+. 0 804E0FA 8
+. 66 81 7D D4 9A 08 75 1D
+
+. 0 804E102 8
+. 66 81 7D D6 DE 0C 75 15
+
+. 0 804E10A 13
+. 83 EC 0C 68 9A 21 05 08 E8 F9 A2 FF FF
+
+psrlw_3 ... ok
+. 0 804E117 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 804E1B8 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AE5 5
+. E8 D6 D6 FF FF
+
+. 0 804E1C0 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 08 C6 45 E1 07 C6 45 E2 06 C6 45 E3 05 C6 45 E4 04 C6 45 E5 03 C6 45 E6 02 C6 45 E7 01 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0C C6 45 D9 22 C6 45 DA 38 C6 45 DB 4E C6 45 DC 15 C6 45 DD 2B C6 45 DE 41 C6 45 DF 57 83 EC 08 6A 01 68 E0 39 05 08 E8 B9 A1 FF FF
+
+. 0 804E237 11
+. 83 C4 10 85 C0 0F 85 2B 01 00 00
+
+. 0 804E242 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F F8 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 04 75 3F
+
+. 0 804E264 6
+. 80 7D D1 1B 75 39
+
+. 0 804E26A 6
+. 80 7D D2 32 75 33
+
+. 0 804E270 6
+. 80 7D D3 49 75 2D
+
+. 0 804E276 6
+. 80 7D D4 11 75 27
+
+. 0 804E27C 6
+. 80 7D D5 28 75 21
+
+. 0 804E282 6
+. 80 7D D6 3F 75 1B
+
+. 0 804E288 6
+. 80 7D D7 56 75 15
+
+. 0 804E28E 13
+. 83 EC 0C 68 D2 21 05 08 E8 75 A1 FF FF
+
+psubb_1 ... ok
+. 0 804E29B 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 804E37D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AEA 5
+. E8 96 D8 FF FF
+
+. 0 804E385 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 08 C6 45 E1 07 C6 45 E2 06 C6 45 E3 05 C6 45 E4 04 C6 45 E5 03 C6 45 E6 02 C6 45 E7 01 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0C C6 45 D9 22 C6 45 DA 38 C6 45 DB 4E C6 45 DC 15 C6 45 DD 2B C6 45 DE 41 C6 45 DF 57 83 EC 08 6A 01 68 E0 39 05 08 E8 F4 9F FF FF
+
+. 0 804E3FC 11
+. 83 C4 10 85 C0 0F 85 28 01 00 00
+
+. 0 804E407 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F F8 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 04 75 3F
+
+. 0 804E426 6
+. 80 7D D1 1B 75 39
+
+. 0 804E42C 6
+. 80 7D D2 32 75 33
+
+. 0 804E432 6
+. 80 7D D3 49 75 2D
+
+. 0 804E438 6
+. 80 7D D4 11 75 27
+
+. 0 804E43E 6
+. 80 7D D5 28 75 21
+
+. 0 804E444 6
+. 80 7D D6 3F 75 1B
+
+. 0 804E44A 6
+. 80 7D D7 56 75 15
+
+. 0 804E450 13
+. 83 EC 0C 68 0A 22 05 08 E8 B3 9F FF FF
+
+psubb_2 ... ok
+. 0 804E45D 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 804E53F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AEF 5
+. E8 53 DA FF FF
+
+. 0 804E547 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 3D 22 00 00 C7 45 E4 E1 10 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 4E 61 BC 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 E0 39 05 08 E8 56 9E FF FF
+
+. 0 804E59A 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804E5A5 37
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F FA C8 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 11 3F BC 00 75 1B
+
+. 0 804E5CA 9
+. 81 7D D4 D0 6E 39 05 75 12
+
+. 0 804E5D3 13
+. 83 EC 0C 68 42 22 05 08 E8 30 9E FF FF
+
+psubd_1 ... ok
+. 0 804E5E0 5
+. 83 C4 10 EB 52
+
+. 0 804E637 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AF4 5
+. E8 46 DB FF FF
+
+. 0 804E63F 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 3D 22 00 00 C7 45 E4 E1 10 00 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 4E 61 BC 00 C7 45 DC B1 7F 39 05 83 EC 08 6A 01 68 E0 39 05 08 E8 5E 9D FF FF
+
+. 0 804E692 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 804E699 34
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F FA 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 11 3F BC 00 75 1B
+
+. 0 804E6BB 9
+. 81 7D D4 D0 6E 39 05 75 12
+
+. 0 804E6C4 13
+. 83 EC 0C 68 7A 22 05 08 E8 3F 9D FF FF
+
+psubd_2 ... ok
+. 0 804E6D1 5
+. 83 C4 10 EB 52
+
+. 0 804E728 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AF9 5
+. E8 32 DC FF FF
+
+. 0 804E730 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 CE C6 45 E1 32 C6 45 E2 D8 C6 45 E3 28 C6 45 E4 E2 C6 45 E5 1E C6 45 E6 EC C6 45 E7 14 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 19 C6 45 D9 E7 C6 45 DA 32 C6 45 DB CE C6 45 DC 64 C6 45 DD 9C C6 45 DE 7D C6 45 DF 83 83 EC 08 6A 01 68 E0 39 05 08 E8 49 9C FF FF
+
+. 0 804E7A7 11
+. 83 C4 10 85 C0 0F 85 2B 01 00 00
+
+. 0 804E7B2 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F E8 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 4B 75 3F
+
+. 0 804E7D4 6
+. 80 7D D1 B5 75 39
+
+. 0 804E7DA 6
+. 80 7D D2 5A 75 33
+
+. 0 804E7E0 6
+. 80 7D D3 A6 75 2D
+
+. 0 804E7E6 6
+. 80 7D D4 7F 75 27
+
+. 0 804E7EC 6
+. 80 7D D5 80 75 21
+
+. 0 804E7F2 6
+. 80 7D D6 7F 75 1B
+
+. 0 804E7F8 6
+. 80 7D D7 80 75 15
+
+. 0 804E7FE 13
+. 83 EC 0C 68 B2 22 05 08 E8 05 9C FF FF
+
+psubsb_1 ... ok
+. 0 804E80B 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 804E8ED 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050AFE 5
+. E8 F2 DD FF FF
+
+. 0 804E8F5 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 CE C6 45 E1 32 C6 45 E2 D8 C6 45 E3 28 C6 45 E4 E2 C6 45 E5 1E C6 45 E6 EC C6 45 E7 14 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 19 C6 45 D9 E7 C6 45 DA 32 C6 45 DB CE C6 45 DC 64 C6 45 DD 9C C6 45 DE 7D C6 45 DF 83 83 EC 08 6A 01 68 E0 39 05 08 E8 84 9A FF FF
+
+. 0 804E96C 11
+. 83 C4 10 85 C0 0F 85 28 01 00 00
+
+. 0 804E977 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F E8 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 4B 75 3F
+
+. 0 804E996 6
+. 80 7D D1 B5 75 39
+
+. 0 804E99C 6
+. 80 7D D2 5A 75 33
+
+. 0 804E9A2 6
+. 80 7D D3 A6 75 2D
+
+. 0 804E9A8 6
+. 80 7D D4 7F 75 27
+
+. 0 804E9AE 6
+. 80 7D D5 80 75 21
+
+. 0 804E9B4 6
+. 80 7D D6 7F 75 1B
+
+. 0 804E9BA 6
+. 80 7D D7 80 75 15
+
+. 0 804E9C0 13
+. 83 EC 0C 68 ED 22 05 08 E8 43 9A FF FF
+
+psubsb_2 ... ok
+. 0 804E9CD 8
+. 83 C4 10 E9 DA 00 00 00
+
+. 0 804EAAF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B03 5
+. E8 AF DF FF FF
+
+. 0 804EAB7 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 6F 82 66 C7 45 E2 91 7D 66 C7 45 E4 39 30 66 C7 45 E6 C7 CF C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 39 30 66 C7 45 DA C7 CF 66 C7 45 DC 91 7D 66 C7 45 DE 6F 82 83 EC 08 6A 01 68 E0 39 05 08 E8 D2 98 FF FF
+
+. 0 804EB1E 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 804EB29 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F E9 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 FF 7F 75 2D
+
+. 0 804EB4D 8
+. 66 81 7D D2 00 80 75 25
+
+. 0 804EB55 8
+. 66 81 7D D4 58 4D 75 1D
+
+. 0 804EB5D 8
+. 66 81 7D D6 A8 B2 75 15
+
+. 0 804EB65 13
+. 83 EC 0C 68 28 23 05 08 E8 9E 98 FF FF
+
+psubsw_1 ... ok
+. 0 804EB72 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 804EC04 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B08 5
+. E8 FF E0 FF FF
+
+. 0 804EC0C 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 6F 82 66 C7 45 E2 91 7D 66 C7 45 E4 39 30 66 C7 45 E6 C7 CF C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 39 30 66 C7 45 DA C7 CF 66 C7 45 DC 91 7D 66 C7 45 DE 6F 82 83 EC 08 6A 01 68 E0 39 05 08 E8 7D 97 FF FF
+
+. 0 804EC73 11
+. 83 C4 10 85 C0 0F 85 C8 00 00 00
+
+. 0 804EC7E 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F E9 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 FF 7F 75 2D
+
+. 0 804EC9F 8
+. 66 81 7D D2 00 80 75 25
+
+. 0 804ECA7 8
+. 66 81 7D D4 58 4D 75 1D
+
+. 0 804ECAF 8
+. 66 81 7D D6 A8 B2 75 15
+
+. 0 804ECB7 13
+. 83 EC 0C 68 63 23 05 08 E8 4C 97 FF FF
+
+psubsw_2 ... ok
+. 0 804ECC4 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 804ED56 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B0D 5
+. E8 4C E2 FF FF
+
+. 0 804ED5E 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0B C6 45 E1 16 C6 45 E2 21 C6 45 E3 2C C6 45 E4 37 C6 45 E5 42 C6 45 E6 4D C6 45 E7 58 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 58 C6 45 D9 4D C6 45 DA 42 C6 45 DB 37 C6 45 DC 2C C6 45 DD 21 C6 45 DE 16 C6 45 DF 0B 83 EC 08 6A 01 68 E0 39 05 08 E8 1B 96 FF FF
+
+. 0 804EDD5 11
+. 83 C4 10 85 C0 0F 85 4B 01 00 00
+
+. 0 804EDE0 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F D8 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 4D 75 3F
+
+. 0 804EE02 6
+. 80 7D D1 37 75 39
+
+. 0 804EE08 6
+. 80 7D D2 21 75 33
+
+. 0 804EE0E 6
+. 80 7D D3 0B 75 2D
+
+. 0 804EE14 6
+. 80 7D D4 00 75 27
+
+. 0 804EE1A 6
+. 80 7D D5 00 75 21
+
+. 0 804EE20 6
+. 80 7D D6 00 75 1B
+
+. 0 804EE26 6
+. 80 7D D7 00 75 15
+
+. 0 804EE2C 13
+. 83 EC 0C 68 9E 23 05 08 E8 D7 95 FF FF
+
+psubusb_1 ... ok
+. 0 804EE39 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 804EF3B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B12 5
+. E8 2C E4 FF FF
+
+. 0 804EF43 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0B C6 45 E1 16 C6 45 E2 21 C6 45 E3 2C C6 45 E4 37 C6 45 E5 42 C6 45 E6 4D C6 45 E7 58 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 58 C6 45 D9 4D C6 45 DA 42 C6 45 DB 37 C6 45 DC 2C C6 45 DD 21 C6 45 DE 16 C6 45 DF 0B 83 EC 08 6A 01 68 E0 39 05 08 E8 36 94 FF FF
+
+. 0 804EFBA 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 804EFC5 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F D8 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 4D 75 3F
+
+. 0 804EFE4 6
+. 80 7D D1 37 75 39
+
+. 0 804EFEA 6
+. 80 7D D2 21 75 33
+
+. 0 804EFF0 6
+. 80 7D D3 0B 75 2D
+
+. 0 804EFF6 6
+. 80 7D D4 00 75 27
+
+. 0 804EFFC 6
+. 80 7D D5 00 75 21
+
+. 0 804F002 6
+. 80 7D D6 00 75 1B
+
+. 0 804F008 6
+. 80 7D D7 00 75 15
+
+. 0 804F00E 13
+. 83 EC 0C 68 DC 23 05 08 E8 F5 93 FF FF
+
+psubusb_2 ... ok
+. 0 804F01B 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 804F11D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B17 5
+. E8 09 E6 FF FF
+
+. 0 804F125 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 62 04 66 C7 45 E2 10 0D 66 C7 45 E4 BE 15 66 C7 45 E6 6C 1E C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 AD 22 66 C7 45 DA FF 19 66 C7 45 DC 51 11 66 C7 45 DE A3 08 83 EC 08 6A 01 68 E0 39 05 08 E8 64 92 FF FF
+
+. 0 804F18C 11
+. 83 C4 10 85 C0 0F 85 D5 00 00 00
+
+. 0 804F197 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F D9 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 4B 1E 75 2B
+
+. 0 804F1BB 8
+. 66 81 7D D2 EF 0C 75 23
+
+. 0 804F1C3 7
+. 66 83 7D D4 00 75 1C
+
+. 0 804F1CA 7
+. 66 83 7D D6 00 75 15
+
+. 0 804F1D1 13
+. 83 EC 0C 68 1A 24 05 08 E8 32 92 FF FF
+
+psubusw_1 ... ok
+. 0 804F1DE 8
+. 83 C4 10 E9 96 00 00 00
+
+. 0 804F27C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B1C 5
+. E8 63 E7 FF FF
+
+. 0 804F284 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 62 04 66 C7 45 E2 10 0D 66 C7 45 E4 BE 15 66 C7 45 E6 6C 1E C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 AD 22 66 C7 45 DA FF 19 66 C7 45 DC 51 11 66 C7 45 DE A3 08 83 EC 08 6A 01 68 E0 39 05 08 E8 05 91 FF FF
+
+. 0 804F2EB 11
+. 83 C4 10 85 C0 0F 85 D2 00 00 00
+
+. 0 804F2F6 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F D9 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 4B 1E 75 2B
+
+. 0 804F317 8
+. 66 81 7D D2 EF 0C 75 23
+
+. 0 804F31F 7
+. 66 83 7D D4 00 75 1C
+
+. 0 804F326 7
+. 66 83 7D D6 00 75 15
+
+. 0 804F32D 13
+. 83 EC 0C 68 58 24 05 08 E8 D6 90 FF FF
+
+psubusw_2 ... ok
+. 0 804F33A 8
+. 83 C4 10 E9 96 00 00 00
+
+. 0 804F3D8 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B21 5
+. E8 BA E8 FF FF
+
+. 0 804F3E0 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 57 00 66 C7 45 E2 41 00 66 C7 45 E4 2B 00 66 C7 45 E6 15 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 D2 04 66 C7 45 DA 2E 16 66 C7 45 DC E1 10 66 C7 45 DE 3D 22 83 EC 08 6A 01 68 E0 39 05 08 E8 A9 8F FF FF
+
+. 0 804F447 11
+. 83 C4 10 85 C0 0F 85 CB 00 00 00
+
+. 0 804F452 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F F9 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 7B 04 75 2D
+
+. 0 804F476 8
+. 66 81 7D D2 ED 15 75 25
+
+. 0 804F47E 8
+. 66 81 7D D4 B6 10 75 1D
+
+. 0 804F486 8
+. 66 81 7D D6 28 22 75 15
+
+. 0 804F48E 13
+. 83 EC 0C 68 96 24 05 08 E8 75 8F FF FF
+
+psubw_1 ... ok
+. 0 804F49B 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 804F52D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B26 5
+. E8 0A EA FF FF
+
+. 0 804F535 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 57 00 66 C7 45 E2 41 00 66 C7 45 E4 2B 00 66 C7 45 E6 15 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 D2 04 66 C7 45 DA 2E 16 66 C7 45 DC E1 10 66 C7 45 DE 3D 22 83 EC 08 6A 01 68 E0 39 05 08 E8 54 8E FF FF
+
+. 0 804F59C 11
+. 83 C4 10 85 C0 0F 85 C8 00 00 00
+
+. 0 804F5A7 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F F9 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 7B 04 75 2D
+
+. 0 804F5C8 8
+. 66 81 7D D2 ED 15 75 25
+
+. 0 804F5D0 8
+. 66 81 7D D4 B6 10 75 1D
+
+. 0 804F5D8 8
+. 66 81 7D D6 28 22 75 15
+
+. 0 804F5E0 13
+. 83 EC 0C 68 CE 24 05 08 E8 23 8E FF FF
+
+psubw_2 ... ok
+. 0 804F5ED 8
+. 83 C4 10 E9 8A 00 00 00
+
+. 0 804F67F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B2B 5
+. E8 57 EB FF FF
+
+. 0 804F687 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0C C6 45 E1 22 C6 45 E2 38 C6 45 E3 4E C6 45 E4 15 C6 45 E5 2B C6 45 E6 41 C6 45 E7 57 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0B C6 45 D9 16 C6 45 DA 21 C6 45 DB 2C C6 45 DC 37 C6 45 DD 42 C6 45 DE 4D C6 45 DF 58 83 EC 08 6A 01 68 E0 39 05 08 E8 F2 8C FF FF
+
+. 0 804F6FE 11
+. 83 C4 10 85 C0 0F 85 4B 01 00 00
+
+. 0 804F709 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 68 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 37 75 3F
+
+. 0 804F72B 6
+. 80 7D D1 15 75 39
+
+. 0 804F731 6
+. 80 7D D2 42 75 33
+
+. 0 804F737 6
+. 80 7D D3 2B 75 2D
+
+. 0 804F73D 6
+. 80 7D D4 4D 75 27
+
+. 0 804F743 6
+. 80 7D D5 41 75 21
+
+. 0 804F749 6
+. 80 7D D6 58 75 1B
+
+. 0 804F74F 6
+. 80 7D D7 57 75 15
+
+. 0 804F755 13
+. 83 EC 0C 68 06 25 05 08 E8 AE 8C FF FF
+
+punpckhbw_1 ... ok
+. 0 804F762 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 804F864 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B30 5
+. E8 37 ED FF FF
+
+. 0 804F86C 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0C C6 45 E1 22 C6 45 E2 38 C6 45 E3 4E C6 45 E4 15 C6 45 E5 2B C6 45 E6 41 C6 45 E7 57 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0B C6 45 D9 16 C6 45 DA 21 C6 45 DB 2C C6 45 DC 37 C6 45 DD 42 C6 45 DE 4D C6 45 DF 58 83 EC 08 6A 01 68 E0 39 05 08 E8 0D 8B FF FF
+
+. 0 804F8E3 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 804F8EE 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 68 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 37 75 3F
+
+. 0 804F90D 6
+. 80 7D D1 15 75 39
+
+. 0 804F913 6
+. 80 7D D2 42 75 33
+
+. 0 804F919 6
+. 80 7D D3 2B 75 2D
+
+. 0 804F91F 6
+. 80 7D D4 4D 75 27
+
+. 0 804F925 6
+. 80 7D D5 41 75 21
+
+. 0 804F92B 6
+. 80 7D D6 58 75 1B
+
+. 0 804F931 6
+. 80 7D D7 57 75 15
+
+. 0 804F937 13
+. 83 EC 0C 68 4A 25 05 08 E8 CC 8A FF FF
+
+punpckhbw_2 ... ok
+. 0 804F944 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 804FA46 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B35 5
+. E8 14 EF FF FF
+
+. 0 804FA4E 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 4E 61 BC 00 C7 45 E4 AB 18 47 01 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 30 41 AB 00 C7 45 DC 4C 6C 51 03 83 EC 08 6A 01 68 E0 39 05 08 E8 4F 89 FF FF
+
+. 0 804FAA1 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804FAAC 37
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 6A C8 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 4C 6C 51 03 75 1B
+
+. 0 804FAD1 9
+. 81 7D D4 AB 18 47 01 75 12
+
+. 0 804FADA 13
+. 83 EC 0C 68 8E 25 05 08 E8 29 89 FF FF
+
+punpckhdq_1 ... ok
+. 0 804FAE7 5
+. 83 C4 10 EB 52
+
+. 0 804FB3E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B3A 5
+. E8 07 F0 FF FF
+
+. 0 804FB46 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 4E 61 BC 00 C7 45 E4 AB 18 47 01 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 30 41 AB 00 C7 45 DC 4C 6C 51 03 83 EC 08 6A 01 68 E0 39 05 08 E8 57 88 FF FF
+
+. 0 804FB99 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 804FBA0 34
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 6A 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 4C 6C 51 03 75 1B
+
+. 0 804FBC2 9
+. 81 7D D4 AB 18 47 01 75 12
+
+. 0 804FBCB 13
+. 83 EC 0C 68 D2 25 05 08 E8 38 88 FF FF
+
+punpckhdq_2 ... ok
+. 0 804FBD8 5
+. 83 C4 10 EB 52
+
+. 0 804FC2F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B3F 5
+. E8 F3 F0 FF FF
+
+. 0 804FC37 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 5F 08 66 C7 45 E6 BB 19 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 62 04 66 C7 45 DA 10 0D 66 C7 45 DC BE 15 66 C7 45 DE 6C 1E 83 EC 08 6A 01 68 E0 39 05 08 E8 52 87 FF FF
+
+. 0 804FC9E 11
+. 83 C4 10 85 C0 0F 85 DD 00 00 00
+
+. 0 804FCA9 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 69 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 BE 15 75 2D
+
+. 0 804FCCD 8
+. 66 81 7D D2 5F 08 75 25
+
+. 0 804FCD5 8
+. 66 81 7D D4 6C 1E 75 1D
+
+. 0 804FCDD 8
+. 66 81 7D D6 BB 19 75 15
+
+. 0 804FCE5 13
+. 83 EC 0C 68 16 26 05 08 E8 1E 87 FF FF
+
+punpckhwd_1 ... ok
+. 0 804FCF2 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 804FD96 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B44 5
+. E8 55 F2 FF FF
+
+. 0 804FD9E 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 5F 08 66 C7 45 E6 BB 19 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 62 04 66 C7 45 DA 10 0D 66 C7 45 DC BE 15 66 C7 45 DE 6C 1E 83 EC 08 6A 01 68 E0 39 05 08 E8 EB 85 FF FF
+
+. 0 804FE05 11
+. 83 C4 10 85 C0 0F 85 DA 00 00 00
+
+. 0 804FE10 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 69 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 BE 15 75 2D
+
+. 0 804FE31 8
+. 66 81 7D D2 5F 08 75 25
+
+. 0 804FE39 8
+. 66 81 7D D4 6C 1E 75 1D
+
+. 0 804FE41 8
+. 66 81 7D D6 BB 19 75 15
+
+. 0 804FE49 13
+. 83 EC 0C 68 5A 26 05 08 E8 BA 85 FF FF
+
+punpckhwd_2 ... ok
+. 0 804FE56 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 804FEFA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B49 5
+. E8 B4 F3 FF FF
+
+. 0 804FF02 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0C C6 45 E1 22 C6 45 E2 38 C6 45 E3 4E C6 45 E4 15 C6 45 E5 2B C6 45 E6 41 C6 45 E7 57 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0B C6 45 D9 16 C6 45 DA 21 C6 45 DB 2C C6 45 DC 37 C6 45 DD 42 C6 45 DE 4D C6 45 DF 58 83 EC 08 6A 01 68 E0 39 05 08 E8 77 84 FF FF
+
+. 0 804FF79 11
+. 83 C4 10 85 C0 0F 85 4B 01 00 00
+
+. 0 804FF84 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 60 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 0B 75 3F
+
+. 0 804FFA6 6
+. 80 7D D1 0C 75 39
+
+. 0 804FFAC 6
+. 80 7D D2 16 75 33
+
+. 0 804FFB2 6
+. 80 7D D3 22 75 2D
+
+. 0 804FFB8 6
+. 80 7D D4 21 75 27
+
+. 0 804FFBE 6
+. 80 7D D5 38 75 21
+
+. 0 804FFC4 6
+. 80 7D D6 2C 75 1B
+
+. 0 804FFCA 6
+. 80 7D D7 4E 75 15
+
+. 0 804FFD0 13
+. 83 EC 0C 68 9E 26 05 08 E8 33 84 FF FF
+
+punpcklbw_1 ... ok
+. 0 804FFDD 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 80500DF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B4E 5
+. E8 94 F5 FF FF
+
+. 0 80500E7 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0C C6 45 E1 22 C6 45 E2 38 C6 45 E3 4E C6 45 E4 15 C6 45 E5 2B C6 45 E6 41 C6 45 E7 57 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0B C6 45 D9 16 C6 45 DA 21 C6 45 DB 2C C6 45 DC 37 C6 45 DD 42 C6 45 DE 4D C6 45 DF 58 83 EC 08 6A 01 68 E0 39 05 08 E8 92 82 FF FF
+
+. 0 805015E 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 8050169 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 60 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 0B 75 3F
+
+. 0 8050188 6
+. 80 7D D1 0C 75 39
+
+. 0 805018E 6
+. 80 7D D2 16 75 33
+
+. 0 8050194 6
+. 80 7D D3 22 75 2D
+
+. 0 805019A 6
+. 80 7D D4 21 75 27
+
+. 0 80501A0 6
+. 80 7D D5 38 75 21
+
+. 0 80501A6 6
+. 80 7D D6 2C 75 1B
+
+. 0 80501AC 6
+. 80 7D D7 4E 75 15
+
+. 0 80501B2 13
+. 83 EC 0C 68 E2 26 05 08 E8 51 82 FF FF
+
+punpcklbw_2 ... ok
+. 0 80501BF 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 80502C1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B53 5
+. E8 71 F7 FF FF
+
+. 0 80502C9 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 4E 61 BC 00 C7 45 E4 AB 18 47 01 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 30 41 AB 00 C7 45 DC 4C 6C 51 03 83 EC 08 6A 01 68 E0 39 05 08 E8 D4 80 FF FF
+
+. 0 805031C 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8050327 37
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 62 C8 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 30 41 AB 00 75 1B
+
+. 0 805034C 9
+. 81 7D D4 4E 61 BC 00 75 12
+
+. 0 8050355 13
+. 83 EC 0C 68 26 27 05 08 E8 AE 80 FF FF
+
+punpckldq_1 ... ok
+. 0 8050362 5
+. 83 C4 10 EB 52
+
+. 0 80503B9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B58 5
+. E8 64 F8 FF FF
+
+. 0 80503C1 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 4E 61 BC 00 C7 45 E4 AB 18 47 01 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 30 41 AB 00 C7 45 DC 4C 6C 51 03 83 EC 08 6A 01 68 E0 39 05 08 E8 DC 7F FF FF
+
+. 0 8050414 7
+. 83 C4 10 85 C0 75 7F
+
+. 0 805041B 34
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 62 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 81 7D D0 30 41 AB 00 75 1B
+
+. 0 805043D 9
+. 81 7D D4 4E 61 BC 00 75 12
+
+. 0 8050446 13
+. 83 EC 0C 68 6A 27 05 08 E8 BD 7F FF FF
+
+punpckldq_2 ... ok
+. 0 8050453 5
+. 83 C4 10 EB 52
+
+. 0 80504AA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B5D 5
+. E8 50 F9 FF FF
+
+. 0 80504B2 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 5F 08 66 C7 45 E6 BB 19 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 62 04 66 C7 45 DA 10 0D 66 C7 45 DC BE 15 66 C7 45 DE 6C 1E 83 EC 08 6A 01 68 E0 39 05 08 E8 D7 7E FF FF
+
+. 0 8050519 11
+. 83 C4 10 85 C0 0F 85 DD 00 00 00
+
+. 0 8050524 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 61 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 62 04 75 2D
+
+. 0 8050548 8
+. 66 81 7D D2 D2 04 75 25
+
+. 0 8050550 8
+. 66 81 7D D4 10 0D 75 1D
+
+. 0 8050558 8
+. 66 81 7D D6 2E 16 75 15
+
+. 0 8050560 13
+. 83 EC 0C 68 AE 27 05 08 E8 A3 7E FF FF
+
+punpcklwd_1 ... ok
+. 0 805056D 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 8050611 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B62 5
+. E8 B2 FA FF FF
+
+. 0 8050619 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 5F 08 66 C7 45 E6 BB 19 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 62 04 66 C7 45 DA 10 0D 66 C7 45 DC BE 15 66 C7 45 DE 6C 1E 83 EC 08 6A 01 68 E0 39 05 08 E8 70 7D FF FF
+
+. 0 8050680 11
+. 83 C4 10 85 C0 0F 85 DA 00 00 00
+
+. 0 805068B 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 61 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 62 04 75 2D
+
+. 0 80506AC 8
+. 66 81 7D D2 D2 04 75 25
+
+. 0 80506B4 8
+. 66 81 7D D4 10 0D 75 1D
+
+. 0 80506BC 8
+. 66 81 7D D6 2E 16 75 15
+
+. 0 80506C4 13
+. 83 EC 0C 68 F2 27 05 08 E8 3F 7D FF FF
+
+punpcklwd_2 ... ok
+. 0 80506D1 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 8050775 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B67 5
+. E8 11 FC FF FF
+
+. 0 805077D 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 20 64 A8 EC C7 45 E4 31 75 B9 FD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 20 7C FF FF
+
+. 0 80507D0 7
+. 83 C4 10 85 C0 75 77
+
+. 0 80507D7 51
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F EF C8 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 56 30 9A FC 8B 55 D0 81 F2 CF A9 03 65 09 D0 85 C0 75 12
+
+. 0 805080A 13
+. 83 EC 0C 68 36 28 05 08 E8 F9 7B FF FF
+
+pxor_1 ... ok
+. 0 8050817 5
+. 83 C4 10 EB 42
+
+. 0 805085E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B6C 5
+. E8 F5 FC FF FF
+
+. 0 8050866 83
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 20 64 A8 EC C7 45 E4 31 75 B9 FD C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 EF CD AB 89 C7 45 DC 67 45 23 01 83 EC 08 6A 01 68 E0 39 05 08 E8 37 7B FF FF
+
+. 0 80508B9 7
+. 83 C4 10 85 C0 75 74
+
+. 0 80508C0 48
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F EF 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 8B 45 D4 35 56 30 9A FC 8B 55 D0 81 F2 CF A9 03 65 09 D0 85 C0 75 12
+
+. 0 80508F0 13
+. 83 EC 0C 68 6B 28 05 08 E8 13 7B FF FF
+
+pxor_2 ... ok
+. 0 80508FD 5
+. 83 C4 10 EB 42
+
+. 0 8050944 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8050B71 10
+. 83 EC 0C 6A 00 E8 A5 78 FF FF
+
+. 0 8048420 6
+. FF 25 B8 39 05 08
+
+. 0 8048426 10
+. 68 30 00 00 00 E9 80 FF FF FF
+
+. 0 3A9EBC50 14
+. 55 89 E5 57 56 53 83 EC 0C E8 33 AB FE FF
+
+. 0 3A9EBC5E 16
+. 81 C3 B2 73 0E 00 8B 93 BC 9E FF FF 85 D2 74 6F
+
+. 0 3A9EBC6E 9
+. 89 F6 8B 42 04 85 C0 74 46
+
+. 0 3A9EBC77 30
+. 89 F6 8D BC 27 00 00 00 00 48 89 42 04 C1 E0 04 8D 04 10 8D 48 08 8B 40 08 83 F8 04 77 21
+
+. 0 3A9EBC95 11
+. 8B 84 83 88 D7 FE FF 01 D8 FF E0
+
+. 0 3A9EBCA0 16
+. 8B 45 08 89 44 24 04 8B 41 08 89 04 24 FF 51 04
+
+. 0 8050B80 14
+. 55 89 E5 83 EC 18 89 5D F4 E8 BA 00 00 00
+
+. 0 8050B8E 36
+. 81 C3 06 2E 00 00 89 7D FC 8D 83 0C FF FF FF 8D BB 0C FF FF FF 89 75 F8 29 F8 C1 F8 02 85 C0 8D 70 FF 75 12
+
+. 0 8050BB2 5
+. E8 BD 00 00 00
+
+. 0 8050C74 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 17 2D 00 00 50 E8 F6 77 FF FF
+
+. 0 8048480 14
+. 55 89 E5 50 50 80 3D C0 39 05 08 00 75 2E
+
+. 0 804848E 11
+. A1 A8 38 05 08 8B 10 85 D2 74 1C
+
+. 0 80484B5 9
+. C6 05 C0 39 05 08 01 C9 C3
+
+. 0 8050C8A 4
+. 59 5B C9 C3
+
+. 0 8050BB7 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9EBCB0 13
+. 8B 93 BC 9E FF FF 8B 42 04 85 C0 75 C3
+
+. 0 3A9EBC80 21
+. 48 89 42 04 C1 E0 04 8D 04 10 8D 48 08 8B 40 08 83 F8 04 77 21
+
+. 0 3A970800 14
+. 55 89 E5 57 56 53 83 EC 4C E8 1D 51 00 00
+
+. 0 3A97080E 24
+. 81 C3 5E 7E 00 00 8D 83 AC F9 FF FF 89 45 CC 89 04 24 FF 93 F8 FB FF FF
+
+. 0 3A9658E0 11
+. 55 89 E5 8B 45 08 FF 40 04 5D C3
+
+. 0 3A970826 44
+. C7 45 F0 00 00 00 00 8B 93 98 F9 FF FF 8B BB 94 F9 FF FF 8D 04 95 0F 00 00 00 83 E0 F0 29 C4 8D 4C 24 0C 85 FF 89 F8 89 4D EC 74 20
+
+. 0 3A970852 26
+. FF 87 70 01 00 00 8B 75 F0 8B 55 EC 89 3C B2 8B 7F 0C 46 89 75 F0 85 FF 75 E6
+
+. 0 3A97086C 17
+. 8B 93 98 F9 FF FF 8B 78 0C 85 FF 0F 84 11 01 00 00
+
+. 0 3A97087D 15
+. C7 45 E8 01 00 00 00 8B 4D EC 39 79 04 74 12
+
+. 0 3A97089E 15
+. 8B 4D E8 41 39 D1 89 4D BC 0F 83 D6 00 00 00
+
+. 0 3A9708AD 22
+. 8B 45 E8 8B 75 EC C1 E0 02 89 45 C0 01 C6 F7 5D C0 89 75 C4 EB 0D
+
+. 0 3A9708D0 25
+. 8B 55 EC 8B 45 BC 8B 04 82 89 45 D0 8B 90 D4 01 00 00 89 45 E4 85 D2 74 77
+
+. 0 3A9708E9 6
+. 8B 02 85 C0 74 71
+
+. 0 3A9708EF 28
+. 8B 75 EC 8B 4D E8 8D 0C 8E 89 4D C8 8B 75 BC 8B 4D C0 8D 34 B1 89 75 B8 39 F8 74 12
+
+. 0 3A97090B 14
+. 90 8D 74 26 00 83 C2 04 8B 02 85 C0 74 47
+
+. 0 3A970919 4
+. 39 F8 75 F3
+
+. 0 3A970910 9
+. 83 C2 04 8B 02 85 C0 74 47
+
+. 0 3A970960 17
+. 8B 55 D0 8B 8A E0 01 00 00 85 C9 0F 85 06 01 00 00
+
+. 0 3A970971 18
+. FF 45 BC 8B 93 98 F9 FF FF 39 55 BC 0F 82 4D FF FF FF
+
+. 0 3A970983 11
+. 8B 7F 0C 85 FF 0F 85 EF FE FF FF
+
+. 0 3A97088C 18
+. 8D 74 26 00 FF 45 E8 8B 45 EC 8B 75 E8 39 3C B0 75 F2
+
+. 0 3A970890 14
+. FF 45 E8 8B 45 EC 8B 75 E8 39 3C B0 75 F2
+
+. 0 3A97098E 12
+. C7 45 F0 00 00 00 00 39 55 F0 72 2C
+
+. 0 3A9709C6 21
+. 8B 55 F0 8B 4D EC 8B 3C 91 0F B6 97 74 01 00 00 F6 C2 08 74 48
+
+. 0 3A9709DB 2
+. EB C2
+
+. 0 3A97099F 20
+. 88 D0 24 F7 88 87 74 01 00 00 8B 47 04 0F B6 08 84 C9 75 2A
+
+. 0 3A9709B3 5
+. F6 C2 03 75 25
+
+. 0 3A9709B8 14
+. FF 45 F0 8B 55 F0 3B 93 98 F9 FF FF 73 71
+
+. 0 3A9709DD 10
+. 8B 97 80 00 00 00 85 D2 75 07
+
+. 0 3A9709E7 7
+. 8B 77 4C 85 F6 74 CA
+
+. 0 3A9709EE 13
+. F6 83 14 FC FF FF 02 0F 85 E1 00 00 00
+
+. 0 3A9709FB 4
+. 85 D2 74 1D
+
+. 0 3A970A1C 7
+. 8B 47 4C 85 C0 75 49
+
+. 0 3A970A6C 9
+. 8B 40 04 8B 17 01 D0 FF D0
+
+. 0 3A97CCD4 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 03 11 00 00 50 E8 06 FD FF FF
+
+. 0 3A97C9F0 26
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 E7 13 00 00 51 80 BB 30 00 00 00 00 75 34
+
+. 0 3A97CA0A 10
+. 8B 93 24 00 00 00 85 D2 75 2F
+
+. 0 3A97CA43 15
+. 83 EC 0C 8B 83 10 FF FF FF 50 E8 32 FF FF FF
+
+. 0 3A97C984 6
+. FF A3 18 00 00 00
+
+. 0 3A97C98A 10
+. 68 18 00 00 00 E9 B0 FF FF FF
+
+. 0 3A97C944 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 0 3A9EBEA0 14
+. 55 89 E5 57 56 53 83 EC 0C E8 E3 A8 FE FF
+
+. 0 3A9EBEAE 16
+. 81 C3 62 71 0E 00 8B BB BC 9E FF FF 85 FF 74 68
+
+. 0 3A9EBEBE 22
+. 89 F6 8B 47 04 C1 E0 04 8D 74 38 F8 8D 47 08 39 C6 89 45 F0 73 12
+
+. 0 3A9EBED4 2
+. EB 4A
+
+. 0 3A9EBF20 6
+. 8B 3F 85 FF 75 9A
+
+. 0 3A9EBF26 7
+. 8B 45 08 85 C0 74 0B
+
+. 0 3A9EBF2D 11
+. 8B 45 08 89 04 24 E8 98 B9 09 00
+
+. 0 3AA878D0 17
+. 55 89 E5 57 56 53 83 EC 10 8B 7D 08 E8 B0 EE F4 FF
+
+. 0 3AA878E1 20
+. 81 C3 2F B7 04 00 8B 93 3C 2B 00 00 85 D2 0F 85 DD 00 00 00
+
+. 0 3AA878F5 22
+. 8B 8B 8C 9B FF FF 8D 83 88 9B FF FF 89 45 F0 39 C1 8B 71 04 75 0F
+
+. 0 3AA8790B 2
+. EB 33
+
+. 0 3AA87940 22
+. 8B 8B 94 9B FF FF 8D 83 90 9B FF FF 89 45 EC 39 C1 8B 71 04 75 0C
+
+. 0 3AA87956 2
+. EB 28
+
+. 0 3AA87980 22
+. 8B 8B 9C 9B FF FF 8D 83 98 9B FF FF 89 45 E8 39 C1 8B 71 04 75 0C
+
+. 0 3AA87996 2
+. EB 28
+
+. 0 3AA879C0 10
+. 8B 93 44 2B 00 00 85 D2 75 18
+
+. 0 3AA879CA 8
+. 83 C4 10 5B 5E 5F 5D C3
+
+. 0 3A9EBF38 8
+. 83 C4 0C 5B 5E 5F 5D C3
+
+. 0 3A97CA52 5
+. 83 C4 10 EB BD
+
+. 0 3A97CA14 12
+. 8B 83 14 FF FF FF 8B 10 85 D2 74 17
+
+. 0 3A97CA37 12
+. C6 83 30 00 00 00 01 8B 5D FC C9 C3
+
+. 0 3A97CCEA 4
+. 59 5B C9 C3
+
+. 0 3A970A75 2
+. EB AC
+
+. 0 3A970A23 20
+. FF 8F 70 01 00 00 FF 45 F0 8B 55 F0 3B 93 98 F9 FF FF 72 8F
+
+. 0 3A9BA054 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 E7 57 00 00 50 E8 36 75 FE FF
+
+. 0 3A9A15A0 26
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 9B E2 01 00 51 80 BB 5C 00 00 00 00 75 34
+
+. 0 3A9A15BA 10
+. 8B 93 44 00 00 00 85 D2 75 2F
+
+. 0 3A9A15F3 15
+. 83 EC 0C 8B 83 DC FE FF FF 50 E8 22 FF FF FF
+
+. 0 3A9A1524 6
+. FF A3 28 00 00 00
+
+. 0 3A9A152A 10
+. 68 38 00 00 00 E9 70 FF FF FF
+
+. 0 3A9A14A4 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 0 3A9A1602 5
+. 83 C4 10 EB BD
+
+. 0 3A9A15C4 12
+. 8B 83 E0 FE FF FF 8B 10 85 D2 74 17
+
+. 0 3A9A15E7 12
+. C6 83 5C 00 00 00 01 8B 5D FC C9 C3
+
+. 0 3A9BA06A 4
+. 59 5B C9 C3
+
+. 0 3A9709FF 29
+. 8B 52 04 8B 37 8B 87 88 00 00 00 01 F2 89 55 D4 8B 70 04 C1 EE 02 89 F0 4E 85 C0 75 3C
+
+. 0 3A970A58 6
+. 8B 45 D4 FF 14 B0
+
+. 0 3A9D6760 10
+. 55 89 E5 56 53 E8 27 00 00 00
+
+. 0 3A9D676A 18
+. 81 C3 A6 C8 0F 00 8D B3 A0 FF FF FF 8B 06 85 C0 75 04
+
+. 0 3A9D677C 4
+. 5B 5E 5D C3
+
+. 0 3A970A5E 7
+. 89 F0 4E 85 C0 75 F3
+
+. 0 3A970A65 7
+. 8B 47 4C 85 C0 74 B7
+
+. 0 3A970A37 12
+. 8B 4D CC 89 0C 24 FF 93 FC FB FF FF
+
+. 0 3A9658F0 11
+. 55 89 E5 8B 45 08 FF 48 04 5D C3
+
+. 0 3A970A43 13
+. 80 BB 14 FC FF FF 00 0F 88 B5 00 00 00
+
+. 0 3A970A50 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A9EBCBD 14
+. 89 D0 8B 12 85 D2 89 93 BC 9E FF FF 74 12
+
+. 0 3A9EBCDD 16
+. 8D B3 F4 FF FF FF 8D BB F8 FF FF FF 39 FE 72 0B
+
+. 0 3A9EBCF8 2
+. FF 16
+
+. 0 3AA20870 14
+. 55 89 E5 57 56 53 83 EC 0C E8 13 5F FB FF
+
+. 0 3AA2087E 18
+. 81 C3 92 27 0B 00 C7 04 24 00 00 00 00 E8 D0 FD FF FF
+
+. 0 3AA20660 16
+. 55 31 C0 89 E5 57 56 53 83 EC 24 E8 21 61 FB FF
+
+. 0 3AA20670 34
+. 81 C3 A0 29 0B 00 83 BB B8 01 00 00 00 C7 45 E0 00 00 00 00 0F 95 C0 85 C0 89 45 DC 0F 85 7B 01 00 00
+
+. 0 3AA20692 7
+. 8B 75 08 85 F6 74 0E
+
+. 0 3AA206A7 16
+. 8B B3 70 95 FF FF 8B BB B0 09 00 00 85 F6 75 19
+
+. 0 3AA206D0 13
+. 89 B3 B4 09 00 00 8B 4D 08 85 C9 74 11
+
+. 0 3AA206EE 11
+. 8B 56 5C 85 D2 0F 8E C7 00 00 00
+
+. 0 3AA207C0 12
+. 8B 46 10 39 46 14 0F 86 2D FF FF FF
+
+. 0 3AA206F9 8
+. 0F B6 4E 46 84 C9 75 2F
+
+. 0 3AA20701 4
+. 85 D2 7E 2B
+
+. 0 3AA20730 7
+. 8B 45 08 85 C0 74 11
+
+. 0 3AA20748 22
+. 31 C0 89 83 B4 09 00 00 8B 83 B0 09 00 00 39 C7 0F 85 62 FF FF FF
+
+. 0 3AA2075E 11
+. 8B 76 34 85 F6 0F 85 67 FF FF FF
+
+. 0 3AA20769 14
+. 8D B4 26 00 00 00 00 8B 45 08 85 C0 74 0E
+
+. 0 3AA20785 11
+. 8B 7D DC 85 FF 0F 85 9D 00 00 00
+
+. 0 3AA20790 11
+. 8B 45 E0 83 C4 24 5B 5E 5F 5D C3
+
+. 0 3AA20890 12
+. 8B B3 70 95 FF FF 89 C7 85 F6 74 32
+
+. 0 3AA2089C 10
+. 8D 74 26 00 8B 06 A8 02 75 1A
+
+. 0 3AA208C0 14
+. C7 46 5C FF FF FF FF 8B 76 34 85 F6 75 D2
+
+. 0 3AA208A0 6
+. 8B 06 A8 02 75 1A
+
+. 0 3AA208A6 10
+. 25 08 10 00 00 83 F8 08 74 10
+
+. 0 3AA208B0 7
+. 8B 46 5C 85 C0 75 21
+
+. 0 3AA208D8 29
+. 0F BE 46 46 31 C9 31 D2 8B 84 30 94 00 00 00 89 4C 24 08 89 54 24 04 89 34 24 FF 50 2C
+
+. 0 3AA1EFD0 19
+. 55 89 E5 56 53 83 EC 0C 8B 45 10 8B 75 08 E8 AE 77 FB FF
+
+. 0 3AA1EFE3 25
+. 81 C3 2D 40 0B 00 89 44 24 08 8B 45 0C 89 34 24 89 44 24 04 E8 74 1D 00 00
+
+. 0 3AA20D70 30
+. 55 89 E5 83 EC 1C 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 7D 0C 0F BE 46 46 E8 03 5A FB FF
+
+. 0 3AA20D8E 19
+. 81 C3 82 22 0B 00 8B 84 30 94 00 00 00 89 34 24 FF 50 30
+
+. 0 3AA1EAC0 26
+. 55 89 E5 83 EC 1C 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 56 14 E8 B7 7C FB FF
+
+. 0 3AA1EADA 15
+. 81 C3 36 45 0B 00 8B 46 10 31 FF 39 C2 76 47
+
+. 0 3AA1EB30 10
+. 8B 46 04 89 C2 2B 56 08 75 1D
+
+. 0 3AA1EB3A 29
+. C7 46 4C FF FF FF FF C7 46 50 FF FF FF FF 89 F8 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA20DA1 5
+. 31 D2 40 74 5E
+
+. 0 3AA20DA6 19
+. 8B 55 10 85 FF 0F 94 C0 85 D2 0F 94 C2 09 D0 A8 01 74 5A
+
+. 0 3AA20DB9 31
+. 83 0E 02 8D 46 48 31 FF 89 44 24 08 8D 46 47 89 7C 24 0C 89 44 24 04 89 34 24 E8 C8 FD FF FF
+
+. 0 3AA20BC1 7
+. 8B 16 F6 C2 01 74 38
+
+. 0 3AA20C00 27
+. 8B 46 20 89 0C 24 29 C8 05 FF 0F 00 00 25 00 F0 FF FF 89 44 24 04 E8 75 70 05 00
+
+. 0 3AA77C90 17
+. 89 DA 8B 4C 24 08 8B 5C 24 04 B8 5B 00 00 00 CD 80
+
+. 0 3AA77CA1 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3AA77CAA 1
+. C3
+
+. 0 3AA20C1B 9
+. 90 8D 74 26 00 8B 16 EB A4
+
+. 0 3AA20BF0 15
+. 83 CA 01 89 16 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3AA20DD8 59
+. C7 46 18 00 00 00 00 89 F2 C7 46 14 00 00 00 00 C7 46 10 00 00 00 00 C7 46 08 00 00 00 00 C7 46 04 00 00 00 00 C7 46 0C 00 00 00 00 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA1EFFC 6
+. 31 D2 85 C0 74 17
+
+. 0 3AA1F002 32
+. 8B 46 1C 89 F2 89 46 18 89 46 14 89 46 10 89 46 0C 89 46 04 89 46 08 83 C4 0C 89 D0 5B 5E 5D C3
+
+. 0 3AA208F5 2
+. EB C9
+
+. 0 3AA208CE 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3
+
+. 0 3A9EBCFA 7
+. 83 C6 04 39 FE 72 F7
+
+. 0 3A9EBD01 2
+. EB EA
+
+. 0 3A9EBCED 11
+. 8B 45 08 89 04 24 E8 94 D8 05 00
+
+. 0 3AA4958C 11
+. 8B 5C 24 04 B8 FC 00 00 00 CD 80
+
+==2988== 
+sewardj@phoenix:~/VgSVN/valgrind/trunk$ ./vsvn --tool=none --vex-guest-chase-thresh=0 ./none/testss/x86/insn_mmxsse
+==2993== Nulgrind, a binary JIT-compiler.
+==2993== Copyright (C) 2002-2004, and GNU GPL'd, by Nicholas Nethercote.
+==2993== Using valgrind-2.3.0.CVS, a dynamic binary instrumentation framework.
+==2993== Copyright (C) 2000-2004, and GNU GPL'd, by Julian Seward et al.
+==2993== For more details, rerun with: -v
+==2993== 
+. 0 3A965880 7
+. 89 E0 E8 F9 03 00 00
+
+. 0 3A965C80 14
+. 55 89 E5 57 56 53 83 EC 54 E8 9D FC 00 00
+
+. 0 3A97592B 4
+. 8B 1C 24 C3
+
+. 0 3A965C8E 67
+. 81 C3 DE 29 01 00 89 45 D0 8B 93 00 00 00 00 8D 83 50 FF FF FF 29 D0 89 83 E4 F9 FF FF 01 D0 89 83 EC F9 FF FF 8D 83 94 F9 FF FF 89 45 C8 83 C0 50 8B 7D C8 89 45 CC 8B 70 08 83 C7 68 8B 16 85 D2 75 10
+
+. 0 3A965CE1 5
+. 83 FA 21 7E ED
+
+. 0 3A965CD3 14
+. 89 34 97 83 C6 08 8B 06 85 C0 89 C2 74 2F
+
+. 0 3A965CE6 16
+. B8 FF FF FF 6F 29 D0 83 F8 0F 0F 87 EA 01 00 00
+
+. 0 3A965CF6 21
+. B8 21 00 00 70 29 D0 89 34 87 83 C6 08 8B 06 85 C0 89 C2 75 D6
+
+. 0 3A965D0B 14
+. 90 8D 74 26 00 8B 4D CC 8B 11 85 D2 74 57
+
+. 0 3A965D19 7
+. 8B 47 10 85 C0 74 03
+
+. 0 3A965D20 10
+. 01 50 04 8B 47 0C 85 C0 74 03
+
+. 0 3A965D2A 10
+. 01 50 04 8B 47 14 85 C0 74 03
+
+. 0 3A965D34 10
+. 01 50 04 8B 47 18 85 C0 74 03
+
+. 0 3A965D3E 10
+. 01 50 04 8B 47 44 85 C0 74 03
+
+. 0 3A965D48 10
+. 01 50 04 8B 47 5C 85 C0 74 03
+
+. 0 3A965D52 13
+. 01 50 04 8B 87 C4 00 00 00 85 C0 74 11
+
+. 0 3A965D5F 27
+. 01 50 04 8D B4 26 00 00 00 00 8D BC 27 00 00 00 00 8B 83 E4 F9 FF FF 85 C0 75 0E
+
+. 0 3A965D88 44
+. C7 45 DC 00 00 00 00 8B 4D C8 8B 83 40 FA FF FF C7 45 E4 00 00 00 00 83 C1 50 85 C0 C7 45 D8 00 00 00 00 C7 45 D4 00 00 00 00 74 12
+
+. 0 3A965DB4 28
+. 8B 40 04 89 45 D4 8B 83 44 FA FF FF 8B 40 04 89 45 D8 8B BB 4C FA FF FF 85 FF 74 0C
+
+. 0 3A965DD0 51
+. 8B 83 04 FA FF FF 8B 40 04 01 45 D8 8B 55 D4 8B 75 D8 8D 04 16 89 45 C4 8B 45 C8 8B 78 50 8B 41 30 8B 40 04 89 45 C0 8B 81 B4 00 00 00 31 C9 85 C0 74 03
+
+. 0 3A965E03 12
+. 8B 48 04 89 F0 C1 E8 03 39 C8 76 02
+
+. 0 3A965E0F 9
+. 89 C8 8D 0C C2 39 CA 73 17
+
+. 0 3A965E18 23
+. 90 8D B4 26 00 00 00 00 8B 32 89 F8 83 C2 08 01 F0 01 38 39 CA 72 F1
+
+. 0 3A965E20 15
+. 8B 32 89 F8 83 C2 08 01 F0 01 38 39 CA 72 F1
+
+. 0 3A965E2F 5
+. 3B 4D C4 73 4D
+
+. 0 3A965E34 47
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 51 04 8B 75 C0 89 D0 C1 E8 08 C1 E0 04 01 F0 8B 31 01 FE 89 75 BC 0F B6 F2 31 D2 66 83 78 0E 00 74 06
+
+. 0 3A965E63 17
+. 8B 93 E4 F9 FF FF 03 50 04 8D 46 FA 83 F8 01 77 05
+
+. 0 3A965E74 13
+. 8B 45 BC 89 10 83 C1 08 3B 4D C4 72 BF
+
+. 0 3A965E40 35
+. 8B 51 04 8B 75 C0 89 D0 C1 E8 08 C1 E0 04 01 F0 8B 31 01 FE 89 75 BC 0F B6 F2 31 D2 66 83 78 0E 00 74 06
+
+. 0 3A965E81 16
+. 8B 45 C8 BE 01 00 00 00 83 C0 50 E8 EF 5E 00 00
+
+. 0 3A96BD80 12
+. 55 89 C1 8B 40 28 89 E5 85 C0 74 1D
+
+. 0 3A96BD8C 31
+. 8B 40 04 8B 10 83 C0 08 89 81 68 01 00 00 8D 04 90 89 91 64 01 00 00 89 81 6C 01 00 00 5D C3
+
+. 0 3A965E91 57
+. 89 B3 54 FB FF FF 8D 83 94 C9 FE FF 89 83 78 FB FF FF 8D 83 28 02 00 00 89 83 7C FB FF FF 8D 83 24 DD FE FF 89 44 24 04 8B 55 D0 89 AB 28 01 00 00 89 14 24 E8 F6 CE 00 00
+
+. 0 3A972DC0 44
+. 55 31 D2 89 E5 57 31 FF 31 C9 56 31 F6 53 81 EC 04 02 00 00 8B 45 08 89 95 1C FE FF FF 31 D2 89 95 0C FE FF FF 8B 10 E8 3F 2B 00 00
+
+. 0 3A972DEC 74
+. 81 C3 80 58 00 00 89 83 28 01 00 00 83 C0 04 89 93 0C 02 00 00 8D 14 90 89 B5 14 FE FF FF 8B 72 04 89 BD 18 FE FF FF 31 FF 85 F6 89 83 38 00 00 00 8D 42 04 89 8D 10 FE FF FF 89 C1 89 BD 08 FE FF FF 89 83 00 02 00 00 74 09
+
+. 0 3A972E36 9
+. 83 C1 04 8B 11 85 D2 75 F7
+
+. 0 3A972E3F 37
+. 83 C1 04 8D 83 14 D2 FE FF 89 CA 89 8B 34 01 00 00 8B 09 89 85 20 FE FF FF 31 C0 85 C9 89 83 20 FC FF FF 74 52
+
+. 0 3A972E64 20
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8D 41 FD 83 F8 1E 77 28
+
+. 0 3A972EA0 22
+. 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA
+
+. 0 3A972E70 8
+. 8D 41 FD 83 F8 1E 77 28
+
+. 0 3A972E78 11
+. 8B 84 83 54 DB FF FF 01 D8 FF E0
+
+. 0 3A9731AC 14
+. 8B 42 04 89 83 54 FC FF FF E9 E6 FC FF FF
+
+. 0 3A9731F2 14
+. 8B 42 04 89 83 18 FC FF FF E9 97 FC FF FF
+
+. 0 3A972E97 31
+. 8B 0A 8D B4 26 00 00 00 00 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA
+
+. 0 3A97319E 14
+. 8B 42 04 89 83 30 FC FF FF E9 EB FC FF FF
+
+. 0 3A97320E 14
+. 8B 42 04 89 85 1C FE FF FF E9 84 FC FF FF
+
+. 0 3A973200 14
+. 8B 42 04 89 85 18 FE FF FF E9 92 FC FF FF
+
+. 0 3A9731E4 14
+. 8B 42 04 89 85 20 FE FF FF E9 A5 FC FF FF
+
+. 0 3A9731D6 14
+. 8B 42 04 31 85 14 FE FF FF E9 BC FC FF FF
+
+. 0 3A9731C8 14
+. 8B 42 04 31 85 10 FE FF FF E9 CA FC FF FF
+
+. 0 3A972E83 51
+. B8 FF FF FF FF 89 85 0C FE FF FF 8B 42 04 89 83 30 01 00 00 8B 0A 8D B4 26 00 00 00 00 83 C2 08 B8 01 00 00 00 D3 E0 09 85 0C FE FF FF 8B 0A 85 C9 75 BA
+
+. 0 3A9731BA 14
+. 8B 42 04 89 83 20 FC FF FF E9 D8 FC FF FF
+
+. 0 3A972EB6 23
+. 8D 55 B4 8D 85 24 FE FF FF 89 95 04 FE FF FF 89 04 24 E8 83 1E 00 00
+
+. 0 3A974D50 13
+. 89 DA 8B 5C 24 04 B8 7A 00 00 00 CD 80
+
+. 0 3A974D5D 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3A974D66 1
+. C3
+
+. 0 3A972ECD 8
+. 85 C0 0F 85 DC 01 00 00
+
+. 0 3A972ED5 39
+. 8D 85 A6 FE FF FF 89 85 04 FE FF FF 8B B5 04 FE FF FF 31 C0 31 FF 89 85 00 FE FF FF 0F B6 16 88 D0 2C 30 3C 09 77 69
+
+. 0 3A972EFC 28
+. 8D 74 26 00 46 0F BE C2 8D 50 D0 0F B6 0E 88 C8 2C 30 88 8D FF FD FF FF 3C 09 77 28
+
+. 0 3A972F40 17
+. FF 85 00 FE FF FF C1 E7 08 09 D7 46 80 F9 2E 75 0B
+
+. 0 3A972F51 11
+. 0F B6 16 88 D0 2C 30 3C 09 76 A4
+
+. 0 3A972F00 24
+. 46 0F BE C2 8D 50 D0 0F B6 0E 88 C8 2C 30 88 8D FF FD FF FF 3C 09 77 28
+
+. 0 3A972F5C 9
+. 83 BD 00 FE FF FF 02 7F 12
+
+. 0 3A972F77 12
+. 81 FF 04 02 02 00 0F 86 BD 02 00 00
+
+. 0 3A972F83 15
+. 83 BD 0C FE FF FF FF 89 BB 1C FC FF FF 74 58
+
+. 0 3A972FEA 10
+. 8B BB 18 FC FF FF 85 FF 75 0B
+
+. 0 3A972FFF 10
+. 8B B3 94 FD FF FF 85 F6 74 16
+
+. 0 3A97301F 12
+. C7 04 24 00 00 00 00 E8 75 08 00 00
+
+. 0 3A9738A0 12
+. 55 89 E5 53 8B 4D 08 E8 7F 20 00 00
+
+. 0 3A9738AC 15
+. 81 C3 C0 4D 00 00 87 CB B8 2D 00 00 00 CD 80
+
+. 0 3A9738BB 14
+. 87 CB 89 83 44 01 00 00 31 D2 39 C8 72 05
+
+. 0 3A9738C9 5
+. 5B 89 D0 5D C3
+
+. 0 3A97302B 10
+. 8B 83 20 FC FF FF 85 C0 74 21
+
+. 0 3A973035 5
+. 80 38 00 75 0A
+
+. 0 3A973044 4
+. 85 C0 74 0E
+
+. 0 3A973048 8
+. 89 04 24 E8 80 22 00 00
+
+. 0 3A9752D0 13
+. 8B 44 24 04 BA 03 00 00 00 21 C2 74 24
+
+. 0 3A9752DD 2
+. 7A 17
+
+. 0 3A9752DF 8
+. 38 30 0F 84 9F 00 00 00
+
+. 0 3A9752E7 9
+. 40 38 30 0F 84 96 00 00 00
+
+. 0 3A9752F0 6
+. 40 83 F2 02 74 0B
+
+. 0 3A975301 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 58
+
+. 0 3A975369 14
+. 83 E8 04 81 E9 FF FE FE FE 80 F9 00 74 0F
+
+. 0 3A975377 5
+. 40 84 ED 74 0A
+
+. 0 3A97537C 9
+. C1 E9 10 40 80 F9 00 74 01
+
+. 0 3A975386 5
+. 2B 44 24 04 C3
+
+. 0 3A973050 18
+. 89 83 24 FC FF FF C7 04 24 00 00 00 00 E8 7E 08 00 00
+
+. 0 3A9738E0 20
+. 55 89 E5 83 EC 10 89 5D F4 89 7D FC 8B 7D 08 E8 37 20 00 00
+
+. 0 3A9738F4 19
+. 81 C3 78 4D 00 00 89 75 F8 8B B3 44 01 00 00 85 F6 74 0A
+
+. 0 3A973907 10
+. 8B 83 2C 01 00 00 85 C0 74 1B
+
+. 0 3A97392C 6
+. 85 FF 89 F2 75 0F
+
+. 0 3A973932 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A973062 16
+. 89 C1 8D 83 28 02 00 00 39 C1 0F 84 B4 01 00 00
+
+. 0 3A973072 14
+. 8B 83 30 01 00 00 85 C0 0F 85 9C 01 00 00
+
+. 0 3A973080 32
+. 8D 85 20 FE FF FF 89 44 24 08 8B 85 18 FE FF FF 89 44 24 04 8B 95 1C FE FF FF 89 14 24 FF 55 0C
+
+. 0 3A966390 17
+. 55 89 E5 57 56 53 81 EC C8 01 00 00 E8 8A F5 00 00
+
+. 0 3A9663A1 119
+. 81 C3 CB 22 01 00 C6 85 0B FF FF FF 00 8D 83 54 D2 FE FF 8B BB 30 01 00 00 89 83 E0 F9 FF FF 8D 83 74 D2 FE FF 89 83 F8 FB FF FF 8D 83 84 D2 FE FF 89 83 FC FB FF FF 8D 83 D4 9F FF FF 89 83 04 FC FF FF 8B 83 00 02 00 00 C6 85 03 FF FF FF 00 C6 85 02 FF FF FF 00 89 85 48 FF FF FF 31 C0 89 85 F8 FE FF FF 31 C0 85 FF 89 85 F4 FE FF FF 8D 83 BA D5 FF FF 74 03
+
+. 0 3A96641B 17
+. 89 83 80 FD FF FF 8D 85 48 FF FF FF E8 B4 CE 00 00
+
+. 0 3A9732E0 16
+. 55 89 C1 89 E5 56 8B 10 31 F6 8B 02 85 C0 74 0E
+
+. 0 3A9732F0 5
+. 80 38 4C 74 0E
+
+. 0 3A973303 6
+. 80 78 01 44 75 EC
+
+. 0 3A9732F5 9
+. 83 C2 04 8B 02 85 C0 75 F2
+
+. 0 3A973309 9
+. 80 78 02 5F 8D 76 00 75 E3
+
+. 0 3A973312 10
+. 83 C2 04 8D 70 03 89 11 EB E2
+
+. 0 3A9732FE 5
+. 89 F0 5E 5D C3
+
+. 0 3A96642C 10
+. 89 85 FC FE FF FF 85 C0 74 72
+
+. 0 3A966436 27
+. 8B 95 FC FE FF FF 31 F6 0F B6 02 84 C0 0F 95 C2 3C 3D 0F 95 C0 21 D0 A8 01 74 34
+
+. 0 3A966451 28
+. 8B 85 FC FE FF FF 46 0F B6 0C 06 84 C9 0F 95 C0 80 F9 3D 0F 95 C2 21 D0 A8 01 75 E4
+
+. 0 3A96646D 5
+. 80 F9 3D 75 AF
+
+. 0 3A966472 8
+. 8D 46 FC 83 F8 10 77 A7
+
+. 0 3A96647A 11
+. 8B 84 83 D0 D5 FF FF 01 D8 FF E0
+
+. 0 3A967760 20
+. FC 8B B5 FC FE FF FF B9 0C 00 00 00 8D BB 2D F0 FF FF F3 A6
+
+. 0 3A967772 2
+. F3 A6
+
+. 0 3A967774 6
+. 0F 85 0B 03 00 00
+
+. 0 3A96777A 20
+. 8B 85 FC FE FF FF 83 C0 0D 89 83 AC 00 00 00 E9 93 EC FF FF
+
+. 0 3A966421 11
+. 8D 85 48 FF FF FF E8 B4 CE 00 00
+
+. 0 3A9678A7 20
+. FC 8B B5 FC FE FF FF B9 07 00 00 00 8D BB 69 ED FF FF F3 A6
+
+. 0 3A9678BB 6
+. 0F 85 70 02 00 00
+
+. 0 3A967B31 20
+. FC 8B B5 FC FE FF FF B9 07 00 00 00 8D BB CF ED FF FF F3 A6
+
+. 0 3A967B43 2
+. F3 A6
+
+. 0 3A967B45 6
+. 0F 85 90 03 00 00
+
+. 0 3A967B4B 20
+. 8B 85 FC FE FF FF 83 C0 08 89 83 B0 00 00 00 E9 C2 E8 FF FF
+
+. 0 3A9664A8 26
+. 8B 85 F8 FE FF FF 89 85 50 FF FF FF 8B 83 30 01 00 00 85 C0 0F 85 6F 15 00 00
+
+. 0 3A9664C2 14
+. 8B BB B8 00 00 00 85 FF 0F 84 B0 00 00 00
+
+. 0 3A966580 28
+. 8B 55 10 B8 01 00 00 00 89 83 34 00 00 00 8D 83 14 D2 FE FF 39 02 0F 84 FD 04 00 00
+
+. 0 3A96659C 22
+. C7 04 24 00 00 00 00 8D 93 F6 ED FF FF 31 C9 89 D0 E8 1E 64 00 00
+
+. 0 3A96C9D0 17
+. 55 89 E5 57 56 53 83 EC 2C 8B 7D 08 E8 4A 8F 00 00
+
+. 0 3A96C9E1 23
+. 81 C3 8B BC 00 00 89 55 EC 89 4D E8 89 45 F0 89 14 24 E8 D8 88 00 00
+
+. 0 3A96C9F8 28
+. 8D 50 01 B9 01 00 00 00 05 21 02 00 00 89 55 E4 89 4C 24 04 89 04 24 E8 18 8E FF FF
+
+. 0 3A96582C 6
+. FF A3 14 00 00 00
+
+. 0 3A973870 18
+. 55 89 E5 53 83 EC 04 8B 45 0C 8B 4D 08 E8 A9 20 00 00
+
+. 0 3A973882 17
+. 81 C3 EA 4D 00 00 0F AF C1 89 04 24 E8 89 1F FF FF
+
+. 0 3A96581C 6
+. FF A3 10 00 00 00
+
+. 0 3A973800 15
+. 55 89 E5 53 83 EC 08 8B 45 08 E8 1C 21 00 00
+
+. 0 3A97380F 22
+. 81 C3 5D 4E 00 00 C7 04 24 08 00 00 00 89 44 24 04 E8 E7 1F FF FF
+
+. 0 3A96580C 6
+. FF A3 0C 00 00 00
+
+. 0 3A973700 26
+. 55 89 E5 83 EC 24 89 5D F4 89 75 F8 8B 75 08 89 7D FC 8B 7D 0C E8 11 22 00 00
+
+. 0 3A97371A 16
+. 81 C3 52 4F 00 00 8B 8B 3C 01 00 00 85 C9 75 20
+
+. 0 3A97372A 54
+. 8B 83 18 FC FF FF 8D 93 28 02 00 00 89 93 38 01 00 00 8D 4C 02 FF F7 D8 21 C1 89 8B 3C 01 00 00 8B 83 38 01 00 00 01 F0 8D 50 FF F7 DE 21 F2 8D 04 17 39 C8 73 20
+
+. 0 3A973760 30
+. 89 93 40 01 00 00 89 D0 8D 14 17 89 93 38 01 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A973825 6
+. 83 C4 08 5B 5D C3
+
+. 0 3A973893 4
+. 5A 5B 5D C3
+
+. 0 3A96CA14 16
+. 89 45 E0 8B 55 E0 31 C0 85 D2 0F 84 05 02 00 00
+
+. 0 3A96CA24 44
+. 8B 75 E0 8B 4D E0 8B 55 E4 81 C6 14 02 00 00 89 C8 05 20 02 00 00 89 71 14 89 54 24 08 8B 4D EC 89 04 24 89 4C 24 04 E8 20 8E 00 00
+
+. 0 3A975870 22
+. 57 56 8B 7C 24 0C 8B 74 24 10 8B 4C 24 14 89 F8 FC 83 F9 20 76 56
+
+. 0 3A9758DC 2
+. F3 A4
+
+. 0 3A9758DE 3
+. 5E 5F C3
+
+. 0 3A96CA50 96
+. C7 46 08 01 00 00 00 8B 55 E0 BE 04 00 00 00 8B 4D F0 89 82 14 02 00 00 89 4A 04 8B 4D E0 0F B6 55 E8 0F B6 81 74 01 00 00 89 B9 60 01 00 00 80 E2 03 89 B1 AC 01 00 00 24 FC 08 D0 88 81 74 01 00 00 89 CA 8B 83 94 F9 FF FF 81 C2 9C 01 00 00 89 91 B0 01 00 00 31 C9 85 C0 0F 84 83 01 00 00
+
+. 0 3A96CC33 37
+. 8B 45 E0 89 83 94 F9 FF FF FF 83 98 F9 FF FF 83 83 C4 F9 FF FF 01 83 93 C8 F9 FF FF 00 85 FF 0F 85 AF FE FF FF
+
+. 0 3A96CC58 8
+. 8B 7D E0 E9 CC FE FF FF
+
+. 0 3A96CB2C 10
+. 85 C9 8D 87 50 01 00 00 74 04
+
+. 0 3A96CB3A 37
+. 89 04 8A 8B 45 E0 8B 55 E0 8B 4D F0 05 50 01 00 00 89 82 B4 01 00 00 0F B6 09 84 C9 88 4D DF 0F 84 C7 00 00 00
+
+. 0 3A96CC26 13
+. 8B 45 E0 83 C4 2C 5B 5E 5F 5D C2 04 00
+
+. 0 3A9665B2 17
+. 8B 8B 94 F9 FF FF 83 EC 04 85 C9 0F 84 05 14 00 00
+
+. 0 3A9665C3 76
+. 8B 75 08 8B 45 0C 89 B1 44 01 00 00 8B 75 10 66 89 81 4C 01 00 00 8B 06 89 81 48 01 00 00 FF 81 70 01 00 00 31 C0 8B 75 08 89 81 98 01 00 00 B8 FF FF FF FF 89 81 94 01 00 00 C1 65 0C 05 8B 55 0C 8D 04 32 39 C6 0F 83 AA 00 00 00
+
+. 0 3A96660F 8
+. 89 85 E4 FE FF FF EB 23
+
+. 0 3A96663A 7
+. 8B 06 83 F8 03 74 2F
+
+. 0 3A966641 5
+. 83 F8 03 76 D1
+
+. 0 3A966646 11
+. 3D 51 E5 74 64 0F 84 6F 06 00 00
+
+. 0 3A966651 11
+. 3D 51 E5 74 64 0F 87 3F 06 00 00
+
+. 0 3A96665C 6
+. 83 F8 06 90 75 CD
+
+. 0 3A966662 12
+. 8B 7E 08 8B 45 08 29 F8 89 01 EB C1
+
+. 0 3A96662F 11
+. 83 C6 20 3B B5 E4 FE FF FF 73 7F
+
+. 0 3A966670 39
+. 8B 01 8B 56 08 01 C2 89 93 94 00 00 00 8D 83 94 00 00 00 89 83 F8 F9 FF FF 8B 83 EC F9 FF FF 85 C0 0F 85 EF 0E 00 00
+
+. 0 3A967586 12
+. C6 85 0B FF FF FF 01 E9 9D F0 FF FF
+
+. 0 3A966617 9
+. 83 F8 01 0F 84 76 05 00 00
+
+. 0 3A966B96 24
+. 8B 46 1C 8B 56 08 8B 39 48 F7 D0 21 D0 8D 04 38 39 81 94 01 00 00 76 09
+
+. 0 3A966BAE 29
+. 89 81 94 01 00 00 8B 56 08 8D 04 3A 8B 7E 14 01 F8 39 81 98 01 00 00 0F 83 64 FA FF FF
+
+. 0 3A966BCB 11
+. 89 81 98 01 00 00 E9 59 FA FF FF
+
+. 0 3A966BB7 20
+. 8D 04 3A 8B 7E 14 01 F8 39 81 98 01 00 00 0F 83 64 FA FF FF
+
+. 0 3A966620 5
+. 83 F8 02 75 0A
+
+. 0 3A966625 21
+. 8B 46 08 8B 11 01 D0 89 41 08 83 C6 20 3B B5 E4 FE FF FF 73 7F
+
+. 0 3A966CC0 14
+. 8B 46 18 89 83 00 FC FF FF E9 61 F9 FF FF
+
+. 0 3A9666B9 10
+. 8B 91 98 01 00 00 85 D2 75 0B
+
+. 0 3A9666CE 10
+. 8B 83 F8 F9 FF FF 85 C0 75 1C
+
+. 0 3A9666F4 13
+. 80 BD 02 FF FF FF 00 0F 85 36 01 00 00
+
+. 0 3A966701 23
+. 89 8D E0 FE FF FF 8B 71 08 85 F6 89 B5 DC FE FF FF 0F 84 14 01 00 00
+
+. 0 3A966718 9
+. 8B 36 8D 79 18 85 F6 75 20
+
+. 0 3A966741 5
+. 83 FE 21 7E DD
+
+. 0 3A966723 30
+. 8B 95 DC FE FF FF 89 14 B7 83 85 DC FE FF FF 08 8B B5 DC FE FF FF 8B 06 85 C0 89 C6 74 27
+
+. 0 3A966746 16
+. B8 FF FF FF 6F 29 F0 83 F8 0F 0F 87 80 04 00 00
+
+. 0 3A966756 18
+. B8 21 00 00 70 29 F0 8B B5 DC FE FF FF 89 34 87 EB C4
+
+. 0 3A96672C 21
+. 83 85 DC FE FF FF 08 8B B5 DC FE FF FF 8B 06 85 C0 89 C6 74 27
+
+. 0 3A966768 6
+. 8B 11 85 D2 74 59
+
+. 0 3A9667C7 7
+. 8B 57 78 85 D2 74 30
+
+. 0 3A9667FE 10
+. 8B 97 98 00 00 00 85 D2 74 16
+
+. 0 3A96681E 7
+. 8B 47 74 85 C0 74 07
+
+. 0 3A96682C 11
+. 8B 41 28 85 C0 0F 85 C9 04 00 00
+
+. 0 3A966D00 7
+. 89 C8 E8 79 50 00 00
+
+. 0 3A966D07 5
+. E9 2B FB FF FF
+
+. 0 3A966837 13
+. 83 BD 50 FF FF FF 02 0F 84 1F 15 00 00
+
+. 0 3A966844 13
+. 80 BD 02 FF FF FF 00 0F 84 9F 04 00 00
+
+. 0 3A966CF0 11
+. 8B 83 AC 00 00 00 E8 35 3E 00 00
+
+. 0 3A96AB30 14
+. 55 89 E5 57 56 53 83 EC 2C E8 ED AD 00 00
+
+. 0 3A96AB3E 41
+. 81 C3 2E DB 00 00 89 45 F0 8B 93 24 FC FF FF 8D 8B CC 00 00 00 8B 83 20 FC FF FF 89 0C 24 8D 8B C8 00 00 00 E8 D9 7B 00 00
+
+. 0 3A972740 14
+. 55 89 E5 57 56 53 83 EC 38 E8 DD 31 00 00
+
+. 0 3A97274E 45
+. 81 C3 1E 5F 00 00 89 55 EC 85 C0 8B 93 58 FC FF FF 89 45 F0 0F 95 C0 31 FF 23 93 54 FC FF FF 0F B6 C0 89 4D E8 89 45 E0 89 55 E4 74 25
+
+. 0 3A9727A0 11
+. 8B 45 E0 85 C0 0F 84 20 01 00 00
+
+. 0 3A9727AB 33
+. 8B 55 E0 31 F6 31 FF 8D 04 D5 10 00 00 00 29 C4 8B 45 E4 8D 4C 24 1B 83 E1 F0 85 C0 89 4D D8 74 40
+
+. 0 3A97280C 7
+. 8B 7D F0 85 FF 74 10
+
+. 0 3A972813 26
+. 8B 45 D8 8B 55 F0 8B 4D EC 89 14 F0 89 4C F0 04 83 7D E0 01 0F 84 D0 00 00 00
+
+. 0 3A9728FD 36
+. 8B 75 D8 8B 56 04 42 0F B6 4D E0 BE 01 00 00 00 89 F0 D3 E0 8B 4D E8 89 01 8D 04 C2 89 04 24 E8 FB 2E FF FF
+
+. 0 3A97374A 22
+. 8B 83 38 01 00 00 01 F0 8D 50 FF F7 DE 21 F2 8D 04 17 39 C8 73 20
+
+. 0 3A972921 7
+. 89 45 DC 85 C0 74 8C
+
+. 0 3A972928 10
+. 83 7D E0 01 0F 84 D9 00 00 00
+
+. 0 3A972A0B 64
+. 8B 75 E8 8B 55 DC 8B 4D D8 8B 06 8D 04 C2 89 02 8B 41 04 40 89 42 04 8B 06 C7 42 0C 00 00 00 00 8D 04 C2 89 42 08 8B 41 04 89 44 24 08 8B 01 89 44 24 04 8B 06 8D 04 C2 89 04 24 E8 25 2D 00 00
+
+. 0 3A975770 22
+. 57 56 8B 7C 24 0C 8B 74 24 10 8B 4C 24 14 89 F8 FC 83 F9 20 76 52
+
+. 0 3A9757D8 2
+. F3 A4
+
+. 0 3A9757DA 5
+. 89 F8 5E 5F C3
+
+. 0 3A972A4B 25
+. C6 00 2F 8B 55 08 C7 06 02 00 00 00 8B 75 DC 8B 46 04 89 02 E9 8C FE FF FF
+
+. 0 3A9728F0 13
+. 8B 45 DC 8D 65 F4 5B 5E 5F 5D C2 04 00
+
+. 0 3A96AB67 21
+. 89 83 C4 00 00 00 83 EC 04 C7 04 24 0C 00 00 00 E8 A0 AC FF FF
+
+. 0 3A96AB7C 16
+. 89 83 D0 00 00 00 85 C0 89 C6 0F 84 93 02 00 00
+
+. 0 3A96AB8C 42
+. 8B 93 C8 00 00 00 B8 CD CC CC CC 8D 14 95 27 00 00 00 F7 E2 C1 EA 04 8D 14 92 89 55 E8 89 D0 C1 E0 06 89 04 24 E8 66 AC FF FF
+
+. 0 3A96ABB6 24
+. 89 06 8B 83 D0 00 00 00 8D 93 90 E3 FF FF 8B 00 85 C0 0F 84 57 02 00 00
+
+. 0 3A96ABCE 106
+. 89 83 DC F9 FF FF 31 D2 8B 8B C8 00 00 00 89 93 D4 00 00 00 89 C2 8D 83 1C D6 FF FF 89 45 EC 8B 45 E8 8D BB 17 F0 FF FF C7 45 D8 00 00 00 00 C1 E0 02 89 7D E0 89 45 E4 89 16 8B 7D E0 83 C6 04 C7 42 08 00 00 00 00 8B 45 EC 89 7A 04 8B 7D D8 89 42 0C 8B BC BB 14 D6 FF FF 8D 44 07 01 89 45 EC 31 C0 39 C8 89 7A 10 73 13
+
+. 0 3A96AC38 19
+. 90 8D B4 26 00 00 00 00 31 FF 89 7C 82 14 40 39 C8 72 F5
+
+. 0 3A96AC40 11
+. 31 FF 89 7C 82 14 40 39 C8 72 F5
+
+. 0 3A96AC4B 16
+. FF 45 D8 C7 45 DC 00 00 00 00 83 7D D8 02 74 08
+
+. 0 3A96AC5B 24
+. 8B 7D E4 01 D7 89 7D DC 8B 45 DC 89 02 8B 45 E4 01 C2 83 7D D8 01 76 93
+
+. 0 3A96AC06 50
+. 89 16 8B 7D E0 83 C6 04 C7 42 08 00 00 00 00 8B 45 EC 89 7A 04 8B 7D D8 89 42 0C 8B BC BB 14 D6 FF FF 8D 44 07 01 89 45 EC 31 C0 39 C8 89 7A 10 73 13
+
+. 0 3A96AC63 16
+. 8B 45 DC 89 02 8B 45 E4 01 C2 83 7D D8 01 76 93
+
+. 0 3A96AC73 27
+. C7 06 00 00 00 00 8B B3 94 F9 FF FF BF 09 00 00 00 89 BB D8 00 00 00 85 F6 74 2B
+
+. 0 3A96AC8E 10
+. 8B 96 8C 00 00 00 85 D2 75 4F
+
+. 0 3A96AC98 22
+. 8B 56 54 BF FF FF FF FF 89 BE CC 01 00 00 85 D2 0F 85 2B 01 00 00
+
+. 0 3A96ACAE 18
+. BF FF FF FF FF 89 BE 80 01 00 00 8B 75 F0 85 F6 74 08
+
+. 0 3A96ACC0 8
+. 8B 7D F0 80 3F 00 75 45
+
+. 0 3A96AD0D 10
+. 89 3C 24 89 FE E8 B9 A5 00 00
+
+. 0 3A975311 10
+. 31 CA 81 E2 00 01 01 01 75 4E
+
+. 0 3A97531B 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 3E
+
+. 0 3A97532B 10
+. 31 CA 81 E2 00 01 01 01 75 34
+
+. 0 3A975335 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 24
+
+. 0 3A975345 10
+. 31 CA 81 E2 00 01 01 01 75 1A
+
+. 0 3A97534F 16
+. 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 0A
+
+. 0 3A97535F 10
+. 31 CA 81 E2 00 01 01 01 74 98
+
+. 0 3A96AD17 34
+. 8D 50 01 83 C0 13 83 E0 FC 29 C4 8D 44 24 1B 83 E0 F0 89 54 24 08 89 7C 24 04 89 04 24 E8 37 AB 00 00
+
+. 0 3A975886 10
+. F7 D8 83 E0 03 29 C1 91 F3 A4
+
+. 0 3A975890 7
+. 89 C1 83 E9 20 78 3E
+
+. 0 3A975897 62
+. 8B 07 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A975899 60
+. 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A9758D5 9
+. 83 C1 20 8B 44 24 0C F3 A4
+
+. 0 3A96AD39 14
+. 89 C7 0F B6 06 BA 01 00 00 00 84 C0 74 1D
+
+. 0 3A96AD47 29
+. 89 F6 8D BC 27 00 00 00 00 2C 3A 3C 01 0F 96 C0 0F B6 C0 46 01 C2 0F B6 06 84 C0 75 EC
+
+. 0 3A96AD50 20
+. 2C 3A 3C 01 0F 96 C0 0F B6 C0 46 01 C2 0F B6 06 84 C0 75 EC
+
+. 0 3A96AD64 15
+. 8D 04 95 04 00 00 00 89 04 24 E8 A9 AA FF FF
+
+. 0 3A96AD73 22
+. 89 83 BC 00 00 00 85 C0 89 C6 8D 93 90 E3 FF FF 0F 84 9C 00 00 00
+
+. 0 3A96AD89 40
+. 31 C9 8D 83 2A F0 FF FF 89 F2 89 4C 24 08 8D 8B 3A F0 FF FF 89 44 24 04 8B 83 30 01 00 00 89 04 24 89 F8 E8 0F DD FF FF
+
+. 0 3A968AC0 14
+. 55 89 E5 57 56 53 83 EC 3C E8 5D CE 00 00
+
+. 0 3A968ACE 52
+. 81 C3 9E FB 00 00 89 45 F0 8D 45 F0 89 55 EC 89 4D E8 C7 45 E0 00 00 00 00 89 45 C4 8D B6 00 00 00 00 8B 45 E8 89 44 24 04 8B 55 C4 89 14 24 E8 2E AA 00 00
+
+. 0 3A973530 14
+. 55 89 E5 57 56 8B 45 08 8B 38 85 FF 74 51
+
+. 0 3A97353E 9
+. 0F B6 07 89 FE 84 C0 74 41
+
+. 0 3A973547 19
+. 89 F6 8D BC 27 00 00 00 00 8B 4D 0C 0F B6 11 38 C2 74 12
+
+. 0 3A97355A 14
+. 8D B6 00 00 00 00 41 0F B6 11 84 D2 74 18
+
+. 0 3A973568 4
+. 38 C2 75 F4
+
+. 0 3A973560 8
+. 41 0F B6 11 84 D2 74 18
+
+. 0 3A973580 8
+. 46 0F B6 06 84 C0 75 C8
+
+. 0 3A973550 10
+. 8B 4D 0C 0F B6 11 38 C2 74 12
+
+. 0 3A97356C 6
+. 84 D2 89 F6 75 23
+
+. 0 3A973595 6
+. C6 06 00 46 EB EF
+
+. 0 3A97358A 11
+. 8B 45 08 89 30 5E 89 F8 5F 5D C3
+
+. 0 3A968B02 11
+. 89 45 E4 85 C0 0F 84 EE 00 00 00
+
+. 0 3A968B0D 8
+. 89 04 24 E8 BB C7 00 00
+
+. 0 3A968B15 7
+. 89 45 D8 85 C0 75 09
+
+. 0 3A968B25 6
+. 83 7D D8 01 76 11
+
+. 0 3A968B2B 17
+. 8B 4D E4 8B 7D D8 80 7C 0F FF 2F 0F 84 D6 00 00 00
+
+. 0 3A968B3C 7
+. 8B 45 D8 85 C0 74 15
+
+. 0 3A968B43 13
+. 8B 4D E4 8B 7D D8 80 7C 0F FF 2F 74 08
+
+. 0 3A968B50 19
+. C6 04 0F 2F 47 89 7D D8 8B 45 08 85 C0 0F 85 D2 01 00 00
+
+. 0 3A968B63 13
+. 8B 93 DC F9 FF FF 85 D2 89 55 DC 75 15
+
+. 0 3A968B85 11
+. 8B 4D DC 8B 7D D8 39 79 10 75 E5
+
+. 0 3A968B75 16
+. 8B 55 DC 8B 12 85 D2 89 55 DC 0F 84 AC 00 00 00
+
+. 0 3A968C31 9
+. 8B 4D 10 31 F6 85 C9 74 0E
+
+. 0 3A968C48 24
+. 8B 83 C8 00 00 00 8B 55 D8 8D 04 86 8D 44 02 15 89 04 24 E8 BC CB FF FF
+
+. 0 3A968C60 11
+. 89 45 DC 85 C0 0F 84 54 01 00 00
+
+. 0 3A968C6B 41
+. 8B 4D DC 8B 83 C8 00 00 00 8B 7D D8 8D 44 81 14 89 41 0C 89 7C 24 08 8B 45 E4 89 44 24 04 8B 41 0C 89 04 24 E8 DC CA 00 00
+
+. 0 3A975786 10
+. F7 D8 83 E0 03 29 C1 91 F3 A4
+
+. 0 3A975790 7
+. 89 C1 83 E9 20 78 3E
+
+. 0 3A975797 62
+. 8B 07 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A9757D5 5
+. 83 C1 20 F3 A4
+
+. 0 3A968C94 17
+. C6 00 00 8B 55 DC 3B BB D8 00 00 00 89 7A 10 76 06
+
+. 0 3A968CA5 31
+. 89 BB D8 00 00 00 8B 4D E4 31 C0 80 39 2F 8B 8B C8 00 00 00 0F 95 C0 31 D2 01 C0 39 CA 73 18
+
+. 0 3A968CC4 24
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 7D DC 89 44 97 14 42 39 CA 72 F4
+
+. 0 3A968CD0 12
+. 8B 7D DC 89 44 97 14 42 39 CA 72 F4
+
+. 0 3A968CDC 20
+. 8B 55 0C 8B 45 DC 89 50 04 8B 55 10 85 D2 0F 84 C0 00 00 00
+
+. 0 3A968DB0 15
+. 8B 7D DC C7 47 08 00 00 00 00 E9 53 FF FF FF
+
+. 0 3A968D12 35
+. 8B 83 DC F9 FF FF 8B 55 DC 8B 4D E0 8B 7D EC 89 02 89 93 DC F9 FF FF 89 14 8F 41 89 4D E0 E9 BB FD FF FF
+
+. 0 3A968AF0 18
+. 8B 45 E8 89 44 24 04 8B 55 C4 89 14 24 E8 2E AA 00 00
+
+. 0 3A9752F6 8
+. 38 30 0F 84 88 00 00 00
+
+. 0 3A9752FE 19
+. 40 31 D2 8B 08 83 C0 04 29 CA 81 C1 FF FE FE FE 4A 73 58
+
+. 0 3A975385 6
+. 40 2B 44 24 04 C3
+
+. 0 3A968CAB 25
+. 8B 4D E4 31 C0 80 39 2F 8B 8B C8 00 00 00 0F 95 C0 31 D2 01 C0 39 CA 73 18
+
+. 0 3A968B90 16
+. FC 8B 45 D8 8B 79 0C 8B 75 E4 39 C0 89 C1 F3 A6
+
+. 0 3A968B9E 2
+. F3 A6
+
+. 0 3A968BA0 2
+. 75 D3
+
+. 0 3A968BA2 11
+. 8B 75 DC 85 F6 0F 84 84 00 00 00
+
+. 0 3A968BAD 7
+. 31 C0 3B 45 E0 73 11
+
+. 0 3A968BB4 11
+. 8B 4D EC 8B 7D DC 39 3C 81 74 06
+
+. 0 3A968BBF 6
+. 40 3B 45 E0 72 EF
+
+. 0 3A968BC5 9
+. 3B 45 E0 0F 85 22 FF FF FF
+
+. 0 3A973588 13
+. 31 F6 8B 45 08 89 30 5E 89 F8 5F 5D C3
+
+. 0 3A968B1C 15
+. 8D 93 2C D6 FF FF 89 55 E4 83 7D D8 01 76 11
+
+. 0 3A968B58 11
+. 8B 45 08 85 C0 0F 85 D2 01 00 00
+
+. 0 3A97358F 6
+. 5E 89 F8 5F 5D C3
+
+. 0 3A968BFB 23
+. 8B 7D EC 8B 4D E0 89 F8 C7 04 8F 00 00 00 00 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96ADB1 12
+. 8B 83 BC 00 00 00 8B 10 85 D2 74 4D
+
+. 0 3A96ADBD 28
+. 31 C0 89 83 C0 00 00 00 8B 83 DC F9 FF FF 89 83 8C FD FF FF 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A966CFB 5
+. E9 51 FB FF FF
+
+. 0 3A966851 10
+. 8B B3 E8 F9 FF FF 85 F6 75 0E
+
+. 0 3A96685B 102
+. 8B 83 F8 F9 FF FF 8B 00 89 83 E8 F9 FF FF 0F B6 83 58 FB FF FF FF 83 98 F9 FF FF 24 FC 0C 01 88 83 58 FB FF FF 8D 83 94 F9 FF FF 89 C2 89 85 D0 FE FF FF 83 C2 50 8B 83 94 F9 FF FF 83 83 C4 F9 FF FF 01 89 50 0C 8B 8B 94 F9 FF FF 83 93 C8 F9 FF FF 00 83 BB 78 FD FF FF FE 89 8B F4 F9 FF FF 0F 84 17 04 00 00
+
+. 0 3A966CD8 11
+. 8B 09 B8 FF FF FF FF 85 C9 74 02
+
+. 0 3A966CE5 11
+. 89 83 78 FD FF FF E9 D1 FB FF FF
+
+. 0 3A9668C1 41
+. 8B 83 78 FB FF FF 8B 50 1C 89 C6 01 D6 0F B7 50 2C 89 B3 28 FB FF FF 66 89 93 30 FB FF FF 0F B7 50 2C 89 D0 4A 85 C0 74 22
+
+. 0 3A9668EA 26
+. 89 D0 C1 E0 05 01 F0 89 D1 89 D7 C1 E1 05 81 38 52 E5 74 64 0F 84 46 14 00 00
+
+. 0 3A966904 8
+. 4A 83 E8 20 85 FF 75 E5
+
+. 0 3A9668F1 19
+. 89 D1 89 D7 C1 E1 05 81 38 52 E5 74 64 0F 84 46 14 00 00
+
+. 0 3A96690C 30
+. 8B B3 B0 00 00 00 31 C0 89 85 14 FF FF FF 31 C0 85 F6 89 85 10 FF FF FF 0F 85 B0 12 00 00
+
+. 0 3A967BDA 14
+. 89 34 24 8D BD 3C FF FF FF E8 E8 D6 00 00
+
+. 0 3A967BE8 34
+. 8D 50 01 83 C0 13 83 E0 FC 29 C4 8D 44 24 2F 83 E0 F0 89 54 24 08 89 74 24 04 89 04 24 E8 66 DC 00 00
+
+. 0 3A967C0A 36
+. 89 85 3C FF FF FF 8D 93 D7 ED FF FF 89 95 60 FE FF FF 8B 8D 60 FE FF FF 89 3C 24 89 4C 24 04 E8 02 B9 00 00
+
+. 0 3A967C2E 10
+. 85 C0 89 C6 0F 84 F2 EC FF FF
+
+. 0 3A967C38 5
+. 80 3E 00 74 DF
+
+. 0 3A967C3D 14
+. 8B 83 30 01 00 00 85 C0 0F 85 2A 05 00 00
+
+. 0 3A967C4B 37
+. 8B 83 94 F9 FF FF 31 C9 31 D2 89 4C 24 08 B9 01 00 00 00 89 54 24 04 89 F2 C7 04 24 01 00 00 00 E8 D0 31 00 00
+
+. 0 3A96AE40 17
+. 55 89 E5 57 56 53 81 EC 58 02 00 00 E8 DA AA 00 00
+
+. 0 3A96AE51 34
+. 81 C3 1B D8 00 00 89 85 D8 FD FF FF 8B B3 94 F9 FF FF 89 95 D4 FD FF FF 89 8D D0 FD FF FF 85 F6 74 3F
+
+. 0 3A96AE73 22
+. 8D B6 00 00 00 00 8D BC 27 00 00 00 00 F6 86 75 01 00 00 02 75 22
+
+. 0 3A96AE89 13
+. 8B 85 D4 FD FF FF 89 F2 E8 EA 5C 00 00
+
+. 0 3A970B80 21
+. 55 89 E5 57 89 C7 56 89 D6 53 83 EC 08 8B 42 04 E8 96 4D 00 00
+
+. 0 3A970B95 18
+. 81 C3 D7 7A 00 00 89 3C 24 89 44 24 04 E8 D9 46 00 00
+
+. 0 3A975280 33
+. 55 89 E5 56 83 EC 04 8B 4D 08 8B 55 0C 8D 76 00 0F B6 31 41 0F B6 02 42 88 45 FB 89 F0 84 C0 74 16
+
+. 0 3A9752A1 7
+. 89 F0 3A 45 FB 74 E8
+
+. 0 3A9752A8 15
+. 0F B6 D0 0F B6 45 FB 29 C2 89 D0 5A 5E 5D C3
+
+. 0 3A970BA7 9
+. 85 C0 BA 01 00 00 00 74 30
+
+. 0 3A970BB0 7
+. 8B 76 14 85 F6 74 27
+
+. 0 3A970BB7 23
+. 89 F6 8D BC 27 00 00 00 00 8B 06 89 3C 24 89 44 24 04 E8 B2 46 00 00
+
+. 0 3A970BCE 9
+. 85 C0 BA 01 00 00 00 74 09
+
+. 0 3A970BD7 7
+. 8B 76 04 85 F6 75 E2
+
+. 0 3A970BDE 12
+. 31 D2 83 C4 08 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96AE96 8
+. 85 C0 0F 85 09 01 00 00
+
+. 0 3A96AE9E 13
+. F6 86 75 01 00 00 01 0F 84 08 01 00 00
+
+. 0 3A96AFB3 11
+. 8B 56 50 85 D2 0F 84 ED FE FF FF
+
+. 0 3A96AEAB 7
+. 8B 76 0C 85 F6 75 CE
+
+. 0 3A96AE80 9
+. F6 86 75 01 00 00 02 75 22
+
+. 0 3A975290 17
+. 0F B6 31 41 0F B6 02 42 88 45 FB 89 F0 84 C0 74 16
+
+. 0 3A96AFBE 29
+. 8B 46 2C 8B 7A 04 8B 50 04 01 D7 89 7C 24 04 8B 85 D4 FD FF FF 89 04 24 E8 A5 A2 00 00
+
+. 0 3A96AFDB 8
+. 85 C0 0F 85 C8 FE FF FF
+
+. 0 3A96AEB2 13
+. F6 83 14 FC FF FF 40 0F 85 FF 03 00 00
+
+. 0 3A96AEBF 23
+. B8 2F 00 00 00 89 44 24 04 8B 85 D4 FD FF FF 89 04 24 E8 DA A1 00 00
+
+. 0 3A9750B0 32
+. 57 56 53 55 8B 44 24 14 8B 54 24 18 89 C7 31 C9 88 D6 88 D1 C1 E2 10 88 CD 09 CA 83 E7 03 74 41
+
+. 0 3A975111 27
+. 8B 08 BD FF FE FE FE BF FF FE FE FE 01 CD 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00
+
+. 0 3A97512C 17
+. 89 CB 81 CD FF FE FE FE 83 C5 01 0F 85 05 01 00 00
+
+. 0 3A97513D 22
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 0F 83 D4 00 00 00
+
+. 0 3A975153 21
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 0F 85 BF 00 00 00
+
+. 0 3A975227 7
+. 83 E8 04 84 DB 74 0F
+
+. 0 3A97523D 5
+. 5D 5B 5E 5F C3
+
+. 0 3A96AED6 8
+. 85 C0 0F 84 AA 01 00 00
+
+. 0 3A96AEDE 14
+. 8B BD D8 FD FF FF 85 FF 0F 84 79 02 00 00
+
+. 0 3A96AEEC 17
+. 8B 95 D4 FD FF FF 8B 85 D8 FD FF FF E8 D3 F6 FF FF
+
+. 0 3A96A5D0 32
+. 55 B9 24 00 00 00 89 E5 83 EC 24 89 5D F4 89 75 F8 31 F6 89 7D FC 89 D7 89 45 F0 E8 3B B3 00 00
+
+. 0 3A96A5F0 18
+. 81 C3 7C E0 00 00 89 4C 24 04 89 14 24 E8 AE AA 00 00
+
+. 0 3A975168 13
+. 31 CD 01 CF 8D 40 04 0F 83 CD 00 00 00
+
+. 0 3A975175 17
+. 89 CB 81 CD FF FE FE FE 83 C5 01 0F 85 BC 00 00 00
+
+. 0 3A975186 22
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 0F 83 8B 00 00 00
+
+. 0 3A97519C 17
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 75 7A
+
+. 0 3A9751AD 13
+. 31 CD 01 CF 8D 40 04 0F 83 88 00 00 00
+
+. 0 3A9751BA 13
+. 89 CB 81 CD FF FE FE FE 83 C5 01 75 7B
+
+. 0 3A9751C7 18
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 73 4E
+
+. 0 3A9751D9 17
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 75 3D
+
+. 0 3A9751EA 9
+. 31 CD 01 CF 8D 40 04 73 4F
+
+. 0 3A9751F3 13
+. 89 CB 81 CD FF FE FE FE 83 C5 01 75 42
+
+. 0 3A975200 18
+. BE FF FE FE FE 31 D3 8B 08 01 DE BF FF FE FE FE 73 15
+
+. 0 3A975212 21
+. 89 FD 31 DE 01 CD 81 CE FF FE FE FE 83 C6 01 0F 84 F8 FE FF FF
+
+. 0 3A97511F 13
+. 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00
+
+. 0 3A975242 7
+. 83 E8 04 38 D1 74 F4
+
+. 0 3A975249 5
+. 80 F9 00 74 1C
+
+. 0 3A97524E 5
+. 40 38 D5 74 EA
+
+. 0 3A975253 5
+. 80 FD 00 74 12
+
+. 0 3A97526A 7
+. 31 C0 5D 5B 5E 5F C3
+
+. 0 3A96A602 4
+. 85 C0 75 41
+
+. 0 3A96A606 4
+. 85 F6 75 52
+
+. 0 3A96A60A 11
+. 89 3C 24 8D 76 00 E8 BB AC 00 00
+
+. 0 3A96A615 11
+. 8D 70 01 89 34 24 E8 FC B1 FF FF
+
+. 0 3A96A620 6
+. 31 D2 85 C0 74 12
+
+. 0 3A96A626 16
+. 89 74 24 08 89 7C 24 04 89 04 24 E8 3A B2 00 00
+
+. 0 3A96A636 17
+. 89 C2 89 D0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96AEFD 14
+. 89 85 E0 FD FF FF 85 C0 0F 85 05 01 00 00
+
+. 0 3A96B010 19
+. 8D 8D E4 FD FF FF 89 CA 89 8D B8 FD FF FF E8 FD DE FF FF
+
+. 0 3A968F20 16
+. 55 89 E5 57 56 31 F6 53 83 EC 4C E8 FB C9 00 00
+
+. 0 3A968F30 31
+. 81 C3 3C F7 00 00 89 45 D0 89 55 CC C7 45 C4 00 00 00 00 89 74 24 04 89 04 24 E8 E1 B4 00 00
+
+. 0 3A974430 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 05 00 00 00 CD 80
+
+. 0 3A974444 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 0 3A97444C 1
+. C3
+
+. 0 3A968F4F 12
+. 89 45 C8 83 F8 FF 0F 84 F5 01 00 00
+
+. 0 3A968F5B 38
+. 31 C9 BA 00 02 00 00 89 8B 48 01 00 00 89 54 24 08 8B 55 CC 83 C2 04 89 55 B8 89 54 24 04 89 04 24 E8 2F B5 00 00
+
+. 0 3A9744B0 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 03 00 00 00 CD 80
+
+. 0 3A9744C4 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 0 3A9744CC 1
+. C3
+
+. 0 3A968F81 14
+. 8B 75 CC 83 F8 33 89 06 0F 8E 10 02 00 00
+
+. 0 3A968F8F 17
+. FC 8B 75 B8 B9 09 00 00 00 8D BB 2F D6 FF FF F3 A6
+
+. 0 3A968F9E 2
+. F3 A6
+
+. 0 3A968FA0 6
+. 0F 85 15 02 00 00
+
+. 0 3A968FA6 19
+. 8B 75 B8 8D 93 B4 E3 FF FF 83 7E 14 01 0F 85 20 01 00 00
+
+. 0 3A968FB9 11
+. 66 83 7E 12 03 0F 85 6E 01 00 00
+
+. 0 3A968FC4 17
+. 66 83 7E 2A 20 8D 93 E0 E3 FF FF 0F 85 04 01 00 00
+
+. 0 3A968FD5 13
+. 0F B7 46 10 83 F8 03 0F 85 4E 02 00 00
+
+. 0 3A968FE2 40
+. 8B 7D B8 8B 57 1C 0F B7 4F 2C 8B 7D CC 8D 7C 3A 04 89 7D C0 0F B7 F1 8B 7D CC C1 E6 05 8D 04 16 3B 07 0F 87 7D 00 00 00
+
+. 0 3A96900A 18
+. 8B 75 C0 0F B7 C1 C1 E0 05 89 75 BC 01 F0 39 C6 72 1D
+
+. 0 3A969039 8
+. 8B 7D BC 83 3F 04 75 E0
+
+. 0 3A969021 24
+. 83 45 BC 20 0F B7 C1 8B 55 C0 C1 E0 05 01 D0 39 45 BC 0F 83 17 01 00 00
+
+. 0 3A969150 11
+. 8B 45 C8 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96B023 13
+. 89 85 CC FD FF FF 40 0F 84 80 04 00 00
+
+. 0 3A96B030 78
+. 8B 83 28 01 00 00 89 85 DC FD FF FF 8D 85 DC FD FF FF 89 44 24 10 8B 45 10 89 44 24 0C 8B 4D 08 89 4C 24 08 8B 85 D8 FD FF FF 89 44 24 04 8B 85 E0 FD FF FF 89 04 24 8B 8D B8 FD FF FF 8B 95 CC FD FF FF 8B 85 D4 FD FF FF E8 12 E7 FF FF
+
+. 0 3A969790 51
+. 55 89 E5 57 56 53 81 EC 08 01 00 00 89 45 90 8D 45 94 89 4D 88 31 C9 89 55 8C C7 45 84 00 00 00 00 89 8D 74 FF FF FF 89 44 24 08 8B 45 8C E8 68 C1 00 00
+
+. 0 3A9697C3 22
+. 81 C3 A9 EE 00 00 C7 04 24 03 00 00 00 89 44 24 04 E8 47 AB 00 00
+
+. 0 3A974320 14
+. 55 89 E5 83 EC 58 89 5D F4 E8 FD 15 00 00
+
+. 0 3A97432E 24
+. 81 C3 3E 43 00 00 89 75 F8 8B B3 24 00 00 00 89 7D FC 8B 06 85 C0 75 7A
+
+. 0 3A974346 21
+. 8B BB 48 01 00 00 8B 55 0C 8B 4D 10 87 D3 B8 C5 00 00 00 CD 80
+
+. 0 3A97435B 13
+. 87 D3 3D 00 F0 FF FF 0F 87 AE 00 00 00
+
+. 0 3A974368 7
+. 83 F8 FF 89 C2 74 31
+
+. 0 3A97436F 4
+. 85 D2 75 1D
+
+. 0 3A974373 11
+. 8B 4D 10 8B 41 58 39 41 0C 74 12
+
+. 0 3A974390 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9697D9 8
+. 85 C0 0F 88 ED 05 00 00
+
+. 0 3A9697E1 13
+. 8B 93 94 F9 FF FF 85 D2 89 55 84 74 6F
+
+. 0 3A9697EE 59
+. 8B 45 94 8B 55 98 8B 75 EC 89 85 6C FF FF FF 8B 7D F0 89 95 70 FF FF FF 8D 76 00 8D BC 27 00 00 00 00 8B 4D 84 8B 91 C8 01 00 00 8B 81 C4 01 00 00 89 D1 31 F9 31 F0 09 C1 75 27
+
+. 0 3A969850 13
+. 8B 45 84 8B 40 0C 85 C0 89 45 84 75 B3
+
+. 0 3A969810 25
+. 8B 4D 84 8B 91 C8 01 00 00 8B 81 C4 01 00 00 89 D1 31 F9 31 F0 09 C1 75 27
+
+. 0 3A96985D 12
+. 31 C0 F6 45 14 04 0F 85 5D 05 00 00
+
+. 0 3A969869 13
+. F6 83 14 FC FF FF 40 0F 85 DA 07 00 00
+
+. 0 3A969876 29
+. 8B 4D 88 8B 45 0C 83 C1 04 89 4D 80 89 04 24 8B 55 90 8B 4D 10 8B 45 08 E8 3D 31 00 00
+
+. 0 3A973780 69
+. 89 93 38 01 00 00 8B 83 18 FC FF FF 31 C9 89 4C 24 14 BA FF FF FF FF 89 54 24 10 8D 74 38 FF F7 D8 C7 04 24 00 00 00 00 21 C6 B8 22 00 00 00 89 44 24 0C B8 03 00 00 00 89 44 24 08 89 74 24 04 E8 8B 14 00 00
+
+. 0 3A974C50 13
+. 89 DA B8 5A 00 00 00 8D 5C 24 04 CD 80
+
+. 0 3A974C5D 9
+. 89 D3 3D 00 F0 FF FF 77 01
+
+. 0 3A974C66 1
+. C3
+
+. 0 3A9737C5 8
+. 3B 83 3C 01 00 00 74 06
+
+. 0 3A9737CD 50
+. 89 83 38 01 00 00 8B 93 38 01 00 00 01 F0 89 83 3C 01 00 00 89 93 40 01 00 00 89 D0 8D 14 17 89 93 38 01 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96CAB0 9
+. 89 C2 8B 40 0C 85 C0 74 10
+
+. 0 3A96CAB9 16
+. 8D B4 26 00 00 00 00 89 C2 8B 40 0C 85 C0 75 F7
+
+. 0 3A96CAC9 62
+. 8B 45 E0 89 50 10 89 42 0C 8B 90 B0 01 00 00 8B 83 94 F9 FF FF 05 50 01 00 00 89 04 8A B9 01 00 00 00 FF 83 98 F9 FF FF 83 83 C4 F9 FF FF 01 83 93 C8 F9 FF FF 00 85 FF 0F 84 51 01 00 00
+
+. 0 3A96CB07 10
+. 8B 87 60 01 00 00 85 C0 74 1B
+
+. 0 3A96CB36 4
+. 3B 02 74 03
+
+. 0 3A96CB3D 34
+. 8B 45 E0 8B 55 E0 8B 4D F0 05 50 01 00 00 89 82 B4 01 00 00 0F B6 09 84 C9 88 4D DF 0F 84 C7 00 00 00
+
+. 0 3A96CB5F 11
+. 8B 45 F0 89 04 24 E8 66 87 00 00
+
+. 0 3A96CB6A 14
+. 40 80 7D DF 2F 89 45 D8 0F 84 E8 00 00 00
+
+. 0 3A96CC60 8
+. 89 04 24 E8 B4 8B FF FF
+
+. 0 3A96CC68 10
+. 85 C0 89 C7 0F 85 79 FF FF FF
+
+. 0 3A96CBEB 22
+. 8B 55 D8 89 54 24 08 8B 4D F0 89 04 24 89 4C 24 04 E8 6F 8B 00 00
+
+. 0 3A975799 60
+. 8B 57 1C 83 E9 20 8B 06 8B 56 04 89 07 89 57 04 8B 46 08 8B 56 0C 89 47 08 89 57 0C 8B 46 10 8B 56 14 89 47 10 89 57 14 8B 46 18 8B 56 1C 89 47 18 89 57 1C 8D 76 20 8D 7F 20 79 C4
+
+. 0 3A96CC01 2
+. EB 0D
+
+. 0 3A96CC10 6
+. 48 80 38 2F 75 FA
+
+. 0 3A96CC16 4
+. 39 F8 74 69
+
+. 0 3A96CC1A 25
+. C6 00 00 8B 45 E0 89 B8 90 01 00 00 8B 45 E0 83 C4 2C 5B 5E 5F 5D C2 04 00
+
+. 0 3A969893 20
+. 89 45 84 83 EC 04 85 C0 8D 93 94 E4 FF FF 0F 84 CD 04 00 00
+
+. 0 3A9698A7 79
+. 8B 75 80 8B 55 84 8B 46 18 0F B7 4E 10 89 82 48 01 00 00 0F B7 46 2C 89 8D 78 FF FF FF 8B 4D 88 66 89 82 4C 01 00 00 8B 55 80 0F B7 76 2C 8B 42 1C C1 E6 05 89 B5 28 FF FF FF 8D 14 06 8D 44 08 04 3B 11 89 85 7C FF FF FF 0F 87 1F 04 00 00
+
+. 0 3A9698F6 89
+. 89 A5 64 FF FF FF 8B 4D 84 BF 07 00 00 00 89 BD 68 FF FF FF C6 85 5B FF FF FF 00 0F B7 91 4C 01 00 00 8D 44 52 FD 8D 04 C5 28 00 00 00 29 C4 C1 E2 05 8D 74 24 43 83 E6 F0 89 B5 60 FF FF FF 31 F6 89 B5 5C FF FF FF 8B B5 7C FF FF FF 89 F0 01 D0 39 C6 0F 83 87 04 00 00
+
+. 0 3A96994F 20
+. 89 85 54 FF FF FF 8B 85 60 FF FF FF 89 85 2C FF FF FF EB 28
+
+. 0 3A96998B 7
+. 8B 06 83 F8 06 74 33
+
+. 0 3A969992 5
+. 83 F8 06 76 CC
+
+. 0 3A969963 9
+. 83 F8 01 0F 84 CE 02 00 00
+
+. 0 3A969C3A 18
+. 8B 83 18 FC FF FF 8B 56 1C 48 85 D0 0F 85 99 05 00 00
+
+. 0 3A969C4C 23
+. 8B 46 08 4A 8B 4E 04 89 85 50 FF FF FF 29 C8 85 C2 0F 85 8D 05 00 00
+
+. 0 3A969C63 112
+. FF 85 5C FF FF FF 8B BD 2C FF FF FF 8B 85 50 FF FF FF 8D 4F 18 89 8D 2C FF FF FF 89 D1 F7 D1 21 C1 8B 85 2C FF FF FF 89 48 E8 8B 56 08 8B 46 10 01 D0 8B 93 18 FC FF FF 8D 44 02 FF F7 DA 21 D0 8B 95 2C FF FF FF 89 42 EC 8B 46 10 03 46 08 89 42 F0 8B 46 14 03 46 08 89 42 F4 8B 46 1C 48 F7 D0 23 46 04 83 BD 5C FF FF FF 01 89 42 F8 76 0C
+
+. 0 3A969CDF 27
+. 8B 4E 18 B8 40 62 51 73 83 E1 07 C1 E1 02 D3 F8 83 E0 0F 89 47 14 E9 86 FC FF FF
+
+. 0 3A969980 11
+. 83 C6 20 3B B5 54 FF FF FF 73 51
+
+. 0 3A969CD3 5
+. 39 4F EC 74 07
+
+. 0 3A96996C 9
+. 83 F8 02 0F 84 85 03 00 00
+
+. 0 3A969CFA 27
+. 8B 46 08 8B 55 84 89 42 08 8B 46 14 C1 E8 03 66 89 82 4E 01 00 00 E9 6B FC FF FF
+
+. 0 3A969997 11
+. 3D 51 E5 74 64 0F 84 8A 02 00 00
+
+. 0 3A969C2C 14
+. 8B 4E 18 89 8D 68 FF FF FF E9 46 FD FF FF
+
+. 0 3A9699DC 14
+. 8B 85 5C FF FF FF 85 C0 0F 84 EC 03 00 00
+
+. 0 3A9699EA 50
+. 8B 8D 5C FF FF FF 8B BD 60 FF FF FF 8D 04 49 8D 04 C7 8B 17 8B 70 F4 89 85 4C FF FF FF 29 D6 83 BD 78 FF FF FF 03 89 B5 28 FF FF FF 0F 85 DF 07 00 00
+
+. 0 3A969A1C 50
+. 8B 83 78 FD FF FF 21 C2 8B 47 10 89 44 24 14 8B 45 8C 89 44 24 10 B8 02 00 00 00 89 44 24 0C 8B 47 14 89 14 24 89 74 24 04 89 44 24 08 E8 02 B2 00 00
+
+. 0 3A969A4E 16
+. 8B 55 84 89 82 94 01 00 00 40 0F 84 98 01 00 00
+
+. 0 3A969A5E 14
+. 8B B3 40 FC FF FF 85 F6 0F 85 FE 05 00 00
+
+. 0 3A96A06A 36
+. B9 03 00 00 00 89 4C 24 08 8B 8D 28 FF FF FF 89 4C 24 04 8B 75 84 8B 86 94 01 00 00 89 04 24 E8 82 AC 00 00
+
+. 0 3A974D10 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 DB 00 00 00 CD 80
+
+. 0 3A974D24 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 0 3A974D2C 1
+. C3
+
+. 0 3A96A08E 5
+. E9 D9 F9 FF FF
+
+. 0 3A969A6C 46
+. 8B 55 84 8B 8D 28 FF FF FF 8B 75 84 8B 82 94 01 00 00 8D 14 01 89 96 98 01 00 00 8B 17 29 D0 80 BD 5B FF FF FF 00 89 06 0F 85 F9 05 00 00
+
+. 0 3A969A9A 13
+. 8B 4D 84 8B 81 44 01 00 00 85 C0 75 49
+
+. 0 3A969AA7 19
+. 8B 45 80 8B 77 10 8B 48 1C 89 B5 48 FF FF FF 39 CE 77 36
+
+. 0 3A969ABA 33
+. 0F B7 40 2C 8B 37 8B 57 04 89 B5 44 FF FF FF C1 E0 05 29 F2 03 95 48 FF FF FF 8D 04 08 39 C2 72 15
+
+. 0 3A969ADB 35
+. 89 F0 8B 55 84 01 C8 8B 8D 48 FF FF FF 29 C8 89 82 44 01 00 00 8B 4F 0C 8B 57 08 39 D1 0F 86 8F 00 00 00
+
+. 0 3A969B8D 29
+. 8B B5 5C FF FF FF 83 C7 18 8B 95 60 FF FF FF 8D 04 76 8D 04 C2 39 C7 0F 83 3A 02 00 00
+
+. 0 3A969BAA 11
+. 8B 07 39 47 04 0F 86 E5 FE FF FF
+
+. 0 3A969BB5 58
+. 8B 47 10 89 44 24 14 8B 75 8C B8 12 00 00 00 89 44 24 0C 89 74 24 10 8B 47 14 89 44 24 08 8B 47 04 8B 37 29 F0 89 44 24 04 8B 55 84 8B 07 8B 0A 01 C8 89 04 24 E8 61 B0 00 00
+
+. 0 3A969BEF 7
+. 40 0F 85 A4 FE FF FF
+
+. 0 3A969AF0 14
+. 8B 4F 0C 8B 57 08 39 D1 0F 86 8F 00 00 00
+
+. 0 3A969AFE 45
+. 8B 75 84 8B 83 18 FC FF FF 8B 36 01 F2 01 F1 89 95 40 FF FF FF 8D 54 10 FF F7 D8 89 8D 3C FF FF FF 21 C2 39 D1 89 95 38 FF FF FF 73 06
+
+. 0 3A969B2B 20
+. 89 8D 38 FF FF FF 8B 8D 40 FF FF FF 39 8D 38 FF FF FF 76 3C
+
+. 0 3A969B3F 11
+. 8B 47 14 A8 02 0F 84 8E 08 00 00
+
+. 0 3A969B4A 38
+. 8B 85 38 FF FF FF 8B 95 40 FF FF FF 29 D0 89 44 24 08 31 C0 89 44 24 04 8B B5 40 FF FF FF 89 34 24 E8 90 BB 00 00
+
+. 0 3A975700 31
+. 57 8B 7C 24 08 8B 54 24 10 8A 44 24 0C 88 C4 89 C1 C1 E0 10 66 89 C8 FC 83 FA 24 89 D1 7C 37
+
+. 0 3A975756 5
+. C1 E9 02 F3 AB
+
+. 0 3A975759 2
+. F3 AB
+
+. 0 3A97575B 7
+. 89 D1 83 E1 03 F3 AA
+
+. 0 3A975762 6
+. 8B 44 24 08 5F C3
+
+. 0 3A969B70 11
+. 8B 47 14 A8 02 0F 84 37 08 00 00
+
+. 0 3A969B7B 18
+. 8B 85 38 FF FF FF 39 85 3C FF FF FF 0F 87 E4 05 00 00
+
+. 0 3A969DE4 17
+. 8B 4D 84 8B 81 44 01 00 00 85 C0 0F 84 F3 02 00 00
+
+. 0 3A969DF5 30
+. 8B 4D 84 8B 31 01 F0 89 81 44 01 00 00 8B A5 64 FF FF FF 8B 75 8C 89 34 24 E8 5D A6 00 00
+
+. 0 3A974470 13
+. 89 DA 8B 5C 24 04 B8 06 00 00 00 CD 80
+
+. 0 3A97447D 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3A974486 1
+. C3
+
+. 0 3A969E13 42
+. C7 45 8C FF FF FF FF 8B 45 84 0F B6 88 74 01 00 00 88 C8 24 03 FE C8 0F 94 C2 83 BD 78 FF FF FF 02 0F 94 C0 21 D0 A8 01 74 0C
+
+. 0 3A969E49 14
+. 8B 4D 84 8B 41 08 85 C0 0F 85 82 02 00 00
+
+. 0 3A96A0D9 15
+. 8B 75 84 8B 16 01 D0 89 46 08 E9 87 FD FF FF
+
+. 0 3A969E6F 22
+. 8B 4D 84 01 91 48 01 00 00 F6 83 14 FC FF FF 40 0F 85 AB 03 00 00
+
+. 0 3A969E85 14
+. 8B 45 84 8B 70 08 85 F6 0F 84 0B 01 00 00
+
+. 0 3A969E93 11
+. 8B 16 89 C7 83 C7 18 85 D2 75 10
+
+. 0 3A969EAE 5
+. 83 FA 21 7E ED
+
+. 0 3A969EA0 14
+. 89 34 97 83 C6 08 8B 06 85 C0 89 C2 74 38
+
+. 0 3A969EB3 12
+. B8 FF FF FF 6F 29 D0 83 F8 0F 77 0C
+
+. 0 3A969EBF 12
+. B8 21 00 00 70 29 D0 89 34 87 EB D8
+
+. 0 3A969EA3 11
+. 83 C6 08 8B 06 85 C0 89 C2 74 38
+
+. 0 3A969EE6 9
+. 8B 4D 84 8B 11 85 D2 74 53
+
+. 0 3A969EEF 7
+. 8B 47 10 85 C0 74 03
+
+. 0 3A969EF6 10
+. 01 50 04 8B 47 0C 85 C0 74 03
+
+. 0 3A969F00 10
+. 01 50 04 8B 47 14 85 C0 74 03
+
+. 0 3A969F0A 10
+. 01 50 04 8B 47 18 85 C0 74 03
+
+. 0 3A969F14 10
+. 01 50 04 8B 47 1C 85 C0 74 03
+
+. 0 3A969F21 7
+. 8B 47 44 85 C0 74 03
+
+. 0 3A969F28 10
+. 01 50 04 8B 47 5C 85 C0 74 03
+
+. 0 3A969F32 13
+. 01 50 04 8B 87 C4 00 00 00 85 C0 74 03
+
+. 0 3A969F3F 10
+. 01 50 04 8B 57 78 85 D2 74 2A
+
+. 0 3A969F73 10
+. 8B 97 98 00 00 00 85 D2 74 13
+
+. 0 3A969F7D 16
+. 8B 42 04 8B 75 84 A8 01 89 86 E8 01 00 00 74 03
+
+. 0 3A969F90 7
+. 8B 47 74 85 C0 74 07
+
+. 0 3A969F9E 16
+. 8B 45 84 F6 80 E8 01 00 00 40 0F 85 88 03 00 00
+
+. 0 3A969FAE 14
+. 8B 45 84 8B 50 28 85 D2 0F 85 1E 02 00 00
+
+. 0 3A96A1DA 5
+. E8 A1 1B 00 00
+
+. 0 3A96A1DF 6
+. 90 E9 D7 FD FF FF
+
+. 0 3A969FBC 14
+. 8B 55 84 8B 42 58 85 C0 0F 85 E5 02 00 00
+
+. 0 3A969FCA 12
+. 8B 45 84 F6 80 E8 01 00 00 20 74 06
+
+. 0 3A969FD6 72
+. 89 83 CC F9 FF FF 8B 45 94 8B 55 98 8B 4D 84 89 81 BC 01 00 00 89 91 C0 01 00 00 8B 45 EC 8B 55 F0 89 81 C4 01 00 00 8B 83 00 FC FF FF 89 91 C8 01 00 00 F7 D0 21 85 68 FF FF FF F6 85 68 FF FF FF 01 0F 85 72 03 00 00
+
+. 0 3A96A01E 14
+. 8B BB 7C FD FF FF 85 FF 0F 84 97 FD FF FF
+
+. 0 3A969DC3 11
+. 8B 45 84 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96B07E 10
+. 8D 65 F4 5B 5E 5F 5D C2 0C 00
+
+. 0 3A967C70 32
+. 8B 90 70 01 00 00 83 EC 0C 42 89 90 70 01 00 00 31 C0 83 FA 01 0F 94 C0 01 85 10 FF FF FF EB 8C
+
+. 0 3A967C1C 18
+. 8B 8D 60 FE FF FF 89 3C 24 89 4C 24 04 E8 02 B9 00 00
+
+. 0 3A96692A 84
+. 8B 83 7E EC FF FF 8D 75 B4 B9 03 00 00 00 89 B5 CC FE FF FF 8D 95 38 FF FF FF 89 45 B4 8B 83 82 EC FF FF 89 45 B8 8B 83 86 EC FF FF 89 45 BC 8B 83 8A EC FF FF 89 45 C0 0F B7 83 8E EC FF FF 66 89 45 C4 0F B6 83 90 EC FF FF 88 45 C6 89 F0 E8 F2 A6 00 00
+
+. 0 3A971070 37
+. 55 89 E5 81 EC 8C 00 00 00 89 75 F8 31 F6 89 5D F4 89 7D FC 89 D7 89 4D 90 C7 45 8C FF FF FF FF E8 96 48 00 00
+
+. 0 3A971095 18
+. 81 C3 D7 75 00 00 89 74 24 04 89 04 24 E8 89 33 00 00
+
+. 0 3A97444D 5
+. E8 D5 14 00 00
+
+. 0 3A975927 4
+. 8B 0C 24 C3
+
+. 0 3A974452 21
+. 81 C1 1A 42 00 00 31 D2 29 C2 89 91 48 01 00 00 83 C8 FF EB E5
+
+. 0 3A9710A7 6
+. 85 C0 89 C6 78 2C
+
+. 0 3A9710D9 16
+. 8B 45 8C 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96697E 15
+. 89 85 0C FF FF FF 83 F8 FF 0F 85 E1 14 00 00
+
+. 0 3A96698D 20
+. 8B 83 94 F9 FF FF 8B B8 A4 00 00 00 85 FF 0F 84 04 0B 00 00
+
+. 0 3A9669A1 14
+. 8B B5 10 FF FF FF 85 F6 0F 85 79 14 00 00
+
+. 0 3A967E28 65
+. 8B B5 10 FF FF FF 31 C9 89 8D 04 FF FF FF 8D 04 B5 10 00 00 00 29 C4 8D 54 24 2F 8B 83 F0 F9 FF FF 83 E2 F0 89 95 14 FF FF FF 8B 8D 04 FF FF FF 89 04 8A 8B 40 0C 41 89 8D 04 FF FF FF 85 C0 75 E9
+
+. 0 3A967E69 5
+. E9 41 EB FF FF
+
+. 0 3A9669AF 22
+. 8B B3 94 FD FF FF 31 FF 89 BD C4 FE FF FF 85 F6 0F 84 9F 06 00 00
+
+. 0 3A967064 44
+. 31 D2 8B 83 94 F9 FF FF 89 54 24 04 31 D2 83 BD 50 FF FF FF 03 0F 94 C2 89 14 24 8B 8D 10 FF FF FF 8B 95 14 FF FF FF E8 10 81 00 00
+
+. 0 3A96F1A0 83
+. 55 89 E5 57 56 53 81 EC A0 00 00 00 89 45 C0 8D 04 49 8D 04 85 28 00 00 00 89 55 BC 29 C4 8B 55 C0 89 4D B8 8D 44 24 1F 83 E0 F0 89 50 04 8B 4D C0 8D 50 0C 89 45 F0 C7 00 00 00 00 00 89 50 08 0F B6 81 74 01 00 00 C7 45 EC 01 00 00 00 E8 38 67 00 00
+
+. 0 3A96F1F3 31
+. 81 C3 79 94 00 00 24 9F 0C 20 88 81 74 01 00 00 8B 75 B8 C7 45 AC 00 00 00 00 39 75 AC 73 48
+
+. 0 3A96F212 72
+. 8B 45 AC 8B 55 BC 8B 75 EC 8B 3C 82 8B 45 F0 8D 14 76 C1 E2 02 46 8D 4C 02 0C C7 04 10 00 00 00 00 89 4C 10 08 89 7C 10 04 0F B6 87 74 01 00 00 89 75 EC 24 9F 0C 20 88 87 74 01 00 00 8B 4D B8 FF 45 AC 39 4D AC 72 B8
+
+. 0 3A96F25A 65
+. C7 45 A4 00 00 00 00 8B 45 EC 31 C9 C7 45 C8 00 00 00 00 8B 55 F0 8B B3 48 01 00 00 89 8B 48 01 00 00 8D 04 40 8D 44 82 F4 C7 40 08 00 00 00 00 85 D2 89 45 B0 89 75 A8 89 55 B4 0F 84 6A 03 00 00
+
+. 0 3A96F29B 42
+. C7 45 9C 00 00 00 00 8B 45 B4 8B 55 B4 C7 45 98 00 00 00 00 8B 40 04 C7 02 01 00 00 00 8B 90 50 01 00 00 89 45 A0 85 D2 75 44
+
+. 0 3A96F2C5 26
+. 8B 80 D4 01 00 00 8B 4D C0 85 C0 0F 94 C0 39 4D A0 0F 95 C2 21 D0 A8 01 74 2A
+
+. 0 3A96F309 10
+. 8B 55 A0 8B 42 1C 85 C0 75 18
+
+. 0 3A96F32B 52
+. 8B 4D A0 8B 75 08 8B 55 B4 8B 41 2C 8B 40 04 89 4D CC 8B 49 08 89 45 94 89 45 D8 8B 45 0C 89 75 D0 89 55 90 89 45 D4 8B 11 89 4D 8C 85 D2 0F 85 BD 00 00 00
+
+. 0 3A96F41C 9
+. 83 FA 01 0F 84 3F FF FF FF
+
+. 0 3A96F364 30
+. 8B 75 8C B9 24 00 00 00 8B 7D 94 8B 46 04 89 4C 24 04 01 C7 89 FE 89 3C 24 E8 2E 5D 00 00
+
+. 0 3A9750D0 4
+. 88 D1 7A 29
+
+. 0 3A9750D4 8
+. 32 08 0F 84 61 01 00 00
+
+. 0 3A9750DC 8
+. 30 D1 0F 84 86 01 00 00
+
+. 0 3A9750E4 12
+. 8A 48 01 40 38 CA 0F 84 4D 01 00 00
+
+. 0 3A9750F0 9
+. 80 F9 00 0F 84 71 01 00 00
+
+. 0 3A9750F9 4
+. 40 4F 75 14
+
+. 0 3A9750FD 10
+. 8A 08 38 CA 0F 84 36 01 00 00
+
+. 0 3A975107 9
+. 80 F9 00 0F 84 5A 01 00 00
+
+. 0 3A975110 28
+. 40 8B 08 BD FF FE FE FE BF FF FE FE FE 01 CD 31 CD 01 CF 8D 40 04 0F 83 16 01 00 00
+
+. 0 3A975258 8
+. C1 E9 10 40 38 D1 74 DD
+
+. 0 3A975260 5
+. 80 F9 00 74 05
+
+. 0 3A96F382 8
+. 85 C0 0F 85 2A 05 00 00
+
+. 0 3A96F38A 26
+. 89 75 DC 8D 45 CC 8D 55 C8 89 04 24 8D 8B 34 78 FF FF 8D 45 C4 E8 2C 0E 00 00
+
+. 0 3A9701D0 31
+. 55 89 E5 81 EC F8 00 00 00 89 85 24 FF FF FF 31 C0 89 5D F4 89 75 F8 89 7D FC E8 3C 57 00 00
+
+. 0 3A9701EF 30
+. 81 C3 7D 84 00 00 89 95 20 FF FF FF 89 8D 1C FF FF FF 89 85 2C FF FF FF FF 93 E0 F9 FF FF
+
+. 0 3A9658C0 9
+. 55 89 E5 5D E8 5E 00 01 00
+
+. 0 3A9658C9 13
+. 81 C1 A3 2D 01 00 8D 81 3C 00 00 00 C3
+
+. 0 3A97020D 25
+. 89 85 18 FF FF FF 8B 00 89 45 E4 8D 85 30 FF FF FF 89 04 24 E8 0A 4D 00 00
+
+. 0 3A974F30 36
+. 31 C0 8B 54 24 04 89 5A 00 89 72 04 89 7A 08 8D 4C 24 04 89 4A 10 8B 4C 24 00 89 4A 14 89 6A 0C 89 42 18 C3
+
+. 0 3A970226 6
+. 85 C0 89 C2 75 4E
+
+. 0 3A97022C 26
+. 8B 8D 18 FF FF FF 8D 85 28 FF FF FF 89 01 8B 45 08 89 04 24 FF 95 1C FF FF FF
+
+. 0 3A96FEA0 45
+. 55 B9 01 00 00 00 89 E5 56 83 EC 0C 8B 75 08 8B 46 08 8B 56 10 89 44 24 08 8B 46 04 89 44 24 04 8B 06 0F B6 80 74 01 00 00 A8 03 74 05
+
+. 0 3A96FED2 12
+. 89 0C 24 8B 06 31 C9 E8 62 AF FF FF
+
+. 0 3A96B088 14
+. 8B 95 D4 FD FF FF 89 14 24 E8 3A A2 00 00
+
+. 0 3A96B096 20
+. 40 F6 83 14 FC FF FF 01 89 85 C8 FD FF FF 0F 85 2C 04 00 00
+
+. 0 3A96B0AA 21
+. 8B B5 D8 FD FF FF BF FF FF FF FF 89 BD CC FD FF FF 85 F6 74 14
+
+. 0 3A96B0BF 20
+. 8B 85 D8 FD FF FF 8B 88 8C 00 00 00 85 C9 0F 85 A3 01 00 00
+
+. 0 3A96B0D3 14
+. 8B B5 D8 FD FF FF 85 F6 0F 84 C2 00 00 00
+
+. 0 3A96B0E1 14
+. 8D 93 09 F0 FF FF 89 95 B4 FD FF FF EB 1F
+
+. 0 3A96B10E 29
+. 8B 8D B4 FD FF FF 8D BE 80 01 00 00 89 F0 89 FA 89 0C 24 B9 0F 00 00 00 E8 65 F7 FF FF
+
+. 0 3A96A890 25
+. 55 89 E5 83 EC 0C 89 75 F8 89 D6 89 7D FC 8B 12 89 C7 31 C0 83 FA FF 74 27
+
+. 0 3A96A8D0 10
+. 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96B12B 4
+. 84 C0 74 C0
+
+. 0 3A96B0EF 31
+. 83 BD CC FD FF FF FF 8B B6 60 01 00 00 0F 94 C0 85 F6 0F 95 C2 21 D0 A8 01 0F 84 95 00 00 00
+
+. 0 3A96B1A3 27
+. 83 BD CC FD FF FF FF 8B B3 94 F9 FF FF 0F 94 C0 85 F6 0F 95 C2 21 D0 A8 01 74 58
+
+. 0 3A96B1BE 13
+. 0F B6 86 74 01 00 00 24 03 3C 02 74 4B
+
+. 0 3A96B1CB 8
+. 3B B5 D8 FD FF FF 74 43
+
+. 0 3A96B216 9
+. 83 BD CC FD FF FF FF 74 57
+
+. 0 3A96B276 9
+. 83 BB BC 00 00 00 FF 74 A0
+
+. 0 3A96B27F 52
+. 8D 85 E0 FD FF FF 8D 8D E4 FD FF FF 89 44 24 04 8D 83 BC 00 00 00 89 4C 24 08 89 04 24 8B 8D D0 FD FF FF 8B 95 C8 FD FF FF 8B 85 D4 FD FF FF E8 9D DF FF FF
+
+. 0 3A969250 27
+. 55 89 E5 57 31 FF 56 31 F6 53 81 EC B4 00 00 00 89 45 90 8B 45 08 E8 C0 C6 00 00
+
+. 0 3A96926B 114
+. 81 C3 01 F4 00 00 89 4D 88 8B 00 89 55 8C 8B 8B D8 00 00 00 89 BD 7C FF FF FF 89 45 84 8B 83 CC 00 00 00 C7 45 80 FF FF FF FF 89 B5 78 FF FF FF 01 C8 8D 44 02 12 8B 4D 84 83 E0 FC 29 C4 8D 54 24 1B 8B 39 83 E2 F0 89 95 74 FF FF FF 90 8D B4 26 00 00 00 00 31 C0 89 85 70 FF FF FF 31 C0 F6 83 14 FC FF FF 01 89 85 68 FF FF FF 0F 85 78 02 00 00
+
+. 0 3A9692DD 30
+. 8B 47 10 31 F6 89 44 24 08 8B 47 0C 89 44 24 04 8B 8D 74 FF FF FF 89 0C 24 E8 75 C4 00 00
+
+. 0 3A9692FB 16
+. 83 7D 80 FF 89 85 6C FF FF FF 0F 85 98 01 00 00
+
+. 0 3A96930B 8
+. 3B B3 C8 00 00 00 72 1F
+
+. 0 3A969332 7
+. 83 7C B7 14 01 74 DF
+
+. 0 3A969339 41
+. 8B 83 C4 00 00 00 8B 44 F0 04 89 44 24 08 8B 83 C4 00 00 00 8B 04 F0 89 44 24 04 8B 85 6C FF FF FF 89 04 24 E8 0E C4 00 00
+
+. 0 3A969362 22
+. 8B 55 8C 89 54 24 08 8B 4D 90 89 04 24 89 4C 24 04 E8 F8 C3 00 00
+
+. 0 3A969378 31
+. 89 85 70 FF FF FF 8B 85 74 FF FF FF 29 85 70 FF FF FF F6 83 14 FC FF FF 01 0F 85 50 01 00 00
+
+. 0 3A969397 14
+. 8B 55 10 8B 85 74 FF FF FF E8 7B FB FF FF
+
+. 0 3A9693A5 11
+. 89 45 80 8B 44 B7 14 85 C0 75 18
+
+. 0 3A9693B0 10
+. 83 7D 80 FF 0F 84 44 01 00 00
+
+. 0 3A9694FE 45
+. 8B 85 70 FF FF FF 8B 4D 8C 8B 95 74 FF FF FF 29 C8 C6 44 10 FF 00 8D 45 94 89 44 24 08 89 54 24 04 C7 04 24 03 00 00 00 E8 E5 AC 00 00
+
+. 0 3A974210 14
+. 55 89 E5 83 EC 58 89 5D F4 E8 0D 17 00 00
+
+. 0 3A97421E 24
+. 81 C3 4E 44 00 00 89 75 F8 8B B3 24 00 00 00 89 7D FC 8B 06 85 C0 75 7A
+
+. 0 3A974236 21
+. 8B BB 48 01 00 00 8B 55 0C 8B 4D 10 87 D3 B8 C3 00 00 00 CD 80
+
+. 0 3A97424B 13
+. 87 D3 3D 00 F0 FF FF 0F 87 AE 00 00 00
+
+. 0 3A974306 18
+. F7 D8 89 83 48 01 00 00 B8 FF FF FF FF E9 40 FF FF FF
+
+. 0 3A974258 7
+. 83 F8 FF 89 C2 74 31
+
+. 0 3A974290 9
+. 83 BB 48 01 00 00 26 75 C6
+
+. 0 3A97425F 4
+. 85 D2 75 1D
+
+. 0 3A974280 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A96952B 4
+. 85 C0 75 13
+
+. 0 3A969542 19
+. BA 01 00 00 00 B8 01 00 00 00 89 54 B7 14 E9 73 FE FF FF
+
+. 0 3A9693C8 25
+. 83 F8 02 0F 94 C0 0F B6 C0 09 85 68 FF FF FF 83 7D 80 FF 0F 84 37 FF FF FF
+
+. 0 3A969318 14
+. 46 83 7D 80 FF 8D 76 00 0F 85 7D 01 00 00
+
+. 0 3A969326 12
+. 3B B3 C8 00 00 00 0F 83 12 01 00 00
+
+. 0 3A974263 11
+. 8B 4D 10 8B 41 58 39 41 0C 74 12
+
+. 0 3A96952F 19
+. 8B 45 A4 25 00 F0 00 00 3D 00 40 00 00 0F 84 78 FE FF FF
+
+. 0 3A9693BA 39
+. B8 02 00 00 00 89 44 B7 14 B8 02 00 00 00 83 F8 02 0F 94 C0 0F B6 C0 09 85 68 FF FF FF 83 7D 80 FF 0F 84 37 FF FF FF
+
+. 0 3A969444 6
+. 83 7D 80 FF 75 59
+
+. 0 3A96944A 10
+. 8B 85 68 FF FF FF 85 C0 74 15
+
+. 0 3A969454 11
+. 8B 93 48 01 00 00 83 FA 02 74 0A
+
+. 0 3A969469 31
+. 83 45 84 04 8B 8D 68 FF FF FF 09 8D 78 FF FF FF 8B 55 84 8B 02 85 C0 89 C7 0F 85 38 FE FF FF
+
+. 0 3A9692C0 29
+. 31 C0 89 85 70 FF FF FF 31 C0 F6 83 14 FC FF FF 01 89 85 68 FF FF FF 0F 85 78 02 00 00
+
+. 0 3A969488 14
+. 8B 85 78 FF FF FF 85 C0 0F 84 9C 02 00 00
+
+. 0 3A969496 13
+. B8 FF FF FF FF 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96B2B3 11
+. 89 85 CC FD FF FF E9 61 FF FF FF
+
+. 0 3A96B21F 31
+. 83 BD CC FD FF FF FF 8B 95 D8 FD FF FF 0F 94 C0 85 D2 0F 95 C2 21 D0 A8 01 0F 85 E1 00 00 00
+
+. 0 3A96B31F 39
+. 8B B5 D8 FD FF FF 8D 83 0F F0 FF FF B9 1D 00 00 00 89 04 24 8B 85 D8 FD FF FF 81 C6 CC 01 00 00 89 F2 E8 4A F5 FF FF
+
+. 0 3A96B346 8
+. 84 C0 0F 84 F0 FE FF FF
+
+. 0 3A96B23E 13
+. 83 BD CC FD FF FF FF 0F 84 3C 01 00 00
+
+. 0 3A96B387 14
+. 8B 85 D0 FD FF FF 85 C0 0F 85 69 01 00 00
+
+. 0 3A96B395 11
+. 8B 85 D4 FD FF FF E8 80 03 00 00
+
+. 0 3A96B720 14
+. 55 89 E5 57 56 53 83 EC 64 E8 FD A1 00 00
+
+. 0 3A96B72E 22
+. 81 C3 3E CF 00 00 F6 83 14 FC FF FF 01 89 45 F0 0F 85 5E 04 00 00
+
+. 0 3A96B744 14
+. 8B 8B DC 00 00 00 85 C9 0F 84 20 01 00 00
+
+. 0 3A96B872 22
+. B9 01 00 00 00 8D 93 E4 00 00 00 8D 83 72 F0 FF FF E8 E8 57 00 00
+
+. 0 3A9710AD 23
+. 89 74 24 04 8D 45 94 89 44 24 08 C7 04 24 03 00 00 00 E8 5C 32 00 00
+
+. 0 3A9710C4 4
+. 85 C0 78 09
+
+. 0 3A9710C8 9
+. 8B 45 C0 85 C0 89 07 75 1F
+
+. 0 3A9710F0 42
+. 89 74 24 10 31 C9 BA 02 00 00 00 89 4C 24 14 89 54 24 0C 8B 55 90 89 44 24 04 C7 04 24 00 00 00 00 89 54 24 08 E8 36 3B 00 00
+
+. 0 3A97111A 5
+. 89 45 8C EB B2
+
+. 0 3A9710D1 8
+. 89 34 24 E8 97 33 00 00
+
+. 0 3A96B888 10
+. 89 45 D8 40 0F 84 73 01 00 00
+
+. 0 3A96B892 18
+. 8B 83 E4 00 00 00 83 F8 10 89 45 A0 0F 86 0E 01 00 00
+
+. 0 3A96B8A4 19
+. FC 8B 75 D8 BA 0B 00 00 00 8D BB 83 F0 FF FF 89 D1 F3 A6
+
+. 0 3A96B8B5 2
+. F3 A6
+
+. 0 3A96B8B7 6
+. 0F 85 F5 00 00 00
+
+. 0 3A96B8BD 34
+. 8B 45 D8 89 C2 89 83 DC 00 00 00 8B 40 0C 8D 04 40 8D 04 85 10 00 00 00 01 C2 83 C0 30 39 45 A0 72 26
+
+. 0 3A96B8DF 21
+. 89 93 E0 00 00 00 B9 14 00 00 00 89 D6 8D BB 8F F0 FF FF F3 A6
+
+. 0 3A96B8F2 2
+. F3 A6
+
+. 0 3A96B8F4 17
+. 0F 97 C2 0F 92 C0 8B 4D D8 38 C2 0F 84 4D FE FF FF
+
+. 0 3A96B752 11
+. 31 C0 83 F9 FF 0F 84 FF 03 00 00
+
+. 0 3A96B75D 27
+. C7 45 DC 00 00 00 00 8B 83 E0 00 00 00 83 F8 FF 89 45 D4 89 C7 0F 84 A0 01 00 00
+
+. 0 3A96B778 26
+. 8B 93 E4 00 00 00 01 D1 8B 93 20 FC FF FF 29 C1 89 4D E0 85 D2 89 55 C8 74 31
+
+. 0 3A96B792 29
+. C7 45 C4 00 00 00 00 8D B3 5C FD FF FF 90 89 74 24 04 8B 4D C8 89 0C 24 E8 D1 9A 00 00
+
+. 0 3A96B7AF 8
+. 85 C0 0F 84 29 04 00 00
+
+. 0 3A96B7B7 12
+. FF 45 C4 83 C6 05 83 7D C4 03 7E DD
+
+. 0 3A96B7A0 15
+. 89 74 24 04 8B 4D C8 89 0C 24 E8 D1 9A 00 00
+
+. 0 3A9752B7 12
+. 0F B6 55 FB F7 DA 89 D0 5A 5E 5D C3
+
+. 0 3A96BBE0 11
+. 8B 45 C4 83 C0 30 E9 DD FB FF FF
+
+. 0 3A96B7C8 18
+. 89 45 CC 99 8B 45 CC 89 55 D0 8B 75 D0 21 F0 40 74 1F
+
+. 0 3A96B7DA 21
+. 0F B6 4D CC B8 01 00 00 00 31 D2 0F A5 C2 D3 E0 F6 C1 20 74 04
+
+. 0 3A96B7EF 32
+. 89 C2 31 C0 89 45 CC 89 55 D0 C7 45 EC 00 00 00 00 8B 55 D4 8B 4A 14 49 39 4D EC 89 4D E8 7E 1B
+
+. 0 3A96B82A 40
+. 8B 45 EC 8B 4D E8 01 C8 89 C2 C1 EA 1F 8D 34 02 D1 FE 8D 04 76 C1 E0 03 89 45 C0 8B 44 07 34 3B 45 E0 0F 83 FE 02 00 00
+
+. 0 3A96B852 13
+. 8B 55 D4 01 C2 8B 45 F0 E8 51 FD FF FF
+
+. 0 3A96B5B0 27
+. 55 89 E5 57 89 C7 56 83 EC 0C 89 55 F4 0F B6 00 84 C0 88 45 EF 0F 84 DF 00 00 00
+
+. 0 3A96B5CB 17
+. 0F B6 0A 0F B6 45 EF 2C 30 3C 09 0F 87 AA 00 00 00
+
+. 0 3A96B686 13
+. 88 C8 BA FF FF FF FF 2C 30 3C 09 76 26
+
+. 0 3A96B693 5
+. 38 4D EF 75 18
+
+. 0 3A96B698 18
+. FF 45 F4 47 8B 45 F4 0F B6 0F 88 4D EF 0F B6 08 EB C8
+
+. 0 3A96B672 6
+. 80 7D EF 00 74 38
+
+. 0 3A96B678 14
+. 0F B6 45 EF 2C 30 3C 09 0F 86 5A FF FF FF
+
+. 0 3A96B6B0 18
+. 0F BE 55 EF 0F BE C1 29 C2 83 C4 0C 89 D0 5E 5F 5D C3
+
+. 0 3A96B85F 8
+. 85 C0 0F 84 25 04 00 00
+
+. 0 3A96B867 4
+. 85 C0 79 A9
+
+. 0 3A96B814 16
+. 4E 89 75 E8 8B 4D E8 39 4D EC 0F 8F 2C 03 00 00
+
+. 0 3A96B824 46
+. 8B BB E0 00 00 00 8B 45 EC 8B 4D E8 01 C8 89 C2 C1 EA 1F 8D 34 02 D1 FE 8D 04 76 C1 E0 03 89 45 C0 8B 44 07 34 3B 45 E0 0F 83 FE 02 00 00
+
+. 0 3A96B86B 7
+. 46 89 75 EC 90 EB A6
+
+. 0 3A96B818 12
+. 8B 4D E8 39 4D EC 0F 8F 2C 03 00 00
+
+. 0 3A96B5E0 17
+. 88 C8 BA 01 00 00 00 2C 30 3C 09 0F 87 C8 00 00 00
+
+. 0 3A96B5F1 36
+. 0F BE 45 EF 47 FF 45 F4 83 E8 30 89 45 F0 0F BE C1 8D 70 D0 0F B6 0F 88 C8 88 4D EF 2C 30 88 CA 3C 09 77 2A
+
+. 0 3A96B63F 16
+. 8B 45 F4 0F B6 08 88 C8 88 CA 2C 30 3C 09 77 1E
+
+. 0 3A96B66D 5
+. 39 75 F0 75 50
+
+. 0 3A96BC8C 11
+. 89 75 EC 85 F6 0F 8E 93 FD FF FF
+
+. 0 3A96BC97 31
+. 8B 4D C0 89 4D A4 8D 76 00 8B BB E0 00 00 00 8B 55 A4 8B 44 17 1C 3B 45 E0 0F 83 7A FD FF FF
+
+. 0 3A96BCB6 13
+. 8B 55 D4 01 C2 8B 45 F0 E8 ED F8 FF FF
+
+. 0 3A96BCC3 8
+. 85 C0 0F 85 5F FD FF FF
+
+. 0 3A96BA2A 23
+. 8B BB E0 00 00 00 3B 75 EC 8D 04 76 8D 3C C7 8D 4F 30 89 4D BC 7E 21
+
+. 0 3A96BA62 21
+. 8B 4F 30 83 F9 01 0F 94 C0 83 F9 03 0F 94 C2 09 D0 A8 01 74 A9
+
+. 0 3A96BA77 11
+. 8B 45 BC 8B 78 08 3B 7D E0 73 9E
+
+. 0 3A96BA82 7
+. 8B 55 DC 85 D2 74 08
+
+. 0 3A96BA91 10
+. 8B 83 1C FC FF FF 85 C0 74 0C
+
+. 0 3A96BA9B 12
+. 8B 55 BC 39 42 0C 0F 87 79 FF FF FF
+
+. 0 3A96BAA7 18
+. 8B 55 D0 8B 45 CC F7 D2 F7 D0 09 C2 0F 84 20 02 00 00
+
+. 0 3A96BAB9 38
+. C7 45 B4 00 00 00 00 8B 55 BC 8B 42 10 8B 52 14 89 45 A8 89 55 AC 81 E2 00 00 0F 00 89 D0 0B 45 B4 89 55 B8 74 12
+
+. 0 3A96BAF1 65
+. 8B 83 54 FC FF FF 31 D2 81 CA 00 00 0F 00 89 55 9C 89 C2 8B 45 9C F7 D2 89 55 98 F7 D0 8B 55 A8 89 45 9C 8B 45 98 21 C2 89 55 98 8B 45 AC 8B 55 9C 21 D0 89 C2 0B 55 98 89 45 9C 0F 85 EE FE FF FF
+
+. 0 3A96BB32 20
+. 8B 45 D4 01 C7 3B 8B 50 FC FF FF 89 7D DC 0F 85 DA FE FF FF
+
+. 0 3A96BB46 19
+. 8D 76 00 8D BC 27 00 00 00 00 F6 83 14 FC FF FF 01 75 66
+
+. 0 3A96BB59 11
+. 8B 45 DC 83 C4 64 5B 5E 5F 5D C3
+
+. 0 3A96B3A0 14
+. 89 85 C4 FD FF FF 85 C0 0F 84 8A 00 00 00
+
+. 0 3A96B3AE 14
+. 8B B5 D8 FD FF FF 85 F6 0F 84 37 01 00 00
+
+. 0 3A96B3BC 13
+. F6 86 E9 01 00 00 08 0F 85 48 01 00 00
+
+. 0 3A96B3C9 10
+. 8B 85 C4 FD FF FF 85 C0 74 65
+
+. 0 3A96B3D3 23
+. 8B 85 C4 FD FF FF 8D 95 E4 FD FF FF 89 95 B8 FD FF FF E8 36 DB FF FF
+
+. 0 3A969041 6
+. 83 7F 10 20 75 DA
+
+. 0 3A969047 6
+. 83 7F 1C 03 76 D4
+
+. 0 3A96904D 24
+. 8B 47 04 8B 4D CC 8D 50 20 3B 11 8D 44 08 04 89 45 B4 0F 87 F6 00 00 00
+
+. 0 3A969065 19
+. FC 8B 75 B4 BA 10 00 00 00 8D BB 38 D6 FF FF 89 D1 F3 A6
+
+. 0 3A969076 2
+. F3 A6
+
+. 0 3A969078 6
+. 0F 84 86 00 00 00
+
+. 0 3A969104 32
+. 8B 75 B4 0F B6 46 14 0F B6 56 18 C1 E0 08 01 D0 0F B6 56 1C C1 E0 08 8B 76 10 01 D0 85 F6 75 0E
+
+. 0 3A969124 10
+. 8B 93 1C FC FF FF 85 D2 74 22
+
+. 0 3A96912E 4
+. 39 C2 73 1E
+
+. 0 3A96B3EA 9
+. 89 85 CC FD FF FF 40 74 52
+
+. 0 3A96B3F3 14
+. 8B 8D C4 FD FF FF 89 0C 24 E8 CF 9E 00 00
+
+. 0 3A96B401 11
+. 8D 70 01 89 34 24 E8 10 A4 FF FF
+
+. 0 3A96B40C 8
+. 89 C2 31 C0 85 D2 74 16
+
+. 0 3A96B414 22
+. 89 74 24 08 8B 85 C4 FD FF FF 89 14 24 89 44 24 04 E8 46 A4 00 00
+
+. 0 3A96B42A 14
+. 89 85 E0 FD FF FF 85 C0 0F 84 48 01 00 00
+
+. 0 3A96B438 13
+. 83 BD CC FD FF FF FF 0F 85 06 FE FF FF
+
+. 0 3A96B24B 13
+. F6 83 14 FC FF FF 01 0F 85 6B 02 00 00
+
+. 0 3A96B258 25
+. 83 BD CC FD FF FF FF 8D 85 E4 FD FF FF 89 85 B8 FD FF FF 0F 85 BF FD FF FF
+
+. 0 3A96CAC0 9
+. 89 C2 8B 40 0C 85 C0 75 F7
+
+. 0 3A9699C5 23
+. 8B 46 08 8B 4D 84 89 81 44 01 00 00 83 C6 20 3B B5 54 FF FF FF 72 AF
+
+. 0 3A969975 22
+. 8D 74 26 00 8D BC 27 00 00 00 00 83 C6 20 3B B5 54 FF FF FF 73 51
+
+. 0 3A97571F 11
+. 89 F9 F7 D9 83 E1 03 29 CA F3 AA
+
+. 0 3A97572A 41
+. 83 EA 20 8B 0F 90 8B 4F 1C 83 EA 20 89 47 00 89 47 04 89 47 08 89 47 0C 89 47 10 89 47 14 89 47 18 89 47 1C 8D 7F 20 7D DD
+
+. 0 3A975730 35
+. 8B 4F 1C 83 EA 20 89 47 00 89 47 04 89 47 08 89 47 0C 89 47 10 89 47 14 89 47 18 89 47 1C 8D 7F 20 7D DD
+
+. 0 3A975753 8
+. 8D 4A 20 C1 E9 02 F3 AB
+
+. 0 3A969FDC 66
+. 8B 45 94 8B 55 98 8B 4D 84 89 81 BC 01 00 00 89 91 C0 01 00 00 8B 45 EC 8B 55 F0 89 81 C4 01 00 00 8B 83 00 FC FF FF 89 91 C8 01 00 00 F7 D0 21 85 68 FF FF FF F6 85 68 FF FF FF 01 0F 85 72 03 00 00
+
+. 0 3A96FEDE 11
+. 89 46 14 83 EC 0C 8B 75 FC C9 C3
+
+. 0 3A970246 52
+. 8B 45 E4 8B 95 18 FF FF FF 8B 8D 24 FF FF FF 89 02 8B 85 20 FF FF FF C7 01 00 00 00 00 C7 00 00 00 00 00 31 C0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C2 04 00
+
+. 0 3A96F3A4 14
+. 8B 55 C8 83 EC 04 85 D2 0F 85 44 02 00 00
+
+. 0 3A96F3B2 12
+. 8B 55 E0 F6 82 74 01 00 00 60 75 37
+
+. 0 3A96F3BE 62
+. 8B 4D B0 83 EC 1C 8D 44 24 1F 83 E0 F0 C7 00 00 00 00 00 C7 40 08 00 00 00 00 89 50 04 89 41 08 89 45 B0 0F B6 82 74 01 00 00 FF 45 EC 24 9F 0C 20 88 82 74 01 00 00 8B 7D 9C 85 FF 74 0D
+
+. 0 3A96F409 19
+. 83 45 8C 08 8B 75 8C 8B 06 85 C0 89 C2 0F 84 07 01 00 00
+
+. 0 3A975265 5
+. 40 38 D5 74 D3
+
+. 0 3A9699A2 11
+. 3D 51 E5 74 64 0F 87 5A 02 00 00
+
+. 0 3A9699AD 5
+. 83 F8 07 75 CE
+
+. 0 3A969B31 14
+. 8B 8D 40 FF FF FF 39 8D 38 FF FF FF 76 3C
+
+. 0 3A96A171 61
+. 31 C0 89 44 24 14 B8 FF FF FF FF 89 44 24 10 B8 32 00 00 00 89 44 24 0C 8B 47 14 89 44 24 08 8B 95 38 FF FF FF 29 95 3C FF FF FF 8B 8D 3C FF FF FF 89 14 24 89 4C 24 04 E8 A2 AA 00 00
+
+. 0 3A96A1AE 7
+. 40 0F 85 D8 F9 FF FF
+
+. 0 3A96F425 24
+. 81 FA FD FF FF 7F 0F 94 C0 81 FA FF FF FF 7F 0F 94 C2 09 D0 A8 01 74 CC
+
+. 0 3A96F523 11
+. 8B 45 9C 85 C0 0F 84 9F 00 00 00
+
+. 0 3A96F5CD 9
+. 8B 4D B4 8B 01 85 C0 74 13
+
+. 0 3A96F5D6 13
+. 8B 75 B4 8B 76 08 85 F6 89 75 B4 74 22
+
+. 0 3A96F5E3 6
+. 8B 06 85 C0 75 ED
+
+. 0 3A96F5E9 11
+. 8B 45 B4 85 C0 0F 85 A7 FC FF FF
+
+. 0 3A96F2DF 15
+. 8B 75 A0 0F B7 86 4E 01 00 00 66 85 C0 74 1B
+
+. 0 3A96F2EE 37
+. 0F B7 C0 8D 04 85 12 00 00 00 25 FC FF 0F 00 29 C4 8D 44 24 1F 83 E0 F0 89 45 9C 8B 55 A0 8B 42 1C 85 C0 75 18
+
+. 0 3A96FECD 17
+. 89 C1 83 E1 03 89 0C 24 8B 06 31 C9 E8 62 AF FF FF
+
+. 0 3A970BE0 10
+. 83 C4 08 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96AFA7 12
+. 89 F0 8D 65 F4 5B 5E 5F 5D C2 0C 00
+
+. 0 3A96F3F5 7
+. 8B 7D 9C 85 FF 74 0D
+
+. 0 3A96F3FC 32
+. 8B 75 98 8B 45 9C 89 14 B0 46 89 75 98 83 45 8C 08 8B 75 8C 8B 06 85 C0 89 C2 0F 84 07 01 00 00
+
+. 0 3A96F52E 32
+. 8B 45 98 8B 55 9C C7 04 82 00 00 00 00 40 89 45 98 8D 04 C5 04 00 00 00 89 04 24 E8 CE 62 FF FF
+
+. 0 3A96F54E 19
+. 8B 4D A0 85 C0 89 C2 89 81 D4 01 00 00 0F 84 48 05 00 00
+
+. 0 3A96F561 22
+. 8B 4D 98 8D 7A 04 8B 45 A0 8B 75 9C C1 E1 02 83 F9 07 89 02 76 16
+
+. 0 3A96F577 8
+. F7 C7 04 00 00 00 74 0E
+
+. 0 3A96F57F 20
+. 8B 06 8D 7A 08 83 C6 04 83 E9 04 89 42 04 FC C1 E9 02 F3 A5
+
+. 0 3A96F591 2
+. F3 A5
+
+. 0 3A96F593 28
+. 8B 55 A0 8B 45 98 8B B2 D4 01 00 00 C1 E0 02 83 F8 07 89 C2 8D 0C 06 8D 79 04 76 16
+
+. 0 3A96F5AF 8
+. F7 C7 04 00 00 00 74 0E
+
+. 0 3A96F5B7 22
+. 8B 06 8D 79 08 83 C6 04 83 EA 04 89 41 04 FC 89 D1 C1 E9 02 F3 A5
+
+. 0 3A96F5CB 2
+. F3 A5
+
+. 0 3A96AFE3 9
+. 89 FA 89 F0 E8 F4 DD FF FF
+
+. 0 3A968DE0 19
+. 55 89 E5 57 89 D7 56 53 83 EC 14 8B 70 14 E8 38 CB 00 00
+
+. 0 3A968DF3 17
+. 81 C3 79 F8 00 00 C7 45 F0 00 00 00 00 85 F6 74 28
+
+. 0 3A968E04 26
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 06 89 3C 24 89 44 24 04 E8 62 C4 00 00
+
+. 0 3A968E1E 4
+. 85 C0 74 55
+
+. 0 3A968E22 10
+. 89 75 F0 8B 76 04 85 F6 75 E4
+
+. 0 3A968E2C 8
+. 89 3C 24 E8 9C C4 00 00
+
+. 0 3A968E34 17
+. 8D 50 01 83 C0 0D 89 55 EC 89 04 24 E8 D7 C9 FF FF
+
+. 0 3A968E45 6
+. 85 C0 89 C6 74 34
+
+. 0 3A968E4B 22
+. 8B 55 EC 8D 40 0C 89 7C 24 04 89 04 24 89 54 24 08 E8 0F CA 00 00
+
+. 0 3A968E61 30
+. 89 06 8B 45 F0 C7 46 04 00 00 00 00 C7 46 08 00 00 00 00 89 70 04 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96AFEC 9
+. 80 8E 75 01 00 00 01 EB B2
+
+. 0 3A970BC0 14
+. 8B 06 89 3C 24 89 44 24 04 E8 B2 46 00 00
+
+. 0 3A96F5C5 8
+. FC 89 D1 C1 E9 02 F3 A5
+
+. 0 3A96F313 10
+. 8B 82 E8 00 00 00 85 C0 75 0E
+
+. 0 3A96F31D 14
+. 8B 82 E0 00 00 00 85 C0 0F 84 F8 01 00 00
+
+. 0 3A96F605 25
+. 8B 83 48 01 00 00 8B 7D A8 85 C0 0F 94 C0 85 FF 0F 95 C2 21 D0 A8 01 74 09
+
+. 0 3A96F61E 22
+. 8B 45 A8 89 83 48 01 00 00 8B 4D C0 8B 91 D4 01 00 00 85 D2 74 11
+
+. 0 3A96F645 18
+. 8B 45 EC 8D 04 C5 04 00 00 00 89 04 24 E8 C5 61 FF FF
+
+. 0 3A96F657 19
+. 8B 75 C0 85 C0 89 C1 89 86 D4 01 00 00 0F 84 A4 05 00 00
+
+. 0 3A96F66A 39
+. 8B 45 EC 8B 55 C0 8D 4C 81 04 89 8A 50 01 00 00 8B 4D F0 89 82 54 01 00 00 C7 45 EC 00 00 00 00 85 C9 89 4D B4 74 3E
+
+. 0 3A96F691 11
+. 8B 75 08 85 F6 0F 85 28 04 00 00
+
+. 0 3A96F69C 51
+. 8B 55 B4 8B 4A 04 8B 75 C0 8B 45 EC 8B 96 50 01 00 00 89 0C 82 40 89 45 EC 8B 45 B4 8B 48 04 80 A1 74 01 00 00 9F 8B 55 B4 8B 52 08 89 55 B4 85 D2 75 C2
+
+. 0 3A96F6CF 13
+. F6 83 15 FC FF FF 02 0F 85 20 06 00 00
+
+. 0 3A96F6DC 45
+. 8B 55 C0 8B 45 EC 8B 92 50 01 00 00 89 85 78 FF FF FF 89 95 74 FF FF FF C7 45 AC 00 00 00 00 8B 55 C0 8B 8A DC 01 00 00 39 4D AC 73 49
+
+. 0 3A96F752 29
+. 8B 95 78 FF FF FF 8B 45 C0 8B B5 74 FF FF FF C1 E2 02 83 FA 07 8B B8 D4 01 00 00 76 15
+
+. 0 3A96F76F 8
+. F7 C7 04 00 00 00 74 0D
+
+. 0 3A96F784 8
+. FC 89 D1 C1 E9 02 F3 A5
+
+. 0 3A96F78A 2
+. F3 A5
+
+. 0 3A96F78C 25
+. C7 45 AC 01 00 00 00 8B 55 EC 39 55 AC 89 95 78 FF FF FF 0F 83 ED 04 00 00
+
+. 0 3A96F7A5 40
+. 8B 75 C0 8B 8E D4 01 00 00 8B 55 C0 8B 75 AC 8B 82 50 01 00 00 BA 01 00 00 00 8B 04 B0 39 41 04 89 85 7C FF FF FF 74 11
+
+. 0 3A96F7DE 15
+. 8D 7A 01 3B BD 78 FF FF FF 0F 83 B0 00 00 00
+
+. 0 3A96F7ED 39
+. C1 E2 02 89 95 6C FF FF FF F7 DA 89 95 68 FF FF FF 89 F6 8D 34 BD 00 00 00 00 8B 04 31 8B 90 D4 01 00 00 85 D2 74 7C
+
+. 0 3A96F814 6
+. 8B 02 85 C0 74 76
+
+. 0 3A96F81A 14
+. 03 B5 68 FF FF FF 89 B5 70 FF FF FF EB 06
+
+. 0 3A96F82E 11
+. 83 C2 04 3B 85 7C FF FF FF 75 EF
+
+. 0 3A96F828 6
+. 8B 02 85 C0 74 62
+
+. 0 3A96F890 13
+. 47 3B BD 78 FF FF FF 0F 82 63 FF FF FF
+
+. 0 3A96F800 20
+. 8D 34 BD 00 00 00 00 8B 04 31 8B 90 D4 01 00 00 85 D2 74 7C
+
+. 0 3A96F89D 18
+. FF 45 AC 8B 85 78 FF FF FF 39 45 AC 0F 82 FF FE FF FF
+
+. 0 3A96F7AE 31
+. 8B 55 C0 8B 75 AC 8B 82 50 01 00 00 BA 01 00 00 00 8B 04 B0 39 41 04 89 85 7C FF FF FF 74 11
+
+. 0 3A96F7CD 17
+. 8D 41 04 83 C0 04 8B B5 7C FF FF FF 42 39 30 75 F2
+
+. 0 3A96F7D0 14
+. 83 C0 04 8B B5 7C FF FF FF 42 39 30 75 F2
+
+. 0 3A96F8AF 5
+. E9 E7 03 00 00
+
+. 0 3A96FC9B 24
+. 8B B5 78 FF FF FF C7 04 B1 00 00 00 00 8B 75 A4 85 F6 0F 85 A9 01 00 00
+
+. 0 3A96FCB3 10
+. 8D 65 F4 5B 5E 5F 5D C2 08 00
+
+. 0 3A967090 25
+. 8B 8B 94 F9 FF FF 83 EC 08 8B 91 54 01 00 00 89 95 04 FF FF FF 85 D2 74 3B
+
+. 0 3A9670A9 59
+. FF 8D 04 FF FF FF 8B 81 50 01 00 00 8B 8D 04 FF FF FF 8B 04 88 80 88 74 01 00 00 10 8B 8B 94 F9 FF FF 8B B5 04 FF FF FF 8B 81 50 01 00 00 8B 04 B0 FF 80 70 01 00 00 85 F6 75 C5
+
+. 0 3A9670E4 25
+. 8B 83 F0 F9 FF FF 8B 93 F4 F9 FF FF 89 42 0C 8B 83 F0 F9 FF FF 85 C0 74 03
+
+. 0 3A9670FD 16
+. 89 50 10 83 BB 54 FB FF FF 01 0F 86 52 0A 00 00
+
+. 0 3A96710D 37
+. 8B 8B 94 F9 FF FF B8 01 00 00 00 8B 95 D0 FE FF FF 89 85 04 FF FF FF 8B 81 50 01 00 00 83 C2 50 39 50 04 74 11
+
+. 0 3A967132 17
+. FF 85 04 FF FF FF 8B B5 04 FF FF FF 39 14 B0 75 EF
+
+. 0 3A967143 42
+. 8B 81 50 01 00 00 8B B5 04 FF FF FF 8B BD 50 FF FF FF 89 85 A8 FE FF FF 8B 54 B0 FC 85 FF 89 93 F4 F9 FF FF 0F 85 D8 11 00 00
+
+. 0 3A96716D 13
+. 89 F0 40 31 F6 3B 81 54 01 00 00 73 10
+
+. 0 3A96718A 20
+. 89 B3 F0 F9 FF FF 8B 85 C4 FE FF FF 85 C0 0F 84 9C 11 00 00
+
+. 0 3A96833A 11
+. 8B 93 F4 F9 FF FF E9 6E EE FF FF
+
+. 0 3A9671B3 22
+. 8B 8D D0 FE FF FF 83 C1 50 89 4A 0C 8B 83 F0 F9 FF FF 85 C0 74 03
+
+. 0 3A9671CC 50
+. 31 C0 85 FF 0F 94 C0 89 85 24 FF FF FF 31 C0 83 FF 03 0F 94 C0 8D 8D 24 FF FF FF 8D 93 44 02 FF FF 89 85 28 FF FF FF 8D 83 B4 D9 FE FF E8 62 8F 00 00
+
+. 0 3A970160 17
+. 55 89 E5 83 EC 28 89 5D F4 89 75 F8 E8 BA 57 00 00
+
+. 0 3A970171 24
+. 81 C3 FB 84 00 00 89 7D FC 89 55 EC 89 4D E8 89 45 F0 FF 93 E0 F9 FF FF
+
+. 0 3A970189 37
+. 8B 38 89 C6 8B 83 E8 00 00 00 C7 06 00 00 00 00 89 45 E4 8B 45 F0 89 83 E8 00 00 00 8B 45 E8 89 04 24 FF 55 EC
+
+. 0 3A9688B0 25
+. 55 BA 01 00 00 00 89 E5 83 EC 0C 89 75 FC 8B 75 08 89 5D F8 E8 62 D0 00 00
+
+. 0 3A9688C9 20
+. 81 C3 A3 FD 00 00 8B 4E 04 8B 83 94 F9 FF FF E8 83 8F 00 00
+
+. 0 3A971860 18
+. 55 89 E5 57 31 FF 56 89 C6 53 83 EC 0C E8 B9 40 00 00
+
+. 0 3A971872 16
+. 81 C3 FA 6D 00 00 89 55 F0 85 C0 89 4D EC 75 0E
+
+. 0 3A971890 16
+. F6 86 75 01 00 00 02 C7 45 E8 00 00 00 00 75 E4
+
+. 0 3A9718A0 13
+. 8B 4D EC 89 F0 8B 55 F0 E8 F3 FB FF FF
+
+. 0 3A9714A0 23
+. 55 89 E5 57 56 53 81 EC 84 00 00 00 89 45 B0 8B 40 2C E8 74 44 00 00
+
+. 0 3A9714B7 43
+. 81 C3 B5 71 00 00 89 55 AC 31 D2 85 C0 89 4D A8 C7 45 A4 00 00 00 00 C7 45 94 00 00 00 00 C7 45 90 00 00 00 00 0F 84 4E 01 00 00
+
+. 0 3A9714E2 38
+. 8B 40 04 8B 4D B0 89 45 A0 8B 45 B0 8B 89 A4 00 00 00 8B 80 AC 00 00 00 85 C9 89 4D 9C 89 45 98 0F 84 EA 00 00 00
+
+. 0 3A971508 23
+. 8B 51 04 8B 4D B0 8B 09 01 CA 66 83 3A 01 89 55 8C 0F 85 77 01 00 00
+
+. 0 3A97151F 22
+. 90 8B 55 8C 8B 7D A0 8B B3 94 F9 FF FF 8B 42 04 01 C7 85 F6 74 22
+
+. 0 3A971535 20
+. 8D 74 26 00 8D BC 27 00 00 00 00 89 F2 89 F8 E8 37 F6 FF FF
+
+. 0 3A971549 7
+. 89 75 88 85 C0 75 1F
+
+. 0 3A971550 7
+. 8B 76 0C 85 F6 75 E9
+
+. 0 3A971540 9
+. 89 F2 89 F8 E8 37 F6 FF FF
+
+. 0 3A97156F 11
+. 8B 45 A8 85 C0 0F 85 07 01 00 00
+
+. 0 3A97157A 29
+. 8B 75 8C 8B 46 08 01 C6 8B 55 B0 8B 7E 08 8B 4D A0 8B 42 04 01 F9 89 C7 80 38 00 75 08
+
+. 0 3A971597 41
+. 8B 83 38 00 00 00 8B 38 0F B7 46 04 83 E0 02 89 44 24 08 8B 45 AC 89 44 24 04 8B 55 88 89 F8 89 14 24 8B 16 E8 60 FB FF FF
+
+. 0 3A971120 30
+. 55 89 E5 57 89 D7 56 31 F6 53 81 EC B4 00 00 00 8B 55 08 89 85 60 FF FF FF E8 ED 47 00 00
+
+. 0 3A97113E 43
+. 81 C3 2E 75 00 00 89 8D 5C FF FF FF 8B 42 2C F6 83 14 FC FF FF 10 8B 40 04 89 B5 54 FF FF FF 89 85 58 FF FF FF 0F 85 C1 01 00 00
+
+. 0 3A971169 17
+. 8B 55 08 8B 82 AC 00 00 00 85 C0 0F 84 3F 01 00 00
+
+. 0 3A97117A 16
+. 8B 55 08 8B 32 8B 50 04 01 D6 66 83 3E 01 75 1C
+
+. 0 3A97118A 9
+. 3B 7E 08 0F 84 ED 00 00 00
+
+. 0 3A971193 11
+. 8B 46 10 85 C0 0F 84 C7 01 00 00
+
+. 0 3A97119E 8
+. 01 C6 66 83 3E 01 74 E4
+
+. 0 3A971280 35
+. 8B 56 0C 89 F0 01 D0 8B 08 8B 95 58 FF FF FF 01 CA 89 54 24 04 8B 85 5C FF FF FF 89 04 24 E8 DD 3F 00 00
+
+. 0 3A9712A3 10
+. 31 D2 85 C0 0F 85 E6 FE FF FF
+
+. 0 3A9712AD 12
+. 8D 65 F4 89 D0 5B 5E 5F 5D C2 0C 00
+
+. 0 3A9715C0 20
+. 09 45 A4 83 EC 0C 0F B7 46 06 25 FF 7F 00 00 3B 45 94 76 03
+
+. 0 3A9715D4 10
+. 89 45 94 8B 46 0C 85 C0 75 A2
+
+. 0 3A9715DE 10
+. 8B 4D 8C 8B 41 0C 85 C0 74 0A
+
+. 0 3A9715E8 10
+. 01 C1 89 4D 8C E9 2E FF FF FF
+
+. 0 3A971520 21
+. 8B 55 8C 8B 7D A0 8B B3 94 F9 FF FF 8B 42 04 01 C7 85 F6 74 22
+
+. 0 3A9715D7 7
+. 8B 46 0C 85 C0 75 A2
+
+. 0 3A9715F2 7
+. 8B 75 98 85 F6 74 29
+
+. 0 3A971622 11
+. 8B 45 94 85 C0 0F 85 1D 01 00 00
+
+. 0 3A97174A 21
+. BF 10 00 00 00 89 7C 24 04 8B 75 94 46 89 34 24 E8 CD 40 FF FF
+
+. 0 3A97175F 28
+. C7 45 90 0C 00 00 00 8B 55 B0 85 C0 89 C7 8D 8B 7C EB FF FF 89 82 7C 01 00 00 74 AF
+
+. 0 3A97177B 28
+. 89 B2 78 01 00 00 8B 82 DC 00 00 00 8B 75 9C 8B 40 04 85 F6 89 82 8C 01 00 00 74 63
+
+. 0 3A971797 23
+. 8B 4D 9C 8B 02 8B 49 04 01 C1 89 4D 84 8B 75 84 8B 4E 08 01 CE EB 04
+
+. 0 3A9717B2 62
+. 0F B7 56 06 8B 06 89 D1 81 E1 FF 7F 00 00 81 E2 00 80 00 00 C1 E1 04 89 44 0F 04 8B 45 A0 89 54 0F 08 8B 56 08 01 D0 89 04 0F 8B 55 84 8B 45 A0 03 42 04 89 44 0F 0C 8B 46 0C 85 C0 75 C0
+
+. 0 3A9717F0 10
+. 8B 4D 84 8B 41 0C 85 C0 75 A5
+
+. 0 3A97179F 15
+. 01 C1 89 4D 84 8B 75 84 8B 4E 08 01 CE EB 04
+
+. 0 3A9717FA 11
+. 8B 75 98 85 F6 0F 84 28 FE FF FF
+
+. 0 3A97162D 13
+. 8B 55 A4 8D 65 F4 89 D0 5B 5E 5F 5D C3
+
+. 0 3A9718AD 4
+. 85 C0 74 D3
+
+. 0 3A971884 12
+. 8B 55 E8 8B 76 0C 09 D7 85 F6 74 40
+
+. 0 3A97159F 33
+. 0F B7 46 04 83 E0 02 89 44 24 08 8B 45 AC 89 44 24 04 8B 55 88 89 F8 89 14 24 8B 16 E8 60 FB FF FF
+
+. 0 3A971580 23
+. 01 C6 8B 55 B0 8B 7E 08 8B 4D A0 8B 42 04 01 F9 89 C7 80 38 00 75 08
+
+. 0 3A9717B0 64
+. 01 C6 0F B7 56 06 8B 06 89 D1 81 E1 FF 7F 00 00 81 E2 00 80 00 00 C1 E1 04 89 44 0F 04 8B 45 A0 89 54 0F 08 8B 56 08 01 D0 89 04 0F 8B 55 84 8B 45 A0 03 42 04 89 44 0F 0C 8B 46 0C 85 C0 75 C0
+
+. 0 3A9715F9 15
+. 8B 4D 98 8B 45 B0 8B 51 04 8B 08 01 CA EB 02
+
+. 0 3A97160A 14
+. 0F B7 42 04 25 FF 7F 00 00 3B 45 94 76 03
+
+. 0 3A97161B 7
+. 8B 42 10 85 C0 75 E6
+
+. 0 3A971608 16
+. 01 C2 0F B7 42 04 25 FF 7F 00 00 3B 45 94 76 03
+
+. 0 3A971805 15
+. 8B 45 98 8B 55 B0 8B 70 04 8B 0A 01 CE EB 02
+
+. 0 3A971816 13
+. 8B 46 0C 89 F7 01 C7 F6 46 02 01 75 2A
+
+. 0 3A97184D 7
+. 8B 46 10 85 C0 75 C0
+
+. 0 3A971814 15
+. 01 C6 8B 46 0C 89 F7 01 C7 F6 46 02 01 75 2A
+
+. 0 3A971823 49
+. 0F B7 46 04 8B 55 B0 8B 8A 7C 01 00 00 8B 56 08 25 FF 7F 00 00 C1 E0 04 89 54 01 04 8B 55 A0 03 17 31 FF 89 7C 01 0C 89 14 01 8B 46 10 85 C0 75 C0
+
+. 0 3A971854 5
+. E9 D4 FD FF FF
+
+. 0 3A971618 10
+. 89 45 94 8B 42 10 85 C0 75 E6
+
+. 0 3A9718D0 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3
+
+. 0 3A9688DD 4
+. 85 C0 74 06
+
+. 0 3A9688E7 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3A9701AE 24
+. 89 3E 8B 45 E4 89 83 E8 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9671FE 14
+. 8B BD 50 FF FF FF 85 FF 0F 85 96 0F 00 00
+
+. 0 3A96720C 20
+. 8B 8B 94 F9 FF FF 8B 91 34 01 00 00 85 D2 0F 84 F3 00 00 00
+
+. 0 3A967313 11
+. 8B 83 E4 F9 FF FF E8 12 98 00 00
+
+. 0 3A970B30 5
+. E8 F2 4D 00 00
+
+. 0 3A970B35 21
+. 81 C1 37 7B 00 00 55 8B 91 28 00 00 00 89 E5 83 7A 08 00 75 1B
+
+. 0 3A970B4A 31
+. 89 42 10 8B 81 94 F9 FF FF C7 02 01 00 00 00 89 42 04 8D 81 04 85 FF FF 89 42 08 5D 89 D0 C3
+
+. 0 3A96731E 19
+. 89 85 78 FE FF FF 8B 83 94 F9 FF FF 8B 40 6C 85 C0 74 09
+
+. 0 3A967331 19
+. 8B 95 78 FE FF FF 89 50 04 8B 83 50 FA FF FF 85 C0 74 09
+
+. 0 3A96734D 63
+. 80 BD 03 FF FF FF 00 8B B3 94 F9 FF FF 8D 86 50 01 00 00 89 83 A4 F9 FF FF 8B 96 54 01 00 00 89 F1 89 83 9C F9 FF FF 8B 86 50 01 00 00 89 93 2C FC FF FF 89 83 28 FC FF FF 0F 84 84 05 00 00
+
+. 0 3A967910 31
+. 31 C0 83 BB 7C FD FF FF 00 0F 95 C0 09 83 3C FC FF FF 89 85 74 FE FF FF 8B 46 0C 85 C0 74 5D
+
+. 0 3A96792F 9
+. 89 C6 8B 40 0C 85 C0 75 F7
+
+. 0 3A967938 2
+. EB 52
+
+. 0 3A96798C 10
+. 8B 46 14 8B 40 04 85 C0 74 B8
+
+. 0 3A967996 2
+. EB A8
+
+. 0 3A967940 14
+. C7 40 08 01 00 00 00 8B 40 04 85 C0 75 F2
+
+. 0 3A96794E 13
+. 8B BD D0 FE FF FF 83 C7 50 39 FE 74 26
+
+. 0 3A967981 11
+. 8B 76 10 85 F6 0F 84 96 07 00 00
+
+. 0 3A96795B 38
+. 8B 85 74 FE FF FF 89 44 24 0C 8B 83 3C FC FF FF 89 44 24 08 8B 86 B0 01 00 00 89 34 24 89 44 24 04 E8 2F 65 00 00
+
+. 0 3A96DEB0 26
+. 55 89 E5 8D 45 08 57 56 53 81 EC FC 00 00 00 8B 75 08 89 45 EC E8 61 7A 00 00
+
+. 0 3A96DECA 28
+. 81 C3 A2 A7 00 00 F6 86 74 01 00 00 04 C7 45 98 00 00 00 00 89 F2 0F 85 1A 04 00 00
+
+. 0 3A96DEE6 7
+. 8B 7D 14 85 FF 75 0E
+
+. 0 3A96DEED 27
+. 31 C0 83 7E 78 00 0F 94 C0 F7 D8 21 45 10 F6 83 14 FC FF FF 20 0F 85 04 0A 00 00
+
+. 0 3A96DF08 11
+. 8B 4E 70 85 C9 0F 85 3A 0A 00 00
+
+. 0 3A96DF13 16
+. 8B 46 2C 8B 40 04 89 45 F0 8B 46 74 85 C0 74 4D
+
+. 0 3A96DF23 7
+. 8B 7D 10 85 FF 74 46
+
+. 0 3A96DF2A 13
+. 8B 46 24 8B 50 04 8B 42 04 85 C0 74 13
+
+. 0 3A96DF4A 14
+. 89 72 04 8B 45 14 85 C0 0F 85 DF 0A 00 00
+
+. 0 3A96DF58 11
+. 8D 83 84 78 FF FF 89 42 08 EB 0D
+
+. 0 3A96DF70 35
+. C7 45 D4 00 00 00 00 8B 46 5C C7 45 DC 00 00 00 00 C7 45 D0 00 00 00 00 85 C0 C7 45 CC 00 00 00 00 74 0F
+
+. 0 3A96DF93 22
+. 8B 40 04 89 45 CC 8B 46 60 8B 40 04 89 45 D0 8B 46 68 85 C0 74 0A
+
+. 0 3A96DFA9 10
+. 83 78 04 11 0F 84 73 02 00 00
+
+. 0 3A96E226 13
+. 8B 7D 10 8B 46 74 85 FF 8B 48 04 75 0C
+
+. 0 3A96E23F 23
+. 89 4D D8 8B 46 20 8B 55 10 8B 40 04 89 55 E0 89 45 DC E9 5D FD FF FF
+
+. 0 3A96DFB3 51
+. C7 45 90 00 00 00 00 89 75 8C 8B 4D 90 8D 04 49 8D 44 85 F4 8D 50 D8 8B 48 D8 8B 7A 04 89 C8 01 F8 89 45 84 8B 06 89 4D 88 89 45 80 8B 42 08 85 C0 74 4F
+
+. 0 3A96E035 30
+. 8B 46 30 31 D2 8B 40 04 89 95 78 FF FF FF 89 85 7C FF FF FF 8B 86 B4 00 00 00 85 C0 74 09
+
+. 0 3A96E053 24
+. 8B 40 04 89 85 78 FF FF FF 89 F8 89 CA C1 E8 03 3B 85 78 FF FF FF 76 06
+
+. 0 3A96E06B 22
+. 8B 85 78 FF FF FF 8D 8B E4 F9 FF FF 8D 04 C2 39 CE 89 45 88 74 2F
+
+. 0 3A96E081 7
+. 8B 7D 80 85 FF 74 28
+
+. 0 3A96E088 4
+. 39 C2 73 24
+
+. 0 3A96E08C 24
+. 8D 74 26 00 8B 0A 83 C2 08 8B 45 80 8B 7D 80 01 C8 01 38 3B 55 88 72 EC
+
+. 0 3A96E090 20
+. 8B 0A 83 C2 08 8B 45 80 8B 7D 80 01 C8 01 38 3B 55 88 72 EC
+
+. 0 3A96E0A4 26
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 86 DC 00 00 00 85 C0 0F 84 73 05 00 00
+
+. 0 3A96E0BE 21
+. 8B 40 04 89 85 74 FF FF FF 8B 45 84 39 45 88 0F 83 5D 02 00 00
+
+. 0 3A96E0D3 110
+. 8D B6 00 00 00 00 8D BC 27 00 00 00 00 8B 55 88 8B BD 74 FF FF FF 8B 52 04 89 95 0C FF FF FF 89 D0 C1 E8 08 0F B7 0C 47 8B BD 7C FF FF FF C1 E0 04 01 C7 8B 45 8C 81 E1 FF 7F 00 00 89 7D A8 C1 E1 04 8B 90 7C 01 00 00 8B 45 80 01 D1 8B 55 88 03 02 0F B6 95 0C FF FF FF 89 85 70 FF FF FF 83 FA 08 89 95 6C FF FF FF 0F 84 A4 0B 00 00
+
+. 0 3A96E141 14
+. 8B 85 6C FF FF FF 85 C0 0F 84 D1 01 00 00
+
+. 0 3A96E14F 21
+. 89 BD 68 FF FF FF 0F B6 47 0C C0 E8 04 84 C0 0F 84 DC 05 00 00
+
+. 0 3A96E164 12
+. 3B BE FC 01 00 00 0F 84 D0 0B 00 00
+
+. 0 3A96E170 25
+. 31 D2 83 BD 6C FF FF FF 07 0F 94 C2 83 BD 6C FF FF FF 05 0F 84 7B 07 00 00
+
+. 0 3A96E189 29
+. 89 96 00 02 00 00 8B BD 68 FF FF FF 89 BE FC 01 00 00 31 FF 85 C9 BE 01 00 00 00 74 0B
+
+. 0 3A96E1A6 7
+. 8B 41 04 85 C0 74 04
+
+. 0 3A96E1AD 49
+. 89 CF 31 F6 8B 45 A8 8B 4D F0 8B 00 89 74 24 0C 89 7C 24 04 01 C8 31 C9 89 4C 24 10 8D 4D A8 89 54 24 08 8B 55 0C 89 14 24 8B 55 08 E8 02 DF FF FF
+
+. 0 3A96C0E0 19
+. 55 89 E5 57 31 FF 56 53 81 EC A0 00 00 00 E8 38 98 00 00
+
+. 0 3A96C0F3 20
+. 81 C3 79 C5 00 00 89 45 A8 89 55 A4 89 4D A0 E8 E9 FB FF FF
+
+. 0 3A96BCF0 14
+. 55 89 C1 31 D2 0F B6 00 89 E5 84 C0 74 72
+
+. 0 3A96BCFE 11
+. 41 0F B6 D0 0F B6 01 84 C0 74 67
+
+. 0 3A96BD09 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 57
+
+. 0 3A96BD19 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 47
+
+. 0 3A96BD29 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 37
+
+. 0 3A96BD39 16
+. C1 E2 04 0F B6 C0 41 01 C2 0F B6 01 84 C0 74 27
+
+. 0 3A96BD49 37
+. 8D B4 26 00 00 00 00 C1 E2 04 0F B6 C0 01 C2 89 D0 41 25 00 00 00 F0 31 C2 C1 E8 18 31 C2 0F B6 01 84 C0 75 E2
+
+. 0 3A96BD50 30
+. C1 E2 04 0F B6 C0 01 C2 89 D0 41 25 00 00 00 F0 31 C2 C1 E8 18 31 C2 0F B6 01 84 C0 75 E2
+
+. 0 3A96BD6E 6
+. 89 F6 5D 89 D0 C3
+
+. 0 3A96C107 37
+. 89 45 9C 8B 45 18 8B 75 08 FF 83 D4 F9 FF FF 85 C0 C7 45 BC 00 00 00 00 C7 45 C0 00 00 00 00 0F 85 90 02 00 00
+
+. 0 3A96C12C 11
+. 8B 45 08 8B 10 85 D2 89 F8 74 5E
+
+. 0 3A96C137 69
+. 8D 55 BC 89 55 84 8D 76 00 8B 4D 10 89 4C 24 18 8B 55 18 89 54 24 14 8B 4D 14 89 4C 24 10 8B 55 0C 89 44 24 08 89 54 24 0C 8B 06 89 44 24 04 8B 4D 84 89 0C 24 8B 45 A0 8B 55 9C 8B 08 8B 45 A8 E8 34 FC FF FF
+
+. 0 3A96BDB0 23
+. 55 89 E5 57 56 53 83 EC 3C 89 45 F0 8B 45 0C 89 55 EC E8 64 9B 00 00
+
+. 0 3A96BDC7 22
+. 81 C3 A5 C8 00 00 89 4D E8 8B 10 8B 40 04 89 55 E4 89 45 E0 EB 12
+
+. 0 3A96BDEF 48
+. C7 45 CC 00 00 00 00 8B 45 E4 8B 4D 10 C7 45 C8 00 00 00 00 8B 55 1C 8B 0C 88 8B 45 1C 89 4D DC 85 C0 0F 95 C0 39 D1 0F 94 C2 21 D0 A8 01 75 C1
+
+. 0 3A96BE1F 6
+. F6 45 20 02 74 0B
+
+. 0 3A96BE30 13
+. F6 83 14 FC FF FF 08 0F 85 FB 01 00 00
+
+. 0 3A96BE3D 56
+. 8B 55 DC 8B 42 30 8B 8A 8C 01 00 00 8B 40 04 89 45 D8 8B 42 2C 8B 40 04 89 4D D0 89 D1 31 D2 89 45 D4 8B 45 EC F7 B1 64 01 00 00 8B 81 68 01 00 00 8B 34 90 85 F6 75 1F
+
+. 0 3A96BE94 17
+. 8B 7D D8 89 F0 C1 E0 04 01 C7 8B 47 04 85 C0 74 DB
+
+. 0 3A96BE80 20
+. 8B 55 DC 8B 82 6C 01 00 00 8B 34 B0 85 F6 0F 84 DC 00 00 00
+
+. 0 3A96BF70 9
+. 83 7D CC 01 8B 7D C8 74 02
+
+. 0 3A96BF79 6
+. 31 FF 85 FF 74 18
+
+. 0 3A96BF97 23
+. 8B 55 14 85 F6 0F 94 C0 85 D2 0F 95 C2 21 D0 A8 01 0F 84 32 FE FF FF
+
+. 0 3A96BFAE 14
+. 8B 55 14 8B 42 0C 85 C0 0F 84 24 FE FF FF
+
+. 0 3A96BDE0 15
+. FF 45 10 8B 4D E0 39 4D 10 0F 83 ED 01 00 00
+
+. 0 3A96BEA5 7
+. 66 83 7F 0E 00 75 06
+
+. 0 3A96BEB2 12
+. 0F B6 47 0C 83 E0 0F 83 F8 02 7F C2
+
+. 0 3A96BEBE 5
+. 3B 7D E8 74 1A
+
+. 0 3A96BEC3 22
+. 8B 0F 8B 45 D4 8B 55 F0 01 C8 89 54 24 04 89 04 24 E8 A7 93 00 00
+
+. 0 3A96BED9 4
+. 85 C0 75 A3
+
+. 0 3A96BE75 5
+. E9 FF 00 00 00
+
+. 0 3A96BEDD 11
+. 8B 55 14 85 D2 0F 84 81 01 00 00
+
+. 0 3A96BEE8 11
+. 8B 45 D0 85 C0 0F 84 8C 00 00 00
+
+. 0 3A96BEF3 47
+. 8B 4D D0 8B 45 DC 0F B7 0C 71 8B 90 7C 01 00 00 81 E1 FF 7F 00 00 C1 E1 04 89 4D C4 8B 4D 14 8B 41 04 8B 4D C4 39 44 0A 04 0F 84 C6 00 00 00
+
+. 0 3A96BFE8 23
+. 8B 4D 14 8B 01 89 44 24 04 8B 4D C4 8B 04 0A 89 04 24 E8 81 92 00 00
+
+. 0 3A96BFFF 8
+. 85 C0 0F 85 1B FF FF FF
+
+. 0 3A96C007 5
+. E9 73 FF FF FF
+
+. 0 3A96BF7F 19
+. 0F B6 47 0C C0 E8 04 0F B6 C0 83 F8 01 0F 84 8C 00 00 00
+
+. 0 3A96C01E 26
+. 8B 55 08 8B 4D DC 89 3A 89 4A 04 BA 01 00 00 00 83 C4 3C 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96C17C 6
+. 85 C0 89 C2 7F 13
+
+. 0 3A96C195 11
+. 8B 75 BC 85 F6 0F 84 3E 02 00 00
+
+. 0 3A96C1A0 16
+. C7 45 90 00 00 00 00 8B 4D A0 8B 11 85 D2 74 0C
+
+. 0 3A96C1B0 12
+. 0F B6 42 0D 83 E0 03 83 F8 03 74 48
+
+. 0 3A96C1BC 29
+. 8D 55 BC 89 55 84 8B 4D 84 8B 71 04 0F B6 86 74 01 00 00 24 03 3C 02 0F 84 2E 04 00 00
+
+. 0 3A96C1D9 17
+. 8B 83 14 FC FF FF A9 04 02 00 00 0F 85 28 02 00 00
+
+. 0 3A96C1EA 26
+. 8B 45 BC 8B 4D A0 8B 55 84 89 01 8B 42 04 8B 00 8D 65 F4 5B 5E 5F 5D C2 14 00
+
+. 0 3A96E1DE 33
+. 89 85 64 FF FF FF 8B 55 A8 83 EC 14 8B 75 08 89 D7 89 96 08 02 00 00 89 86 04 02 00 00 85 FF 74 09
+
+. 0 3A96E1FF 22
+. 8B 57 04 01 95 64 FF FF FF 83 BD 6C FF FF FF 07 0F 87 56 08 00 00
+
+. 0 3A96E215 17
+. 8B 8D 6C FF FF FF 8B 84 8B B0 D7 FF FF 01 D8 FF E0
+
+. 0 3A96EADD 19
+. 8B 95 70 FF FF FF 8B 8D 64 FF FF FF 01 0A E9 30 F8 FF FF
+
+. 0 3A96E320 16
+. 83 45 88 08 8B 7D 84 39 7D 88 0F 82 B0 FD FF FF
+
+. 0 3A96E0E0 97
+. 8B 55 88 8B BD 74 FF FF FF 8B 52 04 89 95 0C FF FF FF 89 D0 C1 E8 08 0F B7 0C 47 8B BD 7C FF FF FF C1 E0 04 01 C7 8B 45 8C 81 E1 FF 7F 00 00 89 7D A8 C1 E1 04 8B 90 7C 01 00 00 8B 45 80 01 D1 8B 55 88 03 02 0F B6 95 0C FF FF FF 89 85 70 FF FF FF 83 FA 08 89 95 6C FF FF FF 0F 84 A4 0B 00 00
+
+. 0 3A96ED40 31
+. 31 C0 83 BD 6C FF FF FF 07 8B 96 00 02 00 00 0F 94 C0 83 BD 6C FF FF FF 05 0F 84 5F 01 00 00
+
+. 0 3A96ED5F 8
+. 39 D0 0F 85 09 F4 FF FF
+
+. 0 3A96ED67 26
+. FF 83 D8 F9 FF FF 8B BE 08 02 00 00 8B 86 04 02 00 00 89 7D A8 E9 C1 F9 FF FF
+
+. 0 3A96E742 11
+. 89 85 64 FF FF FF E9 AE FA FF FF
+
+. 0 3A96E1FB 4
+. 85 FF 74 09
+
+. 0 3A96E308 40
+. 8B 85 64 FF FF FF 8B BD 70 FF FF FF 89 07 8D 76 00 8D BC 27 00 00 00 00 83 45 88 08 8B 7D 84 39 7D 88 0F 82 B0 FD FF FF
+
+. 0 3A96BF92 5
+. 83 F8 02 74 79
+
+. 0 3A96C010 14
+. 8B 83 48 FC FF FF 85 C0 0F 85 A2 00 00 00
+
+. 0 3A96BFBC 8
+. 8B 55 DC E8 BC 4B 00 00
+
+. 0 3A96BFC4 9
+. 85 C0 BA FF FF FF FF 75 61
+
+. 0 3A96BFCD 15
+. FF 45 10 8B 4D E0 39 4D 10 0F 82 13 FE FF FF
+
+. 0 3A96E1B1 45
+. 8B 45 A8 8B 4D F0 8B 00 89 74 24 0C 89 7C 24 04 01 C8 31 C9 89 4C 24 10 8D 4D A8 89 54 24 08 8B 55 0C 89 14 24 8B 55 08 E8 02 DF FF FF
+
+. 0 3A96BFDC 12
+. 31 D2 83 C4 3C 89 D0 5B 5E 5F 5D C3
+
+. 0 3A96C182 8
+. 85 C0 0F 88 46 01 00 00
+
+. 0 3A96C18A 11
+. 83 C6 04 31 C0 8B 16 85 D2 75 AB
+
+. 0 3A96C3DE 9
+. 8B 55 A0 8B 02 85 C0 74 0B
+
+. 0 3A96C3E7 11
+. 0F B6 40 0C C0 E8 04 3C 02 74 0B
+
+. 0 3A96C3FD 21
+. 8B 55 A0 C7 02 00 00 00 00 31 C0 8D 65 F4 5B 5E 5F 5D C2 14 00
+
+. 0 3A96E208 13
+. 83 BD 6C FF FF FF 07 0F 87 56 08 00 00
+
+. 0 3A96BD70 4
+. 5D 89 D0 C3
+
+. 0 3A96C069 11
+. 8B 45 D0 85 C0 0F 84 0B FF FF FF
+
+. 0 3A96C074 6
+. F6 45 18 02 74 33
+
+. 0 3A96C0AD 19
+. 8B 45 D0 0F B7 14 70 89 D0 25 FF 7F 00 00 83 F8 02 EB C9
+
+. 0 3A96C089 6
+. 0F 8E F0 FE FF FF
+
+. 0 3A96E330 13
+. FF 45 90 83 7D 90 01 0F 8E 7D FC FF FF
+
+. 0 3A96DFBA 44
+. 89 75 8C 8B 4D 90 8D 04 49 8D 44 85 F4 8D 50 D8 8B 48 D8 8B 7A 04 89 C8 01 F8 89 45 84 8B 06 89 4D 88 89 45 80 8B 42 08 85 C0 74 4F
+
+. 0 3A96DFE6 7
+. 8B 55 84 39 D1 72 28
+
+. 0 3A96E015 19
+. 8B 7D 88 8B 4D 80 8B 07 0F B6 57 04 01 C1 83 FA 07 74 CA
+
+. 0 3A96DFF2 14
+. 8B 96 F4 01 00 00 85 D2 0F 85 CE 08 00 00
+
+. 0 3A96E000 21
+. 8B 45 80 01 01 83 45 88 08 8B 55 84 39 55 88 0F 83 1B 03 00 00
+
+. 0 3A96E33D 35
+. C7 45 B4 00 00 00 00 8B 46 34 C7 45 BC 00 00 00 00 C7 45 B0 00 00 00 00 85 C0 C7 45 AC 00 00 00 00 74 0F
+
+. 0 3A96E36F 7
+. 8B 46 68 85 C0 74 0A
+
+. 0 3A96E376 10
+. 83 78 04 07 0F 84 71 02 00 00
+
+. 0 3A96E380 91
+. 31 C0 8D BB E4 F9 FF FF 89 85 54 FF FF FF 89 BD 14 FF FF FF 89 B5 50 FF FF FF 8B 95 54 FF FF FF 31 FF 8B 0E 8D 04 52 8D 44 85 F4 89 8D 40 FF FF FF 8B 50 B8 8B 40 BC 89 85 4C FF FF FF 01 D0 89 85 44 FF FF FF 8B 46 30 8B 40 04 89 85 3C FF FF FF 8B 86 B8 00 00 00 85 C0 74 03
+
+. 0 3A96E3DE 22
+. 8B 85 4C FF FF FF 89 D1 BA AB AA AA AA F7 E2 C1 EA 03 39 FA 76 02
+
+. 0 3A96E3F6 20
+. 3B B5 14 FF FF FF 8D 04 52 8D 04 81 89 85 48 FF FF FF 74 48
+
+. 0 3A96E40A 10
+. 8B 85 40 FF FF FF 85 C0 75 0A
+
+. 0 3A96E41E 8
+. 3B 8D 48 FF FF FF 73 2C
+
+. 0 3A96E452 14
+. 8B 86 DC 00 00 00 85 C0 0F 84 F0 02 00 00
+
+. 0 3A96E460 27
+. 8B 95 44 FF FF FF 8B 40 04 39 95 48 FF FF FF 89 85 38 FF FF FF 0F 83 35 FE FF FF
+
+. 0 3A96E2B0 19
+. FF 85 54 FF FF FF 83 BD 54 FF FF FF 01 0F 8E D1 00 00 00
+
+. 0 3A96E394 71
+. 89 B5 50 FF FF FF 8B 95 54 FF FF FF 31 FF 8B 0E 8D 04 52 8D 44 85 F4 89 8D 40 FF FF FF 8B 50 B8 8B 40 BC 89 85 4C FF FF FF 01 D0 89 85 44 FF FF FF 8B 46 30 8B 40 04 89 85 3C FF FF FF 8B 86 B8 00 00 00 85 C0 74 03
+
+. 0 3A96E2C3 11
+. 8B 45 14 85 C0 0F 85 E9 0A 00 00
+
+. 0 3A96E2CE 18
+. 80 8E 74 01 00 00 04 8B 4D 98 85 C9 0F 85 A1 0A 00 00
+
+. 0 3A96E2E0 13
+. 8B 45 08 8B 90 10 02 00 00 85 D2 74 13
+
+. 0 3A96E300 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96E05C 15
+. 89 F8 89 CA C1 E8 03 3B 85 78 FF FF FF 76 06
+
+. 0 3A96E0B0 14
+. 8B 86 DC 00 00 00 85 C0 0F 84 73 05 00 00
+
+. 0 3A96E414 10
+. 8B 86 14 01 00 00 85 C0 75 34
+
+. 0 3A968122 5
+. E8 09 A6 00 00
+
+. 0 3A972730 5
+. 55 89 E5 5D C3
+
+. 0 3A968127 10
+. 8B 83 D0 F9 FF FF 85 C0 75 62
+
+. 0 3A968131 13
+. 83 BB 54 FB FF FF 01 0F 86 86 F2 FF FF
+
+. 0 3A96813E 36
+. 31 C0 89 44 24 0C 31 C0 89 44 24 08 8B 83 94 F9 FF FF 8B 80 B0 01 00 00 89 3C 24 89 44 24 04 E8 4E 5D 00 00
+
+. 0 3A96E233 12
+. 8B 55 D0 8B 45 CC 01 D0 39 C8 74 17
+
+. 0 3A96E256 16
+. 8B 46 20 8B 48 04 01 CA 89 55 D0 E9 4D FD FF FF
+
+. 0 3A96DFED 5
+. E9 3E 03 00 00
+
+. 0 3A968162 5
+. E9 5D F2 FF FF
+
+. 0 3A9673C4 18
+. 8B 95 78 FE FF FF C7 42 0C 01 00 00 00 E8 9A 97 00 00
+
+. 0 3A970B70 5
+. 55 89 E5 5D C3
+
+. 0 3A9673D6 5
+. E8 F5 42 00 00
+
+. 0 3A96B6D0 12
+. 55 89 E5 53 83 EC 08 E8 4F A2 00 00
+
+. 0 3A96B6DC 29
+. 81 C3 90 CF 00 00 8B 8B DC 00 00 00 85 C9 0F 95 C0 83 F9 FF 0F 95 C2 21 D0 A8 01 75 07
+
+. 0 3A96B700 18
+. 89 0C 24 8B 83 E4 00 00 00 89 44 24 04 E8 7E 95 00 00
+
+. 0 3A974C90 17
+. 89 DA 8B 4C 24 08 8B 5C 24 04 B8 5B 00 00 00 CD 80
+
+. 0 3A974CA1 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3A974CAA 1
+. C3
+
+. 0 3A96B712 14
+. 31 C0 89 83 DC 00 00 00 83 C4 08 5B 5D C3
+
+. 0 3A9673DB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A9730A0 17
+. 8B 85 20 FE FF FF 81 C4 04 02 00 00 5B 5E 5F 5D C3
+
+. 0 3A965ECA 11
+. 80 BB 14 FC FF FF 00 89 C7 78 5B
+
+. 0 3A965ED5 10
+. 83 C4 54 89 F8 5B 5E 5F 5D C3
+
+. 0 3A965887 7
+. 89 C7 E8 E2 FF FF FF
+
+. 0 3A965870 4
+. 8B 1C 24 C3
+
+. 0 3A96588E 39
+. 81 C3 DE 2D 01 00 8B 83 08 02 00 00 5A 8D 24 84 29 C2 52 8B 83 94 F9 FF FF 8D 74 94 08 8D 4C 24 04 56 E8 CB AD 00 00
+
+. 0 3A970680 22
+. 55 89 E5 57 89 C7 56 53 83 EC 2C 8B 80 98 00 00 00 E8 95 52 00 00
+
+. 0 3A970696 35
+. 81 C3 D6 7F 00 00 89 55 F0 8B B7 9C 00 00 00 89 45 E8 8B 83 CC F9 FF FF 89 4D EC 85 C0 0F 85 91 00 00 00
+
+. 0 3A97074A 14
+. 8B 55 08 89 14 24 8B 55 F0 E8 08 FE FF FF
+
+. 0 3A970560 29
+. 55 89 E5 83 EC 28 89 5D F4 89 75 F8 89 C6 89 7D FC 0F B6 80 74 01 00 00 E8 AE 53 00 00
+
+. 0 3A97057D 16
+. 81 C3 EF 80 00 00 89 55 F0 89 4D EC A8 08 75 33
+
+. 0 3A97058D 25
+. 88 C2 8B 46 04 80 CA 08 88 96 74 01 00 00 0F B6 08 84 C9 0F 84 98 00 00 00
+
+. 0 3A9705A6 7
+. 8B 56 48 85 D2 75 23
+
+. 0 3A9705D0 9
+. F6 83 14 FC FF FF 02 75 7C
+
+. 0 3A9705D9 4
+. 85 D2 75 42
+
+. 0 3A97061F 29
+. 8B 42 04 8B 55 08 8B 3E 89 54 24 08 8B 55 EC 01 F8 89 54 24 04 8B 55 F0 89 14 24 FF D0
+
+. 0 3A97C92C 11
+. 55 89 E5 83 EC 08 E8 89 00 00 00
+
+. 0 3A97C9C0 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 17 14 00 00 52 8B 83 2C 00 00 00 85 C0 74 02
+
+. 0 3A97C9DD 4
+. 58 5B C9 C3
+
+. 0 3A97C937 5
+. E8 24 01 00 00
+
+. 0 3A97CA60 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 77 13 00 00 50 8B 83 FC FF FF FF 85 C0 74 0A
+
+. 0 3A97CA85 5
+. 8B 5D FC C9 C3
+
+. 0 3A97C93C 5
+. E8 5F 03 00 00
+
+. 0 3A97CCA0 34
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 36 11 00 00 8D 83 F0 FF FF FF 8D 70 FC 8B 40 FC 83 F8 FF 74 0C
+
+. 0 3A97CCCE 4
+. 5B 5E 5D C3
+
+. 0 3A97C941 2
+. C9 C3
+
+. 0 3A97063C 2
+. EB 9F
+
+. 0 3A9705DD 7
+. 8B 56 7C 85 D2 74 DC
+
+. 0 3A9705C0 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A970758 19
+. 31 C0 89 83 CC F9 FF FF 8B 45 E8 85 C0 0F 84 59 FF FF FF
+
+. 0 3A9706C4 7
+. 31 C0 E8 65 04 00 00
+
+. 0 3A970B65 4
+. 5D 89 D0 C3
+
+. 0 3A9706CB 15
+. 89 45 E4 C7 40 0C 01 00 00 00 E8 96 04 00 00
+
+. 0 3A9706DA 13
+. 8B B7 54 01 00 00 89 F0 4E 85 C0 75 21
+
+. 0 3A970708 26
+. 8B 87 D4 01 00 00 8B 55 08 8B 04 B0 89 14 24 8B 4D EC 8B 55 F0 E8 3E FE FF FF
+
+. 0 3A9705AD 7
+. 8B 7E 7C 85 FF 75 1C
+
+. 0 3A9705B4 25
+. 8D B6 00 00 00 00 8D BF 00 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A970722 7
+. 89 F0 4E 85 C0 75 DF
+
+. 0 3A9D67C0 16
+. 55 31 C0 89 E5 83 EC 18 89 5D F4 E8 C1 FF FF FF
+
+. 0 3A9D6791 4
+. 8B 1C 24 C3
+
+. 0 3A9D67D0 28
+. 81 C3 40 C8 0F 00 89 75 F8 8B 93 A4 00 00 00 8B 75 10 89 7D FC 85 D2 8B 7D 0C 74 14
+
+. 0 3A9D67EC 6
+. 8B 12 85 D2 75 0E
+
+. 0 3A9D6800 10
+. 89 83 10 90 FF FF 85 C0 75 26
+
+. 0 3A9D680A 21
+. 8B 83 80 01 00 00 0F B7 10 8B 83 68 01 00 00 66 3B 50 38 74 11
+
+. 0 3A9D6830 42
+. 89 BB D8 28 00 00 8B 45 08 89 83 D4 28 00 00 8B 83 3C 01 00 00 89 30 89 74 24 08 89 7C 24 04 8B 45 08 89 04 24 E8 A6 45 0A 00
+
+. 0 3AA7AE00 20
+. 55 89 E5 83 EC 14 89 7D FC 8B 7D 0C 89 5D F4 E8 7D B9 F5 FF
+
+. 0 3AA7AE14 13
+. 81 C3 FC 81 05 00 89 75 F8 85 FF 74 30
+
+. 0 3AA7AE21 6
+. 8B 37 85 F6 74 2A
+
+. 0 3AA7AE27 17
+. 89 34 24 B8 2F 00 00 00 89 44 24 04 E8 A8 B7 F5 FF
+
+. 0 3A9D65E0 6
+. FF A3 40 00 00 00
+
+. 0 3A9D65E6 10
+. 68 68 00 00 00 E9 10 FF FF FF
+
+. 0 3A9D6500 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 0 3A96FEF0 16
+. 50 51 52 8B 54 24 10 8B 44 24 0C E8 60 01 00 00
+
+. 0 3A970060 22
+. 55 89 E5 57 56 89 C6 53 83 EC 24 8B 48 30 8B 40 2C E8 B5 58 00 00
+
+. 0 3A970076 59
+. 81 C3 F6 85 00 00 8B 40 04 89 45 EC 8B 46 74 8B 78 04 01 FA 8B 7A 04 8B 12 C1 EF 08 89 F8 C1 E0 04 03 41 04 8B 0E 01 CA 89 55 E8 89 45 E4 F6 40 0D 03 89 45 F0 0F 85 97 00 00 00
+
+. 0 3A9700B1 12
+. 8B 86 DC 00 00 00 31 D2 85 C0 74 25
+
+. 0 3A9700BD 95
+. 8B 40 04 0F B7 14 78 8B 86 7C 01 00 00 81 E2 FF 7F 00 00 C1 E2 04 01 C2 31 C0 83 7A 04 00 0F 95 C0 F7 D8 21 C2 8B 45 E4 BF 01 00 00 00 8D 4D F0 8B 00 01 45 EC 31 C0 89 44 24 10 B8 01 00 00 00 89 54 24 04 89 F2 89 44 24 0C 89 7C 24 08 8B 86 B0 01 00 00 89 04 24 8B 45 EC E8 C4 BF FF FF
+
+. 0 3A97011C 14
+. 8B 55 F0 83 EC 14 89 C1 31 C0 85 D2 74 07
+
+. 0 3A97012A 17
+. 89 C8 8B 4A 04 01 C8 8B B3 44 FC FF FF 85 F6 75 05
+
+. 0 3A97013B 13
+. 8B 55 E8 89 02 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A96FF00 8
+. 5A 59 87 04 24 C2 08 00
+
+. 0 3AA28860 34
+. 57 56 31 C0 8B 74 24 0C 8B 4C 24 10 88 CD 89 CA C1 E1 10 66 89 D1 F7 C6 03 00 00 00 0F 84 81 00 00 00
+
+. 0 3AA28903 15
+. 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00
+
+. 0 3AA28912 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 CB 00 00 00
+
+. 0 3AA28921 11
+. 31 CA BF FF FE FE FE 01 D7 73 9E
+
+. 0 3AA2892C 11
+. 31 D7 81 CF FF FE FE FE 47 75 A9
+
+. 0 3AA288E0 17
+. 83 EE 04 83 EE 04 83 EE 04 F7 C2 00 00 FF 00 75 05
+
+. 0 3AA288F6 7
+. 8D 46 0C 84 F6 75 03
+
+. 0 3AA288FD 21
+. 8D 46 0D 83 C6 10 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00
+
+. 0 3AA288F1 5
+. 8D 46 0E EB 0A
+
+. 0 3AA28900 18
+. 83 C6 10 8B 16 BF FF FE FE FE 01 D7 0F 83 DA 00 00 00
+
+. 0 3AA28937 16
+. 8B 56 04 BF FF FE FE FE 01 D7 0F 83 A2 00 00 00
+
+. 0 3AA28947 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 93 00 00 00
+
+. 0 3AA28956 15
+. 31 CA BF FF FE FE FE 01 D7 0F 83 68 FF FF FF
+
+. 0 3AA28965 15
+. 31 D7 81 CF FF FE FE FE 47 0F 85 6F FF FF FF
+
+. 0 3AA288E3 14
+. 83 EE 04 83 EE 04 F7 C2 00 00 FF 00 75 05
+
+. 0 3AA289E9 7
+. 83 C6 04 38 CA 75 02
+
+. 0 3AA289F2 4
+. 84 D2 74 20
+
+. 0 3AA289F6 4
+. 38 CE 75 03
+
+. 0 3AA289FD 4
+. 84 F6 74 15
+
+. 0 3AA28A16 3
+. 5E 5F C3
+
+. 0 3AA7AE38 4
+. 85 C0 74 22
+
+. 0 3AA7AE3C 34
+. 8D 50 01 8B 83 98 01 00 00 89 10 8B 17 8B 83 70 02 00 00 89 10 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9D685A 5
+. E8 C1 FE FF FF
+
+. 0 3A9D6720 10
+. 55 89 E5 56 53 E8 67 00 00 00
+
+. 0 3A9D672A 18
+. 81 C3 E6 C8 0F 00 8D B3 94 FF FF FF 8B 06 85 C0 75 04
+
+. 0 3A9D6740 5
+. 83 C6 04 FF D0
+
+. 0 3AA217B0 14
+. 55 89 E5 83 EC 08 89 1C 24 E8 D3 4F FB FF
+
+. 0 3AA217BE 20
+. 81 C3 52 18 0B 00 89 74 24 04 8B 83 38 02 00 00 85 C0 75 4E
+
+. 0 3AA21820 11
+. 8B 1C 24 8B 74 24 04 89 EC 5D C3
+
+. 0 3A9D6745 6
+. 8B 06 85 C0 75 F5
+
+. 0 3A9D674B 6
+. 5B 5E 5D 89 F6 C3
+
+. 0 3A9D685F 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9A148C 11
+. 55 89 E5 83 EC 08 E8 D9 00 00 00
+
+. 0 3A9A1570 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 CB E2 01 00 52 8B 83 50 00 00 00 85 C0 74 02
+
+. 0 3A9A158D 4
+. 58 5B C9 C3
+
+. 0 3A9A1497 5
+. E8 74 01 00 00
+
+. 0 3A9A1610 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 2B E2 01 00 50 8B 83 FC FF FF FF 85 C0 74 0A
+
+. 0 3A9A1635 5
+. 8B 5D FC C9 C3
+
+. 0 3A9A149C 5
+. E8 7F 8B 01 00
+
+. 0 3A9BA020 34
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 1A 58 00 00 8D 83 F0 FF FF FF 8D 70 FC 8B 40 FC 83 F8 FF 74 0C
+
+. 0 3A9BA04E 4
+. 5B 5E 5D C3
+
+. 0 3A9A14A1 2
+. C9 C3
+
+. 0 3A97063E 9
+. F6 C2 03 0F 84 79 FF FF FF
+
+. 0 3A970729 15
+. 8B 45 E4 C7 40 0C 00 00 00 00 E8 38 04 00 00
+
+. 0 3A970738 18
+. 31 C0 89 83 34 00 00 00 83 C4 2C 5B 5E 5F 5D C2 04 00
+
+. 0 3A9658B5 8
+. 8D 93 94 81 FF FF FF E7
+
+. 0 8048430 33
+. 31 ED 5E 89 E1 83 E4 F0 50 54 52 68 B0 5A 05 08 68 20 5B 05 08 51 56 68 B0 57 05 08 E8 AF FF FF FF
+
+. 0 8048400 6
+. FF 25 78 92 05 08
+
+. 0 8048406 10
+. 68 20 00 00 00 E9 A0 FF FF FF
+
+. 0 80483B0 12
+. FF 35 60 92 05 08 FF 25 64 92 05 08
+
+. 0 3A9D6870 22
+. 55 31 C9 89 E5 57 56 53 83 EC 0C 8B 45 0C 8B 7D 10 E8 0B FF FF FF
+
+. 0 3A9D6886 23
+. 81 C3 8A C7 0F 00 8B 75 1C 8D 54 87 04 8B 83 A4 00 00 00 85 C0 74 13
+
+. 0 3A9D689D 6
+. 8B 00 85 C0 75 0D
+
+. 0 3A9D68A3 33
+. B9 01 00 00 00 90 8D B4 26 00 00 00 00 8B 83 B4 00 00 00 85 F6 89 08 8B 83 3C 01 00 00 89 10 74 14
+
+. 0 3A9D68C4 20
+. 89 34 24 31 C0 89 44 24 08 31 C0 89 44 24 04 E8 88 55 01 00
+
+. 0 3A9EBE60 9
+. 55 89 E5 53 E8 28 A9 FE FF
+
+. 0 3A9EBE69 11
+. 81 C3 A7 71 0E 00 E8 FC FE FF FF
+
+. 0 3A9EBD70 16
+. 55 89 E5 57 31 FF 56 53 83 EC 04 E8 11 AA FE FF
+
+. 0 3A9EBD80 20
+. 81 C3 90 72 0E 00 8B 93 3C 2B 00 00 85 D2 0F 85 A0 00 00 00
+
+. 0 3A9EBD94 10
+. 8B B3 BC 9E FF FF 85 F6 74 2B
+
+. 0 3A9EBD9E 11
+. 89 F6 8B 56 04 31 FF 39 D7 73 15
+
+. 0 3A9EBDBE 5
+. 83 FA 1F 76 3A
+
+. 0 3A9EBDFD 12
+. 8D 42 01 89 D7 89 46 04 85 F6 74 C0
+
+. 0 3A9EBE09 24
+. 89 F8 B9 01 00 00 00 C1 E0 04 89 4C 06 08 8B 93 44 2B 00 00 85 D2 75 23
+
+. 0 3A9EBE21 6
+. 31 C0 85 F6 74 07
+
+. 0 3A9EBE27 13
+. C1 E7 04 8D 44 37 08 5A 5B 5E 5F 5D C3
+
+. 0 3A9EBE74 11
+. 89 C2 85 D2 B8 FF FF FF FF 74 1A
+
+. 0 3A9EBE7F 29
+. C7 02 04 00 00 00 8B 45 08 89 42 04 8B 45 0C 89 42 08 8B 45 10 89 42 0C 31 C0 5B 5D C3
+
+. 0 3A9D68D8 7
+. 8B 75 18 85 F6 74 17
+
+. 0 3A9D68DF 23
+. 31 D2 31 C9 89 4C 24 08 89 54 24 04 8B 55 18 89 14 24 E8 6A 55 01 00
+
+. 0 3A9EBDA9 13
+. 8D 46 08 8D 74 26 00 8B 08 85 C9 74 3E
+
+. 0 3A9EBDB6 8
+. 47 83 C0 10 39 D7 72 F2
+
+. 0 3A9D68F6 11
+. 8B B3 68 01 00 00 F6 06 02 75 32
+
+. 0 3A9D6901 7
+. 8B 45 14 85 C0 74 03
+
+. 0 3A9D6908 3
+. FF 55 14
+
+. 0 8055B20 19
+. 55 89 E5 83 EC 18 89 5D F4 89 75 F8 31 F6 E8 45 00 00 00
+
+. 0 8055B78 4
+. 8B 1C 24 C3
+
+. 0 8055B33 14
+. 81 C3 29 37 00 00 89 7D FC E8 57 28 FF FF
+
+. 0 8048398 11
+. 55 89 E5 83 EC 08 E8 B1 00 00 00
+
+. 0 8048454 27
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 FF 0D 01 00 52 8B 83 28 00 00 00 85 C0 74 02
+
+. 0 8048471 4
+. 58 5B C9 C3
+
+. 0 80483A3 5
+. E8 18 01 00 00
+
+. 0 80484C0 15
+. 55 89 E5 51 51 8B 15 58 92 05 08 85 D2 74 19
+
+. 0 80484E8 2
+. C9 C3
+
+. 0 80483A8 5
+. E8 D3 D7 00 00
+
+. 0 8055B80 20
+. 55 89 E5 53 52 BB 48 92 05 08 A1 48 92 05 08 83 F8 FF 74 0C
+
+. 0 8055BA0 4
+. 58 5B 5D C3
+
+. 0 80483AD 2
+. C9 C3
+
+. 0 8055B41 21
+. 8D 93 0C FF FF FF 8D 83 0C FF FF FF 29 C2 C1 FA 02 39 D6 73 15
+
+. 0 8055B6B 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9D690B 7
+. F6 06 02 89 F6 75 38
+
+. 0 3A9D6912 25
+. 8B 93 3C 01 00 00 8B 02 89 7C 24 04 89 44 24 08 8B 45 0C 89 04 24 FF 55 08
+
+. 0 80557B0 31
+. 55 89 E5 83 EC 08 83 E4 F0 B8 00 00 00 00 29 C4 83 EC 08 68 EC 84 04 08 6A 04 E8 01 2C FF FF
+
+. 0 80483D0 6
+. FF 25 6C 92 05 08
+
+. 0 80483D6 10
+. 68 08 00 00 00 E9 D0 FF FF FF
+
+. 0 3A9E94D0 23
+. 55 89 E5 57 56 53 81 EC 2C 01 00 00 8B 4D 0C 8B 7D 08 E8 AA D2 FE FF
+
+. 0 3A9E94E7 23
+. 81 C3 29 9B 0E 00 83 F9 FF 0F 94 C0 85 FF 0F 9E C2 09 D0 A8 01 75 05
+
+. 0 3A9E94FE 5
+. 83 FF 40 7E 1D
+
+. 0 3A9E9520 19
+. 89 8D 64 FF FF FF 8D B5 68 FF FF FF B8 1F 00 00 00 EB 0D
+
+. 0 3A9E9540 10
+. C7 04 86 00 00 00 00 48 79 F6
+
+. 0 3A9E954A 75
+. 8D 57 FF B8 01 00 00 00 89 D1 83 E1 1F C1 EA 05 D3 E0 09 04 96 85 84 93 90 29 00 00 0F 95 C0 0F B6 C0 83 F8 01 19 C0 25 00 00 00 10 89 45 E8 8D 85 D4 FE FF FF 89 44 24 08 8D 85 64 FF FF FF 89 44 24 04 89 3C 24 E8 1B 02 00 00
+
+. 0 3A9E97B0 14
+. 55 89 E5 83 EC 14 89 5D F8 E8 D3 CF FE FF
+
+. 0 3A9E97BE 28
+. 81 C3 52 98 0E 00 8B 4D 0C 8B 55 10 89 75 FC 8B 83 6C 2B 00 00 8B 75 08 85 C0 75 1A
+
+. 0 3A9E97DA 16
+. 89 54 24 08 89 4C 24 04 89 34 24 E8 86 FE FF FF
+
+. 0 3A9E9670 22
+. 55 31 C9 89 E5 57 56 53 81 EC 28 01 00 00 8B 55 0C E8 0B D1 FE FF
+
+. 0 3A9E9686 10
+. 81 C3 8A 99 0E 00 85 D2 74 66
+
+. 0 3A9E9690 49
+. FC 8B 02 B9 20 00 00 00 8D BD 70 FF FF FF 8D 72 04 89 85 64 FF FF FF 8B 82 84 00 00 00 89 85 D0 FE FF FF 89 85 68 FF FF FF 8B 83 68 01 00 00 F3 A5
+
+. 0 3A9E96BF 2
+. F3 A5
+
+. 0 3A9E96C1 10
+. 8B B8 80 01 00 00 85 FF 75 25
+
+. 0 3A9E96CB 25
+. 81 8D 68 FF FF FF 00 00 00 04 8D 83 50 66 F1 FF F6 85 D0 FE FF FF 04 75 06
+
+. 0 3A9E96E4 27
+. 8D 83 58 66 F1 FF 89 85 6C FF FF FF 8D 8D 64 FF FF FF 8B 75 10 31 D2 85 F6 74 06
+
+. 0 3A9E96FF 23
+. 8D 95 D4 FE FF FF BE 08 00 00 00 8B 7D 08 87 DF B8 AE 00 00 00 CD 80
+
+. 0 3A9E9716 15
+. 87 FB 89 85 CC FE FF FF 3D 00 F0 FF FF 77 63
+
+. 0 3A9E9725 24
+. 31 D2 8B 85 CC FE FF FF 83 7D 10 00 F7 D0 0F 95 C2 C1 E8 1F 85 D0 74 3A
+
+. 0 3A9E973D 31
+. FC 8B 85 D4 FE FF FF B9 20 00 00 00 8B 7D 10 8D B5 E0 FE FF FF 89 07 8B 7D 10 83 C7 04 F3 A5
+
+. 0 3A9E975A 2
+. F3 A5
+
+. 0 3A9E975C 44
+. 8B 85 D8 FE FF FF 8B 55 10 89 82 84 00 00 00 8B 85 DC FE FF FF 89 82 88 00 00 00 8B 85 CC FE FF FF 81 C4 28 01 00 00 5B 5E 5F 5D C3
+
+. 0 3A9E97EA 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3A9E9595 13
+. 85 C0 BA FF FF FF FF 0F 88 71 FF FF FF
+
+. 0 3A9E95A2 19
+. 8B 95 D4 FE FF FF 81 C4 2C 01 00 00 89 D0 5B 5E 5F 5D C3
+
+. 0 80557CF 8
+. 83 C4 10 E8 5A 2E FF FF
+
+. 0 8048631 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804864F 2
+. F3 AB
+
+. 0 8048651 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048683 2
+. F3 AB
+
+. 0 8048685 47
+. B8 8F C2 31 42 89 45 C8 B8 EC 51 05 42 89 45 CC B8 8F C2 B1 41 89 45 D0 B8 8F C2 31 41 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 3C FD FF FF
+
+. 0 80483F0 6
+. FF 25 74 92 05 08
+
+. 0 80483F6 10
+. 68 18 00 00 00 E9 B0 FF FF FF
+
+. 0 3A9E92F0 35
+. 8B 44 24 04 89 58 00 89 70 04 89 78 08 8D 4C 24 04 89 48 10 8B 4C 24 00 89 48 14 89 68 0C E8 7B 5C 0C 00
+
+. 0 3AAAEF8E 4
+. 8B 0C 24 C3
+
+. 0 3A9E9313 14
+. 81 C1 FD 9C 0E 00 8D 89 20 63 F1 FF FF E1
+
+. 0 3A9E9330 22
+. 55 89 E5 83 EC 18 89 5D F4 8B 55 0C 89 75 F8 31 F6 E8 4B D4 FE FF
+
+. 0 3A9E9346 16
+. 81 C3 CA 9C 0E 00 89 7D FC 85 D2 8B 7D 08 75 12
+
+. 0 3A9E9368 25
+. C7 04 24 00 00 00 00 8D 47 1C 89 44 24 08 31 C0 89 44 24 04 E8 8F 04 00 00
+
+. 0 3A9E9810 36
+. 55 89 E5 83 EC 08 89 34 24 BE 08 00 00 00 89 7C 24 04 8B 7D 08 8B 4D 0C 8B 55 10 87 DF B8 AF 00 00 00 CD 80
+
+. 0 3A9E9834 11
+. 87 FB 3D 00 F0 FF FF 89 C6 77 0D
+
+. 0 3A9E983F 13
+. 89 F0 8B 7C 24 04 8B 34 24 89 EC 5D C3
+
+. 0 3A9E9381 4
+. 85 C0 75 D1
+
+. 0 3A9E9385 23
+. BE 01 00 00 00 89 77 18 31 C0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 80486B4 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 80486BF 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 58 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 B8 1E 63 42 50 FF 75 B8 E8 09 FE FF FF
+
+. 0 8048501 33
+. 55 89 E5 83 EC 28 C7 45 FC 00 00 00 00 D9 45 08 D9 45 0C D9 C9 DA E9 DF E0 80 E4 45 80 FC 40 74 54
+
+. 0 8048576 12
+. C7 45 FC 01 00 00 00 8B 45 FC C9 C3
+
+. 0 80486F8 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 80486FF 17
+. 83 EC 08 B8 52 38 B4 42 50 FF 75 BC E8 F1 FD FF FF
+
+. 0 8048710 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8048717 17
+. 83 EC 08 B8 29 DC 82 42 50 FF 75 C0 E8 D9 FD FF FF
+
+. 0 8048728 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804872F 17
+. 83 EC 08 B8 1F 85 C5 42 50 FF 75 C4 E8 C1 FD FF FF
+
+. 0 8048740 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8048747 13
+. 83 EC 0C 68 D8 5B 05 08 E8 BC FC FF FF
+
+. 0 8048410 6
+. FF 25 7C 92 05 08
+
+. 0 8048416 10
+. 68 28 00 00 00 E9 90 FF FF FF
+
+. 0 3AA044D0 22
+. 55 89 E5 8D 45 0C 53 83 EC 0C 89 44 24 08 8B 45 08 E8 AB 22 FD FF
+
+. 0 3AA044E6 26
+. 81 C3 2A EB 0C 00 89 44 24 04 8B 83 B0 00 00 00 8B 00 89 04 24 E8 60 73 FF FF
+
+. 0 3A9FB860 22
+. 55 31 C9 89 E5 57 56 53 81 EC F0 05 00 00 8B 7D 0C E8 1B AF FD FF
+
+. 0 3A9FB876 17
+. 81 C3 9A 77 0D 00 89 8D 58 FB FF FF E8 A9 B2 FD FF
+
+. 0 3A9D6B30 12
+. 55 89 E5 53 83 EC 04 E8 55 FC FF FF
+
+. 0 3A9D6B3C 16
+. 81 C3 D4 C4 0F 00 8B 8B 14 1A 00 00 85 C9 75 0A
+
+. 0 3A9D6B4C 10
+. 8B 83 A4 01 00 00 5A 5B 5D C3
+
+. 0 3A9FB887 17
+. 8B 00 89 85 38 FB FF FF 8B 45 08 80 78 46 00 75 2B
+
+. 0 3A9FB898 14
+. 8B 93 38 02 00 00 85 D2 0F 84 F7 2A 00 00
+
+. 0 3A9FB8A6 9
+. 89 C2 8B 40 5C 85 C0 75 0C
+
+. 0 3A9FB8AF 20
+. C7 42 5C FF FF FF FF B8 FF FF FF FF 40 BA FF FF FF FF 75 5F
+
+. 0 3A9FB8C3 13
+. 8B 75 08 8B 06 A8 08 0F 85 23 2C 00 00
+
+. 0 3A9FB8D0 8
+. 85 FF 0F 84 FE 2F 00 00
+
+. 0 3A9FB8D8 9
+. 8B 55 08 80 7A 46 00 75 2E
+
+. 0 3A9FB8E1 14
+. 8B 8B 38 02 00 00 85 C9 0F 84 EB 2B 00 00
+
+. 0 3A9FB8EF 7
+. 8B 42 5C 85 C0 75 0C
+
+. 0 3A9FB902 8
+. 40 BA FF FF FF FF 75 18
+
+. 0 3A9FB90A 9
+. 8B 75 08 8B 06 A8 02 74 1D
+
+. 0 3A9FB930 68
+. B8 FF FF FF FF 8D 95 BC FB FF FF 89 85 54 FB FF FF 8B 45 10 89 95 34 FB FF FF 89 85 40 FB FF FF 31 C0 89 85 3C FB FF FF 31 C0 89 85 BC FB FF FF 31 C0 89 85 C0 FB FF FF 89 54 24 04 89 3C 24 E8 AC 76 01 00
+
+. 0 3AA13020 17
+. 55 89 E5 57 56 53 83 EC 0C 8B 75 08 E8 60 37 FC FF
+
+. 0 3AA13031 28
+. 81 C3 DF FF 0B 00 8B 7D 0C 0F B6 06 84 C0 0F 95 C2 3C 25 0F 95 C0 21 D0 A8 01 75 43
+
+. 0 3AA13090 11
+. C7 07 00 00 00 00 F6 06 80 75 B5
+
+. 0 3AA1309B 20
+. 46 0F B6 06 84 C0 0F 95 C2 3C 25 0F 95 C0 21 D0 A8 01 75 E1
+
+. 0 3AA130AF 10
+. 83 C4 0C 89 F0 5B 5E 5F 5D C3
+
+. 0 3A9FB974 38
+. 89 85 4C FB FF FF 8B 4D 08 89 85 B4 FB FF FF 31 C0 0F B7 11 89 85 30 FB FF FF 89 D0 25 00 80 FF FF 66 85 C0 75 0E
+
+. 0 3A9FB99A 14
+. 8B 83 B8 01 00 00 85 C0 0F 85 41 01 00 00
+
+. 0 3A9FB9A8 16
+. 89 D0 25 00 80 FF FF 66 85 C0 0F 84 21 01 00 00
+
+. 0 3A9FBAD9 11
+. 8B 45 08 89 04 24 E8 9C AA FD FF
+
+. 0 3A9D6580 6
+. FF A3 28 00 00 00
+
+. 0 3A9D6586 10
+. 68 38 00 00 00 E9 70 FF FF FF
+
+. 0 3AA12F20 12
+. 55 89 E5 53 83 EC 04 E8 65 38 FC FF
+
+. 0 3AA12F2C 16
+. 81 C3 E4 00 0C 00 8B 93 3C 2B 00 00 85 D2 75 04
+
+. 0 3AA12F3C 4
+. 58 5B 5D C3
+
+. 0 3A9FBAE4 5
+. E9 CF FE FF FF
+
+. 0 3A9FB9B8 36
+. 8B 55 08 8B B5 4C FB FF FF 0F BE 42 46 29 FE 8B 84 10 94 00 00 00 89 14 24 89 74 24 08 89 7C 24 04 FF 50 1C
+
+. 0 3AA1DFD0 28
+. 55 89 E5 83 EC 24 89 75 F8 8B 45 10 8B 75 10 89 5D F4 89 45 F0 31 C0 E8 A5 87 FB FF
+
+. 0 3AA1DFEC 27
+. 81 C3 24 50 0B 00 89 7D FC 85 F6 8B 7D 0C C7 45 EC 00 00 00 00 0F 84 C5 00 00 00
+
+. 0 3AA1E007 27
+. 8B 55 08 89 D1 8B 72 18 8B 01 8B 52 14 25 00 0A 00 00 29 D6 3D 00 0A 00 00 74 30
+
+. 0 3AA1E022 8
+. 85 F6 0F 84 8E 00 00 00
+
+. 0 3AA1E0B8 12
+. 8B 45 F0 8B 55 EC 01 D0 85 C0 75 1C
+
+. 0 3AA1E0E0 29
+. 8B 4D 08 BE FF FF FF FF 0F BE 41 46 8B 84 08 94 00 00 00 89 74 24 04 89 0C 24 FF 50 0C
+
+. 0 3AA1EBC0 17
+. 55 89 E5 57 56 53 83 EC 10 8B 75 08 E8 C0 7B FB FF
+
+. 0 3AA1EBD1 17
+. 81 C3 3F 44 0B 00 8B 0E F6 C1 08 0F 85 23 01 00 00
+
+. 0 3AA1EBE2 9
+. F6 C5 08 0F 84 C5 00 00 00
+
+. 0 3AA1ECB0 11
+. 8B 46 10 85 C0 0F 85 4D FF FF FF
+
+. 0 3AA1ECBB 5
+. E9 32 FF FF FF
+
+. 0 3AA1EBF2 8
+. 89 34 24 E8 36 22 00 00
+
+. 0 3AA20E30 20
+. 55 89 E5 83 EC 18 89 75 FC 8B 75 08 89 5D F8 E8 4D 59 FB FF
+
+. 0 3AA20E44 13
+. 81 C3 CC 21 0B 00 8B 46 1C 85 C0 75 20
+
+. 0 3AA20E51 5
+. F6 06 02 74 07
+
+. 0 3AA20E5D 17
+. 0F BE 46 46 8B 84 30 94 00 00 00 89 34 24 FF 50 34
+
+. 0 3AA13C10 29
+. 55 89 E5 81 EC 84 00 00 00 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 46 38 E8 64 2B FC FF
+
+. 0 3AA13C2D 15
+. 81 C3 E3 F3 0B 00 BF 00 20 00 00 85 C0 78 34
+
+. 0 3AA13C3C 24
+. 0F BE 46 46 8B 94 30 94 00 00 00 8D 45 94 89 44 24 04 89 34 24 FF 52 48
+
+. 0 3AA1E4F0 35
+. 55 89 E5 83 EC 0C 8B 45 0C 89 44 24 08 8B 45 08 8B 40 38 C7 04 24 03 00 00 00 89 44 24 04 E8 CD D7 04 00
+
+. 0 3AA6BCE0 14
+. 55 89 E5 83 EC 64 89 5D F4 E8 A3 AA F6 FF
+
+. 0 3AA6BCEE 31
+. 81 C3 22 73 06 00 89 75 F8 8B B3 BC 01 00 00 89 7D FC 8B 7D 10 8B 06 85 C0 0F 85 83 00 00 00
+
+. 0 3AA6BD0D 5
+. E8 1E AE F6 FF
+
+. 0 3AA6BD12 22
+. 89 45 A8 8B 00 89 F9 8B 55 0C 89 45 B0 87 D3 B8 C5 00 00 00 CD 80
+
+. 0 3AA6BD28 13
+. 87 D3 3D 00 F0 FF FF 0F 87 B7 00 00 00
+
+. 0 3AA6BD35 7
+. 83 F8 FF 89 C2 74 34
+
+. 0 3AA6BD3C 4
+. 85 D2 75 20
+
+. 0 3AA6BD40 8
+. 8B 47 58 39 47 0C 74 18
+
+. 0 3AA6BD60 15
+. 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA1E513 2
+. C9 C3
+
+. 0 3AA13C54 4
+. 85 C0 78 18
+
+. 0 3AA13C58 15
+. 8B 45 A4 25 00 F0 00 00 3D 00 20 00 00 74 7F
+
+. 0 3AA13CE6 43
+. 8B 55 B8 8B 45 B4 0F AC D0 08 C1 EA 08 89 C1 0F AC D0 18 81 E1 FF 0F 00 00 25 00 F0 FF FF 09 C1 81 E9 88 00 00 00 83 F9 07 77 0B
+
+. 0 3AA13D11 11
+. 81 0E 00 02 00 00 E9 4B FF FF FF
+
+. 0 3AA13C67 7
+. 8B 45 C8 85 C0 7E 02
+
+. 0 3AA13C6E 62
+. 89 C7 C7 04 24 00 00 00 00 31 C0 BA 03 00 00 00 89 44 24 14 B8 FF FF FF FF B9 22 00 00 00 89 44 24 10 8D 87 FF 0F 00 00 25 00 F0 FF FF 89 54 24 08 89 4C 24 0C 89 44 24 04 E8 04 3F 06 00
+
+. 0 3AA77BB0 13
+. 89 DA B8 5A 00 00 00 8D 5C 24 04 CD 80
+
+. 0 3AA77BBD 9
+. 89 D3 3D 00 F0 FF FF 77 01
+
+. 0 3AA77BC6 1
+. C3
+
+. 0 3AA13CAC 12
+. 89 C2 83 FA FF B8 FF FF FF FF 74 21
+
+. 0 3AA13CB8 28
+. 89 54 24 04 B8 01 00 00 00 89 44 24 0C 8D 04 17 89 44 24 08 89 34 24 E8 CC CE 00 00
+
+. 0 3AA20BA0 20
+. 55 89 E5 83 EC 10 89 75 FC 8B 75 08 89 5D F8 E8 DD 5B FB FF
+
+. 0 3AA20BB4 13
+. 81 C3 5C 24 0B 00 8B 4E 1C 85 C9 74 5F
+
+. 0 3AA20C20 4
+. 8B 16 EB A4
+
+. 0 3AA20BC8 19
+. 8B 45 0C 89 46 1C 8B 45 10 89 46 20 8B 45 14 85 C0 74 15
+
+. 0 3AA20BDB 15
+. 83 E2 FE 89 16 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3AA13CD4 18
+. B8 01 00 00 00 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA20E6E 3
+. 40 74 0F
+
+. 0 3AA20E71 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3AA1EBFA 23
+. 8B 46 1C 8B 0E 89 46 0C 89 46 04 89 46 08 F6 C5 01 0F 85 4E 01 00 00
+
+. 0 3AA1EC11 14
+. 8B 46 04 8B 7E 20 39 F8 0F 84 D8 00 00 00
+
+. 0 3AA1EC1F 40
+. 89 46 14 89 C2 89 46 10 8B 46 08 89 7D F0 89 46 04 89 46 0C 89 C8 8B 4E 5C 0D 00 08 00 00 89 06 85 C9 0F 8E DD 00 00 00
+
+. 0 3AA1ED24 7
+. A9 02 02 00 00 74 2C
+
+. 0 3AA1ED2B 8
+. 89 56 18 E9 1A FF FF FF
+
+. 0 3AA1EC4D 10
+. 83 7D 0C FF 0F 84 DC 00 00 00
+
+. 0 3AA1ED33 24
+. 8B 56 10 8B 46 14 89 34 24 89 54 24 04 29 D0 89 44 24 08 E8 35 F4 FF FF
+
+. 0 3AA1E180 17
+. 55 89 E5 56 8B 75 10 8B 45 08 8B 55 0C 85 F6 75 07
+
+. 0 3AA1E191 7
+. 31 D2 5E 89 D0 5D C3
+
+. 0 3AA1ED4B 12
+. 89 C2 83 C4 10 89 D0 5B 5E 5F 5D C3
+
+. 0 3AA1E0FD 3
+. 40 74 C4
+
+. 0 3AA1E100 22
+. 8B 45 08 8B 50 1C 8B 48 20 29 D1 8B 55 F0 83 F9 7F 89 55 E8 76 0E
+
+. 0 3AA1E116 21
+. 89 D0 31 D2 F7 F1 8B 4D F0 29 D1 89 4D E8 8B 75 E8 85 F6 75 2B
+
+. 0 3AA1E12B 7
+. 8B 4D F0 85 C9 74 92
+
+. 0 3AA1E132 28
+. 8B 45 F0 89 44 24 08 8B 55 E8 8D 04 3A 89 44 24 04 8B 4D 08 89 0C 24 E8 72 1E 00 00
+
+. 0 3AA1FFC0 30
+. 55 89 E5 57 56 83 EC 10 8B 45 10 8B 55 10 8B 7D 0C 89 45 F4 31 C0 85 D2 0F 84 9D 00 00 00
+
+. 0 3AA1FFDE 17
+. 89 F6 8B 55 08 8B 72 18 8B 42 14 29 C6 85 F6 7E 54
+
+. 0 3AA20043 7
+. 8B 75 F4 85 F6 74 29
+
+. 0 3AA2004A 30
+. 8B 55 08 89 D1 0F BE 42 46 8B 94 10 94 00 00 00 0F B6 07 47 89 0C 24 89 44 24 04 FF 52 0C
+
+. 0 3AA1EBEB 7
+. 8B 46 10 85 C0 75 5B
+
+. 0 3AA1EC57 8
+. 8B 56 14 3B 56 20 74 61
+
+. 0 3AA1EC5F 20
+. 0F B6 45 0C 88 02 8B 4E 14 8B 16 41 F6 C2 02 89 4E 14 75 11
+
+. 0 3AA1EC73 17
+. C1 EA 09 83 7D 0C 0A 0F 94 C0 21 C2 F6 C2 01 74 1D
+
+. 0 3AA1ECA1 14
+. 0F B6 55 0C 83 C4 10 89 D0 5B 5E 5F 5D C3
+
+. 0 3AA20068 3
+. 40 74 08
+
+. 0 3AA2006B 8
+. FF 4D F4 E9 6D FF FF FF
+
+. 0 3AA1FFE0 15
+. 8B 55 08 8B 72 18 8B 42 14 29 C6 85 F6 7E 54
+
+. 0 3AA1EC84 21
+. 8B 46 10 89 34 24 29 C1 89 4C 24 08 89 44 24 04 E8 E7 F4 FF FF
+
+. 0 3AA1E198 7
+. 89 F1 E8 F1 FC FF FF
+
+. 0 3AA1DE90 22
+. 55 89 E5 83 EC 24 89 5D F4 89 75 F8 89 C6 89 7D FC E8 EB 88 FB FF
+
+. 0 3AA1DEA6 18
+. 81 C3 6A 51 0B 00 F6 40 01 10 89 55 F0 89 4D EC 74 6E
+
+. 0 3AA1DF26 10
+. 8B 48 08 8B 50 10 39 D1 74 96
+
+. 0 3AA1DEC6 31
+. 0F BE 46 46 8B 55 EC 8B 84 30 94 00 00 00 89 54 24 08 8B 4D F0 89 34 24 89 4C 24 04 FF 50 3C
+
+. 0 3AA1E430 15
+. 55 89 E5 57 56 83 EC 14 8B 75 10 85 F6 7E 71
+
+. 0 3AA1E43F 9
+. 8B 45 08 F6 40 3C 02 75 37
+
+. 0 3AA1E448 33
+. 90 8D B4 26 00 00 00 00 89 74 24 08 8B 45 0C 89 44 24 04 8B 55 08 8B 42 38 89 04 24 E8 B7 E9 04 00
+
+. 0 3AA6CE20 5
+. E8 69 21 04 00
+
+. 0 3AA6CE25 15
+. 81 C1 EB 61 06 00 83 B9 14 1A 00 00 00 75 1D
+
+. 0 3AA6CE34 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 04 00 00 00 CD 80
+
+addps_1 ... ok
+. 0 3AA6CE48 8
+. 5B 3D 01 F0 FF FF 73 2D
+
+. 0 3AA6CE50 1
+. C3
+
+. 0 3AA1E469 4
+. 85 C0 78 34
+
+. 0 3AA1E46D 9
+. 01 45 0C 29 C6 85 F6 7E 3A
+
+. 0 3AA1E4B0 16
+. 29 75 10 8B 45 08 8B 78 50 8B 70 4C 85 FF 78 11
+
+. 0 3AA1E4D1 10
+. 8B 45 10 83 C4 14 5E 5F 5D C3
+
+. 0 3AA1DEE5 11
+. 89 C7 0F B7 46 44 66 85 C0 74 04
+
+. 0 3AA1DEF4 29
+. 8B 56 5C 8B 46 1C 85 D2 89 46 0C 89 46 04 89 46 08 89 46 14 89 46 10 0F 8E 9F 00 00 00
+
+. 0 3AA1DFB0 12
+. F7 06 02 02 00 00 0F 85 58 FF FF FF
+
+. 0 3AA1DF14 18
+. 89 46 18 89 F8 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA1E19F 9
+. 39 F0 BA FF FF FF FF 74 E9
+
+. 0 3AA1EC99 8
+. 40 BA FF FF FF FF 74 04
+
+. 0 3AA20073 15
+. 8B 45 10 8B 4D F4 29 C8 83 C4 10 5E 5F 5D C3
+
+. 0 3AA1E14E 8
+. 29 45 F0 E9 6E FF FF FF
+
+. 0 3AA1E0C4 21
+. 8B 45 10 8B 55 F0 29 D0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9FB9DC 19
+. BA FF FF FF FF 89 95 50 FB FF FF 39 F0 0F 85 41 01 00 00
+
+. 0 3A9FB9EF 21
+. 89 85 50 FB FF FF 8B 95 B4 FB FF FF 80 3A 00 0F 84 2C 01 00 00
+
+. 0 3A9FBB30 16
+. 8B 4D 08 0F B7 01 25 00 80 FF FF 66 85 C0 74 1A
+
+. 0 3A9FBB5A 8
+. 89 0C 24 E8 FE A9 FD FF
+
+. 0 3A9D6560 6
+. FF A3 20 00 00 00
+
+. 0 3A9D6566 10
+. 68 28 00 00 00 E9 90 FF FF FF
+
+. 0 3AA12F90 12
+. 55 89 E5 53 83 EC 04 E8 F5 37 FC FF
+
+. 0 3AA12F9C 16
+. 81 C3 74 00 0C 00 8B 93 44 2B 00 00 85 D2 75 04
+
+. 0 3AA12FAC 4
+. 58 5B 5D C3
+
+. 0 3A9FBB62 10
+. 8B 95 30 FB FF FF 85 D2 74 DE
+
+. 0 3A9FBB4A 16
+. 8B 95 50 FB FF FF 8D 65 F4 89 D0 5B 5E 5F 5D C3
+
+. 0 3AA04500 6
+. 83 C4 0C 5B 5D C3
+
+. 0 8048754 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8048816 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80557D7 5
+. E8 42 30 FF FF
+
+. 0 804881E 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804883C 2
+. F3 AB
+
+. 0 804883E 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048870 2
+. F3 AB
+
+. 0 8048872 47
+. B8 8F C2 31 42 89 45 C8 B8 EC 51 05 42 89 45 CC B8 8F C2 B1 41 89 45 D0 B8 8F C2 31 41 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 4F FB FF FF
+
+. 0 80488A1 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 80488AC 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 58 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 B8 1E 63 42 50 FF 75 B8 E8 23 FC FF FF
+
+. 0 80488DE 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 80488E5 17
+. 83 EC 08 B8 52 38 B4 42 50 FF 75 BC E8 0B FC FF FF
+
+. 0 80488F6 7
+. 83 C4 10 85 C0 74 45
+
+. 0 80488FD 17
+. 83 EC 08 B8 29 DC 82 42 50 FF 75 C0 E8 F3 FB FF FF
+
+. 0 804890E 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8048915 17
+. 83 EC 08 B8 1F 85 C5 42 50 FF 75 C4 E8 DB FB FF FF
+
+. 0 8048926 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804892D 13
+. 83 EC 0C 68 BE 5C 05 08 E8 D6 FA FF FF
+
+. 0 3A9FB8BB 8
+. 40 BA FF FF FF FF 75 5F
+
+. 0 3AA1E052 10
+. 8B 71 20 29 D6 3B 75 10 72 C6
+
+. 0 3AA1E05C 10
+. 8B 4D 10 8D 04 39 39 F8 77 07
+
+. 0 3AA1E06D 6
+. 48 80 38 0A 75 F5
+
+. 0 3AA1E073 17
+. C7 45 EC 01 00 00 00 8B 4D 0C 29 C8 8D 70 01 EB 9E
+
+. 0 3AA1E02A 5
+. 3B 75 10 76 03
+
+. 0 3AA1E032 5
+. 83 FE 14 77 4D
+
+. 0 3AA1E037 7
+. 89 D1 89 F2 4A 78 0C
+
+. 0 3AA1E03E 12
+. 89 F6 0F B6 07 47 88 01 41 4A 79 F6
+
+. 0 3AA1E040 10
+. 0F B6 07 47 88 01 41 4A 79 F6
+
+. 0 3AA1E04A 8
+. 8B 45 08 89 48 14 EB 5E
+
+. 0 3AA1E0B0 20
+. 8B 55 10 29 F2 89 55 F0 8B 45 F0 8B 55 EC 01 D0 85 C0 75 1C
+
+addps_2 ... ok
+. 0 804893A 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 80489FC 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80557DC 5
+. E8 23 32 FF FF
+
+. 0 8048A04 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048A22 2
+. F3 AB
+
+. 0 8048A24 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048A56 2
+. F3 AB
+
+. 0 8048A58 47
+. B8 8F C2 31 42 89 45 C8 B8 EC 51 05 42 89 45 CC B8 8F C2 B1 41 89 45 D0 B8 8F C2 31 41 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 69 F9 FF FF
+
+. 0 8048A87 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 8048A92 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 58 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 B8 1E 63 42 50 FF 75 B8 E8 35 FA FF FF
+
+. 0 8048ACC 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8048AD3 17
+. 83 EC 08 B8 EC 51 05 42 50 FF 75 BC E8 1D FA FF FF
+
+. 0 8048AE4 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8048AEB 17
+. 83 EC 08 B8 8F C2 B1 41 50 FF 75 C0 E8 05 FA FF FF
+
+. 0 8048AFC 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8048B03 17
+. 83 EC 08 B8 8F C2 31 41 50 FF 75 C4 E8 ED F9 FF FF
+
+. 0 8048B14 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8048B1B 13
+. 83 EC 0C 68 F6 5C 05 08 E8 E8 F8 FF FF
+
+addss_1 ... ok
+. 0 8048B28 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8048BEA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80557E1 5
+. E8 0C 34 FF FF
+
+. 0 8048BF2 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048C10 2
+. F3 AB
+
+. 0 8048C12 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048C44 2
+. F3 AB
+
+. 0 8048C46 47
+. B8 8F C2 31 42 89 45 C8 B8 EC 51 05 42 89 45 CC B8 8F C2 B1 41 89 45 D0 B8 8F C2 31 41 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 7B F7 FF FF
+
+. 0 8048C75 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 8048C80 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F 58 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 B8 1E 63 42 50 FF 75 B8 E8 4E F8 FF FF
+
+. 0 8048CB3 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8048CBA 17
+. 83 EC 08 B8 EC 51 05 42 50 FF 75 BC E8 36 F8 FF FF
+
+. 0 8048CCB 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8048CD2 17
+. 83 EC 08 B8 8F C2 B1 41 50 FF 75 C0 E8 1E F8 FF FF
+
+. 0 8048CE3 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8048CEA 17
+. 83 EC 08 B8 8F C2 31 41 50 FF 75 C4 E8 06 F8 FF FF
+
+. 0 8048CFB 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8048D02 13
+. 83 EC 0C 68 2E 5D 05 08 E8 01 F7 FF FF
+
+addss_2 ... ok
+. 0 8048D0F 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8048DD1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80557E6 5
+. E8 EE 35 FF FF
+
+. 0 8048DD9 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048DF7 2
+. F3 AB
+
+. 0 8048DF9 48
+. C7 45 D8 20 64 A8 EC C7 45 DC 31 75 B9 FD C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048E27 2
+. F3 AB
+
+. 0 8048E29 43
+. C7 45 C8 EF CD AB 89 C7 45 CC 67 45 23 01 C7 45 D0 20 64 A8 EC C7 45 D4 31 75 B9 FD 83 EC 08 6A 01 68 C0 92 05 08 E8 9C F5 FF FF
+
+. 0 8048E54 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 8048E5F 63
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 55 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 8B 45 BC 35 10 30 98 FC 8B 55 B8 81 F2 00 20 00 64 09 D0 85 C0 75 29
+
+. 0 8048E9E 23
+. 8B 45 C4 35 46 00 02 00 8B 55 C0 81 F2 CF 89 03 01 09 D0 85 C0 75 12
+
+. 0 8048EB5 13
+. 83 EC 0C 68 66 5D 05 08 E8 4E F5 FF FF
+
+andnps_1 ... ok
+. 0 8048EC2 5
+. 83 C4 10 EB 62
+
+. 0 8048F29 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80557EB 5
+. E8 41 37 FF FF
+
+. 0 8048F31 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048F4F 2
+. F3 AB
+
+. 0 8048F51 48
+. C7 45 D8 20 64 A8 EC C7 45 DC 31 75 B9 FD C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8048F7F 2
+. F3 AB
+
+. 0 8048F81 43
+. C7 45 C8 EF CD AB 89 C7 45 CC 67 45 23 01 C7 45 D0 20 64 A8 EC C7 45 D4 31 75 B9 FD 83 EC 08 6A 01 68 C0 92 05 08 E8 44 F4 FF FF
+
+. 0 8048FAC 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8048FB7 56
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 55 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 8B 45 BC 35 10 30 98 FC 8B 55 B8 81 F2 00 20 00 64 09 D0 85 C0 75 29
+
+. 0 8048FEF 23
+. 8B 45 C4 35 46 00 02 00 8B 55 C0 81 F2 CF 89 03 01 09 D0 85 C0 75 12
+
+. 0 8049006 13
+. 83 EC 0C 68 F1 5D 05 08 E8 FD F3 FF FF
+
+andnps_2 ... ok
+. 0 8049013 5
+. 83 C4 10 EB 62
+
+. 0 804907A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80557F0 5
+. E8 8D 38 FF FF
+
+. 0 8049082 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80490A0 2
+. F3 AB
+
+. 0 80490A2 48
+. C7 45 D8 20 64 A8 EC C7 45 DC 31 75 B9 FD C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80490D0 2
+. F3 AB
+
+. 0 80490D2 43
+. C7 45 C8 EF CD AB 89 C7 45 CC 67 45 23 01 C7 45 D0 20 64 A8 EC C7 45 D4 31 75 B9 FD 83 EC 08 6A 01 68 C0 92 05 08 E8 F3 F2 FF FF
+
+. 0 80490FD 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 8049108 63
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 54 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 8B 45 BC 35 21 45 21 01 8B 55 B8 81 F2 20 44 A8 88 09 D0 85 C0 75 29
+
+. 0 8049147 23
+. 8B 45 C4 35 21 45 21 01 8B 55 C0 81 F2 20 44 A8 88 09 D0 85 C0 75 12
+
+. 0 804915E 13
+. 83 EC 0C 68 2C 5E 05 08 E8 A5 F2 FF FF
+
+andps_1 ... ok
+. 0 804916B 5
+. 83 C4 10 EB 62
+
+. 0 80491D2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80557F5 5
+. E8 E0 39 FF FF
+
+. 0 80491DA 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80491F8 2
+. F3 AB
+
+. 0 80491FA 48
+. C7 45 D8 20 64 A8 EC C7 45 DC 31 75 B9 FD C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049228 2
+. F3 AB
+
+. 0 804922A 43
+. C7 45 C8 EF CD AB 89 C7 45 CC 67 45 23 01 C7 45 D0 20 64 A8 EC C7 45 D4 31 75 B9 FD 83 EC 08 6A 01 68 C0 92 05 08 E8 9B F1 FF FF
+
+. 0 8049255 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8049260 56
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 54 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 8B 45 BC 35 21 45 21 01 8B 55 B8 81 F2 20 44 A8 88 09 D0 85 C0 75 29
+
+. 0 8049298 23
+. 8B 45 C4 35 21 45 21 01 8B 55 C0 81 F2 20 44 A8 88 09 D0 85 C0 75 12
+
+. 0 80492AF 13
+. 83 EC 0C 68 64 5E 05 08 E8 54 F1 FF FF
+
+andps_2 ... ok
+. 0 80492BC 5
+. 83 C4 10 EB 62
+
+. 0 8049323 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80557FA 5
+. E8 2C 3B FF FF
+
+. 0 804932B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049349 2
+. F3 AB
+
+. 0 804934B 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804937D 2
+. F3 AB
+
+. 0 804937F 47
+. B8 5B 91 6A 43 89 45 C8 B8 62 91 6A 43 89 45 CC B8 5B 91 6A 43 89 45 D0 B8 62 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 42 F0 FF FF
+
+. 0 80493AE 11
+. 83 C4 10 85 C0 0F 85 B9 00 00 00
+
+. 0 80493B9 47
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F C2 C8 00 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 80493E8 6
+. 83 7D BC 00 75 1E
+
+. 0 80493EE 6
+. 83 7D C0 FF 75 18
+
+. 0 80493F4 6
+. 83 7D C4 00 75 12
+
+. 0 80493FA 13
+. 83 EC 0C 68 9C 5E 05 08 E8 09 F0 FF FF
+
+cmpeqps_1 ... ok
+. 0 8049407 5
+. 83 C4 10 EB 76
+
+. 0 8049482 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80557FF 5
+. E8 86 3C FF FF
+
+. 0 804948A 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80494A8 2
+. F3 AB
+
+. 0 80494AA 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80494DC 2
+. F3 AB
+
+. 0 80494DE 47
+. B8 5B 91 6A 43 89 45 C8 B8 62 91 6A 43 89 45 CC B8 5B 91 6A 43 89 45 D0 B8 62 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 E3 EE FF FF
+
+. 0 804950D 11
+. 83 C4 10 85 C0 0F 85 B2 00 00 00
+
+. 0 8049518 40
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F C2 4D D8 00 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 8049540 6
+. 83 7D BC 00 75 1E
+
+. 0 8049546 6
+. 83 7D C0 FF 75 18
+
+. 0 804954C 6
+. 83 7D C4 00 75 12
+
+. 0 8049552 13
+. 83 EC 0C 68 78 5F 05 08 E8 B1 EE FF FF
+
+cmpeqps_2 ... ok
+. 0 804955F 5
+. 83 C4 10 EB 76
+
+. 0 80495DA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055804 5
+. E8 D9 3D FF FF
+
+. 0 80495E2 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049600 2
+. F3 AB
+
+. 0 8049602 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804961C 2
+. F3 AB
+
+. 0 804961E 23
+. B8 2B 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 BB ED FF FF
+
+. 0 8049635 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 8049640 48
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F C2 C8 00 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 8049670 6
+. 83 7D BC 00 75 1E
+
+. 0 8049676 6
+. 83 7D C0 00 75 18
+
+. 0 804967C 6
+. 83 7D C4 00 75 12
+
+. 0 8049682 13
+. 83 EC 0C 68 B6 5F 05 08 E8 81 ED FF FF
+
+cmpeqss_1 ... ok
+. 0 804968F 5
+. 83 C4 10 EB 76
+
+. 0 804970A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055809 5
+. E8 04 3F FF FF
+
+. 0 8049712 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049730 2
+. F3 AB
+
+. 0 8049732 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804974C 2
+. F3 AB
+
+. 0 804974E 23
+. B8 2C 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 8B EC FF FF
+
+. 0 8049765 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8049770 41
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F C2 4D D8 00 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 00 75 24
+
+. 0 8049799 6
+. 83 7D BC 00 75 1E
+
+. 0 804979F 6
+. 83 7D C0 00 75 18
+
+. 0 80497A5 6
+. 83 7D C4 00 75 12
+
+. 0 80497AB 13
+. 83 EC 0C 68 F4 5F 05 08 E8 58 EC FF FF
+
+cmpeqss_2 ... ok
+. 0 80497B8 5
+. 83 C4 10 EB 76
+
+. 0 8049833 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805580E 5
+. E8 28 40 FF FF
+
+. 0 804983B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049859 2
+. F3 AB
+
+. 0 804985B 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804988D 2
+. F3 AB
+
+. 0 804988F 47
+. B8 5B 91 6A 43 89 45 C8 B8 62 91 6A 43 89 45 CC B8 5B 91 6A 43 89 45 D0 B8 62 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 32 EB FF FF
+
+. 0 80498BE 11
+. 83 C4 10 85 C0 0F 85 B9 00 00 00
+
+. 0 80498C9 47
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F C2 C8 02 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 80498F8 6
+. 83 7D BC 00 75 1E
+
+. 0 80498FE 6
+. 83 7D C0 FF 75 18
+
+. 0 8049904 6
+. 83 7D C4 00 75 12
+
+. 0 804990A 13
+. 83 EC 0C 68 32 60 05 08 E8 F9 EA FF FF
+
+cmpleps_1 ... ok
+. 0 8049917 5
+. 83 C4 10 EB 76
+
+. 0 8049992 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055813 5
+. E8 82 41 FF FF
+
+. 0 804999A 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80499B8 2
+. F3 AB
+
+. 0 80499BA 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80499EC 2
+. F3 AB
+
+. 0 80499EE 47
+. B8 5B 91 6A 43 89 45 C8 B8 62 91 6A 43 89 45 CC B8 5B 91 6A 43 89 45 D0 B8 62 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 D3 E9 FF FF
+
+. 0 8049A1D 11
+. 83 C4 10 85 C0 0F 85 B2 00 00 00
+
+. 0 8049A28 40
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F C2 4D D8 02 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 8049A50 6
+. 83 7D BC 00 75 1E
+
+. 0 8049A56 6
+. 83 7D C0 FF 75 18
+
+. 0 8049A5C 6
+. 83 7D C4 00 75 12
+
+. 0 8049A62 13
+. 83 EC 0C 68 70 60 05 08 E8 A1 E9 FF FF
+
+cmpleps_2 ... ok
+. 0 8049A6F 5
+. 83 C4 10 EB 76
+
+. 0 8049AEA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055818 5
+. E8 D5 42 FF FF
+
+. 0 8049AF2 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049B10 2
+. F3 AB
+
+. 0 8049B12 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049B2C 2
+. F3 AB
+
+. 0 8049B2E 23
+. B8 2B 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 AB E8 FF FF
+
+. 0 8049B45 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 8049B50 48
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F C2 C8 02 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 8049B80 6
+. 83 7D BC 00 75 1E
+
+. 0 8049B86 6
+. 83 7D C0 00 75 18
+
+. 0 8049B8C 6
+. 83 7D C4 00 75 12
+
+. 0 8049B92 13
+. 83 EC 0C 68 AE 60 05 08 E8 71 E8 FF FF
+
+cmpless_1 ... ok
+. 0 8049B9F 5
+. 83 C4 10 EB 76
+
+. 0 8049C1A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805581D 5
+. E8 00 44 FF FF
+
+. 0 8049C22 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049C40 2
+. F3 AB
+
+. 0 8049C42 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049C5C 2
+. F3 AB
+
+. 0 8049C5E 23
+. B8 2C 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 7B E7 FF FF
+
+. 0 8049C75 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 8049C80 41
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F C2 4D D8 02 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 00 75 24
+
+. 0 8049CA9 6
+. 83 7D BC 00 75 1E
+
+. 0 8049CAF 6
+. 83 7D C0 00 75 18
+
+. 0 8049CB5 6
+. 83 7D C4 00 75 12
+
+. 0 8049CBB 13
+. 83 EC 0C 68 EC 60 05 08 E8 48 E7 FF FF
+
+cmpless_2 ... ok
+. 0 8049CC8 5
+. 83 C4 10 EB 76
+
+. 0 8049D43 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055822 5
+. E8 24 45 FF FF
+
+. 0 8049D4B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049D69 2
+. F3 AB
+
+. 0 8049D6B 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049D9D 2
+. F3 AB
+
+. 0 8049D9F 47
+. B8 55 91 6A 43 89 45 C8 B8 62 91 6A 43 89 45 CC B8 55 91 6A 43 89 45 D0 B8 62 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 22 E6 FF FF
+
+. 0 8049DCE 11
+. 83 C4 10 85 C0 0F 85 B9 00 00 00
+
+. 0 8049DD9 47
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F C2 C8 01 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 8049E08 6
+. 83 7D BC 00 75 1E
+
+. 0 8049E0E 6
+. 83 7D C0 FF 75 18
+
+. 0 8049E14 6
+. 83 7D C4 00 75 12
+
+. 0 8049E1A 13
+. 83 EC 0C 68 2A 61 05 08 E8 E9 E5 FF FF
+
+cmpltps_1 ... ok
+. 0 8049E27 5
+. 83 C4 10 EB 76
+
+. 0 8049EA2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055827 5
+. E8 7E 46 FF FF
+
+. 0 8049EAA 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049EC8 2
+. F3 AB
+
+. 0 8049ECA 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8049EFC 2
+. F3 AB
+
+. 0 8049EFE 47
+. B8 55 91 6A 43 89 45 C8 B8 62 91 6A 43 89 45 CC B8 55 91 6A 43 89 45 D0 B8 62 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 C3 E4 FF FF
+
+. 0 8049F2D 11
+. 83 C4 10 85 C0 0F 85 B2 00 00 00
+
+. 0 8049F38 40
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F C2 4D D8 01 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 8049F60 6
+. 83 7D BC 00 75 1E
+
+. 0 8049F66 6
+. 83 7D C0 FF 75 18
+
+. 0 8049F6C 6
+. 83 7D C4 00 75 12
+
+. 0 8049F72 13
+. 83 EC 0C 68 68 61 05 08 E8 91 E4 FF FF
+
+cmpltps_2 ... ok
+. 0 8049F7F 5
+. 83 C4 10 EB 76
+
+. 0 8049FFA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805582C 5
+. E8 D1 47 FF FF
+
+. 0 804A002 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A020 2
+. F3 AB
+
+. 0 804A022 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A03C 2
+. F3 AB
+
+. 0 804A03E 23
+. B8 2A 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 9B E3 FF FF
+
+. 0 804A055 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 804A060 48
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F C2 C8 01 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804A090 6
+. 83 7D BC 00 75 1E
+
+. 0 804A096 6
+. 83 7D C0 00 75 18
+
+. 0 804A09C 6
+. 83 7D C4 00 75 12
+
+. 0 804A0A2 13
+. 83 EC 0C 68 A6 61 05 08 E8 61 E3 FF FF
+
+cmpltss_1 ... ok
+. 0 804A0AF 5
+. 83 C4 10 EB 76
+
+. 0 804A12A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055831 5
+. E8 FC 48 FF FF
+
+. 0 804A132 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A150 2
+. F3 AB
+
+. 0 804A152 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A16C 2
+. F3 AB
+
+. 0 804A16E 23
+. B8 2C 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 6B E2 FF FF
+
+. 0 804A185 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 804A190 41
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F C2 4D D8 01 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 00 75 24
+
+. 0 804A1B9 6
+. 83 7D BC 00 75 1E
+
+. 0 804A1BF 6
+. 83 7D C0 00 75 18
+
+. 0 804A1C5 6
+. 83 7D C4 00 75 12
+
+. 0 804A1CB 13
+. 83 EC 0C 68 E4 61 05 08 E8 38 E2 FF FF
+
+cmpltss_2 ... ok
+. 0 804A1D8 5
+. 83 C4 10 EB 76
+
+. 0 804A253 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055836 5
+. E8 20 4A FF FF
+
+. 0 804A25B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A279 2
+. F3 AB
+
+. 0 804A27B 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A2AD 2
+. F3 AB
+
+. 0 804A2AF 47
+. B8 62 91 6A 43 89 45 C8 B8 5B 91 6A 43 89 45 CC B8 62 91 6A 43 89 45 D0 B8 5B 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 12 E1 FF FF
+
+. 0 804A2DE 11
+. 83 C4 10 85 C0 0F 85 B9 00 00 00
+
+. 0 804A2E9 47
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F C2 C8 04 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804A318 6
+. 83 7D BC 00 75 1E
+
+. 0 804A31E 6
+. 83 7D C0 FF 75 18
+
+. 0 804A324 6
+. 83 7D C4 00 75 12
+
+. 0 804A32A 13
+. 83 EC 0C 68 22 62 05 08 E8 D9 E0 FF FF
+
+cmpneqps_1 ... ok
+. 0 804A337 5
+. 83 C4 10 EB 76
+
+. 0 804A3B2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805583B 5
+. E8 7A 4B FF FF
+
+. 0 804A3BA 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A3D8 2
+. F3 AB
+
+. 0 804A3DA 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A40C 2
+. F3 AB
+
+. 0 804A40E 47
+. B8 62 91 6A 43 89 45 C8 B8 5B 91 6A 43 89 45 CC B8 62 91 6A 43 89 45 D0 B8 5B 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 B3 DF FF FF
+
+. 0 804A43D 11
+. 83 C4 10 85 C0 0F 85 B2 00 00 00
+
+. 0 804A448 40
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F C2 4D D8 04 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804A470 6
+. 83 7D BC 00 75 1E
+
+. 0 804A476 6
+. 83 7D C0 FF 75 18
+
+. 0 804A47C 6
+. 83 7D C4 00 75 12
+
+. 0 804A482 13
+. 83 EC 0C 68 63 62 05 08 E8 81 DF FF FF
+
+cmpneqps_2 ... ok
+. 0 804A48F 5
+. 83 C4 10 EB 76
+
+. 0 804A50A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055840 5
+. E8 CD 4C FF FF
+
+. 0 804A512 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A530 2
+. F3 AB
+
+. 0 804A532 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A54C 2
+. F3 AB
+
+. 0 804A54E 23
+. B8 2C 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 8B DE FF FF
+
+. 0 804A565 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 804A570 48
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F C2 C8 04 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804A5A0 6
+. 83 7D BC 00 75 1E
+
+. 0 804A5A6 6
+. 83 7D C0 00 75 18
+
+. 0 804A5AC 6
+. 83 7D C4 00 75 12
+
+. 0 804A5B2 13
+. 83 EC 0C 68 A4 62 05 08 E8 51 DE FF FF
+
+cmpneqss_1 ... ok
+. 0 804A5BF 5
+. 83 C4 10 EB 76
+
+. 0 804A63A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055845 5
+. E8 F8 4D FF FF
+
+. 0 804A642 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A660 2
+. F3 AB
+
+. 0 804A662 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A67C 2
+. F3 AB
+
+. 0 804A67E 23
+. B8 2B 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 5B DD FF FF
+
+. 0 804A695 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 804A6A0 41
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F C2 4D D8 04 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 00 75 24
+
+. 0 804A6C9 6
+. 83 7D BC 00 75 1E
+
+. 0 804A6CF 6
+. 83 7D C0 00 75 18
+
+. 0 804A6D5 6
+. 83 7D C4 00 75 12
+
+. 0 804A6DB 13
+. 83 EC 0C 68 E5 62 05 08 E8 28 DD FF FF
+
+cmpneqss_2 ... ok
+. 0 804A6E8 5
+. 83 C4 10 EB 76
+
+. 0 804A763 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805584A 5
+. E8 1C 4F FF FF
+
+. 0 804A76B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A789 2
+. F3 AB
+
+. 0 804A78B 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A7BD 2
+. F3 AB
+
+. 0 804A7BF 47
+. B8 62 91 6A 43 89 45 C8 B8 5B 91 6A 43 89 45 CC B8 62 91 6A 43 89 45 D0 B8 5B 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 02 DC FF FF
+
+. 0 804A7EE 11
+. 83 C4 10 85 C0 0F 85 B9 00 00 00
+
+. 0 804A7F9 47
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F C2 C8 06 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804A828 6
+. 83 7D BC 00 75 1E
+
+. 0 804A82E 6
+. 83 7D C0 FF 75 18
+
+. 0 804A834 6
+. 83 7D C4 00 75 12
+
+. 0 804A83A 13
+. 83 EC 0C 68 26 63 05 08 E8 C9 DB FF FF
+
+cmpnleps_1 ... ok
+. 0 804A847 5
+. 83 C4 10 EB 76
+
+. 0 804A8C2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805584F 5
+. E8 76 50 FF FF
+
+. 0 804A8CA 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A8E8 2
+. F3 AB
+
+. 0 804A8EA 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804A91C 2
+. F3 AB
+
+. 0 804A91E 47
+. B8 62 91 6A 43 89 45 C8 B8 5B 91 6A 43 89 45 CC B8 62 91 6A 43 89 45 D0 B8 5B 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 A3 DA FF FF
+
+. 0 804A94D 11
+. 83 C4 10 85 C0 0F 85 B2 00 00 00
+
+. 0 804A958 40
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F C2 4D D8 06 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804A980 6
+. 83 7D BC 00 75 1E
+
+. 0 804A986 6
+. 83 7D C0 FF 75 18
+
+. 0 804A98C 6
+. 83 7D C4 00 75 12
+
+. 0 804A992 13
+. 83 EC 0C 68 67 63 05 08 E8 71 DA FF FF
+
+cmpnleps_2 ... ok
+. 0 804A99F 5
+. 83 C4 10 EB 76
+
+. 0 804AA1A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055854 5
+. E8 C9 51 FF FF
+
+. 0 804AA22 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804AA40 2
+. F3 AB
+
+. 0 804AA42 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804AA5C 2
+. F3 AB
+
+. 0 804AA5E 23
+. B8 2C 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 7B D9 FF FF
+
+. 0 804AA75 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 804AA80 48
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F C2 C8 06 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804AAB0 6
+. 83 7D BC 00 75 1E
+
+. 0 804AAB6 6
+. 83 7D C0 00 75 18
+
+. 0 804AABC 6
+. 83 7D C4 00 75 12
+
+. 0 804AAC2 13
+. 83 EC 0C 68 A8 63 05 08 E8 41 D9 FF FF
+
+cmpnless_1 ... ok
+. 0 804AACF 5
+. 83 C4 10 EB 76
+
+. 0 804AB4A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055859 5
+. E8 F4 52 FF FF
+
+. 0 804AB52 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804AB70 2
+. F3 AB
+
+. 0 804AB72 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804AB8C 2
+. F3 AB
+
+. 0 804AB8E 23
+. B8 2B 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 4B D8 FF FF
+
+. 0 804ABA5 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 804ABB0 41
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F C2 4D D8 06 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 00 75 24
+
+. 0 804ABD9 6
+. 83 7D BC 00 75 1E
+
+. 0 804ABDF 6
+. 83 7D C0 00 75 18
+
+. 0 804ABE5 6
+. 83 7D C4 00 75 12
+
+. 0 804ABEB 13
+. 83 EC 0C 68 E9 63 05 08 E8 18 D8 FF FF
+
+cmpnless_2 ... ok
+. 0 804ABF8 5
+. 83 C4 10 EB 76
+
+. 0 804AC73 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805585E 5
+. E8 18 54 FF FF
+
+. 0 804AC7B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804AC99 2
+. F3 AB
+
+. 0 804AC9B 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804ACCD 2
+. F3 AB
+
+. 0 804ACCF 47
+. B8 62 91 6A 43 89 45 C8 B8 55 91 6A 43 89 45 CC B8 62 91 6A 43 89 45 D0 B8 55 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 F2 D6 FF FF
+
+. 0 804ACFE 11
+. 83 C4 10 85 C0 0F 85 B9 00 00 00
+
+. 0 804AD09 47
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F C2 C8 05 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804AD38 6
+. 83 7D BC 00 75 1E
+
+. 0 804AD3E 6
+. 83 7D C0 FF 75 18
+
+. 0 804AD44 6
+. 83 7D C4 00 75 12
+
+. 0 804AD4A 13
+. 83 EC 0C 68 2A 64 05 08 E8 B9 D6 FF FF
+
+cmpnltps_1 ... ok
+. 0 804AD57 5
+. 83 C4 10 EB 76
+
+. 0 804ADD2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055863 5
+. E8 72 55 FF FF
+
+. 0 804ADDA 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804ADF8 2
+. F3 AB
+
+. 0 804ADFA 52
+. B8 5B 91 6A 43 89 45 D8 B8 5B 91 6A 43 89 45 DC B8 5B 91 6A 43 89 45 E0 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804AE2C 2
+. F3 AB
+
+. 0 804AE2E 47
+. B8 62 91 6A 43 89 45 C8 B8 55 91 6A 43 89 45 CC B8 62 91 6A 43 89 45 D0 B8 55 91 6A 43 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 93 D5 FF FF
+
+. 0 804AE5D 11
+. 83 C4 10 85 C0 0F 85 B2 00 00 00
+
+. 0 804AE68 40
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F C2 4D D8 05 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804AE90 6
+. 83 7D BC 00 75 1E
+
+. 0 804AE96 6
+. 83 7D C0 FF 75 18
+
+. 0 804AE9C 6
+. 83 7D C4 00 75 12
+
+. 0 804AEA2 13
+. 83 EC 0C 68 6B 64 05 08 E8 61 D5 FF FF
+
+cmpnltps_2 ... ok
+. 0 804AEAF 5
+. 83 C4 10 EB 76
+
+. 0 804AF2A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055868 5
+. E8 C5 56 FF FF
+
+. 0 804AF32 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804AF50 2
+. F3 AB
+
+. 0 804AF52 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804AF6C 2
+. F3 AB
+
+. 0 804AF6E 23
+. B8 2C 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 6B D4 FF FF
+
+. 0 804AF85 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 804AF90 48
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F C2 C8 05 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 FF 75 24
+
+. 0 804AFC0 6
+. 83 7D BC 00 75 1E
+
+. 0 804AFC6 6
+. 83 7D C0 00 75 18
+
+. 0 804AFCC 6
+. 83 7D C4 00 75 12
+
+. 0 804AFD2 13
+. 83 EC 0C 68 AC 64 05 08 E8 31 D4 FF FF
+
+cmpnltss_1 ... ok
+. 0 804AFDF 5
+. 83 C4 10 EB 76
+
+. 0 804B05A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805586D 5
+. E8 F0 57 FF FF
+
+. 0 804B062 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B080 2
+. F3 AB
+
+. 0 804B082 28
+. B8 2B 52 9A 44 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B09C 2
+. F3 AB
+
+. 0 804B09E 23
+. B8 2A 52 9A 44 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 3B D3 FF FF
+
+. 0 804B0B5 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 804B0C0 41
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F C2 4D D8 05 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 7D B8 00 75 24
+
+. 0 804B0E9 6
+. 83 7D BC 00 75 1E
+
+. 0 804B0EF 6
+. 83 7D C0 00 75 18
+
+. 0 804B0F5 6
+. 83 7D C4 00 75 12
+
+. 0 804B0FB 13
+. 83 EC 0C 68 ED 64 05 08 E8 08 D3 FF FF
+
+cmpnltss_2 ... ok
+. 0 804B108 5
+. 83 C4 10 EB 76
+
+. 0 804B183 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055872 5
+. E8 14 59 FF FF
+
+. 0 804B18B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B1A9 2
+. F3 AB
+
+. 0 804B1AB 28
+. B8 5B 91 6A 43 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B1C5 2
+. F3 AB
+
+. 0 804B1C7 23
+. B8 62 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 12 D2 FF FF
+
+. 0 804B1DE 11
+. 83 C4 10 85 C0 0F 85 81 00 00 00
+
+. 0 804B1E9 64
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 9C 81 24 24 2A F7 FF FF 81 0C 24 D5 08 00 00 9D 0F 2F C8 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 85 C0 75 12
+
+. 0 804B229 13
+. 83 EC 0C 68 2E 65 05 08 E8 DA D1 FF FF
+
+comiss_1 ... ok
+. 0 804B236 5
+. 83 C4 10 EB 3F
+
+. 0 804B27A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055877 5
+. E8 06 5A FF FF
+
+. 0 804B282 47
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E4 00 00 00 00 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B2AF 2
+. F3 AB
+
+. 0 804B2B1 23
+. B8 62 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 28 D1 FF FF
+
+. 0 804B2C8 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804B2CF 57
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 9C 81 24 24 2A F7 FF FF 81 0C 24 D5 08 00 00 9D 0F 2F 45 E4 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 85 C0 75 12
+
+. 0 804B308 13
+. 83 EC 0C 68 94 65 05 08 E8 FB D0 FF FF
+
+comiss_2 ... ok
+. 0 804B315 5
+. 83 C4 10 EB 3F
+
+. 0 804B359 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805587C 5
+. E8 E0 5A FF FF
+
+. 0 804B361 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B37F 2
+. F3 AB
+
+. 0 804B381 28
+. B8 5B 91 6A 43 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B39B 2
+. F3 AB
+
+. 0 804B39D 23
+. B8 55 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 3C D0 FF FF
+
+. 0 804B3B4 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804B3BF 65
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 9C 81 24 24 2A F7 FF FF 81 0C 24 D4 08 00 00 9D 0F 2F C8 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 83 F8 01 75 12
+
+. 0 804B400 13
+. 83 EC 0C 68 CF 65 05 08 E8 03 D0 FF FF
+
+comiss_3 ... ok
+. 0 804B40D 5
+. 83 C4 10 EB 3F
+
+. 0 804B451 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055881 5
+. E8 D3 5B FF FF
+
+. 0 804B459 47
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E4 00 00 00 00 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B486 2
+. F3 AB
+
+. 0 804B488 23
+. B8 55 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 51 CF FF FF
+
+. 0 804B49F 7
+. 83 C4 10 85 C0 75 7B
+
+. 0 804B4A6 58
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 9C 81 24 24 2A F7 FF FF 81 0C 24 D4 08 00 00 9D 0F 2F 45 E4 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 83 F8 01 75 12
+
+. 0 804B4E0 13
+. 83 EC 0C 68 0A 66 05 08 E8 23 CF FF FF
+
+comiss_4 ... ok
+. 0 804B4ED 5
+. 83 C4 10 EB 3F
+
+. 0 804B531 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055886 5
+. E8 AE 5C FF FF
+
+. 0 804B539 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B557 2
+. F3 AB
+
+. 0 804B559 28
+. B8 5B 91 6A 43 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B573 2
+. F3 AB
+
+. 0 804B575 23
+. B8 5B 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 64 CE FF FF
+
+. 0 804B58C 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 804B597 65
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 9C 81 24 24 2A F7 FF FF 81 0C 24 95 08 00 00 9D 0F 2F C8 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 83 F8 40 75 12
+
+. 0 804B5D8 13
+. 83 EC 0C 68 45 66 05 08 E8 2B CE FF FF
+
+comiss_5 ... ok
+. 0 804B5E5 5
+. 83 C4 10 EB 3F
+
+. 0 804B629 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805588B 5
+. E8 A1 5D FF FF
+
+. 0 804B631 47
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E4 00 00 00 00 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B65E 2
+. F3 AB
+
+. 0 804B660 23
+. B8 5B 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 79 CD FF FF
+
+. 0 804B677 7
+. 83 C4 10 85 C0 75 7B
+
+. 0 804B67E 58
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 9C 81 24 24 2A F7 FF FF 81 0C 24 95 08 00 00 9D 0F 2F 45 E4 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 83 F8 40 75 12
+
+. 0 804B6B8 13
+. 83 EC 0C 68 80 66 05 08 E8 4B CD FF FF
+
+comiss_6 ... ok
+. 0 804B6C5 5
+. 83 C4 10 EB 3F
+
+. 0 804B709 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055890 5
+. E8 7C 5E FF FF
+
+. 0 804B711 60
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 D2 04 00 00 C7 45 E4 2E 16 00 00 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B74B 2
+. F3 AB
+
+. 0 804B74D 47
+. B8 CD CC 8C 3F 89 45 C8 B8 CD CC 0C 40 89 45 CC B8 33 33 53 40 89 45 D0 B8 CD CC 8C 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 74 CC FF FF
+
+. 0 804B77C 11
+. 83 C4 10 85 C0 0F 85 43 01 00 00
+
+. 0 804B787 53
+. 9B DD B5 48 FF FF FF 0F 6F 45 E0 0F 12 45 C8 0F 16 45 D0 0F 2A C0 0F 13 45 B8 0F 17 45 C0 DD A5 48 FF FF FF 83 EC 08 B8 00 40 9A 44 50 FF 75 B8 E8 45 CD FF FF
+
+. 0 804B7BC 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804B7C3 17
+. 83 EC 08 B8 00 70 B1 45 50 FF 75 BC E8 2D CD FF FF
+
+. 0 804B7D4 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804B7DB 17
+. 83 EC 08 B8 33 33 53 40 50 FF 75 C0 E8 15 CD FF FF
+
+. 0 804B7EC 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804B7F3 17
+. 83 EC 08 B8 CD CC 8C 40 50 FF 75 C4 E8 FD CC FF FF
+
+. 0 804B804 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804B80B 13
+. 83 EC 0C 68 BB 66 05 08 E8 F8 CB FF FF
+
+cvtpi2ps_1 ... ok
+. 0 804B818 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804B8DA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055895 5
+. E8 48 60 FF FF
+
+. 0 804B8E2 60
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 D2 04 00 00 C7 45 E4 2E 16 00 00 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804B91C 2
+. F3 AB
+
+. 0 804B91E 47
+. B8 CD CC 8C 3F 89 45 C8 B8 CD CC 0C 40 89 45 CC B8 33 33 53 40 89 45 D0 B8 CD CC 8C 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 A3 CA FF FF
+
+. 0 804B94D 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 804B958 50
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 0F 2A 45 E0 0F 13 45 B8 0F 17 45 C0 DD A5 48 FF FF FF 83 EC 08 B8 00 40 9A 44 50 FF 75 B8 E8 77 CB FF FF
+
+. 0 804B98A 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804B991 17
+. 83 EC 08 B8 00 70 B1 45 50 FF 75 BC E8 5F CB FF FF
+
+. 0 804B9A2 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804B9A9 17
+. 83 EC 08 B8 33 33 53 40 50 FF 75 C0 E8 47 CB FF FF
+
+. 0 804B9BA 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804B9C1 17
+. 83 EC 08 B8 CD CC 8C 40 50 FF 75 C4 E8 2F CB FF FF
+
+. 0 804B9D2 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804B9D9 13
+. 83 EC 0C 68 FC 66 05 08 E8 2A CA FF FF
+
+cvtpi2ps_2 ... ok
+. 0 804B9E6 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804BAA8 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805589A 5
+. E8 11 62 FF FF
+
+. 0 804BAB0 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804BACE 2
+. F3 AB
+
+. 0 804BAD0 75
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 7B 14 8E 3F 89 45 E0 B8 7B 14 0E 40 89 45 E4 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 C7 45 D0 01 00 00 00 C7 45 D4 02 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 D5 C8 FF FF
+
+. 0 804BB1B 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804BB22 38
+. 9B DD B5 58 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 6F 45 D0 0F 2D C0 0F 7F 45 C8 DD A5 58 FF FF FF 83 7D C8 0C 75 18
+
+. 0 804BB48 6
+. 83 7D CC 39 75 12
+
+. 0 804BB4E 13
+. 83 EC 0C 68 3D 67 05 08 E8 B5 C8 FF FF
+
+cvtps2pi_1 ... ok
+. 0 804BB5B 5
+. 83 C4 10 EB 4C
+
+. 0 804BBAC 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805589F 5
+. E8 10 63 FF FF
+
+. 0 804BBB4 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804BBD2 2
+. F3 AB
+
+. 0 804BBD4 75
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 7B 14 8E 3F 89 45 E0 B8 7B 14 0E 40 89 45 E4 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 C7 45 D0 01 00 00 00 C7 45 D4 02 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 D1 C7 FF FF
+
+. 0 804BC1F 7
+. 83 C4 10 85 C0 75 73
+
+. 0 804BC26 31
+. 9B DD B5 58 FF FF FF 0F 6F 45 D0 0F 2D 45 D8 0F 7F 45 C8 DD A5 58 FF FF FF 83 7D C8 0C 75 18
+
+. 0 804BC45 6
+. 83 7D CC 39 75 12
+
+. 0 804BC4B 13
+. 83 EC 0C 68 CD 67 05 08 E8 B8 C7 FF FF
+
+cvtps2pi_2 ... ok
+. 0 804BC58 5
+. 83 C4 10 EB 4C
+
+. 0 804BCA9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558A4 5
+. E8 08 64 FF FF
+
+. 0 804BCB1 46
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 0C 00 00 00 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804BCDD 2
+. F3 AB
+
+. 0 804BCDF 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 E2 C6 FF FF
+
+. 0 804BD0E 11
+. 83 C4 10 85 C0 0F 85 43 01 00 00
+
+. 0 804BD19 53
+. 9B DD B5 48 FF FF FF 8B 45 E4 0F 12 45 C8 0F 16 45 D0 F3 0F 2A C0 0F 13 45 B8 0F 17 45 C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 40 41 50 FF 75 B8 E8 B3 C7 FF FF
+
+. 0 804BD4E 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804BD55 17
+. 83 EC 08 B8 7B 14 0E 40 50 FF 75 BC E8 9B C7 FF FF
+
+. 0 804BD66 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804BD6D 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 83 C7 FF FF
+
+. 0 804BD7E 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804BD85 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 C4 E8 6B C7 FF FF
+
+. 0 804BD96 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804BD9D 13
+. 83 EC 0C 68 0E 68 05 08 E8 66 C6 FF FF
+
+cvtsi2ss_1 ... ok
+. 0 804BDAA 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804BE6C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558A9 5
+. E8 C6 65 FF FF
+
+. 0 804BE74 46
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 0C 00 00 00 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804BEA0 2
+. F3 AB
+
+. 0 804BEA2 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 1F C5 FF FF
+
+. 0 804BED1 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 804BEDC 51
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 F3 0F 2A 45 E4 0F 13 45 B8 0F 17 45 C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 40 41 50 FF 75 B8 E8 F2 C5 FF FF
+
+. 0 804BF0F 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804BF16 17
+. 83 EC 08 B8 7B 14 0E 40 50 FF 75 BC E8 DA C5 FF FF
+
+. 0 804BF27 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804BF2E 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 C2 C5 FF FF
+
+. 0 804BF3F 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804BF46 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 C4 E8 AA C5 FF FF
+
+. 0 804BF57 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804BF5E 13
+. 83 EC 0C 68 4F 68 05 08 E8 A5 C4 FF FF
+
+cvtsi2ss_2 ... ok
+. 0 804BF6B 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804C02D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558AE 5
+. E8 82 67 FF FF
+
+. 0 804C035 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C053 2
+. F3 AB
+
+. 0 804C055 61
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 C7 45 D4 00 00 00 00 C7 45 D4 63 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 5E C3 FF FF
+
+. 0 804C092 7
+. 83 C4 10 85 C0 75 5E
+
+. 0 804C099 37
+. 9B DD B5 58 FF FF FF 0F 12 45 D8 0F 16 45 E0 8B 45 D4 F3 0F 2D C0 89 45 D0 DD A5 58 FF FF FF 83 7D D0 0C 75 12
+
+. 0 804C0BE 13
+. 83 EC 0C 68 90 68 05 08 E8 45 C3 FF FF
+
+cvtss2si_1 ... ok
+. 0 804C0CB 5
+. 83 C4 10 EB 37
+
+. 0 804C107 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558B3 5
+. E8 57 68 FF FF
+
+. 0 804C10F 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C12D 2
+. F3 AB
+
+. 0 804C12F 61
+. B8 B8 1E 63 42 89 45 D8 B8 A4 70 45 41 89 45 DC B8 CD 4C AF 42 89 45 E0 B8 0A D7 2C 42 89 45 E4 C7 45 D4 00 00 00 00 C7 45 D4 63 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 84 C2 FF FF
+
+. 0 804C16C 7
+. 83 C4 10 85 C0 75 57
+
+. 0 804C173 30
+. 9B DD B5 58 FF FF FF 8B 45 D4 F3 0F 2D 45 D8 89 45 D0 DD A5 58 FF FF FF 83 7D D0 39 75 12
+
+. 0 804C191 13
+. 83 EC 0C 68 D1 68 05 08 E8 72 C2 FF FF
+
+cvtss2si_2 ... ok
+. 0 804C19E 5
+. 83 C4 10 EB 37
+
+. 0 804C1DA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558B8 5
+. E8 25 69 FF FF
+
+. 0 804C1E2 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C200 2
+. F3 AB
+
+. 0 804C202 75
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 7B 14 8E 3F 89 45 E0 B8 7B 14 0E 40 89 45 E4 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 C7 45 D0 01 00 00 00 C7 45 D4 02 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 A3 C1 FF FF
+
+. 0 804C24D 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 804C254 38
+. 9B DD B5 58 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 6F 45 D0 0F 2C C0 0F 7F 45 C8 DD A5 58 FF FF FF 83 7D C8 0C 75 18
+
+. 0 804C27A 6
+. 83 7D CC 38 75 12
+
+. 0 804C280 13
+. 83 EC 0C 68 12 69 05 08 E8 83 C1 FF FF
+
+cvttps2pi_1 ... ok
+. 0 804C28D 5
+. 83 C4 10 EB 4C
+
+. 0 804C2DE 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558BD 5
+. E8 24 6A FF FF
+
+. 0 804C2E6 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C304 2
+. F3 AB
+
+. 0 804C306 75
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 7B 14 8E 3F 89 45 E0 B8 7B 14 0E 40 89 45 E4 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 C7 45 D0 01 00 00 00 C7 45 D4 02 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 9F C0 FF FF
+
+. 0 804C351 7
+. 83 C4 10 85 C0 75 73
+
+. 0 804C358 31
+. 9B DD B5 58 FF FF FF 0F 6F 45 D0 0F 2C 45 D8 0F 7F 45 C8 DD A5 58 FF FF FF 83 7D C8 0C 75 18
+
+. 0 804C377 6
+. 83 7D CC 38 75 12
+
+. 0 804C37D 13
+. 83 EC 0C 68 56 69 05 08 E8 86 C0 FF FF
+
+cvttps2pi_2 ... ok
+. 0 804C38A 5
+. 83 C4 10 EB 4C
+
+. 0 804C3DB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558C2 5
+. E8 1C 6B FF FF
+
+. 0 804C3E3 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C401 2
+. F3 AB
+
+. 0 804C403 61
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 C7 45 D4 00 00 00 00 C7 45 D4 63 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 B0 BF FF FF
+
+. 0 804C440 7
+. 83 C4 10 85 C0 75 5E
+
+. 0 804C447 37
+. 9B DD B5 58 FF FF FF 0F 12 45 D8 0F 16 45 E0 8B 45 D4 F3 0F 2C C0 89 45 D0 DD A5 58 FF FF FF 83 7D D0 0C 75 12
+
+. 0 804C46C 13
+. 83 EC 0C 68 9A 69 05 08 E8 97 BF FF FF
+
+cvttss2si_1 ... ok
+. 0 804C479 5
+. 83 C4 10 EB 37
+
+. 0 804C4B5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558C7 5
+. E8 F1 6B FF FF
+
+. 0 804C4BD 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C4DB 2
+. F3 AB
+
+. 0 804C4DD 61
+. B8 B8 1E 63 42 89 45 D8 B8 A4 70 45 41 89 45 DC B8 CD 4C AF 42 89 45 E0 B8 0A D7 2C 42 89 45 E4 C7 45 D4 00 00 00 00 C7 45 D4 63 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 D6 BE FF FF
+
+. 0 804C51A 7
+. 83 C4 10 85 C0 75 57
+
+. 0 804C521 30
+. 9B DD B5 58 FF FF FF 8B 45 D4 F3 0F 2C 45 D8 89 45 D0 DD A5 58 FF FF FF 83 7D D0 38 75 12
+
+. 0 804C53F 13
+. 83 EC 0C 68 DE 69 05 08 E8 C4 BE FF FF
+
+cvttss2si_2 ... ok
+. 0 804C54C 5
+. 83 C4 10 EB 37
+
+. 0 804C588 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558CC 5
+. E8 BF 6C FF FF
+
+. 0 804C590 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C5AE 2
+. F3 AB
+
+. 0 804C5B0 52
+. B8 00 00 00 40 89 45 D8 B8 00 00 40 40 89 45 DC B8 00 00 80 40 89 45 E0 B8 00 00 A0 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C5E2 2
+. F3 AB
+
+. 0 804C5E4 47
+. B8 A4 70 C5 41 89 45 C8 B8 F6 28 6C 40 89 45 CC B8 85 EB 41 42 89 45 D0 B8 33 33 5E 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 DD BD FF FF
+
+. 0 804C613 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 804C61E 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 5E C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 AA BE FF FF
+
+. 0 804C657 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804C65E 17
+. 83 EC 08 B8 A4 70 9D 3F 50 FF 75 BC E8 92 BE FF FF
+
+. 0 804C66F 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804C676 17
+. 83 EC 08 B8 85 EB 41 41 50 FF 75 C0 E8 7A BE FF FF
+
+. 0 804C687 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804C68E 17
+. 83 EC 08 B8 8F C2 31 41 50 FF 75 C4 E8 62 BE FF FF
+
+. 0 804C69F 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804C6A6 13
+. 83 EC 0C 68 22 6A 05 08 E8 5D BD FF FF
+
+divps_1 ... ok
+. 0 804C6B3 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804C775 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558D1 5
+. E8 A7 6E FF FF
+
+. 0 804C77D 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C79B 2
+. F3 AB
+
+. 0 804C79D 52
+. B8 00 00 00 40 89 45 D8 B8 00 00 40 40 89 45 DC B8 00 00 80 40 89 45 E0 B8 00 00 A0 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C7CF 2
+. F3 AB
+
+. 0 804C7D1 47
+. B8 A4 70 C5 41 89 45 C8 B8 F6 28 6C 40 89 45 CC B8 85 EB 41 42 89 45 D0 B8 33 33 5E 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 F0 BB FF FF
+
+. 0 804C800 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 804C80B 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 5E 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 C4 BC FF FF
+
+. 0 804C83D 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804C844 17
+. 83 EC 08 B8 A4 70 9D 3F 50 FF 75 BC E8 AC BC FF FF
+
+. 0 804C855 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804C85C 17
+. 83 EC 08 B8 85 EB 41 41 50 FF 75 C0 E8 94 BC FF FF
+
+. 0 804C86D 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804C874 17
+. 83 EC 08 B8 8F C2 31 41 50 FF 75 C4 E8 7C BC FF FF
+
+. 0 804C885 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804C88C 13
+. 83 EC 0C 68 5A 6A 05 08 E8 77 BB FF FF
+
+divps_2 ... ok
+. 0 804C899 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804C95B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558D6 5
+. E8 88 70 FF FF
+
+. 0 804C963 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C981 2
+. F3 AB
+
+. 0 804C983 52
+. B8 00 00 00 40 89 45 D8 B8 00 00 40 40 89 45 DC B8 00 00 80 40 89 45 E0 B8 00 00 A0 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804C9B5 2
+. F3 AB
+
+. 0 804C9B7 47
+. B8 A4 70 C5 41 89 45 C8 B8 F6 28 6C 40 89 45 CC B8 85 EB 41 42 89 45 D0 B8 33 33 5E 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 0A BA FF FF
+
+. 0 804C9E6 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 804C9F1 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 5E C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 D6 BA FF FF
+
+. 0 804CA2B 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804CA32 17
+. 83 EC 08 B8 F6 28 6C 40 50 FF 75 BC E8 BE BA FF FF
+
+. 0 804CA43 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804CA4A 17
+. 83 EC 08 B8 85 EB 41 42 50 FF 75 C0 E8 A6 BA FF FF
+
+. 0 804CA5B 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804CA62 17
+. 83 EC 08 B8 33 33 5E 42 50 FF 75 C4 E8 8E BA FF FF
+
+. 0 804CA73 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804CA7A 13
+. 83 EC 0C 68 92 6A 05 08 E8 89 B9 FF FF
+
+divss_1 ... ok
+. 0 804CA87 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804CB49 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558DB 5
+. E8 71 72 FF FF
+
+. 0 804CB51 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804CB6F 2
+. F3 AB
+
+. 0 804CB71 52
+. B8 00 00 00 40 89 45 D8 B8 00 00 40 40 89 45 DC B8 00 00 80 40 89 45 E0 B8 00 00 A0 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804CBA3 2
+. F3 AB
+
+. 0 804CBA5 47
+. B8 A4 70 C5 41 89 45 C8 B8 F6 28 6C 40 89 45 CC B8 85 EB 41 42 89 45 D0 B8 33 33 5E 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 1C B8 FF FF
+
+. 0 804CBD4 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 804CBDF 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F 5E 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 EF B8 FF FF
+
+. 0 804CC12 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804CC19 17
+. 83 EC 08 B8 F6 28 6C 40 50 FF 75 BC E8 D7 B8 FF FF
+
+. 0 804CC2A 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804CC31 17
+. 83 EC 08 B8 85 EB 41 42 50 FF 75 C0 E8 BF B8 FF FF
+
+. 0 804CC42 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804CC49 17
+. 83 EC 08 B8 33 33 5E 42 50 FF 75 C4 E8 A7 B8 FF FF
+
+. 0 804CC5A 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804CC61 13
+. 83 EC 0C 68 CA 6A 05 08 E8 A2 B7 FF FF
+
+divss_2 ... ok
+. 0 804CC6E 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804CD30 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558E0 5
+. E8 53 74 FF FF
+
+. 0 804CD38 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804CD56 2
+. F3 AB
+
+. 0 804CD58 52
+. B8 7B 14 0E 40 89 45 D8 B8 7B 14 8E 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 7B 14 0E 41 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804CD8A 2
+. F3 AB
+
+. 0 804CD8C 47
+. B8 D7 A3 F8 40 89 45 C8 B8 9A 99 B1 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 3F 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 35 B6 FF FF
+
+. 0 804CDBB 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 804CDC6 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 5F C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 D7 A3 F8 40 50 FF 75 B8 E8 02 B7 FF FF
+
+. 0 804CDFF 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804CE06 17
+. 83 EC 08 B8 9A 99 B1 40 50 FF 75 BC E8 EA B6 FF FF
+
+. 0 804CE17 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804CE1E 17
+. 83 EC 08 B8 B8 1E D5 40 50 FF 75 C0 E8 D2 B6 FF FF
+
+. 0 804CE2F 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804CE36 17
+. 83 EC 08 B8 7B 14 0E 41 50 FF 75 C4 E8 BA B6 FF FF
+
+. 0 804CE47 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804CE4E 13
+. 83 EC 0C 68 02 6B 05 08 E8 B5 B5 FF FF
+
+maxps_1 ... ok
+. 0 804CE5B 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804CF1D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558E5 5
+. E8 3B 76 FF FF
+
+. 0 804CF25 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804CF43 2
+. F3 AB
+
+. 0 804CF45 52
+. B8 7B 14 0E 40 89 45 D8 B8 7B 14 8E 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 7B 14 0E 41 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804CF77 2
+. F3 AB
+
+. 0 804CF79 47
+. B8 D7 A3 F8 40 89 45 C8 B8 9A 99 B1 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 3F 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 48 B4 FF FF
+
+. 0 804CFA8 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 804CFB3 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 5F 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 D7 A3 F8 40 50 FF 75 B8 E8 1C B5 FF FF
+
+. 0 804CFE5 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804CFEC 17
+. 83 EC 08 B8 9A 99 B1 40 50 FF 75 BC E8 04 B5 FF FF
+
+. 0 804CFFD 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804D004 17
+. 83 EC 08 B8 B8 1E D5 40 50 FF 75 C0 E8 EC B4 FF FF
+
+. 0 804D015 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804D01C 17
+. 83 EC 08 B8 7B 14 0E 41 50 FF 75 C4 E8 D4 B4 FF FF
+
+. 0 804D02D 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804D034 13
+. 83 EC 0C 68 3A 6B 05 08 E8 CF B3 FF FF
+
+maxps_2 ... ok
+. 0 804D041 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804D103 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558EA 5
+. E8 1C 78 FF FF
+
+. 0 804D10B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D129 2
+. F3 AB
+
+. 0 804D12B 52
+. B8 7B 14 0E 40 89 45 D8 B8 7B 14 8E 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 7B 14 0E 41 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D15D 2
+. F3 AB
+
+. 0 804D15F 47
+. B8 D7 A3 F8 40 89 45 C8 B8 9A 99 B1 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 3F 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 62 B2 FF FF
+
+. 0 804D18E 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 804D199 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 5F C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 D7 A3 F8 40 50 FF 75 B8 E8 2E B3 FF FF
+
+. 0 804D1D3 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804D1DA 17
+. 83 EC 08 B8 9A 99 B1 40 50 FF 75 BC E8 16 B3 FF FF
+
+. 0 804D1EB 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804D1F2 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 FE B2 FF FF
+
+. 0 804D203 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804D20A 17
+. 83 EC 08 B8 7B 14 8E 3F 50 FF 75 C4 E8 E6 B2 FF FF
+
+. 0 804D21B 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804D222 13
+. 83 EC 0C 68 72 6B 05 08 E8 E1 B1 FF FF
+
+maxss_1 ... ok
+. 0 804D22F 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804D2F1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558EF 5
+. E8 05 7A FF FF
+
+. 0 804D2F9 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D317 2
+. F3 AB
+
+. 0 804D319 52
+. B8 7B 14 0E 41 89 45 D8 B8 B8 1E D5 40 89 45 DC B8 7B 14 8E 40 89 45 E0 B8 7B 14 0E 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D34B 2
+. F3 AB
+
+. 0 804D34D 47
+. B8 7B 14 8E 3F 89 45 C8 B8 B8 1E 55 40 89 45 CC B8 9A 99 B1 40 89 45 D0 B8 D7 A3 F8 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 74 B0 FF FF
+
+. 0 804D37C 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 804D387 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F 5F 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 7B 14 0E 41 50 FF 75 B8 E8 47 B1 FF FF
+
+. 0 804D3BA 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804D3C1 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 BC E8 2F B1 FF FF
+
+. 0 804D3D2 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804D3D9 17
+. 83 EC 08 B8 9A 99 B1 40 50 FF 75 C0 E8 17 B1 FF FF
+
+. 0 804D3EA 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804D3F1 17
+. 83 EC 08 B8 D7 A3 F8 40 50 FF 75 C4 E8 FF B0 FF FF
+
+. 0 804D402 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804D409 13
+. 83 EC 0C 68 AA 6B 05 08 E8 FA AF FF FF
+
+maxss_2 ... ok
+. 0 804D416 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804D4D8 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558F4 5
+. E8 E7 7B FF FF
+
+. 0 804D4E0 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D4FE 2
+. F3 AB
+
+. 0 804D500 52
+. B8 7B 14 0E 40 89 45 D8 B8 7B 14 8E 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 7B 14 0E 41 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D532 2
+. F3 AB
+
+. 0 804D534 47
+. B8 D7 A3 F8 40 89 45 C8 B8 9A 99 B1 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 3F 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 8D AE FF FF
+
+. 0 804D563 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 804D56E 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 5D C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 7B 14 0E 40 50 FF 75 B8 E8 5A AF FF FF
+
+. 0 804D5A7 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804D5AE 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 BC E8 42 AF FF FF
+
+. 0 804D5BF 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804D5C6 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 2A AF FF FF
+
+. 0 804D5D7 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804D5DE 17
+. 83 EC 08 B8 7B 14 8E 3F 50 FF 75 C4 E8 12 AF FF FF
+
+. 0 804D5EF 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804D5F6 13
+. 83 EC 0C 68 E2 6B 05 08 E8 0D AE FF FF
+
+minps_1 ... ok
+. 0 804D603 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804D6C5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558F9 5
+. E8 CF 7D FF FF
+
+. 0 804D6CD 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D6EB 2
+. F3 AB
+
+. 0 804D6ED 52
+. B8 7B 14 0E 40 89 45 D8 B8 7B 14 8E 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 7B 14 0E 41 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D71F 2
+. F3 AB
+
+. 0 804D721 47
+. B8 D7 A3 F8 40 89 45 C8 B8 9A 99 B1 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 3F 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 A0 AC FF FF
+
+. 0 804D750 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 804D75B 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 5D 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 7B 14 0E 40 50 FF 75 B8 E8 74 AD FF FF
+
+. 0 804D78D 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804D794 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 BC E8 5C AD FF FF
+
+. 0 804D7A5 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804D7AC 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 44 AD FF FF
+
+. 0 804D7BD 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804D7C4 17
+. 83 EC 08 B8 7B 14 8E 3F 50 FF 75 C4 E8 2C AD FF FF
+
+. 0 804D7D5 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804D7DC 13
+. 83 EC 0C 68 1A 6C 05 08 E8 27 AC FF FF
+
+minps_2 ... ok
+. 0 804D7E9 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804D8AB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80558FE 5
+. E8 B0 7F FF FF
+
+. 0 804D8B3 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D8D1 2
+. F3 AB
+
+. 0 804D8D3 52
+. B8 7B 14 0E 40 89 45 D8 B8 7B 14 8E 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 7B 14 0E 41 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804D905 2
+. F3 AB
+
+. 0 804D907 47
+. B8 D7 A3 F8 40 89 45 C8 B8 9A 99 B1 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 3F 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 BA AA FF FF
+
+. 0 804D936 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 804D941 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 5D C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 7B 14 0E 40 50 FF 75 B8 E8 86 AB FF FF
+
+. 0 804D97B 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804D982 17
+. 83 EC 08 B8 9A 99 B1 40 50 FF 75 BC E8 6E AB FF FF
+
+. 0 804D993 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804D99A 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 56 AB FF FF
+
+. 0 804D9AB 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804D9B2 17
+. 83 EC 08 B8 7B 14 8E 3F 50 FF 75 C4 E8 3E AB FF FF
+
+. 0 804D9C3 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804D9CA 13
+. 83 EC 0C 68 52 6C 05 08 E8 39 AA FF FF
+
+minss_1 ... ok
+. 0 804D9D7 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804DA99 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055903 5
+. E8 99 81 FF FF
+
+. 0 804DAA1 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804DABF 2
+. F3 AB
+
+. 0 804DAC1 52
+. B8 7B 14 0E 41 89 45 D8 B8 B8 1E D5 40 89 45 DC B8 7B 14 8E 40 89 45 E0 B8 7B 14 0E 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804DAF3 2
+. F3 AB
+
+. 0 804DAF5 47
+. B8 7B 14 8E 3F 89 45 C8 B8 B8 1E 55 40 89 45 CC B8 9A 99 B1 40 89 45 D0 B8 D7 A3 F8 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 CC A8 FF FF
+
+. 0 804DB24 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 804DB2F 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F 5D 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 7B 14 8E 3F 50 FF 75 B8 E8 9F A9 FF FF
+
+. 0 804DB62 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804DB69 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 BC E8 87 A9 FF FF
+
+. 0 804DB7A 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804DB81 17
+. 83 EC 08 B8 9A 99 B1 40 50 FF 75 C0 E8 6F A9 FF FF
+
+. 0 804DB92 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804DB99 17
+. 83 EC 08 B8 D7 A3 F8 40 50 FF 75 C4 E8 57 A9 FF FF
+
+. 0 804DBAA 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804DBB1 13
+. 83 EC 0C 68 8A 6C 05 08 E8 52 A8 FF FF
+
+minss_2 ... ok
+. 0 804DBBE 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804DC80 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055908 5
+. E8 7B 83 FF FF
+
+. 0 804DC88 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804DCA6 2
+. F3 AB
+
+. 0 804DCA8 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804DCDA 2
+. F3 AB
+
+. 0 804DCDC 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 E5 A6 FF FF
+
+. 0 804DD0B 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 804DD16 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 28 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 B2 A7 FF FF
+
+. 0 804DD4F 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804DD56 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 BC E8 9A A7 FF FF
+
+. 0 804DD67 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804DD6E 17
+. 83 EC 08 B8 0A D7 2C 42 50 FF 75 C0 E8 82 A7 FF FF
+
+. 0 804DD7F 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804DD86 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 C4 E8 6A A7 FF FF
+
+. 0 804DD97 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804DD9E 13
+. 83 EC 0C 68 C2 6C 05 08 E8 65 A6 FF FF
+
+movaps_1 ... ok
+. 0 804DDAB 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804DE6D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805590D 5
+. E8 63 85 FF FF
+
+. 0 804DE75 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804DE93 2
+. F3 AB
+
+. 0 804DE95 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804DEC7 2
+. F3 AB
+
+. 0 804DEC9 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 F8 A4 FF FF
+
+. 0 804DEF8 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 804DF03 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 28 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 CC A5 FF FF
+
+. 0 804DF35 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804DF3C 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 BC E8 B4 A5 FF FF
+
+. 0 804DF4D 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804DF54 17
+. 83 EC 08 B8 0A D7 2C 42 50 FF 75 C0 E8 9C A5 FF FF
+
+. 0 804DF65 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804DF6C 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 C4 E8 84 A5 FF FF
+
+. 0 804DF7D 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804DF84 13
+. 83 EC 0C 68 FD 6C 05 08 E8 7F A4 FF FF
+
+movaps_2 ... ok
+. 0 804DF91 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804E053 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055912 5
+. E8 44 87 FF FF
+
+. 0 804E05B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804E079 2
+. F3 AB
+
+. 0 804E07B 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804E0AD 2
+. F3 AB
+
+. 0 804E0AF 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 12 A3 FF FF
+
+. 0 804E0DE 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 804E0E9 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 12 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 0A D7 2C 42 50 FF 75 B8 E8 DF A3 FF FF
+
+. 0 804E122 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804E129 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 BC E8 C7 A3 FF FF
+
+. 0 804E13A 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804E141 17
+. 83 EC 08 B8 EC 51 05 42 50 FF 75 C0 E8 AF A3 FF FF
+
+. 0 804E152 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804E159 17
+. 83 EC 08 B8 8F C2 31 42 50 FF 75 C4 E8 97 A3 FF FF
+
+. 0 804E16A 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804E171 13
+. 83 EC 0C 68 38 6D 05 08 E8 92 A2 FF FF
+
+movhlps_1 ... ok
+. 0 804E17E 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804E240 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055917 5
+. E8 2C 89 FF FF
+
+. 0 804E248 62
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 A4 70 45 41 89 45 E0 B8 B8 1E 63 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804E284 2
+. F3 AB
+
+. 0 804E286 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 3B A1 FF FF
+
+. 0 804E2B5 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 804E2C0 50
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 0F 16 45 E0 0F 13 45 B8 0F 17 45 C0 DD A5 48 FF FF FF 83 EC 08 B8 8F C2 31 41 50 FF 75 B8 E8 0F A2 FF FF
+
+. 0 804E2F2 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804E2F9 17
+. 83 EC 08 B8 8F C2 B1 41 50 FF 75 BC E8 F7 A1 FF FF
+
+. 0 804E30A 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804E311 17
+. 83 EC 08 B8 A4 70 45 41 50 FF 75 C0 E8 DF A1 FF FF
+
+. 0 804E322 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804E329 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 C4 E8 C7 A1 FF FF
+
+. 0 804E33A 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804E341 13
+. 83 EC 0C 68 76 6D 05 08 E8 C2 A0 FF FF
+
+movhps_1 ... ok
+. 0 804E34E 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804E410 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805591C 5
+. E8 F7 8A FF FF
+
+. 0 804E418 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804E436 2
+. F3 AB
+
+. 0 804E438 89
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 B8 8F C2 31 41 89 45 D0 B8 8F C2 B1 41 89 45 D4 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 C0 92 05 08 E8 5F 9F FF FF
+
+. 0 804E491 11
+. 83 C4 10 85 C0 0F 85 B9 00 00 00
+
+. 0 804E49C 42
+. 9B DD B5 58 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 17 45 C8 DD A5 58 FF FF FF 83 EC 08 B8 0A D7 2C 42 50 FF 75 C8 E8 3B A0 FF FF
+
+. 0 804E4C6 7
+. 83 C4 10 85 C0 74 2A
+
+. 0 804E4CD 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 CC E8 23 A0 FF FF
+
+. 0 804E4DE 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E4E5 13
+. 83 EC 0C 68 B1 6D 05 08 E8 1E 9F FF FF
+
+movhps_2 ... ok
+. 0 804E4F2 5
+. 83 C4 10 EB 6E
+
+. 0 804E565 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055921 5
+. E8 47 8C FF FF
+
+. 0 804E56D 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804E58B 2
+. F3 AB
+
+. 0 804E58D 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804E5BF 2
+. F3 AB
+
+. 0 804E5C1 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 00 9E FF FF
+
+. 0 804E5F0 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 804E5FB 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 16 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 8F C2 31 41 50 FF 75 B8 E8 CD 9E FF FF
+
+. 0 804E634 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804E63B 17
+. 83 EC 08 B8 8F C2 B1 41 50 FF 75 BC E8 B5 9E FF FF
+
+. 0 804E64C 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804E653 17
+. 83 EC 08 B8 A4 70 45 41 50 FF 75 C0 E8 9D 9E FF FF
+
+. 0 804E664 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804E66B 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 C4 E8 85 9E FF FF
+
+. 0 804E67C 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804E683 13
+. 83 EC 0C 68 EC 6D 05 08 E8 80 9D FF FF
+
+movlhps_1 ... ok
+. 0 804E690 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804E752 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055926 5
+. E8 2F 8E FF FF
+
+. 0 804E75A 62
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 B8 A4 70 45 41 89 45 E0 B8 B8 1E 63 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804E796 2
+. F3 AB
+
+. 0 804E798 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 29 9C FF FF
+
+. 0 804E7C7 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 804E7D2 50
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 0F 12 45 E0 0F 13 45 B8 0F 17 45 C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 FD 9C FF FF
+
+. 0 804E804 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804E80B 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 BC E8 E5 9C FF FF
+
+. 0 804E81C 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804E823 17
+. 83 EC 08 B8 EC 51 05 42 50 FF 75 C0 E8 CD 9C FF FF
+
+. 0 804E834 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804E83B 17
+. 83 EC 08 B8 8F C2 31 42 50 FF 75 C4 E8 B5 9C FF FF
+
+. 0 804E84C 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804E853 13
+. 83 EC 0C 68 2A 6E 05 08 E8 B0 9B FF FF
+
+movlps_1 ... ok
+. 0 804E860 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804E922 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805592B 5
+. E8 FA 8F FF FF
+
+. 0 804E92A 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804E948 2
+. F3 AB
+
+. 0 804E94A 89
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 B8 8F C2 31 41 89 45 D0 B8 8F C2 B1 41 89 45 D4 8B 45 D0 8B 55 D4 89 45 C8 89 55 CC 83 EC 08 6A 01 68 C0 92 05 08 E8 4D 9A FF FF
+
+. 0 804E9A3 11
+. 83 C4 10 85 C0 0F 85 B9 00 00 00
+
+. 0 804E9AE 42
+. 9B DD B5 58 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 13 45 C8 DD A5 58 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 C8 E8 29 9B FF FF
+
+. 0 804E9D8 7
+. 83 C4 10 85 C0 74 2A
+
+. 0 804E9DF 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 CC E8 11 9B FF FF
+
+. 0 804E9F0 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804E9F7 13
+. 83 EC 0C 68 65 6E 05 08 E8 0C 9A FF FF
+
+movlps_2 ... ok
+. 0 804EA04 5
+. 83 C4 10 EB 6E
+
+. 0 804EA77 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055930 5
+. E8 4A 91 FF FF
+
+. 0 804EA7F 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804EA9D 2
+. F3 AB
+
+. 0 804EA9F 54
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 C2 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF C2 89 45 E4 C7 45 D4 00 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 1B 99 FF FF
+
+. 0 804EAD5 7
+. 83 C4 10 85 C0 75 5D
+
+. 0 804EADC 36
+. 9B DD B5 58 FF FF FF 0F 12 6D D8 0F 16 6D E0 8B 45 D4 0F 50 C5 89 45 D0 DD A5 58 FF FF FF 83 7D D0 0A 75 12
+
+. 0 804EB00 13
+. 83 EC 0C 68 A0 6E 05 08 E8 03 99 FF FF
+
+movmskps_1 ... ok
+. 0 804EB0D 5
+. 83 C4 10 EB 37
+
+. 0 804EB49 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055935 5
+. E8 17 92 FF FF
+
+. 0 804EB51 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804EB6F 2
+. F3 AB
+
+. 0 804EB71 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804EBA3 2
+. F3 AB
+
+. 0 804EBA5 71
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 8B 45 C8 89 45 B8 8B 45 CC 89 45 BC 8B 45 D0 89 45 C0 8B 45 D4 89 45 C4 83 EC 08 6A 01 68 C0 92 05 08 E8 04 98 FF FF
+
+. 0 804EBEC 11
+. 83 C4 10 85 C0 0F 85 38 01 00 00
+
+. 0 804EBF7 42
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 2B 45 B8 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 E0 98 FF FF
+
+. 0 804EC21 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804EC28 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 BC E8 C8 98 FF FF
+
+. 0 804EC39 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804EC40 17
+. 83 EC 08 B8 0A D7 2C 42 50 FF 75 C0 E8 B0 98 FF FF
+
+. 0 804EC51 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804EC58 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 C4 E8 98 98 FF FF
+
+. 0 804EC69 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804EC70 13
+. 83 EC 0C 68 E1 6E 05 08 E8 93 97 FF FF
+
+movntps_1 ... ok
+. 0 804EC7D 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804ED3F 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805593A 5
+. E8 08 94 FF FF
+
+. 0 804ED47 95
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C7 45 D8 34 34 34 34 C7 45 DC 12 12 12 12 8B 45 D8 8B 55 DC 89 45 D0 89 55 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 4A 96 FF FF
+
+. 0 804EDA6 7
+. 83 C4 10 85 C0 75 70
+
+. 0 804EDAD 44
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F E7 45 D0 DD A5 58 FF FF FF 8B 45 D4 35 67 45 23 01 8B 55 D0 81 F2 EF CD AB 89 09 D0 85 C0 75 12
+
+. 0 804EDD9 13
+. 83 EC 0C 68 1F 6F 05 08 E8 2A 96 FF FF
+
+movntq_1 ... ok
+. 0 804EDE6 5
+. 83 C4 10 EB 42
+
+. 0 804EE2D 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805593F 5
+. E8 F1 94 FF FF
+
+. 0 804EE35 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804EE53 2
+. F3 AB
+
+. 0 804EE55 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804EE87 2
+. F3 AB
+
+. 0 804EE89 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 38 95 FF FF
+
+. 0 804EEB8 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 804EEC3 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 10 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 04 96 FF FF
+
+. 0 804EEFD 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804EF04 17
+. 83 EC 08 B8 8F C2 B1 41 50 FF 75 BC E8 EC 95 FF FF
+
+. 0 804EF15 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804EF1C 17
+. 83 EC 08 B8 EC 51 05 42 50 FF 75 C0 E8 D4 95 FF FF
+
+. 0 804EF2D 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804EF34 17
+. 83 EC 08 B8 8F C2 31 42 50 FF 75 C4 E8 BC 95 FF FF
+
+. 0 804EF45 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804EF4C 13
+. 83 EC 0C 68 5A 6F 05 08 E8 B7 94 FF FF
+
+movss_1 ... ok
+. 0 804EF59 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804F01B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055944 5
+. E8 DA 96 FF FF
+
+. 0 804F023 47
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E4 00 00 00 00 B8 A4 70 45 41 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F050 2
+. F3 AB
+
+. 0 804F052 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 6F 93 FF FF
+
+. 0 804F081 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 804F08C 51
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 F3 0F 10 45 E4 0F 13 45 B8 0F 17 45 C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 42 94 FF FF
+
+. 0 804F0BF 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804F0C6 17
+. 83 EC 08 B8 00 00 00 00 50 FF 75 BC E8 2A 94 FF FF
+
+. 0 804F0D7 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804F0DE 17
+. 83 EC 08 B8 00 00 00 00 50 FF 75 C0 E8 12 94 FF FF
+
+. 0 804F0EF 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804F0F6 17
+. 83 EC 08 B8 00 00 00 00 50 FF 75 C4 E8 FA 93 FF FF
+
+. 0 804F107 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804F10E 13
+. 83 EC 0C 68 92 6F 05 08 E8 F5 92 FF FF
+
+movss_2 ... ok
+. 0 804F11B 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804F1DD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055949 5
+. E8 97 98 FF FF
+
+. 0 804F1E5 32
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F203 2
+. F3 AB
+
+. 0 804F205 68
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 C7 45 D4 00 00 00 00 B8 8F C2 31 41 89 45 D4 8B 45 D4 89 45 D0 83 EC 08 6A 01 68 C0 92 05 08 E8 A7 91 FF FF
+
+. 0 804F249 7
+. 83 C4 10 85 C0 75 7C
+
+. 0 804F250 43
+. 9B DD B5 58 FF FF FF 0F 12 45 D8 0F 16 45 E0 F3 0F 11 45 D0 DD A5 58 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 D0 E8 86 92 FF FF
+
+. 0 804F27B 7
+. 83 C4 10 85 C0 74 12
+
+. 0 804F282 13
+. 83 EC 0C 68 CA 6F 05 08 E8 81 91 FF FF
+
+movss_3 ... ok
+. 0 804F28F 5
+. 83 C4 10 EB 48
+
+. 0 804F2DC 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805594E 5
+. E8 91 99 FF FF
+
+. 0 804F2E4 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F302 2
+. F3 AB
+
+. 0 804F304 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F336 2
+. F3 AB
+
+. 0 804F338 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 89 90 FF FF
+
+. 0 804F367 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 804F372 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 10 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 56 91 FF FF
+
+. 0 804F3AB 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804F3B2 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 BC E8 3E 91 FF FF
+
+. 0 804F3C3 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804F3CA 17
+. 83 EC 08 B8 0A D7 2C 42 50 FF 75 C0 E8 26 91 FF FF
+
+. 0 804F3DB 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804F3E2 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 C4 E8 0E 91 FF FF
+
+. 0 804F3F3 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804F3FA 13
+. 83 EC 0C 68 02 70 05 08 E8 09 90 FF FF
+
+movups_1 ... ok
+. 0 804F407 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804F4C9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055953 5
+. E8 79 9B FF FF
+
+. 0 804F4D1 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F4EF 2
+. F3 AB
+
+. 0 804F4F1 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F523 2
+. F3 AB
+
+. 0 804F525 47
+. B8 8F C2 31 41 89 45 C8 B8 8F C2 B1 41 89 45 CC B8 EC 51 05 42 89 45 D0 B8 8F C2 31 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 9C 8E FF FF
+
+. 0 804F554 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 804F55F 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 10 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 70 8F FF FF
+
+. 0 804F591 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804F598 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 BC E8 58 8F FF FF
+
+. 0 804F5A9 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804F5B0 17
+. 83 EC 08 B8 0A D7 2C 42 50 FF 75 C0 E8 40 8F FF FF
+
+. 0 804F5C1 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804F5C8 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 C4 E8 28 8F FF FF
+
+. 0 804F5D9 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804F5E0 13
+. 83 EC 0C 68 3D 70 05 08 E8 23 8E FF FF
+
+movups_2 ... ok
+. 0 804F5ED 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804F6AF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055958 5
+. E8 5A 9D FF FF
+
+. 0 804F6B7 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F6D5 2
+. F3 AB
+
+. 0 804F6D7 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F709 2
+. F3 AB
+
+. 0 804F70B 47
+. B8 00 00 A0 40 89 45 C8 B8 00 00 80 40 89 45 CC B8 00 00 40 40 89 45 D0 B8 00 00 00 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 B6 8C FF FF
+
+. 0 804F73A 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 804F745 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 59 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 CD CC 76 42 50 FF 75 B8 E8 83 8D FF FF
+
+. 0 804F77E 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804F785 17
+. 83 EC 08 B8 B8 1E 63 43 50 FF 75 BC E8 6B 8D FF FF
+
+. 0 804F796 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804F79D 17
+. 83 EC 08 B8 48 A1 01 43 50 FF 75 C0 E8 53 8D FF FF
+
+. 0 804F7AE 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804F7B5 17
+. 83 EC 08 B8 CD 4C 2F 43 50 FF 75 C4 E8 3B 8D FF FF
+
+. 0 804F7C6 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804F7CD 13
+. 83 EC 0C 68 78 70 05 08 E8 36 8C FF FF
+
+mulps_1 ... ok
+. 0 804F7DA 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804F89C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805595D 5
+. E8 42 9F FF FF
+
+. 0 804F8A4 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F8C2 2
+. F3 AB
+
+. 0 804F8C4 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804F8F6 2
+. F3 AB
+
+. 0 804F8F8 47
+. B8 00 00 A0 40 89 45 C8 B8 00 00 80 40 89 45 CC B8 00 00 40 40 89 45 D0 B8 00 00 00 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 C9 8A FF FF
+
+. 0 804F927 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 804F932 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 59 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 CD CC 76 42 50 FF 75 B8 E8 9D 8B FF FF
+
+. 0 804F964 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804F96B 17
+. 83 EC 08 B8 B8 1E 63 43 50 FF 75 BC E8 85 8B FF FF
+
+. 0 804F97C 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804F983 17
+. 83 EC 08 B8 48 A1 01 43 50 FF 75 C0 E8 6D 8B FF FF
+
+. 0 804F994 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804F99B 17
+. 83 EC 08 B8 CD 4C 2F 43 50 FF 75 C4 E8 55 8B FF FF
+
+. 0 804F9AC 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804F9B3 13
+. 83 EC 0C 68 B0 70 05 08 E8 50 8A FF FF
+
+mulps_2 ... ok
+. 0 804F9C0 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804FA82 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055962 5
+. E8 23 A1 FF FF
+
+. 0 804FA8A 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804FAA8 2
+. F3 AB
+
+. 0 804FAAA 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804FADC 2
+. F3 AB
+
+. 0 804FADE 47
+. B8 00 00 A0 40 89 45 C8 B8 00 00 80 40 89 45 CC B8 00 00 40 40 89 45 D0 B8 00 00 00 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 E3 88 FF FF
+
+. 0 804FB0D 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 804FB18 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 59 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 CD CC 76 42 50 FF 75 B8 E8 AF 89 FF FF
+
+. 0 804FB52 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804FB59 17
+. 83 EC 08 B8 00 00 80 40 50 FF 75 BC E8 97 89 FF FF
+
+. 0 804FB6A 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804FB71 17
+. 83 EC 08 B8 00 00 40 40 50 FF 75 C0 E8 7F 89 FF FF
+
+. 0 804FB82 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804FB89 17
+. 83 EC 08 B8 00 00 00 40 50 FF 75 C4 E8 67 89 FF FF
+
+. 0 804FB9A 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804FBA1 13
+. 83 EC 0C 68 E8 70 05 08 E8 62 88 FF FF
+
+mulss_1 ... ok
+. 0 804FBAE 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804FC70 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055967 5
+. E8 0C A3 FF FF
+
+. 0 804FC78 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804FC96 2
+. F3 AB
+
+. 0 804FC98 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804FCCA 2
+. F3 AB
+
+. 0 804FCCC 47
+. B8 00 00 A0 40 89 45 C8 B8 00 00 80 40 89 45 CC B8 00 00 40 40 89 45 D0 B8 00 00 00 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 F5 86 FF FF
+
+. 0 804FCFB 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 804FD06 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F 59 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 CD CC 76 42 50 FF 75 B8 E8 C8 87 FF FF
+
+. 0 804FD39 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 804FD40 17
+. 83 EC 08 B8 00 00 80 40 50 FF 75 BC E8 B0 87 FF FF
+
+. 0 804FD51 7
+. 83 C4 10 85 C0 74 45
+
+. 0 804FD58 17
+. 83 EC 08 B8 00 00 40 40 50 FF 75 C0 E8 98 87 FF FF
+
+. 0 804FD69 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 804FD70 17
+. 83 EC 08 B8 00 00 00 40 50 FF 75 C4 E8 80 87 FF FF
+
+. 0 804FD81 7
+. 83 C4 10 85 C0 74 15
+
+. 0 804FD88 13
+. 83 EC 0C 68 20 71 05 08 E8 7B 86 FF FF
+
+mulss_2 ... ok
+. 0 804FD95 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 804FE57 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805596C 5
+. E8 EE A4 FF FF
+
+. 0 804FE5F 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804FE7D 2
+. F3 AB
+
+. 0 804FE7F 48
+. C7 45 D8 20 64 A8 EC C7 45 DC 31 75 B9 FD C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804FEAD 2
+. F3 AB
+
+. 0 804FEAF 43
+. C7 45 C8 EF CD AB 89 C7 45 CC 67 45 23 01 C7 45 D0 20 64 A8 EC C7 45 D4 31 75 B9 FD 83 EC 08 6A 01 68 C0 92 05 08 E8 16 85 FF FF
+
+. 0 804FEDA 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 804FEE5 63
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 56 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 8B 45 BC 35 77 75 BB FD 8B 55 B8 81 F2 EF ED AB ED 09 D0 85 C0 75 29
+
+. 0 804FF24 23
+. 8B 45 C4 35 77 75 BB FD 8B 55 C0 81 F2 EF ED AB ED 09 D0 85 C0 75 12
+
+. 0 804FF3B 13
+. 83 EC 0C 68 58 71 05 08 E8 C8 84 FF FF
+
+orps_1 ... ok
+. 0 804FF48 5
+. 83 C4 10 EB 62
+
+. 0 804FFAF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055971 5
+. E8 41 A6 FF FF
+
+. 0 804FFB7 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 804FFD5 2
+. F3 AB
+
+. 0 804FFD7 48
+. C7 45 D8 20 64 A8 EC C7 45 DC 31 75 B9 FD C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8050005 2
+. F3 AB
+
+. 0 8050007 43
+. C7 45 C8 EF CD AB 89 C7 45 CC 67 45 23 01 C7 45 D0 20 64 A8 EC C7 45 D4 31 75 B9 FD 83 EC 08 6A 01 68 C0 92 05 08 E8 BE 83 FF FF
+
+. 0 8050032 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 805003D 56
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 56 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 8B 45 BC 35 77 75 BB FD 8B 55 B8 81 F2 EF ED AB ED 09 D0 85 C0 75 29
+
+. 0 8050075 23
+. 8B 45 C4 35 77 75 BB FD 8B 55 C0 81 F2 EF ED AB ED 09 D0 85 C0 75 12
+
+. 0 805008C 13
+. 83 EC 0C 68 8D 71 05 08 E8 77 83 FF FF
+
+orps_2 ... ok
+. 0 8050099 5
+. 83 C4 10 EB 62
+
+. 0 8050100 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055976 5
+. E8 8D A7 FF FF
+
+. 0 8050108 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0B C6 45 E1 16 C6 45 E2 21 C6 45 E3 2C C6 45 E4 37 C6 45 E5 42 C6 45 E6 4D C6 45 E7 58 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0F C6 45 D9 19 C6 45 DA 23 C6 45 DB 2D C6 45 DC 37 C6 45 DD 41 C6 45 DE 4B C6 45 DF 55 83 EC 08 6A 01 68 C0 92 05 08 E8 71 82 FF FF
+
+. 0 805017F 11
+. 83 C4 10 85 C0 0F 85 4B 01 00 00
+
+. 0 805018A 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F E0 C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 0D 75 3F
+
+. 0 80501AC 6
+. 80 7D D1 18 75 39
+
+. 0 80501B2 6
+. 80 7D D2 22 75 33
+
+. 0 80501B8 6
+. 80 7D D3 2D 75 2D
+
+. 0 80501BE 6
+. 80 7D D4 37 75 27
+
+. 0 80501C4 6
+. 80 7D D5 42 75 21
+
+. 0 80501CA 6
+. 80 7D D6 4C 75 1B
+
+. 0 80501D0 6
+. 80 7D D7 57 75 15
+
+. 0 80501D6 13
+. 83 EC 0C 68 C2 71 05 08 E8 2D 82 FF FF
+
+pavgb_1 ... ok
+. 0 80501E3 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 80502E5 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805597B 5
+. E8 6D A9 FF FF
+
+. 0 80502ED 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 0B C6 45 E1 16 C6 45 E2 21 C6 45 E3 2C C6 45 E4 37 C6 45 E5 42 C6 45 E6 4D C6 45 E7 58 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 0F C6 45 D9 19 C6 45 DA 23 C6 45 DB 2D C6 45 DC 37 C6 45 DD 41 C6 45 DE 4B C6 45 DF 55 83 EC 08 6A 01 68 C0 92 05 08 E8 8C 80 FF FF
+
+. 0 8050364 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 805036F 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F E0 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 0D 75 3F
+
+. 0 805038E 6
+. 80 7D D1 18 75 39
+
+. 0 8050394 6
+. 80 7D D2 22 75 33
+
+. 0 805039A 6
+. 80 7D D3 2D 75 2D
+
+. 0 80503A0 6
+. 80 7D D4 37 75 27
+
+. 0 80503A6 6
+. 80 7D D5 42 75 21
+
+. 0 80503AC 6
+. 80 7D D6 4C 75 1B
+
+. 0 80503B2 6
+. 80 7D D7 57 75 15
+
+. 0 80503B8 13
+. 83 EC 0C 68 1C 73 05 08 E8 4B 80 FF FF
+
+pavgb_2 ... ok
+. 0 80503C5 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 80504C7 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055980 5
+. E8 4A AB FF FF
+
+. 0 80504CF 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 62 04 66 C7 45 E2 10 0D 66 C7 45 E4 BE 15 66 C7 45 E6 6C 1E C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 F5 05 66 C7 45 DA D9 0D 66 C7 45 DC BD 15 66 C7 45 DE A1 1D 83 EC 08 6A 01 68 C0 92 05 08 E8 BA 7E FF FF
+
+. 0 8050536 11
+. 83 C4 10 85 C0 0F 85 DD 00 00 00
+
+. 0 8050541 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F E3 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 2C 05 75 2D
+
+. 0 8050565 8
+. 66 81 7D D2 75 0D 75 25
+
+. 0 805056D 8
+. 66 81 7D D4 BE 15 75 1D
+
+. 0 8050575 8
+. 66 81 7D D6 07 1E 75 15
+
+. 0 805057D 13
+. 83 EC 0C 68 54 73 05 08 E8 86 7E FF FF
+
+pavgw_1 ... ok
+. 0 805058A 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 805062E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055985 5
+. E8 AC AC FF FF
+
+. 0 8050636 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 62 04 66 C7 45 E2 10 0D 66 C7 45 E4 BE 15 66 C7 45 E6 6C 1E C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 F5 05 66 C7 45 DA D9 0D 66 C7 45 DC BD 15 66 C7 45 DE A1 1D 83 EC 08 6A 01 68 C0 92 05 08 E8 53 7D FF FF
+
+. 0 805069D 11
+. 83 C4 10 85 C0 0F 85 DA 00 00 00
+
+. 0 80506A8 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F E3 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 81 7D D0 2C 05 75 2D
+
+. 0 80506C9 8
+. 66 81 7D D2 75 0D 75 25
+
+. 0 80506D1 8
+. 66 81 7D D4 BE 15 75 1D
+
+. 0 80506D9 8
+. 66 81 7D D6 07 1E 75 15
+
+. 0 80506E1 13
+. 83 EC 0C 68 1C 74 05 08 E8 22 7D FF FF
+
+pavgw_2 ... ok
+. 0 80506EE 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 8050792 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805598A 5
+. E8 0B AE FF FF
+
+. 0 805079A 79
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 E1 10 66 C7 45 E6 3D 22 C7 45 DC 00 00 00 00 C7 45 DC FF FF FF FF 83 EC 08 6A 01 68 C0 92 05 08 E8 07 7C FF FF
+
+. 0 80507E9 7
+. 83 C4 10 85 C0 75 60
+
+. 0 80507F0 36
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 8B 45 DC 0F C5 C0 00 89 45 D8 DD A5 68 FF FF FF 81 7D D8 D2 04 00 00 75 12
+
+. 0 8050814 13
+. 83 EC 0C 68 54 74 05 08 E8 EF 7B FF FF
+
+pextrw_1 ... ok
+. 0 8050821 5
+. 83 C4 10 EB 3A
+
+. 0 8050860 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805598F 5
+. E8 D4 AE FF FF
+
+. 0 8050868 79
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 E1 10 66 C7 45 E6 3D 22 C7 45 DC 00 00 00 00 C7 45 DC FF FF FF FF 83 EC 08 6A 01 68 C0 92 05 08 E8 39 7B FF FF
+
+. 0 80508B7 7
+. 83 C4 10 85 C0 75 60
+
+. 0 80508BE 36
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 8B 45 DC 0F C5 C0 01 89 45 D8 DD A5 68 FF FF FF 81 7D D8 2E 16 00 00 75 12
+
+. 0 80508E2 13
+. 83 EC 0C 68 8F 74 05 08 E8 21 7B FF FF
+
+pextrw_2 ... ok
+. 0 80508EF 5
+. 83 C4 10 EB 3A
+
+. 0 805092E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055994 5
+. E8 9D AF FF FF
+
+. 0 8050936 79
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 E1 10 66 C7 45 E6 3D 22 C7 45 DC 00 00 00 00 C7 45 DC FF FF FF FF 83 EC 08 6A 01 68 C0 92 05 08 E8 6B 7A FF FF
+
+. 0 8050985 7
+. 83 C4 10 85 C0 75 60
+
+. 0 805098C 36
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 8B 45 DC 0F C5 C0 02 89 45 D8 DD A5 68 FF FF FF 81 7D D8 E1 10 00 00 75 12
+
+. 0 80509B0 13
+. 83 EC 0C 68 CA 74 05 08 E8 53 7A FF FF
+
+pextrw_3 ... ok
+. 0 80509BD 5
+. 83 C4 10 EB 3A
+
+. 0 80509FC 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055999 5
+. E8 66 B0 FF FF
+
+. 0 8050A04 79
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 D2 04 66 C7 45 E2 2E 16 66 C7 45 E4 E1 10 66 C7 45 E6 3D 22 C7 45 DC 00 00 00 00 C7 45 DC FF FF FF FF 83 EC 08 6A 01 68 C0 92 05 08 E8 9D 79 FF FF
+
+. 0 8050A53 7
+. 83 C4 10 85 C0 75 60
+
+. 0 8050A5A 36
+. 9B DD B5 68 FF FF FF 0F 6F 45 E0 8B 45 DC 0F C5 C0 03 89 45 D8 DD A5 68 FF FF FF 81 7D D8 3D 22 00 00 75 12
+
+. 0 8050A7E 13
+. 83 EC 0C 68 05 75 05 08 E8 85 79 FF FF
+
+pextrw_4 ... ok
+. 0 8050A8B 5
+. 83 C4 10 EB 3A
+
+. 0 8050ACA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 805599E 5
+. E8 2F B1 FF FF
+
+. 0 8050AD2 79
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 FF FF FF FF C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 D2 04 66 C7 45 DA 2E 16 66 C7 45 DC E1 10 66 C7 45 DE 3D 22 83 EC 08 6A 01 68 C0 92 05 08 E8 CF 78 FF FF
+
+. 0 8050B21 11
+. 83 C4 10 85 C0 0F 85 DC 00 00 00
+
+. 0 8050B2C 35
+. 9B DD B5 58 FF FF FF 8B 45 E4 0F 6F 45 D8 0F C4 C0 00 0F 7F 45 D0 DD A5 58 FF FF FF 66 83 7D D0 FF 75 2D
+
+. 0 8050B4F 8
+. 66 81 7D D2 2E 16 75 25
+
+. 0 8050B57 8
+. 66 81 7D D4 E1 10 75 1D
+
+. 0 8050B5F 8
+. 66 81 7D D6 3D 22 75 15
+
+. 0 8050B67 13
+. 83 EC 0C 68 40 75 05 08 E8 9C 78 FF FF
+
+pinsrw_1 ... ok
+. 0 8050B74 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 8050C18 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559A3 5
+. E8 78 B2 FF FF
+
+. 0 8050C20 79
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 FF FF FF FF C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 D2 04 66 C7 45 DA 2E 16 66 C7 45 DC E1 10 66 C7 45 DE 3D 22 83 EC 08 6A 01 68 C0 92 05 08 E8 81 77 FF FF
+
+. 0 8050C6F 11
+. 83 C4 10 85 C0 0F 85 DC 00 00 00
+
+. 0 8050C7A 36
+. 9B DD B5 58 FF FF FF 8B 45 E4 0F 6F 45 D8 0F C4 C0 01 0F 7F 45 D0 DD A5 58 FF FF FF 66 81 7D D0 D2 04 75 2C
+
+. 0 8050C9E 7
+. 66 83 7D D2 FF 75 25
+
+. 0 8050CA5 8
+. 66 81 7D D4 E1 10 75 1D
+
+. 0 8050CAD 8
+. 66 81 7D D6 3D 22 75 15
+
+. 0 8050CB5 13
+. 83 EC 0C 68 7B 75 05 08 E8 4E 77 FF FF
+
+pinsrw_2 ... ok
+. 0 8050CC2 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 8050D66 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559A8 5
+. E8 C1 B3 FF FF
+
+. 0 8050D6E 79
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 FF FF FF FF C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 D2 04 66 C7 45 DA 2E 16 66 C7 45 DC E1 10 66 C7 45 DE 3D 22 83 EC 08 6A 01 68 C0 92 05 08 E8 33 76 FF FF
+
+. 0 8050DBD 11
+. 83 C4 10 85 C0 0F 85 DC 00 00 00
+
+. 0 8050DC8 36
+. 9B DD B5 58 FF FF FF 8B 45 E4 0F 6F 45 D8 0F C4 C0 02 0F 7F 45 D0 DD A5 58 FF FF FF 66 81 7D D0 D2 04 75 2C
+
+. 0 8050DEC 8
+. 66 81 7D D2 2E 16 75 24
+
+. 0 8050DF4 7
+. 66 83 7D D4 FF 75 1D
+
+. 0 8050DFB 8
+. 66 81 7D D6 3D 22 75 15
+
+. 0 8050E03 13
+. 83 EC 0C 68 B6 75 05 08 E8 00 76 FF FF
+
+pinsrw_3 ... ok
+. 0 8050E10 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 8050EB4 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559AD 5
+. E8 0A B5 FF FF
+
+. 0 8050EBC 79
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E4 00 00 00 00 C7 45 E4 FF FF FF FF C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 D2 04 66 C7 45 DA 2E 16 66 C7 45 DC E1 10 66 C7 45 DE 3D 22 83 EC 08 6A 01 68 C0 92 05 08 E8 E5 74 FF FF
+
+. 0 8050F0B 11
+. 83 C4 10 85 C0 0F 85 DC 00 00 00
+
+. 0 8050F16 36
+. 9B DD B5 58 FF FF FF 8B 45 E4 0F 6F 45 D8 0F C4 C0 03 0F 7F 45 D0 DD A5 58 FF FF FF 66 81 7D D0 D2 04 75 2C
+
+. 0 8050F3A 8
+. 66 81 7D D2 2E 16 75 24
+
+. 0 8050F42 8
+. 66 81 7D D4 E1 10 75 1C
+
+. 0 8050F4A 7
+. 66 83 7D D6 FF 75 15
+
+. 0 8050F51 13
+. 83 EC 0C 68 F1 75 05 08 E8 B2 74 FF FF
+
+pinsrw_4 ... ok
+. 0 8050F5E 8
+. 83 C4 10 E9 9C 00 00 00
+
+. 0 8051002 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559B2 5
+. E8 53 B6 FF FF
+
+. 0 805100A 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 FF FF 66 C7 45 E2 02 00 66 C7 45 E4 FD FF 66 C7 45 E6 04 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 02 00 66 C7 45 DA FD FF 66 C7 45 DC 04 00 66 C7 45 DE FB FF 83 EC 08 6A 01 68 C0 92 05 08 E8 7F 73 FF FF
+
+. 0 8051071 11
+. 83 C4 10 85 C0 0F 85 B8 00 00 00
+
+. 0 805107C 35
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F EE C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 02 75 27
+
+. 0 805109F 7
+. 66 83 7D D2 02 75 20
+
+. 0 80510A6 7
+. 66 83 7D D4 04 75 19
+
+. 0 80510AD 7
+. 66 83 7D D6 04 75 12
+
+. 0 80510B4 13
+. 83 EC 0C 68 2C 76 05 08 E8 4F 73 FF FF
+
+pmaxsw_1 ... ok
+. 0 80510C1 5
+. 83 C4 10 EB 7E
+
+. 0 8051144 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559B7 5
+. E8 90 B7 FF FF
+
+. 0 805114C 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 FF FF 66 C7 45 E2 02 00 66 C7 45 E4 FD FF 66 C7 45 E6 04 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 02 00 66 C7 45 DA FD FF 66 C7 45 DC 04 00 66 C7 45 DE FB FF 83 EC 08 6A 01 68 C0 92 05 08 E8 3D 72 FF FF
+
+. 0 80511B3 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 80511BE 32
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F EE 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 02 75 27
+
+. 0 80511DE 7
+. 66 83 7D D2 02 75 20
+
+. 0 80511E5 7
+. 66 83 7D D4 04 75 19
+
+. 0 80511EC 7
+. 66 83 7D D6 04 75 12
+
+. 0 80511F3 13
+. 83 EC 0C 68 F9 76 05 08 E8 10 72 FF FF
+
+pmaxsw_2 ... ok
+. 0 8051200 5
+. 83 C4 10 EB 7E
+
+. 0 8051283 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559BC 5
+. E8 CA B8 FF FF
+
+. 0 805128B 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 01 C6 45 E1 02 C6 45 E2 03 C6 45 E3 04 C6 45 E4 05 C6 45 E5 06 C6 45 E6 07 C6 45 E7 08 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 08 C6 45 D9 07 C6 45 DA 06 C6 45 DB 05 C6 45 DC 04 C6 45 DD 03 C6 45 DE 02 C6 45 DF 01 83 EC 08 6A 01 68 C0 92 05 08 E8 EE 70 FF FF
+
+. 0 8051302 11
+. 83 C4 10 85 C0 0F 85 4B 01 00 00
+
+. 0 805130D 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F DE C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 08 75 3F
+
+. 0 805132F 6
+. 80 7D D1 07 75 39
+
+. 0 8051335 6
+. 80 7D D2 06 75 33
+
+. 0 805133B 6
+. 80 7D D3 05 75 2D
+
+. 0 8051341 6
+. 80 7D D4 05 75 27
+
+. 0 8051347 6
+. 80 7D D5 06 75 21
+
+. 0 805134D 6
+. 80 7D D6 07 75 1B
+
+. 0 8051353 6
+. 80 7D D7 08 75 15
+
+. 0 8051359 13
+. 83 EC 0C 68 34 77 05 08 E8 AA 70 FF FF
+
+pmaxub_1 ... ok
+. 0 8051366 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 8051468 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559C1 5
+. E8 AA BA FF FF
+
+. 0 8051470 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 01 C6 45 E1 02 C6 45 E2 03 C6 45 E3 04 C6 45 E4 05 C6 45 E5 06 C6 45 E6 07 C6 45 E7 08 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 08 C6 45 D9 07 C6 45 DA 06 C6 45 DB 05 C6 45 DC 04 C6 45 DD 03 C6 45 DE 02 C6 45 DF 01 83 EC 08 6A 01 68 C0 92 05 08 E8 09 6F FF FF
+
+. 0 80514E7 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 80514F2 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F DE 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 08 75 3F
+
+. 0 8051511 6
+. 80 7D D1 07 75 39
+
+. 0 8051517 6
+. 80 7D D2 06 75 33
+
+. 0 805151D 6
+. 80 7D D3 05 75 2D
+
+. 0 8051523 6
+. 80 7D D4 05 75 27
+
+. 0 8051529 6
+. 80 7D D5 06 75 21
+
+. 0 805152F 6
+. 80 7D D6 07 75 1B
+
+. 0 8051535 6
+. 80 7D D7 08 75 15
+
+. 0 805153B 13
+. 83 EC 0C 68 6F 77 05 08 E8 C8 6E FF FF
+
+pmaxub_2 ... ok
+. 0 8051548 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 805164A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559C6 5
+. E8 87 BC FF FF
+
+. 0 8051652 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 FF FF 66 C7 45 E2 02 00 66 C7 45 E4 FD FF 66 C7 45 E6 04 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 02 00 66 C7 45 DA FD FF 66 C7 45 DC 04 00 66 C7 45 DE FB FF 83 EC 08 6A 01 68 C0 92 05 08 E8 37 6D FF FF
+
+. 0 80516B9 11
+. 83 C4 10 85 C0 0F 85 B8 00 00 00
+
+. 0 80516C4 35
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F EA C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 FF 75 27
+
+. 0 80516E7 7
+. 66 83 7D D2 FD 75 20
+
+. 0 80516EE 7
+. 66 83 7D D4 FD 75 19
+
+. 0 80516F5 7
+. 66 83 7D D6 FB 75 12
+
+. 0 80516FC 13
+. 83 EC 0C 68 AA 77 05 08 E8 07 6D FF FF
+
+pminsw_1 ... ok
+. 0 8051709 5
+. 83 C4 10 EB 7E
+
+. 0 805178C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559CB 5
+. E8 C4 BD FF FF
+
+. 0 8051794 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 FF FF 66 C7 45 E2 02 00 66 C7 45 E4 FD FF 66 C7 45 E6 04 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 02 00 66 C7 45 DA FD FF 66 C7 45 DC 04 00 66 C7 45 DE FB FF 83 EC 08 6A 01 68 C0 92 05 08 E8 F5 6B FF FF
+
+. 0 80517FB 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 8051806 32
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F EA 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 FF 75 27
+
+. 0 8051826 7
+. 66 83 7D D2 FD 75 20
+
+. 0 805182D 7
+. 66 83 7D D4 FD 75 19
+
+. 0 8051834 7
+. 66 83 7D D6 FB 75 12
+
+. 0 805183B 13
+. 83 EC 0C 68 E5 77 05 08 E8 C8 6B FF FF
+
+pminsw_2 ... ok
+. 0 8051848 5
+. 83 C4 10 EB 7E
+
+. 0 80518CB 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559D0 5
+. E8 FE BE FF FF
+
+. 0 80518D3 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 01 C6 45 E1 02 C6 45 E2 03 C6 45 E3 04 C6 45 E4 05 C6 45 E5 06 C6 45 E6 07 C6 45 E7 08 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 08 C6 45 D9 07 C6 45 DA 06 C6 45 DB 05 C6 45 DC 04 C6 45 DD 03 C6 45 DE 02 C6 45 DF 01 83 EC 08 6A 01 68 C0 92 05 08 E8 A6 6A FF FF
+
+. 0 805194A 11
+. 83 C4 10 85 C0 0F 85 4B 01 00 00
+
+. 0 8051955 34
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F DA C8 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 01 75 3F
+
+. 0 8051977 6
+. 80 7D D1 02 75 39
+
+. 0 805197D 6
+. 80 7D D2 03 75 33
+
+. 0 8051983 6
+. 80 7D D3 04 75 2D
+
+. 0 8051989 6
+. 80 7D D4 04 75 27
+
+. 0 805198F 6
+. 80 7D D5 03 75 21
+
+. 0 8051995 6
+. 80 7D D6 02 75 1B
+
+. 0 805199B 6
+. 80 7D D7 01 75 15
+
+. 0 80519A1 13
+. 83 EC 0C 68 20 78 05 08 E8 62 6A FF FF
+
+pminub_1 ... ok
+. 0 80519AE 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 8051AB0 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559D5 5
+. E8 DE C0 FF FF
+
+. 0 8051AB8 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 01 C6 45 E1 02 C6 45 E2 03 C6 45 E3 04 C6 45 E4 05 C6 45 E5 06 C6 45 E6 07 C6 45 E7 08 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 08 C6 45 D9 07 C6 45 DA 06 C6 45 DB 05 C6 45 DC 04 C6 45 DD 03 C6 45 DE 02 C6 45 DF 01 83 EC 08 6A 01 68 C0 92 05 08 E8 C1 68 FF FF
+
+. 0 8051B2F 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 8051B3A 31
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F DA 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 80 7D D0 01 75 3F
+
+. 0 8051B59 6
+. 80 7D D1 02 75 39
+
+. 0 8051B5F 6
+. 80 7D D2 03 75 33
+
+. 0 8051B65 6
+. 80 7D D3 04 75 2D
+
+. 0 8051B6B 6
+. 80 7D D4 04 75 27
+
+. 0 8051B71 6
+. 80 7D D5 03 75 21
+
+. 0 8051B77 6
+. 80 7D D6 02 75 1B
+
+. 0 8051B7D 6
+. 80 7D D7 01 75 15
+
+. 0 8051B83 13
+. 83 EC 0C 68 5B 78 05 08 E8 80 68 FF FF
+
+pminub_2 ... ok
+. 0 8051B90 8
+. 83 C4 10 E9 FA 00 00 00
+
+. 0 8051C92 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559DA 5
+. E8 BB C2 FF FF
+
+. 0 8051C9A 62
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C7 45 E0 88 80 00 80 C7 45 E4 00 00 00 80 C7 45 DC 00 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 18 67 FF FF
+
+. 0 8051CD8 7
+. 83 C4 10 85 C0 75 5F
+
+. 0 8051CDF 35
+. 9B DD B5 68 FF FF FF 0F 6F 6D E0 8B 45 DC 0F D7 C5 89 45 D8 DD A5 68 FF FF FF 81 7D D8 8B 00 00 00 75 12
+
+. 0 8051D02 13
+. 83 EC 0C 68 96 78 05 08 E8 01 67 FF FF
+
+pmovmskb_1 ... ok
+. 0 8051D0F 5
+. 83 C4 10 EB 3A
+
+. 0 8051D4E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559DF 5
+. E8 72 C3 FF FF
+
+. 0 8051D56 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 57 04 66 C7 45 E2 AE 08 66 C7 45 E4 05 0D 66 C7 45 E6 5C 11 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 B3 15 66 C7 45 DA 0A 1A 66 C7 45 DC 61 1E 66 C7 45 DE B8 22 83 EC 08 6A 01 68 C0 92 05 08 E8 33 66 FF FF
+
+. 0 8051DBD 11
+. 83 C4 10 85 C0 0F 85 D9 00 00 00
+
+. 0 8051DC8 35
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F E4 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 5E 75 2D
+
+. 0 8051DEB 8
+. 66 81 7D D2 E2 00 75 25
+
+. 0 8051DF3 8
+. 66 81 7D D4 8B 01 75 1D
+
+. 0 8051DFB 8
+. 66 81 7D D6 5A 02 75 15
+
+. 0 8051E03 13
+. 83 EC 0C 68 D7 78 05 08 E8 00 66 FF FF
+
+pmulhuw_1 ... ok
+. 0 8051E10 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 8051EB1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559E4 5
+. E8 D0 C4 FF FF
+
+. 0 8051EB9 103
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 57 04 66 C7 45 E2 AE 08 66 C7 45 E4 05 0D 66 C7 45 E6 5C 11 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 66 C7 45 D8 B3 15 66 C7 45 DA 0A 1A 66 C7 45 DC 61 1E 66 C7 45 DE B8 22 83 EC 08 6A 01 68 C0 92 05 08 E8 D0 64 FF FF
+
+. 0 8051F20 11
+. 83 C4 10 85 C0 0F 85 D6 00 00 00
+
+. 0 8051F2B 32
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F E4 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 5E 75 2D
+
+. 0 8051F4B 8
+. 66 81 7D D2 E2 00 75 25
+
+. 0 8051F53 8
+. 66 81 7D D4 8B 01 75 1D
+
+. 0 8051F5B 8
+. 66 81 7D D6 5A 02 75 15
+
+. 0 8051F63 13
+. 83 EC 0C 68 15 79 05 08 E8 A0 64 FF FF
+
+pmulhuw_2 ... ok
+. 0 8051F70 8
+. 83 C4 10 E9 99 00 00 00
+
+. 0 8052011 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559E9 5
+. E8 2B C6 FF FF
+
+. 0 8052019 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 01 C6 45 E1 02 C6 45 E2 03 C6 45 E3 04 C6 45 E4 05 C6 45 E5 06 C6 45 E6 07 C6 45 E7 08 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 08 C6 45 D9 07 C6 45 DA 06 C6 45 DB 05 C6 45 DC 04 C6 45 DD 03 C6 45 DE 02 C6 45 DF 01 83 EC 08 6A 01 68 C0 92 05 08 E8 60 63 FF FF
+
+. 0 8052090 11
+. 83 C4 10 85 C0 0F 85 B8 00 00 00
+
+. 0 805209B 35
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F F6 C8 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 20 75 27
+
+. 0 80520BE 7
+. 66 83 7D D2 00 75 20
+
+. 0 80520C5 7
+. 66 83 7D D4 00 75 19
+
+. 0 80520CC 7
+. 66 83 7D D6 00 75 12
+
+. 0 80520D3 13
+. 83 EC 0C 68 53 79 05 08 E8 30 63 FF FF
+
+psadbw_1 ... ok
+. 0 80520E0 5
+. 83 C4 10 EB 7E
+
+. 0 8052163 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559EE 5
+. E8 78 C7 FF FF
+
+. 0 805216B 119
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 C6 45 E0 01 C6 45 E1 02 C6 45 E2 03 C6 45 E3 04 C6 45 E4 05 C6 45 E5 06 C6 45 E6 07 C6 45 E7 08 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 C6 45 D8 08 C6 45 D9 07 C6 45 DA 06 C6 45 DB 05 C6 45 DC 04 C6 45 DD 03 C6 45 DE 02 C6 45 DF 01 83 EC 08 6A 01 68 C0 92 05 08 E8 0E 62 FF FF
+
+. 0 80521E2 11
+. 83 C4 10 85 C0 0F 85 B5 00 00 00
+
+. 0 80521ED 32
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F F6 4D E0 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 20 75 27
+
+. 0 805220D 7
+. 66 83 7D D2 00 75 20
+
+. 0 8052214 7
+. 66 83 7D D4 00 75 19
+
+. 0 805221B 7
+. 66 83 7D D6 00 75 12
+
+. 0 8052222 13
+. 83 EC 0C 68 8E 79 05 08 E8 E1 61 FF FF
+
+psadbw_2 ... ok
+. 0 805222F 5
+. 83 C4 10 EB 7E
+
+. 0 80522B2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559F3 5
+. E8 C2 C8 FF FF
+
+. 0 80522BA 79
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 0B 00 66 C7 45 E2 16 00 66 C7 45 E4 21 00 66 C7 45 E6 2C 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 E7 60 FF FF
+
+. 0 8052309 11
+. 83 C4 10 85 C0 0F 85 B9 00 00 00
+
+. 0 8052314 36
+. 9B DD B5 58 FF FF FF 0F 6F 45 E0 0F 6F 4D D8 0F 70 C8 1B 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 2C 75 27
+
+. 0 8052338 7
+. 66 83 7D D2 21 75 20
+
+. 0 805233F 7
+. 66 83 7D D4 16 75 19
+
+. 0 8052346 7
+. 66 83 7D D6 0B 75 12
+
+. 0 805234D 13
+. 83 EC 0C 68 C9 79 05 08 E8 B6 60 FF FF
+
+pshufw_1 ... ok
+. 0 805235A 5
+. 83 C4 10 EB 7E
+
+. 0 80523DD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559F8 5
+. E8 E8 C9 FF FF
+
+. 0 80523E5 79
+. 55 89 E5 57 56 53 81 EC 9C 00 00 00 C7 45 E0 00 00 00 00 C7 45 E4 00 00 00 00 66 C7 45 E0 0B 00 66 C7 45 E2 16 00 66 C7 45 E4 21 00 66 C7 45 E6 2C 00 C7 45 D8 00 00 00 00 C7 45 DC 00 00 00 00 83 EC 08 6A 01 68 C0 92 05 08 E8 BC 5F FF FF
+
+. 0 8052434 11
+. 83 C4 10 85 C0 0F 85 B6 00 00 00
+
+. 0 805243F 33
+. 9B DD B5 58 FF FF FF 0F 6F 4D D8 0F 70 4D E0 1B 0F 7F 4D D0 DD A5 58 FF FF FF 66 83 7D D0 2C 75 27
+
+. 0 8052460 7
+. 66 83 7D D2 21 75 20
+
+. 0 8052467 7
+. 66 83 7D D4 16 75 19
+
+. 0 805246E 7
+. 66 83 7D D6 0B 75 12
+
+. 0 8052475 13
+. 83 EC 0C 68 04 7A 05 08 E8 8E 5F FF FF
+
+pshufw_2 ... ok
+. 0 8052482 5
+. 83 C4 10 EB 7E
+
+. 0 8052505 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 80559FD 5
+. E8 0B CB FF FF
+
+. 0 805250D 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 805252B 2
+. F3 AB
+
+. 0 805252D 52
+. B8 00 00 00 40 89 45 D8 B8 00 00 80 40 89 45 DC B8 00 00 00 3F 89 45 E0 B8 00 00 80 3E 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 805255F 2
+. F3 AB
+
+. 0 8052561 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 60 5E FF FF
+
+. 0 8052590 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 805259B 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 53 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 00 3F 50 FF 75 B8 E8 2D 5F FF FF
+
+. 0 8048522 62
+. D9 45 08 D8 65 0C D9 E1 DD 5D F0 8B 45 08 89 45 E4 D9 45 E4 D9 E1 DD 05 C8 5B 05 08 DE C9 DD 5D E8 B8 00 00 00 00 BA 00 00 28 C0 52 50 B8 00 00 00 00 BA 00 00 00 40 52 50 E8 80 FE FF FF
+
+. 0 80483E0 6
+. FF 25 70 92 05 08
+
+. 0 80483E6 10
+. 68 10 00 00 00 E9 C0 FF FF FF
+
+. 0 3A9A8860 20
+. 55 89 E5 53 83 EC 34 DD 45 08 DD 45 10 D9 C9 E8 DC B2 FF FF
+
+. 0 3A9A3B50 4
+. 8B 1C 24 C3
+
+. 0 3A9A8874 26
+. 81 C3 D0 6F 01 00 DD 55 F0 D9 C9 DD 55 E8 DD 5C 24 08 DD 1C 24 E8 72 D6 FF FF
+
+. 0 3A9A5F00 34
+. DD 44 24 0C D9 E5 E8 00 00 00 00 59 81 C1 39 99 01 00 DF E0 88 E2 80 E4 45 80 FC 40 0F 84 CE 00 00 00
+
+. 0 3A9A5F22 9
+. 80 FC 05 0F 84 D5 00 00 00
+
+. 0 3A9A5F2B 5
+. 80 FC 01 74 6A
+
+. 0 3A9A5F30 25
+. DD 44 24 04 83 EC 08 D9 E5 DF E0 88 E6 80 E4 45 80 FC 40 0F 84 87 01 00 00
+
+. 0 3A9A5F49 9
+. 80 FC 05 0F 84 FE 00 00 00
+
+. 0 3A9A5F52 17
+. D9 C9 D9 C0 DF 3C 24 DF 2C 24 DD E9 DF E0 9E 75 4D
+
+. 0 3A9A5F63 9
+. 58 5A 83 CA 00 DD D8 79 0D
+
+. 0 3A9A5F6C 27
+. DC B9 DC BD FF FF F7 D8 83 D2 00 F7 DA DD 81 DC BD FF FF D9 C9 0F AC D0 01 73 06
+
+. 0 3A9A5F8D 10
+. D8 C8 D1 EA 89 C1 09 D1 75 EA
+
+. 0 3A9A5F81 6
+. 0F AC D0 01 73 06
+
+. 0 3A9A5F87 16
+. D9 C9 D8 C9 D9 C9 D8 C8 D1 EA 89 C1 09 D1 75 EA
+
+. 0 3A9A5F97 3
+. DD D8 C3
+
+. 0 3A9A888E 18
+. 8B 83 38 00 00 00 DD 5D E0 83 38 FF 0F 84 40 01 00 00
+
+. 0 3A9A88A0 11
+. DD 45 E8 DD 1C 24 E8 45 29 00 00
+
+. 0 3A9AB1F0 40
+. 55 89 E5 8B 45 08 8B 55 0C 5D 89 D1 89 C2 F7 DA 81 E1 FF FF FF 7F 09 D0 C1 E8 1F 09 C1 B8 00 00 F0 7F 29 C8 C1 E8 1F C3
+
+. 0 3A9A88AB 8
+. 85 C0 0F 85 2D 01 00 00
+
+. 0 3A9A88B3 11
+. DD 45 F0 DD 1C 24 E8 32 29 00 00
+
+. 0 3A9A88BE 4
+. 85 C0 74 23
+
+. 0 3A9A88E5 14
+. DD 45 F0 D9 EE D9 C9 DD E9 DF E0 9E 75 72
+
+. 0 3A9A8965 13
+. DD D8 DD 45 E0 DD 1C 24 E8 AE 28 00 00
+
+. 0 3A9AB220 17
+. 8B 44 24 08 B9 FF FF EF FF 29 C1 31 C8 C1 E8 1F C3
+
+. 0 3A9A8972 4
+. 85 C0 75 3A
+
+. 0 3A9A89B0 14
+. DD 45 E0 D9 EE D9 C9 DA E9 DF E0 9E 75 22
+
+. 0 3A9A89E0 9
+. DD 45 E0 83 C4 34 5B 5D C3
+
+. 0 8048560 20
+. 83 C4 10 DC 4D E8 DD 45 F0 D9 C9 DA E9 DF E0 F6 C4 45 74 02
+
+. 0 80525D4 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 80525DB 17
+. 83 EC 08 B8 00 00 80 3E 50 FF 75 BC E8 15 5F FF FF
+
+. 0 80525EC 7
+. 83 C4 10 85 C0 74 45
+
+. 0 80525F3 17
+. 83 EC 08 B8 00 00 00 40 50 FF 75 C0 E8 FD 5E FF FF
+
+. 0 8052604 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 805260B 17
+. 83 EC 08 B8 00 00 80 40 50 FF 75 C4 E8 E5 5E FF FF
+
+. 0 805261C 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8052623 13
+. 83 EC 0C 68 3F 7A 05 08 E8 E0 5D FF FF
+
+rcpps_1 ... ok
+. 0 8052630 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 80526F2 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A02 5
+. E8 F3 CC FF FF
+
+. 0 80526FA 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8052718 2
+. F3 AB
+
+. 0 805271A 52
+. B8 00 00 00 40 89 45 D8 B8 00 00 80 40 89 45 DC B8 00 00 00 3F 89 45 E0 B8 00 00 80 3E 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 805274C 2
+. F3 AB
+
+. 0 805274E 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 73 5C FF FF
+
+. 0 805277D 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 8052788 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 53 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 00 3F 50 FF 75 B8 E8 47 5D FF FF
+
+. 0 80527BA 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 80527C1 17
+. 83 EC 08 B8 00 00 80 3E 50 FF 75 BC E8 2F 5D FF FF
+
+. 0 80527D2 7
+. 83 C4 10 85 C0 74 45
+
+. 0 80527D9 17
+. 83 EC 08 B8 00 00 00 40 50 FF 75 C0 E8 17 5D FF FF
+
+. 0 80527EA 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 80527F1 17
+. 83 EC 08 B8 00 00 80 40 50 FF 75 C4 E8 FF 5C FF FF
+
+. 0 8052802 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8052809 13
+. 83 EC 0C 68 77 7A 05 08 E8 FA 5B FF FF
+
+rcpps_2 ... ok
+. 0 8052816 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 80528D8 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A07 5
+. E8 D4 CE FF FF
+
+. 0 80528E0 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80528FE 2
+. F3 AB
+
+. 0 8052900 52
+. B8 00 00 00 40 89 45 D8 B8 00 00 80 40 89 45 DC B8 00 00 00 3F 89 45 E0 B8 00 00 80 3E 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8052932 2
+. F3 AB
+
+. 0 8052934 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 8D 5A FF FF
+
+. 0 8052963 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 805296E 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 53 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 00 3F 50 FF 75 B8 E8 59 5B FF FF
+
+. 0 80529A8 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 80529AF 17
+. 83 EC 08 B8 7B 14 0E 40 50 FF 75 BC E8 41 5B FF FF
+
+. 0 80529C0 7
+. 83 C4 10 85 C0 74 45
+
+. 0 80529C7 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 29 5B FF FF
+
+. 0 80529D8 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 80529DF 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 C4 E8 11 5B FF FF
+
+. 0 80529F0 7
+. 83 C4 10 85 C0 74 15
+
+. 0 80529F7 13
+. 83 EC 0C 68 AF 7A 05 08 E8 0C 5A FF FF
+
+rcpss_1 ... ok
+. 0 8052A04 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8052AC6 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A0C 5
+. E8 BD D0 FF FF
+
+. 0 8052ACE 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8052AEC 2
+. F3 AB
+
+. 0 8052AEE 52
+. B8 00 00 00 40 89 45 D8 B8 00 00 80 40 89 45 DC B8 00 00 00 3F 89 45 E0 B8 00 00 80 3E 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8052B20 2
+. F3 AB
+
+. 0 8052B22 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 9F 58 FF FF
+
+. 0 8052B51 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 8052B5C 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F 53 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 00 3F 50 FF 75 B8 E8 72 59 FF FF
+
+. 0 8052B8F 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8052B96 17
+. 83 EC 08 B8 7B 14 0E 40 50 FF 75 BC E8 5A 59 FF FF
+
+. 0 8052BA7 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8052BAE 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 42 59 FF FF
+
+. 0 8052BBF 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8052BC6 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 C4 E8 2A 59 FF FF
+
+. 0 8052BD7 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8052BDE 13
+. 83 EC 0C 68 E7 7A 05 08 E8 25 58 FF FF
+
+rcpss_2 ... ok
+. 0 8052BEB 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8052CAD 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A11 5
+. E8 9F D2 FF FF
+
+. 0 8052CB5 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8052CD3 2
+. F3 AB
+
+. 0 8052CD5 52
+. B8 00 00 80 40 89 45 D8 B8 00 00 80 41 89 45 DC B8 00 00 C8 41 89 45 E0 B8 00 00 80 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8052D07 2
+. F3 AB
+
+. 0 8052D09 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 B8 56 FF FF
+
+. 0 8052D38 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 8052D43 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 52 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 02 F0 FF 3E 50 FF 75 B8 E8 85 57 FF FF
+
+. 0 8052D7C 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8052D83 17
+. 83 EC 08 B8 02 F0 7F 3E 50 FF 75 BC E8 6D 57 FF FF
+
+. 0 8052D94 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8052D9B 17
+. 83 EC 08 B8 15 C8 4C 3E 50 FF 75 C0 E8 55 57 FF FF
+
+. 0 8052DAC 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8052DB3 17
+. 83 EC 08 B8 BF EF FF 3D 50 FF 75 C4 E8 3D 57 FF FF
+
+. 0 8052DC4 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8052DCB 13
+. 83 EC 0C 68 1F 7B 05 08 E8 38 56 FF FF
+
+rsqrtps_1 ... ok
+. 0 8052DD8 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8052E9A 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A16 5
+. E8 87 D4 FF FF
+
+. 0 8052EA2 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8052EC0 2
+. F3 AB
+
+. 0 8052EC2 52
+. B8 00 00 80 40 89 45 D8 B8 00 00 80 41 89 45 DC B8 00 00 C8 41 89 45 E0 B8 00 00 80 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8052EF4 2
+. F3 AB
+
+. 0 8052EF6 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 CB 54 FF FF
+
+. 0 8052F25 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 8052F30 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 52 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 02 F0 FF 3E 50 FF 75 B8 E8 9F 55 FF FF
+
+. 0 8052F62 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8052F69 17
+. 83 EC 08 B8 02 F0 7F 3E 50 FF 75 BC E8 87 55 FF FF
+
+. 0 8052F7A 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8052F81 17
+. 83 EC 08 B8 15 C8 4C 3E 50 FF 75 C0 E8 6F 55 FF FF
+
+. 0 8052F92 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8052F99 17
+. 83 EC 08 B8 BF EF FF 3D 50 FF 75 C4 E8 57 55 FF FF
+
+. 0 8052FAA 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8052FB1 13
+. 83 EC 0C 68 5D 7B 05 08 E8 52 54 FF FF
+
+rsqrtps_2 ... ok
+. 0 8052FBE 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8053080 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A1B 5
+. E8 68 D6 FF FF
+
+. 0 8053088 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80530A6 2
+. F3 AB
+
+. 0 80530A8 52
+. B8 00 00 80 41 89 45 D8 B8 9A 99 B1 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 D7 A3 F8 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80530DA 2
+. F3 AB
+
+. 0 80530DC 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 E5 52 FF FF
+
+. 0 805310B 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 8053116 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 52 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 02 F0 7F 3E 50 FF 75 B8 E8 B1 53 FF FF
+
+. 0 8053150 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8053157 17
+. 83 EC 08 B8 7B 14 0E 40 50 FF 75 BC E8 99 53 FF FF
+
+. 0 8053168 7
+. 83 C4 10 85 C0 74 45
+
+. 0 805316F 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 81 53 FF FF
+
+. 0 8053180 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8053187 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 C4 E8 69 53 FF FF
+
+. 0 8053198 7
+. 83 C4 10 85 C0 74 15
+
+. 0 805319F 13
+. 83 EC 0C 68 9B 7B 05 08 E8 64 52 FF FF
+
+rsqrtss_1 ... ok
+. 0 80531AC 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 805326E 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A20 5
+. E8 51 D8 FF FF
+
+. 0 8053276 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8053294 2
+. F3 AB
+
+. 0 8053296 52
+. B8 00 00 80 41 89 45 D8 B8 9A 99 B1 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 D7 A3 F8 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80532C8 2
+. F3 AB
+
+. 0 80532CA 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 F7 50 FF FF
+
+. 0 80532F9 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 8053304 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F 52 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 02 F0 7F 3E 50 FF 75 B8 E8 CA 51 FF FF
+
+. 0 8053337 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 805333E 17
+. 83 EC 08 B8 7B 14 0E 40 50 FF 75 BC E8 B2 51 FF FF
+
+. 0 805334F 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8053356 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 9A 51 FF FF
+
+. 0 8053367 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 805336E 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 C4 E8 82 51 FF FF
+
+. 0 805337F 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8053386 13
+. 83 EC 0C 68 D9 7B 05 08 E8 7D 50 FF FF
+
+rsqrtss_2 ... ok
+. 0 8053393 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8053455 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A25 5
+. E8 33 DA FF FF
+
+. 0 805345D 24
+. 55 89 E5 57 56 53 83 EC 7C 83 EC 08 6A 01 68 C0 92 05 08 E8 7B 4F FF FF
+
+. 0 8053475 7
+. 83 C4 10 85 C0 75 22
+
+. 0 805347C 29
+. 9B DD B5 78 FF FF FF 0F AE F8 DD A5 78 FF FF FF 83 EC 0C 68 17 7C 05 08 E8 77 4F FF FF
+
+sfence_1 ... ok
+. 0 8053499 5
+. 83 C4 10 EB 10
+
+. 0 80534AE 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A2A 5
+. E8 87 DA FF FF
+
+. 0 80534B6 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80534D4 2
+. F3 AB
+
+. 0 80534D6 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8053508 2
+. F3 AB
+
+. 0 805350A 47
+. B8 A4 70 45 41 89 45 C8 B8 B8 1E 63 42 89 45 CC B8 0A D7 2C 42 89 45 D0 B8 CD 4C AF 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 B7 4E FF FF
+
+. 0 8053539 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 8053544 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F C6 C8 E4 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 A4 70 45 41 50 FF 75 B8 E8 83 4F FF FF
+
+. 0 805357E 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8053585 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 BC E8 6B 4F FF FF
+
+. 0 8053596 7
+. 83 C4 10 85 C0 74 45
+
+. 0 805359D 17
+. 83 EC 08 B8 0A D7 2C 42 50 FF 75 C0 E8 53 4F FF FF
+
+. 0 80535AE 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 80535B5 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 C4 E8 3B 4F FF FF
+
+. 0 80535C6 7
+. 83 C4 10 85 C0 74 15
+
+. 0 80535CD 13
+. 83 EC 0C 68 3D 7C 05 08 E8 36 4E FF FF
+
+shufps_1 ... ok
+. 0 80535DA 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 805369C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A2F 5
+. E8 70 DC FF FF
+
+. 0 80536A4 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80536C2 2
+. F3 AB
+
+. 0 80536C4 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80536F6 2
+. F3 AB
+
+. 0 80536F8 47
+. B8 A4 70 45 41 89 45 C8 B8 B8 1E 63 42 89 45 CC B8 0A D7 2C 42 89 45 D0 B8 CD 4C AF 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 C9 4C FF FF
+
+. 0 8053727 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 8053732 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F C6 4D D8 B1 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 B8 1E 63 42 50 FF 75 B8 E8 9C 4D FF FF
+
+. 0 8053765 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 805376C 17
+. 83 EC 08 B8 A4 70 45 41 50 FF 75 BC E8 84 4D FF FF
+
+. 0 805377D 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8053784 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 C0 E8 6C 4D FF FF
+
+. 0 8053795 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 805379C 17
+. 83 EC 08 B8 0A D7 2C 42 50 FF 75 C4 E8 54 4D FF FF
+
+. 0 80537AD 7
+. 83 C4 10 85 C0 74 15
+
+. 0 80537B4 13
+. 83 EC 0C 68 78 7C 05 08 E8 4F 4C FF FF
+
+shufps_2 ... ok
+. 0 80537C1 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8053883 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A34 5
+. E8 52 DE FF FF
+
+. 0 805388B 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80538A9 2
+. F3 AB
+
+. 0 80538AB 52
+. B8 00 00 80 41 89 45 D8 B8 00 00 C8 41 89 45 DC B8 00 00 10 42 89 45 E0 B8 00 00 44 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80538DD 2
+. F3 AB
+
+. 0 80538DF 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 E2 4A FF FF
+
+. 0 805390E 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 8053919 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 51 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 80 40 50 FF 75 B8 E8 AF 4B FF FF
+
+. 0 8053952 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8053959 17
+. 83 EC 08 B8 00 00 A0 40 50 FF 75 BC E8 97 4B FF FF
+
+. 0 805396A 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8053971 17
+. 83 EC 08 B8 00 00 C0 40 50 FF 75 C0 E8 7F 4B FF FF
+
+. 0 8053982 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8053989 17
+. 83 EC 08 B8 00 00 E0 40 50 FF 75 C4 E8 67 4B FF FF
+
+. 0 805399A 7
+. 83 C4 10 85 C0 74 15
+
+. 0 80539A1 13
+. 83 EC 0C 68 B3 7C 05 08 E8 62 4A FF FF
+
+sqrtps_1 ... ok
+. 0 80539AE 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8053A70 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A39 5
+. E8 3A E0 FF FF
+
+. 0 8053A78 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8053A96 2
+. F3 AB
+
+. 0 8053A98 52
+. B8 00 00 80 41 89 45 D8 B8 00 00 C8 41 89 45 DC B8 00 00 10 42 89 45 E0 B8 00 00 44 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8053ACA 2
+. F3 AB
+
+. 0 8053ACC 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 F5 48 FF FF
+
+. 0 8053AFB 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 8053B06 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 51 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 80 40 50 FF 75 B8 E8 C9 49 FF FF
+
+. 0 8053B38 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8053B3F 17
+. 83 EC 08 B8 00 00 A0 40 50 FF 75 BC E8 B1 49 FF FF
+
+. 0 8053B50 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8053B57 17
+. 83 EC 08 B8 00 00 C0 40 50 FF 75 C0 E8 99 49 FF FF
+
+. 0 8053B68 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8053B6F 17
+. 83 EC 08 B8 00 00 E0 40 50 FF 75 C4 E8 81 49 FF FF
+
+. 0 8053B80 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8053B87 13
+. 83 EC 0C 68 EE 7C 05 08 E8 7C 48 FF FF
+
+sqrtps_2 ... ok
+. 0 8053B94 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8053C56 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A3E 5
+. E8 1B E2 FF FF
+
+. 0 8053C5E 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8053C7C 2
+. F3 AB
+
+. 0 8053C7E 52
+. B8 00 00 80 41 89 45 D8 B8 9A 99 B1 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 D7 A3 F8 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8053CB0 2
+. F3 AB
+
+. 0 8053CB2 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 0F 47 FF FF
+
+. 0 8053CE1 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 8053CEC 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 51 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 80 40 50 FF 75 B8 E8 DB 47 FF FF
+
+. 0 8053D26 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8053D2D 17
+. 83 EC 08 B8 7B 14 0E 40 50 FF 75 BC E8 C3 47 FF FF
+
+. 0 8053D3E 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8053D45 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 AB 47 FF FF
+
+. 0 8053D56 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8053D5D 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 C4 E8 93 47 FF FF
+
+. 0 8053D6E 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8053D75 13
+. 83 EC 0C 68 29 7D 05 08 E8 8E 46 FF FF
+
+sqrtss_1 ... ok
+. 0 8053D82 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8053E44 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A43 5
+. E8 04 E4 FF FF
+
+. 0 8053E4C 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8053E6A 2
+. F3 AB
+
+. 0 8053E6C 52
+. B8 00 00 80 41 89 45 D8 B8 9A 99 B1 40 89 45 DC B8 B8 1E D5 40 89 45 E0 B8 D7 A3 F8 40 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8053E9E 2
+. F3 AB
+
+. 0 8053EA0 47
+. B8 7B 14 8E 3F 89 45 C8 B8 7B 14 0E 40 89 45 CC B8 B8 1E 55 40 89 45 D0 B8 7B 14 8E 40 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 21 45 FF FF
+
+. 0 8053ECF 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 8053EDA 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F 51 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 00 00 80 40 50 FF 75 B8 E8 F4 45 FF FF
+
+. 0 8053F0D 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8053F14 17
+. 83 EC 08 B8 7B 14 0E 40 50 FF 75 BC E8 DC 45 FF FF
+
+. 0 8053F25 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8053F2C 17
+. 83 EC 08 B8 B8 1E 55 40 50 FF 75 C0 E8 C4 45 FF FF
+
+. 0 8053F3D 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8053F44 17
+. 83 EC 08 B8 7B 14 8E 40 50 FF 75 C4 E8 AC 45 FF FF
+
+. 0 8053F55 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8053F5C 13
+. 83 EC 0C 68 64 7D 05 08 E8 A7 44 FF FF
+
+sqrtss_2 ... ok
+. 0 8053F69 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 805402B 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A48 5
+. E8 E6 E5 FF FF
+
+. 0 8054033 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054051 2
+. F3 AB
+
+. 0 8054053 52
+. B8 A4 70 45 41 89 45 D8 B8 7B 14 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054085 2
+. F3 AB
+
+. 0 8054087 47
+. B8 00 00 30 42 89 45 C8 B8 00 00 04 42 89 45 CC B8 00 00 B0 41 89 45 D0 B8 00 00 30 41 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 3A 43 FF FF
+
+. 0 80540B6 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 80540C1 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 5C C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 AE 47 FD 41 50 FF 75 B8 E8 07 44 FF FF
+
+. 0 80540FA 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8054101 17
+. 83 EC 08 B8 F6 28 BE C1 50 FF 75 BC E8 EF 43 FF FF
+
+. 0 8054112 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8054119 17
+. 83 EC 08 B8 14 AE A9 C1 50 FF 75 C0 E8 D7 43 FF FF
+
+. 0 805412A 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8054131 17
+. 83 EC 08 B8 CD 4C 99 C2 50 FF 75 C4 E8 BF 43 FF FF
+
+. 0 8054142 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8054149 13
+. 83 EC 0C 68 9F 7D 05 08 E8 BA 42 FF FF
+
+subps_1 ... ok
+. 0 8054156 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8054218 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A4D 5
+. E8 CE E7 FF FF
+
+. 0 8054220 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 805423E 2
+. F3 AB
+
+. 0 8054240 52
+. B8 A4 70 45 41 89 45 D8 B8 7B 14 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054272 2
+. F3 AB
+
+. 0 8054274 47
+. B8 00 00 30 42 89 45 C8 B8 00 00 04 42 89 45 CC B8 00 00 B0 41 89 45 D0 B8 00 00 30 41 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 4D 41 FF FF
+
+. 0 80542A3 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 80542AE 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 5C 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 AE 47 FD 41 50 FF 75 B8 E8 21 42 FF FF
+
+. 0 80542E0 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 80542E7 17
+. 83 EC 08 B8 F6 28 BE C1 50 FF 75 BC E8 09 42 FF FF
+
+. 0 80542F8 7
+. 83 C4 10 85 C0 74 45
+
+. 0 80542FF 17
+. 83 EC 08 B8 14 AE A9 C1 50 FF 75 C0 E8 F1 41 FF FF
+
+. 0 8054310 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8054317 17
+. 83 EC 08 B8 CD 4C 99 C2 50 FF 75 C4 E8 D9 41 FF FF
+
+. 0 8054328 7
+. 83 C4 10 85 C0 74 15
+
+. 0 805432F 13
+. 83 EC 0C 68 D7 7D 05 08 E8 D4 40 FF FF
+
+subps_2 ... ok
+. 0 805433C 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 80543FE 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A52 5
+. E8 AF E9 FF FF
+
+. 0 8054406 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054424 2
+. F3 AB
+
+. 0 8054426 52
+. B8 A4 70 45 41 89 45 D8 B8 7B 14 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054458 2
+. F3 AB
+
+. 0 805445A 47
+. B8 00 00 30 42 89 45 C8 B8 00 00 04 42 89 45 CC B8 00 00 B0 41 89 45 D0 B8 00 00 30 41 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 67 3F FF FF
+
+. 0 8054489 11
+. 83 C4 10 85 C0 0F 85 48 01 00 00
+
+. 0 8054494 58
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 F3 0F 5C C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 AE 47 FD 41 50 FF 75 B8 E8 33 40 FF FF
+
+. 0 80544CE 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 80544D5 17
+. 83 EC 08 B8 00 00 04 42 50 FF 75 BC E8 1B 40 FF FF
+
+. 0 80544E6 7
+. 83 C4 10 85 C0 74 45
+
+. 0 80544ED 17
+. 83 EC 08 B8 00 00 B0 41 50 FF 75 C0 E8 03 40 FF FF
+
+. 0 80544FE 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8054505 17
+. 83 EC 08 B8 00 00 30 41 50 FF 75 C4 E8 EB 3F FF FF
+
+. 0 8054516 7
+. 83 C4 10 85 C0 74 15
+
+. 0 805451D 13
+. 83 EC 0C 68 0F 7E 05 08 E8 E6 3E FF FF
+
+subss_1 ... ok
+. 0 805452A 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 80545EC 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A57 5
+. E8 98 EB FF FF
+
+. 0 80545F4 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054612 2
+. F3 AB
+
+. 0 8054614 52
+. B8 A4 70 45 41 89 45 D8 B8 7B 14 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054646 2
+. F3 AB
+
+. 0 8054648 47
+. B8 00 00 30 42 89 45 C8 B8 00 00 04 42 89 45 CC B8 00 00 B0 41 89 45 D0 B8 00 00 30 41 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 79 3D FF FF
+
+. 0 8054677 11
+. 83 C4 10 85 C0 0F 85 41 01 00 00
+
+. 0 8054682 51
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 F3 0F 5C 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 AE 47 FD 41 50 FF 75 B8 E8 4C 3E FF FF
+
+. 0 80546B5 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 80546BC 17
+. 83 EC 08 B8 00 00 04 42 50 FF 75 BC E8 34 3E FF FF
+
+. 0 80546CD 7
+. 83 C4 10 85 C0 74 45
+
+. 0 80546D4 17
+. 83 EC 08 B8 00 00 B0 41 50 FF 75 C0 E8 1C 3E FF FF
+
+. 0 80546E5 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 80546EC 17
+. 83 EC 08 B8 00 00 30 41 50 FF 75 C4 E8 04 3E FF FF
+
+. 0 80546FD 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8054704 13
+. 83 EC 0C 68 47 7E 05 08 E8 FF 3C FF FF
+
+subss_2 ... ok
+. 0 8054711 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 80547D3 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A5C 5
+. E8 7A ED FF FF
+
+. 0 80547DB 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80547F9 2
+. F3 AB
+
+. 0 80547FB 28
+. B8 5B 91 6A 43 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054815 2
+. F3 AB
+
+. 0 8054817 23
+. B8 62 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 C2 3B FF FF
+
+. 0 805482E 11
+. 83 C4 10 85 C0 0F 85 81 00 00 00
+
+. 0 8054839 64
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 9C 81 24 24 2A F7 FF FF 81 0C 24 D5 08 00 00 9D 0F 2E C8 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 85 C0 75 12
+
+. 0 8054879 13
+. 83 EC 0C 68 7F 7E 05 08 E8 8A 3B FF FF
+
+ucomiss_1 ... ok
+. 0 8054886 5
+. 83 C4 10 EB 3F
+
+. 0 80548CA 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A61 5
+. E8 6C EE FF FF
+
+. 0 80548D2 47
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E4 00 00 00 00 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80548FF 2
+. F3 AB
+
+. 0 8054901 23
+. B8 62 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 D8 3A FF FF
+
+. 0 8054918 7
+. 83 C4 10 85 C0 75 7A
+
+. 0 805491F 57
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 9C 81 24 24 2A F7 FF FF 81 0C 24 D5 08 00 00 9D 0F 2E 45 E4 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 85 C0 75 12
+
+. 0 8054958 13
+. 83 EC 0C 68 BD 7E 05 08 E8 AB 3A FF FF
+
+ucomiss_2 ... ok
+. 0 8054965 5
+. 83 C4 10 EB 3F
+
+. 0 80549A9 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A66 5
+. E8 46 EF FF FF
+
+. 0 80549B1 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80549CF 2
+. F3 AB
+
+. 0 80549D1 28
+. B8 5B 91 6A 43 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80549EB 2
+. F3 AB
+
+. 0 80549ED 23
+. B8 55 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 EC 39 FF FF
+
+. 0 8054A04 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8054A0F 65
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 9C 81 24 24 2A F7 FF FF 81 0C 24 D4 08 00 00 9D 0F 2E C8 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 83 F8 01 75 12
+
+. 0 8054A50 13
+. 83 EC 0C 68 FB 7E 05 08 E8 B3 39 FF FF
+
+ucomiss_3 ... ok
+. 0 8054A5D 5
+. 83 C4 10 EB 3F
+
+. 0 8054AA1 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A6B 5
+. E8 39 F0 FF FF
+
+. 0 8054AA9 47
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E4 00 00 00 00 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054AD6 2
+. F3 AB
+
+. 0 8054AD8 23
+. B8 55 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 01 39 FF FF
+
+. 0 8054AEF 7
+. 83 C4 10 85 C0 75 7B
+
+. 0 8054AF6 58
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 9C 81 24 24 2A F7 FF FF 81 0C 24 D4 08 00 00 9D 0F 2E 45 E4 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 83 F8 01 75 12
+
+. 0 8054B30 13
+. 83 EC 0C 68 39 7F 05 08 E8 D3 38 FF FF
+
+ucomiss_4 ... ok
+. 0 8054B3D 5
+. 83 C4 10 EB 3F
+
+. 0 8054B81 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A70 5
+. E8 14 F1 FF FF
+
+. 0 8054B89 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054BA7 2
+. F3 AB
+
+. 0 8054BA9 28
+. B8 5B 91 6A 43 89 45 D8 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054BC3 2
+. F3 AB
+
+. 0 8054BC5 23
+. B8 5B 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 14 38 FF FF
+
+. 0 8054BDC 11
+. 83 C4 10 85 C0 0F 85 82 00 00 00
+
+. 0 8054BE7 65
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 9C 81 24 24 2A F7 FF FF 81 0C 24 95 08 00 00 9D 0F 2E C8 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 83 F8 40 75 12
+
+. 0 8054C28 13
+. 83 EC 0C 68 77 7F 05 08 E8 DB 37 FF FF
+
+ucomiss_5 ... ok
+. 0 8054C35 5
+. 83 C4 10 EB 3F
+
+. 0 8054C79 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A75 5
+. E8 07 F2 FF FF
+
+. 0 8054C81 47
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 C7 45 E4 00 00 00 00 B8 5B 91 6A 43 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054CAE 2
+. F3 AB
+
+. 0 8054CB0 23
+. B8 5B 91 6A 43 89 45 C8 83 EC 08 6A 01 68 C0 92 05 08 E8 29 37 FF FF
+
+. 0 8054CC7 7
+. 83 C4 10 85 C0 75 7B
+
+. 0 8054CCE 58
+. 9B DD B5 48 FF FF FF 0F 12 45 C8 0F 16 45 D0 9C 81 24 24 2A F7 FF FF 81 0C 24 95 08 00 00 9D 0F 2E 45 E4 9C 8F 45 C4 DD A5 48 FF FF FF 8B 45 C4 25 D5 08 00 00 83 F8 40 75 12
+
+. 0 8054D08 13
+. 83 EC 0C 68 B5 7F 05 08 E8 FB 36 FF FF
+
+ucomiss_6 ... ok
+. 0 8054D15 5
+. 83 C4 10 EB 3F
+
+. 0 8054D59 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A7A 5
+. E8 E2 F2 FF FF
+
+. 0 8054D61 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054D7F 2
+. F3 AB
+
+. 0 8054D81 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054DB3 2
+. F3 AB
+
+. 0 8054DB5 47
+. B8 1F 85 33 41 89 45 C8 B8 8F C2 05 42 89 45 CC B8 D7 A3 5E 42 89 45 D0 B8 8F C2 9B 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 0C 36 FF FF
+
+. 0 8054DE4 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 8054DEF 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 15 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 D7 A3 5E 42 50 FF 75 B8 E8 D9 36 FF FF
+
+. 0 8054E28 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8054E2F 17
+. 83 EC 08 B8 0A D7 2C 42 50 FF 75 BC E8 C1 36 FF FF
+
+. 0 8054E40 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8054E47 17
+. 83 EC 08 B8 8F C2 9B 42 50 FF 75 C0 E8 A9 36 FF FF
+
+. 0 8054E58 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8054E5F 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 C4 E8 91 36 FF FF
+
+. 0 8054E70 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8054E77 13
+. 83 EC 0C 68 F3 7F 05 08 E8 8C 35 FF FF
+
+unpckhps_1 ... ok
+. 0 8054E84 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8054F46 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A7F 5
+. E8 CA F4 FF FF
+
+. 0 8054F4E 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054F6C 2
+. F3 AB
+
+. 0 8054F6E 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8054FA0 2
+. F3 AB
+
+. 0 8054FA2 47
+. B8 1F 85 33 41 89 45 C8 B8 8F C2 05 42 89 45 CC B8 D7 A3 5E 42 89 45 D0 B8 8F C2 9B 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 1F 34 FF FF
+
+. 0 8054FD1 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 8054FDC 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 15 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 D7 A3 5E 42 50 FF 75 B8 E8 F3 34 FF FF
+
+. 0 805500E 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8055015 17
+. 83 EC 08 B8 0A D7 2C 42 50 FF 75 BC E8 DB 34 FF FF
+
+. 0 8055026 7
+. 83 C4 10 85 C0 74 45
+
+. 0 805502D 17
+. 83 EC 08 B8 8F C2 9B 42 50 FF 75 C0 E8 C3 34 FF FF
+
+. 0 805503E 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8055045 17
+. 83 EC 08 B8 CD 4C AF 42 50 FF 75 C4 E8 AB 34 FF FF
+
+. 0 8055056 7
+. 83 C4 10 85 C0 74 15
+
+. 0 805505D 13
+. 83 EC 0C 68 34 80 05 08 E8 A6 33 FF FF
+
+unpckhps_2 ... ok
+. 0 805506A 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 805512C 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A84 5
+. E8 AB F6 FF FF
+
+. 0 8055134 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8055152 2
+. F3 AB
+
+. 0 8055154 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8055186 2
+. F3 AB
+
+. 0 8055188 47
+. B8 1F 85 33 41 89 45 C8 B8 8F C2 05 42 89 45 CC B8 D7 A3 5E 42 89 45 D0 B8 8F C2 9B 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 39 32 FF FF
+
+. 0 80551B7 11
+. 83 C4 10 85 C0 0F 85 47 01 00 00
+
+. 0 80551C2 57
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 14 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 1F 85 33 41 50 FF 75 B8 E8 06 33 FF FF
+
+. 0 80551FB 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 8055202 17
+. 83 EC 08 B8 A4 70 45 41 50 FF 75 BC E8 EE 32 FF FF
+
+. 0 8055213 7
+. 83 C4 10 85 C0 74 45
+
+. 0 805521A 17
+. 83 EC 08 B8 8F C2 05 42 50 FF 75 C0 E8 D6 32 FF FF
+
+. 0 805522B 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8055232 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 C4 E8 BE 32 FF FF
+
+. 0 8055243 7
+. 83 C4 10 85 C0 74 15
+
+. 0 805524A 13
+. 83 EC 0C 68 75 80 05 08 E8 B9 31 FF FF
+
+unpcklps_1 ... ok
+. 0 8055257 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 8055319 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A89 5
+. E8 93 F8 FF FF
+
+. 0 8055321 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 805533F 2
+. F3 AB
+
+. 0 8055341 52
+. B8 A4 70 45 41 89 45 D8 B8 B8 1E 63 42 89 45 DC B8 0A D7 2C 42 89 45 E0 B8 CD 4C AF 42 89 45 E4 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8055373 2
+. F3 AB
+
+. 0 8055375 47
+. B8 1F 85 33 41 89 45 C8 B8 8F C2 05 42 89 45 CC B8 D7 A3 5E 42 89 45 D0 B8 8F C2 9B 42 89 45 D4 83 EC 08 6A 01 68 C0 92 05 08 E8 4C 30 FF FF
+
+. 0 80553A4 11
+. 83 C4 10 85 C0 0F 85 40 01 00 00
+
+. 0 80553AF 50
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 14 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 83 EC 08 B8 1F 85 33 41 50 FF 75 B8 E8 20 31 FF FF
+
+. 0 80553E1 7
+. 83 C4 10 85 C0 74 5D
+
+. 0 80553E8 17
+. 83 EC 08 B8 A4 70 45 41 50 FF 75 BC E8 08 31 FF FF
+
+. 0 80553F9 7
+. 83 C4 10 85 C0 74 45
+
+. 0 8055400 17
+. 83 EC 08 B8 8F C2 05 42 50 FF 75 C0 E8 F0 30 FF FF
+
+. 0 8055411 7
+. 83 C4 10 85 C0 74 2D
+
+. 0 8055418 17
+. 83 EC 08 B8 B8 1E 63 42 50 FF 75 C4 E8 D8 30 FF FF
+
+. 0 8055429 7
+. 83 C4 10 85 C0 74 15
+
+. 0 8055430 13
+. 83 EC 0C 68 B6 80 05 08 E8 D3 2F FF FF
+
+unpcklps_2 ... ok
+. 0 805543D 8
+. 83 C4 10 E9 BA 00 00 00
+
+. 0 80554FF 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A8E 5
+. E8 74 FA FF FF
+
+. 0 8055507 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8055525 2
+. F3 AB
+
+. 0 8055527 48
+. C7 45 D8 20 64 A8 EC C7 45 DC 31 75 B9 FD C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 8055555 2
+. F3 AB
+
+. 0 8055557 43
+. C7 45 C8 EF CD AB 89 C7 45 CC 67 45 23 01 C7 45 D0 20 64 A8 EC C7 45 D4 31 75 B9 FD 83 EC 08 6A 01 68 C0 92 05 08 E8 6E 2E FF FF
+
+. 0 8055582 11
+. 83 C4 10 85 C0 0F 85 BA 00 00 00
+
+. 0 805558D 63
+. 9B DD B5 48 FF FF FF 0F 12 45 D8 0F 16 45 E0 0F 12 4D C8 0F 16 4D D0 0F 57 C8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 8B 45 BC 35 56 30 9A FC 8B 55 B8 81 F2 CF A9 03 65 09 D0 85 C0 75 29
+
+. 0 80555CC 23
+. 8B 45 C4 35 56 30 9A FC 8B 55 C0 81 F2 CF A9 03 65 09 D0 85 C0 75 12
+
+. 0 80555E3 13
+. 83 EC 0C 68 F7 80 05 08 E8 20 2E FF FF
+
+xorps_1 ... ok
+. 0 80555F0 5
+. 83 C4 10 EB 62
+
+. 0 8055657 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A93 5
+. E8 C7 FB FF FF
+
+. 0 805565F 32
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8D 7D D8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 805567D 2
+. F3 AB
+
+. 0 805567F 48
+. C7 45 D8 20 64 A8 EC C7 45 DC 31 75 B9 FD C7 45 E0 EF CD AB 89 C7 45 E4 67 45 23 01 8D 7D C8 FC BA 00 00 00 00 B8 04 00 00 00 89 C1 89 D0 F3 AB
+
+. 0 80556AD 2
+. F3 AB
+
+. 0 80556AF 43
+. C7 45 C8 EF CD AB 89 C7 45 CC 67 45 23 01 C7 45 D0 20 64 A8 EC C7 45 D4 31 75 B9 FD 83 EC 08 6A 01 68 C0 92 05 08 E8 16 2D FF FF
+
+. 0 80556DA 11
+. 83 C4 10 85 C0 0F 85 B3 00 00 00
+
+. 0 80556E5 56
+. 9B DD B5 48 FF FF FF 0F 12 4D C8 0F 16 4D D0 0F 57 4D D8 0F 13 4D B8 0F 17 4D C0 DD A5 48 FF FF FF 8B 45 BC 35 56 30 9A FC 8B 55 B8 81 F2 CF A9 03 65 09 D0 85 C0 75 29
+
+. 0 805571D 23
+. 8B 45 C4 35 56 30 9A FC 8B 55 C0 81 F2 CF A9 03 65 09 D0 85 C0 75 12
+
+. 0 8055734 13
+. 83 EC 0C 68 2F 81 05 08 E8 CF 2C FF FF
+
+xorps_2 ... ok
+. 0 8055741 5
+. 83 C4 10 EB 62
+
+. 0 80557A8 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 8055A98 10
+. 83 EC 0C 6A 00 E8 7E 29 FF FF
+
+. 0 8048420 6
+. FF 25 80 92 05 08
+
+. 0 8048426 10
+. 68 30 00 00 00 E9 80 FF FF FF
+
+. 0 3A9EBC50 14
+. 55 89 E5 57 56 53 83 EC 0C E8 33 AB FE FF
+
+. 0 3A9EBC5E 16
+. 81 C3 B2 73 0E 00 8B 93 BC 9E FF FF 85 D2 74 6F
+
+. 0 3A9EBC6E 9
+. 89 F6 8B 42 04 85 C0 74 46
+
+. 0 3A9EBC77 30
+. 89 F6 8D BC 27 00 00 00 00 48 89 42 04 C1 E0 04 8D 04 10 8D 48 08 8B 40 08 83 F8 04 77 21
+
+. 0 3A9EBC95 11
+. 8B 84 83 88 D7 FE FF 01 D8 FF E0
+
+. 0 3A9EBCA0 16
+. 8B 45 08 89 44 24 04 8B 41 08 89 04 24 FF 51 04
+
+. 0 8055AB0 14
+. 55 89 E5 83 EC 18 89 5D F4 E8 BA 00 00 00
+
+. 0 8055ABE 36
+. 81 C3 9E 37 00 00 89 7D FC 8D 83 0C FF FF FF 8D BB 0C FF FF FF 89 75 F8 29 F8 C1 F8 02 85 C0 8D 70 FF 75 12
+
+. 0 8055AE2 5
+. E8 BD 00 00 00
+
+. 0 8055BA4 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 AF 36 00 00 50 E8 C6 28 FF FF
+
+. 0 8048480 14
+. 55 89 E5 50 50 80 3D A0 92 05 08 00 75 2E
+
+. 0 804848E 11
+. A1 70 91 05 08 8B 10 85 D2 74 1C
+
+. 0 80484B5 9
+. C6 05 A0 92 05 08 01 C9 C3
+
+. 0 8055BBA 4
+. 59 5B C9 C3
+
+. 0 8055AE7 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3A9EBCB0 13
+. 8B 93 BC 9E FF FF 8B 42 04 85 C0 75 C3
+
+. 0 3A9EBC80 21
+. 48 89 42 04 C1 E0 04 8D 04 10 8D 48 08 8B 40 08 83 F8 04 77 21
+
+. 0 3A970800 14
+. 55 89 E5 57 56 53 83 EC 4C E8 1D 51 00 00
+
+. 0 3A97080E 24
+. 81 C3 5E 7E 00 00 8D 83 AC F9 FF FF 89 45 CC 89 04 24 FF 93 F8 FB FF FF
+
+. 0 3A9658E0 11
+. 55 89 E5 8B 45 08 FF 40 04 5D C3
+
+. 0 3A970826 44
+. C7 45 F0 00 00 00 00 8B 93 98 F9 FF FF 8B BB 94 F9 FF FF 8D 04 95 0F 00 00 00 83 E0 F0 29 C4 8D 4C 24 0C 85 FF 89 F8 89 4D EC 74 20
+
+. 0 3A970852 26
+. FF 87 70 01 00 00 8B 75 F0 8B 55 EC 89 3C B2 8B 7F 0C 46 89 75 F0 85 FF 75 E6
+
+. 0 3A97086C 17
+. 8B 93 98 F9 FF FF 8B 78 0C 85 FF 0F 84 11 01 00 00
+
+. 0 3A97087D 15
+. C7 45 E8 01 00 00 00 8B 4D EC 39 79 04 74 12
+
+. 0 3A97089E 15
+. 8B 4D E8 41 39 D1 89 4D BC 0F 83 D6 00 00 00
+
+. 0 3A9708AD 22
+. 8B 45 E8 8B 75 EC C1 E0 02 89 45 C0 01 C6 F7 5D C0 89 75 C4 EB 0D
+
+. 0 3A9708D0 25
+. 8B 55 EC 8B 45 BC 8B 04 82 89 45 D0 8B 90 D4 01 00 00 89 45 E4 85 D2 74 77
+
+. 0 3A9708E9 6
+. 8B 02 85 C0 74 71
+
+. 0 3A9708EF 28
+. 8B 75 EC 8B 4D E8 8D 0C 8E 89 4D C8 8B 75 BC 8B 4D C0 8D 34 B1 89 75 B8 39 F8 74 12
+
+. 0 3A97090B 14
+. 90 8D 74 26 00 83 C2 04 8B 02 85 C0 74 47
+
+. 0 3A970919 4
+. 39 F8 75 F3
+
+. 0 3A970910 9
+. 83 C2 04 8B 02 85 C0 74 47
+
+. 0 3A970960 17
+. 8B 55 D0 8B 8A E0 01 00 00 85 C9 0F 85 06 01 00 00
+
+. 0 3A970971 18
+. FF 45 BC 8B 93 98 F9 FF FF 39 55 BC 0F 82 4D FF FF FF
+
+. 0 3A970983 11
+. 8B 7F 0C 85 FF 0F 85 EF FE FF FF
+
+. 0 3A97088C 18
+. 8D 74 26 00 FF 45 E8 8B 45 EC 8B 75 E8 39 3C B0 75 F2
+
+. 0 3A970890 14
+. FF 45 E8 8B 45 EC 8B 75 E8 39 3C B0 75 F2
+
+. 0 3A97098E 12
+. C7 45 F0 00 00 00 00 39 55 F0 72 2C
+
+. 0 3A9709C6 21
+. 8B 55 F0 8B 4D EC 8B 3C 91 0F B6 97 74 01 00 00 F6 C2 08 74 48
+
+. 0 3A9709DB 2
+. EB C2
+
+. 0 3A97099F 20
+. 88 D0 24 F7 88 87 74 01 00 00 8B 47 04 0F B6 08 84 C9 75 2A
+
+. 0 3A9709B3 5
+. F6 C2 03 75 25
+
+. 0 3A9709B8 14
+. FF 45 F0 8B 55 F0 3B 93 98 F9 FF FF 73 71
+
+. 0 3A9709DD 10
+. 8B 97 80 00 00 00 85 D2 75 07
+
+. 0 3A9709E7 7
+. 8B 77 4C 85 F6 74 CA
+
+. 0 3A9709EE 13
+. F6 83 14 FC FF FF 02 0F 85 E1 00 00 00
+
+. 0 3A9709FB 4
+. 85 D2 74 1D
+
+. 0 3A970A1C 7
+. 8B 47 4C 85 C0 75 49
+
+. 0 3A970A6C 9
+. 8B 40 04 8B 17 01 D0 FF D0
+
+. 0 3A97CCD4 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 03 11 00 00 50 E8 06 FD FF FF
+
+. 0 3A97C9F0 26
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 E7 13 00 00 51 80 BB 30 00 00 00 00 75 34
+
+. 0 3A97CA0A 10
+. 8B 93 24 00 00 00 85 D2 75 2F
+
+. 0 3A97CA43 15
+. 83 EC 0C 8B 83 10 FF FF FF 50 E8 32 FF FF FF
+
+. 0 3A97C984 6
+. FF A3 18 00 00 00
+
+. 0 3A97C98A 10
+. 68 18 00 00 00 E9 B0 FF FF FF
+
+. 0 3A97C944 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 0 3A9EBEA0 14
+. 55 89 E5 57 56 53 83 EC 0C E8 E3 A8 FE FF
+
+. 0 3A9EBEAE 16
+. 81 C3 62 71 0E 00 8B BB BC 9E FF FF 85 FF 74 68
+
+. 0 3A9EBEBE 22
+. 89 F6 8B 47 04 C1 E0 04 8D 74 38 F8 8D 47 08 39 C6 89 45 F0 73 12
+
+. 0 3A9EBED4 2
+. EB 4A
+
+. 0 3A9EBF20 6
+. 8B 3F 85 FF 75 9A
+
+. 0 3A9EBF26 7
+. 8B 45 08 85 C0 74 0B
+
+. 0 3A9EBF2D 11
+. 8B 45 08 89 04 24 E8 98 B9 09 00
+
+. 0 3AA878D0 17
+. 55 89 E5 57 56 53 83 EC 10 8B 7D 08 E8 B0 EE F4 FF
+
+. 0 3AA878E1 20
+. 81 C3 2F B7 04 00 8B 93 3C 2B 00 00 85 D2 0F 85 DD 00 00 00
+
+. 0 3AA878F5 22
+. 8B 8B 8C 9B FF FF 8D 83 88 9B FF FF 89 45 F0 39 C1 8B 71 04 75 0F
+
+. 0 3AA8790B 2
+. EB 33
+
+. 0 3AA87940 22
+. 8B 8B 94 9B FF FF 8D 83 90 9B FF FF 89 45 EC 39 C1 8B 71 04 75 0C
+
+. 0 3AA87956 2
+. EB 28
+
+. 0 3AA87980 22
+. 8B 8B 9C 9B FF FF 8D 83 98 9B FF FF 89 45 E8 39 C1 8B 71 04 75 0C
+
+. 0 3AA87996 2
+. EB 28
+
+. 0 3AA879C0 10
+. 8B 93 44 2B 00 00 85 D2 75 18
+
+. 0 3AA879CA 8
+. 83 C4 10 5B 5E 5F 5D C3
+
+. 0 3A9EBF38 8
+. 83 C4 0C 5B 5E 5F 5D C3
+
+. 0 3A97CA52 5
+. 83 C4 10 EB BD
+
+. 0 3A97CA14 12
+. 8B 83 14 FF FF FF 8B 10 85 D2 74 17
+
+. 0 3A97CA37 12
+. C6 83 30 00 00 00 01 8B 5D FC C9 C3
+
+. 0 3A97CCEA 4
+. 59 5B C9 C3
+
+. 0 3A970A75 2
+. EB AC
+
+. 0 3A970A23 20
+. FF 8F 70 01 00 00 FF 45 F0 8B 55 F0 3B 93 98 F9 FF FF 72 8F
+
+. 0 3A9BA054 22
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 E7 57 00 00 50 E8 36 75 FE FF
+
+. 0 3A9A15A0 26
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 9B E2 01 00 51 80 BB 5C 00 00 00 00 75 34
+
+. 0 3A9A15BA 10
+. 8B 93 44 00 00 00 85 D2 75 2F
+
+. 0 3A9A15F3 15
+. 83 EC 0C 8B 83 DC FE FF FF 50 E8 22 FF FF FF
+
+. 0 3A9A1524 6
+. FF A3 28 00 00 00
+
+. 0 3A9A152A 10
+. 68 38 00 00 00 E9 70 FF FF FF
+
+. 0 3A9A14A4 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 0 3A9A1602 5
+. 83 C4 10 EB BD
+
+. 0 3A9A15C4 12
+. 8B 83 E0 FE FF FF 8B 10 85 D2 74 17
+
+. 0 3A9A15E7 12
+. C6 83 5C 00 00 00 01 8B 5D FC C9 C3
+
+. 0 3A9BA06A 4
+. 59 5B C9 C3
+
+. 0 3A9709FF 29
+. 8B 52 04 8B 37 8B 87 88 00 00 00 01 F2 89 55 D4 8B 70 04 C1 EE 02 89 F0 4E 85 C0 75 3C
+
+. 0 3A970A58 6
+. 8B 45 D4 FF 14 B0
+
+. 0 3A9D6760 10
+. 55 89 E5 56 53 E8 27 00 00 00
+
+. 0 3A9D676A 18
+. 81 C3 A6 C8 0F 00 8D B3 A0 FF FF FF 8B 06 85 C0 75 04
+
+. 0 3A9D677C 4
+. 5B 5E 5D C3
+
+. 0 3A970A5E 7
+. 89 F0 4E 85 C0 75 F3
+
+. 0 3A970A65 7
+. 8B 47 4C 85 C0 74 B7
+
+. 0 3A970A37 12
+. 8B 4D CC 89 0C 24 FF 93 FC FB FF FF
+
+. 0 3A9658F0 11
+. 55 89 E5 8B 45 08 FF 48 04 5D C3
+
+. 0 3A970A43 13
+. 80 BB 14 FC FF FF 00 0F 88 B5 00 00 00
+
+. 0 3A970A50 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 0 3A9EBCBD 14
+. 89 D0 8B 12 85 D2 89 93 BC 9E FF FF 74 12
+
+. 0 3A9EBCDD 16
+. 8D B3 F4 FF FF FF 8D BB F8 FF FF FF 39 FE 72 0B
+
+. 0 3A9EBCF8 2
+. FF 16
+
+. 0 3AA20870 14
+. 55 89 E5 57 56 53 83 EC 0C E8 13 5F FB FF
+
+. 0 3AA2087E 18
+. 81 C3 92 27 0B 00 C7 04 24 00 00 00 00 E8 D0 FD FF FF
+
+. 0 3AA20660 16
+. 55 31 C0 89 E5 57 56 53 83 EC 24 E8 21 61 FB FF
+
+. 0 3AA20670 34
+. 81 C3 A0 29 0B 00 83 BB B8 01 00 00 00 C7 45 E0 00 00 00 00 0F 95 C0 85 C0 89 45 DC 0F 85 7B 01 00 00
+
+. 0 3AA20692 7
+. 8B 75 08 85 F6 74 0E
+
+. 0 3AA206A7 16
+. 8B B3 70 95 FF FF 8B BB B0 09 00 00 85 F6 75 19
+
+. 0 3AA206D0 13
+. 89 B3 B4 09 00 00 8B 4D 08 85 C9 74 11
+
+. 0 3AA206EE 11
+. 8B 56 5C 85 D2 0F 8E C7 00 00 00
+
+. 0 3AA207C0 12
+. 8B 46 10 39 46 14 0F 86 2D FF FF FF
+
+. 0 3AA206F9 8
+. 0F B6 4E 46 84 C9 75 2F
+
+. 0 3AA20701 4
+. 85 D2 7E 2B
+
+. 0 3AA20730 7
+. 8B 45 08 85 C0 74 11
+
+. 0 3AA20748 22
+. 31 C0 89 83 B4 09 00 00 8B 83 B0 09 00 00 39 C7 0F 85 62 FF FF FF
+
+. 0 3AA2075E 11
+. 8B 76 34 85 F6 0F 85 67 FF FF FF
+
+. 0 3AA20769 14
+. 8D B4 26 00 00 00 00 8B 45 08 85 C0 74 0E
+
+. 0 3AA20785 11
+. 8B 7D DC 85 FF 0F 85 9D 00 00 00
+
+. 0 3AA20790 11
+. 8B 45 E0 83 C4 24 5B 5E 5F 5D C3
+
+. 0 3AA20890 12
+. 8B B3 70 95 FF FF 89 C7 85 F6 74 32
+
+. 0 3AA2089C 10
+. 8D 74 26 00 8B 06 A8 02 75 1A
+
+. 0 3AA208C0 14
+. C7 46 5C FF FF FF FF 8B 76 34 85 F6 75 D2
+
+. 0 3AA208A0 6
+. 8B 06 A8 02 75 1A
+
+. 0 3AA208A6 10
+. 25 08 10 00 00 83 F8 08 74 10
+
+. 0 3AA208B0 7
+. 8B 46 5C 85 C0 75 21
+
+. 0 3AA208D8 29
+. 0F BE 46 46 31 C9 31 D2 8B 84 30 94 00 00 00 89 4C 24 08 89 54 24 04 89 34 24 FF 50 2C
+
+. 0 3AA1EFD0 19
+. 55 89 E5 56 53 83 EC 0C 8B 45 10 8B 75 08 E8 AE 77 FB FF
+
+. 0 3AA1EFE3 25
+. 81 C3 2D 40 0B 00 89 44 24 08 8B 45 0C 89 34 24 89 44 24 04 E8 74 1D 00 00
+
+. 0 3AA20D70 30
+. 55 89 E5 83 EC 1C 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 7D 0C 0F BE 46 46 E8 03 5A FB FF
+
+. 0 3AA20D8E 19
+. 81 C3 82 22 0B 00 8B 84 30 94 00 00 00 89 34 24 FF 50 30
+
+. 0 3AA1EAC0 26
+. 55 89 E5 83 EC 1C 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 56 14 E8 B7 7C FB FF
+
+. 0 3AA1EADA 15
+. 81 C3 36 45 0B 00 8B 46 10 31 FF 39 C2 76 47
+
+. 0 3AA1EB30 10
+. 8B 46 04 89 C2 2B 56 08 75 1D
+
+. 0 3AA1EB3A 29
+. C7 46 4C FF FF FF FF C7 46 50 FF FF FF FF 89 F8 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA20DA1 5
+. 31 D2 40 74 5E
+
+. 0 3AA20DA6 19
+. 8B 55 10 85 FF 0F 94 C0 85 D2 0F 94 C2 09 D0 A8 01 74 5A
+
+. 0 3AA20DB9 31
+. 83 0E 02 8D 46 48 31 FF 89 44 24 08 8D 46 47 89 7C 24 0C 89 44 24 04 89 34 24 E8 C8 FD FF FF
+
+. 0 3AA20BC1 7
+. 8B 16 F6 C2 01 74 38
+
+. 0 3AA20C00 27
+. 8B 46 20 89 0C 24 29 C8 05 FF 0F 00 00 25 00 F0 FF FF 89 44 24 04 E8 75 70 05 00
+
+. 0 3AA77C90 17
+. 89 DA 8B 4C 24 08 8B 5C 24 04 B8 5B 00 00 00 CD 80
+
+. 0 3AA77CA1 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 0 3AA77CAA 1
+. C3
+
+. 0 3AA20C1B 9
+. 90 8D 74 26 00 8B 16 EB A4
+
+. 0 3AA20BF0 15
+. 83 CA 01 89 16 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 0 3AA20DD8 59
+. C7 46 18 00 00 00 00 89 F2 C7 46 14 00 00 00 00 C7 46 10 00 00 00 00 C7 46 08 00 00 00 00 C7 46 04 00 00 00 00 C7 46 0C 00 00 00 00 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 0 3AA1EFFC 6
+. 31 D2 85 C0 74 17
+
+. 0 3AA1F002 32
+. 8B 46 1C 89 F2 89 46 18 89 46 14 89 46 10 89 46 0C 89 46 04 89 46 08 83 C4 0C 89 D0 5B 5E 5D C3
+
+. 0 3AA208F5 2
+. EB C9
+
+. 0 3AA208CE 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3
+
+. 0 3A9EBCFA 7
+. 83 C6 04 39 FE 72 F7
+
+. 0 3A9EBD01 2
+. EB EA
+
+. 0 3A9EBCED 11
+. 8B 45 08 89 04 24 E8 94 D8 05 00
+
+. 0 3AA4958C 11
+. 8B 5C 24 04 B8 FC 00 00 00 CD 80
+
diff --git a/VEX/orig_x86/manyfp.orig b/VEX/orig_x86/manyfp.orig
new file mode 100644
index 0000000..207a4cb
--- /dev/null
+++ b/VEX/orig_x86/manyfp.orig
@@ -0,0 +1,9635 @@
+==13306== valgrind-1.0.4, a memory error detector for x86 GNU/Linux.
+==13306== Copyright (C) 2000-2002, and GNU GPL'd, by Julian Seward.
+==13306== Startup, with flags:
+==13306==    --suppressions=/home/sewardj/trunk/hacked104/Inst/lib/valgrind/default.supp
+==13306==    -v
+==13306== Estimated CPU clock rate is 1000 MHz
+==13306== 
+
+. 0 4006C479 1
+. C3
+
+. 1 400471EA 5
+. E8 C9 55 07 00
+
+. 2 400BC7B8 34
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 DA BF 01 00 8D 83 F0 FF FF FF 8D 70 FC 8B 40 FC 83 F8 FF 74 0E
+
+. 3 400BC7E8 4
+. 5B 5E C9 C3
+
+. 4 400471EF 2
+. C9 C3
+
+. 5 4000A911 2
+. EB 97
+
+. 6 4000A8AA 7
+. 8B 56 7C 85 D2 74 DF
+
+. 7 4000A890 13
+. 31 D2 89 93 CC FB FF FF E9 F4 FD FF FF
+
+. 8 4000A691 8
+. 85 FF 0F 85 45 01 00 00
+
+. 9 4000A699 7
+. 31 C0 E8 D0 05 00 00
+
+. 10 4000AC70 9
+. 55 89 E5 53 E8 E3 4E 00 00
+
+. 11 4000FB5C 4
+. 8B 1C 24 C3
+
+. 12 4000AC79 19
+. 81 C3 3F 78 00 00 8B 93 48 00 00 00 8B 4A 08 85 C9 75 1B
+
+. 13 4000ACA7 5
+. 5B 89 D0 5D C3
+
+. 14 4000A6A0 15
+. C7 40 0C 01 00 00 00 89 45 E4 E8 01 06 00 00
+
+. 15 4000ACB0 5
+. 55 89 E5 5D C3
+
+. 16 4000A6AF 16
+. 8B 45 F0 8B B8 54 01 00 00 89 FA 4F 85 D2 74 44
+
+. 17 4000A6BF 24
+. 90 8B 75 F0 8B 86 D4 01 00 00 8B 34 B8 0F B6 86 74 01 00 00 A8 08 75 25
+
+. 18 4000A6D7 23
+. 88 C2 8B 46 04 80 CA 08 88 96 74 01 00 00 80 38 00 0F 84 E0 00 00 00
+
+. 19 4000A6EE 7
+. 8B 46 48 85 C0 75 2F
+
+. 20 4000A6F5 7
+. 8B 56 7C 85 D2 75 28
+
+. 21 4000A6FC 7
+. 89 FE 4F 85 F6 75 BD
+
+. 22 4000A6C0 23
+. 8B 75 F0 8B 86 D4 01 00 00 8B 34 B8 0F B6 86 74 01 00 00 A8 08 75 25
+
+. 23 4000A724 9
+. F6 83 88 FB FF FF 02 75 77
+
+. 24 4000A72D 4
+. 85 C0 75 54
+
+. 25 4000A785 29
+. 8B 0E 8B 40 04 01 C8 8B 4D EC 89 0C 24 8B 4D E8 89 4C 24 04 8B 4D 08 89 4C 24 08 FF D0
+
+. 26 42015790 16
+. 55 89 E5 83 EC 18 89 5D F4 31 D2 E8 BD FF FF FF
+
+. 27 4201575D 4
+. 8B 1C 24 C3
+
+. 28 420157A0 28
+. 81 C3 30 4B 11 00 89 75 F8 8B 83 70 01 00 00 8B 75 08 89 7D FC 8B 7D 0C 85 C0 74 0C
+
+. 29 420157BC 22
+. 8B 00 85 C0 B8 01 00 00 00 0F 44 D0 89 93 30 BD FF FF 85 D2 75 20
+
+. 30 420157D2 21
+. 8B 83 64 02 00 00 0F B7 10 8B 83 24 02 00 00 66 3B 50 60 74 0B
+
+. 31 420157F2 39
+. 89 B3 00 3F 00 00 8B 4D 10 8B 83 14 02 00 00 89 BB 04 3F 00 00 89 08 89 34 24 89 7C 24 04 89 4C 24 08 E8 77 46 0C 00
+
+. 32 420D9E90 20
+. 55 89 E5 83 EC 18 89 7D FC 8B 7D 0C 89 5D F4 E8 B9 B8 F3 FF
+
+. 33 420D9EA4 13
+. 81 C3 2C 04 05 00 89 75 F8 85 FF 74 2F
+
+. 34 420D9EB1 6
+. 8B 37 85 F6 74 29
+
+. 35 420D9EB7 16
+. 89 34 24 C7 44 24 04 2F 00 00 00 E8 3D B5 F3 FF
+
+. 36 42015404 6
+. FF A3 94 00 00 00
+
+. 37 4201540A 10
+. 68 10 01 00 00 E9 C0 FD FF FF
+
+. 38 420151D4 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 39 4000A180 16
+. 50 51 52 8B 54 24 10 8B 44 24 0C E8 80 FD FF FF
+
+. 40 40009F10 13
+. 55 89 E5 57 89 C7 56 53 E8 3F 5C 00 00
+
+. 41 40009F1D 80
+. 81 C3 9B 85 00 00 83 EC 3C 8B 48 30 8B 40 2C 8B 40 04 89 45 EC 8B 47 74 8B 70 04 01 F2 8B 42 04 C1 E8 08 89 45 E8 89 C6 8B 41 04 C1 E6 04 8B 0A 01 C6 8B 07 89 75 F0 89 45 E0 01 C8 89 F1 89 45 E4 0F B6 56 0D 80 E2 03 84 D2 0F 85 B7 00 00 00
+
+. 42 40009F6D 10
+. 8B 87 DC 00 00 00 85 C0 74 1C
+
+. 43 40009F77 28
+. 8B 40 04 8B 75 E8 0F B7 14 70 8B 87 7C 01 00 00 C1 E2 04 01 C2 8B 42 04 85 C0 75 5D
+
+. 44 40009FF0 47
+. 8B 01 8D 4D F0 01 45 EC 8B 87 B0 01 00 00 C7 44 24 0C 00 00 00 00 89 54 24 04 89 FA 89 04 24 8B 45 EC C7 44 24 08 01 00 00 00 E8 71 CD FF FF
+
+. 45 40006D90 15
+. 55 89 E5 57 89 C7 56 31 F6 53 E8 BD 8D 00 00
+
+. 46 40006D9F 25
+. 81 C3 19 B7 00 00 81 EC CC 00 00 00 89 4D A0 89 55 A4 0F B6 08 85 C9 74 55
+
+. 47 40006DB8 85
+. 89 CE 0F B6 48 01 E3 4D C1 E6 04 01 CE 0F B6 48 02 E3 42 C1 E6 04 01 CE 0F B6 48 03 E3 37 C1 E6 04 01 CE 0F B6 48 04 E3 2C C1 E6 04 83 C0 05 01 CE 0F B6 08 E3 1F C1 E6 04 BA 00 00 00 F0 40 01 CE 21 F2 81 E6 FF FF FF 0F C1 EA 18 0F B6 08 31 D6 85 C9 75 E1
+vex iropt: 4 x unrolling (22 sts -> 88 sts)
+
+. 48 40006DEE 31
+. C1 E6 04 BA 00 00 00 F0 40 01 CE 21 F2 81 E6 FF FF FF 0F C1 EA 18 0F B6 08 31 D6 85 C9 75 E1
+
+. 49 40006E0D 35
+. FF 83 F8 FB FF FF 8B 55 08 89 75 9C C7 45 B0 00 00 00 00 8B 02 C7 45 B4 00 00 00 00 89 55 80 85 C0 74 67
+
+. 50 40006E30 76
+. 8D 45 B0 89 45 88 89 3C 24 8B 45 9C 8B 4D A0 8B 75 88 8B 55 80 89 44 24 04 8B 01 89 74 24 0C 8B 4D 10 89 44 24 08 8B 02 89 4C 24 20 C7 44 24 14 00 00 00 00 C7 44 24 1C 00 00 00 00 89 44 24 10 8B 45 0C 89 44 24 18 E8 94 0D 00 00
+
+. 51 40007C10 11
+. 55 89 E5 57 56 53 E8 41 7F 00 00
+
+. 52 40007C1B 56
+. 81 C3 9D A8 00 00 83 EC 2C 8B 45 18 8B 08 8B 40 04 89 4D F0 89 45 EC 8D B4 26 00 00 00 00 8D BC 27 00 00 00 00 8B 45 F0 8B 55 1C 8B 0C 90 8B 45 24 89 4D E8 85 C0 74 0B
+
+. 53 40007C5E 9
+. F7 45 28 02 00 00 00 74 15
+
+. 54 40007C7C 13
+. F6 83 88 FB FF FF 08 0F 85 D7 01 00 00
+
+. 55 40007C89 58
+. 8B 75 E8 31 D2 8B 46 30 8B 40 04 89 45 E4 8B 46 2C 8B 40 04 89 45 E0 8B 86 8C 01 00 00 89 45 DC 8B 45 0C F7 B6 64 01 00 00 8B 86 68 01 00 00 8B 34 90 85 F6 0F 84 F1 00 00 00
+
+. 56 40007CC3 34
+. 8D B6 00 00 00 00 8D BC 27 00 00 00 00 8B 7D E4 89 F1 C1 E1 04 01 CF 8B 47 04 85 C0 0F 84 BB 00 00 00
+
+. 57 40007DA0 20
+. 8B 7D E8 8B 87 6C 01 00 00 8B 34 B0 85 F6 0F 85 1C FF FF FF
+
+. 58 40007DB4 4
+. 85 F6 75 0A
+
+. 59 40007DB8 10
+. 8B 4D 20 8B 41 0C 85 C0 75 1B
+
+. 60 40007DC2 15
+. FF 45 1C 8B 45 EC 39 45 1C 0F 82 6F FE FF FF
+
+. 61 40007C40 19
+. 8B 45 F0 8B 55 1C 8B 0C 90 8B 45 24 89 4D E8 85 C0 74 0B
+
+. 62 40007CE5 7
+. 66 83 7F 0E 00 75 0D
+
+. 63 40007CF9 16
+. 0F B6 57 0C 80 E2 0F 80 FA 02 0F 87 97 00 00 00
+
+. 64 40007D09 5
+. 3B 7D 10 74 1A
+
+. 65 40007D0E 22
+. 8B 0F 8B 45 E0 01 C8 89 04 24 8B 45 08 89 44 24 04 E8 1C 67 00 00
+
+. 66 4000E440 14
+. 8B 4C 24 04 8B 54 24 08 8A 01 3A 02 75 09
+
+. 67 4000E457 14
+. B8 01 00 00 00 B9 FF FF FF FF 0F 42 C1 C3
+
+. 68 40007D24 4
+. 85 C0 75 78
+
+. 69 40007CD0 21
+. 8B 7D E4 89 F1 C1 E1 04 01 CF 8B 47 04 85 C0 0F 84 BB 00 00 00
+
+. 70 4000E44E 6
+. 41 42 84 C0 75 F4
+
+. 71 4000E448 6
+. 8A 01 3A 02 75 09
+
+. 72 4000E454 3
+. 31 C0 C3
+
+. 73 40007D28 11
+. 8B 45 DC 85 C0 0F 84 C4 00 00 00
+
+. 74 40007D33 50
+. 8B 55 DC 0F B7 04 72 89 C1 8B 45 E8 81 E1 FF 7F 00 00 89 4D D8 8B 4D 20 8B 90 7C 01 00 00 C1 65 D8 04 8B 41 04 8B 4D D8 39 44 0A 04 0F 84 DB 00 00 00
+
+. 75 40007D65 10
+. 8B 55 20 8B 42 08 85 C0 75 31
+
+. 76 40007D6F 20
+. 8B 4D E8 8B 55 D8 8B 81 7C 01 00 00 8B 44 10 04 85 C0 75 1D
+
+. 77 40007D83 17
+. 8B 4D DC 0F B7 04 71 25 00 80 FF FF 66 85 C0 74 63
+
+. 78 40007DF7 15
+. 0F B6 57 0C C0 EA 04 0F B6 C2 83 F8 01 74 0F
+
+. 79 40007E15 18
+. 8B 75 14 BA 01 00 00 00 8B 45 E8 89 3E 89 46 04 EB AC
+
+. 80 40007DD3 10
+. 83 C4 2C 89 D0 5B 5E 5F 5D C3
+
+. 81 40006E7C 6
+. 85 C0 89 C1 7F 15
+
+. 82 40006E97 11
+. 8B 45 B0 85 C0 0F 84 92 01 00 00
+
+. 83 40006EA2 16
+. C7 45 98 00 00 00 00 8B 55 A0 8B 02 85 C0 74 16
+
+. 84 40006EB2 33
+. 0F B6 48 0D B8 01 00 00 00 80 E1 03 80 F9 03 0F 45 45 98 89 45 98 8B 45 98 85 C0 0F 85 B5 00 00 00
+
+. 85 40006ED3 27
+. 8D 55 B0 89 55 88 8B 45 88 8B 50 04 0F B6 8A 74 01 00 00 80 E1 03 80 F9 02 74 5A
+
+. 86 40006EEE 12
+. F7 83 88 FB FF FF 04 02 00 00 75 1A
+
+. 87 40006EFA 26
+. 8B 45 B0 8B 4D A0 8B 7D 88 89 01 8B 47 04 8B 00 8D 65 F4 5B 5E 5F 5D C2 10 00
+
+. 88 4000A01F 5
+. 83 EC 10 EB 9D
+
+. 89 40009FC1 11
+. 8B 55 F0 89 C1 31 C0 85 D2 74 07
+
+. 90 40009FCC 17
+. 89 C8 8B 4A 04 01 C8 8B B3 C0 FB FF FF 85 F6 75 05
+
+. 91 40009FDD 13
+. 8B 7D E4 89 07 8D 65 F4 5B 5E 5F 5D C3
+
+. 92 4000A190 8
+. 5A 59 87 04 24 C2 08 00
+
+. 93 40047D0C 35
+. 55 89 E5 83 EC 10 8A 45 0C 88 45 FF 8B 45 08 89 45 F8 C7 45 F4 00 00 00 00 8B 45 F8 8A 00 3A 45 FF 75 06
+
+. 94 40047D2F 14
+. 8B 45 F8 89 45 F4 8B 45 F8 80 38 00 75 08
+
+. 95 40047D45 7
+. 8D 45 F8 FF 00 EB D9
+
+. 96 40047D25 10
+. 8B 45 F8 8A 00 3A 45 FF 75 06
+
+. 97 40047D35 8
+. 8B 45 F8 80 38 00 75 08
+
+. 98 40047D3D 8
+. 8B 45 F4 89 45 F0 EB 07
+
+. 99 40047D4C 5
+. 8B 45 F0 C9 C3
+
+. 100 420D9EC7 4
+. 85 C0 74 22
+
+. 101 420D9ECB 34
+. 8D 48 01 8B 83 84 02 00 00 89 08 8B 0F 8B 83 44 03 00 00 89 08 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 102 42015819 5
+. E8 02 FF FF FF
+
+. 103 42015720 14
+. 55 89 E5 83 EC 08 89 1C 24 E8 2F 00 00 00
+
+. 104 4201572E 22
+. 81 C3 A2 4B 11 00 89 74 24 04 8D B3 F0 FF FF FF 8B 06 85 C0 75 0C
+
+. 105 42015750 5
+. 83 C6 04 FF D0
+
+. 106 42072110 14
+. 55 89 E5 83 EC 08 89 1C 24 E8 3F 36 FA FF
+
+. 107 4207211E 20
+. 81 C3 B2 81 0B 00 89 74 24 04 8B 83 10 03 00 00 85 C0 75 4E
+
+. 108 42072180 11
+. 8B 1C 24 8B 74 24 04 89 EC 5D C3
+
+. 109 42015755 6
+. 8B 06 85 C0 75 F5
+
+. 110 4201575B 2
+. EB E7
+
+. 111 42015744 11
+. 8B 1C 24 8B 74 24 04 89 EC 5D C3
+
+. 112 4201581E 13
+. 8B 7D FC 8B 5D F4 8B 75 F8 89 EC 5D C3
+
+. 113 4000A7A2 2
+. EB 8D
+
+. 114 4000A731 7
+. 8B 56 7C 85 D2 74 C4
+
+. 115 4046150C 11
+. 55 89 E5 83 EC 08 E8 E9 01 00 00
+
+. 116 40461700 27
+. 55 89 E5 53 50 E8 00 00 00 00 5B 81 C3 22 DA 01 00 8B 83 90 00 00 00 85 C0 74 02
+
+. 117 4046171D 5
+. 8B 5D FC C9 C3
+
+. 118 40461517 6
+. 90 E8 6F 02 00 00
+
+. 119 4046178C 27
+. 55 89 E5 53 50 E8 00 00 00 00 5B 81 C3 96 D9 01 00 8B 83 FC FF FF FF 85 C0 74 0A
+
+. 120 404617B1 5
+. 8B 5D FC C9 C3
+
+. 121 4046151D 5
+. E8 3A 7B 01 00
+
+. 122 4047905C 34
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 C6 60 00 00 8D 83 F0 FF FF FF 8D 70 FC 8B 40 FC 83 F8 FF 74 0E
+
+. 123 4047908C 4
+. 5B 5E C9 C3
+
+. 124 40461522 2
+. C9 C3
+
+. 125 4000A7CE 11
+. 80 E2 03 84 D2 0F 84 23 FF FF FF
+
+. 126 4000A703 17
+. 8B 4D E4 31 FF C7 41 0C 00 00 00 00 E8 9C 05 00 00
+
+. 127 4000A714 16
+. 89 BB 4C FB FF FF 83 C4 3C 5B 5E 5F 5D C2 04 00
+
+. 128 40000B6D 8
+. 8D 93 98 84 FF FF FF E7
+
+. 129 8048750 33
+. 31 ED 5E 89 E1 83 E4 F0 50 54 52 68 00 E8 05 08 68 F8 85 04 08 51 56 68 44 B8 04 08 E8 6F FF FF FF
+
+. 130 80486E0 6
+. FF 25 50 11 06 08
+
+. 131 80486E6 10
+. 68 60 00 00 00 E9 20 FF FF FF
+
+. 132 8048610 12
+. FF 35 18 11 06 08 FF 25 1C 11 06 08
+
+. 133 40007CEC 13
+. F7 45 28 01 00 00 00 0F 85 A7 00 00 00
+
+. 134 40007DDD 15
+. 89 04 24 8B 55 E8 89 54 24 04 E8 B4 02 00 00
+
+. 135 400080A0 11
+. 55 89 E5 57 56 53 E8 B1 7A 00 00
+
+. 136 400080AB 30
+. 81 C3 0D A4 00 00 83 EC 0C 8B 45 08 8B 75 0C 89 04 24 8B 46 04 89 44 24 04 E8 77 63 00 00
+
+. 137 400080C9 15
+. 8B 76 14 85 C0 0F 94 C2 0F B6 FA 85 FF 75 2A
+
+. 138 400080D8 4
+. 85 F6 74 26
+
+. 139 400080DC 21
+. 8D 74 26 00 8B 45 08 89 04 24 8B 06 89 44 24 04 E8 4F 63 00 00
+
+. 140 400080F1 4
+. 85 C0 75 1B
+
+. 141 40008110 5
+. 8B 76 04 EB E5
+
+. 142 400080FA 4
+. 85 FF 75 04
+
+. 143 400080FE 4
+. 85 F6 75 DE
+
+. 144 40008102 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3
+
+. 145 40007DEC 9
+. BA FF FF FF FF 85 C0 74 CD
+
+. 146 40007E40 20
+. 8B 04 0A 8B 4D 20 89 04 24 8B 01 89 44 24 04 E8 EC 65 00 00
+
+. 147 40007E54 8
+. 85 C0 0F 85 09 FF FF FF
+
+. 148 40007E5C 2
+. EB 99
+
+. 149 42015830 11
+. 55 89 E5 57 56 53 E8 22 FF FF FF
+
+. 150 4201583B 48
+. 81 C3 95 4A 11 00 83 EC 0C 8B 55 0C 8B 7D 10 8B 4D 1C 8B 75 18 8D 44 97 04 8B 93 14 02 00 00 85 C9 89 02 8B 83 48 03 00 00 8B 55 20 89 10 74 18
+
+. 151 4201586B 24
+. 89 0C 24 C7 44 24 04 00 00 00 00 C7 44 24 08 00 00 00 00 E8 7D 5A 01 00
+
+. 152 4202B300 14
+. 55 89 E5 83 EC 08 89 5D FC E8 4F A4 FE FF
+
+. 153 4202B30E 11
+. 81 C3 C2 EF 0F 00 E8 F7 FE FF FF
+
+. 154 4202B210 13
+. 55 89 E5 57 31 FF 56 53 E8 40 A5 FE FF
+
+. 155 4202B21D 23
+. 81 C3 B3 F0 0F 00 83 EC 0C 8B 83 88 01 00 00 85 C0 0F 85 B9 00 00 00
+
+. 156 4202B234 10
+. 8B B3 5C C2 FF FF 85 F6 74 2F
+
+. 157 4202B23E 11
+. 89 F6 8B 56 04 31 FF 39 D7 73 19
+
+. 158 4202B262 5
+. 83 FA 1F 76 6E
+
+. 159 4202B2D5 12
+. 8D 4A 01 89 D7 89 4E 04 85 F6 75 B5
+
+. 160 4202B296 24
+. 89 F9 C1 E1 04 B8 01 00 00 00 89 44 0E 08 8B 83 44 02 00 00 85 C0 75 17
+
+. 161 4202B2AE 4
+. 85 F6 74 0F
+
+. 162 4202B2B2 15
+. C1 E7 04 8D 44 37 08 83 C4 0C 5B 5E 5F 5D C3
+
+. 163 4202B319 11
+. 89 C2 B8 FF FF FF FF 85 D2 74 1A
+
+. 164 4202B324 33
+. C7 02 04 00 00 00 8B 45 08 89 42 04 8B 45 0C 89 42 08 8B 45 10 89 42 0C 31 C0 8B 5D FC 89 EC 5D C3
+
+. 165 42015883 4
+. 85 F6 74 18
+
+. 166 42015887 24
+. 89 34 24 C7 44 24 04 00 00 00 00 C7 44 24 08 00 00 00 00 E8 61 5A 01 00
+
+. 167 4202B249 17
+. 8D 46 08 8D 74 26 00 8B 08 85 C9 0F 84 89 00 00 00
+
+. 168 4202B25A 8
+. 47 83 C0 10 39 D7 72 EE
+
+. 169 4201589F 12
+. 8B B3 24 02 00 00 F6 46 20 02 75 47
+
+. 170 420158AB 7
+. 8B 45 14 85 C0 74 03
+
+. 171 420158B2 3
+. FF 55 14
+
+. 172 80485F8 11
+. 55 89 E5 83 EC 08 E8 71 01 00 00
+
+. 173 8048774 27
+. 55 89 E5 53 50 E8 00 00 00 00 5B 81 C3 96 89 01 00 8B 83 6C 00 00 00 85 C0 74 02
+
+. 174 8048791 5
+. 8B 5D FC C9 C3
+
+. 175 8048603 6
+. 90 E8 CB 01 00 00
+
+. 176 80487D4 15
+. 55 89 E5 83 EC 08 A1 10 11 06 08 85 C0 74 19
+
+. 177 80487FC 2
+. C9 C3
+
+. 178 8048609 5
+. E8 CE 61 01 00
+
+. 179 805E7DC 20
+. 55 89 E5 53 52 A1 00 11 06 08 83 F8 FF BB 00 11 06 08 74 0C
+
+. 180 805E7FC 4
+. 58 5B C9 C3
+
+. 181 804860E 2
+. C9 C3
+
+. 182 420158B5 6
+. F6 46 20 02 75 21
+
+. 183 420158BB 25
+. 89 7C 24 04 8B 45 0C 8B 8B 14 02 00 00 89 04 24 8B 01 89 44 24 08 FF 55 08
+
+. 184 804B844 26
+. 55 89 E5 57 56 53 83 EC 0C 83 E4 F0 C7 45 F0 01 00 00 00 31 DB E8 02 1C 01 00
+
+. 185 805D460 33
+. 55 89 E5 57 56 53 E8 00 00 00 00 5B 81 C3 A9 3C 00 00 83 EC 18 8D 83 F8 F8 FF FF 50 E8 DF B1 FE FF
+
+. 186 8048660 6
+. FF 25 30 11 06 08
+
+. 187 8048666 10
+. 68 20 00 00 00 E9 A0 FF FF FF
+
+. 188 4202A920 17
+. 55 89 E5 83 EC 28 89 5D F4 8B 45 08 E8 2C AE FE FF
+
+. 189 4202A931 20
+. 81 C3 9F F9 0F 00 89 75 F8 89 7D FC 89 04 24 E8 5F AD FE FF
+
+. 190 420156A4 6
+. FF A3 3C 01 00 00
+
+. 191 420156AA 10
+. 68 60 02 00 00 E9 20 FB FF FF
+
+. 192 40047DDC 24
+. 55 89 E5 83 EC 04 C7 45 FC 00 00 00 00 8B 45 FC 03 45 08 80 38 00 75 02
+
+. 193 40047DF6 7
+. 8D 45 FC FF 00 EB EC
+
+. 194 40047DE9 11
+. 8B 45 FC 03 45 08 80 38 00 75 02
+
+. 195 40047DF4 2
+. EB 07
+
+. 196 40047DFD 5
+. 8B 45 FC C9 C3
+
+. 197 4202A945 14
+. 8B B3 14 02 00 00 89 C1 8B 06 85 C0 74 0A
+
+. 198 4202A953 10
+. 8B 7D 08 0F B6 17 84 D2 75 13
+
+. 199 4202A970 9
+. 8B 7D 08 80 7F 01 00 75 47
+
+. 200 4202A9C0 27
+. 8B 55 08 83 E9 02 8B 3E 83 45 08 02 0F B7 02 89 4D F0 8B 37 89 45 EC 85 F6 74 82
+
+. 201 4202A9DB 14
+. 90 8D 74 26 00 0F B7 0E 66 39 4D EC 74 17
+
+. 202 4202A9E9 11
+. 83 C7 04 8B 07 85 C0 89 C6 75 EC
+
+. 203 4202A9E0 9
+. 0F B7 0E 66 39 4D EC 74 17
+
+. 204 4202A9F4 5
+. E9 64 FF FF FF
+
+. 205 4202A95D 15
+. 31 C0 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 206 805D481 32
+. 31 FF 83 C4 10 85 C0 89 C2 C7 45 E8 00 00 00 00 C7 45 EC 00 00 00 00 C7 45 F0 00 00 00 00 74 05
+
+. 207 805D4A6 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 208 804B85E 15
+. 83 7D 08 02 BF 63 00 00 00 0F 84 95 00 00 00
+
+. 209 804B86D 4
+. 85 DB 74 05
+
+. 210 804B876 9
+. BE 01 00 00 00 39 FE 77 3F
+
+. 211 804B87F 6
+. 90 3B 75 F0 72 33
+
+. 212 804B885 15
+. BB 01 00 00 00 83 EC 08 56 53 E8 4C D8 FF FF
+
+. 213 80490E0 37
+. 55 89 E5 57 56 53 83 EC 28 8B 55 0C 8B 45 08 89 D6 0F AF F0 89 F3 C1 E3 04 89 55 EC 89 45 F0 53 E8 8B F5 FF FF
+
+. 214 8048690 6
+. FF 25 3C 11 06 08
+
+. 215 8048696 10
+. 68 38 00 00 00 E9 70 FF FF FF
+
+. 216 400472F0 16
+. 55 89 E5 53 83 EC 44 80 3D C2 CB 32 40 00 74 1B
+
+. 217 4004731B 6
+. 83 7D 08 00 79 21
+
+. 218 40047342 9
+. 80 3D E8 92 31 40 00 74 31
+
+. 219 4004737C 9
+. 80 3D C8 82 0D 40 00 74 4F
+
+. 220 40047385 64
+. C7 45 C8 01 20 00 00 8B 45 08 89 45 CC C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 C7 45 D8 00 00 00 00 8D 5D C8 B9 00 00 00 00 89 D8 89 CA C1 C0 1D C1 C0 03 C1 C8 1B C1 C8 05 C1 C0 0D C1 C0 13
+
+. 221 400473C5 15
+. 89 D1 89 C8 89 45 F0 8B 45 F0 89 45 F4 EB 13
+
+. 222 400473E7 9
+. 80 3D C2 CB 32 40 00 74 13
+
+. 223 40047403 8
+. 8B 45 F4 8B 5D FC C9 C3
+
+. 224 8049105 11
+. 89 45 E8 89 1C 24 E8 80 F5 FF FF
+
+. 225 8049110 11
+. 89 45 E4 89 1C 24 E8 75 F5 FF FF
+
+. 226 804911B 15
+. 89 45 E0 31 FF 8D 04 36 83 C4 10 39 C7 73 17
+***** EXPENSIVE 228 1
+
+. 227 804912A 23
+. 89 C1 31 D2 52 57 DF 2C 24 8B 5D E4 DD 1C FB 47 83 C4 08 39 CF 72 EB
+***** EXPENSIVE 229 2
+vex iropt: 2 x unrolling (34 sts -> 68 sts)
+
+. 228 804912C 21
+. 31 D2 52 57 DF 2C 24 8B 5D E4 DD 1C FB 47 83 C4 08 39 CF 72 EB
+
+. 229 8049141 22
+. 8B 4D EC 0F AF 4D F0 C1 E1 04 C1 E9 02 FC 8B 7D E8 8B 75 E4 F3 A5
+vex iropt: 8 x unrolling (15 sts -> 120 sts)
+
+. 230 8049155 2
+. F3 A5
+
+. 231 8049157 8
+. 31 F6 83 7D EC 01 76 11
+
+. 232 8049170 7
+. 31 FF 3B 7D EC 73 5B
+
+. 233 8049177 27
+. 8B 5D F0 C1 E3 04 C7 45 DC 00 00 00 00 89 5D D8 90 31 C9 31 DB 39 F1 89 FA 73 12
+***** EXPENSIVE 235 3
+
+. 234 80491A4 46
+. 8B 45 F0 8B 55 DC 0F AF C1 8B 4D E4 C1 E0 04 DD 04 11 8B 5D E0 DD 1C 03 DD 44 11 08 DD 5C 03 08 47 8B 45 D8 01 C2 3B 7D EC 89 55 DC 72 B6
+
+. 235 80491D2 15
+. 56 FF 75 EC FF 75 F0 FF 75 E4 E8 9B F8 FF FF
+
+. 236 8048A7C 29
+. 55 89 E5 57 56 53 83 EC 10 8B 45 10 48 31 F6 39 C6 C7 45 F0 00 00 00 00 89 45 EC 73 6A
+
+. 237 8048B03 10
+. 83 C4 10 5B 5E 31 C0 5F C9 C3
+
+. 238 80491E1 34
+. 68 80 84 2E 41 6A 00 FF 75 EC FF 75 F0 FF 75 E4 68 A0 EC 05 08 FF 75 E0 68 0B E9 05 08 E8 FD F5 FF FF
+***** EXPENSIVE 240 4
+
+. 239 8048800 18
+. 55 89 E5 57 56 53 83 EC 1C 31 F6 3B 75 1C D9 EE 73 49
+***** EXPENSIVE 241 5
+
+. 240 8048812 61
+. 8B 55 18 DD 05 20 E9 05 08 31 DB C1 E2 04 8B 45 0C 8B 4D 14 DD 04 18 DD 04 19 DE E1 DD 44 18 08 DC 6C 19 08 D9 C9 D9 E1 D9 C9 D9 E1 DE C1 D8 C9 DD E2 DF E0 F6 C4 45 0F 85 01 01 00 00
+***** EXPENSIVE 242 6
+
+. 241 8048950 7
+. DD D8 E9 FA FE FF FF
+
+. 242 8048851 8
+. 46 01 D3 3B 75 1C 72 C7
+***** EXPENSIVE 244 7
+
+. 243 8048859 14
+. DD D8 DD 45 20 DD E9 DF E0 F6 C4 45 75 0C
+***** EXPENSIVE 245 8
+
+. 244 8048867 12
+. DD D8 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 245 8049203 17
+. 83 C4 2C FF 75 EC 68 E0 EC 05 08 50 E8 24 4C 01 00
+
+. 246 805DE38 30
+. 55 89 E5 56 53 8B 45 08 E8 00 00 00 00 5B 81 C3 CF 32 00 00 FF 83 08 FF FF FF 85 C0 75 66
+
+. 247 805DE56 16
+. 8B B3 14 FF FF FF FF 83 0C FF FF FF 85 F6 75 0A
+
+. 248 805DE70 15
+. 83 EC 0C 8D 83 84 FC FF FF 50 E8 71 A8 FE FF
+
+. 249 80486F0 6
+. FF 25 54 11 06 08
+
+. 250 80486F6 10
+. 68 68 00 00 00 E9 10 FF FF FF
+
+. 251 42052390 17
+. 55 89 E5 83 EC 18 89 5D FC 8D 4D 0C E8 BC 33 FC FF
+
+. 252 420523A1 33
+. 81 C3 2F 7F 0D 00 8B 83 7C 01 00 00 8B 00 89 4C 24 08 89 04 24 8B 45 08 89 44 24 04 E8 3E 5B FF FF
+
+. 253 42047F00 13
+. 55 89 E5 31 C0 57 56 53 E8 50 D8 FC FF
+
+. 254 42047F0D 31
+. 81 C3 C3 23 0E 00 81 EC 1C 07 00 00 89 85 C0 FA FF FF 31 C0 89 85 B0 FA FF FF E8 18 D7 FC FF
+
+. 255 42015644 6
+. FF A3 24 01 00 00
+
+. 256 4201564A 10
+. 68 30 02 00 00 E9 80 FB FF FF
+
+. 257 40007E06 5
+. 83 F8 02 75 A9
+
+. 258 40007E0B 10
+. 8B 83 C4 FB FF FF 85 C0 75 12
+
+. 259 40007E27 9
+. 8B 45 14 8B 08 85 C9 75 84
+
+. 260 40007E30 13
+. 89 38 8B 7D E8 89 78 04 E9 77 FF FF FF
+
+. 261 40007DD1 12
+. 31 D2 83 C4 2C 89 D0 5B 5E 5F 5D C3
+
+. 262 40006E82 8
+. 85 C0 0F 88 77 02 00 00
+
+. 263 40006E8A 13
+. 83 45 80 04 8B 45 80 8B 00 85 C0 75 9F
+
+. 264 42015B90 9
+. 55 89 E5 53 E8 C4 FB FF FF
+
+. 265 42015B99 15
+. 81 C3 37 47 11 00 8B 83 A4 01 00 00 5B 5D C3
+
+. 266 42047F2C 17
+. 8B 00 89 85 A0 FA FF FF 8B 45 08 80 78 46 00 75 36
+
+. 267 42047F3D 14
+. 8B 93 10 03 00 00 85 D2 0F 84 93 41 00 00
+
+. 268 42047F4B 7
+. 8B 40 5C 85 C0 75 0F
+
+. 269 42047F52 23
+. 8B 4D 08 B8 FF FF FF FF C7 41 5C FF FF FF FF 40 BA FF FF FF FF 74 0A
+
+. 270 42047F73 12
+. 8B 7D 08 8B 07 A9 08 00 00 00 74 12
+
+. 271 42047F91 11
+. 8B 4D 0C 85 C9 0F 84 32 41 00 00
+
+. 272 42047F9C 9
+. 8B 7D 08 80 7F 46 00 75 2E
+
+. 273 42047FA5 14
+. 8B 83 10 03 00 00 85 C0 0F 84 03 41 00 00
+
+. 274 42047FB3 7
+. 8B 47 5C 85 C0 75 0C
+
+. 275 42047FC6 8
+. 40 BA FF FF FF FF 75 9B
+
+. 276 42047FCE 14
+. 8B 75 08 8B 06 83 E0 02 0F 85 C5 40 00 00
+
+. 277 42047FDC 72
+. 8B 55 10 B8 FF FF FF FF 31 F6 89 85 BC FA FF FF 31 C9 8D BD 30 FB FF FF 89 B5 A4 FA FF FF 31 C0 89 95 A8 FA FF FF 8B 55 0C 89 BD 9C FA FF FF 89 8D 30 FB FF FF 89 85 34 FB FF FF 89 95 98 FA FF FF 0F B6 02 84 C0 74 7F
+
+. 278 42048024 4
+. 3C 25 74 7B
+
+. 279 42048028 35
+. 90 8D B4 26 00 00 00 00 8B 85 9C FA FF FF C7 00 00 00 00 00 8B 85 98 FA FF FF F6 00 80 0F 84 3D 40 00 00
+
+. 280 4204C088 11
+. FF 85 98 FA FF FF E9 FF BF FF FF
+
+. 281 42048092 13
+. 8B 95 98 FA FF FF 0F B6 02 84 C0 74 04
+
+. 282 4204809F 4
+. 3C 25 75 8D
+
+. 283 42048030 27
+. 8B 85 9C FA FF FF C7 00 00 00 00 00 8B 85 98 FA FF FF F6 00 80 0F 84 3D 40 00 00
+
+. 284 420480A3 38
+. 31 C9 8B B5 98 FA FF FF 83 BB A4 02 00 00 00 89 B5 28 FB FF FF 0F 95 C1 85 C9 89 8D 94 FA FF FF 0F 85 9B 3F 00 00
+
+. 285 420480C9 33
+. 8B 7D 08 0F BE 4F 46 8B 84 39 94 00 00 00 89 3C 24 8B 55 0C 29 D6 89 54 24 04 89 74 24 08 FF 50 1C
+
+. 286 4206D850 11
+. 55 89 E5 57 56 53 E8 02 7F FA FF
+
+. 287 4206D85B 34
+. 81 C3 75 CA 0B 00 83 EC 2C C7 45 EC 00 00 00 00 8B 45 10 8B 55 10 8B 7D 0C 89 45 F0 31 C0 85 D2 74 6F
+
+. 288 4206D87D 31
+. 8B 45 08 8B 70 18 89 C1 8B 50 14 8B 00 29 D6 25 00 0A 00 00 3D 00 0A 00 00 0F 84 FA 01 00 00
+
+. 289 4206D89C 4
+. 85 F6 74 38
+
+. 290 4206D8D8 12
+. 8B 45 F0 8B 75 EC 01 F0 85 C0 75 10
+
+. 291 4206D8F4 28
+. 8B 55 08 0F BE 4A 46 8B 84 11 94 00 00 00 C7 44 24 04 FF FF FF FF 89 14 24 FF 50 0C
+
+. 292 4206CE30 11
+. 55 89 E5 57 56 53 E8 22 89 FA FF
+
+. 293 4206CE3B 22
+. 81 C3 95 D4 0B 00 83 EC 2C 8B 75 08 8B 16 F7 C2 08 00 00 00 74 1F
+
+. 294 4206CE70 12
+. F7 C2 00 08 00 00 0F 84 34 03 00 00
+
+. 295 4206D1B0 11
+. 8B 46 10 85 C0 0F 85 DE FC FF FF
+
+. 296 4206D1BB 5
+. E9 C3 FC FF FF
+
+. 297 4206CE83 8
+. 89 34 24 E8 65 2E 00 00
+
+. 298 4206FCF0 20
+. 55 89 E5 83 EC 18 89 75 FC 8B 75 08 89 5D F8 E8 59 5A FA FF
+
+. 299 4206FD04 13
+. 81 C3 CC A5 0B 00 8B 46 1C 85 C0 75 20
+
+. 300 4206FD11 5
+. F6 06 02 74 07
+
+. 301 4206FD1D 17
+. 0F BE 56 46 8B 84 32 94 00 00 00 89 34 24 FF 50 34
+
+. 302 42062340 16
+. 55 89 E5 57 BF 00 20 00 00 56 53 E8 0D 34 FB FF
+
+. 303 42062350 22
+. 81 C3 80 7F 0C 00 81 EC 8C 00 00 00 8B 75 08 8B 46 38 85 C0 78 33
+
+. 304 42062366 24
+. 0F BE 56 46 8B 8C 32 94 00 00 00 8D 55 88 89 34 24 89 54 24 04 FF 51 48
+
+. 305 4206E3C0 35
+. 55 89 E5 83 EC 18 C7 04 24 03 00 00 00 8B 45 08 8B 40 38 89 44 24 04 8B 45 0C 89 44 24 08 E8 7D EA 05 00
+
+. 306 420CCE60 17
+. 55 89 E5 83 EC 08 89 1C 24 8B 55 0C E8 EC 88 F4 FF
+
+. 307 420CCE71 22
+. 81 C3 5F D4 05 00 89 74 24 04 8B 4D 10 87 D3 B8 C5 00 00 00 CD 80
+
+. 308 420CCE87 11
+. 87 D3 3D 00 F0 FF FF 89 C6 76 0E
+
+. 309 420CCEA0 13
+. 89 F0 8B 1C 24 8B 74 24 04 89 EC 5D C3
+
+. 310 4206E3E3 4
+. 89 EC 5D C3
+
+. 311 4206237E 4
+. 85 C0 78 17
+
+. 312 42062382 15
+. 8B 45 98 25 00 F0 00 00 3D 00 20 00 00 74 7F
+
+. 313 42062391 68
+. 8B 45 BC 85 C0 0F 4F F8 C7 04 24 00 00 00 00 8D 97 FF 0F 00 00 81 E2 00 F0 FF FF 89 54 24 04 C7 44 24 08 03 00 00 00 C7 44 24 0C 22 00 00 00 C7 44 24 10 FF FF FF FF C7 44 24 14 00 00 00 00 E8 BB 48 07 00
+
+. 314 420D6C90 41
+. 55 53 56 57 8B 5C 24 14 8B 4C 24 18 8B 54 24 1C 8B 74 24 20 8B 7C 24 24 8B 6C 24 28 F7 C5 FF 0F 00 00 B8 EA FF FF FF 75 0A
+
+. 315 420D6CB9 10
+. C1 ED 0C B8 C0 00 00 00 CD 80
+
+. 316 420D6CC3 11
+. 5F 5E 5B 5D 3D 00 F0 FF FF 77 01
+
+. 317 420D6CCE 1
+. C3
+
+. 318 420623D5 10
+. BA FF FF FF FF 83 F8 FF 74 1F
+
+. 319 420623DF 26
+. 89 44 24 04 01 F8 89 34 24 89 44 24 08 C7 44 24 0C 01 00 00 00 E8 77 D8 00 00
+
+. 320 4206FC70 20
+. 55 89 E5 83 EC 18 89 75 FC 8B 75 08 89 5D F8 E8 D9 5A FA FF
+
+. 321 4206FC84 13
+. 81 C3 4C A6 0B 00 8B 4E 1C 85 C9 74 4F
+
+. 322 4206FCE0 4
+. 8B 16 EB B7
+
+. 323 4206FC9B 19
+. 8B 45 0C 89 46 1C 8B 45 10 89 46 20 8B 45 14 85 C0 74 12
+
+. 324 4206FCAE 15
+. 83 E2 FE 89 16 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 325 420623F9 18
+. BA 01 00 00 00 81 C4 8C 00 00 00 89 D0 5B 5E 5F 5D C3
+
+. 326 4206FD2E 3
+. 40 74 0F
+
+. 327 4206FD31 10
+. 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 328 4206CE8B 28
+. 8B 16 8B 46 1C 89 46 0C 89 46 04 89 46 08 8B 4E 04 8B 7E 20 39 F9 0F 84 F9 02 00 00
+
+. 329 4206CEA7 38
+. 89 4E 14 8B 46 08 89 4E 10 89 7E 18 89 46 04 89 46 0C 89 D0 8B 56 5C 0D 00 08 00 00 89 06 85 D2 0F 8E C0 02 00 00
+
+. 330 4206D18D 11
+. A9 02 02 00 00 0F 84 35 FD FF FF
+
+. 331 4206CECD 10
+. 83 7D 0C FF 0F 84 A9 01 00 00
+
+. 332 4206D080 13
+. 8B 7E 10 8B 46 14 29 F8 89 45 F0 74 7D
+
+. 333 4206D10A 7
+. 31 C0 E9 55 FD FF FF
+
+. 334 4206CE66 8
+. 83 C4 2C 5B 5E 5F 5D C3
+
+. 335 4206D910 3
+. 40 74 D1
+
+. 336 4206D913 22
+. 8B 55 08 8B 42 1C 8B 4A 20 29 C1 8B 45 F0 83 F9 7F 89 45 E8 76 0C
+
+. 337 4206D929 23
+. 31 D2 F7 F1 8B 45 F0 29 D0 89 45 E8 8B 4D E8 85 C9 0F 84 80 00 00 00
+
+. 338 4206D9C0 11
+. 8B 45 F0 85 C0 0F 84 19 FF FF FF
+
+. 339 4206D9CB 28
+. 8B 4D E8 8B 75 08 8B 45 F0 8D 14 39 89 34 24 89 54 24 04 89 44 24 08 E8 29 19 00 00
+
+. 340 4206F310 26
+. 55 89 E5 57 56 83 EC 10 8B 45 10 8B 55 10 8B 75 0C 89 45 F4 31 C0 85 D2 74 7E
+
+. 341 4206F32A 21
+. 8D B6 00 00 00 00 8B 4D 08 8B 79 18 8B 41 14 29 C7 85 FF 7E 34
+
+. 342 4206F33F 12
+. 3B 7D F4 0F 47 7D F4 83 FF 14 7F 68
+
+. 343 4206F34B 4
+. 85 FF 7E 60
+
+. 344 4206F34F 7
+. 89 FA 4A 89 C1 78 14
+
+. 345 4206F356 20
+. 8D 76 00 8D BC 27 00 00 00 00 0F B6 06 46 88 01 41 4A 79 F6
+vex iropt: 4 x unrolling (26 sts -> 104 sts)
+
+. 346 4206F360 10
+. 0F B6 06 46 88 01 41 4A 79 F6
+
+. 347 4206F36A 16
+. 8B 45 08 89 48 14 29 7D F4 8B 45 F4 85 C0 74 26
+
+. 348 4206F3A0 15
+. 8B 45 10 8B 75 F4 29 F0 83 C4 10 5E 5F 5D C3
+
+. 349 4206D9E7 8
+. 29 45 F0 E9 F5 FE FF FF
+
+. 350 4206D8E4 16
+. 8B 45 10 8B 7D F0 29 F8 83 C4 2C 5B 5E 5F 5D C3
+
+. 351 420480EA 15
+. B9 FF FF FF FF 89 8D B8 FA FF FF 39 F0 74 47
+
+. 352 42048140 17
+. 89 85 B8 FA FF FF 8B B5 28 FB FF FF 80 3E 00 74 A8
+
+. 353 420480F9 10
+. 8B 85 B0 FA FF FF 85 C0 75 2D
+
+. 354 42048103 10
+. 8B 85 94 FA FF FF 85 C0 75 0B
+
+. 355 4204810D 11
+. 8B 95 B8 FA FF FF E9 51 FE FF FF
+
+. 356 42047F69 10
+. 8D 65 F4 89 D0 5B 5E 5F 5D C3
+
+. 357 420523C2 7
+. 8B 5D FC 89 EC 5D C3
+
+. 358 805DE7F 13
+. 8B 83 14 FF FF FF 83 C4 10 85 C0 74 DA
+
+. 359 805DE8C 21
+. 52 8D 45 10 50 FF 75 0C 8B B3 64 00 00 00 FF 36 E8 0F A8 FE FF
+
+. 360 80486B0 6
+. FF 25 44 11 06 08
+
+. 361 80486B6 10
+. 68 48 00 00 00 E9 50 FF FF FF
+
+. 362 42047F61 8
+. 40 BA FF FF FF FF 74 0A
+
+. 363 4206D8A0 16
+. 3B 75 10 0F 47 75 10 83 FE 14 0F 87 C3 01 00 00
+
+. 364 4206DA73 19
+. 89 14 24 8B 45 0C 89 74 24 08 89 44 24 04 E8 0A E4 00 00
+
+. 365 4207BE90 21
+. 8B 4C 24 0C 89 F8 8B 7C 24 04 89 F2 8B 74 24 08 FC D1 E9 73 01
+
+. 366 4207BEA6 4
+. D1 E9 73 02
+
+. 367 4207BEAA 4
+. 66 A5 F3 A5
+vex iropt: 8 x unrolling (15 sts -> 120 sts)
+
+. 368 4207BEAC 2
+. F3 A5
+
+. 369 4207BEAE 4
+. 97 89 D6 C3
+
+. 370 4206DA86 16
+. 8B 7D 0C 8B 4D 08 01 F7 89 41 14 E9 3A FE FF FF
+
+. 371 4206D8D0 20
+. 8B 45 10 29 F0 89 45 F0 8B 45 F0 8B 75 EC 01 F0 85 C0 75 10
+
+. 372 42048151 178
+. 8D 45 E0 8D 93 A1 1C F2 FF 89 85 20 F9 FF FF 89 95 24 F9 FF FF C6 85 4F FA FF FF 20 31 C0 31 C9 89 85 64 FA FF FF 31 D2 31 FF 89 8D 7C FA FF FF B8 FF FF FF FF 31 C9 89 95 78 FA FF FF 31 D2 89 BD 74 FA FF FF 31 FF 89 85 54 FA FF FF 8B 85 20 F9 FF FF 89 8D 70 FA FF FF 31 C9 89 95 6C FA FF FF 31 D2 89 BD 68 FA FF FF 31 FF 89 8D 60 FA FF FF 31 C9 89 95 5C FA FF FF 31 D2 89 BD 58 FA FF FF 89 F7 46 89 8D 50 FA FF FF 89 95 B0 FA FF FF 89 85 AC FA FF FF 89 B5 28 FB FF FF 0F B6 4F 01 88 8D 4E FA FF FF 80 E9 20 80 F9 5A 0F 86 8D 04 00 00
+
+. 373 42048690 26
+. 0F BE BD 4E FA FF FF 8B 84 BB B0 1D FF FF 8B 84 83 D0 D5 FF FF E9 5B FB FF FF
+
+. 374 42048205 22
+. 8B 8D 24 F9 FF FF 8D BD 28 FB FF FF 89 BD 18 F9 FF FF 01 C8 FF E0
+
+. 375 42049335 25
+. B8 0A 00 00 00 89 85 84 FA FF FF 8B 85 68 FA FF FF 85 C0 0F 84 1F 07 00 00
+
+. 376 42049A6D 47
+. 8B 45 10 83 45 10 04 8B 00 89 C1 89 C2 F7 D9 C1 EA 1F 0F 45 C1 89 95 90 FA FF FF 89 85 88 FA FF FF 8B 85 54 FA FF FF 85 C0 0F 88 9D 02 00 00
+
+. 377 42049D39 16
+. BA 01 00 00 00 89 95 54 FA FF FF E9 5A FD FF FF
+
+. 378 42049AA3 10
+. 8B BD 54 FA FF FF 85 FF 75 43
+
+. 379 42049AF0 58
+. 80 BD 4E FA FF FF 58 8D BB 90 1C FF FF 8D 93 50 1C FF FF 8B 8D 88 FA FF FF 8B B5 AC FA FF FF 0F 94 C0 A8 01 0F 45 D7 83 BD 84 FA FF FF 0A 89 95 2C FA FF FF 0F 84 E6 01 00 00
+vex iropt: 4 x unrolling (29 sts -> 116 sts)
+
+. 380 42049D10 36
+. B8 CD CC CC CC 4E F7 E1 C1 EA 03 8D 3C 92 01 FF 29 F9 85 D2 8B BD 2C FA FF FF 0F B6 04 39 89 D1 88 06 75 DC
+
+. 381 42049D34 5
+. E9 28 FE FF FF
+
+. 382 42049B61 16
+. 89 B5 80 FA FF FF 8B 85 6C FA FF FF 85 C0 74 33
+
+. 383 42049BA4 14
+. 8B B5 50 FA FF FF 85 F6 0F 84 2E F8 FF FF
+
+. 384 420493E0 22
+. 8B 85 AC FA FF FF 8B B5 80 FA FF FF 29 F0 39 85 54 FA FF FF 7F 21
+
+. 385 420493F6 10
+. 8B 85 88 FA FF FF 85 C0 74 17
+
+. 386 42049400 10
+. 8B 8D 7C FA FF FF 85 C9 74 0D
+
+. 387 42049417 52
+. 8B 95 AC FA FF FF B8 00 00 00 00 8B 8D 80 FA FF FF 8B BD 74 FA FF FF 29 CA 29 95 54 FA FF FF 0F 49 85 54 FA FF FF 85 FF 89 85 54 FA FF FF 0F 85 57 02 00 00
+
+. 388 4204944B 19
+. 8D 34 10 8B 85 88 FA FF FF 29 B5 58 FA FF FF 85 C0 74 27
+
+. 389 4204945E 10
+. 8B 95 7C FA FF FF 85 D2 74 1D
+
+. 390 42049485 10
+. 8B 85 90 FA FF FF 85 C0 75 14
+
+. 391 4204948F 10
+. 8B BD 70 FA FF FF 85 FF 75 0A
+
+. 392 42049499 10
+. 8B 8D 78 FA FF FF 85 C9 74 06
+
+. 393 420494A9 13
+. 80 BD 4F FA FF FF 20 0F 84 B2 01 00 00
+
+. 394 42049668 10
+. 8B 85 58 FA FF FF 85 C0 7E 23
+
+. 395 42049695 13
+. 31 D2 89 95 58 FA FF FF E9 14 FE FF FF
+
+. 396 420494B6 14
+. 8B 85 90 FA FF FF 85 C0 0F 84 40 01 00 00
+
+. 397 42049604 10
+. 8B 85 70 FA FF FF 85 C0 74 26
+
+. 398 42049634 14
+. 8B 85 78 FA FF FF 85 C0 0F 84 A0 FE FF FF
+
+. 399 420494E2 10
+. 8B 85 88 FA FF FF 85 C0 74 13
+
+. 400 420494EC 10
+. 8B 8D 7C FA FF FF 85 C9 74 09
+
+. 401 420494FF 22
+. 8B 85 54 FA FF FF 01 85 58 FA FF FF 8B 95 58 FA FF FF 85 D2 7E 23
+
+. 402 42049538 42
+. 8B 4D 08 0F BE 71 46 8B 84 0E 94 00 00 00 89 0C 24 8B 95 80 FA FF FF 89 54 24 04 8B BD AC FA FF FF 29 D7 89 7C 24 08 FF 50 1C
+
+. 403 4206D8B0 7
+. 89 D1 89 F2 4A 78 13
+
+. 404 4206D8B7 19
+. 89 F6 8D BC 27 00 00 00 00 0F B6 07 47 88 01 41 4A 79 F6
+
+. 405 4206D8CA 26
+. 8B 45 08 89 48 14 8B 45 10 29 F0 89 45 F0 8B 45 F0 8B 75 EC 01 F0 85 C0 75 10
+
+. 406 42049562 8
+. 39 F8 0F 85 15 F0 FF FF
+
+. 407 4204956A 11
+. 01 85 B8 FA FF FF E9 8C FC FF FF
+
+. 408 42049201 20
+. FF 85 A4 FA FF FF 8B 8D B0 FA FF FF 85 C9 0F 85 F1 00 00 00
+
+. 409 42049215 37
+. 8B 85 28 FB FF FF 31 F6 89 B5 B0 FA FF FF 8D 70 01 89 B5 28 FB FF FF 89 B5 B4 FA FF FF 0F B6 40 01 84 C0 74 5D
+
+. 410 42049297 48
+. 89 B5 28 FB FF FF 8B 45 08 0F BE 48 46 8B BC 01 94 00 00 00 89 04 24 8B 95 B4 FA FF FF 89 54 24 04 8B 85 28 FB FF FF 29 D0 89 44 24 08 FF 57 1C
+
+. 411 4206D8EC 8
+. 83 C4 2C 5B 5E 5F 5D C3
+
+. 412 420492C7 26
+. 8B B5 28 FB FF FF 89 C2 8B 85 B4 FA FF FF 89 F1 29 C1 39 CA 0F 85 9E F2 FF FF
+
+. 413 420492E1 15
+. 01 95 B8 FA FF FF 80 3E 00 0F 85 76 EE FF FF
+
+. 414 420492F0 5
+. E9 04 EE FF FF
+
+. 415 805DEA1 12
+. C7 04 24 0A 00 00 00 E8 D3 A7 FE FF
+
+. 416 8048680 6
+. FF 25 38 11 06 08
+
+. 417 8048686 10
+. 68 30 00 00 00 E9 80 FF FF FF
+
+. 418 42066030 13
+. 55 89 E5 57 31 FF 56 53 E8 20 F7 FA FF
+
+. 419 4206603D 28
+. 81 C3 93 42 0C 00 83 EC 2C 8B 93 50 D2 FF FF 0F B7 02 25 00 80 FF FF 66 85 C0 75 10
+
+. 420 42066059 24
+. 8B 83 A4 02 00 00 85 C0 B8 01 00 00 00 0F 45 F8 85 FF 0F 85 96 00 00 00
+
+. 421 42066071 13
+. 0F B7 02 25 00 80 FF FF 66 85 C0 74 76
+
+. 422 420660F4 8
+. 89 14 24 E8 08 F2 FA FF
+
+. 423 42015304 6
+. FF A3 54 00 00 00
+
+. 424 4201530A 10
+. 68 90 00 00 00 E9 C0 FE FF FF
+
+. 425 42062310 5
+. 55 89 E5 5D C3
+
+. 426 420660FC 11
+. 8B 93 50 D2 FF FF E9 77 FF FF FF
+
+. 427 4206607E 8
+. 8B 4A 14 3B 4A 18 73 5A
+
+. 428 42066086 36
+. 0F B6 75 08 89 F0 81 E6 FF 00 00 00 88 01 FF 42 14 8B 93 50 D2 FF FF 0F B7 02 25 00 80 FF FF 66 85 C0 74 26
+
+. 429 420660D0 8
+. 89 14 24 E8 DC F1 FA FF
+
+. 430 420152B4 6
+. FF A3 40 00 00 00
+
+. 431 420152BA 10
+. 68 68 00 00 00 E9 10 FF FF FF
+
+. 432 42062320 5
+. 55 89 E5 5D C3
+
+. 433 420660D8 2
+. EB D0
+
+. 434 420660AA 4
+. 85 FF 75 0A
+
+. 435 420660AE 10
+. 83 C4 2C 89 F0 5B 5E 5F 5D C3
+
+. 436 805DEAD 8
+. 58 FF 36 E8 BB A7 FE FF
+
+. 437 8048670 6
+. FF 25 34 11 06 08
+
+. 438 8048676 10
+. 68 28 00 00 00 E9 90 FF FF FF
+
+. 439 400080E0 17
+. 8B 45 08 89 04 24 8B 06 89 44 24 04 E8 4F 63 00 00
+
+. 440 42062820 11
+. 55 89 E5 57 56 53 E8 32 2F FB FF
+
+. 441 4206282B 20
+. 81 C3 A5 7A 0C 00 83 EC 3C 8B 7D 08 85 FF 0F 84 D2 00 00 00
+
+. 442 4206283F 23
+. 0F B7 17 C7 45 D4 00 00 00 00 89 D1 81 E1 00 80 FF FF 66 85 C9 75 14
+
+. 443 42062856 27
+. 8B 83 A4 02 00 00 85 C0 B8 01 00 00 00 0F 44 45 D4 89 45 D4 8B 75 D4 85 F6 75 7F
+
+. 444 42062871 13
+. 89 D1 81 E1 00 80 FF FF 66 85 C9 74 62
+
+. 445 420628E0 8
+. 89 3C 24 E8 1C 2A FB FF
+
+. 446 420628E8 2
+. EB 94
+
+. 447 4206287E 17
+. 0F BE 77 46 8B 84 3E 94 00 00 00 89 3C 24 FF 50 30
+
+. 448 4206E200 26
+. 55 89 E5 83 EC 28 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 56 14 E8 43 75 FA FF
+
+. 449 4206E21A 15
+. 81 C3 B6 C0 0B 00 8B 46 10 31 FF 39 C2 76 47
+
+. 450 4206E229 11
+. 8B 4E 5C 85 C9 0F 8E A2 00 00 00
+
+. 451 4206E2D6 18
+. 89 34 24 29 C2 89 44 24 04 89 54 24 08 E8 68 FD FF FF
+
+. 452 4206E050 17
+. 55 89 E5 83 EC 28 89 5D F4 8B 45 10 E8 FC 76 FA FF
+
+. 453 4206E061 19
+. 81 C3 6F C2 0B 00 89 75 F8 85 C0 8B 75 08 89 7D FC 74 7C
+
+. 454 4206E074 10
+. F6 46 01 10 0F 84 A2 00 00 00
+
+. 455 4206E120 14
+. 8B 4E 08 8B 56 10 39 D1 0F 84 5E FF FF FF
+
+. 456 4206E08C 31
+. 0F BE 7E 46 8B 4D 0C 8B 55 10 8B 84 37 94 00 00 00 89 4C 24 04 89 34 24 89 54 24 08 FF 50 3C
+
+. 457 4206E470 18
+. 55 89 E5 57 56 83 EC 20 8B 75 10 8B 7D 0C 85 F6 7E 30
+
+. 458 4206E482 36
+. 8D B4 26 00 00 00 00 8D BC 27 00 00 00 00 8B 4D 08 8B 41 38 89 74 24 08 89 7C 24 04 89 04 24 E8 CA F6 05 00
+
+. 459 420CDB70 20
+. 53 8B 54 24 10 8B 4C 24 0C 8B 5C 24 08 B8 04 00 00 00 CD 80
+
+. 460 420CDB84 8
+. 5B 3D 01 F0 FF FF 73 01
+
+. 461 420CDB8C 1
+. C3
+
+. 462 4206E4A6 4
+. 85 C0 78 33
+
+. 463 4206E4AA 8
+. 29 C6 01 C7 85 F6 7F DE
+
+. 464 4206E4B2 16
+. 29 75 10 8B 45 08 8B 78 50 8B 70 4C 85 FF 78 11
+
+. 465 4206E4D3 10
+. 8B 45 10 83 C4 20 5E 5F 5D C3
+
+. 466 4206E0AB 11
+. 89 C7 0F B7 46 44 66 85 C0 74 04
+
+. 467 4206E0BA 25
+. 8B 4E 5C 8B 46 1C 85 C9 89 46 0C 89 46 04 89 46 08 89 46 14 89 46 10 7E 21
+
+. 468 4206E0F4 8
+. F7 06 02 02 00 00 75 DA
+
+. 469 4206E0FC 2
+. EB D5
+
+. 470 4206E0D3 16
+. 8B 46 20 89 46 18 3B 7D 10 B8 FF FF FF FF 74 0D
+
+. 471 4206E0F0 4
+. 31 C0 EB EF
+
+. 472 4206E0E3 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 473 4206E2E8 5
+. E9 68 FF FF FF
+
+. 474 4206E255 4
+. 85 C0 74 17
+
+. 475 4206E270 10
+. 8B 46 04 89 C2 2B 56 08 75 17
+
+. 476 4206E27A 5
+. 83 FF FF 74 0E
+
+. 477 4206E27F 18
+. C7 46 4C FF FF FF FF C7 46 50 FF FF FF FF 89 F8 EB CD
+
+. 478 4206E25E 13
+. 8B 5D F4 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 479 4206288F 22
+. 85 C0 0F B7 07 0F 94 C2 25 00 80 FF FF 0F B6 F2 4E 66 85 C0 74 2B
+
+. 480 420628D0 8
+. 89 3C 24 E8 DC 29 FB FF
+
+. 481 420628D8 2
+. EB CB
+
+. 482 420628A5 7
+. 8B 45 D4 85 C0 75 0A
+
+. 483 420628AC 10
+. 89 F0 83 C4 3C 5B 5E 5F 5D C3
+
+. 484 805DEB5 5
+. 83 C4 10 EB AC
+
+. 485 805DE66 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 486 8049214 9
+. 58 FF 75 E0 E8 F3 F4 FF FF
+
+. 487 8048710 6
+. FF 25 5C 11 06 08
+
+. 488 8048716 10
+. 68 78 00 00 00 E9 F0 FE FF FF
+
+. 489 40047670 16
+. 55 89 E5 53 83 EC 34 80 3D C2 CB 32 40 00 74 1B
+
+. 490 4004769B 6
+. 83 7D 08 00 75 02
+
+. 491 400476A3 9
+. 80 3D C8 82 0D 40 00 74 49
+
+. 492 400476AC 64
+. C7 45 C8 04 20 00 00 8B 45 08 89 45 CC C7 45 D0 00 00 00 00 C7 45 D4 00 00 00 00 C7 45 D8 00 00 00 00 8D 5D C8 B9 00 00 00 00 89 D8 89 CA C1 C0 1D C1 C0 03 C1 C8 1B C1 C8 05 C1 C0 0D C1 C0 13
+
+. 493 400476EC 9
+. 89 D1 89 C8 89 45 F4 EB 10
+
+. 494 40047705 5
+. 8B 5D FC C9 C3
+
+. 495 804921D 9
+. 58 FF 75 E4 E8 EA F4 FF FF
+
+. 496 8049226 21
+. 8B 55 E8 89 55 08 83 C4 10 8D 65 F4 5B 5E 5F C9 E9 D5 F4 FF FF
+
+. 497 804B894 9
+. 58 5A 56 53 E8 9F D9 FF FF
+
+. 498 804923C 34
+. 55 89 E5 57 56 53 83 EC 28 8B 45 08 8B 7D 0C 89 C6 0F AF F7 89 F3 C1 E3 04 89 45 F0 53 E8 32 F4 FF FF
+
+. 499 804925E 11
+. 89 45 EC 89 1C 24 E8 27 F4 FF FF
+
+. 500 8049269 11
+. 89 45 E8 89 1C 24 E8 1C F4 FF FF
+
+. 501 8049274 11
+. 89 45 E4 89 1C 24 E8 11 F4 FF FF
+
+. 502 804927F 15
+. 89 45 E0 31 DB 8D 04 36 83 C4 10 39 C3 73 55
+***** EXPENSIVE 504 9
+
+. 503 804928E 79
+. DD 05 28 E9 05 08 DD 05 30 E9 05 08 DD 05 38 E9 05 08 89 C1 89 F6 31 D2 52 53 DF 2C 24 D9 C0 8B 75 EC D8 C4 D9 C9 8B 45 E8 DD 14 DE D9 C9 DD 1C D8 D9 C0 D8 C3 D9 C9 8B 75 E4 D8 C2 D9 C9 8B 45 E0 DD 1C DE DD 1C D8 43 83 C4 08 39 CB 72 C7
+***** EXPENSIVE 505 10
+vex iropt: not unrolling (63 sts)
+
+. 504 80492A4 57
+. 31 D2 52 53 DF 2C 24 D9 C0 8B 75 EC D8 C4 D9 C9 8B 45 E8 DD 14 DE D9 C9 DD 1C D8 D9 C0 D8 C3 D9 C9 8B 75 E4 D8 C2 D9 C9 8B 45 E0 DD 1C DE DD 1C D8 43 83 C4 08 39 CB 72 C7
+***** EXPENSIVE 506 11
+
+. 505 80492DD 16
+. DD D8 DD D8 DD D8 83 EC 0C 6A 00 E8 33 4A 01 00
+
+. 506 805DD20 34
+. 55 89 E5 53 E8 00 00 00 00 5B 81 C3 EB 33 00 00 8B 93 5C 00 00 00 8B 4D 08 8B 02 89 0A 8B 1C 24 C9 C3
+
+. 507 80492ED 17
+. FF 75 E4 FF 75 EC FF 75 F0 57 31 DB E8 A6 2B 00 00
+
+. 508 804BEA4 37
+. 55 89 E5 57 56 53 83 EC 1C 8B 45 08 89 45 F0 8B 4D F0 8B 45 0C 89 45 EC 85 C9 8B 45 14 8B 75 10 89 45 E8 74 4E
+
+. 509 804BEC9 7
+. 31 FF 3B 7D F0 73 26
+
+. 510 804BED0 17
+. 8B 45 EC C1 E0 04 31 DB 89 45 E4 90 E8 6B FA FF FF
+***** EXPENSIVE 512 12
+
+. 511 804B94C 52
+. A1 0C 10 06 08 69 C0 6D 4E C6 41 05 39 30 00 00 25 FF FF FF 7F 55 A3 0C 10 06 08 31 D2 89 E5 A1 0C 10 06 08 52 50 DF 2C 24 DC 0D D8 06 06 08 83 C4 08 C9 C3
+***** EXPENSIVE 513 13
+
+. 512 804BEE1 8
+. DD 1C 1E E8 63 FA FF FF
+***** EXPENSIVE 514 14
+
+. 513 804BEE9 13
+. 47 DD 5C 1E 08 03 5D E4 3B 7D F0 72 E6
+
+. 514 804BEF6 33
+. 8B 45 E8 89 45 14 8B 45 F0 89 45 10 8B 45 EC 89 45 0C 89 75 08 8D 65 F4 5B 5E 5F C9 E9 8D 06 00 00
+
+. 515 804C5A4 38
+. 55 89 E5 53 83 EC 10 6A FF FF 75 14 FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 59 4B 01 00 FF 75 08 E8 AE 00 00 00
+***** EXPENSIVE 517 15
+
+. 516 804C678 68
+. 55 89 E5 57 56 53 83 EC 5C 31 D2 52 8B 45 10 DB 45 18 50 E8 00 00 00 00 5B 81 C3 84 4A 01 00 D8 C0 C7 45 E4 00 00 00 00 8B 45 10 DF 2C 24 D9 C9 DC 8B 3C F6 FF FF 83 C4 08 39 45 E4 DE F1 0F 83 E8 00 00 00
+***** EXPENSIVE 518 16
+
+. 517 804C6BC 38
+. C7 45 C4 00 00 00 00 90 31 FF DD 83 44 F6 FF FF 3B 7D 10 DD 55 D0 DD 5D D8 C7 45 E0 00 00 00 00 0F 83 8B 00 00 00
+***** EXPENSIVE 519 17
+
+. 518 804C6E2 33
+. 31 F6 31 D2 52 8B 45 E0 50 DF 2C 24 D8 C9 83 EC 08 DD 14 24 D9 C9 DD 5D 98 DD 5D A8 E8 1D BF FF FF
+
+. 519 8048620 6
+. FF 25 20 11 06 08
+
+. 520 8048626 10
+. 68 00 00 00 00 E9 E0 FF FF FF
+***** EXPENSIVE 522 18
+
+. 521 404658E0 15
+. DD 44 24 04 D9 FF DF E0 A9 00 04 00 00 75 01
+
+. 522 404658EF 1
+. C3
+***** EXPENSIVE 524 19
+
+. 523 804C703 14
+. DD 45 A8 DD 1C 24 DD 5D C8 E8 1F C0 FF FF
+
+. 524 8048730 6
+. FF 25 64 11 06 08
+
+. 525 8048736 10
+. 68 88 00 00 00 E9 D0 FE FF FF
+***** EXPENSIVE 527 20
+
+. 526 40466A60 15
+. DD 44 24 04 D9 FE DF E0 A9 00 04 00 00 75 01
+
+. 527 40466A6F 1
+. C3
+***** EXPENSIVE 529 21
+
+. 528 804C711 92
+. 8B 45 08 DD 04 30 DD 44 30 08 8B 45 E0 03 45 E4 31 D2 DD 45 C8 D9 C3 D9 C9 F7 75 10 D8 CB D9 C9 D8 CA D9 CB DE CC D9 C9 DC 4D C8 D9 C9 8B 45 0C C1 E0 04 DE E2 DE C2 47 DC 45 D8 D9 C9 DC 45 D0 D9 C9 83 C4 10 01 C6 3B 7D 10 DD 5D D8 DD 5D D0 89 55 E0 DD 45 98 0F 82 77 FF FF FF
+***** EXPENSIVE 530 22
+
+. 529 804C76D 43
+. 8B 45 C4 DD 45 D8 8B 55 14 DD 1C 02 DD 45 D0 DD 5C 02 08 8B 45 0C C1 E0 04 01 45 C4 FF 45 E4 8B 45 10 39 45 E4 0F 82 2C FF FF FF
+***** EXPENSIVE 531 23
+
+. 530 804C798 12
+. DD D8 8D 65 F4 5B 5E 31 C0 5F C9 C3
+
+. 531 804C5CA 5
+. 8B 5D FC C9 C3
+
+. 532 80492FE 7
+. 83 C4 20 39 FB 73 3D
+
+. 533 8049305 61
+. 8B 75 F0 C1 E6 04 31 C9 89 75 DC 8B 75 EC 8B 04 0E 8B 54 0E 04 8B 75 E8 89 04 0E 89 54 0E 04 8B 75 EC 8B 44 0E 08 8B 54 0E 0C 43 8B 75 E8 89 44 0E 08 89 54 0E 0C 03 4D DC 39 FB 72 CE
+
+. 534 8049342 15
+. 56 57 FF 75 F0 FF 75 EC 31 DB E8 07 6A 00 00
+
+. 535 804FD58 33
+. 55 89 E5 53 50 6A FF FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 AA 13 01 00 FF 75 08 E8 AF 00 00 00
+
+. 536 804FE28 36
+. 55 89 E5 57 56 53 83 EC 4C E8 00 00 00 00 5B 81 C3 DE 12 01 00 31 C0 83 7D 10 01 8B 75 08 0F 84 C5 01 00 00
+
+. 537 8050011 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 538 804FD79 5
+. 8B 5D FC C9 C3
+
+. 539 8049351 7
+. 83 C4 10 39 FB 73 3E
+
+. 540 8049358 62
+. 8B 45 F0 C1 E0 04 31 C9 89 45 DC 90 8B 75 EC 8B 04 0E 8B 54 0E 04 8B 75 E0 89 04 0E 89 54 0E 04 8B 75 EC 8B 44 0E 08 8B 54 0E 0C 43 8B 75 E0 89 44 0E 08 89 54 0E 0C 03 4D DC 39 FB 72 CE
+
+. 541 8049396 32
+. 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 EC 68 AB E8 05 08 FF 75 E4 68 B8 E8 05 08 E8 4A F4 FF FF
+
+. 542 80493B6 18
+. 83 C4 20 FF 75 F0 57 68 20 ED 05 08 50 E8 70 4A 01 00
+vex iropt: 4 x unrolling (26 sts -> 104 sts)
+
+. 543 4206D8C0 10
+. 0F B6 07 47 88 01 41 4A 79 F6
+
+. 544 4204923A 4
+. 3C 25 74 59
+
+. 545 4204923E 21
+. 8B 85 9C FA FF FF C7 00 00 00 00 00 F6 06 80 0F 84 A2 00 00 00
+
+. 546 420492F5 3
+. 46 EB 94
+
+. 547 4204928C 7
+. 0F B6 06 84 C0 74 04
+
+. 548 42049293 4
+. 3C 25 75 A7
+
+. 549 42048166 157
+. C6 85 4F FA FF FF 20 31 C0 31 C9 89 85 64 FA FF FF 31 D2 31 FF 89 8D 7C FA FF FF B8 FF FF FF FF 31 C9 89 95 78 FA FF FF 31 D2 89 BD 74 FA FF FF 31 FF 89 85 54 FA FF FF 8B 85 20 F9 FF FF 89 8D 70 FA FF FF 31 C9 89 95 6C FA FF FF 31 D2 89 BD 68 FA FF FF 31 FF 89 8D 60 FA FF FF 31 C9 89 95 5C FA FF FF 31 D2 89 BD 58 FA FF FF 89 F7 46 89 8D 50 FA FF FF 89 95 B0 FA FF FF 89 85 AC FA FF FF 89 B5 28 FB FF FF 0F B6 4F 01 88 8D 4E FA FF FF 80 E9 20 80 F9 5A 0F 86 8D 04 00 00
+
+. 550 80493C8 13
+. 83 C4 10 83 7D F0 01 0F 87 84 02 00 00
+
+. 551 80493D5 13
+. 53 57 FF 75 F0 FF 75 EC E8 C6 69 00 00
+
+. 552 804FDA8 38
+. 55 89 E5 57 56 53 83 EC 0C 6A 01 FF 75 10 FF 75 0C 8B 75 08 E8 00 00 00 00 5B 81 C3 53 13 01 00 56 E8 5A 00 00 00
+
+. 553 804FDCE 9
+. 83 C4 10 85 C0 89 C7 75 41
+***** EXPENSIVE 555 24
+
+. 554 804FDD7 28
+. 31 D2 52 8B 45 10 50 DF 2C 24 DD 83 34 F6 FF FF 83 C4 08 31 D2 3B 7D 10 DE F1 73 2F
+***** EXPENSIVE 556 25
+
+. 555 804FDF3 35
+. 8B 4D 0C 31 C0 C1 E1 04 90 DD 04 06 D8 C9 DD 1C 06 DD 44 06 08 D8 C9 42 DD 5C 06 08 01 C8 3B 55 10 72 E6
+***** EXPENSIVE 557 26
+
+. 556 804FE16 12
+. DD D8 8D 65 F4 5B 5E 89 F8 5F C9 C3
+
+. 557 80493E2 32
+. 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 EC 68 BC E8 05 08 FF 75 E8 68 C8 E8 05 08 E8 FE F3 FF FF
+
+. 558 8049402 18
+. 83 C4 30 FF 75 F0 57 68 80 ED 05 08 50 E8 24 4A 01 00
+
+. 559 8049414 13
+. 83 C4 10 83 7D F0 01 0F 87 13 02 00 00
+
+. 560 8049421 15
+. 51 57 FF 75 F0 FF 75 E0 31 DB E8 50 69 00 00
+
+. 561 804FD80 33
+. 55 89 E5 53 50 6A 01 FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 82 13 01 00 FF 75 08 E8 87 00 00 00
+
+. 562 804FDA1 5
+. 8B 5D FC C9 C3
+
+. 563 8049430 7
+. 83 C4 10 39 FB 73 33
+***** EXPENSIVE 565 27
+
+. 564 8049437 49
+. 31 D2 52 57 8B 55 F0 DF 2C 24 31 C0 83 C4 08 C1 E2 04 8D 76 00 8B 4D E8 DD 04 01 D8 C9 DD 1C 01 DD 44 01 08 D8 C9 43 DD 5C 01 08 01 D0 39 FB 72 E4
+***** EXPENSIVE 566 28
+
+. 565 8049468 34
+. DD D8 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 E0 68 CD E8 05 08 FF 75 E8 68 C8 E8 05 08 E8 76 F3 FF FF
+
+. 566 804948A 18
+. 83 C4 20 FF 75 F0 57 68 E0 ED 05 08 50 E8 9C 49 01 00
+
+. 567 4207BEA5 5
+. A4 D1 E9 73 02
+
+. 568 804949C 13
+. 83 C4 10 83 7D F0 01 0F 87 63 01 00 00
+
+. 569 80494A9 31
+. 83 EC 0C FF 75 E4 FF 75 EC 6A 00 6A 00 68 00 00 F0 3F 6A 00 FF 75 F0 57 6A 01 E8 B8 24 00 00
+
+. 570 804B980 23
+. 55 89 E5 57 56 53 83 EC 3C 8B 7D 0C 85 FF 8B 4D 24 0F 84 F5 00 00 00
+
+. 571 804B997 6
+. 31 DB 39 FB 73 31
+
+. 572 804B99D 49
+. 8B 55 10 31 C0 C1 E2 04 8D 76 00 43 C7 04 01 00 00 00 00 C7 44 01 04 00 00 00 00 C7 44 01 08 00 00 00 00 C7 44 01 0C 00 00 00 00 01 D0 39 FB 72 DA
+***** EXPENSIVE 574 29
+
+. 573 804B9CE 37
+. 31 D2 8B 45 08 F7 F7 0F AF 55 10 C1 E2 04 DD 45 14 31 DB DD 1C 11 39 FB DD 45 1C DD 5C 11 08 0F 83 8F 00 00 00
+***** EXPENSIVE 575 30
+
+. 574 804B9F3 63
+. 31 D2 52 57 DF 2C 24 DD 5D E0 83 C4 08 C7 45 DC 00 00 00 00 31 F6 8D 76 00 89 F0 31 D2 F7 F7 89 D0 31 D2 52 50 DF 2C 24 DC 0D E0 06 06 08 83 EC 08 DC 75 E0 DD 14 24 DD 5D C8 E8 EE CB FF FF
+***** EXPENSIVE 576 31
+
+. 575 804BA32 14
+. DD 45 C8 DD 1C 24 DD 5D B8 E8 F0 CC FF FF
+***** EXPENSIVE 577 32
+
+. 576 804BA40 66
+. DD 45 B8 DD 45 14 DD 45 1C D9 C9 D8 CA D9 C9 D8 CB D9 CA DC 4D 1C D9 CB DC 4D 14 D9 C9 8B 45 DC 8B 55 28 DE E2 DE C2 DD 1C 02 DD 5C 02 08 8B 45 10 C1 E0 04 43 83 C4 10 01 45 DC 03 75 08 39 FB 72 8A
+
+. 577 804BA82 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 578 80494C8 15
+. 83 C4 2C 57 FF 75 F0 FF 75 EC E8 81 68 00 00
+
+. 579 80494D7 32
+. 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 EC 68 DA E8 05 08 FF 75 E4 68 E7 E8 05 08 E8 09 F3 FF FF
+
+. 580 80494F7 18
+. 83 C4 30 FF 75 F0 57 68 40 EE 05 08 50 E8 2F 49 01 00
+
+. 581 8049509 26
+. FF 75 E4 FF 75 EC 6A 00 6A 00 68 00 00 F0 3F 6A 00 FF 75 F0 57 E8 85 25 00 00
+***** EXPENSIVE 583 33
+
+. 582 804BAA8 28
+. 55 89 E5 57 56 53 83 EC 0C 8B 5D 08 85 DB DD 45 10 DD 45 18 8B 75 20 8B 7D 24 74 75
+
+. 583 804BAC4 6
+. 31 C0 39 D8 73 50
+***** EXPENSIVE 585 34
+
+. 584 804BACA 28
+. 8B 55 0C 31 C9 C1 E2 04 89 F6 D9 C9 40 DD 14 0E D9 C9 DD 54 0E 08 01 D1 39 D8 72 EE
+
+. 585 804BAE6 6
+. 31 C0 39 D8 73 2E
+
+. 586 804BAEC 46
+. 8B 55 0C 31 C9 C1 E2 04 40 C7 04 0F 00 00 00 00 C7 44 0F 04 00 00 00 00 C7 44 0F 08 00 00 00 00 C7 44 0F 0C 00 00 00 00 01 D1 39 D8 72 DA
+***** EXPENSIVE 588 35
+
+. 587 804BB1A 31
+. 31 D2 52 53 DF 2C 24 DC CA DE C9 D9 C9 83 C4 08 DD 1F DD 5F 08 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 588 8049523 15
+. 83 C4 2C 57 FF 75 F0 FF 75 EC E8 26 68 00 00
+
+. 589 8049532 32
+. 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 EC 68 F0 E8 05 08 FF 75 E4 68 E7 E8 05 08 E8 AE F2 FF FF
+
+. 590 8049552 20
+. 83 C4 30 FF 75 F0 57 68 A0 EE 05 08 50 31 F6 E8 D2 48 01 00
+
+. 591 8049566 9
+. 31 DB 83 C4 10 39 FE 72 41
+
+. 592 80495B0 30
+. 83 EC 0C FF 75 E4 FF 75 EC 6A 00 6A 00 68 00 00 F0 3F 6A 00 FF 75 F0 57 53 E8 8A 25 00 00
+
+. 593 804BB58 20
+. 55 89 E5 57 56 53 83 EC 3C 8B 55 0C 85 D2 0F 84 17 01 00 00
+
+. 594 804BB6C 11
+. 31 DB 3B 5D 0C 0F 83 BF 00 00 00
+***** EXPENSIVE 596 36
+
+. 595 804BB77 60
+. 31 D2 52 8B 45 0C 50 DF 2C 24 DD 5D E0 83 C4 08 31 FF 31 F6 90 89 F0 31 D2 F7 75 0C 89 D0 31 D2 52 50 DF 2C 24 DC 0D E8 06 06 08 83 EC 08 DC 75 E0 DD 14 24 DD 5D C8 E8 6D CA FF FF
+***** EXPENSIVE 597 37
+
+. 596 804BBB3 14
+. DD 45 C8 DD 1C 24 DD 5D B8 E8 6F CB FF FF
+***** EXPENSIVE 598 38
+
+. 597 804BBC1 63
+. DD 45 B8 DD 45 14 DD 45 1C D9 C9 D8 CA D9 C9 D8 CB D9 CA DC 4D 1C D9 CB DC 4D 14 D9 C9 8B 55 10 8B 45 24 C1 E2 04 DE E2 DE C2 43 DD 1C 38 DD 5C 38 08 83 C4 10 01 D7 03 75 08 3B 5D 0C 72 8C
+
+. 598 804BC00 7
+. 31 DB 3B 5D 0C 73 2F
+
+. 599 804BC07 47
+. 31 C0 8D 76 00 8B 4D 28 43 C7 04 01 00 00 00 00 C7 44 01 04 00 00 00 00 C7 44 01 08 00 00 00 00 C7 44 01 0C 00 00 00 00 01 D0 3B 5D 0C 72 D6
+
+. 600 804BC36 7
+. 8B 45 08 85 C0 7E 3E
+
+. 601 804BC7B 8
+. 8B 45 0C 2B 45 08 EB BD
+***** EXPENSIVE 603 39
+
+. 602 804BC40 59
+. 31 D2 F7 75 0C 8B 4D 10 0F AF CA 31 D2 52 8B 45 0C 50 DF 2C 24 DD 45 14 8B 45 28 C1 E1 04 D8 C9 D9 C9 DC 4D 1C D9 C9 DD 1C 08 DD 5C 08 08 83 C4 08 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 603 80495CE 15
+. 83 C4 2C 57 FF 75 F0 FF 75 EC E8 7B 67 00 00
+
+. 604 80495DD 32
+. 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 EC 68 00 E9 05 08 FF 75 E4 68 E7 E8 05 08 E8 03 F2 FF FF
+
+. 605 80495FD 10
+. 43 09 C6 83 C4 30 39 FB 72 A9
+
+. 606 8049607 5
+. E9 63 FF FF FF
+
+. 607 804956F 15
+. FF 75 F0 57 68 00 EF 05 08 56 E8 BA 48 01 00
+
+. 608 804957E 9
+. 5A FF 75 EC E8 89 F1 FF FF
+
+. 609 8049587 9
+. 58 FF 75 E8 E8 80 F1 FF FF
+
+. 610 8049590 9
+. 58 FF 75 E4 E8 77 F1 FF FF
+
+. 611 8049599 21
+. 8B 75 E0 89 75 08 83 C4 10 8D 65 F4 5B 5E 5F C9 E9 62 F1 FF FF
+
+. 612 804B89D 9
+. 59 58 56 53 E8 A6 E0 FF FF
+
+. 613 804994C 39
+. 55 89 E5 57 56 53 83 EC 28 8B 4D 0C 8B 45 08 89 CF 0F AF F8 8D 1C FD 00 00 00 00 89 4D EC 89 45 F0 53 E8 1D ED FF FF
+
+. 614 8049973 11
+. 89 45 E8 89 1C 24 E8 12 ED FF FF
+
+. 615 804997E 13
+. 89 45 E4 31 F6 89 1C 24 E8 05 ED FF FF
+
+. 616 804998B 10
+. 83 C4 10 39 FE 89 45 E0 73 18
+***** EXPENSIVE 618 40
+
+. 617 8049995 24
+. 89 F9 90 31 D2 52 56 DF 2C 24 8B 5D E4 DD 1C F3 46 83 C4 08 39 CE 72 EB
+
+. 618 80499AD 22
+. 8B 4D EC 0F AF 4D F0 C1 E1 03 C1 E9 02 FC 8B 7D E8 8B 75 E4 F3 A5
+vex iropt: 8 x unrolling (15 sts -> 120 sts)
+
+. 619 80499C1 2
+. F3 A5
+
+. 620 80499C3 8
+. 31 FF 83 7D EC 01 76 11
+
+. 621 80499DC 7
+. 31 F6 3B 75 EC 73 4F
+
+. 622 80499E3 27
+. 8B 45 F0 8B 5D E4 C1 E0 03 89 5D DC 89 45 D8 89 F6 31 C9 31 DB 39 F9 89 F2 73 12
+
+. 623 8049A10 34
+. 8B 5D DC 8B 03 0F AF 4D F0 8B 53 04 8B 5D E0 89 04 CB 46 8B 45 D8 01 45 DC 3B 75 EC 89 54 CB 04 72 C2
+
+. 624 8049A32 15
+. 57 FF 75 EC FF 75 F0 FF 75 E4 E8 CF F0 FF FF
+
+. 625 8048B10 29
+. 55 89 E5 57 56 53 83 EC 1C 8B 45 10 48 31 F6 39 C6 C7 45 EC 00 00 00 00 89 45 E8 73 4A
+
+. 626 8048B77 10
+. 83 C4 1C 5B 5E 31 C0 5F C9 C3
+
+. 627 8049A41 34
+. 68 80 84 2E 41 6A 00 FF 75 EC FF 75 F0 FF 75 E4 68 A0 EC 05 08 FF 75 E0 68 0B E9 05 08 E8 F5 EE FF FF
+***** EXPENSIVE 629 41
+
+. 628 8048958 18
+. 55 89 E5 57 56 53 83 EC 1C 31 DB 3B 5D 1C D9 EE 73 36
+***** EXPENSIVE 630 42
+
+. 629 804896A 41
+. DD 05 20 E9 05 08 31 C9 89 F6 8B 45 0C 8B 55 14 DD 04 C8 DD 04 CA DE E1 D9 E1 D8 C9 DD E2 DF E0 F6 C4 45 0F 85 E1 00 00 00
+***** EXPENSIVE 631 43
+
+. 630 8048A74 7
+. DD D8 E9 1A FF FF FF
+
+. 631 8048995 9
+. 43 03 4D 18 3B 5D 1C 72 D6
+***** EXPENSIVE 633 44
+
+. 632 804899E 14
+. DD D8 DD 45 20 DD E9 DF E0 F6 C4 45 75 0C
+***** EXPENSIVE 634 45
+
+. 633 80489AC 12
+. DD D8 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 634 8049A63 17
+. 83 C4 2C FF 75 EC 68 E0 F1 05 08 50 E8 C4 43 01 00
+
+. 635 8049A74 9
+. 59 FF 75 E0 E8 93 EC FF FF
+
+. 636 8049A7D 9
+. 5A FF 75 E4 E8 8A EC FF FF
+
+. 637 8049A86 21
+. 8B 4D E8 89 4D 08 83 C4 10 8D 65 F4 5B 5E 5F C9 E9 75 EC FF FF
+
+. 638 804B8A6 10
+. 58 5A 56 53 43 E8 EC E1 FF FF
+
+. 639 8049A9C 41
+. 55 89 E5 57 56 53 83 EC 28 8B 55 0C 8B 45 08 89 D6 0F AF F0 89 45 F0 8D 04 F5 00 00 00 00 89 55 EC 89 F3 50 E8 CB EB FF FF
+
+. 640 8049AC5 14
+. C1 E3 04 89 45 E8 89 1C 24 E8 BD EB FF FF
+
+. 641 8049AD3 11
+. 89 45 E4 89 1C 24 E8 B2 EB FF FF
+
+. 642 8049ADE 11
+. 89 45 E0 89 1C 24 E8 A7 EB FF FF
+
+. 643 8049AE9 12
+. 31 C9 83 C4 10 39 F1 89 45 DC 73 18
+***** EXPENSIVE 645 46
+
+. 644 8049AF5 24
+. 89 F3 90 31 D2 52 51 DF 2C 24 8B 75 E8 DD 1C CE 41 83 C4 08 39 D9 72 EB
+
+. 645 8049B0D 16
+. 8B 45 EC 0F AF 45 F0 31 C9 8D 1C 00 39 D9 73 48
+***** EXPENSIVE 647 47
+
+. 646 8049B1D 66
+. DD 05 28 E9 05 08 DD 05 30 E9 05 08 DD 05 38 E9 05 08 90 31 D2 52 51 DF 2C 24 D9 C0 D8 C4 8B 45 E4 DD 1C C8 D9 C0 D8 C3 D9 C9 8B 55 E0 D8 C2 D9 C9 8B 75 DC DD 1C CA DD 1C CE 41 83 C4 08 39 D9 72 D1
+***** EXPENSIVE 648 48
+vex iropt: 2 x unrolling (59 sts -> 118 sts)
+
+. 647 8049B30 47
+. 31 D2 52 51 DF 2C 24 D9 C0 D8 C4 8B 45 E4 DD 1C C8 D9 C0 D8 C3 D9 C9 8B 55 E0 D8 C2 D9 C9 8B 75 DC DD 1C CA DD 1C CE 41 83 C4 08 39 D9 72 D1
+***** EXPENSIVE 649 49
+
+. 648 8049B5F 16
+. DD D8 DD D8 DD D8 83 EC 0C 6A 00 E8 B1 41 01 00
+
+. 649 8049B6F 17
+. FF 75 DC FF 75 E4 FF 75 F0 FF 75 EC E8 BC 23 00 00
+
+. 650 804BF3C 37
+. 55 89 E5 57 56 53 83 EC 1C 8B 45 08 89 45 F0 8B 5D F0 8B 45 0C 89 45 EC 85 DB 8B 45 14 8B 7D 10 89 45 E8 74 55
+
+. 651 804BF61 7
+. 31 F6 3B 75 F0 73 2D
+
+. 652 804BF68 17
+. 8B 45 EC C1 E0 04 31 DB 89 45 E4 90 E8 D3 F9 FF FF
+***** EXPENSIVE 654 50
+
+. 653 804BF79 28
+. 46 DD 1C 1F C7 44 1F 08 00 00 00 00 C7 44 1F 0C 00 00 00 00 03 5D E4 3B 75 F0 72 DF
+
+. 654 804BF95 33
+. 8B 45 E8 89 45 14 8B 45 F0 89 45 10 8B 45 EC 89 45 0C 89 7D 08 8D 65 F4 5B 5E 5F C9 E9 EE 05 00 00
+
+. 655 8049B80 22
+. 8B 4D EC 0F AF 4D F0 C1 E1 04 C1 E9 02 FC 8B 7D E0 8B 75 E4 F3 A5
+vex iropt: 8 x unrolling (15 sts -> 120 sts)
+
+. 656 8049B94 2
+. F3 A5
+
+. 657 8049B96 8
+. 83 C4 20 3B 4D EC 73 24
+
+. 658 8049B9E 36
+. 31 DB 89 D8 C1 E0 04 8B 75 E4 8B 54 06 04 41 8B 04 06 8B 75 E8 89 04 DE 89 54 DE 04 03 5D F0 3B 4D EC 72 DE
+
+. 659 8049BC2 15
+. 50 FF 75 EC FF 75 F0 FF 75 E8 E8 7F 0D 01 00
+
+. 660 805A950 38
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 8B 75 10 E8 00 00 00 00 5B 81 C3 B0 67 00 00 31 C0 83 FE 01 0F 84 8F 02 00 00
+
+. 661 805AC05 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 662 8049BD1 17
+. FF 75 EC FF 75 F0 FF 75 E4 FF 75 E8 E8 22 C1 00 00
+
+. 663 8055D04 32
+. 55 89 E5 57 56 53 83 EC 0C 8B 75 14 E8 00 00 00 00 5B 81 C3 FF B3 00 00 85 F6 0F 84 D3 00 00 00
+
+. 664 8055D24 46
+. 8B 4D 08 8B 01 8B 75 0C 8B 51 04 89 06 8B 45 14 C7 45 F0 01 00 00 00 48 39 45 F0 89 56 04 C7 46 08 00 00 00 00 C7 46 0C 00 00 00 00 73 5E
+
+. 665 8055DB0 11
+. 8B 45 14 2B 45 F0 39 45 F0 74 0A
+
+. 666 8055DBB 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 667 8049BE2 37
+. 83 C4 20 68 80 84 2E 41 6A 00 FF 75 EC FF 75 F0 FF 75 E4 68 AB E8 05 08 FF 75 DC 68 B8 E8 05 08 E8 F9 EB FF FF
+
+. 668 8049C07 20
+. 83 C4 20 FF 75 F0 FF 75 EC 68 20 F2 05 08 50 E8 1D 42 01 00
+
+. 669 8049C1B 17
+. 83 C4 0C FF 75 EC FF 75 F0 FF 75 E8 E8 B4 BC 00 00
+
+. 670 80558E0 36
+. 55 89 E5 57 56 53 81 EC 8C 00 00 00 E8 00 00 00 00 5B 81 C3 23 B8 00 00 31 C0 83 7D 10 01 0F 84 9A 02 00 00
+
+. 671 8055B9E 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 672 8049C2C 10
+. 31 C9 83 C4 10 3B 4D EC 73 28
+***** EXPENSIVE 674 51
+
+. 673 8049C36 38
+. 31 D2 8B 45 EC 52 50 DF 2C 24 31 C0 83 C4 08 8D 76 00 8B 55 E8 DD 04 C2 D8 F1 41 DD 1C C2 03 45 F0 3B 4D EC 72 EC
+***** EXPENSIVE 675 52
+
+. 674 8049C5C 19
+. DD D8 FF 75 EC FF 75 F0 FF 75 E4 FF 75 E8 E8 BD 0F 01 00
+
+. 675 805AC2C 28
+. 55 89 E5 57 56 53 83 EC 0C 8B 45 14 E8 00 00 00 00 5B 81 C3 D7 64 00 00 85 C0 74 40
+
+. 676 805AC48 7
+. 31 FF 3B 7D 14 73 2F
+***** EXPENSIVE 678 53
+
+. 677 805AC4F 47
+. 31 F6 8D 76 00 89 F1 8B 45 08 47 DD 04 F0 C1 E1 04 8B 45 0C 03 75 10 3B 7D 14 DD 1C 08 C7 44 08 08 00 00 00 00 C7 44 08 0C 00 00 00 00 72 D6
+
+. 678 805AC7E 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 679 8049C6F 34
+. 68 80 84 2E 41 6A 00 FF 75 EC FF 75 F0 FF 75 E4 68 BC E8 05 08 FF 75 E0 68 C8 E8 05 08 E8 6F EB FF FF
+
+. 680 8049C91 20
+. 83 C4 30 FF 75 F0 FF 75 EC 68 60 F2 05 08 50 E8 93 41 01 00
+
+. 681 8049CA5 9
+. 5F FF 75 E8 E8 62 EA FF FF
+
+. 682 8049CAE 9
+. 5E FF 75 E4 E8 59 EA FF FF
+
+. 683 8049CB7 9
+. 5B FF 75 E0 E8 50 EA FF FF
+
+. 684 8049CC0 21
+. 8B 75 DC 89 75 08 83 C4 10 8D 65 F4 5B 5E 5F C9 E9 3B EA FF FF
+
+. 685 804B8B0 8
+. 83 C4 10 83 FB 03 76 D2
+
+. 686 804B88A 10
+. 83 EC 08 56 53 E8 4C D8 FF FF
+
+. 687 8049659 14
+. 6A 00 57 FF 75 F0 FF 75 EC E8 1D F5 FF FF
+
+. 688 8048B84 26
+. 55 89 E5 57 56 53 51 31 F6 31 C9 3B 75 10 8B 7D 14 C7 45 F0 00 00 00 00 73 69
+
+. 689 8048B9E 15
+. 89 F6 BB 01 00 00 00 83 C1 02 3B 5D 0C 73 4F
+***** EXPENSIVE 691 54
+
+. 690 8048BAD 34
+. 8D 76 00 31 D2 52 8D 04 0F 50 8B 45 08 DF 2C 24 DD 04 C8 DA E9 DF E0 80 E4 45 83 C4 08 80 FC 40 74 03
+***** EXPENSIVE 692 55
+
+. 691 8048BD2 32
+. 41 31 D2 52 8D 04 0F 50 8B 45 08 DF 2C 24 DD 04 C8 DA E9 DF E0 80 E4 45 83 C4 08 80 FC 40 74 03
+
+. 692 8048BF5 7
+. 43 41 3B 5D 0C 72 B4
+
+. 693 8048BFC 11
+. FF 45 F0 8B 45 10 39 45 F0 72 99
+
+. 694 8048C07 8
+. 5A 5B 89 F0 5E 5F C9 C3
+
+. 695 8049667 15
+. FF 75 F0 57 68 20 F0 05 08 50 E8 C2 47 01 00
+
+. 696 8049676 8
+. 83 C4 20 E9 57 FD FF FF
+
+. 697 8049634 14
+. 6A 00 57 FF 75 F0 FF 75 EC E8 42 F5 FF FF
+
+. 698 8049642 15
+. FF 75 F0 57 68 C0 EF 05 08 50 E8 E7 47 01 00
+
+. 699 8049651 8
+. 83 C4 20 E9 C8 FD FF FF
+
+. 700 804960C 17
+. 68 B8 0B 00 00 57 FF 75 F0 FF 75 E0 E8 67 F5 FF FF
+
+. 701 804961D 15
+. FF 75 F0 57 68 60 EF 05 08 50 E8 0C 48 01 00
+
+. 702 804962C 8
+. 83 C4 20 E9 75 FE FF FF
+***** EXPENSIVE 704 56
+vex iropt: 2 x unrolling (34 sts -> 68 sts)
+
+. 703 8049998 21
+. 31 D2 52 56 DF 2C 24 8B 5D E4 DD 1C F3 46 83 C4 08 39 CE 72 EB
+***** EXPENSIVE 705 57
+vex iropt: 2 x unrolling (34 sts -> 68 sts)
+
+. 704 8049AF8 21
+. 31 D2 52 51 DF 2C 24 8B 75 E8 DD 1C CE 41 83 C4 08 39 D9 72 EB
+***** EXPENSIVE 706 58
+
+. 705 8048BB0 31
+. 31 D2 52 8D 04 0F 50 8B 45 08 DF 2C 24 DD 04 C8 DA E9 DF E0 80 E4 45 83 C4 08 80 FC 40 74 03
+
+. 706 804B8B8 6
+. D1 E6 39 FE 76 C2
+
+. 707 804B880 5
+. 3B 75 F0 72 33
+
+. 708 804915F 17
+. BA 01 00 00 00 46 89 D0 89 F1 D3 E0 39 45 EC 77 F4
+
+. 709 8049192 18
+. 89 F6 89 D0 D1 E1 83 E0 01 43 09 C1 D1 EA 39 F3 72 F0
+
+. 710 8049188 10
+. 31 C9 31 DB 39 F1 89 FA 73 12
+
+. 711 8048A99 29
+. 8B 55 0C C1 E2 04 C7 45 E8 00 00 00 00 89 55 E4 8D 76 00 8B 7D 10 D1 EF 39 75 F0 73 2E
+
+. 712 8048AE4 4
+. 39 F7 77 08
+
+. 713 8048AF0 19
+. 8B 55 E4 FF 45 F0 8B 5D EC 01 FE 01 55 E8 39 5D F0 72 A9
+***** EXPENSIVE 715 59
+
+. 714 8048820 47
+. 8B 45 0C 8B 4D 14 DD 04 18 DD 04 19 DE E1 DD 44 18 08 DC 6C 19 08 D9 C9 D9 E1 D9 C9 D9 E1 DE C1 D8 C9 DD E2 DF E0 F6 C4 45 0F 85 01 01 00 00
+
+. 715 804BEDC 5
+. E8 6B FA FF FF
+***** EXPENSIVE 717 60
+
+. 716 804C6E4 31
+. 31 D2 52 8B 45 E0 50 DF 2C 24 D8 C9 83 EC 08 DD 14 24 D9 C9 DD 5D 98 DD 5D A8 E8 1D BF FF FF
+***** EXPENSIVE 718 61
+
+. 717 804C6C4 30
+. 31 FF DD 83 44 F6 FF FF 3B 7D 10 DD 55 D0 DD 5D D8 C7 45 E0 00 00 00 00 0F 83 8B 00 00 00
+vex iropt: 2 x unrolling (49 sts -> 98 sts)
+
+. 718 8049310 50
+. 8B 75 EC 8B 04 0E 8B 54 0E 04 8B 75 E8 89 04 0E 89 54 0E 04 8B 75 EC 8B 44 0E 08 8B 54 0E 0C 43 8B 75 E8 89 44 0E 08 89 54 0E 0C 03 4D DC 39 FB 72 CE
+
+. 719 804FE4C 11
+. 83 EC 0C FF 75 10 E8 71 CF FF FF
+
+. 720 804CDC8 17
+. 55 89 E5 8B 55 08 B8 01 00 00 00 31 C9 39 D0 73 0A
+
+. 721 804CDD9 10
+. 8D 76 00 D1 E0 41 39 D0 72 F9
+
+. 722 804CDE3 22
+. B8 01 00 00 00 D3 E0 39 C2 0F 94 C0 0F B6 C0 48 09 C1 89 C8 C9 C3
+
+. 723 804FE57 13
+. 89 45 E4 83 C4 10 40 0F 84 BF 01 00 00
+
+. 724 804FE64 15
+. FF 75 E4 FF 75 10 FF 75 0C 56 E8 39 CB FF FF
+
+. 725 804C9AC 28
+. 55 89 E5 57 56 83 EC 10 8B 45 10 48 31 F6 39 C6 C7 45 F4 00 00 00 00 89 45 F0 73 67
+
+. 726 804C9C8 26
+. 8B 55 0C C1 E2 04 C7 45 EC 00 00 00 00 89 55 E8 8B 7D 10 D1 EF 39 75 F4 73 2E
+
+. 727 804CA10 4
+. 39 F7 77 08
+
+. 728 804CA1C 19
+. 8B 45 E8 FF 45 F4 8B 55 F0 01 FE 01 45 EC 39 55 F4 72 A9
+
+. 729 804CA2F 9
+. 83 C4 10 5E 31 C0 5F C9 C3
+
+. 730 804FE73 27
+. C7 45 E0 00 00 00 00 8B 45 E4 83 C4 10 39 45 E0 BF 01 00 00 00 0F 83 81 01 00 00
+***** EXPENSIVE 732 62
+
+. 731 804FE8E 64
+. DB 45 14 D8 C0 DC 8B 3C F6 FF FF DD 5D C8 31 D2 52 57 DF 2C 24 DD 83 34 F6 FF FF D9 C9 D8 C0 83 EC 08 DC 7D C8 D9 C9 DD 5D D8 DD 83 44 F6 FF FF D9 C9 DD 14 24 DD 5D A8 DD 5D D0 E8 62 88 FF FF
+***** EXPENSIVE 733 63
+
+. 732 804FECE 20
+. DD 45 A8 DC 8B BC F8 FF FF DD 1C 24 DD 5D C0 E8 4E 88 FF FF
+***** EXPENSIVE 734 64
+
+. 733 804FEE2 16
+. D9 C0 31 C9 D8 C1 83 C4 10 3B 4D 10 DE C9 73 4A
+***** EXPENSIVE 735 65
+
+. 734 804FEF2 74
+. 8D 04 3F 89 45 B8 8D 14 0F 8B 45 0C 0F AF C1 0F AF 55 0C C1 E2 04 C1 E0 04 DD 04 16 DD 04 06 01 F2 D8 E1 01 F0 DD 1A DD 42 08 DD 40 08 D8 E1 DD 5A 08 D9 C9 03 4D B8 DC 00 D9 C9 DC 40 08 D9 C9 3B 4D 10 DD 18 DD 58 08 72 BC
+
+. 735 804FF3C 16
+. C7 45 BC 01 00 00 00 39 7D BC 0F 83 D3 00 00 00
+***** EXPENSIVE 737 66
+
+. 736 805001F 4
+. DD D8 EB DB
+
+. 737 804FFFE 17
+. FF 45 E0 8B 45 E4 D1 E7 39 45 E0 0F 82 8D FE FF FF
+
+. 738 805000F 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+vex iropt: 2 x unrolling (49 sts -> 98 sts)
+
+. 739 8049364 50
+. 8B 75 EC 8B 04 0E 8B 54 0E 04 8B 75 E0 89 04 0E 89 54 0E 04 8B 75 EC 8B 44 0E 08 8B 54 0E 0C 43 8B 75 E0 89 44 0E 08 89 54 0E 0C 03 4D DC 39 FB 72 CE
+***** EXPENSIVE 741 67
+
+. 740 804884F 10
+. DD DA 46 01 D3 3B 75 1C 72 C7
+***** EXPENSIVE 742 68
+vex iropt: 2 x unrolling (39 sts -> 78 sts)
+
+. 741 804FDFC 26
+. DD 04 06 D8 C9 DD 1C 06 DD 44 06 08 D8 C9 42 DD 5C 06 08 01 C8 3B 55 10 72 E6
+***** EXPENSIVE 743 69
+vex iropt: 2 x unrolling (40 sts -> 80 sts)
+
+. 742 804944C 28
+. 8B 4D E8 DD 04 01 D8 C9 DD 1C 01 DD 44 01 08 D8 C9 43 DD 5C 01 08 01 D0 39 FB 72 E4
+vex iropt: 4 x unrolling (26 sts -> 104 sts)
+
+. 743 804B9A8 38
+. 43 C7 04 01 00 00 00 00 C7 44 01 04 00 00 00 00 C7 44 01 08 00 00 00 00 C7 44 01 0C 00 00 00 00 01 D0 39 FB 72 DA
+***** EXPENSIVE 745 70
+
+. 744 804BA0C 38
+. 89 F0 31 D2 F7 F7 89 D0 31 D2 52 50 DF 2C 24 DC 0D E0 06 06 08 83 EC 08 DC 75 E0 DD 14 24 DD 5D C8 E8 EE CB FF FF
+***** EXPENSIVE 746 71
+vex iropt: 2 x unrolling (32 sts -> 64 sts)
+
+. 745 804BAD4 18
+. D9 C9 40 DD 14 0E D9 C9 DD 54 0E 08 01 D1 39 D8 72 EE
+vex iropt: 4 x unrolling (26 sts -> 104 sts)
+
+. 746 804BAF4 38
+. 40 C7 04 0F 00 00 00 00 C7 44 0F 04 00 00 00 00 C7 44 0F 08 00 00 00 00 C7 44 0F 0C 00 00 00 00 01 D1 39 D8 72 DA
+***** EXPENSIVE 748 72
+
+. 747 804BB8C 39
+. 89 F0 31 D2 F7 75 0C 89 D0 31 D2 52 50 DF 2C 24 DC 0D E8 06 06 08 83 EC 08 DC 75 E0 DD 14 24 DD 5D C8 E8 6D CA FF FF
+vex iropt: 4 x unrolling (30 sts -> 120 sts)
+
+. 748 804BC0C 42
+. 8B 4D 28 43 C7 04 01 00 00 00 00 C7 44 01 04 00 00 00 00 C7 44 01 08 00 00 00 00 C7 44 01 0C 00 00 00 00 01 D0 3B 5D 0C 72 D6
+***** EXPENSIVE 750 73
+
+. 749 804BC3D 62
+. 8B 45 08 31 D2 F7 75 0C 8B 4D 10 0F AF CA 31 D2 52 8B 45 0C 50 DF 2C 24 DD 45 14 8B 45 28 C1 E1 04 D8 C9 D9 C9 DC 4D 1C D9 C9 DD 1C 08 DD 5C 08 08 83 C4 08 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 750 80499CB 17
+. BA 01 00 00 00 47 89 D0 89 F9 D3 E0 39 45 EC 77 F4
+
+. 751 80499FE 18
+. 89 F6 89 D0 D1 E1 83 E0 01 43 09 C1 D1 EA 39 FB 72 F0
+
+. 752 80499F4 10
+. 31 C9 31 DB 39 F9 89 F2 73 12
+
+. 753 8048B2D 17
+. C7 45 E4 00 00 00 00 8B 7D 10 D1 EF 39 75 EC 73 18
+
+. 754 8048B56 4
+. 39 F7 77 0A
+
+. 755 8048B64 19
+. 8B 45 0C FF 45 EC 8B 55 E8 01 FE 01 45 E4 39 55 EC 72 BD
+***** EXPENSIVE 757 74
+
+. 756 8048974 31
+. 8B 45 0C 8B 55 14 DD 04 C8 DD 04 CA DE E1 D9 E1 D8 C9 DD E2 DF E0 F6 C4 45 0F 85 E1 00 00 00
+
+. 757 804BF74 5
+. E8 D3 F9 FF FF
+vex iropt: 2 x unrolling (37 sts -> 74 sts)
+
+. 758 8049BA0 34
+. 89 D8 C1 E0 04 8B 75 E4 8B 54 06 04 41 8B 04 06 8B 75 E8 89 04 DE 89 54 DE 04 03 5D F0 3B 4D EC 72 DE
+
+. 759 805A976 9
+. 83 EC 0C 56 E8 49 24 FF FF
+
+. 760 805A97F 13
+. 89 45 E4 83 C4 10 40 0F 84 81 02 00 00
+
+. 761 805A98C 15
+. FF 75 E4 56 FF 75 0C FF 75 08 E8 9D 20 FF FF
+
+. 762 804CA38 28
+. 55 89 E5 57 56 83 EC 0C 8B 45 10 48 31 F6 39 C6 C7 45 F4 00 00 00 00 89 45 F0 73 4F
+
+. 763 804CA54 18
+. C7 45 EC 00 00 00 00 90 8B 7D 10 D1 EF 39 75 F4 73 1B
+
+. 764 804CA81 4
+. 39 F7 77 0B
+
+. 765 804CA90 19
+. 8B 55 0C FF 45 F4 8B 4D F0 01 FE 01 55 EC 39 4D F4 72 B9
+
+. 766 804CAA3 9
+. 83 C4 0C 5E 31 C0 5F C9 C3
+
+. 767 805A99B 32
+. C7 45 D4 01 00 00 00 8B 45 E4 83 C4 10 39 45 D4 C7 45 E0 01 00 00 00 89 75 D8 0F 87 48 02 00 00
+
+. 768 805A9BB 39
+. 8B 55 E0 8B 75 D8 D1 EE 89 D1 C7 45 CC 00 00 00 00 D1 E1 39 75 CC 89 55 DC 89 4D C8 89 4D E0 89 75 C4 89 75 D8 73 39
+***** EXPENSIVE 770 75
+
+. 769 805A9E2 57
+. 31 C9 8B 75 DC 8B 55 0C 0F AF D1 8D 04 0E 8B 75 08 0F AF 45 0C DD 04 D6 DD 04 C6 D9 C1 D8 C1 D9 CA DE E1 D9 C9 DD 1C D6 FF 45 CC DD 1C C6 8B 45 C4 03 4D C8 39 45 CC 72 C9
+***** EXPENSIVE 771 76
+
+. 770 805AA1B 53
+. 31 D2 52 8B 45 C8 50 DD 83 44 F6 FF FF DD 5D B0 DF 2C 24 DC BB CC F5 FF FF 83 EC 08 DD 83 34 F6 FF FF D9 C9 DD 14 24 DD 9D 78 FF FF FF DD 5D B8 E8 E0 DC FE FF
+***** EXPENSIVE 772 77
+
+. 771 805AA50 23
+. DD 85 78 FF FF FF DC 8B BC F8 FF FF DD 1C 24 DD 5D A8 E8 C9 DC FE FF
+***** EXPENSIVE 773 78
+
+. 772 805AA67 33
+. D9 C0 8B 45 DC D8 C1 C7 45 D0 01 00 00 00 D1 E8 DE C9 83 C4 10 39 45 D0 DD 5D A0 0F 83 27 01 00 00
+
+. 773 805ABAF 6
+. 83 7D DC 01 76 3F
+
+. 774 805ABF4 15
+. FF 45 D4 8B 45 E4 39 45 D4 0F 86 B8 FD FF FF
+
+. 775 805AC03 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 776 8055DC5 50
+. 8B 45 10 0F AF 45 F0 8B 75 08 89 C1 8B 54 C6 04 C1 E1 04 8B 04 C6 8B 75 0C 89 04 0E 89 54 0E 04 C7 44 0E 08 00 00 00 00 C7 44 0E 0C 00 00 00 00 EB C4
+
+. 777 8055904 11
+. 83 EC 0C FF 75 10 E8 B9 74 FF FF
+
+. 778 805590F 13
+. 89 45 E4 83 C4 10 40 0F 84 93 02 00 00
+
+. 779 805591C 39
+. 8B 45 10 89 C2 C7 45 D4 01 00 00 00 8B 75 E4 D1 EA 39 75 D4 89 45 E0 C7 45 D8 01 00 00 00 89 55 DC 0F 87 48 02 00 00
+
+. 780 8055943 15
+. C7 45 CC 00 00 00 00 8B 7D D8 39 7D CC 73 3C
+***** EXPENSIVE 782 79
+
+. 781 8055952 60
+. 31 C9 8B 55 0C 0F AF D1 8B 45 08 8B 75 DC DD 04 D0 8D 04 0E 0F AF 45 0C 8B 7D 08 DD 04 C7 D9 C1 D8 C1 D9 CA DE E1 D9 C9 DD 1C D7 FF 45 CC DD 1C C7 8B 45 D8 03 4D E0 39 45 CC 72 C6
+***** EXPENSIVE 783 80
+
+. 782 805598E 50
+. 31 D2 52 8B 45 E0 50 DD 83 44 F6 FF FF DD 5D B8 DF 2C 24 DC BB D4 F5 FF FF 83 EC 08 DD 83 34 F6 FF FF D9 C9 DD 14 24 DD 5D 88 DD 5D C0 E8 70 2D FF FF
+***** EXPENSIVE 784 81
+
+. 783 80559C0 20
+. DD 45 88 DC 8B BC F8 FF FF DD 1C 24 DD 5D B0 E8 5C 2D FF FF
+***** EXPENSIVE 785 82
+
+. 784 80559D4 33
+. D9 C0 8B 45 DC D8 C1 C7 45 D0 01 00 00 00 D1 E8 DE C9 83 C4 10 39 45 D0 DD 5D A8 0F 83 24 01 00 00
+
+. 785 8055B19 6
+. 83 7D DC 01 76 54
+
+. 786 8055B73 24
+. FF 45 D4 8B 55 E4 D1 6D DC D1 6D E0 D1 65 D8 39 55 D4 0F 86 B8 FD FF FF
+
+. 787 8055B8B 17
+. FF 75 E4 FF 75 10 FF 75 0C FF 75 08 E8 9C 6E FF FF
+
+. 788 8055B9C 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+***** EXPENSIVE 790 83
+vex iropt: 2 x unrolling (36 sts -> 72 sts)
+
+. 789 8049C48 20
+. 8B 55 E8 DD 04 C2 D8 F1 41 DD 1C C2 03 45 F0 3B 4D EC 72 EC
+***** EXPENSIVE 791 84
+vex iropt: 2 x unrolling (41 sts -> 82 sts)
+
+. 790 805AC54 42
+. 89 F1 8B 45 08 47 DD 04 F0 C1 E1 04 8B 45 0C 03 75 10 3B 7D 14 DD 1C 08 C7 44 08 08 00 00 00 00 C7 44 08 0C 00 00 00 00 72 D6
+
+. 791 8048BA0 13
+. BB 01 00 00 00 83 C1 02 3B 5D 0C 73 4F
+vex iropt: 4 x unrolling (18 sts -> 72 sts)
+
+. 792 8049164 12
+. 46 89 D0 89 F1 D3 E0 39 45 EC 77 F4
+vex iropt: 4 x unrolling (19 sts -> 76 sts)
+
+. 793 8049194 16
+. 89 D0 D1 E1 83 E0 01 43 09 C1 D1 EA 39 F3 72 F0
+
+. 794 8048AAC 10
+. 8B 7D 10 D1 EF 39 75 F0 73 2E
+***** EXPENSIVE 796 85
+
+. 795 8048AB6 50
+. 8B 4D 0C 0F AF CE 8B 45 08 C1 E1 04 DD 04 08 8B 5D E8 DD 04 18 D9 C9 01 C1 DD 1C 18 DD 44 18 08 DD 41 08 DD 5C 18 08 D9 C9 DD 19 DD 59 08 39 F7 77 08
+vex iropt: 8 x unrolling (12 sts -> 96 sts)
+
+. 796 8048AE8 8
+. 29 FE D1 EF 39 F7 76 F8
+vex iropt: 8 x unrolling (13 sts -> 104 sts)
+
+. 797 804CDDC 7
+. D1 E0 41 39 D0 72 F9
+
+. 798 804C9D8 10
+. 8B 7D 10 D1 EF 39 75 F4 73 2E
+***** EXPENSIVE 800 86
+
+. 799 804C9E2 50
+. 8B 4D 0C 0F AF CE 8B 55 08 C1 E1 04 DD 04 0A 8B 45 EC DD 04 02 D9 C9 01 D1 DD 1C 02 DD 44 02 08 DD 41 08 DD 5C 02 08 D9 C9 DD 19 DD 59 08 39 F7 77 08
+vex iropt: 8 x unrolling (12 sts -> 96 sts)
+
+. 800 804CA14 8
+. 29 FE D1 EF 39 F7 76 F8
+***** EXPENSIVE 802 87
+vex iropt: not unrolling (62 sts)
+
+. 801 804FEF8 68
+. 8D 14 0F 8B 45 0C 0F AF C1 0F AF 55 0C C1 E2 04 C1 E0 04 DD 04 16 DD 04 06 01 F2 D8 E1 01 F0 DD 1A DD 42 08 DD 40 08 D8 E1 DD 5A 08 D9 C9 03 4D B8 DC 00 D9 C9 DC 40 08 D9 C9 3B 4D 10 DD 18 DD 58 08 72 BC
+***** EXPENSIVE 803 88
+
+. 802 804FE9C 50
+. 31 D2 52 57 DF 2C 24 DD 83 34 F6 FF FF D9 C9 D8 C0 83 EC 08 DC 7D C8 D9 C9 DD 5D D8 DD 83 44 F6 FF FF D9 C9 DD 14 24 DD 5D A8 DD 5D D0 E8 62 88 FF FF
+***** EXPENSIVE 804 89
+
+. 803 804FF4C 63
+. DD 45 C0 DC 4D D0 DD 45 D8 DE E1 DD 45 D8 D8 CA DD 45 C0 D9 CA DE E1 D9 C9 DC 4D D8 DD 45 D0 D9 C9 DC 45 D0 D9 C9 D8 CB 31 C9 DE E9 D9 C9 3B 4D 10 DD 55 D8 D9 C9 DD 55 D0 0F 83 8E 00 00 00
+***** EXPENSIVE 805 90
+
+. 804 804FF8B 97
+. 90 8B 55 BC 8D 04 0A 8D 14 07 0F AF 55 0C C1 E2 04 DD 04 16 01 F2 DD 42 08 0F AF 45 0C D9 C3 D9 C3 D8 CA D9 C9 C1 E0 04 D8 CB DE E1 DD 04 06 D8 E1 01 F0 DD 1A D9 C9 D8 CC D9 CA D8 CB DD 40 08 D9 CB DE C1 DC EA D9 CA DD 5A 08 8D 0C 79 DC 00 D9 C9 DC 40 08 D9 C9 3B 4D 10 DD 18 DD 58 08 72 A0
+***** EXPENSIVE 806 91
+
+. 805 804FFEC 16
+. DD D8 DD D8 FF 45 BC 39 7D BC 0F 82 50 FF FF FF
+***** EXPENSIVE 807 92
+
+. 806 804FFFC 19
+. DD D8 FF 45 E0 8B 45 E4 D1 E7 39 45 E0 0F 82 8D FE FF FF
+vex iropt: 4 x unrolling (18 sts -> 72 sts)
+
+. 807 80499D0 12
+. 47 89 D0 89 F9 D3 E0 39 45 EC 77 F4
+vex iropt: 4 x unrolling (19 sts -> 76 sts)
+
+. 808 8049A00 16
+. 89 D0 D1 E1 83 E0 01 43 09 C1 D1 EA 39 FB 72 F0
+
+. 809 8048B34 10
+. 8B 7D 10 D1 EF 39 75 EC 73 18
+***** EXPENSIVE 811 93
+
+. 810 8048B3E 28
+. 8B 5D 0C 8B 4D 08 8B 55 E4 0F AF DE DD 04 D1 DD 04 D9 DD 1C D1 DD 1C D9 39 F7 77 0A
+
+. 811 8048B5A 10
+. 89 F6 29 FE D1 EF 39 F7 76 F8
+
+. 812 804CA5C 10
+. 8B 7D 10 D1 EF 39 75 F4 73 1B
+***** EXPENSIVE 814 94
+
+. 813 804CA66 31
+. 8B 55 EC 8B 4D 08 DD 04 D1 8B 4D 0C 8B 45 08 0F AF CE DD 04 C8 DD 1C D0 DD 1C C8 39 F7 77 0B
+
+. 814 804CA85 11
+. 8D 76 00 29 FE D1 EF 39 F7 76 F8
+***** EXPENSIVE 816 95
+vex iropt: 2 x unrolling (58 sts -> 116 sts)
+
+. 815 805A9E4 55
+. 8B 75 DC 8B 55 0C 0F AF D1 8D 04 0E 8B 75 08 0F AF 45 0C DD 04 D6 DD 04 C6 D9 C1 D8 C1 D9 CA DE E1 D9 C9 DD 1C D6 FF 45 CC DD 1C C6 8B 45 C4 03 4D C8 39 45 CC 72 C9
+
+. 816 805ABB5 15
+. C7 45 CC 00 00 00 00 8B 45 C4 39 45 CC 73 30
+***** EXPENSIVE 818 96
+
+. 817 805ABC4 46
+. D1 6D DC 8B 55 C8 DD 83 E4 F8 FF FF 2B 55 DC 8B 45 0C 0F AF C2 8B 4D 08 DD 04 C1 FF 45 CC 8B 75 C4 D8 C9 03 55 C8 39 75 CC DD 1C C1 72 E1
+***** EXPENSIVE 819 97
+
+. 818 805ABF2 17
+. DD D8 FF 45 D4 8B 45 E4 39 45 D4 0F 86 B8 FD FF FF
+***** EXPENSIVE 820 98
+
+. 819 8055D52 94
+. 8B 7D 10 8D 76 00 8B 4D 14 2B 4D F0 8B 45 08 0F AF 4D 10 89 FE DD 04 F8 8B 14 C8 C1 E6 04 89 4D E8 8B 4C C8 04 8B 45 0C 89 4C 30 0C DD 14 30 89 54 30 08 8B 45 E8 C1 E0 04 8B 75 0C 81 F1 00 00 00 80 FF 45 F0 DD 1C 06 89 54 06 08 89 4C 06 0C 8B 45 14 2B 45 F0 03 7D 10 39 45 F0 72 A8
+
+. 820 8055B1F 15
+. C7 45 CC 00 00 00 00 8B 7D D8 39 7D CC 73 45
+***** EXPENSIVE 822 99
+
+. 821 8055B2E 67
+. 8B 75 DC D1 EE 8B 55 DC DD 83 D4 F8 FF FF 01 F2 31 C9 8D 04 0E 0F AF 45 0C 8B 7D 08 DD 04 C7 D8 C0 DD 1C C7 8B 45 0C 0F AF C2 DD 04 C7 D8 C9 DD 1C C7 FF 45 CC 8B 45 D8 03 55 E0 03 4D E0 39 45 CC 72 CF
+***** EXPENSIVE 823 100
+
+. 822 8055B71 26
+. DD D8 FF 45 D4 8B 55 E4 D1 6D DC D1 6D E0 D1 65 D8 39 55 D4 0F 86 B8 FD FF FF
+***** EXPENSIVE 824 101
+vex iropt: not unrolling (61 sts)
+
+. 823 8055954 58
+. 8B 55 0C 0F AF D1 8B 45 08 8B 75 DC DD 04 D0 8D 04 0E 0F AF 45 0C 8B 7D 08 DD 04 C7 D9 C1 D8 C1 D9 CA DE E1 D9 C9 DD 1C D7 FF 45 CC DD 1C C7 8B 45 D8 03 4D E0 39 45 CC 72 C6
+***** EXPENSIVE 825 102
+vex iropt: not unrolling (86 sts)
+
+. 824 804FF8C 96
+. 8B 55 BC 8D 04 0A 8D 14 07 0F AF 55 0C C1 E2 04 DD 04 16 01 F2 DD 42 08 0F AF 45 0C D9 C3 D9 C3 D8 CA D9 C9 C1 E0 04 D8 CB DE E1 DD 04 06 D8 E1 01 F0 DD 1A D9 C9 D8 CC D9 CA D8 CB DD 40 08 D9 CB DE C1 DC EA D9 CA DD 5A 08 8D 0C 79 DC 00 D9 C9 DC 40 08 D9 C9 3B 4D 10 DD 18 DD 58 08 72 A0
+vex iropt: 8 x unrolling (12 sts -> 96 sts)
+
+. 825 8048B5C 8
+. 29 FE D1 EF 39 F7 76 F8
+vex iropt: 8 x unrolling (12 sts -> 96 sts)
+
+. 826 804CA88 8
+. 29 FE D1 EF 39 F7 76 F8
+***** EXPENSIVE 828 103
+vex iropt: 2 x unrolling (43 sts -> 86 sts)
+
+. 827 805ABD3 31
+. 8B 45 0C 0F AF C2 8B 4D 08 DD 04 C1 FF 45 CC 8B 75 C4 D8 C9 03 55 C8 39 75 CC DD 1C C1 72 E1
+***** EXPENSIVE 829 104
+
+. 828 805AA88 87
+. C7 45 88 FF FF FF FF 90 DD 45 A8 DC 4D B0 DC 6D B8 DD 5D 98 DD 45 A0 DC 4D B8 DC 6D 98 DD 5D 98 DD 45 A8 DC 4D B8 DD 45 B0 DE C1 DD 45 A0 DC 4D B0 DE E9 DD 5D 90 C7 45 CC 00 00 00 00 8B 45 C4 DD 45 98 DD 45 90 D9 C9 39 45 CC DD 5D B8 DD 5D B0 0F 83 BC 00 00 00
+***** EXPENSIVE 830 105
+
+. 829 805AADF 137
+. 8B 7D 88 03 7D C8 C7 45 8C 00 00 00 00 8B 4D 8C 03 4D D0 8B 45 8C 0F AF 4D 0C 03 45 DC 8B 55 08 DD 04 CA 89 C2 2B 55 D0 03 45 D0 0F AF 55 0C 0F AF 45 0C 8B 75 08 DD 04 D6 DD 04 C6 8B 75 0C 0F AF F7 89 85 74 FF FF FF 8B 45 08 DD 04 F0 DD 45 98 DD 45 90 DD 45 98 D9 CA D8 CC D9 C9 D8 CB D9 CA DE CB D9 C5 D9 C5 D9 CD DC 4D 90 D9 C9 D8 C2 D9 CD D8 C4 D9 CD D8 E3 D9 CD D8 C1 D9 CF DE E2 D9 CD DE E3 D9 CB DD 1C C8
+***** EXPENSIVE 831 106
+
+. 830 805AB68 51
+. DE C2 D9 CB DD 1C F0 D9 CA DE E1 D9 C9 8B 4D C8 FF 45 CC 8B 75 C4 DD 1C D0 D9 E0 8B 95 74 FF FF FF 03 7D C8 01 4D 8C 39 75 CC DD 1C D0 0F 82 51 FF FF FF
+
+. 831 805AB9B 20
+. 8B 45 DC FF 45 D0 D1 E8 FF 4D 88 39 45 D0 0F 82 E1 FE FF FF
+***** EXPENSIVE 833 107
+vex iropt: not unrolling (69 sts)
+
+. 832 8055D58 88
+. 8B 4D 14 2B 4D F0 8B 45 08 0F AF 4D 10 89 FE DD 04 F8 8B 14 C8 C1 E6 04 89 4D E8 8B 4C C8 04 8B 45 0C 89 4C 30 0C DD 14 30 89 54 30 08 8B 45 E8 C1 E0 04 8B 75 0C 81 F1 00 00 00 80 FF 45 F0 DD 1C 06 89 54 06 08 89 4C 06 0C 8B 45 14 2B 45 F0 03 7D 10 39 45 F0 72 A8
+***** EXPENSIVE 834 108
+
+. 833 80559F5 80
+. C7 45 A0 FF FF FF FF DD 45 B0 DC 4D B8 DD 45 C0 DE E1 DD 45 A8 DC 4D C0 DD 45 B0 D9 CA DE E1 D9 C9 DC 4D C0 DD 45 A8 D9 C9 DC 45 B8 D9 C9 DC 4D B8 C7 45 CC 00 00 00 00 8B 45 D8 DE E9 D9 C9 39 45 CC DD 55 C0 D9 C9 DD 55 B8 0F 83 61 01 00 00
+***** EXPENSIVE 835 109
+
+. 834 8055A45 140
+. 8B 7D A0 03 7D E0 C7 45 A4 00 00 00 00 89 F6 8B 4D A4 03 4D D0 8B 45 A4 8B 75 0C 0F AF F7 03 45 DC 0F AF 4D 0C 8B 55 08 DD 04 F2 DD 04 CA 89 C2 2B 55 D0 89 45 84 0F AF 55 0C 8B 45 08 DD 04 D0 8B 45 84 03 45 D0 0F AF 45 0C 89 95 74 FF FF FF 8B 55 08 DD 04 C2 D9 C2 D9 C9 D9 E0 D9 C9 D8 C2 D9 C4 D9 C9 DD 1C CA D9 CC D8 E1 D9 CC DE C1 D9 CA DE E1 D9 C4 D9 C4 D9 C9 89 45 84 D8 CA D9 C9 8B 85 74 FF FF FF D8 CC D9 CA D8 CD
+***** EXPENSIVE 836 110
+
+. 835 8055AD1 48
+. D9 CC D8 CE D9 CB DD 1C C2 DE E1 D9 C9 8B 45 84 DE C2 DD 1C C2 FF 45 CC DD 1C F2 8B 55 E0 8B 75 D8 03 7D E0 01 55 A4 39 75 CC 0F 82 53 FF FF FF
+***** EXPENSIVE 837 111
+
+. 836 8055B01 24
+. DD D8 DD D8 8B 45 DC FF 45 D0 D1 E8 FF 4D A0 39 45 D0 0F 82 E3 FE FF FF
+***** EXPENSIVE 838 112
+vex iropt: 2 x unrolling (55 sts -> 110 sts)
+
+. 837 8055B40 49
+. 8D 04 0E 0F AF 45 0C 8B 7D 08 DD 04 C7 D8 C0 DD 1C C7 8B 45 0C 0F AF C2 DD 04 C7 D8 C9 DD 1C C7 FF 45 CC 8B 45 D8 03 55 E0 03 4D E0 39 45 CC 72 CF
+***** EXPENSIVE 839 113
+
+. 838 805AAEC 131
+. 8B 4D 8C 03 4D D0 8B 45 8C 0F AF 4D 0C 03 45 DC 8B 55 08 DD 04 CA 89 C2 2B 55 D0 03 45 D0 0F AF 55 0C 0F AF 45 0C 8B 75 08 DD 04 D6 DD 04 C6 8B 75 0C 0F AF F7 89 85 74 FF FF FF 8B 45 08 DD 04 F0 DD 45 98 DD 45 90 DD 45 98 D9 CA D8 CC D9 C9 D8 CB D9 CA DE CB D9 C5 D9 C5 D9 CD DC 4D 90 D9 C9 D8 C2 D9 CD D8 C4 D9 CD D8 E3 D9 CD D8 C1 D9 CF DE E2 D9 CD DE E3 D9 CB DD 1C C8 DE C2 D9 CB DD 1C F0
+***** EXPENSIVE 840 114
+
+. 839 805AB6F 44
+. D9 CA DE E1 D9 C9 8B 4D C8 FF 45 CC 8B 75 C4 DD 1C D0 D9 E0 8B 95 74 FF FF FF 03 7D C8 01 4D 8C 39 75 CC DD 1C D0 0F 82 51 FF FF FF
+***** EXPENSIVE 841 115
+
+. 840 805AA90 79
+. DD 45 A8 DC 4D B0 DC 6D B8 DD 5D 98 DD 45 A0 DC 4D B8 DC 6D 98 DD 5D 98 DD 45 A8 DC 4D B8 DD 45 B0 DE C1 DD 45 A0 DC 4D B0 DE E9 DD 5D 90 C7 45 CC 00 00 00 00 8B 45 C4 DD 45 98 DD 45 90 D9 C9 39 45 CC DD 5D B8 DD 5D B0 0F 83 BC 00 00 00
+***** EXPENSIVE 842 116
+
+. 841 80559FC 73
+. DD 45 B0 DC 4D B8 DD 45 C0 DE E1 DD 45 A8 DC 4D C0 DD 45 B0 D9 CA DE E1 D9 C9 DC 4D C0 DD 45 A8 D9 C9 DC 45 B8 D9 C9 DC 4D B8 C7 45 CC 00 00 00 00 8B 45 D8 DE E9 D9 C9 39 45 CC DD 55 C0 D9 C9 DD 55 B8 0F 83 61 01 00 00
+***** EXPENSIVE 843 117
+
+. 842 8055A54 134
+. 8B 4D A4 03 4D D0 8B 45 A4 8B 75 0C 0F AF F7 03 45 DC 0F AF 4D 0C 8B 55 08 DD 04 F2 DD 04 CA 89 C2 2B 55 D0 89 45 84 0F AF 55 0C 8B 45 08 DD 04 D0 8B 45 84 03 45 D0 0F AF 45 0C 89 95 74 FF FF FF 8B 55 08 DD 04 C2 D9 C2 D9 C9 D9 E0 D9 C9 D8 C2 D9 C4 D9 C9 DD 1C CA D9 CC D8 E1 D9 CC DE C1 D9 CA DE E1 D9 C4 D9 C4 D9 C9 89 45 84 D8 CA D9 C9 8B 85 74 FF FF FF D8 CC D9 CA D8 CD D9 CC D8 CE D9 CB DD 1C C2
+***** EXPENSIVE 844 118
+
+. 843 8055ADA 39
+. DE E1 D9 C9 8B 45 84 DE C2 DD 1C C2 FF 45 CC DD 1C F2 8B 55 E0 8B 75 D8 03 7D E0 01 55 A4 39 75 CC 0F 82 53 FF FF FF
+
+. 844 804B8BE 7
+. 8B 75 F0 39 FE 77 58
+
+. 845 804B8C5 18
+. 8D 76 00 BB 01 00 00 00 83 EC 08 56 53 E8 39 D3 FF FF
+
+. 846 8048C10 34
+. 55 89 E5 57 56 53 83 EC 38 8B 45 08 8B 7D 0C 89 C6 0F AF F7 89 F3 C1 E3 04 89 45 F0 53 E8 5E FA FF FF
+
+. 847 8048C32 11
+. 89 45 E4 89 1C 24 E8 53 FA FF FF
+
+. 848 8048C3D 11
+. 89 45 E0 89 1C 24 E8 48 FA FF FF
+
+. 849 8048C48 11
+. 89 45 DC 89 1C 24 E8 3D FA FF FF
+
+. 850 8048C53 15
+. 89 45 D8 31 DB 8D 04 36 83 C4 10 39 C3 73 55
+***** EXPENSIVE 852 119
+
+. 851 8048C62 79
+. DD 05 28 E9 05 08 DD 05 30 E9 05 08 DD 05 38 E9 05 08 89 C1 89 F6 31 D2 52 53 DF 2C 24 D9 C0 8B 75 E4 D8 C4 D9 C9 8B 45 E0 DD 14 DE D9 C9 DD 1C D8 D9 C0 D8 C3 D9 C9 8B 75 DC D8 C2 D9 C9 8B 45 D8 DD 1C DE DD 1C D8 43 83 C4 08 39 CB 72 C7
+***** EXPENSIVE 853 120
+vex iropt: not unrolling (63 sts)
+
+. 852 8048C78 57
+. 31 D2 52 53 DF 2C 24 D9 C0 8B 75 E4 D8 C4 D9 C9 8B 45 E0 DD 14 DE D9 C9 DD 1C D8 D9 C0 D8 C3 D9 C9 8B 75 DC D8 C2 D9 C9 8B 45 D8 DD 1C DE DD 1C D8 43 83 C4 08 39 CB 72 C7
+***** EXPENSIVE 854 121
+
+. 853 8048CB1 16
+. DD D8 DD D8 DD D8 83 EC 0C 6A 00 E8 5F 50 01 00
+
+. 854 8048CC1 8
+. 89 3C 24 E8 33 41 00 00
+
+. 855 804CDFC 28
+. 55 89 E5 57 56 53 83 EC 6C 8B 4D 08 E8 00 00 00 00 5B 81 C3 07 43 01 00 85 C9 75 21
+
+. 856 804CE39 13
+. 83 EC 0C 68 0C 02 00 00 E8 4A B8 FF FF
+
+. 857 804CE46 14
+. 83 C4 10 85 C0 89 45 C4 0F 84 23 02 00 00
+
+. 858 804CE54 15
+. 8B 45 08 83 EC 0C C1 E0 04 50 E8 2D B8 FF FF
+
+. 859 804CE63 20
+. 8B 55 C4 83 C4 10 85 C0 89 82 08 02 00 00 0F 84 E1 01 00 00
+
+. 860 804CE77 27
+. 8B 45 08 8B 75 C4 89 06 89 F0 52 83 C0 08 50 8D 45 E4 50 FF 75 08 E8 12 FD FF FF
+
+. 861 804CBA4 51
+. 55 89 E5 57 56 53 83 EC 2C FF 75 10 FF 75 0C 8D 45 C8 E8 00 00 00 00 5B 81 C3 59 45 01 00 50 FF 75 08 8D 7D C8 8D B3 54 F6 FF FF FC B9 07 00 00 00 F3 A5
+vex iropt: 8 x unrolling (15 sts -> 120 sts)
+
+. 862 804CBD5 2
+. F3 A5
+
+. 863 804CBD7 5
+. E8 88 00 00 00
+
+. 864 804CC64 36
+. 55 89 E5 57 56 53 83 EC 0C 8B 4D 08 E8 00 00 00 00 5B 81 C3 9F 44 01 00 31 FF 31 F6 85 C9 0F 84 21 01 00 00
+
+. 865 804CC88 10
+. 83 7D 08 01 0F 84 03 01 00 00
+
+. 866 804CD95 20
+. 8B 45 14 8B 55 10 C7 00 01 00 00 00 C7 02 01 00 00 00 EB E8
+
+. 867 804CD91 4
+. 31 C0 EB EF
+
+. 868 804CD84 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 869 804CBDC 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 870 804CE92 11
+. 83 C4 10 85 C0 0F 85 8F 01 00 00
+***** EXPENSIVE 872 122
+
+. 871 804CE9D 61
+. 8B 4D E4 8B 55 C4 89 4A 04 31 D2 52 8B 45 08 50 DF 2C 24 C7 45 DC 00 00 00 00 83 C4 08 DC BB CC F5 FF FF 39 4D DC DD 5D C8 C7 45 D8 01 00 00 00 C7 45 E0 00 00 00 00 0F 83 12 01 00 00
+
+. 872 804CEDA 87
+. C7 45 A8 00 00 00 00 8B 45 C4 8B 75 E0 8B 74 B0 08 8B 55 C4 8B 45 A8 03 82 08 02 00 00 89 75 BC 8B 75 E0 89 84 B2 08 01 00 00 8B 45 D8 89 C2 0F AF 55 BC 89 55 D8 89 45 D4 31 D2 8B 45 08 F7 75 D8 C7 45 C0 01 00 00 00 8B 55 BC 39 55 C0 89 45 B8 0F 83 AF 00 00 00
+
+. 873 804CFE0 12
+. FF 45 E0 39 4D E0 0F 82 F5 FE FF FF
+
+. 874 804CFEC 8
+. 8B 75 08 39 75 DC 77 08
+
+. 875 804CFF4 8
+. 8B 45 C4 E9 35 FE FF FF
+
+. 876 804CE31 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 877 8048CC9 26
+. 89 45 EC 85 C0 FF 75 F0 0F 94 C0 57 68 60 E9 05 08 0F B6 C0 50 E8 55 51 01 00
+
+. 878 8048CE3 9
+. 83 C4 14 57 E8 A4 43 00 00
+
+. 879 804D090 28
+. 55 89 E5 57 56 53 83 EC 0C 8B 75 08 E8 00 00 00 00 5B 81 C3 73 40 01 00 85 F6 75 24
+
+. 880 804D0D0 10
+. 83 EC 0C 6A 08 E8 B6 B5 FF FF
+
+. 881 804D0DA 9
+. 83 C4 10 85 C0 89 C7 74 3B
+
+. 882 804D0E3 14
+. 83 EC 0C 89 30 C1 E6 04 56 E8 9F B5 FF FF
+
+. 883 804D0F1 10
+. 83 C4 10 85 C0 89 47 04 74 04
+
+. 884 804D0FB 4
+. 89 F8 EB C6
+
+. 885 804D0C5 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 886 8048CEC 26
+. 83 C4 0C 85 C0 89 45 E8 57 0F 94 C0 68 A0 E9 05 08 0F B6 C0 50 E8 32 51 01 00
+
+. 887 8048D06 17
+. FF 75 DC FF 75 E4 FF 75 F0 57 31 DB E8 8D 31 00 00
+
+. 888 8048D17 7
+. 83 C4 20 39 FB 73 40
+
+. 889 8048D1E 64
+. 8B 75 F0 C1 E6 04 31 C9 89 75 D4 8D 76 00 8B 75 E4 8B 04 0E 8B 54 0E 04 8B 75 E0 89 04 0E 89 54 0E 04 8B 75 E4 8B 44 0E 08 8B 54 0E 0C 43 8B 75 E0 89 44 0E 08 89 54 0E 0C 03 4D D4 39 FB 72 CE
+
+. 890 8048D5E 23
+. 83 EC 0C FF 75 E8 FF 75 EC 57 FF 75 F0 FF 75 E4 31 DB E8 DF 44 00 00
+
+. 891 804D254 41
+. 55 89 E5 53 83 EC 0C 6A FF FF 75 18 FF 75 14 FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 A6 3E 01 00 FF 75 08 E8 BF 00 00 00
+
+. 892 804D33C 64
+. 55 89 E5 57 56 53 83 EC 3C 8B 75 10 8B 55 18 8B 45 14 E8 00 00 00 00 5B 81 C3 C1 3D 01 00 8B 40 04 8B 4A 04 85 F6 89 45 F0 C7 45 E8 01 00 00 00 C7 45 DC 00 00 00 00 89 4D D8 0F 84 0C 03 00 00
+
+. 893 804D37C 7
+. 31 C0 83 FE 01 74 23
+
+. 894 804D3A6 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 895 804D27D 5
+. 8B 5D FC C9 C3
+
+. 896 8048D75 7
+. 83 C4 20 39 FB 73 3E
+
+. 897 8048D7C 62
+. 8B 45 F0 C1 E0 04 31 C9 89 45 D4 90 8B 75 E4 8B 04 0E 8B 54 0E 04 8B 75 D8 89 04 0E 89 54 0E 04 8B 75 E4 8B 44 0E 08 8B 54 0E 0C 43 8B 75 D8 89 44 0E 08 89 54 0E 0C 03 4D D4 39 FB 72 CE
+
+. 898 8048DBA 32
+. 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 E4 68 AB E8 05 08 FF 75 DC 68 B8 E8 05 08 E8 26 FA FF FF
+
+. 899 8048DDA 18
+. 83 C4 20 FF 75 F0 57 68 E0 E9 05 08 50 E8 4C 50 01 00
+
+. 900 8048DEC 13
+. 83 C4 10 83 7D F0 01 0F 87 C1 02 00 00
+
+. 901 8048DF9 21
+. 83 EC 0C FF 75 E8 FF 75 EC 57 FF 75 F0 FF 75 E4 E8 A6 44 00 00
+
+. 902 804D2B4 44
+. 55 89 E5 57 56 53 83 EC 14 6A 01 FF 75 18 FF 75 14 FF 75 10 FF 75 0C 8B 75 08 E8 00 00 00 00 5B 81 C3 41 3E 01 00 56 E8 5C 00 00 00
+
+. 903 804D2E0 9
+. 83 C4 20 85 C0 89 C7 75 43
+***** EXPENSIVE 905 123
+
+. 904 804D2E9 28
+. 31 D2 52 8B 45 10 50 DF 2C 24 DD 83 34 F6 FF FF 83 C4 08 31 D2 3B 7D 10 DE F1 73 31
+***** EXPENSIVE 906 124
+
+. 905 804D305 37
+. 8B 4D 0C 31 C0 C1 E1 04 8D 76 00 DD 04 06 D8 C9 DD 1C 06 DD 44 06 08 D8 C9 42 DD 5C 06 08 01 C8 3B 55 10 72 E6
+***** EXPENSIVE 907 125
+
+. 906 804D32A 12
+. DD D8 8D 65 F4 5B 5E 89 F8 5F C9 C3
+
+. 907 8048E0E 35
+. 83 C4 20 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 E4 68 BC E8 05 08 FF 75 E0 68 C8 E8 05 08 E8 CF F9 FF FF
+
+. 908 8048E31 18
+. 83 C4 20 FF 75 F0 57 68 20 EA 05 08 50 E8 F5 4F 01 00
+
+. 909 8048E43 13
+. 83 C4 10 83 7D F0 01 0F 87 45 02 00 00
+
+. 910 8048E50 23
+. 83 EC 0C FF 75 E8 FF 75 EC 57 FF 75 F0 FF 75 D8 31 DB E8 1D 44 00 00
+
+. 911 804D284 41
+. 55 89 E5 53 83 EC 0C 6A 01 FF 75 18 FF 75 14 FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 76 3E 01 00 FF 75 08 E8 8F 00 00 00
+
+. 912 804D2AD 5
+. 8B 5D FC C9 C3
+
+. 913 8048E67 7
+. 83 C4 20 39 FB 73 30
+***** EXPENSIVE 915 126
+
+. 914 8048E6E 46
+. 31 D2 52 57 8B 55 F0 DF 2C 24 31 C0 83 C4 08 C1 E2 04 8B 4D E0 DD 04 01 D8 C9 DD 1C 01 DD 44 01 08 D8 C9 43 DD 5C 01 08 01 D0 39 FB 72 E4
+***** EXPENSIVE 916 127
+
+. 915 8048E9C 34
+. DD D8 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 D8 68 CD E8 05 08 FF 75 E0 68 C8 E8 05 08 E8 42 F9 FF FF
+
+. 916 8048EBE 18
+. 83 C4 20 FF 75 F0 57 68 60 EA 05 08 50 E8 68 4F 01 00
+
+. 917 8048ED0 13
+. 83 C4 10 83 7D F0 01 0F 87 90 01 00 00
+
+. 918 8048EDD 31
+. 83 EC 0C FF 75 DC FF 75 E4 6A 00 6A 00 68 00 00 F0 3F 6A 00 FF 75 F0 57 6A 01 E8 84 2A 00 00
+
+. 919 8048EFC 21
+. 83 C4 24 FF 75 E8 FF 75 EC 57 FF 75 F0 FF 75 E4 E8 43 43 00 00
+
+. 920 8048F11 35
+. 83 C4 20 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 E4 68 DA E8 05 08 FF 75 DC 68 E7 E8 05 08 E8 CC F8 FF FF
+
+. 921 8048F34 18
+. 83 C4 20 FF 75 F0 57 68 A0 EA 05 08 50 E8 F2 4E 01 00
+
+. 922 8048F46 26
+. FF 75 DC FF 75 E4 6A 00 6A 00 68 00 00 F0 3F 6A 00 FF 75 F0 57 E8 48 2B 00 00
+
+. 923 8048F60 21
+. 83 C4 24 FF 75 E8 FF 75 EC 57 FF 75 F0 FF 75 E4 E8 DF 42 00 00
+
+. 924 8048F75 35
+. 83 C4 20 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 E4 68 F0 E8 05 08 FF 75 DC 68 E7 E8 05 08 E8 68 F8 FF FF
+
+. 925 8048F98 20
+. 83 C4 20 FF 75 F0 57 68 E0 EA 05 08 50 31 F6 E8 8C 4E 01 00
+
+. 926 8048FAC 9
+. 31 DB 83 C4 10 39 FE 72 53
+
+. 927 8049008 30
+. 83 EC 0C FF 75 DC FF 75 E4 6A 00 6A 00 68 00 00 F0 3F 6A 00 FF 75 F0 57 53 E8 32 2B 00 00
+
+. 928 8049026 21
+. 83 C4 24 FF 75 E8 FF 75 EC 57 FF 75 F0 FF 75 E4 E8 19 42 00 00
+
+. 929 804903B 35
+. 83 C4 20 68 80 84 2E 41 6A 00 57 FF 75 F0 FF 75 E4 68 00 E9 05 08 FF 75 DC 68 E7 E8 05 08 E8 A2 F7 FF FF
+
+. 930 804905E 10
+. 43 09 C6 83 C4 20 39 FB 72 A0
+
+. 931 8049068 5
+. E9 48 FF FF FF
+
+. 932 8048FB5 15
+. FF 75 F0 57 68 40 EB 05 08 56 E8 74 4E 01 00
+
+. 933 8048FC4 9
+. 58 FF 75 EC E8 67 41 00 00
+
+. 934 804D134 34
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF B6 08 02 00 00 E8 00 00 00 00 5B 81 C3 CA 3F 01 00 E8 BA B5 FF FF
+
+. 935 804D156 18
+. C7 86 08 02 00 00 00 00 00 00 89 34 24 E8 A8 B5 FF FF
+
+. 936 804D168 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 937 8048FCD 9
+. 58 FF 75 E8 E8 9A 41 00 00
+
+. 938 804D170 31
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF 76 04 E8 00 00 00 00 5B 81 C3 91 3F 01 00 E8 81 B5 FF FF
+
+. 939 804D18F 15
+. C7 46 04 00 00 00 00 89 34 24 E8 72 B5 FF FF
+
+. 940 804D19E 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 941 8048FD6 9
+. 5F FF 75 E4 E8 31 F7 FF FF
+
+. 942 8048FDF 9
+. 5E FF 75 E0 E8 28 F7 FF FF
+
+. 943 8048FE8 9
+. 5B FF 75 DC E8 1F F7 FF FF
+
+. 944 8048FF1 21
+. 8B 75 D8 89 75 08 83 C4 10 8D 65 F4 5B 5E 5F C9 E9 0A F7 FF FF
+
+. 945 804B8D7 9
+. 59 58 56 53 E8 98 EB FF FF
+
+. 946 804A478 29
+. 55 89 E5 57 56 53 83 EC 28 8B 75 0C 0F AF 75 08 8D 1C F5 00 00 00 00 53 E8 FB E1 FF FF
+
+. 947 804A495 10
+. 89 C7 89 1C 24 E8 F1 E1 FF FF
+
+. 948 804A49F 11
+. 89 45 E4 89 1C 24 E8 E6 E1 FF FF
+
+. 949 804A4AA 11
+. 89 45 E0 89 1C 24 E8 DB E1 FF FF
+
+. 950 804A4B5 15
+. 89 45 DC 31 DB 8D 04 36 83 C4 10 39 C3 73 54
+***** EXPENSIVE 952 128
+
+. 951 804A4C4 78
+. DD 05 28 E9 05 08 DD 05 30 E9 05 08 DD 05 38 E9 05 08 89 C1 31 D2 52 53 DF 2C 24 83 C4 08 D9 1C 9F 52 53 DF 2C 24 D9 C0 D8 C4 8B 45 E4 D9 1C 98 D9 C0 D8 C3 D9 C9 8B 55 E0 D8 C2 D9 C9 8B 45 DC D9 1C 9A D9 1C 98 43 83 C4 08 39 CB 72 C6
+***** EXPENSIVE 953 129
+vex iropt: not unrolling (71 sts)
+
+. 952 804A4D8 58
+. 31 D2 52 53 DF 2C 24 83 C4 08 D9 1C 9F 52 53 DF 2C 24 D9 C0 D8 C4 8B 45 E4 D9 1C 98 D9 C0 D8 C3 D9 C9 8B 55 E0 D8 C2 D9 C9 8B 45 DC D9 1C 9A D9 1C 98 43 83 C4 08 39 CB 72 C6
+***** EXPENSIVE 954 130
+
+. 953 804A512 16
+. DD D8 DD D8 DD D8 83 EC 0C 6A 00 E8 FE 37 01 00
+
+. 954 804A522 9
+. 5B FF 75 0C E8 AD 5D 00 00
+
+. 955 80502D8 28
+. 55 89 E5 57 56 53 83 EC 5C 8B 55 08 E8 00 00 00 00 5B 81 C3 2B 0E 01 00 85 D2 75 21
+
+. 956 8050315 13
+. 83 EC 0C 68 0C 02 00 00 E8 6E 83 FF FF
+
+. 957 8050322 14
+. 83 C4 10 85 C0 89 45 C4 0F 84 F4 01 00 00
+
+. 958 8050330 15
+. 8B 45 08 83 EC 0C C1 E0 03 50 E8 51 83 FF FF
+
+. 959 805033F 20
+. 8B 55 C4 83 C4 10 85 C0 89 82 08 02 00 00 0F 84 B2 01 00 00
+
+. 960 8050353 27
+. 8B 45 08 8B 75 C4 89 06 50 89 F0 83 C0 08 50 8D 45 E0 50 FF 75 08 E8 36 C8 FF FF
+
+. 961 805036E 11
+. 83 C4 10 85 C0 0F 85 60 01 00 00
+***** EXPENSIVE 963 131
+
+. 962 8050379 57
+. 8B 4D E0 8B 55 C4 89 4A 04 31 D2 52 8B 45 08 50 DF 2C 24 C7 45 D8 00 00 00 00 83 C4 08 DC BB CC F5 FF FF 39 4D D8 DD 5D C8 C7 45 D4 01 00 00 00 C7 45 DC 00 00 00 00 73 7F
+
+. 963 80503B2 79
+. 8B 45 C4 8B 75 DC 8B 74 B0 08 8B 55 D8 8B 80 08 02 00 00 89 75 BC 8D 04 D0 8B 75 DC 8B 55 C4 89 84 B2 08 01 00 00 8B 75 D4 89 F0 0F AF 45 BC 89 45 D4 31 D2 8B 45 08 F7 75 D4 C7 45 C0 01 00 00 00 8B 55 BC 39 55 C0 89 75 D0 89 45 B8 73 28
+
+. 964 8050429 8
+. FF 45 DC 39 4D DC 72 81
+
+. 965 8050431 8
+. 8B 75 08 39 75 D8 77 08
+
+. 966 8050439 8
+. 8B 45 C4 E9 CC FE FF FF
+
+. 967 805030D 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 968 804A52B 28
+. 85 C0 FF 75 08 FF 75 0C 89 45 EC 0F 94 C0 68 00 F8 05 08 0F B6 C0 50 E8 F1 38 01 00
+
+. 969 804A547 11
+. 83 C4 14 FF 75 0C E8 EA 5F 00 00
+
+. 970 805053C 28
+. 55 89 E5 57 56 53 83 EC 0C 8B 7D 08 E8 00 00 00 00 5B 81 C3 C7 0B 01 00 85 FF 75 24
+
+. 971 805057C 10
+. 83 EC 0C 6A 08 E8 0A 81 FF FF
+
+. 972 8050586 9
+. 83 C4 10 85 C0 89 C6 74 3F
+
+. 973 805058F 18
+. 83 EC 0C 89 38 8D 04 FD 00 00 00 00 50 E8 EF 80 FF FF
+
+. 974 80505A1 10
+. 83 C4 10 85 C0 89 46 04 74 04
+
+. 975 80505AB 4
+. 89 F0 EB C2
+
+. 976 8050571 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 977 804A552 28
+. 83 C4 0C 85 C0 FF 75 0C 89 45 E8 0F 94 C0 68 40 F8 05 08 0F B6 C0 50 E8 CA 38 01 00
+
+. 978 804A56E 17
+. FF 75 E0 57 FF 75 08 FF 75 0C 31 DB E8 4D 1F 00 00
+
+. 979 804C4CC 19
+. 55 89 E5 57 56 53 83 EC 0C 8B 4D 08 85 C9 8B 7D 10 74 3A
+
+. 980 804C4DF 7
+. 31 F6 3B 75 08 73 1C
+
+. 981 804C4E6 7
+. 31 DB E8 5F F4 FF FF
+***** EXPENSIVE 983 132
+
+. 982 804C4ED 8
+. D9 1C DF E8 57 F4 FF FF
+***** EXPENSIVE 984 133
+
+. 983 804C4F5 13
+. 46 D9 5C DF 04 03 5D 0C 3B 75 08 72 E6
+
+. 984 804C502 15
+. FF 75 14 FF 75 08 FF 75 0C 57 E8 97 02 00 00
+
+. 985 804C7A8 38
+. 55 89 E5 53 83 EC 10 6A FF FF 75 14 FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 55 49 01 00 FF 75 08 E8 A6 00 00 00
+***** EXPENSIVE 987 134
+
+. 986 804C874 68
+. 55 89 E5 57 56 53 83 EC 5C 31 D2 52 8B 45 10 DB 45 18 50 E8 00 00 00 00 5B 81 C3 88 48 01 00 D8 C0 C7 45 E0 00 00 00 00 8B 45 10 DF 2C 24 D9 C9 DC 8B 3C F6 FF FF 83 C4 08 39 45 E0 DE F1 0F 83 EF 00 00 00
+***** EXPENSIVE 988 135
+
+. 987 804C8B8 38
+. C7 45 D0 00 00 00 00 90 31 FF D9 83 50 F6 FF FF 3B 7D 10 D9 55 D4 D9 5D D8 C7 45 DC 00 00 00 00 0F 83 93 00 00 00
+***** EXPENSIVE 989 136
+
+. 988 804C8DE 33
+. 31 F6 31 D2 52 8B 45 DC 50 DF 2C 24 D8 C9 83 EC 08 DD 14 24 D9 C9 DD 5D 98 DD 5D B8 E8 21 BD FF FF
+***** EXPENSIVE 990 137
+
+. 989 804C8FF 20
+. D9 5D E4 D9 45 E4 DD 45 B8 DD 1C 24 D9 5D A8 E8 1D BE FF FF
+***** EXPENSIVE 991 138
+
+. 990 804C913 94
+. D9 5D E4 8B 45 08 D9 45 E4 D9 45 A8 D9 04 F0 D9 44 F0 04 D9 C2 D9 C4 D9 C9 8B 45 DC D8 CB D9 C9 D8 CA D9 CC DE CA D9 CC DE CA D9 CB 03 45 E0 31 D2 DE E2 DE C2 47 F7 75 10 D8 45 D8 D9 C9 D8 45 D4 D9 C9 83 C4 10 03 75 0C 3B 7D 10 D9 5D D8 D9 5D D4 89 55 DC DD 45 98 0F 82 6F FF FF FF
+***** EXPENSIVE 992 139
+
+. 991 804C971 42
+. 8B 45 D0 8B 55 14 D9 45 D8 D9 1C C2 D9 45 D4 D9 5C C2 04 8B 55 0C 01 D0 89 45 D0 FF 45 E0 8B 45 10 39 45 E0 0F 82 25 FF FF FF
+***** EXPENSIVE 993 140
+
+. 992 804C99B 12
+. DD D8 8D 65 F4 5B 5E 31 C0 5F C9 C3
+
+. 993 804C7CE 5
+. 8B 5D FC C9 C3
+
+. 994 804C511 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 995 804A57F 8
+. 83 C4 20 3B 5D 0C 73 1F
+
+. 996 804A587 31
+. 31 D2 8D 76 00 8B 04 D7 8B 4D E4 89 04 D1 8B 44 D7 04 43 89 44 D1 04 03 55 08 3B 5D 0C 72 E6
+
+. 997 804A5A6 23
+. 83 EC 0C FF 75 E8 FF 75 EC FF 75 0C FF 75 08 57 31 DB E8 47 61 00 00
+
+. 998 8050704 41
+. 55 89 E5 53 83 EC 0C 6A FF FF 75 18 FF 75 14 FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 F6 09 01 00 FF 75 08 E8 B3 00 00 00
+
+. 999 80507E0 64
+. 55 89 E5 57 56 53 83 EC 2C 8B 75 10 8B 55 18 8B 45 14 E8 00 00 00 00 5B 81 C3 1D 09 01 00 8B 40 04 8B 4A 04 85 F6 89 45 F0 C7 45 E8 01 00 00 00 C7 45 DC 00 00 00 00 89 4D D8 0F 84 08 03 00 00
+
+. 1000 8050820 7
+. 31 C0 83 FE 01 74 23
+
+. 1001 805084A 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1002 805072D 5
+. 8B 5D FC C9 C3
+
+. 1003 804A5BD 8
+. 83 C4 20 3B 5D 0C 73 1D
+
+. 1004 804A5C5 29
+. 31 D2 90 8B 04 D7 8B 4D DC 89 04 D1 8B 44 D7 04 43 89 44 D1 04 03 55 08 3B 5D 0C 72 E6
+
+. 1005 804A5E2 32
+. 68 80 84 2E 41 6A 00 FF 75 0C FF 75 08 57 68 AB E8 05 08 FF 75 E0 68 B8 E8 05 08 E8 8A FA FF FF
+***** EXPENSIVE 1007 141
+
+. 1006 804A08C 18
+. 55 89 E5 57 56 53 83 EC 1C 31 DB 3B 5D 1C D9 EE 73 48
+***** EXPENSIVE 1008 142
+
+. 1007 804A09E 59
+. DD 05 40 E9 05 08 31 C9 89 F6 8B 45 0C D9 04 C8 8B 55 14 D9 44 C8 04 D9 C9 D8 2C CA D9 C9 D8 6C CA 04 D9 C9 D9 E1 D9 C9 D9 E1 DE C1 D8 C9 DD E2 DF E0 F6 C4 45 0F 85 FB 00 00 00
+***** EXPENSIVE 1009 143
+
+. 1008 804A1D4 7
+. DD D8 E9 00 FF FF FF
+
+. 1009 804A0DB 9
+. 43 03 4D 18 3B 5D 1C 72 C4
+***** EXPENSIVE 1011 144
+
+. 1010 804A0E4 14
+. DD D8 DD 45 20 DD E9 DF E0 F6 C4 45 75 0C
+***** EXPENSIVE 1012 145
+
+. 1011 804A0F2 12
+. DD D8 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 1012 804A602 20
+. 83 C4 20 FF 75 08 FF 75 0C 68 80 F8 05 08 50 E8 22 38 01 00
+
+. 1013 804A616 13
+. 83 C4 10 83 7D 08 01 0F 87 C4 02 00 00
+
+. 1014 804A623 21
+. 83 EC 0C FF 75 E8 FF 75 EC FF 75 0C FF 75 08 57 E8 2C 61 00 00
+
+. 1015 8050764 45
+. 55 89 E5 57 56 53 83 EC 14 6A 01 FF 75 18 FF 75 14 8B 7D 10 57 FF 75 0C 8B 75 08 E8 00 00 00 00 5B 81 C3 90 09 01 00 56 E8 4F 00 00 00
+
+. 1016 8050791 9
+. 83 C4 20 85 C0 89 C1 75 36
+***** EXPENSIVE 1018 146
+
+. 1017 805079A 24
+. 31 D2 52 57 DF 2C 24 D9 83 4C F6 FF FF 83 C4 08 31 D2 39 F9 DE F1 73 28
+***** EXPENSIVE 1019 147
+
+. 1018 80507B2 28
+. 31 C0 D9 04 C6 D8 C9 D9 1C C6 D9 44 C6 04 D8 C9 42 D9 5C C6 04 03 45 0C 39 FA 72 E6
+***** EXPENSIVE 1020 148
+
+. 1019 80507CE 12
+. DD D8 8D 65 F4 5B 5E 89 C8 5F C9 C3
+
+. 1020 804A638 35
+. 83 C4 20 68 80 84 2E 41 6A 00 FF 75 0C FF 75 08 57 68 BC E8 05 08 FF 75 E4 68 C8 E8 05 08 E8 31 FA FF FF
+
+. 1021 804A65B 20
+. 83 C4 20 FF 75 08 FF 75 0C 68 E0 F8 05 08 50 E8 C9 37 01 00
+
+. 1022 804A66F 13
+. 83 C4 10 83 7D 08 01 0F 87 44 02 00 00
+
+. 1023 804A67C 25
+. 83 EC 0C FF 75 E8 FF 75 EC FF 75 0C FF 75 08 FF 75 DC 31 DB E8 9F 60 00 00
+
+. 1024 8050734 41
+. 55 89 E5 53 83 EC 0C 6A 01 FF 75 18 FF 75 14 FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 C6 09 01 00 FF 75 08 E8 83 00 00 00
+
+. 1025 805075D 5
+. 8B 5D FC C9 C3
+
+. 1026 804A695 8
+. 83 C4 20 3B 5D 0C 73 2F
+***** EXPENSIVE 1028 149
+
+. 1027 804A69D 45
+. 31 D2 52 8B 45 0C 50 DF 2C 24 31 C0 83 C4 08 8B 55 E4 D9 04 C2 D8 C9 D9 1C C2 D9 44 C2 04 D8 C9 43 D9 5C C2 04 03 45 08 3B 5D 0C 72 E2
+***** EXPENSIVE 1029 150
+
+. 1028 804A6CA 36
+. DD D8 68 80 84 2E 41 6A 00 FF 75 0C FF 75 08 FF 75 DC 68 CD E8 05 08 FF 75 E4 68 C8 E8 05 08 E8 9E F9 FF FF
+
+. 1029 804A6EE 20
+. 83 C4 20 FF 75 08 FF 75 0C 68 40 F9 05 08 50 E8 36 37 01 00
+
+. 1030 804A702 13
+. 83 C4 10 83 7D 08 01 0F 87 85 01 00 00
+
+. 1031 804A70F 25
+. 51 FF 75 E0 57 6A 00 68 00 00 80 3F FF 75 08 FF 75 0C 6A 01 E8 B4 18 00 00
+
+. 1032 804BFDC 23
+. 55 89 E5 57 56 53 83 EC 4C 8B 75 0C 85 F6 8B 4D 1C 0F 84 E8 00 00 00
+
+. 1033 804BFF3 6
+. 31 DB 39 F3 73 1A
+
+. 1034 804BFF9 26
+. 31 C0 90 43 C7 04 C1 00 00 00 00 C7 44 C1 04 00 00 00 00 03 45 10 39 F3 72 E9
+***** EXPENSIVE 1036 151
+
+. 1035 804C013 34
+. 31 D2 8B 45 08 F7 F6 0F AF 55 10 D9 45 14 31 DB D9 1C D1 39 F3 D9 45 18 D9 5C D1 04 0F 83 9C 00 00 00
+***** EXPENSIVE 1037 152
+
+. 1036 804C035 61
+. 31 D2 52 56 DF 2C 24 DD 5D D8 83 C4 08 C7 45 D4 00 00 00 00 31 FF 90 89 F8 31 D2 F7 F6 89 D0 31 D2 52 50 DF 2C 24 DC 0D E0 06 06 08 83 EC 08 DC 75 D8 DD 14 24 DD 5D B8 E8 AE C5 FF FF
+***** EXPENSIVE 1038 153
+
+. 1037 804C072 20
+. D9 5D E4 D9 45 E4 DD 45 B8 DD 1C 24 D9 5D A8 E8 AA C6 FF FF
+***** EXPENSIVE 1039 154
+
+. 1038 804C086 75
+. D9 5D E4 D9 45 E4 D9 45 A8 D9 45 14 D9 45 18 D9 C9 D8 CA D9 C9 D8 CB D9 CA D8 4D 18 D9 CB D8 4D 14 D9 C9 8B 45 D4 8B 55 20 DE E2 DE C2 D9 1C C2 D9 5C C2 04 43 8B 55 10 01 D0 83 C4 10 03 7D 08 39 F3 89 45 D4 0F 82 7B FF FF FF
+
+. 1039 804C0D1 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 1040 804A728 21
+. 83 C4 14 FF 75 E8 FF 75 EC FF 75 0C FF 75 08 57 E8 C7 5F 00 00
+
+. 1041 804A73D 35
+. 83 C4 20 68 80 84 2E 41 6A 00 FF 75 0C FF 75 08 57 68 DA E8 05 08 FF 75 E0 68 E7 E8 05 08 E8 2C F9 FF FF
+
+. 1042 804A760 20
+. 83 C4 20 FF 75 08 FF 75 0C 68 A0 F9 05 08 50 E8 C4 36 01 00
+
+. 1043 804A774 24
+. 58 5A FF 75 E0 57 6A 00 68 00 00 80 3F FF 75 08 FF 75 0C E8 6C 19 00 00
+***** EXPENSIVE 1045 155
+
+. 1044 804C0F8 31
+. 55 89 E5 57 56 53 83 EC 0C 8B 55 08 85 D2 8B 7D 0C D9 45 10 D9 45 14 8B 5D 18 8B 75 1C 74 5C
+
+. 1045 804C117 6
+. 31 C0 39 D0 73 35
+***** EXPENSIVE 1047 156
+
+. 1046 804C11D 21
+. 31 C9 90 D9 C9 40 D9 14 CB D9 C9 D9 54 CB 04 01 F9 39 D0 72 EE
+
+. 1047 804C132 6
+. 31 C0 39 D0 73 1A
+
+. 1048 804C138 26
+. 31 C9 89 F6 40 C7 04 CE 00 00 00 00 C7 44 CE 04 00 00 00 00 01 F9 39 D0 72 EA
+***** EXPENSIVE 1050 157
+
+. 1049 804C152 33
+. 89 D0 31 D2 52 50 DF 2C 24 DC CA DE C9 D9 C9 83 C4 08 D9 1E D9 5E 04 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 1050 804A78C 21
+. 83 C4 14 FF 75 E8 FF 75 EC FF 75 0C FF 75 08 57 E8 63 5F 00 00
+
+. 1051 804A7A1 35
+. 83 C4 20 68 80 84 2E 41 6A 00 FF 75 0C FF 75 08 57 68 F0 E8 05 08 FF 75 E0 68 E7 E8 05 08 E8 C8 F8 FF FF
+
+. 1052 804A7C4 22
+. 83 C4 20 FF 75 08 FF 75 0C 68 00 FA 05 08 50 31 F6 E8 5E 36 01 00
+
+. 1053 804A7DA 10
+. 31 DB 83 C4 10 3B 75 0C 72 50
+
+. 1054 804A834 24
+. 50 FF 75 E0 57 6A 00 68 00 00 80 3F FF 75 08 FF 75 0C 53 E8 48 19 00 00
+
+. 1055 804C194 20
+. 55 89 E5 57 56 53 83 EC 3C 8B 7D 0C 85 FF 0F 84 09 01 00 00
+
+. 1056 804C1A8 11
+. 31 DB 3B 5D 0C 0F 83 B4 00 00 00
+***** EXPENSIVE 1058 158
+
+. 1057 804C1B3 60
+. 31 D2 52 8B 45 0C 50 DF 2C 24 DD 5D D8 83 C4 08 31 FF 31 F6 90 89 F0 31 D2 F7 75 0C 89 D0 31 D2 52 50 DF 2C 24 DC 0D E8 06 06 08 83 EC 08 DC 75 D8 DD 14 24 DD 5D C8 E8 31 C4 FF FF
+***** EXPENSIVE 1059 159
+
+. 1058 804C1EF 20
+. D9 5D E4 D9 45 E4 DD 45 C8 DD 1C 24 D9 5D B8 E8 2D C5 FF FF
+***** EXPENSIVE 1060 160
+
+. 1059 804C203 64
+. D9 5D E4 D9 45 E4 D9 45 B8 D9 45 14 D9 45 18 D9 C9 D8 CA D9 C9 D8 CB D9 CA D8 4D 18 D9 CB D8 4D 14 D9 C9 8B 45 1C DE E2 DE C2 43 D9 1C F8 D9 5C F8 04 83 C4 10 03 7D 10 03 75 08 3B 5D 0C 72 85
+
+. 1060 804C243 7
+. 31 DB 3B 5D 0C 73 1D
+
+. 1061 804C24A 29
+. 31 C0 8B 55 20 43 C7 04 C2 00 00 00 00 C7 44 C2 04 00 00 00 00 03 45 10 3B 5D 0C 72 E5
+
+. 1062 804C267 7
+. 8B 75 08 85 F6 7E 3B
+
+. 1063 804C2A9 8
+. 8B 45 0C 2B 45 08 EB C0
+***** EXPENSIVE 1065 161
+
+. 1064 804C271 56
+. 31 D2 F7 75 0C 8B 4D 10 0F AF CA 31 D2 52 8B 45 0C 50 DF 2C 24 D9 45 14 8B 45 20 D8 C9 D9 C9 D8 4D 18 D9 C9 D9 1C C8 D9 5C C8 04 83 C4 08 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 1065 804A84C 21
+. 83 C4 14 FF 75 E8 FF 75 EC FF 75 0C FF 75 08 57 E8 A3 5E 00 00
+
+. 1066 804A861 35
+. 83 C4 20 68 80 84 2E 41 6A 00 FF 75 0C FF 75 08 57 68 00 E9 05 08 FF 75 E0 68 E7 E8 05 08 E8 08 F8 FF FF
+
+. 1067 804A884 11
+. 43 09 C6 83 C4 20 3B 5D 0C 72 A5
+
+. 1068 804A88F 5
+. E9 50 FF FF FF
+
+. 1069 804A7E4 17
+. FF 75 08 FF 75 0C 68 60 FA 05 08 56 E8 43 36 01 00
+
+. 1070 804A7F5 9
+. 58 FF 75 EC E8 E6 5D 00 00
+
+. 1071 80505E4 34
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF B6 08 02 00 00 E8 00 00 00 00 5B 81 C3 1A 0B 01 00 E8 0A 81 FF FF
+
+. 1072 8050606 18
+. C7 86 08 02 00 00 00 00 00 00 89 34 24 E8 F8 80 FF FF
+
+. 1073 8050618 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 1074 804A7FE 9
+. 58 FF 75 E8 E8 19 5E 00 00
+
+. 1075 8050620 31
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF 76 04 E8 00 00 00 00 5B 81 C3 E1 0A 01 00 E8 D1 80 FF FF
+
+. 1076 805063F 15
+. C7 46 04 00 00 00 00 89 34 24 E8 C2 80 FF FF
+
+. 1077 805064E 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 1078 804A807 8
+. 89 3C 24 E8 01 DF FF FF
+
+. 1079 804A80F 9
+. 58 FF 75 E4 E8 F8 DE FF FF
+
+. 1080 804A818 9
+. 5F FF 75 E0 E8 EF DE FF FF
+
+. 1081 804A821 9
+. 5E FF 75 DC E8 E6 DE FF FF
+
+. 1082 804A82A 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1083 804B8E0 9
+. 58 5A 56 53 E8 97 DD FF FF
+
+. 1084 8049680 41
+. 55 89 E5 57 56 53 83 EC 38 8B 55 0C 8B 45 08 89 D6 0F AF F0 89 45 F0 8D 04 F5 00 00 00 00 89 55 EC 89 F3 50 E8 E7 EF FF FF
+
+. 1085 80496A9 14
+. C1 E3 04 89 45 E0 89 1C 24 E8 D9 EF FF FF
+
+. 1086 80496B7 11
+. 89 45 DC 89 1C 24 E8 CE EF FF FF
+
+. 1087 80496C2 11
+. 89 45 D8 89 1C 24 E8 C3 EF FF FF
+
+. 1088 80496CD 12
+. 31 C9 83 C4 10 39 F1 89 45 D4 73 18
+***** EXPENSIVE 1090 162
+
+. 1089 80496D9 24
+. 89 F3 90 31 D2 52 51 DF 2C 24 8B 75 E0 DD 1C CE 41 83 C4 08 39 D9 72 EB
+
+. 1090 80496F1 16
+. 8B 45 EC 0F AF 45 F0 31 C9 8D 1C 00 39 D9 73 48
+***** EXPENSIVE 1092 163
+
+. 1091 8049701 66
+. DD 05 28 E9 05 08 DD 05 30 E9 05 08 DD 05 38 E9 05 08 90 31 D2 52 51 DF 2C 24 D9 C0 D8 C4 8B 45 DC DD 1C C8 D9 C0 D8 C3 D9 C9 8B 55 D8 D8 C2 D9 C9 8B 75 D4 DD 1C CA DD 1C CE 41 83 C4 08 39 D9 72 D1
+***** EXPENSIVE 1093 164
+vex iropt: 2 x unrolling (59 sts -> 118 sts)
+
+. 1092 8049714 47
+. 31 D2 52 51 DF 2C 24 D9 C0 D8 C4 8B 45 DC DD 1C C8 D9 C0 D8 C3 D9 C9 8B 55 D8 D8 C2 D9 C9 8B 75 D4 DD 1C CA DD 1C CE 41 83 C4 08 39 D9 72 D1
+***** EXPENSIVE 1094 165
+
+. 1093 8049743 16
+. DD D8 DD D8 DD D8 83 EC 0C 6A 00 E8 CD 45 01 00
+
+. 1094 8049753 9
+. 58 FF 75 EC E8 D8 EE 00 00
+
+. 1095 8058634 28
+. 55 89 E5 57 56 53 83 EC 5C 8B 45 08 E8 00 00 00 00 5B 81 C3 CF 8A 00 00 85 C0 75 21
+
+. 1096 8058671 13
+. 83 EC 0C 68 0C 02 00 00 E8 12 00 FF FF
+
+. 1097 805867E 14
+. 83 C4 10 85 C0 89 45 CC 0F 84 49 02 00 00
+
+. 1098 805868C 10
+. 83 7D 08 01 0F 84 2D 02 00 00
+
+. 1099 80588C3 18
+. 8B 45 CC C7 80 08 02 00 00 00 00 00 00 E9 E6 FD FF FF
+
+. 1100 80586BB 27
+. 8B 45 08 8B 4D CC 89 01 89 C8 57 83 C0 08 50 8D 45 E4 50 FF 75 08 E8 4E 45 FF FF
+
+. 1101 804CC24 51
+. 55 89 E5 57 56 53 83 EC 2C FF 75 10 FF 75 0C 8D 45 C8 E8 00 00 00 00 5B 81 C3 D9 44 01 00 50 FF 75 08 8D 7D C8 8D B3 84 F6 FF FF FC B9 05 00 00 00 F3 A5
+vex iropt: 8 x unrolling (15 sts -> 120 sts)
+
+. 1102 804CC55 2
+. F3 A5
+
+. 1103 804CC57 5
+. E8 08 00 00 00
+
+. 1104 804CC5C 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1105 80586D6 11
+. 83 C4 10 85 C0 0F 85 93 01 00 00
+***** EXPENSIVE 1107 166
+
+. 1106 80586E1 61
+. 8B 45 E4 8B 4D CC 89 41 04 31 C9 51 8B 55 08 52 DF 2C 24 C7 45 DC 00 00 00 00 83 C4 08 DC BB D4 F5 FF FF 39 45 DC DD 5D D0 C7 45 D8 01 00 00 00 C7 45 E0 00 00 00 00 0F 83 13 01 00 00
+
+. 1107 805871E 81
+. C7 45 AC 00 00 00 00 8B 55 CC 8B 45 E0 8B 44 82 08 89 45 C4 8B 45 AC 03 82 08 02 00 00 8B 4D E0 89 84 8A 08 01 00 00 8B 4D D8 89 C8 0F AF 45 C4 89 45 D8 31 D2 8B 45 08 F7 75 D8 C7 45 C8 01 00 00 00 8B 55 C4 39 55 C8 89 45 C0 0F 83 B0 00 00 00
+
+. 1108 805881F 18
+. FF 45 E0 8B 45 E0 8B 55 CC 3B 42 04 0F 82 F4 FE FF FF
+
+. 1109 8058831 11
+. D1 6D 08 8B 4D 08 39 4D DC 77 08
+
+. 1110 805883C 8
+. 8B 45 CC E9 25 FE FF FF
+
+. 1111 8058669 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1112 804975C 28
+. 89 45 E8 85 C0 FF 75 F0 0F 94 C0 FF 75 EC 68 80 F0 05 08 0F B6 C0 50 E8 C0 46 01 00
+
+. 1113 8049778 11
+. 83 C4 14 FF 75 EC E8 69 F1 00 00
+
+. 1114 80588EC 28
+. 55 89 E5 57 56 53 83 EC 0C 8B 7D 08 E8 00 00 00 00 5B 81 C3 17 88 00 00 85 FF 75 24
+
+. 1115 805892C 10
+. 83 EC 0C 6A 08 E8 5A FD FE FF
+
+. 1116 8058936 9
+. 83 C4 10 85 C0 89 C6 74 3F
+
+. 1117 805893F 18
+. 83 EC 0C 89 38 8D 04 FD 00 00 00 00 50 E8 3F FD FE FF
+
+. 1118 8058951 10
+. 83 C4 10 85 C0 89 46 04 74 04
+
+. 1119 805895B 4
+. 89 F0 EB C5
+
+. 1120 8058924 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1121 8049783 28
+. 83 C4 0C 85 C0 89 45 E4 FF 75 EC 0F 94 C0 68 C0 F0 05 08 0F B6 C0 50 E8 99 46 01 00
+
+. 1122 804979F 17
+. FF 75 D4 FF 75 DC FF 75 F0 FF 75 EC E8 8C 27 00 00
+
+. 1123 80497B0 22
+. 8B 4D EC 0F AF 4D F0 C1 E1 04 C1 E9 02 FC 8B 7D D8 8B 75 DC F3 A5
+vex iropt: 8 x unrolling (15 sts -> 120 sts)
+
+. 1124 80497C4 2
+. F3 A5
+
+. 1125 80497C6 8
+. 83 C4 20 3B 4D EC 73 24
+
+. 1126 80497CE 36
+. 31 DB 89 D8 C1 E0 04 8B 75 DC 8B 54 06 04 41 8B 04 06 8B 75 E0 89 04 DE 89 54 DE 04 03 5D F0 3B 4D EC 72 DE
+
+. 1127 80497F2 23
+. 83 EC 0C FF 75 E4 FF 75 E8 FF 75 EC FF 75 F0 FF 75 E0 E8 FF F1 00 00
+
+. 1128 8058A08 64
+. 55 89 E5 57 56 53 83 EC 2C 8B 55 18 8B 45 14 8B 75 10 E8 00 00 00 00 5B 81 C3 F5 86 00 00 8B 40 04 8B 4A 04 85 F6 89 45 F0 C7 45 EC 01 00 00 00 89 4D E8 C7 45 E4 00 00 00 00 0F 84 18 02 00 00
+
+. 1129 8058A48 8
+. 31 C0 83 7D 10 01 74 26
+
+. 1130 8058A76 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1131 8049809 20
+. 83 C4 20 FF 75 EC FF 75 F0 FF 75 DC FF 75 E0 E8 B3 C3 00 00
+
+. 1132 8055BD0 32
+. 55 89 E5 57 56 53 83 EC 2C 8B 4D 14 E8 00 00 00 00 5B 81 C3 33 B5 00 00 85 C9 0F 84 F3 00 00 00
+
+. 1133 8055BF0 46
+. 8B 4D 08 8B 01 8B 51 04 8B 4D 0C 89 01 8B 45 14 C7 45 EC 01 00 00 00 48 39 45 EC 89 51 04 C7 41 08 00 00 00 00 C7 41 0C 00 00 00 00 73 72
+
+. 1134 8055C90 11
+. 8B 45 14 2B 45 EC 39 45 EC 74 0A
+
+. 1135 8055C9B 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 1136 804981D 34
+. 68 80 84 2E 41 6A 00 FF 75 EC FF 75 F0 FF 75 DC 68 AB E8 05 08 FF 75 D4 68 B8 E8 05 08 E8 C1 EF FF FF
+
+. 1137 804983F 20
+. 83 C4 30 FF 75 F0 FF 75 EC 68 00 F1 05 08 50 E8 E5 45 01 00
+
+. 1138 8049853 9
+. 58 FF 75 EC E8 98 9D 00 00
+
+. 1139 80535F4 28
+. 55 89 E5 57 56 53 83 EC 6C 8B 45 08 E8 00 00 00 00 5B 81 C3 0F DB 00 00 85 C0 75 21
+
+. 1140 8053631 13
+. 83 EC 0C 68 0C 02 00 00 E8 52 50 FF FF
+
+. 1141 805363E 14
+. 83 C4 10 85 C0 89 45 C4 0F 84 2B 02 00 00
+
+. 1142 805364C 15
+. 8B 45 08 83 EC 0C C1 E0 04 50 E8 35 50 FF FF
+
+. 1143 805365B 20
+. 8B 55 C4 83 C4 10 85 C0 89 82 08 02 00 00 0F 84 E9 01 00 00
+
+. 1144 805366F 27
+. 8B 45 08 8B 75 C4 89 06 50 89 F0 83 C0 08 50 8D 45 E4 50 FF 75 08 E8 5A 95 FF FF
+
+. 1145 804CBE4 51
+. 55 89 E5 57 56 53 83 EC 2C FF 75 10 FF 75 0C 8D 45 C8 E8 00 00 00 00 5B 81 C3 19 45 01 00 50 FF 75 08 8D 7D C8 8D B3 70 F6 FF FF FC B9 05 00 00 00 F3 A5
+vex iropt: 8 x unrolling (15 sts -> 120 sts)
+
+. 1146 804CC15 2
+. F3 A5
+
+. 1147 804CC17 5
+. E8 48 00 00 00
+
+. 1148 804CC1C 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1149 805368A 11
+. 83 C4 10 85 C0 0F 85 97 01 00 00
+***** EXPENSIVE 1151 167
+
+. 1150 8053695 61
+. 8B 4D E4 8B 55 C4 89 4A 04 31 D2 52 8B 45 08 50 DF 2C 24 C7 45 DC 00 00 00 00 83 C4 08 DC BB D4 F5 FF FF 39 4D DC DD 5D C8 C7 45 D8 01 00 00 00 C7 45 E0 00 00 00 00 0F 83 18 01 00 00
+
+. 1151 80536D2 84
+. C7 45 A8 00 00 00 00 8B 45 C4 8B 75 E0 8B 74 B0 08 8B 55 C4 8B 45 A8 03 82 08 02 00 00 89 75 BC 8B 75 E0 89 84 B2 08 01 00 00 8B 45 D8 89 C2 0F AF 55 BC 89 55 D8 89 45 D4 31 D2 8B 45 08 C7 45 C0 01 00 00 00 8B 75 BC F7 75 D8 39 75 C0 0F 83 B8 00 00 00
+
+. 1152 80537DE 12
+. FF 45 E0 39 4D E0 0F 82 EF FE FF FF
+
+. 1153 80537EA 11
+. D1 6D 08 8B 55 08 39 55 DC 77 08
+
+. 1154 80537F5 8
+. 8B 45 C4 E9 2C FE FF FF
+
+. 1155 8053629 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1156 804985C 26
+. 89 C3 FF 75 F0 FF 75 EC 31 C0 85 DB 0F 94 C0 68 40 F1 05 08 50 E8 C2 45 01 00
+
+. 1157 8049876 21
+. 83 C4 14 FF 75 E4 53 FF 75 EC FF 75 F0 FF 75 E0 E8 DD A0 00 00
+
+. 1158 8053968 41
+. 55 89 E5 57 56 53 83 EC 2C 8B 7D 10 8B 55 18 E8 00 00 00 00 5B 81 C3 98 D7 00 00 8B 42 04 85 FF 89 45 F0 0F 84 3F 02 00 00
+
+. 1159 8053991 7
+. 31 C0 83 FF 01 74 23
+
+. 1160 80539BB 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1161 804988B 10
+. 31 C9 83 C4 20 3B 4D EC 73 25
+***** EXPENSIVE 1163 168
+
+. 1162 8049895 35
+. 31 D2 8B 45 EC 52 50 DF 2C 24 31 C0 83 C4 08 8B 55 E0 DD 04 C2 D8 F1 41 DD 1C C2 03 45 F0 3B 4D EC 72 EC
+***** EXPENSIVE 1164 169
+
+. 1163 80498B8 19
+. DD D8 FF 75 EC FF 75 F0 FF 75 DC FF 75 E0 E8 61 13 01 00
+
+. 1164 80498CB 34
+. 68 80 84 2E 41 6A 00 FF 75 EC FF 75 F0 FF 75 DC 68 BC E8 05 08 FF 75 D8 68 C8 E8 05 08 E8 13 EF FF FF
+
+. 1165 80498ED 20
+. 83 C4 30 FF 75 F0 FF 75 EC 68 80 F1 05 08 50 E8 37 45 01 00
+
+. 1166 8049901 9
+. 58 FF 75 E4 E8 C6 F0 00 00
+
+. 1167 80589D0 31
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF 76 04 E8 00 00 00 00 5B 81 C3 31 87 00 00 E8 21 FD FE FF
+
+. 1168 80589EF 15
+. C7 46 04 00 00 00 00 89 34 24 E8 12 FD FE FF
+
+. 1169 80589FE 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 1170 804990A 9
+. 58 FF 75 E8 E8 81 F0 00 00
+
+. 1171 8058994 34
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF B6 08 02 00 00 E8 00 00 00 00 5B 81 C3 6A 87 00 00 E8 5A FD FE FF
+
+. 1172 400476A1 2
+. EB 62
+
+. 1173 80589B6 18
+. C7 86 08 02 00 00 00 00 00 00 89 34 24 E8 48 FD FE FF
+
+. 1174 80589C8 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 1175 8049913 8
+. 89 1C 24 E8 75 9F 00 00
+
+. 1176 8053890 34
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF B6 08 02 00 00 E8 00 00 00 00 5B 81 C3 6E D8 00 00 E8 5E 4E FF FF
+
+. 1177 80538B2 18
+. C7 86 08 02 00 00 00 00 00 00 89 34 24 E8 4C 4E FF FF
+
+. 1178 80538C4 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 1179 804991B 9
+. 58 FF 75 E0 E8 EC ED FF FF
+
+. 1180 8049924 9
+. 58 FF 75 DC E8 E3 ED FF FF
+
+. 1181 804992D 9
+. 5F FF 75 D8 E8 DA ED FF FF
+
+. 1182 8049936 21
+. 8B 75 D4 89 75 08 83 C4 10 8D 65 F4 5B 5E 5F C9 E9 C5 ED FF FF
+
+. 1183 804B8E9 10
+. 59 58 56 53 43 E8 81 F5 FF FF
+
+. 1184 804AE74 29
+. 55 89 E5 57 56 53 83 EC 28 8B 75 0C 0F AF 75 08 8D 04 B5 00 00 00 00 50 E8 FF D7 FF FF
+
+. 1185 804AE91 18
+. 8D 1C F5 00 00 00 00 89 1C 24 89 45 E4 E8 ED D7 FF FF
+
+. 1186 804AEA3 11
+. 89 45 E0 89 1C 24 E8 E2 D7 FF FF
+
+. 1187 804AEAE 11
+. 89 45 DC 89 1C 24 E8 D7 D7 FF FF
+
+. 1188 804AEB9 12
+. 31 C9 83 C4 10 39 F1 89 45 D8 73 18
+***** EXPENSIVE 1190 170
+
+. 1189 804AEC5 24
+. 89 F3 90 31 D2 52 51 DF 2C 24 8B 45 E4 D9 1C 88 41 83 C4 08 39 D9 72 EB
+
+. 1190 804AEDD 16
+. 8B 45 0C 0F AF 45 08 31 C9 8D 1C 00 39 D9 73 48
+***** EXPENSIVE 1192 171
+
+. 1191 804AEED 66
+. DD 05 28 E9 05 08 DD 05 30 E9 05 08 DD 05 38 E9 05 08 90 31 D2 52 51 DF 2C 24 D9 C0 D8 C4 8B 55 E0 D9 1C 8A D9 C0 D8 C3 D9 C9 8B 45 DC D8 C2 D9 C9 8B 55 D8 D9 1C 88 D9 1C 8A 41 83 C4 08 39 D9 72 D1
+***** EXPENSIVE 1193 172
+vex iropt: not unrolling (61 sts)
+
+. 1192 804AF00 47
+. 31 D2 52 51 DF 2C 24 D9 C0 D8 C4 8B 55 E0 D9 1C 8A D9 C0 D8 C3 D9 C9 8B 45 DC D8 C2 D9 C9 8B 55 D8 D9 1C 88 D9 1C 8A 41 83 C4 08 39 D9 72 D1
+***** EXPENSIVE 1194 173
+
+. 1193 804AF2F 16
+. DD D8 DD D8 DD D8 83 EC 0C 6A 00 E8 E1 2D 01 00
+
+. 1194 804AF3F 9
+. 5B FF 75 0C E8 60 FD 00 00
+
+. 1195 805ACA8 28
+. 55 89 E5 57 56 53 83 EC 5C 8B 7D 08 E8 00 00 00 00 5B 81 C3 5B 64 00 00 85 FF 75 21
+
+. 1196 805ACE5 13
+. 83 EC 0C 68 0C 02 00 00 E8 9E D9 FE FF
+
+. 1197 805ACF2 14
+. 83 C4 10 85 C0 89 45 C4 0F 84 2E 02 00 00
+
+. 1198 805AD00 10
+. 83 7D 08 01 0F 84 12 02 00 00
+
+. 1199 805AF1C 18
+. 8B 45 C4 C7 80 08 02 00 00 00 00 00 00 E9 01 FE FF FF
+
+. 1200 805AD2F 27
+. 8B 45 08 8B 4D C4 89 01 89 C8 56 83 C0 08 50 8D 45 E0 50 FF 75 08 E8 DA 1E FF FF
+
+. 1201 805AD4A 11
+. 83 C4 10 85 C0 0F 85 76 01 00 00
+***** EXPENSIVE 1203 174
+
+. 1202 805AD55 61
+. 8B 45 E0 8B 4D C4 89 41 04 31 C9 51 8B 55 08 52 DF 2C 24 C7 45 D8 00 00 00 00 83 C4 08 DC BB D4 F5 FF FF 39 45 D8 DD 5D C8 C7 45 D4 01 00 00 00 C7 45 DC 00 00 00 00 0F 83 8C 00 00 00
+
+. 1203 805AD92 76
+. 8B 55 C4 8B 45 DC 8B 44 82 08 89 45 BC 8B 82 08 02 00 00 8B 55 D8 8D 04 D0 8B 4D DC 8B 55 C4 89 84 8A 08 01 00 00 8B 4D D4 89 C8 0F AF 45 BC 89 45 D4 31 D2 8B 45 08 F7 75 D4 C7 45 C0 01 00 00 00 8B 55 BC 39 55 C0 89 45 B8 73 2E
+
+. 1204 805AE0C 18
+. FF 45 DC 8B 4D DC 8B 45 C4 3B 48 04 0F 82 74 FF FF FF
+
+. 1205 805AE1E 11
+. D1 6D 08 8B 55 08 39 55 D8 77 08
+
+. 1206 805AE29 8
+. 8B 45 C4 E9 AC FE FF FF
+
+. 1207 805ACDD 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1208 804AF48 28
+. 85 C0 FF 75 08 FF 75 0C 89 45 EC 0F 94 C0 68 40 FF 05 08 0F B6 C0 50 E8 D4 2E 01 00
+
+. 1209 804AF64 11
+. 83 C4 14 FF 75 0C E8 D5 FF 00 00
+
+. 1210 805AF44 28
+. 55 89 E5 57 56 53 83 EC 0C 8B 7D 08 E8 00 00 00 00 5B 81 C3 BF 61 00 00 85 FF 75 24
+
+. 1211 805AF84 10
+. 83 EC 0C 6A 08 E8 02 D7 FE FF
+
+. 1212 805AF8E 9
+. 83 C4 10 85 C0 89 C6 74 3F
+
+. 1213 805AF97 18
+. 83 EC 0C 89 38 8D 04 BD 00 00 00 00 50 E8 E7 D6 FE FF
+
+. 1214 805AFA9 10
+. 83 C4 10 85 C0 89 46 04 74 04
+
+. 1215 805AFB3 4
+. 89 F0 EB C5
+
+. 1216 805AF7C 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1217 804AF6F 28
+. 83 C4 0C 85 C0 FF 75 0C 89 45 E8 0F 94 C0 68 80 FF 05 08 0F B6 C0 50 E8 AD 2E 01 00
+
+. 1218 804AF8B 17
+. FF 75 D8 FF 75 E0 FF 75 08 FF 75 0C E8 9C 15 00 00
+
+. 1219 804C538 19
+. 55 89 E5 57 56 53 83 EC 0C 8B 5D 08 85 DB 8B 7D 10 74 39
+
+. 1220 804C54B 7
+. 31 F6 3B 75 08 73 1B
+
+. 1221 804C552 7
+. 31 DB E8 F3 F3 FF FF
+***** EXPENSIVE 1223 175
+
+. 1222 804C559 20
+. 46 D9 1C DF C7 44 DF 04 00 00 00 00 03 5D 0C 3B 75 08 72 E7
+
+. 1223 804C56D 15
+. FF 75 14 FF 75 08 FF 75 0C 57 E8 2C 02 00 00
+
+. 1224 804C57C 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1225 804AF9C 22
+. 8B 4D 0C 0F AF 4D 08 C1 E1 03 C1 E9 02 FC 8B 7D DC 8B 75 E0 F3 A5
+vex iropt: 8 x unrolling (15 sts -> 120 sts)
+
+. 1226 804AFB0 2
+. F3 A5
+
+. 1227 804AFB2 8
+. 83 C4 20 3B 4D 0C 73 17
+
+. 1228 804AFBA 23
+. 31 D2 8B 5D E0 8B 04 D3 41 8B 5D E4 89 04 93 03 55 08 3B 4D 0C 72 EB
+
+. 1229 804AFD1 23
+. 83 EC 0C FF 75 E8 FF 75 EC FF 75 0C FF 75 08 FF 75 E4 E8 78 00 01 00
+
+. 1230 805B060 64
+. 55 89 E5 57 56 53 83 EC 2C 8B 55 18 8B 45 14 8B 75 10 E8 00 00 00 00 5B 81 C3 9D 60 00 00 8B 40 04 8B 4A 04 85 F6 89 45 F0 C7 45 EC 01 00 00 00 89 4D E8 C7 45 E4 00 00 00 00 0F 84 0C 02 00 00
+
+. 1231 805B0A0 8
+. 31 C0 83 7D 10 01 74 26
+
+. 1232 805B0CE 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1233 804AFE8 20
+. 83 C4 20 FF 75 0C FF 75 08 FF 75 E0 FF 75 E4 E8 80 D4 00 00
+
+. 1234 805847C 32
+. 55 89 E5 57 56 53 83 EC 0C 8B 45 14 E8 00 00 00 00 5B 81 C3 87 8C 00 00 85 C0 0F 84 B0 00 00 00
+
+. 1235 805849C 33
+. 8B 55 08 8B 02 8B 4D 0C 89 01 8B 45 14 C7 45 F0 01 00 00 00 48 39 45 F0 C7 41 04 00 00 00 00 73 4F
+
+. 1236 805850C 11
+. 8B 45 14 2B 45 F0 39 45 F0 74 0A
+
+. 1237 8058517 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 1238 804AFFC 34
+. 68 80 84 2E 41 6A 00 FF 75 0C FF 75 08 FF 75 E0 68 AB E8 05 08 FF 75 D8 68 B8 E8 05 08 E8 6E F0 FF FF
+
+. 1239 804B01E 20
+. 83 C4 30 FF 75 08 FF 75 0C 68 C0 FF 05 08 50 E8 06 2E 01 00
+
+. 1240 804B032 9
+. 59 FF 75 0C E8 DD AD 00 00
+
+. 1241 8055E18 28
+. 55 89 E5 57 56 53 83 EC 5C 8B 45 08 E8 00 00 00 00 5B 81 C3 EB B2 00 00 85 C0 75 21
+
+. 1242 8055E55 13
+. 83 EC 0C 68 0C 02 00 00 E8 2E 28 FF FF
+
+. 1243 8055E62 14
+. 83 C4 10 85 C0 89 45 C4 0F 84 06 02 00 00
+
+. 1244 8055E70 15
+. 8B 45 08 83 EC 0C C1 E0 03 50 E8 11 28 FF FF
+
+. 1245 8055E7F 20
+. 8B 55 C4 83 C4 10 85 C0 89 82 08 02 00 00 0F 84 C4 01 00 00
+
+. 1246 8055E93 27
+. 8B 45 08 8B 75 C4 89 06 89 F0 57 83 C0 08 50 8D 45 E0 50 FF 75 08 E8 36 6D FF FF
+
+. 1247 8055EAE 11
+. 83 C4 10 85 C0 0F 85 72 01 00 00
+***** EXPENSIVE 1249 176
+
+. 1248 8055EB9 61
+. 8B 4D E0 8B 55 C4 89 4A 04 31 D2 52 8B 45 08 50 DF 2C 24 C7 45 D8 00 00 00 00 83 C4 08 DC BB D4 F5 FF FF 39 4D D8 DD 5D C8 C7 45 D4 01 00 00 00 C7 45 DC 00 00 00 00 0F 83 89 00 00 00
+
+. 1249 8055EF6 76
+. 8B 45 C4 8B 75 DC 8B 74 B0 08 8B 55 D8 8B 80 08 02 00 00 89 75 BC 8D 04 D0 8B 75 DC 8B 55 C4 89 84 B2 08 01 00 00 8B 75 D4 89 F0 0F AF 45 BC 89 45 D4 31 D2 8B 45 08 F7 75 D4 C7 45 C0 01 00 00 00 8B 55 BC 39 55 C0 89 75 D0 73 31
+
+. 1250 8055F73 12
+. FF 45 DC 39 4D DC 0F 82 77 FF FF FF
+
+. 1251 8055F7F 11
+. D1 6D 08 8B 55 08 39 55 D8 77 08
+
+. 1252 8055F8A 8
+. 8B 45 C4 E9 BB FE FF FF
+
+. 1253 8055E4D 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1254 804B03B 26
+. 89 C3 FF 75 08 FF 75 0C 31 C0 85 DB 0F 94 C0 68 00 00 06 08 50 E8 E3 2D 01 00
+
+. 1255 804B055 21
+. 83 C4 14 FF 75 E8 53 FF 75 0C FF 75 08 FF 75 E4 E8 FA B0 00 00
+
+. 1256 8056164 41
+. 55 89 E5 57 56 53 83 EC 2C 8B 75 10 8B 55 18 E8 00 00 00 00 5B 81 C3 9C AF 00 00 8B 42 04 85 F6 89 45 F0 0F 84 3B 02 00 00
+
+. 1257 805618D 7
+. 31 C0 83 FE 01 74 23
+
+. 1258 80561B7 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1259 804B06A 10
+. 31 C9 83 C4 20 3B 4D 0C 73 26
+***** EXPENSIVE 1261 177
+
+. 1260 804B074 36
+. 31 D2 52 8B 45 0C 50 DF 2C 24 31 C0 83 C4 08 90 8B 55 E4 D9 04 82 D8 F1 41 D9 1C 82 03 45 08 3B 4D 0C 72 EC
+***** EXPENSIVE 1262 178
+
+. 1261 804B098 19
+. DD D8 FF 75 0C FF 75 08 FF 75 E0 FF 75 E4 E8 49 23 01 00
+
+. 1262 805D3F4 31
+. 55 89 E5 57 56 53 83 EC 0C 8B 45 14 E8 00 00 00 00 5B 81 C3 0F 3D 00 00 85 C0 8B 7D 0C 74 2D
+
+. 1263 805D413 7
+. 31 C9 3B 4D 14 73 1C
+
+. 1264 805D41A 28
+. 31 D2 8B 75 08 8B 04 96 41 89 04 D7 C7 44 D7 04 00 00 00 00 03 55 10 3B 4D 14 72 E6
+
+. 1265 805D436 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 1266 804B0AB 34
+. 68 80 84 2E 41 6A 00 FF 75 0C FF 75 08 FF 75 E0 68 BC E8 05 08 FF 75 DC 68 C8 E8 05 08 E8 BF EF FF FF
+
+. 1267 804B0CD 20
+. 83 C4 30 FF 75 08 FF 75 0C 68 40 00 06 08 50 E8 57 2D 01 00
+
+. 1268 804B0E1 9
+. 5A FF 75 E8 E8 3E FF 00 00
+
+. 1269 805B028 31
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF 76 04 E8 00 00 00 00 5B 81 C3 D9 60 00 00 E8 C9 D6 FE FF
+
+. 1270 805B047 15
+. C7 46 04 00 00 00 00 89 34 24 E8 BA D6 FE FF
+
+. 1271 805B056 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 1272 804B0EA 9
+. 58 FF 75 EC E8 F9 FE 00 00
+
+. 1273 805AFEC 34
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF B6 08 02 00 00 E8 00 00 00 00 5B 81 C3 12 61 00 00 E8 02 D7 FE FF
+
+. 1274 805B00E 18
+. C7 86 08 02 00 00 00 00 00 00 89 34 24 E8 F0 D6 FE FF
+
+. 1275 805B020 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 1276 804B0F3 8
+. 89 1C 24 E8 91 AF 00 00
+
+. 1277 805608C 34
+. 55 89 E5 56 53 8B 75 08 83 EC 0C FF B6 08 02 00 00 E8 00 00 00 00 5B 81 C3 72 B0 00 00 E8 62 26 FF FF
+
+. 1278 80560AE 18
+. C7 86 08 02 00 00 00 00 00 00 89 34 24 E8 50 26 FF FF
+
+. 1279 80560C0 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 1280 804B0FB 9
+. 58 FF 75 E4 E8 0C D6 FF FF
+
+. 1281 804B104 9
+. 58 FF 75 E0 E8 03 D6 FF FF
+
+. 1282 804B10D 9
+. 58 FF 75 DC E8 FA D5 FF FF
+
+. 1283 804B116 9
+. 58 FF 75 D8 E8 F1 D5 FF FF
+
+. 1284 804B11F 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1285 804B8F3 8
+. 83 C4 10 83 FB 03 76 D2
+
+. 1286 804B8CD 10
+. 83 EC 08 56 53 E8 39 D3 FF FF
+
+. 1287 80490BA 14
+. 6A 00 57 FF 75 F0 FF 75 E4 E8 BC FA FF FF
+
+. 1288 80490C8 15
+. FF 75 F0 57 68 40 EC 05 08 50 E8 61 4D 01 00
+
+. 1289 80490D7 8
+. 83 C4 20 E9 1A FD FF FF
+
+. 1290 8049095 14
+. 6A 00 57 FF 75 F0 FF 75 E4 E8 E1 FA FF FF
+
+. 1291 80490A3 15
+. FF 75 F0 57 68 E0 EB 05 08 50 E8 86 4D 01 00
+
+. 1292 80490B2 8
+. 83 C4 20 E9 96 FD FF FF
+
+. 1293 804906D 17
+. 68 B8 0B 00 00 57 FF 75 F0 FF 75 D8 E8 06 FB FF FF
+
+. 1294 804907E 15
+. FF 75 F0 57 68 80 EB 05 08 50 E8 AB 4D 01 00
+
+. 1295 804908D 8
+. 83 C4 20 E9 48 FE FF FF
+
+. 1296 804A8E7 14
+. 6A 00 FF 75 0C FF 75 08 57 E8 F7 FA FF FF
+
+. 1297 804A3EC 26
+. 55 89 E5 57 56 53 53 31 F6 31 C9 3B 75 10 8B 7D 14 C7 45 F0 00 00 00 00 73 69
+
+. 1298 804A406 15
+. 89 F6 BB 01 00 00 00 83 C1 02 3B 5D 0C 73 4F
+***** EXPENSIVE 1300 179
+
+. 1299 804A415 34
+. 8D 76 00 31 D2 52 8D 04 0F 50 8B 45 08 DF 2C 24 D9 04 88 DA E9 DF E0 80 E4 45 83 C4 08 80 FC 40 74 03
+***** EXPENSIVE 1301 180
+
+. 1300 804A43A 32
+. 41 31 D2 52 8D 04 0F 50 8B 45 08 DF 2C 24 D9 04 88 DA E9 DF E0 80 E4 45 83 C4 08 80 FC 40 74 03
+
+. 1301 804A45D 7
+. 43 41 3B 5D 0C 72 B4
+
+. 1302 804A464 11
+. FF 45 F0 8B 45 10 39 45 F0 72 99
+
+. 1303 804A46F 8
+. 59 5B 89 F0 5E 5F C9 C3
+
+. 1304 804A8F5 17
+. FF 75 08 FF 75 0C 68 80 FB 05 08 50 E8 32 35 01 00
+
+. 1305 804A906 8
+. 83 C4 20 E9 15 FD FF FF
+
+. 1306 804A8C0 14
+. 6A 00 FF 75 0C FF 75 08 57 E8 1E FB FF FF
+
+. 1307 804A8CE 17
+. FF 75 08 FF 75 0C 68 20 FB 05 08 50 E8 59 35 01 00
+
+. 1308 804A8DF 8
+. 83 C4 20 E9 95 FD FF FF
+
+. 1309 804A894 19
+. 68 B8 0B 00 00 FF 75 0C FF 75 08 FF 75 DC E8 45 FB FF FF
+
+. 1310 804A8A7 17
+. FF 75 08 FF 75 0C 68 C0 FA 05 08 50 E8 80 35 01 00
+
+. 1311 804A8B8 8
+. 83 C4 20 E9 4F FE FF FF
+***** EXPENSIVE 1313 181
+vex iropt: 2 x unrolling (34 sts -> 68 sts)
+
+. 1312 80496DC 21
+. 31 D2 52 51 DF 2C 24 8B 75 E0 DD 1C CE 41 83 C4 08 39 D9 72 EB
+***** EXPENSIVE 1314 182
+vex iropt: 2 x unrolling (35 sts -> 70 sts)
+
+. 1313 804AEC8 21
+. 31 D2 52 51 DF 2C 24 8B 45 E4 D9 1C 88 41 83 C4 08 39 D9 72 EB
+***** EXPENSIVE 1315 183
+
+. 1314 804A418 31
+. 31 D2 52 8D 04 0F 50 8B 45 08 DF 2C 24 D9 04 88 DA E9 DF E0 80 E4 45 83 C4 08 80 FC 40 74 03
+
+. 1315 804B8FB 5
+. 46 39 FE 76 C8
+
+. 1316 804B8C8 15
+. BB 01 00 00 00 83 EC 08 56 53 E8 39 D3 FF FF
+
+. 1317 804CC92 9
+. 8B 55 0C 8B 02 85 C0 74 42
+
+. 1318 804CC9B 14
+. 89 45 EC 31 D2 89 C8 F7 75 EC 85 D2 75 21
+
+. 1319 804CCCA 14
+. 46 8B 55 0C 8B 04 B2 85 C0 89 45 EC 74 05
+
+. 1320 804CCD8 5
+. 83 F9 01 75 C1
+
+. 1321 804CC9E 11
+. 31 D2 89 C8 F7 75 EC 85 D2 75 21
+
+. 1322 804CCA9 33
+. 8D 76 00 89 C8 31 D2 F7 75 EC 89 C1 8B 55 14 8B 45 EC 89 04 BA 31 D2 89 C8 F7 75 EC 47 85 D2 74 E2
+
+. 1323 804CCDD 13
+. F7 C1 01 00 00 00 BE 02 00 00 00 75 25
+
+. 1324 804CD0F 10
+. 83 F9 01 BE 03 00 00 00 74 30
+
+. 1325 804CD49 11
+. 31 F6 39 FE B8 01 00 00 00 73 0C
+vex iropt: 4 x unrolling (20 sts -> 80 sts)
+
+. 1326 804CD54 12
+. 8B 55 14 0F AF 04 B2 46 39 FE 72 F4
+
+. 1327 804CD60 5
+. 3B 45 08 74 27
+
+. 1328 804CD8C 9
+. 8B 45 10 89 38 31 C0 EB EF
+
+. 1329 804CF31 37
+. 8B 45 DC 8B 75 D4 C1 E0 04 89 75 AC 89 45 A4 C7 45 B4 01 00 00 00 8B 55 B8 39 55 B4 C7 45 B0 00 00 00 00 77 72
+***** EXPENSIVE 1331 184
+
+. 1330 804CF56 59
+. 8B 7D A4 8D 76 00 8B 75 AC 01 75 B0 8B 45 B0 31 D2 F7 75 08 89 55 B0 31 D2 52 8B 45 B0 50 DF 2C 24 DC 4D C8 83 EC 08 8B 45 C4 8B B0 08 02 00 00 DD 55 88 DD 1C 24 E8 8F B6 FF FF
+***** EXPENSIVE 1332 185
+
+. 1331 804CF91 23
+. DD 1C 3E 8B 55 C4 DD 45 88 8B B2 08 02 00 00 DD 1C 24 E8 88 B7 FF FF
+***** EXPENSIVE 1333 186
+
+. 1332 804CFA8 32
+. DD 5C 3E 08 FF 45 B4 8B 75 B8 83 C7 10 83 45 A8 10 83 45 A4 10 FF 45 DC 83 C4 10 39 75 B4 76 94
+
+. 1333 804CFC8 21
+. 8B 45 D4 FF 45 C0 8B 55 BC 01 45 AC 39 55 C0 0F 82 63 FF FF FF
+
+. 1334 804CFDD 15
+. 8B 4D E4 FF 45 E0 39 4D E0 0F 82 F5 FE FF FF
+vex iropt: 2 x unrolling (49 sts -> 98 sts)
+
+. 1335 8048D2C 50
+. 8B 75 E4 8B 04 0E 8B 54 0E 04 8B 75 E0 89 04 0E 89 54 0E 04 8B 75 E4 8B 44 0E 08 8B 54 0E 0C 43 8B 75 E0 89 44 0E 08 89 54 0E 0C 03 4D D4 39 FB 72 CE
+
+. 1336 804D383 7
+. 8B 45 14 3B 30 74 26
+
+. 1337 804D3B0 4
+. 3B 32 74 14
+
+. 1338 804D3C8 19
+. C7 45 EC 00 00 00 00 8B 55 F0 39 55 EC 0F 83 B7 00 00 00
+
+. 1339 804D3DB 38
+. 90 8B 45 EC 8B 55 14 8B 4C 82 08 8B 45 E8 0F AF C1 89 45 E8 31 D2 89 F0 F7 75 E8 8B 55 DC 85 D2 0F 85 5F 02 00 00
+
+. 1340 804D401 41
+. 8B 55 08 89 55 D4 8B 55 0C 89 55 D0 8B 55 D8 89 55 CC C7 45 C8 01 00 00 00 C7 45 DC 01 00 00 00 83 F9 02 0F 84 06 02 00 00
+
+. 1341 804D630 37
+. 8B 4D EC 8B 45 14 FF B4 88 08 01 00 00 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 57 00 00 00
+
+. 1342 804D6AC 77
+. 55 89 E5 57 56 53 83 EC 28 8B 45 20 D1 E8 8B 4D 1C 31 D2 89 45 E4 8B 45 20 F7 F1 C7 45 E8 00 00 00 00 D1 E9 E8 00 00 00 00 5B 81 C3 3F 3A 01 00 39 45 E8 C7 45 F0 00 00 00 00 C7 45 EC 00 00 00 00 89 45 E0 89 4D CC 0F 83 FB 00 00 00
+
+. 1343 804D6F9 26
+. 8B 55 24 89 55 D4 C7 45 D0 00 00 00 00 89 F6 8B 4D E8 85 C9 0F 85 F1 00 00 00
+***** EXPENSIVE 1345 187
+
+. 1344 804D713 23
+. DD 83 34 F6 FF FF DD 83 44 F6 FF FF 31 F6 3B 75 CC 0F 83 D4 00 00 00
+***** EXPENSIVE 1346 188
+
+. 1345 804D72A 134
+. 8B 4D EC 8B 7D 0C 8B 45 14 0F AF 4D 14 8B 55 D0 C1 E7 04 C1 E0 04 C1 E1 04 C1 E2 04 89 7D D8 89 45 DC 8B 45 F0 03 45 E4 8B 7D 08 0F AF 45 0C DD 04 17 DD 44 17 08 C1 E0 04 DD 04 07 DD 44 07 08 D9 C3 D9 C3 D9 C9 8B 45 10 D8 C3 D9 C9 D8 C2 D9 C9 DD 1C 08 DD 5C 08 08 D9 CB 8B 45 EC DE E1 D9 C9 DE E2 03 45 CC D9 C3 D9 C3 D9 C9 0F AF 45 14 D8 CA D9 C9 D8 CB D9 CA D8 CC D9 CB D8 CD D9 C9 C1 E0 04 8B 7D 10
+***** EXPENSIVE 1347 189
+
+. 1346 804D7B0 39
+. DE E2 DE C2 DD 1C 07 DD 5C 07 08 46 8B 45 0C 03 55 D8 01 45 D0 FF 45 F0 03 4D DC FF 45 EC 3B 75 CC 0F 82 75 FF FF FF
+***** EXPENSIVE 1348 190
+
+. 1347 804D7D7 29
+. DD D8 DD D8 8B 55 CC FF 45 E8 8B 7D E0 01 55 EC 83 45 D4 10 39 7D E8 0F 82 14 FF FF FF
+
+. 1348 804D7F4 10
+. 83 C4 28 5B 5E 31 C0 5F C9 C3
+
+. 1349 804D655 8
+. 83 C4 20 E9 26 FE FF FF
+
+. 1350 804D483 15
+. FF 45 EC 8B 4D F0 39 4D EC 0F 82 4A FF FF FF
+
+. 1351 804D492 6
+. 83 7D DC 01 74 07
+
+. 1352 804D49F 12
+. C7 45 EC 00 00 00 00 39 75 EC 73 ED
+***** EXPENSIVE 1354 191
+
+. 1353 804D4AB 47
+. 8B 45 0C C1 E0 04 31 FF 31 C9 89 45 C4 8B 55 D8 DD 04 3A 8B 45 08 DD 1C 08 DD 44 3A 08 FF 45 EC DD 5C 08 08 83 C7 10 03 4D C4 39 75 EC 72 DE
+***** EXPENSIVE 1355 192
+vex iropt: 2 x unrolling (43 sts -> 86 sts)
+
+. 1354 804D4B8 34
+. 8B 55 D8 DD 04 3A 8B 45 08 DD 1C 08 DD 44 3A 08 FF 45 EC DD 5C 08 08 83 C7 10 03 4D C4 39 75 EC 72 DE
+
+. 1355 804D4DA 2
+. EB BC
+
+. 1356 804D498 7
+. 31 C0 E9 07 FF FF FF
+vex iropt: 2 x unrolling (49 sts -> 98 sts)
+
+. 1357 8048D88 50
+. 8B 75 E4 8B 04 0E 8B 54 0E 04 8B 75 D8 89 04 0E 89 54 0E 04 8B 75 E4 8B 44 0E 08 8B 54 0E 0C 43 8B 75 D8 89 44 0E 08 89 54 0E 0C 03 4D D4 39 FB 72 CE
+***** EXPENSIVE 1359 193
+vex iropt: 2 x unrolling (39 sts -> 78 sts)
+
+. 1358 804D310 26
+. DD 04 06 D8 C9 DD 1C 06 DD 44 06 08 D8 C9 42 DD 5C 06 08 01 C8 3B 55 10 72 E6
+***** EXPENSIVE 1360 194
+vex iropt: 2 x unrolling (40 sts -> 80 sts)
+
+. 1359 8048E80 28
+. 8B 4D E0 DD 04 01 D8 C9 DD 1C 01 DD 44 01 08 D8 C9 43 DD 5C 01 08 01 D0 39 FB 72 E4
+
+. 1360 8050401 20
+. 89 75 B0 C7 45 B4 01 00 00 00 8B 45 B8 31 FF 39 45 B4 76 5F
+***** EXPENSIVE 1362 195
+
+. 1361 8050474 45
+. 03 7D B0 89 F8 31 D2 F7 75 08 89 D7 31 D2 52 57 DF 2C 24 DC 4D C8 83 EC 08 8B 45 C4 8B B0 08 02 00 00 DD 55 98 DD 1C 24 E8 7F 81 FF FF
+***** EXPENSIVE 1363 196
+
+. 1362 80504A1 26
+. 8B 55 D8 D9 1C D6 8B 45 C4 DD 45 98 8B B0 08 02 00 00 DD 1C 24 E8 75 82 FF FF
+***** EXPENSIVE 1364 197
+
+. 1363 80504BB 25
+. 8B 55 D8 D9 5C D6 04 FF 45 B4 8B 75 B8 42 83 C4 10 39 75 B4 89 55 D8 76 A0
+
+. 1364 80504D4 5
+. E9 3C FF FF FF
+
+. 1365 8050415 17
+. 8B 45 D0 FF 45 C0 8B 55 BC 01 45 B0 39 55 C0 72 DE
+
+. 1366 8050426 11
+. 8B 4D E0 FF 45 DC 39 4D DC 72 81
+
+. 1367 804C4E8 5
+. E8 5F F4 FF FF
+***** EXPENSIVE 1369 198
+
+. 1368 804C8E0 31
+. 31 D2 52 8B 45 DC 50 DF 2C 24 D8 C9 83 EC 08 DD 14 24 D9 C9 DD 5D 98 DD 5D B8 E8 21 BD FF FF
+***** EXPENSIVE 1370 199
+
+. 1369 804C8C0 30
+. 31 FF D9 83 50 F6 FF FF 3B 7D 10 D9 55 D4 D9 5D D8 C7 45 DC 00 00 00 00 0F 83 93 00 00 00
+vex iropt: 2 x unrolling (36 sts -> 72 sts)
+
+. 1370 804A58C 26
+. 8B 04 D7 8B 4D E4 89 04 D1 8B 44 D7 04 43 89 44 D1 04 03 55 08 3B 5D 0C 72 E6
+
+. 1371 8050827 7
+. 8B 7D 14 3B 37 74 26
+
+. 1372 8050854 4
+. 3B 32 74 14
+
+. 1373 805086C 19
+. C7 45 EC 00 00 00 00 8B 45 F0 39 45 EC 0F 83 B7 00 00 00
+
+. 1374 805087F 38
+. 90 8B 55 EC 8B 7D 14 8B 4C 97 08 8B 45 E8 0F AF C1 89 45 E8 31 D2 89 F0 8B 7D DC F7 75 E8 85 FF 0F 85 5B 02 00 00
+
+. 1375 80508A5 41
+. 8B 55 08 89 55 D4 8B 7D 0C 8B 55 D8 89 7D D0 89 55 CC C7 45 C8 01 00 00 00 C7 45 DC 01 00 00 00 83 F9 02 0F 84 02 02 00 00
+
+. 1376 8050AD0 37
+. 8B 45 EC 8B 55 14 FF B4 82 08 01 00 00 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 57 00 00 00
+
+. 1377 8050B4C 77
+. 55 89 E5 57 56 53 83 EC 1C 8B 45 20 D1 E8 8B 4D 1C 31 D2 89 45 E4 8B 45 20 F7 F1 C7 45 E8 00 00 00 00 D1 E9 E8 00 00 00 00 5B 81 C3 9F 05 01 00 39 45 E8 C7 45 F0 00 00 00 00 C7 45 EC 00 00 00 00 89 45 E0 89 4D D8 0F 83 CF 00 00 00
+
+. 1378 8050B99 18
+. 8B 55 24 89 55 DC 90 8B 45 E8 85 C0 0F 85 CD 00 00 00
+***** EXPENSIVE 1380 200
+
+. 1379 8050BAB 23
+. D9 83 4C F6 FF FF D9 83 50 F6 FF FF 31 F6 3B 75 D8 0F 83 B0 00 00 00
+***** EXPENSIVE 1381 201
+
+. 1380 8050BC2 131
+. 8B 4D EC 8B 55 F0 0F AF 4D 14 0F AF 55 0C 8B 45 F0 8B 7D 08 03 45 E4 D9 04 D7 D9 44 D7 04 0F AF 45 0C D9 04 C7 D9 44 C7 04 D9 C3 D9 C3 D9 C9 8B 45 10 D8 C3 D9 C9 D8 C2 D9 C9 D9 1C C8 D9 5C C8 04 D9 CB DE E1 D9 C9 DE E2 8B 45 EC D9 C3 D9 C3 D9 C9 03 45 D8 D8 CA D9 C9 D8 CB D9 CA D8 CC D9 CB D8 CD D9 C9 46 0F AF 45 14 8B 7D 10 DE E2 DE C2 03 55 0C FF 45 F0 03 4D 14 FF 45 EC 3B 75 D8 D9 1C C7
+***** EXPENSIVE 1382 202
+
+. 1381 8050C45 6
+. D9 5C C7 04 72 85
+***** EXPENSIVE 1383 203
+
+. 1382 8050C4B 29
+. DD D8 DD D8 8B 45 D8 FF 45 E8 8B 55 E0 01 45 EC 83 45 DC 08 39 55 E8 0F 82 38 FF FF FF
+
+. 1383 8050C68 10
+. 83 C4 1C 5B 5E 31 C0 5F C9 C3
+
+. 1384 8050AF5 8
+. 83 C4 20 E9 2A FE FF FF
+
+. 1385 8050927 15
+. FF 45 EC 8B 4D F0 39 4D EC 0F 82 4A FF FF FF
+
+. 1386 8050936 6
+. 83 7D DC 01 74 07
+
+. 1387 8050943 12
+. C7 45 EC 00 00 00 00 39 75 EC 73 ED
+
+. 1388 805094F 45
+. 31 D2 8D 76 00 8B 7D EC 8B 4D D8 8B 04 F9 8B 7D 08 89 04 D7 8B 7D EC 8B 44 F9 04 47 8B 4D 08 89 44 D1 04 03 55 0C 39 F7 89 7D EC 72 D8
+vex iropt: 2 x unrolling (43 sts -> 86 sts)
+
+. 1389 8050954 40
+. 8B 7D EC 8B 4D D8 8B 04 F9 8B 7D 08 89 04 D7 8B 7D EC 8B 44 F9 04 47 8B 4D 08 89 44 D1 04 03 55 0C 39 F7 89 7D EC 72 D8
+
+. 1390 805097C 2
+. EB BE
+
+. 1391 805093C 7
+. 31 C0 E9 07 FF FF FF
+vex iropt: 2 x unrolling (36 sts -> 72 sts)
+
+. 1392 804A5C8 26
+. 8B 04 D7 8B 4D DC 89 04 D1 8B 44 D7 04 43 89 44 D1 04 03 55 08 3B 5D 0C 72 E6
+***** EXPENSIVE 1394 204
+
+. 1393 804A0A8 49
+. 8B 45 0C D9 04 C8 8B 55 14 D9 44 C8 04 D9 C9 D8 2C CA D9 C9 D8 6C CA 04 D9 C9 D9 E1 D9 C9 D9 E1 DE C1 D8 C9 DD E2 DF E0 F6 C4 45 0F 85 FB 00 00 00
+***** EXPENSIVE 1395 205
+
+. 1394 804A0D9 11
+. DD DA 43 03 4D 18 3B 5D 1C 72 C4
+***** EXPENSIVE 1396 206
+vex iropt: 2 x unrolling (44 sts -> 88 sts)
+
+. 1395 80507B4 26
+. D9 04 C6 D8 C9 D9 1C C6 D9 44 C6 04 D8 C9 42 D9 5C C6 04 03 45 0C 39 FA 72 E6
+***** EXPENSIVE 1397 207
+vex iropt: 2 x unrolling (47 sts -> 94 sts)
+
+. 1396 804A6AC 30
+. 8B 55 E4 D9 04 C2 D8 C9 D9 1C C2 D9 44 C2 04 D8 C9 43 D9 5C C2 04 03 45 08 3B 5D 0C 72 E2
+vex iropt: 4 x unrolling (24 sts -> 96 sts)
+
+. 1397 804BFFC 23
+. 43 C7 04 C1 00 00 00 00 C7 44 C1 04 00 00 00 00 03 45 10 39 F3 72 E9
+***** EXPENSIVE 1399 208
+
+. 1398 804C04C 38
+. 89 F8 31 D2 F7 F6 89 D0 31 D2 52 50 DF 2C 24 DC 0D E0 06 06 08 83 EC 08 DC 75 D8 DD 14 24 DD 5D B8 E8 AE C5 FF FF
+***** EXPENSIVE 1400 209
+vex iropt: 2 x unrolling (35 sts -> 70 sts)
+
+. 1399 804C120 18
+. D9 C9 40 D9 14 CB D9 C9 D9 54 CB 04 01 F9 39 D0 72 EE
+vex iropt: 4 x unrolling (22 sts -> 88 sts)
+
+. 1400 804C13C 22
+. 40 C7 04 CE 00 00 00 00 C7 44 CE 04 00 00 00 00 01 F9 39 D0 72 EA
+***** EXPENSIVE 1402 210
+
+. 1401 804C1C8 39
+. 89 F0 31 D2 F7 75 0C 89 D0 31 D2 52 50 DF 2C 24 DC 0D E8 06 06 08 83 EC 08 DC 75 D8 DD 14 24 DD 5D C8 E8 31 C4 FF FF
+vex iropt: 4 x unrolling (27 sts -> 108 sts)
+
+. 1402 804C24C 27
+. 8B 55 20 43 C7 04 C2 00 00 00 00 C7 44 C2 04 00 00 00 00 03 45 10 3B 5D 0C 72 E5
+***** EXPENSIVE 1404 211
+
+. 1403 804C26E 59
+. 8B 45 08 31 D2 F7 75 0C 8B 4D 10 0F AF CA 31 D2 52 8B 45 0C 50 DF 2C 24 D9 45 14 8B 45 20 D8 C9 D9 C9 D8 4D 18 D9 C9 D9 1C C8 D9 5C C8 04 83 C4 08 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 1404 8058696 17
+. 8B 45 08 D1 E8 83 EC 0C C1 E0 04 50 E8 E9 FF FE FF
+
+. 1405 80586A7 20
+. 8B 55 CC 83 C4 10 85 C0 89 82 08 02 00 00 0F 84 E9 01 00 00
+
+. 1406 805876F 41
+. 41 89 4D B4 8B 4D DC C1 E1 04 89 45 B0 89 4D A8 90 8B 45 B4 C7 45 BC 01 00 00 00 D1 E8 39 45 BC C7 45 B8 00 00 00 00 73 72
+
+. 1407 805880A 21
+. 8B 55 C0 FF 45 C8 8B 4D C4 01 55 B0 39 4D C8 0F 82 61 FF FF FF
+vex iropt: 2 x unrolling (37 sts -> 74 sts)
+
+. 1408 80497D0 34
+. 89 D8 C1 E0 04 8B 75 DC 8B 54 06 04 41 8B 04 06 8B 75 E0 89 04 DE 89 54 DE 04 03 5D F0 3B 4D EC 72 DE
+
+. 1409 8058A50 10
+. 8B 45 10 8B 75 14 3B 06 74 26
+
+. 1410 8058A80 7
+. 8B 4D 10 3B 0A 74 15
+
+. 1411 8058A9C 11
+. 31 FF 3B 7D F0 0F 83 91 00 00 00
+
+. 1412 8058AA7 34
+. 90 8B 45 EC 8B 75 14 89 C1 8B 54 BE 08 0F AF CA 40 D1 E8 89 4D EC 8B 4D E4 48 85 C9 0F 85 6F 01 00 00
+
+. 1413 8058AC9 41
+. 8B 75 08 89 75 E0 8B 4D 0C 8B 75 E8 89 4D DC 89 75 D8 C7 45 D4 01 00 00 00 C7 45 E4 01 00 00 00 83 FA 02 0F 84 1E 01 00 00
+
+. 1414 8058C10 34
+. 50 8B 75 14 FF B4 BE 08 01 00 00 FF 75 10 FF 75 EC FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 52 00 00 00
+
+. 1415 8058C84 46
+. 55 89 E5 57 56 83 EC 30 8B 45 1C 89 C2 D1 EA 89 55 EC 31 D2 F7 75 18 8B 4D 18 C7 45 F0 00 00 00 00 D1 E9 39 45 F0 89 45 E8 89 4D E4 73 5A
+***** EXPENSIVE 1417 212
+
+. 1416 8058CB2 90
+. 8B 4D 18 49 31 FF 31 F6 89 F6 8B 45 EC 01 F0 8B 55 0C 0F AF D6 89 45 D0 8B 45 08 DD 04 D0 8B 45 D0 0F AF 45 0C 8B 55 08 DD 04 C2 D9 C1 8B 45 14 0F AF C7 8B 55 10 D8 C1 DD 1C C2 8B 45 14 0F AF C1 DE E9 DD 1C C2 FF 45 F0 8B 45 E8 03 4D 18 03 7D 18 03 75 E4 39 45 F0 72 B0
+
+. 1417 8058D0C 10
+. 83 7D E4 01 0F 84 B3 01 00 00
+
+. 1418 8058EC9 7
+. 83 C4 30 5E 5F C9 C3
+
+. 1419 8058C32 5
+. E9 F4 FE FF FF
+
+. 1420 8058B2B 13
+. 83 C4 20 47 3B 7D F0 0F 82 70 FF FF FF
+
+. 1421 8058B38 6
+. 83 7D E4 01 74 07
+
+. 1422 8058B45 7
+. 31 FF 3B 7D 10 73 F2
+
+. 1423 8058B4C 36
+. 8B 75 0C C1 E6 03 8B 4D 08 89 75 D0 8B 75 E8 8B 04 FE 8B 54 FE 04 47 89 01 89 51 04 03 4D D0 3B 7D 10 72 E8
+vex iropt: 2 x unrolling (32 sts -> 64 sts)
+
+. 1424 8058B58 24
+. 8B 75 E8 8B 04 FE 8B 54 FE 04 47 89 01 89 51 04 03 4D D0 3B 7D 10 72 E8
+
+. 1425 8058B70 2
+. EB CC
+
+. 1426 8058B3E 7
+. 31 C0 E9 31 FF FF FF
+
+. 1427 8055CA5 62
+. 8B 55 14 8D 04 D5 F8 FF FF FF 8B 75 10 8B 4D 08 0F AF 45 10 0F AF 75 EC 8B 54 08 04 C1 E6 04 8B 04 08 8B 4D 0C 89 04 31 89 54 31 04 C7 44 31 08 00 00 00 00 C7 44 31 0C 00 00 00 00 EB B8
+
+. 1428 8053726 46
+. 40 8B 55 DC 89 45 B0 C1 E2 04 8B 45 D4 89 45 AC 89 55 A4 8D 76 00 8B 45 B0 C7 45 B8 01 00 00 00 D1 E8 39 45 B8 C7 45 B4 00 00 00 00 73 72
+
+. 1429 80537C6 21
+. 8B 75 D4 FF 45 C0 8B 45 BC 01 75 AC 39 45 C0 0F 82 61 FF FF FF
+
+. 1430 80537DB 15
+. 8B 4D E4 FF 45 E0 39 4D E0 0F 82 EF FE FF FF
+
+. 1431 8053998 7
+. 8B 75 14 3B 3E 74 25
+
+. 1432 80539C4 4
+. 3B 3A 74 14
+
+. 1433 80539DC 39
+. 8B 45 14 8B 40 04 C7 45 D8 00 00 00 00 39 45 D8 89 45 D4 C7 45 DC 01 00 00 00 C7 45 D0 00 00 00 00 0F 83 9F 00 00 00
+
+. 1434 8053A03 42
+. 90 8B 55 D8 8B 75 14 8B 4C 96 08 8B 45 DC 0F AF C1 89 45 DC 31 D2 89 F8 F7 75 DC 40 D1 E8 8B 75 D0 48 85 F6 0F 85 7B 01 00 00
+
+. 1435 8053A2D 41
+. 8B 55 08 89 55 EC 8B 75 0C 8B 55 F0 89 75 E4 89 55 E8 C7 45 E0 01 00 00 00 C7 45 D0 01 00 00 00 83 F9 02 0F 84 2A 01 00 00
+
+. 1436 8053B80 35
+. 52 8B 45 D8 8B 55 14 FF B4 82 08 01 00 00 57 FF 75 DC FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 51 00 00 00
+
+. 1437 8053BF4 69
+. 55 89 E5 57 56 53 83 EC 44 BE 02 00 00 00 31 D2 8B 45 1C F7 F6 89 45 E8 31 D2 8B 45 1C F7 75 18 89 45 E0 31 D2 8B 45 18 F7 F6 C7 45 EC 00 00 00 00 E8 00 00 00 00 5B 81 C3 EA D4 00 00 39 45 EC 89 45 D8 73 59
+***** EXPENSIVE 1439 213
+
+. 1438 8053C39 89
+. 8B 45 E0 C1 E0 04 8D 50 F8 31 C9 89 C6 89 F6 8B 45 0C 0F AF C1 C1 E0 04 8B 7D 08 DD 04 07 8B 45 0C 0F AF C2 DD 04 38 D9 C1 8B 45 14 0F AF C1 8B 7D 10 D8 C1 DD 1C C7 8B 7D E8 8D 04 0F 0F AF 45 14 DE E9 8B 7D 10 DD 1C C7 FF 45 EC 8B 45 D8 01 F2 03 4D E0 39 45 EC 72 B6
+
+. 1439 8053C92 10
+. 83 7D E0 01 0F 84 D1 01 00 00
+
+. 1440 8053E6D 8
+. 83 C4 44 5B 5E 5F C9 C3
+
+. 1441 8053BA3 5
+. E9 E8 FE FF FF
+
+. 1442 8053A90 18
+. 83 C4 20 FF 45 D8 8B 55 D4 39 55 D8 0F 82 62 FF FF FF
+
+. 1443 8053AA2 6
+. 83 7D D0 01 74 07
+
+. 1444 8053AAF 12
+. C7 45 D8 00 00 00 00 39 7D D8 73 ED
+***** EXPENSIVE 1446 214
+
+. 1445 8053ABB 35
+. 8B 75 0C C1 E6 03 8B 4D 08 89 75 CC 90 8B 45 D8 8B 55 F0 DD 04 C2 40 DD 19 03 4D CC 39 F8 89 45 D8 72 EA
+***** EXPENSIVE 1447 215
+vex iropt: 2 x unrolling (33 sts -> 66 sts)
+
+. 1446 8053AC8 22
+. 8B 45 D8 8B 55 F0 DD 04 C2 40 DD 19 03 4D CC 39 F8 89 45 D8 72 EA
+
+. 1447 8053ADE 2
+. EB C8
+
+. 1448 8053AA8 7
+. 31 C0 E9 0C FF FF FF
+***** EXPENSIVE 1450 216
+vex iropt: 2 x unrolling (36 sts -> 72 sts)
+
+. 1449 80498A4 20
+. 8B 55 E0 DD 04 C2 D8 F1 41 DD 1C C2 03 45 F0 3B 4D EC 72 EC
+
+. 1450 805AD0A 17
+. 8B 45 08 D1 E8 83 EC 0C C1 E0 03 50 E8 75 D9 FE FF
+
+. 1451 805AD1B 20
+. 8B 55 C4 83 C4 10 85 C0 89 82 08 02 00 00 0F 84 CC 01 00 00
+
+. 1452 805ADDE 29
+. 41 89 4D B0 89 45 AC 8D 76 00 8B 45 B0 C7 45 B4 01 00 00 00 D1 E8 31 FF 39 45 B4 72 69
+
+. 1453 805ADFB 17
+. 8B 45 B8 FF 45 C0 8B 55 BC 01 45 AC 39 55 C0 72 DC
+
+. 1454 804C554 5
+. E8 F3 F3 FF FF
+vex iropt: 4 x unrolling (29 sts -> 116 sts)
+
+. 1455 804AFBC 21
+. 8B 5D E0 8B 04 D3 41 8B 5D E4 89 04 93 03 55 08 3B 4D 0C 72 EB
+
+. 1456 805B0A8 10
+. 8B 45 10 8B 75 14 3B 06 74 26
+
+. 1457 805B0D8 7
+. 8B 4D 10 3B 0A 74 15
+
+. 1458 805B0F4 11
+. 31 FF 3B 7D F0 0F 83 91 00 00 00
+
+. 1459 805B0FF 34
+. 90 8B 45 EC 8B 75 14 89 C1 8B 54 BE 08 0F AF CA 40 D1 E8 89 4D EC 8B 4D E4 48 85 C9 0F 85 63 01 00 00
+
+. 1460 805B121 41
+. 8B 75 08 89 75 E0 8B 4D 0C 8B 75 E8 89 4D DC 89 75 D8 C7 45 D4 01 00 00 00 C7 45 E4 01 00 00 00 83 FA 02 0F 84 12 01 00 00
+
+. 1461 805B25C 34
+. 50 8B 75 14 FF B4 BE 08 01 00 00 FF 75 10 FF 75 EC FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 52 00 00 00
+
+. 1462 805B2D0 46
+. 55 89 E5 57 56 83 EC 28 8B 45 1C 89 C2 D1 EA 89 55 EC 31 D2 F7 75 18 8B 4D 18 C7 45 F0 00 00 00 00 D1 E9 39 45 F0 89 45 E8 89 4D E4 73 5A
+***** EXPENSIVE 1464 217
+
+. 1463 805B2FE 90
+. 8B 4D 18 49 31 FF 31 F6 89 F6 8B 45 EC 01 F0 8B 55 0C 0F AF D6 89 45 D4 8B 45 08 D9 04 90 8B 45 D4 0F AF 45 0C 8B 55 08 D9 04 82 D9 C1 8B 45 14 0F AF C7 8B 55 10 D8 C1 D9 1C 82 8B 45 14 0F AF C1 DE E9 D9 1C 82 FF 45 F0 8B 45 E8 03 4D 18 03 7D 18 03 75 E4 39 45 F0 72 B0
+
+. 1464 805B358 10
+. 83 7D E4 01 0F 84 A5 01 00 00
+
+. 1465 805B507 7
+. 83 C4 28 5E 5F C9 C3
+
+. 1466 805B27E 5
+. E9 00 FF FF FF
+
+. 1467 805B183 13
+. 83 C4 20 47 3B 7D F0 0F 82 70 FF FF FF
+
+. 1468 805B190 6
+. 83 7D E4 01 74 07
+
+. 1469 805B19D 7
+. 31 FF 3B 7D 10 73 F2
+
+. 1470 805B1A4 28
+. 8B 4D 0C 8B 55 08 C1 E1 02 8D 76 00 8B 75 E8 8B 04 BE 47 89 02 01 CA 3B 7D 10 72 F0
+vex iropt: 4 x unrolling (24 sts -> 96 sts)
+
+. 1471 805B1B0 16
+. 8B 75 E8 8B 04 BE 47 89 02 01 CA 3B 7D 10 72 F0
+
+. 1472 805B1C0 2
+. EB D4
+
+. 1473 805B196 7
+. 31 C0 E9 31 FF FF FF
+
+. 1474 8058521 43
+. 8B 4D 14 8D 14 8D FC FF FF FF 8B 45 10 8B 4D 08 0F AF 55 10 8B 14 0A 0F AF 45 F0 8B 4D 0C 89 14 C1 C7 44 C1 04 00 00 00 00 EB CB
+
+. 1475 8055F42 29
+. 40 89 45 B4 89 75 B0 8D 76 00 8B 45 B4 C7 45 B8 01 00 00 00 D1 E8 31 FF 39 45 B8 72 65
+
+. 1476 8055F5F 17
+. 8B 75 D0 FF 45 C0 8B 45 BC 01 75 B0 39 45 C0 72 DC
+
+. 1477 8055F70 15
+. 8B 4D E0 FF 45 DC 39 4D DC 0F 82 77 FF FF FF
+
+. 1478 8056194 7
+. 8B 4D 14 3B 31 74 25
+
+. 1479 80561C0 4
+. 3B 32 74 14
+
+. 1480 80561D8 39
+. 8B 7D 14 8B 7F 04 C7 45 D8 00 00 00 00 39 7D D8 89 7D D4 C7 45 DC 01 00 00 00 C7 45 D0 00 00 00 00 0F 83 9E 00 00 00
+
+. 1481 80561FF 41
+. 90 8B 45 D8 8B 55 14 8B 4C 82 08 8B 7D DC 0F AF F9 31 D2 89 F0 F7 F7 40 D1 E8 8B 55 D0 48 85 D2 89 7D DC 0F 85 78 01 00 00
+
+. 1482 8056228 41
+. 8B 55 08 89 55 EC 8B 7D 0C 8B 55 F0 89 7D E4 89 55 E8 C7 45 E0 01 00 00 00 C7 45 D0 01 00 00 00 83 F9 02 0F 84 27 01 00 00
+
+. 1483 8056378 35
+. 50 8B 55 14 8B 45 D8 FF B4 82 08 01 00 00 56 FF 75 DC FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 51 00 00 00
+
+. 1484 80563EC 69
+. 55 89 E5 57 56 53 83 EC 44 BE 02 00 00 00 31 D2 8B 45 1C F7 F6 89 45 E8 31 D2 8B 45 1C F7 75 18 89 45 E0 31 D2 8B 45 18 F7 F6 C7 45 EC 00 00 00 00 E8 00 00 00 00 5B 81 C3 F2 AC 00 00 39 45 EC 89 45 D8 73 56
+***** EXPENSIVE 1486 218
+
+. 1485 8056431 86
+. 8B 45 E0 C1 E0 03 8D 50 FC 31 C9 89 C6 89 F6 8B 45 0C 0F AF C1 8B 7D 08 D9 04 C7 8B 45 0C 0F AF C2 D9 04 38 D9 C1 8B 45 14 0F AF C1 8B 7D 10 D8 C1 D9 1C 87 8B 7D E8 8D 04 0F 0F AF 45 14 DE E9 8B 7D 10 D9 1C 87 FF 45 EC 8B 45 D8 01 F2 03 4D E0 39 45 EC 72 B9
+
+. 1486 8056487 10
+. 83 7D E0 01 0F 84 D0 01 00 00
+
+. 1487 8056661 8
+. 83 C4 44 5B 5E 5F C9 C3
+
+. 1488 805639B 5
+. E9 EB FE FF FF
+
+. 1489 805628B 18
+. 83 C4 20 FF 45 D8 8B 4D D4 39 4D D8 0F 82 63 FF FF FF
+
+. 1490 805629D 6
+. 83 7D D0 01 74 07
+
+. 1491 80562AA 12
+. C7 45 D8 00 00 00 00 39 75 D8 73 ED
+
+. 1492 80562B6 36
+. 8B 7D 0C C1 E7 02 8B 55 08 89 7D CC 89 F6 8B 4D D8 8B 7D F0 8B 04 8F 41 89 02 03 55 CC 39 F1 89 4D D8 72 EA
+vex iropt: 4 x unrolling (27 sts -> 108 sts)
+
+. 1493 80562C4 22
+. 8B 4D D8 8B 7D F0 8B 04 8F 41 89 02 03 55 CC 39 F1 89 4D D8 72 EA
+
+. 1494 80562DA 2
+. EB C7
+
+. 1495 80562A3 7
+. 31 C0 E9 0D FF FF FF
+***** EXPENSIVE 1497 219
+vex iropt: 2 x unrolling (38 sts -> 76 sts)
+
+. 1496 804B084 20
+. 8B 55 E4 D9 04 82 D8 F1 41 D9 1C 82 03 45 08 3B 4D 0C 72 EC
+vex iropt: 2 x unrolling (32 sts -> 64 sts)
+
+. 1497 805D41C 26
+. 8B 75 08 8B 04 96 41 89 04 D7 C7 44 D7 04 00 00 00 00 03 55 10 3B 4D 14 72 E6
+
+. 1498 804A408 13
+. BB 01 00 00 00 83 C1 02 3B 5D 0C 73 4F
+
+. 1499 804CF40 22
+. C7 45 B4 01 00 00 00 8B 55 B8 39 55 B4 C7 45 B0 00 00 00 00 77 72
+
+. 1500 804D42A 9
+. 83 F9 03 0F 84 C6 01 00 00
+
+. 1501 804D5F9 47
+. 8B 55 EC 8B 4D 14 8B 94 91 08 01 00 00 C1 E0 04 83 EC 0C 01 D0 50 52 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 04 02 00 00
+***** EXPENSIVE 1503 220
+
+. 1502 804D82C 116
+. 55 89 E5 57 56 53 83 EC 7C BE AB AA AA AA 8B 45 20 F7 E6 D1 EA 8B 4D 1C 89 55 E0 8B 45 20 31 D2 F7 F1 89 45 DC 89 C8 F7 E6 89 55 84 D1 EA E8 00 00 00 00 5B 81 C3 B5 38 01 00 89 95 7C FF FF FF D1 E2 C7 45 EC 00 00 00 00 31 FF 89 45 80 89 55 D8 DD 83 B4 F8 FF FF D9 FA 8B 45 DC DC 8B BC F8 FF FF 39 C7 DD 5D D0 C7 45 E8 00 00 00 00 0F 83 DC 01 00 00
+
+. 1503 804D8A0 27
+. C7 45 8C 00 00 00 00 C7 45 88 00 00 00 00 89 F6 8B 75 E8 85 F6 0F 85 CB 01 00 00
+***** EXPENSIVE 1505 221
+
+. 1504 804D8BB 52
+. DD 83 34 F6 FF FF DD 5D C8 DD 83 44 F6 FF FF DD 5D C0 DD 45 C8 DD 5D B8 DD 45 C0 DD 5D B0 C7 45 E4 00 00 00 00 8B 85 7C FF FF FF 39 45 E4 0F 83 77 01 00 00
+***** EXPENSIVE 1506 222
+
+. 1505 804D8EF 149
+. DD 45 D0 8B 75 14 8B 45 0C 8B 55 14 DA 4D 18 0F AF F7 8B 4D 88 C1 E0 04 C1 E2 04 DD 5D 98 C1 E6 04 C1 E1 04 89 45 90 89 55 94 8D 76 00 8B 45 08 DD 04 08 DD 5D A8 DD 44 08 08 8B 45 EC 03 45 E0 0F AF 45 0C C1 E0 04 8B 55 08 DD 04 02 D9 C9 DD 5D A0 DD 44 02 08 8B 55 EC 8B 45 E0 8D 04 42 89 45 80 0F AF 45 0C C1 E0 04 8B 55 08 DD 04 02 DD 44 02 08 D9 C3 D9 C3 D9 C9 D8 C3 D9 C9 D8 C2 DD 83 BC F8 FF FF DD 83 BC F8 FF FF D9 C9 D8 CB D9 C9 D8 CA D9 C9
+***** EXPENSIVE 1507 223
+
+. 1506 804D984 126
+. DC 6D A8 D9 C9 DC 6D A0 D9 CF DE E5 D9 CD DE E3 D9 CB DC 4D 98 D9 CA DC 4D 98 D9 C9 8B 45 10 DC 45 A8 D9 CB DC 45 A0 8B 95 7C FF FF FF D9 C4 D9 C6 D9 C9 D8 E3 D9 C9 D8 C4 D9 CE DE C3 D9 CE DE E3 D9 CB DD 14 30 D9 CB DD 54 30 08 DD 45 C8 8D 04 3A DD 45 C0 0F AF 45 14 D8 CE D9 C9 D8 CF C1 E0 04 DE E1 D9 CC 8B 55 10 DD 5D A8 DD 5D A0 D9 CA DD 1C 02 D9 CA DC 4D C8 D9 CB DC 4D C0
+***** EXPENSIVE 1508 224
+
+. 1507 804DA02 100
+. DE C3 D9 CA DD 5C 02 08 8B 95 7C FF FF FF 8D 04 57 DD 45 B8 DD 45 B0 D9 C9 0F AF 45 14 D8 CB D9 C9 D8 CA D9 CB DC 4D B0 D9 CA DC 4D B8 D9 C9 C1 E0 04 8B 55 10 DE E3 DE C1 D9 C9 DD 1C 02 DD 5C 02 08 FF 45 E4 8B 45 0C 8B 95 7C FF FF FF 03 4D 90 01 45 88 FF 45 EC 03 75 94 47 39 55 E4 0F 82 B6 FE FF FF
+
+. 1508 804DA66 22
+. FF 45 E8 8B 4D DC 03 7D D8 83 45 8C 10 39 4D E8 0F 82 34 FE FF FF
+
+. 1509 804DA7C 10
+. 83 C4 7C 5B 5E 31 C0 5F C9 C3
+
+. 1510 804D628 5
+. E9 53 FE FF FF
+
+. 1511 804D480 18
+. 83 C4 30 FF 45 EC 8B 4D F0 39 4D EC 0F 82 4A FF FF FF
+
+. 1512 8050404 17
+. C7 45 B4 01 00 00 00 8B 45 B8 31 FF 39 45 B4 76 5F
+
+. 1513 80508CE 9
+. 83 F9 03 0F 84 C5 01 00 00
+
+. 1514 8050A9C 45
+. 8B 4D EC 8B 7D 14 8B 8C 8F 08 01 00 00 83 EC 0C 8D 04 C1 50 51 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 D7 01 00 00
+***** EXPENSIVE 1516 225
+
+. 1515 8050CA0 113
+. 55 89 E5 57 56 53 83 EC 64 BE AB AA AA AA 8B 45 20 F7 E6 D1 EA 8B 4D 1C 89 55 DC 8B 45 20 31 D2 F7 F1 89 45 D8 89 C8 F7 E6 89 55 9C D1 EA E8 00 00 00 00 5B 81 C3 41 04 01 00 89 55 94 D1 E2 C7 45 E8 00 00 00 00 31 FF 89 45 98 89 55 D4 DD 83 B4 F8 FF FF D9 FA 8B 55 D8 DC 8B BC F8 FF FF 39 D7 D9 5D D0 C7 45 E4 00 00 00 00 0F 83 AD 01 00 00
+
+. 1516 8050D11 18
+. C7 45 A4 00 00 00 00 8B 45 E4 85 C0 0F 85 A5 01 00 00
+***** EXPENSIVE 1518 226
+
+. 1517 8050D23 49
+. D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D9 55 CC D9 C9 D9 55 C8 D9 C9 D9 5D C4 D9 5D C0 C7 45 E0 00 00 00 00 8B 45 94 39 45 E0 0F 83 54 01 00 00
+***** EXPENSIVE 1519 227
+
+. 1518 8050D54 146
+. D9 45 D0 DA 4D 18 8B 75 14 8B 4D E8 D9 5D B0 0F AF F7 0F AF 4D 0C 89 F6 8B 45 08 D9 04 C8 D9 44 C8 04 8B 45 E8 03 45 DC 0F AF 45 0C 8B 55 08 D9 04 C2 D9 C9 D9 5D B8 D9 44 C2 04 8B 55 E8 8B 45 DC 8D 04 42 89 45 98 8B 55 08 0F AF 45 0C D9 04 C2 D9 C2 D9 CC D9 5D BC DC C3 DD 83 BC F8 FF FF D8 CC D9 44 C2 04 D9 C9 D8 6D BC D9 C3 D8 C2 D9 C9 D9 5D EC DD 83 BC F8 FF FF D8 C9 D8 6D B8 D9 5D B4 D9 CC DE E2 DE EA D9 C9 D8 4D B0 D9 45 EC D9 CA
+***** EXPENSIVE 1520 228
+
+. 1519 8050DE6 126
+. D8 4D B0 D9 45 B4 D9 CD D8 45 BC D9 CC D8 45 B8 D9 CD D8 C1 D9 C3 D9 CA D8 6D B4 D9 CC D8 C3 D9 CA DE E3 D9 CC D9 55 BC D9 CD D9 55 B8 D9 C9 D9 55 AC D9 CB D9 55 A8 8B 45 10 8B 55 94 D9 45 CC D9 45 C8 D9 CF D9 1C F0 D9 CA D9 5C F0 04 D9 CD D8 CC D9 C9 8D 04 3A D8 CA D9 CC D8 4D CC D9 CA D8 4D C8 D9 CC 0F AF 45 14 DE E1 D9 C9 8B 55 10 DE C3 D9 1C C2 D9 C9 D9 5C C2 04 D9 45 C4
+***** EXPENSIVE 1521 229
+
+. 1520 8050E64 68
+. 8B 55 94 D9 45 C0 8D 04 57 D8 CB D9 C9 D8 CA 0F AF 45 14 DE E1 8B 55 10 D9 1C C2 D9 45 C0 D9 45 C4 DE CB DE C9 DE C1 D9 5C C2 04 FF 45 E0 8B 45 94 03 4D 0C FF 45 E8 03 75 14 47 39 45 E0 0F 82 C4 FE FF FF
+
+. 1521 8050EA8 22
+. FF 45 E4 8B 55 D8 03 7D D4 83 45 A4 08 39 55 E4 0F 82 5A FE FF FF
+
+. 1522 8050EBE 10
+. 83 C4 64 5B 5E 31 C0 5F C9 C3
+
+. 1523 8050AC9 5
+. E9 56 FE FF FF
+
+. 1524 8050924 18
+. 83 C4 30 FF 45 EC 8B 4D F0 39 4D EC 0F 82 4A FF FF FF
+
+. 1525 8058780 24
+. 8B 45 B4 C7 45 BC 01 00 00 00 D1 E8 39 45 BC C7 45 B8 00 00 00 00 73 72
+
+. 1526 8058AF2 9
+. 83 FA 03 0F 84 E4 00 00 00
+
+. 1527 8058BDF 41
+. 8B 55 14 8B 8C BA 08 01 00 00 C1 E0 04 8D 14 08 52 51 FF 75 10 FF 75 EC FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 D4 02 00 00
+***** EXPENSIVE 1529 230
+
+. 1528 8058EDC 93
+. 55 89 E5 57 56 53 83 EC 7C B9 AB AA AA AA 8B 45 1C F7 E1 D1 EA 89 55 E4 8B 45 1C 31 D2 F7 75 18 89 45 E0 8B 45 18 F7 E1 E8 00 00 00 00 5B 81 C3 0B 82 00 00 D1 EA 89 55 DC DD 83 B4 F8 FF FF D9 FA C7 45 E8 00 00 00 00 8B 45 E0 DC 8B BC F8 FF FF 39 45 E8 DD 55 D0 0F 83 70 03 00 00
+***** EXPENSIVE 1530 231
+
+. 1529 8058F39 142
+. D9 E0 C7 45 88 00 00 00 00 31 FF 8D 74 12 FF 8B 55 E4 8D 04 3A 01 C2 8B 4D 0C 0F AF CF 89 95 7C FF FF FF 8B 55 08 DD 04 CA 0F AF 45 0C 8B 8D 7C FF FF FF DD 04 C2 0F AF 4D 0C DD 04 CA D9 C1 D8 C1 D9 C3 8B 45 14 0F AF 45 88 8B 4D 10 D8 C1 DD 1C C1 DC 8B BC F8 FF FF 8B 45 14 0F AF C6 DE EB D9 CA DD 1C C1 DE E1 8D 04 F5 08 00 00 00 0F AF 45 14 D8 C9 DD 1C 08 FF 45 E8 8B 45 18 8B 55 E0 03 75 18 01 45 88 03 7D DC 39 55 E8 72 81
+***** EXPENSIVE 1531 232
+
+. 1530 8058FC7 12
+. DD D8 83 7D DC 01 0F 84 CE 02 00 00
+
+. 1531 80592A1 8
+. 83 C4 7C 5B 5E 5F C9 C3
+
+. 1532 8058C08 5
+. E9 1E FF FF FF
+***** EXPENSIVE 1534 233
+
+. 1533 8055C1E 114
+. 8B 4D 10 8B 45 10 8B 55 08 C1 E1 04 8D 04 C2 89 45 D8 89 4D DC 89 4D D4 89 F6 8B 55 D8 8B 45 08 DD 04 08 8B 32 8B 7A 04 8B 45 0C DD 54 08 08 89 34 08 89 7C 08 04 8B 45 14 2B 45 EC DD 5D E0 0F AF 45 10 80 75 E7 80 C1 E0 04 8B 55 0C DD 45 E0 89 34 02 89 7C 02 04 DD 5C 02 08 8B 45 D4 01 45 D8 FF 45 EC 8B 45 14 2B 45 EC 03 4D DC 39 45 EC 72 A8
+
+. 1534 805373C 24
+. 8B 45 B0 C7 45 B8 01 00 00 00 D1 E8 39 45 B8 C7 45 B4 00 00 00 00 73 72
+
+. 1535 8053A56 9
+. 83 F9 03 0F 84 F1 00 00 00
+
+. 1536 8053B50 42
+. 8B 55 14 8B 75 D8 8B 8C B2 08 01 00 00 C1 E0 04 8D 14 08 52 51 57 FF 75 DC FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 06 03 00 00
+***** EXPENSIVE 1538 234
+
+. 1537 8053E80 93
+. 55 89 E5 57 56 53 81 EC 84 00 00 00 E8 00 00 00 00 5B 81 C3 83 D2 00 00 DD 83 B4 F8 FF FF D9 FA 8B 45 1C B9 03 00 00 00 31 D2 F7 F1 89 45 DC 31 D2 8B 45 1C F7 75 18 89 45 D8 31 D2 8B 45 18 F7 F1 C7 45 E8 00 00 00 00 DC 8B BC F8 FF FF 39 45 E8 DD 55 E0 89 45 D4 0F 83 9C 03 00 00
+***** EXPENSIVE 1539 235
+
+. 1538 8053EDD 150
+. D9 C0 DE C1 C7 45 88 00 00 00 00 31 FF 89 F6 8B 75 D8 8B 45 0C 0F AF C7 8B 4D 08 D1 E6 DD 04 C1 8D 54 3E FF 8B 45 0C 0F AF C2 8D 14 D5 08 00 00 00 DD 04 C1 0F AF 55 0C D9 C1 8B 45 88 D9 C3 DC 0C 0A D9 C9 D8 E2 D9 CA 03 45 DC D8 C0 8B 55 DC 01 C2 DE C3 8B 4D 14 D9 C1 0F AF 4D 88 0F AF 45 14 03 75 D8 89 95 7C FF FF FF D8 E1 D9 CB 8B 55 10 DD 1C CA D9 CA 01 F7 DD 1C C2 8B 75 D8 8B 95 7C FF FF FF FF 45 E8 8B 45 D4 0F AF 55 14 DE C1 8B 4D 10 01 75 88
+***** EXPENSIVE 1540 236
+
+. 1539 8053F73 12
+. 39 45 E8 DD 1C D1 0F 82 6D FF FF FF
+***** EXPENSIVE 1541 237
+
+. 1540 8053F7F 12
+. DD D8 83 7D D8 01 0F 84 E3 02 00 00
+
+. 1541 805426E 11
+. 81 C4 84 00 00 00 5B 5E 5F C9 C3
+
+. 1542 8053B7A 5
+. E9 11 FF FF FF
+
+. 1543 805ADE8 19
+. 8B 45 B0 C7 45 B4 01 00 00 00 D1 E8 31 FF 39 45 B4 72 69
+
+. 1544 805B14A 9
+. 83 FA 03 0F 84 DC 00 00 00
+
+. 1545 805B22F 38
+. 8B 4D 14 8B 94 B9 08 01 00 00 8D 0C C2 51 52 FF 75 10 FF 75 EC FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 C3 02 00 00
+***** EXPENSIVE 1547 238
+
+. 1546 805B518 93
+. 55 89 E5 57 56 53 83 EC 54 B9 AB AA AA AA 8B 45 1C F7 E1 D1 EA 89 55 E4 8B 45 1C 31 D2 F7 75 18 89 45 E0 8B 45 18 F7 E1 E8 00 00 00 00 5B 81 C3 CF 5B 00 00 D1 EA 89 55 DC DD 83 B4 F8 FF FF D9 FA C7 45 E8 00 00 00 00 8B 55 E0 DC 8B BC F8 FF FF 39 55 E8 D9 5D D8 0F 83 93 00 00 00
+***** EXPENSIVE 1548 239
+
+. 1547 805B575 145
+. D9 45 D8 D9 E0 31 FF 31 F6 89 F6 8B 4D E4 8D 04 31 01 C1 8B 55 0C 0F AF D6 0F AF 45 0C 89 4D A0 8B 4D 08 D9 04 91 D9 04 81 8B 4D A0 0F AF 4D 0C 8B 45 08 D9 04 88 D9 C1 8B 55 DC 8D 44 57 FF D8 C1 D9 C3 8B 55 14 0F AF D7 8B 4D 10 D8 C1 D9 1C 91 DD 83 BC F8 FF FF 8B 55 14 0F AF D0 DE C9 D9 CA DE E1 D9 C9 8D 04 85 04 00 00 00 0F AF 45 14 DE EA D8 CA D9 C9 D9 1C 91 FF 45 E8 D9 1C 08 8B 45 E0 03 7D 18 03 75 DC 39 45 E8 0F 82 7A FF FF FF
+***** EXPENSIVE 1549 240
+
+. 1548 805B606 12
+. DD D8 83 7D DC 01 0F 84 B9 02 00 00
+
+. 1549 805B8CB 8
+. 83 C4 54 5B 5E 5F C9 C3
+
+. 1550 805B255 5
+. E9 29 FF FF FF
+***** EXPENSIVE 1552 241
+
+. 1551 80584BD 79
+. 8B 75 10 89 F0 C1 E0 03 8D 3C B2 89 45 EC 90 8B 4D 08 8B 14 F1 D9 07 8B 45 0C 89 54 F0 04 D9 14 F0 8B 45 14 2B 45 F0 0F AF 45 10 8B 4D 0C 81 F2 00 00 00 80 FF 45 F0 D9 1C C1 89 54 C1 04 8B 45 14 2B 45 F0 03 75 10 03 7D EC 39 45 F0 72 C0
+
+. 1552 8055F4C 19
+. 8B 45 B4 C7 45 B8 01 00 00 00 D1 E8 31 FF 39 45 B8 72 65
+
+. 1553 8056251 9
+. 83 F9 03 0F 84 F2 00 00 00
+
+. 1554 805634C 39
+. 8B 4D D8 8B 7D 14 8B 94 8F 08 01 00 00 8D 0C C2 51 52 56 FF 75 DC FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 01 03 00 00
+***** EXPENSIVE 1556 242
+
+. 1555 8056674 90
+. 55 89 E5 57 56 53 83 EC 60 E8 00 00 00 00 5B 81 C3 92 AA 00 00 DD 83 B4 F8 FF FF D9 FA 8B 45 1C B9 03 00 00 00 31 D2 F7 F1 89 45 E0 31 D2 8B 45 1C F7 75 18 89 45 DC 31 D2 8B 45 18 F7 F1 C7 45 E8 00 00 00 00 DC 8B BC F8 FF FF 39 45 E8 D9 5D E4 89 45 D8 0F 83 9D 00 00 00
+***** EXPENSIVE 1557 243
+
+. 1556 80566CE 146
+. D9 45 E4 D8 C0 C7 45 A0 00 00 00 00 31 FF 8B 75 DC 8B 45 0C 0F AF C7 8B 4D 08 D1 E6 D9 04 81 8D 54 3E FF 8B 45 0C 0F AF C2 8D 14 95 04 00 00 00 D9 04 81 0F AF 55 0C D9 C1 8B 45 A0 D9 C3 D8 0C 0A D9 C9 D8 E2 D9 CA 03 45 E0 D8 C0 8B 55 E0 01 C2 DE C3 8B 4D 14 D9 C1 0F AF 4D A0 0F AF 45 14 03 75 DC 89 55 98 D8 E1 D9 CB 8B 55 10 D9 1C 8A D9 CA 01 F7 D9 1C 82 8B 75 DC 8B 55 98 FF 45 E8 8B 45 D8 0F AF 55 14 DE C1 8B 4D 10 01 75 A0 39 45 E8
+***** EXPENSIVE 1558 244
+
+. 1557 8056760 9
+. D9 1C 91 0F 82 73 FF FF FF
+***** EXPENSIVE 1559 245
+
+. 1558 8056769 12
+. DD D8 83 7D DC 01 0F 84 EB 02 00 00
+
+. 1559 8056A60 8
+. 83 C4 60 5B 5E 5F C9 C3
+
+. 1560 8056373 5
+. E9 13 FF FF FF
+
+. 1561 804D433 9
+. 83 F9 04 0F 84 7F 01 00 00
+
+. 1562 804D5BB 57
+. 8B 4D EC 8B 55 14 8B 8C 8A 08 01 00 00 C1 E0 04 89 4D E4 01 C1 89 CF 83 EC 08 01 C7 57 51 FF 75 E4 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 F4 04 00 00
+
+. 1563 804DAE8 94
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 8B 45 20 C1 E8 02 8B 4D 1C 31 D2 89 45 DC 8B 45 20 F7 F1 C7 45 E4 00 00 00 00 C1 E9 02 E8 00 00 00 00 5B 81 C3 FE 35 01 00 8D 14 49 39 45 E4 8B 7D 08 8B 75 10 C7 45 EC 00 00 00 00 C7 45 E8 00 00 00 00 89 45 D8 89 4D D4 89 55 D0 0F 83 78 02 00 00
+
+. 1564 804DB46 33
+. C7 85 54 FF FF FF 00 00 00 00 C7 85 50 FF FF FF 00 00 00 00 89 F6 8B 45 E4 85 C0 0F 85 64 02 00 00
+***** EXPENSIVE 1566 246
+
+. 1565 804DB67 59
+. DD 83 34 F6 FF FF DD 83 44 F6 FF FF D9 C9 DD 55 C8 D9 C9 DD 55 C0 D9 C9 DD 55 B8 D9 C9 DD 55 B0 D9 C9 DD 5D A8 DD 5D A0 C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 00 02 00 00
+***** EXPENSIVE 1567 247
+
+. 1566 804DBA2 177
+. 8B 45 DC D1 E0 89 45 94 03 45 DC 89 45 84 8B 55 D4 8B 45 0C D1 E2 C1 E0 04 89 95 6C FF FF FF 8B 4D E8 03 55 D4 89 85 58 FF FF FF 8B 45 14 DB 45 18 89 95 68 FF FF FF 0F AF 4D 14 8B 95 50 FF FF FF C1 E0 04 DD 9D 60 FF FF FF C1 E1 04 C1 E2 04 89 85 5C FF FF FF 8B 45 EC 03 45 DC 0F AF 45 0C C1 E0 04 DD 04 07 DD 9D 48 FF FF FF DD 44 07 08 8B 45 EC 03 45 94 0F AF 45 0C C1 E0 04 DD 04 07 DD 44 07 08 8B 45 EC 03 45 84 0F AF 45 0C C1 E0 04 DD 04 07 D9 CA DD 55 98 DD 85 48 FF FF FF D8 C3 DD 44 07 08 D9 C9 DD 9D 78 FF FF FF D9 C4 D8 C1
+***** EXPENSIVE 1568 248
+
+. 1567 804DC53 139
+. DD 04 17 D9 C9 DD 9D 70 FF FF FF DD 85 48 FF FF FF D9 CB D8 C1 D9 CB DE E5 DD 85 78 FF FF FF DD 44 17 08 D9 C9 D8 C4 D9 CD DD 55 88 D9 CD DD 1C 0E DC C4 DD 85 70 FF FF FF D9 CA DC 65 98 D9 C9 DC 65 88 D9 CA D8 C5 D9 CF DE E3 D9 CD DC 8D 60 FF FF FF D9 CA DC 8D 60 FF FF FF D9 CE DD 5C 0E 08 D9 C4 D9 C1 D9 C9 8B 45 E8 D8 E7 D9 C9 D8 C3 D9 CE DE C7 D9 C9 DE E2 03 45 D4 DD 45 C8 DD 45 C0 0F AF 45 14 D8 CE D9 C9 D8 CA
+***** EXPENSIVE 1569 249
+
+. 1568 804DCDE 141
+. C1 E0 04 DE E1 D9 CB DC A5 78 FF FF FF D9 CC DC A5 70 FF FF FF D9 CB DD 1C 06 D9 CC DC 4D C8 D9 CC DC 4D C0 DE C4 D9 CB DD 5C 06 08 8B 45 E8 03 85 6C FF FF FF DD 45 B8 DD 45 B0 D8 CA D9 C9 0F AF 45 14 D8 CB D9 CA DC 4D B8 D9 CB DC 4D B0 D9 CA C1 E0 04 DE E1 D9 CA DE C1 D9 C9 DD 1C 06 DD 5C 06 08 8B 45 E8 03 85 68 FF FF FF DD 45 A8 DD 45 A0 D9 C9 0F AF 45 14 D8 CB D9 C9 D8 CA D9 CB DC 4D A0 D9 CA DC 4D A8 D9 C9 C1 E0 04
+***** EXPENSIVE 1570 250
+
+. 1569 804DD6B 55
+. DE E3 DE C1 D9 C9 DD 1C 06 DD 5C 06 08 8B 45 0C 01 85 50 FF FF FF FF 45 E0 8B 45 D4 03 95 58 FF FF FF FF 45 EC 03 8D 5C FF FF FF FF 45 E8 39 45 E0 0F 82 56 FE FF FF
+
+. 1570 804DDA2 28
+. 8B 55 D0 FF 45 E4 8B 45 D8 01 55 E8 83 85 54 FF FF FF 10 39 45 E4 0F 82 9E FD FF FF
+
+. 1571 804DDBE 13
+. 81 C4 AC 00 00 00 5B 5E 31 C0 5F C9 C3
+
+. 1572 804D5F4 5
+. E9 87 FE FF FF
+
+. 1573 80508D7 9
+. 83 F9 04 0F 84 7B 01 00 00
+
+. 1574 8050A5B 60
+. 8B 7D EC 8B 55 14 8B BC BA 08 01 00 00 C1 E0 03 89 7D E4 01 C7 83 EC 08 89 7D E0 01 C7 57 FF 75 E0 FF 75 E4 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 99 04 00 00
+
+. 1575 8050F30 91
+. 55 89 E5 57 56 53 83 EC 74 8B 45 20 C1 E8 02 8B 4D 1C 31 D2 89 45 E0 8B 45 20 F7 F1 C7 45 E8 00 00 00 00 C1 E9 02 E8 00 00 00 00 5B 81 C3 B9 01 01 00 8D 14 49 39 45 E8 8B 7D 08 8B 75 10 C7 45 F0 00 00 00 00 C7 45 EC 00 00 00 00 89 45 DC 89 4D D8 89 55 D4 0F 83 0C 02 00 00
+
+. 1576 8050F8B 20
+. C7 45 80 00 00 00 00 89 F6 8B 45 E8 85 C0 0F 85 02 02 00 00
+***** EXPENSIVE 1578 251
+
+. 1577 8050F9F 59
+. D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D9 55 D0 D9 C9 D9 55 CC D9 C9 D9 55 C8 D9 C9 D9 55 C4 D9 C9 D9 5D C0 D9 5D BC C7 45 E4 00 00 00 00 8B 45 D8 39 45 E4 0F 83 A4 01 00 00
+***** EXPENSIVE 1579 252
+
+. 1578 8050FDA 145
+. 8B 55 D8 D1 E2 8B 45 E0 D1 E0 89 55 8C 03 55 D8 DB 45 18 89 45 B8 89 55 88 03 45 E0 8B 4D EC 8B 55 F0 D9 5D 84 89 45 B4 0F AF 4D 14 0F AF 55 0C 89 F6 8B 45 F0 03 45 E0 0F AF 45 0C D9 04 C7 D9 44 C7 04 8B 45 F0 03 45 B8 0F AF 45 0C D9 04 C7 D9 44 C7 04 8B 45 F0 03 45 B4 0F AF 45 0C D9 44 C7 04 D9 5D B0 D9 04 D7 D8 C2 D9 04 C7 D9 C9 D9 5D AC D9 C4 D9 44 D7 04 D8 C3 D9 C9 D8 C2 D9 CE DE E2 D9 45 AC D9 C9 D9 5D A8 D8 C5 D9 45 B0 D9 CB
+***** EXPENSIVE 1580 253
+
+. 1579 805106B 127
+. D8 6C D7 04 D9 04 D7 D9 CC D8 C6 D9 CA D9 1C CE D9 45 A8 D8 C2 D9 CC DE E5 D9 CD D8 65 B0 D8 4D 84 D9 CB D9 5C CE 04 D9 C3 D8 E3 D9 CA D8 4D 84 D9 CE D8 6D AC D9 CA D9 5D A0 D9 C4 D8 C6 D9 CD D9 5D A4 D9 C9 D9 55 9C D9 CD D8 6D A4 D9 5D 90 D9 CA DE C1 8B 45 EC D9 45 D0 D9 45 CC 03 45 D8 D8 CC D9 C9 D8 4D A0 0F AF 45 14 DE E1 D9 1C C6 D9 45 CC D8 4D A0 D9 CB D8 4D D0 DE C3 D9 C9
+***** EXPENSIVE 1581 254
+
+. 1580 80510EA 136
+. D8 6D A8 D9 C9 D9 5D 94 D9 C9 D9 5C C6 04 D9 45 C8 8B 45 EC D9 45 C4 D9 45 C8 D9 45 C4 D9 CA 03 45 8C D8 CC D9 CB D8 CD D9 C9 D8 CC D9 CA DE CD 0F AF 45 14 DE E2 DE C3 D9 1C C6 D9 C9 D9 5C C6 04 D9 45 C0 8B 45 EC D9 45 BC D9 C9 03 45 88 D8 4D 94 D9 C9 D8 4D 90 0F AF 45 14 DE E9 D9 1C C6 D9 45 BC D9 45 C0 D8 4D 90 D9 C9 D8 4D 94 DE C1 D9 C9 D9 5D 98 D9 5C C6 04 FF 45 E4 8B 45 D8 03 55 0C FF 45 F0 03 4D 14
+
+. 1581 8051172 12
+. FF 45 EC 39 45 E4 0F 82 8E FE FF FF
+
+. 1582 805117E 25
+. 8B 55 D4 FF 45 E8 8B 45 DC 01 55 EC 83 45 80 08 39 45 E8 0F 82 FD FD FF FF
+
+. 1583 8051197 10
+. 83 C4 74 5B 5E 31 C0 5F C9 C3
+
+. 1584 8050A97 5
+. E9 88 FE FF FF
+
+. 1585 8058AFB 9
+. 83 FA 04 0F 84 A9 00 00 00
+
+. 1586 8058BAD 48
+. 8B 75 14 8B 8C BE 08 01 00 00 C1 E0 04 8D 14 08 83 EC 0C 8D 34 10 56 52 51 FF 75 10 FF 75 EC FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 D3 06 00 00
+
+. 1587 80592B0 68
+. 55 89 E5 57 56 53 81 EC BC 00 00 00 8B 45 1C 89 C2 C1 EA 02 89 55 E4 31 D2 F7 75 18 8B 4D 18 C7 45 E8 00 00 00 00 C1 E9 02 E8 00 00 00 00 5B 81 C3 36 7E 00 00 39 45 E8 89 45 E0 89 4D DC 0F 83 D9 00 00 00
+***** EXPENSIVE 1589 255
+
+. 1588 80592F4 171
+. D1 E1 89 CF 89 4D D8 4F C7 85 54 FF FF FF 00 00 00 00 C7 85 50 FF FF FF 00 00 00 00 8B 95 50 FF FF FF 03 55 E4 8B 75 E4 8D 0C 16 01 CE 8B 45 0C 0F AF 85 50 FF FF FF 0F AF 55 0C 0F AF 4D 0C 89 B5 3C FF FF FF 8B 75 08 DD 04 C6 DD 04 D6 DD 04 CE 8B B5 3C FF FF FF 0F AF 75 0C 8B 45 08 D9 C2 DD 04 F0 D9 C9 D8 C2 D9 C3 D8 C2 D9 CC DE E2 8B 45 14 D9 C0 0F AF 85 54 FF FF FF 8B 75 10 D8 C4 DD 1C C6 D9 CC 8B 45 14 0F AF C7 8B 4D D8 DE E2 D9 C9 DD 1C C6 8D 14 39 8D 04 FD 08 00 00 00 0F AF 45 14 0F AF 55 14 D9 E0 D9 CA
+***** EXPENSIVE 1590 256
+
+. 1589 805939F 46
+. DE E1 D9 C9 DD 1C 30 DD 1C D6 8B 45 18 8B 55 DC 03 7D 18 01 85 54 FF FF FF 01 95 50 FF FF FF FF 45 E8 8B 4D E0 39 4D E8 0F 82 43 FF FF FF
+
+. 1590 80593CD 10
+. 83 7D DC 01 0F 84 20 04 00 00
+
+. 1591 80597F7 11
+. 81 C4 BC 00 00 00 5B 5E 5F C9 C3
+
+. 1592 8058BDD 2
+. EB C9
+
+. 1593 8058BA8 5
+. 83 C4 30 EB 81
+
+. 1594 8058B2E 10
+. 47 3B 7D F0 0F 82 70 FF FF FF
+
+. 1595 8053A5F 9
+. 83 F9 04 0F 84 B5 00 00 00
+
+. 1596 8053B1D 49
+. 8B 75 D8 8B 55 14 8B 8C B2 08 01 00 00 C1 E0 04 8D 14 08 83 EC 0C 8D 34 10 56 52 51 57 FF 75 DC FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 32 07 00 00
+
+. 1597 8054280 76
+. 55 89 E5 57 56 53 81 EC C4 00 00 00 B9 04 00 00 00 31 D2 8B 45 1C F7 F1 89 45 E4 31 D2 8B 45 1C F7 75 18 89 45 E0 31 D2 8B 45 18 F7 F1 C7 45 E8 00 00 00 00 E8 00 00 00 00 5B 81 C3 5B CE 00 00 39 45 E8 89 45 DC 0F 83 B2 00 00 00
+***** EXPENSIVE 1599 257
+
+. 1598 80542CC 151
+. 8B 55 E0 D1 E2 89 55 D8 31 FF 89 F6 8D 14 BD 00 00 00 00 8B 4D D8 8B 75 08 8D 44 11 FF 0F AF 55 0C DD 04 D6 8B 55 0C 0F AF D0 8D 0C 01 8D 04 C5 08 00 00 00 DD 04 D6 0F AF 45 0C 0F AF 4D 0C 8B 55 E4 DD 04 CE DD 04 30 D9 C3 8D 04 3A 8D 14 02 D8 C2 D9 CC 8B 4D E4 01 D1 DE E2 D9 CA D8 C0 D9 C3 8B 75 14 0F AF F7 89 8D 34 FF FF FF D8 C1 8B 4D 10 DD 1C F1 D9 CA D8 C0 D9 C1 8B B5 34 FF FF FF 0F AF 75 14 0F AF 45 14 0F AF 55 14 D8 E1 D9 CC DE E3 DE C1 D9 CA
+***** EXPENSIVE 1600 258
+
+. 1599 8054363 27
+. DD 1C C1 FF 45 E8 DD 1C D1 DD 1C F1 8B 75 DC 03 7D E0 39 75 E8 0F 82 5A FF FF FF
+
+. 1600 805437E 10
+. 83 7D E0 01 0F 84 09 04 00 00
+
+. 1601 8054791 11
+. 81 C4 C4 00 00 00 5B 5E 5F C9 C3
+
+. 1602 8053B4E 2
+. EB C5
+
+. 1603 8053B15 8
+. 83 C4 30 E9 76 FF FF FF
+
+. 1604 8053A93 15
+. FF 45 D8 8B 55 D4 39 55 D8 0F 82 62 FF FF FF
+
+. 1605 805B153 9
+. 83 FA 04 0F 84 A1 00 00 00
+
+. 1606 805B1FD 48
+. 8B 75 14 8B 94 BE 08 01 00 00 C1 E0 03 8D 0C 10 83 EC 0C 8D 34 08 56 51 52 FF 75 10 FF 75 EC FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 A7 06 00 00
+
+. 1607 805B8D4 68
+. 55 89 E5 57 56 53 81 EC B0 00 00 00 8B 45 1C 89 C2 C1 EA 02 89 55 E4 31 D2 F7 75 18 8B 4D 18 C7 45 E8 00 00 00 00 C1 E9 02 E8 00 00 00 00 5B 81 C3 12 58 00 00 39 45 E8 89 45 E0 89 4D DC 0F 83 E8 00 00 00
+***** EXPENSIVE 1609 259
+
+. 1608 805B918 172
+. D1 E1 89 CF 89 4D D0 4F C7 85 50 FF FF FF 00 00 00 00 C7 85 4C FF FF FF 00 00 00 00 8B 95 4C FF FF FF 03 55 E4 8B 75 E4 8D 0C 16 01 CE 8B 45 0C 0F AF 85 4C FF FF FF 89 B5 44 FF FF FF 8B 75 08 D9 04 86 0F AF 55 0C 0F AF 4D 0C D9 04 96 D9 04 8E D9 CA D9 5D D8 8B B5 44 FF FF FF 0F AF 75 0C 8B 45 08 D9 45 D8 D9 04 B0 D9 C9 D8 C3 8B 45 D0 D9 C2 D8 C2 D9 CB 8D 14 38 DE E2 8B 45 14 D9 C0 8B 4D 10 0F AF 85 50 FF FF FF D8 C3 D9 1C 81 D9 CB 8B 45 14 0F AF C7 D8 6D D8 D9 14 81 D9 C9 8D 04 BD 04 00 00 00 0F AF 45 14 D9 E0
+***** EXPENSIVE 1610 260
+
+. 1609 805B9C4 60
+. D9 1C 08 D9 C9 0F AF 55 14 DE EA D9 C9 8B 45 18 D9 14 91 D9 C9 03 7D 18 D9 5D D8 D9 5D D4 01 85 50 FF FF FF 8B 55 DC FF 45 E8 8B 4D E0 01 95 4C FF FF FF 39 4D E8 0F 82 34 FF FF FF
+
+. 1610 805BA00 10
+. 83 7D DC 01 0F 84 5F 04 00 00
+
+. 1611 805BE69 11
+. 81 C4 B0 00 00 00 5B 5E 5F C9 C3
+
+. 1612 805B22D 2
+. EB C9
+
+. 1613 805B1F8 5
+. 83 C4 30 EB 89
+
+. 1614 805B186 10
+. 47 3B 7D F0 0F 82 70 FF FF FF
+
+. 1615 805625A 9
+. 83 F9 04 0F 84 B6 00 00 00
+
+. 1616 8056319 49
+. 8B 4D D8 8B 7D 14 8B 94 8F 08 01 00 00 C1 E0 03 8D 0C 10 83 EC 0C 8D 3C 08 57 51 52 56 FF 75 DC FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 1E 07 00 00
+
+. 1617 8056A68 76
+. 55 89 E5 57 56 53 81 EC C0 00 00 00 B9 04 00 00 00 31 D2 8B 45 1C F7 F1 89 45 E4 31 D2 8B 45 1C F7 75 18 89 45 E0 31 D2 8B 45 18 F7 F1 C7 45 E8 00 00 00 00 E8 00 00 00 00 5B 81 C3 73 A6 00 00 39 45 E8 89 45 DC 0F 83 B8 00 00 00
+***** EXPENSIVE 1619 261
+
+. 1618 8056AB4 153
+. 8B 55 E0 D1 E2 89 55 D8 31 FF 89 F6 8D 14 BD 00 00 00 00 8B 4D D8 8B 75 08 8D 44 11 FF 0F AF 55 0C D9 04 96 8B 55 0C 0F AF D0 8D 0C 01 8D 04 85 04 00 00 00 0F AF 45 0C D9 04 30 D9 04 96 D9 C9 0F AF 4D 0C 8B 55 E4 8D 04 3A D9 5D D4 D9 C1 D9 04 8E 8D 14 02 DC C1 8B 4D E4 01 D1 DE EB D9 C9 D8 C0 D9 C1 D9 45 D4 8B 75 14 0F AF F7 D8 C0 D9 C9 89 8D 34 FF FF FF D8 C2 8B 4D 10 D9 1C B1 D9 C0 8B B5 34 FF FF FF 0F AF 75 14 0F AF 45 14 D8 EC D9 CB 0F AF 55 14 DE E2
+***** EXPENSIVE 1620 262
+
+. 1619 8056B4D 31
+. DE C3 D9 C9 D9 1C 81 FF 45 E8 D9 1C 91 D9 1C B1 8B 75 DC 03 7D E0 39 75 E8 0F 82 54 FF FF FF
+
+. 1620 8056B6C 10
+. 83 7D E0 01 0F 84 65 04 00 00
+
+. 1621 8056FDB 11
+. 81 C4 C0 00 00 00 5B 5E 5F C9 C3
+
+. 1622 805634A 2
+. EB C5
+
+. 1623 8056311 8
+. 83 C4 30 E9 75 FF FF FF
+
+. 1624 805628E 15
+. FF 45 D8 8B 4D D4 39 4D D8 0F 82 63 FF FF FF
+
+. 1625 804D43C 9
+. 83 F9 05 0F 84 31 01 00 00
+
+. 1626 804D576 64
+. 8B 55 EC 8B 4D 14 8B 94 91 08 01 00 00 C1 E0 04 89 55 E4 01 C2 89 D7 01 C7 89 55 E0 8D 14 38 50 52 57 FF 75 E0 FF 75 E4 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 AA 08 00 00
+
+. 1627 804DE60 115
+. 55 89 E5 57 56 53 81 EC 54 01 00 00 B9 CD CC CC CC 8B 45 20 F7 E1 C1 EA 02 89 55 DC 8B 45 20 31 D2 F7 75 1C 89 45 D8 8B 45 1C F7 E1 68 2F 1B F4 3F 89 95 D4 FE FF FF C1 EA 02 E8 00 00 00 00 5B 81 C3 75 32 01 00 89 55 D4 68 E0 F0 9C 76 C1 E2 02 89 85 D0 FE FF FF 89 55 D0 8B 75 08 8B 7D 10 C7 45 EC 00 00 00 00 C7 45 E8 00 00 00 00 E8 5D A8 FF FF
+***** EXPENSIVE 1629 263
+
+. 1628 804DED3 20
+. 58 5A 68 2F 1B E4 3F 68 E0 F0 9C 76 DD 5D C8 E8 49 A8 FF FF
+***** EXPENSIVE 1630 264
+
+. 1629 804DEE7 25
+. C7 45 E4 00 00 00 00 8B 55 D8 83 C4 10 39 55 E4 DD 5D C0 0F 83 95 04 00 00
+
+. 1630 804DF00 31
+. C7 85 DC FE FF FF 00 00 00 00 C7 85 D8 FE FF FF 00 00 00 00 8B 45 E4 85 C0 0F 85 80 04 00 00
+***** EXPENSIVE 1632 265
+
+. 1631 804DF1F 71
+. DD 83 34 F6 FF FF DD 83 44 F6 FF FF D9 C1 D9 C1 D9 CB DD 55 B8 D9 CA DD 55 B0 D9 CA DD 55 A8 D9 CA DD 55 A0 D9 CA DD 5D 98 D9 C9 DD 5D 90 DD 5D 88 DD 5D 80 C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 13 04 00 00
+***** EXPENSIVE 1633 266
+
+. 1632 804DF66 198
+. 8B 55 D4 D1 E2 8B 45 DC 89 95 FC FE FF FF 03 55 D4 D1 E0 89 95 F8 FE FF FF 8B 8D D8 FE FF FF 8B 55 E8 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF DB 45 18 D9 CA 89 85 44 FF FF FF 0F AF 55 14 03 45 DC C1 E1 04 DD 9D E0 FE FF FF DD 9D F0 FE FF FF DD 9D E8 FE FF FF 89 85 40 FF FF FF C1 E2 04 89 8D B4 FE FF FF 89 F6 8B 85 B4 FE FF FF DD 04 06 DD 44 06 08 8B 45 EC 03 45 DC 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 45 EC 03 85 44 FF FF FF 0F AF 45 0C C1 E0 04 DD 44 06 08 D9 C9 DD 9D 48 FF FF FF DD 04 06 8B 45 EC 03 85 40 FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 4D EC 8B 45 DC 8D 04 81
+***** EXPENSIVE 1634 267
+
+. 1633 804E02C 186
+. DD 9D 38 FF FF FF D9 CC 89 85 D0 FE FF FF 0F AF 45 0C DD 9D 58 FF FF FF C1 E0 04 DD 85 38 FF FF FF D9 CB DD 95 50 FF FF FF D9 CD DD 9D 60 FF FF FF D9 CA D8 C1 DD 04 06 DC C5 D9 C9 DD 9D 30 FF FF FF DD 85 50 FF FF FF DD 44 06 08 D9 CE DD 9D C0 FE FF FF DE E1 DD 85 48 FF FF FF D9 C3 D9 CA DD 9D 28 FF FF FF D8 C5 D9 C9 D8 C4 DD 85 C0 FE FF FF DD 85 30 FF FF FF D9 C9 D8 C2 D9 CF DC AD 48 FF FF FF D9 C9 D8 C3 D9 CF DD 9D 18 FF FF FF DD 9D 20 FF FF FF D9 CB DE E4 D9 C9 DC A5 38 FF FF FF D9 CC DD 9D 10 FF FF FF DD 85 E0 FE FF FF D9 FA DC 8D F0 FE FF FF D9 CA
+***** EXPENSIVE 1635 268
+
+. 1634 804E0E6 175
+. DC AD C0 FE FF FF DE CA DD 85 E0 FE FF FF D9 FA D9 C9 DC A5 30 FF FF FF D9 C9 DC 8D F0 FE FF FF DE C9 DD 85 18 FF FF FF DC 8D F0 FE FF FF DC AD 60 FF FF FF DD 85 10 FF FF FF DC 8D F0 FE FF FF D9 C1 D9 C9 DC AD 58 FF FF FF D9 C9 D8 C4 DD 9D 08 FF FF FF D9 C0 D8 C3 D9 C9 DE E3 DD 45 C8 DD 45 C0 D8 CE D9 C9 DC 8D 28 FF FF FF D9 CA DD 9D 00 FF FF FF DE C1 DD 45 C8 DD 45 C0 D8 CF D9 C9 DC 8D 20 FF FF FF D9 CB DE E5 DE C2 DD 45 C0 D9 CC DD 9D B8 FE FF FF D9 CD DC 4D C8 DD 85 60 FF FF FF DD 45 C0 D9 CD DC 8D 20 FF FF FF D9 CE
+***** EXPENSIVE 1636 269
+
+. 1635 804E195 180
+. DC 4D C8 D9 C9 DC 85 18 FF FF FF D9 CD DC 8D 28 FF FF FF D9 CE DE E2 D9 C9 DC 8D E8 FE FF FF D9 CD DE E1 D9 CB DD 9D 60 FF FF FF DD 85 58 FF FF FF DD 85 B8 FE FF FF D9 C9 DC 85 10 FF FF FF D9 C9 D8 E5 D9 CC DC 8D E8 FE FF FF D9 CE DC 8D E8 FE FF FF D9 CA DC 8D E8 FE FF FF D9 C9 DD 9D 58 FF FF FF DD 85 08 FF FF FF DD 85 00 FF FF FF D9 CD DD 9D 78 FF FF FF D9 C3 D8 C7 DD 9D 70 FF FF FF DD 85 B8 FE FF FF DE C6 D9 CB DE E6 DD 85 60 FF FF FF DD 85 58 FF FF FF D9 C9 8B 45 E8 DD 1C 17 DD 5C 17 08 DC EA D9 CB 03 45 D4 D8 C1 DD 45 B8 DD 45 B0
+***** EXPENSIVE 1637 270
+
+. 1636 804E249 157
+. 0F AF 45 14 D8 CA D9 C9 D8 CC D9 CA DC 4D B8 D9 CC DC 4D B0 D9 CA C1 E0 04 DE E1 D9 CB DE C1 D9 CA DD 1C 07 D9 C9 DD 5C 07 08 8B 45 E8 03 85 FC FE FF FF DD 45 A8 DD 45 A0 0F AF 45 14 DC 8D 70 FF FF FF D9 C9 DC 8D 78 FF FF FF C1 E0 04 DE E1 DD 1C 07 DD 45 A8 DD 45 A0 DC 8D 78 FF FF FF D9 C9 DC 8D 70 FF FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 F8 FE FF FF DD 45 98 DD 45 90 0F AF 45 14 D8 CD D9 C9 D8 CC D9 CB C1 E0 04 DC 85 08 FF FF FF D9 CB DE E1 D9 CA DD 9D 68 FF FF FF
+***** EXPENSIVE 1638 271
+
+. 1637 804E2E6 147
+. DD 85 00 FF FF FF D9 CA DD 1C 07 D9 CB DC 4D 98 D9 CA DC 4D 90 DE C2 D9 C9 8B 4D E8 DD 5C 07 08 8B 45 D4 8D 04 81 DE E1 DD 45 88 DD 45 80 D8 CA D9 C9 89 85 D0 FE FF FF DC 8D 68 FF FF FF 0F AF 45 14 C1 E0 04 DE E1 DD 1C 07 DD 45 80 D9 C9 DC 4D 88 D9 C9 DC 8D 68 FF FF FF DE C1 DD 5C 07 08 8B 45 0C C1 E0 04 01 85 B4 FE FF FF 8B 45 0C 41 01 85 D8 FE FF FF 8B 45 14 C1 E0 04 89 4D E8 FF 45 E0 8B 4D D4 FF 45 EC 01 C2 39 4D E0 0F 82 53 FC FF FF
+
+. 1638 804E379 28
+. 8B 45 D0 FF 45 E4 8B 55 D8 01 45 E8 83 85 DC FE FF FF 10 39 55 E4 0F 82 7F FB FF FF
+
+. 1639 804E395 10
+. 8D 65 F4 5B 5E 31 C0 5F C9 C3
+
+. 1640 804D5B6 5
+. E9 C5 FE FF FF
+
+. 1641 80508E0 9
+. 83 F9 05 0F 84 2D 01 00 00
+
+. 1642 8050A16 64
+. 8B 4D 14 8B 55 EC 8B 94 91 08 01 00 00 C1 E0 03 89 55 E4 01 C2 89 D7 01 C7 51 89 55 E0 8D 14 38 52 57 FF 75 E0 FF 75 E4 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 DA 07 00 00
+
+. 1643 8051230 115
+. 55 89 E5 57 56 53 81 EC F4 00 00 00 B9 CD CC CC CC 8B 45 20 F7 E1 C1 EA 02 89 55 D8 8B 45 20 31 D2 F7 75 1C 89 45 D4 8B 45 1C F7 E1 68 2F 1B F4 3F 89 95 14 FF FF FF C1 EA 02 E8 00 00 00 00 5B 81 C3 A5 FE 00 00 89 55 D0 68 E0 F0 9C 76 C1 E2 02 89 85 10 FF FF FF 89 55 CC 8B 75 08 8B 7D 10 C7 45 E8 00 00 00 00 C7 45 E4 00 00 00 00 E8 8D 74 FF FF
+***** EXPENSIVE 1645 272
+
+. 1644 80512A3 20
+. 58 5A 68 2F 1B E4 3F 68 E0 F0 9C 76 D9 5D C8 E8 79 74 FF FF
+***** EXPENSIVE 1646 273
+
+. 1645 80512B7 25
+. C7 45 E0 00 00 00 00 8B 55 D4 83 C4 10 39 55 E0 D9 5D C4 0F 83 9E 04 00 00
+
+. 1646 80512D0 23
+. C7 85 18 FF FF FF 00 00 00 00 89 F6 8B 45 E0 85 C0 0F 85 91 04 00 00
+***** EXPENSIVE 1648 274
+
+. 1647 80512E7 69
+. D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D9 55 C0 D9 C9 D9 55 BC D9 C9 D9 55 B8 D9 C9 D9 55 B4 D9 C9 D9 55 B0 D9 C9 D9 55 AC D9 C9 D9 5D A8 D9 5D A4 C7 45 DC 00 00 00 00 8B 45 D0 39 45 DC 0F 83 26 04 00 00
+***** EXPENSIVE 1649 275
+
+. 1648 805132C 190
+. 8B 45 D8 D1 E0 89 85 7C FF FF FF 8B 55 D0 03 45 D8 D1 E2 89 85 74 FF FF FF 8B 45 E8 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF DB 45 18 D9 CA 89 95 34 FF FF FF 8B 4D E4 03 55 D0 0F AF 45 0C DD 9D 28 FF FF FF DD 9D 20 FF FF FF D9 9D 1C FF FF FF 89 95 30 FF FF FF 0F AF 4D 14 89 85 0C FF FF FF 90 8B 45 E8 03 45 D8 0F AF 45 0C D9 04 C6 D9 44 C6 04 D9 C9 8B 45 E8 03 85 7C FF FF FF 0F AF 45 0C D9 5D 84 D9 04 C6 D9 44 C6 04 8B 45 E8 8B 95 0C FF FF FF 03 85 74 FF FF FF D9 04 D6 D9 44 D6 04 D9 C9 0F AF 45 0C D9 5D 8C D9 5D 88 D9 04 C6 D9 44 C6 04 D9 C9 8B 55 E8 8B 45 D8 8D 04 82
+***** EXPENSIVE 1650 276
+
+. 1649 80513EA 177
+. 89 85 10 FF FF FF 0F AF 45 0C D9 9D 78 FF FF FF D9 9D 70 FF FF FF D9 04 C6 D9 44 C6 04 D9 45 84 D9 CD D9 55 80 D9 CD D8 C2 D9 CD D8 C1 D9 CA D8 6D 84 D9 CA D9 9D 6C FF FF FF D9 85 70 FF FF FF D9 85 78 FF FF FF D9 CB D9 9D 68 FF FF FF D9 C9 D8 6D 80 D9 CA D8 C4 D9 C9 D8 C3 D9 CA D9 9D 64 FF FF FF D9 C4 D9 85 6C FF FF FF D9 C9 D8 C2 D9 CD D8 A5 78 FF FF FF D9 CC D8 A5 70 FF FF FF D9 C9 D8 C3 D9 CD D9 9D 58 FF FF FF D9 CB D9 95 60 FF FF FF D9 CB D9 9D 5C FF FF FF D9 CB D9 9D 54 FF FF FF DD 85 28 FF FF FF D9 FA DC 8D 20 FF FF FF
+***** EXPENSIVE 1651 277
+
+. 1650 805149B 186
+. D9 CC DE E3 D9 CB DE CA D9 C9 D9 5D EC D9 45 EC DD 85 28 FF FF FF D9 FA D9 CB D8 AD 6C FF FF FF D9 CB DC 8D 20 FF FF FF D8 CB D9 5D EC DD 85 20 FF FF FF D8 8D 58 FF FF FF D8 6D 8C D9 9D 50 FF FF FF DD 85 20 FF FF FF D8 8D 54 FF FF FF D8 6D 88 D9 9D 4C FF FF FF D9 45 C8 D8 8D 68 FF FF FF D9 9D 38 FF FF FF D9 85 50 FF FF FF D9 45 C4 D9 45 EC D9 CA D8 C3 D9 85 4C FF FF FF D9 CC D8 AD 50 FF FF FF D9 CA D8 CD D8 85 38 FF FF FF D9 CC D8 C3 D9 CA D9 9D 40 FF FF FF D9 85 1C FF FF FF D9 CB D8 AD 4C FF FF FF D9 CB DE CC D9 C9 D9 9D 44 FF FF FF D9 45 C4 D9 45 C8
+***** EXPENSIVE 1652 278
+
+. 1651 8051555 181
+. D9 C9 D8 8D 5C FF FF FF D9 CA D9 9D 48 FF FF FF D9 CA D9 9D 3C FF FF FF D9 C9 D8 8D 64 FF FF FF D9 45 C4 D9 45 C8 D9 CC D9 9D 38 FF FF FF D9 C9 DE C2 D8 8D 68 FF FF FF D9 CA DE CB D9 45 C4 D9 45 C8 D8 8D 5C FF FF FF D9 CB DE E4 D8 8D 64 FF FF FF D9 45 8C D9 C9 DE E3 D8 85 58 FF FF FF D9 C9 D8 8D 1C FF FF FF D9 CB D8 8D 1C FF FF FF D9 CA D8 8D 1C FF FF FF D9 CC D9 9D 6C FF FF FF D9 5D 8C D9 85 48 FF FF FF D8 E2 D9 5D A0 D9 45 88 D9 85 40 FF FF FF D9 C9 D8 85 54 FF FF FF D9 C9 D8 E4 D9 85 3C FF FF FF D9 CD D8 85 40 FF FF FF D9 CA D9 5D 88
+***** EXPENSIVE 1653 279
+
+. 1652 805160A 152
+. D9 CC D8 C2 D9 85 44 FF FF FF D9 CB D8 AD 3C FF FF FF D9 CA D9 5D 98 D9 45 8C D9 1C CF D9 CA D8 85 38 FF FF FF D9 C9 D9 5D 94 8B 45 E4 D9 45 C0 D9 45 BC 03 45 D0 D8 CA D9 C9 D8 4D A0 D9 CD 0F AF 45 14 D9 5D 9C DE EC D9 45 88 D9 5C CF 04 D9 CB D9 1C C7 D9 45 BC D8 4D A0 D9 CB D8 4D C0 DE C3 D9 CA D9 5C C7 04 D9 45 B8 8B 45 E4 D9 45 B4 D9 CA D8 85 48 FF FF FF D9 CA 03 85 34 FF FF FF D8 CB D9 C9 D8 4D 9C 0F AF 45 14 DE E1 D9 C9 D9 5D 90 D9 85 44 FF FF FF
+***** EXPENSIVE 1654 280
+
+. 1653 80516A2 146
+. D8 A5 38 FF FF FF D9 C9 D9 1C C7 D9 45 B4 D9 CA D8 4D B8 D9 CA D8 4D 9C DE C2 D9 C9 D9 5C C7 04 D9 45 B0 8B 45 E4 D9 45 AC 03 85 30 FF FF FF D8 4D 94 D9 C9 D8 4D 98 0F AF 45 14 DE E1 D9 1C C7 D9 45 B0 D9 45 AC D8 4D 98 D9 C9 D8 4D 94 DE C1 D9 5C C7 04 8B 55 E4 8B 45 D0 D9 45 A8 D9 45 A4 D9 C9 8D 04 82 D8 4D 90 D9 C9 D8 CA 89 85 10 FF FF FF DE E9 0F AF 45 14 D9 1C C7 D9 45 A4 D9 C9 D8 4D A8 D9 C9 D8 4D 90 DE C1 42 D9 5C C7 04 8B 45 0C
+
+. 1654 8051734 30
+. 01 85 0C FF FF FF FF 45 E8 03 4D 14 89 55 E4 FF 45 DC 8B 55 D0 39 55 DC 0F 82 3A FC FF FF
+
+. 1655 8051752 28
+. 8B 45 CC FF 45 E0 8B 55 D4 01 45 E4 83 85 18 FF FF FF 08 39 55 E0 0F 82 6E FB FF FF
+
+. 1656 805176E 10
+. 8D 65 F4 5B 5E 31 C0 5F C9 C3
+
+. 1657 8050A56 5
+. E9 C9 FE FF FF
+
+. 1658 8058B04 5
+. 83 FA 05 74 6B
+
+. 1659 8058B74 52
+. 8B 55 14 8B 8C BA 08 01 00 00 C1 E0 04 8D 14 08 8D 34 10 83 EC 08 8D 04 30 50 56 52 51 FF 75 10 FF 75 EC FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 5C 0C 00 00
+
+. 1660 8059804 77
+. 55 89 E5 57 56 53 81 EC 64 01 00 00 B9 CD CC CC CC 8B 45 1C F7 E1 C1 EA 02 89 55 E4 8B 45 1C 31 D2 F7 75 18 89 45 E0 8B 45 18 F7 E1 68 2F 1B F4 3F C1 EA 02 E8 00 00 00 00 5B 81 C3 D7 78 00 00 68 E0 F0 9C 76 89 55 DC E8 DF EE FE FF
+***** EXPENSIVE 1662 281
+
+. 1661 8059851 20
+. 5F 58 68 2F 1B E4 3F 68 E0 F0 9C 76 DD 5D D0 E8 CB EE FE FF
+***** EXPENSIVE 1663 282
+
+. 1662 8059865 25
+. C7 45 E8 00 00 00 00 8B 55 E0 83 C4 10 39 55 E8 DD 5D C8 0F 83 72 01 00 00
+***** EXPENSIVE 1664 283
+
+. 1663 805987E 178
+. 8B 45 DC D1 E0 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 89 45 C4 48 DD 9D E8 FE FF FF DD 9D E0 FE FF FF 89 85 C4 FE FF FF C7 85 C0 FE FF FF 00 00 00 00 C7 85 BC FE FF FF 00 00 00 00 90 8B 95 BC FE FF FF 03 55 E4 8B 75 E4 8D 0C 16 8D 34 0E 8B 7D E4 01 F7 8B 45 0C 89 BD 9C FE FF FF 0F AF 85 BC FE FF FF 8B 7D 08 0F AF 55 0C 0F AF 4D 0C 0F AF 75 0C DD 04 D7 DD 04 CF DD 04 F7 DD 04 C7 8B BD 9C FE FF FF 0F AF 7D 0C 8B 45 08 DD 04 F8 D9 C4 D8 C1 D9 C4 D8 C4 D9 C1 D9 CF DE E3 D9 CD DE E4 D9 CD D8 C4 DD 85 E8 FE FF FF D9 FA D9 CE DE E5
+***** EXPENSIVE 1665 284
+
+. 1664 8059930 157
+. D9 CD DC 8D E0 FE FF FF DE CC DD 85 E0 FE FF FF D8 CD 8B 45 14 8B 4D 10 0F AF 85 C0 FE FF FF D8 EA D9 CA DE C5 D9 CC DD 1C C1 D9 C0 DD 45 D0 D9 C9 8B 45 14 0F AF 85 C4 FE FF FF 8B B5 C4 FE FF FF D8 C4 D9 CA DE E4 D9 E0 DD 45 C8 D9 CA DD 1C C1 D9 C9 D8 CA D9 C9 8D 04 F5 08 00 00 00 D8 CC 0F AF 45 14 DE E1 8B 95 C4 FE FF FF DD 45 C8 D9 C9 03 55 C4 DD 1C 08 D9 E0 8B 45 14 0F AF C2 DE CB DC 4D D0 8D 14 D5 08 00 00 00 0F AF 55 14 DE C2 DD 1C C1 8B 7D 18 DD 1C 0A 8B 45 DC
+
+. 1665 80599CD 35
+. FF 45 E8 8B 55 E0 01 FE 01 BD C0 FE FF FF 01 85 BC FE FF FF 39 55 E8 89 B5 C4 FE FF FF 0F 82 CC FE FF FF
+
+. 1666 80599F0 10
+. 83 7D DC 01 0F 84 CA 06 00 00
+
+. 1667 805A0C4 8
+. 8D 65 F4 5B 5E 5F C9 C3
+***** EXPENSIVE 1669 285
+vex iropt: not unrolling (76 sts)
+
+. 1668 8055C38 88
+. 8B 55 D8 8B 45 08 DD 04 08 8B 32 8B 7A 04 8B 45 0C DD 54 08 08 89 34 08 89 7C 08 04 8B 45 14 2B 45 EC DD 5D E0 0F AF 45 10 80 75 E7 80 C1 E0 04 8B 55 0C DD 45 E0 89 34 02 89 7C 02 04 DD 5C 02 08 8B 45 D4 01 45 D8 FF 45 EC 8B 45 14 2B 45 EC 03 4D DC 39 45 EC 72 A8
+
+. 1669 8053A68 5
+. 83 F9 05 74 73
+
+. 1670 8053AE0 53
+. 8B 75 D8 8B 55 14 8B 8C B2 08 01 00 00 C1 E0 04 8D 14 08 8D 34 10 83 EC 08 8D 04 30 50 56 52 51 57 FF 75 DC FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 87 0C 00 00
+
+. 1671 805479C 39
+. 55 89 E5 57 56 53 81 EC 64 01 00 00 68 2F 1B F4 3F E8 00 00 00 00 5B 81 C3 62 C9 00 00 68 E0 F0 9C 76 E8 6D 3F FF FF
+***** EXPENSIVE 1673 286
+
+. 1672 80547C3 20
+. 5F 58 68 2F 1B E4 3F 68 E0 F0 9C 76 DD 5D E0 E8 59 3F FF FF
+***** EXPENSIVE 1674 287
+
+. 1673 80547D7 58
+. B9 05 00 00 00 31 D2 8B 45 1C F7 F1 89 45 D4 31 D2 8B 45 1C F7 75 18 89 45 D0 31 D2 8B 45 18 F7 F1 C7 45 E8 00 00 00 00 83 C4 10 39 45 E8 DD 5D D8 89 45 CC 0F 83 67 01 00 00
+***** EXPENSIVE 1675 288
+
+. 1674 8054811 184
+. 8B 45 D0 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 D1 E0 DD 9D F0 FE FF FF DD 9D E8 FE FF FF 89 45 C8 C7 85 D8 FE FF FF 00 00 00 00 C7 85 C8 FE FF FF 00 00 00 00 90 8B 95 C8 FE FF FF 8B 4D C8 8D 44 11 FF 8B 55 0C 8B 75 08 0F AF 95 C8 FE FF FF DD 04 D6 8B 55 0C 8D 0C 01 0F AF D0 8D 04 C5 08 00 00 00 0F AF 45 0C DD 04 30 8B 45 0C 0F AF C1 DD 04 D6 DD 04 C6 D9 C1 8D 0C CD 08 00 00 00 0F AF 4D 0C D8 C1 DD 04 31 D9 C9 D8 C0 DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CC DE E3 D9 CB D8 C0 DD 85 E8 FE FF FF D9 C9 DE CB D8 CB D8 ED D9 C2 D8 C1 DD 45 E0
+***** EXPENSIVE 1676 289
+
+. 1675 80548C9 140
+. D9 CA DE E4 DD 45 D8 D8 CB D9 CA 8B 95 D8 FE FF FF D8 CE 03 55 D4 8B 7D D4 DE C2 DD 45 D8 D9 CB 8D 0C 17 DC 4D E0 D9 CB DE CE D9 CD 8D 34 0F DE E2 01 F7 D8 C0 D9 C9 D8 C0 D9 CD DE C3 D9 C1 D9 C4 8B 45 14 0F AF 85 D8 FE FF FF 0F AF 55 14 0F AF 4D 14 89 BD A4 FE FF FF 0F AF 75 14 8B 7D 10 D8 E2 D9 C9 D8 E6 D9 CB DE C6 D9 CB DD 1C C7 D9 CA DD 1C D7 DD 1C CF D9 CA DD 1C F7 8B BD A4 FE FF FF 0F AF 7D 14 8B 45 10 8B 55 D0
+***** EXPENSIVE 1677 290
+
+. 1676 8054955 35
+. DE C1 DD 1C F8 FF 45 E8 8D 04 92 8B 4D CC 01 85 C8 FE FF FF 01 95 D8 FE FF FF 39 4D E8 0F 82 D0 FE FF FF
+
+. 1677 8054978 10
+. 83 7D D0 01 0F 84 9C 06 00 00
+
+. 1678 805501E 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1679 805B15C 5
+. 83 FA 05 74 63
+
+. 1680 805B1C4 52
+. 8B 4D 14 8B 94 B9 08 01 00 00 C1 E0 03 8D 0C 10 8D 34 08 83 EC 08 8D 04 30 50 56 51 52 FF 75 10 FF 75 EC FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 7C 0C 00 00
+
+. 1681 805BE74 77
+. 55 89 E5 57 56 53 81 EC 84 01 00 00 B9 CD CC CC CC 8B 45 1C F7 E1 C1 EA 02 89 55 E0 8B 45 1C 31 D2 F7 75 18 89 45 DC 8B 45 18 F7 E1 68 2F 1B F4 3F C1 EA 02 E8 00 00 00 00 5B 81 C3 67 52 00 00 68 E0 F0 9C 76 89 55 D8 E8 6F C8 FE FF
+***** EXPENSIVE 1683 291
+
+. 1682 805BEC1 20
+. 5F 58 68 2F 1B E4 3F 68 E0 F0 9C 76 D9 5D D4 E8 5B C8 FE FF
+***** EXPENSIVE 1684 292
+
+. 1683 805BED5 25
+. C7 45 E4 00 00 00 00 8B 55 DC 83 C4 10 39 55 E4 D9 5D D0 0F 83 BD 01 00 00
+***** EXPENSIVE 1685 293
+
+. 1684 805BEEE 182
+. 8B 45 D8 D1 E0 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 89 45 9C 48 DD 9D B8 FE FF FF DD 9D B0 FE FF FF 89 85 94 FE FF FF C7 85 90 FE FF FF 00 00 00 00 C7 85 8C FE FF FF 00 00 00 00 90 8B 95 8C FE FF FF 03 55 E0 8B 75 E0 8D 0C 16 8D 34 0E 8B 7D E0 01 F7 8B 45 0C 89 BD 7C FE FF FF 0F AF 85 8C FE FF FF 8B 7D 08 D9 04 87 0F AF 55 0C 0F AF 4D 0C 0F AF 75 0C D9 04 97 D9 04 B7 D9 CA D9 5D CC D9 04 8F 8B BD 7C FE FF FF 0F AF 7D 0C 8B 45 08 D9 5D C4 D9 04 B8 D9 C9 D9 55 C8 D8 C1 D9 5D C0 D8 6D C8 D9 45 C4 D8 C2 D9 C9 D9 5D B8 D9 45 C0 D8 C1 D9 CA
+***** EXPENSIVE 1686 294
+
+. 1685 805BFA4 148
+. D8 6D C4 D9 CA D9 5D B0 D9 5D BC D9 55 B4 DD 85 B8 FE FF FF D9 FA D9 45 C0 D8 65 BC D9 C9 DC 8D B0 FE FF FF DE C9 D9 5D AC DD 85 B0 FE FF FF D8 4D B0 D8 6D CC D9 5D A8 D9 45 A8 D8 45 AC D9 5D A4 D9 45 D4 D9 E0 D8 4D B8 D9 5D A0 D9 45 D0 D8 C9 D9 45 A8 D9 C9 D8 6D A0 D9 C9 D8 65 AC D9 C9 D9 5D A0 D9 45 D0 D9 C9 D9 5D A8 D9 E0 D9 45 D4 D9 C9 D8 4D B8 D9 C9 DE CA DE C1 8B 45 14 D9 45 CC 0F AF 85 90 FE FF FF D8 45 B0 8B 4D 10 D9 1C 81 8B 45 14
+***** EXPENSIVE 1687 295
+
+. 1686 805C038 115
+. 0F AF 85 94 FE FF FF 8B 95 94 FE FF FF D9 45 A4 03 55 9C D9 1C 81 8B 8D 94 FE FF FF 8D 04 8D 04 00 00 00 0F AF 45 14 8B 75 10 D9 45 A0 D9 1C 30 8B 45 14 0F AF C2 D9 45 A8 D9 1C 86 8D 14 95 04 00 00 00 8B 45 18 0F AF 55 14 01 C1 D9 1C 32 89 8D 94 FE FF FF 8B 55 D8 FF 45 E4 8B 4D DC 01 85 90 FE FF FF 01 95 8C FE FF FF 39 4D E4 0F 82 81 FE FF FF
+
+. 1687 805C0AB 10
+. 83 7D D8 01 0F 84 4F 08 00 00
+
+. 1688 805C904 8
+. 8D 65 F4 5B 5E 5F C9 C3
+***** EXPENSIVE 1690 296
+vex iropt: not unrolling (64 sts)
+
+. 1689 80584CC 64
+. 8B 4D 08 8B 14 F1 D9 07 8B 45 0C 89 54 F0 04 D9 14 F0 8B 45 14 2B 45 F0 0F AF 45 10 8B 4D 0C 81 F2 00 00 00 80 FF 45 F0 D9 1C C1 89 54 C1 04 8B 45 14 2B 45 F0 03 75 10 03 7D EC 39 45 F0 72 C0
+
+. 1690 8056263 5
+. 83 F9 05 74 74
+
+. 1691 80562DC 53
+. 8B 4D D8 8B 7D 14 8B 94 8F 08 01 00 00 C1 E0 03 8D 0C 10 8D 3C 08 83 EC 08 8D 04 38 50 57 51 52 56 FF 75 DC FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 D7 0C 00 00
+
+. 1692 8056FE8 39
+. 55 89 E5 57 56 53 81 EC 44 01 00 00 68 2F 1B F4 3F E8 00 00 00 00 5B 81 C3 16 A1 00 00 68 E0 F0 9C 76 E8 21 17 FF FF
+***** EXPENSIVE 1694 297
+
+. 1693 805700F 20
+. 59 5E 68 2F 1B E4 3F 68 E0 F0 9C 76 D9 5D E0 E8 0D 17 FF FF
+***** EXPENSIVE 1695 298
+
+. 1694 8057023 58
+. B9 05 00 00 00 31 D2 8B 45 1C F7 F1 89 45 D8 31 D2 8B 45 1C F7 75 18 89 45 D4 31 D2 8B 45 18 F7 F1 C7 45 E4 00 00 00 00 83 C4 10 39 45 E4 D9 5D DC 89 45 D0 0F 83 8F 01 00 00
+***** EXPENSIVE 1696 299
+
+. 1695 805705D 187
+. 8B 45 D4 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 D1 E0 DD 9D F0 FE FF FF DD 9D E8 FE FF FF 89 45 CC C7 85 D8 FE FF FF 00 00 00 00 C7 85 CC FE FF FF 00 00 00 00 90 8B 95 CC FE FF FF 8B 4D CC 8D 44 11 FF 8B 55 0C 8B 75 08 0F AF 95 CC FE FF FF D9 04 96 8B 55 0C 8D 0C 01 0F AF D0 8D 04 85 04 00 00 00 0F AF 45 0C D9 04 30 8B 45 0C 0F AF C1 D9 04 96 D9 04 86 D9 C1 8D 0C 8D 04 00 00 00 0F AF 4D 0C D8 C1 D9 CC D9 5D C8 D9 04 31 D9 CC D8 C0 DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CB DE E2 D9 CA D8 C0 DE C9 D9 5D EC DD 85 E8 FE FF FF D8 C9 D8 6D C8 D9 5D BC
+***** EXPENSIVE 1697 300
+
+. 1696 8057118 139
+. D9 45 EC D9 45 BC D8 C1 D9 C9 D8 6D BC D9 C9 D9 5D B8 D9 5D B4 D9 45 DC D9 45 E0 D8 CB D9 C9 D8 CC DE C1 D9 45 DC DE CB 8B 95 D8 FE FF FF D9 45 E0 03 55 D8 8B 45 D8 DE CC D9 CA 8D 0C 10 DE E3 D8 45 C8 D9 45 B4 D9 CB 8D 34 08 D8 C0 D9 C9 01 F0 D9 5D C8 D9 C9 D8 C0 D9 45 B8 D9 CB D8 E2 D9 CA 8B 7D 14 D8 45 B4 D9 CB 0F AF BD D8 FE FF FF 0F AF 55 14 0F AF 4D 14 D8 E1 D9 CB D9 5D B4 D8 45 B8 89 85 BC FE FF FF D9 45 C8
+***** EXPENSIVE 1698 301
+
+. 1697 80571A3 73
+. 8B 45 10 D9 1C B8 D9 55 B8 D9 CA D9 1C 90 D9 1C 88 8B BD BC FE FF FF 0F AF 75 14 0F AF 7D 14 8B 55 D4 D9 45 B4 D9 1C B0 FF 45 E4 D9 1C B8 8B 4D D0 8D 04 92 01 85 CC FE FF FF 01 95 D8 FE FF FF 39 4D E4 0F 82 A8 FE FF FF
+
+. 1698 80571EC 10
+. 83 7D D4 01 0F 84 39 07 00 00
+
+. 1699 805792F 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1700 804D445 9
+. 83 F9 06 0F 84 E0 00 00 00
+
+. 1701 804D52E 67
+. 8B 55 14 8B 4D EC 8B 8C 8A 08 01 00 00 C1 E0 04 89 4D E4 01 C1 89 CF 01 C7 8D 14 38 89 4D E0 8D 0C 10 51 52 57 FF 75 E0 FF 75 E4 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 F3 0E 00 00
+***** EXPENSIVE 1703 302
+
+. 1702 804E464 136
+. 55 89 E5 57 56 53 81 EC 5C 01 00 00 B9 AB AA AA AA 8B 45 20 F7 E1 C1 EA 02 89 55 DC 8B 45 20 31 D2 F7 75 1C 89 45 D8 8B 45 1C F7 E1 89 95 A4 FE FF FF C1 EA 02 E8 00 00 00 00 5B 81 C3 76 2C 01 00 89 55 D4 8D 14 92 8B 75 08 8B 7D 10 C7 45 EC 00 00 00 00 C7 45 E8 00 00 00 00 89 85 A0 FE FF FF 89 55 D0 DD 83 B4 F8 FF FF D9 FA C7 45 E4 00 00 00 00 8B 45 D8 DC 8B BC F8 FF FF 39 45 E4 DD 5D C8 0F 83 BD 04 00 00
+
+. 1703 804E4EC 31
+. C7 85 AC FE FF FF 00 00 00 00 C7 85 A8 FE FF FF 00 00 00 00 8B 4D E4 85 C9 0F 85 AB 04 00 00
+***** EXPENSIVE 1705 303
+
+. 1704 804E50B 82
+. DD 83 34 F6 FF FF DD 83 44 F6 FF FF D9 C9 DD 55 C0 D9 C9 DD 55 B8 D9 C9 DD 55 B0 D9 C9 DD 55 A8 D9 C9 DD 55 A0 D9 C9 DD 55 98 D9 C9 DD 55 90 D9 C9 DD 55 88 D9 C9 DD 5D 80 DD 9D 78 FF FF FF C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 30 04 00 00
+***** EXPENSIVE 1706 304
+
+. 1705 804E55D 193
+. 8B 55 DC D1 E2 8B 45 DC C1 E0 02 89 95 64 FF FF FF 03 55 DC 89 95 54 FF FF FF 89 85 3C FF FF FF 8B 55 D4 03 45 DC C1 E2 02 89 85 2C FF FF FF 8B 45 D4 D1 E0 DD 45 C8 89 95 B4 FE FF FF 8B 4D E8 03 55 D4 DA 4D 18 89 85 BC FE FF FF 89 95 B0 FE FF FF 03 45 D4 0F AF 4D 14 8B 95 A8 FE FF FF DD 9D 20 FF FF FF 89 85 B8 FE FF FF C1 E1 04 C1 E2 04 89 F6 8B 45 EC 03 45 DC 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 45 EC 03 85 64 FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 D9 CA DD 9D 70 FF FF FF DD 44 06 08 D9 C9 8B 45 EC 03 85 54 FF FF FF 0F AF 45 0C C1 E0 04 DD 9D 68 FF FF FF DD 04 06
+***** EXPENSIVE 1707 305
+
+. 1706 804E61E 186
+. DD 9D 58 FF FF FF DD 44 06 08 8B 45 EC 03 85 3C FF FF FF 0F AF 45 0C C1 E0 04 DD 9D 48 FF FF FF DD 04 06 DD 9D 40 FF FF FF DD 44 06 08 DD 9D 30 FF FF FF 8B 45 EC 03 85 2C FF FF FF DD 85 40 FF FF FF DD 85 30 FF FF FF D9 C9 0F AF 45 0C D8 C3 D9 C9 D8 C2 DD 83 BC F8 FF FF DD 83 BC F8 FF FF D9 C9 C1 E0 04 D8 CB D9 C9 D8 CA D9 CB DC 04 16 D9 CD DC A5 40 FF FF FF D9 CC DC A5 30 FF FF FF DD 04 06 DD 44 06 08 D9 CB DC 2C 16 D9 CD DC 6C 16 08 D9 CE DC 8D 20 FF FF FF D9 CA DC 8D 20 FF FF FF D9 CF DD 9D 18 FF FF FF D9 CB DC 44 16 08 DD 9D 10 FF FF FF D9 C3 D8 E6
+***** EXPENSIVE 1708 306
+
+. 1707 804E6D8 209
+. D9 CC DE C6 DD 85 70 FF FF FF D8 C3 DD 9D 08 FF FF FF DD 85 68 FF FF FF D8 C2 DD 95 00 FF FF FF DC 8B BC F8 FF FF DC AD 48 FF FF FF DD 9D 98 FE FF FF D9 C4 DD 85 58 FF FF FF D9 C9 D8 C2 D9 CE DE E2 DC 85 08 FF FF FF DD 85 08 FF FF FF D9 C9 DD 9D F8 FE FF FF DC 8B BC F8 FF FF DD 85 48 FF FF FF D9 C9 DC AD 58 FF FF FF D9 C9 DC 85 00 FF FF FF D9 CB DC A5 68 FF FF FF DC 8D 20 FF FF FF D9 CB DD 9D F0 FE FF FF D9 C0 D8 E3 D9 CC DC A5 70 FF FF FF D9 C9 DE C3 DC 8D 20 FF FF FF D9 CB DD 9D E8 FE FF FF DD 85 18 FF FF FF DD 85 98 FE FF FF D8 C4 D9 C9 DC 85 F8 FE FF FF DD 1C 0F DD 9D E0 FE FF FF DD 85 E8 FE FF FF DD 85 10 FF FF FF
+***** EXPENSIVE 1709 307
+
+. 1708 804E7A9 168
+. D9 CC DC AD 98 FE FF FF D9 CC DC 85 F0 FE FF FF D9 C9 D8 C5 D9 C9 DD 5C 0F 08 D9 CB DD 9D 98 FE FF FF D9 CA DD 9D D8 FE FF FF DD 85 E0 FE FF FF D8 C4 DD 9D D0 FE FF FF DD 85 18 FF FF FF DC A5 F8 FE FF FF DD 9D C8 FE FF FF DD 85 10 FF FF FF DC A5 F0 FE FF FF D9 C5 8B 45 E8 D8 C2 D9 C9 DD 9D C0 FE FF FF D9 CB DC A5 E8 FE FF FF D9 CC DC A5 E0 FE FF FF D9 CD DE E1 03 45 D4 DD 45 C0 DD 45 B8 0F AF 45 14 D8 CE D9 C9 D8 CD D9 CE DC 4D C0 D9 CD DC 4D B8 D9 CE C1 E0 04 DE E1 D9 CC DE C5 D9 CB DD 1C 07 D9 CB
+***** EXPENSIVE 1710 308
+
+. 1709 804E851 169
+. DD 5C 07 08 DD 85 98 FE FF FF 8B 45 E8 D8 C3 03 85 BC FE FF FF DD 45 B0 DD 45 A8 0F AF 45 14 D8 CA D9 C9 D8 CB D9 CA DC 4D B0 D9 CB DC 4D A8 D9 CA C1 E0 04 DE E1 D9 CA DE C1 D9 C9 DD 1C 07 DD 5C 07 08 8B 45 E8 03 85 B8 FE FF FF DD 45 A0 DD 45 98 0F AF 45 14 DC 8D C0 FE FF FF D9 C9 DC 8D C8 FE FF FF C1 E0 04 DE E1 D9 CA DC A5 98 FE FF FF D9 CA DD 1C 07 DD 45 A0 DD 45 98 DC 8D C8 FE FF FF D9 C9 DC 8D C0 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 B4 FE FF FF DD 45 90 DD 45 88 0F AF 45 14 DC 8D D0 FE FF FF
+***** EXPENSIVE 1711 309
+
+. 1710 804E8FA 147
+. D9 C9 DC 8D D8 FE FF FF C1 E0 04 DE E1 DD 1C 07 DD 45 90 DD 45 88 DC 8D D8 FE FF FF D9 C9 DC 8D D0 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 B0 FE FF FF DD 45 80 DD 85 78 FF FF FF D9 C9 0F AF 45 14 D8 CA D9 C9 D8 CB D9 CA DC 8D 78 FF FF FF D9 CB DC 4D 80 D9 C9 C1 E0 04 DE E2 DE C2 DD 1C 07 DD 5C 07 08 8B 45 0C C1 E0 04 01 C2 8B 45 0C 01 85 A8 FE FF FF 8B 45 14 C1 E0 04 FF 45 EC 01 C1 FF 45 E8 FF 45 E0 8B 45 D4 39 45 E0 0F 82 43 FC FF FF
+
+. 1711 804E98D 28
+. 8B 55 D0 FF 45 E4 8B 45 D8 01 55 E8 83 85 AC FE FF FF 10 39 45 E4 0F 82 57 FB FF FF
+
+. 1712 804E9A9 13
+. 81 C4 5C 01 00 00 5B 5E 31 C0 5F C9 C3
+
+. 1713 804D571 5
+. E9 0A FF FF FF
+
+. 1714 80508E9 9
+. 83 F9 06 0F 84 DC 00 00 00
+
+. 1715 80509CE 67
+. 8B 7D 14 8B 4D EC 8B 8C 8F 08 01 00 00 C1 E0 03 89 4D E4 01 C1 89 CF 01 C7 8D 14 38 89 4D E0 8D 0C 10 51 52 57 FF 75 E0 FF 75 E4 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 27 0E 00 00
+***** EXPENSIVE 1717 310
+
+. 1716 8051838 136
+. 55 89 E5 57 56 53 81 EC 0C 01 00 00 B9 AB AA AA AA 8B 45 20 F7 E1 C1 EA 02 89 55 D8 8B 45 20 31 D2 F7 75 1C 89 45 D4 8B 45 1C F7 E1 89 95 EC FE FF FF C1 EA 02 E8 00 00 00 00 5B 81 C3 A2 F8 00 00 89 55 D0 8D 14 92 8B 75 08 8B 7D 10 C7 45 E8 00 00 00 00 C7 45 E4 00 00 00 00 89 85 E8 FE FF FF 89 55 CC DD 83 B4 F8 FF FF D9 FA C7 45 E0 00 00 00 00 8B 45 D4 DC 8B BC F8 FF FF 39 45 E0 D9 5D C8 0F 83 D0 04 00 00
+
+. 1717 80518C0 23
+. C7 85 F4 FE FF FF 00 00 00 00 89 F6 8B 4D E0 85 C9 0F 85 C6 04 00 00
+***** EXPENSIVE 1719 311
+
+. 1718 80518D7 79
+. D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D9 55 C4 D9 C9 D9 55 C0 D9 C9 D9 55 BC D9 C9 D9 55 B8 D9 C9 D9 55 B4 D9 C9 D9 55 B0 D9 C9 D9 55 AC D9 C9 D9 55 A8 D9 C9 D9 5D A4 D9 5D A0 C7 45 DC 00 00 00 00 8B 45 D0 39 45 DC 0F 83 4E 04 00 00
+***** EXPENSIVE 1720 312
+
+. 1719 8051926 177
+. 8B 55 D8 C1 E2 02 8B 45 D8 D1 E0 89 55 84 03 55 D8 89 45 94 89 95 78 FF FF FF 03 45 D8 8B 55 D0 D9 83 EC F8 FF FF C1 E2 02 89 45 8C 8B 45 D0 D1 E0 D9 9D F8 FE FF FF 89 95 00 FF FF FF D9 45 C8 03 55 D0 DA 4D 18 89 85 08 FF FF FF 89 95 FC FE FF FF 03 45 D0 8B 55 E4 8B 4D E8 D9 9D 5C FF FF FF 89 85 04 FF FF FF 0F AF 55 14 0F AF 4D 0C 8D 76 00 8B 45 E8 03 45 D8 0F AF 45 0C D9 04 C6 D9 44 C6 04 8B 45 E8 03 45 94 0F AF 45 0C D9 04 C6 D9 CA D9 5D 9C D9 44 C6 04 8B 45 E8 03 45 8C 0F AF 45 0C D9 04 C6 D9 CA D9 5D 98 D9 44 C6 04 D9 CA
+***** EXPENSIVE 1721 313
+
+. 1720 80519D7 192
+. 8B 45 E8 03 45 84 0F AF 45 0C D9 5D 90 D9 C9 D9 5D 88 D9 04 C6 D9 44 C6 04 8B 45 E8 03 85 78 FF FF FF 0F AF 45 0C D9 5D 80 D9 04 C6 D9 9D 7C FF FF FF D9 44 C6 04 D9 9D 74 FF FF FF D9 C2 D8 C1 D9 9D 70 FF FF FF D9 45 80 D8 C2 D9 9D 6C FF FF FF D9 85 70 FF FF FF D8 8D F8 FE FF FF D9 04 CE DE E1 D9 85 6C FF FF FF D8 8D F8 FE FF FF D9 9D 60 FF FF FF D9 44 CE 04 D8 A5 60 FF FF FF D9 CC DE E2 D9 04 CE D9 CC D9 9D 64 FF FF FF D9 CB D8 85 70 FF FF FF D9 44 CE 04 D9 CB D8 65 80 D8 8D 5C FF FF FF D9 CB D8 85 6C FF FF FF D9 C9 D9 9D 58 FF FF FF D9 C3 D9 CA D8 8D 5C FF FF FF D9 CA
+***** EXPENSIVE 1722 314
+
+. 1721 8051A97 187
+. D8 E3 D9 C9 D9 9D 54 FF FF FF D9 85 64 FF FF FF D8 C2 D9 C9 D9 9D 50 FF FF FF D9 C9 D8 AD 64 FF FF FF D9 85 7C FF FF FF D9 85 74 FF FF FF D9 85 7C FF FF FF D9 CE DE C5 D9 C9 D8 45 9C D9 C9 D8 45 98 D9 CD D8 65 9C D9 CB D9 9D 4C FF FF FF D9 C9 D9 9D 44 FF FF FF D9 85 F8 FE FF FF D9 85 F8 FE FF FF D9 85 5C FF FF FF DE CC D8 CA D9 C9 D8 CD D9 C9 D8 6D 90 D9 C9 D8 6D 88 D9 CC D9 95 48 FF FF FF D9 CB D9 9D 40 FF FF FF D9 85 74 FF FF FF D8 65 98 D9 85 5C FF FF FF DE C9 D9 9D 3C FF FF FF D9 85 3C FF FF FF D8 E9 D9 9D 38 FF FF FF D9 85 40 FF FF FF D8 C4 D9 45 90
+***** EXPENSIVE 1723 315
+
+. 1722 8051B52 208
+. D9 C9 D9 9D 34 FF FF FF DE C2 D9 85 58 FF FF FF D8 C2 D9 45 88 D9 C9 D9 1C D7 DE C5 D9 85 54 FF FF FF D8 C5 D9 5C D7 04 D9 85 50 FF FF FF D8 85 38 FF FF FF D9 9D 30 FF FF FF D9 85 4C FF FF FF D8 85 34 FF FF FF D9 C9 D8 85 3C FF FF FF D9 C9 D9 9D 2C FF FF FF DC C2 D9 CB D8 A5 40 FF FF FF D9 CA D9 9D 28 FF FF FF D9 85 44 FF FF FF D8 C2 D9 CC D8 AD 54 FF FF FF D9 C9 D8 AD 58 FF FF FF D9 CC D9 9D 24 FF FF FF D9 9D 1C FF FF FF D9 85 4C FF FF FF D9 85 50 FF FF FF D9 CB D8 AD 48 FF FF FF D9 CB D8 A5 38 FF FF FF D9 C9 D8 A5 34 FF FF FF D9 CC D9 95 20 FF FF FF D9 CB D9 9D 10 FF FF FF D9 C9 D8 AD 44 FF FF FF D9 9D 0C FF FF FF
+***** EXPENSIVE 1724 316
+
+. 1723 8051C22 157
+. 8B 45 E4 D9 45 C4 D9 45 C0 03 45 D0 D8 CC D9 C9 D8 CA 0F AF 45 14 DE E1 D9 1C C7 D9 45 C4 DE CB D9 45 C0 DE C9 DE C2 D9 C9 D9 5C C7 04 D9 45 B8 8B 45 E4 D9 45 BC D9 C9 03 85 08 FF FF FF D8 8D 24 FF FF FF D9 C9 D8 8D 28 FF FF FF 0F AF 45 14 DE E1 D9 1C C7 D9 45 B8 D9 45 BC D9 C9 D8 8D 28 FF FF FF D9 C9 D8 8D 24 FF FF FF DE C1 D9 5C C7 04 D9 45 B0 8B 45 E4 D9 45 B4 D9 C9 03 85 04 FF FF FF D8 8D 1C FF FF FF D9 C9 D8 CA 0F AF 45 14 DE E1 D9 1C C7 D9 45 B4 D9 45 B0 D9 C9
+***** EXPENSIVE 1725 317
+
+. 1724 8051CBF 172
+. D8 8D 1C FF FF FF D9 C9 DE CA DE C1 D9 5C C7 04 8B 45 E4 D9 45 AC D9 45 A8 D9 45 AC D9 45 A8 D9 CA 03 85 00 FF FF FF D8 8D 2C FF FF FF D9 CB D8 8D 30 FF FF FF D9 C9 D8 8D 2C FF FF FF D9 CA D8 8D 30 FF FF FF D9 C9 0F AF 45 14 DE E3 DE C1 D9 C9 D9 1C C7 D9 5C C7 04 D9 45 A4 8B 45 E4 D9 45 A0 D9 C9 03 85 FC FE FF FF D8 8D 10 FF FF FF D9 C9 D8 8D 0C FF FF FF 0F AF 45 14 DE E9 D9 1C C7 D9 45 A0 D9 45 A4 D8 8D 0C FF FF FF D9 C9 D8 8D 10 FF FF FF DE C1 D9 5C C7 04 FF 45 DC 8B 45 D0 03 4D 0C FF 45 E8 03 55 14 FF 45 E4
+
+. 1725 8051D6B 9
+. 39 45 DC 0F 82 24 FC FF FF
+
+. 1726 8051D74 28
+. 8B 55 CC FF 45 E0 8B 45 D4 01 55 E4 83 85 F4 FE FF FF 08 39 45 E0 0F 82 3C FB FF FF
+
+. 1727 8051D90 13
+. 81 C4 0C 01 00 00 5B 5E 31 C0 5F C9 C3
+
+. 1728 8050A11 5
+. E9 0E FF FF FF
+
+. 1729 8058725 74
+. 8B 55 CC 8B 45 E0 8B 44 82 08 89 45 C4 8B 45 AC 03 82 08 02 00 00 8B 4D E0 89 84 8A 08 01 00 00 8B 4D D8 89 C8 0F AF 45 C4 89 45 D8 31 D2 8B 45 08 F7 75 D8 C7 45 C8 01 00 00 00 8B 55 C4 39 55 C8 89 45 C0 0F 83 B0 00 00 00
+***** EXPENSIVE 1731 318
+
+. 1730 8058798 57
+. 8B 7D A8 90 8B 45 B0 01 45 B8 8B 45 B8 31 D2 F7 75 08 89 55 B8 31 D2 52 8B 45 B8 50 DF 2C 24 DC 4D D0 83 EC 08 8B 4D CC 8B B1 08 02 00 00 DD 55 98 DD 1C 24 E8 4F FE FE FF
+***** EXPENSIVE 1732 319
+
+. 1731 80587D1 23
+. DD 1C 3E 8B 45 CC DD 45 98 8B B0 08 02 00 00 DD 1C 24 E8 48 FF FE FF
+***** EXPENSIVE 1733 320
+
+. 1732 80587E8 34
+. 8B 45 B4 FF 45 BC D1 E8 DD 5C 3E 08 83 45 AC 10 83 C7 10 83 45 A8 10 FF 45 DC 83 C4 10 39 45 BC 72 92
+***** EXPENSIVE 1734 321
+vex iropt: not unrolling (113 sts)
+
+. 1733 8058F48 127
+. 8B 55 E4 8D 04 3A 01 C2 8B 4D 0C 0F AF CF 89 95 7C FF FF FF 8B 55 08 DD 04 CA 0F AF 45 0C 8B 8D 7C FF FF FF DD 04 C2 0F AF 4D 0C DD 04 CA D9 C1 D8 C1 D9 C3 8B 45 14 0F AF 45 88 8B 4D 10 D8 C1 DD 1C C1 DC 8B BC F8 FF FF 8B 45 14 0F AF C6 DE EB D9 CA DD 1C C1 DE E1 8D 04 F5 08 00 00 00 0F AF 45 14 D8 C9 DD 1C 08 FF 45 E8 8B 45 18 8B 55 E0 03 75 18 01 45 88 03 7D DC 39 55 E8 72 81
+
+. 1734 8058AA8 33
+. 8B 45 EC 8B 75 14 89 C1 8B 54 BE 08 0F AF CA 40 D1 E8 89 4D EC 8B 4D E4 48 85 C9 0F 85 6F 01 00 00
+
+. 1735 8058C38 37
+. 8B 4D E8 89 4D E0 8B 75 08 8B 4D 0C C7 45 DC 01 00 00 00 89 75 D8 89 4D D4 C7 45 E4 00 00 00 00 E9 8C FE FF FF
+
+. 1736 8058AE9 9
+. 83 FA 02 0F 84 1E 01 00 00
+
+. 1737 8058D16 27
+. 8B 55 E4 42 89 D0 C7 45 F4 01 00 00 00 D1 E8 39 45 F4 89 55 E0 0F 83 12 01 00 00
+***** EXPENSIVE 1739 322
+
+. 1738 8058D31 51
+. 8B 4D 20 8B 75 18 83 C1 10 83 EE 02 89 4D D8 89 75 D4 90 8B 45 D8 C7 45 F0 00 00 00 00 8B 55 E8 DD 40 F8 39 55 F0 DD 40 F0 D9 C9 D9 E0 0F 83 6C 01 00 00
+***** EXPENSIVE 1740 323
+
+. 1739 8058D64 147
+. 8B 4D F4 8B 7D D4 8D 74 09 FF 4F 89 F1 8D 76 00 8B 45 EC 01 C8 8B 55 0C 0F AF D1 89 45 C8 8B 45 08 DD 04 D0 8D 04 CD 08 00 00 00 0F AF 45 0C 8B 55 08 DD 04 10 8B 45 0C 0F AF 45 C8 DD 04 C2 8B 45 C8 8D 14 C5 08 00 00 00 0F AF 55 0C 8B 45 08 DD 04 02 D9 C5 D9 C5 D8 CA D9 C9 D8 CB DE E1 D9 C6 DE CA D9 CA D8 CD DE C1 8B 45 14 D9 C3 0F AF C6 8B 55 10 D8 C2 DD 1C C2 D9 C2 8D 04 F5 08 00 00 00 0F AF 45 14 D8 C1 DD 1C 10 D9 CB 8B 45 14 0F AF C7
+***** EXPENSIVE 1741 324
+
+. 1740 8058DF7 47
+. DE E1 DD 1C C2 DE E1 8D 04 FD 08 00 00 00 0F AF 45 14 D9 E0 DD 1C 10 FF 45 F0 8B 45 E8 03 7D 18 03 75 18 03 4D E4 39 45 F0 0F 82 4E FF FF FF
+***** EXPENSIVE 1742 325
+
+. 1741 8058E26 29
+. DD D8 DD D8 8B 45 E0 FF 45 F4 D1 E8 83 45 D8 10 83 6D D4 02 39 45 F4 0F 82 01 FF FF FF
+
+. 1742 8058E43 9
+. F7 45 E4 01 00 00 00 75 7D
+
+. 1743 80536D9 77
+. 8B 45 C4 8B 75 E0 8B 74 B0 08 8B 55 C4 8B 45 A8 03 82 08 02 00 00 89 75 BC 8B 75 E0 89 84 B2 08 01 00 00 8B 45 D8 89 C2 0F AF 55 BC 89 55 D8 89 45 D4 31 D2 8B 45 08 C7 45 C0 01 00 00 00 8B 75 BC F7 75 D8 39 75 C0 0F 83 B8 00 00 00
+
+. 1744 8053F8B 27
+. 8B 55 D8 42 89 D0 C7 45 EC 01 00 00 00 D1 E8 39 45 EC 89 55 94 0F 83 06 02 00 00
+
+. 1745 80541AC 13
+. F7 45 D8 01 00 00 00 0F 85 B5 00 00 00
+
+. 1746 80541B9 19
+. C7 45 E8 00 00 00 00 8B 75 D4 39 75 E8 0F 83 A2 00 00 00
+***** EXPENSIVE 1748 326
+
+. 1747 80541CC 142
+. 8B 45 D8 48 DD 45 E0 D8 C0 89 45 90 89 C7 89 F6 8B 45 0C 0F AF C7 8B 4D 08 8B 75 D8 DD 04 C1 D1 E6 8D 04 FD 08 00 00 00 8D 14 3E 0F AF 45 0C D9 C1 DC 0C 08 0F AF 55 0C 8B 45 90 DD 04 D1 03 45 DC 8B 55 DC 01 C2 D9 C2 8B 4D 14 D8 C0 D9 CB 0F AF 4D 90 D8 E1 D9 CB DE C1 89 95 74 FF FF FF 8B 55 10 DD 1C CA D9 C1 8B 8D 74 FF FF FF D9 E0 D9 CA 0F AF 4D 14 0F AF 45 14 D8 E1 D9 CA DE E1 D9 C9 DD 1C C2 DD 1C CA 8B 4D D8 01 CE 01 F7
+
+. 1748 805425A 18
+. FF 45 E8 8B 75 D4 01 4D 90 39 75 E8 0F 82 70 FF FF FF
+***** EXPENSIVE 1750 327
+
+. 1749 805426C 13
+. DD D8 81 C4 84 00 00 00 5B 5E 5F C9 C3
+
+. 1750 8053A04 41
+. 8B 55 D8 8B 75 14 8B 4C 96 08 8B 45 DC 0F AF C1 89 45 DC 31 D2 89 F8 F7 75 DC 40 D1 E8 8B 75 D0 48 85 F6 0F 85 7B 01 00 00
+
+. 1751 8053BA8 37
+. 8B 75 F0 89 75 EC 8B 55 08 8B 75 0C C7 45 E4 01 00 00 00 89 55 E8 89 75 E0 C7 45 D0 00 00 00 00 E9 80 FE FF FF
+
+. 1752 8053A4D 9
+. 83 F9 02 0F 84 2A 01 00 00
+***** EXPENSIVE 1754 328
+vex iropt: not unrolling (72 sts)
+
+. 1753 8053C48 74
+. 8B 45 0C 0F AF C1 C1 E0 04 8B 7D 08 DD 04 07 8B 45 0C 0F AF C2 DD 04 38 D9 C1 8B 45 14 0F AF C1 8B 7D 10 D8 C1 DD 1C C7 8B 7D E8 8D 04 0F 0F AF 45 14 DE E9 8B 7D 10 DD 1C C7 FF 45 EC 8B 45 D8 01 F2 03 4D E0 39 45 EC 72 B6
+***** EXPENSIVE 1755 329
+
+. 1754 805AE64 45
+. 03 7D AC 89 F8 31 D2 F7 75 08 89 D7 31 D2 52 57 DF 2C 24 DC 4D C8 83 EC 08 8B 4D C4 8B B1 08 02 00 00 DD 55 98 DD 1C 24 E8 8F D7 FE FF
+***** EXPENSIVE 1756 330
+
+. 1755 805AE91 26
+. 8B 45 D8 D9 1C C6 8B 55 C4 DD 45 98 8B B2 08 02 00 00 DD 1C 24 E8 85 D8 FE FF
+***** EXPENSIVE 1757 331
+
+. 1756 805AEAB 27
+. 8B 45 B0 8B 4D D8 FF 45 B4 D1 E8 D9 5C CE 04 83 C4 10 41 39 45 B4 89 4D D8 72 9E
+
+. 1757 805AEC6 5
+. E9 30 FF FF FF
+***** EXPENSIVE 1759 332
+vex iropt: not unrolling (118 sts)
+
+. 1758 805B580 134
+. 8B 4D E4 8D 04 31 01 C1 8B 55 0C 0F AF D6 0F AF 45 0C 89 4D A0 8B 4D 08 D9 04 91 D9 04 81 8B 4D A0 0F AF 4D 0C 8B 45 08 D9 04 88 D9 C1 8B 55 DC 8D 44 57 FF D8 C1 D9 C3 8B 55 14 0F AF D7 8B 4D 10 D8 C1 D9 1C 91 DD 83 BC F8 FF FF 8B 55 14 0F AF D0 DE C9 D9 CA DE E1 D9 C9 8D 04 85 04 00 00 00 0F AF 45 14 DE EA D8 CA D9 C9 D9 1C 91 FF 45 E8 D9 1C 08 8B 45 E0 03 7D 18 03 75 DC 39 45 E8 0F 82 7A FF FF FF
+
+. 1759 805B100 33
+. 8B 45 EC 8B 75 14 89 C1 8B 54 BE 08 0F AF CA 40 D1 E8 89 4D EC 8B 4D E4 48 85 C9 0F 85 63 01 00 00
+
+. 1760 805B284 37
+. 8B 4D E8 89 4D E0 8B 75 08 8B 4D 0C C7 45 DC 01 00 00 00 89 75 D8 89 4D D4 C7 45 E4 00 00 00 00 E9 98 FE FF FF
+
+. 1761 805B141 9
+. 83 FA 02 0F 84 12 01 00 00
+
+. 1762 805B362 27
+. 8B 55 E4 42 89 D0 C7 45 F4 01 00 00 00 D1 E8 39 45 F4 89 55 E0 0F 83 0F 01 00 00
+***** EXPENSIVE 1764 333
+
+. 1763 805B37D 51
+. 8B 4D 20 8B 45 18 83 C1 08 83 E8 02 89 4D DC 89 45 D8 90 8B 55 DC C7 45 F0 00 00 00 00 8B 4D E8 D9 42 FC 39 4D F0 D9 42 F8 D9 C9 D9 E0 0F 83 5E 01 00 00
+***** EXPENSIVE 1765 334
+
+. 1764 805B3B0 146
+. 8B 45 F4 8B 7D D8 8D 74 00 FF 4F 89 F1 8D 76 00 8B 55 EC 01 CA 8B 45 0C 0F AF C1 89 55 D0 8B 55 08 D9 04 82 8D 04 8D 04 00 00 00 0F AF 45 0C D9 04 10 8B 45 0C 0F AF 45 D0 D9 04 82 8B 45 D0 8D 14 85 04 00 00 00 0F AF 55 0C 8B 45 08 D9 04 02 D9 C5 D9 C5 D8 CA D9 C9 D8 CB DE E1 D9 C6 DE CA D9 CA D8 CD DE C1 8B 45 14 D9 C3 0F AF C6 8B 55 10 D8 C2 D9 1C 82 D9 C2 8D 04 B5 04 00 00 00 0F AF 45 14 D8 C1 D9 1C 10 D9 CB 8B 45 14 0F AF C7 DE E1
+***** EXPENSIVE 1766 335
+
+. 1765 805B442 45
+. D9 1C 82 DE E1 8D 04 BD 04 00 00 00 0F AF 45 14 D9 E0 D9 1C 10 FF 45 F0 8B 45 E8 03 7D 18 03 75 18 03 4D E4 39 45 F0 0F 82 51 FF FF FF
+***** EXPENSIVE 1767 336
+
+. 1766 805B46F 29
+. DD D8 DD D8 8B 45 E0 FF 45 F4 D1 E8 83 45 DC 08 83 6D D8 02 39 45 F4 0F 82 04 FF FF FF
+
+. 1767 805B48C 9
+. F7 45 E4 01 00 00 00 75 72
+
+. 1768 8056775 27
+. 8B 55 DC 42 89 D0 C7 45 EC 01 00 00 00 D1 E8 39 45 EC 89 55 AC 0F 83 13 02 00 00
+
+. 1769 80569A3 13
+. F7 45 DC 01 00 00 00 0F 85 B0 00 00 00
+
+. 1770 80569B0 19
+. C7 45 E8 00 00 00 00 8B 75 D8 39 75 E8 0F 83 9D 00 00 00
+***** EXPENSIVE 1772 337
+
+. 1771 80569C3 137
+. 8B 45 DC 48 D9 45 E4 D8 C0 89 45 A8 89 C7 8D 76 00 8B 45 0C 0F AF C7 8B 4D 08 8B 75 DC D9 04 81 D1 E6 8D 04 BD 04 00 00 00 8D 14 3E 0F AF 45 0C D9 C1 D8 0C 08 0F AF 55 0C 8B 45 A8 D9 04 91 03 45 E0 8B 55 E0 01 C2 D9 C2 8B 4D 14 D8 C0 D9 CB 0F AF 4D A8 D8 E1 D9 CB DE C1 89 55 94 8B 55 10 D9 1C 8A D9 C1 8B 4D 94 D9 E0 D9 CA 0F AF 4D 14 0F AF 45 14 D8 E1 D9 CA DE E1 D9 C9 D9 1C 82 D9 1C 8A 8B 4D DC 01 CE 01 F7
+
+. 1772 8056A4C 18
+. FF 45 E8 8B 75 D8 01 4D A8 39 75 E8 0F 82 76 FF FF FF
+***** EXPENSIVE 1774 338
+
+. 1773 8056A5E 10
+. DD D8 83 C4 60 5B 5E 5F C9 C3
+
+. 1774 8056200 40
+. 8B 45 D8 8B 55 14 8B 4C 82 08 8B 7D DC 0F AF F9 31 D2 89 F0 F7 F7 40 D1 E8 8B 55 D0 48 85 D2 89 7D DC 0F 85 78 01 00 00
+
+. 1775 80563A0 37
+. 8B 7D F0 89 7D EC 8B 55 08 8B 7D 0C C7 45 E4 01 00 00 00 89 55 E8 89 7D E0 C7 45 D0 00 00 00 00 E9 83 FE FF FF
+
+. 1776 8056248 9
+. 83 F9 02 0F 84 27 01 00 00
+***** EXPENSIVE 1778 339
+vex iropt: not unrolling (76 sts)
+
+. 1777 8056440 71
+. 8B 45 0C 0F AF C1 8B 7D 08 D9 04 C7 8B 45 0C 0F AF C2 D9 04 38 D9 C1 8B 45 14 0F AF C1 8B 7D 10 D8 C1 D9 1C 87 8B 7D E8 8D 04 0F 0F AF 45 14 DE E9 8B 7D 10 D9 1C 87 FF 45 EC 8B 45 D8 01 F2 03 4D E0 39 45 EC 72 B9
+
+. 1778 804D44E 9
+. 83 F9 07 0F 84 85 00 00 00
+
+. 1779 804D4DC 74
+. 8B 4D 14 8B 55 EC 8B 94 91 08 01 00 00 C1 E0 04 89 55 E4 01 C2 89 D7 01 C7 89 55 E0 8D 14 38 8D 0C 10 83 EC 0C 8D 04 08 50 51 52 57 FF 75 E0 FF 75 E4 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 82 15 00 00
+
+. 1780 804EAA8 160
+. 55 89 E5 57 56 53 81 EC 84 02 00 00 B8 25 49 92 24 F7 65 20 89 85 98 FD FF FF 8B 45 20 29 D0 89 95 9C FD FF FF D1 E8 03 85 9C FD FF FF C1 E8 02 89 45 DC 31 D2 8B 45 20 F7 75 1C 89 45 D8 B8 25 49 92 24 F7 65 1C 8B 4D 1C 29 D1 89 95 94 FD FF FF D1 E9 03 8D 94 FD FF FF 89 4D D4 C1 6D D4 02 8B 55 D4 68 1F B9 EC 3F 89 85 90 FD FF FF 8D 04 52 E8 00 00 00 00 5B 81 C3 F6 25 01 00 D1 E0 68 40 A1 BB 3B 89 45 D0 8B 75 08 8B 7D 10 C7 45 EC 00 00 00 00 C7 45 E8 00 00 00 00 E8 D8 9A FF FF
+***** EXPENSIVE 1782 340
+
+. 1781 804EB48 20
+. 58 5A 68 1F B9 FC 3F 68 40 A1 BB 3B DD 5D C8 E8 C4 9A FF FF
+***** EXPENSIVE 1783 341
+
+. 1782 804EB5C 20
+. 59 58 68 D7 8A 05 40 68 F0 B8 CC 6C DD 5D C0 E8 B0 9A FF FF
+***** EXPENSIVE 1784 342
+
+. 1783 804EB70 20
+. 58 5A 68 1F B9 EC 3F 68 40 A1 BB 3B DD 5D B8 E8 AC 9B FF FF
+***** EXPENSIVE 1785 343
+
+. 1784 804EB84 20
+. 59 58 68 1F B9 FC 3F 68 40 A1 BB 3B DD 5D B0 E8 98 9B FF FF
+***** EXPENSIVE 1786 344
+
+. 1785 804EB98 20
+. 58 5A 68 D7 8A 05 40 68 F0 B8 CC 6C DD 5D A8 E8 84 9B FF FF
+***** EXPENSIVE 1787 345
+
+. 1786 804EBAC 25
+. C7 45 E4 00 00 00 00 8B 45 D8 83 C4 10 39 45 E4 DD 5D A0 0F 83 21 09 00 00
+
+. 1787 804EBC5 54
+. 8B 55 D4 C1 E2 02 89 95 FC FD FF FF 03 55 D4 89 95 F8 FD FF FF C7 85 A4 FD FF FF 00 00 00 00 C7 85 A0 FD FF FF 00 00 00 00 89 F6 8B 45 E4 85 C0 0F 85 F5 08 00 00
+***** EXPENSIVE 1789 346
+
+. 1788 804EBFB 103
+. DD 83 34 F6 FF FF DD 83 44 F6 FF FF D9 C1 DD 55 98 DD 5D 88 D9 C0 DD 55 90 DD 5D 80 D9 C9 DD 95 78 FF FF FF D9 C1 DD 9D 70 FF FF FF DD 95 68 FF FF FF D9 C1 DD 9D 60 FF FF FF DD 95 58 FF FF FF D9 C1 DD 9D 50 FF FF FF DD 9D 48 FF FF FF DD 9D 40 FF FF FF C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 68 08 00 00
+***** EXPENSIVE 1790 347
+
+. 1789 804EC62 171
+. DD 45 C8 DC 45 C0 8B 45 DC DD 9D E0 FD FF FF D1 E0 DD 45 C0 D8 C0 89 85 2C FF FF FF 03 45 DC DD 83 B4 F8 FF FF D9 C9 DD 9D D8 FD FF FF 89 85 1C FF FF FF DD 45 B0 D9 C9 D1 E0 DD 9D A8 FD FF FF D8 C0 DD 45 B0 89 85 04 FF FF FF 8B 45 18 DD 83 34 F6 FF FF D9 C9 DC 45 A8 D9 CA DD 9D D0 FD FF FF 8B 55 DC DD 45 A0 D9 C9 F7 D8 50 DD 9D B0 FD FF FF D9 C9 DD 9D C8 FD FF FF DD 45 C8 DD 45 A8 D9 CA D8 C0 D9 C9 C1 E2 02 D8 C0 D9 C9 DD 9D B8 FD FF FF 89 95 0C FF FF FF DB 04 24 D9 CA 03 55 DC D8 C0 D9 C9 DD 9D E8 FD FF FF
+***** EXPENSIVE 1791 348
+
+. 1790 804ED0D 196
+. 89 95 08 FF FF FF DD 45 B8 D9 C9 DD 9D C0 FD FF FF D9 C9 DD 9D 80 FE FF FF 8B 45 D4 D1 E0 D8 C0 8B 55 E8 89 85 04 FE FF FF 0F AF 55 14 03 45 D4 DC AD E0 FD FF FF 89 85 00 FE FF FF C1 E2 04 D1 E0 8B 8D A0 FD FF FF DC B5 A8 FD FF FF 83 C4 04 89 85 F4 FD FF FF 89 95 98 FD FF FF C1 E1 04 DD 9D 98 FE FF FF 89 F6 8B 45 EC 03 45 DC 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 45 EC 03 85 2C FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 45 EC 03 85 1C FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 D9 CC DD 9D 38 FF FF FF DD 44 06 08 8B 45 EC 03 85 0C FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06
+***** EXPENSIVE 1792 349
+
+. 1791 804EDD1 200
+. D9 CD DD 9D 20 FF FF FF DD 44 06 08 8B 45 EC 03 85 08 FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 D9 CE 8B 45 EC 03 85 04 FF FF FF 0F AF 45 0C C1 E0 04 DD 9D 30 FF FF FF DD 04 06 D9 CB DD 9D 10 FF FF FF D9 CA DD 9D 88 FD FF FF DD 85 38 FF FF FF DC 85 88 FD FF FF DD 44 06 08 D9 C9 DD 9D F8 FE FF FF DD 85 30 FF FF FF D8 C1 DD 9D F0 FE FF FF DC AD 30 FF FF FF DD 85 38 FF FF FF DC A5 88 FD FF FF D9 C9 DD 9D E0 FE FF FF D9 C4 D8 C3 D9 CD DE E3 DD 9D E8 FE FF FF DD 85 10 FF FF FF D9 C3 D9 C9 D8 C2 D9 C9 D8 C6 D9 CA DC A5 10 FF FF FF DD 9D 80 FD FF FF D9 C9 DD 95 D0 FE FF FF DC 85 F0 FE FF FF
+***** EXPENSIVE 1793 350
+
+. 1792 804EE99 164
+. D9 CB DE E5 D9 CA DD 9D C8 FE FF FF DD 85 20 FF FF FF DD 85 80 FD FF FF D9 C9 D8 C6 D9 C9 D8 C5 D9 CE DC A5 20 FF FF FF D9 C0 D9 CF DD 9D 78 FE FF FF DD 85 C8 FE FF FF D9 CF D8 C3 D9 CF DC 44 0E 08 D8 C4 D9 CD DD 95 D8 FE FF FF D9 CF DD 9D 88 FE FF FF D9 CE DC 85 F8 FE FF FF DD 04 0E D8 C1 D9 CD DD 9D B8 FE FF FF DD 85 E0 FD FF FF D9 CD D8 C2 D9 CD DC 45 B8 D9 CD DD 9D C0 FE FF FF D9 CC DC B5 A8 FD FF FF DD 85 C8 FE FF FF D9 C9 DC A5 B0 FD FF FF D9 C9 D8 C4 D9 CD D8 C2 D8 C9 D9 C9 DE CD
+***** EXPENSIVE 1794 351
+
+. 1793 804EF3D 205
+. D9 CC DD 9D 60 FE FF FF DD 85 E8 FD FF FF DC 65 C0 DC 65 B8 DD 85 F8 FE FF FF D9 C9 DC B5 A8 FD FF FF D9 C9 D8 E2 D8 C9 DD 9D B0 FE FF FF DD 85 F0 FE FF FF D8 E4 DE C9 DD 9D A8 FE FF FF DD 45 C8 DC A5 D8 FD FF FF DC 45 B8 DC B5 A8 FD FF FF D9 C9 DC A5 D8 FE FF FF D9 CB DC A5 D0 FE FF FF D9 CB D8 C9 D9 CB DE C9 DD 85 D8 FE FF FF DC A5 F8 FE FF FF DD 85 98 FE FF FF DE C9 DD 85 D0 FE FF FF DC A5 F0 FE FF FF D9 C9 DD 9D A0 FE FF FF DD 85 98 FE FF FF DE C9 DD 85 C8 FD FF FF DC 65 A0 D9 C9 DD 9D 90 FE FF FF DC B5 A8 FD FF FF DD 85 88 FE FF FF D9 C9 DC 8D 80 FE FF FF D9 C9 DC 85 E8 FE FF FF D8 C9 DD 9D 88 FE FF FF
+***** EXPENSIVE 1795 352
+
+. 1794 804F00A 204
+. DD 85 78 FE FF FF DC 85 E0 FE FF FF DE C9 DD 9D 78 FE FF FF DD 85 D0 FD FF FF DC 65 A8 DC 45 A0 DC B5 A8 FD FF FF DD 85 E8 FE FF FF D9 C9 DC 8D 80 FE FF FF D9 C9 D8 E7 D8 C9 DD 9D 70 FE FF FF DD 85 E0 FE FF FF DC A5 80 FD FF FF DE C9 DD 9D 68 FE FF FF DD 45 B0 DC A5 C0 FD FF FF DC 65 A0 DC B5 A8 FD FF FF DD 85 80 FD FF FF D9 C9 DC 8D 80 FE FF FF D9 CF D8 E3 D9 C9 D8 E6 D9 C9 D8 CF D9 C9 DE CF DD 85 C8 FD FF FF DC 85 B8 FD FF FF DC B5 A8 FD FF FF DC 8D 80 FE FF FF D9 CB DC A5 E8 FE FF FF D9 CE DC A5 E0 FE FF FF D9 CE D8 CB D9 CE DE CB DD 85 B8 FE FF FF DC 85 60 FE FF FF DD 9D 60 FE FF FF DD 85 B0 FE FF FF
+***** EXPENSIVE 1796 353
+
+. 1795 804F0D6 236
+. D8 C4 8B 95 B4 FE FF FF 8B 85 B0 FE FF FF 81 F2 00 00 00 80 DD 9D 58 FE FF FF DD 85 A8 FE FF FF 89 85 48 FE FF FF 89 95 4C FE FF FF D8 C2 DD 9D 50 FE FF FF 8B 95 AC FE FF FF DD 85 48 FE FF FF DC A5 A0 FE FF FF D9 CD 81 F2 00 00 00 80 8B 85 A8 FE FF FF DC 85 C0 FE FF FF D9 CC DC AD A0 FE FF FF D9 CA DC AD 90 FE FF FF D9 CD DD 9D 48 FE FF FF 89 85 40 FE FF FF 89 95 44 FE FF FF DD 85 40 FE FF FF DC A5 90 FE FF FF DD 9D 40 FE FF FF DD 85 70 FE FF FF D8 C1 DD 9D 38 FE FF FF DD 85 68 FE FF FF D8 C7 DD 9D 88 FD FF FF DD 85 58 FE FF FF D8 C4 DD 9D 58 FE FF FF D9 C9 D8 C3 DD 85 60 FE FF FF D9 CC DC 85 48 FE FF FF D9 CC DC 85 50 FE FF FF D9 CC DD 9D 48 FE FF FF DD 85 60 FE FF FF D9 CC DD 9D 50 FE FF FF D9 CB
+***** EXPENSIVE 1797 354
+
+. 1796 804F1C2 192
+. DC 85 40 FE FF FF DD 85 38 FE FF FF DC 85 88 FE FF FF D9 C9 DD 9D 40 FE FF FF DD 85 88 FD FF FF DC 85 78 FE FF FF D9 C9 DD 9D 38 FE FF FF DD 85 58 FE FF FF D8 C1 D9 CA D8 EE D9 CF D8 EB D9 C9 DD 9D 88 FD FF FF D9 CD D9 E0 DD 85 50 FE FF FF D9 CB D9 E0 D9 C9 DC A5 70 FE FF FF D9 C9 DC A5 68 FE FF FF D9 CB DC A5 38 FE FF FF D9 CF DC 85 88 FE FF FF D9 CE DC 85 78 FE FF FF D9 C9 DC 85 88 FE FF FF D9 CD DC 85 60 FE FF FF D9 CB DC 85 78 FE FF FF D9 CA DD 9D 30 FE FF FF D9 CE DD 9D 28 FE FF FF DD 85 48 FE FF FF DD 85 40 FE FF FF D9 C9 D8 C2 D9 C9 D8 E5 D9 C9 DD 9D 20 FE FF FF
+***** EXPENSIVE 1798 355
+
+. 1797 804F282 177
+. DD 9D 18 FE FF FF D9 C2 D9 C2 D9 C9 D8 E7 D9 C9 D8 C6 D9 C9 8B 85 98 FD FF FF DD 9D 10 FE FF FF DD 9D 08 FE FF FF DD 85 C0 FE FF FF DD 85 B8 FE FF FF D9 C9 DD 1C 07 DD 5C 07 08 8B 45 E8 03 45 D4 DD 45 98 DD 45 90 0F AF 45 14 DC 8D 28 FE FF FF D9 C9 DC 8D 30 FE FF FF C1 E0 04 DE E1 DD 1C 07 DD 45 90 DD 45 98 D9 C9 DC 8D 30 FE FF FF D9 C9 DC 8D 28 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 04 FE FF FF DD 45 88 DD 45 80 D9 CC DE C7 D9 CB 0F AF 45 14 DC 8D 18 FE FF FF DD 85 40 FE FF FF D9 CC DC 8D 20 FE FF FF D9 CB C1 E0 04 DE E6
+***** EXPENSIVE 1799 356
+
+. 1798 804F333 195
+. D9 CB DE C4 D9 C9 DE E2 DD 85 58 FE FF FF DD 85 50 FE FF FF D9 CA DC AD 48 FE FF FF D9 C9 DC A5 88 FD FF FF D9 CA DC 85 38 FE FF FF D9 CB DD 1C 07 DD 45 88 DD 45 80 DC 8D 20 FE FF FF D9 C9 DC 8D 18 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 00 FE FF FF DD 85 78 FF FF FF DD 85 70 FF FF FF 0F AF 45 14 DC 8D 08 FE FF FF D9 C9 DC 8D 10 FE FF FF C1 E0 04 DE E1 DD 1C 07 DD 85 70 FF FF FF DD 85 78 FF FF FF D9 C9 DC 8D 10 FE FF FF D9 C9 DC 8D 08 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 FC FD FF FF DD 85 68 FF FF FF DD 85 60 FF FF FF 0F AF 45 14 D8 CE D9 C9 D8 CF D9 CE DC 8D 68 FF FF FF
+***** EXPENSIVE 1800 357
+
+. 1799 804F3F6 162
+. D9 CF DC 8D 60 FF FF FF D9 CE C1 E0 04 DE E1 D9 CE DE C5 D9 CD DD 1C 07 D9 CB DD 5C 07 08 8B 45 E8 03 85 F8 FD FF FF DD 85 58 FF FF FF DD 85 50 FF FF FF 0F AF 45 14 D8 CB D9 C9 D8 CD D9 CB DC 8D 58 FF FF FF D9 CD DC 8D 50 FF FF FF D9 CB C1 E0 04 DE E1 D9 CC DE C2 D9 CB DD 1C 07 DD 5C 07 08 8B 45 E8 03 85 F4 FD FF FF DD 85 48 FF FF FF DD 85 40 FF FF FF D9 C9 0F AF 45 14 D8 CA D9 C9 D8 CB D9 CA DC 8D 40 FF FF FF D9 CB DC 8D 48 FF FF FF D9 C9 C1 E0 04 DE E2 DE C2 DD 1C 07 DD 5C 07 08
+
+. 1800 804F498 50
+. 8B 45 0C C1 E0 04 01 C1 8B 45 0C 01 85 A0 FD FF FF 8B 45 14 C1 E0 04 FF 45 E0 8B 55 D4 FF 45 EC 01 85 98 FD FF FF FF 45 E8 39 55 E0 0F 82 AA F8 FF FF
+
+. 1801 804F4CA 28
+. 8B 45 D0 FF 45 E4 8B 55 D8 01 45 E8 83 85 A4 FD FF FF 10 39 55 E4 0F 82 0A F7 FF FF
+
+. 1802 804F4E6 10
+. 8D 65 F4 5B 5E 31 C0 5F C9 C3
+
+. 1803 804D526 8
+. 83 C4 40 E9 55 FF FF FF
+
+. 1804 80508F2 9
+. 83 F9 07 0F 84 83 00 00 00
+
+. 1805 805097E 72
+. 8B 55 14 8B 7D EC 8B BC BA 08 01 00 00 C1 E0 03 89 7D E4 01 C7 89 7D E0 01 C7 8D 14 38 8D 0C 10 83 EC 0C 8D 04 08 50 51 52 57 FF 75 E0 FF 75 E4 56 FF 75 E8 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 BE 14 00 00
+
+. 1806 8051E84 160
+. 55 89 E5 57 56 53 81 EC D4 01 00 00 B8 25 49 92 24 F7 65 20 89 85 30 FE FF FF 8B 45 20 29 D0 89 95 34 FE FF FF D1 E8 03 85 34 FE FF FF C1 E8 02 89 45 D8 31 D2 8B 45 20 F7 75 1C 89 45 D4 B8 25 49 92 24 F7 65 1C 8B 4D 1C 29 D1 89 95 2C FE FF FF D1 E9 03 8D 2C FE FF FF 89 4D D0 C1 6D D0 02 8B 55 D0 68 1F B9 EC 3F 89 85 28 FE FF FF 8D 04 52 E8 00 00 00 00 5B 81 C3 1A F2 00 00 D1 E0 68 40 A1 BB 3B 89 45 CC 8B 75 08 8B 7D 10 C7 45 E8 00 00 00 00 C7 45 E4 00 00 00 00 E8 FC 66 FF FF
+***** EXPENSIVE 1808 358
+
+. 1807 8051F24 20
+. 58 5A 68 1F B9 FC 3F 68 40 A1 BB 3B D9 5D C8 E8 E8 66 FF FF
+***** EXPENSIVE 1809 359
+
+. 1808 8051F38 20
+. 59 58 68 D7 8A 05 40 68 F0 B8 CC 6C D9 5D C4 E8 D4 66 FF FF
+***** EXPENSIVE 1810 360
+
+. 1809 8051F4C 20
+. 58 5A 68 1F B9 EC 3F 68 40 A1 BB 3B D9 5D C0 E8 D0 67 FF FF
+***** EXPENSIVE 1811 361
+
+. 1810 8051F60 20
+. 59 58 68 1F B9 FC 3F 68 40 A1 BB 3B D9 5D BC E8 BC 67 FF FF
+***** EXPENSIVE 1812 362
+
+. 1811 8051F74 20
+. 58 5A 68 D7 8A 05 40 68 F0 B8 CC 6C D9 5D B8 E8 A8 67 FF FF
+***** EXPENSIVE 1813 363
+
+. 1812 8051F88 25
+. C7 45 E0 00 00 00 00 8B 45 D4 83 C4 10 39 45 E0 D9 5D B4 0F 83 6D 09 00 00
+
+. 1813 8051FA1 42
+. 8B 55 D0 C1 E2 02 89 95 94 FE FF FF 03 55 D0 89 95 90 FE FF FF C7 85 3C FE FF FF 00 00 00 00 8B 45 E0 85 C0 0F 85 4D 09 00 00
+***** EXPENSIVE 1815 364
+
+. 1814 8051FCB 89
+. D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D9 55 B0 D9 C9 D9 55 AC D9 C9 D9 55 A8 D9 C9 D9 55 A4 D9 C9 D9 55 A0 D9 C9 D9 55 9C D9 C9 D9 55 98 D9 C9 D9 55 94 D9 C9 D9 55 90 D9 C9 D9 55 8C D9 C9 D9 5D 88 D9 5D 84 C7 45 DC 00 00 00 00 8B 45 D0 39 45 DC 0F 83 CE 08 00 00
+***** EXPENSIVE 1816 365
+
+. 1815 8052024 188
+. 8B 45 D8 D1 E0 89 85 7C FF FF FF 03 45 D8 D9 45 C8 89 85 70 FF FF FF D1 E0 D8 45 C4 89 85 58 FF FF FF 8B 45 18 DD 83 B4 F8 FF FF DD 83 34 F6 FF FF D9 CA D9 9D 88 FE FF FF 8B 55 D8 D9 45 C4 F7 D8 50 D9 45 C8 D9 CA DD 9D 78 FE FF FF D9 CA DD 9D 70 FE FF FF D9 45 C0 D9 CA DD 9D 60 FE FF FF D9 45 BC D9 45 B8 D9 CA C1 E2 02 DD 9D 68 FE FF FF D9 CA DD 9D 58 FE FF FF D9 C9 D8 45 B8 D9 45 BC D9 CA DD 9D 48 FE FF FF D9 45 B4 89 95 68 FF FF FF DB 04 24 D9 CA 03 55 D8 89 95 5C FF FF FF D9 9D 84 FE FF FF D9 CA DD 9D 50 FE FF FF D9 C9 DD 9D 40 FE FF FF DD 9D F0 FE FF FF
+***** EXPENSIVE 1817 366
+
+. 1816 80520E0 193
+. 8B 45 D0 D1 E0 89 85 9C FE FF FF 03 45 D0 89 85 98 FE FF FF 8B 4D E4 D1 E0 8B 55 E8 83 C4 04 89 85 8C FE FF FF 0F AF 4D 14 0F AF 55 0C 8D 76 00 8B 45 E8 03 45 D8 0F AF 45 0C D9 04 C6 D9 44 C6 04 8B 45 E8 03 85 7C FF FF FF 0F AF 45 0C D9 04 C6 D9 44 C6 04 8B 45 E8 03 85 70 FF FF FF 0F AF 45 0C D9 04 C6 D9 44 C6 04 D9 CB 8B 45 E8 03 85 68 FF FF FF 0F AF 45 0C D9 5D 80 D9 9D 74 FF FF FF D9 44 C6 04 D9 04 C6 8B 45 E8 03 85 5C FF FF FF 0F AF 45 0C D9 44 C6 04 D9 CB D9 9D 78 FF FF FF D9 04 C6 D9 CC 8B 45 E8 03 85 58 FF FF FF 0F AF 45 0C D9 9D 6C FF FF FF D9 C9 D9 9D 64 FF FF FF
+***** EXPENSIVE 1818 367
+
+. 1817 80521A1 192
+. D9 C4 D9 04 C6 DC C1 D9 CC D9 9D 60 FF FF FF D9 44 C6 04 D9 C9 D9 9D 54 FF FF FF D9 C4 D8 C1 D9 CD DE E1 D9 85 78 FF FF FF D9 CE DE E4 D9 CD D8 C2 D9 45 80 D8 85 60 FF FF FF D9 C9 D9 9D 40 FF FF FF D9 CA D8 AD 78 FF FF FF D9 CB D9 9D 4C FF FF FF D9 85 64 FF FF FF D9 85 74 FF FF FF D8 C2 D9 CB D9 9D 44 FF FF FF D9 CB D9 9D 38 FF FF FF D8 A5 74 FF FF FF D9 CA D8 A5 6C FF FF FF D9 CB D9 9D 50 FF FF FF D9 C9 D9 9D 30 FF FF FF D9 85 64 FF FF FF D9 CA D9 95 2C FF FF FF D9 85 44 FF FF FF D9 C9 D8 85 38 FF FF FF D9 85 40 FF FF FF D9 CC D8 85 6C FF FF FF D9 CA D8 85 54 FF FF FF
+***** EXPENSIVE 1819 368
+
+. 1818 8052261 185
+. D9 CC D8 85 50 FF FF FF D9 45 80 D9 CA D9 9D 20 FF FF FF D9 44 D6 04 D9 CA D8 A5 60 FF FF FF D9 CB D9 9D 34 FF FF FF D9 CC D9 95 28 FF FF FF D9 85 30 FF FF FF D9 C9 D8 04 D6 D9 CA D8 C5 D9 C9 D8 C3 D9 CA D8 C4 D9 C9 D8 85 34 FF FF FF D9 CE D9 95 48 FF FF FF D9 CB D9 9D 3C FF FF FF D9 C9 D9 9D 24 FF FF FF D9 9D 1C FF FF FF D9 CB D9 9D 18 FF FF FF D9 85 88 FE FF FF D8 45 C0 DD 85 78 FE FF FF DE F9 DC A5 70 FE FF FF D9 85 28 FF FF FF D8 C2 D9 C1 DE C9 D9 9D E0 FE FF FF DD 85 68 FE FF FF D9 CB D8 85 34 FF FF FF D9 CB D8 C0 D9 C9 DE CB DC A5 60 FE FF FF
+***** EXPENSIVE 1820 369
+
+. 1819 805231A 212
+. D9 CA D9 9D DC FE FF FF D9 C9 DC A5 58 FE FF FF D9 85 54 FF FF FF D9 C9 DC B5 78 FE FF FF D9 C9 D8 E2 D8 C9 D9 9D 14 FF FF FF D9 85 50 FF FF FF D8 A5 34 FF FF FF DE C9 D9 9D 10 FF FF FF DD 85 60 FE FF FF D8 C0 DC AD 68 FE FF FF DC 85 58 FE FF FF D9 85 34 FF FF FF D9 C9 DC B5 78 FE FF FF D9 C9 D8 A5 40 FF FF FF D9 C1 DE C9 D9 CA D8 A5 44 FF FF FF D9 CA D9 9D 08 FF FF FF DD 85 58 FE FF FF D9 C9 DE CA D8 C0 D9 C9 D9 9D 0C FF FF FF D8 AD 88 FE FF FF D9 85 44 FF FF FF D9 C9 DC B5 78 FE FF FF D9 C9 D8 A5 54 FF FF FF D9 9D 44 FF FF FF D9 C0 D8 8D 44 FF FF FF D9 9D 04 FF FF FF D9 85 40 FF FF FF D8 A5 50 FF FF FF DE C9 D9 9D 00 FF FF FF
+***** EXPENSIVE 1821 370
+
+. 1820 80523EE 218
+. D9 85 84 FE FF FF D8 65 B4 DD 85 78 FE FF FF DE F9 DC 8D F0 FE FF FF D9 85 24 FF FF FF D8 85 4C FF FF FF D9 C1 DE C9 D9 9D FC FE FF FF D9 85 20 FF FF FF D8 C2 DE C9 DD 85 50 FE FF FF D8 C0 DC A5 48 FE FF FF DC 85 40 FE FF FF D9 C9 D9 9D EC FE FF FF DC B5 78 FE FF FF D9 85 4C FF FF FF D9 C9 DC 8D F0 FE FF FF D9 C9 D8 A5 30 FF FF FF D8 C9 D9 9D E8 FE FF FF D9 C1 D8 A5 2C FF FF FF DE C9 D9 9D E4 FE FF FF DD 85 48 FE FF FF D8 C0 DC AD 50 FE FF FF DC A5 40 FE FF FF D9 85 30 FF FF FF D9 C9 DC B5 78 FE FF FF DC 8D F0 FE FF FF D9 C9 D8 A5 3C FF FF FF D9 9D 30 FF FF FF D9 85 2C FF FF FF D9 C1 D8 8D 30 FF FF FF D9 C9 D8 A5 38 FF FF FF D9 C9 D9 5D EC DE C9
+***** EXPENSIVE 1822 371
+
+. 1821 80524C8 214
+. D9 45 EC D9 C9 D9 5D EC DD 85 40 FE FF FF D8 C0 D8 85 84 FE FF FF DC B5 78 FE FF FF DC 8D F0 FE FF FF D9 85 3C FF FF FF D8 A5 4C FF FF FF D9 C1 DE C9 D9 85 1C FF FF FF D8 85 E0 FE FF FF D9 85 38 FF FF FF DE E5 D9 9D E0 FE FF FF D9 85 14 FF FF FF D9 CA DE CC D9 C9 D8 85 0C FF FF FF D9 85 18 FF FF FF D8 85 DC FE FF FF D9 C9 D9 9D D8 FE FF FF D9 85 04 FF FF FF D8 A5 0C FF FF FF D9 C9 D9 9D DC FE FF FF D9 85 10 FF FF FF D8 85 08 FF FF FF D9 C9 D9 9D D0 FE FF FF D9 85 10 FF FF FF D9 45 EC D9 CA D9 9D D4 FE FF FF D9 CA D9 5D EC D9 85 00 FF FF FF D9 85 14 FF FF FF D9 CB D9 E0 D9 45 EC D9 CA D8 A5 08 FF FF FF D9 CE D9 5D EC D8 A5 00 FF FF FF
+***** EXPENSIVE 1823 372
+
+. 1822 805259E 217
+. D9 CB D9 E0 D9 45 EC D9 CE D9 9D CC FE FF FF D8 A5 04 FF FF FF D9 CB D9 9D C8 FE FF FF D9 85 E8 FE FF FF D8 C4 D9 9D C4 FE FF FF D9 85 E0 FE FF FF D8 85 D8 FE FF FF D9 9D D8 FE FF FF D9 85 DC FE FF FF D8 85 D4 FE FF FF D9 9D D4 FE FF FF D9 85 E0 FE FF FF D8 85 D0 FE FF FF D9 9D D0 FE FF FF D9 85 DC FE FF FF D8 85 CC FE FF FF D9 9D CC FE FF FF D9 85 DC FE FF FF D8 85 C8 FE FF FF D9 85 E4 FE FF FF D9 C9 D9 9D C8 FE FF FF D9 85 C4 FE FF FF D8 85 FC FE FF FF D9 C9 D8 C3 D9 C9 D9 9D C4 FE FF FF D8 85 EC FE FF FF D9 85 D8 FE FF FF D8 C1 D9 CD D8 EA D9 CB D8 EE D9 CD D9 9D C0 FE FF FF D9 C9 D9 E0 D9 85 D4 FE FF FF D9 CE D9 E0 D9 C9 D8 A5 E8 FE FF FF
+***** EXPENSIVE 1824 373
+
+. 1823 8052677 192
+. D9 C9 D8 A5 E4 FE FF FF D9 CE D8 A5 C4 FE FF FF D9 CB D8 85 FC FE FF FF D9 C9 D8 85 FC FE FF FF D9 CE D8 85 EC FE FF FF D9 CC D8 85 E0 FE FF FF D9 CD D8 85 EC FE FF FF D9 CB D9 9D BC FE FF FF D9 85 C8 FE FF FF D8 E6 D9 9D B8 FE FF FF D9 85 D0 FE FF FF D8 E3 D9 CB D8 85 D0 FE FF FF D9 CA D8 AD D8 FE FF FF D9 CB D9 9D B4 FE FF FF D9 C9 D9 9D AC FE FF FF D9 85 CC FE FF FF D9 85 CC FE FF FF D9 C9 D8 C2 D9 C9 DE E2 D9 C4 D9 85 C8 FE FF FF D9 CC D9 9D A4 FE FF FF D9 85 D4 FE FF FF D9 CE D8 E5 D9 C9 DE C5 D9 CB DE C6 D9 CC D8 85 C4 FE FF FF D9 85 1C FF FF FF D9 85 18 FF FF FF
+***** EXPENSIVE 1825 374
+
+. 1824 8052737 168
+. D9 C9 D9 1C CF D9 5C CF 04 D9 CC D9 9D B0 FE FF FF D9 CB D9 9D A0 FE FF FF 8B 45 E4 D9 45 B0 D9 45 AC D9 45 B0 D9 45 AC D9 CA 03 45 D0 D8 8D BC FE FF FF D9 CA D8 8D C0 FE FF FF D9 CB D8 8D C0 FE FF FF D9 C9 D8 8D BC FE FF FF D9 C9 0F AF 45 14 DE E2 DE C2 D9 1C C7 D9 5C C7 04 D9 45 A8 8B 45 E4 D9 45 A4 03 85 9C FE FF FF D8 8D B8 FE FF FF D9 C9 D8 CB 0F AF 45 14 DE E1 D9 C9 D9 95 A8 FE FF FF D9 C9 D9 1C C7 D9 45 A8 D8 8D B8 FE FF FF D9 CA D8 4D A4 DE C2 D9 45 9C D9 45 A0 D9 C9 D8 8D B0 FE FF FF D9 C9
+***** EXPENSIVE 1826 375
+
+. 1825 80527DF 163
+. D8 8D B4 FE FF FF D9 CB D9 54 C7 04 D9 CB DE E1 8B 45 E4 D9 45 A0 D9 45 9C 03 85 98 FE FF FF D8 8D B4 FE FF FF D9 C9 D8 8D B0 FE FF FF 0F AF 45 14 DE C1 D9 C9 D9 1C C7 D9 5C C7 04 D9 45 98 8B 45 E4 D9 45 94 D9 45 94 D9 C9 03 85 94 FE FF FF D8 CD D9 CA D8 8D AC FE FF FF D9 CD D8 4D 98 D9 C9 D8 8D AC FE FF FF D9 CD 0F AF 45 14 DE E2 DE C4 D9 1C C7 D9 CA D9 5C C7 04 D9 45 90 8B 45 E4 D9 45 8C 03 85 90 FE FF FF D8 CC D9 C9 D8 CB 0F AF 45 14 DE E1 D9 C9 D9 9D B8 FE FF FF D9 1C C7 D9 45 8C
+***** EXPENSIVE 1827 376
+
+. 1826 8052882 112
+. DE C9 D9 C9 D8 4D 90 DE C1 D9 45 84 D9 45 88 D9 C9 D8 8D A0 FE FF FF D9 C9 D8 8D A4 FE FF FF D9 CA D9 5C C7 04 DE E9 8B 45 E4 D9 45 88 D9 45 84 D9 C9 03 85 8C FE FF FF D8 8D A0 FE FF FF D9 C9 D8 8D A4 FE FF FF 0F AF 45 14 DE C1 D9 C9 D9 1C C7 D9 5C C7 04 FF 45 DC 8B 45 D0 03 55 0C FF 45 E8 03 4D 14 FF 45 E4 39 45 DC 0F 82 1E F8 FF FF
+
+. 1827 80528F2 28
+. 8B 55 CC FF 45 E0 8B 45 D4 01 55 E4 83 85 3C FE FF FF 08 39 45 E0 0F 82 B2 F6 FF FF
+
+. 1828 805290E 10
+. 8D 65 F4 5B 5E 31 C0 5F C9 C3
+
+. 1829 80509C6 8
+. 83 C4 40 E9 59 FF FF FF
+
+. 1830 804CD19 13
+. 8D 76 00 31 D2 89 C8 F7 F6 85 D2 74 0F
+
+. 1831 804CD26 15
+. 89 F6 83 C6 02 31 D2 89 C8 F7 F6 85 D2 75 F3
+vex iropt: 4 x unrolling (16 sts -> 64 sts)
+
+. 1832 804CD28 13
+. 83 C6 02 31 D2 89 C8 F7 F6 85 D2 75 F3
+
+. 1833 804CD35 20
+. 89 C8 31 D2 F7 F6 8B 55 14 89 34 BA 47 83 F8 01 89 C1 75 D3
+
+. 1834 8058B09 34
+. 8B 4D 14 FF B4 B9 08 01 00 00 FF 75 10 FF 75 EC 52 FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 A1 15 00 00
+***** EXPENSIVE 1836 377
+
+. 1835 805A0CC 90
+. 55 89 E5 57 56 53 81 EC 1C 01 00 00 8B 45 20 31 D2 F7 75 18 89 45 DC 31 D2 8B 45 20 F7 75 1C 89 45 D8 31 D2 8B 45 1C F7 75 18 31 D2 52 89 45 D4 8B 45 18 50 E8 00 00 00 00 5B 81 C3 0F 70 00 00 DF 2C 24 DC BB D4 F5 FF FF 83 EC 08 DD 14 24 DD 9D E8 FE FF FF E8 FA E4 FE FF
+***** EXPENSIVE 1837 378
+
+. 1836 805A126 17
+. DD 85 E8 FE FF FF DD 1C 24 DD 5D C8 E8 F9 E5 FE FF
+***** EXPENSIVE 1838 379
+
+. 1837 805A137 25
+. C7 45 E0 00 00 00 00 8B 55 D8 83 C4 10 39 55 E0 DD 5D C0 0F 83 B9 01 00 00
+***** EXPENSIVE 1839 380
+
+. 1838 805A150 184
+. 8B 45 D4 D1 E0 DD 83 34 F6 FF FF DD 83 44 F6 FF FF D9 C9 DD 9D 40 FF FF FF DD 9D 38 FF FF FF C7 85 10 FF FF FF 00 00 00 00 C7 85 0C FF FF FF 00 00 00 00 C7 85 08 FF FF FF 00 00 00 00 C7 85 04 FF FF FF 00 00 00 00 89 85 18 FF FF FF 89 85 14 FF FF FF 8B 45 14 8B 8D 0C FF FF FF 8B BD 10 FF FF FF DD 85 40 FF FF FF DD 85 38 FF FF FF D9 C9 0F AF 85 04 FF FF FF 8B 95 08 FF FF FF 49 4F DD 5D B8 DD 5D B0 C7 45 D0 00 00 00 00 89 45 A8 8B 75 18 89 55 AC 89 8D E4 FE FF FF 89 BD 00 FF FF FF 8B 45 D0 DD 85 38 FF FF FF 85 C0 D9 C0 DD 85 40 FF FF FF D9 C2 74 30
+
+. 1839 805A238 11
+. 31 C9 3B 4D 18 0F 83 01 07 00 00
+***** EXPENSIVE 1841 381
+
+. 1840 805A243 21
+. 8B 55 AC 89 F6 8B 45 0C 0F AF C2 8B 7D 08 85 C9 DD 04 C7 74 24
+***** EXPENSIVE 1842 382
+
+. 1841 805A27C 25
+. D9 C2 41 D8 C9 D9 C9 03 55 DC D8 CA D9 CD 3B 4D 18 DE C1 D9 CB DE C4 73 08
+***** EXPENSIVE 1843 383
+
+. 1842 805A295 8
+. D9 CA D9 CB D9 CA EB AB
+***** EXPENSIVE 1844 384
+
+. 1843 805A248 16
+. 8B 45 0C 0F AF C2 8B 7D 08 85 C9 DD 04 C7 74 24
+***** EXPENSIVE 1845 385
+
+. 1844 805A258 61
+. DD 45 B8 D8 CB DD 45 B0 DD 45 B0 D9 C9 D8 CC D9 C9 DE CD D9 CB DC 4D B8 D9 C9 DE E3 DE C3 D9 C9 D9 CA D9 C9 D9 C2 41 D8 C9 D9 C9 03 55 DC D8 CA D9 CD 3B 4D 18 DE C1 D9 CB DE C4 73 08
+***** EXPENSIVE 1846 386
+
+. 1845 805A29D 15
+. DD D8 DD D8 8B 45 D0 85 C0 0F 85 4D 06 00 00
+***** EXPENSIVE 1847 387
+
+. 1846 805A2AC 48
+. DD D9 8B 45 A8 8B 55 10 DD 1C C2 8B 8D 18 FF FF FF 8B BD 14 FF FF FF 4E FF 45 D0 01 8D 00 FF FF FF 01 BD E4 FE FF FF 39 75 D0 0F 86 15 FF FF FF
+***** EXPENSIVE 1848 388
+
+. 1847 805A1F1 23
+. 8B 45 D0 DD 85 38 FF FF FF 85 C0 D9 C0 DD 85 40 FF FF FF D9 C2 74 30
+***** EXPENSIVE 1849 389
+
+. 1848 805A208 59
+. DD 45 B8 DD 45 B8 DD 45 B0 DD 45 B0 D9 CB D9 E0 D9 CA DC 4D C8 D9 C9 DC 4D C0 D9 CA DC 4D C0 D9 CB DC 4D C8 D9 C9 DE C2 DE C2 DD 5D B8 DD 5D B0 31 C9 3B 4D 18 0F 83 01 07 00 00
+
+. 1849 805A8F9 5
+. 39 75 D0 73 29
+***** EXPENSIVE 1851 390
+
+. 1850 805A8FE 41
+. 8B 45 14 0F AF 85 E4 FE FF FF 8B 4D 10 8B BD E4 FE FF FF DD 1C C1 8D 04 FD 08 00 00 00 0F AF 45 14 DD 1C 08 E9 90 F9 FF FF
+
+. 1851 805A2B7 37
+. 8B 8D 18 FF FF FF 8B BD 14 FF FF FF 4E FF 45 D0 01 8D 00 FF FF FF 01 BD E4 FE FF FF 39 75 D0 0F 86 15 FF FF FF
+
+. 1852 805A2DC 45
+. 8B 45 1C 8B 55 D4 FF 45 E0 8B 4D D8 01 85 10 FF FF FF 01 85 0C FF FF FF 01 95 08 FF FF FF 01 85 04 FF FF FF 39 4D E0 0F 82 9A FE FF FF
+
+. 1853 805A309 10
+. 83 7D D4 01 0F 84 05 05 00 00
+
+. 1854 805A818 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1855 8053A6D 35
+. 8B 75 D8 8B 45 14 FF B4 B0 08 01 00 00 57 FF 75 DC 51 FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 98 15 00 00
+***** EXPENSIVE 1857 391
+
+. 1856 8055028 90
+. 55 89 E5 57 56 53 81 EC 0C 01 00 00 8B 45 20 31 D2 F7 75 18 89 45 DC 31 D2 8B 45 20 F7 75 1C 89 45 D8 31 D2 8B 45 1C F7 75 18 31 D2 52 89 45 D4 8B 45 18 50 E8 00 00 00 00 5B 81 C3 B3 C0 00 00 DF 2C 24 DC BB D4 F5 FF FF 83 EC 08 DD 14 24 DD 9D F8 FE FF FF E8 9E 35 FF FF
+***** EXPENSIVE 1858 392
+
+. 1857 8055082 17
+. DD 85 F8 FE FF FF DD 1C 24 DD 5D C8 E8 9D 36 FF FF
+***** EXPENSIVE 1859 393
+
+. 1858 8055093 25
+. C7 45 E0 00 00 00 00 8B 55 D4 83 C4 10 39 55 E0 DD 5D C0 0F 83 6C 01 00 00
+***** EXPENSIVE 1860 394
+
+. 1859 80550AC 88
+. 8B 45 D8 DD 83 34 F6 FF FF DD 83 44 F6 FF FF D9 C9 D1 E0 DD 9D 60 FF FF FF DD 9D 58 FF FF FF C7 85 10 FF FF FF 00 00 00 00 C7 85 0C FF FF FF 00 00 00 00 89 85 18 FF FF FF C7 45 D0 00 00 00 00 8B 55 18 39 55 D0 DD 85 60 FF FF FF DD 85 58 FF FF FF 0F 83 40 07 00 00
+***** EXPENSIVE 1861 395
+
+. 1860 8055104 51
+. 0F AF 55 D8 8B 8D 0C FF FF FF 0F AF 55 E0 C7 85 14 FF FF FF 00 00 00 00 89 4D BC 89 55 B4 8B 55 D0 DD 85 58 FF FF FF 85 D2 DD 85 60 FF FF FF D9 C1 74 24
+
+. 1861 805515B 53
+. 8B 45 BC 0F AF 45 D8 0F AF 45 0C 89 45 B8 8B 45 18 0F AF 45 D8 0F AF 45 E0 8D 48 FF 8B 45 D8 8B 7D B4 D1 E0 31 F6 4F 89 85 1C FF FF FF 85 F6 0F 85 40 06 00 00
+***** EXPENSIVE 1863 396
+
+. 1862 8055190 49
+. D9 C1 8B 55 B8 8B 45 08 DD 85 58 FF FF FF D9 C9 DC 0C D0 D9 C9 D8 CA DE E9 DE C3 46 8B 45 18 29 F0 03 8D 1C FF FF FF 03 BD 18 FF FF FF 39 C6 76 C7
+
+. 1863 8055188 8
+. 85 F6 0F 85 40 06 00 00
+***** EXPENSIVE 1865 397
+
+. 1864 80557D0 35
+. D9 C4 D9 C4 D8 CA D9 C9 D8 CB DE E1 8B 45 18 D9 C5 DE CA D9 CA D8 CC 29 F0 DE C1 39 C6 D9 C1 D9 C1 74 39
+***** EXPENSIVE 1866 398
+
+. 1865 80557F3 57
+. D9 CB 8D 04 CD 08 00 00 00 0F AF 45 0C 8B 55 0C 0F AF D1 89 85 F4 FE FF FF 8B 45 08 DC 0C D0 D9 CA 8B 95 F4 FE FF FF DC 0C 02 DE EA D9 C9 D8 C0 DE C3 D9 C9 E9 7F F9 FF FF
+
+. 1866 80551AB 22
+. 46 8B 45 18 29 F0 03 8D 1C FF FF FF 03 BD 18 FF FF FF 39 C6 76 C7
+***** EXPENSIVE 1868 399
+
+. 1867 80551C1 50
+. DD D8 DD D8 8B 85 10 FF FF FF 03 85 14 FF FF FF 0F AF 45 14 8B 4D 10 DD 1C C1 8B 7D DC FF 45 D0 8B 45 18 01 BD 14 FF FF FF 39 45 D0 0F 82 2F FF FF FF
+***** EXPENSIVE 1869 400
+
+. 1868 8055122 21
+. 8B 55 D0 DD 85 58 FF FF FF 85 D2 DD 85 60 FF FF FF D9 C1 74 24
+***** EXPENSIVE 1870 401
+vex iropt: fold_Expr: no rule for: 32to1(0x1:I32)
+
+. 1869 8055137 89
+. DD 45 C8 D8 CD DD 45 C0 DD 45 C0 D9 C9 D8 CE D9 C9 DE CF D9 CD DC 4D C8 D9 C9 DE E5 DE C5 D9 CB D9 CC D9 CB 8B 45 BC 0F AF 45 D8 0F AF 45 0C 89 45 B8 8B 45 18 0F AF 45 D8 0F AF 45 E0 8D 48 FF 8B 45 D8 8B 7D B4 D1 E0 31 F6 4F 89 85 1C FF FF FF 85 F6 0F 85 40 06 00 00
+***** EXPENSIVE 1871 402
+
+. 1870 80551F3 37
+. DD D8 DD D8 8B 55 D8 8B 4D 18 FF 45 E0 8B 7D D4 01 95 10 FF FF FF 01 8D 0C FF FF FF 39 7D E0 0F 82 CD FE FF FF
+
+. 1871 8055218 10
+. 83 7D D8 01 0F 84 25 05 00 00
+
+. 1872 8055747 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1873 805B161 34
+. 8B 75 14 FF B4 BE 08 01 00 00 FF 75 10 FF 75 EC 52 FF 75 D4 FF 75 D8 FF 75 DC FF 75 E0 E8 89 17 00 00
+***** EXPENSIVE 1875 403
+
+. 1874 805C90C 90
+. 55 89 E5 57 56 53 81 EC DC 00 00 00 8B 45 20 31 D2 F7 75 18 89 45 D8 31 D2 8B 45 20 F7 75 1C 89 45 D4 31 D2 8B 45 1C F7 75 18 31 D2 52 89 45 D0 8B 45 18 50 E8 00 00 00 00 5B 81 C3 CF 47 00 00 DF 2C 24 DC BB D4 F5 FF FF 83 EC 08 DD 14 24 DD 9D 28 FF FF FF E8 BA BC FE FF
+***** EXPENSIVE 1876 404
+
+. 1875 805C966 17
+. DD 85 28 FF FF FF DD 1C 24 D9 5D C8 E8 B9 BD FE FF
+***** EXPENSIVE 1877 405
+
+. 1876 805C977 25
+. C7 45 DC 00 00 00 00 8B 55 D4 83 C4 10 39 55 DC D9 5D C4 0F 83 B1 01 00 00
+***** EXPENSIVE 1878 406
+
+. 1877 805C990 175
+. 8B 45 D0 D1 E0 D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D9 5D 80 D9 9D 7C FF FF FF C7 85 50 FF FF FF 00 00 00 00 C7 85 4C FF FF FF 00 00 00 00 C7 85 48 FF FF FF 00 00 00 00 C7 85 44 FF FF FF 00 00 00 00 89 85 58 FF FF FF 89 85 54 FF FF FF 8B 45 14 8B B5 4C FF FF FF 8B BD 50 FF FF FF D9 45 80 D9 85 7C FF FF FF D9 C9 0F AF 85 44 FF FF FF 8B 95 48 FF FF FF 4E 4F D9 5D C0 D9 5D BC C7 45 CC 00 00 00 00 89 45 B4 8B 4D 18 89 55 B8 89 B5 24 FF FF FF 89 BD 40 FF FF FF 8B 45 CC D9 85 7C FF FF FF 85 C0 D9 C0 D9 45 80 D9 C2 74 30
+
+. 1878 805CA6F 11
+. 31 F6 3B 75 18 0F 83 85 06 00 00
+***** EXPENSIVE 1880 407
+
+. 1879 805CA7A 22
+. 8B 55 B8 8D 76 00 8B 45 0C 0F AF C2 8B 7D 08 85 F6 D9 04 87 74 24
+***** EXPENSIVE 1881 408
+
+. 1880 805CAB4 25
+. D9 C2 46 D8 C9 D9 C9 03 55 D8 D8 CA D9 CD 3B 75 18 DE C1 D9 CB DE C4 73 08
+***** EXPENSIVE 1882 409
+
+. 1881 805CACD 8
+. D9 CA D9 CB D9 CA EB AB
+***** EXPENSIVE 1883 410
+
+. 1882 805CA80 16
+. 8B 45 0C 0F AF C2 8B 7D 08 85 F6 D9 04 87 74 24
+***** EXPENSIVE 1884 411
+
+. 1883 805CA90 61
+. D9 45 C0 D8 CB D9 45 BC D9 45 BC D9 C9 D8 CC D9 C9 DE CD D9 CB D8 4D C0 D9 C9 DE E3 DE C3 D9 C9 D9 CA D9 C9 D9 C2 46 D8 C9 D9 C9 03 55 D8 D8 CA D9 CD 3B 75 18 DE C1 D9 CB DE C4 73 08
+***** EXPENSIVE 1885 412
+
+. 1884 805CAD5 15
+. DD D8 DD D8 8B 45 CC 85 C0 0F 85 D0 05 00 00
+***** EXPENSIVE 1886 413
+
+. 1885 805CAE4 48
+. DD D9 8B 45 B4 8B 55 10 D9 1C 82 8B B5 58 FF FF FF 8B BD 54 FF FF FF 49 FF 45 CC 01 B5 40 FF FF FF 01 BD 24 FF FF FF 39 4D CC 0F 86 17 FF FF FF
+***** EXPENSIVE 1887 414
+
+. 1886 805CA2B 20
+. 8B 45 CC D9 85 7C FF FF FF 85 C0 D9 C0 D9 45 80 D9 C2 74 30
+***** EXPENSIVE 1888 415
+
+. 1887 805CA3F 59
+. D9 45 C0 D9 45 C0 D9 45 BC D9 45 BC D9 CB D9 E0 D9 CA D8 4D C8 D9 C9 D8 4D C4 D9 CA D8 4D C4 D9 CB D8 4D C8 D9 C9 DE C2 DE C2 D9 5D C0 D9 5D BC 31 F6 3B 75 18 0F 83 85 06 00 00
+
+. 1888 805D0B4 5
+. 39 4D CC 73 29
+***** EXPENSIVE 1890 416
+
+. 1889 805D0B9 41
+. 8B 45 14 0F AF 85 24 FF FF FF 8B 75 10 8B BD 24 FF FF FF D9 1C 86 8D 04 BD 04 00 00 00 0F AF 45 14 D9 1C 30 E9 0D FA FF FF
+
+. 1890 805CAEF 37
+. 8B B5 58 FF FF FF 8B BD 54 FF FF FF 49 FF 45 CC 01 B5 40 FF FF FF 01 BD 24 FF FF FF 39 4D CC 0F 86 17 FF FF FF
+
+. 1891 805CB14 45
+. 8B 45 1C 8B 55 D0 FF 45 DC 8B 4D D4 01 85 50 FF FF FF 01 85 4C FF FF FF 01 95 48 FF FF FF 01 85 44 FF FF FF 39 4D DC 0F 82 9F FE FF FF
+
+. 1892 805CB41 10
+. 83 7D D0 01 0F 84 98 04 00 00
+
+. 1893 805CFE3 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 1894 8056268 35
+. 8B 45 D8 8B 55 14 FF B4 82 08 01 00 00 56 FF 75 DC 51 FF 75 E0 FF 75 E8 FF 75 E4 FF 75 EC E8 AD 16 00 00
+***** EXPENSIVE 1896 417
+
+. 1895 8057938 90
+. 55 89 E5 57 56 53 81 EC EC 00 00 00 8B 45 20 31 D2 F7 75 18 89 45 D8 31 D2 8B 45 20 F7 75 1C 89 45 D4 31 D2 8B 45 1C F7 75 18 31 D2 52 89 45 D0 8B 45 18 50 E8 00 00 00 00 5B 81 C3 A3 97 00 00 DF 2C 24 DC BB D4 F5 FF FF 83 EC 08 DD 14 24 DD 9D 18 FF FF FF E8 8E 0C FF FF
+***** EXPENSIVE 1897 418
+
+. 1896 8057992 17
+. DD 85 18 FF FF FF DD 1C 24 D9 5D C8 E8 8D 0D FF FF
+***** EXPENSIVE 1898 419
+
+. 1897 80579A3 25
+. C7 45 DC 00 00 00 00 8B 55 D0 83 C4 10 39 55 DC D9 5D C4 0F 83 59 01 00 00
+***** EXPENSIVE 1899 420
+
+. 1898 80579BC 76
+. 8B 45 D4 D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D1 E0 D9 5D 88 D9 5D 84 C7 85 3C FF FF FF 00 00 00 00 C7 85 38 FF FF FF 00 00 00 00 89 85 44 FF FF FF C7 45 CC 00 00 00 00 8B 55 18 39 55 CC D9 45 88 D9 45 84 0F 83 E0 06 00 00
+***** EXPENSIVE 1900 421
+
+. 1899 8057A08 45
+. 0F AF 55 D4 8B 8D 38 FF FF FF 0F AF 55 DC C7 85 40 FF FF FF 00 00 00 00 89 4D C0 89 55 B8 8B 45 CC D9 45 84 85 C0 D9 45 88 D9 C1 74 24
+
+. 1900 8057A59 55
+. 8B 45 C0 0F AF 45 D4 0F AF 45 0C 89 45 BC 8B 45 18 0F AF 45 D4 0F AF 45 DC 8D 48 FF 8B 45 D4 8B 7D B8 D1 E0 31 F6 4F 89 85 48 FF FF FF 89 F6 85 F6 0F 85 E4 05 00 00
+***** EXPENSIVE 1902 422
+
+. 1901 8057A90 46
+. D9 C1 8B 55 BC 8B 45 08 D9 45 84 D9 C9 D8 0C 90 D9 C9 D8 CA DE E9 DE C3 46 8B 45 18 29 F0 03 8D 48 FF FF FF 03 BD 44 FF FF FF 39 C6 76 CA
+
+. 1902 8057A88 8
+. 85 F6 0F 85 E4 05 00 00
+***** EXPENSIVE 1904 423
+
+. 1903 8058074 35
+. D9 C4 D9 C4 D8 CA D9 C9 D8 CB DE E1 8B 45 18 D9 C5 DE CA D9 CA D8 CC 29 F0 DE C1 39 C6 D9 C1 D9 C1 74 39
+***** EXPENSIVE 1905 424
+
+. 1904 8058097 57
+. D9 CB 8D 04 8D 04 00 00 00 0F AF 45 0C 8B 55 0C 0F AF D1 89 85 14 FF FF FF 8B 45 08 D8 0C 90 D9 CA 8B 95 14 FF FF FF D8 0C 02 DE EA D9 C9 D8 C0 DE C3 D9 C9 E9 D8 F9 FF FF
+
+. 1905 8057AA8 22
+. 46 8B 45 18 29 F0 03 8D 48 FF FF FF 03 BD 44 FF FF FF 39 C6 76 CA
+***** EXPENSIVE 1907 425
+
+. 1906 8057ABE 50
+. DD D8 DD D8 8B 85 3C FF FF FF 03 85 40 FF FF FF 0F AF 45 14 8B 4D 10 D9 1C 81 8B 7D D8 FF 45 CC 8B 45 18 01 BD 40 FF FF FF 39 45 CC 0F 82 36 FF FF FF
+***** EXPENSIVE 1908 426
+
+. 1907 8057A26 15
+. 8B 45 CC D9 45 84 85 C0 D9 45 88 D9 C1 74 24
+***** EXPENSIVE 1909 427
+vex iropt: fold_Expr: no rule for: 32to1(0x1:I32)
+
+. 1908 8057A35 91
+. D9 45 C8 D8 CD D9 45 C4 D9 45 C4 D9 C9 D8 CE D9 C9 DE CF D9 CD D8 4D C8 D9 C9 DE E5 DE C5 D9 CB D9 CC D9 CB 8B 45 C0 0F AF 45 D4 0F AF 45 0C 89 45 BC 8B 45 18 0F AF 45 D4 0F AF 45 DC 8D 48 FF 8B 45 D4 8B 7D B8 D1 E0 31 F6 4F 89 85 48 FF FF FF 89 F6 85 F6 0F 85 E4 05 00 00
+***** EXPENSIVE 1910 428
+
+. 1909 8057AF0 37
+. DD D8 DD D8 8B 55 D4 8B 4D 18 FF 45 DC 8B 7D D0 01 95 3C FF FF FF 01 8D 38 FF FF FF 39 7D DC 0F 82 DA FE FF FF
+
+. 1910 8057B15 10
+. 83 7D D4 01 0F 84 D2 04 00 00
+
+. 1911 8057FF1 8
+. 8D 65 F4 5B 5E 5F C9 C3
+***** EXPENSIVE 1913 429
+
+. 1912 804CF5C 53
+. 8B 75 AC 01 75 B0 8B 45 B0 31 D2 F7 75 08 89 55 B0 31 D2 52 8B 45 B0 50 DF 2C 24 DC 4D C8 83 EC 08 8B 45 C4 8B B0 08 02 00 00 DD 55 88 DD 1C 24 E8 8F B6 FF FF
+
+. 1913 804CEE1 80
+. 8B 45 C4 8B 75 E0 8B 74 B0 08 8B 55 C4 8B 45 A8 03 82 08 02 00 00 89 75 BC 8B 75 E0 89 84 B2 08 01 00 00 8B 45 D8 89 C2 0F AF 55 BC 89 55 D8 89 45 D4 31 D2 8B 45 08 F7 75 D8 C7 45 C0 01 00 00 00 8B 55 BC 39 55 C0 89 45 B8 0F 83 AF 00 00 00
+
+. 1914 804DB5C 11
+. 8B 45 E4 85 C0 0F 85 64 02 00 00
+
+. 1915 804DDCB 6
+. 83 7D 18 FF 74 4E
+***** EXPENSIVE 1917 430
+
+. 1916 804DE1F 65
+. 8B 85 54 FF FF FF 8B 55 24 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 28 DD 5D C8 DD 5D C0 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 2C DD 5D B8 DD 44 02 F0 D9 C9 DD 5D B0 DD 5D A8 DD 44 02 F8 E9 2C FD FF FF
+***** EXPENSIVE 1918 431
+
+. 1917 804DB8C 22
+. DD 5D A0 C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 00 02 00 00
+
+. 1918 804D3DC 37
+. 8B 45 EC 8B 55 14 8B 4C 82 08 8B 45 E8 0F AF C1 89 45 E8 31 D2 89 F0 F7 75 E8 8B 55 DC 85 D2 0F 85 5F 02 00 00
+
+. 1919 804D660 37
+. 8B 55 D8 89 55 D4 8B 55 08 89 55 CC 8B 55 0C C7 45 D0 01 00 00 00 89 55 C8 C7 45 DC 00 00 00 00 E9 9C FD FF FF
+
+. 1920 804D421 9
+. 83 F9 02 0F 84 06 02 00 00
+***** EXPENSIVE 1922 432
+
+. 1921 804D74C 130
+. 8B 45 F0 03 45 E4 8B 7D 08 0F AF 45 0C DD 04 17 DD 44 17 08 C1 E0 04 DD 04 07 DD 44 07 08 D9 C3 D9 C3 D9 C9 8B 45 10 D8 C3 D9 C9 D8 C2 D9 C9 DD 1C 08 DD 5C 08 08 D9 CB 8B 45 EC DE E1 D9 C9 DE E2 03 45 CC D9 C3 D9 C3 D9 C9 0F AF 45 14 D8 CA D9 C9 D8 CB D9 CA D8 CC D9 CB D8 CD D9 C9 C1 E0 04 8B 7D 10 DE E2 DE C2 DD 1C 07 DD 5C 07 08 46 8B 45 0C 03 55 D8 01 45 D0 FF 45 F0 03 4D DC FF 45 EC
+
+. 1922 804D7CE 9
+. 3B 75 CC 0F 82 75 FF FF FF
+***** EXPENSIVE 1924 433
+
+. 1923 804DDD1 78
+. 8B 85 54 FF FF FF 8B 55 24 DD 44 02 F0 DD 44 02 F8 8B 55 28 DD 5D C0 DD 5D C8 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 2C DD 5D B0 DD 44 02 F8 D9 C9 DD 5D B8 DD 5D A0 DD 44 02 F0 80 75 C7 80 80 75 B7 80 DD 5D A8 80 75 A7 80 E9 70 FD FF FF
+
+. 1924 804DB8F 19
+. C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 00 02 00 00
+
+. 1925 8050F94 11
+. 8B 45 E8 85 C0 0F 85 02 02 00 00
+
+. 1926 80511A1 6
+. 83 7D 18 FF 74 4B
+***** EXPENSIVE 1928 434
+
+. 1927 80511F2 62
+. 8B 45 80 8B 55 24 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 28 D9 5D D0 D9 5D CC D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 2C D9 5D C8 D9 44 02 F8 D9 C9 D9 5D C4 D9 5D C0 D9 44 02 FC E9 94 FD FF FF
+***** EXPENSIVE 1929 435
+
+. 1928 8050FC4 22
+. D9 5D BC C7 45 E4 00 00 00 00 8B 45 D8 39 45 E4 0F 83 A4 01 00 00
+
+. 1929 8050880 37
+. 8B 55 EC 8B 7D 14 8B 4C 97 08 8B 45 E8 0F AF C1 89 45 E8 31 D2 89 F0 8B 7D DC F7 75 E8 85 FF 0F 85 5B 02 00 00
+
+. 1930 8050B00 37
+. 8B 7D D8 89 7D D4 8B 55 08 8B 7D 0C C7 45 D0 01 00 00 00 89 55 CC 89 7D C8 C7 45 DC 00 00 00 00 E9 A0 FD FF FF
+
+. 1931 80508C5 9
+. 83 F9 02 0F 84 02 02 00 00
+***** EXPENSIVE 1933 436
+vex iropt: not unrolling (126 sts)
+
+. 1932 8050BD0 123
+. 8B 45 F0 8B 7D 08 03 45 E4 D9 04 D7 D9 44 D7 04 0F AF 45 0C D9 04 C7 D9 44 C7 04 D9 C3 D9 C3 D9 C9 8B 45 10 D8 C3 D9 C9 D8 C2 D9 C9 D9 1C C8 D9 5C C8 04 D9 CB DE E1 D9 C9 DE E2 8B 45 EC D9 C3 D9 C3 D9 C9 03 45 D8 D8 CA D9 C9 D8 CB D9 CA D8 CC D9 CB D8 CD D9 C9 46 0F AF 45 14 8B 7D 10 DE E2 DE C2 03 55 0C FF 45 F0 03 4D 14 FF 45 EC 3B 75 D8 D9 1C C7 D9 5C C7 04 72 85
+***** EXPENSIVE 1934 437
+
+. 1933 80511A7 75
+. 8B 45 80 8B 55 24 D9 44 02 F8 D9 44 02 FC 8B 55 28 D9 5D CC D9 5D D0 D9 44 02 FC D9 44 02 F8 D9 C9 8B 55 2C D9 5D C4 D9 44 02 FC D9 C9 D9 5D C8 D9 5D BC D9 44 02 F8 80 75 CF 80 80 75 C7 80 D9 5D C0 80 75 BF 80 E9 D5 FD FF FF
+
+. 1934 8050FC7 19
+. C7 45 E4 00 00 00 00 8B 45 D8 39 45 E4 0F 83 A4 01 00 00
+***** EXPENSIVE 1936 438
+
+. 1935 8059310 159
+. 8B 95 50 FF FF FF 03 55 E4 8B 75 E4 8D 0C 16 01 CE 8B 45 0C 0F AF 85 50 FF FF FF 0F AF 55 0C 0F AF 4D 0C 89 B5 3C FF FF FF 8B 75 08 DD 04 C6 DD 04 D6 DD 04 CE 8B B5 3C FF FF FF 0F AF 75 0C 8B 45 08 D9 C2 DD 04 F0 D9 C9 D8 C2 D9 C3 D8 C2 D9 CC DE E2 8B 45 14 D9 C0 0F AF 85 54 FF FF FF 8B 75 10 D8 C4 DD 1C C6 D9 CC 8B 45 14 0F AF C7 8B 4D D8 DE E2 D9 C9 DD 1C C6 8D 14 39 8D 04 FD 08 00 00 00 0F AF 45 14 0F AF 55 14 D9 E0 D9 CA DE E1 D9 C9 DD 1C 30 DD 1C D6 8B 45 18 8B 55 DC
+
+. 1936 80593AF 30
+. 03 7D 18 01 85 54 FF FF FF 01 95 50 FF FF FF FF 45 E8 8B 4D E0 39 4D E8 0F 82 43 FF FF FF
+
+. 1937 8058E4C 15
+. C7 45 F0 00 00 00 00 8B 55 E8 39 55 F0 73 6E
+***** EXPENSIVE 1939 439
+
+. 1938 8058E5B 110
+. 8B 4D E4 49 89 4D DC 89 CF 8B 45 EC 8D 34 38 8B 45 0C 0F AF C7 8B 55 08 8B 4D 14 DD 04 C2 0F AF 4D DC 8B 55 DC 8B 45 10 DD 1C C8 8D 0C D5 08 00 00 00 0F AF 4D 14 0F AF 75 0C 89 4D CC 8B 4D 08 8B 54 F1 04 8B 04 F1 81 F2 00 00 00 80 8B 75 10 8B 4D CC 89 04 31 89 54 31 04 FF 45 F0 8B 75 18 8B 45 E8 01 75 DC 03 7D E4 39 45 F0 72 9B
+
+. 1939 8054388 28
+. 8B 45 E0 40 C7 45 EC 01 00 00 00 89 85 6C FF FF FF D1 E8 39 45 EC 0F 83 DF 02 00 00
+
+. 1940 8054683 13
+. F7 45 E0 01 00 00 00 0F 85 01 01 00 00
+
+. 1941 8054690 19
+. C7 45 E8 00 00 00 00 8B 55 DC 39 55 E8 0F 83 EE 00 00 00
+***** EXPENSIVE 1943 440
+
+. 1942 80546A3 161
+. 8B 4D E0 8B 75 E0 49 C1 E6 02 DD 83 DC F8 FF FF 89 8D 68 FF FF FF 89 CF 89 B5 64 FF FF FF 8D 76 00 8B 45 E0 8D 14 47 8B 45 0C 8B 4D 08 0F AF C7 DD 04 C1 8D 04 FD 08 00 00 00 0F AF 45 0C DD 04 08 8B 45 0C 0F AF C2 8D 14 D5 08 00 00 00 0F AF 55 0C DD 04 C1 DD 04 0A D9 C4 D9 FA D9 C3 D8 C2 DE C9 D9 C5 D9 FA 8B 95 68 FF FF FF D9 C5 03 55 E4 D8 E4 8B 45 E4 8D 0C 10 DE C9 01 C8 D9 C0 D9 CE DE C4 D9 CA DE E4 D9 CA 8B 75 14 0F AF B5 68 FF FF FF 0F AF 55 14 0F AF 4D 14 89 85 34 FF FF FF
+***** EXPENSIVE 1944 441
+
+. 1943 8054744 75
+. D8 C0 D9 CC 8B 45 10 D8 E2 D9 CB D8 C0 D9 CC DD 1C F0 DE C1 D9 C9 DD 1C D0 D9 C9 8B B5 34 FF FF FF DD 1C C8 8B 55 E0 FF 45 E8 8B 4D DC 0F AF 75 14 D9 E0 01 95 68 FF FF FF 03 BD 64 FF FF FF 39 4D E8 DD 1C F0 0F 82 35 FF FF FF
+***** EXPENSIVE 1945 442
+
+. 1944 805478F 13
+. DD D8 81 C4 C4 00 00 00 5B 5E 5F C9 C3
+***** EXPENSIVE 1946 443
+
+. 1945 805B934 160
+. 8B 95 4C FF FF FF 03 55 E4 8B 75 E4 8D 0C 16 01 CE 8B 45 0C 0F AF 85 4C FF FF FF 89 B5 44 FF FF FF 8B 75 08 D9 04 86 0F AF 55 0C 0F AF 4D 0C D9 04 96 D9 04 8E D9 CA D9 5D D8 8B B5 44 FF FF FF 0F AF 75 0C 8B 45 08 D9 45 D8 D9 04 B0 D9 C9 D8 C3 8B 45 D0 D9 C2 D8 C2 D9 CB 8D 14 38 DE E2 8B 45 14 D9 C0 8B 4D 10 0F AF 85 50 FF FF FF D8 C3 D9 1C 81 D9 CB 8B 45 14 0F AF C7 D8 6D D8 D9 14 81 D9 C9 8D 04 BD 04 00 00 00 0F AF 45 14 D9 E0 D9 1C 08 D9 C9 0F AF 55 14 DE EA D9 C9 8B 45 18
+***** EXPENSIVE 1947 444
+
+. 1946 805B9D4 44
+. D9 14 91 D9 C9 03 7D 18 D9 5D D8 D9 5D D4 01 85 50 FF FF FF 8B 55 DC FF 45 E8 8B 4D E0 01 95 4C FF FF FF 39 4D E8 0F 82 34 FF FF FF
+
+. 1947 805B495 15
+. C7 45 F0 00 00 00 00 8B 55 E8 39 55 F0 73 63
+***** EXPENSIVE 1949 445
+
+. 1948 805B4A4 99
+. 8B 7D E4 4F 89 FE 89 F6 8B 45 EC 8B 55 14 0F AF D7 8D 0C 30 8B 45 0C 89 55 D0 0F AF C6 8B 55 08 D9 04 82 8B 45 D0 8B 55 10 D9 1C 82 8D 14 BD 04 00 00 00 0F AF 55 14 0F AF 4D 0C 89 55 D0 8B 55 08 8B 04 8A 35 00 00 00 80 8B 4D 10 8B 55 D0 89 04 0A FF 45 F0 8B 4D E8 03 7D 18 03 75 E4 39 4D F0 72 A5
+
+. 1949 8056B76 28
+. 8B 45 E0 40 C7 45 EC 01 00 00 00 89 85 5C FF FF FF D1 E8 39 45 EC 0F 83 FE 02 00 00
+
+. 1950 8056E90 13
+. F7 45 E0 01 00 00 00 0F 85 3E 01 00 00
+
+. 1951 8056E9D 19
+. C7 45 E8 00 00 00 00 8B 45 DC 39 45 E8 0F 83 2B 01 00 00
+***** EXPENSIVE 1953 446
+
+. 1952 8056EB0 188
+. 8B 55 E0 8B 4D E0 4A C1 E1 02 89 95 58 FF FF FF 89 D7 89 8D 54 FF FF FF 8B 45 0C 8B 4D 08 0F AF C7 D9 04 81 8D 04 BD 04 00 00 00 0F AF 45 0C 8B 75 E0 8D 14 77 D9 04 08 D9 C9 8B 45 0C 0F AF C2 8D 14 95 04 00 00 00 0F AF 55 0C D9 9D 70 FF FF FF D9 04 0A D9 9D 64 FF FF FF D9 95 6C FF FF FF D9 04 81 DD 83 DC F8 FF FF D9 FA D9 CA D8 85 64 FF FF FF DE CA D9 C9 D9 9D 60 FF FF FF DD 83 DC F8 FF FF D9 FA D9 85 70 FF FF FF D8 E2 8B 95 58 FF FF FF 03 55 E4 DE C9 8B 45 E4 D9 85 70 FF FF FF D9 C9 D9 5D F0 8D 0C 10 DE C1 D9 85 64 FF FF FF D9 45 F0 01 C8 D9 85 60 FF FF FF
+***** EXPENSIVE 1954 447
+
+. 1953 8056F6C 111
+. D9 CA D8 A5 6C FF FF FF D9 CA 8B 75 14 0F AF B5 58 FF FF FF 0F AF 55 14 0F AF 4D 14 D8 E9 D9 CB 89 85 34 FF FF FF D8 C0 D9 CA 8B 45 10 D8 C0 D9 CA D9 1C B0 D8 85 60 FF FF FF D9 CA D9 1C 90 8B B5 34 FF FF FF D9 1C 88 8B 55 E0 FF 45 E8 8B 4D DC 0F AF 75 14 D9 E0 01 95 58 FF FF FF 03 BD 54 FF FF FF 39 4D E8 D9 1C B0 0F 82 ED FE FF FF
+vex iropt: 2 x unrolling (32 sts -> 64 sts)
+
+. 1954 804CCAC 30
+. 89 C8 31 D2 F7 75 EC 89 C1 8B 55 14 8B 45 EC 89 04 BA 31 D2 89 C8 F7 75 EC 47 85 D2 74 E2
+
+. 1955 804D8B0 11
+. 8B 75 E8 85 F6 0F 85 CB 01 00 00
+
+. 1956 804DA86 6
+. 83 7D 18 FF 74 32
+***** EXPENSIVE 1958 448
+
+. 1957 804DABE 39
+. 8B 45 8C 8B 55 24 DD 44 02 F0 DD 5D C8 DD 44 02 F8 8B 55 28 DD 5D C0 DD 44 02 F0 DD 5D B8 DD 44 02 F8 E9 F1 FD FF FF
+***** EXPENSIVE 1959 449
+
+. 1958 804D8D6 25
+. DD 5D B0 C7 45 E4 00 00 00 00 8B 85 7C FF FF FF 39 45 E4 0F 83 77 01 00 00
+***** EXPENSIVE 1960 450
+
+. 1959 804D91C 140
+. 8B 45 08 DD 04 08 DD 5D A8 DD 44 08 08 8B 45 EC 03 45 E0 0F AF 45 0C C1 E0 04 8B 55 08 DD 04 02 D9 C9 DD 5D A0 DD 44 02 08 8B 55 EC 8B 45 E0 8D 04 42 89 45 80 0F AF 45 0C C1 E0 04 8B 55 08 DD 04 02 DD 44 02 08 D9 C3 D9 C3 D9 C9 D8 C3 D9 C9 D8 C2 DD 83 BC F8 FF FF DD 83 BC F8 FF FF D9 C9 D8 CB D9 C9 D8 CA D9 C9 DC 6D A8 D9 C9 DC 6D A0 D9 CF DE E5 D9 CD DE E3 D9 CB DC 4D 98 D9 CA DC 4D 98 D9 C9 8B 45 10 DC 45 A8 D9 CB
+***** EXPENSIVE 1961 451
+
+. 1960 804D9A8 132
+. DC 45 A0 8B 95 7C FF FF FF D9 C4 D9 C6 D9 C9 D8 E3 D9 C9 D8 C4 D9 CE DE C3 D9 CE DE E3 D9 CB DD 14 30 D9 CB DD 54 30 08 DD 45 C8 8D 04 3A DD 45 C0 0F AF 45 14 D8 CE D9 C9 D8 CF C1 E0 04 DE E1 D9 CC 8B 55 10 DD 5D A8 DD 5D A0 D9 CA DD 1C 02 D9 CA DC 4D C8 D9 CB DC 4D C0 DE C3 D9 CA DD 5C 02 08 8B 95 7C FF FF FF 8D 04 57 DD 45 B8 DD 45 B0 D9 C9 0F AF 45 14 D8 CB D9 C9 D8 CA D9 CB DC 4D B0 D9 CA
+***** EXPENSIVE 1962 452
+
+. 1961 804DA2C 58
+. DC 4D B8 D9 C9 C1 E0 04 8B 55 10 DE E3 DE C1 D9 C9 DD 1C 02 DD 5C 02 08 FF 45 E4 8B 45 0C 8B 95 7C FF FF FF 03 4D 90 01 45 88 FF 45 EC 03 75 94 47 39 55 E4 0F 82 B6 FE FF FF
+***** EXPENSIVE 1963 453
+
+. 1962 804DA8C 50
+. 8B 45 8C 8B 55 24 DD 44 02 F0 DD 5D C8 DD 44 02 F8 8B 55 28 DD 5D C0 DD 44 02 F0 DD 5D B8 DD 44 02 F8 DD 5D B0 80 75 C7 80 80 75 B7 80 E9 1B FE FF FF
+
+. 1963 804D8D9 22
+. C7 45 E4 00 00 00 00 8B 85 7C FF FF FF 39 45 E4 0F 83 77 01 00 00
+
+. 1964 8050D18 11
+. 8B 45 E4 85 C0 0F 85 A5 01 00 00
+
+. 1965 8050EC8 6
+. 83 7D 18 FF 74 34
+***** EXPENSIVE 1967 454
+
+. 1966 8050F02 43
+. 8B 45 A4 8B 55 24 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 28 D9 5D CC D9 44 02 F8 D9 C9 D9 5D C8 D9 5D C4 D9 44 02 FC E9 11 FE FF FF
+***** EXPENSIVE 1968 455
+
+. 1967 8050D3E 22
+. D9 5D C0 C7 45 E0 00 00 00 00 8B 45 94 39 45 E0 0F 83 54 01 00 00
+***** EXPENSIVE 1969 456
+
+. 1968 8050D6C 142
+. 8B 45 08 D9 04 C8 D9 44 C8 04 8B 45 E8 03 45 DC 0F AF 45 0C 8B 55 08 D9 04 C2 D9 C9 D9 5D B8 D9 44 C2 04 8B 55 E8 8B 45 DC 8D 04 42 89 45 98 8B 55 08 0F AF 45 0C D9 04 C2 D9 C2 D9 CC D9 5D BC DC C3 DD 83 BC F8 FF FF D8 CC D9 44 C2 04 D9 C9 D8 6D BC D9 C3 D8 C2 D9 C9 D9 5D EC DD 83 BC F8 FF FF D8 C9 D8 6D B8 D9 5D B4 D9 CC DE E2 DE EA D9 C9 D8 4D B0 D9 45 EC D9 CA D8 4D B0 D9 45 B4 D9 CD D8 45 BC D9 CC D8 45 B8 D9 CD D8 C1
+***** EXPENSIVE 1970 457
+
+. 1969 8050DFA 127
+. D9 C3 D9 CA D8 6D B4 D9 CC D8 C3 D9 CA DE E3 D9 CC D9 55 BC D9 CD D9 55 B8 D9 C9 D9 55 AC D9 CB D9 55 A8 8B 45 10 8B 55 94 D9 45 CC D9 45 C8 D9 CF D9 1C F0 D9 CA D9 5C F0 04 D9 CD D8 CC D9 C9 8D 04 3A D8 CA D9 CC D8 4D CC D9 CA D8 4D C8 D9 CC 0F AF 45 14 DE E1 D9 C9 8B 55 10 DE C3 D9 1C C2 D9 C9 D9 5C C2 04 D9 45 C4 8B 55 94 D9 45 C0 8D 04 57 D8 CB D9 C9 D8 CA 0F AF 45 14 DE E1
+***** EXPENSIVE 1971 458
+
+. 1970 8050E79 47
+. 8B 55 10 D9 1C C2 D9 45 C0 D9 45 C4 DE CB DE C9 DE C1 D9 5C C2 04 FF 45 E0 8B 45 94 03 4D 0C FF 45 E8 03 75 14 47 39 45 E0 0F 82 C4 FE FF FF
+***** EXPENSIVE 1972 459
+
+. 1971 8050ECE 52
+. 8B 45 A4 8B 55 24 D9 44 02 F8 D9 44 02 FC 8B 55 28 D9 5D C8 D9 44 02 FC D9 C9 D9 5D CC D9 5D C0 D9 44 02 F8 80 75 CB 80 D9 5D C4 80 75 C3 80 E9 3F FE FF FF
+
+. 1972 8050D41 19
+. C7 45 E0 00 00 00 00 8B 45 94 39 45 E0 0F 83 54 01 00 00
+
+. 1973 8058FD3 27
+. 8B 4D DC 41 89 C8 C7 45 EC 01 00 00 00 D1 E8 39 45 EC 89 4D 94 0F 83 07 02 00 00
+***** EXPENSIVE 1975 460
+
+. 1974 8058FEE 76
+. C7 45 8C 10 00 00 00 8D 76 00 8B 45 8C 8B 55 20 DD 44 02 F0 DD 44 02 F8 8B 55 24 DD 5D C0 DD 44 02 F8 D9 C9 DD 5D C8 DD 5D B0 DD 44 02 F0 C7 45 E8 00 00 00 00 8B 45 E0 80 75 C7 80 80 75 B7 80 39 45 E8 DD 5D B8 0F 83 A6 01 00 00
+***** EXPENSIVE 1976 461
+
+. 1975 805903A 171
+. 8B 4D D4 81 F1 00 00 00 80 8B 45 DC 89 4D 9C 2B 45 EC 8B 4D EC 8B 55 D0 8D 44 00 FF 8D 74 09 FF 89 55 98 89 45 90 89 F7 89 F6 8B 45 E4 8D 14 38 01 D0 8B 4D 0C 89 85 7C FF FF FF 0F AF CF 8B 45 08 DD 04 C8 8D 04 FD 08 00 00 00 8B 4D 08 0F AF 45 0C DD 5D A8 DD 04 08 8B 45 0C 0F AF C2 8D 14 D5 08 00 00 00 DD 04 C1 0F AF 55 0C 8B 45 0C DD 04 0A 0F AF 85 7C FF FF FF 8B 95 7C FF FF FF DD 04 C1 D9 CB DD 5D A0 8D 0C D5 08 00 00 00 DD 45 C8 DD 45 C0 0F AF 4D 0C 8B 45 08 D8 CA D9 C9 D8 CB D9 CA DC 4D C8 D9 CB DC 4D C0
+***** EXPENSIVE 1977 462
+
+. 1976 80590E5 122
+. DD 04 01 D9 CB DE E2 DE C3 DD 45 B8 DD 45 B0 DD 45 B8 D9 C9 D8 CC D9 CA D8 CE D9 C9 DE CC D9 CD DC 4D B0 D9 CD DE E1 D9 CA DE C4 D9 C0 D9 C3 D9 C9 D8 C3 D9 C9 D8 C5 DD 83 BC F8 FF FF DD 83 BC F8 FF FF D9 C9 D8 CB D9 C9 D8 CA D9 CC DE E5 D9 CD DE E6 D9 C9 DC 45 A8 D9 CC DC 6D A8 D9 CA DC 6D A0 D9 CB DC 4D 98 D9 CD DC 4D 98 D9 CC DD 55 A8 8B 4D DC 8B 45 14 8D 14 4E
+***** EXPENSIVE 1978 463
+
+. 1977 805915F 129
+. 0F AF C6 8B 4D 10 DD 1C C1 8D 04 F5 08 00 00 00 0F AF 45 14 DC 45 A0 DD 14 08 D9 C1 8B 45 14 0F AF C2 D9 C3 D9 C9 D8 E5 8D 14 D5 08 00 00 00 DD 1C C1 0F AF 55 14 D8 C5 8B 45 14 DD 1C 0A D9 C9 0F AF 45 90 8B 55 90 DE C3 D9 CA DD 1C C1 DE E2 D9 C9 8D 04 D5 08 00 00 00 0F AF 45 14 D9 E0 DD 1C 08 FF 45 E8 8B 4D 18 8B 45 E0 01 CA 01 CE 03 7D DC 39 45 E8 DD 5D A0 89 55 90 0F 82 84 FE FF FF
+
+. 1978 80591E0 21
+. 8B 45 94 FF 45 EC D1 E8 83 45 8C 10 39 45 EC 0F 82 03 FE FF FF
+
+. 1979 80591F5 13
+. F7 45 DC 01 00 00 00 0F 85 9F 00 00 00
+***** EXPENSIVE 1981 464
+
+. 1980 8053754 57
+. 8B 7D A4 90 8B 75 AC 01 75 B4 8B 45 B4 31 D2 F7 75 08 89 55 B4 31 D2 52 8B 45 B4 50 DF 2C 24 DC 4D C8 83 EC 08 8B 45 C4 8B B0 08 02 00 00 DD 55 88 DD 1C 24 E8 93 4E FF FF
+***** EXPENSIVE 1982 465
+
+. 1981 805378D 23
+. DD 1C 3E 8B 55 C4 DD 45 88 8B B2 08 02 00 00 DD 1C 24 E8 8C 4F FF FF
+***** EXPENSIVE 1983 466
+
+. 1982 80537A4 34
+. 8B 45 B0 FF 45 B8 D1 E8 DD 5C 3E 08 83 45 A8 10 83 C7 10 83 45 A4 10 FF 45 DC 83 C4 10 39 45 B8 72 92
+***** EXPENSIVE 1984 467
+
+. 1983 8053FA6 70
+. C7 45 8C 10 00 00 00 C7 45 80 02 00 00 00 8B 4D 8C 8B 75 20 DD 44 0E F0 8B 45 24 DD 5D C8 DD 44 0E F8 DD 5D C0 DD 44 08 F0 DD 5D B8 C7 45 E8 00 00 00 00 DD 44 08 F8 8B 45 D4 39 45 E8 DD 5D B0 0F 83 A7 01 00 00
+***** EXPENSIVE 1985 468
+
+. 1984 8053FEC 170
+. 8B 55 80 8B 4D D8 89 D7 D1 E1 89 55 AC 4F 89 4D A8 C7 45 84 00 00 00 00 8B 55 AC 8B 75 84 8D 44 32 FF 8B 4D A8 8D 34 01 8B 4D 84 29 D1 8B 55 A8 8D 4C 0A FF 89 8D 74 FF FF FF 8B 4D 0C 8B 55 08 0F AF C8 8D 04 C5 08 00 00 00 DD 04 CA 0F AF 45 0C DD 5D A0 DD 04 10 8B 45 0C 0F AF C6 DD 04 C2 8B 45 0C 0F AF 85 74 FF FF FF DD 04 C2 8B 85 74 FF FF FF 8D 0C C5 08 00 00 00 8D 34 F5 08 00 00 00 0F AF 75 0C 0F AF 4D 0C D9 C1 D9 CB DD 5D 98 DD 04 11 DD 04 16 D9 CC D8 C2 DD 83 BC F8 FF FF D9 CA D9 E0 D9 C5 D9 CB D8 CA
+***** EXPENSIVE 1986 469
+
+. 1985 8054096 129
+. DC 6D A0 D9 CB D8 C1 D9 CA DC 45 A0 DD 83 BC F8 FF FF D9 CF DE E2 D9 C9 DC 4D E0 D9 C9 DD 5D A0 D9 CD D8 C9 D9 C2 D9 C9 8B 45 14 8B 75 10 DC 6D 98 D9 CA 0F AF C7 DC 45 98 D9 C9 D8 E6 D9 CB DE C6 D9 CC DE E3 DD 45 A0 D9 CB DC 4D E0 8B 4D DC D9 C1 D9 CD DD 55 98 D9 CC DD 1C C6 8D 04 FD 08 00 00 00 DC C4 DE E9 0F AF 45 14 DD 45 C8 DD 45 C0 D9 CC DD 1C 30 D9 CB 8D 14 39 D8 CC D9 CB D8 CA
+***** EXPENSIVE 1987 470
+
+. 1986 8054117 124
+. 8B 45 14 0F AF C2 DE E3 D9 CA DD 1C C6 8D 0C 11 DC 4D C0 D9 CA 8D 14 D5 08 00 00 00 DC 4D C8 8B 45 14 DD 45 B8 DD 45 B0 D9 C9 0F AF C1 0F AF 55 14 D8 CD D9 C9 D8 CB D9 CC DE C2 D9 CC 8D 0C CD 08 00 00 00 DC 4D B0 D9 CA DC 4D B8 D9 C9 DD 1C 32 D9 CB 0F AF 4D 14 DE E2 DE C2 8B 55 D8 DD 1C C6 FF 45 E8 DD 1C 31 8D 04 52 8B 4D D4 03 7D D8 01 45 84 39 4D E8 0F 82 71 FE FF FF
+
+. 1987 8054193 25
+. 8B 45 94 FF 45 EC D1 E8 83 45 8C 10 83 45 80 02 39 45 EC 0F 82 08 FE FF FF
+***** EXPENSIVE 1989 471
+vex iropt: not unrolling (110 sts)
+
+. 1988 8053EEC 147
+. 8B 75 D8 8B 45 0C 0F AF C7 8B 4D 08 D1 E6 DD 04 C1 8D 54 3E FF 8B 45 0C 0F AF C2 8D 14 D5 08 00 00 00 DD 04 C1 0F AF 55 0C D9 C1 8B 45 88 D9 C3 DC 0C 0A D9 C9 D8 E2 D9 CA 03 45 DC D8 C0 8B 55 DC 01 C2 DE C3 8B 4D 14 D9 C1 0F AF 4D 88 0F AF 45 14 03 75 D8 89 95 7C FF FF FF D8 E1 D9 CB 8B 55 10 DD 1C CA D9 CA 01 F7 DD 1C C2 8B 75 D8 8B 95 7C FF FF FF FF 45 E8 8B 45 D4 0F AF 55 14 DE C1 8B 4D 10 01 75 88 39 45 E8 DD 1C D1 0F 82 6D FF FF FF
+
+. 1989 805B612 27
+. 8B 55 DC 42 89 D0 C7 45 EC 01 00 00 00 D1 E8 39 45 EC 89 55 B8 0F 83 F5 01 00 00
+***** EXPENSIVE 1991 472
+
+. 1990 805B62D 73
+. C7 45 AC 02 00 00 00 8B 4D AC 8B 45 20 D9 44 88 F8 D9 44 88 FC 8B 45 24 D9 5D D0 D9 44 88 FC D9 C9 D9 5D D4 D9 5D C8 D9 44 88 F8 C7 45 E8 00 00 00 00 8B 45 E0 80 75 D3 80 80 75 CB 80 39 45 E8 D9 5D CC 0F 83 97 01 00 00
+***** EXPENSIVE 1992 473
+
+. 1991 805B676 160
+. 8B 45 DC 2B 45 EC 8D 44 00 FF D9 45 D8 89 45 B0 8B 45 EC D9 83 EC F8 FF FF D9 C9 D9 E0 D9 C9 8D 74 00 FF D9 5D B4 D9 5D BC 89 F7 8D 76 00 8B 4D E4 8D 14 39 01 D1 8B 45 0C 89 4D A0 0F AF C7 8B 4D 08 D9 04 81 8D 04 BD 04 00 00 00 0F AF 45 0C D9 04 08 8B 45 0C 0F AF C2 8D 14 95 04 00 00 00 D9 04 81 D9 C9 0F AF 55 0C 8B 45 0C D9 5D C0 0F AF 45 A0 D9 04 0A 8B 55 A0 D9 04 81 D9 CB D9 5D C4 8D 0C 95 04 00 00 00 D9 45 D4 D9 45 D0 0F AF 4D 0C 8B 45 08 D8 CA D9 C9 D8 CB D9 CA D8 4D D4
+***** EXPENSIVE 1993 474
+
+. 1992 805B716 115
+. D9 CB D8 4D D0 D9 04 01 D9 CB DE E2 DE C3 D9 45 CC D9 45 C8 D9 45 CC D9 C9 D8 CC D9 CA D8 CE D9 C9 DE CC D9 CD D8 4D C8 D9 CD DE E1 D9 CA DE C4 D9 C0 D9 C3 D9 C9 D8 C3 D9 C9 D8 C5 D9 45 B4 D9 45 B4 D9 C9 D8 CB D9 C9 D8 CA D9 CC DE E5 D9 CD DE E6 D8 45 C0 D9 CC D8 6D C4 D9 CA D8 6D C0 D9 CB D8 4D BC D9 CD D8 4D BC D9 C9 D8 45 C4 D9 CC D9 55 C0
+***** EXPENSIVE 1994 475
+
+. 1993 805B789 132
+. D9 CC 8B 4D DC 8B 45 14 8D 14 4E 0F AF C6 8B 4D 10 D9 1C 81 D9 CB 8D 04 B5 04 00 00 00 0F AF 45 14 D9 1C 08 D9 C0 8B 45 14 0F AF C2 D9 C2 D9 C9 D8 E4 8D 14 95 04 00 00 00 D9 1C 81 0F AF 55 14 D8 C4 8B 45 14 D9 1C 0A 0F AF 45 B0 8B 55 B0 DE C2 D9 C9 D9 1C 81 DE E1 8D 04 95 04 00 00 00 0F AF 45 14 D9 E0 D9 1C 08 FF 45 E8 8B 4D 18 8B 45 E0 01 CA 01 CE 03 7D DC 39 45 E8 89 55 B0 0F 82 97 FE FF FF
+
+. 1994 805B80D 21
+. 8B 45 B8 FF 45 EC D1 E8 83 45 AC 02 39 45 EC 0F 82 12 FE FF FF
+
+. 1995 805B822 13
+. F7 45 DC 01 00 00 00 0F 85 9C 00 00 00
+***** EXPENSIVE 1997 476
+
+. 1996 8055FC4 45
+. 03 7D B0 89 F8 31 D2 F7 75 08 89 D7 31 D2 52 57 DF 2C 24 DC 4D C8 83 EC 08 8B 45 C4 8B B0 08 02 00 00 DD 55 98 DD 1C 24 E8 2F 26 FF FF
+***** EXPENSIVE 1998 477
+
+. 1997 8055FF1 26
+. 8B 55 D8 D9 1C D6 8B 45 C4 DD 45 98 8B B0 08 02 00 00 DD 1C 24 E8 25 27 FF FF
+***** EXPENSIVE 1999 478
+
+. 1998 805600B 27
+. 8B 45 B4 8B 55 D8 FF 45 B8 D1 E8 D9 5C D6 04 83 C4 10 42 39 45 B8 89 55 D8 72 9E
+
+. 1999 8056026 5
+. E9 34 FF FF FF
+***** EXPENSIVE 2001 479
+
+. 2000 8056790 68
+. C7 45 A4 02 00 00 00 90 8B 4D A4 8B 75 20 8B 45 24 D9 44 8E F8 D9 44 8E FC D9 C9 D9 5D D4 D9 5D D0 D9 44 88 F8 D9 44 88 FC D9 C9 C7 45 E8 00 00 00 00 8B 45 D8 39 45 E8 D9 5D CC D9 5D C8 0F 83 BA 01 00 00
+***** EXPENSIVE 2002 480
+
+. 2001 80567D4 167
+. 89 4D C4 8B 4D DC 8B 55 EC D1 E1 8D 7C 12 FF 89 4D C0 C7 45 9C 00 00 00 00 8D 76 00 8B 55 C4 8B 75 9C 8D 44 32 FF 8B 4D C0 8D 34 01 8B 4D 9C 29 D1 8B 55 C0 8D 4C 0A FF 89 4D 94 8B 4D 0C 0F AF C8 8D 04 85 04 00 00 00 8B 55 08 0F AF 45 0C D9 04 10 8B 45 0C 0F AF C6 D9 04 82 8B 45 0C D9 04 8A 0F AF 45 94 D9 04 82 D9 C9 D9 5D BC 8B 45 94 D9 C1 8D 0C 85 04 00 00 00 D8 C1 8D 34 B5 04 00 00 00 0F AF 75 0C 0F AF 4D 0C D9 5D B4 DD 83 BC F8 FF FF D9 04 11 D9 CC D9 5D B8 D8 4D B4 D9 04 16 D9 CC D9 E0 D9 C9
+***** EXPENSIVE 2003 481
+
+. 2002 805687B 135
+. D8 6D BC D9 C4 D9 C9 D9 5D F0 D8 C1 DD 83 BC F8 FF FF D8 C9 D8 6D B8 D9 45 F0 D9 C9 D9 5D F0 D9 45 BC D9 CE DE E3 D9 CD D8 45 B4 D9 CC DE E3 D9 C9 D8 4D E4 D9 45 F0 D9 CB D8 4D E4 D9 CC D9 5D BC 8B 45 E0 8D 14 38 D9 C2 8D 0C 10 D8 C4 8B 45 14 8B 75 10 0F AF C7 D9 5D B0 D9 45 BC D9 1C 86 D9 C4 8D 04 BD 04 00 00 00 D8 E1 D9 CD DE C1 D9 C9 0F AF 45 14 D8 45 B8 D9 45 D4 D9 45 D0 D9 CA D9 14 30 D9 C9 D8 CE
+***** EXPENSIVE 2004 482
+
+. 2003 8056902 140
+. D9 CA D8 4D B0 8B 45 14 0F AF C2 DE EA D9 C9 D9 1C 86 D9 45 D4 D8 4D B0 D9 CD D8 4D D0 D9 CB DE E4 D9 CA DE C4 8D 14 95 04 00 00 00 8B 45 14 D9 45 CC D9 45 C8 D9 C9 0F AF C1 0F AF 55 14 D8 CA D9 C9 D8 CC D9 CA 8D 0C 8D 04 00 00 00 D8 4D C8 D9 CC D8 4D CC D9 CD D9 1C 32 0F AF 4D 14 DE E1 D9 CA DE C3 D9 C9 8B 55 DC D9 1C 86 D9 C9 FF 45 E8 D9 1C 31 8D 04 52 8B 4D D8 03 7D DC 01 45 9C 39 4D E8 D9 5D B8 0F 82 62 FE FF FF
+
+. 2004 805698E 21
+. 8B 45 AC FF 45 EC D1 E8 83 45 A4 02 39 45 EC 0F 82 F5 FD FF FF
+***** EXPENSIVE 2006 483
+vex iropt: not unrolling (116 sts)
+
+. 2005 80566DC 141
+. 8B 75 DC 8B 45 0C 0F AF C7 8B 4D 08 D1 E6 D9 04 81 8D 54 3E FF 8B 45 0C 0F AF C2 8D 14 95 04 00 00 00 D9 04 81 0F AF 55 0C D9 C1 8B 45 A0 D9 C3 D8 0C 0A D9 C9 D8 E2 D9 CA 03 45 E0 D8 C0 8B 55 E0 01 C2 DE C3 8B 4D 14 D9 C1 0F AF 4D A0 0F AF 45 14 03 75 DC 89 55 98 D8 E1 D9 CB 8B 55 10 D9 1C 8A D9 CA 01 F7 D9 1C 82 8B 75 DC 8B 55 98 FF 45 E8 8B 45 D8 0F AF 55 14 DE C1 8B 4D 10 01 75 A0 39 45 E8 D9 1C 91 0F 82 73 FF FF FF
+
+. 2006 804DF14 11
+. 8B 45 E4 85 C0 0F 85 80 04 00 00
+
+. 2007 804E39F 6
+. 83 7D 18 FF 74 65
+***** EXPENSIVE 2009 484
+
+. 2008 804E40A 87
+. 8B 85 DC FE FF FF 8B 55 24 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 28 DD 5D B8 DD 5D B0 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 2C DD 5D A8 DD 5D A0 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 30 DD 5D 98 DD 5D 90 DD 44 02 F0 DD 44 02 F8 D9 C9 DD 5D 88 DD 5D 80 E9 F2 FA FF FF
+
+. 2009 804DF53 19
+. C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 13 04 00 00
+***** EXPENSIVE 2011 485
+
+. 2010 804E3A5 101
+. 8B 85 DC FE FF FF 8B 55 24 DD 44 02 F0 DD 44 02 F8 8B 55 28 DD 5D B0 DD 5D B8 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 2C DD 5D A0 DD 5D A8 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 30 DD 5D 90 DD 44 02 F8 D9 C9 DD 5D 98 DD 5D 80 DD 44 02 F0 80 75 B7 80 80 75 A7 80 80 75 97 80 DD 5D 88 80 75 87 80 E9 49 FB FF FF
+
+. 2011 80512DC 11
+. 8B 45 E0 85 C0 0F 85 91 04 00 00
+
+. 2012 8051778 6
+. 83 7D 18 FF 74 65
+***** EXPENSIVE 2014 486
+
+. 2013 80517E3 84
+. 8B 85 18 FF FF FF 8B 55 24 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 28 D9 5D C0 D9 5D BC D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 2C D9 5D B8 D9 5D B4 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 30 D9 5D B0 D9 44 02 F8 D9 C9 D9 5D AC D9 5D A8 D9 44 02 FC E9 DF FA FF FF
+***** EXPENSIVE 2015 487
+
+. 2014 8051316 22
+. D9 5D A4 C7 45 DC 00 00 00 00 8B 45 D0 39 45 DC 0F 83 26 04 00 00
+***** EXPENSIVE 2016 488
+
+. 2015 805177E 101
+. 8B 85 18 FF FF FF 8B 55 24 D9 44 02 F8 D9 44 02 FC 8B 55 28 D9 5D BC D9 5D C0 D9 44 02 FC D9 44 02 F8 D9 C9 8B 55 2C D9 5D B4 D9 5D B8 D9 44 02 FC D9 44 02 F8 D9 C9 8B 55 30 D9 5D AC D9 44 02 FC D9 C9 D9 5D B0 D9 5D A4 D9 44 02 F8 80 75 BF 80 80 75 B7 80 80 75 AF 80 D9 5D A8 80 75 A7 80 E9 36 FB FF FF
+
+. 2016 8051319 19
+. C7 45 DC 00 00 00 00 8B 45 D0 39 45 DC 0F 83 26 04 00 00
+***** EXPENSIVE 2018 489
+
+. 2017 805879C 53
+. 8B 45 B0 01 45 B8 8B 45 B8 31 D2 F7 75 08 89 55 B8 31 D2 52 8B 45 B8 50 DF 2C 24 DC 4D D0 83 EC 08 8B 4D CC 8B B1 08 02 00 00 DD 55 98 DD 1C 24 E8 4F FE FE FF
+***** EXPENSIVE 2019 490
+
+. 2018 80598BC 158
+. 8B 95 BC FE FF FF 03 55 E4 8B 75 E4 8D 0C 16 8D 34 0E 8B 7D E4 01 F7 8B 45 0C 89 BD 9C FE FF FF 0F AF 85 BC FE FF FF 8B 7D 08 0F AF 55 0C 0F AF 4D 0C 0F AF 75 0C DD 04 D7 DD 04 CF DD 04 F7 DD 04 C7 8B BD 9C FE FF FF 0F AF 7D 0C 8B 45 08 DD 04 F8 D9 C4 D8 C1 D9 C4 D8 C4 D9 C1 D9 CF DE E3 D9 CD DE E4 D9 CD D8 C4 DD 85 E8 FE FF FF D9 FA D9 CE DE E5 D9 CD DC 8D E0 FE FF FF DE CC DD 85 E0 FE FF FF D8 CD 8B 45 14 8B 4D 10 0F AF 85 C0 FE FF FF D8 EA D9 CA DE C5 D9 CC DD 1C C1
+***** EXPENSIVE 2020 491
+
+. 2019 805995A 150
+. D9 C0 DD 45 D0 D9 C9 8B 45 14 0F AF 85 C4 FE FF FF 8B B5 C4 FE FF FF D8 C4 D9 CA DE E4 D9 E0 DD 45 C8 D9 CA DD 1C C1 D9 C9 D8 CA D9 C9 8D 04 F5 08 00 00 00 D8 CC 0F AF 45 14 DE E1 8B 95 C4 FE FF FF DD 45 C8 D9 C9 03 55 C4 DD 1C 08 D9 E0 8B 45 14 0F AF C2 DE CB DC 4D D0 8D 14 D5 08 00 00 00 0F AF 55 14 DE C2 DD 1C C1 8B 7D 18 DD 1C 0A 8B 45 DC FF 45 E8 8B 55 E0 01 FE 01 BD C0 FE FF FF 01 85 BC FE FF FF 39 55 E8 89 B5 C4 FE FF FF 0F 82 CC FE FF FF
+***** EXPENSIVE 2021 492
+
+. 2020 8058D44 32
+. 8B 45 D8 C7 45 F0 00 00 00 00 8B 55 E8 DD 40 F8 39 55 F0 DD 40 F0 D9 C9 D9 E0 0F 83 6C 01 00 00
+
+. 2021 8054982 30
+. 8B 75 D0 46 89 F0 C7 45 EC 01 00 00 00 D1 E8 39 45 EC 89 B5 FC FE FF FF 0F 83 EB 04 00 00
+
+. 2022 8054E8B 13
+. F7 45 D0 01 00 00 00 0F 85 86 01 00 00
+
+. 2023 8054E98 19
+. C7 45 E8 00 00 00 00 8B 75 CC 39 75 E8 0F 83 73 01 00 00
+***** EXPENSIVE 2025 493
+
+. 2024 8054EAB 174
+. 8B 45 D0 48 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 DD 9D F0 FE FF FF DD 9D E8 FE FF FF 89 85 E4 FE FF FF 89 85 D4 FE FF FF 8D 76 00 8B B5 D4 FE FF FF 8B 45 0C 8B 7D 08 0F AF C6 DD 04 C7 8D 04 F5 08 00 00 00 8B 4D D0 0F AF 45 0C 8D 14 4E DD 04 38 D9 C9 8B 45 0C 0F AF C2 D8 C0 DD 04 C7 8D 0C 8E D8 C0 D9 C1 8D 14 D5 08 00 00 00 0F AF 55 0C 0F AF 4D 0C D8 C1 DD 85 E8 FE FF FF DD 04 3A DD 04 CF D9 CA D8 CB D9 CD DD 9D A8 FE FF FF D9 CD D8 C0 D9 CD D8 C0 D9 CC D8 E1 DD 85 F0 FE FF FF D9 FA D9 CC DC AD A8 FE FF FF D9 CC
+***** EXPENSIVE 2026 494
+
+. 2025 8054F59 145
+. DC 8D E8 FE FF FF D8 CC D9 CC DD 9D A8 FE FF FF DD 45 E0 DD 45 D8 D9 C9 D8 CE D9 C9 D8 CF 8B 95 E4 FE FF FF DE C1 03 55 D4 8B 45 D4 DD 45 E0 8D 0C 10 DE CF D9 CD DC 4D D8 DE EE 8D 34 08 D9 C3 01 F0 D8 C1 D9 CC 8B 7D 14 0F AF BD E4 FE FF FF DE E1 89 85 A4 FE FF FF D9 C3 D9 CB 8B 45 10 DE C2 D9 C9 DD 1C F8 0F AF 55 14 D9 C0 D9 CA D8 E4 D9 C9 D9 E0 D9 CB D9 E0 D9 C9 8B BD A4 FE FF FF DD 1C D0 D9 C9 0F AF 4D 14 0F AF 75 14 0F AF 7D 14
+***** EXPENSIVE 2027 495
+
+. 2026 8054FEA 52
+. 8B 55 D0 D8 E4 D9 CA DE E4 DE E2 DD 1C C8 D9 C9 FF 45 E8 DD 1C F0 8B 4D CC DD 1C F8 8D 04 92 01 95 E4 FE FF FF 01 85 D4 FE FF FF 39 4D E8 0F 82 BA FE FF FF
+***** EXPENSIVE 2028 496
+
+. 2027 805BF2C 161
+. 8B 95 8C FE FF FF 03 55 E0 8B 75 E0 8D 0C 16 8D 34 0E 8B 7D E0 01 F7 8B 45 0C 89 BD 7C FE FF FF 0F AF 85 8C FE FF FF 8B 7D 08 D9 04 87 0F AF 55 0C 0F AF 4D 0C 0F AF 75 0C D9 04 97 D9 04 B7 D9 CA D9 5D CC D9 04 8F 8B BD 7C FE FF FF 0F AF 7D 0C 8B 45 08 D9 5D C4 D9 04 B8 D9 C9 D9 55 C8 D8 C1 D9 5D C0 D8 6D C8 D9 45 C4 D8 C2 D9 C9 D9 5D B8 D9 45 C0 D8 C1 D9 CA D8 6D C4 D9 CA D9 5D B0 D9 5D BC D9 55 B4 DD 85 B8 FE FF FF D9 FA D9 45 C0 D8 65 BC D9 C9 DC 8D B0 FE FF FF DE C9 D9 5D AC
+***** EXPENSIVE 2029 497
+
+. 2028 805BFCD 161
+. DD 85 B0 FE FF FF D8 4D B0 D8 6D CC D9 5D A8 D9 45 A8 D8 45 AC D9 5D A4 D9 45 D4 D9 E0 D8 4D B8 D9 5D A0 D9 45 D0 D8 C9 D9 45 A8 D9 C9 D8 6D A0 D9 C9 D8 65 AC D9 C9 D9 5D A0 D9 45 D0 D9 C9 D9 5D A8 D9 E0 D9 45 D4 D9 C9 D8 4D B8 D9 C9 DE CA DE C1 8B 45 14 D9 45 CC 0F AF 85 90 FE FF FF D8 45 B0 8B 4D 10 D9 1C 81 8B 45 14 0F AF 85 94 FE FF FF 8B 95 94 FE FF FF D9 45 A4 03 55 9C D9 1C 81 8B 8D 94 FE FF FF 8D 04 8D 04 00 00 00 0F AF 45 14 8B 75 10 D9 45 A0 D9 1C 30 8B 45 14 0F AF C2
+***** EXPENSIVE 2030 498
+
+. 2029 805C06E 61
+. D9 45 A8 D9 1C 86 8D 14 95 04 00 00 00 8B 45 18 0F AF 55 14 01 C1 D9 1C 32 89 8D 94 FE FF FF 8B 55 D8 FF 45 E4 8B 4D DC 01 85 90 FE FF FF 01 95 8C FE FF FF 39 4D E4 0F 82 81 FE FF FF
+***** EXPENSIVE 2031 499
+
+. 2030 805B390 32
+. 8B 55 DC C7 45 F0 00 00 00 00 8B 4D E8 D9 42 FC 39 4D F0 D9 42 F8 D9 C9 D9 E0 0F 83 5E 01 00 00
+
+. 2031 80571F6 30
+. 8B 75 D4 46 89 F0 C7 45 E8 01 00 00 00 D1 E8 39 45 E8 89 B5 F8 FE FF FF 0F 83 4A 05 00 00
+
+. 2032 805775E 13
+. F7 45 D4 01 00 00 00 0F 85 C4 01 00 00
+
+. 2033 805776B 19
+. C7 45 E4 00 00 00 00 8B 75 D0 39 75 E4 0F 83 B1 01 00 00
+***** EXPENSIVE 2035 500
+
+. 2034 805777E 185
+. 8B 45 D4 48 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 DD 9D F0 FE FF FF DD 9D E8 FE FF FF 89 85 E4 FE FF FF 89 85 D4 FE FF FF 8B B5 D4 FE FF FF 8B 45 0C 8B 7D 08 0F AF C6 D9 04 87 8D 04 B5 04 00 00 00 8B 4D D4 0F AF 45 0C 8D 14 4E D9 04 38 8B 45 0C 0F AF C2 D9 04 87 D9 CA D8 C0 D9 CA D8 C0 D9 CA 8D 0C 8E D9 95 18 FF FF FF D8 C2 D9 9D 0C FF FF FF 0F AF 4D 0C DD 85 E8 FE FF FF 8D 14 95 04 00 00 00 0F AF 55 0C D9 04 8F D9 C9 D8 8D 0C FF FF FF D8 E1 D9 04 3A D9 C9 D9 9D 08 FF FF FF D9 CA D8 C0 D9 CA D8 C0 DD 85 F0 FE FF FF D9 FA D9 CC D8 AD 18 FF FF FF
+***** EXPENSIVE 2036 501
+
+. 2035 8057837 179
+. D9 CC DC 8D E8 FE FF FF D8 CC D9 9D 04 FF FF FF D9 CB D9 9D 18 FF FF FF D9 45 E0 D9 45 DC D9 C9 D8 CC D9 C9 D8 CB 8B 95 E4 FE FF FF 03 55 D8 8B 45 D8 DE C1 D9 45 E0 8D 0C 10 DE CB D9 85 04 FF FF FF D9 CC D8 4D DC D9 CC D8 A5 08 FF FF FF 8D 34 08 D9 9D 00 FF FF FF D9 CA 01 F0 DE E3 D9 85 0C FF FF FF D9 85 04 FF FF FF 8B 7D 14 0F AF BD E4 FE FF FF D8 85 08 FF FF FF D9 C9 89 85 BC FE FF FF DE C2 D9 C9 8B 45 10 D9 1C B8 D9 85 00 FF FF FF D9 C1 80 B5 03 FF FF FF 80 8B BD BC FE FF FF D9 E0 D9 C9 0F AF 55 14 0F AF 4D 14 0F AF 75 14 D8 E4
+***** EXPENSIVE 2037 502
+
+. 2036 80578EA 69
+. D9 CC 0F AF 7D 14 D8 AD 00 FF FF FF D9 CA D8 E3 D9 C9 DE E3 D9 1C 90 D9 CA D9 1C 88 D9 C9 D9 1C B0 D9 1C B8 8B 55 D4 8D 04 92 FF 45 E4 8B 4D D0 01 95 E4 FE FF FF 01 85 D4 FE FF FF 39 4D E4 0F 82 79 FE FF FF
+
+. 2037 804D457 41
+. 83 EC 0C 8B 45 EC 8B 55 14 FF B4 82 08 01 00 00 56 FF 75 E8 51 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 C4 21 00 00
+
+. 2038 804F644 77
+. 55 89 E5 57 56 53 81 EC AC 00 00 00 31 D2 8B 45 24 F7 75 1C 89 45 E0 31 D2 8B 45 24 F7 75 20 89 45 DC 31 D2 8B 45 20 F7 75 1C 8B 55 1C 4A 31 FF E8 00 00 00 00 5B 81 C3 9B 1A 01 00 89 55 D0 0F AF D0 3B 7D E0 89 45 D8 89 55 D4 73 42
+***** EXPENSIVE 2040 503
+
+. 2039 804F691 66
+. 8B 45 0C 8B 55 14 C1 E0 04 C1 E2 04 31 F6 31 C9 89 85 64 FF FF FF 89 95 60 FF FF FF 8B 45 08 DD 04 30 8B 55 10 DD 1C 0A DD 44 30 08 47 DD 5C 0A 08 03 B5 64 FF FF FF 03 8D 60 FF FF FF 3B 7D E0 72 DA
+
+. 2040 804F6D3 22
+. 8B 45 D0 D1 E8 C7 45 CC 01 00 00 00 40 39 45 CC 0F 83 B2 00 00 00
+
+. 2041 804F6E9 23
+. 8B 4D E0 89 45 C4 89 8D 68 FF FF FF 31 FF 3B 7D E0 0F 83 83 00 00 00
+***** EXPENSIVE 2043 504
+
+. 2042 804F700 131
+. 8B 75 1C 2B 75 CC 0F AF 75 E0 89 75 C0 8D 76 00 8B 85 68 FF FF FF 8B 4D C0 01 F9 8D 14 38 8B 45 0C 0F AF C1 8B 75 14 0F AF F2 C1 E0 04 89 8D 54 FF FF FF 0F AF 55 0C 8B 4D 08 DD 04 01 C1 E2 04 DC 04 11 C1 E6 04 8B 4D 10 DD 1C 31 03 45 08 03 55 08 DD 40 08 DC 42 08 DD 5C 31 08 8B 8D 54 FF FF FF DD 00 0F AF 4D 14 C1 E1 04 DC 2A 8B 75 10 DD 1C 0E DD 40 08 47 DC 6A 08 3B 7D E0 DD 5C 0E 08 72 8D
+
+. 2043 804F783 24
+. 8B 7D E0 FF 45 CC 8B 45 C4 01 BD 68 FF FF FF 39 45 CC 0F 82 5A FF FF FF
+
+. 2044 804F6F5 11
+. 31 FF 3B 7D E0 0F 83 83 00 00 00
+
+. 2045 804F79B 7
+. 31 FF 3B 7D E0 73 42
+***** EXPENSIVE 2047 505
+
+. 2046 804F7A2 66
+. 8B 55 14 8B 45 0C C1 E2 04 C1 E0 04 31 F6 31 C9 89 95 70 FF FF FF 89 85 6C FF FF FF 8B 55 10 DD 04 32 8B 45 08 DD 1C 08 DD 44 32 08 47 DD 5C 08 08 03 B5 70 FF FF FF 03 8D 6C FF FF FF 3B 7D E0 72 DA
+
+. 2047 804F7E4 18
+. 8B 45 D0 D1 E8 C7 45 C8 01 00 00 00 40 39 45 C8 73 75
+
+. 2048 804F7F6 19
+. 8B 55 E0 89 45 BC 89 95 74 FF FF FF 31 FF 3B 7D E0 73 4E
+***** EXPENSIVE 2050 506
+
+. 2049 804F809 78
+. 8B 4D 0C C1 E1 04 8B B5 74 FF FF FF 31 D2 89 8D 54 FF FF FF 8D 76 00 8D 04 3E 0F AF 45 14 C1 E0 04 8B 4D 10 DD 04 01 8B 4D 08 DC 04 11 DD 1C 11 8B 4D 10 DD 44 01 08 8B 45 08 DC 44 10 08 47 DD 5C 10 08 03 95 54 FF FF FF 3B 7D E0 72 C9
+
+. 2050 804F857 20
+. 8B 55 E0 FF 45 C8 8B 4D BC 01 95 74 FF FF FF 39 4D C8 72 97
+
+. 2051 804F802 7
+. 31 FF 3B 7D E0 73 4E
+
+. 2052 804F86B 22
+. 8B 45 D0 D1 E8 C7 45 CC 01 00 00 00 40 39 45 CC 0F 83 BC 01 00 00
+
+. 2053 804F881 42
+. 8B 75 E0 89 B5 78 FF FF FF 8B 55 1C 2B 55 CC 8B 45 CC 31 FF 0F AF 45 DC 0F AF 55 E0 3B 7D E0 89 45 B8 89 45 B4 89 55 B0 73 62
+***** EXPENSIVE 2055 507
+
+. 2054 804F8AB 98
+. 8B 4D 14 C1 E1 04 31 F6 89 8D 7C FF FF FF 8D 76 00 8B 85 78 FF FF FF 8D 0C 38 8B 55 10 0F AF 4D 0C DD 04 32 C1 E1 04 8B 45 08 DD 1C 08 DD 44 32 08 8B 55 B0 DD 5C 08 08 8D 0C 3A 0F AF 4D 0C 8B 45 10 DD 04 30 C1 E1 04 8B 55 08 DD 1C 0A 47 DD 44 30 08 03 B5 7C FF FF FF 3B 7D E0 DD 5C 0A 08 72 AF
+
+. 2055 804F90D 22
+. 8B 45 D0 D1 E8 C7 45 C8 01 00 00 00 40 39 45 C8 0F 83 FF 00 00 00
+***** EXPENSIVE 2057 508
+
+. 2056 804F923 32
+. DD 83 34 F6 FF FF 89 45 AC DD 5D 80 DD 83 44 F6 FF FF 8B 75 E0 8B 7D B8 85 FF 0F 85 E1 03 00 00
+
+. 2057 804FD24 6
+. 83 7D 18 FF 74 18
+***** EXPENSIVE 2059 509
+
+. 2058 804FD42 20
+. 8B 45 B8 C1 E0 04 03 45 28 DD 40 F0 DD 40 F8 E9 F2 FB FF FF
+
+. 2059 804F948 11
+. 31 FF 3B 7D E0 0F 83 C8 03 00 00
+***** EXPENSIVE 2061 510
+
+. 2060 804F953 145
+. 8B 45 1C 2B 45 C8 0F AF 45 E0 89 85 54 FF FF FF 90 8D 14 3E 8B 8D 54 FF FF FF 8D 04 39 0F AF 55 14 8B 4D 10 C1 E2 04 0F AF 45 14 D9 C1 DC 0C 11 C1 E0 04 D9 C1 DC 4C 01 08 D9 C1 D9 C4 DC 4C 11 08 D9 C9 D8 E2 D9 CB DE C2 8B 95 78 FF FF FF D9 C3 DC 0C 01 D9 CB 8D 04 3A 0F AF 45 0C 8B 4D 08 C1 E0 04 DC 04 01 D9 C1 D9 C9 DD 1C 01 D8 C3 01 C8 DC 40 08 8B 55 B0 DD 58 08 D9 C9 8D 04 3A 0F AF 45 0C C1 E0 04 DC 04 01 DD 1C 01 DE E1 01 C8 47
+***** EXPENSIVE 2062 511
+
+. 2061 804F9E4 15
+. DC 40 08 3B 7D E0 DD 58 08 0F 82 71 FF FF FF
+***** EXPENSIVE 2063 512
+
+. 2062 804F9F3 45
+. DD D8 DD D8 8B 4D B4 01 4D B8 8B 4D 1C 0F AF 4D DC 8B 45 B8 31 D2 F7 F1 FF 45 C8 8B 4D AC 03 75 E0 39 4D C8 89 55 B8 0F 82 18 FF FF FF
+
+. 2063 804F938 11
+. 8B 7D B8 85 FF 0F 85 E1 03 00 00
+***** EXPENSIVE 2065 513
+
+. 2064 804FA20 29
+. DD D8 8B 45 D0 D1 E8 8B 75 E0 FF 45 CC 40 01 B5 78 FF FF FF 39 45 CC 0F 82 4D FE FF FF
+
+. 2065 804F88A 33
+. 8B 55 1C 2B 55 CC 8B 45 CC 31 FF 0F AF 45 DC 0F AF 55 E0 3B 7D E0 89 45 B8 89 45 B4 89 55 B0 73 62
+
+. 2066 804FA3D 14
+. 8B 4D D8 85 C9 C7 45 E4 00 00 00 00 74 57
+
+. 2067 804FA4B 87
+. 8B 7D 0C 8B 45 14 C1 E7 04 C1 E0 04 31 F6 31 C9 89 7D 88 89 85 4C FF FF FF 8B 7D 08 8B 04 37 8B 54 37 04 8B 7D 10 89 04 0F 89 54 0F 04 8B 7D 08 8B 44 37 08 8B 54 37 0C 8B 7D 10 89 44 0F 08 FF 45 E4 8B 45 D8 89 54 0F 0C 03 75 88 03 8D 4C FF FF FF 39 45 E4 72 C2
+
+. 2068 804FAA2 19
+. C7 45 C8 01 00 00 00 8B 55 1C 39 55 C8 0F 83 98 00 00 00
+
+. 2069 804FAB5 27
+. 8B 4D E0 8B 75 D8 89 4D 90 89 75 8C C7 45 E4 00 00 00 00 8B 7D D8 39 7D E4 73 62
+
+. 2070 804FAD0 98
+. 8B 45 8C 8B 55 90 89 45 A8 89 95 4C FF FF FF 90 8B 4D E4 8B 75 E4 03 8D 4C FF FF FF 03 75 A8 0F AF 4D 0C C1 E1 04 8B 7D 08 0F AF 75 14 8B 04 0F 8B 54 0F 04 C1 E6 04 8B 7D 10 89 04 37 89 54 37 04 8B 7D 08 8B 44 0F 08 8B 54 0F 0C 8B 4D 10 89 44 31 08 89 54 31 0C FF 45 E4 8B 75 D8 39 75 E4 72 AE
+
+. 2071 804FB32 27
+. 8B 7D E0 8B 45 D8 FF 45 C8 8B 55 1C 01 7D 90 01 45 8C 39 55 C8 0F 82 74 FF FF FF
+
+. 2072 804FAC1 15
+. C7 45 E4 00 00 00 00 8B 7D D8 39 7D E4 73 62
+
+. 2073 804FB4D 28
+. C7 45 E8 01 00 00 00 8B 75 DC 8B 4D 20 39 75 E8 8B 7D D8 89 4D EC 0F 83 98 01 00 00
+
+. 2074 804FD01 13
+. 81 C4 AC 00 00 00 5B 5E 31 C0 5F C9 C3
+***** EXPENSIVE 2076 514
+
+. 2075 804FD2A 24
+. 8B 45 B8 C1 E0 04 03 45 28 DD 40 F8 DD 40 F0 D9 C9 D9 E0 E9 06 FC FF FF
+
+. 2076 80508FB 41
+. 83 EC 0C 8B 45 EC 8B 55 14 FF B4 82 08 01 00 00 56 FF 75 E8 51 FF 75 1C FF 75 C8 FF 75 CC FF 75 D0 FF 75 D4 E8 0C 21 00 00
+
+. 2077 8052A30 74
+. 55 89 E5 57 56 53 83 EC 7C 31 D2 8B 45 24 F7 75 1C 89 45 E4 31 D2 8B 45 24 F7 75 20 89 45 E0 31 D2 8B 45 20 F7 75 1C 8B 55 1C 4A 31 FF E8 00 00 00 00 5B 81 C3 B2 E6 00 00 89 55 D4 0F AF D0 3B 7D E4 89 45 DC 89 55 D8 73 2A
+
+. 2078 8052A7A 42
+. 31 C9 31 D2 8B 75 08 8B 04 CE 8B 75 10 89 04 D6 8B 75 08 8B 44 CE 04 47 8B 75 10 89 44 D6 04 03 4D 0C 03 55 14 3B 7D E4 72 DA
+
+. 2079 8052AA4 22
+. 8B 45 D4 D1 E8 C7 45 D0 01 00 00 00 40 39 45 D0 0F 83 AD 00 00 00
+
+. 2080 8052ABA 20
+. 89 45 C8 8B 45 E4 89 45 84 31 FF 3B 7D E4 0F 83 84 00 00 00
+***** EXPENSIVE 2082 515
+
+. 2081 8052ACE 132
+. 8B 4D 1C 2B 4D D0 8B 55 84 0F AF 4D E4 89 55 C4 89 4D C0 8D 76 00 8B 55 C0 01 FA 89 55 80 8B 75 C4 8B 55 0C 0F AF 55 80 8D 04 3E 8B 4D 08 8B 75 14 D9 04 D1 0F AF F0 0F AF 45 0C D8 04 C1 8B 4D 10 D9 1C F1 8B 4D 08 D9 44 D1 04 D8 44 C1 04 8B 4D 10 D9 5C F1 04 8B 75 08 D9 04 D6 8B 4D 80 D8 2C C6 0F AF 4D 14 8B 75 10 D9 1C CE 8B 75 08 D9 44 D6 04 47 D8 6C C6 04 3B 7D E4 8B 45 10 D9 5C C8 04 72 92
+
+. 2082 8052B52 21
+. 8B 55 E4 FF 45 D0 8B 4D C8 01 55 84 39 4D D0 0F 82 5C FF FF FF
+
+. 2083 8052AC3 11
+. 31 FF 3B 7D E4 0F 83 84 00 00 00
+
+. 2084 8052B67 7
+. 31 FF 3B 7D E4 73 2A
+
+. 2085 8052B6E 42
+. 31 C9 31 D2 8B 75 10 8B 04 CE 8B 75 08 89 04 D6 8B 75 10 8B 44 CE 04 47 8B 75 08 89 44 D6 04 03 4D 14 03 55 0C 3B 7D E4 72 DA
+
+. 2086 8052B98 18
+. 8B 45 D4 D1 E8 C7 45 CC 01 00 00 00 40 39 45 CC 73 63
+
+. 2087 8052BAA 19
+. 89 45 BC 8B 45 E4 89 85 78 FF FF FF 31 FF 3B 7D E4 73 3C
+***** EXPENSIVE 2089 516
+
+. 2088 8052BBD 60
+. 8B 8D 78 FF FF FF 31 D2 8D 76 00 8D 04 39 0F AF 45 14 8B 75 10 D9 04 C6 8B 75 08 D8 04 D6 D9 1C D6 8B 75 10 D9 44 C6 04 8B 45 08 D8 44 D0 04 47 D9 5C D0 04 03 55 0C 3B 7D E4 72 CF
+
+. 2089 8052BF9 20
+. 8B 55 E4 FF 45 CC 8B 4D BC 01 95 78 FF FF FF 39 4D CC 72 A9
+
+. 2090 8052BB6 7
+. 31 FF 3B 7D E4 73 3C
+
+. 2091 8052C0D 22
+. 8B 45 D4 D1 E8 C7 45 D0 01 00 00 00 40 39 45 D0 0F 83 A0 01 00 00
+
+. 2092 8052C23 45
+. 8B 75 E4 89 45 A4 89 75 88 89 45 B8 8B 55 1C 2B 55 D0 8B 45 D0 31 FF 0F AF 45 E0 0F AF 55 E4 3B 7D E4 89 45 B4 89 45 B0 89 55 AC 73 55
+
+. 2093 8052C50 85
+. 31 C9 89 F6 8B 75 88 8D 04 3E 8B 75 10 8B 14 CE 0F AF 45 0C 8B 75 08 89 14 C6 8B 75 10 8B 54 CE 04 8B 75 08 89 54 C6 04 8B 55 AC 8D 04 3A 8B 75 10 8B 14 CE 0F AF 45 0C 8B 75 08 89 14 C6 47 8B 75 10 8B 54 CE 04 8B 75 08 03 4D 14 3B 7D E4 89 54 C6 04 72 AF
+
+. 2094 8052CA5 19
+. C7 45 CC 01 00 00 00 8B 45 A4 39 45 CC 0F 83 F6 00 00 00
+***** EXPENSIVE 2096 517
+
+. 2095 8052CB8 43
+. 8B 45 D4 D1 E8 40 D9 83 50 F6 FF FF 89 45 A8 D9 83 4C F6 FF FF D9 C9 D9 5D 8C 8B 75 E4 8D 76 00 8B 7D B4 85 FF 0F 85 32 03 00 00
+
+. 2096 8053015 6
+. 83 7D 18 FF 74 18
+***** EXPENSIVE 2098 518
+
+. 2097 8053033 20
+. 8B 55 B4 8B 4D 28 8D 04 D1 D9 40 F8 D9 40 FC E9 A1 FC FF FF
+
+. 2098 8052CE8 11
+. 31 FF 3B 7D E4 0F 83 19 03 00 00
+***** EXPENSIVE 2100 519
+
+. 2099 8052CF3 140
+. 8B 45 1C 2B 45 CC 0F AF 45 E4 89 45 80 8D 14 3E 8B 4D 80 0F AF 55 14 8D 04 39 D9 C1 8B 4D 10 D8 0C D1 0F AF 45 14 D9 C1 D8 4C C1 04 D9 C1 D9 C4 D8 4C D1 04 D9 C9 D8 E2 D9 CB DE C2 8B 55 88 D9 C3 D8 0C C1 D9 C1 8D 04 3A 8B 4D 08 0F AF 45 0C D8 C1 D9 CC D8 04 C1 D9 CC D8 44 C1 04 D9 CC 8B 55 AC D9 1C C1 D9 CB D9 5C C1 04 8D 04 3A 0F AF 45 0C DE E2 47 D8 04 C1 D9 C9 D8 44 C1 04 D9 C9 3B 7D E4 D9 1C C1 D9 5C C1 04 72 81
+***** EXPENSIVE 2101 520
+
+. 2100 8052D7F 45
+. DD D8 DD D8 8B 4D B0 01 4D B4 8B 4D 1C 0F AF 4D E0 8B 45 B4 31 D2 F7 F1 FF 45 CC 8B 4D A8 03 75 E4 39 4D CC 89 55 B4 0F 82 2C FF FF FF
+
+. 2101 8052CD8 11
+. 8B 7D B4 85 FF 0F 85 32 03 00 00
+***** EXPENSIVE 2103 521
+
+. 2102 8052DAC 23
+. DD D8 8B 75 E4 FF 45 D0 8B 45 B8 01 75 88 39 45 D0 0F 82 6C FE FF FF
+
+. 2103 8052C2F 33
+. 8B 55 1C 2B 55 D0 8B 45 D0 31 FF 0F AF 45 E0 0F AF 55 E4 3B 7D E4 89 45 B4 89 45 B0 89 55 AC 73 55
+
+. 2104 8052DC3 14
+. 8B 4D DC 85 C9 C7 45 E8 00 00 00 00 74 2F
+
+. 2105 8052DD1 47
+. 31 C9 31 D2 8B 75 08 8B 04 CE 8B 75 10 89 04 D6 8B 75 08 8B 44 CE 04 8B 75 10 89 44 D6 04 FF 45 E8 8B 45 DC 03 4D 0C 03 55 14 39 45 E8 72 D5
+
+. 2106 8052E00 15
+. C7 45 CC 01 00 00 00 8B 55 1C 39 55 CC 73 78
+
+. 2107 8052E0F 27
+. 8B 4D E4 8B 75 DC 89 4D 94 89 75 90 C7 45 E8 00 00 00 00 8B 45 DC 39 45 E8 73 46
+
+. 2108 8052E2A 70
+. 8B 55 94 8B 7D 90 89 95 78 FF FF FF 89 F6 8B 45 E8 8B 4D E8 03 85 78 FF FF FF 0F AF 45 0C 01 F9 8B 75 08 8B 14 C6 0F AF 4D 14 8B 75 10 89 14 CE 8B 55 08 8B 44 C2 04 89 44 CE 04 FF 45 E8 8B 4D DC 39 4D E8 72 C8
+
+. 2109 8052E70 23
+. 8B 75 E4 8B 45 DC FF 45 CC 8B 55 1C 01 75 94 01 45 90 39 55 CC 72 94
+
+. 2110 8052E1B 15
+. C7 45 E8 00 00 00 00 8B 45 DC 39 45 E8 73 46
+
+. 2111 8052E87 28
+. C7 45 EC 01 00 00 00 8B 75 E0 8B 4D 20 39 75 EC 8B 7D DC 89 4D F0 0F 83 57 01 00 00
+
+. 2112 8052FFA 10
+. 83 C4 7C 5B 5E 31 C0 5F C9 C3
+***** EXPENSIVE 2114 522
+
+. 2113 805301B 24
+. 8B 55 B4 8B 4D 28 8D 04 D1 D9 40 FC D9 40 F8 D9 C9 D9 E0 E9 B5 FC FF FF
+
+. 2114 804E500 11
+. 8B 4D E4 85 C9 0F 85 AB 04 00 00
+
+. 2115 804E9B6 10
+. 83 7D 18 FF 0F 84 80 00 00 00
+***** EXPENSIVE 2117 523
+
+. 2116 804EA40 103
+. 8B 85 AC FE FF FF 8B 55 24 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 28 DD 5D C0 DD 5D B8 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 2C DD 5D B0 DD 5D A8 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 30 DD 5D A0 DD 5D 98 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 34 DD 5D 90 DD 44 02 F0 D9 C9 DD 5D 88 DD 5D 80 DD 44 02 F8 E9 9D FA FF FF
+***** EXPENSIVE 2118 524
+
+. 2117 804E544 25
+. DD 9D 78 FF FF FF C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 30 04 00 00
+***** EXPENSIVE 2119 525
+
+. 2118 804E9C0 128
+. 8B 85 AC FE FF FF 8B 55 24 DD 44 02 F0 DD 44 02 F8 8B 55 28 DD 5D B8 DD 5D C0 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 2C DD 5D A8 DD 5D B0 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 30 DD 5D 98 DD 5D A0 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 34 DD 5D 88 DD 5D 90 DD 44 02 F8 DD 44 02 F0 80 75 BF 80 80 75 AF 80 80 75 9F 80 80 75 8F 80 DD 5D 80 DD 9D 78 FF FF FF 80 B5 7F FF FF FF 80 E9 0A FB FF FF
+
+. 2119 804E54A 19
+. C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 30 04 00 00
+
+. 2120 80518CC 11
+. 8B 4D E0 85 C9 0F 85 C6 04 00 00
+
+. 2121 8051D9D 6
+. 83 7D 18 FF 74 7A
+***** EXPENSIVE 2123 526
+
+. 2122 8051E1D 103
+. 8B 85 F4 FE FF FF 8B 55 24 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 28 D9 5D C4 D9 5D C0 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 2C D9 5D BC D9 5D B8 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 30 D9 5D B4 D9 5D B0 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 34 D9 5D AC D9 44 02 F8 D9 C9 D9 5D A8 D9 5D A4 D9 44 02 FC E9 8C FA FF FF
+***** EXPENSIVE 2124 527
+
+. 2123 8051910 22
+. D9 5D A0 C7 45 DC 00 00 00 00 8B 45 D0 39 45 DC 0F 83 4E 04 00 00
+***** EXPENSIVE 2125 528
+
+. 2124 8051DA3 122
+. 8B 85 F4 FE FF FF 8B 55 24 D9 44 02 F8 D9 44 02 FC 8B 55 28 D9 5D C0 D9 5D C4 D9 44 02 FC D9 44 02 F8 D9 C9 8B 55 2C D9 5D B8 D9 5D BC D9 44 02 FC D9 44 02 F8 D9 C9 8B 55 30 D9 5D B0 D9 5D B4 D9 44 02 FC D9 44 02 F8 D9 C9 8B 55 34 D9 5D A8 D9 5D AC D9 44 02 FC D9 44 02 F8 80 75 C3 80 80 75 BB 80 80 75 B3 80 80 75 AB 80 D9 5D A4 D9 5D A0 80 75 A3 80 E9 F6 FA FF FF
+
+. 2125 8051913 19
+. C7 45 DC 00 00 00 00 8B 45 D0 39 45 DC 0F 83 4E 04 00 00
+
+. 2126 8059202 19
+. C7 45 E8 00 00 00 00 8B 55 E0 39 55 E8 0F 83 8C 00 00 00
+***** EXPENSIVE 2128 529
+
+. 2127 8059215 138
+. 8B 75 DC DD 45 D0 4E D9 E0 89 F7 8B 4D E4 8D 04 39 01 C1 8B 55 0C 0F AF D7 0F AF 45 0C 89 8D 7C FF FF FF 8B 4D 08 DD 04 D1 DD 04 C1 8B 8D 7C FF FF FF 0F AF 4D 0C 8B 45 08 DD 04 C8 D9 C1 D8 E1 DD 83 BC F8 FF FF 8B 4D DC D8 C9 8B 45 14 0F AF C6 8D 14 4E D8 C4 8B 4D 10 DD 1C C1 D9 CA DE C1 8D 04 F5 08 00 00 00 0F AF 45 14 D8 CB DD 1C 08 FF 45 E8 8B 45 E0 0F AF 55 14 DE E9 03 75 18 03 7D DC 39 45 E8 DD 1C D1 72 81
+***** EXPENSIVE 2129 530
+
+. 2128 805929F 10
+. DD D8 83 C4 7C 5B 5E 5F C9 C3
+***** EXPENSIVE 2130 531
+
+. 2129 80543A4 102
+. C7 85 54 FF FF FF 10 00 00 00 C7 85 50 FF FF FF FE FF FF FF 8B 95 54 FF FF FF 8B 4D 20 8B 45 24 DD 44 11 F0 DD 44 11 F8 D9 C9 DD 5D D0 DD 5D C8 DD 44 10 F0 DD 44 10 F8 D9 C9 8B 45 28 DD 5D C0 DD 5D B8 DD 44 10 F0 DD 44 10 F8 D9 C9 C7 45 E8 00 00 00 00 8B 45 DC 39 45 E8 DD 5D B0 DD 5D A8 0F 83 57 02 00 00
+***** EXPENSIVE 2131 532
+
+. 2130 805440A 192
+. 8B 55 E0 D1 E2 8B 4D EC 8B B5 50 FF FF FF 8B 45 E0 8D 4C 09 FF 8D 74 32 FF C1 E0 02 89 55 A4 89 8D 60 FF FF FF 89 B5 5C FF FF FF 89 CF 89 85 58 FF FF FF 8D 76 00 8B 45 0C 8B 75 08 0F AF C7 DD 04 C6 8D 04 FD 08 00 00 00 0F AF 45 0C 8B 4D A4 DD 04 30 8D 14 39 8B 45 0C 0F AF C2 8B 8D 5C FF FF FF DD 04 C6 03 4D A4 8B 45 0C 0F AF C1 8D 14 D5 08 00 00 00 DD 04 C6 0F AF 55 0C 8B 45 0C DD 04 32 0F AF 85 5C FF FF FF 8B 95 5C FF FF FF DD 04 C6 D9 CB DD 5D 98 D9 C9 DD 55 90 8D 04 D5 08 00 00 00 D8 C4 0F AF 45 0C DD 9D 40 FF FF FF 8D 0C CD 08 00 00 00 DD 45 98 0F AF 4D 0C DD 04 30
+***** EXPENSIVE 2132 533
+
+. 2131 80544CA 152
+. D9 C9 D8 C3 DD 04 31 D9 CA D9 E0 D9 C9 DD 5D 88 D9 CB DC 6D 98 D9 C2 D8 C4 D9 CA D9 E0 D9 C9 DD 9D 78 FF FF FF D9 C4 D8 C1 D9 CD DE E1 D9 C9 DD 55 80 DD 85 78 FF FF FF D9 CB DE E4 D9 CA D8 C1 DD 45 88 D9 CE DC 65 90 D9 CE DC 85 40 FF FF FF D9 C9 DD 9D 70 FF FF FF 8B 95 60 FF FF FF 03 55 E4 8B 45 E4 8D 0C 10 01 C8 8B 75 14 0F AF B5 60 FF FF FF 89 85 34 FF FF FF 8B 45 10 DD 1C F0 D9 C9 8B B5 60 FF FF FF 8D 34 F5 08 00 00 00 0F AF 75 14 D8 C3 D9 C4 D9 C9
+***** EXPENSIVE 2133 534
+
+. 2132 8054562 149
+. DD 1C 06 D8 E2 DD 45 D0 DD 45 C8 DC 8D 70 FF FF FF D9 C9 8B 45 14 D8 CA 8B 75 10 0F AF C2 DE E1 DD 1C C6 DD 45 D0 DC 8D 70 FF FF FF DD 85 40 FF FF FF D9 CA DC 4D C8 D9 CA DC 65 88 D9 CD DC 65 80 D9 CA DE C1 8B 45 14 DD 45 C0 DD 45 B8 0F AF C1 D8 CB D9 C9 D8 CE D9 CB DC 4D C0 D9 CE DC 4D B8 D9 CB 8D 14 D5 08 00 00 00 8D 0C CD 08 00 00 00 0F AF 55 14 DE E1 D9 CA 0F AF 4D 14 DE C5 DD 1C 32 D9 C9 DC A5 78 FF FF FF D9 C9 DD 1C C6 D9 CB DE C1 D9 C9
+***** EXPENSIVE 2134 535
+
+. 2133 80545F7 106
+. DD 1C 31 DD 45 B0 DD 45 A8 D9 C9 D8 CA D9 C9 D8 CB 8B 45 14 0F AF 85 34 FF FF FF DE E9 DD 1C C6 D9 C9 8B 85 34 FF FF FF 8D 34 C5 08 00 00 00 DC 4D B0 D9 C9 DC 4D A8 0F AF 75 14 DE C1 8B 55 10 8B 4D E0 DD 1C 16 FF 45 E8 8B B5 58 FF FF FF 8B 45 DC 01 8D 60 FF FF FF 01 B5 5C FF FF FF 39 45 E8 8D 3C 8F 0F 82 DF FD FF FF
+
+. 2134 8054661 34
+. 8B 85 6C FF FF FF FF 45 EC D1 E8 83 85 54 FF FF FF 10 83 AD 50 FF FF FF 02 39 45 EC 0F 82 35 FD FF FF
+
+. 2135 805B82F 19
+. C7 45 E8 00 00 00 00 8B 55 E0 39 55 E8 0F 83 89 00 00 00
+***** EXPENSIVE 2137 536
+
+. 2136 805B842 135
+. 8B 75 DC D9 45 D8 4E D9 E0 89 F7 8D 76 00 8B 4D E4 8D 04 39 01 C1 8B 55 0C 0F AF D7 0F AF 45 0C 89 4D A0 8B 4D 08 D9 04 91 D9 04 81 8B 4D A0 0F AF 4D 0C 8B 45 08 D9 04 88 D9 C1 D8 E1 DD 83 BC F8 FF FF 8B 4D DC D8 C9 8B 45 14 0F AF C6 8D 14 4E D8 C4 8B 4D 10 D9 1C 81 D9 CA DE C1 8D 04 B5 04 00 00 00 0F AF 45 14 D8 CB D9 1C 08 FF 45 E8 8B 45 E0 0F AF 55 14 DE E9 03 75 18 03 7D DC 39 45 E8 D9 1C 91 72 87
+***** EXPENSIVE 2138 537
+
+. 2137 805B8C9 10
+. DD D8 83 C4 54 5B 5E 5F C9 C3
+***** EXPENSIVE 2139 538
+
+. 2138 8056B92 104
+. C7 85 44 FF FF FF 02 00 00 00 C7 85 40 FF FF FF FE FF FF FF 89 F6 8B 95 44 FF FF FF 8B 4D 20 8B 45 24 D9 44 91 F8 D9 44 91 FC D9 C9 D9 5D D0 D9 5D CC D9 44 90 F8 D9 44 90 FC D9 C9 8B 45 28 D9 5D C8 D9 5D C4 D9 44 90 F8 D9 44 90 FC D9 C9 C7 45 E8 00 00 00 00 8B 45 DC 39 45 E8 D9 5D C0 D9 5D BC 0F 83 74 02 00 00
+***** EXPENSIVE 2140 539
+
+. 2139 8056BFA 185
+. 8B 55 E0 D1 E2 8B 4D EC 8B B5 40 FF FF FF 8B 45 E0 8D 4C 09 FF 8D 74 32 FF C1 E0 02 89 55 B8 89 8D 50 FF FF FF 89 B5 4C FF FF FF 89 CF 89 85 48 FF FF FF 8D 76 00 8B 45 0C 8B 75 08 0F AF C7 D9 04 86 8D 04 BD 04 00 00 00 0F AF 45 0C 8B 4D B8 D9 04 30 8D 14 39 8B 45 0C 0F AF C2 8B 8D 4C FF FF FF D9 04 86 D9 CA 03 4D B8 8B 45 0C 0F AF C1 8D 0C 8D 04 00 00 00 0F AF 4D 0C D9 5D B4 D9 5D B0 D9 5D AC D9 04 86 D9 04 31 8D 14 95 04 00 00 00 0F AF 55 0C D9 5D A0 D9 5D A4 8B 45 0C D9 45 B4 D9 04 32 D9 C9 0F AF 85 4C FF FF FF 8B 95 4C FF FF FF D8 45 A4 D9 04 86
+***** EXPENSIVE 2141 540
+
+. 2140 8056CB3 158
+. D9 C9 80 75 A3 80 8D 04 95 04 00 00 00 D9 5D 9C D9 45 B0 0F AF 45 0C D8 45 A0 D9 CA D9 5D A8 D9 04 30 D9 CA D9 5D 98 D9 C9 8B 95 50 FF FF FF 03 55 E4 8B 45 E4 D9 E0 D9 45 A8 8D 0C 10 D8 C1 D9 45 AC D9 CA D8 6D A8 D9 5D 94 D9 C9 01 C8 D8 C2 D9 45 AC D9 45 9C D9 C9 8B 75 14 0F AF B5 50 FF FF FF DE E4 D8 C1 D9 45 98 D9 C9 89 85 34 FF FF FF 8B 45 10 D9 1C B0 D8 C2 8B B5 50 FF FF FF D9 5D 90 D9 45 B4 D9 45 B0 8D 34 B5 04 00 00 00 D8 65 A0 D9 C9 0F AF 75 14 D8 65 A4 D9 45 90
+***** EXPENSIVE 2142 541
+
+. 2141 8056D51 131
+. D9 45 94 D8 EA D9 C9 D9 1C 06 D9 C2 D9 45 D0 D9 CA D9 55 8C D9 C9 D8 C7 D9 CA DE C9 D9 45 CC D8 CA D9 CE 8B 45 14 8B 75 10 D8 6D 98 D9 CB D8 45 94 D9 CD 0F AF C2 D8 6D 9C D9 C9 DE E6 D9 5D 88 D9 C9 D9 55 84 D9 CB D9 55 80 D9 CC D9 1C 86 D9 45 CC D8 4D 8C D9 C9 D8 4D D0 DE C1 D9 45 C4 D9 45 C8 8B 45 14 D8 4D 88 D9 C9 D8 CC 8D 14 95 04 00 00 00 0F AF 55 14 0F AF C1 DE E9 D9 C9 D9 14 32 D9 C9
+***** EXPENSIVE 2143 542
+
+. 2142 8056DD4 154
+. D9 1C 86 D9 45 C4 D9 45 C8 D9 C9 D8 4D 88 D9 C9 DE CC D9 CA DE E5 D9 C9 DE C2 D9 45 BC D9 45 C0 D9 C9 8B 45 14 D8 CD D9 C9 8D 0C 8D 04 00 00 00 D8 CC 0F AF 4D 14 0F AF 85 34 FF FF FF DE E1 D9 CA D9 1C 31 D9 C9 D9 1C 86 8B 85 34 FF FF FF D9 45 BC D9 CB 8D 34 85 04 00 00 00 D8 4D C0 D9 CB DE CA D9 CA 0F AF 75 14 8B 55 E0 8B 45 10 8B 8D 48 FF FF FF DE C1 D9 1C 06 01 95 50 FF FF FF 01 8D 4C FF FF FF D9 5D 8C FF 45 E8 8B 75 DC 39 75 E8 8D 3C 97 0F 82 C2 FD FF FF
+
+. 2143 8056E6E 34
+. 8B 85 5C FF FF FF FF 45 EC D1 E8 83 85 44 FF FF FF 02 83 AD 40 FF FF FF 02 39 45 EC 0F 82 18 FD FF FF
+
+. 2144 804EBF0 11
+. 8B 45 E4 85 C0 0F 85 F5 08 00 00
+
+. 2145 804F4F0 10
+. 83 7D 18 FF 0F 84 B5 00 00 00
+***** EXPENSIVE 2147 543
+
+. 2146 804F5AF 149
+. 8B 85 A4 FD FF FF 8B 55 24 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 28 DD 5D 98 DD 5D 90 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 2C DD 5D 88 DD 5D 80 DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 30 DD 9D 78 FF FF FF DD 9D 70 FF FF FF DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 34 DD 9D 68 FF FF FF DD 9D 60 FF FF FF DD 44 02 F0 DD 44 02 F8 D9 C9 8B 55 38 DD 9D 58 FF FF FF DD 9D 50 FF FF FF DD 44 02 F0 DD 44 02 F8 D9 C9 DD 9D 48 FF FF FF DD 9D 40 FF FF FF E9 0B F6 FF FF
+
+. 2147 804EC4F 19
+. C7 45 E0 00 00 00 00 8B 45 D4 39 45 E0 0F 83 68 08 00 00
+***** EXPENSIVE 2149 544
+
+. 2148 804F4FA 181
+. 8B 85 A4 FD FF FF 8B 55 24 DD 44 02 F0 DD 44 02 F8 8B 55 28 DD 5D 90 DD 5D 98 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 2C DD 5D 80 DD 5D 88 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 30 DD 9D 70 FF FF FF DD 9D 78 FF FF FF DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 34 DD 9D 60 FF FF FF DD 9D 68 FF FF FF DD 44 02 F8 DD 44 02 F0 DD 9D 58 FF FF FF 80 75 97 80 80 75 87 80 80 B5 77 FF FF FF 80 80 B5 67 FF FF FF 80 DD 9D 50 FF FF FF 8B 55 38 DD 44 02 F8 DD 44 02 F0 D9 C9 DD 9D 40 FF FF FF 80 B5 57 FF FF FF 80 DD 9D 48 FF FF FF 80 B5 47 FF FF FF 80 E9 A0 F6 FF FF
+
+. 2149 8051FC0 11
+. 8B 45 E0 85 C0 0F 85 4D 09 00 00
+
+. 2150 8052918 10
+. 83 7D 18 FF 0F 84 91 00 00 00
+***** EXPENSIVE 2152 545
+
+. 2151 80529B3 122
+. 8B 85 3C FE FF FF 8B 55 24 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 28 D9 5D B0 D9 5D AC D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 2C D9 5D A8 D9 5D A4 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 30 D9 5D A0 D9 5D 9C D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 34 D9 5D 98 D9 5D 94 D9 44 02 F8 D9 44 02 FC D9 C9 8B 55 38 D9 5D 90 D9 44 02 F8 D9 C9 D9 5D 8C D9 5D 88 D9 44 02 FC E9 E1 F5 FF FF
+***** EXPENSIVE 2153 546
+
+. 2152 805200E 22
+. D9 5D 84 C7 45 DC 00 00 00 00 8B 45 D0 39 45 DC 0F 83 CE 08 00 00
+***** EXPENSIVE 2154 547
+
+. 2153 8052922 145
+. 8B 85 3C FE FF FF 8B 55 24 D9 44 02 F8 D9 44 02 FC 8B 55 28 D9 5D AC D9 5D B0 D9 44 02 FC D9 44 02 F8 D9 C9 8B 55 2C D9 5D A4 D9 5D A8 D9 44 02 FC D9 44 02 F8 D9 C9 8B 55 30 D9 5D 9C D9 5D A0 D9 44 02 FC D9 44 02 F8 D9 C9 8B 55 34 D9 5D 94 D9 5D 98 D9 44 02 FC D9 44 02 F8 D9 5D 90 80 75 AF 80 80 75 A7 80 80 75 9F 80 80 75 97 80 D9 5D 8C 8B 55 38 D9 44 02 FC D9 44 02 F8 D9 C9 D9 5D 84 80 75 8F 80 D9 5D 88 80 75 87 80 E9 5E F6 FF FF
+
+. 2154 8052011 19
+. C7 45 DC 00 00 00 00 8B 45 D0 39 45 DC 0F 83 CE 08 00 00
+***** EXPENSIVE 2156 548
+vex iropt: not unrolling (81 sts)
+
+. 2155 8058CBC 80
+. 8B 45 EC 01 F0 8B 55 0C 0F AF D6 89 45 D0 8B 45 08 DD 04 D0 8B 45 D0 0F AF 45 0C 8B 55 08 DD 04 C2 D9 C1 8B 45 14 0F AF C7 8B 55 10 D8 C1 DD 1C C2 8B 45 14 0F AF C1 DE E9 DD 1C C2 FF 45 F0 8B 45 E8 03 4D 18 03 7D 18 03 75 E4 39 45 F0 72 B0
+
+. 2156 805A313 30
+. 8B 75 D4 46 89 F0 C7 45 E4 01 00 00 00 D1 E8 39 45 E4 89 B5 4C FF FF FF 0F 83 50 02 00 00
+
+. 2157 805A581 13
+. F7 45 D4 01 00 00 00 0F 85 8A 02 00 00
+***** EXPENSIVE 2159 549
+
+. 2158 805A58E 33
+. 31 D2 52 8B 45 18 50 DF 2C 24 DC BB 3C F6 FF FF 83 EC 08 DD 14 24 DD 9D E8 FE FF FF E8 71 E0 FE FF
+***** EXPENSIVE 2160 550
+
+. 2159 805A5AF 20
+. DD 85 E8 FE FF FF DD 1C 24 DD 9D 78 FF FF FF E8 6D E1 FE FF
+***** EXPENSIVE 2161 551
+
+. 2160 805A5C3 35
+. DD 9D 70 FF FF FF C7 45 E0 00 00 00 00 8B 55 D8 80 B5 77 FF FF FF 80 83 C4 10 39 55 E0 0F 83 32 02 00 00
+***** EXPENSIVE 2162 552
+
+. 2161 805A5E6 103
+. DD 83 34 F6 FF FF DD 83 44 F6 FF FF D9 C9 DD 9D 40 FF FF FF DD 9D 38 FF FF FF C7 85 34 FF FF FF 00 00 00 00 C7 85 30 FF FF FF 00 00 00 00 31 FF C7 85 2C FF FF FF 00 00 00 00 C7 45 D0 00 00 00 00 8B 45 18 DD 85 40 FF FF FF DD 85 38 FF FF FF D9 C9 39 45 D0 DD 9D 68 FF FF FF DD 9D 60 FF FF FF 0F 83 A2 01 00 00
+***** EXPENSIVE 2163 553
+
+. 2162 805A64D 55
+. 8B 55 E4 8B 8D 2C FF FF FF 8D 14 51 89 95 E0 FE FF FF 8B 45 D0 DD 85 38 FF FF FF DD 85 40 FF FF FF D9 C9 85 C0 DD 95 50 FF FF FF DD 95 58 FF FF FF D9 C0 D9 C2 74 42
+
+. 2163 805A6C6 11
+. 31 C9 3B 4D 18 0F 83 C0 01 00 00
+
+. 2164 805A6D1 11
+. 8B 95 E0 FE FF FF 4A 85 C9 74 5C
+***** EXPENSIVE 2166 554
+
+. 2165 805A738 89
+. 8B 45 0C 0F AF C2 8B 75 08 DD 04 C6 D9 C1 D9 C4 D9 C9 D8 CA D9 C9 DE CA D9 C3 D9 C6 D8 CA D9 C9 D8 CB D9 CA D8 CD D9 CB D8 CF D9 C9 DE E2 DE C2 41 DC 85 58 FF FF FF D9 C9 DC 85 50 FF FF FF D9 C9 03 55 DC 3B 4D 18 DD 9D 58 FF FF FF DD 9D 50 FF FF FF 0F 82 47 FF FF FF
+
+. 2166 805A6D8 4
+. 85 C9 74 5C
+***** EXPENSIVE 2168 555
+
+. 2167 805A6DC 136
+. DD 85 78 FF FF FF D8 C9 DD 85 70 FF FF FF DD 85 70 FF FF FF D9 C9 D8 CD D9 C9 DE CB D9 CC DC 8D 78 FF FF FF DD 85 68 FF FF FF D9 CA DE E5 DE C2 D8 CC DD 85 60 FF FF FF DD 85 60 FF FF FF D9 C9 D8 CC D9 C9 DE CE D9 CB DC 8D 68 FF FF FF D9 C9 DE E3 DE C4 D9 CA D9 C9 D9 CB D9 C9 8B 45 0C 0F AF C2 8B 75 08 DD 04 C6 D9 C1 D9 C4 D9 C9 D8 CA D9 C9 DE CA D9 C3 D9 C6 D8 CA D9 C9 D8 CB D9 CA D8 CD D9 CB D8 CF D9 C9
+***** EXPENSIVE 2169 556
+
+. 2168 805A764 45
+. DE E2 DE C2 41 DC 85 58 FF FF FF D9 C9 DC 85 50 FF FF FF D9 C9 03 55 DC 3B 4D 18 DD 9D 58 FF FF FF DD 9D 50 FF FF FF 0F 82 47 FF FF FF
+***** EXPENSIVE 2170 557
+
+. 2169 805A791 22
+. DD D8 DD D8 DD D8 DD D8 8B 4D D0 8B 45 18 41 2B 45 D0 39 C1 73 79
+***** EXPENSIVE 2171 558
+
+. 2170 805A7A7 72
+. 8B 45 D0 0F AF 45 D4 8B 55 E4 89 45 D0 8D 04 47 8D 44 50 FF 8B 55 14 0F AF D0 8D 04 C5 08 00 00 00 DD 85 58 FF FF FF 8B 75 10 0F AF 45 14 DD 1C D6 DD 85 50 FF FF FF DD 1C 30 8B 45 18 39 C1 89 4D D0 0F 82 70 FE FF FF
+***** EXPENSIVE 2172 559
+
+. 2171 805A65F 37
+. 8B 45 D0 DD 85 38 FF FF FF DD 85 40 FF FF FF D9 C9 85 C0 DD 95 50 FF FF FF DD 95 58 FF FF FF D9 C0 D9 C2 74 42
+***** EXPENSIVE 2173 560
+
+. 2172 805A684 77
+. DD 85 68 FF FF FF DD 85 68 FF FF FF DD 85 60 FF FF FF DD 85 60 FF FF FF D9 CB D9 E0 D9 CA DC 4D C8 D9 C9 DC 4D C0 D9 CA DC 4D C0 D9 CB DC 4D C8 D9 C9 DE C2 DE C2 DD 9D 68 FF FF FF DD 9D 60 FF FF FF 31 C9 3B 4D 18 0F 83 C0 01 00 00
+
+. 2173 805A820 4
+. 39 C1 74 3E
+***** EXPENSIVE 2175 561
+
+. 2174 805A862 47
+. 8B 45 D0 0F AF 45 D4 8B 95 30 FF FF FF 89 45 D0 8B 75 E4 8D 04 42 8D 44 70 FF 0F AF 45 14 DD 85 58 FF FF FF 8B 55 10 DD 1C C2 E9 50 FF FF FF
+
+. 2175 805A7E1 14
+. 8B 45 18 39 C1 89 4D D0 0F 82 70 FE FF FF
+***** EXPENSIVE 2177 562
+
+. 2176 805A824 62
+. 8B 95 34 FF FF FF 0F AF 45 D4 8D 04 42 8B 55 E4 D1 E2 29 D0 48 8B 55 14 0F AF D0 DD 85 58 FF FF FF 8B 75 10 8D 04 C5 08 00 00 00 DD 1C D6 0F AF 45 14 80 B5 57 FF FF FF 80 E9 76 FF FF FF
+***** EXPENSIVE 2178 563
+
+. 2177 805A7D8 23
+. DD 85 50 FF FF FF DD 1C 30 8B 45 18 39 C1 89 4D D0 0F 82 70 FE FF FF
+
+. 2178 805A7EF 41
+. 8B 55 1C 8B 4D D4 FF 45 E0 8B 75 D8 01 95 34 FF FF FF 01 95 30 FF FF FF 01 D7 01 8D 2C FF FF FF 39 75 E0 0F 82 08 FE FF FF
+***** EXPENSIVE 2180 564
+
+. 2179 8053758 53
+. 8B 75 AC 01 75 B4 8B 45 B4 31 D2 F7 75 08 89 55 B4 31 D2 52 8B 45 B4 50 DF 2C 24 DC 4D C8 83 EC 08 8B 45 C4 8B B0 08 02 00 00 DD 55 88 DD 1C 24 E8 93 4E FF FF
+
+. 2180 8053C9C 27
+. 8B 55 E0 42 89 D0 C7 45 F0 01 00 00 00 D1 E8 39 45 F0 89 55 D4 0F 83 32 01 00 00
+***** EXPENSIVE 2182 565
+
+. 2181 8053CB7 49
+. 8B 4D 20 8B 7D E0 83 C1 10 8D 7C 3F FE 89 4D C0 89 7D BC 89 F6 C7 45 EC 00 00 00 00 8B 55 D8 8B 45 C0 39 55 EC DD 40 F0 DD 40 F8 0F 83 8D 01 00 00
+***** EXPENSIVE 2183 566
+
+. 2182 8053CE8 152
+. 8B 45 E0 D1 E0 8B 7D BC 8B 4D F0 4F 8D 74 09 FF C7 45 C4 00 00 00 00 89 45 CC 89 45 C8 8D 76 00 8B 45 0C 8B 55 08 0F AF C6 DD 04 C2 8D 04 F5 08 00 00 00 0F AF 45 0C DD 04 10 8B 45 0C 0F AF C7 DD 04 C2 8D 04 FD 08 00 00 00 0F AF 45 0C DD 04 10 8B 4D F0 8B 55 C4 8D 44 4A FF 8B 4D E8 8B 55 14 01 C1 0F AF D0 D9 C3 D9 C3 D9 CD 8D 04 C5 08 00 00 00 D8 E3 D9 CC D8 C2 D9 C9 89 4D B0 0F AF 45 14 8B 4D 10 DE C3 D9 CC DE E1 D9 C4 D9 C6 D9 CB DD 1C D1 D9 CA D8 CB
+***** EXPENSIVE 2184 567
+
+. 2183 8053D80 76
+. D9 C9 DD 1C 08 D9 C9 D8 CB 8B 45 14 0F AF 45 B0 DE E9 DD 1C C1 8B 45 B0 8D 0C C5 08 00 00 00 D8 CA D9 C9 D8 CB 0F AF 4D 14 DE C1 8B 55 10 DD 1C 11 FF 45 EC 8B 4D E0 8B 45 D8 03 7D CC 03 75 C8 01 4D C4 39 45 EC 0F 82 3C FF FF FF
+***** EXPENSIVE 2185 568
+
+. 2184 8053DCC 29
+. DD D8 DD D8 8B 45 D4 FF 45 F0 D1 E8 83 45 C0 10 83 6D BC 02 39 45 F0 0F 82 E3 FE FF FF
+***** EXPENSIVE 2186 569
+
+. 2185 8053CCC 28
+. C7 45 EC 00 00 00 00 8B 55 D8 8B 45 C0 39 55 EC DD 40 F0 DD 40 F8 0F 83 8D 01 00 00
+
+. 2186 8053DE9 9
+. F7 45 E0 01 00 00 00 75 7B
+***** EXPENSIVE 2188 570
+
+. 2187 80550E5 31
+. C7 45 D0 00 00 00 00 8B 55 18 39 55 D0 DD 85 60 FF FF FF DD 85 58 FF FF FF 0F 83 40 07 00 00
+***** EXPENSIVE 2189 571
+vex iropt: not unrolling (85 sts)
+
+. 2188 805B308 80
+. 8B 45 EC 01 F0 8B 55 0C 0F AF D6 89 45 D4 8B 45 08 D9 04 90 8B 45 D4 0F AF 45 0C 8B 55 08 D9 04 82 D9 C1 8B 45 14 0F AF C7 8B 55 10 D8 C1 D9 1C 82 8B 45 14 0F AF C1 DE E9 D9 1C 82 FF 45 F0 8B 45 E8 03 4D 18 03 7D 18 03 75 E4 39 45 F0 72 B0
+
+. 2189 805CB4B 27
+. 8B 75 D0 46 89 F0 C7 45 E0 01 00 00 00 D1 E8 39 45 E0 89 75 84 0F 83 3D 02 00 00
+
+. 2190 805CDA3 13
+. F7 45 D0 01 00 00 00 0F 85 33 02 00 00
+***** EXPENSIVE 2192 572
+
+. 2191 805CDB0 33
+. 31 D2 52 8B 45 18 50 DF 2C 24 DC BB 3C F6 FF FF 83 EC 08 DD 14 24 DD 9D 28 FF FF FF E8 4F B8 FE FF
+***** EXPENSIVE 2193 573
+
+. 2192 805CDD1 17
+. DD 85 28 FF FF FF DD 1C 24 D9 5D 9C E8 4E B9 FE FF
+***** EXPENSIVE 2194 574
+
+. 2193 805CDE2 27
+. C7 45 DC 00 00 00 00 8B 55 D4 D9 E0 83 C4 10 39 55 DC D9 5D 98 0F 83 E6 01 00 00
+***** EXPENSIVE 2195 575
+
+. 2194 805CDFD 99
+. D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D9 5D 80 D9 9D 7C FF FF FF C7 85 78 FF FF FF 00 00 00 00 C7 85 74 FF FF FF 00 00 00 00 C7 85 70 FF FF FF 00 00 00 00 C7 85 6C FF FF FF 00 00 00 00 C7 45 CC 00 00 00 00 8B 45 18 D9 45 80 D9 85 7C FF FF FF D9 C9 39 45 CC D9 5D 94 D9 5D 90 0F 83 56 01 00 00
+***** EXPENSIVE 2196 576
+
+. 2195 805CE60 40
+. 8B 55 E0 8B 8D 6C FF FF FF 8D 3C 51 8B 45 CC D9 85 7C FF FF FF D9 45 80 D9 C9 85 C0 D9 55 88 D9 55 8C D9 C0 D9 C2 74 30
+
+. 2196 805CEB8 11
+. 31 F6 3B 75 18 0F 83 90 01 00 00
+
+. 2197 805CEC3 7
+. 8D 57 FF 85 F6 74 44
+***** EXPENSIVE 2199 577
+
+. 2198 805CF0E 77
+. 8B 45 0C 0F AF C2 8B 4D 08 D9 04 81 D9 C1 D9 C4 D9 C9 D8 CA D9 C9 DE CA D9 C3 D9 C6 D8 CA D9 C9 D8 CB D9 CA D8 CD D9 CB D8 CF D9 C9 DE E2 DE C2 46 D8 45 8C D9 C9 D8 45 88 D9 C9 03 55 D8 3B 75 18 D9 5D 8C D9 5D 88 0F 82 6B FF FF FF
+
+. 2199 805CEC6 4
+. 85 F6 74 44
+***** EXPENSIVE 2201 578
+
+. 2200 805CECA 112
+. D9 45 9C D8 C9 D9 45 98 D9 45 98 D9 C9 D8 CD D9 C9 DE CB D9 CC D8 4D 9C D9 45 94 D9 CA DE E5 DE C2 D8 CC D9 45 90 D9 45 90 D9 C9 D8 CC D9 C9 DE CE D9 CB D8 4D 94 D9 C9 DE E3 DE C4 D9 CA D9 C9 D9 CB D9 C9 8B 45 0C 0F AF C2 8B 4D 08 D9 04 81 D9 C1 D9 C4 D9 C9 D8 CA D9 C9 DE CA D9 C3 D9 C6 D8 CA D9 C9 D8 CB D9 CA D8 CD D9 CB D8 CF D9 C9
+***** EXPENSIVE 2202 579
+
+. 2201 805CF3A 33
+. DE E2 DE C2 46 D8 45 8C D9 C9 D8 45 88 D9 C9 03 55 D8 3B 75 18 D9 5D 8C D9 5D 88 0F 82 6B FF FF FF
+***** EXPENSIVE 2203 580
+
+. 2202 805CF5B 22
+. DD D8 DD D8 DD D8 DD D8 8B 4D CC 8B 45 18 41 2B 45 CC 39 C1 73 7A
+***** EXPENSIVE 2204 581
+
+. 2203 805CF71 69
+. 8B 75 CC 0F AF 75 D0 8B 95 70 FF FF FF 8D 04 72 8B 75 E0 8D 44 70 FF 8B 55 14 0F AF D0 8D 04 85 04 00 00 00 D9 45 8C 8B 75 10 0F AF 45 14 D9 1C 96 D9 45 88 D9 1C 30 8B 45 18 39 C1 89 4D CC 0F 82 B6 FE FF FF
+***** EXPENSIVE 2205 582
+
+. 2204 805CE6C 28
+. 8B 45 CC D9 85 7C FF FF FF D9 45 80 D9 C9 85 C0 D9 55 88 D9 55 8C D9 C0 D9 C2 74 30
+***** EXPENSIVE 2206 583
+
+. 2205 805CE88 59
+. D9 45 94 D9 45 94 D9 45 90 D9 45 90 D9 CB D9 E0 D9 CA D8 4D C8 D9 C9 D8 4D C4 D9 CA D8 4D C4 D9 CB D8 4D C8 D9 C9 DE C2 DE C2 D9 5D 94 D9 5D 90 31 F6 3B 75 18 0F 83 90 01 00 00
+
+. 2206 805CFEB 4
+. 39 C1 74 38
+***** EXPENSIVE 2208 584
+
+. 2207 805D027 44
+. 8B 45 CC 0F AF 45 D0 8B 95 74 FF FF FF 89 45 CC 8B 75 E0 8D 04 42 8D 44 70 FF 0F AF 45 14 D9 45 8C 8B 55 10 D9 1C 82 E9 55 FF FF FF
+
+. 2208 805CFA8 14
+. 8B 45 18 39 C1 89 4D CC 0F 82 B6 FE FF FF
+***** EXPENSIVE 2210 585
+
+. 2209 805CFEF 56
+. 8B 95 78 FF FF FF 0F AF 45 D0 8D 04 42 8B 55 E0 D1 E2 29 D0 48 8B 55 14 0F AF D0 D9 45 8C 8B 75 10 8D 04 85 04 00 00 00 D9 1C 96 0F AF 45 14 80 75 8B 80 E9 7B FF FF FF
+***** EXPENSIVE 2211 586
+
+. 2210 805CFA2 20
+. D9 45 88 D9 1C 30 8B 45 18 39 C1 89 4D CC 0F 82 B6 FE FF FF
+
+. 2211 805CFB6 45
+. 8B 55 1C 8B 4D D0 FF 45 DC 8B 75 D4 01 95 78 FF FF FF 01 95 74 FF FF FF 01 95 70 FF FF FF 01 8D 6C FF FF FF 39 75 DC 0F 82 59 FE FF FF
+
+. 2212 8056491 27
+. 8B 55 E0 42 89 D0 C7 45 F0 01 00 00 00 D1 E8 39 45 F0 89 55 D4 0F 83 31 01 00 00
+***** EXPENSIVE 2214 587
+
+. 2213 80564AC 48
+. 8B 4D 20 8B 7D E0 83 C1 08 8D 7C 3F FE 89 4D C0 89 7D BC 90 C7 45 EC 00 00 00 00 8B 55 D8 8B 45 C0 39 55 EC D9 40 F8 D9 40 FC 0F 83 8D 01 00 00
+***** EXPENSIVE 2215 588
+
+. 2214 80564DC 152
+. 8B 45 E0 D1 E0 8B 7D BC 8B 4D F0 4F 8D 74 09 FF C7 45 C4 00 00 00 00 89 45 CC 89 45 C8 8D 76 00 8B 45 0C 8B 55 08 0F AF C6 D9 04 82 8D 04 B5 04 00 00 00 0F AF 45 0C D9 04 10 8B 45 0C 0F AF C7 D9 04 82 8D 04 BD 04 00 00 00 0F AF 45 0C D9 04 10 8B 4D F0 8B 55 C4 8D 44 4A FF 8B 4D E8 8B 55 14 01 C1 0F AF D0 D9 C3 D9 C3 D9 CD 8D 04 85 04 00 00 00 D8 E3 D9 CC D8 C2 D9 C9 89 4D B0 0F AF 45 14 8B 4D 10 DE C3 D9 CC DE E1 D9 C4 D9 C6 D9 CB D9 1C 91 D9 CA D8 CB
+***** EXPENSIVE 2216 589
+
+. 2215 8056574 76
+. D9 C9 D9 1C 08 D9 C9 D8 CB 8B 45 14 0F AF 45 B0 DE E9 D9 1C 81 8B 45 B0 8D 0C 85 04 00 00 00 D8 CA D9 C9 D8 CB 0F AF 4D 14 DE C1 8B 55 10 D9 1C 11 FF 45 EC 8B 4D E0 8B 45 D8 03 7D CC 03 75 C8 01 4D C4 39 45 EC 0F 82 3C FF FF FF
+***** EXPENSIVE 2217 590
+
+. 2216 80565C0 29
+. DD D8 DD D8 8B 45 D4 FF 45 F0 D1 E8 83 45 C0 08 83 6D BC 02 39 45 F0 0F 82 E3 FE FF FF
+***** EXPENSIVE 2218 591
+
+. 2217 80564C0 28
+. C7 45 EC 00 00 00 00 8B 55 D8 8B 45 C0 39 55 EC D9 40 F8 D9 40 FC 0F 83 8D 01 00 00
+
+. 2218 80565DD 9
+. F7 45 E0 01 00 00 00 75 7B
+***** EXPENSIVE 2220 592
+
+. 2219 80579EF 25
+. C7 45 CC 00 00 00 00 8B 55 18 39 55 CC D9 45 88 D9 45 84 0F 83 E0 06 00 00
+***** EXPENSIVE 2221 593
+
+. 2220 8058FF8 66
+. 8B 45 8C 8B 55 20 DD 44 02 F0 DD 44 02 F8 8B 55 24 DD 5D C0 DD 44 02 F8 D9 C9 DD 5D C8 DD 5D B0 DD 44 02 F0 C7 45 E8 00 00 00 00 8B 45 E0 80 75 C7 80 80 75 B7 80 39 45 E8 DD 5D B8 0F 83 A6 01 00 00
+***** EXPENSIVE 2222 594
+
+. 2221 80549A0 121
+. C7 85 DC FE FF FF 10 00 00 00 C7 85 CC FE FF FF 02 00 00 00 8B BD DC FE FF FF 8B 45 20 DD 44 38 F0 DD 44 38 F8 D9 C9 8B 45 24 DD 5D C0 DD 5D B8 DD 44 38 F0 DD 44 38 F8 D9 C9 8B 45 28 DD 5D B0 DD 5D A8 DD 44 38 F0 DD 44 38 F8 D9 C9 8B 45 2C DD 5D A0 DD 5D 98 DD 44 38 F0 DD 44 38 F8 D9 C9 C7 45 E8 00 00 00 00 8B 45 CC 39 45 E8 DD 5D 90 DD 5D 88 0F 83 50 04 00 00
+***** EXPENSIVE 2223 595
+
+. 2222 8054A19 205
+. 8B 85 CC FE FF FF 8B 55 D0 89 C1 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 D1 E2 49 DD 9D F0 FE FF FF DD 9D E8 FE FF FF 89 45 84 89 55 80 89 8D E0 FE FF FF C7 85 D0 FE FF FF 00 00 00 00 90 8B 7D 84 8B B5 D0 FE FF FF 8D 44 37 FF 8B 55 80 8D 0C 02 01 CA 29 FE 8B 7D 80 89 95 A4 FE FF FF 8D 74 37 FF 8B 55 0C 01 F7 0F AF D0 8D 04 C5 08 00 00 00 89 BD 7C FF FF FF 0F AF 45 0C 8B 7D 08 DD 04 38 8B 45 0C 0F AF C1 8D 0C CD 08 00 00 00 DD 04 C7 0F AF 4D 0C 8B 45 0C DD 04 39 0F AF 85 A4 FE FF FF DD 9D 58 FF FF FF DD 04 C7 8B 85 A4 FE FF FF DD 04 D7 8D 3C C5 08 00 00 00 8B 45 0C 8B 55 08 0F AF 85 7C FF FF FF 0F AF 7D 0C
+***** EXPENSIVE 2224 596
+
+. 2223 8054AE6 186
+. DD 9D 70 FF FF FF D9 CA DD 9D 68 FF FF FF DD 04 17 DD 04 C2 8B 95 7C FF FF FF 8D 04 D5 08 00 00 00 8B 4D 08 0F AF 45 0C DD 04 08 D9 CA 8B 45 0C 0F AF C6 8D 34 F5 08 00 00 00 DD 9D B8 FE FF FF D9 CA 0F AF 75 0C DD 9D 60 FF FF FF D9 E0 DD 04 0E DD 85 B8 FE FF FF D9 CB DD 9D 50 FF FF FF DD 04 C1 DD 85 60 FF FF FF DD 85 58 FF FF FF D9 CD D8 C4 D9 CB D9 E0 D9 C9 D8 C2 D9 CD D8 C1 DD 85 50 FF FF FF D9 CC DD 9D 48 FF FF FF D9 CA DC AD 60 FF FF FF D9 C9 DC AD 58 FF FF FF D9 CB D8 C6 D9 C9 DD 9D 40 FF FF FF D9 CA DD 9D 38 FF FF FF DD 85 48 FF FF FF D9 C4 D8 C3
+***** EXPENSIVE 2225 597
+
+. 2224 8054BA0 172
+. DD 85 B8 FE FF FF D9 CA D8 C3 D9 C9 DD 9D 30 FF FF FF D9 CE DC A5 50 FF FF FF D9 C9 DE E4 D9 CD DD 95 28 FF FF FF DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CD DE E3 D9 CC DE CA DD 85 F0 FE FF FF D9 FA D9 C9 DC A5 48 FF FF FF D9 C9 DC 8D E8 FE FF FF DE C9 DD 85 30 FF FF FF DC 8D E8 FE FF FF DC AD 70 FF FF FF D9 CC DC 8D E8 FE FF FF D9 C4 D9 C9 DC AD 68 FF FF FF D9 C9 D8 C3 DD 9D 20 FF FF FF D9 C0 D8 C2 D9 C9 DE E2 DD 45 E0 DD 45 D8 D8 CF D9 C9 DC 8D 40 FF FF FF D9 CA DD 9D 18 FF FF FF DE C1 DD 45 E0 DD 45 D8
+***** EXPENSIVE 2226 598
+
+. 2225 8054C4C 176
+. D8 CD D9 C9 DC 8D 38 FF FF FF DE C1 DD 45 D8 D9 CD DC 4D E0 D9 CD DC 8D 38 FF FF FF DE E5 D9 CD DE E3 DD 85 68 FF FF FF DD 45 D8 D9 CC DD 95 B0 FE FF FF D9 CF DC 4D E0 D9 C9 DC 85 28 FF FF FF D9 CF D8 E5 D9 CC DC 8D 40 FF FF FF DE E1 D9 CE DD 9D 68 FF FF FF DD 85 70 FF FF FF DD 85 20 FF FF FF D9 CC DD 9D 10 FF FF FF D9 C2 D9 C9 DC 85 30 FF FF FF D9 CC D8 E6 D9 C9 D8 C7 D9 CE DC 85 20 FF FF FF D9 CC DD 9D 70 FF FF FF D9 CD DD 9D 08 FF FF FF DD 85 18 FF FF FF DD 85 B0 FE FF FF D9 CC DD 9D 00 FF FF FF D9 CB 8B 95 E0 FE FF FF
+***** EXPENSIVE 2227 599
+
+. 2226 8054CFC 166
+. 03 55 D4 8B 45 D4 8D 0C 10 8D 34 08 01 F0 8B 7D 14 0F AF BD E0 FE FF FF DE C4 89 85 A4 FE FF FF DD 85 70 FF FF FF 8B 45 10 DD 1C F8 8B 85 E0 FE FF FF 8D 04 C5 08 00 00 00 8B 7D 10 0F AF 45 14 DD 85 68 FF FF FF DD 1C 38 D9 C9 DE E5 DC C1 DD 45 C0 DD 45 B8 D8 CB D9 C9 8B 45 14 D8 CD 0F AF C2 DE E1 DD 1C C7 DD 45 A8 DD 45 B0 D9 C9 DC 8D 08 FF FF FF D9 CB DC 4D C0 D9 CD DC 4D B8 D9 C9 8B 45 14 DC 8D 10 FF FF FF 8D 14 D5 08 00 00 00 0F AF 55 14 0F AF C1 DE E3 DE C4 D9 CB DD 1C 3A DD 1C C7 DD 45 A8
+***** EXPENSIVE 2228 600
+
+. 2227 8054DA2 169
+. DD 45 B0 DC 8D 08 FF FF FF D9 C9 DC 8D 10 FF FF FF 8D 0C CD 08 00 00 00 0F AF 4D 14 DE C1 DD 85 18 FF FF FF D9 C9 DD 1C 39 DD 45 A0 DD 45 98 D8 CD D9 C9 D8 CB 8B 45 14 0F AF C6 DE E1 DD 1C C7 DE E2 DD 45 90 DD 45 88 D8 CB D9 CC 8D 34 F5 08 00 00 00 DC 4D A0 D9 CA DC 4D 98 D9 C9 8B 45 14 DC 8D 00 FF FF FF 0F AF 85 A4 FE FF FF 0F AF 75 14 DE E4 DE C1 DD 1C 3E D9 C9 DD 1C C7 8B 85 A4 FE FF FF DD 45 88 D9 C9 8D 3C C5 08 00 00 00 DC 4D 90 D9 C9 DC 8D 00 FF FF FF 0F AF 7D 14 8B 45 10 8B 55 D0 DE C1 DD 1C 07
+
+. 2228 8054E4B 30
+. FF 45 E8 8D 04 92 8B 4D CC 01 95 E0 FE FF FF 01 85 D0 FE FF FF 39 4D E8 0F 82 EF FB FF FF
+
+. 2229 8054E69 34
+. 8B 85 FC FE FF FF FF 45 EC D1 E8 83 85 DC FE FF FF 10 83 85 CC FE FF FF 02 39 45 EC 0F 82 29 FB FF FF
+***** EXPENSIVE 2231 601
+
+. 2230 805B634 66
+. 8B 4D AC 8B 45 20 D9 44 88 F8 D9 44 88 FC 8B 45 24 D9 5D D0 D9 44 88 FC D9 C9 D9 5D D4 D9 5D C8 D9 44 88 F8 C7 45 E8 00 00 00 00 8B 45 E0 80 75 D3 80 80 75 CB 80 39 45 E8 D9 5D CC 0F 83 97 01 00 00
+***** EXPENSIVE 2232 602
+
+. 2231 8057214 113
+. C7 85 DC FE FF FF 02 00 00 00 89 F6 8B BD DC FE FF FF 8B 45 20 D9 44 B8 F8 D9 44 B8 FC D9 C9 8B 45 24 D9 5D B0 D9 5D AC D9 44 B8 F8 D9 44 B8 FC D9 C9 8B 45 28 D9 5D A8 D9 5D A4 D9 44 B8 F8 D9 44 B8 FC D9 C9 8B 45 2C D9 5D A0 D9 5D 9C D9 44 B8 F8 D9 44 B8 FC D9 C9 C7 45 E4 00 00 00 00 8B 45 D0 39 45 E4 D9 5D 98 D9 5D 94 0F 83 BE 04 00 00
+***** EXPENSIVE 2233 603
+
+. 2232 8057285 202
+. 8B 45 E8 8B 55 D4 DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 8D 44 00 FF D1 E2 DD 9D F0 FE FF FF DD 9D E8 FE FF FF 89 7D 90 89 85 E0 FE FF FF 89 55 8C C7 85 D0 FE FF FF 00 00 00 00 8D 76 00 8B 8D D0 FE FF FF 8B 75 90 8D 44 0E FF 8B 7D 8C 8D 0C 07 8B B5 D0 FE FF FF 01 CF 8B 55 8C 2B 75 90 89 BD BC FE FF FF 8D 74 32 FF 8B 7D 0C 01 F2 0F AF F8 8D 04 85 04 00 00 00 89 55 88 0F AF 45 0C 8B 55 08 D9 04 10 8B 45 0C 0F AF C1 D9 04 82 D9 C9 8B 45 0C 0F AF 85 BC FE FF FF D9 5D 80 8D 0C 8D 04 00 00 00 D9 04 82 8B 85 BC FE FF FF D9 04 BA 0F AF 4D 0C 8D 3C 85 04 00 00 00 8B 45 0C 0F AF 45 88 D9 5D 84 0F AF 7D 0C
+***** EXPENSIVE 2234 604
+
+. 2233 805734F 205
+. D9 04 11 D9 9D 78 FF FF FF D9 9D 74 FF FF FF D9 04 17 D9 04 82 D9 C9 8B 55 88 8D 04 95 04 00 00 00 8B 4D 08 0F AF 45 0C D9 9D 70 FF FF FF D9 04 08 D9 CA D9 9D 7C FF FF FF D9 C9 D9 9D 6C FF FF FF 8B 45 0C 0F AF C6 D9 04 81 D9 85 7C FF FF FF D9 C9 8D 34 B5 04 00 00 00 D9 95 68 FF FF FF DE C1 0F AF 75 0C D9 85 74 FF FF FF D8 C2 D9 04 0E D9 C9 D9 9D 64 FF FF FF D9 E0 D9 85 78 FF FF FF D9 85 7C FF FF FF D9 C9 80 B5 6F FF FF FF 80 D8 C2 D9 C9 D8 A5 68 FF FF FF D9 CA D8 AD 78 FF FF FF D9 CC D8 AD 74 FF FF FF D9 85 70 FF FF FF D8 85 6C FF FF FF D9 CB D9 9D 60 FF FF FF D9 CC D9 9D 5C FF FF FF D9 CB D9 9D 58 FF FF FF
+***** EXPENSIVE 2235 605
+
+. 2234 805741C 191
+. D9 85 70 FF FF FF D9 85 64 FF FF FF D9 C4 D9 CA D8 A5 6C FF FF FF D9 C9 D8 C4 D9 CA D8 C3 D9 C9 D9 9D 54 FF FF FF D9 C9 D9 95 50 FF FF FF D9 C9 D9 9D 4C FF FF FF DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CB D8 A5 64 FF FF FF DE CB D9 CA D9 5D EC D9 45 EC DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CC DE E2 D9 CB DE C9 D9 5D EC DD 85 E8 FE FF FF DE C9 D8 6D 84 D9 9D 48 FF FF FF DD 85 E8 FE FF FF D8 8D 4C FF FF FF D8 6D 80 D9 9D 44 FF FF FF D9 85 48 FF FF FF D8 C1 D9 C9 D8 AD 48 FF FF FF D9 C9 D9 9D 40 FF FF FF D9 9D 38 FF FF FF D9 45 DC D9 45 E0 D8 8D 60 FF FF FF
+***** EXPENSIVE 2236 606
+
+. 2235 80574DB 173
+. D9 C9 D8 8D 58 FF FF FF DE C1 D9 45 EC D9 85 44 FF FF FF D9 CA D9 9D 30 FF FF FF D9 45 E0 D9 45 DC D9 CB D8 C2 D9 CB D8 8D 54 FF FF FF D9 CA D8 AD 44 FF FF FF D9 C9 D8 8D 5C FF FF FF DE C2 D9 9D 34 FF FF FF D9 45 E0 D9 45 DC D9 C9 D8 8D 58 FF FF FF D9 C9 D8 8D 60 FF FF FF D9 45 84 D9 C9 DE E2 D9 CB D9 9D 3C FF FF FF D9 45 DC D9 45 E0 D9 CC D8 85 50 FF FF FF D9 CC D8 8D 54 FF FF FF D9 C9 D8 8D 5C FF FF FF D9 CC D9 5D 84 8B 95 E0 FE FF FF 03 55 D8 8B 45 D8 8D 0C 10 8D 34 08 DE EB D9 85 38 FF FF FF 01 F0 D8 E3 D9 CB
+***** EXPENSIVE 2237 607
+
+. 2236 8057588 187
+. 8B 7D 14 D8 85 38 FF FF FF 0F AF BD E0 FE FF FF D9 45 80 D9 C9 D9 9D 24 FF FF FF 89 85 BC FE FF FF D9 45 84 8B 45 10 D9 1C B8 D8 85 4C FF FF FF 8B 85 E0 FE FF FF D9 5D 80 D9 85 34 FF FF FF 8D 04 85 04 00 00 00 8B 7D 10 0F AF 45 14 D8 C1 D9 45 80 D9 CA D8 AD 34 FF FF FF D9 85 40 FF FF FF D9 85 3C FF FF FF D9 C9 D8 E5 D9 C9 D8 85 30 FF FF FF D9 CC D9 1C 38 D9 C9 D9 9D 20 FF FF FF D9 45 B0 D9 45 AC D9 CD D8 85 40 FF FF FF D9 CD D8 CC D9 C9 8B 45 14 D8 CA 0F AF C2 DE E1 D9 CC D9 9D 1C FF FF FF D9 85 3C FF FF FF D8 A5 30 FF FF FF D9 CC D9 1C 87 D9 45 B0 DE CB
+***** EXPENSIVE 2238 608
+
+. 2237 8057643 159
+. D9 45 AC DE C9 8D 14 95 04 00 00 00 0F AF 55 14 DE C2 D9 C9 D9 1C 3A D9 45 A4 D9 45 A8 D9 C9 D8 CA D9 C9 8B 45 14 D8 CC 0F AF C1 DE E1 D9 1C 87 D9 45 9C D9 45 A0 D9 C9 D8 8D 20 FF FF FF D9 CA D8 4D A8 D9 CC D8 4D A4 D9 C9 8B 45 14 D8 8D 24 FF FF FF 8D 0C 8D 04 00 00 00 0F AF 4D 14 0F AF C6 DE E2 DE C3 D9 CA D9 1C 39 D9 45 A0 D9 CA D9 1C 87 D9 45 9C D8 8D 24 FF FF FF D9 CA D8 8D 20 FF FF FF DE C2 D9 45 94 D9 45 98 8B 45 14 D8 8D 1C FF FF FF D9 C9 D8 CA 8D 34 B5 04 00 00 00
+***** EXPENSIVE 2239 609
+
+. 2238 80576E2 97
+. 0F AF 85 BC FE FF FF 0F AF 75 14 DE E9 D9 CA D9 1C 3E D9 C9 D9 1C 87 8B 85 BC FE FF FF D9 45 94 D9 C9 8D 3C 85 04 00 00 00 D8 4D 98 D9 C9 D8 8D 1C FF FF FF 0F AF 7D 14 8B 45 10 DE C1 D9 1C 07 8B 55 D4 8D 04 92 FF 45 E4 8B 4D D0 01 95 E0 FE FF FF 01 85 D0 FE FF FF 39 4D E4 0F 82 81 FB FF FF
+
+. 2239 8057743 27
+. 8B 85 F8 FE FF FF FF 45 E8 D1 E8 83 85 DC FE FF FF 02 39 45 E8 0F 82 C2 FA FF FF
+***** EXPENSIVE 2241 610
+
+. 2240 804DBF8 160
+. 8B 45 EC 03 45 DC 0F AF 45 0C C1 E0 04 DD 04 07 DD 9D 48 FF FF FF DD 44 07 08 8B 45 EC 03 45 94 0F AF 45 0C C1 E0 04 DD 04 07 DD 44 07 08 8B 45 EC 03 45 84 0F AF 45 0C C1 E0 04 DD 04 07 D9 CA DD 55 98 DD 85 48 FF FF FF D8 C3 DD 44 07 08 D9 C9 DD 9D 78 FF FF FF D9 C4 D8 C1 DD 04 17 D9 C9 DD 9D 70 FF FF FF DD 85 48 FF FF FF D9 CB D8 C1 D9 CB DE E5 DD 85 78 FF FF FF DD 44 17 08 D9 C9 D8 C4 D9 CD DD 55 88 D9 CD DD 1C 0E DC C4 DD 85 70 FF FF FF D9 CA DC 65 98 D9 C9 DC 65 88 D9 CA
+***** EXPENSIVE 2242 611
+
+. 2241 804DC98 139
+. D8 C5 D9 CF DE E3 D9 CD DC 8D 60 FF FF FF D9 CA DC 8D 60 FF FF FF D9 CE DD 5C 0E 08 D9 C4 D9 C1 D9 C9 8B 45 E8 D8 E7 D9 C9 D8 C3 D9 CE DE C7 D9 C9 DE E2 03 45 D4 DD 45 C8 DD 45 C0 0F AF 45 14 D8 CE D9 C9 D8 CA C1 E0 04 DE E1 D9 CB DC A5 78 FF FF FF D9 CC DC A5 70 FF FF FF D9 CB DD 1C 06 D9 CC DC 4D C8 D9 CC DC 4D C0 DE C4 D9 CB DD 5C 06 08 8B 45 E8 03 85 6C FF FF FF DD 45 B8 DD 45 B0 D8 CA D9 C9 0F AF 45 14 D8 CB
+***** EXPENSIVE 2243 612
+
+. 2242 804DD23 127
+. D9 CA DC 4D B8 D9 CB DC 4D B0 D9 CA C1 E0 04 DE E1 D9 CA DE C1 D9 C9 DD 1C 06 DD 5C 06 08 8B 45 E8 03 85 68 FF FF FF DD 45 A8 DD 45 A0 D9 C9 0F AF 45 14 D8 CB D9 C9 D8 CA D9 CB DC 4D A0 D9 CA DC 4D A8 D9 C9 C1 E0 04 DE E3 DE C1 D9 C9 DD 1C 06 DD 5C 06 08 8B 45 0C 01 85 50 FF FF FF FF 45 E0 8B 45 D4 03 95 58 FF FF FF FF 45 EC 03 8D 5C FF FF FF FF 45 E8 39 45 E0 0F 82 56 FE FF FF
+***** EXPENSIVE 2244 613
+
+. 2243 805100C 138
+. 8B 45 F0 03 45 E0 0F AF 45 0C D9 04 C7 D9 44 C7 04 8B 45 F0 03 45 B8 0F AF 45 0C D9 04 C7 D9 44 C7 04 8B 45 F0 03 45 B4 0F AF 45 0C D9 44 C7 04 D9 5D B0 D9 04 D7 D8 C2 D9 04 C7 D9 C9 D9 5D AC D9 C4 D9 44 D7 04 D8 C3 D9 C9 D8 C2 D9 CE DE E2 D9 45 AC D9 C9 D9 5D A8 D8 C5 D9 45 B0 D9 CB D8 6C D7 04 D9 04 D7 D9 CC D8 C6 D9 CA D9 1C CE D9 45 A8 D8 C2 D9 CC DE E5 D9 CD D8 65 B0 D8 4D 84 D9 CB D9 5C CE 04 D9 C3 D8 E3
+***** EXPENSIVE 2245 614
+
+. 2244 8051096 128
+. D9 CA D8 4D 84 D9 CE D8 6D AC D9 CA D9 5D A0 D9 C4 D8 C6 D9 CD D9 5D A4 D9 C9 D9 55 9C D9 CD D8 6D A4 D9 5D 90 D9 CA DE C1 8B 45 EC D9 45 D0 D9 45 CC 03 45 D8 D8 CC D9 C9 D8 4D A0 0F AF 45 14 DE E1 D9 1C C6 D9 45 CC D8 4D A0 D9 CB D8 4D D0 DE C3 D9 C9 D8 6D A8 D9 C9 D9 5D 94 D9 C9 D9 5C C6 04 D9 45 C8 8B 45 EC D9 45 C4 D9 45 C8 D9 45 C4 D9 CA 03 45 8C D8 CC D9 CB D8 CD D9 C9 D8 CC
+***** EXPENSIVE 2246 615
+
+. 2245 8051116 104
+. D9 CA DE CD 0F AF 45 14 DE E2 DE C3 D9 1C C6 D9 C9 D9 5C C6 04 D9 45 C0 8B 45 EC D9 45 BC D9 C9 03 45 88 D8 4D 94 D9 C9 D8 4D 90 0F AF 45 14 DE E9 D9 1C C6 D9 45 BC D9 45 C0 D8 4D 90 D9 C9 D8 4D 94 DE C1 D9 C9 D9 5D 98 D9 5C C6 04 FF 45 E4 8B 45 D8 03 55 0C FF 45 F0 03 4D 14 FF 45 EC 39 45 E4 0F 82 8E FE FF FF
+
+. 2246 80593D7 30
+. 8B 75 DC 46 89 F0 C7 45 EC 01 00 00 00 D1 E8 39 45 EC 89 B5 68 FF FF FF 0F 83 F9 02 00 00
+***** EXPENSIVE 2248 616
+
+. 2247 80593F5 103
+. C7 85 58 FF FF FF 10 00 00 00 90 8B 85 58 FF FF FF 8B 55 20 DD 44 02 F0 DD 44 02 F8 8B 55 24 DD 5D C8 DD 5D D0 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 28 DD 5D B8 DD 44 02 F8 D9 C9 DD 5D C0 DD 5D A8 DD 44 02 F0 C7 45 E8 00 00 00 00 8B 45 E0 80 75 CF 80 80 75 BF 80 80 75 AF 80 39 45 E8 DD 5D B0 0F 83 77 02 00 00
+***** EXPENSIVE 2249 617
+
+. 2248 805945C 198
+. 8B 55 DC 8B 4D EC 8B 45 DC 8D 4C 09 FF D1 E2 2B 45 EC 89 95 6C FF FF FF 89 8D 60 FF FF FF 89 8D 5C FF FF FF 8D 7C 00 FF 8B 95 5C FF FF FF 03 55 E4 8B 75 E4 8D 0C 16 01 CE 8B 45 0C 89 B5 3C FF FF FF 0F AF 85 5C FF FF FF 8B 75 08 DD 04 C6 8B 85 5C FF FF FF 8D 04 C5 08 00 00 00 0F AF 45 0C DD 5D A0 DD 04 30 8B 45 0C 0F AF C2 DD 04 C6 8B 45 0C 0F AF C1 8D 14 D5 08 00 00 00 DD 04 C6 0F AF 55 0C 8B 45 0C 8D 0C CD 08 00 00 00 DD 04 32 0F AF 4D 0C 0F AF 85 3C FF FF FF 8B 95 3C FF FF FF DD 04 31 DD 04 C6 D9 CD 8D 34 D5 08 00 00 00 0F AF 75 0C 8B 4D 08 DD 5D 98 DD 04 0E DD 5D 90 DD 45 D0 DD 45 C8
+***** EXPENSIVE 2250 618
+
+. 2249 8059522 128
+. D9 C9 D8 CD D9 C9 D8 CB DE E9 DD 9D 40 FF FF FF DD 45 C0 DD 45 B8 D9 CD DC 4D C8 D9 CD D8 CA D9 CB DC 4D D0 D9 C9 D8 CC D9 C9 DE C5 DE E2 DD 45 B0 DD 45 A8 DC 4D 90 D9 C9 D8 CE DE E1 DD 85 40 FF FF FF DD 45 B0 D9 CD DC 4D B8 D9 CF DC 4D A8 D9 C9 D8 C2 D9 CB DC 4D C0 D9 CD DC 4D 90 DE C1 D9 CC DE C6 DD 45 A0 DD 45 98 D9 CB DD 5D 88 8B 85 6C FF FF FF D9 C5 D9 C9 8D 0C 38 D8 C4 D9 C9
+***** EXPENSIVE 2251 619
+
+. 2250 80595A2 156
+. D8 C5 D9 CE DE E5 8B 45 14 DD 45 88 0F AF 85 60 FF FF FF 8B 75 10 D8 C1 DD 1C C6 D9 CD DD 5D 80 8B B5 60 FF FF FF DD 85 40 FF FF FF D9 CA D8 C6 D9 CA DE E1 8D 04 F5 08 00 00 00 DD 45 80 D9 CB 8B 75 10 DC 6D A0 D9 CE 0F AF 45 14 DC 6D 98 D9 CB D8 C2 D9 CB 8B 95 60 FF FF FF DD 9D 70 FF FF FF D9 CA DD 1C 30 D9 CA 03 95 6C FF FF FF D9 E0 D9 C4 8B 45 14 0F AF C2 D8 E1 DD 1C C6 D9 CC 8B 45 14 DD 9D 78 FF FF FF 0F AF C1 DD 85 70 FF FF FF D9 C9 D9 E0 D9 CA DC 65 80 D9 CC
+***** EXPENSIVE 2252 620
+
+. 2251 805963E 149
+. 8D 14 D5 08 00 00 00 8D 0C CD 08 00 00 00 0F AF 55 14 DC 85 78 FF FF FF D9 C9 0F AF 4D 14 D8 C2 D9 CB DC 65 88 D9 CA DC AD 70 FF FF FF D9 CC D9 E0 D9 CB DD 1C 32 D9 CB DD 9D 70 FF FF FF DD 1C C6 D9 C9 DD 95 78 FF FF FF D9 C9 DD 1C 31 8B 45 14 0F AF C7 DD 1C C6 80 B5 77 FF FF FF 80 8D 04 FD 08 00 00 00 0F AF 45 14 DD 85 70 FF FF FF DD 1C 30 8B 55 DC 8B 45 18 FF 45 E8 8B 4D E0 01 85 60 FF FF FF 01 C7 01 95 5C FF FF FF 39 4D E8 0F 82 B1 FD FF FF
+
+. 2252 80596D3 27
+. 8B 85 68 FF FF FF FF 45 EC D1 E8 83 85 58 FF FF FF 10 39 45 EC 0F 82 12 FD FF FF
+
+. 2253 80596EE 13
+. F7 45 DC 01 00 00 00 0F 85 FC 00 00 00
+
+. 2254 80596FB 19
+. C7 45 E8 00 00 00 00 8B 75 E0 39 75 E8 0F 83 E9 00 00 00
+***** EXPENSIVE 2256 621
+
+. 2255 805970E 162
+. 8B 7D DC DD 83 DC F8 FF FF C7 85 64 FF FF FF 00 00 00 00 4F 89 F6 8B 45 E4 8D 14 38 8D 0C 10 01 C8 8B 75 0C 89 85 3C FF FF FF 0F AF F7 8B 45 08 DD 04 F0 8B B5 3C FF FF FF 0F AF 55 0C 0F AF 4D 0C 0F AF 75 0C DD 04 D0 DD 04 C8 DD 04 F0 D9 C4 D9 FA DD 83 34 F6 FF FF DE F1 D9 C3 D8 E2 DE C9 D9 C5 D9 FA 8B 8D 64 FF FF FF 8B 55 EC DD 83 34 F6 FF FF 8D 44 51 FF DE F1 8B 75 DC D9 C5 8B 55 14 8D 0C 70 0F AF D0 8B 75 10 D8 C2 DD 1C D6 D9 CC DE C2 D9 C2 D9 CC DE CA D9 CB 8D 04 C5 08 00 00 00
+***** EXPENSIVE 2257 622
+
+. 2256 80597B0 69
+. D9 E0 0F AF 45 14 D8 E1 DD 1C 30 D9 CB 8B 45 14 0F AF C1 DE E2 D9 C9 DD 1C C6 8D 0C CD 08 00 00 00 8B 45 18 FF 45 E8 8B 55 E0 0F AF 4D 14 DE E1 01 85 64 FF FF FF 03 7D DC 39 55 E8 DD 1C 31 0F 82 2F FF FF FF
+***** EXPENSIVE 2258 623
+
+. 2257 80597F5 13
+. DD D8 81 C4 BC 00 00 00 5B 5E 5F C9 C3
+***** EXPENSIVE 2259 624
+
+. 2258 80542D8 154
+. 8D 14 BD 00 00 00 00 8B 4D D8 8B 75 08 8D 44 11 FF 0F AF 55 0C DD 04 D6 8B 55 0C 0F AF D0 8D 0C 01 8D 04 C5 08 00 00 00 DD 04 D6 0F AF 45 0C 0F AF 4D 0C 8B 55 E4 DD 04 CE DD 04 30 D9 C3 8D 04 3A 8D 14 02 D8 C2 D9 CC 8B 4D E4 01 D1 DE E2 D9 CA D8 C0 D9 C3 8B 75 14 0F AF F7 89 8D 34 FF FF FF D8 C1 8B 4D 10 DD 1C F1 D9 CA D8 C0 D9 C1 8B B5 34 FF FF FF 0F AF 75 14 0F AF 45 14 0F AF 55 14 D8 E1 D9 CC DE E3 DE C1 D9 CA DD 1C C1 FF 45 E8 DD 1C D1 DD 1C F1 8B 75 DC
+
+. 2259 8054372 12
+. 03 7D E0 39 75 E8 0F 82 5A FF FF FF
+
+. 2260 805BA0A 30
+. 8B 75 DC 46 89 F0 C7 45 EC 01 00 00 00 D1 E8 39 45 EC 89 B5 64 FF FF FF 0F 83 05 03 00 00
+***** EXPENSIVE 2262 625
+
+. 2261 805BA28 104
+. C7 85 54 FF FF FF 02 00 00 00 89 F6 8B 85 54 FF FF FF 8B 55 20 D9 44 82 F8 D9 44 82 FC 8B 55 24 D9 5D C8 D9 5D CC D9 44 82 FC D9 44 82 F8 D9 C9 8B 55 28 D9 5D C0 D9 44 82 FC D9 C9 D9 5D C4 D9 5D B8 D9 44 82 F8 C7 45 E8 00 00 00 00 8B 45 E0 80 75 CB 80 80 75 C3 80 80 75 BB 80 39 45 E8 D9 5D BC 0F 83 82 02 00 00
+***** EXPENSIVE 2263 626
+
+. 2262 805BA90 197
+. 8B 55 DC 8B 4D EC 8B 45 DC 8D 4C 09 FF D1 E2 2B 45 EC 89 95 78 FF FF FF 89 8D 5C FF FF FF 89 8D 58 FF FF FF 8D 7C 00 FF 8B 95 58 FF FF FF 03 55 E4 8B 75 E4 8D 0C 16 01 CE 8B 45 0C 89 B5 44 FF FF FF 0F AF 85 58 FF FF FF 8B 75 08 D9 04 86 8B 85 58 FF FF FF 8D 04 85 04 00 00 00 0F AF 45 0C D9 5D B4 D9 04 30 8B 45 0C 0F AF C2 D9 5D B0 D9 04 86 8B 45 0C 0F AF C1 D9 04 86 D9 C9 8B 45 0C 0F AF 85 44 FF FF FF 8D 14 95 04 00 00 00 8D 0C 8D 04 00 00 00 0F AF 55 0C 0F AF 4D 0C D9 5D AC D9 04 86 8B 85 44 FF FF FF D9 04 32 D9 04 31 D9 CA D9 5D A4 8D 34 85 04 00 00 00 D9 45 CC 0F AF 75 0C 8B 55 08
+***** EXPENSIVE 2264 627
+
+. 2263 805BB55 138
+. D8 4D AC D9 CB D9 5D A8 D9 04 16 D9 CB D9 5D A0 D9 45 C8 D8 C9 D8 6D A0 D9 5D A0 D9 45 C8 D8 4D AC D9 C9 D8 4D CC DE C1 D9 45 C4 D8 4D A8 D9 5D 9C D9 45 C0 D8 CA D8 6D 9C D9 5D 9C D9 45 C0 D8 4D A8 D9 CA D8 4D C4 D9 CA D9 5D A8 D9 C9 D8 45 A8 D9 5D 98 D9 45 BC D8 4D A4 D9 5D 94 D9 45 B8 D8 CA D8 6D 94 D9 5D 94 D9 45 BC DE CA D9 45 B8 D8 4D A4 DE C2 D9 45 B4 D8 45 9C D9 5D 90 D9 45 B0 D8 45 98 D9 5D 8C D9 45 A0
+***** EXPENSIVE 2265 628
+
+. 2264 805BBDF 151
+. D9 45 A0 D9 45 B4 D9 45 B0 D9 CB D8 65 94 D9 CA D8 45 94 D9 C9 D8 65 9C D9 CB D8 65 98 D9 C4 D9 CB D9 5D 88 D9 CA 8B 85 78 FF FF FF 8D 0C 38 D8 C5 D9 45 8C D9 CD DE E6 8B 45 14 D9 45 90 0F AF 85 5C FF FF FF D8 C2 D9 CD D8 C1 D9 CD 8B 75 10 D9 1C 86 D9 CC D9 5D 84 D9 CC 8B B5 5C FF FF FF D9 E0 D9 C2 80 75 8B 80 D8 E1 8D 04 B5 04 00 00 00 0F AF 45 14 8B 75 10 D9 5D 80 D9 45 84 D9 45 88 D9 C9 8B 95 5C FF FF FF D9 1C 30 03 95 78 FF FF FF D8 C2 8B 45 14
+***** EXPENSIVE 2266 629
+
+. 2265 805BC76 156
+. 0F AF C2 D9 9D 7C FF FF FF D9 45 80 D9 1C 86 D9 CC 8D 14 95 04 00 00 00 8B 45 14 0F AF 55 14 0F AF C1 D8 6D 90 D9 85 7C FF FF FF D9 CC D8 6D 8C D9 CC 8D 0C 8D 04 00 00 00 D9 1C 32 D9 C9 0F AF 4D 14 D8 65 88 D9 C9 D9 14 86 D9 5D 90 D9 CA D9 5D 8C 80 75 8F 80 8B 45 14 0F AF C7 DE C2 D9 45 8C D9 1C 31 D9 C9 D9 1C 86 8D 04 BD 04 00 00 00 0F AF 45 14 D9 E0 D9 1C 30 8B 55 DC 8B 45 18 FF 45 E8 8B 4D E0 01 85 5C FF FF FF 01 C7 01 95 58 FF FF FF 39 4D E8 0F 82 A6 FD FF FF
+
+. 2266 805BD12 27
+. 8B 85 64 FF FF FF FF 45 EC D1 E8 83 85 54 FF FF FF 02 39 45 EC 0F 82 07 FD FF FF
+
+. 2267 805BD2D 13
+. F7 45 DC 01 00 00 00 0F 85 2F 01 00 00
+
+. 2268 805BD3A 19
+. C7 45 E8 00 00 00 00 8B 75 E0 39 75 E8 0F 83 1C 01 00 00
+***** EXPENSIVE 2270 630
+
+. 2269 805BD4D 189
+. 8B 7D DC C7 85 60 FF FF FF 00 00 00 00 4F 90 8B 45 E4 8D 14 38 8D 0C 10 01 C8 8B 75 0C 89 85 44 FF FF FF 0F AF F7 8B 45 08 D9 04 B0 8B B5 44 FF FF FF 0F AF 75 0C D9 9D 74 FF FF FF 0F AF 55 0C D9 04 B0 0F AF 4D 0C D9 9D 6C FF FF FF D9 04 90 D9 04 88 DD 83 DC F8 FF FF D9 FA D9 85 6C FF FF FF D9 C9 DC BB 34 F6 FF FF D9 C9 D8 EB DE C9 D9 9D 68 FF FF FF DD 83 DC F8 FF FF D9 FA DC BB 34 F6 FF FF D9 CA D8 85 6C FF FF FF 8B 8D 60 FF FF FF DE CA D9 C9 8B 55 EC D9 5D F0 8D 44 51 FF 8B 75 DC 8B 55 14 D9 85 74 FF FF FF 8D 0C 70 0F AF D0 8B 75 10 D8 85 68 FF FF FF D9 45 F0
+***** EXPENSIVE 2271 631
+
+. 2270 805BE0A 95
+. D9 C9 D9 1C 96 D9 C1 8D 04 85 04 00 00 00 D9 E0 0F AF 45 14 D8 E1 D9 1C 30 8B 45 14 D9 85 74 FF FF FF 0F AF C1 D8 A5 68 FF FF FF D9 1C 86 8D 0C 8D 04 00 00 00 8B 45 18 FF 45 E8 8B 55 E0 0F AF 4D 14 DE E9 01 85 60 FF FF FF 03 7D DC 39 55 E8 D9 95 70 FF FF FF D9 1C 31 0F 82 F3 FE FF FF
+***** EXPENSIVE 2272 632
+
+. 2271 8056AC0 154
+. 8D 14 BD 00 00 00 00 8B 4D D8 8B 75 08 8D 44 11 FF 0F AF 55 0C D9 04 96 8B 55 0C 0F AF D0 8D 0C 01 8D 04 85 04 00 00 00 0F AF 45 0C D9 04 30 D9 04 96 D9 C9 0F AF 4D 0C 8B 55 E4 8D 04 3A D9 5D D4 D9 C1 D9 04 8E 8D 14 02 DC C1 8B 4D E4 01 D1 DE EB D9 C9 D8 C0 D9 C1 D9 45 D4 8B 75 14 0F AF F7 D8 C0 D9 C9 89 8D 34 FF FF FF D8 C2 8B 4D 10 D9 1C B1 D9 C0 8B B5 34 FF FF FF 0F AF 75 14 0F AF 45 14 D8 EC D9 CB 0F AF 55 14 DE E2 DE C3 D9 C9 D9 1C 81 FF 45 E8 D9 1C 91
+***** EXPENSIVE 2273 633
+
+. 2272 8056B5A 18
+. D9 1C B1 8B 75 DC 03 7D E0 39 75 E8 0F 82 54 FF FF FF
+***** EXPENSIVE 2274 634
+
+. 2273 8059064 159
+. 8B 45 E4 8D 14 38 01 D0 8B 4D 0C 89 85 7C FF FF FF 0F AF CF 8B 45 08 DD 04 C8 8D 04 FD 08 00 00 00 8B 4D 08 0F AF 45 0C DD 5D A8 DD 04 08 8B 45 0C 0F AF C2 8D 14 D5 08 00 00 00 DD 04 C1 0F AF 55 0C 8B 45 0C DD 04 0A 0F AF 85 7C FF FF FF 8B 95 7C FF FF FF DD 04 C1 D9 CB DD 5D A0 8D 0C D5 08 00 00 00 DD 45 C8 DD 45 C0 0F AF 4D 0C 8B 45 08 D8 CA D9 C9 D8 CB D9 CA DC 4D C8 D9 CB DC 4D C0 DD 04 01 D9 CB DE E2 DE C3 DD 45 B8 DD 45 B0 DD 45 B8 D9 C9 D8 CC D9 CA D8 CE D9 C9 DE CC
+***** EXPENSIVE 2275 635
+
+. 2274 8059103 132
+. D9 CD DC 4D B0 D9 CD DE E1 D9 CA DE C4 D9 C0 D9 C3 D9 C9 D8 C3 D9 C9 D8 C5 DD 83 BC F8 FF FF DD 83 BC F8 FF FF D9 C9 D8 CB D9 C9 D8 CA D9 CC DE E5 D9 CD DE E6 D9 C9 DC 45 A8 D9 CC DC 6D A8 D9 CA DC 6D A0 D9 CB DC 4D 98 D9 CD DC 4D 98 D9 CC DD 55 A8 8B 4D DC 8B 45 14 8D 14 4E 0F AF C6 8B 4D 10 DD 1C C1 8D 04 F5 08 00 00 00 0F AF 45 14 DC 45 A0 DD 14 08 D9 C1 8B 45 14 0F AF C2 D9 C3 D9 C9 D8 E5
+***** EXPENSIVE 2276 636
+
+. 2275 8059187 89
+. 8D 14 D5 08 00 00 00 DD 1C C1 0F AF 55 14 D8 C5 8B 45 14 DD 1C 0A D9 C9 0F AF 45 90 8B 55 90 DE C3 D9 CA DD 1C C1 DE E2 D9 C9 8D 04 D5 08 00 00 00 0F AF 45 14 D9 E0 DD 1C 08 FF 45 E8 8B 4D 18 8B 45 E0 01 CA 01 CE 03 7D DC 39 45 E8 DD 5D A0 89 55 90 0F 82 84 FE FF FF
+***** EXPENSIVE 2277 637
+
+. 2276 8053FB4 56
+. 8B 4D 8C 8B 75 20 DD 44 0E F0 8B 45 24 DD 5D C8 DD 44 0E F8 DD 5D C0 DD 44 08 F0 DD 5D B8 C7 45 E8 00 00 00 00 DD 44 08 F8 8B 45 D4 39 45 E8 DD 5D B0 0F 83 A7 01 00 00
+***** EXPENSIVE 2278 638
+vex iropt: not unrolling (108 sts)
+
+. 2277 80541DC 144
+. 8B 45 0C 0F AF C7 8B 4D 08 8B 75 D8 DD 04 C1 D1 E6 8D 04 FD 08 00 00 00 8D 14 3E 0F AF 45 0C D9 C1 DC 0C 08 0F AF 55 0C 8B 45 90 DD 04 D1 03 45 DC 8B 55 DC 01 C2 D9 C2 8B 4D 14 D8 C0 D9 CB 0F AF 4D 90 D8 E1 D9 CB DE C1 89 95 74 FF FF FF 8B 55 10 DD 1C CA D9 C1 8B 8D 74 FF FF FF D9 E0 D9 CA 0F AF 4D 14 0F AF 45 14 D8 E1 D9 CA DE E1 D9 C9 DD 1C C2 DD 1C CA 8B 4D D8 01 CE 01 F7 FF 45 E8 8B 75 D4 01 4D 90 39 75 E8 0F 82 70 FF FF FF
+***** EXPENSIVE 2279 639
+
+. 2278 805B6A4 149
+. 8B 4D E4 8D 14 39 01 D1 8B 45 0C 89 4D A0 0F AF C7 8B 4D 08 D9 04 81 8D 04 BD 04 00 00 00 0F AF 45 0C D9 04 08 8B 45 0C 0F AF C2 8D 14 95 04 00 00 00 D9 04 81 D9 C9 0F AF 55 0C 8B 45 0C D9 5D C0 0F AF 45 A0 D9 04 0A 8B 55 A0 D9 04 81 D9 CB D9 5D C4 8D 0C 95 04 00 00 00 D9 45 D4 D9 45 D0 0F AF 4D 0C 8B 45 08 D8 CA D9 C9 D8 CB D9 CA D8 4D D4 D9 CB D8 4D D0 D9 04 01 D9 CB DE E2 DE C3 D9 45 CC D9 45 C8 D9 45 CC D9 C9 D8 CC D9 CA D8 CE D9 C9 DE CC
+***** EXPENSIVE 2280 640
+
+. 2279 805B739 126
+. D9 CD D8 4D C8 D9 CD DE E1 D9 CA DE C4 D9 C0 D9 C3 D9 C9 D8 C3 D9 C9 D8 C5 D9 45 B4 D9 45 B4 D9 C9 D8 CB D9 C9 D8 CA D9 CC DE E5 D9 CD DE E6 D8 45 C0 D9 CC D8 6D C4 D9 CA D8 6D C0 D9 CB D8 4D BC D9 CD D8 4D BC D9 C9 D8 45 C4 D9 CC D9 55 C0 D9 CC 8B 4D DC 8B 45 14 8D 14 4E 0F AF C6 8B 4D 10 D9 1C 81 D9 CB 8D 04 B5 04 00 00 00 0F AF 45 14 D9 1C 08 D9 C0 8B 45 14 0F AF C2 D9 C2
+***** EXPENSIVE 2281 641
+
+. 2280 805B7B7 86
+. D9 C9 D8 E4 8D 14 95 04 00 00 00 D9 1C 81 0F AF 55 14 D8 C4 8B 45 14 D9 1C 0A 0F AF 45 B0 8B 55 B0 DE C2 D9 C9 D9 1C 81 DE E1 8D 04 95 04 00 00 00 0F AF 45 14 D9 E0 D9 1C 08 FF 45 E8 8B 4D 18 8B 45 E0 01 CA 01 CE 03 7D DC 39 45 E8 89 55 B0 0F 82 97 FE FF FF
+***** EXPENSIVE 2282 642
+
+. 2281 8056798 60
+. 8B 4D A4 8B 75 20 8B 45 24 D9 44 8E F8 D9 44 8E FC D9 C9 D9 5D D4 D9 5D D0 D9 44 88 F8 D9 44 88 FC D9 C9 C7 45 E8 00 00 00 00 8B 45 D8 39 45 E8 D9 5D CC D9 5D C8 0F 83 BA 01 00 00
+***** EXPENSIVE 2283 643
+vex iropt: not unrolling (114 sts)
+
+. 2282 80569D4 138
+. 8B 45 0C 0F AF C7 8B 4D 08 8B 75 DC D9 04 81 D1 E6 8D 04 BD 04 00 00 00 8D 14 3E 0F AF 45 0C D9 C1 D8 0C 08 0F AF 55 0C 8B 45 A8 D9 04 91 03 45 E0 8B 55 E0 01 C2 D9 C2 8B 4D 14 D8 C0 D9 CB 0F AF 4D A8 D8 E1 D9 CB DE C1 89 55 94 8B 55 10 D9 1C 8A D9 C1 8B 4D 94 D9 E0 D9 CA 0F AF 4D 14 0F AF 45 14 D8 E1 D9 CA DE E1 D9 C9 D9 1C 82 D9 1C 8A 8B 4D DC 01 CE 01 F7 FF 45 E8 8B 75 D8 01 4D A8 39 75 E8 0F 82 76 FF FF FF
+***** EXPENSIVE 2284 644
+
+. 2283 8059400 92
+. 8B 85 58 FF FF FF 8B 55 20 DD 44 02 F0 DD 44 02 F8 8B 55 24 DD 5D C8 DD 5D D0 DD 44 02 F8 DD 44 02 F0 D9 C9 8B 55 28 DD 5D B8 DD 44 02 F8 D9 C9 DD 5D C0 DD 5D A8 DD 44 02 F0 C7 45 E8 00 00 00 00 8B 45 E0 80 75 CF 80 80 75 BF 80 80 75 AF 80 39 45 E8 DD 5D B0 0F 83 77 02 00 00
+***** EXPENSIVE 2285 645
+
+. 2284 805BA34 92
+. 8B 85 54 FF FF FF 8B 55 20 D9 44 82 F8 D9 44 82 FC 8B 55 24 D9 5D C8 D9 5D CC D9 44 82 FC D9 44 82 F8 D9 C9 8B 55 28 D9 5D C0 D9 44 82 FC D9 C9 D9 5D C4 D9 5D B8 D9 44 82 F8 C7 45 E8 00 00 00 00 8B 45 E0 80 75 CB 80 80 75 C3 80 80 75 BB 80 39 45 E8 D9 5D BC 0F 83 82 02 00 00
+
+. 2285 805A331 19
+. C7 45 E0 00 00 00 00 8B 7D D8 39 7D E0 0F 83 29 02 00 00
+***** EXPENSIVE 2287 646
+
+. 2286 805A344 95
+. DD 83 34 F6 FF FF DD 83 44 F6 FF FF D9 C9 DD 9D 40 FF FF FF DD 9D 38 FF FF FF C7 85 24 FF FF FF 00 00 00 00 C7 85 20 FF FF FF 00 00 00 00 C7 85 1C FF FF FF 00 00 00 00 C7 45 D0 00 00 00 00 8B 45 18 DD 85 40 FF FF FF DD 85 38 FF FF FF D9 C9 39 45 D0 DD 5D A0 DD 5D 98 0F 83 A3 01 00 00
+***** EXPENSIVE 2288 647
+
+. 2287 805A3A3 46
+. 8B 95 1C FF FF FF C7 85 28 FF FF FF 00 00 00 00 89 55 84 89 F6 8B 45 D0 DD 85 38 FF FF FF 85 C0 DD 55 88 DD 55 90 DD 85 40 FF FF FF 74 30
+
+. 2288 805A401 11
+. 31 C9 3B 4D 18 0F 83 E4 04 00 00
+
+. 2289 805A40C 40
+. 8B 85 4C FF FF FF D1 E8 8B 75 E4 89 85 E4 FE FF FF 29 C6 8B 7D E4 8B 45 84 8D 54 78 FF 8D 76 00 85 C9 0F 85 A4 04 00 00
+***** EXPENSIVE 2291 648
+
+. 2290 805A434 68
+. DD 85 40 FF FF FF DD 85 38 FF FF FF 8B 45 0C 0F AF C2 8B 7D 08 DD 04 C7 8D 04 D5 08 00 00 00 0F AF 45 0C DD 04 38 D9 C3 D9 C3 D9 C9 D8 CB D9 C9 D8 CA D9 CD DE CA D9 CB DE CA D9 CA 85 C9 DE E3 DE C1 74 24
+***** EXPENSIVE 2292 649
+
+. 2291 805A49C 67
+. D9 C2 D9 C4 D9 C9 D8 CB D9 C9 D8 CA D9 CB D8 CD D9 CA D8 CC D9 C9 DE E3 DE C1 D9 C9 41 DC 45 90 D9 C9 DC 45 88 D9 C9 8B 85 E4 FE FF FF 03 55 DC 3B 4D 18 DD 5D 90 DD 5D 88 8D 74 06 FF 0F 82 4D FF FF FF
+
+. 2292 805A42C 8
+. 85 C9 0F 85 A4 04 00 00
+***** EXPENSIVE 2294 650
+
+. 2293 805A8D8 24
+. 89 F0 C1 E0 04 8B 7D 24 DD 44 07 08 DD 04 07 D9 C9 D9 E0 E9 50 FB FF FF
+***** EXPENSIVE 2295 651
+
+. 2294 805A440 56
+. 8B 45 0C 0F AF C2 8B 7D 08 DD 04 C7 8D 04 D5 08 00 00 00 0F AF 45 0C DD 04 38 D9 C3 D9 C3 D9 C9 D8 CB D9 C9 D8 CA D9 CD DE CA D9 CB DE CA D9 CA 85 C9 DE E3 DE C1 74 24
+***** EXPENSIVE 2296 652
+
+. 2295 805A478 103
+. DD 45 A0 D8 CB DD 45 98 DD 45 98 D9 C9 D8 CE D9 C9 DE CD D9 CD DC 4D A0 D9 C9 DE E5 DE C3 D9 CA D9 CB D9 CA D9 C2 D9 C4 D9 C9 D8 CB D9 C9 D8 CA D9 CB D8 CD D9 CA D8 CC D9 C9 DE E3 DE C1 D9 C9 41 DC 45 90 D9 C9 DC 45 88 D9 C9 8B 85 E4 FE FF FF 03 55 DC 3B 4D 18 DD 5D 90 DD 5D 88 8D 74 06 FF 0F 82 4D FF FF FF
+***** EXPENSIVE 2297 653
+
+. 2296 805A4DF 19
+. DD D8 DD D8 8B 45 18 2B 45 D0 39 45 D0 0F 83 AC 03 00 00
+***** EXPENSIVE 2298 654
+
+. 2297 805A4F2 84
+. 8B 85 20 FF FF FF 8B 55 E4 03 85 28 FF FF FF 8D 44 50 FF 8B 55 14 0F AF D0 8D 04 C5 08 00 00 00 DD 45 90 8B 4D 10 0F AF 45 14 DD 1C D1 DD 45 88 DD 1C 08 8B 95 28 FF FF FF 8B 45 D4 FF 45 D0 8B 4D 18 8D 14 42 39 4D D0 89 95 28 FF FF FF 0F 82 72 FE FF FF
+***** EXPENSIVE 2299 655
+
+. 2298 805A3B8 25
+. 8B 45 D0 DD 85 38 FF FF FF 85 C0 DD 55 88 DD 55 90 DD 85 40 FF FF FF 74 30
+***** EXPENSIVE 2300 656
+
+. 2299 805A3D1 59
+. DD 45 A0 DD 45 A0 DD 45 98 DD 45 98 D9 CB D9 E0 D9 CA DC 4D C8 D9 C9 DC 4D C0 D9 CA DC 4D C0 D9 CB DC 4D C8 D9 C9 DE C2 DE C2 DD 5D A0 DD 5D 98 31 C9 3B 4D 18 0F 83 E4 04 00 00
+***** EXPENSIVE 2301 657
+
+. 2300 805A89E 56
+. 8B 95 24 FF FF FF 0F AF 45 D4 8D 04 42 8B 55 E4 D1 E2 29 D0 48 8B 55 14 0F AF D0 DD 45 90 8B 4D 10 8D 04 C5 08 00 00 00 DD 1C D1 0F AF 45 14 80 75 8F 80 E9 49 FC FF FF
+***** EXPENSIVE 2302 658
+
+. 2301 805A51F 39
+. DD 45 88 DD 1C 08 8B 95 28 FF FF FF 8B 45 D4 FF 45 D0 8B 4D 18 8D 14 42 39 4D D0 89 95 28 FF FF FF 0F 82 72 FE FF FF
+
+. 2302 805A546 39
+. 8B 75 1C 8B 7D D4 FF 45 E0 8B 45 D8 01 B5 24 FF FF FF 01 B5 20 FF FF FF 01 BD 1C FF FF FF 39 45 E0 0F 82 0F FE FF FF
+
+. 2303 805A56D 20
+. 8B 85 4C FF FF FF FF 45 E4 D1 E8 39 45 E4 0F 82 B0 FD FF FF
+
+. 2304 805CB66 19
+. C7 45 DC 00 00 00 00 8B 7D D4 39 7D DC 0F 83 19 02 00 00
+***** EXPENSIVE 2306 659
+
+. 2305 805CB79 89
+. D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D9 5D 80 D9 9D 7C FF FF FF C7 85 64 FF FF FF 00 00 00 00 C7 85 60 FF FF FF 00 00 00 00 C7 85 5C FF FF FF 00 00 00 00 C7 45 CC 00 00 00 00 8B 45 18 D9 45 80 D9 85 7C FF FF FF D9 C9 39 45 CC D9 5D B0 D9 5D AC 0F 83 99 01 00 00
+***** EXPENSIVE 2307 660
+
+. 2306 805CBD2 44
+. 8B 95 5C FF FF FF C7 85 68 FF FF FF 00 00 00 00 89 55 A0 8D 76 00 8B 45 CC D9 85 7C FF FF FF 85 C0 D9 55 A4 D9 55 A8 D9 45 80 74 30
+
+. 2307 805CC2E 11
+. 31 F6 3B 75 18 0F 83 72 04 00 00
+
+. 2308 805CC39 35
+. 8B 45 84 D1 E8 8B 4D E0 89 85 24 FF FF FF 29 C1 8B 7D E0 8B 45 A0 8D 54 78 FF 90 85 F6 0F 85 3C 04 00 00
+***** EXPENSIVE 2310 661
+
+. 2309 805CC5C 65
+. D9 45 80 D9 85 7C FF FF FF 8B 45 0C 0F AF C2 8B 7D 08 D9 04 87 8D 04 95 04 00 00 00 0F AF 45 0C D9 04 38 D9 C3 D9 C3 D9 C9 D8 CB D9 C9 D8 CA D9 CD DE CA D9 CB DE CA D9 CA 85 F6 DE E3 DE C1 74 24
+***** EXPENSIVE 2311 662
+
+. 2310 805CCC1 67
+. D9 C2 D9 C4 D9 C9 D8 CB D9 C9 D8 CA D9 CB D8 CD D9 CA D8 CC D9 C9 DE E3 DE C1 D9 C9 46 D8 45 A8 D9 C9 D8 45 A4 D9 C9 8B 85 24 FF FF FF 03 55 D8 3B 75 18 D9 5D A8 D9 5D A4 8D 4C 01 FF 0F 82 50 FF FF FF
+
+. 2311 805CC54 8
+. 85 F6 0F 85 3C 04 00 00
+***** EXPENSIVE 2313 663
+
+. 2312 805D098 19
+. 8B 7D 24 D9 44 CF 04 D9 04 CF D9 C9 D9 E0 E9 BA FB FF FF
+***** EXPENSIVE 2314 664
+
+. 2313 805CC65 56
+. 8B 45 0C 0F AF C2 8B 7D 08 D9 04 87 8D 04 95 04 00 00 00 0F AF 45 0C D9 04 38 D9 C3 D9 C3 D9 C9 D8 CB D9 C9 D8 CA D9 CD DE CA D9 CB DE CA D9 CA 85 F6 DE E3 DE C1 74 24
+***** EXPENSIVE 2315 665
+
+. 2314 805CC9D 103
+. D9 45 B0 D8 CB D9 45 AC D9 45 AC D9 C9 D8 CE D9 C9 DE CD D9 CD D8 4D B0 D9 C9 DE E5 DE C3 D9 CA D9 CB D9 CA D9 C2 D9 C4 D9 C9 D8 CB D9 C9 D8 CA D9 CB D8 CD D9 CA D8 CC D9 C9 DE E3 DE C1 D9 C9 46 D8 45 A8 D9 C9 D8 45 A4 D9 C9 8B 85 24 FF FF FF 03 55 D8 3B 75 18 D9 5D A8 D9 5D A4 8D 4C 01 FF 0F 82 50 FF FF FF
+***** EXPENSIVE 2316 666
+
+. 2315 805CD04 19
+. DD D8 DD D8 8B 45 18 2B 45 CC 39 45 CC 0F 83 49 03 00 00
+***** EXPENSIVE 2317 667
+
+. 2316 805CD17 84
+. 8B 85 60 FF FF FF 8B 55 E0 03 85 68 FF FF FF 8D 44 50 FF 8B 55 14 0F AF D0 8D 04 85 04 00 00 00 D9 45 A8 8B 4D 10 0F AF 45 14 D9 1C 91 D9 45 A4 D9 1C 08 8B 95 68 FF FF FF 8B 45 D0 FF 45 CC 8B 4D 18 8D 14 42 39 4D CC 89 95 68 FF FF FF 0F 82 7D FE FF FF
+***** EXPENSIVE 2318 668
+
+. 2317 805CBE8 22
+. 8B 45 CC D9 85 7C FF FF FF 85 C0 D9 55 A4 D9 55 A8 D9 45 80 74 30
+***** EXPENSIVE 2319 669
+
+. 2318 805CBFE 59
+. D9 45 B0 D9 45 B0 D9 45 AC D9 45 AC D9 CB D9 E0 D9 CA D8 4D C8 D9 C9 D8 4D C4 D9 CA D8 4D C4 D9 CB D8 4D C8 D9 C9 DE C2 DE C2 D9 5D B0 D9 5D AC 31 F6 3B 75 18 0F 83 72 04 00 00
+***** EXPENSIVE 2320 670
+
+. 2319 805D060 56
+. 8B 95 64 FF FF FF 0F AF 45 D0 8D 04 42 8B 55 E0 D1 E2 29 D0 48 8B 55 14 0F AF D0 D9 45 A8 8B 4D 10 8D 04 85 04 00 00 00 D9 1C 91 0F AF 45 14 80 75 A7 80 E9 AC FC FF FF
+***** EXPENSIVE 2321 671
+
+. 2320 805CD44 39
+. D9 45 A4 D9 1C 08 8B 95 68 FF FF FF 8B 45 D0 FF 45 CC 8B 4D 18 8D 14 42 39 4D CC 89 95 68 FF FF FF 0F 82 7D FE FF FF
+
+. 2321 805CD6B 39
+. 8B 75 1C 8B 7D D0 FF 45 DC 8B 45 D4 01 B5 64 FF FF FF 01 B5 60 FF FF FF 01 BD 5C FF FF FF 39 45 DC 0F 82 1C FE FF FF
+
+. 2322 805CD92 17
+. 8B 45 84 FF 45 E0 D1 E8 39 45 E0 0F 82 C3 FD FF FF
+
+. 2323 804D708 11
+. 8B 4D E8 85 C9 0F 85 F1 00 00 00
+
+. 2324 804D804 6
+. 83 7D 18 FF 74 12
+***** EXPENSIVE 2326 672
+
+. 2325 804D81C 14
+. 8B 7D D4 DD 47 F0 DD 47 F8 E9 F5 FE FF FF
+
+. 2326 804D71F 11
+. 31 F6 3B 75 CC 0F 83 D4 00 00 00
+***** EXPENSIVE 2328 673
+vex iropt: 2 x unrolling (44 sts -> 88 sts)
+
+. 2327 804F6AD 38
+. 8B 45 08 DD 04 30 8B 55 10 DD 1C 0A DD 44 30 08 47 DD 5C 0A 08 03 B5 64 FF FF FF 03 8D 60 FF FF FF 3B 7D E0 72 DA
+***** EXPENSIVE 2329 674
+vex iropt: not unrolling (80 sts)
+
+. 2328 804F710 115
+. 8B 85 68 FF FF FF 8B 4D C0 01 F9 8D 14 38 8B 45 0C 0F AF C1 8B 75 14 0F AF F2 C1 E0 04 89 8D 54 FF FF FF 0F AF 55 0C 8B 4D 08 DD 04 01 C1 E2 04 DC 04 11 C1 E6 04 8B 4D 10 DD 1C 31 03 45 08 03 55 08 DD 40 08 DC 42 08 DD 5C 31 08 8B 8D 54 FF FF FF DD 00 0F AF 4D 14 C1 E1 04 DC 2A 8B 75 10 DD 1C 0E DD 40 08 47 DC 6A 08 3B 7D E0 DD 5C 0E 08 72 8D
+***** EXPENSIVE 2330 675
+vex iropt: 2 x unrolling (44 sts -> 88 sts)
+
+. 2329 804F7BE 38
+. 8B 55 10 DD 04 32 8B 45 08 DD 1C 08 DD 44 32 08 47 DD 5C 08 08 03 B5 70 FF FF FF 03 8D 6C FF FF FF 3B 7D E0 72 DA
+***** EXPENSIVE 2331 676
+vex iropt: 2 x unrolling (53 sts -> 106 sts)
+
+. 2330 804F820 55
+. 8D 04 3E 0F AF 45 14 C1 E0 04 8B 4D 10 DD 04 01 8B 4D 08 DC 04 11 DD 1C 11 8B 4D 10 DD 44 01 08 8B 45 08 DC 44 10 08 47 DD 5C 10 08 03 95 54 FF FF FF 3B 7D E0 72 C9
+***** EXPENSIVE 2332 677
+vex iropt: not unrolling (63 sts)
+
+. 2331 804F8BC 81
+. 8B 85 78 FF FF FF 8D 0C 38 8B 55 10 0F AF 4D 0C DD 04 32 C1 E1 04 8B 45 08 DD 1C 08 DD 44 32 08 8B 55 B0 DD 5C 08 08 8D 0C 3A 0F AF 4D 0C 8B 45 10 DD 04 30 C1 E1 04 8B 55 08 DD 1C 0A 47 DD 44 30 08 03 B5 7C FF FF FF 3B 7D E0 DD 5C 0A 08 72 AF
+***** EXPENSIVE 2333 678
+vex iropt: not unrolling (114 sts)
+
+. 2332 804F964 143
+. 8D 14 3E 8B 8D 54 FF FF FF 8D 04 39 0F AF 55 14 8B 4D 10 C1 E2 04 0F AF 45 14 D9 C1 DC 0C 11 C1 E0 04 D9 C1 DC 4C 01 08 D9 C1 D9 C4 DC 4C 11 08 D9 C9 D8 E2 D9 CB DE C2 8B 95 78 FF FF FF D9 C3 DC 0C 01 D9 CB 8D 04 3A 0F AF 45 0C 8B 4D 08 C1 E0 04 DC 04 01 D9 C1 D9 C9 DD 1C 01 D8 C3 01 C8 DC 40 08 8B 55 B0 DD 58 08 D9 C9 8D 04 3A 0F AF 45 0C C1 E0 04 DC 04 01 DD 1C 01 DE E1 01 C8 47 DC 40 08 3B 7D E0 DD 58 08 0F 82 71 FF FF FF
+vex iropt: 2 x unrolling (58 sts -> 116 sts)
+
+. 2333 804FA64 62
+. 8B 7D 08 8B 04 37 8B 54 37 04 8B 7D 10 89 04 0F 89 54 0F 04 8B 7D 08 8B 44 37 08 8B 54 37 0C 8B 7D 10 89 44 0F 08 FF 45 E4 8B 45 D8 89 54 0F 0C 03 75 88 03 8D 4C FF FF FF 39 45 E4 72 C2
+vex iropt: not unrolling (68 sts)
+
+. 2334 804FAE0 82
+. 8B 4D E4 8B 75 E4 03 8D 4C FF FF FF 03 75 A8 0F AF 4D 0C C1 E1 04 8B 7D 08 0F AF 75 14 8B 04 0F 8B 54 0F 04 C1 E6 04 8B 7D 10 89 04 37 89 54 37 04 8B 7D 08 8B 44 0F 08 8B 54 0F 0C 8B 4D 10 89 44 31 08 89 54 31 0C FF 45 E4 8B 75 D8 39 75 E4 72 AE
+***** EXPENSIVE 2336 679
+
+. 2335 804D80A 18
+. 8B 45 D4 DD 40 F8 DD 40 F0 D9 C9 D9 E0 E9 03 FF FF FF
+
+. 2336 8050BA0 11
+. 8B 45 E8 85 C0 0F 85 CD 00 00 00
+
+. 2337 8050C78 6
+. 83 7D 18 FF 74 12
+***** EXPENSIVE 2339 680
+
+. 2338 8050C90 14
+. 8B 7D DC D9 47 F8 D9 47 FC E9 19 FF FF FF
+
+. 2339 8050BB7 11
+. 31 F6 3B 75 D8 0F 83 B0 00 00 00
+vex iropt: 2 x unrolling (46 sts -> 92 sts)
+
+. 2340 8052A7E 38
+. 8B 75 08 8B 04 CE 8B 75 10 89 04 D6 8B 75 08 8B 44 CE 04 47 8B 75 10 89 44 D6 04 03 4D 0C 03 55 14 3B 7D E4 72 DA
+***** EXPENSIVE 2342 681
+vex iropt: not unrolling (104 sts)
+
+. 2341 8052AE4 110
+. 8B 55 C0 01 FA 89 55 80 8B 75 C4 8B 55 0C 0F AF 55 80 8D 04 3E 8B 4D 08 8B 75 14 D9 04 D1 0F AF F0 0F AF 45 0C D8 04 C1 8B 4D 10 D9 1C F1 8B 4D 08 D9 44 D1 04 D8 44 C1 04 8B 4D 10 D9 5C F1 04 8B 75 08 D9 04 D6 8B 4D 80 D8 2C C6 0F AF 4D 14 8B 75 10 D9 1C CE 8B 75 08 D9 44 D6 04 47 D8 6C C6 04 3B 7D E4 8B 45 10 D9 5C C8 04 72 92
+vex iropt: 2 x unrolling (46 sts -> 92 sts)
+
+. 2342 8052B72 38
+. 8B 75 10 8B 04 CE 8B 75 08 89 04 D6 8B 75 10 8B 44 CE 04 47 8B 75 08 89 44 D6 04 03 4D 14 03 55 0C 3B 7D E4 72 DA
+***** EXPENSIVE 2344 682
+vex iropt: 2 x unrolling (60 sts -> 120 sts)
+
+. 2343 8052BC8 49
+. 8D 04 39 0F AF 45 14 8B 75 10 D9 04 C6 8B 75 08 D8 04 D6 D9 1C D6 8B 75 10 D9 44 C6 04 8B 45 08 D8 44 D0 04 47 D9 5C D0 04 03 55 0C 3B 7D E4 72 CF
+vex iropt: not unrolling (76 sts)
+
+. 2344 8052C54 81
+. 8B 75 88 8D 04 3E 8B 75 10 8B 14 CE 0F AF 45 0C 8B 75 08 89 14 C6 8B 75 10 8B 54 CE 04 8B 75 08 89 54 C6 04 8B 55 AC 8D 04 3A 8B 75 10 8B 14 CE 0F AF 45 0C 8B 75 08 89 14 C6 47 8B 75 10 8B 54 CE 04 8B 75 08 03 4D 14 3B 7D E4 89 54 C6 04 72 AF
+***** EXPENSIVE 2346 683
+vex iropt: not unrolling (124 sts)
+
+. 2345 8052D00 127
+. 8D 14 3E 8B 4D 80 0F AF 55 14 8D 04 39 D9 C1 8B 4D 10 D8 0C D1 0F AF 45 14 D9 C1 D8 4C C1 04 D9 C1 D9 C4 D8 4C D1 04 D9 C9 D8 E2 D9 CB DE C2 8B 55 88 D9 C3 D8 0C C1 D9 C1 8D 04 3A 8B 4D 08 0F AF 45 0C D8 C1 D9 CC D8 04 C1 D9 CC D8 44 C1 04 D9 CC 8B 55 AC D9 1C C1 D9 CB D9 5C C1 04 8D 04 3A 0F AF 45 0C DE E2 47 D8 04 C1 D9 C9 D8 44 C1 04 D9 C9 3B 7D E4 D9 1C C1 D9 5C C1 04 72 81
+vex iropt: 2 x unrolling (49 sts -> 98 sts)
+
+. 2346 8052DD5 43
+. 8B 75 08 8B 04 CE 8B 75 10 89 04 D6 8B 75 08 8B 44 CE 04 8B 75 10 89 44 D6 04 FF 45 E8 8B 45 DC 03 4D 0C 03 55 14 39 45 E8 72 D5
+vex iropt: 2 x unrolling (54 sts -> 108 sts)
+
+. 2347 8052E38 56
+. 8B 45 E8 8B 4D E8 03 85 78 FF FF FF 0F AF 45 0C 01 F9 8B 75 08 8B 14 C6 0F AF 4D 14 8B 75 10 89 14 CE 8B 55 08 8B 44 C2 04 89 44 CE 04 FF 45 E8 8B 4D DC 39 4D E8 72 C8
+***** EXPENSIVE 2349 684
+
+. 2348 8050C7E 18
+. 8B 45 DC D9 40 FC D9 40 F8 D9 C9 D9 E0 E9 27 FF FF FF
+***** EXPENSIVE 2350 685
+vex iropt: not unrolling (111 sts)
+
+. 2349 8059220 127
+. 8B 4D E4 8D 04 39 01 C1 8B 55 0C 0F AF D7 0F AF 45 0C 89 8D 7C FF FF FF 8B 4D 08 DD 04 D1 DD 04 C1 8B 8D 7C FF FF FF 0F AF 4D 0C 8B 45 08 DD 04 C8 D9 C1 D8 E1 DD 83 BC F8 FF FF 8B 4D DC D8 C9 8B 45 14 0F AF C6 8D 14 4E D8 C4 8B 4D 10 DD 1C C1 D9 CA DE C1 8D 04 F5 08 00 00 00 0F AF 45 14 D8 CB DD 1C 08 FF 45 E8 8B 45 E0 0F AF 55 14 DE E9 03 75 18 03 7D DC 39 45 E8 DD 1C D1 72 81
+***** EXPENSIVE 2351 686
+
+. 2350 80543B8 82
+. 8B 95 54 FF FF FF 8B 4D 20 8B 45 24 DD 44 11 F0 DD 44 11 F8 D9 C9 DD 5D D0 DD 5D C8 DD 44 10 F0 DD 44 10 F8 D9 C9 8B 45 28 DD 5D C0 DD 5D B8 DD 44 10 F0 DD 44 10 F8 D9 C9 C7 45 E8 00 00 00 00 8B 45 DC 39 45 E8 DD 5D B0 DD 5D A8 0F 83 57 02 00 00
+***** EXPENSIVE 2352 687
+vex iropt: not unrolling (117 sts)
+
+. 2351 805B850 121
+. 8B 4D E4 8D 04 39 01 C1 8B 55 0C 0F AF D7 0F AF 45 0C 89 4D A0 8B 4D 08 D9 04 91 D9 04 81 8B 4D A0 0F AF 4D 0C 8B 45 08 D9 04 88 D9 C1 D8 E1 DD 83 BC F8 FF FF 8B 4D DC D8 C9 8B 45 14 0F AF C6 8D 14 4E D8 C4 8B 4D 10 D9 1C 81 D9 CA DE C1 8D 04 B5 04 00 00 00 0F AF 45 14 D8 CB D9 1C 08 FF 45 E8 8B 45 E0 0F AF 55 14 DE E9 03 75 18 03 7D DC 39 45 E8 D9 1C 91 72 87
+***** EXPENSIVE 2353 688
+
+. 2352 8056BA8 82
+. 8B 95 44 FF FF FF 8B 4D 20 8B 45 24 D9 44 91 F8 D9 44 91 FC D9 C9 D9 5D D0 D9 5D CC D9 44 90 F8 D9 44 90 FC D9 C9 8B 45 28 D9 5D C8 D9 5D C4 D9 44 90 F8 D9 44 90 FC D9 C9 C7 45 E8 00 00 00 00 8B 45 DC 39 45 E8 D9 5D C0 D9 5D BC 0F 83 74 02 00 00
+***** EXPENSIVE 2354 689
+
+. 2353 804DFCC 190
+. 8B 85 B4 FE FF FF DD 04 06 DD 44 06 08 8B 45 EC 03 45 DC 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 45 EC 03 85 44 FF FF FF 0F AF 45 0C C1 E0 04 DD 44 06 08 D9 C9 DD 9D 48 FF FF FF DD 04 06 8B 45 EC 03 85 40 FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 4D EC 8B 45 DC 8D 04 81 DD 9D 38 FF FF FF D9 CC 89 85 D0 FE FF FF 0F AF 45 0C DD 9D 58 FF FF FF C1 E0 04 DD 85 38 FF FF FF D9 CB DD 95 50 FF FF FF D9 CD DD 9D 60 FF FF FF D9 CA D8 C1 DD 04 06 DC C5 D9 C9 DD 9D 30 FF FF FF DD 85 50 FF FF FF DD 44 06 08 D9 CE DD 9D C0 FE FF FF DE E1 DD 85 48 FF FF FF D9 C3
+***** EXPENSIVE 2355 690
+
+. 2354 804E08A 184
+. D9 CA DD 9D 28 FF FF FF D8 C5 D9 C9 D8 C4 DD 85 C0 FE FF FF DD 85 30 FF FF FF D9 C9 D8 C2 D9 CF DC AD 48 FF FF FF D9 C9 D8 C3 D9 CF DD 9D 18 FF FF FF DD 9D 20 FF FF FF D9 CB DE E4 D9 C9 DC A5 38 FF FF FF D9 CC DD 9D 10 FF FF FF DD 85 E0 FE FF FF D9 FA DC 8D F0 FE FF FF D9 CA DC AD C0 FE FF FF DE CA DD 85 E0 FE FF FF D9 FA D9 C9 DC A5 30 FF FF FF D9 C9 DC 8D F0 FE FF FF DE C9 DD 85 18 FF FF FF DC 8D F0 FE FF FF DC AD 60 FF FF FF DD 85 10 FF FF FF DC 8D F0 FE FF FF D9 C1 D9 C9 DC AD 58 FF FF FF D9 C9 D8 C4 DD 9D 08 FF FF FF D9 C0 D8 C3 D9 C9 DE E3
+***** EXPENSIVE 2356 691
+
+. 2355 804E142 168
+. DD 45 C8 DD 45 C0 D8 CE D9 C9 DC 8D 28 FF FF FF D9 CA DD 9D 00 FF FF FF DE C1 DD 45 C8 DD 45 C0 D8 CF D9 C9 DC 8D 20 FF FF FF D9 CB DE E5 DE C2 DD 45 C0 D9 CC DD 9D B8 FE FF FF D9 CD DC 4D C8 DD 85 60 FF FF FF DD 45 C0 D9 CD DC 8D 20 FF FF FF D9 CE DC 4D C8 D9 C9 DC 85 18 FF FF FF D9 CD DC 8D 28 FF FF FF D9 CE DE E2 D9 C9 DC 8D E8 FE FF FF D9 CD DE E1 D9 CB DD 9D 60 FF FF FF DD 85 58 FF FF FF DD 85 B8 FE FF FF D9 C9 DC 85 10 FF FF FF D9 C9 D8 E5 D9 CC DC 8D E8 FE FF FF D9 CE DC 8D E8 FE FF FF D9 CA
+***** EXPENSIVE 2357 692
+
+. 2356 804E1EA 164
+. DC 8D E8 FE FF FF D9 C9 DD 9D 58 FF FF FF DD 85 08 FF FF FF DD 85 00 FF FF FF D9 CD DD 9D 78 FF FF FF D9 C3 D8 C7 DD 9D 70 FF FF FF DD 85 B8 FE FF FF DE C6 D9 CB DE E6 DD 85 60 FF FF FF DD 85 58 FF FF FF D9 C9 8B 45 E8 DD 1C 17 DD 5C 17 08 DC EA D9 CB 03 45 D4 D8 C1 DD 45 B8 DD 45 B0 0F AF 45 14 D8 CA D9 C9 D8 CC D9 CA DC 4D B8 D9 CC DC 4D B0 D9 CA C1 E0 04 DE E1 D9 CB DE C1 D9 CA DD 1C 07 D9 C9 DD 5C 07 08 8B 45 E8 03 85 FC FE FF FF DD 45 A8 DD 45 A0 0F AF 45 14 DC 8D 70 FF FF FF D9 C9
+***** EXPENSIVE 2358 693
+
+. 2357 804E28E 162
+. DC 8D 78 FF FF FF C1 E0 04 DE E1 DD 1C 07 DD 45 A8 DD 45 A0 DC 8D 78 FF FF FF D9 C9 DC 8D 70 FF FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 F8 FE FF FF DD 45 98 DD 45 90 0F AF 45 14 D8 CD D9 C9 D8 CC D9 CB C1 E0 04 DC 85 08 FF FF FF D9 CB DE E1 D9 CA DD 9D 68 FF FF FF DD 85 00 FF FF FF D9 CA DD 1C 07 D9 CB DC 4D 98 D9 CA DC 4D 90 DE C2 D9 C9 8B 4D E8 DD 5C 07 08 8B 45 D4 8D 04 81 DE E1 DD 45 88 DD 45 80 D8 CA D9 C9 89 85 D0 FE FF FF DC 8D 68 FF FF FF 0F AF 45 14 C1 E0 04 DE E1 DD 1C 07
+***** EXPENSIVE 2359 694
+
+. 2358 804E330 73
+. DD 45 80 D9 C9 DC 4D 88 D9 C9 DC 8D 68 FF FF FF DE C1 DD 5C 07 08 8B 45 0C C1 E0 04 01 85 B4 FE FF FF 8B 45 0C 41 01 85 D8 FE FF FF 8B 45 14 C1 E0 04 89 4D E8 FF 45 E0 8B 4D D4 FF 45 EC 01 C2 39 4D E0 0F 82 53 FC FF FF
+***** EXPENSIVE 2360 695
+
+. 2359 805138C 177
+. 8B 45 E8 03 45 D8 0F AF 45 0C D9 04 C6 D9 44 C6 04 D9 C9 8B 45 E8 03 85 7C FF FF FF 0F AF 45 0C D9 5D 84 D9 04 C6 D9 44 C6 04 8B 45 E8 8B 95 0C FF FF FF 03 85 74 FF FF FF D9 04 D6 D9 44 D6 04 D9 C9 0F AF 45 0C D9 5D 8C D9 5D 88 D9 04 C6 D9 44 C6 04 D9 C9 8B 55 E8 8B 45 D8 8D 04 82 89 85 10 FF FF FF 0F AF 45 0C D9 9D 78 FF FF FF D9 9D 70 FF FF FF D9 04 C6 D9 44 C6 04 D9 45 84 D9 CD D9 55 80 D9 CD D8 C2 D9 CD D8 C1 D9 CA D8 6D 84 D9 CA D9 9D 6C FF FF FF D9 85 70 FF FF FF D9 85 78 FF FF FF D9 CB D9 9D 68 FF FF FF D9 C9 D8 6D 80
+***** EXPENSIVE 2361 696
+
+. 2360 805143D 181
+. D9 CA D8 C4 D9 C9 D8 C3 D9 CA D9 9D 64 FF FF FF D9 C4 D9 85 6C FF FF FF D9 C9 D8 C2 D9 CD D8 A5 78 FF FF FF D9 CC D8 A5 70 FF FF FF D9 C9 D8 C3 D9 CD D9 9D 58 FF FF FF D9 CB D9 95 60 FF FF FF D9 CB D9 9D 5C FF FF FF D9 CB D9 9D 54 FF FF FF DD 85 28 FF FF FF D9 FA DC 8D 20 FF FF FF D9 CC DE E3 D9 CB DE CA D9 C9 D9 5D EC D9 45 EC DD 85 28 FF FF FF D9 FA D9 CB D8 AD 6C FF FF FF D9 CB DC 8D 20 FF FF FF D8 CB D9 5D EC DD 85 20 FF FF FF D8 8D 58 FF FF FF D8 6D 8C D9 9D 50 FF FF FF DD 85 20 FF FF FF D8 8D 54 FF FF FF D8 6D 88 D9 9D 4C FF FF FF
+***** EXPENSIVE 2362 697
+
+. 2361 80514F2 181
+. D9 45 C8 D8 8D 68 FF FF FF D9 9D 38 FF FF FF D9 85 50 FF FF FF D9 45 C4 D9 45 EC D9 CA D8 C3 D9 85 4C FF FF FF D9 CC D8 AD 50 FF FF FF D9 CA D8 CD D8 85 38 FF FF FF D9 CC D8 C3 D9 CA D9 9D 40 FF FF FF D9 85 1C FF FF FF D9 CB D8 AD 4C FF FF FF D9 CB DE CC D9 C9 D9 9D 44 FF FF FF D9 45 C4 D9 45 C8 D9 C9 D8 8D 5C FF FF FF D9 CA D9 9D 48 FF FF FF D9 CA D9 9D 3C FF FF FF D9 C9 D8 8D 64 FF FF FF D9 45 C4 D9 45 C8 D9 CC D9 9D 38 FF FF FF D9 C9 DE C2 D8 8D 68 FF FF FF D9 CA DE CB D9 45 C4 D9 45 C8 D8 8D 5C FF FF FF D9 CB DE E4 D8 8D 64 FF FF FF
+***** EXPENSIVE 2363 698
+
+. 2362 80515A7 169
+. D9 45 8C D9 C9 DE E3 D8 85 58 FF FF FF D9 C9 D8 8D 1C FF FF FF D9 CB D8 8D 1C FF FF FF D9 CA D8 8D 1C FF FF FF D9 CC D9 9D 6C FF FF FF D9 5D 8C D9 85 48 FF FF FF D8 E2 D9 5D A0 D9 45 88 D9 85 40 FF FF FF D9 C9 D8 85 54 FF FF FF D9 C9 D8 E4 D9 85 3C FF FF FF D9 CD D8 85 40 FF FF FF D9 CA D9 5D 88 D9 CC D8 C2 D9 85 44 FF FF FF D9 CB D8 AD 3C FF FF FF D9 CA D9 5D 98 D9 45 8C D9 1C CF D9 CA D8 85 38 FF FF FF D9 C9 D9 5D 94 8B 45 E4 D9 45 C0 D9 45 BC 03 45 D0 D8 CA D9 C9 D8 4D A0 D9 CD 0F AF 45 14 D9 5D 9C
+***** EXPENSIVE 2364 699
+
+. 2363 8051650 152
+. DE EC D9 45 88 D9 5C CF 04 D9 CB D9 1C C7 D9 45 BC D8 4D A0 D9 CB D8 4D C0 DE C3 D9 CA D9 5C C7 04 D9 45 B8 8B 45 E4 D9 45 B4 D9 CA D8 85 48 FF FF FF D9 CA 03 85 34 FF FF FF D8 CB D9 C9 D8 4D 9C 0F AF 45 14 DE E1 D9 C9 D9 5D 90 D9 85 44 FF FF FF D8 A5 38 FF FF FF D9 C9 D9 1C C7 D9 45 B4 D9 CA D8 4D B8 D9 CA D8 4D 9C DE C2 D9 C9 D9 5C C7 04 D9 45 B0 8B 45 E4 D9 45 AC 03 85 30 FF FF FF D8 4D 94 D9 C9 D8 4D 98 0F AF 45 14 DE E1 D9 1C C7 D9 45 B0 D9 45 AC
+***** EXPENSIVE 2365 700
+
+. 2364 80516E8 106
+. D8 4D 98 D9 C9 D8 4D 94 DE C1 D9 5C C7 04 8B 55 E4 8B 45 D0 D9 45 A8 D9 45 A4 D9 C9 8D 04 82 D8 4D 90 D9 C9 D8 CA 89 85 10 FF FF FF DE E9 0F AF 45 14 D9 1C C7 D9 45 A4 D9 C9 D8 4D A8 D9 C9 D8 4D 90 DE C1 42 D9 5C C7 04 8B 45 0C 01 85 0C FF FF FF FF 45 E8 03 4D 14 89 55 E4 FF 45 DC 8B 55 D0 39 55 DC 0F 82 3A FC FF FF
+
+. 2365 80599FA 30
+. 8B 4D DC 41 89 C8 C7 45 EC 01 00 00 00 D1 E8 39 45 EC 89 8D F0 FE FF FF 0F 83 2E 05 00 00
+***** EXPENSIVE 2367 701
+
+. 2366 8059A18 127
+. C7 85 C8 FE FF FF 10 00 00 00 89 F6 8B B5 C8 FE FF FF 8B 7D 20 8B 45 24 DD 44 37 F0 DD 44 37 F8 DD 5D B0 DD 5D B8 DD 44 30 F8 DD 44 30 F0 D9 C9 8B 45 28 DD 5D A0 DD 5D A8 DD 44 30 F8 DD 44 30 F0 D9 C9 8B 45 2C DD 5D 90 DD 44 30 F8 D9 C9 DD 5D 98 DD 5D 80 DD 44 30 F0 C7 45 E8 00 00 00 00 8B 45 E0 80 75 B7 80 80 75 A7 80 80 75 97 80 80 75 87 80 39 45 E8 DD 5D 88 0F 83 94 04 00 00
+***** EXPENSIVE 2368 702
+
+. 2367 8059A97 219
+. 8B 45 DC 8B 55 EC 8D 54 12 FF DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 D1 E0 DD 9D E8 FE FF FF DD 9D E0 FE FF FF 89 85 F4 FE FF FF 89 95 D4 FE FF FF C7 85 D0 FE FF FF 00 00 00 00 89 95 CC FE FF FF 8D 76 00 8B 95 CC FE FF FF 03 55 E4 8B 75 E4 8D 0C 16 8D 34 0E 8B 7D E4 01 F7 8B 45 0C 89 BD 9C FE FF FF 0F AF 85 CC FE FF FF 8B 7D 08 DD 04 C7 8B 85 CC FE FF FF 8D 04 C5 08 00 00 00 0F AF 45 0C DD 9D 78 FF FF FF DD 04 38 8B 45 0C 0F AF C2 DD 04 C7 8B 45 0C 0F AF C1 DD 04 C7 8B 45 0C 0F AF C6 DD 04 C7 8B 45 0C 0F AF 85 9C FE FF FF 8D 14 D5 08 00 00 00 8D 0C CD 08 00 00 00 8D 34 F5 08 00 00 00 DD 04 C7 0F AF 55 0C 0F AF 4D 0C 0F AF 75 0C 8B 85 9C FE FF FF
+***** EXPENSIVE 2369 703
+
+. 2368 8059B72 167
+. DD 04 3A DD 04 39 D9 CE DD 9D 70 FF FF FF DD 04 3E 8D 3C C5 08 00 00 00 0F AF 7D 0C 8B 55 08 DD 9D 68 FF FF FF DD 04 17 D9 CA DD 9D 60 FF FF FF D9 C9 DD 9D 58 FF FF FF DD 45 B8 DD 45 B0 D9 C9 D8 CD D9 C9 D8 CA DE E9 DD 9D 50 FF FF FF DD 45 A8 D8 CB DD 9D A8 FE FF FF DD 45 A0 D8 CD DC AD A8 FE FF FF D9 C9 DC 4D B8 D9 C9 DD 9D A8 FE FF FF D9 CB DC 4D B0 DD 45 98 DD 45 90 DC 8D 68 FF FF FF D9 CA DE C5 D8 CA D9 CC DD 9D 48 FF FF FF DE EB DD 45 88 DD 45 80 D9 CB DC 4D A0 D9 CB DC 8D 58 FF FF FF D9 CD
+***** EXPENSIVE 2370 704
+
+. 2369 8059C19 169
+. DC 4D A8 D9 C9 DC 8D 60 FF FF FF D9 C9 DE C3 DD 45 98 D9 C9 DE E5 DD 45 80 DD 45 88 DD 85 A8 FE FF FF D9 CC DC 4D 90 D9 CA DC 8D 60 FF FF FF D9 CB DC 8D 68 FF FF FF D9 C9 DC 8D 58 FF FF FF D9 CC D8 C6 D9 C9 DE C2 D9 CB DE C2 D9 CA DD 9D 40 FF FF FF DD 85 50 FF FF FF D9 C3 D9 C9 D8 C6 DD 85 48 FF FF FF D9 CA D8 C4 D9 CF DC AD 50 FF FF FF D9 CA D8 C3 D9 CF DD 9D 38 FF FF FF D9 C9 DD 9D 30 FF FF FF D9 C9 DC AD 48 FF FF FF DD 9D 28 FF FF FF DD 85 40 FF FF FF D8 C1 DD 9D 20 FF FF FF DD 85 38 FF FF FF D8 C5
+***** EXPENSIVE 2371 705
+
+. 2370 8059CC2 171
+. DD 85 A8 FE FF FF D9 C9 DD 9D 18 FF FF FF DE E4 D9 CA DE E1 DD 85 E8 FE FF FF D9 FA DC 8D E0 FE FF FF D9 CA DC A5 40 FF FF FF DE CA DD 85 E8 FE FF FF D9 FA D9 CC DC A5 38 FF FF FF D9 CC DC 8D E0 FE FF FF DE CC DD 85 20 FF FF FF DC 8D E0 FE FF FF DC AD 78 FF FF FF DD 85 18 FF FF FF DC 8D E0 FE FF FF D9 C1 D9 C9 DC AD 70 FF FF FF D9 C9 D8 C4 DD 9D 10 FF FF FF D9 C9 DE E3 D9 C0 DD 45 D0 D9 C9 D8 C6 D9 CA DE E6 D9 E0 DD 85 30 FF FF FF DD 45 C8 D8 CE D9 C9 D8 CA DE E1 DD 45 C8 D8 CC D9 CA DC 8D 28 FF FF FF DE E2
+***** EXPENSIVE 2372 706
+
+. 2371 8059D6D 195
+. DD 45 C8 D9 CB DD 9D 08 FF FF FF D9 CA D9 E0 DD 85 30 FF FF FF D9 CE DC 4D D0 D9 CE D8 C9 DE C6 DD 85 78 FF FF FF DC 85 20 FF FF FF DD 9D 78 FF FF FF DD 85 70 FF FF FF DC 85 18 FF FF FF DD 9D 70 FF FF FF DD 85 10 FF FF FF D9 CC DC 4D D0 D9 CC D8 E2 D9 C9 DC 8D 28 FF FF FF DE C4 DD 9D A8 FE FF FF DD 85 08 FF FF FF D9 C4 D8 E4 D9 CA DC 85 10 FF FF FF D9 C9 D8 C3 D9 CB DC AD 08 FF FF FF D9 CA DD 9D 00 FF FF FF DD 95 10 FF FF FF D9 C9 DD 9D 08 FF FF FF D9 CB 8B 95 D4 FE FF FF 03 95 F4 FE FF FF 8B 85 F4 FE FF FF 8B 4D DC 01 D0 2B 4D EC 8B B5 D0 FE FF FF 89 85 98 FE FF FF 8D 4C 4E FF
+***** EXPENSIVE 2373 707
+
+. 2372 8059E30 201
+. 8B 85 F4 FE FF FF 8B 75 14 8D 3C 08 0F AF B5 D4 FE FF FF 8B 45 10 DE C2 DD 85 78 FF FF FF DD 1C F0 8B 85 D4 FE FF FF 8D 04 C5 08 00 00 00 8B 75 10 0F AF 45 14 DD 85 70 FF FF FF DD 1C 30 8B 45 14 0F AF C2 DD 85 A8 FE FF FF DD 1C C6 D9 C4 8B 45 14 8D 14 D5 08 00 00 00 0F AF 55 14 0F AF 85 98 FE FF FF D8 C4 D9 CD DE E4 DD 85 00 FF FF FF D9 C9 DD 1C 32 DD 1C C6 D9 CB 8B 85 98 FE FF FF 8D 34 C5 08 00 00 00 8B 45 14 8B 55 10 0F AF C1 0F AF 75 14 8D 0C CD 08 00 00 00 0F AF 4D 14 DD 1C 16 DD 1C C2 80 B5 0F FF FF FF 80 8B 45 14 0F AF C7 8D 3C FD 08 00 00 00 0F AF 7D 14 DD 85 08 FF FF FF D9 C9 D9 E0 D9 C9
+***** EXPENSIVE 2374 708
+
+. 2373 8059EF9 50
+. DD 1C 11 D9 C9 FF 45 E8 DD 1C C2 8B 4D E0 8B 45 18 DD 1C 17 8B 55 DC 01 85 D4 FE FF FF 01 85 D0 FE FF FF 01 95 CC FE FF FF 39 4D E8 0F 82 B1 FB FF FF
+
+. 2374 8059F2B 27
+. 8B 85 F0 FE FF FF FF 45 EC D1 E8 83 85 C8 FE FF FF 10 39 45 EC 0F 82 DE FA FF FF
+***** EXPENSIVE 2376 709
+
+. 2375 8059A24 115
+. 8B B5 C8 FE FF FF 8B 7D 20 8B 45 24 DD 44 37 F0 DD 44 37 F8 DD 5D B0 DD 5D B8 DD 44 30 F8 DD 44 30 F0 D9 C9 8B 45 28 DD 5D A0 DD 5D A8 DD 44 30 F8 DD 44 30 F0 D9 C9 8B 45 2C DD 5D 90 DD 44 30 F8 D9 C9 DD 5D 98 DD 5D 80 DD 44 30 F0 C7 45 E8 00 00 00 00 8B 45 E0 80 75 B7 80 80 75 A7 80 80 75 97 80 80 75 87 80 39 45 E8 DD 5D 88 0F 83 94 04 00 00
+
+. 2376 8059F46 13
+. F7 45 DC 01 00 00 00 0F 85 71 01 00 00
+***** EXPENSIVE 2378 710
+
+. 2377 80549B4 101
+. 8B BD DC FE FF FF 8B 45 20 DD 44 38 F0 DD 44 38 F8 D9 C9 8B 45 24 DD 5D C0 DD 5D B8 DD 44 38 F0 DD 44 38 F8 D9 C9 8B 45 28 DD 5D B0 DD 5D A8 DD 44 38 F0 DD 44 38 F8 D9 C9 8B 45 2C DD 5D A0 DD 5D 98 DD 44 38 F0 DD 44 38 F8 D9 C9 C7 45 E8 00 00 00 00 8B 45 CC 39 45 E8 DD 5D 90 DD 5D 88 0F 83 50 04 00 00
+***** EXPENSIVE 2379 711
+
+. 2378 8054848 159
+. 8B 95 C8 FE FF FF 8B 4D C8 8D 44 11 FF 8B 55 0C 8B 75 08 0F AF 95 C8 FE FF FF DD 04 D6 8B 55 0C 8D 0C 01 0F AF D0 8D 04 C5 08 00 00 00 0F AF 45 0C DD 04 30 8B 45 0C 0F AF C1 DD 04 D6 DD 04 C6 D9 C1 8D 0C CD 08 00 00 00 0F AF 4D 0C D8 C1 DD 04 31 D9 C9 D8 C0 DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CC DE E3 D9 CB D8 C0 DD 85 E8 FE FF FF D9 C9 DE CB D8 CB D8 ED D9 C2 D8 C1 DD 45 E0 D9 CA DE E4 DD 45 D8 D8 CB D9 CA 8B 95 D8 FE FF FF D8 CE 03 55 D4 8B 7D D4 DE C2 DD 45 D8
+***** EXPENSIVE 2380 712
+
+. 2379 80548E7 145
+. D9 CB 8D 0C 17 DC 4D E0 D9 CB DE CE D9 CD 8D 34 0F DE E2 01 F7 D8 C0 D9 C9 D8 C0 D9 CD DE C3 D9 C1 D9 C4 8B 45 14 0F AF 85 D8 FE FF FF 0F AF 55 14 0F AF 4D 14 89 BD A4 FE FF FF 0F AF 75 14 8B 7D 10 D8 E2 D9 C9 D8 E6 D9 CB DE C6 D9 CB DD 1C C7 D9 CA DD 1C D7 DD 1C CF D9 CA DD 1C F7 8B BD A4 FE FF FF 0F AF 7D 14 8B 45 10 8B 55 D0 DE C1 DD 1C F8 FF 45 E8 8D 04 92 8B 4D CC 01 85 C8 FE FF FF 01 95 D8 FE FF FF 39 4D E8 0F 82 D0 FE FF FF
+
+. 2380 805C0B5 30
+. 8B 75 D8 46 89 F0 C7 45 E8 01 00 00 00 D1 E8 39 45 E8 89 B5 C0 FE FF FF 0F 83 4C 06 00 00
+***** EXPENSIVE 2382 713
+
+. 2381 805C0D3 134
+. C7 85 98 FE FF FF 02 00 00 00 8D 76 00 8B BD 98 FE FF FF 8B 45 20 D9 44 B8 F8 D9 44 B8 FC 8B 45 24 D9 5D 94 D9 5D 98 D9 44 B8 FC D9 44 B8 F8 D9 C9 8B 45 28 D9 5D 8C D9 5D 90 D9 44 B8 FC D9 44 B8 F8 D9 C9 8B 45 2C D9 5D 84 D9 44 B8 FC D9 C9 D9 5D 88 D9 9D 7C FF FF FF D9 44 B8 F8 C7 45 E4 00 00 00 00 8B 45 DC 80 75 97 80 80 75 8F 80 80 75 87 80 80 B5 7F FF FF FF 80 39 45 E4 D9 5D 80 0F 83 AB 05 00 00
+***** EXPENSIVE 2383 714
+
+. 2382 805C159 217
+. 8B 45 D8 8B 55 E8 8D 54 12 FF DD 83 C4 F8 FF FF DD 83 CC F8 FF FF D9 C9 D1 E0 DD 9D B8 FE FF FF DD 9D B0 FE FF FF 89 85 EC FE FF FF 89 95 A4 FE FF FF C7 85 A0 FE FF FF 00 00 00 00 89 95 9C FE FF FF 90 8B 95 9C FE FF FF 03 55 E0 8B 75 E0 8D 0C 16 8D 34 0E 8B 7D E0 01 F7 8B 45 0C 89 BD 7C FE FF FF 0F AF 85 9C FE FF FF 8B 7D 08 D9 04 87 8B 85 9C FE FF FF 8D 04 85 04 00 00 00 0F AF 45 0C D9 9D 78 FF FF FF D9 04 38 8B 45 0C 0F AF C2 D9 9D 74 FF FF FF D9 04 87 8B 45 0C 0F AF C1 D9 04 87 8B 45 0C 0F AF C6 D9 9D 6C FF FF FF 8D 0C 8D 04 00 00 00 D9 04 87 8B 45 0C 0F AF 85 7C FE FF FF 0F AF 4D 0C 8D 14 95 04 00 00 00 8D 34 B5 04 00 00 00 D9 04 87 D9 CA
+***** EXPENSIVE 2384 715
+
+. 2383 805C232 190
+. 0F AF 55 0C D9 9D 70 FF FF FF 0F AF 75 0C D9 04 39 8B 85 7C FE FF FF D9 04 3A D9 C9 D9 9D 68 FF FF FF D9 04 3E 8D 3C 85 04 00 00 00 0F AF 7D 0C 8B 55 08 D9 9D 60 FF FF FF D9 04 17 D9 CA D9 9D 64 FF FF FF D9 C9 D9 9D 5C FF FF FF D9 45 98 D9 45 94 D9 C9 D8 8D 70 FF FF FF D9 C9 D8 CA DE E9 D9 45 90 D8 8D 6C FF FF FF D9 9D 58 FF FF FF D9 45 8C D9 45 94 D9 C9 D8 8D 68 FF FF FF D9 C9 D8 8D 70 FF FF FF D9 C9 D8 AD 58 FF FF FF D9 CB D8 4D 98 DE C1 D9 CA D9 9D 58 FF FF FF D9 45 90 D9 45 8C D8 8D 6C FF FF FF D9 C9 D8 8D 68 FF FF FF DE C1 D9 9D 54 FF FF FF D9 45 84 D9 45 88
+***** EXPENSIVE 2385 716
+
+. 2384 805C2F0 204
+. D8 8D 64 FF FF FF D9 C9 D8 8D 60 FF FF FF DE E9 D9 9D 50 FF FF FF D9 45 84 D9 45 88 D9 C9 D8 8D 64 FF FF FF D9 C9 D8 8D 60 FF FF FF DE C1 D9 9D 4C FF FF FF D9 45 80 D8 CB D9 9D 48 FF FF FF D9 85 7C FF FF FF D8 8D 5C FF FF FF D8 AD 48 FF FF FF D9 9D 48 FF FF FF D9 45 80 D9 85 48 FF FF FF D8 C2 D9 C9 D8 8D 5C FF FF FF D9 CC D8 8D 7C FF FF FF DE C4 D9 9D 44 FF FF FF D9 C1 D8 C3 D9 CA DE E3 D9 85 54 FF FF FF D9 CA D9 9D 40 FF FF FF D9 C9 D8 A5 4C FF FF FF D9 85 58 FF FF FF D9 85 54 FF FF FF D9 C9 D8 85 50 FF FF FF D9 C9 D8 85 4C FF FF FF D9 CA D9 9D 34 FF FF FF D9 85 44 FF FF FF D9 85 40 FF FF FF D8 C3 D9 CC
+***** EXPENSIVE 2386 717
+
+. 2385 805C3BC 219
+. D8 A5 48 FF FF FF D9 85 58 FF FF FF D9 CA D8 C3 D9 CD D9 9D 2C FF FF FF D9 95 3C FF FF FF D9 CD D9 95 38 FF FF FF D9 C9 D8 A5 50 FF FF FF D9 CC D9 9D 30 FF FF FF DD 85 B8 FE FF FF D9 FA D9 CA D8 AD 44 FF FF FF D9 CA DC 8D B0 FE FF FF D8 CA D9 5D EC D9 C9 D9 9D 44 FF FF FF D9 45 EC DD 85 B8 FE FF FF D9 FA D9 CB D8 AD 40 FF FF FF D9 CB DC 8D B0 FE FF FF D8 CB D9 9D 28 FF FF FF DD 85 B0 FE FF FF D8 8D 30 FF FF FF D8 AD 78 FF FF FF D9 9D 24 FF FF FF DD 85 B0 FE FF FF D8 8D 2C FF FF FF D8 AD 74 FF FF FF D9 9D 20 FF FF FF D9 85 24 FF FF FF D8 C1 D9 9D 1C FF FF FF D9 85 20 FF FF FF D8 85 28 FF FF FF D9 9D 18 FF FF FF D9 45 D4 D9 E0 D9 95 08 FF FF FF D8 CD
+***** EXPENSIVE 2387 718
+
+. 2386 805C497 213
+. D9 9D 0C FF FF FF D8 AD 24 FF FF FF D9 45 D0 D9 C9 D9 9D 14 FF FF FF D8 CB D9 85 20 FF FF FF D8 A5 28 FF FF FF D9 C9 D8 AD 0C FF FF FF D9 C9 D9 9D 10 FF FF FF D9 9D 0C FF FF FF D9 85 08 FF FF FF D9 45 D0 D9 C9 D8 CA D9 C9 D8 8D 34 FF FF FF DE E9 D9 9D 04 FF FF FF D9 45 D0 D9 E0 DC CC D9 CB D8 4D D4 DE C4 D9 C9 D9 9D 40 FF FF FF D9 CA D9 95 00 FF FF FF D9 45 D4 D8 8D 34 FF FF FF D9 CA DE CB D9 CA DE C1 D9 85 78 FF FF FF D8 85 30 FF FF FF D9 9D 78 FF FF FF D9 85 74 FF FF FF D8 85 2C FF FF FF D9 9D 74 FF FF FF D9 85 1C FF FF FF D8 A5 04 FF FF FF D9 9D FC FE FF FF D9 85 18 FF FF FF D8 85 0C FF FF FF D9 9D F8 FE FF FF D9 85 14 FF FF FF
+***** EXPENSIVE 2388 719
+
+. 2387 805C56C 225
+. D8 E1 D9 C9 D8 85 14 FF FF FF D9 9D 14 FF FF FF 8B 95 A4 FE FF FF D9 85 10 FF FF FF D9 C9 03 95 EC FE FF FF D9 9D F4 FE FF FF D8 E1 D9 85 10 FF FF FF 8B 85 EC FE FF FF 8B 4D D8 01 D0 2B 4D E8 8B B5 A0 FE FF FF DE C2 D9 9D 10 FF FF FF D9 85 18 FF FF FF 8D 4C 4E FF D8 A5 0C FF FF FF D9 C9 D9 9D F0 FE FF FF 89 85 78 FE FF FF D9 85 1C FF FF FF 8B 85 EC FE FF FF 8B 75 14 8D 3C 08 D8 85 04 FF FF FF D9 C9 0F AF B5 A4 FE FF FF D9 9D 18 FF FF FF 8B 45 10 D9 85 78 FF FF FF D9 C9 D9 9D 1C FF FF FF D9 1C B0 8B 85 A4 FE FF FF 8D 04 85 04 00 00 00 8B 75 10 0F AF 45 14 D9 85 74 FF FF FF D9 1C 30 8B 45 14 0F AF C2 D9 85 FC FE FF FF D9 1C 86 8D 14 95 04 00 00 00 8B 45 14 0F AF 55 14
+***** EXPENSIVE 2389 720
+
+. 2388 805C64D 183
+. 0F AF 85 78 FE FF FF D9 85 F8 FE FF FF D9 85 F4 FE FF FF D9 C9 D9 1C 32 D9 1C 86 8B 85 78 FE FF FF 8D 34 85 04 00 00 00 8B 45 14 8B 55 10 0F AF C1 0F AF 75 14 D9 85 F0 FE FF FF D9 85 1C FF FF FF D9 C9 D9 1C 16 80 B5 1B FF FF FF 80 D9 1C 82 8D 0C 8D 04 00 00 00 8B 45 14 0F AF 4D 14 0F AF C7 D9 85 18 FF FF FF 80 B5 13 FF FF FF 80 8D 3C BD 04 00 00 00 D9 1C 11 D9 85 14 FF FF FF 0F AF 7D 14 D9 85 10 FF FF FF D9 C9 D9 1C 82 D9 1C 17 8B 45 18 8B 55 D8 FF 45 E4 8B 4D DC 01 85 A4 FE FF FF 01 85 A0 FE FF FF 01 95 9C FE FF FF 39 4D E4 0F 82 98 FA FF FF
+
+. 2389 805C704 27
+. 8B 85 C0 FE FF FF FF 45 E8 D1 E8 83 85 98 FE FF FF 02 39 45 E8 0F 82 C1 F9 FF FF
+***** EXPENSIVE 2391 721
+
+. 2390 805C0E0 121
+. 8B BD 98 FE FF FF 8B 45 20 D9 44 B8 F8 D9 44 B8 FC 8B 45 24 D9 5D 94 D9 5D 98 D9 44 B8 FC D9 44 B8 F8 D9 C9 8B 45 28 D9 5D 8C D9 5D 90 D9 44 B8 FC D9 44 B8 F8 D9 C9 8B 45 2C D9 5D 84 D9 44 B8 FC D9 C9 D9 5D 88 D9 9D 7C FF FF FF D9 44 B8 F8 C7 45 E4 00 00 00 00 8B 45 DC 80 75 97 80 80 75 8F 80 80 75 87 80 80 B5 7F FF FF FF 80 39 45 E4 D9 5D 80 0F 83 AB 05 00 00
+
+. 2391 805C71F 13
+. F7 45 D8 01 00 00 00 0F 85 D8 01 00 00
+***** EXPENSIVE 2393 722
+
+. 2392 8057220 101
+. 8B BD DC FE FF FF 8B 45 20 D9 44 B8 F8 D9 44 B8 FC D9 C9 8B 45 24 D9 5D B0 D9 5D AC D9 44 B8 F8 D9 44 B8 FC D9 C9 8B 45 28 D9 5D A8 D9 5D A4 D9 44 B8 F8 D9 44 B8 FC D9 C9 8B 45 2C D9 5D A0 D9 5D 9C D9 44 B8 F8 D9 44 B8 FC D9 C9 C7 45 E4 00 00 00 00 8B 45 D0 39 45 E4 D9 5D 98 D9 5D 94 0F 83 BE 04 00 00
+***** EXPENSIVE 2394 723
+
+. 2393 8057094 161
+. 8B 95 CC FE FF FF 8B 4D CC 8D 44 11 FF 8B 55 0C 8B 75 08 0F AF 95 CC FE FF FF D9 04 96 8B 55 0C 8D 0C 01 0F AF D0 8D 04 85 04 00 00 00 0F AF 45 0C D9 04 30 8B 45 0C 0F AF C1 D9 04 96 D9 04 86 D9 C1 8D 0C 8D 04 00 00 00 0F AF 4D 0C D8 C1 D9 CC D9 5D C8 D9 04 31 D9 CC D8 C0 DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CB DE E2 D9 CA D8 C0 DE C9 D9 5D EC DD 85 E8 FE FF FF D8 C9 D8 6D C8 D9 5D BC D9 45 EC D9 45 BC D8 C1 D9 C9 D8 6D BC D9 C9 D9 5D B8 D9 5D B4 D9 45 DC D9 45 E0 D8 CB
+***** EXPENSIVE 2395 724
+
+. 2394 8057135 147
+. D9 C9 D8 CC DE C1 D9 45 DC DE CB 8B 95 D8 FE FF FF D9 45 E0 03 55 D8 8B 45 D8 DE CC D9 CA 8D 0C 10 DE E3 D8 45 C8 D9 45 B4 D9 CB 8D 34 08 D8 C0 D9 C9 01 F0 D9 5D C8 D9 C9 D8 C0 D9 45 B8 D9 CB D8 E2 D9 CA 8B 7D 14 D8 45 B4 D9 CB 0F AF BD D8 FE FF FF 0F AF 55 14 0F AF 4D 14 D8 E1 D9 CB D9 5D B4 D8 45 B8 89 85 BC FE FF FF D9 45 C8 8B 45 10 D9 1C B8 D9 55 B8 D9 CA D9 1C 90 D9 1C 88 8B BD BC FE FF FF 0F AF 75 14 0F AF 7D 14 8B 55 D4 D9 45 B4
+***** EXPENSIVE 2396 725
+
+. 2395 80571C8 36
+. D9 1C B0 FF 45 E4 D9 1C B8 8B 4D D0 8D 04 92 01 85 CC FE FF FF 01 95 D8 FE FF FF 39 4D E4 0F 82 A8 FE FF FF
+***** EXPENSIVE 2397 726
+
+. 2396 8054004 168
+. 8B 55 AC 8B 75 84 8D 44 32 FF 8B 4D A8 8D 34 01 8B 4D 84 29 D1 8B 55 A8 8D 4C 0A FF 89 8D 74 FF FF FF 8B 4D 0C 8B 55 08 0F AF C8 8D 04 C5 08 00 00 00 DD 04 CA 0F AF 45 0C DD 5D A0 DD 04 10 8B 45 0C 0F AF C6 DD 04 C2 8B 45 0C 0F AF 85 74 FF FF FF DD 04 C2 8B 85 74 FF FF FF 8D 0C C5 08 00 00 00 8D 34 F5 08 00 00 00 0F AF 75 0C 0F AF 4D 0C D9 C1 D9 CB DD 5D 98 DD 04 11 DD 04 16 D9 CC D8 C2 DD 83 BC F8 FF FF D9 CA D9 E0 D9 C5 D9 CB D8 CA DC 6D A0 D9 CB D8 C1 D9 CA DC 45 A0 DD 83 BC F8 FF FF D9 CF DE E2
+***** EXPENSIVE 2398 727
+
+. 2397 80540AC 128
+. D9 C9 DC 4D E0 D9 C9 DD 5D A0 D9 CD D8 C9 D9 C2 D9 C9 8B 45 14 8B 75 10 DC 6D 98 D9 CA 0F AF C7 DC 45 98 D9 C9 D8 E6 D9 CB DE C6 D9 CC DE E3 DD 45 A0 D9 CB DC 4D E0 8B 4D DC D9 C1 D9 CD DD 55 98 D9 CC DD 1C C6 8D 04 FD 08 00 00 00 DC C4 DE E9 0F AF 45 14 DD 45 C8 DD 45 C0 D9 CC DD 1C 30 D9 CB 8D 14 39 D8 CC D9 CB D8 CA 8B 45 14 0F AF C2 DE E3 D9 CA DD 1C C6 8D 0C 11 DC 4D C0 D9 CA
+***** EXPENSIVE 2399 728
+
+. 2398 805412C 103
+. 8D 14 D5 08 00 00 00 DC 4D C8 8B 45 14 DD 45 B8 DD 45 B0 D9 C9 0F AF C1 0F AF 55 14 D8 CD D9 C9 D8 CB D9 CC DE C2 D9 CC 8D 0C CD 08 00 00 00 DC 4D B0 D9 CA DC 4D B8 D9 C9 DD 1C 32 D9 CB 0F AF 4D 14 DE E2 DE C2 8B 55 D8 DD 1C C6 FF 45 E8 DD 1C 31 8D 04 52 8B 4D D4 03 7D D8 01 45 84 39 4D E8 0F 82 71 FE FF FF
+***** EXPENSIVE 2400 729
+
+. 2399 80567F0 162
+. 8B 55 C4 8B 75 9C 8D 44 32 FF 8B 4D C0 8D 34 01 8B 4D 9C 29 D1 8B 55 C0 8D 4C 0A FF 89 4D 94 8B 4D 0C 0F AF C8 8D 04 85 04 00 00 00 8B 55 08 0F AF 45 0C D9 04 10 8B 45 0C 0F AF C6 D9 04 82 8B 45 0C D9 04 8A 0F AF 45 94 D9 04 82 D9 C9 D9 5D BC 8B 45 94 D9 C1 8D 0C 85 04 00 00 00 D8 C1 8D 34 B5 04 00 00 00 0F AF 75 0C 0F AF 4D 0C D9 5D B4 DD 83 BC F8 FF FF D9 04 11 D9 CC D9 5D B8 D8 4D B4 D9 04 16 D9 CC D9 E0 D9 C9 D8 6D BC D9 C4 D9 C9 D9 5D F0 D8 C1 DD 83 BC F8 FF FF D8 C9 D8 6D B8
+***** EXPENSIVE 2401 730
+
+. 2400 8056892 133
+. D9 45 F0 D9 C9 D9 5D F0 D9 45 BC D9 CE DE E3 D9 CD D8 45 B4 D9 CC DE E3 D9 C9 D8 4D E4 D9 45 F0 D9 CB D8 4D E4 D9 CC D9 5D BC 8B 45 E0 8D 14 38 D9 C2 8D 0C 10 D8 C4 8B 45 14 8B 75 10 0F AF C7 D9 5D B0 D9 45 BC D9 1C 86 D9 C4 8D 04 BD 04 00 00 00 D8 E1 D9 CD DE C1 D9 C9 0F AF 45 14 D8 45 B8 D9 45 D4 D9 45 D0 D9 CA D9 14 30 D9 C9 D8 CE D9 CA D8 4D B0 8B 45 14 0F AF C2 DE EA D9 C9 D9 1C 86 D9 45 D4
+***** EXPENSIVE 2402 731
+
+. 2401 8056917 119
+. D8 4D B0 D9 CD D8 4D D0 D9 CB DE E4 D9 CA DE C4 8D 14 95 04 00 00 00 8B 45 14 D9 45 CC D9 45 C8 D9 C9 0F AF C1 0F AF 55 14 D8 CA D9 C9 D8 CC D9 CA 8D 0C 8D 04 00 00 00 D8 4D C8 D9 CC D8 4D CC D9 CD D9 1C 32 0F AF 4D 14 DE E1 D9 CA DE C3 D9 C9 8B 55 DC D9 1C 86 D9 C9 FF 45 E8 D9 1C 31 8D 04 52 8B 4D D8 03 7D DC 01 45 9C 39 4D E8 D9 5D B8 0F 82 62 FE FF FF
+***** EXPENSIVE 2403 732
+
+. 2402 8059484 185
+. 8B 95 5C FF FF FF 03 55 E4 8B 75 E4 8D 0C 16 01 CE 8B 45 0C 89 B5 3C FF FF FF 0F AF 85 5C FF FF FF 8B 75 08 DD 04 C6 8B 85 5C FF FF FF 8D 04 C5 08 00 00 00 0F AF 45 0C DD 5D A0 DD 04 30 8B 45 0C 0F AF C2 DD 04 C6 8B 45 0C 0F AF C1 8D 14 D5 08 00 00 00 DD 04 C6 0F AF 55 0C 8B 45 0C 8D 0C CD 08 00 00 00 DD 04 32 0F AF 4D 0C 0F AF 85 3C FF FF FF 8B 95 3C FF FF FF DD 04 31 DD 04 C6 D9 CD 8D 34 D5 08 00 00 00 0F AF 75 0C 8B 4D 08 DD 5D 98 DD 04 0E DD 5D 90 DD 45 D0 DD 45 C8 D9 C9 D8 CD D9 C9 D8 CB DE E9 DD 9D 40 FF FF FF DD 45 C0 DD 45 B8 D9 CD DC 4D C8
+***** EXPENSIVE 2404 733
+
+. 2403 805953D 130
+. D9 CD D8 CA D9 CB DC 4D D0 D9 C9 D8 CC D9 C9 DE C5 DE E2 DD 45 B0 DD 45 A8 DC 4D 90 D9 C9 D8 CE DE E1 DD 85 40 FF FF FF DD 45 B0 D9 CD DC 4D B8 D9 CF DC 4D A8 D9 C9 D8 C2 D9 CB DC 4D C0 D9 CD DC 4D 90 DE C1 D9 CC DE C6 DD 45 A0 DD 45 98 D9 CB DD 5D 88 8B 85 6C FF FF FF D9 C5 D9 C9 8D 0C 38 D8 C4 D9 C9 D8 C5 D9 CE DE E5 8B 45 14 DD 45 88 0F AF 85 60 FF FF FF 8B 75 10 D8 C1 DD 1C C6 D9 CD
+***** EXPENSIVE 2405 734
+
+. 2404 80595BF 166
+. DD 5D 80 8B B5 60 FF FF FF DD 85 40 FF FF FF D9 CA D8 C6 D9 CA DE E1 8D 04 F5 08 00 00 00 DD 45 80 D9 CB 8B 75 10 DC 6D A0 D9 CE 0F AF 45 14 DC 6D 98 D9 CB D8 C2 D9 CB 8B 95 60 FF FF FF DD 9D 70 FF FF FF D9 CA DD 1C 30 D9 CA 03 95 6C FF FF FF D9 E0 D9 C4 8B 45 14 0F AF C2 D8 E1 DD 1C C6 D9 CC 8B 45 14 DD 9D 78 FF FF FF 0F AF C1 DD 85 70 FF FF FF D9 C9 D9 E0 D9 CA DC 65 80 D9 CC 8D 14 D5 08 00 00 00 8D 0C CD 08 00 00 00 0F AF 55 14 DC 85 78 FF FF FF D9 C9 0F AF 4D 14 D8 C2 D9 CB DC 65 88 D9 CA
+***** EXPENSIVE 2406 735
+
+. 2405 8059665 110
+. DC AD 70 FF FF FF D9 CC D9 E0 D9 CB DD 1C 32 D9 CB DD 9D 70 FF FF FF DD 1C C6 D9 C9 DD 95 78 FF FF FF D9 C9 DD 1C 31 8B 45 14 0F AF C7 DD 1C C6 80 B5 77 FF FF FF 80 8D 04 FD 08 00 00 00 0F AF 45 14 DD 85 70 FF FF FF DD 1C 30 8B 55 DC 8B 45 18 FF 45 E8 8B 4D E0 01 85 60 FF FF FF 01 C7 01 95 5C FF FF FF 39 4D E8 0F 82 B1 FD FF FF
+***** EXPENSIVE 2407 736
+
+. 2406 8059724 153
+. 8B 45 E4 8D 14 38 8D 0C 10 01 C8 8B 75 0C 89 85 3C FF FF FF 0F AF F7 8B 45 08 DD 04 F0 8B B5 3C FF FF FF 0F AF 55 0C 0F AF 4D 0C 0F AF 75 0C DD 04 D0 DD 04 C8 DD 04 F0 D9 C4 D9 FA DD 83 34 F6 FF FF DE F1 D9 C3 D8 E2 DE C9 D9 C5 D9 FA 8B 8D 64 FF FF FF 8B 55 EC DD 83 34 F6 FF FF 8D 44 51 FF DE F1 8B 75 DC D9 C5 8B 55 14 8D 0C 70 0F AF D0 8B 75 10 D8 C2 DD 1C D6 D9 CC DE C2 D9 C2 D9 CC DE CA D9 CB 8D 04 C5 08 00 00 00 D9 E0 0F AF 45 14 D8 E1 DD 1C 30 D9 CB
+***** EXPENSIVE 2408 737
+
+. 2407 80597BD 56
+. 8B 45 14 0F AF C1 DE E2 D9 C9 DD 1C C6 8D 0C CD 08 00 00 00 8B 45 18 FF 45 E8 8B 55 E0 0F AF 4D 14 DE E1 01 85 64 FF FF FF 03 7D DC 39 55 E8 DD 1C 31 0F 82 2F FF FF FF
+***** EXPENSIVE 2409 738
+
+. 2408 80546C4 148
+. 8B 45 E0 8D 14 47 8B 45 0C 8B 4D 08 0F AF C7 DD 04 C1 8D 04 FD 08 00 00 00 0F AF 45 0C DD 04 08 8B 45 0C 0F AF C2 8D 14 D5 08 00 00 00 0F AF 55 0C DD 04 C1 DD 04 0A D9 C4 D9 FA D9 C3 D8 C2 DE C9 D9 C5 D9 FA 8B 95 68 FF FF FF D9 C5 03 55 E4 D8 E4 8B 45 E4 8D 0C 10 DE C9 01 C8 D9 C0 D9 CE DE C4 D9 CA DE E4 D9 CA 8B 75 14 0F AF B5 68 FF FF FF 0F AF 55 14 0F AF 4D 14 89 85 34 FF FF FF D8 C0 D9 CC 8B 45 10 D8 E2 D9 CB D8 C0 D9 CC DD 1C F0 DE C1
+***** EXPENSIVE 2410 739
+
+. 2409 8054758 55
+. D9 C9 DD 1C D0 D9 C9 8B B5 34 FF FF FF DD 1C C8 8B 55 E0 FF 45 E8 8B 4D DC 0F AF 75 14 D9 E0 01 95 68 FF FF FF 03 BD 64 FF FF FF 39 4D E8 DD 1C F0 0F 82 35 FF FF FF
+***** EXPENSIVE 2411 740
+
+. 2410 805BAB8 184
+. 8B 95 58 FF FF FF 03 55 E4 8B 75 E4 8D 0C 16 01 CE 8B 45 0C 89 B5 44 FF FF FF 0F AF 85 58 FF FF FF 8B 75 08 D9 04 86 8B 85 58 FF FF FF 8D 04 85 04 00 00 00 0F AF 45 0C D9 5D B4 D9 04 30 8B 45 0C 0F AF C2 D9 5D B0 D9 04 86 8B 45 0C 0F AF C1 D9 04 86 D9 C9 8B 45 0C 0F AF 85 44 FF FF FF 8D 14 95 04 00 00 00 8D 0C 8D 04 00 00 00 0F AF 55 0C 0F AF 4D 0C D9 5D AC D9 04 86 8B 85 44 FF FF FF D9 04 32 D9 04 31 D9 CA D9 5D A4 8D 34 85 04 00 00 00 D9 45 CC 0F AF 75 0C 8B 55 08 D8 4D AC D9 CB D9 5D A8 D9 04 16 D9 CB D9 5D A0 D9 45 C8 D8 C9 D8 6D A0 D9 5D A0
+***** EXPENSIVE 2412 741
+
+. 2411 805BB70 137
+. D9 45 C8 D8 4D AC D9 C9 D8 4D CC DE C1 D9 45 C4 D8 4D A8 D9 5D 9C D9 45 C0 D8 CA D8 6D 9C D9 5D 9C D9 45 C0 D8 4D A8 D9 CA D8 4D C4 D9 CA D9 5D A8 D9 C9 D8 45 A8 D9 5D 98 D9 45 BC D8 4D A4 D9 5D 94 D9 45 B8 D8 CA D8 6D 94 D9 5D 94 D9 45 BC DE CA D9 45 B8 D8 4D A4 DE C2 D9 45 B4 D8 45 9C D9 5D 90 D9 45 B0 D8 45 98 D9 5D 8C D9 45 A0 D9 45 A0 D9 45 B4 D9 45 B0 D9 CB D8 65 94 D9 CA D8 45 94 D9 C9 D8 65 9C D9 CB
+***** EXPENSIVE 2413 742
+
+. 2412 805BBF9 162
+. D8 65 98 D9 C4 D9 CB D9 5D 88 D9 CA 8B 85 78 FF FF FF 8D 0C 38 D8 C5 D9 45 8C D9 CD DE E6 8B 45 14 D9 45 90 0F AF 85 5C FF FF FF D8 C2 D9 CD D8 C1 D9 CD 8B 75 10 D9 1C 86 D9 CC D9 5D 84 D9 CC 8B B5 5C FF FF FF D9 E0 D9 C2 80 75 8B 80 D8 E1 8D 04 B5 04 00 00 00 0F AF 45 14 8B 75 10 D9 5D 80 D9 45 84 D9 45 88 D9 C9 8B 95 5C FF FF FF D9 1C 30 03 95 78 FF FF FF D8 C2 8B 45 14 0F AF C2 D9 9D 7C FF FF FF D9 45 80 D9 1C 86 D9 CC 8D 14 95 04 00 00 00 8B 45 14 0F AF 55 14 0F AF C1 D8 6D 90
+***** EXPENSIVE 2414 743
+
+. 2413 805BC9B 119
+. D9 85 7C FF FF FF D9 CC D8 6D 8C D9 CC 8D 0C 8D 04 00 00 00 D9 1C 32 D9 C9 0F AF 4D 14 D8 65 88 D9 C9 D9 14 86 D9 5D 90 D9 CA D9 5D 8C 80 75 8F 80 8B 45 14 0F AF C7 DE C2 D9 45 8C D9 1C 31 D9 C9 D9 1C 86 8D 04 BD 04 00 00 00 0F AF 45 14 D9 E0 D9 1C 30 8B 55 DC 8B 45 18 FF 45 E8 8B 4D E0 01 85 5C FF FF FF 01 C7 01 95 58 FF FF FF 39 4D E8 0F 82 A6 FD FF FF
+***** EXPENSIVE 2415 744
+
+. 2414 805BD5C 188
+. 8B 45 E4 8D 14 38 8D 0C 10 01 C8 8B 75 0C 89 85 44 FF FF FF 0F AF F7 8B 45 08 D9 04 B0 8B B5 44 FF FF FF 0F AF 75 0C D9 9D 74 FF FF FF 0F AF 55 0C D9 04 B0 0F AF 4D 0C D9 9D 6C FF FF FF D9 04 90 D9 04 88 DD 83 DC F8 FF FF D9 FA D9 85 6C FF FF FF D9 C9 DC BB 34 F6 FF FF D9 C9 D8 EB DE C9 D9 9D 68 FF FF FF DD 83 DC F8 FF FF D9 FA DC BB 34 F6 FF FF D9 CA D8 85 6C FF FF FF 8B 8D 60 FF FF FF DE CA D9 C9 8B 55 EC D9 5D F0 8D 44 51 FF 8B 75 DC 8B 55 14 D9 85 74 FF FF FF 8D 0C 70 0F AF D0 8B 75 10 D8 85 68 FF FF FF D9 45 F0 D9 C9 D9 1C 96 D9 C1 8D 04 85 04 00 00 00
+***** EXPENSIVE 2416 745
+
+. 2415 805BE18 81
+. D9 E0 0F AF 45 14 D8 E1 D9 1C 30 8B 45 14 D9 85 74 FF FF FF 0F AF C1 D8 A5 68 FF FF FF D9 1C 86 8D 0C 8D 04 00 00 00 8B 45 18 FF 45 E8 8B 55 E0 0F AF 4D 14 DE E9 01 85 60 FF FF FF 03 7D DC 39 55 E8 D9 95 70 FF FF FF D9 1C 31 0F 82 F3 FE FF FF
+***** EXPENSIVE 2417 746
+
+. 2416 8056EC8 192
+. 8B 45 0C 8B 4D 08 0F AF C7 D9 04 81 8D 04 BD 04 00 00 00 0F AF 45 0C 8B 75 E0 8D 14 77 D9 04 08 D9 C9 8B 45 0C 0F AF C2 8D 14 95 04 00 00 00 0F AF 55 0C D9 9D 70 FF FF FF D9 04 0A D9 9D 64 FF FF FF D9 95 6C FF FF FF D9 04 81 DD 83 DC F8 FF FF D9 FA D9 CA D8 85 64 FF FF FF DE CA D9 C9 D9 9D 60 FF FF FF DD 83 DC F8 FF FF D9 FA D9 85 70 FF FF FF D8 E2 8B 95 58 FF FF FF 03 55 E4 DE C9 8B 45 E4 D9 85 70 FF FF FF D9 C9 D9 5D F0 8D 0C 10 DE C1 D9 85 64 FF FF FF D9 45 F0 01 C8 D9 85 60 FF FF FF D9 CA D8 A5 6C FF FF FF D9 CA 8B 75 14 0F AF B5 58 FF FF FF 0F AF 55 14 0F AF 4D 14
+***** EXPENSIVE 2418 747
+
+. 2417 8056F88 83
+. D8 E9 D9 CB 89 85 34 FF FF FF D8 C0 D9 CA 8B 45 10 D8 C0 D9 CA D9 1C B0 D8 85 60 FF FF FF D9 CA D9 1C 90 8B B5 34 FF FF FF D9 1C 88 8B 55 E0 FF 45 E8 8B 4D DC 0F AF 75 14 D9 E0 01 95 58 FF FF FF 03 BD 54 FF FF FF 39 4D E8 D9 1C B0 0F 82 ED FE FF FF
+***** EXPENSIVE 2419 748
+
+. 2418 804E5D0 190
+. 8B 45 EC 03 45 DC 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 45 EC 03 85 64 FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 D9 CA DD 9D 70 FF FF FF DD 44 06 08 D9 C9 8B 45 EC 03 85 54 FF FF FF 0F AF 45 0C C1 E0 04 DD 9D 68 FF FF FF DD 04 06 DD 9D 58 FF FF FF DD 44 06 08 8B 45 EC 03 85 3C FF FF FF 0F AF 45 0C C1 E0 04 DD 9D 48 FF FF FF DD 04 06 DD 9D 40 FF FF FF DD 44 06 08 DD 9D 30 FF FF FF 8B 45 EC 03 85 2C FF FF FF DD 85 40 FF FF FF DD 85 30 FF FF FF D9 C9 0F AF 45 0C D8 C3 D9 C9 D8 C2 DD 83 BC F8 FF FF DD 83 BC F8 FF FF D9 C9 C1 E0 04 D8 CB D9 C9 D8 CA D9 CB DC 04 16
+***** EXPENSIVE 2420 749
+
+. 2419 804E68E 196
+. D9 CD DC A5 40 FF FF FF D9 CC DC A5 30 FF FF FF DD 04 06 DD 44 06 08 D9 CB DC 2C 16 D9 CD DC 6C 16 08 D9 CE DC 8D 20 FF FF FF D9 CA DC 8D 20 FF FF FF D9 CF DD 9D 18 FF FF FF D9 CB DC 44 16 08 DD 9D 10 FF FF FF D9 C3 D8 E6 D9 CC DE C6 DD 85 70 FF FF FF D8 C3 DD 9D 08 FF FF FF DD 85 68 FF FF FF D8 C2 DD 95 00 FF FF FF DC 8B BC F8 FF FF DC AD 48 FF FF FF DD 9D 98 FE FF FF D9 C4 DD 85 58 FF FF FF D9 C9 D8 C2 D9 CE DE E2 DC 85 08 FF FF FF DD 85 08 FF FF FF D9 C9 DD 9D F8 FE FF FF DC 8B BC F8 FF FF DD 85 48 FF FF FF D9 C9 DC AD 58 FF FF FF D9 C9 DC 85 00 FF FF FF D9 CB DC A5 68 FF FF FF
+***** EXPENSIVE 2421 750
+
+. 2420 804E752 204
+. DC 8D 20 FF FF FF D9 CB DD 9D F0 FE FF FF D9 C0 D8 E3 D9 CC DC A5 70 FF FF FF D9 C9 DE C3 DC 8D 20 FF FF FF D9 CB DD 9D E8 FE FF FF DD 85 18 FF FF FF DD 85 98 FE FF FF D8 C4 D9 C9 DC 85 F8 FE FF FF DD 1C 0F DD 9D E0 FE FF FF DD 85 E8 FE FF FF DD 85 10 FF FF FF D9 CC DC AD 98 FE FF FF D9 CC DC 85 F0 FE FF FF D9 C9 D8 C5 D9 C9 DD 5C 0F 08 D9 CB DD 9D 98 FE FF FF D9 CA DD 9D D8 FE FF FF DD 85 E0 FE FF FF D8 C4 DD 9D D0 FE FF FF DD 85 18 FF FF FF DC A5 F8 FE FF FF DD 9D C8 FE FF FF DD 85 10 FF FF FF DC A5 F0 FE FF FF D9 C5 8B 45 E8 D8 C2 D9 C9 DD 9D C0 FE FF FF D9 CB DC A5 E8 FE FF FF D9 CC DC A5 E0 FE FF FF
+***** EXPENSIVE 2422 751
+
+. 2421 804E81E 143
+. D9 CD DE E1 03 45 D4 DD 45 C0 DD 45 B8 0F AF 45 14 D8 CE D9 C9 D8 CD D9 CE DC 4D C0 D9 CD DC 4D B8 D9 CE C1 E0 04 DE E1 D9 CC DE C5 D9 CB DD 1C 07 D9 CB DD 5C 07 08 DD 85 98 FE FF FF 8B 45 E8 D8 C3 03 85 BC FE FF FF DD 45 B0 DD 45 A8 0F AF 45 14 D8 CA D9 C9 D8 CB D9 CA DC 4D B0 D9 CB DC 4D A8 D9 CA C1 E0 04 DE E1 D9 CA DE C1 D9 C9 DD 1C 07 DD 5C 07 08 8B 45 E8 03 85 B8 FE FF FF DD 45 A0 DD 45 98 0F AF 45 14 DC 8D C0 FE FF FF
+***** EXPENSIVE 2423 752
+
+. 2422 804E8AD 171
+. D9 C9 DC 8D C8 FE FF FF C1 E0 04 DE E1 D9 CA DC A5 98 FE FF FF D9 CA DD 1C 07 DD 45 A0 DD 45 98 DC 8D C8 FE FF FF D9 C9 DC 8D C0 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 B4 FE FF FF DD 45 90 DD 45 88 0F AF 45 14 DC 8D D0 FE FF FF D9 C9 DC 8D D8 FE FF FF C1 E0 04 DE E1 DD 1C 07 DD 45 90 DD 45 88 DC 8D D8 FE FF FF D9 C9 DC 8D D0 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 B0 FE FF FF DD 45 80 DD 85 78 FF FF FF D9 C9 0F AF 45 14 D8 CA D9 C9 D8 CB D9 CA DC 8D 78 FF FF FF D9 CB DC 4D 80 D9 C9 C1 E0 04 DE E2 DE C2
+***** EXPENSIVE 2424 753
+
+. 2423 804E958 53
+. DD 1C 07 DD 5C 07 08 8B 45 0C C1 E0 04 01 C2 8B 45 0C 01 85 A8 FE FF FF 8B 45 14 C1 E0 04 FF 45 EC 01 C1 FF 45 E8 FF 45 E0 8B 45 D4 39 45 E0 0F 82 43 FC FF FF
+***** EXPENSIVE 2425 754
+
+. 2424 8051998 183
+. 8B 45 E8 03 45 D8 0F AF 45 0C D9 04 C6 D9 44 C6 04 8B 45 E8 03 45 94 0F AF 45 0C D9 04 C6 D9 CA D9 5D 9C D9 44 C6 04 8B 45 E8 03 45 8C 0F AF 45 0C D9 04 C6 D9 CA D9 5D 98 D9 44 C6 04 D9 CA 8B 45 E8 03 45 84 0F AF 45 0C D9 5D 90 D9 C9 D9 5D 88 D9 04 C6 D9 44 C6 04 8B 45 E8 03 85 78 FF FF FF 0F AF 45 0C D9 5D 80 D9 04 C6 D9 9D 7C FF FF FF D9 44 C6 04 D9 9D 74 FF FF FF D9 C2 D8 C1 D9 9D 70 FF FF FF D9 45 80 D8 C2 D9 9D 6C FF FF FF D9 85 70 FF FF FF D8 8D F8 FE FF FF D9 04 CE DE E1 D9 85 6C FF FF FF D8 8D F8 FE FF FF D9 9D 60 FF FF FF D9 44 CE 04
+***** EXPENSIVE 2426 755
+
+. 2425 8051A4F 183
+. D8 A5 60 FF FF FF D9 CC DE E2 D9 04 CE D9 CC D9 9D 64 FF FF FF D9 CB D8 85 70 FF FF FF D9 44 CE 04 D9 CB D8 65 80 D8 8D 5C FF FF FF D9 CB D8 85 6C FF FF FF D9 C9 D9 9D 58 FF FF FF D9 C3 D9 CA D8 8D 5C FF FF FF D9 CA D8 E3 D9 C9 D9 9D 54 FF FF FF D9 85 64 FF FF FF D8 C2 D9 C9 D9 9D 50 FF FF FF D9 C9 D8 AD 64 FF FF FF D9 85 7C FF FF FF D9 85 74 FF FF FF D9 85 7C FF FF FF D9 CE DE C5 D9 C9 D8 45 9C D9 C9 D8 45 98 D9 CD D8 65 9C D9 CB D9 9D 4C FF FF FF D9 C9 D9 9D 44 FF FF FF D9 85 F8 FE FF FF D9 85 F8 FE FF FF D9 85 5C FF FF FF DE CC D8 CA D9 C9
+***** EXPENSIVE 2427 756
+
+. 2426 8051B06 196
+. D8 CD D9 C9 D8 6D 90 D9 C9 D8 6D 88 D9 CC D9 95 48 FF FF FF D9 CB D9 9D 40 FF FF FF D9 85 74 FF FF FF D8 65 98 D9 85 5C FF FF FF DE C9 D9 9D 3C FF FF FF D9 85 3C FF FF FF D8 E9 D9 9D 38 FF FF FF D9 85 40 FF FF FF D8 C4 D9 45 90 D9 C9 D9 9D 34 FF FF FF DE C2 D9 85 58 FF FF FF D8 C2 D9 45 88 D9 C9 D9 1C D7 DE C5 D9 85 54 FF FF FF D8 C5 D9 5C D7 04 D9 85 50 FF FF FF D8 85 38 FF FF FF D9 9D 30 FF FF FF D9 85 4C FF FF FF D8 85 34 FF FF FF D9 C9 D8 85 3C FF FF FF D9 C9 D9 9D 2C FF FF FF DC C2 D9 CB D8 A5 40 FF FF FF D9 CA D9 9D 28 FF FF FF D9 85 44 FF FF FF D8 C2 D9 CC D8 AD 54 FF FF FF
+***** EXPENSIVE 2428 757
+
+. 2427 8051BCA 179
+. D9 C9 D8 AD 58 FF FF FF D9 CC D9 9D 24 FF FF FF D9 9D 1C FF FF FF D9 85 4C FF FF FF D9 85 50 FF FF FF D9 CB D8 AD 48 FF FF FF D9 CB D8 A5 38 FF FF FF D9 C9 D8 A5 34 FF FF FF D9 CC D9 95 20 FF FF FF D9 CB D9 9D 10 FF FF FF D9 C9 D8 AD 44 FF FF FF D9 9D 0C FF FF FF 8B 45 E4 D9 45 C4 D9 45 C0 03 45 D0 D8 CC D9 C9 D8 CA 0F AF 45 14 DE E1 D9 1C C7 D9 45 C4 DE CB D9 45 C0 DE C9 DE C2 D9 C9 D9 5C C7 04 D9 45 B8 8B 45 E4 D9 45 BC D9 C9 03 85 08 FF FF FF D8 8D 24 FF FF FF D9 C9 D8 8D 28 FF FF FF 0F AF 45 14 DE E1 D9 1C C7 D9 45 B8 D9 45 BC
+***** EXPENSIVE 2429 758
+
+. 2428 8051C7D 165
+. D9 C9 D8 8D 28 FF FF FF D9 C9 D8 8D 24 FF FF FF DE C1 D9 5C C7 04 D9 45 B0 8B 45 E4 D9 45 B4 D9 C9 03 85 04 FF FF FF D8 8D 1C FF FF FF D9 C9 D8 CA 0F AF 45 14 DE E1 D9 1C C7 D9 45 B4 D9 45 B0 D9 C9 D8 8D 1C FF FF FF D9 C9 DE CA DE C1 D9 5C C7 04 8B 45 E4 D9 45 AC D9 45 A8 D9 45 AC D9 45 A8 D9 CA 03 85 00 FF FF FF D8 8D 2C FF FF FF D9 CB D8 8D 30 FF FF FF D9 C9 D8 8D 2C FF FF FF D9 CA D8 8D 30 FF FF FF D9 C9 0F AF 45 14 DE E3 DE C1 D9 C9 D9 1C C7 D9 5C C7 04 D9 45 A4 8B 45 E4 D9 45 A0 D9 C9
+***** EXPENSIVE 2430 759
+
+. 2429 8051D22 82
+. 03 85 FC FE FF FF D8 8D 10 FF FF FF D9 C9 D8 8D 0C FF FF FF 0F AF 45 14 DE E9 D9 1C C7 D9 45 A0 D9 45 A4 D8 8D 0C FF FF FF D9 C9 D8 8D 10 FF FF FF DE C1 D9 5C C7 04 FF 45 DC 8B 45 D0 03 4D 0C FF 45 E8 03 55 14 FF 45 E4 39 45 DC 0F 82 24 FC FF FF
+***** EXPENSIVE 2431 760
+
+. 2430 8058D74 151
+. 8B 45 EC 01 C8 8B 55 0C 0F AF D1 89 45 C8 8B 45 08 DD 04 D0 8D 04 CD 08 00 00 00 0F AF 45 0C 8B 55 08 DD 04 10 8B 45 0C 0F AF 45 C8 DD 04 C2 8B 45 C8 8D 14 C5 08 00 00 00 0F AF 55 0C 8B 45 08 DD 04 02 D9 C5 D9 C5 D8 CA D9 C9 D8 CB DE E1 D9 C6 DE CA D9 CA D8 CD DE C1 8B 45 14 D9 C3 0F AF C6 8B 55 10 D8 C2 DD 1C C2 D9 C2 8D 04 F5 08 00 00 00 0F AF 45 14 D8 C1 DD 1C 10 D9 CB 8B 45 14 0F AF C7 DE E1 DD 1C C2 DE E1 8D 04 FD 08 00 00 00 0F AF 45 14 D9 E0
+***** EXPENSIVE 2432 761
+
+. 2431 8058E0B 27
+. DD 1C 10 FF 45 F0 8B 45 E8 03 7D 18 03 75 18 03 4D E4 39 45 F0 0F 82 4E FF FF FF
+***** EXPENSIVE 2433 762
+
+. 2432 8053D08 151
+. 8B 45 0C 8B 55 08 0F AF C6 DD 04 C2 8D 04 F5 08 00 00 00 0F AF 45 0C DD 04 10 8B 45 0C 0F AF C7 DD 04 C2 8D 04 FD 08 00 00 00 0F AF 45 0C DD 04 10 8B 4D F0 8B 55 C4 8D 44 4A FF 8B 4D E8 8B 55 14 01 C1 0F AF D0 D9 C3 D9 C3 D9 CD 8D 04 C5 08 00 00 00 D8 E3 D9 CC D8 C2 D9 C9 89 4D B0 0F AF 45 14 8B 4D 10 DE C3 D9 CC DE E1 D9 C4 D9 C6 D9 CB DD 1C D1 D9 CA D8 CB D9 C9 DD 1C 08 D9 C9 D8 CB 8B 45 14 0F AF 45 B0 DE E9 DD 1C C1 8B 45 B0 8D 0C C5 08 00 00 00
+***** EXPENSIVE 2434 763
+
+. 2433 8053D9F 45
+. D8 CA D9 C9 D8 CB 0F AF 4D 14 DE C1 8B 55 10 DD 1C 11 FF 45 EC 8B 4D E0 8B 45 D8 03 7D CC 03 75 C8 01 4D C4 39 45 EC 0F 82 3C FF FF FF
+***** EXPENSIVE 2435 764
+
+. 2434 805B3C0 151
+. 8B 55 EC 01 CA 8B 45 0C 0F AF C1 89 55 D0 8B 55 08 D9 04 82 8D 04 8D 04 00 00 00 0F AF 45 0C D9 04 10 8B 45 0C 0F AF 45 D0 D9 04 82 8B 45 D0 8D 14 85 04 00 00 00 0F AF 55 0C 8B 45 08 D9 04 02 D9 C5 D9 C5 D8 CA D9 C9 D8 CB DE E1 D9 C6 DE CA D9 CA D8 CD DE C1 8B 45 14 D9 C3 0F AF C6 8B 55 10 D8 C2 D9 1C 82 D9 C2 8D 04 B5 04 00 00 00 0F AF 45 14 D8 C1 D9 1C 10 D9 CB 8B 45 14 0F AF C7 DE E1 D9 1C 82 DE E1 8D 04 BD 04 00 00 00 0F AF 45 14 D9 E0 D9 1C 10
+
+. 2435 805B457 24
+. FF 45 F0 8B 45 E8 03 7D 18 03 75 18 03 4D E4 39 45 F0 0F 82 51 FF FF FF
+***** EXPENSIVE 2437 765
+
+. 2436 80564FC 151
+. 8B 45 0C 8B 55 08 0F AF C6 D9 04 82 8D 04 B5 04 00 00 00 0F AF 45 0C D9 04 10 8B 45 0C 0F AF C7 D9 04 82 8D 04 BD 04 00 00 00 0F AF 45 0C D9 04 10 8B 4D F0 8B 55 C4 8D 44 4A FF 8B 4D E8 8B 55 14 01 C1 0F AF D0 D9 C3 D9 C3 D9 CD 8D 04 85 04 00 00 00 D8 E3 D9 CC D8 C2 D9 C9 89 4D B0 0F AF 45 14 8B 4D 10 DE C3 D9 CC DE E1 D9 C4 D9 C6 D9 CB D9 1C 91 D9 CA D8 CB D9 C9 D9 1C 08 D9 C9 D8 CB 8B 45 14 0F AF 45 B0 DE E9 D9 1C 81 8B 45 B0 8D 0C 85 04 00 00 00
+***** EXPENSIVE 2438 766
+
+. 2437 8056593 45
+. D8 CA D9 C9 D8 CB 0F AF 4D 14 DE C1 8B 55 10 D9 1C 11 FF 45 EC 8B 4D E0 8B 45 D8 03 7D CC 03 75 C8 01 4D C4 39 45 EC 0F 82 3C FF FF FF
+***** EXPENSIVE 2439 767
+
+. 2438 8054440 169
+. 8B 45 0C 8B 75 08 0F AF C7 DD 04 C6 8D 04 FD 08 00 00 00 0F AF 45 0C 8B 4D A4 DD 04 30 8D 14 39 8B 45 0C 0F AF C2 8B 8D 5C FF FF FF DD 04 C6 03 4D A4 8B 45 0C 0F AF C1 8D 14 D5 08 00 00 00 DD 04 C6 0F AF 55 0C 8B 45 0C DD 04 32 0F AF 85 5C FF FF FF 8B 95 5C FF FF FF DD 04 C6 D9 CB DD 5D 98 D9 C9 DD 55 90 8D 04 D5 08 00 00 00 D8 C4 0F AF 45 0C DD 9D 40 FF FF FF 8D 0C CD 08 00 00 00 DD 45 98 0F AF 4D 0C DD 04 30 D9 C9 D8 C3 DD 04 31 D9 CA D9 E0 D9 C9 DD 5D 88 D9 CB DC 6D 98 D9 C2 D8 C4 D9 CA D9 E0 D9 C9
+***** EXPENSIVE 2440 768
+
+. 2439 80544E9 165
+. DD 9D 78 FF FF FF D9 C4 D8 C1 D9 CD DE E1 D9 C9 DD 55 80 DD 85 78 FF FF FF D9 CB DE E4 D9 CA D8 C1 DD 45 88 D9 CE DC 65 90 D9 CE DC 85 40 FF FF FF D9 C9 DD 9D 70 FF FF FF 8B 95 60 FF FF FF 03 55 E4 8B 45 E4 8D 0C 10 01 C8 8B 75 14 0F AF B5 60 FF FF FF 89 85 34 FF FF FF 8B 45 10 DD 1C F0 D9 C9 8B B5 60 FF FF FF 8D 34 F5 08 00 00 00 0F AF 75 14 D8 C3 D9 C4 D9 C9 DD 1C 06 D8 E2 DD 45 D0 DD 45 C8 DC 8D 70 FF FF FF D9 C9 8B 45 14 D8 CA 8B 75 10 0F AF C2 DE E1 DD 1C C6 DD 45 D0 DC 8D 70 FF FF FF
+***** EXPENSIVE 2441 769
+
+. 2440 805458E 152
+. DD 85 40 FF FF FF D9 CA DC 4D C8 D9 CA DC 65 88 D9 CD DC 65 80 D9 CA DE C1 8B 45 14 DD 45 C0 DD 45 B8 0F AF C1 D8 CB D9 C9 D8 CE D9 CB DC 4D C0 D9 CE DC 4D B8 D9 CB 8D 14 D5 08 00 00 00 8D 0C CD 08 00 00 00 0F AF 55 14 DE E1 D9 CA 0F AF 4D 14 DE C5 DD 1C 32 D9 C9 DC A5 78 FF FF FF D9 C9 DD 1C C6 D9 CB DE C1 D9 C9 DD 1C 31 DD 45 B0 DD 45 A8 D9 C9 D8 CA D9 C9 D8 CB 8B 45 14 0F AF 85 34 FF FF FF DE E9 DD 1C C6 D9 C9 8B 85 34 FF FF FF 8D 34 C5 08 00 00 00
+***** EXPENSIVE 2442 770
+
+. 2441 8054626 59
+. DC 4D B0 D9 C9 DC 4D A8 0F AF 75 14 DE C1 8B 55 10 8B 4D E0 DD 1C 16 FF 45 E8 8B B5 58 FF FF FF 8B 45 DC 01 8D 60 FF FF FF 01 B5 5C FF FF FF 39 45 E8 8D 3C 8F 0F 82 DF FD FF FF
+***** EXPENSIVE 2443 771
+
+. 2442 8056C30 178
+. 8B 45 0C 8B 75 08 0F AF C7 D9 04 86 8D 04 BD 04 00 00 00 0F AF 45 0C 8B 4D B8 D9 04 30 8D 14 39 8B 45 0C 0F AF C2 8B 8D 4C FF FF FF D9 04 86 D9 CA 03 4D B8 8B 45 0C 0F AF C1 8D 0C 8D 04 00 00 00 0F AF 4D 0C D9 5D B4 D9 5D B0 D9 5D AC D9 04 86 D9 04 31 8D 14 95 04 00 00 00 0F AF 55 0C D9 5D A0 D9 5D A4 8B 45 0C D9 45 B4 D9 04 32 D9 C9 0F AF 85 4C FF FF FF 8B 95 4C FF FF FF D8 45 A4 D9 04 86 D9 C9 80 75 A3 80 8D 04 95 04 00 00 00 D9 5D 9C D9 45 B0 0F AF 45 0C D8 45 A0 D9 CA D9 5D A8 D9 04 30 D9 CA D9 5D 98 D9 C9 8B 95 50 FF FF FF
+***** EXPENSIVE 2444 772
+
+. 2443 8056CE2 144
+. 03 55 E4 8B 45 E4 D9 E0 D9 45 A8 8D 0C 10 D8 C1 D9 45 AC D9 CA D8 6D A8 D9 5D 94 D9 C9 01 C8 D8 C2 D9 45 AC D9 45 9C D9 C9 8B 75 14 0F AF B5 50 FF FF FF DE E4 D8 C1 D9 45 98 D9 C9 89 85 34 FF FF FF 8B 45 10 D9 1C B0 D8 C2 8B B5 50 FF FF FF D9 5D 90 D9 45 B4 D9 45 B0 8D 34 B5 04 00 00 00 D8 65 A0 D9 C9 0F AF 75 14 D8 65 A4 D9 45 90 D9 45 94 D8 EA D9 C9 D9 1C 06 D9 C2 D9 45 D0 D9 CA D9 55 8C D9 C9 D8 C7 D9 CA DE C9 D9 45 CC D8 CA
+***** EXPENSIVE 2445 773
+
+. 2444 8056D72 132
+. D9 CE 8B 45 14 8B 75 10 D8 6D 98 D9 CB D8 45 94 D9 CD 0F AF C2 D8 6D 9C D9 C9 DE E6 D9 5D 88 D9 C9 D9 55 84 D9 CB D9 55 80 D9 CC D9 1C 86 D9 45 CC D8 4D 8C D9 C9 D8 4D D0 DE C1 D9 45 C4 D9 45 C8 8B 45 14 D8 4D 88 D9 C9 D8 CC 8D 14 95 04 00 00 00 0F AF 55 14 0F AF C1 DE E9 D9 C9 D9 14 32 D9 C9 D9 1C 86 D9 45 C4 D9 45 C8 D9 C9 D8 4D 88 D9 C9 DE CC D9 CA DE E5 D9 C9 DE C2 D9 45 BC D9 45 C0 D9 C9
+***** EXPENSIVE 2446 774
+
+. 2445 8056DF6 120
+. 8B 45 14 D8 CD D9 C9 8D 0C 8D 04 00 00 00 D8 CC 0F AF 4D 14 0F AF 85 34 FF FF FF DE E1 D9 CA D9 1C 31 D9 C9 D9 1C 86 8B 85 34 FF FF FF D9 45 BC D9 CB 8D 34 85 04 00 00 00 D8 4D C0 D9 CB DE CA D9 CA 0F AF 75 14 8B 55 E0 8B 45 10 8B 8D 48 FF FF FF DE C1 D9 1C 06 01 95 50 FF FF FF 01 8D 4C FF FF FF D9 5D 8C FF 45 E8 8B 75 DC 39 75 E8 8D 3C 97 0F 82 C2 FD FF FF
+***** EXPENSIVE 2447 775
+
+. 2446 804ED74 195
+. 8B 45 EC 03 45 DC 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 45 EC 03 85 2C FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 8B 45 EC 03 85 1C FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 D9 CC DD 9D 38 FF FF FF DD 44 06 08 8B 45 EC 03 85 0C FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 D9 CD DD 9D 20 FF FF FF DD 44 06 08 8B 45 EC 03 85 08 FF FF FF 0F AF 45 0C C1 E0 04 DD 04 06 DD 44 06 08 D9 CE 8B 45 EC 03 85 04 FF FF FF 0F AF 45 0C C1 E0 04 DD 9D 30 FF FF FF DD 04 06 D9 CB DD 9D 10 FF FF FF D9 CA DD 9D 88 FD FF FF DD 85 38 FF FF FF DC 85 88 FD FF FF DD 44 06 08 D9 C9 DD 9D F8 FE FF FF
+***** EXPENSIVE 2448 776
+
+. 2447 804EE37 182
+. DD 85 30 FF FF FF D8 C1 DD 9D F0 FE FF FF DC AD 30 FF FF FF DD 85 38 FF FF FF DC A5 88 FD FF FF D9 C9 DD 9D E0 FE FF FF D9 C4 D8 C3 D9 CD DE E3 DD 9D E8 FE FF FF DD 85 10 FF FF FF D9 C3 D9 C9 D8 C2 D9 C9 D8 C6 D9 CA DC A5 10 FF FF FF DD 9D 80 FD FF FF D9 C9 DD 95 D0 FE FF FF DC 85 F0 FE FF FF D9 CB DE E5 D9 CA DD 9D C8 FE FF FF DD 85 20 FF FF FF DD 85 80 FD FF FF D9 C9 D8 C6 D9 C9 D8 C5 D9 CE DC A5 20 FF FF FF D9 C0 D9 CF DD 9D 78 FE FF FF DD 85 C8 FE FF FF D9 CF D8 C3 D9 CF DC 44 0E 08 D8 C4 D9 CD DD 95 D8 FE FF FF D9 CF DD 9D 88 FE FF FF
+***** EXPENSIVE 2449 777
+
+. 2448 804EEED 178
+. D9 CE DC 85 F8 FE FF FF DD 04 0E D8 C1 D9 CD DD 9D B8 FE FF FF DD 85 E0 FD FF FF D9 CD D8 C2 D9 CD DC 45 B8 D9 CD DD 9D C0 FE FF FF D9 CC DC B5 A8 FD FF FF DD 85 C8 FE FF FF D9 C9 DC A5 B0 FD FF FF D9 C9 D8 C4 D9 CD D8 C2 D8 C9 D9 C9 DE CD D9 CC DD 9D 60 FE FF FF DD 85 E8 FD FF FF DC 65 C0 DC 65 B8 DD 85 F8 FE FF FF D9 C9 DC B5 A8 FD FF FF D9 C9 D8 E2 D8 C9 DD 9D B0 FE FF FF DD 85 F0 FE FF FF D8 E4 DE C9 DD 9D A8 FE FF FF DD 45 C8 DC A5 D8 FD FF FF DC 45 B8 DC B5 A8 FD FF FF D9 C9 DC A5 D8 FE FF FF D9 CB DC A5 D0 FE FF FF D9 CB
+***** EXPENSIVE 2450 778
+
+. 2449 804EF9F 217
+. D8 C9 D9 CB DE C9 DD 85 D8 FE FF FF DC A5 F8 FE FF FF DD 85 98 FE FF FF DE C9 DD 85 D0 FE FF FF DC A5 F0 FE FF FF D9 C9 DD 9D A0 FE FF FF DD 85 98 FE FF FF DE C9 DD 85 C8 FD FF FF DC 65 A0 D9 C9 DD 9D 90 FE FF FF DC B5 A8 FD FF FF DD 85 88 FE FF FF D9 C9 DC 8D 80 FE FF FF D9 C9 DC 85 E8 FE FF FF D8 C9 DD 9D 88 FE FF FF DD 85 78 FE FF FF DC 85 E0 FE FF FF DE C9 DD 9D 78 FE FF FF DD 85 D0 FD FF FF DC 65 A8 DC 45 A0 DC B5 A8 FD FF FF DD 85 E8 FE FF FF D9 C9 DC 8D 80 FE FF FF D9 C9 D8 E7 D8 C9 DD 9D 70 FE FF FF DD 85 E0 FE FF FF DC A5 80 FD FF FF DE C9 DD 9D 68 FE FF FF DD 45 B0 DC A5 C0 FD FF FF DC 65 A0 DC B5 A8 FD FF FF DD 85 80 FD FF FF D9 C9
+***** EXPENSIVE 2451 779
+
+. 2450 804F078 220
+. DC 8D 80 FE FF FF D9 CF D8 E3 D9 C9 D8 E6 D9 C9 D8 CF D9 C9 DE CF DD 85 C8 FD FF FF DC 85 B8 FD FF FF DC B5 A8 FD FF FF DC 8D 80 FE FF FF D9 CB DC A5 E8 FE FF FF D9 CE DC A5 E0 FE FF FF D9 CE D8 CB D9 CE DE CB DD 85 B8 FE FF FF DC 85 60 FE FF FF DD 9D 60 FE FF FF DD 85 B0 FE FF FF D8 C4 8B 95 B4 FE FF FF 8B 85 B0 FE FF FF 81 F2 00 00 00 80 DD 9D 58 FE FF FF DD 85 A8 FE FF FF 89 85 48 FE FF FF 89 95 4C FE FF FF D8 C2 DD 9D 50 FE FF FF 8B 95 AC FE FF FF DD 85 48 FE FF FF DC A5 A0 FE FF FF D9 CD 81 F2 00 00 00 80 8B 85 A8 FE FF FF DC 85 C0 FE FF FF D9 CC DC AD A0 FE FF FF D9 CA DC AD 90 FE FF FF D9 CD DD 9D 48 FE FF FF 89 85 40 FE FF FF 89 95 44 FE FF FF
+***** EXPENSIVE 2452 780
+
+. 2451 804F154 204
+. DD 85 40 FE FF FF DC A5 90 FE FF FF DD 9D 40 FE FF FF DD 85 70 FE FF FF D8 C1 DD 9D 38 FE FF FF DD 85 68 FE FF FF D8 C7 DD 9D 88 FD FF FF DD 85 58 FE FF FF D8 C4 DD 9D 58 FE FF FF D9 C9 D8 C3 DD 85 60 FE FF FF D9 CC DC 85 48 FE FF FF D9 CC DC 85 50 FE FF FF D9 CC DD 9D 48 FE FF FF DD 85 60 FE FF FF D9 CC DD 9D 50 FE FF FF D9 CB DC 85 40 FE FF FF DD 85 38 FE FF FF DC 85 88 FE FF FF D9 C9 DD 9D 40 FE FF FF DD 85 88 FD FF FF DC 85 78 FE FF FF D9 C9 DD 9D 38 FE FF FF DD 85 58 FE FF FF D8 C1 D9 CA D8 EE D9 CF D8 EB D9 C9 DD 9D 88 FD FF FF D9 CD D9 E0 DD 85 50 FE FF FF D9 CB D9 E0 D9 C9 DC A5 70 FE FF FF D9 C9
+***** EXPENSIVE 2453 781
+
+. 2452 804F220 190
+. DC A5 68 FE FF FF D9 CB DC A5 38 FE FF FF D9 CF DC 85 88 FE FF FF D9 CE DC 85 78 FE FF FF D9 C9 DC 85 88 FE FF FF D9 CD DC 85 60 FE FF FF D9 CB DC 85 78 FE FF FF D9 CA DD 9D 30 FE FF FF D9 CE DD 9D 28 FE FF FF DD 85 48 FE FF FF DD 85 40 FE FF FF D9 C9 D8 C2 D9 C9 D8 E5 D9 C9 DD 9D 20 FE FF FF DD 9D 18 FE FF FF D9 C2 D9 C2 D9 C9 D8 E7 D9 C9 D8 C6 D9 C9 8B 85 98 FD FF FF DD 9D 10 FE FF FF DD 9D 08 FE FF FF DD 85 C0 FE FF FF DD 85 B8 FE FF FF D9 C9 DD 1C 07 DD 5C 07 08 8B 45 E8 03 45 D4 DD 45 98 DD 45 90 0F AF 45 14 DC 8D 28 FE FF FF D9 C9 DC 8D 30 FE FF FF C1 E0 04
+***** EXPENSIVE 2454 782
+
+. 2453 804F2DE 181
+. DE E1 DD 1C 07 DD 45 90 DD 45 98 D9 C9 DC 8D 30 FE FF FF D9 C9 DC 8D 28 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 04 FE FF FF DD 45 88 DD 45 80 D9 CC DE C7 D9 CB 0F AF 45 14 DC 8D 18 FE FF FF DD 85 40 FE FF FF D9 CC DC 8D 20 FE FF FF D9 CB C1 E0 04 DE E6 D9 CB DE C4 D9 C9 DE E2 DD 85 58 FE FF FF DD 85 50 FE FF FF D9 CA DC AD 48 FE FF FF D9 C9 DC A5 88 FD FF FF D9 CA DC 85 38 FE FF FF D9 CB DD 1C 07 DD 45 88 DD 45 80 DC 8D 20 FE FF FF D9 C9 DC 8D 18 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 00 FE FF FF DD 85 78 FF FF FF DD 85 70 FF FF FF
+***** EXPENSIVE 2455 783
+
+. 2454 804F393 181
+. 0F AF 45 14 DC 8D 08 FE FF FF D9 C9 DC 8D 10 FE FF FF C1 E0 04 DE E1 DD 1C 07 DD 85 70 FF FF FF DD 85 78 FF FF FF D9 C9 DC 8D 10 FE FF FF D9 C9 DC 8D 08 FE FF FF DE C1 DD 5C 07 08 8B 45 E8 03 85 FC FD FF FF DD 85 68 FF FF FF DD 85 60 FF FF FF 0F AF 45 14 D8 CE D9 C9 D8 CF D9 CE DC 8D 68 FF FF FF D9 CF DC 8D 60 FF FF FF D9 CE C1 E0 04 DE E1 D9 CE DE C5 D9 CD DD 1C 07 D9 CB DD 5C 07 08 8B 45 E8 03 85 F8 FD FF FF DD 85 58 FF FF FF DD 85 50 FF FF FF 0F AF 45 14 D8 CB D9 C9 D8 CD D9 CB DC 8D 58 FF FF FF D9 CD DC 8D 50 FF FF FF D9 CB C1 E0 04
+***** EXPENSIVE 2456 784
+
+. 2455 804F448 130
+. DE E1 D9 CC DE C2 D9 CB DD 1C 07 DD 5C 07 08 8B 45 E8 03 85 F4 FD FF FF DD 85 48 FF FF FF DD 85 40 FF FF FF D9 C9 0F AF 45 14 D8 CA D9 C9 D8 CB D9 CA DC 8D 40 FF FF FF D9 CB DC 8D 48 FF FF FF D9 C9 C1 E0 04 DE E2 DE C2 DD 1C 07 DD 5C 07 08 8B 45 0C C1 E0 04 01 C1 8B 45 0C 01 85 A0 FD FF FF 8B 45 14 C1 E0 04 FF 45 E0 8B 55 D4 FF 45 EC 01 85 98 FD FF FF FF 45 E8 39 55 E0 0F 82 AA F8 FF FF
+***** EXPENSIVE 2457 785
+
+. 2456 8052110 186
+. 8B 45 E8 03 45 D8 0F AF 45 0C D9 04 C6 D9 44 C6 04 8B 45 E8 03 85 7C FF FF FF 0F AF 45 0C D9 04 C6 D9 44 C6 04 8B 45 E8 03 85 70 FF FF FF 0F AF 45 0C D9 04 C6 D9 44 C6 04 D9 CB 8B 45 E8 03 85 68 FF FF FF 0F AF 45 0C D9 5D 80 D9 9D 74 FF FF FF D9 44 C6 04 D9 04 C6 8B 45 E8 03 85 5C FF FF FF 0F AF 45 0C D9 44 C6 04 D9 CB D9 9D 78 FF FF FF D9 04 C6 D9 CC 8B 45 E8 03 85 58 FF FF FF 0F AF 45 0C D9 9D 6C FF FF FF D9 C9 D9 9D 64 FF FF FF D9 C4 D9 04 C6 DC C1 D9 CC D9 9D 60 FF FF FF D9 44 C6 04 D9 C9 D9 9D 54 FF FF FF D9 C4 D8 C1 D9 CD DE E1 D9 85 78 FF FF FF
+***** EXPENSIVE 2458 786
+
+. 2457 80521CA 204
+. D9 CE DE E4 D9 CD D8 C2 D9 45 80 D8 85 60 FF FF FF D9 C9 D9 9D 40 FF FF FF D9 CA D8 AD 78 FF FF FF D9 CB D9 9D 4C FF FF FF D9 85 64 FF FF FF D9 85 74 FF FF FF D8 C2 D9 CB D9 9D 44 FF FF FF D9 CB D9 9D 38 FF FF FF D8 A5 74 FF FF FF D9 CA D8 A5 6C FF FF FF D9 CB D9 9D 50 FF FF FF D9 C9 D9 9D 30 FF FF FF D9 85 64 FF FF FF D9 CA D9 95 2C FF FF FF D9 85 44 FF FF FF D9 C9 D8 85 38 FF FF FF D9 85 40 FF FF FF D9 CC D8 85 6C FF FF FF D9 CA D8 85 54 FF FF FF D9 CC D8 85 50 FF FF FF D9 45 80 D9 CA D9 9D 20 FF FF FF D9 44 D6 04 D9 CA D8 A5 60 FF FF FF D9 CB D9 9D 34 FF FF FF D9 CC D9 95 28 FF FF FF D9 85 30 FF FF FF
+***** EXPENSIVE 2459 787
+
+. 2458 8052296 186
+. D9 C9 D8 04 D6 D9 CA D8 C5 D9 C9 D8 C3 D9 CA D8 C4 D9 C9 D8 85 34 FF FF FF D9 CE D9 95 48 FF FF FF D9 CB D9 9D 3C FF FF FF D9 C9 D9 9D 24 FF FF FF D9 9D 1C FF FF FF D9 CB D9 9D 18 FF FF FF D9 85 88 FE FF FF D8 45 C0 DD 85 78 FE FF FF DE F9 DC A5 70 FE FF FF D9 85 28 FF FF FF D8 C2 D9 C1 DE C9 D9 9D E0 FE FF FF DD 85 68 FE FF FF D9 CB D8 85 34 FF FF FF D9 CB D8 C0 D9 C9 DE CB DC A5 60 FE FF FF D9 CA D9 9D DC FE FF FF D9 C9 DC A5 58 FE FF FF D9 85 54 FF FF FF D9 C9 DC B5 78 FE FF FF D9 C9 D8 E2 D8 C9 D9 9D 14 FF FF FF D9 85 50 FF FF FF D8 A5 34 FF FF FF
+***** EXPENSIVE 2460 788
+
+. 2459 8052350 213
+. DE C9 D9 9D 10 FF FF FF DD 85 60 FE FF FF D8 C0 DC AD 68 FE FF FF DC 85 58 FE FF FF D9 85 34 FF FF FF D9 C9 DC B5 78 FE FF FF D9 C9 D8 A5 40 FF FF FF D9 C1 DE C9 D9 CA D8 A5 44 FF FF FF D9 CA D9 9D 08 FF FF FF DD 85 58 FE FF FF D9 C9 DE CA D8 C0 D9 C9 D9 9D 0C FF FF FF D8 AD 88 FE FF FF D9 85 44 FF FF FF D9 C9 DC B5 78 FE FF FF D9 C9 D8 A5 54 FF FF FF D9 9D 44 FF FF FF D9 C0 D8 8D 44 FF FF FF D9 9D 04 FF FF FF D9 85 40 FF FF FF D8 A5 50 FF FF FF DE C9 D9 9D 00 FF FF FF D9 85 84 FE FF FF D8 65 B4 DD 85 78 FE FF FF DE F9 DC 8D F0 FE FF FF D9 85 24 FF FF FF D8 85 4C FF FF FF D9 C1 DE C9 D9 9D FC FE FF FF D9 85 20 FF FF FF D8 C2 DE C9
+***** EXPENSIVE 2461 789
+
+. 2460 8052425 219
+. DD 85 50 FE FF FF D8 C0 DC A5 48 FE FF FF DC 85 40 FE FF FF D9 C9 D9 9D EC FE FF FF DC B5 78 FE FF FF D9 85 4C FF FF FF D9 C9 DC 8D F0 FE FF FF D9 C9 D8 A5 30 FF FF FF D8 C9 D9 9D E8 FE FF FF D9 C1 D8 A5 2C FF FF FF DE C9 D9 9D E4 FE FF FF DD 85 48 FE FF FF D8 C0 DC AD 50 FE FF FF DC A5 40 FE FF FF D9 85 30 FF FF FF D9 C9 DC B5 78 FE FF FF DC 8D F0 FE FF FF D9 C9 D8 A5 3C FF FF FF D9 9D 30 FF FF FF D9 85 2C FF FF FF D9 C1 D8 8D 30 FF FF FF D9 C9 D8 A5 38 FF FF FF D9 C9 D9 5D EC DE C9 D9 45 EC D9 C9 D9 5D EC DD 85 40 FE FF FF D8 C0 D8 85 84 FE FF FF DC B5 78 FE FF FF DC 8D F0 FE FF FF D9 85 3C FF FF FF D8 A5 4C FF FF FF D9 C1 DE C9 D9 85 1C FF FF FF
+***** EXPENSIVE 2462 790
+
+. 2461 8052500 213
+. D8 85 E0 FE FF FF D9 85 38 FF FF FF DE E5 D9 9D E0 FE FF FF D9 85 14 FF FF FF D9 CA DE CC D9 C9 D8 85 0C FF FF FF D9 85 18 FF FF FF D8 85 DC FE FF FF D9 C9 D9 9D D8 FE FF FF D9 85 04 FF FF FF D8 A5 0C FF FF FF D9 C9 D9 9D DC FE FF FF D9 85 10 FF FF FF D8 85 08 FF FF FF D9 C9 D9 9D D0 FE FF FF D9 85 10 FF FF FF D9 45 EC D9 CA D9 9D D4 FE FF FF D9 CA D9 5D EC D9 85 00 FF FF FF D9 85 14 FF FF FF D9 CB D9 E0 D9 45 EC D9 CA D8 A5 08 FF FF FF D9 CE D9 5D EC D8 A5 00 FF FF FF D9 CB D9 E0 D9 45 EC D9 CE D9 9D CC FE FF FF D8 A5 04 FF FF FF D9 CB D9 9D C8 FE FF FF D9 85 E8 FE FF FF D8 C4 D9 9D C4 FE FF FF D9 85 E0 FE FF FF D8 85 D8 FE FF FF
+***** EXPENSIVE 2463 791
+
+. 2462 80525D5 212
+. D9 9D D8 FE FF FF D9 85 DC FE FF FF D8 85 D4 FE FF FF D9 9D D4 FE FF FF D9 85 E0 FE FF FF D8 85 D0 FE FF FF D9 9D D0 FE FF FF D9 85 DC FE FF FF D8 85 CC FE FF FF D9 9D CC FE FF FF D9 85 DC FE FF FF D8 85 C8 FE FF FF D9 85 E4 FE FF FF D9 C9 D9 9D C8 FE FF FF D9 85 C4 FE FF FF D8 85 FC FE FF FF D9 C9 D8 C3 D9 C9 D9 9D C4 FE FF FF D8 85 EC FE FF FF D9 85 D8 FE FF FF D8 C1 D9 CD D8 EA D9 CB D8 EE D9 CD D9 9D C0 FE FF FF D9 C9 D9 E0 D9 85 D4 FE FF FF D9 CE D9 E0 D9 C9 D8 A5 E8 FE FF FF D9 C9 D8 A5 E4 FE FF FF D9 CE D8 A5 C4 FE FF FF D9 CB D8 85 FC FE FF FF D9 C9 D8 85 FC FE FF FF D9 CE D8 85 EC FE FF FF D9 CC D8 85 E0 FE FF FF D9 CD
+***** EXPENSIVE 2464 792
+
+. 2463 80526A9 184
+. D8 85 EC FE FF FF D9 CB D9 9D BC FE FF FF D9 85 C8 FE FF FF D8 E6 D9 9D B8 FE FF FF D9 85 D0 FE FF FF D8 E3 D9 CB D8 85 D0 FE FF FF D9 CA D8 AD D8 FE FF FF D9 CB D9 9D B4 FE FF FF D9 C9 D9 9D AC FE FF FF D9 85 CC FE FF FF D9 85 CC FE FF FF D9 C9 D8 C2 D9 C9 DE E2 D9 C4 D9 85 C8 FE FF FF D9 CC D9 9D A4 FE FF FF D9 85 D4 FE FF FF D9 CE D8 E5 D9 C9 DE C5 D9 CB DE C6 D9 CC D8 85 C4 FE FF FF D9 85 1C FF FF FF D9 85 18 FF FF FF D9 C9 D9 1C CF D9 5C CF 04 D9 CC D9 9D B0 FE FF FF D9 CB D9 9D A0 FE FF FF 8B 45 E4 D9 45 B0 D9 45 AC D9 45 B0 D9 45 AC D9 CA
+***** EXPENSIVE 2465 793
+
+. 2464 8052761 175
+. 03 45 D0 D8 8D BC FE FF FF D9 CA D8 8D C0 FE FF FF D9 CB D8 8D C0 FE FF FF D9 C9 D8 8D BC FE FF FF D9 C9 0F AF 45 14 DE E2 DE C2 D9 1C C7 D9 5C C7 04 D9 45 A8 8B 45 E4 D9 45 A4 03 85 9C FE FF FF D8 8D B8 FE FF FF D9 C9 D8 CB 0F AF 45 14 DE E1 D9 C9 D9 95 A8 FE FF FF D9 C9 D9 1C C7 D9 45 A8 D8 8D B8 FE FF FF D9 CA D8 4D A4 DE C2 D9 45 9C D9 45 A0 D9 C9 D8 8D B0 FE FF FF D9 C9 D8 8D B4 FE FF FF D9 CB D9 54 C7 04 D9 CB DE E1 8B 45 E4 D9 45 A0 D9 45 9C 03 85 98 FE FF FF D8 8D B4 FE FF FF D9 C9 D8 8D B0 FE FF FF 0F AF 45 14
+***** EXPENSIVE 2466 794
+
+. 2465 8052810 153
+. DE C1 D9 C9 D9 1C C7 D9 5C C7 04 D9 45 98 8B 45 E4 D9 45 94 D9 45 94 D9 C9 03 85 94 FE FF FF D8 CD D9 CA D8 8D AC FE FF FF D9 CD D8 4D 98 D9 C9 D8 8D AC FE FF FF D9 CD 0F AF 45 14 DE E2 DE C4 D9 1C C7 D9 CA D9 5C C7 04 D9 45 90 8B 45 E4 D9 45 8C 03 85 90 FE FF FF D8 CC D9 C9 D8 CB 0F AF 45 14 DE E1 D9 C9 D9 9D B8 FE FF FF D9 1C C7 D9 45 8C DE C9 D9 C9 D8 4D 90 DE C1 D9 45 84 D9 45 88 D9 C9 D8 8D A0 FE FF FF D9 C9 D8 8D A4 FE FF FF D9 CA D9 5C C7 04 DE E9
+***** EXPENSIVE 2467 795
+
+. 2466 80528A9 73
+. 8B 45 E4 D9 45 88 D9 45 84 D9 C9 03 85 8C FE FF FF D8 8D A0 FE FF FF D9 C9 D8 8D A4 FE FF FF 0F AF 45 14 DE C1 D9 C9 D9 1C C7 D9 5C C7 04 FF 45 DC 8B 45 D0 03 55 0C FF 45 E8 03 4D 14 FF 45 E4 39 45 DC 0F 82 1E F8 FF FF
+
+. 2467 804CD1C 10
+. 31 D2 89 C8 F7 F6 85 D2 74 0F
+***** EXPENSIVE 2469 796
+
+. 2468 805A1A3 101
+. 8B 45 14 8B 8D 0C FF FF FF 8B BD 10 FF FF FF DD 85 40 FF FF FF DD 85 38 FF FF FF D9 C9 0F AF 85 04 FF FF FF 8B 95 08 FF FF FF 49 4F DD 5D B8 DD 5D B0 C7 45 D0 00 00 00 00 89 45 A8 8B 75 18 89 55 AC 89 8D E4 FE FF FF 89 BD 00 FF FF FF 8B 45 D0 DD 85 38 FF FF FF 85 C0 D9 C0 DD 85 40 FF FF FF D9 C2 74 30
+
+. 2469 8055222 28
+. 8B 45 D8 40 C7 45 E4 01 00 00 00 89 85 6C FF FF FF D1 E8 39 45 E4 0F 83 85 02 00 00
+
+. 2470 805523E 19
+. C7 45 E0 00 00 00 00 8B 55 D4 39 55 E0 0F 83 5E 02 00 00
+***** EXPENSIVE 2472 797
+
+. 2471 8055251 95
+. DD 83 34 F6 FF FF DD 83 44 F6 FF FF D9 C9 DD 9D 60 FF FF FF DD 9D 58 FF FF FF C7 85 28 FF FF FF 00 00 00 00 C7 85 24 FF FF FF 00 00 00 00 C7 85 20 FF FF FF 00 00 00 00 C7 45 D0 00 00 00 00 8B 45 18 DD 85 60 FF FF FF DD 85 58 FF FF FF D9 C9 39 45 D0 DD 5D A8 DD 5D A0 0F 83 D8 01 00 00
+***** EXPENSIVE 2473 798
+
+. 2472 80552B0 87
+. 8B 95 6C FF FF FF D1 EA 8B 45 E4 29 D0 40 8B BD 24 FF FF FF 8B 8D 20 FF FF FF 89 85 2C FF FF FF 0F AF 7D D8 8B 85 28 FF FF FF 89 55 94 89 4D 90 C7 85 30 FF FF FF 00 00 00 00 89 7D 9C 89 45 98 8B 45 D0 DD 85 58 FF FF FF 85 C0 D9 C0 DD 85 60 FF FF FF D9 C2 74 30
+
+. 2473 8055337 11
+. 31 F6 3B 75 18 0F 83 83 04 00 00
+
+. 2474 8055342 46
+. 8B 45 98 0F AF 45 D8 89 85 F0 FE FF FF 8B 7D D8 8B 45 E4 D1 E0 8B 55 9C D1 E7 29 85 F0 FE FF FF 8D 4C 02 FF 89 BD 34 FF FF FF 85 F6 74 20
+
+. 2475 8055390 13
+. 8B 45 18 29 F0 39 C6 0F 83 F7 03 00 00
+***** EXPENSIVE 2477 799
+
+. 2476 805539D 72
+. 8B 45 0C 0F AF C1 8B 55 08 DD 04 C2 8D 04 CD 08 00 00 00 0F AF 45 0C DD 04 10 D9 C3 D9 C3 D9 C9 D8 CB D9 C9 D8 CA D9 CB D8 CC D9 CA D8 CD D9 C9 46 DE E3 DE C1 D9 CD 03 8D 34 FF FF FF 3B 75 18 DE C1 D9 CB DE C4 73 0B
+***** EXPENSIVE 2478 800
+
+. 2477 80553E5 11
+. D9 CA D9 CB D9 CA E9 7C FF FF FF
+
+. 2478 805536C 4
+. 85 F6 74 20
+***** EXPENSIVE 2480 801
+
+. 2479 8055370 45
+. DD 45 A8 D8 CA DD 45 A0 DD 45 A0 D9 C9 D8 CB D9 C9 DE CC D9 CA DC 4D A8 D9 C9 DE E2 DE C2 D9 C9 8B 45 18 29 F0 39 C6 0F 83 F7 03 00 00
+***** EXPENSIVE 2481 802
+
+. 2480 8055794 49
+. 8B BD F0 FE FF FF 0F AF 45 D8 8D 44 47 FF 8B 55 0C 0F AF D0 8D 04 C5 08 00 00 00 8B 7D 08 0F AF 45 0C DD 04 38 DD 04 D7 D9 C9 D9 E0 E9 F2 FB FF FF
+***** EXPENSIVE 2482 803
+
+. 2481 80553B7 46
+. D9 C3 D9 C3 D9 C9 D8 CB D9 C9 D8 CA D9 CB D8 CC D9 CA D8 CD D9 C9 46 DE E3 DE C1 D9 CD 03 8D 34 FF FF FF 3B 75 18 DE C1 D9 CB DE C4 73 0B
+***** EXPENSIVE 2483 804
+
+. 2482 80553F0 11
+. DD D8 DD D8 8B 45 E4 85 C0 74 0B
+
+. 2483 80553FB 11
+. 8B 45 D0 85 C0 0F 85 74 03 00 00
+***** EXPENSIVE 2485 805
+
+. 2484 8055406 130
+. DD 85 60 FF FF FF DD 85 58 FF FF FF 8B 55 E4 8B 4D 90 8D 04 51 8B BD 30 FF FF FF 8D 44 07 FF D9 C1 D9 C1 D9 C9 8B 55 14 0F AF D0 D8 CC D9 C9 D8 CD D9 CB 8D 04 C5 08 00 00 00 DE CD D9 C9 DE CB 8B 4D 10 0F AF 45 14 DE E1 D9 CA DE C1 D9 C9 DD 1C D1 DD 1C 08 8B 45 DC 01 C7 8B 8D 2C FF FF FF 89 BD 30 FF FF FF 8B 55 94 FF 45 D0 8B 7D 18 8D 54 11 FF 39 7D D0 89 95 2C FF FF FF 0F 82 68 FE FF FF
+***** EXPENSIVE 2486 806
+
+. 2485 80552F0 23
+. 8B 45 D0 DD 85 58 FF FF FF 85 C0 D9 C0 DD 85 60 FF FF FF D9 C2 74 30
+***** EXPENSIVE 2487 807
+
+. 2486 8055307 59
+. DD 45 A8 DD 45 A0 DD 45 A8 DD 45 A0 D9 CB DC 4D C8 D9 CA DC 4D C0 D9 C9 DC 4D C0 D9 CB DC 4D C8 D9 CA DE E1 D9 CA DE C1 D9 C9 DD 5D A8 DD 5D A0 31 F6 3B 75 18 0F 83 83 04 00 00
+***** EXPENSIVE 2488 808
+
+. 2487 805577A 23
+. 8B 85 2C FF FF FF C1 E0 04 03 45 24 DD 40 F0 DD 40 F8 E9 81 FC FF FF
+***** EXPENSIVE 2489 809
+
+. 2488 8055412 118
+. 8B 55 E4 8B 4D 90 8D 04 51 8B BD 30 FF FF FF 8D 44 07 FF D9 C1 D9 C1 D9 C9 8B 55 14 0F AF D0 D8 CC D9 C9 D8 CD D9 CB 8D 04 C5 08 00 00 00 DE CD D9 C9 DE CB 8B 4D 10 0F AF 45 14 DE E1 D9 CA DE C1 D9 C9 DD 1C D1 DD 1C 08 8B 45 DC 01 C7 8B 8D 2C FF FF FF 89 BD 30 FF FF FF 8B 55 94 FF 45 D0 8B 7D 18 8D 54 11 FF 39 7D D0 89 95 2C FF FF FF 0F 82 68 FE FF FF
+
+. 2489 8055488 39
+. 8B 45 18 8B 55 D8 FF 45 E0 8B 4D D4 01 85 28 FF FF FF 01 85 24 FF FF FF 01 95 20 FF FF FF 39 4D E0 0F 82 DA FD FF FF
+
+. 2490 80554AF 20
+. 8B 85 6C FF FF FF FF 45 E4 D1 E8 39 45 E4 0F 82 7B FD FF FF
+
+. 2491 80554C3 13
+. F7 45 D8 01 00 00 00 0F 85 77 02 00 00
+***** EXPENSIVE 2493 810
+
+. 2492 805C9E0 95
+. 8B 45 14 8B B5 4C FF FF FF 8B BD 50 FF FF FF D9 45 80 D9 85 7C FF FF FF D9 C9 0F AF 85 44 FF FF FF 8B 95 48 FF FF FF 4E 4F D9 5D C0 D9 5D BC C7 45 CC 00 00 00 00 89 45 B4 8B 4D 18 89 55 B8 89 B5 24 FF FF FF 89 BD 40 FF FF FF 8B 45 CC D9 85 7C FF FF FF 85 C0 D9 C0 D9 45 80 D9 C2 74 30
+
+. 2493 8057B1F 25
+. 8B 45 D4 40 C7 45 E0 01 00 00 00 89 45 8C D1 E8 39 45 E0 0F 83 6A 02 00 00
+
+. 2494 8057B38 19
+. C7 45 DC 00 00 00 00 8B 55 D0 39 55 DC 0F 83 46 02 00 00
+***** EXPENSIVE 2496 811
+
+. 2495 8057B4B 83
+. D9 83 4C F6 FF FF D9 83 50 F6 FF FF D9 C9 D9 5D 88 D9 5D 84 C7 85 54 FF FF FF 00 00 00 00 C7 85 50 FF FF FF 00 00 00 00 C7 85 4C FF FF FF 00 00 00 00 C7 45 CC 00 00 00 00 8B 45 18 D9 45 88 D9 45 84 D9 C9 39 45 CC D9 5D B4 D9 5D B0 0F 83 CC 01 00 00
+***** EXPENSIVE 2497 812
+
+. 2496 8057B9E 79
+. 8B 55 8C D1 EA 8B 45 E0 29 D0 40 8B BD 50 FF FF FF 8B 8D 4C FF FF FF 89 85 58 FF FF FF 0F AF 7D D4 8B 85 54 FF FF FF 89 55 A4 89 4D A0 C7 85 5C FF FF FF 00 00 00 00 89 7D AC 89 45 A8 90 8B 45 CC D9 45 84 85 C0 D9 C0 D9 45 88 D9 C2 74 30
+
+. 2497 8057C1D 11
+. 31 F6 3B 75 18 0F 83 41 04 00 00
+
+. 2498 8057C28 48
+. 8B 45 A8 0F AF 45 D4 89 85 10 FF FF FF 8B 7D D4 8B 45 E0 D1 E0 8B 55 AC D1 E7 29 85 10 FF FF FF 8D 4C 02 FF 89 BD 60 FF FF FF 89 F6 85 F6 74 20
+
+. 2499 8057C78 13
+. 8B 45 18 29 F0 39 C6 0F 83 B3 03 00 00
+***** EXPENSIVE 2501 813
+
+. 2500 8057C85 72
+. 8B 45 0C 0F AF C1 8B 55 08 D9 04 82 8D 04 8D 04 00 00 00 0F AF 45 0C D9 04 10 D9 C3 D9 C3 D9 C9 D8 CB D9 C9 D8 CA D9 CB D8 CC D9 CA D8 CD D9 C9 46 DE E3 DE C1 D9 CD 03 8D 60 FF FF FF 3B 75 18 DE C1 D9 CB DE C4 73 0B
+***** EXPENSIVE 2502 814
+
+. 2501 8057CCD 11
+. D9 CA D9 CB D9 CA E9 7C FF FF FF
+
+. 2502 8057C54 4
+. 85 F6 74 20
+***** EXPENSIVE 2504 815
+
+. 2503 8057C58 45
+. D9 45 B4 D8 CA D9 45 B0 D9 45 B0 D9 C9 D8 CB D9 C9 DE CC D9 CA D8 4D B4 D9 C9 DE E2 DE C2 D9 C9 8B 45 18 29 F0 39 C6 0F 83 B3 03 00 00
+***** EXPENSIVE 2505 816
+
+. 2504 8058038 49
+. 8B BD 10 FF FF FF 0F AF 45 D4 8D 44 47 FF 8B 55 0C 0F AF D0 8D 04 85 04 00 00 00 8B 7D 08 0F AF 45 0C D9 04 38 D9 04 97 D9 C9 D9 E0 E9 36 FC FF FF
+***** EXPENSIVE 2506 817
+
+. 2505 8057C9F 46
+. D9 C3 D9 C3 D9 C9 D8 CB D9 C9 D8 CA D9 CB D8 CC D9 CA D8 CD D9 C9 46 DE E3 DE C1 D9 CD 03 8D 60 FF FF FF 3B 75 18 DE C1 D9 CB DE C4 73 0B
+***** EXPENSIVE 2507 818
+
+. 2506 8057CD8 11
+. DD D8 DD D8 8B 45 E0 85 C0 74 0B
+
+. 2507 8057CE3 11
+. 8B 45 CC 85 C0 0F 85 33 03 00 00
+***** EXPENSIVE 2509 819
+
+. 2508 8057CEE 124
+. D9 45 88 D9 45 84 8B 7D E0 8B 55 A0 8D 04 7A 8B 8D 5C FF FF FF 8D 44 01 FF D9 C1 D9 C1 D9 C9 8B 55 14 0F AF D0 D8 CC D9 C9 D8 CD D9 CB 8D 04 85 04 00 00 00 DE CD D9 C9 DE CB 8B 7D 10 0F AF 45 14 DE E1 D9 CA DE C1 D9 C9 D9 1C 97 D9 1C 38 8B 45 D8 01 C1 89 8D 5C FF FF FF 8B 55 A4 8B 8D 58 FF FF FF FF 45 CC 8B 7D 18 8D 54 11 FF 39 7D CC 89 95 58 FF FF FF 0F 82 72 FE FF FF
+***** EXPENSIVE 2510 820
+
+. 2509 8057BDC 17
+. 8B 45 CC D9 45 84 85 C0 D9 C0 D9 45 88 D9 C2 74 30
+***** EXPENSIVE 2511 821
+
+. 2510 8057BED 59
+. D9 45 B4 D9 45 B0 D9 45 B4 D9 45 B0 D9 CB D8 4D C8 D9 CA D8 4D C4 D9 C9 D8 4D C4 D9 CB D8 4D C8 D9 CA DE E1 D9 CA DE C1 D9 C9 D9 5D B4 D9 5D B0 31 F6 3B 75 18 0F 83 41 04 00 00
+***** EXPENSIVE 2512 822
+
+. 2511 8058021 23
+. 8B 95 58 FF FF FF 8B 4D 24 8D 04 D1 D9 40 F8 D9 40 FC E9 BC FC FF FF
+***** EXPENSIVE 2513 823
+
+. 2512 8057CF4 118
+. 8B 7D E0 8B 55 A0 8D 04 7A 8B 8D 5C FF FF FF 8D 44 01 FF D9 C1 D9 C1 D9 C9 8B 55 14 0F AF D0 D8 CC D9 C9 D8 CD D9 CB 8D 04 85 04 00 00 00 DE CD D9 C9 DE CB 8B 7D 10 0F AF 45 14 DE E1 D9 CA DE C1 D9 C9 D9 1C 97 D9 1C 38 8B 45 D8 01 C1 89 8D 5C FF FF FF 8B 55 A4 8B 8D 58 FF FF FF FF 45 CC 8B 7D 18 8D 54 11 FF 39 7D CC 89 95 58 FF FF FF 0F 82 72 FE FF FF
+
+. 2513 8057D6A 39
+. 8B 45 18 8B 55 D4 FF 45 DC 8B 4D D0 01 85 54 FF FF FF 01 85 50 FF FF FF 01 95 4C FF FF FF 39 4D DC 0F 82 EC FD FF FF
+
+. 2514 8057D91 17
+. 8B 45 8C FF 45 E0 D1 E8 39 45 E0 0F 82 96 FD FF FF
+
+. 2515 8057DA2 13
+. F7 45 D4 01 00 00 00 0F 85 42 02 00 00
+***** EXPENSIVE 2517 824
+
+. 2516 8059ADC 206
+. 8B 95 CC FE FF FF 03 55 E4 8B 75 E4 8D 0C 16 8D 34 0E 8B 7D E4 01 F7 8B 45 0C 89 BD 9C FE FF FF 0F AF 85 CC FE FF FF 8B 7D 08 DD 04 C7 8B 85 CC FE FF FF 8D 04 C5 08 00 00 00 0F AF 45 0C DD 9D 78 FF FF FF DD 04 38 8B 45 0C 0F AF C2 DD 04 C7 8B 45 0C 0F AF C1 DD 04 C7 8B 45 0C 0F AF C6 DD 04 C7 8B 45 0C 0F AF 85 9C FE FF FF 8D 14 D5 08 00 00 00 8D 0C CD 08 00 00 00 8D 34 F5 08 00 00 00 DD 04 C7 0F AF 55 0C 0F AF 4D 0C 0F AF 75 0C 8B 85 9C FE FF FF DD 04 3A DD 04 39 D9 CE DD 9D 70 FF FF FF DD 04 3E 8D 3C C5 08 00 00 00 0F AF 7D 0C 8B 55 08 DD 9D 68 FF FF FF DD 04 17 D9 CA DD 9D 60 FF FF FF D9 C9 DD 9D 58 FF FF FF
+***** EXPENSIVE 2518 825
+
+. 2517 8059BAA 152
+. DD 45 B8 DD 45 B0 D9 C9 D8 CD D9 C9 D8 CA DE E9 DD 9D 50 FF FF FF DD 45 A8 D8 CB DD 9D A8 FE FF FF DD 45 A0 D8 CD DC AD A8 FE FF FF D9 C9 DC 4D B8 D9 C9 DD 9D A8 FE FF FF D9 CB DC 4D B0 DD 45 98 DD 45 90 DC 8D 68 FF FF FF D9 CA DE C5 D8 CA D9 CC DD 9D 48 FF FF FF DE EB DD 45 88 DD 45 80 D9 CB DC 4D A0 D9 CB DC 8D 58 FF FF FF D9 CD DC 4D A8 D9 C9 DC 8D 60 FF FF FF D9 C9 DE C3 DD 45 98 D9 C9 DE E5 DD 45 80 DD 45 88 DD 85 A8 FE FF FF D9 CC DC 4D 90 D9 CA
+***** EXPENSIVE 2519 826
+
+. 2518 8059C42 180
+. DC 8D 60 FF FF FF D9 CB DC 8D 68 FF FF FF D9 C9 DC 8D 58 FF FF FF D9 CC D8 C6 D9 C9 DE C2 D9 CB DE C2 D9 CA DD 9D 40 FF FF FF DD 85 50 FF FF FF D9 C3 D9 C9 D8 C6 DD 85 48 FF FF FF D9 CA D8 C4 D9 CF DC AD 50 FF FF FF D9 CA D8 C3 D9 CF DD 9D 38 FF FF FF D9 C9 DD 9D 30 FF FF FF D9 C9 DC AD 48 FF FF FF DD 9D 28 FF FF FF DD 85 40 FF FF FF D8 C1 DD 9D 20 FF FF FF DD 85 38 FF FF FF D8 C5 DD 85 A8 FE FF FF D9 C9 DD 9D 18 FF FF FF DE E4 D9 CA DE E1 DD 85 E8 FE FF FF D9 FA DC 8D E0 FE FF FF D9 CA DC A5 40 FF FF FF DE CA DD 85 E8 FE FF FF D9 FA
+***** EXPENSIVE 2520 827
+
+. 2519 8059CF6 169
+. D9 CC DC A5 38 FF FF FF D9 CC DC 8D E0 FE FF FF DE CC DD 85 20 FF FF FF DC 8D E0 FE FF FF DC AD 78 FF FF FF DD 85 18 FF FF FF DC 8D E0 FE FF FF D9 C1 D9 C9 DC AD 70 FF FF FF D9 C9 D8 C4 DD 9D 10 FF FF FF D9 C9 DE E3 D9 C0 DD 45 D0 D9 C9 D8 C6 D9 CA DE E6 D9 E0 DD 85 30 FF FF FF DD 45 C8 D8 CE D9 C9 D8 CA DE E1 DD 45 C8 D8 CC D9 CA DC 8D 28 FF FF FF DE E2 DD 45 C8 D9 CB DD 9D 08 FF FF FF D9 CA D9 E0 DD 85 30 FF FF FF D9 CE DC 4D D0 D9 CE D8 C9 DE C6 DD 85 78 FF FF FF DC 85 20 FF FF FF DD 9D 78 FF FF FF
+***** EXPENSIVE 2521 828
+
+. 2520 8059D9F 207
+. DD 85 70 FF FF FF DC 85 18 FF FF FF DD 9D 70 FF FF FF DD 85 10 FF FF FF D9 CC DC 4D D0 D9 CC D8 E2 D9 C9 DC 8D 28 FF FF FF DE C4 DD 9D A8 FE FF FF DD 85 08 FF FF FF D9 C4 D8 E4 D9 CA DC 85 10 FF FF FF D9 C9 D8 C3 D9 CB DC AD 08 FF FF FF D9 CA DD 9D 00 FF FF FF DD 95 10 FF FF FF D9 C9 DD 9D 08 FF FF FF D9 CB 8B 95 D4 FE FF FF 03 95 F4 FE FF FF 8B 85 F4 FE FF FF 8B 4D DC 01 D0 2B 4D EC 8B B5 D0 FE FF FF 89 85 98 FE FF FF 8D 4C 4E FF 8B 85 F4 FE FF FF 8B 75 14 8D 3C 08 0F AF B5 D4 FE FF FF 8B 45 10 DE C2 DD 85 78 FF FF FF DD 1C F0 8B 85 D4 FE FF FF 8D 04 C5 08 00 00 00 8B 75 10 0F AF 45 14 DD 85 70 FF FF FF DD 1C 30
+***** EXPENSIVE 2522 829
+
+. 2521 8059E6E 189
+. 8B 45 14 0F AF C2 DD 85 A8 FE FF FF DD 1C C6 D9 C4 8B 45 14 8D 14 D5 08 00 00 00 0F AF 55 14 0F AF 85 98 FE FF FF D8 C4 D9 CD DE E4 DD 85 00 FF FF FF D9 C9 DD 1C 32 DD 1C C6 D9 CB 8B 85 98 FE FF FF 8D 34 C5 08 00 00 00 8B 45 14 8B 55 10 0F AF C1 0F AF 75 14 8D 0C CD 08 00 00 00 0F AF 4D 14 DD 1C 16 DD 1C C2 80 B5 0F FF FF FF 80 8B 45 14 0F AF C7 8D 3C FD 08 00 00 00 0F AF 7D 14 DD 85 08 FF FF FF D9 C9 D9 E0 D9 C9 DD 1C 11 D9 C9 FF 45 E8 DD 1C C2 8B 4D E0 8B 45 18 DD 1C 17 8B 55 DC 01 85 D4 FE FF FF 01 85 D0 FE FF FF 01 95 CC FE FF FF 39 4D E8 0F 82 B1 FB FF FF
+***** EXPENSIVE 2523 830
+
+. 2522 8054ED8 159
+. 8B B5 D4 FE FF FF 8B 45 0C 8B 7D 08 0F AF C6 DD 04 C7 8D 04 F5 08 00 00 00 8B 4D D0 0F AF 45 0C 8D 14 4E DD 04 38 D9 C9 8B 45 0C 0F AF C2 D8 C0 DD 04 C7 8D 0C 8E D8 C0 D9 C1 8D 14 D5 08 00 00 00 0F AF 55 0C 0F AF 4D 0C D8 C1 DD 85 E8 FE FF FF DD 04 3A DD 04 CF D9 CA D8 CB D9 CD DD 9D A8 FE FF FF D9 CD D8 C0 D9 CD D8 C0 D9 CC D8 E1 DD 85 F0 FE FF FF D9 FA D9 CC DC AD A8 FE FF FF D9 CC DC 8D E8 FE FF FF D8 CC D9 CC DD 9D A8 FE FF FF DD 45 E0 DD 45 D8 D9 C9 D8 CE D9 C9 D8 CF
+***** EXPENSIVE 2524 831
+
+. 2523 8054F77 140
+. 8B 95 E4 FE FF FF DE C1 03 55 D4 8B 45 D4 DD 45 E0 8D 0C 10 DE CF D9 CD DC 4D D8 DE EE 8D 34 08 D9 C3 01 F0 D8 C1 D9 CC 8B 7D 14 0F AF BD E4 FE FF FF DE E1 89 85 A4 FE FF FF D9 C3 D9 CB 8B 45 10 DE C2 D9 C9 DD 1C F8 0F AF 55 14 D9 C0 D9 CA D8 E4 D9 C9 D9 E0 D9 CB D9 E0 D9 C9 8B BD A4 FE FF FF DD 1C D0 D9 C9 0F AF 4D 14 0F AF 75 14 0F AF 7D 14 8B 55 D0 D8 E4 D9 CA DE E4 DE E2 DD 1C C8 D9 C9 FF 45 E8 DD 1C F0 8B 4D CC
+***** EXPENSIVE 2525 832
+
+. 2524 8055003 27
+. DD 1C F8 8D 04 92 01 95 E4 FE FF FF 01 85 D4 FE FF FF 39 4D E8 0F 82 BA FE FF FF
+***** EXPENSIVE 2526 833
+
+. 2525 805C19C 210
+. 8B 95 9C FE FF FF 03 55 E0 8B 75 E0 8D 0C 16 8D 34 0E 8B 7D E0 01 F7 8B 45 0C 89 BD 7C FE FF FF 0F AF 85 9C FE FF FF 8B 7D 08 D9 04 87 8B 85 9C FE FF FF 8D 04 85 04 00 00 00 0F AF 45 0C D9 9D 78 FF FF FF D9 04 38 8B 45 0C 0F AF C2 D9 9D 74 FF FF FF D9 04 87 8B 45 0C 0F AF C1 D9 04 87 8B 45 0C 0F AF C6 D9 9D 6C FF FF FF 8D 0C 8D 04 00 00 00 D9 04 87 8B 45 0C 0F AF 85 7C FE FF FF 0F AF 4D 0C 8D 14 95 04 00 00 00 8D 34 B5 04 00 00 00 D9 04 87 D9 CA 0F AF 55 0C D9 9D 70 FF FF FF 0F AF 75 0C D9 04 39 8B 85 7C FE FF FF D9 04 3A D9 C9 D9 9D 68 FF FF FF D9 04 3E 8D 3C 85 04 00 00 00 0F AF 7D 0C 8B 55 08 D9 9D 60 FF FF FF D9 04 17
+***** EXPENSIVE 2527 834
+
+. 2526 805C26E 185
+. D9 CA D9 9D 64 FF FF FF D9 C9 D9 9D 5C FF FF FF D9 45 98 D9 45 94 D9 C9 D8 8D 70 FF FF FF D9 C9 D8 CA DE E9 D9 45 90 D8 8D 6C FF FF FF D9 9D 58 FF FF FF D9 45 8C D9 45 94 D9 C9 D8 8D 68 FF FF FF D9 C9 D8 8D 70 FF FF FF D9 C9 D8 AD 58 FF FF FF D9 CB D8 4D 98 DE C1 D9 CA D9 9D 58 FF FF FF D9 45 90 D9 45 8C D8 8D 6C FF FF FF D9 C9 D8 8D 68 FF FF FF DE C1 D9 9D 54 FF FF FF D9 45 84 D9 45 88 D8 8D 64 FF FF FF D9 C9 D8 8D 60 FF FF FF DE E9 D9 9D 50 FF FF FF D9 45 84 D9 45 88 D9 C9 D8 8D 64 FF FF FF D9 C9 D8 8D 60 FF FF FF DE C1 D9 9D 4C FF FF FF D9 45 80
+***** EXPENSIVE 2528 835
+
+. 2527 805C327 209
+. D8 CB D9 9D 48 FF FF FF D9 85 7C FF FF FF D8 8D 5C FF FF FF D8 AD 48 FF FF FF D9 9D 48 FF FF FF D9 45 80 D9 85 48 FF FF FF D8 C2 D9 C9 D8 8D 5C FF FF FF D9 CC D8 8D 7C FF FF FF DE C4 D9 9D 44 FF FF FF D9 C1 D8 C3 D9 CA DE E3 D9 85 54 FF FF FF D9 CA D9 9D 40 FF FF FF D9 C9 D8 A5 4C FF FF FF D9 85 58 FF FF FF D9 85 54 FF FF FF D9 C9 D8 85 50 FF FF FF D9 C9 D8 85 4C FF FF FF D9 CA D9 9D 34 FF FF FF D9 85 44 FF FF FF D9 85 40 FF FF FF D8 C3 D9 CC D8 A5 48 FF FF FF D9 85 58 FF FF FF D9 CA D8 C3 D9 CD D9 9D 2C FF FF FF D9 95 3C FF FF FF D9 CD D9 95 38 FF FF FF D9 C9 D8 A5 50 FF FF FF D9 CC D9 9D 30 FF FF FF DD 85 B8 FE FF FF
+***** EXPENSIVE 2529 836
+
+. 2528 805C3F8 224
+. D9 FA D9 CA D8 AD 44 FF FF FF D9 CA DC 8D B0 FE FF FF D8 CA D9 5D EC D9 C9 D9 9D 44 FF FF FF D9 45 EC DD 85 B8 FE FF FF D9 FA D9 CB D8 AD 40 FF FF FF D9 CB DC 8D B0 FE FF FF D8 CB D9 9D 28 FF FF FF DD 85 B0 FE FF FF D8 8D 30 FF FF FF D8 AD 78 FF FF FF D9 9D 24 FF FF FF DD 85 B0 FE FF FF D8 8D 2C FF FF FF D8 AD 74 FF FF FF D9 9D 20 FF FF FF D9 85 24 FF FF FF D8 C1 D9 9D 1C FF FF FF D9 85 20 FF FF FF D8 85 28 FF FF FF D9 9D 18 FF FF FF D9 45 D4 D9 E0 D9 95 08 FF FF FF D8 CD D9 9D 0C FF FF FF D8 AD 24 FF FF FF D9 45 D0 D9 C9 D9 9D 14 FF FF FF D8 CB D9 85 20 FF FF FF D8 A5 28 FF FF FF D9 C9 D8 AD 0C FF FF FF D9 C9 D9 9D 10 FF FF FF D9 9D 0C FF FF FF D9 85 08 FF FF FF
+***** EXPENSIVE 2530 837
+
+. 2529 805C4D8 209
+. D9 45 D0 D9 C9 D8 CA D9 C9 D8 8D 34 FF FF FF DE E9 D9 9D 04 FF FF FF D9 45 D0 D9 E0 DC CC D9 CB D8 4D D4 DE C4 D9 C9 D9 9D 40 FF FF FF D9 CA D9 95 00 FF FF FF D9 45 D4 D8 8D 34 FF FF FF D9 CA DE CB D9 CA DE C1 D9 85 78 FF FF FF D8 85 30 FF FF FF D9 9D 78 FF FF FF D9 85 74 FF FF FF D8 85 2C FF FF FF D9 9D 74 FF FF FF D9 85 1C FF FF FF D8 A5 04 FF FF FF D9 9D FC FE FF FF D9 85 18 FF FF FF D8 85 0C FF FF FF D9 9D F8 FE FF FF D9 85 14 FF FF FF D8 E1 D9 C9 D8 85 14 FF FF FF D9 9D 14 FF FF FF 8B 95 A4 FE FF FF D9 85 10 FF FF FF D9 C9 03 95 EC FE FF FF D9 9D F4 FE FF FF D8 E1 D9 85 10 FF FF FF 8B 85 EC FE FF FF 8B 4D D8 01 D0
+***** EXPENSIVE 2531 838
+
+. 2530 805C5A9 229
+. 2B 4D E8 8B B5 A0 FE FF FF DE C2 D9 9D 10 FF FF FF D9 85 18 FF FF FF 8D 4C 4E FF D8 A5 0C FF FF FF D9 C9 D9 9D F0 FE FF FF 89 85 78 FE FF FF D9 85 1C FF FF FF 8B 85 EC FE FF FF 8B 75 14 8D 3C 08 D8 85 04 FF FF FF D9 C9 0F AF B5 A4 FE FF FF D9 9D 18 FF FF FF 8B 45 10 D9 85 78 FF FF FF D9 C9 D9 9D 1C FF FF FF D9 1C B0 8B 85 A4 FE FF FF 8D 04 85 04 00 00 00 8B 75 10 0F AF 45 14 D9 85 74 FF FF FF D9 1C 30 8B 45 14 0F AF C2 D9 85 FC FE FF FF D9 1C 86 8D 14 95 04 00 00 00 8B 45 14 0F AF 55 14 0F AF 85 78 FE FF FF D9 85 F8 FE FF FF D9 85 F4 FE FF FF D9 C9 D9 1C 32 D9 1C 86 8B 85 78 FE FF FF 8D 34 85 04 00 00 00 8B 45 14 8B 55 10 0F AF C1 0F AF 75 14 D9 85 F0 FE FF FF D9 85 1C FF FF FF
+***** EXPENSIVE 2532 839
+
+. 2531 805C68E 118
+. D9 C9 D9 1C 16 80 B5 1B FF FF FF 80 D9 1C 82 8D 0C 8D 04 00 00 00 8B 45 14 0F AF 4D 14 0F AF C7 D9 85 18 FF FF FF 80 B5 13 FF FF FF 80 8D 3C BD 04 00 00 00 D9 1C 11 D9 85 14 FF FF FF 0F AF 7D 14 D9 85 10 FF FF FF D9 C9 D9 1C 82 D9 1C 17 8B 45 18 8B 55 D8 FF 45 E4 8B 4D DC 01 85 A4 FE FF FF 01 85 A0 FE FF FF 01 95 9C FE FF FF 39 4D E4 0F 82 98 FA FF FF
+***** EXPENSIVE 2533 840
+
+. 2532 80577A8 175
+. 8B B5 D4 FE FF FF 8B 45 0C 8B 7D 08 0F AF C6 D9 04 87 8D 04 B5 04 00 00 00 8B 4D D4 0F AF 45 0C 8D 14 4E D9 04 38 8B 45 0C 0F AF C2 D9 04 87 D9 CA D8 C0 D9 CA D8 C0 D9 CA 8D 0C 8E D9 95 18 FF FF FF D8 C2 D9 9D 0C FF FF FF 0F AF 4D 0C DD 85 E8 FE FF FF 8D 14 95 04 00 00 00 0F AF 55 0C D9 04 8F D9 C9 D8 8D 0C FF FF FF D8 E1 D9 04 3A D9 C9 D9 9D 08 FF FF FF D9 CA D8 C0 D9 CA D8 C0 DD 85 F0 FE FF FF D9 FA D9 CC D8 AD 18 FF FF FF D9 CC DC 8D E8 FE FF FF D8 CC D9 9D 04 FF FF FF D9 CB D9 9D 18 FF FF FF D9 45 E0 D9 45 DC D9 C9
+***** EXPENSIVE 2534 841
+
+. 2533 8057857 172
+. D8 CC D9 C9 D8 CB 8B 95 E4 FE FF FF 03 55 D8 8B 45 D8 DE C1 D9 45 E0 8D 0C 10 DE CB D9 85 04 FF FF FF D9 CC D8 4D DC D9 CC D8 A5 08 FF FF FF 8D 34 08 D9 9D 00 FF FF FF D9 CA 01 F0 DE E3 D9 85 0C FF FF FF D9 85 04 FF FF FF 8B 7D 14 0F AF BD E4 FE FF FF D8 85 08 FF FF FF D9 C9 89 85 BC FE FF FF DE C2 D9 C9 8B 45 10 D9 1C B8 D9 85 00 FF FF FF D9 C1 80 B5 03 FF FF FF 80 8B BD BC FE FF FF D9 E0 D9 C9 0F AF 55 14 0F AF 4D 14 0F AF 75 14 D8 E4 D9 CC 0F AF 7D 14 D8 AD 00 FF FF FF D9 CA D8 E3 D9 C9 DE E3 D9 1C 90 D9 CA
+***** EXPENSIVE 2535 842
+
+. 2534 8057903 44
+. D9 1C 88 D9 C9 D9 1C B0 D9 1C B8 8B 55 D4 8D 04 92 FF 45 E4 8B 4D D0 01 95 E4 FE FF FF 01 85 D4 FE FF FF 39 4D E4 0F 82 79 FE FF FF
+***** EXPENSIVE 2536 843
+vex iropt: not unrolling (79 sts)
+
+. 2535 8058E64 101
+. 8B 45 EC 8D 34 38 8B 45 0C 0F AF C7 8B 55 08 8B 4D 14 DD 04 C2 0F AF 4D DC 8B 55 DC 8B 45 10 DD 1C C8 8D 0C D5 08 00 00 00 0F AF 4D 14 0F AF 75 0C 89 4D CC 8B 4D 08 8B 54 F1 04 8B 04 F1 81 F2 00 00 00 80 8B 75 10 8B 4D CC 89 04 31 89 54 31 04 FF 45 F0 8B 75 18 8B 45 E8 01 75 DC 03 7D E4 39 45 F0 72 9B
+***** EXPENSIVE 2537 844
+vex iropt: not unrolling (75 sts)
+
+. 2536 805B4AC 91
+. 8B 45 EC 8B 55 14 0F AF D7 8D 0C 30 8B 45 0C 89 55 D0 0F AF C6 8B 55 08 D9 04 82 8B 45 D0 8B 55 10 D9 1C 82 8D 14 BD 04 00 00 00 0F AF 55 14 0F AF 4D 0C 89 55 D0 8B 55 08 8B 04 8A 35 00 00 00 80 8B 4D 10 8B 55 D0 89 04 0A FF 45 F0 8B 4D E8 03 7D 18 03 75 E4 39 4D F0 72 A5
+***** EXPENSIVE 2538 845
+
+. 2537 8054A58 206
+. 8B 7D 84 8B B5 D0 FE FF FF 8D 44 37 FF 8B 55 80 8D 0C 02 01 CA 29 FE 8B 7D 80 89 95 A4 FE FF FF 8D 74 37 FF 8B 55 0C 01 F7 0F AF D0 8D 04 C5 08 00 00 00 89 BD 7C FF FF FF 0F AF 45 0C 8B 7D 08 DD 04 38 8B 45 0C 0F AF C1 8D 0C CD 08 00 00 00 DD 04 C7 0F AF 4D 0C 8B 45 0C DD 04 39 0F AF 85 A4 FE FF FF DD 9D 58 FF FF FF DD 04 C7 8B 85 A4 FE FF FF DD 04 D7 8D 3C C5 08 00 00 00 8B 45 0C 8B 55 08 0F AF 85 7C FF FF FF 0F AF 7D 0C DD 9D 70 FF FF FF D9 CA DD 9D 68 FF FF FF DD 04 17 DD 04 C2 8B 95 7C FF FF FF 8D 04 D5 08 00 00 00 8B 4D 08 0F AF 45 0C DD 04 08 D9 CA 8B 45 0C 0F AF C6 8D 34 F5 08 00 00 00 DD 9D B8 FE FF FF
+***** EXPENSIVE 2539 846
+
+. 2538 8054B26 176
+. D9 CA 0F AF 75 0C DD 9D 60 FF FF FF D9 E0 DD 04 0E DD 85 B8 FE FF FF D9 CB DD 9D 50 FF FF FF DD 04 C1 DD 85 60 FF FF FF DD 85 58 FF FF FF D9 CD D8 C4 D9 CB D9 E0 D9 C9 D8 C2 D9 CD D8 C1 DD 85 50 FF FF FF D9 CC DD 9D 48 FF FF FF D9 CA DC AD 60 FF FF FF D9 C9 DC AD 58 FF FF FF D9 CB D8 C6 D9 C9 DD 9D 40 FF FF FF D9 CA DD 9D 38 FF FF FF DD 85 48 FF FF FF D9 C4 D8 C3 DD 85 B8 FE FF FF D9 CA D8 C3 D9 C9 DD 9D 30 FF FF FF D9 CE DC A5 50 FF FF FF D9 C9 DE E4 D9 CD DD 95 28 FF FF FF DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CD
+***** EXPENSIVE 2540 847
+
+. 2539 8054BD6 163
+. DE E3 D9 CC DE CA DD 85 F0 FE FF FF D9 FA D9 C9 DC A5 48 FF FF FF D9 C9 DC 8D E8 FE FF FF DE C9 DD 85 30 FF FF FF DC 8D E8 FE FF FF DC AD 70 FF FF FF D9 CC DC 8D E8 FE FF FF D9 C4 D9 C9 DC AD 68 FF FF FF D9 C9 D8 C3 DD 9D 20 FF FF FF D9 C0 D8 C2 D9 C9 DE E2 DD 45 E0 DD 45 D8 D8 CF D9 C9 DC 8D 40 FF FF FF D9 CA DD 9D 18 FF FF FF DE C1 DD 45 E0 DD 45 D8 D8 CD D9 C9 DC 8D 38 FF FF FF DE C1 DD 45 D8 D9 CD DC 4D E0 D9 CD DC 8D 38 FF FF FF DE E5 D9 CD DE E3 DD 85 68 FF FF FF DD 45 D8 D9 CC
+***** EXPENSIVE 2541 848
+
+. 2540 8054C79 191
+. DD 95 B0 FE FF FF D9 CF DC 4D E0 D9 C9 DC 85 28 FF FF FF D9 CF D8 E5 D9 CC DC 8D 40 FF FF FF DE E1 D9 CE DD 9D 68 FF FF FF DD 85 70 FF FF FF DD 85 20 FF FF FF D9 CC DD 9D 10 FF FF FF D9 C2 D9 C9 DC 85 30 FF FF FF D9 CC D8 E6 D9 C9 D8 C7 D9 CE DC 85 20 FF FF FF D9 CC DD 9D 70 FF FF FF D9 CD DD 9D 08 FF FF FF DD 85 18 FF FF FF DD 85 B0 FE FF FF D9 CC DD 9D 00 FF FF FF D9 CB 8B 95 E0 FE FF FF 03 55 D4 8B 45 D4 8D 0C 10 8D 34 08 01 F0 8B 7D 14 0F AF BD E0 FE FF FF DE C4 89 85 A4 FE FF FF DD 85 70 FF FF FF 8B 45 10 DD 1C F8 8B 85 E0 FE FF FF 8D 04 C5 08 00 00 00 8B 7D 10
+***** EXPENSIVE 2542 849
+
+. 2541 8054D38 159
+. 0F AF 45 14 DD 85 68 FF FF FF DD 1C 38 D9 C9 DE E5 DC C1 DD 45 C0 DD 45 B8 D8 CB D9 C9 8B 45 14 D8 CD 0F AF C2 DE E1 DD 1C C7 DD 45 A8 DD 45 B0 D9 C9 DC 8D 08 FF FF FF D9 CB DC 4D C0 D9 CD DC 4D B8 D9 C9 8B 45 14 DC 8D 10 FF FF FF 8D 14 D5 08 00 00 00 0F AF 55 14 0F AF C1 DE E3 DE C4 D9 CB DD 1C 3A DD 1C C7 DD 45 A8 DD 45 B0 DC 8D 08 FF FF FF D9 C9 DC 8D 10 FF FF FF 8D 0C CD 08 00 00 00 0F AF 4D 14 DE C1 DD 85 18 FF FF FF D9 C9 DD 1C 39 DD 45 A0 DD 45 98 D8 CD D9 C9 D8 CB
+***** EXPENSIVE 2543 850
+
+. 2542 8054DD7 146
+. 8B 45 14 0F AF C6 DE E1 DD 1C C7 DE E2 DD 45 90 DD 45 88 D8 CB D9 CC 8D 34 F5 08 00 00 00 DC 4D A0 D9 CA DC 4D 98 D9 C9 8B 45 14 DC 8D 00 FF FF FF 0F AF 85 A4 FE FF FF 0F AF 75 14 DE E4 DE C1 DD 1C 3E D9 C9 DD 1C C7 8B 85 A4 FE FF FF DD 45 88 D9 C9 8D 3C C5 08 00 00 00 DC 4D 90 D9 C9 DC 8D 00 FF FF FF 0F AF 7D 14 8B 45 10 8B 55 D0 DE C1 DD 1C 07 FF 45 E8 8D 04 92 8B 4D CC 01 95 E0 FE FF FF 01 85 D0 FE FF FF 39 4D E8 0F 82 EF FB FF FF
+***** EXPENSIVE 2544 851
+
+. 2543 80572C4 196
+. 8B 8D D0 FE FF FF 8B 75 90 8D 44 0E FF 8B 7D 8C 8D 0C 07 8B B5 D0 FE FF FF 01 CF 8B 55 8C 2B 75 90 89 BD BC FE FF FF 8D 74 32 FF 8B 7D 0C 01 F2 0F AF F8 8D 04 85 04 00 00 00 89 55 88 0F AF 45 0C 8B 55 08 D9 04 10 8B 45 0C 0F AF C1 D9 04 82 D9 C9 8B 45 0C 0F AF 85 BC FE FF FF D9 5D 80 8D 0C 8D 04 00 00 00 D9 04 82 8B 85 BC FE FF FF D9 04 BA 0F AF 4D 0C 8D 3C 85 04 00 00 00 8B 45 0C 0F AF 45 88 D9 5D 84 0F AF 7D 0C D9 04 11 D9 9D 78 FF FF FF D9 9D 74 FF FF FF D9 04 17 D9 04 82 D9 C9 8B 55 88 8D 04 95 04 00 00 00 8B 4D 08 0F AF 45 0C D9 9D 70 FF FF FF D9 04 08 D9 CA D9 9D 7C FF FF FF
+***** EXPENSIVE 2545 852
+
+. 2544 8057388 196
+. D9 C9 D9 9D 6C FF FF FF 8B 45 0C 0F AF C6 D9 04 81 D9 85 7C FF FF FF D9 C9 8D 34 B5 04 00 00 00 D9 95 68 FF FF FF DE C1 0F AF 75 0C D9 85 74 FF FF FF D8 C2 D9 04 0E D9 C9 D9 9D 64 FF FF FF D9 E0 D9 85 78 FF FF FF D9 85 7C FF FF FF D9 C9 80 B5 6F FF FF FF 80 D8 C2 D9 C9 D8 A5 68 FF FF FF D9 CA D8 AD 78 FF FF FF D9 CC D8 AD 74 FF FF FF D9 85 70 FF FF FF D8 85 6C FF FF FF D9 CB D9 9D 60 FF FF FF D9 CC D9 9D 5C FF FF FF D9 CB D9 9D 58 FF FF FF D9 85 70 FF FF FF D9 85 64 FF FF FF D9 C4 D9 CA D8 A5 6C FF FF FF D9 C9 D8 C4 D9 CA D8 C3 D9 C9 D9 9D 54 FF FF FF D9 C9 D9 95 50 FF FF FF D9 C9
+***** EXPENSIVE 2546 853
+
+. 2545 805744C 190
+. D9 9D 4C FF FF FF DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CB D8 A5 64 FF FF FF DE CB D9 CA D9 5D EC D9 45 EC DD 85 F0 FE FF FF D9 FA DC 8D E8 FE FF FF D9 CC DE E2 D9 CB DE C9 D9 5D EC DD 85 E8 FE FF FF DE C9 D8 6D 84 D9 9D 48 FF FF FF DD 85 E8 FE FF FF D8 8D 4C FF FF FF D8 6D 80 D9 9D 44 FF FF FF D9 85 48 FF FF FF D8 C1 D9 C9 D8 AD 48 FF FF FF D9 C9 D9 9D 40 FF FF FF D9 9D 38 FF FF FF D9 45 DC D9 45 E0 D8 8D 60 FF FF FF D9 C9 D8 8D 58 FF FF FF DE C1 D9 45 EC D9 85 44 FF FF FF D9 CA D9 9D 30 FF FF FF D9 45 E0 D9 45 DC D9 CB D8 C2 D9 CB D8 8D 54 FF FF FF D9 CA
+***** EXPENSIVE 2547 854
+
+. 2546 805750A 189
+. D8 AD 44 FF FF FF D9 C9 D8 8D 5C FF FF FF DE C2 D9 9D 34 FF FF FF D9 45 E0 D9 45 DC D9 C9 D8 8D 58 FF FF FF D9 C9 D8 8D 60 FF FF FF D9 45 84 D9 C9 DE E2 D9 CB D9 9D 3C FF FF FF D9 45 DC D9 45 E0 D9 CC D8 85 50 FF FF FF D9 CC D8 8D 54 FF FF FF D9 C9 D8 8D 5C FF FF FF D9 CC D9 5D 84 8B 95 E0 FE FF FF 03 55 D8 8B 45 D8 8D 0C 10 8D 34 08 DE EB D9 85 38 FF FF FF 01 F0 D8 E3 D9 CB 8B 7D 14 D8 85 38 FF FF FF 0F AF BD E0 FE FF FF D9 45 80 D9 C9 D9 9D 24 FF FF FF 89 85 BC FE FF FF D9 45 84 8B 45 10 D9 1C B8 D8 85 4C FF FF FF 8B 85 E0 FE FF FF D9 5D 80 D9 85 34 FF FF FF
+***** EXPENSIVE 2548 855
+
+. 2547 80575C7 164
+. 8D 04 85 04 00 00 00 8B 7D 10 0F AF 45 14 D8 C1 D9 45 80 D9 CA D8 AD 34 FF FF FF D9 85 40 FF FF FF D9 85 3C FF FF FF D9 C9 D8 E5 D9 C9 D8 85 30 FF FF FF D9 CC D9 1C 38 D9 C9 D9 9D 20 FF FF FF D9 45 B0 D9 45 AC D9 CD D8 85 40 FF FF FF D9 CD D8 CC D9 C9 8B 45 14 D8 CA 0F AF C2 DE E1 D9 CC D9 9D 1C FF FF FF D9 85 3C FF FF FF D8 A5 30 FF FF FF D9 CC D9 1C 87 D9 45 B0 DE CB D9 45 AC DE C9 8D 14 95 04 00 00 00 0F AF 55 14 DE C2 D9 C9 D9 1C 3A D9 45 A4 D9 45 A8 D9 C9 D8 CA D9 C9 8B 45 14 D8 CC
+***** EXPENSIVE 2549 856
+
+. 2548 805766B 171
+. 0F AF C1 DE E1 D9 1C 87 D9 45 9C D9 45 A0 D9 C9 D8 8D 20 FF FF FF D9 CA D8 4D A8 D9 CC D8 4D A4 D9 C9 8B 45 14 D8 8D 24 FF FF FF 8D 0C 8D 04 00 00 00 0F AF 4D 14 0F AF C6 DE E2 DE C3 D9 CA D9 1C 39 D9 45 A0 D9 CA D9 1C 87 D9 45 9C D8 8D 24 FF FF FF D9 CA D8 8D 20 FF FF FF DE C2 D9 45 94 D9 45 98 8B 45 14 D8 8D 1C FF FF FF D9 C9 D8 CA 8D 34 B5 04 00 00 00 0F AF 85 BC FE FF FF 0F AF 75 14 DE E9 D9 CA D9 1C 3E D9 C9 D9 1C 87 8B 85 BC FE FF FF D9 45 94 D9 C9 8D 3C 85 04 00 00 00 D8 4D 98 D9 C9 D8 8D 1C FF FF FF
+***** EXPENSIVE 2550 857
+
+. 2549 8057716 45
+. 0F AF 7D 14 8B 45 10 DE C1 D9 1C 07 8B 55 D4 8D 04 92 FF 45 E4 8B 4D D0 01 95 E0 FE FF FF 01 85 D0 FE FF FF 39 4D E4 0F 82 81 FB FF FF
+***** EXPENSIVE 2551 858
+
+. 2550 805A620 45
+. C7 45 D0 00 00 00 00 8B 45 18 DD 85 40 FF FF FF DD 85 38 FF FF FF D9 C9 39 45 D0 DD 9D 68 FF FF FF DD 9D 60 FF FF FF 0F 83 A2 01 00 00
+***** EXPENSIVE 2552 859
+
+. 2551 8055289 39
+. C7 45 D0 00 00 00 00 8B 45 18 DD 85 60 FF FF FF DD 85 58 FF FF FF D9 C9 39 45 D0 DD 5D A8 DD 5D A0 0F 83 D8 01 00 00
+***** EXPENSIVE 2553 860
+
+. 2552 805CE3C 36
+. C7 45 CC 00 00 00 00 8B 45 18 D9 45 80 D9 85 7C FF FF FF D9 C9 39 45 CC D9 5D 94 D9 5D 90 0F 83 56 01 00 00
+***** EXPENSIVE 2554 861
+
+. 2553 8057B7D 33
+. C7 45 CC 00 00 00 00 8B 45 18 D9 45 88 D9 45 84 D9 C9 39 45 CC D9 5D B4 D9 5D B0 0F 83 CC 01 00 00
+
+. 2554 804B900 2
+. EB 1B
+
+. 2555 804B91D 13
+. 83 EC 0C 68 44 B9 04 08 E8 F6 23 01 00
+
+. 2556 804B92A 5
+. E8 A9 E3 FF FF
+
+. 2557 8049CD8 16
+. 55 89 E5 57 56 53 83 EC 28 6A 00 E8 14 31 00 00
+
+. 2558 804CE18 23
+. 6A 01 8D 83 EA F6 FF FF 6A 21 50 8D 83 0C F6 FF FF 50 E8 8D 0E 01 00
+
+. 2559 805DCBC 38
+. 55 89 E5 56 53 E8 00 00 00 00 5B 81 C3 4E 34 00 00 8B 83 5C 00 00 00 8B 00 85 C0 8B 4D 08 8B 75 0C 8B 55 10 74 0F
+
+. 2560 805DCE2 8
+. FF 75 14 52 56 51 FF D0
+
+. 2561 804B944 5
+. 55 89 E5 C9 C3
+
+. 2562 805DCEA 7
+. 8D 65 F8 5B 5E C9 C3
+
+. 2563 804CE2F 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2564 8049CE8 22
+. 89 C7 58 5A 31 C0 85 FF 68 C0 F2 05 08 0F 95 C0 50 E8 3A 41 01 00
+
+. 2565 8049CFE 12
+. C7 04 24 00 00 00 00 E8 2A E9 00 00
+
+. 2566 8058650 23
+. 6A 01 8D 83 15 F7 FF FF 6A 21 50 8D 83 0C F6 FF FF 50 E8 55 56 00 00
+
+. 2567 8058667 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2568 8049D0A 22
+. 89 C3 5E 58 31 C0 85 DB 68 00 F3 05 08 0F 95 C0 50 E8 18 41 01 00
+
+. 2569 8049D20 12
+. C7 04 24 00 00 00 00 E8 C8 98 00 00
+
+. 2570 8053610 23
+. 6A 01 8D 83 E9 F6 FF FF 6A 21 50 8D 83 0C F6 FF FF 50 E8 95 A6 00 00
+
+. 2571 8053627 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2572 8049D2C 24
+. 85 C0 5A 59 89 45 DC 0F 95 C0 68 40 F3 05 08 0F B6 C0 50 E8 F4 40 01 00
+
+. 2573 8049D44 12
+. C7 04 24 00 00 00 00 E8 40 33 00 00
+
+. 2574 804D0AC 23
+. 6A 01 8D 83 EA F6 FF FF 6A 7B 50 8D 83 0C F6 FF FF 50 E8 F9 0B 01 00
+
+. 2575 804D0C3 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2576 8049D50 20
+. 59 5E 31 C0 85 FF 68 80 F3 05 08 0F 95 C0 50 E8 D4 40 01 00
+
+. 2577 8049D64 12
+. C7 04 24 00 00 00 00 E8 7C EB 00 00
+
+. 2578 8058908 26
+. 6A 01 8D 83 15 F7 FF FF 68 84 00 00 00 50 8D 83 0C F6 FF FF 50 E8 9A 53 00 00
+
+. 2579 8058922 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2580 8049D70 20
+. 58 5A 31 C0 85 DB 68 C0 F3 05 08 0F 95 C0 50 E8 B4 40 01 00
+
+. 2581 8049D84 12
+. C7 04 24 0A 00 00 00 E8 6C 30 00 00
+
+. 2582 8049D90 14
+. 89 C7 C7 04 24 0A 00 00 00 E8 56 98 00 00
+
+. 2583 8049D9E 15
+. 89 45 DC C7 04 24 0A 00 00 00 E8 87 E8 00 00
+
+. 2584 8049DAD 14
+. 89 C3 C7 04 24 0A 00 00 00 E8 D5 32 00 00
+
+. 2585 8049DBB 14
+. 89 C6 C7 04 24 0A 00 00 00 E8 23 EB 00 00
+
+. 2586 8049DC9 20
+. 89 34 24 57 6A 00 89 45 D8 6A 01 8D 45 E0 50 E8 77 34 00 00
+
+. 2587 804D688 23
+. 6A 01 6A 6B 8D 83 F4 F6 FF FF 50 8D 83 0C F6 FF FF 50 E8 1D 06 01 00
+
+. 2588 804D69F 10
+. B8 01 00 00 00 E9 FD FC FF FF
+
+. 2589 8049DDD 22
+. 83 C4 18 85 C0 0F 94 C0 68 00 F4 05 08 0F B6 C0 50 E8 45 40 01 00
+
+. 2590 8049DF3 18
+. 58 FF 75 D8 53 6A 00 6A 01 8D 45 E8 50 E8 03 EC 00 00
+
+. 2591 8058C60 23
+. 6A 01 6A 36 8D 83 21 F7 FF FF 50 8D 83 0C F6 FF FF 50 E8 45 50 00 00
+
+. 2592 8058C77 10
+. B8 01 00 00 00 E9 F5 FD FF FF
+
+. 2593 8049E05 22
+. 83 C4 18 85 C0 0F 94 C0 68 40 F4 05 08 0F B6 C0 50 E8 1D 40 01 00
+
+. 2594 8049E1B 20
+. 58 FF 75 D8 FF 75 DC 6A 00 6A 01 8D 45 E8 50 E8 39 9B 00 00
+
+. 2595 8053BD0 23
+. 6A 01 6A 58 8D 83 F3 F6 FF FF 50 8D 83 0C F6 FF FF 50 E8 D5 A0 00 00
+
+. 2596 8053BE7 10
+. B8 01 00 00 00 E9 CA FD FF FF
+
+. 2597 8049E2F 22
+. 83 C4 18 85 C0 0F 94 C0 68 80 F4 05 08 0F B6 C0 50 E8 F3 3F 01 00
+
+. 2598 8049E45 16
+. 83 C4 0C 6A 00 6A 01 8D 45 E0 50 E8 03 5F 00 00
+
+. 2599 8050023 23
+. 6A 04 6A 5A 8D 83 FE F6 FF FF 50 8D 83 D3 F6 FF FF 50 E8 82 DC 00 00
+
+. 2600 805003A 7
+. B8 04 00 00 00 EB D0
+
+. 2601 8049E55 21
+. 85 C0 5A 59 0F 94 C0 68 C0 F4 05 08 0F B6 C0 50 E8 CE 3F 01 00
+
+. 2602 8049E6A 17
+. 89 34 24 57 6A 00 6A 01 8D 45 E0 50 E8 09 34 00 00
+
+. 2603 8049E7B 22
+. 83 C4 18 85 C0 0F 94 C0 68 00 F5 05 08 0F B6 C0 50 E8 A7 3F 01 00
+
+. 2604 8049E91 16
+. 83 C4 0C 6A 00 6A 01 8D 45 E0 50 E8 DF 5E 00 00
+
+. 2605 8049EA1 21
+. 85 C0 5A 59 0F 94 C0 68 40 F5 05 08 0F B6 C0 50 E8 82 3F 01 00
+
+. 2606 8049EB6 17
+. 89 34 24 57 6A 00 6A 01 8D 45 E0 50 E8 ED 33 00 00
+
+. 2607 804D32C 10
+. 8D 65 F4 5B 5E 89 F8 5F C9 C3
+
+. 2608 8049EC7 22
+. 83 C4 18 85 C0 0F 94 C0 68 80 F5 05 08 0F B6 C0 50 E8 5B 3F 01 00
+
+. 2609 8049EDD 16
+. 83 C4 0C 6A 00 6A 01 8D 45 E0 50 E8 BB 5E 00 00
+
+. 2610 804FE18 10
+. 8D 65 F4 5B 5E 89 F8 5F C9 C3
+
+. 2611 8049EED 21
+. 85 C0 5A 59 0F 94 C0 68 C0 F5 05 08 0F B6 C0 50 E8 36 3F 01 00
+
+. 2612 8049F02 16
+. 83 C4 0C 6A 11 6A 01 8D 45 E0 50 E8 46 5E 00 00
+
+. 2613 8049F12 21
+. 85 C0 5A 59 0F 94 C0 68 00 F6 05 08 0F B6 C0 50 E8 11 3F 01 00
+
+. 2614 8049F27 16
+. 83 C4 0C 6A 11 6A 01 8D 45 E0 50 E8 49 5E 00 00
+
+. 2615 8049F37 21
+. 85 C0 5A 59 0F 94 C0 68 40 F6 05 08 0F B6 C0 50 E8 EC 3E 01 00
+
+. 2616 8049F4C 16
+. 83 C4 0C 6A 11 6A 01 8D 45 E0 50 E8 4C 5E 00 00
+
+. 2617 8049F5C 21
+. 85 C0 5A 59 0F 94 C0 68 80 F6 05 08 0F B6 C0 50 E8 C7 3E 01 00
+
+. 2618 8049F71 23
+. C7 07 03 00 00 00 89 34 24 57 6A 04 6A 01 8D 45 E0 50 E8 CC 32 00 00
+
+. 2619 804D38A 23
+. 6A 04 8D 83 F4 F6 FF FF 6A 75 50 8D 83 4C F8 FF FF 50 E8 1B 09 01 00
+
+. 2620 804D3A1 13
+. B8 04 00 00 00 8D 65 F4 5B 5E 5F C9 C3
+
+. 2621 8049F88 22
+. 83 C4 18 85 C0 0F 94 C0 68 C0 F6 05 08 0F B6 C0 50 E8 9A 3E 01 00
+
+. 2622 8049F9E 23
+. C7 07 03 00 00 00 89 34 24 57 6A 04 6A 01 8D 45 E0 50 E8 CF 32 00 00
+
+. 2623 8049FB5 22
+. 83 C4 18 85 C0 0F 94 C0 68 00 F7 05 08 0F B6 C0 50 E8 6D 3E 01 00
+
+. 2624 8049FCB 23
+. C7 07 03 00 00 00 89 34 24 57 6A 04 6A 01 8D 45 E0 50 E8 D2 32 00 00
+
+. 2625 8049FE2 22
+. 83 C4 18 85 C0 0F 94 C0 68 40 F7 05 08 0F B6 C0 50 E8 40 3E 01 00
+
+. 2626 8049FF8 24
+. C7 03 03 00 00 00 59 FF 75 D8 53 6A 04 6A 01 8D 45 E8 50 E8 F8 E9 00 00
+
+. 2627 8058A5A 23
+. 6A 04 8D 83 21 F7 FF FF 6A 40 50 8D 83 4C F8 FF FF 50 E8 4B 52 00 00
+
+. 2628 8058A71 13
+. B8 04 00 00 00 8D 65 F4 5B 5E 5F C9 C3
+
+. 2629 804A010 22
+. 83 C4 18 85 C0 0F 94 C0 68 80 F7 05 08 0F B6 C0 50 E8 12 3E 01 00
+
+. 2630 804A026 27
+. 8B 45 DC C7 00 03 00 00 00 5A FF 75 D8 50 6A 04 6A 01 8D 45 E8 50 E8 27 99 00 00
+
+. 2631 805399F 23
+. 6A 04 8D 83 F3 F6 FF FF 6A 62 50 8D 83 4C F8 FF FF 50 E8 06 A3 00 00
+
+. 2632 80539B6 13
+. B8 04 00 00 00 8D 65 F4 5B 5E 5F C9 C3
+
+. 2633 804A041 22
+. 83 C4 18 85 C0 0F 94 C0 68 C0 F7 05 08 0F B6 C0 50 E8 E1 3D 01 00
+
+. 2634 804A057 9
+. 58 FF 75 DC E8 30 98 00 00
+
+. 2635 804A060 8
+. 89 1C 24 E8 2C E9 00 00
+
+. 2636 804A068 8
+. 89 3C 24 E8 C4 30 00 00
+
+. 2637 804A070 9
+. 58 FF 75 D8 E8 57 E9 00 00
+
+. 2638 804A079 8
+. 89 34 24 E8 EF 30 00 00
+
+. 2639 804A081 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 2640 804B92F 5
+. E8 5C FB FF FF
+
+. 2641 804B490 16
+. 55 89 E5 57 56 53 83 EC 28 6A 00 E8 38 4E 00 00
+
+. 2642 80502F4 23
+. 6A 01 8D 83 EA F6 FF FF 6A 21 50 8D 83 0C F6 FF FF 50 E8 B1 D9 00 00
+
+. 2643 805030B 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2644 804B4A0 22
+. 89 C7 58 5A 31 C0 85 FF 68 A0 01 06 08 0F 95 C0 50 E8 82 29 01 00
+
+. 2645 804B4B6 12
+. C7 04 24 00 00 00 00 E8 E6 F7 00 00
+
+. 2646 805ACC4 23
+. 6A 01 8D 83 15 F7 FF FF 6A 21 50 8D 83 0C F6 FF FF 50 E8 E1 2F 00 00
+
+. 2647 805ACDB 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2648 804B4C2 22
+. 89 C3 5E 58 31 C0 85 DB 68 E0 01 06 08 0F 95 C0 50 E8 60 29 01 00
+
+. 2649 804B4D8 12
+. C7 04 24 00 00 00 00 E8 34 A9 00 00
+
+. 2650 8055E34 23
+. 6A 01 8D 83 E9 F6 FF FF 6A 21 50 8D 83 0C F6 FF FF 50 E8 71 7E 00 00
+
+. 2651 8055E4B 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2652 804B4E4 24
+. 85 C0 5A 59 89 45 E8 0F 95 C0 68 20 02 06 08 0F B6 C0 50 E8 3C 29 01 00
+
+. 2653 804B4FC 12
+. C7 04 24 00 00 00 00 E8 34 50 00 00
+
+. 2654 8050558 23
+. 6A 01 8D 83 EA F6 FF FF 6A 7B 50 8D 83 0C F6 FF FF 50 E8 4D D7 00 00
+
+. 2655 805056F 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2656 804B508 20
+. 59 5E 31 C0 85 FF 68 60 02 06 08 0F 95 C0 50 E8 1C 29 01 00
+
+. 2657 804B51C 12
+. C7 04 24 00 00 00 00 E8 1C FA 00 00
+
+. 2658 805AF60 26
+. 6A 01 8D 83 15 F7 FF FF 68 84 00 00 00 50 8D 83 0C F6 FF FF 50 E8 42 2D 00 00
+
+. 2659 805AF7A 10
+. 31 C0 8D 65 F4 5B 5E 5F C9 C3
+
+. 2660 804B528 20
+. 58 5A 31 C0 85 DB 68 A0 02 06 08 0F 95 C0 50 E8 FC 28 01 00
+
+. 2661 804B53C 12
+. C7 04 24 0A 00 00 00 E8 90 4D 00 00
+
+. 2662 804B548 14
+. 89 C7 C7 04 24 0A 00 00 00 E8 C2 A8 00 00
+
+. 2663 804B556 15
+. 89 45 E8 C7 04 24 0A 00 00 00 E8 43 F7 00 00
+
+. 2664 804B565 14
+. 89 C3 C7 04 24 0A 00 00 00 E8 C9 4F 00 00
+
+. 2665 804B573 14
+. 89 C6 C7 04 24 0A 00 00 00 E8 C3 F9 00 00
+
+. 2666 804B581 20
+. 89 34 24 57 6A 00 89 45 E4 6A 01 8D 45 EC 50 E8 6F 51 00 00
+
+. 2667 8050B28 23
+. 6A 01 6A 6B 8D 83 F4 F6 FF FF 50 8D 83 0C F6 FF FF 50 E8 7D D1 00 00
+
+. 2668 8050B3F 10
+. B8 01 00 00 00 E9 01 FD FF FF
+
+. 2669 804B595 22
+. 83 C4 18 85 C0 0F 94 C0 68 E0 02 06 08 0F B6 C0 50 E8 8D 28 01 00
+
+. 2670 804B5AB 18
+. 58 FF 75 E4 53 6A 00 6A 01 8D 45 F0 50 E8 A3 FA 00 00
+
+. 2671 805B2AC 23
+. 6A 01 6A 36 8D 83 21 F7 FF FF 50 8D 83 0C F6 FF FF 50 E8 F9 29 00 00
+
+. 2672 805B2C3 10
+. B8 01 00 00 00 E9 01 FE FF FF
+
+. 2673 804B5BD 22
+. 83 C4 18 85 C0 0F 94 C0 68 20 03 06 08 0F B6 C0 50 E8 65 28 01 00
+
+. 2674 804B5D3 20
+. 58 FF 75 E4 FF 75 E8 6A 00 6A 01 8D 45 F0 50 E8 7D AB 00 00
+
+. 2675 80563C8 23
+. 6A 01 6A 58 8D 83 F3 F6 FF FF 50 8D 83 0C F6 FF FF 50 E8 DD 78 00 00
+
+. 2676 80563DF 10
+. B8 01 00 00 00 E9 CE FD FF FF
+
+. 2677 804B5E7 22
+. 83 C4 18 85 C0 0F 94 C0 68 60 03 06 08 0F B6 C0 50 E8 3B 28 01 00
+
+. 2678 804B5FD 16
+. 83 C4 0C 6A 00 6A 01 8D 45 EC 50 E8 3B 7A 00 00
+
+. 2679 8053048 33
+. 55 89 E5 53 50 6A FF FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 BA E0 00 00 FF 75 08 E8 AB 00 00 00
+
+. 2680 8053114 36
+. 55 89 E5 57 56 53 83 EC 5C E8 00 00 00 00 5B 81 C3 F2 DF 00 00 31 C0 83 7D 10 01 8B 75 08 0F 84 E6 01 00 00
+
+. 2681 8053138 11
+. 83 EC 0C FF 75 10 E8 85 9C FF FF
+
+. 2682 8053143 13
+. 89 45 E0 83 C4 10 40 0F 84 E0 01 00 00
+
+. 2683 8053330 23
+. 6A 04 6A 5A 8D 83 FE F6 FF FF 50 8D 83 D3 F6 FF FF 50 E8 75 A9 00 00
+
+. 2684 8053347 7
+. B8 04 00 00 00 EB D0
+
+. 2685 805331E 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 2686 8053069 5
+. 8B 5D FC C9 C3
+
+. 2687 804B60D 21
+. 85 C0 5A 59 0F 94 C0 68 A0 03 06 08 0F B6 C0 50 E8 16 28 01 00
+
+. 2688 804B622 17
+. 89 34 24 57 6A 00 6A 01 8D 45 EC 50 E8 01 51 00 00
+
+. 2689 804B633 22
+. 83 C4 18 85 C0 0F 94 C0 68 E0 03 06 08 0F B6 C0 50 E8 EF 27 01 00
+
+. 2690 804B649 16
+. 83 C4 0C 6A 00 6A 01 8D 45 EC 50 E8 17 7A 00 00
+
+. 2691 8053070 33
+. 55 89 E5 53 50 6A 01 FF 75 10 FF 75 0C E8 00 00 00 00 5B 81 C3 92 E0 00 00 FF 75 08 E8 83 00 00 00
+
+. 2692 8053091 5
+. 8B 5D FC C9 C3
+
+. 2693 804B659 21
+. 85 C0 5A 59 0F 94 C0 68 20 04 06 08 0F B6 C0 50 E8 CA 27 01 00
+
+. 2694 804B66E 17
+. 89 34 24 57 6A 00 6A 01 8D 45 EC 50 E8 E5 50 00 00
+
+. 2695 80507D0 10
+. 8D 65 F4 5B 5E 89 C8 5F C9 C3
+
+. 2696 804B67F 22
+. 83 C4 18 85 C0 0F 94 C0 68 60 04 06 08 0F B6 C0 50 E8 A3 27 01 00
+
+. 2697 804B695 16
+. 83 C4 0C 6A 00 6A 01 8D 45 EC 50 E8 F3 79 00 00
+
+. 2698 8053098 39
+. 55 89 E5 57 56 53 83 EC 0C 6A 01 8B 7D 10 57 FF 75 0C 8B 75 08 E8 00 00 00 00 5B 81 C3 62 E0 00 00 56 E8 55 00 00 00
+
+. 2699 80530BF 9
+. 83 C4 10 85 C0 89 C1 75 3C
+
+. 2700 8053104 10
+. 8D 65 F4 5B 5E 89 C8 5F C9 C3
+
+. 2701 804B6A5 21
+. 85 C0 5A 59 0F 94 C0 68 A0 04 06 08 0F B6 C0 50 E8 7E 27 01 00
+
+. 2702 804B6BA 16
+. 83 C4 0C 6A 11 6A 01 8D 45 EC 50 E8 7E 79 00 00
+
+. 2703 804B6CA 21
+. 85 C0 5A 59 0F 94 C0 68 E0 04 06 08 0F B6 C0 50 E8 59 27 01 00
+
+. 2704 804B6DF 16
+. 83 C4 0C 6A 11 6A 01 8D 45 EC 50 E8 81 79 00 00
+
+. 2705 804B6EF 21
+. 85 C0 5A 59 0F 94 C0 68 20 05 06 08 0F B6 C0 50 E8 34 27 01 00
+
+. 2706 804B704 16
+. 83 C4 0C 6A 11 6A 01 8D 45 EC 50 E8 84 79 00 00
+
+. 2707 804B714 21
+. 85 C0 5A 59 0F 94 C0 68 60 05 06 08 0F B6 C0 50 E8 0F 27 01 00
+
+. 2708 804B729 23
+. C7 07 03 00 00 00 89 34 24 57 6A 04 6A 01 8D 45 EC 50 E8 C4 4F 00 00
+
+. 2709 805082E 23
+. 6A 04 8D 83 F4 F6 FF FF 6A 75 50 8D 83 4C F8 FF FF 50 E8 77 D4 00 00
+
+. 2710 8050845 13
+. B8 04 00 00 00 8D 65 F4 5B 5E 5F C9 C3
+
+. 2711 804B740 22
+. 83 C4 18 85 C0 0F 94 C0 68 A0 05 06 08 0F B6 C0 50 E8 E2 26 01 00
+
+. 2712 804B756 23
+. C7 07 03 00 00 00 89 34 24 57 6A 04 6A 01 8D 45 EC 50 E8 C7 4F 00 00
+
+. 2713 804B76D 22
+. 83 C4 18 85 C0 0F 94 C0 68 E0 05 06 08 0F B6 C0 50 E8 B5 26 01 00
+
+. 2714 804B783 23
+. C7 07 03 00 00 00 89 34 24 57 6A 04 6A 01 8D 45 EC 50 E8 CA 4F 00 00
+
+. 2715 804B79A 22
+. 83 C4 18 85 C0 0F 94 C0 68 20 06 06 08 0F B6 C0 50 E8 88 26 01 00
+
+. 2716 804B7B0 24
+. C7 03 03 00 00 00 58 FF 75 E4 53 6A 04 6A 01 8D 45 F0 50 E8 98 F8 00 00
+
+. 2717 805B0B2 23
+. 6A 04 8D 83 21 F7 FF FF 6A 40 50 8D 83 4C F8 FF FF 50 E8 F3 2B 00 00
+
+. 2718 805B0C9 13
+. B8 04 00 00 00 8D 65 F4 5B 5E 5F C9 C3
+
+. 2719 804B7C8 22
+. 83 C4 18 85 C0 0F 94 C0 68 60 06 06 08 0F B6 C0 50 E8 5A 26 01 00
+
+. 2720 804B7DE 27
+. 8B 45 E8 C7 00 03 00 00 00 59 FF 75 E4 50 6A 04 6A 01 8D 45 F0 50 E8 6B A9 00 00
+
+. 2721 805619B 23
+. 6A 04 8D 83 F3 F6 FF FF 6A 62 50 8D 83 4C F8 FF FF 50 E8 0A 7B 00 00
+
+. 2722 80561B2 13
+. B8 04 00 00 00 8D 65 F4 5B 5E 5F C9 C3
+
+. 2723 804B7F9 22
+. 83 C4 18 85 C0 0F 94 C0 0F B6 C0 68 A0 06 06 08 50 E8 29 26 01 00
+
+. 2724 804B80F 9
+. 5A FF 75 E8 E8 74 A8 00 00
+
+. 2725 804B818 8
+. 89 1C 24 E8 CC F7 00 00
+
+. 2726 804B820 8
+. 89 3C 24 E8 BC 4D 00 00
+
+. 2727 804B828 9
+. 58 FF 75 E4 E8 F7 F7 00 00
+
+. 2728 804B831 8
+. 89 34 24 E8 E7 4D 00 00
+
+. 2729 804B839 8
+. 8D 65 F4 5B 5E 5F C9 C3
+
+. 2730 804B934 5
+. E8 1F 2D 01 00
+
+. 2731 805E658 27
+. 55 89 E5 53 50 E8 00 00 00 00 5B 81 C3 B2 2A 00 00 8B 83 10 FF FF FF 85 C0 75 1C
+
+. 2732 805E673 18
+. 8B 93 0C FF FF FF 8B 8B 08 FF FF FF 31 C0 39 D1 74 0F
+
+. 2733 805E694 5
+. 8B 5D FC C9 C3
+
+. 2734 804B939 8
+. 89 04 24 E8 BF CD FF FF
+
+. 2735 8048700 6
+. FF 25 58 11 06 08
+
+. 2736 8048706 10
+. 68 70 00 00 00 E9 00 FF FF FF
+
+. 2737 4202B0F0 11
+. 55 89 E5 57 56 53 E8 62 A6 FE FF
+
+. 2738 4202B0FB 22
+. 81 C3 D5 F1 0F 00 83 EC 0C 8B 8B 5C C2 FF FF 8B 7D 08 85 C9 74 7E
+
+. 2739 4202B111 2
+. EB 0D
+
+. 2740 4202B120 9
+. 8B 41 04 89 CA 85 C0 74 48
+
+. 2741 4202B129 31
+. 8D B4 26 00 00 00 00 8B 42 04 48 89 42 04 C1 E0 04 8D 04 10 8B 50 08 8D 70 08 83 FA 04 77 20
+
+. 2742 4202B148 13
+. 8D 83 78 07 FF FF 8B 04 90 01 D8 FF E0
+
+. 2743 4202B1B5 12
+. 8B 46 08 89 7C 24 04 89 04 24 EB 9E
+
+. 2744 4202B15F 3
+. FF 56 04
+
+. 2745 805E800 23
+. 55 89 E5 53 52 E8 00 00 00 00 5B 81 C3 0A 29 00 00 90 E8 81 9F FE FF
+
+. 2746 8048798 15
+. 55 89 E5 83 EC 08 80 3D 84 11 06 08 00 75 29
+
+. 2747 80487A7 11
+. A1 08 10 06 08 8B 10 85 D2 74 17
+
+. 2748 80487C9 9
+. C6 05 84 11 06 08 01 C9 C3
+
+. 2749 805E817 5
+. 8B 5D FC C9 C3
+
+. 2750 4202B162 15
+. 8B 8B 5C C2 FF FF 8B 41 04 89 CA 85 C0 75 BF
+
+. 2751 4202B130 24
+. 8B 42 04 48 89 42 04 C1 E0 04 8D 04 10 8B 50 08 8D 70 08 83 FA 04 77 20
+
+. 2752 4000A950 11
+. 55 89 E5 57 56 53 E8 01 52 00 00
+
+. 2753 4000A95B 53
+. 81 C3 5D 7B 00 00 83 EC 4C C7 45 F0 00 00 00 00 8B 93 6C FB FF FF 8B BB 68 FB FF FF 8D 0C 95 0F 00 00 00 83 E1 F0 29 CC 8D 4C 24 0C 85 FF 89 F8 89 4D EC 74 20
+vex iropt: 4 x unrolling (27 sts -> 108 sts)
+
+. 2754 4000A990 26
+. FF 87 70 01 00 00 8B 75 F0 8B 55 EC 89 3C B2 46 8B 7F 0C 89 75 F0 85 FF 75 E6
+
+. 2755 4000A9AA 17
+. 8B 93 6C FB FF FF 8B 78 0C 85 FF 0F 84 D0 00 00 00
+
+. 2756 4000A9BB 15
+. C7 45 E8 01 00 00 00 8B 45 EC 39 78 04 74 14
+
+. 2757 4000A9DE 15
+. 8B 4D E8 41 39 D1 89 4D BC 0F 83 93 00 00 00
+
+. 2758 4000A9ED 22
+. 8B 45 E8 8B 75 EC C1 E0 02 89 45 C0 01 C6 F7 5D C0 89 75 C4 EB 0D
+
+. 2759 4000AA10 25
+. 8B 55 BC 8B 4D EC 8B 04 91 89 45 CC 8B 90 D4 01 00 00 89 45 E4 85 D2 74 38
+
+. 2760 4000AA29 6
+. 8B 02 85 C0 74 32
+
+. 2761 4000AA2F 41
+. 8B 4D C0 8B 75 BC 8D 34 B1 8B 4D E8 89 75 B8 8B 75 EC 8D 0C 8E 89 4D C8 89 F6 8D BC 27 00 00 00 00 39 F8 0F 84 CF 01 00 00
+
+. 2762 4000AA58 9
+. 83 C2 04 8B 02 85 C0 75 EF
+
+. 2763 4000AA50 8
+. 39 F8 0F 84 CF 01 00 00
+
+. 2764 4000AA61 17
+. 8B 75 CC 8B 8E E0 01 00 00 85 C9 0F 85 4F 01 00 00
+
+. 2765 4000AA72 14
+. FF 45 BC 8B 93 6C FB FF FF 39 55 BC 72 90
+
+. 2766 4000AA80 11
+. 8B 7F 0C 85 FF 0F 85 30 FF FF FF
+
+. 2767 4000A9CA 20
+. 8D B6 00 00 00 00 FF 45 E8 8B 75 EC 8B 45 E8 39 3C 86 75 F2
+vex iropt: 4 x unrolling (21 sts -> 84 sts)
+
+. 2768 4000A9D0 14
+. FF 45 E8 8B 75 EC 8B 45 E8 39 3C 86 75 F2
+
+. 2769 4000AA8B 16
+. C7 45 F0 00 00 00 00 39 55 F0 0F 83 9D 00 00 00
+
+. 2770 4000AA9B 21
+. 8B 75 F0 8B 45 EC 8B 3C B0 0F B6 8F 74 01 00 00 F6 C1 08 74 7C
+
+. 2771 4000AAB0 19
+. 88 CA 8B 47 04 80 E2 F7 88 97 74 01 00 00 80 38 00 75 07
+
+. 2772 4000AAC3 7
+. 80 E1 03 84 C9 74 5C
+
+. 2773 4000AB26 18
+. 8B 93 6C FB FF FF FF 45 F0 39 55 F0 0F 82 63 FF FF FF
+
+. 2774 4000AACA 10
+. 8B 87 80 00 00 00 85 C0 75 07
+
+. 2775 4000AAD4 7
+. 8B 4F 4C 85 C9 74 4B
+
+. 2776 4000AADB 13
+. F6 83 88 FB FF FF 02 0F 85 AC 00 00 00
+
+. 2777 4000AAE8 4
+. 85 C0 74 33
+
+. 2778 4000AB1F 7
+. 8B 47 4C 85 C0 75 63
+
+. 2779 4000AB89 9
+. 8B 40 04 8B 0F 01 C8 FF D0
+
+. 2780 400BC7EC 23
+. 55 89 E5 53 52 E8 00 00 00 00 5B 81 C3 A6 BF 01 00 90 E8 45 AA F8 FF
+
+. 2781 40047248 26
+. 55 89 E5 53 52 E8 00 00 00 00 5B 81 C3 4A 15 09 00 80 BB 24 00 00 00 00 75 34
+
+. 2782 40047262 10
+. 8B 83 14 00 00 00 85 C0 75 2F
+
+. 2783 4004729B 14
+. 83 EC 0C FF B3 64 F8 FF FF E8 5B FF FF FF
+
+. 2784 40047204 6
+. FF A3 0C 00 00 00
+
+. 2785 4004720A 10
+. 68 00 00 00 00 E9 E0 FF FF FF
+
+. 2786 400471F4 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 2787 4202B350 11
+. 55 89 E5 57 56 53 E8 02 A4 FE FF
+
+. 2788 4202B35B 16
+. 81 C3 75 EF 0F 00 83 EC 1C 8B 45 08 85 C0 74 4B
+
+. 2789 4202B36B 10
+. 8B BB 5C C2 FF FF 85 FF 74 41
+
+. 2790 4202B375 31
+. 8D 74 26 00 8D BC 27 00 00 00 00 8B 47 04 8D 57 08 89 55 F0 C1 E0 04 8D 74 38 F8 39 D6 72 1C
+
+. 2791 4202B3B0 6
+. 8B 3F 85 FF 75 CA
+
+. 2792 4202B3B6 8
+. 83 C4 1C 5B 5E 5F 5D C3
+
+. 2793 400472A9 5
+. 83 C4 10 EB BE
+
+. 2794 4004726C 12
+. 8B 83 68 F8 FF FF 8B 10 85 D2 74 17
+
+. 2795 4004728F 12
+. C6 83 24 00 00 00 01 8B 5D FC C9 C3
+
+. 2796 400BC803 5
+. E8 72 FC FA FF
+
+. 2797 4006C47A 1
+. C3
+
+. 2798 400BC808 5
+. 8B 5D FC C9 C3
+
+. 2799 4000AB92 2
+. EB 92
+
+. 2800 40479090 23
+. 55 89 E5 53 52 E8 00 00 00 00 5B 81 C3 92 60 00 00 90 E8 7D 86 FE FF
+
+. 2801 40461724 26
+. 55 89 E5 53 52 E8 00 00 00 00 5B 81 C3 FE D9 01 00 80 BB 94 00 00 00 00 75 34
+
+. 2802 4046173E 10
+. 8B 83 84 00 00 00 85 C0 75 2F
+
+. 2803 40461777 14
+. 83 EC 0C FF B3 D4 FE FF FF E8 DF FE FF FF
+
+. 2804 40461664 6
+. FF A3 58 00 00 00
+
+. 2805 4046166A 10
+. 68 98 00 00 00 E9 B0 FE FF FF
+
+. 2806 40461524 12
+. FF B3 04 00 00 00 FF A3 08 00 00 00
+
+. 2807 40461785 5
+. 83 C4 10 EB BE
+
+. 2808 40461748 12
+. 8B 83 D8 FE FF FF 8B 10 85 D2 74 17
+
+. 2809 4046176B 12
+. C6 83 94 00 00 00 01 8B 5D FC C9 C3
+
+. 2810 404790A7 5
+. 8B 5D FC C9 C3
+
+. 2811 4210DB60 14
+. 55 89 E5 83 EC 08 89 1C 24 E8 EF 7B F0 FF
+
+. 2812 4210DB6E 22
+. 81 C3 62 C7 01 00 89 74 24 04 8D B3 FC FF FF FF 8B 06 85 C0 75 0C
+
+. 2813 4210DB84 11
+. 8B 1C 24 8B 74 24 04 89 EC 5D C3
+
+. 2814 4000AB38 9
+. 80 BB 88 FB FF FF 00 78 08
+
+. 2815 4000AB41 8
+. 8D 65 F4 5B 5E 5F 5D C3
+
+. 2816 4202B171 12
+. 8B 11 85 D2 89 93 5C C2 FF FF 74 08
+
+. 2817 4202B185 10
+. 8B 8B 5C C2 FF FF 85 C9 75 91
+
+. 2818 4202B18F 14
+. 8B B3 40 02 00 00 3B B3 88 02 00 00 73 24
+
+. 2819 4202B19D 5
+. 8D 76 00 FF 16
+
+. 2820 420701A0 11
+. 55 89 E5 57 56 53 E8 B2 55 FA FF
+
+. 2821 420701AB 14
+. 81 C3 25 A1 0B 00 83 EC 0C E8 B7 FF FF FF
+
+. 2822 42070170 14
+. 55 89 E5 83 EC 08 89 5D FC E8 DF 55 FA FF
+
+. 2823 4207017E 18
+. 81 C3 52 A1 0B 00 C7 04 24 01 00 00 00 E8 50 F4 FF FF
+
+. 2824 4206F5E0 13
+. 55 89 E5 31 D2 57 56 53 E8 70 61 FA FF
+
+. 2825 4206F5ED 37
+. 81 C3 E3 AC 0B 00 83 EC 3C C7 45 D4 00 00 00 00 83 BB A4 02 00 00 00 0F 95 C2 85 D2 89 55 D0 0F 85 55 01 00 00
+
+. 2826 4206F612 7
+. 8B 45 08 85 C0 74 0E
+
+. 2827 4206F619 14
+. 8B 83 88 01 00 00 85 C0 0F 85 2D 01 00 00
+
+. 2828 4206F627 20
+. 8B B3 48 D2 FF FF 8B BB 78 0E 00 00 85 F6 0F 84 A0 00 00 00
+
+. 2829 4206F63B 18
+. 90 8D 74 26 00 89 B3 7C 0E 00 00 8B 45 08 85 C0 74 11
+
+. 2830 4206F64D 17
+. 0F B7 06 25 00 80 FF FF 66 85 C0 0F 84 E9 00 00 00
+
+. 2831 4206F747 8
+. 89 34 24 E8 B5 5B FA FF
+
+. 2832 4206F74F 5
+. E9 0A FF FF FF
+
+. 2833 4206F65E 11
+. 8B 56 5C 85 D2 0F 8E C9 00 00 00
+
+. 2834 4206F732 12
+. 8B 46 10 39 46 14 0F 86 2B FF FF FF
+
+. 2835 4206F669 8
+. 0F B6 4E 46 84 C9 75 34
+
+. 2836 4206F671 4
+. 85 D2 7E 30
+
+. 2837 4206F6A5 7
+. 8B 45 08 85 C0 74 0D
+
+. 2838 4206F6AC 13
+. 0F B7 06 25 00 80 FF FF 66 85 C0 74 6F
+
+. 2839 4206F728 8
+. 89 34 24 E8 84 5B FA FF
+
+. 2840 4206F730 2
+. EB 87
+
+. 2841 4206F6B9 18
+. 8B 83 78 0E 00 00 31 C9 89 8B 7C 0E 00 00 39 C7 74 58
+
+. 2842 4206F723 5
+. 8B 76 34 EB AB
+
+. 2843 4206F6D3 8
+. 85 F6 0F 85 65 FF FF FF
+
+. 2844 4206F640 13
+. 89 B3 7C 0E 00 00 8B 45 08 85 C0 74 11
+
+. 2845 4206F6DB 7
+. 8B 45 08 85 C0 74 0A
+
+. 2846 4206F6E2 10
+. 8B 83 44 02 00 00 85 C0 75 27
+
+. 2847 4206F6EC 7
+. 8B 45 D0 85 C0 75 0B
+
+. 2848 4206F6F3 11
+. 8B 45 D4 83 C4 3C 5B 5E 5F 5D C3
+
+. 2849 42070190 7
+. 8B 5D FC 89 EC 5D C3
+
+. 2850 420701B9 12
+. 8B B3 48 D2 FF FF 89 C7 85 F6 74 33
+
+. 2851 420701C5 20
+. 8D 74 26 00 8D BC 27 00 00 00 00 8B 06 A9 02 00 00 00 75 11
+
+. 2852 420701EA 14
+. C7 46 5C FF FF FF FF 8B 76 34 85 F6 75 D8
+
+. 2853 420701D0 9
+. 8B 06 A9 02 00 00 00 75 11
+
+. 2854 420701D9 10
+. 25 08 10 00 00 83 F8 08 74 07
+
+. 2855 420701E3 7
+. 8B 46 5C 85 C0 75 18
+
+. 2856 42070202 33
+. 0F BE 56 46 8B 84 32 94 00 00 00 C7 44 24 08 00 00 00 00 89 34 24 C7 44 24 04 00 00 00 00 FF 50 2C
+
+. 2857 4206DF50 23
+. 55 89 E5 83 EC 18 89 75 FC 8B 45 0C 8B 75 08 89 5D F8 E8 F6 77 FA FF
+
+. 2858 4206DF67 25
+. 81 C3 69 C3 0B 00 89 44 24 04 8B 45 10 89 34 24 89 44 24 08 E8 60 1E 00 00
+
+. 2859 4206FDE0 30
+. 55 89 E5 83 EC 28 89 75 F8 8B 75 08 89 5D F4 89 7D FC 8B 7D 0C 0F BE 56 46 E8 5F 59 FA FF
+
+. 2860 4206FDFE 19
+. 81 C3 D2 A4 0B 00 8B 84 32 94 00 00 00 89 34 24 FF 50 30
+
+. 2861 4206FE11 5
+. 31 D2 40 74 58
+
+. 2862 4206FE16 4
+. 85 FF 74 07
+
+. 2863 4206FE21 33
+. 83 0E 02 8D 56 47 8D 46 48 89 34 24 89 54 24 04 89 44 24 08 C7 44 24 0C 00 00 00 00 E8 2E FE FF FF
+
+. 2864 4206FC91 10
+. 8B 16 F7 C2 01 00 00 00 74 2A
+
+. 2865 4206FCC5 27
+. 89 0C 24 8B 46 20 29 C8 05 FF 0F 00 00 25 00 F0 FF FF 89 44 24 04 E8 80 70 06 00
+
+. 2866 420D6D60 17
+. 89 DA 8B 4C 24 08 8B 5C 24 04 B8 5B 00 00 00 CD 80
+
+. 2867 420D6D71 9
+. 89 D3 3D 01 F0 FF FF 73 01
+
+. 2868 420D6D7A 1
+. C3
+
+. 2869 4206FCC0 5
+. 83 CA 01 EB EC
+
+. 2870 4206FCB1 12
+. 89 16 8B 5D F8 8B 75 FC 89 EC 5D C3
+
+. 2871 4206FE42 59
+. 89 F2 C7 46 18 00 00 00 00 C7 46 14 00 00 00 00 C7 46 10 00 00 00 00 C7 46 08 00 00 00 00 C7 46 04 00 00 00 00 C7 46 0C 00 00 00 00 8B 5D F4 89 D0 8B 75 F8 8B 7D FC 89 EC 5D C3
+
+. 2872 4206DF80 6
+. 31 D2 85 C0 74 17
+
+. 2873 4206DF86 35
+. 8B 46 1C 89 F2 89 46 18 89 46 14 89 46 10 89 46 0C 89 46 04 89 46 08 8B 5D F8 89 D0 8B 75 FC 89 EC 5D C3
+
+. 2874 42070223 2
+. EB C5
+
+. 2875 420701F8 10
+. 83 C4 0C 89 F8 5B 5E 5F 5D C3
+
+. 2876 4202B1A2 11
+. 83 C6 04 3B B3 88 02 00 00 72 F3
+
+. 2877 4202B1AD 2
+. EB 12
+
+. 2878 4202B1C1 8
+. 89 3C 24 E8 BB A4 FE FF
+
+. 2879 42015684 6
+. FF A3 34 01 00 00
+
+. 2880 4201568A 10
+. 68 50 02 00 00 E9 40 FB FF FF
+
+. 2881 420AE610 13
+. 89 DA 8B 5C 24 04 B8 01 00 00 00 CD 80
+==13306== 
+--13306--       lru: 1494 epochs, 0 clearings.
+--13306-- translate: new 2882 (113298 -> 626759), discard 0 (0 -> 0).
+--13306--  dispatch: 149400000 basic blocks, 1495/98742 sched events, 66040 tt_fast misses.
+--13306-- reg-alloc: 0 t-req-spill, 0+0 orig+spill uis, 0 total-reg-r.
+--13306--    sanity: 1496 cheap, 60 expensive checks.
+vex storage:  P 176,  T total 158600256 (4015121),  T curr 0 (0)
diff --git a/VEX/priv/guest_amd64_defs.h b/VEX/priv/guest_amd64_defs.h
new file mode 100644
index 0000000..003ebde
--- /dev/null
+++ b/VEX/priv/guest_amd64_defs.h
@@ -0,0 +1,566 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                guest_amd64_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* Only to be used within the guest-amd64 directory. */
+
+#ifndef __VEX_GUEST_AMD64_DEFS_H
+#define __VEX_GUEST_AMD64_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"              // VexEmNote
+#include "libvex_guest_amd64.h"         // VexGuestAMD64State
+#include "guest_generic_bb_to_IR.h"     // DisResult
+
+/*---------------------------------------------------------*/
+/*--- amd64 to IR conversion                            ---*/
+/*---------------------------------------------------------*/
+
+/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
+   bb_to_IR.h. */
+extern
+DisResult disInstr_AMD64 ( IRSB*        irbb,
+                           Bool         (*resteerOkFn) ( void*, Addr ),
+                           Bool         resteerCisOk,
+                           void*        callback_opaque,
+                           const UChar* guest_code,
+                           Long         delta,
+                           Addr         guest_IP,
+                           VexArch      guest_arch,
+                           const VexArchInfo* archinfo,
+                           const VexAbiInfo*  abiinfo,
+                           VexEndness   host_endness,
+                           Bool         sigill_diag );
+
+/* Used by the optimiser to specialise calls to helpers. */
+extern
+IRExpr* guest_amd64_spechelper ( const HChar* function_name,
+                                 IRExpr** args,
+                                 IRStmt** precedingStmts,
+                                 Int      n_precedingStmts );
+
+/* Describes to the optimiser which part of the guest state require
+   precise memory exceptions.  This is logically part of the guest
+   state description. */
+extern 
+Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int,
+                                                   VexRegisterUpdates );
+
+extern
+VexGuestLayout amd64guest_layout;
+
+
+/*---------------------------------------------------------*/
+/*--- amd64 guest helpers                               ---*/
+/*---------------------------------------------------------*/
+
+/* --- CLEAN HELPERS --- */
+
+extern ULong amd64g_calculate_rflags_all ( 
+                ULong cc_op, 
+                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep 
+             );
+
+extern ULong amd64g_calculate_rflags_c ( 
+                ULong cc_op, 
+                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep 
+             );
+
+extern ULong amd64g_calculate_condition ( 
+                ULong/*AMD64Condcode*/ cond, 
+                ULong cc_op, 
+                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep 
+             );
+
+extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );
+
+extern ULong amd64g_calculate_RCR  ( 
+                ULong arg, ULong rot_amt, ULong rflags_in, Long sz 
+             );
+
+extern ULong amd64g_calculate_RCL  ( 
+                ULong arg, ULong rot_amt, ULong rflags_in, Long sz 
+             );
+
+extern ULong amd64g_calculate_pclmul(ULong s1, ULong s2, ULong which);
+
+extern ULong amd64g_check_fldcw ( ULong fpucw );
+
+extern ULong amd64g_create_fpucw ( ULong fpround );
+
+extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );
+
+extern ULong amd64g_create_mxcsr ( ULong sseround );
+
+extern VexEmNote amd64g_dirtyhelper_FLDENV  ( VexGuestAMD64State*, HWord );
+extern VexEmNote amd64g_dirtyhelper_FRSTOR  ( VexGuestAMD64State*, HWord );
+extern VexEmNote amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );
+
+extern void amd64g_dirtyhelper_FSTENV  ( VexGuestAMD64State*, HWord );
+extern void amd64g_dirtyhelper_FNSAVE  ( VexGuestAMD64State*, HWord );
+extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );
+
+/* Translate a guest virtual_addr into a guest linear address by
+   consulting the supplied LDT/GDT structures.  Their representation
+   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
+   translation failure, 1<<32 is returned.  On success, the lower 32
+   bits of the returned result indicate the linear address.  
+*/
+//extern 
+//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt, 
+//                              UInt seg_selector, UInt virtual_addr );
+
+extern ULong amd64g_calculate_mmx_pmaddwd  ( ULong, ULong );
+extern ULong amd64g_calculate_mmx_psadbw   ( ULong, ULong );
+
+extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );
+
+extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
+extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
+extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
+extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );
+
+extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
+                                   ULong dHi, ULong dLo,
+                                   ULong imm_and_return_control_bit );
+
+extern ULong amd64g_calculate_pext  ( ULong, ULong );
+extern ULong amd64g_calculate_pdep  ( ULong, ULong );
+
+/* --- DIRTY HELPERS --- */
+
+extern ULong amd64g_dirtyhelper_loadF80le  ( Addr/*addr*/ );
+
+extern void  amd64g_dirtyhelper_storeF80le ( Addr/*addr*/, ULong/*data*/ );
+
+extern void  amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
+extern void  amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
+extern void  amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
+extern void  amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st );
+
+extern void  amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );
+
+extern void      amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM
+                    ( VexGuestAMD64State*, HWord );
+extern VexEmNote amd64g_dirtyhelper_FXRSTOR_ALL_EXCEPT_XMM
+                    ( VexGuestAMD64State*, HWord );
+
+extern ULong amd64g_dirtyhelper_RDTSC ( void );
+extern void  amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st );
+
+extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
+extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data, 
+                                      ULong sz/*1,2 or 4*/ );
+
+extern void amd64g_dirtyhelper_SxDT ( void* address,
+                                      ULong op /* 0 or 1 */ );
+
+/* Helps with PCMP{I,E}STR{I,M}.
+
+   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
+   actually it could be a clean helper, but for the fact that we can't
+   pass by value 2 x V128 to a clean helper, nor have one returned.)
+   Reads guest state, writes to guest state for the xSTRM cases, no
+   accesses of memory, is a pure function.
+
+   opc_and_imm contains (4th byte of opcode << 8) | the-imm8-byte so
+   the callee knows which I/E and I/M variant it is dealing with and
+   what the specific operation is.  4th byte of opcode is in the range
+   0x60 to 0x63:
+       istri  66 0F 3A 63
+       istrm  66 0F 3A 62
+       estri  66 0F 3A 61
+       estrm  66 0F 3A 60
+
+   gstOffL and gstOffR are the guest state offsets for the two XMM
+   register inputs.  We never have to deal with the memory case since
+   that is handled by pre-loading the relevant value into the fake
+   XMM16 register.
+
+   For ESTRx variants, edxIN and eaxIN hold the values of those two
+   registers.
+
+   In all cases, the bottom 16 bits of the result contain the new
+   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
+   result hold the new %ecx value.  For xSTRM variants, the helper
+   writes the result directly to the guest XMM0.
+
+   Declarable side effects: in all cases, reads guest state at
+   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
+   guest_XMM0.
+
+   Is expected to be called with opc_and_imm combinations which have
+   actually been validated, and will assert if otherwise.  The front
+   end should ensure we're only called with verified values.
+*/
+extern ULong amd64g_dirtyhelper_PCMPxSTRx ( 
+          VexGuestAMD64State*,
+          HWord opc4_and_imm,
+          HWord gstOffL, HWord gstOffR,
+          HWord edxIN, HWord eaxIN
+       );
+
+/* Implementation of intel AES instructions as described in
+   Intel  Advanced Vector Extensions
+          Programming Reference
+          MARCH 2008
+          319433-002.
+
+   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
+   actually it could be a clean helper, but for the fact that we can't
+   pass by value 2 x V128 to a clean helper, nor have one returned.)
+   Reads guest state, writes to guest state, no
+   accesses of memory, is a pure function.
+
+   opc4 contains the 4th byte of opcode. Front-end should only
+   give opcode corresponding to AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC.
+   (will assert otherwise).
+
+   gstOffL and gstOffR are the guest state offsets for the two XMM
+   register inputs, gstOffD is the guest state offset for the XMM register
+   output.  We never have to deal with the memory case since that is handled
+   by pre-loading the relevant value into the fake XMM16 register.
+
+*/
+extern void amd64g_dirtyhelper_AES ( 
+          VexGuestAMD64State* gst,
+          HWord opc4, HWord gstOffD,
+          HWord gstOffL, HWord gstOffR
+       );
+
+/* Implementation of AESKEYGENASSIST. 
+
+   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
+   actually it could be a clean helper, but for the fact that we can't
+   pass by value 1 x V128 to a clean helper, nor have one returned.)
+   Reads guest state, writes to guest state, no
+   accesses of memory, is a pure function.
+
+   imm8 is the Round Key constant.
+
+   gstOffL and gstOffR are the guest state offsets for the two XMM
+   register input and output.  We never have to deal with the memory case since
+   that is handled by pre-loading the relevant value into the fake
+   XMM16 register.
+
+*/
+extern void amd64g_dirtyhelper_AESKEYGENASSIST ( 
+          VexGuestAMD64State* gst,
+          HWord imm8,
+          HWord gstOffL, HWord gstOffR
+       );
+
+//extern void  amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
+//extern void  amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
+//extern void  amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );
+
+//extern void  amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );
+
+//extern VexEmNote
+//            amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );
+
+//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );
+
+//extern VexEmNote
+//            amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );
+
+
+
+/*---------------------------------------------------------*/
+/*--- Condition code stuff                              ---*/
+/*---------------------------------------------------------*/
+
+/* rflags masks */
+#define AMD64G_CC_SHIFT_O   11
+#define AMD64G_CC_SHIFT_S   7
+#define AMD64G_CC_SHIFT_Z   6
+#define AMD64G_CC_SHIFT_A   4
+#define AMD64G_CC_SHIFT_C   0
+#define AMD64G_CC_SHIFT_P   2
+
+#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)
+#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)
+#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)
+#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)
+#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)
+#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)
+
+/* FPU flag masks */
+#define AMD64G_FC_SHIFT_C3   14
+#define AMD64G_FC_SHIFT_C2   10
+#define AMD64G_FC_SHIFT_C1   9
+#define AMD64G_FC_SHIFT_C0   8
+
+#define AMD64G_FC_MASK_C3    (1ULL << AMD64G_FC_SHIFT_C3)
+#define AMD64G_FC_MASK_C2    (1ULL << AMD64G_FC_SHIFT_C2)
+#define AMD64G_FC_MASK_C1    (1ULL << AMD64G_FC_SHIFT_C1)
+#define AMD64G_FC_MASK_C0    (1ULL << AMD64G_FC_SHIFT_C0)
+
+
+/* %RFLAGS thunk descriptors.  A four-word thunk is used to record
+   details of the most recent flag-setting operation, so the flags can
+   be computed later if needed.  It is possible to do this a little
+   more efficiently using a 3-word thunk, but that makes it impossible
+   to describe the flag data dependencies sufficiently accurately for
+   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.
+
+   The four words are:
+
+      CC_OP, which describes the operation.
+
+      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
+         We want Memcheck to believe that the resulting flags are
+         data-dependent on both CC_DEP1 and CC_DEP2, hence the 
+         name DEP.
+
+      CC_NDEP.  This is a 3rd argument to the operation which is
+         sometimes needed.  We arrange things so that Memcheck does
+         not believe the resulting flags are data-dependent on CC_NDEP
+         ("not dependent").
+
+   To make Memcheck believe that (the definedness of) the encoded
+   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
+   requires two things:
+
+   (1) In the guest state layout info (amd64guest_layout), CC_OP and
+       CC_NDEP are marked as always defined.
+
+   (2) When passing the thunk components to an evaluation function
+       (calculate_condition, calculate_eflags, calculate_eflags_c) the
+       IRCallee's mcx_mask must be set so as to exclude from
+       consideration all passed args except CC_DEP1 and CC_DEP2.
+
+   Strictly speaking only (2) is necessary for correctness.  However,
+   (1) helps efficiency in that since (2) means we never ask about the
+   definedness of CC_OP or CC_NDEP, we may as well not even bother to
+   track their definedness.
+
+   When building the thunk, it is always necessary to write words into
+   CC_DEP1 and CC_DEP2, even if those args are not used given the
+   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGIC1/2/4).
+   This is important because otherwise Memcheck could give false
+   positives as it does not understand the relationship between the
+   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the 
+   definedness of the stored flags always depends on both CC_DEP1 and
+   CC_DEP2.
+
+   However, it is only necessary to set CC_NDEP when the CC_OP value
+   requires it, because Memcheck ignores CC_NDEP, and the evaluation
+   functions do understand the CC_OP fields and will only examine
+   CC_NDEP for suitable values of CC_OP.
+
+   A summary of the field usages is:
+
+   Operation          DEP1               DEP2               NDEP
+   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   add/sub/mul        first arg          second arg         unused
+
+   adc/sbb            first arg          (second arg)
+                                         XOR old_carry      old_carry
+
+   and/or/xor         result             zero               unused
+
+   inc/dec            result             zero               old_carry
+
+   shl/shr/sar        result             subshifted-        unused
+                                         result
+
+   rol/ror            result             zero               old_flags
+
+   copy               old_flags          zero               unused.
+
+
+   Therefore Memcheck will believe the following:
+
+   * add/sub/mul -- definedness of result flags depends on definedness
+     of both args.
+
+   * adc/sbb -- definedness of result flags depends on definedness of
+     both args and definedness of the old C flag.  Because only two
+     DEP fields are available, the old C flag is XOR'd into the second
+     arg so that Memcheck sees the data dependency on it.  That means
+     the NDEP field must contain a second copy of the old C flag
+     so that the evaluation functions can correctly recover the second
+     arg.
+
+   * and/or/xor are straightforward -- definedness of result flags
+     depends on definedness of result value.
+
+   * inc/dec -- definedness of result flags depends only on
+     definedness of result.  This isn't really true -- it also depends
+     on the old C flag.  However, we don't want Memcheck to see that,
+     and so the old C flag must be passed in NDEP and not in DEP2.
+     It's inconceivable that a compiler would generate code that puts
+     the C flag in an undefined state, then does an inc/dec, which
+     leaves C unchanged, and then makes a conditional jump/move based
+     on C.  So our fiction seems a good approximation.
+
+   * shl/shr/sar -- straightforward, again, definedness of result
+     flags depends on definedness of result value.  The subshifted
+     value (value shifted one less) is also needed, but its
+     definedness is the same as the definedness of the shifted value.
+
+   * rol/ror -- these only set O and C, and leave A Z C P alone.
+     However it seems prudent (as per inc/dec) to say the definedness
+     of all resulting flags depends on the definedness of the result,
+     hence the old flags must go in as NDEP and not DEP2.
+
+   * rcl/rcr are too difficult to do in-line, and so are done by a
+     helper function.  They are not part of this scheme.  The helper
+     function takes the value to be rotated, the rotate amount and the
+     old flags, and returns the new flags and the rotated value.
+     Since the helper's mcx_mask does not have any set bits, Memcheck
+     will lazily propagate undefinedness from any of the 3 args into 
+     both results (flags and actual value).
+*/
+/* Values for the CC_OP thunk field.  Except for COPY and the BMI
+   entries, each group of four covers the B/W/L/Q operand widths in
+   that order; the DEP1/DEP2/NDEP conventions for each group are
+   spelled out in the big comment above. */
+enum {
+    AMD64G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
+                          /* just copy DEP1 to output */
+
+    AMD64G_CC_OP_ADDB,    /* 1 */
+    AMD64G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    AMD64G_CC_OP_ADDL,    /* 3 */
+    AMD64G_CC_OP_ADDQ,    /* 4 */
+
+    AMD64G_CC_OP_SUBB,    /* 5 */
+    AMD64G_CC_OP_SUBW,    /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    AMD64G_CC_OP_SUBL,    /* 7 */
+    AMD64G_CC_OP_SUBQ,    /* 8 */
+
+    AMD64G_CC_OP_ADCB,    /* 9 */
+    AMD64G_CC_OP_ADCW,    /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
+    AMD64G_CC_OP_ADCL,    /* 11 */
+    AMD64G_CC_OP_ADCQ,    /* 12 */
+
+    AMD64G_CC_OP_SBBB,    /* 13 */
+    AMD64G_CC_OP_SBBW,    /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
+    AMD64G_CC_OP_SBBL,    /* 15 */
+    AMD64G_CC_OP_SBBQ,    /* 16 */
+
+    AMD64G_CC_OP_LOGICB,  /* 17 */
+    AMD64G_CC_OP_LOGICW,  /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
+    AMD64G_CC_OP_LOGICL,  /* 19 */
+    AMD64G_CC_OP_LOGICQ,  /* 20 */
+
+    AMD64G_CC_OP_INCB,    /* 21 */
+    AMD64G_CC_OP_INCW,    /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
+    AMD64G_CC_OP_INCL,    /* 23 */
+    AMD64G_CC_OP_INCQ,    /* 24 */
+
+    AMD64G_CC_OP_DECB,    /* 25 */
+    AMD64G_CC_OP_DECW,    /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
+    AMD64G_CC_OP_DECL,    /* 27 */
+    AMD64G_CC_OP_DECQ,    /* 28 */
+
+    AMD64G_CC_OP_SHLB,    /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
+    AMD64G_CC_OP_SHLW,    /* 30 where res' is like res but shifted one bit less */
+    AMD64G_CC_OP_SHLL,    /* 31 */
+    AMD64G_CC_OP_SHLQ,    /* 32 */
+
+    AMD64G_CC_OP_SHRB,    /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
+    AMD64G_CC_OP_SHRW,    /* 34 where res' is like res but shifted one bit less */
+    AMD64G_CC_OP_SHRL,    /* 35 */
+    AMD64G_CC_OP_SHRQ,    /* 36 */
+
+    AMD64G_CC_OP_ROLB,    /* 37 */
+    AMD64G_CC_OP_ROLW,    /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
+    AMD64G_CC_OP_ROLL,    /* 39 */
+    AMD64G_CC_OP_ROLQ,    /* 40 */
+
+    AMD64G_CC_OP_RORB,    /* 41 */
+    AMD64G_CC_OP_RORW,    /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
+    AMD64G_CC_OP_RORL,    /* 43 */
+    AMD64G_CC_OP_RORQ,    /* 44 */
+
+    AMD64G_CC_OP_UMULB,   /* 45 */
+    AMD64G_CC_OP_UMULW,   /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    AMD64G_CC_OP_UMULL,   /* 47 */
+    AMD64G_CC_OP_UMULQ,   /* 48 */
+
+    AMD64G_CC_OP_SMULB,   /* 49 */
+    AMD64G_CC_OP_SMULW,   /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    AMD64G_CC_OP_SMULL,   /* 51 */
+    AMD64G_CC_OP_SMULQ,   /* 52 */
+
+    AMD64G_CC_OP_ANDN32,  /* 53 */
+    AMD64G_CC_OP_ANDN64,  /* 54 DEP1 = res, DEP2 = 0, NDEP = unused */
+
+    AMD64G_CC_OP_BLSI32,  /* 55 */
+    AMD64G_CC_OP_BLSI64,  /* 56 DEP1 = res, DEP2 = arg, NDEP = unused */
+
+    AMD64G_CC_OP_BLSMSK32,/* 57 */
+    AMD64G_CC_OP_BLSMSK64,/* 58 DEP1 = res, DEP2 = arg, NDEP = unused */
+
+    AMD64G_CC_OP_BLSR32,  /* 59 */
+    AMD64G_CC_OP_BLSR64,  /* 60 DEP1 = res, DEP2 = arg, NDEP = unused */
+
+    AMD64G_CC_OP_NUMBER
+};
+
+/* amd64 condition codes, using the same 4-bit encoding as the
+   hardware Jcc/SETcc/CMOVcc instructions, so the low bit flips the
+   sense of the test (O=0/NO=1, B=2/NB=3, ...).  AMD64CondAlways is
+   not a hardware encoding; it is an internal extension. */
+typedef
+   enum {
+      AMD64CondO      = 0,  /* overflow           */
+      AMD64CondNO     = 1,  /* no overflow        */
+
+      AMD64CondB      = 2,  /* below              */
+      AMD64CondNB     = 3,  /* not below          */
+
+      AMD64CondZ      = 4,  /* zero               */
+      AMD64CondNZ     = 5,  /* not zero           */
+
+      AMD64CondBE     = 6,  /* below or equal     */
+      AMD64CondNBE    = 7,  /* not below or equal */
+
+      AMD64CondS      = 8,  /* negative           */
+      AMD64CondNS     = 9,  /* not negative       */
+
+      AMD64CondP      = 10, /* parity even        */
+      AMD64CondNP     = 11, /* not parity even    */
+
+      AMD64CondL      = 12, /* less               */
+      AMD64CondNL     = 13, /* not less           */
+
+      AMD64CondLE     = 14, /* less or equal      */
+      AMD64CondNLE    = 15, /* not less or equal  */
+
+      AMD64CondAlways = 16  /* HACK */
+   }
+   AMD64Condcode;
+
+#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                  guest_amd64_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_amd64_helpers.c b/VEX/priv/guest_amd64_helpers.c
new file mode 100644
index 0000000..2887b08
--- /dev/null
+++ b/VEX/priv/guest_amd64_helpers.c
@@ -0,0 +1,4086 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                             guest_amd64_helpers.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_amd64.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_amd64_defs.h"
+#include "guest_generic_x87.h"
+
+
+/* This file contains helper functions for amd64 guest code.
+   Calls to these functions are generated by the back end.
+   These calls are of course in the host machine code and 
+   this file will be compiled to host machine code, so that
+   all makes sense.  
+
+   Only change the signatures of these helper functions very
+   carefully.  If you change the signature here, you'll have to change
+   the parameters passed to it in the IR calls constructed by
+   guest-amd64/toIR.c.
+
+   The convention used is that all functions called from generated
+   code are named amd64g_<something>, and any function whose name lacks
+   that prefix is not called from generated code.  Note that some
+   LibVEX_* functions can however be called by VEX's client, but that
+   is not the same as calling them from VEX-generated code.
+*/
+
+
+/* Set to 1 to get detailed profiling info about use of the flag
+   machinery. */
+#define PROFILE_RFLAGS 0
+
+
+/*---------------------------------------------------------------*/
+/*--- %rflags run-time helpers.                               ---*/
+/*---------------------------------------------------------------*/
+
+/* Do 64x64 -> 128 signed/unsigned multiplies, for computing flags
+   after imulq/mulq. */
+
+/* Full 128-bit signed multiply: writes the high 64 bits of u*v to
+   *rHi and the low 64 bits to *rLo.  C has no portable 128-bit
+   multiply, so the operands are split into 32-bit halves and the
+   partial products are summed (schoolbook / Hacker's Delight
+   "mulhs" scheme).
+   NOTE(review): the >>32 shifts on signed quantities assume an
+   arithmetic (sign-propagating) right shift, which is
+   implementation-defined in C. */
+static void mullS64 ( Long u, Long v, Long* rHi, Long* rLo )
+{
+   ULong u0, v0, w0;
+    Long u1, v1, w1, w2, t;
+   u0   = u & 0xFFFFFFFFULL; 
+   u1   = u >> 32;
+   v0   = v & 0xFFFFFFFFULL;
+   v1   = v >> 32;
+   w0   = u0 * v0;
+   t    = u1 * v0 + (w0 >> 32);
+   w1   = t & 0xFFFFFFFFULL;
+   w2   = t >> 32;
+   w1   = u0 * v1 + w1;
+   *rHi = u1 * v1 + w2 + (w1 >> 32);
+   *rLo = u * v;
+}
+
+/* Full 128-bit unsigned multiply: writes the high 64 bits of u*v to
+   *rHi and the low 64 bits to *rLo.  Same 32-bit-halves decomposition
+   as mullS64 above, but with all intermediates unsigned. */
+static void mullU64 ( ULong u, ULong v, ULong* rHi, ULong* rLo )
+{
+   ULong u0, v0, w0;
+   ULong u1, v1, w1,w2,t;
+   u0   = u & 0xFFFFFFFFULL;
+   u1   = u >> 32;
+   v0   = v & 0xFFFFFFFFULL;
+   v1   = v >> 32;
+   w0   = u0 * v0;
+   t    = u1 * v0 + (w0 >> 32);
+   w1   = t & 0xFFFFFFFFULL;
+   w2   = t >> 32;
+   w1   = u0 * v1 + w1;
+   *rHi = u1 * v1 + w2 + (w1 >> 32);
+   *rLo = u * v;
+}
+
+
+/* parity_table[b] is AMD64G_CC_MASK_P if byte b contains an even
+   number of 1 bits, else 0 -- i.e. the x86 parity flag (PF) for the
+   low 8 bits of a result, already in its rflags bit position. */
+static const UChar parity_table[256] = {
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0,
+    0, AMD64G_CC_MASK_P, AMD64G_CC_MASK_P, 0, AMD64G_CC_MASK_P, 0, 0, AMD64G_CC_MASK_P,
+};
+
+/* Generalised left-shifter: shift x left by n bits when n >= 0, or
+   right by -n bits when n < 0.  Used by the flag-evaluation macros
+   below to move a flag bit into its rflags position for any operand
+   width.
+   NOTE(review): x >> (-n) with negative x is implementation-defined
+   in C; an arithmetic (sign-propagating) shift is assumed. */
+static inline Long lshift ( Long x, Int n )
+{
+   if (n >= 0)
+      return (ULong)x << n;
+   else
+      return x >> (-n);
+}
+
+/* Identity function on ULong: returns its argument unchanged. */
+static inline ULong idULong ( ULong x )
+{
+   return x;
+}
+
+
+/* Common prologue for the ACTIONS_* flag-computation macro bodies
+   below.  Binds DATA_MASK and SIGN_MASK for the given operand width
+   and copies the thunk formals (cc_dep1_formal etc., in scope at the
+   expansion site) into local CC_DEP1 / CC_DEP2 / CC_NDEP names.  The
+   trailing self-assignments exist only to suppress unused-variable
+   warnings in instantiations that do not read all four values. */
+#define PREAMBLE(__data_bits)					\
+   /* const */ ULong DATA_MASK 					\
+      = __data_bits==8                                          \
+           ? 0xFFULL 					        \
+           : (__data_bits==16                                   \
+                ? 0xFFFFULL 		                        \
+                : (__data_bits==32                              \
+                     ? 0xFFFFFFFFULL                            \
+                     : 0xFFFFFFFFFFFFFFFFULL));                 \
+   /* const */ ULong SIGN_MASK = 1ULL << (__data_bits - 1);     \
+   /* const */ ULong CC_DEP1 = cc_dep1_formal;			\
+   /* const */ ULong CC_DEP2 = cc_dep2_formal;			\
+   /* const */ ULong CC_NDEP = cc_ndep_formal;			\
+   /* Four bogus assignments, which hopefully gcc can     */	\
+   /* optimise away, and which stop it complaining about  */	\
+   /* unused variables.                                   */	\
+   SIGN_MASK = SIGN_MASK;					\
+   DATA_MASK = DATA_MASK;					\
+   CC_DEP2 = CC_DEP2;						\
+   CC_NDEP = CC_NDEP;
+
+
+/*-------------------------------------------------------------*/
+
+/* Compute the OSZACP rflags after an ADD of width DATA_BITS, given
+   DEP1 = argL and DEP2 = argR.  Each flag is produced already in its
+   rflags bit position (C=0, P=2, A=4, Z=6, S=7, O=11) and OR'd into
+   the returned value. */
+#define ACTIONS_ADD(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     ULong argL, argR, res;					\
+     argL = CC_DEP1;						\
+     argR = CC_DEP2;						\
+     res  = argL + argR;					\
+     cf = (DATA_UTYPE)res < (DATA_UTYPE)argL;			\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = lshift((argL ^ argR ^ -1) & (argL ^ res), 		\
+                 12 - DATA_BITS) & AMD64G_CC_MASK_O;		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* Compute the OSZACP rflags after a SUB/CMP of width DATA_BITS,
+   given DEP1 = argL and DEP2 = argR.  Carry here is the borrow
+   (argL < argR, unsigned); overflow uses the usual same-sign-in /
+   different-sign-out test on (argL ^ argR) & (argL ^ res). */
+#define ACTIONS_SUB(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     ULong argL, argR, res;					\
+     argL = CC_DEP1;						\
+     argR = CC_DEP2;						\
+     res  = argL - argR;					\
+     cf = (DATA_UTYPE)argL < (DATA_UTYPE)argR;			\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = lshift((argL ^ argR) & (argL ^ res),	 		\
+                 12 - DATA_BITS) & AMD64G_CC_MASK_O; 		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_ADC: flags after add-with-carry.  NDEP carries the old C
+   flag.  argR is recovered as CC_DEP2 ^ oldC (DEP2 is apparently
+   stored xor'd with the old carry — note the matching xor here), and
+   the CF test is <= rather than < when the incoming carry was set. */
+#define ACTIONS_ADC(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     ULong argL, argR, oldC, res;		 		\
+     oldC = CC_NDEP & AMD64G_CC_MASK_C;				\
+     argL = CC_DEP1;						\
+     argR = CC_DEP2 ^ oldC;	       				\
+     res  = (argL + argR) + oldC;				\
+     if (oldC)							\
+        cf = (DATA_UTYPE)res <= (DATA_UTYPE)argL;		\
+     else							\
+        cf = (DATA_UTYPE)res < (DATA_UTYPE)argL;		\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = lshift((argL ^ argR ^ -1) & (argL ^ res), 		\
+                  12 - DATA_BITS) & AMD64G_CC_MASK_O;		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_SBB: flags after subtract-with-borrow.  Mirror image of
+   ACTIONS_ADC: NDEP carries the old C flag, DEP2 is un-xored with it
+   to recover argR, and the borrow test becomes <= when the incoming
+   carry was set. */
+#define ACTIONS_SBB(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     ULong argL, argR, oldC, res;	       			\
+     oldC = CC_NDEP & AMD64G_CC_MASK_C;				\
+     argL = CC_DEP1;						\
+     argR = CC_DEP2 ^ oldC;	       				\
+     res  = (argL - argR) - oldC;				\
+     if (oldC)							\
+        cf = (DATA_UTYPE)argL <= (DATA_UTYPE)argR;		\
+     else							\
+        cf = (DATA_UTYPE)argL < (DATA_UTYPE)argR;		\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = lshift((argL ^ argR) & (argL ^ res), 			\
+                 12 - DATA_BITS) & AMD64G_CC_MASK_O;		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_LOGIC: flags after a logical op (AND/OR/XOR and friends).
+   DEP1 holds the result; CF, AF and OF are all cleared. */
+#define ACTIONS_LOGIC(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     cf = 0;							\
+     pf = parity_table[(UChar)CC_DEP1];				\
+     af = 0;							\
+     zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     of = 0;							\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_INC: flags after increment-by-1.  INC does not modify CF,
+   so the old C flag is passed through unchanged in NDEP.  OF is set
+   iff the result equals the sign-bit value, i.e. the increment
+   wrapped from the most positive signed value. */
+#define ACTIONS_INC(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     ULong argL, argR, res;					\
+     res  = CC_DEP1;						\
+     argL = res - 1;						\
+     argR = 1;							\
+     cf = CC_NDEP & AMD64G_CC_MASK_C;				\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = ((res & DATA_MASK) == SIGN_MASK) << 11;		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_DEC: flags after decrement-by-1.  Like INC, DEC leaves CF
+   alone (old value passed in NDEP).  OF is set iff the result is
+   SIGN_MASK - 1, i.e. the decrement wrapped from the most negative
+   signed value. */
+#define ACTIONS_DEC(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     ULong argL, argR, res;					\
+     res  = CC_DEP1;						\
+     argL = res + 1;						\
+     argR = 1;							\
+     cf = CC_NDEP & AMD64G_CC_MASK_C;				\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = ((res & DATA_MASK) 					\
+          == ((ULong)SIGN_MASK - 1)) << 11;			\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_SHL: flags after a left shift.  DEP1 = final result; the
+   top bit of DEP2 supplies CF (DEP2 presumably holds the value
+   shifted one place less, so that bit is the last one shifted out —
+   TODO confirm against the front end).  AF is undefined (given as 0);
+   OF is architecturally defined only for shift count 1. */
+#define ACTIONS_SHL(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     cf = (CC_DEP2 >> (DATA_BITS - 1)) & AMD64G_CC_MASK_C;	\
+     pf = parity_table[(UChar)CC_DEP1];				\
+     af = 0; /* undefined */					\
+     zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     /* of is defined if shift count == 1 */			\
+     of = lshift(CC_DEP2 ^ CC_DEP1, 12 - DATA_BITS) 		\
+          & AMD64G_CC_MASK_O;					\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_SHR: flags after a right shift.  DEP1 = final result; the
+   low bit of DEP2 supplies CF (DEP2 presumably holds the value
+   shifted one place less — TODO confirm against the front end).
+   AF undefined (0); OF defined only for shift count 1. */
+#define ACTIONS_SHR(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);  					\
+   { ULong cf, pf, af, zf, sf, of;				\
+     cf = CC_DEP2 & 1;						\
+     pf = parity_table[(UChar)CC_DEP1];				\
+     af = 0; /* undefined */					\
+     zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     /* of is defined if shift count == 1 */			\
+     of = lshift(CC_DEP2 ^ CC_DEP1, 12 - DATA_BITS)		\
+          & AMD64G_CC_MASK_O;					\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ROL: cf' = lsb(result).  of' = msb(result) ^ lsb(result). */
+/* DEP1 = result, NDEP = old flags */
+/* Only C and O are recomputed; all other flag bits are taken
+   unchanged from the old flags in NDEP. */
+#define ACTIONS_ROL(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong fl 							\
+        = (CC_NDEP & ~(AMD64G_CC_MASK_O | AMD64G_CC_MASK_C))	\
+          | (AMD64G_CC_MASK_C & CC_DEP1)			\
+          | (AMD64G_CC_MASK_O & (lshift(CC_DEP1,  		\
+                                      11-(DATA_BITS-1)) 	\
+                     ^ lshift(CC_DEP1, 11)));			\
+     return fl;							\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ROR: cf' = msb(result).  of' = msb(result) ^ msb-1(result). */
+/* DEP1 = result, NDEP = old flags */
+/* Only C and O are recomputed; all other flag bits are taken
+   unchanged from the old flags in NDEP. */
+#define ACTIONS_ROR(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong fl 							\
+        = (CC_NDEP & ~(AMD64G_CC_MASK_O | AMD64G_CC_MASK_C))	\
+          | (AMD64G_CC_MASK_C & (CC_DEP1 >> (DATA_BITS-1)))	\
+          | (AMD64G_CC_MASK_O & (lshift(CC_DEP1, 		\
+                                      11-(DATA_BITS-1)) 	\
+                     ^ lshift(CC_DEP1, 11-(DATA_BITS-1)+1)));	\
+     return fl;							\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_UMUL: flags after an unsigned widening multiply.  The full
+   product is computed at double width (DATA_U2TYPE); CF and OF are
+   both set iff the high half is nonzero.  The remaining flags are
+   derived from the low half; AF is undefined (given as 0). */
+#define ACTIONS_UMUL(DATA_BITS, DATA_UTYPE,  NARROWtoU,         \
+                                DATA_U2TYPE, NARROWto2U)        \
+{                                                               \
+   PREAMBLE(DATA_BITS);                                         \
+   { ULong cf, pf, af, zf, sf, of;                              \
+     DATA_UTYPE  hi;                                            \
+     DATA_UTYPE  lo                                             \
+        = NARROWtoU( ((DATA_UTYPE)CC_DEP1)                      \
+                     * ((DATA_UTYPE)CC_DEP2) );                 \
+     DATA_U2TYPE rr                                             \
+        = NARROWto2U(                                           \
+             ((DATA_U2TYPE)((DATA_UTYPE)CC_DEP1))               \
+             * ((DATA_U2TYPE)((DATA_UTYPE)CC_DEP2)) );          \
+     hi = NARROWtoU(rr >>/*u*/ DATA_BITS);                      \
+     cf = (hi != 0);                                            \
+     pf = parity_table[(UChar)lo];                              \
+     af = 0; /* undefined */                                    \
+     zf = (lo == 0) << 6;                                       \
+     sf = lshift(lo, 8 - DATA_BITS) & 0x80;                     \
+     of = cf << 11;                                             \
+     return cf | pf | af | zf | sf | of;                        \
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_SMUL: flags after a signed widening multiply.  CF and OF
+   are both set iff the high half of the double-width product differs
+   from the sign-extension of the low half (i.e. the result does not
+   fit in DATA_BITS).  AF is undefined (given as 0). */
+#define ACTIONS_SMUL(DATA_BITS, DATA_STYPE,  NARROWtoS,         \
+                                DATA_S2TYPE, NARROWto2S)        \
+{                                                               \
+   PREAMBLE(DATA_BITS);                                         \
+   { ULong cf, pf, af, zf, sf, of;                              \
+     DATA_STYPE  hi;                                            \
+     DATA_STYPE  lo                                             \
+        = NARROWtoS( ((DATA_S2TYPE)(DATA_STYPE)CC_DEP1)         \
+                     * ((DATA_S2TYPE)(DATA_STYPE)CC_DEP2) );    \
+     DATA_S2TYPE rr                                             \
+        = NARROWto2S(                                           \
+             ((DATA_S2TYPE)((DATA_STYPE)CC_DEP1))               \
+             * ((DATA_S2TYPE)((DATA_STYPE)CC_DEP2)) );          \
+     hi = NARROWtoS(rr >>/*s*/ DATA_BITS);                      \
+     cf = (hi != (lo >>/*s*/ (DATA_BITS-1)));                   \
+     pf = parity_table[(UChar)lo];                              \
+     af = 0; /* undefined */                                    \
+     zf = (lo == 0) << 6;                                       \
+     sf = lshift(lo, 8 - DATA_BITS) & 0x80;                     \
+     of = cf << 11;                                             \
+     return cf | pf | af | zf | sf | of;                        \
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_UMULQ: 64x64->128 unsigned multiply; the 128-bit product
+   is produced by the mullU64 helper since no 128-bit C type is
+   assumed.  CF=OF iff the high 64 bits are nonzero.  Note
+   lshift(lo, 8-64) is a right shift by 56, so the 0x80 mask picks
+   out bit 63 of lo for SF. */
+#define ACTIONS_UMULQ                                           \
+{                                                               \
+   PREAMBLE(64);                                                \
+   { ULong cf, pf, af, zf, sf, of;                              \
+     ULong lo, hi;                                              \
+     mullU64( (ULong)CC_DEP1, (ULong)CC_DEP2, &hi, &lo );       \
+     cf = (hi != 0);                                            \
+     pf = parity_table[(UChar)lo];                              \
+     af = 0; /* undefined */                                    \
+     zf = (lo == 0) << 6;                                       \
+     sf = lshift(lo, 8 - 64) & 0x80;                            \
+     of = cf << 11;                                             \
+     return cf | pf | af | zf | sf | of;                        \
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_SMULQ: 64x64->128 signed multiply via the mullS64 helper.
+   CF=OF iff the high 64 bits differ from the sign-extension of the
+   low 64 bits (result does not fit in 64 bits). */
+#define ACTIONS_SMULQ                                           \
+{                                                               \
+   PREAMBLE(64);                                                \
+   { ULong cf, pf, af, zf, sf, of;                              \
+     Long lo, hi;                                               \
+     mullS64( (Long)CC_DEP1, (Long)CC_DEP2, &hi, &lo );         \
+     cf = (hi != (lo >>/*s*/ (64-1)));                          \
+     pf = parity_table[(UChar)lo];                              \
+     af = 0; /* undefined */                                    \
+     zf = (lo == 0) << 6;                                       \
+     sf = lshift(lo, 8 - 64) & 0x80;                            \
+     of = cf << 11;                                             \
+     return cf | pf | af | zf | sf | of;                        \
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_ANDN: flags after BMI1 ANDN.  DEP1 = result.  CF and OF
+   are cleared; PF and AF are architecturally undefined (given as 0,
+   hence no parity_table lookup here, unlike ACTIONS_LOGIC). */
+#define ACTIONS_ANDN(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     cf = 0;							\
+     pf = 0;							\
+     af = 0;							\
+     zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     of = 0;							\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_BLSI: flags after BMI1 BLSI (extract lowest set bit).
+   DEP1 = result, DEP2 = original source; CF is set iff the source
+   was nonzero.  OF cleared; PF and AF undefined (given as 0). */
+#define ACTIONS_BLSI(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     cf = ((DATA_UTYPE)CC_DEP2 != 0);				\
+     pf = 0;							\
+     af = 0;							\
+     zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     of = 0;							\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_BLSMSK: flags after BMI1 BLSMSK (mask up to and including
+   the lowest set bit).  DEP1 = result, DEP2 = original source; CF is
+   set iff the source was zero; ZF and OF are always cleared.  The
+   flag temporaries are ULong for consistency with every other
+   ACTIONS_* macro (previously Long — harmless, since all values are
+   small and non-negative, but inconsistent). */
+#define ACTIONS_BLSMSK(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     cf = ((DATA_UTYPE)CC_DEP2 == 0);				\
+     pf = 0;							\
+     af = 0;							\
+     zf = 0;							\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     of = 0;							\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ACTIONS_BLSR: flags after BMI1 BLSR (reset lowest set bit).
+   DEP1 = result, DEP2 = original source; CF is set iff the source
+   was zero.  OF cleared; PF and AF undefined (given as 0). */
+#define ACTIONS_BLSR(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { ULong cf, pf, af, zf, sf, of;				\
+     cf = ((DATA_UTYPE)CC_DEP2 == 0);				\
+     pf = 0;							\
+     af = 0;							\
+     zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     of = 0;							\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+
+#if PROFILE_RFLAGS
+
+/* Set once initCounts() has zeroed the profiling tables below. */
+static Bool initted     = False;
+
+/* C flag, fast route */
+static UInt tabc_fast[AMD64G_CC_OP_NUMBER];
+/* C flag, slow route */
+static UInt tabc_slow[AMD64G_CC_OP_NUMBER];
+/* table for calculate_cond */
+static UInt tab_cond[AMD64G_CC_OP_NUMBER][16];
+/* total entry counts for calc_all, calc_c, calc_cond. */
+static UInt n_calc_all  = 0;
+static UInt n_calc_c    = 0;
+static UInt n_calc_cond = 0;
+
+/* True once every 0x400000 (~4.2M) total calls: time to dump stats. */
+#define SHOW_COUNTS_NOW (0 == (0x3FFFFF & (n_calc_all+n_calc_c+n_calc_cond)))
+
+
+/* Dump the flag-evaluation profile counters accumulated so far. */
+static void showCounts ( void )
+{
+   Int op, co;
+   HChar ch;
+   vex_printf("\nTotal calls: calc_all=%u   calc_cond=%u   calc_c=%u\n",
+              n_calc_all, n_calc_cond, n_calc_c);
+
+   vex_printf("      cSLOW  cFAST    O   NO    B   NB    Z   NZ   BE  NBE"
+              "    S   NS    P   NP    L   NL   LE  NLE\n");
+   vex_printf("     -----------------------------------------------------"
+              "----------------------------------------\n");
+   for (op = 0; op < AMD64G_CC_OP_NUMBER; op++) {
+
+      /* Tag each op with its width suffix: after the initial op,
+         the ops come in groups of four (B, W, L, Q). */
+      ch = ' ';
+      if (op > 0) {
+         switch ((op-1) % 4) {
+            case 0: ch = 'B'; break;
+            case 1: ch = 'W'; break;
+            case 2: ch = 'L'; break;
+            case 3: ch = 'Q'; break;
+         }
+      }
+
+      vex_printf("%2d%c: ", op, ch);
+      vex_printf("%6u ", tabc_slow[op]);
+      vex_printf("%6u ", tabc_fast[op]);
+      for (co = 0; co < 16; co++) {
+         Int n = tab_cond[op][co];
+         if (n >= 1000) {
+            vex_printf(" %3dK", n / 1000);
+         } else if (n >= 0) {
+            vex_printf(" %3d ", n );
+         } else {
+            vex_printf("     ");
+         }
+      }
+      vex_printf("\n");
+   }
+   vex_printf("\n");
+}
+
+/* Zero all the profiling tables and mark them initialised. */
+static void initCounts ( void )
+{
+   Int i, j;
+   for (i = 0; i < AMD64G_CC_OP_NUMBER; i++) {
+      tabc_fast[i] = 0;
+      tabc_slow[i] = 0;
+      for (j = 0; j < 16; j++)
+         tab_cond[i][j] = 0;
+   }
+   initted = True;
+}
+
+#endif /* PROFILE_RFLAGS */
+
+
+/* Worker function: calculates all 6 flags (OSZACP) from the supplied
+   thunk parameters by dispatching on cc_op to the appropriate
+   ACTIONS_* macro.  Not called directly from generated code — the
+   CLEAN HELPER wrappers below are. */
+static
+ULong amd64g_calculate_rflags_all_WRK ( ULong cc_op, 
+                                        ULong cc_dep1_formal, 
+                                        ULong cc_dep2_formal,
+                                        ULong cc_ndep_formal )
+{
+   switch (cc_op) {
+      /* COPY: DEP1 already holds the flag bits; just mask them. */
+      case AMD64G_CC_OP_COPY:
+         return cc_dep1_formal
+                & (AMD64G_CC_MASK_O | AMD64G_CC_MASK_S | AMD64G_CC_MASK_Z 
+                   | AMD64G_CC_MASK_A | AMD64G_CC_MASK_C | AMD64G_CC_MASK_P);
+
+      case AMD64G_CC_OP_ADDB:   ACTIONS_ADD( 8,  UChar  );
+      case AMD64G_CC_OP_ADDW:   ACTIONS_ADD( 16, UShort );
+      case AMD64G_CC_OP_ADDL:   ACTIONS_ADD( 32, UInt   );
+      case AMD64G_CC_OP_ADDQ:   ACTIONS_ADD( 64, ULong  );
+
+      case AMD64G_CC_OP_ADCB:   ACTIONS_ADC( 8,  UChar  );
+      case AMD64G_CC_OP_ADCW:   ACTIONS_ADC( 16, UShort );
+      case AMD64G_CC_OP_ADCL:   ACTIONS_ADC( 32, UInt   );
+      case AMD64G_CC_OP_ADCQ:   ACTIONS_ADC( 64, ULong  );
+
+      case AMD64G_CC_OP_SUBB:   ACTIONS_SUB(  8, UChar  );
+      case AMD64G_CC_OP_SUBW:   ACTIONS_SUB( 16, UShort );
+      case AMD64G_CC_OP_SUBL:   ACTIONS_SUB( 32, UInt   );
+      case AMD64G_CC_OP_SUBQ:   ACTIONS_SUB( 64, ULong  );
+
+      case AMD64G_CC_OP_SBBB:   ACTIONS_SBB(  8, UChar  );
+      case AMD64G_CC_OP_SBBW:   ACTIONS_SBB( 16, UShort );
+      case AMD64G_CC_OP_SBBL:   ACTIONS_SBB( 32, UInt   );
+      case AMD64G_CC_OP_SBBQ:   ACTIONS_SBB( 64, ULong  );
+
+      case AMD64G_CC_OP_LOGICB: ACTIONS_LOGIC(  8, UChar  );
+      case AMD64G_CC_OP_LOGICW: ACTIONS_LOGIC( 16, UShort );
+      case AMD64G_CC_OP_LOGICL: ACTIONS_LOGIC( 32, UInt   );
+      case AMD64G_CC_OP_LOGICQ: ACTIONS_LOGIC( 64, ULong  );
+
+      case AMD64G_CC_OP_INCB:   ACTIONS_INC(  8, UChar  );
+      case AMD64G_CC_OP_INCW:   ACTIONS_INC( 16, UShort );
+      case AMD64G_CC_OP_INCL:   ACTIONS_INC( 32, UInt   );
+      case AMD64G_CC_OP_INCQ:   ACTIONS_INC( 64, ULong  );
+
+      case AMD64G_CC_OP_DECB:   ACTIONS_DEC(  8, UChar  );
+      case AMD64G_CC_OP_DECW:   ACTIONS_DEC( 16, UShort );
+      case AMD64G_CC_OP_DECL:   ACTIONS_DEC( 32, UInt   );
+      case AMD64G_CC_OP_DECQ:   ACTIONS_DEC( 64, ULong  );
+
+      case AMD64G_CC_OP_SHLB:   ACTIONS_SHL(  8, UChar  );
+      case AMD64G_CC_OP_SHLW:   ACTIONS_SHL( 16, UShort );
+      case AMD64G_CC_OP_SHLL:   ACTIONS_SHL( 32, UInt   );
+      case AMD64G_CC_OP_SHLQ:   ACTIONS_SHL( 64, ULong  );
+
+      case AMD64G_CC_OP_SHRB:   ACTIONS_SHR(  8, UChar  );
+      case AMD64G_CC_OP_SHRW:   ACTIONS_SHR( 16, UShort );
+      case AMD64G_CC_OP_SHRL:   ACTIONS_SHR( 32, UInt   );
+      case AMD64G_CC_OP_SHRQ:   ACTIONS_SHR( 64, ULong  );
+
+      case AMD64G_CC_OP_ROLB:   ACTIONS_ROL(  8, UChar  );
+      case AMD64G_CC_OP_ROLW:   ACTIONS_ROL( 16, UShort );
+      case AMD64G_CC_OP_ROLL:   ACTIONS_ROL( 32, UInt   );
+      case AMD64G_CC_OP_ROLQ:   ACTIONS_ROL( 64, ULong  );
+
+      case AMD64G_CC_OP_RORB:   ACTIONS_ROR(  8, UChar  );
+      case AMD64G_CC_OP_RORW:   ACTIONS_ROR( 16, UShort );
+      case AMD64G_CC_OP_RORL:   ACTIONS_ROR( 32, UInt   );
+      case AMD64G_CC_OP_RORQ:   ACTIONS_ROR( 64, ULong  );
+
+      case AMD64G_CC_OP_UMULB:  ACTIONS_UMUL(  8, UChar,  toUChar,
+                                                  UShort, toUShort );
+      case AMD64G_CC_OP_UMULW:  ACTIONS_UMUL( 16, UShort, toUShort,
+                                                  UInt,   toUInt );
+      case AMD64G_CC_OP_UMULL:  ACTIONS_UMUL( 32, UInt,   toUInt,
+                                                  ULong,  idULong );
+
+      case AMD64G_CC_OP_UMULQ:  ACTIONS_UMULQ;
+
+      case AMD64G_CC_OP_SMULB:  ACTIONS_SMUL(  8, Char,   toUChar,
+                                                  Short,  toUShort );
+      case AMD64G_CC_OP_SMULW:  ACTIONS_SMUL( 16, Short,  toUShort, 
+                                                  Int,    toUInt   );
+      case AMD64G_CC_OP_SMULL:  ACTIONS_SMUL( 32, Int,    toUInt,
+                                                  Long,   idULong );
+
+      case AMD64G_CC_OP_SMULQ:  ACTIONS_SMULQ;
+
+      case AMD64G_CC_OP_ANDN32: ACTIONS_ANDN( 32, UInt   );
+      case AMD64G_CC_OP_ANDN64: ACTIONS_ANDN( 64, ULong  );
+
+      case AMD64G_CC_OP_BLSI32: ACTIONS_BLSI( 32, UInt   );
+      case AMD64G_CC_OP_BLSI64: ACTIONS_BLSI( 64, ULong  );
+
+      case AMD64G_CC_OP_BLSMSK32: ACTIONS_BLSMSK( 32, UInt   );
+      case AMD64G_CC_OP_BLSMSK64: ACTIONS_BLSMSK( 64, ULong  );
+
+      case AMD64G_CC_OP_BLSR32: ACTIONS_BLSR( 32, UInt   );
+      case AMD64G_CC_OP_BLSR64: ACTIONS_BLSR( 64, ULong  );
+
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("amd64g_calculate_rflags_all_WRK(AMD64)"
+                    "( %llu, 0x%llx, 0x%llx, 0x%llx )\n",
+                    cc_op, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal );
+         vpanic("amd64g_calculate_rflags_all_WRK(AMD64)");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate all the 6 flags from the supplied thunk parameters.
+   Thin wrapper: count the call when profiling, then defer to the
+   worker. */
+ULong amd64g_calculate_rflags_all ( ULong cc_op, 
+                                    ULong cc_dep1, 
+                                    ULong cc_dep2,
+                                    ULong cc_ndep )
+{
+#  if PROFILE_RFLAGS
+   if (!initted) initCounts();
+   n_calc_all++;
+   if (SHOW_COUNTS_NOW) showCounts();
+#  endif
+   ULong flags = amd64g_calculate_rflags_all_WRK ( cc_op, cc_dep1,
+                                                   cc_dep2, cc_ndep );
+   return flags;
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate just the carry flag from the supplied thunk parameters.
+   A handful of common cc_ops are fast-cased; everything else falls
+   through to the full worker and masks out the C bit. */
+ULong amd64g_calculate_rflags_c ( ULong cc_op, 
+                                  ULong cc_dep1, 
+                                  ULong cc_dep2,
+                                  ULong cc_ndep )
+{
+#  if PROFILE_RFLAGS
+   if (!initted) initCounts();
+   n_calc_c++;
+   tabc_fast[cc_op]++;
+   if (SHOW_COUNTS_NOW) showCounts();
+#  endif
+
+   /* Fast-case some common ones. */
+   switch (cc_op) {
+      case AMD64G_CC_OP_COPY:
+         return (cc_dep1 >> AMD64G_CC_SHIFT_C) & 1;
+      case AMD64G_CC_OP_LOGICQ: 
+      case AMD64G_CC_OP_LOGICL: 
+      case AMD64G_CC_OP_LOGICW: 
+      case AMD64G_CC_OP_LOGICB:
+         /* logical ops always clear C */
+         return 0;
+         /* Further fast cases, disabled; kept for reference. */
+	 //      case AMD64G_CC_OP_SUBL:
+	 //         return ((UInt)cc_dep1) < ((UInt)cc_dep2)
+	 //                   ? AMD64G_CC_MASK_C : 0;
+	 //      case AMD64G_CC_OP_SUBW:
+	 //         return ((UInt)(cc_dep1 & 0xFFFF)) < ((UInt)(cc_dep2 & 0xFFFF))
+	 //                   ? AMD64G_CC_MASK_C : 0;
+	 //      case AMD64G_CC_OP_SUBB:
+	 //         return ((UInt)(cc_dep1 & 0xFF)) < ((UInt)(cc_dep2 & 0xFF))
+	 //                   ? AMD64G_CC_MASK_C : 0;
+	 //      case AMD64G_CC_OP_INCL:
+	 //      case AMD64G_CC_OP_DECL:
+	 //         return cc_ndep & AMD64G_CC_MASK_C;
+      default: 
+         break;
+   }
+
+#  if PROFILE_RFLAGS
+   /* Reclassify this call as taking the slow route. */
+   tabc_fast[cc_op]--;
+   tabc_slow[cc_op]++;
+#  endif
+
+   /* Slow route: compute all flags, keep only C. */
+   return amd64g_calculate_rflags_all_WRK(cc_op,cc_dep1,cc_dep2,cc_ndep) 
+          & AMD64G_CC_MASK_C;
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Evaluate condition code `cond` against the flag thunk (cc_op,
+   cc_dep1, cc_dep2, cc_ndep); returns 1 or 0.  Condition codes come
+   in even/odd pairs where the odd code is the negation of the even
+   one, so `inv` (the low bit of cond) flips the sense of each test.
+   (Unreachable `break`s after `return` have been removed.) */
+ULong amd64g_calculate_condition ( ULong/*AMD64Condcode*/ cond, 
+                                   ULong cc_op, 
+                                   ULong cc_dep1, 
+                                   ULong cc_dep2,
+                                   ULong cc_ndep )
+{
+   ULong rflags = amd64g_calculate_rflags_all_WRK(cc_op, cc_dep1, 
+                                                  cc_dep2, cc_ndep);
+   ULong of,sf,zf,cf,pf;
+   ULong inv = cond & 1;
+
+#  if PROFILE_RFLAGS
+   if (!initted) initCounts();
+   tab_cond[cc_op][cond]++;
+   n_calc_cond++;
+   if (SHOW_COUNTS_NOW) showCounts();
+#  endif
+
+   switch (cond) {
+      case AMD64CondNO:
+      case AMD64CondO: /* OF == 1 */
+         of = rflags >> AMD64G_CC_SHIFT_O;
+         return 1 & (inv ^ of);
+
+      case AMD64CondNZ:
+      case AMD64CondZ: /* ZF == 1 */
+         zf = rflags >> AMD64G_CC_SHIFT_Z;
+         return 1 & (inv ^ zf);
+
+      case AMD64CondNB:
+      case AMD64CondB: /* CF == 1 */
+         cf = rflags >> AMD64G_CC_SHIFT_C;
+         return 1 & (inv ^ cf);
+
+      case AMD64CondNBE:
+      case AMD64CondBE: /* (CF or ZF) == 1 */
+         cf = rflags >> AMD64G_CC_SHIFT_C;
+         zf = rflags >> AMD64G_CC_SHIFT_Z;
+         return 1 & (inv ^ (cf | zf));
+
+      case AMD64CondNS:
+      case AMD64CondS: /* SF == 1 */
+         sf = rflags >> AMD64G_CC_SHIFT_S;
+         return 1 & (inv ^ sf);
+
+      case AMD64CondNP:
+      case AMD64CondP: /* PF == 1 */
+         pf = rflags >> AMD64G_CC_SHIFT_P;
+         return 1 & (inv ^ pf);
+
+      case AMD64CondNL:
+      case AMD64CondL: /* (SF xor OF) == 1 */
+         sf = rflags >> AMD64G_CC_SHIFT_S;
+         of = rflags >> AMD64G_CC_SHIFT_O;
+         return 1 & (inv ^ (sf ^ of));
+
+      case AMD64CondNLE:
+      case AMD64CondLE: /* ((SF xor OF) or ZF)  == 1 */
+         sf = rflags >> AMD64G_CC_SHIFT_S;
+         of = rflags >> AMD64G_CC_SHIFT_O;
+         zf = rflags >> AMD64G_CC_SHIFT_Z;
+         return 1 & (inv ^ ((sf ^ of) | zf));
+
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("amd64g_calculate_condition"
+                    "( %llu, %llu, 0x%llx, 0x%llx, 0x%llx )\n",
+                    cond, cc_op, cc_dep1, cc_dep2, cc_ndep );
+         vpanic("amd64g_calculate_condition");
+   }
+}
+
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Reconstruct the architectural %rflags value from the guest state:
+   the OSZACP bits come from the flag thunk, while DF, ID and AC are
+   kept in separate guest fields and merged in at their architectural
+   bit positions (10, 21 and 18 respectively). */
+ULong LibVEX_GuestAMD64_get_rflags ( /*IN*/const VexGuestAMD64State* vex_state )
+{
+   ULong rflags = amd64g_calculate_rflags_all_WRK(
+                     vex_state->guest_CC_OP,
+                     vex_state->guest_CC_DEP1,
+                     vex_state->guest_CC_DEP2,
+                     vex_state->guest_CC_NDEP
+                  );
+   /* guest_DFLAG is stored as +1 (forwards) or -1 (backwards). */
+   Long dflag = vex_state->guest_DFLAG;
+   vassert(dflag == 1 || dflag == -1);
+   if (dflag == -1)
+      rflags |= (1<<10);   /* DF */
+   if (vex_state->guest_IDFLAG == 1)
+      rflags |= (1<<21);   /* ID */
+   if (vex_state->guest_ACFLAG == 1)
+      rflags |= (1<<18);   /* AC */
+
+   return rflags;
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Set or clear the guest carry flag, leaving the other OSZACP bits
+   untouched, and re-express the flag thunk in COPY form. */
+void
+LibVEX_GuestAMD64_put_rflag_c ( ULong new_carry_flag,
+                               /*MOD*/VexGuestAMD64State* vex_state )
+{
+   /* Materialise the current OSZACP bits from the thunk. */
+   ULong oszacp = amd64g_calculate_rflags_all_WRK(
+                     vex_state->guest_CC_OP,
+                     vex_state->guest_CC_DEP1,
+                     vex_state->guest_CC_DEP2,
+                     vex_state->guest_CC_NDEP
+                  );
+   /* Force the carry bit to the requested value. */
+   oszacp &= ~AMD64G_CC_MASK_C;
+   if (new_carry_flag & 1)
+      oszacp |= AMD64G_CC_MASK_C;
+   /* Store back as a COPY thunk, making all flag bits explicit. */
+   vex_state->guest_CC_OP   = AMD64G_CC_OP_COPY;
+   vex_state->guest_CC_DEP1 = oszacp;
+   vex_state->guest_CC_DEP2 = 0;
+   vex_state->guest_CC_NDEP = 0;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- %rflags translation-time function specialisers.         ---*/
+/*--- These help iropt specialise calls the above run-time    ---*/
+/*--- %rflags functions.                                      ---*/
+/*---------------------------------------------------------------*/
+
+/* Used by the optimiser to try specialisations.  Returns an
+   equivalent expression, or NULL if none. */
+
+/* True iff `e` is an IR constant of type Ico_U64 with value `n`. */
+static Bool isU64 ( IRExpr* e, ULong n )
+{
+   if (e->tag != Iex_Const)
+      return False;
+   if (e->Iex.Const.con->tag != Ico_U64)
+      return False;
+   return toBool( e->Iex.Const.con->Ico.U64 == n );
+}
+
+IRExpr* guest_amd64_spechelper ( const HChar* function_name,
+                                 IRExpr** args,
+                                 IRStmt** precedingStmts,
+                                 Int      n_precedingStmts )
+{
+#  define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
+#  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
+#  define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
+#  define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
+#  define mkU8(_n)  IRExpr_Const(IRConst_U8(_n))
+
+   Int i, arity = 0;
+   for (i = 0; args[i]; i++)
+      arity++;
+#  if 0
+   vex_printf("spec request:\n");
+   vex_printf("   %s  ", function_name);
+   for (i = 0; i < arity; i++) {
+      vex_printf("  ");
+      ppIRExpr(args[i]);
+   }
+   vex_printf("\n");
+#  endif
+
+   /* --------- specialising "amd64g_calculate_condition" --------- */
+
+   if (vex_streq(function_name, "amd64g_calculate_condition")) {
+      /* specialise calls to above "calculate condition" function */
+      IRExpr *cond, *cc_op, *cc_dep1, *cc_dep2;
+      vassert(arity == 5);
+      cond    = args[0];
+      cc_op   = args[1];
+      cc_dep1 = args[2];
+      cc_dep2 = args[3];
+
+      /*---------------- ADDQ ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_ADDQ) && isU64(cond, AMD64CondZ)) {
+         /* long long add, then Z --> test (dst+src == 0) */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ64, 
+                           binop(Iop_Add64, cc_dep1, cc_dep2),
+                           mkU64(0)));
+      }
+
+      /*---------------- ADDL ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_ADDL) && isU64(cond, AMD64CondO)) {
+         /* This is very commonly generated by Javascript JITs, for
+            the idiom "do a 32-bit add and jump to out-of-line code if
+            an overflow occurs". */
+         /* long add, then O (overflow)
+            --> ((dep1 ^ dep2 ^ -1) & (dep1 ^ (dep1 + dep2)))[31]
+            --> (((dep1 ^ dep2 ^ -1) & (dep1 ^ (dep1 +64 dep2))) >>u 31) & 1
+            --> (((not(dep1 ^ dep2)) & (dep1 ^ (dep1 +64 dep2))) >>u 31) & 1
+         */
+         vassert(isIRAtom(cc_dep1));
+         vassert(isIRAtom(cc_dep2));
+         return
+            binop(Iop_And64,
+                  binop(Iop_Shr64,
+                        binop(Iop_And64,
+                              unop(Iop_Not64,
+                                   binop(Iop_Xor64, cc_dep1, cc_dep2)),
+                              binop(Iop_Xor64,
+                                    cc_dep1,
+                                    binop(Iop_Add64, cc_dep1, cc_dep2))),
+                        mkU8(31)),
+                  mkU64(1));
+
+      }
+
+      /*---------------- SUBQ ----------------*/
+
+      /* 0, */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondO)) {
+         /* long long sub/cmp, then O (overflow)
+            --> ((dep1 ^ dep2) & (dep1 ^ (dep1 - dep2)))[63]
+            --> ((dep1 ^ dep2) & (dep1 ^ (dep1 - dep2))) >>u 63
+         */
+         vassert(isIRAtom(cc_dep1));
+         vassert(isIRAtom(cc_dep2));
+         return binop(Iop_Shr64,
+                      binop(Iop_And64,
+                            binop(Iop_Xor64, cc_dep1, cc_dep2),
+                            binop(Iop_Xor64,
+                                  cc_dep1,
+                                  binop(Iop_Sub64, cc_dep1, cc_dep2))),
+                      mkU8(64));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNO)) {
+         /* No action.  Never yet found a test case. */
+      }
+
+      /* 2, 3 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondB)) {
+         /* long long sub/cmp, then B (unsigned less than)
+            --> test dst <u src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64U, cc_dep1, cc_dep2));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNB)) {
+         /* long long sub/cmp, then NB (unsigned greater than or equal)
+            --> test src <=u dst */
+         /* Note, args are opposite way round from the usual */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U, cc_dep2, cc_dep1));
+      }
+
+      /* 4, 5 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondZ)) {
+         /* long long sub/cmp, then Z --> test dst==src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ64,cc_dep1,cc_dep2));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNZ)) {
+         /* long long sub/cmp, then NZ --> test dst!=src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE64,cc_dep1,cc_dep2));
+      }
+
+      /* 6, 7 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondBE)) {
+         /* long long sub/cmp, then BE (unsigned less than or equal)
+            --> test dst <=u src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U, cc_dep1, cc_dep2));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNBE)) {
+         /* long long sub/cmp, then NBE (unsigned greater than)
+            --> test !(dst <=u src) */
+         return binop(Iop_Xor64,
+                      unop(Iop_1Uto64,
+                           binop(Iop_CmpLE64U, cc_dep1, cc_dep2)),
+                      mkU64(1));
+      }
+
+      /* 8, 9 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondS)) {
+         /* long long sub/cmp, then S (negative)
+            --> (dst-src)[63]
+            --> (dst-src) >>u 63 */
+         return binop(Iop_Shr64,
+                      binop(Iop_Sub64, cc_dep1, cc_dep2),
+                      mkU8(63));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNS)) {
+         /* long long sub/cmp, then NS (not negative)
+            --> (dst-src)[63] ^ 1
+            --> ((dst-src) >>u 63) ^ 1 */
+         return binop(Iop_Xor64,
+                      binop(Iop_Shr64,
+                            binop(Iop_Sub64, cc_dep1, cc_dep2),
+                            mkU8(63)),
+                      mkU64(1));
+      }
+
+      /* 12, 13 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondL)) {
+         /* long long sub/cmp, then L (signed less than) 
+            --> test dst <s src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64S, cc_dep1, cc_dep2));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNL)) {
+         /* long long sub/cmp, then NL (signed greater than or equal) 
+            --> test dst >=s src
+            --> test src <=s dst */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64S, cc_dep2, cc_dep1));
+      }
+
+      /* 14, 15 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondLE)) {
+         /* long long sub/cmp, then LE (signed less than or equal)
+            --> test dst <=s src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64S, cc_dep1, cc_dep2));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ) && isU64(cond, AMD64CondNLE)) {
+         /* long sub/cmp, then NLE (signed greater than) 
+            --> test !(dst <=s src)
+            --> test (dst >s src)
+            --> test (src <s dst) */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64S, cc_dep2, cc_dep1));
+
+      }
+
+      /*---------------- SUBL ----------------*/
+
+      /* 0, */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondO)) {
+         /* This is very commonly generated by Javascript JITs, for
+            the idiom "do a 32-bit subtract and jump to out-of-line
+            code if an overflow occurs". */
+         /* long sub/cmp, then O (overflow)
+            --> ((dep1 ^ dep2) & (dep1 ^ (dep1 - dep2)))[31]
+            --> (((dep1 ^ dep2) & (dep1 ^ (dep1 -64 dep2))) >>u 31) & 1
+         */
+         vassert(isIRAtom(cc_dep1));
+         vassert(isIRAtom(cc_dep2));
+         return
+            binop(Iop_And64,
+                  binop(Iop_Shr64,
+                        binop(Iop_And64,
+                              binop(Iop_Xor64, cc_dep1, cc_dep2),
+                              binop(Iop_Xor64,
+                                    cc_dep1,
+                                    binop(Iop_Sub64, cc_dep1, cc_dep2))),
+                        mkU8(31)),
+                  mkU64(1));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNO)) {
+         /* No action.  Never yet found a test case. */
+      }
+
+      /* 2, 3 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondB)) {
+         /* long sub/cmp, then B (unsigned less than)
+            --> test dst <u src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT32U,
+                           unop(Iop_64to32, cc_dep1),
+                           unop(Iop_64to32, cc_dep2)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNB)) {
+         /* long sub/cmp, then NB (unsigned greater than or equal)
+            --> test src <=u dst */
+         /* Note, args are opposite way round from the usual */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE32U,
+                           unop(Iop_64to32, cc_dep2),
+                           unop(Iop_64to32, cc_dep1)));
+      }
+
+      /* 4, 5 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondZ)) {
+         /* long sub/cmp, then Z --> test dst==src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ32,
+                           unop(Iop_64to32, cc_dep1),
+                           unop(Iop_64to32, cc_dep2)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNZ)) {
+         /* long sub/cmp, then NZ --> test dst!=src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE32,
+                           unop(Iop_64to32, cc_dep1),
+                           unop(Iop_64to32, cc_dep2)));
+      }
+
+      /* 6, 7 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondBE)) {
+         /* long sub/cmp, then BE (unsigned less than or equal)
+            --> test dst <=u src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE32U, 
+                           unop(Iop_64to32, cc_dep1),
+                           unop(Iop_64to32, cc_dep2)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNBE)) {
+         /* long sub/cmp, then NBE (unsigned greater than)
+            --> test src <u dst */
+         /* Note, args are opposite way round from the usual */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT32U, 
+                           unop(Iop_64to32, cc_dep2),
+                           unop(Iop_64to32, cc_dep1)));
+      }
+
+      /* 8, 9 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondS)) {
+         /* long sub/cmp, then S (negative)
+            --> (dst-src)[31]
+            --> ((dst -64 src) >>u 31) & 1
+            Pointless to narrow the args to 32 bit before the subtract. */
+         return binop(Iop_And64,
+                      binop(Iop_Shr64,
+                            binop(Iop_Sub64, cc_dep1, cc_dep2),
+                            mkU8(31)),
+                      mkU64(1));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNS)) {
+         /* long sub/cmp, then NS (not negative)
+            --> (dst-src)[31] ^ 1
+            --> (((dst -64 src) >>u 31) & 1) ^ 1
+            Pointless to narrow the args to 32 bit before the subtract. */
+         return binop(Iop_Xor64,
+                      binop(Iop_And64,
+                            binop(Iop_Shr64,
+                                  binop(Iop_Sub64, cc_dep1, cc_dep2),
+                                  mkU8(31)),
+                            mkU64(1)),
+                      mkU64(1));
+      }
+
+      /* 12, 13 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondL)) {
+         /* long sub/cmp, then L (signed less than) 
+            --> test dst <s src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT32S,
+                           unop(Iop_64to32, cc_dep1),
+                           unop(Iop_64to32, cc_dep2)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNL)) {
+         /* long sub/cmp, then NL (signed greater than or equal) 
+            --> test dst >=s src
+            --> test src <=s dst */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE32S,
+                           unop(Iop_64to32, cc_dep2),
+                           unop(Iop_64to32, cc_dep1)));
+      }
+
+      /* 14, 15 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondLE)) {
+         /* long sub/cmp, then LE (signed less than or equal) 
+            --> test dst <=s src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE32S,
+                           unop(Iop_64to32, cc_dep1),
+                           unop(Iop_64to32, cc_dep2)));
+
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL) && isU64(cond, AMD64CondNLE)) {
+         /* long sub/cmp, then NLE (signed greater than) 
+            --> test !(dst <=s src)
+            --> test (dst >s src)
+            --> test (src <s dst) */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT32S,
+                           unop(Iop_64to32, cc_dep2),
+                           unop(Iop_64to32, cc_dep1)));
+
+      }
+
+      /*---------------- SUBW ----------------*/
+
+      /* 4, 5 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondZ)) {
+         /* word sub/cmp, then Z --> test dst==src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ16, 
+                           unop(Iop_64to16,cc_dep1),
+                           unop(Iop_64to16,cc_dep2)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondNZ)) {
+         /* word sub/cmp, then NZ --> test dst!=src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE16, 
+                           unop(Iop_64to16,cc_dep1),
+                           unop(Iop_64to16,cc_dep2)));
+      }
+
+      /* 6, */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondBE)) {
+         /* word sub/cmp, then BE (unsigned less than or equal)
+            --> test dst <=u src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U,
+                           binop(Iop_Shl64, cc_dep1, mkU8(48)),
+                           binop(Iop_Shl64, cc_dep2, mkU8(48))));
+      }
+
+      /* 14, */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondLE)) {
+         /* word sub/cmp, then LE (signed less than or equal) 
+            --> test dst <=s src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64S, 
+                           binop(Iop_Shl64,cc_dep1,mkU8(48)),
+                           binop(Iop_Shl64,cc_dep2,mkU8(48))));
+
+      }
+
+      /*---------------- SUBB ----------------*/
+
+      /* 2, 3 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondB)) {
+         /* byte sub/cmp, then B (unsigned less than)
+            --> test dst <u src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64U,
+                           binop(Iop_And64, cc_dep1, mkU64(0xFF)),
+                           binop(Iop_And64, cc_dep2, mkU64(0xFF))));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondNB)) {
+         /* byte sub/cmp, then NB (unsigned greater than or equal)
+            --> test src <=u dst */
+         /* Note, args are opposite way round from the usual */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U,
+                           binop(Iop_And64, cc_dep2, mkU64(0xFF)),
+                           binop(Iop_And64, cc_dep1, mkU64(0xFF))));
+      }
+
+      /* 4, 5 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondZ)) {
+         /* byte sub/cmp, then Z --> test dst==src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ8, 
+                           unop(Iop_64to8,cc_dep1),
+                           unop(Iop_64to8,cc_dep2)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondNZ)) {
+         /* byte sub/cmp, then NZ --> test dst!=src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE8, 
+                           unop(Iop_64to8,cc_dep1),
+                           unop(Iop_64to8,cc_dep2)));
+      }
+
+      /* 6, */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondBE)) {
+         /* byte sub/cmp, then BE (unsigned less than or equal)
+            --> test dst <=u src */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U, 
+                           binop(Iop_And64, cc_dep1, mkU64(0xFF)),
+                           binop(Iop_And64, cc_dep2, mkU64(0xFF))));
+      }
+
+      /* 8, 9 */
+      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondS)
+                                          && isU64(cc_dep2, 0)) {
+         /* byte sub/cmp of zero, then S --> test (dst-0 <s 0)
+                                         --> test dst <s 0
+                                         --> (ULong)dst[7]
+            This is yet another scheme by which gcc figures out if the
+            top bit of a byte is 1 or 0.  See also LOGICB/CondS below. */
+         /* Note: isU64(cc_dep2, 0) is correct, even though this is
+            for an 8-bit comparison, since the args to the helper
+            function are always U64s. */
+         return binop(Iop_And64,
+                      binop(Iop_Shr64,cc_dep1,mkU8(7)),
+                      mkU64(1));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBB) && isU64(cond, AMD64CondNS)
+                                          && isU64(cc_dep2, 0)) {
+         /* byte sub/cmp of zero, then NS --> test !(dst-0 <s 0)
+                                          --> test !(dst <s 0)
+                                          --> (ULong) !dst[7]
+         */
+         return binop(Iop_Xor64,
+                      binop(Iop_And64,
+                            binop(Iop_Shr64,cc_dep1,mkU8(7)),
+                            mkU64(1)),
+                      mkU64(1));
+      }
+
+      /*---------------- LOGICQ ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICQ) && isU64(cond, AMD64CondZ)) {
+         /* long long and/or/xor, then Z --> test dst==0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICQ) && isU64(cond, AMD64CondNZ)) {
+         /* long long and/or/xor, then NZ --> test dst!=0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+      }
+
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICQ) && isU64(cond, AMD64CondL)) {
+         /* long long and/or/xor, then L
+            LOGIC sets SF and ZF according to the
+            result and makes OF be zero.  L computes SF ^ OF, but
+            OF is zero, so this reduces to SF -- which will be 1 iff
+            the result is < signed 0.  Hence ...
+         */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64S, 
+                           cc_dep1, 
+                           mkU64(0)));
+      }
+
+      /*---------------- LOGICL ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICL) && isU64(cond, AMD64CondZ)) {
+         /* long and/or/xor, then Z --> test dst==0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ32,
+                           unop(Iop_64to32, cc_dep1), 
+                           mkU32(0)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICL) && isU64(cond, AMD64CondNZ)) {
+         /* long and/or/xor, then NZ --> test dst!=0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE32,
+                           unop(Iop_64to32, cc_dep1), 
+                           mkU32(0)));
+      }
+
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICL) && isU64(cond, AMD64CondLE)) {
+         /* long and/or/xor, then LE
+            This is pretty subtle.  LOGIC sets SF and ZF according to the
+            result and makes OF be zero.  LE computes (SF ^ OF) | ZF, but
+            OF is zero, so this reduces to SF | ZF -- which will be 1 iff
+            the result is <=signed 0.  Hence ...
+         */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE32S,
+                           unop(Iop_64to32, cc_dep1), 
+                           mkU32(0)));
+      }
+
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICL) && isU64(cond, AMD64CondS)) {
+         /* long and/or/xor, then S --> (ULong)result[31] */
+         return binop(Iop_And64,
+                      binop(Iop_Shr64, cc_dep1, mkU8(31)),
+                      mkU64(1));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICL) && isU64(cond, AMD64CondNS)) {
+         /* long and/or/xor, then S --> (ULong) ~ result[31] */
+         return binop(Iop_Xor64,
+                binop(Iop_And64,
+                      binop(Iop_Shr64, cc_dep1, mkU8(31)),
+                      mkU64(1)),
+                mkU64(1));
+      }
+
+      /*---------------- LOGICW ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICW) && isU64(cond, AMD64CondZ)) {
+         /* word and/or/xor, then Z --> test dst==0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ64,
+                           binop(Iop_And64, cc_dep1, mkU64(0xFFFF)),
+                           mkU64(0)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICW) && isU64(cond, AMD64CondNZ)) {
+         /* word and/or/xor, then NZ --> test dst!=0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE64,
+                           binop(Iop_And64, cc_dep1, mkU64(0xFFFF)),
+                           mkU64(0)));
+      }
+
+      /*---------------- LOGICB ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICB) && isU64(cond, AMD64CondZ)) {
+         /* byte and/or/xor, then Z --> test dst==0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ64, binop(Iop_And64,cc_dep1,mkU64(255)), 
+                                        mkU64(0)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICB) && isU64(cond, AMD64CondNZ)) {
+         /* byte and/or/xor, then NZ --> test dst!=0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE64, binop(Iop_And64,cc_dep1,mkU64(255)), 
+                                        mkU64(0)));
+      }
+
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICB) && isU64(cond, AMD64CondS)) {
+         /* this is an idiom gcc sometimes uses to find out if the top
+            bit of a byte register is set: eg testb %al,%al; js ..
+            Since it just depends on the top bit of the byte, extract
+            that bit and explicitly get rid of all the rest.  This
+            helps memcheck avoid false positives in the case where any
+            of the other bits in the byte are undefined. */
+         /* byte and/or/xor, then S --> (UInt)result[7] */
+         return binop(Iop_And64,
+                      binop(Iop_Shr64,cc_dep1,mkU8(7)),
+                      mkU64(1));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICB) && isU64(cond, AMD64CondNS)) {
+         /* byte and/or/xor, then NS --> (UInt)!result[7] */
+         return binop(Iop_Xor64,
+                      binop(Iop_And64,
+                            binop(Iop_Shr64,cc_dep1,mkU8(7)),
+                            mkU64(1)),
+                      mkU64(1));
+      }
+
+      /*---------------- INCB ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_INCB) && isU64(cond, AMD64CondLE)) {
+         /* 8-bit inc, then LE --> sign bit of the arg */
+         return binop(Iop_And64,
+                      binop(Iop_Shr64,
+                            binop(Iop_Sub64, cc_dep1, mkU64(1)),
+                            mkU8(7)),
+                      mkU64(1));
+      }
+
+      /*---------------- INCW ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_INCW) && isU64(cond, AMD64CondZ)) {
+         /* 16-bit inc, then Z --> test dst == 0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ64, 
+                           binop(Iop_Shl64,cc_dep1,mkU8(48)), 
+                           mkU64(0)));
+      }
+
+      /*---------------- DECL ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_DECL) && isU64(cond, AMD64CondZ)) {
+         /* dec L, then Z --> test dst == 0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ32,
+                           unop(Iop_64to32, cc_dep1),
+                           mkU32(0)));
+      }
+
+      /*---------------- DECW ----------------*/
+
+      if (isU64(cc_op, AMD64G_CC_OP_DECW) && isU64(cond, AMD64CondNZ)) {
+         /* 16-bit dec, then NZ --> test dst != 0 */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE64, 
+                           binop(Iop_Shl64,cc_dep1,mkU8(48)), 
+                           mkU64(0)));
+      }
+
+      /*---------------- COPY ----------------*/
+      /* This can happen, as a result of amd64 FP compares: "comisd ... ;
+         jbe" for example. */
+
+      if (isU64(cc_op, AMD64G_CC_OP_COPY) && 
+          (isU64(cond, AMD64CondBE) || isU64(cond, AMD64CondNBE))) {
+         /* COPY, then BE --> extract C and Z from dep1, and test (C
+            or Z == 1). */
+         /* COPY, then NBE --> extract C and Z from dep1, and test (C
+            or Z == 0). */
+         ULong nnn = isU64(cond, AMD64CondBE) ? 1 : 0;
+         return
+            unop(
+               Iop_1Uto64,
+               binop(
+                  Iop_CmpEQ64,
+                  binop(
+                     Iop_And64,
+                     binop(
+                        Iop_Or64,
+                        binop(Iop_Shr64, cc_dep1, mkU8(AMD64G_CC_SHIFT_C)),
+                        binop(Iop_Shr64, cc_dep1, mkU8(AMD64G_CC_SHIFT_Z))
+                     ),
+                     mkU64(1)
+                  ),
+                  mkU64(nnn)
+               )
+            );
+      }
+      
+      if (isU64(cc_op, AMD64G_CC_OP_COPY) && isU64(cond, AMD64CondB)) {
+         /* COPY, then B --> extract C dep1, and test (C == 1). */
+         return
+            unop(
+               Iop_1Uto64,
+               binop(
+                  Iop_CmpNE64,
+                  binop(
+                     Iop_And64,
+                     binop(Iop_Shr64, cc_dep1, mkU8(AMD64G_CC_SHIFT_C)),
+                     mkU64(1)
+                  ),
+                  mkU64(0)
+               )
+            );
+      }
+
+      if (isU64(cc_op, AMD64G_CC_OP_COPY) 
+          && (isU64(cond, AMD64CondZ) || isU64(cond, AMD64CondNZ))) {
+         /* COPY, then Z --> extract Z from dep1, and test (Z == 1). */
+         /* COPY, then NZ --> extract Z from dep1, and test (Z == 0). */
+         UInt nnn = isU64(cond, AMD64CondZ) ? 1 : 0;
+         return
+            unop(
+               Iop_1Uto64,
+               binop(
+                  Iop_CmpEQ64,
+                  binop(
+                     Iop_And64,
+                     binop(Iop_Shr64, cc_dep1, mkU8(AMD64G_CC_SHIFT_Z)),
+                     mkU64(1)
+                  ),
+                  mkU64(nnn)
+               )
+            );
+      }
+
+      if (isU64(cc_op, AMD64G_CC_OP_COPY) && isU64(cond, AMD64CondP)) {
+         /* COPY, then P --> extract P from dep1, and test (P == 1). */
+         return
+            unop(
+               Iop_1Uto64,
+               binop(
+                  Iop_CmpNE64,
+                  binop(
+                     Iop_And64,
+                     binop(Iop_Shr64, cc_dep1, mkU8(AMD64G_CC_SHIFT_P)),
+                     mkU64(1)
+                  ),
+                  mkU64(0)
+               )
+            );
+      }
+
+      return NULL;
+   }
+
+   /* --------- specialising "amd64g_calculate_rflags_c" --------- */
+
+   if (vex_streq(function_name, "amd64g_calculate_rflags_c")) {
+      /* specialise calls to above "calculate_rflags_c" function */
+      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
+      vassert(arity == 4);
+      cc_op   = args[0];
+      cc_dep1 = args[1];
+      cc_dep2 = args[2];
+      cc_ndep = args[3];
+
+      if (isU64(cc_op, AMD64G_CC_OP_SUBQ)) {
+         /* C after sub denotes unsigned less than */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64U, 
+                           cc_dep1,
+                           cc_dep2));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBL)) {
+         /* C after sub denotes unsigned less than */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT32U,
+                           unop(Iop_64to32, cc_dep1), 
+                           unop(Iop_64to32, cc_dep2)));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_SUBB)) {
+         /* C after sub denotes unsigned less than */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64U, 
+                           binop(Iop_And64,cc_dep1,mkU64(0xFF)),
+                           binop(Iop_And64,cc_dep2,mkU64(0xFF))));
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_LOGICQ)
+          || isU64(cc_op, AMD64G_CC_OP_LOGICL)
+          || isU64(cc_op, AMD64G_CC_OP_LOGICW)
+          || isU64(cc_op, AMD64G_CC_OP_LOGICB)) {
+         /* cflag after logic is zero */
+         return mkU64(0);
+      }
+      if (isU64(cc_op, AMD64G_CC_OP_DECL) || isU64(cc_op, AMD64G_CC_OP_INCL)
+          || isU64(cc_op, AMD64G_CC_OP_DECQ) || isU64(cc_op, AMD64G_CC_OP_INCQ)) {
+         /* If the thunk is dec or inc, the cflag is supplied as CC_NDEP. */
+         return cc_ndep;
+      }
+
+#     if 0
+      if (cc_op->tag == Iex_Const) {
+         vex_printf("CFLAG "); ppIRExpr(cc_op); vex_printf("\n");
+      }
+#     endif
+
+      return NULL;
+   }
+
+#  undef unop
+#  undef binop
+#  undef mkU64
+#  undef mkU32
+#  undef mkU8
+
+   return NULL;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Supporting functions for x87 FPU activities.            ---*/
+/*---------------------------------------------------------------*/
+
+/* Probe the host's byte order: store a known 32-bit pattern and
+   inspect the byte at the lowest address.  On a little-endian host
+   that byte is the least significant one, 0x10. */
+static inline Bool host_is_little_endian ( void )
+{
+   UInt  pattern   = 0x76543210;
+   UChar firstByte = ((UChar*)&pattern)[0];
+   return toBool(firstByte == 0x10);
+}
+
+/* Inspect a value and its tag, as per the x87 'FXAM' instruction. */
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl ) 
+{
+   UChar* f64;
+   UChar  sign;
+   Int    bexp;
+   Bool   mantissaIsZero;
+   ULong  signC1;
+
+   vassert(host_is_little_endian());
+
+   /* vex_printf("calculate_FXAM ( %d, %llx ) .. ", tag, dbl ); */
+
+   /* Pick the value apart byte by byte (IEEE754 double, little
+      endian): sign is the top bit of byte 7. */
+   f64    = (UChar*)(&dbl);
+   sign   = toUChar( (f64[7] >> 7) & 1 );
+   signC1 = (ULong)sign << AMD64G_FC_SHIFT_C1;
+
+   /* If the tag says the register is empty, return C3..C0 =
+      1,0,sign,1 regardless of the value. */
+   if (tag == 0) {
+      /* vex_printf("Empty\n"); */
+      return AMD64G_FC_MASK_C3 | signC1 | AMD64G_FC_MASK_C0;
+   }
+
+   /* Biased exponent: top 4 bits of byte 6 plus all of byte 7,
+      masked to 11 bits. */
+   bexp = ((f64[7] << 4) | ((f64[6] >> 4) & 0x0F)) & 0x7FF;
+
+   /* Mantissa is zero iff the low nibble of byte 6 and all of
+      bytes 0..5 are zero. */
+   mantissaIsZero
+      = toBool(
+           (f64[6] & 0x0F) == 0 
+           && (f64[5] | f64[4] | f64[3] | f64[2] | f64[1] | f64[0]) == 0
+        );
+
+   if (bexp == 0) {
+      /* Zero exponent: value is either zero (zero mantissa,
+         C3..C0 = 1,0,sign,0) or a denormal (1,1,sign,0). */
+      /* vex_printf(mantissaIsZero ? "Zero\n" : "Denormal\n"); */
+      return mantissaIsZero
+                ? (AMD64G_FC_MASK_C3 | signC1)
+                : (AMD64G_FC_MASK_C3 | AMD64G_FC_MASK_C2 | signC1);
+   }
+
+   if (bexp == 0x7FF) {
+      /* All-ones exponent: infinity (zero mantissa, C3..C0 =
+         0,1,sign,1) or NaN (0,0,sign,1). */
+      /* vex_printf(mantissaIsZero ? "Inf\n" : "NaN\n"); */
+      return mantissaIsZero
+                ? (AMD64G_FC_MASK_C2 | signC1 | AMD64G_FC_MASK_C0)
+                : (signC1 | AMD64G_FC_MASK_C0);
+   }
+
+   /* Anything else is a normal finite number: C3..C0 = 0,1,sign,0. */
+   /* vex_printf("normal\n"); */
+   return AMD64G_FC_MASK_C2 | signC1;
+}
+
+
+/* This is used to implement both 'frstor' and 'fldenv'.  The latter
+   appears to differ from the former only in that the 8 FP registers
+   themselves are not transferred into the guest state. */
+static
+VexEmNote do_put_x87 ( Bool moveRegs,
+                       /*IN*/UChar* x87_state,
+                       /*OUT*/VexGuestAMD64State* vex_state )
+{
+   Fpu_State* x87     = (Fpu_State*)x87_state;
+   ULong*     gstRegs = (ULong*)(&vex_state->guest_FPREG[0]);
+   UChar*     gstTags = (UChar*)(&vex_state->guest_FPTAG[0]);
+   UInt       ftop    = (x87->env[FP_ENV_STAT] >> 11) & 7;
+   UInt       tagw    = x87->env[FP_ENV_TAG];
+   UInt       fpucw   = x87->env[FP_ENV_CTRL];
+   UInt       c3210   = x87->env[FP_ENV_STAT] & 0x4700;
+   Int        stno, preg;
+   UInt       tag;
+   UInt       fpround;
+   ULong      pair;
+   VexEmNote  ew;
+
+   /* Walk the stack from ST(0) to ST(7), mapping each stack slot
+      onto its physical register, and transfer tags (and, when
+      moveRegs is set, the register values too). */
+   for (stno = 0; stno < 8; stno++) {
+      preg = (stno + ftop) & 7;
+      tag  = (tagw >> (2*preg)) & 3;
+      if (tag == 3) {
+         /* Register is empty.  Write it anyway: if we don't,
+            memcheck could get out of sync, in that it thinks all FP
+            registers are defined by this helper, but in reality some
+            have not been updated. */
+         if (moveRegs)
+            gstRegs[preg] = 0; /* IEEE754 64-bit zero */
+         gstTags[preg] = 0;
+      } else {
+         /* Register is non-empty. */
+         if (moveRegs)
+            convert_f80le_to_f64le( &x87->reg[10*stno],
+                                    (UChar*)&gstRegs[preg] );
+         gstTags[preg] = 1;
+      }
+   }
+
+   /* Stack pointer. */
+   vex_state->guest_FTOP = ftop;
+
+   /* Status word (the C3..C0 condition bits). */
+   vex_state->guest_FC3210 = c3210;
+
+   /* Control word: derive FPROUND from it, and detect any emulation
+      warnings it implies. */
+   pair    = amd64g_check_fldcw ( (ULong)fpucw );
+   fpround = (UInt)pair & 0xFFFFFFFFULL;
+   ew      = (VexEmNote)(pair >> 32);
+
+   vex_state->guest_FPROUND = fpround & 3;
+
+   /* Emulation warnings --> caller. */
+   return ew;
+}
+
+
+/* Create an x87 FPU state from the guest state, as close as
+   we can approximate it. */
+static
+void do_get_x87 ( /*IN*/VexGuestAMD64State* vex_state,
+                  /*OUT*/UChar* x87_state )
+{
+   Fpu_State* x87     = (Fpu_State*)x87_state;
+   ULong*     gstRegs = (ULong*)(&vex_state->guest_FPREG[0]);
+   UChar*     gstTags = (UChar*)(&vex_state->guest_FPTAG[0]);
+   UInt       ftop    = vex_state->guest_FTOP;
+   UInt       c3210   = vex_state->guest_FC3210;
+   UInt       tagw;
+   Int        i, stno, preg;
+
+   /* Start with an all-zero environment ... */
+   for (i = 0; i < 14; i++)
+      x87->env[i] = 0;
+
+   /* ... then fill in what we can reconstruct: pad words, status
+      word (FTOP and C3..C0), and control word. */
+   x87->env[1] = x87->env[3] = x87->env[5] = x87->env[13] = 0xFFFF;
+   x87->env[FP_ENV_STAT]
+      = toUShort(((ftop & 7) << 11) | (c3210 & 0x4700));
+   x87->env[FP_ENV_CTRL]
+      = toUShort(amd64g_create_fpucw( vex_state->guest_FPROUND ));
+
+   /* Dump the register stack in ST order.  Tag value 3 marks an
+      empty slot, 0 a full one; the register contents are written
+      out in either case. */
+   tagw = 0;
+   for (stno = 0; stno < 8; stno++) {
+      preg = (stno + ftop) & 7;
+      tagw |= ((gstTags[preg] == 0 ? 3 : 0) << (2*preg));
+      convert_f64le_to_f80le( (UChar*)&gstRegs[preg],
+                              &x87->reg[10*stno] );
+   }
+   x87->env[FP_ENV_TAG] = toUShort(tagw);
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest state, writes guest mem) */
+/* NOTE: only handles 32-bit format (no REX.W on the insn) */
+void amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM ( VexGuestAMD64State* gst,
+                                                HWord addr )
+{
+   /* Derived from values obtained from
+      vendor_id       : AuthenticAMD
+      cpu family      : 15
+      model           : 12
+      model name      : AMD Athlon(tm) 64 Processor 3200+
+      stepping        : 0
+      cpu MHz         : 2200.000
+      cache size      : 512 KB
+   */
+   /* Somewhat roundabout, but at least it's simple. */
+   Fpu_State tmp;
+   UShort*   addrS = (UShort*)addr;
+   UChar*    addrC = (UChar*)addr;
+   UInt      mxcsr;
+   UShort    fp_tags;
+   UInt      summary_tags;
+   Int       r, stno;
+   UShort    *srcS, *dstS;
+
+   do_get_x87( gst, (UChar*)&tmp );
+   mxcsr = amd64g_create_mxcsr( gst->guest_SSEROUND );
+
+   /* Now build the proper fxsave image from the x87 image we just
+      made. */
+
+   addrS[0]  = tmp.env[FP_ENV_CTRL]; /* FCW: fpu control word */
+   addrS[1]  = tmp.env[FP_ENV_STAT]; /* FCW: fpu status word */
+
+   /* set addrS[2] in an endian-independent way */
+   summary_tags = 0;
+   fp_tags = tmp.env[FP_ENV_TAG];
+   for (r = 0; r < 8; r++) {
+      if ( ((fp_tags >> (2*r)) & 3) != 3 )
+         summary_tags |= (1 << r);
+   }
+   addrC[4]  = toUChar(summary_tags); /* FTW: tag summary byte */
+   addrC[5]  = 0; /* pad */
+
+   /* FOP: faulting fpu opcode.  From experimentation, the real CPU
+      does not write this field. (?!) */
+   addrS[3]  = 0; /* BOGUS */
+
+   /* RIP (Last x87 instruction pointer).  From experimentation, the
+      real CPU does not write this field. (?!) */
+   addrS[4]  = 0; /* BOGUS */
+   addrS[5]  = 0; /* BOGUS */
+   addrS[6]  = 0; /* BOGUS */
+   addrS[7]  = 0; /* BOGUS */
+
+   /* RDP (Last x87 data pointer).  From experimentation, the real CPU
+      does not write this field. (?!) */
+   addrS[8]  = 0; /* BOGUS */
+   addrS[9]  = 0; /* BOGUS */
+   addrS[10] = 0; /* BOGUS */
+   addrS[11] = 0; /* BOGUS */
+
+   addrS[12] = toUShort(mxcsr);  /* MXCSR */
+   addrS[13] = toUShort(mxcsr >> 16);
+
+   addrS[14] = 0xFFFF; /* MXCSR mask (lo16) */
+   addrS[15] = 0x0000; /* MXCSR mask (hi16) */
+
+   /* Copy in the FP registers, in ST order. */
+   for (stno = 0; stno < 8; stno++) {
+      srcS = (UShort*)(&tmp.reg[10*stno]);
+      dstS = (UShort*)(&addrS[16 + 8*stno]);
+      dstS[0] = srcS[0];
+      dstS[1] = srcS[1];
+      dstS[2] = srcS[2];
+      dstS[3] = srcS[3];
+      dstS[4] = srcS[4];
+      dstS[5] = 0;
+      dstS[6] = 0;
+      dstS[7] = 0;
+   }
+
+   /* That's the first 160 bytes of the image done.  Now only %xmm0
+      .. %xmm15 remain to be copied, and we let the generated IR do
+      that, so as to make Memcheck's definedness flow for the non-XMM
+      parts independant from that of the all the other control and
+      status words in the structure.  This avoids the false positives
+      shown in #291310. */
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (writes guest state, reads guest mem) */
VexEmNote amd64g_dirtyhelper_FXRSTOR_ALL_EXCEPT_XMM ( VexGuestAMD64State* gst,
                                                      HWord addr )
{
   /* Restores the x87 state and the SSE rounding mode (MXCSR) from a
      fxsave-format image at 'addr'.  Returns an emulation warning,
      preferring an x87 warning over an XMM one if both arise. */
   Fpu_State tmp;
   VexEmNote warnX87 = EmNote_NONE;
   VexEmNote warnXMM = EmNote_NONE;
   UShort*   addrS   = (UShort*)addr;
   UChar*    addrC   = (UChar*)addr;
   UShort    fp_tags;
   Int       r, stno, i;

   /* Don't restore %xmm0 .. %xmm15, for the same reasons that
      amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM doesn't save them.  See
      comment in that function for details. */

   /* Copy the x87 registers out of the image, into a temporary
      Fpu_State struct. */
   for (i = 0; i < 14; i++) tmp.env[i] = 0;
   for (i = 0; i < 80; i++) tmp.reg[i] = 0;
   /* fill in tmp.reg[0..7] -- 10 data bytes (5 shorts) out of each
      16-byte fxsave register slot */
   for (stno = 0; stno < 8; stno++) {
      UShort* dstS = (UShort*)(&tmp.reg[10*stno]);
      UShort* srcS = (UShort*)(&addrS[16 + 8*stno]);
      dstS[0] = srcS[0];
      dstS[1] = srcS[1];
      dstS[2] = srcS[2];
      dstS[3] = srcS[3];
      dstS[4] = srcS[4];
   }
   /* fill in tmp.env[0..13] */
   tmp.env[FP_ENV_CTRL] = addrS[0]; /* FCW: fpu control word */
   tmp.env[FP_ENV_STAT] = addrS[1]; /* FSW: fpu status word */

   /* Reconstruct the full 2-bit-per-register tag word from the
      abridged 1-bit-per-register summary byte at addrC[4]; a set bit
      means the register is in use (see the summary construction in
      amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM). */
   fp_tags = 0;
   for (r = 0; r < 8; r++) {
      if (addrC[4] & (1<<r))
         fp_tags |= (0 << (2*r)); /* VALID -- not really precise
            enough: the abridged tag can't distinguish
            valid/zero/special, so everything in use becomes 0. */
      else 
         fp_tags |= (3 << (2*r)); /* EMPTY */
   }
   tmp.env[FP_ENV_TAG] = fp_tags;

   /* Now write 'tmp' into the guest state. */
   warnX87 = do_put_x87( True/*moveRegs*/, (UChar*)&tmp, gst );

   /* Reassemble the 32-bit MXCSR from its two 16-bit halves and
      derive guest_SSEROUND (plus any emulation warning) from it. */
   { UInt w32 = (((UInt)addrS[12]) & 0xFFFF)
                | ((((UInt)addrS[13]) & 0xFFFF) << 16);
     ULong w64 = amd64g_check_ldmxcsr( (ULong)w32 );

     warnXMM = (VexEmNote)(w64 >> 32);

     gst->guest_SSEROUND = w64 & 0xFFFFFFFFULL;
   }

   /* Prefer an X87 emwarn over an XMM one, if both exist. */
   if (warnX87 != EmNote_NONE)
      return warnX87;
   else
      return warnXMM;
}
+
+
+/* DIRTY HELPER (writes guest state) */
+/* Initialise the x87 FPU state as per 'finit'. */
+void amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* gst )
+{
+   Int i;
+   gst->guest_FTOP = 0;
+   for (i = 0; i < 8; i++) {
+      gst->guest_FPTAG[i] = 0; /* empty */
+      gst->guest_FPREG[i] = 0; /* IEEE754 64-bit zero */
+   }
+   gst->guest_FPROUND = (ULong)Irrm_NEAREST;
+   gst->guest_FC3210  = 0;
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest memory) */
+ULong amd64g_dirtyhelper_loadF80le ( Addr addrU )
+{
+   ULong f64;
+   convert_f80le_to_f64le ( (UChar*)addrU, (UChar*)&f64 );
+   return f64;
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (writes guest memory) */
+void amd64g_dirtyhelper_storeF80le ( Addr addrU, ULong f64 )
+{
+   convert_f64le_to_f80le( (UChar*)&f64, (UChar*)addrU );
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* CLEAN HELPER */
+/* mxcsr[15:0] contains a SSE native format MXCSR value.
+   Extract from it the required SSEROUND value and any resulting
+   emulation warning, and return (warn << 32) | sseround value.
+*/
+ULong amd64g_check_ldmxcsr ( ULong mxcsr )
+{
+   /* Decide on a rounding mode.  mxcsr[14:13] holds it. */
+   /* NOTE, encoded exactly as per enum IRRoundingMode. */
+   ULong rmode = (mxcsr >> 13) & 3;
+
+   /* Detect any required emulation warnings. */
+   VexEmNote ew = EmNote_NONE;
+
+   if ((mxcsr & 0x1F80) != 0x1F80) {
+      /* unmasked exceptions! */
+      ew = EmWarn_X86_sseExns;
+   }
+   else 
+   if (mxcsr & (1<<15)) {
+      /* FZ is set */
+      ew = EmWarn_X86_fz;
+   } 
+   else
+   if (mxcsr & (1<<6)) {
+      /* DAZ is set */
+      ew = EmWarn_X86_daz;
+   }
+
+   return (((ULong)ew) << 32) | ((ULong)rmode);
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* CLEAN HELPER */
+/* Given sseround as an IRRoundingMode value, create a suitable SSE
+   native format MXCSR value. */
+ULong amd64g_create_mxcsr ( ULong sseround )
+{
+   sseround &= 3;
+   return 0x1F80 | (sseround << 13);
+}
+
+
+/* CLEAN HELPER */
+/* fpucw[15:0] contains a x87 native format FPU control word.
+   Extract from it the required FPROUND value and any resulting
+   emulation warning, and return (warn << 32) | fpround value.
+*/
+ULong amd64g_check_fldcw ( ULong fpucw )
+{
+   /* Decide on a rounding mode.  fpucw[11:10] holds it. */
+   /* NOTE, encoded exactly as per enum IRRoundingMode. */
+   ULong rmode = (fpucw >> 10) & 3;
+
+   /* Detect any required emulation warnings. */
+   VexEmNote ew = EmNote_NONE;
+
+   if ((fpucw & 0x3F) != 0x3F) {
+      /* unmasked exceptions! */
+      ew = EmWarn_X86_x87exns;
+   }
+   else
+   if (((fpucw >> 8) & 3) != 3) {
+      /* unsupported precision */
+      ew = EmWarn_X86_x87precision;
+   }
+
+   return (((ULong)ew) << 32) | ((ULong)rmode);
+}
+
+
+/* CLEAN HELPER */
+/* Given fpround as an IRRoundingMode value, create a suitable x87
+   native format FPU control word. */
+ULong amd64g_create_fpucw ( ULong fpround )
+{
+   fpround &= 3;
+   return 0x037F | (fpround << 10);
+}
+
+
+/* This is used to implement 'fldenv'.  
+   Reads 28 bytes at x87_state[0 .. 27]. */
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER */
+VexEmNote amd64g_dirtyhelper_FLDENV ( /*OUT*/VexGuestAMD64State* vex_state,
+                                      /*IN*/HWord x87_state)
+{
+   return do_put_x87( False, (UChar*)x87_state, vex_state );
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER */
+/* Create an x87 FPU env from the guest state, as close as we can
+   approximate it.  Writes 28 bytes at x87_state[0..27]. */
+void amd64g_dirtyhelper_FSTENV ( /*IN*/VexGuestAMD64State* vex_state,
+                                 /*OUT*/HWord x87_state )
+{
+   Int        i, stno, preg;
+   UInt       tagw;
+   UChar*     vexTags = (UChar*)(&vex_state->guest_FPTAG[0]);
+   Fpu_State* x87     = (Fpu_State*)x87_state;
+   UInt       ftop    = vex_state->guest_FTOP;
+   ULong      c3210   = vex_state->guest_FC3210;
+
+   for (i = 0; i < 14; i++)
+      x87->env[i] = 0;
+
+   x87->env[1] = x87->env[3] = x87->env[5] = x87->env[13] = 0xFFFF;
+   x87->env[FP_ENV_STAT] 
+      = toUShort(toUInt( ((ftop & 7) << 11) | (c3210 & 0x4700) ));
+   x87->env[FP_ENV_CTRL] 
+      = toUShort(toUInt( amd64g_create_fpucw( vex_state->guest_FPROUND ) ));
+
+   /* Compute the x87 tag word. */
+   tagw = 0;
+   for (stno = 0; stno < 8; stno++) {
+      preg = (stno + ftop) & 7;
+      if (vexTags[preg] == 0) {
+         /* register is empty */
+         tagw |= (3 << (2*preg));
+      } else {
+         /* register is full. */
+         tagw |= (0 << (2*preg));
+      }
+   }
+   x87->env[FP_ENV_TAG] = toUShort(tagw);
+
+   /* We don't dump the x87 registers, tho. */
+}
+
+
+/* This is used to implement 'fnsave'.  
+   Writes 108 bytes at x87_state[0 .. 107]. */
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER */
+void amd64g_dirtyhelper_FNSAVE ( /*IN*/VexGuestAMD64State* vex_state,
+                                 /*OUT*/HWord x87_state)
+{
+   do_get_x87( vex_state, (UChar*)x87_state );
+}
+
+
+/* This is used to implement 'fnsaves'.  
+   Writes 94 bytes at x87_state[0 .. 93]. */
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER */
+void amd64g_dirtyhelper_FNSAVES ( /*IN*/VexGuestAMD64State* vex_state,
+                                  /*OUT*/HWord x87_state)
+{
+   Int           i, stno, preg;
+   UInt          tagw;
+   ULong*        vexRegs = (ULong*)(&vex_state->guest_FPREG[0]);
+   UChar*        vexTags = (UChar*)(&vex_state->guest_FPTAG[0]);
+   Fpu_State_16* x87     = (Fpu_State_16*)x87_state;
+   UInt          ftop    = vex_state->guest_FTOP;
+   UInt          c3210   = vex_state->guest_FC3210;
+
+   for (i = 0; i < 7; i++)
+      x87->env[i] = 0;
+
+   x87->env[FPS_ENV_STAT] 
+      = toUShort(((ftop & 7) << 11) | (c3210 & 0x4700));
+   x87->env[FPS_ENV_CTRL] 
+      = toUShort(amd64g_create_fpucw( vex_state->guest_FPROUND ));
+
+   /* Dump the register stack in ST order. */
+   tagw = 0;
+   for (stno = 0; stno < 8; stno++) {
+      preg = (stno + ftop) & 7;
+      if (vexTags[preg] == 0) {
+         /* register is empty */
+         tagw |= (3 << (2*preg));
+         convert_f64le_to_f80le( (UChar*)&vexRegs[preg], 
+                                 &x87->reg[10*stno] );
+      } else {
+         /* register is full. */
+         tagw |= (0 << (2*preg));
+         convert_f64le_to_f80le( (UChar*)&vexRegs[preg], 
+                                 &x87->reg[10*stno] );
+      }
+   }
+   x87->env[FPS_ENV_TAG] = toUShort(tagw);
+}
+
+
+/* This is used to implement 'frstor'.  
+   Reads 108 bytes at x87_state[0 .. 107]. */
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER */
+VexEmNote amd64g_dirtyhelper_FRSTOR ( /*OUT*/VexGuestAMD64State* vex_state,
+                                      /*IN*/HWord x87_state)
+{
+   return do_put_x87( True, (UChar*)x87_state, vex_state );
+}
+
+
+/* This is used to implement 'frstors'.
+   Reads 94 bytes at x87_state[0 .. 93]. */
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER */
VexEmNote amd64g_dirtyhelper_FRSTORS ( /*OUT*/VexGuestAMD64State* vex_state,
                                       /*IN*/HWord x87_state)
{
   /* 16-bit-env analogue of 'frstor': reloads registers, tags, TOP,
      condition codes and rounding mode from a 94-byte image.
      Returns any emulation warning arising from the control word. */
   Int           stno, preg;
   UInt          tag;
   ULong*        vexRegs = (ULong*)(&vex_state->guest_FPREG[0]);
   UChar*        vexTags = (UChar*)(&vex_state->guest_FPTAG[0]);
   Fpu_State_16* x87     = (Fpu_State_16*)x87_state;
   UInt          ftop    = (x87->env[FPS_ENV_STAT] >> 11) & 7; /* TOP field */
   UInt          tagw    = x87->env[FPS_ENV_TAG];
   UInt          fpucw   = x87->env[FPS_ENV_CTRL];
   UInt          c3210   = x87->env[FPS_ENV_STAT] & 0x4700; /* C3,C2,C1,C0 */
   VexEmNote     ew;
   UInt          fpround;
   ULong         pair;

   /* Copy registers and tags.  Full tag value 3 means empty; any
      other value means the register is in use. */
   for (stno = 0; stno < 8; stno++) {
      preg = (stno + ftop) & 7;
      tag = (tagw >> (2*preg)) & 3;
      if (tag == 3) {
         /* register is empty */
         /* hmm, if it's empty, does it still get written?  Probably
            safer to say it does.  If we don't, memcheck could get out
            of sync, in that it thinks all FP registers are defined by
            this helper, but in reality some have not been updated. */
         vexRegs[preg] = 0; /* IEEE754 64-bit zero */
         vexTags[preg] = 0;
      } else {
         /* register is non-empty */
         convert_f80le_to_f64le( &x87->reg[10*stno], 
                                 (UChar*)&vexRegs[preg] );
         vexTags[preg] = 1;
      }
   }

   /* stack pointer */
   vex_state->guest_FTOP = ftop;

   /* status word */
   vex_state->guest_FC3210 = c3210;

   /* handle the control word, setting FPROUND and detecting any
      emulation warnings.  amd64g_check_fldcw packs the result as
      (warning << 32) | rounding-mode. */
   pair    = amd64g_check_fldcw ( (ULong)fpucw );
   fpround = (UInt)pair & 0xFFFFFFFFULL;
   ew      = (VexEmNote)(pair >> 32);
   
   vex_state->guest_FPROUND = fpround & 3;

   /* emulation warnings --> caller */
   return ew;
}
+
+
+/*---------------------------------------------------------------*/
+/*--- Misc integer helpers, including rotates and CPUID.      ---*/
+/*---------------------------------------------------------------*/
+
+/* Claim to be the following CPU, which is probably representative of
+   the lowliest (earliest) amd64 offerings.  It can do neither sse3
+   nor cx16.
+
+   vendor_id       : AuthenticAMD  
+   cpu family      : 15  
+   model           : 5  
+   model name      : AMD Opteron (tm) Processor 848  
+   stepping        : 10  
+   cpu MHz         : 1797.682  
+   cache size      : 1024 KB  
+   fpu             : yes  
+   fpu_exception   : yes  
+   cpuid level     : 1  
+   wp              : yes  
+   flags           : fpu vme de pse tsc msr pae mce cx8 apic sep
+                     mtrr pge mca cmov pat pse36 clflush mmx fxsr
+                     sse sse2 syscall nx mmxext lm 3dnowext 3dnow  
+   bogomips        : 3600.62  
+   TLB size        : 1088 4K pages  
+   clflush size    : 64  
+   cache_alignment : 64  
+   address sizes   : 40 bits physical, 48 bits virtual  
+   power management: ts fid vid ttp
+
+   2012-Feb-21: don't claim 3dnow or 3dnowext, since in fact 
+   we don't support them.  See #291568.  3dnow is 80000001.EDX.31
+   and 3dnowext is 80000001.EDX.30.
+*/
void amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st )
{
   /* The requested leaf is in RAX (low 32 bits); results are written
      to RAX..RDX, with the ULong cast zeroing the upper halves. */
#  define SET_ABCD(_a,_b,_c,_d)                \
      do { st->guest_RAX = (ULong)(_a);        \
           st->guest_RBX = (ULong)(_b);        \
           st->guest_RCX = (ULong)(_c);        \
           st->guest_RDX = (ULong)(_d);        \
      } while (0)

   switch (0xFFFFFFFF & st->guest_RAX) {
      case 0x00000000:
         /* max basic leaf (1) + "AuthenticAMD" vendor string */
         SET_ABCD(0x00000001, 0x68747541, 0x444d4163, 0x69746e65);
         break;
      case 0x00000001:
         /* family/model/stepping + standard feature flags */
         SET_ABCD(0x00000f5a, 0x01000800, 0x00000000, 0x078bfbff);
         break;
      case 0x80000000:
         /* max extended leaf + vendor string */
         SET_ABCD(0x80000018, 0x68747541, 0x444d4163, 0x69746e65);
         break;
      case 0x80000001:
         /* extended feature flags */
         /* Don't claim to support 3dnow or 3dnowext.  0xe1d3fbff is
            the original it-is-supported value that the h/w provides.
            See #291568. */
         SET_ABCD(0x00000f5a, 0x00000505, 0x00000000, /*0xe1d3fbff*/
                                                      0x21d3fbff);
         break;
      /* 0x80000002..4: processor brand string,
         "AMD Opteron (tm) Processor 848" */
      case 0x80000002:
         SET_ABCD(0x20444d41, 0x6574704f, 0x206e6f72, 0x296d7428);
         break;
      case 0x80000003:
         SET_ABCD(0x6f725020, 0x73736563, 0x3820726f, 0x00003834);
         break;
      case 0x80000004:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x80000005:
         /* L1 cache and TLB information */
         SET_ABCD(0xff08ff08, 0xff20ff20, 0x40020140, 0x40020140);
         break;
      case 0x80000006:
         /* L2 cache information */
         SET_ABCD(0x00000000, 0x42004200, 0x04008140, 0x00000000);
         break;
      case 0x80000007:
         /* advanced power management */
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x0000000f);
         break;
      case 0x80000008:
         /* physical/virtual address sizes (40/48 bits) */
         SET_ABCD(0x00003028, 0x00000000, 0x00000000, 0x00000000);
         break;
      default:
         /* unrecognised leaves return all zeroes */
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
   }
#  undef SET_ABCD
}
+
+
+/* Claim to be the following CPU (2 x ...), which is sse3 and cx16
+   capable.
+
+   vendor_id       : GenuineIntel
+   cpu family      : 6
+   model           : 15
+   model name      : Intel(R) Core(TM)2 CPU 6600 @ 2.40GHz
+   stepping        : 6
+   cpu MHz         : 2394.000
+   cache size      : 4096 KB
+   physical id     : 0
+   siblings        : 2
+   core id         : 0
+   cpu cores       : 2
+   fpu             : yes
+   fpu_exception   : yes
+   cpuid level     : 10
+   wp              : yes
+   flags           : fpu vme de pse tsc msr pae mce cx8 apic sep
+                     mtrr pge mca cmov pat pse36 clflush dts acpi
+                     mmx fxsr sse sse2 ss ht tm syscall nx lm
+                     constant_tsc pni monitor ds_cpl vmx est tm2
+                     cx16 xtpr lahf_lm
+   bogomips        : 4798.78
+   clflush size    : 64
+   cache_alignment : 64
+   address sizes   : 36 bits physical, 48 bits virtual
+   power management:
+*/
void amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st )
{
   /* The requested leaf is in RAX (low 32 bits); leaf 4 is further
      sub-indexed by RCX.  Results go to RAX..RDX, with the ULong
      cast zeroing the upper halves. */
#  define SET_ABCD(_a,_b,_c,_d)                \
      do { st->guest_RAX = (ULong)(_a);        \
           st->guest_RBX = (ULong)(_b);        \
           st->guest_RCX = (ULong)(_c);        \
           st->guest_RDX = (ULong)(_d);        \
      } while (0)

   switch (0xFFFFFFFF & st->guest_RAX) {
      case 0x00000000:
         /* max basic leaf (0xa) + "GenuineIntel" vendor string */
         SET_ABCD(0x0000000a, 0x756e6547, 0x6c65746e, 0x49656e69);
         break;
      case 0x00000001:
         /* family/model/stepping + feature flags (incl. sse3, cx16) */
         SET_ABCD(0x000006f6, 0x00020800, 0x0000e3bd, 0xbfebfbff);
         break;
      case 0x00000002:
         /* cache/TLB descriptors */
         SET_ABCD(0x05b0b101, 0x005657f0, 0x00000000, 0x2cb43049);
         break;
      case 0x00000003:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x00000004: {
         /* deterministic cache parameters, sub-indexed by ECX */
         switch (0xFFFFFFFF & st->guest_RCX) {
            case 0x00000000: SET_ABCD(0x04000121, 0x01c0003f,
                                      0x0000003f, 0x00000001); break;
            case 0x00000001: SET_ABCD(0x04000122, 0x01c0003f,
                                      0x0000003f, 0x00000001); break;
            case 0x00000002: SET_ABCD(0x04004143, 0x03c0003f,
                                      0x00000fff, 0x00000001); break;
            default:         SET_ABCD(0x00000000, 0x00000000,
                                      0x00000000, 0x00000000); break;
         }
         break;
      }
      case 0x00000005:
         /* MONITOR/MWAIT parameters */
         SET_ABCD(0x00000040, 0x00000040, 0x00000003, 0x00000020);
         break;
      case 0x00000006:
         /* thermal and power management */
         SET_ABCD(0x00000001, 0x00000002, 0x00000001, 0x00000000);
         break;
      case 0x00000007:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x00000008:
         SET_ABCD(0x00000400, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x00000009:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x0000000a:
      /* NOTE: this label lets 'default' below reuse leaf 0xa's
         (arch performance monitoring) result for any leaf we don't
         recognise. */
      unhandled_eax_value:
         SET_ABCD(0x07280202, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x80000000:
         /* max extended leaf */
         SET_ABCD(0x80000008, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x80000001:
         /* extended feature flags */
         SET_ABCD(0x00000000, 0x00000000, 0x00000001, 0x20100800);
         break;
      /* 0x80000002..4: brand string "Intel(R) Core(TM)2 CPU 6600 @
         2.40GHz" */
      case 0x80000002:
         SET_ABCD(0x65746e49, 0x2952286c, 0x726f4320, 0x4d542865);
         break;
      case 0x80000003:
         SET_ABCD(0x43203229, 0x20205550, 0x20202020, 0x20202020);
         break;
      case 0x80000004:
         SET_ABCD(0x30303636, 0x20402020, 0x30342e32, 0x007a4847);
         break;
      case 0x80000005:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x80000006:
         /* L2 cache information */
         SET_ABCD(0x00000000, 0x00000000, 0x10008040, 0x00000000);
         break;
      case 0x80000007:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x80000008:
         /* physical/virtual address sizes (36/48 bits) */
         SET_ABCD(0x00003024, 0x00000000, 0x00000000, 0x00000000);
         break;
      default:         
         goto unhandled_eax_value;
   }
#  undef SET_ABCD
}
+
+
+/* Claim to be the following CPU (4 x ...), which is sse4.2 and cx16
+   capable.
+
+   vendor_id       : GenuineIntel
+   cpu family      : 6
+   model           : 37
+   model name      : Intel(R) Core(TM) i5 CPU         670  @ 3.47GHz
+   stepping        : 2
+   cpu MHz         : 3334.000
+   cache size      : 4096 KB
+   physical id     : 0
+   siblings        : 4
+   core id         : 0
+   cpu cores       : 2
+   apicid          : 0
+   initial apicid  : 0
+   fpu             : yes
+   fpu_exception   : yes
+   cpuid level     : 11
+   wp              : yes
+   flags           : fpu vme de pse tsc msr pae mce cx8 apic sep
+                     mtrr pge mca cmov pat pse36 clflush dts acpi
+                     mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp
+                     lm constant_tsc arch_perfmon pebs bts rep_good
+                     xtopology nonstop_tsc aperfmperf pni pclmulqdq
+                     dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16
+                     xtpr pdcm sse4_1 sse4_2 popcnt aes lahf_lm ida
+                     arat tpr_shadow vnmi flexpriority ept vpid
+   bogomips        : 6957.57
+   clflush size    : 64
+   cache_alignment : 64
+   address sizes   : 36 bits physical, 48 bits virtual
+   power management:
+*/
void amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st )
{
   /* The requested leaf is in RAX; leaves 4, 0xb and 0xd are further
      sub-indexed by ECX.  Results go to RAX..RDX, with the ULong
      cast zeroing the upper halves. */
#  define SET_ABCD(_a,_b,_c,_d)                \
      do { st->guest_RAX = (ULong)(_a);        \
           st->guest_RBX = (ULong)(_b);        \
           st->guest_RCX = (ULong)(_c);        \
           st->guest_RDX = (ULong)(_d);        \
      } while (0)

   UInt old_eax = (UInt)st->guest_RAX;
   UInt old_ecx = (UInt)st->guest_RCX;

   switch (old_eax) {
      case 0x00000000:
         /* max basic leaf (0xb) + "GenuineIntel" vendor string */
         SET_ABCD(0x0000000b, 0x756e6547, 0x6c65746e, 0x49656e69);
         break;
      case 0x00000001:
         /* family/model/stepping + feature flags
            (incl. sse4.1/sse4.2, cx16) */
         SET_ABCD(0x00020652, 0x00100800, 0x0298e3ff, 0xbfebfbff);
         break;
      case 0x00000002:
         /* cache/TLB descriptors */
         SET_ABCD(0x55035a01, 0x00f0b2e3, 0x00000000, 0x09ca212c);
         break;
      case 0x00000003:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x00000004:
         /* deterministic cache parameters, sub-indexed by ECX */
         switch (old_ecx) {
            case 0x00000000: SET_ABCD(0x1c004121, 0x01c0003f,
                                      0x0000003f, 0x00000000); break;
            case 0x00000001: SET_ABCD(0x1c004122, 0x00c0003f,
                                      0x0000007f, 0x00000000); break;
            case 0x00000002: SET_ABCD(0x1c004143, 0x01c0003f,
                                      0x000001ff, 0x00000000); break;
            case 0x00000003: SET_ABCD(0x1c03c163, 0x03c0003f,
                                      0x00000fff, 0x00000002); break;
            default:         SET_ABCD(0x00000000, 0x00000000,
                                      0x00000000, 0x00000000); break;
         }
         break;
      case 0x00000005:
         /* MONITOR/MWAIT parameters */
         SET_ABCD(0x00000040, 0x00000040, 0x00000003, 0x00001120);
         break;
      case 0x00000006:
         /* thermal and power management */
         SET_ABCD(0x00000007, 0x00000002, 0x00000001, 0x00000000);
         break;
      case 0x00000007:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x00000008:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x00000009:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x0000000a:
         /* architectural performance monitoring */
         SET_ABCD(0x07300403, 0x00000004, 0x00000000, 0x00000603);
         break;
      case 0x0000000b:
         /* extended topology enumeration, sub-indexed by ECX */
         switch (old_ecx) {
            case 0x00000000:
               SET_ABCD(0x00000001, 0x00000002,
                        0x00000100, 0x00000000); break;
            case 0x00000001:
               SET_ABCD(0x00000004, 0x00000004,
                        0x00000201, 0x00000000); break;
            default:
               SET_ABCD(0x00000000, 0x00000000,
                        old_ecx,    0x00000000); break;
         }
         break;
      case 0x0000000c:
         SET_ABCD(0x00000001, 0x00000002, 0x00000100, 0x00000000);
         break;
      case 0x0000000d:
         /* sub-indexed by ECX */
         switch (old_ecx) {
            case 0x00000000: SET_ABCD(0x00000001, 0x00000002,
                                      0x00000100, 0x00000000); break;
            case 0x00000001: SET_ABCD(0x00000004, 0x00000004,
                                      0x00000201, 0x00000000); break;
            default:         SET_ABCD(0x00000000, 0x00000000,
                                      old_ecx,    0x00000000); break;
         }
         break;
      case 0x80000000:
         /* max extended leaf */
         SET_ABCD(0x80000008, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x80000001:
         /* extended feature flags */
         SET_ABCD(0x00000000, 0x00000000, 0x00000001, 0x28100800);
         break;
      /* 0x80000002..4: brand string "Intel(R) Core(TM) i5 CPU
         670 @ 3.47GHz" */
      case 0x80000002:
         SET_ABCD(0x65746e49, 0x2952286c, 0x726f4320, 0x4d542865);
         break;
      case 0x80000003:
         SET_ABCD(0x35692029, 0x55504320, 0x20202020, 0x20202020);
         break;
      case 0x80000004:
         SET_ABCD(0x30373620, 0x20402020, 0x37342e33, 0x007a4847);
         break;
      case 0x80000005:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x80000006:
         /* L2 cache information */
         SET_ABCD(0x00000000, 0x00000000, 0x01006040, 0x00000000);
         break;
      case 0x80000007:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000100);
         break;
      case 0x80000008:
         /* physical/virtual address sizes (36/48 bits) */
         SET_ABCD(0x00003024, 0x00000000, 0x00000000, 0x00000000);
         break;
      default:
         SET_ABCD(0x00000001, 0x00000002, 0x00000100, 0x00000000);
         break;
   }
#  undef SET_ABCD
}
+
+
+/* Claim to be the following CPU (4 x ...), which is AVX and cx16
+   capable.  Plus (kludge!) it "supports" HTM.
+
+   vendor_id       : GenuineIntel
+   cpu family      : 6
+   model           : 42
+   model name      : Intel(R) Core(TM) i5-2300 CPU @ 2.80GHz
+   stepping        : 7
+   cpu MHz         : 1600.000
+   cache size      : 6144 KB
+   physical id     : 0
+   siblings        : 4
+   core id         : 3
+   cpu cores       : 4
+   apicid          : 6
+   initial apicid  : 6
+   fpu             : yes
+   fpu_exception   : yes
+   cpuid level     : 13
+   wp              : yes
+   flags           : fpu vme de pse tsc msr pae mce cx8 apic sep
+                     mtrr pge mca cmov pat pse36 clflush dts acpi
+                     mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp
+                     lm constant_tsc arch_perfmon pebs bts rep_good
+                     nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq
+                     dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16
+                     xtpr pdcm sse4_1 sse4_2 popcnt aes xsave avx 
+                     lahf_lm ida arat epb xsaveopt pln pts dts
+                     tpr_shadow vnmi flexpriority ept vpid
+
+   bogomips        : 5768.94
+   clflush size    : 64
+   cache_alignment : 64
+   address sizes   : 36 bits physical, 48 bits virtual
+   power management:
+*/
void amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st )
{
   /* The requested leaf is in RAX; leaves 4, 0xb and 0xd are further
      sub-indexed by ECX.  Results go to RAX..RDX, with the ULong
      cast zeroing the upper halves. */
#  define SET_ABCD(_a,_b,_c,_d)                \
      do { st->guest_RAX = (ULong)(_a);        \
           st->guest_RBX = (ULong)(_b);        \
           st->guest_RCX = (ULong)(_c);        \
           st->guest_RDX = (ULong)(_d);        \
      } while (0)

   UInt old_eax = (UInt)st->guest_RAX;
   UInt old_ecx = (UInt)st->guest_RCX;

   switch (old_eax) {
      case 0x00000000:
         /* max basic leaf (0xd) + "GenuineIntel" vendor string */
         SET_ABCD(0x0000000d, 0x756e6547, 0x6c65746e, 0x49656e69);
         break;
      case 0x00000001:
         /* family/model/stepping + feature flags
            (incl. avx, xsave, cx16) */
         SET_ABCD(0x000206a7, 0x00100800, 0x1f9ae3bf, 0xbfebfbff);
         break;
      case 0x00000002:
         /* cache/TLB descriptors */
         SET_ABCD(0x76035a01, 0x00f0b0ff, 0x00000000, 0x00ca0000);
         break;
      case 0x00000003:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x00000004:
         /* deterministic cache parameters, sub-indexed by ECX */
         switch (old_ecx) {
            case 0x00000000: SET_ABCD(0x1c004121, 0x01c0003f,
                                      0x0000003f, 0x00000000); break;
            case 0x00000001: SET_ABCD(0x1c004122, 0x01c0003f,
                                      0x0000003f, 0x00000000); break;
            case 0x00000002: SET_ABCD(0x1c004143, 0x01c0003f,
                                      0x000001ff, 0x00000000); break;
            case 0x00000003: SET_ABCD(0x1c03c163, 0x02c0003f,
                                      0x00001fff, 0x00000006); break;
            default:         SET_ABCD(0x00000000, 0x00000000,
                                      0x00000000, 0x00000000); break;
         }
         break;
      case 0x00000005:
         /* MONITOR/MWAIT parameters */
         SET_ABCD(0x00000040, 0x00000040, 0x00000003, 0x00001120);
         break;
      case 0x00000006:
         /* thermal and power management */
         SET_ABCD(0x00000077, 0x00000002, 0x00000009, 0x00000000);
         break;
      case 0x00000007:
         /* structured extended features; EBX bit 11 "claims" HTM
            (see kludge note in the header comment above) */
         SET_ABCD(0x00000000, 0x00000800, 0x00000000, 0x00000000);
         break;
      case 0x00000008:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x00000009:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x0000000a:
         /* architectural performance monitoring */
         SET_ABCD(0x07300803, 0x00000000, 0x00000000, 0x00000603);
         break;
      case 0x0000000b:
         /* extended topology enumeration, sub-indexed by ECX */
         switch (old_ecx) {
            case 0x00000000:
               SET_ABCD(0x00000001, 0x00000001,
                        0x00000100, 0x00000000); break;
            case 0x00000001:
               SET_ABCD(0x00000004, 0x00000004,
                        0x00000201, 0x00000000); break;
            default:
               SET_ABCD(0x00000000, 0x00000000,
                        old_ecx,    0x00000000); break;
         }
         break;
      case 0x0000000c:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x0000000d:
         /* XSAVE state enumeration, sub-indexed by ECX */
         switch (old_ecx) {
            case 0x00000000: SET_ABCD(0x00000007, 0x00000340,
                                      0x00000340, 0x00000000); break;
            case 0x00000001: SET_ABCD(0x00000001, 0x00000000,
                                      0x00000000, 0x00000000); break;
            case 0x00000002: SET_ABCD(0x00000100, 0x00000240,
                                      0x00000000, 0x00000000); break;
            default:         SET_ABCD(0x00000000, 0x00000000,
                                      0x00000000, 0x00000000); break;
         }
         break;
      case 0x0000000e:
         SET_ABCD(0x00000007, 0x00000340, 0x00000340, 0x00000000);
         break;
      case 0x0000000f:
         SET_ABCD(0x00000007, 0x00000340, 0x00000340, 0x00000000);
         break;
      case 0x80000000:
         /* max extended leaf */
         SET_ABCD(0x80000008, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x80000001:
         /* extended feature flags */
         SET_ABCD(0x00000000, 0x00000000, 0x00000001, 0x28100800);
         break;
      /* 0x80000002..4: brand string "Intel(R) Core(TM) i5-2300 CPU @
         2.80GHz" */
      case 0x80000002:
         SET_ABCD(0x20202020, 0x20202020, 0x65746e49, 0x2952286c);
         break;
      case 0x80000003:
         SET_ABCD(0x726f4320, 0x4d542865, 0x35692029, 0x3033322d);
         break;
      case 0x80000004:
         SET_ABCD(0x50432030, 0x20402055, 0x30382e32, 0x007a4847);
         break;
      case 0x80000005:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
         break;
      case 0x80000006:
         /* L2 cache information */
         SET_ABCD(0x00000000, 0x00000000, 0x01006040, 0x00000000);
         break;
      case 0x80000007:
         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000100);
         break;
      case 0x80000008:
         /* physical/virtual address sizes (36/48 bits) */
         SET_ABCD(0x00003024, 0x00000000, 0x00000000, 0x00000000);
         break;
      default:
         /* unrecognised leaves return leaf 0xd/0-style values */
         SET_ABCD(0x00000007, 0x00000340, 0x00000340, 0x00000000);
         break;
   }
#  undef SET_ABCD
}
+
+
/* Compute the result of an amd64 RCR (rotate right through carry) of
   'arg' by 'rot_amt' bit positions.  The carry flag taken from
   'rflags_in' participates in the rotation as one extra bit.  |szIN|
   is the operand size in bytes (1, 2, 4 or 8); a negative szIN asks
   for the updated rflags to be returned instead of the rotated
   value. */
ULong amd64g_calculate_RCR ( ULong arg, 
                             ULong rot_amt, 
                             ULong rflags_in, 
                             Long  szIN )
{
   /* szIN < 0 encodes "return the flags, not the value". */
   Bool  wantRflags = toBool(szIN < 0);
   ULong sz         = wantRflags ? (-szIN) : szIN;
   /* The rotate count is masked to 6 bits for 64-bit operands and to
      5 bits for everything smaller, as the hardware does. */
   ULong tempCOUNT  = rot_amt & (sz == 8 ? 0x3F : 0x1F);
   ULong cf=0, of=0, tempcf;

   switch (sz) {
      case 8:
         /* 64 data bits + CF rotate with period 65; the masked count
            (<= 63) is already below that, so no reduction needed. */
         cf        = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
         /* For RCR, OF = top bit of the original value XOR old CF. */
         of        = ((arg >> 63) ^ cf) & 1;
         while (tempCOUNT > 0) {
            tempcf = arg & 1;
            arg    = (arg >> 1) | (cf << 63);
            cf     = tempcf;
            tempCOUNT--;
         }
         break;
      case 4:
         /* 32 data bits + CF rotate with period 33. */
         while (tempCOUNT >= 33) tempCOUNT -= 33;
         cf        = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
         of        = ((arg >> 31) ^ cf) & 1;
         while (tempCOUNT > 0) {
            tempcf = arg & 1;
            arg    = ((arg >> 1) & 0x7FFFFFFFULL) | (cf << 31);
            cf     = tempcf;
            tempCOUNT--;
         }
         break;
      case 2:
         /* 16 data bits + CF: period 17. */
         while (tempCOUNT >= 17) tempCOUNT -= 17;
         cf        = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
         of        = ((arg >> 15) ^ cf) & 1;
         while (tempCOUNT > 0) {
            tempcf = arg & 1;
            arg    = ((arg >> 1) & 0x7FFFULL) | (cf << 15);
            cf     = tempcf;
            tempCOUNT--;
         }
         break;
      case 1:
         /* 8 data bits + CF: period 9. */
         while (tempCOUNT >= 9) tempCOUNT -= 9;
         cf        = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
         of        = ((arg >> 7) ^ cf) & 1;
         while (tempCOUNT > 0) {
            tempcf = arg & 1;
            arg    = ((arg >> 1) & 0x7FULL) | (cf << 7);
            cf     = tempcf;
            tempCOUNT--;
         }
         break;
      default:
         vpanic("calculate_RCR(amd64g): invalid size");
   }

   /* Fold the new C and O bits back into the flags word. */
   cf &= 1;
   of &= 1;
   rflags_in &= ~(AMD64G_CC_MASK_C | AMD64G_CC_MASK_O);
   rflags_in |= (cf << AMD64G_CC_SHIFT_C) | (of << AMD64G_CC_SHIFT_O);

   /* caller can ask to have back either the resulting flags or
      resulting value, but not both */
   return wantRflags ? rflags_in : arg;
}
+
/* Compute the result of an amd64 RCL (rotate left through carry),
   analogously to amd64g_calculate_RCR above.  |szIN| is the operand
   size in bytes (1, 2, 4 or 8); negative szIN requests the updated
   rflags rather than the rotated value. */
ULong amd64g_calculate_RCL ( ULong arg, 
                             ULong rot_amt, 
                             ULong rflags_in, 
                             Long  szIN )
{
   /* szIN < 0 encodes "return the flags, not the value". */
   Bool  wantRflags = toBool(szIN < 0);
   ULong sz         = wantRflags ? (-szIN) : szIN;
   /* Hardware masks the count: 6 bits for 64-bit operands, else 5. */
   ULong tempCOUNT  = rot_amt & (sz == 8 ? 0x3F : 0x1F);
   ULong cf=0, of=0, tempcf;

   switch (sz) {
      case 8:
         cf = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
         while (tempCOUNT > 0) {
            tempcf = (arg >> 63) & 1;
            arg    = (arg << 1) | (cf & 1);
            cf     = tempcf;
            tempCOUNT--;
         }
         /* For RCL, OF comes from the result: top bit XOR new CF. */
         of = ((arg >> 63) ^ cf) & 1;
         break;
      case 4:
         /* 32 data bits + CF: rotation period is 33. */
         while (tempCOUNT >= 33) tempCOUNT -= 33;
         cf = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
         while (tempCOUNT > 0) {
            tempcf = (arg >> 31) & 1;
            arg    = 0xFFFFFFFFULL & ((arg << 1) | (cf & 1));
            cf     = tempcf;
            tempCOUNT--;
         }
         of = ((arg >> 31) ^ cf) & 1;
         break;
      case 2:
         /* 16 data bits + CF: period 17. */
         while (tempCOUNT >= 17) tempCOUNT -= 17;
         cf = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
         while (tempCOUNT > 0) {
            tempcf = (arg >> 15) & 1;
            arg    = 0xFFFFULL & ((arg << 1) | (cf & 1));
            cf     = tempcf;
            tempCOUNT--;
         }
         of = ((arg >> 15) ^ cf) & 1;
         break;
      case 1:
         /* 8 data bits + CF: period 9. */
         while (tempCOUNT >= 9) tempCOUNT -= 9;
         cf = (rflags_in >> AMD64G_CC_SHIFT_C) & 1;
         while (tempCOUNT > 0) {
            tempcf = (arg >> 7) & 1;
            arg    = 0xFFULL & ((arg << 1) | (cf & 1));
            cf     = tempcf;
            tempCOUNT--;
         }
         of = ((arg >> 7) ^ cf) & 1;
         break;
      default: 
         vpanic("calculate_RCL(amd64g): invalid size");
   }

   /* Fold the new C and O bits back into the flags word. */
   cf &= 1;
   of &= 1;
   rflags_in &= ~(AMD64G_CC_MASK_C | AMD64G_CC_MASK_O);
   rflags_in |= (cf << AMD64G_CC_SHIFT_C) | (of << AMD64G_CC_SHIFT_O);

   return wantRflags ? rflags_in : arg;
}
+
/* Taken from gf2x-0.9.5, released under GPLv2+ (later versions LGPLv2+)
 * svn://scm.gforge.inria.fr/svn/gf2x/trunk/hardware/opteron/gf2x_mul1.h@25
 */
/* Carry-less multiply (multiplication in GF(2)[x]) of a and b,
   returning the high 64 bits of the 128-bit product when 'which' is
   nonzero, else the low 64 bits.  Presumably backs PCLMULQDQ --
   confirm against the caller. */
ULong amd64g_calculate_pclmul(ULong a, ULong b, ULong which)
{
    ULong hi, lo, tmp, A[16];

   /* A[i] = carry-less product of 'a' and the 4-bit value i. */
   A[0] = 0;            A[1] = a;
   A[2] = A[1] << 1;    A[3] = A[2] ^ a;
   A[4] = A[2] << 1;    A[5] = A[4] ^ a;
   A[6] = A[3] << 1;    A[7] = A[6] ^ a;
   A[8] = A[4] << 1;    A[9] = A[8] ^ a;
   A[10] = A[5] << 1;   A[11] = A[10] ^ a;
   A[12] = A[6] << 1;   A[13] = A[12] ^ a;
   A[14] = A[7] << 1;   A[15] = A[14] ^ a;

   /* Consume b two nibbles at a time, most significant first,
      shifting the accumulated product left by 8 bits per step and
      catching the bits that fall off the top of 'lo' in 'hi'. */
   lo = (A[b >> 60] << 4) ^ A[(b >> 56) & 15];
   hi = lo >> 56;
   lo = (lo << 8) ^ (A[(b >> 52) & 15] << 4) ^ A[(b >> 48) & 15];
   hi = (hi << 8) | (lo >> 56);
   lo = (lo << 8) ^ (A[(b >> 44) & 15] << 4) ^ A[(b >> 40) & 15];
   hi = (hi << 8) | (lo >> 56);
   lo = (lo << 8) ^ (A[(b >> 36) & 15] << 4) ^ A[(b >> 32) & 15];
   hi = (hi << 8) | (lo >> 56);
   lo = (lo << 8) ^ (A[(b >> 28) & 15] << 4) ^ A[(b >> 24) & 15];
   hi = (hi << 8) | (lo >> 56);
   lo = (lo << 8) ^ (A[(b >> 20) & 15] << 4) ^ A[(b >> 16) & 15];
   hi = (hi << 8) | (lo >> 56);
   lo = (lo << 8) ^ (A[(b >> 12) & 15] << 4) ^ A[(b >> 8) & 15];
   hi = (hi << 8) | (lo >> 56);
   lo = (lo << 8) ^ (A[(b >> 4) & 15] << 4) ^ A[b & 15];

   /* Correction pass: the table lookups lose the bits of 'a' shifted
      out of the top word; fold those contributions back into 'hi'
      (see gf2x_mul1.h for the derivation).  m0 = 0x0101..01. */
   ULong m0 = -1;
   m0 /= 255;
   tmp = -((a >> 63) & 1); tmp &= ((b & (m0 * 0xfe)) >> 1); hi = hi ^ tmp;
   tmp = -((a >> 62) & 1); tmp &= ((b & (m0 * 0xfc)) >> 2); hi = hi ^ tmp;
   tmp = -((a >> 61) & 1); tmp &= ((b & (m0 * 0xf8)) >> 3); hi = hi ^ tmp;
   tmp = -((a >> 60) & 1); tmp &= ((b & (m0 * 0xf0)) >> 4); hi = hi ^ tmp;
   tmp = -((a >> 59) & 1); tmp &= ((b & (m0 * 0xe0)) >> 5); hi = hi ^ tmp;
   tmp = -((a >> 58) & 1); tmp &= ((b & (m0 * 0xc0)) >> 6); hi = hi ^ tmp;
   tmp = -((a >> 57) & 1); tmp &= ((b & (m0 * 0x80)) >> 7); hi = hi ^ tmp;

   return which ? hi : lo;
}
+
+
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially-transparent) */
/* Horrible hack.  On non-amd64 platforms, return 1. */
/* Execute a real RDTSC and return the 64-bit timestamp counter,
   EDX:EAX glued together. */
ULong amd64g_dirtyhelper_RDTSC ( void )
{
#  if defined(__x86_64__)
   UInt  eax, edx;
   __asm__ __volatile__("rdtsc" : "=a" (eax), "=d" (edx));
   return (((ULong)edx) << 32) | ((ULong)eax);
#  else
   return 1ULL;
#  endif
}
+
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially-transparent) */
/* Horrible hack.  On non-amd64 platforms, return 1. */
/* This uses a different calling convention from _RDTSC just above
   only because of the difficulty of returning 96 bits from a C
   function -- RDTSC returns 64 bits and so is simple by comparison,
   on amd64. */
/* Execute a real RDTSCP and write its three results straight into
   the guest's RAX, RCX and RDX. */
void amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st )
{
#  if defined(__x86_64__)
   UInt eax, ecx, edx;
   __asm__ __volatile__("rdtscp" : "=a" (eax), "=d" (edx), "=c" (ecx));
   st->guest_RAX = (ULong)eax;
   st->guest_RCX = (ULong)ecx;
   st->guest_RDX = (ULong)edx;
#  else
   /* Do nothing: the guest state is left unmodified. */
#  endif
}
+
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially-transparent) */
/* Horrible hack.  On non-amd64 platforms, return 0. */
/* Perform a real IN of the given size (1, 2 or 4 bytes) from I/O
   port 'portno', returning the value read (zero-extended), or 0 for
   an unsupported size. */
ULong amd64g_dirtyhelper_IN ( ULong portno, ULong sz/*1,2 or 4*/ )
{
#  if defined(__x86_64__)
   ULong r = 0;
   portno &= 0xFFFF;   /* port numbers are 16 bits */
   switch (sz) {
      case 4: 
         /* zero %rax first so the unread upper bits of r are 0 */
         __asm__ __volatile__("movq $0,%%rax; inl %w1,%%eax; movq %%rax,%0" 
                              : "=a" (r) : "Nd" (portno));
	 break;
      case 2: 
         __asm__ __volatile__("movq $0,%%rax; inw %w1,%w0" 
                              : "=a" (r) : "Nd" (portno));
	 break;
      case 1: 
         __asm__ __volatile__("movq $0,%%rax; inb %w1,%b0" 
                              : "=a" (r) : "Nd" (portno));
	 break;
      default:
         break; /* note: no 64-bit version of insn exists */
   }
   return r;
#  else
   return 0;
#  endif
}
+
+
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially-transparent) */
/* Horrible hack.  On non-amd64 platforms, do nothing. */
/* Perform a real OUT of 'data' (1, 2 or 4 bytes of it) to I/O port
   'portno'.  Unsupported sizes are silently ignored. */
void amd64g_dirtyhelper_OUT ( ULong portno, ULong data, ULong sz/*1,2 or 4*/ )
{
#  if defined(__x86_64__)
   portno &= 0xFFFF;   /* port numbers are 16 bits */
   switch (sz) {
      case 4: 
         __asm__ __volatile__("movq %0,%%rax; outl %%eax, %w1" 
                              : : "a" (data), "Nd" (portno));
	 break;
      case 2: 
         __asm__ __volatile__("outw %w0, %w1" 
                              : : "a" (data), "Nd" (portno));
	 break;
      case 1: 
         __asm__ __volatile__("outb %b0, %w1" 
                              : : "a" (data), "Nd" (portno));
	 break;
      default:
         break; /* note: no 64-bit version of insn exists */
   }
#  else
   /* do nothing */
#  endif
}
+
/* CALLED FROM GENERATED CODE */
/* DIRTY HELPER (non-referentially-transparent) */
/* Horrible hack.  On non-amd64 platforms, do nothing. */
/* op = 0: call the native SGDT instruction.
   op = 1: call the native SIDT instruction.
   'address' must point to the 10-byte pseudo-descriptor area the
   instruction writes. */
void amd64g_dirtyhelper_SxDT ( void *address, ULong op ) {
#  if defined(__x86_64__)
   switch (op) {
      case 0:
         __asm__ __volatile__("sgdt (%0)" : : "r" (address) : "memory");
         break;
      case 1:
         __asm__ __volatile__("sidt (%0)" : : "r" (address) : "memory");
         break;
      default:
         vpanic("amd64g_dirtyhelper_SxDT");
   }
#  else
   /* Not a real SGDT/SIDT; just zero the 10-byte result area so the
      caller sees a defined value. */
   UChar* p = (UChar*)address;
   p[0] = p[1] = p[2] = p[3] = p[4] = p[5] = 0;
   p[6] = p[7] = p[8] = p[9] = 0;
#  endif
}
+
+/*---------------------------------------------------------------*/
+/*--- Helpers for MMX/SSE/SSE2.                               ---*/
+/*---------------------------------------------------------------*/
+
+static inline UChar abdU8 ( UChar xx, UChar yy ) {
+   return toUChar(xx>yy ? xx-yy : yy-xx);
+}
+
+static inline ULong mk32x2 ( UInt w1, UInt w0 ) {
+   return (((ULong)w1) << 32) | ((ULong)w0);
+}
+
/* Lane extractors: sel16x4_K returns 16-bit lane K (K = 0 is the
   least significant lane) of a 64-bit value. */
static inline UShort sel16x4_3 ( ULong w64 ) {
   UInt hi32 = toUInt(w64 >> 32);
   return toUShort(hi32 >> 16);
}
static inline UShort sel16x4_2 ( ULong w64 ) {
   UInt hi32 = toUInt(w64 >> 32);
   return toUShort(hi32);
}
static inline UShort sel16x4_1 ( ULong w64 ) {
   UInt lo32 = toUInt(w64);
   return toUShort(lo32 >> 16);
}
static inline UShort sel16x4_0 ( ULong w64 ) {
   UInt lo32 = toUInt(w64);
   return toUShort(lo32);
}
+
/* Lane extractors: sel8x8_K returns byte lane K (K = 0 is the least
   significant byte) of a 64-bit value. */
static inline UChar sel8x8_7 ( ULong w64 ) {
   UInt hi32 = toUInt(w64 >> 32);
   return toUChar(hi32 >> 24);
}
static inline UChar sel8x8_6 ( ULong w64 ) {
   UInt hi32 = toUInt(w64 >> 32);
   return toUChar(hi32 >> 16);
}
static inline UChar sel8x8_5 ( ULong w64 ) {
   UInt hi32 = toUInt(w64 >> 32);
   return toUChar(hi32 >> 8);
}
static inline UChar sel8x8_4 ( ULong w64 ) {
   UInt hi32 = toUInt(w64 >> 32);
   return toUChar(hi32 >> 0);
}
static inline UChar sel8x8_3 ( ULong w64 ) {
   UInt lo32 = toUInt(w64);
   return toUChar(lo32 >> 24);
}
static inline UChar sel8x8_2 ( ULong w64 ) {
   UInt lo32 = toUInt(w64);
   return toUChar(lo32 >> 16);
}
static inline UChar sel8x8_1 ( ULong w64 ) {
   UInt lo32 = toUInt(w64);
   return toUChar(lo32 >> 8);
}
static inline UChar sel8x8_0 ( ULong w64 ) {
   UInt lo32 = toUInt(w64);
   return toUChar(lo32 >> 0);
}
+
/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* PMADDWD: multiply corresponding signed 16-bit lanes of xx and yy
   and add the 32-bit products pairwise, giving two 32-bit sums. */
ULong amd64g_calculate_mmx_pmaddwd ( ULong xx, ULong yy )
{
   return
      mk32x2(
         /* upper 32 bits: lanes 3 and 2 */
         (((Int)(Short)sel16x4_3(xx)) * ((Int)(Short)sel16x4_3(yy)))
            + (((Int)(Short)sel16x4_2(xx)) * ((Int)(Short)sel16x4_2(yy))),
         /* lower 32 bits: lanes 1 and 0 */
         (((Int)(Short)sel16x4_1(xx)) * ((Int)(Short)sel16x4_1(yy)))
            + (((Int)(Short)sel16x4_0(xx)) * ((Int)(Short)sel16x4_0(yy)))
      );
}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+ULong amd64g_calculate_mmx_psadbw ( ULong xx, ULong yy )
+{
+   UInt t = 0;
+   t += (UInt)abdU8( sel8x8_7(xx), sel8x8_7(yy) );
+   t += (UInt)abdU8( sel8x8_6(xx), sel8x8_6(yy) );
+   t += (UInt)abdU8( sel8x8_5(xx), sel8x8_5(yy) );
+   t += (UInt)abdU8( sel8x8_4(xx), sel8x8_4(yy) );
+   t += (UInt)abdU8( sel8x8_3(xx), sel8x8_3(yy) );
+   t += (UInt)abdU8( sel8x8_2(xx), sel8x8_2(yy) );
+   t += (UInt)abdU8( sel8x8_1(xx), sel8x8_1(yy) );
+   t += (UInt)abdU8( sel8x8_0(xx), sel8x8_0(yy) );
+   t &= 0xFFFF;
+   return (ULong)t;
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi )
+{
+   UShort t, min;
+   UInt   idx;
+   t = sel16x4_0(sLo); if (True)    { min = t; idx = 0; }
+   t = sel16x4_1(sLo); if (t < min) { min = t; idx = 1; }
+   t = sel16x4_2(sLo); if (t < min) { min = t; idx = 2; }
+   t = sel16x4_3(sLo); if (t < min) { min = t; idx = 3; }
+   t = sel16x4_0(sHi); if (t < min) { min = t; idx = 4; }
+   t = sel16x4_1(sHi); if (t < min) { min = t; idx = 5; }
+   t = sel16x4_2(sHi); if (t < min) { min = t; idx = 6; }
+   t = sel16x4_3(sHi); if (t < min) { min = t; idx = 7; }
+   return ((ULong)(idx << 16)) | ((ULong)min);
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+ULong amd64g_calc_crc32b ( ULong crcIn, ULong b )
+{
+   UInt  i;
+   ULong crc = (b & 0xFFULL) ^ crcIn;
+   for (i = 0; i < 8; i++)
+      crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78ULL : 0);
+   return crc;
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+ULong amd64g_calc_crc32w ( ULong crcIn, ULong w )
+{
+   UInt  i;
+   ULong crc = (w & 0xFFFFULL) ^ crcIn;
+   for (i = 0; i < 16; i++)
+      crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78ULL : 0);
+   return crc;
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+ULong amd64g_calc_crc32l ( ULong crcIn, ULong l )
+{
+   UInt i;
+   ULong crc = (l & 0xFFFFFFFFULL) ^ crcIn;
+   for (i = 0; i < 32; i++)
+      crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78ULL : 0);
+   return crc;
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+ULong amd64g_calc_crc32q ( ULong crcIn, ULong q )
+{
+   ULong crc = amd64g_calc_crc32l(crcIn, q);
+   return amd64g_calc_crc32l(crc, q >> 32);
+}
+
+
+/* .. helper for next fn .. */
+static inline ULong sad_8x4 ( ULong xx, ULong yy )
+{
+   UInt t = 0;
+   t += (UInt)abdU8( sel8x8_3(xx), sel8x8_3(yy) );
+   t += (UInt)abdU8( sel8x8_2(xx), sel8x8_2(yy) );
+   t += (UInt)abdU8( sel8x8_1(xx), sel8x8_1(yy) );
+   t += (UInt)abdU8( sel8x8_0(xx), sel8x8_0(yy) );
+   return (ULong)t;
+}
+
/* CALLED FROM GENERATED CODE: CLEAN HELPER */
/* MPSADBW: compute four 8x4 sums-of-absolute-differences between a
   32-bit chunk of the source and four overlapping 4-byte windows of
   the destination, packing the four 16-bit results into the returned
   64-bit value.  Chunk selection is controlled by the low bits of
   imm8; bit 7 of 'imm_and_return_control_bit' says whether this call
   computes the high or low half of the full 128-bit result. */
ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
                            ULong dHi, ULong dLo,
                            ULong imm_and_return_control_bit )
{
   UInt imm8     = imm_and_return_control_bit & 7;
   Bool calcHi   = (imm_and_return_control_bit >> 7) & 1;
   UInt srcOffsL = imm8 & 3; /* src offs in 32-bit (L) chunks */
   UInt dstOffsL = (imm8 >> 2) & 1; /* dst offs in ditto chunks */
   /* For src we only need 32 bits, so get them into the
      lower half of a 64 bit word. */
   ULong src = ((srcOffsL & 2) ? sHi : sLo) >> (32 * (srcOffsL & 1));
   /* For dst we need to get hold of 56 bits (7 bytes) from a total of
      11 bytes.  If calculating the low part of the result, need bytes
      dstOffsL * 4 + (0 .. 6); if calculating the high part,
      dstOffsL * 4 + (4 .. 10). */
   ULong dst;
   /* dstOffL = 0, Lo  ->  0 .. 6
      dstOffL = 1, Lo  ->  4 .. 10
      dstOffL = 0, Hi  ->  4 .. 10
      dstOffL = 1, Hi  ->  8 .. 14
   */
   if (calcHi && dstOffsL) {
      /* 8 .. 14 */
      dst = dHi & 0x00FFFFFFFFFFFFFFULL;
   }
   else if (!calcHi && !dstOffsL) {
      /* 0 .. 6 */
      dst = dLo & 0x00FFFFFFFFFFFFFFULL;
   } 
   else {
      /* 4 .. 10 */
      dst = (dLo >> 32) | ((dHi & 0x00FFFFFFULL) << 32);
   }
   /* Four SADs against the dst window shifted by 0..3 bytes. */
   ULong r0  = sad_8x4( dst >>  0, src );
   ULong r1  = sad_8x4( dst >>  8, src );
   ULong r2  = sad_8x4( dst >> 16, src );
   ULong r3  = sad_8x4( dst >> 24, src );
   ULong res = (r3 << 48) | (r2 << 32) | (r1 << 16) | r0;
   return res;
}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+ULong amd64g_calculate_pext ( ULong src_masked, ULong mask )
+{
+   ULong dst = 0;
+   ULong src_bit;
+   ULong dst_bit = 1;
+   for (src_bit = 1; src_bit; src_bit <<= 1) {
+      if (mask & src_bit) {
+         if (src_masked & src_bit) dst |= dst_bit;
+         dst_bit <<= 1;
+      }
+   }
+   return dst;
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+ULong amd64g_calculate_pdep ( ULong src, ULong mask )
+{
+   ULong dst = 0;
+   ULong dst_bit;
+   ULong src_bit = 1;
+   for (dst_bit = 1; dst_bit; dst_bit <<= 1) {
+      if (mask & dst_bit) {
+         if (src & src_bit) dst |= dst_bit;
+         src_bit <<= 1;
+      }
+   }
+   return dst;
+}
+
+/*---------------------------------------------------------------*/
+/*--- Helpers for SSE4.2 PCMP{E,I}STR{I,M}                    ---*/
+/*---------------------------------------------------------------*/
+
+static UInt zmask_from_V128 ( V128* arg )
+{
+   UInt i, res = 0;
+   for (i = 0; i < 16; i++) {
+      res |=  ((arg->w8[i] == 0) ? 1 : 0) << i;
+   }
+   return res;
+}
+
+static UInt zmask_from_V128_wide ( V128* arg )
+{
+   UInt i, res = 0;
+   for (i = 0; i < 8; i++) {
+      res |=  ((arg->w16[i] == 0) ? 1 : 0) << i;
+   }
+   return res;
+}
+
/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, no
   accesses of memory, is a pure function.

   opc_and_imm contains (4th byte of opcode << 8) | the-imm8-byte so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  4th byte of opcode is in the range
   0x60 to 0x63:
       istri  66 0F 3A 63
       istrm  66 0F 3A 62
       estri  66 0F 3A 61
       estrm  66 0F 3A 60

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest_XMM0.

   Is expected to be called with opc_and_imm combinations which have
   actually been validated, and will assert if otherwise.  The front
   end should ensure we're only called with verified values.
*/
ULong amd64g_dirtyhelper_PCMPxSTRx ( 
          VexGuestAMD64State* gst,
          HWord opc4_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       )
{
   /* Decode which of the four instruction variants this is. */
   HWord opc4 = (opc4_and_imm >> 8) & 0xFF;
   HWord imm8 = opc4_and_imm & 0xFF;
   HWord isISTRx = opc4 & 2;
   HWord isxSTRM = (opc4 & 1) ^ 1;
   vassert((opc4 & 0xFC) == 0x60); /* 0x60 .. 0x63 */
   /* imm8 bit 0: 16-bit (wide) elements rather than 8-bit. */
   HWord wide = (imm8 & 1);

   // where the args are
   V128* argL = (V128*)( ((UChar*)gst) + gstOffL );
   V128* argR = (V128*)( ((UChar*)gst) + gstOffR );

   /* Create the arg validity masks, either from the vectors
      themselves or from the supplied edx/eax values. */
   // FIXME: this is only right for the 8-bit data cases.
   // At least that is asserted above.
   UInt zmaskL, zmaskR;

   // temp spot for the resulting flags and vector.
   V128 resV;
   UInt resOSZACP;

   // for checking whether case was handled
   Bool ok = False;

   if (wide) {
      if (isISTRx) {
         zmaskL = zmask_from_V128_wide(argL);
         zmaskR = zmask_from_V128_wide(argR);
      } else {
         /* ESTRx: lengths come from EDX/EAX, clamped to [-8,8] and
            made non-negative before use. */
         Int tmp;
         tmp = edxIN & 0xFFFFFFFF;
         if (tmp < -8) tmp = -8;
         if (tmp > 8)  tmp = 8;
         if (tmp < 0)  tmp = -tmp;
         vassert(tmp >= 0 && tmp <= 8);
         zmaskL = (1 << tmp) & 0xFF;
         tmp = eaxIN & 0xFFFFFFFF;
         if (tmp < -8) tmp = -8;
         if (tmp > 8)  tmp = 8;
         if (tmp < 0)  tmp = -tmp;
         vassert(tmp >= 0 && tmp <= 8);
         zmaskR = (1 << tmp) & 0xFF;
      }
      // do the maths
      ok = compute_PCMPxSTRx_wide ( 
              &resV, &resOSZACP, argL, argR, 
              zmaskL, zmaskR, imm8, (Bool)isxSTRM
           );
   } else {
      if (isISTRx) {
         zmaskL = zmask_from_V128(argL);
         zmaskR = zmask_from_V128(argR);
      } else {
         /* ESTRx, byte elements: clamp lengths to [-16,16]. */
         Int tmp;
         tmp = edxIN & 0xFFFFFFFF;
         if (tmp < -16) tmp = -16;
         if (tmp > 16)  tmp = 16;
         if (tmp < 0)   tmp = -tmp;
         vassert(tmp >= 0 && tmp <= 16);
         zmaskL = (1 << tmp) & 0xFFFF;
         tmp = eaxIN & 0xFFFFFFFF;
         if (tmp < -16) tmp = -16;
         if (tmp > 16)  tmp = 16;
         if (tmp < 0)   tmp = -tmp;
         vassert(tmp >= 0 && tmp <= 16);
         zmaskR = (1 << tmp) & 0xFFFF;
      }
      // do the maths
      ok = compute_PCMPxSTRx ( 
              &resV, &resOSZACP, argL, argR, 
              zmaskL, zmaskR, imm8, (Bool)isxSTRM
           );
   }

   // front end shouldn't pass us any imm8 variants we can't
   // handle.  Hence:
   vassert(ok);

   // So, finally we need to get the results back to the caller.
   // In all cases, the new OSZACP value is the lowest 16 of
   // the return value.
   if (isxSTRM) {
      /* xSTRM: write the result vector to guest XMM0 (low lanes of
         YMM0). */
      gst->guest_YMM0[0] = resV.w32[0];
      gst->guest_YMM0[1] = resV.w32[1];
      gst->guest_YMM0[2] = resV.w32[2];
      gst->guest_YMM0[3] = resV.w32[3];
      return resOSZACP & 0x8D5;
   } else {
      /* xSTRI: new ECX value goes in bits 31:16. */
      UInt newECX = resV.w32[0] & 0xFFFF;
      return (newECX << 16) | (resOSZACP & 0x8D5);
   }
}
+
+/*---------------------------------------------------------------*/
+/*--- AES primitives and helpers                              ---*/
+/*---------------------------------------------------------------*/
/* a 16 x 16 matrix */
/* AES forward S-box; SubBytes below maps every byte of its argument
   through this table. */
static const UChar sbox[256] = {                   // row nr
   0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, // 1
   0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
   0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, // 2
   0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
   0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, // 3
   0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
   0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, // 4
   0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
   0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, // 5
   0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
   0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, // 6
   0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
   0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, // 7
   0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
   0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, // 8
   0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
   0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, // 9
   0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
   0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, //10
   0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
   0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, //11
   0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
   0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, //12
   0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
   0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, //13
   0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
   0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, //14
   0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
   0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, //15
   0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
   0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, //16
   0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
/* AES SubBytes step: substitute each byte of *v via the S-box. */
static void SubBytes (V128* v)
{
   V128 r;
   UInt i;
   for (i = 0; i < 16; i++)
      r.w8[i] = sbox[v->w8[i]];
   *v = r;
}
+
/* a 16 x 16 matrix */
/* AES inverse S-box; InvSubBytes below maps every byte of its
   argument through this table. */
static const UChar invsbox[256] = {                // row nr
   0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, // 1
   0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,     
   0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, // 2
   0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,     
   0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, // 3
   0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,     
   0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, // 4
   0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,     
   0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, // 5
   0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,     
   0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, // 6
   0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,     
   0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, // 7
   0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,     
   0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, // 8
   0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,     
   0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, // 9
   0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,     
   0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, //10
   0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,     
   0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, //11
   0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,     
   0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, //12
   0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,     
   0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, //13
   0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,     
   0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, //14
   0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,     
   0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, //15
   0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,     
   0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, //16
   0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
};
/* AES InvSubBytes step: substitute each byte via the inverse
   S-box. */
static void InvSubBytes (V128* v)
{
   V128 r;
   UInt i;
   for (i = 0; i < 16; i++)
      r.w8[i] = invsbox[v->w8[i]];
   *v = r;
}
+
/* Byte permutation for the AES ShiftRows step.  Note the table is
   indexed with 15-i, i.e. read back-to-front. */
static const UChar ShiftRows_op[16] =
   {11, 6, 1, 12, 7, 2, 13, 8, 3, 14, 9, 4, 15, 10, 5, 0};
static void ShiftRows (V128* v)
{
   V128 r;
   UInt i;
   for (i = 0; i < 16; i++)
      r.w8[i] = v->w8[ShiftRows_op[15-i]];
   *v = r;
}
+
/* Inverse of ShiftRows above; same back-to-front indexing. */
static const UChar InvShiftRows_op[16] = 
   {3, 6, 9, 12, 15, 2, 5, 8, 11, 14, 1, 4, 7, 10, 13, 0};
static void InvShiftRows (V128* v)
{
   V128 r;
   UInt i;
   for (i = 0; i < 16; i++)
      r.w8[i] = v->w8[InvShiftRows_op[15-i]];
   *v = r;
}
+
/* Multiplication of the finite fields elements of AES.
   See "A Specification for The AES Algorithm Rijndael 
        (by Joan Daemen & Vincent Rijmen)"
        Dr. Brian Gladman, v3.1, 3rd March 2001. */
/* N values so that (hex) xy = 0x03^N.
   0x00 cannot be used. We put 0xff for this value.*/
/* a 16 x 16 matrix */
/* i.e. the discrete-log table for generator 0x03; used with the
   antilog table Exy by ff_mul below. */
static const UChar Nxy[256] = {                    // row nr
   0xff, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, // 1
   0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03,     
   0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, // 2
   0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1,     
   0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, // 3
   0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78,     
   0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, // 4
   0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e,     
   0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, // 5
   0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38,     
   0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, // 6
   0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10,     
   0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, // 7
   0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba,     
   0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, // 8
   0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57,     
   0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, // 9
   0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8,     
   0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, //10
   0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0,     
   0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, //11
   0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7,     
   0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, //12
   0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d,     
   0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, //13
   0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1,     
   0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, //14
   0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab,     
   0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, //15
   0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,     
   0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, //16
   0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07
};
+
/* E values so that E = 0x03^xy. */
/* i.e. the antilog (exponentiation) table paired with Nxy above. */
static const UChar Exy[256] = {                    // row nr
   0x01, 0x03, 0x05, 0x0f, 0x11, 0x33, 0x55, 0xff, // 1
   0x1a, 0x2e, 0x72, 0x96, 0xa1, 0xf8, 0x13, 0x35,     
   0x5f, 0xe1, 0x38, 0x48, 0xd8, 0x73, 0x95, 0xa4, // 2
   0xf7, 0x02, 0x06, 0x0a, 0x1e, 0x22, 0x66, 0xaa,     
   0xe5, 0x34, 0x5c, 0xe4, 0x37, 0x59, 0xeb, 0x26, // 3
   0x6a, 0xbe, 0xd9, 0x70, 0x90, 0xab, 0xe6, 0x31,     
   0x53, 0xf5, 0x04, 0x0c, 0x14, 0x3c, 0x44, 0xcc, // 4
   0x4f, 0xd1, 0x68, 0xb8, 0xd3, 0x6e, 0xb2, 0xcd,     
   0x4c, 0xd4, 0x67, 0xa9, 0xe0, 0x3b, 0x4d, 0xd7, // 5
   0x62, 0xa6, 0xf1, 0x08, 0x18, 0x28, 0x78, 0x88,     
   0x83, 0x9e, 0xb9, 0xd0, 0x6b, 0xbd, 0xdc, 0x7f, // 6
   0x81, 0x98, 0xb3, 0xce, 0x49, 0xdb, 0x76, 0x9a,     
   0xb5, 0xc4, 0x57, 0xf9, 0x10, 0x30, 0x50, 0xf0, // 7
   0x0b, 0x1d, 0x27, 0x69, 0xbb, 0xd6, 0x61, 0xa3,     
   0xfe, 0x19, 0x2b, 0x7d, 0x87, 0x92, 0xad, 0xec, // 8
   0x2f, 0x71, 0x93, 0xae, 0xe9, 0x20, 0x60, 0xa0,     
   0xfb, 0x16, 0x3a, 0x4e, 0xd2, 0x6d, 0xb7, 0xc2, // 9
   0x5d, 0xe7, 0x32, 0x56, 0xfa, 0x15, 0x3f, 0x41,     
   0xc3, 0x5e, 0xe2, 0x3d, 0x47, 0xc9, 0x40, 0xc0, //10
   0x5b, 0xed, 0x2c, 0x74, 0x9c, 0xbf, 0xda, 0x75,     
   0x9f, 0xba, 0xd5, 0x64, 0xac, 0xef, 0x2a, 0x7e, //11
   0x82, 0x9d, 0xbc, 0xdf, 0x7a, 0x8e, 0x89, 0x80,     
   0x9b, 0xb6, 0xc1, 0x58, 0xe8, 0x23, 0x65, 0xaf, //12
   0xea, 0x25, 0x6f, 0xb1, 0xc8, 0x43, 0xc5, 0x54,     
   0xfc, 0x1f, 0x21, 0x63, 0xa5, 0xf4, 0x07, 0x09, //13
   0x1b, 0x2d, 0x77, 0x99, 0xb0, 0xcb, 0x46, 0xca,     
   0x45, 0xcf, 0x4a, 0xde, 0x79, 0x8b, 0x86, 0x91, //14
   0xa8, 0xe3, 0x3e, 0x42, 0xc6, 0x51, 0xf3, 0x0e,     
   0x12, 0x36, 0x5a, 0xee, 0x29, 0x7b, 0x8d, 0x8c, //15
   0x8f, 0x8a, 0x85, 0x94, 0xa7, 0xf2, 0x0d, 0x17,     
   0x39, 0x4b, 0xdd, 0x7c, 0x84, 0x97, 0xa2, 0xfd, //16
   0x1c, 0x24, 0x6c, 0xb4, 0xc7, 0x52, 0xf6, 0x01};
+
+static inline UChar ff_mul(UChar u1, UChar u2)
+{
+   if ((u1 > 0) && (u2 > 0)) {
+      UInt ui = Nxy[u1] + Nxy[u2];
+      if (ui >= 255)
+         ui = ui - 255;
+      return Exy[ui];
+   } else {
+      return 0;
+   };
+}
+
+static void MixColumns (V128* v)
+{
+   V128 r;
+   Int j;
+#define P(x,row,col) (x)->w8[((row)*4+(col))]
+   for (j = 0; j < 4; j++) {
+      P(&r,j,0) = ff_mul(0x02, P(v,j,0)) ^ ff_mul(0x03, P(v,j,1)) 
+         ^ P(v,j,2) ^ P(v,j,3);
+      P(&r,j,1) = P(v,j,0) ^ ff_mul( 0x02, P(v,j,1) ) 
+         ^ ff_mul(0x03, P(v,j,2) ) ^ P(v,j,3);
+      P(&r,j,2) = P(v,j,0) ^ P(v,j,1) ^ ff_mul( 0x02, P(v,j,2) )
+         ^ ff_mul(0x03, P(v,j,3) );
+      P(&r,j,3) = ff_mul(0x03, P(v,j,0) ) ^ P(v,j,1) ^ P(v,j,2)
+         ^ ff_mul( 0x02, P(v,j,3) );
+   }
+   *v = r;
+#undef P
+}
+
+static void InvMixColumns (V128* v)
+{
+   V128 r;
+   Int j;
+#define P(x,row,col) (x)->w8[((row)*4+(col))]
+   for (j = 0; j < 4; j++) {
+      P(&r,j,0) = ff_mul(0x0e, P(v,j,0) ) ^ ff_mul(0x0b, P(v,j,1) )
+         ^ ff_mul(0x0d,P(v,j,2) ) ^ ff_mul(0x09, P(v,j,3) );
+      P(&r,j,1) = ff_mul(0x09, P(v,j,0) ) ^ ff_mul(0x0e, P(v,j,1) )
+         ^ ff_mul(0x0b,P(v,j,2) ) ^ ff_mul(0x0d, P(v,j,3) );
+      P(&r,j,2) = ff_mul(0x0d, P(v,j,0) ) ^ ff_mul(0x09, P(v,j,1) )
+         ^ ff_mul(0x0e,P(v,j,2) ) ^ ff_mul(0x0b, P(v,j,3) );
+      P(&r,j,3) = ff_mul(0x0b, P(v,j,0) ) ^ ff_mul(0x0d, P(v,j,1) )
+         ^ ff_mul(0x09,P(v,j,2) ) ^ ff_mul(0x0e, P(v,j,3) );
+   }
+   *v = r;
+#undef P
+
+}
+
/* For description, see definition in guest_amd64_defs.h */
/* Implements the AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC insns.
   opc4 is the trailing opcode byte selecting the variant, and
   gstOffD/L/R are byte offsets into the guest state of the
   destination and the two source 128-bit values. */
void amd64g_dirtyhelper_AES ( 
          VexGuestAMD64State* gst,
          HWord opc4, HWord gstOffD,
          HWord gstOffL, HWord gstOffR
       )
{
   // where the args are
   V128* argD = (V128*)( ((UChar*)gst) + gstOffD );
   V128* argL = (V128*)( ((UChar*)gst) + gstOffL );
   V128* argR = (V128*)( ((UChar*)gst) + gstOffR );
   V128  r;

   switch (opc4) {
      case 0xDC: /* AESENC */
      case 0xDD: /* AESENCLAST */
         /* One encryption round: ShiftRows then SubBytes, then
            (except in the -LAST variant) MixColumns, then XOR with
            *argL (the round key, per the insn spec). */
         r = *argR;
         ShiftRows (&r);
         SubBytes  (&r);
         if (opc4 == 0xDC)
            MixColumns (&r);
         argD->w64[0] = r.w64[0] ^ argL->w64[0];
         argD->w64[1] = r.w64[1] ^ argL->w64[1];
         break;

      case 0xDE: /* AESDEC */
      case 0xDF: /* AESDECLAST */
         /* One decryption round, using the inverse transformations;
            InvMixColumns is skipped in the -LAST variant. */
         r = *argR;
         InvShiftRows (&r);
         InvSubBytes (&r);
         if (opc4 == 0xDE)
            InvMixColumns (&r);
         argD->w64[0] = r.w64[0] ^ argL->w64[0];
         argD->w64[1] = r.w64[1] ^ argL->w64[1];
         break;

      case 0xDB: /* AESIMC */
         /* InvMixColumns of the source only (round-key conversion). */
         *argD = *argL;
         InvMixColumns (argD);
         break;
      default: vassert(0);
   }
}
+
+static inline UInt RotWord (UInt   w32)
+{
+   return ((w32 >> 8) | (w32 << 24));
+}
+
+static inline UInt SubWord (UInt   w32)
+{
+   UChar *w8;
+   UChar *r8;
+   UInt res;
+   w8 = (UChar*) &w32;
+   r8 = (UChar*) &res;
+   r8[0] = sbox[w8[0]];
+   r8[1] = sbox[w8[1]];
+   r8[2] = sbox[w8[2]];
+   r8[3] = sbox[w8[3]];
+   return res;
+}
+
/* For description, see definition in guest_amd64_defs.h */
/* Implements AESKEYGENASSIST: builds the four result dwords from
   dwords 1 and 3 of the source using SubWord/RotWord and the
   immediate round-constant imm8. */
extern void amd64g_dirtyhelper_AESKEYGENASSIST ( 
          VexGuestAMD64State* gst,
          HWord imm8,
          HWord gstOffL, HWord gstOffR
       )
{
   // where the args are
   V128* argL = (V128*)( ((UChar*)gst) + gstOffL );  /* source */
   V128* argR = (V128*)( ((UChar*)gst) + gstOffR );  /* destination */

   // We have to create the result in a temporary in the
   // case where the src and dst regs are the same.  See #341698.
   V128 tmp;

   tmp.w32[3] = RotWord (SubWord (argL->w32[3])) ^ imm8;
   tmp.w32[2] = SubWord (argL->w32[3]);
   tmp.w32[1] = RotWord (SubWord (argL->w32[1])) ^ imm8;
   tmp.w32[0] = SubWord (argL->w32[1]);

   argR->w32[3] = tmp.w32[3];
   argR->w32[2] = tmp.w32[2];
   argR->w32[1] = tmp.w32[1];
   argR->w32[0] = tmp.w32[0];
}
+
+
+
+/*---------------------------------------------------------------*/
+/*--- Helpers for dealing with, and describing,               ---*/
+/*--- guest state as a whole.                                 ---*/
+/*---------------------------------------------------------------*/
+
/* Initialise the entire amd64 guest state. */
/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestAMD64_initialise ( /*OUT*/VexGuestAMD64State* vex_state )
{
   /* Event-check fields and padding. */
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->pad0 = 0;

   /* All integer registers start at zero. */
   vex_state->guest_RAX = 0;
   vex_state->guest_RCX = 0;
   vex_state->guest_RDX = 0;
   vex_state->guest_RBX = 0;
   vex_state->guest_RSP = 0;
   vex_state->guest_RBP = 0;
   vex_state->guest_RSI = 0;
   vex_state->guest_RDI = 0;
   vex_state->guest_R8  = 0;
   vex_state->guest_R9  = 0;
   vex_state->guest_R10 = 0;
   vex_state->guest_R11 = 0;
   vex_state->guest_R12 = 0;
   vex_state->guest_R13 = 0;
   vex_state->guest_R14 = 0;
   vex_state->guest_R15 = 0;

   /* Flags thunk: start in 'copy' mode with zeroed operands. */
   vex_state->guest_CC_OP   = AMD64G_CC_OP_COPY;
   vex_state->guest_CC_DEP1 = 0;
   vex_state->guest_CC_DEP2 = 0;
   vex_state->guest_CC_NDEP = 0;

   vex_state->guest_DFLAG   = 1; /* forwards */
   vex_state->guest_IDFLAG  = 0;
   vex_state->guest_ACFLAG  = 0;

   /* HACK: represent the offset associated with a constant %fs. 
      Typically, on linux, this assumes that %fs is only ever zero (main
      thread) or 0x63. */
   vex_state->guest_FS_CONST = 0;

   vex_state->guest_RIP = 0;

   /* Initialise the simulated FPU */
   amd64g_dirtyhelper_FINIT( vex_state );

   /* Initialise the AVX state. */
   /* Zero all 8 lanes of one YMM register slot. */
#  define AVXZERO(_ymm) \
      do { _ymm[0]=_ymm[1]=_ymm[2]=_ymm[3] = 0; \
           _ymm[4]=_ymm[5]=_ymm[6]=_ymm[7] = 0; \
      } while (0)
   vex_state->guest_SSEROUND = (ULong)Irrm_NEAREST;
   AVXZERO(vex_state->guest_YMM0);
   AVXZERO(vex_state->guest_YMM1);
   AVXZERO(vex_state->guest_YMM2);
   AVXZERO(vex_state->guest_YMM3);
   AVXZERO(vex_state->guest_YMM4);
   AVXZERO(vex_state->guest_YMM5);
   AVXZERO(vex_state->guest_YMM6);
   AVXZERO(vex_state->guest_YMM7);
   AVXZERO(vex_state->guest_YMM8);
   AVXZERO(vex_state->guest_YMM9);
   AVXZERO(vex_state->guest_YMM10);
   AVXZERO(vex_state->guest_YMM11);
   AVXZERO(vex_state->guest_YMM12);
   AVXZERO(vex_state->guest_YMM13);
   AVXZERO(vex_state->guest_YMM14);
   AVXZERO(vex_state->guest_YMM15);
   /* NOTE(review): guest_YMM16 is a slot beyond the 16 architectural
      registers -- presumably an internal scratch value; confirm
      against the guest-state declaration. */
   AVXZERO(vex_state->guest_YMM16);

#  undef AVXZERO

   vex_state->guest_EMNOTE = EmNote_NONE;

   /* These should not ever be either read or written, but we
      initialise them anyway. */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR   = 0;
   vex_state->guest_SC_CLASS = 0;
   vex_state->guest_GS_CONST = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->pad1 = 0;
}
+
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions.  If in doubt return
+   True (but this generates significantly slower code).  
+
+   By default we enforce precise exns for guest %RSP, %RBP and %RIP
+   only.  These are the minimum needed to extract correct stack
+   backtraces from amd64 code.
+
+   Only %RSP is needed in mode VexRegUpdSpAtMemAccess.   
+*/
+Bool guest_amd64_state_requires_precise_mem_exns (
+        Int minoff, Int maxoff, VexRegisterUpdates pxControl
+     )
+{
+   Int rbp_min = offsetof(VexGuestAMD64State, guest_RBP);
+   Int rbp_max = rbp_min + 8 - 1;
+   Int rsp_min = offsetof(VexGuestAMD64State, guest_RSP);
+   Int rsp_max = rsp_min + 8 - 1;
+   Int rip_min = offsetof(VexGuestAMD64State, guest_RIP);
+   Int rip_max = rip_min + 8 - 1;
+
+   if (maxoff < rsp_min || minoff > rsp_max) {
+      /* no overlap with rsp */
+      if (pxControl == VexRegUpdSpAtMemAccess)
+         return False; // We only need to check stack pointer.
+   } else {
+      return True;
+   }
+
+   if (maxoff < rbp_min || minoff > rbp_max) {
+      /* no overlap with rbp */
+   } else {
+      return True;
+   }
+
+   if (maxoff < rip_min || minoff > rip_max) {
+      /* no overlap with eip */
+   } else {
+      return True;
+   }
+
+   return False;
+}
+
+
/* Build one { offset, size } descriptor for a guest state field, for
   the alwaysDefd table below. */
#define ALWAYSDEFD(field)                             \
    { offsetof(VexGuestAMD64State, field),            \
      (sizeof ((VexGuestAMD64State*)0)->field) }

/* Layout descriptor handed to the rest of LibVEX; n_alwaysDefd must
   match the number of entries in alwaysDefd (16, numbered 0..15). */
VexGuestLayout
   amd64guest_layout
      = {
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestAMD64State),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestAMD64State,guest_RSP),
          .sizeof_SP = 8,

          /* Describe the frame pointer. */
          .offset_FP = offsetof(VexGuestAMD64State,guest_RBP),
          .sizeof_FP = 8,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestAMD64State,guest_RIP),
          .sizeof_IP = 8,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'. */
          .n_alwaysDefd = 16,

          /* flags thunk: OP and NDEP are always defd, whereas DEP1
             and DEP2 have to be tracked.  See detailed comment in
             gdefs.h on meaning of thunk fields. */
          .alwaysDefd
             = { /*  0 */ ALWAYSDEFD(guest_CC_OP),
                 /*  1 */ ALWAYSDEFD(guest_CC_NDEP),
                 /*  2 */ ALWAYSDEFD(guest_DFLAG),
                 /*  3 */ ALWAYSDEFD(guest_IDFLAG),
                 /*  4 */ ALWAYSDEFD(guest_RIP),
                 /*  5 */ ALWAYSDEFD(guest_FS_CONST),
                 /*  6 */ ALWAYSDEFD(guest_FTOP),
                 /*  7 */ ALWAYSDEFD(guest_FPTAG),
                 /*  8 */ ALWAYSDEFD(guest_FPROUND),
                 /*  9 */ ALWAYSDEFD(guest_FC3210),
                 // /* */ ALWAYSDEFD(guest_CS),
                 // /* */ ALWAYSDEFD(guest_DS),
                 // /* */ ALWAYSDEFD(guest_ES),
                 // /* */ ALWAYSDEFD(guest_FS),
                 // /* */ ALWAYSDEFD(guest_GS),
                 // /* */ ALWAYSDEFD(guest_SS),
                 // /* */ ALWAYSDEFD(guest_LDT),
                 // /* */ ALWAYSDEFD(guest_GDT),
                 /* 10 */ ALWAYSDEFD(guest_EMNOTE),
                 /* 11 */ ALWAYSDEFD(guest_SSEROUND),
                 /* 12 */ ALWAYSDEFD(guest_CMSTART),
                 /* 13 */ ALWAYSDEFD(guest_CMLEN),
                 /* 14 */ ALWAYSDEFD(guest_SC_CLASS),
                 /* 15 */ ALWAYSDEFD(guest_IP_AT_SYSCALL)
               }
        };
+
+
+/*---------------------------------------------------------------*/
+/*--- end                               guest_amd64_helpers.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_amd64_toIR.c b/VEX/priv/guest_amd64_toIR.c
new file mode 100644
index 0000000..add943d
--- /dev/null
+++ b/VEX/priv/guest_amd64_toIR.c
@@ -0,0 +1,32029 @@
+
+/*--------------------------------------------------------------------*/
+/*--- begin                                     guest_amd64_toIR.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* Translates AMD64 code to IR. */
+
+/* TODO:
+
+   All Puts to CC_OP/CC_DEP1/CC_DEP2/CC_NDEP should really be checked
+   to ensure a 64-bit value is being written.
+
+   x87 FP Limitations:
+ 
+   * all arithmetic done at 64 bits
+ 
+   * no FP exceptions, except for handling stack over/underflow
+ 
+   * FP rounding mode observed only for float->int conversions and
+     int->float conversions which could lose accuracy, and for
+     float-to-float rounding.  For all other operations,
+     round-to-nearest is used, regardless.
+ 
+   * some of the FCOM cases could do with testing -- not convinced
+     that the args are the right way round.
+ 
+   * FSAVE does not re-initialise the FPU; it should do
+ 
+   * FINIT not only initialises the FPU environment, it also zeroes
+     all the FP registers.  It should leave the registers unchanged.
+ 
+    SAHF should cause eflags[1] == 1, and in fact it produces 0.  As
+    per Intel docs this bit has no meaning anyway.  Since PUSHF is the
+    only way to observe eflags[1], a proper fix would be to make that
+    bit be set by PUSHF.
+ 
+    This module uses global variables and so is not MT-safe (if that
+    should ever become relevant).
+*/
+
+/* Notes re address size overrides (0x67).
+
+   According to the AMD documentation (24594 Rev 3.09, Sept 2003,
+   "AMD64 Architecture Programmer's Manual Volume 3: General-Purpose
+   and System Instructions"), Section 1.2.3 ("Address-Size Override
+   Prefix"):
+
+   0x67 applies to all explicit memory references, causing the top
+   32 bits of the effective address to become zero.
+
+   0x67 has no effect on stack references (push/pop); these always
+   use a 64-bit address.
+
+   0x67 changes the interpretation of instructions which implicitly
+   reference RCX/RSI/RDI, so that in fact ECX/ESI/EDI are used
+   instead.  These are:
+
+      cmp{s,sb,sw,sd,sq}
+      in{s,sb,sw,sd}
+      jcxz, jecxz, jrcxz
+      lod{s,sb,sw,sd,sq}
+      loop{,e,bz,be,z}
+      mov{s,sb,sw,sd,sq}
+      out{s,sb,sw,sd}
+      rep{,e,ne,nz}
+      sca{s,sb,sw,sd,sq}
+      sto{s,sb,sw,sd,sq}
+      xlat{,b} */
+
+/* "Special" instructions.
+
+   This instruction decoder can decode three special instructions
+   which mean nothing natively (are no-ops as far as regs/mem are
+   concerned) but have meaning for supporting Valgrind.  A special
+   instruction is flagged by the 16-byte preamble 48C1C703 48C1C70D
+   48C1C73D 48C1C733 (in the standard interpretation, that means: rolq
+   $3, %rdi; rolq $13, %rdi; rolq $61, %rdi; rolq $51, %rdi).
+   Following that, one of the following 3 are allowed (standard
+   interpretation in parentheses):
+
+      4887DB (xchgq %rbx,%rbx)   %RDX = client_request ( %RAX )
+      4887C9 (xchgq %rcx,%rcx)   %RAX = guest_NRADDR
+      4887D2 (xchgq %rdx,%rdx)   call-noredir *%RAX
+      4887F6 (xchgq %rdi,%rdi)   IR injection
+
+   Any other bytes following the 16-byte preamble are illegal and
+   constitute a failure in instruction decoding.  This all assumes
+   that the preamble will never occur except in specific code
+   fragments designed for Valgrind to catch.
+
+   No prefixes may precede a "Special" instruction.
+*/
+
+/* casLE (implementation of lock-prefixed insns) and rep-prefixed
+   insns: the side-exit back to the start of the insn is done with
+   Ijk_Boring.  This is quite wrong, it should be done with
+   Ijk_NoRedir, since otherwise the side exit, which is intended to
+   restart the instruction for whatever reason, could go somewhere
+   entirely else.  Doing it right (with Ijk_NoRedir jumps) would make
+   no-redir jumps performance critical, at least for rep-prefixed
+   instructions, since all iterations thereof would involve such a
+   jump.  It's not such a big deal with casLE since the side exit is
+   only taken if the CAS fails, that is, the location is contended,
+   which is relatively unlikely.
+
+   Note also, the test for CAS success vs failure is done using
+   Iop_CasCmp{EQ,NE}{8,16,32,64} rather than the ordinary
+   Iop_Cmp{EQ,NE} equivalents.  This is so as to tell Memcheck that it
+   shouldn't definedness-check these comparisons.  See
+   COMMENT_ON_CasCmpEQ in memcheck/mc_translate.c for
+   background/rationale.
+*/
+
+/* LOCK prefixed instructions.  These are translated using IR-level
+   CAS statements (IRCAS) and are believed to preserve atomicity, even
+   from the point of view of some other process racing against a
+   simulated one (presumably they communicate via a shared memory
+   segment).
+
+   Handlers which are aware of LOCK prefixes are:
+      dis_op2_G_E      (add, or, adc, sbb, and, sub, xor)
+      dis_cmpxchg_G_E  (cmpxchg)
+      dis_Grp1         (add, or, adc, sbb, and, sub, xor)
+      dis_Grp3         (not, neg)
+      dis_Grp4         (inc, dec)
+      dis_Grp5         (inc, dec)
+      dis_Grp8_Imm     (bts, btc, btr)
+      dis_bt_G_E       (bts, btc, btr)
+      dis_xadd_G_E     (xadd)
+*/
+
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_guest_amd64.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_generic_x87.h"
+#include "guest_amd64_defs.h"
+
+
+/*------------------------------------------------------------*/
+/*--- Globals                                              ---*/
+/*------------------------------------------------------------*/
+
/* These are set at the start of the translation of an insn, right
   down in disInstr_AMD64, so that we don't have to pass them around
   endlessly.  They are all constant during the translation of any
   given insn. */

/* These are set at the start of the translation of a BB, so
   that we don't have to pass them around endlessly. */

/* We need to know this to do sub-register accesses correctly. */
static VexEndness host_endness;

/* Pointer to the guest code area (points to start of BB, not to the
   insn being processed). */
static const UChar* guest_code;

/* The guest address corresponding to guest_code[0]. */
static Addr64 guest_RIP_bbstart;

/* The guest address for the instruction currently being
   translated. */
static Addr64 guest_RIP_curr_instr;

/* The IRSB* into which we're generating code. */
static IRSB* irsb;

/* For ensuring that %rip-relative addressing is done right.  A read
   of %rip generates the address of the next instruction.  It may be
   that we don't conveniently know that inside disAMode().  For sanity
   checking, if the next insn %rip is needed, we make a guess at what
   it is, record that guess here, and set the accompanying Bool to
   indicate that -- after this insn's decode is finished -- that guess
   needs to be checked.  */

/* At the start of each insn decode, is set to (0, False).
   After the decode, if _mustcheck is now True, _assumed is
   checked. */

static Addr64 guest_RIP_next_assumed;   /* the recorded guess */
static Bool   guest_RIP_next_mustcheck; /* must the guess be verified? */
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for constructing IR.                         ---*/
+/*------------------------------------------------------------*/
+ 
/* Generate a new temporary of the given type. */
static IRTemp newTemp ( IRType ty )
{
   vassert(isPlausibleIRType(ty));
   return newIRTemp( irsb->tyenv, ty );
}

/* Add a statement to the list held by "irsb". */
static void stmt ( IRStmt* st )
{
   addStmtToIRSB( irsb, st );
}

/* Generate a statement "dst := e". */ 
static void assign ( IRTemp dst, IRExpr* e )
{
   stmt( IRStmt_WrTmp(dst, e) );
}

/* Build a unary-operator expression. */
static IRExpr* unop ( IROp op, IRExpr* a )
{
   return IRExpr_Unop(op, a);
}

/* Build a binary-operator expression. */
static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
{
   return IRExpr_Binop(op, a1, a2);
}

/* Build a ternary-operator expression. */
static IRExpr* triop ( IROp op, IRExpr* a1, IRExpr* a2, IRExpr* a3 )
{
   return IRExpr_Triop(op, a1, a2, a3);
}

/* Build an expression that reads temporary 'tmp'. */
static IRExpr* mkexpr ( IRTemp tmp )
{
   return IRExpr_RdTmp(tmp);
}

/* Constant-expression builders; each asserts the value fits the
   target width. */
static IRExpr* mkU8 ( ULong i )
{
   vassert(i < 256);
   return IRExpr_Const(IRConst_U8( (UChar)i ));
}

static IRExpr* mkU16 ( ULong i )
{
   vassert(i < 0x10000ULL);
   return IRExpr_Const(IRConst_U16( (UShort)i ));
}

static IRExpr* mkU32 ( ULong i )
{
   vassert(i < 0x100000000ULL);
   return IRExpr_Const(IRConst_U32( (UInt)i ));
}

static IRExpr* mkU64 ( ULong i )
{
   return IRExpr_Const(IRConst_U64(i));
}

/* Build an integer constant of type 'ty' (I8/I16/I32/I64 only). */
static IRExpr* mkU ( IRType ty, ULong i )
{
   switch (ty) {
      case Ity_I8:  return mkU8(i);
      case Ity_I16: return mkU16(i);
      case Ity_I32: return mkU32(i);
      case Ity_I64: return mkU64(i);
      default: vpanic("mkU(amd64)");
   }
}

/* Generate a little-endian store of 'data' at 'addr'. */
static void storeLE ( IRExpr* addr, IRExpr* data )
{
   stmt( IRStmt_Store(Iend_LE, addr, data) );
}

/* Generate a little-endian load of type 'ty' from 'addr'. */
static IRExpr* loadLE ( IRType ty, IRExpr* addr )
{
   return IRExpr_Load(Iend_LE, ty, addr);
}
+
/* Widen an 8-bit IROp to the variant implied by 'ty' by adding an
   offset to the enum value.  NOTE(review): the +0/+1/+2/+3 scheme
   assumes the 8/16/32/64-bit variants of each listed op are declared
   consecutively in the IROp enum -- confirm against libvex_ir.h. */
static IROp mkSizedOp ( IRType ty, IROp op8 )
{
   vassert(op8 == Iop_Add8 || op8 == Iop_Sub8 
           || op8 == Iop_Mul8 
           || op8 == Iop_Or8 || op8 == Iop_And8 || op8 == Iop_Xor8
           || op8 == Iop_Shl8 || op8 == Iop_Shr8 || op8 == Iop_Sar8
           || op8 == Iop_CmpEQ8 || op8 == Iop_CmpNE8
           || op8 == Iop_CasCmpNE8
           || op8 == Iop_Not8 );
   switch (ty) {
      case Ity_I8:  return 0 +op8;
      case Ity_I16: return 1 +op8;
      case Ity_I32: return 2 +op8;
      case Ity_I64: return 3 +op8;
      default: vpanic("mkSizedOp(amd64)");
   }
}
+
+static 
+IRExpr* doScalarWidening ( Int szSmall, Int szBig, Bool signd, IRExpr* src )
+{
+   if (szSmall == 1 && szBig == 4) {
+      return unop(signd ? Iop_8Sto32 : Iop_8Uto32, src);
+   }
+   if (szSmall == 1 && szBig == 2) {
+      return unop(signd ? Iop_8Sto16 : Iop_8Uto16, src);
+   }
+   if (szSmall == 2 && szBig == 4) {
+      return unop(signd ? Iop_16Sto32 : Iop_16Uto32, src);
+   }
+   if (szSmall == 1 && szBig == 8 && !signd) {
+      return unop(Iop_8Uto64, src);
+   }
+   if (szSmall == 1 && szBig == 8 && signd) {
+      return unop(Iop_8Sto64, src);
+   }
+   if (szSmall == 2 && szBig == 8 && !signd) {
+      return unop(Iop_16Uto64, src);
+   }
+   if (szSmall == 2 && szBig == 8 && signd) {
+      return unop(Iop_16Sto64, src);
+   }
+   vpanic("doScalarWidening(amd64)");
+}
+
+
+
+/*------------------------------------------------------------*/
+/*--- Debugging output                                     ---*/
+/*------------------------------------------------------------*/
+
/* Bomb out if we can't handle something. */
__attribute__ ((noreturn))
static void unimplemented ( const HChar* str )
{
   vex_printf("amd64toIR: unimplemented feature\n");
   vpanic(str);
}

/* Print to the trace, but only when front-end tracing is enabled. */
#define DIP(format, args...)           \
   if (vex_traceflags & VEX_TRACE_FE)  \
      vex_printf(format, ## args)

/* Ditto, but sprintf into 'buf' instead of printing. */
#define DIS(buf, format, args...)      \
   if (vex_traceflags & VEX_TRACE_FE)  \
      vex_sprintf(buf, format, ## args)
+
+
+/*------------------------------------------------------------*/
+/*--- Offsets of various parts of the amd64 guest state.   ---*/
+/*------------------------------------------------------------*/
+
+#define OFFB_RAX       offsetof(VexGuestAMD64State,guest_RAX)
+#define OFFB_RBX       offsetof(VexGuestAMD64State,guest_RBX)
+#define OFFB_RCX       offsetof(VexGuestAMD64State,guest_RCX)
+#define OFFB_RDX       offsetof(VexGuestAMD64State,guest_RDX)
+#define OFFB_RSP       offsetof(VexGuestAMD64State,guest_RSP)
+#define OFFB_RBP       offsetof(VexGuestAMD64State,guest_RBP)
+#define OFFB_RSI       offsetof(VexGuestAMD64State,guest_RSI)
+#define OFFB_RDI       offsetof(VexGuestAMD64State,guest_RDI)
+#define OFFB_R8        offsetof(VexGuestAMD64State,guest_R8)
+#define OFFB_R9        offsetof(VexGuestAMD64State,guest_R9)
+#define OFFB_R10       offsetof(VexGuestAMD64State,guest_R10)
+#define OFFB_R11       offsetof(VexGuestAMD64State,guest_R11)
+#define OFFB_R12       offsetof(VexGuestAMD64State,guest_R12)
+#define OFFB_R13       offsetof(VexGuestAMD64State,guest_R13)
+#define OFFB_R14       offsetof(VexGuestAMD64State,guest_R14)
+#define OFFB_R15       offsetof(VexGuestAMD64State,guest_R15)
+
+#define OFFB_RIP       offsetof(VexGuestAMD64State,guest_RIP)
+
+#define OFFB_FS_CONST  offsetof(VexGuestAMD64State,guest_FS_CONST)
+#define OFFB_GS_CONST  offsetof(VexGuestAMD64State,guest_GS_CONST)
+
+#define OFFB_CC_OP     offsetof(VexGuestAMD64State,guest_CC_OP)
+#define OFFB_CC_DEP1   offsetof(VexGuestAMD64State,guest_CC_DEP1)
+#define OFFB_CC_DEP2   offsetof(VexGuestAMD64State,guest_CC_DEP2)
+#define OFFB_CC_NDEP   offsetof(VexGuestAMD64State,guest_CC_NDEP)
+
+#define OFFB_FPREGS    offsetof(VexGuestAMD64State,guest_FPREG[0])
+#define OFFB_FPTAGS    offsetof(VexGuestAMD64State,guest_FPTAG[0])
+#define OFFB_DFLAG     offsetof(VexGuestAMD64State,guest_DFLAG)
+#define OFFB_ACFLAG    offsetof(VexGuestAMD64State,guest_ACFLAG)
+#define OFFB_IDFLAG    offsetof(VexGuestAMD64State,guest_IDFLAG)
+#define OFFB_FTOP      offsetof(VexGuestAMD64State,guest_FTOP)
+#define OFFB_FC3210    offsetof(VexGuestAMD64State,guest_FC3210)
+#define OFFB_FPROUND   offsetof(VexGuestAMD64State,guest_FPROUND)
+
+#define OFFB_SSEROUND  offsetof(VexGuestAMD64State,guest_SSEROUND)
+#define OFFB_YMM0      offsetof(VexGuestAMD64State,guest_YMM0)
+#define OFFB_YMM1      offsetof(VexGuestAMD64State,guest_YMM1)
+#define OFFB_YMM2      offsetof(VexGuestAMD64State,guest_YMM2)
+#define OFFB_YMM3      offsetof(VexGuestAMD64State,guest_YMM3)
+#define OFFB_YMM4      offsetof(VexGuestAMD64State,guest_YMM4)
+#define OFFB_YMM5      offsetof(VexGuestAMD64State,guest_YMM5)
+#define OFFB_YMM6      offsetof(VexGuestAMD64State,guest_YMM6)
+#define OFFB_YMM7      offsetof(VexGuestAMD64State,guest_YMM7)
+#define OFFB_YMM8      offsetof(VexGuestAMD64State,guest_YMM8)
+#define OFFB_YMM9      offsetof(VexGuestAMD64State,guest_YMM9)
+#define OFFB_YMM10     offsetof(VexGuestAMD64State,guest_YMM10)
+#define OFFB_YMM11     offsetof(VexGuestAMD64State,guest_YMM11)
+#define OFFB_YMM12     offsetof(VexGuestAMD64State,guest_YMM12)
+#define OFFB_YMM13     offsetof(VexGuestAMD64State,guest_YMM13)
+#define OFFB_YMM14     offsetof(VexGuestAMD64State,guest_YMM14)
+#define OFFB_YMM15     offsetof(VexGuestAMD64State,guest_YMM15)
+#define OFFB_YMM16     offsetof(VexGuestAMD64State,guest_YMM16)
+
+#define OFFB_EMNOTE    offsetof(VexGuestAMD64State,guest_EMNOTE)
+#define OFFB_CMSTART   offsetof(VexGuestAMD64State,guest_CMSTART)
+#define OFFB_CMLEN     offsetof(VexGuestAMD64State,guest_CMLEN)
+
+#define OFFB_NRADDR    offsetof(VexGuestAMD64State,guest_NRADDR)
+
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for deconstructing the        ---*/
+/*--- amd64 insn stream.                                   ---*/
+/*------------------------------------------------------------*/
+
+/* This is the AMD64 register encoding -- integer regs. */
+#define R_RAX 0
+#define R_RCX 1
+#define R_RDX 2
+#define R_RBX 3
+#define R_RSP 4
+#define R_RBP 5
+#define R_RSI 6
+#define R_RDI 7
+#define R_R8  8
+#define R_R9  9
+#define R_R10 10
+#define R_R11 11
+#define R_R12 12
+#define R_R13 13
+#define R_R14 14
+#define R_R15 15
+
+/* This is the Intel register encoding -- segment regs. */
+#define R_ES 0
+#define R_CS 1
+#define R_SS 2
+#define R_DS 3
+#define R_FS 4
+#define R_GS 5
+
+
/* Various simple conversions */

/* Sign-extend an 8-bit value to 64 bits, via shift up / arithmetic
   shift down. */
static ULong extend_s_8to64 ( UChar x )
{
   return (ULong)((Long)(((ULong)x) << 56) >> 56);
}

/* Sign-extend a 16-bit value to 64 bits. */
static ULong extend_s_16to64 ( UShort x )
{
   return (ULong)((Long)(((ULong)x) << 48) >> 48);
}

/* Sign-extend a 32-bit value to 64 bits. */
static ULong extend_s_32to64 ( UInt x )
{
   return (ULong)((Long)(((ULong)x) << 32) >> 32);
}
+
/* Figure out whether the mod and rm parts of a modRM byte refer to a
   register or memory.  If so, the byte will have the form 11XXXYYY,
   where YYY is the register number. */
inline
static Bool epartIsReg ( UChar mod_reg_rm )
{
   /* mod == 3 (both top bits set) means register-direct. */
   return toBool(0xC0 == (mod_reg_rm & 0xC0));
}

/* Extract the 'g' field from a modRM byte.  This only produces 3
   bits, which is not a complete register number.  You should avoid
   this function if at all possible. */
inline
static Int gregLO3ofRM ( UChar mod_reg_rm )
{
   return (Int)( (mod_reg_rm >> 3) & 7 );
}

/* Ditto the 'e' field of a modRM byte.  Same caveat: the REX.B bit
   is needed for a complete register number. */
inline
static Int eregLO3ofRM ( UChar mod_reg_rm )
{
   return (Int)(mod_reg_rm & 0x7);
}
+
/* Get a 8/16/32-bit unsigned value out of the insn stream. */

/* Fetch the single byte at guest_code[delta]. */
static inline UChar getUChar ( Long delta )
{
   UChar v = guest_code[delta+0];
   return v;
}

/* Fetch a little-endian 16-bit unsigned value. */
static UInt getUDisp16 ( Long delta )
{
   UInt v = guest_code[delta+1]; v <<= 8;
   v |= guest_code[delta+0];
   return v & 0xFFFF;
}

//.. static UInt getUDisp ( Int size, Long delta )
//.. {
//..    switch (size) {
//..       case 4: return getUDisp32(delta);
//..       case 2: return getUDisp16(delta);
//..       case 1: return getUChar(delta);
//..       default: vpanic("getUDisp(x86)");
//..    }
//..    return 0; /*notreached*/
//.. }


/* Get a byte value out of the insn stream and sign-extend to 64
   bits. */
static Long getSDisp8 ( Long delta )
{
   return extend_s_8to64( guest_code[delta] );
}

/* Get a 16-bit value out of the insn stream and sign-extend to 64
   bits. */
static Long getSDisp16 ( Long delta )
{
   UInt v = guest_code[delta+1]; v <<= 8;
   v |= guest_code[delta+0];
   return extend_s_16to64( (UShort)v );
}

/* Get a 32-bit value out of the insn stream and sign-extend to 64
   bits. */
static Long getSDisp32 ( Long delta )
{
   UInt v = guest_code[delta+3]; v <<= 8;
   v |= guest_code[delta+2]; v <<= 8;
   v |= guest_code[delta+1]; v <<= 8;
   v |= guest_code[delta+0];
   return extend_s_32to64( v );
}

/* Get a 64-bit value out of the insn stream (little-endian; no
   sign-extension needed at full width). */
static Long getDisp64 ( Long delta )
{
   ULong v = 0;
   v |= guest_code[delta+7]; v <<= 8;
   v |= guest_code[delta+6]; v <<= 8;
   v |= guest_code[delta+5]; v <<= 8;
   v |= guest_code[delta+4]; v <<= 8;
   v |= guest_code[delta+3]; v <<= 8;
   v |= guest_code[delta+2]; v <<= 8;
   v |= guest_code[delta+1]; v <<= 8;
   v |= guest_code[delta+0];
   return v;
}

/* Note: because AMD64 doesn't allow 64-bit literals, it is an error
   if this is called with size==8.  Should not happen. */
static Long getSDisp ( Int size, Long delta )
{
   switch (size) {
      case 4: return getSDisp32(delta);
      case 2: return getSDisp16(delta);
      case 1: return getSDisp8(delta);
      default: vpanic("getSDisp(amd64)");
  }
}
+
+static ULong mkSizeMask ( Int sz )
+{
+   switch (sz) {
+      case 1: return 0x00000000000000FFULL;
+      case 2: return 0x000000000000FFFFULL;
+      case 4: return 0x00000000FFFFFFFFULL;
+      case 8: return 0xFFFFFFFFFFFFFFFFULL;
+      default: vpanic("mkSzMask(amd64)");
+   }
+}
+
+static Int imin ( Int a, Int b )
+{
+   return (a < b) ? a : b;
+}
+
+static IRType szToITy ( Int n )
+{
+   switch (n) {
+      case 1: return Ity_I8;
+      case 2: return Ity_I16;
+      case 4: return Ity_I32;
+      case 8: return Ity_I64;
+      default: vex_printf("\nszToITy(%d)\n", n);
+               vpanic("szToITy(amd64)");
+   }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- For dealing with prefixes.                           ---*/
+/*------------------------------------------------------------*/
+
/* The idea is to pass around an int holding a bitmask summarising
   info from the prefixes seen on the current instruction, including
   info from the REX byte.  This info is used in various places, but
   most especially when making sense of register fields in
   instructions.

   The top 8 bits of the prefix are 0x55, just as a hacky way to
   ensure it really is a valid prefix.

   Things you can safely assume about a well-formed prefix:
   * at most one segment-override bit (CS,DS,ES,FS,GS,SS) is set.
   * if REX is not present then REXW,REXR,REXX,REXB will read
     as zero.
   * F2 and F3 will not both be 1.
*/

typedef UInt  Prefix;

#define PFX_ASO    (1<<0)    /* address-size override present (0x67) */
#define PFX_66     (1<<1)    /* operand-size override-to-16 present (0x66) */
#define PFX_REX    (1<<2)    /* REX byte present (0x40 to 0x4F) */
#define PFX_REXW   (1<<3)    /* REX W bit, if REX present, else 0 */
#define PFX_REXR   (1<<4)    /* REX R bit, if REX present, else 0 */
#define PFX_REXX   (1<<5)    /* REX X bit, if REX present, else 0 */
#define PFX_REXB   (1<<6)    /* REX B bit, if REX present, else 0 */
#define PFX_LOCK   (1<<7)    /* bus LOCK prefix present (0xF0) */
#define PFX_F2     (1<<8)    /* REPNE/REPNZ prefix present (0xF2) */
#define PFX_F3     (1<<9)    /* REP/REPE/REPZ prefix present (0xF3) */
#define PFX_CS     (1<<10)   /* CS segment prefix present (0x2E) */
#define PFX_DS     (1<<11)   /* DS segment prefix present (0x3E) */
#define PFX_ES     (1<<12)   /* ES segment prefix present (0x26) */
#define PFX_FS     (1<<13)   /* FS segment prefix present (0x64) */
#define PFX_GS     (1<<14)   /* GS segment prefix present (0x65) */
#define PFX_SS     (1<<15)   /* SS segment prefix present (0x36) */
#define PFX_VEX    (1<<16)   /* VEX prefix present (0xC4 or 0xC5) */
#define PFX_VEXL   (1<<17)   /* VEX L bit, if VEX present, else 0 */
/* The extra register field VEX.vvvv is encoded (after not-ing it) as
   PFX_VEXnV3 .. PFX_VEXnV0, so these must occupy adjacent bit
   positions. */
#define PFX_VEXnV0 (1<<18)   /* ~VEX vvvv[0], if VEX present, else 0 */
#define PFX_VEXnV1 (1<<19)   /* ~VEX vvvv[1], if VEX present, else 0 */
#define PFX_VEXnV2 (1<<20)   /* ~VEX vvvv[2], if VEX present, else 0 */
#define PFX_VEXnV3 (1<<21)   /* ~VEX vvvv[3], if VEX present, else 0 */


/* A Prefix with the 0x55 validity tag in the top byte and no prefix
   bits set. */
#define PFX_EMPTY 0x55000000
+
+static Bool IS_VALID_PFX ( Prefix pfx ) {
+   return toBool((pfx & 0xFF000000) == PFX_EMPTY);
+}
+
+static Bool haveREX ( Prefix pfx ) {
+   return toBool(pfx & PFX_REX);
+}
+
+static Int getRexW ( Prefix pfx ) {
+   return (pfx & PFX_REXW) ? 1 : 0;
+}
+static Int getRexR ( Prefix pfx ) {
+   return (pfx & PFX_REXR) ? 1 : 0;
+}
+static Int getRexX ( Prefix pfx ) {
+   return (pfx & PFX_REXX) ? 1 : 0;
+}
+static Int getRexB ( Prefix pfx ) {
+   return (pfx & PFX_REXB) ? 1 : 0;
+}
+
+/* Check a prefix doesn't have F2 or F3 set in it, since usually that
+   completely changes what instruction it really is. */
+static Bool haveF2orF3 ( Prefix pfx ) {
+   return toBool((pfx & (PFX_F2|PFX_F3)) > 0);
+}
+static Bool haveF2andF3 ( Prefix pfx ) {
+   return toBool((pfx & (PFX_F2|PFX_F3)) == (PFX_F2|PFX_F3));
+}
+static Bool haveF2 ( Prefix pfx ) {
+   return toBool((pfx & PFX_F2) > 0);
+}
+static Bool haveF3 ( Prefix pfx ) {
+   return toBool((pfx & PFX_F3) > 0);
+}
+
+static Bool have66 ( Prefix pfx ) {
+   return toBool((pfx & PFX_66) > 0);
+}
+static Bool haveASO ( Prefix pfx ) {
+   return toBool((pfx & PFX_ASO) > 0);
+}
+static Bool haveLOCK ( Prefix pfx ) {
+   return toBool((pfx & PFX_LOCK) > 0);
+}
+
+/* Return True iff pfx has 66 set and F2 and F3 clear */
+static Bool have66noF2noF3 ( Prefix pfx )
+{
+  return 
+     toBool((pfx & (PFX_66|PFX_F2|PFX_F3)) == PFX_66);
+}
+
+/* Return True iff pfx has F2 set and 66 and F3 clear */
+static Bool haveF2no66noF3 ( Prefix pfx )
+{
+  return 
+     toBool((pfx & (PFX_66|PFX_F2|PFX_F3)) == PFX_F2);
+}
+
+/* Return True iff pfx has F3 set and 66 and F2 clear */
+static Bool haveF3no66noF2 ( Prefix pfx )
+{
+  return 
+     toBool((pfx & (PFX_66|PFX_F2|PFX_F3)) == PFX_F3);
+}
+
+/* Return True iff pfx has F3 set and F2 clear */
+static Bool haveF3noF2 ( Prefix pfx )
+{
+  return 
+     toBool((pfx & (PFX_F2|PFX_F3)) == PFX_F3);
+}
+
+/* Return True iff pfx has F2 set and F3 clear */
+static Bool haveF2noF3 ( Prefix pfx )
+{
+  return 
+     toBool((pfx & (PFX_F2|PFX_F3)) == PFX_F2);
+}
+
+/* Return True iff pfx has 66, F2 and F3 clear */
+static Bool haveNo66noF2noF3 ( Prefix pfx )
+{
+  return 
+     toBool((pfx & (PFX_66|PFX_F2|PFX_F3)) == 0);
+}
+
+/* Return True iff pfx has any of 66, F2 and F3 set */
+static Bool have66orF2orF3 ( Prefix pfx )
+{
+  return toBool( ! haveNo66noF2noF3(pfx) );
+}
+
+/* Return True iff pfx has 66 or F3 set */
+static Bool have66orF3 ( Prefix pfx )
+{
+   return toBool((pfx & (PFX_66|PFX_F3)) > 0);
+}
+
+/* Clear all the segment-override bits in a prefix. */
+static Prefix clearSegBits ( Prefix p )
+{
+   return 
+      p & ~(PFX_CS | PFX_DS | PFX_ES | PFX_FS | PFX_GS | PFX_SS);
+}
+
+/* Get the (inverted, hence back to "normal") VEX.vvvv field. */
+static UInt getVexNvvvv ( Prefix pfx ) {
+   UInt r = (UInt)pfx;
+   r /= (UInt)PFX_VEXnV0; /* pray this turns into a shift */
+   return r & 0xF;
+}
+
+static Bool haveVEX ( Prefix pfx ) {
+   return toBool(pfx & PFX_VEX);
+}
+
+static Int getVexL ( Prefix pfx ) {
+   return (pfx & PFX_VEXL) ? 1 : 0;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- For dealing with escapes                             ---*/
+/*------------------------------------------------------------*/
+
+
+/* Escapes come after the prefixes, but before the primary opcode
+   byte.  They escape the primary opcode byte into a bigger space.
+   The 0xF0000000 isn't significant, except so as to make it not
+   overlap valid Prefix values, for sanity checking.
+*/
+
/* NB: successive values follow ESC_NONE, so all four tags stay in
   the 0xF00000xx range and never collide with a valid Prefix. */
typedef
   enum { 
      ESC_NONE=0xF0000000, // none
      ESC_0F,              // 0F
      ESC_0F38,            // 0F 38
      ESC_0F3A             // 0F 3A
   }
   Escape;
+
+
+/*------------------------------------------------------------*/
+/*--- For dealing with integer registers                   ---*/
+/*------------------------------------------------------------*/
+
+/* This is somewhat complex.  The rules are:
+
+   For 64, 32 and 16 bit register references, the e or g fields in the
+   modrm bytes supply the low 3 bits of the register number.  The
+   fourth (most-significant) bit of the register number is supplied by
+   the REX byte, if it is present; else that bit is taken to be zero.
+
+   The REX.R bit supplies the high bit corresponding to the g register
+   field, and the REX.B bit supplies the high bit corresponding to the
+   e register field (when the mod part of modrm indicates that modrm's
+   e component refers to a register and not to memory).
+
+   The REX.X bit supplies a high register bit for certain registers
+   in SIB address modes, and is generally rarely used.
+
+   For 8 bit register references, the presence of the REX byte itself
+   has significance.  If there is no REX present, then the 3-bit
+   number extracted from the modrm e or g field is treated as an index
+   into the sequence %al %cl %dl %bl %ah %ch %dh %bh -- that is, the
+   old x86 encoding scheme.
+
+   But if there is a REX present, the register reference is
+   interpreted in the same way as for 64/32/16-bit references: a high
+   bit is extracted from REX, giving a 4-bit number, and the denoted
+   register is the lowest 8 bits of the 16 integer registers denoted
+   by the number.  In particular, values 3 through 7 of this sequence
+   do not refer to %ah %ch %dh %bh but instead to the lowest 8 bits of
+   %rsp %rbp %rsi %rdi.
+
+   The REX.W bit has no bearing at all on register numbers.  Instead
+   its presence indicates that the operand size is to be overridden
+   from its default value (32 bits) to 64 bits instead.  This is in
+   the same fashion that an 0x66 prefix indicates the operand size is
+   to be overridden from 32 bits down to 16 bits.  When both REX.W and
+   0x66 are present there is a conflict, and REX.W takes precedence.
+
+   Rather than try to handle this complexity using a single huge
+   function, several smaller ones are provided.  The aim is to make it
+   as difficult as possible to screw up register decoding in a subtle
+   and hard-to-track-down way.
+
+   Because these routines fish around in the host's memory (that is,
+   in the guest state area) for sub-parts of guest registers, their
+   correctness depends on the host's endianness.  So far these
+   routines only work for little-endian hosts.  Those for which
+   endianness is important have assertions to ensure sanity.
+*/
+
+
+/* About the simplest question you can ask: where do the 64-bit
+   integer registers live (in the guest state) ? */
+
static Int integerGuestReg64Offset ( UInt reg )
{
   /* 'reg' is a 4-bit register number (R_RAX .. R_R15); anything
      else indicates a decoder bug and panics. */
   switch (reg) {
      case R_RAX: return OFFB_RAX;
      case R_RCX: return OFFB_RCX;
      case R_RDX: return OFFB_RDX;
      case R_RBX: return OFFB_RBX;
      case R_RSP: return OFFB_RSP;
      case R_RBP: return OFFB_RBP;
      case R_RSI: return OFFB_RSI;
      case R_RDI: return OFFB_RDI;
      case R_R8:  return OFFB_R8;
      case R_R9:  return OFFB_R9;
      case R_R10: return OFFB_R10;
      case R_R11: return OFFB_R11;
      case R_R12: return OFFB_R12;
      case R_R13: return OFFB_R13;
      case R_R14: return OFFB_R14;
      case R_R15: return OFFB_R15;
      default: vpanic("integerGuestReg64Offset(amd64)");
   }
}
+
+
+/* Produce the name of an integer register, for printing purposes.
+   reg is a number in the range 0 .. 15 that has been generated from a
+   3-bit reg-field number and a REX extension bit.  irregular denotes
+   the case where sz==1 and no REX byte is present. */
+
+static 
+const HChar* nameIReg ( Int sz, UInt reg, Bool irregular )
+{
+   static const HChar* ireg64_names[16]
+     = { "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
+         "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15" };
+   static const HChar* ireg32_names[16]
+     = { "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
+         "%r8d", "%r9d", "%r10d","%r11d","%r12d","%r13d","%r14d","%r15d" };
+   static const HChar* ireg16_names[16]
+     = { "%ax",  "%cx",  "%dx",  "%bx",  "%sp",  "%bp",  "%si",  "%di",
+         "%r8w", "%r9w", "%r10w","%r11w","%r12w","%r13w","%r14w","%r15w" };
+   static const HChar* ireg8_names[16]
+     = { "%al",  "%cl",  "%dl",  "%bl",  "%spl", "%bpl", "%sil", "%dil",
+         "%r8b", "%r9b", "%r10b","%r11b","%r12b","%r13b","%r14b","%r15b" };
+   static const HChar* ireg8_irregular[8] 
+     = { "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh" };
+
+   vassert(reg < 16);
+   if (sz == 1) {
+      if (irregular)
+         vassert(reg < 8);
+   } else {
+      vassert(irregular == False);
+   }
+
+   switch (sz) {
+      case 8: return ireg64_names[reg];
+      case 4: return ireg32_names[reg];
+      case 2: return ireg16_names[reg];
+      case 1: if (irregular) {
+                 return ireg8_irregular[reg];
+              } else {
+                 return ireg8_names[reg];
+              }
+      default: vpanic("nameIReg(amd64)");
+   }
+}
+
+/* Using the same argument conventions as nameIReg, produce the
+   guest state offset of an integer register. */
+
+static 
+Int offsetIReg ( Int sz, UInt reg, Bool irregular )
+{
+   vassert(reg < 16);
+   if (sz == 1) {
+      if (irregular)
+         vassert(reg < 8);
+   } else {
+      vassert(irregular == False);
+   }
+
+   /* Deal with irregular case -- sz==1 and no REX present */
+   if (sz == 1 && irregular) {
+      switch (reg) {
+         case R_RSP: return 1+ OFFB_RAX;
+         case R_RBP: return 1+ OFFB_RCX;
+         case R_RSI: return 1+ OFFB_RDX;
+         case R_RDI: return 1+ OFFB_RBX;
+         default:    break; /* use the normal case */
+      }
+   }
+
+   /* Normal case */
+   return integerGuestReg64Offset(reg);
+}
+
+
+/* Read the %CL register :: Ity_I8, for shift/rotate operations. */
+
+static IRExpr* getIRegCL ( void )
+{
+   vassert(host_endness == VexEndnessLE);
+   return IRExpr_Get( OFFB_RCX, Ity_I8 );
+}
+
+
+/* Write to the %AH register. */
+
+static void putIRegAH ( IRExpr* e )
+{
+   vassert(host_endness == VexEndnessLE);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
+   stmt( IRStmt_Put( OFFB_RAX+1, e ) );
+}
+
+
+/* Read/write various widths of %RAX, as it has various
+   special-purpose uses. */
+
+static const HChar* nameIRegRAX ( Int sz )
+{
+   switch (sz) {
+      case 1: return "%al";
+      case 2: return "%ax";
+      case 4: return "%eax";
+      case 8: return "%rax";
+      default: vpanic("nameIRegRAX(amd64)");
+   }
+}
+
+static IRExpr* getIRegRAX ( Int sz )
+{
+   vassert(host_endness == VexEndnessLE);
+   switch (sz) {
+      case 1: return IRExpr_Get( OFFB_RAX, Ity_I8 );
+      case 2: return IRExpr_Get( OFFB_RAX, Ity_I16 );
+      case 4: return unop(Iop_64to32, IRExpr_Get( OFFB_RAX, Ity_I64 ));
+      case 8: return IRExpr_Get( OFFB_RAX, Ity_I64 );
+      default: vpanic("getIRegRAX(amd64)");
+   }
+}
+
+static void putIRegRAX ( Int sz, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(irsb->tyenv, e);
+   vassert(host_endness == VexEndnessLE);
+   switch (sz) {
+      case 8: vassert(ty == Ity_I64);
+              stmt( IRStmt_Put( OFFB_RAX, e ));
+              break;
+      case 4: vassert(ty == Ity_I32);
+              stmt( IRStmt_Put( OFFB_RAX, unop(Iop_32Uto64,e) ));
+              break;
+      case 2: vassert(ty == Ity_I16);
+              stmt( IRStmt_Put( OFFB_RAX, e ));
+              break;
+      case 1: vassert(ty == Ity_I8);
+              stmt( IRStmt_Put( OFFB_RAX, e ));
+              break;
+      default: vpanic("putIRegRAX(amd64)");
+   }
+}
+
+
+/* Read/write various widths of %RDX, as it has various
+   special-purpose uses. */
+
+static const HChar* nameIRegRDX ( Int sz )
+{
+   switch (sz) {
+      case 1: return "%dl";
+      case 2: return "%dx";
+      case 4: return "%edx";
+      case 8: return "%rdx";
+      default: vpanic("nameIRegRDX(amd64)");
+   }
+}
+
+static IRExpr* getIRegRDX ( Int sz )
+{
+   vassert(host_endness == VexEndnessLE);
+   switch (sz) {
+      case 1: return IRExpr_Get( OFFB_RDX, Ity_I8 );
+      case 2: return IRExpr_Get( OFFB_RDX, Ity_I16 );
+      case 4: return unop(Iop_64to32, IRExpr_Get( OFFB_RDX, Ity_I64 ));
+      case 8: return IRExpr_Get( OFFB_RDX, Ity_I64 );
+      default: vpanic("getIRegRDX(amd64)");
+   }
+}
+
+static void putIRegRDX ( Int sz, IRExpr* e )
+{
+   vassert(host_endness == VexEndnessLE);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == szToITy(sz));
+   switch (sz) {
+      case 8: stmt( IRStmt_Put( OFFB_RDX, e ));
+              break;
+      case 4: stmt( IRStmt_Put( OFFB_RDX, unop(Iop_32Uto64,e) ));
+              break;
+      case 2: stmt( IRStmt_Put( OFFB_RDX, e ));
+              break;
+      case 1: stmt( IRStmt_Put( OFFB_RDX, e ));
+              break;
+      default: vpanic("putIRegRDX(amd64)");
+   }
+}
+
+
+/* Simplistic functions to deal with the integer registers as a
+   straightforward bank of 16 64-bit regs. */
+
+static IRExpr* getIReg64 ( UInt regno )
+{
+   return IRExpr_Get( integerGuestReg64Offset(regno),
+                      Ity_I64 );
+}
+
+static void putIReg64 ( UInt regno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I64);
+   stmt( IRStmt_Put( integerGuestReg64Offset(regno), e ) );
+}
+
+static const HChar* nameIReg64 ( UInt regno )
+{
+   return nameIReg( 8, regno, False );
+}
+
+
+/* Simplistic functions to deal with the lower halves of integer
+   registers as a straightforward bank of 16 32-bit regs. */
+
+static IRExpr* getIReg32 ( UInt regno )
+{
+   vassert(host_endness == VexEndnessLE);
+   return unop(Iop_64to32,
+               IRExpr_Get( integerGuestReg64Offset(regno),
+                           Ity_I64 ));
+}
+
+static void putIReg32 ( UInt regno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I32);
+   stmt( IRStmt_Put( integerGuestReg64Offset(regno), 
+                     unop(Iop_32Uto64,e) ) );
+}
+
+static const HChar* nameIReg32 ( UInt regno )
+{
+   return nameIReg( 4, regno, False );
+}
+
+
+/* Simplistic functions to deal with the lower quarters of integer
+   registers as a straightforward bank of 16 16-bit regs. */
+
+static IRExpr* getIReg16 ( UInt regno )
+{
+   vassert(host_endness == VexEndnessLE);
+   return IRExpr_Get( integerGuestReg64Offset(regno),
+                      Ity_I16 );
+}
+
+static void putIReg16 ( UInt regno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I16);
+   stmt( IRStmt_Put( integerGuestReg64Offset(regno), 
+                     unop(Iop_16Uto64,e) ) );
+}
+
+static const HChar* nameIReg16 ( UInt regno )
+{
+   return nameIReg( 2, regno, False );
+}
+
+
+/* Sometimes what we know is a 3-bit register number, a REX byte, and
+   which field of the REX byte is to be used to extend to a 4-bit
+   number.  These functions cater for that situation.  
+*/
+static IRExpr* getIReg64rexX ( Prefix pfx, UInt lo3bits )
+{
+   vassert(lo3bits < 8);
+   vassert(IS_VALID_PFX(pfx));
+   return getIReg64( lo3bits | (getRexX(pfx) << 3) );
+}
+
+static const HChar* nameIReg64rexX ( Prefix pfx, UInt lo3bits )
+{
+   vassert(lo3bits < 8);
+   vassert(IS_VALID_PFX(pfx));
+   return nameIReg( 8, lo3bits | (getRexX(pfx) << 3), False );
+}
+
+static const HChar* nameIRegRexB ( Int sz, Prefix pfx, UInt lo3bits )
+{
+   vassert(lo3bits < 8);
+   vassert(IS_VALID_PFX(pfx));
+   vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
+   return nameIReg( sz, lo3bits | (getRexB(pfx) << 3), 
+                        toBool(sz==1 && !haveREX(pfx)) );
+}
+
+static IRExpr* getIRegRexB ( Int sz, Prefix pfx, UInt lo3bits )
+{
+   vassert(lo3bits < 8);
+   vassert(IS_VALID_PFX(pfx));
+   vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
+   if (sz == 4) {
+      sz = 8;
+      return unop(Iop_64to32,
+                  IRExpr_Get(
+                     offsetIReg( sz, lo3bits | (getRexB(pfx) << 3), 
+                                     False/*!irregular*/ ),
+                     szToITy(sz)
+                 )
+             );
+   } else {
+      return IRExpr_Get(
+                offsetIReg( sz, lo3bits | (getRexB(pfx) << 3), 
+                                toBool(sz==1 && !haveREX(pfx)) ),
+                szToITy(sz)
+             );
+   }
+}
+
+static void putIRegRexB ( Int sz, Prefix pfx, UInt lo3bits, IRExpr* e )
+{
+   vassert(lo3bits < 8);
+   vassert(IS_VALID_PFX(pfx));
+   vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == szToITy(sz));
+   stmt( IRStmt_Put( 
+            offsetIReg( sz, lo3bits | (getRexB(pfx) << 3), 
+                            toBool(sz==1 && !haveREX(pfx)) ),
+            sz==4 ? unop(Iop_32Uto64,e) : e
+   ));
+}
+
+
+/* Functions for getting register numbers from modrm bytes and REX
+   when we don't have to consider the complexities of integer subreg
+   accesses.
+*/
+/* Extract the g reg field from a modRM byte, and augment it using the
+   REX.R bit from the supplied REX byte.  The R bit usually is
+   associated with the g register field.
+*/
+static UInt gregOfRexRM ( Prefix pfx, UChar mod_reg_rm )
+{
+   Int reg = (Int)( (mod_reg_rm >> 3) & 7 );
+   reg += (pfx & PFX_REXR) ? 8 : 0;
+   return reg;
+}
+
+/* Extract the e reg field from a modRM byte, and augment it using the
+   REX.B bit from the supplied REX byte.  The B bit usually is
+   associated with the e register field (when modrm indicates e is a
+   register, that is).
+*/
+static UInt eregOfRexRM ( Prefix pfx, UChar mod_reg_rm )
+{
+   Int rm;
+   vassert(epartIsReg(mod_reg_rm));
+   rm = (Int)(mod_reg_rm & 0x7);
+   rm += (pfx & PFX_REXB) ? 8 : 0;
+   return rm;
+}
+
+
+/* General functions for dealing with integer register access. */
+
+/* Produce the guest state offset for a reference to the 'g' register
+   field in a modrm byte, taking into account REX (or its absence),
+   and the size of the access.
+*/
+static UInt offsetIRegG ( Int sz, Prefix pfx, UChar mod_reg_rm )
+{
+   UInt reg;
+   vassert(host_endness == VexEndnessLE);
+   vassert(IS_VALID_PFX(pfx));
+   vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
+   reg = gregOfRexRM( pfx, mod_reg_rm );
+   return offsetIReg( sz, reg, toBool(sz == 1 && !haveREX(pfx)) );
+}
+
+static 
+IRExpr* getIRegG ( Int sz, Prefix pfx, UChar mod_reg_rm )
+{
+   if (sz == 4) {
+      sz = 8;
+      return unop(Iop_64to32,
+                  IRExpr_Get( offsetIRegG( sz, pfx, mod_reg_rm ),
+                              szToITy(sz) ));
+   } else {
+      return IRExpr_Get( offsetIRegG( sz, pfx, mod_reg_rm ),
+                         szToITy(sz) );
+   }
+}
+
+static 
+void putIRegG ( Int sz, Prefix pfx, UChar mod_reg_rm, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == szToITy(sz));
+   if (sz == 4) {
+      e = unop(Iop_32Uto64,e);
+   }
+   stmt( IRStmt_Put( offsetIRegG( sz, pfx, mod_reg_rm ), e ) );
+}
+
+static
+const HChar* nameIRegG ( Int sz, Prefix pfx, UChar mod_reg_rm )
+{
+   return nameIReg( sz, gregOfRexRM(pfx,mod_reg_rm),
+                        toBool(sz==1 && !haveREX(pfx)) );
+}
+
+
+static
+IRExpr* getIRegV ( Int sz, Prefix pfx )
+{
+   if (sz == 4) {
+      sz = 8;
+      return unop(Iop_64to32,
+                  IRExpr_Get( offsetIReg( sz, getVexNvvvv(pfx), False ),
+                              szToITy(sz) ));
+   } else {
+      return IRExpr_Get( offsetIReg( sz, getVexNvvvv(pfx), False ),
+                         szToITy(sz) );
+   }
+}
+
+static
+void putIRegV ( Int sz, Prefix pfx, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == szToITy(sz));
+   if (sz == 4) {
+      e = unop(Iop_32Uto64,e);
+   }
+   stmt( IRStmt_Put( offsetIReg( sz, getVexNvvvv(pfx), False ), e ) );
+}
+
+static
+const HChar* nameIRegV ( Int sz, Prefix pfx )
+{
+   return nameIReg( sz, getVexNvvvv(pfx), False );
+}
+
+
+
+/* Produce the guest state offset for a reference to the 'e' register
+   field in a modrm byte, taking into account REX (or its absence),
+   and the size of the access.  eregOfRexRM will assert if mod_reg_rm
+   denotes a memory access rather than a register access.
+*/
+static UInt offsetIRegE ( Int sz, Prefix pfx, UChar mod_reg_rm )
+{
+   UInt reg;
+   vassert(host_endness == VexEndnessLE);
+   vassert(IS_VALID_PFX(pfx));
+   vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
+   reg = eregOfRexRM( pfx, mod_reg_rm );
+   return offsetIReg( sz, reg, toBool(sz == 1 && !haveREX(pfx)) );
+}
+
+static 
+IRExpr* getIRegE ( Int sz, Prefix pfx, UChar mod_reg_rm )
+{
+   if (sz == 4) {
+      sz = 8;
+      return unop(Iop_64to32,
+                  IRExpr_Get( offsetIRegE( sz, pfx, mod_reg_rm ),
+                              szToITy(sz) ));
+   } else {
+      return IRExpr_Get( offsetIRegE( sz, pfx, mod_reg_rm ),
+                         szToITy(sz) );
+   }
+}
+
+static 
+void putIRegE ( Int sz, Prefix pfx, UChar mod_reg_rm, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == szToITy(sz));
+   if (sz == 4) {
+      e = unop(Iop_32Uto64,e);
+   }
+   stmt( IRStmt_Put( offsetIRegE( sz, pfx, mod_reg_rm ), e ) );
+}
+
+static
+const HChar* nameIRegE ( Int sz, Prefix pfx, UChar mod_reg_rm )
+{
+   return nameIReg( sz, eregOfRexRM(pfx,mod_reg_rm),
+                        toBool(sz==1 && !haveREX(pfx)) );
+}
+
+
+/*------------------------------------------------------------*/
+/*--- For dealing with XMM registers                       ---*/
+/*------------------------------------------------------------*/
+
static Int ymmGuestRegOffset ( UInt ymmreg )
{
   /* Map a YMM register number (0 .. 15) to its guest-state offset;
      any other number indicates a decoder bug and panics. */
   switch (ymmreg) {
      case 0:  return OFFB_YMM0;
      case 1:  return OFFB_YMM1;
      case 2:  return OFFB_YMM2;
      case 3:  return OFFB_YMM3;
      case 4:  return OFFB_YMM4;
      case 5:  return OFFB_YMM5;
      case 6:  return OFFB_YMM6;
      case 7:  return OFFB_YMM7;
      case 8:  return OFFB_YMM8;
      case 9:  return OFFB_YMM9;
      case 10: return OFFB_YMM10;
      case 11: return OFFB_YMM11;
      case 12: return OFFB_YMM12;
      case 13: return OFFB_YMM13;
      case 14: return OFFB_YMM14;
      case 15: return OFFB_YMM15;
      default: vpanic("ymmGuestRegOffset(amd64)");
   }
}
+
+static Int xmmGuestRegOffset ( UInt xmmreg )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   return ymmGuestRegOffset( xmmreg );
+}
+
+/* Lanes of vector registers are always numbered from zero being the
+   least significant lane (rightmost in the register).  */
+
+static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   vassert(laneno >= 0 && laneno < 8);
+   return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
+}
+
+static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   vassert(laneno >= 0 && laneno < 4);
+   return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
+}
+
+static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   vassert(laneno >= 0 && laneno < 2);
+   return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
+}
+
+static Int ymmGuestRegLane128offset ( UInt ymmreg, Int laneno )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   vassert(laneno >= 0 && laneno < 2);
+   return ymmGuestRegOffset( ymmreg ) + 16 * laneno;
+}
+
+static Int ymmGuestRegLane64offset ( UInt ymmreg, Int laneno )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   vassert(laneno >= 0 && laneno < 4);
+   return ymmGuestRegOffset( ymmreg ) + 8 * laneno;
+}
+
+static Int ymmGuestRegLane32offset ( UInt ymmreg, Int laneno )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   vassert(laneno >= 0 && laneno < 8);
+   return ymmGuestRegOffset( ymmreg ) + 4 * laneno;
+}
+
+static IRExpr* getXMMReg ( UInt xmmreg )
+{
+   return IRExpr_Get( xmmGuestRegOffset(xmmreg), Ity_V128 );
+}
+
+static IRExpr* getXMMRegLane64 ( UInt xmmreg, Int laneno )
+{
+   return IRExpr_Get( xmmGuestRegLane64offset(xmmreg,laneno), Ity_I64 );
+}
+
+static IRExpr* getXMMRegLane64F ( UInt xmmreg, Int laneno )
+{
+   return IRExpr_Get( xmmGuestRegLane64offset(xmmreg,laneno), Ity_F64 );
+}
+
+static IRExpr* getXMMRegLane32 ( UInt xmmreg, Int laneno )
+{
+   return IRExpr_Get( xmmGuestRegLane32offset(xmmreg,laneno), Ity_I32 );
+}
+
+static IRExpr* getXMMRegLane32F ( UInt xmmreg, Int laneno )
+{
+   return IRExpr_Get( xmmGuestRegLane32offset(xmmreg,laneno), Ity_F32 );
+}
+
+static IRExpr* getXMMRegLane16 ( UInt xmmreg, Int laneno )
+{
+  return IRExpr_Get( xmmGuestRegLane16offset(xmmreg,laneno), Ity_I16 );
+}
+
+static void putXMMReg ( UInt xmmreg, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_V128);
+   stmt( IRStmt_Put( xmmGuestRegOffset(xmmreg), e ) );
+}
+
+static void putXMMRegLane64 ( UInt xmmreg, Int laneno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I64);
+   stmt( IRStmt_Put( xmmGuestRegLane64offset(xmmreg,laneno), e ) );
+}
+
+static void putXMMRegLane64F ( UInt xmmreg, Int laneno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_F64);
+   stmt( IRStmt_Put( xmmGuestRegLane64offset(xmmreg,laneno), e ) );
+}
+
+static void putXMMRegLane32F ( UInt xmmreg, Int laneno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_F32);
+   stmt( IRStmt_Put( xmmGuestRegLane32offset(xmmreg,laneno), e ) );
+}
+
+static void putXMMRegLane32 ( UInt xmmreg, Int laneno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I32);
+   stmt( IRStmt_Put( xmmGuestRegLane32offset(xmmreg,laneno), e ) );
+}
+
+static IRExpr* getYMMReg ( UInt xmmreg )
+{
+   return IRExpr_Get( ymmGuestRegOffset(xmmreg), Ity_V256 );
+}
+
+static IRExpr* getYMMRegLane128 ( UInt ymmreg, Int laneno )
+{
+   return IRExpr_Get( ymmGuestRegLane128offset(ymmreg,laneno), Ity_V128 );
+}
+
+static IRExpr* getYMMRegLane64 ( UInt ymmreg, Int laneno )
+{
+   return IRExpr_Get( ymmGuestRegLane64offset(ymmreg,laneno), Ity_I64 );
+}
+
+static IRExpr* getYMMRegLane32 ( UInt ymmreg, Int laneno )
+{
+   return IRExpr_Get( ymmGuestRegLane32offset(ymmreg,laneno), Ity_I32 );
+}
+
+static void putYMMReg ( UInt ymmreg, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_V256);
+   stmt( IRStmt_Put( ymmGuestRegOffset(ymmreg), e ) );
+}
+
+static void putYMMRegLane128 ( UInt ymmreg, Int laneno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_V128);
+   stmt( IRStmt_Put( ymmGuestRegLane128offset(ymmreg,laneno), e ) );
+}
+
+static void putYMMRegLane64F ( UInt ymmreg, Int laneno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_F64);
+   stmt( IRStmt_Put( ymmGuestRegLane64offset(ymmreg,laneno), e ) );
+}
+
+static void putYMMRegLane64 ( UInt ymmreg, Int laneno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I64);
+   stmt( IRStmt_Put( ymmGuestRegLane64offset(ymmreg,laneno), e ) );
+}
+
+static void putYMMRegLane32F ( UInt ymmreg, Int laneno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_F32);
+   stmt( IRStmt_Put( ymmGuestRegLane32offset(ymmreg,laneno), e ) );
+}
+
+static void putYMMRegLane32 ( UInt ymmreg, Int laneno, IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I32);
+   stmt( IRStmt_Put( ymmGuestRegLane32offset(ymmreg,laneno), e ) );
+}
+
+static IRExpr* mkV128 ( UShort mask )
+{
+   return IRExpr_Const(IRConst_V128(mask));
+}
+
+/* Write the low half of a YMM reg and zero out the upper half. */
+static void putYMMRegLoAndZU ( UInt ymmreg, IRExpr* e )
+{
+   putYMMRegLane128( ymmreg, 0, e );
+   putYMMRegLane128( ymmreg, 1, mkV128(0) );
+}
+
+static IRExpr* mkAnd1 ( IRExpr* x, IRExpr* y )
+{
+   vassert(typeOfIRExpr(irsb->tyenv,x) == Ity_I1);
+   vassert(typeOfIRExpr(irsb->tyenv,y) == Ity_I1);
+   return unop(Iop_64to1, 
+               binop(Iop_And64, 
+                     unop(Iop_1Uto64,x), 
+                     unop(Iop_1Uto64,y)));
+}
+
+/* Generate a compare-and-swap operation, operating on memory at
+   'addr'.  The expected value is 'expVal' and the new value is
+   'newVal'.  If the operation fails, then transfer control (with a
+   no-redir jump (XXX no -- see comment at top of this file)) to
+   'restart_point', which is presumably the address of the guest
+   instruction again -- retrying, essentially. */
static void casLE ( IRExpr* addr, IRExpr* expVal, IRExpr* newVal,
                    Addr64 restart_point )
{
   IRCAS* cas;
   IRType tyE    = typeOfIRExpr(irsb->tyenv, expVal);
   IRType tyN    = typeOfIRExpr(irsb->tyenv, newVal);
   IRTemp oldTmp = newTemp(tyE);   /* receives the old in-memory value */
   IRTemp expTmp = newTemp(tyE);
   /* Expected and new values must be same-sized integers. */
   vassert(tyE == tyN);
   vassert(tyE == Ity_I64 || tyE == Ity_I32
           || tyE == Ity_I16 || tyE == Ity_I8);
   assign(expTmp, expVal);
   /* Single (not double) CAS: the high-half fields are NULL. */
   cas = mkIRCAS( IRTemp_INVALID, oldTmp, Iend_LE, addr, 
                  NULL, mkexpr(expTmp), NULL, newVal );
   stmt( IRStmt_CAS(cas) );
   /* If the old value differs from the expected one, the swap did
      not happen: exit back to restart_point so the guest insn is
      retried. */
   stmt( IRStmt_Exit(
            binop( mkSizedOp(tyE,Iop_CasCmpNE8),
                   mkexpr(oldTmp), mkexpr(expTmp) ),
            Ijk_Boring, /*Ijk_NoRedir*/
            IRConst_U64( restart_point ),
            OFFB_RIP
         ));
}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for %rflags.                                 ---*/
+/*------------------------------------------------------------*/
+
+/* -------------- Evaluating the flags-thunk. -------------- */
+
/* Build IR to calculate all the eflags from stored
   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression ::
   Ity_I64. */
static IRExpr* mk_amd64g_calculate_rflags_all ( void )
{
   /* Pass the entire flags thunk (OP, DEP1, DEP2, NDEP) to the
      helper. */
   IRExpr** args
      = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I64),
                       IRExpr_Get(OFFB_CC_DEP1, Ity_I64),
                       IRExpr_Get(OFFB_CC_DEP2, Ity_I64),
                       IRExpr_Get(OFFB_CC_NDEP, Ity_I64) );
   IRExpr* call
      = mkIRExprCCall(
           Ity_I64,
           0/*regparm*/, 
           "amd64g_calculate_rflags_all", &amd64g_calculate_rflags_all,
           args
        );
   /* Exclude OP and NDEP from definedness checking.  We're only
      interested in DEP1 and DEP2.  (Bits 0 and 3 of mcx_mask mark
      args 0 and 3 as not-to-be-checked.) */
   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
   return call;
}
+
/* Build IR to calculate some particular condition from stored
   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression ::
   Ity_Bit. */
static IRExpr* mk_amd64g_calculate_condition ( AMD64Condcode cond )
{
   /* Arg 0 is the condition code to evaluate; args 1..4 are the
      flags thunk. */
   IRExpr** args
      = mkIRExprVec_5( mkU64(cond),
                       IRExpr_Get(OFFB_CC_OP,   Ity_I64),
                       IRExpr_Get(OFFB_CC_DEP1, Ity_I64),
                       IRExpr_Get(OFFB_CC_DEP2, Ity_I64),
                       IRExpr_Get(OFFB_CC_NDEP, Ity_I64) );
   IRExpr* call
      = mkIRExprCCall(
           Ity_I64,
           0/*regparm*/, 
           "amd64g_calculate_condition", &amd64g_calculate_condition,
           args
        );
   /* Exclude the requested condition, OP and NDEP from definedness
      checking.  We're only interested in DEP1 and DEP2. */
   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<1) | (1<<4);
   /* The helper returns I64; narrow its 0/1 result to Ity_I1. */
   return unop(Iop_64to1, call);
}
+
/* Build IR to calculate just the carry flag from stored
   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression :: Ity_I64. */
static IRExpr* mk_amd64g_calculate_rflags_c ( void )
{
   /* Pass the entire flags thunk (OP, DEP1, DEP2, NDEP) to the
      helper. */
   IRExpr** args
      = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I64),
                       IRExpr_Get(OFFB_CC_DEP1, Ity_I64),
                       IRExpr_Get(OFFB_CC_DEP2, Ity_I64),
                       IRExpr_Get(OFFB_CC_NDEP, Ity_I64) );
   IRExpr* call
      = mkIRExprCCall(
           Ity_I64,
           0/*regparm*/, 
           "amd64g_calculate_rflags_c", &amd64g_calculate_rflags_c,
           args
        );
   /* Exclude OP and NDEP from definedness checking.  We're only
      interested in DEP1 and DEP2. */
   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
   return call;
}
+
+
+/* -------------- Building the flags-thunk. -------------- */
+
+/* The machinery in this section builds the flag-thunk following a
+   flag-setting operation.  Hence the various setFlags_* functions.
+*/
+
+static Bool isAddSub ( IROp op8 )
+{
+   return toBool(op8 == Iop_Add8 || op8 == Iop_Sub8);
+}
+
+static Bool isLogic ( IROp op8 )
+{
+   return toBool(op8 == Iop_And8 || op8 == Iop_Or8 || op8 == Iop_Xor8);
+}
+
+/* U-widen 1/8/16/32/64 bit int expr to 64. */
+static IRExpr* widenUto64 ( IRExpr* e )
+{
+   switch (typeOfIRExpr(irsb->tyenv,e)) {
+      case Ity_I64: return e;
+      case Ity_I32: return unop(Iop_32Uto64, e);
+      case Ity_I16: return unop(Iop_16Uto64, e);
+      case Ity_I8:  return unop(Iop_8Uto64, e);
+      case Ity_I1:  return unop(Iop_1Uto64, e);
+      default: vpanic("widenUto64");
+   }
+}
+
/* S-widen 8/16/32/64 bit int expr to 64.  (All paths produce an
   Ity_I64 result; an already-64-bit expression is returned
   unchanged.) */
static IRExpr* widenSto64 ( IRExpr* e )
{
   switch (typeOfIRExpr(irsb->tyenv,e)) {
      case Ity_I64: return e;
      case Ity_I32: return unop(Iop_32Sto64, e);
      case Ity_I16: return unop(Iop_16Sto64, e);
      case Ity_I8:  return unop(Iop_8Sto64, e);
      default: vpanic("widenSto64");
   }
}
+
+/* Narrow 8/16/32/64 bit int expr to 8/16/32/64.  Clearly only some
+   of these combinations make sense. */
+static IRExpr* narrowTo ( IRType dst_ty, IRExpr* e )
+{
+   IRType src_ty = typeOfIRExpr(irsb->tyenv,e);
+   if (src_ty == dst_ty)
+      return e;
+   if (src_ty == Ity_I32 && dst_ty == Ity_I16)
+      return unop(Iop_32to16, e);
+   if (src_ty == Ity_I32 && dst_ty == Ity_I8)
+      return unop(Iop_32to8, e);
+   if (src_ty == Ity_I64 && dst_ty == Ity_I32)
+      return unop(Iop_64to32, e);
+   if (src_ty == Ity_I64 && dst_ty == Ity_I16)
+      return unop(Iop_64to16, e);
+   if (src_ty == Ity_I64 && dst_ty == Ity_I8)
+      return unop(Iop_64to8, e);
+
+   vex_printf("\nsrc, dst tys are: ");
+   ppIRType(src_ty);
+   vex_printf(", ");
+   ppIRType(dst_ty);
+   vex_printf("\n");
+   vpanic("narrowTo(amd64)");
+}
+
+
/* Set the flags thunk OP, DEP1 and DEP2 fields.  The supplied op is
   auto-sized up to the real op. */

static 
void setFlags_DEP1_DEP2 ( IROp op8, IRTemp dep1, IRTemp dep2, IRType ty )
{
   /* Size adjustment 0..3 for B/W/L/Q; added to the base thunk op.
      Relies on the AMD64G_CC_OP_*{B,W,L,Q} constants being
      consecutive -- TODO confirm against their definitions. */
   Int ccOp = 0;
   switch (ty) {
      case Ity_I8:  ccOp = 0; break;
      case Ity_I16: ccOp = 1; break;
      case Ity_I32: ccOp = 2; break;
      case Ity_I64: ccOp = 3; break;
      default: vassert(0);
   }
   switch (op8) {
      case Iop_Add8: ccOp += AMD64G_CC_OP_ADDB;   break;
      case Iop_Sub8: ccOp += AMD64G_CC_OP_SUBB;   break;
      default:       ppIROp(op8);
                     vpanic("setFlags_DEP1_DEP2(amd64)");
   }
   /* DEP1 and DEP2 hold the two operands, zero-extended to 64 bits. */
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(ccOp)) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dep1))) );
   stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(mkexpr(dep2))) );
}
+
+
/* Set the OP and DEP1 fields only, and write zero to DEP2. */

static 
void setFlags_DEP1 ( IROp op8, IRTemp dep1, IRType ty )
{
   /* Size adjustment 0..3 for B/W/L/Q; added to the base thunk op. */
   Int ccOp = 0;
   switch (ty) {
      case Ity_I8:  ccOp = 0; break;
      case Ity_I16: ccOp = 1; break;
      case Ity_I32: ccOp = 2; break;
      case Ity_I64: ccOp = 3; break;
      default: vassert(0);
   }
   /* All three logical ops share the LOGIC thunk operation. */
   switch (op8) {
      case Iop_Or8:
      case Iop_And8:
      case Iop_Xor8: ccOp += AMD64G_CC_OP_LOGICB; break;
      default:       ppIROp(op8);
                     vpanic("setFlags_DEP1(amd64)");
   }
   /* DEP1 holds the result; DEP2 is unused for logic ops. */
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(ccOp)) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dep1))) );
   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0)) );
}
+
+
/* For shift operations, we put in the result and the undershifted
   result.  Except if the shift amount is zero, the thunk is left
   unchanged. */

static void setFlags_DEP1_DEP2_shift ( IROp    op64,
                                       IRTemp  res,
                                       IRTemp  resUS,
                                       IRType  ty,
                                       IRTemp  guard )
{
   /* Size adjustment 0..3 for B/W/L/Q; added to the base thunk op. */
   Int ccOp = 0;
   switch (ty) {
      case Ity_I8:  ccOp = 0; break;
      case Ity_I16: ccOp = 1; break;
      case Ity_I32: ccOp = 2; break;
      case Ity_I64: ccOp = 3; break;
      default: vassert(0);
   }

   vassert(guard);

   /* Both kinds of right shifts are handled by the same thunk
      operation. */
   switch (op64) {
      case Iop_Shr64:
      case Iop_Sar64: ccOp += AMD64G_CC_OP_SHRB; break;
      case Iop_Shl64: ccOp += AMD64G_CC_OP_SHLB; break;
      default:        ppIROp(op64);
                      vpanic("setFlags_DEP1_DEP2_shift(amd64)");
   }

   /* guard :: Ity_I8.  We need to convert it to I1. */
   IRTemp guardB = newTemp(Ity_I1);
   assign( guardB, binop(Iop_CmpNE8, mkexpr(guard), mkU8(0)) );

   /* DEP1 contains the result, DEP2 contains the undershifted value.
      Each field is written conditionally: when the shift amount is
      zero (guardB false) the previous thunk contents are preserved,
      because a zero-count shift leaves the flags unchanged. */
   stmt( IRStmt_Put( OFFB_CC_OP,
                     IRExpr_ITE( mkexpr(guardB),
                                 mkU64(ccOp),
                                 IRExpr_Get(OFFB_CC_OP,Ity_I64) ) ));
   stmt( IRStmt_Put( OFFB_CC_DEP1,
                     IRExpr_ITE( mkexpr(guardB),
                                 widenUto64(mkexpr(res)),
                                 IRExpr_Get(OFFB_CC_DEP1,Ity_I64) ) ));
   stmt( IRStmt_Put( OFFB_CC_DEP2, 
                     IRExpr_ITE( mkexpr(guardB),
                                 widenUto64(mkexpr(resUS)),
                                 IRExpr_Get(OFFB_CC_DEP2,Ity_I64) ) ));
}
+
+
/* For the inc/dec case, we store in DEP1 the result value and in NDEP
   the former value of the carry flag, which unfortunately we have to
   compute. */

static void setFlags_INC_DEC ( Bool inc, IRTemp res, IRType ty )
{
   Int ccOp = inc ? AMD64G_CC_OP_INCB : AMD64G_CC_OP_DECB;

   /* Size adjustment 0..3 for B/W/L/Q, added to the base thunk op. */
   switch (ty) {
      case Ity_I8:  ccOp += 0; break;
      case Ity_I16: ccOp += 1; break;
      case Ity_I32: ccOp += 2; break;
      case Ity_I64: ccOp += 3; break;
      default: vassert(0);
   }
   
   /* This has to come first, because calculating the C flag 
      may require reading all four thunk fields.  Writing CC_OP etc
      before computing the old carry would corrupt the inputs to that
      computation. */
   stmt( IRStmt_Put( OFFB_CC_NDEP, mk_amd64g_calculate_rflags_c()) );
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(ccOp)) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(res))) );
   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0)) );
}
+
+
+/* Multiplies are pretty much like add and sub: DEP1 and DEP2 hold the
+   two arguments. */
+
+static
+void setFlags_MUL ( IRType ty, IRTemp arg1, IRTemp arg2, ULong base_op )
+{
+   switch (ty) {
+      case Ity_I8:
+         stmt( IRStmt_Put( OFFB_CC_OP, mkU64(base_op+0) ) );
+         break;
+      case Ity_I16:
+         stmt( IRStmt_Put( OFFB_CC_OP, mkU64(base_op+1) ) );
+         break;
+      case Ity_I32:
+         stmt( IRStmt_Put( OFFB_CC_OP, mkU64(base_op+2) ) );
+         break;
+      case Ity_I64:
+         stmt( IRStmt_Put( OFFB_CC_OP, mkU64(base_op+3) ) );
+         break;
+      default:
+         vpanic("setFlags_MUL(amd64)");
+   }
+   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(arg1)) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(mkexpr(arg2)) ));
+}
+
+
+/* -------------- Condition codes. -------------- */
+
+/* Condition codes, using the AMD encoding.  */
+
+static const HChar* name_AMD64Condcode ( AMD64Condcode cond )
+{
+   switch (cond) {
+      case AMD64CondO:      return "o";
+      case AMD64CondNO:     return "no";
+      case AMD64CondB:      return "b";
+      case AMD64CondNB:     return "ae"; /*"nb";*/
+      case AMD64CondZ:      return "e"; /*"z";*/
+      case AMD64CondNZ:     return "ne"; /*"nz";*/
+      case AMD64CondBE:     return "be";
+      case AMD64CondNBE:    return "a"; /*"nbe";*/
+      case AMD64CondS:      return "s";
+      case AMD64CondNS:     return "ns";
+      case AMD64CondP:      return "p";
+      case AMD64CondNP:     return "np";
+      case AMD64CondL:      return "l";
+      case AMD64CondNL:     return "ge"; /*"nl";*/
+      case AMD64CondLE:     return "le";
+      case AMD64CondNLE:    return "g"; /*"nle";*/
+      case AMD64CondAlways: return "ALWAYS";
+      default: vpanic("name_AMD64Condcode");
+   }
+}
+
+static 
+AMD64Condcode positiveIse_AMD64Condcode ( AMD64Condcode  cond,
+                                          /*OUT*/Bool*   needInvert )
+{
+   vassert(cond >= AMD64CondO && cond <= AMD64CondNLE);
+   if (cond & 1) {
+      *needInvert = True;
+      return cond-1;
+   } else {
+      *needInvert = False;
+      return cond;
+   }
+}
+
+
+/* -------------- Helpers for ADD/SUB with carry. -------------- */
+
/* Given ta1, ta2 and tres, compute tres = ADC(ta1,ta2) and set flags
   appropriately.

   Optionally, generate a store for the 'tres' value.  This can either
   be a normal store, or it can be a cas-with-possible-failure style
   store:

   if taddr is IRTemp_INVALID, then no store is generated.

   if taddr is not IRTemp_INVALID, then a store (using taddr as
   the address) is generated:

     if texpVal is IRTemp_INVALID then a normal store is
     generated, and restart_point must be zero (it is irrelevant).

     if texpVal is not IRTemp_INVALID then a cas-style store is
     generated.  texpVal is the expected value, restart_point
     is the restart point if the store fails, and texpVal must
     have the same type as tres.   

*/
static void helper_ADC ( Int sz,
                         IRTemp tres, IRTemp ta1, IRTemp ta2,
                         /* info about optional store: */
                         IRTemp taddr, IRTemp texpVal, Addr64 restart_point )
{
   UInt    thunkOp;
   IRType  ty    = szToITy(sz);
   IRTemp  oldc  = newTemp(Ity_I64);
   IRTemp  oldcn = newTemp(ty);          /* old carry, narrowed to ty */
   IROp    plus  = mkSizedOp(ty, Iop_Add8);
   IROp    xor   = mkSizedOp(ty, Iop_Xor8);

   vassert(typeOfIRTemp(irsb->tyenv, tres) == ty);

   /* Pick the size-specific ADC thunk operation. */
   switch (sz) {
      case 8:  thunkOp = AMD64G_CC_OP_ADCQ; break;
      case 4:  thunkOp = AMD64G_CC_OP_ADCL; break;
      case 2:  thunkOp = AMD64G_CC_OP_ADCW; break;
      case 1:  thunkOp = AMD64G_CC_OP_ADCB; break;
      default: vassert(0);
   }

   /* oldc = old carry flag, 0 or 1 */
   assign( oldc,  binop(Iop_And64,
                        mk_amd64g_calculate_rflags_c(),
                        mkU64(1)) );

   assign( oldcn, narrowTo(ty, mkexpr(oldc)) );

   /* tres = ta1 + ta2 + old carry */
   assign( tres, binop(plus,
                       binop(plus,mkexpr(ta1),mkexpr(ta2)),
                       mkexpr(oldcn)) );

   /* Possibly generate a store of 'tres' to 'taddr'.  See comment at
      start of this function. */
   if (taddr != IRTemp_INVALID) {
      if (texpVal == IRTemp_INVALID) {
         vassert(restart_point == 0);
         storeLE( mkexpr(taddr), mkexpr(tres) );
      } else {
         vassert(typeOfIRTemp(irsb->tyenv, texpVal) == ty);
         /* .. and hence 'texpVal' has the same type as 'tres'. */
         casLE( mkexpr(taddr),
                mkexpr(texpVal), mkexpr(tres), restart_point );
      }
   }

   /* Thunk: DEP1 = arg1, DEP2 = arg2 ^ old-carry, NDEP = old carry.
      NOTE(review): the XOR encoding of DEP2 presumably lets the
      flags helper recover both ta2 and the carry-in -- confirm
      against the amd64g_calculate_* helper implementations. */
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(thunkOp) ) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(ta1))  ));
   stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(binop(xor, mkexpr(ta2), 
                                                         mkexpr(oldcn)) )) );
   stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(oldc) ) );
}
+
+
/* Given ta1, ta2 and tres, compute tres = SBB(ta1,ta2) and set flags
   appropriately.  As with helper_ADC, possibly generate a store of
   the result -- see comments on helper_ADC for details.
*/
static void helper_SBB ( Int sz,
                         IRTemp tres, IRTemp ta1, IRTemp ta2,
                         /* info about optional store: */
                         IRTemp taddr, IRTemp texpVal, Addr64 restart_point )
{
   UInt    thunkOp;
   IRType  ty    = szToITy(sz);
   IRTemp  oldc  = newTemp(Ity_I64);
   IRTemp  oldcn = newTemp(ty);          /* old carry, narrowed to ty */
   IROp    minus = mkSizedOp(ty, Iop_Sub8);
   IROp    xor   = mkSizedOp(ty, Iop_Xor8);

   vassert(typeOfIRTemp(irsb->tyenv, tres) == ty);

   /* Pick the size-specific SBB thunk operation. */
   switch (sz) {
      case 8:  thunkOp = AMD64G_CC_OP_SBBQ; break;
      case 4:  thunkOp = AMD64G_CC_OP_SBBL; break;
      case 2:  thunkOp = AMD64G_CC_OP_SBBW; break;
      case 1:  thunkOp = AMD64G_CC_OP_SBBB; break;
      default: vassert(0);
   }

   /* oldc = old carry flag, 0 or 1 */
   assign( oldc, binop(Iop_And64,
                       mk_amd64g_calculate_rflags_c(),
                       mkU64(1)) );

   assign( oldcn, narrowTo(ty, mkexpr(oldc)) );

   /* tres = ta1 - ta2 - old carry (borrow) */
   assign( tres, binop(minus,
                       binop(minus,mkexpr(ta1),mkexpr(ta2)),
                       mkexpr(oldcn)) );

   /* Possibly generate a store of 'tres' to 'taddr'.  See comment at
      start of this function. */
   if (taddr != IRTemp_INVALID) {
      if (texpVal == IRTemp_INVALID) {
         vassert(restart_point == 0);
         storeLE( mkexpr(taddr), mkexpr(tres) );
      } else {
         vassert(typeOfIRTemp(irsb->tyenv, texpVal) == ty);
         /* .. and hence 'texpVal' has the same type as 'tres'. */
         casLE( mkexpr(taddr),
                mkexpr(texpVal), mkexpr(tres), restart_point );
      }
   }

   /* Thunk encoding mirrors helper_ADC: DEP2 = arg2 ^ old-carry,
      NDEP = old carry (see note there). */
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(thunkOp) ) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(ta1) )) );
   stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(binop(xor, mkexpr(ta2), 
                                                         mkexpr(oldcn)) )) );
   stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(oldc) ) );
}
+
+
+/* -------------- Helpers for disassembly printing. -------------- */
+
+static const HChar* nameGrp1 ( Int opc_aux )
+{
+   static const HChar* grp1_names[8] 
+     = { "add", "or", "adc", "sbb", "and", "sub", "xor", "cmp" };
+   if (opc_aux < 0 || opc_aux > 7) vpanic("nameGrp1(amd64)");
+   return grp1_names[opc_aux];
+}
+
+static const HChar* nameGrp2 ( Int opc_aux )
+{
+   static const HChar* grp2_names[8] 
+     = { "rol", "ror", "rcl", "rcr", "shl", "shr", "shl", "sar" };
+   if (opc_aux < 0 || opc_aux > 7) vpanic("nameGrp2(amd64)");
+   return grp2_names[opc_aux];
+}
+
+static const HChar* nameGrp4 ( Int opc_aux )
+{
+   static const HChar* grp4_names[8] 
+     = { "inc", "dec", "???", "???", "???", "???", "???", "???" };
+   if (opc_aux < 0 || opc_aux > 1) vpanic("nameGrp4(amd64)");
+   return grp4_names[opc_aux];
+}
+
+static const HChar* nameGrp5 ( Int opc_aux )
+{
+   static const HChar* grp5_names[8] 
+     = { "inc", "dec", "call*", "call*", "jmp*", "jmp*", "push", "???" };
+   if (opc_aux < 0 || opc_aux > 6) vpanic("nameGrp5(amd64)");
+   return grp5_names[opc_aux];
+}
+
+static const HChar* nameGrp8 ( Int opc_aux )
+{
+   static const HChar* grp8_names[8] 
+      = { "???", "???", "???", "???", "bt", "bts", "btr", "btc" };
+   if (opc_aux < 4 || opc_aux > 7) vpanic("nameGrp8(amd64)");
+   return grp8_names[opc_aux];
+}
+
+//.. static const HChar* nameSReg ( UInt sreg )
+//.. {
+//..    switch (sreg) {
+//..       case R_ES: return "%es";
+//..       case R_CS: return "%cs";
+//..       case R_SS: return "%ss";
+//..       case R_DS: return "%ds";
+//..       case R_FS: return "%fs";
+//..       case R_GS: return "%gs";
+//..       default: vpanic("nameSReg(x86)");
+//..    }
+//.. }
+
+static const HChar* nameMMXReg ( Int mmxreg )
+{
+   static const HChar* mmx_names[8] 
+     = { "%mm0", "%mm1", "%mm2", "%mm3", "%mm4", "%mm5", "%mm6", "%mm7" };
+   if (mmxreg < 0 || mmxreg > 7) vpanic("nameMMXReg(amd64,guest)");
+   return mmx_names[mmxreg];
+}
+
+static const HChar* nameXMMReg ( Int xmmreg )
+{
+   static const HChar* xmm_names[16] 
+     = { "%xmm0",  "%xmm1",  "%xmm2",  "%xmm3", 
+         "%xmm4",  "%xmm5",  "%xmm6",  "%xmm7", 
+         "%xmm8",  "%xmm9",  "%xmm10", "%xmm11", 
+         "%xmm12", "%xmm13", "%xmm14", "%xmm15" };
+   if (xmmreg < 0 || xmmreg > 15) vpanic("nameXMMReg(amd64)");
+   return xmm_names[xmmreg];
+}
+ 
+static const HChar* nameMMXGran ( Int gran )
+{
+   switch (gran) {
+      case 0: return "b";
+      case 1: return "w";
+      case 2: return "d";
+      case 3: return "q";
+      default: vpanic("nameMMXGran(amd64,guest)");
+   }
+}
+
+static HChar nameISize ( Int size )
+{
+   switch (size) {
+      case 8: return 'q';
+      case 4: return 'l';
+      case 2: return 'w';
+      case 1: return 'b';
+      default: vpanic("nameISize(amd64)");
+   }
+}
+
+static const HChar* nameYMMReg ( Int ymmreg )
+{
+   static const HChar* ymm_names[16] 
+     = { "%ymm0",  "%ymm1",  "%ymm2",  "%ymm3", 
+         "%ymm4",  "%ymm5",  "%ymm6",  "%ymm7", 
+         "%ymm8",  "%ymm9",  "%ymm10", "%ymm11", 
+         "%ymm12", "%ymm13", "%ymm14", "%ymm15" };
+   if (ymmreg < 0 || ymmreg > 15) vpanic("nameYMMReg(amd64)");
+   return ymm_names[ymmreg];
+}
+
+
+/*------------------------------------------------------------*/
+/*--- JMP helpers                                          ---*/
+/*------------------------------------------------------------*/
+
+static void jmp_lit( /*MOD*/DisResult* dres,
+                     IRJumpKind kind, Addr64 d64 )
+{
+   vassert(dres->whatNext    == Dis_Continue);
+   vassert(dres->len         == 0);
+   vassert(dres->continueAt  == 0);
+   vassert(dres->jk_StopHere == Ijk_INVALID);
+   dres->whatNext    = Dis_StopHere;
+   dres->jk_StopHere = kind;
+   stmt( IRStmt_Put( OFFB_RIP, mkU64(d64) ) );
+}
+
+static void jmp_treg( /*MOD*/DisResult* dres,
+                      IRJumpKind kind, IRTemp t )
+{
+   vassert(dres->whatNext    == Dis_Continue);
+   vassert(dres->len         == 0);
+   vassert(dres->continueAt  == 0);
+   vassert(dres->jk_StopHere == Ijk_INVALID);
+   dres->whatNext    = Dis_StopHere;
+   dres->jk_StopHere = kind;
+   stmt( IRStmt_Put( OFFB_RIP, mkexpr(t) ) );
+}
+
/* End the block with a conditional branch: go to 'd64_true' if 'cond'
   holds, otherwise to 'd64_false'. */
static 
void jcc_01 ( /*MOD*/DisResult* dres,
              AMD64Condcode cond, Addr64 d64_false, Addr64 d64_true )
{
   Bool          invert;
   AMD64Condcode condPos;
   /* Must be the first block-ending request for this instruction. */
   vassert(dres->whatNext    == Dis_Continue);
   vassert(dres->len         == 0);
   vassert(dres->continueAt  == 0);
   vassert(dres->jk_StopHere == Ijk_INVALID);
   dres->whatNext    = Dis_StopHere;
   dres->jk_StopHere = Ijk_Boring;
   /* Normalise to the even-numbered ("positive") form, so the
      side-exit always tests a positive condition. */
   condPos = positiveIse_AMD64Condcode ( cond, &invert );
   if (invert) {
      /* Inverted: side-exit to the FALSE target when the positive
         condition holds, fall through to the true target. */
      stmt( IRStmt_Exit( mk_amd64g_calculate_condition(condPos),
                         Ijk_Boring,
                         IRConst_U64(d64_false),
                         OFFB_RIP ) );
      stmt( IRStmt_Put( OFFB_RIP, mkU64(d64_true) ) );
   } else {
      /* Normal: side-exit to the true target when the condition
         holds, fall through to the false target. */
      stmt( IRStmt_Exit( mk_amd64g_calculate_condition(condPos),
                         Ijk_Boring,
                         IRConst_U64(d64_true),
                         OFFB_RIP ) );
      stmt( IRStmt_Put( OFFB_RIP, mkU64(d64_false) ) );
   }
}
+
/* Let new_rsp be the %rsp value after a call/return.  Let nia be the
   guest address of the next instruction to be executed.

   This function generates an AbiHint to say that -128(%rsp)
   .. -1(%rsp) should now be regarded as uninitialised.
*/
static 
void make_redzone_AbiHint ( const VexAbiInfo* vbi,
                            IRTemp new_rsp, IRTemp nia, const HChar* who )
{
   Int szB = vbi->guest_stack_redzone_size;
   vassert(szB >= 0);

   /* A bit of a kludge.  Currently the only AbI we've guested AMD64
      for is ELF.  So just check it's the expected 128 value
      (paranoia). */
   vassert(szB == 128);

   /* 'who' identifies the caller; printed only when debugging is
      enabled by hand. */
   if (0) vex_printf("AbiHint: %s\n", who);
   vassert(typeOfIRTemp(irsb->tyenv, new_rsp) == Ity_I64);
   vassert(typeOfIRTemp(irsb->tyenv, nia) == Ity_I64);
   /* Hint covers [new_rsp - szB, new_rsp), i.e. the red zone below
      the new stack pointer. */
   if (szB > 0)
      stmt( IRStmt_AbiHint( 
               binop(Iop_Sub64, mkexpr(new_rsp), mkU64(szB)), 
               szB,
               mkexpr(nia)
            ));
}
+
+
+/*------------------------------------------------------------*/
+/*--- Disassembling addressing modes                       ---*/
+/*------------------------------------------------------------*/
+
+static 
+const HChar* segRegTxt ( Prefix pfx )
+{
+   if (pfx & PFX_CS) return "%cs:";
+   if (pfx & PFX_DS) return "%ds:";
+   if (pfx & PFX_ES) return "%es:";
+   if (pfx & PFX_FS) return "%fs:";
+   if (pfx & PFX_GS) return "%gs:";
+   if (pfx & PFX_SS) return "%ss:";
+   return ""; /* no override */
+}
+
+
/* 'virtual' is an IRExpr* holding a virtual address.  Convert it to a
   linear address by adding any required segment override as indicated
   by sorb, and also dealing with any address size override
   present. */
static
IRExpr* handleAddrOverrides ( const VexAbiInfo* vbi, 
                              Prefix pfx, IRExpr* virtual )
{
   /* Note that the below are hacks that relies on the assumption
      that %fs or %gs are constant.
      Typically, %fs is always 0x63 on linux (in the main thread, it
      stays at value 0), %gs always 0x60 on Darwin, ... */
   /* --- segment overrides --- */
   if (pfx & PFX_FS) {
      if (vbi->guest_amd64_assume_fs_is_const) {
         /* return virtual + guest_FS_CONST. */
         virtual = binop(Iop_Add64, virtual,
                                    IRExpr_Get(OFFB_FS_CONST, Ity_I64));
      } else {
         /* No constant %fs base available; give up. */
         unimplemented("amd64 %fs segment override");
      }
   }

   if (pfx & PFX_GS) {
      if (vbi->guest_amd64_assume_gs_is_const) {
         /* return virtual + guest_GS_CONST. */
         virtual = binop(Iop_Add64, virtual,
                                    IRExpr_Get(OFFB_GS_CONST, Ity_I64));
      } else {
         /* No constant %gs base available; give up. */
         unimplemented("amd64 %gs segment override");
      }
   }

   /* cs, ds, es and ss are simply ignored in 64-bit mode. */

   /* --- address size override --- */
   /* 0x67 prefix: truncate the address to 32 bits and zero-extend
      back to 64. */
   if (haveASO(pfx))
      virtual = unop(Iop_32Uto64, unop(Iop_64to32, virtual));

   return virtual;
}
+
+//.. {
+//..    Int    sreg;
+//..    IRType hWordTy;
+//..    IRTemp ldt_ptr, gdt_ptr, seg_selector, r64;
+//.. 
+//..    if (sorb == 0)
+//..       /* the common case - no override */
+//..       return virtual;
+//.. 
+//..    switch (sorb) {
+//..       case 0x3E: sreg = R_DS; break;
+//..       case 0x26: sreg = R_ES; break;
+//..       case 0x64: sreg = R_FS; break;
+//..       case 0x65: sreg = R_GS; break;
+//..       default: vpanic("handleAddrOverrides(x86,guest)");
+//..    }
+//.. 
+//..    hWordTy = sizeof(HWord)==4 ? Ity_I32 : Ity_I64;
+//.. 
+//..    seg_selector = newTemp(Ity_I32);
+//..    ldt_ptr      = newTemp(hWordTy);
+//..    gdt_ptr      = newTemp(hWordTy);
+//..    r64          = newTemp(Ity_I64);
+//.. 
+//..    assign( seg_selector, unop(Iop_16Uto32, getSReg(sreg)) );
+//..    assign( ldt_ptr, IRExpr_Get( OFFB_LDT, hWordTy ));
+//..    assign( gdt_ptr, IRExpr_Get( OFFB_GDT, hWordTy ));
+//.. 
+//..    /*
+//..    Call this to do the translation and limit checks: 
+//..    ULong x86g_use_seg_selector ( HWord ldt, HWord gdt,
+//..                                  UInt seg_selector, UInt virtual_addr )
+//..    */
+//..    assign( 
+//..       r64, 
+//..       mkIRExprCCall( 
+//..          Ity_I64, 
+//..          0/*regparms*/, 
+//..          "x86g_use_seg_selector", 
+//..          &x86g_use_seg_selector, 
+//..          mkIRExprVec_4( mkexpr(ldt_ptr), mkexpr(gdt_ptr), 
+//..                         mkexpr(seg_selector), virtual)
+//..       )
+//..    );
+//.. 
+//..    /* If the high 32 of the result are non-zero, there was a 
+//..       failure in address translation.  In which case, make a
+//..       quick exit.
+//..    */
+//..    stmt( 
+//..       IRStmt_Exit(
+//..          binop(Iop_CmpNE32, unop(Iop_64HIto32, mkexpr(r64)), mkU32(0)),
+//..          Ijk_MapFail,
+//..          IRConst_U32( guest_eip_curr_instr )
+//..       )
+//..    );
+//.. 
+//..    /* otherwise, here's the translated result. */
+//..    return unop(Iop_64to32, mkexpr(r64));
+//.. }
+
+
+/* Generate IR to calculate an address indicated by a ModRM and
+   following SIB bytes.  The expression, and the number of bytes in
+   the address mode, are returned (the latter in *len).  Note that
+   this fn should not be called if the R/M part of the address denotes
+   a register instead of memory.  If print_codegen is true, text of
+   the addressing mode is placed in buf.
+
+   The computed address is stored in a new tempreg, and the
+   identity of the tempreg is returned.
+
+   extra_bytes holds the number of bytes after the amode, as supplied
+   by the caller.  This is needed to make sense of %rip-relative
+   addresses.  Note that the value that *len is set to is only the
+   length of the amode itself and does not include the value supplied
+   in extra_bytes.
+ */
+
/* Copy an Ity_I64 address expression into a fresh temporary and
   return the temporary.  Used so disAMode can hand back an IRTemp. */
static IRTemp disAMode_copy2tmp ( IRExpr* addr64 )
{
   IRTemp tmp = newTemp(Ity_I64);
   assign( tmp, addr64 );
   return tmp;
}
+
+static 
+IRTemp disAMode ( /*OUT*/Int* len,
+                  const VexAbiInfo* vbi, Prefix pfx, Long delta, 
+                  /*OUT*/HChar* buf, Int extra_bytes )
+{
+   UChar mod_reg_rm = getUChar(delta);
+   delta++;
+
+   buf[0] = (UChar)0;
+   vassert(extra_bytes >= 0 && extra_bytes < 10);
+
+   /* squeeze out the reg field from mod_reg_rm, since a 256-entry
+      jump table seems a bit excessive. 
+   */
+   mod_reg_rm &= 0xC7;                         /* is now XX000YYY */
+   mod_reg_rm  = toUChar(mod_reg_rm | (mod_reg_rm >> 3));
+                                               /* is now XX0XXYYY */
+   mod_reg_rm &= 0x1F;                         /* is now 000XXYYY */
+   switch (mod_reg_rm) {
+
+      /* REX.B==0: (%rax) .. (%rdi), not including (%rsp) or (%rbp).
+         REX.B==1: (%r8)  .. (%r15), not including (%r12) or (%r13).
+      */
+      case 0x00: case 0x01: case 0x02: case 0x03: 
+      /* ! 04 */ /* ! 05 */ case 0x06: case 0x07:
+         { UChar rm = toUChar(mod_reg_rm & 7);
+           DIS(buf, "%s(%s)", segRegTxt(pfx), nameIRegRexB(8,pfx,rm));
+           *len = 1;
+           return disAMode_copy2tmp(
+                  handleAddrOverrides(vbi, pfx, getIRegRexB(8,pfx,rm)));
+         }
+
+      /* REX.B==0: d8(%rax) ... d8(%rdi), not including d8(%rsp) 
+         REX.B==1: d8(%r8)  ... d8(%r15), not including d8(%r12) 
+      */
+      case 0x08: case 0x09: case 0x0A: case 0x0B: 
+      /* ! 0C */ case 0x0D: case 0x0E: case 0x0F:
+         { UChar rm = toUChar(mod_reg_rm & 7);
+           Long d   = getSDisp8(delta);
+           if (d == 0) {
+              DIS(buf, "%s(%s)", segRegTxt(pfx), nameIRegRexB(8,pfx,rm));
+           } else {
+              DIS(buf, "%s%lld(%s)", segRegTxt(pfx), d, nameIRegRexB(8,pfx,rm));
+           }
+           *len = 2;
+           return disAMode_copy2tmp(
+                  handleAddrOverrides(vbi, pfx,
+                     binop(Iop_Add64,getIRegRexB(8,pfx,rm),mkU64(d))));
+         }
+
+      /* REX.B==0: d32(%rax) ... d32(%rdi), not including d32(%rsp)
+         REX.B==1: d32(%r8)  ... d32(%r15), not including d32(%r12)
+      */
+      case 0x10: case 0x11: case 0x12: case 0x13: 
+      /* ! 14 */ case 0x15: case 0x16: case 0x17:
+         { UChar rm = toUChar(mod_reg_rm & 7);
+           Long  d  = getSDisp32(delta);
+           DIS(buf, "%s%lld(%s)", segRegTxt(pfx), d, nameIRegRexB(8,pfx,rm));
+           *len = 5;
+           return disAMode_copy2tmp(
+                  handleAddrOverrides(vbi, pfx,
+                     binop(Iop_Add64,getIRegRexB(8,pfx,rm),mkU64(d))));
+         }
+
+      /* REX.B==0: a register, %rax .. %rdi.  This shouldn't happen. */
+      /* REX.B==1: a register, %r8  .. %r16.  This shouldn't happen. */
+      case 0x18: case 0x19: case 0x1A: case 0x1B:
+      case 0x1C: case 0x1D: case 0x1E: case 0x1F:
+         vpanic("disAMode(amd64): not an addr!");
+
+      /* RIP + disp32.  This assumes that guest_RIP_curr_instr is set
+         correctly at the start of handling each instruction. */
+      case 0x05: 
+         { Long d = getSDisp32(delta);
+           *len = 5;
+           DIS(buf, "%s%lld(%%rip)", segRegTxt(pfx), d);
+           /* We need to know the next instruction's start address.
+              Try and figure out what it is, record the guess, and ask
+              the top-level driver logic (bbToIR_AMD64) to check we
+              guessed right, after the instruction is completely
+              decoded. */
+           guest_RIP_next_mustcheck = True;
+           guest_RIP_next_assumed = guest_RIP_bbstart 
+                                    + delta+4 + extra_bytes;
+           return disAMode_copy2tmp( 
+                     handleAddrOverrides(vbi, pfx, 
+                        binop(Iop_Add64, mkU64(guest_RIP_next_assumed), 
+                                         mkU64(d))));
+         }
+
+      case 0x04: {
+         /* SIB, with no displacement.  Special cases:
+            -- %rsp cannot act as an index value.  
+               If index_r indicates %rsp, zero is used for the index.
+            -- when mod is zero and base indicates RBP or R13, base is 
+               instead a 32-bit sign-extended literal.
+            It's all madness, I tell you.  Extract %index, %base and 
+            scale from the SIB byte.  The value denoted is then:
+               | %index == %RSP && (%base == %RBP || %base == %R13)
+               = d32 following SIB byte
+               | %index == %RSP && !(%base == %RBP || %base == %R13)
+               = %base
+               | %index != %RSP && (%base == %RBP || %base == %R13)
+               = d32 following SIB byte + (%index << scale)
+               | %index != %RSP && !(%base == %RBP || %base == %R13)
+               = %base + (%index << scale)
+         */
+         UChar sib     = getUChar(delta);
+         UChar scale   = toUChar((sib >> 6) & 3);
+         UChar index_r = toUChar((sib >> 3) & 7);
+         UChar base_r  = toUChar(sib & 7);
+         /* correct since #(R13) == 8 + #(RBP) */
+         Bool  base_is_BPor13 = toBool(base_r == R_RBP);
+         Bool  index_is_SP    = toBool(index_r == R_RSP && 0==getRexX(pfx));
+         delta++;
+
+         if ((!index_is_SP) && (!base_is_BPor13)) {
+            if (scale == 0) {
+               DIS(buf, "%s(%s,%s)", segRegTxt(pfx), 
+                         nameIRegRexB(8,pfx,base_r), 
+                         nameIReg64rexX(pfx,index_r));
+            } else {
+               DIS(buf, "%s(%s,%s,%d)", segRegTxt(pfx), 
+                         nameIRegRexB(8,pfx,base_r), 
+                         nameIReg64rexX(pfx,index_r), 1<<scale);
+            }
+            *len = 2;
+            return
+               disAMode_copy2tmp( 
+               handleAddrOverrides(vbi, pfx,
+                  binop(Iop_Add64, 
+                        getIRegRexB(8,pfx,base_r),
+                        binop(Iop_Shl64, getIReg64rexX(pfx,index_r),
+                              mkU8(scale)))));
+         }
+
+         if ((!index_is_SP) && base_is_BPor13) {
+            Long d = getSDisp32(delta);
+            DIS(buf, "%s%lld(,%s,%d)", segRegTxt(pfx), d, 
+                      nameIReg64rexX(pfx,index_r), 1<<scale);
+            *len = 6;
+            return
+               disAMode_copy2tmp(
+               handleAddrOverrides(vbi, pfx, 
+                  binop(Iop_Add64,
+                        binop(Iop_Shl64, getIReg64rexX(pfx,index_r), 
+                                         mkU8(scale)),
+                        mkU64(d))));
+         }
+
+         if (index_is_SP && (!base_is_BPor13)) {
+            DIS(buf, "%s(%s)", segRegTxt(pfx), nameIRegRexB(8,pfx,base_r));
+            *len = 2;
+            return disAMode_copy2tmp(
+                   handleAddrOverrides(vbi, pfx, getIRegRexB(8,pfx,base_r)));
+         }
+
+         if (index_is_SP && base_is_BPor13) {
+            Long d = getSDisp32(delta);
+            DIS(buf, "%s%lld", segRegTxt(pfx), d);
+            *len = 6;
+            return disAMode_copy2tmp(
+                   handleAddrOverrides(vbi, pfx, mkU64(d)));
+         }
+
+         vassert(0);
+      }
+
+      /* SIB, with 8-bit displacement.  Special cases:
+         -- %esp cannot act as an index value.  
+            If index_r indicates %esp, zero is used for the index.
+         Denoted value is:
+            | %index == %ESP
+            = d8 + %base
+            | %index != %ESP
+            = d8 + %base + (%index << scale)
+      */
+      case 0x0C: {
+         UChar sib     = getUChar(delta);
+         UChar scale   = toUChar((sib >> 6) & 3);
+         UChar index_r = toUChar((sib >> 3) & 7);
+         UChar base_r  = toUChar(sib & 7);
+         Long d        = getSDisp8(delta+1);
+
+         if (index_r == R_RSP && 0==getRexX(pfx)) {
+            DIS(buf, "%s%lld(%s)", segRegTxt(pfx), 
+                                   d, nameIRegRexB(8,pfx,base_r));
+            *len = 3;
+            return disAMode_copy2tmp(
+                   handleAddrOverrides(vbi, pfx, 
+                      binop(Iop_Add64, getIRegRexB(8,pfx,base_r), mkU64(d)) ));
+         } else {
+            if (scale == 0) {
+               DIS(buf, "%s%lld(%s,%s)", segRegTxt(pfx), d, 
+                         nameIRegRexB(8,pfx,base_r), 
+                         nameIReg64rexX(pfx,index_r));
+            } else {
+               DIS(buf, "%s%lld(%s,%s,%d)", segRegTxt(pfx), d, 
+                         nameIRegRexB(8,pfx,base_r), 
+                         nameIReg64rexX(pfx,index_r), 1<<scale);
+            }
+            *len = 3;
+            return 
+                disAMode_copy2tmp(
+                handleAddrOverrides(vbi, pfx,
+                  binop(Iop_Add64,
+                        binop(Iop_Add64, 
+                              getIRegRexB(8,pfx,base_r), 
+                              binop(Iop_Shl64, 
+                                    getIReg64rexX(pfx,index_r), mkU8(scale))),
+                        mkU64(d))));
+         }
+         vassert(0); /*NOTREACHED*/
+      }
+
+      /* SIB, with 32-bit displacement.  Special cases:
+         -- %rsp cannot act as an index value.  
+            If index_r indicates %rsp, zero is used for the index.
+         Denoted value is:
+            | %index == %RSP
+            = d32 + %base
+            | %index != %RSP
+            = d32 + %base + (%index << scale)
+      */
+      case 0x14: {
+         UChar sib     = getUChar(delta);
+         UChar scale   = toUChar((sib >> 6) & 3);
+         UChar index_r = toUChar((sib >> 3) & 7);
+         UChar base_r  = toUChar(sib & 7);
+         Long d        = getSDisp32(delta+1);
+
+         if (index_r == R_RSP && 0==getRexX(pfx)) {
+            DIS(buf, "%s%lld(%s)", segRegTxt(pfx), 
+                                   d, nameIRegRexB(8,pfx,base_r));
+            *len = 6;
+            return disAMode_copy2tmp(
+                   handleAddrOverrides(vbi, pfx, 
+                      binop(Iop_Add64, getIRegRexB(8,pfx,base_r), mkU64(d)) ));
+         } else {
+            if (scale == 0) {
+               DIS(buf, "%s%lld(%s,%s)", segRegTxt(pfx), d, 
+                         nameIRegRexB(8,pfx,base_r), 
+                         nameIReg64rexX(pfx,index_r));
+            } else {
+               DIS(buf, "%s%lld(%s,%s,%d)", segRegTxt(pfx), d, 
+                         nameIRegRexB(8,pfx,base_r), 
+                         nameIReg64rexX(pfx,index_r), 1<<scale);
+            }
+            *len = 6;
+            return 
+                disAMode_copy2tmp(
+                handleAddrOverrides(vbi, pfx,
+                  binop(Iop_Add64,
+                        binop(Iop_Add64, 
+                              getIRegRexB(8,pfx,base_r), 
+                              binop(Iop_Shl64, 
+                                    getIReg64rexX(pfx,index_r), mkU8(scale))),
+                        mkU64(d))));
+         }
+         vassert(0); /*NOTREACHED*/
+      }
+
+      default:
+         vpanic("disAMode(amd64)");
+         return 0; /*notreached*/
+   }
+}
+
+
+/* Similarly for VSIB addressing.  This returns just the addend,
+   and fills in *rI and *vscale with the register number of the vector
+   index and its multiplicand.  */
+static
+IRTemp disAVSIBMode ( /*OUT*/Int* len,
+                      const VexAbiInfo* vbi, Prefix pfx, Long delta,
+                      /*OUT*/HChar* buf, /*OUT*/UInt* rI,
+                      IRType ty, /*OUT*/Int* vscale )
+{
+   UChar mod_reg_rm = getUChar(delta);
+   const HChar *vindex;
+
+   /* Initialise all OUT params defensively, so that the failure
+      return below leaves them in a defined state. */
+   *len = 0;
+   *rI = 0;
+   *vscale = 0;
+   buf[0] = (UChar)0;
+   /* VSIB addressing requires a memory operand whose r/m field is 4,
+      meaning a SIB byte follows.  Anything else is not a VSIB amode. */
+   if ((mod_reg_rm & 7) != 4 || epartIsReg(mod_reg_rm))
+      return IRTemp_INVALID;
+
+   UChar sib     = getUChar(delta+1);
+   UChar scale   = toUChar((sib >> 6) & 3);
+   UChar index_r = toUChar((sib >> 3) & 7);
+   UChar base_r  = toUChar(sib & 7);
+   Long  d       = 0;
+   /* correct since #(R13) == 8 + #(RBP) */
+   Bool  base_is_BPor13 = toBool(base_r == R_RBP);
+   delta += 2;
+   *len = 2;
+
+   /* Vector index register number: SIB.index extended by REX.X. */
+   *rI = index_r | (getRexX(pfx) << 3);
+   if (ty == Ity_V128)
+      vindex = nameXMMReg(*rI);
+   else
+      vindex = nameYMMReg(*rI);
+   *vscale = 1<<scale;
+
+   /* The mod field selects the displacement size: 0 -> none (but a
+      d32 if the base is RBP/R13), 1 -> d8, 2 -> d32. */
+   switch (mod_reg_rm >> 6) {
+   case 0:
+      if (base_is_BPor13) {
+         /* No base register; the addend is just the d32. */
+         d = getSDisp32(delta);
+         *len += 4;
+         if (scale == 0) {
+            DIS(buf, "%s%lld(,%s)", segRegTxt(pfx), d, vindex);
+         } else {
+            DIS(buf, "%s%lld(,%s,%d)", segRegTxt(pfx), d, vindex, 1<<scale);
+         }
+         return disAMode_copy2tmp( mkU64(d) );
+      } else {
+         if (scale == 0) {
+            DIS(buf, "%s(%s,%s)", segRegTxt(pfx),
+                     nameIRegRexB(8,pfx,base_r), vindex);
+         } else {
+            DIS(buf, "%s(%s,%s,%d)", segRegTxt(pfx),
+                     nameIRegRexB(8,pfx,base_r), vindex, 1<<scale);
+         }
+      }
+      break;
+   case 1:
+      d = getSDisp8(delta);
+      *len += 1;
+      goto have_disp;
+   case 2:
+      d = getSDisp32(delta);
+      *len += 4;
+   have_disp:
+      if (scale == 0) {
+         DIS(buf, "%s%lld(%s,%s)", segRegTxt(pfx), d,
+                  nameIRegRexB(8,pfx,base_r), vindex);
+      } else {
+         DIS(buf, "%s%lld(%s,%s,%d)", segRegTxt(pfx), d,
+                  nameIRegRexB(8,pfx,base_r), vindex, 1<<scale);
+      }
+      break;
+   }
+
+   /* The returned addend is base (+ displacement, if nonzero); the
+      scaled vector index is applied by the caller. */
+   if (!d)
+      return disAMode_copy2tmp( getIRegRexB(8,pfx,base_r) );
+   return disAMode_copy2tmp( binop(Iop_Add64, getIRegRexB(8,pfx,base_r),
+                                   mkU64(d)) );
+}
+
+
+/* Figure out the number of (insn-stream) bytes constituting the amode
+   beginning at delta.  Is useful for getting hold of literals beyond
+   the end of the amode before it has been disassembled.  */
+
+static UInt lengthAMode ( Prefix pfx, Long delta )
+{
+   UChar modRM = getUChar(delta);
+   UChar mod   = toUChar((modRM >> 6) & 3);
+   UChar rm    = toUChar(modRM & 7);
+   delta++;
+
+   /* mod == 3 denotes a register operand (%rax..%rdi or %r8..%r15,
+      depending on REX.B).  Not an address, but still handled: the
+      amode is just the modRM byte. */
+   if (mod == 3)
+      return 1;
+
+   if (rm == 4) {
+      /* A SIB byte follows the modRM byte. */
+      if (mod == 1)
+         return 3;      /* modRM + SIB + d8 */
+      if (mod == 2)
+         return 6;      /* modRM + SIB + d32 */
+      /* mod == 0: normally no displacement, except when the SIB base
+         field denotes RBP or R13, in which case a d32 follows.  The
+         test against R_RBP alone is correct since
+         #(R13) == 8 + #(RBP). */
+      UChar sib    = getUChar(delta);
+      UChar base_r = toUChar(sib & 7);
+      return toBool(base_r == R_RBP) ? 6 : 2;
+   }
+
+   if (mod == 0) {
+      /* rm == 5 is RIP + d32; every other case is a bare register
+         with no displacement. */
+      return rm == 5 ? 5 : 1;
+   }
+   if (mod == 1)
+      return 2;         /* modRM + d8 */
+   /* mod == 2 */
+   return 5;            /* modRM + d32 */
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Disassembling common idioms                          ---*/
+/*------------------------------------------------------------*/
+
+/* Handle binary integer instructions of the form
+      op E, G  meaning
+      op reg-or-mem, reg
+   Is passed the a ptr to the modRM byte, the actual operation, and the
+   data size.  Returns the address advanced completely over this
+   instruction.
+
+   E(src) is reg-or-mem
+   G(dst) is reg.
+
+   If E is reg, -->    GET %G,  tmp
+                       OP %E,   tmp
+                       PUT tmp, %G
+ 
+   If E is mem and OP is not reversible, 
+                -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmpa
+                       GET %G, tmp2
+                       OP tmpa, tmp2
+                       PUT tmp2, %G
+
+   If E is mem and OP is reversible
+                -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmpa
+                       OP %G, tmpa
+                       PUT tmpa, %G
+*/
+static
+ULong dis_op2_E_G ( const VexAbiInfo* vbi,
+                    Prefix      pfx,
+                    Bool        addSubCarry,
+                    IROp        op8, 
+                    Bool        keep,
+                    Int         size, 
+                    Long        delta0,
+                    const HChar* t_amd64opc )
+{
+   HChar   dis_buf[50];
+   Int     len;
+   IRType  ty   = szToITy(size);
+   IRTemp  dst1 = newTemp(ty);
+   IRTemp  src  = newTemp(ty);
+   IRTemp  dst0 = newTemp(ty);
+   UChar   rm   = getUChar(delta0);
+   IRTemp  addr = IRTemp_INVALID;
+
+   /* addSubCarry == True indicates the intended operation is
+      add-with-carry or subtract-with-borrow. */
+   if (addSubCarry) {
+      vassert(op8 == Iop_Add8 || op8 == Iop_Sub8);
+      vassert(keep);
+   }
+
+   if (epartIsReg(rm)) {
+      /* Specially handle XOR reg,reg (and SBB reg,reg), because that
+         doesn't really depend on reg, and doing the obvious thing
+         potentially generates a spurious value check failure due to
+         the bogus dependency. */
+      if ((op8 == Iop_Xor8 || (op8 == Iop_Sub8 && addSubCarry))
+          && offsetIRegG(size,pfx,rm) == offsetIRegE(size,pfx,rm)) {
+         if (False && op8 == Iop_Sub8)   /* disabled debug output */
+            vex_printf("vex amd64->IR: sbb %%r,%%r optimisation(1)\n");
+         putIRegG(size,pfx,rm, mkU(ty,0));
+      }
+
+      assign( dst0, getIRegG(size,pfx,rm) );
+      assign( src,  getIRegE(size,pfx,rm) );
+
+      if (addSubCarry && op8 == Iop_Add8) {
+         helper_ADC( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIRegG(size, pfx, rm, mkexpr(dst1));
+      } else
+      if (addSubCarry && op8 == Iop_Sub8) {
+         helper_SBB( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIRegG(size, pfx, rm, mkexpr(dst1));
+      } else {
+         assign( dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)) );
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+         /* keep==False is the CMP/TEST-style case: flags are set but
+            the result is discarded. */
+         if (keep)
+            putIRegG(size, pfx, rm, mkexpr(dst1));
+      }
+
+      DIP("%s%c %s,%s\n", t_amd64opc, nameISize(size), 
+                          nameIRegE(size,pfx,rm),
+                          nameIRegG(size,pfx,rm));
+      return 1+delta0;
+   } else {
+      /* E refers to memory.  The destination (G) is a register, so
+         no LOCK/cas handling is needed in this direction. */
+      addr = disAMode ( &len, vbi, pfx, delta0, dis_buf, 0 );
+      assign( dst0, getIRegG(size,pfx,rm) );
+      assign( src,  loadLE(szToITy(size), mkexpr(addr)) );
+
+      if (addSubCarry && op8 == Iop_Add8) {
+         helper_ADC( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIRegG(size, pfx, rm, mkexpr(dst1));
+      } else
+      if (addSubCarry && op8 == Iop_Sub8) {
+         helper_SBB( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIRegG(size, pfx, rm, mkexpr(dst1));
+      } else {
+         assign( dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)) );
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+         if (keep)
+            putIRegG(size, pfx, rm, mkexpr(dst1));
+      }
+
+      DIP("%s%c %s,%s\n", t_amd64opc, nameISize(size), 
+                          dis_buf, nameIRegG(size, pfx, rm));
+      return len+delta0;
+   }
+}
+
+
+
+/* Handle binary integer instructions of the form
+      op G, E  meaning
+      op reg, reg-or-mem
+   Is passed the a ptr to the modRM byte, the actual operation, and the
+   data size.  Returns the address advanced completely over this
+   instruction.
+
+   G(src) is reg.
+   E(dst) is reg-or-mem
+
+   If E is reg, -->    GET %E,  tmp
+                       OP %G,   tmp
+                       PUT tmp, %E
+ 
+   If E is mem, -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmpv
+                       OP %G, tmpv
+                       ST tmpv, (tmpa)
+*/
+static
+ULong dis_op2_G_E ( const VexAbiInfo* vbi,
+                    Prefix      pfx,
+                    Bool        addSubCarry,
+                    IROp        op8, 
+                    Bool        keep,
+                    Int         size, 
+                    Long        delta0,
+                    const HChar* t_amd64opc )
+{
+   HChar   dis_buf[50];
+   Int     len;
+   IRType  ty   = szToITy(size);
+   IRTemp  dst1 = newTemp(ty);
+   IRTemp  src  = newTemp(ty);
+   IRTemp  dst0 = newTemp(ty);
+   UChar   rm   = getUChar(delta0);
+   IRTemp  addr = IRTemp_INVALID;
+
+   /* addSubCarry == True indicates the intended operation is
+      add-with-carry or subtract-with-borrow. */
+   if (addSubCarry) {
+      vassert(op8 == Iop_Add8 || op8 == Iop_Sub8);
+      vassert(keep);
+   }
+
+   if (epartIsReg(rm)) {
+      /* Specially handle XOR reg,reg, because that doesn't really
+         depend on reg, and doing the obvious thing potentially
+         generates a spurious value check failure due to the bogus
+         dependency.  Ditto SBB reg,reg. */
+      if ((op8 == Iop_Xor8 || (op8 == Iop_Sub8 && addSubCarry))
+          && offsetIRegG(size,pfx,rm) == offsetIRegE(size,pfx,rm)) {
+         putIRegE(size,pfx,rm, mkU(ty,0));
+      }
+
+      assign(dst0, getIRegE(size,pfx,rm));
+      assign(src,  getIRegG(size,pfx,rm));
+
+      if (addSubCarry && op8 == Iop_Add8) {
+         helper_ADC( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIRegE(size, pfx, rm, mkexpr(dst1));
+      } else
+      if (addSubCarry && op8 == Iop_Sub8) {
+         helper_SBB( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIRegE(size, pfx, rm, mkexpr(dst1));
+      } else {
+         assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)));
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+         /* keep==False: flags only (CMP/TEST-style); result dropped. */
+         if (keep)
+            putIRegE(size, pfx, rm, mkexpr(dst1));
+      }
+
+      DIP("%s%c %s,%s\n", t_amd64opc, nameISize(size), 
+                          nameIRegG(size,pfx,rm),
+                          nameIRegE(size,pfx,rm));
+      return 1+delta0;
+   }
+
+   /* E refers to memory.  The destination is in memory, so a LOCK
+      prefix (if present) makes the store cas-style for atomicity. */
+   {
+      addr = disAMode ( &len, vbi, pfx, delta0, dis_buf, 0 );
+      assign(dst0, loadLE(ty,mkexpr(addr)));
+      assign(src,  getIRegG(size,pfx,rm));
+
+      if (addSubCarry && op8 == Iop_Add8) {
+         if (haveLOCK(pfx)) {
+            /* cas-style store */
+            helper_ADC( size, dst1, dst0, src,
+                        /*store*/addr, dst0/*expVal*/, guest_RIP_curr_instr );
+         } else {
+            /* normal store */
+            helper_ADC( size, dst1, dst0, src,
+                        /*store*/addr, IRTemp_INVALID, 0 );
+         }
+      } else
+      if (addSubCarry && op8 == Iop_Sub8) {
+         if (haveLOCK(pfx)) {
+            /* cas-style store */
+            helper_SBB( size, dst1, dst0, src,
+                        /*store*/addr, dst0/*expVal*/, guest_RIP_curr_instr );
+         } else {
+            /* normal store */
+            helper_SBB( size, dst1, dst0, src,
+                        /*store*/addr, IRTemp_INVALID, 0 );
+         }
+      } else {
+         assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)));
+         if (keep) {
+            if (haveLOCK(pfx)) {
+               if (0) vex_printf("locked case\n" );
+               casLE( mkexpr(addr),
+                      mkexpr(dst0)/*expval*/, 
+                      mkexpr(dst1)/*newval*/, guest_RIP_curr_instr );
+            } else {
+               if (0) vex_printf("nonlocked case\n");
+               storeLE(mkexpr(addr), mkexpr(dst1));
+            }
+         }
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+      }
+
+      DIP("%s%c %s,%s\n", t_amd64opc, nameISize(size), 
+                          nameIRegG(size,pfx,rm), dis_buf);
+      return len+delta0;
+   }
+}
+
+
+/* Handle move instructions of the form
+      mov E, G  meaning
+      mov reg-or-mem, reg
+   Is passed the a ptr to the modRM byte, and the data size.  Returns
+   the address advanced completely over this instruction.
+
+   E(src) is reg-or-mem
+   G(dst) is reg.
+
+   If E is reg, -->    GET %E,  tmpv
+                       PUT tmpv, %G
+ 
+   If E is mem  -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmpb
+                       PUT tmpb, %G
+*/
+static
+ULong dis_mov_E_G ( const VexAbiInfo* vbi,
+                    Prefix      pfx,
+                    Int         size, 
+                    Long        delta0 )
+{
+   UChar modrm = getUChar(delta0);
+
+   if (!epartIsReg(modrm)) {
+      /* Source is memory: compute the amode, load, write to G. */
+      HChar  amode_buf[50];
+      Int    alen;
+      IRTemp ea = disAMode ( &alen, vbi, pfx, delta0, amode_buf, 0 );
+      putIRegG(size, pfx, modrm, loadLE(szToITy(size), mkexpr(ea)));
+      DIP("mov%c %s,%s\n", nameISize(size), 
+                           amode_buf,
+                           nameIRegG(size,pfx,modrm));
+      return delta0+alen;
+   }
+
+   /* Source is a register: straight reg-to-reg copy. */
+   putIRegG(size, pfx, modrm, getIRegE(size, pfx, modrm));
+   DIP("mov%c %s,%s\n", nameISize(size),
+                        nameIRegE(size,pfx,modrm),
+                        nameIRegG(size,pfx,modrm));
+   return delta0+1;
+}
+
+
+/* Handle move instructions of the form
+      mov G, E  meaning
+      mov reg, reg-or-mem
+   Is passed the a ptr to the modRM byte, and the data size.  Returns
+   the address advanced completely over this instruction.
+   We have to decide here whether F2 or F3 are acceptable.  F2 never is.
+
+   G(src) is reg.
+   E(dst) is reg-or-mem
+
+   If E is reg, -->    GET %G,  tmp
+                       PUT tmp, %E
+ 
+   If E is mem, -->    (getAddr E) -> tmpa
+                       GET %G, tmpv
+                       ST tmpv, (tmpa) 
+*/
+static
+ULong dis_mov_G_E ( const VexAbiInfo*  vbi,
+                    Prefix       pfx,
+                    Int          size, 
+                    Long         delta0,
+                    /*OUT*/Bool* ok )
+{
+   UChar modrm = getUChar(delta0);
+   *ok = True;
+
+   if (epartIsReg(modrm)) {
+      /* reg-to-reg copy; neither F2 nor F3 is acceptable here. */
+      if (haveF2orF3(pfx)) { *ok = False; return delta0; }
+      putIRegE(size, pfx, modrm, getIRegG(size, pfx, modrm));
+      DIP("mov%c %s,%s\n", nameISize(size),
+                           nameIRegG(size,pfx,modrm),
+                           nameIRegE(size,pfx,modrm));
+      return delta0+1;
+   }
+
+   /* Store to memory.  F2 is never acceptable; F3 (XRELEASE) is. */
+   if (haveF2(pfx)) { *ok = False; return delta0; }
+   {
+      HChar  amode_buf[50];
+      Int    alen;
+      IRTemp ea = disAMode ( &alen, vbi, pfx, delta0, amode_buf, 0 );
+      storeLE( mkexpr(ea), getIRegG(size, pfx, modrm) );
+      DIP("mov%c %s,%s\n", nameISize(size), 
+                           nameIRegG(size,pfx,modrm), 
+                           amode_buf);
+      return delta0+alen;
+   }
+}
+
+
+/* op $immediate, AL/AX/EAX/RAX. */
+static
+ULong dis_op_imm_A ( Int    size,
+                     Bool   carrying,
+                     IROp   op8,
+                     Bool   keep,
+                     Long   delta,
+                     const HChar* t_amd64opc )
+{
+   /* The immediate occupies at most 4 bytes in the insn stream, even
+      for 8-byte operations (getSDisp sign-extends it). */
+   Int    size4 = imin(size,4);
+   IRType ty    = szToITy(size);
+   IRTemp dst0  = newTemp(ty);
+   IRTemp src   = newTemp(ty);
+   IRTemp dst1  = newTemp(ty);
+   Long  lit    = getSDisp(size4,delta);
+   assign(dst0, getIRegRAX(size));
+   /* Truncate the (sign-extended) literal to the operand size. */
+   assign(src,  mkU(ty,lit & mkSizeMask(size)));
+
+   if (isAddSub(op8) && !carrying) {
+      assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)) );
+      setFlags_DEP1_DEP2(op8, dst0, src, ty);
+   }
+   else
+   if (isLogic(op8)) {
+      vassert(!carrying);
+      assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)) );
+      setFlags_DEP1(op8, dst1, ty);
+   }
+   else
+   if (op8 == Iop_Add8 && carrying) {
+      helper_ADC( size, dst1, dst0, src,
+                  /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+   }
+   else
+   if (op8 == Iop_Sub8 && carrying) {
+      helper_SBB( size, dst1, dst0, src,
+                  /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+   }
+   else
+      vpanic("dis_op_imm_A(amd64,guest)");
+
+   /* keep==False is the CMP/TEST case: result discarded, flags kept. */
+   if (keep)
+      putIRegRAX(size, mkexpr(dst1));
+
+   DIP("%s%c $%lld, %s\n", t_amd64opc, nameISize(size), 
+                           lit, nameIRegRAX(size));
+   return delta+size4;
+}
+
+
+/* Sign- and Zero-extending moves. */
+static
+ULong dis_movx_E_G ( const VexAbiInfo* vbi,
+                     Prefix pfx,
+                     Long delta, Int szs, Int szd, Bool sign_extend )
+{
+   /* Widen the szs-byte operand E (reg or mem) to szd bytes --
+      sign-extending if sign_extend is True, else zero-extending --
+      and write the result to register G.  Returns delta advanced
+      past the modRM/amode bytes. */
+   UChar rm = getUChar(delta);
+   if (epartIsReg(rm)) {
+      putIRegG(szd, pfx, rm,
+                    doScalarWidening(
+                       szs,szd,sign_extend,
+                       getIRegE(szs,pfx,rm)));
+      DIP("mov%c%c%c %s,%s\n", sign_extend ? 's' : 'z',
+                               nameISize(szs), 
+                               nameISize(szd),
+                               nameIRegE(szs,pfx,rm),
+                               nameIRegG(szd,pfx,rm));
+      return 1+delta;
+   }
+
+   /* E refers to memory */    
+   {
+      Int    len;
+      HChar  dis_buf[50];
+      IRTemp addr = disAMode ( &len, vbi, pfx, delta, dis_buf, 0 );
+      putIRegG(szd, pfx, rm,
+                    doScalarWidening(
+                       szs,szd,sign_extend, 
+                       loadLE(szToITy(szs),mkexpr(addr))));
+      DIP("mov%c%c%c %s,%s\n", sign_extend ? 's' : 'z',
+                               nameISize(szs), 
+                               nameISize(szd),
+                               dis_buf, 
+                               nameIRegG(szd,pfx,rm));
+      return len+delta;
+   }
+}
+
+
+/* Generate code to divide ArchRegs RDX:RAX / EDX:EAX / DX:AX / AX by
+   the 64 / 32 / 16 / 8 bit quantity in the given IRTemp.  */
+static
+void codegen_div ( Int sz, IRTemp t, Bool signed_divide )
+{
+   /* special-case the 64-bit case */
+   if (sz == 8) {
+      IROp   op     = signed_divide ? Iop_DivModS128to64 
+                                    : Iop_DivModU128to64;
+      IRTemp src128 = newTemp(Ity_I128);
+      IRTemp dst128 = newTemp(Ity_I128);
+      assign( src128, binop(Iop_64HLto128, 
+                            getIReg64(R_RDX), 
+                            getIReg64(R_RAX)) );
+      assign( dst128, binop(op, mkexpr(src128), mkexpr(t)) );
+      /* quotient -> RAX, remainder -> RDX */
+      putIReg64( R_RAX, unop(Iop_128to64,mkexpr(dst128)) );
+      putIReg64( R_RDX, unop(Iop_128HIto64,mkexpr(dst128)) );
+   } else {
+      /* 32/16/8-bit cases all go through a 64/32-bit divmod, with
+         the operands widened as needed beforehand. */
+      IROp   op    = signed_divide ? Iop_DivModS64to32 
+                                   : Iop_DivModU64to32;
+      IRTemp src64 = newTemp(Ity_I64);
+      IRTemp dst64 = newTemp(Ity_I64);
+      switch (sz) {
+      case 4:
+         /* dividend is EDX:EAX; quotient -> EAX, remainder -> EDX */
+         assign( src64, 
+                 binop(Iop_32HLto64, getIRegRDX(4), getIRegRAX(4)) );
+         assign( dst64, 
+                 binop(op, mkexpr(src64), mkexpr(t)) );
+         putIRegRAX( 4, unop(Iop_64to32,mkexpr(dst64)) );
+         putIRegRDX( 4, unop(Iop_64HIto32,mkexpr(dst64)) );
+         break;
+      case 2: {
+         /* dividend is DX:AX; quotient -> AX, remainder -> DX */
+         IROp widen3264 = signed_divide ? Iop_32Sto64 : Iop_32Uto64;
+         IROp widen1632 = signed_divide ? Iop_16Sto32 : Iop_16Uto32;
+         assign( src64, unop(widen3264,
+                             binop(Iop_16HLto32, 
+                                   getIRegRDX(2), 
+                                   getIRegRAX(2))) );
+         assign( dst64, binop(op, mkexpr(src64), unop(widen1632,mkexpr(t))) );
+         putIRegRAX( 2, unop(Iop_32to16,unop(Iop_64to32,mkexpr(dst64))) );
+         putIRegRDX( 2, unop(Iop_32to16,unop(Iop_64HIto32,mkexpr(dst64))) );
+         break;
+      }
+      case 1: {
+         /* dividend is AX (not DX:AX); quotient -> AL, remainder -> AH */
+         IROp widen3264 = signed_divide ? Iop_32Sto64 : Iop_32Uto64;
+         IROp widen1632 = signed_divide ? Iop_16Sto32 : Iop_16Uto32;
+         IROp widen816  = signed_divide ? Iop_8Sto16  : Iop_8Uto16;
+         assign( src64, unop(widen3264, 
+                        unop(widen1632, getIRegRAX(2))) );
+         assign( dst64, 
+                 binop(op, mkexpr(src64), 
+                           unop(widen1632, unop(widen816, mkexpr(t)))) );
+         putIRegRAX( 1, unop(Iop_16to8, 
+                        unop(Iop_32to16,
+                        unop(Iop_64to32,mkexpr(dst64)))) );
+         putIRegAH( unop(Iop_16to8, 
+                    unop(Iop_32to16,
+                    unop(Iop_64HIto32,mkexpr(dst64)))) );
+         break;
+      }
+      default: 
+         vpanic("codegen_div(amd64)");
+      }
+   }
+}
+
+/* Group 1 extended opcodes: immediate-operand ADD/OR/ADC/SBB/AND/SUB/
+   XOR/CMP against a reg-or-mem destination, selected by the reg field
+   of the modRM byte.  am_sz is the size of the amode, d_sz the size
+   of the immediate in the insn stream, sz the operand size, and d64
+   the immediate value itself.  Returns delta advanced past the whole
+   instruction. */
+static 
+ULong dis_Grp1 ( const VexAbiInfo* vbi,
+                 Prefix pfx,
+                 Long delta, UChar modrm, 
+                 Int am_sz, Int d_sz, Int sz, Long d64 )
+{
+   Int     len;
+   HChar   dis_buf[50];
+   IRType  ty   = szToITy(sz);
+   IRTemp  dst1 = newTemp(ty);
+   IRTemp  src  = newTemp(ty);
+   IRTemp  dst0 = newTemp(ty);
+   IRTemp  addr = IRTemp_INVALID;
+   IROp    op8  = Iop_INVALID;
+   ULong   mask = mkSizeMask(sz);
+
+   /* Opcode 7 (CMP) also maps to Sub8; its result is simply not
+      written back -- see the "< 7" tests below. */
+   switch (gregLO3ofRM(modrm)) {
+      case 0: op8 = Iop_Add8; break;  case 1: op8 = Iop_Or8;  break;
+      case 2: break;  // ADC
+      case 3: break;  // SBB
+      case 4: op8 = Iop_And8; break;  case 5: op8 = Iop_Sub8; break;
+      case 6: op8 = Iop_Xor8; break;  case 7: op8 = Iop_Sub8; break;
+      /*NOTREACHED*/
+      default: vpanic("dis_Grp1(amd64): unhandled case");
+   }
+
+   if (epartIsReg(modrm)) {
+      vassert(am_sz == 1);
+
+      assign(dst0, getIRegE(sz,pfx,modrm));
+      assign(src,  mkU(ty,d64 & mask));
+
+      if (gregLO3ofRM(modrm) == 2 /* ADC */) {
+         helper_ADC( sz, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+      } else 
+      if (gregLO3ofRM(modrm) == 3 /* SBB */) {
+         helper_SBB( sz, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+      } else {
+         assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)));
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+      }
+
+      /* CMP (opcode 7) sets flags only; no write-back. */
+      if (gregLO3ofRM(modrm) < 7)
+         putIRegE(sz, pfx, modrm, mkexpr(dst1));
+
+      delta += (am_sz + d_sz);
+      DIP("%s%c $%lld, %s\n", 
+          nameGrp1(gregLO3ofRM(modrm)), nameISize(sz), d64, 
+          nameIRegE(sz,pfx,modrm));
+   } else {
+      /* Destination is memory; with LOCK the store is done cas-style
+         for atomicity. */
+      addr = disAMode ( &len, vbi, pfx, delta, dis_buf, /*xtra*/d_sz );
+
+      assign(dst0, loadLE(ty,mkexpr(addr)));
+      assign(src, mkU(ty,d64 & mask));
+
+      if (gregLO3ofRM(modrm) == 2 /* ADC */) {
+         if (haveLOCK(pfx)) {
+            /* cas-style store */
+            helper_ADC( sz, dst1, dst0, src,
+                       /*store*/addr, dst0/*expVal*/, guest_RIP_curr_instr );
+         } else {
+            /* normal store */
+            helper_ADC( sz, dst1, dst0, src,
+                        /*store*/addr, IRTemp_INVALID, 0 );
+         }
+      } else 
+      if (gregLO3ofRM(modrm) == 3 /* SBB */) {
+         if (haveLOCK(pfx)) {
+            /* cas-style store */
+            helper_SBB( sz, dst1, dst0, src,
+                       /*store*/addr, dst0/*expVal*/, guest_RIP_curr_instr );
+         } else {
+            /* normal store */
+            helper_SBB( sz, dst1, dst0, src,
+                        /*store*/addr, IRTemp_INVALID, 0 );
+         }
+      } else {
+         assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)));
+         /* CMP (opcode 7) sets flags only; no store. */
+         if (gregLO3ofRM(modrm) < 7) {
+            if (haveLOCK(pfx)) {
+               casLE( mkexpr(addr), mkexpr(dst0)/*expVal*/, 
+                                    mkexpr(dst1)/*newVal*/,
+                                    guest_RIP_curr_instr );
+            } else {
+               storeLE(mkexpr(addr), mkexpr(dst1));
+            }
+         }
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+      }
+
+      delta += (len+d_sz);
+      DIP("%s%c $%lld, %s\n", 
+          nameGrp1(gregLO3ofRM(modrm)), nameISize(sz),
+          d64, dis_buf);
+   }
+   return delta;
+}
+
+
+/* Group 2 extended opcodes.  shift_expr must be an 8-bit typed
+   expression. */
+
+/* Handle a Group 2 insn: the E operand (register or memory) is
+   shifted/rotated by shift_expr and the result written back.  The
+   sub-opcode in the modrm reg field selects the operation:
+   0=ROL 1=ROR 2=RCL 3=RCR 4=SHL 5=SHR 6=SAL(==SHL) 7=SAR.
+   Returns the updated delta; *decode_OK is set True here
+   (undecodable sub-opcodes panic instead). */
+static
+ULong dis_Grp2 ( const VexAbiInfo* vbi,
+                 Prefix pfx,
+                 Long delta, UChar modrm,
+                 Int am_sz, Int d_sz, Int sz, IRExpr* shift_expr,
+                 const HChar* shift_expr_txt, Bool* decode_OK )
+{
+   /* delta on entry points at the modrm byte. */
+   HChar  dis_buf[50];
+   Int    len;
+   Bool   isShift, isRotate, isRotateC;
+   IRType ty    = szToITy(sz);
+   IRTemp dst0  = newTemp(ty);   /* operand value before the operation */
+   IRTemp dst1  = newTemp(ty);   /* operand value after the operation */
+   IRTemp addr  = IRTemp_INVALID;
+
+   *decode_OK = True;
+
+   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+
+   /* Put value to shift/rotate in dst0. */
+   if (epartIsReg(modrm)) {
+      assign(dst0, getIRegE(sz, pfx, modrm));
+      delta += (am_sz + d_sz);
+   } else {
+      addr = disAMode ( &len, vbi, pfx, delta, dis_buf, /*xtra*/d_sz );
+      assign(dst0, loadLE(ty,mkexpr(addr)));
+      delta += len + d_sz;
+   }
+
+   /* Classify the sub-opcode.  The case labels all fall through to a
+      single assignment, so no breaks are needed. */
+   isShift = False;
+   /* 4=SHL, 5=SHR, 6=SAL (alias of SHL), 7=SAR */
+   switch (gregLO3ofRM(modrm)) { case 4: case 5: case 6: case 7: isShift = True; }
+
+   isRotate = False;
+   /* 0=ROL, 1=ROR */
+   switch (gregLO3ofRM(modrm)) { case 0: case 1: isRotate = True; }
+
+   isRotateC = False;
+   /* 2=RCL, 3=RCR (rotate through carry) */
+   switch (gregLO3ofRM(modrm)) { case 2: case 3: isRotateC = True; }
+
+   if (!isShift && !isRotate && !isRotateC) {
+      /*NOTREACHED*/
+      vpanic("dis_Grp2(Reg): unhandled case(amd64)");
+   }
+
+   if (isRotateC) {
+      /* Call a helper; this insn is so ridiculous it does not deserve
+         better.  One problem is, the helper has to calculate both the
+         new value and the new flags.  This is more than 64 bits, and
+         there is no way to return more than 64 bits from the helper.
+         Hence the crude and obvious solution is to call it twice,
+         using the sign of the sz field to indicate whether it is the
+         value or rflags result we want.
+      */
+      Bool     left = toBool(gregLO3ofRM(modrm) == 2);
+      IRExpr** argsVALUE;
+      IRExpr** argsRFLAGS;
+
+      IRTemp new_value  = newTemp(Ity_I64);
+      IRTemp new_rflags = newTemp(Ity_I64);
+      IRTemp old_rflags = newTemp(Ity_I64);
+
+      assign( old_rflags, widenUto64(mk_amd64g_calculate_rflags_all()) );
+
+      /* First call: positive sz requests the rotated value. */
+      argsVALUE
+         = mkIRExprVec_4( widenUto64(mkexpr(dst0)), /* thing to rotate */
+                          widenUto64(shift_expr),   /* rotate amount */
+                          mkexpr(old_rflags),
+                          mkU64(sz) );
+      assign( new_value, 
+                 mkIRExprCCall(
+                    Ity_I64, 
+                    0/*regparm*/, 
+                    left ? "amd64g_calculate_RCL" : "amd64g_calculate_RCR",
+                    left ? &amd64g_calculate_RCL  : &amd64g_calculate_RCR,
+                    argsVALUE
+                 )
+            );
+      
+      /* Second call: negative sz requests the resulting rflags. */
+      argsRFLAGS
+         = mkIRExprVec_4( widenUto64(mkexpr(dst0)), /* thing to rotate */
+                          widenUto64(shift_expr),   /* rotate amount */
+                          mkexpr(old_rflags),
+                          mkU64(-sz) );
+      assign( new_rflags, 
+                 mkIRExprCCall(
+                    Ity_I64, 
+                    0/*regparm*/, 
+                    left ? "amd64g_calculate_RCL" : "amd64g_calculate_RCR",
+                    left ? &amd64g_calculate_RCL  : &amd64g_calculate_RCR,
+                    argsRFLAGS
+                 )
+            );
+
+      assign( dst1, narrowTo(ty, mkexpr(new_value)) );
+      /* Flags were fully computed by the helper; just copy them in. */
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(new_rflags) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+   }
+
+   else
+   if (isShift) {
+
+      IRTemp pre64     = newTemp(Ity_I64);
+      IRTemp res64     = newTemp(Ity_I64);
+      IRTemp res64ss   = newTemp(Ity_I64);
+      IRTemp shift_amt = newTemp(Ity_I8);
+      UChar  mask      = toUChar(sz==8 ? 63 : 31);
+      IROp   op64;
+
+      switch (gregLO3ofRM(modrm)) { 
+         case 4: op64 = Iop_Shl64; break;
+         case 5: op64 = Iop_Shr64; break;
+         case 6: op64 = Iop_Shl64; break;   /* SAL is an alias of SHL */
+         case 7: op64 = Iop_Sar64; break;
+         /*NOTREACHED*/
+         default: vpanic("dis_Grp2:shift"); break;
+      }
+
+      /* Widen the value to be shifted to 64 bits, do the shift, and
+         narrow back down.  This seems surprisingly long-winded, but
+         unfortunately the AMD semantics requires that 8/16/32-bit
+         shifts give defined results for shift values all the way up
+         to 32, and this seems the simplest way to do it.  It has the
+         advantage that the only IR level shifts generated are of 64
+         bit values, and the shift amount is guaranteed to be in the
+         range 0 .. 63, thereby observing the IR semantics requiring
+         all shift values to be in the range 0 .. 2^word_size-1. 
+
+         Therefore the shift amount is masked with 63 for 64-bit shifts
+         and 31 for all others.
+      */
+      /* shift_amt = shift_expr & MASK, regardless of operation size */
+      assign( shift_amt, binop(Iop_And8, shift_expr, mkU8(mask)) );
+
+      /* suitably widen the value to be shifted to 64 bits. */
+      assign( pre64, op64==Iop_Sar64 ? widenSto64(mkexpr(dst0))
+                                     : widenUto64(mkexpr(dst0)) );
+
+      /* res64 = pre64 `shift` shift_amt */
+      assign( res64, binop(op64, mkexpr(pre64), mkexpr(shift_amt)) );
+
+      /* res64ss = pre64 `shift` ((shift_amt - 1) & MASK) */
+      /* (the one-less-shifted value feeds the flags thunk below) */
+      assign( res64ss,
+              binop(op64,
+                    mkexpr(pre64), 
+                    binop(Iop_And8,
+                          binop(Iop_Sub8,
+                                mkexpr(shift_amt), mkU8(1)),
+                          mkU8(mask))) );
+
+      /* Build the flags thunk. */
+      setFlags_DEP1_DEP2_shift(op64, res64, res64ss, ty, shift_amt);
+
+      /* Narrow the result back down. */
+      assign( dst1, narrowTo(ty, mkexpr(res64)) );
+
+   } /* if (isShift) */
+
+   else 
+   if (isRotate) {
+      /* ccOp: 0..3 select the B/W/L/Q flag-thunk op variant; added to
+         AMD64G_CC_OP_ROLB / AMD64G_CC_OP_RORB below. */
+      Int    ccOp      = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 
+                                        : (ty==Ity_I32 ? 2 : 3));
+      Bool   left      = toBool(gregLO3ofRM(modrm) == 0);
+      IRTemp rot_amt   = newTemp(Ity_I8);
+      IRTemp rot_amt64 = newTemp(Ity_I8);
+      IRTemp oldFlags  = newTemp(Ity_I64);
+      UChar  mask      = toUChar(sz==8 ? 63 : 31);
+
+      /* rot_amt = shift_expr & mask */
+      /* By masking the rotate amount thusly, the IR-level Shl/Shr
+         expressions never shift beyond the word size and thus remain
+         well defined. */
+      assign(rot_amt64, binop(Iop_And8, shift_expr, mkU8(mask)));
+
+      /* For sub-64-bit operands, further reduce the amount modulo the
+         operand's bit width. */
+      if (ty == Ity_I64)
+         assign(rot_amt, mkexpr(rot_amt64));
+      else
+         assign(rot_amt, binop(Iop_And8, mkexpr(rot_amt64), mkU8(8*sz-1)));
+
+      if (left) {
+
+         /* dst1 = (dst0 << rot_amt) | (dst0 >>u (wordsize-rot_amt)) */
+         assign(dst1, 
+            binop( mkSizedOp(ty,Iop_Or8),
+                   binop( mkSizedOp(ty,Iop_Shl8), 
+                          mkexpr(dst0),
+                          mkexpr(rot_amt)
+                   ),
+                   binop( mkSizedOp(ty,Iop_Shr8), 
+                          mkexpr(dst0), 
+                          binop(Iop_Sub8,mkU8(8*sz), mkexpr(rot_amt))
+                   )
+            )
+         );
+         ccOp += AMD64G_CC_OP_ROLB;
+
+      } else { /* right */
+
+         /* dst1 = (dst0 >>u rot_amt) | (dst0 << (wordsize-rot_amt)) */
+         assign(dst1, 
+            binop( mkSizedOp(ty,Iop_Or8),
+                   binop( mkSizedOp(ty,Iop_Shr8), 
+                          mkexpr(dst0),
+                          mkexpr(rot_amt)
+                   ),
+                   binop( mkSizedOp(ty,Iop_Shl8), 
+                          mkexpr(dst0), 
+                          binop(Iop_Sub8,mkU8(8*sz), mkexpr(rot_amt))
+                   )
+            )
+         );
+         ccOp += AMD64G_CC_OP_RORB;
+
+      }
+
+      /* dst1 now holds the rotated value.  Build flag thunk.  We
+         need the resulting value for this, and the previous flags.
+         Except don't set it if the rotate count is zero. */
+
+      assign(oldFlags, mk_amd64g_calculate_rflags_all());
+
+      /* rot_amt64 :: Ity_I8.  We need to convert it to I1. */
+      IRTemp rot_amt64b = newTemp(Ity_I1);
+      assign(rot_amt64b, binop(Iop_CmpNE8, mkexpr(rot_amt64), mkU8(0)) );
+
+      /* CC_DEP1 is the rotated value.  CC_NDEP is flags before. */
+      /* Each Put is guarded by an ITE so the thunk is left untouched
+         when the rotate amount is zero. */
+      stmt( IRStmt_Put( OFFB_CC_OP,
+                        IRExpr_ITE( mkexpr(rot_amt64b),
+                                    mkU64(ccOp),
+                                    IRExpr_Get(OFFB_CC_OP,Ity_I64) ) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, 
+                        IRExpr_ITE( mkexpr(rot_amt64b),
+                                    widenUto64(mkexpr(dst1)),
+                                    IRExpr_Get(OFFB_CC_DEP1,Ity_I64) ) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, 
+                        IRExpr_ITE( mkexpr(rot_amt64b),
+                                    mkU64(0),
+                                    IRExpr_Get(OFFB_CC_DEP2,Ity_I64) ) ));
+      stmt( IRStmt_Put( OFFB_CC_NDEP, 
+                        IRExpr_ITE( mkexpr(rot_amt64b),
+                                    mkexpr(oldFlags),
+                                    IRExpr_Get(OFFB_CC_NDEP,Ity_I64) ) ));
+   } /* if (isRotate) */
+
+   /* Save result, and finish up. */
+   if (epartIsReg(modrm)) {
+      putIRegE(sz, pfx, modrm, mkexpr(dst1));
+      if (vex_traceflags & VEX_TRACE_FE) {
+         vex_printf("%s%c ",
+                    nameGrp2(gregLO3ofRM(modrm)), nameISize(sz) );
+         if (shift_expr_txt)
+            vex_printf("%s", shift_expr_txt);
+         else
+            ppIRExpr(shift_expr);
+         vex_printf(", %s\n", nameIRegE(sz,pfx,modrm));
+      }
+   } else {
+      storeLE(mkexpr(addr), mkexpr(dst1));
+      if (vex_traceflags & VEX_TRACE_FE) {
+         vex_printf("%s%c ",
+                    nameGrp2(gregLO3ofRM(modrm)), nameISize(sz) );
+         if (shift_expr_txt)
+            vex_printf("%s", shift_expr_txt);
+         else
+            ppIRExpr(shift_expr);
+         vex_printf(", %s\n", dis_buf);
+      }
+   }
+   return delta;
+}
+
+
+/* Group 8 extended opcodes (but BT/BTS/BTC/BTR only). */
+/* Handle a Group 8 insn with an 8-bit immediate bit offset:
+   BT (reg field 4), BTS (5), BTR (6), BTC (7) on the E operand,
+   with src_val as the imm8 offset.  Returns the updated delta;
+   clears *decode_OK on any undecodable combination. */
+static
+ULong dis_Grp8_Imm ( const VexAbiInfo* vbi,
+                     Prefix pfx,
+                     Long delta, UChar modrm,
+                     Int am_sz, Int sz, ULong src_val,
+                     Bool* decode_OK )
+{
+   /* src_val denotes a d8.
+      And delta on entry points at the modrm byte. */
+
+   IRType ty     = szToITy(sz);
+   IRTemp t2     = newTemp(Ity_I64);   /* operand, zero-widened to 64 bits */
+   IRTemp t2m    = newTemp(Ity_I64);   /* modified operand (BTS/BTR/BTC) */
+   IRTemp t_addr = IRTemp_INVALID;
+   HChar  dis_buf[50];
+   ULong  mask;
+
+   /* we're optimists :-) */
+   *decode_OK = True;
+
+   /* Check whether F2 or F3 are acceptable. */
+   if (epartIsReg(modrm)) {
+      /* F2 or F3 are not allowed in the register case. */
+      if (haveF2orF3(pfx)) {
+         *decode_OK = False;
+         return delta;
+     }
+   } else {
+      /* F2 or F3 (but not both) are allowable provided LOCK is also
+         present. */
+      if (haveF2orF3(pfx)) {
+         if (haveF2andF3(pfx) || !haveLOCK(pfx)) {
+            *decode_OK = False;
+            return delta;
+         }
+      }
+   }
+
+   /* Limit src_val -- the bit offset -- to something within a word.
+      The Intel docs say that literal offsets larger than a word are
+      masked in this way. */
+   switch (sz) {
+      case 2:  src_val &= 15; break;
+      case 4:  src_val &= 31; break;
+      case 8:  src_val &= 63; break;
+      default: *decode_OK = False; return delta;
+   }
+
+   /* Invent a mask suitable for the operation. */
+   switch (gregLO3ofRM(modrm)) {
+      case 4: /* BT */  mask = 0;                  break;
+      case 5: /* BTS */ mask = 1ULL << src_val;    break;
+      case 6: /* BTR */ mask = ~(1ULL << src_val); break;
+      case 7: /* BTC */ mask = 1ULL << src_val;    break;
+         /* If this needs to be extended, probably simplest to make a
+            new function to handle the other cases (0 .. 3).  The
+            Intel docs do however not indicate any use for 0 .. 3, so
+            we don't expect this to happen. */
+      default: *decode_OK = False; return delta;
+   }
+
+   /* Fetch the value to be tested and modified into t2, which is
+      64-bits wide regardless of sz. */
+   if (epartIsReg(modrm)) {
+      vassert(am_sz == 1);
+      assign( t2, widenUto64(getIRegE(sz, pfx, modrm)) );
+      delta += (am_sz + 1);   /* skip modrm (am_sz) plus the imm8 */
+      DIP("%s%c $0x%llx, %s\n", nameGrp8(gregLO3ofRM(modrm)), 
+                                nameISize(sz),
+                                src_val, nameIRegE(sz,pfx,modrm));
+   } else {
+      Int len;
+      t_addr = disAMode ( &len, vbi, pfx, delta, dis_buf, 1 );
+      delta  += (len+1);   /* skip the amode plus the imm8 */
+      assign( t2, widenUto64(loadLE(ty, mkexpr(t_addr))) );
+      DIP("%s%c $0x%llx, %s\n", nameGrp8(gregLO3ofRM(modrm)), 
+                                nameISize(sz),
+                                src_val, dis_buf);
+   }
+
+   /* Compute the new value into t2m, if non-BT. */
+   switch (gregLO3ofRM(modrm)) {
+      case 4: /* BT */
+         break;
+      case 5: /* BTS */
+         assign( t2m, binop(Iop_Or64, mkU64(mask), mkexpr(t2)) );
+         break;
+      case 6: /* BTR */
+         assign( t2m, binop(Iop_And64, mkU64(mask), mkexpr(t2)) );
+         break;
+      case 7: /* BTC */
+         assign( t2m, binop(Iop_Xor64, mkU64(mask), mkexpr(t2)) );
+         break;
+     default: 
+         /*NOTREACHED*/ /*the previous switch guards this*/
+         vassert(0);
+   }
+
+   /* Write the result back, if non-BT. */
+   if (gregLO3ofRM(modrm) != 4 /* BT */) {
+      if (epartIsReg(modrm)) {
+        putIRegE(sz, pfx, modrm, narrowTo(ty, mkexpr(t2m)));
+      } else {
+         if (haveLOCK(pfx)) {
+            /* LOCKed: make the read-modify-write atomic via CAS. */
+            casLE( mkexpr(t_addr),
+                   narrowTo(ty, mkexpr(t2))/*expd*/,
+                   narrowTo(ty, mkexpr(t2m))/*new*/,
+                   guest_RIP_curr_instr );
+         } else {
+            storeLE(mkexpr(t_addr), narrowTo(ty, mkexpr(t2m)));
+         }
+      }
+   }
+
+   /* Copy relevant bit from t2 into the carry flag. */
+   /* Flags: C=selected bit, O,S,Z,A,P undefined, so are set to zero. */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+   stmt( IRStmt_Put( 
+            OFFB_CC_DEP1,
+            binop(Iop_And64,
+                  binop(Iop_Shr64, mkexpr(t2), mkU8(src_val)),
+                  mkU64(1))
+       ));
+   /* Set NDEP even though it isn't used.  This makes redundant-PUT
+      elimination of previous stores to this field work better. */
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+
+   return delta;
+}
+
+
+/* Signed/unsigned widening multiply.  Generate IR to multiply the
+   value in RAX/EAX/AX/AL by the given IRTemp, and park the result in
+   RDX:RAX/EDX:EAX/DX:AX/AX.
+*/
+/* Widening multiply of the accumulator (RAX/EAX/AX/AL, selected by
+   sz) by tmp; syned selects signed vs unsigned.  The double-width
+   product is parked in RDX:RAX / EDX:EAX / DX:AX / AX.  tmp_txt is
+   only used for disassembly printing. */
+static void codegen_mulL_A_D ( Int sz, Bool syned, 
+                               IRTemp tmp, const HChar* tmp_txt )
+{
+   IRType ty = szToITy(sz);
+   IRTemp t1 = newTemp(ty);
+
+   /* t1 = the accumulator operand */
+   assign( t1, getIRegRAX(sz) );
+
+   switch (ty) {
+      case Ity_I64: {
+         IRTemp res128  = newTemp(Ity_I128);
+         IRTemp resHi   = newTemp(Ity_I64);
+         IRTemp resLo   = newTemp(Ity_I64);
+         IROp   mulOp   = syned ? Iop_MullS64 : Iop_MullU64;
+         /* base (byte-sized) flag-thunk op; passed to setFlags_MUL
+            together with the operand type */
+         UInt   tBaseOp = syned ? AMD64G_CC_OP_SMULB : AMD64G_CC_OP_UMULB;
+         setFlags_MUL ( Ity_I64, t1, tmp, tBaseOp );
+         assign( res128, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
+         assign( resHi, unop(Iop_128HIto64,mkexpr(res128)));
+         assign( resLo, unop(Iop_128to64,mkexpr(res128)));
+         /* 128-bit product goes to RDX:RAX */
+         putIReg64(R_RDX, mkexpr(resHi));
+         putIReg64(R_RAX, mkexpr(resLo));
+         break;
+      }
+      case Ity_I32: {
+         IRTemp res64   = newTemp(Ity_I64);
+         IRTemp resHi   = newTemp(Ity_I32);
+         IRTemp resLo   = newTemp(Ity_I32);
+         IROp   mulOp   = syned ? Iop_MullS32 : Iop_MullU32;
+         UInt   tBaseOp = syned ? AMD64G_CC_OP_SMULB : AMD64G_CC_OP_UMULB;
+         setFlags_MUL ( Ity_I32, t1, tmp, tBaseOp );
+         assign( res64, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
+         assign( resHi, unop(Iop_64HIto32,mkexpr(res64)));
+         assign( resLo, unop(Iop_64to32,mkexpr(res64)));
+         /* 64-bit product goes to EDX:EAX */
+         putIRegRDX(4, mkexpr(resHi));
+         putIRegRAX(4, mkexpr(resLo));
+         break;
+      }
+      case Ity_I16: {
+         IRTemp res32   = newTemp(Ity_I32);
+         IRTemp resHi   = newTemp(Ity_I16);
+         IRTemp resLo   = newTemp(Ity_I16);
+         IROp   mulOp   = syned ? Iop_MullS16 : Iop_MullU16;
+         UInt   tBaseOp = syned ? AMD64G_CC_OP_SMULB : AMD64G_CC_OP_UMULB;
+         setFlags_MUL ( Ity_I16, t1, tmp, tBaseOp );
+         assign( res32, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
+         assign( resHi, unop(Iop_32HIto16,mkexpr(res32)));
+         assign( resLo, unop(Iop_32to16,mkexpr(res32)));
+         /* 32-bit product goes to DX:AX */
+         putIRegRDX(2, mkexpr(resHi));
+         putIRegRAX(2, mkexpr(resLo));
+         break;
+      }
+      case Ity_I8: {
+         IRTemp res16   = newTemp(Ity_I16);
+         IRTemp resHi   = newTemp(Ity_I8);
+         IRTemp resLo   = newTemp(Ity_I8);
+         IROp   mulOp   = syned ? Iop_MullS8 : Iop_MullU8;
+         UInt   tBaseOp = syned ? AMD64G_CC_OP_SMULB : AMD64G_CC_OP_UMULB;
+         setFlags_MUL ( Ity_I8, t1, tmp, tBaseOp );
+         assign( res16, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
+         assign( resHi, unop(Iop_16HIto8,mkexpr(res16)));
+         assign( resLo, unop(Iop_16to8,mkexpr(res16)));
+         /* NOTE(review): resHi/resLo are assigned but never used in
+            this case; the whole 16-bit product is written to AX. */
+         putIRegRAX(2, mkexpr(res16));
+         break;
+      }
+      default:
+         ppIRType(ty);
+         vpanic("codegen_mulL_A_D(amd64)");
+   }
+   DIP("%s%c %s\n", syned ? "imul" : "mul", nameISize(sz), tmp_txt);
+}
+
+
+/* Group 3 extended opcodes.  We have to decide here whether F2 and F3
+   might be valid.*/
+/* Handle a Group 3 insn: the modrm reg field selects
+   0=TEST imm, 1=invalid, 2=NOT, 3=NEG, 4=MUL, 5=IMUL, 6=DIV, 7=IDIV,
+   all operating on the E (reg-or-mem) operand.  Returns the updated
+   delta; clears *decode_OK for invalid/unhandled forms. */
+static 
+ULong dis_Grp3 ( const VexAbiInfo* vbi, 
+                 Prefix pfx, Int sz, Long delta, Bool* decode_OK )
+{
+   Long    d64;                  /* immediate for the TEST case */
+   UChar   modrm;
+   HChar   dis_buf[50];
+   Int     len;
+   IRTemp  addr;
+   IRType  ty = szToITy(sz);
+   IRTemp  t1 = newTemp(ty);
+   IRTemp dst1, src, dst0;       /* allocated lazily, per sub-case */
+   *decode_OK = True;
+   modrm = getUChar(delta);
+   if (epartIsReg(modrm)) {
+      /* F2/XACQ and F3/XREL are always invalid in the non-mem case. */
+      if (haveF2orF3(pfx)) goto unhandled;
+      switch (gregLO3ofRM(modrm)) {
+         case 0: { /* TEST */
+            delta++; 
+            d64 = getSDisp(imin(4,sz), delta); 
+            delta += imin(4,sz);
+            dst1 = newTemp(ty);
+            /* AND the immediate in, but only for the flags thunk; the
+               result is not written back. */
+            assign(dst1, binop(mkSizedOp(ty,Iop_And8),
+                               getIRegE(sz,pfx,modrm),
+                               mkU(ty, d64 & mkSizeMask(sz))));
+            setFlags_DEP1( Iop_And8, dst1, ty );
+            DIP("test%c $%lld, %s\n", 
+                nameISize(sz), d64, 
+                nameIRegE(sz, pfx, modrm));
+            break;
+         }
+         case 1:
+            *decode_OK = False;
+            return delta;
+         case 2: /* NOT */
+            delta++;
+            putIRegE(sz, pfx, modrm,
+                              unop(mkSizedOp(ty,Iop_Not8),
+                                   getIRegE(sz, pfx, modrm)));
+            DIP("not%c %s\n", nameISize(sz), 
+                              nameIRegE(sz, pfx, modrm));
+            break;
+         case 3: /* NEG */
+            delta++;
+            dst0 = newTemp(ty);
+            src  = newTemp(ty);
+            dst1 = newTemp(ty);
+            /* NEG is 0 - src, so flags come from the SUB thunk. */
+            assign(dst0, mkU(ty,0));
+            assign(src,  getIRegE(sz, pfx, modrm));
+            assign(dst1, binop(mkSizedOp(ty,Iop_Sub8), mkexpr(dst0),
+                                                       mkexpr(src)));
+            setFlags_DEP1_DEP2(Iop_Sub8, dst0, src, ty);
+            putIRegE(sz, pfx, modrm, mkexpr(dst1));
+            DIP("neg%c %s\n", nameISize(sz), nameIRegE(sz, pfx, modrm));
+            break;
+         case 4: /* MUL (unsigned widening) */
+            delta++;
+            src = newTemp(ty);
+            assign(src, getIRegE(sz,pfx,modrm));
+            codegen_mulL_A_D ( sz, False, src,
+                               nameIRegE(sz,pfx,modrm) );
+            break;
+         case 5: /* IMUL (signed widening) */
+            delta++;
+            src = newTemp(ty);
+            assign(src, getIRegE(sz,pfx,modrm));
+            codegen_mulL_A_D ( sz, True, src,
+                               nameIRegE(sz,pfx,modrm) );
+            break;
+         case 6: /* DIV */
+            delta++;
+            assign( t1, getIRegE(sz, pfx, modrm) );
+            codegen_div ( sz, t1, False );
+            DIP("div%c %s\n", nameISize(sz), 
+                              nameIRegE(sz, pfx, modrm));
+            break;
+         case 7: /* IDIV */
+            delta++;
+            assign( t1, getIRegE(sz, pfx, modrm) );
+            codegen_div ( sz, t1, True );
+            DIP("idiv%c %s\n", nameISize(sz), 
+                               nameIRegE(sz, pfx, modrm));
+            break;
+         default: 
+            /*NOTREACHED*/
+            vpanic("Grp3(amd64,R)");
+      }
+   } else {
+      /* Decide if F2/XACQ or F3/XREL might be valid. */
+      Bool validF2orF3 = haveF2orF3(pfx) ? False : True;
+      /* XACQ/XREL is acceptable for the mem-form NOT/NEG provided LOCK
+         is also present and only one of F2/F3 is. */
+      if ((gregLO3ofRM(modrm) == 3/*NEG*/ || gregLO3ofRM(modrm) == 2/*NOT*/)
+          && haveF2orF3(pfx) && !haveF2andF3(pfx) && haveLOCK(pfx)) {
+         validF2orF3 = True;
+      }
+      if (!validF2orF3) goto unhandled;
+      /* */
+      addr = disAMode ( &len, vbi, pfx, delta, dis_buf,
+                        /* we have to inform disAMode of any immediate
+                           bytes used */
+                        gregLO3ofRM(modrm)==0/*TEST*/
+                           ? imin(4,sz)
+                           : 0
+                      );
+      t1   = newTemp(ty);   /* fresh temp; replaces the one made above */
+      delta += len;
+      assign(t1, loadLE(ty,mkexpr(addr)));
+      switch (gregLO3ofRM(modrm)) {
+         case 0: { /* TEST */
+            d64 = getSDisp(imin(4,sz), delta); 
+            delta += imin(4,sz);
+            dst1 = newTemp(ty);
+            assign(dst1, binop(mkSizedOp(ty,Iop_And8),
+                               mkexpr(t1), 
+                               mkU(ty, d64 & mkSizeMask(sz))));
+            setFlags_DEP1( Iop_And8, dst1, ty );
+            DIP("test%c $%lld, %s\n", nameISize(sz), d64, dis_buf);
+            break;
+         }
+         case 1:
+            *decode_OK = False;
+            return delta;
+         case 2: /* NOT */
+            dst1 = newTemp(ty);
+            assign(dst1, unop(mkSizedOp(ty,Iop_Not8), mkexpr(t1)));
+            /* With LOCK, do the update as a compare-and-swap so the
+               read-modify-write is atomic. */
+            if (haveLOCK(pfx)) {
+               casLE( mkexpr(addr), mkexpr(t1)/*expd*/, mkexpr(dst1)/*new*/,
+                                    guest_RIP_curr_instr );
+            } else {
+               storeLE( mkexpr(addr), mkexpr(dst1) );
+            }
+            DIP("not%c %s\n", nameISize(sz), dis_buf);
+            break;
+         case 3: /* NEG */
+            dst0 = newTemp(ty);
+            src  = newTemp(ty);
+            dst1 = newTemp(ty);
+            assign(dst0, mkU(ty,0));
+            assign(src,  mkexpr(t1));
+            assign(dst1, binop(mkSizedOp(ty,Iop_Sub8), mkexpr(dst0),
+                                                       mkexpr(src)));
+            if (haveLOCK(pfx)) {
+               casLE( mkexpr(addr), mkexpr(t1)/*expd*/, mkexpr(dst1)/*new*/,
+                                    guest_RIP_curr_instr );
+            } else {
+               storeLE( mkexpr(addr), mkexpr(dst1) );
+            }
+            setFlags_DEP1_DEP2(Iop_Sub8, dst0, src, ty);
+            DIP("neg%c %s\n", nameISize(sz), dis_buf);
+            break;
+         case 4: /* MUL (unsigned widening) */
+            codegen_mulL_A_D ( sz, False, t1, dis_buf );
+            break;
+         case 5: /* IMUL */
+            codegen_mulL_A_D ( sz, True, t1, dis_buf );
+            break;
+         case 6: /* DIV */
+            codegen_div ( sz, t1, False );
+            DIP("div%c %s\n", nameISize(sz), dis_buf);
+            break;
+         case 7: /* IDIV */
+            codegen_div ( sz, t1, True );
+            DIP("idiv%c %s\n", nameISize(sz), dis_buf);
+            break;
+         default: 
+            /*NOTREACHED*/
+            vpanic("Grp3(amd64,M)");
+      }
+   }
+   return delta;
+  unhandled:
+   *decode_OK = False;
+   return delta;
+}
+
+
+/* Group 4 extended opcodes.  We have to decide here whether F2 and F3
+   might be valid. */
+/* Handle a Group 4 insn: byte-sized INC (reg field 0) or DEC (1) of
+   the E operand; all other reg-field values are invalid.  Returns
+   the updated delta; clears *decode_OK for undecodable forms. */
+static
+ULong dis_Grp4 ( const VexAbiInfo* vbi,
+                 Prefix pfx, Long delta, Bool* decode_OK )
+{
+   Int   alen;
+   UChar modrm;
+   HChar dis_buf[50];
+   IRType ty = Ity_I8;       /* Grp4 ops are always byte-sized */
+   IRTemp t1 = newTemp(ty);  /* original value */
+   IRTemp t2 = newTemp(ty);  /* incremented/decremented value */
+
+   *decode_OK = True;
+
+   modrm = getUChar(delta);
+   if (epartIsReg(modrm)) {
+      /* F2/XACQ and F3/XREL are always invalid in the non-mem case. */
+      if (haveF2orF3(pfx)) goto unhandled;
+      assign(t1, getIRegE(1, pfx, modrm));
+      switch (gregLO3ofRM(modrm)) {
+         case 0: /* INC */
+            assign(t2, binop(Iop_Add8, mkexpr(t1), mkU8(1)));
+            putIRegE(1, pfx, modrm, mkexpr(t2));
+            setFlags_INC_DEC( True, t2, ty );
+            break;
+         case 1: /* DEC */
+            assign(t2, binop(Iop_Sub8, mkexpr(t1), mkU8(1)));
+            putIRegE(1, pfx, modrm, mkexpr(t2));
+            setFlags_INC_DEC( False, t2, ty );
+            break;
+         default: 
+            *decode_OK = False;
+            return delta;
+      }
+      delta++;
+      DIP("%sb %s\n", nameGrp4(gregLO3ofRM(modrm)),
+                      nameIRegE(1, pfx, modrm));
+   } else {
+      /* Decide if F2/XACQ or F3/XREL might be valid. */
+      Bool validF2orF3 = haveF2orF3(pfx) ? False : True;
+      /* XACQ/XREL is acceptable for mem-form INC/DEC provided LOCK is
+         also present and only one of F2/F3 is. */
+      if ((gregLO3ofRM(modrm) == 0/*INC*/ || gregLO3ofRM(modrm) == 1/*DEC*/)
+          && haveF2orF3(pfx) && !haveF2andF3(pfx) && haveLOCK(pfx)) {
+         validF2orF3 = True;
+      }
+      if (!validF2orF3) goto unhandled;
+      /* */
+      IRTemp addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( t1, loadLE(ty, mkexpr(addr)) );
+      switch (gregLO3ofRM(modrm)) {
+         case 0: /* INC */
+            assign(t2, binop(Iop_Add8, mkexpr(t1), mkU8(1)));
+            /* With LOCK, update via compare-and-swap for atomicity. */
+            if (haveLOCK(pfx)) {
+               casLE( mkexpr(addr), mkexpr(t1)/*expd*/, mkexpr(t2)/*new*/, 
+                      guest_RIP_curr_instr );
+            } else {
+               storeLE( mkexpr(addr), mkexpr(t2) );
+            }
+            setFlags_INC_DEC( True, t2, ty );
+            break;
+         case 1: /* DEC */
+            assign(t2, binop(Iop_Sub8, mkexpr(t1), mkU8(1)));
+            if (haveLOCK(pfx)) {
+               casLE( mkexpr(addr), mkexpr(t1)/*expd*/, mkexpr(t2)/*new*/, 
+                      guest_RIP_curr_instr );
+            } else {
+               storeLE( mkexpr(addr), mkexpr(t2) );
+            }
+            setFlags_INC_DEC( False, t2, ty );
+            break;
+         default: 
+            *decode_OK = False;
+            return delta;
+      }
+      delta += alen;
+      DIP("%sb %s\n", nameGrp4(gregLO3ofRM(modrm)), dis_buf);
+   }
+   return delta;
+  unhandled:
+   *decode_OK = False;
+   return delta;
+}
+
+
+/* Group 5 extended opcodes.  We have to decide here whether F2 and F3
+   might be valid. */
+static
+ULong dis_Grp5 ( const VexAbiInfo* vbi,
+                 Prefix pfx, Int sz, Long delta,
+                 /*MOD*/DisResult* dres, /*OUT*/Bool* decode_OK )
+{
+   Int     len;
+   UChar   modrm;
+   HChar   dis_buf[50];
+   IRTemp  addr = IRTemp_INVALID;
+   IRType  ty = szToITy(sz);
+   IRTemp  t1 = newTemp(ty);
+   IRTemp  t2 = IRTemp_INVALID;
+   IRTemp  t3 = IRTemp_INVALID;
+   Bool    showSz = True;
+
+   *decode_OK = True;
+
+   modrm = getUChar(delta);
+   if (epartIsReg(modrm)) {
+      /* F2/XACQ and F3/XREL are always invalid in the non-mem case.
+         F2/CALL and F2/JMP may have bnd prefix. */
+     if (haveF2orF3(pfx)
+         && ! (haveF2(pfx)
+               && (gregLO3ofRM(modrm) == 2 || gregLO3ofRM(modrm) == 4)))
+        goto unhandledR;
+      assign(t1, getIRegE(sz,pfx,modrm));
+      switch (gregLO3ofRM(modrm)) {
+         case 0: /* INC */
+            t2 = newTemp(ty);
+            assign(t2, binop(mkSizedOp(ty,Iop_Add8),
+                             mkexpr(t1), mkU(ty,1)));
+            setFlags_INC_DEC( True, t2, ty );
+            putIRegE(sz,pfx,modrm, mkexpr(t2));
+            break;
+         case 1: /* DEC */
+            t2 = newTemp(ty);
+            assign(t2, binop(mkSizedOp(ty,Iop_Sub8),
+                             mkexpr(t1), mkU(ty,1)));
+            setFlags_INC_DEC( False, t2, ty );
+            putIRegE(sz,pfx,modrm, mkexpr(t2));
+            break;
+         case 2: /* call Ev */
+            /* Ignore any sz value and operate as if sz==8. */
+            if (!(sz == 4 || sz == 8)) goto unhandledR;
+            if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+            sz = 8;
+            t3 = newTemp(Ity_I64);
+            assign(t3, getIRegE(sz,pfx,modrm));
+            t2 = newTemp(Ity_I64);
+            assign(t2, binop(Iop_Sub64, getIReg64(R_RSP), mkU64(8)));
+            putIReg64(R_RSP, mkexpr(t2));
+            storeLE( mkexpr(t2), mkU64(guest_RIP_bbstart+delta+1));
+            make_redzone_AbiHint(vbi, t2, t3/*nia*/, "call-Ev(reg)");
+            jmp_treg(dres, Ijk_Call, t3);
+            vassert(dres->whatNext == Dis_StopHere);
+            showSz = False;
+            break;
+         case 4: /* jmp Ev */
+            /* Ignore any sz value and operate as if sz==8. */
+            if (!(sz == 4 || sz == 8)) goto unhandledR;
+            if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+            sz = 8;
+            t3 = newTemp(Ity_I64);
+            assign(t3, getIRegE(sz,pfx,modrm));
+            jmp_treg(dres, Ijk_Boring, t3);
+            vassert(dres->whatNext == Dis_StopHere);
+            showSz = False;
+            break;
+         case 6: /* PUSH Ev */
+            /* There is no encoding for 32-bit operand size; hence ... */
+            if (sz == 4) sz = 8;
+            if (sz == 8 || sz == 2) {
+               ty = szToITy(sz); /* redo it, since sz might have changed */
+               t3 = newTemp(ty);
+               assign(t3, getIRegE(sz,pfx,modrm));
+               t2 = newTemp(Ity_I64);
+               assign( t2, binop(Iop_Sub64,getIReg64(R_RSP),mkU64(sz)) );
+               putIReg64(R_RSP, mkexpr(t2) );
+               storeLE( mkexpr(t2), mkexpr(t3) );
+               break;
+            } else {
+               goto unhandledR; /* awaiting test case */
+            }
+         default:
+         unhandledR:
+            *decode_OK = False;
+            return delta;
+      }
+      delta++;
+      DIP("%s%c %s\n", nameGrp5(gregLO3ofRM(modrm)),
+                       showSz ? nameISize(sz) : ' ', 
+                       nameIRegE(sz, pfx, modrm));
+   } else {
+      /* Decide if F2/XACQ, F3/XREL, F2/CALL or F2/JMP might be valid. */
+      Bool validF2orF3 = haveF2orF3(pfx) ? False : True;
+      if ((gregLO3ofRM(modrm) == 0/*INC*/ || gregLO3ofRM(modrm) == 1/*DEC*/)
+          && haveF2orF3(pfx) && !haveF2andF3(pfx) && haveLOCK(pfx)) {
+         validF2orF3 = True;
+      } else if ((gregLO3ofRM(modrm) == 2 || gregLO3ofRM(modrm) == 4)
+                 && (haveF2(pfx) && !haveF3(pfx))) {
+         validF2orF3 = True;
+      }
+      if (!validF2orF3) goto unhandledM;
+      /* */
+      addr = disAMode ( &len, vbi, pfx, delta, dis_buf, 0 );
+      if (gregLO3ofRM(modrm) != 2 && gregLO3ofRM(modrm) != 4
+                                  && gregLO3ofRM(modrm) != 6) {
+         assign(t1, loadLE(ty,mkexpr(addr)));
+      }
+      switch (gregLO3ofRM(modrm)) {
+         case 0: /* INC */ 
+            t2 = newTemp(ty);
+            assign(t2, binop(mkSizedOp(ty,Iop_Add8),
+                             mkexpr(t1), mkU(ty,1)));
+            if (haveLOCK(pfx)) {
+               casLE( mkexpr(addr),
+                      mkexpr(t1), mkexpr(t2), guest_RIP_curr_instr );
+            } else {
+               storeLE(mkexpr(addr),mkexpr(t2));
+            }
+            setFlags_INC_DEC( True, t2, ty );
+            break;
+         case 1: /* DEC */ 
+            t2 = newTemp(ty);
+            assign(t2, binop(mkSizedOp(ty,Iop_Sub8),
+                             mkexpr(t1), mkU(ty,1)));
+            if (haveLOCK(pfx)) {
+               casLE( mkexpr(addr),
+                      mkexpr(t1), mkexpr(t2), guest_RIP_curr_instr );
+            } else {
+               storeLE(mkexpr(addr),mkexpr(t2));
+            }
+            setFlags_INC_DEC( False, t2, ty );
+            break;
+         case 2: /* call Ev */
+            /* Ignore any sz value and operate as if sz==8. */
+            if (!(sz == 4 || sz == 8)) goto unhandledM;
+            if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+            sz = 8;
+            t3 = newTemp(Ity_I64);
+            assign(t3, loadLE(Ity_I64,mkexpr(addr)));
+            t2 = newTemp(Ity_I64);
+            assign(t2, binop(Iop_Sub64, getIReg64(R_RSP), mkU64(8)));
+            putIReg64(R_RSP, mkexpr(t2));
+            storeLE( mkexpr(t2), mkU64(guest_RIP_bbstart+delta+len));
+            make_redzone_AbiHint(vbi, t2, t3/*nia*/, "call-Ev(mem)");
+            jmp_treg(dres, Ijk_Call, t3);
+            vassert(dres->whatNext == Dis_StopHere);
+            showSz = False;
+            break;
+         case 4: /* JMP Ev */
+            /* Ignore any sz value and operate as if sz==8. */
+            if (!(sz == 4 || sz == 8)) goto unhandledM;
+            if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+            sz = 8;
+            t3 = newTemp(Ity_I64);
+            assign(t3, loadLE(Ity_I64,mkexpr(addr)));
+            jmp_treg(dres, Ijk_Boring, t3);
+            vassert(dres->whatNext == Dis_StopHere);
+            showSz = False;
+            break;
+         case 6: /* PUSH Ev */
+            /* There is no encoding for 32-bit operand size; hence ... */
+            if (sz == 4) sz = 8;
+            if (sz == 8 || sz == 2) {
+               ty = szToITy(sz); /* redo it, since sz might have changed */
+               t3 = newTemp(ty);
+               assign(t3, loadLE(ty,mkexpr(addr)));
+               t2 = newTemp(Ity_I64);
+               assign( t2, binop(Iop_Sub64,getIReg64(R_RSP),mkU64(sz)) );
+               putIReg64(R_RSP, mkexpr(t2) );
+               storeLE( mkexpr(t2), mkexpr(t3) );
+               break;
+            } else {
+               goto unhandledM; /* awaiting test case */
+            }
+         default: 
+         unhandledM:
+            *decode_OK = False;
+            return delta;
+      }
+      delta += len;
+      DIP("%s%c %s\n", nameGrp5(gregLO3ofRM(modrm)),
+                       showSz ? nameISize(sz) : ' ', 
+                       dis_buf);
+   }
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Disassembling string ops (including REP prefixes)    ---*/
+/*------------------------------------------------------------*/
+
+/* Code shared by all the string ops */
+static
+void dis_string_op_increment ( Int sz, IRTemp t_inc )
+{
+   UChar logSz;
+   if (sz == 8 || sz == 4 || sz == 2) {
+      logSz = 1;
+      if (sz == 4) logSz = 2;
+      if (sz == 8) logSz = 3;
+      assign( t_inc, 
+              binop(Iop_Shl64, IRExpr_Get( OFFB_DFLAG, Ity_I64 ),
+                               mkU8(logSz) ) );
+   } else {
+      assign( t_inc, 
+              IRExpr_Get( OFFB_DFLAG, Ity_I64 ) );
+   }
+}
+
+static
+void dis_string_op( void (*dis_OP)( Int, IRTemp, Prefix pfx ),
+                    Int sz, const HChar* name, Prefix pfx )
+{
+   IRTemp t_inc = newTemp(Ity_I64);
+   /* Really we ought to inspect the override prefixes, but we don't.
+      The following assertion catches any resulting sillyness. */
+   vassert(pfx == clearSegBits(pfx));
+   dis_string_op_increment(sz, t_inc);
+   dis_OP( sz, t_inc, pfx );
+   DIP("%s%c\n", name, nameISize(sz));
+}
+
+static 
+void dis_MOVS ( Int sz, IRTemp t_inc, Prefix pfx )
+{
+   IRType ty = szToITy(sz);
+   IRTemp td = newTemp(Ity_I64);   /* RDI */
+   IRTemp ts = newTemp(Ity_I64);   /* RSI */
+   IRExpr *incd, *incs;
+
+   if (haveASO(pfx)) {
+      assign( td, unop(Iop_32Uto64, getIReg32(R_RDI)) );
+      assign( ts, unop(Iop_32Uto64, getIReg32(R_RSI)) );
+   } else {
+      assign( td, getIReg64(R_RDI) );
+      assign( ts, getIReg64(R_RSI) );
+   }
+
+   storeLE( mkexpr(td), loadLE(ty,mkexpr(ts)) );
+
+   incd = binop(Iop_Add64, mkexpr(td), mkexpr(t_inc));
+   incs = binop(Iop_Add64, mkexpr(ts), mkexpr(t_inc));
+   if (haveASO(pfx)) {
+      incd = unop(Iop_32Uto64, unop(Iop_64to32, incd));
+      incs = unop(Iop_32Uto64, unop(Iop_64to32, incs));
+   }
+   putIReg64( R_RDI, incd );
+   putIReg64( R_RSI, incs );
+}
+
+static 
+void dis_LODS ( Int sz, IRTemp t_inc, Prefix pfx )
+{
+   IRType ty = szToITy(sz);
+   IRTemp ts = newTemp(Ity_I64);   /* RSI */
+   IRExpr *incs;
+
+   if (haveASO(pfx))
+      assign( ts, unop(Iop_32Uto64, getIReg32(R_RSI)) );
+   else
+      assign( ts, getIReg64(R_RSI) );
+
+   putIRegRAX ( sz, loadLE(ty, mkexpr(ts)) );
+
+   incs = binop(Iop_Add64, mkexpr(ts), mkexpr(t_inc));
+   if (haveASO(pfx))
+      incs = unop(Iop_32Uto64, unop(Iop_64to32, incs));
+   putIReg64( R_RSI, incs );
+}
+
+static 
+void dis_STOS ( Int sz, IRTemp t_inc, Prefix pfx )
+{
+   IRType ty = szToITy(sz);
+   IRTemp ta = newTemp(ty);        /* rAX */
+   IRTemp td = newTemp(Ity_I64);   /* RDI */
+   IRExpr *incd;
+
+   assign( ta, getIRegRAX(sz) );
+
+   if (haveASO(pfx))
+      assign( td, unop(Iop_32Uto64, getIReg32(R_RDI)) );
+   else
+      assign( td, getIReg64(R_RDI) );
+
+   storeLE( mkexpr(td), mkexpr(ta) );
+
+   incd = binop(Iop_Add64, mkexpr(td), mkexpr(t_inc));
+   if (haveASO(pfx))
+      incd = unop(Iop_32Uto64, unop(Iop_64to32, incd));
+   putIReg64( R_RDI, incd );
+}
+
+static 
+void dis_CMPS ( Int sz, IRTemp t_inc, Prefix pfx )
+{
+   IRType ty  = szToITy(sz);
+   IRTemp tdv = newTemp(ty);      /* (RDI) */
+   IRTemp tsv = newTemp(ty);      /* (RSI) */
+   IRTemp td  = newTemp(Ity_I64); /*  RDI  */
+   IRTemp ts  = newTemp(Ity_I64); /*  RSI  */
+   IRExpr *incd, *incs;
+
+   if (haveASO(pfx)) {
+      assign( td, unop(Iop_32Uto64, getIReg32(R_RDI)) );
+      assign( ts, unop(Iop_32Uto64, getIReg32(R_RSI)) );
+   } else {
+      assign( td, getIReg64(R_RDI) );
+      assign( ts, getIReg64(R_RSI) );
+   }
+
+   assign( tdv, loadLE(ty,mkexpr(td)) );
+
+   assign( tsv, loadLE(ty,mkexpr(ts)) );
+
+   setFlags_DEP1_DEP2 ( Iop_Sub8, tsv, tdv, ty );
+
+   incd = binop(Iop_Add64, mkexpr(td), mkexpr(t_inc));
+   incs = binop(Iop_Add64, mkexpr(ts), mkexpr(t_inc));
+   if (haveASO(pfx)) {
+      incd = unop(Iop_32Uto64, unop(Iop_64to32, incd));
+      incs = unop(Iop_32Uto64, unop(Iop_64to32, incs));
+   }
+   putIReg64( R_RDI, incd );
+   putIReg64( R_RSI, incs );
+}
+
+static 
+void dis_SCAS ( Int sz, IRTemp t_inc, Prefix pfx )
+{
+   IRType ty  = szToITy(sz);
+   IRTemp ta  = newTemp(ty);       /*  rAX  */
+   IRTemp td  = newTemp(Ity_I64);  /*  RDI  */
+   IRTemp tdv = newTemp(ty);       /* (RDI) */
+   IRExpr *incd;
+
+   assign( ta, getIRegRAX(sz) );
+
+   if (haveASO(pfx))
+      assign( td, unop(Iop_32Uto64, getIReg32(R_RDI)) );
+   else
+      assign( td, getIReg64(R_RDI) );
+
+   assign( tdv, loadLE(ty,mkexpr(td)) );
+
+   setFlags_DEP1_DEP2 ( Iop_Sub8, ta, tdv, ty );
+
+   incd = binop(Iop_Add64, mkexpr(td), mkexpr(t_inc));
+   if (haveASO(pfx))
+      incd = unop(Iop_32Uto64, unop(Iop_64to32, incd));
+   putIReg64( R_RDI, incd );
+}
+
+
+/* Wrap the appropriate string op inside a REP/REPE/REPNE.  We assume
+   the insn is the last one in the basic block, and so emit a jump to
+   the next insn, rather than just falling through. */
+static 
+void dis_REP_op ( /*MOD*/DisResult* dres,
+                  AMD64Condcode cond,
+                  void (*dis_OP)(Int, IRTemp, Prefix),
+                  Int sz, Addr64 rip, Addr64 rip_next, const HChar* name,
+                  Prefix pfx )
+{
+   IRTemp t_inc = newTemp(Ity_I64);
+   IRTemp tc;
+   IRExpr* cmp;
+
+   /* Really we ought to inspect the override prefixes, but we don't.
+      The following assertion catches any resulting sillyness. */
+   vassert(pfx == clearSegBits(pfx));
+
+   if (haveASO(pfx)) {
+      tc = newTemp(Ity_I32);  /*  ECX  */
+      assign( tc, getIReg32(R_RCX) );
+      cmp = binop(Iop_CmpEQ32, mkexpr(tc), mkU32(0));
+   } else {
+      tc = newTemp(Ity_I64);  /*  RCX  */
+      assign( tc, getIReg64(R_RCX) );
+      cmp = binop(Iop_CmpEQ64, mkexpr(tc), mkU64(0));
+   }
+
+   stmt( IRStmt_Exit( cmp, Ijk_Boring,
+                      IRConst_U64(rip_next), OFFB_RIP ) );
+
+   if (haveASO(pfx))
+      putIReg32(R_RCX, binop(Iop_Sub32, mkexpr(tc), mkU32(1)) );
+  else
+      putIReg64(R_RCX, binop(Iop_Sub64, mkexpr(tc), mkU64(1)) );
+
+   dis_string_op_increment(sz, t_inc);
+   dis_OP (sz, t_inc, pfx);
+
+   if (cond == AMD64CondAlways) {
+      jmp_lit(dres, Ijk_Boring, rip);
+      vassert(dres->whatNext == Dis_StopHere);
+   } else {
+      stmt( IRStmt_Exit( mk_amd64g_calculate_condition(cond),
+                         Ijk_Boring,
+                         IRConst_U64(rip),
+                         OFFB_RIP ) );
+      jmp_lit(dres, Ijk_Boring, rip_next);
+      vassert(dres->whatNext == Dis_StopHere);
+   }
+   DIP("%s%c\n", name, nameISize(sz));
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Arithmetic, etc.                                     ---*/
+/*------------------------------------------------------------*/
+
+/* IMUL E, G.  Supplied eip points to the modR/M byte. */
static
ULong dis_mul_E_G ( const VexAbiInfo* vbi,
                    Prefix      pfx,
                    Int         size, 
                    Long        delta0 )
{
   Int    alen;
   HChar  dis_buf[50];
   UChar  rm = getUChar(delta0);
   IRType ty = szToITy(size);
   IRTemp te = newTemp(ty);     /* the E operand (reg or mem) */
   IRTemp tg = newTemp(ty);     /* the G (destination) register */
   IRTemp resLo = newTemp(ty);  /* low half of the product */

   assign( tg, getIRegG(size, pfx, rm) );
   if (epartIsReg(rm)) {
      assign( te, getIRegE(size, pfx, rm) );
   } else {
      IRTemp addr = disAMode( &alen, vbi, pfx, delta0, dis_buf, 0 );
      assign( te, loadLE(ty,mkexpr(addr)) );
   }

   /* NOTE(review): presumably setFlags_MUL adjusts the 8-bit base op
      AMD64G_CC_OP_SMULB for 'ty' -- its definition is not visible
      here; confirm. */
   setFlags_MUL ( ty, te, tg, AMD64G_CC_OP_SMULB );

   assign( resLo, binop( mkSizedOp(ty, Iop_Mul8), mkexpr(te), mkexpr(tg) ) );

   putIRegG(size, pfx, rm, mkexpr(resLo) );

   /* Return delta0 advanced past the modR/M byte (and any amode
      bytes, for the memory case). */
   if (epartIsReg(rm)) {
      DIP("imul%c %s, %s\n", nameISize(size), 
                             nameIRegE(size,pfx,rm),
                             nameIRegG(size,pfx,rm));
      return 1+delta0;
   } else {
      DIP("imul%c %s, %s\n", nameISize(size), 
                             dis_buf, 
                             nameIRegG(size,pfx,rm));
      return alen+delta0;
   }
}
+
+
+/* IMUL I * E -> G.  Supplied rip points to the modR/M byte. */
static
ULong dis_imul_I_E_G ( const VexAbiInfo* vbi,
                       Prefix      pfx,
                       Int         size, 
                       Long        delta,
                       Int         litsize )
{
   Long   d64;
   Int    alen;
   HChar  dis_buf[50];
   UChar  rm = getUChar(delta);
   IRType ty = szToITy(size);
   IRTemp te = newTemp(ty);     /* the E operand (reg or mem) */
   IRTemp tl = newTemp(ty);     /* the immediate */
   IRTemp resLo = newTemp(ty);  /* low half of the product */

   vassert(/*size == 1 ||*/ size == 2 || size == 4 || size == 8);

   if (epartIsReg(rm)) {
      assign(te, getIRegE(size, pfx, rm));
      delta++;
   } else {
      /* The immediate (at most 4 bytes, even when litsize is 8)
         follows the amode; its length is passed to disAMode,
         presumably so trailing-byte/RIP-relative accounting is
         right -- confirm against disAMode. */
      IRTemp addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
                                     imin(4,litsize) );
      assign(te, loadLE(ty, mkexpr(addr)));
      delta += alen;
   }
   d64 = getSDisp(imin(4,litsize),delta);
   delta += imin(4,litsize);

   /* Truncate the sign-extended immediate to the operand size. */
   d64 &= mkSizeMask(size);
   assign(tl, mkU(ty,d64));

   assign( resLo, binop( mkSizedOp(ty, Iop_Mul8), mkexpr(te), mkexpr(tl) ));

   /* NOTE(review): presumably setFlags_MUL adjusts AMD64G_CC_OP_SMULB
      for 'ty' -- its definition is not visible here; confirm. */
   setFlags_MUL ( ty, te, tl, AMD64G_CC_OP_SMULB );

   putIRegG(size, pfx, rm, mkexpr(resLo));

   DIP("imul%c $%lld, %s, %s\n", 
       nameISize(size), d64, 
       ( epartIsReg(rm) ? nameIRegE(size,pfx,rm) : dis_buf ),
       nameIRegG(size,pfx,rm) );
   return delta;
}
+
+
+/* Generate an IR sequence to do a popcount operation on the supplied
+   IRTemp, and return a new IRTemp holding the result.  'ty' may be
+   Ity_I16, Ity_I32 or Ity_I64 only. */
+static IRTemp gen_POPCOUNT ( IRType ty, IRTemp src )
+{
+   Int i;
+   if (ty == Ity_I16) {
+      IRTemp old = IRTemp_INVALID;
+      IRTemp nyu = IRTemp_INVALID;
+      IRTemp mask[4], shift[4];
+      for (i = 0; i < 4; i++) {
+         mask[i]  = newTemp(ty);
+         shift[i] = 1 << i;
+      }
+      assign(mask[0], mkU16(0x5555));
+      assign(mask[1], mkU16(0x3333));
+      assign(mask[2], mkU16(0x0F0F));
+      assign(mask[3], mkU16(0x00FF));
+      old = src;
+      for (i = 0; i < 4; i++) {
+         nyu = newTemp(ty);
+         assign(nyu,
+                binop(Iop_Add16, 
+                      binop(Iop_And16,
+                            mkexpr(old),
+                            mkexpr(mask[i])),
+                      binop(Iop_And16,
+                            binop(Iop_Shr16, mkexpr(old), mkU8(shift[i])),
+                            mkexpr(mask[i]))));
+         old = nyu;
+      }
+      return nyu;
+   }
+   if (ty == Ity_I32) {
+      IRTemp old = IRTemp_INVALID;
+      IRTemp nyu = IRTemp_INVALID;
+      IRTemp mask[5], shift[5];
+      for (i = 0; i < 5; i++) {
+         mask[i]  = newTemp(ty);
+         shift[i] = 1 << i;
+      }
+      assign(mask[0], mkU32(0x55555555));
+      assign(mask[1], mkU32(0x33333333));
+      assign(mask[2], mkU32(0x0F0F0F0F));
+      assign(mask[3], mkU32(0x00FF00FF));
+      assign(mask[4], mkU32(0x0000FFFF));
+      old = src;
+      for (i = 0; i < 5; i++) {
+         nyu = newTemp(ty);
+         assign(nyu,
+                binop(Iop_Add32, 
+                      binop(Iop_And32,
+                            mkexpr(old),
+                            mkexpr(mask[i])),
+                      binop(Iop_And32,
+                            binop(Iop_Shr32, mkexpr(old), mkU8(shift[i])),
+                            mkexpr(mask[i]))));
+         old = nyu;
+      }
+      return nyu;
+   }
+   if (ty == Ity_I64) {
+      IRTemp old = IRTemp_INVALID;
+      IRTemp nyu = IRTemp_INVALID;
+      IRTemp mask[6], shift[6];
+      for (i = 0; i < 6; i++) {
+         mask[i]  = newTemp(ty);
+         shift[i] = 1 << i;
+      }
+      assign(mask[0], mkU64(0x5555555555555555ULL));
+      assign(mask[1], mkU64(0x3333333333333333ULL));
+      assign(mask[2], mkU64(0x0F0F0F0F0F0F0F0FULL));
+      assign(mask[3], mkU64(0x00FF00FF00FF00FFULL));
+      assign(mask[4], mkU64(0x0000FFFF0000FFFFULL));
+      assign(mask[5], mkU64(0x00000000FFFFFFFFULL));
+      old = src;
+      for (i = 0; i < 6; i++) {
+         nyu = newTemp(ty);
+         assign(nyu,
+                binop(Iop_Add64, 
+                      binop(Iop_And64,
+                            mkexpr(old),
+                            mkexpr(mask[i])),
+                      binop(Iop_And64,
+                            binop(Iop_Shr64, mkexpr(old), mkU8(shift[i])),
+                            mkexpr(mask[i]))));
+         old = nyu;
+      }
+      return nyu;
+   }
+   /*NOTREACHED*/
+   vassert(0);
+}
+
+
+/* Generate an IR sequence to do a count-leading-zeroes operation on
+   the supplied IRTemp, and return a new IRTemp holding the result.
+   'ty' may be Ity_I16, Ity_I32 or Ity_I64 only.  In the case where
+   the argument is zero, return the number of bits in the word (the
+   natural semantics). */
+static IRTemp gen_LZCNT ( IRType ty, IRTemp src )
+{
+   vassert(ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16);
+
+   IRTemp src64 = newTemp(Ity_I64);
+   assign(src64, widenUto64( mkexpr(src) ));
+
+   IRTemp src64x = newTemp(Ity_I64);
+   assign(src64x, 
+          binop(Iop_Shl64, mkexpr(src64),
+                           mkU8(64 - 8 * sizeofIRType(ty))));
+
+   // Clz64 has undefined semantics when its input is zero, so
+   // special-case around that.
+   IRTemp res64 = newTemp(Ity_I64);
+   assign(res64,
+          IRExpr_ITE(
+             binop(Iop_CmpEQ64, mkexpr(src64x), mkU64(0)),
+             mkU64(8 * sizeofIRType(ty)),
+             unop(Iop_Clz64, mkexpr(src64x))
+   ));
+
+   IRTemp res = newTemp(ty);
+   assign(res, narrowTo(ty, mkexpr(res64)));
+   return res;
+}
+
+
+/* Generate an IR sequence to do a count-trailing-zeroes operation on
+   the supplied IRTemp, and return a new IRTemp holding the result.
+   'ty' may be Ity_I16, Ity_I32 or Ity_I64 only.  In the case where
+   the argument is zero, return the number of bits in the word (the
+   natural semantics). */
+static IRTemp gen_TZCNT ( IRType ty, IRTemp src )
+{
+   vassert(ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16);
+
+   IRTemp src64 = newTemp(Ity_I64);
+   assign(src64, widenUto64( mkexpr(src) ));
+
+   // Ctz64 has undefined semantics when its input is zero, so
+   // special-case around that.
+   IRTemp res64 = newTemp(Ity_I64);
+   assign(res64,
+          IRExpr_ITE(
+             binop(Iop_CmpEQ64, mkexpr(src64), mkU64(0)),
+             mkU64(8 * sizeofIRType(ty)),
+             unop(Iop_Ctz64, mkexpr(src64))
+   ));
+
+   IRTemp res = newTemp(ty);
+   assign(res, narrowTo(ty, mkexpr(res64)));
+   return res;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- x87 FLOATING POINT INSTRUCTIONS                      ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* --- Helper functions for dealing with the register stack. --- */
+
+/* --- Set the emulation-warning pseudo-register. --- */
+
static void put_emwarn ( IRExpr* e /* :: Ity_I32 */ )
{
   /* Record an emulation-note code in the guest state. */
   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
   stmt( IRStmt_Put( OFFB_EMNOTE, e ) );
}
+
+/* --- Produce an IRExpr* denoting a 64-bit QNaN. --- */
+
static IRExpr* mkQNaN64 ( void )
{
  /* QNaN is 0 2047 1 0(51times) 
     == 0b 11111111111b 1 0(51times)
     == 0x7FF8 0000 0000 0000
     Used (e.g. by get_ST/put_ST) as the value read from or written
     over an empty x87 register.
   */
   return IRExpr_Const(IRConst_F64i(0x7FF8000000000000ULL));
}
+
+/* --------- Get/put the top-of-stack pointer :: Ity_I32 --------- */
+
static IRExpr* get_ftop ( void )
{
   /* Read the x87 top-of-stack index from the guest state. */
   return IRExpr_Get( OFFB_FTOP, Ity_I32 );
}
+
static void put_ftop ( IRExpr* e )
{
   /* Write e (:: Ity_I32) as the new x87 top-of-stack index. */
   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
   stmt( IRStmt_Put( OFFB_FTOP, e ) );
}
+
+/* --------- Get/put the C3210 bits. --------- */
+
static IRExpr*  /* :: Ity_I64 */ get_C3210 ( void )
{
   /* Read the x87 C3..C0 condition-code bits from the guest state. */
   return IRExpr_Get( OFFB_FC3210, Ity_I64 );
}
+
static void put_C3210 ( IRExpr* e  /* :: Ity_I64 */ )
{
   /* Write e as the new x87 C3..C0 condition-code bits. */
   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64);
   stmt( IRStmt_Put( OFFB_FC3210, e ) );
}
+
+/* --------- Get/put the FPU rounding mode. --------- */
static IRExpr* /* :: Ity_I32 */ get_fpround ( void )
{
   /* FPROUND occupies a 64-bit guest-state slot; present it as I32. */
   return unop(Iop_64to32, IRExpr_Get( OFFB_FPROUND, Ity_I64 ));
}
+
static void put_fpround ( IRExpr* /* :: Ity_I32 */ e )
{
   /* Widen to the 64-bit guest-state slot before storing. */
   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
   stmt( IRStmt_Put( OFFB_FPROUND, unop(Iop_32Uto64,e) ) );
}
+
+
+/* --------- Synthesise a 2-bit FPU rounding mode. --------- */
+/* Produces a value in 0 .. 3, which is encoded as per the type
+   IRRoundingMode.  Since the guest_FPROUND value is also encoded as
+   per IRRoundingMode, we merely need to get it and mask it for
+   safety.
+*/
static IRExpr* /* :: Ity_I32 */ get_roundingmode ( void )
{
   /* guest_FPROUND is already encoded per IRRoundingMode; the mask to
      2 bits is just for safety (see comment above). */
   return binop( Iop_And32, get_fpround(), mkU32(3) );
}
+
static IRExpr* /* :: Ity_I32 */ get_FAKE_roundingmode ( void )
{
   /* Used where the real x87 rounding mode is not modelled: always
      pretend round-to-nearest (see XXXROUNDINGFIXME at call sites). */
   return mkU32(Irrm_NEAREST);
}
+
+
+/* --------- Get/set FP register tag bytes. --------- */
+
+/* Given i, and some expression e, generate 'ST_TAG(i) = e'. */
+
static void put_ST_TAG ( Int i, IRExpr* value )
{
   IRRegArray* descr;
   /* value :: Ity_I8; 0 means Empty, nonzero means NonEmpty. */
   vassert(typeOfIRExpr(irsb->tyenv, value) == Ity_I8);
   descr = mkIRRegArray( OFFB_FPTAGS, Ity_I8, 8 );
   /* Indexing is relative to FTOP, i.e. 'i' names ST(i). */
   stmt( IRStmt_PutI( mkIRPutI(descr, get_ftop(), i, value) ) );
}
+
+/* Given i, generate an expression yielding 'ST_TAG(i)'.  This will be
+   zero to indicate "Empty" and nonzero to indicate "NonEmpty".  */
+
static IRExpr* get_ST_TAG ( Int i )
{
   /* Indexing is relative to FTOP, i.e. 'i' names ST(i). */
   IRRegArray* descr = mkIRRegArray( OFFB_FPTAGS, Ity_I8, 8 );
   return IRExpr_GetI( descr, get_ftop(), i );
}
+
+
+/* --------- Get/set FP registers. --------- */
+
+/* Given i, and some expression e, emit 'ST(i) = e' and set the
+   register's tag to indicate the register is full.  The previous
+   state of the register is not checked. */
+
static void put_ST_UNCHECKED ( Int i, IRExpr* value )
{
   IRRegArray* descr;
   /* value :: Ity_F64; written to ST(i) regardless of its tag. */
   vassert(typeOfIRExpr(irsb->tyenv, value) == Ity_F64);
   descr = mkIRRegArray( OFFB_FPREGS, Ity_F64, 8 );
   stmt( IRStmt_PutI( mkIRPutI(descr, get_ftop(), i, value) ) );
   /* Mark the register as in-use. */
   put_ST_TAG(i, mkU8(1));
}
+
+/* Given i, and some expression e, emit
+      ST(i) = is_full(i) ? NaN : e
+   and set the tag accordingly.
+*/
+
static void put_ST ( Int i, IRExpr* value )
{
   /* If ST(i) is already full, store a QNaN instead of 'value'; the
      tag becomes full either way (via put_ST_UNCHECKED). */
   put_ST_UNCHECKED(
      i,
      IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
                  /* non-0 means full */
                  mkQNaN64(),
                  /* 0 means empty */
                  value
      )
   );
}
+
+
+/* Given i, generate an expression yielding 'ST(i)'. */
+
static IRExpr* get_ST_UNCHECKED ( Int i )
{
   /* Read ST(i) without consulting its tag. */
   IRRegArray* descr = mkIRRegArray( OFFB_FPREGS, Ity_F64, 8 );
   return IRExpr_GetI( descr, get_ftop(), i );
}
+
+
+/* Given i, generate an expression yielding 
+  is_full(i) ? ST(i) : NaN
+*/
+
static IRExpr* get_ST ( Int i )
{
   /* Reading an Empty register (tag == 0) yields a QNaN. */
   return
      IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
                  /* non-0 means full */
                  get_ST_UNCHECKED(i),
                  /* 0 means empty */
                  mkQNaN64());
}
+
+
+/* Given i, and some expression e, and a condition cond, generate IR
+   which has the same effect as put_ST(i,e) when cond is true and has
+   no effect when cond is false.  Given the lack of proper
+   if-then-else in the IR, this is pretty tricky.
+*/
+
static void maybe_put_ST ( IRTemp cond, Int i, IRExpr* value )
{
   // cond :: Ity_I1 (used as an ITE guard); value :: Ity_F64.
   // new_tag = if cond then FULL else old_tag
   // new_val = if cond then (if old_tag==FULL then NaN else val)
   //                   else old_val

   IRTemp old_tag = newTemp(Ity_I8);
   assign(old_tag, get_ST_TAG(i));
   IRTemp new_tag = newTemp(Ity_I8);
   assign(new_tag,
          IRExpr_ITE(mkexpr(cond), mkU8(1)/*FULL*/, mkexpr(old_tag)));

   IRTemp old_val = newTemp(Ity_F64);
   assign(old_val, get_ST_UNCHECKED(i));
   IRTemp new_val = newTemp(Ity_F64);
   assign(new_val,
          IRExpr_ITE(mkexpr(cond),
                     IRExpr_ITE(binop(Iop_CmpNE8, mkexpr(old_tag), mkU8(0)),
                                /* non-0 means full */
                                mkQNaN64(),
                                /* 0 means empty */
                                value),
                     mkexpr(old_val)));

   put_ST_UNCHECKED(i, mkexpr(new_val));
   // put_ST_UNCHECKED incorrectly sets tag(i) to always be FULL.  So 
   // now set it to new_tag instead.
   put_ST_TAG(i, mkexpr(new_tag));
}
+
+/* Adjust FTOP downwards by one register. */
+
static void fp_push ( void )
{
   /* Only moves FTOP; writing (and hence tagging) the new ST(0) is
      left to the caller. */
   put_ftop( binop(Iop_Sub32, get_ftop(), mkU32(1)) );
}
+
+/* Adjust FTOP downwards by one register when COND is 1:I1.  Else
+   don't change it. */
+
static void maybe_fp_push ( IRTemp cond )
{
   /* Subtract cond (0 or 1, widened from I1) from FTOP. */
   put_ftop( binop(Iop_Sub32, get_ftop(), unop(Iop_1Uto32,mkexpr(cond))) );
}
+
+/* Adjust FTOP upwards by one register, and mark the vacated register
+   as empty.  */
+
static void fp_pop ( void )
{
   /* Mark the old ST(0) Empty, then advance FTOP past it. */
   put_ST_TAG(0, mkU8(0));
   put_ftop( binop(Iop_Add32, get_ftop(), mkU32(1)) );
}
+
+/* Set the C2 bit of the FPU status register to e[0].  Assumes that
+   e[31:1] == 0. 
+*/
static void set_C2 ( IRExpr* e )
{
   /* Clear the C2 bit, then OR in e shifted up to the C2 position. */
   IRExpr* cleared = binop(Iop_And64, get_C3210(), mkU64(~AMD64G_FC_MASK_C2));
   put_C3210( binop(Iop_Or64,
                    cleared,
                    binop(Iop_Shl64, e, mkU8(AMD64G_FC_SHIFT_C2))) );
}
+
+/* Generate code to check that abs(d64) < 2^63 and is finite.  This is
+   used to do the range checks for FSIN, FCOS, FSINCOS and FPTAN.  The
+   test is simple, but the derivation of it is not so simple.
+
+   The exponent field for an IEEE754 double is 11 bits.  That means it
+   can take values 0 through 0x7FF.  If the exponent has value 0x7FF,
+   the number is either a NaN or an Infinity and so is not finite.
+   Furthermore, a finite value of exactly 2^63 is the smallest value
+   that has exponent value 0x43E.  Hence, what we need to do is
+   extract the exponent, ignoring the sign bit and mantissa, and check
+   it is < 0x43E, or <= 0x43D.
+
+   To make this easily applicable to 32- and 64-bit targets, a
+   roundabout approach is used.  First the number is converted to I64,
+   then the top 32 bits are taken.  Shifting them right by 20 bits
+   places the sign bit and exponent in the bottom 12 bits.  Anding
+   with 0x7FF gets rid of the sign bit, leaving just the exponent
+   available for comparison.
+*/
static IRTemp math_IS_TRIG_ARG_FINITE_AND_IN_RANGE ( IRTemp d64 )
{
   /* Reinterpret the F64 bits as an integer so the exponent field can
      be picked apart. */
   IRTemp i64 = newTemp(Ity_I64);
   assign(i64, unop(Iop_ReinterpF64asI64, mkexpr(d64)) );
   /* Top 32 bits >> 20 puts sign+exponent in the low 12 bits; masking
      with 0x7FF drops the sign. */
   IRTemp exponent = newTemp(Ity_I32);
   assign(exponent,
          binop(Iop_And32,
                binop(Iop_Shr32, unop(Iop_64HIto32, mkexpr(i64)), mkU8(20)),
                mkU32(0x7FF)));
   /* exponent <= 0x43D  <=>  |d64| < 2^63 and finite (see above). */
   IRTemp in_range_and_finite = newTemp(Ity_I1);
   assign(in_range_and_finite,
          binop(Iop_CmpLE32U, mkexpr(exponent), mkU32(0x43D)));
   return in_range_and_finite;
}
+
+/* Invent a plausible-looking FPU status word value:
+      ((ftop & 7) << 11) | (c3210 & 0x4700)
+ */
+static IRExpr* get_FPU_sw ( void )
+{
+   return
+      unop(Iop_32to16,
+           binop(Iop_Or32,
+                 binop(Iop_Shl32, 
+                       binop(Iop_And32, get_ftop(), mkU32(7)), 
+                             mkU8(11)),
+                       binop(Iop_And32, unop(Iop_64to32, get_C3210()), 
+                                        mkU32(0x4700))
+      ));
+}
+
+
+/* ------------------------------------------------------- */
+/* Given all that stack-mangling junk, we can now go ahead
+   and describe FP instructions. 
+*/
+
+/* ST(0) = ST(0) `op` mem64/32(addr)
+   Need to check ST(0)'s tag on read, but not on write.
+*/
+static
+void fp_do_op_mem_ST_0 ( IRTemp addr, const HChar* op_txt, HChar* dis_buf, 
+                         IROp op, Bool dbl )
+{
+   DIP("f%s%c %s\n", op_txt, dbl?'l':'s', dis_buf);
+   if (dbl) {
+      put_ST_UNCHECKED(0, 
+         triop( op, 
+                get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                get_ST(0), 
+                loadLE(Ity_F64,mkexpr(addr))
+         ));
+   } else {
+      put_ST_UNCHECKED(0, 
+         triop( op, 
+                get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                get_ST(0), 
+                unop(Iop_F32toF64, loadLE(Ity_F32,mkexpr(addr)))
+         ));
+   }
+}
+
+
+/* ST(0) = mem64/32(addr) `op` ST(0)
+   Need to check ST(0)'s tag on read, but not on write.
+*/
+static
+void fp_do_oprev_mem_ST_0 ( IRTemp addr, const HChar* op_txt, HChar* dis_buf, 
+                            IROp op, Bool dbl )
+{
+   DIP("f%s%c %s\n", op_txt, dbl?'l':'s', dis_buf);
+   if (dbl) {
+      put_ST_UNCHECKED(0, 
+         triop( op, 
+                get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                loadLE(Ity_F64,mkexpr(addr)),
+                get_ST(0)
+         ));
+   } else {
+      put_ST_UNCHECKED(0, 
+         triop( op, 
+                get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                unop(Iop_F32toF64, loadLE(Ity_F32,mkexpr(addr))),
+                get_ST(0)
+         ));
+   }
+}
+
+
+/* ST(dst) = ST(dst) `op` ST(src).
+   Check dst and src tags when reading but not on write.
+*/
static
void fp_do_op_ST_ST ( const HChar* op_txt, IROp op, UInt st_src, UInt st_dst,
                      Bool pop_after )
{
   DIP("f%s%s st(%u), st(%u)\n", op_txt, pop_after?"p":"", st_src, st_dst );
   /* triop argument order: (rounding mode, dst, src). */
   put_ST_UNCHECKED( 
      st_dst, 
      triop( op, 
             get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
             get_ST(st_dst), 
             get_ST(st_src) ) 
   );
   if (pop_after)
      fp_pop();
}
+
+/* ST(dst) = ST(src) `op` ST(dst).
+   Check dst and src tags when reading but not on write.
+*/
static
void fp_do_oprev_ST_ST ( const HChar* op_txt, IROp op, UInt st_src, UInt st_dst,
                         Bool pop_after )
{
   DIP("f%s%s st(%u), st(%u)\n", op_txt, pop_after?"p":"", st_src, st_dst );
   /* triop argument order: (rounding mode, src, dst) -- the reversed
      form, cf. fp_do_op_ST_ST. */
   put_ST_UNCHECKED( 
      st_dst, 
      triop( op, 
             get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
             get_ST(st_src), 
             get_ST(st_dst) ) 
   );
   if (pop_after)
      fp_pop();
}
+
+/* %rflags(Z,P,C) = UCOMI( st(0), st(i) ) */
static void fp_do_ucomi_ST0_STi ( UInt i, Bool pop_after )
{
   DIP("fucomi%s %%st(0),%%st(%u)\n", pop_after ? "p" : "", i);
   /* This is a bit of a hack (and isn't really right).  It sets
      Z,P,C,O correctly, but forces A and S to zero, whereas the Intel
      documentation implies A and S are unchanged. 
   */
   /* It's also fishy in that it is used both for COMIP and
      UCOMIP, and they aren't the same (although similar). */
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
   /* The 0x45 mask keeps only the wanted condition bits of the
      CmpF64 result. */
   stmt( IRStmt_Put( 
            OFFB_CC_DEP1,
            binop( Iop_And64,
                   unop( Iop_32Uto64,
                         binop(Iop_CmpF64, get_ST(0), get_ST(i))),
                   mkU64(0x45)
        )));
   if (pop_after)
      fp_pop();
}
+
+
+/* returns 
+   32to16( if e32 <s -32768 || e32 >s 32767 then -32768 else e32 )
+   i.e. an x87-style saturating narrow of I32 to I16: out-of-range
+   values collapse to the "integer indefinite" value 0x8000 rather
+   than wrapping, as FIST(P) requires for overflowing stores.
+*/
+static IRExpr* x87ishly_qnarrow_32_to_16 ( IRExpr* e32 )
+{
+   /* Bind the input to a temp so it is evaluated only once. */
+   IRTemp t32 = newTemp(Ity_I32);
+   assign( t32, e32 );
+   /* Range test by biasing: (t32 + 32768) fits in [0, 65535] iff
+      t32 is in [-32768, 32767].  The sum is zero-extended to 64 bits
+      and compared unsigned against 65536 to avoid wraparound issues. */
+   return
+      IRExpr_ITE( 
+         binop(Iop_CmpLT64U, 
+               unop(Iop_32Uto64, 
+                    binop(Iop_Add32, mkexpr(t32), mkU32(32768))), 
+               mkU64(65536)),
+         unop(Iop_32to16, mkexpr(t32)),
+         mkU16( 0x8000 ) );
+}
+
+
+static
+ULong dis_FPU ( /*OUT*/Bool* decode_ok, 
+                const VexAbiInfo* vbi, Prefix pfx, Long delta )
+{
+   Int    len;
+   UInt   r_src, r_dst;
+   HChar  dis_buf[50];
+   IRTemp t1, t2;
+
+   /* On entry, delta points at the second byte of the insn (the modrm
+      byte).*/
+   UChar first_opcode = getUChar(delta-1);
+   UChar modrm        = getUChar(delta+0);
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xD8 opcodes +-+-+-+-+-+-+-+ */
+
+   if (first_opcode == 0xD8) {
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
+         delta += len;
+
+         switch (gregLO3ofRM(modrm)) {
+
+            case 0: /* FADD single-real */
+               fp_do_op_mem_ST_0 ( addr, "add", dis_buf, Iop_AddF64, False );
+               break;
+
+            case 1: /* FMUL single-real */
+               fp_do_op_mem_ST_0 ( addr, "mul", dis_buf, Iop_MulF64, False );
+               break;
+
+            case 2: /* FCOM single-real */
+               DIP("fcoms %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               /* The AMD documentation suggests that forcing C1 to
+                  zero is correct (Eliot Moss) */
+               put_C3210( 
+                   unop( Iop_32Uto64,
+                       binop( Iop_And32,
+                              binop(Iop_Shl32, 
+                                    binop(Iop_CmpF64, 
+                                          get_ST(0),
+                                          unop(Iop_F32toF64, 
+                                               loadLE(Ity_F32,mkexpr(addr)))),
+                                    mkU8(8)),
+                              mkU32(0x4500)
+                   )));
+               break;  
+
+            case 3: /* FCOMP single-real */
+               /* The AMD documentation suggests that forcing C1 to
+                  zero is correct (Eliot Moss) */
+               DIP("fcomps %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   unop( Iop_32Uto64,
+                       binop( Iop_And32,
+                              binop(Iop_Shl32, 
+                                    binop(Iop_CmpF64, 
+                                          get_ST(0),
+                                          unop(Iop_F32toF64, 
+                                               loadLE(Ity_F32,mkexpr(addr)))),
+                                    mkU8(8)),
+                              mkU32(0x4500)
+                   )));
+               fp_pop();
+               break;  
+
+            case 4: /* FSUB single-real */
+               fp_do_op_mem_ST_0 ( addr, "sub", dis_buf, Iop_SubF64, False );
+               break;
+
+            case 5: /* FSUBR single-real */
+               fp_do_oprev_mem_ST_0 ( addr, "subr", dis_buf, Iop_SubF64, False );
+               break;
+
+            case 6: /* FDIV single-real */
+               fp_do_op_mem_ST_0 ( addr, "div", dis_buf, Iop_DivF64, False );
+               break;
+
+            case 7: /* FDIVR single-real */
+               fp_do_oprev_mem_ST_0 ( addr, "divr", dis_buf, Iop_DivF64, False );
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+               vex_printf("first_opcode == 0xD8\n");
+               goto decode_fail;
+         }
+      } else {
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FADD %st(?),%st(0) */
+               fp_do_op_ST_ST ( "add", Iop_AddF64, modrm - 0xC0, 0, False );
+               break;
+
+            case 0xC8 ... 0xCF: /* FMUL %st(?),%st(0) */
+               fp_do_op_ST_ST ( "mul", Iop_MulF64, modrm - 0xC8, 0, False );
+               break;
+
+            /* Dunno if this is right */
+            case 0xD0 ... 0xD7: /* FCOM %st(?),%st(0) */
+               r_dst = (UInt)modrm - 0xD0;
+               DIP("fcom %%st(0),%%st(%d)\n", r_dst);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   unop(Iop_32Uto64,
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(r_dst)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   )));
+               break;
+
+            /* Dunno if this is right */
+            case 0xD8 ... 0xDF: /* FCOMP %st(?),%st(0) */
+               r_dst = (UInt)modrm - 0xD8;
+               DIP("fcomp %%st(0),%%st(%d)\n", r_dst);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   unop(Iop_32Uto64,
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(r_dst)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   )));
+               fp_pop();
+               break;
+
+            case 0xE0 ... 0xE7: /* FSUB %st(?),%st(0) */
+               fp_do_op_ST_ST ( "sub", Iop_SubF64, modrm - 0xE0, 0, False );
+               break;
+
+            case 0xE8 ... 0xEF: /* FSUBR %st(?),%st(0) */
+               fp_do_oprev_ST_ST ( "subr", Iop_SubF64, modrm - 0xE8, 0, False );
+               break;
+
+            case 0xF0 ... 0xF7: /* FDIV %st(?),%st(0) */
+               fp_do_op_ST_ST ( "div", Iop_DivF64, modrm - 0xF0, 0, False );
+               break;
+
+            case 0xF8 ... 0xFF: /* FDIVR %st(?),%st(0) */
+               fp_do_oprev_ST_ST ( "divr", Iop_DivF64, modrm - 0xF8, 0, False );
+               break;
+
+            default:
+               goto decode_fail;
+         }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xD9 opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xD9) {
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
+         delta += len;
+
+         switch (gregLO3ofRM(modrm)) {
+
+            case 0: /* FLD single-real */
+               DIP("flds %s\n", dis_buf);
+               fp_push();
+               put_ST(0, unop(Iop_F32toF64,
+                              loadLE(Ity_F32, mkexpr(addr))));
+               break;
+
+            case 2: /* FST single-real */
+               DIP("fsts %s\n", dis_buf);
+               storeLE(mkexpr(addr),
+                       binop(Iop_F64toF32, get_roundingmode(), get_ST(0)));
+               break;
+
+            case 3: /* FSTP single-real */
+               DIP("fstps %s\n", dis_buf);
+               storeLE(mkexpr(addr), 
+                       binop(Iop_F64toF32, get_roundingmode(), get_ST(0)));
+               fp_pop();
+               break;
+
+            case 4: { /* FLDENV m28 */
+               /* Uses dirty helper: 
+                     VexEmNote amd64g_do_FLDENV ( VexGuestX86State*, HWord ) */
+               IRTemp    ew = newTemp(Ity_I32);
+               IRTemp   w64 = newTemp(Ity_I64);
+               IRDirty*   d = unsafeIRDirty_0_N ( 
+                                 0/*regparms*/, 
+                                 "amd64g_dirtyhelper_FLDENV", 
+                                 &amd64g_dirtyhelper_FLDENV,
+                                 mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+                              );
+               d->tmp       = w64;
+               /* declare we're reading memory */
+               d->mFx   = Ifx_Read;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 28;
+
+               /* declare we're writing guest state */
+               d->nFxState = 4;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Write;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Write;
+               d->fxState[1].offset = OFFB_FPTAGS;
+               d->fxState[1].size   = 8 * sizeof(UChar);
+
+               d->fxState[2].fx     = Ifx_Write;
+               d->fxState[2].offset = OFFB_FPROUND;
+               d->fxState[2].size   = sizeof(ULong);
+
+               d->fxState[3].fx     = Ifx_Write;
+               d->fxState[3].offset = OFFB_FC3210;
+               d->fxState[3].size   = sizeof(ULong);
+
+               stmt( IRStmt_Dirty(d) );
+
+               /* ew contains any emulation warning we may need to
+                  issue.  If needed, side-exit to the next insn,
+                  reporting the warning, so that Valgrind's dispatcher
+                  sees the warning. */
+               assign(ew, unop(Iop_64to32,mkexpr(w64)) );
+               put_emwarn( mkexpr(ew) );
+               stmt( 
+                  IRStmt_Exit(
+                     binop(Iop_CmpNE32, mkexpr(ew), mkU32(0)),
+                     Ijk_EmWarn,
+                     IRConst_U64( guest_RIP_bbstart+delta ),
+                     OFFB_RIP
+                  )
+               );
+
+               DIP("fldenv %s\n", dis_buf);
+               break;
+            }
+
+            case 5: {/* FLDCW */
+               /* The only thing we observe in the control word is the
+                  rounding mode.  Therefore, pass the 16-bit value
+                  (x87 native-format control word) to a clean helper,
+                  getting back a 64-bit value, the lower half of which
+                  is the FPROUND value to store, and the upper half of
+                  which is the emulation-warning token which may be
+                  generated.
+               */
+               /* ULong amd64h_check_fldcw ( ULong ); */
+               IRTemp t64 = newTemp(Ity_I64);
+               IRTemp ew = newTemp(Ity_I32);
+               DIP("fldcw %s\n", dis_buf);
+               assign( t64, mkIRExprCCall(
+                               Ity_I64, 0/*regparms*/, 
+                               "amd64g_check_fldcw",
+                               &amd64g_check_fldcw, 
+                               mkIRExprVec_1( 
+                                  unop( Iop_16Uto64, 
+                                        loadLE(Ity_I16, mkexpr(addr)))
+                               )
+                            )
+                     );
+
+               put_fpround( unop(Iop_64to32, mkexpr(t64)) );
+               assign( ew, unop(Iop_64HIto32, mkexpr(t64) ) );
+               put_emwarn( mkexpr(ew) );
+               /* Finally, if an emulation warning was reported,
+                  side-exit to the next insn, reporting the warning,
+                  so that Valgrind's dispatcher sees the warning. */
+               stmt( 
+                  IRStmt_Exit(
+                     binop(Iop_CmpNE32, mkexpr(ew), mkU32(0)),
+                     Ijk_EmWarn,
+                     IRConst_U64( guest_RIP_bbstart+delta ),
+                     OFFB_RIP
+                  )
+               );
+               break;
+            }
+
+            case 6: { /* FNSTENV m28 */
+               /* Uses dirty helper: 
+                     void amd64g_do_FSTENV ( VexGuestAMD64State*, HWord ) */
+               IRDirty* d = unsafeIRDirty_0_N ( 
+                               0/*regparms*/, 
+                               "amd64g_dirtyhelper_FSTENV", 
+                               &amd64g_dirtyhelper_FSTENV,
+                               mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+                            );
+               /* declare we're writing memory */
+               d->mFx   = Ifx_Write;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 28;
+
+               /* declare we're reading guest state */
+               d->nFxState = 4;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Read;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Read;
+               d->fxState[1].offset = OFFB_FPTAGS;
+               d->fxState[1].size   = 8 * sizeof(UChar);
+
+               d->fxState[2].fx     = Ifx_Read;
+               d->fxState[2].offset = OFFB_FPROUND;
+               d->fxState[2].size   = sizeof(ULong);
+
+               d->fxState[3].fx     = Ifx_Read;
+               d->fxState[3].offset = OFFB_FC3210;
+               d->fxState[3].size   = sizeof(ULong);
+
+               stmt( IRStmt_Dirty(d) );
+
+               DIP("fnstenv %s\n", dis_buf);
+               break;
+            }
+
+            case 7: /* FNSTCW */
+               /* Fake up a native x87 FPU control word.  The only
+                  thing it depends on is FPROUND[1:0], so call a clean
+                  helper to cook it up. */
+               /* ULong amd64g_create_fpucw ( ULong fpround ) */
+               DIP("fnstcw %s\n", dis_buf);
+               storeLE(
+                  mkexpr(addr), 
+                  unop( Iop_64to16, 
+                        mkIRExprCCall(
+                           Ity_I64, 0/*regp*/,
+                           "amd64g_create_fpucw", &amd64g_create_fpucw, 
+                           mkIRExprVec_1( unop(Iop_32Uto64, get_fpround()) ) 
+                        ) 
+                  ) 
+               );
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+               vex_printf("first_opcode == 0xD9\n");
+               goto decode_fail;
+         }
+
+      } else {
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FLD %st(?) */
+               r_src = (UInt)modrm - 0xC0;
+               DIP("fld %%st(%u)\n", r_src);
+               t1 = newTemp(Ity_F64);
+               assign(t1, get_ST(r_src));
+               fp_push();
+               put_ST(0, mkexpr(t1));
+               break;
+
+            case 0xC8 ... 0xCF: /* FXCH %st(?) */
+               r_src = (UInt)modrm - 0xC8;
+               DIP("fxch %%st(%u)\n", r_src);
+               t1 = newTemp(Ity_F64);
+               t2 = newTemp(Ity_F64);
+               assign(t1, get_ST(0));
+               assign(t2, get_ST(r_src));
+               put_ST_UNCHECKED(0, mkexpr(t2));
+               put_ST_UNCHECKED(r_src, mkexpr(t1));
+               break;
+
+            case 0xE0: /* FCHS */
+               DIP("fchs\n");
+               put_ST_UNCHECKED(0, unop(Iop_NegF64, get_ST(0)));
+               break;
+
+            case 0xE1: /* FABS */
+               DIP("fabs\n");
+               put_ST_UNCHECKED(0, unop(Iop_AbsF64, get_ST(0)));
+               break;
+
+            case 0xE5: { /* FXAM */
+               /* This is an interesting one.  It examines %st(0),
+                  regardless of whether the tag says it's empty or not.
+                  Here, just pass both the tag (in our format) and the
+                  value (as a double, actually a ULong) to a helper
+                  function. */
+               IRExpr** args
+                  = mkIRExprVec_2( unop(Iop_8Uto64, get_ST_TAG(0)),
+                                   unop(Iop_ReinterpF64asI64, 
+                                        get_ST_UNCHECKED(0)) );
+               put_C3210(mkIRExprCCall(
+                            Ity_I64, 
+                            0/*regparm*/, 
+                            "amd64g_calculate_FXAM", &amd64g_calculate_FXAM,
+                            args
+                        ));
+               DIP("fxam\n");
+               break;
+            }
+
+            case 0xE8: /* FLD1 */
+               DIP("fld1\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(1.0))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL)));
+               break;
+
+            case 0xE9: /* FLDL2T */
+               DIP("fldl2t\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(3.32192809488736234781))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x400a934f0979a371ULL)));
+               break;
+
+            case 0xEA: /* FLDL2E */
+               DIP("fldl2e\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(1.44269504088896340739))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x3ff71547652b82feULL)));
+               break;
+
+            case 0xEB: /* FLDPI */
+               DIP("fldpi\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(3.14159265358979323851))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x400921fb54442d18ULL)));
+               break;
+
+            case 0xEC: /* FLDLG2 */
+               DIP("fldlg2\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(0.301029995663981143))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x3fd34413509f79ffULL)));
+               break;
+
+            case 0xED: /* FLDLN2 */
+               DIP("fldln2\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(0.69314718055994530942))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x3fe62e42fefa39efULL)));
+               break;
+
+            case 0xEE: /* FLDZ */
+               DIP("fldz\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(0.0))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x0000000000000000ULL)));
+               break;
+
+            case 0xF0: /* F2XM1 */
+               DIP("f2xm1\n");
+               put_ST_UNCHECKED(0, 
+                  binop(Iop_2xm1F64, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0)));
+               break;
+
+            case 0xF1: /* FYL2X */
+               DIP("fyl2x\n");
+               put_ST_UNCHECKED(1, 
+                  triop(Iop_Yl2xF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(1), 
+                        get_ST(0)));
+               fp_pop();
+               break;
+
+            case 0xF2: { /* FPTAN */
+               DIP("fptan\n");
+               IRTemp argD = newTemp(Ity_F64);
+               assign(argD, get_ST(0));
+               IRTemp argOK = math_IS_TRIG_ARG_FINITE_AND_IN_RANGE(argD);
+               IRTemp resD = newTemp(Ity_F64);
+               assign(resD,
+                  IRExpr_ITE(
+                     mkexpr(argOK), 
+                     binop(Iop_TanF64,
+                           get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                           mkexpr(argD)),
+                     mkexpr(argD))
+               );
+               put_ST_UNCHECKED(0, mkexpr(resD));
+               /* Conditionally push 1.0 on the stack, if the arg is
+                  in range */
+               maybe_fp_push(argOK);
+               maybe_put_ST(argOK, 0,
+                            IRExpr_Const(IRConst_F64(1.0)));
+               set_C2( binop(Iop_Xor64,
+                             unop(Iop_1Uto64, mkexpr(argOK)), 
+                             mkU64(1)) );
+               break;
+            }
+
+            case 0xF3: /* FPATAN */
+               DIP("fpatan\n");
+               put_ST_UNCHECKED(1, 
+                  triop(Iop_AtanF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(1), 
+                        get_ST(0)));
+               fp_pop();
+               break;
+
+            case 0xF4: { /* FXTRACT */
+               IRTemp argF = newTemp(Ity_F64);
+               IRTemp sigF = newTemp(Ity_F64);
+               IRTemp expF = newTemp(Ity_F64);
+               IRTemp argI = newTemp(Ity_I64);
+               IRTemp sigI = newTemp(Ity_I64);
+               IRTemp expI = newTemp(Ity_I64);
+               DIP("fxtract\n");
+               assign( argF, get_ST(0) );
+               assign( argI, unop(Iop_ReinterpF64asI64, mkexpr(argF)));
+               assign( sigI, 
+                       mkIRExprCCall(
+                          Ity_I64, 0/*regparms*/, 
+                          "x86amd64g_calculate_FXTRACT", 
+                          &x86amd64g_calculate_FXTRACT, 
+                          mkIRExprVec_2( mkexpr(argI), 
+                                         mkIRExpr_HWord(0)/*sig*/ )) 
+               );
+               assign( expI, 
+                       mkIRExprCCall(
+                          Ity_I64, 0/*regparms*/, 
+                          "x86amd64g_calculate_FXTRACT", 
+                          &x86amd64g_calculate_FXTRACT, 
+                          mkIRExprVec_2( mkexpr(argI), 
+                                         mkIRExpr_HWord(1)/*exp*/ )) 
+               );
+               assign( sigF, unop(Iop_ReinterpI64asF64, mkexpr(sigI)) );
+               assign( expF, unop(Iop_ReinterpI64asF64, mkexpr(expI)) );
+               /* exponent */
+               put_ST_UNCHECKED(0, mkexpr(expF) );
+               fp_push();
+               /* significand */
+               put_ST(0, mkexpr(sigF) );
+               break;
+            }
+
+            case 0xF5: { /* FPREM1 -- IEEE compliant */
+               IRTemp a1 = newTemp(Ity_F64);
+               IRTemp a2 = newTemp(Ity_F64);
+               DIP("fprem1\n");
+               /* Do FPREM1 twice, once to get the remainder, and once
+                  to get the C3210 flag values. */
+               assign( a1, get_ST(0) );
+               assign( a2, get_ST(1) );
+               put_ST_UNCHECKED(0,
+                  triop(Iop_PRem1F64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(a1),
+                        mkexpr(a2)));
+               put_C3210(
+                  unop(Iop_32Uto64,
+                  triop(Iop_PRem1C3210F64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(a1),
+                        mkexpr(a2)) ));
+               break;
+            }
+
+            case 0xF7: /* FINCSTP */
+               DIP("fincstp\n");
+               put_ftop( binop(Iop_Add32, get_ftop(), mkU32(1)) );
+               break;
+
+            case 0xF8: { /* FPREM -- not IEEE compliant */
+               IRTemp a1 = newTemp(Ity_F64);
+               IRTemp a2 = newTemp(Ity_F64);
+               DIP("fprem\n");
+               /* Do FPREM twice, once to get the remainder, and once
+                  to get the C3210 flag values. */
+               assign( a1, get_ST(0) );
+               assign( a2, get_ST(1) );
+               put_ST_UNCHECKED(0,
+                  triop(Iop_PRemF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(a1),
+                        mkexpr(a2)));
+               put_C3210(
+                  unop(Iop_32Uto64,
+                  triop(Iop_PRemC3210F64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(a1),
+                        mkexpr(a2)) ));
+               break;
+            }
+
+            case 0xF9: /* FYL2XP1 */
+               DIP("fyl2xp1\n");
+               put_ST_UNCHECKED(1, 
+                  triop(Iop_Yl2xp1F64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(1), 
+                        get_ST(0)));
+               fp_pop();
+               break;
+
+            case 0xFA: /* FSQRT */
+               DIP("fsqrt\n");
+               put_ST_UNCHECKED(0, 
+                  binop(Iop_SqrtF64, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0)));
+               break;
+
+            case 0xFB: { /* FSINCOS */
+               DIP("fsincos\n");
+               IRTemp argD = newTemp(Ity_F64);
+               assign(argD, get_ST(0));
+               IRTemp argOK = math_IS_TRIG_ARG_FINITE_AND_IN_RANGE(argD);
+               IRTemp resD = newTemp(Ity_F64);
+               assign(resD,
+                  IRExpr_ITE(
+                     mkexpr(argOK), 
+                     binop(Iop_SinF64,
+                           get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                           mkexpr(argD)),
+                     mkexpr(argD))
+               );
+               put_ST_UNCHECKED(0, mkexpr(resD));
+               /* Conditionally push the cos value on the stack, if
+                  the arg is in range */
+               maybe_fp_push(argOK);
+               maybe_put_ST(argOK, 0,
+                  binop(Iop_CosF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(argD)));
+               set_C2( binop(Iop_Xor64,
+                             unop(Iop_1Uto64, mkexpr(argOK)), 
+                             mkU64(1)) );
+               break;
+            }
+
+            case 0xFC: /* FRNDINT */
+               DIP("frndint\n");
+               put_ST_UNCHECKED(0,
+                  binop(Iop_RoundF64toInt, get_roundingmode(), get_ST(0)) );
+               break;
+
+            case 0xFD: /* FSCALE */
+               DIP("fscale\n");
+               put_ST_UNCHECKED(0, 
+                  triop(Iop_ScaleF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0), 
+                        get_ST(1)));
+               break;
+
+            case 0xFE:   /* FSIN */
+            case 0xFF: { /* FCOS */
+               Bool isSIN = modrm == 0xFE;
+               DIP("%s\n", isSIN ? "fsin" : "fcos");
+               IRTemp argD = newTemp(Ity_F64);
+               assign(argD, get_ST(0));
+               IRTemp argOK = math_IS_TRIG_ARG_FINITE_AND_IN_RANGE(argD);
+               IRTemp resD = newTemp(Ity_F64);
+               assign(resD,
+                  IRExpr_ITE(
+                     mkexpr(argOK), 
+                     binop(isSIN ? Iop_SinF64 : Iop_CosF64,
+                           get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                           mkexpr(argD)),
+                     mkexpr(argD))
+               );
+               put_ST_UNCHECKED(0, mkexpr(resD));
+               set_C2( binop(Iop_Xor64,
+                             unop(Iop_1Uto64, mkexpr(argOK)), 
+                             mkU64(1)) );
+               break;
+            }
+
+            default:
+               goto decode_fail;
+         }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDA opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDA) {
+
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IROp   fop;
+         IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
+         delta += len;
+         switch (gregLO3ofRM(modrm)) {
+
+            case 0: /* FIADD m32int */ /* ST(0) += m32int */
+               DIP("fiaddl %s\n", dis_buf);
+               fop = Iop_AddF64;
+               goto do_fop_m32;
+
+            case 1: /* FIMUL m32int */ /* ST(0) *= m32int */
+               DIP("fimull %s\n", dis_buf);
+               fop = Iop_MulF64;
+               goto do_fop_m32;
+
+            case 4: /* FISUB m32int */ /* ST(0) -= m32int */
+               DIP("fisubl %s\n", dis_buf);
+               fop = Iop_SubF64;
+               goto do_fop_m32;
+
+            case 5: /* FISUBR m32int */ /* ST(0) = m32int - ST(0) */
+               DIP("fisubrl %s\n", dis_buf);
+               fop = Iop_SubF64;
+               goto do_foprev_m32;
+
+            case 6: /* FIDIV m32int */ /* ST(0) /= m32int */
+               DIP("fisubl %s\n", dis_buf);
+               fop = Iop_DivF64;
+               goto do_fop_m32;
+
+            case 7: /* FIDIVR m32int */ /* ST(0) = m32int / ST(0) */
+               DIP("fidivrl %s\n", dis_buf);
+               fop = Iop_DivF64;
+               goto do_foprev_m32;
+
+            do_fop_m32:
+               put_ST_UNCHECKED(0, 
+                  triop(fop, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0),
+                        unop(Iop_I32StoF64,
+                             loadLE(Ity_I32, mkexpr(addr)))));
+               break;
+
+            do_foprev_m32:
+               put_ST_UNCHECKED(0, 
+                  triop(fop, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        unop(Iop_I32StoF64,
+                             loadLE(Ity_I32, mkexpr(addr))),
+                        get_ST(0)));
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+               vex_printf("first_opcode == 0xDA\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FCMOVB ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xC0;
+               DIP("fcmovb %%st(%u), %%st(0)\n", r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_amd64g_calculate_condition(AMD64CondB),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xC8 ... 0xCF: /* FCMOVE(Z) ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xC8;
+               DIP("fcmovz %%st(%u), %%st(0)\n", r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_amd64g_calculate_condition(AMD64CondZ),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xD0 ... 0xD7: /* FCMOVBE ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xD0;
+               DIP("fcmovbe %%st(%u), %%st(0)\n", r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_amd64g_calculate_condition(AMD64CondBE),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xD8 ... 0xDF: /* FCMOVU ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xD8;
+               DIP("fcmovu %%st(%u), %%st(0)\n", r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_amd64g_calculate_condition(AMD64CondP),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xE9: /* FUCOMPP %st(0),%st(1) */
+               DIP("fucompp %%st(0),%%st(1)\n");
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   unop(Iop_32Uto64,
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(1)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   )));
+               fp_pop();
+               fp_pop();
+               break;
+
+            default:
+               goto decode_fail;
+         }
+
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDB opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDB) {
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
+         delta += len;
+
+         switch (gregLO3ofRM(modrm)) {
+
+            case 0: /* FILD m32int */
+               DIP("fildl %s\n", dis_buf);
+               fp_push();
+               put_ST(0, unop(Iop_I32StoF64,
+                              loadLE(Ity_I32, mkexpr(addr))));
+               break;
+
+            case 1: /* FISTTPL m32 (SSE3) */
+               DIP("fisttpl %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI32S, mkU32(Irrm_ZERO), get_ST(0)) );
+               fp_pop();
+               break;
+
+            case 2: /* FIST m32 */
+               DIP("fistl %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI32S, get_roundingmode(), get_ST(0)) );
+               break;
+
+            case 3: /* FISTP m32 */
+               DIP("fistpl %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI32S, get_roundingmode(), get_ST(0)) );
+               fp_pop();
+               break;
+
+            case 5: { /* FLD extended-real */
+               /* Uses dirty helper: 
+                     ULong amd64g_loadF80le ( ULong )
+                  addr holds the address.  First, do a dirty call to
+                  get hold of the data. */
+               IRTemp   val  = newTemp(Ity_I64);
+               IRExpr** args = mkIRExprVec_1 ( mkexpr(addr) );
+
+               IRDirty* d = unsafeIRDirty_1_N ( 
+                               val, 
+                               0/*regparms*/, 
+                               "amd64g_dirtyhelper_loadF80le", 
+                               &amd64g_dirtyhelper_loadF80le, 
+                               args 
+                            );
+               /* declare that we're reading memory */
+               d->mFx   = Ifx_Read;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 10;
+
+               /* execute the dirty call, dumping the result in val. */
+               stmt( IRStmt_Dirty(d) );
+               fp_push();
+               put_ST(0, unop(Iop_ReinterpI64asF64, mkexpr(val)));
+
+               DIP("fldt %s\n", dis_buf);
+               break;
+            }
+
+            case 7: { /* FSTP extended-real */
+               /* Uses dirty helper: 
+                     void amd64g_storeF80le ( ULong addr, ULong data ) 
+               */
+               IRExpr** args 
+                  = mkIRExprVec_2( mkexpr(addr), 
+                                   unop(Iop_ReinterpF64asI64, get_ST(0)) );
+
+               IRDirty* d = unsafeIRDirty_0_N ( 
+                               0/*regparms*/, 
+                               "amd64g_dirtyhelper_storeF80le", 
+                               &amd64g_dirtyhelper_storeF80le,
+                               args 
+                            );
+               /* declare we're writing memory */
+               d->mFx   = Ifx_Write;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 10;
+
+               /* execute the dirty call. */
+               stmt( IRStmt_Dirty(d) );
+               fp_pop();
+
+               DIP("fstpt\n %s", dis_buf);
+               break;
+            }
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+               vex_printf("first_opcode == 0xDB\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FCMOVNB ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xC0;
+               DIP("fcmovnb %%st(%u), %%st(0)\n", r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_amd64g_calculate_condition(AMD64CondNB),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xC8 ... 0xCF: /* FCMOVNE(NZ) ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xC8;
+               DIP("fcmovnz %%st(%u), %%st(0)\n", r_src);
+               put_ST_UNCHECKED(
+                  0, 
+                  IRExpr_ITE( 
+                     mk_amd64g_calculate_condition(AMD64CondNZ),
+                     get_ST(r_src),
+                     get_ST(0)
+                  )
+               );
+               break;
+
+            case 0xD0 ... 0xD7: /* FCMOVNBE ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xD0;
+               DIP("fcmovnbe %%st(%u), %%st(0)\n", r_src);
+               put_ST_UNCHECKED(
+                  0, 
+                  IRExpr_ITE( 
+                     mk_amd64g_calculate_condition(AMD64CondNBE),
+                     get_ST(r_src),
+                     get_ST(0)
+                  ) 
+               );
+               break;
+
+            case 0xD8 ... 0xDF: /* FCMOVNU ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xD8;
+               DIP("fcmovnu %%st(%u), %%st(0)\n", r_src);
+               put_ST_UNCHECKED(
+                  0, 
+                  IRExpr_ITE( 
+                     mk_amd64g_calculate_condition(AMD64CondNP),
+                     get_ST(r_src),
+                     get_ST(0)
+                  )
+               );
+               break;
+
+            case 0xE2:
+               DIP("fnclex\n");
+               break;
+
+            case 0xE3: {
+               /* Uses dirty helper: 
+                     void amd64g_do_FINIT ( VexGuestAMD64State* ) */
+               IRDirty* d  = unsafeIRDirty_0_N ( 
+                                0/*regparms*/, 
+                                "amd64g_dirtyhelper_FINIT", 
+                                &amd64g_dirtyhelper_FINIT,
+                                mkIRExprVec_1( IRExpr_BBPTR() )
+                             );
+
+               /* declare we're writing guest state */
+               d->nFxState = 5;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Write;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Write;
+               d->fxState[1].offset = OFFB_FPREGS;
+               d->fxState[1].size   = 8 * sizeof(ULong);
+
+               d->fxState[2].fx     = Ifx_Write;
+               d->fxState[2].offset = OFFB_FPTAGS;
+               d->fxState[2].size   = 8 * sizeof(UChar);
+
+               d->fxState[3].fx     = Ifx_Write;
+               d->fxState[3].offset = OFFB_FPROUND;
+               d->fxState[3].size   = sizeof(ULong);
+
+               d->fxState[4].fx     = Ifx_Write;
+               d->fxState[4].offset = OFFB_FC3210;
+               d->fxState[4].size   = sizeof(ULong);
+
+               stmt( IRStmt_Dirty(d) );
+
+               DIP("fninit\n");
+               break;
+            }
+
+            case 0xE8 ... 0xEF: /* FUCOMI %st(0),%st(?) */
+               fp_do_ucomi_ST0_STi( (UInt)modrm - 0xE8, False );
+               break;
+
+            case 0xF0 ... 0xF7: /* FCOMI %st(0),%st(?) */
+               fp_do_ucomi_ST0_STi( (UInt)modrm - 0xF0, False );
+               break;
+
+            default:
+               goto decode_fail;
+         }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDC opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDC) {
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
+         delta += len;
+
+         switch (gregLO3ofRM(modrm)) {
+
+            case 0: /* FADD double-real */
+               fp_do_op_mem_ST_0 ( addr, "add", dis_buf, Iop_AddF64, True );
+               break;
+
+            case 1: /* FMUL double-real */
+               fp_do_op_mem_ST_0 ( addr, "mul", dis_buf, Iop_MulF64, True );
+               break;
+
+//..             case 2: /* FCOM double-real */
+//..                DIP("fcoml %s\n", dis_buf);
+//..                /* This forces C1 to zero, which isn't right. */
+//..                put_C3210( 
+//..                    binop( Iop_And32,
+//..                           binop(Iop_Shl32, 
+//..                                 binop(Iop_CmpF64, 
+//..                                       get_ST(0),
+//..                                       loadLE(Ity_F64,mkexpr(addr))),
+//..                                 mkU8(8)),
+//..                           mkU32(0x4500)
+//..                    ));
+//..                break;  
+
+            case 3: /* FCOMP double-real */
+               DIP("fcompl %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   unop(Iop_32Uto64,
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      loadLE(Ity_F64,mkexpr(addr))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   )));
+               fp_pop();
+               break;  
+
+            case 4: /* FSUB double-real */
+               fp_do_op_mem_ST_0 ( addr, "sub", dis_buf, Iop_SubF64, True );
+               break;
+
+            case 5: /* FSUBR double-real */
+               fp_do_oprev_mem_ST_0 ( addr, "subr", dis_buf, Iop_SubF64, True );
+               break;
+
+            case 6: /* FDIV double-real */
+               fp_do_op_mem_ST_0 ( addr, "div", dis_buf, Iop_DivF64, True );
+               break;
+
+            case 7: /* FDIVR double-real */
+               fp_do_oprev_mem_ST_0 ( addr, "divr", dis_buf, Iop_DivF64, True );
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+               vex_printf("first_opcode == 0xDC\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FADD %st(0),%st(?) */
+               fp_do_op_ST_ST ( "add", Iop_AddF64, 0, modrm - 0xC0, False );
+               break;
+
+            case 0xC8 ... 0xCF: /* FMUL %st(0),%st(?) */
+               fp_do_op_ST_ST ( "mul", Iop_MulF64, 0, modrm - 0xC8, False );
+               break;
+
+            case 0xE0 ... 0xE7: /* FSUBR %st(0),%st(?) */
+               fp_do_oprev_ST_ST ( "subr", Iop_SubF64, 0, modrm - 0xE0, False );
+               break;
+
+            case 0xE8 ... 0xEF: /* FSUB %st(0),%st(?) */
+               fp_do_op_ST_ST ( "sub", Iop_SubF64, 0, modrm - 0xE8, False );
+               break;
+
+            case 0xF0 ... 0xF7: /* FDIVR %st(0),%st(?) */
+               fp_do_oprev_ST_ST ( "divr", Iop_DivF64, 0, modrm - 0xF0, False );
+               break;
+
+            case 0xF8 ... 0xFF: /* FDIV %st(0),%st(?) */
+               fp_do_op_ST_ST ( "div", Iop_DivF64, 0, modrm - 0xF8, False );
+               break;
+
+            default:
+               goto decode_fail;
+         }
+
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDD opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDD) {
+
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
+         delta += len;
+
+         switch (gregLO3ofRM(modrm)) {
+
+            case 0: /* FLD double-real */
+               DIP("fldl %s\n", dis_buf);
+               fp_push();
+               put_ST(0, loadLE(Ity_F64, mkexpr(addr)));
+               break;
+
+            case 1: /* FISTTPQ m64 (SSE3) */
+               DIP("fistppll %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI64S, mkU32(Irrm_ZERO), get_ST(0)) );
+               fp_pop();
+               break;
+
+            case 2: /* FST double-real */
+               DIP("fstl %s\n", dis_buf);
+               storeLE(mkexpr(addr), get_ST(0));
+               break;
+
+            case 3: /* FSTP double-real */
+               DIP("fstpl %s\n", dis_buf);
+               storeLE(mkexpr(addr), get_ST(0));
+               fp_pop();
+               break;
+
+            case 4: { /* FRSTOR m94/m108 */
+               IRTemp   ew = newTemp(Ity_I32);
+               IRTemp  w64 = newTemp(Ity_I64);
+               IRDirty*  d;
+               if ( have66(pfx) ) {
+                  /* Uses dirty helper: 
+                     VexEmNote amd64g_dirtyhelper_FRSTORS
+                                  ( VexGuestAMD64State*, HWord ) */
+                  d = unsafeIRDirty_0_N ( 
+                         0/*regparms*/, 
+                         "amd64g_dirtyhelper_FRSTORS",
+                         &amd64g_dirtyhelper_FRSTORS,
+                         mkIRExprVec_1( mkexpr(addr) )
+                      );
+                  d->mSize = 94;
+               } else {
+                  /* Uses dirty helper: 
+                     VexEmNote amd64g_dirtyhelper_FRSTOR 
+                                  ( VexGuestAMD64State*, HWord ) */
+                  d = unsafeIRDirty_0_N ( 
+                         0/*regparms*/, 
+                         "amd64g_dirtyhelper_FRSTOR",
+                         &amd64g_dirtyhelper_FRSTOR,
+                         mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+                      );
+                  d->mSize = 108;
+               }
+
+               d->tmp    = w64;
+               /* declare we're reading memory */
+               d->mFx   = Ifx_Read;
+               d->mAddr = mkexpr(addr);
+               /* d->mSize set above */
+
+               /* declare we're writing guest state */
+               d->nFxState = 5;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Write;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Write;
+               d->fxState[1].offset = OFFB_FPREGS;
+               d->fxState[1].size   = 8 * sizeof(ULong);
+
+               d->fxState[2].fx     = Ifx_Write;
+               d->fxState[2].offset = OFFB_FPTAGS;
+               d->fxState[2].size   = 8 * sizeof(UChar);
+
+               d->fxState[3].fx     = Ifx_Write;
+               d->fxState[3].offset = OFFB_FPROUND;
+               d->fxState[3].size   = sizeof(ULong);
+
+               d->fxState[4].fx     = Ifx_Write;
+               d->fxState[4].offset = OFFB_FC3210;
+               d->fxState[4].size   = sizeof(ULong);
+
+               stmt( IRStmt_Dirty(d) );
+
+               /* ew contains any emulation warning we may need to
+                  issue.  If needed, side-exit to the next insn,
+                  reporting the warning, so that Valgrind's dispatcher
+                  sees the warning. */
+               assign(ew, unop(Iop_64to32,mkexpr(w64)) );
+               put_emwarn( mkexpr(ew) );
+               stmt( 
+                  IRStmt_Exit(
+                     binop(Iop_CmpNE32, mkexpr(ew), mkU32(0)),
+                     Ijk_EmWarn,
+                     IRConst_U64( guest_RIP_bbstart+delta ),
+                     OFFB_RIP
+                  )
+               );
+
+               if ( have66(pfx) ) {
+                  DIP("frstors %s\n", dis_buf);
+               } else {
+                  DIP("frstor %s\n", dis_buf);
+               }
+               break;
+            }
+
+            case 6: { /* FNSAVE m94/m108 */
+               IRDirty *d;
+               if ( have66(pfx) ) {
+                 /* Uses dirty helper: 
+                    void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*,
+                                                      HWord ) */
+                  d = unsafeIRDirty_0_N ( 
+                         0/*regparms*/, 
+                         "amd64g_dirtyhelper_FNSAVES", 
+                         &amd64g_dirtyhelper_FNSAVES,
+                         mkIRExprVec_1( mkexpr(addr) )
+                         );
+                  d->mSize = 94;
+               } else {
+                 /* Uses dirty helper: 
+                    void amd64g_dirtyhelper_FNSAVE ( VexGuestAMD64State*,
+                                                     HWord ) */
+                  d = unsafeIRDirty_0_N ( 
+                         0/*regparms*/, 
+                         "amd64g_dirtyhelper_FNSAVE",
+                         &amd64g_dirtyhelper_FNSAVE,
+                         mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+                      );
+                  d->mSize = 108;
+               }
+
+               /* declare we're writing memory */
+               d->mFx   = Ifx_Write;
+               d->mAddr = mkexpr(addr);
+               /* d->mSize set above */
+
+               /* declare we're reading guest state */
+               d->nFxState = 5;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Read;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Read;
+               d->fxState[1].offset = OFFB_FPREGS;
+               d->fxState[1].size   = 8 * sizeof(ULong);
+
+               d->fxState[2].fx     = Ifx_Read;
+               d->fxState[2].offset = OFFB_FPTAGS;
+               d->fxState[2].size   = 8 * sizeof(UChar);
+
+               d->fxState[3].fx     = Ifx_Read;
+               d->fxState[3].offset = OFFB_FPROUND;
+               d->fxState[3].size   = sizeof(ULong);
+
+               d->fxState[4].fx     = Ifx_Read;
+               d->fxState[4].offset = OFFB_FC3210;
+               d->fxState[4].size   = sizeof(ULong);
+
+               stmt( IRStmt_Dirty(d) );
+
+               if ( have66(pfx) ) {
+                 DIP("fnsaves %s\n", dis_buf);
+               } else {
+                 DIP("fnsave %s\n", dis_buf);
+               }
+               break;
+            }
+
+            case 7: { /* FNSTSW m16 */
+               IRExpr* sw = get_FPU_sw();
+               vassert(typeOfIRExpr(irsb->tyenv, sw) == Ity_I16);
+               storeLE( mkexpr(addr), sw );
+               DIP("fnstsw %s\n", dis_buf);
+               break;
+            }
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+               vex_printf("first_opcode == 0xDD\n");
+               goto decode_fail;
+         }
+      } else {
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FFREE %st(?) */
+               r_dst = (UInt)modrm - 0xC0;
+               DIP("ffree %%st(%u)\n", r_dst);
+               put_ST_TAG ( r_dst, mkU8(0) );
+               break;
+
+            case 0xD0 ... 0xD7: /* FST %st(0),%st(?) */
+               r_dst = (UInt)modrm - 0xD0;
+               DIP("fst %%st(0),%%st(%u)\n", r_dst);
+               /* P4 manual says: "If the destination operand is a
+                  non-empty register, the invalid-operation exception
+                  is not generated.  Hence put_ST_UNCHECKED. */
+               put_ST_UNCHECKED(r_dst, get_ST(0));
+               break;
+
+            case 0xD8 ... 0xDF: /* FSTP %st(0),%st(?) */
+               r_dst = (UInt)modrm - 0xD8;
+               DIP("fstp %%st(0),%%st(%u)\n", r_dst);
+               /* P4 manual says: "If the destination operand is a
+                  non-empty register, the invalid-operation exception
+                  is not generated.  Hence put_ST_UNCHECKED. */
+               put_ST_UNCHECKED(r_dst, get_ST(0));
+               fp_pop();
+               break;
+
+            case 0xE0 ... 0xE7: /* FUCOM %st(0),%st(?) */
+               r_dst = (UInt)modrm - 0xE0;
+               DIP("fucom %%st(0),%%st(%u)\n", r_dst);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210(
+                   unop(Iop_32Uto64, 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(r_dst)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   )));
+               break;
+
+            case 0xE8 ... 0xEF: /* FUCOMP %st(0),%st(?) */
+               r_dst = (UInt)modrm - 0xE8;
+               DIP("fucomp %%st(0),%%st(%u)\n", r_dst);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   unop(Iop_32Uto64, 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(r_dst)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   )));
+               fp_pop();
+               break;
+
+            default:
+               goto decode_fail;
+         }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDE opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDE) {
+
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IROp   fop;
+         IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
+         delta += len;
+
+         switch (gregLO3ofRM(modrm)) {
+
+            case 0: /* FIADD m16int */ /* ST(0) += m16int */
+               DIP("fiaddw %s\n", dis_buf);
+               fop = Iop_AddF64;
+               goto do_fop_m16;
+
+            case 1: /* FIMUL m16int */ /* ST(0) *= m16int */
+               DIP("fimulw %s\n", dis_buf);
+               fop = Iop_MulF64;
+               goto do_fop_m16;
+
+            case 4: /* FISUB m16int */ /* ST(0) -= m16int */
+               DIP("fisubw %s\n", dis_buf);
+               fop = Iop_SubF64;
+               goto do_fop_m16;
+
+            case 5: /* FISUBR m16int */ /* ST(0) = m16int - ST(0) */
+               DIP("fisubrw %s\n", dis_buf);
+               fop = Iop_SubF64;
+               goto do_foprev_m16;
+
+            case 6: /* FIDIV m16int */ /* ST(0) /= m16int */
+               DIP("fisubw %s\n", dis_buf);
+               fop = Iop_DivF64;
+               goto do_fop_m16;
+
+            case 7: /* FIDIVR m16int */ /* ST(0) = m16int / ST(0) */
+               DIP("fidivrw %s\n", dis_buf);
+               fop = Iop_DivF64;
+               goto do_foprev_m16;
+
+            do_fop_m16:
+               put_ST_UNCHECKED(0, 
+                  triop(fop, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0),
+                        unop(Iop_I32StoF64,
+                             unop(Iop_16Sto32, 
+                                  loadLE(Ity_I16, mkexpr(addr))))));
+               break;
+
+            do_foprev_m16:
+               put_ST_UNCHECKED(0, 
+                  triop(fop, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        unop(Iop_I32StoF64,
+                             unop(Iop_16Sto32, 
+                                  loadLE(Ity_I16, mkexpr(addr)))),
+                        get_ST(0)));
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+               vex_printf("first_opcode == 0xDE\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FADDP %st(0),%st(?) */
+               fp_do_op_ST_ST ( "add", Iop_AddF64, 0, modrm - 0xC0, True );
+               break;
+
+            case 0xC8 ... 0xCF: /* FMULP %st(0),%st(?) */
+               fp_do_op_ST_ST ( "mul", Iop_MulF64, 0, modrm - 0xC8, True );
+               break;
+
+            case 0xD9: /* FCOMPP %st(0),%st(1) */
+               DIP("fcompp %%st(0),%%st(1)\n");
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   unop(Iop_32Uto64,
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(1)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   )));
+               fp_pop();
+               fp_pop();
+               break;
+
+            case 0xE0 ... 0xE7: /* FSUBRP %st(0),%st(?) */
+               fp_do_oprev_ST_ST ( "subr", Iop_SubF64, 0,  modrm - 0xE0, True );
+               break;
+
+            case 0xE8 ... 0xEF: /* FSUBP %st(0),%st(?) */
+               fp_do_op_ST_ST ( "sub", Iop_SubF64, 0,  modrm - 0xE8, True );
+               break;
+
+            case 0xF0 ... 0xF7: /* FDIVRP %st(0),%st(?) */
+               fp_do_oprev_ST_ST ( "divr", Iop_DivF64, 0, modrm - 0xF0, True );
+               break;
+
+            case 0xF8 ... 0xFF: /* FDIVP %st(0),%st(?) */
+               fp_do_op_ST_ST ( "div", Iop_DivF64, 0, modrm - 0xF8, True );
+               break;
+
+            default: 
+               goto decode_fail;
+         }
+
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDF opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDF) {
+
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
+         delta += len;
+
+         switch (gregLO3ofRM(modrm)) {
+
+            case 0: /* FILD m16int */
+               DIP("fildw %s\n", dis_buf);
+               fp_push();
+               put_ST(0, unop(Iop_I32StoF64,
+                              unop(Iop_16Sto32,
+                                   loadLE(Ity_I16, mkexpr(addr)))));
+               break;
+
+            case 1: /* FISTTPS m16 (SSE3) */
+               DIP("fisttps %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        x87ishly_qnarrow_32_to_16( 
+                        binop(Iop_F64toI32S, mkU32(Irrm_ZERO), get_ST(0)) ));
+               fp_pop();
+               break;
+
+            case 2: /* FIST m16 */
+               DIP("fists %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        x87ishly_qnarrow_32_to_16(
+                        binop(Iop_F64toI32S, get_roundingmode(), get_ST(0)) ));
+               break;
+
+            case 3: /* FISTP m16 */
+               DIP("fistps %s\n", dis_buf);
+               storeLE( mkexpr(addr),
+                        x87ishly_qnarrow_32_to_16( 
+                        binop(Iop_F64toI32S, get_roundingmode(), get_ST(0)) ));
+               fp_pop();
+               break;
+
+            case 5: /* FILD m64 */
+               DIP("fildll %s\n", dis_buf);
+               fp_push();
+               put_ST(0, binop(Iop_I64StoF64,
+                               get_roundingmode(),
+                               loadLE(Ity_I64, mkexpr(addr))));
+               break;
+
+            case 7: /* FISTP m64 */
+               DIP("fistpll %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI64S, get_roundingmode(), get_ST(0)) );
+               fp_pop();
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregLO3ofRM(modrm));
+               vex_printf("first_opcode == 0xDF\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0: /* FFREEP %st(0) */
+               DIP("ffreep %%st(%d)\n", 0);
+               put_ST_TAG ( 0, mkU8(0) );
+               fp_pop();
+               break;
+
+            case 0xE0: /* FNSTSW %ax */
+               DIP("fnstsw %%ax\n");
+               /* Invent a plausible-looking FPU status word value and
+                  dump it in %AX:
+                     ((ftop & 7) << 11) | (c3210 & 0x4700)
+               */
+               putIRegRAX(
+                  2,
+                  unop(Iop_32to16,
+                       binop(Iop_Or32,
+                             binop(Iop_Shl32, 
+                                   binop(Iop_And32, get_ftop(), mkU32(7)), 
+                                   mkU8(11)),
+                             binop(Iop_And32, 
+                                   unop(Iop_64to32, get_C3210()), 
+                                   mkU32(0x4700))
+               )));
+               break;
+
+            case 0xE8 ... 0xEF: /* FUCOMIP %st(0),%st(?) */
+               fp_do_ucomi_ST0_STi( (UInt)modrm - 0xE8, True );
+               break;
+
+            case 0xF0 ... 0xF7: /* FCOMIP %st(0),%st(?) */
+               /* not really right since COMIP != UCOMIP */
+               fp_do_ucomi_ST0_STi( (UInt)modrm - 0xF0, True );
+               break;
+
+            default: 
+               goto decode_fail;
+         }
+      }
+
+   }
+
+   else
+      goto decode_fail;
+
+   *decode_ok = True;
+   return delta;
+
+  decode_fail:
+   *decode_ok = False;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- MMX INSTRUCTIONS                                     ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* Effect of MMX insns on x87 FPU state (table 11-2 of 
+   IA32 arch manual, volume 3):
+
+   Read from, or write to MMX register (viz, any insn except EMMS):
+   * All tags set to Valid (non-empty) -- FPTAGS[i] := nonzero
+   * FP stack pointer set to zero
+
+   EMMS:
+   * All tags set to Invalid (empty) -- FPTAGS[i] := zero
+   * FP stack pointer set to zero
+*/
+
+/* Emit IR for the state change every MMX insn (except EMMS) makes to
+   the x87 unit: all eight tags become "valid" (non-empty) and the FP
+   stack pointer is reset to zero.  See the table reproduced above. */
+static void do_MMX_preamble ( void )
+{
+   Int         regNo;
+   IRRegArray* tagArray = mkIRRegArray( OFFB_FPTAGS, Ity_I8, 8 );
+   IRExpr*     ixZero   = mkU32(0);
+   IRExpr*     validTag = mkU8(1);
+   /* FTOP := 0 */
+   put_ftop(ixZero);
+   /* FPTAGS[0..7] := non-zero (register in use) */
+   for (regNo = 0; regNo < 8; regNo++)
+      stmt( IRStmt_PutI( mkIRPutI(tagArray, ixZero, regNo, validTag) ) );
+}
+
+/* Emit IR for the state change EMMS makes to the x87 unit: all eight
+   tags become "invalid" (empty) and the FP stack pointer is reset to
+   zero.  See the table reproduced above. */
+static void do_EMMS_preamble ( void )
+{
+   Int         regNo;
+   IRRegArray* tagArray = mkIRRegArray( OFFB_FPTAGS, Ity_I8, 8 );
+   IRExpr*     ixZero   = mkU32(0);
+   IRExpr*     emptyTag = mkU8(0);
+   /* FTOP := 0 */
+   put_ftop(ixZero);
+   /* FPTAGS[0..7] := zero (register empty) */
+   for (regNo = 0; regNo < 8; regNo++)
+      stmt( IRStmt_PutI( mkIRPutI(tagArray, ixZero, regNo, emptyTag) ) );
+}
+
+
+/* Fetch MMX register |archreg| (0..7) as an I64 expression.  The MMX
+   registers live in the guest-state x87 register file at OFFB_FPREGS,
+   8 bytes apart. */
+static IRExpr* getMMXReg ( UInt archreg )
+{
+   Int guestOff = OFFB_FPREGS + 8 * archreg;
+   vassert(archreg < 8);
+   return IRExpr_Get( guestOff, Ity_I64 );
+}
+
+
+/* Store the I64 expression |e| into MMX register |archreg| (0..7),
+   i.e. into the corresponding slot of the guest-state x87 register
+   file at OFFB_FPREGS. */
+static void putMMXReg ( UInt archreg, IRExpr* e )
+{
+   Int guestOff = OFFB_FPREGS + 8 * archreg;
+   vassert(archreg < 8);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64);
+   stmt( IRStmt_Put( guestOff, e ) );
+}
+
+
+/* Helper for non-shift MMX insns.  Note this is incomplete in the
+   sense that it does not first call do_MMX_preamble() -- that is the
+   responsibility of its caller. */
+
static 
ULong dis_MMXop_regmem_to_reg ( const VexAbiInfo* vbi,
                                Prefix      pfx,
                                Long        delta,
                                UChar       opc,
                                const HChar* name,
                                Bool        show_granularity )
{
   /* Decode one non-shift MMX binary op of the form
      "op (src)mmxreg-or-mem (E), (dst)mmxreg (G)".  'opc' selects
      either an IROp or a clean-helper implementation; the 64-bit
      result is written back to G.  Returns the updated guest-code
      offset.  'name' and 'show_granularity' only affect the
      disassembly printout.  The caller must already have done
      do_MMX_preamble(). */
   HChar   dis_buf[50];
   UChar   modrm = getUChar(delta);
   Bool    isReg = epartIsReg(modrm);
   IRExpr* argL  = NULL;
   IRExpr* argR  = NULL;
   IRExpr* argG  = NULL;
   IRExpr* argE  = NULL;
   IRTemp  res   = newTemp(Ity_I64);

   /* invG:  complement G before use (only PANDN).
      op:    IROp implementation, if one exists.
      hAddr/hName: clean-helper implementation, used when op stays
                   Iop_INVALID.
      eLeft: make E the left (first) operand -- required for the
             pack/unpack families, where operand order matters. */
   Bool    invG  = False;
   IROp    op    = Iop_INVALID;
   void*   hAddr = NULL;
   const HChar*  hName = NULL;
   Bool    eLeft = False;

   /* Record a clean-helper implementation for this opcode. */
#  define XXX(_name) do { hAddr = &_name; hName = #_name; } while (0)

   switch (opc) {
      /* Original MMX ones */
      case 0xFC: op = Iop_Add8x8; break;
      case 0xFD: op = Iop_Add16x4; break;
      case 0xFE: op = Iop_Add32x2; break;

      case 0xEC: op = Iop_QAdd8Sx8; break;
      case 0xED: op = Iop_QAdd16Sx4; break;

      case 0xDC: op = Iop_QAdd8Ux8; break;
      case 0xDD: op = Iop_QAdd16Ux4; break;

      case 0xF8: op = Iop_Sub8x8;  break;
      case 0xF9: op = Iop_Sub16x4; break;
      case 0xFA: op = Iop_Sub32x2; break;

      case 0xE8: op = Iop_QSub8Sx8; break;
      case 0xE9: op = Iop_QSub16Sx4; break;

      case 0xD8: op = Iop_QSub8Ux8; break;
      case 0xD9: op = Iop_QSub16Ux4; break;

      case 0xE5: op = Iop_MulHi16Sx4; break;
      case 0xD5: op = Iop_Mul16x4; break;
      case 0xF5: XXX(amd64g_calculate_mmx_pmaddwd); break;

      case 0x74: op = Iop_CmpEQ8x8; break;
      case 0x75: op = Iop_CmpEQ16x4; break;
      case 0x76: op = Iop_CmpEQ32x2; break;

      case 0x64: op = Iop_CmpGT8Sx8; break;
      case 0x65: op = Iop_CmpGT16Sx4; break;
      case 0x66: op = Iop_CmpGT32Sx2; break;

      /* Pack/unpack: hardware puts E in the high half of the
         combined value, hence eLeft. */
      case 0x6B: op = Iop_QNarrowBin32Sto16Sx4; eLeft = True; break;
      case 0x63: op = Iop_QNarrowBin16Sto8Sx8;  eLeft = True; break;
      case 0x67: op = Iop_QNarrowBin16Sto8Ux8;  eLeft = True; break;

      case 0x68: op = Iop_InterleaveHI8x8;  eLeft = True; break;
      case 0x69: op = Iop_InterleaveHI16x4; eLeft = True; break;
      case 0x6A: op = Iop_InterleaveHI32x2; eLeft = True; break;

      case 0x60: op = Iop_InterleaveLO8x8;  eLeft = True; break;
      case 0x61: op = Iop_InterleaveLO16x4; eLeft = True; break;
      case 0x62: op = Iop_InterleaveLO32x2; eLeft = True; break;

      case 0xDB: op = Iop_And64; break;
      case 0xDF: op = Iop_And64; invG = True; break;
      case 0xEB: op = Iop_Or64; break;
      case 0xEF: /* Possibly do better here if argL and argR are the
                    same reg */
                 op = Iop_Xor64; break;

      /* Introduced in SSE1 */
      case 0xE0: op = Iop_Avg8Ux8;    break;
      case 0xE3: op = Iop_Avg16Ux4;   break;
      case 0xEE: op = Iop_Max16Sx4;   break;
      case 0xDE: op = Iop_Max8Ux8;    break;
      case 0xEA: op = Iop_Min16Sx4;   break;
      case 0xDA: op = Iop_Min8Ux8;    break;
      case 0xE4: op = Iop_MulHi16Ux4; break;
      case 0xF6: XXX(amd64g_calculate_mmx_psadbw); break;

      /* Introduced in SSE2 */
      case 0xD4: op = Iop_Add64; break;
      case 0xFB: op = Iop_Sub64; break;

      default: 
         vex_printf("\n0x%x\n", (Int)opc);
         vpanic("dis_MMXop_regmem_to_reg");
   }

#  undef XXX

   /* Fetch G (the destination operand), complementing it for
      PANDN. */
   argG = getMMXReg(gregLO3ofRM(modrm));
   if (invG)
      argG = unop(Iop_Not64, argG);

   /* Fetch E: either another MMX register or a 64-bit load. */
   if (isReg) {
      delta++;
      argE = getMMXReg(eregLO3ofRM(modrm));
   } else {
      Int    len;
      IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
      delta += len;
      argE = loadLE(Ity_I64, mkexpr(addr));
   }

   /* Establish operand order (see eLeft above). */
   if (eLeft) {
      argL = argE;
      argR = argG;
   } else {
      argL = argG;
      argR = argE;
   }

   /* Compute the result, either via the IROp or via a clean helper
      call; exactly one of the two implementations must be set. */
   if (op != Iop_INVALID) {
      vassert(hName == NULL);
      vassert(hAddr == NULL);
      assign(res, binop(op, argL, argR));
   } else {
      vassert(hName != NULL);
      vassert(hAddr != NULL);
      assign( res, 
              mkIRExprCCall(
                 Ity_I64, 
                 0/*regparms*/, hName, hAddr,
                 mkIRExprVec_2( argL, argR )
              ) 
            );
   }

   putMMXReg( gregLO3ofRM(modrm), mkexpr(res) );

   DIP("%s%s %s, %s\n", 
       name, show_granularity ? nameMMXGran(opc & 3) : "",
       ( isReg ? nameMMXReg(eregLO3ofRM(modrm)) : dis_buf ),
       nameMMXReg(gregLO3ofRM(modrm)) );

   return delta;
}
+
+
+/* Vector by scalar shift of G by the amount specified at the bottom
+   of E.  This is a straight copy of dis_SSE_shiftG_byE. */
+
+static ULong dis_MMX_shiftG_byE ( const VexAbiInfo* vbi,
+                                  Prefix pfx, Long delta, 
+                                  const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen, size;
+   IRTemp  addr;
+   Bool    shl, shr, sar;
+   UChar   rm   = getUChar(delta);
+   IRTemp  g0   = newTemp(Ity_I64);
+   IRTemp  g1   = newTemp(Ity_I64);
+   IRTemp  amt  = newTemp(Ity_I64);
+   IRTemp  amt8 = newTemp(Ity_I8);
+
+   if (epartIsReg(rm)) {
+      assign( amt, getMMXReg(eregLO3ofRM(rm)) );
+      DIP("%s %s,%s\n", opname,
+                        nameMMXReg(eregLO3ofRM(rm)),
+                        nameMMXReg(gregLO3ofRM(rm)) );
+      delta++;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( amt, loadLE(Ity_I64, mkexpr(addr)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameMMXReg(gregLO3ofRM(rm)) );
+      delta += alen;
+   }
+   assign( g0,   getMMXReg(gregLO3ofRM(rm)) );
+   assign( amt8, unop(Iop_64to8, mkexpr(amt)) );
+
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      case Iop_ShlN16x4: shl = True; size = 32; break;
+      case Iop_ShlN32x2: shl = True; size = 32; break;
+      case Iop_Shl64:    shl = True; size = 64; break;
+      case Iop_ShrN16x4: shr = True; size = 16; break;
+      case Iop_ShrN32x2: shr = True; size = 32; break;
+      case Iop_Shr64:    shr = True; size = 64; break;
+      case Iop_SarN16x4: sar = True; size = 16; break;
+      case Iop_SarN32x2: sar = True; size = 32; break;
+      default: vassert(0);
+   }
+
+   if (shl || shr) {
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           mkU64(0)
+        )
+     );
+   } else 
+   if (sar) {
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT64U,mkexpr(amt),mkU64(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           binop(op, mkexpr(g0), mkU8(size-1))
+        )
+     );
+   } else {
+      vassert(0);
+   }
+
+   putMMXReg( gregLO3ofRM(rm), mkexpr(g1) );
+   return delta;
+}
+
+
+/* Vector by scalar shift of E by an immediate byte.  This is a
+   straight copy of dis_SSE_shiftE_imm. */
+
+static 
+ULong dis_MMX_shiftE_imm ( Long delta, const HChar* opname, IROp op )
+{
+   Bool    shl, shr, sar;
+   UChar   rm   = getUChar(delta);
+   IRTemp  e0   = newTemp(Ity_I64);
+   IRTemp  e1   = newTemp(Ity_I64);
+   UChar   amt, size;
+   vassert(epartIsReg(rm));
+   vassert(gregLO3ofRM(rm) == 2 
+           || gregLO3ofRM(rm) == 4 || gregLO3ofRM(rm) == 6);
+   amt = getUChar(delta+1);
+   delta += 2;
+   DIP("%s $%d,%s\n", opname,
+                      (Int)amt,
+                      nameMMXReg(eregLO3ofRM(rm)) );
+
+   assign( e0, getMMXReg(eregLO3ofRM(rm)) );
+
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      case Iop_ShlN16x4: shl = True; size = 16; break;
+      case Iop_ShlN32x2: shl = True; size = 32; break;
+      case Iop_Shl64:    shl = True; size = 64; break;
+      case Iop_SarN16x4: sar = True; size = 16; break;
+      case Iop_SarN32x2: sar = True; size = 32; break;
+      case Iop_ShrN16x4: shr = True; size = 16; break;
+      case Iop_ShrN32x2: shr = True; size = 32; break;
+      case Iop_Shr64:    shr = True; size = 64; break;
+      default: vassert(0);
+   }
+
+   if (shl || shr) {
+     assign( e1, amt >= size 
+                    ? mkU64(0)
+                    : binop(op, mkexpr(e0), mkU8(amt))
+     );
+   } else 
+   if (sar) {
+     assign( e1, amt >= size 
+                    ? binop(op, mkexpr(e0), mkU8(size-1))
+                    : binop(op, mkexpr(e0), mkU8(amt))
+     );
+   } else {
+      vassert(0);
+   }
+
+   putMMXReg( eregLO3ofRM(rm), mkexpr(e1) );
+   return delta;
+}
+
+
+/* Completely handle all MMX instructions except emms. */
+
static
ULong dis_MMX ( Bool* decode_ok,
                const VexAbiInfo* vbi, Prefix pfx, Int sz, Long delta )
{
   /* Decode and translate a single MMX instruction (any except
      EMMS), with 'delta' pointing at the opcode byte and 'sz' the
      operand size implied by the prefixes (4 normally, 8 under
      REX.W).  On success sets *decode_ok to True and returns the
      updated offset; on an undecodable insn sets *decode_ok to
      False, in which case the returned offset is meaningless. */
   Int   len;
   UChar modrm;
   HChar dis_buf[50];
   UChar opc = getUChar(delta);
   delta++;

   /* dis_MMX handles all insns except emms. */
   do_MMX_preamble();

   switch (opc) {

      case 0x6E: 
         if (sz == 4) {
            /* MOVD (src)ireg32-or-mem32 (E), (dst)mmxreg (G)*/
            /* The 32-bit source is zero-extended into the 64-bit
               MMX destination. */
            modrm = getUChar(delta);
            if (epartIsReg(modrm)) {
               delta++;
               putMMXReg(
                  gregLO3ofRM(modrm),
                  binop( Iop_32HLto64,
                         mkU32(0),
                         getIReg32(eregOfRexRM(pfx,modrm)) ) );
               DIP("movd %s, %s\n", 
                   nameIReg32(eregOfRexRM(pfx,modrm)), 
                   nameMMXReg(gregLO3ofRM(modrm)));
            } else {
               IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
               delta += len;
               putMMXReg(
                  gregLO3ofRM(modrm),
                  binop( Iop_32HLto64,
                         mkU32(0),
                         loadLE(Ity_I32, mkexpr(addr)) ) );
               DIP("movd %s, %s\n", dis_buf, nameMMXReg(gregLO3ofRM(modrm)));
            }
         } 
         else
         if (sz == 8) {
            /* MOVD (src)ireg64-or-mem64 (E), (dst)mmxreg (G)*/
            modrm = getUChar(delta);
            if (epartIsReg(modrm)) {
               delta++;
               putMMXReg( gregLO3ofRM(modrm),
                          getIReg64(eregOfRexRM(pfx,modrm)) );
               DIP("movd %s, %s\n", 
                   nameIReg64(eregOfRexRM(pfx,modrm)), 
                   nameMMXReg(gregLO3ofRM(modrm)));
            } else {
               IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
               delta += len;
               putMMXReg( gregLO3ofRM(modrm),
                          loadLE(Ity_I64, mkexpr(addr)) );
               DIP("movd{64} %s, %s\n", dis_buf, nameMMXReg(gregLO3ofRM(modrm)));
            }
         }
         else {
            goto mmx_decode_failure;
         }
         break;

      case 0x7E:
         if (sz == 4) {
            /* MOVD (src)mmxreg (G), (dst)ireg32-or-mem32 (E) */
            /* Only the low 32 bits of the MMX source are moved. */
            modrm = getUChar(delta);
            if (epartIsReg(modrm)) {
               delta++;
               putIReg32( eregOfRexRM(pfx,modrm),
                          unop(Iop_64to32, getMMXReg(gregLO3ofRM(modrm)) ) );
               DIP("movd %s, %s\n", 
                   nameMMXReg(gregLO3ofRM(modrm)), 
                   nameIReg32(eregOfRexRM(pfx,modrm)));
            } else {
               IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
               delta += len;
               storeLE( mkexpr(addr),
                        unop(Iop_64to32, getMMXReg(gregLO3ofRM(modrm)) ) );
               DIP("movd %s, %s\n", nameMMXReg(gregLO3ofRM(modrm)), dis_buf);
            }
         }
         else
         if (sz == 8) {
            /* MOVD (src)mmxreg (G), (dst)ireg64-or-mem64 (E) */
            modrm = getUChar(delta);
            if (epartIsReg(modrm)) {
               delta++;
               putIReg64( eregOfRexRM(pfx,modrm),
                          getMMXReg(gregLO3ofRM(modrm)) );
               DIP("movd %s, %s\n", 
                   nameMMXReg(gregLO3ofRM(modrm)), 
                   nameIReg64(eregOfRexRM(pfx,modrm)));
            } else {
               IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
               delta += len;
               storeLE( mkexpr(addr),
                       getMMXReg(gregLO3ofRM(modrm)) );
               DIP("movd{64} %s, %s\n", nameMMXReg(gregLO3ofRM(modrm)), dis_buf);
            }
         } else {
            goto mmx_decode_failure;
         }
         break;

      case 0x6F:
         /* MOVQ (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4
             && /*ignore redundant REX.W*/!(sz==8 && haveNo66noF2noF3(pfx))) 
            goto mmx_decode_failure;
         modrm = getUChar(delta);
         if (epartIsReg(modrm)) {
            delta++;
            putMMXReg( gregLO3ofRM(modrm), getMMXReg(eregLO3ofRM(modrm)) );
            DIP("movq %s, %s\n", 
                nameMMXReg(eregLO3ofRM(modrm)), 
                nameMMXReg(gregLO3ofRM(modrm)));
         } else {
            IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
            delta += len;
            putMMXReg( gregLO3ofRM(modrm), loadLE(Ity_I64, mkexpr(addr)) );
            DIP("movq %s, %s\n", 
                dis_buf, nameMMXReg(gregLO3ofRM(modrm)));
         }
         break;

      case 0x7F:
         /* MOVQ (src)mmxreg, (dst)mmxreg-or-mem */
         if (sz != 4
             && /*ignore redundant REX.W*/!(sz==8 && haveNo66noF2noF3(pfx)))
            goto mmx_decode_failure;
         modrm = getUChar(delta);
         if (epartIsReg(modrm)) {
            delta++;
            putMMXReg( eregLO3ofRM(modrm), getMMXReg(gregLO3ofRM(modrm)) );
            DIP("movq %s, %s\n",
                nameMMXReg(gregLO3ofRM(modrm)),
                nameMMXReg(eregLO3ofRM(modrm)));
         } else {
            IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
            delta += len;
            storeLE( mkexpr(addr), getMMXReg(gregLO3ofRM(modrm)) );
            DIP("mov(nt)q %s, %s\n", 
                nameMMXReg(gregLO3ofRM(modrm)), dis_buf);
         }
         break;

      /* The arithmetic/logical/compare/pack/unpack groups below all
         defer to dis_MMXop_regmem_to_reg, which selects the
         implementation from the opcode byte. */
      case 0xFC: 
      case 0xFD: 
      case 0xFE: /* PADDgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "padd", True );
         break;

      case 0xEC: 
      case 0xED: /* PADDSgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4
             && /*ignore redundant REX.W*/!(sz==8 && haveNo66noF2noF3(pfx)))
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "padds", True );
         break;

      case 0xDC: 
      case 0xDD: /* PADDUSgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "paddus", True );
         break;

      case 0xF8: 
      case 0xF9: 
      case 0xFA: /* PSUBgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "psub", True );
         break;

      case 0xE8: 
      case 0xE9: /* PSUBSgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "psubs", True );
         break;

      case 0xD8: 
      case 0xD9: /* PSUBUSgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "psubus", True );
         break;

      case 0xE5: /* PMULHW (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "pmulhw", False );
         break;

      case 0xD5: /* PMULLW (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "pmullw", False );
         break;

      case 0xF5: /* PMADDWD (src)mmxreg-or-mem, (dst)mmxreg */
         /* NOTE(review): unlike every other case, this vasserts
            rather than branching to mmx_decode_failure -- a
            REX.W-prefixed 0F F5 (sz==8) would panic the translator
            instead of failing the decode.  Presumably such an
            encoding cannot reach here; confirm against the caller's
            prefix filtering. */
         vassert(sz == 4);
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "pmaddwd", False );
         break;

      case 0x74: 
      case 0x75: 
      case 0x76: /* PCMPEQgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "pcmpeq", True );
         break;

      case 0x64: 
      case 0x65: 
      case 0x66: /* PCMPGTgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "pcmpgt", True );
         break;

      case 0x6B: /* PACKSSDW (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "packssdw", False );
         break;

      case 0x63: /* PACKSSWB (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "packsswb", False );
         break;

      case 0x67: /* PACKUSWB (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "packuswb", False );
         break;

      case 0x68: 
      case 0x69: 
      case 0x6A: /* PUNPCKHgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4
             && /*ignore redundant REX.W*/!(sz==8 && haveNo66noF2noF3(pfx))) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "punpckh", True );
         break;

      case 0x60: 
      case 0x61: 
      case 0x62: /* PUNPCKLgg (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4
             && /*ignore redundant REX.W*/!(sz==8 && haveNo66noF2noF3(pfx))) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "punpckl", True );
         break;

      case 0xDB: /* PAND (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "pand", False );
         break;

      case 0xDF: /* PANDN (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "pandn", False );
         break;

      case 0xEB: /* POR (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "por", False );
         break;

      case 0xEF: /* PXOR (src)mmxreg-or-mem, (dst)mmxreg */
         if (sz != 4) 
            goto mmx_decode_failure;
         delta = dis_MMXop_regmem_to_reg ( vbi, pfx, delta, opc, "pxor", False );
         break; 

      /* Shift-by-register forms: amount comes from the low 64 bits
         of E.  Note there is no size check here (dis_MMX_shiftG_byE
         does the decode itself). */
#     define SHIFT_BY_REG(_name,_op)                                     \
                delta = dis_MMX_shiftG_byE(vbi, pfx, delta, _name, _op); \
                break;

      /* PSLLgg (src)mmxreg-or-mem, (dst)mmxreg */
      case 0xF1: SHIFT_BY_REG("psllw", Iop_ShlN16x4);
      case 0xF2: SHIFT_BY_REG("pslld", Iop_ShlN32x2);
      case 0xF3: SHIFT_BY_REG("psllq", Iop_Shl64);

      /* PSRLgg (src)mmxreg-or-mem, (dst)mmxreg */
      case 0xD1: SHIFT_BY_REG("psrlw", Iop_ShrN16x4);
      case 0xD2: SHIFT_BY_REG("psrld", Iop_ShrN32x2);
      case 0xD3: SHIFT_BY_REG("psrlq", Iop_Shr64);

      /* PSRAgg (src)mmxreg-or-mem, (dst)mmxreg */
      case 0xE1: SHIFT_BY_REG("psraw", Iop_SarN16x4);
      case 0xE2: SHIFT_BY_REG("psrad", Iop_SarN32x2);

#     undef SHIFT_BY_REG

      case 0x71: 
      case 0x72: 
      case 0x73: {
         /* (sz==4): PSLLgg/PSRAgg/PSRLgg mmxreg by imm8 */
         /* The reg field of the modrm byte is a sub-opcode:
            /2 = SRL, /4 = SAR, /6 = SHL. */
         UChar byte2, subopc;
         if (sz != 4) 
            goto mmx_decode_failure;
         byte2  = getUChar(delta);      /* amode / sub-opcode */
         subopc = toUChar( (byte2 >> 3) & 7 );

#        define SHIFT_BY_IMM(_name,_op)                        \
            do { delta = dis_MMX_shiftE_imm(delta,_name,_op);  \
            } while (0)

              if (subopc == 2 /*SRL*/ && opc == 0x71) 
                  SHIFT_BY_IMM("psrlw", Iop_ShrN16x4);
         else if (subopc == 2 /*SRL*/ && opc == 0x72) 
                 SHIFT_BY_IMM("psrld", Iop_ShrN32x2);
         else if (subopc == 2 /*SRL*/ && opc == 0x73) 
                 SHIFT_BY_IMM("psrlq", Iop_Shr64);

         else if (subopc == 4 /*SAR*/ && opc == 0x71) 
                 SHIFT_BY_IMM("psraw", Iop_SarN16x4);
         else if (subopc == 4 /*SAR*/ && opc == 0x72) 
                 SHIFT_BY_IMM("psrad", Iop_SarN32x2);

         else if (subopc == 6 /*SHL*/ && opc == 0x71) 
                 SHIFT_BY_IMM("psllw", Iop_ShlN16x4);
         else if (subopc == 6 /*SHL*/ && opc == 0x72) 
                  SHIFT_BY_IMM("pslld", Iop_ShlN32x2);
         else if (subopc == 6 /*SHL*/ && opc == 0x73) 
                 SHIFT_BY_IMM("psllq", Iop_Shl64);

         else goto mmx_decode_failure;

#        undef SHIFT_BY_IMM
         break;
      }

      case 0xF7: {
         /* MASKMOVQ (data)mmxreg (G), (mask)mmxreg (E): for each of
            the 8 bytes whose mask byte has its MSB set, store the
            corresponding data byte to [RDI] (with address-size /
            segment overrides applied).  Implemented here as an
            unconditional read-modify-write of the full 64 bits. */
         IRTemp addr    = newTemp(Ity_I64);
         IRTemp regD    = newTemp(Ity_I64);
         IRTemp regM    = newTemp(Ity_I64);
         IRTemp mask    = newTemp(Ity_I64);
         IRTemp olddata = newTemp(Ity_I64);
         IRTemp newdata = newTemp(Ity_I64);

         modrm = getUChar(delta);
         if (sz != 4 || (!epartIsReg(modrm)))
            goto mmx_decode_failure;
         delta++;

         assign( addr, handleAddrOverrides( vbi, pfx, getIReg64(R_RDI) ));
         assign( regM, getMMXReg( eregLO3ofRM(modrm) ));
         assign( regD, getMMXReg( gregLO3ofRM(modrm) ));
         /* Widen each mask byte's MSB to a full 0x00/0xFF byte. */
         assign( mask, binop(Iop_SarN8x8, mkexpr(regM), mkU8(7)) );
         assign( olddata, loadLE( Ity_I64, mkexpr(addr) ));
         assign( newdata, 
                 binop(Iop_Or64, 
                       binop(Iop_And64, 
                             mkexpr(regD), 
                             mkexpr(mask) ),
                       binop(Iop_And64, 
                             mkexpr(olddata),
                             unop(Iop_Not64, mkexpr(mask)))) );
         storeLE( mkexpr(addr), mkexpr(newdata) );
         DIP("maskmovq %s,%s\n", nameMMXReg( eregLO3ofRM(modrm) ),
                                 nameMMXReg( gregLO3ofRM(modrm) ) );
         break;
      }

      /* --- MMX decode failure --- */
      default:
      mmx_decode_failure:
         *decode_ok = False;
         return delta; /* ignored */

   }

   *decode_ok = True;
   return delta;
}
+
+
+/*------------------------------------------------------------*/
+/*--- More misc arithmetic and other obscure insns.        ---*/
+/*------------------------------------------------------------*/
+
+/* Generate base << amt with vacated places filled with stuff
+   from xtra.  amt guaranteed in 0 .. 63. */
+static 
+IRExpr* shiftL64_with_extras ( IRTemp base, IRTemp xtra, IRTemp amt )
+{
+   /* if   amt == 0 
+      then base
+      else (base << amt) | (xtra >>u (64-amt))
+   */
+   return
+      IRExpr_ITE( 
+         binop(Iop_CmpNE8, mkexpr(amt), mkU8(0)),
+         binop(Iop_Or64, 
+               binop(Iop_Shl64, mkexpr(base), mkexpr(amt)),
+               binop(Iop_Shr64, mkexpr(xtra), 
+                                binop(Iop_Sub8, mkU8(64), mkexpr(amt)))
+               ),
+         mkexpr(base)
+      );
+}
+
+/* Generate base >>u amt with vacated places filled with stuff
+   from xtra.  amt guaranteed in 0 .. 63. */
+static 
+IRExpr* shiftR64_with_extras ( IRTemp xtra, IRTemp base, IRTemp amt )
+{
+   /* if   amt == 0 
+      then base
+      else (base >>u amt) | (xtra << (64-amt))
+   */
+   return
+      IRExpr_ITE( 
+         binop(Iop_CmpNE8, mkexpr(amt), mkU8(0)),
+         binop(Iop_Or64, 
+               binop(Iop_Shr64, mkexpr(base), mkexpr(amt)),
+               binop(Iop_Shl64, mkexpr(xtra), 
+                                binop(Iop_Sub8, mkU8(64), mkexpr(amt)))
+               ),
+         mkexpr(base)
+      );
+}
+
+/* Double length left and right shifts.  Apparently only required in
+   v-size (no b- variant). */
static
ULong dis_SHLRD_Gv_Ev ( const VexAbiInfo* vbi,
                        Prefix pfx,
                        Long delta, UChar modrm,
                        Int sz,
                        IRExpr* shift_amt,
                        Bool amt_is_literal,
                        const HChar* shift_amt_txt,
                        Bool left_shift )
{
   /* shift_amt :: Ity_I8 is the amount to shift.  shift_amt_txt is used
      for printing it.   And eip on entry points at the modrm byte. */
   /* Translate SHLD/SHRD Gv, Ev: shift E by shift_amt, filling the
      vacated bits from G.  'amt_is_literal' says whether the amount
      is an imm8 (one extra insn byte) as opposed to CL.  Returns the
      updated guest-code offset. */
   Int len;
   HChar dis_buf[50];

   IRType ty     = szToITy(sz);
   IRTemp gsrc   = newTemp(ty);
   IRTemp esrc   = newTemp(ty);
   IRTemp addr   = IRTemp_INVALID;
   IRTemp tmpSH  = newTemp(Ity_I8);
   IRTemp tmpSS  = newTemp(Ity_I8);
   IRTemp tmp64  = IRTemp_INVALID;
   IRTemp res64  = IRTemp_INVALID;
   IRTemp rss64  = IRTemp_INVALID;
   IRTemp resTy  = IRTemp_INVALID;
   IRTemp rssTy  = IRTemp_INVALID;
   Int    mask   = sz==8 ? 63 : 31;

   vassert(sz == 2 || sz == 4 || sz == 8);

   /* The E-part is the destination; this is shifted.  The G-part
      supplies bits to be shifted into the E-part, but is not
      changed.  

      If shifting left, form a double-length word with E at the top
      and G at the bottom, and shift this left.  The result is then in
      the high part.

      If shifting right, form a double-length word with G at the top
      and E at the bottom, and shift this right.  The result is then
      at the bottom.  */

   /* Fetch the operands. */

   assign( gsrc, getIRegG(sz, pfx, modrm) );

   if (epartIsReg(modrm)) {
      delta++;
      assign( esrc, getIRegE(sz, pfx, modrm) );
      DIP("sh%cd%c %s, %s, %s\n",
          ( left_shift ? 'l' : 'r' ), nameISize(sz), 
          shift_amt_txt,
          nameIRegG(sz, pfx, modrm), nameIRegE(sz, pfx, modrm));
   } else {
      addr = disAMode ( &len, vbi, pfx, delta, dis_buf, 
                        /* # bytes following amode */
                        amt_is_literal ? 1 : 0 );
      delta += len;
      assign( esrc, loadLE(ty, mkexpr(addr)) );
      DIP("sh%cd%c %s, %s, %s\n", 
          ( left_shift ? 'l' : 'r' ), nameISize(sz), 
          shift_amt_txt,
          nameIRegG(sz, pfx, modrm), dis_buf);
   }

   /* Calculate the masked shift amount (tmpSH), the masked subshift
      amount (tmpSS), the shifted value (res64) and the subshifted
      value (rss64). */
   /* The "subshift" is the result of shifting by tmpSH-1; the flags
      thunk needs it to derive the carry/overflow flags. */

   assign( tmpSH, binop(Iop_And8, shift_amt, mkU8(mask)) );
   assign( tmpSS, binop(Iop_And8, 
                        binop(Iop_Sub8, mkexpr(tmpSH), mkU8(1) ),
                        mkU8(mask)));

   tmp64 = newTemp(Ity_I64);
   res64 = newTemp(Ity_I64);
   rss64 = newTemp(Ity_I64);

   if (sz == 2 || sz == 4) {

      /* G is xtra; E is data */
      /* what a freaking nightmare: */
      /* Narrow cases are widened into a single 64-bit value and
         shifted there; the replication of gsrc below supplies the
         fill bits for counts larger than the operand width. */
      if (sz == 4 && left_shift) {
         assign( tmp64, binop(Iop_32HLto64, mkexpr(esrc), mkexpr(gsrc)) );
         assign( res64, 
                 binop(Iop_Shr64, 
                       binop(Iop_Shl64, mkexpr(tmp64), mkexpr(tmpSH)),
                       mkU8(32)) );
         assign( rss64, 
                 binop(Iop_Shr64, 
                       binop(Iop_Shl64, mkexpr(tmp64), mkexpr(tmpSS)),
                       mkU8(32)) );
      }
      else
      if (sz == 4 && !left_shift) {
         assign( tmp64, binop(Iop_32HLto64, mkexpr(gsrc), mkexpr(esrc)) );
         assign( res64, binop(Iop_Shr64, mkexpr(tmp64), mkexpr(tmpSH)) );
         assign( rss64, binop(Iop_Shr64, mkexpr(tmp64), mkexpr(tmpSS)) );
      }
      else
      if (sz == 2 && left_shift) {
         assign( tmp64,
                 binop(Iop_32HLto64,
                       binop(Iop_16HLto32, mkexpr(esrc), mkexpr(gsrc)),
                       binop(Iop_16HLto32, mkexpr(gsrc), mkexpr(gsrc))
         ));
         /* result formed by shifting [esrc'gsrc'gsrc'gsrc] */
         assign( res64, 
                 binop(Iop_Shr64, 
                       binop(Iop_Shl64, mkexpr(tmp64), mkexpr(tmpSH)),
                       mkU8(48)) );
         /* subshift formed by shifting [esrc'0000'0000'0000] */
         assign( rss64, 
                 binop(Iop_Shr64, 
                       binop(Iop_Shl64, 
                             binop(Iop_Shl64, unop(Iop_16Uto64, mkexpr(esrc)),
                                              mkU8(48)),
                             mkexpr(tmpSS)),
                       mkU8(48)) );
      }
      else
      if (sz == 2 && !left_shift) {
         assign( tmp64,
                 binop(Iop_32HLto64,
                       binop(Iop_16HLto32, mkexpr(gsrc), mkexpr(gsrc)),
                       binop(Iop_16HLto32, mkexpr(gsrc), mkexpr(esrc))
         ));
         /* result formed by shifting [gsrc'gsrc'gsrc'esrc] */
         assign( res64, binop(Iop_Shr64, mkexpr(tmp64), mkexpr(tmpSH)) );
         /* subshift formed by shifting [0000'0000'0000'esrc] */
         assign( rss64, binop(Iop_Shr64, 
                              unop(Iop_16Uto64, mkexpr(esrc)), 
                              mkexpr(tmpSS)) );
      }

   } else {

      /* 64-bit case: no widening possible, so use the dedicated
         double-length shift helpers. */
      vassert(sz == 8);
      if (left_shift) {
         assign( res64, shiftL64_with_extras( esrc, gsrc, tmpSH ));
         assign( rss64, shiftL64_with_extras( esrc, gsrc, tmpSS ));
      } else {
         assign( res64, shiftR64_with_extras( gsrc, esrc, tmpSH ));
         assign( rss64, shiftR64_with_extras( gsrc, esrc, tmpSS ));
      }

   }

   resTy = newTemp(ty);
   rssTy = newTemp(ty);
   assign( resTy, narrowTo(ty, mkexpr(res64)) );
   assign( rssTy, narrowTo(ty, mkexpr(rss64)) );

   /* Put result back and write the flags thunk. */
   setFlags_DEP1_DEP2_shift ( left_shift ? Iop_Shl64 : Iop_Sar64,
                              resTy, rssTy, ty, tmpSH );

   if (epartIsReg(modrm)) {
      putIRegE(sz, pfx, modrm, mkexpr(resTy));
   } else {
      storeLE( mkexpr(addr), mkexpr(resTy) );
   }

   /* A literal amount means one imm8 byte follows the amode; account
      for it now. */
   if (amt_is_literal) delta++;
   return delta;
}
+
+
+/* Handle BT/BTS/BTR/BTC Gv, Ev.  Apparently b-size is not
+   required. */
+
/* Which bit-test variant: plain test (BT), set (BTS), reset (BTR),
   or complement (BTC). */
typedef enum { BtOpNone, BtOpSet, BtOpReset, BtOpComp } BtOp;
+
+static const HChar* nameBtOp ( BtOp op )
+{
+   switch (op) {
+      case BtOpNone:  return "";
+      case BtOpSet:   return "s";
+      case BtOpReset: return "r";
+      case BtOpComp:  return "c";
+      default: vpanic("nameBtOp(amd64)");
+   }
+}
+
+
+/* Disassemble BT/BTS/BTR/BTC Gv, Ev, with 'op' selecting the
+   variant.  Returns the updated delta, and sets *decode_OK to False
+   on an undecodable prefix combination.  The selected bit ends up in
+   the Carry flag; O,S,Z,A,P are forced to zero.  For the register
+   destination case the operand is temporarily spilled to the client
+   stack (see the big comment below). */
+static
+ULong dis_bt_G_E ( const VexAbiInfo* vbi,
+                   Prefix pfx, Int sz, Long delta, BtOp op,
+                   /*OUT*/Bool* decode_OK )
+{
+   HChar  dis_buf[50];
+   UChar  modrm;
+   Int    len;
+   IRTemp t_fetched, t_bitno0, t_bitno1, t_bitno2, t_addr0, 
+          t_addr1, t_rsp, t_mask, t_new;
+
+   vassert(sz == 2 || sz == 4 || sz == 8);
+
+   t_fetched = t_bitno0 = t_bitno1 = t_bitno2 
+             = t_addr0 = t_addr1 = t_rsp
+             = t_mask = t_new = IRTemp_INVALID;
+
+   t_fetched = newTemp(Ity_I8);
+   t_new     = newTemp(Ity_I8);
+   t_bitno0  = newTemp(Ity_I64);
+   t_bitno1  = newTemp(Ity_I64);
+   t_bitno2  = newTemp(Ity_I8);
+   t_addr1   = newTemp(Ity_I64);
+   modrm     = getUChar(delta);
+
+   *decode_OK = True;
+   if (epartIsReg(modrm)) {
+      /* F2 and F3 are never acceptable. */
+      if (haveF2orF3(pfx)) {
+         *decode_OK = False;
+         return delta;
+      }
+   } else {
+      /* F2 or F3 (but not both) are allowed, provided LOCK is also
+         present, and only for the BTC/BTS/BTR cases (not BT). */
+      if (haveF2orF3(pfx)) {
+         if (haveF2andF3(pfx) || !haveLOCK(pfx) || op == BtOpNone) {
+            *decode_OK = False;
+            return delta;
+         }
+      }
+   }
+
+   /* Bit index comes from the G register, sign-widened to 64 bits. */
+   assign( t_bitno0, widenSto64(getIRegG(sz, pfx, modrm)) );
+   
+   if (epartIsReg(modrm)) {
+      delta++;
+      /* Get it onto the client's stack.  Oh, this is a horrible
+         kludge.  See https://bugs.kde.org/show_bug.cgi?id=245925.
+         Because of the ELF ABI stack redzone, there may be live data
+         up to 128 bytes below %RSP.  So we can't just push it on the
+         stack, else we may wind up trashing live data, and causing
+         impossible-to-find simulation errors.  (Yes, this did
+         happen.)  So we need to drop RSP before at least 128 before
+         pushing it.  That unfortunately means hitting Memcheck's
+         fast-case painting code.  Ideally we should drop more than
+         128, to reduce the chances of breaking buggy programs that
+         have live data below -128(%RSP).  Memcheck fast-cases moves
+         of 288 bytes due to the need to handle ppc64-linux quickly,
+         so let's use 288.  Of course the real fix is to get rid of
+         this kludge entirely.  */
+      t_rsp = newTemp(Ity_I64);
+      t_addr0 = newTemp(Ity_I64);
+
+      vassert(vbi->guest_stack_redzone_size == 128);
+      assign( t_rsp, binop(Iop_Sub64, getIReg64(R_RSP), mkU64(288)) );
+      putIReg64(R_RSP, mkexpr(t_rsp));
+
+      storeLE( mkexpr(t_rsp), getIRegE(sz, pfx, modrm) );
+
+      /* Make t_addr0 point at it. */
+      assign( t_addr0, mkexpr(t_rsp) );
+
+      /* Mask out upper bits of the shift amount, since we're doing a
+         reg. */
+      assign( t_bitno1, binop(Iop_And64, 
+                              mkexpr(t_bitno0), 
+                              mkU64(sz == 8 ? 63 : sz == 4 ? 31 : 15)) );
+
+   } else {
+      t_addr0 = disAMode ( &len, vbi, pfx, delta, dis_buf, 0 );
+      delta += len;
+      assign( t_bitno1, mkexpr(t_bitno0) );
+   }
+  
+   /* At this point: t_addr0 is the address being operated on.  If it
+      was a reg, we will have pushed it onto the client's stack.
+      t_bitno1 is the bit number, suitably masked in the case of a
+      reg.  */
+  
+   /* Now the main sequence. */
+   assign( t_addr1, 
+           binop(Iop_Add64, 
+                 mkexpr(t_addr0), 
+                 binop(Iop_Sar64, mkexpr(t_bitno1), mkU8(3))) );
+
+   /* t_addr1 now holds effective address */
+
+   assign( t_bitno2, 
+           unop(Iop_64to8, 
+                binop(Iop_And64, mkexpr(t_bitno1), mkU64(7))) );
+
+   /* t_bitno2 contains offset of bit within byte */
+
+   if (op != BtOpNone) {
+      t_mask = newTemp(Ity_I8);
+      assign( t_mask, binop(Iop_Shl8, mkU8(1), mkexpr(t_bitno2)) );
+   }
+
+   /* t_mask is now a suitable byte mask */
+
+   assign( t_fetched, loadLE(Ity_I8, mkexpr(t_addr1)) );
+
+   if (op != BtOpNone) {
+      switch (op) {
+         case BtOpSet:
+            assign( t_new,
+                    binop(Iop_Or8, mkexpr(t_fetched), mkexpr(t_mask)) );
+            break;
+         case BtOpComp:
+            assign( t_new,
+                    binop(Iop_Xor8, mkexpr(t_fetched), mkexpr(t_mask)) );
+            break;
+         case BtOpReset:
+            assign( t_new,
+                    binop(Iop_And8, mkexpr(t_fetched), 
+                                    unop(Iop_Not8, mkexpr(t_mask))) );
+            break;
+         default: 
+            vpanic("dis_bt_G_E(amd64)");
+      }
+      /* Locked memory forms must be done atomically, via CAS. */
+      if ((haveLOCK(pfx)) && !epartIsReg(modrm)) {
+         casLE( mkexpr(t_addr1), mkexpr(t_fetched)/*expd*/,
+                                 mkexpr(t_new)/*new*/,
+                                 guest_RIP_curr_instr );
+      } else {
+         storeLE( mkexpr(t_addr1), mkexpr(t_new) );
+      }
+   }
+  
+   /* Side effect done; now get selected bit into Carry flag */
+   /* Flags: C=selected bit, O,S,Z,A,P undefined, so are set to zero. */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+   stmt( IRStmt_Put( 
+            OFFB_CC_DEP1,
+            binop(Iop_And64,
+                  binop(Iop_Shr64, 
+                        unop(Iop_8Uto64, mkexpr(t_fetched)),
+                        mkexpr(t_bitno2)),
+                  mkU64(1)))
+       );
+   /* Set NDEP even though it isn't used.  This makes redundant-PUT
+      elimination of previous stores to this field work better. */
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+
+   /* Move reg operand from stack back to reg */
+   if (epartIsReg(modrm)) {
+      /* t_rsp still points at it. */
+      /* only write the reg if actually modifying it; doing otherwise
+         zeroes the top half erroneously when doing btl due to
+         standard zero-extend rule */
+      if (op != BtOpNone)
+         putIRegE(sz, pfx, modrm, loadLE(szToITy(sz), mkexpr(t_rsp)) );
+      putIReg64(R_RSP, binop(Iop_Add64, mkexpr(t_rsp), mkU64(288)) );
+   }
+
+   DIP("bt%s%c %s, %s\n",
+       nameBtOp(op), nameISize(sz), nameIRegG(sz, pfx, modrm), 
+       ( epartIsReg(modrm) ? nameIRegE(sz, pfx, modrm) : dis_buf ) );
+ 
+   return delta;
+}
+
+
+
+/* Handle BSF/BSR.  Only v-size seems necessary.  'fwds' selects BSF
+   (scan forward == count trailing zeroes) versus BSR (scan backward
+   == 63 - count leading zeroes after widening).  Z is set iff the
+   source is zero; all other flags are forced to zero.  When the
+   source is zero the destination register is left unchanged.
+   Returns the updated delta. */
+static
+ULong dis_bs_E_G ( const VexAbiInfo* vbi,
+                   Prefix pfx, Int sz, Long delta, Bool fwds )
+{
+   Bool   isReg;
+   UChar  modrm;
+   HChar  dis_buf[50];
+
+   IRType ty    = szToITy(sz);
+   IRTemp src   = newTemp(ty);
+   IRTemp dst   = newTemp(ty);
+   IRTemp src64 = newTemp(Ity_I64);
+   IRTemp dst64 = newTemp(Ity_I64);
+   IRTemp srcB  = newTemp(Ity_I1);
+
+   vassert(sz == 8 || sz == 4 || sz == 2);
+
+   modrm = getUChar(delta);
+   isReg = epartIsReg(modrm);
+   if (isReg) {
+      delta++;
+      assign( src, getIRegE(sz, pfx, modrm) );
+   } else {
+      Int    len;
+      IRTemp addr = disAMode( &len, vbi, pfx, delta, dis_buf, 0 );
+      delta += len;
+      assign( src, loadLE(ty, mkexpr(addr)) );
+   }
+
+   DIP("bs%c%c %s, %s\n",
+       fwds ? 'f' : 'r', nameISize(sz), 
+       ( isReg ? nameIRegE(sz, pfx, modrm) : dis_buf ), 
+       nameIRegG(sz, pfx, modrm));
+
+   /* First, widen src to 64 bits if it is not already. */
+   assign( src64, widenUto64(mkexpr(src)) );
+
+   /* Generate a bool expression which is zero iff the original is
+      zero, and nonzero otherwise.  Ask for a CmpNE version which, if
+      instrumented by Memcheck, is instrumented expensively, since
+      this may be used on the output of a preceding movmskb insn,
+      which has been known to be partially defined, and in need of
+      careful handling. */
+   assign( srcB, binop(Iop_ExpCmpNE64, mkexpr(src64), mkU64(0)) );
+
+   /* Flags: Z is 1 iff source value is zero.  All others 
+      are undefined -- we force them to zero. */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+   stmt( IRStmt_Put( 
+            OFFB_CC_DEP1,
+            IRExpr_ITE( mkexpr(srcB),
+                        /* src!=0 */
+                        mkU64(0),
+                        /* src==0 */
+                        mkU64(AMD64G_CC_MASK_Z)
+                        )
+       ));
+   /* Set NDEP even though it isn't used.  This makes redundant-PUT
+      elimination of previous stores to this field work better. */
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+
+   /* Result: iff source value is zero, we can't use
+      Iop_Clz64/Iop_Ctz64 as they have no defined result in that case.
+      But anyway, amd64 semantics say the result is undefined in
+      such situations.  Hence handle the zero case specially. */
+
+   /* Bleh.  What we compute:
+
+          bsf64:  if src == 0 then {dst is unchanged} 
+                              else Ctz64(src)
+
+          bsr64:  if src == 0 then {dst is unchanged} 
+                              else 63 - Clz64(src)
+
+          bsf32:  if src == 0 then {dst is unchanged} 
+                              else Ctz64(32Uto64(src))
+
+          bsr32:  if src == 0 then {dst is unchanged}
+                              else 63 - Clz64(32Uto64(src))
+
+          bsf16:  if src == 0 then {dst is unchanged} 
+                              else Ctz64(32Uto64(16Uto32(src)))
+
+          bsr16:  if src == 0 then {dst is unchanged} 
+                              else 63 - Clz64(32Uto64(16Uto32(src)))
+   */
+
+   /* The main computation, guarding against zero. */
+   assign( dst64,
+           IRExpr_ITE( 
+              mkexpr(srcB),
+              /* src != 0 */
+              fwds ? unop(Iop_Ctz64, mkexpr(src64))
+                   : binop(Iop_Sub64, 
+                           mkU64(63), 
+                           unop(Iop_Clz64, mkexpr(src64))),
+              /* src == 0 -- leave dst unchanged */
+              widenUto64( getIRegG( sz, pfx, modrm ) )
+           )
+         );
+
+   /* Narrow the 64-bit result back to the operand size. */
+   if (sz == 2)
+      assign( dst, unop(Iop_64to16, mkexpr(dst64)) );
+   else
+   if (sz == 4)
+      assign( dst, unop(Iop_64to32, mkexpr(dst64)) );
+   else
+      assign( dst, mkexpr(dst64) );
+
+   /* dump result back */
+   putIRegG( sz, pfx, modrm, mkexpr(dst) );
+
+   return delta;
+}
+
+
+/* Exchange rAX with the register encoded by regLo3 together with
+   REX.B, at the given operand size (2, 4 or 8 bytes). */
+static 
+void codegen_xchg_rAX_Reg ( Prefix pfx, Int sz, UInt regLo3 )
+{
+   IRType ty   = szToITy(sz);
+   IRTemp tRAX = newTemp(ty);   /* old rAX value */
+   IRTemp tReg = newTemp(ty);   /* old value of the other register */
+   vassert(sz == 2 || sz == 4 || sz == 8);
+   vassert(regLo3 < 8);
+   switch (sz) {
+      case 8:
+         assign( tRAX, getIReg64(R_RAX) );
+         assign( tReg, getIRegRexB(8, pfx, regLo3) );
+         putIReg64( R_RAX, mkexpr(tReg) );
+         putIRegRexB(8, pfx, regLo3, mkexpr(tRAX) );
+         break;
+      case 4:
+         assign( tRAX, getIReg32(R_RAX) );
+         assign( tReg, getIRegRexB(4, pfx, regLo3) );
+         putIReg32( R_RAX, mkexpr(tReg) );
+         putIRegRexB(4, pfx, regLo3, mkexpr(tRAX) );
+         break;
+      default:
+         assign( tRAX, getIReg16(R_RAX) );
+         assign( tReg, getIRegRexB(2, pfx, regLo3) );
+         putIReg16( R_RAX, mkexpr(tReg) );
+         putIRegRexB(2, pfx, regLo3, mkexpr(tRAX) );
+         break;
+   }
+   DIP("xchg%c %s, %s\n", 
+       nameISize(sz), nameIRegRAX(sz), 
+                      nameIRegRexB(sz,pfx, regLo3));
+}
+
+
+/* SAHF: load SF,ZF,AF,PF,CF from %AH while preserving the O flag.
+   Implemented by switching the flags thunk to OP_COPY with the
+   desired flag image in DEP1. */
+static 
+void codegen_SAHF ( void )
+{
+   /* Set the flags to:
+      (amd64g_calculate_flags_all() & AMD64G_CC_MASK_O) 
+                                    -- retain the old O flag
+      | (%AH & (AMD64G_CC_MASK_S|AMD64G_CC_MASK_Z|AMD64G_CC_MASK_A
+                |AMD64G_CC_MASK_P|AMD64G_CC_MASK_C)
+   */
+   ULong  mask_SZACP = AMD64G_CC_MASK_S|AMD64G_CC_MASK_Z|AMD64G_CC_MASK_A
+                       |AMD64G_CC_MASK_C|AMD64G_CC_MASK_P;
+   IRTemp oldflags   = newTemp(Ity_I64);
+   /* Materialise the current flags before overwriting the thunk. */
+   assign( oldflags, mk_amd64g_calculate_rflags_all() );
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP1,
+         binop(Iop_Or64,
+               binop(Iop_And64, mkexpr(oldflags), mkU64(AMD64G_CC_MASK_O)),
+               binop(Iop_And64, 
+                     binop(Iop_Shr64, getIReg64(R_RAX), mkU8(8)),
+                     mkU64(mask_SZACP))
+              )
+   ));
+}
+
+
+/* LAHF: AH <- EFLAGS(SF:ZF:0:AF:0:PF:1:CF).  Bit 1 of the flags
+   image is architecturally always 1, hence the 1<<1 below. */
+static 
+void codegen_LAHF ( void  )
+{
+   ULong  mask_SZACP = AMD64G_CC_MASK_S|AMD64G_CC_MASK_Z|AMD64G_CC_MASK_A
+                       |AMD64G_CC_MASK_C|AMD64G_CC_MASK_P;
+
+   IRTemp flags = newTemp(Ity_I64);
+   assign( flags, mk_amd64g_calculate_rflags_all() );
+
+   /* RAX with bits 15:8 punched out, ready to receive the flag
+      byte. */
+   IRExpr* raxWithHole
+      = binop(Iop_And64, getIReg64(R_RAX), mkU64(~0xFF00ULL));
+   /* The byte to deposit: the SZACP flag bits plus the fixed 1 in
+      bit position 1. */
+   IRExpr* flagByte
+      = binop(Iop_Or64,
+              binop(Iop_And64, mkexpr(flags), mkU64(mask_SZACP)),
+              mkU64(1<<1));
+   putIReg64(R_RAX,
+             binop(Iop_Or64, raxWithHole,
+                   binop(Iop_Shl64, flagByte, mkU8(8))));
+}
+
+
+/* CMPXCHG G,E: compare rAX against E; if equal, E := G, else
+   rAX := E, with the flags set as by the comparison (SUB).  The
+   LOCKed memory form is expressed atomically via IRCAS.  Returns the
+   updated delta0; sets *ok to False on an undecodable prefix
+   combination. */
+static
+ULong dis_cmpxchg_G_E ( /*OUT*/Bool* ok,
+                        const VexAbiInfo*  vbi,
+                        Prefix       pfx,
+                        Int          size, 
+                        Long         delta0 )
+{
+   HChar dis_buf[50];
+   Int   len;
+
+   IRType ty    = szToITy(size);
+   IRTemp acc   = newTemp(ty);
+   IRTemp src   = newTemp(ty);
+   IRTemp dest  = newTemp(ty);
+   IRTemp dest2 = newTemp(ty);
+   IRTemp acc2  = newTemp(ty);
+   IRTemp cond  = newTemp(Ity_I1);
+   IRTemp addr  = IRTemp_INVALID;
+   UChar  rm    = getUChar(delta0);
+
+   /* There are 3 cases to consider:
+
+      reg-reg: ignore any lock prefix, generate sequence based
+               on ITE
+
+      reg-mem, not locked: ignore any lock prefix, generate sequence
+                           based on ITE
+
+      reg-mem, locked: use IRCAS
+   */
+
+   /* Decide whether F2 or F3 are acceptable.  Never for register
+      case, but for the memory case, one or the other is OK provided
+      LOCK is also present. */
+   if (epartIsReg(rm)) {
+      if (haveF2orF3(pfx)) {
+         *ok = False;
+         return delta0;
+      }
+   } else {
+      if (haveF2orF3(pfx)) {
+         if (haveF2andF3(pfx) || !haveLOCK(pfx)) {
+            *ok = False;
+            return delta0;
+         }
+      }
+   }
+
+   if (epartIsReg(rm)) {
+      /* case 1 */
+      assign( dest, getIRegE(size, pfx, rm) );
+      delta0++;
+      assign( src, getIRegG(size, pfx, rm) );
+      assign( acc, getIRegRAX(size) );
+      setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
+      assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) );
+      assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) );
+      assign( acc2,  IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
+      putIRegRAX(size, mkexpr(acc2));
+      putIRegE(size, pfx, rm, mkexpr(dest2));
+      DIP("cmpxchg%c %s,%s\n", nameISize(size),
+                               nameIRegG(size,pfx,rm),
+                               nameIRegE(size,pfx,rm) );
+   } 
+   else if (!epartIsReg(rm) && !haveLOCK(pfx)) {
+      /* case 2 */
+      addr = disAMode ( &len, vbi, pfx, delta0, dis_buf, 0 );
+      assign( dest, loadLE(ty, mkexpr(addr)) );
+      delta0 += len;
+      assign( src, getIRegG(size, pfx, rm) );
+      assign( acc, getIRegRAX(size) );
+      setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
+      assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) );
+      assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) );
+      assign( acc2,  IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
+      putIRegRAX(size, mkexpr(acc2));
+      storeLE( mkexpr(addr), mkexpr(dest2) );
+      DIP("cmpxchg%c %s,%s\n", nameISize(size), 
+                               nameIRegG(size,pfx,rm), dis_buf);
+   }
+   else if (!epartIsReg(rm) && haveLOCK(pfx)) {
+      /* case 3 */
+      /* src is new value.  acc is expected value.  dest is old value.
+         Compute success from the output of the IRCAS, and steer the
+         new value for RAX accordingly: in case of success, RAX is
+         unchanged. */
+      addr = disAMode ( &len, vbi, pfx, delta0, dis_buf, 0 );
+      delta0 += len;
+      assign( src, getIRegG(size, pfx, rm) );
+      assign( acc, getIRegRAX(size) );
+      stmt( IRStmt_CAS( 
+         mkIRCAS( IRTemp_INVALID, dest, Iend_LE, mkexpr(addr), 
+                  NULL, mkexpr(acc), NULL, mkexpr(src) )
+      ));
+      setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
+      assign( cond, mk_amd64g_calculate_condition(AMD64CondZ) );
+      assign( acc2,  IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
+      putIRegRAX(size, mkexpr(acc2));
+      DIP("cmpxchg%c %s,%s\n", nameISize(size), 
+                               nameIRegG(size,pfx,rm), dis_buf);
+   }
+   else vassert(0);
+
+   *ok = True;
+   return delta0;
+}
+
+
+/* Handle conditional move instructions of the form
+      cmovcc E(reg-or-mem), G(reg)
+
+   E(src) is reg-or-mem
+   G(dst) is reg.
+
+   If E is reg, -->    GET %E, tmps
+                       GET %G, tmpd
+                       CMOVcc tmps, tmpd
+                       PUT tmpd, %G
+ 
+   If E is mem  -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmps
+                       GET %G, tmpd
+                       CMOVcc tmps, tmpd
+                       PUT tmpd, %G
+*/
+static
+ULong dis_cmov_E_G ( const VexAbiInfo* vbi,
+                     Prefix        pfx,
+                     Int           sz, 
+                     AMD64Condcode cond,
+                     Long          delta0 )
+{
+   UChar rm       = getUChar(delta0);
+   Bool  srcIsReg = epartIsReg(rm);
+   HChar dis_buf[50];
+   Int   len = 0;
+
+   IRType ty     = szToITy(sz);
+   IRTemp newVal = newTemp(ty);   /* the E (source) operand */
+   IRTemp oldVal = newTemp(ty);   /* current value of G */
+
+   if (srcIsReg) {
+      /* Source operand is a register. */
+      assign( newVal, getIRegE(sz, pfx, rm) );
+   } else {
+      /* Source operand is in memory: decode the address and load. */
+      IRTemp addr = disAMode ( &len, vbi, pfx, delta0, dis_buf, 0 );
+      assign( newVal, loadLE(ty, mkexpr(addr)) );
+   }
+   assign( oldVal, getIRegG(sz, pfx, rm) );
+
+   /* G := cond ? E : G */
+   putIRegG( sz, pfx, rm,
+             IRExpr_ITE( mk_amd64g_calculate_condition(cond),
+                         mkexpr(newVal),
+                         mkexpr(oldVal) )
+           );
+
+   DIP("cmov%s %s,%s\n", name_AMD64Condcode(cond),
+                         srcIsReg ? nameIRegE(sz,pfx,rm) : dis_buf,
+                         nameIRegG(sz,pfx,rm));
+   /* Register form consumes just the modrm byte; memory form
+      consumes the decoded address-mode length. */
+   return (srcIsReg ? 1 : len) + delta0;
+}
+
+
+/* XADD G,E: tmp := E + G; G := old E; E := tmp, with the flags set
+   as by the addition.  The LOCKed memory form is made atomic via
+   casLE.  Returns the updated delta0; *decode_ok is set True in
+   every reachable case. */
+static
+ULong dis_xadd_G_E ( /*OUT*/Bool* decode_ok,
+                     const VexAbiInfo* vbi,
+                     Prefix pfx, Int sz, Long delta0 )
+{
+   Int   len;
+   UChar rm = getUChar(delta0);
+   HChar dis_buf[50];
+
+   IRType ty    = szToITy(sz);
+   IRTemp tmpd  = newTemp(ty);
+   IRTemp tmpt0 = newTemp(ty);
+   IRTemp tmpt1 = newTemp(ty);
+
+   /* There are 3 cases to consider:
+
+      reg-reg: ignore any lock prefix,
+               generate 'naive' (non-atomic) sequence
+
+      reg-mem, not locked: ignore any lock prefix, generate 'naive'
+                           (non-atomic) sequence
+
+      reg-mem, locked: use IRCAS
+   */
+
+   if (epartIsReg(rm)) {
+      /* case 1 */
+      assign( tmpd, getIRegE(sz, pfx, rm) );
+      assign( tmpt0, getIRegG(sz, pfx, rm) );
+      assign( tmpt1, binop(mkSizedOp(ty,Iop_Add8),
+                           mkexpr(tmpd), mkexpr(tmpt0)) );
+      setFlags_DEP1_DEP2( Iop_Add8, tmpd, tmpt0, ty );
+      putIRegG(sz, pfx, rm, mkexpr(tmpd));
+      putIRegE(sz, pfx, rm, mkexpr(tmpt1));
+      DIP("xadd%c %s, %s\n",
+          nameISize(sz), nameIRegG(sz,pfx,rm), nameIRegE(sz,pfx,rm));
+      *decode_ok = True;
+      return 1+delta0;
+   }
+   else if (!epartIsReg(rm) && !haveLOCK(pfx)) {
+      /* case 2 */
+      IRTemp addr = disAMode ( &len, vbi, pfx, delta0, dis_buf, 0 );
+      assign( tmpd,  loadLE(ty, mkexpr(addr)) );
+      assign( tmpt0, getIRegG(sz, pfx, rm) );
+      assign( tmpt1, binop(mkSizedOp(ty,Iop_Add8),
+                           mkexpr(tmpd), mkexpr(tmpt0)) );
+      setFlags_DEP1_DEP2( Iop_Add8, tmpd, tmpt0, ty );
+      storeLE( mkexpr(addr), mkexpr(tmpt1) );
+      putIRegG(sz, pfx, rm, mkexpr(tmpd));
+      DIP("xadd%c %s, %s\n",
+          nameISize(sz), nameIRegG(sz,pfx,rm), dis_buf);
+      *decode_ok = True;
+      return len+delta0;
+   }
+   else if (!epartIsReg(rm) && haveLOCK(pfx)) {
+      /* case 3 */
+      IRTemp addr = disAMode ( &len, vbi, pfx, delta0, dis_buf, 0 );
+      assign( tmpd,  loadLE(ty, mkexpr(addr)) );
+      assign( tmpt0, getIRegG(sz, pfx, rm) );
+      assign( tmpt1, binop(mkSizedOp(ty,Iop_Add8), 
+                           mkexpr(tmpd), mkexpr(tmpt0)) );
+      /* Atomically swap in the sum, expecting the previously-loaded
+         old value; casLE restarts the insn on mismatch. */
+      casLE( mkexpr(addr), mkexpr(tmpd)/*expVal*/,
+                           mkexpr(tmpt1)/*newVal*/, guest_RIP_curr_instr );
+      setFlags_DEP1_DEP2( Iop_Add8, tmpd, tmpt0, ty );
+      putIRegG(sz, pfx, rm, mkexpr(tmpd));
+      DIP("xadd%c %s, %s\n",
+          nameISize(sz), nameIRegG(sz,pfx,rm), dis_buf);
+      *decode_ok = True;
+      return len+delta0;
+   }
+   /*UNREACHED*/
+   vassert(0);
+}
+
+//.. /* Move 16 bits from Ew (ireg or mem) to G (a segment register). */
+//.. 
+//.. static
+//.. UInt dis_mov_Ew_Sw ( UChar sorb, Long delta0 )
+//.. {
+//..    Int    len;
+//..    IRTemp addr;
+//..    UChar  rm  = getUChar(delta0);
+//..    HChar  dis_buf[50];
+//.. 
+//..    if (epartIsReg(rm)) {
+//..       putSReg( gregOfRM(rm), getIReg(2, eregOfRM(rm)) );
+//..       DIP("movw %s,%s\n", nameIReg(2,eregOfRM(rm)), nameSReg(gregOfRM(rm)));
+//..       return 1+delta0;
+//..    } else {
+//..       addr = disAMode ( &len, sorb, delta0, dis_buf );
+//..       putSReg( gregOfRM(rm), loadLE(Ity_I16, mkexpr(addr)) );
+//..       DIP("movw %s,%s\n", dis_buf, nameSReg(gregOfRM(rm)));
+//..       return len+delta0;
+//..    }
+//.. }
+//.. 
+//.. /* Move 16 bits from G (a segment register) to Ew (ireg or mem).  If
+//..    dst is ireg and sz==4, zero out top half of it.  */
+//.. 
+//.. static
+//.. UInt dis_mov_Sw_Ew ( UChar sorb,
+//..                      Int   sz,
+//..                      UInt  delta0 )
+//.. {
+//..    Int    len;
+//..    IRTemp addr;
+//..    UChar  rm  = getUChar(delta0);
+//..    HChar  dis_buf[50];
+//.. 
+//..    vassert(sz == 2 || sz == 4);
+//.. 
+//..    if (epartIsReg(rm)) {
+//..       if (sz == 4)
+//..          putIReg(4, eregOfRM(rm), unop(Iop_16Uto32, getSReg(gregOfRM(rm))));
+//..       else
+//..          putIReg(2, eregOfRM(rm), getSReg(gregOfRM(rm)));
+//.. 
+//..       DIP("mov %s,%s\n", nameSReg(gregOfRM(rm)), nameIReg(sz,eregOfRM(rm)));
+//..       return 1+delta0;
+//..    } else {
+//..       addr = disAMode ( &len, sorb, delta0, dis_buf );
+//..       storeLE( mkexpr(addr), getSReg(gregOfRM(rm)) );
+//..       DIP("mov %s,%s\n", nameSReg(gregOfRM(rm)), dis_buf);
+//..       return len+delta0;
+//..    }
+//.. }
+//.. 
+//.. 
+//.. static 
+//.. void dis_push_segreg ( UInt sreg, Int sz )
+//.. {
+//..     IRTemp t1 = newTemp(Ity_I16);
+//..     IRTemp ta = newTemp(Ity_I32);
+//..     vassert(sz == 2 || sz == 4);
+//.. 
+//..     assign( t1, getSReg(sreg) );
+//..     assign( ta, binop(Iop_Sub32, getIReg(4, R_ESP), mkU32(sz)) );
+//..     putIReg(4, R_ESP, mkexpr(ta));
+//..     storeLE( mkexpr(ta), mkexpr(t1) );
+//.. 
+//..     DIP("pushw %s\n", nameSReg(sreg));
+//.. }
+//.. 
+//.. static
+//.. void dis_pop_segreg ( UInt sreg, Int sz )
+//.. {
+//..     IRTemp t1 = newTemp(Ity_I16);
+//..     IRTemp ta = newTemp(Ity_I32);
+//..     vassert(sz == 2 || sz == 4);
+//.. 
+//..     assign( ta, getIReg(4, R_ESP) );
+//..     assign( t1, loadLE(Ity_I16, mkexpr(ta)) );
+//.. 
+//..     putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(ta), mkU32(sz)) );
+//..     putSReg( sreg, mkexpr(t1) );
+//..     DIP("pop %s\n", nameSReg(sreg));
+//.. }
+
+/* RET (optionally with an immediate): pop the 8-byte return address,
+   release d64 further bytes of stack, emit the redzone ABI hint, and
+   jump to the popped address.  Marks the superblock as ended. */
+static
+void dis_ret ( /*MOD*/DisResult* dres, const VexAbiInfo* vbi, ULong d64 )
+{
+   IRTemp oldRSP  = newTemp(Ity_I64); 
+   IRTemp retAddr = newTemp(Ity_I64);
+   IRTemp newRSP  = newTemp(Ity_I64);
+   assign(oldRSP,  getIReg64(R_RSP));
+   assign(retAddr, loadLE(Ity_I64,mkexpr(oldRSP)));
+   /* 8 for the return address itself, plus d64 extra. */
+   assign(newRSP,  binop(Iop_Add64, mkexpr(oldRSP), mkU64(8+d64)));
+   putIReg64(R_RSP, mkexpr(newRSP));
+   make_redzone_AbiHint(vbi, newRSP, retAddr/*nia*/, "ret");
+   jmp_treg(dres, Ijk_Ret, retAddr);
+   vassert(dres->whatNext == Dis_StopHere);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- SSE/SSE2/SSE3 helpers                                ---*/
+/*------------------------------------------------------------*/
+
+/* Indicates whether the op requires a rounding-mode argument.  Note
+   that this covers only vector floating point arithmetic ops, and
+   omits the scalar ones that need rounding modes.  Note also that
+   inconsistencies here will get picked up later by the IR sanity
+   checker, so this isn't correctness-critical. */
+static Bool requiresRMode ( IROp op )
+{
+   switch (op) {
+      /* 128 bit ops */
+      case Iop_Add32Fx4: case Iop_Sub32Fx4:
+      case Iop_Mul32Fx4: case Iop_Div32Fx4:
+      case Iop_Add64Fx2: case Iop_Sub64Fx2:
+      case Iop_Mul64Fx2: case Iop_Div64Fx2:
+      /* 256 bit ops */
+      case Iop_Add32Fx8: case Iop_Sub32Fx8:
+      case Iop_Mul32Fx8: case Iop_Div32Fx8:
+      case Iop_Add64Fx4: case Iop_Sub64Fx4:
+      case Iop_Mul64Fx4: case Iop_Div64Fx4:
+         return True;
+      default:
+         /* Everything else takes no rounding-mode argument. */
+         return False;
+   }
+}
+
+
+/* Worker function; do not call directly. 
+   Handles full width G = G `op` E   and   G = (not G) `op` E,
+   with 'invertG' selecting the second form.  Ops for which
+   requiresRMode() is true get a (fake) rounding-mode as first
+   argument.  Returns the updated delta.
+*/
+
+static ULong dis_SSE_E_to_G_all_wrk ( 
+                const VexAbiInfo* vbi,
+                Prefix pfx, Long delta, 
+                const HChar* opname, IROp op,
+                Bool   invertG
+             )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getUChar(delta);
+   Bool    needsRMode = requiresRMode(op);
+   IRExpr* gpart
+      = invertG ? unop(Iop_NotV128, getXMMReg(gregOfRexRM(pfx,rm)))
+                : getXMMReg(gregOfRexRM(pfx,rm));
+   if (epartIsReg(rm)) {
+      putXMMReg(
+         gregOfRexRM(pfx,rm),
+         needsRMode
+            ? triop(op, get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        gpart,
+                        getXMMReg(eregOfRexRM(pfx,rm)))
+            : binop(op, gpart,
+                        getXMMReg(eregOfRexRM(pfx,rm)))
+      );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRexRM(pfx,rm)),
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+1;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      putXMMReg(
+         gregOfRexRM(pfx,rm), 
+         needsRMode
+            ? triop(op, get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        gpart,
+                        loadLE(Ity_V128, mkexpr(addr)))
+            : binop(op, gpart,
+                        loadLE(Ity_V128, mkexpr(addr)))
+      );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* All lanes SSE binary operation, G = G `op` E. */
+
+static
+ULong dis_SSE_E_to_G_all ( const VexAbiInfo* vbi,
+                           Prefix pfx, Long delta, 
+                           const HChar* opname, IROp op )
+{
+   /* Non-inverting form: delegate to the worker with invertG off. */
+   return dis_SSE_E_to_G_all_wrk( vbi, pfx, delta, opname, op,
+                                  False/*invertG*/ );
+}
+
+/* All lanes SSE binary operation, G = (not G) `op` E. */
+
+static
+ULong dis_SSE_E_to_G_all_invG ( const VexAbiInfo* vbi,
+                                Prefix pfx, Long delta, 
+                                const HChar* opname, IROp op )
+{
+   /* Inverting form: delegate to the worker with invertG on. */
+   return dis_SSE_E_to_G_all_wrk( vbi, pfx, delta, opname, op,
+                                  True/*invertG*/ );
+}
+
+
+/* Lowest 32-bit lane only SSE binary operation, G = G `op` E.
+   Returns the updated delta. */
+
+static ULong dis_SSE_E_to_G_lo32 ( const VexAbiInfo* vbi,
+                                   Prefix pfx, Long delta, 
+                                   const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getUChar(delta);
+   IRExpr* gpart = getXMMReg(gregOfRexRM(pfx,rm));
+   if (epartIsReg(rm)) {
+      putXMMReg( gregOfRexRM(pfx,rm), 
+                 binop(op, gpart,
+                           getXMMReg(eregOfRexRM(pfx,rm))) );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRexRM(pfx,rm)),
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+1;
+   } else {
+      /* We can only do a 32-bit memory read, so the upper 3/4 of the
+         E operand needs to be made simply of zeroes. */
+      IRTemp epart = newTemp(Ity_V128);
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( epart, unop( Iop_32UtoV128,
+                           loadLE(Ity_I32, mkexpr(addr))) );
+      putXMMReg( gregOfRexRM(pfx,rm), 
+                 binop(op, gpart, mkexpr(epart)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* Lower 64-bit lane only SSE binary operation, G = G `op` E.
+   Returns the updated delta. */
+
+static ULong dis_SSE_E_to_G_lo64 ( const VexAbiInfo* vbi,
+                                   Prefix pfx, Long delta, 
+                                   const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getUChar(delta);
+   IRExpr* gpart = getXMMReg(gregOfRexRM(pfx,rm));
+   if (epartIsReg(rm)) {
+      putXMMReg( gregOfRexRM(pfx,rm), 
+                 binop(op, gpart,
+                           getXMMReg(eregOfRexRM(pfx,rm))) );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRexRM(pfx,rm)),
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+1;
+   } else {
+      /* We can only do a 64-bit memory read, so the upper half of the
+         E operand needs to be made simply of zeroes. */
+      IRTemp epart = newTemp(Ity_V128);
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( epart, unop( Iop_64UtoV128,
+                           loadLE(Ity_I64, mkexpr(addr))) );
+      putXMMReg( gregOfRexRM(pfx,rm), 
+                 binop(op, gpart, mkexpr(epart)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* All lanes unary SSE operation, G = op(E).  Returns the updated
+   delta. */
+
+static ULong dis_SSE_E_to_G_unary_all ( 
+                const VexAbiInfo* vbi,
+                Prefix pfx, Long delta, 
+                const HChar* opname, IROp op
+             )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getUChar(delta);
+   // Sqrt32Fx4 and Sqrt64Fx2 take a rounding mode, which is faked
+   // up in the usual way.
+   Bool needsIRRM = op == Iop_Sqrt32Fx4 || op == Iop_Sqrt64Fx2;
+   if (epartIsReg(rm)) {
+      IRExpr* src = getXMMReg(eregOfRexRM(pfx,rm));
+      /* XXXROUNDINGFIXME */
+      IRExpr* res = needsIRRM ? binop(op, get_FAKE_roundingmode(), src)
+                              : unop(op, src);
+      putXMMReg( gregOfRexRM(pfx,rm), res );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRexRM(pfx,rm)),
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+1;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      IRExpr* src = loadLE(Ity_V128, mkexpr(addr));
+      /* XXXROUNDINGFIXME */
+      IRExpr* res = needsIRRM ? binop(op, get_FAKE_roundingmode(), src)
+                              : unop(op, src);
+      putXMMReg( gregOfRexRM(pfx,rm), res );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* Lowest 32-bit lane only unary SSE operation, G = op(E).  Returns
+   the updated delta. */
+
+static ULong dis_SSE_E_to_G_unary_lo32 ( 
+                const VexAbiInfo* vbi,
+                Prefix pfx, Long delta, 
+                const HChar* opname, IROp op
+             )
+{
+   /* First we need to get the old G value and patch the low 32 bits
+      of the E operand into it.  Then apply op and write back to G. */
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getUChar(delta);
+   IRTemp  oldG0 = newTemp(Ity_V128);
+   IRTemp  oldG1 = newTemp(Ity_V128);
+
+   assign( oldG0, getXMMReg(gregOfRexRM(pfx,rm)) );
+
+   if (epartIsReg(rm)) {
+      assign( oldG1, 
+              binop( Iop_SetV128lo32,
+                     mkexpr(oldG0),
+                     getXMMRegLane32(eregOfRexRM(pfx,rm), 0)) );
+      putXMMReg( gregOfRexRM(pfx,rm), unop(op, mkexpr(oldG1)) );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRexRM(pfx,rm)),
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+1;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( oldG1, 
+              binop( Iop_SetV128lo32,
+                     mkexpr(oldG0),
+                     loadLE(Ity_I32, mkexpr(addr)) ));
+      putXMMReg( gregOfRexRM(pfx,rm), unop(op, mkexpr(oldG1)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* Lowest 64-bit lane only unary SSE operation, G = op(E). */
+
static ULong dis_SSE_E_to_G_unary_lo64 ( 
                const VexAbiInfo* vbi,
                Prefix pfx, Long delta, 
                const HChar* opname, IROp op
             )
{
   /* First we need to get the old G value and patch the low 64 bits
      of the E operand into it.  Then apply op and write back to G.
      Returns the updated delta (bytes consumed). */
   HChar   dis_buf[50];
   Int     alen;
   IRTemp  addr;
   UChar   rm = getUChar(delta);
   IRTemp  oldG0 = newTemp(Ity_V128);  /* G as it currently stands */
   IRTemp  oldG1 = newTemp(Ity_V128);  /* G with its low 64 bits replaced by E's */

   assign( oldG0, getXMMReg(gregOfRexRM(pfx,rm)) );

   if (epartIsReg(rm)) {
      /* Register form: take E's low 64-bit lane directly. */
      assign( oldG1, 
              binop( Iop_SetV128lo64,
                     mkexpr(oldG0),
                     getXMMRegLane64(eregOfRexRM(pfx,rm), 0)) );
      putXMMReg( gregOfRexRM(pfx,rm), unop(op, mkexpr(oldG1)) );
      DIP("%s %s,%s\n", opname,
                        nameXMMReg(eregOfRexRM(pfx,rm)),
                        nameXMMReg(gregOfRexRM(pfx,rm)) );
      return delta+1;   /* only the modrm byte consumed */
   } else {
      /* Memory form: load 64 bits from the decoded address. */
      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
      assign( oldG1, 
              binop( Iop_SetV128lo64,
                     mkexpr(oldG0),
                     loadLE(Ity_I64, mkexpr(addr)) ));
      putXMMReg( gregOfRexRM(pfx,rm), unop(op, mkexpr(oldG1)) );
      DIP("%s %s,%s\n", opname,
                        dis_buf,
                        nameXMMReg(gregOfRexRM(pfx,rm)) );
      return delta+alen;   /* modrm + addressing bytes consumed */
   }
}
+
+
+/* SSE integer binary operation:
+      G = G `op` E   (eLeft == False)
+      G = E `op` G   (eLeft == True)
+*/
+static ULong dis_SSEint_E_to_G( 
+                const VexAbiInfo* vbi,
+                Prefix pfx, Long delta, 
+                const HChar* opname, IROp op,
+                Bool   eLeft
+             )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getUChar(delta);
+   IRExpr* gpart = getXMMReg(gregOfRexRM(pfx,rm));
+   IRExpr* epart = NULL;
+   if (epartIsReg(rm)) {
+      epart = getXMMReg(eregOfRexRM(pfx,rm));
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRexRM(pfx,rm)),
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      delta += 1;
+   } else {
+      addr  = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      epart = loadLE(Ity_V128, mkexpr(addr));
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      delta += alen;
+   }
+   putXMMReg( gregOfRexRM(pfx,rm), 
+              eLeft ? binop(op, epart, gpart)
+                    : binop(op, gpart, epart) );
+   return delta;
+}
+
+
+/* Helper for doing SSE FP comparisons.  False return ==> unhandled.
+   This is all a bit of a kludge in that it ignores the subtleties of
+   ordered-vs-unordered and signalling-vs-nonsignalling in the Intel
+   spec. */
static Bool findSSECmpOp ( /*OUT*/Bool* preSwapP,
                           /*OUT*/IROp* opP,
                           /*OUT*/Bool* postNotP,
                           UInt imm8, Bool all_lanes, Int sz )
{
   /* Map an SSE/AVX comparison-predicate immediate (imm8) to a
      (preSwap, op, postNot) triple:
        preSwap -- swap the two operands before comparing
        op      -- the IR comparison to use (chosen in 32Fx4 form
                   first, then fixed up below for width/laneage)
        postNot -- invert the comparison result afterwards
      Returns False for predicates not handled here. */
   if (imm8 >= 32) return False;

   /* First, compute a (preSwap, op, postNot) triple from
      the supplied imm8. */
   Bool pre = False;
   IROp op  = Iop_INVALID;
   Bool not = False;

#  define XXX(_pre, _op, _not) { pre = _pre; op = _op; not = _not; }
   // If you add a case here, add a corresponding test for both VCMPSD_128
   // and VCMPSS_128 in avx-1.c.
   switch (imm8) {
      // "O" = ordered, "U" = unordered
      // "Q" = non-signalling (quiet), "S" = signalling
      //
      //             swap operands?
      //             |
      //             |      cmp op          invert after?
      //             |      |               |
      //             v      v               v
      case 0x0:  XXX(False, Iop_CmpEQ32Fx4, False); break; // EQ_OQ
      case 0x1:  XXX(False, Iop_CmpLT32Fx4, False); break; // LT_OS
      case 0x2:  XXX(False, Iop_CmpLE32Fx4, False); break; // LE_OS
      case 0x3:  XXX(False, Iop_CmpUN32Fx4, False); break; // UNORD_Q
      case 0x4:  XXX(False, Iop_CmpEQ32Fx4, True);  break; // NEQ_UQ
      case 0x5:  XXX(False, Iop_CmpLT32Fx4, True);  break; // NLT_US
      case 0x6:  XXX(False, Iop_CmpLE32Fx4, True);  break; // NLE_US
      case 0x7:  XXX(False, Iop_CmpUN32Fx4, True);  break; // ORD_Q
      case 0x8:  XXX(False, Iop_CmpEQ32Fx4, False); break; // EQ_UQ
      case 0x9:  XXX(True,  Iop_CmpLE32Fx4, True);  break; // NGE_US
      /* "Enhanced Comparison Predicate[s] for VEX-Encoded [insns] */
      case 0xA:  XXX(True,  Iop_CmpLT32Fx4, True);  break; // NGT_US
      // 0xB  FALSE_OQ
      // 0xC: this isn't really right because it returns all-1s when
      // either operand is a NaN, and it should return all-0s.
      case 0xC:  XXX(False, Iop_CmpEQ32Fx4, True);  break; // NEQ_OQ
      case 0xD:  XXX(True,  Iop_CmpLE32Fx4, False); break; // GE_OS
      case 0xE:  XXX(True,  Iop_CmpLT32Fx4, False); break; // GT_OS
      // 0xF  TRUE_UQ
      // 0x10  EQ_OS
      case 0x11: XXX(False, Iop_CmpLT32Fx4, False); break; // LT_OQ
      case 0x12: XXX(False, Iop_CmpLE32Fx4, False); break; // LE_OQ
      // 0x13  UNORD_S
      // 0x14  NEQ_US
      // 0x15  NLT_UQ
      case 0x16: XXX(False, Iop_CmpLE32Fx4, True);  break; // NLE_UQ
      // 0x17  ORD_S
      // 0x18  EQ_US
      // 0x19  NGE_UQ
      // 0x1A  NGT_UQ
      // 0x1B  FALSE_OS
      // 0x1C  NEQ_OS
      // 0x1D  GE_OQ
      case 0x1E: XXX(True,  Iop_CmpLT32Fx4, False); break; // GT_OQ
      // 0x1F  TRUE_US
      /* Don't forget to add test cases to VCMPSS_128_<imm8> in
         avx-1.c if new cases turn up. */
      default: break;
   }
#  undef XXX
   if (op == Iop_INVALID) return False;

   /* Now convert the op into one with the same arithmetic but that is
      correct for the width and laneage requirements. */

   /**/ if (sz == 4 && all_lanes) {
      switch (op) {
         /* Identity mapping -- the table above is already in 32Fx4
            form -- but kept explicit so unexpected ops assert. */
         case Iop_CmpEQ32Fx4: op = Iop_CmpEQ32Fx4; break;
         case Iop_CmpLT32Fx4: op = Iop_CmpLT32Fx4; break;
         case Iop_CmpLE32Fx4: op = Iop_CmpLE32Fx4; break;
         case Iop_CmpUN32Fx4: op = Iop_CmpUN32Fx4; break;
         default: vassert(0);
      }
   }
   else if (sz == 4 && !all_lanes) {
      switch (op) {
         case Iop_CmpEQ32Fx4: op = Iop_CmpEQ32F0x4; break;
         case Iop_CmpLT32Fx4: op = Iop_CmpLT32F0x4; break;
         case Iop_CmpLE32Fx4: op = Iop_CmpLE32F0x4; break;
         case Iop_CmpUN32Fx4: op = Iop_CmpUN32F0x4; break;
         default: vassert(0);
      }
   }
   else if (sz == 8 && all_lanes) {
      switch (op) {
         case Iop_CmpEQ32Fx4: op = Iop_CmpEQ64Fx2; break;
         case Iop_CmpLT32Fx4: op = Iop_CmpLT64Fx2; break;
         case Iop_CmpLE32Fx4: op = Iop_CmpLE64Fx2; break;
         case Iop_CmpUN32Fx4: op = Iop_CmpUN64Fx2; break;
         default: vassert(0);
      }
   }
   else if (sz == 8 && !all_lanes) {
      switch (op) {
         case Iop_CmpEQ32Fx4: op = Iop_CmpEQ64F0x2; break;
         case Iop_CmpLT32Fx4: op = Iop_CmpLT64F0x2; break;
         case Iop_CmpLE32Fx4: op = Iop_CmpLE64F0x2; break;
         case Iop_CmpUN32Fx4: op = Iop_CmpUN64F0x2; break;
         default: vassert(0);
      }
   }
   else {
      vpanic("findSSECmpOp(amd64,guest)");
   }

   *preSwapP = pre; *opP = op; *postNotP = not;
   return True;
}
+
+
+/* Handles SSE 32F/64F comparisons.  It can fail, in which case it
+   returns the original delta to indicate failure. */
+
static Long dis_SSE_cmp_E_to_G ( const VexAbiInfo* vbi,
                                 Prefix pfx, Long delta, 
                                 const HChar* opname, Bool all_lanes, Int sz )
{
   /* CMPPS/CMPPD/CMPSS/CMPSD (non-VEX forms): compare G against E
      using the predicate in the trailing imm8, writing the all-1s /
      all-0s lane results to G.  Only predicates 0..7 are accepted
      here; anything else returns the original delta (FAIL) so the
      caller can treat the insn as undecoded. */
   Long    delta0 = delta;
   HChar   dis_buf[50];
   Int     alen;
   UInt    imm8;
   IRTemp  addr;
   Bool    preSwap = False;
   IROp    op      = Iop_INVALID;
   Bool    postNot = False;
   IRTemp  plain   = newTemp(Ity_V128);   /* comparison result before postNot */
   UChar   rm      = getUChar(delta);
   UShort  mask    = 0;
   vassert(sz == 4 || sz == 8);
   if (epartIsReg(rm)) {
      imm8 = getUChar(delta+1);
      if (imm8 >= 8) return delta0; /* FAIL */
      Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
      if (!ok) return delta0; /* FAIL */
      vassert(!preSwap); /* never needed for imm8 < 8 */
      assign( plain, binop(op, getXMMReg(gregOfRexRM(pfx,rm)), 
                               getXMMReg(eregOfRexRM(pfx,rm))) );
      delta += 2;   /* modrm + imm8 */
      DIP("%s $%d,%s,%s\n", opname,
                            (Int)imm8,
                            nameXMMReg(eregOfRexRM(pfx,rm)),
                            nameXMMReg(gregOfRexRM(pfx,rm)) );
   } else {
      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
      imm8 = getUChar(delta+alen);
      if (imm8 >= 8) return delta0; /* FAIL */
      Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
      if (!ok) return delta0; /* FAIL */
      vassert(!preSwap); /* never needed for imm8 < 8 */
      /* Lane-0-only compares read only 32/64 bits of memory;
         full-width compares read the whole 128 bits. */
      assign( plain, 
              binop(
                 op,
                 getXMMReg(gregOfRexRM(pfx,rm)), 
                   all_lanes 
                      ? loadLE(Ity_V128, mkexpr(addr))
                   : sz == 8
                      ? unop( Iop_64UtoV128, loadLE(Ity_I64, mkexpr(addr)))
                   : /*sz==4*/
                      unop( Iop_32UtoV128, loadLE(Ity_I32, mkexpr(addr)))
              ) 
      );
      delta += alen+1;   /* addressing bytes + imm8 */
      DIP("%s $%d,%s,%s\n", opname,
                            (Int)imm8,
                            dis_buf,
                            nameXMMReg(gregOfRexRM(pfx,rm)) );
   }

   if (postNot && all_lanes) {
      /* Invert every lane of the comparison result. */
      putXMMReg( gregOfRexRM(pfx,rm), 
                 unop(Iop_NotV128, mkexpr(plain)) );
   }
   else
   if (postNot && !all_lanes) {
      /* Lane-0-only compare: invert just the low 4 (sz==4) or low 8
         (sz==8) bytes, leaving the upper lanes untouched.  mkV128
         takes a bit-per-byte mask, hence 0x000F / 0x00FF. */
      mask = toUShort(sz==4 ? 0x000F : 0x00FF);
      putXMMReg( gregOfRexRM(pfx,rm), 
                 binop(Iop_XorV128, mkexpr(plain), mkV128(mask)) );
   }
   else {
      putXMMReg( gregOfRexRM(pfx,rm), mkexpr(plain) );
   }

   return delta;
}
+
+
+/* Vector by scalar shift of G by the amount specified at the bottom
+   of E. */
+
+static ULong dis_SSE_shiftG_byE ( const VexAbiInfo* vbi,
+                                  Prefix pfx, Long delta, 
+                                  const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen, size;
+   IRTemp  addr;
+   Bool    shl, shr, sar;
+   UChar   rm   = getUChar(delta);
+   IRTemp  g0   = newTemp(Ity_V128);
+   IRTemp  g1   = newTemp(Ity_V128);
+   IRTemp  amt  = newTemp(Ity_I64);
+   IRTemp  amt8 = newTemp(Ity_I8);
+   if (epartIsReg(rm)) {
+      assign( amt, getXMMRegLane64(eregOfRexRM(pfx,rm), 0) );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRexRM(pfx,rm)),
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      delta++;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( amt, loadLE(Ity_I64, mkexpr(addr)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRexRM(pfx,rm)) );
+      delta += alen;
+   }
+   assign( g0,   getXMMReg(gregOfRexRM(pfx,rm)) );
+   assign( amt8, unop(Iop_64to8, mkexpr(amt)) );
+
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      case Iop_ShlN16x8: shl = True; size = 32; break;
+      case Iop_ShlN32x4: shl = True; size = 32; break;
+      case Iop_ShlN64x2: shl = True; size = 64; break;
+      case Iop_SarN16x8: sar = True; size = 16; break;
+      case Iop_SarN32x4: sar = True; size = 32; break;
+      case Iop_ShrN16x8: shr = True; size = 16; break;
+      case Iop_ShrN32x4: shr = True; size = 32; break;
+      case Iop_ShrN64x2: shr = True; size = 64; break;
+      default: vassert(0);
+   }
+
+   if (shl || shr) {
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           mkV128(0x0000)
+        )
+     );
+   } else 
+   if (sar) {
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           binop(op, mkexpr(g0), mkU8(size-1))
+        )
+     );
+   } else {
+      vassert(0);
+   }
+
+   putXMMReg( gregOfRexRM(pfx,rm), mkexpr(g1) );
+   return delta;
+}
+
+
+/* Vector by scalar shift of E by an immediate byte. */
+
static 
ULong dis_SSE_shiftE_imm ( Prefix pfx, 
                           Long delta, const HChar* opname, IROp op )
{
   /* Vector-by-immediate shift of E in place: E = E `shift` imm8.
      E must be a register (asserted), and the modrm group field must
      be /2, /4 or /6 (the shift-group encodings handled here).
      Because the count is an immediate, the out-of-range handling is
      resolved at decode time rather than with an ITE. */
   Bool    shl, shr, sar;
   UChar   rm   = getUChar(delta);
   IRTemp  e0   = newTemp(Ity_V128);   /* E before shifting */
   IRTemp  e1   = newTemp(Ity_V128);   /* shifted result */
   UChar   amt, size;
   vassert(epartIsReg(rm));
   vassert(gregLO3ofRM(rm) == 2 
           || gregLO3ofRM(rm) == 4 || gregLO3ofRM(rm) == 6);
   amt = getUChar(delta+1);
   delta += 2;   /* modrm + imm8 */
   DIP("%s $%d,%s\n", opname,
                      (Int)amt,
                      nameXMMReg(eregOfRexRM(pfx,rm)) );
   assign( e0, getXMMReg(eregOfRexRM(pfx,rm)) );

   shl = shr = sar = False;
   size = 0;
   switch (op) {
      case Iop_ShlN16x8: shl = True; size = 16; break;
      case Iop_ShlN32x4: shl = True; size = 32; break;
      case Iop_ShlN64x2: shl = True; size = 64; break;
      case Iop_SarN16x8: sar = True; size = 16; break;
      case Iop_SarN32x4: sar = True; size = 32; break;
      case Iop_ShrN16x8: shr = True; size = 16; break;
      case Iop_ShrN32x4: shr = True; size = 32; break;
      case Iop_ShrN64x2: shr = True; size = 64; break;
      default: vassert(0);
   }

   if (shl || shr) {
     /* Logical shifts by >= lane width give zero. */
     assign( e1, amt >= size 
                    ? mkV128(0x0000)
                    : binop(op, mkexpr(e0), mkU8(amt))
     );
   } else 
   if (sar) {
     /* Arithmetic shifts by >= lane width saturate to size-1. */
     assign( e1, amt >= size 
                    ? binop(op, mkexpr(e0), mkU8(size-1))
                    : binop(op, mkexpr(e0), mkU8(amt))
     );
   } else {
      vassert(0);
   }

   putXMMReg( eregOfRexRM(pfx,rm), mkexpr(e1) );
   return delta;
}
+
+
+/* Get the current SSE rounding mode. */
+
+static IRExpr* /* :: Ity_I32 */ get_sse_roundingmode ( void )
+{
+   return 
+      unop( Iop_64to32, 
+            binop( Iop_And64, 
+                   IRExpr_Get( OFFB_SSEROUND, Ity_I64 ), 
+                   mkU64(3) ));
+}
+
+static void put_sse_roundingmode ( IRExpr* sseround )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, sseround) == Ity_I32);
+   stmt( IRStmt_Put( OFFB_SSEROUND, 
+                     unop(Iop_32Uto64,sseround) ) );
+}
+
+/* Break a V128-bit value up into four 32-bit ints. */
+
+static void breakupV128to32s ( IRTemp t128,
+                               /*OUTs*/
+                               IRTemp* t3, IRTemp* t2,
+                               IRTemp* t1, IRTemp* t0 )
+{
+   IRTemp hi64 = newTemp(Ity_I64);
+   IRTemp lo64 = newTemp(Ity_I64);
+   assign( hi64, unop(Iop_V128HIto64, mkexpr(t128)) );
+   assign( lo64, unop(Iop_V128to64,   mkexpr(t128)) );
+
+   vassert(t0 && *t0 == IRTemp_INVALID);
+   vassert(t1 && *t1 == IRTemp_INVALID);
+   vassert(t2 && *t2 == IRTemp_INVALID);
+   vassert(t3 && *t3 == IRTemp_INVALID);
+
+   *t0 = newTemp(Ity_I32);
+   *t1 = newTemp(Ity_I32);
+   *t2 = newTemp(Ity_I32);
+   *t3 = newTemp(Ity_I32);
+   assign( *t0, unop(Iop_64to32,   mkexpr(lo64)) );
+   assign( *t1, unop(Iop_64HIto32, mkexpr(lo64)) );
+   assign( *t2, unop(Iop_64to32,   mkexpr(hi64)) );
+   assign( *t3, unop(Iop_64HIto32, mkexpr(hi64)) );
+}
+
+/* Construct a V128-bit value from four 32-bit ints. */
+
+static IRExpr* mkV128from32s ( IRTemp t3, IRTemp t2,
+                               IRTemp t1, IRTemp t0 )
+{
+   return
+      binop( Iop_64HLtoV128,
+             binop(Iop_32HLto64, mkexpr(t3), mkexpr(t2)),
+             binop(Iop_32HLto64, mkexpr(t1), mkexpr(t0))
+   );
+}
+
+/* Break a 64-bit value up into four 16-bit ints. */
+
static void breakup64to16s ( IRTemp t64,
                             /*OUTs*/
                             IRTemp* t3, IRTemp* t2,
                             IRTemp* t1, IRTemp* t0 )
{
   /* Split a 64-bit value into its four 16-bit lanes; *t0 receives
      the least significant lane, *t3 the most significant. */
   IRTemp hi32 = newTemp(Ity_I32);
   IRTemp lo32 = newTemp(Ity_I32);
   assign( hi32, unop(Iop_64HIto32, mkexpr(t64)) );
   assign( lo32, unop(Iop_64to32,   mkexpr(t64)) );

   /* All four out-params must arrive unset; they are allocated here. */
   vassert(t0 && *t0 == IRTemp_INVALID);
   vassert(t1 && *t1 == IRTemp_INVALID);
   vassert(t2 && *t2 == IRTemp_INVALID);
   vassert(t3 && *t3 == IRTemp_INVALID);

   *t0 = newTemp(Ity_I16);
   *t1 = newTemp(Ity_I16);
   *t2 = newTemp(Ity_I16);
   *t3 = newTemp(Ity_I16);
   assign( *t0, unop(Iop_32to16,   mkexpr(lo32)) );
   assign( *t1, unop(Iop_32HIto16, mkexpr(lo32)) );
   assign( *t2, unop(Iop_32to16,   mkexpr(hi32)) );
   assign( *t3, unop(Iop_32HIto16, mkexpr(hi32)) );
}
+
+/* Construct a 64-bit value from four 16-bit ints. */
+
+static IRExpr* mk64from16s ( IRTemp t3, IRTemp t2,
+                             IRTemp t1, IRTemp t0 )
+{
+   return
+      binop( Iop_32HLto64,
+             binop(Iop_16HLto32, mkexpr(t3), mkexpr(t2)),
+             binop(Iop_16HLto32, mkexpr(t1), mkexpr(t0))
+   );
+}
+
+/* Break a V256-bit value up into four 64-bit ints. */
+
static void breakupV256to64s ( IRTemp t256,
                               /*OUTs*/
                               IRTemp* t3, IRTemp* t2,
                               IRTemp* t1, IRTemp* t0 )
{ 
   /* Split a V256 into its four 64-bit lanes; *t0 receives lane 0
      (least significant), *t3 lane 3 (most significant).  The
      out-params must arrive unset; they are allocated here. */
   vassert(t0 && *t0 == IRTemp_INVALID);
   vassert(t1 && *t1 == IRTemp_INVALID);
   vassert(t2 && *t2 == IRTemp_INVALID);
   vassert(t3 && *t3 == IRTemp_INVALID);
   *t0 = newTemp(Ity_I64);
   *t1 = newTemp(Ity_I64);
   *t2 = newTemp(Ity_I64);
   *t3 = newTemp(Ity_I64);
   assign( *t0, unop(Iop_V256to64_0, mkexpr(t256)) );
   assign( *t1, unop(Iop_V256to64_1, mkexpr(t256)) );
   assign( *t2, unop(Iop_V256to64_2, mkexpr(t256)) );
   assign( *t3, unop(Iop_V256to64_3, mkexpr(t256)) );
}
+
+/* Break a V256-bit value up into two V128s. */
+
+static void breakupV256toV128s ( IRTemp t256,
+                                 /*OUTs*/
+                                 IRTemp* t1, IRTemp* t0 )
+{ 
+   vassert(t0 && *t0 == IRTemp_INVALID);
+   vassert(t1 && *t1 == IRTemp_INVALID);
+   *t0 = newTemp(Ity_V128);
+   *t1 = newTemp(Ity_V128);
+   assign(*t1, unop(Iop_V256toV128_1, mkexpr(t256)));
+   assign(*t0, unop(Iop_V256toV128_0, mkexpr(t256)));
+}
+
+/* Break a V256-bit value up into eight 32-bit ints.  */
+
static void breakupV256to32s ( IRTemp t256,
                               /*OUTs*/
                               IRTemp* t7, IRTemp* t6,
                               IRTemp* t5, IRTemp* t4,
                               IRTemp* t3, IRTemp* t2,
                               IRTemp* t1, IRTemp* t0 )
{
   /* Split a V256 into its eight 32-bit lanes by halving it into two
      V128s and delegating.  *t0 is the least significant lane of the
      low half, *t7 the most significant lane of the high half. */
   IRTemp t128_1 = IRTemp_INVALID;
   IRTemp t128_0 = IRTemp_INVALID;
   breakupV256toV128s( t256, &t128_1, &t128_0 );
   breakupV128to32s( t128_1, t7, t6, t5, t4 );
   breakupV128to32s( t128_0, t3, t2, t1, t0 );
}
+
+/* Break a V128-bit value up into two 64-bit ints. */
+
static void breakupV128to64s ( IRTemp t128,
                               /*OUTs*/
                               IRTemp* t1, IRTemp* t0 )
{
   /* Split a V128 into its two 64-bit halves; *t0 receives the low
      half, *t1 the high half.  Out-params must arrive unset. */
   vassert(t0 && *t0 == IRTemp_INVALID);
   vassert(t1 && *t1 == IRTemp_INVALID);
   *t0 = newTemp(Ity_I64);
   *t1 = newTemp(Ity_I64);
   assign( *t0, unop(Iop_V128to64,   mkexpr(t128)) );
   assign( *t1, unop(Iop_V128HIto64, mkexpr(t128)) );
}
+
+/* Construct a V256-bit value from eight 32-bit ints. */
+
static IRExpr* mkV256from32s ( IRTemp t7, IRTemp t6,
                               IRTemp t5, IRTemp t4,
                               IRTemp t3, IRTemp t2,
                               IRTemp t1, IRTemp t0 )
{
   /* Glue eight 32-bit lanes into a V256; t0 ends up least
      significant, t7 most significant. */
   return
      binop( Iop_V128HLtoV256,
             binop( Iop_64HLtoV128,
                    binop(Iop_32HLto64, mkexpr(t7), mkexpr(t6)),
                    binop(Iop_32HLto64, mkexpr(t5), mkexpr(t4)) ),
             binop( Iop_64HLtoV128,
                    binop(Iop_32HLto64, mkexpr(t3), mkexpr(t2)),
                    binop(Iop_32HLto64, mkexpr(t1), mkexpr(t0)) )
   );
}
+
+/* Construct a V256-bit value from four 64-bit ints. */
+
+static IRExpr* mkV256from64s ( IRTemp t3, IRTemp t2,
+                               IRTemp t1, IRTemp t0 )
+{
+   return
+      binop( Iop_V128HLtoV256,
+             binop(Iop_64HLtoV128, mkexpr(t3), mkexpr(t2)),
+             binop(Iop_64HLtoV128, mkexpr(t1), mkexpr(t0))
+   );
+}
+
+/* Helper for the SSSE3 (not SSE3) PMULHRSW insns.  Given two 64-bit
+   values (aa,bb), computes, for each of the 4 16-bit lanes:
+
+   (((aa_lane *s32 bb_lane) >>u 14) + 1) >>u 1
+*/
static IRExpr* dis_PMULHRSW_helper ( IRExpr* aax, IRExpr* bbx )
{
   /* Compute, for each of the four 16-bit lanes of (aax, bbx):
         (((aa_lane *s32 bb_lane) >>u 14) + 1) >>u 1
      i.e. the PMULHRSW rounded high product. */
   IRTemp aa      = newTemp(Ity_I64);
   IRTemp bb      = newTemp(Ity_I64);
   IRTemp aahi32s = newTemp(Ity_I64);  /* aa's two high lanes, sign-extended to 32 */
   IRTemp aalo32s = newTemp(Ity_I64);  /* aa's two low lanes, sign-extended to 32 */
   IRTemp bbhi32s = newTemp(Ity_I64);
   IRTemp bblo32s = newTemp(Ity_I64);
   IRTemp rHi     = newTemp(Ity_I64);  /* results for the two high lanes */
   IRTemp rLo     = newTemp(Ity_I64);  /* results for the two low lanes */
   IRTemp one32x2 = newTemp(Ity_I64);  /* constant 1 in each 32-bit lane (rounding) */
   assign(aa, aax);
   assign(bb, bbx);
   /* Sign-extend each 16-bit lane to 32 bits: interleaving a value
      with itself places each lane in the top half of a 32-bit slot,
      and the arithmetic shift right by 16 then sign-extends it. */
   assign( aahi32s,
           binop(Iop_SarN32x2,
                 binop(Iop_InterleaveHI16x4, mkexpr(aa), mkexpr(aa)),
                 mkU8(16) ));
   assign( aalo32s,
           binop(Iop_SarN32x2,
                 binop(Iop_InterleaveLO16x4, mkexpr(aa), mkexpr(aa)),
                 mkU8(16) ));
   assign( bbhi32s,
           binop(Iop_SarN32x2,
                 binop(Iop_InterleaveHI16x4, mkexpr(bb), mkexpr(bb)),
                 mkU8(16) ));
   assign( bblo32s,
           binop(Iop_SarN32x2,
                 binop(Iop_InterleaveLO16x4, mkexpr(bb), mkexpr(bb)),
                 mkU8(16) ));
   assign(one32x2, mkU64( (1ULL << 32) + 1 ));
   /* ((product >>u 14) + 1) >>u 1, per 32-bit lane. */
   assign(
      rHi,
      binop(
         Iop_ShrN32x2,
         binop(
            Iop_Add32x2, 
            binop(
               Iop_ShrN32x2,
               binop(Iop_Mul32x2, mkexpr(aahi32s), mkexpr(bbhi32s)),
               mkU8(14)
            ),
            mkexpr(one32x2)
         ),
         mkU8(1)
      )
   );
   assign(
      rLo,
      binop(
         Iop_ShrN32x2,
         binop(
            Iop_Add32x2, 
            binop(
               Iop_ShrN32x2,
               binop(Iop_Mul32x2, mkexpr(aalo32s), mkexpr(bblo32s)),
               mkU8(14)
            ),
            mkexpr(one32x2)
         ),
         mkU8(1)
      )
   );
   /* Keep the low 16 bits of each 32-bit result and repack into
      four 16-bit lanes. */
   return
      binop(Iop_CatEvenLanes16x4, mkexpr(rHi), mkexpr(rLo));
}
+
+/* Helper for the SSSE3 (not SSE3) PSIGN{B,W,D} insns.  Given two 64-bit
+   values (aa,bb), computes, for each lane:
+
+          if aa_lane < 0 then - bb_lane
+     else if aa_lane > 0 then bb_lane
+     else 0
+*/
static IRExpr* dis_PSIGN_helper ( IRExpr* aax, IRExpr* bbx, Int laneszB )
{
   /* Compute, for each lane of laneszB bytes:
           if aa_lane < 0 then -bb_lane
      else if aa_lane > 0 then  bb_lane
      else 0 */
   IRTemp aa       = newTemp(Ity_I64);
   IRTemp bb       = newTemp(Ity_I64);
   IRTemp zero     = newTemp(Ity_I64);
   IRTemp bbNeg    = newTemp(Ity_I64);  /* -bb, lanewise */
   IRTemp negMask  = newTemp(Ity_I64);  /* all-1s in lanes where aa < 0 */
   IRTemp posMask  = newTemp(Ity_I64);  /* all-1s in lanes where aa > 0 */
   IROp   opSub    = Iop_INVALID;
   IROp   opCmpGTS = Iop_INVALID;

   switch (laneszB) {
      case 1: opSub = Iop_Sub8x8;  opCmpGTS = Iop_CmpGT8Sx8;  break;
      case 2: opSub = Iop_Sub16x4; opCmpGTS = Iop_CmpGT16Sx4; break;
      case 4: opSub = Iop_Sub32x2; opCmpGTS = Iop_CmpGT32Sx2; break;
      default: vassert(0);
   }

   assign( aa,      aax );
   assign( bb,      bbx );
   assign( zero,    mkU64(0) );
   assign( bbNeg,   binop(opSub,    mkexpr(zero), mkexpr(bb)) );
   assign( negMask, binop(opCmpGTS, mkexpr(zero), mkexpr(aa)) );
   assign( posMask, binop(opCmpGTS, mkexpr(aa),   mkexpr(zero)) );

   /* Lanes where aa == 0 match neither mask, so they come out 0. */
   return
      binop(Iop_Or64,
            binop(Iop_And64, mkexpr(bb),    mkexpr(posMask)),
            binop(Iop_And64, mkexpr(bbNeg), mkexpr(negMask)) );

}
+
+
+/* Helper for the SSSE3 (not SSE3) PABS{B,W,D} insns.  Given a 64-bit
+   value aa, computes, for each lane
+
+   if aa < 0 then -aa else aa
+
+   Note that the result is interpreted as unsigned, so that the
+   absolute value of the most negative signed input can be
+   represented.
+*/
static IRTemp math_PABS_MMX ( IRTemp aa, Int laneszB )
{
   /* Lanewise absolute value of a 64-bit value: for each lane of
      laneszB bytes, if aa < 0 then -aa else aa.  The result is
      interpreted as unsigned, so abs(INT_MIN-of-lane) is
      representable. */
   IRTemp res     = newTemp(Ity_I64);
   IRTemp zero    = newTemp(Ity_I64);
   IRTemp aaNeg   = newTemp(Ity_I64);  /* -aa, lanewise */
   IRTemp negMask = newTemp(Ity_I64);  /* all-1s in negative lanes */
   IRTemp posMask = newTemp(Ity_I64);  /* complement of negMask */
   IROp   opSub   = Iop_INVALID;
   IROp   opSarN  = Iop_INVALID;

   switch (laneszB) {
      case 1: opSub = Iop_Sub8x8;  opSarN = Iop_SarN8x8;  break;
      case 2: opSub = Iop_Sub16x4; opSarN = Iop_SarN16x4; break;
      case 4: opSub = Iop_Sub32x2; opSarN = Iop_SarN32x2; break;
      default: vassert(0);
   }

   /* Arithmetic shift by (lane width - 1) copies the sign bit across
      the whole lane: all-1s for negative lanes, all-0s otherwise. */
   assign( negMask, binop(opSarN, mkexpr(aa), mkU8(8*laneszB-1)) );
   assign( posMask, unop(Iop_Not64, mkexpr(negMask)) );
   assign( zero,    mkU64(0) );
   assign( aaNeg,   binop(opSub, mkexpr(zero), mkexpr(aa)) );
   assign( res,
           binop(Iop_Or64,
                 binop(Iop_And64, mkexpr(aa),    mkexpr(posMask)),
                 binop(Iop_And64, mkexpr(aaNeg), mkexpr(negMask)) ));
   return res;
}
+
+/* XMM version of math_PABS_MMX. */
+static IRTemp math_PABS_XMM ( IRTemp aa, Int laneszB )
+{
+   IRTemp res  = newTemp(Ity_V128);
+   IRTemp aaHi = newTemp(Ity_I64);
+   IRTemp aaLo = newTemp(Ity_I64);
+   assign(aaHi, unop(Iop_V128HIto64, mkexpr(aa)));
+   assign(aaLo, unop(Iop_V128to64, mkexpr(aa)));
+   assign(res, binop(Iop_64HLtoV128,
+                     mkexpr(math_PABS_MMX(aaHi, laneszB)),
+                     mkexpr(math_PABS_MMX(aaLo, laneszB))));
+   return res;
+}
+
+/* Specialisations of math_PABS_XMM, since there's no easy way to do
+   partial applications in C :-( */
static IRTemp math_PABS_XMM_pap4 ( IRTemp aa ) {
   return math_PABS_XMM(aa, 4);  /* 4-byte (32-bit) lanes */
}
+
static IRTemp math_PABS_XMM_pap2 ( IRTemp aa ) {
   return math_PABS_XMM(aa, 2);  /* 2-byte (16-bit) lanes */
}
+
static IRTemp math_PABS_XMM_pap1 ( IRTemp aa ) {
   return math_PABS_XMM(aa, 1);  /* 1-byte (8-bit) lanes */
}
+
+/* YMM version of math_PABS_XMM. */
static IRTemp math_PABS_YMM ( IRTemp aa, Int laneszB )
{
   /* 256-bit lanewise absolute value: apply the 128-bit version to
      each V128 half of aa and glue the results back together. */
   IRTemp res  = newTemp(Ity_V256);
   IRTemp aaHi = IRTemp_INVALID;
   IRTemp aaLo = IRTemp_INVALID;
   breakupV256toV128s(aa, &aaHi, &aaLo);
   assign(res, binop(Iop_V128HLtoV256,
                     mkexpr(math_PABS_XMM(aaHi, laneszB)),
                     mkexpr(math_PABS_XMM(aaLo, laneszB))));
   return res;
}
+
static IRTemp math_PABS_YMM_pap4 ( IRTemp aa ) {
   return math_PABS_YMM(aa, 4);  /* 4-byte (32-bit) lanes */
}
+
static IRTemp math_PABS_YMM_pap2 ( IRTemp aa ) {
   return math_PABS_YMM(aa, 2);  /* 2-byte (16-bit) lanes */
}
+
static IRTemp math_PABS_YMM_pap1 ( IRTemp aa ) {
   return math_PABS_YMM(aa, 1);  /* 1-byte (8-bit) lanes */
}
+
+static IRExpr* dis_PALIGNR_XMM_helper ( IRTemp hi64,
+                                        IRTemp lo64, Long byteShift )
+{
+   vassert(byteShift >= 1 && byteShift <= 7);
+   return
+      binop(Iop_Or64,
+            binop(Iop_Shl64, mkexpr(hi64), mkU8(8*(8-byteShift))),
+            binop(Iop_Shr64, mkexpr(lo64), mkU8(8*byteShift))
+      );
+}
+
static IRTemp math_PALIGNR_XMM ( IRTemp sV, IRTemp dV, UInt imm8 ) 
{
   /* PALIGNR: conceptually concatenate dV:sV (dV in the high half),
      shift the 256-bit value right by imm8 bytes, and keep the low
      128 bits.  Implemented by case analysis on which 64-bit chunks
      the result straddles. */
   IRTemp res = newTemp(Ity_V128);
   IRTemp sHi = newTemp(Ity_I64);
   IRTemp sLo = newTemp(Ity_I64);
   IRTemp dHi = newTemp(Ity_I64);
   IRTemp dLo = newTemp(Ity_I64);
   IRTemp rHi = newTemp(Ity_I64);
   IRTemp rLo = newTemp(Ity_I64);

   assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
   assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
   assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
   assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );

   if (imm8 == 0) {
      /* No shift: result is sV unchanged. */
      assign( rHi, mkexpr(sHi) );
      assign( rLo, mkexpr(sLo) );
   }
   else if (imm8 >= 1 && imm8 <= 7) {
      /* Result straddles (sLo,sHi) and (sHi,dLo). */
      assign( rHi, dis_PALIGNR_XMM_helper(dLo, sHi, imm8) );
      assign( rLo, dis_PALIGNR_XMM_helper(sHi, sLo, imm8) );
   }
   else if (imm8 == 8) {
      /* Exactly one 64-bit chunk: result is (dLo:sHi). */
      assign( rHi, mkexpr(dLo) );
      assign( rLo, mkexpr(sHi) );
   }
   else if (imm8 >= 9 && imm8 <= 15) {
      /* Result straddles (sHi,dLo) and (dLo,dHi). */
      assign( rHi, dis_PALIGNR_XMM_helper(dHi, dLo, imm8-8) );
      assign( rLo, dis_PALIGNR_XMM_helper(dLo, sHi, imm8-8) );
   }
   else if (imm8 == 16) {
      /* Exactly two chunks: result is dV. */
      assign( rHi, mkexpr(dHi) );
      assign( rLo, mkexpr(dLo) );
   }
   else if (imm8 >= 17 && imm8 <= 23) {
      /* Only dV contributes; rHi is dHi shifted with zero fill. */
      assign( rHi, binop(Iop_Shr64, mkexpr(dHi), mkU8(8*(imm8-16))) );
      assign( rLo, dis_PALIGNR_XMM_helper(dHi, dLo, imm8-16) );
   }
   else if (imm8 == 24) {
      /* Only dHi remains, in the low half. */
      assign( rHi, mkU64(0) );
      assign( rLo, mkexpr(dHi) );
   }
   else if (imm8 >= 25 && imm8 <= 31) {
      assign( rHi, mkU64(0) );
      assign( rLo, binop(Iop_Shr64, mkexpr(dHi), mkU8(8*(imm8-24))) );
   }
   else if (imm8 >= 32 && imm8 <= 255) {
      /* Shifted out entirely: result is zero. */
      assign( rHi, mkU64(0) );
      assign( rLo, mkU64(0) );
   }
   else
      vassert(0);

   assign( res, binop(Iop_64HLtoV128, mkexpr(rHi), mkexpr(rLo)));
   return res;
}
+
+
+/* Generate a SIGSEGV followed by a restart of the current instruction
+   if effective_addr is not 16-aligned.  This is required behaviour
+   for some SSE3 instructions and all 128-bit SSSE3 instructions.
+   This assumes that guest_RIP_curr_instr is set correctly! */
+static
+void gen_SEGV_if_not_XX_aligned ( IRTemp effective_addr, ULong mask )
+{
+   stmt(
+      IRStmt_Exit(
+         binop(Iop_CmpNE64,
+               binop(Iop_And64,mkexpr(effective_addr),mkU64(mask)),
+               mkU64(0)),
+         Ijk_SigSEGV,
+         IRConst_U64(guest_RIP_curr_instr),
+         OFFB_RIP
+      )
+   );
+}
+
static void gen_SEGV_if_not_16_aligned ( IRTemp effective_addr ) {
   /* Fault if any of the low 4 address bits are set. */
   gen_SEGV_if_not_XX_aligned(effective_addr, 16-1);
}
+
static void gen_SEGV_if_not_32_aligned ( IRTemp effective_addr ) {
   /* Fault if any of the low 5 address bits are set. */
   gen_SEGV_if_not_XX_aligned(effective_addr, 32-1);
}
+
+/* Helper for deciding whether a given insn (starting at the opcode
+   byte) may validly be used with a LOCK prefix.  The following insns
+   may be used with LOCK when their destination operand is in memory.
+   AFAICS this is exactly the same for both 32-bit and 64-bit mode.
+
+   ADD        80 /0,  81 /0,  82 /0,  83 /0,  00,  01
+   OR         80 /1,  81 /1,  82 /x,  83 /1,  08,  09
+   ADC        80 /2,  81 /2,  82 /2,  83 /2,  10,  11
+   SBB        80 /3,  81 /3,  82 /x,  83 /3,  18,  19
+   AND        80 /4,  81 /4,  82 /x,  83 /4,  20,  21
+   SUB        80 /5,  81 /5,  82 /x,  83 /5,  28,  29
+   XOR        80 /6,  81 /6,  82 /x,  83 /6,  30,  31
+
+   DEC        FE /1,  FF /1
+   INC        FE /0,  FF /0
+
+   NEG        F6 /3,  F7 /3
+   NOT        F6 /2,  F7 /2
+
+   XCHG       86, 87
+
+   BTC        0F BB,  0F BA /7
+   BTR        0F B3,  0F BA /6
+   BTS        0F AB,  0F BA /5
+
+   CMPXCHG    0F B0,  0F B1
+   CMPXCHG8B  0F C7 /1
+
+   XADD       0F C0,  0F C1
+
+   ------------------------------
+
+   80 /0  =  addb $imm8,  rm8
+   81 /0  =  addl $imm32, rm32  and  addw $imm16, rm16
+   82 /0  =  addb $imm8,  rm8
+   83 /0  =  addl $simm8, rm32  and  addw $simm8, rm16
+
+   00     =  addb r8,  rm8
+   01     =  addl r32, rm32  and  addw r16, rm16
+
+   Same for ADD OR ADC SBB AND SUB XOR
+
+   FE /1  = dec rm8
+   FF /1  = dec rm32  and  dec rm16
+
+   FE /0  = inc rm8
+   FF /0  = inc rm32  and  inc rm16
+
+   F6 /3  = neg rm8
+   F7 /3  = neg rm32  and  neg rm16
+
+   F6 /2  = not rm8
+   F7 /2  = not rm32  and  not rm16
+
+   0F BB     = btcw r16, rm16    and  btcl r32, rm32
+   0F BA /7  = btcw $imm8, rm16  and  btcl $imm8, rm32
+
+   Same for BTS, BTR
+*/
+/* Decide whether the insn starting at opc[0] may legitimately carry a
+   LOCK prefix.  LOCK is allowed only for the read-modify-write forms
+   listed in the comment above, and only when the destination operand
+   is in memory -- hence every case also requires !epartIsReg on the
+   relevant modrm byte.  opc must point at at least 3 valid bytes,
+   since opc[2] is inspected for the 0F escapes. */
+static Bool can_be_used_with_LOCK_prefix ( const UChar* opc )
+{
+   switch (opc[0]) {
+      /* ADD/OR/ADC/SBB/AND/SUB/XOR  r,rm forms */
+      case 0x00: case 0x01: case 0x08: case 0x09:
+      case 0x10: case 0x11: case 0x18: case 0x19:
+      case 0x20: case 0x21: case 0x28: case 0x29:
+      case 0x30: case 0x31:
+         if (!epartIsReg(opc[1]))
+            return True;
+         break;
+
+      /* Group 1: ADD..XOR $imm,rm are /0 .. /6.  gregLO3ofRM returns
+         an unsigned value, so the original ">= 0" lower-bound test
+         was a tautology and has been dropped. */
+      case 0x80: case 0x81: case 0x82: case 0x83:
+         if (gregLO3ofRM(opc[1]) <= 6
+             && !epartIsReg(opc[1]))
+            return True;
+         break;
+
+      /* INC is /0, DEC is /1 (">= 0" tautology dropped here too). */
+      case 0xFE: case 0xFF:
+         if (gregLO3ofRM(opc[1]) <= 1
+             && !epartIsReg(opc[1]))
+            return True;
+         break;
+
+      /* NOT is /2, NEG is /3 */
+      case 0xF6: case 0xF7:
+         if (gregLO3ofRM(opc[1]) >= 2 && gregLO3ofRM(opc[1]) <= 3
+             && !epartIsReg(opc[1]))
+            return True;
+         break;
+
+      /* XCHG r,rm */
+      case 0x86: case 0x87:
+         if (!epartIsReg(opc[1]))
+            return True;
+         break;
+
+      /* Two-byte (0F) escapes */
+      case 0x0F: {
+         switch (opc[1]) {
+            /* BTC / BTR / BTS  reg,rm */
+            case 0xBB: case 0xB3: case 0xAB:
+               if (!epartIsReg(opc[2]))
+                  return True;
+               break;
+            /* BTS is /5, BTR is /6, BTC is /7, with imm8 */
+            case 0xBA:
+               if (gregLO3ofRM(opc[2]) >= 5 && gregLO3ofRM(opc[2]) <= 7
+                   && !epartIsReg(opc[2]))
+                  return True;
+               break;
+            /* CMPXCHG */
+            case 0xB0: case 0xB1:
+               if (!epartIsReg(opc[2]))
+                  return True;
+               break;
+            /* CMPXCHG8B is /1 */
+            case 0xC7:
+               if (gregLO3ofRM(opc[2]) == 1 && !epartIsReg(opc[2]) )
+                  return True;
+               break;
+            /* XADD */
+            case 0xC0: case 0xC1:
+               if (!epartIsReg(opc[2]))
+                  return True;
+               break;
+            default:
+               break;
+         } /* switch (opc[1]) */
+         break;
+      }
+
+      default:
+         break;
+   } /* switch (opc[0]) */
+
+   return False;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level SSE/SSE2: dis_ESC_0F__SSE2                 ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* (U)COMISD: compare the low 64-bit float lanes of xmm G and xmm/m64
+   E, and write the outcome into the guest condition-code thunk.  opc
+   selects COMISD (0x2F) vs UCOMISD (0x2E); in this function isAvx
+   only affects the printed mnemonic.  Returns the updated delta
+   (offset of the next insn byte). */
+static Long dis_COMISD ( const VexAbiInfo* vbi, Prefix pfx,
+                         Long delta, Bool isAvx, UChar opc )
+{
+   vassert(opc == 0x2F/*COMISD*/ || opc == 0x2E/*UCOMISD*/);
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   IRTemp argL  = newTemp(Ity_F64);
+   IRTemp argR  = newTemp(Ity_F64);
+   UChar  modrm = getUChar(delta);
+   IRTemp addr  = IRTemp_INVALID;
+   if (epartIsReg(modrm)) {
+      /* Register source: low F64 lane of xmm E. */
+      assign( argR, getXMMRegLane64F( eregOfRexRM(pfx,modrm), 
+                                      0/*lowest lane*/ ) );
+      delta += 1;
+      DIP("%s%scomisd %s,%s\n", isAvx ? "v" : "",
+                                opc==0x2E ? "u" : "",
+                                nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                nameXMMReg(gregOfRexRM(pfx,modrm)) );
+   } else {
+      /* Memory source: 64-bit little-endian load from the amode. */
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( argR, loadLE(Ity_F64, mkexpr(addr)) );
+      delta += alen;
+      DIP("%s%scomisd %s,%s\n", isAvx ? "v" : "",
+                                opc==0x2E ? "u" : "",
+                                dis_buf,
+                                nameXMMReg(gregOfRexRM(pfx,modrm)) );
+   }
+   assign( argL, getXMMRegLane64F( gregOfRexRM(pfx,modrm), 
+                                   0/*lowest lane*/ ) );
+
+   /* Set flags via the COPY thunk.  The CmpF64 result is masked with
+      0x45, presumably keeping only the bits that correspond to
+      ZF/PF/CF in the thunk encoding -- TODO confirm against the
+      IRCmpF64Result definition. */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+   stmt( IRStmt_Put( 
+            OFFB_CC_DEP1,
+            binop( Iop_And64,
+                   unop( Iop_32Uto64, 
+                         binop(Iop_CmpF64, mkexpr(argL), mkexpr(argR)) ),
+                   mkU64(0x45)
+       )));
+   return delta;
+}
+
+
+/* (U)COMISS: like dis_COMISD but for the low 32-bit float lanes; both
+   operands are widened with F32toF64 so the comparison itself is done
+   at F64.  opc selects COMISS (0x2F) vs UCOMISS (0x2E); isAvx only
+   affects the printed mnemonic.  Returns the updated delta. */
+static Long dis_COMISS ( const VexAbiInfo* vbi, Prefix pfx,
+                         Long delta, Bool isAvx, UChar opc )
+{
+   vassert(opc == 0x2F/*COMISS*/ || opc == 0x2E/*UCOMISS*/);
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   IRTemp argL  = newTemp(Ity_F32);
+   IRTemp argR  = newTemp(Ity_F32);
+   UChar  modrm = getUChar(delta);
+   IRTemp addr  = IRTemp_INVALID;
+   if (epartIsReg(modrm)) {
+      /* Register source: low F32 lane of xmm E. */
+      assign( argR, getXMMRegLane32F( eregOfRexRM(pfx,modrm), 
+                                      0/*lowest lane*/ ) );
+      delta += 1;
+      DIP("%s%scomiss %s,%s\n", isAvx ? "v" : "",
+                                opc==0x2E ? "u" : "",
+                                nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                nameXMMReg(gregOfRexRM(pfx,modrm)) );
+   } else {
+      /* Memory source: 32-bit little-endian load from the amode. */
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( argR, loadLE(Ity_F32, mkexpr(addr)) );
+      delta += alen;
+      DIP("%s%scomiss %s,%s\n", isAvx ? "v" : "",
+                                opc==0x2E ? "u" : "",
+                                dis_buf,
+                                nameXMMReg(gregOfRexRM(pfx,modrm)) );
+   }
+   assign( argL, getXMMRegLane32F( gregOfRexRM(pfx,modrm), 
+                                   0/*lowest lane*/ ) );
+
+   /* Flag setting is identical to dis_COMISD: COPY thunk, CmpF64
+      result masked with 0x45 (presumably the ZF/PF/CF bits -- TODO
+      confirm against the IRCmpF64Result definition). */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+   stmt( IRStmt_Put( 
+            OFFB_CC_DEP1,
+            binop( Iop_And64,
+                   unop( Iop_32Uto64, 
+                         binop(Iop_CmpF64, 
+                               unop(Iop_F32toF64,mkexpr(argL)),
+                               unop(Iop_F32toF64,mkexpr(argR)))),
+                   mkU64(0x45)
+       )));
+   return delta;
+}
+
+
+/* PSHUFD (128-bit): permute the four 32-bit lanes of the xmm/m128
+   source according to the trailing imm8 'order' byte -- two bits per
+   destination lane, lane 0 selected by bits 1:0, lane 3 by bits 7:6.
+   If writesYmm the result goes via putYMMRegLoAndZU (which, per its
+   name, presumably also zeroes the upper YMM half -- AVX semantics);
+   otherwise via putXMMReg.  Returns the updated delta. */
+static Long dis_PSHUFD_32x4 ( const VexAbiInfo* vbi, Prefix pfx,
+                              Long delta, Bool writesYmm )
+{
+   Int    order;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   IRTemp sV    = newTemp(Ity_V128);
+   UChar  modrm = getUChar(delta);
+   const HChar* strV  = writesYmm ? "v" : "";
+   IRTemp addr  = IRTemp_INVALID;
+   if (epartIsReg(modrm)) {
+      assign( sV, getXMMReg(eregOfRexRM(pfx,modrm)) );
+      /* imm8 follows the modrm byte. */
+      order = (Int)getUChar(delta+1);
+      delta += 1+1;
+      DIP("%spshufd $%d,%s,%s\n", strV, order, 
+                                  nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+   } else {
+      /* imm8 follows the amode; tell disAMode so it sizes the insn
+         correctly. */
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 
+                        1/*byte after the amode*/ );
+      assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+      order = (Int)getUChar(delta+alen);
+      delta += alen+1;
+      DIP("%spshufd $%d,%s,%s\n", strV, order, 
+                                 dis_buf,
+                                 nameXMMReg(gregOfRexRM(pfx,modrm)));
+   }
+
+   IRTemp s3, s2, s1, s0;
+   s3 = s2 = s1 = s0 = IRTemp_INVALID;
+   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
+
+   /* SEL(n) picks the n-th 32-bit lane of the source. */
+#  define SEL(n)  ((n)==0 ? s0 : ((n)==1 ? s1 : ((n)==2 ? s2 : s3)))
+   IRTemp dV = newTemp(Ity_V128);
+   assign(dV,
+          mkV128from32s( SEL((order>>6)&3), SEL((order>>4)&3),
+                         SEL((order>>2)&3), SEL((order>>0)&3) )
+   );
+#  undef SEL
+
+   (writesYmm ? putYMMRegLoAndZU : putXMMReg)
+      (gregOfRexRM(pfx,modrm), mkexpr(dV));
+   return delta;
+}
+
+
+/* VPSHUFD (256-bit): as dis_PSHUFD_32x4, but the same imm8 'order'
+   permutation is applied independently to each 128-bit half of the
+   ymm/m256 source.  Returns the updated delta. */
+static Long dis_PSHUFD_32x8 ( const VexAbiInfo* vbi, Prefix pfx, Long delta )
+{
+   Int    order;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   IRTemp sV    = newTemp(Ity_V256);
+   UChar  modrm = getUChar(delta);
+   IRTemp addr  = IRTemp_INVALID;
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( sV, getYMMReg(rE) );
+      /* imm8 follows the modrm byte. */
+      order = (Int)getUChar(delta+1);
+      delta += 1+1;
+      DIP("vpshufd $%d,%s,%s\n", order, nameYMMReg(rE), nameYMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf,
+                        1/*byte after the amode*/ );
+      assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
+      order = (Int)getUChar(delta+alen);
+      delta += alen+1;
+      DIP("vpshufd $%d,%s,%s\n", order,  dis_buf, nameYMMReg(rG));
+   }
+
+   IRTemp s[8];
+   s[7] = s[6] = s[5] = s[4] = s[3] = s[2] = s[1] = s[0] = IRTemp_INVALID;
+   breakupV256to32s( sV, &s[7], &s[6], &s[5], &s[4],
+                         &s[3], &s[2], &s[1], &s[0] );
+
+   /* s[4..7] are the upper-half lanes, s[0..3] the lower-half lanes;
+      'order' steers within each half. */
+   putYMMReg( rG, mkV256from32s( s[4 + ((order>>6)&3)],
+                                 s[4 + ((order>>4)&3)],
+                                 s[4 + ((order>>2)&3)],
+                                 s[4 + ((order>>0)&3)],
+                                 s[0 + ((order>>6)&3)],
+                                 s[0 + ((order>>4)&3)],
+                                 s[0 + ((order>>2)&3)],
+                                 s[0 + ((order>>0)&3)] ) );
+   return delta;
+}
+
+
+/* Build IR computing PSRLDQ: the 128-bit value sV shifted right
+   (towards the least significant end) by imm *bytes*.  imm must be
+   0..255; any imm >= 16 produces zero, per the insn's semantics.
+   Returns a new V128 temp holding the result. */
+static IRTemp math_PSRLDQ ( IRTemp sV, Int imm )
+{
+   IRTemp dV    = newTemp(Ity_V128);
+   IRTemp hi64  = newTemp(Ity_I64);
+   IRTemp lo64  = newTemp(Ity_I64);
+   IRTemp hi64r = newTemp(Ity_I64);
+   IRTemp lo64r = newTemp(Ity_I64);
+
+   vassert(imm >= 0 && imm <= 255);
+   if (imm >= 16) {
+      /* Entire value shifted out. */
+      assign(dV, mkV128(0x0000));
+      return dV;
+   }
+
+   /* Work on the two 64-bit halves. */
+   assign( hi64, unop(Iop_V128HIto64, mkexpr(sV)) );
+   assign( lo64, unop(Iop_V128to64, mkexpr(sV)) );
+
+   if (imm == 0) {
+      assign( lo64r, mkexpr(lo64) );
+      assign( hi64r, mkexpr(hi64) );
+   }
+   else
+   if (imm == 8) {
+      /* Exactly one half: hi moves wholesale into lo. */
+      assign( hi64r, mkU64(0) );
+      assign( lo64r, mkexpr(hi64) );
+   }
+   else 
+   if (imm > 8) {
+      /* More than one half: only part of hi survives, in lo. */
+      assign( hi64r, mkU64(0) );
+      assign( lo64r, binop( Iop_Shr64, mkexpr(hi64), mkU8( 8*(imm-8) ) ));
+   } else {
+      /* 1..7: both halves shift; lo picks up the bytes that fall
+         out of hi. */
+      assign( hi64r, binop( Iop_Shr64, mkexpr(hi64), mkU8(8 * imm) ));
+      assign( lo64r, 
+              binop( Iop_Or64,
+                     binop(Iop_Shr64, mkexpr(lo64), 
+                           mkU8(8 * imm)),
+                     binop(Iop_Shl64, mkexpr(hi64),
+                           mkU8(8 * (8 - imm)) )
+                     )
+              );
+   }
+   
+   assign( dV, binop(Iop_64HLtoV128, mkexpr(hi64r), mkexpr(lo64r)) );
+   return dV;
+}
+
+
+/* Build IR computing PSLLDQ: the 128-bit value sV shifted left
+   (towards the most significant end) by imm *bytes*.  Mirror image of
+   math_PSRLDQ: imm must be 0..255, and imm >= 16 produces zero.
+   Returns a new V128 temp holding the result. */
+static IRTemp math_PSLLDQ ( IRTemp sV, Int imm )
+{
+   IRTemp       dV    = newTemp(Ity_V128);
+   IRTemp       hi64  = newTemp(Ity_I64);
+   IRTemp       lo64  = newTemp(Ity_I64);
+   IRTemp       hi64r = newTemp(Ity_I64);
+   IRTemp       lo64r = newTemp(Ity_I64);
+
+   vassert(imm >= 0 && imm <= 255);
+   if (imm >= 16) {
+      /* Entire value shifted out. */
+      assign(dV, mkV128(0x0000));
+      return dV;
+   }
+
+   /* Work on the two 64-bit halves. */
+   assign( hi64, unop(Iop_V128HIto64, mkexpr(sV)) );
+   assign( lo64, unop(Iop_V128to64, mkexpr(sV)) );
+   
+   if (imm == 0) {
+      assign( lo64r, mkexpr(lo64) );
+      assign( hi64r, mkexpr(hi64) );
+   }
+   else
+   if (imm == 8) {
+      /* Exactly one half: lo moves wholesale into hi. */
+      assign( lo64r, mkU64(0) );
+      assign( hi64r, mkexpr(lo64) );
+   }
+   else
+   if (imm > 8) {
+      /* More than one half: only part of lo survives, in hi. */
+      assign( lo64r, mkU64(0) );
+      assign( hi64r, binop( Iop_Shl64, mkexpr(lo64), mkU8( 8*(imm-8) ) ));
+   } else {
+      /* 1..7: both halves shift; hi picks up the bytes that fall
+         out of lo. */
+      assign( lo64r, binop( Iop_Shl64, mkexpr(lo64), mkU8(8 * imm) ));
+      assign( hi64r, 
+              binop( Iop_Or64,
+                     binop(Iop_Shl64, mkexpr(hi64), 
+                           mkU8(8 * imm)),
+                     binop(Iop_Shr64, mkexpr(lo64),
+                           mkU8(8 * (8 - imm)) )
+                     )
+              );
+   }
+
+   assign( dV, binop(Iop_64HLtoV128, mkexpr(hi64r), mkexpr(lo64r)) );
+   return dV;
+}
+
+
+/* CVTSD2SI / CVTTSD2SI: convert the low F64 lane of xmm/m64 E to a
+   32- or 64-bit signed integer in reg G (sz selects 4 or 8).  opc
+   0x2C is the truncating variant (round towards zero); 0x2D uses the
+   current SSE rounding mode.  isAvx only affects the printed
+   mnemonic.  Returns the updated delta. */
+static Long dis_CVTxSD2SI ( const VexAbiInfo* vbi, Prefix pfx,
+                            Long delta, Bool isAvx, UChar opc, Int sz )
+{
+   vassert(opc == 0x2D/*CVTSD2SI*/ || opc == 0x2C/*CVTTSD2SI*/);
+   HChar  dis_buf[50];
+   Int    alen   = 0;
+   UChar  modrm  = getUChar(delta);
+   IRTemp addr   = IRTemp_INVALID;
+   IRTemp rmode  = newTemp(Ity_I32);
+   IRTemp f64lo  = newTemp(Ity_F64);
+   Bool   r2zero = toBool(opc == 0x2C);
+
+   if (epartIsReg(modrm)) {
+      delta += 1;
+      assign(f64lo, getXMMRegLane64F(eregOfRexRM(pfx,modrm), 0));
+      DIP("%scvt%ssd2si %s,%s\n", isAvx ? "v" : "", r2zero ? "t" : "",
+                                  nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameIReg(sz, gregOfRexRM(pfx,modrm),
+                                           False));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign(f64lo, loadLE(Ity_F64, mkexpr(addr)));
+      delta += alen;
+      DIP("%scvt%ssd2si %s,%s\n", isAvx ? "v" : "", r2zero ? "t" : "",
+                                  dis_buf,
+                                  nameIReg(sz, gregOfRexRM(pfx,modrm),
+                                           False));
+   }
+
+   /* Truncating form always rounds to zero; otherwise honour MXCSR. */
+   if (r2zero) {
+      assign( rmode, mkU32((UInt)Irrm_ZERO) );
+   } else {
+      assign( rmode, get_sse_roundingmode() );
+   }
+
+   if (sz == 4) {
+      putIReg32( gregOfRexRM(pfx,modrm),
+                 binop( Iop_F64toI32S, mkexpr(rmode), mkexpr(f64lo)) );
+   } else {
+      vassert(sz == 8);
+      putIReg64( gregOfRexRM(pfx,modrm),
+                 binop( Iop_F64toI64S, mkexpr(rmode), mkexpr(f64lo)) );
+   }
+
+   return delta;
+}
+
+
+/* CVTSS2SI / CVTTSS2SI: convert the low F32 lane of xmm/m32 E to a
+   32- or 64-bit signed integer in reg G (sz selects 4 or 8).  The
+   F32 is widened to F64 first and converted from there.  opc 0x2C is
+   the truncating variant; 0x2D uses the current SSE rounding mode.
+   isAvx only affects the printed mnemonic.  Returns the updated
+   delta. */
+static Long dis_CVTxSS2SI ( const VexAbiInfo* vbi, Prefix pfx,
+                            Long delta, Bool isAvx, UChar opc, Int sz )
+{
+   vassert(opc == 0x2D/*CVTSS2SI*/ || opc == 0x2C/*CVTTSS2SI*/);
+   HChar  dis_buf[50];
+   Int    alen   = 0;
+   UChar  modrm  = getUChar(delta);
+   IRTemp addr   = IRTemp_INVALID;
+   IRTemp rmode  = newTemp(Ity_I32);
+   IRTemp f32lo  = newTemp(Ity_F32);
+   Bool   r2zero = toBool(opc == 0x2C);
+
+   if (epartIsReg(modrm)) {
+      delta += 1;
+      assign(f32lo, getXMMRegLane32F(eregOfRexRM(pfx,modrm), 0));
+      DIP("%scvt%sss2si %s,%s\n", isAvx ? "v" : "", r2zero ? "t" : "",
+                                  nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameIReg(sz, gregOfRexRM(pfx,modrm), 
+                                           False));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign(f32lo, loadLE(Ity_F32, mkexpr(addr)));
+      delta += alen;
+      DIP("%scvt%sss2si %s,%s\n", isAvx ? "v" : "", r2zero ? "t" : "",
+                                  dis_buf,
+                                  nameIReg(sz, gregOfRexRM(pfx,modrm),
+                                           False));
+   }
+
+   /* Truncating form always rounds to zero; otherwise honour MXCSR. */
+   if (r2zero) {
+      assign( rmode, mkU32((UInt)Irrm_ZERO) );
+   } else {
+      assign( rmode, get_sse_roundingmode() );
+   }
+
+   if (sz == 4) {
+      putIReg32( gregOfRexRM(pfx,modrm),
+                 binop( Iop_F64toI32S, 
+                        mkexpr(rmode), 
+                        unop(Iop_F32toF64, mkexpr(f32lo))) );
+   } else {
+      vassert(sz == 8);
+      putIReg64( gregOfRexRM(pfx,modrm),
+                 binop( Iop_F64toI64S, 
+                        mkexpr(rmode), 
+                        unop(Iop_F32toF64, mkexpr(f32lo))) );
+   }
+   
+   return delta;
+}
+
+
+/* CVTPS2PD (128-bit): widen the two low F32 lanes of xmm/m64 E to two
+   F64 lanes in xmm G (widening is exact, so no rounding mode is
+   needed).  For the AVX form the upper YMM half of G is zeroed.
+   Returns the updated delta. */
+static Long dis_CVTPS2PD_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   IRTemp f32lo = newTemp(Ity_F32);
+   IRTemp f32hi = newTemp(Ity_F32);
+   UChar  modrm = getUChar(delta);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( f32lo, getXMMRegLane32F(rE, 0) );
+      assign( f32hi, getXMMRegLane32F(rE, 1) );
+      delta += 1;
+      DIP("%scvtps2pd %s,%s\n",
+          isAvx ? "v" : "", nameXMMReg(rE), nameXMMReg(rG));
+   } else {
+      /* Memory form reads only 8 bytes (two F32s). */
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( f32lo, loadLE(Ity_F32, mkexpr(addr)) );
+      assign( f32hi, loadLE(Ity_F32, 
+                            binop(Iop_Add64,mkexpr(addr),mkU64(4))) );
+      delta += alen;
+      DIP("%scvtps2pd %s,%s\n",
+          isAvx ? "v" : "", dis_buf, nameXMMReg(rG));
+   }
+
+   putXMMRegLane64F( rG, 1, unop(Iop_F32toF64, mkexpr(f32hi)) );
+   putXMMRegLane64F( rG, 0, unop(Iop_F32toF64, mkexpr(f32lo)) );
+   if (isAvx)
+      putYMMRegLane128( rG, 1, mkV128(0));
+   return delta;
+}
+
+
+/* VCVTPS2PD (256-bit): widen the four F32 lanes of xmm/m128 E to four
+   F64 lanes in ymm G (exact widening, no rounding mode needed).
+   Returns the updated delta. */
+static Long dis_CVTPS2PD_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   IRTemp f32_0 = newTemp(Ity_F32);
+   IRTemp f32_1 = newTemp(Ity_F32);
+   IRTemp f32_2 = newTemp(Ity_F32);
+   IRTemp f32_3 = newTemp(Ity_F32);
+   UChar  modrm = getUChar(delta);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( f32_0, getXMMRegLane32F(rE, 0) );
+      assign( f32_1, getXMMRegLane32F(rE, 1) );
+      assign( f32_2, getXMMRegLane32F(rE, 2) );
+      assign( f32_3, getXMMRegLane32F(rE, 3) );
+      delta += 1;
+      DIP("vcvtps2pd %s,%s\n", nameXMMReg(rE), nameYMMReg(rG));
+   } else {
+      /* Memory form reads 16 bytes (four F32s). */
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( f32_0, loadLE(Ity_F32, mkexpr(addr)) );
+      assign( f32_1, loadLE(Ity_F32, 
+                            binop(Iop_Add64,mkexpr(addr),mkU64(4))) );
+      assign( f32_2, loadLE(Ity_F32, 
+                            binop(Iop_Add64,mkexpr(addr),mkU64(8))) );
+      assign( f32_3, loadLE(Ity_F32, 
+                            binop(Iop_Add64,mkexpr(addr),mkU64(12))) );
+      delta += alen;
+      DIP("vcvtps2pd %s,%s\n", dis_buf, nameYMMReg(rG));
+   }
+
+   putYMMRegLane64F( rG, 3, unop(Iop_F32toF64, mkexpr(f32_3)) );
+   putYMMRegLane64F( rG, 2, unop(Iop_F32toF64, mkexpr(f32_2)) );
+   putYMMRegLane64F( rG, 1, unop(Iop_F32toF64, mkexpr(f32_1)) );
+   putYMMRegLane64F( rG, 0, unop(Iop_F32toF64, mkexpr(f32_0)) );
+   return delta;
+}
+
+
+/* CVTPD2PS (128-bit): narrow the two F64 lanes of xmm/m128 E to two
+   F32 lanes in the low half of xmm G, zeroing G's upper two 32-bit
+   lanes.  Narrowing rounds, so the current SSE rounding mode is
+   used.  For the AVX form the upper YMM half of G is zeroed as well.
+   Returns the updated delta. */
+static Long dis_CVTPD2PS_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   IRTemp argV  = newTemp(Ity_V128);
+   IRTemp rmode = newTemp(Ity_I32);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( argV, getXMMReg(rE) );
+      delta += 1;
+      DIP("%scvtpd2ps %s,%s\n", isAvx ? "v" : "",
+          nameXMMReg(rE), nameXMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+      delta += alen;
+      DIP("%scvtpd2ps %s,%s\n", isAvx ? "v" : "",
+          dis_buf, nameXMMReg(rG) );
+   }
+         
+   assign( rmode, get_sse_roundingmode() );
+   /* Reinterpret the two 64-bit halves as F64s. */
+   IRTemp t0 = newTemp(Ity_F64);
+   IRTemp t1 = newTemp(Ity_F64);
+   assign( t0, unop(Iop_ReinterpI64asF64, 
+                    unop(Iop_V128to64, mkexpr(argV))) );
+   assign( t1, unop(Iop_ReinterpI64asF64, 
+                    unop(Iop_V128HIto64, mkexpr(argV))) );
+      
+#  define CVT(_t)  binop( Iop_F64toF32, mkexpr(rmode), mkexpr(_t) )
+   putXMMRegLane32(  rG, 3, mkU32(0) );
+   putXMMRegLane32(  rG, 2, mkU32(0) );
+   putXMMRegLane32F( rG, 1, CVT(t1) );
+   putXMMRegLane32F( rG, 0, CVT(t0) );
+#  undef CVT
+   if (isAvx)
+      putYMMRegLane128( rG, 1, mkV128(0) );
+
+   return delta;
+}
+
+
+/* CVT(T)PS2DQ (128-bit): convert the four F32 lanes of xmm/m128 E to
+   four signed 32-bit ints in xmm G.  r2zero selects the truncating
+   variant (round to zero) vs the current SSE rounding mode.  Each
+   lane is widened to F64 and converted from there.  For the AVX form
+   the upper YMM half of G is zeroed.  Returns the updated delta. */
+static Long dis_CVTxPS2DQ_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                                Long delta, Bool isAvx, Bool r2zero )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   IRTemp argV  = newTemp(Ity_V128);
+   IRTemp rmode = newTemp(Ity_I32);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   IRTemp t0, t1, t2, t3;
+
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( argV, getXMMReg(rE) );
+      delta += 1;
+      DIP("%scvt%sps2dq %s,%s\n",
+          isAvx ? "v" : "", r2zero ? "t" : "", nameXMMReg(rE), nameXMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+      delta += alen;
+      DIP("%scvt%sps2dq %s,%s\n",
+          isAvx ? "v" : "", r2zero ? "t" : "", dis_buf, nameXMMReg(rG) );
+   }
+
+   assign( rmode, r2zero ? mkU32((UInt)Irrm_ZERO)
+                         : get_sse_roundingmode() );
+   t0 = t1 = t2 = t3 = IRTemp_INVALID;
+   breakupV128to32s( argV, &t3, &t2, &t1, &t0 );
+   /* This is less than ideal.  If it turns out to be a performance
+      bottleneck it can be improved. */
+#  define CVT(_t)                             \
+      binop( Iop_F64toI32S,                   \
+             mkexpr(rmode),                   \
+             unop( Iop_F32toF64,              \
+                   unop( Iop_ReinterpI32asF32, mkexpr(_t))) )
+      
+   putXMMRegLane32( rG, 3, CVT(t3) );
+   putXMMRegLane32( rG, 2, CVT(t2) );
+   putXMMRegLane32( rG, 1, CVT(t1) );
+   putXMMRegLane32( rG, 0, CVT(t0) );
+#  undef CVT
+   if (isAvx)
+      putYMMRegLane128( rG, 1, mkV128(0) );
+
+   return delta;
+}
+
+
+/* VCVT(T)PS2DQ (256-bit): as dis_CVTxPS2DQ_128 but for all eight F32
+   lanes of ymm/m256 E into ymm G.  r2zero selects truncation vs the
+   current SSE rounding mode.  Returns the updated delta. */
+static Long dis_CVTxPS2DQ_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                                Long delta, Bool r2zero )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   IRTemp argV  = newTemp(Ity_V256);
+   IRTemp rmode = newTemp(Ity_I32);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   IRTemp t0, t1, t2, t3, t4, t5, t6, t7;
+
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( argV, getYMMReg(rE) );
+      delta += 1;
+      DIP("vcvt%sps2dq %s,%s\n",
+          r2zero ? "t" : "", nameYMMReg(rE), nameYMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( argV, loadLE(Ity_V256, mkexpr(addr)) );
+      delta += alen;
+      DIP("vcvt%sps2dq %s,%s\n",
+          r2zero ? "t" : "", dis_buf, nameYMMReg(rG) );
+   }
+
+   assign( rmode, r2zero ? mkU32((UInt)Irrm_ZERO)
+                         : get_sse_roundingmode() );
+   t0 = t1 = t2 = t3 = t4 = t5 = t6 = t7 = IRTemp_INVALID;
+   breakupV256to32s( argV, &t7, &t6, &t5, &t4, &t3, &t2, &t1, &t0 );
+   /* This is less than ideal.  If it turns out to be a performance
+      bottleneck it can be improved. */
+#  define CVT(_t)                             \
+      binop( Iop_F64toI32S,                   \
+             mkexpr(rmode),                   \
+             unop( Iop_F32toF64,              \
+                   unop( Iop_ReinterpI32asF32, mkexpr(_t))) )
+      
+   putYMMRegLane32( rG, 7, CVT(t7) );
+   putYMMRegLane32( rG, 6, CVT(t6) );
+   putYMMRegLane32( rG, 5, CVT(t5) );
+   putYMMRegLane32( rG, 4, CVT(t4) );
+   putYMMRegLane32( rG, 3, CVT(t3) );
+   putYMMRegLane32( rG, 2, CVT(t2) );
+   putYMMRegLane32( rG, 1, CVT(t1) );
+   putYMMRegLane32( rG, 0, CVT(t0) );
+#  undef CVT
+
+   return delta;
+}
+
+
+/* CVT(T)PD2DQ (128-bit): convert the two F64 lanes of xmm/m128 E to
+   two signed 32-bit ints in the low half of xmm G, zeroing G's upper
+   two 32-bit lanes.  r2zero selects truncation vs the current SSE
+   rounding mode.  The memory-form mnemonic is printed with an 'x'
+   suffix -- presumably to disambiguate the operand size from the
+   256-bit form's 'y'.  For the AVX form the upper YMM half of G is
+   zeroed.  Returns the updated delta. */
+static Long dis_CVTxPD2DQ_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                                Long delta, Bool isAvx, Bool r2zero )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   IRTemp argV  = newTemp(Ity_V128);
+   IRTemp rmode = newTemp(Ity_I32);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   IRTemp t0, t1;
+
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( argV, getXMMReg(rE) );
+      delta += 1;
+      DIP("%scvt%spd2dq %s,%s\n",
+          isAvx ? "v" : "", r2zero ? "t" : "", nameXMMReg(rE), nameXMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+      delta += alen;
+      DIP("%scvt%spd2dqx %s,%s\n",
+          isAvx ? "v" : "", r2zero ? "t" : "", dis_buf, nameXMMReg(rG) );
+   }
+
+   if (r2zero) {
+      assign(rmode, mkU32((UInt)Irrm_ZERO) );
+   } else {
+      assign( rmode, get_sse_roundingmode() );
+   }
+
+   /* Reinterpret the two 64-bit halves as F64s. */
+   t0 = newTemp(Ity_F64);
+   t1 = newTemp(Ity_F64);
+   assign( t0, unop(Iop_ReinterpI64asF64, 
+                    unop(Iop_V128to64, mkexpr(argV))) );
+   assign( t1, unop(Iop_ReinterpI64asF64, 
+                    unop(Iop_V128HIto64, mkexpr(argV))) );
+
+#  define CVT(_t)  binop( Iop_F64toI32S,                   \
+                          mkexpr(rmode),                   \
+                          mkexpr(_t) )
+
+   putXMMRegLane32( rG, 3, mkU32(0) );
+   putXMMRegLane32( rG, 2, mkU32(0) );
+   putXMMRegLane32( rG, 1, CVT(t1) );
+   putXMMRegLane32( rG, 0, CVT(t0) );
+#  undef CVT
+   if (isAvx)
+      putYMMRegLane128( rG, 1, mkV128(0) );
+
+   return delta;
+}
+
+
+/* VCVT(T)PD2DQ (256-bit): convert the four F64 lanes of ymm/m256 E to
+   four signed 32-bit ints in xmm G (destination is the low XMM; the
+   upper YMM half is zeroed).  r2zero selects truncation vs the
+   current SSE rounding mode.  Memory-form mnemonic gets a 'y' suffix
+   -- presumably to disambiguate from the 128-bit form's 'x'.
+   Returns the updated delta. */
+static Long dis_CVTxPD2DQ_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                                Long delta, Bool r2zero )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   IRTemp argV  = newTemp(Ity_V256);
+   IRTemp rmode = newTemp(Ity_I32);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   IRTemp t0, t1, t2, t3;
+
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( argV, getYMMReg(rE) );
+      delta += 1;
+      DIP("vcvt%spd2dq %s,%s\n",
+          r2zero ? "t" : "", nameYMMReg(rE), nameXMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( argV, loadLE(Ity_V256, mkexpr(addr)) );
+      delta += alen;
+      DIP("vcvt%spd2dqy %s,%s\n",
+          r2zero ? "t" : "", dis_buf, nameXMMReg(rG) );
+   }
+
+   if (r2zero) {
+      assign(rmode, mkU32((UInt)Irrm_ZERO) );
+   } else {
+      assign( rmode, get_sse_roundingmode() );
+   }
+
+   t0 = IRTemp_INVALID;
+   t1 = IRTemp_INVALID;
+   t2 = IRTemp_INVALID;
+   t3 = IRTemp_INVALID;
+   breakupV256to64s( argV, &t3, &t2, &t1, &t0 );
+
+#  define CVT(_t)  binop( Iop_F64toI32S,                   \
+                          mkexpr(rmode),                   \
+                          unop( Iop_ReinterpI64asF64,      \
+                                mkexpr(_t) ) )
+
+   putXMMRegLane32( rG, 3, CVT(t3) );
+   putXMMRegLane32( rG, 2, CVT(t2) );
+   putXMMRegLane32( rG, 1, CVT(t1) );
+   putXMMRegLane32( rG, 0, CVT(t0) );
+#  undef CVT
+   /* VEX-encoded insn: zero the upper half of the destination. */
+   putYMMRegLane128( rG, 1, mkV128(0) );
+
+   return delta;
+}
+
+
+/* CVTDQ2PS (128-bit): convert the four signed 32-bit int lanes of
+   xmm/m128 E to four F32 lanes in xmm G.  Each lane goes via F64
+   (I32StoF64 is exact) and is narrowed with the current SSE rounding
+   mode.  For the AVX form the upper YMM half of G is zeroed.
+   Returns the updated delta. */
+static Long dis_CVTDQ2PS_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   IRTemp argV  = newTemp(Ity_V128);
+   IRTemp rmode = newTemp(Ity_I32);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   IRTemp t0, t1, t2, t3;
+
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( argV, getXMMReg(rE) );
+      delta += 1;
+      DIP("%scvtdq2ps %s,%s\n",
+          isAvx ? "v" : "", nameXMMReg(rE), nameXMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+      delta += alen;
+      DIP("%scvtdq2ps %s,%s\n",
+          isAvx ? "v" : "", dis_buf, nameXMMReg(rG) );
+   }
+
+   assign( rmode, get_sse_roundingmode() );
+   t0 = IRTemp_INVALID;
+   t1 = IRTemp_INVALID;
+   t2 = IRTemp_INVALID;
+   t3 = IRTemp_INVALID;
+   breakupV128to32s( argV, &t3, &t2, &t1, &t0 );
+
+#  define CVT(_t)  binop( Iop_F64toF32,                    \
+                          mkexpr(rmode),                   \
+                          unop(Iop_I32StoF64,mkexpr(_t)))
+      
+   putXMMRegLane32F( rG, 3, CVT(t3) );
+   putXMMRegLane32F( rG, 2, CVT(t2) );
+   putXMMRegLane32F( rG, 1, CVT(t1) );
+   putXMMRegLane32F( rG, 0, CVT(t0) );
+#  undef CVT
+   if (isAvx)
+      putYMMRegLane128( rG, 1, mkV128(0) );
+
+   return delta;
+}
+
+/* VCVTDQ2PS (256-bit): as dis_CVTDQ2PS_128 but for all eight signed
+   32-bit int lanes of ymm/m256 E into ymm G.  Uses the current SSE
+   rounding mode for the F64->F32 narrowing.  Returns the updated
+   delta. */
+static Long dis_CVTDQ2PS_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   UChar  modrm  = getUChar(delta);
+   IRTemp argV   = newTemp(Ity_V256);
+   IRTemp rmode  = newTemp(Ity_I32);
+   UInt   rG     = gregOfRexRM(pfx,modrm);
+   IRTemp t0, t1, t2, t3, t4, t5, t6, t7;
+
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( argV, getYMMReg(rE) );
+      delta += 1;
+      DIP("vcvtdq2ps %s,%s\n", nameYMMReg(rE), nameYMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( argV, loadLE(Ity_V256, mkexpr(addr)) );
+      delta += alen;
+      DIP("vcvtdq2ps %s,%s\n", dis_buf, nameYMMReg(rG) );
+   }
+
+   assign( rmode, get_sse_roundingmode() );
+   t0 = IRTemp_INVALID;
+   t1 = IRTemp_INVALID;
+   t2 = IRTemp_INVALID;
+   t3 = IRTemp_INVALID;
+   t4 = IRTemp_INVALID;
+   t5 = IRTemp_INVALID;
+   t6 = IRTemp_INVALID;
+   t7 = IRTemp_INVALID;
+   breakupV256to32s( argV, &t7, &t6, &t5, &t4, &t3, &t2, &t1, &t0 );
+
+#  define CVT(_t)  binop( Iop_F64toF32,                    \
+                          mkexpr(rmode),                   \
+                          unop(Iop_I32StoF64,mkexpr(_t)))
+      
+   putYMMRegLane32F( rG, 7, CVT(t7) );
+   putYMMRegLane32F( rG, 6, CVT(t6) );
+   putYMMRegLane32F( rG, 5, CVT(t5) );
+   putYMMRegLane32F( rG, 4, CVT(t4) );
+   putYMMRegLane32F( rG, 3, CVT(t3) );
+   putYMMRegLane32F( rG, 2, CVT(t2) );
+   putYMMRegLane32F( rG, 1, CVT(t1) );
+   putYMMRegLane32F( rG, 0, CVT(t0) );
+#  undef CVT
+
+   return delta;
+}
+
+
+/* PMOVMSKB (128-bit): gather the MSB of each of the 16 bytes of xmm E
+   into a 16-bit mask, zero-extend to 32 bits, and write it to reg G.
+   Register-source only (the caller guarantees this); vbi is unused
+   here.  Returns the updated delta. */
+static Long dis_PMOVMSKB_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   UChar modrm = getUChar(delta);
+   vassert(epartIsReg(modrm)); /* ensured by caller */
+   UInt   rE = eregOfRexRM(pfx,modrm);
+   UInt   rG = gregOfRexRM(pfx,modrm);
+   IRTemp t0 = newTemp(Ity_V128);
+   IRTemp t1 = newTemp(Ity_I32);
+   assign(t0, getXMMReg(rE));
+   assign(t1, unop(Iop_16Uto32, unop(Iop_GetMSBs8x16, mkexpr(t0))));
+   putIReg32(rG, mkexpr(t1));
+   DIP("%spmovmskb %s,%s\n", isAvx ? "v" : "", nameXMMReg(rE),
+       nameIReg32(rG));
+   delta += 1;
+   return delta;
+}
+
+
+/* VPMOVMSKB (256-bit): gather the MSB of each of the 32 bytes of ymm
+   E into a 32-bit mask in reg G, built from two 16-bit half-masks
+   (upper YMM lane in the high half).  Register-source only (the
+   caller guarantees this); vbi is unused here.  Returns the updated
+   delta. */
+static Long dis_PMOVMSKB_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta  )
+{
+   UChar modrm = getUChar(delta);
+   vassert(epartIsReg(modrm)); /* ensured by caller */
+   UInt   rE = eregOfRexRM(pfx,modrm);
+   UInt   rG = gregOfRexRM(pfx,modrm);
+   IRTemp t0 = newTemp(Ity_V128);
+   IRTemp t1 = newTemp(Ity_V128);
+   IRTemp t2 = newTemp(Ity_I16);
+   IRTemp t3 = newTemp(Ity_I16);
+   assign(t0, getYMMRegLane128(rE, 0));
+   assign(t1, getYMMRegLane128(rE, 1));
+   assign(t2, unop(Iop_GetMSBs8x16, mkexpr(t0)));
+   assign(t3, unop(Iop_GetMSBs8x16, mkexpr(t1)));
+   putIReg32(rG, binop(Iop_16HLto32, mkexpr(t3), mkexpr(t2)));
+   DIP("vpmovmskb %s,%s\n", nameYMMReg(rE), nameIReg32(rG));
+   delta += 1;
+   return delta;
+}
+
+
+/* FIXME: why not just use InterleaveLO / InterleaveHI?  I think the
+   relevant ops are "xIsH ? InterleaveHI32x4 : InterleaveLO32x4". */
+/* Does the maths for 128 bit versions of UNPCKLPS and UNPCKHPS:
+   interleave the high (xIsH) or low (!xIsH) pairs of 32-bit lanes of
+   sV and dV.  Returns a new V128 temp holding the result. */
+static IRTemp math_UNPCKxPS_128 ( IRTemp sV, IRTemp dV, Bool xIsH )
+{
+   IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+   s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+   breakupV128to32s( dV, &d3, &d2, &d1, &d0 );
+   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
+   IRTemp res = newTemp(Ity_V128);
+   assign(res,  xIsH ? mkV128from32s( s3, d3, s2, d2 )
+                     : mkV128from32s( s1, d1, s0, d0 ));
+   return res;
+}
+
+
+/* FIXME: why not just use InterleaveLO / InterleaveHI ?? */
+/* Does the maths for 128 bit versions of UNPCKLPD and UNPCKHPD:
+   result is (s1:d1) when xIsH, else (s0:d0), where s1/s0 and d1/d0
+   are the high/low 64-bit halves of sV and dV.  Returns a new V128
+   temp holding the result. */
+static IRTemp math_UNPCKxPD_128 ( IRTemp sV, IRTemp dV, Bool xIsH )
+{
+   IRTemp s1 = newTemp(Ity_I64);
+   IRTemp s0 = newTemp(Ity_I64);
+   IRTemp d1 = newTemp(Ity_I64);
+   IRTemp d0 = newTemp(Ity_I64);
+   assign( d1, unop(Iop_V128HIto64, mkexpr(dV)) );
+   assign( d0, unop(Iop_V128to64,   mkexpr(dV)) );
+   assign( s1, unop(Iop_V128HIto64, mkexpr(sV)) );
+   assign( s0, unop(Iop_V128to64,   mkexpr(sV)) );
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, xIsH ? binop(Iop_64HLtoV128, mkexpr(s1), mkexpr(d1))
+                    : binop(Iop_64HLtoV128, mkexpr(s0), mkexpr(d0)));
+   return res;
+}
+
+
+/* Does the maths for 256 bit versions of UNPCKLPD and UNPCKHPD.
+   Doesn't seem like this fits in either of the Iop_Interleave{LO,HI}
+   or the Iop_Cat{Odd,Even}Lanes idioms, hence just do it the stupid
+   way. */
+static IRTemp math_UNPCKxPD_256 ( IRTemp sV, IRTemp dV, Bool xIsH )
+{
+   IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+   s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+   breakupV256to64s( dV, &d3, &d2, &d1, &d0 );
+   breakupV256to64s( sV, &s3, &s2, &s1, &s0 );
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, xIsH
+               ? IRExpr_Qop(Iop_64x4toV256, mkexpr(s3), mkexpr(d3),
+                                            mkexpr(s1), mkexpr(d1))
+               : IRExpr_Qop(Iop_64x4toV256, mkexpr(s2), mkexpr(d2),
+                                            mkexpr(s0), mkexpr(d0)));
+   return res;
+}
+
+
+/* FIXME: this is really bad.  Surely can do something better here?
+   One observation is that the steering in the upper and lower 128 bit
+   halves is the same as with math_UNPCKxPS_128, so we simply split
+   into two halves, and use that.  Consequently any improvement in
+   math_UNPCKxPS_128 (probably, to use interleave-style primops)
+   benefits this too. */
+static IRTemp math_UNPCKxPS_256 ( IRTemp sV, IRTemp dV, Bool xIsH )
+{
+   IRTemp sVhi = IRTemp_INVALID, sVlo = IRTemp_INVALID;
+   IRTemp dVhi = IRTemp_INVALID, dVlo = IRTemp_INVALID;
+   breakupV256toV128s( sV, &sVhi, &sVlo );
+   breakupV256toV128s( dV, &dVhi, &dVlo );
+   IRTemp rVhi = math_UNPCKxPS_128(sVhi, dVhi, xIsH);
+   IRTemp rVlo = math_UNPCKxPS_128(sVlo, dVlo, xIsH);
+   IRTemp rV   = newTemp(Ity_V256);
+   assign(rV, binop(Iop_V128HLtoV256, mkexpr(rVhi), mkexpr(rVlo)));
+   return rV;
+}
+
+
+static IRTemp math_SHUFPS_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
+{
+   IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+   s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+   vassert(imm8 < 256);
+
+   breakupV128to32s( dV, &d3, &d2, &d1, &d0 );
+   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
+
+#  define SELD(n) ((n)==0 ? d0 : ((n)==1 ? d1 : ((n)==2 ? d2 : d3)))
+#  define SELS(n) ((n)==0 ? s0 : ((n)==1 ? s1 : ((n)==2 ? s2 : s3)))
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, 
+          mkV128from32s( SELS((imm8>>6)&3), SELS((imm8>>4)&3), 
+                         SELD((imm8>>2)&3), SELD((imm8>>0)&3) ) );
+#  undef SELD
+#  undef SELS
+   return res;
+}
+
+
+/* 256-bit SHUFPS appears to steer each of the 128-bit halves
+   identically.  Hence do the clueless thing and use math_SHUFPS_128
+   twice. */
+static IRTemp math_SHUFPS_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
+{
+   IRTemp sVhi = IRTemp_INVALID, sVlo = IRTemp_INVALID;
+   IRTemp dVhi = IRTemp_INVALID, dVlo = IRTemp_INVALID;
+   breakupV256toV128s( sV, &sVhi, &sVlo );
+   breakupV256toV128s( dV, &dVhi, &dVlo );
+   IRTemp rVhi = math_SHUFPS_128(sVhi, dVhi, imm8);
+   IRTemp rVlo = math_SHUFPS_128(sVlo, dVlo, imm8);
+   IRTemp rV   = newTemp(Ity_V256);
+   assign(rV, binop(Iop_V128HLtoV256, mkexpr(rVhi), mkexpr(rVlo)));
+   return rV;
+}
+
+
+static IRTemp math_SHUFPD_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
+{
+   IRTemp s1 = newTemp(Ity_I64);
+   IRTemp s0 = newTemp(Ity_I64);
+   IRTemp d1 = newTemp(Ity_I64);
+   IRTemp d0 = newTemp(Ity_I64);
+
+   assign( d1, unop(Iop_V128HIto64, mkexpr(dV)) );
+   assign( d0, unop(Iop_V128to64,   mkexpr(dV)) );
+   assign( s1, unop(Iop_V128HIto64, mkexpr(sV)) );
+   assign( s0, unop(Iop_V128to64,   mkexpr(sV)) );
+
+#  define SELD(n) mkexpr((n)==0 ? d0 : d1)
+#  define SELS(n) mkexpr((n)==0 ? s0 : s1)
+
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, binop( Iop_64HLtoV128,
+                      SELS((imm8>>1)&1), SELD((imm8>>0)&1) ) );
+
+#  undef SELD
+#  undef SELS
+   return res;
+}
+
+
+static IRTemp math_SHUFPD_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
+{
+   IRTemp sVhi = IRTemp_INVALID, sVlo = IRTemp_INVALID;
+   IRTemp dVhi = IRTemp_INVALID, dVlo = IRTemp_INVALID;
+   breakupV256toV128s( sV, &sVhi, &sVlo );
+   breakupV256toV128s( dV, &dVhi, &dVlo );
+   IRTemp rVhi = math_SHUFPD_128(sVhi, dVhi, (imm8 >> 2) & 3);
+   IRTemp rVlo = math_SHUFPD_128(sVlo, dVlo, imm8 & 3);
+   IRTemp rV   = newTemp(Ity_V256);
+   assign(rV, binop(Iop_V128HLtoV256, mkexpr(rVhi), mkexpr(rVlo)));
+   return rV;
+}
+
+
+static IRTemp math_BLENDPD_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
+{
+   UShort imm8_mask_16;
+   IRTemp imm8_mask = newTemp(Ity_V128);
+
+   switch( imm8 & 3 ) {
+      case 0:  imm8_mask_16 = 0x0000; break;
+      case 1:  imm8_mask_16 = 0x00FF; break;
+      case 2:  imm8_mask_16 = 0xFF00; break;
+      case 3:  imm8_mask_16 = 0xFFFF; break;
+      default: vassert(0);            break;
+   }
+   assign( imm8_mask, mkV128( imm8_mask_16 ) );
+
+   IRTemp res = newTemp(Ity_V128);
+   assign ( res, binop( Iop_OrV128, 
+                        binop( Iop_AndV128, mkexpr(sV),
+                                            mkexpr(imm8_mask) ), 
+                        binop( Iop_AndV128, mkexpr(dV), 
+                               unop( Iop_NotV128, mkexpr(imm8_mask) ) ) ) );
+   return res;
+}
+
+
+static IRTemp math_BLENDPD_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
+{
+   IRTemp sVhi = IRTemp_INVALID, sVlo = IRTemp_INVALID;
+   IRTemp dVhi = IRTemp_INVALID, dVlo = IRTemp_INVALID;
+   breakupV256toV128s( sV, &sVhi, &sVlo );
+   breakupV256toV128s( dV, &dVhi, &dVlo );
+   IRTemp rVhi = math_BLENDPD_128(sVhi, dVhi, (imm8 >> 2) & 3);
+   IRTemp rVlo = math_BLENDPD_128(sVlo, dVlo, imm8 & 3);
+   IRTemp rV   = newTemp(Ity_V256);
+   assign(rV, binop(Iop_V128HLtoV256, mkexpr(rVhi), mkexpr(rVlo)));
+   return rV;
+}
+
+
+static IRTemp math_BLENDPS_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
+{
+   UShort imm8_perms[16] = { 0x0000, 0x000F, 0x00F0, 0x00FF, 0x0F00,
+                             0x0F0F, 0x0FF0, 0x0FFF, 0xF000, 0xF00F,
+                             0xF0F0, 0xF0FF, 0xFF00, 0xFF0F, 0xFFF0,
+                             0xFFFF };
+   IRTemp imm8_mask = newTemp(Ity_V128);
+   assign( imm8_mask, mkV128( imm8_perms[ (imm8 & 15) ] ) );
+
+   IRTemp res = newTemp(Ity_V128);
+   assign ( res, binop( Iop_OrV128,
+                        binop( Iop_AndV128, mkexpr(sV), 
+                                            mkexpr(imm8_mask) ),
+                        binop( Iop_AndV128, mkexpr(dV),
+                               unop( Iop_NotV128, mkexpr(imm8_mask) ) ) ) );
+   return res;
+}
+
+
+static IRTemp math_BLENDPS_256 ( IRTemp sV, IRTemp dV, UInt imm8 )
+{
+   IRTemp sVhi = IRTemp_INVALID, sVlo = IRTemp_INVALID;
+   IRTemp dVhi = IRTemp_INVALID, dVlo = IRTemp_INVALID;
+   breakupV256toV128s( sV, &sVhi, &sVlo );
+   breakupV256toV128s( dV, &dVhi, &dVlo );
+   IRTemp rVhi = math_BLENDPS_128(sVhi, dVhi, (imm8 >> 4) & 15);
+   IRTemp rVlo = math_BLENDPS_128(sVlo, dVlo, imm8 & 15);
+   IRTemp rV   = newTemp(Ity_V256);
+   assign(rV, binop(Iop_V128HLtoV256, mkexpr(rVhi), mkexpr(rVlo)));
+   return rV;
+}
+
+
+static IRTemp math_PBLENDW_128 ( IRTemp sV, IRTemp dV, UInt imm8 )
+{
+   /* Make imm16 be a 16-bit version of imm8, formed by duplicating each
+      bit in imm8. */
+   Int i;
+   UShort imm16 = 0;
+   for (i = 0; i < 8; i++) {
+      if (imm8 & (1 << i))
+         imm16 |= (3 << (2*i));
+   }
+   IRTemp imm16_mask = newTemp(Ity_V128);
+   assign( imm16_mask, mkV128( imm16 ));
+
+   IRTemp res = newTemp(Ity_V128);
+   assign ( res, binop( Iop_OrV128,
+                        binop( Iop_AndV128, mkexpr(sV), 
+                                            mkexpr(imm16_mask) ),
+                        binop( Iop_AndV128, mkexpr(dV),
+                               unop( Iop_NotV128, mkexpr(imm16_mask) ) ) ) );
+   return res;
+}
+
+
+static IRTemp math_PMULUDQ_128 ( IRTemp sV, IRTemp dV )
+{
+   /* This is a really poor translation -- could be improved if
+      performance critical */
+   IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+   s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+   breakupV128to32s( dV, &d3, &d2, &d1, &d0 );
+   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, binop(Iop_64HLtoV128,
+                     binop( Iop_MullU32, mkexpr(d2), mkexpr(s2)),
+                     binop( Iop_MullU32, mkexpr(d0), mkexpr(s0)) ));
+   return res;
+}
+
+
+static IRTemp math_PMULUDQ_256 ( IRTemp sV, IRTemp dV )
+{
+   /* This is a really poor translation -- could be improved if
+      performance critical */
+   IRTemp sHi, sLo, dHi, dLo;
+   sHi = sLo = dHi = dLo = IRTemp_INVALID;
+   breakupV256toV128s( dV, &dHi, &dLo);
+   breakupV256toV128s( sV, &sHi, &sLo);
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, binop(Iop_V128HLtoV256,
+                     mkexpr(math_PMULUDQ_128(sHi, dHi)),
+                     mkexpr(math_PMULUDQ_128(sLo, dLo))));
+   return res;
+}
+
+
+static IRTemp math_PMULDQ_128 ( IRTemp dV, IRTemp sV )
+{
+   /* This is a really poor translation -- could be improved if
+      performance critical */
+   IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+   s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+   breakupV128to32s( dV, &d3, &d2, &d1, &d0 );
+   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, binop(Iop_64HLtoV128,
+                     binop( Iop_MullS32, mkexpr(d2), mkexpr(s2)),
+                     binop( Iop_MullS32, mkexpr(d0), mkexpr(s0)) ));
+   return res;
+}
+
+
+static IRTemp math_PMULDQ_256 ( IRTemp sV, IRTemp dV )
+{
+   /* This is a really poor translation -- could be improved if
+      performance critical */
+   IRTemp sHi, sLo, dHi, dLo;
+   sHi = sLo = dHi = dLo = IRTemp_INVALID;
+   breakupV256toV128s( dV, &dHi, &dLo);
+   breakupV256toV128s( sV, &sHi, &sLo);
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, binop(Iop_V128HLtoV256,
+                     mkexpr(math_PMULDQ_128(sHi, dHi)),
+                     mkexpr(math_PMULDQ_128(sLo, dLo))));
+   return res;
+}
+
+
+static IRTemp math_PMADDWD_128 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp sVhi, sVlo, dVhi, dVlo;
+   IRTemp resHi = newTemp(Ity_I64);
+   IRTemp resLo = newTemp(Ity_I64);
+   sVhi = sVlo = dVhi = dVlo = IRTemp_INVALID;
+   breakupV128to64s( sV, &sVhi, &sVlo );
+   breakupV128to64s( dV, &dVhi, &dVlo );
+   assign( resHi, mkIRExprCCall(Ity_I64, 0/*regparms*/,
+                                "amd64g_calculate_mmx_pmaddwd", 
+                                &amd64g_calculate_mmx_pmaddwd,
+                                mkIRExprVec_2( mkexpr(sVhi), mkexpr(dVhi))));
+   assign( resLo, mkIRExprCCall(Ity_I64, 0/*regparms*/,
+                                "amd64g_calculate_mmx_pmaddwd", 
+                                &amd64g_calculate_mmx_pmaddwd,
+                                mkIRExprVec_2( mkexpr(sVlo), mkexpr(dVlo))));
+   IRTemp res = newTemp(Ity_V128);
+   assign( res, binop(Iop_64HLtoV128, mkexpr(resHi), mkexpr(resLo))) ;
+   return res;
+}
+
+
+static IRTemp math_PMADDWD_256 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp sHi, sLo, dHi, dLo;
+   sHi = sLo = dHi = dLo = IRTemp_INVALID;
+   breakupV256toV128s( dV, &dHi, &dLo);
+   breakupV256toV128s( sV, &sHi, &sLo);
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, binop(Iop_V128HLtoV256,
+                     mkexpr(math_PMADDWD_128(dHi, sHi)),
+                     mkexpr(math_PMADDWD_128(dLo, sLo))));
+   return res;
+}
+
+
+static IRTemp math_ADDSUBPD_128 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp addV = newTemp(Ity_V128);
+   IRTemp subV = newTemp(Ity_V128);
+   IRTemp a1   = newTemp(Ity_I64);
+   IRTemp s0   = newTemp(Ity_I64);
+   IRTemp rm   = newTemp(Ity_I32);
+
+   assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+   assign( addV, triop(Iop_Add64Fx2, mkexpr(rm), mkexpr(dV), mkexpr(sV)) );
+   assign( subV, triop(Iop_Sub64Fx2, mkexpr(rm), mkexpr(dV), mkexpr(sV)) );
+
+   assign( a1, unop(Iop_V128HIto64, mkexpr(addV) ));
+   assign( s0, unop(Iop_V128to64,   mkexpr(subV) ));
+
+   IRTemp res = newTemp(Ity_V128);
+   assign( res, binop(Iop_64HLtoV128, mkexpr(a1), mkexpr(s0)) );
+   return res;
+}
+
+
+static IRTemp math_ADDSUBPD_256 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp a3, a2, a1, a0, s3, s2, s1, s0;
+   IRTemp addV = newTemp(Ity_V256);
+   IRTemp subV = newTemp(Ity_V256);
+   IRTemp rm   = newTemp(Ity_I32);
+   a3 = a2 = a1 = a0 = s3 = s2 = s1 = s0 = IRTemp_INVALID;
+
+   assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+   assign( addV, triop(Iop_Add64Fx4, mkexpr(rm), mkexpr(dV), mkexpr(sV)) );
+   assign( subV, triop(Iop_Sub64Fx4, mkexpr(rm), mkexpr(dV), mkexpr(sV)) );
+
+   breakupV256to64s( addV, &a3, &a2, &a1, &a0 );
+   breakupV256to64s( subV, &s3, &s2, &s1, &s0 );
+
+   IRTemp res = newTemp(Ity_V256);
+   assign( res, mkV256from64s( a3, s2, a1, s0 ) );
+   return res;
+}
+
+
+static IRTemp math_ADDSUBPS_128 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp a3, a2, a1, a0, s3, s2, s1, s0;
+   IRTemp addV = newTemp(Ity_V128);
+   IRTemp subV = newTemp(Ity_V128);
+   IRTemp rm   = newTemp(Ity_I32);
+   a3 = a2 = a1 = a0 = s3 = s2 = s1 = s0 = IRTemp_INVALID;
+
+   assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+   assign( addV, triop(Iop_Add32Fx4, mkexpr(rm), mkexpr(dV), mkexpr(sV)) );
+   assign( subV, triop(Iop_Sub32Fx4, mkexpr(rm), mkexpr(dV), mkexpr(sV)) );
+
+   breakupV128to32s( addV, &a3, &a2, &a1, &a0 );
+   breakupV128to32s( subV, &s3, &s2, &s1, &s0 );
+
+   IRTemp res = newTemp(Ity_V128);
+   assign( res, mkV128from32s( a3, s2, a1, s0 ) );
+   return res;
+}
+
+
+static IRTemp math_ADDSUBPS_256 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp a7, a6, a5, a4, a3, a2, a1, a0;
+   IRTemp s7, s6, s5, s4, s3, s2, s1, s0;
+   IRTemp addV = newTemp(Ity_V256);
+   IRTemp subV = newTemp(Ity_V256);
+   IRTemp rm   = newTemp(Ity_I32);
+   a7 = a6 = a5 = a4 = a3 = a2 = a1 = a0 = IRTemp_INVALID;
+   s7 = s6 = s5 = s4 = s3 = s2 = s1 = s0 = IRTemp_INVALID;
+
+   assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+   assign( addV, triop(Iop_Add32Fx8, mkexpr(rm), mkexpr(dV), mkexpr(sV)) );
+   assign( subV, triop(Iop_Sub32Fx8, mkexpr(rm), mkexpr(dV), mkexpr(sV)) );
+
+   breakupV256to32s( addV, &a7, &a6, &a5, &a4, &a3, &a2, &a1, &a0 );
+   breakupV256to32s( subV, &s7, &s6, &s5, &s4, &s3, &s2, &s1, &s0 );
+
+   IRTemp res = newTemp(Ity_V256);
+   assign( res, mkV256from32s( a7, s6, a5, s4, a3, s2, a1, s0 ) );
+   return res;
+}
+
+
+/* Handle 128 bit PSHUFLW and PSHUFHW. */
+static Long dis_PSHUFxW_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                              Long delta, Bool isAvx, Bool xIsH )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   UInt   rG = gregOfRexRM(pfx,modrm);
+   UInt   imm8;
+   IRTemp sVmut, dVmut, sVcon, sV, dV, s3, s2, s1, s0;
+   s3 = s2 = s1 = s0 = IRTemp_INVALID;
+   sV    = newTemp(Ity_V128);
+   dV    = newTemp(Ity_V128);
+   sVmut = newTemp(Ity_I64);
+   dVmut = newTemp(Ity_I64);
+   sVcon = newTemp(Ity_I64);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( sV, getXMMReg(rE) );
+      imm8 = (UInt)getUChar(delta+1);
+      delta += 1+1;
+      DIP("%spshuf%cw $%u,%s,%s\n",
+          isAvx ? "v" : "", xIsH ? 'h' : 'l',
+          imm8, nameXMMReg(rE), nameXMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+      assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+      imm8 = (UInt)getUChar(delta+alen);
+      delta += alen+1;
+      DIP("%spshuf%cw $%u,%s,%s\n",
+          isAvx ? "v" : "", xIsH ? 'h' : 'l',
+          imm8, dis_buf, nameXMMReg(rG));
+   }
+
+   /* Get the to-be-changed (mut) and unchanging (con) bits of the
+      source. */
+   assign( sVmut, unop(xIsH ? Iop_V128HIto64 : Iop_V128to64,   mkexpr(sV)) );
+   assign( sVcon, unop(xIsH ? Iop_V128to64   : Iop_V128HIto64, mkexpr(sV)) );
+
+   breakup64to16s( sVmut, &s3, &s2, &s1, &s0 );
+#  define SEL(n) \
+             ((n)==0 ? s0 : ((n)==1 ? s1 : ((n)==2 ? s2 : s3)))
+   assign(dVmut, mk64from16s( SEL((imm8>>6)&3), SEL((imm8>>4)&3),
+                              SEL((imm8>>2)&3), SEL((imm8>>0)&3) ));
+#  undef SEL
+
+   assign(dV, xIsH ? binop(Iop_64HLtoV128, mkexpr(dVmut), mkexpr(sVcon))
+                   : binop(Iop_64HLtoV128, mkexpr(sVcon), mkexpr(dVmut)) );
+
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)(rG, mkexpr(dV));
+   return delta;
+}
+
+
+/* Handle 256 bit PSHUFLW and PSHUFHW. */
+static Long dis_PSHUFxW_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                              Long delta, Bool xIsH )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   UInt   rG = gregOfRexRM(pfx,modrm);
+   UInt   imm8;
+   IRTemp sV, s[8], sV64[4], dVhi, dVlo;
+   sV64[3] = sV64[2] = sV64[1] = sV64[0] = IRTemp_INVALID;
+   s[7] = s[6] = s[5] = s[4] = s[3] = s[2] = s[1] = s[0] = IRTemp_INVALID;
+   sV    = newTemp(Ity_V256);
+   dVhi  = newTemp(Ity_I64);
+   dVlo  = newTemp(Ity_I64);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( sV, getYMMReg(rE) );
+      imm8 = (UInt)getUChar(delta+1);
+      delta += 1+1;
+      DIP("vpshuf%cw $%u,%s,%s\n", xIsH ? 'h' : 'l',
+          imm8, nameYMMReg(rE), nameYMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+      assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
+      imm8 = (UInt)getUChar(delta+alen);
+      delta += alen+1;
+      DIP("vpshuf%cw $%u,%s,%s\n", xIsH ? 'h' : 'l',
+          imm8, dis_buf, nameYMMReg(rG));
+   }
+
+   breakupV256to64s( sV, &sV64[3], &sV64[2], &sV64[1], &sV64[0] );
+   breakup64to16s( sV64[xIsH ? 3 : 2], &s[7], &s[6], &s[5], &s[4] );
+   breakup64to16s( sV64[xIsH ? 1 : 0], &s[3], &s[2], &s[1], &s[0] );
+
+   assign( dVhi, mk64from16s( s[4 + ((imm8>>6)&3)], s[4 + ((imm8>>4)&3)],
+                              s[4 + ((imm8>>2)&3)], s[4 + ((imm8>>0)&3)] ) );
+   assign( dVlo, mk64from16s( s[0 + ((imm8>>6)&3)], s[0 + ((imm8>>4)&3)],
+                              s[0 + ((imm8>>2)&3)], s[0 + ((imm8>>0)&3)] ) );
+   putYMMReg( rG, mkV256from64s( xIsH ? dVhi : sV64[3],
+                                 xIsH ? sV64[2] : dVhi,
+                                 xIsH ? dVlo : sV64[1],
+                                 xIsH ? sV64[0] : dVlo ) );
+   return delta;
+}
+
+
+static Long dis_PEXTRW_128_EregOnly_toG ( const VexAbiInfo* vbi, Prefix pfx,
+                                          Long delta, Bool isAvx )
+{
+   Long   deltaIN = delta;
+   UChar  modrm   = getUChar(delta);
+   UInt   rG      = gregOfRexRM(pfx,modrm);
+   IRTemp sV      = newTemp(Ity_V128);
+   IRTemp d16     = newTemp(Ity_I16);
+   UInt   imm8;
+   IRTemp s0, s1, s2, s3;
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign(sV, getXMMReg(rE));
+      imm8 = getUChar(delta+1) & 7;
+      delta += 1+1;
+      DIP("%spextrw $%d,%s,%s\n", isAvx ? "v" : "",
+          (Int)imm8, nameXMMReg(rE), nameIReg32(rG));
+   } else {
+      /* The memory case is disallowed, apparently. */
+      return deltaIN; /* FAIL */
+   }
+   s3 = s2 = s1 = s0 = IRTemp_INVALID;
+   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
+   switch (imm8) {
+      case 0:  assign(d16, unop(Iop_32to16,   mkexpr(s0))); break;
+      case 1:  assign(d16, unop(Iop_32HIto16, mkexpr(s0))); break;
+      case 2:  assign(d16, unop(Iop_32to16,   mkexpr(s1))); break;
+      case 3:  assign(d16, unop(Iop_32HIto16, mkexpr(s1))); break;
+      case 4:  assign(d16, unop(Iop_32to16,   mkexpr(s2))); break;
+      case 5:  assign(d16, unop(Iop_32HIto16, mkexpr(s2))); break;
+      case 6:  assign(d16, unop(Iop_32to16,   mkexpr(s3))); break;
+      case 7:  assign(d16, unop(Iop_32HIto16, mkexpr(s3))); break;
+      default: vassert(0);
+   }
+   putIReg32(rG, unop(Iop_16Uto32, mkexpr(d16)));
+   return delta;
+}
+ 
+
+static Long dis_CVTDQ2PD_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   IRTemp arg64 = newTemp(Ity_I64);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   const HChar* mbV   = isAvx ? "v" : "";
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( arg64, getXMMRegLane64(rE, 0) );
+      delta += 1;
+      DIP("%scvtdq2pd %s,%s\n", mbV, nameXMMReg(rE), nameXMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+      delta += alen;
+      DIP("%scvtdq2pd %s,%s\n", mbV, dis_buf, nameXMMReg(rG) );
+   }
+   putXMMRegLane64F( 
+      rG, 0,
+      unop(Iop_I32StoF64, unop(Iop_64to32, mkexpr(arg64)))
+   );
+   putXMMRegLane64F(
+      rG, 1, 
+      unop(Iop_I32StoF64, unop(Iop_64HIto32, mkexpr(arg64)))
+   );
+   if (isAvx)
+      putYMMRegLane128(rG, 1, mkV128(0));
+   return delta;
+}
+
+
+static Long dis_STMXCSR ( const VexAbiInfo* vbi, Prefix pfx,
+                          Long delta, Bool isAvx )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   vassert(!epartIsReg(modrm)); /* ensured by caller */
+   vassert(gregOfRexRM(pfx,modrm) == 3); /* ditto */
+
+   addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+   delta += alen;
+
+   /* Fake up a native SSE mxcsr word.  The only thing it depends on
+      is SSEROUND[1:0], so call a clean helper to cook it up.
+   */
+   /* ULong amd64h_create_mxcsr ( ULong sseround ) */
+   DIP("%sstmxcsr %s\n",  isAvx ? "v" : "", dis_buf);
+   storeLE( 
+      mkexpr(addr), 
+      unop(Iop_64to32,      
+           mkIRExprCCall(
+              Ity_I64, 0/*regp*/,
+              "amd64g_create_mxcsr", &amd64g_create_mxcsr, 
+              mkIRExprVec_1( unop(Iop_32Uto64,get_sse_roundingmode()) ) 
+           ) 
+      )
+   );
+   return delta;
+}
+
+
+static Long dis_LDMXCSR ( const VexAbiInfo* vbi, Prefix pfx,
+                          Long delta, Bool isAvx )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UChar  modrm = getUChar(delta);
+   vassert(!epartIsReg(modrm)); /* ensured by caller */
+   vassert(gregOfRexRM(pfx,modrm) == 2); /* ditto */
+
+   IRTemp t64 = newTemp(Ity_I64);
+   IRTemp ew  = newTemp(Ity_I32);
+
+   addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+   delta += alen;
+   DIP("%sldmxcsr %s\n",  isAvx ? "v" : "", dis_buf);
+
+   /* The only thing we observe in %mxcsr is the rounding mode.
+      Therefore, pass the 32-bit value (SSE native-format control
+      word) to a clean helper, getting back a 64-bit value, the
+      lower half of which is the SSEROUND value to store, and the
+      upper half of which is the emulation-warning token which may
+      be generated.  
+   */
+   /* ULong amd64h_check_ldmxcsr ( ULong ); */
+   assign( t64, mkIRExprCCall(
+                   Ity_I64, 0/*regparms*/, 
+                   "amd64g_check_ldmxcsr",
+                   &amd64g_check_ldmxcsr, 
+                   mkIRExprVec_1( 
+                      unop(Iop_32Uto64,
+                           loadLE(Ity_I32, mkexpr(addr))
+                      )
+                   )
+                )
+         );
+
+   put_sse_roundingmode( unop(Iop_64to32, mkexpr(t64)) );
+   assign( ew, unop(Iop_64HIto32, mkexpr(t64) ) );
+   put_emwarn( mkexpr(ew) );
+   /* Finally, if an emulation warning was reported, side-exit to
+      the next insn, reporting the warning, so that Valgrind's
+      dispatcher sees the warning. */
+   stmt( 
+      IRStmt_Exit(
+         binop(Iop_CmpNE64, unop(Iop_32Uto64,mkexpr(ew)), mkU64(0)),
+         Ijk_EmWarn,
+         IRConst_U64(guest_RIP_bbstart+delta),
+         OFFB_RIP
+      )
+   );
+   return delta;
+}
+
+
+static IRTemp math_PINSRW_128 ( IRTemp v128, IRTemp u16, UInt imm8 )
+{
+   vassert(imm8 >= 0 && imm8 <= 7);
+
+   // Create a V128 value which has the selected word in the
+   // specified lane, and zeroes everywhere else.
+   IRTemp tmp128    = newTemp(Ity_V128);
+   IRTemp halfshift = newTemp(Ity_I64);
+   assign(halfshift, binop(Iop_Shl64,
+                           unop(Iop_16Uto64, mkexpr(u16)),
+                           mkU8(16 * (imm8 & 3))));
+   if (imm8 < 4) {
+      assign(tmp128, binop(Iop_64HLtoV128, mkU64(0), mkexpr(halfshift)));
+   } else {
+      assign(tmp128, binop(Iop_64HLtoV128, mkexpr(halfshift), mkU64(0)));
+   }
+
+   UShort mask = ~(3 << (imm8 * 2));
+   IRTemp res  = newTemp(Ity_V128);
+   assign( res, binop(Iop_OrV128,
+                      mkexpr(tmp128),
+                      binop(Iop_AndV128, mkexpr(v128), mkV128(mask))) );
+   return res;
+}
+
+
+static IRTemp math_PSADBW_128 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp s1, s0, d1, d0;
+   s1 = s0 = d1 = d0 = IRTemp_INVALID;
+
+   breakupV128to64s( sV, &s1, &s0 );
+   breakupV128to64s( dV, &d1, &d0 );
+   
+   IRTemp res = newTemp(Ity_V128);
+   assign( res,
+           binop(Iop_64HLtoV128,
+                 mkIRExprCCall(Ity_I64, 0/*regparms*/,
+                               "amd64g_calculate_mmx_psadbw", 
+                               &amd64g_calculate_mmx_psadbw,
+                               mkIRExprVec_2( mkexpr(s1), mkexpr(d1))),
+                 mkIRExprCCall(Ity_I64, 0/*regparms*/,
+                               "amd64g_calculate_mmx_psadbw", 
+                               &amd64g_calculate_mmx_psadbw,
+                               mkIRExprVec_2( mkexpr(s0), mkexpr(d0)))) );
+   return res;
+}
+
+
+static IRTemp math_PSADBW_256 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp sHi, sLo, dHi, dLo;
+   sHi = sLo = dHi = dLo = IRTemp_INVALID;
+   breakupV256toV128s( dV, &dHi, &dLo);
+   breakupV256toV128s( sV, &sHi, &sLo);
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, binop(Iop_V128HLtoV256,
+                     mkexpr(math_PSADBW_128(dHi, sHi)),
+                     mkexpr(math_PSADBW_128(dLo, sLo))));
+   return res;
+}
+
+
+static Long dis_MASKMOVDQU ( const VexAbiInfo* vbi, Prefix pfx,
+                             Long delta, Bool isAvx )
+{
+   IRTemp regD    = newTemp(Ity_V128);
+   IRTemp mask    = newTemp(Ity_V128);
+   IRTemp olddata = newTemp(Ity_V128);
+   IRTemp newdata = newTemp(Ity_V128);
+   IRTemp addr    = newTemp(Ity_I64);
+   UChar  modrm   = getUChar(delta);
+   UInt   rG      = gregOfRexRM(pfx,modrm);
+   UInt   rE      = eregOfRexRM(pfx,modrm);
+
+   assign( addr, handleAddrOverrides( vbi, pfx, getIReg64(R_RDI) ));
+   assign( regD, getXMMReg( rG ));
+
+   /* Unfortunately can't do the obvious thing with SarN8x16
+      here since that can't be re-emitted as SSE2 code - no such
+      insn. */
+   assign( mask, 
+           binop(Iop_64HLtoV128,
+                 binop(Iop_SarN8x8, 
+                       getXMMRegLane64( eregOfRexRM(pfx,modrm), 1 ), 
+                       mkU8(7) ),
+                 binop(Iop_SarN8x8, 
+                       getXMMRegLane64( eregOfRexRM(pfx,modrm), 0 ), 
+                       mkU8(7) ) ));
+   assign( olddata, loadLE( Ity_V128, mkexpr(addr) ));
+   assign( newdata, binop(Iop_OrV128, 
+                          binop(Iop_AndV128, 
+                                mkexpr(regD), 
+                                mkexpr(mask) ),
+                          binop(Iop_AndV128, 
+                                mkexpr(olddata),
+                                unop(Iop_NotV128, mkexpr(mask)))) );
+   storeLE( mkexpr(addr), mkexpr(newdata) );
+
+   delta += 1;
+   DIP("%smaskmovdqu %s,%s\n", isAvx ? "v" : "",
+       nameXMMReg(rE), nameXMMReg(rG) );
+   return delta;
+}
+
+
+static Long dis_MOVMSKPS_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   UChar modrm = getUChar(delta);
+   UInt   rG   = gregOfRexRM(pfx,modrm);
+   UInt   rE   = eregOfRexRM(pfx,modrm);
+   IRTemp t0   = newTemp(Ity_I32);
+   IRTemp t1   = newTemp(Ity_I32);
+   IRTemp t2   = newTemp(Ity_I32);
+   IRTemp t3   = newTemp(Ity_I32);
+   delta += 1;
+   assign( t0, binop( Iop_And32,
+                      binop(Iop_Shr32, getXMMRegLane32(rE,0), mkU8(31)),
+                      mkU32(1) ));
+   assign( t1, binop( Iop_And32,
+                      binop(Iop_Shr32, getXMMRegLane32(rE,1), mkU8(30)),
+                      mkU32(2) ));
+   assign( t2, binop( Iop_And32,
+                      binop(Iop_Shr32, getXMMRegLane32(rE,2), mkU8(29)),
+                      mkU32(4) ));
+   assign( t3, binop( Iop_And32,
+                      binop(Iop_Shr32, getXMMRegLane32(rE,3), mkU8(28)),
+                      mkU32(8) ));
+   putIReg32( rG, binop(Iop_Or32,
+                        binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                        binop(Iop_Or32, mkexpr(t2), mkexpr(t3)) ) );
+   DIP("%smovmskps %s,%s\n", isAvx ? "v" : "",
+       nameXMMReg(rE), nameIReg32(rG));
+   return delta;
+}
+
+
+static Long dis_MOVMSKPS_256 ( const VexAbiInfo* vbi, Prefix pfx, Long delta )
+{
+   UChar modrm = getUChar(delta);
+   UInt   rG   = gregOfRexRM(pfx,modrm);
+   UInt   rE   = eregOfRexRM(pfx,modrm);
+   IRTemp t0   = newTemp(Ity_I32);
+   IRTemp t1   = newTemp(Ity_I32);
+   IRTemp t2   = newTemp(Ity_I32);
+   IRTemp t3   = newTemp(Ity_I32);
+   IRTemp t4   = newTemp(Ity_I32);
+   IRTemp t5   = newTemp(Ity_I32);
+   IRTemp t6   = newTemp(Ity_I32);
+   IRTemp t7   = newTemp(Ity_I32);
+   delta += 1;
+   assign( t0, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,0), mkU8(31)),
+                      mkU32(1) ));
+   assign( t1, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,1), mkU8(30)),
+                      mkU32(2) ));
+   assign( t2, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,2), mkU8(29)),
+                      mkU32(4) ));
+   assign( t3, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,3), mkU8(28)),
+                      mkU32(8) ));
+   assign( t4, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,4), mkU8(27)),
+                      mkU32(16) ));
+   assign( t5, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,5), mkU8(26)),
+                      mkU32(32) ));
+   assign( t6, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,6), mkU8(25)),
+                      mkU32(64) ));
+   assign( t7, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,7), mkU8(24)),
+                      mkU32(128) ));
+   putIReg32( rG, binop(Iop_Or32,
+                        binop(Iop_Or32,
+                              binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                              binop(Iop_Or32, mkexpr(t2), mkexpr(t3)) ),
+                        binop(Iop_Or32,
+                              binop(Iop_Or32, mkexpr(t4), mkexpr(t5)),
+                              binop(Iop_Or32, mkexpr(t6), mkexpr(t7)) ) ) );
+   DIP("vmovmskps %s,%s\n", nameYMMReg(rE), nameIReg32(rG));
+   return delta;
+}
+
+
+static Long dis_MOVMSKPD_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   UChar modrm = getUChar(delta);
+   UInt   rG   = gregOfRexRM(pfx,modrm);
+   UInt   rE   = eregOfRexRM(pfx,modrm);
+   IRTemp t0   = newTemp(Ity_I32);
+   IRTemp t1   = newTemp(Ity_I32);
+   delta += 1;
+   assign( t0, binop( Iop_And32,
+                      binop(Iop_Shr32, getXMMRegLane32(rE,1), mkU8(31)),
+                      mkU32(1) ));
+   assign( t1, binop( Iop_And32,
+                      binop(Iop_Shr32, getXMMRegLane32(rE,3), mkU8(30)),
+                      mkU32(2) ));
+   putIReg32( rG, binop(Iop_Or32, mkexpr(t0), mkexpr(t1) ) );
+   DIP("%smovmskpd %s,%s\n", isAvx ? "v" : "",
+       nameXMMReg(rE), nameIReg32(rG));
+   return delta;
+}
+
+
+static Long dis_MOVMSKPD_256 ( const VexAbiInfo* vbi, Prefix pfx, Long delta )
+{
+   UChar modrm = getUChar(delta);
+   UInt   rG   = gregOfRexRM(pfx,modrm);
+   UInt   rE   = eregOfRexRM(pfx,modrm);
+   IRTemp t0   = newTemp(Ity_I32);
+   IRTemp t1   = newTemp(Ity_I32);
+   IRTemp t2   = newTemp(Ity_I32);
+   IRTemp t3   = newTemp(Ity_I32);
+   delta += 1;
+   assign( t0, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,1), mkU8(31)),
+                      mkU32(1) ));
+   assign( t1, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,3), mkU8(30)),
+                      mkU32(2) ));
+   assign( t2, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,5), mkU8(29)),
+                      mkU32(4) ));
+   assign( t3, binop( Iop_And32,
+                      binop(Iop_Shr32, getYMMRegLane32(rE,7), mkU8(28)),
+                      mkU32(8) ));
+   putIReg32( rG, binop(Iop_Or32,
+                        binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                        binop(Iop_Or32, mkexpr(t2), mkexpr(t3)) ) );
+   DIP("vmovmskps %s,%s\n", nameYMMReg(rE), nameIReg32(rG));
+   return delta;
+}
+
+
+/* Note, this also handles SSE(1) insns. */
+__attribute__((noinline))
+static
+Long dis_ESC_0F__SSE2 ( Bool* decode_OK,
+                        const VexAbiInfo* vbi,
+                        Prefix pfx, Int sz, Long deltaIN,
+                        DisResult* dres )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   IRTemp t0    = IRTemp_INVALID;
+   IRTemp t1    = IRTemp_INVALID;
+   IRTemp t2    = IRTemp_INVALID;
+   IRTemp t3    = IRTemp_INVALID;
+   IRTemp t4    = IRTemp_INVALID;
+   IRTemp t5    = IRTemp_INVALID;
+   IRTemp t6    = IRTemp_INVALID;
+   UChar  modrm = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   *decode_OK = False;
+
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   switch (opc) {
+
+   case 0x10:
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         /* 66 0F 10 = MOVUPD -- move from E (mem or xmm) to G (xmm). */
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       getXMMReg( eregOfRexRM(pfx,modrm) ));
+            DIP("movupd %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("movupd %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* F2 0F 10 = MOVSD -- move 64 bits from E (mem or lo half xmm) to
+         G (lo half xmm).  If E is mem, upper half of G is zeroed out.
+         If E is reg, upper half of G is unchanged. */
+      if (haveF2no66noF3(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8) ) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMRegLane64( gregOfRexRM(pfx,modrm), 0,
+                             getXMMRegLane64( eregOfRexRM(pfx,modrm), 0 ));
+            DIP("movsd %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                 nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putXMMReg( gregOfRexRM(pfx,modrm), mkV128(0) );
+            putXMMRegLane64( gregOfRexRM(pfx,modrm), 0,
+                             loadLE(Ity_I64, mkexpr(addr)) );
+            DIP("movsd %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* F3 0F 10 = MOVSS -- move 32 bits from E (mem or lo 1/4 xmm) to G
+         (lo 1/4 xmm).  If E is mem, upper 3/4 of G is zeroed out. */
+      if (haveF3no66noF2(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMRegLane32( gregOfRexRM(pfx,modrm), 0,
+                             getXMMRegLane32( eregOfRexRM(pfx,modrm), 0 ));
+            DIP("movss %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                 nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putXMMReg( gregOfRexRM(pfx,modrm), mkV128(0) );
+            putXMMRegLane32( gregOfRexRM(pfx,modrm), 0,
+                             loadLE(Ity_I32, mkexpr(addr)) );
+            DIP("movss %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* 0F 10 = MOVUPS -- move from E (mem or xmm) to G (xmm). */
+      if (haveNo66noF2noF3(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       getXMMReg( eregOfRexRM(pfx,modrm) ));
+            DIP("movups %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("movups %s,%s\n", dis_buf,
+                                     nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x11:
+      /* F2 0F 11 = MOVSD -- move 64 bits from G (lo half xmm) to E (mem
+         or lo half xmm). */
+      if (haveF2no66noF3(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMRegLane64( eregOfRexRM(pfx,modrm), 0,
+                             getXMMRegLane64( gregOfRexRM(pfx,modrm), 0 ));
+            DIP("movsd %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                 nameXMMReg(eregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr),
+                     getXMMRegLane64(gregOfRexRM(pfx,modrm), 0) );
+            DIP("movsd %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                 dis_buf);
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* F3 0F 11 = MOVSS -- move 32 bits from G (lo 1/4 xmm) to E (mem
+         or lo 1/4 xmm). */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            /* fall through, we don't yet have a test case */
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr),
+                     getXMMRegLane32(gregOfRexRM(pfx,modrm), 0) );
+            DIP("movss %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                 dis_buf);
+            delta += alen;
+            goto decode_success;
+         }
+      }
+      /* 66 0F 11 = MOVUPD -- move from G (xmm) to E (mem or xmm). */
+      if (have66noF2noF3(pfx)
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMReg( eregOfRexRM(pfx,modrm),
+                       getXMMReg( gregOfRexRM(pfx,modrm) ) );
+            DIP("movupd %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(eregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
+            DIP("movupd %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                  dis_buf );
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* 0F 11 = MOVUPS -- move from G (xmm) to E (mem or xmm). */
+      if (haveNo66noF2noF3(pfx)
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            /* fall through; awaiting test case */
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
+            DIP("movups %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                  dis_buf );
+            delta += alen;
+            goto decode_success;
+         }
+      }
+      break;
+
+   case 0x12:
+      /* 66 0F 12 = MOVLPD -- move from mem to low half of XMM. */
+      /* Identical to MOVLPS ? */
+      if (have66noF2noF3(pfx)
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            /* fall through; apparently reg-reg is not possible */
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            putXMMRegLane64( gregOfRexRM(pfx,modrm),
+                             0/*lower lane*/,
+                             loadLE(Ity_I64, mkexpr(addr)) );
+            DIP("movlpd %s, %s\n", 
+                dis_buf, nameXMMReg( gregOfRexRM(pfx,modrm) ));
+            goto decode_success;
+         }
+      }
+      /* 0F 12 = MOVLPS -- move from mem to low half of XMM. */
+      /* OF 12 = MOVHLPS -- from from hi half to lo half of XMM. */
+      if (haveNo66noF2noF3(pfx)
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            putXMMRegLane64( gregOfRexRM(pfx,modrm),  
+                             0/*lower lane*/,
+                             getXMMRegLane64( eregOfRexRM(pfx,modrm), 1 ));
+            DIP("movhlps %s, %s\n", nameXMMReg(eregOfRexRM(pfx,modrm)), 
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            putXMMRegLane64( gregOfRexRM(pfx,modrm),  0/*lower lane*/,
+                             loadLE(Ity_I64, mkexpr(addr)) );
+            DIP("movlps %s, %s\n", 
+                dis_buf, nameXMMReg( gregOfRexRM(pfx,modrm) ));
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x13:
+      /* 0F 13 = MOVLPS -- move from low half of XMM to mem. */
+      if (haveNo66noF2noF3(pfx)
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            storeLE( mkexpr(addr), 
+                     getXMMRegLane64( gregOfRexRM(pfx,modrm), 
+                                      0/*lower lane*/ ) );
+            DIP("movlps %s, %s\n", nameXMMReg( gregOfRexRM(pfx,modrm) ),
+                                   dis_buf);
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      /* 66 0F 13 = MOVLPD -- move from low half of XMM to mem. */
+      /* Identical to MOVLPS ? */
+      if (have66noF2noF3(pfx)
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            storeLE( mkexpr(addr), 
+                     getXMMRegLane64( gregOfRexRM(pfx,modrm), 
+                                      0/*lower lane*/ ) );
+            DIP("movlpd %s, %s\n", nameXMMReg( gregOfRexRM(pfx,modrm) ),
+                                   dis_buf);
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      break;
+
+   case 0x14:
+   case 0x15:
+      /* 0F 14 = UNPCKLPS -- unpack and interleave low part F32s */
+      /* 0F 15 = UNPCKHPS -- unpack and interleave high part F32s */
+      /* These just appear to be special cases of SHUFPS */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         Bool   hi = toBool(opc == 0x15);
+         IRTemp sV = newTemp(Ity_V128);
+         IRTemp dV = newTemp(Ity_V128);
+         modrm = getUChar(delta);
+         UInt   rG = gregOfRexRM(pfx,modrm);
+         assign( dV, getXMMReg(rG) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            delta += 1;
+            DIP("unpck%sps %s,%s\n", hi ? "h" : "l",
+                nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("unpck%sps %s,%s\n", hi ? "h" : "l",
+                dis_buf, nameXMMReg(rG));
+         }
+         IRTemp res = math_UNPCKxPS_128( sV, dV, hi );
+         putXMMReg( rG, mkexpr(res) );
+         goto decode_success;
+      }
+      /* 66 0F 15 = UNPCKHPD -- unpack and interleave high part F64s */
+      /* 66 0F 14 = UNPCKLPD -- unpack and interleave low part F64s */
+      /* These just appear to be special cases of SHUFPS */
+      if (have66noF2noF3(pfx) 
+          && sz == 2 /* could be 8 if rex also present */) {
+         Bool   hi = toBool(opc == 0x15);
+         IRTemp sV = newTemp(Ity_V128);
+         IRTemp dV = newTemp(Ity_V128);
+         modrm = getUChar(delta);
+         UInt   rG = gregOfRexRM(pfx,modrm);
+         assign( dV, getXMMReg(rG) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            delta += 1;
+            DIP("unpck%sps %s,%s\n", hi ? "h" : "l",
+                nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("unpck%sps %s,%s\n", hi ? "h" : "l",
+                dis_buf, nameXMMReg(rG));
+         }
+         IRTemp res = math_UNPCKxPD_128( sV, dV, hi );
+         putXMMReg( rG, mkexpr(res) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x16:
+      /* 66 0F 16 = MOVHPD -- move from mem to high half of XMM. */
+      /* These seems identical to MOVHPS.  This instruction encoding is
+         completely crazy. */
+      if (have66noF2noF3(pfx)
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            /* fall through; apparently reg-reg is not possible */
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            putXMMRegLane64( gregOfRexRM(pfx,modrm), 1/*upper lane*/,
+                             loadLE(Ity_I64, mkexpr(addr)) );
+            DIP("movhpd %s,%s\n", dis_buf, 
+                                  nameXMMReg( gregOfRexRM(pfx,modrm) ));
+            goto decode_success;
+         }
+      }
+      /* 0F 16 = MOVHPS -- move from mem to high half of XMM. */
+      /* 0F 16 = MOVLHPS -- move from lo half to hi half of XMM. */
+      if (haveNo66noF2noF3(pfx)
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            putXMMRegLane64( gregOfRexRM(pfx,modrm), 1/*upper lane*/,
+                             getXMMRegLane64( eregOfRexRM(pfx,modrm), 0 ) );
+            DIP("movhps %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)), 
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            putXMMRegLane64( gregOfRexRM(pfx,modrm), 1/*upper lane*/,
+                             loadLE(Ity_I64, mkexpr(addr)) );
+            DIP("movhps %s,%s\n", dis_buf, 
+                                  nameXMMReg( gregOfRexRM(pfx,modrm) ));
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x17:
+      /* 0F 17 = MOVHPS -- move from high half of XMM to mem. */
+      if (haveNo66noF2noF3(pfx)
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            storeLE( mkexpr(addr), 
+                     getXMMRegLane64( gregOfRexRM(pfx,modrm),
+                                      1/*upper lane*/ ) );
+            DIP("movhps %s,%s\n", nameXMMReg( gregOfRexRM(pfx,modrm) ),
+                                  dis_buf);
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      /* 66 0F 17 = MOVHPD -- move from high half of XMM to mem. */
+      /* Again, this seems identical to MOVHPS. */
+      if (have66noF2noF3(pfx)
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            storeLE( mkexpr(addr), 
+                     getXMMRegLane64( gregOfRexRM(pfx,modrm),
+                                      1/*upper lane*/ ) );
+            DIP("movhpd %s,%s\n", nameXMMReg( gregOfRexRM(pfx,modrm) ),
+                                  dis_buf);
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      break;
+
+   case 0x18:
+      /* 0F 18 /0 = PREFETCHNTA -- prefetch into caches, */
+      /* 0F 18 /1 = PREFETCH0   -- with various different hints */
+      /* 0F 18 /2 = PREFETCH1 */
+      /* 0F 18 /3 = PREFETCH2 */
+      if (haveNo66noF2noF3(pfx)
+          && !epartIsReg(getUChar(delta)) 
+          && gregLO3ofRM(getUChar(delta)) >= 0
+          && gregLO3ofRM(getUChar(delta)) <= 3) {
+         const HChar* hintstr = "??";
+
+         modrm = getUChar(delta);
+         vassert(!epartIsReg(modrm));
+
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+
+         switch (gregLO3ofRM(modrm)) {
+            case 0: hintstr = "nta"; break;
+            case 1: hintstr = "t0"; break;
+            case 2: hintstr = "t1"; break;
+            case 3: hintstr = "t2"; break;
+            default: vassert(0);
+         }
+
+         DIP("prefetch%s %s\n", hintstr, dis_buf);
+         goto decode_success;
+      }
+      break;
+
+   case 0x28:
+      /* 66 0F 28 = MOVAPD -- move from E (mem or xmm) to G (xmm). */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       getXMMReg( eregOfRexRM(pfx,modrm) ));
+            DIP("movapd %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("movapd %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* 0F 28 = MOVAPS -- move from E (mem or xmm) to G (xmm). */
+      if (haveNo66noF2noF3(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       getXMMReg( eregOfRexRM(pfx,modrm) ));
+            DIP("movaps %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("movaps %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x29:
+      /* 0F 29 = MOVAPS -- move from G (xmm) to E (mem or xmm). */
+      if (haveNo66noF2noF3(pfx)
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMReg( eregOfRexRM(pfx,modrm),
+                       getXMMReg( gregOfRexRM(pfx,modrm) ));
+            DIP("movaps %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(eregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
+            DIP("movaps %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                  dis_buf );
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* 66 0F 29 = MOVAPD -- move from G (xmm) to E (mem or xmm). */
+      if (have66noF2noF3(pfx)
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMReg( eregOfRexRM(pfx,modrm),
+                       getXMMReg( gregOfRexRM(pfx,modrm) ) );
+            DIP("movapd %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(eregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
+            DIP("movapd %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)),
+                                  dis_buf );
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x2A:
+      /* 0F 2A = CVTPI2PS -- convert 2 x I32 in mem/mmx to 2 x F32 in low
+         half xmm */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         IRTemp arg64 = newTemp(Ity_I64);
+         IRTemp rmode = newTemp(Ity_I32);
+
+         modrm = getUChar(delta);
+         do_MMX_preamble();
+         if (epartIsReg(modrm)) {
+            assign( arg64, getMMXReg(eregLO3ofRM(modrm)) );
+            delta += 1;
+            DIP("cvtpi2ps %s,%s\n", nameMMXReg(eregLO3ofRM(modrm)),
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("cvtpi2ps %s,%s\n", dis_buf,
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)) );
+         }
+
+         assign( rmode, get_sse_roundingmode() );
+
+         putXMMRegLane32F( 
+            gregOfRexRM(pfx,modrm), 0,
+            binop(Iop_F64toF32, 
+                  mkexpr(rmode),
+                  unop(Iop_I32StoF64, 
+                       unop(Iop_64to32, mkexpr(arg64)) )) );
+
+         putXMMRegLane32F(
+            gregOfRexRM(pfx,modrm), 1, 
+            binop(Iop_F64toF32, 
+                  mkexpr(rmode),
+                  unop(Iop_I32StoF64,
+                       unop(Iop_64HIto32, mkexpr(arg64)) )) );
+
+         goto decode_success;
+      }
+      /* F3 0F 2A = CVTSI2SS 
+         -- sz==4: convert I32 in mem/ireg to F32 in low quarter xmm
+         -- sz==8: convert I64 in mem/ireg to F32 in low quarter xmm */
+      if (haveF3no66noF2(pfx) && (sz == 4 || sz == 8)) {
+         IRTemp rmode = newTemp(Ity_I32);
+         assign( rmode, get_sse_roundingmode() );
+         modrm = getUChar(delta);
+         if (sz == 4) {
+            IRTemp arg32 = newTemp(Ity_I32);
+            if (epartIsReg(modrm)) {
+               assign( arg32, getIReg32(eregOfRexRM(pfx,modrm)) );
+               delta += 1;
+               DIP("cvtsi2ss %s,%s\n", nameIReg32(eregOfRexRM(pfx,modrm)),
+                                       nameXMMReg(gregOfRexRM(pfx,modrm)));
+            } else {
+               addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+               assign( arg32, loadLE(Ity_I32, mkexpr(addr)) );
+               delta += alen;
+               DIP("cvtsi2ss %s,%s\n", dis_buf,
+                                       nameXMMReg(gregOfRexRM(pfx,modrm)) );
+            }
+            putXMMRegLane32F( 
+               gregOfRexRM(pfx,modrm), 0,
+               binop(Iop_F64toF32,
+                     mkexpr(rmode),
+                     unop(Iop_I32StoF64, mkexpr(arg32)) ) );
+         } else {
+            /* sz == 8 */
+            IRTemp arg64 = newTemp(Ity_I64);
+            if (epartIsReg(modrm)) {
+               assign( arg64, getIReg64(eregOfRexRM(pfx,modrm)) );
+               delta += 1;
+               DIP("cvtsi2ssq %s,%s\n", nameIReg64(eregOfRexRM(pfx,modrm)),
+                                        nameXMMReg(gregOfRexRM(pfx,modrm)));
+            } else {
+               addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+               assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+               delta += alen;
+               DIP("cvtsi2ssq %s,%s\n", dis_buf,
+                                        nameXMMReg(gregOfRexRM(pfx,modrm)) );
+            }
+            putXMMRegLane32F( 
+               gregOfRexRM(pfx,modrm), 0,
+               binop(Iop_F64toF32,
+                     mkexpr(rmode),
+                     binop(Iop_I64StoF64, mkexpr(rmode), mkexpr(arg64)) ) );
+         }
+         goto decode_success;
+      }
+      /* F2 0F 2A = CVTSI2SD 
+         when sz==4 -- convert I32 in mem/ireg to F64 in low half xmm
+         when sz==8 -- convert I64 in mem/ireg to F64 in low half xmm
+      */
+      if (haveF2no66noF3(pfx) && (sz == 4 || sz == 8)) {
+         modrm = getUChar(delta);
+         if (sz == 4) {
+            IRTemp arg32 = newTemp(Ity_I32);
+            if (epartIsReg(modrm)) {
+               assign( arg32, getIReg32(eregOfRexRM(pfx,modrm)) );
+               delta += 1;
+               DIP("cvtsi2sdl %s,%s\n", nameIReg32(eregOfRexRM(pfx,modrm)),
+                                        nameXMMReg(gregOfRexRM(pfx,modrm)));
+            } else {
+               addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+               assign( arg32, loadLE(Ity_I32, mkexpr(addr)) );
+               delta += alen;
+               DIP("cvtsi2sdl %s,%s\n", dis_buf,
+                                        nameXMMReg(gregOfRexRM(pfx,modrm)) );
+            }
+            putXMMRegLane64F( gregOfRexRM(pfx,modrm), 0,
+                              unop(Iop_I32StoF64, mkexpr(arg32)) 
+            );
+         } else {
+            /* sz == 8 */
+            IRTemp arg64 = newTemp(Ity_I64);
+            if (epartIsReg(modrm)) {
+               assign( arg64, getIReg64(eregOfRexRM(pfx,modrm)) );
+               delta += 1;
+               DIP("cvtsi2sdq %s,%s\n", nameIReg64(eregOfRexRM(pfx,modrm)),
+                                        nameXMMReg(gregOfRexRM(pfx,modrm)));
+            } else {
+               addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+               assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+               delta += alen;
+               DIP("cvtsi2sdq %s,%s\n", dis_buf,
+                                        nameXMMReg(gregOfRexRM(pfx,modrm)) );
+            }
+            putXMMRegLane64F( 
+               gregOfRexRM(pfx,modrm), 
+               0,
+               binop( Iop_I64StoF64,
+                      get_sse_roundingmode(),
+                      mkexpr(arg64)
+               ) 
+            );
+         }
+         goto decode_success;
+      }
+      /* 66 0F 2A = CVTPI2PD -- convert 2 x I32 in mem/mmx to 2 x F64 in
+         xmm(G) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         IRTemp arg64 = newTemp(Ity_I64);
+
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            /* Only switch to MMX mode if the source is a MMX register.
+               This is inconsistent with all other instructions which
+               convert between XMM and (M64 or MMX), which always switch
+               to MMX mode even if 64-bit operand is M64 and not MMX.  At
+               least, that's what the Intel docs seem to me to say.
+               Fixes #210264. */
+            do_MMX_preamble();
+            assign( arg64, getMMXReg(eregLO3ofRM(modrm)) );
+            delta += 1;
+            DIP("cvtpi2pd %s,%s\n", nameMMXReg(eregLO3ofRM(modrm)),
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("cvtpi2pd %s,%s\n", dis_buf,
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)) );
+         }
+
+         putXMMRegLane64F( 
+            gregOfRexRM(pfx,modrm), 0,
+            unop(Iop_I32StoF64, unop(Iop_64to32, mkexpr(arg64)) )
+         );
+
+         putXMMRegLane64F( 
+            gregOfRexRM(pfx,modrm), 1,
+            unop(Iop_I32StoF64, unop(Iop_64HIto32, mkexpr(arg64)) )
+         );
+
+         goto decode_success;
+      }
+      break;
+
+   case 0x2B:
+      /* 66 0F 2B = MOVNTPD -- for us, just a plain SSE store. */
+      /* 0F 2B = MOVNTPS -- for us, just a plain SSE store. */
+      if ( (haveNo66noF2noF3(pfx) && sz == 4)
+           || (have66noF2noF3(pfx) && sz == 2) ) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
+            DIP("movntp%s %s,%s\n", sz==2 ? "d" : "s",
+                                    dis_buf,
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      break;
+
+   case 0x2C:
+   case 0x2D:
+      /* For all variants in this arm, opc 0x2C selects the truncating
+         (CVTT*) form and 0x2D the round-per-MXCSR form. */
+      /* 0F 2D = CVTPS2PI -- convert 2 x F32 in mem/low half xmm to 2 x
+         I32 in mmx, according to prevailing SSE rounding mode */
+      /* 0F 2C = CVTTPS2PI -- convert 2 x F32 in mem/low half xmm to 2 x
+         I32 in mmx, rounding towards zero */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         IRTemp dst64  = newTemp(Ity_I64);
+         IRTemp rmode  = newTemp(Ity_I32);
+         IRTemp f32lo  = newTemp(Ity_F32);
+         IRTemp f32hi  = newTemp(Ity_F32);
+         Bool   r2zero = toBool(opc == 0x2C);
+
+         do_MMX_preamble();
+         modrm = getUChar(delta);
+
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            assign(f32lo, getXMMRegLane32F(eregOfRexRM(pfx,modrm), 0));
+            assign(f32hi, getXMMRegLane32F(eregOfRexRM(pfx,modrm), 1));
+            DIP("cvt%sps2pi %s,%s\n", r2zero ? "t" : "",
+                                      nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                      nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign(f32lo, loadLE(Ity_F32, mkexpr(addr)));
+            assign(f32hi, loadLE(Ity_F32, binop( Iop_Add64, 
+                                                 mkexpr(addr), 
+                                                 mkU64(4) )));
+            delta += alen;
+            DIP("cvt%sps2pi %s,%s\n", r2zero ? "t" : "",
+                                      dis_buf,
+                                      nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         if (r2zero) {
+            assign(rmode, mkU32((UInt)Irrm_ZERO) );
+         } else {
+            assign( rmode, get_sse_roundingmode() );
+         }
+
+         assign( 
+            dst64,
+            binop( Iop_32HLto64,
+                   binop( Iop_F64toI32S, 
+                          mkexpr(rmode), 
+                          unop( Iop_F32toF64, mkexpr(f32hi) ) ),
+                   binop( Iop_F64toI32S, 
+                          mkexpr(rmode), 
+                          unop( Iop_F32toF64, mkexpr(f32lo) ) )
+                 )
+         );
+
+         putMMXReg(gregLO3ofRM(modrm), mkexpr(dst64));
+         goto decode_success;
+      }
+      /* F3 0F 2D = CVTSS2SI 
+         when sz==4 -- convert F32 in mem/low quarter xmm to I32 in ireg, 
+                       according to prevailing SSE rounding mode
+         when sz==8 -- convert F32 in mem/low quarter xmm to I64 in ireg, 
+                       according to prevailing SSE rounding mode
+      */
+      /* F3 0F 2C = CVTTSS2SI 
+         when sz==4 -- convert F32 in mem/low quarter xmm to I32 in ireg, 
+                       truncating towards zero
+         when sz==8 -- convert F32 in mem/low quarter xmm to I64 in ireg, 
+                       truncating towards zero 
+      */
+      if (haveF3no66noF2(pfx) && (sz == 4 || sz == 8)) {
+         delta = dis_CVTxSS2SI( vbi, pfx, delta, False/*!isAvx*/, opc, sz);
+         goto decode_success;
+      }
+      /* F2 0F 2D = CVTSD2SI 
+         when sz==4 -- convert F64 in mem/low half xmm to I32 in ireg, 
+                       according to prevailing SSE rounding mode
+         when sz==8 -- convert F64 in mem/low half xmm to I64 in ireg, 
+                       according to prevailing SSE rounding mode
+      */
+      /* F2 0F 2C = CVTTSD2SI 
+         when sz==4 -- convert F64 in mem/low half xmm to I32 in ireg, 
+                       truncating towards zero
+         when sz==8 -- convert F64 in mem/low half xmm to I64 in ireg, 
+                       truncating towards zero 
+      */
+      if (haveF2no66noF3(pfx) && (sz == 4 || sz == 8)) {
+         delta = dis_CVTxSD2SI( vbi, pfx, delta, False/*!isAvx*/, opc, sz);
+         goto decode_success;
+      }
+      /* 66 0F 2D = CVTPD2PI -- convert 2 x F64 in mem/xmm to 2 x
+         I32 in mmx, according to prevailing SSE rounding mode */
+      /* 66 0F 2C = CVTTPD2PI -- convert 2 x F64 in mem/xmm to 2 x
+         I32 in mmx, rounding towards zero */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         IRTemp dst64  = newTemp(Ity_I64);
+         IRTemp rmode  = newTemp(Ity_I32);
+         IRTemp f64lo  = newTemp(Ity_F64);
+         IRTemp f64hi  = newTemp(Ity_F64);
+         Bool   r2zero = toBool(opc == 0x2C);
+
+         do_MMX_preamble();
+         modrm = getUChar(delta);
+
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            assign(f64lo, getXMMRegLane64F(eregOfRexRM(pfx,modrm), 0));
+            assign(f64hi, getXMMRegLane64F(eregOfRexRM(pfx,modrm), 1));
+            DIP("cvt%spd2pi %s,%s\n", r2zero ? "t" : "",
+                                      nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                      nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign(f64lo, loadLE(Ity_F64, mkexpr(addr)));
+            assign(f64hi, loadLE(Ity_F64, binop( Iop_Add64, 
+                                                 mkexpr(addr), 
+                                                 mkU64(8) )));
+            delta += alen;
+            /* Fixed: mnemonic previously printed as "cvt%spf2pi" (typo);
+               must match the reg-reg case and the insn, cvt[t]pd2pi. */
+            DIP("cvt%spd2pi %s,%s\n", r2zero ? "t" : "",
+                                      dis_buf,
+                                      nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         if (r2zero) {
+            assign(rmode, mkU32((UInt)Irrm_ZERO) );
+         } else {
+            assign( rmode, get_sse_roundingmode() );
+         }
+
+         assign( 
+            dst64,
+            binop( Iop_32HLto64,
+                   binop( Iop_F64toI32S, mkexpr(rmode), mkexpr(f64hi) ),
+                   binop( Iop_F64toI32S, mkexpr(rmode), mkexpr(f64lo) )
+                 )
+         );
+
+         putMMXReg(gregLO3ofRM(modrm), mkexpr(dst64));
+         goto decode_success;
+      }
+      break;
+
+   case 0x2E:
+   case 0x2F:
+      /* opc (0x2E = unordered U-form, 0x2F = ordered form) is passed
+         down so the helpers can distinguish the two. */
+      /* 66 0F 2E = UCOMISD -- 64F0x2 comparison G,E, and set ZCP */
+      /* 66 0F 2F = COMISD  -- 64F0x2 comparison G,E, and set ZCP */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_COMISD( vbi, pfx, delta, False/*!isAvx*/, opc );
+         goto decode_success;
+      }
+      /* 0F 2E = UCOMISS -- 32F0x4 comparison G,E, and set ZCP */
+      /* 0F 2F = COMISS  -- 32F0x4 comparison G,E, and set ZCP */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_COMISS( vbi, pfx, delta, False/*!isAvx*/, opc );
+         goto decode_success;
+      }
+      break;
+
+   case 0x50:
+      /* 0F 50 = MOVMSKPS - move 4 sign bits from 4 x F32 in xmm(E)
+         to 4 lowest bits of ireg(G) */
+      if (haveNo66noF2noF3(pfx) && (sz == 4 || sz == 8)
+          && epartIsReg(getUChar(delta))) {
+         /* sz == 8 is a kludge to handle insns with REX.W redundantly
+            set to 1, which has been known to happen:
+
+            4c 0f 50 d9             rex64X movmskps %xmm1,%r11d
+
+            20071106: Intel docs say that REX.W isn't redundant: when
+            present, a 64-bit register is written; when not present, only
+            the 32-bit half is written.  However, testing on a Core2
+            machine suggests the entire 64 bit register is written
+            irrespective of the status of REX.W.  That could be because
+            of the default rule that says "if the lower half of a 32-bit
+            register is written, the upper half is zeroed".  By using
+            putIReg32 here we inadvertently produce the same behaviour as
+            the Core2, for the same reason -- putIReg32 implements said
+            rule.
+
+            AMD docs give no indication that REX.W is even valid for this
+            insn. */
+         delta = dis_MOVMSKPS_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      /* 66 0F 50 = MOVMSKPD - move 2 sign bits from 2 x F64 in xmm(E) to
+         2 lowest bits of ireg(G) */
+      /* NOTE(review): unlike the MOVMSKPS case above, there is no
+         epartIsReg guard here -- presumably handled (or tolerated)
+         inside dis_MOVMSKPD_128; confirm. */
+      if (have66noF2noF3(pfx) && (sz == 2 || sz == 8)) {
+         /* sz == 8 is a kludge to handle insns with REX.W redundantly
+            set to 1, which has been known to happen:
+            66 4c 0f 50 d9          rex64X movmskpd %xmm1,%r11d
+            20071106: see further comments on MOVMSKPS implementation above.
+         */
+         delta = dis_MOVMSKPD_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x51:
+      /* F3 0F 51 = SQRTSS -- approx sqrt 32F0x4 from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_unary_lo32( vbi, pfx, delta, 
+                                            "sqrtss", Iop_Sqrt32F0x4 );
+         goto decode_success;
+      }
+      /* 0F 51 = SQRTPS -- approx sqrt 32Fx4 from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_unary_all( vbi, pfx, delta, 
+                                           "sqrtps", Iop_Sqrt32Fx4 );
+         goto decode_success;
+      }
+      /* F2 0F 51 = SQRTSD -- approx sqrt 64F0x2 from R/M to R */
+      /* NOTE(review): no sz==8 redundant-REX.W allowance here, unlike
+         ADDSD/MULSD/SUBSD further down -- confirm this is intended. */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_unary_lo64( vbi, pfx, delta, 
+                                            "sqrtsd", Iop_Sqrt64F0x2 );
+         goto decode_success;
+      }
+      /* 66 0F 51 = SQRTPD -- approx sqrt 64Fx2 from R/M to R */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_unary_all( vbi, pfx, delta, 
+                                           "sqrtpd", Iop_Sqrt64Fx2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x52:
+      /* F3 0F 52 = RSQRTSS -- approx reciprocal sqrt 32F0x4 from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_unary_lo32( vbi, pfx, delta, 
+                                            "rsqrtss", Iop_RSqrtEst32F0x4 );
+         goto decode_success;
+      }
+      /* 0F 52 = RSQRTPS -- approx reciprocal sqrt 32Fx4 from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_unary_all( vbi, pfx, delta, 
+                                           "rsqrtps", Iop_RSqrtEst32Fx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x53:
+      /* F3 0F 53 = RCPSS -- approx reciprocal 32F0x4 from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_unary_lo32( vbi, pfx, delta,
+                                            "rcpss", Iop_RecipEst32F0x4 );
+         goto decode_success;
+      }
+      /* 0F 53 = RCPPS -- approx reciprocal 32Fx4 from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_unary_all( vbi, pfx, delta,
+                                           "rcpps", Iop_RecipEst32Fx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x54:
+      /* 0F 54 = ANDPS -- G = G and E */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "andps", Iop_AndV128 );
+         goto decode_success;
+      }
+      /* 66 0F 54 = ANDPD -- G = G and E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "andpd", Iop_AndV128 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x55:
+      /* 0F 55 = ANDNPS -- G = (not G) and E */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all_invG( vbi, pfx, delta, "andnps",
+                                                           Iop_AndV128 );
+         goto decode_success;
+      }
+      /* 66 0F 55 = ANDNPD -- G = (not G) and E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all_invG( vbi, pfx, delta, "andnpd",
+                                                           Iop_AndV128 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x56:
+      /* 0F 56 = ORPS -- G = G or E */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "orps", Iop_OrV128 );
+         goto decode_success;
+      }
+      /* 66 0F 56 = ORPD -- G = G or E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "orpd", Iop_OrV128 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x57:
+      /* 66 0F 57 = XORPD -- G = G xor E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "xorpd", Iop_XorV128 );
+         goto decode_success;
+      }
+      /* 0F 57 = XORPS -- G = G xor E */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "xorps", Iop_XorV128 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x58:
+      /* 0F 58 = ADDPS -- add 32Fx4 from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "addps", Iop_Add32Fx4 );
+         goto decode_success;
+      }
+      /* F3 0F 58 = ADDSS -- add 32F0x4 from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_lo32( vbi, pfx, delta, "addss", Iop_Add32F0x4 );
+         goto decode_success;
+      }
+      /* F2 0F 58 = ADDSD -- add 64F0x2 from R/M to R */
+      if (haveF2no66noF3(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         delta = dis_SSE_E_to_G_lo64( vbi, pfx, delta, "addsd", Iop_Add64F0x2 );
+         goto decode_success;
+      }
+      /* 66 0F 58 = ADDPD -- add 64Fx2 from R/M to R */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "addpd", Iop_Add64Fx2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x59:
+      /* F2 0F 59 = MULSD -- mul 64F0x2 from R/M to R */
+      if (haveF2no66noF3(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         delta = dis_SSE_E_to_G_lo64( vbi, pfx, delta, "mulsd", Iop_Mul64F0x2 );
+         goto decode_success;
+      }
+      /* F3 0F 59 = MULSS -- mul 32F0x4 from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_lo32( vbi, pfx, delta, "mulss", Iop_Mul32F0x4 );
+         goto decode_success;
+      }
+      /* 0F 59 = MULPS -- mul 32Fx4 from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "mulps", Iop_Mul32Fx4 );
+         goto decode_success;
+      }
+      /* 66 0F 59 = MULPD -- mul 64Fx2 from R/M to R */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "mulpd", Iop_Mul64Fx2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5A:
+      /* 0F 5A = CVTPS2PD -- convert 2 x F32 in low half mem/xmm to 2 x
+         F64 in xmm(G). */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_CVTPS2PD_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      /* F3 0F 5A = CVTSS2SD -- convert F32 in mem/low 1/4 xmm to F64 in
+         low half xmm(G) */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         IRTemp f32lo = newTemp(Ity_F32);
+
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            assign(f32lo, getXMMRegLane32F(eregOfRexRM(pfx,modrm), 0));
+            DIP("cvtss2sd %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign(f32lo, loadLE(Ity_F32, mkexpr(addr)));
+            delta += alen;
+            DIP("cvtss2sd %s,%s\n", dis_buf,
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+
+         /* Widening F32->F64 is exact, hence the rounding-mode-free
+            Iop_F32toF64 unop.  Only lane 0 of G is written. */
+         putXMMRegLane64F( gregOfRexRM(pfx,modrm), 0, 
+                           unop( Iop_F32toF64, mkexpr(f32lo) ) );
+
+         goto decode_success;
+      }
+      /* F2 0F 5A = CVTSD2SS -- convert F64 in mem/low half xmm to F32 in
+         low 1/4 xmm(G), according to prevailing SSE rounding mode */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         IRTemp rmode = newTemp(Ity_I32);
+         IRTemp f64lo = newTemp(Ity_F64);
+
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            assign(f64lo, getXMMRegLane64F(eregOfRexRM(pfx,modrm), 0));
+            DIP("cvtsd2ss %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign(f64lo, loadLE(Ity_F64, mkexpr(addr)));
+            delta += alen;
+            DIP("cvtsd2ss %s,%s\n", dis_buf,
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+
+         /* Narrowing F64->F32 can round, so use the current SSE
+            rounding mode. */
+         assign( rmode, get_sse_roundingmode() );
+         putXMMRegLane32F( 
+            gregOfRexRM(pfx,modrm), 0, 
+            binop( Iop_F64toF32, mkexpr(rmode), mkexpr(f64lo) )
+         );
+
+         goto decode_success;
+      }
+      /* 66 0F 5A = CVTPD2PS -- convert 2 x F64 in mem/xmm to 2 x F32 in
+         lo half xmm(G), rounding according to prevailing SSE rounding
+         mode, and zero upper half */
+      /* Note, this is practically identical to CVTPD2DQ.  It would have
+         be nice to merge them together. */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_CVTPD2PS_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5B:
+      /* F3 0F 5B = CVTTPS2DQ -- convert 4 x F32 in mem/xmm to 4 x I32 in
+         xmm(G), rounding towards zero */
+      /* 66 0F 5B = CVTPS2DQ -- convert 4 x F32 in mem/xmm to 4 x I32 in
+         xmm(G), as per the prevailing rounding mode */
+      if ( (have66noF2noF3(pfx) && sz == 2)
+           || (haveF3no66noF2(pfx) && sz == 4) ) {
+         /* sz==4 can only arise from the F3 (CVTTPS2DQ) arm of the
+            guard above, hence it selects truncation. */
+         Bool r2zero = toBool(sz == 4); // FIXME -- unreliable (???)
+         delta = dis_CVTxPS2DQ_128( vbi, pfx, delta, False/*!isAvx*/, r2zero );
+         goto decode_success;
+      }
+      /* 0F 5B = CVTDQ2PS -- convert 4 x I32 in mem/xmm to 4 x F32 in
+         xmm(G) */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_CVTDQ2PS_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5C:
+      /* F3 0F 5C = SUBSS -- sub 32F0x4 from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_lo32( vbi, pfx, delta, "subss", Iop_Sub32F0x4 );
+         goto decode_success;
+      }
+      /* F2 0F 5C = SUBSD -- sub 64F0x2 from R/M to R */
+      if (haveF2no66noF3(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         delta = dis_SSE_E_to_G_lo64( vbi, pfx, delta, "subsd", Iop_Sub64F0x2 );
+         goto decode_success;
+      }
+      /* 0F 5C = SUBPS -- sub 32Fx4 from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "subps", Iop_Sub32Fx4 );
+         goto decode_success;
+      }
+      /* 66 0F 5C = SUBPD -- sub 64Fx2 from R/M to R */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "subpd", Iop_Sub64Fx2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5D:
+      /* 0F 5D = MINPS -- min 32Fx4 from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "minps", Iop_Min32Fx4 );
+         goto decode_success;
+      }
+      /* F3 0F 5D = MINSS -- min 32F0x4 from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_lo32( vbi, pfx, delta, "minss", Iop_Min32F0x4 );
+         goto decode_success;
+      }
+      /* F2 0F 5D = MINSD -- min 64F0x2 from R/M to R */
+      /* NOTE(review): no sz==8 redundant-REX.W allowance here, unlike
+         SUBSD above -- confirm intended. */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_lo64( vbi, pfx, delta, "minsd", Iop_Min64F0x2 );
+         goto decode_success;
+      }
+      /* 66 0F 5D = MINPD -- min 64Fx2 from R/M to R */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "minpd", Iop_Min64Fx2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5E:
+      /* F2 0F 5E = DIVSD -- div 64F0x2 from R/M to R */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_lo64( vbi, pfx, delta, "divsd", Iop_Div64F0x2 );
+         goto decode_success;
+      }
+      /* 0F 5E = DIVPS -- div 32Fx4 from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "divps", Iop_Div32Fx4 );
+         goto decode_success;
+      }
+      /* F3 0F 5E = DIVSS -- div 32F0x4 from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_lo32( vbi, pfx, delta, "divss", Iop_Div32F0x4 );
+         goto decode_success;
+      }
+      /* 66 0F 5E = DIVPD -- div 64Fx2 from R/M to R */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "divpd", Iop_Div64Fx2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5F:
+      /* 0F 5F = MAXPS -- max 32Fx4 from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "maxps", Iop_Max32Fx4 );
+         goto decode_success;
+      }
+      /* F3 0F 5F = MAXSS -- max 32F0x4 from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_lo32( vbi, pfx, delta, "maxss", Iop_Max32F0x4 );
+         goto decode_success;
+      }
+      /* F2 0F 5F = MAXSD -- max 64F0x2 from R/M to R */
+      /* NOTE(review): only sz==4 accepted; no redundant-REX.W (sz==8)
+         case, unlike ADDSD/MULSD/SUBSD -- confirm intended. */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         delta = dis_SSE_E_to_G_lo64( vbi, pfx, delta, "maxsd", Iop_Max64F0x2 );
+         goto decode_success;
+      }
+      /* 66 0F 5F = MAXPD -- max 64Fx2 from R/M to R */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "maxpd", Iop_Max64Fx2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x60:
+      /* 66 0F 60 = PUNPCKLBW */
+      /* For opcodes 0x60..0x6D the work is delegated to
+         dis_SSEint_E_to_G.  The final Bool argument is True for the
+         interleave/pack ops and False for the compares; presumably it
+         selects operand ordering -- TODO confirm against
+         dis_SSEint_E_to_G. */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "punpcklbw",
+                                    Iop_InterleaveLO8x16, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x61:
+      /* 66 0F 61 = PUNPCKLWD */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "punpcklwd",
+                                    Iop_InterleaveLO16x8, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x62:
+      /* 66 0F 62 = PUNPCKLDQ */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "punpckldq",
+                                    Iop_InterleaveLO32x4, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x63:
+      /* 66 0F 63 = PACKSSWB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "packsswb",
+                                    Iop_QNarrowBin16Sto8Sx16, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x64:
+      /* 66 0F 64 = PCMPGTB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta, 
+                                    "pcmpgtb", Iop_CmpGT8Sx16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x65:
+      /* 66 0F 65 = PCMPGTW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pcmpgtw", Iop_CmpGT16Sx8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x66:
+      /* 66 0F 66 = PCMPGTD */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pcmpgtd", Iop_CmpGT32Sx4, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x67:
+      /* 66 0F 67 = PACKUSWB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "packuswb",
+                                    Iop_QNarrowBin16Sto8Ux16, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x68:
+      /* 66 0F 68 = PUNPCKHBW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "punpckhbw",
+                                    Iop_InterleaveHI8x16, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x69:
+      /* 66 0F 69 = PUNPCKHWD */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "punpckhwd",
+                                    Iop_InterleaveHI16x8, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6A:
+      /* 66 0F 6A = PUNPCKHDQ */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta, 
+                                    "punpckhdq",
+                                    Iop_InterleaveHI32x4, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6B:
+      /* 66 0F 6B = PACKSSDW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "packssdw",
+                                    Iop_QNarrowBin32Sto16Sx8, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6C:
+      /* 66 0F 6C = PUNPCKLQDQ */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "punpcklqdq",
+                                    Iop_InterleaveLO64x2, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6D:
+      /* 66 0F 6D = PUNPCKHQDQ */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "punpckhqdq",
+                                    Iop_InterleaveHI64x2, True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6E:
+      /* 66 0F 6E = MOVD from ireg32/m32 to xmm lo 1/4,
+                    zeroing high 3/4 of xmm. */
+      /*              or from ireg64/m64 to xmm lo 1/2,
+                    zeroing high 1/2 of xmm. */
+      if (have66noF2noF3(pfx)) {
+         vassert(sz == 2 || sz == 8);
+         /* sz==2 just reflects the mandatory 66 prefix; the operand is
+            really 32 bits wide.  sz==8 means REX.W, i.e. the 64-bit
+            (movq) form. */
+         if (sz == 2) sz = 4;
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            if (sz == 4) {
+               /* Iop_32UtoV128 zero-extends, giving the "zero high
+                  3/4" behaviour. */
+               putXMMReg(
+                  gregOfRexRM(pfx,modrm),
+                  unop( Iop_32UtoV128, getIReg32(eregOfRexRM(pfx,modrm)) ) 
+               );
+               DIP("movd %s, %s\n", nameIReg32(eregOfRexRM(pfx,modrm)), 
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+            } else {
+               putXMMReg(
+                  gregOfRexRM(pfx,modrm),
+                  unop( Iop_64UtoV128, getIReg64(eregOfRexRM(pfx,modrm)) ) 
+               );
+               DIP("movq %s, %s\n", nameIReg64(eregOfRexRM(pfx,modrm)), 
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+            }
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            putXMMReg(
+               gregOfRexRM(pfx,modrm),
+               sz == 4 
+                  ?  unop( Iop_32UtoV128,loadLE(Ity_I32, mkexpr(addr)) ) 
+                  :  unop( Iop_64UtoV128,loadLE(Ity_I64, mkexpr(addr)) )
+            );
+            DIP("mov%c %s, %s\n", sz == 4 ? 'd' : 'q', dis_buf, 
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x6F:
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         /* 66 0F 6F = MOVDQA -- move from E (mem or xmm) to G (xmm). */
+         /* The aligned form: memory operands must be 16-aligned. */
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       getXMMReg( eregOfRexRM(pfx,modrm) ));
+            DIP("movdqa %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("movdqa %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         /* F3 0F 6F = MOVDQU -- move from E (mem or xmm) to G (xmm). */
+         /* The unaligned form: same as MOVDQA above but with no
+            alignment check on the memory operand. */
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       getXMMReg( eregOfRexRM(pfx,modrm) ));
+            DIP("movdqu %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("movdqu %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x70:
+      /* 66 0F 70 = PSHUFD -- rearrange 4x32 from E(xmm or mem) to G(xmm) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PSHUFD_32x4( vbi, pfx, delta, False/*!writesYmm*/);
+         goto decode_success;
+      }
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F 70 = PSHUFW -- rearrange 4x16 from E(mmx or mem) to G(mmx) */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         Int order;
+         IRTemp sV, dV, s3, s2, s1, s0;
+         s3 = s2 = s1 = s0 = IRTemp_INVALID;
+         sV = newTemp(Ity_I64);
+         dV = newTemp(Ity_I64);
+         do_MMX_preamble();
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
+            /* imm8 follows the modrm byte; 2 bits per output lane. */
+            order = (Int)getUChar(delta+1);
+            delta += 1+1;
+            DIP("pshufw $%d,%s,%s\n", order, 
+                                      nameMMXReg(eregLO3ofRM(modrm)),
+                                      nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf,
+                              1/*extra byte after amode*/ );
+            assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+            order = (Int)getUChar(delta+alen);
+            delta += 1+alen;
+            DIP("pshufw $%d,%s,%s\n", order, 
+                                      dis_buf,
+                                      nameMMXReg(gregLO3ofRM(modrm)));
+         }
+         breakup64to16s( sV, &s3, &s2, &s1, &s0 );
+         /* SEL(n) picks source 16-bit lane n, as encoded by each 2-bit
+            field of the immediate. */
+#        define SEL(n) \
+                   ((n)==0 ? s0 : ((n)==1 ? s1 : ((n)==2 ? s2 : s3)))
+         assign(dV,
+                mk64from16s( SEL((order>>6)&3), SEL((order>>4)&3),
+                             SEL((order>>2)&3), SEL((order>>0)&3) )
+         );
+         putMMXReg(gregLO3ofRM(modrm), mkexpr(dV));
+#        undef SEL
+         goto decode_success;
+      }
+      /* F2 0F 70 = PSHUFLW -- rearrange lower half 4x16 from E(xmm or
+         mem) to G(xmm), and copy upper half */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         delta = dis_PSHUFxW_128( vbi, pfx, delta,
+                                  False/*!isAvx*/, False/*!xIsH*/ );
+         goto decode_success;
+      }
+      /* F3 0F 70 = PSHUFHW -- rearrange upper half 4x16 from E(xmm or
+         mem) to G(xmm), and copy lower half */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_PSHUFxW_128( vbi, pfx, delta,
+                                  False/*!isAvx*/, True/*xIsH*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x71:   /* shift 16-bit lanes by imm8; reg field (/digit) picks the op */
+      /* 66 0F 71 /2 ib = PSRLW by immediate */
+      if (have66noF2noF3(pfx) && sz == 2
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 2) {
+         delta = dis_SSE_shiftE_imm( pfx, delta, "psrlw", Iop_ShrN16x8 );
+         goto decode_success;
+      }
+      /* 66 0F 71 /4 ib = PSRAW by immediate */
+      if (have66noF2noF3(pfx) && sz == 2 
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 4) {
+         delta = dis_SSE_shiftE_imm( pfx, delta, "psraw", Iop_SarN16x8 );
+         goto decode_success;
+      }
+      /* 66 0F 71 /6 ib = PSLLW by immediate */
+      if (have66noF2noF3(pfx) && sz == 2 
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 6) {
+         delta = dis_SSE_shiftE_imm( pfx, delta, "psllw", Iop_ShlN16x8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x72:   /* as 0x71, but 32-bit lanes */
+      /* 66 0F 72 /2 ib = PSRLD by immediate */
+      if (have66noF2noF3(pfx) && sz == 2 
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 2) {
+         delta = dis_SSE_shiftE_imm( pfx, delta, "psrld", Iop_ShrN32x4 );
+         goto decode_success;
+      }
+      /* 66 0F 72 /4 ib = PSRAD by immediate */
+      if (have66noF2noF3(pfx) && sz == 2 
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 4) {
+         delta = dis_SSE_shiftE_imm( pfx, delta, "psrad", Iop_SarN32x4 );
+         goto decode_success;
+      }
+      /* 66 0F 72 /6 ib = PSLLD by immediate */
+      if (have66noF2noF3(pfx) && sz == 2 
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 6) {
+         delta = dis_SSE_shiftE_imm( pfx, delta, "pslld", Iop_ShlN32x4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x73:   /* 64-bit lane shifts, plus whole-register (DQ) byte shifts */
+      /* 66 0F 73 /3 ib = PSRLDQ by immediate */
+      /* note, if mem case ever filled in, 1 byte after amode */
+      if (have66noF2noF3(pfx) && sz == 2 
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 3) {
+         Int imm = (Int)getUChar(delta+1);   /* imm8 follows the modrm byte */
+         Int reg = eregOfRexRM(pfx,getUChar(delta));
+         DIP("psrldq $%d,%s\n", imm, nameXMMReg(reg));
+         delta += 2;
+         IRTemp sV = newTemp(Ity_V128);
+         assign( sV, getXMMReg(reg) );
+         putXMMReg(reg, mkexpr(math_PSRLDQ( sV, imm )));
+         goto decode_success;
+      }
+      /* 66 0F 73 /7 ib = PSLLDQ by immediate */
+      /* note, if mem case ever filled in, 1 byte after amode */
+      if (have66noF2noF3(pfx) && sz == 2 
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 7) {
+         Int imm = (Int)getUChar(delta+1);   /* imm8 follows the modrm byte */
+         Int reg = eregOfRexRM(pfx,getUChar(delta));
+         DIP("pslldq $%d,%s\n", imm, nameXMMReg(reg));
+         vassert(imm >= 0 && imm <= 255);   /* trivially true for a UChar; documents intent */
+         delta += 2;
+         IRTemp sV = newTemp(Ity_V128);
+         assign( sV, getXMMReg(reg) );
+         putXMMReg(reg, mkexpr(math_PSLLDQ( sV, imm )));
+         goto decode_success;
+      }
+      /* 66 0F 73 /2 ib = PSRLQ by immediate */
+      if (have66noF2noF3(pfx) && sz == 2
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 2) {
+         delta = dis_SSE_shiftE_imm( pfx, delta, "psrlq", Iop_ShrN64x2 );
+         goto decode_success;
+      }
+      /* 66 0F 73 /6 ib = PSLLQ by immediate */
+      if (have66noF2noF3(pfx) && sz == 2 
+          && epartIsReg(getUChar(delta))
+          && gregLO3ofRM(getUChar(delta)) == 6) {
+         delta = dis_SSE_shiftE_imm( pfx, delta, "psllq", Iop_ShlN64x2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x74:   /* lane-wise equality compares; result lanes all-1s/all-0s */
+      /* 66 0F 74 = PCMPEQB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pcmpeqb", Iop_CmpEQ8x16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x75:
+      /* 66 0F 75 = PCMPEQW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pcmpeqw", Iop_CmpEQ16x8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x76:
+      /* 66 0F 76 = PCMPEQD */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pcmpeqd", Iop_CmpEQ32x4, False );
+         goto decode_success;
+      }
+      break;
+
+
+   case 0x7E:
+      /* F3 0F 7E = MOVQ -- move 64 bits from E (mem or lo half xmm) to
+         G (lo half xmm).  Upper half of G is zeroed out. */
+      if (haveF3no66noF2(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            putXMMRegLane64( gregOfRexRM(pfx,modrm), 0,
+                             getXMMRegLane64( eregOfRexRM(pfx,modrm), 0 ));
+            /* zero bits 127:64 of the destination */
+            putXMMRegLane64( gregOfRexRM(pfx,modrm), 1, mkU64(0) );
+            DIP("movq %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putXMMReg( gregOfRexRM(pfx,modrm), mkV128(0) );
+            putXMMRegLane64( gregOfRexRM(pfx,modrm), 0,
+                             loadLE(Ity_I64, mkexpr(addr)) );
+            DIP("movq %s,%s\n", dis_buf,
+                                nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* 66 0F 7E = MOVD from xmm low 1/4 to ireg32 or m32. */
+      /*              or from xmm low 1/2 to ireg64 or m64. */
+      if (have66noF2noF3(pfx) && (sz == 2 || sz == 8)) {
+         if (sz == 2) sz = 4;   /* sz==2 just reflects the 66h prefix; op is 32 bit */
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            if (sz == 4) {
+               putIReg32( eregOfRexRM(pfx,modrm),
+                          getXMMRegLane32(gregOfRexRM(pfx,modrm), 0) );
+               DIP("movd %s, %s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), 
+                                    nameIReg32(eregOfRexRM(pfx,modrm)));
+            } else {
+               putIReg64( eregOfRexRM(pfx,modrm),
+                          getXMMRegLane64(gregOfRexRM(pfx,modrm), 0) );
+               DIP("movq %s, %s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), 
+                                    nameIReg64(eregOfRexRM(pfx,modrm)));
+            }
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            storeLE( mkexpr(addr),
+                     sz == 4
+                        ? getXMMRegLane32(gregOfRexRM(pfx,modrm),0)
+                        : getXMMRegLane64(gregOfRexRM(pfx,modrm),0) );
+            DIP("mov%c %s, %s\n", sz == 4 ? 'd' : 'q',
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)), dis_buf);
+         }
+         goto decode_success;
+      }
+      break;
+
+
+   case 0x7F:   /* store-direction moves of a whole xmm register */
+      /* F3 0F 7F = MOVDQU -- move from G (xmm) to E (mem or xmm). */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            goto decode_failure; /* awaiting test case */
+            delta += 1;
+            putXMMReg( eregOfRexRM(pfx,modrm),
+                       getXMMReg(gregOfRexRM(pfx,modrm)) );
+            DIP("movdqu %s, %s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), 
+                                   nameXMMReg(eregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );   /* unaligned OK */
+            DIP("movdqu %s, %s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), dis_buf);
+         }
+         goto decode_success;
+      }
+      /* 66 0F 7F = MOVDQA -- move from G (xmm) to E (mem or xmm). */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            putXMMReg( eregOfRexRM(pfx,modrm),
+                       getXMMReg(gregOfRexRM(pfx,modrm)) );
+            DIP("movdqa %s, %s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), 
+                                   nameXMMReg(eregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );   /* MOVDQA faults on misaligned mem */
+            delta += alen;
+            storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
+            DIP("movdqa %s, %s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), dis_buf);
+         }
+         goto decode_success;
+      }
+      break;
+
+
+   case 0xAE:   /* misc system-ish group: fences, CLFLUSH, xMXCSR, FXSAVE/FXRSTOR */
+      /* 0F AE /7 = SFENCE -- flush pending operations to memory */
+      if (haveNo66noF2noF3(pfx) 
+          && epartIsReg(getUChar(delta)) && gregLO3ofRM(getUChar(delta)) == 7
+          && sz == 4) {
+         delta += 1;
+         /* Insert a memory fence.  It's sometimes important that these
+            are carried through to the generated code. */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("sfence\n");
+         goto decode_success;
+      }
+      /* mindless duplication follows .. */
+      /* 0F AE /5 = LFENCE -- flush pending operations to memory */
+      /* 0F AE /6 = MFENCE -- flush pending operations to memory */
+      if (haveNo66noF2noF3(pfx)
+          && epartIsReg(getUChar(delta))
+          && (gregLO3ofRM(getUChar(delta)) == 5
+              || gregLO3ofRM(getUChar(delta)) == 6)
+          && sz == 4) {
+         delta += 1;
+         /* Insert a memory fence.  It's sometimes important that these
+            are carried through to the generated code. */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("%sfence\n", gregLO3ofRM(getUChar(delta-1))==5 ? "l" : "m");
+         goto decode_success;
+      }
+
+      /* 0F AE /7 = CLFLUSH -- flush cache line */
+      if (haveNo66noF2noF3(pfx)
+          && !epartIsReg(getUChar(delta)) && gregLO3ofRM(getUChar(delta)) == 7
+          && sz == 4) {
+
+         /* This is something of a hack.  We need to know the size of
+            the cache line containing addr.  Since we don't (easily),
+            assume 256 on the basis that no real cache would have a
+            line that big.  It's safe to invalidate more stuff than we
+            need, just inefficient. */
+         ULong lineszB = 256ULL;
+
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+
+         /* Round addr down to the start of the containing block. */
+         stmt( IRStmt_Put(
+                  OFFB_CMSTART,
+                  binop( Iop_And64, 
+                         mkexpr(addr), 
+                         mkU64( ~(lineszB-1) ))) );
+
+         stmt( IRStmt_Put(OFFB_CMLEN, mkU64(lineszB) ) );
+
+         jmp_lit(dres, Ijk_InvalICache, (Addr64)(guest_RIP_bbstart+delta));   /* end block */
+
+         DIP("clflush %s\n", dis_buf);
+         goto decode_success;
+      }
+
+      /* 0F AE /3 = STMXCSR m32 -- store %mxcsr */
+      if (haveNo66noF2noF3(pfx)
+          && !epartIsReg(getUChar(delta)) && gregLO3ofRM(getUChar(delta)) == 3
+          && sz == 4) {
+         delta = dis_STMXCSR(vbi, pfx, delta, False/*!isAvx*/);
+         goto decode_success;
+      }
+      /* 0F AE /2 = LDMXCSR m32 -- load %mxcsr */
+      if (haveNo66noF2noF3(pfx)
+          && !epartIsReg(getUChar(delta)) && gregLO3ofRM(getUChar(delta)) == 2
+          && sz == 4) {
+         delta = dis_LDMXCSR(vbi, pfx, delta, False/*!isAvx*/);
+         goto decode_success;
+      }
+      /* 0F AE /0 = FXSAVE m512 -- write x87 and SSE state to memory.
+         Note that the presence or absence of REX.W slightly affects the
+         written format: whether the saved FPU IP and DP pointers are 64
+         or 32 bits.  But the helper function we call simply writes zero
+         bits in the relevant fields (which are 64 bits regardless of
+         what REX.W is) and so it's good enough (iow, equally broken) in
+         both cases. */
+      if (haveNo66noF2noF3(pfx) && (sz == 4 || sz == 8)
+          && !epartIsReg(getUChar(delta))
+          && gregOfRexRM(pfx,getUChar(delta)) == 0) {   /* NOTE(review): other /digit tests use gregLO3ofRM; a set REX.R bit makes this 8 and the test fails -- confirm intended */
+          IRDirty* d;
+         modrm = getUChar(delta);
+         vassert(!epartIsReg(modrm));
+
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         gen_SEGV_if_not_16_aligned(addr);
+
+         DIP("%sfxsave %s\n", sz==8 ? "rex64/" : "", dis_buf);
+
+         /* Uses dirty helper: 
+              void amd64g_do_FXSAVE_ALL_EXCEPT_XMM ( VexGuestAMD64State*,
+                                                     ULong ) */
+         d = unsafeIRDirty_0_N ( 
+                0/*regparms*/, 
+                "amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM",
+                &amd64g_dirtyhelper_FXSAVE_ALL_EXCEPT_XMM,
+                mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+             );
+
+         /* declare we're writing memory */
+         d->mFx   = Ifx_Write;
+         d->mAddr = mkexpr(addr);
+         d->mSize = 464; /* according to recent Intel docs */
+
+         /* declare we're reading guest state */
+         d->nFxState = 6;
+         vex_bzero(&d->fxState, sizeof(d->fxState));
+
+         d->fxState[0].fx     = Ifx_Read;
+         d->fxState[0].offset = OFFB_FTOP;
+         d->fxState[0].size   = sizeof(UInt);
+
+         d->fxState[1].fx     = Ifx_Read;
+         d->fxState[1].offset = OFFB_FPREGS;
+         d->fxState[1].size   = 8 * sizeof(ULong);
+
+         d->fxState[2].fx     = Ifx_Read;
+         d->fxState[2].offset = OFFB_FPTAGS;
+         d->fxState[2].size   = 8 * sizeof(UChar);
+
+         d->fxState[3].fx     = Ifx_Read;
+         d->fxState[3].offset = OFFB_FPROUND;
+         d->fxState[3].size   = sizeof(ULong);
+
+         d->fxState[4].fx     = Ifx_Read;
+         d->fxState[4].offset = OFFB_FC3210;
+         d->fxState[4].size   = sizeof(ULong);
+
+         d->fxState[5].fx     = Ifx_Read;
+         d->fxState[5].offset = OFFB_SSEROUND;
+         d->fxState[5].size   = sizeof(ULong);
+
+         /* Call the helper.  This creates all parts of the in-memory
+            image except for the XMM[0..15] array, which we do
+            separately, in order that any undefinedness in the XMM
+            registers is tracked separately by Memcheck and does not
+            "infect" the in-memory shadow for the other parts of the
+            image (FPTOP, FPREGS, FPTAGS, FPROUND, FC3210,
+            SSEROUND). */
+         stmt( IRStmt_Dirty(d) );
+
+         /* And now the XMMs themselves. */
+         UInt xmm;
+         for (xmm = 0; xmm < 16; xmm++) {
+            storeLE( binop(Iop_Add64, mkexpr(addr), mkU64(160 + xmm * 16)),   /* XMM area starts at image offset 160 */
+                     getXMMReg(xmm) );
+         }
+
+         goto decode_success;
+      }
+      /* 0F AE /1 = FXRSTOR m512 -- read x87 and SSE state from memory.
+         As with FXSAVE above we ignore the value of REX.W since we're
+         not bothering with the FPU DP and IP fields. */
+      if (haveNo66noF2noF3(pfx) && (sz == 4 || sz == 8)
+          && !epartIsReg(getUChar(delta))
+          && gregOfRexRM(pfx,getUChar(delta)) == 1) {   /* NOTE(review): same gregOfRexRM-vs-gregLO3ofRM question as FXSAVE */
+         IRDirty* d;
+         modrm = getUChar(delta);
+         vassert(!epartIsReg(modrm));
+
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         gen_SEGV_if_not_16_aligned(addr);
+
+         DIP("%sfxrstor %s\n", sz==8 ? "rex64/" : "", dis_buf);
+
+         /* Uses dirty helper: 
+              VexEmNote amd64g_do_FXRSTOR_ALL_EXCEPT_XMM ( VexGuestAMD64State*,
+                                                           ULong )
+            NOTE:
+              the VexEmNote value is simply ignored
+         */
+         d = unsafeIRDirty_0_N ( 
+                0/*regparms*/, 
+                "amd64g_dirtyhelper_FXRSTOR_ALL_EXCEPT_XMM", 
+                &amd64g_dirtyhelper_FXRSTOR_ALL_EXCEPT_XMM,
+                mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+             );
+
+         /* declare we're reading memory */
+         d->mFx   = Ifx_Read;
+         d->mAddr = mkexpr(addr);
+         d->mSize = 464; /* according to recent Intel docs */
+
+         /* declare we're writing guest state */
+         d->nFxState = 6;
+         vex_bzero(&d->fxState, sizeof(d->fxState));
+
+         d->fxState[0].fx     = Ifx_Write;
+         d->fxState[0].offset = OFFB_FTOP;
+         d->fxState[0].size   = sizeof(UInt);
+
+         d->fxState[1].fx     = Ifx_Write;
+         d->fxState[1].offset = OFFB_FPREGS;
+         d->fxState[1].size   = 8 * sizeof(ULong);
+
+         d->fxState[2].fx     = Ifx_Write;
+         d->fxState[2].offset = OFFB_FPTAGS;
+         d->fxState[2].size   = 8 * sizeof(UChar);
+
+         d->fxState[3].fx     = Ifx_Write;
+         d->fxState[3].offset = OFFB_FPROUND;
+         d->fxState[3].size   = sizeof(ULong);
+
+         d->fxState[4].fx     = Ifx_Write;
+         d->fxState[4].offset = OFFB_FC3210;
+         d->fxState[4].size   = sizeof(ULong);
+
+         d->fxState[5].fx     = Ifx_Write;
+         d->fxState[5].offset = OFFB_SSEROUND;
+         d->fxState[5].size   = sizeof(ULong);
+
+         /* Call the helper.  This reads all parts of the in-memory
+            image except for the XMM[0..15] array, which we do
+            separately, in order that any undefinedness in the XMM
+            registers is tracked separately by Memcheck and does not
+            "infect" the in-guest-state shadow for the other parts of the
+            image (FPTOP, FPREGS, FPTAGS, FPROUND, FC3210,
+            SSEROUND). */
+         stmt( IRStmt_Dirty(d) );
+
+         /* And now the XMMs themselves. */
+         UInt xmm;
+         for (xmm = 0; xmm < 16; xmm++) {
+            putXMMReg(xmm, loadLE(Ity_V128,
+                                  binop(Iop_Add64, mkexpr(addr),
+                                                   mkU64(160 + xmm * 16))));
+         }
+
+         goto decode_success;
+      }
+      break;
+
+
+   case 0xC2:   /* CMPxx family; imm8 condition handled inside the helper */
+      /* 0F C2 = CMPPS -- 32Fx4 comparison from R/M to R */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         Long delta0 = delta;   /* helper leaves delta unchanged if it can't decode */
+         delta = dis_SSE_cmp_E_to_G( vbi, pfx, delta, "cmpps", True, 4 );
+         if (delta > delta0) goto decode_success;
+      }
+      /* F3 0F C2 = CMPSS -- 32F0x4 comparison from R/M to R */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         Long delta0 = delta;
+         delta = dis_SSE_cmp_E_to_G( vbi, pfx, delta, "cmpss", False, 4 );
+         if (delta > delta0) goto decode_success;
+      }
+      /* F2 0F C2 = CMPSD -- 64F0x2 comparison from R/M to R */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         Long delta0 = delta;
+         delta = dis_SSE_cmp_E_to_G( vbi, pfx, delta, "cmpsd", False, 8 );
+         if (delta > delta0) goto decode_success;
+      }
+      /* 66 0F C2 = CMPPD -- 64Fx2 comparison from R/M to R */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         Long delta0 = delta;
+         delta = dis_SSE_cmp_E_to_G( vbi, pfx, delta, "cmppd", True, 8 );
+         if (delta > delta0) goto decode_success;
+      }
+      break;
+
+   case 0xC3:
+      /* 0F C3 = MOVNTI -- for us, just a plain ireg store. */
+      if (haveNo66noF2noF3(pfx) && (sz == 4 || sz == 8)) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {   /* memory destination only */
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getIRegG(sz, pfx, modrm) );
+            DIP("movnti %s,%s\n", dis_buf,
+                                  nameIRegG(sz, pfx, modrm));
+            delta += alen;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      break;
+
+
+   case 0xC4:   /* PINSRW, mmx and xmm forms */
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F C4 = PINSRW -- get 16 bits from E(mem or low half ireg) and
+         put it into the specified lane of mmx(G). */
+      if (haveNo66noF2noF3(pfx)
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         /* Use t0 .. t3 to hold the 4 original 16-bit lanes of the
+            mmx reg.  t4 is the new lane value.  t5 is the original
+            mmx value. t6 is the new mmx value. */
+         Int lane;
+         t4 = newTemp(Ity_I16);
+         t5 = newTemp(Ity_I64);
+         t6 = newTemp(Ity_I64);
+         modrm = getUChar(delta);
+         do_MMX_preamble();
+
+         assign(t5, getMMXReg(gregLO3ofRM(modrm)));
+         breakup64to16s( t5, &t3, &t2, &t1, &t0 );
+
+         if (epartIsReg(modrm)) {
+            assign(t4, getIReg16(eregOfRexRM(pfx,modrm)));
+            delta += 1+1;   /* modrm byte + imm8 */
+            lane = getUChar(delta-1);
+            DIP("pinsrw $%d,%s,%s\n", (Int)lane, 
+                                      nameIReg16(eregOfRexRM(pfx,modrm)),
+                                      nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+            delta += 1+alen;
+            lane = getUChar(delta-1);
+            assign(t4, loadLE(Ity_I16, mkexpr(addr)));
+            DIP("pinsrw $%d,%s,%s\n", (Int)lane,
+                                      dis_buf,
+                                      nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         switch (lane & 3) {   /* mmx has only 4 x 16-bit lanes */
+            case 0:  assign(t6, mk64from16s(t3,t2,t1,t4)); break;
+            case 1:  assign(t6, mk64from16s(t3,t2,t4,t0)); break;
+            case 2:  assign(t6, mk64from16s(t3,t4,t1,t0)); break;
+            case 3:  assign(t6, mk64from16s(t4,t2,t1,t0)); break;
+            default: vassert(0);
+         }
+         putMMXReg(gregLO3ofRM(modrm), mkexpr(t6));
+         goto decode_success;
+      }
+      /* 66 0F C4 = PINSRW -- get 16 bits from E(mem or low half ireg) and
+         put it into the specified lane of xmm(G). */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         Int lane;
+         t4 = newTemp(Ity_I16);
+         modrm = getUChar(delta);
+         UInt rG = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign(t4, getIReg16(rE));
+            delta += 1+1;
+            lane = getUChar(delta-1);
+            DIP("pinsrw $%d,%s,%s\n",
+                (Int)lane, nameIReg16(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 
+                              1/*byte after the amode*/ );
+            delta += 1+alen;
+            lane = getUChar(delta-1);
+            assign(t4, loadLE(Ity_I16, mkexpr(addr)));
+            DIP("pinsrw $%d,%s,%s\n",
+                (Int)lane, dis_buf, nameXMMReg(rG));
+         }
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg(rG));
+         IRTemp res_vec = math_PINSRW_128( src_vec, t4, lane & 7);   /* xmm: 8 lanes */
+         putXMMReg(rG, mkexpr(res_vec));
+         goto decode_success;
+      }
+      break;
+
+
+   case 0xC5:   /* PEXTRW, register-source forms only */
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F C5 = PEXTRW -- extract 16-bit field from mmx(E) and put 
+         zero-extend of it in ireg(G). */
+      if (haveNo66noF2noF3(pfx) && (sz == 4 || sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            IRTemp sV = newTemp(Ity_I64);
+            t5 = newTemp(Ity_I16);
+            do_MMX_preamble();
+            assign(sV, getMMXReg(eregLO3ofRM(modrm)));
+            breakup64to16s( sV, &t3, &t2, &t1, &t0 );
+            switch (getUChar(delta+1) & 3) {   /* imm8 picks one of 4 lanes */
+               case 0:  assign(t5, mkexpr(t0)); break;
+               case 1:  assign(t5, mkexpr(t1)); break;
+               case 2:  assign(t5, mkexpr(t2)); break;
+               case 3:  assign(t5, mkexpr(t3)); break;
+               default: vassert(0);
+            }
+            if (sz == 8)
+               putIReg64(gregOfRexRM(pfx,modrm), unop(Iop_16Uto64, mkexpr(t5)));
+            else
+               putIReg32(gregOfRexRM(pfx,modrm), unop(Iop_16Uto32, mkexpr(t5)));
+            DIP("pextrw $%d,%s,%s\n",
+                (Int)getUChar(delta+1),
+                nameMMXReg(eregLO3ofRM(modrm)),
+                sz==8 ? nameIReg64(gregOfRexRM(pfx,modrm))
+                      : nameIReg32(gregOfRexRM(pfx,modrm))
+            );
+            delta += 2;   /* modrm byte + imm8 */
+            goto decode_success;
+         } 
+         /* else fall through */
+         /* note, for anyone filling in the mem case: this insn has one
+            byte after the amode and therefore you must pass 1 as the
+            last arg to disAMode */
+      }
+      /* 66 0F C5 = PEXTRW -- extract 16-bit field from xmm(E) and put 
+         zero-extend of it in ireg(G). */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         Long delta0 = delta;   /* helper leaves delta unchanged on failure */
+         delta = dis_PEXTRW_128_EregOnly_toG( vbi, pfx, delta,
+                                              False/*!isAvx*/ );
+         if (delta > delta0) goto decode_success;
+         /* else fall through -- decoding has failed */
+      }
+      break;
+
+
+   case 0xC6:   /* SHUFPS / SHUFPD: imm8-controlled lane shuffles */
+      /* 0F C6 /r ib = SHUFPS -- shuffle packed F32s */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         Int    imm8 = 0;
+         IRTemp sV   = newTemp(Ity_V128);
+         IRTemp dV   = newTemp(Ity_V128);
+         modrm = getUChar(delta);
+         UInt rG = gregOfRexRM(pfx,modrm);
+         assign( dV, getXMMReg(rG) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            imm8 = (Int)getUChar(delta+1);
+            delta += 1+1;
+            DIP("shufps $%d,%s,%s\n", imm8, nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += 1+alen;
+            DIP("shufps $%d,%s,%s\n", imm8, dis_buf, nameXMMReg(rG));
+         }
+         IRTemp res = math_SHUFPS_128( sV, dV, imm8 );
+         putXMMReg( gregOfRexRM(pfx,modrm), mkexpr(res) );
+         goto decode_success;
+      }
+      /* 66 0F C6 /r ib = SHUFPD -- shuffle packed F64s */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         Int    select;
+         IRTemp sV = newTemp(Ity_V128);
+         IRTemp dV = newTemp(Ity_V128);
+
+         modrm = getUChar(delta);
+         assign( dV, getXMMReg(gregOfRexRM(pfx,modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getXMMReg(eregOfRexRM(pfx,modrm)) );
+            select = (Int)getUChar(delta+1);
+            delta += 1+1;
+            DIP("shufpd $%d,%s,%s\n", select, 
+                                      nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                      nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            select = getUChar(delta+alen);   /* NOTE(review): no (Int) cast, unlike reg case; harmless */
+            delta += 1+alen;
+            DIP("shufpd $%d,%s,%s\n", select, 
+                                      dis_buf,
+                                      nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+
+         IRTemp res = math_SHUFPD_128( sV, dV, select );
+         putXMMReg( gregOfRexRM(pfx,modrm), mkexpr(res) );
+         goto decode_success;
+      }
+      break;
+
+
+   case 0xD1:   /* PSxx-by-E forms: shift count comes from xmm/m128 */
+      /* 66 0F D1 = PSRLW by E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_shiftG_byE( vbi, pfx, delta, "psrlw", Iop_ShrN16x8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD2:
+      /* 66 0F D2 = PSRLD by E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_shiftG_byE( vbi, pfx, delta, "psrld", Iop_ShrN32x4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD3:
+      /* 66 0F D3 = PSRLQ by E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_shiftG_byE( vbi, pfx, delta, "psrlq", Iop_ShrN64x2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD4:
+      /* 66 0F D4 = PADDQ */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "paddq", Iop_Add64x2, False );
+         goto decode_success;
+      }
+      /* ***--- this is an MMX class insn introduced in SSE2 ---*** */
+      /* 0F D4 = PADDQ -- add 64x1 */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                   vbi, pfx, delta, opc, "paddq", False );   /* helper re-inspects opc */
+         goto decode_success;
+      }
+      break;
+
+   case 0xD5:
+      /* 66 0F D5 = PMULLW -- 16x8 multiply */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta, 
+                                    "pmullw", Iop_Mul16x8, False );
+         goto decode_success;
+      }
+      break;
+
+
+   case 0xD6:   /* MOVQ2DQ / MOVQ(store) / MOVDQ2Q, split by prefix */
+      /* F3 0F D6 = MOVQ2DQ -- move from E (mmx) to G (lo half xmm, zero
+         hi half). */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            do_MMX_preamble();
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       unop(Iop_64UtoV128, getMMXReg( eregLO3ofRM(modrm) )) );
+            DIP("movq2dq %s,%s\n", nameMMXReg(eregLO3ofRM(modrm)),
+                                   nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+            goto decode_success;
+         }
+         /* apparently no mem case for this insn */
+      }
+      /* 66 0F D6 = MOVQ -- move 64 bits from G (lo half xmm) to E (mem
+         or lo half xmm).  */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            /* fall through, awaiting test case */
+            /* dst: lo half copied, hi half zeroed */
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), 
+                     getXMMRegLane64( gregOfRexRM(pfx,modrm), 0 ));
+            DIP("movq %s,%s\n", nameXMMReg(gregOfRexRM(pfx,modrm)), dis_buf );
+            delta += alen;
+            goto decode_success;
+         }
+      }
+      /* F2 0F D6 = MOVDQ2Q -- move from E (lo half xmm, not mem) to G (mmx). */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            do_MMX_preamble();
+            putMMXReg( gregLO3ofRM(modrm), 
+                       getXMMRegLane64( eregOfRexRM(pfx,modrm), 0 ));
+            DIP("movdq2q %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                   nameMMXReg(gregLO3ofRM(modrm)));
+            delta += 1;
+            goto decode_success;
+         }
+         /* apparently no mem case for this insn */
+      }
+      break;
+
+   case 0xD7:
+      /* 66 0F D7 = PMOVMSKB -- extract sign bits from each of 16
+         lanes in xmm(E), turn them into a byte, and put
+         zero-extend of it in ireg(G).  Doing this directly is just
+         too cumbersome; give up therefore and call a helper. */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)
+          && epartIsReg(getUChar(delta))) { /* no memory case, it seems */
+         delta = dis_PMOVMSKB_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F D7 = PMOVMSKB -- extract sign bits from each of 8 lanes in
+         mmx(E), turn them into a byte, and put zero-extend of it in
+         ireg(G). */
+      if (haveNo66noF2noF3(pfx)
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            do_MMX_preamble();
+            t0 = newTemp(Ity_I64);
+            t1 = newTemp(Ity_I32);
+            assign(t0, getMMXReg(eregLO3ofRM(modrm)));
+            assign(t1, unop(Iop_8Uto32, unop(Iop_GetMSBs8x8, mkexpr(t0))));
+            putIReg32(gregOfRexRM(pfx,modrm), mkexpr(t1));
+            DIP("pmovmskb %s,%s\n", nameMMXReg(eregLO3ofRM(modrm)),
+                                    nameIReg32(gregOfRexRM(pfx,modrm)));
+            delta += 1;
+            goto decode_success;
+         } 
+         /* else fall through */
+      }
+      break;
+
+
+   case 0xD8:
+      /* 66 0F D8 = PSUBUSB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "psubusb", Iop_QSub8Ux16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD9:
+      /* 66 0F D9 = PSUBUSW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "psubusw", Iop_QSub16Ux8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDA:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F DA = PMINUB -- 8x8 unsigned min */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                    vbi, pfx, delta, opc, "pminub", False );
+         goto decode_success;
+      }
+      /* 66 0F DA = PMINUB -- 8x16 unsigned min */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pminub", Iop_Min8Ux16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDB:
+      /* 66 0F DB = PAND */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "pand", Iop_AndV128 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDC:
+      /* 66 0F DC = PADDUSB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "paddusb", Iop_QAdd8Ux16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDD:
+      /* 66 0F DD = PADDUSW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "paddusw", Iop_QAdd16Ux8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDE:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F DE = PMAXUB -- 8x8 unsigned max */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                    vbi, pfx, delta, opc, "pmaxub", False );
+         goto decode_success;
+      }
+      /* 66 0F DE = PMAXUB -- 8x16 unsigned max */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pmaxub", Iop_Max8Ux16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDF:
+      /* 66 0F DF = PANDN */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all_invG( vbi, pfx, delta, "pandn", Iop_AndV128 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE0:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F E0 = PAVGB -- 8x8 unsigned Packed Average, with rounding */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                    vbi, pfx, delta, opc, "pavgb", False );
+         goto decode_success;
+      }
+      /* 66 0F E0 = PAVGB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pavgb", Iop_Avg8Ux16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE1:
+      /* 66 0F E1 = PSRAW by E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_shiftG_byE( vbi, pfx, delta, "psraw", Iop_SarN16x8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE2:
+      /* 66 0F E2 = PSRAD by E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_shiftG_byE( vbi, pfx, delta, "psrad", Iop_SarN32x4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE3:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F E3 = PAVGW -- 16x4 unsigned Packed Average, with rounding */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                    vbi, pfx, delta, opc, "pavgw", False );
+         goto decode_success;
+      }
+      /* 66 0F E3 = PAVGW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pavgw", Iop_Avg16Ux8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE4:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F E4 = PMULUH -- 16x4 hi-half of unsigned widening multiply */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                    vbi, pfx, delta, opc, "pmuluh", False );
+         goto decode_success;
+      }
+      /* 66 0F E4 = PMULHUW -- 16x8 hi-half of unsigned widening multiply */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pmulhuw", Iop_MulHi16Ux8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE5:
+      /* 66 0F E5 = PMULHW -- 16x8 hi-half of signed widening multiply */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pmulhw", Iop_MulHi16Sx8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE6:
+      /* 66 0F E6 = CVTTPD2DQ -- convert 2 x F64 in mem/xmm to 2 x I32 in
+         lo half xmm(G), and zero upper half, rounding towards zero */
+      /* F2 0F E6 = CVTPD2DQ -- convert 2 x F64 in mem/xmm to 2 x I32 in
+         lo half xmm(G), according to prevailing rounding mode, and zero
+         upper half */
+      if ( (haveF2no66noF3(pfx) && sz == 4)
+           || (have66noF2noF3(pfx) && sz == 2) ) {
+         delta = dis_CVTxPD2DQ_128( vbi, pfx, delta, False/*!isAvx*/,
+                                    toBool(sz == 2)/*r2zero*/);
+         goto decode_success;
+      }
+      /* F3 0F E6 = CVTDQ2PD -- convert 2 x I32 in mem/lo half xmm to 2 x
+         F64 in xmm(G) */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_CVTDQ2PD_128(vbi, pfx, delta, False/*!isAvx*/);
+         goto decode_success;
+      }
+      break;
+
+   case 0xE7:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F E7 = MOVNTQ -- for us, just a plain MMX store.  Note, the
+         Intel manual does not say anything about the usual business of
+         the FP reg tags getting trashed whenever an MMX insn happens.
+         So we just leave them alone. 
+      */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {
+            /* do_MMX_preamble(); Intel docs don't specify this */
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getMMXReg(gregLO3ofRM(modrm)) );
+            DIP("movntq %s,%s\n", dis_buf,
+                                  nameMMXReg(gregLO3ofRM(modrm)));
+            delta += alen;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      /* 66 0F E7 = MOVNTDQ -- for us, just a plain SSE store. */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            storeLE( mkexpr(addr), getXMMReg(gregOfRexRM(pfx,modrm)) );
+            DIP("movntdq %s,%s\n", dis_buf,
+                                   nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      break;
+
+   case 0xE8:
+      /* 66 0F E8 = PSUBSB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "psubsb", Iop_QSub8Sx16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE9:
+      /* 66 0F E9 = PSUBSW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "psubsw", Iop_QSub16Sx8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEA:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F EA = PMINSW -- 16x4 signed min */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                    vbi, pfx, delta, opc, "pminsw", False );
+         goto decode_success;
+      }
+      /* 66 0F EA = PMINSW -- 16x8 signed min */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pminsw", Iop_Min16Sx8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEB:
+      /* 66 0F EB = POR */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "por", Iop_OrV128 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEC:
+      /* 66 0F EC = PADDSB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "paddsb", Iop_QAdd8Sx16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xED:
+      /* 66 0F ED = PADDSW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "paddsw", Iop_QAdd16Sx8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEE:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F EE = PMAXSW -- 16x4 signed max */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                    vbi, pfx, delta, opc, "pmaxsw", False );
+         goto decode_success;
+      }
+      /* 66 0F EE = PMAXSW -- 16x8 signed max */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pmaxsw", Iop_Max16Sx8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEF:
+      /* 66 0F EF = PXOR */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_E_to_G_all( vbi, pfx, delta, "pxor", Iop_XorV128 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF1:
+      /* 66 0F F1 = PSLLW by E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_shiftG_byE( vbi, pfx, delta, "psllw", Iop_ShlN16x8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF2:
+      /* 66 0F F2 = PSLLD by E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_shiftG_byE( vbi, pfx, delta, "pslld", Iop_ShlN32x4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF3:
+      /* 66 0F F3 = PSLLQ by E */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSE_shiftG_byE( vbi, pfx, delta, "psllq", Iop_ShlN64x2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF4:
+      /* 66 0F F4 = PMULUDQ -- unsigned widening multiply of 32-lanes 0 x
+         0 to form lower 64-bit half and lanes 2 x 2 to form upper 64-bit
+         half */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         IRTemp sV = newTemp(Ity_V128);
+         IRTemp dV = newTemp(Ity_V128);
+         modrm = getUChar(delta);
+         UInt rG = gregOfRexRM(pfx,modrm);
+         assign( dV, getXMMReg(rG) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            delta += 1;
+            DIP("pmuludq %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("pmuludq %s,%s\n", dis_buf, nameXMMReg(rG));
+         }
+         putXMMReg( rG, mkexpr(math_PMULUDQ_128( sV, dV )) );
+         goto decode_success;
+      }
+      /* ***--- this is an MMX class insn introduced in SSE2 ---*** */
+      /* 0F F4 = PMULUDQ -- unsigned widening multiply of 32-lanes 0 x
+         0 to form 64-bit result */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         IRTemp sV = newTemp(Ity_I64);
+         IRTemp dV = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I32);
+         t0 = newTemp(Ity_I32);
+         modrm = getUChar(delta);
+
+         do_MMX_preamble();
+         assign( dV, getMMXReg(gregLO3ofRM(modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
+            delta += 1;
+            DIP("pmuludq %s,%s\n", nameMMXReg(eregLO3ofRM(modrm)),
+                                   nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("pmuludq %s,%s\n", dis_buf,
+                                   nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         assign( t0, unop(Iop_64to32, mkexpr(dV)) );
+         assign( t1, unop(Iop_64to32, mkexpr(sV)) );
+         putMMXReg( gregLO3ofRM(modrm),
+                    binop( Iop_MullU32, mkexpr(t0), mkexpr(t1) ) );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF5:
+      /* 66 0F F5 = PMADDWD -- Multiply and add packed integers from
+         E(xmm or mem) to G(xmm) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         IRTemp sV = newTemp(Ity_V128);
+         IRTemp dV = newTemp(Ity_V128);
+         modrm     = getUChar(delta);
+         UInt   rG = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            delta += 1;
+            DIP("pmaddwd %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("pmaddwd %s,%s\n", dis_buf, nameXMMReg(rG));
+         }
+         assign( dV, getXMMReg(rG) );
+         putXMMReg( rG, mkexpr(math_PMADDWD_128(dV, sV)) );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF6:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F F6 = PSADBW -- sum of 8Ux8 absolute differences */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                    vbi, pfx, delta, opc, "psadbw", False );
+         goto decode_success;
+      }
+      /* 66 0F F6 = PSADBW -- 2 x (8x8 -> 48 zeroes ++ u16) Sum Abs Diffs
+         from E(xmm or mem) to G(xmm) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         IRTemp sV  = newTemp(Ity_V128);
+         IRTemp dV  = newTemp(Ity_V128);
+         modrm = getUChar(delta);
+         UInt   rG   = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            delta += 1;
+            DIP("psadbw %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("psadbw %s,%s\n", dis_buf, nameXMMReg(rG));
+         }
+         assign( dV, getXMMReg(rG) );
+         putXMMReg( rG, mkexpr( math_PSADBW_128 ( dV, sV ) ) );
+
+         goto decode_success;
+      }
+      break;
+
+   case 0xF7:
+      /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+      /* 0F F7 = MASKMOVQ -- 8x8 masked store */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         Bool ok = False;
+         delta = dis_MMX( &ok, vbi, pfx, sz, delta-1 );
+         if (ok) goto decode_success;
+      }
+      /* 66 0F F7 = MASKMOVDQU -- store selected bytes of double quadword */
+      if (have66noF2noF3(pfx) && sz == 2 && epartIsReg(getUChar(delta))) {
+         delta = dis_MASKMOVDQU( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF8:
+      /* 66 0F F8 = PSUBB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta, 
+                                    "psubb", Iop_Sub8x16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF9:
+      /* 66 0F F9 = PSUBW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "psubw", Iop_Sub16x8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFA:
+      /* 66 0F FA = PSUBD */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "psubd", Iop_Sub32x4, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFB:
+      /* 66 0F FB = PSUBQ */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "psubq", Iop_Sub64x2, False );
+         goto decode_success;
+      }
+      /* ***--- this is an MMX class insn introduced in SSE2 ---*** */
+      /* 0F FB = PSUBQ -- sub 64x1 */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         do_MMX_preamble();
+         delta = dis_MMXop_regmem_to_reg ( 
+                   vbi, pfx, delta, opc, "psubq", False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFC:
+      /* 66 0F FC = PADDB */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "paddb", Iop_Add8x16, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFD:
+      /* 66 0F FD = PADDW */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "paddw", Iop_Add16x8, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFE:
+      /* 66 0F FE = PADDD */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "paddd", Iop_Add32x4, False );
+         goto decode_success;
+      }
+      break;
+
+   default:
+      goto decode_failure;
+
+   }
+
+  decode_failure:
+   *decode_OK = False;
+   return deltaIN;
+
+  decode_success:
+   *decode_OK = True;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level SSE3 (not SupSSE3): dis_ESC_0F__SSE3       ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* (V)MOVDDUP, 128-bit form: duplicate the low 64-bit lane of E
+   (xmm register, or a 64-bit memory load) into both halves of
+   xmm(G).  For the AVX form the upper YMM lanes of G are zeroed.
+   Returns the updated instruction offset. */
+static Long dis_MOVDDUP_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                              Long delta, Bool isAvx )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   IRTemp sV    = newTemp(Ity_V128);
+   IRTemp d0    = newTemp(Ity_I64);   /* the 64-bit lane to duplicate */
+   UChar  modrm = getUChar(delta);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( sV, getXMMReg(rE) );
+      DIP("%smovddup %s,%s\n",
+          isAvx ? "v" : "", nameXMMReg(rE), nameXMMReg(rG));
+      delta += 1;
+      assign ( d0, unop(Iop_V128to64, mkexpr(sV)) );
+   } else {
+      /* Memory form reads only 64 bits; MOVDDUP imposes no
+         alignment requirement, hence no alignment check here. */
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( d0, loadLE(Ity_I64, mkexpr(addr)) );
+      DIP("%smovddup %s,%s\n",
+          isAvx ? "v" : "", dis_buf, nameXMMReg(rG));
+      delta += alen;
+   }
+   /* Write d0 to both the hi and lo 64-bit halves of the result. */
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)
+      ( rG, binop(Iop_64HLtoV128,mkexpr(d0),mkexpr(d0)) );
+   return delta;
+}
+
+
+/* VMOVDDUP, 256-bit form: duplicate the 64-bit lanes 0 and 2 of E
+   (ymm register or 256-bit memory operand) into lane pairs (0,1)
+   and (2,3) of ymm(G) respectively.  Returns the updated
+   instruction offset. */
+static Long dis_MOVDDUP_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                              Long delta )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   IRTemp d0    = newTemp(Ity_I64);   /* source lane 0 -> result lanes 0,1 */
+   IRTemp d1    = newTemp(Ity_I64);   /* source lane 2 -> result lanes 2,3 */
+   UChar  modrm = getUChar(delta);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      DIP("vmovddup %s,%s\n", nameYMMReg(rE), nameYMMReg(rG));
+      delta += 1;
+      assign ( d0, getYMMRegLane64(rE, 0) );
+      assign ( d1, getYMMRegLane64(rE, 2) );
+   } else {
+      /* Memory form: the two source qwords live at offsets 0 and 16
+         of the 256-bit operand. */
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( d0, loadLE(Ity_I64, mkexpr(addr)) );
+      assign( d1, loadLE(Ity_I64, binop(Iop_Add64,
+                                        mkexpr(addr), mkU64(16))) );
+      DIP("vmovddup %s,%s\n", dis_buf, nameYMMReg(rG));
+      delta += alen;
+   }
+   putYMMRegLane64( rG, 0, mkexpr(d0) );
+   putYMMRegLane64( rG, 1, mkexpr(d0) );
+   putYMMRegLane64( rG, 2, mkexpr(d1) );
+   putYMMRegLane64( rG, 3, mkexpr(d1) );
+   return delta;
+}
+
+
+/* (V)MOVSLDUP / (V)MOVSHDUP, 128-bit form: duplicate 32-bit lanes of
+   E (xmm or 128-bit memory) into xmm(G).  isL selects MOVSLDUP
+   (result lanes 2:2:0:0) vs MOVSHDUP (result lanes 3:3:1:1).  For
+   the AVX form the upper YMM lanes of G are zeroed.  Returns the
+   updated instruction offset. */
+static Long dis_MOVSxDUP_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx, Bool isL )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   IRTemp sV    = newTemp(Ity_V128);
+   UChar  modrm = getUChar(delta);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   IRTemp s3, s2, s1, s0;
+   s3 = s2 = s1 = s0 = IRTemp_INVALID;
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( sV, getXMMReg(rE) );
+      DIP("%smovs%cdup %s,%s\n",
+          isAvx ? "v" : "", isL ? 'l' : 'h', nameXMMReg(rE), nameXMMReg(rG));
+      delta += 1;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      /* Legacy-SSE memory form requires 16-byte alignment; the AVX
+         form does not. */
+      if (!isAvx)
+         gen_SEGV_if_not_16_aligned( addr );
+      assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+      DIP("%smovs%cdup %s,%s\n",
+          isAvx ? "v" : "", isL ? 'l' : 'h', dis_buf, nameXMMReg(rG));
+      delta += alen;
+   }
+   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)
+      ( rG, isL ? mkV128from32s( s2, s2, s0, s0 )
+                : mkV128from32s( s3, s3, s1, s1 ) );
+   return delta;
+}
+
+
+/* VMOVSLDUP / VMOVSHDUP, 256-bit form: the same lane-duplication as
+   the 128-bit variant, applied independently to each 128-bit half of
+   E (ymm or 256-bit memory), written to ymm(G).  isL selects the
+   even-lane (MOVSLDUP) vs odd-lane (MOVSHDUP) duplication.  Returns
+   the updated instruction offset. */
+static Long dis_MOVSxDUP_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isL )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   IRTemp sV    = newTemp(Ity_V256);
+   UChar  modrm = getUChar(delta);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   IRTemp s7, s6, s5, s4, s3, s2, s1, s0;
+   s7 = s6 = s5 = s4 = s3 = s2 = s1 = s0 = IRTemp_INVALID;
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( sV, getYMMReg(rE) );
+      DIP("vmovs%cdup %s,%s\n",
+          isL ? 'l' : 'h', nameYMMReg(rE), nameYMMReg(rG));
+      delta += 1;
+   } else {
+      /* No alignment check: AVX memory forms do not require it. */
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
+      DIP("vmovs%cdup %s,%s\n",
+          isL ? 'l' : 'h', dis_buf, nameYMMReg(rG));
+      delta += alen;
+   }
+   breakupV256to32s( sV, &s7, &s6, &s5, &s4, &s3, &s2, &s1, &s0 );
+   /* Hi 128 bits from lanes 7..4, lo 128 bits from lanes 3..0. */
+   putYMMRegLane128( rG, 1, isL ? mkV128from32s( s6, s6, s4, s4 )
+                                : mkV128from32s( s7, s7, s5, s5 ) );
+   putYMMRegLane128( rG, 0, isL ? mkV128from32s( s2, s2, s0, s0 )
+                                : mkV128from32s( s3, s3, s1, s1 ) );
+   return delta;
+}
+
+
+/* Build IR for HADDPS/HSUBPS: pairwise horizontal add (or subtract,
+   per isAdd) of the 32-bit float lanes of dV and sV.  The horizontal
+   operation is expressed by gathering the even-numbered lanes of
+   both operands into one vector and the odd-numbered lanes into
+   another, then doing a single vertical 32Fx4 op.  Returns a fresh
+   V128 temp holding the result. */
+static IRTemp math_HADDPS_128 ( IRTemp dV, IRTemp sV, Bool isAdd )
+{
+   IRTemp s3 = IRTemp_INVALID, s2 = IRTemp_INVALID,
+          s1 = IRTemp_INVALID, s0 = IRTemp_INVALID;
+   IRTemp d3 = IRTemp_INVALID, d2 = IRTemp_INVALID,
+          d1 = IRTemp_INVALID, d0 = IRTemp_INVALID;
+   IRTemp evensV = newTemp(Ity_V128);
+   IRTemp oddsV  = newTemp(Ity_V128);
+   IRTemp rmode  = newTemp(Ity_I32);
+
+   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
+   breakupV128to32s( dV, &d3, &d2, &d1, &d0 );
+
+   assign( evensV, mkV128from32s( s2, s0, d2, d0 ) );
+   assign( oddsV,  mkV128from32s( s3, s1, d3, d1 ) );
+
+   IRTemp res = newTemp(Ity_V128);
+   assign( rmode, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+   assign( res, triop( isAdd ? Iop_Add32Fx4 : Iop_Sub32Fx4,
+                       mkexpr(rmode), mkexpr(evensV), mkexpr(oddsV) ) );
+   return res;
+}
+
+
+/* Build IR for HADDPD/HSUBPD: pairwise horizontal add (or subtract,
+   per isAdd) of the 64-bit float lanes of dV and sV.  The low lanes
+   of both operands are paired into one vector and the high lanes
+   into another, so a single vertical 64Fx2 op yields the horizontal
+   result.  Returns a fresh V128 temp holding it. */
+static IRTemp math_HADDPD_128 ( IRTemp dV, IRTemp sV, Bool isAdd )
+{
+   IRTemp s1 = IRTemp_INVALID, s0 = IRTemp_INVALID;
+   IRTemp d1 = IRTemp_INVALID, d0 = IRTemp_INVALID;
+   IRTemp loPairV = newTemp(Ity_V128);
+   IRTemp hiPairV = newTemp(Ity_V128);
+   IRTemp rmode   = newTemp(Ity_I32);
+
+   breakupV128to64s( sV, &s1, &s0 );
+   breakupV128to64s( dV, &d1, &d0 );
+
+   assign( loPairV, binop(Iop_64HLtoV128, mkexpr(s0), mkexpr(d0)) );
+   assign( hiPairV, binop(Iop_64HLtoV128, mkexpr(s1), mkexpr(d1)) );
+
+   IRTemp res = newTemp(Ity_V128);
+   assign( rmode, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+   assign( res, triop( isAdd ? Iop_Add64Fx2 : Iop_Sub64Fx2,
+                       mkexpr(rmode), mkexpr(loPairV), mkexpr(hiPairV) ) );
+   return res;
+}
+
+
+/* Decode SSE3 (not SupSSE3/SSSE3) instructions in the 0F escape
+   space: MOVSLDUP, MOVSHDUP, MOVDDUP, HADDPS/HADDPD, HSUBPS/HSUBPD,
+   ADDSUBPS/ADDSUBPD and LDDQU.
+
+   On success, sets *decode_OK to True and returns the instruction
+   offset advanced past the decoded insn; on failure, sets *decode_OK
+   to False and returns deltaIN unchanged.
+
+   Fix vs previous version: in the F2 0F D0 (ADDSUBPS) arm the modrm
+   byte was redundantly fetched twice; the duplicate fetch has been
+   removed (no behavioral change, same byte both times). */
+__attribute__((noinline))
+static
+Long dis_ESC_0F__SSE3 ( Bool* decode_OK,
+                        const VexAbiInfo* vbi,
+                        Prefix pfx, Int sz, Long deltaIN )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   UChar  modrm = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   *decode_OK = False;
+
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   switch (opc) {
+
+   case 0x12:
+      /* F3 0F 12 = MOVSLDUP -- move from E (mem or xmm) to G (xmm),
+         duplicating some lanes (2:2:0:0). */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_MOVSxDUP_128( vbi, pfx, delta, False/*!isAvx*/,
+                                   True/*isL*/ );
+         goto decode_success;
+      }
+      /* F2 0F 12 = MOVDDUP -- move from E (mem or xmm) to G (xmm),
+         duplicating some lanes (0:1:0:1). */
+      if (haveF2no66noF3(pfx) 
+          && (sz == 4 || /* ignore redundant REX.W */ sz == 8)) {
+         delta = dis_MOVDDUP_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x16:
+      /* F3 0F 16 = MOVSHDUP -- move from E (mem or xmm) to G (xmm),
+         duplicating some lanes (3:3:1:1). */
+      if (haveF3no66noF2(pfx) && sz == 4) {
+         delta = dis_MOVSxDUP_128( vbi, pfx, delta, False/*!isAvx*/,
+                                   False/*!isL*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x7C:
+   case 0x7D:
+      /* F2 0F 7C = HADDPS -- 32x4 add across from E (mem or xmm) to G (xmm). */
+      /* F2 0F 7D = HSUBPS -- 32x4 sub across from E (mem or xmm) to G (xmm). */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         IRTemp eV     = newTemp(Ity_V128);
+         IRTemp gV     = newTemp(Ity_V128);
+         Bool   isAdd  = opc == 0x7C;
+         const HChar* str = isAdd ? "add" : "sub";
+         modrm         = getUChar(delta);
+         UInt   rG     = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getXMMReg(rE) );
+            DIP("h%sps %s,%s\n", str, nameXMMReg(rE), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            /* Note: no alignment check for the HADDPS/HSUBPS memory
+               form (matches hardware). */
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("h%sps %s,%s\n", str, dis_buf, nameXMMReg(rG));
+            delta += alen;
+         }
+
+         assign( gV, getXMMReg(rG) );
+         putXMMReg( rG, mkexpr( math_HADDPS_128 ( gV, eV, isAdd ) ) );
+         goto decode_success;
+      }
+      /* 66 0F 7C = HADDPD -- 64x2 add across from E (mem or xmm) to G (xmm). */
+      /* 66 0F 7D = HSUBPD -- 64x2 sub across from E (mem or xmm) to G (xmm). */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         IRTemp eV     = newTemp(Ity_V128);
+         IRTemp gV     = newTemp(Ity_V128);
+         Bool   isAdd  = opc == 0x7C;
+         const HChar* str = isAdd ? "add" : "sub";
+         modrm         = getUChar(delta);
+         UInt   rG     = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getXMMReg(rE) );
+            DIP("h%spd %s,%s\n", str, nameXMMReg(rE), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("h%spd %s,%s\n", str, dis_buf, nameXMMReg(rG));
+            delta += alen;
+         }
+
+         assign( gV, getXMMReg(rG) );
+         putXMMReg( rG, mkexpr( math_HADDPD_128 ( gV, eV, isAdd ) ) );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD0:
+      /* 66 0F D0 = ADDSUBPD -- 64x4 +/- from E (mem or xmm) to G (xmm). */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         IRTemp eV   = newTemp(Ity_V128);
+         IRTemp gV   = newTemp(Ity_V128);
+         modrm       = getUChar(delta);
+         UInt   rG   = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getXMMReg(rE) );
+            DIP("addsubpd %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("addsubpd %s,%s\n", dis_buf, nameXMMReg(rG));
+            delta += alen;
+         }
+
+         assign( gV, getXMMReg(rG) );
+         putXMMReg( rG, mkexpr( math_ADDSUBPD_128 ( gV, eV ) ) );
+         goto decode_success;
+      }
+      /* F2 0F D0 = ADDSUBPS -- 32x4 +/-/+/- from E (mem or xmm) to G (xmm). */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         IRTemp eV   = newTemp(Ity_V128);
+         IRTemp gV   = newTemp(Ity_V128);
+         modrm       = getUChar(delta);
+         UInt   rG   = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getXMMReg(rE) );
+            DIP("addsubps %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("addsubps %s,%s\n", dis_buf, nameXMMReg(rG));
+            delta += alen;
+         }
+
+         assign( gV, getXMMReg(rG) );
+         putXMMReg( rG, mkexpr( math_ADDSUBPS_128 ( gV, eV ) ) );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF0:
+      /* F2 0F F0 = LDDQU -- move from E (mem or xmm) to G (xmm). */
+      if (haveF2no66noF3(pfx) && sz == 4) {
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            /* LDDQU has no register-source form. */
+            goto decode_failure;
+         } else {
+            /* Unaligned 128-bit load; deliberately no alignment
+               check (that is the point of LDDQU). */
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putXMMReg( gregOfRexRM(pfx,modrm), 
+                       loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("lddqu %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      break;
+
+   default:
+      goto decode_failure;
+
+   }
+
+  decode_failure:
+   *decode_OK = False;
+   return deltaIN;
+
+  decode_success:
+   *decode_OK = True;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level SSSE3: dis_ESC_0F38__SupSSE3               ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* Generate IR for PSHUFB on one 128-bit value.  dV is the data to be
+   permuted, sV supplies the per-byte indices.  Result byte i is
+   dV[sV[i] & 15], except that it is forced to zero when bit 7 of
+   sV[i] is set.  Since Iop_Perm8x8 can only select within a single
+   64-bit value, each half of the result is built by permuting both
+   dHi and dLo with the (index & 7) values, and then using bit 3 of
+   each index (maskBit3hi/maskBit3lo) to choose which of the two
+   permuted values supplies each byte.  Finally mask0x80hi/lo zeroes
+   the lanes whose index has bit 7 set. */
+static
+IRTemp math_PSHUFB_XMM ( IRTemp dV/*data to perm*/, IRTemp sV/*perm*/ )
+{
+   IRTemp sHi        = newTemp(Ity_I64);
+   IRTemp sLo        = newTemp(Ity_I64);
+   IRTemp dHi        = newTemp(Ity_I64);
+   IRTemp dLo        = newTemp(Ity_I64);
+   IRTemp rHi        = newTemp(Ity_I64);
+   IRTemp rLo        = newTemp(Ity_I64);
+   IRTemp sevens     = newTemp(Ity_I64);
+   IRTemp mask0x80hi = newTemp(Ity_I64);
+   IRTemp mask0x80lo = newTemp(Ity_I64);
+   IRTemp maskBit3hi = newTemp(Ity_I64);
+   IRTemp maskBit3lo = newTemp(Ity_I64);
+   IRTemp sAnd7hi    = newTemp(Ity_I64);
+   IRTemp sAnd7lo    = newTemp(Ity_I64);
+   IRTemp permdHi    = newTemp(Ity_I64);
+   IRTemp permdLo    = newTemp(Ity_I64);
+   IRTemp res        = newTemp(Ity_V128);
+
+   /* Split both operands into 64-bit halves. */
+   assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
+   assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
+   assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+   assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+   assign( sevens, mkU64(0x0707070707070707ULL) );
+
+   /* mask0x80hi = Not(SarN8x8(sHi,7))
+      maskBit3hi = SarN8x8(ShlN8x8(sHi,4),7)
+      sAnd7hi    = And(sHi,sevens)
+      permdHi    = Or( And(Perm8x8(dHi,sAnd7hi),maskBit3hi),
+      And(Perm8x8(dLo,sAnd7hi),Not(maskBit3hi)) )
+      rHi        = And(permdHi,mask0x80hi)
+   */
+   assign(
+      mask0x80hi,
+      unop(Iop_Not64, binop(Iop_SarN8x8,mkexpr(sHi),mkU8(7))));
+
+   assign(
+      maskBit3hi,
+      binop(Iop_SarN8x8,
+            binop(Iop_ShlN8x8,mkexpr(sHi),mkU8(4)),
+            mkU8(7)));
+
+   assign(sAnd7hi, binop(Iop_And64,mkexpr(sHi),mkexpr(sevens)));
+
+   assign(
+      permdHi,
+      binop(
+         Iop_Or64,
+         binop(Iop_And64,
+               binop(Iop_Perm8x8,mkexpr(dHi),mkexpr(sAnd7hi)),
+               mkexpr(maskBit3hi)),
+         binop(Iop_And64,
+               binop(Iop_Perm8x8,mkexpr(dLo),mkexpr(sAnd7hi)),
+               unop(Iop_Not64,mkexpr(maskBit3hi))) ));
+
+   assign(rHi, binop(Iop_And64,mkexpr(permdHi),mkexpr(mask0x80hi)) );
+
+   /* And the same for the lower half of the result.  What fun. */
+
+   assign(
+      mask0x80lo,
+      unop(Iop_Not64, binop(Iop_SarN8x8,mkexpr(sLo),mkU8(7))));
+
+   assign(
+      maskBit3lo,
+      binop(Iop_SarN8x8,
+            binop(Iop_ShlN8x8,mkexpr(sLo),mkU8(4)),
+            mkU8(7)));
+
+   assign(sAnd7lo, binop(Iop_And64,mkexpr(sLo),mkexpr(sevens)));
+
+   assign(
+      permdLo,
+      binop(
+         Iop_Or64,
+         binop(Iop_And64,
+               binop(Iop_Perm8x8,mkexpr(dHi),mkexpr(sAnd7lo)),
+               mkexpr(maskBit3lo)),
+         binop(Iop_And64,
+               binop(Iop_Perm8x8,mkexpr(dLo),mkexpr(sAnd7lo)),
+               unop(Iop_Not64,mkexpr(maskBit3lo))) ));
+
+   assign(rLo, binop(Iop_And64,mkexpr(permdLo),mkexpr(mask0x80lo)) );
+
+   assign(res, binop(Iop_64HLtoV128, mkexpr(rHi), mkexpr(rLo)));
+   return res;
+}
+
+
+/* Generate IR for a 256-bit PSHUFB by applying the 128-bit version
+   independently to each 128-bit half — no bytes cross between the
+   two halves. */
+static
+IRTemp math_PSHUFB_YMM ( IRTemp dV/*data to perm*/, IRTemp sV/*perm*/ )
+{
+   IRTemp sHi, sLo, dHi, dLo;
+   sHi = sLo = dHi = dLo = IRTemp_INVALID;
+   breakupV256toV128s( dV, &dHi, &dLo);
+   breakupV256toV128s( sV, &sHi, &sLo);
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, binop(Iop_V128HLtoV256,
+                     mkexpr(math_PSHUFB_XMM(dHi, sHi)),
+                     mkexpr(math_PSHUFB_XMM(dLo, sLo))));
+   return res;
+}
+
+
+/* Decode the 128-bit horizontal add/sub group -- PHADDW (opc 0x01),
+   PHADDD (0x02), PHADDSW (0x03), PHSUBW (0x05), PHSUBD (0x06),
+   PHSUBSW (0x07) -- in either the SSE form (isAvx == False, source
+   and destination both rG) or the VEX.128 form (isAvx == True,
+   second source taken from the vvvv field).  For the non-AVX case a
+   SEGV is generated if a memory source is not 16-aligned.  Returns
+   the updated instruction offset.  The horizontal operation is
+   expressed by first gathering even and odd 16/32-bit lanes with
+   opCatE/opCatO and then applying the vertical op opV64. */
+static Long dis_PHADD_128 ( const VexAbiInfo* vbi, Prefix pfx, Long delta,
+                            Bool isAvx, UChar opc )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   const HChar* str = "???";
+   IROp   opV64  = Iop_INVALID;
+   IROp   opCatO = Iop_CatOddLanes16x4;
+   IROp   opCatE = Iop_CatEvenLanes16x4;
+   IRTemp sV     = newTemp(Ity_V128);
+   IRTemp dV     = newTemp(Ity_V128);
+   IRTemp sHi    = newTemp(Ity_I64);
+   IRTemp sLo    = newTemp(Ity_I64);
+   IRTemp dHi    = newTemp(Ity_I64);
+   IRTemp dLo    = newTemp(Ity_I64);
+   UChar  modrm  = getUChar(delta);
+   UInt   rG     = gregOfRexRM(pfx,modrm);
+   UInt   rV     = isAvx ? getVexNvvvv(pfx) : rG;
+
+   switch (opc) {
+      case 0x01: opV64 = Iop_Add16x4;   str = "addw";  break;
+      case 0x02: opV64 = Iop_Add32x2;   str = "addd";  break;
+      case 0x03: opV64 = Iop_QAdd16Sx4; str = "addsw"; break;
+      case 0x05: opV64 = Iop_Sub16x4;   str = "subw";  break;
+      case 0x06: opV64 = Iop_Sub32x2;   str = "subd";  break;
+      case 0x07: opV64 = Iop_QSub16Sx4; str = "subsw"; break;
+      default: vassert(0);
+   }
+   /* The 32-bit variants need 32-bit lane gathering ops instead. */
+   if (opc == 0x02 || opc == 0x06) {
+      opCatO = Iop_InterleaveHI32x2;
+      opCatE = Iop_InterleaveLO32x2;
+   }
+
+   assign( dV, getXMMReg(rV) );
+
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( sV, getXMMReg(rE) );
+      DIP("%sph%s %s,%s\n", isAvx ? "v" : "", str,
+          nameXMMReg(rE), nameXMMReg(rG));
+      delta += 1;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      if (!isAvx)
+         gen_SEGV_if_not_16_aligned( addr );
+      assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+      DIP("%sph%s %s,%s\n", isAvx ? "v" : "", str,
+          dis_buf, nameXMMReg(rG));
+      delta += alen;
+   }
+
+   assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
+   assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
+   assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+   assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+   /* This isn't a particularly efficient way to compute the
+      result, but at least it avoids a proliferation of IROps,
+      hence avoids complicating all the backends. */
+   
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)
+      ( rG, 
+        binop(Iop_64HLtoV128,
+              binop(opV64,
+                    binop(opCatE,mkexpr(sHi),mkexpr(sLo)),
+                    binop(opCatO,mkexpr(sHi),mkexpr(sLo)) ),
+              binop(opV64,
+                    binop(opCatE,mkexpr(dHi),mkexpr(dLo)),
+                    binop(opCatO,mkexpr(dHi),mkexpr(dLo)) ) ) );
+   return delta;
+}
+
+
+/* Decode the 256-bit (VEX.256) horizontal add/sub group -- VPHADDW
+   (opc 0x01), VPHADDD (0x02), VPHADDSW (0x03), VPHSUBW (0x05),
+   VPHSUBD (0x06), VPHSUBSW (0x07).  Second source is taken from the
+   vvvv field; destination is rG.  Returns the updated instruction
+   offset.  As in dis_PHADD_128, even/odd lanes are gathered with
+   opCatE/opCatO and combined with the vertical op opV64, here over
+   four 64-bit chunks. */
+static Long dis_PHADD_256 ( const VexAbiInfo* vbi, Prefix pfx, Long delta,
+                            UChar opc )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   const HChar* str = "???";
+   IROp   opV64  = Iop_INVALID;
+   IROp   opCatO = Iop_CatOddLanes16x4;
+   IROp   opCatE = Iop_CatEvenLanes16x4;
+   IRTemp sV     = newTemp(Ity_V256);
+   IRTemp dV     = newTemp(Ity_V256);
+   IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+   s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+   UChar  modrm  = getUChar(delta);
+   UInt   rG     = gregOfRexRM(pfx,modrm);
+   UInt   rV     = getVexNvvvv(pfx);
+
+   switch (opc) {
+      case 0x01: opV64 = Iop_Add16x4;   str = "addw";  break;
+      case 0x02: opV64 = Iop_Add32x2;   str = "addd";  break;
+      case 0x03: opV64 = Iop_QAdd16Sx4; str = "addsw"; break;
+      case 0x05: opV64 = Iop_Sub16x4;   str = "subw";  break;
+      case 0x06: opV64 = Iop_Sub32x2;   str = "subd";  break;
+      case 0x07: opV64 = Iop_QSub16Sx4; str = "subsw"; break;
+      default: vassert(0);
+   }
+   /* The 32-bit variants need 32-bit lane gathering ops instead. */
+   if (opc == 0x02 || opc == 0x06) {
+      opCatO = Iop_InterleaveHI32x2;
+      opCatE = Iop_InterleaveLO32x2;
+   }
+
+   assign( dV, getYMMReg(rV) );
+
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( sV, getYMMReg(rE) );
+      DIP("vph%s %s,%s\n", str, nameYMMReg(rE), nameYMMReg(rG));
+      delta += 1;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
+      DIP("vph%s %s,%s\n", str, dis_buf, nameYMMReg(rG));
+      delta += alen;
+   }
+
+   breakupV256to64s( dV, &d3, &d2, &d1, &d0 );
+   breakupV256to64s( sV, &s3, &s2, &s1, &s0 );
+
+   /* This isn't a particularly efficient way to compute the
+      result, but at least it avoids a proliferation of IROps,
+      hence avoids complicating all the backends. */
+
+   putYMMReg( rG,
+              binop(Iop_V128HLtoV256,
+                    binop(Iop_64HLtoV128,
+                          binop(opV64,
+                                binop(opCatE,mkexpr(s3),mkexpr(s2)),
+                                binop(opCatO,mkexpr(s3),mkexpr(s2)) ),
+                          binop(opV64,
+                                binop(opCatE,mkexpr(d3),mkexpr(d2)),
+                                binop(opCatO,mkexpr(d3),mkexpr(d2)) ) ),
+                    binop(Iop_64HLtoV128,
+                          binop(opV64,
+                                binop(opCatE,mkexpr(s1),mkexpr(s0)),
+                                binop(opCatO,mkexpr(s1),mkexpr(s0)) ),
+                          binop(opV64,
+                                binop(opCatE,mkexpr(d1),mkexpr(d0)),
+                                binop(opCatO,mkexpr(d1),mkexpr(d0)) ) ) ) );
+   return delta;
+}
+
+
+/* Generate IR for a 128-bit PMADDUBSW: multiply each unsigned byte
+   of dV by the corresponding signed byte of sV, then add adjacent
+   16-bit products pairwise with signed saturation.  The odd/even
+   bytes are extracted into 16-bit lanes using arithmetic shifts
+   (sign-extend, for sV) and logical shifts (zero-extend, for dV). */
+static IRTemp math_PMADDUBSW_128 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp sVoddsSX  = newTemp(Ity_V128);
+   IRTemp sVevensSX = newTemp(Ity_V128);
+   IRTemp dVoddsZX  = newTemp(Ity_V128);
+   IRTemp dVevensZX = newTemp(Ity_V128);
+   /* compute dV unsigned x sV signed */
+   assign( sVoddsSX, binop(Iop_SarN16x8, mkexpr(sV), mkU8(8)) );
+   assign( sVevensSX, binop(Iop_SarN16x8, 
+                            binop(Iop_ShlN16x8, mkexpr(sV), mkU8(8)),
+                            mkU8(8)) );
+   assign( dVoddsZX, binop(Iop_ShrN16x8, mkexpr(dV), mkU8(8)) );
+   assign( dVevensZX, binop(Iop_ShrN16x8,
+                            binop(Iop_ShlN16x8, mkexpr(dV), mkU8(8)),
+                            mkU8(8)) );
+
+   IRTemp res = newTemp(Ity_V128);
+   assign( res, binop(Iop_QAdd16Sx8,
+                      binop(Iop_Mul16x8, mkexpr(sVoddsSX), mkexpr(dVoddsZX)),
+                      binop(Iop_Mul16x8, mkexpr(sVevensSX), mkexpr(dVevensZX))
+                     )
+         );
+   return res;
+}
+
+
+/* Generate IR for a 256-bit PMADDUBSW by applying the 128-bit
+   version independently to each 128-bit half. */
+static
+IRTemp math_PMADDUBSW_256 ( IRTemp dV, IRTemp sV )
+{
+   IRTemp sHi, sLo, dHi, dLo;
+   sHi = sLo = dHi = dLo = IRTemp_INVALID;
+   breakupV256toV128s( dV, &dHi, &dLo);
+   breakupV256toV128s( sV, &sHi, &sLo);
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, binop(Iop_V128HLtoV256,
+                     mkexpr(math_PMADDUBSW_128(dHi, sHi)),
+                     mkexpr(math_PMADDUBSW_128(dLo, sLo))));
+   return res;
+}
+
+
+/* Top-level decoder for the SSSE3 instructions in the 0F 38 escape
+   space: PSHUFB (0x00), PHADDW/PHADDD/PHADDSW/PHSUBW/PHSUBD/PHSUBSW
+   (0x01-0x03, 0x05-0x07), PMADDUBSW (0x04), PSIGNB/W/D (0x08-0x0A),
+   PMULHRSW (0x0B) and PABSB/W/D (0x1C-0x1E), each in both the
+   66-prefixed XMM form and the unprefixed MMX form.  On a successful
+   decode, sets *decode_OK to True and returns the updated offset;
+   otherwise leaves *decode_OK False and returns deltaIN unchanged. */
+__attribute__((noinline))
+static
+Long dis_ESC_0F38__SupSSE3 ( Bool* decode_OK,
+                             const VexAbiInfo* vbi,
+                             Prefix pfx, Int sz, Long deltaIN )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   UChar  modrm = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   *decode_OK = False;
+
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   switch (opc) {
+
+   case 0x00:
+      /* 66 0F 38 00 = PSHUFB -- Packed Shuffle Bytes 8x16 (XMM) */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /*redundant REX.W*/ sz == 8)) {
+         IRTemp sV = newTemp(Ity_V128);
+         IRTemp dV = newTemp(Ity_V128);
+
+         modrm = getUChar(delta);
+         assign( dV, getXMMReg(gregOfRexRM(pfx,modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getXMMReg(eregOfRexRM(pfx,modrm)) );
+            delta += 1;
+            DIP("pshufb %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("pshufb %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+
+         IRTemp res = math_PSHUFB_XMM( dV, sV );
+         putXMMReg(gregOfRexRM(pfx,modrm), mkexpr(res));
+         goto decode_success;
+      }
+      /* 0F 38 00 = PSHUFB -- Packed Shuffle Bytes 8x8 (MMX) */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         IRTemp sV      = newTemp(Ity_I64);
+         IRTemp dV      = newTemp(Ity_I64);
+
+         modrm = getUChar(delta);
+         do_MMX_preamble();
+         assign( dV, getMMXReg(gregLO3ofRM(modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
+            delta += 1;
+            DIP("pshufb %s,%s\n", nameMMXReg(eregLO3ofRM(modrm)),
+                                  nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("pshufb %s,%s\n", dis_buf,
+                                  nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         putMMXReg(
+            gregLO3ofRM(modrm),
+            binop(
+               Iop_And64,
+               /* permute the lanes */
+               binop(
+                  Iop_Perm8x8,
+                  mkexpr(dV),
+                  binop(Iop_And64, mkexpr(sV), mkU64(0x0707070707070707ULL))
+               ),
+               /* mask off lanes which have (index & 0x80) == 0x80 */
+               unop(Iop_Not64, binop(Iop_SarN8x8, mkexpr(sV), mkU8(7)))
+            )
+         );
+         goto decode_success;
+      }
+      break;
+
+   case 0x01:
+   case 0x02:
+   case 0x03:
+   case 0x05:
+   case 0x06:
+   case 0x07:
+      /* 66 0F 38 01 = PHADDW -- 16x8 add across from E (mem or xmm) and
+         G to G (xmm). */
+      /* 66 0F 38 02 = PHADDD -- 32x4 add across from E (mem or xmm) and
+         G to G (xmm). */
+      /* 66 0F 38 03 = PHADDSW -- 16x8 signed qadd across from E (mem or
+         xmm) and G to G (xmm). */
+      /* 66 0F 38 05 = PHSUBW -- 16x8 sub across from E (mem or xmm) and
+         G to G (xmm). */
+      /* 66 0F 38 06 = PHSUBD -- 32x4 sub across from E (mem or xmm) and
+         G to G (xmm). */
+      /* 66 0F 38 07 = PHSUBSW -- 16x8 signed qsub across from E (mem or
+         xmm) and G to G (xmm). */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /*redundant REX.W*/ sz == 8)) {
+         delta = dis_PHADD_128( vbi, pfx, delta, False/*isAvx*/, opc );
+         goto decode_success;
+      }
+      /* ***--- these are MMX class insns introduced in SSSE3 ---*** */
+      /* 0F 38 01 = PHADDW -- 16x4 add across from E (mem or mmx) and G
+         to G (mmx). */
+      /* 0F 38 02 = PHADDD -- 32x2 add across from E (mem or mmx) and G
+         to G (mmx). */
+      /* 0F 38 03 = PHADDSW -- 16x4 signed qadd across from E (mem or
+         mmx) and G to G (mmx). */
+      /* 0F 38 05 = PHSUBW -- 16x4 sub across from E (mem or mmx) and G
+         to G (mmx). */
+      /* 0F 38 06 = PHSUBD -- 32x2 sub across from E (mem or mmx) and G
+         to G (mmx). */
+      /* 0F 38 07 = PHSUBSW -- 16x4 signed qsub across from E (mem or
+         mmx) and G to G (mmx). */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         const HChar* str = "???";
+         IROp   opV64  = Iop_INVALID;
+         IROp   opCatO = Iop_CatOddLanes16x4;
+         IROp   opCatE = Iop_CatEvenLanes16x4;
+         IRTemp sV     = newTemp(Ity_I64);
+         IRTemp dV     = newTemp(Ity_I64);
+
+         modrm = getUChar(delta);
+
+         switch (opc) {
+            case 0x01: opV64 = Iop_Add16x4;   str = "addw";  break;
+            case 0x02: opV64 = Iop_Add32x2;   str = "addd";  break;
+            case 0x03: opV64 = Iop_QAdd16Sx4; str = "addsw"; break;
+            case 0x05: opV64 = Iop_Sub16x4;   str = "subw";  break;
+            case 0x06: opV64 = Iop_Sub32x2;   str = "subd";  break;
+            case 0x07: opV64 = Iop_QSub16Sx4; str = "subsw"; break;
+            default: vassert(0);
+         }
+         /* The 32-bit variants need 32-bit lane gathering ops. */
+         if (opc == 0x02 || opc == 0x06) {
+            opCatO = Iop_InterleaveHI32x2;
+            opCatE = Iop_InterleaveLO32x2;
+         }
+
+         do_MMX_preamble();
+         assign( dV, getMMXReg(gregLO3ofRM(modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
+            delta += 1;
+            DIP("ph%s %s,%s\n", str, nameMMXReg(eregLO3ofRM(modrm)),
+                                     nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("ph%s %s,%s\n", str, dis_buf,
+                                     nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         putMMXReg(
+            gregLO3ofRM(modrm),
+            binop(opV64,
+                  binop(opCatE,mkexpr(sV),mkexpr(dV)),
+                  binop(opCatO,mkexpr(sV),mkexpr(dV))
+            )
+         );
+         goto decode_success;
+      }
+      break;
+
+   case 0x04:
+      /* 66 0F 38 04 = PMADDUBSW -- Multiply and Add Packed Signed and
+         Unsigned Bytes (XMM) */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /*redundant REX.W*/ sz == 8)) {
+         IRTemp sV = newTemp(Ity_V128);
+         IRTemp dV = newTemp(Ity_V128);
+         modrm     = getUChar(delta);
+         UInt   rG = gregOfRexRM(pfx,modrm);
+
+         assign( dV, getXMMReg(rG) );
+
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            delta += 1;
+            DIP("pmaddubsw %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("pmaddubsw %s,%s\n", dis_buf, nameXMMReg(rG));
+         }
+
+         putXMMReg( rG, mkexpr( math_PMADDUBSW_128( dV, sV ) ) );
+         goto decode_success;
+      }
+      /* 0F 38 04 = PMADDUBSW -- Multiply and Add Packed Signed and
+         Unsigned Bytes (MMX) */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         IRTemp sV        = newTemp(Ity_I64);
+         IRTemp dV        = newTemp(Ity_I64);
+         IRTemp sVoddsSX  = newTemp(Ity_I64);
+         IRTemp sVevensSX = newTemp(Ity_I64);
+         IRTemp dVoddsZX  = newTemp(Ity_I64);
+         IRTemp dVevensZX = newTemp(Ity_I64);
+
+         modrm = getUChar(delta);
+         do_MMX_preamble();
+         assign( dV, getMMXReg(gregLO3ofRM(modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
+            delta += 1;
+            DIP("pmaddubsw %s,%s\n", nameMMXReg(eregLO3ofRM(modrm)),
+                                     nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("pmaddubsw %s,%s\n", dis_buf,
+                                     nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         /* compute dV unsigned x sV signed */
+         assign( sVoddsSX,
+                 binop(Iop_SarN16x4, mkexpr(sV), mkU8(8)) );
+         assign( sVevensSX,
+                 binop(Iop_SarN16x4, 
+                       binop(Iop_ShlN16x4, mkexpr(sV), mkU8(8)), 
+                       mkU8(8)) );
+         assign( dVoddsZX,
+                 binop(Iop_ShrN16x4, mkexpr(dV), mkU8(8)) );
+         assign( dVevensZX,
+                 binop(Iop_ShrN16x4,
+                       binop(Iop_ShlN16x4, mkexpr(dV), mkU8(8)),
+                       mkU8(8)) );
+
+         putMMXReg(
+            gregLO3ofRM(modrm),
+            binop(Iop_QAdd16Sx4,
+                  binop(Iop_Mul16x4, mkexpr(sVoddsSX), mkexpr(dVoddsZX)),
+                  binop(Iop_Mul16x4, mkexpr(sVevensSX), mkexpr(dVevensZX))
+            )
+         );
+         goto decode_success;
+      }
+      break;
+
+   case 0x08:
+   case 0x09:
+   case 0x0A:
+      /* 66 0F 38 08 = PSIGNB -- Packed Sign 8x16 (XMM) */
+      /* 66 0F 38 09 = PSIGNW -- Packed Sign 16x8 (XMM) */
+      /* 66 0F 38 0A = PSIGND -- Packed Sign 32x4 (XMM) */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /*redundant REX.W*/ sz == 8)) {
+         IRTemp sV      = newTemp(Ity_V128);
+         IRTemp dV      = newTemp(Ity_V128);
+         IRTemp sHi     = newTemp(Ity_I64);
+         IRTemp sLo     = newTemp(Ity_I64);
+         IRTemp dHi     = newTemp(Ity_I64);
+         IRTemp dLo     = newTemp(Ity_I64);
+         const HChar* str = "???";
+         Int    laneszB = 0;
+
+         switch (opc) {
+            case 0x08: laneszB = 1; str = "b"; break;
+            case 0x09: laneszB = 2; str = "w"; break;
+            case 0x0A: laneszB = 4; str = "d"; break;
+            default: vassert(0);
+         }
+
+         modrm = getUChar(delta);
+         assign( dV, getXMMReg(gregOfRexRM(pfx,modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getXMMReg(eregOfRexRM(pfx,modrm)) );
+            delta += 1;
+            DIP("psign%s %s,%s\n", str, nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                        nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("psign%s %s,%s\n", str, dis_buf,
+                                        nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+
+         assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
+         assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
+         assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+         assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+         putXMMReg(
+            gregOfRexRM(pfx,modrm),
+            binop(Iop_64HLtoV128,
+                  dis_PSIGN_helper( mkexpr(sHi), mkexpr(dHi), laneszB ),
+                  dis_PSIGN_helper( mkexpr(sLo), mkexpr(dLo), laneszB )
+            )
+         );
+         goto decode_success;
+      }
+      /* 0F 38 08 = PSIGNB -- Packed Sign 8x8  (MMX) */
+      /* 0F 38 09 = PSIGNW -- Packed Sign 16x4 (MMX) */
+      /* 0F 38 0A = PSIGND -- Packed Sign 32x2 (MMX) */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         IRTemp sV      = newTemp(Ity_I64);
+         IRTemp dV      = newTemp(Ity_I64);
+         const HChar* str = "???";
+         Int    laneszB = 0;
+
+         switch (opc) {
+            case 0x08: laneszB = 1; str = "b"; break;
+            case 0x09: laneszB = 2; str = "w"; break;
+            case 0x0A: laneszB = 4; str = "d"; break;
+            default: vassert(0);
+         }
+
+         modrm = getUChar(delta);
+         do_MMX_preamble();
+         assign( dV, getMMXReg(gregLO3ofRM(modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
+            delta += 1;
+            DIP("psign%s %s,%s\n", str, nameMMXReg(eregLO3ofRM(modrm)),
+                                        nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("psign%s %s,%s\n", str, dis_buf,
+                                        nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         putMMXReg(
+            gregLO3ofRM(modrm),
+            dis_PSIGN_helper( mkexpr(sV), mkexpr(dV), laneszB )
+         );
+         goto decode_success;
+      }
+      break;
+
+   case 0x0B:
+      /* 66 0F 38 0B = PMULHRSW -- Packed Multiply High with Round and
+         Scale (XMM) */
+      if (have66noF2noF3(pfx)
+          && (sz == 2 || /*redundant REX.W*/ sz == 8)) {
+         IRTemp sV  = newTemp(Ity_V128);
+         IRTemp dV  = newTemp(Ity_V128);
+         IRTemp sHi = newTemp(Ity_I64);
+         IRTemp sLo = newTemp(Ity_I64);
+         IRTemp dHi = newTemp(Ity_I64);
+         IRTemp dLo = newTemp(Ity_I64);
+
+         modrm = getUChar(delta);
+         assign( dV, getXMMReg(gregOfRexRM(pfx,modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getXMMReg(eregOfRexRM(pfx,modrm)) );
+            delta += 1;
+            DIP("pmulhrsw %s,%s\n", nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("pmulhrsw %s,%s\n", dis_buf,
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+
+         assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
+         assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
+         assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+         assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+         putXMMReg(
+            gregOfRexRM(pfx,modrm),
+            binop(Iop_64HLtoV128,
+                  dis_PMULHRSW_helper( mkexpr(sHi), mkexpr(dHi) ),
+                  dis_PMULHRSW_helper( mkexpr(sLo), mkexpr(dLo) )
+            )
+         );
+         goto decode_success;
+      }
+      /* 0F 38 0B = PMULHRSW -- Packed Multiply High with Round and Scale
+         (MMX) */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         IRTemp sV = newTemp(Ity_I64);
+         IRTemp dV = newTemp(Ity_I64);
+
+         modrm = getUChar(delta);
+         do_MMX_preamble();
+         assign( dV, getMMXReg(gregLO3ofRM(modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
+            delta += 1;
+            DIP("pmulhrsw %s,%s\n", nameMMXReg(eregLO3ofRM(modrm)),
+                                    nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("pmulhrsw %s,%s\n", dis_buf,
+                                    nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         putMMXReg(
+            gregLO3ofRM(modrm),
+            dis_PMULHRSW_helper( mkexpr(sV), mkexpr(dV) )
+         );
+         goto decode_success;
+      }
+      break;
+
+   case 0x1C:
+   case 0x1D:
+   case 0x1E:
+      /* 66 0F 38 1C = PABSB -- Packed Absolute Value 8x16 (XMM) */
+      /* 66 0F 38 1D = PABSW -- Packed Absolute Value 16x8 (XMM) */
+      /* 66 0F 38 1E = PABSD -- Packed Absolute Value 32x4 (XMM) */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /*redundant REX.W*/ sz == 8)) {
+         IRTemp sV  = newTemp(Ity_V128);
+         const HChar* str = "???";
+         Int    laneszB = 0;
+
+         switch (opc) {
+            case 0x1C: laneszB = 1; str = "b"; break;
+            case 0x1D: laneszB = 2; str = "w"; break;
+            case 0x1E: laneszB = 4; str = "d"; break;
+            default: vassert(0);
+         }
+
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            assign( sV, getXMMReg(eregOfRexRM(pfx,modrm)) );
+            delta += 1;
+            DIP("pabs%s %s,%s\n", str, nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                       nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("pabs%s %s,%s\n", str, dis_buf,
+                                       nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+
+         putXMMReg( gregOfRexRM(pfx,modrm),
+                    mkexpr(math_PABS_XMM(sV, laneszB)) );
+         goto decode_success;
+      }
+      /* 0F 38 1C = PABSB -- Packed Absolute Value 8x8  (MMX) */
+      /* 0F 38 1D = PABSW -- Packed Absolute Value 16x4 (MMX) */
+      /* 0F 38 1E = PABSD -- Packed Absolute Value 32x2 (MMX) */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         IRTemp sV      = newTemp(Ity_I64);
+         const HChar* str = "???";
+         Int    laneszB = 0;
+
+         switch (opc) {
+            case 0x1C: laneszB = 1; str = "b"; break;
+            case 0x1D: laneszB = 2; str = "w"; break;
+            case 0x1E: laneszB = 4; str = "d"; break;
+            default: vassert(0);
+         }
+
+         modrm = getUChar(delta);
+         do_MMX_preamble();
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
+            delta += 1;
+            DIP("pabs%s %s,%s\n", str, nameMMXReg(eregLO3ofRM(modrm)),
+                                       nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("pabs%s %s,%s\n", str, dis_buf,
+                                       nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         putMMXReg( gregLO3ofRM(modrm),
+                    mkexpr(math_PABS_MMX( sV, laneszB )) );
+         goto decode_success;
+      }
+      break;
+
+   default:
+      break;
+
+   }
+
+  //decode_failure:
+   *decode_OK = False;
+   return deltaIN;
+
+  decode_success:
+   *decode_OK = True;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level SSSE3: dis_ESC_0F3A__SupSSE3               ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+__attribute__((noinline))
+static
+Long dis_ESC_0F3A__SupSSE3 ( Bool* decode_OK,
+                             const VexAbiInfo* vbi,
+                             Prefix pfx, Int sz, Long deltaIN )
+{
+   Long   d64   = 0;
+   IRTemp addr  = IRTemp_INVALID;
+   UChar  modrm = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   *decode_OK = False;
+
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   switch (opc) {
+
+   case 0x0F:
+      /* 66 0F 3A 0F = PALIGNR -- Packed Align Right (XMM) */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /*redundant REX.W*/ sz == 8)) {
+         IRTemp sV  = newTemp(Ity_V128);
+         IRTemp dV  = newTemp(Ity_V128);
+
+         modrm = getUChar(delta);
+         assign( dV, getXMMReg(gregOfRexRM(pfx,modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getXMMReg(eregOfRexRM(pfx,modrm)) );
+            d64 = (Long)getUChar(delta+1);
+            delta += 1+1;
+            DIP("palignr $%d,%s,%s\n", (Int)d64,
+                                       nameXMMReg(eregOfRexRM(pfx,modrm)),
+                                       nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            d64 = (Long)getUChar(delta+alen);
+            delta += alen+1;
+            DIP("palignr $%d,%s,%s\n", (Int)d64,
+                                       dis_buf,
+                                       nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+
+         IRTemp res = math_PALIGNR_XMM( sV, dV, d64 );
+         putXMMReg( gregOfRexRM(pfx,modrm), mkexpr(res) );
+         goto decode_success;
+      }
+      /* 0F 3A 0F = PALIGNR -- Packed Align Right (MMX) */
+      if (haveNo66noF2noF3(pfx) && sz == 4) {
+         IRTemp sV  = newTemp(Ity_I64);
+         IRTemp dV  = newTemp(Ity_I64);
+         IRTemp res = newTemp(Ity_I64);
+
+         modrm = getUChar(delta);
+         do_MMX_preamble();
+         assign( dV, getMMXReg(gregLO3ofRM(modrm)) );
+
+         if (epartIsReg(modrm)) {
+            assign( sV, getMMXReg(eregLO3ofRM(modrm)) );
+            d64 = (Long)getUChar(delta+1);
+            delta += 1+1;
+            DIP("palignr $%d,%s,%s\n",  (Int)d64, 
+                                        nameMMXReg(eregLO3ofRM(modrm)),
+                                        nameMMXReg(gregLO3ofRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+            d64 = (Long)getUChar(delta+alen);
+            delta += alen+1;
+            DIP("palignr $%d%s,%s\n", (Int)d64,
+                                      dis_buf,
+                                      nameMMXReg(gregLO3ofRM(modrm)));
+         }
+
+         if (d64 == 0) {
+            assign( res, mkexpr(sV) );
+         }
+         else if (d64 >= 1 && d64 <= 7) {
+            assign(res, 
+                   binop(Iop_Or64,
+                         binop(Iop_Shr64, mkexpr(sV), mkU8(8*d64)),
+                         binop(Iop_Shl64, mkexpr(dV), mkU8(8*(8-d64))
+                        )));
+         }
+         else if (d64 == 8) {
+           assign( res, mkexpr(dV) );
+         }
+         else if (d64 >= 9 && d64 <= 15) {
+            assign( res, binop(Iop_Shr64, mkexpr(dV), mkU8(8*(d64-8))) );
+         }
+         else if (d64 >= 16 && d64 <= 255) {
+            assign( res, mkU64(0) );
+         }
+         else
+            vassert(0);
+
+         putMMXReg( gregLO3ofRM(modrm), mkexpr(res) );
+         goto decode_success;
+      }
+      break;
+
+   default:
+      break;
+
+   }
+
+  //decode_failure:
+   *decode_OK = False;
+   return deltaIN;
+
+  decode_success:
+   *decode_OK = True;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level SSE4: dis_ESC_0F__SSE4                     ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+__attribute__((noinline))
+static
+Long dis_ESC_0F__SSE4 ( Bool* decode_OK,
+                        const VexArchInfo* archinfo,
+                        const VexAbiInfo* vbi,
+                        Prefix pfx, Int sz, Long deltaIN )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   IRType ty    = Ity_INVALID;
+   UChar  modrm = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   *decode_OK = False;
+
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   switch (opc) {
+
+   case 0xB8:
+      /* F3 0F B8  = POPCNT{W,L,Q}
+         Count the number of 1 bits in a register
+      */
+      if (haveF3noF2(pfx) /* so both 66 and REX.W are possibilities */
+          && (sz == 2 || sz == 4 || sz == 8)) {
+         /*IRType*/ ty  = szToITy(sz);
+         IRTemp     src = newTemp(ty);
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            assign(src, getIRegE(sz, pfx, modrm));
+            delta += 1;
+            DIP("popcnt%c %s, %s\n", nameISize(sz), nameIRegE(sz, pfx, modrm),
+                nameIRegG(sz, pfx, modrm));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0);
+            assign(src, loadLE(ty, mkexpr(addr)));
+            delta += alen;
+            DIP("popcnt%c %s, %s\n", nameISize(sz), dis_buf,
+                nameIRegG(sz, pfx, modrm));
+         }
+
+         IRTemp result = gen_POPCOUNT(ty, src);
+         putIRegG(sz, pfx, modrm, mkexpr(result));
+
+         // Update flags.  This is pretty lame .. perhaps can do better
+         // if this turns out to be performance critical.
+         // O S A C P are cleared.  Z is set if SRC == 0.
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+         stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP1,
+               binop(Iop_Shl64,
+                     unop(Iop_1Uto64,
+                          binop(Iop_CmpEQ64,
+                                widenUto64(mkexpr(src)),
+                                mkU64(0))),
+                     mkU8(AMD64G_CC_SHIFT_Z))));
+
+         goto decode_success;
+      }
+      break;
+
+   case 0xBC:
+      /* F3 0F BC -- TZCNT (count trailing zeroes.  A BMI extension,
+         which we can only decode if we're sure this is a BMI1 capable cpu
+         that supports TZCNT, since otherwise it's BSF, which behaves
+         differently on zero source.  */
+      if (haveF3noF2(pfx) /* so both 66 and 48 are possibilities */
+          && (sz == 2 || sz == 4 || sz == 8)
+          && 0 != (archinfo->hwcaps & VEX_HWCAPS_AMD64_BMI)) {
+         /*IRType*/ ty  = szToITy(sz);
+         IRTemp     src = newTemp(ty);
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            assign(src, getIRegE(sz, pfx, modrm));
+            delta += 1;
+            DIP("tzcnt%c %s, %s\n", nameISize(sz), nameIRegE(sz, pfx, modrm),
+                nameIRegG(sz, pfx, modrm));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0);
+            assign(src, loadLE(ty, mkexpr(addr)));
+            delta += alen;
+            DIP("tzcnt%c %s, %s\n", nameISize(sz), dis_buf,
+                nameIRegG(sz, pfx, modrm));
+         }
+
+         IRTemp res = gen_TZCNT(ty, src);
+         putIRegG(sz, pfx, modrm, mkexpr(res));
+
+         // Update flags.  This is pretty lame .. perhaps can do better
+         // if this turns out to be performance critical.
+         // O S A P are cleared.  Z is set if RESULT == 0.
+         // C is set if SRC is zero.
+         IRTemp src64 = newTemp(Ity_I64);
+         IRTemp res64 = newTemp(Ity_I64);
+         assign(src64, widenUto64(mkexpr(src)));
+         assign(res64, widenUto64(mkexpr(res)));
+
+         IRTemp oszacp = newTemp(Ity_I64);
+         assign(
+            oszacp,
+            binop(Iop_Or64,
+                  binop(Iop_Shl64,
+                        unop(Iop_1Uto64,
+                             binop(Iop_CmpEQ64, mkexpr(res64), mkU64(0))),
+                        mkU8(AMD64G_CC_SHIFT_Z)),
+                  binop(Iop_Shl64,
+                        unop(Iop_1Uto64,
+                             binop(Iop_CmpEQ64, mkexpr(src64), mkU64(0))),
+                        mkU8(AMD64G_CC_SHIFT_C))
+            )
+         );
+
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+         stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(oszacp) ));
+
+         goto decode_success;
+      }
+      break;
+
+   case 0xBD:
+      /* F3 0F BD -- LZCNT (count leading zeroes.  An AMD extension,
+         which we can only decode if we're sure this is an AMD cpu
+         that supports LZCNT, since otherwise it's BSR, which behaves
+         differently.  Bizarrely, my Sandy Bridge also accepts these
+         instructions but produces different results. */
+      if (haveF3noF2(pfx) /* so both 66 and 48 are possibilities */
+          && (sz == 2 || sz == 4 || sz == 8) 
+          && 0 != (archinfo->hwcaps & VEX_HWCAPS_AMD64_LZCNT)) {
+         /*IRType*/ ty  = szToITy(sz);
+         IRTemp     src = newTemp(ty);
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            assign(src, getIRegE(sz, pfx, modrm));
+            delta += 1;
+            DIP("lzcnt%c %s, %s\n", nameISize(sz), nameIRegE(sz, pfx, modrm),
+                nameIRegG(sz, pfx, modrm));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0);
+            assign(src, loadLE(ty, mkexpr(addr)));
+            delta += alen;
+            DIP("lzcnt%c %s, %s\n", nameISize(sz), dis_buf,
+                nameIRegG(sz, pfx, modrm));
+         }
+
+         IRTemp res = gen_LZCNT(ty, src);
+         putIRegG(sz, pfx, modrm, mkexpr(res));
+
+         // Update flags.  This is pretty lame .. perhaps can do better
+         // if this turns out to be performance critical.
+         // O S A P are cleared.  Z is set if RESULT == 0.
+         // C is set if SRC is zero.
+         IRTemp src64 = newTemp(Ity_I64);
+         IRTemp res64 = newTemp(Ity_I64);
+         assign(src64, widenUto64(mkexpr(src)));
+         assign(res64, widenUto64(mkexpr(res)));
+
+         IRTemp oszacp = newTemp(Ity_I64);
+         assign(
+            oszacp,
+            binop(Iop_Or64,
+                  binop(Iop_Shl64,
+                        unop(Iop_1Uto64,
+                             binop(Iop_CmpEQ64, mkexpr(res64), mkU64(0))),
+                        mkU8(AMD64G_CC_SHIFT_Z)),
+                  binop(Iop_Shl64,
+                        unop(Iop_1Uto64,
+                             binop(Iop_CmpEQ64, mkexpr(src64), mkU64(0))),
+                        mkU8(AMD64G_CC_SHIFT_C))
+            )
+         );
+
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+         stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(oszacp) ));
+
+         goto decode_success;
+      }
+      break;
+
+   default:
+      break;
+
+   }
+
+  //decode_failure:
+   *decode_OK = False;
+   return deltaIN;
+
+  decode_success:
+   *decode_OK = True;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level SSE4: dis_ESC_0F38__SSE4                   ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+static IRTemp math_PBLENDVB_128 ( IRTemp vecE, IRTemp vecG,
+                                  IRTemp vec0/*controlling mask*/,
+                                  UInt gran, IROp opSAR )
+{
+   /* The tricky bit is to convert vec0 into a suitable mask, by
+      copying the most significant bit of each lane into all positions
+      in the lane. */
+   IRTemp sh = newTemp(Ity_I8);
+   assign(sh, mkU8(8 * gran - 1));
+
+   IRTemp mask = newTemp(Ity_V128);
+   assign(mask, binop(opSAR, mkexpr(vec0), mkexpr(sh)));
+
+   IRTemp notmask = newTemp(Ity_V128);
+   assign(notmask, unop(Iop_NotV128, mkexpr(mask)));
+
+   IRTemp res = newTemp(Ity_V128);
+   assign(res,  binop(Iop_OrV128,
+                      binop(Iop_AndV128, mkexpr(vecE), mkexpr(mask)),
+                      binop(Iop_AndV128, mkexpr(vecG), mkexpr(notmask))));
+   return res;
+}
+
+static IRTemp math_PBLENDVB_256 ( IRTemp vecE, IRTemp vecG,
+                                  IRTemp vec0/*controlling mask*/,
+                                  UInt gran, IROp opSAR128 )
+{
+   /* The tricky bit is to convert vec0 into a suitable mask, by
+      copying the most significant bit of each lane into all positions
+      in the lane. */
+   IRTemp sh = newTemp(Ity_I8);
+   assign(sh, mkU8(8 * gran - 1));
+
+   IRTemp vec0Hi = IRTemp_INVALID;
+   IRTemp vec0Lo = IRTemp_INVALID;
+   breakupV256toV128s( vec0, &vec0Hi, &vec0Lo );
+
+   IRTemp mask = newTemp(Ity_V256);
+   assign(mask, binop(Iop_V128HLtoV256,
+                      binop(opSAR128, mkexpr(vec0Hi), mkexpr(sh)),
+                      binop(opSAR128, mkexpr(vec0Lo), mkexpr(sh))));
+
+   IRTemp notmask = newTemp(Ity_V256);
+   assign(notmask, unop(Iop_NotV256, mkexpr(mask)));
+
+   IRTemp res = newTemp(Ity_V256);
+   assign(res,  binop(Iop_OrV256,
+                      binop(Iop_AndV256, mkexpr(vecE), mkexpr(mask)),
+                      binop(Iop_AndV256, mkexpr(vecG), mkexpr(notmask))));
+   return res;
+}
+
/* Decode and translate the 128-bit VBLENDV family (`name' supplies
   the mnemonic).  The controlling register number is carried in the
   top nibble of the trailing is4 immediate byte; the actual blend is
   delegated to math_PBLENDVB_128 with lane size `gran' (bytes) and
   the matching arithmetic-shift-right op `opSAR'.  Returns the
   updated instruction offset. */
static Long dis_VBLENDV_128 ( const VexAbiInfo* vbi, Prefix pfx, Long delta,
                              const HChar *name, UInt gran, IROp opSAR )
{
   IRTemp addr   = IRTemp_INVALID;
   Int    alen   = 0;
   HChar  dis_buf[50];
   UChar  modrm  = getUChar(delta);
   UInt   rG     = gregOfRexRM(pfx, modrm);
   UInt   rV     = getVexNvvvv(pfx);
   UInt   rIS4   = 0xFF; /* invalid */
   IRTemp vecE   = newTemp(Ity_V128);
   IRTemp vecV   = newTemp(Ity_V128);
   IRTemp vecIS4 = newTemp(Ity_V128);
   if (epartIsReg(modrm)) {
      delta++;
      UInt rE = eregOfRexRM(pfx, modrm);
      assign(vecE, getXMMReg(rE));
      /* The is4 byte follows the modrm byte; its top nibble names the
         controlling register. */
      UChar ib = getUChar(delta);
      rIS4 = (ib >> 4) & 0xF;
      DIP("%s %s,%s,%s,%s\n",
          name, nameXMMReg(rIS4), nameXMMReg(rE),
          nameXMMReg(rV), nameXMMReg(rG));
   } else {
      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
      delta += alen;
      assign(vecE, loadLE(Ity_V128, mkexpr(addr)));
      UChar ib = getUChar(delta);
      rIS4 = (ib >> 4) & 0xF;
      DIP("%s %s,%s,%s,%s\n",
          name, nameXMMReg(rIS4), dis_buf, nameXMMReg(rV), nameXMMReg(rG));
   }
   delta++;  /* step over the is4 byte */
   assign(vecV,   getXMMReg(rV));
   assign(vecIS4, getXMMReg(rIS4));
   IRTemp res = math_PBLENDVB_128( vecE, vecV, vecIS4, gran, opSAR );
   putYMMRegLoAndZU( rG, mkexpr(res) );
   return delta;
}
+
/* 256-bit counterpart of dis_VBLENDV_128: same decode, but operands
   are YMM registers / a 256-bit memory operand, and the blend is done
   by math_PBLENDVB_256 using the supplied 128-bit SAR op `opSAR128'.
   Returns the updated instruction offset. */
static Long dis_VBLENDV_256 ( const VexAbiInfo* vbi, Prefix pfx, Long delta,
                              const HChar *name, UInt gran, IROp opSAR128 )
{
   IRTemp addr   = IRTemp_INVALID;
   Int    alen   = 0;
   HChar  dis_buf[50];
   UChar  modrm  = getUChar(delta);
   UInt   rG     = gregOfRexRM(pfx, modrm);
   UInt   rV     = getVexNvvvv(pfx);
   UInt   rIS4   = 0xFF; /* invalid */
   IRTemp vecE   = newTemp(Ity_V256);
   IRTemp vecV   = newTemp(Ity_V256);
   IRTemp vecIS4 = newTemp(Ity_V256);
   if (epartIsReg(modrm)) {
      delta++;
      UInt rE = eregOfRexRM(pfx, modrm);
      assign(vecE, getYMMReg(rE));
      /* The is4 byte follows the modrm byte; its top nibble names the
         controlling register. */
      UChar ib = getUChar(delta);
      rIS4 = (ib >> 4) & 0xF;
      DIP("%s %s,%s,%s,%s\n",
          name, nameYMMReg(rIS4), nameYMMReg(rE),
          nameYMMReg(rV), nameYMMReg(rG));
   } else {
      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
      delta += alen;
      assign(vecE, loadLE(Ity_V256, mkexpr(addr)));
      UChar ib = getUChar(delta);
      rIS4 = (ib >> 4) & 0xF;
      DIP("%s %s,%s,%s,%s\n",
          name, nameYMMReg(rIS4), dis_buf, nameYMMReg(rV), nameYMMReg(rG));
   }
   delta++;  /* step over the is4 byte */
   assign(vecV,   getYMMReg(rV));
   assign(vecIS4, getYMMReg(rIS4));
   IRTemp res = math_PBLENDVB_256( vecE, vecV, vecIS4, gran, opSAR128 );
   putYMMReg( rG, mkexpr(res) );
   return delta;
}
+
/* Flags computation shared by (V)PTEST / VTESTPS / VTESTPD.  Given
   andV = vecE & vecG and andnV = vecE & ~vecG (both V128), set the
   flags thunk to COPY with Z set iff andV is zero and C set iff andnV
   is zero; O, S, A and P end up cleared.  `sign' selects which bits
   participate: 0 => all 128 bits (PTEST), 32 => only the sign bits of
   the 32-bit lanes (VTESTPS), 64 => only the sign bits of the 64-bit
   lanes (VTESTPD). */
static void finish_xTESTy ( IRTemp andV, IRTemp andnV, Int sign )
{
   /* Set Z=1 iff (vecE & vecG) == 0
      Set C=1 iff (vecE & not vecG) == 0
   */

   /* andV, andnV:  vecE & vecG,  vecE and not(vecG) */

   /* andV resp. andnV, reduced to 64-bit values, by or-ing the top
      and bottom 64-bits together.  It relies on this trick:

      InterleaveLO64x2([a,b],[c,d]) == [b,d]    hence

      InterleaveLO64x2([a,b],[a,b]) == [b,b]    and similarly
      InterleaveHI64x2([a,b],[a,b]) == [a,a] 

      and so the OR of the above 2 exprs produces
      [a OR b, a OR b], from which we simply take the lower half.
   */
   IRTemp and64  = newTemp(Ity_I64);
   IRTemp andn64 = newTemp(Ity_I64);

   assign(and64,
          unop(Iop_V128to64,
               binop(Iop_OrV128,
                     binop(Iop_InterleaveLO64x2,
                           mkexpr(andV), mkexpr(andV)),
                     binop(Iop_InterleaveHI64x2,
                           mkexpr(andV), mkexpr(andV)))));

   assign(andn64,
          unop(Iop_V128to64,
               binop(Iop_OrV128,
                     binop(Iop_InterleaveLO64x2,
                           mkexpr(andnV), mkexpr(andnV)),
                     binop(Iop_InterleaveHI64x2,
                           mkexpr(andnV), mkexpr(andnV)))));

   /* z64/c64 become all-ones when the corresponding flag should be
      set, all-zeroes otherwise. */
   IRTemp z64 = newTemp(Ity_I64);
   IRTemp c64 = newTemp(Ity_I64);
   if (sign == 64) {
      /* When only interested in the most significant bit, just shift
         arithmetically right and negate.  */
      assign(z64,
             unop(Iop_Not64,
                  binop(Iop_Sar64, mkexpr(and64), mkU8(63))));

      assign(c64,
             unop(Iop_Not64,
                  binop(Iop_Sar64, mkexpr(andn64), mkU8(63))));
   } else {
      if (sign == 32) {
         /* When interested in bit 31 and bit 63, mask those bits and
            fallthrough into the PTEST handling.  */
         IRTemp t0 = newTemp(Ity_I64);
         IRTemp t1 = newTemp(Ity_I64);
         IRTemp t2 = newTemp(Ity_I64);
         assign(t0, mkU64(0x8000000080000000ULL));
         assign(t1, binop(Iop_And64, mkexpr(and64), mkexpr(t0)));
         assign(t2, binop(Iop_And64, mkexpr(andn64), mkexpr(t0)));
         and64 = t1;
         andn64 = t2;
      }
      /* Now convert and64, andn64 to all-zeroes or all-1s, so we can
         slice out the Z and C bits conveniently.  We use the standard
         trick all-zeroes -> all-zeroes, anything-else -> all-ones
         done by "(x | -x) >>s (word-size - 1)".
      */
      assign(z64,
             unop(Iop_Not64,
                  binop(Iop_Sar64,
                        binop(Iop_Or64,
                              binop(Iop_Sub64, mkU64(0), mkexpr(and64)),
                                    mkexpr(and64)), mkU8(63))));

      assign(c64,
             unop(Iop_Not64,
                  binop(Iop_Sar64,
                        binop(Iop_Or64,
                              binop(Iop_Sub64, mkU64(0), mkexpr(andn64)),
                                    mkexpr(andn64)), mkU8(63))));
   }

   /* And finally, slice out the Z and C flags and set the flags
      thunk to COPY for them.  OSAP are set to zero. */
   IRTemp newOSZACP = newTemp(Ity_I64);
   assign(newOSZACP, 
          binop(Iop_Or64,
                binop(Iop_And64, mkexpr(z64), mkU64(AMD64G_CC_MASK_Z)),
                binop(Iop_And64, mkexpr(c64), mkU64(AMD64G_CC_MASK_C))));

   stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(newOSZACP)));
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
}
+
+
+/* Handles 128 bit versions of PTEST, VTESTPS or VTESTPD.
+   sign is 0 for PTEST insn, 32 for VTESTPS and 64 for VTESTPD. */
+static Long dis_xTESTy_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                             Long delta, Bool isAvx, Int sign )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   UChar  modrm  = getUChar(delta);
+   UInt   rG     = gregOfRexRM(pfx, modrm);
+   IRTemp vecE = newTemp(Ity_V128);
+   IRTemp vecG = newTemp(Ity_V128);
+
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign(vecE, getXMMReg(rE));
+      delta += 1;
+      DIP( "%s%stest%s %s,%s\n",
+           isAvx ? "v" : "", sign == 0 ? "p" : "",
+           sign == 0 ? "" : sign == 32 ? "ps" : "pd",
+           nameXMMReg(rE), nameXMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      if (!isAvx)
+         gen_SEGV_if_not_16_aligned( addr );
+      assign(vecE, loadLE( Ity_V128, mkexpr(addr) ));
+      delta += alen;
+      DIP( "%s%stest%s %s,%s\n",
+           isAvx ? "v" : "", sign == 0 ? "p" : "",
+           sign == 0 ? "" : sign == 32 ? "ps" : "pd",
+           dis_buf, nameXMMReg(rG) );
+   }
+
+   assign(vecG, getXMMReg(rG));
+
+   /* Set Z=1 iff (vecE & vecG) == 0
+      Set C=1 iff (vecE & not vecG) == 0
+   */
+
+   /* andV, andnV:  vecE & vecG,  vecE and not(vecG) */
+   IRTemp andV  = newTemp(Ity_V128);
+   IRTemp andnV = newTemp(Ity_V128);
+   assign(andV,  binop(Iop_AndV128, mkexpr(vecE), mkexpr(vecG)));
+   assign(andnV, binop(Iop_AndV128,
+                       mkexpr(vecE),
+                       binop(Iop_XorV128, mkexpr(vecG),
+                                          mkV128(0xFFFF))));
+
+   finish_xTESTy ( andV, andnV, sign );
+   return delta;
+}
+
+
/* Handles 256 bit versions of PTEST, VTESTPS or VTESTPD.
   sign is 0 for PTEST insn, 32 for VTESTPS and 64 for VTESTPD.
   Forms (E & G) and (E & ~G) at 256 bits, folds each down to 128 bits
   by OR-ing its two halves, then reuses the 128-bit flag finisher. */
static Long dis_xTESTy_256 ( const VexAbiInfo* vbi, Prefix pfx,
                             Long delta, Int sign )
{
   IRTemp addr   = IRTemp_INVALID;
   Int    alen   = 0;
   HChar  dis_buf[50];
   UChar  modrm  = getUChar(delta);
   UInt   rG     = gregOfRexRM(pfx, modrm);
   IRTemp vecE   = newTemp(Ity_V256);
   IRTemp vecG   = newTemp(Ity_V256);

   if ( epartIsReg(modrm) ) {
      UInt rE = eregOfRexRM(pfx, modrm);
      assign(vecE, getYMMReg(rE));
      delta += 1;
      DIP( "v%stest%s %s,%s\n", sign == 0 ? "p" : "",
           sign == 0 ? "" : sign == 32 ? "ps" : "pd",
           nameYMMReg(rE), nameYMMReg(rG) );
   } else {
      /* No alignment check here: only the AVX forms reach this. */
      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
      assign(vecE, loadLE( Ity_V256, mkexpr(addr) ));
      delta += alen;
      DIP( "v%stest%s %s,%s\n", sign == 0 ? "p" : "",
           sign == 0 ? "" : sign == 32 ? "ps" : "pd",
           dis_buf, nameYMMReg(rG) );
   }

   assign(vecG, getYMMReg(rG));

   /* Set Z=1 iff (vecE & vecG) == 0
      Set C=1 iff (vecE & not vecG) == 0
   */

   /* andV, andnV:  vecE & vecG,  vecE and not(vecG) */
   IRTemp andV  = newTemp(Ity_V256);
   IRTemp andnV = newTemp(Ity_V256);
   assign(andV,  binop(Iop_AndV256, mkexpr(vecE), mkexpr(vecG)));
   assign(andnV, binop(Iop_AndV256,
                       mkexpr(vecE), unop(Iop_NotV256, mkexpr(vecG))));

   /* Reduce 256 -> 128 bits: OR the two halves of each value.  This
      preserves "is zero" / "is nonzero", which is all we need. */
   IRTemp andVhi  = IRTemp_INVALID;
   IRTemp andVlo  = IRTemp_INVALID;
   IRTemp andnVhi = IRTemp_INVALID;
   IRTemp andnVlo = IRTemp_INVALID;
   breakupV256toV128s( andV, &andVhi, &andVlo );
   breakupV256toV128s( andnV, &andnVhi, &andnVlo );

   IRTemp andV128  = newTemp(Ity_V128);
   IRTemp andnV128 = newTemp(Ity_V128);
   assign( andV128, binop( Iop_OrV128, mkexpr(andVhi), mkexpr(andVlo) ) );
   assign( andnV128, binop( Iop_OrV128, mkexpr(andnVhi), mkexpr(andnVlo) ) );

   finish_xTESTy ( andV128, andnV128, sign );
   return delta;
}
+
+
+/* Handles 128 bit versions of PMOVZXBW and PMOVSXBW. */
+static Long dis_PMOVxXBW_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx, Bool xIsZ )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec = newTemp(Ity_V128);
+   UChar  modrm  = getUChar(delta);
+   const HChar* mbV    = isAvx ? "v" : "";
+   const HChar  how    = xIsZ ? 'z' : 's';
+   UInt   rG     = gregOfRexRM(pfx, modrm);
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "%spmov%cxbw %s,%s\n", mbV, how, nameXMMReg(rE), nameXMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec, 
+              unop( Iop_64UtoV128, loadLE( Ity_I64, mkexpr(addr) ) ) );
+      delta += alen;
+      DIP( "%spmov%cxbw %s,%s\n", mbV, how, dis_buf, nameXMMReg(rG) );
+   }
+
+   IRExpr* res 
+      = xIsZ /* do math for either zero or sign extend */
+        ? binop( Iop_InterleaveLO8x16, 
+                 IRExpr_Const( IRConst_V128(0) ), mkexpr(srcVec) )
+        : binop( Iop_SarN16x8, 
+                 binop( Iop_ShlN16x8, 
+                        binop( Iop_InterleaveLO8x16,
+                               IRExpr_Const( IRConst_V128(0) ),
+                               mkexpr(srcVec) ),
+                        mkU8(8) ),
+                 mkU8(8) );
+
+   (isAvx ? putYMMRegLoAndZU : putXMMReg) ( rG, res );
+
+   return delta;
+}
+
+
/* Handles 256 bit versions of PMOVZXBW and PMOVSXBW: widen 16 bytes
   of the XMM/mem source to 16 16-bit lanes of the YMM destination.
   xIsZ selects zero- vs sign-extension. */
static Long dis_PMOVxXBW_256 ( const VexAbiInfo* vbi, Prefix pfx,
                               Long delta, Bool xIsZ )
{
   IRTemp addr   = IRTemp_INVALID;
   Int    alen   = 0;
   HChar  dis_buf[50];
   IRTemp srcVec = newTemp(Ity_V128);
   UChar  modrm  = getUChar(delta);
   UChar  how    = xIsZ ? 'z' : 's';
   UInt   rG     = gregOfRexRM(pfx, modrm);
   if ( epartIsReg(modrm) ) {
      UInt rE = eregOfRexRM(pfx, modrm);
      assign( srcVec, getXMMReg(rE) );
      delta += 1;
      DIP( "vpmov%cxbw %s,%s\n", how, nameXMMReg(rE), nameYMMReg(rG) );
   } else {
      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
      assign( srcVec, loadLE( Ity_V128, mkexpr(addr) ) );
      delta += alen;
      DIP( "vpmov%cxbw %s,%s\n", how, dis_buf, nameYMMReg(rG) );
   }

   /* First do zero extend.  */
   IRExpr* res
      = binop( Iop_V128HLtoV256,
               binop( Iop_InterleaveHI8x16,
                      IRExpr_Const( IRConst_V128(0) ), mkexpr(srcVec) ),
               binop( Iop_InterleaveLO8x16,
                      IRExpr_Const( IRConst_V128(0) ), mkexpr(srcVec) ) );
   /* And if needed sign extension as well: shift each 16-bit lane
      left then arithmetically right by 8. */
   if (!xIsZ)
      res = binop( Iop_SarN16x16,
                   binop( Iop_ShlN16x16, res, mkU8(8) ), mkU8(8) );

   putYMMReg ( rG, res );

   return delta;
}
+
+
+static Long dis_PMOVxXWD_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx, Bool xIsZ )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec = newTemp(Ity_V128);
+   UChar  modrm  = getUChar(delta);
+   const HChar* mbV    = isAvx ? "v" : "";
+   const HChar  how    = xIsZ ? 'z' : 's';
+   UInt   rG     = gregOfRexRM(pfx, modrm);
+
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "%spmov%cxwd %s,%s\n", mbV, how, nameXMMReg(rE), nameXMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec, 
+              unop( Iop_64UtoV128, loadLE( Ity_I64, mkexpr(addr) ) ) );
+      delta += alen;
+      DIP( "%spmov%cxwd %s,%s\n", mbV, how, dis_buf, nameXMMReg(rG) );
+   }
+
+   IRExpr* res
+      = binop( Iop_InterleaveLO16x8,  
+               IRExpr_Const( IRConst_V128(0) ), mkexpr(srcVec) );
+   if (!xIsZ)
+      res = binop(Iop_SarN32x4, 
+                  binop(Iop_ShlN32x4, res, mkU8(16)), mkU8(16));
+
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)
+      ( gregOfRexRM(pfx, modrm), res );
+
+   return delta;
+}
+
+
+static Long dis_PMOVxXWD_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool xIsZ )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec = newTemp(Ity_V128);
+   UChar  modrm  = getUChar(delta);
+   UChar  how    = xIsZ ? 'z' : 's';
+   UInt   rG     = gregOfRexRM(pfx, modrm);
+
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "vpmov%cxwd %s,%s\n", how, nameXMMReg(rE), nameYMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec, loadLE( Ity_V128, mkexpr(addr) ) );
+      delta += alen;
+      DIP( "vpmov%cxwd %s,%s\n", how, dis_buf, nameYMMReg(rG) );
+   }
+
+   IRExpr* res
+      = binop( Iop_V128HLtoV256,
+               binop( Iop_InterleaveHI16x8,
+                      IRExpr_Const( IRConst_V128(0) ), mkexpr(srcVec) ),
+               binop( Iop_InterleaveLO16x8,
+                      IRExpr_Const( IRConst_V128(0) ), mkexpr(srcVec) ) );
+   if (!xIsZ)
+      res = binop(Iop_SarN32x8,
+                  binop(Iop_ShlN32x8, res, mkU8(16)), mkU8(16));
+
+   putYMMReg ( rG, res );
+
+   return delta;
+}
+
+
+static Long dis_PMOVSXWQ_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   IRTemp addr     = IRTemp_INVALID;
+   Int    alen     = 0;
+   HChar  dis_buf[50];
+   IRTemp srcBytes = newTemp(Ity_I32);
+   UChar  modrm    = getUChar(delta);
+   const HChar* mbV = isAvx ? "v" : "";
+   UInt   rG       = gregOfRexRM(pfx, modrm);
+
+   if ( epartIsReg( modrm ) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcBytes, getXMMRegLane32( rE, 0 ) );
+      delta += 1;
+      DIP( "%spmovsxwq %s,%s\n", mbV, nameXMMReg(rE), nameXMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcBytes, loadLE( Ity_I32, mkexpr(addr) ) );
+      delta += alen;
+      DIP( "%spmovsxwq %s,%s\n", mbV, dis_buf, nameXMMReg(rG) );
+   }
+
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)
+      ( rG, binop( Iop_64HLtoV128, 
+                   unop( Iop_16Sto64,
+                         unop( Iop_32HIto16, mkexpr(srcBytes) ) ),
+                   unop( Iop_16Sto64, 
+                         unop( Iop_32to16, mkexpr(srcBytes) ) ) ) );
+   return delta;
+}
+
+
/* VPMOVSXWQ, 256-bit form: sign-extend the low 4 16-bit lanes of the
   source to the 4 64-bit lanes of the YMM destination. */
static Long dis_PMOVSXWQ_256 ( const VexAbiInfo* vbi, Prefix pfx, Long delta )
{
   IRTemp addr     = IRTemp_INVALID;
   Int    alen     = 0;
   HChar  dis_buf[50];
   IRTemp srcBytes = newTemp(Ity_I64);
   UChar  modrm    = getUChar(delta);
   UInt   rG       = gregOfRexRM(pfx, modrm);
   IRTemp s3, s2, s1, s0;
   s3 = s2 = s1 = s0 = IRTemp_INVALID;

   if ( epartIsReg( modrm ) ) {
      UInt rE = eregOfRexRM(pfx, modrm);
      assign( srcBytes, getXMMRegLane64( rE, 0 ) );
      delta += 1;
      DIP( "vpmovsxwq %s,%s\n", nameXMMReg(rE), nameYMMReg(rG) );
   } else {
      /* Memory form: only 64 bits are read. */
      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
      assign( srcBytes, loadLE( Ity_I64, mkexpr(addr) ) );
      delta += alen;
      DIP( "vpmovsxwq %s,%s\n", dis_buf, nameYMMReg(rG) );
   }

   /* Split into four 16-bit pieces (s3 is the most significant) and
      sign-extend each to a 64-bit lane. */
   breakup64to16s( srcBytes, &s3, &s2, &s1, &s0 );
   putYMMReg( rG, binop( Iop_V128HLtoV256,
                         binop( Iop_64HLtoV128,
                                unop( Iop_16Sto64, mkexpr(s3) ),
                                unop( Iop_16Sto64, mkexpr(s2) ) ),
                         binop( Iop_64HLtoV128,
                                unop( Iop_16Sto64, mkexpr(s1) ),
                                unop( Iop_16Sto64, mkexpr(s0) ) ) ) );
   return delta;
}
+
+
+/* Handles 128 bit versions of PMOVZXWQ (and VPMOVZXWQ): zero extends
+   the two 16-bit lanes held in the low 32 bits of xmm2/m32 into the
+   two 64-bit lanes of the destination XMM register.  Returns the
+   updated instruction offset (delta). */
+static Long dis_PMOVZXWQ_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   IRTemp addr     = IRTemp_INVALID;
+   Int    alen     = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec = newTemp(Ity_V128);
+   UChar  modrm    = getUChar(delta);
+   const HChar* mbV = isAvx ? "v" : "";
+   UInt   rG       = gregOfRexRM(pfx, modrm);
+
+   if ( epartIsReg( modrm ) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "%spmovzxwq %s,%s\n", mbV, nameXMMReg(rE), nameXMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec, 
+              unop( Iop_32UtoV128, loadLE( Ity_I32, mkexpr(addr) ) ) );
+      delta += alen;
+      DIP( "%spmovzxwq %s,%s\n", mbV, dis_buf, nameXMMReg(rG) );
+   }
+
+   IRTemp zeroVec = newTemp( Ity_V128 );
+   assign( zeroVec, IRExpr_Const( IRConst_V128(0) ) );
+
+   /* Zero extension by interleaving: each InterleaveLO16x8 against the
+      zero vector doubles the effective lane width, so two rounds take
+      the 16-bit lanes to zero-extended 64-bit lanes. */
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)
+      ( rG, binop( Iop_InterleaveLO16x8, 
+                   mkexpr(zeroVec), 
+                   binop( Iop_InterleaveLO16x8, 
+                          mkexpr(zeroVec), mkexpr(srcVec) ) ) );
+   return delta;
+}
+
+
+/* Handles the 256 bit version of PMOVZXWQ (VPMOVZXWQ ymm,xmm/m64):
+   zero extends the four 16-bit lanes held in the low 64 bits of the
+   source into the four 64-bit lanes of ymm.  Returns the updated
+   instruction offset (delta). */
+static Long dis_PMOVZXWQ_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta )
+{
+   IRTemp addr     = IRTemp_INVALID;
+   Int    alen     = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec = newTemp(Ity_V128);
+   UChar  modrm    = getUChar(delta);
+   UInt   rG       = gregOfRexRM(pfx, modrm);
+
+   if ( epartIsReg( modrm ) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "vpmovzxwq %s,%s\n", nameXMMReg(rE), nameYMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec,
+              unop( Iop_64UtoV128, loadLE( Ity_I64, mkexpr(addr) ) ) );
+      delta += alen;
+      DIP( "vpmovzxwq %s,%s\n", dis_buf, nameYMMReg(rG) );
+   }
+
+   IRTemp zeroVec = newTemp( Ity_V128 );
+   assign( zeroVec, IRExpr_Const( IRConst_V128(0) ) );
+
+   /* First interleave widens 16-bit lanes to 32 bits; the second round
+      widens to 64 bits, with HI/LO selecting the upper/lower pair of
+      source lanes for the two halves of the YMM result. */
+   putYMMReg( rG, binop( Iop_V128HLtoV256,
+                         binop( Iop_InterleaveHI16x8,
+                                mkexpr(zeroVec),
+                                binop( Iop_InterleaveLO16x8,
+                                       mkexpr(zeroVec), mkexpr(srcVec) ) ),
+                         binop( Iop_InterleaveLO16x8,
+                                mkexpr(zeroVec),
+                                binop( Iop_InterleaveLO16x8,
+                                       mkexpr(zeroVec), mkexpr(srcVec) ) ) ) );
+   return delta;
+}
+
+
+/* Handles 128 bit versions of PMOVZXDQ and PMOVSXDQ: widens the two
+   32-bit lanes held in the low 64 bits of xmm2/m64 into the two
+   64-bit lanes of the destination, zero extending if xIsZ, else sign
+   extending.  Returns the updated instruction offset (delta). */
+static Long dis_PMOVxXDQ_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx, Bool xIsZ )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   IRTemp srcI64 = newTemp(Ity_I64);
+   IRTemp srcVec = newTemp(Ity_V128);
+   UChar  modrm  = getUChar(delta);
+   const HChar* mbV = isAvx ? "v" : "";
+   const HChar  how = xIsZ ? 'z' : 's';
+   UInt   rG     = gregOfRexRM(pfx, modrm);
+   /* Compute both srcI64 -- the value to expand -- and srcVec -- same
+      thing in a V128, with arbitrary junk in the top 64 bits.  Use
+      one or both of them and let iropt clean up afterwards (as
+      usual). */
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      assign( srcI64, unop(Iop_V128to64, mkexpr(srcVec)) );
+      delta += 1;
+      DIP( "%spmov%cxdq %s,%s\n", mbV, how, nameXMMReg(rE), nameXMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcI64, loadLE(Ity_I64, mkexpr(addr)) );
+      assign( srcVec, unop( Iop_64UtoV128, mkexpr(srcI64)) );
+      delta += alen;
+      DIP( "%spmov%cxdq %s,%s\n", mbV, how, dis_buf, nameXMMReg(rG) );
+   }
+
+   /* Zero extend by interleaving with zero; sign extend by splitting
+      the 64-bit source into its two 32-bit halves and widening each
+      with Iop_32Sto64. */
+   IRExpr* res 
+      = xIsZ /* do math for either zero or sign extend */
+        ? binop( Iop_InterleaveLO32x4, 
+                 IRExpr_Const( IRConst_V128(0) ), mkexpr(srcVec) )
+        : binop( Iop_64HLtoV128, 
+                 unop( Iop_32Sto64, 
+                       unop( Iop_64HIto32, mkexpr(srcI64) ) ), 
+                 unop( Iop_32Sto64, 
+                       unop( Iop_64to32, mkexpr(srcI64) ) ) );
+
+   (isAvx ? putYMMRegLoAndZU : putXMMReg) ( rG, res );
+
+   return delta;
+}
+
+
+/* Handles 256 bit versions of PMOVZXDQ and PMOVSXDQ: widens the four
+   32-bit lanes of xmm2/m128 into the four 64-bit lanes of ymm1, zero
+   extending if xIsZ, else sign extending.  Returns the updated
+   instruction offset (delta). */
+static Long dis_PMOVxXDQ_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool xIsZ )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec = newTemp(Ity_V128);
+   UChar  modrm  = getUChar(delta);
+   UChar  how    = xIsZ ? 'z' : 's';
+   UInt   rG     = gregOfRexRM(pfx, modrm);
+   /* srcVec holds the four 32-bit lanes to widen: the whole source
+      XMM register, or the 128-bit loaded value for the memory case. */
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "vpmov%cxdq %s,%s\n", how, nameXMMReg(rE), nameYMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec, loadLE(Ity_V128, mkexpr(addr)) );
+      delta += alen;
+      DIP( "vpmov%cxdq %s,%s\n", how, dis_buf, nameYMMReg(rG) );
+   }
+
+   IRExpr* res;
+   if (xIsZ)
+      /* Zero extend: interleave the source lanes with zero; HI/LO pick
+         the upper/lower pair for the two halves of the YMM result. */
+      res = binop( Iop_V128HLtoV256,
+                   binop( Iop_InterleaveHI32x4,
+                          IRExpr_Const( IRConst_V128(0) ), mkexpr(srcVec) ),
+                   binop( Iop_InterleaveLO32x4,
+                          IRExpr_Const( IRConst_V128(0) ), mkexpr(srcVec) ) );
+   else {
+      /* Sign extend: split into the four 32-bit lanes (s3 = most
+         significant) and widen each with Iop_32Sto64. */
+      IRTemp s3, s2, s1, s0;
+      s3 = s2 = s1 = s0 = IRTemp_INVALID;
+      breakupV128to32s( srcVec, &s3, &s2, &s1, &s0 );
+      res = binop( Iop_V128HLtoV256,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_32Sto64, mkexpr(s3) ),
+                          unop( Iop_32Sto64, mkexpr(s2) ) ),
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_32Sto64, mkexpr(s1) ),
+                          unop( Iop_32Sto64, mkexpr(s0) ) ) );
+   }
+
+   putYMMReg ( rG, res );
+
+   return delta;
+}
+
+
+/* Handles 128 bit versions of PMOVZXBD and PMOVSXBD: widens the four
+   bytes held in the low 32 bits of xmm2/m32 into the four 32-bit
+   lanes of the destination, zero extending if xIsZ, else sign
+   extending.  Returns the updated instruction offset (delta). */
+static Long dis_PMOVxXBD_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx, Bool xIsZ )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec = newTemp(Ity_V128);
+   UChar  modrm  = getUChar(delta);
+   const HChar* mbV = isAvx ? "v" : "";
+   const HChar  how = xIsZ ? 'z' : 's';
+   UInt   rG     = gregOfRexRM(pfx, modrm);
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "%spmov%cxbd %s,%s\n", mbV, how, nameXMMReg(rE), nameXMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec, 
+              unop( Iop_32UtoV128, loadLE( Ity_I32, mkexpr(addr) ) ) );
+      delta += alen;
+      DIP( "%spmov%cxbd %s,%s\n", mbV, how, dis_buf, nameXMMReg(rG) );
+   }
+
+   IRTemp zeroVec = newTemp(Ity_V128);
+   assign( zeroVec, IRExpr_Const( IRConst_V128(0) ) );
+
+   /* Two interleaves with zero take 8-bit lanes to zero-extended
+      32-bit lanes. */
+   IRExpr* res
+      = binop(Iop_InterleaveLO8x16,
+              mkexpr(zeroVec),
+              binop(Iop_InterleaveLO8x16, 
+                    mkexpr(zeroVec), mkexpr(srcVec)));
+   if (!xIsZ)
+      /* For sign extension, shift each 32-bit lane left then
+         arithmetically right by 24 to replicate the byte's sign bit. */
+      res = binop(Iop_SarN32x4, 
+                  binop(Iop_ShlN32x4, res, mkU8(24)), mkU8(24));
+
+   (isAvx ? putYMMRegLoAndZU : putXMMReg) ( rG, res );
+
+   return delta;
+}
+
+
+/* Handles 256 bit versions of PMOVZXBD and PMOVSXBD: widens the
+   eight bytes held in the low 64 bits of xmm2/m64 into the eight
+   32-bit lanes of ymm1, zero extending if xIsZ, else sign extending.
+   Returns the updated instruction offset (delta). */
+static Long dis_PMOVxXBD_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool xIsZ )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec = newTemp(Ity_V128);
+   UChar  modrm  = getUChar(delta);
+   UChar  how    = xIsZ ? 'z' : 's';
+   UInt   rG     = gregOfRexRM(pfx, modrm);
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "vpmov%cxbd %s,%s\n", how, nameXMMReg(rE), nameYMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec,
+              unop( Iop_64UtoV128, loadLE( Ity_I64, mkexpr(addr) ) ) );
+      delta += alen;
+      DIP( "vpmov%cxbd %s,%s\n", how, dis_buf, nameYMMReg(rG) );
+   }
+
+   IRTemp zeroVec = newTemp(Ity_V128);
+   assign( zeroVec, IRExpr_Const( IRConst_V128(0) ) );
+
+   /* Two interleaves with zero take 8-bit lanes to zero-extended
+      32-bit lanes; HI/LO select the upper/lower four bytes for the
+      two halves of the YMM result. */
+   IRExpr* res
+      = binop( Iop_V128HLtoV256,
+               binop(Iop_InterleaveHI8x16,
+                     mkexpr(zeroVec),
+                     binop(Iop_InterleaveLO8x16,
+                           mkexpr(zeroVec), mkexpr(srcVec)) ),
+               binop(Iop_InterleaveLO8x16,
+                     mkexpr(zeroVec),
+                     binop(Iop_InterleaveLO8x16,
+                           mkexpr(zeroVec), mkexpr(srcVec)) ) );
+   if (!xIsZ)
+      /* For sign extension, shift each 32-bit lane left then
+         arithmetically right by 24 to replicate the byte's sign bit. */
+      res = binop(Iop_SarN32x8,
+                  binop(Iop_ShlN32x8, res, mkU8(24)), mkU8(24));
+
+   putYMMReg ( rG, res );
+
+   return delta;
+}
+
+
+/* Handles 128 bit versions of PMOVSXBQ: sign extends the two bytes
+   held in the low 16 bits of xmm2/m16 into the two 64-bit lanes of
+   the destination.  Returns the updated instruction offset (delta). */
+static Long dis_PMOVSXBQ_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   IRTemp addr     = IRTemp_INVALID;
+   Int    alen     = 0;
+   HChar  dis_buf[50];
+   IRTemp srcBytes = newTemp(Ity_I16);
+   UChar  modrm    = getUChar(delta);
+   const HChar* mbV = isAvx ? "v" : "";
+   UInt   rG       = gregOfRexRM(pfx, modrm);
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      /* Register source: only the low 16 bits of the XMM reg are used. */
+      assign( srcBytes, getXMMRegLane16( rE, 0 ) );
+      delta += 1;
+      DIP( "%spmovsxbq %s,%s\n", mbV, nameXMMReg(rE), nameXMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcBytes, loadLE( Ity_I16, mkexpr(addr) ) );
+      delta += alen;
+      DIP( "%spmovsxbq %s,%s\n", mbV, dis_buf, nameXMMReg(rG) );
+   }
+
+   /* High byte -> upper 64-bit lane, low byte -> lower lane, each
+      sign extended with Iop_8Sto64. */
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)
+      ( rG, binop( Iop_64HLtoV128,
+                   unop( Iop_8Sto64,
+                         unop( Iop_16HIto8, mkexpr(srcBytes) ) ),
+                   unop( Iop_8Sto64,
+                         unop( Iop_16to8, mkexpr(srcBytes) ) ) ) );
+   return delta;
+}
+
+
+/* Handles 256 bit versions of PMOVSXBQ (VPMOVSXBQ ymm,xmm/m32): sign
+   extends the four bytes held in the low 32 bits of the source into
+   the four 64-bit lanes of ymm.  Returns the updated instruction
+   offset (delta). */
+static Long dis_PMOVSXBQ_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta )
+{
+   IRTemp addr     = IRTemp_INVALID;
+   Int    alen     = 0;
+   HChar  dis_buf[50];
+   IRTemp srcBytes = newTemp(Ity_I32);
+   UChar  modrm    = getUChar(delta);
+   UInt   rG       = gregOfRexRM(pfx, modrm);
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      /* Register source: only the low 32 bits of the XMM reg are used. */
+      assign( srcBytes, getXMMRegLane32( rE, 0 ) );
+      delta += 1;
+      DIP( "vpmovsxbq %s,%s\n", nameXMMReg(rE), nameYMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcBytes, loadLE( Ity_I32, mkexpr(addr) ) );
+      delta += alen;
+      DIP( "vpmovsxbq %s,%s\n", dis_buf, nameYMMReg(rG) );
+   }
+
+   /* Pick each of the four source bytes out of the 32-bit value with
+      32{HI,}to16 / 16{HI,}to8 and sign extend it to its 64-bit lane
+      (byte 3 ends up in the most significant lane). */
+   putYMMReg
+      ( rG, binop( Iop_V128HLtoV256,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_8Sto64,
+                                unop( Iop_16HIto8,
+                                      unop( Iop_32HIto16,
+                                            mkexpr(srcBytes) ) ) ),
+                          unop( Iop_8Sto64,
+                                unop( Iop_16to8,
+                                      unop( Iop_32HIto16,
+                                            mkexpr(srcBytes) ) ) ) ),
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_8Sto64,
+                                unop( Iop_16HIto8,
+                                      unop( Iop_32to16,
+                                            mkexpr(srcBytes) ) ) ),
+                          unop( Iop_8Sto64,
+                                unop( Iop_16to8,
+                                      unop( Iop_32to16,
+                                            mkexpr(srcBytes) ) ) ) ) ) );
+   return delta;
+}
+
+
+/* Handles 128 bit versions of PMOVZXBQ: zero extends the two bytes
+   held in the low 16 bits of xmm2/m16 into the two 64-bit lanes of
+   the destination.  Returns the updated instruction offset (delta). */
+static Long dis_PMOVZXBQ_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta, Bool isAvx )
+{
+   IRTemp addr     = IRTemp_INVALID;
+   Int    alen     = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec   = newTemp(Ity_V128);
+   UChar  modrm    = getUChar(delta);
+   const HChar* mbV = isAvx ? "v" : "";
+   UInt   rG       = gregOfRexRM(pfx, modrm);
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "%spmovzxbq %s,%s\n", mbV, nameXMMReg(rE), nameXMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec, 
+              unop( Iop_32UtoV128, 
+                    unop( Iop_16Uto32, loadLE( Ity_I16, mkexpr(addr) ))));
+      delta += alen;
+      DIP( "%spmovzxbq %s,%s\n", mbV, dis_buf, nameXMMReg(rG) );
+   }
+
+   IRTemp zeroVec = newTemp(Ity_V128);
+   assign( zeroVec, IRExpr_Const( IRConst_V128(0) ) );
+
+   /* Three interleaves with zero take 8-bit lanes to zero-extended
+      64-bit lanes (8 -> 16 -> 32 -> 64). */
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)
+      ( rG, binop( Iop_InterleaveLO8x16, 
+                   mkexpr(zeroVec), 
+                   binop( Iop_InterleaveLO8x16, 
+                          mkexpr(zeroVec), 
+                          binop( Iop_InterleaveLO8x16, 
+                                 mkexpr(zeroVec), mkexpr(srcVec) ) ) ) );
+   return delta;
+}
+
+
+/* Handles 256 bit versions of PMOVZXBQ (VPMOVZXBQ ymm,xmm/m32): zero
+   extends the four bytes held in the low 32 bits of the source into
+   the four 64-bit lanes of ymm.  Returns the updated instruction
+   offset (delta). */
+static Long dis_PMOVZXBQ_256 ( const VexAbiInfo* vbi, Prefix pfx,
+                               Long delta )
+{
+   IRTemp addr     = IRTemp_INVALID;
+   Int    alen     = 0;
+   HChar  dis_buf[50];
+   IRTemp srcVec   = newTemp(Ity_V128);
+   UChar  modrm    = getUChar(delta);
+   UInt   rG       = gregOfRexRM(pfx, modrm);
+   if ( epartIsReg(modrm) ) {
+      UInt rE = eregOfRexRM(pfx, modrm);
+      assign( srcVec, getXMMReg(rE) );
+      delta += 1;
+      DIP( "vpmovzxbq %s,%s\n", nameXMMReg(rE), nameYMMReg(rG) );
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( srcVec,
+              unop( Iop_32UtoV128, loadLE( Ity_I32, mkexpr(addr) )));
+      delta += alen;
+      DIP( "vpmovzxbq %s,%s\n", dis_buf, nameYMMReg(rG) );
+   }
+
+   IRTemp zeroVec = newTemp(Ity_V128);
+   assign( zeroVec, IRExpr_Const( IRConst_V128(0) ) );
+
+   /* Three interleaves with zero take 8-bit lanes to zero-extended
+      64-bit lanes; the final HI/LO pair selects the upper/lower two
+      bytes for the two halves of the YMM result. */
+   putYMMReg
+      ( rG, binop( Iop_V128HLtoV256,
+                   binop( Iop_InterleaveHI8x16,
+                          mkexpr(zeroVec),
+                          binop( Iop_InterleaveLO8x16,
+                                 mkexpr(zeroVec),
+                                 binop( Iop_InterleaveLO8x16,
+                                        mkexpr(zeroVec), mkexpr(srcVec) ) ) ),
+                   binop( Iop_InterleaveLO8x16,
+                          mkexpr(zeroVec),
+                          binop( Iop_InterleaveLO8x16,
+                                 mkexpr(zeroVec),
+                                 binop( Iop_InterleaveLO8x16,
+                                        mkexpr(zeroVec), mkexpr(srcVec) ) ) )
+                 ) );
+   return delta;
+}
+
+
+/* Handles 128 bit versions of PHMINPOSUW (and VPHMINPOSUW).  The
+   actual computation is delegated to the clean helper
+   amd64g_calculate_sse_phminposuw, called on the two 64-bit halves of
+   the source; its 64-bit result lands in the low half of the
+   destination, with the upper half zeroed.  Returns the updated
+   instruction offset (delta). */
+static Long dis_PHMINPOSUW_128 ( const VexAbiInfo* vbi, Prefix pfx,
+                                 Long delta, Bool isAvx )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   UChar  modrm  = getUChar(delta);
+   const HChar* mbV = isAvx ? "v" : "";
+   IRTemp sV     = newTemp(Ity_V128);
+   IRTemp sHi    = newTemp(Ity_I64);
+   IRTemp sLo    = newTemp(Ity_I64);
+   IRTemp dLo    = newTemp(Ity_I64);
+   UInt   rG     = gregOfRexRM(pfx,modrm);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( sV, getXMMReg(rE) );
+      delta += 1;
+      DIP("%sphminposuw %s,%s\n", mbV, nameXMMReg(rE), nameXMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      /* Only the legacy SSE form requires 16-alignment of the memory
+         operand. */
+      if (!isAvx)
+         gen_SEGV_if_not_16_aligned(addr);
+      assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+      delta += alen;
+      DIP("%sphminposuw %s,%s\n", mbV, dis_buf, nameXMMReg(rG));
+   }
+   assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+   assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+   assign( dLo, mkIRExprCCall(
+                   Ity_I64, 0/*regparms*/,
+                   "amd64g_calculate_sse_phminposuw", 
+                   &amd64g_calculate_sse_phminposuw,
+                   mkIRExprVec_2( mkexpr(sLo), mkexpr(sHi) )
+         ));
+   /* Iop_64UtoV128 zeroes the upper 64 bits of the destination. */
+   (isAvx ? putYMMRegLoAndZU : putXMMReg)
+      (rG, unop(Iop_64UtoV128, mkexpr(dLo)));
+   return delta;
+}
+
+
+/* Handles AESENC (0xDC), AESENCLAST (0xDD), AESDEC (0xDE), AESDECLAST
+   (0xDF) and AESIMC (0xDB), plus their AVX forms.  The computation is
+   done by a dirty helper operating directly on the guest XMM state;
+   for a memory operand the value is first parked in the scratch
+   pseudo-register XMM16.  Returns the updated instruction offset
+   (delta). */
+static Long dis_AESx ( const VexAbiInfo* vbi, Prefix pfx,
+                       Long delta, Bool isAvx, UChar opc )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   UChar  modrm  = getUChar(delta);
+   UInt   rG     = gregOfRexRM(pfx, modrm);
+   UInt   regNoL = 0;
+   /* For the AVX 3-operand forms (all except AESIMC) the second
+      source comes from the VEX.vvvv field; otherwise it is rG. */
+   UInt   regNoR = (isAvx && opc != 0xDB) ? getVexNvvvv(pfx) : rG;
+
+   /* This is a nasty kludge.  We need to pass 2 x V128 to the
+      helper.  Since we can't do that, use a dirty
+      helper to compute the results directly from the XMM regs in
+      the guest state.  That means for the memory case, we need to
+      move the left operand into a pseudo-register (XMM16, let's
+      call it). */
+   if (epartIsReg(modrm)) {
+      regNoL = eregOfRexRM(pfx, modrm);
+      delta += 1;
+   } else {
+      regNoL = 16; /* use XMM16 as an intermediary */
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+      /* alignment check needed ???? */
+      stmt( IRStmt_Put( OFFB_YMM16, loadLE(Ity_V128, mkexpr(addr)) ));
+      delta += alen;
+   }
+
+   void*  fn = &amd64g_dirtyhelper_AES;
+   const HChar* nm = "amd64g_dirtyhelper_AES";
+
+   /* Round up the arguments.  Note that this is a kludge -- the
+      use of mkU64 rather than mkIRExpr_HWord implies the
+      assumption that the host's word size is 64-bit. */
+   UInt gstOffD = ymmGuestRegOffset(rG);
+   UInt gstOffL = regNoL == 16 ? OFFB_YMM16 : ymmGuestRegOffset(regNoL);
+   UInt gstOffR = ymmGuestRegOffset(regNoR);
+   IRExpr*  opc4         = mkU64(opc);
+   IRExpr*  gstOffDe     = mkU64(gstOffD);
+   IRExpr*  gstOffLe     = mkU64(gstOffL);
+   IRExpr*  gstOffRe     = mkU64(gstOffR);
+   IRExpr** args
+      = mkIRExprVec_5( IRExpr_BBPTR(), opc4, gstOffDe, gstOffLe, gstOffRe );
+
+   IRDirty* d    = unsafeIRDirty_0_N( 0/*regparms*/, nm, fn, args );
+   /* It's not really a dirty call, but we can't use the clean helper
+      mechanism here for the very lame reason that we can't pass 2 x
+      V128s by value to a helper.  Hence this roundabout scheme. */
+   d->nFxState = 2;
+   vex_bzero(&d->fxState, sizeof(d->fxState));
+   /* AES{ENC,ENCLAST,DEC,DECLAST} read both registers, and writes
+      the second for !isAvx or the third for isAvx.
+      AESIMC (0xDB) reads the first register, and writes the second. */
+   d->fxState[0].fx     = Ifx_Read;
+   d->fxState[0].offset = gstOffL;
+   d->fxState[0].size   = sizeof(U128);
+   d->fxState[1].offset = gstOffR;
+   d->fxState[1].size   = sizeof(U128);
+   if (opc == 0xDB)
+      d->fxState[1].fx   = Ifx_Write;
+   else if (!isAvx || rG == regNoR)
+      d->fxState[1].fx   = Ifx_Modify;
+   else {
+      /* AVX 3-operand case with a distinct destination: the second
+         source is only read, and rG is purely written. */
+      d->fxState[1].fx     = Ifx_Read;
+      d->nFxState++;
+      d->fxState[2].fx     = Ifx_Write;
+      d->fxState[2].offset = gstOffD; 
+      d->fxState[2].size   = sizeof(U128);
+   }
+
+   stmt( IRStmt_Dirty(d) );
+   {
+      const HChar* opsuf;
+      switch (opc) {
+         case 0xDC: opsuf = "enc"; break;
+         case 0xDD: opsuf = "enclast"; break;
+         case 0xDE: opsuf = "dec"; break;
+         case 0xDF: opsuf = "declast"; break;
+         case 0xDB: opsuf = "imc"; break;
+         default: vassert(0);
+      }
+      DIP("%saes%s %s,%s%s%s\n", isAvx ? "v" : "", opsuf, 
+          (regNoL == 16 ? dis_buf : nameXMMReg(regNoL)),
+          nameXMMReg(regNoR),
+          (isAvx && opc != 0xDB) ? "," : "",
+          (isAvx && opc != 0xDB) ? nameXMMReg(rG) : "");
+   }
+   /* AVX semantics: zero the upper 128 bits of the destination YMM. */
+   if (isAvx)
+      putYMMRegLane128( rG, 1, mkV128(0) );
+   return delta;
+}
+
+/* Handles AESKEYGENASSIST (and VAESKEYGENASSIST).  As with the other
+   AES instructions, the work is done by a dirty helper operating
+   directly on the guest XMM state, with XMM16 standing in for a
+   memory operand.  Returns the updated instruction offset (delta). */
+static Long dis_AESKEYGENASSIST ( const VexAbiInfo* vbi, Prefix pfx,
+                                  Long delta, Bool isAvx )
+{
+   IRTemp addr   = IRTemp_INVALID;
+   Int    alen   = 0;
+   HChar  dis_buf[50];
+   UChar  modrm  = getUChar(delta);
+   UInt   regNoL = 0;
+   UInt   regNoR = gregOfRexRM(pfx, modrm);
+   UChar  imm    = 0;   /* trailing 8-bit immediate */
+
+   /* This is a nasty kludge.  See AESENC et al. instructions. */
+   if (epartIsReg(modrm)) {
+      regNoL = eregOfRexRM(pfx, modrm);
+      imm = getUChar(delta+1);
+      delta += 1+1;
+   } else {
+      regNoL = 16; /* use XMM16 as an intermediary */
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+      /* alignment check ???? . */
+      stmt( IRStmt_Put( OFFB_YMM16, loadLE(Ity_V128, mkexpr(addr)) ));
+      imm = getUChar(delta+alen);
+      delta += alen+1;
+   }
+
+   /* Who ya gonna call?  Presumably not Ghostbusters. */
+   void*  fn = &amd64g_dirtyhelper_AESKEYGENASSIST;
+   const HChar* nm = "amd64g_dirtyhelper_AESKEYGENASSIST";
+
+   /* Round up the arguments.  Note that this is a kludge -- the
+      use of mkU64 rather than mkIRExpr_HWord implies the
+      assumption that the host's word size is 64-bit. */
+   UInt gstOffL = regNoL == 16 ? OFFB_YMM16 : ymmGuestRegOffset(regNoL);
+   UInt gstOffR = ymmGuestRegOffset(regNoR);
+
+   IRExpr*  imme         = mkU64(imm & 0xFF);
+   IRExpr*  gstOffLe     = mkU64(gstOffL);
+   IRExpr*  gstOffRe     = mkU64(gstOffR);
+   IRExpr** args
+      = mkIRExprVec_4( IRExpr_BBPTR(), imme, gstOffLe, gstOffRe );
+
+   IRDirty* d    = unsafeIRDirty_0_N( 0/*regparms*/, nm, fn, args );
+   /* It's not really a dirty call, but we can't use the clean helper
+      mechanism here for the very lame reason that we can't pass 2 x
+      V128s by value to a helper.  Hence this roundabout scheme. */
+   d->nFxState = 2;
+   vex_bzero(&d->fxState, sizeof(d->fxState));
+   d->fxState[0].fx     = Ifx_Read;
+   d->fxState[0].offset = gstOffL;
+   d->fxState[0].size   = sizeof(U128);
+   d->fxState[1].fx     = Ifx_Write;
+   d->fxState[1].offset = gstOffR;
+   d->fxState[1].size   = sizeof(U128);
+   stmt( IRStmt_Dirty(d) );
+
+   DIP("%saeskeygenassist $%x,%s,%s\n", isAvx ? "v" : "", (UInt)imm,
+       (regNoL == 16 ? dis_buf : nameXMMReg(regNoL)),
+       nameXMMReg(regNoR));
+   /* AVX semantics: zero the upper 128 bits of the destination YMM. */
+   if (isAvx)
+      putYMMRegLane128( regNoR, 1, mkV128(0) );
+   return delta;
+}
+
+
+__attribute__((noinline))
+static
+Long dis_ESC_0F38__SSE4 ( Bool* decode_OK,
+                          const VexAbiInfo* vbi,
+                          Prefix pfx, Int sz, Long deltaIN )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   UChar  modrm = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   *decode_OK = False;
+
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   switch (opc) {
+
+   case 0x10:
+   case 0x14:
+   case 0x15:
+      /* 66 0F 38 10 /r = PBLENDVB xmm1, xmm2/m128  (byte gran)
+         66 0F 38 14 /r = BLENDVPS xmm1, xmm2/m128  (float gran)
+         66 0F 38 15 /r = BLENDVPD xmm1, xmm2/m128  (double gran)
+         Blend at various granularities, with XMM0 (implicit operand)
+         providing the controlling mask.
+      */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         modrm = getUChar(delta);
+
+         const HChar* nm    = NULL;
+         UInt   gran  = 0;
+         IROp   opSAR = Iop_INVALID;
+         switch (opc) {
+            case 0x10:
+               nm = "pblendvb"; gran = 1; opSAR = Iop_SarN8x16;
+               break;
+            case 0x14:
+               nm = "blendvps"; gran = 4; opSAR = Iop_SarN32x4;
+               break;
+            case 0x15:
+               nm = "blendvpd"; gran = 8; opSAR = Iop_SarN64x2;
+               break;
+         }
+         vassert(nm);
+
+         IRTemp vecE = newTemp(Ity_V128);
+         IRTemp vecG = newTemp(Ity_V128);
+         IRTemp vec0 = newTemp(Ity_V128);
+
+         if ( epartIsReg(modrm) ) {
+            assign(vecE, getXMMReg(eregOfRexRM(pfx, modrm)));
+            delta += 1;
+            DIP( "%s %s,%s\n", nm,
+                 nameXMMReg( eregOfRexRM(pfx, modrm) ),
+                 nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign(vecE, loadLE( Ity_V128, mkexpr(addr) ));
+            delta += alen;
+            DIP( "%s %s,%s\n", nm,
+                 dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         }
+
+         assign(vecG, getXMMReg(gregOfRexRM(pfx, modrm)));
+         assign(vec0, getXMMReg(0));
+
+         IRTemp res = math_PBLENDVB_128( vecE, vecG, vec0, gran, opSAR );
+         putXMMReg(gregOfRexRM(pfx, modrm), mkexpr(res));
+
+         goto decode_success;
+      }
+      break;
+
+   case 0x17:
+      /* 66 0F 38 17 /r = PTEST xmm1, xmm2/m128
+         Logical compare (set ZF and CF from AND/ANDN of the operands) */
+      if (have66noF2noF3(pfx)
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         delta = dis_xTESTy_128( vbi, pfx, delta, False/*!isAvx*/, 0 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x20:
+      /* 66 0F 38 20 /r = PMOVSXBW xmm1, xmm2/m64 
+         Packed Move with Sign Extend from Byte to Word (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVxXBW_128( vbi, pfx, delta,
+                                   False/*!isAvx*/, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x21:
+      /* 66 0F 38 21 /r = PMOVSXBD xmm1, xmm2/m32 
+         Packed Move with Sign Extend from Byte to DWord (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVxXBD_128( vbi, pfx, delta,
+                                   False/*!isAvx*/, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x22:
+      /* 66 0F 38 22 /r = PMOVSXBQ xmm1, xmm2/m16
+         Packed Move with Sign Extend from Byte to QWord (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVSXBQ_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x23:
+      /* 66 0F 38 23 /r = PMOVSXWD xmm1, xmm2/m64 
+         Packed Move with Sign Extend from Word to DWord (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVxXWD_128(vbi, pfx, delta,
+                                  False/*!isAvx*/, False/*!xIsZ*/);
+         goto decode_success;
+      }
+      break;
+
+   case 0x24:
+      /* 66 0F 38 24 /r = PMOVSXWQ xmm1, xmm2/m32
+         Packed Move with Sign Extend from Word to QWord (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVSXWQ_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x25:
+      /* 66 0F 38 25 /r = PMOVSXDQ xmm1, xmm2/m64
+         Packed Move with Sign Extend from Double Word to Quad Word (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVxXDQ_128( vbi, pfx, delta,
+                                   False/*!isAvx*/, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x28:
+      /* 66 0F 38 28 = PMULDQ -- signed widening multiply of 32-lanes
+         0 x 0 to form lower 64-bit half and lanes 2 x 2 to form upper
+         64-bit half */
+      /* This is a really poor translation -- could be improved if
+         performance critical.  It's a copy-paste of PMULUDQ, too. */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         IRTemp sV = newTemp(Ity_V128);
+         IRTemp dV = newTemp(Ity_V128);
+         modrm = getUChar(delta);
+         UInt rG = gregOfRexRM(pfx,modrm);
+         assign( dV, getXMMReg(rG) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            delta += 1;
+            DIP("pmuldq %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("pmuldq %s,%s\n", dis_buf, nameXMMReg(rG));
+         }
+
+         putXMMReg( rG, mkexpr(math_PMULDQ_128( dV, sV )) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x29:
+      /* 66 0F 38 29 = PCMPEQQ
+         64x2 equality comparison */
+      if (have66noF2noF3(pfx) && sz == 2) { 
+         /* FIXME: this needs an alignment check */
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta, 
+                                    "pcmpeqq", Iop_CmpEQ64x2, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x2A:
+      /* 66 0F 38 2A = MOVNTDQA
+         "non-temporal" "streaming" load
+         Handle like MOVDQA but only memory operand is allowed */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            putXMMReg( gregOfRexRM(pfx,modrm),
+                       loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("movntdqa %s,%s\n", dis_buf,
+                                    nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+            goto decode_success;
+         }
+      }
+      break;
+
+   case 0x2B:
+      /* 66 0f 38 2B /r = PACKUSDW xmm1, xmm2/m128
+         2x 32x4 S->U saturating narrow from xmm2/m128 to xmm1 */
+      if (have66noF2noF3(pfx) && sz == 2) {
+  
+         modrm = getUChar(delta);
+
+         IRTemp argL = newTemp(Ity_V128);
+         IRTemp argR = newTemp(Ity_V128);
+
+         if ( epartIsReg(modrm) ) {
+            assign( argL, getXMMReg( eregOfRexRM(pfx, modrm) ) );
+            delta += 1;
+            DIP( "packusdw %s,%s\n",
+                 nameXMMReg( eregOfRexRM(pfx, modrm) ),
+                 nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( argL, loadLE( Ity_V128, mkexpr(addr) ));
+            delta += alen;
+            DIP( "packusdw %s,%s\n",
+                 dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         }
+
+         assign(argR, getXMMReg( gregOfRexRM(pfx, modrm) ));
+
+         putXMMReg( gregOfRexRM(pfx, modrm), 
+                    binop( Iop_QNarrowBin32Sto16Ux8,
+                           mkexpr(argL), mkexpr(argR)) );
+
+         goto decode_success;
+      }
+      break;
+
+   case 0x30:
+      /* 66 0F 38 30 /r = PMOVZXBW xmm1, xmm2/m64 
+         Packed Move with Zero Extend from Byte to Word (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVxXBW_128( vbi, pfx, delta,
+                                   False/*!isAvx*/, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x31:
+      /* 66 0F 38 31 /r = PMOVZXBD xmm1, xmm2/m32 
+         Packed Move with Zero Extend from Byte to DWord (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVxXBD_128( vbi, pfx, delta,
+                                   False/*!isAvx*/, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x32:
+      /* 66 0F 38 32 /r = PMOVZXBQ xmm1, xmm2/m16
+         Packed Move with Zero Extend from Byte to QWord (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVZXBQ_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x33:
+      /* 66 0F 38 33 /r = PMOVZXWD xmm1, xmm2/m64 
+         Packed Move with Zero Extend from Word to DWord (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVxXWD_128( vbi, pfx, delta,
+                                   False/*!isAvx*/, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x34:
+      /* 66 0F 38 34 /r = PMOVZXWQ xmm1, xmm2/m32
+         Packed Move with Zero Extend from Word to QWord (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVZXWQ_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x35:
+      /* 66 0F 38 35 /r = PMOVZXDQ xmm1, xmm2/m64
+         Packed Move with Zero Extend from DWord to QWord (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PMOVxXDQ_128( vbi, pfx, delta,
+                                   False/*!isAvx*/, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x37:
+      /* 66 0F 38 37 = PCMPGTQ
+         64x2 comparison (signed, presumably; the Intel docs don't say :-)
+      */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         /* FIXME: this needs an alignment check */
+         delta = dis_SSEint_E_to_G( vbi, pfx, delta,
+                                    "pcmpgtq", Iop_CmpGT64Sx2, False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x38:
+   case 0x3C:
+      /* 66 0F 38 38 /r = PMINSB xmm1, xmm2/m128    8Sx16 (signed) min
+         66 0F 38 3C /r = PMAXSB xmm1, xmm2/m128    8Sx16 (signed) max
+      */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         /* FIXME: this needs an alignment check */
+         Bool isMAX = opc == 0x3C;
+         delta = dis_SSEint_E_to_G(
+                    vbi, pfx, delta, 
+                    isMAX ? "pmaxsb" : "pminsb",
+                    isMAX ? Iop_Max8Sx16 : Iop_Min8Sx16,
+                    False
+                 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x39:
+   case 0x3D:
+      /* 66 0F 38 39 /r = PMINSD xmm1, xmm2/m128
+         Minimum of Packed Signed Double Word Integers (XMM)
+         66 0F 38 3D /r = PMAXSD xmm1, xmm2/m128
+         Maximum of Packed Signed Double Word Integers (XMM) 
+      */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         /* FIXME: this needs an alignment check */
+         Bool isMAX = opc == 0x3D;
+         delta = dis_SSEint_E_to_G(
+                    vbi, pfx, delta, 
+                    isMAX ? "pmaxsd" : "pminsd",
+                    isMAX ? Iop_Max32Sx4 : Iop_Min32Sx4,
+                    False
+                 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x3A:
+   case 0x3E:
+      /* 66 0F 38 3A /r = PMINUW xmm1, xmm2/m128
+         Minimum of Packed Unsigned Word Integers (XMM)
+         66 0F 38 3E /r = PMAXUW xmm1, xmm2/m128
+         Maximum of Packed Unsigned Word Integers (XMM)
+      */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         /* FIXME: this needs an alignment check */
+         Bool isMAX = opc == 0x3E;
+         delta = dis_SSEint_E_to_G(
+                    vbi, pfx, delta, 
+                    isMAX ? "pmaxuw" : "pminuw",
+                    isMAX ? Iop_Max16Ux8 : Iop_Min16Ux8,
+                    False
+                 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x3B:
+   case 0x3F:
+      /* 66 0F 38 3B /r = PMINUD xmm1, xmm2/m128
+         Minimum of Packed Unsigned Doubleword Integers (XMM)
+         66 0F 38 3F /r = PMAXUD xmm1, xmm2/m128
+         Maximum of Packed Unsigned Doubleword Integers (XMM)
+      */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         /* FIXME: this needs an alignment check */
+         Bool isMAX = opc == 0x3F;
+         delta = dis_SSEint_E_to_G(
+                    vbi, pfx, delta, 
+                    isMAX ? "pmaxud" : "pminud",
+                    isMAX ? Iop_Max32Ux4 : Iop_Min32Ux4,
+                    False
+                 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x40:
+      /* 66 0F 38 40 /r = PMULLD xmm1, xmm2/m128
+         32x4 integer multiply from xmm2/m128 to xmm1 */
+      if (have66noF2noF3(pfx) && sz == 2) {
+  
+         modrm = getUChar(delta);
+
+         IRTemp argL = newTemp(Ity_V128);
+         IRTemp argR = newTemp(Ity_V128);
+
+         if ( epartIsReg(modrm) ) {
+            assign( argL, getXMMReg( eregOfRexRM(pfx, modrm) ) );
+            delta += 1;
+            DIP( "pmulld %s,%s\n",
+                 nameXMMReg( eregOfRexRM(pfx, modrm) ),
+                 nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( argL, loadLE( Ity_V128, mkexpr(addr) ));
+            delta += alen;
+            DIP( "pmulld %s,%s\n",
+                 dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         }
+
+         assign(argR, getXMMReg( gregOfRexRM(pfx, modrm) ));
+
+         putXMMReg( gregOfRexRM(pfx, modrm), 
+                    binop( Iop_Mul32x4, mkexpr(argL), mkexpr(argR)) );
+
+         goto decode_success;
+      }
+      break;
+
+   case 0x41:
+      /* 66 0F 38 41 /r = PHMINPOSUW xmm1, xmm2/m128
+         Packed Horizontal Word Minimum from xmm2/m128 to xmm1 */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PHMINPOSUW_128( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      } 
+      break;
+
+   case 0xDC:
+   case 0xDD:
+   case 0xDE:
+   case 0xDF:
+   case 0xDB:
+      /* 66 0F 38 DC /r = AESENC xmm1, xmm2/m128
+                  DD /r = AESENCLAST xmm1, xmm2/m128
+                  DE /r = AESDEC xmm1, xmm2/m128
+                  DF /r = AESDECLAST xmm1, xmm2/m128
+
+                  DB /r = AESIMC xmm1, xmm2/m128 */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_AESx( vbi, pfx, delta, False/*!isAvx*/, opc );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF0:
+   case 0xF1:
+      /* F2 0F 38 F0 /r = CRC32 r/m8, r32 (REX.W ok, 66 not ok)
+         F2 0F 38 F1 /r = CRC32 r/m{16,32,64}, r32
+         The decoding on this is a bit unusual.
+      */
+      if (haveF2noF3(pfx)
+          && (opc == 0xF1 || (opc == 0xF0 && !have66(pfx)))) {
+         modrm = getUChar(delta);
+
+         if (opc == 0xF0) 
+            sz = 1;
+         else
+            vassert(sz == 2 || sz == 4 || sz == 8);
+
+         IRType tyE = szToITy(sz);
+         IRTemp valE = newTemp(tyE);
+
+         if (epartIsReg(modrm)) {
+            assign(valE, getIRegE(sz, pfx, modrm));
+            delta += 1;
+            DIP("crc32b %s,%s\n", nameIRegE(sz, pfx, modrm),
+                nameIRegG(1==getRexW(pfx) ? 8 : 4, pfx, modrm));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign(valE, loadLE(tyE, mkexpr(addr)));
+            delta += alen;
+            DIP("crc32b %s,%s\n", dis_buf,
+                nameIRegG(1==getRexW(pfx) ? 8 : 4, pfx, modrm));
+         }
+
+         /* Somewhat funny getting/putting of the crc32 value, in order
+            to ensure that it turns into 64-bit gets and puts.  However,
+            mask off the upper 32 bits so as to not get memcheck false
+            +ves around the helper call. */
+         IRTemp valG0 = newTemp(Ity_I64);
+         assign(valG0, binop(Iop_And64, getIRegG(8, pfx, modrm),
+                             mkU64(0xFFFFFFFF)));
+
+         const HChar* nm = NULL;
+         void*  fn = NULL;
+         switch (sz) {
+            case 1: nm = "amd64g_calc_crc32b";
+                    fn = &amd64g_calc_crc32b; break;
+            case 2: nm = "amd64g_calc_crc32w";
+                    fn = &amd64g_calc_crc32w; break;
+            case 4: nm = "amd64g_calc_crc32l";
+                    fn = &amd64g_calc_crc32l; break;
+            case 8: nm = "amd64g_calc_crc32q";
+                    fn = &amd64g_calc_crc32q; break;
+         }
+         vassert(nm && fn);
+         IRTemp valG1 = newTemp(Ity_I64);
+         assign(valG1,
+                mkIRExprCCall(Ity_I64, 0/*regparm*/, nm, fn, 
+                              mkIRExprVec_2(mkexpr(valG0),
+                                            widenUto64(mkexpr(valE)))));
+
+         putIRegG(4, pfx, modrm, unop(Iop_64to32, mkexpr(valG1)));
+         goto decode_success;
+      }
+      break;
+
+   default:
+      break;
+
+   }
+
+  //decode_failure:
+   *decode_OK = False;
+   return deltaIN;
+
+  decode_success:
+   *decode_OK = True;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level SSE4: dis_ESC_0F3A__SSE4                   ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble [V]PEXTRW (0F 3A escape form): extract the 16-bit lane
+   of XMM reg G selected by imm8[2:0] and write it zero-extended to a
+   32-bit GPR, or store the bare 16 bits to memory.  Returns the
+   updated instruction offset (delta).  isAvx only changes the "v"
+   prefix in the disassembly printout. */
+static Long dis_PEXTRW ( const VexAbiInfo* vbi, Prefix pfx,
+                         Long delta, Bool isAvx )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   IRTemp t0    = IRTemp_INVALID;
+   IRTemp t1    = IRTemp_INVALID;
+   IRTemp t2    = IRTemp_INVALID;
+   IRTemp t3    = IRTemp_INVALID;
+   UChar  modrm = getUChar(delta);
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   Int    imm8_20;
+   IRTemp xmm_vec = newTemp(Ity_V128);
+   IRTemp d16   = newTemp(Ity_I16);
+   const HChar* mbV = isAvx ? "v" : "";
+
+   vassert(0==getRexW(pfx)); /* ensured by caller */
+   assign( xmm_vec, getXMMReg(rG) );
+   /* Split the 128-bit source into four 32-bit pieces, t3 (most
+      significant) down to t0 (least significant). */
+   breakupV128to32s( xmm_vec, &t3, &t2, &t1, &t0 );
+
+   /* The imm8 byte follows the modrm (and any SIB/disp) bytes; only
+      its low 3 bits select the word lane. */
+   if ( epartIsReg( modrm ) ) {
+      imm8_20 = (Int)(getUChar(delta+1) & 7);
+   } else { 
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+      imm8_20 = (Int)(getUChar(delta+alen) & 7);
+   }
+
+   /* Pick the selected 16-bit lane out of the 32-bit pieces. */
+   switch (imm8_20) {
+      case 0:  assign(d16, unop(Iop_32to16,   mkexpr(t0))); break;
+      case 1:  assign(d16, unop(Iop_32HIto16, mkexpr(t0))); break;
+      case 2:  assign(d16, unop(Iop_32to16,   mkexpr(t1))); break;
+      case 3:  assign(d16, unop(Iop_32HIto16, mkexpr(t1))); break;
+      case 4:  assign(d16, unop(Iop_32to16,   mkexpr(t2))); break;
+      case 5:  assign(d16, unop(Iop_32HIto16, mkexpr(t2))); break;
+      case 6:  assign(d16, unop(Iop_32to16,   mkexpr(t3))); break;
+      case 7:  assign(d16, unop(Iop_32HIto16, mkexpr(t3))); break;
+      default: vassert(0);
+   }
+
+   /* Write back: zero-extended into a 32-bit GPR, or a 16-bit store.
+      delta advances past the modrm/amode plus the imm8 byte. */
+   if ( epartIsReg( modrm ) ) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      putIReg32( rE, unop(Iop_16Uto32, mkexpr(d16)) );
+      delta += 1+1;
+      DIP( "%spextrw $%d, %s,%s\n", mbV, imm8_20,
+           nameXMMReg( rG ), nameIReg32( rE ) );
+   } else {
+      storeLE( mkexpr(addr), mkexpr(d16) );
+      delta += alen+1;
+      DIP( "%spextrw $%d, %s,%s\n", mbV, imm8_20, nameXMMReg( rG ), dis_buf );
+   }
+   return delta;
+}
+
+
+/* Disassemble [V]PEXTRD: extract the 32-bit lane of XMM reg G
+   selected by imm8[1:0] and write it to a 32-bit GPR or to memory.
+   Returns the updated instruction offset (delta).  isAvx only
+   affects the printed mnemonic prefix. */
+static Long dis_PEXTRD ( const VexAbiInfo* vbi, Prefix pfx,
+                         Long delta, Bool isAvx )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   IRTemp t0    = IRTemp_INVALID;
+   IRTemp t1    = IRTemp_INVALID;
+   IRTemp t2    = IRTemp_INVALID;
+   IRTemp t3    = IRTemp_INVALID;
+   UChar  modrm = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   Int    imm8_10;
+   IRTemp xmm_vec   = newTemp(Ity_V128);
+   IRTemp src_dword = newTemp(Ity_I32);
+   const HChar* mbV = isAvx ? "v" : "";
+
+   vassert(0==getRexW(pfx)); /* ensured by caller */
+   modrm = getUChar(delta);
+   assign( xmm_vec, getXMMReg( gregOfRexRM(pfx,modrm) ) );
+   /* t3..t0 are the four 32-bit lanes, most to least significant. */
+   breakupV128to32s( xmm_vec, &t3, &t2, &t1, &t0 );
+
+   /* imm8 follows the modrm/amode bytes; low 2 bits select the
+      dword lane. */
+   if ( epartIsReg( modrm ) ) {
+      imm8_10 = (Int)(getUChar(delta+1) & 3);
+   } else { 
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+      imm8_10 = (Int)(getUChar(delta+alen) & 3);
+   }
+
+   switch ( imm8_10 ) {
+      case 0:  assign( src_dword, mkexpr(t0) ); break;
+      case 1:  assign( src_dword, mkexpr(t1) ); break;
+      case 2:  assign( src_dword, mkexpr(t2) ); break;
+      case 3:  assign( src_dword, mkexpr(t3) ); break;
+      default: vassert(0);
+   }
+
+   /* Write the selected dword to the GPR or memory destination. */
+   if ( epartIsReg( modrm ) ) {
+      putIReg32( eregOfRexRM(pfx,modrm), mkexpr(src_dword) );
+      delta += 1+1;
+      DIP( "%spextrd $%d, %s,%s\n", mbV, imm8_10,
+           nameXMMReg( gregOfRexRM(pfx, modrm) ),
+           nameIReg32( eregOfRexRM(pfx, modrm) ) );
+   } else {
+      storeLE( mkexpr(addr), mkexpr(src_dword) );
+      delta += alen+1;
+      DIP( "%spextrd $%d, %s,%s\n", mbV,
+           imm8_10, nameXMMReg( gregOfRexRM(pfx, modrm) ), dis_buf );
+   }
+   return delta;
+}
+
+
+/* Disassemble [V]PEXTRQ (REX.W form): extract the 64-bit half of XMM
+   reg G selected by imm8[0] and write it to a 64-bit GPR or to
+   memory.  Returns the updated instruction offset (delta). */
+static Long dis_PEXTRQ ( const VexAbiInfo* vbi, Prefix pfx,
+                         Long delta, Bool isAvx )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   UChar  modrm = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   Int imm8_0;
+   IRTemp xmm_vec   = newTemp(Ity_V128);
+   IRTemp src_qword = newTemp(Ity_I64);
+   const HChar* mbV = isAvx ? "v" : "";
+
+   vassert(1==getRexW(pfx)); /* ensured by caller */
+   modrm = getUChar(delta);
+   assign( xmm_vec, getXMMReg( gregOfRexRM(pfx,modrm) ) );
+
+   /* imm8 follows the modrm/amode bytes; only bit 0 matters. */
+   if ( epartIsReg( modrm ) ) {
+      imm8_0 = (Int)(getUChar(delta+1) & 1);
+   } else {
+      addr   = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+      imm8_0 = (Int)(getUChar(delta+alen) & 1);
+   }
+
+   /* Select the low (0) or high (1) 64-bit half. */
+   switch ( imm8_0 ) {
+      case 0:  assign( src_qword, unop(Iop_V128to64,   mkexpr(xmm_vec)) );
+               break;
+      case 1:  assign( src_qword, unop(Iop_V128HIto64, mkexpr(xmm_vec)) );
+               break;
+      default: vassert(0);
+   }
+
+   /* Write the selected qword to the GPR or memory destination. */
+   if ( epartIsReg( modrm ) ) {
+      putIReg64( eregOfRexRM(pfx,modrm), mkexpr(src_qword) );
+      delta += 1+1;
+      DIP( "%spextrq $%d, %s,%s\n", mbV, imm8_0,
+           nameXMMReg( gregOfRexRM(pfx, modrm) ),
+           nameIReg64( eregOfRexRM(pfx, modrm) ) );
+   } else {
+      storeLE( mkexpr(addr), mkexpr(src_qword) );
+      delta += alen+1;
+      DIP( "%spextrq $%d, %s,%s\n", mbV,
+           imm8_0, nameXMMReg( gregOfRexRM(pfx, modrm) ), dis_buf );
+   }
+   return delta;
+}
+
+/* Build an IR expression that counts trailing zeroes of a 32-bit
+   value.  The amd64 back end does not implement Iop_Ctz32, so widen
+   to 64 bits, count there, and narrow the result back down. */
+static IRExpr* math_CTZ32(IRExpr *exp)
+{
+   IRExpr* widened = unop(Iop_32Uto64, exp);
+   IRExpr* counted = unop(Iop_Ctz64, widened);
+   return unop(Iop_64to32, counted);
+}
+
+/* Special-cased, pure-IR implementation of PCMPISTRI for imm8 ==
+   0x38 or 0x3A: byte-wise equal-each comparison, polarity 3 (XOR the
+   result with the left operand's validity mask), and ECX set to the
+   index of the least significant set result bit.  Implementing this
+   in IR (instead of via the dirty helper used for the general case)
+   lets Memcheck track definedness precisely.  Also updates the
+   OSZACP condition codes.  Returns delta unchanged (operand decoding
+   was already done by the caller). */
+static Long dis_PCMPISTRI_3A ( UChar modrm, UInt regNoL, UInt regNoR,
+                               Long delta, UChar opc, UChar imm,
+                               HChar dis_buf[])
+{
+   /* We only handle PCMPISTRI for now */
+   vassert((opc & 0x03) == 0x03);
+   /* And only an immediate byte of 0x38 or 0x3A */
+   vassert((imm & ~0x02) == 0x38);
+
+   /* FIXME: Is this correct when RegNoL == 16 ? */
+   IRTemp argL = newTemp(Ity_V128);
+   assign(argL, getXMMReg(regNoL));
+   IRTemp argR = newTemp(Ity_V128);
+   assign(argR, getXMMReg(regNoR));
+
+   /* zmaskL/zmaskR: 16-bit masks with bit i set iff byte i of the
+      corresponding operand is zero (i.e. the NUL terminator and
+      beyond, for C-style strings). */
+   IRTemp zmaskL = newTemp(Ity_I32);
+   assign(zmaskL, unop(Iop_16Uto32,
+                       unop(Iop_GetMSBs8x16,
+                            binop(Iop_CmpEQ8x16, mkexpr(argL), mkV128(0)))));
+   IRTemp zmaskR = newTemp(Ity_I32);
+   assign(zmaskR, unop(Iop_16Uto32,
+                       unop(Iop_GetMSBs8x16,
+                            binop(Iop_CmpEQ8x16, mkexpr(argR), mkV128(0)))));
+
+   /* We want validL = ~(zmaskL | -zmaskL)
+
+      But this formulation kills memcheck's validity tracking when any
+      bits above the first "1" are invalid.  So reformulate as:
+
+      validL = (zmaskL ? (1 << ctz(zmaskL)) : 0) - 1
+   */
+
+   IRExpr *ctzL = unop(Iop_32to8, math_CTZ32(mkexpr(zmaskL)));
+
+   /* Generate a bool expression which is zero iff the original is
+      zero.  Do this carefully so memcheck can propagate validity bits
+      correctly.
+    */
+   IRTemp zmaskL_zero = newTemp(Ity_I1);
+   assign(zmaskL_zero, binop(Iop_ExpCmpNE32, mkexpr(zmaskL), mkU32(0)));
+
+   IRTemp validL = newTemp(Ity_I32);
+   assign(validL, binop(Iop_Sub32,
+                        IRExpr_ITE(mkexpr(zmaskL_zero),
+                                   binop(Iop_Shl32, mkU32(1), ctzL),
+                                   mkU32(0)),
+                        mkU32(1)));
+
+   /* And similarly for validR. */
+   IRExpr *ctzR = unop(Iop_32to8, math_CTZ32(mkexpr(zmaskR)));
+   IRTemp zmaskR_zero = newTemp(Ity_I1);
+   assign(zmaskR_zero, binop(Iop_ExpCmpNE32, mkexpr(zmaskR), mkU32(0)));
+   IRTemp validR = newTemp(Ity_I32);
+   assign(validR, binop(Iop_Sub32,
+                        IRExpr_ITE(mkexpr(zmaskR_zero),
+                                   binop(Iop_Shl32, mkU32(1), ctzR),
+                                   mkU32(0)),
+                        mkU32(1)));
+
+   /* Do the actual comparison. */
+   IRExpr *boolResII = unop(Iop_16Uto32,
+                            unop(Iop_GetMSBs8x16,
+                                 binop(Iop_CmpEQ8x16, mkexpr(argL),
+                                                      mkexpr(argR))));
+
+   /* Compute boolresII & validL & validR (i.e., if both valid, use
+      comparison result) */
+   IRExpr *intRes1_a = binop(Iop_And32, boolResII,
+                             binop(Iop_And32,
+                                   mkexpr(validL), mkexpr(validR)));
+
+   /* Compute ~(validL | validR); i.e., if both invalid, force 1. */
+   IRExpr *intRes1_b = unop(Iop_Not32, binop(Iop_Or32,
+                                             mkexpr(validL), mkexpr(validR)));
+   /* Otherwise, zero. */
+   IRExpr *intRes1 = binop(Iop_And32, mkU32(0xFFFF),
+                           binop(Iop_Or32, intRes1_a, intRes1_b));
+
+   /* The "0x30" in imm=0x3A means "polarity=3" means XOR validL with
+      result. */
+   IRTemp intRes2 = newTemp(Ity_I32);
+   assign(intRes2, binop(Iop_And32, mkU32(0xFFFF),
+                         binop(Iop_Xor32, intRes1, mkexpr(validL))));
+
+   /* If the 0x40 bit were set in imm=0x3A, we would return the index
+      of the msb.  Since it is clear, we return the index of the
+      lsb. */
+   /* OR-ing in bit 16 guarantees ctz returns 16 when intRes2 is 0,
+      matching the architected "no match" ECX value. */
+   IRExpr *newECX = math_CTZ32(binop(Iop_Or32,
+                                     mkexpr(intRes2), mkU32(0x10000)));
+
+   /* And thats our rcx. */
+   putIReg32(R_RCX, newECX);
+
+   /* Now for the condition codes... */
+
+   /* C == 0 iff intRes2 == 0 */
+   IRExpr *c_bit = IRExpr_ITE( binop(Iop_ExpCmpNE32, mkexpr(intRes2),
+                                     mkU32(0)),
+                               mkU32(1 << AMD64G_CC_SHIFT_C),
+                               mkU32(0));
+   /* Z == 1 iff any in argL is 0 */
+   IRExpr *z_bit = IRExpr_ITE( mkexpr(zmaskL_zero),
+                               mkU32(1 << AMD64G_CC_SHIFT_Z),
+                               mkU32(0));
+   /* S == 1 iff any in argR is 0 */
+   IRExpr *s_bit = IRExpr_ITE( mkexpr(zmaskR_zero),
+                               mkU32(1 << AMD64G_CC_SHIFT_S),
+                               mkU32(0));
+   /* O == IntRes2[0] */
+   IRExpr *o_bit = binop(Iop_Shl32, binop(Iop_And32, mkexpr(intRes2),
+                                          mkU32(0x01)),
+                         mkU8(AMD64G_CC_SHIFT_O));
+
+   /* Put them all together.  The flag thunk is set to OP_COPY, so
+      DEP1 holds the literal OSZACP bits. */
+   IRTemp cc = newTemp(Ity_I64);
+   assign(cc, widenUto64(binop(Iop_Or32,
+                               binop(Iop_Or32, c_bit, z_bit),
+                               binop(Iop_Or32, s_bit, o_bit))));
+   stmt(IRStmt_Put(OFFB_CC_OP, mkU64(AMD64G_CC_OP_COPY)));
+   stmt(IRStmt_Put(OFFB_CC_DEP1, mkexpr(cc)));
+   stmt(IRStmt_Put(OFFB_CC_DEP2, mkU64(0)));
+   stmt(IRStmt_Put(OFFB_CC_NDEP, mkU64(0)));
+
+   return delta;
+}
+
+/* Top-level handler for [V]PCMP{I,E}STR{I,M}.  Decodes the operands
+   and the trailing imm8, routes the pure-IR special case to
+   dis_PCMPISTRI_3A, and otherwise invokes the
+   amd64g_dirtyhelper_PCMPxSTRx dirty helper, which reads the operands
+   directly from the guest state.  Afterwards it unpacks the helper's
+   result into ECX (for the xSTRI forms) and the OSZACP flags.
+   This can fail, in which case it returns the original (unchanged)
+   delta. */
+static Long dis_PCMPxSTRx ( const VexAbiInfo* vbi, Prefix pfx,
+                            Long delta, Bool isAvx, UChar opc )
+{
+   Long   delta0  = delta;
+   UInt   isISTRx = opc & 2;        /* I-form: implicit lengths */
+   UInt   isxSTRM = (opc & 1) ^ 1;  /* M-form: mask result in XMM0 */
+   UInt   regNoL  = 0;
+   UInt   regNoR  = 0;
+   UChar  imm     = 0;
+   IRTemp addr    = IRTemp_INVALID;
+   Int    alen    = 0;
+   HChar  dis_buf[50];
+
+   /* This is a nasty kludge.  We need to pass 2 x V128 to the helper
+      (which is clean).  Since we can't do that, use a dirty helper to
+      compute the results directly from the XMM regs in the guest
+      state.  That means for the memory case, we need to move the left
+      operand into a pseudo-register (XMM16, let's call it). */
+   UChar modrm = getUChar(delta);
+   if (epartIsReg(modrm)) {
+      regNoL = eregOfRexRM(pfx, modrm);
+      regNoR = gregOfRexRM(pfx, modrm);
+      imm = getUChar(delta+1);
+      delta += 1+1;
+   } else {
+      regNoL = 16; /* use XMM16 as an intermediary */
+      regNoR = gregOfRexRM(pfx, modrm);
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+      /* No alignment check; I guess that makes sense, given that
+         these insns are for dealing with C style strings. */
+      stmt( IRStmt_Put( OFFB_YMM16, loadLE(Ity_V128, mkexpr(addr)) ));
+      imm = getUChar(delta+alen);
+      delta += alen+1;
+   }
+
+   /* Print the insn here, since dis_PCMPISTRI_3A doesn't do so
+      itself. */
+   if (regNoL == 16) {
+      DIP("%spcmp%cstr%c $%x,%s,%s\n",
+          isAvx ? "v" : "", isISTRx ? 'i' : 'e', isxSTRM ? 'm' : 'i',
+          (UInt)imm, dis_buf, nameXMMReg(regNoR));
+   } else {
+      DIP("%spcmp%cstr%c $%x,%s,%s\n",
+          isAvx ? "v" : "", isISTRx ? 'i' : 'e', isxSTRM ? 'm' : 'i',
+          (UInt)imm, nameXMMReg(regNoL), nameXMMReg(regNoR));
+   }
+
+   /* Handle special case(s). */
+   if (imm == 0x3A && isISTRx && !isxSTRM) {
+      return dis_PCMPISTRI_3A ( modrm, regNoL, regNoR, delta,
+                                opc, imm, dis_buf);
+   }
+
+   /* Now we know the XMM reg numbers for the operands, and the
+      immediate byte.  Is it one we can actually handle? Throw out any
+      cases for which the helper function has not been verified. */
+   switch (imm) {
+      case 0x00: case 0x02: case 0x08: case 0x0A: case 0x0C: case 0x0E:
+      case 0x12: case 0x14: case 0x1A:
+      case 0x30: case 0x34: case 0x38: case 0x3A:
+      case 0x40: case 0x44: case 0x46: case 0x4A:
+         break;
+      // the 16-bit character versions of the above
+      case 0x01: case 0x03: case 0x09: case 0x0B: case 0x0D:
+      case 0x13:            case 0x1B:
+                            case 0x39: case 0x3B:
+                 case 0x45:            case 0x4B:
+         break;
+      default:
+         return delta0; /*FAIL*/
+   }
+
+   /* Who ya gonna call?  Presumably not Ghostbusters. */
+   void*  fn = &amd64g_dirtyhelper_PCMPxSTRx;
+   const HChar* nm = "amd64g_dirtyhelper_PCMPxSTRx";
+
+   /* Round up the arguments.  Note that this is a kludge -- the use
+      of mkU64 rather than mkIRExpr_HWord implies the assumption that
+      the host's word size is 64-bit. */
+   UInt gstOffL = regNoL == 16 ? OFFB_YMM16 : ymmGuestRegOffset(regNoL);
+   UInt gstOffR = ymmGuestRegOffset(regNoR);
+
+   IRExpr*  opc4_and_imm = mkU64((opc << 8) | (imm & 0xFF));
+   IRExpr*  gstOffLe     = mkU64(gstOffL);
+   IRExpr*  gstOffRe     = mkU64(gstOffR);
+   /* The E-forms take explicit lengths in RDX/RAX; I-forms don't. */
+   IRExpr*  edxIN        = isISTRx ? mkU64(0) : getIRegRDX(8);
+   IRExpr*  eaxIN        = isISTRx ? mkU64(0) : getIRegRAX(8);
+   IRExpr** args
+      = mkIRExprVec_6( IRExpr_BBPTR(),
+                       opc4_and_imm, gstOffLe, gstOffRe, edxIN, eaxIN );
+
+   IRTemp   resT = newTemp(Ity_I64);
+   IRDirty* d    = unsafeIRDirty_1_N( resT, 0/*regparms*/, nm, fn, args );
+   /* It's not really a dirty call, but we can't use the clean helper
+      mechanism here for the very lame reason that we can't pass 2 x
+      V128s by value to a helper.  Hence this roundabout scheme. */
+   d->nFxState = 2;
+   vex_bzero(&d->fxState, sizeof(d->fxState));
+   d->fxState[0].fx     = Ifx_Read;
+   d->fxState[0].offset = gstOffL;
+   d->fxState[0].size   = sizeof(U128);
+   d->fxState[1].fx     = Ifx_Read;
+   d->fxState[1].offset = gstOffR;
+   d->fxState[1].size   = sizeof(U128);
+   if (isxSTRM) {
+      /* Declare that the helper writes XMM0. */
+      d->nFxState = 3;
+      d->fxState[2].fx     = Ifx_Write;
+      d->fxState[2].offset = ymmGuestRegOffset(0);
+      d->fxState[2].size   = sizeof(U128);
+   }
+
+   stmt( IRStmt_Dirty(d) );
+
+   /* Now resT[15:0] holds the new OSZACP values, so the condition
+      codes must be updated. And for a xSTRI case, resT[31:16] holds
+      the new ECX value, so stash that too. */
+   if (!isxSTRM) {
+      putIReg64(R_RCX, binop(Iop_And64,
+                             binop(Iop_Shr64, mkexpr(resT), mkU8(16)),
+                             mkU64(0xFFFF)));
+   }
+
+   /* Zap the upper half of the dest reg as per AVX conventions. */
+   if (isxSTRM && isAvx)
+      putYMMRegLane128(/*YMM*/0, 1, mkV128(0));
+
+   stmt( IRStmt_Put(
+            OFFB_CC_DEP1,
+            binop(Iop_And64, mkexpr(resT), mkU64(0xFFFF))
+   ));
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+
+   return delta;
+}
+
+
+/* Generate IR for PINSRB: return a V128 equal to |v128| with the
+   byte lane selected by |imm8| (0..15) replaced by |u8|. */
+static IRTemp math_PINSRB_128 ( IRTemp v128, IRTemp u8, UInt imm8 )
+{
+   /* imm8 is unsigned, so "imm8 >= 0" would be a tautological
+      comparison (and a -Wtype-limits warning); only the upper bound
+      needs checking. */
+   vassert(imm8 <= 15);
+
+   // Create a V128 value which has the selected byte in the
+   // specified lane, and zeroes everywhere else.
+   IRTemp tmp128    = newTemp(Ity_V128);
+   IRTemp halfshift = newTemp(Ity_I64);
+   /* Shift the byte into position within its 64-bit half. */
+   assign(halfshift, binop(Iop_Shl64,
+                           unop(Iop_8Uto64, mkexpr(u8)),
+                           mkU8(8 * (imm8 & 7))));
+   if (imm8 < 8) {
+      assign(tmp128, binop(Iop_64HLtoV128, mkU64(0), mkexpr(halfshift)));
+   } else {
+      assign(tmp128, binop(Iop_64HLtoV128, mkexpr(halfshift), mkU64(0)));
+   }
+
+   /* Clear the target byte lane of v128 (mkV128 takes one mask bit
+      per byte lane), then OR in the shifted byte. */
+   UShort mask = ~(1 << imm8);
+   IRTemp res  = newTemp(Ity_V128);
+   assign( res, binop(Iop_OrV128,
+                      mkexpr(tmp128),
+                      binop(Iop_AndV128, mkexpr(v128), mkV128(mask))) );
+   return res;
+}
+
+
+/* Generate IR for PINSRD: return a V128 equal to |v128| with the
+   32-bit lane selected by |imm8| (0..3) replaced by |u32|. */
+static IRTemp math_PINSRD_128 ( IRTemp v128, IRTemp u32, UInt imm8 )
+{
+   IRTemp zero32 = newTemp(Ity_I32);
+   assign(zero32, mkU32(0));
+
+   /* Build a vector holding u32 in the target lane and zeroes
+      elsewhere, plus a byte-lane mask that clears that lane of
+      v128 so the two can simply be ORed together. */
+   IRTemp justU32 = newTemp(Ity_V128);
+   UShort keepMask = 0;
+   if (imm8 == 0) {
+      keepMask = 0xFFF0;
+      assign(justU32, mkV128from32s(zero32, zero32, zero32, u32));
+   } else if (imm8 == 1) {
+      keepMask = 0xFF0F;
+      assign(justU32, mkV128from32s(zero32, zero32, u32, zero32));
+   } else if (imm8 == 2) {
+      keepMask = 0xF0FF;
+      assign(justU32, mkV128from32s(zero32, u32, zero32, zero32));
+   } else if (imm8 == 3) {
+      keepMask = 0x0FFF;
+      assign(justU32, mkV128from32s(u32, zero32, zero32, zero32));
+   } else {
+      vassert(0);
+   }
+
+   IRTemp merged = newTemp(Ity_V128);
+   assign(merged, binop( Iop_OrV128,
+                         mkexpr(justU32),
+                         binop( Iop_AndV128, mkexpr(v128),
+                                mkV128(keepMask) ) ) );
+   return merged;
+}
+
+
+/* Generate IR for PINSRQ: return a V128 equal to |v128| with the
+   64-bit half selected by |imm8| (0 or 1) replaced by |u64|. */
+static IRTemp math_PINSRQ_128 ( IRTemp v128, IRTemp u64, UInt imm8 )
+{
+   /* Build a vector holding u64 in the target half and zeroes in the
+      other, plus a byte-lane mask that keeps only the untouched half
+      of v128. */
+   IRTemp justU64 = newTemp(Ity_V128);
+   UShort keepMask = 0;
+   if (imm8 == 0) {
+      /* New value goes in the low half; preserve the high half. */
+      keepMask = 0xFF00;
+      assign(justU64, binop(Iop_64HLtoV128, mkU64(0), mkexpr(u64)));
+   } else {
+      vassert(imm8 == 1);
+      /* New value goes in the high half; preserve the low half. */
+      keepMask = 0x00FF;
+      assign(justU64, binop(Iop_64HLtoV128, mkexpr(u64), mkU64(0)));
+   }
+
+   IRTemp merged = newTemp(Ity_V128);
+   assign(merged, binop( Iop_OrV128,
+                         mkexpr(justU64),
+                         binop( Iop_AndV128, mkexpr(v128),
+                                mkV128(keepMask) ) ) );
+   return merged;
+}
+
+
+/* Generate IR for INSERTPS: place |toInsertD| into the 32-bit lane
+   of |dstV| selected by imm8[5:4] ("count_d"), then zero every lane
+   whose bit is set in imm8[3:0] ("zmask"). */
+static IRTemp math_INSERTPS ( IRTemp dstV, IRTemp toInsertD, UInt imm8 )
+{
+   vassert(imm8 <= 255);
+
+   /* lanes[3..0] are the 32-bit pieces of dstV, most significant
+      first in the breakup call. */
+   IRTemp lanes[4] = { IRTemp_INVALID, IRTemp_INVALID,
+                       IRTemp_INVALID, IRTemp_INVALID };
+   breakupV128to32s( dstV, &lanes[3], &lanes[2], &lanes[1], &lanes[0] );
+
+   /* imm8[5:4] picks the destination lane for the inserted value. */
+   lanes[(imm8 >> 4) & 3] = toInsertD;
+
+   /* imm8[3:0] forces the corresponding lanes to zero. */
+   UInt zmask = imm8 & 15;
+   IRTemp zero32 = newTemp(Ity_I32);
+   assign( zero32, mkU32(0) );
+
+   IRTemp result = newTemp(Ity_V128);
+   assign( result, mkV128from32s(
+                      (zmask & 8) ? zero32 : lanes[3],
+                      (zmask & 4) ? zero32 : lanes[2],
+                      (zmask & 2) ? zero32 : lanes[1],
+                      (zmask & 1) ? zero32 : lanes[0]) );
+   return result;
+}
+
+
+/* Disassemble [V]PEXTRB: extract the byte of XMM reg G selected by
+   imm8[3:0].  Register destination: the byte is zero-extended into a
+   64-bit GPR.  Memory destination: a single byte is stored.  Returns
+   the updated instruction offset (delta). */
+static Long dis_PEXTRB_128_GtoE ( const VexAbiInfo* vbi, Prefix pfx,
+                                  Long delta, Bool isAvx )
+{
+   IRTemp addr     = IRTemp_INVALID;
+   Int    alen     = 0;
+   HChar  dis_buf[50];
+   IRTemp xmm_vec  = newTemp(Ity_V128);
+   IRTemp sel_lane = newTemp(Ity_I32);
+   IRTemp shr_lane = newTemp(Ity_I32);
+   const HChar* mbV = isAvx ? "v" : "";
+   UChar  modrm    = getUChar(delta);
+   IRTemp t3, t2, t1, t0;
+   Int    imm8;
+   assign( xmm_vec, getXMMReg( gregOfRexRM(pfx,modrm) ) );
+   t3 = t2 = t1 = t0 = IRTemp_INVALID;
+   /* t3..t0 are the four 32-bit lanes, most to least significant. */
+   breakupV128to32s( xmm_vec, &t3, &t2, &t1, &t0 );
+
+   /* The imm8 byte follows the modrm/amode bytes. */
+   if ( epartIsReg( modrm ) ) {
+      imm8 = (Int)getUChar(delta+1);
+   } else {
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+      imm8 = (Int)getUChar(delta+alen);
+   }
+   /* imm8[3:2] picks the dword; imm8[1:0] the byte within it. */
+   switch ( (imm8 >> 2) & 3 ) {
+      case 0:  assign( sel_lane, mkexpr(t0) ); break;
+      case 1:  assign( sel_lane, mkexpr(t1) ); break;
+      case 2:  assign( sel_lane, mkexpr(t2) ); break;
+      case 3:  assign( sel_lane, mkexpr(t3) ); break;
+      default: vassert(0);
+   }
+   /* Shift the wanted byte down to bits 7:0 of the dword. */
+   assign( shr_lane, 
+           binop( Iop_Shr32, mkexpr(sel_lane), mkU8(((imm8 & 3)*8)) ) );
+
+   if ( epartIsReg( modrm ) ) {
+      putIReg64( eregOfRexRM(pfx,modrm), 
+                 unop( Iop_32Uto64, 
+                       binop(Iop_And32, mkexpr(shr_lane), mkU32(255)) ) );
+      delta += 1+1;
+      DIP( "%spextrb $%d, %s,%s\n", mbV, imm8, 
+           nameXMMReg( gregOfRexRM(pfx, modrm) ), 
+           nameIReg64( eregOfRexRM(pfx, modrm) ) );
+   } else {
+      storeLE( mkexpr(addr), unop(Iop_32to8, mkexpr(shr_lane) ) );
+      delta += alen+1;
+      DIP( "%spextrb $%d,%s,%s\n", mbV,
+           imm8, nameXMMReg( gregOfRexRM(pfx, modrm) ), dis_buf );
+   }
+   
+   return delta;
+}
+
+
+/* Generate IR for DPPD (double-precision dot product).  imm8[5:4]
+   select which of the two lane products contribute (unselected lanes
+   are masked to zero before summing) and imm8[1:0] select which
+   result lanes receive the sum (others forced to zero). */
+static IRTemp math_DPPD_128 ( IRTemp src_vec, IRTemp dst_vec, UInt imm8 )
+{
+   vassert(imm8 < 256);
+   /* Byte-lane masks indexed by a 2-bit lane-selection field. */
+   UShort imm8_perms[4] = { 0x0000, 0x00FF, 0xFF00, 0xFFFF };
+   IRTemp and_vec = newTemp(Ity_V128);
+   IRTemp sum_vec = newTemp(Ity_V128);
+   IRTemp rm      = newTemp(Ity_I32);
+   assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+   /* Multiply lane-wise, then mask out products not selected by
+      imm8[5:4]. */
+   assign( and_vec, binop( Iop_AndV128,
+                           triop( Iop_Mul64Fx2,
+                                  mkexpr(rm),
+                                  mkexpr(dst_vec), mkexpr(src_vec) ),
+                           mkV128( imm8_perms[ ((imm8 >> 4) & 3) ] ) ) );
+
+   /* Horizontal add of the two products via hi/lo interleaves. */
+   assign( sum_vec, binop( Iop_Add64F0x2,
+                           binop( Iop_InterleaveHI64x2,
+                                  mkexpr(and_vec), mkexpr(and_vec) ),
+                           binop( Iop_InterleaveLO64x2,
+                                  mkexpr(and_vec), mkexpr(and_vec) ) ) );
+   /* Broadcast the sum to both lanes, then keep only the lanes
+      selected by imm8[1:0]. */
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, binop( Iop_AndV128,
+                      binop( Iop_InterleaveLO64x2,
+                             mkexpr(sum_vec), mkexpr(sum_vec) ),
+                      mkV128( imm8_perms[ (imm8 & 3) ] ) ) );
+   return res;
+}
+
+
+/* Generate IR for DPPS (single-precision dot product).  imm8[7:4]
+   select which of the four lane products contribute (unselected
+   lanes are masked to zero) and imm8[3:0] select which result lanes
+   receive the sum (others forced to zero). */
+static IRTemp math_DPPS_128 ( IRTemp src_vec, IRTemp dst_vec, UInt imm8 )
+{
+   vassert(imm8 < 256);
+   IRTemp tmp_prod_vec = newTemp(Ity_V128);
+   IRTemp prod_vec     = newTemp(Ity_V128);
+   IRTemp sum_vec      = newTemp(Ity_V128);
+   IRTemp rm           = newTemp(Ity_I32);
+   IRTemp v3, v2, v1, v0;
+   v3 = v2 = v1 = v0   = IRTemp_INVALID;
+   /* Byte-lane masks indexed by a 4-bit lane-selection field. */
+   UShort imm8_perms[16] = { 0x0000, 0x000F, 0x00F0, 0x00FF, 0x0F00, 
+                             0x0F0F, 0x0FF0, 0x0FFF, 0xF000, 0xF00F,
+                             0xF0F0, 0xF0FF, 0xFF00, 0xFF0F, 0xFFF0,
+                             0xFFFF };
+
+   assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+   /* Multiply lane-wise, then mask out products not selected by
+      imm8[7:4]. */
+   assign( tmp_prod_vec, 
+           binop( Iop_AndV128, 
+                  triop( Iop_Mul32Fx4,
+                         mkexpr(rm), mkexpr(dst_vec), mkexpr(src_vec) ), 
+                  mkV128( imm8_perms[((imm8 >> 4)& 15)] ) ) );
+   /* NOTE the deliberate v3,v1,v2,v0 ordering: the two middle lanes
+      are swapped so the interleave-based horizontal adds below pair
+      up the intended elements. */
+   breakupV128to32s( tmp_prod_vec, &v3, &v2, &v1, &v0 );
+   assign( prod_vec, mkV128from32s( v3, v1, v2, v0 ) );
+
+   /* First horizontal-add stage: pairwise sums. */
+   assign( sum_vec, triop( Iop_Add32Fx4,
+                           mkexpr(rm),
+                           binop( Iop_InterleaveHI32x4, 
+                                  mkexpr(prod_vec), mkexpr(prod_vec) ), 
+                           binop( Iop_InterleaveLO32x4, 
+                                  mkexpr(prod_vec), mkexpr(prod_vec) ) ) );
+
+   /* Second stage produces the full sum in every lane, then keep
+      only the lanes selected by imm8[3:0]. */
+   IRTemp res = newTemp(Ity_V128);
+   assign( res, binop( Iop_AndV128, 
+                       triop( Iop_Add32Fx4,
+                              mkexpr(rm),
+                              binop( Iop_InterleaveHI32x4,
+                                     mkexpr(sum_vec), mkexpr(sum_vec) ), 
+                              binop( Iop_InterleaveLO32x4,
+                                     mkexpr(sum_vec), mkexpr(sum_vec) ) ), 
+                       mkV128( imm8_perms[ (imm8 & 15) ] ) ) );
+   return res;
+}
+
+
+/* Build IR for MPSADBW.  imm8[1:0] selects the 32-bit source block
+   (via src_mask), imm8[2] selects the destination starting offset
+   (via dst_mask).  The actual sums-of-absolute-differences are
+   computed by the clean helper amd64g_calc_mpsadbw, once per 64-bit
+   half of the result; bit 0x80 in the helper's last argument marks
+   the high-half computation. */
+static IRTemp math_MPSADBW_128 ( IRTemp dst_vec, IRTemp src_vec, UInt imm8 )
+{
+   /* Mask out bits of the operands we don't need.  This isn't
+      strictly necessary, but it does ensure Memcheck doesn't
+      give us any false uninitialised value errors as a
+      result. */
+   UShort src_mask[4] = { 0x000F, 0x00F0, 0x0F00, 0xF000 };
+   UShort dst_mask[2] = { 0x07FF, 0x7FF0 };
+
+   IRTemp src_maskV = newTemp(Ity_V128);
+   IRTemp dst_maskV = newTemp(Ity_V128);
+   assign(src_maskV, mkV128( src_mask[ imm8 & 3 ] ));
+   assign(dst_maskV, mkV128( dst_mask[ (imm8 >> 2) & 1 ] ));
+
+   IRTemp src_masked = newTemp(Ity_V128);
+   IRTemp dst_masked = newTemp(Ity_V128);
+   assign(src_masked, binop(Iop_AndV128, mkexpr(src_vec), mkexpr(src_maskV)));
+   assign(dst_masked, binop(Iop_AndV128, mkexpr(dst_vec), mkexpr(dst_maskV)));
+
+   /* Generate 4 64 bit values that we can hand to a clean helper */
+   IRTemp sHi = newTemp(Ity_I64);
+   IRTemp sLo = newTemp(Ity_I64);
+   assign( sHi, unop(Iop_V128HIto64, mkexpr(src_masked)) );
+   assign( sLo, unop(Iop_V128to64,   mkexpr(src_masked)) );
+
+   IRTemp dHi = newTemp(Ity_I64);
+   IRTemp dLo = newTemp(Ity_I64);
+   assign( dHi, unop(Iop_V128HIto64, mkexpr(dst_masked)) );
+   assign( dLo, unop(Iop_V128to64,   mkexpr(dst_masked)) );
+
+   /* Compute halves of the result separately */
+   IRTemp resHi = newTemp(Ity_I64);
+   IRTemp resLo = newTemp(Ity_I64);
+
+   /* Last arg: low 3 bits of imm8, plus 0x80 to request the high
+      half of the result. */
+   IRExpr** argsHi
+      = mkIRExprVec_5( mkexpr(sHi), mkexpr(sLo), mkexpr(dHi), mkexpr(dLo),
+                       mkU64( 0x80 | (imm8 & 7) ));
+   IRExpr** argsLo
+      = mkIRExprVec_5( mkexpr(sHi), mkexpr(sLo), mkexpr(dHi), mkexpr(dLo),
+                       mkU64( 0x00 | (imm8 & 7) ));
+
+   assign(resHi, mkIRExprCCall( Ity_I64, 0/*regparm*/,
+                                "amd64g_calc_mpsadbw",
+                                &amd64g_calc_mpsadbw, argsHi ));
+   assign(resLo, mkIRExprCCall( Ity_I64, 0/*regparm*/,
+                                "amd64g_calc_mpsadbw",
+                                &amd64g_calc_mpsadbw, argsLo ));
+
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, binop(Iop_64HLtoV128, mkexpr(resHi), mkexpr(resLo)));
+   return res;
+}
+
+/* Decode (V)EXTRACTPS: extract the 32-bit lane of xmm register rG
+   selected by imm8 bits 1:0 (hence the name imm8_10) and store it in
+   a 32-bit GPR or in memory.  Returns the updated instruction offset
+   (delta); isAvx only affects the printed mnemonic prefix. */
+static Long dis_EXTRACTPS ( const VexAbiInfo* vbi, Prefix pfx,
+                            Long delta, Bool isAvx )
+{
+   IRTemp addr       = IRTemp_INVALID;
+   Int    alen       = 0;
+   HChar  dis_buf[50];
+   UChar  modrm      = getUChar(delta);
+   Int imm8_10;
+   IRTemp xmm_vec    = newTemp(Ity_V128);
+   IRTemp src_dword  = newTemp(Ity_I32);
+   UInt   rG         = gregOfRexRM(pfx,modrm);
+   IRTemp t3, t2, t1, t0;
+   t3 = t2 = t1 = t0 = IRTemp_INVALID;
+
+   assign( xmm_vec, getXMMReg( rG ) );
+   breakupV128to32s( xmm_vec, &t3, &t2, &t1, &t0 );
+
+   /* imm8 follows either the modrm byte or the amode. */
+   if ( epartIsReg( modrm ) ) {
+      imm8_10 = (Int)(getUChar(delta+1) & 3);
+   } else { 
+      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+      imm8_10 = (Int)(getUChar(delta+alen) & 3);
+   }
+
+   switch ( imm8_10 ) {
+      case 0:  assign( src_dword, mkexpr(t0) ); break;
+      case 1:  assign( src_dword, mkexpr(t1) ); break;
+      case 2:  assign( src_dword, mkexpr(t2) ); break;
+      case 3:  assign( src_dword, mkexpr(t3) ); break;
+      default: vassert(0);
+   }
+
+   if ( epartIsReg( modrm ) ) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      putIReg32( rE, mkexpr(src_dword) );
+      delta += 1+1;
+      DIP( "%sextractps $%d, %s,%s\n", isAvx ? "v" : "", imm8_10,
+           nameXMMReg( rG ), nameIReg32( rE ) );
+   } else {
+      storeLE( mkexpr(addr), mkexpr(src_dword) );
+      delta += alen+1;
+      DIP( "%sextractps $%d, %s,%s\n", isAvx ? "v" : "", imm8_10,
+           nameXMMReg( rG ), dis_buf );
+   }
+
+   return delta;
+}
+
+
+/* Build IR for PCLMULQDQ (carry-less 64x64 -> 128 multiply).
+   imm8 bit 0 selects the 64-bit half of dV, imm8 bit 4 the half of
+   sV.  The multiply itself is done by the clean helper
+   amd64g_calculate_pclmul, called twice: the third argument selects
+   which 64-bit half of the product the helper returns (0 is placed
+   in the low half of the result, 1 in the high half). */
+static IRTemp math_PCLMULQDQ( IRTemp dV, IRTemp sV, UInt imm8 )
+{
+   IRTemp t0 = newTemp(Ity_I64);
+   IRTemp t1 = newTemp(Ity_I64);
+   assign(t0, unop((imm8&1)? Iop_V128HIto64 : Iop_V128to64, 
+              mkexpr(dV)));
+   assign(t1, unop((imm8&16) ? Iop_V128HIto64 : Iop_V128to64,
+              mkexpr(sV)));
+
+   IRTemp t2 = newTemp(Ity_I64);
+   IRTemp t3 = newTemp(Ity_I64);
+
+   IRExpr** args;
+
+   args = mkIRExprVec_3(mkexpr(t0), mkexpr(t1), mkU64(0));
+   assign(t2, mkIRExprCCall(Ity_I64,0, "amd64g_calculate_pclmul",
+                            &amd64g_calculate_pclmul, args));
+   args = mkIRExprVec_3(mkexpr(t0), mkexpr(t1), mkU64(1));
+   assign(t3, mkIRExprCCall(Ity_I64,0, "amd64g_calculate_pclmul",
+                            &amd64g_calculate_pclmul, args));
+
+   IRTemp res     = newTemp(Ity_V128);
+   assign(res, binop(Iop_64HLtoV128, mkexpr(t3), mkexpr(t2)));
+   return res;
+}
+
+
+/* Decode SSE4 instructions with a 66 0F 3A <opc> escape.  On a
+   successful decode, sets *decode_OK to True and returns the updated
+   instruction offset; otherwise returns deltaIN unchanged with
+   *decode_OK == False.
+   Fix: the memory-operand path of BLENDPS (case 0x0C) previously
+   printed the mnemonic "blendpd" in its disassembly trace; it now
+   correctly prints "blendps", matching the register path. */
+__attribute__((noinline))
+static
+Long dis_ESC_0F3A__SSE4 ( Bool* decode_OK,
+                          const VexAbiInfo* vbi,
+                          Prefix pfx, Int sz, Long deltaIN )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   UChar  modrm = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   *decode_OK = False;
+
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   switch (opc) {
+
+   case 0x08:
+      /* 66 0F 3A 08 /r ib = ROUNDPS imm8, xmm2/m128, xmm1 */
+      if (have66noF2noF3(pfx) && sz == 2) {
+
+         IRTemp src0 = newTemp(Ity_F32);
+         IRTemp src1 = newTemp(Ity_F32);
+         IRTemp src2 = newTemp(Ity_F32);
+         IRTemp src3 = newTemp(Ity_F32);
+         IRTemp res0 = newTemp(Ity_F32);
+         IRTemp res1 = newTemp(Ity_F32);
+         IRTemp res2 = newTemp(Ity_F32);
+         IRTemp res3 = newTemp(Ity_F32);
+         IRTemp rm   = newTemp(Ity_I32);
+         Int    imm  = 0;
+
+         modrm = getUChar(delta);
+
+         if (epartIsReg(modrm)) {
+            assign( src0, 
+                    getXMMRegLane32F( eregOfRexRM(pfx, modrm), 0 ) );
+            assign( src1, 
+                    getXMMRegLane32F( eregOfRexRM(pfx, modrm), 1 ) );
+            assign( src2, 
+                    getXMMRegLane32F( eregOfRexRM(pfx, modrm), 2 ) );
+            assign( src3, 
+                    getXMMRegLane32F( eregOfRexRM(pfx, modrm), 3 ) );
+            imm = getUChar(delta+1);
+            if (imm & ~15) goto decode_failure;
+            delta += 1+1;
+            DIP( "roundps $%d,%s,%s\n",
+                 imm, nameXMMReg( eregOfRexRM(pfx, modrm) ),
+                      nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            gen_SEGV_if_not_16_aligned(addr);
+            assign( src0, loadLE(Ity_F32,
+                                 binop(Iop_Add64, mkexpr(addr), mkU64(0) )));
+            assign( src1, loadLE(Ity_F32,
+                                 binop(Iop_Add64, mkexpr(addr), mkU64(4) )));
+            assign( src2, loadLE(Ity_F32,
+                                 binop(Iop_Add64, mkexpr(addr), mkU64(8) )));
+            assign( src3, loadLE(Ity_F32,
+                                 binop(Iop_Add64, mkexpr(addr), mkU64(12) )));
+            imm = getUChar(delta+alen);
+            if (imm & ~15) goto decode_failure;
+            delta += alen+1;
+            DIP( "roundps $%d,%s,%s\n",
+                 imm, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         }
+
+         /* (imm & 3) contains an Intel-encoded rounding mode.  Because
+            that encoding is the same as the encoding for IRRoundingMode,
+            we can use that value directly in the IR as a rounding
+            mode. */
+         assign(rm, (imm & 4) ? get_sse_roundingmode() : mkU32(imm & 3));
+
+         assign(res0, binop(Iop_RoundF32toInt, mkexpr(rm), mkexpr(src0)) );
+         assign(res1, binop(Iop_RoundF32toInt, mkexpr(rm), mkexpr(src1)) );
+         assign(res2, binop(Iop_RoundF32toInt, mkexpr(rm), mkexpr(src2)) );
+         assign(res3, binop(Iop_RoundF32toInt, mkexpr(rm), mkexpr(src3)) );
+
+         putXMMRegLane32F( gregOfRexRM(pfx, modrm), 0, mkexpr(res0) );
+         putXMMRegLane32F( gregOfRexRM(pfx, modrm), 1, mkexpr(res1) );
+         putXMMRegLane32F( gregOfRexRM(pfx, modrm), 2, mkexpr(res2) );
+         putXMMRegLane32F( gregOfRexRM(pfx, modrm), 3, mkexpr(res3) );
+
+         goto decode_success;
+      }
+      break;
+
+   case 0x09:
+      /* 66 0F 3A 09 /r ib = ROUNDPD imm8, xmm2/m128, xmm1 */
+      if (have66noF2noF3(pfx) && sz == 2) {
+
+         IRTemp src0 = newTemp(Ity_F64);
+         IRTemp src1 = newTemp(Ity_F64);
+         IRTemp res0 = newTemp(Ity_F64);
+         IRTemp res1 = newTemp(Ity_F64);
+         IRTemp rm   = newTemp(Ity_I32);
+         Int    imm  = 0;
+
+         modrm = getUChar(delta);
+
+         if (epartIsReg(modrm)) {
+            assign( src0, 
+                    getXMMRegLane64F( eregOfRexRM(pfx, modrm), 0 ) );
+            assign( src1, 
+                    getXMMRegLane64F( eregOfRexRM(pfx, modrm), 1 ) );
+            imm = getUChar(delta+1);
+            if (imm & ~15) goto decode_failure;
+            delta += 1+1;
+            DIP( "roundpd $%d,%s,%s\n",
+                 imm, nameXMMReg( eregOfRexRM(pfx, modrm) ),
+                      nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            gen_SEGV_if_not_16_aligned(addr);
+            assign( src0, loadLE(Ity_F64,
+                                 binop(Iop_Add64, mkexpr(addr), mkU64(0) )));
+            assign( src1, loadLE(Ity_F64,
+                                 binop(Iop_Add64, mkexpr(addr), mkU64(8) )));
+            imm = getUChar(delta+alen);
+            if (imm & ~15) goto decode_failure;
+            delta += alen+1;
+            DIP( "roundpd $%d,%s,%s\n",
+                 imm, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         }
+
+         /* (imm & 3) contains an Intel-encoded rounding mode.  Because
+            that encoding is the same as the encoding for IRRoundingMode,
+            we can use that value directly in the IR as a rounding
+            mode. */
+         assign(rm, (imm & 4) ? get_sse_roundingmode() : mkU32(imm & 3));
+
+         assign(res0, binop(Iop_RoundF64toInt, mkexpr(rm), mkexpr(src0)) );
+         assign(res1, binop(Iop_RoundF64toInt, mkexpr(rm), mkexpr(src1)) );
+
+         putXMMRegLane64F( gregOfRexRM(pfx, modrm), 0, mkexpr(res0) );
+         putXMMRegLane64F( gregOfRexRM(pfx, modrm), 1, mkexpr(res1) );
+
+         goto decode_success;
+      }
+      break;
+
+   case 0x0A:
+   case 0x0B:
+      /* 66 0F 3A 0A /r ib = ROUNDSS imm8, xmm2/m32, xmm1
+         66 0F 3A 0B /r ib = ROUNDSD imm8, xmm2/m64, xmm1
+      */
+      if (have66noF2noF3(pfx) && sz == 2) {
+
+         Bool   isD = opc == 0x0B;
+         IRTemp src = newTemp(isD ? Ity_F64 : Ity_F32);
+         IRTemp res = newTemp(isD ? Ity_F64 : Ity_F32);
+         Int    imm = 0;
+
+         modrm = getUChar(delta);
+
+         if (epartIsReg(modrm)) {
+            assign( src, 
+                    isD ? getXMMRegLane64F( eregOfRexRM(pfx, modrm), 0 )
+                        : getXMMRegLane32F( eregOfRexRM(pfx, modrm), 0 ) );
+            imm = getUChar(delta+1);
+            if (imm & ~15) goto decode_failure;
+            delta += 1+1;
+            DIP( "rounds%c $%d,%s,%s\n",
+                 isD ? 'd' : 's',
+                 imm, nameXMMReg( eregOfRexRM(pfx, modrm) ),
+                      nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( src, loadLE( isD ? Ity_F64 : Ity_F32, mkexpr(addr) ));
+            imm = getUChar(delta+alen);
+            if (imm & ~15) goto decode_failure;
+            delta += alen+1;
+            DIP( "rounds%c $%d,%s,%s\n",
+                 isD ? 'd' : 's',
+                 imm, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         }
+
+         /* (imm & 3) contains an Intel-encoded rounding mode.  Because
+            that encoding is the same as the encoding for IRRoundingMode,
+            we can use that value directly in the IR as a rounding
+            mode. */
+         assign(res, binop(isD ? Iop_RoundF64toInt : Iop_RoundF32toInt,
+                           (imm & 4) ? get_sse_roundingmode() 
+                                     : mkU32(imm & 3),
+                           mkexpr(src)) );
+
+         if (isD)
+            putXMMRegLane64F( gregOfRexRM(pfx, modrm), 0, mkexpr(res) );
+         else
+            putXMMRegLane32F( gregOfRexRM(pfx, modrm), 0, mkexpr(res) );
+
+         goto decode_success;
+      }
+      break;
+
+   case 0x0C:
+      /* 66 0F 3A 0C /r ib = BLENDPS xmm1, xmm2/m128, imm8
+         Blend Packed Single Precision Floating-Point Values (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+
+         Int imm8;
+         IRTemp dst_vec = newTemp(Ity_V128);
+         IRTemp src_vec = newTemp(Ity_V128);
+
+         modrm = getUChar(delta);
+
+         assign( dst_vec, getXMMReg( gregOfRexRM(pfx, modrm) ) );
+
+         if ( epartIsReg( modrm ) ) {
+            imm8 = (Int)getUChar(delta+1);
+            assign( src_vec, getXMMReg( eregOfRexRM(pfx, modrm) ) );
+            delta += 1+1;
+            DIP( "blendps $%d, %s,%s\n", imm8,
+                 nameXMMReg( eregOfRexRM(pfx, modrm) ),
+                 nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
+                             1/* imm8 is 1 byte after the amode */ );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            /* was "blendpd": wrong mnemonic for the BLENDPS case */
+            DIP( "blendps $%d, %s,%s\n", 
+                 imm8, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         }
+
+         putXMMReg( gregOfRexRM(pfx, modrm), 
+                    mkexpr( math_BLENDPS_128( src_vec, dst_vec, imm8) ) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x0D:
+      /* 66 0F 3A 0D /r ib = BLENDPD xmm1, xmm2/m128, imm8
+         Blend Packed Double Precision Floating-Point Values (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+
+         Int imm8;
+         IRTemp dst_vec = newTemp(Ity_V128);
+         IRTemp src_vec = newTemp(Ity_V128);
+
+         modrm = getUChar(delta);
+         assign( dst_vec, getXMMReg( gregOfRexRM(pfx, modrm) ) );
+
+         if ( epartIsReg( modrm ) ) {
+            imm8 = (Int)getUChar(delta+1);
+            assign( src_vec, getXMMReg( eregOfRexRM(pfx, modrm) ) );
+            delta += 1+1;
+            DIP( "blendpd $%d, %s,%s\n", imm8,
+                 nameXMMReg( eregOfRexRM(pfx, modrm) ),
+                 nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
+                             1/* imm8 is 1 byte after the amode */ );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "blendpd $%d, %s,%s\n", 
+                 imm8, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         }
+
+         putXMMReg( gregOfRexRM(pfx, modrm), 
+                    mkexpr( math_BLENDPD_128( src_vec, dst_vec, imm8) ) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x0E:
+      /* 66 0F 3A 0E /r ib = PBLENDW xmm1, xmm2/m128, imm8
+         Blend Packed Words (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+
+         Int imm8;
+         IRTemp dst_vec = newTemp(Ity_V128);
+         IRTemp src_vec = newTemp(Ity_V128);
+
+         modrm = getUChar(delta);
+
+         assign( dst_vec, getXMMReg( gregOfRexRM(pfx, modrm) ) );
+
+         if ( epartIsReg( modrm ) ) {
+            imm8 = (Int)getUChar(delta+1);
+            assign( src_vec, getXMMReg( eregOfRexRM(pfx, modrm) ) );
+            delta += 1+1;
+            DIP( "pblendw $%d, %s,%s\n", imm8,
+                 nameXMMReg( eregOfRexRM(pfx, modrm) ),
+                 nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
+                             1/* imm8 is 1 byte after the amode */ );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "pblendw $%d, %s,%s\n", 
+                 imm8, dis_buf, nameXMMReg( gregOfRexRM(pfx, modrm) ) );
+         }
+
+         putXMMReg( gregOfRexRM(pfx, modrm), 
+                    mkexpr( math_PBLENDW_128( src_vec, dst_vec, imm8) ) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x14:
+      /* 66 0F 3A 14 /r ib = PEXTRB r/m8, xmm, imm8
+         Extract Byte from xmm, store in mem or zero-extend + store in gen.reg.
+         (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PEXTRB_128_GtoE( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x15:
+      /* 66 0F 3A 15 /r ib = PEXTRW r/m16, xmm, imm8
+         Extract Word from xmm, store in mem or zero-extend + store in gen.reg.
+         (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_PEXTRW( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x16:
+      /* 66 no-REX.W 0F 3A 16 /r ib = PEXTRD reg/mem32, xmm2, imm8
+         Extract Doubleword int from xmm reg and store in gen.reg or mem. (XMM) 
+         Note that this insn has the same opcodes as PEXTRQ, but 
+         here the REX.W bit is _not_ present */
+      if (have66noF2noF3(pfx) 
+          && sz == 2 /* REX.W is _not_ present */) {
+         delta = dis_PEXTRD( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      /* 66 REX.W 0F 3A 16 /r ib = PEXTRQ reg/mem64, xmm2, imm8
+         Extract Quadword int from xmm reg and store in gen.reg or mem. (XMM) 
+         Note that this insn has the same opcodes as PEXTRD, but 
+         here the REX.W bit is present */
+      if (have66noF2noF3(pfx) 
+          && sz == 8 /* REX.W is present */) {
+         delta = dis_PEXTRQ( vbi, pfx, delta, False/*!isAvx*/);
+         goto decode_success;
+      }
+      break;
+
+   case 0x17:
+      /* 66 0F 3A 17 /r ib = EXTRACTPS reg/mem32, xmm2, imm8 Extract
+         float from xmm reg and store in gen.reg or mem.  This is
+         identical to PEXTRD, except that REX.W appears to be ignored.
+      */
+      if (have66noF2noF3(pfx) 
+          && (sz == 2 || /* ignore redundant REX.W */ sz == 8)) {
+         delta = dis_EXTRACTPS( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x20:
+      /* 66 0F 3A 20 /r ib = PINSRB xmm1, r32/m8, imm8
+         Extract byte from r32/m8 and insert into xmm1 */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         Int    imm8;
+         IRTemp new8 = newTemp(Ity_I8);
+         modrm = getUChar(delta);
+         UInt rG = gregOfRexRM(pfx, modrm);
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            imm8 = (Int)(getUChar(delta+1) & 0xF);
+            assign( new8, unop(Iop_32to8, getIReg32(rE)) );
+            delta += 1+1;
+            DIP( "pinsrb $%d,%s,%s\n", imm8,
+                 nameIReg32(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8 = (Int)(getUChar(delta+alen) & 0xF);
+            assign( new8, loadLE( Ity_I8, mkexpr(addr) ) );
+            delta += alen+1;
+            DIP( "pinsrb $%d,%s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rG) );
+         }
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg( gregOfRexRM(pfx, modrm) ));
+         IRTemp res = math_PINSRB_128( src_vec, new8, imm8 );
+         putXMMReg( rG, mkexpr(res) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x21:
+      /* 66 0F 3A 21 /r ib = INSERTPS imm8, xmm2/m32, xmm1
+         Insert Packed Single Precision Floating-Point Value (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         UInt   imm8;
+         IRTemp d2ins = newTemp(Ity_I32); /* comes from the E part */
+         const IRTemp inval = IRTemp_INVALID;
+
+         modrm = getUChar(delta);
+         UInt rG = gregOfRexRM(pfx, modrm);
+
+         if ( epartIsReg( modrm ) ) {
+            UInt   rE = eregOfRexRM(pfx, modrm);
+            IRTemp vE = newTemp(Ity_V128);
+            assign( vE, getXMMReg(rE) );
+            IRTemp dsE[4] = { inval, inval, inval, inval };
+            breakupV128to32s( vE, &dsE[3], &dsE[2], &dsE[1], &dsE[0] );
+            imm8 = getUChar(delta+1);
+            d2ins = dsE[(imm8 >> 6) & 3]; /* "imm8_count_s" */
+            delta += 1+1;
+            DIP( "insertps $%u, %s,%s\n",
+                 imm8, nameXMMReg(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( d2ins, loadLE( Ity_I32, mkexpr(addr) ) );
+            imm8 = getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "insertps $%u, %s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rG) );
+         }
+
+         IRTemp vG = newTemp(Ity_V128);
+         assign( vG, getXMMReg(rG) );
+
+         putXMMReg( rG, mkexpr(math_INSERTPS( vG, d2ins, imm8 )) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x22:
+      /* 66 no-REX.W 0F 3A 22 /r ib = PINSRD xmm1, r/m32, imm8
+         Extract Doubleword int from gen.reg/mem32 and insert into xmm1 */
+      if (have66noF2noF3(pfx) 
+          && sz == 2 /* REX.W is NOT present */) {
+         Int    imm8_10;
+         IRTemp src_u32 = newTemp(Ity_I32);
+         modrm = getUChar(delta);
+         UInt rG = gregOfRexRM(pfx, modrm);
+
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            imm8_10 = (Int)(getUChar(delta+1) & 3);
+            assign( src_u32, getIReg32( rE ) );
+            delta += 1+1;
+            DIP( "pinsrd $%d, %s,%s\n",
+                 imm8_10, nameIReg32(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8_10 = (Int)(getUChar(delta+alen) & 3);
+            assign( src_u32, loadLE( Ity_I32, mkexpr(addr) ) );
+            delta += alen+1;
+            DIP( "pinsrd $%d, %s,%s\n", 
+                 imm8_10, dis_buf, nameXMMReg(rG) );
+         }
+
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg( rG ));
+         IRTemp res_vec = math_PINSRD_128( src_vec, src_u32, imm8_10 );
+         putXMMReg( rG, mkexpr(res_vec) );
+         goto decode_success;
+      }
+      /* 66 REX.W 0F 3A 22 /r ib = PINSRQ xmm1, r/m64, imm8
+         Extract Quadword int from gen.reg/mem64 and insert into xmm1 */
+      if (have66noF2noF3(pfx) 
+          && sz == 8 /* REX.W is present */) {
+         Int imm8_0;
+         IRTemp src_u64 = newTemp(Ity_I64);
+         modrm = getUChar(delta);
+         UInt rG = gregOfRexRM(pfx, modrm);
+
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            imm8_0 = (Int)(getUChar(delta+1) & 1);
+            assign( src_u64, getIReg64( rE ) );
+            delta += 1+1;
+            DIP( "pinsrq $%d, %s,%s\n",
+                 imm8_0, nameIReg64(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8_0 = (Int)(getUChar(delta+alen) & 1);
+            assign( src_u64, loadLE( Ity_I64, mkexpr(addr) ) );
+            delta += alen+1;
+            DIP( "pinsrq $%d, %s,%s\n", 
+                 imm8_0, dis_buf, nameXMMReg(rG) );
+         }
+
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg( rG ));
+         IRTemp res_vec = math_PINSRQ_128( src_vec, src_u64, imm8_0 );
+         putXMMReg( rG, mkexpr(res_vec) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x40:
+      /* 66 0F 3A 40 /r ib = DPPS xmm1, xmm2/m128, imm8
+         Dot Product of Packed Single Precision Floating-Point Values (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         modrm = getUChar(delta);
+         Int    imm8;
+         IRTemp src_vec = newTemp(Ity_V128);
+         IRTemp dst_vec = newTemp(Ity_V128);
+         UInt   rG      = gregOfRexRM(pfx, modrm);
+         assign( dst_vec, getXMMReg( rG ) );
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            imm8 = (Int)getUChar(delta+1);
+            assign( src_vec, getXMMReg(rE) );
+            delta += 1+1;
+            DIP( "dpps $%d, %s,%s\n",
+                 imm8, nameXMMReg(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
+                             1/* imm8 is 1 byte after the amode */ );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "dpps $%d, %s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rG) );
+         }
+         IRTemp res = math_DPPS_128( src_vec, dst_vec, imm8 );
+         putXMMReg( rG, mkexpr(res) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x41:
+      /* 66 0F 3A 41 /r ib = DPPD xmm1, xmm2/m128, imm8
+         Dot Product of Packed Double Precision Floating-Point Values (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         modrm = getUChar(delta);
+         Int    imm8;
+         IRTemp src_vec = newTemp(Ity_V128);
+         IRTemp dst_vec = newTemp(Ity_V128);
+         UInt   rG      = gregOfRexRM(pfx, modrm);
+         assign( dst_vec, getXMMReg( rG ) );
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            imm8 = (Int)getUChar(delta+1);
+            assign( src_vec, getXMMReg(rE) );
+            delta += 1+1;
+            DIP( "dppd $%d, %s,%s\n",
+                 imm8, nameXMMReg(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
+                             1/* imm8 is 1 byte after the amode */ );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "dppd $%d, %s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rG) );
+         }
+         IRTemp res = math_DPPD_128( src_vec, dst_vec, imm8 );
+         putXMMReg( rG, mkexpr(res) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x42:
+      /* 66 0F 3A 42 /r ib = MPSADBW xmm1, xmm2/m128, imm8
+         Multiple Packed Sums of Absolute Difference (XMM) */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         Int    imm8;
+         IRTemp src_vec = newTemp(Ity_V128);
+         IRTemp dst_vec = newTemp(Ity_V128);
+         modrm          = getUChar(delta);
+         UInt   rG      = gregOfRexRM(pfx, modrm);
+
+         assign( dst_vec, getXMMReg(rG) );
+
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+
+            imm8 = (Int)getUChar(delta+1);
+            assign( src_vec, getXMMReg(rE) );
+            delta += 1+1;
+            DIP( "mpsadbw $%d, %s,%s\n", imm8,
+                 nameXMMReg(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
+                             1/* imm8 is 1 byte after the amode */ );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "mpsadbw $%d, %s,%s\n", imm8, dis_buf, nameXMMReg(rG) );
+         }
+
+         putXMMReg( rG, mkexpr( math_MPSADBW_128(dst_vec, src_vec, imm8) ) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x44:
+      /* 66 0F 3A 44 /r ib = PCLMULQDQ xmm1, xmm2/m128, imm8
+       * Carry-less multiplication of selected XMM quadwords into XMM
+       * registers (a.k.a multiplication of polynomials over GF(2))
+       */
+      if (have66noF2noF3(pfx) && sz == 2) {
+
+         Int imm8;
+         IRTemp svec = newTemp(Ity_V128);
+         IRTemp dvec = newTemp(Ity_V128);
+         modrm       = getUChar(delta);
+         UInt   rG   = gregOfRexRM(pfx, modrm);
+
+         assign( dvec, getXMMReg(rG) );
+
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            imm8 = (Int)getUChar(delta+1);
+            assign( svec, getXMMReg(rE) );
+            delta += 1+1;
+            DIP( "pclmulqdq $%d, %s,%s\n", imm8,
+                 nameXMMReg(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
+                             1/* imm8 is 1 byte after the amode */ );
+            gen_SEGV_if_not_16_aligned( addr );
+            assign( svec, loadLE( Ity_V128, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "pclmulqdq $%d, %s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rG) );
+         }
+
+         putXMMReg( rG, mkexpr( math_PCLMULQDQ(dvec, svec, imm8) ) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x60:
+   case 0x61:
+   case 0x62:
+   case 0x63:
+      /* 66 0F 3A 63 /r ib = PCMPISTRI imm8, xmm2/m128, xmm1
+         66 0F 3A 62 /r ib = PCMPISTRM imm8, xmm2/m128, xmm1
+         66 0F 3A 61 /r ib = PCMPESTRI imm8, xmm2/m128, xmm1
+         66 0F 3A 60 /r ib = PCMPESTRM imm8, xmm2/m128, xmm1
+         (selected special cases that actually occur in glibc,
+          not by any means a complete implementation.)
+      */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         Long delta0 = delta;
+         delta = dis_PCMPxSTRx( vbi, pfx, delta, False/*!isAvx*/, opc );
+         if (delta > delta0) goto decode_success;
+         /* else fall through; dis_PCMPxSTRx failed to decode it */
+      }
+      break;
+
+   case 0xDF:
+      /* 66 0F 3A DF /r ib = AESKEYGENASSIST imm8, xmm2/m128, xmm1 */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         delta = dis_AESKEYGENASSIST( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   default:
+      break;
+
+   }
+
+  decode_failure:
+   *decode_OK = False;
+   return deltaIN;
+
+  decode_success:
+   *decode_OK = True;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level post-escape decoders: dis_ESC_NONE         ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+__attribute__((noinline))
+static
+Long dis_ESC_NONE (
+        /*MB_OUT*/DisResult* dres,
+        /*MB_OUT*/Bool*      expect_CAS,
+        Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+        Bool         resteerCisOk,
+        void*        callback_opaque,
+        const VexArchInfo* archinfo,
+        const VexAbiInfo*  vbi,
+        Prefix pfx, Int sz, Long deltaIN 
+     )
+{
+   Long   d64   = 0;
+   UChar  abyte = 0;
+   IRTemp addr  = IRTemp_INVALID;
+   IRTemp t1    = IRTemp_INVALID;
+   IRTemp t2    = IRTemp_INVALID;
+   IRTemp t3    = IRTemp_INVALID;
+   IRTemp t4    = IRTemp_INVALID;
+   IRTemp t5    = IRTemp_INVALID;
+   IRType ty    = Ity_INVALID;
+   UChar  modrm = 0;
+   Int    am_sz = 0;
+   Int    d_sz  = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta); delta++;
+
+   /* delta now points at the modrm byte.  In most of the cases that
+      follow, neither the F2 nor F3 prefixes are allowed.  However,
+      for some basic arithmetic operations we have to allow F2/XACQ or
+      F3/XREL in the case where the destination is memory and the LOCK
+      prefix is also present.  Do this check by looking at the modrm
+      byte but not advancing delta over it. */
+   /* By default, F2 and F3 are not allowed, so let's start off with
+      that setting. */
+   Bool validF2orF3 = haveF2orF3(pfx) ? False : True;
+   { UChar tmp_modrm = getUChar(delta);
+     switch (opc) {
+        case 0x00: /* ADD Gb,Eb */  case 0x01: /* ADD Gv,Ev */
+        case 0x08: /* OR  Gb,Eb */  case 0x09: /* OR  Gv,Ev */
+        case 0x10: /* ADC Gb,Eb */  case 0x11: /* ADC Gv,Ev */
+        case 0x18: /* SBB Gb,Eb */  case 0x19: /* SBB Gv,Ev */
+        case 0x20: /* AND Gb,Eb */  case 0x21: /* AND Gv,Ev */
+        case 0x28: /* SUB Gb,Eb */  case 0x29: /* SUB Gv,Ev */
+        case 0x30: /* XOR Gb,Eb */  case 0x31: /* XOR Gv,Ev */
+           if (!epartIsReg(tmp_modrm)
+               && haveF2orF3(pfx) && !haveF2andF3(pfx) && haveLOCK(pfx)) {
+              /* dst is mem, and we have F2 or F3 but not both */
+              validF2orF3 = True;
+           }
+           break;
+        default:
+           break;
+     }
+   }
+
+   /* Now, in the switch below, for the opc values examined by the
+      switch above, use validF2orF3 rather than looking at pfx
+      directly. */
+   switch (opc) {
+
+   case 0x00: /* ADD Gb,Eb */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Add8, True, 1, delta, "add" );
+      return delta;
+   case 0x01: /* ADD Gv,Ev */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Add8, True, sz, delta, "add" );
+      return delta;
+
+   case 0x02: /* ADD Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Add8, True, 1, delta, "add" );
+      return delta;
+   case 0x03: /* ADD Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Add8, True, sz, delta, "add" );
+      return delta;
+
+   case 0x04: /* ADD Ib, AL */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( 1, False, Iop_Add8, True, delta, "add" );
+      return delta;
+   case 0x05: /* ADD Iv, eAX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A(sz, False, Iop_Add8, True, delta, "add" );
+      return delta;
+
+   case 0x08: /* OR Gb,Eb */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Or8, True, 1, delta, "or" );
+      return delta;
+   case 0x09: /* OR Gv,Ev */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Or8, True, sz, delta, "or" );
+      return delta;
+
+   case 0x0A: /* OR Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Or8, True, 1, delta, "or" );
+      return delta;
+   case 0x0B: /* OR Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Or8, True, sz, delta, "or" );
+      return delta;
+
+   case 0x0C: /* OR Ib, AL */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( 1, False, Iop_Or8, True, delta, "or" );
+      return delta;
+   case 0x0D: /* OR Iv, eAX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( sz, False, Iop_Or8, True, delta, "or" );
+      return delta;
+
+   case 0x10: /* ADC Gb,Eb */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, True, Iop_Add8, True, 1, delta, "adc" );
+      return delta;
+   case 0x11: /* ADC Gv,Ev */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, True, Iop_Add8, True, sz, delta, "adc" );
+      return delta;
+
+   case 0x12: /* ADC Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, True, Iop_Add8, True, 1, delta, "adc" );
+      return delta;
+   case 0x13: /* ADC Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, True, Iop_Add8, True, sz, delta, "adc" );
+      return delta;
+
+   case 0x14: /* ADC Ib, AL */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( 1, True, Iop_Add8, True, delta, "adc" );
+      return delta;
+   case 0x15: /* ADC Iv, eAX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( sz, True, Iop_Add8, True, delta, "adc" );
+      return delta;
+
+   case 0x18: /* SBB Gb,Eb */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, True, Iop_Sub8, True, 1, delta, "sbb" );
+      return delta;
+   case 0x19: /* SBB Gv,Ev */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, True, Iop_Sub8, True, sz, delta, "sbb" );
+      return delta;
+
+   case 0x1A: /* SBB Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, True, Iop_Sub8, True, 1, delta, "sbb" );
+      return delta;
+   case 0x1B: /* SBB Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, True, Iop_Sub8, True, sz, delta, "sbb" );
+      return delta;
+
+   case 0x1C: /* SBB Ib, AL */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( 1, True, Iop_Sub8, True, delta, "sbb" );
+      return delta;
+   case 0x1D: /* SBB Iv, eAX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( sz, True, Iop_Sub8, True, delta, "sbb" );
+      return delta;
+
+   case 0x20: /* AND Gb,Eb */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_And8, True, 1, delta, "and" );
+      return delta;
+   case 0x21: /* AND Gv,Ev */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_And8, True, sz, delta, "and" );
+      return delta;
+
+   case 0x22: /* AND Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_And8, True, 1, delta, "and" );
+      return delta;
+   case 0x23: /* AND Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_And8, True, sz, delta, "and" );
+      return delta;
+
+   case 0x24: /* AND Ib, AL */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( 1, False, Iop_And8, True, delta, "and" );
+      return delta;
+   case 0x25: /* AND Iv, eAX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( sz, False, Iop_And8, True, delta, "and" );
+      return delta;
+
+   case 0x28: /* SUB Gb,Eb */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Sub8, True, 1, delta, "sub" );
+      return delta;
+   case 0x29: /* SUB Gv,Ev */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Sub8, True, sz, delta, "sub" );
+      return delta;
+
+   case 0x2A: /* SUB Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Sub8, True, 1, delta, "sub" );
+      return delta;
+   case 0x2B: /* SUB Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Sub8, True, sz, delta, "sub" );
+      return delta;
+
+   case 0x2C: /* SUB Ib, AL */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A(1, False, Iop_Sub8, True, delta, "sub" );
+      return delta;
+   case 0x2D: /* SUB Iv, eAX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( sz, False, Iop_Sub8, True, delta, "sub" );
+      return delta;
+
+   case 0x30: /* XOR Gb,Eb */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Xor8, True, 1, delta, "xor" );
+      return delta;
+   case 0x31: /* XOR Gv,Ev */
+      if (!validF2orF3) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Xor8, True, sz, delta, "xor" );
+      return delta;
+
+   case 0x32: /* XOR Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Xor8, True, 1, delta, "xor" );
+      return delta;
+   case 0x33: /* XOR Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Xor8, True, sz, delta, "xor" );
+      return delta;
+
+   case 0x34: /* XOR Ib, AL */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( 1, False, Iop_Xor8, True, delta, "xor" );
+      return delta;
+   case 0x35: /* XOR Iv, eAX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( sz, False, Iop_Xor8, True, delta, "xor" );
+      return delta;
+
+   case 0x38: /* CMP Gb,Eb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Sub8, False, 1, delta, "cmp" );
+      return delta;
+   case 0x39: /* CMP Gv,Ev */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_G_E ( vbi, pfx, False, Iop_Sub8, False, sz, delta, "cmp" );
+      return delta;
+
+   case 0x3A: /* CMP Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Sub8, False, 1, delta, "cmp" );
+      return delta;
+   case 0x3B: /* CMP Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_Sub8, False, sz, delta, "cmp" );
+      return delta;
+
+   case 0x3C: /* CMP Ib, AL */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( 1, False, Iop_Sub8, False, delta, "cmp" );
+      return delta;
+   case 0x3D: /* CMP Iv, eAX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( sz, False, Iop_Sub8, False, delta, "cmp" );
+      return delta;
+
+   case 0x50: /* PUSH eAX */
+   case 0x51: /* PUSH eCX */
+   case 0x52: /* PUSH eDX */
+   case 0x53: /* PUSH eBX */
+   case 0x55: /* PUSH eBP */
+   case 0x56: /* PUSH eSI */
+   case 0x57: /* PUSH eDI */
+   case 0x54: /* PUSH eSP */
+      /* This is the Right Way, in that the value to be pushed is
+         established before %rsp is changed, so that pushq %rsp
+         correctly pushes the old value. */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      vassert(sz == 2 || sz == 4 || sz == 8);
+      if (sz == 4)
+         sz = 8; /* there is no encoding for 32-bit push in 64-bit mode */
+      ty = sz==2 ? Ity_I16 : Ity_I64;
+      t1 = newTemp(ty); 
+      t2 = newTemp(Ity_I64);
+      assign(t1, getIRegRexB(sz, pfx, opc-0x50));
+      assign(t2, binop(Iop_Sub64, getIReg64(R_RSP), mkU64(sz)));
+      putIReg64(R_RSP, mkexpr(t2) );
+      storeLE(mkexpr(t2),mkexpr(t1));
+      DIP("push%c %s\n", nameISize(sz), nameIRegRexB(sz,pfx,opc-0x50));
+      return delta;
+
+   case 0x58: /* POP eAX */
+   case 0x59: /* POP eCX */
+   case 0x5A: /* POP eDX */
+   case 0x5B: /* POP eBX */
+   case 0x5D: /* POP eBP */
+   case 0x5E: /* POP eSI */
+   case 0x5F: /* POP eDI */
+   case 0x5C: /* POP eSP */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      vassert(sz == 2 || sz == 4 || sz == 8);
+      if (sz == 4)
+         sz = 8; /* there is no encoding for 32-bit pop in 64-bit mode */
+      t1 = newTemp(szToITy(sz)); 
+      t2 = newTemp(Ity_I64);
+      assign(t2, getIReg64(R_RSP));
+      assign(t1, loadLE(szToITy(sz),mkexpr(t2)));
+      putIReg64(R_RSP, binop(Iop_Add64, mkexpr(t2), mkU64(sz)));
+      putIRegRexB(sz, pfx, opc-0x58, mkexpr(t1));
+      DIP("pop%c %s\n", nameISize(sz), nameIRegRexB(sz,pfx,opc-0x58));
+      return delta;
+
+   case 0x63: /* MOVSX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      if (haveREX(pfx) && 1==getRexW(pfx)) {
+         vassert(sz == 8);
+         /* movsx r/m32 to r64 */
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta++;
+            putIRegG(8, pfx, modrm, 
+                             unop(Iop_32Sto64, 
+                                  getIRegE(4, pfx, modrm)));
+            DIP("movslq %s,%s\n",
+                nameIRegE(4, pfx, modrm),
+                nameIRegG(8, pfx, modrm));
+            return delta;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            putIRegG(8, pfx, modrm, 
+                             unop(Iop_32Sto64, 
+                                  loadLE(Ity_I32, mkexpr(addr))));
+            DIP("movslq %s,%s\n", dis_buf, 
+                nameIRegG(8, pfx, modrm));
+            return delta;
+         }
+      } else {
+         goto decode_failure;
+      }
+
+   case 0x68: /* PUSH Iv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      /* Note, sz==4 is not possible in 64-bit mode.  Hence ... */
+      if (sz == 4) sz = 8;
+      d64 = getSDisp(imin(4,sz),delta); 
+      delta += imin(4,sz);
+      goto do_push_I;
+
+   case 0x69: /* IMUL Iv, Ev, Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_imul_I_E_G ( vbi, pfx, sz, delta, sz );
+      return delta;
+
+   case 0x6A: /* PUSH Ib, sign-extended to sz */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      /* Note, sz==4 is not possible in 64-bit mode.  Hence ... */
+      if (sz == 4) sz = 8;
+      d64 = getSDisp8(delta); delta += 1;
+      goto do_push_I;
+   do_push_I:
+      ty = szToITy(sz);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(ty);
+      assign( t1, binop(Iop_Sub64,getIReg64(R_RSP),mkU64(sz)) );
+      putIReg64(R_RSP, mkexpr(t1) );
+      /* stop mkU16 asserting if d32 is a negative 16-bit number
+         (bug #132813) */
+      if (ty == Ity_I16)
+         d64 &= 0xFFFF;
+      storeLE( mkexpr(t1), mkU(ty,d64) );
+      DIP("push%c $%lld\n", nameISize(sz), (Long)d64);
+      return delta;
+
+   case 0x6B: /* IMUL Ib, Ev, Gv */
+      delta = dis_imul_I_E_G ( vbi, pfx, sz, delta, 1 );
+      return delta;
+
+   case 0x70:
+   case 0x71:
+   case 0x72:   /* JBb/JNAEb (jump below) */
+   case 0x73:   /* JNBb/JAEb (jump not below) */
+   case 0x74:   /* JZb/JEb (jump zero) */
+   case 0x75:   /* JNZb/JNEb (jump not zero) */
+   case 0x76:   /* JBEb/JNAb (jump below or equal) */
+   case 0x77:   /* JNBEb/JAb (jump not below or equal) */
+   case 0x78:   /* JSb (jump negative) */
+   case 0x79:   /* JNSb (jump not negative) */
+   case 0x7A:   /* JP (jump parity even) */
+   case 0x7B:   /* JNP/JPO (jump parity odd) */
+   case 0x7C:   /* JLb/JNGEb (jump less) */
+   case 0x7D:   /* JGEb/JNLb (jump greater or equal) */
+   case 0x7E:   /* JLEb/JNGb (jump less or equal) */
+   case 0x7F: { /* JGb/JNLEb (jump greater) */
+      Long   jmpDelta;
+      const HChar* comment  = "";
+      if (haveF3(pfx)) goto decode_failure;
+      if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+      jmpDelta = getSDisp8(delta);
+      vassert(-128 <= jmpDelta && jmpDelta < 128);
+      d64 = (guest_RIP_bbstart+delta+1) + jmpDelta;
+      delta++;
+      if (resteerCisOk
+          && vex_control.guest_chase_cond
+          && (Addr64)d64 != (Addr64)guest_RIP_bbstart
+          && jmpDelta < 0
+          && resteerOkFn( callback_opaque, (Addr64)d64) ) {
+         /* Speculation: assume this backward branch is taken.  So we
+            need to emit a side-exit to the insn following this one,
+            on the negation of the condition, and continue at the
+            branch target address (d64).  If we wind up back at the
+            first instruction of the trace, just stop; it's better to
+            let the IR loop unroller handle that case. */
+         stmt( IRStmt_Exit( 
+                  mk_amd64g_calculate_condition(
+                     (AMD64Condcode)(1 ^ (opc - 0x70))),
+                  Ijk_Boring,
+                  IRConst_U64(guest_RIP_bbstart+delta),
+                  OFFB_RIP ) );
+         dres->whatNext   = Dis_ResteerC;
+         dres->continueAt = d64;
+         comment = "(assumed taken)";
+      }
+      else
+      if (resteerCisOk
+          && vex_control.guest_chase_cond
+          && (Addr64)d64 != (Addr64)guest_RIP_bbstart
+          && jmpDelta >= 0
+          && resteerOkFn( callback_opaque, guest_RIP_bbstart+delta ) ) {
+         /* Speculation: assume this forward branch is not taken.  So
+            we need to emit a side-exit to d64 (the dest) and continue
+            disassembling at the insn immediately following this
+            one. */
+         stmt( IRStmt_Exit( 
+                  mk_amd64g_calculate_condition((AMD64Condcode)(opc - 0x70)),
+                  Ijk_Boring,
+                  IRConst_U64(d64),
+                  OFFB_RIP ) );
+         dres->whatNext   = Dis_ResteerC;
+         dres->continueAt = guest_RIP_bbstart+delta;
+         comment = "(assumed not taken)";
+      }
+      else {
+         /* Conservative default translation - end the block at this
+            point. */
+         jcc_01( dres, (AMD64Condcode)(opc - 0x70),
+                 guest_RIP_bbstart+delta, d64 );
+         vassert(dres->whatNext == Dis_StopHere);
+      }
+      DIP("j%s-8 0x%llx %s\n", name_AMD64Condcode(opc - 0x70), d64, comment);
+      return delta;
+   }
+
+   case 0x80: /* Grp1 Ib,Eb */
+      modrm = getUChar(delta);
+      /* Disallow F2/XACQ and F3/XREL for the non-mem case.  Allow
+         just one for the mem case and also require LOCK in this case.
+         Note that this erroneously allows XACQ/XREL on CMP since we
+         don't check the subopcode here.  No big deal. */
+      if (epartIsReg(modrm) && haveF2orF3(pfx))
+         goto decode_failure;
+      if (!epartIsReg(modrm) && haveF2andF3(pfx))
+         goto decode_failure;
+      if (!epartIsReg(modrm) && haveF2orF3(pfx) && !haveLOCK(pfx))
+         goto decode_failure;
+      am_sz = lengthAMode(pfx,delta);
+      sz    = 1;
+      d_sz  = 1;
+      d64   = getSDisp8(delta + am_sz);
+      delta = dis_Grp1 ( vbi, pfx, delta, modrm, am_sz, d_sz, sz, d64 );
+      return delta;
+
+   case 0x81: /* Grp1 Iv,Ev */
+      modrm = getUChar(delta);
+      /* Same comment as for case 0x80 just above. */
+      if (epartIsReg(modrm) && haveF2orF3(pfx))
+         goto decode_failure;
+      if (!epartIsReg(modrm) && haveF2andF3(pfx))
+         goto decode_failure;
+      if (!epartIsReg(modrm) && haveF2orF3(pfx) && !haveLOCK(pfx))
+         goto decode_failure;
+      am_sz = lengthAMode(pfx,delta);
+      d_sz  = imin(sz,4);
+      d64   = getSDisp(d_sz, delta + am_sz);
+      delta = dis_Grp1 ( vbi, pfx, delta, modrm, am_sz, d_sz, sz, d64 );
+      return delta;
+
+   case 0x83: /* Grp1 Ib,Ev */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(pfx,delta);
+      d_sz  = 1;
+      d64   = getSDisp8(delta + am_sz);
+      delta = dis_Grp1 ( vbi, pfx, delta, modrm, am_sz, d_sz, sz, d64 );
+      return delta;
+
+   case 0x84: /* TEST Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_And8, False, 1, delta, "test" );
+      return delta;
+
+   case 0x85: /* TEST Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op2_E_G ( vbi, pfx, False, Iop_And8, False, sz, delta, "test" );
+      return delta;
+
+   /* XCHG reg,mem automatically asserts LOCK# even without a LOCK
+      prefix.  Therefore, generate CAS regardless of the presence or
+      otherwise of a LOCK prefix. */
+   case 0x86: /* XCHG Gb,Eb */
+      sz = 1;
+      /* Fall through ... */
+   case 0x87: /* XCHG Gv,Ev */
+      modrm = getUChar(delta);
+      /* Check whether F2 or F3 are allowable.  For the mem case, one
+         or the other but not both are.  We don't care about the
+         presence of LOCK in this case -- XCHG is unusual in this
+         respect. */
+      if (haveF2orF3(pfx)) {
+         if (epartIsReg(modrm)) { 
+            goto decode_failure;
+         } else {
+            if (haveF2andF3(pfx))
+               goto decode_failure;
+         }
+      }
+      ty = szToITy(sz);
+      t1 = newTemp(ty); t2 = newTemp(ty);
+      if (epartIsReg(modrm)) {
+         assign(t1, getIRegE(sz, pfx, modrm));
+         assign(t2, getIRegG(sz, pfx, modrm));
+         putIRegG(sz, pfx, modrm, mkexpr(t1));
+         putIRegE(sz, pfx, modrm, mkexpr(t2));
+         delta++;
+         DIP("xchg%c %s, %s\n", 
+             nameISize(sz), nameIRegG(sz, pfx, modrm), 
+                            nameIRegE(sz, pfx, modrm));
+      } else {
+         *expect_CAS = True;
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         assign( t1, loadLE(ty, mkexpr(addr)) );
+         assign( t2, getIRegG(sz, pfx, modrm) );
+         casLE( mkexpr(addr),
+                mkexpr(t1), mkexpr(t2), guest_RIP_curr_instr );
+         putIRegG( sz, pfx, modrm, mkexpr(t1) );
+         delta += alen;
+         DIP("xchg%c %s, %s\n", nameISize(sz), 
+                                nameIRegG(sz, pfx, modrm), dis_buf);
+      }
+      return delta;
+
+   case 0x88: { /* MOV Gb,Eb */
+      /* We let dis_mov_G_E decide whether F3(XRELEASE) is allowable. */
+      Bool ok = True;
+      delta = dis_mov_G_E(vbi, pfx, 1, delta, &ok);
+      if (!ok) goto decode_failure;
+      return delta;
+   }
+
+   case 0x89: { /* MOV Gv,Ev */
+      /* We let dis_mov_G_E decide whether F3(XRELEASE) is allowable. */
+      Bool ok = True;
+      delta = dis_mov_G_E(vbi, pfx, sz, delta, &ok);
+      if (!ok) goto decode_failure;
+      return delta;
+   }
+
+   case 0x8A: /* MOV Eb,Gb */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_mov_E_G(vbi, pfx, 1, delta);
+      return delta;
+
+   case 0x8B: /* MOV Ev,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_mov_E_G(vbi, pfx, sz, delta);
+      return delta;
+
+   case 0x8D: /* LEA M,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      if (sz != 4 && sz != 8)
+         goto decode_failure;
+      modrm = getUChar(delta);
+      if (epartIsReg(modrm)) 
+         goto decode_failure;
+      /* NOTE!  this is the one place where a segment override prefix
+         has no effect on the address calculation.  Therefore we clear
+         any segment override bits in pfx. */
+      addr = disAMode ( &alen, vbi, clearSegBits(pfx), delta, dis_buf, 0 );
+      delta += alen;
+      /* This is a hack.  But it isn't clear that really doing the
+         calculation at 32 bits is really worth it.  Hence for leal,
+         do the full 64-bit calculation and then truncate it. */
+      putIRegG( sz, pfx, modrm, 
+                         sz == 4
+                            ? unop(Iop_64to32, mkexpr(addr))
+                            : mkexpr(addr)
+              );
+      DIP("lea%c %s, %s\n", nameISize(sz), dis_buf, 
+                            nameIRegG(sz,pfx,modrm));
+      return delta;
+
+   case 0x8F: { /* POPQ m64 / POPW m16 */
+      Int   len;
+      UChar rm;
+      /* There is no encoding for 32-bit pop in 64-bit mode.
+         So sz==4 actually means sz==8. */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      vassert(sz == 2 || sz == 4
+              || /* tolerate redundant REX.W, see #210481 */ sz == 8);
+      if (sz == 4) sz = 8;
+      if (sz != 8) goto decode_failure; // until we know a sz==2 test case exists
+
+      rm = getUChar(delta);
+
+      /* make sure this instruction is correct POP */
+      if (epartIsReg(rm) || gregLO3ofRM(rm) != 0)
+         goto decode_failure;
+      /* and has correct size */
+      vassert(sz == 8);      
+       
+      t1 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+      assign( t1, getIReg64(R_RSP) );
+      assign( t3, loadLE(Ity_I64, mkexpr(t1)) );
+       
+      /* Increase RSP; must be done before the STORE.  Intel manual
+         says: If the RSP register is used as a base register for
+         addressing a destination operand in memory, the POP
+         instruction computes the effective address of the operand
+         after it increments the RSP register.  */
+      putIReg64(R_RSP, binop(Iop_Add64, mkexpr(t1), mkU64(sz)) );
+
+      addr = disAMode ( &len, vbi, pfx, delta, dis_buf, 0 );
+      storeLE( mkexpr(addr), mkexpr(t3) );
+
+      DIP("popl %s\n", dis_buf);
+
+      delta += len;
+      return delta;
+   }
+
+   case 0x90: /* XCHG eAX,eAX */
+      /* detect and handle F3 90 (rep nop) specially */
+      if (!have66(pfx) && !haveF2(pfx) && haveF3(pfx)) {
+         DIP("rep nop (P4 pause)\n");
+         /* "observe" the hint.  The Vex client needs to be careful not
+            to cause very long delays as a result, though. */
+         jmp_lit(dres, Ijk_Yield, guest_RIP_bbstart+delta);
+         vassert(dres->whatNext == Dis_StopHere);
+         return delta;
+      }
+      /* detect and handle NOPs specially */
+      if (/* F2/F3 probably change meaning completely */
+          !haveF2orF3(pfx)
+          /* If REX.B is 1, we're not exchanging rAX with itself */
+          && getRexB(pfx)==0 ) {
+         DIP("nop\n");
+         return delta;
+      }
+      /* else fall through to normal case. */
+   case 0x91: /* XCHG rAX,rCX */
+   case 0x92: /* XCHG rAX,rDX */
+   case 0x93: /* XCHG rAX,rBX */
+   case 0x94: /* XCHG rAX,rSP */
+   case 0x95: /* XCHG rAX,rBP */
+   case 0x96: /* XCHG rAX,rSI */
+   case 0x97: /* XCHG rAX,rDI */
+      /* guard against mutancy */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      codegen_xchg_rAX_Reg ( pfx, sz, opc - 0x90 );
+      return delta;
+
+   case 0x98: /* CBW */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      if (sz == 8) {
+         putIRegRAX( 8, unop(Iop_32Sto64, getIRegRAX(4)) );
+         DIP(/*"cdqe\n"*/"cltq");
+         return delta;
+      }
+      if (sz == 4) {
+         putIRegRAX( 4, unop(Iop_16Sto32, getIRegRAX(2)) );
+         DIP("cwtl\n");
+         return delta;
+      }
+      if (sz == 2) {
+         putIRegRAX( 2, unop(Iop_8Sto16, getIRegRAX(1)) );
+         DIP("cbw\n");
+         return delta;
+      }
+      goto decode_failure;
+
+   case 0x99: /* CWD/CDQ/CQO */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      vassert(sz == 2 || sz == 4 || sz == 8);
+      ty = szToITy(sz);
+      putIRegRDX( sz, 
+                  binop(mkSizedOp(ty,Iop_Sar8), 
+                        getIRegRAX(sz),
+                        mkU8(sz == 2 ? 15 : (sz == 4 ? 31 : 63))) );
+      DIP(sz == 2 ? "cwd\n" 
+                  : (sz == 4 ? /*"cdq\n"*/ "cltd\n" 
+                             : "cqo\n"));
+      return delta;
+
+   case 0x9B: /* FWAIT (X87 insn) */
+      /* ignore? */
+      DIP("fwait\n");
+      return delta;
+
+   case 0x9C: /* PUSHF */ {
+      /* Note.  There is no encoding for a 32-bit pushf in 64-bit
+         mode.  So sz==4 actually means sz==8. */
+      /* 24 July 06: has also been seen with a redundant REX prefix,
+         so must also allow sz==8. */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      vassert(sz == 2 || sz == 4 || sz == 8);
+      if (sz == 4) sz = 8;
+      if (sz != 8) goto decode_failure; // until we know a sz==2 test case exists
+
+      t1 = newTemp(Ity_I64);
+      assign( t1, binop(Iop_Sub64,getIReg64(R_RSP),mkU64(sz)) );
+      putIReg64(R_RSP, mkexpr(t1) );
+
+      t2 = newTemp(Ity_I64);
+      assign( t2, mk_amd64g_calculate_rflags_all() );
+
+      /* Patch in the D flag.  This can simply be a copy of bit 10 of
+         baseBlock[OFFB_DFLAG]. */
+      t3 = newTemp(Ity_I64);
+      assign( t3, binop(Iop_Or64,
+                        mkexpr(t2),
+                        binop(Iop_And64,
+                              IRExpr_Get(OFFB_DFLAG,Ity_I64),
+                              mkU64(1<<10))) 
+            );
+
+      /* And patch in the ID flag. */
+      t4 = newTemp(Ity_I64);
+      assign( t4, binop(Iop_Or64,
+                        mkexpr(t3),
+                        binop(Iop_And64,
+                              binop(Iop_Shl64, IRExpr_Get(OFFB_IDFLAG,Ity_I64), 
+                                               mkU8(21)),
+                              mkU64(1<<21)))
+            );
+
+      /* And patch in the AC flag too. */
+      t5 = newTemp(Ity_I64);
+      assign( t5, binop(Iop_Or64,
+                        mkexpr(t4),
+                        binop(Iop_And64,
+                              binop(Iop_Shl64, IRExpr_Get(OFFB_ACFLAG,Ity_I64), 
+                                               mkU8(18)),
+                              mkU64(1<<18)))
+            );
+
+      /* if sz==2, the stored value needs to be narrowed. */
+      if (sz == 2)
+        storeLE( mkexpr(t1), unop(Iop_32to16,
+                             unop(Iop_64to32,mkexpr(t5))) );
+      else 
+        storeLE( mkexpr(t1), mkexpr(t5) );
+
+      DIP("pushf%c\n", nameISize(sz));
+      return delta;
+   }
+
+   case 0x9D: /* POPF */
+      /* Note.  There is no encoding for a 32-bit popf in 64-bit mode.
+         So sz==4 actually means sz==8. */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      vassert(sz == 2 || sz == 4);
+      if (sz == 4) sz = 8;
+      if (sz != 8) goto decode_failure; // until we know a sz==2 test case exists
+      t1 = newTemp(Ity_I64); t2 = newTemp(Ity_I64);
+      assign(t2, getIReg64(R_RSP));
+      assign(t1, widenUto64(loadLE(szToITy(sz),mkexpr(t2))));
+      putIReg64(R_RSP, binop(Iop_Add64, mkexpr(t2), mkU64(sz)));
+      /* t1 is the flag word.  Mask out everything except OSZACP and 
+         set the flags thunk to AMD64G_CC_OP_COPY. */
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, 
+                        binop(Iop_And64,
+                              mkexpr(t1), 
+                              mkU64( AMD64G_CC_MASK_C | AMD64G_CC_MASK_P 
+                                     | AMD64G_CC_MASK_A | AMD64G_CC_MASK_Z 
+                                     | AMD64G_CC_MASK_S| AMD64G_CC_MASK_O )
+                             )
+                       )
+          );
+
+      /* Also need to set the D flag, which is held in bit 10 of t1.
+         If zero, put 1 in OFFB_DFLAG, else -1 in OFFB_DFLAG. */
+      stmt( IRStmt_Put( 
+               OFFB_DFLAG,
+               IRExpr_ITE( 
+                  unop(Iop_64to1,
+                       binop(Iop_And64, 
+                             binop(Iop_Shr64, mkexpr(t1), mkU8(10)), 
+                             mkU64(1))),
+                  mkU64(0xFFFFFFFFFFFFFFFFULL),
+                  mkU64(1)))
+          );
+
+      /* And set the ID flag */
+      stmt( IRStmt_Put( 
+               OFFB_IDFLAG,
+               IRExpr_ITE( 
+                  unop(Iop_64to1,
+                       binop(Iop_And64, 
+                             binop(Iop_Shr64, mkexpr(t1), mkU8(21)), 
+                             mkU64(1))),
+                  mkU64(1),
+                  mkU64(0))) 
+          );
+
+      /* And set the AC flag too */
+      stmt( IRStmt_Put( 
+               OFFB_ACFLAG,
+               IRExpr_ITE( 
+                  unop(Iop_64to1,
+                       binop(Iop_And64, 
+                             binop(Iop_Shr64, mkexpr(t1), mkU8(18)), 
+                             mkU64(1))),
+                  mkU64(1),
+                  mkU64(0))) 
+          );
+
+      DIP("popf%c\n", nameISize(sz));
+      return delta;
+
+   case 0x9E: /* SAHF */
+      codegen_SAHF();
+      DIP("sahf\n");
+      return delta;
+
+   case 0x9F: /* LAHF */
+      codegen_LAHF();
+      DIP("lahf\n");
+      return delta;
+
+   case 0xA0: /* MOV Ob,AL */
+      if (have66orF2orF3(pfx)) goto decode_failure;
+      sz = 1;
+      /* Fall through ... */
+   case 0xA1: /* MOV Ov,eAX */
+      if (sz != 8 && sz != 4 && sz != 2 && sz != 1) 
+         goto decode_failure;
+      d64 = getDisp64(delta); 
+      delta += 8;
+      ty = szToITy(sz);
+      addr = newTemp(Ity_I64);
+      assign( addr, handleAddrOverrides(vbi, pfx, mkU64(d64)) );
+      putIRegRAX(sz, loadLE( ty, mkexpr(addr) ));
+      DIP("mov%c %s0x%llx, %s\n", nameISize(sz), 
+                                  segRegTxt(pfx), d64,
+                                  nameIRegRAX(sz));
+      return delta;
+
+   case 0xA2: /* MOV AL,Ob */
+      if (have66orF2orF3(pfx)) goto decode_failure;
+      sz = 1;
+      /* Fall through ... */
+   case 0xA3: /* MOV eAX,Ov */
+      if (sz != 8 && sz != 4 && sz != 2 && sz != 1) 
+         goto decode_failure;
+      d64 = getDisp64(delta); 
+      delta += 8;
+      ty = szToITy(sz);
+      addr = newTemp(Ity_I64);
+      assign( addr, handleAddrOverrides(vbi, pfx, mkU64(d64)) );
+      storeLE( mkexpr(addr), getIRegRAX(sz) );
+      DIP("mov%c %s, %s0x%llx\n", nameISize(sz), nameIRegRAX(sz),
+                                  segRegTxt(pfx), d64);
+      return delta;
+
+   case 0xA4:
+   case 0xA5:
+      /* F3 A4: rep movsb */
+      if (haveF3(pfx) && !haveF2(pfx)) {
+         if (opc == 0xA4)
+            sz = 1;
+         dis_REP_op ( dres, AMD64CondAlways, dis_MOVS, sz,
+                      guest_RIP_curr_instr,
+                      guest_RIP_bbstart+delta, "rep movs", pfx );
+        dres->whatNext = Dis_StopHere;
+        return delta;
+      }
+      /* A4: movsb */
+      if (!haveF3(pfx) && !haveF2(pfx)) {
+         if (opc == 0xA4)
+            sz = 1;
+         dis_string_op( dis_MOVS, sz, "movs", pfx );
+         return delta;
+      }
+      goto decode_failure;
+
+   case 0xA6:
+   case 0xA7:
+      /* F3 A6/A7: repe cmps/rep cmps{w,l,q} */
+      if (haveF3(pfx) && !haveF2(pfx)) {
+         if (opc == 0xA6)
+            sz = 1;
+         dis_REP_op ( dres, AMD64CondZ, dis_CMPS, sz, 
+                      guest_RIP_curr_instr,
+                      guest_RIP_bbstart+delta, "repe cmps", pfx );
+         dres->whatNext = Dis_StopHere;
+         return delta;
+      }
+      goto decode_failure;
+
+   case 0xAA:
+   case 0xAB:
+      /* F3 AA/AB: rep stosb/rep stos{w,l,q} */
+      if (haveF3(pfx) && !haveF2(pfx)) {
+         if (opc == 0xAA)
+            sz = 1;
+         dis_REP_op ( dres, AMD64CondAlways, dis_STOS, sz,
+                      guest_RIP_curr_instr,
+                      guest_RIP_bbstart+delta, "rep stos", pfx );
+         vassert(dres->whatNext == Dis_StopHere);
+         return delta;
+      }
+      /* AA/AB: stosb/stos{w,l,q} */
+      if (!haveF3(pfx) && !haveF2(pfx)) {
+         if (opc == 0xAA)
+            sz = 1;
+         dis_string_op( dis_STOS, sz, "stos", pfx );
+         return delta;
+      }
+      goto decode_failure;
+
+   case 0xA8: /* TEST Ib, AL */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( 1, False, Iop_And8, False, delta, "test" );
+      return delta;
+   case 0xA9: /* TEST Iv, eAX */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_op_imm_A( sz, False, Iop_And8, False, delta, "test" );
+      return delta;
+
+   case 0xAC: /* LODS, no REP prefix */
+   case 0xAD:
+      dis_string_op( dis_LODS, ( opc == 0xAC ? 1 : sz ), "lods", pfx );
+      return delta;
+
+   case 0xAE:
+   case 0xAF:
+      /* F2 AE/AF: repne scasb/repne scas{w,l,q} */
+      if (haveF2(pfx) && !haveF3(pfx)) {
+         if (opc == 0xAE)
+            sz = 1;
+         dis_REP_op ( dres, AMD64CondNZ, dis_SCAS, sz, 
+                      guest_RIP_curr_instr,
+                      guest_RIP_bbstart+delta, "repne scas", pfx );
+         vassert(dres->whatNext == Dis_StopHere);
+         return delta;
+      }
+      /* F3 AE/AF: repe scasb/repe scas{w,l,q} */
+      if (!haveF2(pfx) && haveF3(pfx)) {
+         if (opc == 0xAE)
+            sz = 1;
+         dis_REP_op ( dres, AMD64CondZ, dis_SCAS, sz, 
+                      guest_RIP_curr_instr,
+                      guest_RIP_bbstart+delta, "repe scas", pfx );
+         vassert(dres->whatNext == Dis_StopHere);
+         return delta;
+      }
+      /* AE/AF: scasb/scas{w,l,q} */
+      if (!haveF2(pfx) && !haveF3(pfx)) {
+         if (opc == 0xAE)
+            sz = 1;
+         dis_string_op( dis_SCAS, sz, "scas", pfx );
+         return delta;
+      }
+      goto decode_failure;
+
+   /* XXXX be careful here with moves to AH/BH/CH/DH */
+   case 0xB0: /* MOV imm,AL */
+   case 0xB1: /* MOV imm,CL */
+   case 0xB2: /* MOV imm,DL */
+   case 0xB3: /* MOV imm,BL */
+   case 0xB4: /* MOV imm,AH */
+   case 0xB5: /* MOV imm,CH */
+   case 0xB6: /* MOV imm,DH */
+   case 0xB7: /* MOV imm,BH */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      d64 = getUChar(delta); 
+      delta += 1;
+      putIRegRexB(1, pfx, opc-0xB0, mkU8(d64));
+      DIP("movb $%lld,%s\n", d64, nameIRegRexB(1,pfx,opc-0xB0));
+      return delta;
+
+   case 0xB8: /* MOV imm,eAX */
+   case 0xB9: /* MOV imm,eCX */
+   case 0xBA: /* MOV imm,eDX */
+   case 0xBB: /* MOV imm,eBX */
+   case 0xBC: /* MOV imm,eSP */
+   case 0xBD: /* MOV imm,eBP */
+   case 0xBE: /* MOV imm,eSI */
+   case 0xBF: /* MOV imm,eDI */
+      /* This is the one-and-only place where 64-bit literals are
+         allowed in the instruction stream. */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      if (sz == 8) {
+         d64 = getDisp64(delta);
+         delta += 8;
+         putIRegRexB(8, pfx, opc-0xB8, mkU64(d64));
+         DIP("movabsq $%lld,%s\n", (Long)d64, 
+                                   nameIRegRexB(8,pfx,opc-0xB8));
+      } else {
+         d64 = getSDisp(imin(4,sz),delta);
+         delta += imin(4,sz);
+         putIRegRexB(sz, pfx, opc-0xB8, 
+                         mkU(szToITy(sz), d64 & mkSizeMask(sz)));
+         DIP("mov%c $%lld,%s\n", nameISize(sz), 
+                                 (Long)d64, 
+                                 nameIRegRexB(sz,pfx,opc-0xB8));
+      }
+      return delta;
+
+   case 0xC0: { /* Grp2 Ib,Eb */
+      Bool decode_OK = True;
+      if (haveF2orF3(pfx)) goto decode_failure;
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(pfx,delta);
+      d_sz  = 1;
+      d64   = getUChar(delta + am_sz);
+      sz    = 1;
+      delta = dis_Grp2 ( vbi, pfx, delta, modrm, am_sz, d_sz, sz, 
+                         mkU8(d64 & 0xFF), NULL, &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   case 0xC1: { /* Grp2 Ib,Ev */
+      Bool decode_OK = True;
+      if (haveF2orF3(pfx)) goto decode_failure;
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(pfx,delta);
+      d_sz  = 1;
+      d64   = getUChar(delta + am_sz);
+      delta = dis_Grp2 ( vbi, pfx, delta, modrm, am_sz, d_sz, sz, 
+                         mkU8(d64 & 0xFF), NULL, &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   case 0xC2: /* RET imm16 */
+      if (have66orF3(pfx)) goto decode_failure;
+      if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+      d64 = getUDisp16(delta); 
+      delta += 2;
+      dis_ret(dres, vbi, d64);
+      DIP("ret $%lld\n", d64);
+      return delta;
+
+   case 0xC3: /* RET */
+      if (have66(pfx)) goto decode_failure;
+      /* F3 is acceptable on AMD. */
+      if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+      dis_ret(dres, vbi, 0);
+      DIP(haveF3(pfx) ? "rep ; ret\n" : "ret\n");
+      return delta;
+
+   case 0xC6: /* C6 /0 = MOV Ib,Eb */
+      sz = 1;
+      goto maybe_do_Mov_I_E;
+   case 0xC7: /* C7 /0 = MOV Iv,Ev */
+      goto maybe_do_Mov_I_E;
+   maybe_do_Mov_I_E:
+      modrm = getUChar(delta);
+      if (gregLO3ofRM(modrm) == 0) {
+         if (epartIsReg(modrm)) {
+            /* Neither F2 nor F3 are allowable. */
+            if (haveF2orF3(pfx)) goto decode_failure;
+            delta++; /* mod/rm byte */
+            d64 = getSDisp(imin(4,sz),delta); 
+            delta += imin(4,sz);
+            putIRegE(sz, pfx, modrm, 
+                         mkU(szToITy(sz), d64 & mkSizeMask(sz)));
+            DIP("mov%c $%lld, %s\n", nameISize(sz), 
+                                     (Long)d64, 
+                                     nameIRegE(sz,pfx,modrm));
+         } else {
+            if (haveF2(pfx)) goto decode_failure;
+            /* F3(XRELEASE) is allowable here */
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 
+                              /*xtra*/imin(4,sz) );
+            delta += alen;
+            d64 = getSDisp(imin(4,sz),delta);
+            delta += imin(4,sz);
+            storeLE(mkexpr(addr), 
+                    mkU(szToITy(sz), d64 & mkSizeMask(sz)));
+            DIP("mov%c $%lld, %s\n", nameISize(sz), (Long)d64, dis_buf);
+         }
+         return delta;
+      }
+      /* BEGIN HACKY SUPPORT FOR xbegin */
+      if (opc == 0xC7 && modrm == 0xF8 && !have66orF2orF3(pfx) && sz == 4
+          && (archinfo->hwcaps & VEX_HWCAPS_AMD64_AVX)) {
+         delta++; /* mod/rm byte */
+         d64 = getSDisp(4,delta); 
+         delta += 4;
+         guest_RIP_next_mustcheck = True;
+         guest_RIP_next_assumed   = guest_RIP_bbstart + delta;
+         Addr64 failAddr = guest_RIP_bbstart + delta + d64;
+         /* EAX contains the failure status code.  Bit 3 is "Set if an
+            internal buffer overflowed", which seems like the
+            least-bogus choice we can make here. */
+         putIRegRAX(4, mkU32(1<<3));
+         /* And jump to the fail address. */
+         jmp_lit(dres, Ijk_Boring, failAddr);
+         vassert(dres->whatNext == Dis_StopHere);
+         DIP("xbeginq 0x%llx\n", failAddr);
+         return delta;
+      }
+      /* END HACKY SUPPORT FOR xbegin */
+      /* BEGIN HACKY SUPPORT FOR xabort */
+      if (opc == 0xC6 && modrm == 0xF8 && !have66orF2orF3(pfx) && sz == 1
+          && (archinfo->hwcaps & VEX_HWCAPS_AMD64_AVX)) {
+         delta++; /* mod/rm byte */
+         abyte = getUChar(delta); delta++;
+         /* There is never a real transaction in progress, so do nothing. */
+         DIP("xabort $%d", (Int)abyte);
+         return delta;
+      }
+      /* END HACKY SUPPORT FOR xabort */
+      goto decode_failure;
+
+   case 0xC8: /* ENTER */
+      /* Same comments re operand size as for LEAVE below apply.
+         Also, only handles the case "enter $imm16, $0"; other cases
+         for the second operand (nesting depth) are not handled. */
+      if (sz != 4)
+         goto decode_failure;
+      d64 = getUDisp16(delta);
+      delta += 2;
+      vassert(d64 >= 0 && d64 <= 0xFFFF);
+      if (getUChar(delta) != 0)
+         goto decode_failure;
+      delta++;
+      /* Intel docs seem to suggest:
+           push rbp
+           temp = rsp
+           rbp = temp
+           rsp = rsp - imm16
+      */
+      t1 = newTemp(Ity_I64);
+      assign(t1, getIReg64(R_RBP));
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Sub64, getIReg64(R_RSP), mkU64(8)));
+      putIReg64(R_RSP, mkexpr(t2));
+      storeLE(mkexpr(t2), mkexpr(t1));
+      putIReg64(R_RBP, mkexpr(t2));
+      if (d64 > 0) {
+         putIReg64(R_RSP, binop(Iop_Sub64, mkexpr(t2), mkU64(d64)));
+      }
+      DIP("enter $%u, $0\n", (UInt)d64);
+      return delta;
+
+   case 0xC9: /* LEAVE */
+      /* In 64-bit mode this defaults to a 64-bit operand size.  There
+         is no way to encode a 32-bit variant.  Hence sz==4 but we do
+         it as if sz=8. */
+      if (sz != 4) 
+         goto decode_failure;
+      t1 = newTemp(Ity_I64); 
+      t2 = newTemp(Ity_I64);
+      assign(t1, getIReg64(R_RBP));
+      /* First PUT RSP looks redundant, but need it because RSP must
+         always be up-to-date for Memcheck to work... */
+      putIReg64(R_RSP, mkexpr(t1));
+      assign(t2, loadLE(Ity_I64,mkexpr(t1)));
+      putIReg64(R_RBP, mkexpr(t2));
+      putIReg64(R_RSP, binop(Iop_Add64, mkexpr(t1), mkU64(8)) );
+      DIP("leave\n");
+      return delta;
+
+   case 0xCC: /* INT 3 */
+      jmp_lit(dres, Ijk_SigTRAP, guest_RIP_bbstart + delta);
+      vassert(dres->whatNext == Dis_StopHere);
+      DIP("int $0x3\n");
+      return delta;
+
+   case 0xD0: { /* Grp2 1,Eb */
+      Bool decode_OK = True;
+      if (haveF2orF3(pfx)) goto decode_failure;
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(pfx,delta);
+      d_sz  = 0;
+      d64   = 1;
+      sz    = 1;
+      delta = dis_Grp2 ( vbi, pfx, delta, modrm, am_sz, d_sz, sz, 
+                         mkU8(d64), NULL, &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   case 0xD1: { /* Grp2 1,Ev */
+      Bool decode_OK = True;
+      if (haveF2orF3(pfx)) goto decode_failure;
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(pfx,delta);
+      d_sz  = 0;
+      d64   = 1;
+      delta = dis_Grp2 ( vbi, pfx, delta, modrm, am_sz, d_sz, sz, 
+                         mkU8(d64), NULL, &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   case 0xD2: { /* Grp2 CL,Eb */
+      Bool decode_OK = True;
+      if (haveF2orF3(pfx)) goto decode_failure;
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(pfx,delta);
+      d_sz  = 0;
+      sz    = 1;
+      delta = dis_Grp2 ( vbi, pfx, delta, modrm, am_sz, d_sz, sz, 
+                         getIRegCL(), "%cl", &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   case 0xD3: { /* Grp2 CL,Ev */
+      Bool decode_OK = True;
+      if (haveF2orF3(pfx)) goto decode_failure;
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(pfx,delta);
+      d_sz  = 0;
+      delta = dis_Grp2 ( vbi, pfx, delta, modrm, am_sz, d_sz, sz, 
+                         getIRegCL(), "%cl", &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   case 0xD8: /* X87 instructions */
+   case 0xD9:
+   case 0xDA:
+   case 0xDB:
+   case 0xDC:
+   case 0xDD:
+   case 0xDE:
+   case 0xDF: {
+      Bool redundantREXWok = False;
+
+      if (haveF2orF3(pfx)) 
+         goto decode_failure;
+
+      /* kludge to tolerate redundant rex.w prefixes (should do this
+         properly one day) */
+      /* mono 1.1.18.1 produces 48 D9 FA, which is rex.w fsqrt */
+      if ( (opc == 0xD9 && getUChar(delta+0) == 0xFA)/*fsqrt*/ )
+         redundantREXWok = True;
+
+      Bool size_OK = False;
+      if ( sz == 4 )
+         size_OK = True;
+      else if ( sz == 8 )
+         size_OK = redundantREXWok;
+      else if ( sz == 2 ) {
+         int mod_rm = getUChar(delta+0);
+         int reg = gregLO3ofRM(mod_rm);
+         /* The HotSpot JVM uses these */
+         if ( (opc == 0xDD) && (reg == 0 /* FLDL   */ ||
+                                reg == 4 /* FNSAVE */ ||
+                                reg == 6 /* FRSTOR */ ) )
+            size_OK = True;
+      }
+      /* AMD manual says 0x66 size override is ignored, except where
+         it is meaningful */
+      if (!size_OK)
+         goto decode_failure;
+
+      Bool decode_OK = False;
+      delta = dis_FPU ( &decode_OK, vbi, pfx, delta );
+      if (!decode_OK)
+         goto decode_failure;
+
+      return delta;
+   }
+
+   case 0xE0: /* LOOPNE disp8: decrement count, jump if count != 0 && ZF==0 */
+   case 0xE1: /* LOOPE  disp8: decrement count, jump if count != 0 && ZF==1 */
+   case 0xE2: /* LOOP   disp8: decrement count, jump if count != 0 */
+    { /* The docs say this uses rCX as a count depending on the
+         address size override, not the operand one. */
+      IRExpr* zbit  = NULL;
+      IRExpr* count = NULL;
+      IRExpr* cond  = NULL;
+      const HChar* xtra = NULL;
+
+      if (have66orF2orF3(pfx) || 1==getRexW(pfx)) goto decode_failure;
+      /* So at this point we've rejected any variants which appear to
+         be governed by the usual operand-size modifiers.  Hence only
+         the address size prefix can have an effect.  It changes the
+         size from 64 (default) to 32. */
+      d64 = guest_RIP_bbstart+delta+1 + getSDisp8(delta);
+      delta++;
+      if (haveASO(pfx)) {
+         /* 64to32 of 64-bit get is merely a get-put improvement
+            trick. */
+         putIReg32(R_RCX, binop(Iop_Sub32,
+                                unop(Iop_64to32, getIReg64(R_RCX)), 
+                                mkU32(1)));
+      } else {
+         putIReg64(R_RCX, binop(Iop_Sub64, getIReg64(R_RCX), mkU64(1)));
+      }
+
+      /* This is correct, both for 32- and 64-bit versions.  If we're
+         doing a 32-bit dec and the result is zero then the default
+         zero extension rule will cause the upper 32 bits to be zero
+         too.  Hence a 64-bit check against zero is OK. */
+      count = getIReg64(R_RCX);
+      cond = binop(Iop_CmpNE64, count, mkU64(0));
+      switch (opc) {
+         case 0xE2: 
+            xtra = ""; 
+            break;
+         case 0xE1: 
+            xtra = "e"; 
+            zbit = mk_amd64g_calculate_condition( AMD64CondZ );
+            cond = mkAnd1(cond, zbit);
+            break;
+         case 0xE0: 
+            xtra = "ne";
+            zbit = mk_amd64g_calculate_condition( AMD64CondNZ );
+            cond = mkAnd1(cond, zbit);
+            break;
+         default:
+            vassert(0);
+      }
+      stmt( IRStmt_Exit(cond, Ijk_Boring, IRConst_U64(d64), OFFB_RIP) );
+
+      DIP("loop%s%s 0x%llx\n", xtra, haveASO(pfx) ? "l" : "", d64);
+      return delta;
+    }
+
+   case 0xE3: 
+      /* JRCXZ or JECXZ, depending on the address size override. */
+      if (have66orF2orF3(pfx)) goto decode_failure;
+      d64 = (guest_RIP_bbstart+delta+1) + getSDisp8(delta); 
+      delta++;
+      if (haveASO(pfx)) {
+         /* 32-bit */
+         stmt( IRStmt_Exit( binop(Iop_CmpEQ64, 
+                                  unop(Iop_32Uto64, getIReg32(R_RCX)), 
+                                  mkU64(0)),
+                            Ijk_Boring,
+                            IRConst_U64(d64),
+                            OFFB_RIP
+             ));
+         DIP("jecxz 0x%llx\n", d64);
+      } else {
+         /* 64-bit */
+         stmt( IRStmt_Exit( binop(Iop_CmpEQ64, 
+                                  getIReg64(R_RCX), 
+                                  mkU64(0)),
+                            Ijk_Boring,
+                            IRConst_U64(d64),
+                            OFFB_RIP
+               ));
+         DIP("jrcxz 0x%llx\n", d64);
+      }
+      return delta;
+
+   case 0xE4: /* IN imm8, AL */
+      sz = 1; 
+      t1 = newTemp(Ity_I64);
+      abyte = getUChar(delta); delta++;
+      assign(t1, mkU64( abyte & 0xFF ));
+      DIP("in%c $%d,%s\n", nameISize(sz), (Int)abyte, nameIRegRAX(sz));
+      goto do_IN;
+   case 0xE5: /* IN imm8, eAX */
+      if (!(sz == 2 || sz == 4)) goto decode_failure;
+      t1 = newTemp(Ity_I64);
+      abyte = getUChar(delta); delta++;
+      assign(t1, mkU64( abyte & 0xFF ));
+      DIP("in%c $%d,%s\n", nameISize(sz), (Int)abyte, nameIRegRAX(sz));
+      goto do_IN;
+   case 0xEC: /* IN %DX, AL */
+      sz = 1; 
+      t1 = newTemp(Ity_I64);
+      assign(t1, unop(Iop_16Uto64, getIRegRDX(2)));
+      DIP("in%c %s,%s\n", nameISize(sz), nameIRegRDX(2), 
+                                         nameIRegRAX(sz));
+      goto do_IN;
+   case 0xED: /* IN %DX, eAX */
+      if (!(sz == 2 || sz == 4)) goto decode_failure;
+      t1 = newTemp(Ity_I64);
+      assign(t1, unop(Iop_16Uto64, getIRegRDX(2)));
+      DIP("in%c %s,%s\n", nameISize(sz), nameIRegRDX(2), 
+                                         nameIRegRAX(sz));
+      goto do_IN;
+   do_IN: {
+      /* At this point, sz indicates the width, and t1 is a 64-bit
+         value giving port number. */
+      IRDirty* d;
+      if (haveF2orF3(pfx)) goto decode_failure;
+      vassert(sz == 1 || sz == 2 || sz == 4);
+      ty = szToITy(sz);
+      t2 = newTemp(Ity_I64);
+      d = unsafeIRDirty_1_N( 
+             t2,
+             0/*regparms*/, 
+             "amd64g_dirtyhelper_IN", 
+             &amd64g_dirtyhelper_IN,
+             mkIRExprVec_2( mkexpr(t1), mkU64(sz) )
+          );
+      /* do the call, dumping the result in t2. */
+      stmt( IRStmt_Dirty(d) );
+      putIRegRAX(sz, narrowTo( ty, mkexpr(t2) ) );
+      return delta;
+   }
+
+   case 0xE6: /* OUT AL, imm8 */
+      sz = 1;
+      t1 = newTemp(Ity_I64);
+      abyte = getUChar(delta); delta++;
+      assign( t1, mkU64( abyte & 0xFF ) );
+      DIP("out%c %s,$%d\n", nameISize(sz), nameIRegRAX(sz), (Int)abyte);
+      goto do_OUT;
+   case 0xE7: /* OUT eAX, imm8 */
+      if (!(sz == 2 || sz == 4)) goto decode_failure;
+      t1 = newTemp(Ity_I64);
+      abyte = getUChar(delta); delta++;
+      assign( t1, mkU64( abyte & 0xFF ) );
+      DIP("out%c %s,$%d\n", nameISize(sz), nameIRegRAX(sz), (Int)abyte);
+      goto do_OUT;
+   case 0xEE: /* OUT AL, %DX */
+      sz = 1;
+      t1 = newTemp(Ity_I64);
+      assign( t1, unop(Iop_16Uto64, getIRegRDX(2)) );
+      DIP("out%c %s,%s\n", nameISize(sz), nameIRegRAX(sz),
+                                          nameIRegRDX(2));
+      goto do_OUT;
+   case 0xEF: /* OUT eAX, %DX */
+      if (!(sz == 2 || sz == 4)) goto decode_failure;
+      t1 = newTemp(Ity_I64);
+      assign( t1, unop(Iop_16Uto64, getIRegRDX(2)) );
+      DIP("out%c %s,%s\n", nameISize(sz), nameIRegRAX(sz),
+                                          nameIRegRDX(2));
+      goto do_OUT;
+   do_OUT: {
+      /* At this point, sz indicates the width, and t1 is a 64-bit
+         value giving port number. */
+      IRDirty* d;
+      if (haveF2orF3(pfx)) goto decode_failure;
+      vassert(sz == 1 || sz == 2 || sz == 4);
+      ty = szToITy(sz);
+      d = unsafeIRDirty_0_N( 
+             0/*regparms*/, 
+             "amd64g_dirtyhelper_OUT", 
+             &amd64g_dirtyhelper_OUT,
+             mkIRExprVec_3( mkexpr(t1),
+                            widenUto64( getIRegRAX(sz) ), 
+                            mkU64(sz) )
+          );
+      stmt( IRStmt_Dirty(d) );
+      return delta;
+   }
+
+   case 0xE8: /* CALL J4 */
+      if (haveF3(pfx)) goto decode_failure;
+      if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+      d64 = getSDisp32(delta); delta += 4;
+      d64 += (guest_RIP_bbstart+delta); 
+      /* (guest_RIP_bbstart+delta) == return-to addr, d64 == call-to addr */
+      t1 = newTemp(Ity_I64); 
+      assign(t1, binop(Iop_Sub64, getIReg64(R_RSP), mkU64(8)));
+      putIReg64(R_RSP, mkexpr(t1));
+      storeLE( mkexpr(t1), mkU64(guest_RIP_bbstart+delta));
+      t2 = newTemp(Ity_I64);
+      assign(t2, mkU64((Addr64)d64));
+      make_redzone_AbiHint(vbi, t1, t2/*nia*/, "call-d32");
+      if (resteerOkFn( callback_opaque, (Addr64)d64) ) {
+         /* follow into the call target. */
+         dres->whatNext   = Dis_ResteerU;
+         dres->continueAt = d64;
+      } else {
+         jmp_lit(dres, Ijk_Call, d64);
+         vassert(dres->whatNext == Dis_StopHere);
+      }
+      DIP("call 0x%llx\n",d64);
+      return delta;
+
+   case 0xE9: /* Jv (jump, 16/32 offset) */
+      if (haveF3(pfx)) goto decode_failure;
+      if (sz != 4) 
+         goto decode_failure; /* JRS added 2004 July 11 */
+      if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+      d64 = (guest_RIP_bbstart+delta+sz) + getSDisp(sz,delta); 
+      delta += sz;
+      if (resteerOkFn(callback_opaque, (Addr64)d64)) {
+         dres->whatNext   = Dis_ResteerU;
+         dres->continueAt = d64;
+      } else {
+         jmp_lit(dres, Ijk_Boring, d64);
+         vassert(dres->whatNext == Dis_StopHere);
+      }
+      DIP("jmp 0x%llx\n", d64);
+      return delta;
+
+   case 0xEB: /* Jb (jump, byte offset) */
+      if (haveF3(pfx)) goto decode_failure;
+      if (sz != 4) 
+         goto decode_failure; /* JRS added 2004 July 11 */
+      if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+      d64 = (guest_RIP_bbstart+delta+1) + getSDisp8(delta); 
+      delta++;
+      if (resteerOkFn(callback_opaque, (Addr64)d64)) {
+         dres->whatNext   = Dis_ResteerU;
+         dres->continueAt = d64;
+      } else {
+         jmp_lit(dres, Ijk_Boring, d64);
+         vassert(dres->whatNext == Dis_StopHere);
+      }
+      DIP("jmp-8 0x%llx\n", d64);
+      return delta;
+
+   case 0xF5: /* CMC */
+   case 0xF8: /* CLC */
+   case 0xF9: /* STC */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign( t1, mk_amd64g_calculate_rflags_all() );
+      switch (opc) {
+         case 0xF5: 
+            assign( t2, binop(Iop_Xor64, mkexpr(t1), 
+                                         mkU64(AMD64G_CC_MASK_C)));
+            DIP("cmc\n");
+            break;
+         case 0xF8: 
+            assign( t2, binop(Iop_And64, mkexpr(t1), 
+                                         mkU64(~AMD64G_CC_MASK_C)));
+            DIP("clc\n");
+            break;
+         case 0xF9: 
+            assign( t2, binop(Iop_Or64, mkexpr(t1), 
+                                        mkU64(AMD64G_CC_MASK_C)));
+            DIP("stc\n");
+            break;
+         default: 
+            vpanic("disInstr(x64)(cmc/clc/stc)");
+      }
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(t2) ));
+      /* Set NDEP even though it isn't used.  This makes redundant-PUT
+         elimination of previous stores to this field work better. */
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+      return delta;
+
+   case 0xF6: { /* Grp3 Eb */
+      Bool decode_OK = True;
+      /* RM'd: if (haveF2orF3(pfx)) goto decode_failure; */
+      /* We now let dis_Grp3 itself decide if F2 and/or F3 are valid */
+      delta = dis_Grp3 ( vbi, pfx, 1, delta, &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   case 0xF7: { /* Grp3 Ev */
+      Bool decode_OK = True;
+      /* RM'd: if (haveF2orF3(pfx)) goto decode_failure; */
+      /* We now let dis_Grp3 itself decide if F2 and/or F3 are valid */
+      delta = dis_Grp3 ( vbi, pfx, sz, delta, &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   case 0xFC: /* CLD */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      stmt( IRStmt_Put( OFFB_DFLAG, mkU64(1)) );
+      DIP("cld\n");
+      return delta;
+
+   case 0xFD: /* STD */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      stmt( IRStmt_Put( OFFB_DFLAG, mkU64(-1ULL)) );
+      DIP("std\n");
+      return delta;
+
+   case 0xFE: { /* Grp4 Eb */
+      Bool decode_OK = True;
+      /* RM'd: if (haveF2orF3(pfx)) goto decode_failure; */
+      /* We now let dis_Grp4 itself decide if F2 and/or F3 are valid */
+      delta = dis_Grp4 ( vbi, pfx, delta, &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   case 0xFF: { /* Grp5 Ev */
+      Bool decode_OK = True;
+      /* RM'd: if (haveF2orF3(pfx)) goto decode_failure; */
+      /* We now let dis_Grp5 itself decide if F2 and/or F3 are valid */
+      delta = dis_Grp5 ( vbi, pfx, sz, delta, dres, &decode_OK );
+      if (!decode_OK) goto decode_failure;
+      return delta;
+   }
+
+   default:
+      break;
+
+   }
+
+  decode_failure:
+   return deltaIN; /* fail */
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level post-escape decoders: dis_ESC_0F           ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* Build IR that byte-reverses the value held in temp 't1', whose type
+   is 'ty' (one of Ity_I16, Ity_I32 or Ity_I64), and return a new temp
+   of the same type holding the reversed value.  Any other 'ty'
+   asserts. */
+static IRTemp math_BSWAP ( IRTemp t1, IRType ty )
+{
+   IRTemp t2 = newTemp(ty);
+   if (ty == Ity_I64) {
+      /* 64-bit case: three butterfly passes.  First swap adjacent
+         bytes (s8), then adjacent 16-bit lanes (s16), then the two
+         32-bit halves (t2). */
+      IRTemp m8  = newTemp(Ity_I64);
+      IRTemp s8  = newTemp(Ity_I64);
+      IRTemp m16 = newTemp(Ity_I64);
+      IRTemp s16 = newTemp(Ity_I64);
+      IRTemp m32 = newTemp(Ity_I64);
+      /* s8 = t1 with each pair of adjacent bytes exchanged */
+      assign( m8, mkU64(0xFF00FF00FF00FF00ULL) );
+      assign( s8,
+              binop(Iop_Or64,
+                    binop(Iop_Shr64,
+                          binop(Iop_And64,mkexpr(t1),mkexpr(m8)),
+                          mkU8(8)),
+                    binop(Iop_And64,
+                          binop(Iop_Shl64,mkexpr(t1),mkU8(8)),
+                          mkexpr(m8))
+                   ) 
+            );
+
+      /* s16 = s8 with each pair of adjacent 16-bit lanes exchanged */
+      assign( m16, mkU64(0xFFFF0000FFFF0000ULL) );
+      assign( s16,
+              binop(Iop_Or64,
+                    binop(Iop_Shr64,
+                          binop(Iop_And64,mkexpr(s8),mkexpr(m16)),
+                          mkU8(16)),
+                    binop(Iop_And64,
+                          binop(Iop_Shl64,mkexpr(s8),mkU8(16)),
+                          mkexpr(m16))
+                   ) 
+            );
+
+      /* t2 = s16 with its two 32-bit halves exchanged */
+      assign( m32, mkU64(0xFFFFFFFF00000000ULL) );
+      assign( t2,
+              binop(Iop_Or64,
+                    binop(Iop_Shr64,
+                          binop(Iop_And64,mkexpr(s16),mkexpr(m32)),
+                          mkU8(32)),
+                    binop(Iop_And64,
+                          binop(Iop_Shl64,mkexpr(s16),mkU8(32)),
+                          mkexpr(m32))
+                   ) 
+            );
+      return t2;
+   }
+   if (ty == Ity_I32) {
+      /* 32-bit case: move each byte directly to its mirrored position
+         (shifts of 24 for the outer bytes, masked shifts of 8 for the
+         inner two) and OR the four results together. */
+      assign( t2,
+         binop(
+            Iop_Or32,
+            binop(Iop_Shl32, mkexpr(t1), mkU8(24)),
+            binop(
+               Iop_Or32,
+               binop(Iop_And32, binop(Iop_Shl32, mkexpr(t1), mkU8(8)),
+                                mkU32(0x00FF0000)),
+               binop(Iop_Or32,
+                     binop(Iop_And32, binop(Iop_Shr32, mkexpr(t1), mkU8(8)),
+                                      mkU32(0x0000FF00)),
+                     binop(Iop_And32, binop(Iop_Shr32, mkexpr(t1), mkU8(24)),
+                                      mkU32(0x000000FF) )
+            )))
+      );
+      return t2;
+   }
+   if (ty == Ity_I16) {
+      /* 16-bit case: a single exchange of the two bytes.  Iop_Shr16 is
+         an unsigned (logical) shift, so the high byte lands in the low
+         byte with zeroes above it. */
+      assign(t2, 
+             binop(Iop_Or16,
+                   binop(Iop_Shl16, mkexpr(t1), mkU8(8)),
+                   binop(Iop_Shr16, mkexpr(t1), mkU8(8)) ));
+      return t2;
+   }
+   /* Unsupported type: fail loudly rather than emit wrong IR. */
+   vassert(0);
+   /*NOTREACHED*/
+   return IRTemp_INVALID;
+}
+
+
+__attribute__((noinline))
+static
+Long dis_ESC_0F (
+        /*MB_OUT*/DisResult* dres,
+        /*MB_OUT*/Bool*      expect_CAS,
+        Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+        Bool         resteerCisOk,
+        void*        callback_opaque,
+        const VexArchInfo* archinfo,
+        const VexAbiInfo*  vbi,
+        Prefix pfx, Int sz, Long deltaIN 
+     )
+{
+   Long   d64   = 0;
+   IRTemp addr  = IRTemp_INVALID;
+   IRTemp t1    = IRTemp_INVALID;
+   IRTemp t2    = IRTemp_INVALID;
+   UChar  modrm = 0;
+   Int    am_sz = 0;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+
+   /* In the first switch, look for ordinary integer insns. */
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   switch (opc) { /* first switch */
+
+   case 0x01:
+   {
+      modrm = getUChar(delta);
+      /* 0F 01 /0 -- SGDT */
+      /* 0F 01 /1 -- SIDT */
+      if (!epartIsReg(modrm)
+          && (gregLO3ofRM(modrm) == 0 || gregLO3ofRM(modrm) == 1)) {
+         /* This is really revolting, but ... since each processor
+            (core) only has one IDT and one GDT, just let the guest
+            see it (pass-through semantics).  I can't see any way to
+            construct a faked-up value, so don't bother to try. */
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         switch (gregLO3ofRM(modrm)) {
+            case 0: DIP("sgdt %s\n", dis_buf); break;
+            case 1: DIP("sidt %s\n", dis_buf); break;
+            default: vassert(0); /*NOTREACHED*/
+         }
+         IRDirty* d = unsafeIRDirty_0_N (
+                          0/*regparms*/,
+                          "amd64g_dirtyhelper_SxDT",
+                          &amd64g_dirtyhelper_SxDT,
+                          mkIRExprVec_2( mkexpr(addr),
+                                         mkU64(gregLO3ofRM(modrm)) )
+                      );
+         /* declare we're writing memory */
+         d->mFx   = Ifx_Write;
+         d->mAddr = mkexpr(addr);
+         d->mSize = 6;
+         stmt( IRStmt_Dirty(d) );
+         return delta;
+      }
+      /* 0F 01 D0 = XGETBV */
+      if (modrm == 0xD0 && (archinfo->hwcaps & VEX_HWCAPS_AMD64_AVX)) {
+         delta += 1;
+         DIP("xgetbv\n");
+         /* Fault (SEGV) if ECX isn't zero.  Intel docs say #GP and I
+            am not sure if that translates in to SEGV or to something
+            else, in user space. */
+         t1 = newTemp(Ity_I32);
+         assign( t1, getIReg32(R_RCX) );
+         stmt( IRStmt_Exit(binop(Iop_CmpNE32, mkexpr(t1), mkU32(0)),
+                           Ijk_SigSEGV,
+                           IRConst_U64(guest_RIP_curr_instr),
+                           OFFB_RIP
+         ));
+         putIRegRAX(4, mkU32(7));
+         putIRegRDX(4, mkU32(0));
+         return delta;
+      }
+      /* BEGIN HACKY SUPPORT FOR xend */
+      /* 0F 01 D5 = XEND */
+      if (modrm == 0xD5 && (archinfo->hwcaps & VEX_HWCAPS_AMD64_AVX)) {
+         /* We are never in a transaction (xbegin immediately aborts).
+            So this just always generates a General Protection Fault. */
+         delta += 1;
+         jmp_lit(dres, Ijk_SigSEGV, guest_RIP_bbstart + delta);
+         vassert(dres->whatNext == Dis_StopHere);
+         DIP("xend\n");
+         return delta;
+      }
+      /* END HACKY SUPPORT FOR xend */
+      /* BEGIN HACKY SUPPORT FOR xtest */
+      /* 0F 01 D6 = XTEST */
+      if (modrm == 0xD6 && (archinfo->hwcaps & VEX_HWCAPS_AMD64_AVX)) {
+         /* Sets ZF because there never is a transaction, and all
+            CF, OF, SF, PF and AF are always cleared by xtest. */
+         delta += 1;
+         DIP("xtest\n");
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP1, mkU64(AMD64G_CC_MASK_Z) ));
+         /* Set NDEP even though it isn't used.  This makes redundant-PUT
+            elimination of previous stores to this field work better. */
+         stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+         return delta;
+      }
+      /* END HACKY SUPPORT FOR xtest */
+      /* 0F 01 F9 = RDTSCP */
+      if (modrm == 0xF9 && (archinfo->hwcaps & VEX_HWCAPS_AMD64_RDTSCP)) {
+         delta += 1;
+         /* Uses dirty helper: 
+            void amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* )
+            declared to wr rax, rcx, rdx
+         */
+         const HChar* fName = "amd64g_dirtyhelper_RDTSCP";
+         void*        fAddr = &amd64g_dirtyhelper_RDTSCP;
+         IRDirty* d
+            = unsafeIRDirty_0_N ( 0/*regparms*/, 
+                                  fName, fAddr, mkIRExprVec_1(IRExpr_BBPTR()) );
+         /* declare guest state effects */
+         d->nFxState = 3;
+         vex_bzero(&d->fxState, sizeof(d->fxState));
+         d->fxState[0].fx     = Ifx_Write;
+         d->fxState[0].offset = OFFB_RAX;
+         d->fxState[0].size   = 8;
+         d->fxState[1].fx     = Ifx_Write;
+         d->fxState[1].offset = OFFB_RCX;
+         d->fxState[1].size   = 8;
+         d->fxState[2].fx     = Ifx_Write;
+         d->fxState[2].offset = OFFB_RDX;
+         d->fxState[2].size   = 8;
+         /* execute the dirty call, side-effecting guest state */
+         stmt( IRStmt_Dirty(d) );
+         /* RDTSCP is a serialising insn.  So, just in case someone is
+            using it as a memory fence ... */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("rdtscp\n");
+         return delta;
+      }
+      /* else decode failed */
+      break;
+   }
+
+   case 0x05: /* SYSCALL */
+      guest_RIP_next_mustcheck = True;
+      guest_RIP_next_assumed = guest_RIP_bbstart + delta;
+      putIReg64( R_RCX, mkU64(guest_RIP_next_assumed) );
+      /* It's important that all guest state is up-to-date
+         at this point.  So we declare an end-of-block here, which
+         forces any cached guest state to be flushed. */
+      jmp_lit(dres, Ijk_Sys_syscall, guest_RIP_next_assumed);
+      vassert(dres->whatNext == Dis_StopHere);
+      DIP("syscall\n");
+      return delta;
+
+   case 0x0B: /* UD2 */
+      stmt( IRStmt_Put( OFFB_RIP, mkU64(guest_RIP_curr_instr) ) );
+      jmp_lit(dres, Ijk_NoDecode, guest_RIP_curr_instr);
+      vassert(dres->whatNext == Dis_StopHere);
+      DIP("ud2\n");
+      return delta;
+
+   case 0x0D: /* 0F 0D /0 -- prefetch mem8 */
+              /* 0F 0D /1 -- prefetchw mem8 */
+      if (have66orF2orF3(pfx)) goto decode_failure;
+      modrm = getUChar(delta);
+      if (epartIsReg(modrm)) goto decode_failure;
+      if (gregLO3ofRM(modrm) != 0 && gregLO3ofRM(modrm) != 1)
+         goto decode_failure;
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      delta += alen;
+      switch (gregLO3ofRM(modrm)) {
+         case 0: DIP("prefetch %s\n", dis_buf); break;
+         case 1: DIP("prefetchw %s\n", dis_buf); break;
+         default: vassert(0); /*NOTREACHED*/
+      }
+      return delta;
+
+   case 0x1F:
+      if (haveF2orF3(pfx)) goto decode_failure;
+      modrm = getUChar(delta);
+      if (epartIsReg(modrm)) goto decode_failure;
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      delta += alen;
+      DIP("nop%c %s\n", nameISize(sz), dis_buf);
+      return delta;
+
+   case 0x31: { /* RDTSC */
+      IRTemp   val  = newTemp(Ity_I64);
+      IRExpr** args = mkIRExprVec_0();
+      IRDirty* d    = unsafeIRDirty_1_N ( 
+                         val, 
+                         0/*regparms*/, 
+                         "amd64g_dirtyhelper_RDTSC", 
+                         &amd64g_dirtyhelper_RDTSC, 
+                         args 
+                      );
+      if (have66orF2orF3(pfx)) goto decode_failure;
+      /* execute the dirty call, dumping the result in val. */
+      stmt( IRStmt_Dirty(d) );
+      putIRegRDX(4, unop(Iop_64HIto32, mkexpr(val)));
+      putIRegRAX(4, unop(Iop_64to32, mkexpr(val)));
+      DIP("rdtsc\n");
+      return delta;
+   }
+
+   case 0x40:
+   case 0x41:
+   case 0x42: /* CMOVBb/CMOVNAEb (cmov below) */
+   case 0x43: /* CMOVNBb/CMOVAEb (cmov not below) */
+   case 0x44: /* CMOVZb/CMOVEb (cmov zero) */
+   case 0x45: /* CMOVNZb/CMOVNEb (cmov not zero) */
+   case 0x46: /* CMOVBEb/CMOVNAb (cmov below or equal) */
+   case 0x47: /* CMOVNBEb/CMOVAb (cmov not below or equal) */
+   case 0x48: /* CMOVSb (cmov negative) */
+   case 0x49: /* CMOVNSb (cmov not negative) */
+   case 0x4A: /* CMOVP (cmov parity even) */
+   case 0x4B: /* CMOVNP (cmov parity odd) */
+   case 0x4C: /* CMOVLb/CMOVNGEb (cmov less) */
+   case 0x4D: /* CMOVGEb/CMOVNLb (cmov greater or equal) */
+   case 0x4E: /* CMOVLEb/CMOVNGb (cmov less or equal) */
+   case 0x4F: /* CMOVGb/CMOVNLEb (cmov greater) */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_cmov_E_G(vbi, pfx, sz, (AMD64Condcode)(opc - 0x40), delta);
+      return delta;
+
+   case 0x80:
+   case 0x81:
+   case 0x82:   /* JBb/JNAEb (jump below) */
+   case 0x83:   /* JNBb/JAEb (jump not below) */
+   case 0x84:   /* JZb/JEb (jump zero) */
+   case 0x85:   /* JNZb/JNEb (jump not zero) */
+   case 0x86:   /* JBEb/JNAb (jump below or equal) */
+   case 0x87:   /* JNBEb/JAb (jump not below or equal) */
+   case 0x88:   /* JSb (jump negative) */
+   case 0x89:   /* JNSb (jump not negative) */
+   case 0x8A:   /* JP (jump parity even) */
+   case 0x8B:   /* JNP/JPO (jump parity odd) */
+   case 0x8C:   /* JLb/JNGEb (jump less) */
+   case 0x8D:   /* JGEb/JNLb (jump greater or equal) */
+   case 0x8E:   /* JLEb/JNGb (jump less or equal) */
+   case 0x8F: { /* JGb/JNLEb (jump greater) */
+      Long   jmpDelta;
+      const HChar* comment  = "";
+      if (haveF3(pfx)) goto decode_failure;
+      if (haveF2(pfx)) DIP("bnd ; "); /* MPX bnd prefix. */
+      jmpDelta = getSDisp32(delta);
+      d64 = (guest_RIP_bbstart+delta+4) + jmpDelta;
+      delta += 4;
+      if (resteerCisOk
+          && vex_control.guest_chase_cond
+          && (Addr64)d64 != (Addr64)guest_RIP_bbstart
+          && jmpDelta < 0
+          && resteerOkFn( callback_opaque, (Addr64)d64) ) {
+         /* Speculation: assume this backward branch is taken.  So
+            we need to emit a side-exit to the insn following this
+            one, on the negation of the condition, and continue at
+            the branch target address (d64).  If we wind up back at
+            the first instruction of the trace, just stop; it's
+            better to let the IR loop unroller handle that case. */
+         stmt( IRStmt_Exit( 
+                  mk_amd64g_calculate_condition(
+                     (AMD64Condcode)(1 ^ (opc - 0x80))),
+                  Ijk_Boring,
+                  IRConst_U64(guest_RIP_bbstart+delta),
+                  OFFB_RIP
+             ));
+         dres->whatNext   = Dis_ResteerC;
+         dres->continueAt = d64;
+         comment = "(assumed taken)";
+      }
+      else
+      if (resteerCisOk
+          && vex_control.guest_chase_cond
+          && (Addr64)d64 != (Addr64)guest_RIP_bbstart
+          && jmpDelta >= 0
+          && resteerOkFn( callback_opaque, guest_RIP_bbstart+delta ) ) {
+         /* Speculation: assume this forward branch is not taken.
+            So we need to emit a side-exit to d64 (the dest) and
+            continue disassembling at the insn immediately
+            following this one. */
+         stmt( IRStmt_Exit( 
+                  mk_amd64g_calculate_condition((AMD64Condcode)
+                                                (opc - 0x80)),
+                  Ijk_Boring,
+                  IRConst_U64(d64),
+                  OFFB_RIP
+             ));
+         dres->whatNext   = Dis_ResteerC;
+         dres->continueAt = guest_RIP_bbstart+delta;
+         comment = "(assumed not taken)";
+      }
+      else {
+         /* Conservative default translation - end the block at
+            this point. */
+         jcc_01( dres, (AMD64Condcode)(opc - 0x80),
+                 guest_RIP_bbstart+delta, d64 );
+         vassert(dres->whatNext == Dis_StopHere);
+      }
+      DIP("j%s-32 0x%llx %s\n", name_AMD64Condcode(opc - 0x80), d64, comment);
+      return delta;
+   }
+
+   case 0x90:
+   case 0x91:
+   case 0x92: /* set-Bb/set-NAEb (set if below) */
+   case 0x93: /* set-NBb/set-AEb (set if not below) */
+   case 0x94: /* set-Zb/set-Eb (set if zero) */
+   case 0x95: /* set-NZb/set-NEb (set if not zero) */
+   case 0x96: /* set-BEb/set-NAb (set if below or equal) */
+   case 0x97: /* set-NBEb/set-Ab (set if not below or equal) */
+   case 0x98: /* set-Sb (set if negative) */
+   case 0x99: /* set-NSb (set if not negative) */
+   case 0x9A: /* set-P (set if parity even) */
+   case 0x9B: /* set-NP (set if parity odd) */
+   case 0x9C: /* set-Lb/set-NGEb (set if less) */
+   case 0x9D: /* set-GEb/set-NLb (set if greater or equal) */
+   case 0x9E: /* set-LEb/set-NGb (set if less or equal) */
+   case 0x9F: /* set-Gb/set-NLEb (set if greater) */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      t1 = newTemp(Ity_I8);
+      assign( t1, unop(Iop_1Uto8,mk_amd64g_calculate_condition(opc-0x90)) );
+      modrm = getUChar(delta);
+      if (epartIsReg(modrm)) {
+         delta++;
+         putIRegE(1, pfx, modrm, mkexpr(t1));
+         DIP("set%s %s\n", name_AMD64Condcode(opc-0x90), 
+                           nameIRegE(1,pfx,modrm));
+      } else {
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         storeLE( mkexpr(addr), mkexpr(t1) );
+         DIP("set%s %s\n", name_AMD64Condcode(opc-0x90), dis_buf);
+      }
+      return delta;
+
+   case 0x1A:
+   case 0x1B: { /* Future MPX instructions, currently NOPs.
+                   BNDMK b, m     F3 0F 1B
+                   BNDCL b, r/m   F3 0F 1A
+                   BNDCU b, r/m   F2 0F 1A
+                   BNDCN b, r/m   F2 0F 1B
+                   BNDMOV b, b/m  66 0F 1A
+                   BNDMOV b/m, b  66 0F 1B
+                   BNDLDX b, mib     0F 1A
+                   BNDSTX mib, b     0F 1B */
+
+      /* All instructions have two operands. One operand is always the
+         bnd register number (bnd0-bnd3, other register numbers are
+         ignored when MPX isn't enabled, but should generate an
+         exception if MPX is enabled) given by gregOfRexRM. The other
+         operand is either a ModRM:reg, ModRM:r/m or a SIB encoded
+         address, all of which can be decoded by using either
+         eregOfRexRM or disAMode. */
+
+      modrm = getUChar(delta);
+      int bnd = gregOfRexRM(pfx,modrm);
+      const HChar *oper;
+      if (epartIsReg(modrm)) {
+         oper = nameIReg64 (eregOfRexRM(pfx,modrm));
+         delta += 1;
+      } else {
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         oper = dis_buf;
+      }
+
+      if (haveF3no66noF2 (pfx)) {
+         if (opc == 0x1B) {
+            DIP ("bndmk %s, %%bnd%d\n", oper, bnd);
+         } else /* opc == 0x1A */ {
+            DIP ("bndcl %s, %%bnd%d\n", oper, bnd);
+         }
+      } else if (haveF2no66noF3 (pfx)) {
+         if (opc == 0x1A) {
+            DIP ("bndcu %s, %%bnd%d\n", oper, bnd);
+         } else /* opc == 0x1B */ {
+            DIP ("bndcn %s, %%bnd%d\n", oper, bnd);
+         }
+      } else if (have66noF2noF3 (pfx)) {
+         if (opc == 0x1A) {
+            DIP ("bndmov %s, %%bnd%d\n", oper, bnd);
+         } else /* opc == 0x1B */ {
+            DIP ("bndmov %%bnd%d, %s\n", bnd, oper);
+         }
+      } else if (haveNo66noF2noF3 (pfx)) {
+         if (opc == 0x1A) {
+            DIP ("bndldx %s, %%bnd%d\n", oper, bnd);
+         } else /* opc == 0x1B */ {
+            DIP ("bndstx %%bnd%d, %s\n", bnd, oper);
+         }
+      } else goto decode_failure;
+
+      return delta;
+   }
+
+   case 0xA2: { /* CPUID */
+      /* Uses dirty helper: 
+            void amd64g_dirtyhelper_CPUID ( VexGuestAMD64State* )
+         declared to mod rax, wr rbx, rcx, rdx
+      */
+      IRDirty*     d     = NULL;
+      const HChar* fName = NULL;
+      void*        fAddr = NULL;
+
+      /* JRS 2014-11-11: this a really horrible temp kludge to work
+         around the fact that the Yosemite (OSX 10.10)
+         /usr/lib/system/libdyld.dylib expects XSAVE/XRSTOR to be
+         implemented, because amd64g_dirtyhelper_CPUID_avx_and_cx16
+         claims they are supported, but so far they aren't.  So cause
+         it to fall back to a simpler CPU.  The cleaner approach of
+         setting CPUID(eax=1).OSXSAVE=0 and .XSAVE=0 isn't desirable
+         since it will (per the official Intel guidelines) lead to
+         software concluding that AVX isn't supported.
+
+         This is also a kludge in that putting these ifdefs here checks
+         the build (host) architecture, when really we're checking the
+         guest architecture. */
+      Bool this_is_yosemite = False;
+#     if defined(VGP_amd64_darwin) && DARWIN_VERS == DARWIN_10_10
+      this_is_yosemite = True;
+#     endif
+
+      if (haveF2orF3(pfx)) goto decode_failure;
+      /* This isn't entirely correct, CPUID should depend on the VEX
+         capabilities, not on the underlying CPU. See bug #324882. */
+      if (!this_is_yosemite &&
+          (archinfo->hwcaps & VEX_HWCAPS_AMD64_SSE3) &&
+          (archinfo->hwcaps & VEX_HWCAPS_AMD64_CX16) &&
+          (archinfo->hwcaps & VEX_HWCAPS_AMD64_AVX)) {
+         fName = "amd64g_dirtyhelper_CPUID_avx_and_cx16";
+         fAddr = &amd64g_dirtyhelper_CPUID_avx_and_cx16;
+         /* This is a Core-i5-2300-like machine */
+      }
+      else if ((archinfo->hwcaps & VEX_HWCAPS_AMD64_SSE3) &&
+               (archinfo->hwcaps & VEX_HWCAPS_AMD64_CX16)) {
+         fName = "amd64g_dirtyhelper_CPUID_sse42_and_cx16";
+         fAddr = &amd64g_dirtyhelper_CPUID_sse42_and_cx16;
+         /* This is a Core-i5-670-like machine */
+      }
+      else {
+         /* Give a CPUID for at least a baseline machine, SSE2
+            only, and no CX16 */
+         fName = "amd64g_dirtyhelper_CPUID_baseline";
+         fAddr = &amd64g_dirtyhelper_CPUID_baseline;
+      }
+
+      vassert(fName); vassert(fAddr);
+      d = unsafeIRDirty_0_N ( 0/*regparms*/, 
+                              fName, fAddr, mkIRExprVec_1(IRExpr_BBPTR()) );
+      /* declare guest state effects */
+      d->nFxState = 4;
+      vex_bzero(&d->fxState, sizeof(d->fxState));
+      d->fxState[0].fx     = Ifx_Modify;
+      d->fxState[0].offset = OFFB_RAX;
+      d->fxState[0].size   = 8;
+      d->fxState[1].fx     = Ifx_Write;
+      d->fxState[1].offset = OFFB_RBX;
+      d->fxState[1].size   = 8;
+      d->fxState[2].fx     = Ifx_Modify;
+      d->fxState[2].offset = OFFB_RCX;
+      d->fxState[2].size   = 8;
+      d->fxState[3].fx     = Ifx_Write;
+      d->fxState[3].offset = OFFB_RDX;
+      d->fxState[3].size   = 8;
+      /* execute the dirty call, side-effecting guest state */
+      stmt( IRStmt_Dirty(d) );
+      /* CPUID is a serialising insn.  So, just in case someone is
+         using it as a memory fence ... */
+      stmt( IRStmt_MBE(Imbe_Fence) );
+      DIP("cpuid\n");
+      return delta;
+   }
+
+   case 0xA3: { /* BT Gv,Ev */
+      /* We let dis_bt_G_E decide whether F2 or F3 are allowable. */
+      Bool ok = True;
+      if (sz != 8 && sz != 4 && sz != 2) goto decode_failure;
+      delta = dis_bt_G_E ( vbi, pfx, sz, delta, BtOpNone, &ok );
+      if (!ok) goto decode_failure;
+      return delta;
+   }
+
+   case 0xA4: /* SHLDv imm8,Gv,Ev */
+      modrm = getUChar(delta);
+      d64   = delta + lengthAMode(pfx, delta);
+      vex_sprintf(dis_buf, "$%d", (Int)getUChar(d64));
+      delta = dis_SHLRD_Gv_Ev ( 
+                 vbi, pfx, delta, modrm, sz, 
+                 mkU8(getUChar(d64)), True, /* literal */
+                 dis_buf, True /* left */ );
+      return delta;
+
+   case 0xA5: /* SHLDv %cl,Gv,Ev */
+      modrm = getUChar(delta);
+      delta = dis_SHLRD_Gv_Ev ( 
+                 vbi, pfx, delta, modrm, sz,
+                 getIRegCL(), False, /* not literal */
+                 "%cl", True /* left */ );
+      return delta;
+
+   case 0xAB: { /* BTS Gv,Ev */
+      /* We let dis_bt_G_E decide whether F2 or F3 are allowable. */
+      Bool ok = True;
+      if (sz != 8 && sz != 4 && sz != 2) goto decode_failure;
+      delta = dis_bt_G_E ( vbi, pfx, sz, delta, BtOpSet, &ok );
+      if (!ok) goto decode_failure;
+      return delta;
+   }
+
+   case 0xAC: /* SHRDv imm8,Gv,Ev */
+      modrm = getUChar(delta);
+      d64   = delta + lengthAMode(pfx, delta);
+      vex_sprintf(dis_buf, "$%d", (Int)getUChar(d64));
+      delta = dis_SHLRD_Gv_Ev ( 
+                 vbi, pfx, delta, modrm, sz, 
+                 mkU8(getUChar(d64)), True, /* literal */
+                 dis_buf, False /* right */ );
+      return delta;
+
+   case 0xAD: /* SHRDv %cl,Gv,Ev */
+      modrm = getUChar(delta);
+      delta = dis_SHLRD_Gv_Ev ( 
+                 vbi, pfx, delta, modrm, sz, 
+                 getIRegCL(), False, /* not literal */
+                 "%cl", False /* right */);
+      return delta;
+
+   case 0xAF: /* IMUL Ev, Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      delta = dis_mul_E_G ( vbi, pfx, sz, delta );
+      return delta;
+
+   case 0xB0: { /* CMPXCHG Gb,Eb */
+      Bool ok = True;
+      /* We let dis_cmpxchg_G_E decide whether F2 or F3 are allowable. */
+      delta = dis_cmpxchg_G_E ( &ok, vbi, pfx, 1, delta );
+      if (!ok) goto decode_failure;
+      return delta;
+   }
+
+   case 0xB1: { /* CMPXCHG Gv,Ev (allowed in 16,32,64 bit) */
+      Bool ok = True;
+      /* We let dis_cmpxchg_G_E decide whether F2 or F3 are allowable. */
+      if (sz != 2 && sz != 4 && sz != 8) goto decode_failure;
+      delta = dis_cmpxchg_G_E ( &ok, vbi, pfx, sz, delta );
+      if (!ok) goto decode_failure;
+      return delta;
+   }
+
+   case 0xB3: { /* BTR Gv,Ev */
+      /* We let dis_bt_G_E decide whether F2 or F3 are allowable. */
+      Bool ok = True;
+      if (sz != 8 && sz != 4 && sz != 2) goto decode_failure;
+      delta = dis_bt_G_E ( vbi, pfx, sz, delta, BtOpReset, &ok );
+      if (!ok) goto decode_failure;
+      return delta;
+   }
+
+   case 0xB6: /* MOVZXb Eb,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      if (sz != 2 && sz != 4 && sz != 8)
+         goto decode_failure;
+      delta = dis_movx_E_G ( vbi, pfx, delta, 1, sz, False );
+      return delta;
+
+   case 0xB7: /* MOVZXw Ew,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      if (sz != 4 && sz != 8)
+         goto decode_failure;
+      delta = dis_movx_E_G ( vbi, pfx, delta, 2, sz, False );
+      return delta;
+
+   case 0xBA: { /* Grp8 Ib,Ev */
+      /* We let dis_Grp8_Imm decide whether F2 or F3 are allowable. */
+      Bool decode_OK = False;
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(pfx,delta);
+      d64   = getSDisp8(delta + am_sz);
+      delta = dis_Grp8_Imm ( vbi, pfx, delta, modrm, am_sz, sz, d64,
+                             &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      return delta;
+   }
+
+   case 0xBB: { /* BTC Gv,Ev */
+      /* We let dis_bt_G_E decide whether F2 or F3 are allowable. */
+      Bool ok = False;
+      if (sz != 8 && sz != 4 && sz != 2) goto decode_failure;
+      delta = dis_bt_G_E ( vbi, pfx, sz, delta, BtOpComp, &ok );
+      if (!ok) goto decode_failure;
+      return delta;
+   }
+
+   case 0xBC: /* BSF Gv,Ev */
+      if (!haveF2orF3(pfx)
+          || (haveF3noF2(pfx)
+              && 0 == (archinfo->hwcaps & VEX_HWCAPS_AMD64_BMI))) {
+         /* no-F2 no-F3 0F BC = BSF
+                  or F3 0F BC = REP; BSF on older CPUs.  */
+         delta = dis_bs_E_G ( vbi, pfx, sz, delta, True );
+         return delta;
+      }
+      /* Fall through, since F3 0F BC is TZCNT, and needs to
+         be handled by dis_ESC_0F__SSE4. */
+      break;
+
+   case 0xBD: /* BSR Gv,Ev */
+      if (!haveF2orF3(pfx)
+          || (haveF3noF2(pfx)
+              && 0 == (archinfo->hwcaps & VEX_HWCAPS_AMD64_LZCNT))) {
+         /* no-F2 no-F3 0F BD = BSR
+                  or F3 0F BD = REP; BSR on older CPUs.  */
+         delta = dis_bs_E_G ( vbi, pfx, sz, delta, False );
+         return delta;
+      }
+      /* Fall through, since F3 0F BD is LZCNT, and needs to
+         be handled by dis_ESC_0F__SSE4. */
+      break;
+
+   case 0xBE: /* MOVSXb Eb,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      if (sz != 2 && sz != 4 && sz != 8)
+         goto decode_failure;
+      delta = dis_movx_E_G ( vbi, pfx, delta, 1, sz, True );
+      return delta;
+
+   case 0xBF: /* MOVSXw Ew,Gv */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      if (sz != 4 && sz != 8)
+         goto decode_failure;
+      delta = dis_movx_E_G ( vbi, pfx, delta, 2, sz, True );
+      return delta;
+
+   case 0xC0: { /* XADD Gb,Eb */ 
+      Bool decode_OK = False;
+      delta = dis_xadd_G_E ( &decode_OK, vbi, pfx, 1, delta );
+      if (!decode_OK)
+         goto decode_failure;
+      return delta;
+   }
+
+   case 0xC1: { /* XADD Gv,Ev */ 
+      Bool decode_OK = False;
+      delta = dis_xadd_G_E ( &decode_OK, vbi, pfx, sz, delta );
+      if (!decode_OK)
+         goto decode_failure;
+      return delta;
+   }
+
+   case 0xC7: { /* CMPXCHG8B Ev, CMPXCHG16B Ev */
+      IRType  elemTy     = sz==4 ? Ity_I32 : Ity_I64;
+      IRTemp  expdHi     = newTemp(elemTy);
+      IRTemp  expdLo     = newTemp(elemTy);
+      IRTemp  dataHi     = newTemp(elemTy);
+      IRTemp  dataLo     = newTemp(elemTy);
+      IRTemp  oldHi      = newTemp(elemTy);
+      IRTemp  oldLo      = newTemp(elemTy);
+      IRTemp  flags_old  = newTemp(Ity_I64);
+      IRTemp  flags_new  = newTemp(Ity_I64);
+      IRTemp  success    = newTemp(Ity_I1);
+      IROp    opOR       = sz==4 ? Iop_Or32    : Iop_Or64;
+      IROp    opXOR      = sz==4 ? Iop_Xor32   : Iop_Xor64;
+      IROp    opCasCmpEQ = sz==4 ? Iop_CasCmpEQ32 : Iop_CasCmpEQ64;
+      IRExpr* zero       = sz==4 ? mkU32(0)    : mkU64(0);
+      IRTemp expdHi64    = newTemp(Ity_I64);
+      IRTemp expdLo64    = newTemp(Ity_I64);
+
+      /* Translate this using a DCAS, even if there is no LOCK
+         prefix.  Life is too short to bother with generating two
+         different translations for the with/without-LOCK-prefix
+         cases. */
+      *expect_CAS = True;
+
+      /* Decode, and generate address. */
+      if (have66(pfx)) goto decode_failure;
+      if (sz != 4 && sz != 8) goto decode_failure;
+      if (sz == 8 && !(archinfo->hwcaps & VEX_HWCAPS_AMD64_CX16))
+         goto decode_failure;
+      modrm = getUChar(delta);
+      if (epartIsReg(modrm)) goto decode_failure;
+      if (gregLO3ofRM(modrm) != 1) goto decode_failure;
+      if (haveF2orF3(pfx)) {
+         /* Since the e-part is memory only, F2 or F3 (one or the
+            other) is acceptable if LOCK is also present.  But only
+            for cmpxchg8b. */
+         if (sz == 8) goto decode_failure;
+         if (haveF2andF3(pfx) || !haveLOCK(pfx)) goto decode_failure;
+      }
+
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      delta += alen;
+
+      /* cmpxchg16b requires an alignment check. */
+      if (sz == 8)
+         gen_SEGV_if_not_16_aligned( addr );
+
+      /* Get the expected and new values. */
+      assign( expdHi64, getIReg64(R_RDX) );
+      assign( expdLo64, getIReg64(R_RAX) );
+
+      /* These are the correctly-sized expected and new values.
+         However, we also get expdHi64/expdLo64 above as 64-bits
+         regardless, because we will need them later in the 32-bit
+         case (paradoxically). */
+      assign( expdHi, sz==4 ? unop(Iop_64to32, mkexpr(expdHi64))
+                            : mkexpr(expdHi64) );
+      assign( expdLo, sz==4 ? unop(Iop_64to32, mkexpr(expdLo64))
+                            : mkexpr(expdLo64) );
+      assign( dataHi, sz==4 ? getIReg32(R_RCX) : getIReg64(R_RCX) );
+      assign( dataLo, sz==4 ? getIReg32(R_RBX) : getIReg64(R_RBX) );
+
+      /* Do the DCAS */
+      stmt( IRStmt_CAS(
+               mkIRCAS( oldHi, oldLo, 
+                        Iend_LE, mkexpr(addr), 
+                        mkexpr(expdHi), mkexpr(expdLo),
+                        mkexpr(dataHi), mkexpr(dataLo)
+            )));
+
+      /* success when oldHi:oldLo == expdHi:expdLo */
+      assign( success,
+              binop(opCasCmpEQ,
+                    binop(opOR,
+                          binop(opXOR, mkexpr(oldHi), mkexpr(expdHi)),
+                          binop(opXOR, mkexpr(oldLo), mkexpr(expdLo))
+                    ),
+                    zero
+              ));
+
+      /* If the DCAS is successful, that is to say oldHi:oldLo ==
+         expdHi:expdLo, then put expdHi:expdLo back in RDX:RAX,
+         which is where they came from originally.  Both the actual
+         contents of these two regs, and any shadow values, are
+         unchanged.  If the DCAS fails then we're putting into
+         RDX:RAX the value seen in memory. */
+      /* Now of course there's a complication in the 32-bit case
+         (bah!): if the DCAS succeeds, we need to leave RDX:RAX
+         unchanged; but if we use the same scheme as in the 64-bit
+         case, we get hit by the standard rule that a write to the
+         bottom 32 bits of an integer register zeros the upper 32
+         bits.  And so the upper halves of RDX and RAX mysteriously
+         become zero.  So we have to stuff back in the original
+         64-bit values which we previously stashed in
+         expdHi64:expdLo64, even if we're doing a cmpxchg8b. */
+      /* It's just _so_ much fun ... */
+      putIRegRDX( 8,
+                  IRExpr_ITE( mkexpr(success),
+                              mkexpr(expdHi64),
+                              sz == 4 ? unop(Iop_32Uto64, mkexpr(oldHi))
+                                      : mkexpr(oldHi)
+                ));
+      putIRegRAX( 8,
+                  IRExpr_ITE( mkexpr(success),
+                              mkexpr(expdLo64),
+                              sz == 4 ? unop(Iop_32Uto64, mkexpr(oldLo))
+                                      : mkexpr(oldLo)
+                ));
+
+      /* Copy the success bit into the Z flag and leave the others
+         unchanged */
+      assign( flags_old, widenUto64(mk_amd64g_calculate_rflags_all()));
+      assign( 
+         flags_new,
+         binop(Iop_Or64,
+               binop(Iop_And64, mkexpr(flags_old), 
+                                mkU64(~AMD64G_CC_MASK_Z)),
+               binop(Iop_Shl64,
+                     binop(Iop_And64,
+                           unop(Iop_1Uto64, mkexpr(success)), mkU64(1)), 
+                     mkU8(AMD64G_CC_SHIFT_Z)) ));
+
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(AMD64G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(flags_new) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0) ));
+      /* Set NDEP even though it isn't used.  This makes
+         redundant-PUT elimination of previous stores to this field
+         work better. */
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkU64(0) ));
+
+      /* Sheesh.  Aren't you glad it was me and not you that had to
+         write and validate all this grunge? */
+
+      DIP("cmpxchg8b %s\n", dis_buf);
+      return delta;
+   }
+
+   case 0xC8: /* BSWAP %eax */
+   case 0xC9:
+   case 0xCA:
+   case 0xCB:
+   case 0xCC:
+   case 0xCD:
+   case 0xCE:
+   case 0xCF: /* BSWAP %edi */
+      if (haveF2orF3(pfx)) goto decode_failure;
+      /* According to the AMD64 docs, this insn can have size 4 or
+         8. */
+      if (sz == 4) {
+         t1 = newTemp(Ity_I32);
+         assign( t1, getIRegRexB(4, pfx, opc-0xC8) );
+         t2 = math_BSWAP( t1, Ity_I32 );
+         putIRegRexB(4, pfx, opc-0xC8, mkexpr(t2));
+         DIP("bswapl %s\n", nameIRegRexB(4, pfx, opc-0xC8));
+         return delta;
+      }
+      if (sz == 8) {
+         t1 = newTemp(Ity_I64);
+         t2 = newTemp(Ity_I64);
+         assign( t1, getIRegRexB(8, pfx, opc-0xC8) );
+         t2 = math_BSWAP( t1, Ity_I64 );
+         putIRegRexB(8, pfx, opc-0xC8, mkexpr(t2));
+         DIP("bswapq %s\n", nameIRegRexB(8, pfx, opc-0xC8));
+         return delta;
+      }
+      goto decode_failure;
+
+   default:
+      break;
+
+   } /* first switch */
+
+
+   /* =-=-=-=-=-=-=-=-= MMXery =-=-=-=-=-=-=-=-= */
+   /* In the second switch, pick off MMX insns. */
+
+   if (!have66orF2orF3(pfx)) {
+      /* So there's no SIMD prefix. */
+
+      vassert(sz == 4 || sz == 8);
+
+      switch (opc) { /* second switch */
+
+      case 0x71: 
+      case 0x72: 
+      case 0x73: /* PSLLgg/PSRAgg/PSRLgg mmxreg by imm8 */
+
+      case 0x6E: /* MOVD (src)ireg-or-mem, (dst)mmxreg */
+      case 0x7E: /* MOVD (src)mmxreg, (dst)ireg-or-mem */
+      case 0x7F: /* MOVQ (src)mmxreg, (dst)mmxreg-or-mem */
+      case 0x6F: /* MOVQ (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xFC: 
+      case 0xFD: 
+      case 0xFE: /* PADDgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xEC: 
+      case 0xED: /* PADDSgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xDC:
+      case 0xDD: /* PADDUSgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xF8: 
+      case 0xF9: 
+      case 0xFA: /* PSUBgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xE8: 
+      case 0xE9: /* PSUBSgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xD8: 
+      case 0xD9: /* PSUBUSgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xE5: /* PMULHW (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xD5: /* PMULLW (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xF5: /* PMADDWD (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x74: 
+      case 0x75: 
+      case 0x76: /* PCMPEQgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x64: 
+      case 0x65: 
+      case 0x66: /* PCMPGTgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x6B: /* PACKSSDW (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0x63: /* PACKSSWB (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0x67: /* PACKUSWB (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x68: 
+      case 0x69: 
+      case 0x6A: /* PUNPCKHgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x60: 
+      case 0x61: 
+      case 0x62: /* PUNPCKLgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xDB: /* PAND (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xDF: /* PANDN (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xEB: /* POR (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xEF: /* PXOR (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xF1: /* PSLLgg (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xF2: 
+      case 0xF3: 
+
+      case 0xD1: /* PSRLgg (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xD2: 
+      case 0xD3: 
+
+      case 0xE1: /* PSRAgg (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xE2: { 
+         Bool decode_OK = False;
+         delta = dis_MMX ( &decode_OK, vbi, pfx, sz, deltaIN );
+         if (decode_OK)
+            return delta;
+         goto decode_failure;
+      }
+
+      default:
+         break;
+      } /* second switch */
+
+   }
+
+   /* A couple of MMX corner cases */
+   if (opc == 0x0E/* FEMMS */ || opc == 0x77/* EMMS */) {
+      if (sz != 4)
+         goto decode_failure;
+      do_EMMS_preamble();
+      DIP("{f}emms\n");
+      return delta;
+   }
+
+   /* =-=-=-=-=-=-=-=-= SSE2ery =-=-=-=-=-=-=-=-= */
+   /* Perhaps it's an SSE or SSE2 instruction.  We can try this
+      without checking the guest hwcaps because SSE2 is a baseline
+      facility in 64 bit mode. */
+   {
+      Bool decode_OK = False;
+      delta = dis_ESC_0F__SSE2 ( &decode_OK, vbi, pfx, sz, deltaIN, dres );
+      if (decode_OK)
+         return delta;
+   }
+
+   /* =-=-=-=-=-=-=-=-= SSE3ery =-=-=-=-=-=-=-=-= */
+   /* Perhaps it's a SSE3 instruction.  FIXME: check guest hwcaps
+      first. */
+   {
+      Bool decode_OK = False;
+      delta = dis_ESC_0F__SSE3 ( &decode_OK, vbi, pfx, sz, deltaIN );
+      if (decode_OK)
+         return delta;
+   }
+
+   /* =-=-=-=-=-=-=-=-= SSE4ery =-=-=-=-=-=-=-=-= */
+   /* Perhaps it's a SSE4 instruction.  FIXME: check guest hwcaps
+      first. */
+   {
+      Bool decode_OK = False;
+      delta = dis_ESC_0F__SSE4 ( &decode_OK,
+                                 archinfo, vbi, pfx, sz, deltaIN );
+      if (decode_OK)
+         return delta;
+   }
+
+  decode_failure:
+   return deltaIN; /* fail */
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level post-escape decoders: dis_ESC_0F38         ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
__attribute__((noinline))
static
Long dis_ESC_0F38 (
        /*MB_OUT*/DisResult* dres,
        Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
        Bool         resteerCisOk,
        void*        callback_opaque,
        const VexArchInfo* archinfo,
        const VexAbiInfo*  vbi,
        Prefix pfx, Int sz, Long deltaIN 
     )
{
   /* Decode an instruction whose opcode byte follows a 0F 38 escape.
      'deltaIN' is the offset of that opcode byte.  On success, returns
      the offset just past the decoded instruction; on failure returns
      'deltaIN' unchanged, which the caller treats as decode failure.
      Opcodes not claimed by the switch below are offered to the SSSE3
      and then the SSE4 sub-decoders. */
   Long   delta = deltaIN;
   UChar  opc   = getUChar(delta);
   delta++;
   switch (opc) {

   case 0xF0:   /* 0F 38 F0 = MOVBE m16/32/64(E), r16/32/64(G) */
   case 0xF1: { /* 0F 38 F1 = MOVBE r16/32/64(G), m16/32/64(E) */
      /* MOVBE: a load (F0) or store (F1) that byte-swaps the data in
         transit.  Rejected if F2/F3 prefixes or VEX encoding are
         present, or for unsupported operand sizes. */
      if (!haveF2orF3(pfx) && !haveVEX(pfx)
          && (sz == 2 || sz == 4 || sz == 8)) {
         IRTemp addr  = IRTemp_INVALID;
         UChar  modrm = 0;
         Int    alen  = 0;
         HChar  dis_buf[50];
         modrm = getUChar(delta);
         /* A register-to-register MOVBE does not exist; fall through
            to the sub-decoders below. */
         if (epartIsReg(modrm)) break;
         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
         delta += alen;
         IRType ty = szToITy(sz);
         IRTemp src = newTemp(ty);
         if (opc == 0xF0) { /* LOAD */
            assign(src, loadLE(ty, mkexpr(addr)));
            IRTemp dst = math_BSWAP(src, ty);
            putIRegG(sz, pfx, modrm, mkexpr(dst));
            DIP("movbe %s,%s\n", dis_buf, nameIRegG(sz, pfx, modrm));
         } else { /* STORE */
            assign(src, getIRegG(sz, pfx, modrm));
            IRTemp dst = math_BSWAP(src, ty);
            storeLE(mkexpr(addr), mkexpr(dst));
            DIP("movbe %s,%s\n", nameIRegG(sz, pfx, modrm), dis_buf);
         }
         return delta;
      }
      /* else fall through; maybe one of the decoders below knows what
         it is. */
      break;
   }

   default:
      break;

   }

   /* =-=-=-=-=-=-=-=-= SSSE3ery =-=-=-=-=-=-=-=-= */
   /* Perhaps it's an SSSE3 instruction.  FIXME: consult guest hwcaps
      rather than proceeding indiscriminately. */
   {
      Bool decode_OK = False;
      delta = dis_ESC_0F38__SupSSE3 ( &decode_OK, vbi, pfx, sz, deltaIN );
      if (decode_OK)
         return delta;
   }

   /* =-=-=-=-=-=-=-=-= SSE4ery =-=-=-=-=-=-=-=-= */
   /* Perhaps it's an SSE4 instruction.  FIXME: consult guest hwcaps
      rather than proceeding indiscriminately. */
   {
      Bool decode_OK = False;
      delta = dis_ESC_0F38__SSE4 ( &decode_OK, vbi, pfx, sz, deltaIN );
      if (decode_OK)
         return delta;
   }

  /*decode_failure:*/
   return deltaIN; /* fail */
}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level post-escape decoders: dis_ESC_0F3A         ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+__attribute__((noinline))
+static
+Long dis_ESC_0F3A (
+        /*MB_OUT*/DisResult* dres,
+        Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+        Bool         resteerCisOk,
+        void*        callback_opaque,
+        const VexArchInfo* archinfo,
+        const VexAbiInfo*  vbi,
+        Prefix pfx, Int sz, Long deltaIN 
+     )
+{
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   switch (opc) {
+
+   default:
+      break;
+
+   }
+
+   /* =-=-=-=-=-=-=-=-= SSSE3ery =-=-=-=-=-=-=-=-= */
+   /* Perhaps it's an SSSE3 instruction.  FIXME: consult guest hwcaps
+      rather than proceeding indiscriminately. */
+   {
+      Bool decode_OK = False;
+      delta = dis_ESC_0F3A__SupSSE3 ( &decode_OK, vbi, pfx, sz, deltaIN );
+      if (decode_OK)
+         return delta;
+   }
+
+   /* =-=-=-=-=-=-=-=-= SSE4ery =-=-=-=-=-=-=-=-= */
+   /* Perhaps it's an SSE4 instruction.  FIXME: consult guest hwcaps
+      rather than proceeding indiscriminately. */
+   {
+      Bool decode_OK = False;
+      delta = dis_ESC_0F3A__SSE4 ( &decode_OK, vbi, pfx, sz, deltaIN );
+      if (decode_OK)
+         return delta;
+   }
+
+   return deltaIN; /* fail */
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level post-escape decoders: dis_ESC_0F__VEX      ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
/* FIXME: common up with the _256_ version below? */
static
Long dis_VEX_NDS_128_AnySimdPfx_0F_WIG (
        /*OUT*/Bool* uses_vvvv, const VexAbiInfo* vbi,
        Prefix pfx, Long delta, const HChar* name,
        /* The actual operation.  Use either 'op' or 'opfn',
           but not both. */
        IROp op, IRTemp(*opFn)(IRTemp,IRTemp),
        Bool invertLeftArg,
        Bool swapArgs
     )
{
   /* Generic handler for VEX.NDS.128 three-operand insns of the form
      G = vvvv `op` E, where E is an XMM register or 128-bit memory.
      The operation is supplied either as a plain IROp ('op') or as a
      generator function ('opFn'); exactly one must be given.
      'invertLeftArg' bitwise-NOTs the vvvv operand before use, and
      'swapArgs' exchanges the two operands.  The result goes to the
      low 128 bits of the destination, with the upper YMM lane zeroed
      as VEX-encoded insns require.  Sets *uses_vvvv and returns the
      offset just past the decoded instruction. */
   UChar  modrm = getUChar(delta);
   UInt   rD    = gregOfRexRM(pfx, modrm);
   UInt   rSL   = getVexNvvvv(pfx);
   IRTemp tSL   = newTemp(Ity_V128);
   IRTemp tSR   = newTemp(Ity_V128);
   IRTemp addr  = IRTemp_INVALID;
   HChar  dis_buf[50];
   Int    alen  = 0;
   vassert(0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*WIG?*/);

   /* Left source: the vvvv register, possibly complemented (used by
      the ANDN-style operations). */
   assign(tSL, invertLeftArg ? unop(Iop_NotV128, getXMMReg(rSL))
                             : getXMMReg(rSL));

   /* Right source: E, either a register or a 128-bit load. */
   if (epartIsReg(modrm)) {
      UInt rSR = eregOfRexRM(pfx, modrm);
      delta += 1;
      assign(tSR, getXMMReg(rSR));
      DIP("%s %s,%s,%s\n",
          name, nameXMMReg(rSR), nameXMMReg(rSL), nameXMMReg(rD));
   } else {
      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
      delta += alen;
      assign(tSR, loadLE(Ity_V128, mkexpr(addr)));
      DIP("%s %s,%s,%s\n",
          name, dis_buf, nameXMMReg(rSL), nameXMMReg(rD));
   }

   IRTemp res = IRTemp_INVALID;
   if (op != Iop_INVALID) {
      vassert(opFn == NULL);
      res = newTemp(Ity_V128);
      if (requiresRMode(op)) {
         /* Op takes a rounding mode; supply a fake (default) one for
            now. */
         IRTemp rm = newTemp(Ity_I32);
         assign(rm, get_FAKE_roundingmode()); /* XXXROUNDINGFIXME */
         assign(res, swapArgs
                        ? triop(op, mkexpr(rm), mkexpr(tSR), mkexpr(tSL))
                        : triop(op, mkexpr(rm), mkexpr(tSL), mkexpr(tSR)));
      } else {
         assign(res, swapArgs
                        ? binop(op, mkexpr(tSR), mkexpr(tSL))
                        : binop(op, mkexpr(tSL), mkexpr(tSR)));
      }
   } else {
      vassert(opFn != NULL);
      res = swapArgs ? opFn(tSR, tSL) : opFn(tSL, tSR);
   }

   /* Write the 128-bit result and zero bits 255:128 of rD. */
   putYMMRegLoAndZU(rD, mkexpr(res));

   *uses_vvvv = True;
   return delta;
}
+
+
+/* Handle a VEX_NDS_128_66_0F_WIG (3-addr) insn, with a simple IROp
+   for the operation, no inversion of the left arg, and no swapping of
+   args. */
+static
+Long dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple (
+        /*OUT*/Bool* uses_vvvv, const VexAbiInfo* vbi,
+        Prefix pfx, Long delta, const HChar* name,
+        IROp op
+     )
+{
+   return dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+             uses_vvvv, vbi, pfx, delta, name, op, NULL, False, False);
+}
+
+
+/* Handle a VEX_NDS_128_66_0F_WIG (3-addr) insn, using the given IR
+   generator to compute the result, no inversion of the left
+   arg, and no swapping of args. */
+static
+Long dis_VEX_NDS_128_AnySimdPfx_0F_WIG_complex (
+        /*OUT*/Bool* uses_vvvv, const VexAbiInfo* vbi,
+        Prefix pfx, Long delta, const HChar* name,
+        IRTemp(*opFn)(IRTemp,IRTemp)
+     )
+{
+   return dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+             uses_vvvv, vbi, pfx, delta, name,
+             Iop_INVALID, opFn, False, False );
+}
+
+
+/* Vector by scalar shift of V by the amount specified at the bottom
+   of E. */
+static ULong dis_AVX128_shiftV_byE ( const VexAbiInfo* vbi,
+                                     Prefix pfx, Long delta, 
+                                     const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen, size;
+   IRTemp  addr;
+   Bool    shl, shr, sar;
+   UChar   modrm = getUChar(delta);
+   UInt    rG    = gregOfRexRM(pfx,modrm);
+   UInt    rV    = getVexNvvvv(pfx);;
+   IRTemp  g0    = newTemp(Ity_V128);
+   IRTemp  g1    = newTemp(Ity_V128);
+   IRTemp  amt   = newTemp(Ity_I64);
+   IRTemp  amt8  = newTemp(Ity_I8);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( amt, getXMMRegLane64(rE, 0) );
+      DIP("%s %s,%s,%s\n", opname, nameXMMReg(rE),
+          nameXMMReg(rV), nameXMMReg(rG) );
+      delta++;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( amt, loadLE(Ity_I64, mkexpr(addr)) );
+      DIP("%s %s,%s,%s\n", opname, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
+      delta += alen;
+   }
+   assign( g0, getXMMReg(rV) );
+   assign( amt8, unop(Iop_64to8, mkexpr(amt)) );
+
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      case Iop_ShlN16x8: shl = True; size = 32; break;
+      case Iop_ShlN32x4: shl = True; size = 32; break;
+      case Iop_ShlN64x2: shl = True; size = 64; break;
+      case Iop_SarN16x8: sar = True; size = 16; break;
+      case Iop_SarN32x4: sar = True; size = 32; break;
+      case Iop_ShrN16x8: shr = True; size = 16; break;
+      case Iop_ShrN32x4: shr = True; size = 32; break;
+      case Iop_ShrN64x2: shr = True; size = 64; break;
+      default: vassert(0);
+   }
+
+   if (shl || shr) {
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           mkV128(0x0000)
+        )
+     );
+   } else 
+   if (sar) {
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           binop(op, mkexpr(g0), mkU8(size-1))
+        )
+     );
+   } else {
+      vassert(0);
+   }
+
+   putYMMRegLoAndZU( rG, mkexpr(g1) );
+   return delta;
+}
+
+
+/* Vector by scalar shift of V by the amount specified at the bottom
+   of E. */
+static ULong dis_AVX256_shiftV_byE ( const VexAbiInfo* vbi,
+                                     Prefix pfx, Long delta, 
+                                     const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen, size;
+   IRTemp  addr;
+   Bool    shl, shr, sar;
+   UChar   modrm = getUChar(delta);
+   UInt    rG    = gregOfRexRM(pfx,modrm);
+   UInt    rV    = getVexNvvvv(pfx);;
+   IRTemp  g0    = newTemp(Ity_V256);
+   IRTemp  g1    = newTemp(Ity_V256);
+   IRTemp  amt   = newTemp(Ity_I64);
+   IRTemp  amt8  = newTemp(Ity_I8);
+   if (epartIsReg(modrm)) {
+      UInt rE = eregOfRexRM(pfx,modrm);
+      assign( amt, getXMMRegLane64(rE, 0) );
+      DIP("%s %s,%s,%s\n", opname, nameXMMReg(rE),
+          nameYMMReg(rV), nameYMMReg(rG) );
+      delta++;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( amt, loadLE(Ity_I64, mkexpr(addr)) );
+      DIP("%s %s,%s,%s\n", opname, dis_buf, nameYMMReg(rV), nameYMMReg(rG) );
+      delta += alen;
+   }
+   assign( g0, getYMMReg(rV) );
+   assign( amt8, unop(Iop_64to8, mkexpr(amt)) );
+
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      case Iop_ShlN16x16: shl = True; size = 32; break;
+      case Iop_ShlN32x8:  shl = True; size = 32; break;
+      case Iop_ShlN64x4:  shl = True; size = 64; break;
+      case Iop_SarN16x16: sar = True; size = 16; break;
+      case Iop_SarN32x8:  sar = True; size = 32; break;
+      case Iop_ShrN16x16: shr = True; size = 16; break;
+      case Iop_ShrN32x8:  shr = True; size = 32; break;
+      case Iop_ShrN64x4:  shr = True; size = 64; break;
+      default: vassert(0);
+   }
+
+   if (shl || shr) {
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           binop(Iop_V128HLtoV256, mkV128(0), mkV128(0))
+        )
+     );
+   } else 
+   if (sar) {
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT64U, mkexpr(amt), mkU64(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           binop(op, mkexpr(g0), mkU8(size-1))
+        )
+     );
+   } else {
+      vassert(0);
+   }
+
+   putYMMReg( rG, mkexpr(g1) );
+   return delta;
+}
+
+
/* Vector by vector shift of V by the amount specified at the bottom
   of E.  Vector by vector shifts are defined for all shift amounts,
   so not using Iop_S*x* here (and SSE2 doesn't support variable shifts
   anyway).  */
static ULong dis_AVX_var_shiftV_byE ( const VexAbiInfo* vbi,
                                      Prefix pfx, Long delta,
                                      const HChar* opname, IROp op, Bool isYMM )
{
   /* Implements VPSLLV/VPSRLV/VPSRAV-style per-lane variable shifts:
      each lane of V is shifted by the amount in the corresponding
      lane of E.  Because each lane can have a different amount, the
      vector is broken into scalar lanes, each lane shifted with a
      scalar IROp ('op'), and the results reassembled.  'isYMM'
      selects the 256-bit form; the 128-bit form zeroes the upper
      lanes of the destination YMM register. */
   HChar   dis_buf[50];
   Int     alen, size, i;
   IRTemp  addr;
   UChar   modrm = getUChar(delta);
   UInt    rG    = gregOfRexRM(pfx,modrm);
   UInt    rV    = getVexNvvvv(pfx);;
   IRTemp  sV    = isYMM ? newTemp(Ity_V256) : newTemp(Ity_V128);
   IRTemp  amt   = isYMM ? newTemp(Ity_V256) : newTemp(Ity_V128);
   /* Up to 8 lanes (V256 split into 32-bit pieces); unused entries
      stay IRTemp_INVALID. */
   IRTemp  amts[8], sVs[8], res[8];
   /* Fetch the per-lane amounts from E (register or memory). */
   if (epartIsReg(modrm)) {
      UInt rE = eregOfRexRM(pfx,modrm);
      assign( amt, isYMM ? getYMMReg(rE) : getXMMReg(rE) );
      if (isYMM) {
         DIP("%s %s,%s,%s\n", opname, nameYMMReg(rE),
             nameYMMReg(rV), nameYMMReg(rG) );
      } else {
         DIP("%s %s,%s,%s\n", opname, nameXMMReg(rE),
             nameXMMReg(rV), nameXMMReg(rG) );
      }
      delta++;
   } else {
      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
      assign( amt, loadLE(isYMM ? Ity_V256 : Ity_V128, mkexpr(addr)) );
      if (isYMM) {
         DIP("%s %s,%s,%s\n", opname, dis_buf, nameYMMReg(rV),
             nameYMMReg(rG) );
      } else {
         DIP("%s %s,%s,%s\n", opname, dis_buf, nameXMMReg(rV),
             nameXMMReg(rG) );
      }
      delta += alen;
   }
   assign( sV, isYMM ? getYMMReg(rV) : getXMMReg(rV) );

   /* Lane width, from the scalar shift op. */
   size = 0;
   switch (op) {
      case Iop_Shl32: size = 32; break;
      case Iop_Shl64: size = 64; break;
      case Iop_Sar32: size = 32; break;
      case Iop_Shr32: size = 32; break;
      case Iop_Shr64: size = 64; break;
      default: vassert(0);
   }

   /* Split data and amounts into scalar lanes. */
   for (i = 0; i < 8; i++) {
      sVs[i] = IRTemp_INVALID;
      amts[i] = IRTemp_INVALID;
   }
   switch (size) {
      case 32:
         if (isYMM) {
            breakupV256to32s( sV, &sVs[7], &sVs[6], &sVs[5], &sVs[4],
                                  &sVs[3], &sVs[2], &sVs[1], &sVs[0] );
            breakupV256to32s( amt, &amts[7], &amts[6], &amts[5], &amts[4],
                                   &amts[3], &amts[2], &amts[1], &amts[0] );
         } else {
            breakupV128to32s( sV, &sVs[3], &sVs[2], &sVs[1], &sVs[0] );
            breakupV128to32s( amt, &amts[3], &amts[2], &amts[1], &amts[0] );
        }
         break;
      case 64:
         if (isYMM) {
            breakupV256to64s( sV, &sVs[3], &sVs[2], &sVs[1], &sVs[0] );
            breakupV256to64s( amt, &amts[3], &amts[2], &amts[1], &amts[0] );
         } else {
            breakupV128to64s( sV, &sVs[1], &sVs[0] );
            breakupV128to64s( amt, &amts[1], &amts[0] );
         }
         break;
      default: vassert(0);
   }
   /* Shift each populated lane.  Out-of-range amounts give zero for
      logical shifts, and sign-replication (shift by size-1) for the
      arithmetic Iop_Sar32 case. */
   for (i = 0; i < 8; i++)
      if (sVs[i] != IRTemp_INVALID) {
         res[i] = size == 32 ? newTemp(Ity_I32) : newTemp(Ity_I64);
         assign( res[i],
                 IRExpr_ITE(
                    binop(size == 32 ? Iop_CmpLT32U : Iop_CmpLT64U,
                          mkexpr(amts[i]),
                          size == 32 ? mkU32(size) : mkU64(size)),
                    binop(op, mkexpr(sVs[i]),
                               unop(size == 32 ? Iop_32to8 : Iop_64to8,
                                    mkexpr(amts[i]))),
                    op == Iop_Sar32 ? binop(op, mkexpr(sVs[i]), mkU8(size-1))
                                    : size == 32 ? mkU32(0) : mkU64(0)
         ));
      }
   /* Reassemble into rG; for the 128-bit form, the upper YMM lanes
      are explicitly zeroed. */
   switch (size) {
      case 32:
         for (i = 0; i < 8; i++)
            putYMMRegLane32( rG, i, (i < 4 || isYMM)
                                    ? mkexpr(res[i]) : mkU32(0) );
         break;
      case 64:
         for (i = 0; i < 4; i++)
            putYMMRegLane64( rG, i, (i < 2 || isYMM)
                                    ? mkexpr(res[i]) : mkU64(0) );
         break;
      default: vassert(0);
   }

   return delta;
}
+
+
+/* Vector by scalar shift of E into V, by an immediate byte.  Modified
+   version of dis_SSE_shiftE_imm. */
+static
+Long dis_AVX128_shiftE_to_V_imm( Prefix pfx, 
+                                 Long delta, const HChar* opname, IROp op )
+{
+   Bool    shl, shr, sar;
+   UChar   rm   = getUChar(delta);
+   IRTemp  e0   = newTemp(Ity_V128);
+   IRTemp  e1   = newTemp(Ity_V128);
+   UInt    rD   = getVexNvvvv(pfx);
+   UChar   amt, size;
+   vassert(epartIsReg(rm));
+   vassert(gregLO3ofRM(rm) == 2 
+           || gregLO3ofRM(rm) == 4 || gregLO3ofRM(rm) == 6);
+   amt = getUChar(delta+1);
+   delta += 2;
+   DIP("%s $%d,%s,%s\n", opname,
+                         (Int)amt,
+                         nameXMMReg(eregOfRexRM(pfx,rm)),
+                         nameXMMReg(rD));
+   assign( e0, getXMMReg(eregOfRexRM(pfx,rm)) );
+
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      case Iop_ShlN16x8: shl = True; size = 16; break;
+      case Iop_ShlN32x4: shl = True; size = 32; break;
+      case Iop_ShlN64x2: shl = True; size = 64; break;
+      case Iop_SarN16x8: sar = True; size = 16; break;
+      case Iop_SarN32x4: sar = True; size = 32; break;
+      case Iop_ShrN16x8: shr = True; size = 16; break;
+      case Iop_ShrN32x4: shr = True; size = 32; break;
+      case Iop_ShrN64x2: shr = True; size = 64; break;
+      default: vassert(0);
+   }
+
+   if (shl || shr) {
+     assign( e1, amt >= size 
+                    ? mkV128(0x0000)
+                    : binop(op, mkexpr(e0), mkU8(amt))
+     );
+   } else 
+   if (sar) {
+     assign( e1, amt >= size 
+                    ? binop(op, mkexpr(e0), mkU8(size-1))
+                    : binop(op, mkexpr(e0), mkU8(amt))
+     );
+   } else {
+      vassert(0);
+   }
+
+   putYMMRegLoAndZU( rD, mkexpr(e1) );
+   return delta;
+}
+
+
+/* Vector by scalar shift of E into V, by an immediate byte.  Modified
+   version of dis_AVX128_shiftE_to_V_imm. */
+static
+Long dis_AVX256_shiftE_to_V_imm( Prefix pfx, 
+                                 Long delta, const HChar* opname, IROp op )
+{
+   Bool    shl, shr, sar;
+   UChar   rm   = getUChar(delta);
+   IRTemp  e0   = newTemp(Ity_V256);
+   IRTemp  e1   = newTemp(Ity_V256);
+   UInt    rD   = getVexNvvvv(pfx);
+   UChar   amt, size;
+   vassert(epartIsReg(rm));
+   vassert(gregLO3ofRM(rm) == 2 
+           || gregLO3ofRM(rm) == 4 || gregLO3ofRM(rm) == 6);
+   amt = getUChar(delta+1);
+   delta += 2;
+   DIP("%s $%d,%s,%s\n", opname,
+                         (Int)amt,
+                         nameYMMReg(eregOfRexRM(pfx,rm)),
+                         nameYMMReg(rD));
+   assign( e0, getYMMReg(eregOfRexRM(pfx,rm)) );
+
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      case Iop_ShlN16x16: shl = True; size = 16; break;
+      case Iop_ShlN32x8:  shl = True; size = 32; break;
+      case Iop_ShlN64x4:  shl = True; size = 64; break;
+      case Iop_SarN16x16: sar = True; size = 16; break;
+      case Iop_SarN32x8:  sar = True; size = 32; break;
+      case Iop_ShrN16x16: shr = True; size = 16; break;
+      case Iop_ShrN32x8:  shr = True; size = 32; break;
+      case Iop_ShrN64x4:  shr = True; size = 64; break;
+      default: vassert(0);
+   }
+
+
+   if (shl || shr) {
+     assign( e1, amt >= size 
+                    ? binop(Iop_V128HLtoV256, mkV128(0), mkV128(0))
+                    : binop(op, mkexpr(e0), mkU8(amt))
+     );
+   } else 
+   if (sar) {
+     assign( e1, amt >= size 
+                    ? binop(op, mkexpr(e0), mkU8(size-1))
+                    : binop(op, mkexpr(e0), mkU8(amt))
+     );
+   } else {
+      vassert(0);
+   }
+
+   putYMMReg( rD, mkexpr(e1) );
+   return delta;
+}
+
+
+/* Lower 64-bit lane only AVX128 binary operation:
+   G[63:0]    = V[63:0] `op` E[63:0]
+   G[127:64]  = V[127:64]
+   G[255:128] = 0.
+   The specified op must be of the 64F0x2 kind, so that it
+   copies the upper half of the left operand to the result.
+*/
+static Long dis_AVX128_E_V_to_G_lo64 ( /*OUT*/Bool* uses_vvvv,
+                                       const VexAbiInfo* vbi,
+                                       Prefix pfx, Long delta, 
+                                       const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm    = getUChar(delta);
+   UInt    rG    = gregOfRexRM(pfx,rm);
+   UInt    rV    = getVexNvvvv(pfx);
+   IRExpr* vpart = getXMMReg(rV);
+   if (epartIsReg(rm)) {
+      UInt rE = eregOfRexRM(pfx,rm);
+      putXMMReg( rG, binop(op, vpart, getXMMReg(rE)) );
+      DIP("%s %s,%s,%s\n", opname,
+          nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+      delta = delta+1;
+   } else {
+      /* We can only do a 64-bit memory read, so the upper half of the
+         E operand needs to be made simply of zeroes. */
+      IRTemp epart = newTemp(Ity_V128);
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( epart, unop( Iop_64UtoV128,
+                           loadLE(Ity_I64, mkexpr(addr))) );
+      putXMMReg( rG, binop(op, vpart, mkexpr(epart)) );
+      DIP("%s %s,%s,%s\n", opname,
+          dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+      delta = delta+alen;
+   }
+   putYMMRegLane128( rG, 1, mkV128(0) );
+   *uses_vvvv = True;
+   return delta;
+}
+
+
+/* Lower 64-bit lane only AVX128 unary operation:
+   G[63:0]    = op(E[63:0])
+   G[127:64]  = V[127:64]
+   G[255:128] = 0
+   The specified op must be of the 64F0x2 kind, so that it
+   copies the upper half of the operand to the result.
+*/
+static Long dis_AVX128_E_V_to_G_lo64_unary ( /*OUT*/Bool* uses_vvvv,
+                                             const VexAbiInfo* vbi,
+                                             Prefix pfx, Long delta, 
+                                             const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm  = getUChar(delta);
+   UInt    rG  = gregOfRexRM(pfx,rm);
+   UInt    rV  = getVexNvvvv(pfx);
+   IRTemp  e64 = newTemp(Ity_I64);
+
+   /* Fetch E[63:0] */
+   if (epartIsReg(rm)) {
+      UInt rE = eregOfRexRM(pfx,rm);
+      assign(e64, getXMMRegLane64(rE, 0));
+      DIP("%s %s,%s,%s\n", opname,
+          nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+      delta += 1;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign(e64, loadLE(Ity_I64, mkexpr(addr)));
+      DIP("%s %s,%s,%s\n", opname,
+          dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+      delta += alen;
+   }
+
+   /* Create a value 'arg' as V[127:64]++E[63:0] */
+   IRTemp arg = newTemp(Ity_V128);
+   assign(arg,
+          binop(Iop_SetV128lo64,
+                getXMMReg(rV), mkexpr(e64)));
+   /* and apply op to it */
+   putYMMRegLoAndZU( rG, unop(op, mkexpr(arg)) );
+   *uses_vvvv = True;
+   return delta;
+}
+
+
+/* Lower 32-bit lane only AVX128 unary operation:
+   G[31:0]    = op(E[31:0])
+   G[127:32]  = V[127:32]
+   G[255:128] = 0
+   The specified op must be of the 32F0x4 kind, so that it
+   copies the upper 3/4 of the operand to the result.
+*/
+static Long dis_AVX128_E_V_to_G_lo32_unary ( /*OUT*/Bool* uses_vvvv,
+                                             const VexAbiInfo* vbi,
+                                             Prefix pfx, Long delta, 
+                                             const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm  = getUChar(delta);
+   UInt    rG  = gregOfRexRM(pfx,rm);
+   UInt    rV  = getVexNvvvv(pfx);
+   IRTemp  e32 = newTemp(Ity_I32);
+
+   /* Fetch E[31:0] */
+   if (epartIsReg(rm)) {
+      UInt rE = eregOfRexRM(pfx,rm);
+      assign(e32, getXMMRegLane32(rE, 0));
+      DIP("%s %s,%s,%s\n", opname,
+          nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+      delta += 1;
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign(e32, loadLE(Ity_I32, mkexpr(addr)));
+      DIP("%s %s,%s,%s\n", opname,
+          dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+      delta += alen;
+   }
+
+   /* Create a value 'arg' as V[127:32]++E[31:0] */
+   IRTemp arg = newTemp(Ity_V128);
+   assign(arg,
+          binop(Iop_SetV128lo32,
+                getXMMReg(rV), mkexpr(e32)));
+   /* and apply op to it */
+   putYMMRegLoAndZU( rG, unop(op, mkexpr(arg)) );
+   *uses_vvvv = True;
+   return delta;
+}
+
+
+/* Lower 32-bit lane only AVX128 binary operation:
+   G[31:0]    = V[31:0] `op` E[31:0]
+   G[127:32]  = V[127:32]
+   G[255:128] = 0.
+   The specified op must be of the 32F0x4 kind, so that it
+   copies the upper 3/4 of the left operand to the result.
+*/
+static Long dis_AVX128_E_V_to_G_lo32 ( /*OUT*/Bool* uses_vvvv,
+                                       const VexAbiInfo* vbi,
+                                       Prefix pfx, Long delta, 
+                                       const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm    = getUChar(delta);
+   UInt    rG    = gregOfRexRM(pfx,rm);
+   UInt    rV    = getVexNvvvv(pfx);
+   IRExpr* vpart = getXMMReg(rV);
+   if (epartIsReg(rm)) {
+      UInt rE = eregOfRexRM(pfx,rm);
+      putXMMReg( rG, binop(op, vpart, getXMMReg(rE)) );
+      DIP("%s %s,%s,%s\n", opname,
+          nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+      delta = delta+1;
+   } else {
+      /* We can only do a 32-bit memory read, so the upper 3/4 of the
+         E operand needs to be made simply of zeroes. */
+      IRTemp epart = newTemp(Ity_V128);
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( epart, unop( Iop_32UtoV128,
+                           loadLE(Ity_I32, mkexpr(addr))) );
+      putXMMReg( rG, binop(op, vpart, mkexpr(epart)) );
+      DIP("%s %s,%s,%s\n", opname,
+          dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+      delta = delta+alen;
+   }
+   putYMMRegLane128( rG, 1, mkV128(0) );
+   *uses_vvvv = True;
+   return delta;
+}
+
+
+/* All-lanes AVX128 binary operation:
+   G[127:0]   = V[127:0] `op` E[127:0]
+   G[255:128] = 0.
+*/
+static Long dis_AVX128_E_V_to_G ( /*OUT*/Bool* uses_vvvv,
+                                  const VexAbiInfo* vbi,
+                                  Prefix pfx, Long delta, 
+                                  const HChar* opname, IROp op )
+{
+   return dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+             uses_vvvv, vbi, pfx, delta, opname, op,
+             NULL, False/*!invertLeftArg*/, False/*!swapArgs*/
+   );
+}
+
+
/* Handles AVX128 32F/64F comparisons.  A derivative of
   dis_SSEcmp_E_to_G.  It can fail, in which case it returns the
   original delta to indicate failure.

   Decodes VCMPPS/VCMPPD/VCMPSS/VCMPSD (VEX.128 forms): the imm8
   that follows the modrm/amode selects the comparison predicate,
   which findSSECmpOp translates into an IROp plus two fixups:
   preSwap (swap the operands before comparing) and postNot (invert
   the comparison result afterwards).  all_lanes distinguishes the
   packed forms (PS/PD) from the scalar forms (SS/SD); sz is 4 for
   32F and 8 for 64F. */
static
Long dis_AVX128_cmp_V_E_to_G ( /*OUT*/Bool* uses_vvvv,
                               const VexAbiInfo* vbi,
                               Prefix pfx, Long delta, 
                               const HChar* opname, Bool all_lanes, Int sz )
{
   vassert(sz == 4 || sz == 8);
   Long    deltaIN = delta;     /* saved so we can signal decode failure */
   HChar   dis_buf[50];
   Int     alen;
   UInt    imm8;
   IRTemp  addr;
   Bool    preSwap = False;
   IROp    op      = Iop_INVALID;
   Bool    postNot = False;
   IRTemp  plain   = newTemp(Ity_V128);
   UChar   rm      = getUChar(delta);
   UInt    rG      = gregOfRexRM(pfx, rm);   /* dest, from modrm.reg */
   UInt    rV      = getVexNvvvv(pfx);       /* first src, from VEX.vvvv */
   IRTemp argL     = newTemp(Ity_V128);
   IRTemp argR     = newTemp(Ity_V128);

   assign(argL, getXMMReg(rV));
   if (epartIsReg(rm)) {
      /* Register E operand: imm8 immediately follows the modrm byte. */
      imm8 = getUChar(delta+1);
      Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
      if (!ok) return deltaIN; /* FAIL */
      UInt rE = eregOfRexRM(pfx,rm);
      assign(argR, getXMMReg(rE));
      delta += 1+1;
      DIP("%s $%d,%s,%s,%s\n",
          opname, (Int)imm8,
          nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
   } else {
      /* Memory E operand: imm8 follows the address-mode bytes.  For
         the scalar forms, load only the bottom 32/64 bits and
         zero-extend into a V128. */
      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
      imm8 = getUChar(delta+alen);
      Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8, all_lanes, sz);
      if (!ok) return deltaIN; /* FAIL */
      assign(argR, 
             all_lanes   ? loadLE(Ity_V128, mkexpr(addr))
             : sz == 8   ? unop( Iop_64UtoV128, loadLE(Ity_I64, mkexpr(addr)))
             : /*sz==4*/   unop( Iop_32UtoV128, loadLE(Ity_I32, mkexpr(addr))));
      delta += alen+1;
      DIP("%s $%d,%s,%s,%s\n",
          opname, (Int)imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
   }

   /* The "plain" comparison result, before any postNot fixup. */
   assign(plain, preSwap ? binop(op, mkexpr(argR), mkexpr(argL))
                         : binop(op, mkexpr(argL), mkexpr(argR)));

   if (all_lanes) {
      /* This is simple: just invert the result, if necessary, and
         have done. */
      if (postNot) {
         putYMMRegLoAndZU( rG, unop(Iop_NotV128, mkexpr(plain)) );
      } else {
         putYMMRegLoAndZU( rG, mkexpr(plain) );
      }
   }
   else
   if (!preSwap) {
      /* More complex.  It's a one-lane-only, hence need to possibly
         invert only that one lane.  But at least the other lanes are
         correctly "in" the result, having been copied from the left
         operand (argL). */
      if (postNot) {
         /* XOR with a mask covering only the bottom lane: 0x000F is
            the low 4 bytes (32F lane), 0x00FF the low 8 (64F lane). */
         IRExpr* mask = mkV128(sz==4 ? 0x000F : 0x00FF);
         putYMMRegLoAndZU( rG, binop(Iop_XorV128, mkexpr(plain),
                                                  mask) );
      } else {
         putYMMRegLoAndZU( rG, mkexpr(plain) );
      }
   }
   else {
      /* This is the most complex case.  One-lane-only, but the args
         were swapped.  So we have to possibly invert the bottom lane,
         and (definitely) we have to copy the upper lane(s) from argL
         since, due to the swapping, what's currently there is from
         argR, which is not correct. */
      IRTemp res     = newTemp(Ity_V128);
      IRTemp mask    = newTemp(Ity_V128);
      IRTemp notMask = newTemp(Ity_V128);
      assign(mask,    mkV128(sz==4 ? 0x000F : 0x00FF));
      assign(notMask, mkV128(sz==4 ? 0xFFF0 : 0xFF00));
      if (postNot) {
         /* res = (~plain & mask) | (argL & ~mask) */
         assign(res,
                binop(Iop_OrV128,
                      binop(Iop_AndV128,
                            unop(Iop_NotV128, mkexpr(plain)),
                            mkexpr(mask)),
                      binop(Iop_AndV128, mkexpr(argL), mkexpr(notMask))));
      } else {
         /* res = (plain & mask) | (argL & ~mask) */
         assign(res,
                binop(Iop_OrV128,
                      binop(Iop_AndV128,
                            mkexpr(plain),
                            mkexpr(mask)),
                      binop(Iop_AndV128, mkexpr(argL), mkexpr(notMask))));
      }
      putYMMRegLoAndZU( rG, mkexpr(res) );
   }

   *uses_vvvv = True;
   return delta;
}
+
+
/* Handles AVX256 32F/64F comparisons.  A derivative of
   dis_SSEcmp_E_to_G.  It can fail, in which case it returns the
   original delta to indicate failure.

   256-bit packed form of VCMPPS/VCMPPD.  Since only V128 comparison
   IROps are available, the 256-bit compare is performed as two
   independent 128-bit halves and reassembled.  Always all-lanes;
   sz is 4 for 32F, 8 for 64F. */
static
Long dis_AVX256_cmp_V_E_to_G ( /*OUT*/Bool* uses_vvvv,
                               const VexAbiInfo* vbi,
                               Prefix pfx, Long delta, 
                               const HChar* opname, Int sz )
{
   vassert(sz == 4 || sz == 8);
   Long    deltaIN = delta;     /* saved so we can signal decode failure */
   HChar   dis_buf[50];
   Int     alen;
   UInt    imm8;
   IRTemp  addr;
   Bool    preSwap = False;
   IROp    op      = Iop_INVALID;
   Bool    postNot = False;
   IRTemp  plain   = newTemp(Ity_V256);
   UChar   rm      = getUChar(delta);
   UInt    rG      = gregOfRexRM(pfx, rm);   /* dest, from modrm.reg */
   UInt    rV      = getVexNvvvv(pfx);       /* first src, from VEX.vvvv */
   IRTemp argL     = newTemp(Ity_V256);
   IRTemp argR     = newTemp(Ity_V256);
   IRTemp argLhi   = IRTemp_INVALID;
   IRTemp argLlo   = IRTemp_INVALID;
   IRTemp argRhi   = IRTemp_INVALID;
   IRTemp argRlo   = IRTemp_INVALID;

   assign(argL, getYMMReg(rV));
   if (epartIsReg(rm)) {
      /* Register E operand: imm8 immediately follows the modrm byte. */
      imm8 = getUChar(delta+1);
      Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8,
                             True/*all_lanes*/, sz);
      if (!ok) return deltaIN; /* FAIL */
      UInt rE = eregOfRexRM(pfx,rm);
      assign(argR, getYMMReg(rE));
      delta += 1+1;
      DIP("%s $%d,%s,%s,%s\n",
          opname, (Int)imm8,
          nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
   } else {
      /* Memory E operand: imm8 follows the address-mode bytes. */
      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
      imm8 = getUChar(delta+alen);
      Bool ok = findSSECmpOp(&preSwap, &op, &postNot, imm8,
                             True/*all_lanes*/, sz);
      if (!ok) return deltaIN; /* FAIL */
      assign(argR, loadLE(Ity_V256, mkexpr(addr)) );
      delta += alen+1;
      DIP("%s $%d,%s,%s,%s\n",
          opname, (Int)imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
   }

   /* Any required operand swap is realised here simply by choosing
      which source feeds which pair of half-temps. */
   breakupV256toV128s( preSwap ? argR : argL, &argLhi, &argLlo );
   breakupV256toV128s( preSwap ? argL : argR, &argRhi, &argRlo );
   assign(plain, binop( Iop_V128HLtoV256,
                        binop(op, mkexpr(argLhi), mkexpr(argRhi)),
                        binop(op, mkexpr(argLlo), mkexpr(argRlo)) ) );

   /* This is simple: just invert the result, if necessary, and
      have done. */
   if (postNot) {
      putYMMReg( rG, unop(Iop_NotV256, mkexpr(plain)) );
   } else {
      putYMMReg( rG, mkexpr(plain) );
   }

   *uses_vvvv = True;
   return delta;
}
+
+
+/* Handles AVX128 unary E-to-G all-lanes operations. */
+static
+Long dis_AVX128_E_to_G_unary ( /*OUT*/Bool* uses_vvvv,
+                               const VexAbiInfo* vbi,
+                               Prefix pfx, Long delta, 
+                               const HChar* opname,
+                               IRTemp (*opFn)(IRTemp) )
+{
+   HChar  dis_buf[50];
+   Int    alen;
+   IRTemp addr;
+   IRTemp res  = newTemp(Ity_V128);
+   IRTemp arg  = newTemp(Ity_V128);
+   UChar  rm   = getUChar(delta);
+   UInt   rG   = gregOfRexRM(pfx, rm);
+   if (epartIsReg(rm)) {
+      UInt rE = eregOfRexRM(pfx,rm);
+      assign(arg, getXMMReg(rE));
+      delta += 1;
+      DIP("%s %s,%s\n", opname, nameXMMReg(rE), nameXMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign(arg, loadLE(Ity_V128, mkexpr(addr)));
+      delta += alen;
+      DIP("%s %s,%s\n", opname, dis_buf, nameXMMReg(rG));
+   }
+   res = opFn(arg);
+   putYMMRegLoAndZU( rG, mkexpr(res) );
+   *uses_vvvv = False;
+   return delta;
+}
+
+
/* Handles AVX128 unary E-to-G all-lanes operations, where the
   operation is a single IROp.  E may be a register or memory; the
   upper 128 bits of the destination YMM register are zeroed. */
static
Long dis_AVX128_E_to_G_unary_all ( /*OUT*/Bool* uses_vvvv,
                                   const VexAbiInfo* vbi,
                                   Prefix pfx, Long delta, 
                                   const HChar* opname, IROp op )
{
   HChar  dis_buf[50];
   Int    alen;
   IRTemp addr;
   IRTemp arg  = newTemp(Ity_V128);
   UChar  rm   = getUChar(delta);
   UInt   rG   = gregOfRexRM(pfx, rm);   /* dest, from modrm.reg */
   if (epartIsReg(rm)) {
      UInt rE = eregOfRexRM(pfx,rm);
      assign(arg, getXMMReg(rE));
      delta += 1;
      DIP("%s %s,%s\n", opname, nameXMMReg(rE), nameXMMReg(rG));
   } else {
      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
      assign(arg, loadLE(Ity_V128, mkexpr(addr)));
      delta += alen;
      DIP("%s %s,%s\n", opname, dis_buf, nameXMMReg(rG));
   }
   // Sqrt32Fx4 and Sqrt64Fx2 take a rounding mode, which is faked
   // up in the usual way.
   Bool needsIRRM = op == Iop_Sqrt32Fx4 || op == Iop_Sqrt64Fx2;
   /* XXXROUNDINGFIXME */
   IRExpr* res = needsIRRM ? binop(op, get_FAKE_roundingmode(), mkexpr(arg))
                           : unop(op, mkexpr(arg));
   putYMMRegLoAndZU( rG, res );
   *uses_vvvv = False;
   return delta;
}
+
+
/* FIXME: common up with the _128_ version above? */
/* Generic handler for 3-operand VEX.NDS.256 insns in the 0F opcode
   map: D = SL `op` SR, where D comes from modrm.reg, SL ("source
   left") from VEX.vvvv, and SR ("source right") from modrm.rm (reg
   or 256-bit memory).  Exactly one of 'op' and 'opFn' must be
   supplied.  invertLeftArg bitwise-NOTs SL before use; swapArgs
   exchanges the two operands. */
static
Long dis_VEX_NDS_256_AnySimdPfx_0F_WIG (
        /*OUT*/Bool* uses_vvvv, const VexAbiInfo* vbi,
        Prefix pfx, Long delta, const HChar* name,
        /* The actual operation.  Use either 'op' or 'opfn',
           but not both. */
        IROp op, IRTemp(*opFn)(IRTemp,IRTemp),
        Bool invertLeftArg,
        Bool swapArgs
     )
{
   UChar  modrm = getUChar(delta);
   UInt   rD    = gregOfRexRM(pfx, modrm);
   UInt   rSL   = getVexNvvvv(pfx);
   IRTemp tSL   = newTemp(Ity_V256);
   IRTemp tSR   = newTemp(Ity_V256);
   IRTemp addr  = IRTemp_INVALID;
   HChar  dis_buf[50];
   Int    alen  = 0;
   vassert(1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*WIG?*/);

   assign(tSL, invertLeftArg ? unop(Iop_NotV256, getYMMReg(rSL))
                             : getYMMReg(rSL));

   if (epartIsReg(modrm)) {
      UInt rSR = eregOfRexRM(pfx, modrm);
      delta += 1;
      assign(tSR, getYMMReg(rSR));
      DIP("%s %s,%s,%s\n",
          name, nameYMMReg(rSR), nameYMMReg(rSL), nameYMMReg(rD));
   } else {
      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
      delta += alen;
      assign(tSR, loadLE(Ity_V256, mkexpr(addr)));
      DIP("%s %s,%s,%s\n",
          name, dis_buf, nameYMMReg(rSL), nameYMMReg(rD));
   }

   IRTemp res = IRTemp_INVALID;
   if (op != Iop_INVALID) {
      vassert(opFn == NULL);
      res = newTemp(Ity_V256);
      if (requiresRMode(op)) {
         /* The op is a rounding-mode-taking triop; supply a faked-up
            rounding mode as the first argument. */
         IRTemp rm = newTemp(Ity_I32);
         assign(rm, get_FAKE_roundingmode()); /* XXXROUNDINGFIXME */
         assign(res, swapArgs
                        ? triop(op, mkexpr(rm), mkexpr(tSR), mkexpr(tSL))
                        : triop(op, mkexpr(rm), mkexpr(tSL), mkexpr(tSR)));
      } else {
         assign(res, swapArgs
                        ? binop(op, mkexpr(tSR), mkexpr(tSL))
                        : binop(op, mkexpr(tSL), mkexpr(tSR)));
      }
   } else {
      /* Caller supplied an IR generator function instead of an op. */
      vassert(opFn != NULL);
      res = swapArgs ? opFn(tSR, tSL) : opFn(tSL, tSR);
   }

   putYMMReg(rD, mkexpr(res));

   *uses_vvvv = True;
   return delta;
}
+
+
+/* All-lanes AVX256 binary operation:
+   G[255:0] = V[255:0] `op` E[255:0]
+*/
+static Long dis_AVX256_E_V_to_G ( /*OUT*/Bool* uses_vvvv,
+                                  const VexAbiInfo* vbi,
+                                  Prefix pfx, Long delta, 
+                                  const HChar* opname, IROp op )
+{
+   return dis_VEX_NDS_256_AnySimdPfx_0F_WIG(
+             uses_vvvv, vbi, pfx, delta, opname, op,
+             NULL, False/*!invertLeftArg*/, False/*!swapArgs*/
+   );
+}
+
+
+/* Handle a VEX_NDS_256_66_0F_WIG (3-addr) insn, with a simple IROp
+   for the operation, no inversion of the left arg, and no swapping of
+   args. */
+static
+Long dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple (
+        /*OUT*/Bool* uses_vvvv, const VexAbiInfo* vbi,
+        Prefix pfx, Long delta, const HChar* name,
+        IROp op
+     )
+{
+   return dis_VEX_NDS_256_AnySimdPfx_0F_WIG(
+             uses_vvvv, vbi, pfx, delta, name, op, NULL, False, False);
+}
+
+
+/* Handle a VEX_NDS_256_66_0F_WIG (3-addr) insn, using the given IR
+   generator to compute the result, no inversion of the left
+   arg, and no swapping of args. */
+static
+Long dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex (
+        /*OUT*/Bool* uses_vvvv, const VexAbiInfo* vbi,
+        Prefix pfx, Long delta, const HChar* name,
+        IRTemp(*opFn)(IRTemp,IRTemp)
+     )
+{
+   return dis_VEX_NDS_256_AnySimdPfx_0F_WIG(
+             uses_vvvv, vbi, pfx, delta, name,
+             Iop_INVALID, opFn, False, False );
+}
+
+
+/* Handles AVX256 unary E-to-G all-lanes operations. */
+static
+Long dis_AVX256_E_to_G_unary ( /*OUT*/Bool* uses_vvvv,
+                               const VexAbiInfo* vbi,
+                               Prefix pfx, Long delta,
+                               const HChar* opname,
+                               IRTemp (*opFn)(IRTemp) )
+{
+   HChar  dis_buf[50];
+   Int    alen;
+   IRTemp addr;
+   IRTemp res  = newTemp(Ity_V256);
+   IRTemp arg  = newTemp(Ity_V256);
+   UChar  rm   = getUChar(delta);
+   UInt   rG   = gregOfRexRM(pfx, rm);
+   if (epartIsReg(rm)) {
+      UInt rE = eregOfRexRM(pfx,rm);
+      assign(arg, getYMMReg(rE));
+      delta += 1;
+      DIP("%s %s,%s\n", opname, nameYMMReg(rE), nameYMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign(arg, loadLE(Ity_V256, mkexpr(addr)));
+      delta += alen;
+      DIP("%s %s,%s\n", opname, dis_buf, nameYMMReg(rG));
+   }
+   res = opFn(arg);
+   putYMMReg( rG, mkexpr(res) );
+   *uses_vvvv = False;
+   return delta;
+}
+
+
+/* Handles AVX256 unary E-to-G all-lanes operations. */
+static
+Long dis_AVX256_E_to_G_unary_all ( /*OUT*/Bool* uses_vvvv,
+                                   const VexAbiInfo* vbi,
+                                   Prefix pfx, Long delta, 
+                                   const HChar* opname, IROp op )
+{
+   HChar  dis_buf[50];
+   Int    alen;
+   IRTemp addr;
+   IRTemp arg  = newTemp(Ity_V256);
+   UChar  rm   = getUChar(delta);
+   UInt   rG   = gregOfRexRM(pfx, rm);
+   if (epartIsReg(rm)) {
+      UInt rE = eregOfRexRM(pfx,rm);
+      assign(arg, getYMMReg(rE));
+      delta += 1;
+      DIP("%s %s,%s\n", opname, nameYMMReg(rE), nameYMMReg(rG));
+   } else {
+      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign(arg, loadLE(Ity_V256, mkexpr(addr)));
+      delta += alen;
+      DIP("%s %s,%s\n", opname, dis_buf, nameYMMReg(rG));
+   }
+   putYMMReg( rG, unop(op, mkexpr(arg)) );
+   *uses_vvvv = False;
+   return delta;
+}
+
+
/* The use of ReinterpF64asI64 is ugly.  Surely could do better if we
   had a variant of Iop_64x4toV256 that took F64s as args instead. */
/* VCVTDQ2PD (256-bit form): convert four packed signed 32-bit ints
   from the 128-bit E operand (xmm reg or m128) into four F64s in the
   destination YMM register. */
static Long dis_CVTDQ2PD_256 ( const VexAbiInfo* vbi, Prefix pfx,
                               Long delta )
{
   IRTemp addr  = IRTemp_INVALID;
   Int    alen  = 0;
   HChar  dis_buf[50];
   UChar  modrm = getUChar(delta);
   IRTemp sV    = newTemp(Ity_V128);
   UInt   rG    = gregOfRexRM(pfx,modrm);
   if (epartIsReg(modrm)) {
      UInt rE = eregOfRexRM(pfx,modrm);
      assign( sV, getXMMReg(rE) );
      delta += 1;
      DIP("vcvtdq2pd %s,%s\n", nameXMMReg(rE), nameYMMReg(rG));
   } else {
      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
      assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
      delta += alen;
      DIP("vcvtdq2pd %s,%s\n", dis_buf, nameYMMReg(rG) );
   }
   /* Split the source into four I32 lanes, widen each to F64 (this
      conversion is exact, so no rounding mode is needed), and
      reassemble via I64 reinterpretation. */
   IRTemp s3, s2, s1, s0;
   s3 = s2 = s1 = s0 = IRTemp_INVALID;
   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
   IRExpr* res 
      = IRExpr_Qop(
           Iop_64x4toV256,
           unop(Iop_ReinterpF64asI64, unop(Iop_I32StoF64, mkexpr(s3))),
           unop(Iop_ReinterpF64asI64, unop(Iop_I32StoF64, mkexpr(s2))),
           unop(Iop_ReinterpF64asI64, unop(Iop_I32StoF64, mkexpr(s1))),
           unop(Iop_ReinterpF64asI64, unop(Iop_I32StoF64, mkexpr(s0)))
        );
   putYMMReg(rG, res);
   return delta;
}
+
+
/* VCVTPD2PS (256-bit form, "vcvtpd2psy"): convert four packed F64s
   from the 256-bit E operand (ymm reg or m256) into four F32s in the
   low half of the destination XMM register; the upper 128 bits of
   the YMM register are zeroed.  Uses the guest's SSE rounding mode,
   since F64->F32 narrowing can round. */
static Long dis_CVTPD2PS_256 ( const VexAbiInfo* vbi, Prefix pfx,
                               Long delta )
{
   IRTemp addr  = IRTemp_INVALID;
   Int    alen  = 0;
   HChar  dis_buf[50];
   UChar  modrm = getUChar(delta);
   UInt   rG    = gregOfRexRM(pfx,modrm);
   IRTemp argV  = newTemp(Ity_V256);
   IRTemp rmode = newTemp(Ity_I32);
   if (epartIsReg(modrm)) {
      UInt rE = eregOfRexRM(pfx,modrm);
      assign( argV, getYMMReg(rE) );
      delta += 1;
      DIP("vcvtpd2psy %s,%s\n", nameYMMReg(rE), nameXMMReg(rG));
   } else {
      addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
      assign( argV, loadLE(Ity_V256, mkexpr(addr)) );
      delta += alen;
      DIP("vcvtpd2psy %s,%s\n", dis_buf, nameXMMReg(rG) );
   }

   assign( rmode, get_sse_roundingmode() );
   /* Split the source into four I64 lanes, reinterpret each as F64,
      and narrow to F32 under the current rounding mode. */
   IRTemp t3, t2, t1, t0;
   t3 = t2 = t1 = t0 = IRTemp_INVALID;
   breakupV256to64s( argV, &t3, &t2, &t1, &t0 );
#  define CVT(_t)  binop( Iop_F64toF32, mkexpr(rmode), \
                          unop(Iop_ReinterpI64asF64, mkexpr(_t)) )
   putXMMRegLane32F( rG, 3, CVT(t3) );
   putXMMRegLane32F( rG, 2, CVT(t2) );
   putXMMRegLane32F( rG, 1, CVT(t1) );
   putXMMRegLane32F( rG, 0, CVT(t0) );
#  undef CVT
   putYMMRegLane128( rG, 1, mkV128(0) );
   return delta;
}
+
+
+static IRTemp math_VPUNPCK_YMM ( IRTemp tL, IRType tR, IROp op )
+{
+   IRTemp tLhi, tLlo, tRhi, tRlo;
+   tLhi = tLlo = tRhi = tRlo = IRTemp_INVALID;
+   IRTemp res = newTemp(Ity_V256);
+   breakupV256toV128s( tL, &tLhi, &tLlo );
+   breakupV256toV128s( tR, &tRhi, &tRlo );
+   assign( res, binop( Iop_V128HLtoV256,
+                       binop( op, mkexpr(tRhi), mkexpr(tLhi) ),
+                       binop( op, mkexpr(tRlo), mkexpr(tLlo) ) ) );
+   return res;
+}
+
+
+static IRTemp math_VPUNPCKLBW_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_InterleaveLO8x16 );
+}
+
+
+static IRTemp math_VPUNPCKLWD_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_InterleaveLO16x8 );
+}
+
+
+static IRTemp math_VPUNPCKLDQ_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_InterleaveLO32x4 );
+}
+
+
+static IRTemp math_VPUNPCKLQDQ_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_InterleaveLO64x2 );
+}
+
+
+static IRTemp math_VPUNPCKHBW_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_InterleaveHI8x16 );
+}
+
+
+static IRTemp math_VPUNPCKHWD_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_InterleaveHI16x8 );
+}
+
+
+static IRTemp math_VPUNPCKHDQ_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_InterleaveHI32x4 );
+}
+
+
+static IRTemp math_VPUNPCKHQDQ_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_InterleaveHI64x2 );
+}
+
+
+static IRTemp math_VPACKSSWB_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_QNarrowBin16Sto8Sx16 );
+}
+
+
+static IRTemp math_VPACKUSWB_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_QNarrowBin16Sto8Ux16 );
+}
+
+
+static IRTemp math_VPACKSSDW_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_QNarrowBin32Sto16Sx8 );
+}
+
+
+static IRTemp math_VPACKUSDW_YMM ( IRTemp tL, IRTemp tR )
+{
+   return math_VPUNPCK_YMM( tL, tR, Iop_QNarrowBin32Sto16Ux8 );
+}
+
+
+__attribute__((noinline))
+static
+Long dis_ESC_0F__VEX (
+        /*MB_OUT*/DisResult* dres,
+        /*OUT*/   Bool*      uses_vvvv,
+        Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+        Bool         resteerCisOk,
+        void*        callback_opaque,
+        const VexArchInfo* archinfo,
+        const VexAbiInfo*  vbi,
+        Prefix pfx, Int sz, Long deltaIN 
+     )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   *uses_vvvv = False;
+
+   switch (opc) {
+
+   case 0x10:
+      /* VMOVSD m64, xmm1 = VEX.LIG.F2.0F.WIG 10 /r */
+      /* Move 64 bits from E (mem only) to G (lo half xmm).
+         Bits 255-64 of the dest are zeroed out. */
+      if (haveF2no66noF3(pfx) && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         UInt   rG   = gregOfRexRM(pfx,modrm);
+         IRTemp z128 = newTemp(Ity_V128);
+         assign(z128, mkV128(0));
+         putXMMReg( rG, mkexpr(z128) );
+         /* FIXME: ALIGNMENT CHECK? */
+         putXMMRegLane64( rG, 0, loadLE(Ity_I64, mkexpr(addr)) );
+         putYMMRegLane128( rG, 1, mkexpr(z128) );
+         DIP("vmovsd %s,%s\n", dis_buf, nameXMMReg(rG));
+         delta += alen;
+         goto decode_success;
+      }
+      /* VMOVSD xmm3, xmm2, xmm1 = VEX.LIG.F2.0F.WIG 10 /r */
+      /* Reg form. */
+      if (haveF2no66noF3(pfx) && epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rE    = eregOfRexRM(pfx, modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         delta++;
+         DIP("vmovsd %s,%s,%s\n",
+             nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+         IRTemp res = newTemp(Ity_V128);
+         assign(res, binop(Iop_64HLtoV128,
+                           getXMMRegLane64(rV, 1),
+                           getXMMRegLane64(rE, 0)));
+         putYMMRegLoAndZU(rG, mkexpr(res));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VMOVSS m32, xmm1 = VEX.LIG.F3.0F.WIG 10 /r */
+      /* Move 32 bits from E (mem only) to G (lo half xmm).
+         Bits 255-32 of the dest are zeroed out. */
+      if (haveF3no66noF2(pfx) && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         UInt   rG   = gregOfRexRM(pfx,modrm);
+         IRTemp z128 = newTemp(Ity_V128);
+         assign(z128, mkV128(0));
+         putXMMReg( rG, mkexpr(z128) );
+         /* FIXME: ALIGNMENT CHECK? */
+         putXMMRegLane32( rG, 0, loadLE(Ity_I32, mkexpr(addr)) );
+         putYMMRegLane128( rG, 1, mkexpr(z128) );
+         DIP("vmovss %s,%s\n", dis_buf, nameXMMReg(rG));
+         delta += alen;
+         goto decode_success;
+      }
+      /* VMOVSS xmm3, xmm2, xmm1 = VEX.LIG.F3.0F.WIG 10 /r */
+      /* Reg form. */
+      if (haveF3no66noF2(pfx) && epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rE    = eregOfRexRM(pfx, modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         delta++;
+         DIP("vmovss %s,%s,%s\n",
+             nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+         IRTemp res = newTemp(Ity_V128);
+         assign( res, binop( Iop_64HLtoV128,
+                             getXMMRegLane64(rV, 1),
+                             binop(Iop_32HLto64,
+                                   getXMMRegLane32(rV, 1),
+                                   getXMMRegLane32(rE, 0)) ) );
+         putYMMRegLoAndZU(rG, mkexpr(res));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VMOVUPD xmm2/m128, xmm1 = VEX.128.66.0F.WIG 10 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMRegLoAndZU( rG, getXMMReg( rE ));
+            DIP("vmovupd %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putYMMRegLoAndZU( rG, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("vmovupd %s,%s\n", dis_buf, nameXMMReg(rG));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVUPD ymm2/m256, ymm1 = VEX.256.66.0F.WIG 10 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMReg( rG, getYMMReg( rE ));
+            DIP("vmovupd %s,%s\n", nameYMMReg(rE), nameYMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putYMMReg( rG, loadLE(Ity_V256, mkexpr(addr)) );
+            DIP("vmovupd %s,%s\n", dis_buf, nameYMMReg(rG));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVUPS xmm2/m128, xmm1 = VEX.128.0F.WIG 10 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMRegLoAndZU( rG, getXMMReg( rE ));
+            DIP("vmovups %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putYMMRegLoAndZU( rG, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("vmovups %s,%s\n", dis_buf, nameXMMReg(rG));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVUPS ymm2/m256, ymm1 = VEX.256.0F.WIG 10 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMReg( rG, getYMMReg( rE ));
+            DIP("vmovups %s,%s\n", nameYMMReg(rE), nameYMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putYMMReg( rG, loadLE(Ity_V256, mkexpr(addr)) );
+            DIP("vmovups %s,%s\n", dis_buf, nameYMMReg(rG));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x11:
+      /* VMOVSD xmm1, m64 = VEX.LIG.F2.0F.WIG 11 /r */
+      /* Move 64 bits from G (low half xmm) to mem only. */
+      if (haveF2no66noF3(pfx) && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         UInt   rG   = gregOfRexRM(pfx,modrm);
+         /* FIXME: ALIGNMENT CHECK? */
+         storeLE( mkexpr(addr), getXMMRegLane64(rG, 0));
+         DIP("vmovsd %s,%s\n", nameXMMReg(rG), dis_buf);
+         delta += alen;
+         goto decode_success;
+      }
+      /* VMOVSD xmm3, xmm2, xmm1 = VEX.LIG.F2.0F.WIG 11 /r */
+      /* Reg form. */
+      if (haveF2no66noF3(pfx) && epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rE    = eregOfRexRM(pfx, modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         delta++;
+         DIP("vmovsd %s,%s,%s\n",
+             nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+         IRTemp res = newTemp(Ity_V128);
+         assign(res, binop(Iop_64HLtoV128,
+                           getXMMRegLane64(rV, 1),
+                           getXMMRegLane64(rE, 0)));
+         putYMMRegLoAndZU(rG, mkexpr(res));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VMOVSS xmm1, m64 = VEX.LIG.F3.0F.WIG 11 /r */
+      /* Move 32 bits from G (low 1/4 xmm) to mem only. */
+      if (haveF3no66noF2(pfx) && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         UInt   rG   = gregOfRexRM(pfx,modrm);
+         /* FIXME: ALIGNMENT CHECK? */
+         storeLE( mkexpr(addr), getXMMRegLane32(rG, 0));
+         DIP("vmovss %s,%s\n", nameXMMReg(rG), dis_buf);
+         delta += alen;
+         goto decode_success;
+      }
+      /* VMOVSS xmm3, xmm2, xmm1 = VEX.LIG.F3.0F.WIG 11 /r */
+      /* Reg form. */
+      if (haveF3no66noF2(pfx) && epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rE    = eregOfRexRM(pfx, modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         delta++;
+         DIP("vmovss %s,%s,%s\n",
+             nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+         IRTemp res = newTemp(Ity_V128);
+         assign( res, binop( Iop_64HLtoV128,
+                             getXMMRegLane64(rV, 1),
+                             binop(Iop_32HLto64,
+                                   getXMMRegLane32(rV, 1),
+                                   getXMMRegLane32(rE, 0)) ) );
+         putYMMRegLoAndZU(rG, mkexpr(res));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VMOVUPD xmm1, xmm2/m128 = VEX.128.66.0F.WIG 11 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMRegLoAndZU( rE, getXMMReg(rG) );
+            DIP("vmovupd %s,%s\n", nameXMMReg(rG), nameXMMReg(rE));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getXMMReg(rG) );
+            DIP("vmovupd %s,%s\n", nameXMMReg(rG), dis_buf);
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVUPD ymm1, ymm2/m256 = VEX.256.66.0F.WIG 11 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMReg( rE, getYMMReg(rG) );
+            DIP("vmovupd %s,%s\n", nameYMMReg(rG), nameYMMReg(rE));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getYMMReg(rG) );
+            DIP("vmovupd %s,%s\n", nameYMMReg(rG), dis_buf);
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVUPS xmm1, xmm2/m128 = VEX.128.0F.WIG 11 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMRegLoAndZU( rE, getXMMReg(rG) );
+            DIP("vmovups %s,%s\n", nameXMMReg(rG), nameXMMReg(rE));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getXMMReg(rG) );
+            DIP("vmovups %s,%s\n", nameXMMReg(rG), dis_buf);
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVUPS ymm1, ymm2/m256 = VEX.256.0F.WIG 11 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMReg( rE, getYMMReg(rG) );
+            DIP("vmovups %s,%s\n", nameYMMReg(rG), nameYMMReg(rE));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getYMMReg(rG) );
+            DIP("vmovups %s,%s\n", nameYMMReg(rG), dis_buf);
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x12:
+      /* VMOVDDUP xmm2/m64, xmm1 = VEX.128.F2.0F.WIG /12 r */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_MOVDDUP_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VMOVDDUP ymm2/m256, ymm1 = VEX.256.F2.0F.WIG /12 r */
+      if (haveF2no66noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_MOVDDUP_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      /* VMOVHLPS xmm3, xmm2, xmm1 = VEX.NDS.128.0F.WIG 12 /r */
+      /* Insn only exists in reg form */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rE    = eregOfRexRM(pfx, modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         delta++;
+         DIP("vmovhlps %s,%s,%s\n",
+             nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+         IRTemp res = newTemp(Ity_V128);
+         assign(res, binop(Iop_64HLtoV128,
+                           getXMMRegLane64(rV, 1),
+                           getXMMRegLane64(rE, 1)));
+         putYMMRegLoAndZU(rG, mkexpr(res));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VMOVLPS m64, xmm1, xmm2 = VEX.NDS.128.0F.WIG 12 /r */
+      /* Insn exists only in mem form, it appears. */
+      /* VMOVLPD m64, xmm1, xmm2 = VEX.NDS.128.66.0F.WIG 12 /r */
+      /* Insn exists only in mem form, it appears. */
+      if ((have66noF2noF3(pfx) || haveNo66noF2noF3(pfx))
+          && 0==getVexL(pfx)/*128*/ && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         DIP("vmovlpd %s,%s,%s\n",
+             dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+         IRTemp res = newTemp(Ity_V128);
+         assign(res, binop(Iop_64HLtoV128,
+                           getXMMRegLane64(rV, 1),
+                           loadLE(Ity_I64, mkexpr(addr))));
+         putYMMRegLoAndZU(rG, mkexpr(res));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VMOVSLDUP xmm2/m128, xmm1 = VEX.NDS.128.F3.0F.WIG 12 /r */
+      if (haveF3no66noF2(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_MOVSxDUP_128( vbi, pfx, delta, True/*isAvx*/,
+                                   True/*isL*/ );
+         goto decode_success;
+      }
+      /* VMOVSLDUP ymm2/m256, ymm1 = VEX.NDS.256.F3.0F.WIG 12 /r */
+      if (haveF3no66noF2(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_MOVSxDUP_256( vbi, pfx, delta, True/*isL*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x13:
+      /* VMOVLPS xmm1, m64 = VEX.128.0F.WIG 13 /r */
+      /* Insn exists only in mem form, it appears. */
+      /* VMOVLPD xmm1, m64 = VEX.128.66.0F.WIG 13 /r */
+      /* Insn exists only in mem form, it appears. */
+      if ((have66noF2noF3(pfx) || haveNo66noF2noF3(pfx))
+          && 0==getVexL(pfx)/*128*/ && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         storeLE( mkexpr(addr), getXMMRegLane64( rG, 0));
+         DIP("vmovlpd %s,%s\n", nameXMMReg(rG), dis_buf);
+         goto decode_success;
+      }
+      break;
+
+   case 0x14:
+   case 0x15:
+      /* VUNPCKLPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.0F.WIG 14 /r */
+      /* VUNPCKHPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.0F.WIG 15 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         Bool   hi    = opc == 0x15;
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx,modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp eV    = newTemp(Ity_V128);
+         IRTemp vV    = newTemp(Ity_V128);
+         assign( vV, getXMMReg(rV) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getXMMReg(rE) );
+            delta += 1;
+            DIP("vunpck%sps %s,%s\n", hi ? "h" : "l",
+                nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("vunpck%sps %s,%s\n", hi ? "h" : "l",
+                dis_buf, nameXMMReg(rG));
+         }
+         IRTemp res = math_UNPCKxPS_128( eV, vV, hi );
+         putYMMRegLoAndZU( rG, mkexpr(res) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VUNPCKLPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.0F.WIG 14 /r */
+      /* VUNPCKHPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.0F.WIG 15 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         Bool   hi    = opc == 0x15;
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx,modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp eV    = newTemp(Ity_V256);
+         IRTemp vV    = newTemp(Ity_V256);
+         assign( vV, getYMMReg(rV) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getYMMReg(rE) );
+            delta += 1;
+            DIP("vunpck%sps %s,%s\n", hi ? "h" : "l",
+                nameYMMReg(rE), nameYMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( eV, loadLE(Ity_V256, mkexpr(addr)) );
+            delta += alen;
+            DIP("vunpck%sps %s,%s\n", hi ? "h" : "l",
+                dis_buf, nameYMMReg(rG));
+         }
+         IRTemp res = math_UNPCKxPS_256( eV, vV, hi );
+         putYMMReg( rG, mkexpr(res) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VUNPCKLPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 14 /r */
+      /* VUNPCKHPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 15 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         Bool   hi    = opc == 0x15;
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx,modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp eV    = newTemp(Ity_V128);
+         IRTemp vV    = newTemp(Ity_V128);
+         assign( vV, getXMMReg(rV) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getXMMReg(rE) );
+            delta += 1;
+            DIP("vunpck%spd %s,%s\n", hi ? "h" : "l",
+                nameXMMReg(rE), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("vunpck%spd %s,%s\n", hi ? "h" : "l",
+                dis_buf, nameXMMReg(rG));
+         }
+         IRTemp res = math_UNPCKxPD_128( eV, vV, hi );
+         putYMMRegLoAndZU( rG, mkexpr(res) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VUNPCKLPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 14 /r */
+      /* VUNPCKHPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 15 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         Bool   hi    = opc == 0x15;
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx,modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp eV    = newTemp(Ity_V256);
+         IRTemp vV    = newTemp(Ity_V256);
+         assign( vV, getYMMReg(rV) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getYMMReg(rE) );
+            delta += 1;
+            DIP("vunpck%spd %s,%s\n", hi ? "h" : "l",
+                nameYMMReg(rE), nameYMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( eV, loadLE(Ity_V256, mkexpr(addr)) );
+            delta += alen;
+            DIP("vunpck%spd %s,%s\n", hi ? "h" : "l",
+                dis_buf, nameYMMReg(rG));
+         }
+         IRTemp res = math_UNPCKxPD_256( eV, vV, hi );
+         putYMMReg( rG, mkexpr(res) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x16:
+      /* VMOVLHPS xmm3, xmm2, xmm1 = VEX.NDS.128.0F.WIG 16 /r */
+      /* Insn only exists in reg form */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rE    = eregOfRexRM(pfx, modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         delta++;
+         DIP("vmovlhps %s,%s,%s\n",
+             nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+         IRTemp res = newTemp(Ity_V128);
+         assign(res, binop(Iop_64HLtoV128,
+                           getXMMRegLane64(rE, 0),
+                           getXMMRegLane64(rV, 0)));
+         putYMMRegLoAndZU(rG, mkexpr(res));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VMOVHPS m64, xmm1, xmm2 = VEX.NDS.128.0F.WIG 16 /r */
+      /* Insn exists only in mem form, it appears. */
+      /* VMOVHPD m64, xmm1, xmm2 = VEX.NDS.128.66.0F.WIG 16 /r */
+      /* Insn exists only in mem form, it appears. */
+      if ((have66noF2noF3(pfx) || haveNo66noF2noF3(pfx))
+          && 0==getVexL(pfx)/*128*/ && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         DIP("vmovhp%c %s,%s,%s\n", have66(pfx) ? 'd' : 's',
+             dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+         IRTemp res = newTemp(Ity_V128);
+         assign(res, binop(Iop_64HLtoV128,
+                           loadLE(Ity_I64, mkexpr(addr)),
+                           getXMMRegLane64(rV, 0)));
+         putYMMRegLoAndZU(rG, mkexpr(res));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VMOVSHDUP xmm2/m128, xmm1 = VEX.NDS.128.F3.0F.WIG 16 /r */
+      if (haveF3no66noF2(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_MOVSxDUP_128( vbi, pfx, delta, True/*isAvx*/,
+                                   False/*!isL*/ );
+         goto decode_success;
+      }
+      /* VMOVSHDUP ymm2/m256, ymm1 = VEX.NDS.256.F3.0F.WIG 16 /r */
+      if (haveF3no66noF2(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_MOVSxDUP_256( vbi, pfx, delta, False/*!isL*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x17:
+      /* VMOVHPS xmm1, m64 = VEX.128.0F.WIG 17 /r */
+      /* Insn exists only in mem form, it appears. */
+      /* VMOVHPD xmm1, m64 = VEX.128.66.0F.WIG 17 /r */
+      /* Insn exists only in mem form, it appears. */
+      if ((have66noF2noF3(pfx) || haveNo66noF2noF3(pfx))
+          && 0==getVexL(pfx)/*128*/ && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         storeLE( mkexpr(addr), getXMMRegLane64( rG, 1));
+         DIP("vmovhp%c %s,%s\n", have66(pfx) ? 'd' : 's',
+             nameXMMReg(rG), dis_buf);
+         goto decode_success;
+      }
+      break;
+
+   case 0x28:
+      /* VMOVAPD xmm2/m128, xmm1 = VEX.128.66.0F.WIG 28 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMRegLoAndZU( rG, getXMMReg( rE ));
+            DIP("vmovapd %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            putYMMRegLoAndZU( rG, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("vmovapd %s,%s\n", dis_buf, nameXMMReg(rG));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVAPD ymm2/m256, ymm1 = VEX.256.66.0F.WIG 28 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMReg( rG, getYMMReg( rE ));
+            DIP("vmovapd %s,%s\n", nameYMMReg(rE), nameYMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_32_aligned( addr );
+            putYMMReg( rG, loadLE(Ity_V256, mkexpr(addr)) );
+            DIP("vmovapd %s,%s\n", dis_buf, nameYMMReg(rG));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVAPS xmm2/m128, xmm1 = VEX.128.0F.WIG 28 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMRegLoAndZU( rG, getXMMReg( rE ));
+            DIP("vmovaps %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            putYMMRegLoAndZU( rG, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("vmovaps %s,%s\n", dis_buf, nameXMMReg(rG));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVAPS ymm2/m256, ymm1 = VEX.256.0F.WIG 28 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMReg( rG, getYMMReg( rE ));
+            DIP("vmovaps %s,%s\n", nameYMMReg(rE), nameYMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_32_aligned( addr );
+            putYMMReg( rG, loadLE(Ity_V256, mkexpr(addr)) );
+            DIP("vmovaps %s,%s\n", dis_buf, nameYMMReg(rG));
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x29:
+      /* VMOVAPD xmm1, xmm2/m128 = VEX.128.66.0F.WIG 29 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMRegLoAndZU( rE, getXMMReg(rG) );
+            DIP("vmovapd %s,%s\n", nameXMMReg(rG), nameXMMReg(rE));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            storeLE( mkexpr(addr), getXMMReg(rG) );
+            DIP("vmovapd %s,%s\n", nameXMMReg(rG), dis_buf );
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVAPD ymm1, ymm2/m256 = VEX.256.66.0F.WIG 29 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMReg( rE, getYMMReg(rG) );
+            DIP("vmovapd %s,%s\n", nameYMMReg(rG), nameYMMReg(rE));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_32_aligned( addr );
+            storeLE( mkexpr(addr), getYMMReg(rG) );
+            DIP("vmovapd %s,%s\n", nameYMMReg(rG), dis_buf );
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVAPS xmm1, xmm2/m128 = VEX.128.0F.WIG 29 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMRegLoAndZU( rE, getXMMReg(rG) );
+            DIP("vmovaps %s,%s\n", nameXMMReg(rG), nameXMMReg(rE));
+            delta += 1;
+            goto decode_success;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            storeLE( mkexpr(addr), getXMMReg(rG) );
+            DIP("vmovaps %s,%s\n", nameXMMReg(rG), dis_buf );
+            delta += alen;
+            goto decode_success;
+         }
+      }
+      /* VMOVAPS ymm1, ymm2/m256 = VEX.256.0F.WIG 29 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putYMMReg( rE, getYMMReg(rG) );
+            DIP("vmovaps %s,%s\n", nameYMMReg(rG), nameYMMReg(rE));
+            delta += 1;
+            goto decode_success;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_32_aligned( addr );
+            storeLE( mkexpr(addr), getYMMReg(rG) );
+            DIP("vmovaps %s,%s\n", nameYMMReg(rG), dis_buf );
+            delta += alen;
+            goto decode_success;
+         }
+      }
+      break;
+
+   case 0x2A: {
+      IRTemp rmode = newTemp(Ity_I32);
+      assign( rmode, get_sse_roundingmode() );
+      /* VCVTSI2SD r/m32, xmm2, xmm1 = VEX.NDS.LIG.F2.0F.W0 2A /r */
+      if (haveF2no66noF3(pfx) && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rV    = getVexNvvvv(pfx);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp arg32 = newTemp(Ity_I32);
+         if (epartIsReg(modrm)) {
+            UInt rS = eregOfRexRM(pfx,modrm);
+            assign( arg32, getIReg32(rS) );
+            delta += 1;
+            DIP("vcvtsi2sdl %s,%s,%s\n",
+                nameIReg32(rS), nameXMMReg(rV), nameXMMReg(rD));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( arg32, loadLE(Ity_I32, mkexpr(addr)) );
+            delta += alen;
+            DIP("vcvtsi2sdl %s,%s,%s\n",
+                dis_buf, nameXMMReg(rV), nameXMMReg(rD));
+         }
+         putXMMRegLane64F( rD, 0,
+                           unop(Iop_I32StoF64, mkexpr(arg32)));
+         putXMMRegLane64( rD, 1, getXMMRegLane64( rV, 1 ));
+         putYMMRegLane128( rD, 1, mkV128(0) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VCVTSI2SD r/m64, xmm2, xmm1 = VEX.NDS.LIG.F2.0F.W1 2A /r */
+      if (haveF2no66noF3(pfx) && 1==getRexW(pfx)/*W1*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rV    = getVexNvvvv(pfx);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp arg64 = newTemp(Ity_I64);
+         if (epartIsReg(modrm)) {
+            UInt rS = eregOfRexRM(pfx,modrm);
+            assign( arg64, getIReg64(rS) );
+            delta += 1;
+            DIP("vcvtsi2sdq %s,%s,%s\n",
+                nameIReg64(rS), nameXMMReg(rV), nameXMMReg(rD));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("vcvtsi2sdq %s,%s,%s\n",
+                dis_buf, nameXMMReg(rV), nameXMMReg(rD));
+         }
+         putXMMRegLane64F( rD, 0,
+                           binop( Iop_I64StoF64,
+                                  get_sse_roundingmode(),
+                                  mkexpr(arg64)) );
+         putXMMRegLane64( rD, 1, getXMMRegLane64( rV, 1 ));
+         putYMMRegLane128( rD, 1, mkV128(0) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VCVTSI2SS r/m64, xmm2, xmm1 = VEX.NDS.LIG.F3.0F.W1 2A /r */
+      if (haveF3no66noF2(pfx) && 1==getRexW(pfx)/*W1*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rV    = getVexNvvvv(pfx);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp arg64 = newTemp(Ity_I64);
+         if (epartIsReg(modrm)) {
+            UInt rS = eregOfRexRM(pfx,modrm);
+            assign( arg64, getIReg64(rS) );
+            delta += 1;
+            DIP("vcvtsi2ssq %s,%s,%s\n",
+                nameIReg64(rS), nameXMMReg(rV), nameXMMReg(rD));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+            delta += alen;
+            DIP("vcvtsi2ssq %s,%s,%s\n",
+                dis_buf, nameXMMReg(rV), nameXMMReg(rD));
+         }
+         putXMMRegLane32F( rD, 0,
+                           binop(Iop_F64toF32,
+                                 mkexpr(rmode),
+                                 binop(Iop_I64StoF64, mkexpr(rmode),
+                                                      mkexpr(arg64)) ) );
+         putXMMRegLane32( rD, 1, getXMMRegLane32( rV, 1 ));
+         putXMMRegLane64( rD, 1, getXMMRegLane64( rV, 1 ));
+         putYMMRegLane128( rD, 1, mkV128(0) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VCVTSI2SS r/m32, xmm2, xmm1 = VEX.NDS.LIG.F3.0F.W0 2A /r */
+      if (haveF3no66noF2(pfx) && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rV    = getVexNvvvv(pfx);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp arg32 = newTemp(Ity_I32);
+         if (epartIsReg(modrm)) {
+            UInt rS = eregOfRexRM(pfx,modrm);
+            assign( arg32, getIReg32(rS) );
+            delta += 1;
+            DIP("vcvtsi2ssl %s,%s,%s\n",
+                nameIReg32(rS), nameXMMReg(rV), nameXMMReg(rD));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( arg32, loadLE(Ity_I32, mkexpr(addr)) );
+            delta += alen;
+            DIP("vcvtsi2ssl %s,%s,%s\n",
+                dis_buf, nameXMMReg(rV), nameXMMReg(rD));
+         }
+         putXMMRegLane32F( rD, 0,
+                           binop(Iop_F64toF32,
+                                 mkexpr(rmode),
+                                 unop(Iop_I32StoF64, mkexpr(arg32)) ) );
+         putXMMRegLane32( rD, 1, getXMMRegLane32( rV, 1 ));
+         putXMMRegLane64( rD, 1, getXMMRegLane64( rV, 1 ));
+         putYMMRegLane128( rD, 1, mkV128(0) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+   }
+
+   case 0x2B:
+      /* VMOVNTPD xmm1, m128 = VEX.128.66.0F.WIG 2B /r */
+      /* VMOVNTPS xmm1, m128 = VEX.128.0F.WIG 2B /r */
+      if ((have66noF2noF3(pfx) || haveNo66noF2noF3(pfx))
+          && 0==getVexL(pfx)/*128*/ && !epartIsReg(getUChar(delta))) {
+         UChar  modrm = getUChar(delta);
+         UInt   rS    = gregOfRexRM(pfx, modrm);
+         IRTemp tS    = newTemp(Ity_V128);
+         assign(tS, getXMMReg(rS));
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         gen_SEGV_if_not_16_aligned(addr);
+         storeLE(mkexpr(addr), mkexpr(tS));
+         DIP("vmovntp%c %s,%s\n", have66(pfx) ? 'd' : 's',
+             nameXMMReg(rS), dis_buf);
+         goto decode_success;
+      }
+      /* VMOVNTPD ymm1, m256 = VEX.256.66.0F.WIG 2B /r */
+      /* VMOVNTPS ymm1, m256 = VEX.256.0F.WIG 2B /r */
+      if ((have66noF2noF3(pfx) || haveNo66noF2noF3(pfx))
+          && 1==getVexL(pfx)/*256*/ && !epartIsReg(getUChar(delta))) {
+         UChar  modrm = getUChar(delta);
+         UInt   rS    = gregOfRexRM(pfx, modrm);
+         IRTemp tS    = newTemp(Ity_V256);
+         assign(tS, getYMMReg(rS));
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         gen_SEGV_if_not_32_aligned(addr);
+         storeLE(mkexpr(addr), mkexpr(tS));
+         DIP("vmovntp%c %s,%s\n", have66(pfx) ? 'd' : 's',
+             nameYMMReg(rS), dis_buf);
+         goto decode_success;
+      }
+      break;
+
+   case 0x2C:
+      /* VCVTTSD2SI xmm1/m64, r32 = VEX.LIG.F2.0F.W0 2C /r */
+      if (haveF2no66noF3(pfx) && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_CVTxSD2SI( vbi, pfx, delta, True/*isAvx*/, opc, 4);
+         goto decode_success;
+      }
+      /* VCVTTSD2SI xmm1/m64, r64 = VEX.LIG.F2.0F.W1 2C /r */
+      if (haveF2no66noF3(pfx) && 1==getRexW(pfx)/*W1*/) {
+         delta = dis_CVTxSD2SI( vbi, pfx, delta, True/*isAvx*/, opc, 8);
+         goto decode_success;
+      }
+      /* VCVTTSS2SI xmm1/m32, r32 = VEX.LIG.F3.0F.W0 2C /r */
+      if (haveF3no66noF2(pfx) && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_CVTxSS2SI( vbi, pfx, delta, True/*isAvx*/, opc, 4);
+         goto decode_success;
+      }
+      /* VCVTTSS2SI xmm1/m32, r64 = VEX.LIG.F3.0F.W1 2C /r */
+      if (haveF3no66noF2(pfx) && 1==getRexW(pfx)/*W1*/) {
+         delta = dis_CVTxSS2SI( vbi, pfx, delta, True/*isAvx*/, opc, 8);
+         goto decode_success;
+      }
+      break;
+
+   case 0x2D:
+      /* VCVTSD2SI xmm1/m64, r32 = VEX.LIG.F2.0F.W0 2D /r */
+      if (haveF2no66noF3(pfx) && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_CVTxSD2SI( vbi, pfx, delta, True/*isAvx*/, opc, 4);
+         goto decode_success;
+      }
+      /* VCVTSD2SI xmm1/m64, r64 = VEX.LIG.F2.0F.W1 2D /r */
+      if (haveF2no66noF3(pfx) && 1==getRexW(pfx)/*W1*/) {
+         delta = dis_CVTxSD2SI( vbi, pfx, delta, True/*isAvx*/, opc, 8);
+         goto decode_success;
+      }
+      /* VCVTSS2SI xmm1/m32, r32 = VEX.LIG.F3.0F.W0 2D /r */
+      if (haveF3no66noF2(pfx) && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_CVTxSS2SI( vbi, pfx, delta, True/*isAvx*/, opc, 4);
+         goto decode_success;
+      }
+      /* VCVTSS2SI xmm1/m32, r64 = VEX.LIG.F3.0F.W1 2D /r */
+      if (haveF3no66noF2(pfx) && 1==getRexW(pfx)/*W1*/) {
+         delta = dis_CVTxSS2SI( vbi, pfx, delta, True/*isAvx*/, opc, 8);
+         goto decode_success;
+      }
+      break;
+
+   case 0x2E:
+   case 0x2F:
+      /* VUCOMISD xmm2/m64, xmm1 = VEX.LIG.66.0F.WIG 2E /r */
+      /* VCOMISD  xmm2/m64, xmm1 = VEX.LIG.66.0F.WIG 2F /r */
+      if (have66noF2noF3(pfx)) {
+         delta = dis_COMISD( vbi, pfx, delta, True/*isAvx*/, opc );
+         goto decode_success;
+      }
+      /* VUCOMISS xmm2/m32, xmm1 = VEX.LIG.0F.WIG 2E /r */
+      /* VCOMISS xmm2/m32, xmm1  = VEX.LIG.0F.WIG 2F /r */
+      if (haveNo66noF2noF3(pfx)) {
+         delta = dis_COMISS( vbi, pfx, delta, True/*isAvx*/, opc );
+         goto decode_success;
+      }
+      break;
+
+   case 0x50:
+      /* VMOVMSKPD xmm2, r32 = VEX.128.66.0F.WIG 50 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_MOVMSKPD_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VMOVMSKPD ymm2, r32 = VEX.256.66.0F.WIG 50 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_MOVMSKPD_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      /* VMOVMSKPS xmm2, r32 = VEX.128.0F.WIG 50 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_MOVMSKPS_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VMOVMSKPS ymm2, r32 = VEX.256.0F.WIG 50 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_MOVMSKPS_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      break;
+
+   case 0x51:
+      /* VSQRTSS xmm3/m32(E), xmm2(V), xmm1(G) = VEX.NDS.LIG.F3.0F.WIG 51 /r */
+      if (haveF3no66noF2(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo32_unary(
+                    uses_vvvv, vbi, pfx, delta, "vsqrtss", Iop_Sqrt32F0x4 );
+         goto decode_success;
+      }
+      /* VSQRTPS xmm2/m128(E), xmm1(G) = VEX.NDS.128.0F.WIG 51 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_to_G_unary_all(
+                    uses_vvvv, vbi, pfx, delta, "vsqrtps", Iop_Sqrt32Fx4 );
+         goto decode_success;
+      }
+      /* VSQRTPS ymm2/m256(E), ymm1(G) = VEX.NDS.256.0F.WIG 51 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_to_G_unary_all(
+                    uses_vvvv, vbi, pfx, delta, "vsqrtps", Iop_Sqrt32Fx8 );
+         goto decode_success;
+      }
+      /* VSQRTSD xmm3/m64(E), xmm2(V), xmm1(G) = VEX.NDS.LIG.F2.0F.WIG 51 /r */
+      if (haveF2no66noF3(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo64_unary(
+                    uses_vvvv, vbi, pfx, delta, "vsqrtsd", Iop_Sqrt64F0x2 );
+         goto decode_success;
+      }
+      /* VSQRTPD xmm2/m128(E), xmm1(G) = VEX.NDS.128.66.0F.WIG 51 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_to_G_unary_all(
+                    uses_vvvv, vbi, pfx, delta, "vsqrtpd", Iop_Sqrt64Fx2 );
+         goto decode_success;
+      }
+      /* VSQRTPD ymm2/m256(E), ymm1(G) = VEX.NDS.256.66.0F.WIG 51 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_to_G_unary_all(
+                    uses_vvvv, vbi, pfx, delta, "vsqrtpd", Iop_Sqrt64Fx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x52:
+      /* VRSQRTSS xmm3/m32(E), xmm2(V), xmm1(G) = VEX.NDS.LIG.F3.0F.WIG 52 /r */
+      if (haveF3no66noF2(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo32_unary(
+                    uses_vvvv, vbi, pfx, delta, "vrsqrtss",
+                    Iop_RSqrtEst32F0x4 );
+         goto decode_success;
+      }
+      /* VRSQRTPS xmm2/m128(E), xmm1(G) = VEX.NDS.128.0F.WIG 52 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_to_G_unary_all(
+                    uses_vvvv, vbi, pfx, delta, "vrsqrtps", Iop_RSqrtEst32Fx4 );
+         goto decode_success;
+      }
+      /* VRSQRTPS ymm2/m256(E), ymm1(G) = VEX.NDS.256.0F.WIG 52 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_to_G_unary_all(
+                    uses_vvvv, vbi, pfx, delta, "vrsqrtps", Iop_RSqrtEst32Fx8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x53:
+      /* VRCPSS xmm3/m32(E), xmm2(V), xmm1(G) = VEX.NDS.LIG.F3.0F.WIG 53 /r */
+      if (haveF3no66noF2(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo32_unary(
+                    uses_vvvv, vbi, pfx, delta, "vrcpss", Iop_RecipEst32F0x4 );
+         goto decode_success;
+      }
+      /* VRCPPS xmm2/m128(E), xmm1(G) = VEX.NDS.128.0F.WIG 53 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_to_G_unary_all(
+                    uses_vvvv, vbi, pfx, delta, "vrcpps", Iop_RecipEst32Fx4 );
+         goto decode_success;
+      }
+      /* VRCPPS ymm2/m256(E), ymm1(G) = VEX.NDS.256.0F.WIG 53 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_to_G_unary_all(
+                    uses_vvvv, vbi, pfx, delta, "vrcpps", Iop_RecipEst32Fx8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x54:
+      /* VANDPD r/m, rV, r ::: r = rV & r/m */
+      /* VANDPD = VEX.NDS.128.66.0F.WIG 54 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vandpd", Iop_AndV128 );
+         goto decode_success;
+      }
+      /* VANDPD r/m, rV, r ::: r = rV & r/m */
+      /* VANDPD = VEX.NDS.256.66.0F.WIG 54 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vandpd", Iop_AndV256 );
+         goto decode_success;
+      }
+      /* VANDPS = VEX.NDS.128.0F.WIG 54 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vandps", Iop_AndV128 );
+         goto decode_success;
+      }
+      /* VANDPS = VEX.NDS.256.0F.WIG 54 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vandps", Iop_AndV256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x55:
+      /* VANDNPD r/m, rV, r ::: r = (not rV) & r/m */
+      /* VANDNPD = VEX.NDS.128.66.0F.WIG 55 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vandpd", Iop_AndV128,
+                    NULL, True/*invertLeftArg*/, False/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VANDNPD = VEX.NDS.256.66.0F.WIG 55 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vandpd", Iop_AndV256,
+                    NULL, True/*invertLeftArg*/, False/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VANDNPS = VEX.NDS.128.0F.WIG 55 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vandps", Iop_AndV128,
+                    NULL, True/*invertLeftArg*/, False/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VANDNPS = VEX.NDS.256.0F.WIG 55 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vandps", Iop_AndV256,
+                    NULL, True/*invertLeftArg*/, False/*swapArgs*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x56:
+      /* VORPD r/m, rV, r ::: r = rV | r/m */
+      /* VORPD = VEX.NDS.128.66.0F.WIG 56 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vorpd", Iop_OrV128 );
+         goto decode_success;
+      }
+      /* VORPD r/m, rV, r ::: r = rV | r/m */
+      /* VORPD = VEX.NDS.256.66.0F.WIG 56 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vorpd", Iop_OrV256 );
+         goto decode_success;
+      }
+      /* VORPS r/m, rV, r ::: r = rV | r/m */
+      /* VORPS = VEX.NDS.128.0F.WIG 56 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vorps", Iop_OrV128 );
+         goto decode_success;
+      }
+      /* VORPS r/m, rV, r ::: r = rV | r/m */
+      /* VORPS = VEX.NDS.256.0F.WIG 56 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vorps", Iop_OrV256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x57:
+      /* VXORPD r/m, rV, r ::: r = rV ^ r/m */
+      /* VXORPD = VEX.NDS.128.66.0F.WIG 57 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vxorpd", Iop_XorV128 );
+         goto decode_success;
+      }
+      /* VXORPD r/m, rV, r ::: r = rV ^ r/m */
+      /* VXORPD = VEX.NDS.256.66.0F.WIG 57 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vxorpd", Iop_XorV256 );
+         goto decode_success;
+      }
+      /* VXORPS r/m, rV, r ::: r = rV ^ r/m */
+      /* VXORPS = VEX.NDS.128.0F.WIG 57 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vxorps", Iop_XorV128 );
+         goto decode_success;
+      }
+      /* VXORPS r/m, rV, r ::: r = rV ^ r/m */
+      /* VXORPS = VEX.NDS.256.0F.WIG 57 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vxorps", Iop_XorV256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x58:
+      /* VADDSD xmm3/m64, xmm2, xmm1 = VEX.NDS.LIG.F2.0F.WIG 58 /r */
+      if (haveF2no66noF3(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo64(
+                    uses_vvvv, vbi, pfx, delta, "vaddsd", Iop_Add64F0x2 );
+         goto decode_success;
+      }
+      /* VADDSS xmm3/m32, xmm2, xmm1 = VEX.NDS.LIG.F3.0F.WIG 58 /r */
+      if (haveF3no66noF2(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo32(
+                    uses_vvvv, vbi, pfx, delta, "vaddss", Iop_Add32F0x4 );
+         goto decode_success;
+      }
+      /* VADDPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.0F.WIG 58 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vaddps", Iop_Add32Fx4 );
+         goto decode_success;
+      }
+      /* VADDPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.0F.WIG 58 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vaddps", Iop_Add32Fx8 );
+         goto decode_success;
+      }
+      /* VADDPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 58 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vaddpd", Iop_Add64Fx2 );
+         goto decode_success;
+      }
+      /* VADDPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 58 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vaddpd", Iop_Add64Fx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x59:
+      /* VMULSD xmm3/m64, xmm2, xmm1 = VEX.NDS.LIG.F2.0F.WIG 59 /r */
+      if (haveF2no66noF3(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo64(
+                    uses_vvvv, vbi, pfx, delta, "vmulsd", Iop_Mul64F0x2 );
+         goto decode_success;
+      }
+      /* VMULSS xmm3/m32, xmm2, xmm1 = VEX.NDS.LIG.F3.0F.WIG 59 /r */
+      if (haveF3no66noF2(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo32(
+                    uses_vvvv, vbi, pfx, delta, "vmulss", Iop_Mul32F0x4 );
+         goto decode_success;
+      }
+      /* VMULPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.0F.WIG 59 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vmulps", Iop_Mul32Fx4 );
+         goto decode_success;
+      }
+      /* VMULPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.0F.WIG 59 /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vmulps", Iop_Mul32Fx8 );
+         goto decode_success;
+      }
+      /* VMULPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 59 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vmulpd", Iop_Mul64Fx2 );
+         goto decode_success;
+      }
+      /* VMULPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 59 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vmulpd", Iop_Mul64Fx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5A:
+      /* VCVTPS2PD xmm2/m64, xmm1 = VEX.128.0F.WIG 5A /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_CVTPS2PD_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VCVTPS2PD xmm2/m128, ymm1 = VEX.256.0F.WIG 5A /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_CVTPS2PD_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      /* VCVTPD2PS xmm2/m128, xmm1 = VEX.128.66.0F.WIG 5A /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_CVTPD2PS_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VCVTPD2PS ymm2/m256, xmm1 = VEX.256.66.0F.WIG 5A /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_CVTPD2PS_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      /* VCVTSD2SS xmm3/m64, xmm2, xmm1 = VEX.NDS.LIG.F2.0F.WIG 5A /r */
+      if (haveF2no66noF3(pfx)) {
+         UChar  modrm = getUChar(delta);
+         UInt   rV    = getVexNvvvv(pfx);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp f64lo = newTemp(Ity_F64);
+         IRTemp rmode = newTemp(Ity_I32);
+         assign( rmode, get_sse_roundingmode() );
+         if (epartIsReg(modrm)) {
+            UInt rS = eregOfRexRM(pfx,modrm);
+            assign(f64lo, getXMMRegLane64F(rS, 0));
+            delta += 1;
+            DIP("vcvtsd2ss %s,%s,%s\n",
+                nameXMMReg(rS), nameXMMReg(rV), nameXMMReg(rD));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign(f64lo, loadLE(Ity_F64, mkexpr(addr)) );
+            delta += alen;
+            DIP("vcvtsd2ss %s,%s,%s\n",
+                dis_buf, nameXMMReg(rV), nameXMMReg(rD));
+         }
+         putXMMRegLane32F( rD, 0,
+                           binop( Iop_F64toF32, mkexpr(rmode),
+                                                mkexpr(f64lo)) );
+         putXMMRegLane32( rD, 1, getXMMRegLane32( rV, 1 ));
+         putXMMRegLane64( rD, 1, getXMMRegLane64( rV, 1 ));
+         putYMMRegLane128( rD, 1, mkV128(0) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VCVTSS2SD xmm3/m32, xmm2, xmm1 = VEX.NDS.LIG.F3.0F.WIG 5A /r */
+      if (haveF3no66noF2(pfx)) {
+         UChar  modrm = getUChar(delta);
+         UInt   rV    = getVexNvvvv(pfx);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp f32lo = newTemp(Ity_F32);
+         if (epartIsReg(modrm)) {
+            UInt rS = eregOfRexRM(pfx,modrm);
+            assign(f32lo, getXMMRegLane32F(rS, 0));
+            delta += 1;
+            DIP("vcvtss2sd %s,%s,%s\n",
+                nameXMMReg(rS), nameXMMReg(rV), nameXMMReg(rD));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign(f32lo, loadLE(Ity_F32, mkexpr(addr)) );
+            delta += alen;
+            DIP("vcvtss2sd %s,%s,%s\n",
+                dis_buf, nameXMMReg(rV), nameXMMReg(rD));
+         }
+         putXMMRegLane64F( rD, 0,
+                           unop( Iop_F32toF64, mkexpr(f32lo)) );
+         putXMMRegLane64( rD, 1, getXMMRegLane64( rV, 1 ));
+         putYMMRegLane128( rD, 1, mkV128(0) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x5B:
+      /* VCVTPS2DQ xmm2/m128, xmm1 = VEX.128.66.0F.WIG 5B /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_CVTxPS2DQ_128( vbi, pfx, delta,
+                                    True/*isAvx*/, False/*!r2zero*/ );
+         goto decode_success;
+      }
+      /* VCVTPS2DQ ymm2/m256, ymm1 = VEX.256.66.0F.WIG 5B /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_CVTxPS2DQ_256( vbi, pfx, delta,
+                                    False/*!r2zero*/ );
+         goto decode_success;
+      }
+      /* VCVTTPS2DQ xmm2/m128, xmm1 = VEX.128.F3.0F.WIG 5B /r */
+      if (haveF3no66noF2(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_CVTxPS2DQ_128( vbi, pfx, delta,
+                                    True/*isAvx*/, True/*r2zero*/ );
+         goto decode_success;
+      }
+      /* VCVTTPS2DQ ymm2/m256, ymm1 = VEX.256.F3.0F.WIG 5B /r */
+      if (haveF3no66noF2(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_CVTxPS2DQ_256( vbi, pfx, delta,
+                                    True/*r2zero*/ );
+         goto decode_success;
+      }
+      /* VCVTDQ2PS xmm2/m128, xmm1 = VEX.128.0F.WIG 5B /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_CVTDQ2PS_128 ( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VCVTDQ2PS ymm2/m256, ymm1 = VEX.256.0F.WIG 5B /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_CVTDQ2PS_256 ( vbi, pfx, delta );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5C:
+      /* VSUBSD xmm3/m64, xmm2, xmm1 = VEX.NDS.LIG.F2.0F.WIG 5C /r */
+      if (haveF2no66noF3(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo64(
+                    uses_vvvv, vbi, pfx, delta, "vsubsd", Iop_Sub64F0x2 );
+         goto decode_success;
+      }
+      /* VSUBSS xmm3/m32, xmm2, xmm1 = VEX.NDS.LIG.F3.0F.WIG 5C /r */
+      if (haveF3no66noF2(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo32(
+                    uses_vvvv, vbi, pfx, delta, "vsubss", Iop_Sub32F0x4 );
+         goto decode_success;
+      }
+      /* VSUBPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.0F.WIG 5C /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vsubps", Iop_Sub32Fx4 );
+         goto decode_success;
+      }
+      /* VSUBPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.0F.WIG 5C /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vsubps", Iop_Sub32Fx8 );
+         goto decode_success;
+      }
+      /* VSUBPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 5C /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vsubpd", Iop_Sub64Fx2 );
+         goto decode_success;
+      }
+      /* VSUBPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 5C /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vsubpd", Iop_Sub64Fx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5D:
+      /* VMINSD xmm3/m64, xmm2, xmm1 = VEX.NDS.LIG.F2.0F.WIG 5D /r */
+      if (haveF2no66noF3(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo64(
+                    uses_vvvv, vbi, pfx, delta, "vminsd", Iop_Min64F0x2 );
+         goto decode_success;
+      }
+      /* VMINSS xmm3/m32, xmm2, xmm1 = VEX.NDS.LIG.F3.0F.WIG 5D /r */
+      if (haveF3no66noF2(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo32(
+                    uses_vvvv, vbi, pfx, delta, "vminss", Iop_Min32F0x4 );
+         goto decode_success;
+      }
+      /* VMINPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.0F.WIG 5D /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vminps", Iop_Min32Fx4 );
+         goto decode_success;
+      }
+      /* VMINPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.0F.WIG 5D /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vminps", Iop_Min32Fx8 );
+         goto decode_success;
+      }
+      /* VMINPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 5D /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vminpd", Iop_Min64Fx2 );
+         goto decode_success;
+      }
+      /* VMINPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 5D /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vminpd", Iop_Min64Fx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5E:
+      /* VDIVSD xmm3/m64, xmm2, xmm1 = VEX.NDS.LIG.F2.0F.WIG 5E /r */
+      if (haveF2no66noF3(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo64(
+                    uses_vvvv, vbi, pfx, delta, "vdivsd", Iop_Div64F0x2 );
+         goto decode_success;
+      }
+      /* VDIVSS xmm3/m32, xmm2, xmm1 = VEX.NDS.LIG.F3.0F.WIG 5E /r */
+      if (haveF3no66noF2(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo32(
+                    uses_vvvv, vbi, pfx, delta, "vdivss", Iop_Div32F0x4 );
+         goto decode_success;
+      }
+      /* VDIVPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.0F.WIG 5E /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vdivps", Iop_Div32Fx4 );
+         goto decode_success;
+      }
+      /* VDIVPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.0F.WIG 5E /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vdivps", Iop_Div32Fx8 );
+         goto decode_success;
+      }
+      /* VDIVPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 5E /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vdivpd", Iop_Div64Fx2 );
+         goto decode_success;
+      }
+      /* VDIVPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 5E /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vdivpd", Iop_Div64Fx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x5F:
+      /* VMAXSD xmm3/m64, xmm2, xmm1 = VEX.NDS.LIG.F2.0F.WIG 5F /r */
+      if (haveF2no66noF3(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo64(
+                    uses_vvvv, vbi, pfx, delta, "vmaxsd", Iop_Max64F0x2 );
+         goto decode_success;
+      }
+      /* VMAXSS xmm3/m32, xmm2, xmm1 = VEX.NDS.LIG.F3.0F.WIG 5F /r */
+      if (haveF3no66noF2(pfx)) {
+         delta = dis_AVX128_E_V_to_G_lo32(
+                    uses_vvvv, vbi, pfx, delta, "vmaxss", Iop_Max32F0x4 );
+         goto decode_success;
+      }
+      /* VMAXPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.0F.WIG 5F /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vmaxps", Iop_Max32Fx4 );
+         goto decode_success;
+      }
+      /* VMAXPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.0F.WIG 5F /r */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vmaxps", Iop_Max32Fx8 );
+         goto decode_success;
+      }
+      /* VMAXPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 5F /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vmaxpd", Iop_Max64Fx2 );
+         goto decode_success;
+      }
+      /* VMAXPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 5F /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vmaxpd", Iop_Max64Fx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x60:
+      /* VPUNPCKLBW r/m, rV, r ::: r = interleave-lo-bytes(rV, r/m) */
+      /* VPUNPCKLBW = VEX.NDS.128.66.0F.WIG 60 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpunpcklbw",
+                    Iop_InterleaveLO8x16, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPUNPCKLBW r/m, rV, r ::: r = interleave-lo-bytes(rV, r/m) */
+      /* VPUNPCKLBW = VEX.NDS.256.66.0F.WIG 60 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpunpcklbw",
+                    math_VPUNPCKLBW_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x61:
+      /* VPUNPCKLWD r/m, rV, r ::: r = interleave-lo-words(rV, r/m) */
+      /* VPUNPCKLWD = VEX.NDS.128.66.0F.WIG 61 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpunpcklwd",
+                    Iop_InterleaveLO16x8, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPUNPCKLWD r/m, rV, r ::: r = interleave-lo-words(rV, r/m) */
+      /* VPUNPCKLWD = VEX.NDS.256.66.0F.WIG 61 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpunpcklwd",
+                    math_VPUNPCKLWD_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x62:
+      /* VPUNPCKLDQ r/m, rV, r ::: r = interleave-lo-dwords(rV, r/m) */
+      /* VPUNPCKLDQ = VEX.NDS.128.66.0F.WIG 62 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckldq",
+                    Iop_InterleaveLO32x4, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPUNPCKLDQ r/m, rV, r ::: r = interleave-lo-dwords(rV, r/m) */
+      /* VPUNPCKLDQ = VEX.NDS.256.66.0F.WIG 62 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckldq",
+                    math_VPUNPCKLDQ_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x63:
+      /* VPACKSSWB r/m, rV, r ::: r = QNarrowBin16Sto8Sx16(rV, r/m) */
+      /* VPACKSSWB = VEX.NDS.128.66.0F.WIG 63 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpacksswb",
+                    Iop_QNarrowBin16Sto8Sx16, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPACKSSWB r/m, rV, r ::: r = QNarrowBin16Sto8Sx16(rV, r/m) */
+      /* VPACKSSWB = VEX.NDS.256.66.0F.WIG 63 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpacksswb",
+                    math_VPACKSSWB_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x64:
+      /* VPCMPGTB r/m, rV, r ::: r = rV `>s-by-8s` r/m */
+      /* VPCMPGTB = VEX.NDS.128.66.0F.WIG 64 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpgtb", Iop_CmpGT8Sx16 );
+         goto decode_success;
+      }
+      /* VPCMPGTB r/m, rV, r ::: r = rV `>s-by-8s` r/m */
+      /* VPCMPGTB = VEX.NDS.256.66.0F.WIG 64 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpgtb", Iop_CmpGT8Sx32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x65:
+      /* VPCMPGTW r/m, rV, r ::: r = rV `>s-by-16s` r/m */
+      /* VPCMPGTW = VEX.NDS.128.66.0F.WIG 65 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpgtw", Iop_CmpGT16Sx8 );
+         goto decode_success;
+      }
+      /* VPCMPGTW r/m, rV, r ::: r = rV `>s-by-16s` r/m */
+      /* VPCMPGTW = VEX.NDS.256.66.0F.WIG 65 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpgtw", Iop_CmpGT16Sx16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x66:
+      /* VPCMPGTD r/m, rV, r ::: r = rV `>s-by-32s` r/m */
+      /* VPCMPGTD = VEX.NDS.128.66.0F.WIG 66 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpgtd", Iop_CmpGT32Sx4 );
+         goto decode_success;
+      }
+      /* VPCMPGTD r/m, rV, r ::: r = rV `>s-by-32s` r/m */
+      /* VPCMPGTD = VEX.NDS.256.66.0F.WIG 66 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpgtd", Iop_CmpGT32Sx8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x67:
+      /* VPACKUSWB r/m, rV, r ::: r = QNarrowBin16Sto8Ux16(rV, r/m) */
+      /* VPACKUSWB = VEX.NDS.128.66.0F.WIG 67 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpackuswb",
+                    Iop_QNarrowBin16Sto8Ux16, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPACKUSWB r/m, rV, r ::: r = QNarrowBin16Sto8Ux16(rV, r/m) */
+      /* VPACKUSWB = VEX.NDS.256.66.0F.WIG 67 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpackuswb",
+                    math_VPACKUSWB_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x68:
+      /* VPUNPCKHBW r/m, rV, r ::: r = interleave-hi-bytes(rV, r/m) */
+      /* VPUNPCKHBW = VEX.NDS.128.66.0F.WIG 68 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckhbw",
+                    Iop_InterleaveHI8x16, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPUNPCKHBW r/m, rV, r ::: r = interleave-hi-bytes(rV, r/m) */
+      /* VPUNPCKHBW = VEX.NDS.256.66.0F.WIG 68 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckhbw",
+                    math_VPUNPCKHBW_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x69:
+      /* VPUNPCKHWD r/m, rV, r ::: r = interleave-hi-words(rV, r/m) */
+      /* VPUNPCKHWD = VEX.NDS.128.66.0F.WIG 69 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckhwd",
+                    Iop_InterleaveHI16x8, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPUNPCKHWD r/m, rV, r ::: r = interleave-hi-words(rV, r/m) */
+      /* VPUNPCKHWD = VEX.NDS.256.66.0F.WIG 69 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckhwd",
+                    math_VPUNPCKHWD_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6A:
+      /* VPUNPCKHDQ r/m, rV, r ::: r = interleave-hi-dwords(rV, r/m) */
+      /* VPUNPCKHDQ = VEX.NDS.128.66.0F.WIG 6A /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckhdq",
+                    Iop_InterleaveHI32x4, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPUNPCKHDQ r/m, rV, r ::: r = interleave-hi-dwords(rV, r/m) */
+      /* VPUNPCKHDQ = VEX.NDS.256.66.0F.WIG 6A /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckhdq",
+                    math_VPUNPCKHDQ_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6B:
+      /* VPACKSSDW r/m, rV, r ::: r = QNarrowBin32Sto16Sx8(rV, r/m) */
+      /* VPACKSSDW = VEX.NDS.128.66.0F.WIG 6B /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpackssdw",
+                    Iop_QNarrowBin32Sto16Sx8, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPACKSSDW r/m, rV, r ::: r = QNarrowBin32Sto16Sx8(rV, r/m) */
+      /* VPACKSSDW = VEX.NDS.256.66.0F.WIG 6B /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpackssdw",
+                    math_VPACKSSDW_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6C:
+      /* VPUNPCKLQDQ r/m, rV, r ::: r = interleave-lo-64bitses(rV, r/m) */
+      /* VPUNPCKLQDQ = VEX.NDS.128.66.0F.WIG 6C /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpunpcklqdq",
+                    Iop_InterleaveLO64x2, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPUNPCKLQDQ r/m, rV, r ::: r = interleave-lo-64bitses(rV, r/m) */
+      /* VPUNPCKLQDQ = VEX.NDS.256.66.0F.WIG 6C /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpunpcklqdq",
+                    math_VPUNPCKLQDQ_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6D:
+      /* VPUNPCKHQDQ r/m, rV, r ::: r = interleave-hi-64bitses(rV, r/m) */
+      /* VPUNPCKHQDQ = VEX.NDS.128.66.0F.WIG 6D /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckhqdq",
+                    Iop_InterleaveHI64x2, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPUNPCKHQDQ r/m, rV, r ::: r = interleave-hi-64bitses(rV, r/m) */
+      /* VPUNPCKHQDQ = VEX.NDS.256.66.0F.WIG 6D /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpunpckhqdq",
+                    math_VPUNPCKHQDQ_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x6E:
+      /* VMOVD r32/m32, xmm1 = VEX.128.66.0F.W0 6E */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         vassert(sz == 2); /* even tho we are transferring 4, not 2. */
+         UChar modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            putYMMRegLoAndZU(
+               gregOfRexRM(pfx,modrm),
+               unop( Iop_32UtoV128, getIReg32(eregOfRexRM(pfx,modrm)) ) 
+            );
+            DIP("vmovd %s, %s\n", nameIReg32(eregOfRexRM(pfx,modrm)), 
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+        } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            putYMMRegLoAndZU(
+               gregOfRexRM(pfx,modrm),
+               unop( Iop_32UtoV128,loadLE(Ity_I32, mkexpr(addr)))
+                             );
+            DIP("vmovd %s, %s\n", dis_buf, 
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+         goto decode_success;
+      }
+      /* VMOVQ r64/m64, xmm1 = VEX.128.66.0F.W1 6E */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 1==getRexW(pfx)/*W1*/) {
+         vassert(sz == 2); /* even tho we are transferring 8, not 2. */
+         UChar modrm = getUChar(delta);
+         if (epartIsReg(modrm)) {
+            delta += 1;
+            putYMMRegLoAndZU(
+               gregOfRexRM(pfx,modrm),
+               unop( Iop_64UtoV128, getIReg64(eregOfRexRM(pfx,modrm)) ) 
+            );
+            DIP("vmovq %s, %s\n", nameIReg64(eregOfRexRM(pfx,modrm)), 
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            putYMMRegLoAndZU(
+               gregOfRexRM(pfx,modrm),
+               unop( Iop_64UtoV128,loadLE(Ity_I64, mkexpr(addr)))
+                             );
+            DIP("vmovq %s, %s\n", dis_buf, 
+                                  nameXMMReg(gregOfRexRM(pfx,modrm)));
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x6F:
+      /* VMOVDQA ymm2/m256, ymm1 = VEX.256.66.0F.WIG 6F */
+      /* VMOVDQU ymm2/m256, ymm1 = VEX.256.F3.0F.WIG 6F */
+      if ((have66noF2noF3(pfx) || haveF3no66noF2(pfx))
+          && 1==getVexL(pfx)/*256*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp tD    = newTemp(Ity_V256);
+         Bool   isA   = have66noF2noF3(pfx);
+         HChar  ch    = isA ? 'a' : 'u';
+         if (epartIsReg(modrm)) {
+            UInt rS = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            assign(tD, getYMMReg(rS));
+            DIP("vmovdq%c %s,%s\n", ch, nameYMMReg(rS), nameYMMReg(rD));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            if (isA)
+               gen_SEGV_if_not_32_aligned(addr);
+            assign(tD, loadLE(Ity_V256, mkexpr(addr)));
+            DIP("vmovdq%c %s,%s\n", ch, dis_buf, nameYMMReg(rD));
+         }
+         putYMMReg(rD, mkexpr(tD));
+         goto decode_success;
+      }
+      /* VMOVDQA xmm2/m128, xmm1 = VEX.128.66.0F.WIG 6F */
+      /* VMOVDQU xmm2/m128, xmm1 = VEX.128.F3.0F.WIG 6F */
+      if ((have66noF2noF3(pfx) || haveF3no66noF2(pfx))
+          && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp tD    = newTemp(Ity_V128);
+         Bool   isA   = have66noF2noF3(pfx);
+         HChar  ch    = isA ? 'a' : 'u';
+         if (epartIsReg(modrm)) {
+            UInt rS = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            assign(tD, getXMMReg(rS));
+            DIP("vmovdq%c %s,%s\n", ch, nameXMMReg(rS), nameXMMReg(rD));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            if (isA)
+               gen_SEGV_if_not_16_aligned(addr);
+            assign(tD, loadLE(Ity_V128, mkexpr(addr)));
+            DIP("vmovdq%c %s,%s\n", ch, dis_buf, nameXMMReg(rD));
+         }
+         putYMMRegLoAndZU(rD, mkexpr(tD));
+         goto decode_success;
+      }
+      break;
+
+   case 0x70:
+      /* VPSHUFD imm8, xmm2/m128, xmm1 = VEX.128.66.0F.WIG 70 /r ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PSHUFD_32x4( vbi, pfx, delta, True/*writesYmm*/);
+         goto decode_success;
+      }
+      /* VPSHUFD imm8, ymm2/m256, ymm1 = VEX.256.66.0F.WIG 70 /r ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PSHUFD_32x8( vbi, pfx, delta);
+         goto decode_success;
+      }
+      /* VPSHUFLW imm8, xmm2/m128, xmm1 = VEX.128.F2.0F.WIG 70 /r ib */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PSHUFxW_128( vbi, pfx, delta,
+                                  True/*isAvx*/, False/*!xIsH*/ );
+         goto decode_success;
+      }
+      /* VPSHUFLW imm8, ymm2/m256, ymm1 = VEX.256.F2.0F.WIG 70 /r ib */
+      if (haveF2no66noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PSHUFxW_256( vbi, pfx, delta, False/*!xIsH*/ );
+         goto decode_success;
+      }
+      /* VPSHUFHW imm8, xmm2/m128, xmm1 = VEX.128.F3.0F.WIG 70 /r ib */
+      if (haveF3no66noF2(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PSHUFxW_128( vbi, pfx, delta,
+                                  True/*isAvx*/, True/*xIsH*/ );
+         goto decode_success;
+      }
+      /* VPSHUFHW imm8, ymm2/m256, ymm1 = VEX.256.F3.0F.WIG 70 /r ib */
+      if (haveF3no66noF2(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PSHUFxW_256( vbi, pfx, delta, True/*xIsH*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x71:
+      /* VPSRLW imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 71 /2 ib */
+      /* VPSRAW imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 71 /4 ib */
+      /* VPSLLW imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 71 /6 ib */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/
+          && epartIsReg(getUChar(delta))) {
+         if (gregLO3ofRM(getUChar(delta)) == 2/*SRL*/) {
+            delta = dis_AVX128_shiftE_to_V_imm( pfx, delta,
+                                                "vpsrlw", Iop_ShrN16x8 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 4/*SRA*/) {
+            delta = dis_AVX128_shiftE_to_V_imm( pfx, delta,
+                                                "vpsraw", Iop_SarN16x8 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 6/*SLL*/) {
+            delta = dis_AVX128_shiftE_to_V_imm( pfx, delta,
+                                                "vpsllw", Iop_ShlN16x8 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      /* VPSRLW imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 71 /2 ib */
+      /* VPSRAW imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 71 /4 ib */
+      /* VPSLLW imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 71 /6 ib */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/
+          && epartIsReg(getUChar(delta))) {
+         if (gregLO3ofRM(getUChar(delta)) == 2/*SRL*/) {
+            delta = dis_AVX256_shiftE_to_V_imm( pfx, delta,
+                                                "vpsrlw", Iop_ShrN16x16 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 4/*SRA*/) {
+            delta = dis_AVX256_shiftE_to_V_imm( pfx, delta,
+                                                "vpsraw", Iop_SarN16x16 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 6/*SLL*/) {
+            delta = dis_AVX256_shiftE_to_V_imm( pfx, delta,
+                                                "vpsllw", Iop_ShlN16x16 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      break;
+
+   case 0x72:
+      /* VPSRLD imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 72 /2 ib */
+      /* VPSRAD imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 72 /4 ib */
+      /* VPSLLD imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 72 /6 ib */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/
+          && epartIsReg(getUChar(delta))) {
+         if (gregLO3ofRM(getUChar(delta)) == 2/*SRL*/) {
+            delta = dis_AVX128_shiftE_to_V_imm( pfx, delta,
+                                                "vpsrld", Iop_ShrN32x4 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 4/*SRA*/) {
+            delta = dis_AVX128_shiftE_to_V_imm( pfx, delta,
+                                                "vpsrad", Iop_SarN32x4 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 6/*SLL*/) {
+            delta = dis_AVX128_shiftE_to_V_imm( pfx, delta,
+                                                "vpslld", Iop_ShlN32x4 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      /* VPSRLD imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 72 /2 ib */
+      /* VPSRAD imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 72 /4 ib */
+      /* VPSLLD imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 72 /6 ib */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/
+          && epartIsReg(getUChar(delta))) {
+         if (gregLO3ofRM(getUChar(delta)) == 2/*SRL*/) {
+            delta = dis_AVX256_shiftE_to_V_imm( pfx, delta,
+                                                "vpsrld", Iop_ShrN32x8 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 4/*SRA*/) {
+            delta = dis_AVX256_shiftE_to_V_imm( pfx, delta,
+                                                "vpsrad", Iop_SarN32x8 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 6/*SLL*/) {
+            delta = dis_AVX256_shiftE_to_V_imm( pfx, delta,
+                                                "vpslld", Iop_ShlN32x8 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      break;
+
+   case 0x73:
+      /* VPSRLDQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /3 ib */
+      /* VPSLLDQ imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /7 ib */
+      /* VPSRLQ  imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /2 ib */
+      /* VPSLLQ  imm8, xmm2, xmm1 = VEX.NDD.128.66.0F.WIG 73 /6 ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && epartIsReg(getUChar(delta))) {
+         Int    rS   = eregOfRexRM(pfx,getUChar(delta));
+         Int    rD   = getVexNvvvv(pfx);
+         IRTemp vecS = newTemp(Ity_V128);
+         if (gregLO3ofRM(getUChar(delta)) == 3) {
+            Int imm = (Int)getUChar(delta+1);
+            DIP("vpsrldq $%d,%s,%s\n", imm, nameXMMReg(rS), nameXMMReg(rD));
+            delta += 2;
+            assign( vecS, getXMMReg(rS) );
+            putYMMRegLoAndZU(rD, mkexpr(math_PSRLDQ( vecS, imm )));
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 7) {
+            Int imm = (Int)getUChar(delta+1);
+            DIP("vpslldq $%d,%s,%s\n", imm, nameXMMReg(rS), nameXMMReg(rD));
+            delta += 2;
+            assign( vecS, getXMMReg(rS) );
+            putYMMRegLoAndZU(rD, mkexpr(math_PSLLDQ( vecS, imm )));
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 2) {
+            delta = dis_AVX128_shiftE_to_V_imm( pfx, delta,
+                                                "vpsrlq", Iop_ShrN64x2 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 6) {
+            delta = dis_AVX128_shiftE_to_V_imm( pfx, delta,
+                                                "vpsllq", Iop_ShlN64x2 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      /* VPSRLDQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /3 ib */
+      /* VPSLLDQ imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /7 ib */
+      /* VPSRLQ  imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /2 ib */
+      /* VPSLLQ  imm8, ymm2, ymm1 = VEX.NDD.256.66.0F.WIG 73 /6 ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && epartIsReg(getUChar(delta))) {
+         Int    rS   = eregOfRexRM(pfx,getUChar(delta));
+         Int    rD   = getVexNvvvv(pfx);
+         if (gregLO3ofRM(getUChar(delta)) == 3) {
+            IRTemp vecS0 = newTemp(Ity_V128);
+            IRTemp vecS1 = newTemp(Ity_V128);
+            Int imm = (Int)getUChar(delta+1);
+            DIP("vpsrldq $%d,%s,%s\n", imm, nameYMMReg(rS), nameYMMReg(rD));
+            delta += 2;
+            assign( vecS0, getYMMRegLane128(rS, 0));
+            assign( vecS1, getYMMRegLane128(rS, 1));
+            putYMMRegLane128(rD, 0, mkexpr(math_PSRLDQ( vecS0, imm )));
+            putYMMRegLane128(rD, 1, mkexpr(math_PSRLDQ( vecS1, imm )));
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 7) {
+            IRTemp vecS0 = newTemp(Ity_V128);
+            IRTemp vecS1 = newTemp(Ity_V128);
+            Int imm = (Int)getUChar(delta+1);
+            DIP("vpslldq $%d,%s,%s\n", imm, nameYMMReg(rS), nameYMMReg(rD));
+            delta += 2;
+            assign( vecS0, getYMMRegLane128(rS, 0));
+            assign( vecS1, getYMMRegLane128(rS, 1));
+            putYMMRegLane128(rD, 0, mkexpr(math_PSLLDQ( vecS0, imm )));
+            putYMMRegLane128(rD, 1, mkexpr(math_PSLLDQ( vecS1, imm )));
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 2) {
+            delta = dis_AVX256_shiftE_to_V_imm( pfx, delta,
+                                                "vpsrlq", Iop_ShrN64x4 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         if (gregLO3ofRM(getUChar(delta)) == 6) {
+            delta = dis_AVX256_shiftE_to_V_imm( pfx, delta,
+                                                "vpsllq", Iop_ShlN64x4 );
+            *uses_vvvv = True;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      break;
+
+   case 0x74:
+      /* VPCMPEQB r/m, rV, r ::: r = rV `eq-by-8s` r/m */
+      /* VPCMPEQB = VEX.NDS.128.66.0F.WIG 74 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpeqb", Iop_CmpEQ8x16 );
+         goto decode_success;
+      }
+      /* VPCMPEQB r/m, rV, r ::: r = rV `eq-by-8s` r/m */
+      /* VPCMPEQB = VEX.NDS.256.66.0F.WIG 74 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpeqb", Iop_CmpEQ8x32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x75:
+      /* VPCMPEQW r/m, rV, r ::: r = rV `eq-by-16s` r/m */
+      /* VPCMPEQW = VEX.NDS.128.66.0F.WIG 75 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpeqw", Iop_CmpEQ16x8 );
+         goto decode_success;
+      }
+      /* VPCMPEQW r/m, rV, r ::: r = rV `eq-by-16s` r/m */
+      /* VPCMPEQW = VEX.NDS.256.66.0F.WIG 75 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpeqw", Iop_CmpEQ16x16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x76:
+      /* VPCMPEQD r/m, rV, r ::: r = rV `eq-by-32s` r/m */
+      /* VPCMPEQD = VEX.NDS.128.66.0F.WIG 76 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpeqd", Iop_CmpEQ32x4 );
+         goto decode_success;
+      }
+      /* VPCMPEQD r/m, rV, r ::: r = rV `eq-by-32s` r/m */
+      /* VPCMPEQD = VEX.NDS.256.66.0F.WIG 76 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpeqd", Iop_CmpEQ32x8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x77:
+      /* VZEROUPPER = VEX.128.0F.WIG 77 */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         Int i;
+         IRTemp zero128 = newTemp(Ity_V128);
+         assign(zero128, mkV128(0));
+         for (i = 0; i < 16; i++) {
+            putYMMRegLane128(i, 1, mkexpr(zero128));
+         }
+         DIP("vzeroupper\n");
+         goto decode_success;
+      }
+      /* VZEROALL = VEX.256.0F.WIG 77 */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         Int i;
+         IRTemp zero128 = newTemp(Ity_V128);
+         assign(zero128, mkV128(0));
+         for (i = 0; i < 16; i++) {
+            putYMMRegLoAndZU(i, mkexpr(zero128));
+         }
+         DIP("vzeroall\n");
+         goto decode_success;
+      }
+      break;
+
+   case 0x7C:
+   case 0x7D:
+      /* VHADDPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.F2.0F.WIG 7C /r */
+      /* VHSUBPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.F2.0F.WIG 7D /r */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         IRTemp sV     = newTemp(Ity_V128);
+         IRTemp dV     = newTemp(Ity_V128);
+         Bool   isAdd  = opc == 0x7C;
+         const HChar* str = isAdd ? "add" : "sub";
+         UChar modrm   = getUChar(delta);
+         UInt   rG     = gregOfRexRM(pfx,modrm);
+         UInt   rV     = getVexNvvvv(pfx);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            DIP("vh%sps %s,%s,%s\n", str, nameXMMReg(rE),
+                nameXMMReg(rV), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("vh%sps %s,%s,%s\n", str, dis_buf,
+                nameXMMReg(rV), nameXMMReg(rG));
+            delta += alen;
+         }
+         assign( dV, getXMMReg(rV) );
+         putYMMRegLoAndZU( rG, mkexpr( math_HADDPS_128 ( dV, sV, isAdd ) ) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VHADDPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.F2.0F.WIG 7C /r */
+      /* VHSUBPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.F2.0F.WIG 7D /r */
+      if (haveF2no66noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         IRTemp sV     = newTemp(Ity_V256);
+         IRTemp dV     = newTemp(Ity_V256);
+         IRTemp s1, s0, d1, d0;
+         Bool   isAdd  = opc == 0x7C;
+         const HChar* str = isAdd ? "add" : "sub";
+         UChar modrm   = getUChar(delta);
+         UInt   rG     = gregOfRexRM(pfx,modrm);
+         UInt   rV     = getVexNvvvv(pfx);
+         s1 = s0 = d1 = d0 = IRTemp_INVALID;
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getYMMReg(rE) );
+            DIP("vh%sps %s,%s,%s\n", str, nameYMMReg(rE),
+                nameYMMReg(rV), nameYMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
+            DIP("vh%sps %s,%s,%s\n", str, dis_buf,
+                nameYMMReg(rV), nameYMMReg(rG));
+            delta += alen;
+         }
+         assign( dV, getYMMReg(rV) );
+         breakupV256toV128s( dV, &d1, &d0 );
+         breakupV256toV128s( sV, &s1, &s0 );
+         putYMMReg( rG, binop(Iop_V128HLtoV256,
+                              mkexpr( math_HADDPS_128 ( d1, s1, isAdd ) ),
+                              mkexpr( math_HADDPS_128 ( d0, s0, isAdd ) ) ) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VHADDPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 7C /r */
+      /* VHSUBPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG 7D /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         IRTemp sV     = newTemp(Ity_V128);
+         IRTemp dV     = newTemp(Ity_V128);
+         Bool   isAdd  = opc == 0x7C;
+         const HChar* str = isAdd ? "add" : "sub";
+         UChar modrm   = getUChar(delta);
+         UInt   rG     = gregOfRexRM(pfx,modrm);
+         UInt   rV     = getVexNvvvv(pfx);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            DIP("vh%spd %s,%s,%s\n", str, nameXMMReg(rE),
+                nameXMMReg(rV), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("vh%spd %s,%s,%s\n", str, dis_buf,
+                nameXMMReg(rV), nameXMMReg(rG));
+            delta += alen;
+         }
+         assign( dV, getXMMReg(rV) );
+         putYMMRegLoAndZU( rG, mkexpr( math_HADDPD_128 ( dV, sV, isAdd ) ) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VHADDPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 7C /r */
+      /* VHSUBPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG 7D /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         IRTemp sV     = newTemp(Ity_V256);
+         IRTemp dV     = newTemp(Ity_V256);
+         IRTemp s1, s0, d1, d0;
+         Bool   isAdd  = opc == 0x7C;
+         const HChar* str = isAdd ? "add" : "sub";
+         UChar modrm   = getUChar(delta);
+         UInt   rG     = gregOfRexRM(pfx,modrm);
+         UInt   rV     = getVexNvvvv(pfx);
+         s1 = s0 = d1 = d0 = IRTemp_INVALID;
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getYMMReg(rE) );
+            DIP("vh%spd %s,%s,%s\n", str, nameYMMReg(rE),
+                nameYMMReg(rV), nameYMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
+            DIP("vh%spd %s,%s,%s\n", str, dis_buf,
+                nameYMMReg(rV), nameYMMReg(rG));
+            delta += alen;
+         }
+         assign( dV, getYMMReg(rV) );
+         breakupV256toV128s( dV, &d1, &d0 );
+         breakupV256toV128s( sV, &s1, &s0 );
+         putYMMReg( rG, binop(Iop_V128HLtoV256,
+                              mkexpr( math_HADDPD_128 ( d1, s1, isAdd ) ),
+                              mkexpr( math_HADDPD_128 ( d0, s0, isAdd ) ) ) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x7E:
+      /* Note the Intel docs don't make sense for this.  I think they
+         are wrong.  They seem to imply it is a store when in fact I
+         think it is a load.  Also it's unclear whether this is W0, W1
+         or WIG. */
+      /* VMOVQ xmm2/m64, xmm1 = VEX.128.F3.0F.W0 7E /r */
+      if (haveF3no66noF2(pfx) 
+          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         vassert(sz == 4); /* even tho we are transferring 8, not 4. */
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            putXMMRegLane64( rG, 0, getXMMRegLane64( rE, 0 ));
+            DIP("vmovq %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            putXMMRegLane64( rG, 0, loadLE(Ity_I64, mkexpr(addr)) );
+            DIP("vmovq %s,%s\n", dis_buf, nameXMMReg(rG));
+            delta += alen;
+         }
+         /* zero bits 255:64 */
+         putXMMRegLane64( rG, 1, mkU64(0) );
+         putYMMRegLane128( rG, 1, mkV128(0) );
+         goto decode_success;
+      }
+      /* VMOVQ xmm1, r64 = VEX.128.66.0F.W1 7E /r (reg case only) */
+      /* Moves from G to E, so is a store-form insn */
+      /* Intel docs list this in the VMOVD entry for some reason. */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 1==getRexW(pfx)/*W1*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            DIP("vmovq %s,%s\n", nameXMMReg(rG), nameIReg64(rE));
+            putIReg64(rE, getXMMRegLane64(rG, 0));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getXMMRegLane64(rG, 0) );
+            DIP("vmovq %s,%s\n", nameXMMReg(rG), dis_buf);
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      /* VMOVD xmm1, m32/r32 = VEX.128.66.0F.W0 7E /r (reg case only) */
+      /* Moves from G to E, so is a store-form insn */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            DIP("vmovd %s,%s\n", nameXMMReg(rG), nameIReg32(rE));
+            putIReg32(rE, getXMMRegLane32(rG, 0));
+            delta += 1;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getXMMRegLane32(rG, 0) );
+            DIP("vmovd %s,%s\n", nameXMMReg(rG), dis_buf);
+            delta += alen;
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0x7F:
+      /* VMOVDQA ymm1, ymm2/m256 = VEX.256.66.0F.WIG 7F */
+      /* VMOVDQU ymm1, ymm2/m256 = VEX.256.F3.0F.WIG 7F */
+      if ((have66noF2noF3(pfx) || haveF3no66noF2(pfx))
+          && 1==getVexL(pfx)/*256*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rS    = gregOfRexRM(pfx, modrm);
+         IRTemp tS    = newTemp(Ity_V256);
+         Bool   isA   = have66noF2noF3(pfx);
+         HChar  ch    = isA ? 'a' : 'u';
+         assign(tS, getYMMReg(rS));
+         if (epartIsReg(modrm)) {
+            UInt rD = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            putYMMReg(rD, mkexpr(tS));
+            DIP("vmovdq%c %s,%s\n", ch, nameYMMReg(rS), nameYMMReg(rD));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            if (isA)
+               gen_SEGV_if_not_32_aligned(addr);
+            storeLE(mkexpr(addr), mkexpr(tS));
+            DIP("vmovdq%c %s,%s\n", ch, nameYMMReg(rS), dis_buf);
+         }
+         goto decode_success;
+      }
+      /* VMOVDQA xmm1, xmm2/m128 = VEX.128.66.0F.WIG 7F */
+      /* VMOVDQU xmm1, xmm2/m128 = VEX.128.F3.0F.WIG 7F */
+      if ((have66noF2noF3(pfx) || haveF3no66noF2(pfx))
+          && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rS    = gregOfRexRM(pfx, modrm);
+         IRTemp tS    = newTemp(Ity_V128);
+         Bool   isA   = have66noF2noF3(pfx);
+         HChar  ch    = isA ? 'a' : 'u';
+         assign(tS, getXMMReg(rS));
+         if (epartIsReg(modrm)) {
+            UInt rD = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            putYMMRegLoAndZU(rD, mkexpr(tS));
+            DIP("vmovdq%c %s,%s\n", ch, nameXMMReg(rS), nameXMMReg(rD));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            if (isA)
+               gen_SEGV_if_not_16_aligned(addr);
+            storeLE(mkexpr(addr), mkexpr(tS));
+            DIP("vmovdq%c %s,%s\n", ch, nameXMMReg(rS), dis_buf);
+         }
+         goto decode_success;
+      }
+      break;
+
+   case 0xAE:
+      /* VSTMXCSR m32 = VEX.LZ.0F.WIG AE /3 */
+      if (haveNo66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*LZ*/
+          && 0==getRexW(pfx) /* be paranoid -- Intel docs don't require this */
+          && !epartIsReg(getUChar(delta)) && gregLO3ofRM(getUChar(delta)) == 3
+          && sz == 4) {
+         delta = dis_STMXCSR(vbi, pfx, delta, True/*isAvx*/);
+         goto decode_success;
+      }
+      /* VLDMXCSR m32 = VEX.LZ.0F.WIG AE /2 */
+      if (haveNo66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*LZ*/
+          && 0==getRexW(pfx) /* be paranoid -- Intel docs don't require this */
+          && !epartIsReg(getUChar(delta)) && gregLO3ofRM(getUChar(delta)) == 2
+          && sz == 4) {
+         delta = dis_LDMXCSR(vbi, pfx, delta, True/*isAvx*/);
+         goto decode_success;
+      }
+      break;
+
+   case 0xC2:
+      /* VCMPSD xmm3/m64(E=argL), xmm2(V=argR), xmm1(G) */
+      /* = VEX.NDS.LIG.F2.0F.WIG C2 /r ib */
+      if (haveF2no66noF3(pfx)) {
+         Long delta0 = delta;
+         delta = dis_AVX128_cmp_V_E_to_G( uses_vvvv, vbi, pfx, delta,
+                                          "vcmpsd", False/*!all_lanes*/,
+                                          8/*sz*/);
+         if (delta > delta0) goto decode_success;
+         /* else fall through -- decoding has failed */
+      }
+      /* VCMPSS xmm3/m32(E=argL), xmm2(V=argR), xmm1(G) */
+      /* = VEX.NDS.LIG.F3.0F.WIG C2 /r ib */
+      if (haveF3no66noF2(pfx)) {
+         Long delta0 = delta;
+         delta = dis_AVX128_cmp_V_E_to_G( uses_vvvv, vbi, pfx, delta,
+                                          "vcmpss", False/*!all_lanes*/,
+                                          4/*sz*/);
+         if (delta > delta0) goto decode_success;
+         /* else fall through -- decoding has failed */
+      }
+      /* VCMPPD xmm3/m128(E=argL), xmm2(V=argR), xmm1(G) */
+      /* = VEX.NDS.128.66.0F.WIG C2 /r ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         Long delta0 = delta;
+         delta = dis_AVX128_cmp_V_E_to_G( uses_vvvv, vbi, pfx, delta,
+                                          "vcmppd", True/*all_lanes*/,
+                                          8/*sz*/);
+         if (delta > delta0) goto decode_success;
+         /* else fall through -- decoding has failed */
+      }
+      /* VCMPPD ymm3/m256(E=argL), ymm2(V=argR), ymm1(G) */
+      /* = VEX.NDS.256.66.0F.WIG C2 /r ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         Long delta0 = delta;
+         delta = dis_AVX256_cmp_V_E_to_G( uses_vvvv, vbi, pfx, delta,
+                                          "vcmppd", 8/*sz*/);
+         if (delta > delta0) goto decode_success;
+         /* else fall through -- decoding has failed */
+      }
+      /* VCMPPS xmm3/m128(E=argL), xmm2(V=argR), xmm1(G) */
+      /* = VEX.NDS.128.0F.WIG C2 /r ib */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         Long delta0 = delta;
+         delta = dis_AVX128_cmp_V_E_to_G( uses_vvvv, vbi, pfx, delta,
+                                          "vcmpps", True/*all_lanes*/,
+                                          4/*sz*/);
+         if (delta > delta0) goto decode_success;
+         /* else fall through -- decoding has failed */
+      }
+      /* VCMPPS ymm3/m256(E=argL), ymm2(V=argR), ymm1(G) */
+      /* = VEX.NDS.256.0F.WIG C2 /r ib */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         Long delta0 = delta;
+         delta = dis_AVX256_cmp_V_E_to_G( uses_vvvv, vbi, pfx, delta,
+                                          "vcmpps", 4/*sz*/);
+         if (delta > delta0) goto decode_success;
+         /* else fall through -- decoding has failed */
+      }
+      break;
+
+   case 0xC4:
+      /* VPINSRW r32/m16, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG C4 /r ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         Int    imm8;
+         IRTemp new16 = newTemp(Ity_I16);
+
+         if ( epartIsReg( modrm ) ) {
+            imm8 = (Int)(getUChar(delta+1) & 7);
+            assign( new16, unop(Iop_32to16,
+                                getIReg32(eregOfRexRM(pfx,modrm))) );
+            delta += 1+1;
+            DIP( "vpinsrw $%d,%s,%s\n", imm8,
+                 nameIReg32( eregOfRexRM(pfx, modrm) ), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8 = (Int)(getUChar(delta+alen) & 7);
+            assign( new16, loadLE( Ity_I16, mkexpr(addr) ));
+            delta += alen+1;
+            DIP( "vpinsrw $%d,%s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rG) );
+         }
+
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg( rV ));
+         IRTemp res_vec = math_PINSRW_128( src_vec, new16, imm8 );
+         putYMMRegLoAndZU( rG, mkexpr(res_vec) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xC5:
+      /* VPEXTRW imm8, xmm1, reg32 = VEX.128.66.0F.W0 C5 /r ib */
+      if (have66noF2noF3(pfx)
+         && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         Long delta0 = delta;
+         delta = dis_PEXTRW_128_EregOnly_toG( vbi, pfx, delta,
+                                              True/*isAvx*/ );
+         if (delta > delta0) goto decode_success;
+         /* else fall through -- decoding has failed */
+      }
+      break; 
+
+   case 0xC6:
+      /* VSHUFPS imm8, xmm3/m128, xmm2, xmm1 */
+      /* = VEX.NDS.128.0F.WIG C6 /r ib */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         Int    imm8 = 0;
+         IRTemp eV   = newTemp(Ity_V128);
+         IRTemp vV   = newTemp(Ity_V128);
+         UInt  modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         assign( vV, getXMMReg(rV) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getXMMReg(rE) );
+            imm8 = (Int)getUChar(delta+1);
+            delta += 1+1;
+            DIP("vshufps $%d,%s,%s,%s\n",
+                imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += 1+alen;
+            DIP("vshufps $%d,%s,%s,%s\n",
+                imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+         }
+         IRTemp res = math_SHUFPS_128( eV, vV, imm8 );
+         putYMMRegLoAndZU( rG, mkexpr(res) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VSHUFPS imm8, ymm3/m256, ymm2, ymm1 */
+      /* = VEX.NDS.256.0F.WIG C6 /r ib */
+      if (haveNo66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         Int    imm8 = 0;
+         IRTemp eV   = newTemp(Ity_V256);
+         IRTemp vV   = newTemp(Ity_V256);
+         UInt  modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         assign( vV, getYMMReg(rV) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getYMMReg(rE) );
+            imm8 = (Int)getUChar(delta+1);
+            delta += 1+1;
+            DIP("vshufps $%d,%s,%s,%s\n",
+                imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( eV, loadLE(Ity_V256, mkexpr(addr)) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += 1+alen;
+            DIP("vshufps $%d,%s,%s,%s\n",
+                imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
+         }
+         IRTemp res = math_SHUFPS_256( eV, vV, imm8 );
+         putYMMReg( rG, mkexpr(res) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VSHUFPD imm8, xmm3/m128, xmm2, xmm1 */
+      /* = VEX.NDS.128.66.0F.WIG C6 /r ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         Int    imm8 = 0;
+         IRTemp eV   = newTemp(Ity_V128);
+         IRTemp vV   = newTemp(Ity_V128);
+         UInt  modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         assign( vV, getXMMReg(rV) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getXMMReg(rE) );
+            imm8 = (Int)getUChar(delta+1);
+            delta += 1+1;
+            DIP("vshufpd $%d,%s,%s,%s\n",
+                imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += 1+alen;
+            DIP("vshufpd $%d,%s,%s,%s\n",
+                imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+         }
+         IRTemp res = math_SHUFPD_128( eV, vV, imm8 );
+         putYMMRegLoAndZU( rG, mkexpr(res) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VSHUFPD imm8, ymm3/m256, ymm2, ymm1 */
+      /* = VEX.NDS.256.66.0F.WIG C6 /r ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         Int    imm8 = 0;
+         IRTemp eV   = newTemp(Ity_V256);
+         IRTemp vV   = newTemp(Ity_V256);
+         UInt  modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         UInt  rV    = getVexNvvvv(pfx);
+         assign( vV, getYMMReg(rV) );
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( eV, getYMMReg(rE) );
+            imm8 = (Int)getUChar(delta+1);
+            delta += 1+1;
+            DIP("vshufpd $%d,%s,%s,%s\n",
+                imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( eV, loadLE(Ity_V256, mkexpr(addr)) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += 1+alen;
+            DIP("vshufpd $%d,%s,%s,%s\n",
+                imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
+         }
+         IRTemp res = math_SHUFPD_256( eV, vV, imm8 );
+         putYMMReg( rG, mkexpr(res) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xD0:
+      /* VADDSUBPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG D0 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vaddsubpd", math_ADDSUBPD_128 );
+         goto decode_success;
+      }
+      /* VADDSUBPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG D0 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vaddsubpd", math_ADDSUBPD_256 );
+         goto decode_success;
+      }
+      /* VADDSUBPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.F2.0F.WIG D0 /r */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vaddsubps", math_ADDSUBPS_128 );
+         goto decode_success;
+      }
+      /* VADDSUBPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.F2.0F.WIG D0 /r */
+      if (haveF2no66noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vaddsubps", math_ADDSUBPS_256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD1:
+      /* VPSRLW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG D1 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_shiftV_byE( vbi, pfx, delta,
+                                        "vpsrlw", Iop_ShrN16x8 );
+         *uses_vvvv = True;
+         goto decode_success;
+                        
+      }
+      /* VPSRLW xmm3/m128, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG D1 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_shiftV_byE( vbi, pfx, delta,
+                                        "vpsrlw", Iop_ShrN16x16 );
+         *uses_vvvv = True;
+         goto decode_success;
+                        
+      }
+      break;
+
+   case 0xD2:
+      /* VPSRLD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG D2 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_shiftV_byE( vbi, pfx, delta,
+                                        "vpsrld", Iop_ShrN32x4 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPSRLD xmm3/m128, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG D2 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_shiftV_byE( vbi, pfx, delta,
+                                        "vpsrld", Iop_ShrN32x8 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xD3:
+      /* VPSRLQ xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG D3 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_shiftV_byE( vbi, pfx, delta,
+                                        "vpsrlq", Iop_ShrN64x2 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPSRLQ xmm3/m128, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG D3 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_shiftV_byE( vbi, pfx, delta,
+                                        "vpsrlq", Iop_ShrN64x4 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xD4:
+      /* VPADDQ r/m, rV, r ::: r = rV + r/m */
+      /* VPADDQ = VEX.NDS.128.66.0F.WIG D4 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpaddq", Iop_Add64x2 );
+         goto decode_success;
+      }
+      /* VPADDQ r/m, rV, r ::: r = rV + r/m */
+      /* VPADDQ = VEX.NDS.256.66.0F.WIG D4 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpaddq", Iop_Add64x4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD5:
+      /* VPMULLW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG D5 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpmullw", Iop_Mul16x8 );
+         goto decode_success;
+      }
+      /* VPMULLW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG D5 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpmullw", Iop_Mul16x16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD6:
+      /* I can't even find any Intel docs for this one. */
+      /* Basically: 66 0F D6 = MOVQ -- move 64 bits from G (lo half
+         xmm) to E (mem or lo half xmm).  Looks like L==0(128), W==0
+         (WIG, maybe?) */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*this might be redundant, dunno*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx,modrm);
+         if (epartIsReg(modrm)) {
+            /* fall through, awaiting test case */
+            /* dst: lo half copied, hi half zeroed */
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            storeLE( mkexpr(addr), getXMMRegLane64( rG, 0 ));
+            DIP("vmovq %s,%s\n", nameXMMReg(rG), dis_buf );
+            delta += alen;
+            goto decode_success;
+         }
+      }
+      break;
+
+   case 0xD7:
+      /* VEX.128.66.0F.WIG D7 /r = VPMOVMSKB xmm1, r32 */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVMSKB_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VEX.256.66.0F.WIG D7 /r = VPMOVMSKB ymm1, r32 */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVMSKB_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD8:
+      /* VPSUBUSB xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG D8 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpsubusb", Iop_QSub8Ux16 );
+         goto decode_success;
+      }
+      /* VPSUBUSB ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG D8 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpsubusb", Iop_QSub8Ux32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xD9:
+      /* VPSUBUSW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG D9 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpsubusw", Iop_QSub16Ux8 );
+         goto decode_success;
+      }
+      /* VPSUBUSW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG D9 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpsubusw", Iop_QSub16Ux16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDA:
+      /* VPMINUB xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG DA /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpminub", Iop_Min8Ux16 );
+         goto decode_success;
+      }
+      /* VPMINUB ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG DA /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpminub", Iop_Min8Ux32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDB:
+      /* VPAND r/m, rV, r ::: r = rV & r/m */
+      /* VEX.NDS.128.66.0F.WIG DB /r = VPAND xmm3/m128, xmm2, xmm1 */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpand", Iop_AndV128 );
+         goto decode_success;
+      }
+      /* VPAND r/m, rV, r ::: r = rV & r/m */
+      /* VEX.NDS.256.66.0F.WIG DB /r = VPAND ymm3/m256, ymm2, ymm1 */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpand", Iop_AndV256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDC:
+      /* VPADDUSB xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG DC /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpaddusb", Iop_QAdd8Ux16 );
+         goto decode_success;
+      }
+      /* VPADDUSB ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG DC /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpaddusb", Iop_QAdd8Ux32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDD:
+      /* VPADDUSW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG DD /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpaddusw", Iop_QAdd16Ux8 );
+         goto decode_success;
+      }
+      /* VPADDUSW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG DD /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpaddusw", Iop_QAdd16Ux16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDE:
+      /* VPMAXUB xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG DE /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxub", Iop_Max8Ux16 );
+         goto decode_success;
+      }
+      /* VPMAXUB ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG DE /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxub", Iop_Max8Ux32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xDF:
+      /* VPANDN r/m, rV, r ::: r = rV & ~r/m (is that correct, re the ~ ?) */
+      /* VEX.NDS.128.66.0F.WIG DF /r = VPANDN xmm3/m128, xmm2, xmm1 */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpandn", Iop_AndV128,
+                    NULL, True/*invertLeftArg*/, False/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPANDN r/m, rV, r ::: r = rV & ~r/m (is that correct, re the ~ ?) */
+      /* VEX.NDS.256.66.0F.WIG DF /r = VPANDN ymm3/m256, ymm2, ymm1 */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpandn", Iop_AndV256,
+                    NULL, True/*invertLeftArg*/, False/*swapArgs*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE0:
+      /* VPAVGB xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG E0 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpavgb", Iop_Avg8Ux16 );
+         goto decode_success;
+      }
+      /* VPAVGB ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG E0 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpavgb", Iop_Avg8Ux32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE1:
+      /* VPSRAW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG E1 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_shiftV_byE( vbi, pfx, delta,
+                                        "vpsraw", Iop_SarN16x8 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPSRAW xmm3/m128, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG E1 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_shiftV_byE( vbi, pfx, delta,
+                                        "vpsraw", Iop_SarN16x16 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xE2:
+      /* VPSRAD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG E2 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_shiftV_byE( vbi, pfx, delta,
+                                        "vpsrad", Iop_SarN32x4 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPSRAD xmm3/m128, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG E2 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_shiftV_byE( vbi, pfx, delta,
+                                        "vpsrad", Iop_SarN32x8 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xE3:
+      /* VPAVGW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG E3 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpavgw", Iop_Avg16Ux8 );
+         goto decode_success;
+      }
+      /* VPAVGW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG E3 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpavgw", Iop_Avg16Ux16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE4:
+      /* VPMULHUW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG E4 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpmulhuw", Iop_MulHi16Ux8 );
+         goto decode_success;
+      }
+      /* VPMULHUW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG E4 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpmulhuw", Iop_MulHi16Ux16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE5:
+      /* VPMULHW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG E5 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpmulhw", Iop_MulHi16Sx8 );
+         goto decode_success;
+      }
+      /* VPMULHW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG E5 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpmulhw", Iop_MulHi16Sx16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE6:
+      /* VCVTDQ2PD xmm2/m64, xmm1 = VEX.128.F3.0F.WIG E6 /r */
+      if (haveF3no66noF2(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_CVTDQ2PD_128(vbi, pfx, delta, True/*isAvx*/);
+         goto decode_success;
+      }
+      /* VCVTDQ2PD xmm2/m128, ymm1 = VEX.256.F3.0F.WIG E6 /r */
+      if (haveF3no66noF2(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_CVTDQ2PD_256(vbi, pfx, delta);
+         goto decode_success;
+      }
+      /* VCVTTPD2DQ xmm2/m128, xmm1 = VEX.128.66.0F.WIG E6 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_CVTxPD2DQ_128(vbi, pfx, delta, True/*isAvx*/,
+                                   True/*r2zero*/);
+         goto decode_success;
+      }
+      /* VCVTTPD2DQ ymm2/m256, xmm1 = VEX.256.66.0F.WIG E6 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_CVTxPD2DQ_256(vbi, pfx, delta, True/*r2zero*/);
+         goto decode_success;
+      }
+      /* VCVTPD2DQ xmm2/m128, xmm1 = VEX.128.F2.0F.WIG E6 /r */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_CVTxPD2DQ_128(vbi, pfx, delta, True/*isAvx*/,
+                                   False/*!r2zero*/);
+         goto decode_success;
+      }
+      /* VCVTPD2DQ ymm2/m256, xmm1 = VEX.256.F2.0F.WIG E6 /r */
+      if (haveF2no66noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_CVTxPD2DQ_256(vbi, pfx, delta, False/*!r2zero*/);
+         goto decode_success;
+      }
+      break;
+
+   case 0xE7:
+      /* VMOVNTDQ xmm1, m128 = VEX.128.66.0F.WIG E7 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar modrm = getUChar(delta);
+         UInt rG     = gregOfRexRM(pfx,modrm);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            storeLE( mkexpr(addr), getXMMReg(rG) );
+            DIP("vmovntdq %s,%s\n", dis_buf, nameXMMReg(rG));
+            delta += alen;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      /* VMOVNTDQ ymm1, m256 = VEX.256.66.0F.WIG E7 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar modrm = getUChar(delta);
+         UInt rG     = gregOfRexRM(pfx,modrm);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_32_aligned( addr );
+            storeLE( mkexpr(addr), getYMMReg(rG) );
+            DIP("vmovntdq %s,%s\n", dis_buf, nameYMMReg(rG));
+            delta += alen;
+            goto decode_success;
+         }
+         /* else fall through */
+      }
+      break;
+
+   case 0xE8:
+      /* VPSUBSB xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG E8 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpsubsb", Iop_QSub8Sx16 );
+         goto decode_success;
+      }
+      /* VPSUBSB ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG E8 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpsubsb", Iop_QSub8Sx32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xE9:
+      /* VPSUBSW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG E9 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpsubsw", Iop_QSub16Sx8 );
+         goto decode_success;
+      }
+      /* VPSUBSW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG E9 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpsubsw", Iop_QSub16Sx16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEA:
+      /* VPMINSW r/m, rV, r ::: r = min-signed16s(rV, r/m) */
+      /* VPMINSW = VEX.NDS.128.66.0F.WIG EA /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminsw", Iop_Min16Sx8 );
+         goto decode_success;
+      }
+      /* VPMINSW r/m, rV, r ::: r = min-signed16s(rV, r/m) */
+      /* VPMINSW = VEX.NDS.256.66.0F.WIG EA /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminsw", Iop_Min16Sx16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEB:
+      /* VPOR r/m, rV, r ::: r = rV | r/m */
+      /* VPOR = VEX.NDS.128.66.0F.WIG EB /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpor", Iop_OrV128 );
+         goto decode_success;
+      }
+      /* VPOR r/m, rV, r ::: r = rV | r/m */
+      /* VPOR = VEX.NDS.256.66.0F.WIG EB /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpor", Iop_OrV256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEC:
+      /* VPADDSB xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG EC /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpaddsb", Iop_QAdd8Sx16 );
+         goto decode_success;
+      }
+      /* VPADDSB ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG EC /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpaddsb", Iop_QAdd8Sx32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xED:
+      /* VPADDSW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG ED /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpaddsw", Iop_QAdd16Sx8 );
+         goto decode_success;
+      }
+      /* VPADDSW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG ED /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_V_to_G(
+                    uses_vvvv, vbi, pfx, delta, "vpaddsw", Iop_QAdd16Sx16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEE:
+      /* VPMAXSW r/m, rV, r ::: r = max-signed16s(rV, r/m) */
+      /* VPMAXSW = VEX.NDS.128.66.0F.WIG EE /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxsw", Iop_Max16Sx8 );
+         goto decode_success;
+      }
+      /* VPMAXSW r/m, rV, r ::: r = max-signed16s(rV, r/m) */
+      /* VPMAXSW = VEX.NDS.256.66.0F.WIG EE /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxsw", Iop_Max16Sx16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xEF:
+      /* VPXOR r/m, rV, r ::: r = rV ^ r/m */
+      /* VPXOR = VEX.NDS.128.66.0F.WIG EF /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpxor", Iop_XorV128 );
+         goto decode_success;
+      }
+      /* VPXOR r/m, rV, r ::: r = rV ^ r/m */
+      /* VPXOR = VEX.NDS.256.66.0F.WIG EF /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpxor", Iop_XorV256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF0:
+      /* VLDDQU m256, ymm1 = VEX.256.F2.0F.WIG F0 /r */
+      if (haveF2no66noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp tD    = newTemp(Ity_V256);
+         if (epartIsReg(modrm)) break;
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         assign(tD, loadLE(Ity_V256, mkexpr(addr)));
+         DIP("vlddqu %s,%s\n", dis_buf, nameYMMReg(rD));
+         putYMMReg(rD, mkexpr(tD));
+         goto decode_success;
+      }
+      /* VLDDQU m128, xmm1 = VEX.128.F2.0F.WIG F0 /r */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp tD    = newTemp(Ity_V128);
+         if (epartIsReg(modrm)) break;
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         assign(tD, loadLE(Ity_V128, mkexpr(addr)));
+         DIP("vlddqu %s,%s\n", dis_buf, nameXMMReg(rD));
+         putYMMRegLoAndZU(rD, mkexpr(tD));
+         goto decode_success;
+      }
+      break;
+
+   case 0xF1:
+      /* VPSLLW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG F1 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_shiftV_byE( vbi, pfx, delta,
+                                        "vpsllw", Iop_ShlN16x8 );
+         *uses_vvvv = True;
+         goto decode_success;
+                        
+      }
+      /* VPSLLW xmm3/m128, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG F1 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_shiftV_byE( vbi, pfx, delta,
+                                        "vpsllw", Iop_ShlN16x16 );
+         *uses_vvvv = True;
+         goto decode_success;
+                        
+      }
+      break;
+
+   case 0xF2:
+      /* VPSLLD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG F2 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_shiftV_byE( vbi, pfx, delta,
+                                        "vpslld", Iop_ShlN32x4 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPSLLD xmm3/m128, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG F2 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_shiftV_byE( vbi, pfx, delta,
+                                        "vpslld", Iop_ShlN32x8 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xF3:
+      /* VPSLLQ xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG F3 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_shiftV_byE( vbi, pfx, delta,
+                                        "vpsllq", Iop_ShlN64x2 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPSLLQ xmm3/m128, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG F3 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_shiftV_byE( vbi, pfx, delta,
+                                        "vpsllq", Iop_ShlN64x4 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xF4:
+      /* VPMULUDQ xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG F4 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpmuludq", math_PMULUDQ_128 );
+         goto decode_success;
+      }
+      /* VPMULUDQ ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG F4 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpmuludq", math_PMULUDQ_256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF5:
+      /* VPMADDWD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG F5 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpmaddwd", math_PMADDWD_128 );
+         goto decode_success;
+      }
+      /* VPMADDWD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG F5 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpmaddwd", math_PMADDWD_256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF6:
+      /* VPSADBW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F.WIG F6 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpsadbw", math_PSADBW_128 );
+         goto decode_success;
+      }
+      /* VPSADBW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F.WIG F6 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpsadbw", math_PSADBW_256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF7:
+      /* VMASKMOVDQU xmm2, xmm1 = VEX.128.66.0F.WIG F7 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && epartIsReg(getUChar(delta))) {
+         delta = dis_MASKMOVDQU( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF8:
+      /* VPSUBB r/m, rV, r ::: r = rV - r/m */
+      /* VPSUBB = VEX.NDS.128.66.0F.WIG F8 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpsubb", Iop_Sub8x16 );
+         goto decode_success;
+      }
+      /* VPSUBB r/m, rV, r ::: r = rV - r/m */
+      /* VPSUBB = VEX.NDS.256.66.0F.WIG F8 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpsubb", Iop_Sub8x32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF9:
+      /* VPSUBW r/m, rV, r ::: r = rV - r/m */
+      /* VPSUBW = VEX.NDS.128.66.0F.WIG F9 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpsubw", Iop_Sub16x8 );
+         goto decode_success;
+      }
+      /* VPSUBW r/m, rV, r ::: r = rV - r/m */
+      /* VPSUBW = VEX.NDS.256.66.0F.WIG F9 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpsubw", Iop_Sub16x16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFA:
+      /* VPSUBD r/m, rV, r ::: r = rV - r/m */
+      /* VPSUBD = VEX.NDS.128.66.0F.WIG FA /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpsubd", Iop_Sub32x4 );
+         goto decode_success;
+      }
+      /* VPSUBD r/m, rV, r ::: r = rV - r/m */
+      /* VPSUBD = VEX.NDS.256.66.0F.WIG FA /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpsubd", Iop_Sub32x8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFB:
+      /* VPSUBQ r/m, rV, r ::: r = rV - r/m */
+      /* VPSUBQ = VEX.NDS.128.66.0F.WIG FB /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpsubq", Iop_Sub64x2 );
+         goto decode_success;
+      }
+      /* VPSUBQ r/m, rV, r ::: r = rV - r/m */
+      /* VPSUBQ = VEX.NDS.256.66.0F.WIG FB /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpsubq", Iop_Sub64x4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFC:
+      /* VPADDB r/m, rV, r ::: r = rV + r/m */
+      /* VPADDB = VEX.NDS.128.66.0F.WIG FC /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpaddb", Iop_Add8x16 );
+         goto decode_success;
+      }
+      /* VPADDB r/m, rV, r ::: r = rV + r/m */
+      /* VPADDB = VEX.NDS.256.66.0F.WIG FC /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpaddb", Iop_Add8x32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFD:
+      /* VPADDW r/m, rV, r ::: r = rV + r/m */
+      /* VPADDW = VEX.NDS.128.66.0F.WIG FD /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpaddw", Iop_Add16x8 );
+         goto decode_success;
+      }
+      /* VPADDW r/m, rV, r ::: r = rV + r/m */
+      /* VPADDW = VEX.NDS.256.66.0F.WIG FD /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpaddw", Iop_Add16x16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0xFE:
+      /* VPADDD r/m, rV, r ::: r = rV + r/m */
+      /* VPADDD = VEX.NDS.128.66.0F.WIG FE /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpaddd", Iop_Add32x4 );
+         goto decode_success;
+      }
+      /* VPADDD r/m, rV, r ::: r = rV + r/m */
+      /* VPADDD = VEX.NDS.256.66.0F.WIG FE /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpaddd", Iop_Add32x8 );
+         goto decode_success;
+      }
+      break;
+
+   default:
+      break;
+
+   }
+
+  //decode_failure:
+   return deltaIN;
+
+  decode_success:
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level post-escape decoders: dis_ESC_0F38__VEX    ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+static IRTemp math_PERMILPS_VAR_128 ( IRTemp dataV, IRTemp ctrlV )
+{
+   /* In the control vector, zero out all but the bottom two bits of
+      each 32-bit lane. */
+   IRExpr* cv1 = binop(Iop_ShrN32x4,
+                       binop(Iop_ShlN32x4, mkexpr(ctrlV), mkU8(30)),
+                       mkU8(30));
+   /* And use the resulting cleaned-up control vector as steering
+      in a Perm operation. */
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, binop(Iop_Perm32x4, mkexpr(dataV), cv1));
+   return res;
+}
+
+static IRTemp math_PERMILPS_VAR_256 ( IRTemp dataV, IRTemp ctrlV )
+{
+   IRTemp dHi, dLo, cHi, cLo;
+   dHi = dLo = cHi = cLo = IRTemp_INVALID;
+   breakupV256toV128s( dataV, &dHi, &dLo );
+   breakupV256toV128s( ctrlV, &cHi, &cLo );
+   IRTemp rHi = math_PERMILPS_VAR_128( dHi, cHi );
+   IRTemp rLo = math_PERMILPS_VAR_128( dLo, cLo );
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, binop(Iop_V128HLtoV256, mkexpr(rHi), mkexpr(rLo)));
+   return res;
+}
+
+static IRTemp math_PERMILPD_VAR_128 ( IRTemp dataV, IRTemp ctrlV )
+{
+   /* No cleverness here .. */
+   IRTemp dHi, dLo, cHi, cLo;
+   dHi = dLo = cHi = cLo = IRTemp_INVALID;
+   breakupV128to64s( dataV, &dHi, &dLo );
+   breakupV128to64s( ctrlV, &cHi, &cLo );
+   IRExpr* rHi
+      = IRExpr_ITE( unop(Iop_64to1,
+                         binop(Iop_Shr64, mkexpr(cHi), mkU8(1))),
+                    mkexpr(dHi), mkexpr(dLo) );
+   IRExpr* rLo
+      = IRExpr_ITE( unop(Iop_64to1,
+                         binop(Iop_Shr64, mkexpr(cLo), mkU8(1))),
+                    mkexpr(dHi), mkexpr(dLo) );
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, binop(Iop_64HLtoV128, rHi, rLo));
+   return res;
+}
+
+static IRTemp math_PERMILPD_VAR_256 ( IRTemp dataV, IRTemp ctrlV )
+{
+   IRTemp dHi, dLo, cHi, cLo;
+   dHi = dLo = cHi = cLo = IRTemp_INVALID;
+   breakupV256toV128s( dataV, &dHi, &dLo );
+   breakupV256toV128s( ctrlV, &cHi, &cLo );
+   IRTemp rHi = math_PERMILPD_VAR_128( dHi, cHi );
+   IRTemp rLo = math_PERMILPD_VAR_128( dLo, cLo );
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, binop(Iop_V128HLtoV256, mkexpr(rHi), mkexpr(rLo)));
+   return res;
+}
+
+static IRTemp math_VPERMD ( IRTemp ctrlV, IRTemp dataV )
+{
+   /* In the control vector, zero out all but the bottom three bits of
+      each 32-bit lane. */
+   IRExpr* cv1 = binop(Iop_ShrN32x8,
+                       binop(Iop_ShlN32x8, mkexpr(ctrlV), mkU8(29)),
+                       mkU8(29));
+   /* And use the resulting cleaned-up control vector as steering
+      in a Perm operation. */
+   IRTemp res = newTemp(Ity_V256);
+   assign(res, binop(Iop_Perm32x8, mkexpr(dataV), cv1));
+   return res;
+}
+
+static Long dis_SHIFTX ( /*OUT*/Bool* uses_vvvv,
+                         const VexAbiInfo* vbi, Prefix pfx, Long delta,
+                         const HChar* opname, IROp op8 )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   Int     size = getRexW(pfx) ? 8 : 4;
+   IRType  ty   = szToITy(size);
+   IRTemp  src  = newTemp(ty);
+   IRTemp  amt  = newTemp(ty);
+   UChar   rm   = getUChar(delta);
+
+   assign( amt, getIRegV(size,pfx) );
+   if (epartIsReg(rm)) {
+      assign( src, getIRegE(size,pfx,rm) );
+      DIP("%s %s,%s,%s\n", opname, nameIRegV(size,pfx),
+                           nameIRegE(size,pfx,rm), nameIRegG(size,pfx,rm));
+      delta++;
+   } else {
+      IRTemp addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+      assign( src, loadLE(ty, mkexpr(addr)) );
+      DIP("%s %s,%s,%s\n", opname, nameIRegV(size,pfx), dis_buf,
+                           nameIRegG(size,pfx,rm));
+      delta += alen;
+   }
+
+   putIRegG( size, pfx, rm,
+             binop(mkSizedOp(ty,op8), mkexpr(src),
+                   narrowTo(Ity_I8, binop(mkSizedOp(ty,Iop_And8), mkexpr(amt),
+                                          mkU(ty,8*size-1)))) );
+   /* Flags aren't modified.  */
+   *uses_vvvv = True;
+   return delta;
+}
+
+
/* Decode and translate one FMA3 instruction (VFMADD/VFMSUB/VFNMADD/
   VFNMSUB/VFMADDSUB/VFMSUBADD, 132/213/231 orderings, scalar or
   packed, F32 or F64, 128- or 256-bit).  'opc' is the second opcode
   byte (0x96..0xBF family); its low nibble selects the operation and
   its high nibble selects the operand ordering.  Returns the updated
   instruction-stream offset. */
static Long dis_FMA ( const VexAbiInfo* vbi, Prefix pfx, Long delta, UChar opc )
{
   UChar  modrm   = getUChar(delta);
   UInt   rG      = gregOfRexRM(pfx, modrm);
   UInt   rV      = getVexNvvvv(pfx);
   /* Scalar forms are the odd opcodes 9/B/D/F in the low nibble
      (the addsub/subadd forms 6/7 are packed-only). */
   Bool   scalar  = (opc & 0xF) > 7 && (opc & 1);
   IRType ty      = getRexW(pfx) ? Ity_F64 : Ity_F32;
   /* Vector type actually read/written: the scalar forms operate on a
      single F32/F64 lane; packed forms on a whole XMM or YMM. */
   IRType vty     = scalar ? ty : getVexL(pfx) ? Ity_V256 : Ity_V128;
   IRTemp vX      = newTemp(vty);
   IRTemp vY      = newTemp(vty);
   IRTemp vZ      = newTemp(vty);
   /* Per-lane expression arrays; at most 8 F32 lanes in a YMM. */
   IRExpr *x[8], *y[8], *z[8];
   IRTemp addr    = IRTemp_INVALID;
   HChar  dis_buf[50];
   Int    alen    = 0;
   const HChar *name;
   const HChar *suffix;
   const HChar *order;
   Bool   negateRes   = False;
   Bool   negateZeven = False;
   Bool   negateZodd  = False;
   Int    i, j;
   Int    count;
   /* 64-bit chunk extractors: ops[0..3] pull the four quadwords of a
      V256, ops[4..5] the two quadwords of a V128.  'j' below selects
      which sub-array to index. */
   static IROp ops[] = { Iop_V256to64_0, Iop_V256to64_1,
                         Iop_V256to64_2, Iop_V256to64_3,
                         Iop_V128to64, Iop_V128HIto64 };

   /* Low nibble of opc: which FMA family.  The negate flags record,
      per even/odd lane, whether the addend z is negated, and whether
      the whole result is negated (the VFNM* forms). */
   switch (opc & 0xF) {
   case 0x6:
      name = "addsub";
      negateZeven = True;
      break;
   case 0x7:
      name = "subadd";
      negateZodd = True;
      break;
   case 0x8:
   case 0x9:
      name = "add";
      break;
   case 0xA:
   case 0xB:
      name = "sub";
      negateZeven = True;
      negateZodd = True;
      break;
   case 0xC:
   case 0xD:
      name = "add";
      negateRes = True;
      negateZeven = True;
      negateZodd = True;
      break;
   case 0xE:
   case 0xF:
      name = "sub";
      negateRes = True;
      break;
   default:
      vpanic("dis_FMA(amd64)");
      break;
   }
   /* High nibble: operand ordering (which operands are multiplied and
      which is the addend). */
   switch (opc & 0xF0) {
   case 0x90: order = "132"; break;
   case 0xA0: order = "213"; break;
   case 0xB0: order = "231"; break;
   default: vpanic("dis_FMA(amd64)"); break;
   }
   if (scalar)
      suffix = ty == Ity_F64 ? "sd" : "ss";
   else
      suffix = ty == Ity_F64 ? "pd" : "ps";

   /* Fetch the two register operands: G (dest, also a source) and V
      (the vvvv-encoded source). */
   if (scalar) {
      assign( vX, ty == Ity_F64
                  ? getXMMRegLane64F(rG, 0) : getXMMRegLane32F(rG, 0) );
      assign( vZ, ty == Ity_F64
                  ? getXMMRegLane64F(rV, 0) : getXMMRegLane32F(rV, 0) );
   } else {
      assign( vX, vty == Ity_V256 ? getYMMReg(rG) : getXMMReg(rG) );
      assign( vZ, vty == Ity_V256 ? getYMMReg(rV) : getXMMReg(rV) );
   }

   /* Third operand Y comes from the rm field: register or memory. */
   if (epartIsReg(modrm)) {
      UInt rE = eregOfRexRM(pfx, modrm);
      delta += 1;
      if (scalar)
         assign( vY, ty == Ity_F64
                     ? getXMMRegLane64F(rE, 0) : getXMMRegLane32F(rE, 0) );
      else
         assign( vY, vty == Ity_V256 ? getYMMReg(rE) : getXMMReg(rE) );
      if (vty == Ity_V256) {
         DIP("vf%sm%s%s%s %s,%s,%s\n", negateRes ? "n" : "",
             name, order, suffix, nameYMMReg(rE), nameYMMReg(rV),
             nameYMMReg(rG));
      } else {
         DIP("vf%sm%s%s%s %s,%s,%s\n", negateRes ? "n" : "",
             name, order, suffix, nameXMMReg(rE), nameXMMReg(rV),
             nameXMMReg(rG));
      }
   } else {
      addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
      delta += alen;
      assign(vY, loadLE(vty, mkexpr(addr)));
      if (vty == Ity_V256) {
         DIP("vf%sm%s%s%s %s,%s,%s\n", negateRes ? "n" : "",
             name, order, suffix, dis_buf, nameYMMReg(rV),
             nameYMMReg(rG));
      } else {
         DIP("vf%sm%s%s%s %s,%s,%s\n", negateRes ? "n" : "",
             name, order, suffix, dis_buf, nameXMMReg(rV),
             nameXMMReg(rG));
      }
   }

   /* vX/vY/vZ now in 132 order.  If it is different order, swap the
      arguments.  */
   /* Note this swaps the IRTemp handles themselves, not IR values;
      after this, x*y+z (computed below) matches the encoded order. */
   if ((opc & 0xF0) != 0x90) {
      IRTemp tem = vX;
      if ((opc & 0xF0) == 0xA0) {
         vX = vZ;
         vZ = vY;
         vY = tem;
      } else {
         vX = vZ;
         vZ = tem;
      }
   }

   /* Break the operands into per-lane expressions x[i], y[i], z[i]. */
   if (scalar) {
      count = 1;
      x[0] = mkexpr(vX);
      y[0] = mkexpr(vY);
      z[0] = mkexpr(vZ);
   } else if (ty == Ity_F32) {
      /* F32 lanes come out two at a time from each 64-bit chunk. */
      count = vty == Ity_V256 ? 8 : 4;
      j = vty == Ity_V256 ? 0 : 4;
      for (i = 0; i < count; i += 2) {
         IRTemp tem = newTemp(Ity_I64);
         assign(tem, unop(ops[i / 2 + j], mkexpr(vX)));
         x[i] = unop(Iop_64to32, mkexpr(tem));
         x[i + 1] = unop(Iop_64HIto32, mkexpr(tem));
         tem = newTemp(Ity_I64);
         assign(tem, unop(ops[i / 2 + j], mkexpr(vY)));
         y[i] = unop(Iop_64to32, mkexpr(tem));
         y[i + 1] = unop(Iop_64HIto32, mkexpr(tem));
         tem = newTemp(Ity_I64);
         assign(tem, unop(ops[i / 2 + j], mkexpr(vZ)));
         z[i] = unop(Iop_64to32, mkexpr(tem));
         z[i + 1] = unop(Iop_64HIto32, mkexpr(tem));
      }
   } else {
      /* F64: one lane per 64-bit chunk. */
      count = vty == Ity_V256 ? 4 : 2;
      j = vty == Ity_V256 ? 0 : 4;
      for (i = 0; i < count; i++) {
         x[i] = unop(ops[i + j], mkexpr(vX));
         y[i] = unop(ops[i + j], mkexpr(vY));
         z[i] = unop(ops[i + j], mkexpr(vZ));
      }
   }
   /* Packed lanes were extracted as integers; reinterpret as floats. */
   if (!scalar)
      for (i = 0; i < count; i++) {
         IROp op = ty == Ity_F64
                   ? Iop_ReinterpI64asF64 : Iop_ReinterpI32asF32;
         x[i] = unop(op, x[i]);
         y[i] = unop(op, y[i]);
         z[i] = unop(op, z[i]);
      }
   /* Per lane: optionally negate z, fuse-multiply-add, optionally
      negate the result, and write it back into the dest lane. */
   for (i = 0; i < count; i++) {
      if ((i & 1) ? negateZodd : negateZeven)
         z[i] = unop(ty == Ity_F64 ? Iop_NegF64 : Iop_NegF32, z[i]);
      /* NOTE(review): uses a fake/default rounding mode rather than
         the guest MXCSR rounding mode -- presumably a known VEX
         approximation; confirm against the helper's definition. */
      x[i] = IRExpr_Qop(ty == Ity_F64 ? Iop_MAddF64 : Iop_MAddF32,
                        get_FAKE_roundingmode(), x[i], y[i], z[i]);
      if (negateRes)
         x[i] = unop(ty == Ity_F64 ? Iop_NegF64 : Iop_NegF32, x[i]);
      if (ty == Ity_F64)
         putYMMRegLane64F( rG, i, x[i] );
      else
         putYMMRegLane32F( rG, i, x[i] );
   }
   /* 128-bit (and scalar) forms zero the upper half of the YMM dest. */
   if (vty != Ity_V256)
      putYMMRegLane128( rG, 1, mkV128(0) );

   return delta;
}
+
+
+/* Masked load or masked store. */
+static ULong dis_VMASKMOV ( Bool *uses_vvvv, const VexAbiInfo* vbi,
+                            Prefix pfx, Long delta,
+                            const HChar* opname, Bool isYMM, IRType ty,
+                            Bool isLoad )
+{
+   HChar   dis_buf[50];
+   Int     alen, i;
+   IRTemp  addr;
+   UChar   modrm = getUChar(delta);
+   UInt    rG    = gregOfRexRM(pfx,modrm);
+   UInt    rV    = getVexNvvvv(pfx);
+
+   addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+   delta += alen;
+
+   /**/ if (isLoad && isYMM) {
+      DIP("%s %s,%s,%s\n", opname, dis_buf, nameYMMReg(rV), nameYMMReg(rG) );
+   }
+   else if (isLoad && !isYMM) {
+      DIP("%s %s,%s,%s\n", opname, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
+   }
+
+   else if (!isLoad && isYMM) {
+      DIP("%s %s,%s,%s\n", opname, nameYMMReg(rG), nameYMMReg(rV), dis_buf );
+   }
+   else {
+      vassert(!isLoad && !isYMM);
+      DIP("%s %s,%s,%s\n", opname, nameXMMReg(rG), nameXMMReg(rV), dis_buf );
+   }
+
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   Bool laneIs32 = ty == Ity_I32;
+
+   Int nLanes = (isYMM ? 2 : 1) * (laneIs32 ? 4 : 2);
+
+   for (i = 0; i < nLanes; i++) {
+      IRExpr* shAmt = laneIs32 ? mkU8(31)    : mkU8(63);
+      IRExpr* one   = laneIs32 ? mkU32(1)    : mkU64(1);
+      IROp    opSHR = laneIs32 ? Iop_Shr32   : Iop_Shr64;
+      IROp    opEQ  = laneIs32 ? Iop_CmpEQ32 : Iop_CmpEQ64;
+      IRExpr* lane  = (laneIs32 ? getYMMRegLane32 : getYMMRegLane64)( rV, i );
+
+      IRTemp  cond = newTemp(Ity_I1);
+      assign(cond, binop(opEQ, binop(opSHR, lane, shAmt), one));
+
+      IRTemp  data = newTemp(ty);
+      IRExpr* ea   = binop(Iop_Add64, mkexpr(addr),
+                                      mkU64(i * (laneIs32 ? 4 : 8)));
+      if (isLoad) {
+         stmt(
+            IRStmt_LoadG(
+               Iend_LE, laneIs32 ? ILGop_Ident32 : ILGop_Ident64,
+               data, ea, laneIs32 ? mkU32(0) : mkU64(0), mkexpr(cond)
+         ));
+         (laneIs32 ? putYMMRegLane32 : putYMMRegLane64)( rG, i, mkexpr(data) );
+      } else {
+         assign(data, (laneIs32 ? getYMMRegLane32 : getYMMRegLane64)( rG, i ));
+         stmt( IRStmt_StoreG(Iend_LE, ea, mkexpr(data), mkexpr(cond)) );
+      }
+   }
+
+   if (isLoad && !isYMM)
+      putYMMRegLane128( rG, 1, mkV128(0) );
+
+   *uses_vvvv = True;
+   return delta;
+}
+
+
+/* Gather.  */
+static ULong dis_VGATHER ( Bool *uses_vvvv, const VexAbiInfo* vbi,
+                           Prefix pfx, Long delta,
+                           const HChar* opname, Bool isYMM,
+                           Bool isVM64x, IRType ty )
+{
+   HChar  dis_buf[50];
+   Int    alen, i, vscale, count1, count2;
+   IRTemp addr;
+   UChar  modrm = getUChar(delta);
+   UInt   rG    = gregOfRexRM(pfx,modrm);
+   UInt   rV    = getVexNvvvv(pfx);
+   UInt   rI;
+   IRType dstTy = (isYMM && (ty == Ity_I64 || !isVM64x)) ? Ity_V256 : Ity_V128;
+   IRType idxTy = (isYMM && (ty == Ity_I32 || isVM64x)) ? Ity_V256 : Ity_V128;
+   IRTemp cond;
+   addr = disAVSIBMode ( &alen, vbi, pfx, delta, dis_buf, &rI,
+                         idxTy, &vscale );
+   if (addr == IRTemp_INVALID || rI == rG || rI == rV || rG == rV)
+      return delta;
+   if (dstTy == Ity_V256) {
+      DIP("%s %s,%s,%s\n", opname, nameYMMReg(rV), dis_buf, nameYMMReg(rG) );
+   } else {
+      DIP("%s %s,%s,%s\n", opname, nameXMMReg(rV), dis_buf, nameXMMReg(rG) );
+   }
+   delta += alen;
+
+   if (ty == Ity_I32) {
+      count1 = isYMM ? 8 : 4;
+      count2 = isVM64x ? count1 / 2 : count1;
+   } else {
+      count1 = count2 = isYMM ? 4 : 2;
+   }
+
+   /* First update the mask register to copies of the sign bit.  */
+   if (ty == Ity_I32) {
+      if (isYMM)
+         putYMMReg( rV, binop(Iop_SarN32x8, getYMMReg( rV ), mkU8(31)) );
+      else
+         putYMMRegLoAndZU( rV, binop(Iop_SarN32x4, getXMMReg( rV ), mkU8(31)) );
+   } else {
+      for (i = 0; i < count1; i++) {
+         putYMMRegLane64( rV, i, binop(Iop_Sar64, getYMMRegLane64( rV, i ),
+                                       mkU8(63)) );
+      }
+   }
+
+   /* Next gather the individual elements.  If any fault occurs, the
+      corresponding mask element will be set and the loop stops.  */
+   for (i = 0; i < count2; i++) {
+      IRExpr *expr, *addr_expr;
+      cond = newTemp(Ity_I1);
+      assign( cond, 
+              binop(ty == Ity_I32 ? Iop_CmpLT32S : Iop_CmpLT64S,
+                    ty == Ity_I32 ? getYMMRegLane32( rV, i )
+                                  : getYMMRegLane64( rV, i ),
+                    mkU(ty, 0)) );
+      expr = ty == Ity_I32 ? getYMMRegLane32( rG, i )
+                           : getYMMRegLane64( rG, i );
+      addr_expr = isVM64x ? getYMMRegLane64( rI, i )
+                          : unop(Iop_32Sto64, getYMMRegLane32( rI, i ));
+      switch (vscale) {
+         case 2: addr_expr = binop(Iop_Shl64, addr_expr, mkU8(1)); break;
+         case 4: addr_expr = binop(Iop_Shl64, addr_expr, mkU8(2)); break;
+         case 8: addr_expr = binop(Iop_Shl64, addr_expr, mkU8(3)); break;
+         default: break;
+      }
+      addr_expr = binop(Iop_Add64, mkexpr(addr), addr_expr);
+      addr_expr = handleAddrOverrides(vbi, pfx, addr_expr);
+      addr_expr = IRExpr_ITE(mkexpr(cond), addr_expr, getIReg64(R_RSP));
+      expr = IRExpr_ITE(mkexpr(cond), loadLE(ty, addr_expr), expr);
+      if (ty == Ity_I32) {
+         putYMMRegLane32( rG, i, expr );
+         putYMMRegLane32( rV, i, mkU32(0) );
+      } else {
+         putYMMRegLane64( rG, i, expr);
+         putYMMRegLane64( rV, i, mkU64(0) );
+      }
+   }
+
+   if (!isYMM || (ty == Ity_I32 && isVM64x)) {
+      if (ty == Ity_I64 || isYMM)
+         putYMMRegLane128( rV, 1, mkV128(0) );
+      else if (ty == Ity_I32 && count2 == 2) {
+         putYMMRegLane64( rV, 1, mkU64(0) );
+         putYMMRegLane64( rG, 1, mkU64(0) );
+      }
+      putYMMRegLane128( rG, 1, mkV128(0) );
+   }
+
+   *uses_vvvv = True;
+   return delta;
+}
+
+
+__attribute__((noinline))
+static
+Long dis_ESC_0F38__VEX (
+        /*MB_OUT*/DisResult* dres,
+        /*OUT*/   Bool*      uses_vvvv,
+        Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+        Bool         resteerCisOk,
+        void*        callback_opaque,
+        const VexArchInfo* archinfo,
+        const VexAbiInfo*  vbi,
+        Prefix pfx, Int sz, Long deltaIN 
+     )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   *uses_vvvv = False;
+
+   switch (opc) {
+
+   case 0x00:
+      /* VPSHUFB r/m, rV, r ::: r = shuf(rV, r/m) */
+      /* VPSHUFB = VEX.NDS.128.66.0F38.WIG 00 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpshufb", math_PSHUFB_XMM );
+         goto decode_success;
+      }
+      /* VPSHUFB r/m, rV, r ::: r = shuf(rV, r/m) */
+      /* VPSHUFB = VEX.NDS.256.66.0F38.WIG 00 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpshufb", math_PSHUFB_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x01:
+   case 0x02:
+   case 0x03:
+      /* VPHADDW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 01 /r */
+      /* VPHADDD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 02 /r */
+      /* VPHADDSW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 03 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PHADD_128( vbi, pfx, delta, True/*isAvx*/, opc );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPHADDW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 01 /r */
+      /* VPHADDD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 02 /r */
+      /* VPHADDSW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 03 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PHADD_256( vbi, pfx, delta, opc );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x04:
+      /* VPMADDUBSW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 04 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpmaddubsw",
+                    math_PMADDUBSW_128 );
+         goto decode_success;
+      }
+      /* VPMADDUBSW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 04 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpmaddubsw",
+                    math_PMADDUBSW_256 );
+         goto decode_success;
+      }
+      break;
+      
+   case 0x05:
+   case 0x06:
+   case 0x07:
+      /* VPHSUBW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 05 /r */
+      /* VPHSUBD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 06 /r */
+      /* VPHSUBSW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 07 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PHADD_128( vbi, pfx, delta, True/*isAvx*/, opc );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPHSUBW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 05 /r */
+      /* VPHSUBD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 06 /r */
+      /* VPHSUBSW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 07 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PHADD_256( vbi, pfx, delta, opc );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x08:
+   case 0x09:
+   case 0x0A:
+      /* VPSIGNB xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 08 /r */
+      /* VPSIGNW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 09 /r */
+      /* VPSIGND xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 0A /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         IRTemp sV      = newTemp(Ity_V128);
+         IRTemp dV      = newTemp(Ity_V128);
+         IRTemp sHi, sLo, dHi, dLo;
+         sHi = sLo = dHi = dLo = IRTemp_INVALID;
+         HChar  ch      = '?';
+         Int    laneszB = 0;
+         UChar  modrm   = getUChar(delta);
+         UInt   rG      = gregOfRexRM(pfx,modrm);
+         UInt   rV      = getVexNvvvv(pfx);
+
+         switch (opc) {
+            case 0x08: laneszB = 1; ch = 'b'; break;
+            case 0x09: laneszB = 2; ch = 'w'; break;
+            case 0x0A: laneszB = 4; ch = 'd'; break;
+            default: vassert(0);
+         }
+
+         assign( dV, getXMMReg(rV) );
+
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            delta += 1;
+            DIP("vpsign%c %s,%s,%s\n", ch, nameXMMReg(rE),
+                nameXMMReg(rV), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("vpsign%c %s,%s,%s\n", ch, dis_buf,
+                nameXMMReg(rV), nameXMMReg(rG));
+         }
+
+         breakupV128to64s( dV, &dHi, &dLo );
+         breakupV128to64s( sV, &sHi, &sLo );
+
+         putYMMRegLoAndZU(
+            rG,
+            binop(Iop_64HLtoV128,
+                  dis_PSIGN_helper( mkexpr(sHi), mkexpr(dHi), laneszB ),
+                  dis_PSIGN_helper( mkexpr(sLo), mkexpr(dLo), laneszB )
+            )
+         );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPSIGNB ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 08 /r */
+      /* VPSIGNW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 09 /r */
+      /* VPSIGND ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 0A /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         IRTemp sV      = newTemp(Ity_V256);
+         IRTemp dV      = newTemp(Ity_V256);
+         IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+         s3 = s2 = s1 = s0 = IRTemp_INVALID;
+         d3 = d2 = d1 = d0 = IRTemp_INVALID;
+         UChar  ch      = '?';
+         Int    laneszB = 0;
+         UChar  modrm   = getUChar(delta);
+         UInt   rG      = gregOfRexRM(pfx,modrm);
+         UInt   rV      = getVexNvvvv(pfx);
+
+         switch (opc) {
+            case 0x08: laneszB = 1; ch = 'b'; break;
+            case 0x09: laneszB = 2; ch = 'w'; break;
+            case 0x0A: laneszB = 4; ch = 'd'; break;
+            default: vassert(0);
+         }
+
+         assign( dV, getYMMReg(rV) );
+
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getYMMReg(rE) );
+            delta += 1;
+            DIP("vpsign%c %s,%s,%s\n", ch, nameYMMReg(rE),
+                nameYMMReg(rV), nameYMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
+            delta += alen;
+            DIP("vpsign%c %s,%s,%s\n", ch, dis_buf,
+                nameYMMReg(rV), nameYMMReg(rG));
+         }
+
+         breakupV256to64s( dV, &d3, &d2, &d1, &d0 );
+         breakupV256to64s( sV, &s3, &s2, &s1, &s0 );
+
+         putYMMReg(
+            rG,
+            binop( Iop_V128HLtoV256,
+                   binop(Iop_64HLtoV128,
+                         dis_PSIGN_helper( mkexpr(s3), mkexpr(d3), laneszB ),
+                         dis_PSIGN_helper( mkexpr(s2), mkexpr(d2), laneszB )
+                   ),
+                   binop(Iop_64HLtoV128,
+                         dis_PSIGN_helper( mkexpr(s1), mkexpr(d1), laneszB ),
+                         dis_PSIGN_helper( mkexpr(s0), mkexpr(d0), laneszB )
+                   )
+            )
+         );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x0B:
+      /* VPMULHRSW xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 0B /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         IRTemp sV      = newTemp(Ity_V128);
+         IRTemp dV      = newTemp(Ity_V128);
+         IRTemp sHi, sLo, dHi, dLo;
+         sHi = sLo = dHi = dLo = IRTemp_INVALID;
+         UChar  modrm   = getUChar(delta);
+         UInt   rG      = gregOfRexRM(pfx,modrm);
+         UInt   rV      = getVexNvvvv(pfx);
+
+         assign( dV, getXMMReg(rV) );
+
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getXMMReg(rE) );
+            delta += 1;
+            DIP("vpmulhrsw %s,%s,%s\n", nameXMMReg(rE),
+                nameXMMReg(rV), nameXMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            delta += alen;
+            DIP("vpmulhrsw %s,%s,%s\n", dis_buf,
+                nameXMMReg(rV), nameXMMReg(rG));
+         }
+
+         breakupV128to64s( dV, &dHi, &dLo );
+         breakupV128to64s( sV, &sHi, &sLo );
+
+         putYMMRegLoAndZU(
+            rG,
+            binop(Iop_64HLtoV128,
+                  dis_PMULHRSW_helper( mkexpr(sHi), mkexpr(dHi) ),
+                  dis_PMULHRSW_helper( mkexpr(sLo), mkexpr(dLo) )
+            )
+         );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPMULHRSW ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 0B /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         IRTemp sV      = newTemp(Ity_V256);
+         IRTemp dV      = newTemp(Ity_V256);
+         IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+         s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+         UChar  modrm   = getUChar(delta);
+         UInt   rG      = gregOfRexRM(pfx,modrm);
+         UInt   rV      = getVexNvvvv(pfx);
+
+         assign( dV, getYMMReg(rV) );
+
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            assign( sV, getYMMReg(rE) );
+            delta += 1;
+            DIP("vpmulhrsw %s,%s,%s\n", nameYMMReg(rE),
+                nameYMMReg(rV), nameYMMReg(rG));
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
+            delta += alen;
+            DIP("vpmulhrsw %s,%s,%s\n", dis_buf,
+                nameYMMReg(rV), nameYMMReg(rG));
+         }
+
+         breakupV256to64s( dV, &d3, &d2, &d1, &d0 );
+         breakupV256to64s( sV, &s3, &s2, &s1, &s0 );
+
+         putYMMReg(
+            rG,
+            binop(Iop_V128HLtoV256,
+                  binop(Iop_64HLtoV128,
+                        dis_PMULHRSW_helper( mkexpr(s3), mkexpr(d3) ),
+                        dis_PMULHRSW_helper( mkexpr(s2), mkexpr(d2) ) ),
+                  binop(Iop_64HLtoV128,
+                        dis_PMULHRSW_helper( mkexpr(s1), mkexpr(d1) ),
+                        dis_PMULHRSW_helper( mkexpr(s0), mkexpr(d0) ) )
+            )
+         );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x0C:
+      /* VPERMILPS xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W0 0C /r */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp ctrlV = newTemp(Ity_V128);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            DIP("vpermilps %s,%s,%s\n",
+                nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+            assign(ctrlV, getXMMReg(rE));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpermilps %s,%s,%s\n",
+                dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+            assign(ctrlV, loadLE(Ity_V128, mkexpr(addr)));
+         }
+         IRTemp dataV = newTemp(Ity_V128);
+         assign(dataV, getXMMReg(rV));
+         IRTemp resV = math_PERMILPS_VAR_128(dataV, ctrlV);
+         putYMMRegLoAndZU(rG, mkexpr(resV));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPERMILPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 0C /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp ctrlV = newTemp(Ity_V256);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            DIP("vpermilps %s,%s,%s\n",
+                nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
+            assign(ctrlV, getYMMReg(rE));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpermilps %s,%s,%s\n",
+                dis_buf, nameYMMReg(rV), nameYMMReg(rG));
+            assign(ctrlV, loadLE(Ity_V256, mkexpr(addr)));
+         }
+         IRTemp dataV = newTemp(Ity_V256);
+         assign(dataV, getYMMReg(rV));
+         IRTemp resV = math_PERMILPS_VAR_256(dataV, ctrlV);
+         putYMMReg(rG, mkexpr(resV));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x0D:
+      /* VPERMILPD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W0 0D /r */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp ctrlV = newTemp(Ity_V128);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            DIP("vpermilpd %s,%s,%s\n",
+                nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+            assign(ctrlV, getXMMReg(rE));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpermilpd %s,%s,%s\n",
+                dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+            assign(ctrlV, loadLE(Ity_V128, mkexpr(addr)));
+         }
+         IRTemp dataV = newTemp(Ity_V128);
+         assign(dataV, getXMMReg(rV));
+         IRTemp resV = math_PERMILPD_VAR_128(dataV, ctrlV);
+         putYMMRegLoAndZU(rG, mkexpr(resV));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPERMILPD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 0D /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp ctrlV = newTemp(Ity_V256);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            DIP("vpermilpd %s,%s,%s\n",
+                nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
+            assign(ctrlV, getYMMReg(rE));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpermilpd %s,%s,%s\n",
+                dis_buf, nameYMMReg(rV), nameYMMReg(rG));
+            assign(ctrlV, loadLE(Ity_V256, mkexpr(addr)));
+         }
+         IRTemp dataV = newTemp(Ity_V256);
+         assign(dataV, getYMMReg(rV));
+         IRTemp resV = math_PERMILPD_VAR_256(dataV, ctrlV);
+         putYMMReg(rG, mkexpr(resV));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x0E:
+      /* VTESTPS xmm2/m128, xmm1 = VEX.128.66.0F38.WIG 0E /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_xTESTy_128( vbi, pfx, delta, True/*isAvx*/, 32 );
+         goto decode_success;
+      }
+      /* VTESTPS ymm2/m256, ymm1 = VEX.256.66.0F38.WIG 0E /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_xTESTy_256( vbi, pfx, delta, 32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x0F:
+      /* VTESTPD xmm2/m128, xmm1 = VEX.128.66.0F38.WIG 0F /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_xTESTy_128( vbi, pfx, delta, True/*isAvx*/, 64 );
+         goto decode_success;
+      }
+      /* VTESTPD ymm2/m256, ymm1 = VEX.256.66.0F38.WIG 0F /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_xTESTy_256( vbi, pfx, delta, 64 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x16:
+      /* VPERMPS ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 16 /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpermps", math_VPERMD );
+         goto decode_success;
+      }
+      break;
+
+   case 0x17:
+      /* VPTEST xmm2/m128, xmm1 = VEX.128.66.0F38.WIG 17 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_xTESTy_128( vbi, pfx, delta, True/*isAvx*/, 0 );
+         goto decode_success;
+      }
+      /* VPTEST ymm2/m256, ymm1 = VEX.256.66.0F38.WIG 17 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_xTESTy_256( vbi, pfx, delta, 0 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x18:
+      /* VBROADCASTSS m32, xmm1 = VEX.128.66.0F38.WIG 18 /r */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/
+          && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         DIP("vbroadcastss %s,%s\n", dis_buf, nameXMMReg(rG));
+         IRTemp t32 = newTemp(Ity_I32);
+         assign(t32, loadLE(Ity_I32, mkexpr(addr)));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = binop(Iop_64HLtoV128, mkexpr(t64), mkexpr(t64));
+         putYMMRegLoAndZU(rG, res);
+         goto decode_success;
+      }
+      /* VBROADCASTSS m32, ymm1 = VEX.256.66.0F38.WIG 18 /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/
+          && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         DIP("vbroadcastss %s,%s\n", dis_buf, nameYMMReg(rG));
+         IRTemp t32 = newTemp(Ity_I32);
+         assign(t32, loadLE(Ity_I32, mkexpr(addr)));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = IRExpr_Qop(Iop_64x4toV256, mkexpr(t64), mkexpr(t64),
+                                                  mkexpr(t64), mkexpr(t64));
+         putYMMReg(rG, res);
+         goto decode_success;
+      }
+      /* VBROADCASTSS xmm2, xmm1 = VEX.128.66.0F38.WIG 18 /r */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/
+          && epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rE    = eregOfRexRM(pfx, modrm);
+         DIP("vbroadcastss %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+         IRTemp t32 = newTemp(Ity_I32);
+         assign(t32, getXMMRegLane32(rE, 0));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = binop(Iop_64HLtoV128, mkexpr(t64), mkexpr(t64));
+         putYMMRegLoAndZU(rG, res);
+         delta++;
+         goto decode_success;
+      }
+      /* VBROADCASTSS xmm2, ymm1 = VEX.256.66.0F38.WIG 18 /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/
+          && epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rE    = eregOfRexRM(pfx, modrm);
+         DIP("vbroadcastss %s,%s\n", nameXMMReg(rE), nameYMMReg(rG));
+         IRTemp t32 = newTemp(Ity_I32);
+         assign(t32, getXMMRegLane32(rE, 0));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = IRExpr_Qop(Iop_64x4toV256, mkexpr(t64), mkexpr(t64),
+                                                  mkexpr(t64), mkexpr(t64));
+         putYMMReg(rG, res);
+         delta++;
+         goto decode_success;
+      }
+      break;
+
+   case 0x19:
+      /* VBROADCASTSD m64, ymm1 = VEX.256.66.0F38.WIG 19 /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/
+          && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         DIP("vbroadcastsd %s,%s\n", dis_buf, nameYMMReg(rG));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, loadLE(Ity_I64, mkexpr(addr)));
+         IRExpr* res = IRExpr_Qop(Iop_64x4toV256, mkexpr(t64), mkexpr(t64),
+                                                  mkexpr(t64), mkexpr(t64));
+         putYMMReg(rG, res);
+         goto decode_success;
+      }
+      /* VBROADCASTSD xmm2, ymm1 = VEX.256.66.0F38.WIG 19 /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/
+          && epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         UInt  rE    = eregOfRexRM(pfx, modrm);
+         DIP("vbroadcastsd %s,%s\n", nameXMMReg(rE), nameYMMReg(rG));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, getXMMRegLane64(rE, 0));
+         IRExpr* res = IRExpr_Qop(Iop_64x4toV256, mkexpr(t64), mkexpr(t64),
+                                                  mkexpr(t64), mkexpr(t64));
+         putYMMReg(rG, res);
+         delta++;
+         goto decode_success;
+      }
+      break;
+
+   case 0x1A:
+      /* VBROADCASTF128 m128, ymm1 = VEX.256.66.0F38.WIG 1A /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/
+          && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         DIP("vbroadcastf128 %s,%s\n", dis_buf, nameYMMReg(rG));
+         IRTemp t128 = newTemp(Ity_V128);
+         assign(t128, loadLE(Ity_V128, mkexpr(addr)));
+         putYMMReg( rG, binop(Iop_V128HLtoV256, mkexpr(t128), mkexpr(t128)) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x1C:
+      /* VPABSB xmm2/m128, xmm1 = VEX.128.66.0F38.WIG 1C /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_to_G_unary(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpabsb", math_PABS_XMM_pap1 );
+         goto decode_success;
+      }
+      /* VPABSB ymm2/m256, ymm1 = VEX.256.66.0F38.WIG 1C /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_to_G_unary(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpabsb", math_PABS_YMM_pap1 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x1D:
+      /* VPABSW xmm2/m128, xmm1 = VEX.128.66.0F38.WIG 1D /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_to_G_unary(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpabsw", math_PABS_XMM_pap2 );
+         goto decode_success;
+      }
+      /* VPABSW ymm2/m256, ymm1 = VEX.256.66.0F38.WIG 1D /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_to_G_unary(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpabsw", math_PABS_YMM_pap2 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x1E:
+      /* VPABSD xmm2/m128, xmm1 = VEX.128.66.0F38.WIG 1E /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AVX128_E_to_G_unary(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpabsd", math_PABS_XMM_pap4 );
+         goto decode_success;
+      }
+      /* VPABSD ymm2/m256, ymm1 = VEX.256.66.0F38.WIG 1E /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_AVX256_E_to_G_unary(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpabsd", math_PABS_YMM_pap4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x20:
+      /* VPMOVSXBW xmm2/m64, xmm1 */
+      /* VPMOVSXBW = VEX.128.66.0F38.WIG 20 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVxXBW_128( vbi, pfx, delta,
+                                   True/*isAvx*/, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      /* VPMOVSXBW xmm2/m128, ymm1 */
+      /* VPMOVSXBW = VEX.256.66.0F38.WIG 20 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVxXBW_256( vbi, pfx, delta, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x21:
+      /* VPMOVSXBD xmm2/m32, xmm1 */
+      /* VPMOVSXBD = VEX.128.66.0F38.WIG 21 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVxXBD_128( vbi, pfx, delta,
+                                   True/*isAvx*/, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      /* VPMOVSXBD xmm2/m64, ymm1 */
+      /* VPMOVSXBD = VEX.256.66.0F38.WIG 21 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVxXBD_256( vbi, pfx, delta, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x22:
+      /* VPMOVSXBQ xmm2/m16, xmm1 */
+      /* VPMOVSXBQ = VEX.128.66.0F38.WIG 22 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVSXBQ_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VPMOVSXBQ xmm2/m32, ymm1 */
+      /* VPMOVSXBQ = VEX.256.66.0F38.WIG 22 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVSXBQ_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      break;
+
+   case 0x23:
+      /* VPMOVSXWD xmm2/m64, xmm1 = VEX.128.66.0F38.WIG 23 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVxXWD_128( vbi, pfx, delta,
+                                   True/*isAvx*/, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      /* VPMOVSXWD xmm2/m128, ymm1 = VEX.256.66.0F38.WIG 23 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVxXWD_256( vbi, pfx, delta, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x24:
+      /* VPMOVSXWQ xmm2/m32, xmm1 = VEX.128.66.0F38.WIG 24 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVSXWQ_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VPMOVSXWQ xmm2/m64, ymm1 = VEX.256.66.0F38.WIG 24 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVSXWQ_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      break;
+
+   case 0x25:
+      /* VPMOVSXDQ xmm2/m64, xmm1 = VEX.128.66.0F38.WIG 25 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVxXDQ_128( vbi, pfx, delta,
+                                   True/*isAvx*/, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      /* VPMOVSXDQ xmm2/m128, ymm1 = VEX.256.66.0F38.WIG 25 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVxXDQ_256( vbi, pfx, delta, False/*!xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x28:
+      /* VPMULDQ xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.WIG 28 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpmuldq", math_PMULDQ_128 );
+         goto decode_success;
+      }
+      /* VPMULDQ ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.WIG 28 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta,
+                    "vpmuldq", math_PMULDQ_256 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x29:
+      /* VPCMPEQQ r/m, rV, r ::: r = rV `eq-by-64s` r/m */
+      /* VPCMPEQQ = VEX.NDS.128.66.0F38.WIG 29 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpeqq", Iop_CmpEQ64x2 );
+         goto decode_success;
+      }
+      /* VPCMPEQQ r/m, rV, r ::: r = rV `eq-by-64s` r/m */
+      /* VPCMPEQQ = VEX.NDS.256.66.0F38.WIG 29 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpeqq", Iop_CmpEQ64x4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x2A:
+      /* VMOVNTDQA m128, xmm1 = VEX.128.66.0F38.WIG 2A /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && !epartIsReg(getUChar(delta))) {
+         UChar  modrm = getUChar(delta);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp tD    = newTemp(Ity_V128);
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         gen_SEGV_if_not_16_aligned(addr);
+         assign(tD, loadLE(Ity_V128, mkexpr(addr)));
+         DIP("vmovntdqa %s,%s\n", dis_buf, nameXMMReg(rD));
+         putYMMRegLoAndZU(rD, mkexpr(tD));
+         goto decode_success;
+      }
+      /* VMOVNTDQA m256, ymm1 = VEX.256.66.0F38.WIG 2A /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && !epartIsReg(getUChar(delta))) {
+         UChar  modrm = getUChar(delta);
+         UInt   rD    = gregOfRexRM(pfx, modrm);
+         IRTemp tD    = newTemp(Ity_V256);
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         gen_SEGV_if_not_32_aligned(addr);
+         assign(tD, loadLE(Ity_V256, mkexpr(addr)));
+         DIP("vmovntdqa %s,%s\n", dis_buf, nameYMMReg(rD));
+         putYMMReg(rD, mkexpr(tD));
+         goto decode_success;
+      }
+      break;
+
+   case 0x2B:
+      /* VPACKUSDW r/m, rV, r ::: r = QNarrowBin32Sto16Ux8(rV, r/m) */
+      /* VPACKUSDW = VEX.NDS.128.66.0F38.WIG 2B /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG(
+                    uses_vvvv, vbi, pfx, delta, "vpackusdw",
+                    Iop_QNarrowBin32Sto16Ux8, NULL,
+                    False/*!invertLeftArg*/, True/*swapArgs*/ );
+         goto decode_success;
+      }
+      /* VPACKUSDW r/m, rV, r ::: r = QNarrowBin32Sto16Ux8(rV, r/m) */
+      /* VPACKUSDW = VEX.NDS.256.66.0F38.WIG 2B /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpackusdw",
+                    math_VPACKUSDW_YMM );
+         goto decode_success;
+      }
+      break;
+
+   case 0x2C:
+      /* VMASKMOVPS m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W0 2C /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/
+          && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vmaskmovps",
+                               /*!isYMM*/False, Ity_I32, /*isLoad*/True );
+         goto decode_success;
+      }
+      /* VMASKMOVPS m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 2C /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/
+          && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vmaskmovps",
+                               /*isYMM*/True, Ity_I32, /*isLoad*/True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x2D:
+      /* VMASKMOVPD m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W0 2D /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/
+          && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vmaskmovpd",
+                               /*!isYMM*/False, Ity_I64, /*isLoad*/True );
+         goto decode_success;
+      }
+      /* VMASKMOVPD m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 2D /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/
+          && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vmaskmovpd",
+                               /*isYMM*/True, Ity_I64, /*isLoad*/True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x2E:
+      /* VMASKMOVPS xmm1, xmm2, m128 = VEX.NDS.128.66.0F38.W0 2E /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/
+          && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vmaskmovps",
+                               /*!isYMM*/False, Ity_I32, /*!isLoad*/False );
+         goto decode_success;
+      }
+      /* VMASKMOVPS ymm1, ymm2, m256 = VEX.NDS.256.66.0F38.W0 2E /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/
+          && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vmaskmovps",
+                               /*isYMM*/True, Ity_I32, /*!isLoad*/False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x2F:
+      /* VMASKMOVPD xmm1, xmm2, m128 = VEX.NDS.128.66.0F38.W0 2F /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/
+          && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vmaskmovpd",
+                               /*!isYMM*/False, Ity_I64, /*!isLoad*/False );
+         goto decode_success;
+      }
+      /* VMASKMOVPD ymm1, ymm2, m256 = VEX.NDS.256.66.0F38.W0 2F /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/
+          && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vmaskmovpd",
+                               /*isYMM*/True, Ity_I64, /*!isLoad*/False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x30:
+      /* VPMOVZXBW xmm2/m64, xmm1 */
+      /* VPMOVZXBW = VEX.128.66.0F38.WIG 30 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVxXBW_128( vbi, pfx, delta,
+                                   True/*isAvx*/, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      /* VPMOVZXBW xmm2/m128, ymm1 */
+      /* VPMOVZXBW = VEX.256.66.0F38.WIG 30 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVxXBW_256( vbi, pfx, delta, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x31:
+      /* VPMOVZXBD xmm2/m32, xmm1 */
+      /* VPMOVZXBD = VEX.128.66.0F38.WIG 31 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVxXBD_128( vbi, pfx, delta,
+                                   True/*isAvx*/, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      /* VPMOVZXBD xmm2/m64, ymm1 */
+      /* VPMOVZXBD = VEX.256.66.0F38.WIG 31 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVxXBD_256( vbi, pfx, delta, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x32:
+      /* VPMOVZXBQ xmm2/m16, xmm1 */
+      /* VPMOVZXBQ = VEX.128.66.0F38.WIG 32 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVZXBQ_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VPMOVZXBQ xmm2/m32, ymm1 */
+      /* VPMOVZXBQ = VEX.256.66.0F38.WIG 32 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVZXBQ_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      break;
+
+   case 0x33:
+      /* VPMOVZXWD xmm2/m64, xmm1 */
+      /* VPMOVZXWD = VEX.128.66.0F38.WIG 33 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVxXWD_128( vbi, pfx, delta,
+                                   True/*isAvx*/, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      /* VPMOVZXWD xmm2/m128, ymm1 */
+      /* VPMOVZXWD = VEX.256.66.0F38.WIG 33 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVxXWD_256( vbi, pfx, delta, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x34:
+      /* VPMOVZXWQ xmm2/m32, xmm1 = VEX.128.66.0F38.WIG 34 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVZXWQ_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      /* VPMOVZXWQ xmm2/m64, ymm1 = VEX.256.66.0F38.WIG 34 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVZXWQ_256( vbi, pfx, delta );
+         goto decode_success;
+      }
+      break;
+
+   case 0x35:
+      /* VPMOVZXDQ xmm2/m64, xmm1 = VEX.128.66.0F38.WIG 35 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PMOVxXDQ_128( vbi, pfx, delta,
+                                   True/*isAvx*/, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      /* VPMOVZXDQ xmm2/m128, ymm1 = VEX.256.66.0F38.WIG 35 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_PMOVxXDQ_256( vbi, pfx, delta, True/*xIsZ*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x36:
+      /* VPERMD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 36 /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_complex(
+                    uses_vvvv, vbi, pfx, delta, "vpermd", math_VPERMD );
+         goto decode_success;
+      }
+      break;
+
+   case 0x37:
+      /* VPCMPGTQ r/m, rV, r ::: r = rV `>s-by-64s` r/m */
+      /* VPCMPGTQ = VEX.NDS.128.66.0F38.WIG 37 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpgtq", Iop_CmpGT64Sx2 );
+         goto decode_success;
+      }
+      /* VPCMPGTQ r/m, rV, r ::: r = rV `>s-by-64s` r/m */
+      /* VPCMPGTQ = VEX.NDS.256.66.0F38.WIG 37 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpcmpgtq", Iop_CmpGT64Sx4 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x38:
+      /* VPMINSB r/m, rV, r ::: r = min-signed-8s(rV, r/m) */
+      /* VPMINSB = VEX.NDS.128.66.0F38.WIG 38 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminsb", Iop_Min8Sx16 );
+         goto decode_success;
+      }
+      /* VPMINSB r/m, rV, r ::: r = min-signed-8s(rV, r/m) */
+      /* VPMINSB = VEX.NDS.256.66.0F38.WIG 38 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminsb", Iop_Min8Sx32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x39:
+      /* VPMINSD r/m, rV, r ::: r = min-signed-32s(rV, r/m) */
+      /* VPMINSD = VEX.NDS.128.66.0F38.WIG 39 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminsd", Iop_Min32Sx4 );
+         goto decode_success;
+      }
+      /* VPMINSD r/m, rV, r ::: r = min-signed-32s(rV, r/m) */
+      /* VPMINSD = VEX.NDS.256.66.0F38.WIG 39 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminsd", Iop_Min32Sx8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x3A:
+      /* VPMINUW r/m, rV, r ::: r = min-unsigned-16s(rV, r/m) */
+      /* VPMINUW = VEX.NDS.128.66.0F38.WIG 3A /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminuw", Iop_Min16Ux8 );
+         goto decode_success;
+      }
+      /* VPMINUW r/m, rV, r ::: r = min-unsigned-16s(rV, r/m) */
+      /* VPMINUW = VEX.NDS.256.66.0F38.WIG 3A /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminuw", Iop_Min16Ux16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x3B:
+      /* VPMINUD r/m, rV, r ::: r = min-unsigned-32s(rV, r/m) */
+      /* VPMINUD = VEX.NDS.128.66.0F38.WIG 3B /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminud", Iop_Min32Ux4 );
+         goto decode_success;
+      }
+      /* VPMINUD r/m, rV, r ::: r = min-unsigned-32s(rV, r/m) */
+      /* VPMINUD = VEX.NDS.256.66.0F38.WIG 3B /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpminud", Iop_Min32Ux8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x3C:
+      /* VPMAXSB r/m, rV, r ::: r = max-signed-8s(rV, r/m) */
+      /* VPMAXSB = VEX.NDS.128.66.0F38.WIG 3C /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxsb", Iop_Max8Sx16 );
+         goto decode_success;
+      }
+      /* VPMAXSB r/m, rV, r ::: r = max-signed-8s(rV, r/m) */
+      /* VPMAXSB = VEX.NDS.256.66.0F38.WIG 3C /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxsb", Iop_Max8Sx32 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x3D:
+      /* VPMAXSD r/m, rV, r ::: r = max-signed-32s(rV, r/m) */
+      /* VPMAXSD = VEX.NDS.128.66.0F38.WIG 3D /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxsd", Iop_Max32Sx4 );
+         goto decode_success;
+      }
+      /* VPMAXSD r/m, rV, r ::: r = max-signed-32s(rV, r/m) */
+      /* VPMAXSD = VEX.NDS.256.66.0F38.WIG 3D /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxsd", Iop_Max32Sx8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x3E:
+      /* VPMAXUW r/m, rV, r ::: r = max-unsigned-16s(rV, r/m) */
+      /* VPMAXUW = VEX.NDS.128.66.0F38.WIG 3E /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxuw", Iop_Max16Ux8 );
+         goto decode_success;
+      }
+      /* VPMAXUW r/m, rV, r ::: r = max-unsigned-16s(rV, r/m) */
+      /* VPMAXUW = VEX.NDS.256.66.0F38.WIG 3E /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxuw", Iop_Max16Ux16 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x3F:
+      /* VPMAXUD r/m, rV, r ::: r = max-unsigned-32s(rV, r/m) */
+      /* VPMAXUD = VEX.NDS.128.66.0F38.WIG 3F /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxud", Iop_Max32Ux4 );
+         goto decode_success;
+      }
+      /* VPMAXUD r/m, rV, r ::: r = max-unsigned-32s(rV, r/m) */
+      /* VPMAXUD = VEX.NDS.256.66.0F38.WIG 3F /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmaxud", Iop_Max32Ux8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x40:
+      /* VPMULLD r/m, rV, r ::: r = mul-32s(rV, r/m) */
+      /* VPMULLD = VEX.NDS.128.66.0F38.WIG 40 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VEX_NDS_128_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmulld", Iop_Mul32x4 );
+         goto decode_success;
+      }
+      /* VPMULLD r/m, rV, r ::: r = mul-32s(rV, r/m) */
+      /* VPMULLD = VEX.NDS.256.66.0F38.WIG 40 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VEX_NDS_256_AnySimdPfx_0F_WIG_simple(
+                    uses_vvvv, vbi, pfx, delta, "vpmulld", Iop_Mul32x8 );
+         goto decode_success;
+      }
+      break;
+
+   case 0x41:
+      /* VPHMINPOSUW xmm2/m128, xmm1 = VEX.128.66.0F38.WIG 41 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_PHMINPOSUW_128( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0x45:
+      /* VPSRLVD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W0 45 /r */
+      /* VPSRLVD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 45 /r */
+      if (have66noF2noF3(pfx) && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_AVX_var_shiftV_byE( vbi, pfx, delta, "vpsrlvd",
+                                         Iop_Shr32, 1==getVexL(pfx) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPSRLVQ xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W1 45 /r */
+      /* VPSRLVQ ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W1 45 /r */
+      if (have66noF2noF3(pfx) && 1==getRexW(pfx)/*W1*/) {
+         delta = dis_AVX_var_shiftV_byE( vbi, pfx, delta, "vpsrlvq",
+                                         Iop_Shr64, 1==getVexL(pfx) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x46:
+      /* VPSRAVD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W0 46 /r */
+      /* VPSRAVD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 46 /r */
+      if (have66noF2noF3(pfx) && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_AVX_var_shiftV_byE( vbi, pfx, delta, "vpsravd",
+                                         Iop_Sar32, 1==getVexL(pfx) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x47:
+      /* VPSLLVD xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W0 47 /r */
+      /* VPSLLVD ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 47 /r */
+      if (have66noF2noF3(pfx) && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_AVX_var_shiftV_byE( vbi, pfx, delta, "vpsllvd",
+                                         Iop_Shl32, 1==getVexL(pfx) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPSLLVQ xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W1 47 /r */
+      /* VPSLLVQ ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W1 47 /r */
+      if (have66noF2noF3(pfx) && 1==getRexW(pfx)/*W1*/) {
+         delta = dis_AVX_var_shiftV_byE( vbi, pfx, delta, "vpsllvq",
+                                         Iop_Shl64, 1==getVexL(pfx) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x58:
+      /* VPBROADCASTD xmm2/m32, xmm1 = VEX.128.66.0F38.W0 58 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         IRTemp t32 = newTemp(Ity_I32);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta++;
+            DIP("vpbroadcastd %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            assign(t32, getXMMRegLane32(rE, 0));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpbroadcastd %s,%s\n", dis_buf, nameXMMReg(rG));
+            assign(t32, loadLE(Ity_I32, mkexpr(addr)));
+         }
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = binop(Iop_64HLtoV128, mkexpr(t64), mkexpr(t64));
+         putYMMRegLoAndZU(rG, res);
+         goto decode_success;
+      }
+      /* VPBROADCASTD xmm2/m32, ymm1 = VEX.256.66.0F38.W0 58 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         IRTemp t32 = newTemp(Ity_I32);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta++;
+            DIP("vpbroadcastd %s,%s\n", nameXMMReg(rE), nameYMMReg(rG));
+            assign(t32, getXMMRegLane32(rE, 0));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpbroadcastd %s,%s\n", dis_buf, nameYMMReg(rG));
+            assign(t32, loadLE(Ity_I32, mkexpr(addr)));
+         }
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = IRExpr_Qop(Iop_64x4toV256, mkexpr(t64), mkexpr(t64),
+                                                  mkexpr(t64), mkexpr(t64));
+         putYMMReg(rG, res);
+         goto decode_success;
+      }
+      break;
+
+   case 0x59:
+      /* VPBROADCASTQ xmm2/m64, xmm1 = VEX.128.66.0F38.W0 59 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         IRTemp t64 = newTemp(Ity_I64);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta++;
+            DIP("vpbroadcastq %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            assign(t64, getXMMRegLane64(rE, 0));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpbroadcastq %s,%s\n", dis_buf, nameXMMReg(rG));
+            assign(t64, loadLE(Ity_I64, mkexpr(addr)));
+         }
+         IRExpr* res = binop(Iop_64HLtoV128, mkexpr(t64), mkexpr(t64));
+         putYMMRegLoAndZU(rG, res);
+         goto decode_success;
+      }
+      /* VPBROADCASTQ xmm2/m64, ymm1 = VEX.256.66.0F38.W0 59 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         IRTemp t64 = newTemp(Ity_I64);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta++;
+            DIP("vpbroadcastq %s,%s\n", nameXMMReg(rE), nameYMMReg(rG));
+            assign(t64, getXMMRegLane64(rE, 0));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpbroadcastq %s,%s\n", dis_buf, nameYMMReg(rG));
+            assign(t64, loadLE(Ity_I64, mkexpr(addr)));
+         }
+         IRExpr* res = IRExpr_Qop(Iop_64x4toV256, mkexpr(t64), mkexpr(t64),
+                                                  mkexpr(t64), mkexpr(t64));
+         putYMMReg(rG, res);
+         goto decode_success;
+      }
+      break;
+
+   case 0x5A:
+      /* VBROADCASTI128 m128, ymm1 = VEX.256.66.0F38.WIG 5A /r */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/
+          && !epartIsReg(getUChar(delta))) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+         delta += alen;
+         DIP("vbroadcasti128 %s,%s\n", dis_buf, nameYMMReg(rG));
+         IRTemp t128 = newTemp(Ity_V128);
+         assign(t128, loadLE(Ity_V128, mkexpr(addr)));
+         putYMMReg( rG, binop(Iop_V128HLtoV256, mkexpr(t128), mkexpr(t128)) );
+         goto decode_success;
+      }
+      break;
+
+   case 0x78:
+      /* VPBROADCASTB xmm2/m8, xmm1 = VEX.128.66.0F38.W0 78 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         IRTemp t8   = newTemp(Ity_I8);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta++;
+            DIP("vpbroadcastb %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            assign(t8, unop(Iop_32to8, getXMMRegLane32(rE, 0)));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpbroadcastb %s,%s\n", dis_buf, nameXMMReg(rG));
+            assign(t8, loadLE(Ity_I8, mkexpr(addr)));
+         }
+         IRTemp t16 = newTemp(Ity_I16);
+         assign(t16, binop(Iop_8HLto16, mkexpr(t8), mkexpr(t8)));
+         IRTemp t32 = newTemp(Ity_I32);
+         assign(t32, binop(Iop_16HLto32, mkexpr(t16), mkexpr(t16)));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = binop(Iop_64HLtoV128, mkexpr(t64), mkexpr(t64));
+         putYMMRegLoAndZU(rG, res);
+         goto decode_success;
+      }
+      /* VPBROADCASTB xmm2/m8, ymm1 = VEX.256.66.0F38.W0 78 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         IRTemp t8   = newTemp(Ity_I8);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta++;
+            DIP("vpbroadcastb %s,%s\n", nameXMMReg(rE), nameYMMReg(rG));
+            assign(t8, unop(Iop_32to8, getXMMRegLane32(rE, 0)));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpbroadcastb %s,%s\n", dis_buf, nameYMMReg(rG));
+            assign(t8, loadLE(Ity_I8, mkexpr(addr)));
+         }
+         IRTemp t16 = newTemp(Ity_I16);
+         assign(t16, binop(Iop_8HLto16, mkexpr(t8), mkexpr(t8)));
+         IRTemp t32 = newTemp(Ity_I32);
+         assign(t32, binop(Iop_16HLto32, mkexpr(t16), mkexpr(t16)));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = IRExpr_Qop(Iop_64x4toV256, mkexpr(t64), mkexpr(t64),
+                                                  mkexpr(t64), mkexpr(t64));
+         putYMMReg(rG, res);
+         goto decode_success;
+      }
+      break;
+
+   case 0x79:
+      /* VPBROADCASTW xmm2/m16, xmm1 = VEX.128.66.0F38.W0 79 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         IRTemp t16  = newTemp(Ity_I16);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta++;
+            DIP("vpbroadcastw %s,%s\n", nameXMMReg(rE), nameXMMReg(rG));
+            assign(t16, unop(Iop_32to16, getXMMRegLane32(rE, 0)));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpbroadcastw %s,%s\n", dis_buf, nameXMMReg(rG));
+            assign(t16, loadLE(Ity_I16, mkexpr(addr)));
+         }
+         IRTemp t32 = newTemp(Ity_I32);
+         assign(t32, binop(Iop_16HLto32, mkexpr(t16), mkexpr(t16)));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = binop(Iop_64HLtoV128, mkexpr(t64), mkexpr(t64));
+         putYMMRegLoAndZU(rG, res);
+         goto decode_success;
+      }
+      /* VPBROADCASTW xmm2/m16, ymm1 = VEX.256.66.0F38.W0 79 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/) {
+         UChar modrm = getUChar(delta);
+         UInt  rG    = gregOfRexRM(pfx, modrm);
+         IRTemp t16  = newTemp(Ity_I16);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta++;
+            DIP("vpbroadcastw %s,%s\n", nameXMMReg(rE), nameYMMReg(rG));
+            assign(t16, unop(Iop_32to16, getXMMRegLane32(rE, 0)));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 0 );
+            delta += alen;
+            DIP("vpbroadcastw %s,%s\n", dis_buf, nameYMMReg(rG));
+            assign(t16, loadLE(Ity_I16, mkexpr(addr)));
+         }
+         IRTemp t32 = newTemp(Ity_I32);
+         assign(t32, binop(Iop_16HLto32, mkexpr(t16), mkexpr(t16)));
+         IRTemp t64 = newTemp(Ity_I64);
+         assign(t64, binop(Iop_32HLto64, mkexpr(t32), mkexpr(t32)));
+         IRExpr* res = IRExpr_Qop(Iop_64x4toV256, mkexpr(t64), mkexpr(t64),
+                                                  mkexpr(t64), mkexpr(t64));
+         putYMMReg(rG, res);
+         goto decode_success;
+      }
+      break;
+
+   case 0x8C:
+      /* VPMASKMOVD m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W0 8C /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vpmaskmovd",
+                               /*!isYMM*/False, Ity_I32, /*isLoad*/True );
+         goto decode_success;
+      }
+      /* VPMASKMOVD m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W0 8C /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vpmaskmovd",
+                               /*isYMM*/True, Ity_I32, /*isLoad*/True );
+         goto decode_success;
+      }
+      /* VPMASKMOVQ m128, xmm2, xmm1 = VEX.NDS.128.66.0F38.W1 8C /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 1==getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vpmaskmovq",
+                               /*!isYMM*/False, Ity_I64, /*isLoad*/True );
+         goto decode_success;
+      }
+      /* VPMASKMOVQ m256, ymm2, ymm1 = VEX.NDS.256.66.0F38.W1 8C /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 1==getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vpmaskmovq",
+                               /*isYMM*/True, Ity_I64, /*isLoad*/True );
+         goto decode_success;
+      }
+      break;
+
+   case 0x8E:
+      /* VPMASKMOVD xmm1, xmm2, m128 = VEX.NDS.128.66.0F38.W0 8E /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0==getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vpmaskmovd",
+                               /*!isYMM*/False, Ity_I32, /*!isLoad*/False );
+         goto decode_success;
+      }
+      /* VPMASKMOVD ymm1, ymm2, m256 = VEX.NDS.256.66.0F38.W0 8E /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0==getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vpmaskmovd",
+                               /*isYMM*/True, Ity_I32, /*!isLoad*/False );
+         goto decode_success;
+      }
+      /* VPMASKMOVQ xmm1, xmm2, m128 = VEX.NDS.128.66.0F38.W1 8E /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 1==getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vpmaskmovq",
+                               /*!isYMM*/False, Ity_I64, /*!isLoad*/False );
+         goto decode_success;
+      }
+      /* VPMASKMOVQ ymm1, ymm2, m256 = VEX.NDS.256.66.0F38.W1 8E /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 1==getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         delta = dis_VMASKMOV( uses_vvvv, vbi, pfx, delta, "vpmaskmovq",
+                               /*isYMM*/True, Ity_I64, /*!isLoad*/False );
+         goto decode_success;
+      }
+      break;
+
+   case 0x90:
+      /* VPGATHERDD xmm2, vm32x, xmm1 = VEX.DDS.128.66.0F38.W0 90 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0 == getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vpgatherdd",
+                              /*!isYMM*/False, /*!isVM64x*/False, Ity_I32 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VPGATHERDD ymm2, vm32y, ymm1 = VEX.DDS.256.66.0F38.W0 90 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0 == getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vpgatherdd",
+                              /*isYMM*/True, /*!isVM64x*/False, Ity_I32 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VPGATHERDQ xmm2, vm32x, xmm1 = VEX.DDS.128.66.0F38.W1 90 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 1 == getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vpgatherdq",
+                              /*!isYMM*/False, /*!isVM64x*/False, Ity_I64 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VPGATHERDQ ymm2, vm32x, ymm1 = VEX.DDS.256.66.0F38.W1 90 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 1 == getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vpgatherdq",
+                              /*isYMM*/True, /*!isVM64x*/False, Ity_I64 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      break;
+
+   case 0x91:
+      /* VPGATHERQD xmm2, vm64x, xmm1 = VEX.DDS.128.66.0F38.W0 91 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0 == getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vpgatherqd",
+                              /*!isYMM*/False, /*isVM64x*/True, Ity_I32 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VPGATHERQD xmm2, vm64y, xmm1 = VEX.DDS.256.66.0F38.W0 91 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0 == getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vpgatherqd",
+                              /*isYMM*/True, /*isVM64x*/True, Ity_I32 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VPGATHERQQ xmm2, vm64x, xmm1 = VEX.DDS.128.66.0F38.W1 91 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 1 == getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vpgatherqq",
+                              /*!isYMM*/False, /*isVM64x*/True, Ity_I64 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VPGATHERQQ ymm2, vm64y, ymm1 = VEX.DDS.256.66.0F38.W1 91 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 1 == getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vpgatherqq",
+                              /*isYMM*/True, /*isVM64x*/True, Ity_I64 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      break;
+
+   case 0x92:
+      /* VGATHERDPS xmm2, vm32x, xmm1 = VEX.DDS.128.66.0F38.W0 92 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0 == getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vgatherdps",
+                              /*!isYMM*/False, /*!isVM64x*/False, Ity_I32 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VGATHERDPS ymm2, vm32y, ymm1 = VEX.DDS.256.66.0F38.W0 92 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0 == getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vgatherdps",
+                              /*isYMM*/True, /*!isVM64x*/False, Ity_I32 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VGATHERDPD xmm2, vm32x, xmm1 = VEX.DDS.128.66.0F38.W1 92 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 1 == getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vgatherdpd",
+                              /*!isYMM*/False, /*!isVM64x*/False, Ity_I64 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VGATHERDPD ymm2, vm32x, ymm1 = VEX.DDS.256.66.0F38.W1 92 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 1 == getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vgatherdpd",
+                              /*isYMM*/True, /*!isVM64x*/False, Ity_I64 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      break;
+
+   case 0x93:
+      /* VGATHERQPS xmm2, vm64x, xmm1 = VEX.DDS.128.66.0F38.W0 93 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 0 == getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vgatherqps",
+                              /*!isYMM*/False, /*isVM64x*/True, Ity_I32 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VGATHERQPS xmm2, vm64y, xmm1 = VEX.DDS.256.66.0F38.W0 93 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 0 == getRexW(pfx)/*W0*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vgatherqps",
+                              /*isYMM*/True, /*isVM64x*/True, Ity_I32 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VGATHERQPD xmm2, vm64x, xmm1 = VEX.DDS.128.66.0F38.W1 93 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/
+          && 1 == getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vgatherqpd",
+                              /*!isYMM*/False, /*isVM64x*/True, Ity_I64 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      /* VGATHERQPD ymm2, vm64y, ymm1 = VEX.DDS.256.66.0F38.W1 93 /r */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 1 == getRexW(pfx)/*W1*/ && !epartIsReg(getUChar(delta))) {
+         Long delta0 = delta;
+         delta = dis_VGATHER( uses_vvvv, vbi, pfx, delta, "vgatherqpd",
+                              /*isYMM*/True, /*isVM64x*/True, Ity_I64 );
+         if (delta != delta0)
+            goto decode_success;
+      }
+      break;
+
+   case 0x96 ... 0x9F:
+   case 0xA6 ... 0xAF:
+   case 0xB6 ... 0xBF:
+      /* VFMADDSUB132PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 96 /r */
+      /* VFMADDSUB132PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 96 /r */
+      /* VFMADDSUB132PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 96 /r */
+      /* VFMADDSUB132PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 96 /r */
+      /* VFMSUBADD132PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 97 /r */
+      /* VFMSUBADD132PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 97 /r */
+      /* VFMSUBADD132PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 97 /r */
+      /* VFMSUBADD132PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 97 /r */
+      /* VFMADD132PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 98 /r */
+      /* VFMADD132PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 98 /r */
+      /* VFMADD132PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 98 /r */
+      /* VFMADD132PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 98 /r */
+      /* VFMADD132SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 99 /r */
+      /* VFMADD132SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 99 /r */
+      /* VFMSUB132PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 9A /r */
+      /* VFMSUB132PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 9A /r */
+      /* VFMSUB132PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 9A /r */
+      /* VFMSUB132PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 9A /r */
+      /* VFMSUB132SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 9B /r */
+      /* VFMSUB132SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 9B /r */
+      /* VFNMADD132PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 9C /r */
+      /* VFNMADD132PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 9C /r */
+      /* VFNMADD132PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 9C /r */
+      /* VFNMADD132PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 9C /r */
+      /* VFNMADD132SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 9D /r */
+      /* VFNMADD132SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 9D /r */
+      /* VFNMSUB132PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 9E /r */
+      /* VFNMSUB132PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 9E /r */
+      /* VFNMSUB132PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 9E /r */
+      /* VFNMSUB132PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 9E /r */
+      /* VFNMSUB132SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 9F /r */
+      /* VFNMSUB132SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 9F /r */
+      /* VFMADDSUB213PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 A6 /r */
+      /* VFMADDSUB213PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 A6 /r */
+      /* VFMADDSUB213PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 A6 /r */
+      /* VFMADDSUB213PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 A6 /r */
+      /* VFMSUBADD213PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 A7 /r */
+      /* VFMSUBADD213PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 A7 /r */
+      /* VFMSUBADD213PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 A7 /r */
+      /* VFMSUBADD213PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 A7 /r */
+      /* VFMADD213PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 A8 /r */
+      /* VFMADD213PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 A8 /r */
+      /* VFMADD213PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 A8 /r */
+      /* VFMADD213PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 A8 /r */
+      /* VFMADD213SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 A9 /r */
+      /* VFMADD213SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 A9 /r */
+      /* VFMSUB213PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 AA /r */
+      /* VFMSUB213PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 AA /r */
+      /* VFMSUB213PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 AA /r */
+      /* VFMSUB213PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 AA /r */
+      /* VFMSUB213SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 AB /r */
+      /* VFMSUB213SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 AB /r */
+      /* VFNMADD213PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 AC /r */
+      /* VFNMADD213PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 AC /r */
+      /* VFNMADD213PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 AC /r */
+      /* VFNMADD213PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 AC /r */
+      /* VFNMADD213SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 AD /r */
+      /* VFNMADD213SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 AD /r */
+      /* VFNMSUB213PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 AE /r */
+      /* VFNMSUB213PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 AE /r */
+      /* VFNMSUB213PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 AE /r */
+      /* VFNMSUB213PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 AE /r */
+      /* VFNMSUB213SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 AF /r */
+      /* VFNMSUB213SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 AF /r */
+      /* VFMADDSUB231PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 B6 /r */
+      /* VFMADDSUB231PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 B6 /r */
+      /* VFMADDSUB231PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 B6 /r */
+      /* VFMADDSUB231PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 B6 /r */
+      /* VFMSUBADD231PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 B7 /r */
+      /* VFMSUBADD231PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 B7 /r */
+      /* VFMSUBADD231PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 B7 /r */
+      /* VFMSUBADD231PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 B7 /r */
+      /* VFMADD231PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 B8 /r */
+      /* VFMADD231PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 B8 /r */
+      /* VFMADD231PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 B8 /r */
+      /* VFMADD231PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 B8 /r */
+      /* VFMADD231SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 B9 /r */
+      /* VFMADD231SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 B9 /r */
+      /* VFMSUB231PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 BA /r */
+      /* VFMSUB231PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 BA /r */
+      /* VFMSUB231PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 BA /r */
+      /* VFMSUB231PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 BA /r */
+      /* VFMSUB231SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 BB /r */
+      /* VFMSUB231SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 BB /r */
+      /* VFNMADD231PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 BC /r */
+      /* VFNMADD231PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 BC /r */
+      /* VFNMADD231PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 BC /r */
+      /* VFNMADD231PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 BC /r */
+      /* VFNMADD231SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 BD /r */
+      /* VFNMADD231SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 BD /r */
+      /* VFNMSUB231PS xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W0 BE /r */
+      /* VFNMSUB231PS ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W0 BE /r */
+      /* VFNMSUB231PD xmm3/m128, xmm2, xmm1 = VEX.DDS.128.66.0F38.W1 BE /r */
+      /* VFNMSUB231PD ymm3/m256, ymm2, ymm1 = VEX.DDS.256.66.0F38.W1 BE /r */
+      /* VFNMSUB231SS xmm3/m32, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W0 BF /r */
+      /* VFNMSUB231SD xmm3/m64, xmm2, xmm1 = VEX.DDS.LIG.66.0F38.W1 BF /r */
+      if (have66noF2noF3(pfx)) {
+         delta = dis_FMA( vbi, pfx, delta, opc );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xDB:
+   case 0xDC:
+   case 0xDD:
+   case 0xDE:
+   case 0xDF:
+      /* VAESIMC xmm2/m128, xmm1 = VEX.128.66.0F38.WIG DB /r */
+      /* VAESENC xmm3/m128, xmm2, xmm1 = VEX.128.66.0F38.WIG DC /r */
+      /* VAESENCLAST xmm3/m128, xmm2, xmm1 = VEX.128.66.0F38.WIG DD /r */
+      /* VAESDEC xmm3/m128, xmm2, xmm1 = VEX.128.66.0F38.WIG DE /r */
+      /* VAESDECLAST xmm3/m128, xmm2, xmm1 = VEX.128.66.0F38.WIG DF /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AESx( vbi, pfx, delta, True/*!isAvx*/, opc );
+         if (opc != 0xDB) *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xF2:
+      /* ANDN r/m32, r32b, r32a = VEX.NDS.LZ.0F38.W0 F2 /r */
+      /* ANDN r/m64, r64b, r64a = VEX.NDS.LZ.0F38.W1 F2 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         Int     size = getRexW(pfx) ? 8 : 4;
+         IRType  ty   = szToITy(size);
+         IRTemp  dst  = newTemp(ty);
+         IRTemp  src1 = newTemp(ty);
+         IRTemp  src2 = newTemp(ty);
+         UChar   rm   = getUChar(delta);
+
+         assign( src1, getIRegV(size,pfx) );
+         if (epartIsReg(rm)) {
+            assign( src2, getIRegE(size,pfx,rm) );
+            DIP("andn %s,%s,%s\n", nameIRegE(size,pfx,rm),
+                nameIRegV(size,pfx), nameIRegG(size,pfx,rm));
+            delta++;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( src2, loadLE(ty, mkexpr(addr)) );
+            DIP("andn %s,%s,%s\n", dis_buf, nameIRegV(size,pfx),
+                nameIRegG(size,pfx,rm));
+            delta += alen;
+         }
+
+         assign( dst, binop( mkSizedOp(ty,Iop_And8),
+                             unop( mkSizedOp(ty,Iop_Not8), mkexpr(src1) ),
+                             mkexpr(src2) ) );
+         putIRegG( size, pfx, rm, mkexpr(dst) );
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(size == 8
+                                               ? AMD64G_CC_OP_ANDN64
+                                               : AMD64G_CC_OP_ANDN32)) );
+         stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dst))) );
+         stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0)) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xF3:
+      /* BLSI r/m32, r32 = VEX.NDD.LZ.0F38.W0 F3 /3 */
+      /* BLSI r/m64, r64 = VEX.NDD.LZ.0F38.W1 F3 /3 */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*LZ*/
+          && !haveREX(pfx) && gregLO3ofRM(getUChar(delta)) == 3) {
+         Int     size = getRexW(pfx) ? 8 : 4;
+         IRType  ty   = szToITy(size);
+         IRTemp  src  = newTemp(ty);
+         IRTemp  dst  = newTemp(ty);
+         UChar   rm   = getUChar(delta);
+
+         if (epartIsReg(rm)) {
+            assign( src, getIRegE(size,pfx,rm) );
+            DIP("blsi %s,%s\n", nameIRegE(size,pfx,rm),
+                nameIRegV(size,pfx));
+            delta++;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( src, loadLE(ty, mkexpr(addr)) );
+            DIP("blsi %s,%s\n", dis_buf, nameIRegV(size,pfx));
+            delta += alen;
+         }
+
+         assign( dst, binop(mkSizedOp(ty,Iop_And8),
+                            binop(mkSizedOp(ty,Iop_Sub8), mkU(ty, 0),
+                                  mkexpr(src)), mkexpr(src)) );
+         putIRegV( size, pfx, mkexpr(dst) );
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(size == 8
+                                               ? AMD64G_CC_OP_BLSI64
+                                               : AMD64G_CC_OP_BLSI32)) );
+         stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dst))) );
+         stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(mkexpr(src))) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* BLSMSK r/m32, r32 = VEX.NDD.LZ.0F38.W0 F3 /2 */
+      /* BLSMSK r/m64, r64 = VEX.NDD.LZ.0F38.W1 F3 /2 */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*LZ*/
+          && !haveREX(pfx) && gregLO3ofRM(getUChar(delta)) == 2) {
+         Int     size = getRexW(pfx) ? 8 : 4;
+         IRType  ty   = szToITy(size);
+         IRTemp  src  = newTemp(ty);
+         IRTemp  dst  = newTemp(ty);
+         UChar   rm   = getUChar(delta);
+
+         if (epartIsReg(rm)) {
+            assign( src, getIRegE(size,pfx,rm) );
+            DIP("blsmsk %s,%s\n", nameIRegE(size,pfx,rm),
+                nameIRegV(size,pfx));
+            delta++;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( src, loadLE(ty, mkexpr(addr)) );
+            DIP("blsmsk %s,%s\n", dis_buf, nameIRegV(size,pfx));
+            delta += alen;
+         }
+
+         assign( dst, binop(mkSizedOp(ty,Iop_Xor8),
+                            binop(mkSizedOp(ty,Iop_Sub8), mkexpr(src),
+                                  mkU(ty, 1)), mkexpr(src)) );
+         putIRegV( size, pfx, mkexpr(dst) );
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(size == 8
+                                               ? AMD64G_CC_OP_BLSMSK64
+                                               : AMD64G_CC_OP_BLSMSK32)) );
+         stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dst))) );
+         stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(mkexpr(src))) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* BLSR r/m32, r32 = VEX.NDD.LZ.0F38.W0 F3 /1 */
+      /* BLSR r/m64, r64 = VEX.NDD.LZ.0F38.W1 F3 /1 */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*LZ*/
+          && !haveREX(pfx) && gregLO3ofRM(getUChar(delta)) == 1) {
+         Int     size = getRexW(pfx) ? 8 : 4;
+         IRType  ty   = szToITy(size);
+         IRTemp  src  = newTemp(ty);
+         IRTemp  dst  = newTemp(ty);
+         UChar   rm   = getUChar(delta);
+
+         if (epartIsReg(rm)) {
+            assign( src, getIRegE(size,pfx,rm) );
+            DIP("blsr %s,%s\n", nameIRegE(size,pfx,rm),
+                nameIRegV(size,pfx));
+            delta++;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( src, loadLE(ty, mkexpr(addr)) );
+            DIP("blsr %s,%s\n", dis_buf, nameIRegV(size,pfx));
+            delta += alen;
+         }
+
+         assign( dst, binop(mkSizedOp(ty,Iop_And8),
+                            binop(mkSizedOp(ty,Iop_Sub8), mkexpr(src),
+                                  mkU(ty, 1)), mkexpr(src)) );
+         putIRegV( size, pfx, mkexpr(dst) );
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(size == 8
+                                               ? AMD64G_CC_OP_BLSR64
+                                               : AMD64G_CC_OP_BLSR32)) );
+         stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dst))) );
+         stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(mkexpr(src))) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0xF5:
+      /* BZHI r32b, r/m32, r32a = VEX.NDS.LZ.0F38.W0 F5 /r */
+      /* BZHI r64b, r/m64, r64a = VEX.NDS.LZ.0F38.W1 F5 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         Int     size  = getRexW(pfx) ? 8 : 4;
+         IRType  ty    = szToITy(size);
+         IRTemp  dst   = newTemp(ty);
+         IRTemp  src1  = newTemp(ty);
+         IRTemp  src2  = newTemp(ty);
+         IRTemp  start = newTemp(Ity_I8);
+         IRTemp  cond  = newTemp(Ity_I1);
+         UChar   rm    = getUChar(delta);
+
+         assign( src2, getIRegV(size,pfx) );
+         if (epartIsReg(rm)) {
+            assign( src1, getIRegE(size,pfx,rm) );
+            DIP("bzhi %s,%s,%s\n", nameIRegV(size,pfx),
+                nameIRegE(size,pfx,rm), nameIRegG(size,pfx,rm));
+            delta++;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( src1, loadLE(ty, mkexpr(addr)) );
+            DIP("bzhi %s,%s,%s\n", nameIRegV(size,pfx), dis_buf,
+                nameIRegG(size,pfx,rm));
+            delta += alen;
+         }
+
+         assign( start, narrowTo( Ity_I8, mkexpr(src2) ) );
+         assign( cond, binop(Iop_CmpLT32U,
+                             unop(Iop_8Uto32, mkexpr(start)),
+                             mkU32(8*size)) );
+         /* if (start < opsize) {
+               if (start == 0)
+                  dst = 0;
+               else
+                  dst = (src1 << (opsize-start)) u>> (opsize-start);
+            } else {
+               dst = src1;
+            } */
+         assign( dst,
+                 IRExpr_ITE(
+                    mkexpr(cond),
+                    IRExpr_ITE(
+                       binop(Iop_CmpEQ8, mkexpr(start), mkU8(0)),
+                       mkU(ty, 0),
+                       binop(
+                          mkSizedOp(ty,Iop_Shr8),
+                          binop(
+                             mkSizedOp(ty,Iop_Shl8),
+                             mkexpr(src1),
+                             binop(Iop_Sub8, mkU8(8*size), mkexpr(start))
+                          ),
+                          binop(Iop_Sub8, mkU8(8*size), mkexpr(start))
+                       )
+                    ),
+                    mkexpr(src1)
+                 )
+               );
+         putIRegG( size, pfx, rm, mkexpr(dst) );
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(size == 8
+                                               ? AMD64G_CC_OP_BLSR64
+                                               : AMD64G_CC_OP_BLSR32)) );
+         stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dst))) );
+         stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(mkexpr(cond))) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* PDEP r/m32, r32b, r32a = VEX.NDS.LZ.F2.0F38.W0 F5 /r */
+      /* PDEP r/m64, r64b, r64a = VEX.NDS.LZ.F2.0F38.W1 F5 /r */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         Int     size = getRexW(pfx) ? 8 : 4;
+         IRType  ty   = szToITy(size);
+         IRTemp  src  = newTemp(ty);
+         IRTemp  mask = newTemp(ty);
+         UChar   rm   = getUChar(delta);
+
+         assign( src, getIRegV(size,pfx) );
+         if (epartIsReg(rm)) {
+            assign( mask, getIRegE(size,pfx,rm) );
+            DIP("pdep %s,%s,%s\n", nameIRegE(size,pfx,rm),
+                nameIRegV(size,pfx), nameIRegG(size,pfx,rm));
+            delta++;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( mask, loadLE(ty, mkexpr(addr)) );
+            DIP("pdep %s,%s,%s\n", dis_buf, nameIRegV(size,pfx),
+                nameIRegG(size,pfx,rm));
+            delta += alen;
+         }
+
+         IRExpr** args = mkIRExprVec_2( widenUto64(mkexpr(src)),
+                                        widenUto64(mkexpr(mask)) );
+         putIRegG( size, pfx, rm,
+                   narrowTo(ty, mkIRExprCCall(Ity_I64, 0/*regparms*/,
+                                              "amd64g_calculate_pdep",
+                                              &amd64g_calculate_pdep, args)) );
+         *uses_vvvv = True;
+         /* Flags aren't modified.  */
+         goto decode_success;
+      }
+      /* PEXT r/m32, r32b, r32a = VEX.NDS.LZ.F3.0F38.W0 F5 /r */
+      /* PEXT r/m64, r64b, r64a = VEX.NDS.LZ.F3.0F38.W1 F5 /r */
+      if (haveF3no66noF2(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         Int     size = getRexW(pfx) ? 8 : 4;
+         IRType  ty   = szToITy(size);
+         IRTemp  src  = newTemp(ty);
+         IRTemp  mask = newTemp(ty);
+         UChar   rm   = getUChar(delta);
+
+         assign( src, getIRegV(size,pfx) );
+         if (epartIsReg(rm)) {
+            assign( mask, getIRegE(size,pfx,rm) );
+            DIP("pext %s,%s,%s\n", nameIRegE(size,pfx,rm),
+                nameIRegV(size,pfx), nameIRegG(size,pfx,rm));
+            delta++;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( mask, loadLE(ty, mkexpr(addr)) );
+            DIP("pext %s,%s,%s\n", dis_buf, nameIRegV(size,pfx),
+                nameIRegG(size,pfx,rm));
+            delta += alen;
+         }
+
+         /* First mask off bits not set in mask, they are ignored
+            and it should be fine if they contain undefined values.  */
+         IRExpr* masked = binop(mkSizedOp(ty,Iop_And8),
+                                mkexpr(src), mkexpr(mask));
+         IRExpr** args = mkIRExprVec_2( widenUto64(masked),
+                                        widenUto64(mkexpr(mask)) );
+         putIRegG( size, pfx, rm,
+                   narrowTo(ty, mkIRExprCCall(Ity_I64, 0/*regparms*/,
+                                              "amd64g_calculate_pext",
+                                              &amd64g_calculate_pext, args)) );
+         *uses_vvvv = True;
+         /* Flags aren't modified.  */
+         goto decode_success;
+      }
+      break;
+
+   case 0xF6:
+      /* MULX r/m32, r32b, r32a = VEX.NDD.LZ.F2.0F38.W0 F6 /r */
+      /* MULX r/m64, r64b, r64a = VEX.NDD.LZ.F2.0F38.W1 F6 /r */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         Int     size = getRexW(pfx) ? 8 : 4;
+         IRType  ty   = szToITy(size);
+         IRTemp  src1 = newTemp(ty);
+         IRTemp  src2 = newTemp(ty);
+         IRTemp  res  = newTemp(size == 8 ? Ity_I128 : Ity_I64);
+         UChar   rm   = getUChar(delta);
+
+         assign( src1, getIRegRDX(size) );
+         if (epartIsReg(rm)) {
+            assign( src2, getIRegE(size,pfx,rm) );
+            DIP("mulx %s,%s,%s\n", nameIRegE(size,pfx,rm),
+                nameIRegV(size,pfx), nameIRegG(size,pfx,rm));
+            delta++;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( src2, loadLE(ty, mkexpr(addr)) );
+            DIP("mulx %s,%s,%s\n", dis_buf, nameIRegV(size,pfx),
+                nameIRegG(size,pfx,rm));
+            delta += alen;
+         }
+
+         assign( res, binop(size == 8 ? Iop_MullU64 : Iop_MullU32,
+                            mkexpr(src1), mkexpr(src2)) );
+         putIRegV( size, pfx,
+                   unop(size == 8 ? Iop_128to64 : Iop_64to32, mkexpr(res)) );
+         putIRegG( size, pfx, rm,
+                   unop(size == 8 ? Iop_128HIto64 : Iop_64HIto32,
+                        mkexpr(res)) );
+         *uses_vvvv = True;
+         /* Flags aren't modified.  */
+         goto decode_success;
+      }
+      break;
+
+   case 0xF7:
+      /* SARX r32b, r/m32, r32a = VEX.NDS.LZ.F3.0F38.W0 F7 /r */
+      /* SARX r64b, r/m64, r64a = VEX.NDS.LZ.F3.0F38.W1 F7 /r */
+      if (haveF3no66noF2(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         delta = dis_SHIFTX( uses_vvvv, vbi, pfx, delta, "sarx", Iop_Sar8 );
+         goto decode_success;
+      }
+      /* SHLX r32b, r/m32, r32a = VEX.NDS.LZ.66.0F38.W0 F7 /r */
+      /* SHLX r64b, r/m64, r64a = VEX.NDS.LZ.66.0F38.W1 F7 /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         delta = dis_SHIFTX( uses_vvvv, vbi, pfx, delta, "shlx", Iop_Shl8 );
+         goto decode_success;
+      }
+      /* SHRX r32b, r/m32, r32a = VEX.NDS.LZ.F2.0F38.W0 F7 /r */
+      /* SHRX r64b, r/m64, r64a = VEX.NDS.LZ.F2.0F38.W1 F7 /r */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         delta = dis_SHIFTX( uses_vvvv, vbi, pfx, delta, "shrx", Iop_Shr8 );
+         goto decode_success;
+      }
+      /* BEXTR r32b, r/m32, r32a = VEX.NDS.LZ.0F38.W0 F7 /r */
+      /* BEXTR r64b, r/m64, r64a = VEX.NDS.LZ.0F38.W1 F7 /r */
+      if (haveNo66noF2noF3(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         Int     size  = getRexW(pfx) ? 8 : 4;
+         IRType  ty    = szToITy(size);
+         IRTemp  dst   = newTemp(ty);
+         IRTemp  src1  = newTemp(ty);
+         IRTemp  src2  = newTemp(ty);
+         IRTemp  stle  = newTemp(Ity_I16);
+         IRTemp  start = newTemp(Ity_I8);
+         IRTemp  len   = newTemp(Ity_I8);
+         UChar   rm    = getUChar(delta);
+
+         assign( src2, getIRegV(size,pfx) );
+         if (epartIsReg(rm)) {
+            assign( src1, getIRegE(size,pfx,rm) );
+            DIP("bextr %s,%s,%s\n", nameIRegV(size,pfx),
+                nameIRegE(size,pfx,rm), nameIRegG(size,pfx,rm));
+            delta++;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            assign( src1, loadLE(ty, mkexpr(addr)) );
+            DIP("bextr %s,%s,%s\n", nameIRegV(size,pfx), dis_buf,
+                nameIRegG(size,pfx,rm));
+            delta += alen;
+         }
+
+         assign( stle, narrowTo( Ity_I16, mkexpr(src2) ) );
+         assign( start, unop( Iop_16to8, mkexpr(stle) ) );
+         assign( len, unop( Iop_16HIto8, mkexpr(stle) ) );
+         /* if (start+len < opsize) {
+               if (len != 0)
+                  dst = (src1 << (opsize-start-len)) u>> (opsize-len);
+               else
+                  dst = 0;
+            } else {
+               if (start < opsize)
+                  dst = src1 u>> start;
+               else
+                  dst = 0;
+            } */
+         assign( dst,
+                 IRExpr_ITE(
+                    binop(Iop_CmpLT32U,
+                          binop(Iop_Add32,
+                                unop(Iop_8Uto32, mkexpr(start)),
+                                unop(Iop_8Uto32, mkexpr(len))),
+                          mkU32(8*size)),
+                    IRExpr_ITE(
+                       binop(Iop_CmpEQ8, mkexpr(len), mkU8(0)),
+                       mkU(ty, 0),
+                       binop(mkSizedOp(ty,Iop_Shr8),
+                             binop(mkSizedOp(ty,Iop_Shl8), mkexpr(src1),
+                                   binop(Iop_Sub8,
+                                         binop(Iop_Sub8, mkU8(8*size),
+                                               mkexpr(start)),
+                                         mkexpr(len))),
+                             binop(Iop_Sub8, mkU8(8*size),
+                                   mkexpr(len)))
+                    ),
+                    IRExpr_ITE(
+                       binop(Iop_CmpLT32U,
+                             unop(Iop_8Uto32, mkexpr(start)),
+                             mkU32(8*size)),
+                       binop(mkSizedOp(ty,Iop_Shr8), mkexpr(src1),
+                             mkexpr(start)),
+                       mkU(ty, 0)
+                    )
+                 )
+               );
+         putIRegG( size, pfx, rm, mkexpr(dst) );
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(size == 8
+                                               ? AMD64G_CC_OP_ANDN64
+                                               : AMD64G_CC_OP_ANDN32)) );
+         stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dst))) );
+         stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0)) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   default:
+      break;
+
+   }
+
+  //decode_failure:
+   return deltaIN;
+
+  decode_success:
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Top-level post-escape decoders: dis_ESC_0F3A__VEX    ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+static IRTemp math_VPERMILPS_128 ( IRTemp sV, UInt imm8 )
+{
+   vassert(imm8 < 256);
+   IRTemp s3, s2, s1, s0;
+   s3 = s2 = s1 = s0 = IRTemp_INVALID;
+   breakupV128to32s( sV, &s3, &s2, &s1, &s0 );
+#  define SEL(_nn) (((_nn)==0) ? s0 : ((_nn)==1) ? s1 \
+                                    : ((_nn)==2) ? s2 : s3)
+   IRTemp res = newTemp(Ity_V128);
+   assign(res, mkV128from32s( SEL((imm8 >> 6) & 3),
+                              SEL((imm8 >> 4) & 3),
+                              SEL((imm8 >> 2) & 3),
+                              SEL((imm8 >> 0) & 3) ));
+#  undef SEL
+   return res;
+}
+
+__attribute__((noinline))
+static
+Long dis_ESC_0F3A__VEX (
+        /*MB_OUT*/DisResult* dres,
+        /*OUT*/   Bool*      uses_vvvv,
+        Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+        Bool         resteerCisOk,
+        void*        callback_opaque,
+        const VexArchInfo* archinfo,
+        const VexAbiInfo*  vbi,
+        Prefix pfx, Int sz, Long deltaIN 
+     )
+{
+   IRTemp addr  = IRTemp_INVALID;
+   Int    alen  = 0;
+   HChar  dis_buf[50];
+   Long   delta = deltaIN;
+   UChar  opc   = getUChar(delta);
+   delta++;
+   *uses_vvvv = False;
+
+   switch (opc) {
+
+   case 0x00:
+   case 0x01:
+      /* VPERMQ imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.W1 00 /r ib */
+      /* VPERMPD imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.W1 01 /r ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/
+          && 1==getRexW(pfx)/*W1*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   imm8  = 0;
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         IRTemp sV    = newTemp(Ity_V256);
+         const HChar *name  = opc == 0 ? "vpermq" : "vpermpd";
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            imm8 = getUChar(delta);
+            DIP("%s $%u,%s,%s\n",
+                name, imm8, nameYMMReg(rE), nameYMMReg(rG));
+            assign(sV, getYMMReg(rE));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            delta += alen;
+            imm8 = getUChar(delta);
+            DIP("%s $%u,%s,%s\n",
+                name, imm8, dis_buf, nameYMMReg(rG));
+            assign(sV, loadLE(Ity_V256, mkexpr(addr)));
+         }
+         delta++;
+         IRTemp s[4];
+         s[3] = s[2] = s[1] = s[0] = IRTemp_INVALID;
+         breakupV256to64s(sV, &s[3], &s[2], &s[1], &s[0]);
+         IRTemp dV = newTemp(Ity_V256);
+         assign(dV, IRExpr_Qop(Iop_64x4toV256,
+                               mkexpr(s[(imm8 >> 6) & 3]),
+                               mkexpr(s[(imm8 >> 4) & 3]),
+                               mkexpr(s[(imm8 >> 2) & 3]),
+                               mkexpr(s[(imm8 >> 0) & 3])));
+         putYMMReg(rG, mkexpr(dV));
+         goto decode_success;
+      }
+      break;
+
+   case 0x02:
+      /* VPBLENDD imm8, xmm3/m128, xmm2, xmm1 = VEX.NDS.128.66.0F3A.W0 02 /r ib */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   imm8  = 0;
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp sV    = newTemp(Ity_V128);
+         IRTemp dV    = newTemp(Ity_V128);
+         UInt   i;
+         IRTemp s[4], d[4];
+         assign(sV, getXMMReg(rV));
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            imm8 = getUChar(delta);
+            DIP("vpblendd $%u,%s,%s,%s\n",
+                imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
+            assign(dV, getXMMReg(rE));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            delta += alen;
+            imm8 = getUChar(delta);
+            DIP("vpblendd $%u,%s,%s,%s\n",
+                imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
+            assign(dV, loadLE(Ity_V128, mkexpr(addr)));
+         }
+         delta++;
+         for (i = 0; i < 4; i++) {
+            s[i] = IRTemp_INVALID;
+            d[i] = IRTemp_INVALID;
+         }
+         breakupV128to32s( sV, &s[3], &s[2], &s[1], &s[0] );
+         breakupV128to32s( dV, &d[3], &d[2], &d[1], &d[0] );
+         for (i = 0; i < 4; i++)
+            putYMMRegLane32(rG, i, mkexpr((imm8 & (1<<i)) ? d[i] : s[i]));
+         putYMMRegLane128(rG, 1, mkV128(0));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPBLENDD imm8, ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F3A.W0 02 /r ib */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   imm8  = 0;
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp sV    = newTemp(Ity_V256);
+         IRTemp dV    = newTemp(Ity_V256);
+         UInt   i;
+         IRTemp s[8], d[8];
+         assign(sV, getYMMReg(rV));
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            imm8 = getUChar(delta);
+            DIP("vpblendd $%u,%s,%s,%s\n",
+                imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
+            assign(dV, getYMMReg(rE));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            delta += alen;
+            imm8 = getUChar(delta);
+            DIP("vpblendd $%u,%s,%s,%s\n",
+                imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
+            assign(dV, loadLE(Ity_V256, mkexpr(addr)));
+         }
+         delta++;
+         for (i = 0; i < 8; i++) {
+            s[i] = IRTemp_INVALID;
+            d[i] = IRTemp_INVALID;
+         }
+         breakupV256to32s( sV, &s[7], &s[6], &s[5], &s[4],
+                               &s[3], &s[2], &s[1], &s[0] );
+         breakupV256to32s( dV, &d[7], &d[6], &d[5], &d[4],
+                               &d[3], &d[2], &d[1], &d[0] );
+         for (i = 0; i < 8; i++)
+            putYMMRegLane32(rG, i, mkexpr((imm8 & (1<<i)) ? d[i] : s[i]));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
   case 0x04:
      /* VPERMILPS imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.WIG 04 /r ib */
      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8  = 0;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         IRTemp sV    = newTemp(Ity_V256);
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vpermilps $%u,%s,%s\n",
                imm8, nameYMMReg(rE), nameYMMReg(rG));
            assign(sV, getYMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vpermilps $%u,%s,%s\n",
                imm8, dis_buf, nameYMMReg(rG));
            assign(sV, loadLE(Ity_V256, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         /* Each 128-bit lane is permuted independently, both lanes
            using the same imm8 control. */
         IRTemp  sVhi = IRTemp_INVALID, sVlo = IRTemp_INVALID;
         breakupV256toV128s( sV, &sVhi, &sVlo );
         IRTemp  dVhi = math_VPERMILPS_128( sVhi, imm8 );
         IRTemp  dVlo = math_VPERMILPS_128( sVlo, imm8 );
         IRExpr* res  = binop(Iop_V128HLtoV256, mkexpr(dVhi), mkexpr(dVlo));
         putYMMReg(rG, res);
         goto decode_success;
      }
      /* VPERMILPS imm8, xmm2/m128, xmm1 = VEX.128.66.0F3A.WIG 04 /r ib */
      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8  = 0;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         IRTemp sV    = newTemp(Ity_V128);
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vpermilps $%u,%s,%s\n",
                imm8, nameXMMReg(rE), nameXMMReg(rG));
            assign(sV, getXMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vpermilps $%u,%s,%s\n",
                imm8, dis_buf, nameXMMReg(rG));
            assign(sV, loadLE(Ity_V128, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         /* 128-bit form: result goes to the low lane; upper lane of
            the YMM register is zeroed (AVX semantics). */
         putYMMRegLoAndZU(rG, mkexpr ( math_VPERMILPS_128 ( sV, imm8 ) ) );
         goto decode_success;
      }
      break;
+
   case 0x05:
      /* VPERMILPD imm8, xmm2/m128, xmm1 = VEX.128.66.0F3A.WIG 05 /r ib */
      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8  = 0;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         IRTemp sV    = newTemp(Ity_V128);
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vpermilpd $%u,%s,%s\n",
                imm8, nameXMMReg(rE), nameXMMReg(rG));
            assign(sV, getXMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vpermilpd $%u,%s,%s\n",
                imm8, dis_buf, nameXMMReg(rG));
            assign(sV, loadLE(Ity_V128, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         /* imm8 bit i selects the high (s1) or low (s0) source qword
            for destination qword i. */
         IRTemp s1 = newTemp(Ity_I64);
         IRTemp s0 = newTemp(Ity_I64);
         assign(s1, unop(Iop_V128HIto64, mkexpr(sV)));
         assign(s0, unop(Iop_V128to64,   mkexpr(sV)));
         IRTemp dV = newTemp(Ity_V128);
         assign(dV, binop(Iop_64HLtoV128,
                               mkexpr((imm8 & (1<<1)) ? s1 : s0),
                               mkexpr((imm8 & (1<<0)) ? s1 : s0)));
         putYMMRegLoAndZU(rG, mkexpr(dV));
         goto decode_success;
      }
      /* VPERMILPD imm8, ymm2/m256, ymm1 = VEX.256.66.0F3A.WIG 05 /r ib */
      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8  = 0;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         IRTemp sV    = newTemp(Ity_V256);
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vpermilpd $%u,%s,%s\n",
                imm8, nameYMMReg(rE), nameYMMReg(rG));
            assign(sV, getYMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vpermilpd $%u,%s,%s\n",
                imm8, dis_buf, nameYMMReg(rG));
            assign(sV, loadLE(Ity_V256, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         /* Per-128-bit-lane qword select: bits 3:2 steer the upper
            lane (s3/s2), bits 1:0 steer the lower lane (s1/s0). */
         IRTemp s3, s2, s1, s0;
         s3 = s2 = s1 = s0 = IRTemp_INVALID;
         breakupV256to64s(sV, &s3, &s2, &s1, &s0);
         IRTemp dV = newTemp(Ity_V256);
         assign(dV, IRExpr_Qop(Iop_64x4toV256,
                               mkexpr((imm8 & (1<<3)) ? s3 : s2),
                               mkexpr((imm8 & (1<<2)) ? s3 : s2),
                               mkexpr((imm8 & (1<<1)) ? s1 : s0),
                               mkexpr((imm8 & (1<<0)) ? s1 : s0)));
         putYMMReg(rG, mkexpr(dV));
         goto decode_success;
      }
      break;
+
   case 0x06:
      /* VPERM2F128 imm8, ymm3/m256, ymm2, ymm1 = VEX.NDS.66.0F3A.W0 06 /r ib */
      if (have66noF2noF3(pfx)
          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8  = 0;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         UInt   rV    = getVexNvvvv(pfx);
         /* s00/s01 = low/high 128-bit lanes of the first source (vvvv);
            s10/s11 = low/high lanes of the second source (reg or mem). */
         IRTemp s00   = newTemp(Ity_V128);
         IRTemp s01   = newTemp(Ity_V128);
         IRTemp s10   = newTemp(Ity_V128);
         IRTemp s11   = newTemp(Ity_V128);
         assign(s00, getYMMRegLane128(rV, 0));
         assign(s01, getYMMRegLane128(rV, 1));
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vperm2f128 $%u,%s,%s,%s\n",
                imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
            assign(s10, getYMMRegLane128(rE, 0));
            assign(s11, getYMMRegLane128(rE, 1));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vperm2f128 $%u,%s,%s,%s\n",
                imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
            assign(s10, loadLE(Ity_V128, binop(Iop_Add64,
                                               mkexpr(addr), mkU64(0))));
            assign(s11, loadLE(Ity_V128, binop(Iop_Add64,
                                               mkexpr(addr), mkU64(16))));
         }
         delta++;   /* step past the imm8 byte */
         /* 2-bit fields imm8[1:0] and imm8[5:4] pick which of the four
            source lanes lands in the destination's low/high lane. */
#        define SEL(_nn) (((_nn)==0) ? s00 : ((_nn)==1) ? s01 \
                                           : ((_nn)==2) ? s10 : s11)
         putYMMRegLane128(rG, 0, mkexpr(SEL((imm8 >> 0) & 3)));
         putYMMRegLane128(rG, 1, mkexpr(SEL((imm8 >> 4) & 3)));
#        undef SEL
         /* imm8 bits 3 and 7 force-zero the corresponding dest lane. */
         if (imm8 & (1<<3)) putYMMRegLane128(rG, 0, mkV128(0));
         if (imm8 & (1<<7)) putYMMRegLane128(rG, 1, mkV128(0));
         *uses_vvvv = True;
         goto decode_success;
      }
      break;
+
+   case 0x08:
+      /* VROUNDPS imm8, xmm2/m128, xmm1 */
+      /* VROUNDPS = VEX.NDS.128.66.0F3A.WIG 08 ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         IRTemp src   = newTemp(Ity_V128);
+         IRTemp s0    = IRTemp_INVALID;
+         IRTemp s1    = IRTemp_INVALID;
+         IRTemp s2    = IRTemp_INVALID;
+         IRTemp s3    = IRTemp_INVALID;
+         IRTemp rm    = newTemp(Ity_I32);
+         Int    imm   = 0;
+
+         modrm = getUChar(delta);
+
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            assign( src, getXMMReg( rE ) );
+            imm = getUChar(delta+1);
+            if (imm & ~15) break;
+            delta += 1+1;
+            DIP( "vroundps $%d,%s,%s\n", imm, nameXMMReg(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( src, loadLE(Ity_V128, mkexpr(addr) ) );
+            imm = getUChar(delta+alen);
+            if (imm & ~15) break;
+            delta += alen+1;
+            DIP( "vroundps $%d,%s,%s\n", imm, dis_buf, nameXMMReg(rG) );
+         }
+
+         /* (imm & 3) contains an Intel-encoded rounding mode.  Because
+            that encoding is the same as the encoding for IRRoundingMode,
+            we can use that value directly in the IR as a rounding
+            mode. */
+         assign(rm, (imm & 4) ? get_sse_roundingmode() : mkU32(imm & 3));
+
+         breakupV128to32s( src, &s3, &s2, &s1, &s0 );
+         putYMMRegLane128( rG, 1, mkV128(0) );
+#        define CVT(s) binop(Iop_RoundF32toInt, mkexpr(rm), \
+                             unop(Iop_ReinterpI32asF32, mkexpr(s)))
+         putYMMRegLane32F( rG, 3, CVT(s3) );
+         putYMMRegLane32F( rG, 2, CVT(s2) );
+         putYMMRegLane32F( rG, 1, CVT(s1) );
+         putYMMRegLane32F( rG, 0, CVT(s0) );
+#        undef CVT
+         goto decode_success;
+      }
+      /* VROUNDPS imm8, ymm2/m256, ymm1 */
+      /* VROUNDPS = VEX.NDS.256.66.0F3A.WIG 08 ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         IRTemp src   = newTemp(Ity_V256);
+         IRTemp s0    = IRTemp_INVALID;
+         IRTemp s1    = IRTemp_INVALID;
+         IRTemp s2    = IRTemp_INVALID;
+         IRTemp s3    = IRTemp_INVALID;
+         IRTemp s4    = IRTemp_INVALID;
+         IRTemp s5    = IRTemp_INVALID;
+         IRTemp s6    = IRTemp_INVALID;
+         IRTemp s7    = IRTemp_INVALID;
+         IRTemp rm    = newTemp(Ity_I32);
+         Int    imm   = 0;
+
+         modrm = getUChar(delta);
+
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            assign( src, getYMMReg( rE ) );
+            imm = getUChar(delta+1);
+            if (imm & ~15) break;
+            delta += 1+1;
+            DIP( "vroundps $%d,%s,%s\n", imm, nameYMMReg(rE), nameYMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( src, loadLE(Ity_V256, mkexpr(addr) ) );
+            imm = getUChar(delta+alen);
+            if (imm & ~15) break;
+            delta += alen+1;
+            DIP( "vroundps $%d,%s,%s\n", imm, dis_buf, nameYMMReg(rG) );
+         }
+
+         /* (imm & 3) contains an Intel-encoded rounding mode.  Because
+            that encoding is the same as the encoding for IRRoundingMode,
+            we can use that value directly in the IR as a rounding
+            mode. */
+         assign(rm, (imm & 4) ? get_sse_roundingmode() : mkU32(imm & 3));
+
+         breakupV256to32s( src, &s7, &s6, &s5, &s4, &s3, &s2, &s1, &s0 );
+#        define CVT(s) binop(Iop_RoundF32toInt, mkexpr(rm), \
+                             unop(Iop_ReinterpI32asF32, mkexpr(s)))
+         putYMMRegLane32F( rG, 7, CVT(s7) );
+         putYMMRegLane32F( rG, 6, CVT(s6) );
+         putYMMRegLane32F( rG, 5, CVT(s5) );
+         putYMMRegLane32F( rG, 4, CVT(s4) );
+         putYMMRegLane32F( rG, 3, CVT(s3) );
+         putYMMRegLane32F( rG, 2, CVT(s2) );
+         putYMMRegLane32F( rG, 1, CVT(s1) );
+         putYMMRegLane32F( rG, 0, CVT(s0) );
+#        undef CVT
+         goto decode_success;
+      }
+
+   case 0x09:
+      /* VROUNDPD imm8, xmm2/m128, xmm1 */
+      /* VROUNDPD = VEX.NDS.128.66.0F3A.WIG 09 ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         IRTemp src   = newTemp(Ity_V128);
+         IRTemp s0    = IRTemp_INVALID;
+         IRTemp s1    = IRTemp_INVALID;
+         IRTemp rm    = newTemp(Ity_I32);
+         Int    imm   = 0;
+
+         modrm = getUChar(delta);
+
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            assign( src, getXMMReg( rE ) );
+            imm = getUChar(delta+1);
+            if (imm & ~15) break;
+            delta += 1+1;
+            DIP( "vroundpd $%d,%s,%s\n", imm, nameXMMReg(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( src, loadLE(Ity_V128, mkexpr(addr) ) );
+            imm = getUChar(delta+alen);
+            if (imm & ~15) break;
+            delta += alen+1;
+            DIP( "vroundpd $%d,%s,%s\n", imm, dis_buf, nameXMMReg(rG) );
+         }
+
+         /* (imm & 3) contains an Intel-encoded rounding mode.  Because
+            that encoding is the same as the encoding for IRRoundingMode,
+            we can use that value directly in the IR as a rounding
+            mode. */
+         assign(rm, (imm & 4) ? get_sse_roundingmode() : mkU32(imm & 3));
+
+         breakupV128to64s( src, &s1, &s0 );
+         putYMMRegLane128( rG, 1, mkV128(0) );
+#        define CVT(s) binop(Iop_RoundF64toInt, mkexpr(rm), \
+                             unop(Iop_ReinterpI64asF64, mkexpr(s)))
+         putYMMRegLane64F( rG, 1, CVT(s1) );
+         putYMMRegLane64F( rG, 0, CVT(s0) );
+#        undef CVT
+         goto decode_success;
+      }
+      /* VROUNDPD imm8, ymm2/m256, ymm1 */
+      /* VROUNDPD = VEX.NDS.256.66.0F3A.WIG 09 ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         IRTemp src   = newTemp(Ity_V256);
+         IRTemp s0    = IRTemp_INVALID;
+         IRTemp s1    = IRTemp_INVALID;
+         IRTemp s2    = IRTemp_INVALID;
+         IRTemp s3    = IRTemp_INVALID;
+         IRTemp rm    = newTemp(Ity_I32);
+         Int    imm   = 0;
+
+         modrm = getUChar(delta);
+
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            assign( src, getYMMReg( rE ) );
+            imm = getUChar(delta+1);
+            if (imm & ~15) break;
+            delta += 1+1;
+            DIP( "vroundpd $%d,%s,%s\n", imm, nameYMMReg(rE), nameYMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( src, loadLE(Ity_V256, mkexpr(addr) ) );
+            imm = getUChar(delta+alen);
+            if (imm & ~15) break;
+            delta += alen+1;
+            DIP( "vroundps $%d,%s,%s\n", imm, dis_buf, nameYMMReg(rG) );
+         }
+
+         /* (imm & 3) contains an Intel-encoded rounding mode.  Because
+            that encoding is the same as the encoding for IRRoundingMode,
+            we can use that value directly in the IR as a rounding
+            mode. */
+         assign(rm, (imm & 4) ? get_sse_roundingmode() : mkU32(imm & 3));
+
+         breakupV256to64s( src, &s3, &s2, &s1, &s0 );
+#        define CVT(s) binop(Iop_RoundF64toInt, mkexpr(rm), \
+                             unop(Iop_ReinterpI64asF64, mkexpr(s)))
+         putYMMRegLane64F( rG, 3, CVT(s3) );
+         putYMMRegLane64F( rG, 2, CVT(s2) );
+         putYMMRegLane64F( rG, 1, CVT(s1) );
+         putYMMRegLane64F( rG, 0, CVT(s0) );
+#        undef CVT
+         goto decode_success;
+      }
+
   case 0x0A:
   case 0x0B:
      /* VROUNDSS imm8, xmm3/m32, xmm2, xmm1 */
      /* VROUNDSS = VEX.NDS.128.66.0F3A.WIG 0A ib */
      /* VROUNDSD imm8, xmm3/m64, xmm2, xmm1 */
      /* VROUNDSD = VEX.NDS.128.66.0F3A.WIG 0B ib */
      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
         UChar  modrm = getUChar(delta);
         UInt   rG    = gregOfRexRM(pfx, modrm);
         UInt   rV    = getVexNvvvv(pfx);
         Bool   isD   = opc == 0x0B;   /* 0x0B = double (VROUNDSD) */
         IRTemp src   = newTemp(isD ? Ity_F64 : Ity_F32);
         IRTemp res   = newTemp(isD ? Ity_F64 : Ity_F32);
         Int    imm   = 0;

         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            assign( src, 
                    isD ? getXMMRegLane64F(rE, 0) : getXMMRegLane32F(rE, 0) );
            imm = getUChar(delta+1);
            /* Only imm values 0..15 are handled; anything else bails
               out of the decoder. */
            if (imm & ~15) break;
            delta += 1+1;
            DIP( "vrounds%c $%d,%s,%s,%s\n",
                 isD ? 'd' : 's',
                 imm, nameXMMReg( rE ), nameXMMReg( rV ), nameXMMReg( rG ) );
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            assign( src, loadLE( isD ? Ity_F64 : Ity_F32, mkexpr(addr) ));
            imm = getUChar(delta+alen);
            if (imm & ~15) break;
            delta += alen+1;
            DIP( "vrounds%c $%d,%s,%s,%s\n",
                 isD ? 'd' : 's',
                 imm, dis_buf, nameXMMReg( rV ), nameXMMReg( rG ) );
         }

         /* (imm & 3) contains an Intel-encoded rounding mode.  Because
            that encoding is the same as the encoding for IRRoundingMode,
            we can use that value directly in the IR as a rounding
            mode. */
         assign(res, binop(isD ? Iop_RoundF64toInt : Iop_RoundF32toInt,
                           (imm & 4) ? get_sse_roundingmode() 
                                     : mkU32(imm & 3),
                           mkexpr(src)) );

         /* Low element gets the rounded result; the rest of the low
            128 bits are copied from rV, and the upper lane is zeroed
            (AVX semantics). */
         if (isD)
            putXMMRegLane64F( rG, 0, mkexpr(res) );
         else {
            putXMMRegLane32F( rG, 0, mkexpr(res) );
            putXMMRegLane32F( rG, 1, getXMMRegLane32F( rV, 1 ) );
         }
         putXMMRegLane64F( rG, 1, getXMMRegLane64F( rV, 1 ) );
         putYMMRegLane128( rG, 1, mkV128(0) );
         *uses_vvvv = True;
         goto decode_success;
      }
      break;
+
   case 0x0C:
      /* VBLENDPS imm8, ymm3/m256, ymm2, ymm1 */
      /* VBLENDPS = VEX.NDS.256.66.0F3A.WIG 0C /r ib */
      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         UInt   rV    = getVexNvvvv(pfx);
         /* sV = first source (vvvv register); sE = second source
            (register or memory operand). */
         IRTemp sV    = newTemp(Ity_V256);
         IRTemp sE    = newTemp(Ity_V256);
         assign ( sV, getYMMReg(rV) );
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vblendps $%u,%s,%s,%s\n",
                imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
            assign(sE, getYMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vblendps $%u,%s,%s,%s\n",
                imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
            assign(sE, loadLE(Ity_V256, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         putYMMReg( rG, 
                    mkexpr( math_BLENDPS_256( sE, sV, imm8) ) );
         *uses_vvvv = True;
         goto decode_success;
      }
      /* VBLENDPS imm8, xmm3/m128, xmm2, xmm1 */
      /* VBLENDPS = VEX.NDS.128.66.0F3A.WIG 0C /r ib */
      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         UInt   rV    = getVexNvvvv(pfx);
         IRTemp sV    = newTemp(Ity_V128);
         IRTemp sE    = newTemp(Ity_V128);
         assign ( sV, getXMMReg(rV) );
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vblendps $%u,%s,%s,%s\n",
                imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
            assign(sE, getXMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vblendps $%u,%s,%s,%s\n",
                imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
            assign(sE, loadLE(Ity_V128, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         /* 128-bit form zeroes the upper YMM lane (AVX semantics). */
         putYMMRegLoAndZU( rG, 
                           mkexpr( math_BLENDPS_128( sE, sV, imm8) ) );
         *uses_vvvv = True;
         goto decode_success;
      }
      break;
+
   case 0x0D:
      /* VBLENDPD imm8, ymm3/m256, ymm2, ymm1 */
      /* VBLENDPD = VEX.NDS.256.66.0F3A.WIG 0D /r ib */
      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         UInt   rV    = getVexNvvvv(pfx);
         /* sV = first source (vvvv register); sE = second source
            (register or memory operand). */
         IRTemp sV    = newTemp(Ity_V256);
         IRTemp sE    = newTemp(Ity_V256);
         assign ( sV, getYMMReg(rV) );
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vblendpd $%u,%s,%s,%s\n",
                imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
            assign(sE, getYMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vblendpd $%u,%s,%s,%s\n",
                imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
            assign(sE, loadLE(Ity_V256, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         putYMMReg( rG, 
                    mkexpr( math_BLENDPD_256( sE, sV, imm8) ) );
         *uses_vvvv = True;
         goto decode_success;
      }
      /* VBLENDPD imm8, xmm3/m128, xmm2, xmm1 */
      /* VBLENDPD = VEX.NDS.128.66.0F3A.WIG 0D /r ib */
      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         UInt   rV    = getVexNvvvv(pfx);
         IRTemp sV    = newTemp(Ity_V128);
         IRTemp sE    = newTemp(Ity_V128);
         assign ( sV, getXMMReg(rV) );
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vblendpd $%u,%s,%s,%s\n",
                imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
            assign(sE, getXMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vblendpd $%u,%s,%s,%s\n",
                imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
            assign(sE, loadLE(Ity_V128, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         /* 128-bit form zeroes the upper YMM lane (AVX semantics). */
         putYMMRegLoAndZU( rG, 
                           mkexpr( math_BLENDPD_128( sE, sV, imm8) ) );
         *uses_vvvv = True;
         goto decode_success;
      }
      break;
+
   case 0x0E:
      /* VPBLENDW imm8, xmm3/m128, xmm2, xmm1 */
      /* VPBLENDW = VEX.NDS.128.66.0F3A.WIG 0E /r ib */
      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         UInt   rV    = getVexNvvvv(pfx);
         /* sV = first source (vvvv register); sE = second source
            (register or memory operand). */
         IRTemp sV    = newTemp(Ity_V128);
         IRTemp sE    = newTemp(Ity_V128);
         assign ( sV, getXMMReg(rV) );
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vpblendw $%u,%s,%s,%s\n",
                imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG));
            assign(sE, getXMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vpblendw $%u,%s,%s,%s\n",
                imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG));
            assign(sE, loadLE(Ity_V128, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         /* 128-bit form zeroes the upper YMM lane (AVX semantics). */
         putYMMRegLoAndZU( rG, 
                           mkexpr( math_PBLENDW_128( sE, sV, imm8) ) );
         *uses_vvvv = True;
         goto decode_success;
      }
      /* VPBLENDW imm8, ymm3/m256, ymm2, ymm1 */
      /* VPBLENDW = VEX.NDS.256.66.0F3A.WIG 0E /r ib */
      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
         UChar  modrm = getUChar(delta);
         UInt   imm8;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         UInt   rV    = getVexNvvvv(pfx);
         IRTemp sV    = newTemp(Ity_V256);
         IRTemp sE    = newTemp(Ity_V256);
         IRTemp sVhi, sVlo, sEhi, sElo;
         sVhi = sVlo = sEhi = sElo = IRTemp_INVALID;
         assign ( sV, getYMMReg(rV) );
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            imm8 = getUChar(delta);
            DIP("vpblendw $%u,%s,%s,%s\n",
                imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
            assign(sE, getYMMReg(rE));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            delta += alen;
            imm8 = getUChar(delta);
            DIP("vpblendw $%u,%s,%s,%s\n",
                imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
            assign(sE, loadLE(Ity_V256, mkexpr(addr)));
         }
         delta++;   /* step past the imm8 byte */
         /* 256-bit form: both 128-bit lanes are blended with the same
            imm8 control. */
         breakupV256toV128s( sV, &sVhi, &sVlo );
         breakupV256toV128s( sE, &sEhi, &sElo );
         putYMMReg( rG, binop( Iop_V128HLtoV256,
                               mkexpr( math_PBLENDW_128( sEhi, sVhi, imm8) ),
                               mkexpr( math_PBLENDW_128( sElo, sVlo, imm8) ) ) );
         *uses_vvvv = True;
         goto decode_success;
      }
      break;
+
+   case 0x0F:
+      /* VPALIGNR imm8, xmm3/m128, xmm2, xmm1 */
+      /* VPALIGNR = VEX.NDS.128.66.0F3A.WIG 0F /r ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp sV    = newTemp(Ity_V128);
+         IRTemp dV    = newTemp(Ity_V128);
+         UInt   imm8;
+
+         assign( dV, getXMMReg(rV) );
+
+         if ( epartIsReg( modrm ) ) {
+            UInt   rE = eregOfRexRM(pfx, modrm);
+            assign( sV, getXMMReg(rE) );
+            imm8 = getUChar(delta+1);
+            delta += 1+1;
+            DIP("vpalignr $%d,%s,%s,%s\n", imm8, nameXMMReg(rE),
+                                           nameXMMReg(rV), nameXMMReg(rG));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+            imm8 = getUChar(delta+alen);
+            delta += alen+1;
+            DIP("vpalignr $%d,%s,%s,%s\n", imm8, dis_buf,
+                                           nameXMMReg(rV), nameXMMReg(rG));
+         }
+
+         IRTemp res = math_PALIGNR_XMM( sV, dV, imm8 );
+         putYMMRegLoAndZU( rG, mkexpr(res) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPALIGNR imm8, ymm3/m256, ymm2, ymm1 */
+      /* VPALIGNR = VEX.NDS.256.66.0F3A.WIG 0F /r ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp sV    = newTemp(Ity_V256);
+         IRTemp dV    = newTemp(Ity_V256);
+         IRTemp sHi, sLo, dHi, dLo;
+         sHi = sLo = dHi = dLo = IRTemp_INVALID;
+         UInt   imm8;
+
+         assign( dV, getYMMReg(rV) );
+
+         if ( epartIsReg( modrm ) ) {
+            UInt   rE = eregOfRexRM(pfx, modrm);
+            assign( sV, getYMMReg(rE) );
+            imm8 = getUChar(delta+1);
+            delta += 1+1;
+            DIP("vpalignr $%d,%s,%s,%s\n", imm8, nameYMMReg(rE),
+                                           nameYMMReg(rV), nameYMMReg(rG));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( sV, loadLE(Ity_V256, mkexpr(addr)) );
+            imm8 = getUChar(delta+alen);
+            delta += alen+1;
+            DIP("vpalignr $%d,%s,%s,%s\n", imm8, dis_buf,
+                                           nameYMMReg(rV), nameYMMReg(rG));
+         }
+
+         breakupV256toV128s( dV, &dHi, &dLo );
+         breakupV256toV128s( sV, &sHi, &sLo );
+         putYMMReg( rG, binop( Iop_V128HLtoV256,
+                               mkexpr( math_PALIGNR_XMM( sHi, dHi, imm8 ) ),
+                               mkexpr( math_PALIGNR_XMM( sLo, dLo, imm8 ) ) )
+                    );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x14:
+      /* VPEXTRB imm8, xmm2, reg/m8 = VEX.128.66.0F3A.W0 14 /r ib */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         delta = dis_PEXTRB_128_GtoE( vbi, pfx, delta, False/*!isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
   case 0x15:
      /* VPEXTRW imm8, reg/m16, xmm2 */
      /* VPEXTRW = VEX.128.66.0F3A.W0 15 /r ib */
      /* These cases defer to the shared SSE4.1 extract helpers;
         isAvx=True selects the VEX ("v"-prefixed) form.  The helpers
         are defined elsewhere in this file. */
      if (have66noF2noF3(pfx)
          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
         delta = dis_PEXTRW( vbi, pfx, delta, True/*isAvx*/ );
         goto decode_success;
      }
      break;

   case 0x16:
      /* VPEXTRD imm8, r32/m32, xmm2 */
      /* VPEXTRD = VEX.128.66.0F3A.W0 16 /r ib */
      if (have66noF2noF3(pfx)
          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
         delta = dis_PEXTRD( vbi, pfx, delta, True/*isAvx*/ );
         goto decode_success;
      }
      /* VPEXTRQ = VEX.128.66.0F3A.W1 16 /r ib */
      /* REX.W=1 selects the 64-bit (VPEXTRQ) form of opcode 16. */
      if (have66noF2noF3(pfx)
          && 0==getVexL(pfx)/*128*/ && 1==getRexW(pfx)/*W1*/) {
         delta = dis_PEXTRQ( vbi, pfx, delta, True/*isAvx*/ );
         goto decode_success;
      }
      break;

   case 0x17:
      /* VEXTRACTPS imm8, xmm1, r32/m32 = VEX.128.66.0F3A.WIG 17 /r ib */
      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
         delta = dis_EXTRACTPS( vbi, pfx, delta, True/*isAvx*/ );
         goto decode_success;
      }
      break;
+
   case 0x18:
      /* VINSERTF128 r/m, rV, rD
         ::: rD = insertinto(a lane in rV, 128 bits from r/m) */
      /* VINSERTF128 = VEX.NDS.256.66.0F3A.W0 18 /r ib */
      if (have66noF2noF3(pfx)
          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
         UChar  modrm = getUChar(delta);
         UInt   ib    = 0;
         UInt   rG    = gregOfRexRM(pfx, modrm);
         UInt   rV    = getVexNvvvv(pfx);
         IRTemp t128  = newTemp(Ity_V128);
         if (epartIsReg(modrm)) {
            UInt rE = eregOfRexRM(pfx, modrm);
            delta += 1;
            assign(t128, getXMMReg(rE));
            ib = getUChar(delta);
            DIP("vinsertf128 $%u,%s,%s,%s\n",
                ib, nameXMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
         } else {
            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
            assign(t128, loadLE(Ity_V128, mkexpr(addr)));
            delta += alen;
            ib = getUChar(delta);
            DIP("vinsertf128 $%u,%s,%s,%s\n",
                ib, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
         }
         delta++;   /* step past the imm8 byte */
         /* Copy both lanes of rV into rG, then overwrite the lane
            selected by ib bit 0 with the 128-bit source. */
         putYMMRegLane128(rG, 0,   getYMMRegLane128(rV, 0));
         putYMMRegLane128(rG, 1,   getYMMRegLane128(rV, 1));
         putYMMRegLane128(rG, ib & 1, mkexpr(t128));
         *uses_vvvv = True;
         goto decode_success;
      }
      break;
+
+   case 0x19:
+     /* VEXTRACTF128 $lane_no, rS, r/m
+        ::: r/m:V128 = a lane of rS:V256 (RM format) */
+     /* VEXTRACTF128 = VEX.256.66.0F3A.W0 19 /r ib */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   ib    = 0;
+         UInt   rS    = gregOfRexRM(pfx, modrm);
+         IRTemp t128  = newTemp(Ity_V128);
+         if (epartIsReg(modrm)) {
+            UInt rD = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            ib = getUChar(delta);
+            assign(t128, getYMMRegLane128(rS, ib & 1));
+            putYMMRegLoAndZU(rD, mkexpr(t128));
+            DIP("vextractf128 $%u,%s,%s\n",
+                ib, nameXMMReg(rS), nameYMMReg(rD));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            delta += alen;
+            ib = getUChar(delta);
+            assign(t128, getYMMRegLane128(rS, ib & 1));
+            storeLE(mkexpr(addr), mkexpr(t128));
+            DIP("vextractf128 $%u,%s,%s\n",
+                ib, nameYMMReg(rS), dis_buf);
+         }
+         delta++;
+         /* doesn't use vvvv */
+         goto decode_success;
+      }
+      break;
+
+   case 0x20:
+      /* VPINSRB r32/m8, xmm2, xmm1 = VEX.NDS.128.66.0F3A.W0 20 /r ib */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm  = getUChar(delta);
+         UInt   rG     = gregOfRexRM(pfx, modrm);
+         UInt   rV     = getVexNvvvv(pfx);
+         Int    imm8;
+         IRTemp src_u8 = newTemp(Ity_I8);
+
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            imm8 = (Int)(getUChar(delta+1) & 15);
+            assign( src_u8, unop(Iop_32to8, getIReg32( rE )) );
+            delta += 1+1;
+            DIP( "vpinsrb $%d,%s,%s,%s\n",
+                 imm8, nameIReg32(rE), nameXMMReg(rV), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8 = (Int)(getUChar(delta+alen) & 15);
+            assign( src_u8, loadLE( Ity_I8, mkexpr(addr) ) );
+            delta += alen+1;
+            DIP( "vpinsrb $%d,%s,%s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
+         }
+
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg( rV ));
+         IRTemp res_vec = math_PINSRB_128( src_vec, src_u8, imm8 );
+         putYMMRegLoAndZU( rG, mkexpr(res_vec) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x21:
+      /* VINSERTPS imm8, xmm3/m32, xmm2, xmm1
+         = VEX.NDS.128.66.0F3A.WIG 21 /r ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         UInt   imm8;
+         IRTemp d2ins = newTemp(Ity_I32); /* comes from the E part */
+         const IRTemp inval = IRTemp_INVALID;
+
+         if ( epartIsReg( modrm ) ) {
+            UInt   rE = eregOfRexRM(pfx, modrm);
+            IRTemp vE = newTemp(Ity_V128);
+            assign( vE, getXMMReg(rE) );
+            IRTemp dsE[4] = { inval, inval, inval, inval };
+            breakupV128to32s( vE, &dsE[3], &dsE[2], &dsE[1], &dsE[0] );
+            imm8 = getUChar(delta+1);
+            d2ins = dsE[(imm8 >> 6) & 3]; /* "imm8_count_s" */
+            delta += 1+1;
+            DIP( "insertps $%u, %s,%s\n",
+                 imm8, nameXMMReg(rE), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign( d2ins, loadLE( Ity_I32, mkexpr(addr) ) );
+            imm8 = getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "insertps $%u, %s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rG) );
+         }
+
+         IRTemp vV = newTemp(Ity_V128);
+         assign( vV, getXMMReg(rV) );
+
+         putYMMRegLoAndZU( rG, mkexpr(math_INSERTPS( vV, d2ins, imm8 )) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x22:
+      /* VPINSRD r32/m32, xmm2, xmm1 = VEX.NDS.128.66.0F3A.W0 22 /r ib */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         Int    imm8_10;
+         IRTemp src_u32 = newTemp(Ity_I32);
+
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            imm8_10 = (Int)(getUChar(delta+1) & 3);
+            assign( src_u32, getIReg32( rE ) );
+            delta += 1+1;
+            DIP( "vpinsrd $%d,%s,%s,%s\n",
+                 imm8_10, nameIReg32(rE), nameXMMReg(rV), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8_10 = (Int)(getUChar(delta+alen) & 3);
+            assign( src_u32, loadLE( Ity_I32, mkexpr(addr) ) );
+            delta += alen+1;
+            DIP( "vpinsrd $%d,%s,%s,%s\n", 
+                 imm8_10, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
+         }
+
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg( rV ));
+         IRTemp res_vec = math_PINSRD_128( src_vec, src_u32, imm8_10 );
+         putYMMRegLoAndZU( rG, mkexpr(res_vec) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPINSRQ r64/m64, xmm2, xmm1 = VEX.NDS.128.66.0F3A.W1 22 /r ib */
+      if (have66noF2noF3(pfx)
+          && 0==getVexL(pfx)/*128*/ && 1==getRexW(pfx)/*W1*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         Int    imm8_0;
+         IRTemp src_u64 = newTemp(Ity_I64);
+
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            imm8_0 = (Int)(getUChar(delta+1) & 1);
+            assign( src_u64, getIReg64( rE ) );
+            delta += 1+1;
+            DIP( "vpinsrq $%d,%s,%s,%s\n",
+                 imm8_0, nameIReg64(rE), nameXMMReg(rV), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8_0 = (Int)(getUChar(delta+alen) & 1);
+            assign( src_u64, loadLE( Ity_I64, mkexpr(addr) ) );
+            delta += alen+1;
+            DIP( "vpinsrd $%d,%s,%s,%s\n", 
+                 imm8_0, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
+         }
+
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg( rV ));
+         IRTemp res_vec = math_PINSRQ_128( src_vec, src_u64, imm8_0 );
+         putYMMRegLoAndZU( rG, mkexpr(res_vec) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x38:
+      /* VINSERTI128 r/m, rV, rD
+         ::: rD = insertinto(a lane in rV, 128 bits from r/m) */
+      /* VINSERTI128 = VEX.NDS.256.66.0F3A.W0 38 /r ib */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   ib    = 0;
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp t128  = newTemp(Ity_V128);
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            assign(t128, getXMMReg(rE));
+            ib = getUChar(delta);
+            DIP("vinserti128 $%u,%s,%s,%s\n",
+                ib, nameXMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            assign(t128, loadLE(Ity_V128, mkexpr(addr)));
+            delta += alen;
+            ib = getUChar(delta);
+            DIP("vinserti128 $%u,%s,%s,%s\n",
+                ib, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
+         }
+         delta++;
+         putYMMRegLane128(rG, 0,   getYMMRegLane128(rV, 0));
+         putYMMRegLane128(rG, 1,   getYMMRegLane128(rV, 1));
+         putYMMRegLane128(rG, ib & 1, mkexpr(t128));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x39:
+      /* VEXTRACTI128 $lane_no, rS, r/m
+         ::: r/m:V128 = a lane of rS:V256 (RM format) */
+      /* VEXTRACTI128 = VEX.256.66.0F3A.W0 39 /r ib */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   ib    = 0;
+         UInt   rS    = gregOfRexRM(pfx, modrm);
+         IRTemp t128  = newTemp(Ity_V128);
+         if (epartIsReg(modrm)) {
+            UInt rD = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            ib = getUChar(delta);
+            assign(t128, getYMMRegLane128(rS, ib & 1));
+            putYMMRegLoAndZU(rD, mkexpr(t128));
+            DIP("vextracti128 $%u,%s,%s\n",
+                ib, nameXMMReg(rS), nameYMMReg(rD));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            delta += alen;
+            ib = getUChar(delta);
+            assign(t128, getYMMRegLane128(rS, ib & 1));
+            storeLE(mkexpr(addr), mkexpr(t128));
+            DIP("vextracti128 $%u,%s,%s\n",
+                ib, nameYMMReg(rS), dis_buf);
+         }
+         delta++;
+         /* doesn't use vvvv */
+         goto decode_success;
+      }
+      break;
+
+   case 0x40:
+      /* VDPPS imm8, xmm3/m128,xmm2,xmm1 = VEX.NDS.128.66.0F3A.WIG 40 /r ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm   = getUChar(delta);
+         UInt   rG      = gregOfRexRM(pfx, modrm);
+         UInt   rV      = getVexNvvvv(pfx);
+         IRTemp dst_vec = newTemp(Ity_V128);
+         Int    imm8;
+         if (epartIsReg( modrm )) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            imm8 = (Int)getUChar(delta+1);
+            assign( dst_vec, getXMMReg( rE ) );
+            delta += 1+1;
+            DIP( "vdpps $%d,%s,%s,%s\n",
+                 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8 = (Int)getUChar(delta+alen);
+            assign( dst_vec, loadLE( Ity_V128, mkexpr(addr) ) );
+            delta += alen+1;
+            DIP( "vdpps $%d,%s,%s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
+         }
+
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg( rV ));
+         IRTemp res_vec = math_DPPS_128( src_vec, dst_vec, imm8 );
+         putYMMRegLoAndZU( rG, mkexpr(res_vec) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VDPPS imm8, ymm3/m256,ymm2,ymm1 = VEX.NDS.256.66.0F3A.WIG 40 /r ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar  modrm   = getUChar(delta);
+         UInt   rG      = gregOfRexRM(pfx, modrm);
+         UInt   rV      = getVexNvvvv(pfx);
+         IRTemp dst_vec = newTemp(Ity_V256);
+         Int    imm8;
+         if (epartIsReg( modrm )) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            imm8 = (Int)getUChar(delta+1);
+            assign( dst_vec, getYMMReg( rE ) );
+            delta += 1+1;
+            DIP( "vdpps $%d,%s,%s,%s\n",
+                 imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8 = (Int)getUChar(delta+alen);
+            assign( dst_vec, loadLE( Ity_V256, mkexpr(addr) ) );
+            delta += alen+1;
+            DIP( "vdpps $%d,%s,%s,%s\n", 
+                 imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG) );
+         }
+
+         IRTemp src_vec = newTemp(Ity_V256);
+         assign(src_vec, getYMMReg( rV ));
+         IRTemp s0, s1, d0, d1;
+         s0 = s1 = d0 = d1 = IRTemp_INVALID;
+         breakupV256toV128s( dst_vec, &d1, &d0 );
+         breakupV256toV128s( src_vec, &s1, &s0 );
+         putYMMReg( rG, binop( Iop_V128HLtoV256,
+                               mkexpr( math_DPPS_128(s1, d1, imm8) ),
+                               mkexpr( math_DPPS_128(s0, d0, imm8) ) ) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x41:
+      /* VDPPD imm8, xmm3/m128,xmm2,xmm1 = VEX.NDS.128.66.0F3A.WIG 41 /r ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm   = getUChar(delta);
+         UInt   rG      = gregOfRexRM(pfx, modrm);
+         UInt   rV      = getVexNvvvv(pfx);
+         IRTemp dst_vec = newTemp(Ity_V128);
+         Int    imm8;
+         if (epartIsReg( modrm )) {
+            UInt rE = eregOfRexRM(pfx,modrm);
+            imm8 = (Int)getUChar(delta+1);
+            assign( dst_vec, getXMMReg( rE ) );
+            delta += 1+1;
+            DIP( "vdppd $%d,%s,%s,%s\n",
+                 imm8, nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            imm8 = (Int)getUChar(delta+alen);
+            assign( dst_vec, loadLE( Ity_V128, mkexpr(addr) ) );
+            delta += alen+1;
+            DIP( "vdppd $%d,%s,%s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
+         }
+
+         IRTemp src_vec = newTemp(Ity_V128);
+         assign(src_vec, getXMMReg( rV ));
+         IRTemp res_vec = math_DPPD_128( src_vec, dst_vec, imm8 );
+         putYMMRegLoAndZU( rG, mkexpr(res_vec) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x42:
+      /* VMPSADBW imm8, xmm3/m128,xmm2,xmm1 */
+      /* VMPSADBW = VEX.NDS.128.66.0F3A.WIG 42 /r ib */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm   = getUChar(delta);
+         Int    imm8;
+         IRTemp src_vec = newTemp(Ity_V128);
+         IRTemp dst_vec = newTemp(Ity_V128);
+         UInt   rG      = gregOfRexRM(pfx, modrm);
+         UInt   rV      = getVexNvvvv(pfx);
+
+         assign( dst_vec, getXMMReg(rV) );
+  
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+
+            imm8 = (Int)getUChar(delta+1);
+            assign( src_vec, getXMMReg(rE) );
+            delta += 1+1;
+            DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
+                 nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
+                             1/* imm8 is 1 byte after the amode */ );
+            assign( src_vec, loadLE( Ity_V128, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
+                 dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
+         }
+
+         putYMMRegLoAndZU( rG, mkexpr( math_MPSADBW_128(dst_vec,
+                                                        src_vec, imm8) ) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VMPSADBW imm8, ymm3/m256,ymm2,ymm1 */
+      /* VMPSADBW = VEX.NDS.256.66.0F3A.WIG 42 /r ib */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         UChar  modrm   = getUChar(delta);
+         Int    imm8;
+         IRTemp src_vec = newTemp(Ity_V256);
+         IRTemp dst_vec = newTemp(Ity_V256);
+         UInt   rG      = gregOfRexRM(pfx, modrm);
+         UInt   rV      = getVexNvvvv(pfx);
+         IRTemp sHi, sLo, dHi, dLo;
+         sHi = sLo = dHi = dLo = IRTemp_INVALID;
+
+         assign( dst_vec, getYMMReg(rV) );
+
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+
+            imm8 = (Int)getUChar(delta+1);
+            assign( src_vec, getYMMReg(rE) );
+            delta += 1+1;
+            DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
+                 nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG) );
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf,
+                             1/* imm8 is 1 byte after the amode */ );
+            assign( src_vec, loadLE( Ity_V256, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "vmpsadbw $%d, %s,%s,%s\n", imm8,
+                 dis_buf, nameYMMReg(rV), nameYMMReg(rG) );
+         }
+
+         breakupV256toV128s( dst_vec, &dHi, &dLo );
+         breakupV256toV128s( src_vec, &sHi, &sLo );
+         putYMMReg( rG, binop( Iop_V128HLtoV256,
+                               mkexpr( math_MPSADBW_128(dHi, sHi, imm8 >> 3) ),
+                               mkexpr( math_MPSADBW_128(dLo, sLo, imm8) ) ) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x44:
+      /* VPCLMULQDQ imm8, xmm3/m128,xmm2,xmm1 */
+      /* VPCLMULQDQ = VEX.NDS.128.66.0F3A.WIG 44 /r ib */
+      /* 66 0F 3A 44 /r ib = PCLMULQDQ xmm1, xmm2/m128, imm8
+       * Carry-less multiplication of selected XMM quadwords into XMM
+       * registers (a.k.a multiplication of polynomials over GF(2))
+       */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         UChar  modrm = getUChar(delta);
+         Int imm8;
+         IRTemp sV    = newTemp(Ity_V128);
+         IRTemp dV    = newTemp(Ity_V128);
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+
+         assign( dV, getXMMReg(rV) );
+  
+         if ( epartIsReg( modrm ) ) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            imm8 = (Int)getUChar(delta+1);
+            assign( sV, getXMMReg(rE) );
+            delta += 1+1;
+            DIP( "vpclmulqdq $%d, %s,%s,%s\n", imm8,
+                 nameXMMReg(rE), nameXMMReg(rV), nameXMMReg(rG) );    
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 
+                             1/* imm8 is 1 byte after the amode */ );
+            assign( sV, loadLE( Ity_V128, mkexpr(addr) ) );
+            imm8 = (Int)getUChar(delta+alen);
+            delta += alen+1;
+            DIP( "vpclmulqdq $%d, %s,%s,%s\n", 
+                 imm8, dis_buf, nameXMMReg(rV), nameXMMReg(rG) );
+         }
+
+         putYMMRegLoAndZU( rG, mkexpr( math_PCLMULQDQ(dV, sV, imm8) ) );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x46:
+      /* VPERM2I128 imm8, ymm3/m256, ymm2, ymm1 = VEX.NDS.256.66.0F3A.W0 46 /r ib */
+      if (have66noF2noF3(pfx)
+          && 1==getVexL(pfx)/*256*/ && 0==getRexW(pfx)/*W0*/) {
+         UChar  modrm = getUChar(delta);
+         UInt   imm8  = 0;
+         UInt   rG    = gregOfRexRM(pfx, modrm);
+         UInt   rV    = getVexNvvvv(pfx);
+         IRTemp s00   = newTemp(Ity_V128);
+         IRTemp s01   = newTemp(Ity_V128);
+         IRTemp s10   = newTemp(Ity_V128);
+         IRTemp s11   = newTemp(Ity_V128);
+         assign(s00, getYMMRegLane128(rV, 0));
+         assign(s01, getYMMRegLane128(rV, 1));
+         if (epartIsReg(modrm)) {
+            UInt rE = eregOfRexRM(pfx, modrm);
+            delta += 1;
+            imm8 = getUChar(delta);
+            DIP("vperm2i128 $%u,%s,%s,%s\n",
+                imm8, nameYMMReg(rE), nameYMMReg(rV), nameYMMReg(rG));
+            assign(s10, getYMMRegLane128(rE, 0));
+            assign(s11, getYMMRegLane128(rE, 1));
+         } else {
+            addr = disAMode( &alen, vbi, pfx, delta, dis_buf, 1 );
+            delta += alen;
+            imm8 = getUChar(delta);
+            DIP("vperm2i128 $%u,%s,%s,%s\n",
+                imm8, dis_buf, nameYMMReg(rV), nameYMMReg(rG));
+            assign(s10, loadLE(Ity_V128, binop(Iop_Add64,
+                                               mkexpr(addr), mkU64(0))));
+            assign(s11, loadLE(Ity_V128, binop(Iop_Add64,
+                                               mkexpr(addr), mkU64(16))));
+         }
+         delta++;
+#        define SEL(_nn) (((_nn)==0) ? s00 : ((_nn)==1) ? s01 \
+                                           : ((_nn)==2) ? s10 : s11)
+         putYMMRegLane128(rG, 0, mkexpr(SEL((imm8 >> 0) & 3)));
+         putYMMRegLane128(rG, 1, mkexpr(SEL((imm8 >> 4) & 3)));
+#        undef SEL
+         if (imm8 & (1<<3)) putYMMRegLane128(rG, 0, mkV128(0));
+         if (imm8 & (1<<7)) putYMMRegLane128(rG, 1, mkV128(0));
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x4A:
+      /* VBLENDVPS xmmG, xmmE/memE, xmmV, xmmIS4
+         ::: xmmG:V128 = PBLEND(xmmE, xmmV, xmmIS4) (RMVR) */
+      /* VBLENDVPS = VEX.NDS.128.66.0F3A.WIG 4A /r /is4 */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VBLENDV_128 ( vbi, pfx, delta,
+                                   "vblendvps", 4, Iop_SarN32x4 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VBLENDVPS ymmG, ymmE/memE, ymmV, ymmIS4
+         ::: ymmG:V256 = PBLEND(ymmE, ymmV, ymmIS4) (RMVR) */
+      /* VBLENDVPS = VEX.NDS.256.66.0F3A.WIG 4A /r /is4 */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VBLENDV_256 ( vbi, pfx, delta,
+                                   "vblendvps", 4, Iop_SarN32x4 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x4B:
+      /* VBLENDVPD xmmG, xmmE/memE, xmmV, xmmIS4
+         ::: xmmG:V128 = PBLEND(xmmE, xmmV, xmmIS4) (RMVR) */
+      /* VBLENDVPD = VEX.NDS.128.66.0F3A.WIG 4B /r /is4 */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VBLENDV_128 ( vbi, pfx, delta,
+                                   "vblendvpd", 8, Iop_SarN64x2 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VBLENDVPD ymmG, ymmE/memE, ymmV, ymmIS4
+         ::: ymmG:V256 = PBLEND(ymmE, ymmV, ymmIS4) (RMVR) */
+      /* VBLENDVPD = VEX.NDS.256.66.0F3A.WIG 4B /r /is4 */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VBLENDV_256 ( vbi, pfx, delta,
+                                   "vblendvpd", 8, Iop_SarN64x2 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x4C:
+      /* VPBLENDVB xmmG, xmmE/memE, xmmV, xmmIS4
+         ::: xmmG:V128 = PBLEND(xmmE, xmmV, xmmIS4) (RMVR) */
+      /* VPBLENDVB = VEX.NDS.128.66.0F3A.WIG 4C /r /is4 */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_VBLENDV_128 ( vbi, pfx, delta,
+                                   "vpblendvb", 1, Iop_SarN8x16 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      /* VPBLENDVB ymmG, ymmE/memE, ymmV, ymmIS4
+         ::: ymmG:V256 = PBLEND(ymmE, ymmV, ymmIS4) (RMVR) */
+      /* VPBLENDVB = VEX.NDS.256.66.0F3A.WIG 4C /r /is4 */
+      if (have66noF2noF3(pfx) && 1==getVexL(pfx)/*256*/) {
+         delta = dis_VBLENDV_256 ( vbi, pfx, delta,
+                                   "vpblendvb", 1, Iop_SarN8x16 );
+         *uses_vvvv = True;
+         goto decode_success;
+      }
+      break;
+
+   case 0x60:
+   case 0x61:
+   case 0x62:
+   case 0x63:
+      /* VEX.128.66.0F3A.WIG 63 /r ib = VPCMPISTRI imm8, xmm2/m128, xmm1
+         VEX.128.66.0F3A.WIG 62 /r ib = VPCMPISTRM imm8, xmm2/m128, xmm1
+         VEX.128.66.0F3A.WIG 61 /r ib = VPCMPESTRI imm8, xmm2/m128, xmm1
+         VEX.128.66.0F3A.WIG 60 /r ib = VPCMPESTRM imm8, xmm2/m128, xmm1
+         (selected special cases that actually occur in glibc,
+          not by any means a complete implementation.)
+      */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         Long delta0 = delta;
+         delta = dis_PCMPxSTRx( vbi, pfx, delta, True/*isAvx*/, opc );
+         if (delta > delta0) goto decode_success;
+         /* else fall though; dis_PCMPxSTRx failed to decode it */
+      }
+      break;
+
+   case 0xDF:
+      /* VAESKEYGENASSIST imm8, xmm2/m128, xmm1 = VEX.128.66.0F3A.WIG DF /r */
+      if (have66noF2noF3(pfx) && 0==getVexL(pfx)/*128*/) {
+         delta = dis_AESKEYGENASSIST( vbi, pfx, delta, True/*isAvx*/ );
+         goto decode_success;
+      }
+      break;
+
+   case 0xF0:
+      /* RORX imm8, r/m32, r32a = VEX.LZ.F2.0F3A.W0 F0 /r /i */
+      /* RORX imm8, r/m64, r64a = VEX.LZ.F2.0F3A.W1 F0 /r /i */
+      if (haveF2no66noF3(pfx) && 0==getVexL(pfx)/*LZ*/ && !haveREX(pfx)) {
+         Int     size = getRexW(pfx) ? 8 : 4;
+         IRType  ty   = szToITy(size);
+         IRTemp  src  = newTemp(ty);
+         UChar   rm   = getUChar(delta);
+         UChar   imm8;
+
+         if (epartIsReg(rm)) {
+            imm8 = getUChar(delta+1);
+            assign( src, getIRegE(size,pfx,rm) );
+            DIP("rorx %d,%s,%s\n", imm8, nameIRegE(size,pfx,rm),
+                                   nameIRegG(size,pfx,rm));
+            delta += 2;
+         } else {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            imm8 = getUChar(delta+alen);
+            assign( src, loadLE(ty, mkexpr(addr)) );
+            DIP("rorx %d,%s,%s\n", imm8, dis_buf, nameIRegG(size,pfx,rm));
+            delta += alen + 1;
+         }
+         imm8 &= 8*size-1;
+
+         /* dst = (src >>u imm8) | (src << (size-imm8)) */
+         putIRegG( size, pfx, rm,
+                   imm8 == 0 ? mkexpr(src)
+                   : binop( mkSizedOp(ty,Iop_Or8),
+                            binop( mkSizedOp(ty,Iop_Shr8), mkexpr(src),
+                                   mkU8(imm8) ),
+                            binop( mkSizedOp(ty,Iop_Shl8), mkexpr(src),
+                                   mkU8(8*size-imm8) ) ) );
+         /* Flags aren't modified.  */
+         goto decode_success;
+      }
+      break;
+
+   default:
+      break;
+
+   }
+
+  //decode_failure:
+   return deltaIN;
+
+  decode_success:
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- Disassemble a single instruction                     ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction is
+   located in host memory at &guest_code[delta]. */
+   
+static
+DisResult disInstr_AMD64_WRK ( 
+             /*OUT*/Bool* expect_CAS,
+             Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+             Bool         resteerCisOk,
+             void*        callback_opaque,
+             Long         delta64,
+             const VexArchInfo* archinfo,
+             const VexAbiInfo*  vbi,
+             Bool         sigill_diag
+          )
+{
+   IRTemp    t1, t2;
+   UChar     pre;
+   Int       n, n_prefixes;
+   DisResult dres;
+
+   /* The running delta */
+   Long delta = delta64;
+
+   /* Holds eip at the start of the insn, so that we can print
+      consistent error messages for unimplemented insns. */
+   Long delta_start = delta;
+
+   /* sz denotes the nominal data-op size of the insn; we change it to
+      2 if an 0x66 prefix is seen and 8 if REX.W is 1.  In case of
+      conflict REX.W takes precedence. */
+   Int sz = 4;
+
+   /* pfx holds the summary of prefixes. */
+   Prefix pfx = PFX_EMPTY;
+
+   /* Holds the computed opcode-escape indication. */
+   Escape esc = ESC_NONE;
+
+   /* Set result defaults. */
+   dres.whatNext    = Dis_Continue;
+   dres.len         = 0;
+   dres.continueAt  = 0;
+   dres.jk_StopHere = Ijk_INVALID;
+   *expect_CAS = False;
+
+   vassert(guest_RIP_next_assumed == 0);
+   vassert(guest_RIP_next_mustcheck == False);
+
+   t1 = t2 = IRTemp_INVALID; 
+
+   DIP("\t0x%llx:  ", guest_RIP_bbstart+delta);
+
+   /* Spot "Special" instructions (see comment at top of file). */
+   {
+      const UChar* code = guest_code + delta;
+      /* Spot the 16-byte preamble:
+         48C1C703   rolq $3,  %rdi
+         48C1C70D   rolq $13, %rdi
+         48C1C73D   rolq $61, %rdi
+         48C1C733   rolq $51, %rdi
+      */
+      if (code[ 0] == 0x48 && code[ 1] == 0xC1 && code[ 2] == 0xC7 
+                                               && code[ 3] == 0x03 &&
+          code[ 4] == 0x48 && code[ 5] == 0xC1 && code[ 6] == 0xC7 
+                                               && code[ 7] == 0x0D &&
+          code[ 8] == 0x48 && code[ 9] == 0xC1 && code[10] == 0xC7 
+                                               && code[11] == 0x3D &&
+          code[12] == 0x48 && code[13] == 0xC1 && code[14] == 0xC7 
+                                               && code[15] == 0x33) {
+         /* Got a "Special" instruction preamble.  Which one is it? */
+         if (code[16] == 0x48 && code[17] == 0x87 
+                              && code[18] == 0xDB /* xchgq %rbx,%rbx */) {
+            /* %RDX = client_request ( %RAX ) */
+            DIP("%%rdx = client_request ( %%rax )\n");
+            delta += 19;
+            jmp_lit(&dres, Ijk_ClientReq, guest_RIP_bbstart+delta);
+            vassert(dres.whatNext == Dis_StopHere);
+            goto decode_success;
+         }
+         else
+         if (code[16] == 0x48 && code[17] == 0x87 
+                              && code[18] == 0xC9 /* xchgq %rcx,%rcx */) {
+            /* %RAX = guest_NRADDR */
+            DIP("%%rax = guest_NRADDR\n");
+            delta += 19;
+            putIRegRAX(8, IRExpr_Get( OFFB_NRADDR, Ity_I64 ));
+            goto decode_success;
+         }
+         else
+         if (code[16] == 0x48 && code[17] == 0x87 
+                              && code[18] == 0xD2 /* xchgq %rdx,%rdx */) {
+            /* call-noredir *%RAX */
+            DIP("call-noredir *%%rax\n");
+            delta += 19;
+            t1 = newTemp(Ity_I64);
+            assign(t1, getIRegRAX(8));
+            t2 = newTemp(Ity_I64);
+            assign(t2, binop(Iop_Sub64, getIReg64(R_RSP), mkU64(8)));
+            putIReg64(R_RSP, mkexpr(t2));
+            storeLE( mkexpr(t2), mkU64(guest_RIP_bbstart+delta));
+            jmp_treg(&dres, Ijk_NoRedir, t1);
+            vassert(dres.whatNext == Dis_StopHere);
+            goto decode_success;
+         }
+         else
+         if (code[16] == 0x48 && code[17] == 0x87
+                              && code[18] == 0xff /* xchgq %rdi,%rdi */) {
+           /* IR injection */
+            DIP("IR injection\n");
+            vex_inject_ir(irsb, Iend_LE);
+
+            // Invalidate the current insn. The reason is that the IRop we're
+            // injecting here can change. In which case the translation has to
+            // be redone. For ease of handling, we simply invalidate all the
+            // time.
+            stmt(IRStmt_Put(OFFB_CMSTART, mkU64(guest_RIP_curr_instr)));
+            stmt(IRStmt_Put(OFFB_CMLEN,   mkU64(19)));
+   
+            delta += 19;
+
+            stmt( IRStmt_Put( OFFB_RIP, mkU64(guest_RIP_bbstart + delta) ) );
+            dres.whatNext    = Dis_StopHere;
+            dres.jk_StopHere = Ijk_InvalICache;
+            goto decode_success;
+         }
+         /* We don't know what it is. */
+         goto decode_failure;
+         /*NOTREACHED*/
+      }
+   }
+
+   /* Eat prefixes, summarising the result in pfx and sz, and rejecting
+      as many invalid combinations as possible. */
+   n_prefixes = 0;
+   while (True) {
+      if (n_prefixes > 7) goto decode_failure;
+      pre = getUChar(delta);
+      switch (pre) {
+         case 0x66: pfx |= PFX_66; break;
+         case 0x67: pfx |= PFX_ASO; break;
+         case 0xF2: pfx |= PFX_F2; break;
+         case 0xF3: pfx |= PFX_F3; break;
+         case 0xF0: pfx |= PFX_LOCK; *expect_CAS = True; break;
+         case 0x2E: pfx |= PFX_CS; break;
+         case 0x3E: pfx |= PFX_DS; break;
+         case 0x26: pfx |= PFX_ES; break;
+         case 0x64: pfx |= PFX_FS; break;
+         case 0x65: pfx |= PFX_GS; break;
+         case 0x36: pfx |= PFX_SS; break;
+         case 0x40 ... 0x4F:
+            pfx |= PFX_REX;
+            if (pre & (1<<3)) pfx |= PFX_REXW;
+            if (pre & (1<<2)) pfx |= PFX_REXR;
+            if (pre & (1<<1)) pfx |= PFX_REXX;
+            if (pre & (1<<0)) pfx |= PFX_REXB;
+            break;
+         default: 
+            goto not_a_legacy_prefix;
+      }
+      n_prefixes++;
+      delta++;
+   }
+
+   not_a_legacy_prefix:
+   /* We've used up all the non-VEX prefixes.  Parse and validate a
+      VEX prefix if that's appropriate. */
+   if (archinfo->hwcaps & VEX_HWCAPS_AMD64_AVX) {
+      /* Used temporarily for holding VEX prefixes. */
+      UChar vex0 = getUChar(delta);
+      if (vex0 == 0xC4) {
+         /* 3-byte VEX */
+         UChar vex1 = getUChar(delta+1);
+         UChar vex2 = getUChar(delta+2);
+         delta += 3;
+         pfx |= PFX_VEX;
+         /* Snarf contents of byte 1 */
+         /* R */ pfx |= (vex1 & (1<<7)) ? 0 : PFX_REXR;
+         /* X */ pfx |= (vex1 & (1<<6)) ? 0 : PFX_REXX;
+         /* B */ pfx |= (vex1 & (1<<5)) ? 0 : PFX_REXB;
+         /* m-mmmm */
+         switch (vex1 & 0x1F) {
+            case 1: esc = ESC_0F;   break;
+            case 2: esc = ESC_0F38; break;
+            case 3: esc = ESC_0F3A; break;
+            /* Any other m-mmmm field will #UD */
+            default: goto decode_failure;
+         }
+         /* Snarf contents of byte 2 */
+         /* W */    pfx |= (vex2 & (1<<7)) ? PFX_REXW : 0;
+         /* ~v3 */  pfx |= (vex2 & (1<<6)) ? 0 : PFX_VEXnV3;
+         /* ~v2 */  pfx |= (vex2 & (1<<5)) ? 0 : PFX_VEXnV2;
+         /* ~v1 */  pfx |= (vex2 & (1<<4)) ? 0 : PFX_VEXnV1;
+         /* ~v0 */  pfx |= (vex2 & (1<<3)) ? 0 : PFX_VEXnV0;
+         /* L */    pfx |= (vex2 & (1<<2)) ? PFX_VEXL : 0;
+         /* pp */
+         switch (vex2 & 3) {
+            case 0: break;
+            case 1: pfx |= PFX_66; break;
+            case 2: pfx |= PFX_F3; break;
+            case 3: pfx |= PFX_F2; break;
+            default: vassert(0);
+         }
+      }
+      else if (vex0 == 0xC5) {
+         /* 2-byte VEX */
+         UChar vex1 = getUChar(delta+1);
+         delta += 2;
+         pfx |= PFX_VEX;
+         /* Snarf contents of byte 1 */
+         /* R */    pfx |= (vex1 & (1<<7)) ? 0 : PFX_REXR;
+         /* ~v3 */  pfx |= (vex1 & (1<<6)) ? 0 : PFX_VEXnV3;
+         /* ~v2 */  pfx |= (vex1 & (1<<5)) ? 0 : PFX_VEXnV2;
+         /* ~v1 */  pfx |= (vex1 & (1<<4)) ? 0 : PFX_VEXnV1;
+         /* ~v0 */  pfx |= (vex1 & (1<<3)) ? 0 : PFX_VEXnV0;
+         /* L */    pfx |= (vex1 & (1<<2)) ? PFX_VEXL : 0;
+         /* pp */
+         switch (vex1 & 3) {
+            case 0: break;
+            case 1: pfx |= PFX_66; break;
+            case 2: pfx |= PFX_F3; break;
+            case 3: pfx |= PFX_F2; break;
+            default: vassert(0);
+         }
+         /* implied: */
+         esc = ESC_0F;
+      }
+      /* Can't have both VEX and REX */
+      if ((pfx & PFX_VEX) && (pfx & PFX_REX))
+         goto decode_failure; /* can't have both */
+   }
+
+   /* Dump invalid combinations */
+   n = 0;
+   if (pfx & PFX_F2) n++;
+   if (pfx & PFX_F3) n++;
+   if (n > 1) 
+      goto decode_failure; /* can't have both */
+
+   n = 0;
+   if (pfx & PFX_CS) n++;
+   if (pfx & PFX_DS) n++;
+   if (pfx & PFX_ES) n++;
+   if (pfx & PFX_FS) n++;
+   if (pfx & PFX_GS) n++;
+   if (pfx & PFX_SS) n++;
+   if (n > 1) 
+      goto decode_failure; /* multiple seg overrides == illegal */
+
+   /* We have a %fs prefix.  Reject it if there's no evidence in 'vbi'
+      that we should accept it. */
+   if ((pfx & PFX_FS) && !vbi->guest_amd64_assume_fs_is_const)
+      goto decode_failure;
+
+   /* Ditto for %gs prefixes. */
+   if ((pfx & PFX_GS) && !vbi->guest_amd64_assume_gs_is_const)
+      goto decode_failure;
+
+   /* Set up sz. */
+   sz = 4;
+   if (pfx & PFX_66) sz = 2;
+   if ((pfx & PFX_REX) && (pfx & PFX_REXW)) sz = 8;
+
+   /* Now we should be looking at the primary opcode byte or the
+      leading escapes.  Check that any LOCK prefix is actually
+      allowed. */
+   if (haveLOCK(pfx)) {
+      if (can_be_used_with_LOCK_prefix( &guest_code[delta] )) {
+         DIP("lock ");
+      } else {
+         *expect_CAS = False;
+         goto decode_failure;
+      }
+   }
+
+   /* Eat up opcode escape bytes, until we're really looking at the
+      primary opcode byte.  But only if there's no VEX present. */
+   if (!(pfx & PFX_VEX)) {
+      vassert(esc == ESC_NONE);
+      pre = getUChar(delta);
+      if (pre == 0x0F) {
+         delta++;
+         pre = getUChar(delta);
+         switch (pre) {
+            case 0x38: esc = ESC_0F38; delta++; break;
+            case 0x3A: esc = ESC_0F3A; delta++; break;
+            default:   esc = ESC_0F; break;
+         }
+      }
+   }
+
+   /* So now we're really really looking at the primary opcode
+      byte. */
+   Long delta_at_primary_opcode = delta;
+
+   if (!(pfx & PFX_VEX)) {
+      /* Handle non-VEX prefixed instructions.  "Legacy" (non-VEX) SSE
+         instructions preserve the upper 128 bits of YMM registers;
+         iow we can simply ignore the presence of the upper halves of
+         these registers. */
+      switch (esc) {
+         case ESC_NONE:
+            delta = dis_ESC_NONE( &dres, expect_CAS,
+                                  resteerOkFn, resteerCisOk, callback_opaque,
+                                  archinfo, vbi, pfx, sz, delta );
+            break;
+         case ESC_0F:
+            delta = dis_ESC_0F  ( &dres, expect_CAS,
+                                  resteerOkFn, resteerCisOk, callback_opaque,
+                                  archinfo, vbi, pfx, sz, delta );
+            break;
+         case ESC_0F38:
+            delta = dis_ESC_0F38( &dres,
+                                  resteerOkFn, resteerCisOk, callback_opaque,
+                                  archinfo, vbi, pfx, sz, delta );
+            break;
+         case ESC_0F3A:
+            delta = dis_ESC_0F3A( &dres,
+                                  resteerOkFn, resteerCisOk, callback_opaque,
+                                  archinfo, vbi, pfx, sz, delta );
+            break;
+         default:
+            vassert(0);
+      }
+   } else {
+      /* VEX prefixed instruction */
+      /* Sloppy Intel wording: "An instruction encoded with a VEX.128
+         prefix that loads a YMM register operand ..." zeroes out bits
+         128 and above of the register. */
+      Bool uses_vvvv = False;
+      switch (esc) {
+         case ESC_0F:
+            delta = dis_ESC_0F__VEX ( &dres, &uses_vvvv,
+                                      resteerOkFn, resteerCisOk,
+                                      callback_opaque,
+                                      archinfo, vbi, pfx, sz, delta );
+            break;
+         case ESC_0F38:
+            delta = dis_ESC_0F38__VEX ( &dres, &uses_vvvv,
+                                        resteerOkFn, resteerCisOk,
+                                        callback_opaque,
+                                        archinfo, vbi, pfx, sz, delta );
+            break;
+         case ESC_0F3A:
+            delta = dis_ESC_0F3A__VEX ( &dres, &uses_vvvv,
+                                        resteerOkFn, resteerCisOk,
+                                        callback_opaque,
+                                        archinfo, vbi, pfx, sz, delta );
+            break;
+         case ESC_NONE:
+            /* The presence of a VEX prefix, by Intel definition,
+               always implies at least an 0F escape. */
+            goto decode_failure;
+         default:
+            vassert(0);
+      }
+      /* If the insn doesn't use VEX.vvvv then it must be all ones.
+         Check this. */
+      if (!uses_vvvv) {
+         if (getVexNvvvv(pfx) != 0)
+            goto decode_failure;
+      }
+   }
+
+   vassert(delta - delta_at_primary_opcode >= 0);
+   vassert(delta - delta_at_primary_opcode < 16/*let's say*/);
+
+   /* Use delta == delta_at_primary_opcode to denote decode failure.
+      This implies that any successful decode must use at least one
+      byte up. */
+   if (delta == delta_at_primary_opcode)
+      goto decode_failure;
+   else
+      goto decode_success; /* \o/ */
+
+#if 0 /* XYZZY */
+
+   /* ---------------------------------------------------- */
+   /* --- The SSE/SSE2 decoder.                        --- */
+   /* ---------------------------------------------------- */
+
+   /* What did I do to deserve SSE ?  Perhaps I was really bad in a
+      previous life? */
+
+   /* Note, this doesn't handle SSE3 right now.  All amd64s support
+      SSE2 as a minimum so there is no point distinguishing SSE1 vs
+      SSE2. */
+
+   insn = &guest_code[delta];
+
+   /* FXSAVE is spuriously at the start here only because it is
+      thusly placed in guest-x86/toIR.c. */
+
+   /* ------ SSE decoder main ------ */
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSE decoder.                      --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- start of the SSE2 decoder.                   --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSE/SSE2 decoder.                 --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- start of the SSE3 decoder.                   --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSE3 decoder.                     --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- start of the SSSE3 decoder.                  --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSSE3 decoder.                    --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- start of the SSE4 decoder                    --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSE4 decoder                      --- */
+   /* ---------------------------------------------------- */
+
+   /*after_sse_decoders:*/
+
+   /* Get the primary opcode. */
+   opc = getUChar(delta); delta++;
+
+   /* We get here if the current insn isn't SSE, or this CPU doesn't
+      support SSE. */
+
+   switch (opc) {
+
+   /* ------------------------ Control flow --------------- */
+
+   /* ------------------------ CWD/CDQ -------------------- */
+
+   /* ------------------------ FPU ops -------------------- */
+
+   /* ------------------------ INT ------------------------ */
+
+   case 0xCD: { /* INT imm8 */
+      IRJumpKind jk = Ijk_Boring;
+      if (have66orF2orF3(pfx)) goto decode_failure;
+      d64 = getUChar(delta); delta++;
+      switch (d64) {
+         case 32: jk = Ijk_Sys_int32; break;
+         default: goto decode_failure;
+      }
+      guest_RIP_next_mustcheck = True;
+      guest_RIP_next_assumed = guest_RIP_bbstart + delta;
+      jmp_lit(jk, guest_RIP_next_assumed);
+      /* It's important that all ArchRegs carry their up-to-date value
+         at this point.  So we declare an end-of-block here, which
+         forces any TempRegs caching ArchRegs to be flushed. */
+      vassert(dres.whatNext == Dis_StopHere);
+      DIP("int $0x%02x\n", (UInt)d64);
+      break;
+   }
+
+   /* ------------------------ Jcond, byte offset --------- */
+
+   /* ------------------------ IMUL ----------------------- */
+
+   /* ------------------------ MOV ------------------------ */
+
+   /* ------------------------ MOVx ------------------------ */
+
+   /* ------------------------ opl imm, A ----------------- */
+
+   /* ------------------------ opl Ev, Gv ----------------- */
+
+   /* ------------------------ opl Gv, Ev ----------------- */
+
+   /* ------------------------ POP ------------------------ */
+
+   /* ------------------------ PUSH ----------------------- */
+
+   /* ------ AE: SCAS variants ------ */
+
+   /* ------ A6, A7: CMPS variants ------ */
+
+   /* ------ AA, AB: STOS variants ------ */
+
+   /* ------ A4, A5: MOVS variants ------ */
+
+   /* ------------------------ XCHG ----------------------- */
+
+   /* ------------------------ IN / OUT ----------------------- */
+ 
+   /* ------------------------ (Grp1 extensions) ---------- */
+
+   /* ------------------------ (Grp2 extensions) ---------- */
+
+   /* ------------------------ (Grp3 extensions) ---------- */
+
+   /* ------------------------ (Grp4 extensions) ---------- */
+
+   /* ------------------------ (Grp5 extensions) ---------- */
+
+   /* ------------------------ Escapes to 2-byte opcodes -- */
+
+   case 0x0F: {
+      opc = getUChar(delta); delta++;
+      switch (opc) {
+
+      /* =-=-=-=-=-=-=-=-=- Grp8 =-=-=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- BSF/BSR -=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- BSWAP -=-=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- BT/BTS/BTR/BTC =-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- CMOV =-=-=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- CMPXCHG -=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- CPUID -=-=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- MOVZX, MOVSX =-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- MUL/IMUL =-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- NOPs =-=-=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- Jcond d32 -=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- PREFETCH =-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- RDTSC -=-=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- SETcc Eb =-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- SHLD/SHRD -=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- SYSCALL -=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- XADD -=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- SGDT and SIDT =-=-=-=-=-=-=-=-=-=-= */
+
+      /* =-=-=-=-=-=-=-=-=- unimp2 =-=-=-=-=-=-=-=-=-=-= */
+
+      default:
+         goto decode_failure;
+   } /* switch (opc) for the 2-byte opcodes */
+   goto decode_success;
+   } /* case 0x0F: of primary opcode */
+
+   /* ------------------------ ??? ------------------------ */
+#endif /* XYZZY */
+  
+     //default:
+  decode_failure:
+   /* All decode failures end up here. */
+   if (sigill_diag) {
+      vex_printf("vex amd64->IR: unhandled instruction bytes: "
+                 "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+                 (Int)getUChar(delta_start+0),
+                 (Int)getUChar(delta_start+1),
+                 (Int)getUChar(delta_start+2),
+                 (Int)getUChar(delta_start+3),
+                 (Int)getUChar(delta_start+4),
+                 (Int)getUChar(delta_start+5),
+                 (Int)getUChar(delta_start+6),
+                 (Int)getUChar(delta_start+7) );
+      vex_printf("vex amd64->IR:   REX=%d REX.W=%d REX.R=%d REX.X=%d REX.B=%d\n",
+                 haveREX(pfx) ? 1 : 0, getRexW(pfx), getRexR(pfx),
+                 getRexX(pfx), getRexB(pfx));
+      vex_printf("vex amd64->IR:   VEX=%d VEX.L=%d VEX.nVVVV=0x%x ESC=%s\n",
+                 haveVEX(pfx) ? 1 : 0, getVexL(pfx),
+                 getVexNvvvv(pfx),
+                 esc==ESC_NONE ? "NONE" :
+                   esc==ESC_0F ? "0F" :
+                   esc==ESC_0F38 ? "0F38" :
+                   esc==ESC_0F3A ? "0F3A" : "???");
+      vex_printf("vex amd64->IR:   PFX.66=%d PFX.F2=%d PFX.F3=%d\n",
+                 have66(pfx) ? 1 : 0, haveF2(pfx) ? 1 : 0,
+                 haveF3(pfx) ? 1 : 0);
+   }
+
+   /* Tell the dispatcher that this insn cannot be decoded, and so has
+      not been executed, and (is currently) the next to be executed.
+      RIP should be up-to-date since it was made so at the start of each
+      insn, but nevertheless be paranoid and update it again right
+      now. */
+   stmt( IRStmt_Put( OFFB_RIP, mkU64(guest_RIP_curr_instr) ) );
+   jmp_lit(&dres, Ijk_NoDecode, guest_RIP_curr_instr);
+   vassert(dres.whatNext == Dis_StopHere);
+   dres.len = 0;
+   /* We also need to say that a CAS is not expected now, regardless
+      of what it might have been set to at the start of the function,
+      since the IR that we've emitted just above (to synthesise a
+      SIGILL) does not involve any CAS, and presumably no other IR has
+      been emitted for this (non-decoded) insn. */
+   *expect_CAS = False;
+   return dres;
+
+   //   } /* switch (opc) for the main (primary) opcode switch. */
+
+  decode_success:
+   /* All decode successes end up here. */
+   switch (dres.whatNext) {
+      case Dis_Continue:
+         stmt( IRStmt_Put( OFFB_RIP, mkU64(guest_RIP_bbstart + delta) ) );
+         break;
+      case Dis_ResteerU:
+      case Dis_ResteerC:
+         stmt( IRStmt_Put( OFFB_RIP, mkU64(dres.continueAt) ) );
+         break;
+      case Dis_StopHere:
+         break;
+      default:
+         vassert(0);
+   }
+
+   DIP("\n");
+   dres.len = toUInt(delta - delta_start);
+   return dres;
+}
+
+#undef DIP
+#undef DIS
+
+
+/*------------------------------------------------------------*/
+/*--- Top-level fn                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction
+   is located in host memory at &guest_code[delta]. */
+
+DisResult disInstr_AMD64 ( IRSB*        irsb_IN,
+                           Bool         (*resteerOkFn) ( void*, Addr ),
+                           Bool         resteerCisOk,
+                           void*        callback_opaque,
+                           const UChar* guest_code_IN,
+                           Long         delta,
+                           Addr         guest_IP,
+                           VexArch      guest_arch,
+                           const VexArchInfo* archinfo,
+                           const VexAbiInfo*  abiinfo,
+                           VexEndness   host_endness_IN,
+                           Bool         sigill_diag_IN )
+{
+   Int       i, x1, x2;
+   Bool      expect_CAS, has_CAS;
+   DisResult dres;
+
+   /* Set globals (see top of this file) */
+   vassert(guest_arch == VexArchAMD64);
+   guest_code           = guest_code_IN;
+   irsb                 = irsb_IN;
+   host_endness         = host_endness_IN;
+   guest_RIP_curr_instr = guest_IP;
+   guest_RIP_bbstart    = guest_IP - delta;
+
+   /* We'll consult these after doing disInstr_AMD64_WRK. */
+   guest_RIP_next_assumed   = 0;
+   guest_RIP_next_mustcheck = False;
+
+   x1 = irsb_IN->stmts_used;
+   expect_CAS = False;
+   dres = disInstr_AMD64_WRK ( &expect_CAS, resteerOkFn,
+                               resteerCisOk,
+                               callback_opaque,
+                               delta, archinfo, abiinfo, sigill_diag_IN );
+   x2 = irsb_IN->stmts_used;
+   vassert(x2 >= x1);
+
+   /* If disInstr_AMD64_WRK tried to figure out the next rip, check it
+      got it right.  Failure of this assertion is serious and denotes
+      a bug in disInstr. */
+   if (guest_RIP_next_mustcheck 
+       && guest_RIP_next_assumed != guest_RIP_curr_instr + dres.len) {
+      vex_printf("\n");
+      vex_printf("assumed next %%rip = 0x%llx\n", 
+                 guest_RIP_next_assumed );
+      vex_printf(" actual next %%rip = 0x%llx\n", 
+                 guest_RIP_curr_instr + dres.len );
+      vpanic("disInstr_AMD64: disInstr miscalculated next %rip");
+   }
+
+   /* See comment at the top of disInstr_AMD64_WRK for meaning of
+      expect_CAS.  Here, we (sanity-)check for the presence/absence of
+      IRCAS as directed by the returned expect_CAS value. */
+   has_CAS = False;
+   for (i = x1; i < x2; i++) {
+      if (irsb_IN->stmts[i]->tag == Ist_CAS)
+         has_CAS = True;
+   }
+
+   if (expect_CAS != has_CAS) {
+      /* inconsistency detected.  re-disassemble the instruction so as
+         to generate a useful error message; then assert. */
+      vex_traceflags |= VEX_TRACE_FE;
+      dres = disInstr_AMD64_WRK ( &expect_CAS, resteerOkFn,
+                                  resteerCisOk,
+                                  callback_opaque,
+                                  delta, archinfo, abiinfo, sigill_diag_IN );
+      for (i = x1; i < x2; i++) {
+         vex_printf("\t\t");
+         ppIRStmt(irsb_IN->stmts[i]);
+         vex_printf("\n");
+      }
+      /* Failure of this assertion is serious and denotes a bug in
+         disInstr. */
+      vpanic("disInstr_AMD64: inconsistency in LOCK prefix handling");
+   }
+
+   return dres;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Unused stuff                                         ---*/
+/*------------------------------------------------------------*/
+
+// A potentially more Memcheck-friendly version of gen_LZCNT, if
+// this should ever be needed.
+//
+//static IRTemp gen_LZCNT ( IRType ty, IRTemp src )
+//{
+//   /* Scheme is simple: propagate the most significant 1-bit into all
+//      lower positions in the word.  This gives a word of the form
+//      0---01---1.  Now invert it, giving a word of the form
+//      1---10---0, then do a population-count idiom (to count the 1s,
+//      which is the number of leading zeroes, or the word size if the
+//      original word was 0).
+//   */
+//   Int i;
+//   IRTemp t[7];
+//   for (i = 0; i < 7; i++) {
+//      t[i] = newTemp(ty);
+//   }
+//   if (ty == Ity_I64) {
+//      assign(t[0], binop(Iop_Or64, mkexpr(src),
+//                                   binop(Iop_Shr64, mkexpr(src),  mkU8(1))));
+//      assign(t[1], binop(Iop_Or64, mkexpr(t[0]),
+//                                   binop(Iop_Shr64, mkexpr(t[0]), mkU8(2))));
+//      assign(t[2], binop(Iop_Or64, mkexpr(t[1]),
+//                                   binop(Iop_Shr64, mkexpr(t[1]), mkU8(4))));
+//      assign(t[3], binop(Iop_Or64, mkexpr(t[2]),
+//                                   binop(Iop_Shr64, mkexpr(t[2]), mkU8(8))));
+//      assign(t[4], binop(Iop_Or64, mkexpr(t[3]),
+//                                   binop(Iop_Shr64, mkexpr(t[3]), mkU8(16))));
+//      assign(t[5], binop(Iop_Or64, mkexpr(t[4]),
+//                                   binop(Iop_Shr64, mkexpr(t[4]), mkU8(32))));
+//      assign(t[6], unop(Iop_Not64, mkexpr(t[5])));
+//      return gen_POPCOUNT(ty, t[6]);
+//   }
+//   if (ty == Ity_I32) {
+//      assign(t[0], binop(Iop_Or32, mkexpr(src),
+//                                   binop(Iop_Shr32, mkexpr(src),  mkU8(1))));
+//      assign(t[1], binop(Iop_Or32, mkexpr(t[0]),
+//                                   binop(Iop_Shr32, mkexpr(t[0]), mkU8(2))));
+//      assign(t[2], binop(Iop_Or32, mkexpr(t[1]),
+//                                   binop(Iop_Shr32, mkexpr(t[1]), mkU8(4))));
+//      assign(t[3], binop(Iop_Or32, mkexpr(t[2]),
+//                                   binop(Iop_Shr32, mkexpr(t[2]), mkU8(8))));
+//      assign(t[4], binop(Iop_Or32, mkexpr(t[3]),
+//                                   binop(Iop_Shr32, mkexpr(t[3]), mkU8(16))));
+//      assign(t[5], unop(Iop_Not32, mkexpr(t[4])));
+//      return gen_POPCOUNT(ty, t[5]);
+//   }
+//   if (ty == Ity_I16) {
+//      assign(t[0], binop(Iop_Or16, mkexpr(src),
+//                                   binop(Iop_Shr16, mkexpr(src),  mkU8(1))));
+//      assign(t[1], binop(Iop_Or16, mkexpr(t[0]),
+//                                   binop(Iop_Shr16, mkexpr(t[0]), mkU8(2))));
+//      assign(t[2], binop(Iop_Or16, mkexpr(t[1]),
+//                                   binop(Iop_Shr16, mkexpr(t[1]), mkU8(4))));
+//      assign(t[3], binop(Iop_Or16, mkexpr(t[2]),
+//                                   binop(Iop_Shr16, mkexpr(t[2]), mkU8(8))));
+//      assign(t[4], unop(Iop_Not16, mkexpr(t[3])));
+//      return gen_POPCOUNT(ty, t[4]);
+//   }
+//   vassert(0);
+//}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end                                       guest_amd64_toIR.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/priv/guest_arm64_defs.h b/VEX/priv/guest_arm64_defs.h
new file mode 100644
index 0000000..a3431f0
--- /dev/null
+++ b/VEX/priv/guest_arm64_defs.h
@@ -0,0 +1,258 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                guest_arm64_defs.h ---*/
+/*---------------------------------------------------------------*/
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2013-2013 OpenWorks
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_GUEST_ARM64_DEFS_H
+#define __VEX_GUEST_ARM64_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "guest_generic_bb_to_IR.h"     // DisResult
+
+/*---------------------------------------------------------*/
+/*--- arm64 to IR conversion                            ---*/
+/*---------------------------------------------------------*/
+
+/* Convert one ARM64 insn to IR.  See the type DisOneInstrFn in
+   bb_to_IR.h. */
+extern
+DisResult disInstr_ARM64 ( IRSB*        irbb,
+                           Bool         (*resteerOkFn) ( void*, Addr ),
+                           Bool         resteerCisOk,
+                           void*        callback_opaque,
+                           const UChar* guest_code,
+                           Long         delta,
+                           Addr         guest_IP,
+                           VexArch      guest_arch,
+                           const VexArchInfo* archinfo,
+                           const VexAbiInfo*  abiinfo,
+                           VexEndness   host_endness,
+                           Bool         sigill_diag );
+
+/* Used by the optimiser to specialise calls to helpers. */
+extern
+IRExpr* guest_arm64_spechelper ( const HChar* function_name,
+                                 IRExpr** args,
+                                 IRStmt** precedingStmts,
+                                 Int      n_precedingStmts );
+
+/* Describes to the optimiser which parts of the guest state require
+   precise memory exceptions.  This is logically part of the guest
+   state description. */
+extern 
+Bool guest_arm64_state_requires_precise_mem_exns ( Int, Int,
+                                                   VexRegisterUpdates );
+
+extern
+VexGuestLayout arm64Guest_layout;
+
+
+/*---------------------------------------------------------*/
+/*--- arm64 guest helpers                               ---*/
+/*---------------------------------------------------------*/
+
+/* --- CLEAN HELPERS --- */
+
+/* Calculate NZCV from the supplied thunk components, in the positions
+   they appear in the CPSR, viz bits 31:28 for N Z C V respectively.
+   Returned bits 63:32 and 27:0 are zero. */
+extern 
+ULong arm64g_calculate_flags_nzcv ( ULong cc_op, ULong cc_dep1,
+                                    ULong cc_dep2, ULong cc_dep3 );
+
+/* Calculate the C flag from the thunk components, in the lowest bit
+   of the word (bit 0). */
+extern
+ULong arm64g_calculate_flag_c ( ULong cc_op, ULong cc_dep1,
+                                ULong cc_dep2, ULong cc_dep3 );
+
+//ZZ /* Calculate the V flag from the thunk components, in the lowest bit
+//ZZ    of the word (bit 0). */
+//ZZ extern 
+//ZZ UInt armg_calculate_flag_v ( UInt cc_op, UInt cc_dep1,
+//ZZ                              UInt cc_dep2, UInt cc_dep3 );
+//ZZ 
+/* Calculate the specified condition from the thunk components, in the
+   lowest bit of the word (bit 0). */
+extern 
+ULong arm64g_calculate_condition ( /* ARM64Condcode << 4 | cc_op */
+                                   ULong cond_n_op ,
+                                   ULong cc_dep1,
+                                   ULong cc_dep2, ULong cc_dep3 );
+
+//ZZ /* Calculate the QC flag from the thunk components, in the lowest bit
+//ZZ    of the word (bit 0). */
+//ZZ extern 
+//ZZ UInt armg_calculate_flag_qc ( UInt resL1, UInt resL2,
+//ZZ                               UInt resR1, UInt resR2 );
+
+
+/* --- DIRTY HELPERS --- */
+
+extern ULong arm64g_dirtyhelper_MRS_CNTVCT_EL0 ( void );
+
+
+/*---------------------------------------------------------*/
+/*--- Condition code stuff                              ---*/
+/*---------------------------------------------------------*/
+
+/* Flag masks.  Defines positions of flag bits in the NZCV
+   register. */
+#define ARM64G_CC_SHIFT_N  31
+#define ARM64G_CC_SHIFT_Z  30
+#define ARM64G_CC_SHIFT_C  29
+#define ARM64G_CC_SHIFT_V  28
+//ZZ #define ARMG_CC_SHIFT_Q  27
+//ZZ 
+//ZZ #define ARMG_CC_MASK_N    (1 << ARMG_CC_SHIFT_N)
+//ZZ #define ARMG_CC_MASK_Z    (1 << ARMG_CC_SHIFT_Z)
+//ZZ #define ARMG_CC_MASK_C    (1 << ARMG_CC_SHIFT_C)
+//ZZ #define ARMG_CC_MASK_V    (1 << ARMG_CC_SHIFT_V)
+//ZZ #define ARMG_CC_MASK_Q    (1 << ARMG_CC_SHIFT_Q)
+
+/* Flag thunk descriptors.  A four-word thunk is used to record
+   details of the most recent flag-setting operation, so NZCV can
+   be computed later if needed.
+
+   The four words are:
+
+      CC_OP, which describes the operation.
+
+      CC_DEP1, CC_DEP2, CC_NDEP.  These are arguments to the
+         operation.  We want to set up the mcx_masks in flag helper calls
+         involving these fields so that Memcheck "believes" that the
+         resulting flags are data-dependent on both CC_DEP1 and
+         CC_DEP2.  Hence the name DEP.
+
+   When building the thunk, it is always necessary to write words into
+   CC_DEP1/2 and NDEP, even if those args are not used given the CC_OP
+   field.  This is important because otherwise Memcheck could give
+   false positives as it does not understand the relationship between
+   the CC_OP field and CC_DEP1/2/NDEP, and so believes that the
+   definedness of the stored flags always depends on all 3 DEP values.
+
+   A summary of the field usages is:
+
+   OP                DEP1              DEP2              DEP3
+   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   OP_COPY           curr_NZCV:28x0    unused            unused
+   OP_ADD32          argL              argR              unused
+   OP_ADD64          argL              argR              unused
+   OP_SUB32          argL              argR              unused
+   OP_SUB64          argL              argR              unused
+   OP_ADC32          argL              argR              63x0:old_C
+   OP_ADC64          argL              argR              63x0:old_C
+   OP_SBC32          argL              argR              63x0:old_C
+   OP_SBC64          argL              argR              63x0:old_C
+   OP_LOGIC32        result            unused            unused
+   OP_LOGIC64        result            unused            unused
+//ZZ    OP_MUL            result            unused            30x0:old_C:old_V
+//ZZ    OP_MULL           resLO32           resHI32           30x0:old_C:old_V
+//ZZ */
+
+enum {
+   ARM64G_CC_OP_COPY=0,   /* DEP1 = NZCV in 31:28, DEP2 = 0, DEP3 = 0
+                             just copy DEP1 to output */
+
+   ARM64G_CC_OP_ADD32,    /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
+                             DEP3 = 0 */
+
+   ARM64G_CC_OP_ADD64,    /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
+                             DEP3 = 0 */
+
+   ARM64G_CC_OP_SUB32,    /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
+                             DEP3 = 0 */
+
+   ARM64G_CC_OP_SUB64,    /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
+                             DEP3 = 0 */
+
+   ARM64G_CC_OP_ADC32,    /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
+                             DEP3 = oldC (in LSB) */
+
+   ARM64G_CC_OP_ADC64,    /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
+                             DEP3 = oldC (in LSB) */
+
+   ARM64G_CC_OP_SBC32,    /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
+                             DEP3 = oldC (in LSB) */
+
+   ARM64G_CC_OP_SBC64,    /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
+                             DEP3 = oldC (in LSB) */
+
+   ARM64G_CC_OP_LOGIC32,  /* DEP1 = result, DEP2 = 0, DEP3 = 0 */
+   ARM64G_CC_OP_LOGIC64,  /* DEP1 = result, DEP2 = 0, DEP3 = 0 */
+
+//ZZ    ARMG_CC_OP_MUL,     /* DEP1 = result, DEP2 = 0, DEP3 = oldC:old_V
+//ZZ                           (in bits 1:0) */
+//ZZ 
+//ZZ    ARMG_CC_OP_MULL,    /* DEP1 = resLO32, DEP2 = resHI32, DEP3 = oldC:old_V
+//ZZ                           (in bits 1:0) */
+
+   ARM64G_CC_OP_NUMBER
+};
+
+/* XXXX because of the calling conventions for
+   arm64g_calculate_condition, all these OP values MUST be in the range
+   0 .. 15 only (viz, 4-bits). */
+
+
+
+/* Defines conditions which we can ask for */
+
+typedef
+   enum {
+      ARM64CondEQ = 0,  /* equal                         : Z=1 */
+      ARM64CondNE = 1,  /* not equal                     : Z=0 */
+
+      ARM64CondCS = 2,  /* >=u (higher or same) (aka HS) : C=1 */
+      ARM64CondCC = 3,  /* <u  (lower)          (aka LO) : C=0 */
+
+      ARM64CondMI = 4,  /* minus (negative)              : N=1 */
+      ARM64CondPL = 5,  /* plus (zero or +ve)            : N=0 */
+
+      ARM64CondVS = 6,  /* overflow                      : V=1 */
+      ARM64CondVC = 7,  /* no overflow                   : V=0 */
+
+      ARM64CondHI = 8,  /* >u   (higher)                 : C=1 && Z=0 */
+      ARM64CondLS = 9,  /* <=u  (lower or same)          : C=0 || Z=1 */
+
+      ARM64CondGE = 10, /* >=s (signed greater or equal) : N=V */
+      ARM64CondLT = 11, /* <s  (signed less than)        : N!=V */
+
+      ARM64CondGT = 12, /* >s  (signed greater)          : Z=0 && N=V */
+      ARM64CondLE = 13, /* <=s (signed less or equal)    : Z=1 || N!=V */
+
+      ARM64CondAL = 14, /* always (unconditional)        : 1 */
+      ARM64CondNV = 15  /* always (unconditional)        : 1 */
+   }
+   ARM64Condcode;
+
+#endif /* ndef __VEX_GUEST_ARM64_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                  guest_arm64_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_arm64_helpers.c b/VEX/priv/guest_arm64_helpers.c
new file mode 100644
index 0000000..e8dda64
--- /dev/null
+++ b/VEX/priv/guest_arm64_helpers.c
@@ -0,0 +1,1407 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                             guest_arm64_helpers.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2013-2013 OpenWorks
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_arm64.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_arm64_defs.h"
+
+
+/* This file contains helper functions for arm guest code.  Calls to
+   these functions are generated by the back end.  These calls are of
+   course in the host machine code and this file will be compiled to
+   host machine code, so that all makes sense.
+
+   Only change the signatures of these helper functions very
+   carefully.  If you change the signature here, you'll have to change
+   the parameters passed to it in the IR calls constructed by
+   guest_arm64_toIR.c.
+*/
+
+
+/* Set to 1 to get detailed profiling info about individual N, Z, C
+   and V flag evaluation. */
+#define PROFILE_NZCV_FLAGS 0
+
+#if PROFILE_NZCV_FLAGS
+
+static UInt tab_eval[ARM64G_CC_OP_NUMBER][16];
+static UInt initted = 0;
+static UInt tot_evals = 0;
+
+static void initCounts ( void )
+{
+   UInt i, j;
+   for (i = 0; i < ARM64G_CC_OP_NUMBER; i++) {
+      for (j = 0; j < 16; j++) {
+         tab_eval[i][j] = 0;
+      }
+   }
+   initted = 1;
+}
+
+static void showCounts ( void )
+{
+   const HChar* nameCC[16]
+      = { "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC",
+          "HI", "LS", "GE", "LT", "GT", "LE", "AL", "NV" };
+   UInt i, j;
+   ULong sum = 0;
+   vex_printf("\nCC_OP          0         1         2         3    "
+              "     4         5         6\n");
+   vex_printf(  "--------------------------------------------------"
+              "--------------------------\n");
+   for (j = 0; j < 16; j++) {
+      vex_printf("%2d %s  ", j, nameCC[j]);
+      for (i = 0; i < ARM64G_CC_OP_NUMBER; i++) {
+         vex_printf("%9d ", tab_eval[i][j]);
+         sum += tab_eval[i][j];
+      }
+      vex_printf("\n");
+   }
+   vex_printf("(In total %llu calls)\n", sum);
+}
+
/* Record one evaluation of (cc_op, cond) in the profiling table and
   periodically dump the counts.  Fix: the table index previously used
   the bare name 'cond' instead of the macro parameter '_cond'; it only
   worked by accident because the sole call site happens to pass a
   variable named 'cond'.  Use the parameter so the macro is hygienic. */
#define NOTE_EVAL(_cc_op, _cond) \
   do { \
      if (!initted) initCounts(); \
      vassert( ((UInt)(_cc_op)) < ARM64G_CC_OP_NUMBER); \
      vassert( ((UInt)(_cond)) < 16); \
      tab_eval[(UInt)(_cc_op)][(UInt)(_cond)]++;  \
      tot_evals++; \
      if (0 == (tot_evals & 0x7FFF)) \
        showCounts(); \
   } while (0)
+
+#endif /* PROFILE_NZCV_FLAGS */
+
+
+/* Calculate the N flag from the supplied thunk components, in the
+   least significant bit of the word.  Returned bits 63:1 are zero. */
+static
+ULong arm64g_calculate_flag_n ( ULong cc_op, ULong cc_dep1,
+                                ULong cc_dep2, ULong cc_dep3 )
+{
+   switch (cc_op) {
+      case ARM64G_CC_OP_COPY: {
+         /* (nzcv:28x0, unused, unused) */
+         ULong nf   = (cc_dep1 >> ARM64G_CC_SHIFT_N) & 1;
+         return nf;
+      }
+      case ARM64G_CC_OP_ADD32: {
+         /* (argL, argR, unused) */
+         UInt  argL = (UInt)cc_dep1;
+         UInt  argR = (UInt)cc_dep2;
+         UInt  res  = argL + argR;
+         ULong nf   = (ULong)(res >> 31);
+         return nf;
+      }
+      case ARM64G_CC_OP_ADD64: {
+         /* (argL, argR, unused) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong res  = argL + argR;
+         ULong nf   = (ULong)(res >> 63);
+         return nf;
+      }
+      case ARM64G_CC_OP_SUB32: {
+         /* (argL, argR, unused) */
+         UInt  argL = (UInt)cc_dep1;
+         UInt  argR = (UInt)cc_dep2;
+         UInt  res  = argL - argR;
+         ULong nf   = (ULong)(res >> 31);
+         return nf;
+      }
+      case ARM64G_CC_OP_SUB64: {
+         /* (argL, argR, unused) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong res  = argL - argR;
+         ULong nf   = res >> 63;
+         return nf;
+      }
+      case ARM64G_CC_OP_ADC32: {
+         /* (argL, argR, oldC) */
+         UInt  argL = cc_dep1;
+         UInt  argR = cc_dep2;
+         UInt  oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt  res  = argL + argR + oldC;
+         ULong nf   = (ULong)(res >> 31);
+         return nf;
+      }
+      case ARM64G_CC_OP_ADC64: {
+         /* (argL, argR, oldC) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         ULong res  = argL + argR + oldC;
+         ULong nf   = res >> 63;
+         return nf;
+      }
+      case ARM64G_CC_OP_SBC32: {
+         /* (argL, argR, oldC) */
+         UInt  argL = cc_dep1;
+         UInt  argR = cc_dep2;
+         UInt  oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt  res  = argL - argR - (oldC ^ 1);
+         ULong nf   = (ULong)(res >> 31);
+         return nf;
+      }
+      case ARM64G_CC_OP_SBC64: {
+         /* (argL, argR, oldC) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         ULong res  = argL - argR - (oldC ^ 1);
+         ULong nf   = res >> 63;
+         return nf;
+      }
+      case ARM64G_CC_OP_LOGIC32: {
+         /* (res, unused, unused) */
+         UInt  res = (UInt)cc_dep1;
+         ULong nf  = res >> 31;
+         return nf;
+      }
+      case ARM64G_CC_OP_LOGIC64: {
+         /* (res, unused, unused) */
+         ULong res = cc_dep1;
+         ULong nf  = res >> 63;
+         return nf;
+      }
+//ZZ       case ARMG_CC_OP_MUL: {
+//ZZ          /* (res, unused, oldC:oldV) */
+//ZZ          UInt res  = cc_dep1;
+//ZZ          UInt nf   = res >> 31;
+//ZZ          return nf;
+//ZZ       }
+//ZZ       case ARMG_CC_OP_MULL: {
+//ZZ          /* (resLo32, resHi32, oldC:oldV) */
+//ZZ          UInt resHi32 = cc_dep2;
+//ZZ          UInt nf      = resHi32 >> 31;
+//ZZ          return nf;
+//ZZ       }
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("arm64g_calculate_flag_n"
+                    "( op=%llu, dep1=0x%llx, dep2=0x%llx, dep3=0x%llx )\n",
+                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         vpanic("arm64g_calculate_flag_n");
+   }
+}
+
+
+/* Calculate the Z flag from the supplied thunk components, in the
+   least significant bit of the word.  Returned bits 63:1 are zero. */
+static
+ULong arm64g_calculate_flag_z ( ULong cc_op, ULong cc_dep1,
+                                ULong cc_dep2, ULong cc_dep3 )
+{
+   switch (cc_op) {
+      case ARM64G_CC_OP_COPY: {
+         /* (nzcv:28x0, unused, unused) */
+         ULong zf   = (cc_dep1 >> ARM64G_CC_SHIFT_Z) & 1;
+         return zf;
+      }
+      case ARM64G_CC_OP_ADD32: {
+         /* (argL, argR, unused) */
+         UInt  argL = (UInt)cc_dep1;
+         UInt  argR = (UInt)cc_dep2;
+         UInt  res  = argL + argR;
+         ULong zf   = res == 0;
+         return zf;
+      }
+      case ARM64G_CC_OP_ADD64: {
+         /* (argL, argR, unused) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong res  = argL + argR;
+         ULong zf   = res == 0;
+         return zf;
+      }
+      case ARM64G_CC_OP_SUB32: {
+         /* (argL, argR, unused) */
+         UInt  argL = (UInt)cc_dep1;
+         UInt  argR = (UInt)cc_dep2;
+         UInt  res  = argL - argR;
+         ULong zf   = res == 0;
+         return zf;
+      }
+      case ARM64G_CC_OP_SUB64: {
+         /* (argL, argR, unused) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong res  = argL - argR;
+         ULong zf   = res == 0;
+         return zf;
+      }
+      case ARM64G_CC_OP_ADC32: {
+         /* (argL, argR, oldC) */
+         UInt  argL = cc_dep1;
+         UInt  argR = cc_dep2;
+         UInt  oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt  res  = argL + argR + oldC;
+         ULong zf   = res == 0;
+         return zf;
+      }
+      case ARM64G_CC_OP_ADC64: {
+         /* (argL, argR, oldC) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         ULong res  = argL + argR + oldC;
+         ULong zf   = res == 0;
+         return zf;
+      }
+      case ARM64G_CC_OP_SBC32: {
+         /* (argL, argR, oldC) */
+         UInt  argL = cc_dep1;
+         UInt  argR = cc_dep2;
+         UInt  oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt  res  = argL - argR - (oldC ^ 1);
+         ULong zf   = res == 0;
+         return zf;
+      }
+      case ARM64G_CC_OP_SBC64: {
+         /* (argL, argR, oldC) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         ULong res  = argL - argR - (oldC ^ 1);
+         ULong zf   = res == 0;
+         return zf;
+      }
+      case ARM64G_CC_OP_LOGIC32: {
+         /* (res, unused, unused) */
+         UInt  res  = (UInt)cc_dep1;
+         ULong zf   = res == 0;
+         return zf;
+      }
+      case ARM64G_CC_OP_LOGIC64: {
+         /* (res, unused, unused) */
+         ULong res  = cc_dep1;
+         ULong zf   = res == 0;
+         return zf;
+      }
+//ZZ       case ARMG_CC_OP_MUL: {
+//ZZ          /* (res, unused, oldC:oldV) */
+//ZZ          UInt res  = cc_dep1;
+//ZZ          UInt zf   = res == 0;
+//ZZ          return zf;
+//ZZ       }
+//ZZ       case ARMG_CC_OP_MULL: {
+//ZZ          /* (resLo32, resHi32, oldC:oldV) */
+//ZZ          UInt resLo32 = cc_dep1;
+//ZZ          UInt resHi32 = cc_dep2;
+//ZZ          UInt zf      = (resHi32|resLo32) == 0;
+//ZZ          return zf;
+//ZZ       }
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("arm64g_calculate_flag_z"
+                    "( op=%llu, dep1=0x%llx, dep2=0x%llx, dep3=0x%llx )\n",
+                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         vpanic("arm64g_calculate_flag_z");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate the C flag from the supplied thunk components, in the
+   least significant bit of the word.  Returned bits 63:1 are zero. */
+ULong arm64g_calculate_flag_c ( ULong cc_op, ULong cc_dep1,
+                                ULong cc_dep2, ULong cc_dep3 )
+{
+   switch (cc_op) {
+      case ARM64G_CC_OP_COPY: {
+         /* (nzcv:28x0, unused, unused) */
+         ULong cf = (cc_dep1 >> ARM64G_CC_SHIFT_C) & 1;
+         return cf;
+      }
+      case ARM64G_CC_OP_ADD32: {
+         /* (argL, argR, unused) */
+         UInt  argL = (UInt)cc_dep1;
+         UInt  argR = (UInt)cc_dep2;
+         UInt  res  = argL + argR;
+         ULong cf   = res < argL;
+         return cf;
+      }
+      case ARM64G_CC_OP_ADD64: {
+         /* (argL, argR, unused) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong res  = argL + argR;
+         ULong cf   = res < argL;
+         return cf;
+      }
+      case ARM64G_CC_OP_SUB32: {
+         /* (argL, argR, unused) */
+         UInt  argL = (UInt)cc_dep1;
+         UInt  argR = (UInt)cc_dep2;
+         ULong cf   = argL >= argR;
+         return cf;
+      }
+      case ARM64G_CC_OP_SUB64: {
+         /* (argL, argR, unused) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong cf   = argL >= argR;
+         return cf;
+      }
+      case ARM64G_CC_OP_ADC32: {
+         /* (argL, argR, oldC) */
+         UInt  argL = cc_dep1;
+         UInt  argR = cc_dep2;
+         UInt  oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt  res  = argL + argR + oldC;
+         ULong cf   = oldC ? (res <= argL) : (res < argL);
+         return cf;
+      }
+      case ARM64G_CC_OP_ADC64: {
+         /* (argL, argR, oldC) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         ULong res  = argL + argR + oldC;
+         ULong cf   = oldC ? (res <= argL) : (res < argL);
+         return cf;
+      }
+      case ARM64G_CC_OP_SBC32: {
+         /* (argL, argR, oldC) */
+         UInt  argL = cc_dep1;
+         UInt  argR = cc_dep2;
+         UInt  oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         ULong cf   = oldC ? (argL >= argR) : (argL > argR);
+         return cf;
+      }
+      case ARM64G_CC_OP_SBC64: {
+         /* (argL, argR, oldC) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         ULong cf   = oldC ? (argL >= argR) : (argL > argR);
+         return cf;
+      }
+      case ARM64G_CC_OP_LOGIC32:
+      case ARM64G_CC_OP_LOGIC64: {
+         /* (res, unused, unused) */
+         return 0; // C after logic is zero on arm64
+      }
+//ZZ       case ARMG_CC_OP_MUL: {
+//ZZ          /* (res, unused, oldC:oldV) */
+//ZZ          UInt oldC = (cc_dep3 >> 1) & 1;
+//ZZ          vassert((cc_dep3 & ~3) == 0);
+//ZZ          UInt cf   = oldC;
+//ZZ          return cf;
+//ZZ       }
+//ZZ       case ARMG_CC_OP_MULL: {
+//ZZ          /* (resLo32, resHi32, oldC:oldV) */
+//ZZ          UInt oldC    = (cc_dep3 >> 1) & 1;
+//ZZ          vassert((cc_dep3 & ~3) == 0);
+//ZZ          UInt cf      = oldC;
+//ZZ          return cf;
+//ZZ       }
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("arm64g_calculate_flag_c"
+                    "( op=%llu, dep1=0x%llx, dep2=0x%llx, dep3=0x%llx )\n",
+                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         vpanic("arm64g_calculate_flag_c");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate the V flag from the supplied thunk components, in the
+   least significant bit of the word.  Returned bits 63:1 are zero. */
+static
+ULong arm64g_calculate_flag_v ( ULong cc_op, ULong cc_dep1,
+                                ULong cc_dep2, ULong cc_dep3 )
+{
+   switch (cc_op) {
+      case ARM64G_CC_OP_COPY: {
+         /* (nzcv:28x0, unused, unused) */
+         ULong vf   = (cc_dep1 >> ARM64G_CC_SHIFT_V) & 1;
+         return vf;
+      }
+      case ARM64G_CC_OP_ADD32: {
+         /* (argL, argR, unused) */
+         UInt  argL = (UInt)cc_dep1;
+         UInt  argR = (UInt)cc_dep2;
+         UInt  res  = argL + argR;
+         ULong vf   = (ULong)(((res ^ argL) & (res ^ argR)) >> 31);
+         return vf;
+      }
+      case ARM64G_CC_OP_ADD64: {
+         /* (argL, argR, unused) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong res  = argL + argR;
+         ULong vf   = ((res ^ argL) & (res ^ argR)) >> 63;
+         return vf;
+      }
+      case ARM64G_CC_OP_SUB32: {
+         /* (argL, argR, unused) */
+         UInt  argL = (UInt)cc_dep1;
+         UInt  argR = (UInt)cc_dep2;
+         UInt  res  = argL - argR;
+         ULong vf   = (ULong)(((argL ^ argR) & (argL ^ res)) >> 31);
+         return vf;
+      }
+      case ARM64G_CC_OP_SUB64: {
+         /* (argL, argR, unused) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong res  = argL - argR;
+         ULong vf   = (((argL ^ argR) & (argL ^ res))) >> 63;
+         return vf;
+      }
+      case ARM64G_CC_OP_ADC32: {
+         /* (argL, argR, oldC) */
+         UInt  argL = cc_dep1;
+         UInt  argR = cc_dep2;
+         UInt  oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt  res  = argL + argR + oldC;
+         ULong vf   = (ULong)(((res ^ argL) & (res ^ argR)) >> 31);
+         return vf;
+      }
+      case ARM64G_CC_OP_ADC64: {
+         /* (argL, argR, oldC) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         ULong res  = argL + argR + oldC;
+         ULong vf   = ((res ^ argL) & (res ^ argR)) >> 63;
+         return vf;
+      }
+      case ARM64G_CC_OP_SBC32: {
+         /* (argL, argR, oldC) */
+         UInt  argL = cc_dep1;
+         UInt  argR = cc_dep2;
+         UInt  oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt  res  = argL - argR - (oldC ^ 1);
+         ULong vf   = (ULong)(((argL ^ argR) & (argL ^ res)) >> 31);
+         return vf;
+      }
+      case ARM64G_CC_OP_SBC64: {
+         /* (argL, argR, oldC) */
+         ULong argL = cc_dep1;
+         ULong argR = cc_dep2;
+         ULong oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         ULong res  = argL - argR - (oldC ^ 1);
+         ULong vf   = ((argL ^ argR) & (argL ^ res)) >> 63;
+         return vf;
+      }
+      case ARM64G_CC_OP_LOGIC32:
+      case ARM64G_CC_OP_LOGIC64: {
+         /* (res, unused, unused) */
+         return 0; // V after logic is zero on arm64
+      }
+//ZZ       case ARMG_CC_OP_MUL: {
+//ZZ          /* (res, unused, oldC:oldV) */
+//ZZ          UInt oldV = (cc_dep3 >> 0) & 1;
+//ZZ          vassert((cc_dep3 & ~3) == 0);
+//ZZ          UInt vf   = oldV;
+//ZZ          return vf;
+//ZZ       }
+//ZZ       case ARMG_CC_OP_MULL: {
+//ZZ          /* (resLo32, resHi32, oldC:oldV) */
+//ZZ          UInt oldV    = (cc_dep3 >> 0) & 1;
+//ZZ          vassert((cc_dep3 & ~3) == 0);
+//ZZ          UInt vf      = oldV;
+//ZZ          return vf;
+//ZZ       }
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("arm64g_calculate_flag_v"
+                    "( op=%llu, dep1=0x%llx, dep2=0x%llx, dep3=0x%llx )\n",
+                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         vpanic("arm64g_calculate_flag_v");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate NZCV from the supplied thunk components, in the positions
+   they appear in the CPSR, viz bits 31:28 for N Z C V respectively.
+   Returned bits 27:0 are zero. */
+ULong arm64g_calculate_flags_nzcv ( ULong cc_op, ULong cc_dep1,
+                                    ULong cc_dep2, ULong cc_dep3 )
+{
+   ULong f;
+   ULong res = 0;
+   f = 1 & arm64g_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
+   res |= (f << ARM64G_CC_SHIFT_N);
+   f = 1 & arm64g_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
+   res |= (f << ARM64G_CC_SHIFT_Z);
+   f = 1 & arm64g_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
+   res |= (f << ARM64G_CC_SHIFT_C);
+   f = 1 & arm64g_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
+   res |= (f << ARM64G_CC_SHIFT_V);
+   return res;
+}
+
+//ZZ 
+//ZZ /* CALLED FROM GENERATED CODE: CLEAN HELPER */
+//ZZ /* Calculate the QC flag from the arguments, in the lowest bit
+//ZZ    of the word (bit 0).  Urr, having this out of line is bizarre.
+//ZZ    Push back inline. */
+//ZZ UInt armg_calculate_flag_qc ( UInt resL1, UInt resL2,
+//ZZ                               UInt resR1, UInt resR2 )
+//ZZ {
+//ZZ    if (resL1 != resR1 || resL2 != resR2)
+//ZZ       return 1;
+//ZZ    else
+//ZZ       return 0;
+//ZZ }
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate the specified condition from the thunk components, in the
+   lowest bit of the word (bit 0).  Returned bits 63:1 are zero. */
+ULong arm64g_calculate_condition ( /* ARM64Condcode << 4 | cc_op */
+                                   ULong cond_n_op ,
+                                   ULong cc_dep1,
+                                   ULong cc_dep2, ULong cc_dep3 )
+{
+   ULong cond  = cond_n_op >> 4;
+   ULong cc_op = cond_n_op & 0xF;
+   ULong inv   = cond & 1;
+   ULong nf, zf, vf, cf;
+
+#  if PROFILE_NZCV_FLAGS
+   NOTE_EVAL(cc_op, cond);
+#  endif
+
+   //   vex_printf("XXXXXXXX %llx %llx %llx %llx\n", 
+   //              cond_n_op, cc_dep1, cc_dep2, cc_dep3);
+
+   switch (cond) {
+      case ARM64CondEQ:    // Z=1         => z
+      case ARM64CondNE:    // Z=0
+         zf = arm64g_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ zf;
+
+      case ARM64CondCS:    // C=1         => c
+      case ARM64CondCC:    // C=0
+         cf = arm64g_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ cf;
+
+      case ARM64CondMI:    // N=1         => n
+      case ARM64CondPL:    // N=0
+         nf = arm64g_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ nf;
+
+      case ARM64CondVS:    // V=1         => v
+      case ARM64CondVC:    // V=0
+         vf = arm64g_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ vf;
+
+      case ARM64CondHI:    // C=1 && Z=0   => c & ~z
+      case ARM64CondLS:    // C=0 || Z=1
+         cf = arm64g_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         zf = arm64g_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ (1 & (cf & ~zf));
+
+      case ARM64CondGE:    // N=V          => ~(n^v)
+      case ARM64CondLT:    // N!=V
+         nf = arm64g_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         vf = arm64g_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ (1 & ~(nf ^ vf));
+
+      case ARM64CondGT:    // Z=0 && N=V   => ~z & ~(n^v)  =>  ~(z | (n^v))
+      case ARM64CondLE:    // Z=1 || N!=V
+         nf = arm64g_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         vf = arm64g_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         zf = arm64g_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ (1 & ~(zf | (nf ^ vf)));
+
+      case ARM64CondAL:    // 1
+      case ARM64CondNV:    // 1
+         return 1;
+
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("arm64g_calculate_condition(ARM64)"
+                    "( %llu, %llu, 0x%llx, 0x%llx, 0x%llx )\n",
+                    cond, cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         vpanic("armg_calculate_condition(ARM64)");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (non-referentially-transparent) */
+/* Horrible hack.  On non-arm64 platforms, return 0. */
+ULong arm64g_dirtyhelper_MRS_CNTVCT_EL0 ( void )
+{
+#  if defined(__aarch64__) && !defined(__arm__)
+   ULong w = 0x5555555555555555ULL; /* overwritten */
+   __asm__ __volatile__("mrs %0, cntvct_el0" : "=r"(w));
+   return w;
+#  else
+   return 0ULL;
+#  endif
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Flag-helpers translation-time function specialisers.    ---*/
+/*--- These help iropt specialise calls the above run-time    ---*/
+/*--- flags functions.                                        ---*/
+/*---------------------------------------------------------------*/
+
+/* Used by the optimiser to try specialisations.  Returns an
+   equivalent expression, or NULL if none. */
+
+static Bool isU64 ( IRExpr* e, ULong n )
+{
+   return
+      toBool( e->tag == Iex_Const
+              && e->Iex.Const.con->tag == Ico_U64
+              && e->Iex.Const.con->Ico.U64 == n );
+}
+
+IRExpr* guest_arm64_spechelper ( const HChar* function_name,
+                                 IRExpr** args,
+                                 IRStmt** precedingStmts,
+                                 Int      n_precedingStmts )
+{
+#  define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
+#  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
+#  define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
+#  define mkU8(_n)  IRExpr_Const(IRConst_U8(_n))
+
+   Int i, arity = 0;
+   for (i = 0; args[i]; i++)
+      arity++;
+//ZZ #  if 0
+//ZZ    vex_printf("spec request:\n");
+//ZZ    vex_printf("   %s  ", function_name);
+//ZZ    for (i = 0; i < arity; i++) {
+//ZZ       vex_printf("  ");
+//ZZ       ppIRExpr(args[i]);
+//ZZ    }
+//ZZ    vex_printf("\n");
+//ZZ #  endif
+
+   /* --------- specialising "arm64g_calculate_condition" --------- */
+
+   if (vex_streq(function_name, "arm64g_calculate_condition")) {
+
+      /* specialise calls to the "arm64g_calculate_condition" function.
+         Not sure whether this is strictly necessary, but: the
+         replacement IR must produce only the values 0 or 1.  Bits
+         63:1 are required to be zero. */
+      IRExpr *cond_n_op, *cc_dep1, *cc_dep2  ; //, *cc_ndep;
+      vassert(arity == 4);
+      cond_n_op = args[0]; /* (ARM64Condcode << 4)  |  ARM64G_CC_OP_* */
+      cc_dep1   = args[1];
+      cc_dep2   = args[2];
+      //cc_ndep   = args[3];
+
+      /*---------------- SUB64 ----------------*/
+
+      /* 0, 1 */
+      if (isU64(cond_n_op, (ARM64CondEQ << 4) | ARM64G_CC_OP_SUB64)) {
+         /* EQ after SUB --> test argL == argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ64, cc_dep1, cc_dep2));
+      }
+      if (isU64(cond_n_op, (ARM64CondNE << 4) | ARM64G_CC_OP_SUB64)) {
+         /* NE after SUB --> test argL != argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE64, cc_dep1, cc_dep2));
+      }
+
+      /* 2, 3 */
+      if (isU64(cond_n_op, (ARM64CondCS << 4) | ARM64G_CC_OP_SUB64)) {
+         /* CS after SUB --> test argL >=u argR
+                         --> test argR <=u argL */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U, cc_dep2, cc_dep1));
+      }
+      if (isU64(cond_n_op, (ARM64CondCC << 4) | ARM64G_CC_OP_SUB64)) {
+         /* CC after SUB --> test argL <u argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64U, cc_dep1, cc_dep2));
+      }
+
+      /* 8, 9 */
+      if (isU64(cond_n_op, (ARM64CondLS << 4) | ARM64G_CC_OP_SUB64)) {
+         /* LS after SUB --> test argL <=u argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64U, cc_dep1, cc_dep2));
+      }
+      if (isU64(cond_n_op, (ARM64CondHI << 4) | ARM64G_CC_OP_SUB64)) {
+         /* HI after SUB --> test argL >u argR
+                         --> test argR <u argL */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64U, cc_dep2, cc_dep1));
+      }
+
+      /* 10, 11 */
+      if (isU64(cond_n_op, (ARM64CondLT << 4) | ARM64G_CC_OP_SUB64)) {
+         /* LT after SUB --> test argL <s argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64S, cc_dep1, cc_dep2));
+      }
+      if (isU64(cond_n_op, (ARM64CondGE << 4) | ARM64G_CC_OP_SUB64)) {
+         /* GE after SUB --> test argL >=s argR
+                         --> test argR <=s argL */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64S, cc_dep2, cc_dep1));
+      }
+
+      /* 12, 13 */
+      if (isU64(cond_n_op, (ARM64CondGT << 4) | ARM64G_CC_OP_SUB64)) {
+         /* GT after SUB --> test argL >s argR
+                         --> test argR <s argL */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT64S, cc_dep2, cc_dep1));
+      }
+      if (isU64(cond_n_op, (ARM64CondLE << 4) | ARM64G_CC_OP_SUB64)) {
+         /* LE after SUB --> test argL <=s argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE64S, cc_dep1, cc_dep2));
+      }
+
+      /*---------------- SUB32 ----------------*/
+
+      /* 0, 1 */
+      if (isU64(cond_n_op, (ARM64CondEQ << 4) | ARM64G_CC_OP_SUB32)) {
+         /* EQ after SUB --> test argL == argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpEQ32, unop(Iop_64to32, cc_dep1),
+                                        unop(Iop_64to32, cc_dep2)));
+      }
+      if (isU64(cond_n_op, (ARM64CondNE << 4) | ARM64G_CC_OP_SUB32)) {
+         /* NE after SUB --> test argL != argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpNE32, unop(Iop_64to32, cc_dep1),
+                                        unop(Iop_64to32, cc_dep2)));
+      }
+
+      /* 2, 3 */
+      if (isU64(cond_n_op, (ARM64CondCS << 4) | ARM64G_CC_OP_SUB32)) {
+         /* CS after SUB --> test argL >=u argR
+                         --> test argR <=u argL */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE32U, unop(Iop_64to32, cc_dep2),
+                                         unop(Iop_64to32, cc_dep1)));
+      }
+      if (isU64(cond_n_op, (ARM64CondCC << 4) | ARM64G_CC_OP_SUB32)) {
+         /* CC after SUB --> test argL <u argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT32U, unop(Iop_64to32, cc_dep1),
+                                         unop(Iop_64to32, cc_dep2)));
+      }
+
+      /* 8, 9 */
+      if (isU64(cond_n_op, (ARM64CondLS << 4) | ARM64G_CC_OP_SUB32)) {
+         /* LS after SUB --> test argL <=u argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE32U, unop(Iop_64to32, cc_dep1),
+                                         unop(Iop_64to32, cc_dep2)));
+      }
+      if (isU64(cond_n_op, (ARM64CondHI << 4) | ARM64G_CC_OP_SUB32)) {
+         /* HI after SUB --> test argL >u argR
+                         --> test argR <u argL */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT32U, unop(Iop_64to32, cc_dep2),
+                                         unop(Iop_64to32, cc_dep1)));
+      }
+
+      /* 10, 11 */
+      if (isU64(cond_n_op, (ARM64CondLT << 4) | ARM64G_CC_OP_SUB32)) {
+         /* LT after SUB --> test argL <s argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT32S, unop(Iop_64to32, cc_dep1),
+                                         unop(Iop_64to32, cc_dep2)));
+      }
+      if (isU64(cond_n_op, (ARM64CondGE << 4) | ARM64G_CC_OP_SUB32)) {
+         /* GE after SUB --> test argL >=s argR
+                         --> test argR <=s argL */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE32S, unop(Iop_64to32, cc_dep2),
+                                         unop(Iop_64to32, cc_dep1)));
+      }
+
+      /* 12, 13 */
+      if (isU64(cond_n_op, (ARM64CondGT << 4) | ARM64G_CC_OP_SUB32)) {
+         /* GT after SUB --> test argL >s argR
+                         --> test argR <s argL */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLT32S, unop(Iop_64to32, cc_dep2), 
+                                         unop(Iop_64to32, cc_dep1)));
+      }
+      if (isU64(cond_n_op, (ARM64CondLE << 4) | ARM64G_CC_OP_SUB32)) {
+         /* LE after SUB --> test argL <=s argR */
+         return unop(Iop_1Uto64,
+                     binop(Iop_CmpLE32S, unop(Iop_64to32, cc_dep1),
+                                         unop(Iop_64to32, cc_dep2)));
+      }
+
+//ZZ       /*---------------- SBB ----------------*/
+//ZZ 
+//ZZ       if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SBB)) {
+//ZZ          /* This seems to happen a lot in softfloat code, eg __divdf3+140 */
+//ZZ          /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
+//ZZ          /* HS after SBB (same as C after SBB below)
+//ZZ             --> oldC ? (argL >=u argR) : (argL >u argR)
+//ZZ             --> oldC ? (argR <=u argL) : (argR <u argL)
+//ZZ          */
+//ZZ          return
+//ZZ             IRExpr_ITE(
+//ZZ                binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
+//ZZ                /* case oldC != 0 */
+//ZZ                unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
+//ZZ                /* case oldC == 0 */
+//ZZ                unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
+//ZZ             );
+//ZZ       }
+//ZZ 
+//ZZ       /*---------------- LOGIC ----------------*/
+//ZZ 
+//ZZ       if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_LOGIC)) {
+//ZZ          /* EQ after LOGIC --> test res == 0 */
+//ZZ          return unop(Iop_1Uto32,
+//ZZ                      binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
+//ZZ       }
+//ZZ       if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_LOGIC)) {
+//ZZ          /* NE after LOGIC --> test res != 0 */
+//ZZ          return unop(Iop_1Uto32,
+//ZZ                      binop(Iop_CmpNE32, cc_dep1, mkU32(0)));
+//ZZ       }
+//ZZ 
+//ZZ       if (isU32(cond_n_op, (ARMCondPL << 4) | ARMG_CC_OP_LOGIC)) {
+//ZZ          /* PL after LOGIC --> test (res >> 31) == 0 */
+//ZZ          return unop(Iop_1Uto32,
+//ZZ                      binop(Iop_CmpEQ32,
+//ZZ                            binop(Iop_Shr32, cc_dep1, mkU8(31)),
+//ZZ                            mkU32(0)));
+//ZZ       }
+//ZZ       if (isU32(cond_n_op, (ARMCondMI << 4) | ARMG_CC_OP_LOGIC)) {
+//ZZ          /* MI after LOGIC --> test (res >> 31) == 1 */
+//ZZ          return unop(Iop_1Uto32,
+//ZZ                      binop(Iop_CmpEQ32,
+//ZZ                            binop(Iop_Shr32, cc_dep1, mkU8(31)),
+//ZZ                            mkU32(1)));
+//ZZ       }
+
+      /*---------------- COPY ----------------*/
+
+      if (isU64(cond_n_op, (ARM64CondEQ << 4) | ARM64G_CC_OP_COPY)) {
+         /* EQ after COPY --> (cc_dep1 >> ARM64G_CC_SHIFT_Z) & 1 */
+         return binop(Iop_And64,
+                      binop(Iop_Shr64, cc_dep1,
+                                       mkU8(ARM64G_CC_SHIFT_Z)),
+                      mkU64(1));
+      }
+      if (isU64(cond_n_op, (ARM64CondNE << 4) | ARM64G_CC_OP_COPY)) {
+         /* NE after COPY --> ((cc_dep1 >> ARM64G_CC_SHIFT_Z) ^ 1) & 1 */
+         return binop(Iop_And64,
+                      binop(Iop_Xor64,
+                            binop(Iop_Shr64, cc_dep1,
+                                             mkU8(ARM64G_CC_SHIFT_Z)),
+                            mkU64(1)),
+                      mkU64(1));
+      }
+
+//ZZ       /*----------------- AL -----------------*/
+//ZZ 
+//ZZ       /* A critically important case for Thumb code.
+//ZZ 
+//ZZ          What we're trying to spot is the case where cond_n_op is an
+//ZZ          expression of the form Or32(..., 0xE0) since that means the
+//ZZ          caller is asking for CondAL and we can simply return 1
+//ZZ          without caring what the ... part is.  This is a potentially
+//ZZ          dodgy kludge in that it assumes that the ... part has zeroes
+//ZZ          in bits 7:4, so that the result of the Or32 is guaranteed to
+//ZZ          be 0xE in bits 7:4.  Given that the places where this first
+//ZZ          arg are constructed (in guest_arm_toIR.c) are very
+//ZZ          constrained, we can get away with this.  To make this
+//ZZ          guaranteed safe would require to have a new primop, Slice44
+//ZZ          or some such, thusly
+//ZZ 
+//ZZ          Slice44(arg1, arg2) = 0--(24)--0 arg1[7:4] arg2[3:0]
+//ZZ 
+//ZZ          and we would then look for Slice44(0xE0, ...)
+//ZZ          which would give the required safety property.
+//ZZ 
+//ZZ          It would be infeasibly expensive to scan backwards through
+//ZZ          the entire block looking for an assignment to the temp, so
+//ZZ          just look at the previous 16 statements.  That should find it
+//ZZ          if it is an interesting case, as a result of how the
+//ZZ          boilerplate guff at the start of each Thumb insn translation
+//ZZ          is made.
+//ZZ       */
+//ZZ       if (cond_n_op->tag == Iex_RdTmp) {
+//ZZ          Int    j;
+//ZZ          IRTemp look_for = cond_n_op->Iex.RdTmp.tmp;
+//ZZ          Int    limit    = n_precedingStmts - 16;
+//ZZ          if (limit < 0) limit = 0;
+//ZZ          if (0) vex_printf("scanning %d .. %d\n", n_precedingStmts-1, limit);
+//ZZ          for (j = n_precedingStmts - 1; j >= limit; j--) {
+//ZZ             IRStmt* st = precedingStmts[j];
+//ZZ             if (st->tag == Ist_WrTmp
+//ZZ                 && st->Ist.WrTmp.tmp == look_for
+//ZZ                 && st->Ist.WrTmp.data->tag == Iex_Binop
+//ZZ                 && st->Ist.WrTmp.data->Iex.Binop.op == Iop_Or32
+//ZZ                 && isU32(st->Ist.WrTmp.data->Iex.Binop.arg2, (ARMCondAL << 4)))
+//ZZ                return mkU32(1);
+//ZZ          }
+//ZZ          /* Didn't find any useful binding to the first arg
+//ZZ             in the previous 16 stmts. */
+//ZZ       }
+   }
+
+//ZZ    /* --------- specialising "armg_calculate_flag_c" --------- */
+//ZZ 
+//ZZ    else
+//ZZ    if (vex_streq(function_name, "armg_calculate_flag_c")) {
+//ZZ 
+//ZZ       /* specialise calls to the "armg_calculate_flag_c" function.
+//ZZ          Note that the returned value must be either 0 or 1; nonzero
+//ZZ          bits 31:1 are not allowed.  In turn, incoming oldV and oldC
+//ZZ          values (from the thunk) are assumed to have bits 31:1
+//ZZ          clear. */
+//ZZ       IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
+//ZZ       vassert(arity == 4);
+//ZZ       cc_op   = args[0]; /* ARMG_CC_OP_* */
+//ZZ       cc_dep1 = args[1];
+//ZZ       cc_dep2 = args[2];
+//ZZ       cc_ndep = args[3];
+//ZZ 
+//ZZ       if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
+//ZZ          /* Thunk args are (result, shco, oldV) */
+//ZZ          /* C after LOGIC --> shco */
+//ZZ          return cc_dep2;
+//ZZ       }
+//ZZ 
+//ZZ       if (isU32(cc_op, ARMG_CC_OP_SUB)) {
+//ZZ          /* Thunk args are (argL, argR, unused) */
+//ZZ          /* C after SUB --> argL >=u argR
+//ZZ                         --> argR <=u argL */
+//ZZ          return unop(Iop_1Uto32,
+//ZZ                      binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
+//ZZ       }
+//ZZ 
+//ZZ       if (isU32(cc_op, ARMG_CC_OP_SBB)) {
+//ZZ          /* This happens occasionally in softfloat code, eg __divdf3+140 */
+//ZZ          /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
+//ZZ          /* C after SBB (same as HS after SBB above)
+//ZZ             --> oldC ? (argL >=u argR) : (argL >u argR)
+//ZZ             --> oldC ? (argR <=u argL) : (argR <u argL)
+//ZZ          */
+//ZZ          return
+//ZZ             IRExpr_ITE(
+//ZZ                binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
+//ZZ                /* case oldC != 0 */
+//ZZ                unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
+//ZZ                /* case oldC == 0 */
+//ZZ                unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
+//ZZ             );
+//ZZ       }
+//ZZ 
+//ZZ    }
+//ZZ 
+//ZZ    /* --------- specialising "armg_calculate_flag_v" --------- */
+//ZZ 
+//ZZ    else
+//ZZ    if (vex_streq(function_name, "armg_calculate_flag_v")) {
+//ZZ 
+//ZZ       /* specialise calls to the "armg_calculate_flag_v" function.
+//ZZ          Note that the returned value must be either 0 or 1; nonzero
+//ZZ          bits 31:1 are not allowed.  In turn, incoming oldV and oldC
+//ZZ          values (from the thunk) are assumed to have bits 31:1
+//ZZ          clear. */
+//ZZ       IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
+//ZZ       vassert(arity == 4);
+//ZZ       cc_op   = args[0]; /* ARMG_CC_OP_* */
+//ZZ       cc_dep1 = args[1];
+//ZZ       cc_dep2 = args[2];
+//ZZ       cc_ndep = args[3];
+//ZZ 
+//ZZ       if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
+//ZZ          /* Thunk args are (result, shco, oldV) */
+//ZZ          /* V after LOGIC --> oldV */
+//ZZ          return cc_ndep;
+//ZZ       }
+//ZZ 
+//ZZ       if (isU32(cc_op, ARMG_CC_OP_SUB)) {
+//ZZ          /* Thunk args are (argL, argR, unused) */
+//ZZ          /* V after SUB 
+//ZZ             --> let res = argL - argR
+//ZZ                 in ((argL ^ argR) & (argL ^ res)) >> 31
+//ZZ             --> ((argL ^ argR) & (argL ^ (argL - argR))) >> 31
+//ZZ          */
+//ZZ          IRExpr* argL = cc_dep1;
+//ZZ          IRExpr* argR = cc_dep2;
+//ZZ          return
+//ZZ             binop(Iop_Shr32,
+//ZZ                   binop(Iop_And32,
+//ZZ                         binop(Iop_Xor32, argL, argR),
+//ZZ                         binop(Iop_Xor32, argL, binop(Iop_Sub32, argL, argR))
+//ZZ                   ),
+//ZZ                   mkU8(31)
+//ZZ             );
+//ZZ       }
+//ZZ 
+//ZZ       if (isU32(cc_op, ARMG_CC_OP_SBB)) {
+//ZZ          /* This happens occasionally in softfloat code, eg __divdf3+140 */
+//ZZ          /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
+//ZZ          /* V after SBB
+//ZZ             --> let res = argL - argR - (oldC ^ 1)
+//ZZ                 in  (argL ^ argR) & (argL ^ res) & 1
+//ZZ          */
+//ZZ          return
+//ZZ             binop(
+//ZZ                Iop_And32,
+//ZZ                binop(
+//ZZ                   Iop_And32,
+//ZZ                   // argL ^ argR
+//ZZ                   binop(Iop_Xor32, cc_dep1, cc_dep2),
+//ZZ                   // argL ^ (argL - argR - (oldC ^ 1))
+//ZZ                   binop(Iop_Xor32,
+//ZZ                         cc_dep1,
+//ZZ                         binop(Iop_Sub32,
+//ZZ                               binop(Iop_Sub32, cc_dep1, cc_dep2),
+//ZZ                               binop(Iop_Xor32, cc_ndep, mkU32(1)))
+//ZZ                   )
+//ZZ                ),
+//ZZ                mkU32(1)
+//ZZ             );
+//ZZ       }
+//ZZ 
+//ZZ    }
+
+#  undef unop
+#  undef binop
+#  undef mkU64
+#  undef mkU8
+
+   return NULL;
+}
+
+
+/*----------------------------------------------*/
+/*--- The exported fns ..                    ---*/
+/*----------------------------------------------*/
+
+//ZZ /* VISIBLE TO LIBVEX CLIENT */
+//ZZ #if 0
+//ZZ void LibVEX_GuestARM_put_flags ( UInt flags_native,
+//ZZ                                  /*OUT*/VexGuestARMState* vex_state )
+//ZZ {
+//ZZ    vassert(0); // FIXME
+//ZZ 
+//ZZ    /* Mask out everything except N Z V C. */
+//ZZ    flags_native
+//ZZ       &= (ARMG_CC_MASK_N | ARMG_CC_MASK_Z | ARMG_CC_MASK_V | ARMG_CC_MASK_C);
+//ZZ    
+//ZZ    vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
+//ZZ    vex_state->guest_CC_DEP1 = flags_native;
+//ZZ    vex_state->guest_CC_DEP2 = 0;
+//ZZ    vex_state->guest_CC_NDEP = 0;
+//ZZ }
+//ZZ #endif
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Compute the guest's N,Z,C,V flags, in their architectural bit
+   positions (31:28), from the flag-thunk fields of the guest state.
+   The //ZZ-commented code below is leftover ARM(32) CPSR handling
+   retained for reference; only NZCV is produced here. */
+ULong LibVEX_GuestARM64_get_nzcv ( /*IN*/const VexGuestARM64State* vex_state )
+{
+   ULong nzcv = 0;
+   // NZCV
+   nzcv |= arm64g_calculate_flags_nzcv(
+               vex_state->guest_CC_OP,
+               vex_state->guest_CC_DEP1,
+               vex_state->guest_CC_DEP2,
+               vex_state->guest_CC_NDEP
+            );
+   /* The helper may only ever set bits 31:28 (N,Z,C,V). */
+   vassert(0 == (nzcv & 0xFFFFFFFF0FFFFFFFULL));
+//ZZ    // Q
+//ZZ    if (vex_state->guest_QFLAG32 > 0)
+//ZZ       cpsr |= (1 << 27);
+//ZZ    // GE
+//ZZ    if (vex_state->guest_GEFLAG0 > 0)
+//ZZ       cpsr |= (1 << 16);
+//ZZ    if (vex_state->guest_GEFLAG1 > 0)
+//ZZ       cpsr |= (1 << 17);
+//ZZ    if (vex_state->guest_GEFLAG2 > 0)
+//ZZ       cpsr |= (1 << 18);
+//ZZ    if (vex_state->guest_GEFLAG3 > 0)
+//ZZ       cpsr |= (1 << 19);
+//ZZ    // M
+//ZZ    cpsr |= (1 << 4); // 0b10000 means user-mode
+//ZZ    // J,T   J (bit 24) is zero by initialisation above
+//ZZ    // T  we copy from R15T[0]
+//ZZ    if (vex_state->guest_R15T & 1)
+//ZZ       cpsr |= (1 << 5);
+//ZZ    // ITSTATE we punt on for the time being.  Could compute it
+//ZZ    // if needed though.
+//ZZ    // E, endianness, 0 (littleendian) from initialisation above
+//ZZ    // A,I,F disable some async exceptions.  Not sure about these.
+//ZZ    // Leave as zero for the time being.
+   return nzcv;
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Construct the guest's FPSR value.  Only the QC (cumulative
+   saturation) bit, bit 27, is tracked: it is set iff any lane of
+   guest_QCFLAG is nonzero. */
+ULong LibVEX_GuestARM64_get_fpsr ( const VexGuestARM64State* vex_state )
+{
+   Int  i;
+   UInt qc = 0;
+   for (i = 0; i < 4; i++)
+      qc |= vex_state->guest_QCFLAG[i];
+   /* QC is FPSR bit 27. */
+   return qc != 0 ? (1ULL << 27) : 0;
+}
+
+/* Install an FPSR value into the guest state.  Only the QC bit (bit
+   27) is represented: lane 0 of guest_QCFLAG receives it, and the
+   remaining lanes are cleared. */
+void LibVEX_GuestARM64_set_fpsr ( /*MOD*/VexGuestARM64State* vex_state,
+                                  ULong fpsr )
+{
+   Int i;
+   for (i = 1; i < 4; i++)
+      vex_state->guest_QCFLAG[i] = 0;
+   // QC
+   vex_state->guest_QCFLAG[0] = (UInt)((fpsr >> 27) & 1);
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Set the guest state to its starting value: everything zeroed, with
+   the flag thunk in COPY mode, so the all-zero DEP1 field denotes
+   "all flags clear".  The //ZZ-commented assignments are the old
+   ARM(32) initialiser retained for reference; vex_bzero makes them
+   redundant here. */
+void LibVEX_GuestARM64_initialise ( /*OUT*/VexGuestARM64State* vex_state )
+{
+   vex_bzero(vex_state, sizeof(*vex_state));
+//ZZ    vex_state->host_EvC_FAILADDR = 0;
+//ZZ    vex_state->host_EvC_COUNTER = 0;
+//ZZ 
+//ZZ    vex_state->guest_R0  = 0;
+//ZZ    vex_state->guest_R1  = 0;
+//ZZ    vex_state->guest_R2  = 0;
+//ZZ    vex_state->guest_R3  = 0;
+//ZZ    vex_state->guest_R4  = 0;
+//ZZ    vex_state->guest_R5  = 0;
+//ZZ    vex_state->guest_R6  = 0;
+//ZZ    vex_state->guest_R7  = 0;
+//ZZ    vex_state->guest_R8  = 0;
+//ZZ    vex_state->guest_R9  = 0;
+//ZZ    vex_state->guest_R10 = 0;
+//ZZ    vex_state->guest_R11 = 0;
+//ZZ    vex_state->guest_R12 = 0;
+//ZZ    vex_state->guest_R13 = 0;
+//ZZ    vex_state->guest_R14 = 0;
+//ZZ    vex_state->guest_R15T = 0;  /* NB: implies ARM mode */
+//ZZ 
+   /* COPY thunk: flags are an exact copy of the (zeroed) DEP1. */
+   vex_state->guest_CC_OP   = ARM64G_CC_OP_COPY;
+//ZZ    vex_state->guest_CC_DEP1 = 0;
+//ZZ    vex_state->guest_CC_DEP2 = 0;
+//ZZ    vex_state->guest_CC_NDEP = 0;
+//ZZ    vex_state->guest_QFLAG32 = 0;
+//ZZ    vex_state->guest_GEFLAG0 = 0;
+//ZZ    vex_state->guest_GEFLAG1 = 0;
+//ZZ    vex_state->guest_GEFLAG2 = 0;
+//ZZ    vex_state->guest_GEFLAG3 = 0;
+//ZZ 
+//ZZ    vex_state->guest_EMNOTE  = EmNote_NONE;
+//ZZ    vex_state->guest_CMSTART = 0;
+//ZZ    vex_state->guest_CMLEN   = 0;
+//ZZ    vex_state->guest_NRADDR  = 0;
+//ZZ    vex_state->guest_IP_AT_SYSCALL = 0;
+//ZZ 
+//ZZ    vex_state->guest_D0  = 0;
+//ZZ    vex_state->guest_D1  = 0;
+//ZZ    vex_state->guest_D2  = 0;
+//ZZ    vex_state->guest_D3  = 0;
+//ZZ    vex_state->guest_D4  = 0;
+//ZZ    vex_state->guest_D5  = 0;
+//ZZ    vex_state->guest_D6  = 0;
+//ZZ    vex_state->guest_D7  = 0;
+//ZZ    vex_state->guest_D8  = 0;
+//ZZ    vex_state->guest_D9  = 0;
+//ZZ    vex_state->guest_D10 = 0;
+//ZZ    vex_state->guest_D11 = 0;
+//ZZ    vex_state->guest_D12 = 0;
+//ZZ    vex_state->guest_D13 = 0;
+//ZZ    vex_state->guest_D14 = 0;
+//ZZ    vex_state->guest_D15 = 0;
+//ZZ    vex_state->guest_D16 = 0;
+//ZZ    vex_state->guest_D17 = 0;
+//ZZ    vex_state->guest_D18 = 0;
+//ZZ    vex_state->guest_D19 = 0;
+//ZZ    vex_state->guest_D20 = 0;
+//ZZ    vex_state->guest_D21 = 0;
+//ZZ    vex_state->guest_D22 = 0;
+//ZZ    vex_state->guest_D23 = 0;
+//ZZ    vex_state->guest_D24 = 0;
+//ZZ    vex_state->guest_D25 = 0;
+//ZZ    vex_state->guest_D26 = 0;
+//ZZ    vex_state->guest_D27 = 0;
+//ZZ    vex_state->guest_D28 = 0;
+//ZZ    vex_state->guest_D29 = 0;
+//ZZ    vex_state->guest_D30 = 0;
+//ZZ    vex_state->guest_D31 = 0;
+//ZZ 
+//ZZ    /* ARM encoded; zero is the default as it happens (result flags
+//ZZ       (NZCV) cleared, FZ disabled, round to nearest, non-vector mode,
+//ZZ       all exns masked, all exn sticky bits cleared). */
+//ZZ    vex_state->guest_FPSCR = 0;
+//ZZ 
+//ZZ    vex_state->guest_TPIDRURO = 0;
+//ZZ 
+//ZZ    /* Not in a Thumb IT block. */
+//ZZ    vex_state->guest_ITSTATE = 0;
+//ZZ 
+//ZZ    vex_state->padding1 = 0;
+//ZZ    vex_state->padding2 = 0;
+//ZZ    vex_state->padding3 = 0;
+//ZZ    vex_state->padding4 = 0;
+//ZZ    vex_state->padding5 = 0;
+}
+
+
+/*-----------------------------------------------------------*/
+/*--- Describing the arm guest state, for the benefit     ---*/
+/*--- of iropt and instrumenters.                         ---*/
+/*-----------------------------------------------------------*/
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions.  If in doubt return
+   True (but this generates significantly slower code).  
+
+   We enforce precise exns for guest SP, PC, 29(FP), 30(LR).
+   That might be overkill (for 29 and 30); I don't know.
+*/
+Bool guest_arm64_state_requires_precise_mem_exns (
+        Int minoff, Int maxoff, VexRegisterUpdates pxControl
+     )
+{
+   /* Byte ranges (inclusive) occupied by the fields we care about;
+      each register is 8 bytes wide. */
+   Int xsp_min = offsetof(VexGuestARM64State, guest_XSP);
+   Int xsp_max = xsp_min + 8 - 1;
+   Int pc_min  = offsetof(VexGuestARM64State, guest_PC);
+   Int pc_max  = pc_min + 8 - 1;
+
+   if (maxoff < xsp_min || minoff > xsp_max) {
+      /* no overlap with xsp */
+      if (pxControl == VexRegUpdSpAtMemAccess)
+         return False; // We only need to check stack pointer.
+   } else {
+      return True;
+   }
+
+   if (maxoff < pc_min || minoff > pc_max) {
+      /* no overlap with pc */
+   } else {
+      return True;
+   }
+
+   /* Guessing that we need PX for FP, but I don't really know. */
+   Int x29_min = offsetof(VexGuestARM64State, guest_X29);
+   Int x29_max = x29_min + 8 - 1;
+
+   if (maxoff < x29_min || minoff > x29_max) {
+      /* no overlap with x29 */
+   } else {
+      return True;
+   }
+
+   /* Guessing that we need PX for LR, but I don't really know. */
+   Int x30_min = offsetof(VexGuestARM64State, guest_X30);
+   Int x30_max = x30_min + 8 - 1;
+
+   if (maxoff < x30_min || minoff > x30_max) {
+      /* no overlap with r30 */
+   } else {
+      return True;
+   }
+
+   /* No overlap with any precise-exception register. */
+   return False;
+}
+
+
+/* Produce the { offset, size } initialiser for a guest state field,
+   for use in the alwaysDefd table below. */
+#define ALWAYSDEFD(field)                             \
+    { offsetof(VexGuestARM64State, field),            \
+      (sizeof ((VexGuestARM64State*)0)->field) }
+VexGuestLayout
+   arm64Guest_layout 
+      = { 
+          /* Total size of the guest state, in bytes. */
+          .total_sizeB = sizeof(VexGuestARM64State),
+
+          /* Describe the stack pointer. */
+          .offset_SP = offsetof(VexGuestARM64State,guest_XSP),
+          .sizeof_SP = 8,
+
+          /* Describe the instruction pointer. */
+          .offset_IP = offsetof(VexGuestARM64State,guest_PC),
+          .sizeof_IP = 8,
+
+          /* Describe any sections to be regarded by Memcheck as
+             'always-defined'.  NB: must equal the number of entries
+             in .alwaysDefd below. */
+          .n_alwaysDefd = 9,
+
+          /* flags thunk: OP is always defd, whereas DEP1 and DEP2
+             have to be tracked.  See detailed comment in gdefs.h on
+             meaning of thunk fields. */
+          .alwaysDefd
+             = { /* 0 */ ALWAYSDEFD(guest_PC),
+                 /* 1 */ ALWAYSDEFD(guest_CC_OP),
+                 /* 2 */ ALWAYSDEFD(guest_CC_NDEP),
+                 /* 3 */ ALWAYSDEFD(guest_EMNOTE),
+                 /* 4 */ ALWAYSDEFD(guest_CMSTART),
+                 /* 5 */ ALWAYSDEFD(guest_CMLEN),
+                 /* 6 */ ALWAYSDEFD(guest_NRADDR),
+                 /* 7 */ ALWAYSDEFD(guest_IP_AT_SYSCALL),
+                 /* 8 */ ALWAYSDEFD(guest_TPIDR_EL0)
+               }
+        };
+
+
+/*---------------------------------------------------------------*/
+/*--- end                               guest_arm64_helpers.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_arm64_toIR.c b/VEX/priv/guest_arm64_toIR.c
new file mode 100644
index 0000000..14c27f1
--- /dev/null
+++ b/VEX/priv/guest_arm64_toIR.c
@@ -0,0 +1,14063 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*--------------------------------------------------------------------*/
+/*--- begin                                     guest_arm64_toIR.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2013-2013 OpenWorks
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* KNOWN LIMITATIONS 2014-Nov-16
+
+   * Correctness: FMAXNM, FMINNM are implemented the same as FMAX/FMIN.
+
+     Also FP comparison "unordered" .. is implemented as normal FP
+     comparison.
+
+     Both should be fixed.  They behave incorrectly in the presence of
+     NaNs.
+
+     FMULX is treated the same as FMUL.  That's also not correct.
+
+   * Floating multiply-add (etc) insns.  Are split into a multiply and 
+     an add, and so suffer double rounding and hence sometimes the
+     least significant mantissa bit is incorrect.  Fix: use the IR
+     multiply-add IROps instead.
+
+   * FRINTA, FRINTN are kludged .. they just round to nearest.  No special
+     handling for the "ties" case.  FRINTX might be dubious too.
+
+   * Ditto FCVTXN.  No idea what "round to odd" means.  This implementation
+     just rounds to nearest.
+*/
+
+/* "Special" instructions.
+
+   This instruction decoder can decode four special instructions
+   which mean nothing natively (are no-ops as far as regs/mem are
+   concerned) but have meaning for supporting Valgrind.  A special
+   instruction is flagged by a 16-byte preamble:
+
+      93CC0D8C 93CC358C 93CCCD8C 93CCF58C
+      (ror x12, x12, #3;   ror x12, x12, #13
+       ror x12, x12, #51;  ror x12, x12, #61)
+
+   Following that, one of the following 3 are allowed
+   (standard interpretation in parentheses):
+
+      AA0A014A (orr x10,x10,x10)   X3 = client_request ( X4 )
+      AA0B016B (orr x11,x11,x11)   X3 = guest_NRADDR
+      AA0C018C (orr x12,x12,x12)   branch-and-link-to-noredir X8
+      AA090129 (orr x9,x9,x9)      IR injection
+
+   Any other bytes following the 16-byte preamble are illegal and
+   constitute a failure in instruction decoding.  This all assumes
+   that the preamble will never occur except in specific code
+   fragments designed for Valgrind to catch.
+*/
+
+/* Translates ARM64 code to IR. */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_guest_arm64.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_arm64_defs.h"
+
+
+/*------------------------------------------------------------*/
+/*--- Globals                                              ---*/
+/*------------------------------------------------------------*/
+
+/* These are set at the start of the translation of a instruction, so
+   that we don't have to pass them around endlessly.  CONST means does
+   not change during translation of the instruction.
+*/
+
+/* CONST: what is the host's endianness?  We need to know this in
+   order to do sub-register accesses to the SIMD/FP registers
+   correctly. */
+static VexEndness host_endness;
+
+/* CONST: The guest address for the instruction currently being
+   translated.  */
+static Addr64 guest_PC_curr_instr;
+
+/* MOD: The IRSB* into which we're generating code. */
+static IRSB* irsb;
+
+
+/*------------------------------------------------------------*/
+/*--- Debugging output                                     ---*/
+/*------------------------------------------------------------*/
+
+/* DIP: print a front-end trace line; DIS: same but format into a
+   buffer.  Both are no-ops unless VEX_TRACE_FE is set in
+   vex_traceflags. */
+#define DIP(format, args...)           \
+   if (vex_traceflags & VEX_TRACE_FE)  \
+      vex_printf(format, ## args)
+
+#define DIS(buf, format, args...)      \
+   if (vex_traceflags & VEX_TRACE_FE)  \
+      vex_sprintf(buf, format, ## args)
+
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for deconstructing the        ---*/
+/*--- arm insn stream.                                     ---*/
+/*------------------------------------------------------------*/
+
+/* Do a little-endian load of a 32-bit word, regardless of the
+   endianness of the underlying host. */
+static inline UInt getUIntLittleEndianly ( const UChar* p )
+{
+   /* Assemble the word byte by byte, least significant byte first,
+      so the result is independent of the host's own endianness. */
+   return (UInt)p[0]
+          | ((UInt)p[1] << 8)
+          | ((UInt)p[2] << 16)
+          | ((UInt)p[3] << 24);
+}
+
+/* Sign extend a N-bit value up to 64 bits, by copying
+   bit N-1 into all higher positions. */
+static ULong sx_to_64 ( ULong x, UInt n )
+{
+   /* Copy bit n-1 of x into all higher bit positions.  The left
+      shift is done in the unsigned domain: the original code left-
+      shifted a signed Long, which is undefined behaviour in C99/C11
+      whenever bit n-1 is set (shifting into/past the sign bit).
+      Only the final, value-recovering right shift is arithmetic. */
+   vassert(n > 1 && n < 64);
+   ULong shifted = x << (64-n);
+   return (ULong)((Long)shifted >> (64-n));
+}
+
+//ZZ /* Do a little-endian load of a 16-bit word, regardless of the
+//ZZ    endianness of the underlying host. */
+//ZZ static inline UShort getUShortLittleEndianly ( UChar* p )
+//ZZ {
+//ZZ    UShort w = 0;
+//ZZ    w = (w << 8) | p[1];
+//ZZ    w = (w << 8) | p[0];
+//ZZ    return w;
+//ZZ }
+//ZZ 
+//ZZ static UInt ROR32 ( UInt x, UInt sh ) {
+//ZZ    vassert(sh >= 0 && sh < 32);
+//ZZ    if (sh == 0)
+//ZZ       return x;
+//ZZ    else
+//ZZ       return (x << (32-sh)) | (x >> sh);
+//ZZ }
+//ZZ 
+//ZZ static Int popcount32 ( UInt x )
+//ZZ {
+//ZZ    Int res = 0, i;
+//ZZ    for (i = 0; i < 32; i++) {
+//ZZ       res += (x & 1);
+//ZZ       x >>= 1;
+//ZZ    }
+//ZZ    return res;
+//ZZ }
+//ZZ 
+//ZZ static UInt setbit32 ( UInt x, Int ix, UInt b )
+//ZZ {
+//ZZ    UInt mask = 1 << ix;
+//ZZ    x &= ~mask;
+//ZZ    x |= ((b << ix) & mask);
+//ZZ    return x;
+//ZZ }
+
+/* BITSn(..): assemble an n-bit constant from individual bit values,
+   written most-significant bit first.  These make the instruction
+   encoding patterns in the decoder legible. */
+#define BITS2(_b1,_b0)  \
+   (((_b1) << 1) | (_b0))
+
+#define BITS3(_b2,_b1,_b0)  \
+  (((_b2) << 2) | ((_b1) << 1) | (_b0))
+
+#define BITS4(_b3,_b2,_b1,_b0)  \
+   (((_b3) << 3) | ((_b2) << 2) | ((_b1) << 1) | (_b0))
+
+#define BITS8(_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0)  \
+   ((BITS4((_b7),(_b6),(_b5),(_b4)) << 4)  \
+    | BITS4((_b3),(_b2),(_b1),(_b0)))
+
+#define BITS5(_b4,_b3,_b2,_b1,_b0)  \
+   (BITS8(0,0,0,(_b4),(_b3),(_b2),(_b1),(_b0)))
+#define BITS6(_b5,_b4,_b3,_b2,_b1,_b0)  \
+   (BITS8(0,0,(_b5),(_b4),(_b3),(_b2),(_b1),(_b0)))
+#define BITS7(_b6,_b5,_b4,_b3,_b2,_b1,_b0)  \
+   (BITS8(0,(_b6),(_b5),(_b4),(_b3),(_b2),(_b1),(_b0)))
+
+#define BITS9(_b8,_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0)  \
+   (((_b8) << 8)  \
+    | BITS8((_b7),(_b6),(_b5),(_b4),(_b3),(_b2),(_b1),(_b0)))
+
+#define BITS10(_b9,_b8,_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0)  \
+   (((_b9) << 9) | ((_b8) << 8)  \
+    | BITS8((_b7),(_b6),(_b5),(_b4),(_b3),(_b2),(_b1),(_b0)))
+
+#define BITS11(_b10,_b9,_b8,_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0)  \
+   (((_b10) << 10)  \
+    | BITS10(_b9,_b8,_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0))
+
+#define BITS12(_b11, _b10,_b9,_b8,_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0) \
+   (((_b11) << 11)  \
+    | BITS11(_b10,_b9,_b8,_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0))
+
+/* Convenient names for the four possible 2-bit field values. */
+#define X00 BITS2(0,0)
+#define X01 BITS2(0,1)
+#define X10 BITS2(1,0)
+#define X11 BITS2(1,1)
+
+// produces _uint[_bMax:_bMin]
+#define SLICE_UInt(_uint,_bMax,_bMin)  \
+   (( ((UInt)(_uint)) >> (_bMin))  \
+    & (UInt)((1ULL << ((_bMax) - (_bMin) + 1)) - 1ULL))
+
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for creating IR fragments.    ---*/
+/*------------------------------------------------------------*/
+
+/* Constructors for IR constant and expression nodes. */
+static IRExpr* mkV128 ( UShort w )
+{
+   return IRExpr_Const(IRConst_V128(w));
+}
+
+static IRExpr* mkU64 ( ULong i )
+{
+   return IRExpr_Const(IRConst_U64(i));
+}
+
+static IRExpr* mkU32 ( UInt i )
+{
+   return IRExpr_Const(IRConst_U32(i));
+}
+
+static IRExpr* mkU16 ( UInt i )
+{
+   /* Value must fit in 16 bits. */
+   vassert(i < 65536);
+   return IRExpr_Const(IRConst_U16(i));
+}
+
+static IRExpr* mkU8 ( UInt i )
+{
+   /* Value must fit in 8 bits. */
+   vassert(i < 256);
+   return IRExpr_Const(IRConst_U8( (UChar)i ));
+}
+
+static IRExpr* mkexpr ( IRTemp tmp )
+{
+   return IRExpr_RdTmp(tmp);
+}
+
+static IRExpr* unop ( IROp op, IRExpr* a )
+{
+   return IRExpr_Unop(op, a);
+}
+
+static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
+{
+   return IRExpr_Binop(op, a1, a2);
+}
+
+static IRExpr* triop ( IROp op, IRExpr* a1, IRExpr* a2, IRExpr* a3 )
+{
+   return IRExpr_Triop(op, a1, a2, a3);
+}
+
+/* Little-endian load expression. */
+static IRExpr* loadLE ( IRType ty, IRExpr* addr )
+{
+   return IRExpr_Load(Iend_LE, ty, addr);
+}
+
+/* Add a statement to the list held by "irsb". */
+static void stmt ( IRStmt* st )
+{
+   addStmtToIRSB( irsb, st );
+}
+
+/* Assign an expression to a temp (emits a WrTmp statement). */
+static void assign ( IRTemp dst, IRExpr* e )
+{
+   stmt( IRStmt_WrTmp(dst, e) );
+}
+
+/* Little-endian store (emits a Store statement). */
+static void storeLE ( IRExpr* addr, IRExpr* data )
+{
+   stmt( IRStmt_Store(Iend_LE, addr, data) );
+}
+
+//ZZ static void storeGuardedLE ( IRExpr* addr, IRExpr* data, IRTemp guardT )
+//ZZ {
+//ZZ    if (guardT == IRTemp_INVALID) {
+//ZZ       /* unconditional */
+//ZZ       storeLE(addr, data);
+//ZZ    } else {
+//ZZ       stmt( IRStmt_StoreG(Iend_LE, addr, data,
+//ZZ                           binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0))) );
+//ZZ    }
+//ZZ }
+//ZZ 
+//ZZ static void loadGuardedLE ( IRTemp dst, IRLoadGOp cvt,
+//ZZ                             IRExpr* addr, IRExpr* alt, 
+//ZZ                             IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
+//ZZ {
+//ZZ    if (guardT == IRTemp_INVALID) {
+//ZZ       /* unconditional */
+//ZZ       IRExpr* loaded = NULL;
+//ZZ       switch (cvt) {
+//ZZ          case ILGop_Ident32:
+//ZZ             loaded = loadLE(Ity_I32, addr); break;
+//ZZ          case ILGop_8Uto32:
+//ZZ             loaded = unop(Iop_8Uto32, loadLE(Ity_I8, addr)); break;
+//ZZ          case ILGop_8Sto32:
+//ZZ             loaded = unop(Iop_8Sto32, loadLE(Ity_I8, addr)); break;
+//ZZ          case ILGop_16Uto32:
+//ZZ             loaded = unop(Iop_16Uto32, loadLE(Ity_I16, addr)); break;
+//ZZ          case ILGop_16Sto32:
+//ZZ             loaded = unop(Iop_16Sto32, loadLE(Ity_I16, addr)); break;
+//ZZ          default:
+//ZZ             vassert(0);
+//ZZ       }
+//ZZ       vassert(loaded != NULL);
+//ZZ       assign(dst, loaded);
+//ZZ    } else {
+//ZZ       /* Generate a guarded load into 'dst', but apply 'cvt' to the
+//ZZ          loaded data before putting the data in 'dst'.  If the load
+//ZZ          does not take place, 'alt' is placed directly in 'dst'. */
+//ZZ       stmt( IRStmt_LoadG(Iend_LE, cvt, dst, addr, alt,
+//ZZ                          binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0))) );
+//ZZ    }
+//ZZ }
+
+/* Generate a new temporary of the given type, in the IRSB currently
+   under construction (the file-scope "irsb"). */
+static IRTemp newTemp ( IRType ty )
+{
+   vassert(isPlausibleIRType(ty));
+   return newIRTemp( irsb->tyenv, ty );
+}
+
+/* Shorthand for newTemp(Ity_V128).
+   This is used in many places, so the brevity is an advantage. */
+static IRTemp newTempV128(void)
+{
+   return newTemp(Ity_V128);
+}
+
+/* Initialise V128 temporaries en masse.  Every pointer must refer to
+   a temp currently equal to IRTemp_INVALID; each is overwritten with
+   a fresh V128 temp.  All the invariant checks are performed before
+   any allocation, matching the original per-arity versions. */
+static
+void newTempsV128_array ( UInt n, IRTemp* tmps[] )
+{
+   UInt i;
+   for (i = 0; i < n; i++)
+      vassert(tmps[i] && *tmps[i] == IRTemp_INVALID);
+   for (i = 0; i < n; i++)
+      *tmps[i] = newTempV128();
+}
+
+static
+void newTempsV128_2(IRTemp* t1, IRTemp* t2)
+{
+   IRTemp* tmps[2] = { t1, t2 };
+   newTempsV128_array(2, tmps);
+}
+
+static
+void newTempsV128_3(IRTemp* t1, IRTemp* t2, IRTemp* t3)
+{
+   IRTemp* tmps[3] = { t1, t2, t3 };
+   newTempsV128_array(3, tmps);
+}
+
+static
+void newTempsV128_4(IRTemp* t1, IRTemp* t2, IRTemp* t3, IRTemp* t4)
+{
+   IRTemp* tmps[4] = { t1, t2, t3, t4 };
+   newTempsV128_array(4, tmps);
+}
+
+static
+void newTempsV128_7(IRTemp* t1, IRTemp* t2, IRTemp* t3,
+                    IRTemp* t4, IRTemp* t5, IRTemp* t6, IRTemp* t7)
+{
+   IRTemp* tmps[7] = { t1, t2, t3, t4, t5, t6, t7 };
+   newTempsV128_array(7, tmps);
+}
+
+//ZZ /* Produces a value in 0 .. 3, which is encoded as per the type
+//ZZ    IRRoundingMode. */
+//ZZ static IRExpr* /* :: Ity_I32 */ get_FAKE_roundingmode ( void )
+//ZZ {
+//ZZ    return mkU32(Irrm_NEAREST);
+//ZZ }
+//ZZ 
+//ZZ /* Generate an expression for SRC rotated right by ROT. */
+//ZZ static IRExpr* genROR32( IRTemp src, Int rot )
+//ZZ {
+//ZZ    vassert(rot >= 0 && rot < 32);
+//ZZ    if (rot == 0)
+//ZZ       return mkexpr(src);
+//ZZ    return
+//ZZ       binop(Iop_Or32,
+//ZZ             binop(Iop_Shl32, mkexpr(src), mkU8(32 - rot)),
+//ZZ             binop(Iop_Shr32, mkexpr(src), mkU8(rot)));
+//ZZ }
+//ZZ 
+//ZZ static IRExpr* mkU128 ( ULong i )
+//ZZ {
+//ZZ    return binop(Iop_64HLtoV128, mkU64(i), mkU64(i));
+//ZZ }
+//ZZ 
+//ZZ /* Generate a 4-aligned version of the given expression if
+//ZZ    the given condition is true.  Else return it unchanged. */
+//ZZ static IRExpr* align4if ( IRExpr* e, Bool b )
+//ZZ {
+//ZZ    if (b)
+//ZZ       return binop(Iop_And32, e, mkU32(~3));
+//ZZ    else
+//ZZ       return e;
+//ZZ }
+
+/* Other IR construction helpers. */
+
+/* Each mkXXX below maps an integer IRType (Ity_I32 or Ity_I64) to the
+   corresponding scalar integer IROp; any other type is a panic. */
+static IROp mkAND ( IRType ty ) {
+   switch (ty) {
+      case Ity_I32: return Iop_And32;
+      case Ity_I64: return Iop_And64;
+      default: vpanic("mkAND");
+   }
+}
+
+static IROp mkOR ( IRType ty ) {
+   switch (ty) {
+      case Ity_I32: return Iop_Or32;
+      case Ity_I64: return Iop_Or64;
+      default: vpanic("mkOR");
+   }
+}
+
+static IROp mkXOR ( IRType ty ) {
+   switch (ty) {
+      case Ity_I32: return Iop_Xor32;
+      case Ity_I64: return Iop_Xor64;
+      default: vpanic("mkXOR");
+   }
+}
+
+static IROp mkSHL ( IRType ty ) {
+   switch (ty) {
+      case Ity_I32: return Iop_Shl32;
+      case Ity_I64: return Iop_Shl64;
+      default: vpanic("mkSHL");
+   }
+}
+
+static IROp mkSHR ( IRType ty ) {
+   switch (ty) {
+      case Ity_I32: return Iop_Shr32;
+      case Ity_I64: return Iop_Shr64;
+      default: vpanic("mkSHR");
+   }
+}
+
+static IROp mkSAR ( IRType ty ) {
+   switch (ty) {
+      case Ity_I32: return Iop_Sar32;
+      case Ity_I64: return Iop_Sar64;
+      default: vpanic("mkSAR");
+   }
+}
+
+static IROp mkNOT ( IRType ty ) {
+   switch (ty) {
+      case Ity_I32: return Iop_Not32;
+      case Ity_I64: return Iop_Not64;
+      default: vpanic("mkNOT");
+   }
+}
+
+static IROp mkADD ( IRType ty ) {
+   switch (ty) {
+      case Ity_I32: return Iop_Add32;
+      case Ity_I64: return Iop_Add64;
+      default: vpanic("mkADD");
+   }
+}
+
+static IROp mkSUB ( IRType ty ) {
+   switch (ty) {
+      case Ity_I32: return Iop_Sub32;
+      case Ity_I64: return Iop_Sub64;
+      default: vpanic("mkSUB");
+   }
+}
+
+/* As above, but for floating-point types: map Ity_F32 or Ity_F64 to
+   the corresponding scalar FP IROp; any other type is a panic. */
+static IROp mkADDF ( IRType ty ) {
+   switch (ty) {
+      case Ity_F32: return Iop_AddF32;
+      case Ity_F64: return Iop_AddF64;
+      default: vpanic("mkADDF");
+   }
+}
+
+static IROp mkSUBF ( IRType ty ) {
+   switch (ty) {
+      case Ity_F32: return Iop_SubF32;
+      case Ity_F64: return Iop_SubF64;
+      default: vpanic("mkSUBF");
+   }
+}
+
+static IROp mkMULF ( IRType ty ) {
+   switch (ty) {
+      case Ity_F32: return Iop_MulF32;
+      case Ity_F64: return Iop_MulF64;
+      default: vpanic("mkMULF");
+   }
+}
+
+/* Map Ity_F32/Ity_F64 to the corresponding scalar FP divide IROp;
+   any other type is a panic. */
+static IROp mkDIVF ( IRType ty ) {
+   switch (ty) {
+      case Ity_F32: return Iop_DivF32;
+      case Ity_F64: return Iop_DivF64;
+      default: vpanic("mkDIVF"); /* was "mkMULF" -- copy/paste typo */
+   }
+}
+
+/* Map Ity_F32/Ity_F64 to the corresponding scalar FP negate IROp. */
+static IROp mkNEGF ( IRType ty ) {
+   switch (ty) {
+      case Ity_F32: return Iop_NegF32;
+      case Ity_F64: return Iop_NegF64;
+      default: vpanic("mkNEGF");
+   }
+}
+
+/* Map Ity_F32/Ity_F64 to the corresponding scalar FP abs IROp;
+   any other type is a panic. */
+static IROp mkABSF ( IRType ty ) {
+   switch (ty) {
+      case Ity_F32: return Iop_AbsF32;
+      case Ity_F64: return Iop_AbsF64;
+      default: vpanic("mkABSF"); /* was "mkNEGF" -- copy/paste typo */
+   }
+}
+
+/* Map Ity_F32/Ity_F64 to the corresponding scalar FP square-root
+   IROp; any other type is a panic. */
+static IROp mkSQRTF ( IRType ty ) {
+   switch (ty) {
+      case Ity_F32: return Iop_SqrtF32;
+      case Ity_F64: return Iop_SqrtF64;
+      default: vpanic("mkSQRTF"); /* was "mkNEGF" -- copy/paste typo */
+   }
+}
+
+/* 128-bit SIMD IROp selectors.  'size' encodes the lane width:
+   0 -> 8-bit lanes (x16), 1 -> 16-bit (x8), 2 -> 32-bit (x4),
+   3 -> 64-bit (x2). */
+static IROp mkVecADD ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Add8x16, Iop_Add16x8, Iop_Add32x4, Iop_Add64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQADDU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QAdd8Ux16, Iop_QAdd16Ux8, Iop_QAdd32Ux4, Iop_QAdd64Ux2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQADDS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QAdd8Sx16, Iop_QAdd16Sx8, Iop_QAdd32Sx4, Iop_QAdd64Sx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQADDEXTSUSATUU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QAddExtSUsatUU8x16, Iop_QAddExtSUsatUU16x8,
+          Iop_QAddExtSUsatUU32x4, Iop_QAddExtSUsatUU64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQADDEXTUSSATSS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QAddExtUSsatSS8x16, Iop_QAddExtUSsatSS16x8,
+          Iop_QAddExtUSsatSS32x4, Iop_QAddExtUSsatSS64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecSUB ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Sub8x16, Iop_Sub16x8, Iop_Sub32x4, Iop_Sub64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQSUBU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QSub8Ux16, Iop_QSub16Ux8, Iop_QSub32Ux4, Iop_QSub64Ux2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQSUBS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QSub8Sx16, Iop_QSub16Sx8, Iop_QSub32Sx4, Iop_QSub64Sx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* Shift-by-immediate variants (SarN/ShrN/ShlN). */
+static IROp mkVecSARN ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_SarN8x16, Iop_SarN16x8, Iop_SarN32x4, Iop_SarN64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecSHRN ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_ShrN8x16, Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecSHLN ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_ShlN8x16, Iop_ShlN16x8, Iop_ShlN32x4, Iop_ShlN64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* Lane-rearrangement and min/max selectors; same size encoding as
+   above.  For 64-bit lanes the CatEven/CatOdd cases degenerate to
+   InterleaveLO/InterleaveHI, since there is only one even and one odd
+   lane. */
+static IROp mkVecCATEVENLANES ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_CatEvenLanes8x16, Iop_CatEvenLanes16x8,
+          Iop_CatEvenLanes32x4, Iop_InterleaveLO64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecCATODDLANES ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_CatOddLanes8x16, Iop_CatOddLanes16x8,
+          Iop_CatOddLanes32x4, Iop_InterleaveHI64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecINTERLEAVELO ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_InterleaveLO8x16, Iop_InterleaveLO16x8,
+          Iop_InterleaveLO32x4, Iop_InterleaveLO64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecINTERLEAVEHI ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_InterleaveHI8x16, Iop_InterleaveHI16x8,
+          Iop_InterleaveHI32x4, Iop_InterleaveHI64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecMAXU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Max8Ux16, Iop_Max16Ux8, Iop_Max32Ux4, Iop_Max64Ux2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecMAXS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Max8Sx16, Iop_Max16Sx8, Iop_Max32Sx4, Iop_Max64Sx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecMINU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Min8Ux16, Iop_Min16Ux8, Iop_Min32Ux4, Iop_Min64Ux2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecMINS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Min8Sx16, Iop_Min16Sx8, Iop_Min32Sx4, Iop_Min64Sx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* No 64x2 integer multiply exists, hence the tighter assertion. */
+static IROp mkVecMUL ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Mul8x16, Iop_Mul16x8, Iop_Mul32x4, Iop_INVALID };
+   vassert(size < 3);
+   return ops[size];
+}
+
+/* Widening multiplies: 'sizeNarrow' is the lane size of the narrow
+   (input) operands: 0 -> 8-bit, 1 -> 16-bit, 2 -> 32-bit.  There is
+   no 64-to-128 widening multiply. */
+static IROp mkVecMULLU ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_Mull8Ux8, Iop_Mull16Ux4, Iop_Mull32Ux2, Iop_INVALID };
+   vassert(sizeNarrow < 3);
+   return ops[sizeNarrow];
+}
+
+static IROp mkVecMULLS ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_Mull8Sx8, Iop_Mull16Sx4, Iop_Mull32Sx2, Iop_INVALID };
+   vassert(sizeNarrow < 3);
+   return ops[sizeNarrow];
+}
+
+/* Note: may return Iop_INVALID for sizeNarrow == 0; callers must
+   handle that case. */
+static IROp mkVecQDMULLS ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_INVALID, Iop_QDMull16Sx4, Iop_QDMull32Sx2, Iop_INVALID };
+   vassert(sizeNarrow < 3);
+   return ops[sizeNarrow];
+}
+
+/* Lane-wise compares and misc unary ops; size encoding as above. */
+static IROp mkVecCMPEQ ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_CmpEQ8x16, Iop_CmpEQ16x8, Iop_CmpEQ32x4, Iop_CmpEQ64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecCMPGTU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_CmpGT8Ux16, Iop_CmpGT16Ux8, Iop_CmpGT32Ux4, Iop_CmpGT64Ux2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecCMPGTS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_CmpGT8Sx16, Iop_CmpGT16Sx8, Iop_CmpGT32Sx4, Iop_CmpGT64Sx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecABS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Abs8x16, Iop_Abs16x8, Iop_Abs32x4, Iop_Abs64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* Zero the upper part of a V128, keeping the low 8/16/32/64 bits. */
+static IROp mkVecZEROHIxxOFV128 ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_ZeroHI120ofV128, Iop_ZeroHI112ofV128,
+          Iop_ZeroHI96ofV128,  Iop_ZeroHI64ofV128 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* Make an integer constant expression of type 'ty' (Ity_I32 or
+   Ity_I64) from the low bits of 'imm'; panics on any other type. */
+static IRExpr* mkU ( IRType ty, ULong imm ) {
+   if (ty == Ity_I64) return mkU64(imm);
+   if (ty == Ity_I32) return mkU32((UInt)(imm & 0xFFFFFFFFULL));
+   vpanic("mkU");
+}
+
+/* Saturating doubling multiply-high; only 16- and 32-bit lanes exist,
+   so indices 0 and 3 yield Iop_INVALID (callers must check). */
+static IROp mkVecQDMULHIS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_INVALID, Iop_QDMulHi16Sx8, Iop_QDMulHi32Sx4, Iop_INVALID };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQRDMULHIS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_INVALID, Iop_QRDMulHi16Sx8, Iop_QRDMulHi32Sx4, Iop_INVALID };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* Saturating shifts that also return the Q (saturation) bit ("Qand"
+   family); size encoding as elsewhere (0..3 -> 8..64-bit lanes). */
+static IROp mkVecQANDUQSH ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QandUQsh8x16, Iop_QandUQsh16x8,
+          Iop_QandUQsh32x4, Iop_QandUQsh64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQANDSQSH ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QandSQsh8x16, Iop_QandSQsh16x8,
+          Iop_QandSQsh32x4, Iop_QandSQsh64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQANDUQRSH ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QandUQRsh8x16, Iop_QandUQRsh16x8,
+          Iop_QandUQRsh32x4, Iop_QandUQRsh64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQANDSQRSH ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QandSQRsh8x16, Iop_QandSQRsh16x8,
+          Iop_QandSQRsh32x4, Iop_QandSQRsh64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* Plain (Sh) and rounding (Rsh) by-vector shifts. */
+static IROp mkVecSHU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Sh8Ux16, Iop_Sh16Ux8, Iop_Sh32Ux4, Iop_Sh64Ux2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecSHS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Sh8Sx16, Iop_Sh16Sx8, Iop_Sh32Sx4, Iop_Sh64Sx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecRSHU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Rsh8Ux16, Iop_Rsh16Ux8, Iop_Rsh32Ux4, Iop_Rsh64Ux2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecRSHS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_Rsh8Sx16, Iop_Rsh16Sx8, Iop_Rsh32Sx4, Iop_Rsh64Sx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* Narrowing operations.  'sizeNarrow' is the lane size of the narrow
+   (output) side: 0 -> 8, 1 -> 16, 2 -> 32 bits.  Index 3 yields
+   Iop_INVALID, which callers must check for. */
+static IROp mkVecNARROWUN ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_NarrowUn16to8x8, Iop_NarrowUn32to16x4,
+          Iop_NarrowUn64to32x2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+static IROp mkVecQNARROWUNSU ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_QNarrowUn16Sto8Ux8,  Iop_QNarrowUn32Sto16Ux4,
+          Iop_QNarrowUn64Sto32Ux2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+static IROp mkVecQNARROWUNSS ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_QNarrowUn16Sto8Sx8,  Iop_QNarrowUn32Sto16Sx4,
+          Iop_QNarrowUn64Sto32Sx2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+static IROp mkVecQNARROWUNUU ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_QNarrowUn16Uto8Ux8,  Iop_QNarrowUn32Uto16Ux4,
+          Iop_QNarrowUn64Uto32Ux2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+/* Saturating shift-right-by-immediate-and-narrow, returning the Q
+   (saturation) bit as well ("QandQ" family); plain (Shr/Sar) and
+   rounding (RShr/RSar) variants. */
+static IROp mkVecQANDqshrNNARROWUU ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_QandQShrNnarrow16Uto8Ux8, Iop_QandQShrNnarrow32Uto16Ux4,
+          Iop_QandQShrNnarrow64Uto32Ux2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+static IROp mkVecQANDqsarNNARROWSS ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_QandQSarNnarrow16Sto8Sx8,  Iop_QandQSarNnarrow32Sto16Sx4,
+          Iop_QandQSarNnarrow64Sto32Sx2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+static IROp mkVecQANDqsarNNARROWSU ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_QandQSarNnarrow16Sto8Ux8,  Iop_QandQSarNnarrow32Sto16Ux4,
+          Iop_QandQSarNnarrow64Sto32Ux2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+static IROp mkVecQANDqrshrNNARROWUU ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_QandQRShrNnarrow16Uto8Ux8,  Iop_QandQRShrNnarrow32Uto16Ux4,
+          Iop_QandQRShrNnarrow64Uto32Ux2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+static IROp mkVecQANDqrsarNNARROWSS ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_QandQRSarNnarrow16Sto8Sx8,  Iop_QandQRSarNnarrow32Sto16Sx4,
+          Iop_QandQRSarNnarrow64Sto32Sx2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+static IROp mkVecQANDqrsarNNARROWSU ( UInt sizeNarrow ) {
+   const IROp ops[4]
+      = { Iop_QandQRSarNnarrow16Sto8Ux8,  Iop_QandQRSarNnarrow32Sto16Ux4,
+          Iop_QandQRSarNnarrow64Sto32Ux2, Iop_INVALID };
+   vassert(sizeNarrow < 4);
+   return ops[sizeNarrow];
+}
+
+/* Saturating shift-left-by-immediate; suffix letters give the
+   source/destination signedness (UU, SS, SU).  Size encoding as
+   elsewhere (0..3 -> 8..64-bit lanes). */
+static IROp mkVecQSHLNSATUU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QShlNsatUU8x16, Iop_QShlNsatUU16x8,
+          Iop_QShlNsatUU32x4, Iop_QShlNsatUU64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQSHLNSATSS ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QShlNsatSS8x16, Iop_QShlNsatSS16x8,
+          Iop_QShlNsatSS32x4, Iop_QShlNsatSS64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecQSHLNSATSU ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_QShlNsatSU8x16, Iop_QShlNsatSU16x8,
+          Iop_QShlNsatSU32x4, Iop_QShlNsatSU64x2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* Vector FP ops exist only for 32- and 64-bit lanes; indices 0 and 1
+   yield Iop_INVALID (callers must check). */
+static IROp mkVecADDF ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_INVALID, Iop_INVALID, Iop_Add32Fx4, Iop_Add64Fx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecMAXF ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_INVALID, Iop_INVALID, Iop_Max32Fx4, Iop_Max64Fx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+static IROp mkVecMINF ( UInt size ) {
+   const IROp ops[4]
+      = { Iop_INVALID, Iop_INVALID, Iop_Min32Fx4, Iop_Min64Fx2 };
+   vassert(size < 4);
+   return ops[size];
+}
+
+/* Generate IR to create 'arg rotated right by imm', for sane values
+   of 'ty' (Ity_I32 or Ity_I64) and 'imm' (0 .. width-1). */
+static IRTemp mathROR ( IRType ty, IRTemp arg, UInt imm )
+{
+   UInt w = 0;
+   if (ty == Ity_I64) {
+      w = 64;
+   } else {
+      vassert(ty == Ity_I32);
+      w = 32;
+   }
+   vassert(w != 0);
+   vassert(imm < w);
+   if (imm == 0) {
+      /* Rotation by zero: nothing to do; a shift by w below would be
+         undefined, so return the input unchanged. */
+      return arg;
+   }
+   IRTemp res = newTemp(ty);
+   /* ROR(x, imm) == (x << (w-imm)) | (x >> imm). */
+   assign(res, binop(mkOR(ty),
+                     binop(mkSHL(ty), mkexpr(arg), mkU8(w - imm)),
+                     binop(mkSHR(ty), mkexpr(arg), mkU8(imm)) ));
+   return res;
+}
+
+/* Generate IR to set the returned temp to either all-zeroes or
+   all ones, as a copy of arg<imm>: shift bit 'imm' up to the sign
+   position, then arithmetically shift it back down across the whole
+   word. */
+static IRTemp mathREPLICATE ( IRType ty, IRTemp arg, UInt imm )
+{
+   UInt w = 0;
+   if (ty == Ity_I64) {
+      w = 64;
+   } else {
+      vassert(ty == Ity_I32);
+      w = 32;
+   }
+   vassert(w != 0);
+   vassert(imm < w);
+   IRTemp res = newTemp(ty);
+   assign(res, binop(mkSAR(ty),
+                     binop(mkSHL(ty), mkexpr(arg), mkU8(w - 1 - imm)),
+                     mkU8(w - 1)));
+   return res;
+}
+
+/* Zero-extend an 8/16/32/64 bit integer expression to 64 bits.
+   A 64-bit input is passed through unchanged; panics otherwise. */
+static IRExpr* widenUto64 ( IRType srcTy, IRExpr* e )
+{
+   if (srcTy == Ity_I64) return e;
+   if (srcTy == Ity_I32) return unop(Iop_32Uto64, e);
+   if (srcTy == Ity_I16) return unop(Iop_16Uto64, e);
+   if (srcTy == Ity_I8)  return unop(Iop_8Uto64, e);
+   vpanic("widenUto64(arm64)");
+}
+
+/* Narrow a 64 bit integer expression to 8/16/32/64 bits.  Clearly
+   only some of these combinations make sense; a 64-bit target is a
+   pass-through, and any other type panics. */
+static IRExpr* narrowFrom64 ( IRType dstTy, IRExpr* e )
+{
+   if (dstTy == Ity_I64) return e;
+   if (dstTy == Ity_I32) return unop(Iop_64to32, e);
+   if (dstTy == Ity_I16) return unop(Iop_64to16, e);
+   if (dstTy == Ity_I8)  return unop(Iop_64to8, e);
+   vpanic("narrowFrom64(arm64)");
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for accessing guest registers.               ---*/
+/*------------------------------------------------------------*/
+
+#define OFFB_X0       offsetof(VexGuestARM64State,guest_X0)
+#define OFFB_X1       offsetof(VexGuestARM64State,guest_X1)
+#define OFFB_X2       offsetof(VexGuestARM64State,guest_X2)
+#define OFFB_X3       offsetof(VexGuestARM64State,guest_X3)
+#define OFFB_X4       offsetof(VexGuestARM64State,guest_X4)
+#define OFFB_X5       offsetof(VexGuestARM64State,guest_X5)
+#define OFFB_X6       offsetof(VexGuestARM64State,guest_X6)
+#define OFFB_X7       offsetof(VexGuestARM64State,guest_X7)
+#define OFFB_X8       offsetof(VexGuestARM64State,guest_X8)
+#define OFFB_X9       offsetof(VexGuestARM64State,guest_X9)
+#define OFFB_X10      offsetof(VexGuestARM64State,guest_X10)
+#define OFFB_X11      offsetof(VexGuestARM64State,guest_X11)
+#define OFFB_X12      offsetof(VexGuestARM64State,guest_X12)
+#define OFFB_X13      offsetof(VexGuestARM64State,guest_X13)
+#define OFFB_X14      offsetof(VexGuestARM64State,guest_X14)
+#define OFFB_X15      offsetof(VexGuestARM64State,guest_X15)
+#define OFFB_X16      offsetof(VexGuestARM64State,guest_X16)
+#define OFFB_X17      offsetof(VexGuestARM64State,guest_X17)
+#define OFFB_X18      offsetof(VexGuestARM64State,guest_X18)
+#define OFFB_X19      offsetof(VexGuestARM64State,guest_X19)
+#define OFFB_X20      offsetof(VexGuestARM64State,guest_X20)
+#define OFFB_X21      offsetof(VexGuestARM64State,guest_X21)
+#define OFFB_X22      offsetof(VexGuestARM64State,guest_X22)
+#define OFFB_X23      offsetof(VexGuestARM64State,guest_X23)
+#define OFFB_X24      offsetof(VexGuestARM64State,guest_X24)
+#define OFFB_X25      offsetof(VexGuestARM64State,guest_X25)
+#define OFFB_X26      offsetof(VexGuestARM64State,guest_X26)
+#define OFFB_X27      offsetof(VexGuestARM64State,guest_X27)
+#define OFFB_X28      offsetof(VexGuestARM64State,guest_X28)
+#define OFFB_X29      offsetof(VexGuestARM64State,guest_X29)
+#define OFFB_X30      offsetof(VexGuestARM64State,guest_X30)
+
+#define OFFB_XSP      offsetof(VexGuestARM64State,guest_XSP)
+#define OFFB_PC       offsetof(VexGuestARM64State,guest_PC)
+
+#define OFFB_CC_OP    offsetof(VexGuestARM64State,guest_CC_OP)
+#define OFFB_CC_DEP1  offsetof(VexGuestARM64State,guest_CC_DEP1)
+#define OFFB_CC_DEP2  offsetof(VexGuestARM64State,guest_CC_DEP2)
+#define OFFB_CC_NDEP  offsetof(VexGuestARM64State,guest_CC_NDEP)
+
+#define OFFB_TPIDR_EL0 offsetof(VexGuestARM64State,guest_TPIDR_EL0)
+#define OFFB_NRADDR   offsetof(VexGuestARM64State,guest_NRADDR)
+
+#define OFFB_Q0       offsetof(VexGuestARM64State,guest_Q0)
+#define OFFB_Q1       offsetof(VexGuestARM64State,guest_Q1)
+#define OFFB_Q2       offsetof(VexGuestARM64State,guest_Q2)
+#define OFFB_Q3       offsetof(VexGuestARM64State,guest_Q3)
+#define OFFB_Q4       offsetof(VexGuestARM64State,guest_Q4)
+#define OFFB_Q5       offsetof(VexGuestARM64State,guest_Q5)
+#define OFFB_Q6       offsetof(VexGuestARM64State,guest_Q6)
+#define OFFB_Q7       offsetof(VexGuestARM64State,guest_Q7)
+#define OFFB_Q8       offsetof(VexGuestARM64State,guest_Q8)
+#define OFFB_Q9       offsetof(VexGuestARM64State,guest_Q9)
+#define OFFB_Q10      offsetof(VexGuestARM64State,guest_Q10)
+#define OFFB_Q11      offsetof(VexGuestARM64State,guest_Q11)
+#define OFFB_Q12      offsetof(VexGuestARM64State,guest_Q12)
+#define OFFB_Q13      offsetof(VexGuestARM64State,guest_Q13)
+#define OFFB_Q14      offsetof(VexGuestARM64State,guest_Q14)
+#define OFFB_Q15      offsetof(VexGuestARM64State,guest_Q15)
+#define OFFB_Q16      offsetof(VexGuestARM64State,guest_Q16)
+#define OFFB_Q17      offsetof(VexGuestARM64State,guest_Q17)
+#define OFFB_Q18      offsetof(VexGuestARM64State,guest_Q18)
+#define OFFB_Q19      offsetof(VexGuestARM64State,guest_Q19)
+#define OFFB_Q20      offsetof(VexGuestARM64State,guest_Q20)
+#define OFFB_Q21      offsetof(VexGuestARM64State,guest_Q21)
+#define OFFB_Q22      offsetof(VexGuestARM64State,guest_Q22)
+#define OFFB_Q23      offsetof(VexGuestARM64State,guest_Q23)
+#define OFFB_Q24      offsetof(VexGuestARM64State,guest_Q24)
+#define OFFB_Q25      offsetof(VexGuestARM64State,guest_Q25)
+#define OFFB_Q26      offsetof(VexGuestARM64State,guest_Q26)
+#define OFFB_Q27      offsetof(VexGuestARM64State,guest_Q27)
+#define OFFB_Q28      offsetof(VexGuestARM64State,guest_Q28)
+#define OFFB_Q29      offsetof(VexGuestARM64State,guest_Q29)
+#define OFFB_Q30      offsetof(VexGuestARM64State,guest_Q30)
+#define OFFB_Q31      offsetof(VexGuestARM64State,guest_Q31)
+
+#define OFFB_FPCR     offsetof(VexGuestARM64State,guest_FPCR)
+#define OFFB_QCFLAG   offsetof(VexGuestARM64State,guest_QCFLAG)
+
+#define OFFB_CMSTART  offsetof(VexGuestARM64State,guest_CMSTART)
+#define OFFB_CMLEN    offsetof(VexGuestARM64State,guest_CMLEN)
+
+
+/* ---------------- Integer registers ---------------- */
+
+/* Return the guest-state byte offset of integer register Xn,
+   0 <= iregNo <= 30.  Index 31 is deliberately not handled here:
+   callers must map it to XSP or ZR themselves (see
+   offsetIReg64orSP / getIReg64orZR). */
+static Int offsetIReg64 ( UInt iregNo )
+{
+   /* Do we care about endianness here?  We do if sub-parts of integer
+      registers are accessed. */
+   switch (iregNo) {
+      case 0:  return OFFB_X0;
+      case 1:  return OFFB_X1;
+      case 2:  return OFFB_X2;
+      case 3:  return OFFB_X3;
+      case 4:  return OFFB_X4;
+      case 5:  return OFFB_X5;
+      case 6:  return OFFB_X6;
+      case 7:  return OFFB_X7;
+      case 8:  return OFFB_X8;
+      case 9:  return OFFB_X9;
+      case 10: return OFFB_X10;
+      case 11: return OFFB_X11;
+      case 12: return OFFB_X12;
+      case 13: return OFFB_X13;
+      case 14: return OFFB_X14;
+      case 15: return OFFB_X15;
+      case 16: return OFFB_X16;
+      case 17: return OFFB_X17;
+      case 18: return OFFB_X18;
+      case 19: return OFFB_X19;
+      case 20: return OFFB_X20;
+      case 21: return OFFB_X21;
+      case 22: return OFFB_X22;
+      case 23: return OFFB_X23;
+      case 24: return OFFB_X24;
+      case 25: return OFFB_X25;
+      case 26: return OFFB_X26;
+      case 27: return OFFB_X27;
+      case 28: return OFFB_X28;
+      case 29: return OFFB_X29;
+      case 30: return OFFB_X30;
+      /* but not 31 */
+      default: vassert(0);
+   }
+}
+
+/* As offsetIReg64, but register number 31 denotes the stack pointer
+   (XSP) rather than being invalid. */
+static Int offsetIReg64orSP ( UInt iregNo )
+{
+   if (iregNo == 31) return OFFB_XSP;
+   return offsetIReg64(iregNo);
+}
+
+/* Disassembly name for Xn, with index 31 rendered as the zero
+   register "xzr". */
+static const HChar* nameIReg64orZR ( UInt iregNo )
+{
+   vassert(iregNo < 32);
+   static const HChar* names[32]
+      = { "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7", 
+          "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15", 
+          "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", 
+          "x24", "x25", "x26", "x27", "x28", "x29", "x30", "xzr" };
+   return names[iregNo];
+}
+
+/* Disassembly name for Xn, with index 31 rendered as "sp". */
+static const HChar* nameIReg64orSP ( UInt iregNo )
+{
+   if (iregNo == 31) {
+      return "sp";
+   }
+   vassert(iregNo < 31);
+   return nameIReg64orZR(iregNo);
+}
+
+/* Read Xn or, for index 31, the stack pointer. */
+static IRExpr* getIReg64orSP ( UInt iregNo )
+{
+   vassert(iregNo < 32);
+   return IRExpr_Get( offsetIReg64orSP(iregNo), Ity_I64 );
+}
+
+/* Read Xn or, for index 31, the constant zero (ZR semantics). */
+static IRExpr* getIReg64orZR ( UInt iregNo )
+{
+   if (iregNo == 31) {
+      return mkU64(0);
+   }
+   vassert(iregNo < 31);
+   return IRExpr_Get( offsetIReg64orSP(iregNo), Ity_I64 );
+}
+
+/* Write Xn or, for index 31, the stack pointer. */
+static void putIReg64orSP ( UInt iregNo, IRExpr* e ) 
+{
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64);
+   stmt( IRStmt_Put(offsetIReg64orSP(iregNo), e) );
+}
+
+/* Write Xn; index 31 is the zero register, so the write is simply
+   discarded. */
+static void putIReg64orZR ( UInt iregNo, IRExpr* e ) 
+{
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64);
+   if (iregNo == 31) {
+      return;
+   }
+   vassert(iregNo < 31);
+   stmt( IRStmt_Put(offsetIReg64orSP(iregNo), e) );
+}
+
+/* 32-bit (Wn) analogues of the 64-bit register helpers above.  The
+   underlying guest state holds full 64-bit registers; reads narrow
+   the low 32 bits, writes zero-extend to 64 bits. */
+static const HChar* nameIReg32orZR ( UInt iregNo )
+{
+   vassert(iregNo < 32);
+   static const HChar* names[32]
+      = { "w0",  "w1",  "w2",  "w3",  "w4",  "w5",  "w6",  "w7", 
+          "w8",  "w9",  "w10", "w11", "w12", "w13", "w14", "w15", 
+          "w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23", 
+          "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wzr" };
+   return names[iregNo];
+}
+
+static const HChar* nameIReg32orSP ( UInt iregNo )
+{
+   if (iregNo == 31) {
+      return "wsp";
+   }
+   vassert(iregNo < 31);
+   return nameIReg32orZR(iregNo);
+}
+
+static IRExpr* getIReg32orSP ( UInt iregNo )
+{
+   vassert(iregNo < 32);
+   return unop(Iop_64to32,
+               IRExpr_Get( offsetIReg64orSP(iregNo), Ity_I64 ));
+}
+
+static IRExpr* getIReg32orZR ( UInt iregNo )
+{
+   if (iregNo == 31) {
+      return mkU32(0);
+   }
+   vassert(iregNo < 31);
+   return unop(Iop_64to32,
+               IRExpr_Get( offsetIReg64orSP(iregNo), Ity_I64 ));
+}
+
+static void putIReg32orSP ( UInt iregNo, IRExpr* e ) 
+{
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
+   stmt( IRStmt_Put(offsetIReg64orSP(iregNo), unop(Iop_32Uto64, e)) );
+}
+
+static void putIReg32orZR ( UInt iregNo, IRExpr* e ) 
+{
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
+   if (iregNo == 31) {
+      return;   /* writes to WZR are discarded */
+   }
+   vassert(iregNo < 31);
+   stmt( IRStmt_Put(offsetIReg64orSP(iregNo), unop(Iop_32Uto64, e)) );
+}
+
+/* Width-polymorphic wrappers: dispatch to the 64- or 32-bit variant
+   according to 'is64'. */
+static const HChar* nameIRegOrSP ( Bool is64, UInt iregNo )
+{
+   vassert(is64 == True || is64 == False);
+   return is64 ? nameIReg64orSP(iregNo) : nameIReg32orSP(iregNo);
+}
+
+static const HChar* nameIRegOrZR ( Bool is64, UInt iregNo )
+{
+   vassert(is64 == True || is64 == False);
+   return is64 ? nameIReg64orZR(iregNo) : nameIReg32orZR(iregNo);
+}
+
+static IRExpr* getIRegOrZR ( Bool is64, UInt iregNo )
+{
+   vassert(is64 == True || is64 == False);
+   return is64 ? getIReg64orZR(iregNo) : getIReg32orZR(iregNo);
+}
+
+static void putIRegOrZR ( Bool is64, UInt iregNo, IRExpr* e )
+{
+   vassert(is64 == True || is64 == False);
+   if (is64) putIReg64orZR(iregNo, e); else putIReg32orZR(iregNo, e);
+}
+
+/* Write the guest program counter. */
+static void putPC ( IRExpr* e )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64);
+   stmt( IRStmt_Put(OFFB_PC, e) );
+}
+
+
+/* ---------------- Vector (Q) registers ---------------- */
+
+/* Return the guest-state byte offset of vector register Qn,
+   0 <= qregNo <= 31. */
+static Int offsetQReg128 ( UInt qregNo )
+{
+   /* We don't care about endianness at this point.  It only becomes
+      relevant when dealing with sections of these registers.*/
+   switch (qregNo) {
+      case 0:  return OFFB_Q0;
+      case 1:  return OFFB_Q1;
+      case 2:  return OFFB_Q2;
+      case 3:  return OFFB_Q3;
+      case 4:  return OFFB_Q4;
+      case 5:  return OFFB_Q5;
+      case 6:  return OFFB_Q6;
+      case 7:  return OFFB_Q7;
+      case 8:  return OFFB_Q8;
+      case 9:  return OFFB_Q9;
+      case 10: return OFFB_Q10;
+      case 11: return OFFB_Q11;
+      case 12: return OFFB_Q12;
+      case 13: return OFFB_Q13;
+      case 14: return OFFB_Q14;
+      case 15: return OFFB_Q15;
+      case 16: return OFFB_Q16;
+      case 17: return OFFB_Q17;
+      case 18: return OFFB_Q18;
+      case 19: return OFFB_Q19;
+      case 20: return OFFB_Q20;
+      case 21: return OFFB_Q21;
+      case 22: return OFFB_Q22;
+      case 23: return OFFB_Q23;
+      case 24: return OFFB_Q24;
+      case 25: return OFFB_Q25;
+      case 26: return OFFB_Q26;
+      case 27: return OFFB_Q27;
+      case 28: return OFFB_Q28;
+      case 29: return OFFB_Q29;
+      case 30: return OFFB_Q30;
+      case 31: return OFFB_Q31;
+      default: vassert(0);
+   }
+}
+
+/* Write to a complete Qreg. */
+static void putQReg128 ( UInt qregNo, IRExpr* e )
+{
+   vassert(qregNo < 32);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_V128);
+   stmt( IRStmt_Put(offsetQReg128(qregNo), e) );
+}
+
+/* Read a complete Qreg. */
+static IRExpr* getQReg128 ( UInt qregNo )
+{
+   vassert(qregNo < 32);
+   return IRExpr_Get(offsetQReg128(qregNo), Ity_V128);
+}
+
+/* Produce the IR type for some sub-part of a vector.  For 32- and 64-
+   bit sub-parts we can choose either integer or float types, and
+   choose float on the basis that that is the common use case and so
+   will give least interference with Put-to-Get forwarding later
+   on.  (Note: the 4-byte case currently returns Ity_I32, with Ity_F32
+   commented out.) */
+static IRType preferredVectorSubTypeFromSize ( UInt szB )
+{
+   switch (szB) {
+      case 1:  return Ity_I8;
+      case 2:  return Ity_I16;
+      case 4:  return Ity_I32; //Ity_F32;
+      case 8:  return Ity_F64;
+      case 16: return Ity_V128;
+      default: vassert(0);
+   }
+}
+
+/* Find the offset of the laneNo'th lane of type laneTy in the given
+   Qreg.  Since the host is little-endian, the least significant lane
+   has the lowest offset.  Asserts that the whole lane lies within the
+   16-byte register. */
+static Int offsetQRegLane ( UInt qregNo, IRType laneTy, UInt laneNo )
+{
+   vassert(host_endness == VexEndnessLE);
+   Int base = offsetQReg128(qregNo);
+   /* Since the host is little-endian, the least significant lane
+      will be at the lowest address. */
+   /* Restrict this to known types, so as to avoid silently accepting
+      stupid types. */
+   UInt laneSzB = 0;
+   switch (laneTy) {
+      case Ity_I8:                 laneSzB = 1;  break;
+      case Ity_F16: case Ity_I16:  laneSzB = 2;  break;
+      case Ity_F32: case Ity_I32:  laneSzB = 4;  break;
+      case Ity_F64: case Ity_I64:  laneSzB = 8;  break;
+      case Ity_V128:               laneSzB = 16; break;
+      default: break;
+   }
+   vassert(laneSzB > 0);
+   UInt minOff = laneNo * laneSzB;
+   UInt maxOff = minOff + laneSzB - 1;
+   vassert(maxOff < 16);
+   return base + minOff;
+}
+
+/* Put to the least significant lane of a Qreg.  The lane size is
+   inferred from the type of 'e'; only the listed scalar/vector types
+   are accepted. */
+static void putQRegLO ( UInt qregNo, IRExpr* e )
+{
+   IRType ty  = typeOfIRExpr(irsb->tyenv, e);
+   Int    off = offsetQRegLane(qregNo, ty, 0);
+   switch (ty) {
+      case Ity_I8:  case Ity_I16: case Ity_I32: case Ity_I64:
+      case Ity_F16: case Ity_F32: case Ity_F64: case Ity_V128:
+         break;
+      default:
+         vassert(0); // Other cases are probably invalid
+   }
+   stmt(IRStmt_Put(off, e));
+}
+
+/* Get from the least significant lane of a Qreg, at the given
+   type. */
+static IRExpr* getQRegLO ( UInt qregNo, IRType ty )
+{
+   Int off = offsetQRegLane(qregNo, ty, 0);
+   switch (ty) {
+      case Ity_I8:
+      case Ity_F16: case Ity_I16:
+      case Ity_I32: case Ity_I64:
+      case Ity_F32: case Ity_F64: case Ity_V128:
+         break;
+      default:
+         vassert(0); // Other cases are ATC
+   }
+   return IRExpr_Get(off, ty);
+}
+
+/* Return the asm name of the bottom |laneTy|-sized lane of Qreg
+   |qregNo|: "b"/"h"/"s"/"d"/"q" register names for 1/2/4/8/16 byte
+   lanes respectively. */
+static const HChar* nameQRegLO ( UInt qregNo, IRType laneTy )
+{
+   /* Rows are selected by lane size: b, h, s, d, q in that order. */
+   static const HChar* names[5][32] = {
+      { "b0",  "b1",  "b2",  "b3",  "b4",  "b5",  "b6",  "b7",
+        "b8",  "b9",  "b10", "b11", "b12", "b13", "b14", "b15",
+        "b16", "b17", "b18", "b19", "b20", "b21", "b22", "b23",
+        "b24", "b25", "b26", "b27", "b28", "b29", "b30", "b31" },
+      { "h0",  "h1",  "h2",  "h3",  "h4",  "h5",  "h6",  "h7",
+        "h8",  "h9",  "h10", "h11", "h12", "h13", "h14", "h15",
+        "h16", "h17", "h18", "h19", "h20", "h21", "h22", "h23",
+        "h24", "h25", "h26", "h27", "h28", "h29", "h30", "h31" },
+      { "s0",  "s1",  "s2",  "s3",  "s4",  "s5",  "s6",  "s7",
+        "s8",  "s9",  "s10", "s11", "s12", "s13", "s14", "s15",
+        "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+        "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31" },
+      { "d0",  "d1",  "d2",  "d3",  "d4",  "d5",  "d6",  "d7",
+        "d8",  "d9",  "d10", "d11", "d12", "d13", "d14", "d15",
+        "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+        "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31" },
+      { "q0",  "q1",  "q2",  "q3",  "q4",  "q5",  "q6",  "q7",
+        "q8",  "q9",  "q10", "q11", "q12", "q13", "q14", "q15",
+        "q16", "q17", "q18", "q19", "q20", "q21", "q22", "q23",
+        "q24", "q25", "q26", "q27", "q28", "q29", "q30", "q31" }
+   };
+   vassert(qregNo < 32);
+   Int row = -1;
+   switch (sizeofIRType(laneTy)) {
+      case 1:  row = 0; break;
+      case 2:  row = 1; break;
+      case 4:  row = 2; break;
+      case 8:  row = 3; break;
+      case 16: row = 4; break;
+      default: vassert(0);
+   }
+   return names[row][qregNo];
+}
+
+/* Name of Qreg |qregNo| viewed at its full 128-bit ("q") width. */
+static const HChar* nameQReg128 ( UInt qregNo )
+{
+   return nameQRegLO(qregNo, Ity_V128);
+}
+
+/* Find the offset of the most significant half (8 bytes) of the given
+   Qreg.  This requires knowing the endianness of the host. */
+static Int offsetQRegHI64 ( UInt qregNo )
+{
+   /* Host is little-endian (asserted in offsetQRegLane), so I64 lane
+      number 1 is the high half of the 128-bit register. */
+   return offsetQRegLane(qregNo, Ity_I64, 1);
+}
+
+/* Read the most significant 64 bits of Qreg |qregNo| as an I64. */
+static IRExpr* getQRegHI64 ( UInt qregNo )
+{
+   return IRExpr_Get(offsetQRegHI64(qregNo), Ity_I64);
+}
+
+/* Write the 8-byte value |e| (:: Ity_I64 or Ity_F64) to the most
+   significant half of Qreg |qregNo|. */
+static void putQRegHI64 ( UInt qregNo, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(irsb->tyenv, e);
+   /* Only 8-byte types make sense here; anything else is plain wrong. */
+   vassert(ty == Ity_I64 || ty == Ity_F64);
+   stmt(IRStmt_Put(offsetQRegHI64(qregNo), e));
+}
+
+/* Write |e| to lane |laneNo| of Qreg |qregNo|; the lane size is
+   implied by the type of |e|. */
+static void putQRegLane ( UInt qregNo, UInt laneNo, IRExpr* e )
+{
+   IRType laneTy = typeOfIRExpr(irsb->tyenv, e);
+   Bool   tyOK
+      = laneTy == Ity_F64 || laneTy == Ity_I64
+        || laneTy == Ity_I32 || laneTy == Ity_F32
+        || laneTy == Ity_I16 || laneTy == Ity_F16
+        || laneTy == Ity_I8;
+   vassert(tyOK); // Other cases are ATC
+   stmt(IRStmt_Put(offsetQRegLane(qregNo, laneTy, laneNo), e));
+}
+
+/* Read lane |laneNo| of Qreg |qregNo| at type |laneTy|. */
+static IRExpr* getQRegLane ( UInt qregNo, UInt laneNo, IRType laneTy )
+{
+   Bool tyOK
+      = laneTy == Ity_I64 || laneTy == Ity_I32 || laneTy == Ity_I16
+        || laneTy == Ity_I8
+        || laneTy == Ity_F64 || laneTy == Ity_F32 || laneTy == Ity_F16;
+   vassert(tyOK); // Other cases are ATC
+   return IRExpr_Get(offsetQRegLane(qregNo, laneTy, laneNo), laneTy);
+}
+
+
+//ZZ /* ---------------- Misc registers ---------------- */
+//ZZ 
+//ZZ static void putMiscReg32 ( UInt    gsoffset, 
+//ZZ                            IRExpr* e, /* :: Ity_I32 */
+//ZZ                            IRTemp  guardT /* :: Ity_I32, 0 or 1 */)
+//ZZ {
+//ZZ    switch (gsoffset) {
+//ZZ       case OFFB_FPSCR:   break;
+//ZZ       case OFFB_QFLAG32: break;
+//ZZ       case OFFB_GEFLAG0: break;
+//ZZ       case OFFB_GEFLAG1: break;
+//ZZ       case OFFB_GEFLAG2: break;
+//ZZ       case OFFB_GEFLAG3: break;
+//ZZ       default: vassert(0); /* awaiting more cases */
+//ZZ    }
+//ZZ    vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
+//ZZ 
+//ZZ    if (guardT == IRTemp_INVALID) {
+//ZZ       /* unconditional write */
+//ZZ       stmt(IRStmt_Put(gsoffset, e));
+//ZZ    } else {
+//ZZ       stmt(IRStmt_Put(
+//ZZ          gsoffset,
+//ZZ          IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+//ZZ                      e, IRExpr_Get(gsoffset, Ity_I32) )
+//ZZ       ));
+//ZZ    }
+//ZZ }
+//ZZ 
+//ZZ static IRTemp get_ITSTATE ( void )
+//ZZ {
+//ZZ    ASSERT_IS_THUMB;
+//ZZ    IRTemp t = newTemp(Ity_I32);
+//ZZ    assign(t, IRExpr_Get( OFFB_ITSTATE, Ity_I32));
+//ZZ    return t;
+//ZZ }
+//ZZ 
+//ZZ static void put_ITSTATE ( IRTemp t )
+//ZZ {
+//ZZ    ASSERT_IS_THUMB;
+//ZZ    stmt( IRStmt_Put( OFFB_ITSTATE, mkexpr(t)) );
+//ZZ }
+//ZZ 
+//ZZ static IRTemp get_QFLAG32 ( void )
+//ZZ {
+//ZZ    IRTemp t = newTemp(Ity_I32);
+//ZZ    assign(t, IRExpr_Get( OFFB_QFLAG32, Ity_I32));
+//ZZ    return t;
+//ZZ }
+//ZZ 
+//ZZ static void put_QFLAG32 ( IRTemp t, IRTemp condT )
+//ZZ {
+//ZZ    putMiscReg32( OFFB_QFLAG32, mkexpr(t), condT );
+//ZZ }
+//ZZ 
+//ZZ /* Stickily set the 'Q' flag (APSR bit 27) of the APSR (Application Program
+//ZZ    Status Register) to indicate that overflow or saturation occurred.
+//ZZ    Nb: t must be zero to denote no saturation, and any nonzero
+//ZZ    value to indicate saturation. */
+//ZZ static void or_into_QFLAG32 ( IRExpr* e, IRTemp condT )
+//ZZ {
+//ZZ    IRTemp old = get_QFLAG32();
+//ZZ    IRTemp nyu = newTemp(Ity_I32);
+//ZZ    assign(nyu, binop(Iop_Or32, mkexpr(old), e) );
+//ZZ    put_QFLAG32(nyu, condT);
+//ZZ }
+
+
+/* ---------------- FPCR stuff ---------------- */
+
+/* Generate IR to extract the rounding-mode bits from FPCR and
+   re-encode them in IR form.  The final result is bound to the
+   returned Ity_I32 temp. */
+static IRTemp /* :: Ity_I32 */ mk_get_IR_rounding_mode ( void )
+{
+   /* ARM's vfp encoding of the rounding mode is
+         00  to nearest
+         01  to +infinity
+         10  to -infinity
+         11  to zero
+      whereas the IR encoding is
+         00  to nearest (the default)
+         10  to +infinity
+         01  to -infinity
+         11  to zero
+      so the conversion is just a swap of bits 1 and 0, once the
+      rmode field (FPCR[23:22]) has been slid down to the bottom. */
+   IRTemp fpcrEnc = newTemp(Ity_I32);
+   IRTemp irEnc   = newTemp(Ity_I32);
+   /* Slide FPCR[23:22] down to bits 1:0.  Anything left in bits 2
+      and above is harmless: the bit-swap below masks it out. */
+   assign(fpcrEnc,
+          binop(Iop_Shr32, IRExpr_Get(OFFB_FPCR, Ity_I32), mkU8(22)));
+   /* Swap bits 1 and 0. */
+   assign(irEnc,
+          binop(Iop_Or32,
+                binop(Iop_And32,
+                      binop(Iop_Shl32, mkexpr(fpcrEnc), mkU8(1)),
+                      mkU32(2)),
+                binop(Iop_And32,
+                      binop(Iop_Shr32, mkexpr(fpcrEnc), mkU8(1)),
+                      mkU32(1))
+         ));
+   return irEnc;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for flag handling and conditional insns      ---*/
+/*------------------------------------------------------------*/
+
+/* Return the standard assembly mnemonic for condition |cond|. */
+static const HChar* nameARM64Condcode ( ARM64Condcode cond )
+{
+   switch (cond) {
+      case ARM64CondAL:  return "al";
+      case ARM64CondNV:  return "nv";
+      case ARM64CondEQ:  return "eq";
+      case ARM64CondNE:  return "ne";
+      case ARM64CondCS:  return "cs";  // or 'hs'
+      case ARM64CondCC:  return "cc";  // or 'lo'
+      case ARM64CondMI:  return "mi";
+      case ARM64CondPL:  return "pl";
+      case ARM64CondVS:  return "vs";
+      case ARM64CondVC:  return "vc";
+      case ARM64CondHI:  return "hi";
+      case ARM64CondLS:  return "ls";
+      case ARM64CondGE:  return "ge";
+      case ARM64CondLT:  return "lt";
+      case ARM64CondGT:  return "gt";
+      case ARM64CondLE:  return "le";
+      default: vpanic("name_ARM64Condcode");
+   }
+}
+
+/* A handy shorthand for nameARM64Condcode. */
+static const HChar* nameCC ( ARM64Condcode cond ) {
+   return nameARM64Condcode(cond);
+}
+
+
+/* Build IR to calculate some particular condition from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression of type
+   Ity_I64 whose value is either 0 or 1, so it is suitable for
+   narrowing.  |cond| must be :: Ity_I64, must hold the condition
+   number in bits 7:4, and must be zero everywhere else.
+*/
+static IRExpr* mk_arm64g_calculate_condition_dyn ( IRExpr* cond )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, cond) == Ity_I64);
+   /* And |cond| had better only have bits 7:4 nonzero; there is,
+      obviously, no way to assert for that here. */
+
+   /* The helper's first argument is "(cond << 4) | stored-operation",
+      with |cond| supplied pre-shifted as noted above.  This packing
+      works because all ARM64_CC_OP_ values fit in 4 bits, so the
+      (COND, OP) pair occupies just the low 8 bits. */
+   IRExpr** callArgs
+      = mkIRExprVec_4(
+           binop(Iop_Or64, IRExpr_Get(OFFB_CC_OP, Ity_I64), cond),
+           IRExpr_Get(OFFB_CC_DEP1, Ity_I64),
+           IRExpr_Get(OFFB_CC_DEP2, Ity_I64),
+           IRExpr_Get(OFFB_CC_NDEP, Ity_I64)
+        );
+   IRExpr* ccall
+      = mkIRExprCCall(
+           Ity_I64,
+           0/*regparm*/,
+           "arm64g_calculate_condition", &arm64g_calculate_condition,
+           callArgs
+        );
+   /* Only DEP1 and DEP2 take part in definedness checking; the
+      (COND,OP) word and NDEP are excluded via the mask. */
+   ccall->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+   return ccall;
+}
+
+
+/* Build IR to calculate condition |cond| from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression of type
+   Ity_I64 whose value is either 0 or 1.
+*/
+static IRExpr* mk_arm64g_calculate_condition ( ARM64Condcode cond )
+{
+   /* Hand the condition number to the _dyn variant pre-shifted into
+      bits 7:4, as that function requires; the ARM64_CC_OP_ values
+      all fit in the 4 bits below it. */
+   vassert(cond >= 0 && cond <= 15);
+   return mk_arm64g_calculate_condition_dyn( mkU64(cond << 4) );
+}
+
+
+/* Build IR to calculate just the carry flag from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression :: Ity_I64. */
+static IRExpr* mk_arm64g_calculate_flag_c ( void )
+{
+   IRExpr** callArgs
+      = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I64),
+                       IRExpr_Get(OFFB_CC_DEP1, Ity_I64),
+                       IRExpr_Get(OFFB_CC_DEP2, Ity_I64),
+                       IRExpr_Get(OFFB_CC_NDEP, Ity_I64) );
+   IRExpr* ccall
+      = mkIRExprCCall(
+           Ity_I64,
+           0/*regparm*/,
+           "arm64g_calculate_flag_c", &arm64g_calculate_flag_c,
+           callArgs
+        );
+   /* Only DEP1 and DEP2 take part in definedness checking; OP and
+      NDEP are excluded via the mask. */
+   ccall->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+   return ccall;
+}
+
+
+//ZZ /* Build IR to calculate just the overflow flag from stored
+//ZZ    CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression ::
+//ZZ    Ity_I32. */
+//ZZ static IRExpr* mk_armg_calculate_flag_v ( void )
+//ZZ {
+//ZZ    IRExpr** args
+//ZZ       = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I32),
+//ZZ                        IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
+//ZZ                        IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
+//ZZ                        IRExpr_Get(OFFB_CC_NDEP, Ity_I32) );
+//ZZ    IRExpr* call
+//ZZ       = mkIRExprCCall(
+//ZZ            Ity_I32,
+//ZZ            0/*regparm*/, 
+//ZZ            "armg_calculate_flag_v", &armg_calculate_flag_v,
+//ZZ            args
+//ZZ         );
+//ZZ    /* Exclude OP and NDEP from definedness checking.  We're only
+//ZZ       interested in DEP1 and DEP2. */
+//ZZ    call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+//ZZ    return call;
+//ZZ }
+
+
+/* Build IR to calculate N Z C V in bits 31:28 of the
+   returned word. */
+static IRExpr* mk_arm64g_calculate_flags_nzcv ( void )
+{
+   IRExpr** callArgs
+      = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I64),
+                       IRExpr_Get(OFFB_CC_DEP1, Ity_I64),
+                       IRExpr_Get(OFFB_CC_DEP2, Ity_I64),
+                       IRExpr_Get(OFFB_CC_NDEP, Ity_I64) );
+   IRExpr* ccall
+      = mkIRExprCCall(
+           Ity_I64,
+           0/*regparm*/,
+           "arm64g_calculate_flags_nzcv", &arm64g_calculate_flags_nzcv,
+           callArgs
+        );
+   /* Only DEP1 and DEP2 take part in definedness checking; OP and
+      NDEP are excluded via the mask. */
+   ccall->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+   return ccall;
+}
+
+
+/* Build IR to set the flags thunk, in the most general case.
+   All three temps must be :: Ity_I64. */
+static
+void setFlags_D1_D2_ND ( UInt cc_op,
+                         IRTemp t_dep1, IRTemp t_dep2, IRTemp t_ndep )
+{
+   /* FIX: the closing paren of typeOfIRTemp was misplaced, as in
+      typeOfIRTemp(irsb->tyenv, t_dep1 == Ity_I64), so the comparison
+      result (a Bool) was passed as the IRTemp and the intended type
+      check never happened.  Assert on the temps' actual types. */
+   vassert(typeOfIRTemp(irsb->tyenv, t_dep1) == Ity_I64);
+   vassert(typeOfIRTemp(irsb->tyenv, t_dep2) == Ity_I64);
+   vassert(typeOfIRTemp(irsb->tyenv, t_ndep) == Ity_I64);
+   vassert(cc_op >= ARM64G_CC_OP_COPY && cc_op < ARM64G_CC_OP_NUMBER);
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU64(cc_op) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(t_dep1) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkexpr(t_dep2) ));
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(t_ndep) ));
+}
+
+/* Build IR to set the flags thunk after ADD or SUB. */
+static
+void setFlags_ADD_SUB ( Bool is64, Bool isSUB, IRTemp argL, IRTemp argR )
+{
+   IRTemp zero64 = newTemp(Ity_I64);
+   IRTemp depL   = argL;
+   IRTemp depR   = argR;
+   if (!is64) {
+      /* Widen the 32-bit operands: the thunk always holds I64s. */
+      depL = newTemp(Ity_I64);
+      depR = newTemp(Ity_I64);
+      assign(depL, unop(Iop_32Uto64, mkexpr(argL)));
+      assign(depR, unop(Iop_32Uto64, mkexpr(argR)));
+   }
+   assign(zero64, mkU64(0));
+   UInt cc_op
+      = isSUB ? (is64 ? ARM64G_CC_OP_SUB64 : ARM64G_CC_OP_SUB32)
+              : (is64 ? ARM64G_CC_OP_ADD64 : ARM64G_CC_OP_ADD32);
+   setFlags_D1_D2_ND(cc_op, depL, depR, zero64);
+}
+
+/* Build IR to set the flags thunk after ADC or SBC. */
+static
+void setFlags_ADC_SBC ( Bool is64, Bool isSBC,
+                        IRTemp argL, IRTemp argR, IRTemp oldC )
+{
+   IRTemp depL = argL;
+   IRTemp depR = argR;
+   IRTemp depC = oldC;
+   if (!is64) {
+      /* Widen all three 32-bit inputs: the thunk always holds I64s. */
+      depL = newTemp(Ity_I64);
+      depR = newTemp(Ity_I64);
+      depC = newTemp(Ity_I64);
+      assign(depL, unop(Iop_32Uto64, mkexpr(argL)));
+      assign(depR, unop(Iop_32Uto64, mkexpr(argR)));
+      assign(depC, unop(Iop_32Uto64, mkexpr(oldC)));
+   }
+   UInt cc_op
+      = isSBC ? (is64 ? ARM64G_CC_OP_SBC64 : ARM64G_CC_OP_SBC32)
+              : (is64 ? ARM64G_CC_OP_ADC64 : ARM64G_CC_OP_ADC32);
+   setFlags_D1_D2_ND(cc_op, depL, depR, depC);
+}
+
+/* Build IR to set the flags thunk after ADD or SUB, if the given
+   condition evaluates to True at run time.  If not, the flags are set
+   to the specified NZCV value.
+
+   |cond| is used directly as an ITE guard below, so presumably it is
+   :: Ity_I1 -- TODO confirm with callers.  |nzcv| carries the
+   fallback N,Z,C,V in its low 4 bits; it is shifted into bits 31:28
+   of CC_DEP1 for the OP_COPY case. */
+static
+void setFlags_ADD_SUB_conditionally (
+        Bool is64, Bool isSUB,
+        IRTemp cond, IRTemp argL, IRTemp argR, UInt nzcv
+     )
+{
+   /* Generate IR as follows:
+        CC_OP   = ITE(cond, OP_{ADD,SUB}{32,64}, OP_COPY)
+        CC_DEP1 = ITE(cond, argL64, nzcv << 28)
+        CC_DEP2 = ITE(cond, argR64, 0)
+        CC_NDEP = 0
+   */
+
+   IRTemp z64 = newTemp(Ity_I64);
+   assign(z64, mkU64(0));
+
+   /* Establish the operation and operands for the True case. */
+   IRTemp t_dep1 = IRTemp_INVALID;
+   IRTemp t_dep2 = IRTemp_INVALID;
+   UInt   t_op   = ARM64G_CC_OP_NUMBER;
+   /**/ if ( isSUB &&  is64) { t_op = ARM64G_CC_OP_SUB64; }
+   else if ( isSUB && !is64) { t_op = ARM64G_CC_OP_SUB32; }
+   else if (!isSUB &&  is64) { t_op = ARM64G_CC_OP_ADD64; }
+   else if (!isSUB && !is64) { t_op = ARM64G_CC_OP_ADD32; }
+   else                      { vassert(0); }
+   /* 32-bit operands are widened so the thunk always holds I64s. */
+   if (is64) {
+      t_dep1 = argL;
+      t_dep2 = argR;
+   } else {
+      t_dep1 = newTemp(Ity_I64);
+      t_dep2 = newTemp(Ity_I64);
+      assign(t_dep1, unop(Iop_32Uto64, mkexpr(argL)));
+      assign(t_dep2, unop(Iop_32Uto64, mkexpr(argR)));
+   }
+
+   /* Establish the operation and operands for the False case. */
+   IRTemp f_dep1 = newTemp(Ity_I64);
+   IRTemp f_dep2 = z64;
+   UInt   f_op   = ARM64G_CC_OP_COPY;
+   assign(f_dep1, mkU64(nzcv << 28));
+
+   /* Final thunk values */
+   IRTemp dep1 = newTemp(Ity_I64);
+   IRTemp dep2 = newTemp(Ity_I64);
+   IRTemp op   = newTemp(Ity_I64);
+
+   assign(op,   IRExpr_ITE(mkexpr(cond), mkU64(t_op), mkU64(f_op)));
+   assign(dep1, IRExpr_ITE(mkexpr(cond), mkexpr(t_dep1), mkexpr(f_dep1)));
+   assign(dep2, IRExpr_ITE(mkexpr(cond), mkexpr(t_dep2), mkexpr(f_dep2)));
+
+   /* finally .. write the four thunk fields */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkexpr(op) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(dep1) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkexpr(dep2) ));
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(z64) ));
+}
+
+/* Build IR to set the flags thunk after AND/OR/XOR or variants thereof. */
+static
+void setFlags_LOGIC ( Bool is64, IRTemp res )
+{
+   IRTemp res64  = res;
+   IRTemp zero64 = newTemp(Ity_I64);
+   UInt   cc_op  = ARM64G_CC_OP_LOGIC64;
+   if (!is64) {
+      /* Widen the 32-bit result: the thunk always holds I64s. */
+      res64 = newTemp(Ity_I64);
+      assign(res64, unop(Iop_32Uto64, mkexpr(res)));
+      cc_op = ARM64G_CC_OP_LOGIC32;
+   }
+   assign(zero64, mkU64(0));
+   setFlags_D1_D2_ND(cc_op, res64, zero64, zero64);
+}
+
+/* Build IR to set the flags thunk to the NZCV value held in bits
+   31:28 of |nzcv_28x0|. */
+static
+void setFlags_COPY ( IRTemp nzcv_28x0 )
+{
+   IRTemp zero64 = newTemp(Ity_I64);
+   assign(zero64, mkU64(0));
+   setFlags_D1_D2_ND(ARM64G_CC_OP_COPY, nzcv_28x0, zero64, zero64);
+}
+
+
+//ZZ /* Minor variant of the above that sets NDEP to zero (if it
+//ZZ    sets it at all) */
+//ZZ static void setFlags_D1_D2 ( UInt cc_op, IRTemp t_dep1,
+//ZZ                              IRTemp t_dep2,
+//ZZ                              IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
+//ZZ {
+//ZZ    IRTemp z32 = newTemp(Ity_I32);
+//ZZ    assign( z32, mkU32(0) );
+//ZZ    setFlags_D1_D2_ND( cc_op, t_dep1, t_dep2, z32, guardT );
+//ZZ }
+//ZZ 
+//ZZ 
+//ZZ /* Minor variant of the above that sets DEP2 to zero (if it
+//ZZ    sets it at all) */
+//ZZ static void setFlags_D1_ND ( UInt cc_op, IRTemp t_dep1,
+//ZZ                              IRTemp t_ndep,
+//ZZ                              IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
+//ZZ {
+//ZZ    IRTemp z32 = newTemp(Ity_I32);
+//ZZ    assign( z32, mkU32(0) );
+//ZZ    setFlags_D1_D2_ND( cc_op, t_dep1, z32, t_ndep, guardT );
+//ZZ }
+//ZZ 
+//ZZ 
+//ZZ /* Minor variant of the above that sets DEP2 and NDEP to zero (if it
+//ZZ    sets them at all) */
+//ZZ static void setFlags_D1 ( UInt cc_op, IRTemp t_dep1,
+//ZZ                           IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
+//ZZ {
+//ZZ    IRTemp z32 = newTemp(Ity_I32);
+//ZZ    assign( z32, mkU32(0) );
+//ZZ    setFlags_D1_D2_ND( cc_op, t_dep1, z32, z32, guardT );
+//ZZ }
+
+
+/*------------------------------------------------------------*/
+/*--- Misc math helpers                                    ---*/
+/*------------------------------------------------------------*/
+
+/* Generate IR for ((x & mask) >>u sh) | ((x << sh) & mask) */
+static IRTemp math_SWAPHELPER ( IRTemp x, ULong mask, Int sh )
+{
+   IRTemp maskT = newTemp(Ity_I64);
+   IRTemp res   = newTemp(Ity_I64);
+   vassert(sh >= 1 && sh <= 63);
+   assign(maskT, mkU64(mask));
+   /* Masked bits move down by |sh| ... */
+   IRExpr* hiToLo = binop(Iop_Shr64,
+                          binop(Iop_And64, mkexpr(x), mkexpr(maskT)),
+                          mkU8(sh));
+   /* ... and the complementary bits move up into the mask. */
+   IRExpr* loToHi = binop(Iop_And64,
+                          binop(Iop_Shl64, mkexpr(x), mkU8(sh)),
+                          mkexpr(maskT));
+   assign(res, binop(Iop_Or64, hiToLo, loToHi));
+   return res;
+}
+
+/* Generates byte swaps within 32-bit lanes. */
+static IRTemp math_UINTSWAP64 ( IRTemp src )
+{
+   IRTemp t = math_SWAPHELPER(src, 0xFF00FF00FF00FF00ULL, 8);
+   return math_SWAPHELPER(t, 0xFFFF0000FFFF0000ULL, 16);
+}
+
+/* Generates byte swaps within 16-bit lanes. */
+static IRTemp math_USHORTSWAP64 ( IRTemp src )
+{
+   return math_SWAPHELPER(src, 0xFF00FF00FF00FF00ULL, 8);
+}
+
+/* Generates a 64-bit byte swap. */
+static IRTemp math_BYTESWAP64 ( IRTemp src )
+{
+   IRTemp t = math_SWAPHELPER(src, 0xFF00FF00FF00FF00ULL, 8);
+   t = math_SWAPHELPER(t, 0xFFFF0000FFFF0000ULL, 16);
+   return math_SWAPHELPER(t, 0xFFFFFFFF00000000ULL, 32);
+}
+
+/* Generates a 64-bit bit swap: swap within bit-pairs, nibbles and
+   bytes, then byte-reverse the whole word. */
+static IRTemp math_BITSWAP64 ( IRTemp src )
+{
+   IRTemp t = math_SWAPHELPER(src, 0xAAAAAAAAAAAAAAAAULL, 1);
+   t = math_SWAPHELPER(t, 0xCCCCCCCCCCCCCCCCULL, 2);
+   t = math_SWAPHELPER(t, 0xF0F0F0F0F0F0F0F0ULL, 4);
+   return math_BYTESWAP64(t);
+}
+
+/* Duplicates the bits at the bottom of the given word to fill the
+   whole word.  src :: Ity_I64 is assumed to have zeroes everywhere
+   except for the bottom bits. */
+static IRTemp math_DUP_TO_64 ( IRTemp src, IRType srcTy )
+{
+   switch (srcTy) {
+      case Ity_I8: {
+         /* Double the field width three times: 8 -> 16 -> 32 -> 64. */
+         IRTemp w16 = newTemp(Ity_I64);
+         assign(w16, binop(Iop_Or64, mkexpr(src),
+                           binop(Iop_Shl64, mkexpr(src), mkU8(8))));
+         IRTemp w32 = newTemp(Ity_I64);
+         assign(w32, binop(Iop_Or64, mkexpr(w16),
+                           binop(Iop_Shl64, mkexpr(w16), mkU8(16))));
+         IRTemp w64 = newTemp(Ity_I64);
+         assign(w64, binop(Iop_Or64, mkexpr(w32),
+                           binop(Iop_Shl64, mkexpr(w32), mkU8(32))));
+         return w64;
+      }
+      case Ity_I16: {
+         IRTemp w32 = newTemp(Ity_I64);
+         assign(w32, binop(Iop_Or64, mkexpr(src),
+                           binop(Iop_Shl64, mkexpr(src), mkU8(16))));
+         IRTemp w64 = newTemp(Ity_I64);
+         assign(w64, binop(Iop_Or64, mkexpr(w32),
+                           binop(Iop_Shl64, mkexpr(w32), mkU8(32))));
+         return w64;
+      }
+      case Ity_I32: {
+         IRTemp w64 = newTemp(Ity_I64);
+         assign(w64, binop(Iop_Or64, mkexpr(src),
+                           binop(Iop_Shl64, mkexpr(src), mkU8(32))));
+         return w64;
+      }
+      case Ity_I64:
+         /* Already full width. */
+         return src;
+      default:
+         vassert(0);
+   }
+   /*NOTREACHED*/
+}
+
+
+/* Duplicates the src element exactly so as to fill a V128 value. */
+static IRTemp math_DUP_TO_V128 ( IRTemp src, IRType srcTy )
+{
+   IRTemp res = newTempV128();
+   switch (srcTy) {
+      case Ity_F64: {
+         /* Reinterpret as I64 and place in both halves. */
+         IRTemp i64 = newTemp(Ity_I64);
+         assign(i64, unop(Iop_ReinterpF64asI64, mkexpr(src)));
+         assign(res, binop(Iop_64HLtoV128, mkexpr(i64), mkexpr(i64)));
+         return res;
+      }
+      case Ity_F32: {
+         /* Reinterpret as I32, duplicate to an I64, then to both halves. */
+         IRTemp i64a = newTemp(Ity_I64);
+         assign(i64a,
+                unop(Iop_32Uto64, unop(Iop_ReinterpF32asI32, mkexpr(src))));
+         IRTemp i64b = newTemp(Ity_I64);
+         assign(i64b, binop(Iop_Or64,
+                            binop(Iop_Shl64, mkexpr(i64a), mkU8(32)),
+                            mkexpr(i64a)));
+         assign(res, binop(Iop_64HLtoV128, mkexpr(i64b), mkexpr(i64b)));
+         return res;
+      }
+      case Ity_I64:
+         assign(res, binop(Iop_64HLtoV128, mkexpr(src), mkexpr(src)));
+         return res;
+      case Ity_I32: case Ity_I16: case Ity_I8: {
+         /* Widen, replicate across 64 bits, then across both halves. */
+         IRTemp w   = newTemp(Ity_I64);
+         assign(w, widenUto64(srcTy, mkexpr(src)));
+         IRTemp rep = math_DUP_TO_64(w, srcTy);
+         assign(res, binop(Iop_64HLtoV128, mkexpr(rep), mkexpr(rep)));
+         return res;
+      }
+      default:
+         vassert(0);
+   }
+   /*NOTREACHED*/
+}
+
+
+/* |fullWidth| is a full V128 width result.  Return it unchanged when
+   bitQ is 1, and with the upper 64 bits zeroed when bitQ is 0. */
+static IRExpr* math_MAYBE_ZERO_HI64 ( UInt bitQ, IRTemp fullWidth )
+{
+   vassert(bitQ <= 1);
+   return bitQ == 1 ? mkexpr(fullWidth)
+                    : unop(Iop_ZeroHI64ofV128, mkexpr(fullWidth));
+}
+
+/* The same, but takes the full-width value as an expression. */
+static IRExpr* math_MAYBE_ZERO_HI64_fromE ( UInt bitQ, IRExpr* fullWidth )
+{
+   IRTemp t = newTempV128();
+   assign(t, fullWidth);
+   return math_MAYBE_ZERO_HI64(bitQ, t);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- FP comparison helpers                                ---*/
+/*------------------------------------------------------------*/
+
+/* irRes32 :: Ity_I32 holds a floating point comparison result encoded
+   as an IRCmpF64Result.  Generate code to convert it to an
+   ARM64-encoded (N,Z,C,V) group in the lowest 4 bits of an I64 value.
+   Assign a new temp to hold that value, and return the temp. */
+static
+IRTemp mk_convert_IRCmpF64Result_to_NZCV ( IRTemp irRes32 )
+{
+   IRTemp ix       = newTemp(Ity_I64);
+   IRTemp termL    = newTemp(Ity_I64);
+   IRTemp termR    = newTemp(Ity_I64);
+   IRTemp nzcv     = newTemp(Ity_I64);
+   IRTemp irRes    = newTemp(Ity_I64);   // irRes32, zero-widened to 64
+
+   /* This is where the fun starts.  We have to convert 'irRes' from
+      an IR-convention return result (IRCmpF64Result) to an
+      ARM-encoded (N,Z,C,V) group.  The final result is in the bottom
+      4 bits of 'nzcv'. */
+   /* Map compare result from IR to ARM(nzcv) */
+   /*
+      FP cmp result | IR   | ARM(nzcv)
+      --------------------------------
+      UN              0x45   0011
+      LT              0x01   1000
+      GT              0x00   0010
+      EQ              0x40   0110
+   */
+   /* Now since you're probably wondering WTF ..
+
+      ix fishes the useful bits out of the IR value, bits 6 and 0, and
+      places them side by side, giving a number which is 0, 1, 2 or 3.
+
+      termL is a sequence cooked up by GNU superopt.  It converts ix
+         into an almost correct value NZCV value (incredibly), except
+         for the case of UN, where it produces 0100 instead of the
+         required 0011.
+
+      termR is therefore a correction term, also computed from ix.  It
+         is 1 in the UN case and 0 for LT, GT and EQ.  Hence, to get
+         the final correct value, we subtract termR from termL.
+
+      Don't take my word for it.  There's a test program at the bottom
+      of guest_arm_toIR.c, to try this out with.
+   */
+   assign(irRes, unop(Iop_32Uto64, mkexpr(irRes32)));
+
+   /* ix = (irRes >> 5) & 3 | (irRes & 1), i.e. bits 6 and 0 side by side */
+   assign(
+      ix,
+      binop(Iop_Or64,
+            binop(Iop_And64,
+                  binop(Iop_Shr64, mkexpr(irRes), mkU8(5)),
+                  mkU64(3)),
+            binop(Iop_And64, mkexpr(irRes), mkU64(1))));
+
+   assign(
+      termL,
+      binop(Iop_Add64,
+            binop(Iop_Shr64,
+                  binop(Iop_Sub64,
+                        binop(Iop_Shl64,
+                              binop(Iop_Xor64, mkexpr(ix), mkU64(1)),
+                              mkU8(62)),
+                        mkU64(1)),
+                  mkU8(61)),
+            mkU64(1)));
+
+   /* termR = ix & (ix >> 1) & 1, i.e. 1 exactly when ix == 3 (UN) */
+   assign(
+      termR,
+      binop(Iop_And64,
+            binop(Iop_And64,
+                  mkexpr(ix),
+                  binop(Iop_Shr64, mkexpr(ix), mkU8(1))),
+            mkU64(1)));
+
+   assign(nzcv, binop(Iop_Sub64, mkexpr(termL), mkexpr(termR)));
+   return nzcv;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Data processing (immediate)                          ---*/
+/*------------------------------------------------------------*/
+
+/* Helper functions for supporting "DecodeBitMasks" */
+
+/* Rotate the low |width| bits of |x| right by |rot|; for width < 64
+   the result's bits at |width| and above are zero. */
+static ULong dbm_ROR ( Int width, ULong x, Int rot )
+{
+   vassert(width > 0 && width <= 64);
+   vassert(rot >= 0 && rot < width);
+   if (rot == 0)
+      return x;
+   ULong lo = x >> rot;
+   ULong hi = x << (width - rot);
+   ULong r  = lo | hi;
+   if (width < 64)
+      r &= (1ULL << width) - 1;
+   return r;
+}
+
+/* Replicate the low |esize| bits of |x| across all 64 bits of the
+   result.  |esize| must be a power of two in {2,4,8,16,32,64};
+   anything else panics. */
+static ULong dbm_RepTo64( Int esize, ULong x )
+{
+   switch (esize) {
+      case 2: case 4: case 8: case 16: case 32: case 64:
+         break;
+      default:
+         vpanic("dbm_RepTo64");
+         /*NOTREACHED*/
+         return 0;
+   }
+   if (esize < 64)
+      x &= (1ULL << esize) - 1;
+   /* Repeatedly double the width of the replicated field until it
+      fills the whole word. */
+   Int sh;
+   for (sh = esize; sh < 64; sh *= 2)
+      x |= (x << sh);
+   return x;
+}
+
+/* Index of the highest set bit of |x|, or -1 when |x| is zero. */
+static Int dbm_highestSetBit ( ULong x )
+{
+   Int i = 63;
+   while (i >= 0 && ((x >> i) & 1) == 0)
+      i--;
+   /* Falling all the way through is only possible for x == 0. */
+   vassert(i >= 0 || x == 0);
+   return i;
+}
+
+/* Implementation of the ARMv8 "DecodeBitMasks" pseudocode function:
+   computes the wmask/tmask pair for logical-immediate and bitfield
+   instructions from the (immN, imms, immr) instruction fields.
+   Returns False for reserved encodings.  |wmask| and |tmask| may each
+   be NULL if the caller does not need that output.  M is the target
+   register width, 32 or 64. */
+static
+Bool dbm_DecodeBitMasks ( /*OUT*/ULong* wmask, /*OUT*/ULong* tmask,
+                          ULong immN, ULong imms, ULong immr, Bool immediate,
+                          UInt M /*32 or 64*/)
+{
+   vassert(immN < (1ULL << 1));
+   vassert(imms < (1ULL << 6));
+   vassert(immr < (1ULL << 6));
+   vassert(immediate == False || immediate == True);
+   vassert(M == 32 || M == 64);
+
+   /* len selects the element size: position of the highest set bit
+      of immN:NOT(imms). */
+   Int len = dbm_highestSetBit( ((immN << 6) & 64) | ((~imms) & 63) );
+   if (len < 1) { /* printf("fail1\n"); */ return False; }
+   vassert(len <= 6);
+   vassert(M >= (1 << len));
+
+   vassert(len >= 1 && len <= 6);
+   ULong levels = // (zeroes(6 - len) << (6-len)) | ones(len);
+                  (1 << len) - 1;
+   vassert(levels >= 1 && levels <= 63);
+
+   /* For logical immediates an all-ones element is reserved, since it
+      would be the same as a smaller element size. */
+   if (immediate && ((imms & levels) == levels)) {
+      /* printf("fail2 imms %llu levels %llu len %d\n", imms, levels, len); */
+      return False;
+   }
+
+   ULong S = imms & levels;
+   ULong R = immr & levels;
+   Int   diff = S - R;
+   diff &= 63;
+   Int esize = 1 << len;
+   vassert(2 <= esize && esize <= 64);
+
+   /* Be careful of these (1ULL << (S+1)) - 1 expressions, and the
+      same below with d.  S can be 63 in which case we have an out of
+      range and hence undefined shift. */
+   vassert(S >= 0 && S <= 63);
+   vassert(esize >= (S+1));
+   ULong elem_s = // Zeroes(esize-(S+1)):Ones(S+1)
+                  //(1ULL << (S+1)) - 1;
+                  ((1ULL << S) - 1) + (1ULL << S);
+
+   Int d = // diff<len-1:0>
+           diff & ((1 << len)-1);
+   vassert(esize >= (d+1));
+   vassert(d >= 0 && d <= 63);
+
+   ULong elem_d = // Zeroes(esize-(d+1)):Ones(d+1)
+                  //(1ULL << (d+1)) - 1;
+                  ((1ULL << d) - 1) + (1ULL << d);
+
+   if (esize != 64) vassert(elem_s < (1ULL << esize));
+   if (esize != 64) vassert(elem_d < (1ULL << esize));
+
+   /* wmask: the rotated, replicated element; tmask: the unrotated
+      "top" mask used by bitfield instructions. */
+   if (wmask) *wmask = dbm_RepTo64(esize, dbm_ROR(esize, elem_s, R));
+   if (tmask) *tmask = dbm_RepTo64(esize, elem_d);
+
+   return True;
+}
+
+
+/* Decode one instruction from the ARM64 "data processing (immediate)"
+   group and emit IR for it.  Covers, per insn[28:23]: PC-relative
+   addressing (ADR/ADRP), ADD/SUB with a shifted 12-bit immediate,
+   logical ops with a bitmask immediate, MOV{Z,N,K}, the bitfield
+   moves {U,S,}BFM, and EXTR.  Returns True iff |insn| was decoded,
+   in which case the guest-state updates have been emitted as IR.
+   |dres| is neither read nor written by this decoder. */
+static
+Bool dis_ARM64_data_processing_immediate(/*MB_OUT*/DisResult* dres,
+                                         UInt insn)
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+
+   /* insn[28:23]
+      10000x PC-rel addressing
+      10001x Add/subtract (immediate)
+      100100 Logical (immediate)
+      100101 Move Wide (immediate)
+      100110 Bitfield
+      100111 Extract
+   */
+
+   /* ------------------ ADD/SUB{,S} imm12 ------------------ */
+   if (INSN(28,24) == BITS5(1,0,0,0,1)) {
+      Bool is64   = INSN(31,31) == 1;
+      Bool isSub  = INSN(30,30) == 1;
+      Bool setCC  = INSN(29,29) == 1;
+      UInt sh     = INSN(23,22);
+      UInt uimm12 = INSN(21,10);
+      UInt nn     = INSN(9,5);
+      UInt dd     = INSN(4,0);
+      const HChar* nm = isSub ? "sub" : "add";
+      if (sh >= 2) {
+         /* Invalid; fall through */
+      } else {
+         vassert(sh <= 1);
+         /* sh==1 selects the "LSL #12" form of the immediate. */
+         uimm12 <<= (12 * sh);
+         if (is64) {
+            IRTemp argL  = newTemp(Ity_I64);
+            IRTemp argR  = newTemp(Ity_I64);
+            IRTemp res   = newTemp(Ity_I64);
+            assign(argL, getIReg64orSP(nn));
+            assign(argR, mkU64(uimm12));
+            assign(res,  binop(isSub ? Iop_Sub64 : Iop_Add64,
+                               mkexpr(argL), mkexpr(argR)));
+            if (setCC) {
+               /* Flag-setting form writes Rd (ZR-encoded), not SP. */
+               putIReg64orZR(dd, mkexpr(res));
+               setFlags_ADD_SUB(True/*is64*/, isSub, argL, argR);
+               DIP("%ss %s, %s, 0x%x\n",
+                   nm, nameIReg64orZR(dd), nameIReg64orSP(nn), uimm12);
+            } else {
+               putIReg64orSP(dd, mkexpr(res));
+               DIP("%s %s, %s, 0x%x\n",
+                   nm, nameIReg64orSP(dd), nameIReg64orSP(nn), uimm12);
+            }
+         } else {
+            IRTemp argL  = newTemp(Ity_I32);
+            IRTemp argR  = newTemp(Ity_I32);
+            IRTemp res   = newTemp(Ity_I32);
+            assign(argL, getIReg32orSP(nn));
+            assign(argR, mkU32(uimm12));
+            assign(res,  binop(isSub ? Iop_Sub32 : Iop_Add32,
+                               mkexpr(argL), mkexpr(argR)));
+            if (setCC) {
+               putIReg32orZR(dd, mkexpr(res));
+               setFlags_ADD_SUB(False/*!is64*/, isSub, argL, argR);
+               DIP("%ss %s, %s, 0x%x\n",
+                   nm, nameIReg32orZR(dd), nameIReg32orSP(nn), uimm12);
+            } else {
+               putIReg32orSP(dd, mkexpr(res));
+               DIP("%s %s, %s, 0x%x\n",
+                   nm, nameIReg32orSP(dd), nameIReg32orSP(nn), uimm12);
+            }
+         }
+         return True;
+      }
+   }
+
+   /* -------------------- ADR/ADRP -------------------- */
+   if (INSN(28,24) == BITS5(1,0,0,0,0)) {
+      UInt  bP    = INSN(31,31);
+      UInt  immLo = INSN(30,29);
+      UInt  immHi = INSN(23,5);
+      UInt  rD    = INSN(4,0);
+      ULong uimm  = (immHi << 2) | immLo;
+      ULong simm  = sx_to_64(uimm, 21);
+      ULong val;
+      /* ADRP forms a 4KB-page address: page(PC) + (simm << 12).
+         ADR is simply PC + simm. */
+      if (bP) {
+         val = (guest_PC_curr_instr & 0xFFFFFFFFFFFFF000ULL) + (simm << 12);
+      } else {
+         val = guest_PC_curr_instr + simm;
+      }
+      putIReg64orZR(rD, mkU64(val));
+      DIP("adr%s %s, 0x%llx\n", bP ? "p" : "", nameIReg64orZR(rD), val);
+      return True;
+   }
+
+   /* -------------------- LOGIC(imm) -------------------- */
+   if (INSN(28,23) == BITS6(1,0,0,1,0,0)) {
+      /* 31 30 28     22 21   15   9  4
+         sf op 100100 N  immr imms Rn Rd
+           op=00: AND  Rd|SP, Rn, #imm
+           op=01: ORR  Rd|SP, Rn, #imm
+           op=10: EOR  Rd|SP, Rn, #imm
+           op=11: ANDS Rd|ZR, Rn, #imm
+      */
+      Bool  is64 = INSN(31,31) == 1;
+      UInt  op   = INSN(30,29);
+      UInt  N    = INSN(22,22);
+      UInt  immR = INSN(21,16);
+      UInt  immS = INSN(15,10);
+      UInt  nn   = INSN(9,5);
+      UInt  dd   = INSN(4,0);
+      ULong imm  = 0;
+      Bool  ok;
+      if (N == 1 && !is64) 
+         goto after_logic_imm; /* not allowed; fall through */
+      /* Expand N:immS:immR into the actual bitmask immediate. */
+      ok = dbm_DecodeBitMasks(&imm, NULL,
+                              N, immS, immR, True, is64 ? 64 : 32);
+      if (!ok)
+         goto after_logic_imm;
+
+      const HChar* names[4] = { "and", "orr", "eor", "ands" };
+      const IROp   ops64[4] = { Iop_And64, Iop_Or64, Iop_Xor64, Iop_And64 };
+      const IROp   ops32[4] = { Iop_And32, Iop_Or32, Iop_Xor32, Iop_And32 };
+
+      vassert(op < 4);
+      if (is64) {
+         IRExpr* argL = getIReg64orZR(nn);
+         IRExpr* argR = mkU64(imm);
+         IRTemp  res  = newTemp(Ity_I64);
+         assign(res, binop(ops64[op], argL, argR));
+         if (op < 3) {
+            /* AND/ORR/EOR: destination may be SP, no flags. */
+            putIReg64orSP(dd, mkexpr(res));
+            DIP("%s %s, %s, 0x%llx\n", names[op],
+                nameIReg64orSP(dd), nameIReg64orZR(nn), imm);
+         } else {
+            /* ANDS: destination is ZR-encoded and NZCV is set. */
+            putIReg64orZR(dd, mkexpr(res));
+            setFlags_LOGIC(True/*is64*/, res);
+            DIP("%s %s, %s, 0x%llx\n", names[op],
+                nameIReg64orZR(dd), nameIReg64orZR(nn), imm);
+         }
+      } else {
+         IRExpr* argL = getIReg32orZR(nn);
+         IRExpr* argR = mkU32((UInt)imm);
+         IRTemp  res  = newTemp(Ity_I32);
+         assign(res, binop(ops32[op], argL, argR));
+         if (op < 3) {
+            putIReg32orSP(dd, mkexpr(res));
+            DIP("%s %s, %s, 0x%x\n", names[op],
+                nameIReg32orSP(dd), nameIReg32orZR(nn), (UInt)imm);
+         } else {
+            putIReg32orZR(dd, mkexpr(res));
+            setFlags_LOGIC(False/*!is64*/, res);
+            DIP("%s %s, %s, 0x%x\n", names[op],
+                nameIReg32orZR(dd), nameIReg32orZR(nn), (UInt)imm);
+         }
+      }
+      return True;
+   }
+   after_logic_imm:
+
+   /* -------------------- MOV{Z,N,K} -------------------- */
+   if (INSN(28,23) == BITS6(1,0,0,1,0,1)) {
+      /* 31 30 28      22 20    4
+         |  |  |       |  |     |
+         sf 10 100 101 hw imm16 Rd   MOV(Z) Rd, (imm16 << (16*hw))
+         sf 00 100 101 hw imm16 Rd   MOV(N) Rd, ~(imm16 << (16*hw))
+         sf 11 100 101 hw imm16 Rd   MOV(K) Rd, (imm16 << (16*hw))
+      */
+      Bool is64   = INSN(31,31) == 1;
+      UInt subopc = INSN(30,29);
+      UInt hw     = INSN(22,21);
+      UInt imm16  = INSN(20,5);
+      UInt dd     = INSN(4,0);
+      if (subopc == BITS2(0,1) || (!is64 && hw >= 2)) {
+         /* invalid; fall through */
+      } else {
+         ULong imm64 = ((ULong)imm16) << (16 * hw);
+         if (!is64)
+            vassert(imm64 < 0x100000000ULL);
+         switch (subopc) {
+            case BITS2(1,0): // MOVZ
+               putIRegOrZR(is64, dd, is64 ? mkU64(imm64) : mkU32((UInt)imm64));
+               DIP("movz %s, 0x%llx\n", nameIRegOrZR(is64, dd), imm64);
+               break;
+            case BITS2(0,0): // MOVN
+               imm64 = ~imm64;
+               if (!is64)
+                  imm64 &= 0xFFFFFFFFULL;
+               putIRegOrZR(is64, dd, is64 ? mkU64(imm64) : mkU32((UInt)imm64));
+               DIP("movn %s, 0x%llx\n", nameIRegOrZR(is64, dd), imm64);
+               break;
+            case BITS2(1,1): // MOVK
+               /* This is more complex.  We are inserting a slice into
+                  the destination register, so we need to have the old
+                  value of it. */
+               if (is64) {
+                  IRTemp old = newTemp(Ity_I64);
+                  assign(old, getIReg64orZR(dd));
+                  ULong mask = 0xFFFFULL << (16 * hw);
+                  IRExpr* res
+                     = binop(Iop_Or64, 
+                             binop(Iop_And64, mkexpr(old), mkU64(~mask)),
+                             mkU64(imm64));
+                  putIReg64orZR(dd, res);
+                  DIP("movk %s, 0x%x, lsl %u\n",
+                      nameIReg64orZR(dd), imm16, 16*hw);
+               } else {
+                  IRTemp old = newTemp(Ity_I32);
+                  assign(old, getIReg32orZR(dd));
+                  vassert(hw <= 1);
+                  UInt mask = 0xFFFF << (16 * hw);
+                  IRExpr* res
+                     = binop(Iop_Or32, 
+                             binop(Iop_And32, mkexpr(old), mkU32(~mask)),
+                             mkU32((UInt)imm64));
+                  putIReg32orZR(dd, res);
+                  DIP("movk %s, 0x%x, lsl %u\n",
+                      nameIReg32orZR(dd), imm16, 16*hw);
+               }
+               break;
+            default:
+               vassert(0);
+         }
+         return True;
+      }
+   }
+
+   /* -------------------- {U,S,}BFM -------------------- */
+   /*    30 28     22 21   15   9  4
+
+      sf 10 100110 N  immr imms nn dd
+         UBFM Wd, Wn, #immr, #imms   when sf=0, N=0, immr[5]=0, imms[5]=0
+         UBFM Xd, Xn, #immr, #imms   when sf=1, N=1
+
+      sf 00 100110 N  immr imms nn dd
+         SBFM Wd, Wn, #immr, #imms   when sf=0, N=0, immr[5]=0, imms[5]=0
+         SBFM Xd, Xn, #immr, #imms   when sf=1, N=1
+
+      sf 01 100110 N  immr imms nn dd
+         BFM Wd, Wn, #immr, #imms   when sf=0, N=0, immr[5]=0, imms[5]=0
+         BFM Xd, Xn, #immr, #imms   when sf=1, N=1
+   */
+   if (INSN(28,23) == BITS6(1,0,0,1,1,0)) {
+      UInt sf     = INSN(31,31);
+      UInt opc    = INSN(30,29);
+      UInt N      = INSN(22,22);
+      UInt immR   = INSN(21,16);
+      UInt immS   = INSN(15,10);
+      UInt nn     = INSN(9,5);
+      UInt dd     = INSN(4,0);
+      Bool inZero = False;   /* take the "bottom" from zero rather than Rd */
+      Bool extend = False;   /* sign-extend the result (SBFM) */
+      const HChar* nm = "???";
+      /* skip invalid combinations */
+      switch (opc) {
+         case BITS2(0,0):
+            inZero = True; extend = True; nm = "sbfm"; break;
+         case BITS2(0,1):
+            inZero = False; extend = False; nm = "bfm"; break;
+         case BITS2(1,0):
+            inZero = True; extend = False; nm = "ubfm"; break;
+         case BITS2(1,1):
+            goto after_bfm; /* invalid */
+         default:
+            vassert(0);
+      }
+      if (sf == 1 && N != 1) goto after_bfm;
+      if (sf == 0 && (N != 0 || ((immR >> 5) & 1) != 0
+                             || ((immS >> 5) & 1) != 0)) goto after_bfm;
+      ULong wmask = 0, tmask = 0;
+      Bool ok = dbm_DecodeBitMasks(&wmask, &tmask,
+                                   N, immS, immR, False, sf == 1 ? 64 : 32);
+      if (!ok) goto after_bfm; /* DecodeBitMasks rejected the field;
+                                  treat as undecodable */
+
+      Bool   is64 = sf == 1;
+      IRType ty   = is64 ? Ity_I64 : Ity_I32;
+
+      IRTemp dst = newTemp(ty);
+      IRTemp src = newTemp(ty);
+      IRTemp bot = newTemp(ty);
+      IRTemp top = newTemp(ty);
+      IRTemp res = newTemp(ty);
+      assign(dst, inZero ? mkU(ty,0) : getIRegOrZR(is64, dd));
+      assign(src, getIRegOrZR(is64, nn));
+      /* perform bitfield move on low bits */
+      assign(bot, binop(mkOR(ty),
+                        binop(mkAND(ty), mkexpr(dst), mkU(ty, ~wmask)),
+                        binop(mkAND(ty), mkexpr(mathROR(ty, src, immR)),
+                                         mkU(ty, wmask))));
+      /* determine extension bits (sign, zero or dest register) */
+      assign(top, mkexpr(extend ? mathREPLICATE(ty, src, immS) : dst));
+      /* combine extension bits and result bits */
+      assign(res, binop(mkOR(ty),
+                        binop(mkAND(ty), mkexpr(top), mkU(ty, ~tmask)),
+                        binop(mkAND(ty), mkexpr(bot), mkU(ty, tmask))));
+      putIRegOrZR(is64, dd, mkexpr(res));
+      DIP("%s %s, %s, immR=%u, immS=%u\n",
+          nm, nameIRegOrZR(is64, dd), nameIRegOrZR(is64, nn), immR, immS);
+      return True;
+   }
+   after_bfm:
+
+   /* ---------------------- EXTR ---------------------- */
+   /*   30 28     22 20 15   9 4
+      1 00 100111 10 m  imm6 n d  EXTR Xd, Xn, Xm, #imm6
+      0 00 100111 00 m  imm6 n d  EXTR Wd, Wn, Wm, #imm6 when #imm6 < 32
+   */
+   if (INSN(30,23) == BITS8(0,0,1,0,0,1,1,1) && INSN(21,21) == 0) {
+      Bool is64  = INSN(31,31) == 1;
+      UInt mm    = INSN(20,16);
+      UInt imm6  = INSN(15,10);
+      UInt nn    = INSN(9,5);
+      UInt dd    = INSN(4,0);
+      Bool valid = True;
+      /* sf (bit 31) must equal N (bit 22). */
+      if (INSN(31,31) != INSN(22,22))
+        valid = False;
+      if (!is64 && imm6 >= 32)
+        valid = False;
+      if (!valid) goto after_extr;
+      IRType ty    = is64 ? Ity_I64 : Ity_I32;
+      IRTemp srcHi = newTemp(ty);
+      IRTemp srcLo = newTemp(ty);
+      IRTemp res   = newTemp(ty);
+      assign(srcHi, getIRegOrZR(is64, nn));
+      assign(srcLo, getIRegOrZR(is64, mm));
+      if (imm6 == 0) {
+        /* Shift by zero: result is srcLo; also avoids an
+           out-of-range mkU8(szBits-0) shift below. */
+        assign(res, mkexpr(srcLo));
+      } else {
+        UInt szBits = 8 * sizeofIRType(ty);
+        vassert(imm6 > 0 && imm6 < szBits);
+        assign(res, binop(mkOR(ty),
+                          binop(mkSHL(ty), mkexpr(srcHi), mkU8(szBits-imm6)),
+                          binop(mkSHR(ty), mkexpr(srcLo), mkU8(imm6))));
+      }
+      putIRegOrZR(is64, dd, mkexpr(res));
+      DIP("extr %s, %s, %s, #%u\n",
+          nameIRegOrZR(is64,dd),
+          nameIRegOrZR(is64,nn), nameIRegOrZR(is64,mm), imm6);
+      return True;
+   }
+  after_extr:
+
+   /* Nothing matched: report which decoder group failed and give up;
+      the caller deals with undecoded instructions. */
+   vex_printf("ARM64 front end: data_processing_immediate\n");
+   return False;
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Data processing (register) instructions              ---*/
+/*------------------------------------------------------------*/
+
+/* Map a 2-bit shift-type encoding to its mnemonic suffix:
+   0 -> "lsl", 1 -> "lsr", 2 -> "asr", 3 -> "ror".
+   Any other value is an internal error and asserts. */
+static const HChar* nameSH ( UInt sh ) {
+   static const HChar* const shNames[4] = { "lsl", "lsr", "asr", "ror" };
+   vassert(sh < 4);
+   return shNames[sh];
+}
+
+/* Emit IR that reads integer register |regNo| (31 meaning ZR),
+   applies the immediate shift selected by |sh_how| to it, and, if
+   |invert| is set, bitwise-NOTs the shifted value.  Returns a fresh
+   32- or 64-bit temporary (per |is64|) holding the result.
+
+   sh_how coding: 00=SHL, 01=SHR, 10=SAR, 11=ROR.  sh_amt must lie in
+   0 .. (is64 ? 64 : 32)-1.  ROR is always handled here; for
+   instructions where ROR is not permitted, rejecting it is the
+   caller's responsibility.  Beware of the (1ULL << (S+1))-style
+   hazards elsewhere: here shifts are bounded by the asserts below. */
+static IRTemp getShiftedIRegOrZR ( Bool is64,
+                                   UInt sh_how, UInt sh_amt, UInt regNo,
+                                   Bool invert )
+{
+   vassert(sh_how < 4);
+   vassert(sh_amt < (is64 ? 64 : 32));
+   IRType ty = is64 ? Ity_I64 : Ity_I32;
+   /* Fetch the unshifted register value. */
+   IRTemp regVal = newTemp(ty);
+   assign(regVal, getIRegOrZR(is64, regNo));
+   /* Apply the requested shift. */
+   IRTemp shifted = newTemp(ty);
+   switch (sh_how) {
+      case BITS2(0,0): /* LSL */
+         assign(shifted, binop(mkSHL(ty), mkexpr(regVal), mkU8(sh_amt)));
+         break;
+      case BITS2(0,1): /* LSR */
+         assign(shifted, binop(mkSHR(ty), mkexpr(regVal), mkU8(sh_amt)));
+         break;
+      case BITS2(1,0): /* ASR */
+         assign(shifted, binop(mkSAR(ty), mkexpr(regVal), mkU8(sh_amt)));
+         break;
+      case BITS2(1,1): /* ROR */
+         assign(shifted, mkexpr(mathROR(ty, regVal, sh_amt)));
+         break;
+      default:
+         vassert(0);
+   }
+   if (!invert)
+      return shifted;
+   /* Optional bitwise inversion of the shifted value. */
+   IRTemp notted = newTemp(ty);
+   assign(notted, unop(mkNOT(ty), mkexpr(shifted)));
+   return notted;
+}
+
+
+static
+Bool dis_ARM64_data_processing_register(/*MB_OUT*/DisResult* dres,
+                                        UInt insn)
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+
+   /* ------------------- ADD/SUB(reg) ------------------- */
+   /* x==0 => 32 bit op      x==1 => 64 bit op
+      sh: 00=LSL, 01=LSR, 10=ASR, 11=ROR(NOT ALLOWED)
+
+      31 30 29 28    23 21 20 15   9  4
+      |  |  |  |     |  |  |  |    |  |
+      x  0  0  01011 sh 0  Rm imm6 Rn Rd   ADD  Rd,Rn, sh(Rm,imm6)
+      x  0  1  01011 sh 0  Rm imm6 Rn Rd   ADDS Rd,Rn, sh(Rm,imm6)
+      x  1  0  01011 sh 0  Rm imm6 Rn Rd   SUB  Rd,Rn, sh(Rm,imm6)
+      x  1  1  01011 sh 0  Rm imm6 Rn Rd   SUBS Rd,Rn, sh(Rm,imm6)
+   */
+   if (INSN(28,24) == BITS5(0,1,0,1,1) && INSN(21,21) == 0) {
+      UInt   bX    = INSN(31,31);
+      UInt   bOP   = INSN(30,30); /* 0: ADD, 1: SUB */
+      UInt   bS    = INSN(29, 29); /* set flags? */
+      UInt   sh    = INSN(23,22);
+      UInt   rM    = INSN(20,16);
+      UInt   imm6  = INSN(15,10);
+      UInt   rN    = INSN(9,5);
+      UInt   rD    = INSN(4,0);
+      Bool   isSUB = bOP == 1;
+      Bool   is64  = bX == 1;
+      IRType ty    = is64 ? Ity_I64 : Ity_I32;
+      if ((!is64 && imm6 > 31) || sh == BITS2(1,1)) {
+         /* invalid; fall through */
+      } else {
+         IRTemp argL = newTemp(ty);
+         assign(argL, getIRegOrZR(is64, rN));
+         IRTemp argR = getShiftedIRegOrZR(is64, sh, imm6, rM, False);
+         IROp   op   = isSUB ? mkSUB(ty) : mkADD(ty);
+         IRTemp res  = newTemp(ty);
+         assign(res, binop(op, mkexpr(argL), mkexpr(argR)));
+         if (rD != 31) putIRegOrZR(is64, rD, mkexpr(res));
+         if (bS) {
+            setFlags_ADD_SUB(is64, isSUB, argL, argR);
+         }
+         DIP("%s%s %s, %s, %s, %s #%u\n",
+             bOP ? "sub" : "add", bS ? "s" : "",
+             nameIRegOrZR(is64, rD), nameIRegOrZR(is64, rN),
+             nameIRegOrZR(is64, rM), nameSH(sh), imm6);
+         return True;
+      }
+   }
+
+   /* ------------------- ADC/SBC(reg) ------------------- */
+   /* x==0 => 32 bit op      x==1 => 64 bit op
+
+      31 30 29 28    23 21 20 15     9  4
+      |  |  |  |     |  |  |  |      |  |
+      x  0  0  11010 00 0  Rm 000000 Rn Rd   ADC  Rd,Rn,Rm
+      x  0  1  11010 00 0  Rm 000000 Rn Rd   ADCS Rd,Rn,Rm
+      x  1  0  11010 00 0  Rm 000000 Rn Rd   SBC  Rd,Rn,Rm
+      x  1  1  11010 00 0  Rm 000000 Rn Rd   SBCS Rd,Rn,Rm
+   */
+
+   if (INSN(28,21) == BITS8(1,1,0,1,0,0,0,0) && INSN(15,10) == 0 ) {
+      UInt   bX    = INSN(31,31);
+      UInt   bOP   = INSN(30,30); /* 0: ADC, 1: SBC */
+      UInt   bS    = INSN(29,29); /* set flags */
+      UInt   rM    = INSN(20,16);
+      UInt   rN    = INSN(9,5);
+      UInt   rD    = INSN(4,0);
+
+      Bool   isSUB = bOP == 1;
+      Bool   is64  = bX == 1;
+      IRType ty    = is64 ? Ity_I64 : Ity_I32;
+
+      IRTemp oldC = newTemp(ty);
+      assign(oldC,
+             is64 ? mk_arm64g_calculate_flag_c()
+                  : unop(Iop_64to32, mk_arm64g_calculate_flag_c()) );
+
+      IRTemp argL = newTemp(ty);
+      assign(argL, getIRegOrZR(is64, rN));
+      IRTemp argR = newTemp(ty);
+      assign(argR, getIRegOrZR(is64, rM));
+
+      IROp   op   = isSUB ? mkSUB(ty) : mkADD(ty);
+      IRTemp res  = newTemp(ty);
+      if (isSUB) {
+         IRExpr* one = is64 ? mkU64(1) : mkU32(1);
+         IROp xorOp = is64 ? Iop_Xor64 : Iop_Xor32;
+         assign(res,
+                binop(op,
+                      binop(op, mkexpr(argL), mkexpr(argR)),
+                      binop(xorOp, mkexpr(oldC), one)));
+      } else {
+         assign(res,
+                binop(op,
+                      binop(op, mkexpr(argL), mkexpr(argR)),
+                      mkexpr(oldC)));
+      }
+
+      if (rD != 31) putIRegOrZR(is64, rD, mkexpr(res));
+
+      if (bS) {
+         setFlags_ADC_SBC(is64, isSUB, argL, argR, oldC);
+      }
+
+      DIP("%s%s %s, %s, %s\n",
+          bOP ? "sbc" : "adc", bS ? "s" : "",
+          nameIRegOrZR(is64, rD), nameIRegOrZR(is64, rN),
+          nameIRegOrZR(is64, rM));
+      return True;
+   }
+
+   /* -------------------- LOGIC(reg) -------------------- */   
+   /* x==0 => 32 bit op      x==1 => 64 bit op
+      N==0 => inv? is no-op (no inversion)
+      N==1 => inv? is NOT
+      sh: 00=LSL, 01=LSR, 10=ASR, 11=ROR
+
+      31 30 28    23 21 20 15   9  4
+      |  |  |     |  |  |  |    |  |
+      x  00 01010 sh N  Rm imm6 Rn Rd  AND  Rd,Rn, inv?(sh(Rm,imm6))
+      x  01 01010 sh N  Rm imm6 Rn Rd  ORR  Rd,Rn, inv?(sh(Rm,imm6))
+      x  10 01010 sh N  Rm imm6 Rn Rd  EOR  Rd,Rn, inv?(sh(Rm,imm6))
+      x  11 01010 sh N  Rm imm6 Rn Rd  ANDS Rd,Rn, inv?(sh(Rm,imm6))
+      With N=1, the names are: BIC ORN EON BICS
+   */
+   if (INSN(28,24) == BITS5(0,1,0,1,0)) {
+      UInt   bX   = INSN(31,31);
+      UInt   sh   = INSN(23,22);
+      UInt   bN   = INSN(21,21);
+      UInt   rM   = INSN(20,16);
+      UInt   imm6 = INSN(15,10);
+      UInt   rN   = INSN(9,5);
+      UInt   rD   = INSN(4,0);
+      Bool   is64 = bX == 1;
+      IRType ty   = is64 ? Ity_I64 : Ity_I32;
+      if (!is64 && imm6 > 31) {
+         /* invalid; fall though */
+      } else {
+         IRTemp argL = newTemp(ty);
+         assign(argL, getIRegOrZR(is64, rN));
+         IRTemp argR = getShiftedIRegOrZR(is64, sh, imm6, rM, bN == 1);
+         IROp   op   = Iop_INVALID;
+         switch (INSN(30,29)) {
+            case BITS2(0,0): case BITS2(1,1): op = mkAND(ty); break;
+            case BITS2(0,1):                  op = mkOR(ty);  break;
+            case BITS2(1,0):                  op = mkXOR(ty); break;
+            default: vassert(0);
+         }
+         IRTemp res = newTemp(ty);
+         assign(res, binop(op, mkexpr(argL), mkexpr(argR)));
+         if (INSN(30,29) == BITS2(1,1)) {
+            setFlags_LOGIC(is64, res);
+         }
+         putIRegOrZR(is64, rD, mkexpr(res));
+
+         static const HChar* names_op[8]
+            = { "and", "orr", "eor", "ands", "bic", "orn", "eon", "bics" };
+         vassert(((bN << 2) | INSN(30,29)) < 8);
+         const HChar* nm_op = names_op[(bN << 2) | INSN(30,29)];
+         /* Special-case the printing of "MOV" */
+         if (rN == 31/*zr*/ && sh == 0/*LSL*/ && imm6 == 0 && bN == 0) {
+            DIP("mov %s, %s\n", nameIRegOrZR(is64, rD),
+                                nameIRegOrZR(is64, rM));
+         } else {
+            DIP("%s %s, %s, %s, %s #%u\n", nm_op,
+                nameIRegOrZR(is64, rD), nameIRegOrZR(is64, rN),
+                nameIRegOrZR(is64, rM), nameSH(sh), imm6);
+         }
+         return True;
+      }
+   }
+
+   /* -------------------- {U,S}MULH -------------------- */   
+   /* 31       23 22 20 15     9   4
+      10011011 1  10 Rm 011111 Rn Rd   UMULH Xd,Xn,Xm
+      10011011 0  10 Rm 011111 Rn Rd   SMULH Xd,Xn,Xm
+   */
+   if (INSN(31,24) == BITS8(1,0,0,1,1,0,1,1)
+       && INSN(22,21) == BITS2(1,0) && INSN(15,10) == BITS6(0,1,1,1,1,1)) {
+      Bool isU = INSN(23,23) == 1;
+      UInt mm  = INSN(20,16);
+      UInt nn  = INSN(9,5);
+      UInt dd  = INSN(4,0);
+      putIReg64orZR(dd, unop(Iop_128HIto64,
+                             binop(isU ? Iop_MullU64 : Iop_MullS64,
+                                   getIReg64orZR(nn), getIReg64orZR(mm))));
+      DIP("%cmulh %s, %s, %s\n", 
+          isU ? 'u' : 's',
+          nameIReg64orZR(dd), nameIReg64orZR(nn), nameIReg64orZR(mm));
+      return True;
+   }
+
+   /* -------------------- M{ADD,SUB} -------------------- */   
+   /* 31 30           20 15 14 9 4
+      sf 00 11011 000 m  0  a  n r   MADD Rd,Rn,Rm,Ra  d = a+m*n
+      sf 00 11011 000 m  1  a  n r   MADD Rd,Rn,Rm,Ra  d = a-m*n
+   */
+   if (INSN(30,21) == BITS10(0,0,1,1,0,1,1,0,0,0)) {
+      Bool is64  = INSN(31,31) == 1;
+      UInt mm    = INSN(20,16);
+      Bool isAdd = INSN(15,15) == 0;
+      UInt aa    = INSN(14,10);
+      UInt nn    = INSN(9,5);
+      UInt dd    = INSN(4,0);
+      if (is64) {
+         putIReg64orZR(
+            dd,
+            binop(isAdd ? Iop_Add64 : Iop_Sub64,
+                  getIReg64orZR(aa),
+                  binop(Iop_Mul64, getIReg64orZR(mm), getIReg64orZR(nn))));
+      } else {
+         putIReg32orZR(
+            dd,
+            binop(isAdd ? Iop_Add32 : Iop_Sub32,
+                  getIReg32orZR(aa),
+                  binop(Iop_Mul32, getIReg32orZR(mm), getIReg32orZR(nn))));
+      }
+      DIP("%s %s, %s, %s, %s\n",
+          isAdd ? "madd" : "msub",
+          nameIRegOrZR(is64, dd), nameIRegOrZR(is64, nn),
+          nameIRegOrZR(is64, mm), nameIRegOrZR(is64, aa));
+      return True;
+   }
+
+   /* ---------------- CS{EL,INC,INV,NEG} ---------------- */   
+   /* 31 30 28        20 15   11 9  4
+      sf 00 1101 0100 mm cond 00 nn dd   CSEL  Rd,Rn,Rm
+      sf 00 1101 0100 mm cond 01 nn dd   CSINC Rd,Rn,Rm
+      sf 10 1101 0100 mm cond 00 nn dd   CSINV Rd,Rn,Rm
+      sf 10 1101 0100 mm cond 01 nn dd   CSNEG Rd,Rn,Rm
+      In all cases, the operation is: Rd = if cond then Rn else OP(Rm)
+   */
+   if (INSN(29,21) == BITS9(0, 1,1,0,1, 0,1,0,0) && INSN(11,11) == 0) {
+      Bool    is64 = INSN(31,31) == 1;
+      UInt    b30  = INSN(30,30);
+      UInt    mm   = INSN(20,16);
+      UInt    cond = INSN(15,12);
+      UInt    b10  = INSN(10,10);
+      UInt    nn   = INSN(9,5);
+      UInt    dd   = INSN(4,0);
+      UInt    op   = (b30 << 1) | b10; /* 00=id 01=inc 10=inv 11=neg */
+      IRType  ty   = is64 ? Ity_I64 : Ity_I32;
+      IRExpr* argL = getIRegOrZR(is64, nn);
+      IRExpr* argR = getIRegOrZR(is64, mm);
+      switch (op) {
+         case BITS2(0,0):
+            break;
+         case BITS2(0,1):
+            argR = binop(mkADD(ty), argR, mkU(ty,1));
+            break;
+         case BITS2(1,0):
+            argR = unop(mkNOT(ty), argR);
+            break;
+         case BITS2(1,1):
+            argR = binop(mkSUB(ty), mkU(ty,0), argR);
+            break;
+         default:
+            vassert(0);
+      }
+      putIRegOrZR(
+         is64, dd,
+         IRExpr_ITE(unop(Iop_64to1, mk_arm64g_calculate_condition(cond)),
+                    argL, argR)
+      );
+      const HChar* op_nm[4] = { "csel", "csinc", "csinv", "csneg" };
+      DIP("%s %s, %s, %s, %s\n", op_nm[op],
+          nameIRegOrZR(is64, dd), nameIRegOrZR(is64, nn),
+          nameIRegOrZR(is64, mm), nameCC(cond));
+      return True;
+   }
+
+   /* -------------- ADD/SUB(extended reg) -------------- */   
+   /*     28         20 15  12   9 4
+      000 01011 00 1 m  opt imm3 n d   ADD  Wd|SP, Wn|SP, Wm ext&lsld
+      100 01011 00 1 m  opt imm3 n d   ADD  Xd|SP, Xn|SP, Rm ext&lsld
+
+      001 01011 00 1 m  opt imm3 n d   ADDS Wd,    Wn|SP, Wm ext&lsld
+      101 01011 00 1 m  opt imm3 n d   ADDS Xd,    Xn|SP, Rm ext&lsld
+
+      010 01011 00 1 m  opt imm3 n d   SUB  Wd|SP, Wn|SP, Wm ext&lsld
+      110 01011 00 1 m  opt imm3 n d   SUB  Xd|SP, Xn|SP, Rm ext&lsld
+
+      011 01011 00 1 m  opt imm3 n d   SUBS Wd,    Wn|SP, Wm ext&lsld
+      111 01011 00 1 m  opt imm3 n d   SUBS Xd,    Xn|SP, Rm ext&lsld
+
+      The 'm' operand is extended per opt, thusly:
+
+        000   Xm & 0xFF           UXTB
+        001   Xm & 0xFFFF         UXTH
+        010   Xm & (2^32)-1       UXTW
+        011   Xm                  UXTX
+
+        100   Xm sx from bit 7    SXTB
+        101   Xm sx from bit 15   SXTH
+        110   Xm sx from bit 31   SXTW
+        111   Xm                  SXTX
+
+      In the 64 bit case (bit31 == 1), UXTX and SXTX are the identity
+      operation on Xm.  In the 32 bit case, UXTW, UXTX, SXTW and SXTX
+      are the identity operation on Wm.
+
+      After extension, the value is shifted left by imm3 bits, which
+      may only be in the range 0 .. 4 inclusive.
+   */
+   if (INSN(28,21) == BITS8(0,1,0,1,1,0,0,1) && INSN(12,10) <= 4) {
+      Bool is64  = INSN(31,31) == 1;
+      Bool isSub = INSN(30,30) == 1;
+      Bool setCC = INSN(29,29) == 1;
+      UInt mm    = INSN(20,16);
+      UInt opt   = INSN(15,13);
+      UInt imm3  = INSN(12,10);
+      UInt nn    = INSN(9,5);
+      UInt dd    = INSN(4,0);
+      const HChar* nameExt[8] = { "uxtb", "uxth", "uxtw", "uxtx",
+                                  "sxtb", "sxth", "sxtw", "sxtx" };
+      /* Do almost the same thing in the 32- and 64-bit cases. */
+      IRTemp xN = newTemp(Ity_I64);
+      IRTemp xM = newTemp(Ity_I64);
+      assign(xN, getIReg64orSP(nn));
+      assign(xM, getIReg64orZR(mm));
+      IRExpr* xMw  = mkexpr(xM); /* "xM widened" */
+      Int     shSX = 0;
+      /* widen Xm .. */
+      switch (opt) {
+         case BITS3(0,0,0): // UXTB
+            xMw = binop(Iop_And64, xMw, mkU64(0xFF)); break;
+         case BITS3(0,0,1): // UXTH
+            xMw = binop(Iop_And64, xMw, mkU64(0xFFFF)); break;
+         case BITS3(0,1,0): // UXTW -- noop for the 32bit case
+            if (is64) {
+               xMw = unop(Iop_32Uto64, unop(Iop_64to32, xMw));
+            }
+            break;
+         case BITS3(0,1,1): // UXTX -- always a noop
+            break;
+         case BITS3(1,0,0): // SXTB
+            shSX = 56; goto sxTo64;
+         case BITS3(1,0,1): // SXTH
+            shSX = 48; goto sxTo64;
+         case BITS3(1,1,0): // SXTW -- noop for the 32bit case
+            if (is64) {
+               shSX = 32; goto sxTo64;
+            }
+            break;
+         case BITS3(1,1,1): // SXTX -- always a noop
+            break;
+         sxTo64:
+            vassert(shSX >= 32);
+            xMw = binop(Iop_Sar64, binop(Iop_Shl64, xMw, mkU8(shSX)),
+                        mkU8(shSX));
+            break;
+         default:
+            vassert(0);
+      }
+      /* and now shift */
+      IRTemp argL = xN;
+      IRTemp argR = newTemp(Ity_I64);
+      assign(argR, binop(Iop_Shl64, xMw, mkU8(imm3)));
+      IRTemp res = newTemp(Ity_I64);
+      assign(res, binop(isSub ? Iop_Sub64 : Iop_Add64,
+                        mkexpr(argL), mkexpr(argR)));
+      if (is64) {
+         if (setCC) {
+            putIReg64orZR(dd, mkexpr(res));
+            setFlags_ADD_SUB(True/*is64*/, isSub, argL, argR);
+         } else {
+            putIReg64orSP(dd, mkexpr(res));
+         }
+      } else {
+         if (setCC) {
+            IRTemp argL32 = newTemp(Ity_I32);
+            IRTemp argR32 = newTemp(Ity_I32);
+            putIReg32orZR(dd, unop(Iop_64to32, mkexpr(res)));
+            assign(argL32, unop(Iop_64to32, mkexpr(argL)));
+            assign(argR32, unop(Iop_64to32, mkexpr(argR)));
+            setFlags_ADD_SUB(False/*!is64*/, isSub, argL32, argR32);
+         } else {
+            putIReg32orSP(dd, unop(Iop_64to32, mkexpr(res)));
+         }
+      }
+      DIP("%s%s %s, %s, %s %s lsl %u\n",
+          isSub ? "sub" : "add", setCC ? "s" : "",
+          setCC ? nameIRegOrZR(is64, dd) : nameIRegOrSP(is64, dd),
+          nameIRegOrSP(is64, nn), nameIRegOrSP(is64, mm),
+          nameExt[opt], imm3);
+      return True;
+   }
+
+   /* ---------------- CCMP/CCMN(imm) ---------------- */
+   /* Bizarrely, these appear in the "data processing register"
+      category, even though they are operations against an
+      immediate. */
+   /* 31   29        20   15   11 9    3
+      sf 1 111010010 imm5 cond 10 Rn 0 nzcv   CCMP Rn, #imm5, #nzcv, cond
+      sf 0 111010010 imm5 cond 10 Rn 0 nzcv   CCMN Rn, #imm5, #nzcv, cond
+
+      Operation is:
+         (CCMP) flags = if cond then flags-after-sub(Rn,imm5) else nzcv
+         (CCMN) flags = if cond then flags-after-add(Rn,imm5) else nzcv
+   */
+   if (INSN(29,21) == BITS9(1,1,1,0,1,0,0,1,0)
+       && INSN(11,10) == BITS2(1,0) && INSN(4,4) == 0) {
+      Bool is64  = INSN(31,31) == 1;
+      Bool isSUB = INSN(30,30) == 1;
+      UInt imm5  = INSN(20,16);
+      UInt cond  = INSN(15,12);
+      UInt nn    = INSN(9,5);
+      UInt nzcv  = INSN(3,0);
+
+      IRTemp condT = newTemp(Ity_I1);
+      assign(condT, unop(Iop_64to1, mk_arm64g_calculate_condition(cond)));
+
+      IRType ty   = is64 ? Ity_I64 : Ity_I32;
+      IRTemp argL = newTemp(ty);
+      IRTemp argR = newTemp(ty);
+
+      if (is64) {
+         assign(argL, getIReg64orZR(nn));
+         assign(argR, mkU64(imm5));
+      } else {
+         assign(argL, getIReg32orZR(nn));
+         assign(argR, mkU32(imm5));
+      }
+      setFlags_ADD_SUB_conditionally(is64, isSUB, condT, argL, argR, nzcv);
+
+      DIP("ccm%c %s, #%u, #%u, %s\n",
+          isSUB ? 'p' : 'n', nameIRegOrZR(is64, nn),
+          imm5, nzcv, nameCC(cond));
+      return True;
+   }
+
+   /* ---------------- CCMP/CCMN(reg) ---------------- */
+   /* 31   29        20 15   11 9    3
+      sf 1 111010010 Rm cond 00 Rn 0 nzcv   CCMP Rn, Rm, #nzcv, cond
+      sf 0 111010010 Rm cond 00 Rn 0 nzcv   CCMN Rn, Rm, #nzcv, cond
+      Operation is:
+         (CCMP) flags = if cond then flags-after-sub(Rn,Rm) else nzcv
+         (CCMN) flags = if cond then flags-after-add(Rn,Rm) else nzcv
+   */
+   if (INSN(29,21) == BITS9(1,1,1,0,1,0,0,1,0)
+       && INSN(11,10) == BITS2(0,0) && INSN(4,4) == 0) {
+      Bool is64  = INSN(31,31) == 1;
+      Bool isSUB = INSN(30,30) == 1;
+      UInt mm    = INSN(20,16);
+      UInt cond  = INSN(15,12);
+      UInt nn    = INSN(9,5);
+      UInt nzcv  = INSN(3,0);
+
+      IRTemp condT = newTemp(Ity_I1);
+      assign(condT, unop(Iop_64to1, mk_arm64g_calculate_condition(cond)));
+
+      IRType ty   = is64 ? Ity_I64 : Ity_I32;
+      IRTemp argL = newTemp(ty);
+      IRTemp argR = newTemp(ty);
+
+      if (is64) {
+         assign(argL, getIReg64orZR(nn));
+         assign(argR, getIReg64orZR(mm));
+      } else {
+         assign(argL, getIReg32orZR(nn));
+         assign(argR, getIReg32orZR(mm));
+      }
+      setFlags_ADD_SUB_conditionally(is64, isSUB, condT, argL, argR, nzcv);
+
+      DIP("ccm%c %s, %s, #%u, %s\n",
+          isSUB ? 'p' : 'n', nameIRegOrZR(is64, nn),
+          nameIRegOrZR(is64, mm), nzcv, nameCC(cond));
+      return True;
+   }
+
+
+   /* -------------- REV/REV16/REV32/RBIT -------------- */   
+   /* 31 30 28       20    15   11 9 4
+
+      1  10 11010110 00000 0000 11 n d    (1) REV   Xd, Xn
+      0  10 11010110 00000 0000 10 n d    (2) REV   Wd, Wn
+
+      1  10 11010110 00000 0000 00 n d    (3) RBIT  Xd, Xn
+      0  10 11010110 00000 0000 00 n d    (4) RBIT  Wd, Wn
+
+      1  10 11010110 00000 0000 01 n d    (5) REV16 Xd, Xn
+      0  10 11010110 00000 0000 01 n d    (6) REV16 Wd, Wn
+
+      1  10 11010110 00000 0000 10 n d    (7) REV32 Xd, Xn
+   */
+   if (INSN(30,21) == BITS10(1,0,1,1,0,1,0,1,1,0)
+       && INSN(20,12) == BITS9(0,0,0,0,0,0,0,0,0)) {
+      UInt b31 = INSN(31,31);
+      UInt opc = INSN(11,10);
+
+      UInt ix = 0;
+      /**/ if (b31 == 1 && opc == BITS2(1,1)) ix = 1; 
+      else if (b31 == 0 && opc == BITS2(1,0)) ix = 2; 
+      else if (b31 == 1 && opc == BITS2(0,0)) ix = 3; 
+      else if (b31 == 0 && opc == BITS2(0,0)) ix = 4; 
+      else if (b31 == 1 && opc == BITS2(0,1)) ix = 5; 
+      else if (b31 == 0 && opc == BITS2(0,1)) ix = 6; 
+      else if (b31 == 1 && opc == BITS2(1,0)) ix = 7; 
+      if (ix >= 1 && ix <= 7) {
+         Bool   is64  = ix == 1 || ix == 3 || ix == 5 || ix == 7;
+         UInt   nn    = INSN(9,5);
+         UInt   dd    = INSN(4,0);
+         IRTemp src   = newTemp(Ity_I64);
+         IRTemp dst   = IRTemp_INVALID;
+         IRTemp (*math)(IRTemp) = NULL;
+         switch (ix) {
+            case 1: case 2: math = math_BYTESWAP64;   break;
+            case 3: case 4: math = math_BITSWAP64;    break;
+            case 5: case 6: math = math_USHORTSWAP64; break;
+            case 7:         math = math_UINTSWAP64;   break;
+            default: vassert(0);
+         }
+         const HChar* names[7]
+           = { "rev", "rev", "rbit", "rbit", "rev16", "rev16", "rev32" };
+         const HChar* nm = names[ix-1];
+         vassert(math);
+         if (ix == 6) {
+            /* This has to be special cased, since the logic below doesn't
+               handle it correctly. */
+            assign(src, getIReg64orZR(nn));
+            dst = math(src);
+            putIReg64orZR(dd,
+                          unop(Iop_32Uto64, unop(Iop_64to32, mkexpr(dst))));
+         } else if (is64) {
+            assign(src, getIReg64orZR(nn));
+            dst = math(src);
+            putIReg64orZR(dd, mkexpr(dst));
+         } else {
+            assign(src, binop(Iop_Shl64, getIReg64orZR(nn), mkU8(32)));
+            dst = math(src);
+            putIReg32orZR(dd, unop(Iop_64to32, mkexpr(dst)));
+         }
+         DIP("%s %s, %s\n", nm,
+             nameIRegOrZR(is64,dd), nameIRegOrZR(is64,nn));
+         return True;
+      }
+      /* else fall through */
+   }
+
+   /* -------------------- CLZ/CLS -------------------- */   
+   /*    30 28   24   20    15      9 4
+      sf 10 1101 0110 00000 00010 0 n d    CLZ Rd, Rn
+      sf 10 1101 0110 00000 00010 1 n d    CLS Rd, Rn
+   */
+   if (INSN(30,21) == BITS10(1,0,1,1,0,1,0,1,1,0)
+       && INSN(20,11) == BITS10(0,0,0,0,0,0,0,0,1,0)) {
+      Bool   is64  = INSN(31,31) == 1;
+      Bool   isCLS = INSN(10,10) == 1;
+      UInt   nn    = INSN(9,5);
+      UInt   dd    = INSN(4,0);
+      IRTemp src   = newTemp(Ity_I64);
+      IRTemp srcZ  = newTemp(Ity_I64);
+      IRTemp dst   = newTemp(Ity_I64);
+      /* Get the argument, widened out to 64 bit */
+      if (is64) {
+         assign(src, getIReg64orZR(nn));
+      } else {
+         assign(src, binop(Iop_Shl64,
+                           unop(Iop_32Uto64, getIReg32orZR(nn)), mkU8(32)));
+      }
+      /* If this is CLS, mash the arg around accordingly */
+      if (isCLS) {
+         IRExpr* one = mkU8(1);
+         assign(srcZ,
+         binop(Iop_Xor64,
+               binop(Iop_Shl64, mkexpr(src), one),
+               binop(Iop_Shl64, binop(Iop_Shr64, mkexpr(src), one), one)));
+      } else {
+         assign(srcZ, mkexpr(src));
+      }
+      /* And compute CLZ. */
+      if (is64) {
+         assign(dst, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(srcZ), mkU64(0)),
+                                mkU64(isCLS ? 63 : 64),
+                                unop(Iop_Clz64, mkexpr(srcZ))));
+         putIReg64orZR(dd, mkexpr(dst));
+      } else {
+         assign(dst, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(srcZ), mkU64(0)),
+                                mkU64(isCLS ? 31 : 32),
+                                unop(Iop_Clz64, mkexpr(srcZ))));
+         putIReg32orZR(dd, unop(Iop_64to32, mkexpr(dst)));
+      }
+      DIP("cl%c %s, %s\n", isCLS ? 's' : 'z',
+          nameIRegOrZR(is64, dd), nameIRegOrZR(is64, nn));
+      return True;
+   }
+
+   /* ------------------ LSLV/LSRV/ASRV/RORV ------------------ */   
+   /*    30 28        20 15   11 9 4
+      sf 00 1101 0110 m  0010 00 n d   LSLV Rd,Rn,Rm
+      sf 00 1101 0110 m  0010 01 n d   LSRV Rd,Rn,Rm
+      sf 00 1101 0110 m  0010 10 n d   ASRV Rd,Rn,Rm
+      sf 00 1101 0110 m  0010 11 n d   RORV Rd,Rn,Rm
+   */
+   if (INSN(30,21) == BITS10(0,0,1,1,0,1,0,1,1,0)
+       && INSN(15,12) == BITS4(0,0,1,0)) {
+      Bool   is64 = INSN(31,31) == 1;
+      UInt   mm   = INSN(20,16);
+      UInt   op   = INSN(11,10);
+      UInt   nn   = INSN(9,5);
+      UInt   dd   = INSN(4,0);
+      IRType ty   = is64 ? Ity_I64 : Ity_I32;
+      IRTemp srcL = newTemp(ty);
+      IRTemp srcR = newTemp(Ity_I64);
+      IRTemp res  = newTemp(ty);
+      IROp   iop  = Iop_INVALID;
+      assign(srcL, getIRegOrZR(is64, nn));
+      assign(srcR, binop(Iop_And64, getIReg64orZR(mm),
+                                    mkU64(is64 ? 63 : 31)));
+      if (op < 3) {
+         // LSLV, LSRV, ASRV
+         switch (op) {
+            case BITS2(0,0): iop = mkSHL(ty); break;
+            case BITS2(0,1): iop = mkSHR(ty); break;
+            case BITS2(1,0): iop = mkSAR(ty); break;
+            default: vassert(0);
+         }
+         assign(res, binop(iop, mkexpr(srcL),
+                                unop(Iop_64to8, mkexpr(srcR))));
+      } else {
+         // RORV
+         IROp opSHL = mkSHL(ty);
+         IROp opSHR = mkSHR(ty);
+         IROp opOR  = mkOR(ty);
+         IRExpr* width = mkU64(is64 ? 64: 32);
+         assign(
+            res,
+            IRExpr_ITE(
+               binop(Iop_CmpEQ64, mkexpr(srcR), mkU64(0)),
+               mkexpr(srcL),
+               binop(opOR,
+                     binop(opSHL,
+                           mkexpr(srcL),
+                           unop(Iop_64to8, binop(Iop_Sub64, width,
+                                                            mkexpr(srcR)))),
+                     binop(opSHR,
+                           mkexpr(srcL), unop(Iop_64to8, mkexpr(srcR))))
+         ));
+      }
+      putIRegOrZR(is64, dd, mkexpr(res));
+      vassert(op < 4);
+      const HChar* names[4] = { "lslv", "lsrv", "asrv", "rorv" };
+      DIP("%s %s, %s, %s\n",
+          names[op], nameIRegOrZR(is64,dd),
+                     nameIRegOrZR(is64,nn), nameIRegOrZR(is64,mm));
+      return True;
+   }
+
+   /* -------------------- SDIV/UDIV -------------------- */   
+   /*    30 28        20 15    10 9 4
+      sf 00 1101 0110 m  00001  1 n d  SDIV Rd,Rn,Rm
+      sf 00 1101 0110 m  00001  0 n d  UDIV Rd,Rn,Rm
+   */
+   if (INSN(30,21) == BITS10(0,0,1,1,0,1,0,1,1,0)
+       && INSN(15,11) == BITS5(0,0,0,0,1)) {
+      Bool is64 = INSN(31,31) == 1;
+      UInt mm   = INSN(20,16);
+      Bool isS  = INSN(10,10) == 1;
+      UInt nn   = INSN(9,5);
+      UInt dd   = INSN(4,0);
+      if (isS) {
+         putIRegOrZR(is64, dd, binop(is64 ? Iop_DivS64 : Iop_DivS32,
+                                     getIRegOrZR(is64, nn),
+                                     getIRegOrZR(is64, mm)));
+      } else {
+         putIRegOrZR(is64, dd, binop(is64 ? Iop_DivU64 : Iop_DivU32,
+                                     getIRegOrZR(is64, nn),
+                                     getIRegOrZR(is64, mm)));
+      }
+      DIP("%cdiv %s, %s, %s\n", isS ? 's' : 'u',
+          nameIRegOrZR(is64, dd),
+          nameIRegOrZR(is64, nn), nameIRegOrZR(is64, mm));
+      return True;
+   }
+
+   /* ------------------ {S,U}M{ADD,SUB}L ------------------ */   
+   /* 31        23  20 15 14 9 4
+      1001 1011 101 m  0  a  n d   UMADDL Xd,Wn,Wm,Xa
+      1001 1011 001 m  0  a  n d   SMADDL Xd,Wn,Wm,Xa
+      1001 1011 101 m  1  a  n d   UMSUBL Xd,Wn,Wm,Xa
+      1001 1011 001 m  1  a  n d   SMSUBL Xd,Wn,Wm,Xa
+      with operation
+         Xd = Xa +/- (Wn *u/s Wm)
+   */
+   if (INSN(31,24) == BITS8(1,0,0,1,1,0,1,1) && INSN(22,21) == BITS2(0,1)) {
+      Bool   isU   = INSN(23,23) == 1;
+      UInt   mm    = INSN(20,16);
+      Bool   isAdd = INSN(15,15) == 0;
+      UInt   aa    = INSN(14,10);
+      UInt   nn    = INSN(9,5);
+      UInt   dd    = INSN(4,0);
+      IRTemp wN    = newTemp(Ity_I32);
+      IRTemp wM    = newTemp(Ity_I32);
+      IRTemp xA    = newTemp(Ity_I64);
+      IRTemp muld  = newTemp(Ity_I64);
+      IRTemp res   = newTemp(Ity_I64);
+      assign(wN, getIReg32orZR(nn));
+      assign(wM, getIReg32orZR(mm));
+      assign(xA, getIReg64orZR(aa));
+      assign(muld, binop(isU ? Iop_MullU32 : Iop_MullS32,
+                         mkexpr(wN), mkexpr(wM)));
+      assign(res, binop(isAdd ? Iop_Add64 : Iop_Sub64,
+                        mkexpr(xA), mkexpr(muld)));
+      putIReg64orZR(dd, mkexpr(res));
+      DIP("%cm%sl %s, %s, %s, %s\n", isU ? 'u' : 's', isAdd ? "add" : "sub",
+          nameIReg64orZR(dd), nameIReg32orZR(nn),
+          nameIReg32orZR(mm), nameIReg64orZR(aa));
+      return True;
+   }
+   vex_printf("ARM64 front end: data_processing_register\n");
+   return False;
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Math helpers for vector interleave/deinterleave      ---*/
+/*------------------------------------------------------------*/
+
+#define EX(_tmp) \
+           mkexpr(_tmp)
+#define SL(_hi128,_lo128,_nbytes) \
+           ( (_nbytes) == 0 \
+                ? (_lo128) \
+                : triop(Iop_SliceV128,(_hi128),(_lo128),mkU8(_nbytes)) )
+#define ROR(_v128,_nbytes) \
+           SL((_v128),(_v128),(_nbytes))
+#define ROL(_v128,_nbytes) \
+           SL((_v128),(_v128),16-(_nbytes))
+#define SHR(_v128,_nbytes) \
+           binop(Iop_ShrV128,(_v128),mkU8(8*(_nbytes)))
+#define SHL(_v128,_nbytes) \
+           binop(Iop_ShlV128,(_v128),mkU8(8*(_nbytes)))
+#define ILO64x2(_argL,_argR) \
+           binop(Iop_InterleaveLO64x2,(_argL),(_argR))
+#define IHI64x2(_argL,_argR) \
+           binop(Iop_InterleaveHI64x2,(_argL),(_argR))
+#define ILO32x4(_argL,_argR) \
+           binop(Iop_InterleaveLO32x4,(_argL),(_argR))
+#define IHI32x4(_argL,_argR) \
+           binop(Iop_InterleaveHI32x4,(_argL),(_argR))
+#define ILO16x8(_argL,_argR) \
+           binop(Iop_InterleaveLO16x8,(_argL),(_argR))
+#define IHI16x8(_argL,_argR) \
+           binop(Iop_InterleaveHI16x8,(_argL),(_argR))
+#define ILO8x16(_argL,_argR) \
+           binop(Iop_InterleaveLO8x16,(_argL),(_argR))
+#define IHI8x16(_argL,_argR) \
+           binop(Iop_InterleaveHI8x16,(_argL),(_argR))
+#define CEV32x4(_argL,_argR) \
+           binop(Iop_CatEvenLanes32x4,(_argL),(_argR))
+#define COD32x4(_argL,_argR) \
+           binop(Iop_CatOddLanes32x4,(_argL),(_argR))
+#define COD16x8(_argL,_argR) \
+           binop(Iop_CatOddLanes16x8,(_argL),(_argR))
+#define COD8x16(_argL,_argR) \
+           binop(Iop_CatOddLanes8x16,(_argL),(_argR))
+#define CEV8x16(_argL,_argR) \
+           binop(Iop_CatEvenLanes8x16,(_argL),(_argR))
+#define AND(_arg1,_arg2) \
+           binop(Iop_AndV128,(_arg1),(_arg2))
+#define OR2(_arg1,_arg2) \
+           binop(Iop_OrV128,(_arg1),(_arg2))
+#define OR3(_arg1,_arg2,_arg3) \
+           binop(Iop_OrV128,(_arg1),binop(Iop_OrV128,(_arg2),(_arg3)))
+#define OR4(_arg1,_arg2,_arg3,_arg4) \
+           binop(Iop_OrV128, \
+                 binop(Iop_OrV128,(_arg1),(_arg2)), \
+                 binop(Iop_OrV128,(_arg3),(_arg4)))
+
+
+/* Do interleaving for 1 128 bit vector, for ST1 insns.  With a single
+   register there is nothing to interleave, so this is a plain copy. */
+static
+void math_INTERLEAVE1_128( /*OUTx1*/ IRTemp* i0,
+                           UInt laneSzBlg2, IRTemp u0 )
+{
+   /* laneSzBlg2 is unused; it is kept so that all four INTERLEAVE
+      variants present a uniform shape to their callers. */
+   assign(*i0, EX(u0));
+}
+
+
+/* Do interleaving for 2 128 bit vectors, for ST2 insns.  The
+   InterleaveLO/HI primitives do exactly the required lane mixing at
+   every lane size, so each case is a direct two-op translation.
+   Example, 64x2 lanes:
+      inputs:  u1 == B1 B0,  u0 == A1 A0
+      outputs: i1 == B1 A1,  i0 == B0 A0
+   and analogously for the 32x4, 16x8 and 8x16 lane sizes. */
+static
+void math_INTERLEAVE2_128( /*OUTx2*/ IRTemp* i0, IRTemp* i1,
+                           UInt laneSzBlg2, IRTemp u0, IRTemp u1 )
+{
+   switch (laneSzBlg2) {
+      case 3: // 64x2
+         assign(*i0, ILO64x2( EX(u1), EX(u0) ));
+         assign(*i1, IHI64x2( EX(u1), EX(u0) ));
+         return;
+      case 2: // 32x4
+         assign(*i0, ILO32x4( EX(u1), EX(u0) ));
+         assign(*i1, IHI32x4( EX(u1), EX(u0) ));
+         return;
+      case 1: // 16x8
+         assign(*i0, ILO16x8( EX(u1), EX(u0) ));
+         assign(*i1, IHI16x8( EX(u1), EX(u0) ));
+         return;
+      case 0: // 8x16
+         assign(*i0, ILO8x16( EX(u1), EX(u0) ));
+         assign(*i1, IHI8x16( EX(u1), EX(u0) ));
+         return;
+      default:
+         break;
+   }
+   /*NOTREACHED*/
+   vassert(0);
+}
+
+
+/* Do interleaving for 3 128 bit vectors, for ST3 insns.  Given source
+   vectors u0/u1/u2 (lanes named A/B/C below), produce i0/i1/i2 holding
+   the lanes in memory order A0 B0 C0 A1 B1 C1 ..., with i0 at the
+   lowest addresses.  64x2 is done directly; 32x4 and 16x8 are built
+   recursively from the next larger lane size; 8x16 is done longhand. */
+static
+void math_INTERLEAVE3_128( 
+        /*OUTx3*/ IRTemp* i0, IRTemp* i1, IRTemp* i2,
+        UInt laneSzBlg2,
+        IRTemp u0, IRTemp u1, IRTemp u2 )
+{
+   if (laneSzBlg2 == 3) {
+      // 64x2
+      // u2 == C1 C0, u1 == B1 B0, u0 == A1 A0
+      // i2 == C1 B1, i1 == A1 C0, i0 == B0 A0,
+      assign(*i2, IHI64x2( EX(u2), EX(u1) ));
+      assign(*i1, ILO64x2( ROR(EX(u0),8), EX(u2) ));
+      assign(*i0, ILO64x2( EX(u1), EX(u0) ));
+      return;
+   }
+
+   if (laneSzBlg2 == 2) {
+      // 32x4
+      // u2 == C3 C2 C1 C0, u1 == B3 B2 B1 B0, u0 == A3 A2 A1 A0
+      // p2 == C3 C2 B3 B2, p1 == A3 A2 C1 C0, p0 == B1 B0 A1 A0
+      // i2 == C3 B3 A2 C2, i1 == B2 A2 C1 B1, i0 == A1 C0 B0 A0
+      IRTemp p0    = newTempV128();
+      IRTemp p1    = newTempV128();
+      IRTemp p2    = newTempV128();
+      // Lane-selection masks: each hex digit covers 4 bytes (one 32-bit lane).
+      IRTemp c1100 = newTempV128();
+      IRTemp c0011 = newTempV128();
+      IRTemp c0110 = newTempV128();
+      assign(c1100, mkV128(0xFF00));
+      assign(c0011, mkV128(0x00FF));
+      assign(c0110, mkV128(0x0FF0));
+      // First interleave them at 64x2 granularity,
+      // generating partial ("p") values.
+      math_INTERLEAVE3_128(&p0, &p1, &p2, 3, u0, u1, u2);
+      // And more shuffling around for the final answer
+      assign(*i2, OR2( AND( IHI32x4(EX(p2), ROL(EX(p2),8)), EX(c1100) ),
+                       AND( IHI32x4(ROR(EX(p1),4), EX(p2)), EX(c0011) ) ));
+      assign(*i1, OR3( SHL(EX(p2),12),
+                       AND(EX(p1),EX(c0110)),
+                       SHR(EX(p0),12) ));
+      assign(*i0, OR2( AND( ILO32x4(EX(p0),ROL(EX(p1),4)), EX(c1100) ),
+                       AND( ILO32x4(ROR(EX(p0),8),EX(p0)), EX(c0011) ) ));
+      return;
+   }
+
+   if (laneSzBlg2 == 1) {
+      // 16x8
+      // u2 == C7 C6 C5 C4 C3 C2 C1 C0
+      // u1 == B7 B6 B5 B4 B3 B2 B1 B0
+      // u0 == A7 A6 A5 A4 A3 A2 A1 A0
+      //
+      // p2 == C7 C6 B7 B6 A7 A6 C5 C4
+      // p1 == B5 B4 A5 A4 C3 C2 B3 B2
+      // p0 == A3 A2 C1 C0 B1 B0 A1 A0
+      //
+      // i2 == C7 B7 A7 C6 B6 A6 C5 B5
+      // i1 == A5 C4 B4 A4 C3 B3 A3 C2
+      // i0 == B2 A2 C1 B1 A1 C0 B0 A0
+      IRTemp p0    = newTempV128();
+      IRTemp p1    = newTempV128();
+      IRTemp p2    = newTempV128();
+      // Masks selecting one 32-bit (two 16-bit lane) slice each.
+      IRTemp c1000 = newTempV128();
+      IRTemp c0100 = newTempV128();
+      IRTemp c0010 = newTempV128();
+      IRTemp c0001 = newTempV128();
+      assign(c1000, mkV128(0xF000));
+      assign(c0100, mkV128(0x0F00));
+      assign(c0010, mkV128(0x00F0));
+      assign(c0001, mkV128(0x000F));
+      // First interleave them at 32x4 granularity,
+      // generating partial ("p") values.
+      math_INTERLEAVE3_128(&p0, &p1, &p2, 2, u0, u1, u2);
+      // And more shuffling around for the final answer
+      assign(*i2,
+             OR4( AND( IHI16x8( EX(p2),        ROL(EX(p2),4) ), EX(c1000) ),
+                  AND( IHI16x8( ROL(EX(p2),6), EX(p2)        ), EX(c0100) ),
+                  AND( IHI16x8( ROL(EX(p2),2), ROL(EX(p2),6) ), EX(c0010) ),
+                  AND( ILO16x8( ROR(EX(p2),2), ROL(EX(p1),2) ), EX(c0001) )
+      ));
+      assign(*i1,
+             OR4( AND( IHI16x8( ROL(EX(p1),4), ROR(EX(p2),2) ), EX(c1000) ),
+                  AND( IHI16x8( EX(p1),        ROL(EX(p1),4) ), EX(c0100) ),
+                  AND( IHI16x8( ROL(EX(p1),4), ROL(EX(p1),8) ), EX(c0010) ),
+                  AND( IHI16x8( ROR(EX(p0),6), ROL(EX(p1),4) ), EX(c0001) )
+      ));
+      assign(*i0,
+             OR4( AND( IHI16x8( ROR(EX(p1),2), ROL(EX(p0),2) ), EX(c1000) ),
+                  AND( IHI16x8( ROL(EX(p0),2), ROL(EX(p0),6) ), EX(c0100) ),
+                  AND( IHI16x8( ROL(EX(p0),8), ROL(EX(p0),2) ), EX(c0010) ),
+                  AND( IHI16x8( ROL(EX(p0),4), ROL(EX(p0),8) ), EX(c0001) )
+      ));
+      return;
+   }
+
+   if (laneSzBlg2 == 0) {
+      // 8x16.  It doesn't seem worth the hassle of first doing a
+      // 16x8 interleave, so just generate all 24 partial results
+      // directly :-(
+      // u2 == Cf .. C0, u1 == Bf .. B0, u0 == Af .. A0
+      // i2 == Cf Bf Af Ce .. Bb Ab Ca
+      // i1 == Ba Aa C9 B9 .. A6 C5 B5
+      // i0 == A5 C4 B4 A4 .. C0 B0 A0
+
+      IRTemp i2_FEDC = newTempV128(); IRTemp i2_BA98 = newTempV128();
+      IRTemp i2_7654 = newTempV128(); IRTemp i2_3210 = newTempV128();
+      IRTemp i1_FEDC = newTempV128(); IRTemp i1_BA98 = newTempV128();
+      IRTemp i1_7654 = newTempV128(); IRTemp i1_3210 = newTempV128();
+      IRTemp i0_FEDC = newTempV128(); IRTemp i0_BA98 = newTempV128();
+      IRTemp i0_7654 = newTempV128(); IRTemp i0_3210 = newTempV128();
+      IRTemp i2_hi64 = newTempV128(); IRTemp i2_lo64 = newTempV128();
+      IRTemp i1_hi64 = newTempV128(); IRTemp i1_lo64 = newTempV128();
+      IRTemp i0_hi64 = newTempV128(); IRTemp i0_lo64 = newTempV128();
+
+      // eg XXXX(qqq, CC, 0xF, BB, 0xA)) sets qqq to be a vector
+      // of the form 14 bytes junk : CC[0xF] : BB[0xA]
+      //
+#     define XXXX(_tempName,_srcVec1,_srcShift1,_srcVec2,_srcShift2) \
+         IRTemp t_##_tempName = newTempV128(); \
+         assign(t_##_tempName, \
+                ILO8x16( ROR(EX(_srcVec1),(_srcShift1)), \
+                         ROR(EX(_srcVec2),(_srcShift2)) ) )
+
+      // Let CC, BB, AA be (handy) aliases of u2, u1, u0 respectively
+      IRTemp CC = u2; IRTemp BB = u1; IRTemp AA = u0;
+
+      // The slicing and reassembly are done as interleavedly as possible,
+      // so as to minimise the demand for registers in the back end, which
+      // was observed to be a problem in testing.
+
+      XXXX(CfBf, CC, 0xf, BB, 0xf); // i2[15:14]
+      XXXX(AfCe, AA, 0xf, CC, 0xe);
+      assign(i2_FEDC, ILO16x8(EX(t_CfBf), EX(t_AfCe)));
+
+      XXXX(BeAe, BB, 0xe, AA, 0xe);
+      XXXX(CdBd, CC, 0xd, BB, 0xd);
+      assign(i2_BA98, ILO16x8(EX(t_BeAe), EX(t_CdBd)));
+      assign(i2_hi64, ILO32x4(EX(i2_FEDC), EX(i2_BA98)));
+
+      XXXX(AdCc, AA, 0xd, CC, 0xc);
+      XXXX(BcAc, BB, 0xc, AA, 0xc);
+      assign(i2_7654, ILO16x8(EX(t_AdCc), EX(t_BcAc)));
+
+      XXXX(CbBb, CC, 0xb, BB, 0xb);
+      XXXX(AbCa, AA, 0xb, CC, 0xa); // i2[1:0] 
+      assign(i2_3210, ILO16x8(EX(t_CbBb), EX(t_AbCa)));
+      assign(i2_lo64, ILO32x4(EX(i2_7654), EX(i2_3210)));
+      assign(*i2, ILO64x2(EX(i2_hi64), EX(i2_lo64)));
+
+      XXXX(BaAa, BB, 0xa, AA, 0xa); // i1[15:14]
+      XXXX(C9B9, CC, 0x9, BB, 0x9);
+      assign(i1_FEDC, ILO16x8(EX(t_BaAa), EX(t_C9B9)));
+
+      XXXX(A9C8, AA, 0x9, CC, 0x8);
+      XXXX(B8A8, BB, 0x8, AA, 0x8);
+      assign(i1_BA98, ILO16x8(EX(t_A9C8), EX(t_B8A8)));
+      assign(i1_hi64, ILO32x4(EX(i1_FEDC), EX(i1_BA98)));
+
+      XXXX(C7B7, CC, 0x7, BB, 0x7);
+      XXXX(A7C6, AA, 0x7, CC, 0x6);
+      assign(i1_7654, ILO16x8(EX(t_C7B7), EX(t_A7C6)));
+
+      XXXX(B6A6, BB, 0x6, AA, 0x6);
+      XXXX(C5B5, CC, 0x5, BB, 0x5); // i1[1:0]
+      assign(i1_3210, ILO16x8(EX(t_B6A6), EX(t_C5B5)));
+      assign(i1_lo64, ILO32x4(EX(i1_7654), EX(i1_3210)));
+      assign(*i1, ILO64x2(EX(i1_hi64), EX(i1_lo64)));
+
+      XXXX(A5C4, AA, 0x5, CC, 0x4); // i0[15:14]
+      XXXX(B4A4, BB, 0x4, AA, 0x4);
+      assign(i0_FEDC, ILO16x8(EX(t_A5C4), EX(t_B4A4)));
+
+      XXXX(C3B3, CC, 0x3, BB, 0x3);
+      XXXX(A3C2, AA, 0x3, CC, 0x2);
+      assign(i0_BA98, ILO16x8(EX(t_C3B3), EX(t_A3C2)));
+      assign(i0_hi64, ILO32x4(EX(i0_FEDC), EX(i0_BA98)));
+
+      XXXX(B2A2, BB, 0x2, AA, 0x2);
+      XXXX(C1B1, CC, 0x1, BB, 0x1);
+      assign(i0_7654, ILO16x8(EX(t_B2A2), EX(t_C1B1)));
+
+      XXXX(A1C0, AA, 0x1, CC, 0x0);
+      XXXX(B0A0, BB, 0x0, AA, 0x0); // i0[1:0]
+      assign(i0_3210, ILO16x8(EX(t_A1C0), EX(t_B0A0)));
+      assign(i0_lo64, ILO32x4(EX(i0_7654), EX(i0_3210)));
+      assign(*i0, ILO64x2(EX(i0_hi64), EX(i0_lo64)));
+
+#     undef XXXX
+      return;
+   }
+
+   /*NOTREACHED*/
+   vassert(0);
+}
+
+
+/* Do interleaving for 4 128 bit vectors, for ST4 insns.  Given source
+   vectors u0..u3, produce i0..i3 holding the lanes in memory order
+   A0 B0 C0 D0 A1 B1 C1 D1 ..., with i0 at the lowest addresses.
+   64x2 is done directly; the smaller lane sizes are built recursively
+   from the result at the next larger lane size. */
+static
+void math_INTERLEAVE4_128( 
+        /*OUTx4*/ IRTemp* i0, IRTemp* i1, IRTemp* i2, IRTemp* i3,
+        UInt laneSzBlg2,
+        IRTemp u0, IRTemp u1, IRTemp u2, IRTemp u3 )
+{
+   if (laneSzBlg2 == 3) {
+      // 64x2
+      assign(*i0, ILO64x2(EX(u1), EX(u0)));
+      assign(*i1, ILO64x2(EX(u3), EX(u2)));
+      assign(*i2, IHI64x2(EX(u1), EX(u0)));
+      assign(*i3, IHI64x2(EX(u3), EX(u2)));
+      return;
+   }
+   if (laneSzBlg2 == 2) {
+      // 32x4
+      // First, interleave at the 64-bit lane size.
+      IRTemp p0 = newTempV128();
+      IRTemp p1 = newTempV128();
+      IRTemp p2 = newTempV128();
+      IRTemp p3 = newTempV128();
+      math_INTERLEAVE4_128(&p0, &p1, &p2, &p3, 3, u0, u1, u2, u3);
+      // And interleave (cat) at the 32 bit size.
+      assign(*i0, CEV32x4(EX(p1), EX(p0)));
+      assign(*i1, COD32x4(EX(p1), EX(p0)));
+      assign(*i2, CEV32x4(EX(p3), EX(p2)));
+      assign(*i3, COD32x4(EX(p3), EX(p2)));
+      return;
+   }
+   if (laneSzBlg2 == 1) {
+      // 16x8
+      // First, interleave at the 32-bit lane size.
+      IRTemp p0 = newTempV128();
+      IRTemp p1 = newTempV128();
+      IRTemp p2 = newTempV128();
+      IRTemp p3 = newTempV128();
+      math_INTERLEAVE4_128(&p0, &p1, &p2, &p3, 2, u0, u1, u2, u3);
+      // And rearrange within each vector, to get the right 16 bit lanes.
+      // COD of a vector against itself shifted left by one 16-bit lane
+      // (2 bytes) swaps the half-lanes within each 32-bit lane.
+      assign(*i0, COD16x8(EX(p0), SHL(EX(p0), 2)));
+      assign(*i1, COD16x8(EX(p1), SHL(EX(p1), 2)));
+      assign(*i2, COD16x8(EX(p2), SHL(EX(p2), 2)));
+      assign(*i3, COD16x8(EX(p3), SHL(EX(p3), 2)));
+      return;
+   }
+   if (laneSzBlg2 == 0) {
+      // 8x16
+      // First, interleave at the 16-bit lane size.
+      IRTemp p0 = newTempV128();
+      IRTemp p1 = newTempV128();
+      IRTemp p2 = newTempV128();
+      IRTemp p3 = newTempV128();
+      math_INTERLEAVE4_128(&p0, &p1, &p2, &p3, 1, u0, u1, u2, u3);
+      // And rearrange within each vector, to get the right 8 bit lanes.
+      assign(*i0, IHI32x4(COD8x16(EX(p0),EX(p0)), CEV8x16(EX(p0),EX(p0))));
+      assign(*i1, IHI32x4(COD8x16(EX(p1),EX(p1)), CEV8x16(EX(p1),EX(p1))));
+      assign(*i2, IHI32x4(COD8x16(EX(p2),EX(p2)), CEV8x16(EX(p2),EX(p2))));
+      assign(*i3, IHI32x4(COD8x16(EX(p3),EX(p3)), CEV8x16(EX(p3),EX(p3))));
+      return;
+   }
+   /*NOTREACHED*/
+   vassert(0);
+}
+
+
+/* Do deinterleaving for 1 128 bit vector, for LD1 insns.  With a
+   single register there is nothing to deinterleave: a plain copy. */
+static
+void math_DEINTERLEAVE1_128( /*OUTx1*/ IRTemp* u0,
+                             UInt laneSzBlg2, IRTemp i0 )
+{
+   /* laneSzBlg2 is unused; it is kept so that all four DEINTERLEAVE
+      variants present a uniform shape to their callers. */
+   assign(*u0, EX(i0));
+}
+
+
+/* Do deinterleaving for 2 128 bit vectors, for LD2 insns.  This is the
+   inverse of math_INTERLEAVE2_128: at 64x2 the InterleaveLO/HI
+   primitives suffice, and at the smaller lane sizes CatEven/CatOdd
+   gather the even-numbered and odd-numbered lanes respectively.
+   Example, 32x4 lanes:
+      inputs:  i1 == B3 A3 B2 A2,  i0 == B1 A1 B0 A0
+      outputs: u1 == B3 B2 B1 B0,  u0 == A3 A2 A1 A0               */
+static
+void math_DEINTERLEAVE2_128( /*OUTx2*/ IRTemp* u0, IRTemp* u1,
+                             UInt laneSzBlg2, IRTemp i0, IRTemp i1 )
+{
+   switch (laneSzBlg2) {
+      case 3: // 64x2
+         assign(*u0, ILO64x2( EX(i1), EX(i0) ));
+         assign(*u1, IHI64x2( EX(i1), EX(i0) ));
+         return;
+      case 2: // 32x4
+         assign(*u0, CEV32x4( EX(i1), EX(i0) ));
+         assign(*u1, COD32x4( EX(i1), EX(i0) ));
+         return;
+      case 1: // 16x8
+         // No CEV16x8 shorthand exists, so spell out the even case.
+         assign(*u0, binop(Iop_CatEvenLanes16x8, EX(i1), EX(i0)));
+         assign(*u1, COD16x8( EX(i1), EX(i0) ));
+         return;
+      case 0: // 8x16
+         assign(*u0, CEV8x16( EX(i1), EX(i0) ));
+         assign(*u1, COD8x16( EX(i1), EX(i0) ));
+         return;
+      default:
+         break;
+   }
+   /*NOTREACHED*/
+   vassert(0);
+}
+
+
+/* Do deinterleaving for 3 128 bit vectors, for LD3 insns.  Exact
+   inverse of math_INTERLEAVE3_128: given i0/i1/i2 holding lanes in
+   memory order A0 B0 C0 A1 B1 C1 ..., regroup them so that *u0 gets
+   all the A lanes, *u1 the B lanes and *u2 the C lanes.  64x2 is done
+   directly; the smaller lane sizes first shuffle into partial values
+   and then recurse at the next larger lane size. */
+static
+void math_DEINTERLEAVE3_128( 
+        /*OUTx3*/ IRTemp* u0, IRTemp* u1, IRTemp* u2,
+        UInt laneSzBlg2,
+        IRTemp i0, IRTemp i1, IRTemp i2 )
+{
+   if (laneSzBlg2 == 3) {
+      // 64x2
+      // i2 == C1 B1, i1 == A1 C0, i0 == B0 A0,
+      // u2 == C1 C0, u1 == B1 B0, u0 == A1 A0
+      assign(*u2, ILO64x2( ROL(EX(i2),8), EX(i1)        ));
+      assign(*u1, ILO64x2( EX(i2),        ROL(EX(i0),8) ));
+      assign(*u0, ILO64x2( ROL(EX(i1),8), EX(i0)        ));
+      return;
+   }
+
+   if (laneSzBlg2 == 2) {
+      // 32x4
+      // i2 == C3 B3 A2 C2, i1 == B2 A2 C1 B1, i0 == A1 C0 B0 A0
+      // p2 == C3 C2 B3 B2, p1 == A3 A2 C1 C0, p0 == B1 B0 A1 A0
+      // u2 == C3 C2 C1 C0, u1 == B3 B2 B1 B0, u0 == A3 A2 A1 A0
+      IRTemp t_a1c0b0a0 = newTempV128();
+      IRTemp t_a2c1b1a1 = newTempV128();
+      IRTemp t_a3c2b2a2 = newTempV128();
+      IRTemp t_a0c3b3a3 = newTempV128();
+      IRTemp p0 = newTempV128();
+      IRTemp p1 = newTempV128();
+      IRTemp p2 = newTempV128();
+      // Compute some intermediate values: rotations of the input
+      // stream, each aligned so its low lane is A{k} for some k.
+      assign(t_a1c0b0a0, EX(i0));
+      assign(t_a2c1b1a1, SL(EX(i1),EX(i0),3*4));
+      assign(t_a3c2b2a2, SL(EX(i2),EX(i1),2*4));
+      assign(t_a0c3b3a3, SL(EX(i0),EX(i2),1*4));
+      // First deinterleave into lane-pairs
+      assign(p0, ILO32x4(EX(t_a2c1b1a1),EX(t_a1c0b0a0)));
+      assign(p1, ILO64x2(ILO32x4(EX(t_a0c3b3a3), EX(t_a3c2b2a2)),
+                         IHI32x4(EX(t_a2c1b1a1), EX(t_a1c0b0a0))));
+      assign(p2, ILO32x4(ROR(EX(t_a0c3b3a3),1*4), ROR(EX(t_a3c2b2a2),1*4)));
+      // Then deinterleave at 64x2 granularity.
+      math_DEINTERLEAVE3_128(u0, u1, u2, 3, p0, p1, p2);
+      return;
+   }
+
+   if (laneSzBlg2 == 1) {
+      // 16x8
+      // u2 == C7 C6 C5 C4 C3 C2 C1 C0
+      // u1 == B7 B6 B5 B4 B3 B2 B1 B0
+      // u0 == A7 A6 A5 A4 A3 A2 A1 A0
+      //
+      // i2 == C7 B7 A7 C6 B6 A6 C5 B5
+      // i1 == A5 C4 B4 A4 C3 B3 A3 C2
+      // i0 == B2 A2 C1 B1 A1 C0 B0 A0
+      //
+      // p2 == C7 C6 B7 B6 A7 A6 C5 C4
+      // p1 == B5 B4 A5 A4 C3 C2 B3 B2
+      // p0 == A3 A2 C1 C0 B1 B0 A1 A0
+
+      IRTemp s0, s1, s2, s3, t0, t1, t2, t3, p0, p1, p2, c00111111;
+      s0 = s1 = s2 = s3
+         = t0 = t1 = t2 = t3 = p0 = p1 = p2 = c00111111 = IRTemp_INVALID;
+      newTempsV128_4(&s0, &s1, &s2, &s3);
+      newTempsV128_4(&t0, &t1, &t2, &t3);
+      newTempsV128_4(&p0, &p1, &p2, &c00111111);
+
+      // s0 == b2a2 c1b1a1 c0b0a0
+      // s1 == b4a4 c3b3a3 c2b2a2
+      // s2 == b6a6 c5b5a5 c4b4a4
+      // s3 == b0a0 c7b7a7 c6b6a6
+      assign(s0, EX(i0));
+      assign(s1, SL(EX(i1),EX(i0),6*2));
+      assign(s2, SL(EX(i2),EX(i1),4*2));
+      assign(s3, SL(EX(i0),EX(i2),2*2));
+
+      // t0 == 0 0 c1c0 b1b0 a1a0
+      // t1 == 0 0 c3c2 b3b2 a3a2
+      // t2 == 0 0 c5c4 b5b4 a5a4
+      // t3 == 0 0 c7c6 b7b6 a7a6
+      assign(c00111111, mkV128(0x0FFF));
+      assign(t0, AND( ILO16x8( ROR(EX(s0),3*2), EX(s0)), EX(c00111111)));
+      assign(t1, AND( ILO16x8( ROR(EX(s1),3*2), EX(s1)), EX(c00111111)));
+      assign(t2, AND( ILO16x8( ROR(EX(s2),3*2), EX(s2)), EX(c00111111)));
+      assign(t3, AND( ILO16x8( ROR(EX(s3),3*2), EX(s3)), EX(c00111111)));
+
+      // Pack the six-lane t values into the three partial vectors.
+      assign(p0, OR2(EX(t0),          SHL(EX(t1),6*2)));
+      assign(p1, OR2(SHL(EX(t2),4*2), SHR(EX(t1),2*2)));
+      assign(p2, OR2(SHL(EX(t3),2*2), SHR(EX(t2),4*2)));
+
+      // Then deinterleave at 32x4 granularity.
+      math_DEINTERLEAVE3_128(u0, u1, u2, 2, p0, p1, p2);
+      return;
+   }
+
+   if (laneSzBlg2 == 0) {
+      // 8x16.  This is the same scheme as for 16x8, with twice the
+      // number of intermediate values.
+      //
+      // u2 == C{f..0}
+      // u1 == B{f..0}
+      // u0 == A{f..0}
+      //
+      // i2 == CBA{f} CBA{e} CBA{d} CBA{c} CBA{b} C{a}
+      // i1 ==  BA{a} CBA{9} CBA{8} CBA{7} CBA{6} CB{5}
+      // i0 ==   A{5} CBA{4} CBA{3} CBA{2} CBA{1} CBA{0}
+      //
+      // p2 == C{fe} B{fe} A{fe} C{dc} B{dc} A{dc} C{ba} B{ba}
+      // p1 == A{ba} C{98} B{98} A{98} C{76} B{76} A{76} C{54}
+      // p0 == B{54} A{54} C{32} B{32} A{32} C{10} B{10} A{10}
+      //
+      IRTemp s0, s1, s2, s3, s4, s5, s6, s7,
+             t0, t1, t2, t3, t4, t5, t6, t7, p0, p1, p2, cMASK;
+      s0 = s1 = s2 = s3 = s4 = s5 = s6 = s7
+         = t0 = t1 = t2 = t3 = t4 = t5 = t6 = t7 = p0 = p1 = p2 = cMASK
+         = IRTemp_INVALID;
+      newTempsV128_4(&s0, &s1, &s2, &s3);
+      newTempsV128_4(&s4, &s5, &s6, &s7);
+      newTempsV128_4(&t0, &t1, &t2, &t3);
+      newTempsV128_4(&t4, &t5, &t6, &t7);
+      newTempsV128_4(&p0, &p1, &p2, &cMASK);
+
+      // s0 == A{5} CBA{4} CBA{3} CBA{2} CBA{1} CBA{0}
+      // s1 == A{7} CBA{6} CBA{5} CBA{4} CBA{3} CBA{2}
+      // s2 == A{9} CBA{8} CBA{7} CBA{6} CBA{5} CBA{4}
+      // s3 == A{b} CBA{a} CBA{9} CBA{8} CBA{7} CBA{6}
+      // s4 == A{d} CBA{c} CBA{b} CBA{a} CBA{9} CBA{8}
+      // s5 == A{f} CBA{e} CBA{d} CBA{c} CBA{b} CBA{a}
+      // s6 == A{1} CBA{0} CBA{f} CBA{e} CBA{d} CBA{c}
+      // s7 == A{3} CBA{2} CBA{1} CBA{0} CBA{f} CBA{e}
+      assign(s0, SL(EX(i1),EX(i0), 0));
+      assign(s1, SL(EX(i1),EX(i0), 6));
+      assign(s2, SL(EX(i1),EX(i0),12));
+      assign(s3, SL(EX(i2),EX(i1), 2));
+      assign(s4, SL(EX(i2),EX(i1), 8));
+      assign(s5, SL(EX(i2),EX(i1),14));
+      assign(s6, SL(EX(i0),EX(i2), 4));
+      assign(s7, SL(EX(i0),EX(i2),10));
+
+      // t0 == 0--(ten)--0 C1 C0 B1 B0 A1 A0
+      // t1 == 0--(ten)--0 C3 C2 B3 B2 A3 A2
+      // t2 == 0--(ten)--0 C5 C4 B5 B4 A5 A4
+      // t3 == 0--(ten)--0 C7 C6 B7 B6 A7 A6
+      // t4 == 0--(ten)--0 C9 C8 B9 B8 A9 A8
+      // t5 == 0--(ten)--0 Cb Ca Bb Ba Ab Aa
+      // t6 == 0--(ten)--0 Cd Cc Bd Bc Ad Ac
+      // t7 == 0--(ten)--0 Cf Ce Bf Be Af Ae
+      assign(cMASK, mkV128(0x003F));
+      assign(t0, AND( ILO8x16( ROR(EX(s0),3), EX(s0)), EX(cMASK)));
+      assign(t1, AND( ILO8x16( ROR(EX(s1),3), EX(s1)), EX(cMASK)));
+      assign(t2, AND( ILO8x16( ROR(EX(s2),3), EX(s2)), EX(cMASK)));
+      assign(t3, AND( ILO8x16( ROR(EX(s3),3), EX(s3)), EX(cMASK)));
+      assign(t4, AND( ILO8x16( ROR(EX(s4),3), EX(s4)), EX(cMASK)));
+      assign(t5, AND( ILO8x16( ROR(EX(s5),3), EX(s5)), EX(cMASK)));
+      assign(t6, AND( ILO8x16( ROR(EX(s6),3), EX(s6)), EX(cMASK)));
+      assign(t7, AND( ILO8x16( ROR(EX(s7),3), EX(s7)), EX(cMASK)));
+
+      // Pack the six-byte t values into the three partial vectors.
+      assign(p0, OR3( SHL(EX(t2),12), SHL(EX(t1),6), EX(t0) ));
+      assign(p1, OR4( SHL(EX(t5),14), SHL(EX(t4),8),
+                 SHL(EX(t3),2), SHR(EX(t2),4) ));
+      assign(p2, OR3( SHL(EX(t7),10), SHL(EX(t6),4), SHR(EX(t5),2) ));
+
+      // Then deinterleave at 16x8 granularity.
+      math_DEINTERLEAVE3_128(u0, u1, u2, 1, p0, p1, p2);
+      return;
+   }
+
+   /*NOTREACHED*/
+   vassert(0);
+}
+
+
+/* Do deinterleaving for 4 128 bit vectors, for LD4 insns.  The four
+   interleaved inputs i0..i3 are split into the deinterleaved outputs
+   *u0..*u3, for lanes of size (8 << laneSzBlg2) bits.  Each sub-64x2
+   case regroups lanes into pairs at the next lane size up and then
+   recurses, so every case bottoms out at the 64x2 shuffle. */
+static
+void math_DEINTERLEAVE4_128( 
+        /*OUTx4*/ IRTemp* u0, IRTemp* u1, IRTemp* u2, IRTemp* u3,
+        UInt laneSzBlg2,
+        IRTemp i0, IRTemp i1, IRTemp i2, IRTemp i3 )
+{
+   if (laneSzBlg2 == 3) {
+      // 64x2
+      // Base case: pick alternate 64-bit lanes straight out of the
+      // (i3,i1) and (i2,i0) pairs.
+      assign(*u0, ILO64x2(EX(i2), EX(i0)));
+      assign(*u1, IHI64x2(EX(i2), EX(i0)));
+      assign(*u2, ILO64x2(EX(i3), EX(i1)));
+      assign(*u3, IHI64x2(EX(i3), EX(i1)));
+      return;
+   }
+   if (laneSzBlg2 == 2) {
+      // 32x4
+      // Pair up adjacent 32-bit lanes, then finish as the 64x2 case.
+      IRTemp p0 = newTempV128();
+      IRTemp p2 = newTempV128();
+      IRTemp p1 = newTempV128();
+      IRTemp p3 = newTempV128();
+      assign(p0, ILO32x4(EX(i1), EX(i0)));
+      assign(p1, IHI32x4(EX(i1), EX(i0)));
+      assign(p2, ILO32x4(EX(i3), EX(i2)));
+      assign(p3, IHI32x4(EX(i3), EX(i2)));
+      // And now do what we did for the 64-bit case.
+      math_DEINTERLEAVE4_128(u0, u1, u2, u3, 3, p0, p1, p2, p3);
+      return;
+   }
+   if (laneSzBlg2 == 1) {
+      // 16x8
+      // Deinterleave into 32-bit chunks, then do as the 32-bit case.
+      IRTemp p0 = newTempV128();
+      IRTemp p1 = newTempV128();
+      IRTemp p2 = newTempV128();
+      IRTemp p3 = newTempV128();
+      assign(p0, IHI16x8(EX(i0), SHL(EX(i0), 8)));
+      assign(p1, IHI16x8(EX(i1), SHL(EX(i1), 8)));
+      assign(p2, IHI16x8(EX(i2), SHL(EX(i2), 8)));
+      assign(p3, IHI16x8(EX(i3), SHL(EX(i3), 8)));
+      // From here on is like the 32 bit case.
+      math_DEINTERLEAVE4_128(u0, u1, u2, u3, 2, p0, p1, p2, p3);
+      return;
+   }
+   if (laneSzBlg2 == 0) {
+      // 8x16
+      // Deinterleave into 16-bit chunks, then do as the 16-bit case.
+      IRTemp p0 = newTempV128();
+      IRTemp p1 = newTempV128();
+      IRTemp p2 = newTempV128();
+      IRTemp p3 = newTempV128();
+      assign(p0, IHI64x2( IHI8x16(EX(i0),ROL(EX(i0),4)),
+                          ILO8x16(EX(i0),ROL(EX(i0),4)) ));
+      assign(p1, IHI64x2( IHI8x16(EX(i1),ROL(EX(i1),4)),
+                          ILO8x16(EX(i1),ROL(EX(i1),4)) ));
+      assign(p2, IHI64x2( IHI8x16(EX(i2),ROL(EX(i2),4)),
+                          ILO8x16(EX(i2),ROL(EX(i2),4)) ));
+      assign(p3, IHI64x2( IHI8x16(EX(i3),ROL(EX(i3),4)),
+                          ILO8x16(EX(i3),ROL(EX(i3),4)) ));
+      // From here on is like the 16 bit case.
+      math_DEINTERLEAVE4_128(u0, u1, u2, u3, 1, p0, p1, p2, p3);
+      return;
+   }
+   /*NOTREACHED*/
+   vassert(0);
+}
+
+
+/* Wrappers that use the full-width (de)interleavers to do half-width
+   (de)interleaving.  The scheme is to clone each input lane in the
+   lower half of each incoming value, do a full width (de)interleave
+   at the next lane size up, and remove every other lane of the
+   result.  The returned values may have any old junk in the upper
+   64 bits -- the caller must ignore that. */
+
+/* Helper function -- get doubling and narrowing operations.  Maps a
+   lane size (laneSzBlg2 = log2 of the lane size in bytes, 0..2) to
+   the interleave op that clones each lane at double width and the
+   cat-even-lanes op that undoes that cloning. */
+static
+void math_get_doubler_and_halver ( /*OUT*/IROp* doubler,
+                                   /*OUT*/IROp* halver,
+                                   UInt laneSzBlg2 )
+{
+   if (laneSzBlg2 == 2) {
+      *doubler = Iop_InterleaveLO32x4;
+      *halver  = Iop_CatEvenLanes32x4;
+   } else if (laneSzBlg2 == 1) {
+      *doubler = Iop_InterleaveLO16x8;
+      *halver  = Iop_CatEvenLanes16x8;
+   } else if (laneSzBlg2 == 0) {
+      *doubler = Iop_InterleaveLO8x16;
+      *halver  = Iop_CatEvenLanes8x16;
+   } else {
+      vassert(0);
+   }
+}
+
+/* Do interleaving for 1 64 bit vector, for ST1 insns.  With only one
+   register there is nothing to interleave, so this is just a copy.
+   laneSzBlg2 is accepted for uniformity with the 2/3/4-register
+   variants but is unused here. */
+static
+void math_INTERLEAVE1_64( /*OUTx1*/ IRTemp* i0,
+                          UInt laneSzBlg2, IRTemp u0 )
+{
+   assign(*i0, mkexpr(u0));
+}
+
+
+/* Do interleaving for 2 64 bit vectors, for ST2 insns.  Clones each
+   input lane at double width, does a full 128-bit interleave at the
+   next lane size up, then keeps only the even lanes of each result.
+   The upper 64 bits of *i0/*i1 are junk which the caller must
+   ignore. */
+static
+void math_INTERLEAVE2_64( /*OUTx2*/ IRTemp* i0, IRTemp* i1,
+                          UInt laneSzBlg2, IRTemp u0, IRTemp u1 )
+{
+   if (laneSzBlg2 == 3) {
+      // 1x64, degenerate case: nothing to interleave.
+      assign(*i0, EX(u0));
+      assign(*i1, EX(u1));
+      return;
+   }
+
+   // laneSzBlg2 is unsigned, so only the upper bound needs checking;
+   // ">= 0" would be vacuously true.
+   vassert(laneSzBlg2 <= 2);
+   IROp doubler = Iop_INVALID, halver = Iop_INVALID;
+   math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
+
+   // Clone each input lane at double width.
+   IRTemp du0 = newTempV128();
+   IRTemp du1 = newTempV128();
+   assign(du0, binop(doubler, EX(u0), EX(u0)));
+   assign(du1, binop(doubler, EX(u1), EX(u1)));
+   // Full-width interleave, then drop every other lane.
+   IRTemp di0 = newTempV128();
+   IRTemp di1 = newTempV128();
+   math_INTERLEAVE2_128(&di0, &di1, laneSzBlg2 + 1, du0, du1);
+   assign(*i0, binop(halver, EX(di0), EX(di0)));
+   assign(*i1, binop(halver, EX(di1), EX(di1)));
+}
+
+
+/* Do interleaving for 3 64 bit vectors, for ST3 insns.  Clones each
+   input lane at double width, does a full 128-bit interleave at the
+   next lane size up, then keeps only the even lanes of each result.
+   The upper 64 bits of *i0..*i2 are junk which the caller must
+   ignore. */
+static
+void math_INTERLEAVE3_64( 
+        /*OUTx3*/ IRTemp* i0, IRTemp* i1, IRTemp* i2,
+        UInt laneSzBlg2,
+        IRTemp u0, IRTemp u1, IRTemp u2 )
+{
+   if (laneSzBlg2 == 3) {
+      // 1x64, degenerate case: nothing to interleave.
+      assign(*i0, EX(u0));
+      assign(*i1, EX(u1));
+      assign(*i2, EX(u2));
+      return;
+   }
+
+   // laneSzBlg2 is unsigned, so only the upper bound needs checking;
+   // ">= 0" would be vacuously true.
+   vassert(laneSzBlg2 <= 2);
+   IROp doubler = Iop_INVALID, halver = Iop_INVALID;
+   math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
+
+   // Clone each input lane at double width.
+   IRTemp du0 = newTempV128();
+   IRTemp du1 = newTempV128();
+   IRTemp du2 = newTempV128();
+   assign(du0, binop(doubler, EX(u0), EX(u0)));
+   assign(du1, binop(doubler, EX(u1), EX(u1)));
+   assign(du2, binop(doubler, EX(u2), EX(u2)));
+   // Full-width interleave, then drop every other lane.
+   IRTemp di0 = newTempV128();
+   IRTemp di1 = newTempV128();
+   IRTemp di2 = newTempV128();
+   math_INTERLEAVE3_128(&di0, &di1, &di2, laneSzBlg2 + 1, du0, du1, du2);
+   assign(*i0, binop(halver, EX(di0), EX(di0)));
+   assign(*i1, binop(halver, EX(di1), EX(di1)));
+   assign(*i2, binop(halver, EX(di2), EX(di2)));
+}
+
+
+/* Do interleaving for 4 64 bit vectors, for ST4 insns.  Clones each
+   input lane at double width, does a full 128-bit interleave at the
+   next lane size up, then keeps only the even lanes of each result.
+   The upper 64 bits of *i0..*i3 are junk which the caller must
+   ignore. */
+static
+void math_INTERLEAVE4_64( 
+        /*OUTx4*/ IRTemp* i0, IRTemp* i1, IRTemp* i2, IRTemp* i3,
+        UInt laneSzBlg2,
+        IRTemp u0, IRTemp u1, IRTemp u2, IRTemp u3 )
+{
+   if (laneSzBlg2 == 3) {
+      // 1x64, degenerate case: nothing to interleave.
+      assign(*i0, EX(u0));
+      assign(*i1, EX(u1));
+      assign(*i2, EX(u2));
+      assign(*i3, EX(u3));
+      return;
+   }
+
+   // laneSzBlg2 is unsigned, so only the upper bound needs checking;
+   // ">= 0" would be vacuously true.
+   vassert(laneSzBlg2 <= 2);
+   IROp doubler = Iop_INVALID, halver = Iop_INVALID;
+   math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
+
+   // Clone each input lane at double width.
+   IRTemp du0 = newTempV128();
+   IRTemp du1 = newTempV128();
+   IRTemp du2 = newTempV128();
+   IRTemp du3 = newTempV128();
+   assign(du0, binop(doubler, EX(u0), EX(u0)));
+   assign(du1, binop(doubler, EX(u1), EX(u1)));
+   assign(du2, binop(doubler, EX(u2), EX(u2)));
+   assign(du3, binop(doubler, EX(u3), EX(u3)));
+   // Full-width interleave, then drop every other lane.
+   IRTemp di0 = newTempV128();
+   IRTemp di1 = newTempV128();
+   IRTemp di2 = newTempV128();
+   IRTemp di3 = newTempV128();
+   math_INTERLEAVE4_128(&di0, &di1, &di2, &di3,
+                        laneSzBlg2 + 1, du0, du1, du2, du3);
+   assign(*i0, binop(halver, EX(di0), EX(di0)));
+   assign(*i1, binop(halver, EX(di1), EX(di1)));
+   assign(*i2, binop(halver, EX(di2), EX(di2)));
+   assign(*i3, binop(halver, EX(di3), EX(di3)));
+}
+
+
+/* Do deinterleaving for 1 64 bit vector, for LD1 insns.  With only
+   one register there is nothing to deinterleave, so this is just a
+   copy.  laneSzBlg2 is accepted for uniformity with the 2/3/4
+   register variants but is unused here. */
+static
+void math_DEINTERLEAVE1_64( /*OUTx1*/ IRTemp* u0,
+                            UInt laneSzBlg2, IRTemp i0 )
+{
+   assign(*u0, mkexpr(i0));
+}
+
+
+/* Do deinterleaving for 2 64 bit vectors, for LD2 insns.  Clones
+   each input lane at double width, does a full 128-bit deinterleave
+   at the next lane size up, then keeps only the even lanes of each
+   result.  The upper 64 bits of *u0/*u1 are junk which the caller
+   must ignore. */
+static
+void math_DEINTERLEAVE2_64( /*OUTx2*/ IRTemp* u0, IRTemp* u1,
+                            UInt laneSzBlg2, IRTemp i0, IRTemp i1 )
+{
+   if (laneSzBlg2 == 3) {
+      // 1x64, degenerate case: nothing to deinterleave.
+      assign(*u0, EX(i0));
+      assign(*u1, EX(i1));
+      return;
+   }
+
+   // laneSzBlg2 is unsigned, so only the upper bound needs checking;
+   // ">= 0" would be vacuously true.
+   vassert(laneSzBlg2 <= 2);
+   IROp doubler = Iop_INVALID, halver = Iop_INVALID;
+   math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
+
+   // Clone each input lane at double width.
+   IRTemp di0 = newTempV128();
+   IRTemp di1 = newTempV128();
+   assign(di0, binop(doubler, EX(i0), EX(i0)));
+   assign(di1, binop(doubler, EX(i1), EX(i1)));
+
+   // Full-width deinterleave, then drop every other lane.
+   IRTemp du0 = newTempV128();
+   IRTemp du1 = newTempV128();
+   math_DEINTERLEAVE2_128(&du0, &du1, laneSzBlg2 + 1, di0, di1);
+   assign(*u0, binop(halver, EX(du0), EX(du0)));
+   assign(*u1, binop(halver, EX(du1), EX(du1)));
+}
+
+
+/* Do deinterleaving for 3 64 bit vectors, for LD3 insns.  Clones
+   each input lane at double width, does a full 128-bit deinterleave
+   at the next lane size up, then keeps only the even lanes of each
+   result.  The upper 64 bits of *u0..*u2 are junk which the caller
+   must ignore. */
+static
+void math_DEINTERLEAVE3_64( 
+        /*OUTx3*/ IRTemp* u0, IRTemp* u1, IRTemp* u2,
+        UInt laneSzBlg2,
+        IRTemp i0, IRTemp i1, IRTemp i2 )
+{
+   if (laneSzBlg2 == 3) {
+      // 1x64, degenerate case: nothing to deinterleave.
+      assign(*u0, EX(i0));
+      assign(*u1, EX(i1));
+      assign(*u2, EX(i2));
+      return;
+   }
+
+   // laneSzBlg2 is unsigned, so only the upper bound needs checking;
+   // ">= 0" would be vacuously true.
+   vassert(laneSzBlg2 <= 2);
+   IROp doubler = Iop_INVALID, halver = Iop_INVALID;
+   math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
+
+   // Clone each input lane at double width.
+   IRTemp di0 = newTempV128();
+   IRTemp di1 = newTempV128();
+   IRTemp di2 = newTempV128();
+   assign(di0, binop(doubler, EX(i0), EX(i0)));
+   assign(di1, binop(doubler, EX(i1), EX(i1)));
+   assign(di2, binop(doubler, EX(i2), EX(i2)));
+   // Full-width deinterleave, then drop every other lane.
+   IRTemp du0 = newTempV128();
+   IRTemp du1 = newTempV128();
+   IRTemp du2 = newTempV128();
+   math_DEINTERLEAVE3_128(&du0, &du1, &du2, laneSzBlg2 + 1, di0, di1, di2);
+   assign(*u0, binop(halver, EX(du0), EX(du0)));
+   assign(*u1, binop(halver, EX(du1), EX(du1)));
+   assign(*u2, binop(halver, EX(du2), EX(du2)));
+}
+
+
+/* Do deinterleaving for 4 64 bit vectors, for LD4 insns.  Clones
+   each input lane at double width, does a full 128-bit deinterleave
+   at the next lane size up, then keeps only the even lanes of each
+   result.  The upper 64 bits of *u0..*u3 are junk which the caller
+   must ignore. */
+static
+void math_DEINTERLEAVE4_64( 
+        /*OUTx4*/ IRTemp* u0, IRTemp* u1, IRTemp* u2, IRTemp* u3,
+        UInt laneSzBlg2,
+        IRTemp i0, IRTemp i1, IRTemp i2, IRTemp i3 )
+{
+   if (laneSzBlg2 == 3) {
+      // 1x64, degenerate case: nothing to deinterleave.
+      assign(*u0, EX(i0));
+      assign(*u1, EX(i1));
+      assign(*u2, EX(i2));
+      assign(*u3, EX(i3));
+      return;
+   }
+
+   // laneSzBlg2 is unsigned, so only the upper bound needs checking;
+   // ">= 0" would be vacuously true.
+   vassert(laneSzBlg2 <= 2);
+   IROp doubler = Iop_INVALID, halver = Iop_INVALID;
+   math_get_doubler_and_halver(&doubler, &halver, laneSzBlg2);
+
+   // Clone each input lane at double width.
+   IRTemp di0 = newTempV128();
+   IRTemp di1 = newTempV128();
+   IRTemp di2 = newTempV128();
+   IRTemp di3 = newTempV128();
+   assign(di0, binop(doubler, EX(i0), EX(i0)));
+   assign(di1, binop(doubler, EX(i1), EX(i1)));
+   assign(di2, binop(doubler, EX(i2), EX(i2)));
+   assign(di3, binop(doubler, EX(i3), EX(i3)));
+   // Full-width deinterleave, then drop every other lane.
+   IRTemp du0 = newTempV128();
+   IRTemp du1 = newTempV128();
+   IRTemp du2 = newTempV128();
+   IRTemp du3 = newTempV128();
+   math_DEINTERLEAVE4_128(&du0, &du1, &du2, &du3,
+                          laneSzBlg2 + 1, di0, di1, di2, di3);
+   assign(*u0, binop(halver, EX(du0), EX(du0)));
+   assign(*u1, binop(halver, EX(du1), EX(du1)));
+   assign(*u2, binop(halver, EX(du2), EX(du2)));
+   assign(*u3, binop(halver, EX(du3), EX(du3)));
+}
+
+
+#undef EX
+#undef SL
+#undef ROR
+#undef ROL
+#undef SHR
+#undef SHL
+#undef ILO64x2
+#undef IHI64x2
+#undef ILO32x4
+#undef IHI32x4
+#undef ILO16x8
+#undef IHI16x8
+#undef ILO8x16
+#undef IHI8x16
+#undef CEV32x4
+#undef COD32x4
+#undef COD16x8
+#undef COD8x16
+#undef CEV8x16
+#undef AND
+#undef OR2
+#undef OR3
+#undef OR4
+
+
+/*------------------------------------------------------------*/
+/*--- Load and Store instructions                          ---*/
+/*------------------------------------------------------------*/
+
+/* Generate the EA for a "reg + reg" style amode.  This is done from
+   parts of the insn, but for sanity checking sake it takes the whole
+   insn.  This appears to depend on insn[15:12], with opt=insn[15:13]
+   and S=insn[12]:
+
+   The possible forms, along with their opt:S values, are:
+      011:0   Xn|SP + Xm
+      111:0   Xn|SP + Xm
+      011:1   Xn|SP + Xm * transfer_szB
+      111:1   Xn|SP + Xm * transfer_szB
+      010:0   Xn|SP + 32Uto64(Wm)
+      010:1   Xn|SP + 32Uto64(Wm) * transfer_szB
+      110:0   Xn|SP + 32Sto64(Wm)
+      110:1   Xn|SP + 32Sto64(Wm) * transfer_szB
+
+   Rm is insn[20:16].  Rn is insn[9:5].  Rt is insn[4:0].  Log2 of
+   the transfer size is insn[23,31,30].  For integer loads/stores,
+   insn[23] is zero, hence szLg2 can be at most 3 in such cases.
+
+   If the decoding fails, it returns IRTemp_INVALID.
+
+   isInt is True iff this is decoding is for transfers to/from integer
+   registers.  If False it is for transfers to/from vector registers.
+*/
+static IRTemp gen_indexed_EA ( /*OUT*/HChar* buf, UInt insn, Bool isInt )
+{
+   /* Field extraction, per the layout documented above:
+      optS = opt:S, mm = Rm, nn = Rn. */
+   UInt    optS  = SLICE_UInt(insn, 15, 12);
+   UInt    mm    = SLICE_UInt(insn, 20, 16);
+   UInt    nn    = SLICE_UInt(insn, 9, 5);
+   UInt    szLg2 = (isInt ? 0 : (SLICE_UInt(insn, 23, 23) << 2))
+                   | SLICE_UInt(insn, 31, 30); // Log2 of the size
+
+   /* Default to an empty disassembly string, for the failure paths. */
+   buf[0] = 0;
+
+   /* Sanity checks, that this really is a load/store insn. */
+   if (SLICE_UInt(insn, 11, 10) != BITS2(1,0))
+      goto fail;
+
+   if (isInt
+       && SLICE_UInt(insn, 29, 21) != BITS9(1,1,1,0,0,0,0,1,1)/*LDR*/
+       && SLICE_UInt(insn, 29, 21) != BITS9(1,1,1,0,0,0,0,0,1)/*STR*/
+       && SLICE_UInt(insn, 29, 21) != BITS9(1,1,1,0,0,0,1,0,1)/*LDRSbhw Xt*/
+       && SLICE_UInt(insn, 29, 21) != BITS9(1,1,1,0,0,0,1,1,1))/*LDRSbhw Wt*/
+      goto fail;
+
+   if (!isInt
+       && SLICE_UInt(insn, 29, 24) != BITS6(1,1,1,1,0,0)) /*LDR/STR*/
+      goto fail;
+
+   /* Throw out non-verified but possibly valid cases. */
+   switch (szLg2) {
+      case BITS3(0,0,0): break; //  8 bit, valid for both int and vec
+      case BITS3(0,0,1): break; // 16 bit, valid for both int and vec
+      case BITS3(0,1,0): break; // 32 bit, valid for both int and vec
+      case BITS3(0,1,1): break; // 64 bit, valid for both int and vec
+      case BITS3(1,0,0): // can only ever be valid for the vector case
+                         if (isInt) goto fail; else break;
+      case BITS3(1,0,1): // these sizes are never valid
+      case BITS3(1,1,0):
+      case BITS3(1,1,1): goto fail;
+
+      default: vassert(0);
+   }
+
+   /* Build the Rm-derived offset expression and the disassembly text
+      for each opt:S combination documented above.  //ATC == "awaiting
+      test case": believed-valid encodings that are rejected until
+      seen in the wild. */
+   IRExpr* rhs  = NULL;
+   switch (optS) {
+      case BITS4(1,1,1,0): goto fail; //ATC
+      case BITS4(0,1,1,0):
+         rhs = getIReg64orZR(mm);
+         vex_sprintf(buf, "[%s, %s]",
+                     nameIReg64orZR(nn), nameIReg64orZR(mm));
+         break;
+      case BITS4(1,1,1,1): goto fail; //ATC
+      case BITS4(0,1,1,1):
+         rhs = binop(Iop_Shl64, getIReg64orZR(mm), mkU8(szLg2));
+         vex_sprintf(buf, "[%s, %s lsl %u]",
+                     nameIReg64orZR(nn), nameIReg64orZR(mm), szLg2);
+         break;
+      case BITS4(0,1,0,0):
+         rhs = unop(Iop_32Uto64, getIReg32orZR(mm));
+         vex_sprintf(buf, "[%s, %s uxtx]",
+                     nameIReg64orZR(nn), nameIReg32orZR(mm));
+         break;
+      case BITS4(0,1,0,1):
+         rhs = binop(Iop_Shl64,
+                     unop(Iop_32Uto64, getIReg32orZR(mm)), mkU8(szLg2));
+         vex_sprintf(buf, "[%s, %s uxtx, lsl %u]",
+                     nameIReg64orZR(nn), nameIReg32orZR(mm), szLg2);
+         break;
+      case BITS4(1,1,0,0):
+         rhs = unop(Iop_32Sto64, getIReg32orZR(mm));
+         vex_sprintf(buf, "[%s, %s sxtx]",
+                     nameIReg64orZR(nn), nameIReg32orZR(mm));
+         break;
+      case BITS4(1,1,0,1):
+         rhs = binop(Iop_Shl64,
+                     unop(Iop_32Sto64, getIReg32orZR(mm)), mkU8(szLg2));
+         vex_sprintf(buf, "[%s, %s sxtx, lsl %u]",
+                     nameIReg64orZR(nn), nameIReg32orZR(mm), szLg2);
+         break;
+      default:
+         /* The rest appear to be genuinely invalid */
+         goto fail;
+   }
+
+   /* EA = Xn|SP + extended/shifted Rm. */
+   vassert(rhs);
+   IRTemp res = newTemp(Ity_I64);
+   assign(res, binop(Iop_Add64, getIReg64orSP(nn), rhs));
+   return res;
+
+  fail:
+   vex_printf("gen_indexed_EA: unhandled case optS == 0x%x\n", optS);
+   return IRTemp_INVALID;
+}
+
+
+/* Generate an 8/16/32/64 bit integer store to ADDR for the lowest
+   szB bytes of DATAE :: Ity_I64.  szB must be 1, 2, 4 or 8; the
+   value is narrowed from 64 bits as required before the store. */
+static void gen_narrowing_store ( UInt szB, IRTemp addr, IRExpr* dataE )
+{
+   IRExpr* addrE = mkexpr(addr);
+   if (szB == 8) {
+      // Full width -- no narrowing needed.
+      storeLE(addrE, dataE);
+   } else if (szB == 4) {
+      storeLE(addrE, unop(Iop_64to32, dataE));
+   } else if (szB == 2) {
+      storeLE(addrE, unop(Iop_64to16, dataE));
+   } else if (szB == 1) {
+      storeLE(addrE, unop(Iop_64to8, dataE));
+   } else {
+      vassert(0);
+   }
+}
+
+
+/* Generate an 8/16/32/64 bit unsigned widening load from ADDR,
+   placing the result in an Ity_I64 temporary.  szB must be 1, 2, 4
+   or 8; sub-64-bit loads are zero-extended to 64 bits. */
+static IRTemp gen_zwidening_load ( UInt szB, IRTemp addr )
+{
+   IRExpr* addrE = mkexpr(addr);
+   IRTemp  res   = newTemp(Ity_I64);
+   if (szB == 8) {
+      // Full width -- no widening needed.
+      assign(res, loadLE(Ity_I64, addrE));
+   } else if (szB == 4) {
+      assign(res, unop(Iop_32Uto64, loadLE(Ity_I32, addrE)));
+   } else if (szB == 2) {
+      assign(res, unop(Iop_16Uto64, loadLE(Ity_I16, addrE)));
+   } else if (szB == 1) {
+      assign(res, unop(Iop_8Uto64, loadLE(Ity_I8, addrE)));
+   } else {
+      vassert(0);
+   }
+   return res;
+}
+
+
+/* Generate a "standard 7" name, from bitQ and size.  But also
+   allow ".1d" since that's occasionally useful. */
+static
+const HChar* nameArr_Q_SZ ( UInt bitQ, UInt size )
+{
+   vassert(bitQ <= 1 && size <= 3);
+   // Row 0: 64-bit (Q=0) arrangements; row 1: 128-bit (Q=1) ones.
+   const HChar* nms[2][4]
+      = { { "8b", "4h", "2s", "1d" },
+          { "16b", "8h", "4s", "2d" } };
+   return nms[bitQ][size];
+}
+
+
+static
+Bool dis_ARM64_load_store(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+
+   /* ------------ LDR,STR (immediate, uimm12) ----------- */
+   /* uimm12 is scaled by the transfer size
+
+      31 29  26    21    9  4
+      |  |   |     |     |  |
+      11 111 00100 imm12 nn tt    STR  Xt, [Xn|SP, #imm12 * 8]
+      11 111 00101 imm12 nn tt    LDR  Xt, [Xn|SP, #imm12 * 8]
+
+      10 111 00100 imm12 nn tt    STR  Wt, [Xn|SP, #imm12 * 4]
+      10 111 00101 imm12 nn tt    LDR  Wt, [Xn|SP, #imm12 * 4]
+
+      01 111 00100 imm12 nn tt    STRH Wt, [Xn|SP, #imm12 * 2]
+      01 111 00101 imm12 nn tt    LDRH Wt, [Xn|SP, #imm12 * 2]
+
+      00 111 00100 imm12 nn tt    STRB Wt, [Xn|SP, #imm12 * 1]
+      00 111 00101 imm12 nn tt    LDRB Wt, [Xn|SP, #imm12 * 1]
+   */
+   if (INSN(29,23) == BITS7(1,1,1,0,0,1,0)) {
+      UInt   szLg2 = INSN(31,30);
+      UInt   szB   = 1 << szLg2;
+      Bool   isLD  = INSN(22,22) == 1;
+      UInt   offs  = INSN(21,10) * szB;
+      UInt   nn    = INSN(9,5);
+      UInt   tt    = INSN(4,0);
+      IRTemp ta    = newTemp(Ity_I64);
+      assign(ta, binop(Iop_Add64, getIReg64orSP(nn), mkU64(offs)));
+      if (nn == 31) { /* FIXME generate stack alignment check */ }
+      vassert(szLg2 < 4);
+      if (isLD) {
+         putIReg64orZR(tt, mkexpr(gen_zwidening_load(szB, ta)));
+      } else {
+         gen_narrowing_store(szB, ta, getIReg64orZR(tt));
+      }
+      const HChar* ld_name[4] = { "ldrb", "ldrh", "ldr", "ldr" };
+      const HChar* st_name[4] = { "strb", "strh", "str", "str" };
+      DIP("%s %s, [%s, #%u]\n", 
+          (isLD ? ld_name : st_name)[szLg2], nameIRegOrZR(szB == 8, tt),
+          nameIReg64orSP(nn), offs);
+      return True;
+   }
+
+   /* ------------ LDUR,STUR (immediate, simm9) ----------- */
+   /*
+      31 29  26      20   11 9  4
+      |  |   |       |    |  |  |
+      (at-Rn-then-Rn=EA)  |  |  |
+      sz 111 00000 0 imm9 01 Rn Rt   STR Rt, [Xn|SP], #simm9
+      sz 111 00001 0 imm9 01 Rn Rt   LDR Rt, [Xn|SP], #simm9
+
+      (at-EA-then-Rn=EA)
+      sz 111 00000 0 imm9 11 Rn Rt   STR Rt, [Xn|SP, #simm9]!
+      sz 111 00001 0 imm9 11 Rn Rt   LDR Rt, [Xn|SP, #simm9]!
+
+      (at-EA)
+      sz 111 00000 0 imm9 00 Rn Rt   STR Rt, [Xn|SP, #simm9]
+      sz 111 00001 0 imm9 00 Rn Rt   LDR Rt, [Xn|SP, #simm9]
+
+      simm9 is unscaled.
+
+      The case 'wback && Rn == Rt && Rt != 31' is disallowed.  In the
+      load case this is because would create two competing values for
+      Rt.  In the store case the reason is unclear, but the spec
+      disallows it anyway.
+
+      Stores are narrowing, loads are unsigned widening.  sz encodes
+      the transfer size in the normal way: 00=1, 01=2, 10=4, 11=8.
+   */
+   if ((INSN(29,21) & BITS9(1,1,1, 1,1,1,1,0, 1))
+       == BITS9(1,1,1, 0,0,0,0,0, 0)) {
+      UInt szLg2  = INSN(31,30);
+      UInt szB    = 1 << szLg2;
+      Bool isLoad = INSN(22,22) == 1;
+      UInt imm9   = INSN(20,12);
+      UInt nn     = INSN(9,5);
+      UInt tt     = INSN(4,0);
+      Bool wBack  = INSN(10,10) == 1;
+      UInt how    = INSN(11,10);
+      if (how == BITS2(1,0) || (wBack && nn == tt && tt != 31)) {
+         /* undecodable; fall through */
+      } else {
+         if (nn == 31) { /* FIXME generate stack alignment check */ }
+
+         // Compute the transfer address TA and the writeback address WA.
+         IRTemp tRN = newTemp(Ity_I64);
+         assign(tRN, getIReg64orSP(nn));
+         IRTemp tEA = newTemp(Ity_I64);
+         Long simm9 = (Long)sx_to_64(imm9, 9);
+         assign(tEA, binop(Iop_Add64, mkexpr(tRN), mkU64(simm9)));
+
+         IRTemp tTA = newTemp(Ity_I64);
+         IRTemp tWA = newTemp(Ity_I64);
+         switch (how) {
+            case BITS2(0,1):
+               assign(tTA, mkexpr(tRN)); assign(tWA, mkexpr(tEA)); break;
+            case BITS2(1,1):
+               assign(tTA, mkexpr(tEA)); assign(tWA, mkexpr(tEA)); break;
+            case BITS2(0,0):
+               assign(tTA, mkexpr(tEA)); /* tWA is unused */ break;
+            default:
+               vassert(0); /* NOTREACHED */
+         }
+
+         /* Normally rN would be updated after the transfer.  However, in
+            the special case typifed by
+               str x30, [sp,#-16]!
+            it is necessary to update SP before the transfer, (1)
+            because Memcheck will otherwise complain about a write
+            below the stack pointer, and (2) because the segfault
+            stack extension mechanism will otherwise extend the stack
+            only down to SP before the instruction, which might not be
+            far enough, if the -16 bit takes the actual access
+            address to the next page.
+         */
+         Bool earlyWBack
+           = wBack && simm9 < 0 && szB == 8
+             && how == BITS2(1,1) && nn == 31 && !isLoad && tt != nn;
+
+         if (wBack && earlyWBack)
+            putIReg64orSP(nn, mkexpr(tEA));
+
+         if (isLoad) {
+            putIReg64orZR(tt, mkexpr(gen_zwidening_load(szB, tTA)));
+         } else {
+            gen_narrowing_store(szB, tTA, getIReg64orZR(tt));
+         }
+
+         if (wBack && !earlyWBack)
+            putIReg64orSP(nn, mkexpr(tEA));
+
+         const HChar* ld_name[4] = { "ldurb", "ldurh", "ldur", "ldur" };
+         const HChar* st_name[4] = { "sturb", "sturh", "stur", "stur" };
+         const HChar* fmt_str = NULL;
+         switch (how) {
+            case BITS2(0,1):
+               fmt_str = "%s %s, [%s], #%lld (at-Rn-then-Rn=EA)\n";
+               break;
+            case BITS2(1,1):
+               fmt_str = "%s %s, [%s, #%lld]! (at-EA-then-Rn=EA)\n";
+               break;
+            case BITS2(0,0):
+               fmt_str = "%s %s, [%s, #%lld] (at-Rn)\n";
+               break;
+            default:
+               vassert(0);
+         }
+         DIP(fmt_str, (isLoad ? ld_name : st_name)[szLg2],
+                      nameIRegOrZR(szB == 8, tt),
+                      nameIReg64orSP(nn), simm9);
+         return True;
+      }
+   }
+
+   /* -------- LDP,STP (immediate, simm7) (INT REGS) -------- */
+   /* L==1 => mm==LD
+      L==0 => mm==ST
+      x==0 => 32 bit transfers, and zero extended loads
+      x==1 => 64 bit transfers
+      simm7 is scaled by the (single-register) transfer size
+
+      (at-Rn-then-Rn=EA)
+      x0 101 0001 L imm7 Rt2 Rn Rt1  mmP Rt1,Rt2, [Xn|SP], #imm
+   
+      (at-EA-then-Rn=EA)
+      x0 101 0011 L imm7 Rt2 Rn Rt1  mmP Rt1,Rt2, [Xn|SP, #imm]!
+
+      (at-EA)
+      x0 101 0010 L imm7 Rt2 Rn Rt1  mmP Rt1,Rt2, [Xn|SP, #imm]
+   */
+
+   UInt insn_30_23 = INSN(30,23);
+   if (insn_30_23 == BITS8(0,1,0,1,0,0,0,1) 
+       || insn_30_23 == BITS8(0,1,0,1,0,0,1,1)
+       || insn_30_23 == BITS8(0,1,0,1,0,0,1,0)) {
+      UInt bL     = INSN(22,22);
+      UInt bX     = INSN(31,31);
+      UInt bWBack = INSN(23,23);
+      UInt rT1    = INSN(4,0);
+      UInt rN     = INSN(9,5);
+      UInt rT2    = INSN(14,10);
+      Long simm7  = (Long)sx_to_64(INSN(21,15), 7);
+      if ((bWBack && (rT1 == rN || rT2 == rN) && rN != 31)
+          || (bL && rT1 == rT2)) {
+         /* undecodable; fall through */
+      } else {
+         if (rN == 31) { /* FIXME generate stack alignment check */ }
+
+         // Compute the transfer address TA and the writeback address WA.
+         IRTemp tRN = newTemp(Ity_I64);
+         assign(tRN, getIReg64orSP(rN));
+         IRTemp tEA = newTemp(Ity_I64);
+         simm7 = (bX ? 8 : 4) * simm7;
+         assign(tEA, binop(Iop_Add64, mkexpr(tRN), mkU64(simm7)));
+
+         IRTemp tTA = newTemp(Ity_I64);
+         IRTemp tWA = newTemp(Ity_I64);
+         switch (INSN(24,23)) {
+            case BITS2(0,1):
+               assign(tTA, mkexpr(tRN)); assign(tWA, mkexpr(tEA)); break;
+            case BITS2(1,1):
+               assign(tTA, mkexpr(tEA)); assign(tWA, mkexpr(tEA)); break;
+            case BITS2(1,0):
+               assign(tTA, mkexpr(tEA)); /* tWA is unused */ break;
+            default:
+               vassert(0); /* NOTREACHED */
+         }
+
+         /* Normally rN would be updated after the transfer.  However, in
+            the special case typifed by
+               stp x29, x30, [sp,#-112]!
+            it is necessary to update SP before the transfer, (1)
+            because Memcheck will otherwise complain about a write
+            below the stack pointer, and (2) because the segfault
+            stack extension mechanism will otherwise extend the stack
+            only down to SP before the instruction, which might not be
+            far enough, if the -112 bit takes the actual access
+            address to the next page.
+         */
+         Bool earlyWBack
+           = bWBack && simm7 < 0
+             && INSN(24,23) == BITS2(1,1) && rN == 31 && bL == 0;
+
+         if (bWBack && earlyWBack)
+            putIReg64orSP(rN, mkexpr(tEA));
+
+         /**/ if (bL == 1 && bX == 1) {
+            // 64 bit load
+            putIReg64orZR(rT1, loadLE(Ity_I64,
+                                      binop(Iop_Add64,mkexpr(tTA),mkU64(0))));
+            putIReg64orZR(rT2, loadLE(Ity_I64, 
+                                      binop(Iop_Add64,mkexpr(tTA),mkU64(8))));
+         } else if (bL == 1 && bX == 0) {
+            // 32 bit load
+            putIReg32orZR(rT1, loadLE(Ity_I32,
+                                      binop(Iop_Add64,mkexpr(tTA),mkU64(0))));
+            putIReg32orZR(rT2, loadLE(Ity_I32, 
+                                      binop(Iop_Add64,mkexpr(tTA),mkU64(4))));
+         } else if (bL == 0 && bX == 1) {
+            // 64 bit store
+            storeLE(binop(Iop_Add64,mkexpr(tTA),mkU64(0)),
+                    getIReg64orZR(rT1));
+            storeLE(binop(Iop_Add64,mkexpr(tTA),mkU64(8)),
+                    getIReg64orZR(rT2));
+         } else {
+            vassert(bL == 0 && bX == 0);
+            // 32 bit store
+            storeLE(binop(Iop_Add64,mkexpr(tTA),mkU64(0)),
+                    getIReg32orZR(rT1));
+            storeLE(binop(Iop_Add64,mkexpr(tTA),mkU64(4)),
+                    getIReg32orZR(rT2));
+         }
+
+         if (bWBack && !earlyWBack)
+            putIReg64orSP(rN, mkexpr(tEA));
+
+         const HChar* fmt_str = NULL;
+         switch (INSN(24,23)) {
+            case BITS2(0,1):
+               fmt_str = "%sp %s, %s, [%s], #%lld (at-Rn-then-Rn=EA)\n";
+               break;
+            case BITS2(1,1):
+               fmt_str = "%sp %s, %s, [%s, #%lld]! (at-EA-then-Rn=EA)\n";
+               break;
+            case BITS2(1,0):
+               fmt_str = "%sp %s, %s, [%s, #%lld] (at-Rn)\n";
+               break;
+            default:
+               vassert(0);
+         }
+         DIP(fmt_str, bL == 0 ? "st" : "ld",
+                      nameIRegOrZR(bX == 1, rT1),
+                      nameIRegOrZR(bX == 1, rT2),
+                      nameIReg64orSP(rN), simm7);
+         return True;
+      }
+   }
+
+   /* ---------------- LDR (literal, int reg) ---------------- */
+   /* 31 29      23    4
+      00 011 000 imm19 Rt   LDR   Wt, [PC + sxTo64(imm19 << 2)]
+      01 011 000 imm19 Rt   LDR   Xt, [PC + sxTo64(imm19 << 2)]
+      10 011 000 imm19 Rt   LDRSW Xt, [PC + sxTo64(imm19 << 2)]
+      11 011 000 imm19 Rt   prefetch  [PC + sxTo64(imm19 << 2)]
+      Just handles the first two cases for now.
+   */
+   if (INSN(29,24) == BITS6(0,1,1,0,0,0) && INSN(31,31) == 0) {
+      UInt  imm19 = INSN(23,5);
+      UInt  rT    = INSN(4,0);
+      UInt  bX    = INSN(30,30);
+      ULong ea    = guest_PC_curr_instr + sx_to_64(imm19 << 2, 21);
+      if (bX) {
+         putIReg64orZR(rT, loadLE(Ity_I64, mkU64(ea)));
+      } else {
+         putIReg32orZR(rT, loadLE(Ity_I32, mkU64(ea)));
+      }
+      DIP("ldr %s, 0x%llx (literal)\n", nameIRegOrZR(bX == 1, rT), ea);
+      return True;
+   }
+
+   /* -------------- {LD,ST}R (integer register) --------------- */
+   /* 31 29        20 15     12 11 9  4
+      |  |         |  |      |  |  |  |
+      11 111000011 Rm option S  10 Rn Rt  LDR  Xt, [Xn|SP, R<m>{ext/sh}]
+      10 111000011 Rm option S  10 Rn Rt  LDR  Wt, [Xn|SP, R<m>{ext/sh}]
+      01 111000011 Rm option S  10 Rn Rt  LDRH Wt, [Xn|SP, R<m>{ext/sh}]
+      00 111000011 Rm option S  10 Rn Rt  LDRB Wt, [Xn|SP, R<m>{ext/sh}]
+
+      11 111000001 Rm option S  10 Rn Rt  STR  Xt, [Xn|SP, R<m>{ext/sh}]
+      10 111000001 Rm option S  10 Rn Rt  STR  Wt, [Xn|SP, R<m>{ext/sh}]
+      01 111000001 Rm option S  10 Rn Rt  STRH Wt, [Xn|SP, R<m>{ext/sh}]
+      00 111000001 Rm option S  10 Rn Rt  STRB Wt, [Xn|SP, R<m>{ext/sh}]
+   */
+   if (INSN(29,23) == BITS7(1,1,1,0,0,0,0)
+       && INSN(21,21) == 1 && INSN(11,10) == BITS2(1,0)) {
+      HChar  dis_buf[64];
+      UInt   szLg2 = INSN(31,30);
+      Bool   isLD  = INSN(22,22) == 1;
+      UInt   tt    = INSN(4,0);
+      IRTemp ea    = gen_indexed_EA(dis_buf, insn, True/*to/from int regs*/);
+      if (ea != IRTemp_INVALID) {
+         switch (szLg2) {
+            case 3: /* 64 bit */
+               if (isLD) {
+                  putIReg64orZR(tt, loadLE(Ity_I64, mkexpr(ea)));
+                  DIP("ldr %s, %s\n", nameIReg64orZR(tt), dis_buf);
+               } else {
+                  storeLE(mkexpr(ea), getIReg64orZR(tt));
+                  DIP("str %s, %s\n", nameIReg64orZR(tt), dis_buf);
+               }
+               break;
+            case 2: /* 32 bit */
+               if (isLD) {
+                  putIReg32orZR(tt, loadLE(Ity_I32, mkexpr(ea)));
+                  DIP("ldr %s, %s\n", nameIReg32orZR(tt), dis_buf);
+               } else {
+                  storeLE(mkexpr(ea), getIReg32orZR(tt));
+                  DIP("str %s, %s\n", nameIReg32orZR(tt), dis_buf);
+               }
+               break;
+            case 1: /* 16 bit */
+               if (isLD) {
+                  putIReg64orZR(tt, unop(Iop_16Uto64,
+                                         loadLE(Ity_I16, mkexpr(ea))));
+                  DIP("ldruh %s, %s\n", nameIReg32orZR(tt), dis_buf);
+               } else {
+                  storeLE(mkexpr(ea), unop(Iop_64to16, getIReg64orZR(tt)));
+                  DIP("strh %s, %s\n", nameIReg32orZR(tt), dis_buf);
+               }
+               break;
+            case 0: /* 8 bit */
+               if (isLD) {
+                  putIReg64orZR(tt, unop(Iop_8Uto64,
+                                         loadLE(Ity_I8, mkexpr(ea))));
+                  DIP("ldrub %s, %s\n", nameIReg32orZR(tt), dis_buf);
+               } else {
+                  storeLE(mkexpr(ea), unop(Iop_64to8, getIReg64orZR(tt)));
+                  DIP("strb %s, %s\n", nameIReg32orZR(tt), dis_buf);
+               }
+               break;
+            default:
+               vassert(0);
+         }
+         return True;
+      }
+   }
+
+   /* -------------- LDRS{B,H,W} (uimm12) -------------- */
+   /* 31 29  26  23 21    9 4
+      10 111 001 10 imm12 n t   LDRSW Xt, [Xn|SP, #pimm12 * 4]
+      01 111 001 1x imm12 n t   LDRSH Rt, [Xn|SP, #pimm12 * 2]
+      00 111 001 1x imm12 n t   LDRSB Rt, [Xn|SP, #pimm12 * 1]
+      where
+         Rt is Wt when x==1, Xt when x==0
+   */
+   if (INSN(29,23) == BITS7(1,1,1,0,0,1,1)) {
+      /* Further checks on bits 31:30 and 22 */
+      Bool valid = False;
+      switch ((INSN(31,30) << 1) | INSN(22,22)) {
+         case BITS3(1,0,0):
+         case BITS3(0,1,0): case BITS3(0,1,1):
+         case BITS3(0,0,0): case BITS3(0,0,1):
+            valid = True;
+            break;
+      }
+      if (valid) {
+         UInt    szLg2 = INSN(31,30);
+         UInt    bitX  = INSN(22,22);
+         UInt    imm12 = INSN(21,10);
+         UInt    nn    = INSN(9,5);
+         UInt    tt    = INSN(4,0);
+         UInt    szB   = 1 << szLg2;
+         IRExpr* ea    = binop(Iop_Add64,
+                               getIReg64orSP(nn), mkU64(imm12 * szB));
+         switch (szB) {
+            case 4:
+               vassert(bitX == 0);
+               putIReg64orZR(tt, unop(Iop_32Sto64, loadLE(Ity_I32, ea)));
+               DIP("ldrsw %s, [%s, #%u]\n", nameIReg64orZR(tt),
+                   nameIReg64orSP(nn), imm12 * szB);
+               break;
+            case 2:
+               if (bitX == 1) {
+                  putIReg32orZR(tt, unop(Iop_16Sto32, loadLE(Ity_I16, ea)));
+               } else {
+                  putIReg64orZR(tt, unop(Iop_16Sto64, loadLE(Ity_I16, ea)));
+               }
+               DIP("ldrsh %s, [%s, #%u]\n",
+                   nameIRegOrZR(bitX == 0, tt),
+                   nameIReg64orSP(nn), imm12 * szB);
+               break;
+            case 1:
+               if (bitX == 1) {
+                  putIReg32orZR(tt, unop(Iop_8Sto32, loadLE(Ity_I8, ea)));
+               } else {
+                  putIReg64orZR(tt, unop(Iop_8Sto64, loadLE(Ity_I8, ea)));
+               }
+               DIP("ldrsb %s, [%s, #%u]\n",
+                   nameIRegOrZR(bitX == 0, tt),
+                   nameIReg64orSP(nn), imm12 * szB);
+               break;
+            default:
+               vassert(0);
+         }
+         return True;
+      }
+      /* else fall through */
+   }
+
+   /* -------------- LDRS{B,H,W} (simm9, upd) -------------- */
+   /* (at-Rn-then-Rn=EA)
+      31 29      23 21 20   11 9 4
+      00 111 000 1x 0  imm9 01 n t  LDRSB Rt, [Xn|SP], #simm9
+      01 111 000 1x 0  imm9 01 n t  LDRSH Rt, [Xn|SP], #simm9
+      10 111 000 10 0  imm9 01 n t  LDRSW Xt, [Xn|SP], #simm9
+
+      (at-EA-then-Rn=EA)
+      00 111 000 1x 0  imm9 11 n t  LDRSB Rt, [Xn|SP, #simm9]!
+      01 111 000 1x 0  imm9 11 n t  LDRSH Rt, [Xn|SP, #simm9]!
+      10 111 000 10 0  imm9 11 n t  LDRSW Xt, [Xn|SP, #simm9]!      
+      where
+         Rt is Wt when x==1, Xt when x==0
+         transfer-at-Rn when [11]==0, at EA when [11]==1
+   */
+   if (INSN(29,23) == BITS7(1,1,1,0,0,0,1)
+       && INSN(21,21) == 0 && INSN(10,10) == 1) {
+      /* Further checks on bits 31:30 and 22 */
+      Bool valid = False;
+      switch ((INSN(31,30) << 1) | INSN(22,22)) {
+         case BITS3(1,0,0):                    // LDRSW Xt
+         case BITS3(0,1,0): case BITS3(0,1,1): // LDRSH Xt, Wt
+         case BITS3(0,0,0): case BITS3(0,0,1): // LDRSB Xt, Wt
+            valid = True;
+            break;
+      }
+      if (valid) {
+         UInt   szLg2 = INSN(31,30);
+         UInt   imm9  = INSN(20,12);
+         Bool   atRN  = INSN(11,11) == 0;
+         UInt   nn    = INSN(9,5);
+         UInt   tt    = INSN(4,0);
+         IRTemp tRN   = newTemp(Ity_I64);
+         IRTemp tEA   = newTemp(Ity_I64);
+         IRTemp tTA   = IRTemp_INVALID;
+         ULong  simm9 = sx_to_64(imm9, 9);
+         Bool   is64  = INSN(22,22) == 0;
+         assign(tRN, getIReg64orSP(nn));
+         assign(tEA, binop(Iop_Add64, mkexpr(tRN), mkU64(simm9)));
+         tTA = atRN ? tRN : tEA;
+         HChar ch = '?';
+         /* There are 5 cases: 
+               byte     load,           SX to 64
+               byte     load, SX to 32, ZX to 64
+               halfword load,           SX to 64
+               halfword load, SX to 32, ZX to 64
+               word     load,           SX to 64
+            The ifs below handle them in the listed order.
+         */
+         if (szLg2 == 0) {
+            ch = 'b';
+            if (is64) {
+               putIReg64orZR(tt, unop(Iop_8Sto64,
+                                      loadLE(Ity_I8, mkexpr(tTA))));
+            } else {
+               putIReg32orZR(tt, unop(Iop_8Sto32,
+                                      loadLE(Ity_I8, mkexpr(tTA))));
+            }
+         }
+         else if (szLg2 == 1) {
+            ch = 'h';
+            if (is64) {
+               putIReg64orZR(tt, unop(Iop_16Sto64,
+                                      loadLE(Ity_I16, mkexpr(tTA))));
+            } else {
+               putIReg32orZR(tt, unop(Iop_16Sto32,
+                                      loadLE(Ity_I16, mkexpr(tTA))));
+            }
+         }
+         else if (szLg2 == 2 && is64) {
+            ch = 'w';
+            putIReg64orZR(tt, unop(Iop_32Sto64,
+                                   loadLE(Ity_I32, mkexpr(tTA))));
+         }
+         else {
+            vassert(0);
+         }
+         putIReg64orSP(nn, mkexpr(tEA));
+         DIP(atRN ? "ldrs%c %s, [%s], #%lld\n" : "ldrs%c %s, [%s, #%lld]!",
+             ch, nameIRegOrZR(is64, tt), nameIReg64orSP(nn), simm9);
+         return True;
+      }
+      /* else fall through */
+   }
+
+   /* -------------- LDRS{B,H,W} (simm9, noUpd) -------------- */
+   /* 31 29      23 21 20   11 9 4
+      00 111 000 1x 0  imm9 00 n t  LDURSB Rt, [Xn|SP, #simm9]
+      01 111 000 1x 0  imm9 00 n t  LDURSH Rt, [Xn|SP, #simm9]
+      10 111 000 10 0  imm9 00 n t  LDURSW Xt, [Xn|SP, #simm9]
+      where
+         Rt is Wt when x==1, Xt when x==0
+   */
+   if (INSN(29,23) == BITS7(1,1,1,0,0,0,1)
+       && INSN(21,21) == 0 && INSN(11,10) == BITS2(0,0)) {
+      /* Further checks on bits 31:30 and 22 */
+      Bool valid = False;
+      switch ((INSN(31,30) << 1) | INSN(22,22)) {
+         case BITS3(1,0,0):                    // LDURSW Xt
+         case BITS3(0,1,0): case BITS3(0,1,1): // LDURSH Xt, Wt
+         case BITS3(0,0,0): case BITS3(0,0,1): // LDURSB Xt, Wt
+            valid = True;
+            break;
+      }
+      if (valid) {
+         UInt   szLg2 = INSN(31,30);
+         UInt   imm9  = INSN(20,12);
+         UInt   nn    = INSN(9,5);
+         UInt   tt    = INSN(4,0);
+         IRTemp tRN   = newTemp(Ity_I64);
+         IRTemp tEA   = newTemp(Ity_I64);
+         ULong  simm9 = sx_to_64(imm9, 9);
+         Bool   is64  = INSN(22,22) == 0;
+         assign(tRN, getIReg64orSP(nn));
+         assign(tEA, binop(Iop_Add64, mkexpr(tRN), mkU64(simm9)));
+         HChar ch = '?';
+         /* There are 5 cases: 
+               byte     load,           SX to 64
+               byte     load, SX to 32, ZX to 64
+               halfword load,           SX to 64
+               halfword load, SX to 32, ZX to 64
+               word     load,           SX to 64
+            The ifs below handle them in the listed order.
+         */
+         if (szLg2 == 0) {
+            ch = 'b';
+            if (is64) {
+               putIReg64orZR(tt, unop(Iop_8Sto64,
+                                      loadLE(Ity_I8, mkexpr(tEA))));
+            } else {
+               putIReg32orZR(tt, unop(Iop_8Sto32,
+                                      loadLE(Ity_I8, mkexpr(tEA))));
+            }
+         }
+         else if (szLg2 == 1) {
+            ch = 'h';
+            if (is64) {
+               putIReg64orZR(tt, unop(Iop_16Sto64,
+                                      loadLE(Ity_I16, mkexpr(tEA))));
+            } else {
+               putIReg32orZR(tt, unop(Iop_16Sto32,
+                                      loadLE(Ity_I16, mkexpr(tEA))));
+            }
+         }
+         else if (szLg2 == 2 && is64) {
+            ch = 'w';
+            putIReg64orZR(tt, unop(Iop_32Sto64,
+                                   loadLE(Ity_I32, mkexpr(tEA))));
+         }
+         else {
+            vassert(0);
+         }
+         DIP("ldurs%c %s, [%s, #%lld]",
+             ch, nameIRegOrZR(is64, tt), nameIReg64orSP(nn), simm9);
+         return True;
+      }
+      /* else fall through */
+   }
+
+   /* -------- LDP,STP (immediate, simm7) (FP&VEC) -------- */
+   /* L==1    => mm==LD
+      L==0    => mm==ST
+      sz==00  => 32 bit (S) transfers
+      sz==01  => 64 bit (D) transfers
+      sz==10  => 128 bit (Q) transfers
+      sz==11  isn't allowed
+      simm7 is scaled by the (single-register) transfer size
+
+      31 29  26   22 21   14 9 4
+
+      sz 101 1000 L  imm7 t2 n t1   mmNP SDQt1, SDQt2, [Xn|SP, #imm]
+                                    (at-EA, with nontemporal hint)
+
+      sz 101 1001 L  imm7 t2 n t1   mmP SDQt1, SDQt2, [Xn|SP], #imm
+                                    (at-Rn-then-Rn=EA)
+
+      sz 101 1010 L  imm7 t2 n t1   mmP SDQt1, SDQt2, [Xn|SP, #imm]
+                                    (at-EA)
+
+      sz 101 1011 L  imm7 t2 n t1   mmP SDQt1, SDQt2, [Xn|SP, #imm]!
+                                    (at-EA-then-Rn=EA)
+   */
+   if (INSN(29,25) == BITS5(1,0,1,1,0)) {
+      UInt szSlg2 = INSN(31,30); // log2 of the xfer size in 32-bit units
+      Bool isLD   = INSN(22,22) == 1;
+      Bool wBack  = INSN(23,23) == 1;
+      Long simm7  = (Long)sx_to_64(INSN(21,15), 7);
+      UInt tt2    = INSN(14,10);
+      UInt nn     = INSN(9,5);
+      UInt tt1    = INSN(4,0);
+      if (szSlg2 == BITS2(1,1) || (isLD && tt1 == tt2)) {
+         /* undecodable; fall through */
+      } else {
+         if (nn == 31) { /* FIXME generate stack alignment check */ }
+
+         // Compute the transfer address TA and the writeback address WA.
+         UInt   szB = 4 << szSlg2; /* szB is the per-register size */
+         IRTemp tRN = newTemp(Ity_I64);
+         assign(tRN, getIReg64orSP(nn));
+         IRTemp tEA = newTemp(Ity_I64);
+         simm7 = szB * simm7;
+         assign(tEA, binop(Iop_Add64, mkexpr(tRN), mkU64(simm7)));
+
+         IRTemp tTA = newTemp(Ity_I64);
+         IRTemp tWA = newTemp(Ity_I64);
+         switch (INSN(24,23)) {
+            case BITS2(0,1):
+               assign(tTA, mkexpr(tRN)); assign(tWA, mkexpr(tEA)); break;
+            case BITS2(1,1):
+               assign(tTA, mkexpr(tEA)); assign(tWA, mkexpr(tEA)); break;
+            case BITS2(1,0):
+            case BITS2(0,0):
+               assign(tTA, mkexpr(tEA)); /* tWA is unused */ break;
+            default:
+               vassert(0); /* NOTREACHED */
+         }
+
+         IRType ty = Ity_INVALID;
+         switch (szB) {
+            case 4:  ty = Ity_F32;  break;
+            case 8:  ty = Ity_F64;  break;
+            case 16: ty = Ity_V128; break;
+            default: vassert(0);
+         }
+
+         /* Normally rN would be updated after the transfer.  However, in
+            the special cases typified by
+               stp q0, q1, [sp,#-512]!
+               stp d0, d1, [sp,#-512]!
+               stp s0, s1, [sp,#-512]!
+            it is necessary to update SP before the transfer, (1)
+            because Memcheck will otherwise complain about a write
+            below the stack pointer, and (2) because the segfault
+            stack extension mechanism will otherwise extend the stack
+            only down to SP before the instruction, which might not be
+            far enough, if the -512 offset takes the actual access
+            address to the next page.
+         */
+         Bool earlyWBack
+           = wBack && simm7 < 0
+             && INSN(24,23) == BITS2(1,1) && nn == 31 && !isLD;
+
+         if (wBack && earlyWBack)
+            putIReg64orSP(nn, mkexpr(tEA));
+
+         if (isLD) {
+            if (szB < 16) {
+               putQReg128(tt1, mkV128(0x0000));
+            }
+            putQRegLO(tt1,
+                      loadLE(ty, binop(Iop_Add64, mkexpr(tTA), mkU64(0))));
+            if (szB < 16) {
+               putQReg128(tt2, mkV128(0x0000));
+            }
+            putQRegLO(tt2,
+                      loadLE(ty, binop(Iop_Add64, mkexpr(tTA), mkU64(szB))));
+         } else {
+            storeLE(binop(Iop_Add64, mkexpr(tTA), mkU64(0)),
+                    getQRegLO(tt1, ty));
+            storeLE(binop(Iop_Add64, mkexpr(tTA), mkU64(szB)),
+                    getQRegLO(tt2, ty));
+         }
+
+         if (wBack && !earlyWBack)
+            putIReg64orSP(nn, mkexpr(tEA));
+
+         const HChar* fmt_str = NULL;
+         switch (INSN(24,23)) {
+            case BITS2(0,1):
+               fmt_str = "%sp %s, %s, [%s], #%lld (at-Rn-then-Rn=EA)\n";
+               break;
+            case BITS2(1,1):
+               fmt_str = "%sp %s, %s, [%s, #%lld]! (at-EA-then-Rn=EA)\n";
+               break;
+            case BITS2(1,0):
+               fmt_str = "%sp %s, %s, [%s, #%lld] (at-Rn)\n";
+               break;
+            case BITS2(0,0):
+               fmt_str = "%snp %s, %s, [%s, #%lld] (at-Rn)\n";
+               break;
+            default:
+               vassert(0);
+         }
+         DIP(fmt_str, isLD ? "ld" : "st",
+                      nameQRegLO(tt1, ty), nameQRegLO(tt2, ty),
+                      nameIReg64orSP(nn), simm7);
+         return True;
+      }
+   }
+
+   /* -------------- {LD,ST}R (vector register) --------------- */
+   /* 31 29     23  20 15     12 11 9  4
+      |  |      |   |  |      |  |  |  |
+      00 111100 011 Rm option S  10 Rn Rt  LDR Bt, [Xn|SP, R<m>{ext/sh}]
+      01 111100 011 Rm option S  10 Rn Rt  LDR Ht, [Xn|SP, R<m>{ext/sh}]
+      10 111100 011 Rm option S  10 Rn Rt  LDR St, [Xn|SP, R<m>{ext/sh}]
+      11 111100 011 Rm option S  10 Rn Rt  LDR Dt, [Xn|SP, R<m>{ext/sh}]
+      00 111100 111 Rm option S  10 Rn Rt  LDR Qt, [Xn|SP, R<m>{ext/sh}]
+
+      00 111100 001 Rm option S  10 Rn Rt  STR Bt, [Xn|SP, R<m>{ext/sh}]
+      01 111100 001 Rm option S  10 Rn Rt  STR Ht, [Xn|SP, R<m>{ext/sh}]
+      10 111100 001 Rm option S  10 Rn Rt  STR St, [Xn|SP, R<m>{ext/sh}]
+      11 111100 001 Rm option S  10 Rn Rt  STR Dt, [Xn|SP, R<m>{ext/sh}]
+      00 111100 101 Rm option S  10 Rn Rt  STR Qt, [Xn|SP, R<m>{ext/sh}]
+   */
+   if (INSN(29,24) == BITS6(1,1,1,1,0,0)
+       && INSN(21,21) == 1 && INSN(11,10) == BITS2(1,0)) {
+      HChar  dis_buf[64];
+      UInt   szLg2 = (INSN(23,23) << 2) | INSN(31,30);
+      Bool   isLD  = INSN(22,22) == 1;
+      UInt   tt    = INSN(4,0);
+      if (szLg2 > 4) goto after_LDR_STR_vector_register;
+      IRTemp ea    = gen_indexed_EA(dis_buf, insn, False/*to/from vec regs*/);
+      if (ea == IRTemp_INVALID) goto after_LDR_STR_vector_register;
+      switch (szLg2) {
+         case 0: /* 8 bit */
+            if (isLD) {
+               putQReg128(tt, mkV128(0x0000));
+               putQRegLO(tt, loadLE(Ity_I8, mkexpr(ea)));
+               DIP("ldr %s, %s\n", nameQRegLO(tt, Ity_I8), dis_buf);
+            } else {
+               storeLE(mkexpr(ea), getQRegLO(tt, Ity_I8));
+               DIP("str %s, %s\n", nameQRegLO(tt, Ity_I8), dis_buf);
+            }
+            break;
+         case 1:
+            if (isLD) {
+               putQReg128(tt, mkV128(0x0000));
+               putQRegLO(tt, loadLE(Ity_I16, mkexpr(ea)));
+               DIP("ldr %s, %s\n", nameQRegLO(tt, Ity_I16), dis_buf);
+            } else {
+               storeLE(mkexpr(ea), getQRegLO(tt, Ity_I16));
+               DIP("str %s, %s\n", nameQRegLO(tt, Ity_I16), dis_buf);
+            }
+            break;
+         case 2: /* 32 bit */
+            if (isLD) {
+               putQReg128(tt, mkV128(0x0000));
+               putQRegLO(tt, loadLE(Ity_I32, mkexpr(ea)));
+               DIP("ldr %s, %s\n", nameQRegLO(tt, Ity_I32), dis_buf);
+            } else {
+               storeLE(mkexpr(ea), getQRegLO(tt, Ity_I32));
+               DIP("str %s, %s\n", nameQRegLO(tt, Ity_I32), dis_buf);
+            }
+            break;
+         case 3: /* 64 bit */
+            if (isLD) {
+               putQReg128(tt, mkV128(0x0000));
+               putQRegLO(tt, loadLE(Ity_I64, mkexpr(ea)));
+               DIP("ldr %s, %s\n", nameQRegLO(tt, Ity_I64), dis_buf);
+            } else {
+               storeLE(mkexpr(ea), getQRegLO(tt, Ity_I64));
+               DIP("str %s, %s\n", nameQRegLO(tt, Ity_I64), dis_buf);
+            }
+            break;
+         case 4:
+            if (isLD) {
+               putQReg128(tt, loadLE(Ity_V128, mkexpr(ea)));
+               DIP("ldr %s, %s\n", nameQReg128(tt), dis_buf);
+            } else {
+               storeLE(mkexpr(ea), getQReg128(tt));
+               DIP("str %s, %s\n", nameQReg128(tt), dis_buf);
+            }
+            break;
+         default:
+            vassert(0);
+      }
+      return True;
+   }
+  after_LDR_STR_vector_register:
+
+   /* ---------- LDRS{B,H,W} (integer register, SX) ---------- */
+   /* 31 29      22 20 15  12 11 9  4
+      |  |       |  |  |   |  |  |  |
+      10 1110001 01 Rm opt S 10 Rn Rt    LDRSW Xt, [Xn|SP, R<m>{ext/sh}]
+
+      01 1110001 01 Rm opt S 10 Rn Rt    LDRSH Xt, [Xn|SP, R<m>{ext/sh}]
+      01 1110001 11 Rm opt S 10 Rn Rt    LDRSH Wt, [Xn|SP, R<m>{ext/sh}]
+
+      00 1110001 01 Rm opt S 10 Rn Rt    LDRSB Xt, [Xn|SP, R<m>{ext/sh}]
+      00 1110001 11 Rm opt S 10 Rn Rt    LDRSB Wt, [Xn|SP, R<m>{ext/sh}]
+   */
+   if (INSN(29,23) == BITS7(1,1,1,0,0,0,1)
+       && INSN(21,21) == 1 && INSN(11,10) == BITS2(1,0)) {
+      HChar  dis_buf[64];
+      UInt   szLg2  = INSN(31,30);
+      Bool   sxTo64 = INSN(22,22) == 0; // else sx to 32 and zx to 64
+      UInt   tt     = INSN(4,0);
+      if (szLg2 == 3) goto after_LDRS_integer_register;
+      IRTemp ea     = gen_indexed_EA(dis_buf, insn, True/*to/from int regs*/);
+      if (ea == IRTemp_INVALID) goto after_LDRS_integer_register;
+      /* Enumerate the 5 variants explicitly. */
+      if (szLg2 == 2/*32 bit*/ && sxTo64) {
+         putIReg64orZR(tt, unop(Iop_32Sto64, loadLE(Ity_I32, mkexpr(ea))));
+         DIP("ldrsw %s, %s\n", nameIReg64orZR(tt), dis_buf);
+         return True;
+      }
+      else
+      if (szLg2 == 1/*16 bit*/) {
+         if (sxTo64) {
+            putIReg64orZR(tt, unop(Iop_16Sto64, loadLE(Ity_I16, mkexpr(ea))));
+            DIP("ldrsh %s, %s\n", nameIReg64orZR(tt), dis_buf);
+         } else {
+            putIReg32orZR(tt, unop(Iop_16Sto32, loadLE(Ity_I16, mkexpr(ea))));
+            DIP("ldrsh %s, %s\n", nameIReg32orZR(tt), dis_buf);
+         }
+         return True;
+      }
+      else
+      if (szLg2 == 0/*8 bit*/) {
+         if (sxTo64) {
+            putIReg64orZR(tt, unop(Iop_8Sto64, loadLE(Ity_I8, mkexpr(ea))));
+            DIP("ldrsb %s, %s\n", nameIReg64orZR(tt), dis_buf);
+         } else {
+            putIReg32orZR(tt, unop(Iop_8Sto32, loadLE(Ity_I8, mkexpr(ea))));
+            DIP("ldrsb %s, %s\n", nameIReg32orZR(tt), dis_buf);
+         }
+         return True;
+      }
+      /* else it's an invalid combination */
+   }
+  after_LDRS_integer_register:
+
+   /* -------- LDR/STR (immediate, SIMD&FP, unsigned offset) -------- */
+   /* This is the Unsigned offset variant only.  The Post-Index and
+      Pre-Index variants are below.
+
+      31 29      23 21    9 4
+      00 111 101 01 imm12 n t   LDR Bt, [Xn|SP + imm12 * 1]
+      01 111 101 01 imm12 n t   LDR Ht, [Xn|SP + imm12 * 2]
+      10 111 101 01 imm12 n t   LDR St, [Xn|SP + imm12 * 4]
+      11 111 101 01 imm12 n t   LDR Dt, [Xn|SP + imm12 * 8]
+      00 111 101 11 imm12 n t   LDR Qt, [Xn|SP + imm12 * 16]
+
+      00 111 101 00 imm12 n t   STR Bt, [Xn|SP + imm12 * 1]
+      01 111 101 00 imm12 n t   STR Ht, [Xn|SP + imm12 * 2]
+      10 111 101 00 imm12 n t   STR St, [Xn|SP + imm12 * 4]
+      11 111 101 00 imm12 n t   STR Dt, [Xn|SP + imm12 * 8]
+      00 111 101 10 imm12 n t   STR Qt, [Xn|SP + imm12 * 16]
+   */
+   if (INSN(29,24) == BITS6(1,1,1,1,0,1)
+       && ((INSN(23,23) << 2) | INSN(31,30)) <= 4) {
+      UInt   szLg2  = (INSN(23,23) << 2) | INSN(31,30);
+      Bool   isLD   = INSN(22,22) == 1;
+      UInt   pimm12 = INSN(21,10) << szLg2;
+      UInt   nn     = INSN(9,5);
+      UInt   tt     = INSN(4,0);
+      IRTemp tEA    = newTemp(Ity_I64);
+      IRType ty     = preferredVectorSubTypeFromSize(1 << szLg2);
+      assign(tEA, binop(Iop_Add64, getIReg64orSP(nn), mkU64(pimm12)));
+      if (isLD) {
+         if (szLg2 < 4) {
+            putQReg128(tt, mkV128(0x0000));
+         }
+         putQRegLO(tt, loadLE(ty, mkexpr(tEA)));
+      } else {
+         storeLE(mkexpr(tEA), getQRegLO(tt, ty));
+      }
+      DIP("%s %s, [%s, #%u]\n",
+          isLD ? "ldr" : "str",
+          nameQRegLO(tt, ty), nameIReg64orSP(nn), pimm12);
+      return True;
+   }
+
+   /* -------- LDR/STR (immediate, SIMD&FP, pre/post index) -------- */
+   /* These are the Post-Index and Pre-Index variants.
+
+      31 29      23   20   11 9 4
+      (at-Rn-then-Rn=EA)
+      00 111 100 01 0 imm9 01 n t   LDR Bt, [Xn|SP], #simm
+      01 111 100 01 0 imm9 01 n t   LDR Ht, [Xn|SP], #simm
+      10 111 100 01 0 imm9 01 n t   LDR St, [Xn|SP], #simm
+      11 111 100 01 0 imm9 01 n t   LDR Dt, [Xn|SP], #simm
+      00 111 100 11 0 imm9 01 n t   LDR Qt, [Xn|SP], #simm
+
+      (at-EA-then-Rn=EA)
+      00 111 100 01 0 imm9 11 n t   LDR Bt, [Xn|SP, #simm]!
+      01 111 100 01 0 imm9 11 n t   LDR Ht, [Xn|SP, #simm]!
+      10 111 100 01 0 imm9 11 n t   LDR St, [Xn|SP, #simm]!
+      11 111 100 01 0 imm9 11 n t   LDR Dt, [Xn|SP, #simm]!
+      00 111 100 11 0 imm9 11 n t   LDR Qt, [Xn|SP, #simm]!
+
+      Stores are the same except with bit 22 set to 0.
+   */
+   if (INSN(29,24) == BITS6(1,1,1,1,0,0)
+       && ((INSN(23,23) << 2) | INSN(31,30)) <= 4
+       && INSN(21,21) == 0 && INSN(10,10) == 1) {
+      UInt   szLg2  = (INSN(23,23) << 2) | INSN(31,30);
+      Bool   isLD   = INSN(22,22) == 1;
+      UInt   imm9   = INSN(20,12);
+      Bool   atRN   = INSN(11,11) == 0;
+      UInt   nn     = INSN(9,5);
+      UInt   tt     = INSN(4,0);
+      IRTemp tRN    = newTemp(Ity_I64);
+      IRTemp tEA    = newTemp(Ity_I64);
+      IRTemp tTA    = IRTemp_INVALID;
+      IRType ty     = preferredVectorSubTypeFromSize(1 << szLg2);
+      ULong  simm9  = sx_to_64(imm9, 9);
+      assign(tRN, getIReg64orSP(nn));
+      assign(tEA, binop(Iop_Add64, mkexpr(tRN), mkU64(simm9)));
+      tTA = atRN ? tRN : tEA;
+      if (isLD) {
+         if (szLg2 < 4) {
+            putQReg128(tt, mkV128(0x0000));
+         }
+         putQRegLO(tt, loadLE(ty, mkexpr(tTA)));
+      } else {
+         storeLE(mkexpr(tTA), getQRegLO(tt, ty));
+      }
+      putIReg64orSP(nn, mkexpr(tEA));
+      DIP(atRN ? "%s %s, [%s], #%lld\n" : "%s %s, [%s, #%lld]!\n",
+          isLD ? "ldr" : "str",
+          nameQRegLO(tt, ty), nameIReg64orSP(nn), simm9);
+      return True;
+   }
+
+   /* -------- LDUR/STUR (unscaled offset, SIMD&FP) -------- */
+   /* 31 29      23   20   11 9 4
+      00 111 100 01 0 imm9 00 n t   LDR Bt, [Xn|SP, #simm]
+      01 111 100 01 0 imm9 00 n t   LDR Ht, [Xn|SP, #simm]
+      10 111 100 01 0 imm9 00 n t   LDR St, [Xn|SP, #simm]
+      11 111 100 01 0 imm9 00 n t   LDR Dt, [Xn|SP, #simm]
+      00 111 100 11 0 imm9 00 n t   LDR Qt, [Xn|SP, #simm]
+
+      00 111 100 00 0 imm9 00 n t   STR Bt, [Xn|SP, #simm]
+      01 111 100 00 0 imm9 00 n t   STR Ht, [Xn|SP, #simm]
+      10 111 100 00 0 imm9 00 n t   STR St, [Xn|SP, #simm]
+      11 111 100 00 0 imm9 00 n t   STR Dt, [Xn|SP, #simm]
+      00 111 100 10 0 imm9 00 n t   STR Qt, [Xn|SP, #simm]
+   */
+   if (INSN(29,24) == BITS6(1,1,1,1,0,0)
+       && ((INSN(23,23) << 2) | INSN(31,30)) <= 4
+       && INSN(21,21) == 0 && INSN(11,10) == BITS2(0,0)) {
+      UInt   szLg2  = (INSN(23,23) << 2) | INSN(31,30);
+      Bool   isLD   = INSN(22,22) == 1;
+      UInt   imm9   = INSN(20,12);
+      UInt   nn     = INSN(9,5);
+      UInt   tt     = INSN(4,0);
+      ULong  simm9  = sx_to_64(imm9, 9);
+      IRTemp tEA    = newTemp(Ity_I64);
+      IRType ty     = preferredVectorSubTypeFromSize(1 << szLg2);
+      assign(tEA, binop(Iop_Add64, getIReg64orSP(nn), mkU64(simm9)));
+      if (isLD) {
+         if (szLg2 < 4) {
+            putQReg128(tt, mkV128(0x0000));
+         }
+         putQRegLO(tt, loadLE(ty, mkexpr(tEA)));
+      } else {
+         storeLE(mkexpr(tEA), getQRegLO(tt, ty));
+      }
+      DIP("%s %s, [%s, #%lld]\n",
+          isLD ? "ldur" : "stur",
+          nameQRegLO(tt, ty), nameIReg64orSP(nn), (Long)simm9);
+      return True;
+   }
+
+   /* ---------------- LDR (literal, SIMD&FP) ---------------- */
+   /* 31 29      23    4
+      00 011 100 imm19 t    LDR St, [PC + sxTo64(imm19 << 2)]
+      01 011 100 imm19 t    LDR Dt, [PC + sxTo64(imm19 << 2)]
+      10 011 100 imm19 t    LDR Qt, [PC + sxTo64(imm19 << 2)]
+   */
+   if (INSN(29,24) == BITS6(0,1,1,1,0,0) && INSN(31,30) < BITS2(1,1)) {
+      UInt   szB   = 4 << INSN(31,30);
+      UInt   imm19 = INSN(23,5);
+      UInt   tt    = INSN(4,0);
+      ULong  ea    = guest_PC_curr_instr + sx_to_64(imm19 << 2, 21);
+      IRType ty    = preferredVectorSubTypeFromSize(szB);
+      putQReg128(tt, mkV128(0x0000));
+      putQRegLO(tt, loadLE(ty, mkU64(ea)));
+      DIP("ldr %s, 0x%llx (literal)\n", nameQRegLO(tt, ty), ea);
+      return True;
+   }
+
+   /* ------ LD1/ST1 (multiple 1-elem structs to/from 1 reg  ------ */
+   /* ------ LD2/ST2 (multiple 2-elem structs to/from 2 regs ------ */
+   /* ------ LD3/ST3 (multiple 3-elem structs to/from 3 regs ------ */
+   /* ------ LD4/ST4 (multiple 4-elem structs to/from 4 regs ------ */
+   /* 31 29  26   22 21 20    15   11 9 4    
+
+      0q 001 1000 L  0  00000 0000 sz n t  xx4 {Vt..t+3.T}, [Xn|SP]
+      0q 001 1001 L  0  m     0000 sz n t  xx4 {Vt..t+3.T}, [Xn|SP], step
+
+      0q 001 1000 L  0  00000 0100 sz n t  xx3 {Vt..t+2.T}, [Xn|SP]
+      0q 001 1001 L  0  m     0100 sz n t  xx3 {Vt..t+2.T}, [Xn|SP], step
+
+      0q 001 1000 L  0  00000 1000 sz n t  xx2 {Vt..t+1.T}, [Xn|SP]
+      0q 001 1001 L  0  m     1000 sz n t  xx2 {Vt..t+1.T}, [Xn|SP], step
+
+      0q 001 1000 L  0  00000 0111 sz n t  xx1 {Vt.T},      [Xn|SP]
+      0q 001 1001 L  0  m     0111 sz n t  xx1 {Vt.T},      [Xn|SP], step
+
+      T    = defined by Q and sz in the normal way
+      step = if m == 11111 then transfer-size else Xm
+      xx   = case L of 1 -> LD ; 0 -> ST
+   */
+   if (INSN(31,31) == 0 && INSN(29,24) == BITS6(0,0,1,1,0,0)
+       && INSN(21,21) == 0) {
+      Bool bitQ  = INSN(30,30);
+      Bool isPX  = INSN(23,23) == 1;
+      Bool isLD  = INSN(22,22) == 1;
+      UInt mm    = INSN(20,16);
+      UInt opc   = INSN(15,12);
+      UInt sz    = INSN(11,10);
+      UInt nn    = INSN(9,5);
+      UInt tt    = INSN(4,0);
+      Bool isQ   = bitQ == 1;
+      Bool is1d  = sz == BITS2(1,1) && !isQ;
+      UInt nRegs = 0;
+      switch (opc) {
+         case BITS4(0,0,0,0): nRegs = 4; break;
+         case BITS4(0,1,0,0): nRegs = 3; break;
+         case BITS4(1,0,0,0): nRegs = 2; break;
+         case BITS4(0,1,1,1): nRegs = 1; break;
+         default: break;
+      }
+
+      /* The combination insn[23] == 0 && insn[20:16] != 0 is not allowed.
+         If we see it, set nRegs to 0 so as to cause the next conditional
+         to fail. */
+      if (!isPX && mm != 0)
+         nRegs = 0;
+      
+      if (nRegs == 1                             /* .1d is allowed */
+          || (nRegs >= 2 && nRegs <= 4 && !is1d) /* .1d is not allowed */) {
+
+         UInt xferSzB = (isQ ? 16 : 8) * nRegs;
+
+         /* Generate the transfer address (TA) and if necessary the
+            writeback address (WB) */
+         IRTemp tTA = newTemp(Ity_I64);
+         assign(tTA, getIReg64orSP(nn));
+         if (nn == 31) { /* FIXME generate stack alignment check */ }
+         IRTemp tWB = IRTemp_INVALID;
+         if (isPX) {
+            tWB = newTemp(Ity_I64);
+            assign(tWB, binop(Iop_Add64,
+                              mkexpr(tTA), 
+                              mm == BITS5(1,1,1,1,1) ? mkU64(xferSzB)
+                                                     : getIReg64orZR(mm)));
+         }
+
+         /* -- BEGIN generate the transfers -- */
+
+         IRTemp u0, u1, u2, u3, i0, i1, i2, i3;
+         u0 = u1 = u2 = u3 = i0 = i1 = i2 = i3 = IRTemp_INVALID;
+         switch (nRegs) {
+            case 4: u3 = newTempV128(); i3 = newTempV128(); /* fallthru */
+            case 3: u2 = newTempV128(); i2 = newTempV128(); /* fallthru */
+            case 2: u1 = newTempV128(); i1 = newTempV128(); /* fallthru */
+            case 1: u0 = newTempV128(); i0 = newTempV128(); break;
+            default: vassert(0);
+         }
+
+         /* -- Multiple 128 or 64 bit stores -- */
+         if (!isLD) {
+            switch (nRegs) {
+               case 4: assign(u3, getQReg128((tt+3) % 32)); /* fallthru */
+               case 3: assign(u2, getQReg128((tt+2) % 32)); /* fallthru */
+               case 2: assign(u1, getQReg128((tt+1) % 32)); /* fallthru */
+               case 1: assign(u0, getQReg128((tt+0) % 32)); break;
+               default: vassert(0);
+            }
+            switch (nRegs) {
+               case 4:  (isQ ? math_INTERLEAVE4_128 : math_INTERLEAVE4_64)
+                           (&i0, &i1, &i2, &i3, sz, u0, u1, u2, u3);
+                        break;
+               case 3:  (isQ ? math_INTERLEAVE3_128 : math_INTERLEAVE3_64)
+                           (&i0, &i1, &i2, sz, u0, u1, u2);
+                        break;
+               case 2:  (isQ ? math_INTERLEAVE2_128 : math_INTERLEAVE2_64)
+                           (&i0, &i1, sz, u0, u1);
+                        break;
+               case 1:  (isQ ? math_INTERLEAVE1_128 : math_INTERLEAVE1_64)
+                           (&i0, sz, u0);
+                        break;
+               default: vassert(0);
+            }
+#           define MAYBE_NARROW_TO_64(_expr) \
+                      (isQ ? (_expr) : unop(Iop_V128to64,(_expr)))
+            UInt step = isQ ? 16 : 8;
+            switch (nRegs) {
+               case 4:  storeLE( binop(Iop_Add64, mkexpr(tTA), mkU64(3*step)),
+                                 MAYBE_NARROW_TO_64(mkexpr(i3)) );
+                        /* fallthru */
+               case 3:  storeLE( binop(Iop_Add64, mkexpr(tTA), mkU64(2*step)),
+                                 MAYBE_NARROW_TO_64(mkexpr(i2)) );
+                        /* fallthru */
+               case 2:  storeLE( binop(Iop_Add64, mkexpr(tTA), mkU64(1*step)),
+                                 MAYBE_NARROW_TO_64(mkexpr(i1)) );
+                        /* fallthru */
+               case 1:  storeLE( binop(Iop_Add64, mkexpr(tTA), mkU64(0*step)),
+                                 MAYBE_NARROW_TO_64(mkexpr(i0)) );
+                        break;
+               default: vassert(0);
+            }
+#           undef MAYBE_NARROW_TO_64
+         }
+
+         /* -- Multiple 128 or 64 bit loads -- */
+         else /* isLD */ {
+            UInt   step   = isQ ? 16 : 8;
+            IRType loadTy = isQ ? Ity_V128 : Ity_I64;
+#           define MAYBE_WIDEN_FROM_64(_expr) \
+                      (isQ ? (_expr) : unop(Iop_64UtoV128,(_expr)))
+            switch (nRegs) {
+               case 4:
+                  assign(i3, MAYBE_WIDEN_FROM_64(
+                                loadLE(loadTy,
+                                       binop(Iop_Add64, mkexpr(tTA),
+                                                        mkU64(3 * step)))));
+                  /* fallthru */
+               case 3:
+                  assign(i2, MAYBE_WIDEN_FROM_64(
+                                loadLE(loadTy,
+                                       binop(Iop_Add64, mkexpr(tTA),
+                                                        mkU64(2 * step)))));
+                  /* fallthru */
+               case 2:
+                  assign(i1, MAYBE_WIDEN_FROM_64(
+                                loadLE(loadTy,
+                                       binop(Iop_Add64, mkexpr(tTA),
+                                                        mkU64(1 * step)))));
+                  /* fallthru */
+               case 1:
+                  assign(i0, MAYBE_WIDEN_FROM_64(
+                                loadLE(loadTy,
+                                       binop(Iop_Add64, mkexpr(tTA),
+                                                        mkU64(0 * step)))));
+                  break;
+               default:
+                  vassert(0);
+            }
+#           undef MAYBE_WIDEN_FROM_64
+            switch (nRegs) {
+               case 4:  (isQ ? math_DEINTERLEAVE4_128 : math_DEINTERLEAVE4_64)
+                           (&u0, &u1, &u2, &u3, sz, i0,i1,i2,i3);
+                        break;
+               case 3:  (isQ ? math_DEINTERLEAVE3_128 : math_DEINTERLEAVE3_64)
+                           (&u0, &u1, &u2, sz, i0, i1, i2);
+                        break;
+               case 2:  (isQ ? math_DEINTERLEAVE2_128 : math_DEINTERLEAVE2_64)
+                           (&u0, &u1, sz, i0, i1);
+                        break;
+               case 1:  (isQ ? math_DEINTERLEAVE1_128 : math_DEINTERLEAVE1_64)
+                           (&u0, sz, i0);
+                        break;
+               default: vassert(0);
+            }
+            switch (nRegs) {
+               case 4:  putQReg128( (tt+3) % 32,
+                                    math_MAYBE_ZERO_HI64(bitQ, u3));
+                        /* fallthru */
+               case 3:  putQReg128( (tt+2) % 32,
+                                    math_MAYBE_ZERO_HI64(bitQ, u2));
+                        /* fallthru */
+               case 2:  putQReg128( (tt+1) % 32,
+                                    math_MAYBE_ZERO_HI64(bitQ, u1));
+                        /* fallthru */
+               case 1:  putQReg128( (tt+0) % 32,
+                                    math_MAYBE_ZERO_HI64(bitQ, u0));
+                        break;
+               default: vassert(0);
+            }
+         }
+
+         /* -- END generate the transfers -- */
+
+         /* Do the writeback, if necessary */
+         if (isPX) {
+            putIReg64orSP(nn, mkexpr(tWB));
+         }            
+
+         HChar pxStr[20];
+         pxStr[0] = pxStr[sizeof(pxStr)-1] = 0;
+         if (isPX) {
+            if (mm == BITS5(1,1,1,1,1))
+               vex_sprintf(pxStr, ", #%u", xferSzB);
+            else
+               vex_sprintf(pxStr, ", %s", nameIReg64orZR(mm));
+         }
+         const HChar* arr = nameArr_Q_SZ(bitQ, sz);
+         DIP("%s%u {v%u.%s .. v%u.%s}, [%s]%s\n",
+             isLD ? "ld" : "st", nRegs,
+             (tt+0) % 32, arr, (tt+nRegs-1) % 32, arr, nameIReg64orSP(nn),
+             pxStr);
+
+         return True;
+      }
+      /* else fall through */
+   }
+
+   /* ------ LD1/ST1 (multiple 1-elem structs to/from 2 regs  ------ */
+   /* ------ LD1/ST1 (multiple 1-elem structs to/from 3 regs  ------ */
+   /* ------ LD1/ST1 (multiple 1-elem structs to/from 4 regs  ------ */
+   /* 31 29  26   22 21 20    15   11 9 4    
+
+      0q 001 1000 L  0  00000 0010 sz n t  xx1 {Vt..t+3.T}, [Xn|SP]
+      0q 001 1001 L  0  m     0010 sz n t  xx1 {Vt..t+3.T}, [Xn|SP], step
+
+      0q 001 1000 L  0  00000 0110 sz n t  xx1 {Vt..t+2.T}, [Xn|SP]
+      0q 001 1001 L  0  m     0110 sz n t  xx1 {Vt..t+2.T}, [Xn|SP], step
+
+      0q 001 1000 L  0  00000 1010 sz n t  xx1 {Vt..t+1.T}, [Xn|SP]
+      0q 001 1001 L  0  m     1010 sz n t  xx1 {Vt..t+1.T}, [Xn|SP], step
+
+      T    = defined by Q and sz in the normal way
+      step = if m == 11111 then transfer-size else Xm
+      xx   = case L of 1 -> LD ; 0 -> ST
+   */
+   if (INSN(31,31) == 0 && INSN(29,24) == BITS6(0,0,1,1,0,0)
+       && INSN(21,21) == 0) {
+      Bool bitQ  = INSN(30,30);
+      Bool isPX  = INSN(23,23) == 1;
+      Bool isLD  = INSN(22,22) == 1;
+      UInt mm    = INSN(20,16);
+      UInt opc   = INSN(15,12);
+      UInt sz    = INSN(11,10);
+      UInt nn    = INSN(9,5);
+      UInt tt    = INSN(4,0);
+      Bool isQ   = bitQ == 1;
+      UInt nRegs = 0;
+      switch (opc) {
+         case BITS4(0,0,1,0): nRegs = 4; break;
+         case BITS4(0,1,1,0): nRegs = 3; break;
+         case BITS4(1,0,1,0): nRegs = 2; break;
+         default: break;
+      }
+      
+      /* The combination insn[23] == 0 && insn[20:16] != 0 is not allowed.
+         If we see it, set nRegs to 0 so as to cause the next conditional
+         to fail. */
+      if (!isPX && mm != 0)
+         nRegs = 0;
+      
+      if (nRegs >= 2 && nRegs <= 4) {
+
+         UInt xferSzB = (isQ ? 16 : 8) * nRegs;
+
+         /* Generate the transfer address (TA) and if necessary the
+            writeback address (WB) */
+         IRTemp tTA = newTemp(Ity_I64);
+         assign(tTA, getIReg64orSP(nn));
+         if (nn == 31) { /* FIXME generate stack alignment check */ }
+         IRTemp tWB = IRTemp_INVALID;
+         if (isPX) {
+            tWB = newTemp(Ity_I64);
+            assign(tWB, binop(Iop_Add64,
+                              mkexpr(tTA), 
+                              mm == BITS5(1,1,1,1,1) ? mkU64(xferSzB)
+                                                     : getIReg64orZR(mm)));
+         }
+
+         /* -- BEGIN generate the transfers -- */
+
+         IRTemp u0, u1, u2, u3;
+         u0 = u1 = u2 = u3 = IRTemp_INVALID;
+         switch (nRegs) {
+            case 4: u3 = newTempV128(); /* fallthru */
+            case 3: u2 = newTempV128(); /* fallthru */
+            case 2: u1 = newTempV128();
+                    u0 = newTempV128(); break;
+            default: vassert(0);
+         }
+
+         /* -- Multiple 128 or 64 bit stores -- */
+         if (!isLD) {
+            switch (nRegs) {
+               case 4: assign(u3, getQReg128((tt+3) % 32)); /* fallthru */
+               case 3: assign(u2, getQReg128((tt+2) % 32)); /* fallthru */
+               case 2: assign(u1, getQReg128((tt+1) % 32));
+                       assign(u0, getQReg128((tt+0) % 32)); break;
+               default: vassert(0);
+            }
+#           define MAYBE_NARROW_TO_64(_expr) \
+                      (isQ ? (_expr) : unop(Iop_V128to64,(_expr)))
+            UInt step = isQ ? 16 : 8;
+            switch (nRegs) {
+               case 4:  storeLE( binop(Iop_Add64, mkexpr(tTA), mkU64(3*step)),
+                                 MAYBE_NARROW_TO_64(mkexpr(u3)) );
+                        /* fallthru */
+               case 3:  storeLE( binop(Iop_Add64, mkexpr(tTA), mkU64(2*step)),
+                                 MAYBE_NARROW_TO_64(mkexpr(u2)) );
+                        /* fallthru */
+               case 2:  storeLE( binop(Iop_Add64, mkexpr(tTA), mkU64(1*step)),
+                                 MAYBE_NARROW_TO_64(mkexpr(u1)) );
+                        storeLE( binop(Iop_Add64, mkexpr(tTA), mkU64(0*step)),
+                                 MAYBE_NARROW_TO_64(mkexpr(u0)) );
+                        break;
+               default: vassert(0);
+            }
+#           undef MAYBE_NARROW_TO_64
+         }
+
+         /* -- Multiple 128 or 64 bit loads -- */
+         else /* isLD */ {
+            UInt   step   = isQ ? 16 : 8;
+            IRType loadTy = isQ ? Ity_V128 : Ity_I64;
+#           define MAYBE_WIDEN_FROM_64(_expr) \
+                      (isQ ? (_expr) : unop(Iop_64UtoV128,(_expr)))
+            switch (nRegs) {
+               case 4:
+                  assign(u3, MAYBE_WIDEN_FROM_64(
+                                loadLE(loadTy,
+                                       binop(Iop_Add64, mkexpr(tTA),
+                                                        mkU64(3 * step)))));
+                  /* fallthru */
+               case 3:
+                  assign(u2, MAYBE_WIDEN_FROM_64(
+                                loadLE(loadTy,
+                                       binop(Iop_Add64, mkexpr(tTA),
+                                                        mkU64(2 * step)))));
+                  /* fallthru */
+               case 2:
+                  assign(u1, MAYBE_WIDEN_FROM_64(
+                                loadLE(loadTy,
+                                       binop(Iop_Add64, mkexpr(tTA),
+                                                        mkU64(1 * step)))));
+                  assign(u0, MAYBE_WIDEN_FROM_64(
+                                loadLE(loadTy,
+                                       binop(Iop_Add64, mkexpr(tTA),
+                                                        mkU64(0 * step)))));
+                  break;
+               default:
+                  vassert(0);
+            }
+#           undef MAYBE_WIDEN_FROM_64
+            switch (nRegs) {
+               case 4:  putQReg128( (tt+3) % 32,
+                                    math_MAYBE_ZERO_HI64(bitQ, u3));
+                        /* fallthru */
+               case 3:  putQReg128( (tt+2) % 32,
+                                    math_MAYBE_ZERO_HI64(bitQ, u2));
+                        /* fallthru */
+               case 2:  putQReg128( (tt+1) % 32,
+                                    math_MAYBE_ZERO_HI64(bitQ, u1));
+                        putQReg128( (tt+0) % 32,
+                                    math_MAYBE_ZERO_HI64(bitQ, u0));
+                        break;
+               default: vassert(0);
+            }
+         }
+
+         /* -- END generate the transfers -- */
+
+         /* Do the writeback, if necessary */
+         if (isPX) {
+            putIReg64orSP(nn, mkexpr(tWB));
+         }            
+
+         HChar pxStr[20];
+         pxStr[0] = pxStr[sizeof(pxStr)-1] = 0;
+         if (isPX) {
+            if (mm == BITS5(1,1,1,1,1))
+               vex_sprintf(pxStr, ", #%u", xferSzB);
+            else
+               vex_sprintf(pxStr, ", %s", nameIReg64orZR(mm));
+         }
+         const HChar* arr = nameArr_Q_SZ(bitQ, sz);
+         DIP("%s1 {v%u.%s .. v%u.%s}, [%s]%s\n",
+             isLD ? "ld" : "st",
+             (tt+0) % 32, arr, (tt+nRegs-1) % 32, arr, nameIReg64orSP(nn),
+             pxStr);
+
+         return True;
+      }
+      /* else fall through */
+   }
+
+   /* ---------- LD1R (single structure, replicate) ---------- */
+   /* ---------- LD2R (single structure, replicate) ---------- */
+   /* ---------- LD3R (single structure, replicate) ---------- */
+   /* ---------- LD4R (single structure, replicate) ---------- */
+   /* 31 29       22 20    15    11 9 4    
+      0q 001 1010 10 00000 110 0 sz n t  LD1R {Vt.T}, [Xn|SP]
+      0q 001 1011 10 m     110 0 sz n t  LD1R {Vt.T}, [Xn|SP], step
+
+      0q 001 1010 11 00000 110 0 sz n t  LD2R {Vt..t+1.T}, [Xn|SP]
+      0q 001 1011 11 m     110 0 sz n t  LD2R {Vt..t+1.T}, [Xn|SP], step
+
+      0q 001 1010 10 00000 111 0 sz n t  LD3R {Vt..t+2.T}, [Xn|SP]
+      0q 001 1011 10 m     111 0 sz n t  LD3R {Vt..t+2.T}, [Xn|SP], step
+
+      0q 001 1010 11 00000 111 0 sz n t  LD4R {Vt..t+3.T}, [Xn|SP]
+      0q 001 1011 11 m     111 0 sz n t  LD4R {Vt..t+3.T}, [Xn|SP], step
+
+      step = if m == 11111 then transfer-size else Xm
+   */
+   if (INSN(31,31) == 0 && INSN(29,24) == BITS6(0,0,1,1,0,1)
+       && INSN(22,22) == 1 && INSN(15,14) == BITS2(1,1)
+       && INSN(12,12) == 0) {
+      UInt   bitQ  = INSN(30,30);
+      Bool   isPX  = INSN(23,23) == 1;
+      UInt   nRegs = ((INSN(13,13) << 1) | INSN(21,21)) + 1;
+      UInt   mm    = INSN(20,16);
+      UInt   sz    = INSN(11,10);
+      UInt   nn    = INSN(9,5);
+      UInt   tt    = INSN(4,0);
+
+      /* The combination insn[23] == 0 && insn[20:16] != 0 is not allowed. */
+      if (isPX || mm == 0) {
+
+         IRType ty    = integerIRTypeOfSize(1 << sz);
+
+         UInt laneSzB = 1 << sz;
+         UInt xferSzB = laneSzB * nRegs;
+
+         /* Generate the transfer address (TA) and if necessary the
+            writeback address (WB) */
+         IRTemp tTA = newTemp(Ity_I64);
+         assign(tTA, getIReg64orSP(nn));
+         if (nn == 31) { /* FIXME generate stack alignment check */ }
+         IRTemp tWB = IRTemp_INVALID;
+         if (isPX) {
+            tWB = newTemp(Ity_I64);
+            assign(tWB, binop(Iop_Add64,
+                              mkexpr(tTA), 
+                              mm == BITS5(1,1,1,1,1) ? mkU64(xferSzB)
+                                                     : getIReg64orZR(mm)));
+         }
+
+         /* Do the writeback, if necessary */
+         if (isPX) {
+            putIReg64orSP(nn, mkexpr(tWB));
+         }            
+
+         IRTemp e0, e1, e2, e3, v0, v1, v2, v3;
+         e0 = e1 = e2 = e3 = v0 = v1 = v2 = v3 = IRTemp_INVALID;
+         switch (nRegs) {
+            case 4:
+               e3 = newTemp(ty);
+               assign(e3, loadLE(ty, binop(Iop_Add64, mkexpr(tTA),
+                                                      mkU64(3 * laneSzB))));
+               v3 = math_DUP_TO_V128(e3, ty);
+               putQReg128((tt+3) % 32, math_MAYBE_ZERO_HI64(bitQ, v3));
+               /* fallthrough */
+            case 3:
+               e2 = newTemp(ty);
+               assign(e2, loadLE(ty, binop(Iop_Add64, mkexpr(tTA),
+                                                      mkU64(2 * laneSzB))));
+               v2 = math_DUP_TO_V128(e2, ty);
+               putQReg128((tt+2) % 32, math_MAYBE_ZERO_HI64(bitQ, v2));
+               /* fallthrough */
+            case 2:
+               e1 = newTemp(ty);
+               assign(e1, loadLE(ty, binop(Iop_Add64, mkexpr(tTA),
+                                                      mkU64(1 * laneSzB))));
+               v1 = math_DUP_TO_V128(e1, ty);
+               putQReg128((tt+1) % 32, math_MAYBE_ZERO_HI64(bitQ, v1));
+               /* fallthrough */
+            case 1:
+               e0 = newTemp(ty);
+               assign(e0, loadLE(ty, binop(Iop_Add64, mkexpr(tTA),
+                                                      mkU64(0 * laneSzB))));
+               v0 = math_DUP_TO_V128(e0, ty);
+               putQReg128((tt+0) % 32, math_MAYBE_ZERO_HI64(bitQ, v0));
+               break;
+            default:
+               vassert(0);
+         }
+
+         HChar pxStr[20];
+         pxStr[0] = pxStr[sizeof(pxStr)-1] = 0;
+         if (isPX) {
+            if (mm == BITS5(1,1,1,1,1))
+               vex_sprintf(pxStr, ", #%u", xferSzB);
+            else
+               vex_sprintf(pxStr, ", %s", nameIReg64orZR(mm));
+         }
+         const HChar* arr = nameArr_Q_SZ(bitQ, sz);
+         DIP("ld%ur {v%u.%s .. v%u.%s}, [%s]%s\n",
+             nRegs,
+             (tt+0) % 32, arr, (tt+nRegs-1) % 32, arr, nameIReg64orSP(nn),
+             pxStr);
+
+         return True;
+      }
+      /* else fall through */
+   }
+
+   /* ------ LD1/ST1 (single structure, to/from one lane) ------ */
+   /* ------ LD2/ST2 (single structure, to/from one lane) ------ */
+   /* ------ LD3/ST3 (single structure, to/from one lane) ------ */
+   /* ------ LD4/ST4 (single structure, to/from one lane) ------ */
+   /* 31 29       22 21 20    15    11 9 4    
+      0q 001 1010 L  0  00000 xx0 S sz n t  op1 {Vt.T}[ix], [Xn|SP]
+      0q 001 1011 L  0  m     xx0 S sz n t  op1 {Vt.T}[ix], [Xn|SP], step
+
+      0q 001 1010 L  1  00000 xx0 S sz n t  op2 {Vt..t+1.T}[ix], [Xn|SP]
+      0q 001 1011 L  1  m     xx0 S sz n t  op2 {Vt..t+1.T}[ix], [Xn|SP], step
+
+      0q 001 1010 L  0  00000 xx1 S sz n t  op3 {Vt..t+2.T}[ix], [Xn|SP]
+      0q 001 1011 L  0  m     xx1 S sz n t  op3 {Vt..t+2.T}[ix], [Xn|SP], step
+
+      0q 001 1010 L  1  00000 xx1 S sz n t  op4 {Vt..t+3.T}[ix], [Xn|SP]
+      0q 001 1011 L  1  m     xx1 S sz n t  op4 {Vt..t+3.T}[ix], [Xn|SP], step
+
+      step = if m == 11111 then transfer-size else Xm
+      op   = case L of 1 -> LD ; 0 -> ST
+
+      laneszB,ix = case xx:q:S:sz of 00:b:b:bb -> 1, bbbb
+                                     01:b:b:b0 -> 2, bbb
+                                     10:b:b:00 -> 4, bb
+                                     10:b:0:01 -> 8, b
+   */
+   if (INSN(31,31) == 0 && INSN(29,24) == BITS6(0,0,1,1,0,1)) {
+      UInt   bitQ  = INSN(30,30);
+      Bool   isPX  = INSN(23,23) == 1;
+      Bool   isLD  = INSN(22,22) == 1;
+      UInt   nRegs = ((INSN(13,13) << 1) | INSN(21,21)) + 1;
+      UInt   mm    = INSN(20,16);
+      UInt   xx    = INSN(15,14);
+      UInt   bitS  = INSN(12,12);
+      UInt   sz    = INSN(11,10);
+      UInt   nn    = INSN(9,5);
+      UInt   tt    = INSN(4,0);
+
+      Bool valid = True;
+
+      /* The combination insn[23] == 0 && insn[20:16] != 0 is not allowed. */
+      if (!isPX && mm != 0)
+         valid = False;
+
+      UInt laneSzB = 0;  /* invalid */
+      UInt ix      = 16; /* invalid */
+
+      UInt xx_q_S_sz = (xx << 4) | (bitQ << 3) | (bitS << 2) | sz;
+      switch (xx_q_S_sz) {
+         case 0x00: case 0x01: case 0x02: case 0x03:
+         case 0x04: case 0x05: case 0x06: case 0x07:
+         case 0x08: case 0x09: case 0x0A: case 0x0B:
+         case 0x0C: case 0x0D: case 0x0E: case 0x0F:
+            laneSzB = 1; ix = xx_q_S_sz & 0xF;
+            break;
+         case 0x10: case 0x12: case 0x14: case 0x16:
+         case 0x18: case 0x1A: case 0x1C: case 0x1E:
+            laneSzB = 2; ix = (xx_q_S_sz >> 1) & 7;
+            break;
+         case 0x20: case 0x24: case 0x28: case 0x2C:
+            laneSzB = 4; ix = (xx_q_S_sz >> 2) & 3;
+            break;
+         case 0x21: case 0x29:
+            laneSzB = 8; ix = (xx_q_S_sz >> 3) & 1;
+            break;
+         default:
+            break;
+      }
+
+      if (valid && laneSzB != 0) {
+
+         IRType ty      = integerIRTypeOfSize(laneSzB);
+         UInt   xferSzB = laneSzB * nRegs;
+
+         /* Generate the transfer address (TA) and if necessary the
+            writeback address (WB) */
+         IRTemp tTA = newTemp(Ity_I64);
+         assign(tTA, getIReg64orSP(nn));
+         if (nn == 31) { /* FIXME generate stack alignment check */ }
+         IRTemp tWB = IRTemp_INVALID;
+         if (isPX) {
+            tWB = newTemp(Ity_I64);
+            assign(tWB, binop(Iop_Add64,
+                              mkexpr(tTA), 
+                              mm == BITS5(1,1,1,1,1) ? mkU64(xferSzB)
+                                                     : getIReg64orZR(mm)));
+         }
+
+         /* Do the writeback, if necessary */
+         if (isPX) {
+            putIReg64orSP(nn, mkexpr(tWB));
+         }            
+
+         switch (nRegs) {
+            case 4: {
+               IRExpr* addr
+                  = binop(Iop_Add64, mkexpr(tTA), mkU64(3 * laneSzB));
+               if (isLD) {
+                  putQRegLane((tt+3) % 32, ix, loadLE(ty, addr));
+               } else {
+                  storeLE(addr, getQRegLane((tt+3) % 32, ix, ty));
+               }
+               /* fallthrough */
+            }
+            case 3: {
+               IRExpr* addr
+                  = binop(Iop_Add64, mkexpr(tTA), mkU64(2 * laneSzB));
+               if (isLD) {
+                  putQRegLane((tt+2) % 32, ix, loadLE(ty, addr));
+               } else {
+                  storeLE(addr, getQRegLane((tt+2) % 32, ix, ty));
+               }
+               /* fallthrough */
+            }
+            case 2: {
+               IRExpr* addr
+                  = binop(Iop_Add64, mkexpr(tTA), mkU64(1 * laneSzB));
+               if (isLD) {
+                  putQRegLane((tt+1) % 32, ix, loadLE(ty, addr));
+               } else {
+                  storeLE(addr, getQRegLane((tt+1) % 32, ix, ty));
+               }
+               /* fallthrough */
+            }
+            case 1: {
+               IRExpr* addr
+                  = binop(Iop_Add64, mkexpr(tTA), mkU64(0 * laneSzB));
+               if (isLD) {
+                  putQRegLane((tt+0) % 32, ix, loadLE(ty, addr));
+               } else {
+                  storeLE(addr, getQRegLane((tt+0) % 32, ix, ty));
+               }
+               break;
+            }
+            default:
+               vassert(0);
+         }
+
+         HChar pxStr[20];
+         pxStr[0] = pxStr[sizeof(pxStr)-1] = 0;
+         if (isPX) {
+            if (mm == BITS5(1,1,1,1,1))
+               vex_sprintf(pxStr, ", #%u", xferSzB);
+            else
+               vex_sprintf(pxStr, ", %s", nameIReg64orZR(mm));
+         }
+         const HChar* arr = nameArr_Q_SZ(bitQ, sz);
+         DIP("%s%u {v%u.%s .. v%u.%s}[%u], [%s]%s\n",
+             isLD ? "ld" : "st", nRegs,
+             (tt+0) % 32, arr, (tt+nRegs-1) % 32, arr, 
+             ix, nameIReg64orSP(nn), pxStr);
+
+         return True;
+      }
+      /* else fall through */
+   }
+
+   /* ------------------ LD{,A}X{R,RH,RB} ------------------ */
+   /* ------------------ ST{,L}X{R,RH,RB} ------------------ */
+   /* 31 29     23  20      14    9 4
+      sz 001000 010 11111 0 11111 n t   LDX{R,RH,RB}  Rt, [Xn|SP]
+      sz 001000 010 11111 1 11111 n t   LDAX{R,RH,RB} Rt, [Xn|SP]
+      sz 001000 000 s     0 11111 n t   STX{R,RH,RB}  Ws, Rt, [Xn|SP]
+      sz 001000 000 s     1 11111 n t   STLX{R,RH,RB} Ws, Rt, [Xn|SP]
+   */
+   if (INSN(29,23) == BITS7(0,0,1,0,0,0,0)
+       && (INSN(23,21) & BITS3(1,0,1)) == BITS3(0,0,0)
+       && INSN(14,10) == BITS5(1,1,1,1,1)) {
+      UInt szBlg2     = INSN(31,30);
+      Bool isLD       = INSN(22,22) == 1;
+      Bool isAcqOrRel = INSN(15,15) == 1;
+      UInt ss         = INSN(20,16);
+      UInt nn         = INSN(9,5);
+      UInt tt         = INSN(4,0);
+
+      vassert(szBlg2 < 4);
+      UInt   szB = 1 << szBlg2; /* 1, 2, 4 or 8 */
+      IRType ty  = integerIRTypeOfSize(szB);
+      const HChar* suffix[4] = { "rb", "rh", "r", "r" };
+
+      IRTemp ea = newTemp(Ity_I64);
+      assign(ea, getIReg64orSP(nn));
+      /* FIXME generate check that ea is szB-aligned */
+
+      if (isLD && ss == BITS5(1,1,1,1,1)) {
+         IRTemp res = newTemp(ty);
+         stmt(IRStmt_LLSC(Iend_LE, res, mkexpr(ea), NULL/*LL*/));
+         putIReg64orZR(tt, widenUto64(ty, mkexpr(res)));
+         if (isAcqOrRel) {
+            stmt(IRStmt_MBE(Imbe_Fence));
+         }
+         DIP("ld%sx%s %s, [%s]\n", isAcqOrRel ? "a" : "", suffix[szBlg2],
+             nameIRegOrZR(szB == 8, tt), nameIReg64orSP(nn));
+         return True;
+      }
+      if (!isLD) {
+         if (isAcqOrRel) {
+            stmt(IRStmt_MBE(Imbe_Fence));
+         }
+         IRTemp  res  = newTemp(Ity_I1);
+         IRExpr* data = narrowFrom64(ty, getIReg64orZR(tt));
+         stmt(IRStmt_LLSC(Iend_LE, res, mkexpr(ea), data));
+         /* IR semantics: res is 1 if store succeeds, 0 if it fails.
+            Need to set rS to 1 on failure, 0 on success. */
+         putIReg64orZR(ss, binop(Iop_Xor64, unop(Iop_1Uto64, mkexpr(res)),
+                                            mkU64(1)));
+         DIP("st%sx%s %s, %s, [%s]\n", isAcqOrRel ? "a" : "", suffix[szBlg2],
+             nameIRegOrZR(False, ss),
+             nameIRegOrZR(szB == 8, tt), nameIReg64orSP(nn));
+         return True;
+      }
+      /* else fall through */
+   }
+
+   /* ------------------ LDA{R,RH,RB} ------------------ */
+   /* ------------------ STL{R,RH,RB} ------------------ */
+   /* 31 29     23  20      14    9 4
+      sz 001000 110 11111 1 11111 n t   LDAR<sz> Rt, [Xn|SP]
+      sz 001000 100 11111 1 11111 n t   STLR<sz> Rt, [Xn|SP]
+   */
+   if (INSN(29,23) == BITS7(0,0,1,0,0,0,1)
+       && INSN(21,10) == BITS12(0,1,1,1,1,1,1,1,1,1,1,1)) {
+      UInt szBlg2 = INSN(31,30);
+      Bool isLD   = INSN(22,22) == 1;
+      UInt nn     = INSN(9,5);
+      UInt tt     = INSN(4,0);
+
+      vassert(szBlg2 < 4);
+      UInt   szB = 1 << szBlg2; /* 1, 2, 4 or 8 */
+      IRType ty  = integerIRTypeOfSize(szB);
+      const HChar* suffix[4] = { "rb", "rh", "r", "r" };
+
+      IRTemp ea = newTemp(Ity_I64);
+      assign(ea, getIReg64orSP(nn));
+      /* FIXME generate check that ea is szB-aligned */
+
+      if (isLD) {
+         IRTemp res = newTemp(ty);
+         assign(res, loadLE(ty, mkexpr(ea)));
+         putIReg64orZR(tt, widenUto64(ty, mkexpr(res)));
+         stmt(IRStmt_MBE(Imbe_Fence));
+         DIP("lda%s %s, [%s]\n", suffix[szBlg2],
+             nameIRegOrZR(szB == 8, tt), nameIReg64orSP(nn));
+      } else {
+         stmt(IRStmt_MBE(Imbe_Fence));
+         IRExpr* data = narrowFrom64(ty, getIReg64orZR(tt));
+         storeLE(mkexpr(ea), data);
+         DIP("stl%s %s, [%s]\n", suffix[szBlg2],
+             nameIRegOrZR(szB == 8, tt), nameIReg64orSP(nn));
+      }
+      return True;
+   }
+
+   /* ------------------ PRFM (immediate) ------------------ */
+   /* 31           21    9 4
+      11 111 00110 imm12 n t   PRFM prfop=Rt, [Xn|SP, #pimm]
+   */
+   if (INSN(31,22) == BITS10(1,1,1,1,1,0,0,1,1,0)) {
+      UInt imm12 = INSN(21,10);
+      UInt nn    = INSN(9,5);
+      UInt tt    = INSN(4,0);
+      /* Generating any IR here is pointless, except for documentation
+         purposes, as it will get optimised away later. */
+      IRTemp ea = newTemp(Ity_I64);
+      assign(ea, binop(Iop_Add64, getIReg64orSP(nn), mkU64(imm12 * 8)));
+      DIP("prfm prfop=%u, [%s, #%u]\n", tt, nameIReg64orSP(nn), imm12 * 8);
+      return True;
+   }
+
+   vex_printf("ARM64 front end: load_store\n");
+   return False;
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Control flow and misc instructions                   ---*/
+/*------------------------------------------------------------*/
+
+static
+Bool dis_ARM64_branch_etc(/*MB_OUT*/DisResult* dres, UInt insn,
+                          const VexArchInfo* archinfo)
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+
+   /* ---------------------- B cond ----------------------- */
+   /* 31        24    4 3
+      0101010 0 imm19 0 cond */
+   if (INSN(31,24) == BITS8(0,1,0,1,0,1,0,0) && INSN(4,4) == 0) {
+      UInt  cond   = INSN(3,0);
+      ULong uimm64 = INSN(23,5) << 2;
+      Long  simm64 = (Long)sx_to_64(uimm64, 21);
+      vassert(dres->whatNext    == Dis_Continue);
+      vassert(dres->len         == 4);
+      vassert(dres->continueAt  == 0);
+      vassert(dres->jk_StopHere == Ijk_INVALID);
+      stmt( IRStmt_Exit(unop(Iop_64to1, mk_arm64g_calculate_condition(cond)),
+                        Ijk_Boring,
+                        IRConst_U64(guest_PC_curr_instr + simm64),
+                        OFFB_PC) );
+      putPC(mkU64(guest_PC_curr_instr + 4));
+      dres->whatNext    = Dis_StopHere;
+      dres->jk_StopHere = Ijk_Boring;
+      DIP("b.%s 0x%llx\n", nameCC(cond), guest_PC_curr_instr + simm64);
+      return True;
+   }
+
+   /* -------------------- B{L} uncond -------------------- */
+   if (INSN(30,26) == BITS5(0,0,1,0,1)) {
+      /* 000101 imm26  B   (PC + sxTo64(imm26 << 2))
+         100101 imm26  BL  (PC + sxTo64(imm26 << 2))
+      */
+      UInt  bLink  = INSN(31,31);
+      ULong uimm64 = INSN(25,0) << 2;
+      Long  simm64 = (Long)sx_to_64(uimm64, 28);
+      if (bLink) {
+         putIReg64orSP(30, mkU64(guest_PC_curr_instr + 4));
+      }
+      putPC(mkU64(guest_PC_curr_instr + simm64));
+      dres->whatNext = Dis_StopHere;
+      dres->jk_StopHere = Ijk_Call;
+      DIP("b%s 0x%llx\n", bLink == 1 ? "l" : "",
+                          guest_PC_curr_instr + simm64);
+      return True;
+   }
+
+   /* --------------------- B{L} reg --------------------- */
+   /* 31      24 22 20    15     9  4
+      1101011 00 10 11111 000000 nn 00000  RET  Rn
+      1101011 00 01 11111 000000 nn 00000  CALL Rn
+      1101011 00 00 11111 000000 nn 00000  JMP  Rn
+   */
+   if (INSN(31,23) == BITS9(1,1,0,1,0,1,1,0,0)
+       && INSN(20,16) == BITS5(1,1,1,1,1)
+       && INSN(15,10) == BITS6(0,0,0,0,0,0)
+       && INSN(4,0) == BITS5(0,0,0,0,0)) {
+      UInt branch_type = INSN(22,21);
+      UInt nn          = INSN(9,5);
+      if (branch_type == BITS2(1,0) /* RET */) {
+         putPC(getIReg64orZR(nn));
+         dres->whatNext = Dis_StopHere;
+         dres->jk_StopHere = Ijk_Ret;
+         DIP("ret %s\n", nameIReg64orZR(nn));
+         return True;
+      }
+      if (branch_type == BITS2(0,1) /* CALL */) {
+         IRTemp dst = newTemp(Ity_I64);
+         assign(dst, getIReg64orZR(nn));
+         putIReg64orSP(30, mkU64(guest_PC_curr_instr + 4));
+         putPC(mkexpr(dst));
+         dres->whatNext = Dis_StopHere;
+         dres->jk_StopHere = Ijk_Call;
+         DIP("blr %s\n", nameIReg64orZR(nn));
+         return True;
+      }
+      if (branch_type == BITS2(0,0) /* JMP */) {
+         putPC(getIReg64orZR(nn));
+         dres->whatNext = Dis_StopHere;
+         dres->jk_StopHere = Ijk_Boring;
+         DIP("jmp %s\n", nameIReg64orZR(nn));
+         return True;
+      }
+   }
+
+   /* -------------------- CB{N}Z -------------------- */
+   /* sf 011 010 1 imm19 Rt   CBNZ Xt|Wt, (PC + sxTo64(imm19 << 2))
+      sf 011 010 0 imm19 Rt   CBZ  Xt|Wt, (PC + sxTo64(imm19 << 2))
+   */
+   if (INSN(30,25) == BITS6(0,1,1,0,1,0)) {
+      Bool    is64   = INSN(31,31) == 1;
+      Bool    bIfZ   = INSN(24,24) == 0;
+      ULong   uimm64 = INSN(23,5) << 2;
+      UInt    rT     = INSN(4,0);
+      Long    simm64 = (Long)sx_to_64(uimm64, 21);
+      IRExpr* cond   = NULL;
+      if (is64) {
+         cond = binop(bIfZ ? Iop_CmpEQ64 : Iop_CmpNE64,
+                      getIReg64orZR(rT), mkU64(0));
+      } else {
+         cond = binop(bIfZ ? Iop_CmpEQ32 : Iop_CmpNE32,
+                      getIReg32orZR(rT), mkU32(0));
+      }
+      stmt( IRStmt_Exit(cond,
+                        Ijk_Boring,
+                        IRConst_U64(guest_PC_curr_instr + simm64),
+                        OFFB_PC) );
+      putPC(mkU64(guest_PC_curr_instr + 4));
+      dres->whatNext    = Dis_StopHere;
+      dres->jk_StopHere = Ijk_Boring;
+      DIP("cb%sz %s, 0x%llx\n",
+          bIfZ ? "" : "n", nameIRegOrZR(is64, rT),
+          guest_PC_curr_instr + simm64);
+      return True;
+   }
+
+   /* -------------------- TB{N}Z -------------------- */
+   /* 31 30      24 23  18  5 4
+      b5 011 011 1  b40 imm14 t  TBNZ Xt, #(b5:b40), (PC + sxTo64(imm14 << 2))
+      b5 011 011 0  b40 imm14 t  TBZ  Xt, #(b5:b40), (PC + sxTo64(imm14 << 2))
+   */
+   if (INSN(30,25) == BITS6(0,1,1,0,1,1)) {
+      UInt    b5     = INSN(31,31);
+      Bool    bIfZ   = INSN(24,24) == 0;
+      UInt    b40    = INSN(23,19);
+      UInt    imm14  = INSN(18,5);
+      UInt    tt     = INSN(4,0);
+      UInt    bitNo  = (b5 << 5) | b40;
+      ULong   uimm64 = imm14 << 2;
+      Long    simm64 = sx_to_64(uimm64, 16);
+      IRExpr* cond 
+         = binop(bIfZ ? Iop_CmpEQ64 : Iop_CmpNE64,
+                 binop(Iop_And64,
+                       binop(Iop_Shr64, getIReg64orZR(tt), mkU8(bitNo)),
+                       mkU64(1)),
+                 mkU64(0));
+      stmt( IRStmt_Exit(cond,
+                        Ijk_Boring,
+                        IRConst_U64(guest_PC_curr_instr + simm64),
+                        OFFB_PC) );
+      putPC(mkU64(guest_PC_curr_instr + 4));
+      dres->whatNext    = Dis_StopHere;
+      dres->jk_StopHere = Ijk_Boring;
+      DIP("tb%sz %s, #%u, 0x%llx\n",
+          bIfZ ? "" : "n", nameIReg64orZR(tt), bitNo,
+          guest_PC_curr_instr + simm64);
+      return True;
+   }
+
+   /* -------------------- SVC -------------------- */
+   /* 11010100 000 imm16 000 01
+      Don't bother with anything except the imm16==0 case.
+   */
+   if (INSN(31,0) == 0xD4000001) {
+      putPC(mkU64(guest_PC_curr_instr + 4));
+      dres->whatNext    = Dis_StopHere;
+      dres->jk_StopHere = Ijk_Sys_syscall;
+      DIP("svc #0\n");
+      return True;
+   }
+
+   /* ------------------ M{SR,RS} ------------------ */
+   /* ---- Cases for TPIDR_EL0 ----
+      0xD51BD0 010 Rt   MSR tpidr_el0, rT
+      0xD53BD0 010 Rt   MRS rT, tpidr_el0
+   */
+   if (   (INSN(31,0) & 0xFFFFFFE0) == 0xD51BD040 /*MSR*/
+       || (INSN(31,0) & 0xFFFFFFE0) == 0xD53BD040 /*MRS*/) {
+      Bool toSys = INSN(21,21) == 0;
+      UInt tt    = INSN(4,0);
+      if (toSys) {
+         stmt( IRStmt_Put( OFFB_TPIDR_EL0, getIReg64orZR(tt)) );
+         DIP("msr tpidr_el0, %s\n", nameIReg64orZR(tt));
+      } else {
+         putIReg64orZR(tt, IRExpr_Get( OFFB_TPIDR_EL0, Ity_I64 ));
+         DIP("mrs %s, tpidr_el0\n", nameIReg64orZR(tt));
+      }
+      return True;
+   }
+   /* ---- Cases for FPCR ----
+      0xD51B44 000 Rt  MSR fpcr, rT
+      0xD53B44 000 Rt  MRS rT, fpcr
+   */
+   if (   (INSN(31,0) & 0xFFFFFFE0) == 0xD51B4400 /*MSR*/
+       || (INSN(31,0) & 0xFFFFFFE0) == 0xD53B4400 /*MRS*/) {
+      Bool toSys = INSN(21,21) == 0;
+      UInt tt    = INSN(4,0);
+      if (toSys) {
+         stmt( IRStmt_Put( OFFB_FPCR, getIReg32orZR(tt)) );
+         DIP("msr fpcr, %s\n", nameIReg64orZR(tt));
+      } else {
+         putIReg32orZR(tt, IRExpr_Get(OFFB_FPCR, Ity_I32));
+         DIP("mrs %s, fpcr\n", nameIReg64orZR(tt));
+      }
+      return True;
+   }
+   /* ---- Cases for FPSR ----
+      0xD51B44 001 Rt  MSR fpsr, rT
+      0xD53B44 001 Rt  MRS rT, fpsr
+      The only part of this we model is FPSR.QC.  All other bits
+      are ignored when writing to it and RAZ when reading from it.
+   */
+   if (   (INSN(31,0) & 0xFFFFFFE0) == 0xD51B4420 /*MSR*/
+       || (INSN(31,0) & 0xFFFFFFE0) == 0xD53B4420 /*MRS*/) {
+      Bool toSys = INSN(21,21) == 0;
+      UInt tt    = INSN(4,0);
+      if (toSys) {
+         /* Just deal with FPSR.QC.  Make up a V128 value which is
+            zero if Xt[27] is zero and any other value if Xt[27] is
+            nonzero. */
+         IRTemp qc64 = newTemp(Ity_I64);
+         assign(qc64, binop(Iop_And64,
+                            binop(Iop_Shr64, getIReg64orZR(tt), mkU8(27)),
+                            mkU64(1)));
+         IRExpr* qcV128 = binop(Iop_64HLtoV128, mkexpr(qc64), mkexpr(qc64));
+         stmt( IRStmt_Put( OFFB_QCFLAG, qcV128 ) );
+         DIP("msr fpsr, %s\n", nameIReg64orZR(tt));
+      } else {
+         /* Generate a value which is all zeroes except for bit 27,
+            which must be zero if QCFLAG is all zeroes and one otherwise. */
+         IRTemp qcV128 = newTempV128();
+         assign(qcV128, IRExpr_Get( OFFB_QCFLAG, Ity_V128 ));
+         IRTemp qc64 = newTemp(Ity_I64);
+         assign(qc64, binop(Iop_Or64, unop(Iop_V128HIto64, mkexpr(qcV128)),
+                                      unop(Iop_V128to64,   mkexpr(qcV128))));
+         IRExpr* res = binop(Iop_Shl64, 
+                             unop(Iop_1Uto64,
+                                  binop(Iop_CmpNE64, mkexpr(qc64), mkU64(0))),
+                             mkU8(27));
+         putIReg64orZR(tt, res);
+         DIP("mrs %s, fpsr\n", nameIReg64orZR(tt));
+      }
+      return True;
+   }
+   /* ---- Cases for NZCV ----
+      D51B42 000 Rt  MSR nzcv, rT
+      D53B42 000 Rt  MRS rT, nzcv
+      The only parts of NZCV that actually exist are bits 31:28, which 
+      are the N Z C and V bits themselves.  Hence the flags thunk provides
+      all the state we need.
+   */
+   if (   (INSN(31,0) & 0xFFFFFFE0) == 0xD51B4200 /*MSR*/
+       || (INSN(31,0) & 0xFFFFFFE0) == 0xD53B4200 /*MRS*/) {
+      Bool  toSys = INSN(21,21) == 0;
+      UInt  tt    = INSN(4,0);
+      if (toSys) {
+         IRTemp t = newTemp(Ity_I64);
+         assign(t, binop(Iop_And64, getIReg64orZR(tt), mkU64(0xF0000000ULL)));
+         setFlags_COPY(t);
+         DIP("msr %s, nzcv\n", nameIReg32orZR(tt));
+      } else {
+         IRTemp res = newTemp(Ity_I64);
+         assign(res, mk_arm64g_calculate_flags_nzcv());
+         putIReg32orZR(tt, unop(Iop_64to32, mkexpr(res)));
+         DIP("mrs %s, nzcv\n", nameIReg64orZR(tt));
+      }
+      return True;
+   }
+   /* ---- Cases for DCZID_EL0 ----
+      Don't support arbitrary reads and writes to this register.  Just
+      return the value 16, which indicates that the DC ZVA instruction
+      is not permitted, so we don't have to emulate it.
+      D5 3B 00 111 Rt  MRS rT, dczid_el0
+   */
+   if ((INSN(31,0) & 0xFFFFFFE0) == 0xD53B00E0) {
+      UInt tt = INSN(4,0);
+      putIReg64orZR(tt, mkU64(1<<4));
+      DIP("mrs %s, dczid_el0 (FAKED)\n", nameIReg64orZR(tt));
+      return True;
+   }
+   /* ---- Cases for CTR_EL0 ----
+      We just handle reads, and make up a value from the D and I line
+      sizes in the VexArchInfo we are given, and patch in the following
+      fields that the Foundation model gives ("natively"):
+      CWG = 0b0100, ERG = 0b0100, L1Ip = 0b11
+      D5 3B 00 001 Rt  MRS rT, ctr_el0
+   */
+   if ((INSN(31,0) & 0xFFFFFFE0) == 0xD53B0020) {
+      UInt tt = INSN(4,0);
+      /* Need to generate a value from dMinLine_lg2_szB and
+         iMinLine_lg2_szB.  The value in the register is in 32-bit
+         units, so need to subtract 2 from the values in the
+         VexArchInfo.  We can assume that the values here are valid --
+         disInstr_ARM64 checks them -- so there's no need to deal with
+         out-of-range cases. */
+      vassert(archinfo->arm64_dMinLine_lg2_szB >= 2
+              && archinfo->arm64_dMinLine_lg2_szB <= 17
+              && archinfo->arm64_iMinLine_lg2_szB >= 2
+              && archinfo->arm64_iMinLine_lg2_szB <= 17);
+      UInt val
+         = 0x8440c000 | ((0xF & (archinfo->arm64_dMinLine_lg2_szB - 2)) << 16)
+                      | ((0xF & (archinfo->arm64_iMinLine_lg2_szB - 2)) << 0);
+      putIReg64orZR(tt, mkU64(val));
+      DIP("mrs %s, ctr_el0\n", nameIReg64orZR(tt));
+      return True;
+   }
+   /* ---- Cases for CNTVCT_EL0 ----
+      This is a timestamp counter of some sort.  Support reads of it only
+      by passing through to the host.
+      D5 3B E0 010 Rt  MRS Xt, cntvct_el0
+   */
+   if ((INSN(31,0) & 0xFFFFFFE0) == 0xD53BE040) {
+      UInt     tt   = INSN(4,0);
+      IRTemp   val  = newTemp(Ity_I64);
+      IRExpr** args = mkIRExprVec_0();
+      IRDirty* d    = unsafeIRDirty_1_N ( 
+                         val, 
+                         0/*regparms*/, 
+                         "arm64g_dirtyhelper_MRS_CNTVCT_EL0",
+                         &arm64g_dirtyhelper_MRS_CNTVCT_EL0,
+                         args 
+                      );
+      /* execute the dirty call, dumping the result in val. */
+      stmt( IRStmt_Dirty(d) );
+      putIReg64orZR(tt, mkexpr(val));
+      DIP("mrs %s, cntvct_el0\n", nameIReg64orZR(tt));
+      return True;
+   }   
+
+   /* ------------------ IC_IVAU ------------------ */
+   /* D5 0B 75 001 Rt  ic ivau, rT
+   */
+   if ((INSN(31,0) & 0xFFFFFFE0) == 0xD50B7520) {
+      /* We will always be provided with a valid iMinLine value. */
+      vassert(archinfo->arm64_iMinLine_lg2_szB >= 2
+              && archinfo->arm64_iMinLine_lg2_szB <= 17);
+      /* Round the requested address, in rT, down to the start of the
+         containing block. */
+      UInt   tt      = INSN(4,0);
+      ULong  lineszB = 1ULL << archinfo->arm64_iMinLine_lg2_szB;
+      IRTemp addr    = newTemp(Ity_I64);
+      assign( addr, binop( Iop_And64,
+                           getIReg64orZR(tt),
+                           mkU64(~(lineszB - 1))) );
+      /* Set the invalidation range, request exit-and-invalidate, with
+         continuation at the next instruction. */
+      stmt(IRStmt_Put(OFFB_CMSTART, mkexpr(addr)));
+      stmt(IRStmt_Put(OFFB_CMLEN,   mkU64(lineszB)));
+      /* be paranoid ... */
+      stmt( IRStmt_MBE(Imbe_Fence) );
+      putPC(mkU64( guest_PC_curr_instr + 4 ));
+      dres->whatNext    = Dis_StopHere;
+      dres->jk_StopHere = Ijk_InvalICache;
+      DIP("ic ivau, %s\n", nameIReg64orZR(tt));
+      return True;
+   }
+
+   /* ------------------ DC_CVAU ------------------ */
+   /* D5 0B 7B 001 Rt  dc cvau, rT
+   */
+   if ((INSN(31,0) & 0xFFFFFFE0) == 0xD50B7B20) {
+      /* Exactly the same scheme as for IC IVAU, except we observe the
+         dMinLine size, and request an Ijk_FlushDCache instead of
+         Ijk_InvalICache. */
+      /* We will always be provided with a valid dMinLine value. */
+      vassert(archinfo->arm64_dMinLine_lg2_szB >= 2
+              && archinfo->arm64_dMinLine_lg2_szB <= 17);
+      /* Round the requested address, in rT, down to the start of the
+         containing block. */
+      UInt   tt      = INSN(4,0);
+      ULong  lineszB = 1ULL << archinfo->arm64_dMinLine_lg2_szB;
+      IRTemp addr    = newTemp(Ity_I64);
+      assign( addr, binop( Iop_And64,
+                           getIReg64orZR(tt),
+                           mkU64(~(lineszB - 1))) );
+      /* Set the flush range, request exit-and-flush, with
+         continuation at the next instruction. */
+      stmt(IRStmt_Put(OFFB_CMSTART, mkexpr(addr)));
+      stmt(IRStmt_Put(OFFB_CMLEN,   mkU64(lineszB)));
+      /* be paranoid ... */
+      stmt( IRStmt_MBE(Imbe_Fence) );
+      putPC(mkU64( guest_PC_curr_instr + 4 ));
+      dres->whatNext    = Dis_StopHere;
+      dres->jk_StopHere = Ijk_FlushDCache;
+      DIP("dc cvau, %s\n", nameIReg64orZR(tt));
+      return True;
+   }
+
+   /* ------------------ ISB, DMB, DSB ------------------ */
+   /* 31          21            11  7 6  4
+      11010 10100 0 00 011 0011 CRm 1 01 11111  DMB opt
+      11010 10100 0 00 011 0011 CRm 1 00 11111  DSB opt
+      11010 10100 0 00 011 0011 CRm 1 10 11111  ISB opt
+   */
+   if (INSN(31,22) == BITS10(1,1,0,1,0,1,0,1,0,0)
+       && INSN(21,12) == BITS10(0,0,0,0,1,1,0,0,1,1)
+       && INSN(7,7) == 1
+       && INSN(6,5) <= BITS2(1,0) && INSN(4,0) == BITS5(1,1,1,1,1)) {
+      UInt opc = INSN(6,5);
+      UInt CRm = INSN(11,8);
+      vassert(opc <= 2 && CRm <= 15);
+      stmt(IRStmt_MBE(Imbe_Fence));
+      const HChar* opNames[3] 
+         = { "dsb", "dmb", "isb" };
+      const HChar* howNames[16]
+         = { "#0", "oshld", "oshst", "osh", "#4", "nshld", "nshst", "nsh",
+             "#8", "ishld", "ishst", "ish", "#12", "ld", "st", "sy" };
+      DIP("%s %s\n", opNames[opc], howNames[CRm]);
+      return True;
+   }
+
+   /* -------------------- NOP -------------------- */
+   if (INSN(31,0) == 0xD503201F) {
+      DIP("nop\n");
+      return True;
+   }
+
+   /* -------------------- BRK -------------------- */
+   /* 31        23  20    4
+      1101 0100 001 imm16 00000  BRK #imm16
+   */
+   if (INSN(31,24) == BITS8(1,1,0,1,0,1,0,0)
+       && INSN(23,21) == BITS3(0,0,1) && INSN(4,0) == BITS5(0,0,0,0,0)) {
+      UInt imm16 = INSN(20,5);
+      /* Request SIGTRAP and then restart of this insn. */
+      putPC(mkU64(guest_PC_curr_instr + 0));
+      dres->whatNext    = Dis_StopHere;
+      dres->jk_StopHere = Ijk_SigTRAP;
+      DIP("brk #%u\n", imm16);
+      return True;
+   }
+
+  //fail:
+   vex_printf("ARM64 front end: branch_etc\n");
+   return False;
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- SIMD and FP instructions: helper functions           ---*/
+/*------------------------------------------------------------*/
+
+/* Some constructors for interleave/deinterleave expressions. */
+
+static IRExpr* mk_CatEvenLanes64x2 ( IRTemp a10, IRTemp b10 ) {
+   /* Concatenate the even (lower) 64-bit lanes: result is a0 b0. */
+   IRExpr* aV = mkexpr(a10);
+   IRExpr* bV = mkexpr(b10);
+   return binop(Iop_InterleaveLO64x2, aV, bV);
+}
+
+static IRExpr* mk_CatOddLanes64x2 ( IRTemp a10, IRTemp b10 ) {
+   /* Concatenate the odd (upper) 64-bit lanes: result is a1 b1. */
+   IRExpr* aV = mkexpr(a10);
+   IRExpr* bV = mkexpr(b10);
+   return binop(Iop_InterleaveHI64x2, aV, bV);
+}
+
+static IRExpr* mk_CatEvenLanes32x4 ( IRTemp a3210, IRTemp b3210 ) {
+   /* Concatenate the even-numbered 32-bit lanes: result is
+      a2 a0 b2 b0. */
+   IRExpr* aV = mkexpr(a3210);
+   IRExpr* bV = mkexpr(b3210);
+   return binop(Iop_CatEvenLanes32x4, aV, bV);
+}
+
+static IRExpr* mk_CatOddLanes32x4 ( IRTemp a3210, IRTemp b3210 ) {
+   /* Concatenate the odd-numbered 32-bit lanes: result is
+      a3 a1 b3 b1. */
+   IRExpr* aV = mkexpr(a3210);
+   IRExpr* bV = mkexpr(b3210);
+   return binop(Iop_CatOddLanes32x4, aV, bV);
+}
+
+static IRExpr* mk_InterleaveLO32x4 ( IRTemp a3210, IRTemp b3210 ) {
+   /* Interleave the lower 32-bit lanes: result is a1 b1 a0 b0. */
+   IRExpr* aV = mkexpr(a3210);
+   IRExpr* bV = mkexpr(b3210);
+   return binop(Iop_InterleaveLO32x4, aV, bV);
+}
+
+static IRExpr* mk_InterleaveHI32x4 ( IRTemp a3210, IRTemp b3210 ) {
+   /* Interleave the upper 32-bit lanes: result is a3 b3 a2 b2. */
+   IRExpr* aV = mkexpr(a3210);
+   IRExpr* bV = mkexpr(b3210);
+   return binop(Iop_InterleaveHI32x4, aV, bV);
+}
+
+static IRExpr* mk_CatEvenLanes16x8 ( IRTemp a76543210, IRTemp b76543210 ) {
+   /* Concatenate the even-numbered 16-bit lanes: result is
+      a6 a4 a2 a0 b6 b4 b2 b0. */
+   IRExpr* aV = mkexpr(a76543210);
+   IRExpr* bV = mkexpr(b76543210);
+   return binop(Iop_CatEvenLanes16x8, aV, bV);
+}
+
+static IRExpr* mk_CatOddLanes16x8 ( IRTemp a76543210, IRTemp b76543210 ) {
+   /* Concatenate the odd-numbered 16-bit lanes: result is
+      a7 a5 a3 a1 b7 b5 b3 b1. */
+   IRExpr* aV = mkexpr(a76543210);
+   IRExpr* bV = mkexpr(b76543210);
+   return binop(Iop_CatOddLanes16x8, aV, bV);
+}
+
+static IRExpr* mk_InterleaveLO16x8 ( IRTemp a76543210, IRTemp b76543210 ) {
+   /* Interleave the lower 16-bit lanes: result is
+      a3 b3 a2 b2 a1 b1 a0 b0. */
+   IRExpr* aV = mkexpr(a76543210);
+   IRExpr* bV = mkexpr(b76543210);
+   return binop(Iop_InterleaveLO16x8, aV, bV);
+}
+
+static IRExpr* mk_InterleaveHI16x8 ( IRTemp a76543210, IRTemp b76543210 ) {
+   /* Interleave the upper 16-bit lanes: result is
+      a7 b7 a6 b6 a5 b5 a4 b4. */
+   IRExpr* aV = mkexpr(a76543210);
+   IRExpr* bV = mkexpr(b76543210);
+   return binop(Iop_InterleaveHI16x8, aV, bV);
+}
+
+static IRExpr* mk_CatEvenLanes8x16 ( IRTemp va, IRTemp vb )
+{
+   /* Concatenate the even-numbered 8-bit lanes of the two inputs:
+      result is aE aC aA a8 a6 a4 a2 a0 bE bC bA b8 b6 b4 b2 b0. */
+   return binop(Iop_CatEvenLanes8x16, mkexpr(va), mkexpr(vb));
+}
+
+static IRExpr* mk_CatOddLanes8x16 ( IRTemp va, IRTemp vb )
+{
+   /* Concatenate the odd-numbered 8-bit lanes of the two inputs:
+      result is aF aD aB a9 a7 a5 a3 a1 bF bD bB b9 b7 b5 b3 b1. */
+   return binop(Iop_CatOddLanes8x16, mkexpr(va), mkexpr(vb));
+}
+
+static IRExpr* mk_InterleaveLO8x16 ( IRTemp va, IRTemp vb )
+{
+   /* Interleave the lower 8-bit lanes of the two inputs: result is
+      a7 b7 a6 b6 a5 b5 a4 b4 a3 b3 a2 b2 a1 b1 a0 b0. */
+   return binop(Iop_InterleaveLO8x16, mkexpr(va), mkexpr(vb));
+}
+
+static IRExpr* mk_InterleaveHI8x16 ( IRTemp va, IRTemp vb )
+{
+   /* Interleave the upper 8-bit lanes of the two inputs: result is
+      aF bF aE bE aD bD aC bC aB bB aA bA a9 b9 a8 b8. */
+   return binop(Iop_InterleaveHI8x16, mkexpr(va), mkexpr(vb));
+}
+
+/* Generate N copies of |bit| in the bottom of a ULong.  N may be
+   anywhere in 1 .. 64 inclusive; the naive (1 << N) - 1 formula would
+   be undefined behaviour for N == 64, so that case is handled
+   explicitly. */
+static ULong Replicate ( ULong bit, Int N )
+{
+   vassert(bit <= 1 && N >= 1 && N <= 64);
+   if (bit == 0) {
+      return 0;
+   } else {
+      /* Special-case N == 64: shifting a 64-bit value by 64 is UB. */
+      return N == 64 ? ~0ULL : (1ULL << N) - 1;
+   }
+}
+
+static ULong Replicate32x2 ( ULong bits32 )
+{
+   /* Broadcast a 32-bit value into both 32-bit halves of a ULong. */
+   vassert(0 == (bits32 & ~0xFFFFFFFFULL));
+   return bits32 * 0x0000000100000001ULL;
+}
+
+static ULong Replicate16x4 ( ULong bits16 )
+{
+   /* Broadcast a 16-bit value into all four 16-bit lanes of a ULong. */
+   vassert(0 == (bits16 & ~0xFFFFULL));
+   return bits16 * 0x0001000100010001ULL;
+}
+
+static ULong Replicate8x8 ( ULong bits8 )
+{
+   /* Broadcast an 8-bit value into all eight bytes of a ULong. */
+   vassert(0 == (bits8 & ~0xFFULL));
+   return bits8 * 0x0101010101010101ULL;
+}
+
+/* Expand the VFPExpandImm-style encoding in the bottom 8 bits of
+   |imm8| to either a 32-bit value if N is 32 or a 64 bit value if N
+   is 64.  In the former case, the upper 32 bits of the returned value
+   are guaranteed to be zero. */
+static ULong VFPExpandImm ( ULong imm8, Int N )
+{
+   vassert(imm8 <= 0xFF);
+   vassert(N == 32 || N == 64);
+   /* Split the target format into 1 sign bit, E exponent bits and
+      F fraction bits. */
+   Int E = ((N == 32) ? 8 : 11) - 2; // The spec incorrectly omits the -2.
+   Int F = N - E - 1;
+   vassert(1 + E + F == N);
+   ULong sign   = (imm8 >> 7) & 1;
+   ULong notTop = (imm8 >> 6) & 1;  /* imm8<6>: inverted top exponent bit */
+   ULong frac6  = imm8 & 63;        /* imm8<5:0>: top fraction bits */
+   ULong exp    = ((notTop ^ 1) << (E-1)) | Replicate(notTop, E-1);
+   ULong frac   = frac6 << (F-6);
+   vassert(sign < (1ULL << 1));
+   vassert(exp  < (1ULL << E));
+   vassert(frac < (1ULL << F));
+   return (sign << (E+F)) | (exp << F) | frac;
+}
+
+/* Expand an AdvSIMDExpandImm-style encoding into a 64-bit value.
+   This might fail, as indicated by the returned Bool.  Page 2530 of
+   the manual. */
+static Bool AdvSIMDExpandImm ( /*OUT*/ULong* res,
+                               UInt op, UInt cmode, UInt imm8 )
+{
+   vassert(op <= 1);
+   vassert(cmode <= 15);
+   vassert(imm8 <= 255);
+
+   *res = 0; /* will overwrite iff returning True */
+
+   ULong imm64    = 0;
+   Bool  testimm8 = False;
+
+   switch (cmode >> 1) {
+      case 0: /* 32x2, imm8 in byte 0 of each 32-bit lane */
+         testimm8 = False;
+         imm64 = Replicate32x2(imm8);
+         break;
+      case 1: /* 32x2, imm8 in byte 1 */
+         testimm8 = True;
+         imm64 = Replicate32x2(imm8 << 8);
+         break;
+      case 2: /* 32x2, imm8 in byte 2 */
+         testimm8 = True;
+         imm64 = Replicate32x2(imm8 << 16);
+         break;
+      case 3: /* 32x2, imm8 in byte 3 */
+         testimm8 = True;
+         imm64 = Replicate32x2(imm8 << 24);
+         break;
+      case 4: /* 16x4, imm8 in byte 0 of each 16-bit lane */
+         testimm8 = False;
+         imm64 = Replicate16x4(imm8);
+         break;
+      case 5: /* 16x4, imm8 in byte 1 */
+         testimm8 = True;
+         imm64 = Replicate16x4(imm8 << 8);
+         break;
+      case 6: /* 32x2 "shifted ones" forms */
+         testimm8 = True;
+         if ((cmode & 1) == 0)
+            imm64 = Replicate32x2((imm8 << 8) | 0xFF);
+         else
+            imm64 = Replicate32x2((imm8 << 16) | 0xFFFF);
+         break;
+      case 7:
+         /* The four op/cmode<0> combinations are mutually exclusive. */
+         testimm8 = False;
+         if ((cmode & 1) == 0 && op == 0) {
+            /* 8x16: plain byte replication */
+            imm64 = Replicate8x8(imm8);
+         }
+         else if ((cmode & 1) == 0 && op == 1) {
+            /* Each bit of imm8 expands to a whole byte, with imm8<7>
+               landing in the most significant byte. */
+            Int i;
+            imm64 = 0;
+            for (i = 7; i >= 0; i--) {
+               imm64 = (imm64 << 8) | ((imm8 & (1u << i)) ? 0xFFULL : 0x00ULL);
+            }
+         }
+         else if ((cmode & 1) == 1 && op == 0) {
+            /* 32x2: VFP-style single-precision FP immediate */
+            ULong imm8_7  = (imm8 >> 7) & 1;
+            ULong imm8_6  = (imm8 >> 6) & 1;
+            ULong imm8_50 = imm8 & 63;
+            ULong imm32 = (imm8_7                 << (1 + 5 + 6 + 19))
+                          | ((imm8_6 ^ 1)         << (5 + 6 + 19))
+                          | (Replicate(imm8_6, 5) << (6 + 19))
+                          | (imm8_50              << 19);
+            imm64 = Replicate32x2(imm32);
+         }
+         else {
+            /* (cmode & 1) == 1 && op == 1: double-precision FP imm,
+               imm64 = imm8<7>:NOT(imm8<6>)
+                       :Replicate(imm8<6>,8):imm8<5:0>:Zeros(48) */
+            ULong imm8_7  = (imm8 >> 7) & 1;
+            ULong imm8_6  = (imm8 >> 6) & 1;
+            ULong imm8_50 = imm8 & 63;
+            imm64 = (imm8_7 << 63) | ((imm8_6 ^ 1) << 62)
+                    | (Replicate(imm8_6, 8) << 54)
+                    | (imm8_50 << 48);
+         }
+         break;
+      default:
+         vassert(0);
+   }
+
+   if (testimm8 && imm8 == 0)
+      return False;
+
+   *res = imm64;
+   return True;
+}
+
+/* Help a bit for decoding laneage for vector operations that can be
+   of the form 4x32, 2x64 or 2x32-and-zero-upper-half, as encoded by Q
+   and SZ bits, typically for vector floating point.  Each OUT
+   parameter may be NULL, in which case that datum is not returned.
+   Returns False (writing nothing) for the disallowed 1x64 case. */
+static Bool getLaneInfo_Q_SZ ( /*OUT*/IRType* tyI,  /*OUT*/IRType* tyF,
+                               /*OUT*/UInt* nLanes, /*OUT*/Bool* zeroUpper,
+                               /*OUT*/const HChar** arrSpec,
+                               Bool bitQ, Bool bitSZ )
+{
+   vassert(bitQ == True || bitQ == False);
+   vassert(bitSZ == True || bitSZ == False);
+   IRType       iTy;
+   IRType       fTy;
+   UInt         nL;
+   Bool         zU;
+   const HChar* spec;
+   if (bitQ && bitSZ) {          // 2x64
+      iTy = Ity_I64; fTy = Ity_F64; nL = 2; zU = False; spec = "2d";
+   } else if (bitQ && !bitSZ) {  // 4x32
+      iTy = Ity_I32; fTy = Ity_F32; nL = 4; zU = False; spec = "4s";
+   } else if (!bitQ && !bitSZ) { // 2x32, upper half zeroed
+      iTy = Ity_I32; fTy = Ity_F32; nL = 2; zU = True;  spec = "2s";
+   } else {
+      // Impliedly 1x64, which isn't allowed.
+      return False;
+   }
+   if (tyI)       *tyI       = iTy;
+   if (tyF)       *tyF       = fTy;
+   if (nLanes)    *nLanes    = nL;
+   if (zeroUpper) *zeroUpper = zU;
+   if (arrSpec)   *arrSpec   = spec;
+   return True;
+}
+
+/* Helper for decoding laneage for shift-style vector operations 
+   that involve an immediate shift amount.  The highest set bit of
+   immh (bit k) selects the lane size: szBlg2 == k, and the shift
+   amount is (16 << k) - immh:immb.  Fails (returning False, writing
+   nothing) when immh is zero.  Either OUT parameter may be NULL. */
+static Bool getLaneInfo_IMMH_IMMB ( /*OUT*/UInt* shift, /*OUT*/UInt* szBlg2,
+                                    UInt immh, UInt immb )
+{
+   vassert(immh < (1<<4));
+   vassert(immb < (1<<3));
+   UInt immhb = (immh << 3) | immb;
+   Int  k;
+   for (k = 3; k >= 0; k--) {
+      if (immh & (1u << k)) {
+         if (shift)  *shift  = (16u << k) - immhb;
+         if (szBlg2) *szBlg2 = (UInt)k;
+         return True;
+      }
+   }
+   return False;
+}
+
+/* Generate IR to fold all lanes of the V128 value in 'src' as
+   characterised by the operator 'op', and return the result in the
+   bottom bits of a V128, with all other bits set to zero. */
+static IRTemp math_FOLDV ( IRTemp src, IROp op )
+{
+   /* The basic idea is to use repeated applications of Iop_CatEven*
+      and Iop_CatOdd* operators to 'src' so as to clone each lane into
+      a complete vector.  Then fold all those vectors with 'op' and
+      zero out all but the least significant lane. */
+   switch (op) {
+      case Iop_Min8Sx16: case Iop_Min8Ux16:
+      case Iop_Max8Sx16: case Iop_Max8Ux16: case Iop_Add8x16: {
+         /* NB: temp naming here is misleading -- the naming is for 8
+            lanes of 16 bit, whereas what is being operated on is 16
+            lanes of 8 bits. */
+         IRTemp x76543210 = src;
+         /* Stage 1: 64-bit granularity -- duplicate the hi and lo
+            halves of the vector. */
+         IRTemp x76547654 = newTempV128();
+         IRTemp x32103210 = newTempV128();
+         assign(x76547654, mk_CatOddLanes64x2 (x76543210, x76543210));
+         assign(x32103210, mk_CatEvenLanes64x2(x76543210, x76543210));
+         /* Stage 2: 32-bit granularity. */
+         IRTemp x76767676 = newTempV128();
+         IRTemp x54545454 = newTempV128();
+         IRTemp x32323232 = newTempV128();
+         IRTemp x10101010 = newTempV128();
+         assign(x76767676, mk_CatOddLanes32x4 (x76547654, x76547654));
+         assign(x54545454, mk_CatEvenLanes32x4(x76547654, x76547654));
+         assign(x32323232, mk_CatOddLanes32x4 (x32103210, x32103210));
+         assign(x10101010, mk_CatEvenLanes32x4(x32103210, x32103210));
+         /* Stage 3: 16-bit granularity. */
+         IRTemp x77777777 = newTempV128();
+         IRTemp x66666666 = newTempV128();
+         IRTemp x55555555 = newTempV128();
+         IRTemp x44444444 = newTempV128();
+         IRTemp x33333333 = newTempV128();
+         IRTemp x22222222 = newTempV128();
+         IRTemp x11111111 = newTempV128();
+         IRTemp x00000000 = newTempV128();
+         assign(x77777777, mk_CatOddLanes16x8 (x76767676, x76767676));
+         assign(x66666666, mk_CatEvenLanes16x8(x76767676, x76767676));
+         assign(x55555555, mk_CatOddLanes16x8 (x54545454, x54545454));
+         assign(x44444444, mk_CatEvenLanes16x8(x54545454, x54545454));
+         assign(x33333333, mk_CatOddLanes16x8 (x32323232, x32323232));
+         assign(x22222222, mk_CatEvenLanes16x8(x32323232, x32323232));
+         assign(x11111111, mk_CatOddLanes16x8 (x10101010, x10101010));
+         assign(x00000000, mk_CatEvenLanes16x8(x10101010, x10101010));
+         /* Stage 4: 8-bit granularity -- each xAll<k> now has lane k
+            of src replicated into all 16 lanes. */
+         /* Naming not misleading after here. */
+         IRTemp xAllF = newTempV128();
+         IRTemp xAllE = newTempV128();
+         IRTemp xAllD = newTempV128();
+         IRTemp xAllC = newTempV128();
+         IRTemp xAllB = newTempV128();
+         IRTemp xAllA = newTempV128();
+         IRTemp xAll9 = newTempV128();
+         IRTemp xAll8 = newTempV128();
+         IRTemp xAll7 = newTempV128();
+         IRTemp xAll6 = newTempV128();
+         IRTemp xAll5 = newTempV128();
+         IRTemp xAll4 = newTempV128();
+         IRTemp xAll3 = newTempV128();
+         IRTemp xAll2 = newTempV128();
+         IRTemp xAll1 = newTempV128();
+         IRTemp xAll0 = newTempV128();
+         assign(xAllF, mk_CatOddLanes8x16 (x77777777, x77777777));
+         assign(xAllE, mk_CatEvenLanes8x16(x77777777, x77777777));
+         assign(xAllD, mk_CatOddLanes8x16 (x66666666, x66666666));
+         assign(xAllC, mk_CatEvenLanes8x16(x66666666, x66666666));
+         assign(xAllB, mk_CatOddLanes8x16 (x55555555, x55555555));
+         assign(xAllA, mk_CatEvenLanes8x16(x55555555, x55555555));
+         assign(xAll9, mk_CatOddLanes8x16 (x44444444, x44444444));
+         assign(xAll8, mk_CatEvenLanes8x16(x44444444, x44444444));
+         assign(xAll7, mk_CatOddLanes8x16 (x33333333, x33333333));
+         assign(xAll6, mk_CatEvenLanes8x16(x33333333, x33333333));
+         assign(xAll5, mk_CatOddLanes8x16 (x22222222, x22222222));
+         assign(xAll4, mk_CatEvenLanes8x16(x22222222, x22222222));
+         assign(xAll3, mk_CatOddLanes8x16 (x11111111, x11111111));
+         assign(xAll2, mk_CatEvenLanes8x16(x11111111, x11111111));
+         assign(xAll1, mk_CatOddLanes8x16 (x00000000, x00000000));
+         assign(xAll0, mk_CatEvenLanes8x16(x00000000, x00000000));
+         /* Fold the 16 single-lane-replicated vectors pairwise with
+            'op', in a tree, so every lane of the final vector holds
+            the fold of all 16 source lanes. */
+         IRTemp maxFE = newTempV128();
+         IRTemp maxDC = newTempV128();
+         IRTemp maxBA = newTempV128();
+         IRTemp max98 = newTempV128();
+         IRTemp max76 = newTempV128();
+         IRTemp max54 = newTempV128();
+         IRTemp max32 = newTempV128();
+         IRTemp max10 = newTempV128();
+         assign(maxFE, binop(op, mkexpr(xAllF), mkexpr(xAllE)));
+         assign(maxDC, binop(op, mkexpr(xAllD), mkexpr(xAllC)));
+         assign(maxBA, binop(op, mkexpr(xAllB), mkexpr(xAllA)));
+         assign(max98, binop(op, mkexpr(xAll9), mkexpr(xAll8)));
+         assign(max76, binop(op, mkexpr(xAll7), mkexpr(xAll6)));
+         assign(max54, binop(op, mkexpr(xAll5), mkexpr(xAll4)));
+         assign(max32, binop(op, mkexpr(xAll3), mkexpr(xAll2)));
+         assign(max10, binop(op, mkexpr(xAll1), mkexpr(xAll0)));
+         IRTemp maxFEDC = newTempV128();
+         IRTemp maxBA98 = newTempV128();
+         IRTemp max7654 = newTempV128();
+         IRTemp max3210 = newTempV128();
+         assign(maxFEDC, binop(op, mkexpr(maxFE), mkexpr(maxDC)));
+         assign(maxBA98, binop(op, mkexpr(maxBA), mkexpr(max98)));
+         assign(max7654, binop(op, mkexpr(max76), mkexpr(max54)));
+         assign(max3210, binop(op, mkexpr(max32), mkexpr(max10)));
+         IRTemp maxFEDCBA98 = newTempV128();
+         IRTemp max76543210 = newTempV128();
+         assign(maxFEDCBA98, binop(op, mkexpr(maxFEDC), mkexpr(maxBA98)));
+         assign(max76543210, binop(op, mkexpr(max7654), mkexpr(max3210)));
+         IRTemp maxAllLanes = newTempV128();
+         assign(maxAllLanes, binop(op, mkexpr(maxFEDCBA98),
+                                       mkexpr(max76543210)));
+         /* Keep only the least significant 8-bit lane. */
+         IRTemp res = newTempV128();
+         assign(res, unop(Iop_ZeroHI120ofV128, mkexpr(maxAllLanes)));
+         return res;
+      }
+      case Iop_Min16Sx8: case Iop_Min16Ux8:
+      case Iop_Max16Sx8: case Iop_Max16Ux8: case Iop_Add16x8: {
+         /* Same replication scheme as the 8x16 case above, but
+            stopping at 16-bit granularity. */
+         IRTemp x76543210 = src;
+         IRTemp x76547654 = newTempV128();
+         IRTemp x32103210 = newTempV128();
+         assign(x76547654, mk_CatOddLanes64x2 (x76543210, x76543210));
+         assign(x32103210, mk_CatEvenLanes64x2(x76543210, x76543210));
+         IRTemp x76767676 = newTempV128();
+         IRTemp x54545454 = newTempV128();
+         IRTemp x32323232 = newTempV128();
+         IRTemp x10101010 = newTempV128();
+         assign(x76767676, mk_CatOddLanes32x4 (x76547654, x76547654));
+         assign(x54545454, mk_CatEvenLanes32x4(x76547654, x76547654));
+         assign(x32323232, mk_CatOddLanes32x4 (x32103210, x32103210));
+         assign(x10101010, mk_CatEvenLanes32x4(x32103210, x32103210));
+         IRTemp x77777777 = newTempV128();
+         IRTemp x66666666 = newTempV128();
+         IRTemp x55555555 = newTempV128();
+         IRTemp x44444444 = newTempV128();
+         IRTemp x33333333 = newTempV128();
+         IRTemp x22222222 = newTempV128();
+         IRTemp x11111111 = newTempV128();
+         IRTemp x00000000 = newTempV128();
+         assign(x77777777, mk_CatOddLanes16x8 (x76767676, x76767676));
+         assign(x66666666, mk_CatEvenLanes16x8(x76767676, x76767676));
+         assign(x55555555, mk_CatOddLanes16x8 (x54545454, x54545454));
+         assign(x44444444, mk_CatEvenLanes16x8(x54545454, x54545454));
+         assign(x33333333, mk_CatOddLanes16x8 (x32323232, x32323232));
+         assign(x22222222, mk_CatEvenLanes16x8(x32323232, x32323232));
+         assign(x11111111, mk_CatOddLanes16x8 (x10101010, x10101010));
+         assign(x00000000, mk_CatEvenLanes16x8(x10101010, x10101010));
+         /* Fold the 8 single-lane-replicated vectors pairwise. */
+         IRTemp max76 = newTempV128();
+         IRTemp max54 = newTempV128();
+         IRTemp max32 = newTempV128();
+         IRTemp max10 = newTempV128();
+         assign(max76, binop(op, mkexpr(x77777777), mkexpr(x66666666)));
+         assign(max54, binop(op, mkexpr(x55555555), mkexpr(x44444444)));
+         assign(max32, binop(op, mkexpr(x33333333), mkexpr(x22222222)));
+         assign(max10, binop(op, mkexpr(x11111111), mkexpr(x00000000)));
+         IRTemp max7654 = newTempV128();
+         IRTemp max3210 = newTempV128();
+         assign(max7654, binop(op, mkexpr(max76), mkexpr(max54)));
+         assign(max3210, binop(op, mkexpr(max32), mkexpr(max10)));
+         IRTemp max76543210 = newTempV128();
+         assign(max76543210, binop(op, mkexpr(max7654), mkexpr(max3210)));
+         /* Keep only the least significant 16-bit lane. */
+         IRTemp res = newTempV128();
+         assign(res, unop(Iop_ZeroHI112ofV128, mkexpr(max76543210)));
+         return res;
+      }
+      case Iop_Max32Fx4: case Iop_Min32Fx4:
+      case Iop_Min32Sx4: case Iop_Min32Ux4:
+      case Iop_Max32Sx4: case Iop_Max32Ux4: case Iop_Add32x4: {
+         /* As above, but only two replication stages are needed for
+            four 32-bit lanes. */
+         IRTemp x3210 = src;
+         IRTemp x3232 = newTempV128();
+         IRTemp x1010 = newTempV128();
+         assign(x3232, mk_CatOddLanes64x2 (x3210, x3210));
+         assign(x1010, mk_CatEvenLanes64x2(x3210, x3210));
+         IRTemp x3333 = newTempV128();
+         IRTemp x2222 = newTempV128();
+         IRTemp x1111 = newTempV128();
+         IRTemp x0000 = newTempV128();
+         assign(x3333, mk_CatOddLanes32x4 (x3232, x3232));
+         assign(x2222, mk_CatEvenLanes32x4(x3232, x3232));
+         assign(x1111, mk_CatOddLanes32x4 (x1010, x1010));
+         assign(x0000, mk_CatEvenLanes32x4(x1010, x1010));
+         IRTemp max32 = newTempV128();
+         IRTemp max10 = newTempV128();
+         assign(max32, binop(op, mkexpr(x3333), mkexpr(x2222)));
+         assign(max10, binop(op, mkexpr(x1111), mkexpr(x0000)));
+         IRTemp max3210 = newTempV128();
+         assign(max3210, binop(op, mkexpr(max32), mkexpr(max10)));
+         /* Keep only the least significant 32-bit lane. */
+         IRTemp res = newTempV128();
+         assign(res, unop(Iop_ZeroHI96ofV128, mkexpr(max3210)));
+         return res;
+      }
+      case Iop_Add64x2: {
+         /* Trivial case: just combine the two 64-bit lanes. */
+         IRTemp x10 = src;
+         IRTemp x00 = newTempV128();
+         IRTemp x11 = newTempV128();
+         assign(x11, binop(Iop_InterleaveHI64x2, mkexpr(x10), mkexpr(x10)));
+         assign(x00, binop(Iop_InterleaveLO64x2, mkexpr(x10), mkexpr(x10)));
+         IRTemp max10 = newTempV128();
+         assign(max10, binop(op, mkexpr(x11), mkexpr(x00)));
+         /* Keep only the least significant 64-bit lane. */
+         IRTemp res = newTempV128();
+         assign(res, unop(Iop_ZeroHI64ofV128, mkexpr(max10)));
+         return res;
+      }
+      default:
+         vassert(0);
+   }
+}
+
+
+/* Generate IR for TBL and TBX.  This deals with the 128 bit case
+   only.  |tab| holds the table vectors (entries 0 .. len are used),
+   |src| holds the per-lane byte indices, and |oor_values| supplies
+   the lane values substituted where an index is out of range: zero
+   for TBL, the old destination for TBX.  Returns the result in a
+   new temp. */
+static IRTemp math_TBL_TBX ( IRTemp tab[4], UInt len, IRTemp src,
+                             IRTemp oor_values )
+{
+   /* |len| is unsigned, so only the upper bound needs checking
+      (len >= 0 was tautological and triggers -Wtype-limits). */
+   vassert(len <= 3);
+
+   /* Generate some useful constants as concisely as possible. */
+   IRTemp half15 = newTemp(Ity_I64);
+   assign(half15, mkU64(0x0F0F0F0F0F0F0F0FULL));
+   IRTemp half16 = newTemp(Ity_I64);
+   assign(half16, mkU64(0x1010101010101010ULL));
+
+   /* A zero vector */
+   IRTemp allZero = newTempV128();
+   assign(allZero, mkV128(0x0000));
+   /* A vector containing 15 in each 8-bit lane */
+   IRTemp all15 = newTempV128();
+   assign(all15, binop(Iop_64HLtoV128, mkexpr(half15), mkexpr(half15)));
+   /* A vector containing 16 in each 8-bit lane */
+   IRTemp all16 = newTempV128();
+   assign(all16, binop(Iop_64HLtoV128, mkexpr(half16), mkexpr(half16)));
+   /* A vector containing 32 in each 8-bit lane */
+   IRTemp all32 = newTempV128();
+   assign(all32, binop(Iop_Add8x16, mkexpr(all16), mkexpr(all16)));
+   /* A vector containing 48 in each 8-bit lane */
+   IRTemp all48 = newTempV128();
+   assign(all48, binop(Iop_Add8x16, mkexpr(all16), mkexpr(all32)));
+   /* A vector containing 64 in each 8-bit lane */
+   IRTemp all64 = newTempV128();
+   assign(all64, binop(Iop_Add8x16, mkexpr(all32), mkexpr(all32)));
+
+   /* Group the 16/32/48/64 vectors so as to be indexable. */
+   IRTemp allXX[4] = { all16, all32, all48, all64 };
+
+   /* Compute the result for each table vector, with zeroes in places
+      where the index values are out of range, and OR them into the
+      running vector. */
+   IRTemp running_result = newTempV128();
+   assign(running_result, mkV128(0));
+
+   UInt tabent;
+   for (tabent = 0; tabent <= len; tabent++) {
+      /* |tabent| is unsigned; only the upper bound is meaningful. */
+      vassert(tabent < 4);
+      /* Rebase the indices so that the lanes served by this table
+         vector fall in the range 0 .. 15. */
+      IRTemp bias = newTempV128();
+      assign(bias,
+             mkexpr(tabent == 0 ? allZero : allXX[tabent-1]));
+      IRTemp biased_indices = newTempV128();
+      assign(biased_indices,
+             binop(Iop_Sub8x16, mkexpr(src), mkexpr(bias)));
+      /* Lanes whose (biased) index is in range for this vector. */
+      IRTemp valid_mask = newTempV128();
+      assign(valid_mask,
+             binop(Iop_CmpGT8Ux16, mkexpr(all16), mkexpr(biased_indices)));
+      /* Mask the indices to 0..15 so Perm8x16 can't misbehave. */
+      IRTemp safe_biased_indices = newTempV128();
+      assign(safe_biased_indices,
+             binop(Iop_AndV128, mkexpr(biased_indices), mkexpr(all15)));
+      IRTemp results_or_junk = newTempV128();
+      assign(results_or_junk,
+             binop(Iop_Perm8x16, mkexpr(tab[tabent]),
+                                 mkexpr(safe_biased_indices)));
+      IRTemp results_or_zero = newTempV128();
+      assign(results_or_zero,
+             binop(Iop_AndV128, mkexpr(results_or_junk), mkexpr(valid_mask)));
+      /* And OR that into the running result. */
+      IRTemp tmp = newTempV128();
+      assign(tmp, binop(Iop_OrV128, mkexpr(results_or_zero),
+                        mkexpr(running_result)));
+      running_result = tmp;
+   }
+
+   /* So now running_result holds the overall result where the indices
+      are in range, and zero in out-of-range lanes.  Now we need to
+      compute an overall validity mask and use this to copy in the
+      lanes in the oor_values for out of range indices.  This is
+      unnecessary for TBL but will get folded out by iropt, so we lean
+      on that and generate the same code for TBL and TBX here. */
+   IRTemp overall_valid_mask = newTempV128();
+   assign(overall_valid_mask,
+          binop(Iop_CmpGT8Ux16, mkexpr(allXX[len]), mkexpr(src)));
+   IRTemp result = newTempV128();
+   assign(result,
+          binop(Iop_OrV128,
+                mkexpr(running_result),
+                binop(Iop_AndV128,
+                      mkexpr(oor_values),
+                      unop(Iop_NotV128, mkexpr(overall_valid_mask)))));
+   return result;
+}
+
+
+/* Let |argL| and |argR| be V128 values, and let |opI64x2toV128| be
+   an op which takes two I64s and produces a V128.  That is, a widening
+   operator.  Generate IR which applies |opI64x2toV128| to either the
+   lower (if |is2| is False) or upper (if |is2| is True) halves of
+   |argL| and |argR|, and return the value in a new IRTemp.
+*/
+static
+IRTemp math_BINARY_WIDENING_V128 ( Bool is2, IROp opI64x2toV128,
+                                   IRExpr* argL, IRExpr* argR )
+{
+   /* Select which 64-bit half of each operand feeds the widening op. */
+   IROp    getHalf = is2 ? Iop_V128HIto64 : Iop_V128to64;
+   IRExpr* halfL   = unop(getHalf, argL);
+   IRExpr* halfR   = unop(getHalf, argR);
+   IRTemp  wide    = newTempV128();
+   assign(wide, binop(opI64x2toV128, halfL, halfR));
+   return wide;
+}
+
+
+/* Generate signed/unsigned absolute difference vector IR:
+   abd(L,R) = (L > R) ? L-R : R-L, computed lane-wise via a
+   compare mask. */
+static
+IRTemp math_ABD ( Bool isU, UInt size, IRExpr* argLE, IRExpr* argRE )
+{
+   vassert(size <= 3);
+   IROp   cmpOp = isU ? mkVecCMPGTU(size) : mkVecCMPGTS(size);
+   IROp   subOp = mkVecSUB(size);
+   IRTemp lhs   = newTempV128();
+   IRTemp rhs   = newTempV128();
+   assign(lhs, argLE);
+   assign(rhs, argRE);
+   /* All-ones in lanes where lhs > rhs. */
+   IRTemp gtMask = newTempV128();
+   assign(gtMask, binop(cmpOp, mkexpr(lhs), mkexpr(rhs)));
+   /* Merge lhs-rhs (where lhs > rhs) with rhs-lhs (elsewhere). */
+   IRTemp out = newTempV128();
+   assign(out,
+          binop(Iop_OrV128,
+                binop(Iop_AndV128,
+                      binop(subOp, mkexpr(lhs), mkexpr(rhs)),
+                      mkexpr(gtMask)),
+                binop(Iop_AndV128,
+                      binop(subOp, mkexpr(rhs), mkexpr(lhs)),
+                      unop(Iop_NotV128, mkexpr(gtMask)))));
+   return out;
+}
+
+
+/* Generate IR that takes a V128 and sign- or zero-widens
+   either the lower or upper set of lanes to twice-as-wide,
+   resulting in a new V128 value. */
+static
+IRTemp math_WIDEN_LO_OR_HI_LANES ( Bool zWiden, Bool fromUpperHalf,
+                                   UInt sizeNarrow, IRExpr* srcE )
+{
+   /* Strategy: interleave the source with itself so every narrow lane
+      lands in the top half of a wide lane, then shift right by the
+      narrow lane width -- logically for zero-widening, arithmetically
+      for sign-widening. */
+   IROp shiftOp = Iop_INVALID;
+   IROp dupOp   = Iop_INVALID;
+   UInt amt     = 0;
+   switch (sizeNarrow) {
+      case X10: /* 32 -> 64 */
+         shiftOp = zWiden ? Iop_ShrN64x2 : Iop_SarN64x2;
+         dupOp   = fromUpperHalf ? Iop_InterleaveHI32x4
+                                 : Iop_InterleaveLO32x4;
+         amt     = 32;
+         break;
+      case X01: /* 16 -> 32 */
+         shiftOp = zWiden ? Iop_ShrN32x4 : Iop_SarN32x4;
+         dupOp   = fromUpperHalf ? Iop_InterleaveHI16x8
+                                 : Iop_InterleaveLO16x8;
+         amt     = 16;
+         break;
+      case X00: /* 8 -> 16 */
+         shiftOp = zWiden ? Iop_ShrN16x8 : Iop_SarN16x8;
+         dupOp   = fromUpperHalf ? Iop_InterleaveHI8x16
+                                 : Iop_InterleaveLO8x16;
+         amt     = 8;
+         break;
+      default:
+         vassert(0);
+   }
+   IRTemp src = newTempV128();
+   assign(src, srcE);
+   IRTemp res = newTempV128();
+   assign(res, binop(shiftOp,
+                     binop(dupOp, mkexpr(src), mkexpr(src)),
+                     mkU8(amt)));
+   return res;
+}
+
+
+/* Generate IR that takes a V128 and sign- or zero-widens
+   either the even or odd lanes to twice-as-wide,
+   resulting in a new V128 value. */
+static
+IRTemp math_WIDEN_EVEN_OR_ODD_LANES ( Bool zWiden, Bool fromOdd,
+                                      UInt sizeNarrow, IRExpr* srcE )
+{
+   /* The shift amount is the narrow lane width in bits. */
+   UInt laneBits = 0;
+   switch (sizeNarrow) {
+      case X00: laneBits = 8;  break;
+      case X01: laneBits = 16; break;
+      case X10: laneBits = 32; break;
+      default:  vassert(0);
+   }
+   /* Right shift at the WIDE lane size: logical for zero-widening,
+      arithmetic for sign-widening. */
+   IROp   opRight = zWiden ? mkVecSHRN(sizeNarrow+1) : mkVecSARN(sizeNarrow+1);
+   IRTemp src     = newTempV128();
+   assign(src, srcE);
+   /* Odd lanes already sit in the top halves of the wide lanes; even
+      lanes must first be shifted up there. */
+   IRExpr* toShift = mkexpr(src);
+   if (!fromOdd) {
+      toShift = binop(mkVecSHLN(sizeNarrow+1), toShift, mkU8(laneBits));
+   }
+   IRTemp res = newTempV128();
+   assign(res, binop(opRight, toShift, mkU8(laneBits)));
+   return res;
+}
+
+
+/* Generate IR that takes two V128s and narrows (takes lower half)
+   of each lane, producing a single V128 value. */
+static
+IRTemp math_NARROW_LANES ( IRTemp argHi, IRTemp argLo, UInt sizeNarrow )
+{
+   /* CatEvenLanes at the narrow size selects the low half of every
+      wide lane of the concatenated pair. */
+   IROp   catOp  = mkVecCATEVENLANES(sizeNarrow);
+   IRTemp narrow = newTempV128();
+   assign(narrow, binop(catOp, mkexpr(argHi), mkexpr(argLo)));
+   return narrow;
+}
+
+
+/* Return a temp which holds the vector dup of the lane of width
+   (1 << size) obtained from src[laneNo].  |size| is 0/1/2/3 for
+   B/H/S/D lanes respectively. */
+static
+IRTemp math_DUP_VEC_ELEM ( IRExpr* src, UInt size, UInt laneNo )
+{
+   vassert(size <= 3);
+   /* Normalise |laneNo| so it is of the form
+      x000 for D, xx00 for S, xxx0 for H, and xxxx for B.
+      This puts the bits we want to inspect at constant offsets
+      regardless of the value of |size|.
+   */
+   UInt ix = laneNo << size;
+   vassert(ix <= 15);
+   IROp ops[4] = { Iop_INVALID, Iop_INVALID, Iop_INVALID, Iop_INVALID };
+   /* Build a plan: ops[i] duplicates across lanes of width 2^i bytes,
+      choosing odd or even source lanes according to bit i of |ix|.
+      The switch deliberately falls through so that a smaller |size|
+      fills in ALL levels from |size| up to 3. */
+   switch (size) {
+      case 0: /* B */
+         ops[0] = (ix & 1) ? Iop_CatOddLanes8x16 : Iop_CatEvenLanes8x16;
+         /* fallthrough */
+      case 1: /* H */
+         ops[1] = (ix & 2) ? Iop_CatOddLanes16x8 : Iop_CatEvenLanes16x8;
+         /* fallthrough */
+      case 2: /* S */
+         ops[2] = (ix & 4) ? Iop_CatOddLanes32x4 : Iop_CatEvenLanes32x4;
+         /* fallthrough */
+      case 3: /* D */
+         ops[3] = (ix & 8) ? Iop_InterleaveHI64x2 : Iop_InterleaveLO64x2;
+         break;
+      default:
+         vassert(0);
+   }
+   IRTemp res = newTempV128();
+   assign(res, src);
+   /* Apply the plan coarsest level first (64-bit down to the |size|
+      level).  Levels below |size| are Iop_INVALID and stop the loop. */
+   Int i;
+   for (i = 3; i >= 0; i--) {
+      if (ops[i] == Iop_INVALID)
+         break;
+      IRTemp tmp = newTempV128();
+      assign(tmp, binop(ops[i], mkexpr(res), mkexpr(res)));
+      res = tmp;
+   }
+   return res;
+}
+
+
+/* Let |srcV| be a V128 value, and let |imm5| be a lane-and-size
+   selector encoded as shown below.  Return a new V128 holding the
+   selected lane from |srcV| dup'd out to V128, and also return the
+   lane number, log2 of the lane size in bytes, and width-character via
+   *laneNo, *laneSzLg2 and *laneCh respectively.  It may be that imm5
+   is an invalid selector, in which case return
+   IRTemp_INVALID, 0, 0 and '?' respectively.
+
+   imm5 = xxxx1   signifies .b[xxxx]
+        = xxx10   .h[xxx]
+        = xx100   .s[xx]
+        = x1000   .d[x]
+        otherwise invalid
+*/
+static
+IRTemp handle_DUP_VEC_ELEM ( /*OUT*/UInt* laneNo,
+                             /*OUT*/UInt* laneSzLg2, /*OUT*/HChar* laneCh,
+                             IRExpr* srcV, UInt imm5 )
+{
+   /* Defaults, returned unchanged for an invalid selector. */
+   *laneNo    = 0;
+   *laneSzLg2 = 0;
+   *laneCh    = '?';
+
+   /* The lane size is given by the position of the lowest set bit in
+      imm5's bottom four bits, and the lane number by the bits above
+      it.  Scan from bit 0 upwards, matching the encoding table above. */
+   static const HChar chars[4] = { 'b', 'h', 's', 'd' };
+   static const UInt  masks[4] = { 15, 7, 3, 1 };
+   UInt sz;
+   for (sz = 0; sz < 4; sz++) {
+      if (imm5 & (1u << sz)) {
+         *laneNo    = (imm5 >> (sz+1)) & masks[sz];
+         *laneSzLg2 = sz;
+         *laneCh    = chars[sz];
+         return math_DUP_VEC_ELEM(srcV, *laneSzLg2, *laneNo);
+      }
+   }
+
+   /* No size bit set: invalid selector. */
+   return IRTemp_INVALID;
+}
+
+
+/* Clone |imm| to every lane of a V128, with lane size log2 of |size|.
+   Only H, S and D lane sizes are supported; |imm| must fit in the
+   lane. */
+static
+IRTemp math_VEC_DUP_IMM ( UInt size, ULong imm )
+{
+   IRType laneTy = Ity_INVALID;
+   IRTemp lane   = IRTemp_INVALID;
+   switch (size) {
+      case X01:
+         vassert(imm <= 0xFFFFULL);
+         laneTy = Ity_I16;
+         lane   = newTemp(laneTy);
+         assign(lane, mkU16( (UShort)imm ));
+         break;
+      case X10:
+         vassert(imm <= 0xFFFFFFFFULL);
+         laneTy = Ity_I32;
+         lane   = newTemp(laneTy);
+         assign(lane, mkU32( (UInt)imm ));
+         break;
+      case X11:
+         laneTy = Ity_I64;
+         lane   = newTemp(laneTy);
+         assign(lane, mkU64(imm));
+         break;
+      default:
+         vassert(0);
+   }
+   /* Replicate the scalar lane across the whole vector. */
+   return math_DUP_TO_V128(lane, laneTy);
+}
+
+
+/* Let |new64| be a V128 in which only the lower 64 bits are interesting,
+   and the upper can contain any value -- it is ignored.  If |is2| is False,
+   generate IR to put |new64| in the lower half of vector reg |dd| and zero
+   the upper half.  If |is2| is True, generate IR to put |new64| in the upper
+   half of vector reg |dd| and leave the lower half unchanged.  This
+   simulates the behaviour of the "foo/foo2" instructions in which the
+   destination is half the width of sources, for example addhn/addhn2.
+*/
+static
+void putLO64andZUorPutHI64 ( Bool is2, UInt dd, IRTemp new64 )
+{
+   if (!is2) {
+      /* Simple case: new64 goes in the bottom, top is zeroed. */
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(new64)));
+      return;
+   }
+   /* "2" case: keep Vdd's existing low 64 bits and install new64's
+      low 64 bits as the new high half. */
+   IRTemp oldLoOnly = newTempV128();
+   assign(oldLoOnly, unop(Iop_ZeroHI64ofV128, getQReg128(dd)));
+   /* InterleaveLO64x2(new64, 0) = [ new64.lo64 : 0 ]. */
+   IRTemp newHiOnly = newTempV128();
+   assign(newHiOnly, binop(Iop_InterleaveLO64x2, mkexpr(new64),
+                                                 mkV128(0x0000)));
+   IRTemp merged = newTempV128();
+   assign(merged, binop(Iop_OrV128, mkexpr(oldLoOnly),
+                                    mkexpr(newHiOnly)));
+   putQReg128(dd, mkexpr(merged));
+}
+
+
+/* Compute vector SQABS at lane size |size| for |srcE|, returning
+   the saturating result in |*qabs| and the plain (non-saturating)
+   result in |*nabs|. */
+static
+void math_SQABS ( /*OUT*/IRTemp* qabs, /*OUT*/IRTemp* nabs,
+                  IRExpr* srcE, UInt size )
+{
+   IRTemp src    = IRTemp_INVALID;
+   IRTemp isNeg  = IRTemp_INVALID;
+   IRTemp notNeg = IRTemp_INVALID;
+   IRTemp negN   = IRTemp_INVALID;
+   IRTemp negQ   = IRTemp_INVALID;
+   newTempsV128_7(&src, &isNeg, &notNeg, &negN, &negQ, nabs, qabs);
+   assign(src, srcE);
+   /* All-ones in lanes where the source is negative (0 > src). */
+   assign(isNeg,  binop(mkVecCMPGTS(size), mkV128(0x0000), mkexpr(src)));
+   assign(notNeg, unop(Iop_NotV128, mkexpr(isNeg)));
+   /* Negation as 0 - src: plain, and signed-saturating. */
+   assign(negN, binop(mkVecSUB(size),   mkV128(0x0000), mkexpr(src)));
+   assign(negQ, binop(mkVecQSUBS(size), mkV128(0x0000), mkexpr(src)));
+   /* Select negated value in negative lanes, src elsewhere. */
+   assign(*nabs, binop(Iop_OrV128,
+                       binop(Iop_AndV128, mkexpr(negN), mkexpr(isNeg)),
+                       binop(Iop_AndV128, mkexpr(src),  mkexpr(notNeg))));
+   assign(*qabs, binop(Iop_OrV128,
+                       binop(Iop_AndV128, mkexpr(negQ), mkexpr(isNeg)),
+                       binop(Iop_AndV128, mkexpr(src),  mkexpr(notNeg))));
+}
+
+
+/* Compute vector SQNEG at lane size |size| for |srcE|, returning
+   the saturating result in |*qneg| and the plain (non-saturating)
+   result in |*nneg|. */
+static
+void math_SQNEG ( /*OUT*/IRTemp* qneg, /*OUT*/IRTemp* nneg,
+                  IRExpr* srcE, UInt size )
+{
+   IRTemp srcT = IRTemp_INVALID;
+   newTempsV128_3(&srcT, nneg, qneg);
+   assign(srcT, srcE);
+   /* Negate as 0 - src: plain SUB for nneg, signed-saturating
+      QSUB for qneg. */
+   assign(*nneg, binop(mkVecSUB(size),   mkV128(0x0000), mkexpr(srcT)));
+   assign(*qneg, binop(mkVecQSUBS(size), mkV128(0x0000), mkexpr(srcT)));
+}
+
+
+/* Zero all except the least significant lane of |srcE|, where |size|
+   indicates the lane size in the usual way. */
+static IRTemp math_ZERO_ALL_EXCEPT_LOWEST_LANE ( UInt size, IRExpr* srcE )
+{
+   vassert(size < 4);
+   IROp   zeroHiOp = mkVecZEROHIxxOFV128(size);
+   IRTemp onlyLo   = newTempV128();
+   assign(onlyLo, unop(zeroHiOp, srcE));
+   return onlyLo;
+}
+
+
+/* Generate IR to compute vector widening MULL from either the lower
+   (is2==False) or upper (is2==True) halves of vecN and vecM.  The
+   widening multiplies are unsigned when isU==True and signed when
+   isU==False.  |size| is the narrow lane size indication.  Optionally,
+   the product may be added to or subtracted from vecD, at the wide lane
+   size.  This happens when |mas| is 'a' (add) or 's' (sub).  When |mas|
+   is 'm' (only multiply) then the accumulate part does not happen, and
+   |vecD| is expected to == IRTemp_INVALID.
+
+   Only size==0 (h_b_b), size==1 (s_h_h) and size==2 (d_s_s) variants
+   are allowed.  The result is returned in a new IRTemp, which is
+   returned in *res. */
+static
+void math_MULL_ACC ( /*OUT*/IRTemp* res,
+                     Bool is2, Bool isU, UInt size, HChar mas,
+                     IRTemp vecN, IRTemp vecM, IRTemp vecD )
+{
+   vassert(res && *res == IRTemp_INVALID);
+   vassert(size <= 2);
+   vassert(mas == 'm' || mas == 'a' || mas == 's');
+   if (mas == 'm') vassert(vecD == IRTemp_INVALID);
+
+   /* Widening multiply of the chosen halves of vecN and vecM. */
+   IROp   mulOp = isU ? mkVecMULLU(size) : mkVecMULLS(size);
+   IRTemp prod  = math_BINARY_WIDENING_V128(is2, mulOp,
+                                            mkexpr(vecN), mkexpr(vecM));
+   *res = newTempV128();
+   if (mas == 'm') {
+      /* Multiply only: the product is the result. */
+      assign(*res, mkexpr(prod));
+   } else {
+      /* Accumulate into vecD at the wide lane size. */
+      IROp accOp = mas == 'a' ? mkVecADD(size+1) : mkVecSUB(size+1);
+      assign(*res, binop(accOp, mkexpr(vecD), mkexpr(prod)));
+   }
+}
+
+
+/* Same as math_MULL_ACC, except the multiply is signed widening,
+   the multiplied value is then doubled, before being added to or
+   subtracted from the accumulated value.  And everything is
+   saturated.  In all cases, saturation residuals are returned
+   via (sat1q, sat1n), and in the accumulate cases,
+   via (sat2q, sat2n) too.  All results are returned in new temporaries.
+   In the no-accumulate case, *sat2q and *sat2n are never instantiated,
+   so the caller can tell this has happened. */
+static
+void math_SQDMULL_ACC ( /*OUT*/IRTemp* res,
+                        /*OUT*/IRTemp* sat1q, /*OUT*/IRTemp* sat1n,
+                        /*OUT*/IRTemp* sat2q, /*OUT*/IRTemp* sat2n,
+                        Bool is2, UInt size, HChar mas,
+                        IRTemp vecN, IRTemp vecM, IRTemp vecD )
+{
+   vassert(size <= 2);
+   vassert(mas == 'm' || mas == 'a' || mas == 's');
+   /* Compute
+         sat1q = vecN.D[is2] *sq vecM.d[is2] *q 2
+         sat1n = vecN.D[is2] *s  vecM.d[is2] *  2
+      IOW take either the low or high halves of vecN and vecM, signed widen,
+      multiply, double that, and signedly saturate.  Also compute the same
+      but without saturation.
+   */
+   vassert(sat2q && *sat2q == IRTemp_INVALID);
+   vassert(sat2n && *sat2n == IRTemp_INVALID);
+   newTempsV128_3(sat1q, sat1n, res);
+   /* tq: the QDMULL op doubles and saturates internally.  tn: plain
+      widening multiply, so the doubling must be done explicitly below. */
+   IRTemp tq = math_BINARY_WIDENING_V128(is2, mkVecQDMULLS(size),
+                                         mkexpr(vecN), mkexpr(vecM));
+   IRTemp tn = math_BINARY_WIDENING_V128(is2, mkVecMULLS(size),
+                                         mkexpr(vecN), mkexpr(vecM));
+   assign(*sat1q, mkexpr(tq));
+   assign(*sat1n, binop(mkVecADD(size+1), mkexpr(tn), mkexpr(tn)));
+
+   /* If there is no accumulation, the final result is sat1q,
+      and there's no assignment to sat2q or sat2n. */
+   if (mas == 'm') {
+      assign(*res, mkexpr(*sat1q));
+      return;
+   }
+
+   /* Compute
+         sat2q  = vecD +sq/-sq sat1q
+         sat2n  = vecD +/-     sat1n
+         result = sat2q
+   */
+   newTempsV128_2(sat2q, sat2n);
+   assign(*sat2q, binop(mas == 'a' ? mkVecQADDS(size+1) : mkVecQSUBS(size+1),
+                        mkexpr(vecD), mkexpr(*sat1q)));
+   assign(*sat2n, binop(mas == 'a' ? mkVecADD(size+1) : mkVecSUB(size+1),
+                        mkexpr(vecD), mkexpr(*sat1n)));
+   assign(*res, mkexpr(*sat2q));
+}
+
+
+/* Generate IR for widening signed vector multiplies.  The operands
+   have their lane width signedly widened, and they are then multiplied
+   at the wider width, returning results in two new IRTemps. */
+static
+void math_MULLS ( /*OUT*/IRTemp* resHI, /*OUT*/IRTemp* resLO,
+                  UInt sizeNarrow, IRTemp argL, IRTemp argR )
+{
+   vassert(sizeNarrow <= 2);
+   newTempsV128_2(resHI, resLO);
+   IROp opMulls = mkVecMULLS(sizeNarrow);
+   /* Split both operands into their 64-bit halves. */
+   IRTemp lHI = newTemp(Ity_I64);
+   IRTemp lLO = newTemp(Ity_I64);
+   IRTemp rHI = newTemp(Ity_I64);
+   IRTemp rLO = newTemp(Ity_I64);
+   assign(lHI, unop(Iop_V128HIto64, mkexpr(argL)));
+   assign(lLO, unop(Iop_V128to64,   mkexpr(argL)));
+   assign(rHI, unop(Iop_V128HIto64, mkexpr(argR)));
+   assign(rLO, unop(Iop_V128to64,   mkexpr(argR)));
+   /* Widening multiply each pair of halves. */
+   assign(*resHI, binop(opMulls, mkexpr(lHI), mkexpr(rHI)));
+   assign(*resLO, binop(opMulls, mkexpr(lLO), mkexpr(rLO)));
+}
+
+
+/* Generate IR for SQDMULH and SQRDMULH: signedly wideningly multiply,
+   double that, possibly add a rounding constant (R variants), and take
+   the high half.  |sat1q| gets the saturating result and |sat1n| the
+   exact (non-saturating) equivalent, so the caller can detect
+   saturation by comparing the two.  |res| is a copy of |sat1q|. */
+static
+void math_SQDMULH ( /*OUT*/IRTemp* res,
+                    /*OUT*/IRTemp* sat1q, /*OUT*/IRTemp* sat1n,
+                    Bool isR, UInt size, IRTemp vN, IRTemp vM )
+{
+   vassert(size == X01 || size == X10); /* s or h only */
+
+   newTempsV128_3(res, sat1q, sat1n);
+
+   IRTemp mullsHI = IRTemp_INVALID, mullsLO = IRTemp_INVALID;
+   math_MULLS(&mullsHI, &mullsLO, size, vN, vM);
+
+   /* This is an operation tag, not a temp: was wrongly declared as
+      IRTemp, which only worked through implicit integer conversion. */
+   IROp addWide = mkVecADD(size+1);
+
+   if (isR) {
+      assign(*sat1q, binop(mkVecQRDMULHIS(size), mkexpr(vN), mkexpr(vM)));
+
+      /* Rounding constant: 1 in the bit position just below the part
+         that survives the final narrowing. */
+      Int    rcShift    = size == X01 ? 15 : 31;
+      IRTemp roundConst = math_VEC_DUP_IMM(size+1, 1ULL << rcShift);
+      assign(*sat1n,
+             binop(mkVecCATODDLANES(size),
+                   binop(addWide,
+                         binop(addWide, mkexpr(mullsHI), mkexpr(mullsHI)),
+                         mkexpr(roundConst)),
+                   binop(addWide,
+                         binop(addWide, mkexpr(mullsLO), mkexpr(mullsLO)),
+                         mkexpr(roundConst))));
+   } else {
+      assign(*sat1q, binop(mkVecQDMULHIS(size), mkexpr(vN), mkexpr(vM)));
+
+      /* Double the widened products, then CatOddLanes takes the high
+         half of each wide lane. */
+      assign(*sat1n,
+             binop(mkVecCATODDLANES(size),
+                   binop(addWide, mkexpr(mullsHI), mkexpr(mullsHI)),
+                   binop(addWide, mkexpr(mullsLO), mkexpr(mullsLO))));
+   }
+
+   assign(*res, mkexpr(*sat1q));
+}
+
+
+/* Generate IR for SQSHL, UQSHL, SQSHLU by imm.  Put the result in
+   a new temp in *res, and the Q difference pair in new temps in
+   *qDiff1 and *qDiff2 respectively.  |nm| denotes which of the
+   three operations it is.  The difference pair is equal iff no lane
+   saturated, so the caller can compare them to update QCFLAG. */
+static
+void math_QSHL_IMM ( /*OUT*/IRTemp* res,
+                     /*OUT*/IRTemp* qDiff1, /*OUT*/IRTemp* qDiff2,
+                     IRTemp src, UInt size, UInt shift, const HChar* nm )
+{
+   vassert(size <= 3);
+   UInt laneBits = 8 << size;
+   vassert(shift < laneBits);
+   newTempsV128_3(res, qDiff1, qDiff2);
+   IRTemp z128 = newTempV128();
+   assign(z128, mkV128(0x0000));
+
+   /* UQSHL */
+   if (vex_streq(nm, "uqshl")) {
+      IROp qop = mkVecQSHLNSATUU(size);
+      assign(*res, binop(qop, mkexpr(src), mkU8(shift)));
+      if (shift == 0) {
+         /* No shift means no saturation. */
+         assign(*qDiff1, mkexpr(z128));
+         assign(*qDiff2, mkexpr(z128));
+      } else {
+         /* Saturation has occurred if any of the shifted-out bits are
+            nonzero.  We get the shifted-out bits by right-shifting the
+            original value. */
+         UInt rshift = laneBits - shift;
+         vassert(rshift >= 1 && rshift < laneBits);
+         assign(*qDiff1, binop(mkVecSHRN(size), mkexpr(src), mkU8(rshift)));
+         assign(*qDiff2, mkexpr(z128));
+      }
+      return;
+   }
+
+   /* SQSHL */
+   if (vex_streq(nm, "sqshl")) {
+      IROp qop = mkVecQSHLNSATSS(size);
+      assign(*res, binop(qop, mkexpr(src), mkU8(shift)));
+      if (shift == 0) {
+         /* No shift means no saturation. */
+         assign(*qDiff1, mkexpr(z128));
+         assign(*qDiff2, mkexpr(z128));
+      } else {
+         /* Saturation has occurred if any of the shifted-out bits are
+            different from the top bit of the original value. */
+         UInt rshift = laneBits - 1 - shift;
+         /* |rshift| is unsigned so cannot be negative; shift >= 1 here,
+            so only the upper bound needs asserting. */
+         vassert(rshift < laneBits-1);
+         /* qDiff1 is the shifted out bits, and the top bit of the original
+            value, preceded by zeroes. */
+         assign(*qDiff1, binop(mkVecSHRN(size), mkexpr(src), mkU8(rshift)));
+         /* qDiff2 is the top bit of the original value, cloned the
+            correct number of times. */
+         assign(*qDiff2, binop(mkVecSHRN(size),
+                               binop(mkVecSARN(size), mkexpr(src),
+                                                      mkU8(laneBits-1)),
+                               mkU8(rshift)));
+         /* This also succeeds in comparing the top bit of the original
+            value to itself, which is a bit stupid, but not wrong. */
+      }
+      return;
+   }
+
+   /* SQSHLU */
+   if (vex_streq(nm, "sqshlu")) {
+      IROp qop = mkVecQSHLNSATSU(size);
+      assign(*res, binop(qop, mkexpr(src), mkU8(shift)));
+      if (shift == 0) {
+         /* If there's no shift, saturation depends on the top bit
+            of the source. */
+         assign(*qDiff1, binop(mkVecSHRN(size), mkexpr(src), mkU8(laneBits-1)));
+         assign(*qDiff2, mkexpr(z128));
+      } else {
+         /* Saturation has occurred if any of the shifted-out bits are
+            nonzero.  We get the shifted-out bits by right-shifting the
+            original value. */
+         UInt rshift = laneBits - shift;
+         vassert(rshift >= 1 && rshift < laneBits);
+         assign(*qDiff1, binop(mkVecSHRN(size), mkexpr(src), mkU8(rshift)));
+         assign(*qDiff2, mkexpr(z128));
+      }
+      return;
+   }
+
+   vassert(0);
+}
+
+
+/* Generate IR to do SRHADD and URHADD:
+      (A >> 1) + (B >> 1) + (((A & 1) + (B & 1) + 1) >> 1)
+   i.e. a rounding halving add that cannot overflow the lane. */
+static
+IRTemp math_RHADD ( UInt size, Bool isU, IRTemp aa, IRTemp bb )
+{
+   vassert(size <= 3);
+   IROp opSHR = isU ? mkVecSHRN(size) : mkVecSARN(size);
+   IROp opADD = mkVecADD(size);
+   /* The only tricky bit is to generate the correct vector 1 constant
+      for each lane size. */
+   static const ULong lane_ones[4]
+      = { 0x0101010101010101ULL, 0x0001000100010001ULL,
+          0x0000000100000001ULL, 0x0000000000000001ULL };
+   IRTemp oneI64 = newTemp(Ity_I64);
+   assign(oneI64, mkU64(lane_ones[size]));
+   IRTemp vOne = newTempV128();
+   assign(vOne, binop(Iop_64HLtoV128, mkexpr(oneI64), mkexpr(oneI64)));
+   IRTemp sOne = newTemp(Ity_I8);
+   assign(sOne, mkU8(1));
+   /* Halved operands. */
+   IRExpr* aHalf = binop(opSHR, mkexpr(aa), mkexpr(sOne));
+   IRExpr* bHalf = binop(opSHR, mkexpr(bb), mkexpr(sOne));
+   /* Rounding carry from the two discarded LSBs plus one. */
+   IRExpr* lsbA  = binop(Iop_AndV128, mkexpr(aa), mkexpr(vOne));
+   IRExpr* lsbB  = binop(Iop_AndV128, mkexpr(bb), mkexpr(vOne));
+   IRExpr* carry = binop(opSHR,
+                         binop(opADD,
+                               binop(opADD, lsbA, lsbB),
+                               mkexpr(vOne)),
+                         mkexpr(sOne));
+   IRTemp res = newTempV128();
+   assign(res, binop(opADD, aHalf, binop(opADD, bHalf, carry)));
+   return res;
+}
+
+
+/* QCFLAG tracks the SIMD sticky saturation status.  Update the status
+   thusly: if, after application of |opZHI| to both |qres| and |nres|,
+   they have the same value, leave QCFLAG unchanged.  Otherwise, set it
+   (implicitly) to 1.  |opZHI| may only be one of the Iop_ZeroHIxxofV128
+   operators, or Iop_INVALID, in which case |qres| and |nres| are used
+   unmodified.  The presence |opZHI| means this function can be used to
+   generate QCFLAG update code for both scalar and vector SIMD operations.
+*/
+static
+void updateQCFLAGwithDifferenceZHI ( IRTemp qres, IRTemp nres, IROp opZHI )
+{
+   /* XOR exposes any lanes where the saturating and exact results
+      differ; optionally mask off lanes above the operation width. */
+   IRExpr* xorE = binop(Iop_XorV128, mkexpr(qres), mkexpr(nres));
+   IRTemp difference = newTempV128();
+   if (opZHI == Iop_INVALID) {
+      assign(difference, xorE);
+   } else {
+      vassert(opZHI == Iop_ZeroHI64ofV128
+              || opZHI == Iop_ZeroHI96ofV128 || opZHI == Iop_ZeroHI112ofV128);
+      assign(difference, unop(opZHI, xorE));
+   }
+   /* OR the difference into the sticky flag. */
+   IRTemp qcOld = newTempV128();
+   assign(qcOld, IRExpr_Get(OFFB_QCFLAG, Ity_V128));
+   IRTemp qcNew = newTempV128();
+   assign(qcNew, binop(Iop_OrV128, mkexpr(qcOld), mkexpr(difference)));
+   stmt(IRStmt_Put(OFFB_QCFLAG, mkexpr(qcNew)));
+}
+
+
+/* A variant of updateQCFLAGwithDifferenceZHI in which |qres| and |nres|
+   are used unmodified, hence suitable for QCFLAG updates for whole-vector
+   operations. */
+static
+void updateQCFLAGwithDifference ( IRTemp qres, IRTemp nres )
+{
+   /* Whole-vector case: no ZeroHI masking is applied. */
+   updateQCFLAGwithDifferenceZHI(qres, nres, Iop_INVALID);
+}
+
+
+/* Generate IR to rearrange two vector values in a way which is useful
+   for doing S/D add-pair etc operations.  There are 3 cases:
+
+   2d:  [m1 m0] [n1 n0]  -->  [m1 n1] [m0 n0]
+
+   4s:  [m3 m2 m1 m0] [n3 n2 n1 n0]  -->  [m3 m1 n3 n1] [m2 m0 n2 n0]
+
+   2s:  [m3 m2 m1 m0] [n3 n2 n1 n0]  -->  [0 0 m1 n1] [0 0 m0 n0]
+
+   The cases are distinguished as follows:
+   isD == True,  bitQ == 1  =>  2d
+   isD == False, bitQ == 1  =>  4s
+   isD == False, bitQ == 0  =>  2s
+*/
+static
+void math_REARRANGE_FOR_FLOATING_PAIRWISE (
+        /*OUT*/IRTemp* rearrL, /*OUT*/IRTemp* rearrR,
+        IRTemp vecM, IRTemp vecN, Bool isD, UInt bitQ
+     )
+{
+   vassert(rearrL && *rearrL == IRTemp_INVALID);
+   vassert(rearrR && *rearrR == IRTemp_INVALID);
+   *rearrL = newTempV128();
+   *rearrR = newTempV128();
+   if (isD) {
+      // 2d case
+      vassert(bitQ == 1);
+      assign(*rearrL, binop(Iop_InterleaveHI64x2, mkexpr(vecM), mkexpr(vecN)));
+      assign(*rearrR, binop(Iop_InterleaveLO64x2, mkexpr(vecM), mkexpr(vecN)));
+   }
+   else if (!isD && bitQ == 1) {
+      // 4s case
+      assign(*rearrL, binop(Iop_CatOddLanes32x4,  mkexpr(vecM), mkexpr(vecN)));
+      assign(*rearrR, binop(Iop_CatEvenLanes32x4, mkexpr(vecM), mkexpr(vecN)));
+   } else {
+      // 2s case
+      vassert(!isD && bitQ == 0);
+      IRTemp m1n1m0n0 = newTempV128();
+      IRTemp m0n0m1n1 = newTempV128();
+      // Interleave the low halves: lanes [m1 n1 m0 n0].
+      assign(m1n1m0n0, binop(Iop_InterleaveLO32x4,
+                             mkexpr(vecM), mkexpr(vecN)));
+      // Rotate by 8 bytes to get [m0 n0 m1 n1].
+      assign(m0n0m1n1, triop(Iop_SliceV128,
+                             mkexpr(m1n1m0n0), mkexpr(m1n1m0n0), mkU8(8)));
+      // Keep only the low 64 bits of each arrangement.
+      assign(*rearrL, unop(Iop_ZeroHI64ofV128, mkexpr(m1n1m0n0)));
+      assign(*rearrR, unop(Iop_ZeroHI64ofV128, mkexpr(m0n0m1n1)));
+   }
+}
+
+
+/* Returns 2.0 ^ (-n) for n in 1 .. 64 */
+static Double two_to_the_minus ( Int n )
+{
+   vassert(n >= 1 && n <= 64);
+   /* Powers of two are exactly representable in binary floating
+      point, so repeated multiplication accumulates no rounding
+      error and gives the same value as the recursive split. */
+   Double result = 1.0;
+   Int    i;
+   for (i = 0; i < n; i++) result *= 0.5;
+   return result;
+}
+
+
+/* Returns 2.0 ^ n for n in 1 .. 64 */
+static Double two_to_the_plus ( Int n )
+{
+   vassert(n >= 1 && n <= 64);
+   /* Powers of two are exactly representable in binary floating
+      point, so repeated multiplication accumulates no rounding
+      error and gives the same value as the recursive split. */
+   Double result = 1.0;
+   Int    i;
+   for (i = 0; i < n; i++) result *= 2.0;
+   return result;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- SIMD and FP instructions                             ---*/
+/*------------------------------------------------------------*/
+
+static
+Bool dis_AdvSIMD_EXT(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* Decode the AdvSIMD EXT (vector extract) group.  Returns True iff
+      'insn' was recognised and IR was emitted.
+      31  29     23  21 20 15 14   10 9 4
+      0 q 101110 op2 0  m  0  imm4 0  n d
+      Decode fields: op2
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(29,24) != BITS6(1,0,1,1,1,0)
+       || INSN(21,21) != 0 || INSN(15,15) != 0 || INSN(10,10) != 0) {
+      return False;
+   }
+   UInt bitQ = INSN(30,30);
+   UInt op2  = INSN(23,22);
+   UInt mm   = INSN(20,16);
+   UInt imm4 = INSN(14,11);
+   UInt nn   = INSN(9,5);
+   UInt dd   = INSN(4,0);
+
+   if (op2 == BITS2(0,0)) {
+      /* -------- 00: EXT 16b_16b_16b, 8b_8b_8b -------- */
+      /* Result is a byte-granularity extract from the pair Vm:Vn,
+         starting at byte position imm4. */
+      IRTemp sHi = newTempV128();
+      IRTemp sLo = newTempV128();
+      IRTemp res = newTempV128();
+      assign(sHi, getQReg128(mm));
+      assign(sLo, getQReg128(nn));
+      if (bitQ == 1) {
+         if (imm4 == 0) {
+            // Extract at offset 0 is just a copy of Vn.
+            assign(res, mkexpr(sLo));
+         } else {
+            vassert(imm4 >= 1 && imm4 <= 15);
+            assign(res, triop(Iop_SliceV128,
+                              mkexpr(sHi), mkexpr(sLo), mkU8(imm4)));
+         }
+         putQReg128(dd, mkexpr(res));
+         DIP("ext v%u.16b, v%u.16b, v%u.16b, #%u\n", dd, nn, mm, imm4);
+      } else {
+         if (imm4 >= 8) return False;
+         if (imm4 == 0) {
+            assign(res, mkexpr(sLo));
+         } else {
+            vassert(imm4 >= 1 && imm4 <= 7);
+            // 8b case: glue the two low 64-bit halves together first,
+            // then slice within that.
+            IRTemp hi64lo64 = newTempV128();
+            assign(hi64lo64, binop(Iop_InterleaveLO64x2,
+                                   mkexpr(sHi), mkexpr(sLo)));
+            assign(res, triop(Iop_SliceV128,
+                              mkexpr(hi64lo64), mkexpr(hi64lo64), mkU8(imm4)));
+         }
+         putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+         DIP("ext v%u.8b, v%u.8b, v%u.8b, #%u\n", dd, nn, mm, imm4);
+      }
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_TBL_TBX(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* Decode the AdvSIMD TBL/TBX (table lookup) group.  Returns True
+      iff 'insn' was recognised and IR was emitted.
+      31  29     23  21 20 15 14  12 11 9 4
+      0 q 001110 op2 0  m  0  len op 00 n d
+      Decode fields: op2,len,op
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(29,24) != BITS6(0,0,1,1,1,0)
+       || INSN(21,21) != 0
+       || INSN(15,15) != 0
+       || INSN(11,10) != BITS2(0,0)) {
+      return False;
+   }
+   UInt bitQ  = INSN(30,30);
+   UInt op2   = INSN(23,22);
+   UInt mm    = INSN(20,16);
+   UInt len   = INSN(14,13);
+   UInt bitOP = INSN(12,12);
+   UInt nn    = INSN(9,5);
+   UInt dd    = INSN(4,0);
+
+   if (op2 == X00) {
+      /* -------- 00,xx,0 TBL, xx register table -------- */
+      /* -------- 00,xx,1 TBX, xx register table -------- */
+      /* 31  28        20 15 14  12  9 4
+         0q0 01110 000 m  0  len 000 n d  TBL Vd.Ta, {Vn .. V(n+len)%32}, Vm.Ta
+         0q0 01110 000 m  0  len 100 n d  TBX Vd.Ta, {Vn .. V(n+len)%32}, Vm.Ta
+         where Ta = 16b(q=1) or 8b(q=0)
+      */
+      Bool isTBX = bitOP == 1;
+      /* The out-of-range values to use: TBX keeps the existing
+         destination lanes, TBL zeroes them. */
+      IRTemp oor_values = newTempV128();
+      assign(oor_values, isTBX ? getQReg128(dd) : mkV128(0));
+      /* src value (the indices) */
+      IRTemp src = newTempV128();
+      assign(src, getQReg128(mm));
+      /* The table values: len+1 consecutive registers, wrapping at 32. */
+      IRTemp tab[4];
+      UInt   i;
+      for (i = 0; i <= len; i++) {
+         vassert(i < 4);
+         tab[i] = newTempV128();
+         assign(tab[i], getQReg128((nn + i) % 32));
+      }
+      IRTemp res = math_TBL_TBX(tab, len, src, oor_values);
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* Ta = bitQ == 1 ? "16b" : "8b";
+      const HChar* nm = isTBX ? "tbx" : "tbl";
+      /* Register numbers are UInt, so print with %u (was %d). */
+      DIP("%s %s.%s, {v%u.16b .. v%u.16b}, %s.%s\n",
+          nm, nameQReg128(dd), Ta, nn, (nn + len) % 32, nameQReg128(mm), Ta);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_ZIP_UZP_TRN(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* Decode the AdvSIMD ZIP/UZP/TRN (permute) group.  Returns True
+      iff 'insn' was recognised and IR was emitted.
+      31  29     23   21 20 15 14     11 9 4
+      0 q 001110 size 0  m  0  opcode 10 n d
+      Decode fields: opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(29,24) != BITS6(0,0,1,1,1,0)
+       || INSN(21,21) != 0 || INSN(15,15) != 0 || INSN(11,10) != BITS2(1,0)) {
+      return False;
+   }
+   UInt bitQ   = INSN(30,30);
+   UInt size   = INSN(23,22);
+   UInt mm     = INSN(20,16);
+   UInt opcode = INSN(14,12);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+
+   if (opcode == BITS3(0,0,1) || opcode == BITS3(1,0,1)) {
+      /* -------- 001 UZP1 std7_std7_std7 -------- */
+      /* -------- 101 UZP2 std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool   isUZP1 = opcode == BITS3(0,0,1);
+      IROp   op     = isUZP1 ? mkVecCATEVENLANES(size)
+                             : mkVecCATODDLANES(size);
+      IRTemp preL = newTempV128();
+      IRTemp preR = newTempV128();
+      IRTemp res  = newTempV128();
+      if (bitQ == 0) {
+         // 64-bit variant: pack the low halves of Vm:Vn into one
+         // 128-bit value and un-zip that against itself.
+         assign(preL, binop(Iop_InterleaveLO64x2, getQReg128(mm),
+                                                  getQReg128(nn)));
+         assign(preR, mkexpr(preL));
+      } else {
+         assign(preL, getQReg128(mm));
+         assign(preR, getQReg128(nn));
+      }
+      assign(res, binop(op, mkexpr(preL), mkexpr(preR)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nm  = isUZP1 ? "uzp1" : "uzp2";
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS3(0,1,0) || opcode == BITS3(1,1,0)) {
+      /* -------- 010 TRN1 std7_std7_std7 -------- */
+      /* -------- 110 TRN2 std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool   isTRN1 = opcode == BITS3(0,1,0);
+      IROp   op1    = isTRN1 ? mkVecCATEVENLANES(size)
+                             : mkVecCATODDLANES(size);
+      IROp op2 = mkVecINTERLEAVEHI(size);
+      IRTemp srcM = newTempV128();
+      IRTemp srcN = newTempV128();
+      IRTemp res  = newTempV128();
+      assign(srcM, getQReg128(mm));
+      assign(srcN, getQReg128(nn));
+      // Duplicate the chosen (even/odd) lanes of each source, then
+      // interleave the two duplicated values to get the transpose.
+      assign(res, binop(op2, binop(op1, mkexpr(srcM), mkexpr(srcM)),
+                             binop(op1, mkexpr(srcN), mkexpr(srcN))));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nm  = isTRN1 ? "trn1" : "trn2";
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS3(0,1,1) || opcode == BITS3(1,1,1)) {
+      /* -------- 011 ZIP1 std7_std7_std7 -------- */
+      /* -------- 111 ZIP2 std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool   isZIP1 = opcode == BITS3(0,1,1);
+      IROp   op     = isZIP1 ? mkVecINTERLEAVELO(size)
+                             : mkVecINTERLEAVEHI(size);
+      IRTemp preL = newTempV128();
+      IRTemp preR = newTempV128();
+      IRTemp res  = newTempV128();
+      if (bitQ == 0 && !isZIP1) {
+         // 64-bit ZIP2 reads the *upper* 32 bits of each low half, so
+         // pre-shift the operands up by 32 bits and use the HI op.
+         IRTemp z128 = newTempV128();
+         assign(z128, mkV128(0x0000));
+         // preL = Vm shifted left 32 bits
+         // preR = Vn shifted left 32 bits
+         assign(preL, triop(Iop_SliceV128,
+                            getQReg128(mm), mkexpr(z128), mkU8(12)));
+         assign(preR, triop(Iop_SliceV128,
+                            getQReg128(nn), mkexpr(z128), mkU8(12)));
+
+      } else {
+         assign(preL, getQReg128(mm));
+         assign(preR, getQReg128(nn));
+      }
+      assign(res, binop(op, mkexpr(preL), mkexpr(preR)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nm  = isZIP1 ? "zip1" : "zip2";
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_across_lanes(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* Decode the AdvSIMD across-lanes reduction group (SADDLV, UADDLV,
+      S/UMAXV, S/UMINV, ADDV, FMAXV etc).  Returns True iff 'insn'
+      was recognised and IR was emitted.
+      31    28    23   21    16     11 9 4
+      0 q u 01110 size 11000 opcode 10 n d
+      Decode fields: u,size,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(28,24) != BITS5(0,1,1,1,0)
+       || INSN(21,17) != BITS5(1,1,0,0,0) || INSN(11,10) != BITS2(1,0)) {
+      return False;
+   }
+   UInt bitQ   = INSN(30,30);
+   UInt bitU   = INSN(29,29);
+   UInt size   = INSN(23,22);
+   UInt opcode = INSN(16,12);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+
+   if (opcode == BITS5(0,0,0,1,1)) {
+      /* -------- 0,xx,00011 SADDLV -------- */
+      /* -------- 1,xx,00011 UADDLV -------- */
+      /* size is the narrow size */
+      if (size == X11 || (size == X10 && bitQ == 0)) return False;
+      Bool   isU = bitU == 1;
+      IRTemp src = newTempV128();
+      assign(src, getQReg128(nn));
+      /* The basic plan is to widen the lower half, and if Q = 1,
+         the upper half too.  Add them together (if Q = 1), and in
+         either case fold with add at twice the lane width.
+      */
+      IRExpr* widened
+         = mkexpr(math_WIDEN_LO_OR_HI_LANES(
+                     isU, False/*!fromUpperHalf*/, size, mkexpr(src)));
+      if (bitQ == 1) {
+         widened
+            = binop(mkVecADD(size+1),
+                    widened,
+                    mkexpr(math_WIDEN_LO_OR_HI_LANES(
+                              isU, True/*fromUpperHalf*/, size, mkexpr(src)))
+              );
+      }
+      /* Now fold. */
+      IRTemp tWi = newTempV128();
+      assign(tWi, widened);
+      IRTemp res = math_FOLDV(tWi, mkVecADD(size+1));
+      putQReg128(dd, mkexpr(res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      /* The destination scalar is twice the source lane width, hence
+         size+1 here (was "bhsd"[size], which printed the narrow size). */
+      const HChar  ch  = "bhsd"[size+1];
+      DIP("%s %s.%c, %s.%s\n", isU ? "uaddlv" : "saddlv",
+          nameQReg128(dd), ch, nameQReg128(nn), arr);
+      return True;
+   }
+
+   UInt ix = 0;
+   /**/ if (opcode == BITS5(0,1,0,1,0)) { ix = bitU == 0 ? 1 : 2; }
+   else if (opcode == BITS5(1,1,0,1,0)) { ix = bitU == 0 ? 3 : 4; }
+   else if (opcode == BITS5(1,1,0,1,1) && bitU == 0) { ix = 5; }
+   /**/
+   if (ix != 0) {
+      /* -------- 0,xx,01010: SMAXV -------- (1) */
+      /* -------- 1,xx,01010: UMAXV -------- (2) */
+      /* -------- 0,xx,11010: SMINV -------- (3) */
+      /* -------- 1,xx,11010: UMINV -------- (4) */
+      /* -------- 0,xx,11011: ADDV  -------- (5) */
+      vassert(ix >= 1 && ix <= 5);
+      if (size == X11) return False; // 1d,2d cases not allowed
+      if (size == X10 && bitQ == 0) return False; // 2s case not allowed
+      const IROp opMAXS[3]
+         = { Iop_Max8Sx16, Iop_Max16Sx8, Iop_Max32Sx4 };
+      const IROp opMAXU[3]
+         = { Iop_Max8Ux16, Iop_Max16Ux8, Iop_Max32Ux4 };
+      const IROp opMINS[3]
+         = { Iop_Min8Sx16, Iop_Min16Sx8, Iop_Min32Sx4 };
+      const IROp opMINU[3]
+         = { Iop_Min8Ux16, Iop_Min16Ux8, Iop_Min32Ux4 };
+      const IROp opADD[3]
+         = { Iop_Add8x16,  Iop_Add16x8,  Iop_Add32x4 };
+      vassert(size < 3);
+      IROp op = Iop_INVALID;
+      const HChar* nm = NULL;
+      switch (ix) {
+         case 1: op = opMAXS[size]; nm = "smaxv"; break;
+         case 2: op = opMAXU[size]; nm = "umaxv"; break;
+         case 3: op = opMINS[size]; nm = "sminv"; break;
+         case 4: op = opMINU[size]; nm = "uminv"; break;
+         case 5: op = opADD[size];  nm = "addv";  break;
+         default: vassert(0);
+      }
+      vassert(op != Iop_INVALID && nm != NULL);
+      IRTemp tN1 = newTempV128();
+      assign(tN1, getQReg128(nn));
+      /* If Q == 0, we're just folding lanes in the lower half of
+         the value.  In which case, copy the lower half of the
+         source into the upper half, so we can then treat it the
+         same as the full width case.  Except for the addition case,
+         in which we have to zero out the upper half. */
+      IRTemp tN2 = newTempV128();
+      assign(tN2, bitQ == 0
+                     ? (ix == 5 ? unop(Iop_ZeroHI64ofV128, mkexpr(tN1))
+                                : mk_CatEvenLanes64x2(tN1,tN1))
+                     : mkexpr(tN1));
+      IRTemp res = math_FOLDV(tN2, op);
+      if (res == IRTemp_INVALID)
+         return False; /* means math_FOLDV
+                          doesn't handle this case yet */
+      putQReg128(dd, mkexpr(res));
+      const IRType tys[3] = { Ity_I8, Ity_I16, Ity_I32 };
+      IRType laneTy = tys[size];
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s, %s.%s\n", nm,
+          nameQRegLO(dd, laneTy), nameQReg128(nn), arr);
+      return True;
+   }
+
+   if ((size == X00 || size == X10)
+       && (opcode == BITS5(0,1,1,0,0) || opcode == BITS5(0,1,1,1,1))) {
+      /* -------- 0,00,01100: FMAXMNV s_4s -------- */
+      /* -------- 0,10,01100: FMINMNV s_4s -------- */
+      /* -------- 1,00,01111: FMAXV   s_4s -------- */
+      /* -------- 1,10,01111: FMINV   s_4s -------- */
+      /* FMAXNM, FMINNM: FIXME -- KLUDGED */
+      if (bitQ == 0) return False; // Only 4s is allowed
+      Bool   isMIN = (size & 2) == 2;
+      Bool   isNM  = opcode == BITS5(0,1,1,0,0);
+      IROp   opMXX = (isMIN ? mkVecMINF : mkVecMAXF)(2);
+      IRTemp src = newTempV128();
+      assign(src, getQReg128(nn));
+      IRTemp res = math_FOLDV(src, opMXX);
+      putQReg128(dd, mkexpr(res));
+      /* NOTE(review): the source operand prints as "%u.4s" with no
+         'v' prefix -- looks like a cosmetic omission; confirm. */
+      DIP("%s%sv s%u, %u.4s\n",
+          isMIN ? "fmin" : "fmax", isNM ? "nm" : "", dd, nn);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_copy(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* Decode the AdvSIMD copy group: DUP (element/general), INS
+      (general/element), SMOV, UMOV.  Returns True iff 'insn' was
+      recognised and IR was emitted.
+      31     28       20   15 14   10 9 4
+      0 q op 01110000 imm5 0  imm4 1  n d
+      Decode fields: q,op,imm4
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(28,21) != BITS8(0,1,1,1,0,0,0,0)
+       || INSN(15,15) != 0 || INSN(10,10) != 1) {
+      return False;
+   }
+   UInt bitQ  = INSN(30,30);
+   UInt bitOP = INSN(29,29);
+   UInt imm5  = INSN(20,16);
+   UInt imm4  = INSN(14,11);
+   UInt nn    = INSN(9,5);
+   UInt dd    = INSN(4,0);
+
+   /* -------- x,0,0000: DUP (element, vector) -------- */
+   /* 31  28       20   15     9 4
+      0q0 01110000 imm5 000001 n d  DUP Vd.T, Vn.Ts[index]
+   */
+   if (bitOP == 0 && imm4 == BITS4(0,0,0,0)) {
+      UInt   laneNo    = 0;
+      UInt   laneSzLg2 = 0;
+      HChar  laneCh    = '?';
+      IRTemp res       = handle_DUP_VEC_ELEM(&laneNo, &laneSzLg2, &laneCh,
+                                             getQReg128(nn), imm5);
+      if (res == IRTemp_INVALID)
+         return False;
+      if (bitQ == 0 && laneSzLg2 == X11)
+         return False; /* .1d case */
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arT = nameArr_Q_SZ(bitQ, laneSzLg2);
+      DIP("dup %s.%s, %s.%c[%u]\n",
+           nameQReg128(dd), arT, nameQReg128(nn), laneCh, laneNo);
+      return True;
+   }
+
+   /* -------- x,0,0001: DUP (general, vector) -------- */
+   /* 31  28       20   15       9 4
+      0q0 01110000 imm5 0 0001 1 n d  DUP Vd.T, Rn
+      Q=0 writes 64, Q=1 writes 128
+      imm5: xxxx1  8B(q=0)      or 16b(q=1),     R=W
+            xxx10  4H(q=0)      or 8H(q=1),      R=W
+            xx100  2S(q=0)      or 4S(q=1),      R=W
+            x1000  Invalid(q=0) or 2D(q=1),      R=X
+            x0000  Invalid(q=0) or Invalid(q=1)
+      Require op=0, imm4=0001
+   */
+   if (bitOP == 0 && imm4 == BITS4(0,0,0,1)) {
+      Bool   isQ = bitQ == 1;
+      IRTemp w0  = newTemp(Ity_I64);
+      const HChar* arT = "??";
+      IRType laneTy = Ity_INVALID;
+      /* Lowest set bit of imm5 selects the lane size; w0 gets the
+         zero-extended source lane value in its low bits. */
+      if (imm5 & 1) {
+         arT    = isQ ? "16b" : "8b";
+         laneTy = Ity_I8;
+         assign(w0, unop(Iop_8Uto64, unop(Iop_64to8, getIReg64orZR(nn))));
+      }
+      else if (imm5 & 2) {
+         arT    = isQ ? "8h" : "4h";
+         laneTy = Ity_I16;
+         assign(w0, unop(Iop_16Uto64, unop(Iop_64to16, getIReg64orZR(nn))));
+      }
+      else if (imm5 & 4) {
+         arT    = isQ ? "4s" : "2s";
+         laneTy = Ity_I32;
+         assign(w0, unop(Iop_32Uto64, unop(Iop_64to32, getIReg64orZR(nn))));
+      }
+      else if ((imm5 & 8) && isQ) {
+         arT    = "2d";
+         laneTy = Ity_I64;
+         assign(w0, getIReg64orZR(nn));
+      }
+      else {
+         /* invalid; leave laneTy unchanged. */
+      }
+      /* */
+      if (laneTy != Ity_INVALID) {
+         IRTemp w1 = math_DUP_TO_64(w0, laneTy);
+         putQReg128(dd, binop(Iop_64HLtoV128,
+                              isQ ? mkexpr(w1) : mkU64(0), mkexpr(w1)));
+         DIP("dup %s.%s, %s\n",
+             nameQReg128(dd), arT, nameIRegOrZR(laneTy == Ity_I64, nn));
+         return True;
+      }
+      /* invalid */
+      return False;
+   }
+
+   /* -------- 1,0,0011: INS (general) -------- */
+   /* 31  28       20   15     9 4
+      010 01110000 imm5 000111 n d  INS Vd.Ts[ix], Rn
+      where Ts,ix = case imm5 of xxxx1 -> B, xxxx
+                                 xxx10 -> H, xxx
+                                 xx100 -> S, xx
+                                 x1000 -> D, x
+   */
+   if (bitQ == 1 && bitOP == 0 && imm4 == BITS4(0,0,1,1)) {
+      HChar   ts     = '?';
+      UInt    laneNo = 16;
+      IRExpr* src    = NULL;
+      if (imm5 & 1) {
+         src    = unop(Iop_64to8, getIReg64orZR(nn));
+         laneNo = (imm5 >> 1) & 15;
+         ts     = 'b';
+      }
+      else if (imm5 & 2) {
+         src    = unop(Iop_64to16, getIReg64orZR(nn));
+         laneNo = (imm5 >> 2) & 7;
+         ts     = 'h';
+      }
+      else if (imm5 & 4) {
+         src    = unop(Iop_64to32, getIReg64orZR(nn));
+         laneNo = (imm5 >> 3) & 3;
+         ts     = 's';
+      }
+      else if (imm5 & 8) {
+         src    = getIReg64orZR(nn);
+         laneNo = (imm5 >> 4) & 1;
+         ts     = 'd';
+      }
+      /* src remaining NULL means the encoding is invalid. */
+      if (src) {
+         vassert(laneNo < 16);
+         putQRegLane(dd, laneNo, src);
+         DIP("ins %s.%c[%u], %s\n",
+             nameQReg128(dd), ts, laneNo, nameIReg64orZR(nn));
+         return True;
+      }
+      /* invalid */
+      return False;
+   }
+
+   /* -------- x,0,0101: SMOV -------- */
+   /* -------- x,0,0111: UMOV -------- */
+   /* 31  28        20   15     9 4
+      0q0 01110 000 imm5 001111 n d  UMOV Xd/Wd, Vn.Ts[index]
+      0q0 01110 000 imm5 001011 n d  SMOV Xd/Wd, Vn.Ts[index]
+      dest is Xd when q==1, Wd when q==0
+      UMOV:
+         Ts,index,ops = case q:imm5 of
+                          0:xxxx1 -> B, xxxx, 8Uto64
+                          1:xxxx1 -> invalid
+                          0:xxx10 -> H, xxx,  16Uto64
+                          1:xxx10 -> invalid
+                          0:xx100 -> S, xx,   32Uto64
+                          1:xx100 -> invalid
+                          1:x1000 -> D, x,    copy64
+                          other   -> invalid
+      SMOV:
+         Ts,index,ops = case q:imm5 of
+                          0:xxxx1 -> B, xxxx, (32Uto64 . 8Sto32)
+                          1:xxxx1 -> B, xxxx, 8Sto64
+                          0:xxx10 -> H, xxx,  (32Uto64 . 16Sto32)
+                          1:xxx10 -> H, xxx,  16Sto64
+                          0:xx100 -> invalid
+                          1:xx100 -> S, xx,   32Sto64
+                          1:x1000 -> invalid
+                          other   -> invalid
+   */
+   if (bitOP == 0 && (imm4 == BITS4(0,1,0,1) || imm4 == BITS4(0,1,1,1))) {
+      Bool isU  = (imm4 & 2) == 2;
+      const HChar* arTs = "??";
+      UInt    laneNo = 16; /* invalid */
+      // Setting 'res' to non-NULL determines valid/invalid
+      IRExpr* res    = NULL;
+      if (!bitQ && (imm5 & 1)) { // 0:xxxx1
+         laneNo = (imm5 >> 1) & 15;
+         IRExpr* lane = getQRegLane(nn, laneNo, Ity_I8);
+         res = isU ? unop(Iop_8Uto64, lane)
+                   : unop(Iop_32Uto64, unop(Iop_8Sto32, lane));
+         arTs = "b";
+      }
+      else if (bitQ && (imm5 & 1)) { // 1:xxxx1
+         laneNo = (imm5 >> 1) & 15;
+         IRExpr* lane = getQRegLane(nn, laneNo, Ity_I8);
+         res = isU ? NULL
+                   : unop(Iop_8Sto64, lane);
+         arTs = "b";
+      }
+      else if (!bitQ && (imm5 & 2)) { // 0:xxx10
+         laneNo = (imm5 >> 2) & 7;
+         IRExpr* lane = getQRegLane(nn, laneNo, Ity_I16);
+         res = isU ? unop(Iop_16Uto64, lane)
+                   : unop(Iop_32Uto64, unop(Iop_16Sto32, lane));
+         arTs = "h";
+      }
+      else if (bitQ && (imm5 & 2)) { // 1:xxx10
+         laneNo = (imm5 >> 2) & 7;
+         IRExpr* lane = getQRegLane(nn, laneNo, Ity_I16);
+         res = isU ? NULL
+                   : unop(Iop_16Sto64, lane);
+         arTs = "h";
+      }
+      else if (!bitQ && (imm5 & 4)) { // 0:xx100
+         laneNo = (imm5 >> 3) & 3;
+         IRExpr* lane = getQRegLane(nn, laneNo, Ity_I32);
+         res = isU ? unop(Iop_32Uto64, lane)
+                   : NULL;
+         arTs = "s";
+      }
+      else if (bitQ && (imm5 & 4)) { // 1:xx100
+         laneNo = (imm5 >> 3) & 3;
+         IRExpr* lane = getQRegLane(nn, laneNo, Ity_I32);
+         res = isU ? NULL
+                   : unop(Iop_32Sto64, lane);
+         arTs = "s";
+      }
+      else if (bitQ && (imm5 & 8)) { // 1:x1000
+         laneNo = (imm5 >> 4) & 1;
+         IRExpr* lane = getQRegLane(nn, laneNo, Ity_I64);
+         res = isU ? lane
+                   : NULL;
+         arTs = "d";
+      }
+      /* */
+      if (res) {
+         vassert(laneNo < 16);
+         putIReg64orZR(dd, res);
+         DIP("%cmov %s, %s.%s[%u]\n", isU ? 'u' : 's',
+             nameIRegOrZR(bitQ == 1, dd),
+             nameQReg128(nn), arTs, laneNo);
+         return True;
+      }
+      /* invalid */
+      return False;
+   }
+
+   /* -------- 1,1,xxxx: INS (element) -------- */
+   /* 31  28       20     14   9 4
+      011 01110000 imm5 0 imm4 n d  INS Vd.Ts[ix1], Vn.Ts[ix2]
+      where Ts,ix1,ix2
+               = case imm5 of xxxx1 -> B, xxxx, imm4[3:0]
+                              xxx10 -> H, xxx,  imm4[3:1]
+                              xx100 -> S, xx,   imm4[3:2]
+                              x1000 -> D, x,    imm4[3:3]
+   */
+   if (bitQ == 1 && bitOP == 1) {
+      HChar   ts  = '?';
+      IRType  ity = Ity_INVALID;
+      UInt    ix1 = 16;
+      UInt    ix2 = 16;
+      if (imm5 & 1) {
+         ts  = 'b';
+         ity = Ity_I8;
+         ix1 = (imm5 >> 1) & 15;
+         ix2 = (imm4 >> 0) & 15;
+      }
+      else if (imm5 & 2) {
+         ts  = 'h';
+         ity = Ity_I16;
+         ix1 = (imm5 >> 2) & 7;
+         ix2 = (imm4 >> 1) & 7;
+      }
+      else if (imm5 & 4) {
+         ts  = 's';
+         ity = Ity_I32;
+         ix1 = (imm5 >> 3) & 3;
+         ix2 = (imm4 >> 2) & 3;
+      }
+      else if (imm5 & 8) {
+         ts  = 'd';
+         ity = Ity_I64;
+         ix1 = (imm5 >> 4) & 1;
+         ix2 = (imm4 >> 3) & 1;
+      }
+      /* ity remaining INVALID means the encoding is invalid. */
+      if (ity != Ity_INVALID) {
+         vassert(ix1 < 16);
+         vassert(ix2 < 16);
+         putQRegLane(dd, ix1, getQRegLane(nn, ix2, ity));
+         DIP("ins %s.%c[%u], %s.%c[%u]\n",
+             nameQReg128(dd), ts, ix1, nameQReg128(nn), ts, ix2);
+         return True;
+      }
+      /* invalid */
+      return False;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_modified_immediate(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* Decode the AdvSIMD modified-immediate group: MOVI, MVNI, ORR and
+      BIC (vector, immediate), FMOV (vector, immediate).  Returns True
+      iff 'insn' was recognised and IR was emitted.
+      31    28          18  15    11 9     4
+      0q op 01111 00000 abc cmode 01 defgh d
+      Decode fields: q,op,cmode
+      Bit 11 is really "o2", but it is always zero.
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(28,19) != BITS10(0,1,1,1,1,0,0,0,0,0)
+       || INSN(11,10) != BITS2(0,1)) {
+      return False;
+   }
+   UInt bitQ     = INSN(30,30);
+   UInt bitOP    = INSN(29,29);
+   UInt cmode    = INSN(15,12);
+   UInt abcdefgh = (INSN(18,16) << 5) | INSN(9,5);
+   UInt dd       = INSN(4,0);
+
+   /* Classify the op:cmode combination first; exactly one of the
+      is* flags is set when 'ok' becomes True. */
+   ULong imm64lo  = 0;
+   UInt  op_cmode = (bitOP << 4) | cmode;
+   Bool  ok       = False;
+   Bool  isORR    = False;
+   Bool  isBIC    = False;
+   Bool  isMOV    = False;
+   Bool  isMVN    = False;
+   Bool  isFMOV   = False;
+   switch (op_cmode) {
+      /* -------- x,0,0000 MOVI 32-bit shifted imm -------- */
+      /* -------- x,0,0010 MOVI 32-bit shifted imm -------- */
+      /* -------- x,0,0100 MOVI 32-bit shifted imm -------- */
+      /* -------- x,0,0110 MOVI 32-bit shifted imm -------- */
+      case BITS5(0,0,0,0,0): case BITS5(0,0,0,1,0):
+      case BITS5(0,0,1,0,0): case BITS5(0,0,1,1,0): // 0:0xx0
+         ok = True; isMOV = True; break;
+
+      /* -------- x,0,0001 ORR (vector, immediate) 32-bit -------- */
+      /* -------- x,0,0011 ORR (vector, immediate) 32-bit -------- */
+      /* -------- x,0,0101 ORR (vector, immediate) 32-bit -------- */
+      /* -------- x,0,0111 ORR (vector, immediate) 32-bit -------- */
+      case BITS5(0,0,0,0,1): case BITS5(0,0,0,1,1):
+      case BITS5(0,0,1,0,1): case BITS5(0,0,1,1,1): // 0:0xx1
+         ok = True; isORR = True; break;
+
+      /* -------- x,0,1000 MOVI 16-bit shifted imm -------- */
+      /* -------- x,0,1010 MOVI 16-bit shifted imm -------- */
+      case BITS5(0,1,0,0,0): case BITS5(0,1,0,1,0): // 0:10x0
+         ok = True; isMOV = True; break;
+
+      /* -------- x,0,1001 ORR (vector, immediate) 16-bit -------- */
+      /* -------- x,0,1011 ORR (vector, immediate) 16-bit -------- */
+      case BITS5(0,1,0,0,1): case BITS5(0,1,0,1,1): // 0:10x1
+         ok = True; isORR = True; break;
+
+      /* -------- x,0,1100 MOVI 32-bit shifting ones -------- */
+      /* -------- x,0,1101 MOVI 32-bit shifting ones -------- */
+      case BITS5(0,1,1,0,0): case BITS5(0,1,1,0,1): // 0:110x
+         ok = True; isMOV = True; break;
+
+      /* -------- x,0,1110 MOVI 8-bit -------- */
+      case BITS5(0,1,1,1,0):
+         ok = True; isMOV = True; break;
+
+      /* -------- x,0,1111 FMOV (vector, immediate, F32) -------- */
+      case BITS5(0,1,1,1,1): // 0:1111
+         ok = True; isFMOV = True; break;
+
+      /* -------- x,1,0000 MVNI 32-bit shifted imm -------- */
+      /* -------- x,1,0010 MVNI 32-bit shifted imm  -------- */
+      /* -------- x,1,0100 MVNI 32-bit shifted imm  -------- */
+      /* -------- x,1,0110 MVNI 32-bit shifted imm  -------- */
+      case BITS5(1,0,0,0,0): case BITS5(1,0,0,1,0):
+      case BITS5(1,0,1,0,0): case BITS5(1,0,1,1,0): // 1:0xx0
+         ok = True; isMVN = True; break;
+
+      /* -------- x,1,0001 BIC (vector, immediate) 32-bit -------- */
+      /* -------- x,1,0011 BIC (vector, immediate) 32-bit -------- */
+      /* -------- x,1,0101 BIC (vector, immediate) 32-bit -------- */
+      /* -------- x,1,0111 BIC (vector, immediate) 32-bit -------- */
+      case BITS5(1,0,0,0,1): case BITS5(1,0,0,1,1):
+      case BITS5(1,0,1,0,1): case BITS5(1,0,1,1,1): // 1:0xx1
+         ok = True; isBIC = True; break;
+
+      /* -------- x,1,1000 MVNI 16-bit shifted imm -------- */
+      /* -------- x,1,1010 MVNI 16-bit shifted imm -------- */
+      case BITS5(1,1,0,0,0): case BITS5(1,1,0,1,0): // 1:10x0
+         ok = True; isMVN = True; break;
+
+      /* -------- x,1,1001 BIC (vector, immediate) 16-bit -------- */
+      /* -------- x,1,1011 BIC (vector, immediate) 16-bit -------- */
+      case BITS5(1,1,0,0,1): case BITS5(1,1,0,1,1): // 1:10x1
+         ok = True; isBIC = True; break;
+
+      /* -------- x,1,1100 MVNI 32-bit shifting ones -------- */
+      /* -------- x,1,1101 MVNI 32-bit shifting ones -------- */
+      case BITS5(1,1,1,0,0): case BITS5(1,1,1,0,1): // 1:110x
+         ok = True; isMVN = True; break;
+
+      /* -------- 0,1,1110 MOVI 64-bit scalar -------- */
+      /* -------- 1,1,1110 MOVI 64-bit vector -------- */
+      case BITS5(1,1,1,1,0):
+         ok = True; isMOV = True; break;
+
+      /* -------- 1,1,1111 FMOV (vector, immediate, F64) -------- */
+      case BITS5(1,1,1,1,1): // 1:1111
+         ok = bitQ == 1; isFMOV = True; break;
+
+      default:
+        break;
+   }
+   if (ok) {
+      vassert(1 == (isMOV ? 1 : 0) + (isMVN ? 1 : 0)
+                   + (isORR ? 1 : 0) + (isBIC ? 1 : 0) + (isFMOV ? 1 : 0));
+      ok = AdvSIMDExpandImm(&imm64lo, bitOP, cmode, abcdefgh);
+   }
+   if (ok) {
+      if (isORR || isBIC) {
+         /* BIC is implemented as AND with the inverted immediate. */
+         ULong inv
+            = isORR ? 0ULL : ~0ULL;
+         IRExpr* immV128
+            = binop(Iop_64HLtoV128, mkU64(inv ^ imm64lo), mkU64(inv ^ imm64lo));
+         IRExpr* res
+            = binop(isORR ? Iop_OrV128 : Iop_AndV128, getQReg128(dd), immV128);
+         const HChar* nm = isORR ? "orr" : "bic";
+         if (bitQ == 0) {
+            putQReg128(dd, unop(Iop_ZeroHI64ofV128, res));
+            /* NOTE(review): no "#0x" prefix here, unlike the 2d case
+               below -- looks like a cosmetic inconsistency; confirm. */
+            DIP("%s %s.1d, %016llx\n", nm, nameQReg128(dd), imm64lo);
+         } else {
+            putQReg128(dd, res);
+            DIP("%s %s.2d, #0x%016llx'%016llx\n", nm,
+                nameQReg128(dd), imm64lo, imm64lo);
+         }
+      }
+      else if (isMOV || isMVN || isFMOV) {
+         if (isMVN) imm64lo = ~imm64lo;
+         ULong   imm64hi = bitQ == 0  ? 0  :  imm64lo;
+         IRExpr* immV128 = binop(Iop_64HLtoV128, mkU64(imm64hi),
+                                                 mkU64(imm64lo));
+         putQReg128(dd, immV128);
+         DIP("mov %s, #0x%016llx'%016llx\n", nameQReg128(dd), imm64hi, imm64lo);
+      }
+      return True;
+   }
+   /* else fall through */
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_scalar_copy(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* Decode the AdvSIMD scalar copy group (DUP element->scalar).
+      Returns True iff 'insn' was recognised and IR was emitted.
+      31    28       20   15 14   10 9 4
+      01 op 11110000 imm5 0  imm4 1  n d
+      Decode fields: op,imm4
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,30) != BITS2(0,1)
+       || INSN(28,21) != BITS8(1,1,1,1,0,0,0,0)
+       || INSN(15,15) != 0 || INSN(10,10) != 1) {
+      return False;
+   }
+   UInt bitOP = INSN(29,29);
+   UInt imm5  = INSN(20,16);
+   UInt imm4  = INSN(14,11);
+   UInt nn    = INSN(9,5);
+   UInt dd    = INSN(4,0);
+
+   if (bitOP == 0 && imm4 == BITS4(0,0,0,0)) {
+      /* -------- 0,0000 DUP (element, scalar) -------- */
+      /* Lowest set bit of imm5 selects the lane size; the remaining
+         upper bits of imm5 give the lane index. */
+      IRTemp w0     = newTemp(Ity_I64);
+      const HChar* arTs = "??";
+      IRType laneTy = Ity_INVALID;
+      UInt   laneNo = 16; /* invalid */
+      if (imm5 & 1) {
+         arTs   = "b";
+         laneNo = (imm5 >> 1) & 15;
+         laneTy = Ity_I8;
+         assign(w0, unop(Iop_8Uto64, getQRegLane(nn, laneNo, laneTy)));
+      }
+      else if (imm5 & 2) {
+         arTs   = "h";
+         laneNo = (imm5 >> 2) & 7;
+         laneTy = Ity_I16;
+         assign(w0, unop(Iop_16Uto64, getQRegLane(nn, laneNo, laneTy)));
+      }
+      else if (imm5 & 4) {
+         arTs   = "s";
+         laneNo = (imm5 >> 3) & 3;
+         laneTy = Ity_I32;
+         assign(w0, unop(Iop_32Uto64, getQRegLane(nn, laneNo, laneTy)));
+      }
+      else if (imm5 & 8) {
+         arTs   = "d";
+         laneNo = (imm5 >> 4) & 1;
+         laneTy = Ity_I64;
+         assign(w0, getQRegLane(nn, laneNo, laneTy));
+      }
+      else {
+         /* invalid; leave laneTy unchanged. */
+      }
+      /* */
+      if (laneTy != Ity_INVALID) {
+         vassert(laneNo < 16);
+         /* Scalar dest: lane value in the low 64 bits, upper zeroed. */
+         putQReg128(dd, binop(Iop_64HLtoV128, mkU64(0), mkexpr(w0)));
+         DIP("dup %s, %s.%s[%u]\n",
+             nameQRegLO(dd, laneTy), nameQReg128(nn), arTs, laneNo);
+         return True;
+      }
+      /* else fall through */
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_scalar_pairwise(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31   28    23 21    16     11 9 4
+      01 u 11110 sz 11000 opcode 10 n d
+      Decode fields: u,sz,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   /* Bail out unless all the fixed fields match. */
+   Bool fixedOK
+      = INSN(31,30) == BITS2(0,1)
+        && INSN(28,24) == BITS5(1,1,1,1,0)
+        && INSN(21,17) == BITS5(1,1,0,0,0)
+        && INSN(11,10) == BITS2(1,0);
+   if (!fixedOK) {
+      return False;
+   }
+   UInt bitU   = INSN(29,29);
+   UInt sz     = INSN(23,22);
+   UInt opcode = INSN(16,12);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+
+   if (bitU == 0 && sz == X11 && opcode == BITS5(1,1,0,1,1)) {
+      /* -------- 0,11,11011 ADDP d_2d -------- */
+      /* Sum the two 64-bit halves of Vn; the result goes to Dd with
+         the upper half of Qdd zeroed. */
+      IRTemp vec     = newTempV128();
+      IRTemp vecSwap = newTempV128();
+      assign(vec,     getQReg128(nn));
+      assign(vecSwap, binop(Iop_InterleaveHI64x2, mkexpr(vec), mkexpr(vec)));
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128,
+                          binop(Iop_Add64x2, mkexpr(vec), mkexpr(vecSwap))));
+      DIP("addp d%u, %s.2d\n", dd, nameQReg128(nn));
+      return True;
+   }
+
+   if (bitU == 1 && sz <= X01 && opcode == BITS5(0,1,1,0,1)) {
+      /* -------- 1,00,01101 ADDP s_2s -------- */
+      /* -------- 1,01,01101 ADDP d_2d -------- */
+      Bool   is64  = (sz == X01);
+      UInt   szLg2 = is64 ? 3 : 2;
+      IROp   opZHI = mkVecZEROHIxxOFV128(szLg2);
+      IROp   opADD = mkVecADDF(szLg2);
+      IRTemp vec   = newTempV128();
+      IRTemp lane0 = newTempV128();
+      IRTemp lane1 = newTempV128();
+      assign(vec, getQReg128(nn));
+      /* Isolate lane 0 and lane 1 of the source, each in the lowest
+         lane of a zeroed vector, then add them as floats. */
+      assign(lane0, unop(opZHI, mkexpr(vec)));
+      assign(lane1, unop(opZHI, triop(Iop_SliceV128, mkexpr(vec), mkexpr(vec),
+                                      mkU8(is64 ? 8 : 4))));
+      putQReg128(dd, unop(opZHI,
+                          triop(opADD, mkexpr(mk_get_IR_rounding_mode()),
+                                mkexpr(lane0), mkexpr(lane1))));
+      DIP(is64 ? "faddp d%u, v%u.2d\n" : "faddp s%u, v%u.2s\n", dd, nn);
+      return True;
+   }
+
+   if (bitU == 1
+       && (opcode == BITS5(0,1,1,0,0) || opcode == BITS5(0,1,1,1,1))) {
+      /* -------- 1,0x,01100 FMAXNMP d_2d, s_2s -------- */
+      /* -------- 1,1x,01100 FMINNMP d_2d, s_2s -------- */
+      /* -------- 1,0x,01111 FMAXP   d_2d, s_2s -------- */
+      /* -------- 1,1x,01111 FMINP   d_2d, s_2s -------- */
+      /* FMAXNM, FMINNM: FIXME -- KLUDGED */
+      Bool   is64  = (sz & 1) == 1;
+      Bool   isMIN = (sz & 2) == 2;
+      Bool   isNM  = opcode == BITS5(0,1,1,0,0);
+      UInt   szLg2 = is64 ? 3 : 2;
+      IROp   opZHI = mkVecZEROHIxxOFV128(szLg2);
+      IROp   opMXX = (isMIN ? mkVecMINF : mkVecMAXF)(szLg2);
+      IRTemp vec   = newTempV128();
+      IRTemp lane0 = newTempV128();
+      IRTemp lane1 = newTempV128();
+      assign(vec, getQReg128(nn));
+      assign(lane0, unop(opZHI, mkexpr(vec)));
+      assign(lane1, unop(opZHI, triop(Iop_SliceV128, mkexpr(vec), mkexpr(vec),
+                                      mkU8(is64 ? 8 : 4))));
+      putQReg128(dd, unop(opZHI,
+                          binop(opMXX, mkexpr(lane0), mkexpr(lane1))));
+      HChar suff = is64 ? 'd' : 's';
+      DIP("%s%sp %c%u, v%u.2%c\n",
+           isMIN ? "fmin" : "fmax", isNM ? "nm" : "", suff, dd, nn, suff);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_scalar_shift_by_imm(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31   28     22   18   15     10 9 4
+      01 u 111110 immh immb opcode 1  n d
+      Decode fields: u,immh,opcode
+      Returns True iff the instruction was decoded and IR emitted.
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,30) != BITS2(0,1)
+       || INSN(28,23) != BITS6(1,1,1,1,1,0) || INSN(10,10) != 1) {
+      return False;
+   }
+   UInt bitU   = INSN(29,29);
+   UInt immh   = INSN(22,19);
+   UInt immb   = INSN(18,16);
+   UInt opcode = INSN(15,11);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+   UInt immhb  = (immh << 3) | immb;
+
+   if ((immh & 8) == 8
+       && (opcode == BITS5(0,0,0,0,0) || opcode == BITS5(0,0,0,1,0))) {
+      /* -------- 0,1xxx,00000 SSHR d_d_#imm -------- */
+      /* -------- 1,1xxx,00000 USHR d_d_#imm -------- */
+      /* -------- 0,1xxx,00010 SSRA d_d_#imm -------- */
+      /* -------- 1,1xxx,00010 USRA d_d_#imm -------- */
+      Bool isU   = bitU == 1;
+      Bool isAcc = opcode == BITS5(0,0,0,1,0);
+      UInt sh    = 128 - immhb;
+      vassert(sh >= 1 && sh <= 64);
+      IROp    op  = isU ? Iop_ShrN64x2 : Iop_SarN64x2;
+      IRExpr* src = getQReg128(nn);
+      IRTemp  shf = newTempV128();
+      IRTemp  res = newTempV128();
+      /* A shift amount of 64 is not representable by the IR shift ops:
+         USHR #64 yields zero, and SSHR #64 is the same as SSHR #63. */
+      if (sh == 64 && isU) {
+         assign(shf, mkV128(0x0000));
+      } else {
+         UInt nudge = 0;
+         if (sh == 64) {
+            vassert(!isU);
+            nudge = 1;
+         }
+         assign(shf, binop(op, src, mkU8(sh - nudge)));
+      }
+      assign(res, isAcc ? binop(Iop_Add64x2, getQReg128(dd), mkexpr(shf))
+                        : mkexpr(shf));
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      const HChar* nm = isAcc ? (isU ? "usra" : "ssra")
+                              : (isU ? "ushr" : "sshr");
+      DIP("%s d%u, d%u, #%u\n", nm, dd, nn, sh);
+      return True;
+   }
+
+   if ((immh & 8) == 8
+       && (opcode == BITS5(0,0,1,0,0) || opcode == BITS5(0,0,1,1,0))) {
+      /* -------- 0,1xxx,00100 SRSHR d_d_#imm -------- */
+      /* -------- 1,1xxx,00100 URSHR d_d_#imm -------- */
+      /* -------- 0,1xxx,00110 SRSRA d_d_#imm -------- */
+      /* -------- 1,1xxx,00110 URSRA d_d_#imm -------- */
+      Bool isU   = bitU == 1;
+      Bool isAcc = opcode == BITS5(0,0,1,1,0);
+      UInt sh    = 128 - immhb;
+      vassert(sh >= 1 && sh <= 64);
+      IROp    op  = isU ? Iop_Rsh64Ux2 : Iop_Rsh64Sx2;
+      /* The Rsh ops take per-lane signed shift amounts; a negative
+         amount means shift right, hence -sh below. */
+      IRExpr* src  = getQReg128(nn);
+      IRTemp  imm8 = newTemp(Ity_I8);
+      assign(imm8, mkU8((UChar)(-sh)));
+      IRExpr* amt  = mkexpr(math_DUP_TO_V128(imm8, Ity_I8));
+      IRTemp  shf  = newTempV128();
+      IRTemp  res  = newTempV128();
+      assign(shf, binop(op, src, amt));
+      assign(res, isAcc ? binop(Iop_Add64x2, getQReg128(dd), mkexpr(shf))
+                        : mkexpr(shf));
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      const HChar* nm = isAcc ? (isU ? "ursra" : "srsra")
+                              : (isU ? "urshr" : "srshr");
+      DIP("%s d%u, d%u, #%u\n", nm, dd, nn, sh);
+      return True;
+   }
+
+   if (bitU == 1 && (immh & 8) == 8 && opcode == BITS5(0,1,0,0,0)) {
+      /* -------- 1,1xxx,01000 SRI d_d_#imm -------- */
+      UInt sh = 128 - immhb;
+      vassert(sh >= 1 && sh <= 64);
+      if (sh == 64) {
+         /* Shifting in nothing: dd's low half is unchanged. */
+         putQReg128(dd, unop(Iop_ZeroHI64ofV128, getQReg128(dd)));
+      } else {
+         /* sh is in range 1 .. 63.  nmask keeps the top sh bits of dd
+            (arithmetic shift right replicates the sign bit). */
+         ULong   nmask  = (ULong)(((Long)0x8000000000000000ULL) >> (sh-1));
+         IRExpr* nmaskV = binop(Iop_64HLtoV128, mkU64(nmask), mkU64(nmask));
+         IRTemp  res    = newTempV128();
+         assign(res, binop(Iop_OrV128,
+                           binop(Iop_AndV128, getQReg128(dd), nmaskV),
+                           binop(Iop_ShrN64x2, getQReg128(nn), mkU8(sh))));
+         putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      }
+      DIP("sri d%u, d%u, #%u\n", dd, nn, sh);
+      return True;
+   }
+
+   if (bitU == 0 && (immh & 8) == 8 && opcode == BITS5(0,1,0,1,0)) {
+      /* -------- 0,1xxx,01010 SHL d_d_#imm -------- */
+      UInt sh = immhb - 64;
+      vassert(sh < 64); /* immh & 8 guarantees immhb >= 64 */
+      putQReg128(dd,
+                 unop(Iop_ZeroHI64ofV128,
+                      sh == 0 ? getQReg128(nn)
+                              : binop(Iop_ShlN64x2, getQReg128(nn), mkU8(sh))));
+      DIP("shl d%u, d%u, #%u\n", dd, nn, sh);
+      return True;
+   }
+
+   if (bitU == 1 && (immh & 8) == 8 && opcode == BITS5(0,1,0,1,0)) {
+      /* -------- 1,1xxx,01010 SLI d_d_#imm -------- */
+      UInt sh = immhb - 64;
+      vassert(sh < 64); /* immh & 8 guarantees immhb >= 64 */
+      if (sh == 0) {
+         putQReg128(dd, unop(Iop_ZeroHI64ofV128, getQReg128(nn)));
+      } else {
+         /* sh is in range 1 .. 63.  nmask keeps the bottom sh bits
+            of dd. */
+         ULong   nmask  = (1ULL << sh) - 1;
+         IRExpr* nmaskV = binop(Iop_64HLtoV128, mkU64(nmask), mkU64(nmask));
+         IRTemp  res    = newTempV128();
+         assign(res, binop(Iop_OrV128,
+                           binop(Iop_AndV128, getQReg128(dd), nmaskV),
+                           binop(Iop_ShlN64x2, getQReg128(nn), mkU8(sh))));
+         putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      }
+      DIP("sli d%u, d%u, #%u\n", dd, nn, sh);
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,1,1,0)
+       || (bitU == 1 && opcode == BITS5(0,1,1,0,0))) {
+      /* -------- 0,01110  SQSHL  #imm -------- */
+      /* -------- 1,01110  UQSHL  #imm -------- */
+      /* -------- 1,01100  SQSHLU #imm -------- */
+      UInt size  = 0;
+      UInt shift = 0;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+      if (!ok) return False;
+      vassert(size <= 3);
+      /* The shift encoding has opposite sign for the leftwards case.
+         Adjust shift to compensate. */
+      UInt lanebits = 8 << size;
+      shift = lanebits - shift;
+      vassert(shift < lanebits);
+      const HChar* nm = NULL;
+      /**/ if (bitU == 0 && opcode == BITS5(0,1,1,1,0)) nm = "sqshl";
+      else if (bitU == 1 && opcode == BITS5(0,1,1,1,0)) nm = "uqshl";
+      else if (bitU == 1 && opcode == BITS5(0,1,1,0,0)) nm = "sqshlu";
+      else vassert(0);
+      IRTemp qDiff1 = IRTemp_INVALID;
+      IRTemp qDiff2 = IRTemp_INVALID;
+      IRTemp res = IRTemp_INVALID;
+      IRTemp src = math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, getQReg128(nn));
+      /* This relies on the fact that the zeroed out lanes generate zeroed
+         result lanes and don't saturate, so there's no point in trimming
+         the resulting res, qDiff1 or qDiff2 values. */
+      math_QSHL_IMM(&res, &qDiff1, &qDiff2, src, size, shift, nm);
+      putQReg128(dd, mkexpr(res));
+      updateQCFLAGwithDifference(qDiff1, qDiff2);
+      const HChar arr = "bhsd"[size];
+      DIP("%s %c%u, %c%u, #%u\n", nm, arr, dd, arr, nn, shift);
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,0,1,0) || opcode == BITS5(1,0,0,1,1)
+       || (bitU == 1
+           && (opcode == BITS5(1,0,0,0,0) || opcode == BITS5(1,0,0,0,1)))) {
+      /* -------- 0,10010   SQSHRN #imm -------- */
+      /* -------- 1,10010   UQSHRN #imm -------- */
+      /* -------- 0,10011  SQRSHRN #imm -------- */
+      /* -------- 1,10011  UQRSHRN #imm -------- */
+      /* -------- 1,10000  SQSHRUN #imm -------- */
+      /* -------- 1,10001 SQRSHRUN #imm -------- */
+      UInt size  = 0;
+      UInt shift = 0;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+      if (!ok || size == X11) return False;
+      vassert(size <= X10);
+      vassert(shift >= 1 && shift <= (8 << size));
+      const HChar* nm = "??";
+      IROp op = Iop_INVALID;
+      /* Decide on the name and the operation. */
+      /**/ if (bitU == 0 && opcode == BITS5(1,0,0,1,0)) {
+         nm = "sqshrn"; op = mkVecQANDqsarNNARROWSS(size);
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,1,0)) {
+         nm = "uqshrn"; op = mkVecQANDqshrNNARROWUU(size);
+      }
+      else if (bitU == 0 && opcode == BITS5(1,0,0,1,1)) {
+         nm = "sqrshrn"; op = mkVecQANDqrsarNNARROWSS(size);
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,1,1)) {
+         nm = "uqrshrn"; op = mkVecQANDqrshrNNARROWUU(size);
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,0,0)) {
+         nm = "sqshrun"; op = mkVecQANDqsarNNARROWSU(size);
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,0,1)) {
+         nm = "sqrshrun"; op = mkVecQANDqrsarNNARROWSU(size);
+      }
+      else vassert(0);
+      /* Compute the result (Q, shifted value) pair. */
+      IRTemp src128 = math_ZERO_ALL_EXCEPT_LOWEST_LANE(size+1, getQReg128(nn));
+      IRTemp pair   = newTempV128();
+      assign(pair, binop(op, mkexpr(src128), mkU8(shift)));
+      /* Update the result reg */
+      IRTemp res64in128 = newTempV128();
+      assign(res64in128, unop(Iop_ZeroHI64ofV128, mkexpr(pair)));
+      putQReg128(dd, mkexpr(res64in128));
+      /* Update the Q flag. */
+      IRTemp q64q64 = newTempV128();
+      assign(q64q64, binop(Iop_InterleaveHI64x2, mkexpr(pair), mkexpr(pair)));
+      IRTemp z128 = newTempV128();
+      assign(z128, mkV128(0x0000));
+      updateQCFLAGwithDifference(q64q64, z128);
+      /* */
+      const HChar arrNarrow = "bhsd"[size];
+      const HChar arrWide   = "bhsd"[size+1];
+      DIP("%s %c%u, %c%u, #%u\n", nm, arrNarrow, dd, arrWide, nn, shift);
+      return True;
+   }
+
+   if (immh >= BITS4(0,1,0,0) && opcode == BITS5(1,1,1,0,0)) {
+      /* -------- 0,!=00xx,11100 SCVTF d_d_imm, s_s_imm -------- */
+      /* -------- 1,!=00xx,11100 UCVTF d_d_imm, s_s_imm -------- */
+      UInt size  = 0;
+      UInt fbits = 0;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&fbits, &size, immh, immb);
+      /* The following holds because immh is never zero. */
+      vassert(ok);
+      /* The following holds because immh >= 0100. */
+      vassert(size == X10 || size == X11);
+      Bool isD = size == X11;
+      Bool isU = bitU == 1;
+      vassert(fbits >= 1 && fbits <= (isD ? 64 : 32));
+      Double  scale  = two_to_the_minus(fbits);
+      IRExpr* scaleE = isD ? IRExpr_Const(IRConst_F64(scale))
+                           : IRExpr_Const(IRConst_F32( (Float)scale ));
+      IROp    opMUL  = isD ? Iop_MulF64 : Iop_MulF32;
+      IROp    opCVT  = isU ? (isD ? Iop_I64UtoF64 : Iop_I32UtoF32)
+                           : (isD ? Iop_I64StoF64 : Iop_I32StoF32);
+      IRType tyF = isD ? Ity_F64 : Ity_F32;
+      IRType tyI = isD ? Ity_I64 : Ity_I32;
+      IRTemp src = newTemp(tyI);
+      IRTemp res = newTemp(tyF);
+      IRTemp rm  = mk_get_IR_rounding_mode();
+      assign(src, getQRegLane(nn, 0, tyI));
+      assign(res, triop(opMUL, mkexpr(rm),
+                               binop(opCVT, mkexpr(rm), mkexpr(src)), scaleE));
+      putQRegLane(dd, 0, mkexpr(res));
+      /* Zero the rest of Qdd.  For the S case, I32 lane 1 is bits
+         32..63; the I64 lane-1 store below then clears bits 64..127
+         in both cases. */
+      if (!isD) {
+         putQRegLane(dd, 1, mkU32(0));
+      }
+      putQRegLane(dd, 1, mkU64(0));
+      const HChar ch = isD ? 'd' : 's';
+      DIP("%s %c%u, %c%u, #%u\n", isU ? "ucvtf" : "scvtf",
+          ch, dd, ch, nn, fbits);
+      return True;
+   }
+
+   if (immh >= BITS4(0,1,0,0) && opcode == BITS5(1,1,1,1,1)) {
+      /* -------- 0,!=00xx,11111 FCVTZS d_d_imm, s_s_imm -------- */
+      /* -------- 1,!=00xx,11111 FCVTZU d_d_imm, s_s_imm -------- */
+      UInt size  = 0;
+      UInt fbits = 0;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&fbits, &size, immh, immb);
+      /* The following holds because immh is never zero. */
+      vassert(ok);
+      /* The following holds because immh >= 0100. */
+      vassert(size == X10 || size == X11);
+      Bool isD = size == X11;
+      Bool isU = bitU == 1;
+      vassert(fbits >= 1 && fbits <= (isD ? 64 : 32));
+      Double  scale  = two_to_the_plus(fbits);
+      IRExpr* scaleE = isD ? IRExpr_Const(IRConst_F64(scale))
+                           : IRExpr_Const(IRConst_F32( (Float)scale ));
+      IROp    opMUL  = isD ? Iop_MulF64 : Iop_MulF32;
+      IROp    opCVT  = isU ? (isD ? Iop_F64toI64U : Iop_F32toI32U)
+                           : (isD ? Iop_F64toI64S : Iop_F32toI32S);
+      IRType tyF = isD ? Ity_F64 : Ity_F32;
+      IRType tyI = isD ? Ity_I64 : Ity_I32;
+      IRTemp src = newTemp(tyF);
+      IRTemp res = newTemp(tyI);
+      IRTemp rm  = newTemp(Ity_I32);
+      assign(src, getQRegLane(nn, 0, tyF));
+      assign(rm,  mkU32(Irrm_ZERO));
+      assign(res, binop(opCVT, mkexpr(rm),
+                               triop(opMUL, mkexpr(rm), mkexpr(src), scaleE)));
+      putQRegLane(dd, 0, mkexpr(res));
+      /* Zero the rest of Qdd, as in the SCVTF/UCVTF case above. */
+      if (!isD) {
+         putQRegLane(dd, 1, mkU32(0));
+      }
+      putQRegLane(dd, 1, mkU64(0));
+      const HChar ch = isD ? 'd' : 's';
+      DIP("%s %c%u, %c%u, #%u\n", isU ? "fcvtzu" : "fcvtzs",
+          ch, dd, ch, nn, fbits);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_scalar_three_different(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31 29 28    23   21 20 15     11 9 4
+      01 U  11110 size 1  m  opcode 00 n d
+      Decode fields: u,opcode
+      Returns True iff the instruction was decoded and IR emitted.
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,30) != BITS2(0,1)
+       || INSN(28,24) != BITS5(1,1,1,1,0)
+       || INSN(21,21) != 1
+       || INSN(11,10) != BITS2(0,0)) {
+      return False;
+   }
+   UInt bitU   = INSN(29,29);
+   UInt size   = INSN(23,22);
+   UInt mm     = INSN(20,16);
+   UInt opcode = INSN(15,12);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+   vassert(size < 4);
+
+   if (bitU == 0
+       && (opcode == BITS4(1,1,0,1)
+           || opcode == BITS4(1,0,0,1) || opcode == BITS4(1,0,1,1))) {
+      /* -------- 0,1101  SQDMULL -------- */ // 0 (ks)
+      /* -------- 0,1001  SQDMLAL -------- */ // 1
+      /* -------- 0,1011  SQDMLSL -------- */ // 2
+      /* Widens, and size refers to the narrowed lanes. */
+      UInt ks = 3;
+      switch (opcode) {
+         case BITS4(1,1,0,1): ks = 0; break;
+         case BITS4(1,0,0,1): ks = 1; break;
+         case BITS4(1,0,1,1): ks = 2; break;
+         default: vassert(0);
+      }
+      vassert(ks <= 2);
+      /* Only H and S narrow-lane variants are valid. */
+      if (size == X00 || size == X11) return False;
+      vassert(size <= 2);
+      IRTemp vecN, vecM, vecD, res, sat1q, sat1n, sat2q, sat2n;
+      vecN = vecM = vecD = res = sat1q = sat1n = sat2q = sat2n = IRTemp_INVALID;
+      newTempsV128_3(&vecN, &vecM, &vecD);
+      assign(vecN, getQReg128(nn));
+      assign(vecM, getQReg128(mm));
+      assign(vecD, getQReg128(dd));
+      /* vecD only participates for the accumulating (MLAL/MLSL) forms. */
+      math_SQDMULL_ACC(&res, &sat1q, &sat1n, &sat2q, &sat2n,
+                       False/*!is2*/, size, "mas"[ks],
+                       vecN, vecM, ks == 0 ? IRTemp_INVALID : vecD);
+      IROp opZHI = mkVecZEROHIxxOFV128(size+1);
+      putQReg128(dd, unop(opZHI, mkexpr(res)));
+      vassert(sat1q != IRTemp_INVALID && sat1n != IRTemp_INVALID);
+      updateQCFLAGwithDifferenceZHI(sat1q, sat1n, opZHI);
+      if (sat2q != IRTemp_INVALID || sat2n != IRTemp_INVALID) {
+         updateQCFLAGwithDifferenceZHI(sat2q, sat2n, opZHI);
+      }
+      const HChar* nm        = ks == 0 ? "sqdmull"
+                                       : (ks == 1 ? "sqdmlal" : "sqdmlsl");
+      const HChar  arrNarrow = "bhsd"[size];
+      const HChar  arrWide   = "bhsd"[size+1];
+      /* Register numbers are unsigned, so use %u (matches the other
+         decoders in this file). */
+      DIP("%s %c%u, %c%u, %c%u\n",
+          nm, arrWide, dd, arrNarrow, nn, arrNarrow, mm);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_scalar_three_same(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31 29 28    23   21 20 15     10 9 4
+      01 U  11110 size 1  m  opcode 1  n d
+      Decode fields: u,size,opcode
+      Returns True iff the instruction was decoded and IR emitted.
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,30) != BITS2(0,1)
+       || INSN(28,24) != BITS5(1,1,1,1,0)
+       || INSN(21,21) != 1
+       || INSN(10,10) != 1) {
+      return False;
+   }
+   UInt bitU   = INSN(29,29);
+   UInt size   = INSN(23,22);
+   UInt mm     = INSN(20,16);
+   UInt opcode = INSN(15,11);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+   vassert(size < 4);
+
+   if (opcode == BITS5(0,0,0,0,1) || opcode == BITS5(0,0,1,0,1)) {
+      /* -------- 0,xx,00001 SQADD std4_std4_std4 -------- */
+      /* -------- 1,xx,00001 UQADD std4_std4_std4 -------- */
+      /* -------- 0,xx,00101 SQSUB std4_std4_std4 -------- */
+      /* -------- 1,xx,00101 UQSUB std4_std4_std4 -------- */
+      Bool isADD = opcode == BITS5(0,0,0,0,1);
+      Bool isU   = bitU == 1;
+      IROp qop   = Iop_INVALID;
+      IROp nop   = Iop_INVALID;
+      if (isADD) {
+         qop = isU ? mkVecQADDU(size) : mkVecQADDS(size);
+         nop = mkVecADD(size);
+      } else {
+         qop = isU ? mkVecQSUBU(size) : mkVecQSUBS(size);
+         nop = mkVecSUB(size);
+      }
+      /* QC is set if the saturating and non-saturating results differ. */
+      IRTemp argL = newTempV128();
+      IRTemp argR = newTempV128();
+      IRTemp qres = newTempV128();
+      IRTemp nres = newTempV128();
+      assign(argL, getQReg128(nn));
+      assign(argR, getQReg128(mm));
+      assign(qres, mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(
+                             size, binop(qop, mkexpr(argL), mkexpr(argR)))));
+      assign(nres, mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(
+                             size, binop(nop, mkexpr(argL), mkexpr(argR)))));
+      putQReg128(dd, mkexpr(qres));
+      updateQCFLAGwithDifference(qres, nres);
+      const HChar* nm  = isADD ? (isU ? "uqadd" : "sqadd")
+                               : (isU ? "uqsub" : "sqsub");
+      const HChar  arr = "bhsd"[size];
+      DIP("%s %c%u, %c%u, %c%u\n", nm, arr, dd, arr, nn, arr, mm);
+      return True;
+   }
+
+   if (size == X11 && opcode == BITS5(0,0,1,1,0)) {
+      /* -------- 0,11,00110 CMGT d_d_d -------- */ // >s
+      /* -------- 1,11,00110 CMHI d_d_d -------- */ // >u
+      Bool    isGT = bitU == 0;
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = getQReg128(mm);
+      IRTemp  res  = newTempV128();
+      assign(res,
+             isGT ? binop(Iop_CmpGT64Sx2, argL, argR)
+                  : binop(Iop_CmpGT64Ux2, argL, argR));
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      DIP("%s %s, %s, %s\n",isGT ? "cmgt" : "cmhi",
+          nameQRegLO(dd, Ity_I64),
+          nameQRegLO(nn, Ity_I64), nameQRegLO(mm, Ity_I64));
+      return True;
+   }
+
+   if (size == X11 && opcode == BITS5(0,0,1,1,1)) {
+      /* -------- 0,11,00111 CMGE d_d_d -------- */ // >=s
+      /* -------- 1,11,00111 CMHS d_d_d -------- */ // >=u
+      /* a >= b is computed as !(b > a). */
+      Bool    isGE = bitU == 0;
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = getQReg128(mm);
+      IRTemp  res  = newTempV128();
+      assign(res,
+             isGE ? unop(Iop_NotV128, binop(Iop_CmpGT64Sx2, argR, argL))
+                  : unop(Iop_NotV128, binop(Iop_CmpGT64Ux2, argR, argL)));
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      DIP("%s %s, %s, %s\n", isGE ? "cmge" : "cmhs",
+          nameQRegLO(dd, Ity_I64),
+          nameQRegLO(nn, Ity_I64), nameQRegLO(mm, Ity_I64));
+      return True;
+   }
+
+   if (size == X11 && (opcode == BITS5(0,1,0,0,0)
+                       || opcode == BITS5(0,1,0,1,0))) {
+      /* -------- 0,xx,01000 SSHL  d_d_d -------- */
+      /* -------- 0,xx,01010 SRSHL d_d_d -------- */
+      /* -------- 1,xx,01000 USHL  d_d_d -------- */
+      /* -------- 1,xx,01010 URSHL d_d_d -------- */
+      Bool isU = bitU == 1;
+      Bool isR = opcode == BITS5(0,1,0,1,0);
+      IROp op  = isR ? (isU ? mkVecRSHU(size) : mkVecRSHS(size))
+                     : (isU ? mkVecSHU(size)  : mkVecSHS(size));
+      IRTemp res = newTempV128();
+      assign(res, binop(op, getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      const HChar* nm  = isR ? (isU ? "urshl" : "srshl")
+                             : (isU ? "ushl"  : "sshl");
+      DIP("%s %s, %s, %s\n", nm,
+          nameQRegLO(dd, Ity_I64),
+          nameQRegLO(nn, Ity_I64), nameQRegLO(mm, Ity_I64));
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,0,0,1) || opcode == BITS5(0,1,0,1,1)) {
+      /* -------- 0,xx,01001 SQSHL  std4_std4_std4 -------- */
+      /* -------- 0,xx,01011 SQRSHL std4_std4_std4 -------- */
+      /* -------- 1,xx,01001 UQSHL  std4_std4_std4 -------- */
+      /* -------- 1,xx,01011 UQRSHL std4_std4_std4 -------- */
+      Bool isU = bitU == 1;
+      Bool isR = opcode == BITS5(0,1,0,1,1);
+      IROp op  = isR ? (isU ? mkVecQANDUQRSH(size) : mkVecQANDSQRSH(size))
+                     : (isU ? mkVecQANDUQSH(size)  : mkVecQANDSQSH(size));
+      /* This is a bit tricky.  Since we're only interested in the lowest
+         lane of the result, we zero out all the rest in the operands, so
+         as to ensure that other lanes don't pollute the returned Q value.
+         This works because it means, for the lanes we don't care about, we
+         are shifting zero by zero, which can never saturate. */
+      IRTemp res256 = newTemp(Ity_V256);
+      IRTemp resSH  = newTempV128();
+      IRTemp resQ   = newTempV128();
+      IRTemp zero   = newTempV128();
+      assign(
+         res256,
+         binop(op,
+               mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, getQReg128(nn))),
+               mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, getQReg128(mm)))));
+      assign(resSH, unop(Iop_V256toV128_0, mkexpr(res256)));
+      assign(resQ,  unop(Iop_V256toV128_1, mkexpr(res256)));
+      assign(zero,  mkV128(0x0000));
+      putQReg128(dd, mkexpr(resSH));
+      updateQCFLAGwithDifference(resQ, zero);
+      const HChar* nm  = isR ? (isU ? "uqrshl" : "sqrshl")
+                             : (isU ? "uqshl"  : "sqshl");
+      const HChar  arr = "bhsd"[size];
+      DIP("%s %c%u, %c%u, %c%u\n", nm, arr, dd, arr, nn, arr, mm);
+      return True;
+   }
+
+   if (size == X11 && opcode == BITS5(1,0,0,0,0)) {
+      /* -------- 0,11,10000 ADD d_d_d -------- */
+      /* -------- 1,11,10000 SUB d_d_d -------- */
+      Bool   isSUB = bitU == 1;
+      IRTemp res   = newTemp(Ity_I64);
+      assign(res, binop(isSUB ? Iop_Sub64 : Iop_Add64,
+                        getQRegLane(nn, 0, Ity_I64),
+                        getQRegLane(mm, 0, Ity_I64)));
+      putQRegLane(dd, 0, mkexpr(res));
+      putQRegLane(dd, 1, mkU64(0));
+      DIP("%s %s, %s, %s\n", isSUB ? "sub" : "add",
+          nameQRegLO(dd, Ity_I64),
+          nameQRegLO(nn, Ity_I64), nameQRegLO(mm, Ity_I64));
+      return True;
+   }
+
+   if (size == X11 && opcode == BITS5(1,0,0,0,1)) {
+      /* -------- 0,11,10001 CMTST d_d_d -------- */ // &, != 0
+      /* -------- 1,11,10001 CMEQ  d_d_d -------- */ // ==
+      Bool    isEQ = bitU == 1;
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = getQReg128(mm);
+      IRTemp  res  = newTempV128();
+      assign(res,
+             isEQ ? binop(Iop_CmpEQ64x2, argL, argR)
+                  : unop(Iop_NotV128, binop(Iop_CmpEQ64x2,
+                                            binop(Iop_AndV128, argL, argR),
+                                            mkV128(0x0000))));
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      DIP("%s %s, %s, %s\n", isEQ ? "cmeq" : "cmtst",
+          nameQRegLO(dd, Ity_I64),
+          nameQRegLO(nn, Ity_I64), nameQRegLO(mm, Ity_I64));
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,1,1,0)) {
+      /* -------- 0,xx,10110 SQDMULH s and h variants only -------- */
+      /* -------- 1,xx,10110 SQRDMULH s and h variants only -------- */
+      if (size == X00 || size == X11) return False;
+      Bool isR = bitU == 1;
+      IRTemp res, sat1q, sat1n, vN, vM;
+      res = sat1q = sat1n = vN = vM = IRTemp_INVALID;
+      newTempsV128_2(&vN, &vM);
+      assign(vN, getQReg128(nn));
+      assign(vM, getQReg128(mm));
+      math_SQDMULH(&res, &sat1q, &sat1n, isR, size, vN, vM);
+      putQReg128(dd,
+                 mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, mkexpr(res))));
+      updateQCFLAGwithDifference(
+         math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, mkexpr(sat1q)),
+         math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, mkexpr(sat1n)));
+      const HChar  arr = "bhsd"[size];
+      const HChar* nm  = isR ? "sqrdmulh" : "sqdmulh";
+      /* Register numbers are unsigned, so use %u (matches the other
+         decoders in this file). */
+      DIP("%s %c%u, %c%u, %c%u\n", nm, arr, dd, arr, nn, arr, mm);
+      return True;
+   }
+
+   if (bitU == 1 && size >= X10 && opcode == BITS5(1,1,0,1,0)) {
+      /* -------- 1,1x,11010 FABD d_d_d, s_s_s -------- */
+      IRType ity = size == X11 ? Ity_F64 : Ity_F32;
+      IRTemp res = newTemp(ity);
+      assign(res, unop(mkABSF(ity),
+                       triop(mkSUBF(ity),
+                             mkexpr(mk_get_IR_rounding_mode()),
+                             getQRegLO(nn,ity), getQRegLO(mm,ity))));
+      putQReg128(dd, mkV128(0x0000));
+      putQRegLO(dd, mkexpr(res));
+      DIP("fabd %s, %s, %s\n",
+          nameQRegLO(dd, ity), nameQRegLO(nn, ity), nameQRegLO(mm, ity));
+      return True;
+   }
+
+   if (bitU == 0 && size <= X01 && opcode == BITS5(1,1,0,1,1)) {
+      /* -------- 0,0x,11011 FMULX d_d_d, s_s_s -------- */
+      // KLUDGE: FMULX is treated the same way as FMUL.  That can't be right.
+      IRType ity = size == X01 ? Ity_F64 : Ity_F32;
+      IRTemp res = newTemp(ity);
+      assign(res, triop(mkMULF(ity),
+                        mkexpr(mk_get_IR_rounding_mode()),
+                        getQRegLO(nn,ity), getQRegLO(mm,ity)));
+      putQReg128(dd, mkV128(0x0000));
+      putQRegLO(dd, mkexpr(res));
+      DIP("fmulx %s, %s, %s\n",
+          nameQRegLO(dd, ity), nameQRegLO(nn, ity), nameQRegLO(mm, ity));
+      return True;
+   }
+
+   if (size <= X01 && opcode == BITS5(1,1,1,0,0)) {
+      /* -------- 0,0x,11100 FCMEQ d_d_d, s_s_s -------- */
+      /* -------- 1,0x,11100 FCMGE d_d_d, s_s_s -------- */
+      Bool   isD   = size == X01;
+      IRType ity   = isD ? Ity_F64 : Ity_F32;
+      Bool   isGE  = bitU == 1;
+      IROp   opCMP = isGE ? (isD ? Iop_CmpLE64Fx2 : Iop_CmpLE32Fx4)
+                          : (isD ? Iop_CmpEQ64Fx2 : Iop_CmpEQ32Fx4);
+      IRTemp res   = newTempV128();
+      assign(res, isGE ? binop(opCMP, getQReg128(mm), getQReg128(nn)) // swapd
+                       : binop(opCMP, getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(isD ? X11 : X10,
+                                                             mkexpr(res))));
+      DIP("%s %s, %s, %s\n", isGE ? "fcmge" : "fcmeq",
+          nameQRegLO(dd, ity), nameQRegLO(nn, ity), nameQRegLO(mm, ity));
+      return True;
+   }
+
+   if (bitU == 1 && size >= X10 && opcode == BITS5(1,1,1,0,0)) {
+      /* -------- 1,1x,11100 FCMGT d_d_d, s_s_s -------- */
+      Bool   isD   = size == X11;
+      IRType ity   = isD ? Ity_F64 : Ity_F32;
+      IROp   opCMP = isD ? Iop_CmpLT64Fx2 : Iop_CmpLT32Fx4;
+      IRTemp res   = newTempV128();
+      assign(res, binop(opCMP, getQReg128(mm), getQReg128(nn))); // swapd
+      putQReg128(dd, mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(isD ? X11 : X10,
+                                                             mkexpr(res))));
+      DIP("%s %s, %s, %s\n", "fcmgt",
+          nameQRegLO(dd, ity), nameQRegLO(nn, ity), nameQRegLO(mm, ity));
+      return True;
+   }
+
+   if (bitU == 1 && opcode == BITS5(1,1,1,0,1)) {
+      /* -------- 1,0x,11101 FACGE d_d_d, s_s_s -------- */
+      /* -------- 1,1x,11101 FACGT d_d_d, s_s_s -------- */
+      Bool   isD   = (size & 1) == 1;
+      IRType ity   = isD ? Ity_F64 : Ity_F32;
+      Bool   isGT  = (size & 2) == 2;
+      IROp   opCMP = isGT ? (isD ? Iop_CmpLT64Fx2 : Iop_CmpLT32Fx4)
+                          : (isD ? Iop_CmpLE64Fx2 : Iop_CmpLE32Fx4);
+      IROp   opABS = isD ? Iop_Abs64Fx2 : Iop_Abs32Fx4;
+      IRTemp res   = newTempV128();
+      assign(res, binop(opCMP, unop(opABS, getQReg128(mm)),
+                               unop(opABS, getQReg128(nn)))); // swapd
+      putQReg128(dd, mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(isD ? X11 : X10,
+                                                             mkexpr(res))));
+      DIP("%s %s, %s, %s\n", isGT ? "facgt" : "facge",
+          nameQRegLO(dd, ity), nameQRegLO(nn, ity), nameQRegLO(mm, ity));
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS5(1,1,1,1,1)) {
+      /* -------- 0,0x,11111: FRECPS  d_d_d, s_s_s -------- */
+      /* -------- 0,1x,11111: FRSQRTS d_d_d, s_s_s -------- */
+      Bool isSQRT = (size & 2) == 2;
+      Bool isD    = (size & 1) == 1;
+      IROp op     = isSQRT ? (isD ? Iop_RSqrtStep64Fx2 : Iop_RSqrtStep32Fx4)
+                           : (isD ? Iop_RecipStep64Fx2 : Iop_RecipStep32Fx4);
+      IRTemp res = newTempV128();
+      assign(res, binop(op, getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(isD ? X11 : X10,
+                                                             mkexpr(res))));
+      HChar c = isD ? 'd' : 's';
+      DIP("%s %c%u, %c%u, %c%u\n", isSQRT ? "frsqrts" : "frecps",
+          c, dd, c, nn, c, mm);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+/* Decode and translate one instruction from the AdvSIMD "scalar
+   two-reg misc" group.  On success, emits IR for the instruction and
+   returns True; returns False if |insn| does not decode here, in
+   which case the caller tries other decoders. */
+static
+Bool dis_AdvSIMD_scalar_two_reg_misc(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31 29 28    23   21    16     11 9 4
+      01 U  11110 size 10000 opcode 10 n d
+      Decode fields: u,size,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,30) != BITS2(0,1)
+       || INSN(28,24) != BITS5(1,1,1,1,0)
+       || INSN(21,17) != BITS5(1,0,0,0,0)
+       || INSN(11,10) != BITS2(1,0)) {
+      return False;
+   }
+   UInt bitU   = INSN(29,29);
+   UInt size   = INSN(23,22);
+   UInt opcode = INSN(16,12);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+   vassert(size < 4);
+
+   if (opcode == BITS5(0,0,0,1,1)) {
+      /* -------- 0,xx,00011: SUQADD std4_std4 -------- */
+      /* -------- 1,xx,00011: USQADD std4_std4 -------- */
+      /* These are a bit tricky (to say the least).  See comments on
+         the vector variants (in dis_AdvSIMD_two_reg_misc) below for
+         details. */
+      Bool   isUSQADD = bitU == 1;
+      IROp   qop  = isUSQADD ? mkVecQADDEXTSUSATUU(size)
+                             : mkVecQADDEXTUSSATSS(size);
+      IROp   nop  = mkVecADD(size);
+      IRTemp argL = newTempV128();
+      IRTemp argR = newTempV128();
+      assign(argL, getQReg128(nn));
+      assign(argR, getQReg128(dd));
+      IRTemp qres = math_ZERO_ALL_EXCEPT_LOWEST_LANE(
+                       size, binop(qop, mkexpr(argL), mkexpr(argR)));
+      IRTemp nres = math_ZERO_ALL_EXCEPT_LOWEST_LANE(
+                       size, binop(nop, mkexpr(argL), mkexpr(argR)));
+      putQReg128(dd, mkexpr(qres));
+      /* QC flag is set when the saturating (qres) and plain (nres)
+         results differ. */
+      updateQCFLAGwithDifference(qres, nres);
+      const HChar arr = "bhsd"[size];
+      DIP("%s %c%u, %c%u\n", isUSQADD ? "usqadd" : "suqadd", arr, dd, arr, nn);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,1,1,1)) {
+      /* -------- 0,xx,00111 SQABS std4_std4 -------- */
+      /* -------- 1,xx,00111 SQNEG std4_std4 -------- */
+      Bool isNEG = bitU == 1;
+      IRTemp qresFW = IRTemp_INVALID, nresFW = IRTemp_INVALID;
+      (isNEG ? math_SQNEG : math_SQABS)( &qresFW, &nresFW,
+                                         getQReg128(nn), size );
+      IRTemp qres = math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, mkexpr(qresFW));
+      IRTemp nres = math_ZERO_ALL_EXCEPT_LOWEST_LANE(size, mkexpr(nresFW));
+      putQReg128(dd, mkexpr(qres));
+      updateQCFLAGwithDifference(qres, nres);
+      const HChar arr = "bhsd"[size];
+      DIP("%s %c%u, %c%u\n", isNEG ? "sqneg" : "sqabs", arr, dd, arr, nn);
+      return True;
+   }
+
+   if (size == X11 && opcode == BITS5(0,1,0,0,0)) {
+      /* -------- 0,11,01000: CMGT d_d_#0 -------- */ // >s 0
+      /* -------- 1,11,01000: CMGE d_d_#0 -------- */ // >=s 0
+      Bool    isGT = bitU == 0;
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = mkV128(0x0000);
+      IRTemp  res  = newTempV128();
+      // CMGE (x >=s 0) is expressed as NOT(0 >s x).
+      assign(res, isGT ? binop(Iop_CmpGT64Sx2, argL, argR)
+                       : unop(Iop_NotV128, binop(Iop_CmpGT64Sx2, argR, argL)));
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      DIP("cm%s d%u, d%u, #0\n", isGT ? "gt" : "ge", dd, nn);
+      return True;
+   }
+
+   if (size == X11 && opcode == BITS5(0,1,0,0,1)) {
+      /* -------- 0,11,01001: CMEQ d_d_#0 -------- */ // == 0
+      /* -------- 1,11,01001: CMLE d_d_#0 -------- */ // <=s 0
+      Bool    isEQ = bitU == 0;
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = mkV128(0x0000);
+      IRTemp  res  = newTempV128();
+      // CMLE (x <=s 0) is expressed as NOT(x >s 0).
+      assign(res, isEQ ? binop(Iop_CmpEQ64x2, argL, argR)
+                       : unop(Iop_NotV128,
+                              binop(Iop_CmpGT64Sx2, argL, argR)));
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128, mkexpr(res)));
+      DIP("cm%s d%u, d%u, #0\n", isEQ ? "eq" : "le", dd, nn);
+      return True;
+   }
+
+   if (bitU == 0 && size == X11 && opcode == BITS5(0,1,0,1,0)) {
+      /* -------- 0,11,01010: CMLT d_d_#0 -------- */ // <s 0
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128,
+                          binop(Iop_CmpGT64Sx2, mkV128(0x0000),
+                                                getQReg128(nn))));
+      DIP("cm%s d%u, d%u, #0\n", "lt", dd, nn);
+      return True;
+   }
+
+   if (bitU == 0 && size == X11 && opcode == BITS5(0,1,0,1,1)) {
+      /* -------- 0,11,01011 ABS d_d -------- */
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128,
+                          unop(Iop_Abs64x2, getQReg128(nn))));
+      DIP("abs d%u, d%u\n", dd, nn);
+      return True;
+   }
+
+   if (bitU == 1 && size == X11 && opcode == BITS5(0,1,0,1,1)) {
+      /* -------- 1,11,01011 NEG d_d -------- */
+      putQReg128(dd, unop(Iop_ZeroHI64ofV128,
+                          binop(Iop_Sub64x2, mkV128(0x0000), getQReg128(nn))));
+      DIP("neg d%u, d%u\n", dd, nn);
+      return True;
+   }
+
+   /* Map (opcode,bitU) onto one of the five FP compare-against-zero
+      forms; ix stays 0 if there is no match. */
+   UInt ix = 0; /*INVALID*/
+   if (size >= X10) {
+      switch (opcode) {
+         case BITS5(0,1,1,0,0): ix = (bitU == 1) ? 4 : 1; break;
+         case BITS5(0,1,1,0,1): ix = (bitU == 1) ? 5 : 2; break;
+         case BITS5(0,1,1,1,0): if (bitU == 0) ix = 3; break;
+         default: break;
+      }
+   }
+   if (ix > 0) {
+      /* -------- 0,1x,01100 FCMGT d_d_#0.0, s_s_#0.0 (ix 1) -------- */
+      /* -------- 0,1x,01101 FCMEQ d_d_#0.0, s_s_#0.0 (ix 2) -------- */
+      /* -------- 0,1x,01110 FCMLT d_d_#0.0, s_s_#0.0 (ix 3) -------- */
+      /* -------- 1,1x,01100 FCMGE d_d_#0.0, s_s_#0.0 (ix 4) -------- */
+      /* -------- 1,1x,01101 FCMLE d_d_#0.0, s_s_#0.0 (ix 5) -------- */
+      Bool   isD     = size == X11;
+      IRType ity     = isD ? Ity_F64 : Ity_F32;
+      IROp   opCmpEQ = isD ? Iop_CmpEQ64Fx2 : Iop_CmpEQ32Fx4;
+      IROp   opCmpLE = isD ? Iop_CmpLE64Fx2 : Iop_CmpLE32Fx4;
+      IROp   opCmpLT = isD ? Iop_CmpLT64Fx2 : Iop_CmpLT32Fx4;
+      IROp   opCmp   = Iop_INVALID;
+      Bool   swap    = False;
+      const HChar* nm = "??";
+      /* GT/GE have no direct IROp; they use LT/LE with the operands
+         swapped. */
+      switch (ix) {
+         case 1: nm = "fcmgt"; opCmp = opCmpLT; swap = True; break;
+         case 2: nm = "fcmeq"; opCmp = opCmpEQ; break;
+         case 3: nm = "fcmlt"; opCmp = opCmpLT; break;
+         case 4: nm = "fcmge"; opCmp = opCmpLE; swap = True; break;
+         case 5: nm = "fcmle"; opCmp = opCmpLE; break;
+         default: vassert(0);
+      }
+      IRExpr* zero = mkV128(0x0000);
+      IRTemp res = newTempV128();
+      assign(res, swap ? binop(opCmp, zero, getQReg128(nn))
+                       : binop(opCmp, getQReg128(nn), zero));
+      putQReg128(dd, mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(isD ? X11 : X10,
+                                                             mkexpr(res))));
+
+      DIP("%s %s, %s, #0.0\n", nm, nameQRegLO(dd, ity), nameQRegLO(nn, ity));
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,1,0,0)
+       || (bitU == 1 && opcode == BITS5(1,0,0,1,0))) {
+      /* -------- 0,xx,10100: SQXTN -------- */
+      /* -------- 1,xx,10100: UQXTN -------- */
+      /* -------- 1,xx,10010: SQXTUN -------- */
+      if (size == X11) return False;
+      vassert(size < 3);
+      IROp  opN    = Iop_INVALID;
+      /* zWiden selects zero- vs sign-widening when re-widening the
+         narrowed result for the saturation check below (False only
+         for the fully-signed sqxtn case). */
+      Bool  zWiden = True;
+      const HChar* nm = "??";
+      /**/ if (bitU == 0 && opcode == BITS5(1,0,1,0,0)) {
+         opN = mkVecQNARROWUNSS(size); nm = "sqxtn"; zWiden = False;
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,1,0,0)) {
+         opN = mkVecQNARROWUNUU(size); nm = "uqxtn";
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,1,0)) {
+         opN = mkVecQNARROWUNSU(size); nm = "sqxtun";
+      }
+      else vassert(0);
+      IRTemp src  = math_ZERO_ALL_EXCEPT_LOWEST_LANE(
+                       size+1, getQReg128(nn));
+      IRTemp resN = math_ZERO_ALL_EXCEPT_LOWEST_LANE(
+                       size, unop(Iop_64UtoV128, unop(opN, mkexpr(src))));
+      putQReg128(dd, mkexpr(resN));
+      /* This widens zero lanes to zero, and compares it against zero, so all
+         of the non-participating lanes make no contribution to the
+         Q flag state. */
+      IRTemp resW = math_WIDEN_LO_OR_HI_LANES(zWiden, False/*!fromUpperHalf*/,
+                                              size, mkexpr(resN));
+      updateQCFLAGwithDifference(src, resW);
+      const HChar arrNarrow = "bhsd"[size];
+      const HChar arrWide   = "bhsd"[size+1];
+      DIP("%s %c%u, %c%u\n", nm, arrNarrow, dd, arrWide, nn);
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,1,1,0) && bitU == 1 && size == X01) {
+      /* -------- 1,01,10110 FCVTXN s_d -------- */
+      /* Using Irrm_NEAREST here isn't right.  The docs say "round to
+         odd" but I don't know what that really means. */
+      putQRegLO(dd,
+                binop(Iop_F64toF32, mkU32(Irrm_NEAREST),
+                                    getQRegLO(nn, Ity_F64)));
+      /* Lane indices are relative to the value size: 32-bit lane 1
+         zeroes bits 63:32, 64-bit lane 1 zeroes bits 127:64. */
+      putQRegLane(dd, 1, mkU32(0));
+      putQRegLane(dd, 1, mkU64(0));
+      DIP("fcvtxn s%u, d%u\n", dd, nn);
+      return True;
+   }
+
+   /* Map (opcode, size bit 1) onto one of the five FCVT* rounding
+      variants; ix stays 0 if there is no match. */
+   ix = 0; /*INVALID*/
+   switch (opcode) {
+      case BITS5(1,1,0,1,0): ix = ((size & 2) == 2) ? 4 : 1; break;
+      case BITS5(1,1,0,1,1): ix = ((size & 2) == 2) ? 5 : 2; break;
+      case BITS5(1,1,1,0,0): if ((size & 2) == 0) ix = 3; break;
+      default: break;
+   }
+   if (ix > 0) {
+      /* -------- 0,0x,11010 FCVTNS d_d, s_s (ix 1) -------- */
+      /* -------- 0,0x,11011 FCVTMS d_d, s_s (ix 2) -------- */
+      /* -------- 0,0x,11100 FCVTAS d_d, s_s (ix 3) -------- */
+      /* -------- 0,1x,11010 FCVTPS d_d, s_s (ix 4) -------- */
+      /* -------- 0,1x,11011 FCVTZS d_d, s_s (ix 5) -------- */
+      /* -------- 1,0x,11010 FCVTNS d_d, s_s (ix 1) -------- */
+      /* -------- 1,0x,11011 FCVTMS d_d, s_s (ix 2) -------- */
+      /* -------- 1,0x,11100 FCVTAS d_d, s_s (ix 3) -------- */
+      /* -------- 1,1x,11010 FCVTPS d_d, s_s (ix 4) -------- */
+      /* -------- 1,1x,11011 FCVTZS d_d, s_s (ix 5) -------- */
+      Bool           isD  = (size & 1) == 1;
+      IRType         tyF  = isD ? Ity_F64 : Ity_F32;
+      IRType         tyI  = isD ? Ity_I64 : Ity_I32;
+      IRRoundingMode irrm = 8; /*impossible*/
+      HChar          ch   = '?';
+      switch (ix) {
+         case 1: ch = 'n'; irrm = Irrm_NEAREST; break;
+         case 2: ch = 'm'; irrm = Irrm_NegINF;  break;
+         case 3: ch = 'a'; irrm = Irrm_NEAREST; break; /* kludge? */
+         case 4: ch = 'p'; irrm = Irrm_PosINF;  break;
+         case 5: ch = 'z'; irrm = Irrm_ZERO;    break;
+         default: vassert(0);
+      }
+      IROp cvt = Iop_INVALID;
+      if (bitU == 1) {
+         cvt = isD ? Iop_F64toI64U : Iop_F32toI32U;
+      } else {
+         cvt = isD ? Iop_F64toI64S : Iop_F32toI32S;
+      }
+      IRTemp src = newTemp(tyF);
+      IRTemp res = newTemp(tyI);
+      assign(src, getQRegLane(nn, 0, tyF));
+      assign(res, binop(cvt, mkU32(irrm), mkexpr(src)));
+      putQRegLane(dd, 0, mkexpr(res)); /* bits 31-0 or 63-0 */
+      if (!isD) {
+         putQRegLane(dd, 1, mkU32(0)); /* bits 63-32 */
+      }
+      putQRegLane(dd, 1, mkU64(0));    /* bits 127-64 */
+      HChar sOrD = isD ? 'd' : 's';
+      DIP("fcvt%c%c %c%u, %c%u\n", ch, bitU == 1 ? 'u' : 's',
+          sOrD, dd, sOrD, nn);
+      return True;
+   }
+
+   if (size <= X01 && opcode == BITS5(1,1,1,0,1)) {
+      /* -------- 0,0x,11101: SCVTF d_d, s_s -------- */
+      /* -------- 1,0x,11101: UCVTF d_d, s_s -------- */
+      Bool   isU = bitU == 1;
+      Bool   isD = (size & 1) == 1;
+      IRType tyI = isD ? Ity_I64 : Ity_I32;
+      IROp   iop = isU ? (isD ? Iop_I64UtoF64 : Iop_I32UtoF32)
+                       : (isD ? Iop_I64StoF64 : Iop_I32StoF32);
+      IRTemp rm  = mk_get_IR_rounding_mode();
+      putQRegLO(dd, binop(iop, mkexpr(rm), getQRegLO(nn, tyI)));
+      if (!isD) {
+         putQRegLane(dd, 1, mkU32(0)); /* bits 63-32 */
+      }
+      putQRegLane(dd, 1, mkU64(0));    /* bits 127-64 */
+      HChar c = isD ? 'd' : 's';
+      DIP("%ccvtf %c%u, %c%u\n", isU ? 'u' : 's', c, dd, c, nn);
+      return True;
+   }
+
+   if (size >= X10 && opcode == BITS5(1,1,1,0,1)) {
+      /* -------- 0,1x,11101: FRECPE  d_d, s_s -------- */
+      /* -------- 1,1x,11101: FRSQRTE d_d, s_s -------- */
+      Bool isSQRT = bitU == 1;
+      Bool isD    = (size & 1) == 1;
+      IROp op     = isSQRT ? (isD ? Iop_RSqrtEst64Fx2 : Iop_RSqrtEst32Fx4)
+                           : (isD ? Iop_RecipEst64Fx2 : Iop_RecipEst32Fx4);
+      IRTemp resV = newTempV128();
+      assign(resV, unop(op, getQReg128(nn)));
+      putQReg128(dd, mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(isD ? X11 : X10,
+                                                             mkexpr(resV))));
+      HChar c = isD ? 'd' : 's';
+      DIP("%s %c%u, %c%u\n", isSQRT ? "frsqrte" : "frecpe", c, dd, c, nn);
+      return True;
+   }
+
+   if (bitU == 0 && size >= X10 && opcode == BITS5(1,1,1,1,1)) {
+      /* -------- 0,1x,11111: FRECPX  d_d, s_s -------- */
+      Bool   isD = (size & 1) == 1;
+      IRType ty  = isD ? Ity_F64 : Ity_F32;
+      IROp   op  = isD ? Iop_RecpExpF64 : Iop_RecpExpF32;
+      IRTemp res = newTemp(ty);
+      IRTemp rm  = mk_get_IR_rounding_mode();
+      assign(res, binop(op, mkexpr(rm), getQRegLane(nn, 0, ty)));
+      putQReg128(dd, mkV128(0x0000));
+      putQRegLane(dd, 0, mkexpr(res));
+      HChar c = isD ? 'd' : 's';
+      DIP("%s %c%u, %c%u\n", "frecpx", c, dd, c, nn);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+/* Decode and translate one instruction from the AdvSIMD "scalar
+   x indexed element" group.  On success, emits IR for the
+   instruction and returns True; returns False if |insn| does not
+   decode here, in which case the caller tries other decoders. */
+static
+Bool dis_AdvSIMD_scalar_x_indexed_element(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31   28    23   21 20 19 15     11   9 4
+      01 U 11111 size L  M  m  opcode H  0 n d
+      Decode fields are: u,size,opcode
+      M is really part of the mm register number.  Individual 
+      cases need to inspect L and H though.
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,30) != BITS2(0,1)
+       || INSN(28,24) != BITS5(1,1,1,1,1) || INSN(10,10) !=0) {
+      return False;
+   }
+   UInt bitU   = INSN(29,29);
+   UInt size   = INSN(23,22);
+   UInt bitL   = INSN(21,21);
+   UInt bitM   = INSN(20,20);
+   UInt mmLO4  = INSN(19,16);
+   UInt opcode = INSN(15,12);
+   UInt bitH   = INSN(11,11);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+   vassert(size < 4);
+   vassert(bitH < 2 && bitM < 2 && bitL < 2);
+
+   if (bitU == 0 && size >= X10
+       && (opcode == BITS4(0,0,0,1) || opcode == BITS4(0,1,0,1))) {
+      /* -------- 0,1x,0001 FMLA d_d_d[], s_s_s[] -------- */
+      /* -------- 0,1x,0101 FMLS d_d_d[], s_s_s[] -------- */
+      Bool isD   = (size & 1) == 1;
+      Bool isSUB = opcode == BITS4(0,1,0,1);
+      UInt index;
+      if      (!isD)             index = (bitH << 1) | bitL;
+      else if (isD && bitL == 0) index = bitH;
+      else return False; // sz:L == x11 => unallocated encoding
+      vassert(index < (isD ? 2 : 4));
+      IRType ity   = isD ? Ity_F64 : Ity_F32;
+      IRTemp elem  = newTemp(ity);
+      UInt   mm    = (bitM << 4) | mmLO4;
+      assign(elem, getQRegLane(mm, index, ity));
+      IRTemp dupd  = math_DUP_TO_V128(elem, ity);
+      IROp   opADD = isD ? Iop_Add64Fx2 : Iop_Add32Fx4;
+      IROp   opSUB = isD ? Iop_Sub64Fx2 : Iop_Sub32Fx4;
+      IROp   opMUL = isD ? Iop_Mul64Fx2 : Iop_Mul32Fx4;
+      IRTemp rm    = mk_get_IR_rounding_mode();
+      IRTemp t1    = newTempV128();
+      IRTemp t2    = newTempV128();
+      // FIXME: double rounding; use FMA primops instead
+      assign(t1, triop(opMUL, mkexpr(rm), getQReg128(nn), mkexpr(dupd)));
+      assign(t2, triop(isSUB ? opSUB : opADD,
+                       mkexpr(rm), getQReg128(dd), mkexpr(t1)));
+      putQReg128(dd,
+                 mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(isD ? 3 : 2,
+                                                         mkexpr(t2))));
+      const HChar c = isD ? 'd' : 's';
+      DIP("%s %c%u, %c%u, %s.%c[%u]\n", isSUB ? "fmls" : "fmla",
+          c, dd, c, nn, nameQReg128(mm), c, index);
+      return True;
+   }
+
+   if (size >= X10 && opcode == BITS4(1,0,0,1)) {
+      /* -------- 0,1x,1001 FMUL  d_d_d[], s_s_s[] -------- */
+      /* -------- 1,1x,1001 FMULX d_d_d[], s_s_s[] -------- */
+      Bool isD    = (size & 1) == 1;
+      Bool isMULX = bitU == 1;
+      UInt index;
+      if      (!isD)             index = (bitH << 1) | bitL;
+      else if (isD && bitL == 0) index = bitH;
+      else return False; // sz:L == x11 => unallocated encoding
+      vassert(index < (isD ? 2 : 4));
+      IRType ity   = isD ? Ity_F64 : Ity_F32;
+      IRTemp elem  = newTemp(ity);
+      UInt   mm    = (bitM << 4) | mmLO4;
+      assign(elem, getQRegLane(mm, index, ity));
+      IRTemp dupd  = math_DUP_TO_V128(elem, ity);
+      IROp   opMUL = isD ? Iop_Mul64Fx2 : Iop_Mul32Fx4;
+      IRTemp rm    = mk_get_IR_rounding_mode();
+      IRTemp t1    = newTempV128();
+      // KLUDGE: FMULX is treated the same way as FMUL.  That can't be right.
+      assign(t1, triop(opMUL, mkexpr(rm), getQReg128(nn), mkexpr(dupd)));
+      putQReg128(dd,
+                 mkexpr(math_ZERO_ALL_EXCEPT_LOWEST_LANE(isD ? 3 : 2,
+                                                         mkexpr(t1))));
+      const HChar c = isD ? 'd' : 's';
+      DIP("%s %c%u, %c%u, %s.%c[%u]\n", isMULX ? "fmulx" : "fmul",
+          c, dd, c, nn, nameQReg128(mm), c, index);
+      return True;
+   }
+
+   if (bitU == 0 
+       && (opcode == BITS4(1,0,1,1)
+           || opcode == BITS4(0,0,1,1) || opcode == BITS4(0,1,1,1))) {
+      /* -------- 0,xx,1011 SQDMULL s/h variants only -------- */ // 0 (ks)
+      /* -------- 0,xx,0011 SQDMLAL s/h variants only -------- */ // 1
+      /* -------- 0,xx,0111 SQDMLSL s/h variants only -------- */ // 2
+      /* Widens, and size refers to the narrowed lanes. */
+      UInt ks = 3;
+      switch (opcode) {
+         case BITS4(1,0,1,1): ks = 0; break;
+         case BITS4(0,0,1,1): ks = 1; break;
+         case BITS4(0,1,1,1): ks = 2; break;
+         default: vassert(0);
+      }
+      vassert(ks <= 2); /* ks is unsigned, so >= 0 would be vacuous */
+      UInt mm  = 32; // invalid
+      UInt ix  = 16; // invalid
+      switch (size) {
+         case X00:
+            return False; // h_b_b[] case is not allowed
+         case X01:
+            mm = mmLO4; ix = (bitH << 2) | (bitL << 1) | (bitM << 0); break;
+         case X10:
+            mm = (bitM << 4) | mmLO4; ix = (bitH << 1) | (bitL << 0); break;
+         case X11:
+            return False; // q_d_d[] case is not allowed
+         default:
+            vassert(0);
+      }
+      vassert(mm < 32 && ix < 16);
+      IRTemp vecN, vecD, res, sat1q, sat1n, sat2q, sat2n;
+      vecN = vecD = res = sat1q = sat1n = sat2q = sat2n = IRTemp_INVALID;
+      newTempsV128_2(&vecN, &vecD);
+      assign(vecN, getQReg128(nn));
+      IRTemp vecM  = math_DUP_VEC_ELEM(getQReg128(mm), size, ix);
+      assign(vecD, getQReg128(dd));
+      math_SQDMULL_ACC(&res, &sat1q, &sat1n, &sat2q, &sat2n,
+                       False/*!is2*/, size, "mas"[ks],
+                       vecN, vecM, ks == 0 ? IRTemp_INVALID : vecD);
+      IROp opZHI = mkVecZEROHIxxOFV128(size+1);
+      putQReg128(dd, unop(opZHI, mkexpr(res)));
+      vassert(sat1q != IRTemp_INVALID && sat1n != IRTemp_INVALID);
+      updateQCFLAGwithDifferenceZHI(sat1q, sat1n, opZHI);
+      if (sat2q != IRTemp_INVALID || sat2n != IRTemp_INVALID) {
+         updateQCFLAGwithDifferenceZHI(sat2q, sat2n, opZHI);
+      }
+      /* Fixed: name was "sqmull" (missing 'd'), and the vector operand
+         printed is mm, not dd. */
+      const HChar* nm        = ks == 0 ? "sqdmull"
+                                       : (ks == 1 ? "sqdmlal" : "sqdmlsl");
+      const HChar  arrNarrow = "bhsd"[size];
+      const HChar  arrWide   = "bhsd"[size+1];
+      DIP("%s %c%u, %c%u, v%u.%c[%u]\n",
+          nm, arrWide, dd, arrNarrow, nn, mm, arrNarrow, ix);
+      return True;
+   }
+
+   if (opcode == BITS4(1,1,0,0) || opcode == BITS4(1,1,0,1)) {
+      /* -------- 0,xx,1100 SQDMULH s and h variants only -------- */
+      /* -------- 0,xx,1101 SQRDMULH s and h variants only -------- */
+      UInt mm  = 32; // invalid
+      UInt ix  = 16; // invalid
+      switch (size) {
+         case X00:
+            return False; // b case is not allowed
+         case X01:
+            mm = mmLO4; ix = (bitH << 2) | (bitL << 1) | (bitM << 0); break;
+         case X10:
+            mm = (bitM << 4) | mmLO4; ix = (bitH << 1) | (bitL << 0); break;
+         case X11:
+            return False; // q case is not allowed
+         default:
+            vassert(0);
+      }
+      vassert(mm < 32 && ix < 16);
+      Bool isR = opcode == BITS4(1,1,0,1);
+      IRTemp res, sat1q, sat1n, vN, vM;
+      res = sat1q = sat1n = vN = vM = IRTemp_INVALID;
+      vN = newTempV128();
+      assign(vN, getQReg128(nn));
+      vM = math_DUP_VEC_ELEM(getQReg128(mm), size, ix);
+      math_SQDMULH(&res, &sat1q, &sat1n, isR, size, vN, vM);
+      IROp opZHI = mkVecZEROHIxxOFV128(size);
+      putQReg128(dd, unop(opZHI, mkexpr(res)));
+      updateQCFLAGwithDifferenceZHI(sat1q, sat1n, opZHI);
+      const HChar* nm  = isR ? "sqrdmulh" : "sqdmulh";
+      HChar ch         = size == X01 ? 'h' : 's';
+      /* Fixed: args for "v%u.%c" were (ch, dd); the vector operand is
+         mm, followed by the lane-type character ch. */
+      DIP("%s %c%u, %c%u, v%u.%c[%u]\n", nm, ch, dd, ch, nn, mm, ch, ix);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_shift_by_immediate(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31    28     22   18   15     10 9 4
+      0 q u 011110 immh immb opcode 1  n d
+      Decode fields: u,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(28,23) != BITS6(0,1,1,1,1,0) || INSN(10,10) != 1) {
+      return False;
+   }
+   UInt bitQ   = INSN(30,30);
+   UInt bitU   = INSN(29,29);
+   UInt immh   = INSN(22,19);
+   UInt immb   = INSN(18,16);
+   UInt opcode = INSN(15,11);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+
+   if (opcode == BITS5(0,0,0,0,0) || opcode == BITS5(0,0,0,1,0)) {
+      /* -------- 0,00000 SSHR std7_std7_#imm -------- */
+      /* -------- 1,00000 USHR std7_std7_#imm -------- */
+      /* -------- 0,00010 SSRA std7_std7_#imm -------- */
+      /* -------- 1,00010 USRA std7_std7_#imm -------- */
+      /* laneTy, shift = case immh:immb of
+                         0001:xxx -> B, SHR:8-xxx
+                         001x:xxx -> H, SHR:16-xxxx
+                         01xx:xxx -> S, SHR:32-xxxxx
+                         1xxx:xxx -> D, SHR:64-xxxxxx
+                         other    -> invalid
+      */
+      UInt size  = 0;
+      UInt shift = 0;
+      Bool isQ   = bitQ == 1;
+      Bool isU   = bitU == 1;
+      Bool isAcc = opcode == BITS5(0,0,0,1,0);
+      Bool ok    = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+      if (!ok || (bitQ == 0 && size == X11)) return False;
+      vassert(size >= 0 && size <= 3);
+      UInt lanebits = 8 << size;
+      vassert(shift >= 1 && shift <= lanebits);
+      IROp    op  = isU ? mkVecSHRN(size) : mkVecSARN(size);
+      IRExpr* src = getQReg128(nn);
+      IRTemp  shf = newTempV128();
+      IRTemp  res = newTempV128();
+      if (shift == lanebits && isU) {
+         assign(shf, mkV128(0x0000));
+      } else {
+         UInt nudge = 0;
+         if (shift == lanebits) {
+            vassert(!isU);
+            nudge = 1;
+         }
+         assign(shf, binop(op, src, mkU8(shift - nudge)));
+      }
+      assign(res, isAcc ? binop(mkVecADD(size), getQReg128(dd), mkexpr(shf))
+                        : mkexpr(shf));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      HChar laneCh = "bhsd"[size];
+      UInt  nLanes = (isQ ? 128 : 64) / lanebits;
+      const HChar* nm = isAcc ? (isU ? "usra" : "ssra")
+                              : (isU ? "ushr" : "sshr");
+      DIP("%s %s.%u%c, %s.%u%c, #%u\n", nm,
+          nameQReg128(dd), nLanes, laneCh,
+          nameQReg128(nn), nLanes, laneCh, shift);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,1,0,0) || opcode == BITS5(0,0,1,1,0)) {
+      /* -------- 0,00100 SRSHR std7_std7_#imm -------- */
+      /* -------- 1,00100 URSHR std7_std7_#imm -------- */
+      /* -------- 0,00110 SRSRA std7_std7_#imm -------- */
+      /* -------- 1,00110 URSRA std7_std7_#imm -------- */
+      /* laneTy, shift = case immh:immb of
+                         0001:xxx -> B, SHR:8-xxx
+                         001x:xxx -> H, SHR:16-xxxx
+                         01xx:xxx -> S, SHR:32-xxxxx
+                         1xxx:xxx -> D, SHR:64-xxxxxx
+                         other    -> invalid
+      */
+      UInt size  = 0;
+      UInt shift = 0;
+      Bool isQ   = bitQ == 1;
+      Bool isU   = bitU == 1;
+      Bool isAcc = opcode == BITS5(0,0,1,1,0);
+      Bool ok    = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+      if (!ok || (bitQ == 0 && size == X11)) return False;
+      vassert(size >= 0 && size <= 3);
+      UInt lanebits = 8 << size;
+      vassert(shift >= 1 && shift <= lanebits);
+      IROp    op   = isU ? mkVecRSHU(size) : mkVecRSHS(size);
+      IRExpr* src  = getQReg128(nn);
+      IRTemp  imm8 = newTemp(Ity_I8);
+      assign(imm8, mkU8((UChar)(-shift)));
+      IRExpr* amt  = mkexpr(math_DUP_TO_V128(imm8, Ity_I8));
+      IRTemp  shf  = newTempV128();
+      IRTemp  res  = newTempV128();
+      assign(shf, binop(op, src, amt));
+      assign(res, isAcc ? binop(mkVecADD(size), getQReg128(dd), mkexpr(shf))
+                        : mkexpr(shf));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      HChar laneCh = "bhsd"[size];
+      UInt  nLanes = (isQ ? 128 : 64) / lanebits;
+      const HChar* nm = isAcc ? (isU ? "ursra" : "srsra")
+                              : (isU ? "urshr" : "srshr");
+      DIP("%s %s.%u%c, %s.%u%c, #%u\n", nm,
+          nameQReg128(dd), nLanes, laneCh,
+          nameQReg128(nn), nLanes, laneCh, shift);
+      return True;
+   }
+
+   if (bitU == 1 && opcode == BITS5(0,1,0,0,0)) {
+      /* -------- 1,01000 SRI std7_std7_#imm -------- */
+      /* laneTy, shift = case immh:immb of
+                         0001:xxx -> B, SHR:8-xxx
+                         001x:xxx -> H, SHR:16-xxxx
+                         01xx:xxx -> S, SHR:32-xxxxx
+                         1xxx:xxx -> D, SHR:64-xxxxxx
+                         other    -> invalid
+      */
+      UInt size  = 0;
+      UInt shift = 0;
+      Bool isQ   = bitQ == 1;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+      if (!ok || (bitQ == 0 && size == X11)) return False;
+      vassert(size >= 0 && size <= 3);
+      UInt lanebits = 8 << size;
+      vassert(shift >= 1 && shift <= lanebits);
+      IRExpr* src = getQReg128(nn);
+      IRTemp  res = newTempV128();
+      if (shift == lanebits) {
+         assign(res, getQReg128(dd));
+      } else {
+         assign(res, binop(mkVecSHRN(size), src, mkU8(shift)));
+         IRExpr* nmask = binop(mkVecSHLN(size),
+                               mkV128(0xFFFF), mkU8(lanebits - shift));
+         IRTemp  tmp   = newTempV128();
+         assign(tmp, binop(Iop_OrV128,
+                           mkexpr(res),
+                           binop(Iop_AndV128, getQReg128(dd), nmask)));
+         res = tmp;
+      }
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      HChar laneCh = "bhsd"[size];
+      UInt  nLanes = (isQ ? 128 : 64) / lanebits;
+      DIP("%s %s.%u%c, %s.%u%c, #%u\n", "sri",
+          nameQReg128(dd), nLanes, laneCh,
+          nameQReg128(nn), nLanes, laneCh, shift);
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,0,1,0)) {
+      /* -------- 0,01010 SHL std7_std7_#imm -------- */
+      /* -------- 1,01010 SLI std7_std7_#imm -------- */
+      /* laneTy, shift = case immh:immb of
+                         0001:xxx -> B, xxx
+                         001x:xxx -> H, xxxx
+                         01xx:xxx -> S, xxxxx
+                         1xxx:xxx -> D, xxxxxx
+                         other    -> invalid
+      */
+      UInt size  = 0;
+      UInt shift = 0;
+      Bool isSLI = bitU == 1;
+      Bool isQ   = bitQ == 1;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+      if (!ok || (bitQ == 0 && size == X11)) return False;
+      vassert(size >= 0 && size <= 3);
+      /* The shift encoding has opposite sign for the leftwards case.
+         Adjust shift to compensate. */
+      UInt lanebits = 8 << size;
+      shift = lanebits - shift;
+      vassert(shift >= 0 && shift < lanebits);
+      IROp    op  = mkVecSHLN(size);
+      IRExpr* src = getQReg128(nn);
+      IRTemp  res = newTempV128();
+      if (shift == 0) {
+         assign(res, src);
+      } else {
+         assign(res, binop(op, src, mkU8(shift)));
+         if (isSLI) {
+            IRExpr* nmask = binop(mkVecSHRN(size),
+                                  mkV128(0xFFFF), mkU8(lanebits - shift));
+            IRTemp  tmp   = newTempV128();
+            assign(tmp, binop(Iop_OrV128,
+                              mkexpr(res),
+                              binop(Iop_AndV128, getQReg128(dd), nmask)));
+            res = tmp;
+         }
+      }
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      HChar laneCh = "bhsd"[size];
+      UInt  nLanes = (isQ ? 128 : 64) / lanebits;
+      const HChar* nm = isSLI ? "sli" : "shl";
+      DIP("%s %s.%u%c, %s.%u%c, #%u\n", nm,
+          nameQReg128(dd), nLanes, laneCh,
+          nameQReg128(nn), nLanes, laneCh, shift);
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,1,1,0)
+       || (bitU == 1 && opcode == BITS5(0,1,1,0,0))) {
+      /* -------- 0,01110  SQSHL  std7_std7_#imm -------- */
+      /* -------- 1,01110  UQSHL  std7_std7_#imm -------- */
+      /* -------- 1,01100  SQSHLU std7_std7_#imm -------- */
+      UInt size  = 0;
+      UInt shift = 0;
+      Bool isQ   = bitQ == 1;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+      if (!ok || (bitQ == 0 && size == X11)) return False;
+      vassert(size >= 0 && size <= 3);
+      /* The shift encoding has opposite sign for the leftwards case.
+         Adjust shift to compensate. */
+      UInt lanebits = 8 << size;
+      shift = lanebits - shift;
+      vassert(shift >= 0 && shift < lanebits);
+      const HChar* nm = NULL;
+      /**/ if (bitU == 0 && opcode == BITS5(0,1,1,1,0)) nm = "sqshl";
+      else if (bitU == 1 && opcode == BITS5(0,1,1,1,0)) nm = "uqshl";
+      else if (bitU == 1 && opcode == BITS5(0,1,1,0,0)) nm = "sqshlu";
+      else vassert(0);
+      IRTemp qDiff1 = IRTemp_INVALID;
+      IRTemp qDiff2 = IRTemp_INVALID;
+      IRTemp res = IRTemp_INVALID;
+      IRTemp src = newTempV128();
+      assign(src, getQReg128(nn));
+      math_QSHL_IMM(&res, &qDiff1, &qDiff2, src, size, shift, nm);
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      updateQCFLAGwithDifferenceZHI(qDiff1, qDiff2,
+                                    isQ ? Iop_INVALID : Iop_ZeroHI64ofV128);
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, #%u\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, shift);
+      return True;
+   }
+
+   if (bitU == 0
+       && (opcode == BITS5(1,0,0,0,0) || opcode == BITS5(1,0,0,0,1))) {
+      /* -------- 0,10000  SHRN{,2} #imm -------- */
+      /* -------- 0,10001 RSHRN{,2} #imm -------- */
+      /* Narrows, and size is the narrow size. */
+      UInt size  = 0;
+      UInt shift = 0;
+      Bool is2   = bitQ == 1;
+      Bool isR   = opcode == BITS5(1,0,0,0,1);
+      Bool ok    = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+      if (!ok || size == X11) return False;
+      vassert(shift >= 1);
+      IRTemp t1 = newTempV128();
+      IRTemp t2 = newTempV128();
+      IRTemp t3 = newTempV128();
+      assign(t1, getQReg128(nn));
+      assign(t2, isR ? binop(mkVecADD(size+1),
+                             mkexpr(t1),
+                             mkexpr(math_VEC_DUP_IMM(size+1, 1ULL<<(shift-1))))
+                     : mkexpr(t1));
+      assign(t3, binop(mkVecSHRN(size+1), mkexpr(t2), mkU8(shift)));
+      IRTemp t4 = math_NARROW_LANES(t3, t3, size);
+      putLO64andZUorPutHI64(is2, dd, t4);
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      DIP("%s %s.%s, %s.%s, #%u\n", isR ? "rshrn" : "shrn",
+          nameQReg128(dd), arrNarrow, nameQReg128(nn), arrWide, shift);
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,0,1,0) || opcode == BITS5(1,0,0,1,1)
+       || (bitU == 1
+           && (opcode == BITS5(1,0,0,0,0) || opcode == BITS5(1,0,0,0,1)))) {
+      /* -------- 0,10010   SQSHRN{,2} #imm -------- */
+      /* -------- 1,10010   UQSHRN{,2} #imm -------- */
+      /* -------- 0,10011  SQRSHRN{,2} #imm -------- */
+      /* -------- 1,10011  UQRSHRN{,2} #imm -------- */
+      /* -------- 1,10000  SQSHRUN{,2} #imm -------- */
+      /* -------- 1,10001 SQRSHRUN{,2} #imm -------- */
+      UInt size  = 0;
+      UInt shift = 0;
+      Bool is2   = bitQ == 1;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&shift, &size, immh, immb);
+      if (!ok || size == X11) return False;
+      vassert(shift >= 1 && shift <= (8 << size));
+      const HChar* nm = "??";
+      IROp op = Iop_INVALID;
+      /* Decide on the name and the operation. */
+      /**/ if (bitU == 0 && opcode == BITS5(1,0,0,1,0)) {
+         nm = "sqshrn"; op = mkVecQANDqsarNNARROWSS(size);
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,1,0)) {
+         nm = "uqshrn"; op = mkVecQANDqshrNNARROWUU(size);
+      }
+      else if (bitU == 0 && opcode == BITS5(1,0,0,1,1)) {
+         nm = "sqrshrn"; op = mkVecQANDqrsarNNARROWSS(size);
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,1,1)) {
+         nm = "uqrshrn"; op = mkVecQANDqrshrNNARROWUU(size);
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,0,0)) {
+         nm = "sqshrun"; op = mkVecQANDqsarNNARROWSU(size);
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,0,1)) {
+         nm = "sqrshrun"; op = mkVecQANDqrsarNNARROWSU(size);
+      }
+      else vassert(0);
+      /* Compute the result (Q, shifted value) pair. */
+      IRTemp src128 = newTempV128();
+      assign(src128, getQReg128(nn));
+      IRTemp pair = newTempV128();
+      assign(pair, binop(op, mkexpr(src128), mkU8(shift)));
+      /* Update the result reg */
+      IRTemp res64in128 = newTempV128();
+      assign(res64in128, unop(Iop_ZeroHI64ofV128, mkexpr(pair)));
+      putLO64andZUorPutHI64(is2, dd, res64in128);
+      /* Update the Q flag. */
+      IRTemp q64q64 = newTempV128();
+      assign(q64q64, binop(Iop_InterleaveHI64x2, mkexpr(pair), mkexpr(pair)));
+      IRTemp z128 = newTempV128();
+      assign(z128, mkV128(0x0000));
+      updateQCFLAGwithDifference(q64q64, z128);
+      /* */
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      DIP("%s %s.%s, %s.%s, #%u\n", nm,
+          nameQReg128(dd), arrNarrow, nameQReg128(nn), arrWide, shift);
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,1,0,0)) {
+      /* -------- 0,10100 SSHLL{,2} #imm -------- */
+      /* -------- 1,10100 USHLL{,2} #imm -------- */
+      /* 31  28     22   18   15     9 4
+         0q0 011110 immh immb 101001 n d  SSHLL Vd.Ta, Vn.Tb, #sh
+         0q1 011110 immh immb 101001 n d  USHLL Vd.Ta, Vn.Tb, #sh
+         where Ta,Tb,sh
+           = case immh of 1xxx -> invalid
+                          01xx -> 2d, 2s(q0)/4s(q1),  immh:immb - 32 (0..31)
+                          001x -> 4s, 4h(q0)/8h(q1),  immh:immb - 16 (0..15)
+                          0001 -> 8h, 8b(q0)/16b(q1), immh:immb - 8  (0..7)
+                          0000 -> AdvSIMD modified immediate (???)
+      */
+      Bool    isQ   = bitQ == 1;
+      Bool    isU   = bitU == 1;
+      UInt    immhb = (immh << 3) | immb;
+      IRTemp  src   = newTempV128();
+      IRTemp  zero  = newTempV128();
+      IRExpr* res   = NULL;
+      UInt    sh    = 0;
+      const HChar* ta = "??";
+      const HChar* tb = "??";
+      assign(src, getQReg128(nn));
+      assign(zero, mkV128(0x0000));
+      if (immh & 8) {
+         /* invalid; don't assign to res */
+      }
+      else if (immh & 4) {
+         sh = immhb - 32;
+         vassert(sh < 32); /* so 32-sh is 1..32 */
+         ta = "2d";
+         tb = isQ ? "4s" : "2s";
+         IRExpr* tmp = isQ ? mk_InterleaveHI32x4(src, zero) 
+                           : mk_InterleaveLO32x4(src, zero);
+         res = binop(isU ? Iop_ShrN64x2 : Iop_SarN64x2, tmp, mkU8(32-sh));
+      }
+      else if (immh & 2) {
+         sh = immhb - 16;
+         vassert(sh < 16); /* so 16-sh is 1..16 */
+         ta = "4s";
+         tb = isQ ? "8h" : "4h";
+         IRExpr* tmp = isQ ? mk_InterleaveHI16x8(src, zero) 
+                           : mk_InterleaveLO16x8(src, zero);
+         res = binop(isU ? Iop_ShrN32x4 : Iop_SarN32x4, tmp, mkU8(16-sh));
+      }
+      else if (immh & 1) {
+         sh = immhb - 8;
+         vassert(sh < 8); /* so 8-sh is 1..8 */
+         ta = "8h";
+         tb = isQ ? "16b" : "8b";
+         IRExpr* tmp = isQ ? mk_InterleaveHI8x16(src, zero) 
+                           : mk_InterleaveLO8x16(src, zero);
+         res = binop(isU ? Iop_ShrN16x8 : Iop_SarN16x8, tmp, mkU8(8-sh));
+      } else {
+         vassert(immh == 0);
+         /* invalid; don't assign to res */
+      }
+      /* */
+      if (res) {
+         putQReg128(dd, res);
+         DIP("%cshll%s %s.%s, %s.%s, #%d\n",
+             isU ? 'u' : 's', isQ ? "2" : "",
+             nameQReg128(dd), ta, nameQReg128(nn), tb, sh);
+         return True;
+      }
+      return False;
+   }
+
+   if (opcode == BITS5(1,1,1,0,0)) {
+      /* -------- 0,11100 SCVTF {2d_2d,4s_4s,2s_2s}_imm -------- */
+      /* -------- 1,11100 UCVTF {2d_2d,4s_4s,2s_2s}_imm -------- */
+      /* If immh is of the form 00xx, the insn is invalid. */
+      if (immh < BITS4(0,1,0,0)) return False;
+      UInt size  = 0;
+      UInt fbits = 0;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&fbits, &size, immh, immb);
+      /* The following holds because immh is never zero. */
+      vassert(ok);
+      /* The following holds because immh >= 0100. */
+      vassert(size == X10 || size == X11);
+      Bool isD = size == X11;
+      Bool isU = bitU == 1;
+      Bool isQ = bitQ == 1;
+      if (isD && !isQ) return False; /* reject .1d case */
+      vassert(fbits >= 1 && fbits <= (isD ? 64 : 32));
+      Double  scale  = two_to_the_minus(fbits);
+      IRExpr* scaleE = isD ? IRExpr_Const(IRConst_F64(scale))
+                           : IRExpr_Const(IRConst_F32( (Float)scale ));
+      IROp    opMUL  = isD ? Iop_MulF64 : Iop_MulF32;
+      IROp    opCVT  = isU ? (isD ? Iop_I64UtoF64 : Iop_I32UtoF32)
+                           : (isD ? Iop_I64StoF64 : Iop_I32StoF32);
+      IRType tyF = isD ? Ity_F64 : Ity_F32;
+      IRType tyI = isD ? Ity_I64 : Ity_I32;
+      UInt nLanes = (isQ ? 2 : 1) * (isD ? 1 : 2);
+      vassert(nLanes == 2 || nLanes == 4);
+      for (UInt i = 0; i < nLanes; i++) {
+         IRTemp src = newTemp(tyI);
+         IRTemp res = newTemp(tyF);
+         IRTemp rm  = mk_get_IR_rounding_mode();
+         assign(src, getQRegLane(nn, i, tyI));
+         assign(res, triop(opMUL, mkexpr(rm),
+                                  binop(opCVT, mkexpr(rm), mkexpr(src)),
+                                  scaleE));
+         putQRegLane(dd, i, mkexpr(res));
+      }
+      if (!isQ) {
+         putQRegLane(dd, 1, mkU64(0));
+      }
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, #%u\n", isU ? "ucvtf" : "scvtf",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, fbits);
+      return True;
+   }
+
+   if (opcode == BITS5(1,1,1,1,1)) {
+      /* -------- 0,11111 FCVTZS {2d_2d,4s_4s,2s_2s}_imm -------- */
+      /* -------- 1,11111 FCVTZU {2d_2d,4s_4s,2s_2s}_imm -------- */
+      /* If immh is of the form 00xx, the insn is invalid. */
+      if (immh < BITS4(0,1,0,0)) return False;
+      UInt size  = 0;
+      UInt fbits = 0;
+      Bool ok    = getLaneInfo_IMMH_IMMB(&fbits, &size, immh, immb);
+      /* The following holds because immh is never zero. */
+      vassert(ok);
+      /* The following holds because immh >= 0100. */
+      vassert(size == X10 || size == X11);
+      Bool isD = size == X11;
+      Bool isU = bitU == 1;
+      Bool isQ = bitQ == 1;
+      if (isD && !isQ) return False; /* reject .1d case */
+      vassert(fbits >= 1 && fbits <= (isD ? 64 : 32));
+      Double  scale  = two_to_the_plus(fbits);
+      IRExpr* scaleE = isD ? IRExpr_Const(IRConst_F64(scale))
+                           : IRExpr_Const(IRConst_F32( (Float)scale ));
+      IROp    opMUL  = isD ? Iop_MulF64 : Iop_MulF32;
+      IROp    opCVT  = isU ? (isD ? Iop_F64toI64U : Iop_F32toI32U)
+                           : (isD ? Iop_F64toI64S : Iop_F32toI32S);
+      IRType tyF = isD ? Ity_F64 : Ity_F32;
+      IRType tyI = isD ? Ity_I64 : Ity_I32;
+      UInt nLanes = (isQ ? 2 : 1) * (isD ? 1 : 2);
+      vassert(nLanes == 2 || nLanes == 4);
+      for (UInt i = 0; i < nLanes; i++) {
+         IRTemp src = newTemp(tyF);
+         IRTemp res = newTemp(tyI);
+         IRTemp rm  = newTemp(Ity_I32);
+         assign(src, getQRegLane(nn, i, tyF));
+         assign(rm,  mkU32(Irrm_ZERO));
+         assign(res, binop(opCVT, mkexpr(rm), 
+                                  triop(opMUL, mkexpr(rm),
+                                               mkexpr(src), scaleE)));
+         putQRegLane(dd, i, mkexpr(res));
+      }
+      if (!isQ) {
+         putQRegLane(dd, 1, mkU64(0));
+      }
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, #%u\n", isU ? "fcvtzu" : "fcvtzs",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, fbits);
+      return True;
+   }
+
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   return False;
+#  undef INSN
+}
+
+
+/* Decode and translate the AdvSIMD "three registers, different lanes"
+   instruction class: widening/narrowing integer ops (SADDL/W, SSUBL/W,
+   ADDHN, SUBHN, SABAL, SABDL, S/UMULL/MLAL/MLSL, SQDMULL/MLAL/MLSL,
+   PMULL and their unsigned/rounding/"{2}" variants).  Returns True and
+   emits IR (via putQReg128 / putLO64andZUorPutHI64 and, where relevant,
+   updateQCFLAGwithDifference) iff |insn| was successfully decoded;
+   returns False for any encoding this class does not cover.
+   NOTE(review): |dres| is never referenced in this body -- presumably
+   kept for signature uniformity with the sibling dis_AdvSIMD_* decoders;
+   confirm against the dispatch site. */
+static
+Bool dis_AdvSIMD_three_different(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31 30 29 28    23   21 20 15     11 9 4
+      0  Q  U  01110 size 1  m  opcode 00 n d
+      Decode fields: u,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(28,24) != BITS5(0,1,1,1,0)
+       || INSN(21,21) != 1
+       || INSN(11,10) != BITS2(0,0)) {
+      return False;
+   }
+   UInt bitQ   = INSN(30,30);
+   UInt bitU   = INSN(29,29);
+   UInt size   = INSN(23,22);
+   UInt mm     = INSN(20,16);
+   UInt opcode = INSN(15,12);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+   vassert(size < 4);
+   /* is2: the "{2}" forms -- the helpers below use it to select which
+      half of the narrow-lane sources to widen (see
+      math_WIDEN_LO_OR_HI_LANES) or which half of the destination to
+      write (see putLO64andZUorPutHI64). */
+   Bool is2    = bitQ == 1;
+
+   if (opcode == BITS4(0,0,0,0) || opcode == BITS4(0,0,1,0)) {
+      /* -------- 0,0000 SADDL{2} -------- */
+      /* -------- 1,0000 UADDL{2} -------- */
+      /* -------- 0,0010 SSUBL{2} -------- */
+      /* -------- 1,0010 USUBL{2} -------- */
+      /* Widens, and size refers to the narrowed lanes. */
+      if (size == X11) return False;
+      vassert(size <= 2);
+      Bool   isU   = bitU == 1;
+      Bool   isADD = opcode == BITS4(0,0,0,0);
+      /* Widen both narrow sources, then add/sub in the wide lane size. */
+      IRTemp argL  = math_WIDEN_LO_OR_HI_LANES(isU, is2, size, getQReg128(nn));
+      IRTemp argR  = math_WIDEN_LO_OR_HI_LANES(isU, is2, size, getQReg128(mm));
+      IRTemp res   = newTempV128();
+      assign(res, binop(isADD ? mkVecADD(size+1) : mkVecSUB(size+1),
+                        mkexpr(argL), mkexpr(argR)));
+      putQReg128(dd, mkexpr(res));
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      const HChar* nm        = isADD ? (isU ? "uaddl" : "saddl")
+                                     : (isU ? "usubl" : "ssubl");
+      DIP("%s%s %s.%s, %s.%s, %s.%s\n", nm, is2 ? "2" : "",
+          nameQReg128(dd), arrWide,
+          nameQReg128(nn), arrNarrow, nameQReg128(mm), arrNarrow);
+      return True;
+   }
+
+   if (opcode == BITS4(0,0,0,1) || opcode == BITS4(0,0,1,1)) {
+      /* -------- 0,0001 SADDW{2} -------- */
+      /* -------- 1,0001 UADDW{2} -------- */
+      /* -------- 0,0011 SSUBW{2} -------- */
+      /* -------- 1,0011 USUBW{2} -------- */
+      /* Widens, and size refers to the narrowed lanes. */
+      if (size == X11) return False;
+      vassert(size <= 2);
+      Bool   isU   = bitU == 1;
+      Bool   isADD = opcode == BITS4(0,0,0,1);
+      /* Unlike the *L forms, only the second operand (mm) is widened;
+         nn is already in the wide lane size. */
+      IRTemp argR  = math_WIDEN_LO_OR_HI_LANES(isU, is2, size, getQReg128(mm));
+      IRTemp res   = newTempV128();
+      assign(res, binop(isADD ? mkVecADD(size+1) : mkVecSUB(size+1),
+                        getQReg128(nn), mkexpr(argR)));
+      putQReg128(dd, mkexpr(res));
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      const HChar* nm        = isADD ? (isU ? "uaddw" : "saddw")
+                                     : (isU ? "usubw" : "ssubw");
+      DIP("%s%s %s.%s, %s.%s, %s.%s\n", nm, is2 ? "2" : "",
+          nameQReg128(dd), arrWide,
+          nameQReg128(nn), arrWide, nameQReg128(mm), arrNarrow);
+      return True;
+   }
+
+   if (opcode == BITS4(0,1,0,0) || opcode == BITS4(0,1,1,0)) {
+      /* -------- 0,0100  ADDHN{2} -------- */
+      /* -------- 1,0100 RADDHN{2} -------- */
+      /* -------- 0,0110  SUBHN{2} -------- */
+      /* -------- 1,0110 RSUBHN{2} -------- */
+      /* Narrows, and size refers to the narrowed lanes. */
+      if (size == X11) return False;
+      vassert(size <= 2);
+      /* Per-size narrow lane width in bits; used both as the right-shift
+         amount that extracts the high half of each wide lane, and (minus
+         one bit) to build the rounding constant for the R forms. */
+      const UInt shift[3] = { 8, 16, 32 };
+      Bool isADD = opcode == BITS4(0,1,0,0);
+      Bool isR   = bitU == 1;
+      /* Combined elements in wide lanes */
+      IRTemp  wide  = newTempV128();
+      IRExpr* wideE = binop(isADD ? mkVecADD(size+1) : mkVecSUB(size+1),
+                            getQReg128(nn), getQReg128(mm));
+      if (isR) {
+         /* Rounding variant: add 1 << (laneBits-1) before truncating. */
+         wideE = binop(mkVecADD(size+1),
+                       wideE,
+                       mkexpr(math_VEC_DUP_IMM(size+1,
+                                               1ULL << (shift[size]-1))));
+      }
+      assign(wide, wideE);
+      /* Top halves of elements, still in wide lanes */
+      IRTemp shrd = newTempV128();
+      assign(shrd, binop(mkVecSHRN(size+1), mkexpr(wide), mkU8(shift[size])));
+      /* Elements now compacted into lower 64 bits */
+      IRTemp new64 = newTempV128();
+      assign(new64, binop(mkVecCATEVENLANES(size), mkexpr(shrd), mkexpr(shrd)));
+      putLO64andZUorPutHI64(is2, dd, new64);
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      const HChar* nm = isADD ? (isR ? "raddhn" : "addhn")
+                              : (isR ? "rsubhn" : "subhn");
+      DIP("%s%s %s.%s, %s.%s, %s.%s\n", nm, is2 ? "2" : "",
+          nameQReg128(dd), arrNarrow,
+          nameQReg128(nn), arrWide, nameQReg128(mm), arrWide);
+      return True;
+   }
+
+   if (opcode == BITS4(0,1,0,1) || opcode == BITS4(0,1,1,1)) {
+      /* -------- 0,0101 SABAL{2} -------- */
+      /* -------- 1,0101 UABAL{2} -------- */
+      /* -------- 0,0111 SABDL{2} -------- */
+      /* -------- 1,0111 UABDL{2} -------- */
+      /* Widens, and size refers to the narrowed lanes. */
+      if (size == X11) return False;
+      vassert(size <= 2);
+      Bool   isU   = bitU == 1;
+      /* isACC: the *ABAL forms accumulate the absolute difference into
+         dd; the *ABDL forms just write it. */
+      Bool   isACC = opcode == BITS4(0,1,0,1);
+      IRTemp argL  = math_WIDEN_LO_OR_HI_LANES(isU, is2, size, getQReg128(nn));
+      IRTemp argR  = math_WIDEN_LO_OR_HI_LANES(isU, is2, size, getQReg128(mm));
+      IRTemp abd   = math_ABD(isU, size+1, mkexpr(argL), mkexpr(argR));
+      IRTemp res   = newTempV128();
+      assign(res, isACC ? binop(mkVecADD(size+1), mkexpr(abd), getQReg128(dd))
+                        : mkexpr(abd));
+      putQReg128(dd, mkexpr(res));
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      const HChar* nm        = isACC ? (isU ? "uabal" : "sabal")
+                                     : (isU ? "uabdl" : "sabdl");
+      DIP("%s%s %s.%s, %s.%s, %s.%s\n", nm, is2 ? "2" : "",
+          nameQReg128(dd), arrWide,
+          nameQReg128(nn), arrNarrow, nameQReg128(mm), arrNarrow);
+      return True;
+   }
+
+   if (opcode == BITS4(1,1,0,0)
+       || opcode == BITS4(1,0,0,0) || opcode == BITS4(1,0,1,0)) {
+      /* -------- 0,1100  SMULL{2} -------- */ // 0 (ks)
+      /* -------- 1,1100  UMULL{2} -------- */ // 0
+      /* -------- 0,1000  SMLAL{2} -------- */ // 1
+      /* -------- 1,1000  UMLAL{2} -------- */ // 1
+      /* -------- 0,1010  SMLSL{2} -------- */ // 2
+      /* -------- 1,1010  UMLSL{2} -------- */ // 2
+      /* Widens, and size refers to the narrowed lanes. */
+      /* ks selects mul(0) / mul-acc(1) / mul-sub(2); it also indexes the
+         "mas" string passed to math_MULL_ACC below. */
+      UInt ks = 3;
+      switch (opcode) {
+         case BITS4(1,1,0,0): ks = 0; break;
+         case BITS4(1,0,0,0): ks = 1; break;
+         case BITS4(1,0,1,0): ks = 2; break;
+         default: vassert(0);
+      }
+      /* NOTE(review): ks is UInt, so the >= 0 half of this assertion is
+         vacuously true; the <= 2 half is the meaningful check. */
+      vassert(ks >= 0 && ks <= 2);
+      if (size == X11) return False;
+      vassert(size <= 2);
+      Bool   isU  = bitU == 1;
+      IRTemp vecN = newTempV128();
+      IRTemp vecM = newTempV128();
+      IRTemp vecD = newTempV128();
+      assign(vecN, getQReg128(nn));
+      assign(vecM, getQReg128(mm));
+      assign(vecD, getQReg128(dd));
+      IRTemp res = IRTemp_INVALID;
+      /* For the plain MULL case (ks == 0) the accumulator input is not
+         needed, hence IRTemp_INVALID. */
+      math_MULL_ACC(&res, is2, isU, size, "mas"[ks],
+                    vecN, vecM, ks == 0 ? IRTemp_INVALID : vecD);
+      putQReg128(dd, mkexpr(res));
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      const HChar* nm        = ks == 0 ? "mull" : (ks == 1 ? "mlal" : "mlsl");
+      DIP("%c%s%s %s.%s, %s.%s, %s.%s\n", isU ? 'u' : 's', nm, is2 ? "2" : "",
+          nameQReg128(dd), arrWide,
+          nameQReg128(nn), arrNarrow, nameQReg128(mm), arrNarrow);
+      return True;
+   }
+
+   if (bitU == 0
+       && (opcode == BITS4(1,1,0,1)
+           || opcode == BITS4(1,0,0,1) || opcode == BITS4(1,0,1,1))) {
+      /* -------- 0,1101  SQDMULL{2} -------- */ // 0 (ks)
+      /* -------- 0,1001  SQDMLAL{2} -------- */ // 1
+      /* -------- 0,1011  SQDMLSL{2} -------- */ // 2
+      /* Widens, and size refers to the narrowed lanes. */
+      /* ks selects mul(0) / mul-acc(1) / mul-sub(2), as for [SU]MULL
+         above. */
+      UInt ks = 3;
+      switch (opcode) {
+         case BITS4(1,1,0,1): ks = 0; break;
+         case BITS4(1,0,0,1): ks = 1; break;
+         case BITS4(1,0,1,1): ks = 2; break;
+         default: vassert(0);
+      }
+      /* NOTE(review): as above, ks >= 0 is vacuous for a UInt. */
+      vassert(ks >= 0 && ks <= 2);
+      /* Byte-sized lanes are not available for the saturating doubling
+         forms, hence the extra X00 rejection. */
+      if (size == X00 || size == X11) return False;
+      vassert(size <= 2);
+      IRTemp vecN, vecM, vecD, res, sat1q, sat1n, sat2q, sat2n;
+      vecN = vecM = vecD = res = sat1q = sat1n = sat2q = sat2n = IRTemp_INVALID;
+      newTempsV128_3(&vecN, &vecM, &vecD);
+      assign(vecN, getQReg128(nn));
+      assign(vecM, getQReg128(mm));
+      assign(vecD, getQReg128(dd));
+      math_SQDMULL_ACC(&res, &sat1q, &sat1n, &sat2q, &sat2n,
+                       is2, size, "mas"[ks],
+                       vecN, vecM, ks == 0 ? IRTemp_INVALID : vecD);
+      putQReg128(dd, mkexpr(res));
+      /* Fold the saturation indications from the helper into QC.  The
+         second (sat2*) pair is only produced for the accumulating
+         forms. */
+      vassert(sat1q != IRTemp_INVALID && sat1n != IRTemp_INVALID);
+      updateQCFLAGwithDifference(sat1q, sat1n);
+      if (sat2q != IRTemp_INVALID || sat2n != IRTemp_INVALID) {
+         updateQCFLAGwithDifference(sat2q, sat2n);
+      }
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      const HChar* nm        = ks == 0 ? "sqdmull"
+                                       : (ks == 1 ? "sqdmlal" : "sqdmlsl");
+      DIP("%s%s %s.%s, %s.%s, %s.%s\n", nm, is2 ? "2" : "",
+          nameQReg128(dd), arrWide,
+          nameQReg128(nn), arrNarrow, nameQReg128(mm), arrNarrow);
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS4(1,1,1,0)) {
+      /* -------- 0,1110  PMULL{2} -------- */
+      /* Widens, and size refers to the narrowed lanes. */
+      /* Only the 8-bit polynomial multiply is handled here (the IR op
+         used below is Iop_PolynomialMull8x8); other sizes are
+         rejected. */
+      if (size != X00) return False;
+      IRTemp res
+         = math_BINARY_WIDENING_V128(is2, Iop_PolynomialMull8x8,
+                                     getQReg128(nn), getQReg128(mm));
+      putQReg128(dd, mkexpr(res));
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      DIP("%s%s %s.%s, %s.%s, %s.%s\n", "pmull", is2 ? "2" : "",
+          nameQReg128(dd), arrNarrow,
+          nameQReg128(nn), arrWide, nameQReg128(mm), arrWide);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_three_same(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31 30 29 28    23   21 20 15     10 9 4
+      0  Q  U  01110 size 1  m  opcode 1  n d
+      Decode fields: u,size,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(28,24) != BITS5(0,1,1,1,0)
+       || INSN(21,21) != 1
+       || INSN(10,10) != 1) {
+      return False;
+   }
+   UInt bitQ   = INSN(30,30);
+   UInt bitU   = INSN(29,29);
+   UInt size   = INSN(23,22);
+   UInt mm     = INSN(20,16);
+   UInt opcode = INSN(15,11);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+   vassert(size < 4);
+
+   if (opcode == BITS5(0,0,0,0,0) || opcode == BITS5(0,0,1,0,0)) {
+      /* -------- 0,xx,00000 SHADD std6_std6_std6 -------- */
+      /* -------- 1,xx,00000 UHADD std6_std6_std6 -------- */
+      /* -------- 0,xx,00100 SHSUB std6_std6_std6 -------- */
+      /* -------- 1,xx,00100 UHSUB std6_std6_std6 -------- */
+      if (size == X11) return False;
+      Bool isADD = opcode == BITS5(0,0,0,0,0);
+      Bool isU   = bitU == 1;
+      /* Widen both args out, do the math, narrow to final result. */
+      IRTemp argL   = newTempV128();
+      IRTemp argLhi = IRTemp_INVALID;
+      IRTemp argLlo = IRTemp_INVALID;
+      IRTemp argR   = newTempV128();
+      IRTemp argRhi = IRTemp_INVALID;
+      IRTemp argRlo = IRTemp_INVALID;
+      IRTemp resHi  = newTempV128();
+      IRTemp resLo  = newTempV128();
+      IRTemp res    = IRTemp_INVALID;
+      assign(argL, getQReg128(nn));
+      argLlo = math_WIDEN_LO_OR_HI_LANES(isU, False, size, mkexpr(argL));
+      argLhi = math_WIDEN_LO_OR_HI_LANES(isU, True,  size, mkexpr(argL));
+      assign(argR, getQReg128(mm));
+      argRlo = math_WIDEN_LO_OR_HI_LANES(isU, False, size, mkexpr(argR));
+      argRhi = math_WIDEN_LO_OR_HI_LANES(isU, True,  size, mkexpr(argR));
+      IROp opADDSUB = isADD ? mkVecADD(size+1) : mkVecSUB(size+1);
+      IROp opSxR = isU ? mkVecSHRN(size+1) : mkVecSARN(size+1);
+      assign(resHi, binop(opSxR,
+                          binop(opADDSUB, mkexpr(argLhi), mkexpr(argRhi)),
+                          mkU8(1)));
+      assign(resLo, binop(opSxR,
+                          binop(opADDSUB, mkexpr(argLlo), mkexpr(argRlo)),
+                          mkU8(1)));
+      res = math_NARROW_LANES ( resHi, resLo, size );
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nm  = isADD ? (isU ? "uhadd" : "shadd") 
+                               : (isU ? "uhsub" : "shsub");
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,0,1,0)) {
+      /* -------- 0,xx,00010 SRHADD std7_std7_std7 -------- */
+      /* -------- 1,xx,00010 URHADD std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool   isU  = bitU == 1;
+      IRTemp argL = newTempV128();
+      IRTemp argR = newTempV128();
+      assign(argL, getQReg128(nn));
+      assign(argR, getQReg128(mm));
+      IRTemp res = math_RHADD(size, isU, argL, argR);
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", isU ? "urhadd" : "srhadd",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,0,0,1) || opcode == BITS5(0,0,1,0,1)) {
+      /* -------- 0,xx,00001 SQADD std7_std7_std7 -------- */
+      /* -------- 1,xx,00001 UQADD std7_std7_std7 -------- */
+      /* -------- 0,xx,00101 SQSUB std7_std7_std7 -------- */
+      /* -------- 1,xx,00101 UQSUB std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool isADD = opcode == BITS5(0,0,0,0,1);
+      Bool isU   = bitU == 1;
+      IROp qop   = Iop_INVALID;
+      IROp nop   = Iop_INVALID;
+      if (isADD) {
+         qop = isU ? mkVecQADDU(size) : mkVecQADDS(size);
+         nop = mkVecADD(size);
+      } else {
+         qop = isU ? mkVecQSUBU(size) : mkVecQSUBS(size);
+         nop = mkVecSUB(size);
+      }
+      IRTemp argL = newTempV128();
+      IRTemp argR = newTempV128();
+      IRTemp qres = newTempV128();
+      IRTemp nres = newTempV128();
+      assign(argL, getQReg128(nn));
+      assign(argR, getQReg128(mm));
+      assign(qres, math_MAYBE_ZERO_HI64_fromE(
+                      bitQ, binop(qop, mkexpr(argL), mkexpr(argR))));
+      assign(nres, math_MAYBE_ZERO_HI64_fromE(
+                      bitQ, binop(nop, mkexpr(argL), mkexpr(argR))));
+      putQReg128(dd, mkexpr(qres));
+      updateQCFLAGwithDifference(qres, nres);
+      const HChar* nm  = isADD ? (isU ? "uqadd" : "sqadd") 
+                               : (isU ? "uqsub" : "sqsub");
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS5(0,0,0,1,1)) {
+      /* -------- 0,00,00011 AND 16b_16b_16b, 8b_8b_8b -------- */
+      /* -------- 0,01,00011 BIC 16b_16b_16b, 8b_8b_8b -------- */
+      /* -------- 0,10,00011 ORR 16b_16b_16b, 8b_8b_8b -------- */
+      /* -------- 0,10,00011 ORN 16b_16b_16b, 8b_8b_8b -------- */
+      Bool   isORx  = (size & 2) == 2;
+      Bool   invert = (size & 1) == 1;
+      IRTemp res    = newTempV128();
+      assign(res, binop(isORx ? Iop_OrV128 : Iop_AndV128,
+                        getQReg128(nn),
+                        invert ? unop(Iop_NotV128, getQReg128(mm))
+                               : getQReg128(mm)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* names[4] = { "and", "bic", "orr", "orn" };
+      const HChar* ar = bitQ == 1 ? "16b" : "8b";
+      DIP("%s %s.%s, %s.%s, %s.%s\n", names[INSN(23,22)],
+          nameQReg128(dd), ar, nameQReg128(nn), ar, nameQReg128(mm), ar);
+      return True;
+   }
+
+   if (bitU == 1 && opcode == BITS5(0,0,0,1,1)) {
+      /* -------- 1,00,00011 EOR 16b_16b_16b, 8b_8b_8b -------- */
+      /* -------- 1,01,00011 BSL 16b_16b_16b, 8b_8b_8b -------- */
+      /* -------- 1,10,00011 BIT 16b_16b_16b, 8b_8b_8b -------- */
+      /* -------- 1,10,00011 BIF 16b_16b_16b, 8b_8b_8b -------- */
+      IRTemp argD = newTempV128();
+      IRTemp argN = newTempV128();
+      IRTemp argM = newTempV128();
+      assign(argD, getQReg128(dd));
+      assign(argN, getQReg128(nn));
+      assign(argM, getQReg128(mm));
+      const IROp opXOR = Iop_XorV128;
+      const IROp opAND = Iop_AndV128;
+      const IROp opNOT = Iop_NotV128;
+      IRTemp res = newTempV128();
+      switch (size) {
+         case BITS2(0,0): /* EOR */
+            assign(res, binop(opXOR, mkexpr(argM), mkexpr(argN)));
+            break;
+         case BITS2(0,1): /* BSL */
+            assign(res, binop(opXOR, mkexpr(argM),
+                              binop(opAND,
+                                    binop(opXOR, mkexpr(argM), mkexpr(argN)),
+                                          mkexpr(argD))));
+            break;
+         case BITS2(1,0): /* BIT */
+            assign(res, binop(opXOR, mkexpr(argD),
+                              binop(opAND,
+                                    binop(opXOR, mkexpr(argD), mkexpr(argN)),
+                                    mkexpr(argM))));
+            break;
+         case BITS2(1,1): /* BIF */
+            assign(res, binop(opXOR, mkexpr(argD),
+                              binop(opAND,
+                                    binop(opXOR, mkexpr(argD), mkexpr(argN)),
+                                    unop(opNOT, mkexpr(argM)))));
+            break;
+         default:
+            vassert(0);
+      }
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nms[4] = { "eor", "bsl", "bit", "bif" };
+      const HChar* arr = bitQ == 1 ? "16b" : "8b";
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nms[size],
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,1,1,0)) {
+      /* -------- 0,xx,00110 CMGT std7_std7_std7 -------- */ // >s
+      /* -------- 1,xx,00110 CMHI std7_std7_std7 -------- */ // >u
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool   isGT  = bitU == 0;
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = getQReg128(mm);
+      IRTemp  res  = newTempV128();
+      assign(res,
+             isGT ? binop(mkVecCMPGTS(size), argL, argR)
+                  : binop(mkVecCMPGTU(size), argL, argR));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nm  = isGT ? "cmgt" : "cmhi";
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,1,1,1)) {
+      /* -------- 0,xx,00111 CMGE std7_std7_std7 -------- */ // >=s
+      /* -------- 1,xx,00111 CMHS std7_std7_std7 -------- */ // >=u
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool    isGE = bitU == 0;
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = getQReg128(mm);
+      IRTemp  res  = newTempV128();
+      assign(res,
+             isGE ? unop(Iop_NotV128, binop(mkVecCMPGTS(size), argR, argL))
+                  : unop(Iop_NotV128, binop(mkVecCMPGTU(size), argR, argL)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nm  = isGE ? "cmge" : "cmhs";
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,0,0,0) || opcode == BITS5(0,1,0,1,0)) {
+      /* -------- 0,xx,01000 SSHL  std7_std7_std7 -------- */
+      /* -------- 0,xx,01010 SRSHL std7_std7_std7 -------- */
+      /* -------- 1,xx,01000 USHL  std7_std7_std7 -------- */
+      /* -------- 1,xx,01010 URSHL std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool isU = bitU == 1;
+      Bool isR = opcode == BITS5(0,1,0,1,0);
+      IROp op  = isR ? (isU ? mkVecRSHU(size) : mkVecRSHS(size))
+                     : (isU ? mkVecSHU(size)  : mkVecSHS(size));
+      IRTemp res = newTempV128();
+      assign(res, binop(op, getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nm  = isR ? (isU ? "urshl" : "srshl")
+                             : (isU ? "ushl"  : "sshl");
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,0,0,1) || opcode == BITS5(0,1,0,1,1)) {
+      /* -------- 0,xx,01001 SQSHL  std7_std7_std7 -------- */
+      /* -------- 0,xx,01011 SQRSHL std7_std7_std7 -------- */
+      /* -------- 1,xx,01001 UQSHL  std7_std7_std7 -------- */
+      /* -------- 1,xx,01011 UQRSHL std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool isU = bitU == 1;
+      Bool isR = opcode == BITS5(0,1,0,1,1);
+      IROp op  = isR ? (isU ? mkVecQANDUQRSH(size) : mkVecQANDSQRSH(size))
+                     : (isU ? mkVecQANDUQSH(size)  : mkVecQANDSQSH(size));
+      /* This is a bit tricky.  If we're only interested in the lowest 64 bits
+         of the result (viz, bitQ == 0), then we must adjust the operands to
+         ensure that the upper part of the result, that we don't care about,
+         doesn't pollute the returned Q value.  To do this, zero out the upper
+         operand halves beforehand.  This works because it means, for the
+         lanes we don't care about, we are shifting zero by zero, which can
+         never saturate. */
+      IRTemp res256 = newTemp(Ity_V256);
+      IRTemp resSH  = newTempV128();
+      IRTemp resQ   = newTempV128();
+      IRTemp zero   = newTempV128();
+      assign(res256, binop(op, 
+                           math_MAYBE_ZERO_HI64_fromE(bitQ, getQReg128(nn)),
+                           math_MAYBE_ZERO_HI64_fromE(bitQ, getQReg128(mm))));
+      assign(resSH, unop(Iop_V256toV128_0, mkexpr(res256)));
+      assign(resQ,  unop(Iop_V256toV128_1, mkexpr(res256)));
+      assign(zero,  mkV128(0x0000));      
+      putQReg128(dd, mkexpr(resSH));
+      updateQCFLAGwithDifference(resQ, zero);
+      const HChar* nm  = isR ? (isU ? "uqrshl" : "sqrshl")
+                             : (isU ? "uqshl"  : "sqshl");
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,1,0,0) || opcode == BITS5(0,1,1,0,1)) {
+      /* -------- 0,xx,01100 SMAX std7_std7_std7 -------- */
+      /* -------- 1,xx,01100 UMAX std7_std7_std7 -------- */
+      /* -------- 0,xx,01101 SMIN std7_std7_std7 -------- */
+      /* -------- 1,xx,01101 UMIN std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool isU   = bitU == 1;
+      Bool isMAX = (opcode & 1) == 0;
+      IROp op    = isMAX ? (isU ? mkVecMAXU(size) : mkVecMAXS(size))
+                         : (isU ? mkVecMINU(size) : mkVecMINS(size));
+      IRTemp t   = newTempV128();
+      assign(t, binop(op, getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t));
+      const HChar* nm = isMAX ? (isU ? "umax" : "smax")
+                              : (isU ? "umin" : "smin");
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,1,1,0) || opcode == BITS5(0,1,1,1,1)) {
+      /* -------- 0,xx,01110 SABD std6_std6_std6 -------- */
+      /* -------- 1,xx,01110 UABD std6_std6_std6 -------- */
+      /* -------- 0,xx,01111 SABA std6_std6_std6 -------- */
+      /* -------- 1,xx,01111 UABA std6_std6_std6 -------- */
+      if (size == X11) return False; // 1d/2d cases not allowed
+      Bool isU   = bitU == 1;
+      Bool isACC = opcode == BITS5(0,1,1,1,1);
+      vassert(size <= 2);      
+      IRTemp t1 = math_ABD(isU, size, getQReg128(nn), getQReg128(mm));
+      IRTemp t2 = newTempV128();
+      assign(t2, isACC ? binop(mkVecADD(size), mkexpr(t1), getQReg128(dd))
+                       : mkexpr(t1));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t2));
+      const HChar* nm  = isACC ? (isU ? "uaba" : "saba")
+                               : (isU ? "uabd" : "sabd");
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,0,0,0)) {
+      /* -------- 0,xx,10000 ADD std7_std7_std7 -------- */
+      /* -------- 1,xx,10000 SUB std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool   isSUB = bitU == 1;
+      IROp   op    = isSUB ? mkVecSUB(size) : mkVecADD(size);
+      IRTemp t     = newTempV128();
+      assign(t, binop(op, getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t));
+      const HChar* nm  = isSUB ? "sub" : "add";
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,0,0,1)) {
+      /* -------- 0,xx,10001 CMTST std7_std7_std7 -------- */ // &, != 0
+      /* -------- 1,xx,10001 CMEQ  std7_std7_std7 -------- */ // ==
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool    isEQ = bitU == 1;
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = getQReg128(mm);
+      IRTemp  res  = newTempV128();
+      assign(res,
+             isEQ ? binop(mkVecCMPEQ(size), argL, argR)
+                  : unop(Iop_NotV128, binop(mkVecCMPEQ(size),
+                                            binop(Iop_AndV128, argL, argR), 
+                                            mkV128(0x0000))));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nm  = isEQ ? "cmeq" : "cmtst";
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,0,1,0)) {
+      /* -------- 0,xx,10010 MLA std7_std7_std7 -------- */
+      /* -------- 1,xx,10010 MLS std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool isMLS = bitU == 1;
+      IROp   opMUL    = mkVecMUL(size);
+      IROp   opADDSUB = isMLS ? mkVecSUB(size) : mkVecADD(size);
+      IRTemp res      = newTempV128();
+      if (opMUL != Iop_INVALID && opADDSUB != Iop_INVALID) {
+         assign(res, binop(opADDSUB,
+                           getQReg128(dd),
+                           binop(opMUL, getQReg128(nn), getQReg128(mm))));
+         putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+         const HChar* arr = nameArr_Q_SZ(bitQ, size);
+         DIP("%s %s.%s, %s.%s, %s.%s\n", isMLS ? "mls" : "mla",
+             nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+         return True;
+      }
+      return False;
+   }
+
+   if (opcode == BITS5(1,0,0,1,1)) {
+      /* -------- 0,xx,10011 MUL  std7_std7_std7 -------- */
+      /* -------- 1,xx,10011 PMUL 16b_16b_16b, 8b_8b_8b -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool isPMUL = bitU == 1;
+      const IROp opsPMUL[4]
+         = { Iop_PolynomialMul8x16, Iop_INVALID, Iop_INVALID, Iop_INVALID };
+      IROp   opMUL = isPMUL ? opsPMUL[size] : mkVecMUL(size);
+      IRTemp res   = newTempV128();
+      if (opMUL != Iop_INVALID) {
+         assign(res, binop(opMUL, getQReg128(nn), getQReg128(mm)));
+         putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+         const HChar* arr = nameArr_Q_SZ(bitQ, size);
+         DIP("%s %s.%s, %s.%s, %s.%s\n", isPMUL ? "pmul" : "mul",
+             nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+         return True;
+      }
+      return False;
+   }
+
+   if (opcode == BITS5(1,0,1,0,0) || opcode == BITS5(1,0,1,0,1)) {
+      /* -------- 0,xx,10100 SMAXP std6_std6_std6 -------- */
+      /* -------- 1,xx,10100 UMAXP std6_std6_std6 -------- */
+      /* -------- 0,xx,10101 SMINP std6_std6_std6 -------- */
+      /* -------- 1,xx,10101 UMINP std6_std6_std6 -------- */
+      if (size == X11) return False;
+      Bool isU   = bitU == 1;
+      Bool isMAX = opcode == BITS5(1,0,1,0,0);
+      IRTemp vN  = newTempV128();
+      IRTemp vM  = newTempV128();
+      IROp op = isMAX ? (isU ? mkVecMAXU(size) : mkVecMAXS(size))
+                      : (isU ? mkVecMINU(size) : mkVecMINS(size));
+      assign(vN, getQReg128(nn));
+      assign(vM, getQReg128(mm));
+      IRTemp res128 = newTempV128();
+      assign(res128,
+             binop(op,
+                   binop(mkVecCATEVENLANES(size), mkexpr(vM), mkexpr(vN)),
+                   binop(mkVecCATODDLANES(size),  mkexpr(vM), mkexpr(vN))));
+      /* In the half-width case, use CatEL32x4 to extract the half-width
+         result from the full-width result. */
+      IRExpr* res
+         = bitQ == 0 ? unop(Iop_ZeroHI64ofV128,
+                            binop(Iop_CatEvenLanes32x4, mkexpr(res128),
+                                                        mkexpr(res128)))
+                     : mkexpr(res128);
+      putQReg128(dd, res);
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      const HChar* nm  = isMAX ? (isU ? "umaxp" : "smaxp")
+                               : (isU ? "uminp" : "sminp");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,1,1,0)) {
+      /* -------- 0,xx,10110 SQDMULH s and h variants only -------- */
+      /* -------- 1,xx,10110 SQRDMULH s and h variants only -------- */
+      if (size == X00 || size == X11) return False;
+      Bool isR = bitU == 1;
+      IRTemp res, sat1q, sat1n, vN, vM;
+      res = sat1q = sat1n = vN = vM = IRTemp_INVALID;
+      newTempsV128_2(&vN, &vM);
+      assign(vN, getQReg128(nn));
+      assign(vM, getQReg128(mm));
+      math_SQDMULH(&res, &sat1q, &sat1n, isR, size, vN, vM);
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      IROp opZHI = bitQ == 0 ? Iop_ZeroHI64ofV128 : Iop_INVALID;
+      updateQCFLAGwithDifferenceZHI(sat1q, sat1n, opZHI);
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      const HChar* nm  = isR ? "sqrdmulh" : "sqdmulh";
+      DIP("%s %s.%s, %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS5(1,0,1,1,1)) {
+      /* -------- 0,xx,10111 ADDP std7_std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      IRTemp vN = newTempV128();
+      IRTemp vM = newTempV128();
+      assign(vN, getQReg128(nn));
+      assign(vM, getQReg128(mm));
+      IRTemp res128 = newTempV128();
+      assign(res128,
+             binop(mkVecADD(size),
+                   binop(mkVecCATEVENLANES(size), mkexpr(vM), mkexpr(vN)),
+                   binop(mkVecCATODDLANES(size),  mkexpr(vM), mkexpr(vN))));
+      /* In the half-width case, use CatEL32x4 to extract the half-width
+         result from the full-width result. */
+      IRExpr* res
+         = bitQ == 0 ? unop(Iop_ZeroHI64ofV128,
+                            binop(Iop_CatEvenLanes32x4, mkexpr(res128),
+                                                        mkexpr(res128)))
+                     : mkexpr(res128);
+      putQReg128(dd, res);
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("addp %s.%s, %s.%s, %s.%s\n",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 0
+       && (opcode == BITS5(1,1,0,0,0) || opcode == BITS5(1,1,1,1,0))) {
+      /* -------- 0,0x,11000 FMAXNM 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 0,1x,11000 FMINNM 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 0,0x,11110 FMAX   2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 0,1x,11110 FMIN   2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* FMAXNM, FMINNM: FIXME -- KLUDGED */
+      Bool   isD   = (size & 1) == 1;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      Bool   isMIN = (size & 2) == 2;
+      Bool   isNM  = opcode == BITS5(1,1,0,0,0);
+      IROp   opMXX = (isMIN ? mkVecMINF : mkVecMAXF)(isD ? X11 : X10);
+      IRTemp res   = newTempV128();
+      assign(res, binop(opMXX, getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s%s %s.%s, %s.%s, %s.%s\n",
+          isMIN ? "fmin" : "fmax", isNM ? "nm" : "",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS5(1,1,0,0,1)) {
+      /* -------- 0,0x,11001 FMLA 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 0,1x,11001 FMLS 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      Bool isD   = (size & 1) == 1;
+      Bool isSUB = (size & 2) == 2;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      IROp opADD = isD ? Iop_Add64Fx2 : Iop_Add32Fx4;
+      IROp opSUB = isD ? Iop_Sub64Fx2 : Iop_Sub32Fx4;
+      IROp opMUL = isD ? Iop_Mul64Fx2 : Iop_Mul32Fx4;
+      IRTemp rm = mk_get_IR_rounding_mode();
+      IRTemp t1 = newTempV128();
+      IRTemp t2 = newTempV128();
+      // FIXME: double rounding; use FMA primops instead
+      assign(t1, triop(opMUL,
+                       mkexpr(rm), getQReg128(nn), getQReg128(mm)));
+      assign(t2, triop(isSUB ? opSUB : opADD,
+                       mkexpr(rm), getQReg128(dd), mkexpr(t1)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t2));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", isSUB ? "fmls" : "fmla",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS5(1,1,0,1,0)) {
+      /* -------- 0,0x,11010 FADD 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 0,1x,11010 FSUB 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      Bool isD   = (size & 1) == 1;
+      Bool isSUB = (size & 2) == 2;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      const IROp ops[4]
+         = { Iop_Add32Fx4, Iop_Add64Fx2, Iop_Sub32Fx4, Iop_Sub64Fx2 };
+      IROp   op = ops[size];
+      IRTemp rm = mk_get_IR_rounding_mode();
+      IRTemp t1 = newTempV128();
+      IRTemp t2 = newTempV128();
+      assign(t1, triop(op, mkexpr(rm), getQReg128(nn), getQReg128(mm)));
+      assign(t2, math_MAYBE_ZERO_HI64(bitQ, t1));
+      putQReg128(dd, mkexpr(t2));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", isSUB ? "fsub" : "fadd",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 1 && size >= X10 && opcode == BITS5(1,1,0,1,0)) {
+      /* -------- 1,1x,11010 FABD 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      Bool isD = (size & 1) == 1;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      IROp   opSUB = isD ? Iop_Sub64Fx2 : Iop_Sub32Fx4;
+      IROp   opABS = isD ? Iop_Abs64Fx2 : Iop_Abs32Fx4;
+      IRTemp rm    = mk_get_IR_rounding_mode();
+      IRTemp t1    = newTempV128();
+      IRTemp t2    = newTempV128();
+      // FIXME: use Abd primop instead?
+      assign(t1, triop(opSUB, mkexpr(rm), getQReg128(nn), getQReg128(mm)));
+      assign(t2, unop(opABS, mkexpr(t1)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t2));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("fabd %s.%s, %s.%s, %s.%s\n",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (size <= X01 && opcode == BITS5(1,1,0,1,1)) {
+      /* -------- 0,0x,11011 FMULX 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 1,0x,11011 FMUL  2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      // KLUDGE: FMULX is treated the same way as FMUL.  That can't be right.
+      Bool isD    = (size & 1) == 1;
+      Bool isMULX = bitU == 0;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      IRTemp rm = mk_get_IR_rounding_mode();
+      IRTemp t1 = newTempV128();
+      assign(t1, triop(isD ? Iop_Mul64Fx2 : Iop_Mul32Fx4,
+                       mkexpr(rm), getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t1));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", isMULX ? "fmulx" : "fmul",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (size <= X01 && opcode == BITS5(1,1,1,0,0)) {
+      /* -------- 0,0x,11100 FCMEQ 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 1,0x,11100 FCMGE 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      Bool isD = (size & 1) == 1;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      Bool   isGE  = bitU == 1;
+      IROp   opCMP = isGE ? (isD ? Iop_CmpLE64Fx2 : Iop_CmpLE32Fx4)
+                          : (isD ? Iop_CmpEQ64Fx2 : Iop_CmpEQ32Fx4);
+      IRTemp t1    = newTempV128();
+      assign(t1, isGE ? binop(opCMP, getQReg128(mm), getQReg128(nn)) // swapd
+                      : binop(opCMP, getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t1));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", isGE ? "fcmge" : "fcmeq",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 1 && size >= X10 && opcode == BITS5(1,1,1,0,0)) {
+      /* -------- 1,1x,11100 FCMGT 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      Bool isD = (size & 1) == 1;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      IROp   opCMP = isD ? Iop_CmpLT64Fx2 : Iop_CmpLT32Fx4;
+      IRTemp t1    = newTempV128();
+      assign(t1, binop(opCMP, getQReg128(mm), getQReg128(nn))); // swapd
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t1));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", "fcmgt",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 1 && opcode == BITS5(1,1,1,0,1)) {
+      /* -------- 1,0x,11101 FACGE 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 1,1x,11101 FACGT 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      Bool isD  = (size & 1) == 1;
+      Bool isGT = (size & 2) == 2;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      IROp   opCMP = isGT ? (isD ? Iop_CmpLT64Fx2 : Iop_CmpLT32Fx4)
+                          : (isD ? Iop_CmpLE64Fx2 : Iop_CmpLE32Fx4);
+      IROp   opABS = isD ? Iop_Abs64Fx2 : Iop_Abs32Fx4;
+      IRTemp t1    = newTempV128();
+      assign(t1, binop(opCMP, unop(opABS, getQReg128(mm)),
+                              unop(opABS, getQReg128(nn)))); // swapd
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t1));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", isGT ? "facgt" : "facge",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 1
+       && (opcode == BITS5(1,1,0,0,0) || opcode == BITS5(1,1,1,1,0))) {
+      /* -------- 1,0x,11000 FMAXNMP 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 1,1x,11000 FMINNMP 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 1,0x,11110 FMAXP   2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 1,1x,11110 FMINP   2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* FMAXNM, FMINNM: FIXME -- KLUDGED */
+      Bool isD = (size & 1) == 1;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      Bool   isMIN = (size & 2) == 2;
+      Bool   isNM  = opcode == BITS5(1,1,0,0,0);
+      IROp   opMXX = (isMIN ? mkVecMINF : mkVecMAXF)(isD ? 3 : 2);
+      IRTemp srcN  = newTempV128();
+      IRTemp srcM  = newTempV128();
+      IRTemp preL  = IRTemp_INVALID;
+      IRTemp preR  = IRTemp_INVALID;
+      assign(srcN, getQReg128(nn));
+      assign(srcM, getQReg128(mm));
+      math_REARRANGE_FOR_FLOATING_PAIRWISE(&preL, &preR,
+                                           srcM, srcN, isD, bitQ);
+      putQReg128(
+         dd, math_MAYBE_ZERO_HI64_fromE(
+                bitQ,
+                binop(opMXX, mkexpr(preL), mkexpr(preR))));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s%sp %s.%s, %s.%s, %s.%s\n", 
+          isMIN ? "fmin" : "fmax", isNM ? "nm" : "",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 1 && size <= X01 && opcode == BITS5(1,1,0,1,0)) {
+      /* -------- 1,0x,11010 FADDP 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      Bool isD = size == X01;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      IRTemp srcN = newTempV128();
+      IRTemp srcM = newTempV128();
+      IRTemp preL = IRTemp_INVALID;
+      IRTemp preR = IRTemp_INVALID;
+      assign(srcN, getQReg128(nn));
+      assign(srcM, getQReg128(mm));
+      math_REARRANGE_FOR_FLOATING_PAIRWISE(&preL, &preR,
+                                           srcM, srcN, isD, bitQ);
+      putQReg128(
+         dd, math_MAYBE_ZERO_HI64_fromE(
+                bitQ,
+                triop(mkVecADDF(isD ? 3 : 2),
+                      mkexpr(mk_get_IR_rounding_mode()),
+                      mkexpr(preL), mkexpr(preR))));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", "faddp",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 1 && size <= X01 && opcode == BITS5(1,1,1,1,1)) {
+      /* -------- 1,0x,11111 FDIV 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      Bool isD = (size & 1) == 1;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      vassert(size <= 1);
+      const IROp ops[2] = { Iop_Div32Fx4, Iop_Div64Fx2 };
+      IROp   op = ops[size];
+      IRTemp rm = mk_get_IR_rounding_mode();
+      IRTemp t1 = newTempV128();
+      IRTemp t2 = newTempV128();
+      assign(t1, triop(op, mkexpr(rm), getQReg128(nn), getQReg128(mm)));
+      assign(t2, math_MAYBE_ZERO_HI64(bitQ, t1));
+      putQReg128(dd, mkexpr(t2));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", "fdiv",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS5(1,1,1,1,1)) {
+      /* -------- 0,0x,11111: FRECPS  2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      /* -------- 0,1x,11111: FRSQRTS 2d_2d_2d, 4s_4s_4s, 2s_2s_2s -------- */
+      Bool isSQRT = (size & 2) == 2;
+      Bool isD    = (size & 1) == 1;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      IROp op     = isSQRT ? (isD ? Iop_RSqrtStep64Fx2 : Iop_RSqrtStep32Fx4)
+                           : (isD ? Iop_RecipStep64Fx2 : Iop_RecipStep32Fx4);
+      IRTemp res = newTempV128();
+      assign(res, binop(op, getQReg128(nn), getQReg128(mm)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%s\n", isSQRT ? "frsqrts" : "frecps",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), arr);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_two_reg_misc(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31 30 29 28    23   21    16     11 9 4
+      0  Q  U  01110 size 10000 opcode 10 n d
+      Decode fields: U,size,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(28,24) != BITS5(0,1,1,1,0)
+       || INSN(21,17) != BITS5(1,0,0,0,0)
+       || INSN(11,10) != BITS2(1,0)) {
+      return False;
+   }
+   UInt bitQ   = INSN(30,30);
+   UInt bitU   = INSN(29,29);
+   UInt size   = INSN(23,22);
+   UInt opcode = INSN(16,12);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+   vassert(size < 4);
+
+   if (bitU == 0 && size <= X10 && opcode == BITS5(0,0,0,0,0)) {
+      /* -------- 0,00,00000: REV64 16b_16b, 8b_8b -------- */
+      /* -------- 0,01,00000: REV64 8h_8h, 4h_4h -------- */
+      /* -------- 0,10,00000: REV64 4s_4s, 2s_2s -------- */
+      const IROp iops[3] = { Iop_Reverse8sIn64_x2,
+                             Iop_Reverse16sIn64_x2, Iop_Reverse32sIn64_x2 };
+      vassert(size <= 2);
+      IRTemp res = newTempV128();
+      assign(res, unop(iops[size], getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s\n", "rev64",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (bitU == 1 && size <= X01 && opcode == BITS5(0,0,0,0,0)) {
+      /* -------- 1,00,00000: REV32 16b_16b, 8b_8b -------- */
+      /* -------- 1,01,00000: REV32 8h_8h, 4h_4h -------- */
+      Bool   isH = size == X01;
+      IRTemp res = newTempV128();
+      IROp   iop = isH ? Iop_Reverse16sIn32_x4 : Iop_Reverse8sIn32_x4;
+      assign(res, unop(iop, getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s\n", "rev32",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (bitU == 0 && size == X00 && opcode == BITS5(0,0,0,0,1)) {
+      /* -------- 0,00,00001: REV16 16b_16b, 8b_8b -------- */
+      IRTemp res = newTempV128();
+      assign(res, unop(Iop_Reverse8sIn16_x8, getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s\n", "rev16",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,0,1,0) || opcode == BITS5(0,0,1,1,0)) {
+      /* -------- 0,xx,00010: SADDLP std6_std6 -------- */
+      /* -------- 1,xx,00010: UADDLP std6_std6 -------- */
+      /* -------- 0,xx,00110: SADALP std6_std6 -------- */
+      /* -------- 1,xx,00110: UADALP std6_std6 -------- */
+      /* Widens, and size refers to the narrow size. */
+      if (size == X11) return False; // no 1d or 2d cases
+      Bool   isU   = bitU == 1;
+      Bool   isACC = opcode == BITS5(0,0,1,1,0);
+      IRTemp src   = newTempV128();
+      IRTemp sum   = newTempV128();
+      IRTemp res   = newTempV128();
+      assign(src, getQReg128(nn));
+      assign(sum,
+             binop(mkVecADD(size+1),
+                   mkexpr(math_WIDEN_EVEN_OR_ODD_LANES(
+                             isU, True/*fromOdd*/, size, mkexpr(src))),
+                   mkexpr(math_WIDEN_EVEN_OR_ODD_LANES(
+                             isU, False/*!fromOdd*/, size, mkexpr(src)))));
+      assign(res, isACC ? binop(mkVecADD(size+1), mkexpr(sum), getQReg128(dd))
+                        : mkexpr(sum));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(bitQ, size+1);
+      DIP("%s %s.%s, %s.%s\n", isACC ? (isU ? "uadalp" : "sadalp")
+                                     : (isU ? "uaddlp" : "saddlp"),
+          nameQReg128(dd), arrWide, nameQReg128(nn), arrNarrow);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,0,1,1)) {
+      /* -------- 0,xx,00011: SUQADD std7_std7 -------- */
+      /* -------- 1,xx,00011: USQADD std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool isUSQADD = bitU == 1;
+      /* This is switched (in the US vs SU sense) deliberately.
+         SUQADD corresponds to the ExtUSsatSS variants and 
+         USQADD corresponds to the ExtSUsatUU variants.
+         See libvex_ir for more details. */
+      IROp   qop  = isUSQADD ? mkVecQADDEXTSUSATUU(size)
+                             : mkVecQADDEXTUSSATSS(size);
+      IROp   nop  = mkVecADD(size);
+      IRTemp argL = newTempV128();
+      IRTemp argR = newTempV128();
+      IRTemp qres = newTempV128();
+      IRTemp nres = newTempV128();
+      /* Because the two arguments to the addition are implicitly 
+         extended differently (one signedly, the other unsignedly) it is
+         important to present them to the primop in the correct order. */
+      assign(argL, getQReg128(nn));
+      assign(argR, getQReg128(dd));
+      assign(qres, math_MAYBE_ZERO_HI64_fromE(
+                      bitQ, binop(qop, mkexpr(argL), mkexpr(argR))));
+      assign(nres, math_MAYBE_ZERO_HI64_fromE(
+                      bitQ, binop(nop, mkexpr(argL), mkexpr(argR))));
+      putQReg128(dd, mkexpr(qres));
+      updateQCFLAGwithDifference(qres, nres);
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s\n", isUSQADD ? "usqadd" : "suqadd",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,1,0,0)) {
+      /* -------- 0,xx,00100: CLS std6_std6 -------- */
+      /* -------- 1,xx,00100: CLZ std6_std6 -------- */
+      if (size == X11) return False; // no 1d or 2d cases
+      const IROp opsCLS[3] = { Iop_Cls8x16, Iop_Cls16x8, Iop_Cls32x4 };
+      const IROp opsCLZ[3] = { Iop_Clz8x16, Iop_Clz16x8, Iop_Clz32x4 };
+      Bool   isCLZ = bitU == 1;
+      IRTemp res   = newTempV128();
+      vassert(size <= 2);
+      assign(res, unop(isCLZ ? opsCLZ[size] : opsCLS[size], getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s\n", isCLZ ? "clz" : "cls",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (size == X00 && opcode == BITS5(0,0,1,0,1)) {
+      /* -------- 0,00,00101: CNT 16b_16b, 8b_8b -------- */
+      /* -------- 1,00,00101: NOT 16b_16b, 8b_8b -------- */
+      IRTemp res = newTempV128();
+      assign(res, unop(bitU == 0 ? Iop_Cnt8x16 : Iop_NotV128, getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, 0);
+      DIP("%s %s.%s, %s.%s\n", bitU == 0 ? "cnt" : "not",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (bitU == 1 && size == X01 && opcode == BITS5(0,0,1,0,1)) {
+      /* -------- 1,01,00101  RBIT 16b_16b, 8b_8b -------- */
+      IRTemp res = newTempV128();
+      assign(res, unop(Iop_Reverse1sIn8_x16, getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, 0);
+      DIP("%s %s.%s, %s.%s\n", "rbit",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,0,1,1,1)) {
+      /* -------- 0,xx,00111 SQABS std7_std7 -------- */
+      /* -------- 1,xx,00111 SQNEG std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool   isNEG  = bitU == 1;
+      IRTemp qresFW = IRTemp_INVALID, nresFW = IRTemp_INVALID;
+      (isNEG ? math_SQNEG : math_SQABS)( &qresFW, &nresFW,
+                                         getQReg128(nn), size );
+      IRTemp qres = newTempV128(), nres = newTempV128();
+      assign(qres, math_MAYBE_ZERO_HI64(bitQ, qresFW));
+      assign(nres, math_MAYBE_ZERO_HI64(bitQ, nresFW));
+      putQReg128(dd, mkexpr(qres));
+      updateQCFLAGwithDifference(qres, nres);
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s\n", isNEG ? "sqneg" : "sqabs",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,0,0,0)) {
+      /* -------- 0,xx,01000: CMGT std7_std7_#0 -------- */ // >s 0
+      /* -------- 1,xx,01000: CMGE std7_std7_#0 -------- */ // >=s 0
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool    isGT  = bitU == 0;
+      IRExpr* argL  = getQReg128(nn);
+      IRExpr* argR  = mkV128(0x0000);
+      IRTemp  res   = newTempV128();
+      IROp    opGTS = mkVecCMPGTS(size);
+      assign(res, isGT ? binop(opGTS, argL, argR)
+                       : unop(Iop_NotV128, binop(opGTS, argR, argL)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("cm%s %s.%s, %s.%s, #0\n", isGT ? "gt" : "ge",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (opcode == BITS5(0,1,0,0,1)) {
+      /* -------- 0,xx,01001: CMEQ std7_std7_#0 -------- */ // == 0
+      /* -------- 1,xx,01001: CMLE std7_std7_#0 -------- */ // <=s 0
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool    isEQ = bitU == 0;
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = mkV128(0x0000);
+      IRTemp  res  = newTempV128();
+      assign(res, isEQ ? binop(mkVecCMPEQ(size), argL, argR)
+                       : unop(Iop_NotV128,
+                              binop(mkVecCMPGTS(size), argL, argR)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("cm%s %s.%s, %s.%s, #0\n", isEQ ? "eq" : "le",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS5(0,1,0,1,0)) {
+      /* -------- 0,xx,01010: CMLT std7_std7_#0 -------- */ // <s 0
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      IRExpr* argL = getQReg128(nn);
+      IRExpr* argR = mkV128(0x0000);
+      IRTemp  res  = newTempV128();
+      assign(res, binop(mkVecCMPGTS(size), argR, argL));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("cm%s %s.%s, %s.%s, #0\n", "lt",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS5(0,1,0,1,1)) {
+      /* -------- 0,xx,01011: ABS std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      IRTemp res = newTempV128();
+      assign(res, unop(mkVecABS(size), getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("abs %s.%s, %s.%s\n", nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (bitU == 1 && opcode == BITS5(0,1,0,1,1)) {
+      /* -------- 1,xx,01011: NEG std7_std7 -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      IRTemp res = newTempV128();
+      assign(res, binop(mkVecSUB(size), mkV128(0x0000), getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("neg %s.%s, %s.%s\n", nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   UInt ix = 0; /*INVALID*/
+   if (size >= X10) {
+      switch (opcode) {
+         case BITS5(0,1,1,0,0): ix = (bitU == 1) ? 4 : 1; break;
+         case BITS5(0,1,1,0,1): ix = (bitU == 1) ? 5 : 2; break;
+         case BITS5(0,1,1,1,0): if (bitU == 0) ix = 3; break;
+         default: break;
+      }
+   }
+   if (ix > 0) {
+      /* -------- 0,1x,01100 FCMGT 2d_2d,4s_4s,2s_2s _#0.0 (ix 1) -------- */
+      /* -------- 0,1x,01101 FCMEQ 2d_2d,4s_4s,2s_2s _#0.0 (ix 2) -------- */
+      /* -------- 0,1x,01110 FCMLT 2d_2d,4s_4s,2s_2s _#0.0 (ix 3) -------- */
+      /* -------- 1,1x,01100 FCMGE 2d_2d,4s_4s,2s_2s _#0.0 (ix 4) -------- */
+      /* -------- 1,1x,01101 FCMLE 2d_2d,4s_4s,2s_2s _#0.0 (ix 5) -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool   isD     = size == X11;
+      IROp   opCmpEQ = isD ? Iop_CmpEQ64Fx2 : Iop_CmpEQ32Fx4;
+      IROp   opCmpLE = isD ? Iop_CmpLE64Fx2 : Iop_CmpLE32Fx4;
+      IROp   opCmpLT = isD ? Iop_CmpLT64Fx2 : Iop_CmpLT32Fx4;
+      IROp   opCmp   = Iop_INVALID;
+      Bool   swap    = False;
+      const HChar* nm = "??";
+      switch (ix) {
+         case 1: nm = "fcmgt"; opCmp = opCmpLT; swap = True; break;
+         case 2: nm = "fcmeq"; opCmp = opCmpEQ; break;
+         case 3: nm = "fcmlt"; opCmp = opCmpLT; break;
+         case 4: nm = "fcmge"; opCmp = opCmpLE; swap = True; break;
+         case 5: nm = "fcmle"; opCmp = opCmpLE; break;
+         default: vassert(0);
+      }
+      IRExpr* zero = mkV128(0x0000);
+      IRTemp res = newTempV128();
+      assign(res, swap ? binop(opCmp, zero, getQReg128(nn))
+                       : binop(opCmp, getQReg128(nn), zero));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = bitQ == 0 ? "2s" : (size == X11 ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, #0.0\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (size >= X10 && opcode == BITS5(0,1,1,1,1)) {
+      /* -------- 0,1x,01111: FABS 2d_2d, 4s_4s, 2s_2s -------- */
+      /* -------- 1,1x,01111: FNEG 2d_2d, 4s_4s, 2s_2s -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool   isFNEG = bitU == 1;
+      IROp   op     = isFNEG ? (size == X10 ? Iop_Neg32Fx4 : Iop_Neg64Fx2)
+                             : (size == X10 ? Iop_Abs32Fx4 : Iop_Abs64Fx2);
+      IRTemp res = newTempV128();
+      assign(res, unop(op, getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = bitQ == 0 ? "2s" : (size == X11 ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s\n", isFNEG ? "fneg" : "fabs",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (bitU == 0 && opcode == BITS5(1,0,0,1,0)) {
+      /* -------- 0,xx,10010: XTN{,2} -------- */
+      if (size == X11) return False;
+      vassert(size < 3);
+      Bool   is2  = bitQ == 1;
+      IROp   opN  = mkVecNARROWUN(size);
+      IRTemp resN = newTempV128();
+      assign(resN, unop(Iop_64UtoV128, unop(opN, getQReg128(nn))));
+      putLO64andZUorPutHI64(is2, dd, resN);
+      const HChar* nm        = "xtn";
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      DIP("%s%s %s.%s, %s.%s\n", is2 ? "2" : "", nm,
+          nameQReg128(dd), arrNarrow, nameQReg128(nn), arrWide);
+      return True;
+   }
+
+   if (opcode == BITS5(1,0,1,0,0)
+       || (bitU == 1 && opcode == BITS5(1,0,0,1,0))) {
+      /* -------- 0,xx,10100: SQXTN{,2} -------- */
+      /* -------- 1,xx,10100: UQXTN{,2} -------- */
+      /* -------- 1,xx,10010: SQXTUN{,2} -------- */
+      if (size == X11) return False;
+      vassert(size < 3);
+      Bool  is2    = bitQ == 1;
+      IROp  opN    = Iop_INVALID;
+      Bool  zWiden = True;
+      const HChar* nm = "??";
+      /**/ if (bitU == 0 && opcode == BITS5(1,0,1,0,0)) {
+         opN = mkVecQNARROWUNSS(size); nm = "sqxtn"; zWiden = False;
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,1,0,0)) {
+         opN = mkVecQNARROWUNUU(size); nm = "uqxtn";
+      }
+      else if (bitU == 1 && opcode == BITS5(1,0,0,1,0)) {
+         opN = mkVecQNARROWUNSU(size); nm = "sqxtun";
+      }
+      else vassert(0);
+      IRTemp src  = newTempV128();
+      assign(src, getQReg128(nn));
+      IRTemp resN = newTempV128();
+      assign(resN, unop(Iop_64UtoV128, unop(opN, mkexpr(src))));
+      putLO64andZUorPutHI64(is2, dd, resN);
+      IRTemp resW = math_WIDEN_LO_OR_HI_LANES(zWiden, False/*!fromUpperHalf*/,
+                                              size, mkexpr(resN));
+      updateQCFLAGwithDifference(src, resW);
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      DIP("%s%s %s.%s, %s.%s\n", is2 ? "2" : "", nm,
+          nameQReg128(dd), arrNarrow, nameQReg128(nn), arrWide);
+      return True;
+   }
+
+   if (bitU == 1 && opcode == BITS5(1,0,0,1,1)) {
+      /* -------- 1,xx,10011 SHLL{2} #lane-width -------- */
+      /* Widens, and size is the narrow size. */
+      if (size == X11) return False;
+      Bool is2   = bitQ == 1;
+      IROp opINT = is2 ? mkVecINTERLEAVEHI(size) : mkVecINTERLEAVELO(size);
+      IROp opSHL = mkVecSHLN(size+1);
+      IRTemp src = newTempV128();
+      IRTemp res = newTempV128();
+      assign(src, getQReg128(nn));
+      assign(res, binop(opSHL, binop(opINT, mkexpr(src), mkexpr(src)),
+                               mkU8(8 << size)));
+      putQReg128(dd, mkexpr(res));
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      DIP("shll%s %s.%s, %s.%s, #%u\n", is2 ? "2" : "", 
+          nameQReg128(dd), arrWide, nameQReg128(nn), arrNarrow, 8 << size);
+      return True;
+   }
+
+   if (bitU == 0 && size <= X01 && opcode == BITS5(1,0,1,1,0)) {
+      /* -------- 0,0x,10110: FCVTN 4h/8h_4s, 2s/4s_2d -------- */
+      UInt   nLanes = size == X00 ? 4 : 2;
+      IRType srcTy  = size == X00 ? Ity_F32 : Ity_F64;
+      IROp   opCvt  = size == X00 ? Iop_F32toF16 : Iop_F64toF32;
+      IRTemp rm     = mk_get_IR_rounding_mode();
+      IRTemp src[nLanes];
+      for (UInt i = 0; i < nLanes; i++) {
+         src[i] = newTemp(srcTy);
+         assign(src[i], getQRegLane(nn, i, srcTy));
+      }
+      for (UInt i = 0; i < nLanes; i++) {
+         putQRegLane(dd, nLanes * bitQ + i,
+                         binop(opCvt, mkexpr(rm), mkexpr(src[i])));
+      }
+      if (bitQ == 0) {
+         putQRegLane(dd, 1, mkU64(0));
+      }
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, 1+size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    1+size+1);
+      DIP("fcvtn%s %s.%s, %s.%s\n", bitQ ? "2" : "",
+          nameQReg128(dd), arrNarrow, nameQReg128(nn), arrWide);
+      return True;
+   }
+
+   if (bitU == 1 && size == X01 && opcode == BITS5(1,0,1,1,0)) {
+      /* -------- 1,01,10110: FCVTXN 2s/4s_2d -------- */
+      /* Using Irrm_NEAREST here isn't right.  The docs say "round to
+         odd" but I don't know what that really means. */
+      IRType srcTy = Ity_F64;
+      IROp   opCvt = Iop_F64toF32;
+      IRTemp src[2];
+      for (UInt i = 0; i < 2; i++) {
+         src[i] = newTemp(srcTy);
+         assign(src[i], getQRegLane(nn, i, srcTy));
+      }
+      for (UInt i = 0; i < 2; i++) {
+         putQRegLane(dd, 2 * bitQ + i,
+                         binop(opCvt, mkU32(Irrm_NEAREST), mkexpr(src[i])));
+      }
+      if (bitQ == 0) {
+         putQRegLane(dd, 1, mkU64(0));
+      }
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, 1+size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    1+size+1);
+      DIP("fcvtxn%s %s.%s, %s.%s\n", bitQ ? "2" : "",
+          nameQReg128(dd), arrNarrow, nameQReg128(nn), arrWide);
+      return True;
+   }
+
+   if (bitU == 0 && size <= X01 && opcode == BITS5(1,0,1,1,1)) {
+      /* -------- 0,0x,10111: FCVTL 4s_4h/8h, 2d_2s/4s -------- */
+      UInt   nLanes = size == X00 ? 4 : 2;
+      IRType srcTy  = size == X00 ? Ity_F16 : Ity_F32;
+      IROp   opCvt  = size == X00 ? Iop_F16toF32 : Iop_F32toF64;
+      IRTemp src[nLanes];
+      for (UInt i = 0; i < nLanes; i++) {
+         src[i] = newTemp(srcTy);
+         assign(src[i], getQRegLane(nn, nLanes * bitQ + i, srcTy));
+      }
+      for (UInt i = 0; i < nLanes; i++) {
+         putQRegLane(dd, i, unop(opCvt, mkexpr(src[i])));
+      }
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, 1+size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    1+size+1);
+      DIP("fcvtl%s %s.%s, %s.%s\n", bitQ ? "2" : "",
+          nameQReg128(dd), arrWide, nameQReg128(nn), arrNarrow);
+      return True;
+   }
+
+   ix = 0;
+   if (opcode == BITS5(1,1,0,0,0) || opcode == BITS5(1,1,0,0,1)) {
+      ix = 1 + ((((bitU & 1) << 2) | ((size & 2) << 0)) | ((opcode & 1) << 0));
+      // = 1 + bitU[0]:size[1]:opcode[0]
+      vassert(ix >= 1 && ix <= 8);
+      if (ix == 7) ix = 0;
+   }
+   if (ix > 0) {
+      /* -------- 0,0x,11000 FRINTN 2d_2d, 4s_4s, 2s_2s (1) -------- */
+      /* -------- 0,0x,11001 FRINTM 2d_2d, 4s_4s, 2s_2s (2) -------- */
+      /* -------- 0,1x,11000 FRINTP 2d_2d, 4s_4s, 2s_2s (3) -------- */
+      /* -------- 0,1x,11001 FRINTZ 2d_2d, 4s_4s, 2s_2s (4) -------- */
+      /* -------- 1,0x,11000 FRINTA 2d_2d, 4s_4s, 2s_2s (5) -------- */
+      /* -------- 1,0x,11001 FRINTX 2d_2d, 4s_4s, 2s_2s (6) -------- */
+      /* -------- 1,1x,11000 (apparently unassigned)    (7) -------- */
+      /* -------- 1,1x,11001 FRINTI 2d_2d, 4s_4s, 2s_2s (8) -------- */
+      /* rm plan:
+         FRINTN: tieeven -- !! FIXME KLUDGED !!
+         FRINTM: -inf
+         FRINTP: +inf
+         FRINTZ: zero
+         FRINTA: tieaway -- !! FIXME KLUDGED !!
+         FRINTX: per FPCR + "exact = TRUE"
+         FRINTI: per FPCR
+      */
+      Bool isD = (size & 1) == 1;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+
+      IRTemp irrmRM = mk_get_IR_rounding_mode();
+
+      UChar ch = '?';
+      IRTemp irrm = newTemp(Ity_I32);
+      switch (ix) {
+         case 1: ch = 'n'; assign(irrm, mkU32(Irrm_NEAREST)); break;
+         case 2: ch = 'm'; assign(irrm, mkU32(Irrm_NegINF)); break;
+         case 3: ch = 'p'; assign(irrm, mkU32(Irrm_PosINF)); break;
+         case 4: ch = 'z'; assign(irrm, mkU32(Irrm_ZERO)); break; 
+         // The following is a kludge.  Should be: Irrm_NEAREST_TIE_AWAY_0
+         case 5: ch = 'a'; assign(irrm, mkU32(Irrm_NEAREST)); break;
+         // I am unsure about the following, due to the "integral exact"
+         // description in the manual.  What does it mean? (frintx, that is)
+         case 6: ch = 'x'; assign(irrm, mkexpr(irrmRM)); break;
+         case 8: ch = 'i'; assign(irrm, mkexpr(irrmRM)); break; 
+         default: vassert(0);
+      }
+
+      IROp opRND = isD ? Iop_RoundF64toInt : Iop_RoundF32toInt;
+      if (isD) {
+         for (UInt i = 0; i < 2; i++) {
+            putQRegLane(dd, i, binop(opRND, mkexpr(irrm),
+                                            getQRegLane(nn, i, Ity_F64)));
+         }
+      } else {
+         UInt n = bitQ==1 ? 4 : 2;
+         for (UInt i = 0; i < n; i++) {
+            putQRegLane(dd, i, binop(opRND, mkexpr(irrm),
+                                            getQRegLane(nn, i, Ity_F32)));
+         }
+         if (bitQ == 0)
+            putQRegLane(dd, 1, mkU64(0)); // zero out lanes 2 and 3
+      }
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("frint%c %s.%s, %s.%s\n", ch,
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   ix = 0; /*INVALID*/
+   switch (opcode) {
+      case BITS5(1,1,0,1,0): ix = ((size & 2) == 2) ? 4 : 1; break;
+      case BITS5(1,1,0,1,1): ix = ((size & 2) == 2) ? 5 : 2; break;
+      case BITS5(1,1,1,0,0): if ((size & 2) == 0) ix = 3; break;
+      default: break;
+   }
+   if (ix > 0) {
+      /* -------- 0,0x,11010 FCVTNS 2d_2d, 4s_4s, 2s_2s (ix 1) -------- */
+      /* -------- 0,0x,11011 FCVTMS 2d_2d, 4s_4s, 2s_2s (ix 2) -------- */
+      /* -------- 0,0x,11100 FCVTAS 2d_2d, 4s_4s, 2s_2s (ix 3) -------- */
+      /* -------- 0,1x,11010 FCVTPS 2d_2d, 4s_4s, 2s_2s (ix 4) -------- */
+      /* -------- 0,1x,11011 FCVTZS 2d_2d, 4s_4s, 2s_2s (ix 5) -------- */
+      /* -------- 1,0x,11010 FCVTNS 2d_2d, 4s_4s, 2s_2s (ix 1) -------- */
+      /* -------- 1,0x,11011 FCVTMS 2d_2d, 4s_4s, 2s_2s (ix 2) -------- */
+      /* -------- 1,0x,11100 FCVTAS 2d_2d, 4s_4s, 2s_2s (ix 3) -------- */
+      /* -------- 1,1x,11010 FCVTPS 2d_2d, 4s_4s, 2s_2s (ix 4) -------- */
+      /* -------- 1,1x,11011 FCVTZS 2d_2d, 4s_4s, 2s_2s (ix 5) -------- */
+      Bool isD = (size & 1) == 1;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+
+      IRRoundingMode irrm = 8; /*impossible*/
+      HChar          ch   = '?';
+      switch (ix) {
+         case 1: ch = 'n'; irrm = Irrm_NEAREST; break;
+         case 2: ch = 'm'; irrm = Irrm_NegINF;  break;
+         case 3: ch = 'a'; irrm = Irrm_NEAREST; break; /* kludge? */
+         case 4: ch = 'p'; irrm = Irrm_PosINF;  break;
+         case 5: ch = 'z'; irrm = Irrm_ZERO;    break;
+         default: vassert(0);
+      }
+      IROp cvt = Iop_INVALID;
+      if (bitU == 1) {
+         cvt = isD ? Iop_F64toI64U : Iop_F32toI32U;
+      } else {
+         cvt = isD ? Iop_F64toI64S : Iop_F32toI32S;
+      }
+      if (isD) {
+         for (UInt i = 0; i < 2; i++) {
+            putQRegLane(dd, i, binop(cvt, mkU32(irrm),
+                                            getQRegLane(nn, i, Ity_F64)));
+         }
+      } else {
+         UInt n = bitQ==1 ? 4 : 2;
+         for (UInt i = 0; i < n; i++) {
+            putQRegLane(dd, i, binop(cvt, mkU32(irrm),
+                                            getQRegLane(nn, i, Ity_F32)));
+         }
+         if (bitQ == 0)
+            putQRegLane(dd, 1, mkU64(0)); // zero out lanes 2 and 3
+      }
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("fcvt%c%c %s.%s, %s.%s\n", ch, bitU == 1 ? 'u' : 's',
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (size == X10 && opcode == BITS5(1,1,1,0,0)) {
+      /* -------- 0,10,11100: URECPE  4s_4s, 2s_2s -------- */
+      /* -------- 1,10,11100: URSQRTE 4s_4s, 2s_2s -------- */
+      Bool isREC = bitU == 0;
+      IROp op    = isREC ? Iop_RecipEst32Ux4 : Iop_RSqrtEst32Ux4;
+      IRTemp res = newTempV128();
+      assign(res, unop(op, getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* nm  = isREC ? "urecpe" : "ursqrte";
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      DIP("%s %s.%s, %s.%s\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (size <= X01 && opcode == BITS5(1,1,1,0,1)) {
+      /* -------- 0,0x,11101: SCVTF -------- */
+      /* -------- 1,0x,11101: UCVTF -------- */
+      /* 31  28      22 21       15     9 4
+         0q0 01110 0 sz 1  00001 110110 n d  SCVTF Vd, Vn
+         0q1 01110 0 sz 1  00001 110110 n d  UCVTF Vd, Vn
+         with laneage:
+         case sz:Q of 00 -> 2S, zero upper, 01 -> 4S, 10 -> illegal, 11 -> 2D
+      */
+      Bool isQ   = bitQ == 1;
+      Bool isU   = bitU == 1;
+      Bool isF64 = (size & 1) == 1;
+      if (isQ || !isF64) {
+         IRType tyF = Ity_INVALID, tyI = Ity_INVALID;
+         UInt   nLanes = 0;
+         Bool   zeroHI = False;
+         const HChar* arrSpec = NULL;
+         Bool   ok  = getLaneInfo_Q_SZ(&tyI, &tyF, &nLanes, &zeroHI, &arrSpec,
+                                       isQ, isF64 );
+         IROp   iop = isU ? (isF64 ? Iop_I64UtoF64 : Iop_I32UtoF32)
+                          : (isF64 ? Iop_I64StoF64 : Iop_I32StoF32);
+         IRTemp rm  = mk_get_IR_rounding_mode();
+         UInt   i;
+         vassert(ok); /* the 'if' above should ensure this */
+         for (i = 0; i < nLanes; i++) {
+            putQRegLane(dd, i,
+                        binop(iop, mkexpr(rm), getQRegLane(nn, i, tyI)));
+         }
+         if (zeroHI) {
+            putQRegLane(dd, 1, mkU64(0));
+         }
+         DIP("%ccvtf %s.%s, %s.%s\n", isU ? 'u' : 's',
+             nameQReg128(dd), arrSpec, nameQReg128(nn), arrSpec);
+         return True;
+      }
+      /* else fall through */
+   }
+
+   if (size >= X10 && opcode == BITS5(1,1,1,0,1)) {
+      /* -------- 0,1x,11101: FRECPE  2d_2d, 4s_4s, 2s_2s -------- */
+      /* -------- 1,1x,11101: FRSQRTE 2d_2d, 4s_4s, 2s_2s -------- */
+      Bool isSQRT = bitU == 1;
+      Bool isD    = (size & 1) == 1;
+      IROp op     = isSQRT ? (isD ? Iop_RSqrtEst64Fx2 : Iop_RSqrtEst32Fx4)
+                           : (isD ? Iop_RecipEst64Fx2 : Iop_RecipEst32Fx4);
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      IRTemp resV = newTempV128();
+      assign(resV, unop(op, getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, resV));
+      const HChar* arr = bitQ == 0 ? "2s" : (size == X11 ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s\n", isSQRT ? "frsqrte" : "frecpe",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   if (bitU == 1 && size >= X10 && opcode == BITS5(1,1,1,1,1)) {
+      /* -------- 1,1x,11111: FSQRT 2d_2d, 4s_4s, 2s_2s -------- */
+      Bool isD = (size & 1) == 1;
+      IROp op  = isD ? Iop_Sqrt64Fx2 : Iop_Sqrt32Fx4;
+      if (bitQ == 0 && isD) return False; // implied 1d case
+      IRTemp resV = newTempV128();
+      assign(resV, binop(op, mkexpr(mk_get_IR_rounding_mode()),
+                             getQReg128(nn)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, resV));
+      const HChar* arr = bitQ == 0 ? "2s" : (size == X11 ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s\n", "fsqrt",
+          nameQReg128(dd), arr, nameQReg128(nn), arr);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_vector_x_indexed_elem(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* Decode the "AdvSIMD vector x indexed element" instruction class.
+      Returns True iff 'insn' was recognised and IR was emitted for it;
+      returns False to let the caller try other decoders.
+
+      31    28    23   21 20 19 15     11   9 4
+      0 Q U 01111 size L  M  m  opcode H  0 n d
+      Decode fields are: u,size,opcode
+      M is really part of the mm register number.  Individual 
+      cases need to inspect L and H though.
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,31) != 0
+       || INSN(28,24) != BITS5(0,1,1,1,1) || INSN(10,10) !=0) {
+      return False;
+   }
+   UInt bitQ   = INSN(30,30);
+   UInt bitU   = INSN(29,29);
+   UInt size   = INSN(23,22);
+   UInt bitL   = INSN(21,21);
+   UInt bitM   = INSN(20,20);
+   UInt mmLO4  = INSN(19,16);
+   UInt opcode = INSN(15,12);
+   UInt bitH   = INSN(11,11);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+   vassert(size < 4);
+   vassert(bitH < 2 && bitM < 2 && bitL < 2);
+
+   if (bitU == 0 && size >= X10
+       && (opcode == BITS4(0,0,0,1) || opcode == BITS4(0,1,0,1))) {
+      /* -------- 0,1x,0001 FMLA 2d_2d_d[], 4s_4s_s[], 2s_2s_s[] -------- */
+      /* -------- 0,1x,0101 FMLS 2d_2d_d[], 4s_4s_s[], 2s_2s_s[] -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool isD   = (size & 1) == 1;
+      Bool isSUB = opcode == BITS4(0,1,0,1);
+      UInt index;
+      // For S lanes the index is H:L; for D lanes it is just H (L must be 0).
+      if      (!isD)             index = (bitH << 1) | bitL;
+      else if (isD && bitL == 0) index = bitH;
+      else return False; // sz:L == x11 => unallocated encoding
+      vassert(index < (isD ? 2 : 4));
+      IRType ity   = isD ? Ity_F64 : Ity_F32;
+      IRTemp elem  = newTemp(ity);
+      UInt   mm    = (bitM << 4) | mmLO4;
+      assign(elem, getQRegLane(mm, index, ity));
+      IRTemp dupd  = math_DUP_TO_V128(elem, ity);
+      IROp   opADD = isD ? Iop_Add64Fx2 : Iop_Add32Fx4;
+      IROp   opSUB = isD ? Iop_Sub64Fx2 : Iop_Sub32Fx4;
+      IROp   opMUL = isD ? Iop_Mul64Fx2 : Iop_Mul32Fx4;
+      IRTemp rm    = mk_get_IR_rounding_mode();
+      IRTemp t1    = newTempV128();
+      IRTemp t2    = newTempV128();
+      // FIXME: double rounding; use FMA primops instead
+      assign(t1, triop(opMUL, mkexpr(rm), getQReg128(nn), mkexpr(dupd)));
+      assign(t2, triop(isSUB ? opSUB : opADD,
+                       mkexpr(rm), getQReg128(dd), mkexpr(t1)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, t2));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%c[%u]\n", isSUB ? "fmls" : "fmla",
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm),
+          isD ? 'd' : 's', index);
+      return True;
+   }
+
+   if (size >= X10 && opcode == BITS4(1,0,0,1)) {
+      /* -------- 0,1x,1001 FMUL  2d_2d_d[], 4s_4s_s[], 2s_2s_s[] -------- */
+      /* -------- 1,1x,1001 FMULX 2d_2d_d[], 4s_4s_s[], 2s_2s_s[] -------- */
+      if (bitQ == 0 && size == X11) return False; // implied 1d case
+      Bool isD    = (size & 1) == 1;
+      Bool isMULX = bitU == 1;
+      UInt index;
+      if      (!isD)             index = (bitH << 1) | bitL;
+      else if (isD && bitL == 0) index = bitH;
+      else return False; // sz:L == x11 => unallocated encoding
+      vassert(index < (isD ? 2 : 4));
+      IRType ity  = isD ? Ity_F64 : Ity_F32;
+      IRTemp elem = newTemp(ity);
+      UInt   mm   = (bitM << 4) | mmLO4;
+      assign(elem, getQRegLane(mm, index, ity));
+      IRTemp dupd = math_DUP_TO_V128(elem, ity);
+      // KLUDGE: FMULX is treated the same way as FMUL.  That can't be right.
+      IRTemp res  = newTempV128();
+      assign(res, triop(isD ? Iop_Mul64Fx2 : Iop_Mul32Fx4,
+                        mkexpr(mk_get_IR_rounding_mode()),
+                        getQReg128(nn), mkexpr(dupd)));
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = bitQ == 0 ? "2s" : (isD ? "2d" : "4s");
+      DIP("%s %s.%s, %s.%s, %s.%c[%u]\n", 
+          isMULX ? "fmulx" : "fmul", nameQReg128(dd), arr,
+          nameQReg128(nn), arr, nameQReg128(mm), isD ? 'd' : 's', index);
+      return True;
+   }
+
+   if ((bitU == 1 && (opcode == BITS4(0,0,0,0) || opcode == BITS4(0,1,0,0)))
+       || (bitU == 0 && opcode == BITS4(1,0,0,0))) {
+      /* -------- 1,xx,0000 MLA s/h variants only -------- */
+      /* -------- 1,xx,0100 MLS s/h variants only -------- */
+      /* -------- 0,xx,1000 MUL s/h variants only -------- */
+      Bool isMLA = opcode == BITS4(0,0,0,0);
+      Bool isMLS = opcode == BITS4(0,1,0,0);
+      UInt mm    = 32; // invalid
+      UInt ix    = 16; // invalid
+      switch (size) {
+         case X00:
+            return False; // b case is not allowed
+         case X01:
+            mm = mmLO4; ix = (bitH << 2) | (bitL << 1) | (bitM << 0); break;
+         case X10:
+            mm = (bitM << 4) | mmLO4; ix = (bitH << 1) | (bitL << 0); break;
+         case X11:
+            return False; // d case is not allowed
+         default:
+            vassert(0);
+      }
+      vassert(mm < 32 && ix < 16);
+      IROp   opMUL = mkVecMUL(size);
+      IROp   opADD = mkVecADD(size);
+      IROp   opSUB = mkVecSUB(size);
+      HChar  ch    = size == X01 ? 'h' : 's';
+      IRTemp vecM  = math_DUP_VEC_ELEM(getQReg128(mm), size, ix);
+      IRTemp vecD  = newTempV128();
+      IRTemp vecN  = newTempV128();
+      IRTemp res   = newTempV128();
+      assign(vecD, getQReg128(dd));
+      assign(vecN, getQReg128(nn));
+      IRExpr* prod = binop(opMUL, mkexpr(vecN), mkexpr(vecM));
+      if (isMLA || isMLS) {
+         assign(res, binop(isMLA ? opADD : opSUB, mkexpr(vecD), prod));
+      } else {
+         assign(res, prod);
+      }
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      // The indexed operand is mm, not dd (cf the FMLA case above).
+      DIP("%s %s.%s, %s.%s, %s.%c[%u]\n", isMLA ? "mla"
+                                                : (isMLS ? "mls" : "mul"),
+          nameQReg128(dd), arr,
+          nameQReg128(nn), arr, nameQReg128(mm), ch, ix);
+      return True;
+   }
+
+   if (opcode == BITS4(1,0,1,0)
+       || opcode == BITS4(0,0,1,0) || opcode == BITS4(0,1,1,0)) {
+      /* -------- 0,xx,1010 SMULL s/h variants only -------- */ // 0 (ks)
+      /* -------- 1,xx,1010 UMULL s/h variants only -------- */ // 0
+      /* -------- 0,xx,0010 SMLAL s/h variants only -------- */ // 1
+      /* -------- 1,xx,0010 UMLAL s/h variants only -------- */ // 1
+      /* -------- 0,xx,0110 SMLSL s/h variants only -------- */ // 2
+      /* -------- 1,xx,0110 UMLSL s/h variants only -------- */ // 2
+      /* Widens, and size refers to the narrowed lanes. */
+      UInt ks = 3;
+      switch (opcode) {
+         case BITS4(1,0,1,0): ks = 0; break;
+         case BITS4(0,0,1,0): ks = 1; break;
+         case BITS4(0,1,1,0): ks = 2; break;
+         default: vassert(0);
+      }
+      vassert(ks >= 0 && ks <= 2);
+      Bool isU = bitU == 1;
+      Bool is2 = bitQ == 1;
+      UInt mm  = 32; // invalid
+      UInt ix  = 16; // invalid
+      switch (size) {
+         case X00:
+            return False; // h_b_b[] case is not allowed
+         case X01:
+            mm = mmLO4; ix = (bitH << 2) | (bitL << 1) | (bitM << 0); break;
+         case X10:
+            mm = (bitM << 4) | mmLO4; ix = (bitH << 1) | (bitL << 0); break;
+         case X11:
+            return False; // q_d_d[] case is not allowed
+         default:
+            vassert(0);
+      }
+      vassert(mm < 32 && ix < 16);
+      IRTemp vecN  = newTempV128();
+      IRTemp vecM  = math_DUP_VEC_ELEM(getQReg128(mm), size, ix);
+      IRTemp vecD  = newTempV128();
+      assign(vecN, getQReg128(nn));
+      assign(vecD, getQReg128(dd));
+      IRTemp res = IRTemp_INVALID;
+      math_MULL_ACC(&res, is2, isU, size, "mas"[ks],
+                    vecN, vecM, ks == 0 ? IRTemp_INVALID : vecD);
+      putQReg128(dd, mkexpr(res));
+      const HChar* nm        = ks == 0 ? "mull" : (ks == 1 ? "mlal" : "mlsl");
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      HChar ch               = size == X01 ? 'h' : 's';
+      // The indexed operand is mm, not dd.
+      DIP("%c%s%s %s.%s, %s.%s, %s.%c[%u]\n",
+          isU ? 'u' : 's', nm, is2 ? "2" : "",
+          nameQReg128(dd), arrWide,
+          nameQReg128(nn), arrNarrow, nameQReg128(mm), ch, ix);
+      return True;
+   }
+
+   if (bitU == 0 
+       && (opcode == BITS4(1,0,1,1)
+           || opcode == BITS4(0,0,1,1) || opcode == BITS4(0,1,1,1))) {
+      /* -------- 0,xx,1011 SQDMULL s/h variants only -------- */ // 0 (ks)
+      /* -------- 0,xx,0011 SQDMLAL s/h variants only -------- */ // 1
+      /* -------- 0,xx,0111 SQDMLSL s/h variants only -------- */ // 2
+      /* Widens, and size refers to the narrowed lanes. */
+      UInt ks = 3;
+      switch (opcode) {
+         case BITS4(1,0,1,1): ks = 0; break;
+         case BITS4(0,0,1,1): ks = 1; break;
+         case BITS4(0,1,1,1): ks = 2; break;
+         default: vassert(0);
+      }
+      vassert(ks >= 0 && ks <= 2);
+      Bool is2 = bitQ == 1;
+      UInt mm  = 32; // invalid
+      UInt ix  = 16; // invalid
+      switch (size) {
+         case X00:
+            return False; // h_b_b[] case is not allowed
+         case X01:
+            mm = mmLO4; ix = (bitH << 2) | (bitL << 1) | (bitM << 0); break;
+         case X10:
+            mm = (bitM << 4) | mmLO4; ix = (bitH << 1) | (bitL << 0); break;
+         case X11:
+            return False; // q_d_d[] case is not allowed
+         default:
+            vassert(0);
+      }
+      vassert(mm < 32 && ix < 16);
+      IRTemp vecN, vecD, res, sat1q, sat1n, sat2q, sat2n;
+      vecN = vecD = res = sat1q = sat1n = sat2q = sat2n = IRTemp_INVALID;
+      newTempsV128_2(&vecN, &vecD);
+      assign(vecN, getQReg128(nn));
+      IRTemp vecM  = math_DUP_VEC_ELEM(getQReg128(mm), size, ix);
+      assign(vecD, getQReg128(dd));
+      math_SQDMULL_ACC(&res, &sat1q, &sat1n, &sat2q, &sat2n,
+                       is2, size, "mas"[ks],
+                       vecN, vecM, ks == 0 ? IRTemp_INVALID : vecD);
+      putQReg128(dd, mkexpr(res));
+      vassert(sat1q != IRTemp_INVALID && sat1n != IRTemp_INVALID);
+      updateQCFLAGwithDifference(sat1q, sat1n);
+      if (sat2q != IRTemp_INVALID || sat2n != IRTemp_INVALID) {
+         updateQCFLAGwithDifference(sat2q, sat2n);
+      }
+      const HChar* nm        = ks == 0 ? "sqdmull"
+                                       : (ks == 1 ? "sqdmlal" : "sqdmlsl");
+      const HChar* arrNarrow = nameArr_Q_SZ(bitQ, size);
+      const HChar* arrWide   = nameArr_Q_SZ(1,    size+1);
+      HChar ch               = size == X01 ? 'h' : 's';
+      // The indexed operand is mm, not dd.
+      DIP("%s%s %s.%s, %s.%s, %s.%c[%u]\n",
+          nm, is2 ? "2" : "",
+          nameQReg128(dd), arrWide,
+          nameQReg128(nn), arrNarrow, nameQReg128(mm), ch, ix);
+      return True;
+   }
+
+   if (opcode == BITS4(1,1,0,0) || opcode == BITS4(1,1,0,1)) {
+      /* -------- 0,xx,1100 SQDMULH s and h variants only -------- */
+      /* -------- 0,xx,1101 SQRDMULH s and h variants only -------- */
+      UInt mm  = 32; // invalid
+      UInt ix  = 16; // invalid
+      switch (size) {
+         case X00:
+            return False; // b case is not allowed
+         case X01:
+            mm = mmLO4; ix = (bitH << 2) | (bitL << 1) | (bitM << 0); break;
+         case X10:
+            mm = (bitM << 4) | mmLO4; ix = (bitH << 1) | (bitL << 0); break;
+         case X11:
+            return False; // q case is not allowed
+         default:
+            vassert(0);
+      }
+      vassert(mm < 32 && ix < 16);
+      Bool isR = opcode == BITS4(1,1,0,1);
+      IRTemp res, sat1q, sat1n, vN, vM;
+      res = sat1q = sat1n = vN = vM = IRTemp_INVALID;
+      vN = newTempV128();
+      assign(vN, getQReg128(nn));
+      vM = math_DUP_VEC_ELEM(getQReg128(mm), size, ix);
+      math_SQDMULH(&res, &sat1q, &sat1n, isR, size, vN, vM);
+      putQReg128(dd, math_MAYBE_ZERO_HI64(bitQ, res));
+      IROp opZHI = bitQ == 0 ? Iop_ZeroHI64ofV128 : Iop_INVALID;
+      updateQCFLAGwithDifferenceZHI(sat1q, sat1n, opZHI);
+      const HChar* nm  = isR ? "sqrdmulh" : "sqdmulh";
+      const HChar* arr = nameArr_Q_SZ(bitQ, size);
+      HChar ch         = size == X01 ? 'h' : 's';
+      // The indexed operand is mm, not dd.
+      DIP("%s %s.%s, %s.%s, %s.%c[%u]\n", nm,
+          nameQReg128(dd), arr, nameQReg128(nn), arr, nameQReg128(mm), ch, ix);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
/* Decoder for the AdvSIMD "crypto AES" instruction group.  Currently
   unimplemented: always returns False, so the caller treats every
   encoding in this group as undecodable.  |dres| is never written. */
static
Bool dis_AdvSIMD_crypto_aes(/*MB_OUT*/DisResult* dres, UInt insn)
{
   /* INSN is defined (per local convention) even though unused yet. */
#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
   return False;
#  undef INSN
}
+
+
/* Decoder for the AdvSIMD "crypto three-reg SHA" instruction group.
   Currently unimplemented: always returns False (undecodable), and
   |dres| is never written. */
static
Bool dis_AdvSIMD_crypto_three_reg_sha(/*MB_OUT*/DisResult* dres, UInt insn)
{
   /* INSN is defined (per local convention) even though unused yet. */
#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
   return False;
#  undef INSN
}
+
+
/* Decoder for the AdvSIMD "crypto two-reg SHA" instruction group.
   Currently unimplemented: always returns False (undecodable), and
   |dres| is never written. */
static
Bool dis_AdvSIMD_crypto_two_reg_sha(/*MB_OUT*/DisResult* dres, UInt insn)
{
   /* INSN is defined (per local convention) even though unused yet. */
#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
   return False;
#  undef INSN
}
+
+
+static
+Bool dis_AdvSIMD_fp_compare(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31  28    23 21 20 15 13   9 4
+      000 11110 ty 1  m  op 1000 n opcode2
+      The first 3 bits are really "M 0 S", but M and S are always zero.
+      Decode fields are: ty,op,opcode2
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,24) != BITS8(0,0,0,1,1,1,1,0)
+       || INSN(21,21) != 1 || INSN(13,10) != BITS4(1,0,0,0)) {
+      return False;
+   }
+   UInt ty      = INSN(23,22);
+   UInt mm      = INSN(20,16);
+   UInt op      = INSN(15,14);
+   UInt nn      = INSN(9,5);
+   UInt opcode2 = INSN(4,0);
+   vassert(ty < 4);
+
+   if (ty <= X01 && op == X00
+       && (opcode2 & BITS5(0,0,1,1,1)) == BITS5(0,0,0,0,0)) {
+      /* -------- 0x,00,00000 FCMP  d_d,   s_s -------- */
+      /* -------- 0x,00,01000 FCMP  d_#0, s_#0 -------- */
+      /* -------- 0x,00,10000 FCMPE d_d,   s_s -------- */
+      /* -------- 0x,00,11000 FCMPE d_#0, s_#0 -------- */
+      /* 31        23   20    15      9 4
+         000 11110 01 1     m 00 1000 n 10 000  FCMPE Dn, Dm
+         000 11110 01 1 00000 00 1000 n 11 000  FCMPE Dn, #0.0
+         000 11110 01 1     m 00 1000 n 00 000  FCMP  Dn, Dm
+         000 11110 01 1 00000 00 1000 n 01 000  FCMP  Dn, #0.0
+
+         000 11110 00 1     m 00 1000 n 10 000  FCMPE Sn, Sm
+         000 11110 00 1 00000 00 1000 n 11 000  FCMPE Sn, #0.0
+         000 11110 00 1     m 00 1000 n 00 000  FCMP  Sn, Sm
+         000 11110 00 1 00000 00 1000 n 01 000  FCMP  Sn, #0.0
+
+         FCMPE generates Invalid Operation exn if either arg is any kind
+         of NaN.  FCMP generates Invalid Operation exn if either arg is a
+         signalling NaN.  We ignore this detail here and produce the same
+         IR for both.
+      */
+      Bool   isD     = (ty & 1) == 1;
+      Bool   isCMPE  = (opcode2 & 16) == 16;
+      Bool   cmpZero = (opcode2 & 8) == 8;
+      IRType ity     = isD ? Ity_F64 : Ity_F32;
+      Bool   valid   = True;
+      if (cmpZero && mm != 0) valid = False;
+      if (valid) {
+         IRTemp argL  = newTemp(ity);
+         IRTemp argR  = newTemp(ity);
+         IRTemp irRes = newTemp(Ity_I32);
+         assign(argL, getQRegLO(nn, ity));
+         assign(argR,
+                cmpZero 
+                   ? (IRExpr_Const(isD ? IRConst_F64i(0) : IRConst_F32i(0)))
+                   : getQRegLO(mm, ity));
+         assign(irRes, binop(isD ? Iop_CmpF64 : Iop_CmpF32,
+                             mkexpr(argL), mkexpr(argR)));
+         IRTemp nzcv = mk_convert_IRCmpF64Result_to_NZCV(irRes);
+         IRTemp nzcv_28x0 = newTemp(Ity_I64);
+         assign(nzcv_28x0, binop(Iop_Shl64, mkexpr(nzcv), mkU8(28)));
+         setFlags_COPY(nzcv_28x0);
+         DIP("fcmp%s %s, %s\n", isCMPE ? "e" : "", nameQRegLO(nn, ity),
+             cmpZero ? "#0.0" : nameQRegLO(mm, ity));
+         return True;
+      }
+      return False;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
/* Decode and translate scalar FP conditional compare: FCCMP/FCCMPE,
   S and D precisions.  If the condition holds, NZCV is set from the
   FP comparison of Fn and Fm; otherwise NZCV is set to the immediate
   |nzcv| field.  Returns True iff IR was emitted. */
static
Bool dis_AdvSIMD_fp_conditional_compare(/*MB_OUT*/DisResult* dres, UInt insn)
{
   /* 31  28    23 21 20 15   11 9 4  3
      000 11110 ty 1  m  cond 01 n op nzcv
      The first 3 bits are really "M 0 S", but M and S are always zero.
      Decode fields are: ty,op
   */
#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
   if (INSN(31,24) != BITS8(0,0,0,1,1,1,1,0)
       || INSN(21,21) != 1 || INSN(11,10) != BITS2(0,1)) {
      return False;
   }
   UInt ty   = INSN(23,22);
   UInt mm   = INSN(20,16);
   UInt cond = INSN(15,12);
   UInt nn   = INSN(9,5);
   UInt op   = INSN(4,4);
   UInt nzcv = INSN(3,0);
   vassert(ty < 4 && op <= 1);

   if (ty <= BITS2(0,1)) {
      /* -------- 00,0 FCCMP  s_s -------- */
      /* -------- 00,1 FCCMPE s_s -------- */
      /* -------- 01,0 FCCMP  d_d -------- */
      /* -------- 01,1 FCCMPE d_d -------- */

      /* FCCMPE generates Invalid Operation exn if either arg is any kind
         of NaN.  FCCMP generates Invalid Operation exn if either arg is a
         signalling NaN.  We ignore this detail here and produce the same
         IR for both.
      */
      Bool   isD    = (ty & 1) == 1;
      Bool   isCMPE = op == 1;
      IRType ity    = isD ? Ity_F64 : Ity_F32;
      IRTemp argL   = newTemp(ity);
      IRTemp argR   = newTemp(ity);
      IRTemp irRes  = newTemp(Ity_I32);
      assign(argL,  getQRegLO(nn, ity));
      assign(argR,  getQRegLO(mm, ity));
      assign(irRes, binop(isD ? Iop_CmpF64 : Iop_CmpF32,
                          mkexpr(argL), mkexpr(argR)));
      /* condT is 1 iff |cond| holds for the current flags state. */
      IRTemp condT = newTemp(Ity_I1);
      assign(condT, unop(Iop_64to1, mk_arm64g_calculate_condition(cond)));
      IRTemp nzcvT = mk_convert_IRCmpF64Result_to_NZCV(irRes);

      /* "True" NZCV (from the comparison), positioned in bits 31:28. */
      IRTemp nzcvT_28x0 = newTemp(Ity_I64);
      assign(nzcvT_28x0, binop(Iop_Shl64, mkexpr(nzcvT), mkU8(28)));

      /* "False" NZCV (the immediate field), likewise positioned. */
      IRExpr* nzcvF_28x0 = mkU64(((ULong)nzcv) << 28);

      /* Select between the two per the condition and copy to flags. */
      IRTemp nzcv_28x0 = newTemp(Ity_I64);
      assign(nzcv_28x0, IRExpr_ITE(mkexpr(condT),
                                   mkexpr(nzcvT_28x0), nzcvF_28x0));
      setFlags_COPY(nzcv_28x0);
      DIP("fccmp%s %s, %s, #%u, %s\n", isCMPE ? "e" : "",
          nameQRegLO(nn, ity), nameQRegLO(mm, ity), nzcv, nameCC(cond));
      return True;
   }

   return False;
#  undef INSN
}
+
+
+static
+Bool dis_AdvSIMD_fp_conditional_select(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31        23 21 20 15   11 9 5
+      000 11110 ty 1  m  cond 11 n d
+      The first 3 bits are really "M 0 S", but M and S are always zero.
+      Decode fields: ty  
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,24) != BITS8(0,0,0,1,1,1,1,0) || INSN(21,21) != 1
+       || INSN(11,10) != BITS2(1,1)) {
+      return False;
+   }
+   UInt ty   = INSN(23,22);
+   UInt mm   = INSN(20,16);
+   UInt cond = INSN(15,12);
+   UInt nn   = INSN(9,5);
+   UInt dd   = INSN(4,0);
+   if (ty <= X01) {
+      /* -------- 00: FCSEL s_s -------- */
+      /* -------- 00: FCSEL d_d -------- */
+      IRType ity = ty == X01 ? Ity_F64 : Ity_F32;
+      IRTemp srcT = newTemp(ity);
+      IRTemp srcF = newTemp(ity);
+      IRTemp res  = newTemp(ity);
+      assign(srcT, getQRegLO(nn, ity));
+      assign(srcF, getQRegLO(mm, ity));
+      assign(res, IRExpr_ITE(
+                     unop(Iop_64to1, mk_arm64g_calculate_condition(cond)),
+                     mkexpr(srcT), mkexpr(srcF)));
+      putQReg128(dd, mkV128(0x0000));
+      putQRegLO(dd, mkexpr(res));
+      DIP("fcsel %s, %s, %s, %s\n",
+          nameQRegLO(dd, ity), nameQRegLO(nn, ity), nameQRegLO(mm, ity),
+          nameCC(cond));
+      return True;
+   }
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_fp_data_proc_1_source(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31  28    23 21 20     14    9 4
+      000 11110 ty 1  opcode 10000 n d
+      The first 3 bits are really "M 0 S", but M and S are always zero.
+      Decode fields: ty,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,24) != BITS8(0,0,0,1,1,1,1,0)
+       || INSN(21,21) != 1 || INSN(14,10) != BITS5(1,0,0,0,0)) {
+      return False;
+   }
+   UInt ty     = INSN(23,22);
+   UInt opcode = INSN(20,15);
+   UInt nn     = INSN(9,5);
+   UInt dd     = INSN(4,0);
+
+   if (ty <= X01 && opcode <= BITS6(0,0,0,0,1,1)) {
+      /* -------- 0x,000000: FMOV  d_d, s_s -------- */
+      /* -------- 0x,000001: FABS  d_d, s_s -------- */
+      /* -------- 0x,000010: FNEG  d_d, s_s -------- */
+      /* -------- 0x,000011: FSQRT d_d, s_s -------- */
+      IRType ity = ty == X01 ? Ity_F64 : Ity_F32;
+      IRTemp src = newTemp(ity);
+      IRTemp res = newTemp(ity);
+      const HChar* nm = "??";
+      assign(src, getQRegLO(nn, ity));
+      switch (opcode) {
+         case BITS6(0,0,0,0,0,0):
+            nm = "fmov"; assign(res, mkexpr(src)); break;
+         case BITS6(0,0,0,0,0,1):
+            nm = "fabs"; assign(res, unop(mkABSF(ity), mkexpr(src))); break;
+         case BITS6(0,0,0,0,1,0):
+            nm = "fabs"; assign(res, unop(mkNEGF(ity), mkexpr(src))); break;
+         case BITS6(0,0,0,0,1,1):
+            nm = "fsqrt";
+            assign(res, binop(mkSQRTF(ity), 
+                              mkexpr(mk_get_IR_rounding_mode()),
+                              mkexpr(src))); break;
+         default:
+            vassert(0);
+      }
+      putQReg128(dd, mkV128(0x0000));
+      putQRegLO(dd, mkexpr(res));
+      DIP("%s %s, %s\n", nm, nameQRegLO(dd, ity), nameQRegLO(nn, ity));
+      return True;
+   }
+
+   if (   (ty == X11 && (opcode == BITS6(0,0,0,1,0,0) 
+                         || opcode == BITS6(0,0,0,1,0,1)))
+       || (ty == X00 && (opcode == BITS6(0,0,0,1,1,1) 
+                         || opcode == BITS6(0,0,0,1,0,1)))
+       || (ty == X01 && (opcode == BITS6(0,0,0,1,1,1) 
+                         || opcode == BITS6(0,0,0,1,0,0)))) {
+      /* -------- 11,000100: FCVT s_h -------- */
+      /* -------- 11,000101: FCVT d_h -------- */
+      /* -------- 00,000111: FCVT h_s -------- */
+      /* -------- 00,000101: FCVT d_s -------- */
+      /* -------- 01,000111: FCVT h_d -------- */
+      /* -------- 01,000100: FCVT s_d -------- */
+      /* 31        23 21    16 14    9 4
+         000 11110 11 10001 00 10000 n d   FCVT Sd, Hn
+         --------- 11 ----- 01 ---------   FCVT Dd, Hn
+         --------- 00 ----- 11 ---------   FCVT Hd, Sn
+         --------- 00 ----- 01 ---------   FCVT Dd, Sn
+         --------- 01 ----- 11 ---------   FCVT Hd, Dn
+         --------- 01 ----- 00 ---------   FCVT Sd, Dn
+         Rounding, when dst is smaller than src, is per the FPCR.
+      */
+      UInt b2322 = ty;
+      UInt b1615 = opcode & BITS2(1,1);
+      switch ((b2322 << 2) | b1615) {
+         case BITS4(0,0,0,1):   // S -> D
+         case BITS4(1,1,0,1): { // H -> D
+            Bool   srcIsH = b2322 == BITS2(1,1);
+            IRType srcTy  = srcIsH ? Ity_F16 : Ity_F32;
+            IRTemp res    = newTemp(Ity_F64);
+            assign(res, unop(srcIsH ? Iop_F16toF64 : Iop_F32toF64,
+                             getQRegLO(nn, srcTy)));
+            putQReg128(dd, mkV128(0x0000));
+            putQRegLO(dd, mkexpr(res));
+            DIP("fcvt %s, %s\n",
+                nameQRegLO(dd, Ity_F64), nameQRegLO(nn, srcTy));
+            return True;
+         }
+         case BITS4(0,1,0,0):   // D -> S
+         case BITS4(0,1,1,1): { // D -> H
+            Bool   dstIsH = b1615 == BITS2(1,1);
+            IRType dstTy  = dstIsH ? Ity_F16 : Ity_F32;
+            IRTemp res    = newTemp(dstTy);
+            assign(res, binop(dstIsH ? Iop_F64toF16 : Iop_F64toF32,
+                              mkexpr(mk_get_IR_rounding_mode()),
+                              getQRegLO(nn, Ity_F64)));
+            putQReg128(dd, mkV128(0x0000));
+            putQRegLO(dd, mkexpr(res));
+            DIP("fcvt %s, %s\n",
+                nameQRegLO(dd, dstTy), nameQRegLO(nn, Ity_F64));
+            return True;
+         }
+         case BITS4(0,0,1,1):   // S -> H
+         case BITS4(1,1,0,0): { // H -> S
+            Bool   toH   = b1615 == BITS2(1,1);
+            IRType srcTy = toH ? Ity_F32 : Ity_F16;
+            IRType dstTy = toH ? Ity_F16 : Ity_F32;
+            IRTemp res = newTemp(dstTy);
+            if (toH) {
+               assign(res, binop(Iop_F32toF16,
+                                 mkexpr(mk_get_IR_rounding_mode()),
+                                 getQRegLO(nn, srcTy)));
+
+            } else {
+               assign(res, unop(Iop_F16toF32,
+                                getQRegLO(nn, srcTy)));
+            }
+            putQReg128(dd, mkV128(0x0000));
+            putQRegLO(dd, mkexpr(res));
+            DIP("fcvt %s, %s\n",
+                nameQRegLO(dd, dstTy), nameQRegLO(nn, srcTy));
+            return True;
+         }
+         default:
+            break;
+      }
+      /* else unhandled */
+      return False;
+   }
+
+   if (ty <= X01
+       && opcode >= BITS6(0,0,1,0,0,0) && opcode <= BITS6(0,0,1,1,1,1)
+       && opcode != BITS6(0,0,1,1,0,1)) {
+      /* -------- 0x,001000 FRINTN d_d, s_s -------- */
+      /* -------- 0x,001001 FRINTP d_d, s_s -------- */
+      /* -------- 0x,001010 FRINTM d_d, s_s -------- */
+      /* -------- 0x,001011 FRINTZ d_d, s_s -------- */
+      /* -------- 0x,001100 FRINTA d_d, s_s -------- */
+      /* -------- 0x,001110 FRINTX d_d, s_s -------- */
+      /* -------- 0x,001111 FRINTI d_d, s_s -------- */
+      /* 31        23 21   17  14    9 4
+         000 11110 0x 1001 111 10000 n d  FRINTI Fd, Fm (round per FPCR)
+                           rm
+         x==0 => S-registers, x==1 => D-registers
+         rm (17:15) encodings:
+            111 per FPCR  (FRINTI)
+            001 +inf      (FRINTP)
+            010 -inf      (FRINTM)
+            011 zero      (FRINTZ)
+            000 tieeven   (FRINTN) -- !! FIXME KLUDGED !!
+            100 tieaway   (FRINTA) -- !! FIXME KLUDGED !!
+            110 per FPCR + "exact = TRUE" (FRINTX)
+            101 unallocated
+      */
+      Bool    isD   = (ty & 1) == 1;
+      UInt    rm    = opcode & BITS6(0,0,0,1,1,1);
+      IRType  ity   = isD ? Ity_F64 : Ity_F32;
+      IRExpr* irrmE = NULL;
+      UChar   ch    = '?';
+      switch (rm) {
+         case BITS3(0,1,1): ch = 'z'; irrmE = mkU32(Irrm_ZERO); break;
+         case BITS3(0,1,0): ch = 'm'; irrmE = mkU32(Irrm_NegINF); break;
+         case BITS3(0,0,1): ch = 'p'; irrmE = mkU32(Irrm_PosINF); break;
+         // The following is a kludge.  Should be: Irrm_NEAREST_TIE_AWAY_0
+         case BITS3(1,0,0): ch = 'a'; irrmE = mkU32(Irrm_NEAREST); break;
+         // I am unsure about the following, due to the "integral exact"
+         // description in the manual.  What does it mean? (frintx, that is)
+         case BITS3(1,1,0):
+            ch = 'x'; irrmE = mkexpr(mk_get_IR_rounding_mode()); break;
+         case BITS3(1,1,1):
+            ch = 'i'; irrmE = mkexpr(mk_get_IR_rounding_mode()); break;
+         // The following is a kludge.  There's no Irrm_ value to represent
+         // this ("to nearest, with ties to even")
+         case BITS3(0,0,0): ch = 'n'; irrmE = mkU32(Irrm_NEAREST); break;
+         default: break;
+      }
+      if (irrmE) {
+         IRTemp src = newTemp(ity);
+         IRTemp dst = newTemp(ity);
+         assign(src, getQRegLO(nn, ity));
+         assign(dst, binop(isD ? Iop_RoundF64toInt : Iop_RoundF32toInt,
+                           irrmE, mkexpr(src)));
+         putQReg128(dd, mkV128(0x0000));
+         putQRegLO(dd, mkexpr(dst));
+         DIP("frint%c %s, %s\n",
+             ch, nameQRegLO(dd, ity), nameQRegLO(nn, ity));
+         return True;
+      }
+      return False;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
/* Decode and translate FP data-processing (2 source): FMUL/FDIV/FADD/
   FSUB/FMAX/FMIN/FMAXNM/FMINNM and FNMUL, S and D precisions.  Returns
   True iff IR was emitted.
   NOTE: FMAXNM/FMINNM are kludged to plain MAX/MIN vector ops (NaN
   handling differs from the architectural NM semantics -- see the
   FIXME markers below). */
static
Bool dis_AdvSIMD_fp_data_proc_2_source(/*MB_OUT*/DisResult* dres, UInt insn)
{
   /* 31  28    23 21 20 15     11 9 4
      000 11110 ty 1  m  opcode 10 n d
      The first 3 bits are really "M 0 S", but M and S are always zero.
      Decode fields: ty, opcode
   */
#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
   if (INSN(31,24) != BITS8(0,0,0,1,1,1,1,0)
       || INSN(21,21) != 1 || INSN(11,10) != BITS2(1,0)) {
      return False;
   }
   UInt ty     = INSN(23,22);
   UInt mm     = INSN(20,16);
   UInt opcode = INSN(15,12);
   UInt nn     = INSN(9,5);
   UInt dd     = INSN(4,0);

   if (ty <= X01 && opcode <= BITS4(0,1,1,1)) {
      /* ------- 0x,0000: FMUL d_d, s_s ------- */
      /* ------- 0x,0001: FDIV d_d, s_s ------- */
      /* ------- 0x,0010: FADD d_d, s_s ------- */
      /* ------- 0x,0011: FSUB d_d, s_s ------- */
      /* ------- 0x,0100: FMAX d_d, s_s ------- */
      /* ------- 0x,0101: FMIN d_d, s_s ------- */
      /* ------- 0x,0110: FMAXNM d_d, s_s ------- (FIXME KLUDGED) */
      /* ------- 0x,0111: FMINNM d_d, s_s ------- (FIXME KLUDGED) */
      IRType ity = ty == X00 ? Ity_F32 : Ity_F64;
      IROp   iop = Iop_INVALID;
      const HChar* nm = "???";
      /* ty+2 maps 00->size 2 (32-bit lanes), 01->size 3 (64-bit lanes)
         for the vector MAXF/MINF op selectors. */
      switch (opcode) {
         case BITS4(0,0,0,0): nm = "fmul"; iop = mkMULF(ity); break;
         case BITS4(0,0,0,1): nm = "fdiv"; iop = mkDIVF(ity); break;
         case BITS4(0,0,1,0): nm = "fadd"; iop = mkADDF(ity); break;
         case BITS4(0,0,1,1): nm = "fsub"; iop = mkSUBF(ity); break;
         case BITS4(0,1,0,0): nm = "fmax"; iop = mkVecMAXF(ty+2); break;
         case BITS4(0,1,0,1): nm = "fmin"; iop = mkVecMINF(ty+2); break;
         case BITS4(0,1,1,0): nm = "fmaxnm"; iop = mkVecMAXF(ty+2); break; //!!
         case BITS4(0,1,1,1): nm = "fminnm"; iop = mkVecMINF(ty+2); break; //!!
         default: vassert(0);
      }
      if (opcode <= BITS4(0,0,1,1)) {
         /* Scalar rounding ops: triop with the FPCR rounding mode. */
         // This is really not good code.  TODO: avoid width-changing
         IRTemp res = newTemp(ity);
         assign(res, triop(iop, mkexpr(mk_get_IR_rounding_mode()),
                                getQRegLO(nn, ity), getQRegLO(mm, ity)));
         putQReg128(dd, mkV128(0));
         putQRegLO(dd, mkexpr(res));
      } else {
         /* MAX/MIN family: done as a full-vector op, then the lanes
            above the scalar result are zeroed out. */
         putQReg128(dd, unop(mkVecZEROHIxxOFV128(ty+2),
                             binop(iop, getQReg128(nn), getQReg128(mm))));
      }
      DIP("%s %s, %s, %s\n",
          nm, nameQRegLO(dd, ity), nameQRegLO(nn, ity), nameQRegLO(mm, ity));
      return True;
   }

   if (ty <= X01 && opcode == BITS4(1,0,0,0)) {
      /* ------- 0x,1000: FNMUL d_d, s_s ------- */
      /* Computed as -(Fn * Fm), rounded per FPCR before negation. */
      IRType ity  = ty == X00 ? Ity_F32 : Ity_F64;
      IROp   iop  = mkMULF(ity);
      IROp   iopn = mkNEGF(ity);
      const HChar* nm = "fnmul";
      IRExpr* resE = unop(iopn,
                          triop(iop, mkexpr(mk_get_IR_rounding_mode()),
                                getQRegLO(nn, ity), getQRegLO(mm, ity)));
      IRTemp  res  = newTemp(ity);
      assign(res, resE);
      putQReg128(dd, mkV128(0));
      putQRegLO(dd, mkexpr(res));
      DIP("%s %s, %s, %s\n",
          nm, nameQRegLO(dd, ity), nameQRegLO(nn, ity), nameQRegLO(mm, ity));
      return True;
   }

   return False;
#  undef INSN
}
+
+
+static
+Bool dis_AdvSIMD_fp_data_proc_3_source(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31  28    23 21 20 15 14 9 4
+      000 11111 ty o1 m  o0 a  n d
+      The first 3 bits are really "M 0 S", but M and S are always zero.
+      Decode fields: ty,o1,o0
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,24) != BITS8(0,0,0,1,1,1,1,1)) {
+      return False;
+   }
+   UInt ty    = INSN(23,22);
+   UInt bitO1 = INSN(21,21);
+   UInt mm    = INSN(20,16);
+   UInt bitO0 = INSN(15,15);
+   UInt aa    = INSN(14,10);
+   UInt nn    = INSN(9,5);
+   UInt dd    = INSN(4,0);
+   vassert(ty < 4);
+
+   if (ty <= X01) {
+      /* -------- 0x,0,0 FMADD  d_d_d_d, s_s_s_s -------- */
+      /* -------- 0x,0,1 FMSUB  d_d_d_d, s_s_s_s -------- */
+      /* -------- 0x,1,0 FNMADD d_d_d_d, s_s_s_s -------- */
+      /* -------- 0x,1,1 FNMSUB d_d_d_d, s_s_s_s -------- */
+      /* -------------------- F{N}M{ADD,SUB} -------------------- */
+      /* 31          22   20 15 14 9 4   ix
+         000 11111 0 sz 0 m  0  a  n d   0   FMADD  Fd,Fn,Fm,Fa
+         000 11111 0 sz 0 m  1  a  n d   1   FMSUB  Fd,Fn,Fm,Fa
+         000 11111 0 sz 1 m  0  a  n d   2   FNMADD Fd,Fn,Fm,Fa
+         000 11111 0 sz 1 m  1  a  n d   3   FNMSUB Fd,Fn,Fm,Fa
+         where Fx=Dx when sz=1, Fx=Sx when sz=0
+
+                  -----SPEC------    ----IMPL----
+         fmadd       a +    n * m    a + n * m
+         fmsub       a + (-n) * m    a - n * m
+         fnmadd   (-a) + (-n) * m    -(a + n * m)
+         fnmsub   (-a) +    n * m    -(a - n * m)
+      */
+      Bool    isD   = (ty & 1) == 1;
+      UInt    ix    = (bitO1 << 1) | bitO0;
+      IRType  ity   = isD ? Ity_F64 : Ity_F32;
+      IROp    opADD = mkADDF(ity);
+      IROp    opSUB = mkSUBF(ity);
+      IROp    opMUL = mkMULF(ity);
+      IROp    opNEG = mkNEGF(ity);
+      IRTemp  res   = newTemp(ity);
+      IRExpr* eA    = getQRegLO(aa, ity);
+      IRExpr* eN    = getQRegLO(nn, ity);
+      IRExpr* eM    = getQRegLO(mm, ity);
+      IRExpr* rm    = mkexpr(mk_get_IR_rounding_mode());
+      IRExpr* eNxM  = triop(opMUL, rm, eN, eM);
+      switch (ix) {
+         case 0:  assign(res, triop(opADD, rm, eA, eNxM)); break;
+         case 1:  assign(res, triop(opSUB, rm, eA, eNxM)); break;
+         case 2:  assign(res, unop(opNEG, triop(opADD, rm, eA, eNxM))); break;
+         case 3:  assign(res, unop(opNEG, triop(opSUB, rm, eA, eNxM))); break;
+         default: vassert(0);
+      }
+      putQReg128(dd, mkV128(0x0000));
+      putQRegLO(dd, mkexpr(res));
+      const HChar* names[4] = { "fmadd", "fmsub", "fnmadd", "fnmsub" };
+      DIP("%s %s, %s, %s, %s\n",
+          names[ix], nameQRegLO(dd, ity), nameQRegLO(nn, ity),
+                     nameQRegLO(mm, ity), nameQRegLO(aa, ity));
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_fp_immediate(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31  28    23 21 20   12  9    4
+      000 11110 ty 1  imm8 100 imm5 d
+      The first 3 bits are really "M 0 S", but M and S are always zero.
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(31,24) != BITS8(0,0,0,1,1,1,1,0)
+       || INSN(21,21) != 1 || INSN(12,10) != BITS3(1,0,0)) {
+      return False;
+   }
+   UInt ty     = INSN(23,22);
+   UInt imm8   = INSN(20,13);
+   UInt imm5   = INSN(9,5);
+   UInt dd     = INSN(4,0);
+
+   /* ------- 00,00000: FMOV s_imm ------- */
+   /* ------- 01,00000: FMOV d_imm ------- */
+   if (ty <= X01 && imm5 == BITS5(0,0,0,0,0)) {
+      Bool  isD  = (ty & 1) == 1;
+      ULong imm  = VFPExpandImm(imm8, isD ? 64 : 32);
+      if (!isD) {
+         vassert(0 == (imm & 0xFFFFFFFF00000000ULL));
+      }
+      putQReg128(dd, mkV128(0));
+      putQRegLO(dd, isD ? mkU64(imm) : mkU32(imm & 0xFFFFFFFFULL));
+      DIP("fmov %s, #0x%llx\n",
+          nameQRegLO(dd, isD ? Ity_F64 : Ity_F32), imm);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_fp_to_from_fixedp_conv(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   /* 31 30 29 28    23   21 20    18     15    9 4
+      sf  0  0 11110 type 0  rmode opcode scale n d
+      The first 3 bits are really "sf 0 S", but S is always zero.
+      Decode fields: sf,type,rmode,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(30,29) != BITS2(0,0)
+       || INSN(28,24) != BITS5(1,1,1,1,0)
+       || INSN(21,21) != 0) {
+      return False;
+   }
+   UInt bitSF = INSN(31,31);
+   UInt ty    = INSN(23,22); // type
+   UInt rm    = INSN(20,19); // rmode
+   UInt op    = INSN(18,16); // opcode
+   UInt sc    = INSN(15,10); // scale
+   UInt nn    = INSN(9,5);
+   UInt dd    = INSN(4,0);
+
+   if (ty <= X01 && rm == X11 
+       && (op == BITS3(0,0,0) || op == BITS3(0,0,1))) {
+      /* -------- (ix) sf ty rm opc -------- */
+      /* -------- 0    0  00 11 000: FCVTZS w_s_#fbits -------- */
+      /* -------- 1    0  01 11 000: FCVTZS w_d_#fbits -------- */
+      /* -------- 2    1  00 11 000: FCVTZS x_s_#fbits -------- */
+      /* -------- 3    1  01 11 000: FCVTZS x_d_#fbits -------- */
+
+      /* -------- 4    0  00 11 001: FCVTZU w_s_#fbits -------- */
+      /* -------- 5    0  01 11 001: FCVTZU w_d_#fbits -------- */
+      /* -------- 6    1  00 11 001: FCVTZU x_s_#fbits -------- */
+      /* -------- 7    1  01 11 001: FCVTZU x_d_#fbits -------- */
+      Bool isI64 = bitSF == 1;
+      Bool isF64 = (ty & 1) == 1;
+      Bool isU   = (op & 1) == 1;
+      UInt ix    = (isU ? 4 : 0) | (isI64 ? 2 : 0) | (isF64 ? 1 : 0);
+
+      Int fbits = 64 - sc;
+      vassert(fbits >= 1 && fbits <= (isI64 ? 64 : 32));      
+
+      Double  scale  = two_to_the_plus(fbits);
+      IRExpr* scaleE = isF64 ? IRExpr_Const(IRConst_F64(scale))
+                             : IRExpr_Const(IRConst_F32( (Float)scale ));
+      IROp    opMUL  = isF64 ? Iop_MulF64 : Iop_MulF32;
+
+      const IROp ops[8]
+        = { Iop_F32toI32S, Iop_F64toI32S, Iop_F32toI64S, Iop_F64toI64S,
+            Iop_F32toI32U, Iop_F64toI32U, Iop_F32toI64U, Iop_F64toI64U };
+      IRTemp irrm = newTemp(Ity_I32);
+      assign(irrm, mkU32(Irrm_ZERO));
+
+      IRExpr* src = getQRegLO(nn, isF64 ? Ity_F64 : Ity_F32);
+      IRExpr* res = binop(ops[ix], mkexpr(irrm),
+                                   triop(opMUL, mkexpr(irrm), src, scaleE));
+      putIRegOrZR(isI64, dd, res);
+
+      DIP("fcvtz%c %s, %s, #%d\n",
+          isU ? 'u' : 's', nameIRegOrZR(isI64, dd),
+          nameQRegLO(nn, isF64 ? Ity_F64 : Ity_F32), fbits);
+      return True;
+   }
+
+   /* ------ sf,ty,rm,opc ------ */
+   /* ------ x,0x,00,010  SCVTF s/d, w/x, #fbits  ------ */
+   /* ------ x,0x,00,011  UCVTF s/d, w/x, #fbits  ------ */
+   /* (ix) sf  S 28    ty   rm opc 15    9 4
+      0    0 0 0 11110 00 0 00 010 scale n d  SCVTF Sd, Wn, #fbits
+      1    0 0 0 11110 01 0 00 010 scale n d  SCVTF Dd, Wn, #fbits
+      2    1 0 0 11110 00 0 00 010 scale n d  SCVTF Sd, Xn, #fbits
+      3    1 0 0 11110 01 0 00 010 scale n d  SCVTF Dd, Xn, #fbits
+
+      4    0 0 0 11110 00 0 00 011 scale n d  UCVTF Sd, Wn, #fbits
+      5    0 0 0 11110 01 0 00 011 scale n d  UCVTF Dd, Wn, #fbits
+      6    1 0 0 11110 00 0 00 011 scale n d  UCVTF Sd, Xn, #fbits
+      7    1 0 0 11110 01 0 00 011 scale n d  UCVTF Dd, Xn, #fbits
+
+      These are signed/unsigned conversion from integer registers to
+      FP registers, all 4 32/64-bit combinations, rounded per FPCR,
+      scaled per |scale|.
+   */
+   if (ty <= X01 && rm == X00 
+       && (op == BITS3(0,1,0) || op == BITS3(0,1,1))
+       && (bitSF == 1 || ((sc >> 5) & 1) == 1)) {
+      Bool isI64 = bitSF == 1;
+      Bool isF64 = (ty & 1) == 1;
+      Bool isU   = (op & 1) == 1;
+      UInt ix    = (isU ? 4 : 0) | (isI64 ? 2 : 0) | (isF64 ? 1 : 0);
+
+      Int fbits = 64 - sc;
+      vassert(fbits >= 1 && fbits <= (isI64 ? 64 : 32));      
+
+      Double  scale  = two_to_the_minus(fbits);
+      IRExpr* scaleE = isF64 ? IRExpr_Const(IRConst_F64(scale))
+                             : IRExpr_Const(IRConst_F32( (Float)scale ));
+      IROp    opMUL  = isF64 ? Iop_MulF64 : Iop_MulF32;
+
+      const IROp ops[8]
+        = { Iop_I32StoF32, Iop_I32StoF64, Iop_I64StoF32, Iop_I64StoF64,
+            Iop_I32UtoF32, Iop_I32UtoF64, Iop_I64UtoF32, Iop_I64UtoF64 };
+      IRExpr* src = getIRegOrZR(isI64, nn);
+      IRExpr* res = (isF64 && !isI64) 
+                       ? unop(ops[ix], src)
+                       : binop(ops[ix],
+                               mkexpr(mk_get_IR_rounding_mode()), src);
+      putQReg128(dd, mkV128(0));
+      putQRegLO(dd, triop(opMUL, mkU32(Irrm_NEAREST), res, scaleE));
+
+      DIP("%ccvtf %s, %s, #%d\n",
+          isU ? 'u' : 's', nameQRegLO(dd, isF64 ? Ity_F64 : Ity_F32), 
+          nameIRegOrZR(isI64, nn), fbits);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_AdvSIMD_fp_to_from_int_conv(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* 31 30 29 28    23   21 20    18     15     9 4
+      sf  0  0 11110 type 1  rmode opcode 000000 n d
+      The first 3 bits are really "sf 0 S", but S is always zero.
+      Decode fields: sf,type,rmode,opcode
+   */
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+   if (INSN(30,29) != BITS2(0,0)
+       || INSN(28,24) != BITS5(1,1,1,1,0)
+       || INSN(21,21) != 1
+       || INSN(15,10) != BITS6(0,0,0,0,0,0)) {
+      return False;
+   }
+   UInt bitSF = INSN(31,31);
+   UInt ty    = INSN(23,22); // type
+   UInt rm    = INSN(20,19); // rmode
+   UInt op    = INSN(18,16); // opcode
+   UInt nn    = INSN(9,5);
+   UInt dd    = INSN(4,0);
+
+   // op = 000, 001
+   /* -------- FCVT{N,P,M,Z,A}{S,U} (scalar, integer) -------- */
+   /*    30       23   20 18  15     9 4
+      sf 00 11110 0x 1 00 000 000000 n d  FCVTNS Rd, Fn (round to
+      sf 00 11110 0x 1 00 001 000000 n d  FCVTNU Rd, Fn  nearest)
+      ---------------- 01 --------------  FCVTP-------- (round to +inf)
+      ---------------- 10 --------------  FCVTM-------- (round to -inf)
+      ---------------- 11 --------------  FCVTZ-------- (round to zero)
+      ---------------- 00 100 ----------  FCVTAS------- (nearest, ties away)
+      ---------------- 00 101 ----------  FCVTAU------- (nearest, ties away)
+
+      Rd is Xd when sf==1, Wd when sf==0
+      Fn is Dn when x==1, Sn when x==0
+      20:19 carry the rounding mode, using the same encoding as FPCR
+   */
+   if (ty <= X01
+       && (   ((op == BITS3(0,0,0) || op == BITS3(0,0,1)) && True)
+           || ((op == BITS3(1,0,0) || op == BITS3(1,0,1)) && rm == BITS2(0,0))
+          )
+      ) {
+      Bool isI64 = bitSF == 1;
+      Bool isF64 = (ty & 1) == 1;
+      Bool isU   = (op & 1) == 1;
+      /* Decide on the IR rounding mode to use. */
+      IRRoundingMode irrm = 8; /*impossible*/
+      HChar ch = '?';
+      if (op == BITS3(0,0,0) || op == BITS3(0,0,1)) {
+         switch (rm) {
+            case BITS2(0,0): ch = 'n'; irrm = Irrm_NEAREST; break;
+            case BITS2(0,1): ch = 'p'; irrm = Irrm_PosINF; break;
+            case BITS2(1,0): ch = 'm'; irrm = Irrm_NegINF; break;
+            case BITS2(1,1): ch = 'z'; irrm = Irrm_ZERO; break;
+            default: vassert(0);
+         }
+      } else {
+         vassert(op == BITS3(1,0,0) || op == BITS3(1,0,1));
+         switch (rm) {
+            case BITS2(0,0): ch = 'a'; irrm = Irrm_NEAREST; break;
+            default: vassert(0);
+         }
+      }
+      vassert(irrm != 8);
+      /* Decide on the conversion primop, based on the source size,
+         dest size and signedness (8 possibilities).  Case coding:
+            F32 ->s I32   0
+            F32 ->u I32   1
+            F32 ->s I64   2
+            F32 ->u I64   3
+            F64 ->s I32   4
+            F64 ->u I32   5
+            F64 ->s I64   6
+            F64 ->u I64   7
+      */
+      UInt ix = (isF64 ? 4 : 0) | (isI64 ? 2 : 0) | (isU ? 1 : 0);
+      vassert(ix < 8);
+      const IROp iops[8] 
+         = { Iop_F32toI32S, Iop_F32toI32U, Iop_F32toI64S, Iop_F32toI64U,
+             Iop_F64toI32S, Iop_F64toI32U, Iop_F64toI64S, Iop_F64toI64U };
+      IROp iop = iops[ix];
+      // A bit of ATCery: bounce all cases we haven't seen an example of.
+      if (/* F32toI32S */
+             (iop == Iop_F32toI32S && irrm == Irrm_ZERO)   /* FCVTZS Wd,Sn */
+          || (iop == Iop_F32toI32S && irrm == Irrm_NegINF) /* FCVTMS Wd,Sn */
+          || (iop == Iop_F32toI32S && irrm == Irrm_PosINF) /* FCVTPS Wd,Sn */
+          || (iop == Iop_F32toI32S && irrm == Irrm_NEAREST)/* FCVT{A,N}S W,S */
+          /* F32toI32U */
+          || (iop == Iop_F32toI32U && irrm == Irrm_ZERO)   /* FCVTZU Wd,Sn */
+          || (iop == Iop_F32toI32U && irrm == Irrm_NegINF) /* FCVTMU Wd,Sn */
+          || (iop == Iop_F32toI32U && irrm == Irrm_PosINF) /* FCVTPU Wd,Sn */
+          || (iop == Iop_F32toI32U && irrm == Irrm_NEAREST)/* FCVT{A,N}U W,S */
+          /* F32toI64S */
+          || (iop == Iop_F32toI64S && irrm == Irrm_ZERO)   /* FCVTZS Xd,Sn */
+          || (iop == Iop_F32toI64S && irrm == Irrm_NegINF) /* FCVTMS Xd,Sn */
+          || (iop == Iop_F32toI64S && irrm == Irrm_PosINF) /* FCVTPS Xd,Sn */
+          || (iop == Iop_F32toI64S && irrm == Irrm_NEAREST)/* FCVT{A,N}S X,S */
+          /* F32toI64U */
+          || (iop == Iop_F32toI64U && irrm == Irrm_ZERO)   /* FCVTZU Xd,Sn */
+          || (iop == Iop_F32toI64U && irrm == Irrm_NegINF) /* FCVTMU Xd,Sn */
+          || (iop == Iop_F32toI64U && irrm == Irrm_PosINF) /* FCVTPU Xd,Sn */
+          || (iop == Iop_F32toI64U && irrm == Irrm_NEAREST)/* FCVT{A,N}U X,S */
+          /* F64toI32S */
+          || (iop == Iop_F64toI32S && irrm == Irrm_ZERO)   /* FCVTZS Wd,Dn */
+          || (iop == Iop_F64toI32S && irrm == Irrm_NegINF) /* FCVTMS Wd,Dn */
+          || (iop == Iop_F64toI32S && irrm == Irrm_PosINF) /* FCVTPS Wd,Dn */
+          || (iop == Iop_F64toI32S && irrm == Irrm_NEAREST)/* FCVT{A,N}S W,D */
+          /* F64toI32U */
+          || (iop == Iop_F64toI32U && irrm == Irrm_ZERO)   /* FCVTZU Wd,Dn */
+          || (iop == Iop_F64toI32U && irrm == Irrm_NegINF) /* FCVTMU Wd,Dn */
+          || (iop == Iop_F64toI32U && irrm == Irrm_PosINF) /* FCVTPU Wd,Dn */
+          || (iop == Iop_F64toI32U && irrm == Irrm_NEAREST)/* FCVT{A,N}U W,D */
+          /* F64toI64S */
+          || (iop == Iop_F64toI64S && irrm == Irrm_ZERO)   /* FCVTZS Xd,Dn */
+          || (iop == Iop_F64toI64S && irrm == Irrm_NegINF) /* FCVTMS Xd,Dn */
+          || (iop == Iop_F64toI64S && irrm == Irrm_PosINF) /* FCVTPS Xd,Dn */
+          || (iop == Iop_F64toI64S && irrm == Irrm_NEAREST)/* FCVT{A,N}S X,D */
+          /* F64toI64U */
+          || (iop == Iop_F64toI64U && irrm == Irrm_ZERO)   /* FCVTZU Xd,Dn */
+          || (iop == Iop_F64toI64U && irrm == Irrm_NegINF) /* FCVTMU Xd,Dn */
+          || (iop == Iop_F64toI64U && irrm == Irrm_PosINF) /* FCVTPU Xd,Dn */
+          || (iop == Iop_F64toI64U && irrm == Irrm_NEAREST)/* FCVT{A,N}U X,D */
+         ) {
+        /* validated */
+      } else {
+        return False;
+      }
+      IRType srcTy  = isF64 ? Ity_F64 : Ity_F32;
+      IRType dstTy  = isI64 ? Ity_I64 : Ity_I32;
+      IRTemp src    = newTemp(srcTy);
+      IRTemp dst    = newTemp(dstTy);
+      assign(src, getQRegLO(nn, srcTy));
+      assign(dst, binop(iop, mkU32(irrm), mkexpr(src)));
+      putIRegOrZR(isI64, dd, mkexpr(dst));
+      DIP("fcvt%c%c %s, %s\n", ch, isU ? 'u' : 's',
+          nameIRegOrZR(isI64, dd), nameQRegLO(nn, srcTy));
+      return True;
+   }
+
+   // op = 010, 011
+   /* -------------- {S,U}CVTF (scalar, integer) -------------- */
+   /* (ix) sf  S 28    ty   rm op  15     9 4
+      0    0 0 0 11110 00 1 00 010 000000 n d  SCVTF Sd, Wn
+      1    0 0 0 11110 01 1 00 010 000000 n d  SCVTF Dd, Wn
+      2    1 0 0 11110 00 1 00 010 000000 n d  SCVTF Sd, Xn
+      3    1 0 0 11110 01 1 00 010 000000 n d  SCVTF Dd, Xn
+
+      4    0 0 0 11110 00 1 00 011 000000 n d  UCVTF Sd, Wn
+      5    0 0 0 11110 01 1 00 011 000000 n d  UCVTF Dd, Wn
+      6    1 0 0 11110 00 1 00 011 000000 n d  UCVTF Sd, Xn
+      7    1 0 0 11110 01 1 00 011 000000 n d  UCVTF Dd, Xn
+
+      These are signed/unsigned conversion from integer registers to
+      FP registers, all 4 32/64-bit combinations, rounded per FPCR.
+   */
+   if (ty <= X01 && rm == X00 && (op == BITS3(0,1,0) || op == BITS3(0,1,1))) {
+      Bool isI64 = bitSF == 1;
+      Bool isF64 = (ty & 1) == 1;
+      Bool isU   = (op & 1) == 1;
+      UInt ix    = (isU ? 4 : 0) | (isI64 ? 2 : 0) | (isF64 ? 1 : 0);
+      const IROp ops[8]
+        = { Iop_I32StoF32, Iop_I32StoF64, Iop_I64StoF32, Iop_I64StoF64,
+            Iop_I32UtoF32, Iop_I32UtoF64, Iop_I64UtoF32, Iop_I64UtoF64 };
+      IRExpr* src = getIRegOrZR(isI64, nn);
+      IRExpr* res = (isF64 && !isI64) 
+                       ? unop(ops[ix], src)
+                       : binop(ops[ix],
+                               mkexpr(mk_get_IR_rounding_mode()), src);
+      putQReg128(dd, mkV128(0));
+      putQRegLO(dd, res);
+      DIP("%ccvtf %s, %s\n",
+          isU ? 'u' : 's', nameQRegLO(dd, isF64 ? Ity_F64 : Ity_F32), 
+          nameIRegOrZR(isI64, nn));
+      return True;
+   }
+
+   // op = 110, 111
+   /* -------- FMOV (general) -------- */
+   /* case sf  S       ty   rm op  15     9 4
+       (1) 0 0 0 11110 00 1 00 111 000000 n d     FMOV Sd,      Wn
+       (2) 1 0 0 11110 01 1 00 111 000000 n d     FMOV Dd,      Xn
+       (3) 1 0 0 11110 10 1 01 111 000000 n d     FMOV Vd.D[1], Xn
+
+       (4) 0 0 0 11110 00 1 00 110 000000 n d     FMOV Wd, Sn
+       (5) 1 0 0 11110 01 1 00 110 000000 n d     FMOV Xd, Dn
+       (6) 1 0 0 11110 10 1 01 110 000000 n d     FMOV Xd, Vn.D[1]
+   */
+   if (1) {
+      UInt ix = 0; // case
+      if (bitSF == 0) {
+         if (ty == BITS2(0,0) && rm == BITS2(0,0) && op == BITS3(1,1,1))
+            ix = 1;
+         else
+         if (ty == BITS2(0,0) && rm == BITS2(0,0) && op == BITS3(1,1,0))
+            ix = 4;
+      } else {
+         vassert(bitSF == 1);
+         if (ty == BITS2(0,1) && rm == BITS2(0,0) && op == BITS3(1,1,1))
+            ix = 2;
+         else
+         if (ty == BITS2(0,1) && rm == BITS2(0,0) && op == BITS3(1,1,0))
+            ix = 5;
+         else
+         if (ty == BITS2(1,0) && rm == BITS2(0,1) && op == BITS3(1,1,1))
+            ix = 3;
+         else
+         if (ty == BITS2(1,0) && rm == BITS2(0,1) && op == BITS3(1,1,0))
+            ix = 6;
+      }
+      if (ix > 0) {
+         switch (ix) {
+            case 1:
+               putQReg128(dd, mkV128(0));
+               putQRegLO(dd, getIReg32orZR(nn));
+               DIP("fmov s%u, w%u\n", dd, nn);
+               break;
+            case 2:
+               putQReg128(dd, mkV128(0));
+               putQRegLO(dd, getIReg64orZR(nn));
+               DIP("fmov d%u, x%u\n", dd, nn);
+               break;
+            case 3:
+               putQRegHI64(dd, getIReg64orZR(nn));
+               DIP("fmov v%u.d[1], x%u\n", dd, nn);
+               break;
+            case 4:
+               putIReg32orZR(dd, getQRegLO(nn, Ity_I32));
+               DIP("fmov w%u, s%u\n", dd, nn);
+               break;
+            case 5:
+               putIReg64orZR(dd, getQRegLO(nn, Ity_I64));
+               DIP("fmov x%u, d%u\n", dd, nn);
+               break;
+            case 6:
+               putIReg64orZR(dd, getQRegHI64(nn));
+               DIP("fmov x%u, v%u.d[1]\n", dd, nn);
+               break;
+            default:
+               vassert(0);
+         }
+         return True;
+      }
+      /* undecodable; fall through */
+   }
+
+   return False;
+#  undef INSN
+}
+
+
+static
+Bool dis_ARM64_simd_and_fp(/*MB_OUT*/DisResult* dres, UInt insn)
+{
+   /* Top-level decoder for the SIMD-and-FP instruction space.  Each
+      entry below handles one encoding group; they are tried in a
+      fixed order and the first one that accepts |insn| wins. */
+   typedef Bool (*SubDecoder)(/*MB_OUT*/DisResult*, UInt);
+   static const SubDecoder subDecoders[] = {
+      dis_AdvSIMD_EXT,
+      dis_AdvSIMD_TBL_TBX,
+      dis_AdvSIMD_ZIP_UZP_TRN,
+      dis_AdvSIMD_across_lanes,
+      dis_AdvSIMD_copy,
+      dis_AdvSIMD_modified_immediate,
+      dis_AdvSIMD_scalar_copy,
+      dis_AdvSIMD_scalar_pairwise,
+      dis_AdvSIMD_scalar_shift_by_imm,
+      dis_AdvSIMD_scalar_three_different,
+      dis_AdvSIMD_scalar_three_same,
+      dis_AdvSIMD_scalar_two_reg_misc,
+      dis_AdvSIMD_scalar_x_indexed_element,
+      dis_AdvSIMD_shift_by_immediate,
+      dis_AdvSIMD_three_different,
+      dis_AdvSIMD_three_same,
+      dis_AdvSIMD_two_reg_misc,
+      dis_AdvSIMD_vector_x_indexed_elem,
+      dis_AdvSIMD_crypto_aes,
+      dis_AdvSIMD_crypto_three_reg_sha,
+      dis_AdvSIMD_crypto_two_reg_sha,
+      dis_AdvSIMD_fp_compare,
+      dis_AdvSIMD_fp_conditional_compare,
+      dis_AdvSIMD_fp_conditional_select,
+      dis_AdvSIMD_fp_data_proc_1_source,
+      dis_AdvSIMD_fp_data_proc_2_source,
+      dis_AdvSIMD_fp_data_proc_3_source,
+      dis_AdvSIMD_fp_immediate,
+      dis_AdvSIMD_fp_to_from_fixedp_conv,
+      dis_AdvSIMD_fp_to_from_int_conv
+   };
+   UInt i;
+   for (i = 0; i < sizeof(subDecoders) / sizeof(subDecoders[0]); i++) {
+      /* Any individual decode succeeding is the uncommon case. */
+      if (UNLIKELY(subDecoders[i](dres, insn)))
+         return True;
+   }
+   return False;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Disassemble a single ARM64 instruction               ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single ARM64 instruction into IR.  The instruction
+   is located at |guest_instr| and has guest IP of
+   |guest_PC_curr_instr|, which will have been set before the call
+   here.  Returns True iff the instruction was decoded, in which case
+   *dres will be set accordingly, or False, in which case *dres should
+   be ignored by the caller. */
+
+static
+Bool disInstr_ARM64_WRK (
+        /*MB_OUT*/DisResult* dres,
+        Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+        Bool         resteerCisOk,
+        void*        callback_opaque,
+        const UChar* guest_instr,
+        const VexArchInfo* archinfo,
+        const VexAbiInfo*  abiinfo
+     )
+{
+   /* NOTE: the resteer arguments (resteerOkFn, resteerCisOk,
+      callback_opaque) and |abiinfo| are not referenced anywhere in
+      this function at present; |archinfo| is only forwarded to the
+      branch/system decoder. */
+   // A macro to fish bits out of 'insn'.
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+
+//ZZ    DisResult dres;
+//ZZ    UInt      insn;
+//ZZ    //Bool      allow_VFP = False;
+//ZZ    //UInt      hwcaps = archinfo->hwcaps;
+//ZZ    IRTemp    condT; /* :: Ity_I32 */
+//ZZ    UInt      summary;
+//ZZ    HChar     dis_buf[128];  // big enough to hold LDMIA etc text
+//ZZ 
+//ZZ    /* What insn variants are we supporting today? */
+//ZZ    //allow_VFP  = (0 != (hwcaps & VEX_HWCAPS_ARM_VFP));
+//ZZ    // etc etc
+
+   /* Set result defaults: "insn OK, 4 bytes long, keep decoding".
+      Sub-decoders rely on these being in place and only modify them
+      for control-flow insns. */
+   dres->whatNext    = Dis_Continue;
+   dres->len         = 4;
+   dres->continueAt  = 0;
+   dres->jk_StopHere = Ijk_INVALID;
+
+   /* At least this is simple on ARM64: insns are all 4 bytes long, and
+      4-aligned.  So just fish the whole thing out of memory right now
+      and have done. */
+   UInt insn = getUIntLittleEndianly( guest_instr );
+
+   if (0) vex_printf("insn: 0x%x\n", insn);  // debug aid; normally disabled
+
+   DIP("\t(arm64) 0x%llx:  ", (ULong)guest_PC_curr_instr);
+
+   vassert(0 == (guest_PC_curr_instr & 3ULL));
+
+   /* ----------------------------------------------------------- */
+
+   /* Spot "Special" instructions (see comment at top of file).
+      Each special consists of the 16-byte preamble below followed by
+      one 4-byte marker insn, 20 bytes in total. */
+   {
+      const UChar* code = guest_instr;
+      /* Spot the 16-byte preamble: 
+            93CC0D8C   ror x12, x12, #3
+            93CC358C   ror x12, x12, #13
+            93CCCD8C   ror x12, x12, #51
+            93CCF58C   ror x12, x12, #61
+      */
+      UInt word1 = 0x93CC0D8C;
+      UInt word2 = 0x93CC358C;
+      UInt word3 = 0x93CCCD8C;
+      UInt word4 = 0x93CCF58C;
+      if (getUIntLittleEndianly(code+ 0) == word1 &&
+          getUIntLittleEndianly(code+ 4) == word2 &&
+          getUIntLittleEndianly(code+ 8) == word3 &&
+          getUIntLittleEndianly(code+12) == word4) {
+         /* Got a "Special" instruction preamble.  Which one is it? */
+         if (getUIntLittleEndianly(code+16) == 0xAA0A014A
+                                               /* orr x10,x10,x10 */) {
+            /* X3 = client_request ( X4 ) */
+            DIP("x3 = client_request ( x4 )\n");
+            /* Resume at the insn following the 20-byte sequence. */
+            putPC(mkU64( guest_PC_curr_instr + 20 ));
+            dres->jk_StopHere = Ijk_ClientReq;
+            dres->whatNext    = Dis_StopHere;
+            return True;
+         }
+         else
+         if (getUIntLittleEndianly(code+16) == 0xAA0B016B
+                                               /* orr x11,x11,x11 */) {
+            /* X3 = guest_NRADDR */
+            DIP("x3 = guest_NRADDR\n");
+            /* The whole 20-byte sequence counts as a single insn. */
+            dres->len = 20;
+            putIReg64orZR(3, IRExpr_Get( OFFB_NRADDR, Ity_I64 ));
+            return True;
+         }
+         else
+         if (getUIntLittleEndianly(code+16) == 0xAA0C018C
+                                               /* orr x12,x12,x12 */) {
+            /*  branch-and-link-to-noredir X8 */
+            DIP("branch-and-link-to-noredir x8\n");
+            /* LR (x30) := address just past the special sequence. */
+            putIReg64orZR(30, mkU64(guest_PC_curr_instr + 20));
+            putPC(getIReg64orZR(8));
+            dres->jk_StopHere = Ijk_NoRedir;
+            dres->whatNext    = Dis_StopHere;
+            return True;
+         }
+         else
+         if (getUIntLittleEndianly(code+16) == 0xAA090129
+                                               /* orr x9,x9,x9 */) {
+            /* IR injection */
+            DIP("IR injection\n");
+            vex_inject_ir(irsb, Iend_LE);
+            // Invalidate the current insn. The reason is that the IRop we're
+            // injecting here can change. In which case the translation has to
+            // be redone. For ease of handling, we simply invalidate all the
+            // time.
+            stmt(IRStmt_Put(OFFB_CMSTART, mkU64(guest_PC_curr_instr)));
+            stmt(IRStmt_Put(OFFB_CMLEN,   mkU64(20)));
+            putPC(mkU64( guest_PC_curr_instr + 20 ));
+            dres->whatNext    = Dis_StopHere;
+            dres->jk_StopHere = Ijk_InvalICache;
+            return True;
+         }
+         /* We don't know what it is.  The preamble matched but the
+            marker insn did not, so refuse to decode. */
+         return False;
+         /*NOTREACHED*/
+      }
+   }
+
+   /* ----------------------------------------------------------- */
+
+   /* Main ARM64 instruction decoder starts here. */
+
+   Bool ok = False;
+
+   /* insn[28:25] determines the top-level grouping, so let's start
+      off with that.
+
+      For all of these dis_ARM64_ functions, we pass *dres with the
+      normal default results "insn OK, 4 bytes long, keep decoding" so
+      they don't need to change it.  However, decodes of control-flow
+      insns may cause *dres to change.
+   */
+   switch (INSN(28,25)) {
+      case BITS4(1,0,0,0): case BITS4(1,0,0,1):
+         // Data processing - immediate
+         ok = dis_ARM64_data_processing_immediate(dres, insn);
+         break;
+      case BITS4(1,0,1,0): case BITS4(1,0,1,1):
+         // Branch, exception generation and system instructions
+         ok = dis_ARM64_branch_etc(dres, insn, archinfo);
+         break;
+      case BITS4(0,1,0,0): case BITS4(0,1,1,0):
+      case BITS4(1,1,0,0): case BITS4(1,1,1,0):
+         // Loads and stores
+         ok = dis_ARM64_load_store(dres, insn);
+         break;
+      case BITS4(0,1,0,1): case BITS4(1,1,0,1):
+         // Data processing - register
+         ok = dis_ARM64_data_processing_register(dres, insn);
+         break;
+      case BITS4(0,1,1,1): case BITS4(1,1,1,1): 
+         // Data processing - SIMD and floating point
+         ok = dis_ARM64_simd_and_fp(dres, insn);
+         break;
+      case BITS4(0,0,0,0): case BITS4(0,0,0,1):
+      case BITS4(0,0,1,0): case BITS4(0,0,1,1):
+         // UNALLOCATED
+         break;
+      default:
+         vassert(0); /* Can't happen */
+   }
+
+   /* If the next-level down decoders failed, make sure |dres| didn't
+      get changed. */
+   if (!ok) {
+      vassert(dres->whatNext    == Dis_Continue);
+      vassert(dres->len         == 4);
+      vassert(dres->continueAt  == 0);
+      vassert(dres->jk_StopHere == Ijk_INVALID);
+   }
+
+   return ok;
+
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Top-level fn                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction
+   is located in host memory at &guest_code[delta]. */
+
+DisResult disInstr_ARM64 ( IRSB*        irsb_IN,
+                           Bool         (*resteerOkFn) ( void*, Addr ),
+                           Bool         resteerCisOk,
+                           void*        callback_opaque,
+                           const UChar* guest_code_IN,
+                           Long         delta_IN,
+                           Addr         guest_IP,
+                           VexArch      guest_arch,
+                           const VexArchInfo* archinfo,
+                           const VexAbiInfo*  abiinfo,
+                           VexEndness   host_endness_IN,
+                           Bool         sigill_diag_IN )
+{
+   /* Start from an all-zero result and fill it in below. */
+   DisResult dres;
+   vex_bzero(&dres, sizeof(dres));
+
+   /* Set globals (see top of this file) */
+   vassert(guest_arch == VexArchARM64);
+
+   irsb                = irsb_IN;
+   host_endness        = host_endness_IN;
+   guest_PC_curr_instr = (Addr64)guest_IP;
+
+   /* Sanity checks */
+   /* (x::UInt - 2) <= 15   ===   x >= 2 && x <= 17 (I hope) */
+   /* i.e. the D- and I-cache min line sizes must be in 4 .. 128k
+      bytes, expressed as log2. */
+   vassert((archinfo->arm64_dMinLine_lg2_szB - 2) <= 15);
+   vassert((archinfo->arm64_iMinLine_lg2_szB - 2) <= 15);
+
+   /* Try to decode */
+   Bool ok = disInstr_ARM64_WRK( &dres,
+                                 resteerOkFn, resteerCisOk, callback_opaque,
+                                 &guest_code_IN[delta_IN],
+                                 archinfo, abiinfo );
+   if (ok) {
+      /* All decode successes end up here. */
+      /* len is 4 for a normal insn, 20 for a "Special" preamble
+         sequence (see disInstr_ARM64_WRK). */
+      vassert(dres.len == 4 || dres.len == 20);
+      switch (dres.whatNext) {
+         case Dis_Continue:
+            putPC( mkU64(dres.len + guest_PC_curr_instr) );
+            break;
+         case Dis_ResteerU:
+         case Dis_ResteerC:
+            putPC(mkU64(dres.continueAt));
+            break;
+         case Dis_StopHere:
+            break;
+         default:
+            vassert(0);
+      }
+      DIP("\n");
+   } else {
+      /* All decode failures end up here. */
+      if (sigill_diag_IN) {
+         Int   i, j;
+         UChar buf[64];
+         UInt  insn
+                  = getUIntLittleEndianly( &guest_code_IN[delta_IN] );
+         vex_bzero(buf, sizeof(buf));
+         /* Render the insn in binary, MSB first, inserting a space
+            every 8 bits and an apostrophe at the other multiples of
+            4 bits, to ease manual decoding. */
+         for (i = j = 0; i < 32; i++) {
+            if (i > 0) {
+              if ((i & 7) == 0) buf[j++] = ' ';
+              else if ((i & 3) == 0) buf[j++] = '\'';
+            }
+            buf[j++] = (insn & (1<<(31-i))) ? '1' : '0';
+         }
+         vex_printf("disInstr(arm64): unhandled instruction 0x%08x\n", insn);
+         vex_printf("disInstr(arm64): %s\n", buf);
+      }
+
+      /* Tell the dispatcher that this insn cannot be decoded, and so
+         has not been executed, and (is currently) the next to be
+         executed.  PC should be up-to-date since it is made so at the
+         start of each insn, but nevertheless be paranoid and update
+         it again right now. */
+      putPC( mkU64(guest_PC_curr_instr) );
+      dres.len         = 0;
+      dres.whatNext    = Dis_StopHere;
+      dres.jk_StopHere = Ijk_NoDecode;
+      dres.continueAt  = 0;
+   }
+   return dres;
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end                                       guest_arm64_toIR.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/priv/guest_arm_defs.h b/VEX/priv/guest_arm_defs.h
new file mode 100644
index 0000000..b5b012b
--- /dev/null
+++ b/VEX/priv/guest_arm_defs.h
@@ -0,0 +1,245 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                  guest_arm_defs.h ---*/
+/*---------------------------------------------------------------*/
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Only to be used within the guest-arm directory. */
+
+#ifndef __VEX_GUEST_ARM_DEFS_H
+#define __VEX_GUEST_ARM_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "guest_generic_bb_to_IR.h"     // DisResult
+
+/*---------------------------------------------------------*/
+/*--- arm to IR conversion                              ---*/
+/*---------------------------------------------------------*/
+
+/* Convert one ARM insn to IR.  See the type DisOneInstrFn in
+   bb_to_IR.h. */
+extern
+DisResult disInstr_ARM ( IRSB*        irbb,
+                         Bool         (*resteerOkFn) ( void*, Addr ),
+                         Bool         resteerCisOk,
+                         void*        callback_opaque,
+                         const UChar* guest_code,
+                         Long         delta,
+                         Addr         guest_IP,
+                         VexArch      guest_arch,
+                         const VexArchInfo* archinfo,
+                         const VexAbiInfo*  abiinfo,
+                         VexEndness   host_endness,
+                         Bool         sigill_diag );
+
+/* Used by the optimiser to specialise calls to helpers. */
+extern
+IRExpr* guest_arm_spechelper ( const HChar* function_name,
+                               IRExpr** args,
+                               IRStmt** precedingStmts,
+                               Int      n_precedingStmts );
+
+/* Describes to the optimiser which parts of the guest state require
+   precise memory exceptions.  This is logically part of the guest
+   state description. */
+extern 
+Bool guest_arm_state_requires_precise_mem_exns ( Int, Int,
+                                                 VexRegisterUpdates );
+
+extern
+VexGuestLayout armGuest_layout;
+
+
+/*---------------------------------------------------------*/
+/*--- arm guest helpers                                 ---*/
+/*---------------------------------------------------------*/
+
+/* --- CLEAN HELPERS --- */
+
+/* Calculate NZCV from the supplied thunk components, in the positions
+   they appear in the CPSR, viz bits 31:28 for N Z V C respectively.
+   Returned bits 27:0 are zero. */
+extern 
+UInt armg_calculate_flags_nzcv ( UInt cc_op, UInt cc_dep1,
+                                 UInt cc_dep2, UInt cc_dep3 );
+
+/* Calculate the C flag from the thunk components, in the lowest bit
+   of the word (bit 0). */
+extern 
+UInt armg_calculate_flag_c ( UInt cc_op, UInt cc_dep1,
+                             UInt cc_dep2, UInt cc_dep3 );
+
+/* Calculate the V flag from the thunk components, in the lowest bit
+   of the word (bit 0). */
+extern 
+UInt armg_calculate_flag_v ( UInt cc_op, UInt cc_dep1,
+                             UInt cc_dep2, UInt cc_dep3 );
+
+/* Calculate the specified condition from the thunk components, in the
+   lowest bit of the word (bit 0). */
+extern 
+UInt armg_calculate_condition ( UInt cond_n_op /* ARMCondcode << 4 | cc_op */,
+                                UInt cc_dep1,
+                                UInt cc_dep2, UInt cc_dep3 );
+
+/* Calculate the QC flag from the thunk components, in the lowest bit
+   of the word (bit 0). */
+extern 
+UInt armg_calculate_flag_qc ( UInt resL1, UInt resL2,
+                              UInt resR1, UInt resR2 );
+
+
+/*---------------------------------------------------------*/
+/*--- Condition code stuff                              ---*/
+/*---------------------------------------------------------*/
+
+/* Flags masks.  Defines positions of flags bits in the CPSR. */
+#define ARMG_CC_SHIFT_N  31
+#define ARMG_CC_SHIFT_Z  30
+#define ARMG_CC_SHIFT_C  29
+#define ARMG_CC_SHIFT_V  28
+#define ARMG_CC_SHIFT_Q  27
+
+#define ARMG_CC_MASK_N    (1 << ARMG_CC_SHIFT_N)
+#define ARMG_CC_MASK_Z    (1 << ARMG_CC_SHIFT_Z)
+#define ARMG_CC_MASK_C    (1 << ARMG_CC_SHIFT_C)
+#define ARMG_CC_MASK_V    (1 << ARMG_CC_SHIFT_V)
+#define ARMG_CC_MASK_Q    (1 << ARMG_CC_SHIFT_Q)
+
+/* Flag thunk descriptors.  A four-word thunk is used to record
+   details of the most recent flag-setting operation, so NZCV can
+   be computed later if needed.
+
+   The four words are:
+
+      CC_OP, which describes the operation.
+
+      CC_DEP1, CC_DEP2, CC_DEP3.  These are arguments to the
+         operation.  We want to set up the mcx_masks in flag helper calls
+         involving these fields so that Memcheck "believes" that the
+         resulting flags are data-dependent on both CC_DEP1 and
+         CC_DEP2.  Hence the name DEP.
+
+   When building the thunk, it is always necessary to write words into
+   CC_DEP1/2/3, even if those args are not used given the
+   CC_OP field.  This is important because otherwise Memcheck could
+   give false positives as it does not understand the relationship
+   between the CC_OP field and CC_DEP1/2/3, and so believes
+   that the definedness of the stored flags always depends on
+   all 3 DEP values.
+
+   Fields carrying only 1 or 2 bits of useful information (old_C,
+   shifter_co, old_V, oldC:oldV) must have their top 31 or 30 bits
+   (respectively) zero.  The text "31x0:" or "30x0:" denotes this.
+
+   A summary of the field usages is:
+
+   OP                DEP1              DEP2              DEP3
+   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   OP_COPY           curr_NZCV:28x0    unused            unused
+   OP_ADD            argL              argR              unused
+   OP_SUB            argL              argR              unused
+   OP_ADC            argL              argR              31x0:old_C
+   OP_SBB            argL              argR              31x0:old_C
+   OP_LOGIC          result            31x0:shifter_co   31x0:old_V
+   OP_MUL            result            unused            30x0:old_C:old_V
+   OP_MULL           resLO32           resHI32           30x0:old_C:old_V
+*/
+
+enum {
+   /* Encodings for the CC_OP field of the flags thunk.  NB: all
+      values must fit in 4 bits; see the note following this enum
+      about armg_calculate_condition's calling convention. */
+   ARMG_CC_OP_COPY=0,  /* DEP1 = NZCV in 31:28, DEP2 = 0, DEP3 = 0
+                          just copy DEP1 to output */
+
+   ARMG_CC_OP_ADD,     /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
+                          DEP3 = 0 */
+
+   ARMG_CC_OP_SUB,     /* DEP1 = argL (Rn), DEP2 = argR (shifter_op),
+                          DEP3 = 0 */
+
+   ARMG_CC_OP_ADC,     /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
+                          DEP3 = oldC (in LSB) */
+
+   ARMG_CC_OP_SBB,     /* DEP1 = argL (Rn), DEP2 = arg2 (shifter_op),
+                          DEP3 = oldC (in LSB) */
+
+   ARMG_CC_OP_LOGIC,   /* DEP1 = result, DEP2 = shifter_carry_out (in LSB),
+                          DEP3 = old V flag (in LSB) */
+
+   ARMG_CC_OP_MUL,     /* DEP1 = result, DEP2 = 0, DEP3 = oldC:old_V
+                          (in bits 1:0) */
+
+   ARMG_CC_OP_MULL,    /* DEP1 = resLO32, DEP2 = resHI32, DEP3 = oldC:old_V
+                          (in bits 1:0) */
+
+   ARMG_CC_OP_NUMBER   /* number of valid OP values, not itself an OP */
+};
+
+/* XXXX because of the calling conventions for
+   armg_calculate_condition, all these OP values MUST be in the range
+   0 .. 15 only (viz, 4-bits). */
+
+
+
+/* Defines conditions which we can ask for (ARM ARM 2e page A3-6) */
+
+typedef
+   enum {
+      /* Note the pairing: each odd-numbered condition is the logical
+         negation of the even-numbered one preceding it (EQ/NE, HS/LO,
+         MI/PL, VS/VC, HI/LS, GE/LT, GT/LE, AL/NV). */
+      ARMCondEQ     = 0,  /* equal                         : Z=1 */
+      ARMCondNE     = 1,  /* not equal                     : Z=0 */
+
+      ARMCondHS     = 2,  /* >=u (higher or same)          : C=1 */
+      ARMCondLO     = 3,  /* <u  (lower)                   : C=0 */
+
+      ARMCondMI     = 4,  /* minus (negative)              : N=1 */
+      ARMCondPL     = 5,  /* plus (zero or +ve)            : N=0 */
+
+      ARMCondVS     = 6,  /* overflow                      : V=1 */
+      ARMCondVC     = 7,  /* no overflow                   : V=0 */
+
+      ARMCondHI     = 8,  /* >u   (higher)                 : C=1 && Z=0 */
+      ARMCondLS     = 9,  /* <=u  (lower or same)          : C=0 || Z=1 */
+
+      ARMCondGE     = 10, /* >=s (signed greater or equal) : N=V */
+      ARMCondLT     = 11, /* <s  (signed less than)        : N!=V */
+
+      ARMCondGT     = 12, /* >s  (signed greater)          : Z=0 && N=V */
+      ARMCondLE     = 13, /* <=s (signed less or equal)    : Z=1 || N!=V */
+
+      ARMCondAL     = 14, /* always (unconditional)        : 1 */
+      ARMCondNV     = 15  /* never (unconditional):        : 0 */
+      /* NB: ARM have deprecated the use of the NV condition code.
+         You are now supposed to use MOV R0,R0 as a noop rather than
+         MOVNV R0,R0 as was previously recommended.  Future processors
+         may have the NV condition code reused to do other things.  */
+   }
+   ARMCondcode;
+
+#endif /* ndef __VEX_GUEST_ARM_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                    guest_arm_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_arm_helpers.c b/VEX/priv/guest_arm_helpers.c
new file mode 100644
index 0000000..a07eeeb
--- /dev/null
+++ b/VEX/priv/guest_arm_helpers.c
@@ -0,0 +1,1192 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                               guest_arm_helpers.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_arm.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_arm_defs.h"
+
+
+/* This file contains helper functions for arm guest code.  Calls to
+   these functions are generated by the back end.  These calls are of
+   course in the host machine code and this file will be compiled to
+   host machine code, so that all makes sense.
+
+   Only change the signatures of these helper functions very
+   carefully.  If you change the signature here, you'll have to change
+   the parameters passed to it in the IR calls constructed by
+   guest-arm/toIR.c.
+*/
+
+
+/* Set to 1 to get detailed profiling info about individual N, Z, C
+   and V flag evaluation. */
+#define PROFILE_NZCV_FLAGS 0
+
+#if PROFILE_NZCV_FLAGS
+
+static UInt tab_n_eval[ARMG_CC_OP_NUMBER];
+static UInt tab_z_eval[ARMG_CC_OP_NUMBER];
+static UInt tab_c_eval[ARMG_CC_OP_NUMBER];
+static UInt tab_v_eval[ARMG_CC_OP_NUMBER];
+static UInt initted = 0;
+static UInt tot_evals = 0;
+
+/* Zero all four per-op flag-evaluation counter tables and mark the
+   profiling state as initialised. */
+static void initCounts ( void )
+{
+   UInt op;
+   for (op = 0; op < ARMG_CC_OP_NUMBER; op++) {
+      tab_n_eval[op] = 0;
+      tab_z_eval[op] = 0;
+      tab_c_eval[op] = 0;
+      tab_v_eval[op] = 0;
+   }
+   initted = 1;
+}
+
+/* Print a table of evaluation counts, one row per ARMG_CC_OP value,
+   with one column per condition flag (N Z C V). */
+static void showCounts ( void )
+{
+   UInt op;
+   vex_printf("\n                 N          Z          C          V\n");
+   vex_printf(  "---------------------------------------------------\n");
+   for (op = 0; op < ARMG_CC_OP_NUMBER; op++) {
+      vex_printf("CC_OP=%d  %9d  %9d  %9d  %9d\n",
+                 op,
+                 tab_n_eval[op], tab_z_eval[op],
+                 tab_c_eval[op], tab_v_eval[op] );
+   }
+}
+
+/* Per-flag convenience wrappers: record one evaluation of _cc_op in
+   the counter table for the N, Z, C or V flag respectively. */
+#define NOTE_N_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_n_eval)
+#define NOTE_Z_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_z_eval)
+#define NOTE_C_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_c_eval)
+#define NOTE_V_EVAL(_cc_op) NOTE_EVAL(_cc_op, tab_v_eval)
+
+/* Lazily initialise the counters, bump _tab[_cc_op] and the running
+   total, and dump all counters every 2^20 total evaluations. */
+#define NOTE_EVAL(_cc_op, _tab) \
+   do { \
+      if (!initted) initCounts(); \
+      vassert( ((UInt)(_cc_op)) < ARMG_CC_OP_NUMBER); \
+      _tab[(UInt)(_cc_op)]++; \
+      tot_evals++; \
+      if (0 == (tot_evals & 0xFFFFF)) \
+        showCounts(); \
+   } while (0)
+
+#endif /* PROFILE_NZCV_FLAGS */
+
+
+/* Calculate the N flag from the supplied thunk components, in the
+   least significant bit of the word.  Returned bits 31:1 are zero.
+   The meaning of (cc_dep1, cc_dep2, cc_dep3) depends on cc_op; the
+   per-case comments below give the layout for each op. */
+static
+UInt armg_calculate_flag_n ( UInt cc_op, UInt cc_dep1,
+                             UInt cc_dep2, UInt cc_dep3 )
+{
+#  if PROFILE_NZCV_FLAGS
+   NOTE_N_EVAL(cc_op);
+#  endif
+
+   switch (cc_op) {
+      case ARMG_CC_OP_COPY: {
+         /* (nzcv:28x0, unused, unused) */
+         UInt nf   = (cc_dep1 >> ARMG_CC_SHIFT_N) & 1;
+         return nf;
+      }
+      case ARMG_CC_OP_ADD: {
+         /* (argL, argR, unused) */
+         UInt argL = cc_dep1;
+         UInt argR = cc_dep2;
+         UInt res  = argL + argR;
+         UInt nf   = res >> 31;
+         return nf;
+      }
+      case ARMG_CC_OP_SUB: {
+         /* (argL, argR, unused) */
+         UInt argL = cc_dep1;
+         UInt argR = cc_dep2;
+         UInt res  = argL - argR;
+         UInt nf   = res >> 31;
+         return nf;
+      }
+      case ARMG_CC_OP_ADC: {
+         /* (argL, argR, oldC) */
+         UInt argL = cc_dep1;
+         UInt argR = cc_dep2;
+         UInt oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt res  = argL + argR + oldC;
+         UInt nf   = res >> 31;
+         return nf;
+      }
+      case ARMG_CC_OP_SBB: {
+         /* (argL, argR, oldC) -- borrow is NOT(oldC) */
+         UInt argL = cc_dep1;
+         UInt argR = cc_dep2;
+         UInt oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt res  = argL - argR - (oldC ^ 1);
+         UInt nf   = res >> 31;
+         return nf;
+      }
+      case ARMG_CC_OP_LOGIC: {
+         /* (res, shco, oldV) */
+         UInt res  = cc_dep1;
+         UInt nf   = res >> 31;
+         return nf;
+      }
+      case ARMG_CC_OP_MUL: {
+         /* (res, unused, oldC:oldV) */
+         UInt res  = cc_dep1;
+         UInt nf   = res >> 31;
+         return nf;
+      }
+      case ARMG_CC_OP_MULL: {
+         /* (resLo32, resHi32, oldC:oldV) -- sign bit lives in the
+            high half of the 64-bit result */
+         UInt resHi32 = cc_dep2;
+         UInt nf      = resHi32 >> 31;
+         return nf;
+      }
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("armg_calculate_flag_n"
+                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
+                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         /* Fix: the panic message previously said
+            "armg_calculate_flags_n", which names a non-existent
+            function; keep it in sync with this function's name. */
+         vpanic("armg_calculate_flag_n");
+   }
+}
+
+
+/* Calculate the Z flag from the supplied thunk components, in the
+   least significant bit of the word.  Returned bits 31:1 are zero.
+   The meaning of (cc_dep1, cc_dep2, cc_dep3) depends on cc_op; the
+   per-case comments below give the layout for each op. */
+static
+UInt armg_calculate_flag_z ( UInt cc_op, UInt cc_dep1,
+                             UInt cc_dep2, UInt cc_dep3 )
+{
+#  if PROFILE_NZCV_FLAGS
+   NOTE_Z_EVAL(cc_op);
+#  endif
+
+   switch (cc_op) {
+      case ARMG_CC_OP_COPY: {
+         /* (nzcv:28x0, unused, unused) */
+         UInt zf   = (cc_dep1 >> ARMG_CC_SHIFT_Z) & 1;
+         return zf;
+      }
+      case ARMG_CC_OP_ADD: {
+         /* (argL, argR, unused) */
+         UInt argL = cc_dep1;
+         UInt argR = cc_dep2;
+         UInt res  = argL + argR;
+         UInt zf   = res == 0;
+         return zf;
+      }
+      case ARMG_CC_OP_SUB: {
+         /* (argL, argR, unused) */
+         UInt argL = cc_dep1;
+         UInt argR = cc_dep2;
+         UInt res  = argL - argR;
+         UInt zf   = res == 0;
+         return zf;
+      }
+      case ARMG_CC_OP_ADC: {
+         /* (argL, argR, oldC) */
+         UInt argL = cc_dep1;
+         UInt argR = cc_dep2;
+         UInt oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt res  = argL + argR + oldC;
+         UInt zf   = res == 0;
+         return zf;
+      }
+      case ARMG_CC_OP_SBB: {
+         /* (argL, argR, oldC) -- borrow is NOT(oldC) */
+         UInt argL = cc_dep1;
+         UInt argR = cc_dep2;
+         UInt oldC = cc_dep3;
+         vassert((oldC & ~1) == 0);
+         UInt res  = argL - argR - (oldC ^ 1);
+         UInt zf   = res == 0;
+         return zf;
+      }
+      case ARMG_CC_OP_LOGIC: {
+         /* (res, shco, oldV) */
+         UInt res  = cc_dep1;
+         UInt zf   = res == 0;
+         return zf;
+      }
+      case ARMG_CC_OP_MUL: {
+         /* (res, unused, oldC:oldV) */
+         UInt res  = cc_dep1;
+         UInt zf   = res == 0;
+         return zf;
+      }
+      case ARMG_CC_OP_MULL: {
+         /* (resLo32, resHi32, oldC:oldV) -- the full 64-bit result is
+            zero iff both halves are zero */
+         UInt resLo32 = cc_dep1;
+         UInt resHi32 = cc_dep2;
+         UInt zf      = (resHi32|resLo32) == 0;
+         return zf;
+      }
+      default:
+         /* shouldn't really make these calls from generated code */
+         /* Fix: both diagnostics previously said
+            "armg_calculate_flags_z", which is not this function's
+            name; keep them in sync with the actual name. */
+         vex_printf("armg_calculate_flag_z"
+                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
+                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         vpanic("armg_calculate_flag_z");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Compute the C (carry) flag from the thunk (cc_op, cc_dep1, cc_dep2,
+   cc_dep3), returning it in the least significant bit of the word.
+   Returned bits 31:1 are zero. */
+UInt armg_calculate_flag_c ( UInt cc_op, UInt cc_dep1,
+                             UInt cc_dep2, UInt cc_dep3 )
+{
+#  if PROFILE_NZCV_FLAGS
+   NOTE_C_EVAL(cc_op);
+#  endif
+
+   switch (cc_op) {
+      case ARMG_CC_OP_COPY:
+         /* (nzcv:28x0, unused, unused): C already in place */
+         return (cc_dep1 >> ARMG_CC_SHIFT_C) & 1;
+      case ARMG_CC_OP_ADD: {
+         /* (argL, argR, unused): carry out iff the sum wrapped */
+         UInt sum = cc_dep1 + cc_dep2;
+         return sum < cc_dep1;
+      }
+      case ARMG_CC_OP_SUB:
+         /* (argL, argR, unused): ARM SUB sets C = NOT(borrow) */
+         return cc_dep1 >= cc_dep2;
+      case ARMG_CC_OP_ADC: {
+         /* (argL, argR, oldC) */
+         vassert((cc_dep3 & ~1) == 0);
+         UInt sum = cc_dep1 + cc_dep2 + cc_dep3;
+         return cc_dep3 ? (sum <= cc_dep1) : (sum < cc_dep1);
+      }
+      case ARMG_CC_OP_SBB:
+         /* (argL, argR, oldC) */
+         vassert((cc_dep3 & ~1) == 0);
+         return cc_dep3 ? (cc_dep1 >= cc_dep2) : (cc_dep1 > cc_dep2);
+      case ARMG_CC_OP_LOGIC:
+         /* (res, shco, oldV): C is the shifter carry-out */
+         vassert((cc_dep2 & ~1) == 0);
+         return cc_dep2;
+      case ARMG_CC_OP_MUL:
+      case ARMG_CC_OP_MULL:
+         /* (.., .., oldC:oldV): C unaffected; recover oldC (bit 1) */
+         vassert((cc_dep3 & ~3) == 0);
+         return (cc_dep3 >> 1) & 1;
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("armg_calculate_flag_c"
+                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
+                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         vpanic("armg_calculate_flag_c");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Compute the V (signed overflow) flag from the thunk (cc_op,
+   cc_dep1, cc_dep2, cc_dep3), returning it in the least significant
+   bit of the word.  Returned bits 31:1 are zero. */
+UInt armg_calculate_flag_v ( UInt cc_op, UInt cc_dep1,
+                             UInt cc_dep2, UInt cc_dep3 )
+{
+#  if PROFILE_NZCV_FLAGS
+   NOTE_V_EVAL(cc_op);
+#  endif
+
+   switch (cc_op) {
+      case ARMG_CC_OP_COPY:
+         /* (nzcv:28x0, unused, unused): V already in place */
+         return (cc_dep1 >> ARMG_CC_SHIFT_V) & 1;
+      case ARMG_CC_OP_ADD: {
+         /* (argL, argR, unused): overflow iff the result's sign
+            differs from both operands' signs */
+         UInt sum = cc_dep1 + cc_dep2;
+         return ((sum ^ cc_dep1) & (sum ^ cc_dep2)) >> 31;
+      }
+      case ARMG_CC_OP_SUB: {
+         /* (argL, argR, unused) */
+         UInt dif = cc_dep1 - cc_dep2;
+         return ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ dif)) >> 31;
+      }
+      case ARMG_CC_OP_ADC: {
+         /* (argL, argR, oldC) */
+         vassert((cc_dep3 & ~1) == 0);
+         UInt sum = cc_dep1 + cc_dep2 + cc_dep3;
+         return ((sum ^ cc_dep1) & (sum ^ cc_dep2)) >> 31;
+      }
+      case ARMG_CC_OP_SBB: {
+         /* (argL, argR, oldC) -- borrow is NOT(oldC) */
+         vassert((cc_dep3 & ~1) == 0);
+         UInt dif = cc_dep1 - cc_dep2 - (cc_dep3 ^ 1);
+         return ((cc_dep1 ^ cc_dep2) & (cc_dep1 ^ dif)) >> 31;
+      }
+      case ARMG_CC_OP_LOGIC:
+         /* (res, shco, oldV): V unaffected by logical ops */
+         vassert((cc_dep3 & ~1) == 0);
+         return cc_dep3;
+      case ARMG_CC_OP_MUL:
+      case ARMG_CC_OP_MULL:
+         /* (.., .., oldC:oldV): V unaffected; recover oldV (bit 0) */
+         vassert((cc_dep3 & ~3) == 0);
+         return cc_dep3 & 1;
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("armg_calculate_flag_v"
+                    "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
+                    cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         vpanic("armg_calculate_flag_v");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Compute all four flags from the thunk and return them in their
+   CPSR positions, N Z C V in bits 31:28.  Returned bits 27:0 are
+   zero.  Delegates to the four single-flag helpers above. */
+UInt armg_calculate_flags_nzcv ( UInt cc_op, UInt cc_dep1,
+                                 UInt cc_dep2, UInt cc_dep3 )
+{
+   UInt n = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
+   UInt z = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
+   UInt c = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
+   UInt v = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
+   return (n << ARMG_CC_SHIFT_N) | (z << ARMG_CC_SHIFT_Z)
+          | (c << ARMG_CC_SHIFT_C) | (v << ARMG_CC_SHIFT_V);
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Compute the QC (saturation) flag in the lowest bit of the word
+   (bit 0): 1 iff the pair (resL1,resL2) differs from the pair
+   (resR1,resR2).  Urr, having this out of line is bizarre.  Push
+   back inline. */
+UInt armg_calculate_flag_qc ( UInt resL1, UInt resL2,
+                              UInt resR1, UInt resR2 )
+{
+   return (resL1 != resR1 || resL2 != resR2) ? 1 : 0;
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate the specified condition from the thunk components, in the
+   lowest bit of the word (bit 0).  Returned bits 31:1 are zero. */
+UInt armg_calculate_condition ( UInt cond_n_op /* (ARMCondcode << 4) | cc_op */,
+                                UInt cc_dep1,
+                                UInt cc_dep2, UInt cc_dep3 )
+{
+   UInt cond  = cond_n_op >> 4;
+   UInt cc_op = cond_n_op & 0xF;
+   UInt nf, zf, vf, cf, inv;
+   //   vex_printf("XXXXXXXX %x %x %x %x\n", 
+   //              cond_n_op, cc_dep1, cc_dep2, cc_dep3);
+
+   // skip flags computation in this case
+   if (cond == ARMCondAL) return 1;
+
+   /* ARM conditions come in complementary pairs differing only in the
+      bottom bit (EQ/NE, HS/LO, ...), so the bottom bit of cond says
+      whether to invert the predicate computed for the pair. */
+   inv  = cond & 1;
+
+   switch (cond) {
+      case ARMCondEQ:    // Z=1         => z
+      case ARMCondNE:    // Z=0
+         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ zf;
+
+      case ARMCondHS:    // C=1         => c
+      case ARMCondLO:    // C=0
+         cf = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ cf;
+
+      case ARMCondMI:    // N=1         => n
+      case ARMCondPL:    // N=0
+         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ nf;
+
+      case ARMCondVS:    // V=1         => v
+      case ARMCondVC:    // V=0
+         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ vf;
+
+      case ARMCondHI:    // C=1 && Z=0   => c & ~z
+      case ARMCondLS:    // C=0 || Z=1
+         cf = armg_calculate_flag_c(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ (1 & (cf & ~zf));
+
+      case ARMCondGE:    // N=V          => ~(n^v)
+      case ARMCondLT:    // N!=V
+         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ (1 & ~(nf ^ vf));
+
+      case ARMCondGT:    // Z=0 && N=V   => ~z & ~(n^v)  =>  ~(z | (n^v))
+      case ARMCondLE:    // Z=1 || N!=V
+         nf = armg_calculate_flag_n(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         vf = armg_calculate_flag_v(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         zf = armg_calculate_flag_z(cc_op, cc_dep1, cc_dep2, cc_dep3);
+         return inv ^ (1 & ~(zf | (nf ^ vf)));
+
+      case ARMCondAL: // handled above
+      case ARMCondNV: // should never get here: Illegal instr
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("armg_calculate_condition(ARM)"
+                    "( %u, %u, 0x%x, 0x%x, 0x%x )\n",
+                    cond, cc_op, cc_dep1, cc_dep2, cc_dep3 );
+         vpanic("armg_calculate_condition(ARM)");
+   }
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Flag-helpers translation-time function specialisers.    ---*/
+/*--- These help iropt specialise calls the above run-time    ---*/
+/*--- flags functions.                                        ---*/
+/*---------------------------------------------------------------*/
+
+/* Used by the optimiser to try specialisations.  Returns an
+   equivalent expression, or NULL if none. */
+
+/* Does expression e denote exactly the 32-bit constant n? */
+static Bool isU32 ( IRExpr* e, UInt n )
+{
+   if (e->tag != Iex_Const)
+      return False;
+   return toBool( e->Iex.Const.con->tag == Ico_U32
+                  && e->Iex.Const.con->Ico.U32 == n );
+}
+
+/* Translation-time specialiser for the ARM flag/condition helpers:
+   given a helper's name and its argument expressions, return simpler
+   equivalent IR, or NULL if no specialisation applies. */
+IRExpr* guest_arm_spechelper ( const HChar* function_name,
+                               IRExpr** args,
+                               IRStmt** precedingStmts,
+                               Int      n_precedingStmts )
+{
+#  define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
+#  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
+#  define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
+#  define mkU8(_n)  IRExpr_Const(IRConst_U8(_n))
+
+   /* args is a NULL-terminated vector; count its entries. */
+   Int i, arity = 0;
+   for (i = 0; args[i]; i++)
+      arity++;
+#  if 0
+   vex_printf("spec request:\n");
+   vex_printf("   %s  ", function_name);
+   for (i = 0; i < arity; i++) {
+      vex_printf("  ");
+      ppIRExpr(args[i]);
+   }
+   vex_printf("\n");
+#  endif
+
+   /* --------- specialising "armg_calculate_condition" --------- */
+
+   if (vex_streq(function_name, "armg_calculate_condition")) {
+
+      /* specialise calls to the "armg_calculate_condition" function.
+         Not sure whether this is strictly necessary, but: the
+         replacement IR must produce only the values 0 or 1.  Bits
+         31:1 are required to be zero. */
+      IRExpr *cond_n_op, *cc_dep1, *cc_dep2, *cc_ndep;
+      vassert(arity == 4);
+      cond_n_op = args[0]; /* (ARMCondcode << 4)  |  ARMG_CC_OP_* */
+      cc_dep1   = args[1];
+      cc_dep2   = args[2];
+      cc_ndep   = args[3];
+
+      /*---------------- SUB ----------------*/
+
+      /* For the SUB thunk the args are (argL=cc_dep1, argR=cc_dep2,
+         unused) -- see armg_calculate_flag_c above. */
+      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_SUB)) {
+         /* EQ after SUB --> test argL == argR */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32, cc_dep1, cc_dep2));
+      }
+      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_SUB)) {
+         /* NE after SUB --> test argL != argR */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpNE32, cc_dep1, cc_dep2));
+      }
+
+      if (isU32(cond_n_op, (ARMCondGT << 4) | ARMG_CC_OP_SUB)) {
+         /* GT after SUB --> test argL >s argR
+                         --> test argR <s argL */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32S, cc_dep2, cc_dep1));
+      }
+      if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_SUB)) {
+         /* LE after SUB --> test argL <=s argR */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLE32S, cc_dep1, cc_dep2));
+      }
+
+      if (isU32(cond_n_op, (ARMCondLT << 4) | ARMG_CC_OP_SUB)) {
+         /* LT after SUB --> test argL <s argR */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32S, cc_dep1, cc_dep2));
+      }
+
+      if (isU32(cond_n_op, (ARMCondGE << 4) | ARMG_CC_OP_SUB)) {
+         /* GE after SUB --> test argL >=s argR
+                         --> test argR <=s argL */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLE32S, cc_dep2, cc_dep1));
+      }
+
+      if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SUB)) {
+         /* HS after SUB --> test argL >=u argR
+                         --> test argR <=u argL */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
+      }
+      if (isU32(cond_n_op, (ARMCondLO << 4) | ARMG_CC_OP_SUB)) {
+         /* LO after SUB --> test argL <u argR */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32U, cc_dep1, cc_dep2));
+      }
+
+      if (isU32(cond_n_op, (ARMCondLS << 4) | ARMG_CC_OP_SUB)) {
+         /* LS after SUB --> test argL <=u argR */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
+      }
+      if (isU32(cond_n_op, (ARMCondHI << 4) | ARMG_CC_OP_SUB)) {
+         /* HI after SUB --> test argL >u argR
+                         --> test argR <u argL */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32U, cc_dep2, cc_dep1));
+      }
+
+      /*---------------- SBB ----------------*/
+
+      if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SBB)) {
+         /* This seems to happen a lot in softfloat code, eg __divdf3+140 */
+         /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
+         /* HS after SBB (same as C after SBB below)
+            --> oldC ? (argL >=u argR) : (argL >u argR)
+            --> oldC ? (argR <=u argL) : (argR <u argL)
+         */
+         return
+            IRExpr_ITE(
+               binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
+               /* case oldC != 0 */
+               unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
+               /* case oldC == 0 */
+               unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
+            );
+      }
+
+      /*---------------- LOGIC ----------------*/
+
+      /* For the LOGIC thunk the args are (res=cc_dep1, shco=cc_dep2,
+         oldV=cc_ndep). */
+      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_LOGIC)) {
+         /* EQ after LOGIC --> test res == 0 */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
+      }
+      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_LOGIC)) {
+         /* NE after LOGIC --> test res != 0 */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpNE32, cc_dep1, mkU32(0)));
+      }
+
+      if (isU32(cond_n_op, (ARMCondPL << 4) | ARMG_CC_OP_LOGIC)) {
+         /* PL after LOGIC --> test (res >> 31) == 0 */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32,
+                           binop(Iop_Shr32, cc_dep1, mkU8(31)),
+                           mkU32(0)));
+      }
+      if (isU32(cond_n_op, (ARMCondMI << 4) | ARMG_CC_OP_LOGIC)) {
+         /* MI after LOGIC --> test (res >> 31) == 1 */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32,
+                           binop(Iop_Shr32, cc_dep1, mkU8(31)),
+                           mkU32(1)));
+      }
+
+      /*---------------- COPY ----------------*/
+
+      /* For the COPY thunk, cc_dep1 holds the NZCV bits in their CPSR
+         positions (31:28), so each flag is just an extract. */
+      /* --- 0,1 --- */
+      if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_COPY)) {
+         /* EQ after COPY --> (cc_dep1 >> ARMG_CC_SHIFT_Z) & 1 */
+         return binop(Iop_And32,
+                      binop(Iop_Shr32, cc_dep1,
+                            mkU8(ARMG_CC_SHIFT_Z)),
+                      mkU32(1));
+      }
+      if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_COPY)) {
+         /* NE after COPY --> ((cc_dep1 >> ARMG_CC_SHIFT_Z) ^ 1) & 1 */
+         return binop(Iop_And32,
+                      binop(Iop_Xor32,
+                            binop(Iop_Shr32, cc_dep1,
+                                             mkU8(ARMG_CC_SHIFT_Z)),
+                            mkU32(1)),
+                      mkU32(1));
+      }
+
+      /* --- 4,5 --- */
+      if (isU32(cond_n_op, (ARMCondMI << 4) | ARMG_CC_OP_COPY)) {
+         /* MI after COPY --> (cc_dep1 >> ARMG_CC_SHIFT_N) & 1 */
+         return binop(Iop_And32,
+                      binop(Iop_Shr32, cc_dep1,
+                            mkU8(ARMG_CC_SHIFT_N)),
+                      mkU32(1));
+      }
+      if (isU32(cond_n_op, (ARMCondPL << 4) | ARMG_CC_OP_COPY)) {
+         /* PL after COPY --> ((cc_dep1 >> ARMG_CC_SHIFT_N) ^ 1) & 1 */
+         return binop(Iop_And32,
+                      binop(Iop_Xor32,
+                            binop(Iop_Shr32, cc_dep1,
+                                             mkU8(ARMG_CC_SHIFT_N)),
+                            mkU32(1)),
+                      mkU32(1));
+      }
+
+      /* --- 12,13 --- */
+      if (isU32(cond_n_op, (ARMCondGT << 4) | ARMG_CC_OP_COPY)) {
+         /* GT after COPY --> ((z | (n^v)) & 1) ^ 1 */
+         IRExpr* n = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_N));
+         IRExpr* v = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_V));
+         IRExpr* z = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_Z));
+         return binop(Iop_Xor32,
+                      binop(Iop_And32, 
+                            binop(Iop_Or32, z, binop(Iop_Xor32, n, v)),
+                            mkU32(1)),
+                      mkU32(1));
+      }
+      if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_COPY)) {
+         /* LE after COPY --> ((z | (n^v)) & 1) ^ 0 */
+         IRExpr* n = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_N));
+         IRExpr* v = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_V));
+         IRExpr* z = binop(Iop_Shr32, cc_dep1, mkU8(ARMG_CC_SHIFT_Z));
+         return binop(Iop_Xor32,
+                      binop(Iop_And32, 
+                            binop(Iop_Or32, z, binop(Iop_Xor32, n, v)),
+                            mkU32(1)),
+                      mkU32(0));
+      }
+
+      /*----------------- AL -----------------*/
+
+      /* A critically important case for Thumb code.
+
+         What we're trying to spot is the case where cond_n_op is an
+         expression of the form Or32(..., 0xE0) since that means the
+         caller is asking for CondAL and we can simply return 1
+         without caring what the ... part is.  This is a potentially
+         dodgy kludge in that it assumes that the ... part has zeroes
+         in bits 7:4, so that the result of the Or32 is guaranteed to
+         be 0xE in bits 7:4.  Given that the places where this first
+         arg are constructed (in guest_arm_toIR.c) are very
+         constrained, we can get away with this.  To make this
+         guaranteed safe would require to have a new primop, Slice44
+         or some such, thusly
+
+         Slice44(arg1, arg2) = 0--(24)--0 arg1[7:4] arg2[3:0]
+
+         and we would then look for Slice44(0xE0, ...)
+         which would give the required safety property.
+
+         It would be infeasibly expensive to scan backwards through
+         the entire block looking for an assignment to the temp, so
+         just look at the previous 16 statements.  That should find it
+         if it is an interesting case, as a result of how the
+         boilerplate guff at the start of each Thumb insn translation
+         is made.
+      */
+      if (cond_n_op->tag == Iex_RdTmp) {
+         Int    j;
+         IRTemp look_for = cond_n_op->Iex.RdTmp.tmp;
+         Int    limit    = n_precedingStmts - 16;
+         if (limit < 0) limit = 0;
+         if (0) vex_printf("scanning %d .. %d\n", n_precedingStmts-1, limit);
+         for (j = n_precedingStmts - 1; j >= limit; j--) {
+            IRStmt* st = precedingStmts[j];
+            if (st->tag == Ist_WrTmp
+                && st->Ist.WrTmp.tmp == look_for
+                && st->Ist.WrTmp.data->tag == Iex_Binop
+                && st->Ist.WrTmp.data->Iex.Binop.op == Iop_Or32
+                && isU32(st->Ist.WrTmp.data->Iex.Binop.arg2, (ARMCondAL << 4)))
+               return mkU32(1);
+         }
+         /* Didn't find any useful binding to the first arg
+            in the previous 16 stmts. */
+      }
+   }
+
+   /* --------- specialising "armg_calculate_flag_c" --------- */
+
+   else
+   if (vex_streq(function_name, "armg_calculate_flag_c")) {
+
+      /* specialise calls to the "armg_calculate_flag_c" function.
+         Note that the returned value must be either 0 or 1; nonzero
+         bits 31:1 are not allowed.  In turn, incoming oldV and oldC
+         values (from the thunk) are assumed to have bits 31:1
+         clear. */
+      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
+      vassert(arity == 4);
+      cc_op   = args[0]; /* ARMG_CC_OP_* */
+      cc_dep1 = args[1];
+      cc_dep2 = args[2];
+      cc_ndep = args[3];
+
+      if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
+         /* Thunk args are (result, shco, oldV) */
+         /* C after LOGIC --> shco */
+         return cc_dep2;
+      }
+
+      if (isU32(cc_op, ARMG_CC_OP_SUB)) {
+         /* Thunk args are (argL, argR, unused) */
+         /* C after SUB --> argL >=u argR
+                        --> argR <=u argL */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
+      }
+
+      if (isU32(cc_op, ARMG_CC_OP_SBB)) {
+         /* This happens occasionally in softfloat code, eg __divdf3+140 */
+         /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
+         /* C after SBB (same as HS after SBB above)
+            --> oldC ? (argL >=u argR) : (argL >u argR)
+            --> oldC ? (argR <=u argL) : (argR <u argL)
+         */
+         return
+            IRExpr_ITE(
+               binop(Iop_CmpNE32, cc_ndep, mkU32(0)),
+               /* case oldC != 0 */
+               unop(Iop_1Uto32, binop(Iop_CmpLE32U, cc_dep2, cc_dep1)),
+               /* case oldC == 0 */
+               unop(Iop_1Uto32, binop(Iop_CmpLT32U, cc_dep2, cc_dep1))
+            );
+      }
+
+   }
+
+   /* --------- specialising "armg_calculate_flag_v" --------- */
+
+   else
+   if (vex_streq(function_name, "armg_calculate_flag_v")) {
+
+      /* specialise calls to the "armg_calculate_flag_v" function.
+         Note that the returned value must be either 0 or 1; nonzero
+         bits 31:1 are not allowed.  In turn, incoming oldV and oldC
+         values (from the thunk) are assumed to have bits 31:1
+         clear. */
+      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
+      vassert(arity == 4);
+      cc_op   = args[0]; /* ARMG_CC_OP_* */
+      cc_dep1 = args[1];
+      cc_dep2 = args[2];
+      cc_ndep = args[3];
+
+      if (isU32(cc_op, ARMG_CC_OP_LOGIC)) {
+         /* Thunk args are (result, shco, oldV) */
+         /* V after LOGIC --> oldV */
+         return cc_ndep;
+      }
+
+      if (isU32(cc_op, ARMG_CC_OP_SUB)) {
+         /* Thunk args are (argL, argR, unused) */
+         /* V after SUB 
+            --> let res = argL - argR
+                in ((argL ^ argR) & (argL ^ res)) >> 31
+            --> ((argL ^ argR) & (argL ^ (argL - argR))) >> 31
+         */
+         IRExpr* argL = cc_dep1;
+         IRExpr* argR = cc_dep2;
+         return
+            binop(Iop_Shr32,
+                  binop(Iop_And32,
+                        binop(Iop_Xor32, argL, argR),
+                        binop(Iop_Xor32, argL, binop(Iop_Sub32, argL, argR))
+                  ),
+                  mkU8(31)
+            );
+      }
+
+      if (isU32(cc_op, ARMG_CC_OP_SBB)) {
+         /* This happens occasionally in softfloat code, eg __divdf3+140 */
+         /* thunk is: (dep1=argL, dep2=argR, ndep=oldC) */
+         /* V after SBB
+            --> let res = argL - argR - (oldC ^ 1)
+                in  (argL ^ argR) & (argL ^ res) & 1
+         */
+         return
+            binop(
+               Iop_And32,
+               binop(
+                  Iop_And32,
+                  // argL ^ argR
+                  binop(Iop_Xor32, cc_dep1, cc_dep2),
+                  // argL ^ (argL - argR - (oldC ^ 1))
+                  binop(Iop_Xor32,
+                        cc_dep1,
+                        binop(Iop_Sub32,
+                              binop(Iop_Sub32, cc_dep1, cc_dep2),
+                              binop(Iop_Xor32, cc_ndep, mkU32(1)))
+                  )
+               ),
+               mkU32(1)
+            );
+      }
+
+   }
+
+#  undef unop
+#  undef binop
+#  undef mkU32
+#  undef mkU8
+
+   /* No specialisation matched. */
+   return NULL;
+}
+
+
+/*----------------------------------------------*/
+/*--- The exported fns ..                    ---*/
+/*----------------------------------------------*/
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Load native NZCV flag bits into the guest's lazy condition-code
+   thunk, using the OP_COPY encoding (DEP1 holds the literal flag
+   word).  Currently compiled out and asserted unreachable; retained
+   as a sketch of how the client could set flags directly. */
+#if 0
+void LibVEX_GuestARM_put_flags ( UInt flags_native,
+                                 /*OUT*/VexGuestARMState* vex_state )
+{
+   vassert(0); // FIXME
+
+   /* Mask out everything except N Z V C. */
+   flags_native
+      &= (ARMG_CC_MASK_N | ARMG_CC_MASK_Z | ARMG_CC_MASK_V | ARMG_CC_MASK_C);
+   
+   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
+   vex_state->guest_CC_DEP1 = flags_native;
+   vex_state->guest_CC_DEP2 = 0;
+   vex_state->guest_CC_NDEP = 0;
+}
+#endif
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Reconstruct an architectural CPSR value from the guest state.
+   Fills in NZCV (from the lazy flags thunk), Q, GE[3:0], the mode
+   bits M[4:0] (always user mode) and T.  J, E, A, I, F are left at
+   zero, and ITSTATE is not reconstructed for now. */
+UInt LibVEX_GuestARM_get_cpsr ( /*IN*/const VexGuestARMState* vex_state )
+{
+   /* NZCV live in bits 31:28; the helper must leave 27:0 clear. */
+   UInt r = armg_calculate_flags_nzcv(
+               vex_state->guest_CC_OP,
+               vex_state->guest_CC_DEP1,
+               vex_state->guest_CC_DEP2,
+               vex_state->guest_CC_NDEP
+            );
+   vassert(0 == (r & 0x0FFFFFFF));
+
+   /* Q, sticky saturation, bit 27. */
+   if (vex_state->guest_QFLAG32 > 0)
+      r |= (1 << 27);
+
+   /* GE[3:0], bits 19:16. */
+   if (vex_state->guest_GEFLAG0 > 0) r |= (1 << 16);
+   if (vex_state->guest_GEFLAG1 > 0) r |= (1 << 17);
+   if (vex_state->guest_GEFLAG2 > 0) r |= (1 << 18);
+   if (vex_state->guest_GEFLAG3 > 0) r |= (1 << 19);
+
+   /* M[4:0] = 0b10000, meaning user mode. */
+   r |= (1 << 4);
+
+   /* J (bit 24) stays zero.  T (bit 5) is copied from R15T[0]. */
+   if (vex_state->guest_R15T & 1)
+      r |= (1 << 5);
+
+   /* ITSTATE is punted on for the time being; could be computed if
+      needed.  E (endianness) stays 0 == little-endian.  A,I,F mask
+      async exceptions; unclear what is right, so leave them zero. */
+   return r;
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Reset the whole guest state to its architectural power-on-ish
+   defaults: all registers zero, ARM (not Thumb) mode, flags thunk in
+   COPY mode with all flags clear, and no pending IT block. */
+void LibVEX_GuestARM_initialise ( /*OUT*/VexGuestARMState* vex_state )
+{
+   /* Event-check fields used by the dispatcher. */
+   vex_state->host_EvC_FAILADDR = 0;
+   vex_state->host_EvC_COUNTER = 0;
+
+   /* Integer register file.  R15T carries the T (Thumb) bit in
+      bit 0, so zero implies ARM mode. */
+   vex_state->guest_R0  = 0;
+   vex_state->guest_R1  = 0;
+   vex_state->guest_R2  = 0;
+   vex_state->guest_R3  = 0;
+   vex_state->guest_R4  = 0;
+   vex_state->guest_R5  = 0;
+   vex_state->guest_R6  = 0;
+   vex_state->guest_R7  = 0;
+   vex_state->guest_R8  = 0;
+   vex_state->guest_R9  = 0;
+   vex_state->guest_R10 = 0;
+   vex_state->guest_R11 = 0;
+   vex_state->guest_R12 = 0;
+   vex_state->guest_R13 = 0;
+   vex_state->guest_R14 = 0;
+   vex_state->guest_R15T = 0;  /* NB: implies ARM mode */
+
+   /* Lazy condition-code thunk: COPY of an all-zeroes flag word. */
+   vex_state->guest_CC_OP   = ARMG_CC_OP_COPY;
+   vex_state->guest_CC_DEP1 = 0;
+   vex_state->guest_CC_DEP2 = 0;
+   vex_state->guest_CC_NDEP = 0;
+   vex_state->guest_QFLAG32 = 0;
+   vex_state->guest_GEFLAG0 = 0;
+   vex_state->guest_GEFLAG1 = 0;
+   vex_state->guest_GEFLAG2 = 0;
+   vex_state->guest_GEFLAG3 = 0;
+
+   /* Miscellaneous bookkeeping fields. */
+   vex_state->guest_EMNOTE  = EmNote_NONE;
+   vex_state->guest_CMSTART = 0;
+   vex_state->guest_CMLEN   = 0;
+   vex_state->guest_NRADDR  = 0;
+   vex_state->guest_IP_AT_SYSCALL = 0;
+
+   /* VFP/NEON D register file. */
+   vex_state->guest_D0  = 0;
+   vex_state->guest_D1  = 0;
+   vex_state->guest_D2  = 0;
+   vex_state->guest_D3  = 0;
+   vex_state->guest_D4  = 0;
+   vex_state->guest_D5  = 0;
+   vex_state->guest_D6  = 0;
+   vex_state->guest_D7  = 0;
+   vex_state->guest_D8  = 0;
+   vex_state->guest_D9  = 0;
+   vex_state->guest_D10 = 0;
+   vex_state->guest_D11 = 0;
+   vex_state->guest_D12 = 0;
+   vex_state->guest_D13 = 0;
+   vex_state->guest_D14 = 0;
+   vex_state->guest_D15 = 0;
+   vex_state->guest_D16 = 0;
+   vex_state->guest_D17 = 0;
+   vex_state->guest_D18 = 0;
+   vex_state->guest_D19 = 0;
+   vex_state->guest_D20 = 0;
+   vex_state->guest_D21 = 0;
+   vex_state->guest_D22 = 0;
+   vex_state->guest_D23 = 0;
+   vex_state->guest_D24 = 0;
+   vex_state->guest_D25 = 0;
+   vex_state->guest_D26 = 0;
+   vex_state->guest_D27 = 0;
+   vex_state->guest_D28 = 0;
+   vex_state->guest_D29 = 0;
+   vex_state->guest_D30 = 0;
+   vex_state->guest_D31 = 0;
+
+   /* ARM encoded; zero is the default as it happens (result flags
+      (NZCV) cleared, FZ disabled, round to nearest, non-vector mode,
+      all exns masked, all exn sticky bits cleared). */
+   vex_state->guest_FPSCR = 0;
+
+   /* Thread-local storage pointer (user-read-only TPIDR). */
+   vex_state->guest_TPIDRURO = 0;
+
+   /* Not in a Thumb IT block. */
+   vex_state->guest_ITSTATE = 0;
+
+   vex_state->padding1 = 0;
+}
+
+
+/*-----------------------------------------------------------*/
+/*--- Describing the arm guest state, for the benefit     ---*/
+/*--- of iropt and instrumenters.                         ---*/
+/*-----------------------------------------------------------*/
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions.  If in doubt return
+   True (but this generates significantly slower code).  
+
+   We enforce precise exns for guest R13(sp), R15T(pc), R7, R11.
+
+
+   Only R13(sp) is needed in mode VexRegUpdSpAtMemAccess.   
+*/
+Bool guest_arm_state_requires_precise_mem_exns (
+        Int minoff, Int maxoff, VexRegisterUpdates pxControl
+     )
+{
+   /* Each register occupies 4 bytes; [minoff,maxoff] overlaps the
+      register at LO iff maxoff >= LO && minoff <= LO+3. */
+   Int sp_lo  = offsetof(VexGuestARMState, guest_R13);
+   Int pc_lo  = offsetof(VexGuestARMState, guest_R15T);
+   Int r11_lo = offsetof(VexGuestARMState, guest_R11);
+   Int r7_lo  = offsetof(VexGuestARMState, guest_R7);
+
+   /* Stack pointer (R13) always needs precise updates. */
+   if (maxoff >= sp_lo && minoff <= sp_lo + 4 - 1)
+      return True;
+
+   /* In this mode, SP is the only register we need to check. */
+   if (pxControl == VexRegUpdSpAtMemAccess)
+      return False;
+
+   /* Program counter (R15T). */
+   if (maxoff >= pc_lo && minoff <= pc_lo + 4 - 1)
+      return True;
+
+   /* We appear to need precise updates of R11 in order to get proper
+      stacktraces from non-optimised code. */
+   if (maxoff >= r11_lo && minoff <= r11_lo + 4 - 1)
+      return True;
+
+   /* Ditto R7, particularly needed for proper stacktraces in Thumb
+      code. */
+   if (maxoff >= r7_lo && minoff <= r7_lo + 4 - 1)
+      return True;
+
+   return False;
+}
+
+
+
+/* Describe one guest state field as an (offset, size) pair, for the
+   alwaysDefd table below. */
+#define ALWAYSDEFD(field)                           \
+    { offsetof(VexGuestARMState, field),            \
+      (sizeof ((VexGuestARMState*)0)->field) }
+
+/* Static description of the ARM guest state for the benefit of iropt
+   and instrumenters (e.g. Memcheck). */
+VexGuestLayout
+   armGuest_layout 
+      = { 
+          /* Total size of the guest state, in bytes. */
+          .total_sizeB = sizeof(VexGuestARMState),
+
+          /* Describe the stack pointer. */
+          .offset_SP = offsetof(VexGuestARMState,guest_R13),
+          .sizeof_SP = 4,
+
+          /* Describe the instruction pointer. */
+          .offset_IP = offsetof(VexGuestARMState,guest_R15T),
+          .sizeof_IP = 4,
+
+          /* Describe any sections to be regarded by Memcheck as
+             'always-defined'. */
+          .n_alwaysDefd = 10,
+
+          /* flags thunk: OP is always defd, whereas DEP1 and DEP2
+             have to be tracked.  See detailed comment in gdefs.h on
+             meaning of thunk fields. */
+          .alwaysDefd
+             = { /* 0 */ ALWAYSDEFD(guest_R15T),
+                 /* 1 */ ALWAYSDEFD(guest_CC_OP),
+                 /* 2 */ ALWAYSDEFD(guest_CC_NDEP),
+                 /* 3 */ ALWAYSDEFD(guest_EMNOTE),
+                 /* 4 */ ALWAYSDEFD(guest_CMSTART),
+                 /* 5 */ ALWAYSDEFD(guest_CMLEN),
+                 /* 6 */ ALWAYSDEFD(guest_NRADDR),
+                 /* 7 */ ALWAYSDEFD(guest_IP_AT_SYSCALL),
+                 /* 8 */ ALWAYSDEFD(guest_TPIDRURO),
+                 /* 9 */ ALWAYSDEFD(guest_ITSTATE)
+               }
+        };
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                 guest_arm_helpers.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_arm_toIR.c b/VEX/priv/guest_arm_toIR.c
new file mode 100644
index 0000000..7cedb7d
--- /dev/null
+++ b/VEX/priv/guest_arm_toIR.c
@@ -0,0 +1,22343 @@
+
+/*--------------------------------------------------------------------*/
+/*--- begin                                       guest_arm_toIR.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   NEON support is
+   Copyright (C) 2010-2013 Samsung Electronics
+   contributed by Dmitry Zhurikhin <zhur@ispras.ru>
+              and Kirill Batuzov <batuzovk@ispras.ru>
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* XXXX thumb to check:
+   that all cases where putIRegT writes r15, we generate a jump.
+
+   All uses of newTemp assign to an IRTemp and not a UInt
+
+   For all thumb loads and stores, including VFP ones, new-ITSTATE is
+   backed out before the memory op, and restored afterwards.  This
+   needs to happen even after we go uncond.  (and for sure it doesn't
+   happen for VFP loads/stores right now).
+
+   VFP on thumb: check that we exclude all r13/r15 cases that we
+   should.
+
+   XXXX thumb to do: improve the ITSTATE-zeroing optimisation by
+   taking into account the number of insns guarded by an IT.
+
+   remove the nasty hack, in the spechelper, of looking for Or32(...,
+   0xE0) as the first arg to armg_calculate_condition, and instead
+   use Slice44 as specified in comments in the spechelper.
+
+   add specialisations for armg_calculate_flag_c and _v, as they
+   are moderately often needed in Thumb code.
+
+   Correctness: ITSTATE handling in Thumb SVCs is wrong.
+
+   Correctness (obscure): in m_transtab, when invalidating code
+   address ranges, invalidate up to 18 bytes after the end of the
+   range.  This is because the ITSTATE optimisation at the top of
+   _THUMB_WRK below analyses up to 18 bytes before the start of any
+   given instruction, and so might depend on the invalidated area.
+*/
+
+/* Limitations, etc
+
+   - pretty dodgy exception semantics for {LD,ST}Mxx and {LD,ST}RD.
+     These instructions are non-restartable in the case where the
+     transfer(s) fault.
+
+   - SWP: the restart jump back is Ijk_Boring; it should be
+     Ijk_NoRedir but that's expensive.  See comments on casLE() in
+     guest_x86_toIR.c.
+*/
+
+/* "Special" instructions.
+
+   This instruction decoder can decode four special instructions
+   which mean nothing natively (are no-ops as far as regs/mem are
+   concerned) but have meaning for supporting Valgrind.  A special
+   instruction is flagged by a 16-byte preamble:
+
+      E1A0C1EC E1A0C6EC E1A0CEEC E1A0C9EC
+      (mov r12, r12, ROR #3;   mov r12, r12, ROR #13;
+       mov r12, r12, ROR #29;  mov r12, r12, ROR #19)
+
+   Following that, one of the following 3 are allowed
+   (standard interpretation in parentheses):
+
+      E18AA00A (orr r10,r10,r10)   R3 = client_request ( R4 )
+      E18BB00B (orr r11,r11,r11)   R3 = guest_NRADDR
+      E18CC00C (orr r12,r12,r12)   branch-and-link-to-noredir R4
+      E1899009 (orr r9,r9,r9)      IR injection
+
+   Any other bytes following the 16-byte preamble are illegal and
+   constitute a failure in instruction decoding.  This all assumes
+   that the preamble will never occur except in specific code
+   fragments designed for Valgrind to catch.
+*/
+
+/* Translates ARM(v5) code to IR. */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_guest_arm.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_arm_defs.h"
+
+
+/*------------------------------------------------------------*/
+/*--- Globals                                              ---*/
+/*------------------------------------------------------------*/
+
+/* These are set at the start of the translation of a instruction, so
+   that we don't have to pass them around endlessly.  CONST means does
+   not change during translation of the instruction.
+*/
+
+/* CONST: what is the host's endianness?  This has to do with float vs
+   double register accesses on VFP, but it's complex and not properly
+   thought out. */
+static VexEndness host_endness;
+
+/* CONST: The guest address for the instruction currently being
+   translated.  This is the real, "decoded" address (not subject
+   to the CPSR.T kludge). */
+static Addr32 guest_R15_curr_instr_notENC;
+
+/* CONST, FOR ASSERTIONS ONLY.  Indicates whether currently processed
+   insn is Thumb (True) or ARM (False). */
+static Bool __curr_is_Thumb;
+
+/* MOD: The IRSB* into which we're generating code. */
+static IRSB* irsb;
+
+/* These are to do with handling writes to r15.  They are initially
+   set at the start of disInstr_ARM_WRK to indicate no update,
+   possibly updated during the routine, and examined again at the end.
+   If they have been set to indicate a r15 update then a jump is
+   generated.  Note, "explicit" jumps (b, bx, etc) are generated
+   directly, not using this mechanism -- this is intended to handle
+   the implicit-style jumps resulting from (eg) assigning to r15 as
+   the result of insns we wouldn't normally consider branchy. */
+
+/* MOD.  Initially False; set to True iff abovementioned handling is
+   required. */
+static Bool r15written;
+
+/* MOD.  Initially IRTemp_INVALID.  If the r15 branch to be generated
+   is conditional, this holds the gating IRTemp :: Ity_I32.  If the
+   branch to be generated is unconditional, this remains
+   IRTemp_INVALID. */
+static IRTemp r15guard; /* :: Ity_I32, 0 or 1 */
+
+/* MOD.  Initially Ijk_Boring.  If an r15 branch is to be generated,
+   this holds the jump kind.  Note it is an IRJumpKind, not an IRTemp:
+   putIRegA stores its IRJumpKind argument here and compares the value
+   against Ijk_Boring.  (Was mis-declared as IRTemp.) */
+static IRJumpKind r15kind;
+
+
+/*------------------------------------------------------------*/
+/*--- Debugging output                                     ---*/
+/*------------------------------------------------------------*/
+
+/* Print a front-end disassembly trace line, but only when VEX
+   front-end tracing is enabled. */
+#define DIP(format, args...)           \
+   if (vex_traceflags & VEX_TRACE_FE)  \
+      vex_printf(format, ## args)
+
+/* Format into BUF, but only when front-end tracing is enabled. */
+#define DIS(buf, format, args...)      \
+   if (vex_traceflags & VEX_TRACE_FE)  \
+      vex_sprintf(buf, format, ## args)
+
+/* Sanity-check the mode (Thumb vs ARM) of the instruction currently
+   being translated. */
+#define ASSERT_IS_THUMB \
+   do { vassert(__curr_is_Thumb); } while (0)
+
+#define ASSERT_IS_ARM \
+   do { vassert(! __curr_is_Thumb); } while (0)
+
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for deconstructing the        ---*/
+/*--- arm insn stream.                                     ---*/
+/*------------------------------------------------------------*/
+
+/* Do a little-endian load of a 32-bit word, regardless of the
+   endianness of the underlying host. */
+static inline UInt getUIntLittleEndianly ( const UChar* p )
+{
+   /* Assemble the word byte by byte, least significant first. */
+   return (UInt)p[0]
+          | ((UInt)p[1] << 8)
+          | ((UInt)p[2] << 16)
+          | ((UInt)p[3] << 24);
+}
+
+/* Do a little-endian load of a 16-bit word, regardless of the
+   endianness of the underlying host. */
+static inline UShort getUShortLittleEndianly ( const UChar* p )
+{
+   /* p[0] is the low byte, p[1] the high byte. */
+   UShort lo = p[0];
+   UShort hi = p[1];
+   return (UShort)((hi << 8) | lo);
+}
+
+/* Rotate X right by SH places, 0 <= SH <= 31. */
+static UInt ROR32 ( UInt x, UInt sh ) {
+   /* 'sh >= 0' was a tautology (sh is unsigned) and triggered
+      -Wtype-limits; only the upper bound is meaningful. */
+   vassert(sh < 32);
+   if (sh == 0)
+      return x;   /* avoids the undefined 32-bit shift below */
+   else
+      return (x << (32-sh)) | (x >> sh);
+}
+
+/* Count the number of set bits in X. */
+static Int popcount32 ( UInt x )
+{
+   Int i, n = 0;
+   for (i = 0; i < 32; i++)
+      n += (x >> i) & 1;
+   return n;
+}
+
+/* Return X with bit IX replaced by the low bit of B. */
+static UInt setbit32 ( UInt x, Int ix, UInt b )
+{
+   UInt mask = 1 << ix;
+   return (x & ~mask) | ((b << ix) & mask);
+}
+
+/* Assemble small bit-vectors from individual bit values, most
+   significant bit first.  Used for matching instruction fields. */
+#define BITS2(_b1,_b0) \
+   (((_b1) << 1) | (_b0))
+
+#define BITS3(_b2,_b1,_b0)                      \
+  (((_b2) << 2) | ((_b1) << 1) | (_b0))
+
+#define BITS4(_b3,_b2,_b1,_b0) \
+   (((_b3) << 3) | ((_b2) << 2) | ((_b1) << 1) | (_b0))
+
+#define BITS8(_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0)  \
+   ((BITS4((_b7),(_b6),(_b5),(_b4)) << 4) \
+    | BITS4((_b3),(_b2),(_b1),(_b0)))
+
+/* 5/6/7-bit variants, expressed as zero-padded BITS8. */
+#define BITS5(_b4,_b3,_b2,_b1,_b0)  \
+   (BITS8(0,0,0,(_b4),(_b3),(_b2),(_b1),(_b0)))
+#define BITS6(_b5,_b4,_b3,_b2,_b1,_b0)  \
+   (BITS8(0,0,(_b5),(_b4),(_b3),(_b2),(_b1),(_b0)))
+#define BITS7(_b6,_b5,_b4,_b3,_b2,_b1,_b0)  \
+   (BITS8(0,(_b6),(_b5),(_b4),(_b3),(_b2),(_b1),(_b0)))
+
+#define BITS9(_b8,_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0)      \
+   (((_b8) << 8) \
+    | BITS8((_b7),(_b6),(_b5),(_b4),(_b3),(_b2),(_b1),(_b0)))
+
+#define BITS10(_b9,_b8,_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0)  \
+   (((_b9) << 9) | ((_b8) << 8)                                \
+    | BITS8((_b7),(_b6),(_b5),(_b4),(_b3),(_b2),(_b1),(_b0)))
+
+/* produces _uint[_bMax:_bMin] */
+#define SLICE_UInt(_uint,_bMax,_bMin) \
+   (( ((UInt)(_uint)) >> (_bMin)) \
+    & (UInt)((1ULL << ((_bMax) - (_bMin) + 1)) - 1ULL))
+
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for creating IR fragments.    ---*/
+/*------------------------------------------------------------*/
+
+/* Make an unsigned 64-bit IR constant. */
+static IRExpr* mkU64 ( ULong i )
+{
+   return IRExpr_Const(IRConst_U64(i));
+}
+
+/* Make an unsigned 32-bit IR constant. */
+static IRExpr* mkU32 ( UInt i )
+{
+   return IRExpr_Const(IRConst_U32(i));
+}
+
+/* Make an unsigned 8-bit IR constant; I must fit in 8 bits. */
+static IRExpr* mkU8 ( UInt i )
+{
+   vassert(i < 256);
+   return IRExpr_Const(IRConst_U8( (UChar)i ));
+}
+
+/* Read the value of an IR temporary. */
+static IRExpr* mkexpr ( IRTemp tmp )
+{
+   return IRExpr_RdTmp(tmp);
+}
+
+/* Build unary, binary and ternary IR operator applications. */
+static IRExpr* unop ( IROp op, IRExpr* a )
+{
+   return IRExpr_Unop(op, a);
+}
+
+static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
+{
+   return IRExpr_Binop(op, a1, a2);
+}
+
+static IRExpr* triop ( IROp op, IRExpr* a1, IRExpr* a2, IRExpr* a3 )
+{
+   return IRExpr_Triop(op, a1, a2, a3);
+}
+
+/* Little-endian load of the given type from ADDR. */
+static IRExpr* loadLE ( IRType ty, IRExpr* addr )
+{
+   return IRExpr_Load(Iend_LE, ty, addr);
+}
+
+/* Add a statement to the list held by "irsb". */
+static void stmt ( IRStmt* st )
+{
+   addStmtToIRSB( irsb, st );
+}
+
+/* Assign expression E to temporary DST. */
+static void assign ( IRTemp dst, IRExpr* e )
+{
+   stmt( IRStmt_WrTmp(dst, e) );
+}
+
+/* Unconditional little-endian store of DATA at ADDR. */
+static void storeLE ( IRExpr* addr, IRExpr* data )
+{
+   stmt( IRStmt_Store(Iend_LE, addr, data) );
+}
+
+/* Little-endian store of DATA at ADDR, gated by guardT (:: Ity_I32,
+   0 or 1); guardT == IRTemp_INVALID means store unconditionally. */
+static void storeGuardedLE ( IRExpr* addr, IRExpr* data, IRTemp guardT )
+{
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional */
+      storeLE(addr, data);
+   } else {
+      stmt( IRStmt_StoreG(Iend_LE, addr, data,
+                          binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0))) );
+   }
+}
+
+/* Guarded little-endian load: load from ADDR, widen via CVT, and put
+   the result in DST; if the guard is zero, ALT goes in DST instead.
+   guardT == IRTemp_INVALID means load unconditionally. */
+static void loadGuardedLE ( IRTemp dst, IRLoadGOp cvt,
+                            IRExpr* addr, IRExpr* alt, 
+                            IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
+{
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional */
+      IRExpr* loaded = NULL;
+      switch (cvt) {
+         case ILGop_Ident32:
+            loaded = loadLE(Ity_I32, addr); break;
+         case ILGop_8Uto32:
+            loaded = unop(Iop_8Uto32, loadLE(Ity_I8, addr)); break;
+         case ILGop_8Sto32:
+            loaded = unop(Iop_8Sto32, loadLE(Ity_I8, addr)); break;
+         case ILGop_16Uto32:
+            loaded = unop(Iop_16Uto32, loadLE(Ity_I16, addr)); break;
+         case ILGop_16Sto32:
+            loaded = unop(Iop_16Sto32, loadLE(Ity_I16, addr)); break;
+         default:
+            vassert(0);
+      }
+      vassert(loaded != NULL);
+      assign(dst, loaded);
+   } else {
+      /* Generate a guarded load into 'dst', but apply 'cvt' to the
+         loaded data before putting the data in 'dst'.  If the load
+         does not take place, 'alt' is placed directly in 'dst'. */
+      stmt( IRStmt_LoadG(Iend_LE, cvt, dst, addr, alt,
+                         binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0))) );
+   }
+}
+
+/* Generate a new temporary of the given type. */
+static IRTemp newTemp ( IRType ty )
+{
+   vassert(isPlausibleIRType(ty));
+   return newIRTemp( irsb->tyenv, ty );
+}
+
+/* Produces a value in 0 .. 3, which is encoded as per the type
+   IRRoundingMode. */
+static IRExpr* /* :: Ity_I32 */ get_FAKE_roundingmode ( void )
+{
+   return mkU32(Irrm_NEAREST);
+}
+
+/* Generate an expression for SRC rotated right by ROT. */
+static IRExpr* genROR32( IRTemp src, Int rot )
+{
+   vassert(rot >= 0 && rot < 32);
+   if (rot == 0)
+      return mkexpr(src);
+   return
+      binop(Iop_Or32,
+            binop(Iop_Shl32, mkexpr(src), mkU8(32 - rot)),
+            binop(Iop_Shr32, mkexpr(src), mkU8(rot)));
+}
+
+/* 128-bit constant with both 64-bit halves equal to I. */
+static IRExpr* mkU128 ( ULong i )
+{
+   return binop(Iop_64HLtoV128, mkU64(i), mkU64(i));
+}
+
+/* Generate a 4-aligned version of the given expression if
+   the given condition is true.  Else return it unchanged. */
+static IRExpr* align4if ( IRExpr* e, Bool b )
+{
+   if (b)
+      return binop(Iop_And32, e, mkU32(~3));
+   else
+      return e;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for accessing guest registers.               ---*/
+/*------------------------------------------------------------*/
+
+/* Byte offsets of guest state fields, for IRExpr_Get / IRStmt_Put. */
+
+/* Core integer registers; R15T also carries the T bit. */
+#define OFFB_R0       offsetof(VexGuestARMState,guest_R0)
+#define OFFB_R1       offsetof(VexGuestARMState,guest_R1)
+#define OFFB_R2       offsetof(VexGuestARMState,guest_R2)
+#define OFFB_R3       offsetof(VexGuestARMState,guest_R3)
+#define OFFB_R4       offsetof(VexGuestARMState,guest_R4)
+#define OFFB_R5       offsetof(VexGuestARMState,guest_R5)
+#define OFFB_R6       offsetof(VexGuestARMState,guest_R6)
+#define OFFB_R7       offsetof(VexGuestARMState,guest_R7)
+#define OFFB_R8       offsetof(VexGuestARMState,guest_R8)
+#define OFFB_R9       offsetof(VexGuestARMState,guest_R9)
+#define OFFB_R10      offsetof(VexGuestARMState,guest_R10)
+#define OFFB_R11      offsetof(VexGuestARMState,guest_R11)
+#define OFFB_R12      offsetof(VexGuestARMState,guest_R12)
+#define OFFB_R13      offsetof(VexGuestARMState,guest_R13)
+#define OFFB_R14      offsetof(VexGuestARMState,guest_R14)
+#define OFFB_R15T     offsetof(VexGuestARMState,guest_R15T)
+
+/* Lazy condition-code thunk, plus the no-redirect address. */
+#define OFFB_CC_OP    offsetof(VexGuestARMState,guest_CC_OP)
+#define OFFB_CC_DEP1  offsetof(VexGuestARMState,guest_CC_DEP1)
+#define OFFB_CC_DEP2  offsetof(VexGuestARMState,guest_CC_DEP2)
+#define OFFB_CC_NDEP  offsetof(VexGuestARMState,guest_CC_NDEP)
+#define OFFB_NRADDR   offsetof(VexGuestARMState,guest_NRADDR)
+
+/* VFP/NEON D registers. */
+#define OFFB_D0       offsetof(VexGuestARMState,guest_D0)
+#define OFFB_D1       offsetof(VexGuestARMState,guest_D1)
+#define OFFB_D2       offsetof(VexGuestARMState,guest_D2)
+#define OFFB_D3       offsetof(VexGuestARMState,guest_D3)
+#define OFFB_D4       offsetof(VexGuestARMState,guest_D4)
+#define OFFB_D5       offsetof(VexGuestARMState,guest_D5)
+#define OFFB_D6       offsetof(VexGuestARMState,guest_D6)
+#define OFFB_D7       offsetof(VexGuestARMState,guest_D7)
+#define OFFB_D8       offsetof(VexGuestARMState,guest_D8)
+#define OFFB_D9       offsetof(VexGuestARMState,guest_D9)
+#define OFFB_D10      offsetof(VexGuestARMState,guest_D10)
+#define OFFB_D11      offsetof(VexGuestARMState,guest_D11)
+#define OFFB_D12      offsetof(VexGuestARMState,guest_D12)
+#define OFFB_D13      offsetof(VexGuestARMState,guest_D13)
+#define OFFB_D14      offsetof(VexGuestARMState,guest_D14)
+#define OFFB_D15      offsetof(VexGuestARMState,guest_D15)
+#define OFFB_D16      offsetof(VexGuestARMState,guest_D16)
+#define OFFB_D17      offsetof(VexGuestARMState,guest_D17)
+#define OFFB_D18      offsetof(VexGuestARMState,guest_D18)
+#define OFFB_D19      offsetof(VexGuestARMState,guest_D19)
+#define OFFB_D20      offsetof(VexGuestARMState,guest_D20)
+#define OFFB_D21      offsetof(VexGuestARMState,guest_D21)
+#define OFFB_D22      offsetof(VexGuestARMState,guest_D22)
+#define OFFB_D23      offsetof(VexGuestARMState,guest_D23)
+#define OFFB_D24      offsetof(VexGuestARMState,guest_D24)
+#define OFFB_D25      offsetof(VexGuestARMState,guest_D25)
+#define OFFB_D26      offsetof(VexGuestARMState,guest_D26)
+#define OFFB_D27      offsetof(VexGuestARMState,guest_D27)
+#define OFFB_D28      offsetof(VexGuestARMState,guest_D28)
+#define OFFB_D29      offsetof(VexGuestARMState,guest_D29)
+#define OFFB_D30      offsetof(VexGuestARMState,guest_D30)
+#define OFFB_D31      offsetof(VexGuestARMState,guest_D31)
+
+/* FP status/control, TLS pointer, IT state, Q and GE flags. */
+#define OFFB_FPSCR    offsetof(VexGuestARMState,guest_FPSCR)
+#define OFFB_TPIDRURO offsetof(VexGuestARMState,guest_TPIDRURO)
+#define OFFB_ITSTATE  offsetof(VexGuestARMState,guest_ITSTATE)
+#define OFFB_QFLAG32  offsetof(VexGuestARMState,guest_QFLAG32)
+#define OFFB_GEFLAG0  offsetof(VexGuestARMState,guest_GEFLAG0)
+#define OFFB_GEFLAG1  offsetof(VexGuestARMState,guest_GEFLAG1)
+#define OFFB_GEFLAG2  offsetof(VexGuestARMState,guest_GEFLAG2)
+#define OFFB_GEFLAG3  offsetof(VexGuestARMState,guest_GEFLAG3)
+
+/* Cache-maintenance (translation invalidation) range. */
+#define OFFB_CMSTART  offsetof(VexGuestARMState,guest_CMSTART)
+#define OFFB_CMLEN    offsetof(VexGuestARMState,guest_CMLEN)
+
+
+/* ---------------- Integer registers ---------------- */
+
+static Int integerGuestRegOffset ( UInt iregNo )
+{
+   /* Do we care about endianness here?  We do if sub-parts of integer
+      registers are accessed, but I don't think that ever happens on
+      ARM.  Table lookup, indexed by architectural register number. */
+   static const Int offsets[16]
+      = { OFFB_R0,  OFFB_R1,  OFFB_R2,  OFFB_R3,
+          OFFB_R4,  OFFB_R5,  OFFB_R6,  OFFB_R7,
+          OFFB_R8,  OFFB_R9,  OFFB_R10, OFFB_R11,
+          OFFB_R12, OFFB_R13, OFFB_R14, OFFB_R15T };
+   vassert(iregNo < 16);
+   return offsets[iregNo];
+}
+
+/* Plain ("low level") read from a reg; no +8 offset magic for r15. */
+static IRExpr* llGetIReg ( UInt iregNo )
+{
+   vassert(iregNo < 16);
+   return IRExpr_Get( integerGuestRegOffset(iregNo), Ity_I32 );
+}
+
+/* Architected read from a reg in ARM mode.  This automagically adds 8
+   to all reads of r15. */
+static IRExpr* getIRegA ( UInt iregNo )
+{
+   IRExpr* e;
+   ASSERT_IS_ARM;
+   vassert(iregNo < 16);
+   if (iregNo == 15) {
+      /* If asked for r15, don't read the guest state value, as that
+         may not be up to date in the case where loop unrolling has
+         happened, because the first insn's write to the block is
+         omitted; hence in the 2nd and subsequent unrollings we don't
+         have a correct value in guest r15.  Instead produce the
+         constant that we know would be produced at this point. */
+      vassert(0 == (guest_R15_curr_instr_notENC & 3));
+      e = mkU32(guest_R15_curr_instr_notENC + 8);
+   } else {
+      e = IRExpr_Get( integerGuestRegOffset(iregNo), Ity_I32 );
+   }
+   return e;
+}
+
+/* Architected read from a reg in Thumb mode.  This automagically adds
+   4 to all reads of r15. */
+static IRExpr* getIRegT ( UInt iregNo )
+{
+   IRExpr* e;
+   ASSERT_IS_THUMB;
+   vassert(iregNo < 16);
+   if (iregNo == 15) {
+      /* Ditto comment in getIReg.  In Thumb mode r15 reads as the
+         current instruction address plus 4 (not plus 8). */
+      vassert(0 == (guest_R15_curr_instr_notENC & 1));
+      e = mkU32(guest_R15_curr_instr_notENC + 4);
+   } else {
+      e = IRExpr_Get( integerGuestRegOffset(iregNo), Ity_I32 );
+   }
+   return e;
+}
+
+/* Plain ("low level") write to a reg; no jump or alignment magic for
+   r15. */
+static void llPutIReg ( UInt iregNo, IRExpr* e )
+{
+   vassert(iregNo < 16);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
+   stmt( IRStmt_Put(integerGuestRegOffset(iregNo), e) );
+}
+
+/* Architected write to an integer register in ARM mode.  If it is to
+   r15, record info so at the end of this insn's translation, a branch
+   to it can be made.  Also handles conditional writes to the
+   register: if guardT == IRTemp_INVALID then the write is
+   unconditional.  If writing r15, also 4-align it. */
+static void putIRegA ( UInt       iregNo,
+                       IRExpr*    e,
+                       IRTemp     guardT /* :: Ity_I32, 0 or 1 */,
+                       IRJumpKind jk /* if a jump is generated */ )
+{
+   /* if writing r15, force e to be 4-aligned. */
+   // INTERWORKING FIXME.  this needs to be relaxed so that
+   // puts caused by LDMxx which load r15 interwork right.
+   // but would never aligning be too relaxed?  unclear -- confirm.
+   //if (iregNo == 15)
+   //   e = binop(Iop_And32, e, mkU32(~3));
+   ASSERT_IS_ARM;
+   /* So, generate either an unconditional or a conditional write to
+      the reg. */
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional write */
+      llPutIReg( iregNo, e );
+   } else {
+      /* conditional: keep the old value when the guard is zero */
+      llPutIReg( iregNo,
+                 IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+                             e, llGetIReg(iregNo) ));
+   }
+   if (iregNo == 15) {
+      // assert against competing r15 updates.  Shouldn't
+      // happen; should be ruled out by the instr matching
+      // logic.
+      vassert(r15written == False);
+      vassert(r15guard   == IRTemp_INVALID);
+      vassert(r15kind    == Ijk_Boring);
+      // record the pending branch so the end of the translation
+      // can generate the actual jump.
+      r15written = True;
+      r15guard   = guardT;
+      r15kind    = jk;
+   }
+}
+
+
+/* Architected write to an integer register in Thumb mode.  Writes to
+   r15 are not allowed.  Handles conditional writes to the register:
+   if guardT == IRTemp_INVALID then the write is unconditional. */
+static void putIRegT ( UInt       iregNo,
+                       IRExpr*    e,
+                       IRTemp     guardT /* :: Ity_I32, 0 or 1 */ )
+{
+   /* So, generate either an unconditional or a conditional write to
+      the reg. */
+   ASSERT_IS_THUMB;
+   /* 'iregNo >= 0' was a tautology (iregNo is unsigned); only the
+      upper bound matters -- r15 is disallowed in Thumb mode. */
+   vassert(iregNo <= 14);
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional write */
+      llPutIReg( iregNo, e );
+   } else {
+      /* conditional: keep the old value when the guard is zero */
+      llPutIReg( iregNo,
+                 IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+                             e, llGetIReg(iregNo) ));
+   }
+}
+
+
+/* Thumb16 and Thumb32 only.
+   Returns true if reg is 13 or 15.  Implements the BadReg
+   predicate in the ARM ARM. */
+static Bool isBadRegT ( UInt r )
+{
+   vassert(r <= 15);
+   ASSERT_IS_THUMB;
+   switch (r) {
+      case 13: case 15: return True;
+      default:          return False;
+   }
+}
+
+
+/* ---------------- Double registers ---------------- */
+
+static Int doubleGuestRegOffset ( UInt dregNo )
+{
+   /* Do we care about endianness here?  Probably do if we ever get
+      into the situation of dealing with the single-precision VFP
+      registers. */
+   switch (dregNo) {
+      case 0:  return OFFB_D0;
+      case 1:  return OFFB_D1;
+      case 2:  return OFFB_D2;
+      case 3:  return OFFB_D3;
+      case 4:  return OFFB_D4;
+      case 5:  return OFFB_D5;
+      case 6:  return OFFB_D6;
+      case 7:  return OFFB_D7;
+      case 8:  return OFFB_D8;
+      case 9:  return OFFB_D9;
+      case 10: return OFFB_D10;
+      case 11: return OFFB_D11;
+      case 12: return OFFB_D12;
+      case 13: return OFFB_D13;
+      case 14: return OFFB_D14;
+      case 15: return OFFB_D15;
+      case 16: return OFFB_D16;
+      case 17: return OFFB_D17;
+      case 18: return OFFB_D18;
+      case 19: return OFFB_D19;
+      case 20: return OFFB_D20;
+      case 21: return OFFB_D21;
+      case 22: return OFFB_D22;
+      case 23: return OFFB_D23;
+      case 24: return OFFB_D24;
+      case 25: return OFFB_D25;
+      case 26: return OFFB_D26;
+      case 27: return OFFB_D27;
+      case 28: return OFFB_D28;
+      case 29: return OFFB_D29;
+      case 30: return OFFB_D30;
+      case 31: return OFFB_D31;
+      default: vassert(0);
+   }
+}
+
+/* Plain ("low level") read from a VFP Dreg. */
+static IRExpr* llGetDReg ( UInt dregNo )
+{
+   vassert(dregNo < 32);
+   return IRExpr_Get( doubleGuestRegOffset(dregNo), Ity_F64 );
+}
+
+/* Architected read from a VFP Dreg. */
+static IRExpr* getDReg ( UInt dregNo ) {
+   return llGetDReg( dregNo );
+}
+
+/* Plain ("low level") write to a VFP Dreg. */
+static void llPutDReg ( UInt dregNo, IRExpr* e )
+{
+   vassert(dregNo < 32);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64);
+   stmt( IRStmt_Put(doubleGuestRegOffset(dregNo), e) );
+}
+
+/* Architected write to a VFP Dreg.  Handles conditional writes to the
+   register: if guardT == IRTemp_INVALID then the write is
+   unconditional. */
+static void putDReg ( UInt    dregNo,
+                      IRExpr* e,
+                      IRTemp  guardT /* :: Ity_I32, 0 or 1 */)
+{
+   /* So, generate either an unconditional or a conditional write to
+      the reg. */
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional write */
+      llPutDReg( dregNo, e );
+   } else {
+      llPutDReg( dregNo,
+                 IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+                             e, llGetDReg(dregNo) ));
+   }
+}
+
+/* And now exactly the same stuff all over again, but this time
+   taking/returning I64 rather than F64, to support 64-bit Neon
+   ops. */
+
+/* Plain ("low level") read from a Neon Integer Dreg. */
+static IRExpr* llGetDRegI64 ( UInt dregNo )
+{
+   vassert(dregNo < 32);
+   return IRExpr_Get( doubleGuestRegOffset(dregNo), Ity_I64 );
+}
+
+/* Architected read from a Neon Integer Dreg. */
+static IRExpr* getDRegI64 ( UInt dregNo ) {
+   return llGetDRegI64( dregNo );
+}
+
+/* Plain ("low level") write to a Neon Integer Dreg. */
+static void llPutDRegI64 ( UInt dregNo, IRExpr* e )
+{
+   vassert(dregNo < 32);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64);
+   stmt( IRStmt_Put(doubleGuestRegOffset(dregNo), e) );
+}
+
+/* Architected write to a Neon Integer Dreg.  Handles conditional
+   writes to the register: if guardT == IRTemp_INVALID then the write
+   is unconditional. */
+static void putDRegI64 ( UInt    dregNo,
+                         IRExpr* e,
+                         IRTemp  guardT /* :: Ity_I32, 0 or 1 */)
+{
+   /* So, generate either an unconditional or a conditional write to
+      the reg. */
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional write */
+      llPutDRegI64( dregNo, e );
+   } else {
+      llPutDRegI64( dregNo,
+                    IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+                                e, llGetDRegI64(dregNo) ));
+   }
+}
+
+/* ---------------- Quad registers ---------------- */
+
+static Int quadGuestRegOffset ( UInt qregNo )
+{
+   /* Do we care about endianness here?  Probably do if we ever get
+      into the situation of dealing with the 64 bit Neon registers. */
+   switch (qregNo) {
+      case 0:  return OFFB_D0;
+      case 1:  return OFFB_D2;
+      case 2:  return OFFB_D4;
+      case 3:  return OFFB_D6;
+      case 4:  return OFFB_D8;
+      case 5:  return OFFB_D10;
+      case 6:  return OFFB_D12;
+      case 7:  return OFFB_D14;
+      case 8:  return OFFB_D16;
+      case 9:  return OFFB_D18;
+      case 10: return OFFB_D20;
+      case 11: return OFFB_D22;
+      case 12: return OFFB_D24;
+      case 13: return OFFB_D26;
+      case 14: return OFFB_D28;
+      case 15: return OFFB_D30;
+      default: vassert(0);
+   }
+}
+
+/* Plain ("low level") read from a Neon Qreg. */
+static IRExpr* llGetQReg ( UInt qregNo )
+{
+   vassert(qregNo < 16);
+   return IRExpr_Get( quadGuestRegOffset(qregNo), Ity_V128 );
+}
+
+/* Architected read from a Neon Qreg. */
+static IRExpr* getQReg ( UInt qregNo ) {
+   return llGetQReg( qregNo );
+}
+
+/* Plain ("low level") write to a Neon Qreg. */
+static void llPutQReg ( UInt qregNo, IRExpr* e )
+{
+   vassert(qregNo < 16);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_V128);
+   stmt( IRStmt_Put(quadGuestRegOffset(qregNo), e) );
+}
+
+/* Architected write to a Neon Qreg.  Handles conditional writes to the
+   register: if guardT == IRTemp_INVALID then the write is
+   unconditional. */
+static void putQReg ( UInt    qregNo,
+                      IRExpr* e,
+                      IRTemp  guardT /* :: Ity_I32, 0 or 1 */)
+{
+   /* So, generate either an unconditional or a conditional write to
+      the reg. */
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional write */
+      llPutQReg( qregNo, e );
+   } else {
+      llPutQReg( qregNo,
+                 IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+                             e, llGetQReg(qregNo) ));
+   }
+}
+
+
+/* ---------------- Float registers ---------------- */
+
+static Int floatGuestRegOffset ( UInt fregNo )
+{
+   /* Start with the offset of the containing double, and then correct
+      for endianness.  Actually this is completely bogus and needs
+      careful thought. */
+   Int off;
+   vassert(fregNo < 32);
+   off = doubleGuestRegOffset(fregNo >> 1);
+   if (host_endness == VexEndnessLE) {
+      if (fregNo & 1)
+         off += 4;
+   } else {
+      vassert(0);
+   }
+   return off;
+}
+
+/* Plain ("low level") read from a VFP Freg. */
+static IRExpr* llGetFReg ( UInt fregNo )
+{
+   vassert(fregNo < 32);
+   return IRExpr_Get( floatGuestRegOffset(fregNo), Ity_F32 );
+}
+
+/* Architected read from a VFP Freg. */
+static IRExpr* getFReg ( UInt fregNo ) {
+   return llGetFReg( fregNo );
+}
+
+/* Plain ("low level") write to a VFP Freg. */
+static void llPutFReg ( UInt fregNo, IRExpr* e )
+{
+   vassert(fregNo < 32);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F32);
+   stmt( IRStmt_Put(floatGuestRegOffset(fregNo), e) );
+}
+
+/* Architected write to a VFP Freg.  Handles conditional writes to the
+   register: if guardT == IRTemp_INVALID then the write is
+   unconditional. */
+static void putFReg ( UInt    fregNo,
+                      IRExpr* e,
+                      IRTemp  guardT /* :: Ity_I32, 0 or 1 */)
+{
+   /* So, generate either an unconditional or a conditional write to
+      the reg. */
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional write */
+      llPutFReg( fregNo, e );
+   } else {
+      llPutFReg( fregNo,
+                 IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+                             e, llGetFReg(fregNo) ));
+   }
+}
+
+
+/* ---------------- Misc registers ---------------- */
+
+static void putMiscReg32 ( UInt    gsoffset, 
+                           IRExpr* e, /* :: Ity_I32 */
+                           IRTemp  guardT /* :: Ity_I32, 0 or 1 */)
+{
+   switch (gsoffset) {
+      case OFFB_FPSCR:   break;
+      case OFFB_QFLAG32: break;
+      case OFFB_GEFLAG0: break;
+      case OFFB_GEFLAG1: break;
+      case OFFB_GEFLAG2: break;
+      case OFFB_GEFLAG3: break;
+      default: vassert(0); /* awaiting more cases */
+   }
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
+
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional write */
+      stmt(IRStmt_Put(gsoffset, e));
+   } else {
+      stmt(IRStmt_Put(
+         gsoffset,
+         IRExpr_ITE( binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)),
+                     e, IRExpr_Get(gsoffset, Ity_I32) )
+      ));
+   }
+}
+
+static IRTemp get_ITSTATE ( void )
+{
+   ASSERT_IS_THUMB;
+   IRTemp t = newTemp(Ity_I32);
+   assign(t, IRExpr_Get( OFFB_ITSTATE, Ity_I32));
+   return t;
+}
+
+static void put_ITSTATE ( IRTemp t )
+{
+   ASSERT_IS_THUMB;
+   stmt( IRStmt_Put( OFFB_ITSTATE, mkexpr(t)) );
+}
+
+static IRTemp get_QFLAG32 ( void )
+{
+   IRTemp t = newTemp(Ity_I32);
+   assign(t, IRExpr_Get( OFFB_QFLAG32, Ity_I32));
+   return t;
+}
+
+static void put_QFLAG32 ( IRTemp t, IRTemp condT )
+{
+   putMiscReg32( OFFB_QFLAG32, mkexpr(t), condT );
+}
+
+/* Stickily set the 'Q' flag (APSR bit 27) of the APSR (Application Program
+   Status Register) to indicate that overflow or saturation occurred.
+   Nb: t must be zero to denote no saturation, and any nonzero
+   value to indicate saturation. */
+static void or_into_QFLAG32 ( IRExpr* e, IRTemp condT )
+{
+   IRTemp old = get_QFLAG32();
+   IRTemp nyu = newTemp(Ity_I32);
+   assign(nyu, binop(Iop_Or32, mkexpr(old), e) );
+   put_QFLAG32(nyu, condT);
+}
+
+/* Generate code to set APSR.GE[flagNo]. Each fn call sets 1 bit.
+   flagNo: which flag bit to set [3...0]
+   lowbits_to_ignore:  0 = look at all 32 bits
+                       8 = look at top 24 bits only
+                      16 = look at top 16 bits only
+                      31 = look at the top bit only
+   e: input value to be evaluated.
+   The new value is taken from 'e' with the lowest 'lowbits_to_ignore'
+   masked out.  If the resulting value is zero then the GE flag is
+   set to 0; any other value sets the flag to 1. */
+static void put_GEFLAG32 ( Int flagNo,            /* 0, 1, 2 or 3 */
+                           Int lowbits_to_ignore, /* 0, 8, 16 or 31   */
+                           IRExpr* e,             /* Ity_I32 */
+                           IRTemp condT )
+{
+   vassert( flagNo >= 0 && flagNo <= 3 );
+   vassert( lowbits_to_ignore == 0  || 
+            lowbits_to_ignore == 8  || 
+            lowbits_to_ignore == 16 ||
+            lowbits_to_ignore == 31 );
+   IRTemp masked = newTemp(Ity_I32);
+   assign(masked, binop(Iop_Shr32, e, mkU8(lowbits_to_ignore)));
+ 
+   switch (flagNo) {
+      case 0: putMiscReg32(OFFB_GEFLAG0, mkexpr(masked), condT); break;
+      case 1: putMiscReg32(OFFB_GEFLAG1, mkexpr(masked), condT); break;
+      case 2: putMiscReg32(OFFB_GEFLAG2, mkexpr(masked), condT); break;
+      case 3: putMiscReg32(OFFB_GEFLAG3, mkexpr(masked), condT); break;
+      default: vassert(0);
+   }
+}
+
+/* Return the (32-bit, zero-or-nonzero representation scheme) of
+   the specified GE flag. */
+static IRExpr* get_GEFLAG32( Int flagNo /* 0, 1, 2, 3 */ )
+{
+   switch (flagNo) {
+      case 0: return IRExpr_Get( OFFB_GEFLAG0, Ity_I32 );
+      case 1: return IRExpr_Get( OFFB_GEFLAG1, Ity_I32 );
+      case 2: return IRExpr_Get( OFFB_GEFLAG2, Ity_I32 );
+      case 3: return IRExpr_Get( OFFB_GEFLAG3, Ity_I32 );
+      default: vassert(0);
+   }
+}
+
+/* Set all 4 GE flags from the given 32-bit value as follows: GE 3 and
+   2 are set from bit 31 of the value, and GE 1 and 0 are set from bit
+   15 of the value.  All other bits are ignored. */
+static void set_GE_32_10_from_bits_31_15 ( IRTemp t32, IRTemp condT )
+{
+   IRTemp ge10 = newTemp(Ity_I32);
+   IRTemp ge32 = newTemp(Ity_I32);
+   assign(ge10, binop(Iop_And32, mkexpr(t32), mkU32(0x00008000)));
+   assign(ge32, binop(Iop_And32, mkexpr(t32), mkU32(0x80000000)));
+   put_GEFLAG32( 0, 0, mkexpr(ge10), condT );
+   put_GEFLAG32( 1, 0, mkexpr(ge10), condT );
+   put_GEFLAG32( 2, 0, mkexpr(ge32), condT );
+   put_GEFLAG32( 3, 0, mkexpr(ge32), condT );
+}
+
+
+/* Set all 4 GE flags from the given 32-bit value as follows: GE 3
+   from bit 31, GE 2 from bit 23, GE 1 from bit 15, and GE0 from
+   bit 7.  All other bits are ignored. */
+static void set_GE_3_2_1_0_from_bits_31_23_15_7 ( IRTemp t32, IRTemp condT )
+{
+   IRTemp ge0 = newTemp(Ity_I32);
+   IRTemp ge1 = newTemp(Ity_I32);
+   IRTemp ge2 = newTemp(Ity_I32);
+   IRTemp ge3 = newTemp(Ity_I32);
+   assign(ge0, binop(Iop_And32, mkexpr(t32), mkU32(0x00000080)));
+   assign(ge1, binop(Iop_And32, mkexpr(t32), mkU32(0x00008000)));
+   assign(ge2, binop(Iop_And32, mkexpr(t32), mkU32(0x00800000)));
+   assign(ge3, binop(Iop_And32, mkexpr(t32), mkU32(0x80000000)));
+   put_GEFLAG32( 0, 0, mkexpr(ge0), condT );
+   put_GEFLAG32( 1, 0, mkexpr(ge1), condT );
+   put_GEFLAG32( 2, 0, mkexpr(ge2), condT );
+   put_GEFLAG32( 3, 0, mkexpr(ge3), condT );
+}
+
+
+/* ---------------- FPSCR stuff ---------------- */
+
+/* Generate IR to get hold of the rounding mode bits in FPSCR, and
+   convert them to IR format.  Bind the final result to the
+   returned temp. */
+static IRTemp /* :: Ity_I32 */ mk_get_IR_rounding_mode ( void )
+{
+   /* The ARMvfp encoding for rounding mode bits is:
+         00  to nearest
+         01  to +infinity
+         10  to -infinity
+         11  to zero
+      We need to convert that to the IR encoding:
+         00  to nearest (the default)
+         10  to +infinity
+         01  to -infinity
+         11  to zero
+      Which can be done by swapping bits 0 and 1.
+      The rmode bits are at 23:22 in FPSCR.
+   */
+   IRTemp armEncd = newTemp(Ity_I32);
+   IRTemp swapped = newTemp(Ity_I32);
+   /* Fish FPSCR[23:22] out, and slide to bottom.  Doesn't matter that
+      we don't zero out bits 24 and above, since the assignment to
+      'swapped' will mask them out anyway. */
+   assign(armEncd,
+          binop(Iop_Shr32, IRExpr_Get(OFFB_FPSCR, Ity_I32), mkU8(22)));
+   /* Now swap them. */
+   assign(swapped,
+          binop(Iop_Or32,
+                binop(Iop_And32,
+                      binop(Iop_Shl32, mkexpr(armEncd), mkU8(1)),
+                      mkU32(2)),
+                binop(Iop_And32,
+                      binop(Iop_Shr32, mkexpr(armEncd), mkU8(1)),
+                      mkU32(1))
+         ));
+   return swapped;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for flag handling and conditional insns      ---*/
+/*------------------------------------------------------------*/
+
/* Return the standard textual suffix for an ARM condition code, in
   braces, for use when printing disassembly.  AL returns the empty
   string since it is the implicit default; any value outside the
   known set panics. */
static const HChar* name_ARMCondcode ( ARMCondcode cond )
{
   switch (cond) {
      case ARMCondEQ:  return "{eq}";
      case ARMCondNE:  return "{ne}";
      case ARMCondHS:  return "{hs}";  // or 'cs'
      case ARMCondLO:  return "{lo}";  // or 'cc'
      case ARMCondMI:  return "{mi}";
      case ARMCondPL:  return "{pl}";
      case ARMCondVS:  return "{vs}";
      case ARMCondVC:  return "{vc}";
      case ARMCondHI:  return "{hi}";
      case ARMCondLS:  return "{ls}";
      case ARMCondGE:  return "{ge}";
      case ARMCondLT:  return "{lt}";
      case ARMCondGT:  return "{gt}";
      case ARMCondLE:  return "{le}";
      case ARMCondAL:  return ""; // {al}: is the default
      case ARMCondNV:  return "{nv}";
      default: vpanic("name_ARMCondcode");
   }
}
+/* and a handy shorthand for it */
+static const HChar* nCC ( ARMCondcode cond ) {
+   return name_ARMCondcode(cond);
+}
+
+
+/* Build IR to calculate some particular condition from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression of type
+   Ity_I32, suitable for narrowing.  Although the return type is
+   Ity_I32, the returned value is either 0 or 1.  'cond' must be
+   :: Ity_I32 and must denote the condition to compute in 
+   bits 7:4, and be zero everywhere else.
+*/
+static IRExpr* mk_armg_calculate_condition_dyn ( IRExpr* cond )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, cond) == Ity_I32);
+   /* And 'cond' had better produce a value in which only bits 7:4 are
+      nonzero.  However, obviously we can't assert for that. */
+
+   /* So what we're constructing for the first argument is 
+      "(cond << 4) | stored-operation".
+      However, as per comments above, 'cond' must be supplied
+      pre-shifted to this function.
+
+      This pairing scheme requires that the ARM_CC_OP_ values all fit
+      in 4 bits.  Hence we are passing a (COND, OP) pair in the lowest
+      8 bits of the first argument. */
+   IRExpr** args
+      = mkIRExprVec_4(
+           binop(Iop_Or32, IRExpr_Get(OFFB_CC_OP, Ity_I32), cond),
+           IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
+           IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
+           IRExpr_Get(OFFB_CC_NDEP, Ity_I32)
+        );
+   IRExpr* call
+      = mkIRExprCCall(
+           Ity_I32,
+           0/*regparm*/, 
+           "armg_calculate_condition", &armg_calculate_condition,
+           args
+        );
+
+   /* Exclude the requested condition, OP and NDEP from definedness
+      checking.  We're only interested in DEP1 and DEP2. */
+   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+   return call;
+}
+
+
+/* Build IR to calculate some particular condition from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression of type
+   Ity_I32, suitable for narrowing.  Although the return type is
+   Ity_I32, the returned value is either 0 or 1.
+*/
+static IRExpr* mk_armg_calculate_condition ( ARMCondcode cond )
+{
+  /* First arg is "(cond << 4) | condition".  This requires that the
+     ARM_CC_OP_ values all fit in 4 bits.  Hence we are passing a
+     (COND, OP) pair in the lowest 8 bits of the first argument. */
+   vassert(cond >= 0 && cond <= 15);
+   return mk_armg_calculate_condition_dyn( mkU32(cond << 4) );
+}
+
+
+/* Build IR to calculate just the carry flag from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression ::
+   Ity_I32. */
+static IRExpr* mk_armg_calculate_flag_c ( void )
+{
+   IRExpr** args
+      = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I32),
+                       IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
+                       IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
+                       IRExpr_Get(OFFB_CC_NDEP, Ity_I32) );
+   IRExpr* call
+      = mkIRExprCCall(
+           Ity_I32,
+           0/*regparm*/, 
+           "armg_calculate_flag_c", &armg_calculate_flag_c,
+           args
+        );
+   /* Exclude OP and NDEP from definedness checking.  We're only
+      interested in DEP1 and DEP2. */
+   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+   return call;
+}
+
+
+/* Build IR to calculate just the overflow flag from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression ::
+   Ity_I32. */
+static IRExpr* mk_armg_calculate_flag_v ( void )
+{
+   IRExpr** args
+      = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I32),
+                       IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
+                       IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
+                       IRExpr_Get(OFFB_CC_NDEP, Ity_I32) );
+   IRExpr* call
+      = mkIRExprCCall(
+           Ity_I32,
+           0/*regparm*/, 
+           "armg_calculate_flag_v", &armg_calculate_flag_v,
+           args
+        );
+   /* Exclude OP and NDEP from definedness checking.  We're only
+      interested in DEP1 and DEP2. */
+   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+   return call;
+}
+
+
+/* Build IR to calculate N Z C V in bits 31:28 of the
+   returned word. */
+static IRExpr* mk_armg_calculate_flags_nzcv ( void )
+{
+   IRExpr** args
+      = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I32),
+                       IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
+                       IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
+                       IRExpr_Get(OFFB_CC_NDEP, Ity_I32) );
+   IRExpr* call
+      = mkIRExprCCall(
+           Ity_I32,
+           0/*regparm*/, 
+           "armg_calculate_flags_nzcv", &armg_calculate_flags_nzcv,
+           args
+        );
+   /* Exclude OP and NDEP from definedness checking.  We're only
+      interested in DEP1 and DEP2. */
+   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+   return call;
+}
+
+static IRExpr* mk_armg_calculate_flag_qc ( IRExpr* resL, IRExpr* resR, Bool Q )
+{
+   IRExpr** args1;
+   IRExpr** args2;
+   IRExpr *call1, *call2, *res;
+
+   if (Q) {
+      args1 = mkIRExprVec_4 ( binop(Iop_GetElem32x4, resL, mkU8(0)),
+                              binop(Iop_GetElem32x4, resL, mkU8(1)),
+                              binop(Iop_GetElem32x4, resR, mkU8(0)),
+                              binop(Iop_GetElem32x4, resR, mkU8(1)) );
+      args2 = mkIRExprVec_4 ( binop(Iop_GetElem32x4, resL, mkU8(2)),
+                              binop(Iop_GetElem32x4, resL, mkU8(3)),
+                              binop(Iop_GetElem32x4, resR, mkU8(2)),
+                              binop(Iop_GetElem32x4, resR, mkU8(3)) );
+   } else {
+      args1 = mkIRExprVec_4 ( binop(Iop_GetElem32x2, resL, mkU8(0)),
+                              binop(Iop_GetElem32x2, resL, mkU8(1)),
+                              binop(Iop_GetElem32x2, resR, mkU8(0)),
+                              binop(Iop_GetElem32x2, resR, mkU8(1)) );
+   }
+
+   call1 = mkIRExprCCall(
+             Ity_I32,
+             0/*regparm*/, 
+             "armg_calculate_flag_qc", &armg_calculate_flag_qc,
+             args1
+          );
+   if (Q) {
+      call2 = mkIRExprCCall(
+                Ity_I32,
+                0/*regparm*/, 
+                "armg_calculate_flag_qc", &armg_calculate_flag_qc,
+                args2
+             );
+   }
+   if (Q) {
+      res = binop(Iop_Or32, call1, call2);
+   } else {
+      res = call1;
+   }
+   return res;
+}
+
+// FIXME: this is named wrongly .. looks like a sticky set of
+// QC, not a write to it.
+static void setFlag_QC ( IRExpr* resL, IRExpr* resR, Bool Q,
+                         IRTemp condT )
+{
+   putMiscReg32 (OFFB_FPSCR,
+                 binop(Iop_Or32,
+                       IRExpr_Get(OFFB_FPSCR, Ity_I32),
+                       binop(Iop_Shl32,
+                             mk_armg_calculate_flag_qc(resL, resR, Q),
+                             mkU8(27))),
+                 condT);
+}
+
+/* Build IR to conditionally set the flags thunk.  As with putIReg, if
+   guard is IRTemp_INVALID then it's unconditional, else it holds a
+   condition :: Ity_I32. */
+static
+void setFlags_D1_D2_ND ( UInt cc_op, IRTemp t_dep1,
+                         IRTemp t_dep2, IRTemp t_ndep,
+                         IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
+{
+   vassert(typeOfIRTemp(irsb->tyenv, t_dep1 == Ity_I32));
+   vassert(typeOfIRTemp(irsb->tyenv, t_dep2 == Ity_I32));
+   vassert(typeOfIRTemp(irsb->tyenv, t_ndep == Ity_I32));
+   vassert(cc_op >= ARMG_CC_OP_COPY && cc_op < ARMG_CC_OP_NUMBER);
+   if (guardT == IRTemp_INVALID) {
+      /* unconditional */
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(cc_op) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(t_dep1) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkexpr(t_dep2) ));
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(t_ndep) ));
+   } else {
+      /* conditional */
+      IRTemp c1 = newTemp(Ity_I1);
+      assign( c1, binop(Iop_CmpNE32, mkexpr(guardT), mkU32(0)) );
+      stmt( IRStmt_Put(
+               OFFB_CC_OP,
+               IRExpr_ITE( mkexpr(c1),
+                           mkU32(cc_op),
+                           IRExpr_Get(OFFB_CC_OP, Ity_I32) ) ));
+      stmt( IRStmt_Put(
+               OFFB_CC_DEP1,
+               IRExpr_ITE( mkexpr(c1),
+                           mkexpr(t_dep1),
+                           IRExpr_Get(OFFB_CC_DEP1, Ity_I32) ) ));
+      stmt( IRStmt_Put(
+               OFFB_CC_DEP2,
+               IRExpr_ITE( mkexpr(c1),
+                           mkexpr(t_dep2),
+                           IRExpr_Get(OFFB_CC_DEP2, Ity_I32) ) ));
+      stmt( IRStmt_Put(
+               OFFB_CC_NDEP,
+               IRExpr_ITE( mkexpr(c1),
+                           mkexpr(t_ndep),
+                           IRExpr_Get(OFFB_CC_NDEP, Ity_I32) ) ));
+   }
+}
+
+
+/* Minor variant of the above that sets NDEP to zero (if it
+   sets it at all) */
+static void setFlags_D1_D2 ( UInt cc_op, IRTemp t_dep1,
+                             IRTemp t_dep2,
+                             IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
+{
+   IRTemp z32 = newTemp(Ity_I32);
+   assign( z32, mkU32(0) );
+   setFlags_D1_D2_ND( cc_op, t_dep1, t_dep2, z32, guardT );
+}
+
+
+/* Minor variant of the above that sets DEP2 to zero (if it
+   sets it at all) */
+static void setFlags_D1_ND ( UInt cc_op, IRTemp t_dep1,
+                             IRTemp t_ndep,
+                             IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
+{
+   IRTemp z32 = newTemp(Ity_I32);
+   assign( z32, mkU32(0) );
+   setFlags_D1_D2_ND( cc_op, t_dep1, z32, t_ndep, guardT );
+}
+
+
+/* Minor variant of the above that sets DEP2 and NDEP to zero (if it
+   sets them at all) */
+static void setFlags_D1 ( UInt cc_op, IRTemp t_dep1,
+                          IRTemp guardT /* :: Ity_I32, 0 or 1 */ )
+{
+   IRTemp z32 = newTemp(Ity_I32);
+   assign( z32, mkU32(0) );
+   setFlags_D1_D2_ND( cc_op, t_dep1, z32, z32, guardT );
+}
+
+
+/* ARM only */
+/* Generate a side-exit to the next instruction, if the given guard
+   expression :: Ity_I32 is 0 (note!  the side exit is taken if the
+   condition is false!)  This is used to skip over conditional
+   instructions which we can't generate straight-line code for, either
+   because they are too complex or (more likely) they potentially
+   generate exceptions.
+*/
+static void mk_skip_over_A32_if_cond_is_false ( 
+               IRTemp guardT /* :: Ity_I32, 0 or 1 */
+            )
+{
+   ASSERT_IS_ARM;
+   vassert(guardT != IRTemp_INVALID);
+   vassert(0 == (guest_R15_curr_instr_notENC & 3));
+   stmt( IRStmt_Exit(
+            unop(Iop_Not1, unop(Iop_32to1, mkexpr(guardT))),
+            Ijk_Boring,
+            IRConst_U32(toUInt(guest_R15_curr_instr_notENC + 4)),
+            OFFB_R15T
+       ));
+}
+
+/* Thumb16 only */
+/* ditto, but jump over a 16-bit thumb insn */
+static void mk_skip_over_T16_if_cond_is_false ( 
+               IRTemp guardT /* :: Ity_I32, 0 or 1 */
+            )
+{
+   ASSERT_IS_THUMB;
+   vassert(guardT != IRTemp_INVALID);
+   vassert(0 == (guest_R15_curr_instr_notENC & 1));
+   stmt( IRStmt_Exit(
+            unop(Iop_Not1, unop(Iop_32to1, mkexpr(guardT))),
+            Ijk_Boring,
+            IRConst_U32(toUInt((guest_R15_curr_instr_notENC + 2) | 1)),
+            OFFB_R15T
+       ));
+}
+
+
+/* Thumb32 only */
+/* ditto, but jump over a 32-bit thumb insn */
+static void mk_skip_over_T32_if_cond_is_false ( 
+               IRTemp guardT /* :: Ity_I32, 0 or 1 */
+            )
+{
+   ASSERT_IS_THUMB;
+   vassert(guardT != IRTemp_INVALID);
+   vassert(0 == (guest_R15_curr_instr_notENC & 1));
+   stmt( IRStmt_Exit(
+            unop(Iop_Not1, unop(Iop_32to1, mkexpr(guardT))),
+            Ijk_Boring,
+            IRConst_U32(toUInt((guest_R15_curr_instr_notENC + 4) | 1)),
+            OFFB_R15T
+       ));
+}
+
+
+/* Thumb16 and Thumb32 only
+   Generate a SIGILL followed by a restart of the current instruction
+   if the given temp is nonzero. */
+static void gen_SIGILL_T_if_nonzero ( IRTemp t /* :: Ity_I32 */ )
+{
+   ASSERT_IS_THUMB;
+   vassert(t != IRTemp_INVALID);
+   vassert(0 == (guest_R15_curr_instr_notENC & 1));
+   stmt(
+      IRStmt_Exit(
+         binop(Iop_CmpNE32, mkexpr(t), mkU32(0)),
+         Ijk_NoDecode,
+         IRConst_U32(toUInt(guest_R15_curr_instr_notENC | 1)),
+         OFFB_R15T
+      )
+   );
+}
+
+
+/* Inspect the old_itstate, and generate a SIGILL if it indicates that
+   we are currently in an IT block and are not the last in the block.
+   This also rolls back guest_ITSTATE to its old value before the exit
+   and restores it to its new value afterwards.  This is so that if
+   the exit is taken, we have an up to date version of ITSTATE
+   available.  Without doing that, we have no hope of making precise
+   exceptions work. */
+static void gen_SIGILL_T_if_in_but_NLI_ITBlock (
+               IRTemp old_itstate /* :: Ity_I32 */,
+               IRTemp new_itstate /* :: Ity_I32 */
+            )
+{
+   ASSERT_IS_THUMB;
+   put_ITSTATE(old_itstate); // backout
+   IRTemp guards_for_next3 = newTemp(Ity_I32);
+   assign(guards_for_next3,
+          binop(Iop_Shr32, mkexpr(old_itstate), mkU8(8)));
+   gen_SIGILL_T_if_nonzero(guards_for_next3);
+   put_ITSTATE(new_itstate); //restore
+}
+
+
+/* Simpler version of the above, which generates a SIGILL if
+   we're anywhere within an IT block. */
+static void gen_SIGILL_T_if_in_ITBlock (
+               IRTemp old_itstate /* :: Ity_I32 */,
+               IRTemp new_itstate /* :: Ity_I32 */
+            )
+{
+   put_ITSTATE(old_itstate); // backout
+   gen_SIGILL_T_if_nonzero(old_itstate);
+   put_ITSTATE(new_itstate); //restore
+}
+
+
+/* Generate an APSR value, from the NZCV thunk, and
+   from QFLAG32 and GEFLAG0 .. GEFLAG3. */
+static IRTemp synthesise_APSR ( void )
+{
+   IRTemp res1 = newTemp(Ity_I32);
+   // Get NZCV
+   assign( res1, mk_armg_calculate_flags_nzcv() );
+   // OR in the Q value
+   IRTemp res2 = newTemp(Ity_I32);
+   assign(
+      res2,
+      binop(Iop_Or32,
+            mkexpr(res1),
+            binop(Iop_Shl32,
+                  unop(Iop_1Uto32,
+                       binop(Iop_CmpNE32,
+                             mkexpr(get_QFLAG32()),
+                             mkU32(0))),
+                  mkU8(ARMG_CC_SHIFT_Q)))
+   );
+   // OR in GE0 .. GE3
+   IRExpr* ge0
+      = unop(Iop_1Uto32, binop(Iop_CmpNE32, get_GEFLAG32(0), mkU32(0)));
+   IRExpr* ge1
+      = unop(Iop_1Uto32, binop(Iop_CmpNE32, get_GEFLAG32(1), mkU32(0)));
+   IRExpr* ge2
+      = unop(Iop_1Uto32, binop(Iop_CmpNE32, get_GEFLAG32(2), mkU32(0)));
+   IRExpr* ge3
+      = unop(Iop_1Uto32, binop(Iop_CmpNE32, get_GEFLAG32(3), mkU32(0)));
+   IRTemp res3 = newTemp(Ity_I32);
+   assign(res3,
+          binop(Iop_Or32,
+                mkexpr(res2),
+                binop(Iop_Or32,
+                      binop(Iop_Or32,
+                            binop(Iop_Shl32, ge0, mkU8(16)),
+                            binop(Iop_Shl32, ge1, mkU8(17))),
+                      binop(Iop_Or32,
+                            binop(Iop_Shl32, ge2, mkU8(18)),
+                            binop(Iop_Shl32, ge3, mkU8(19))) )));
+   return res3;
+}
+
+
+/* and the inverse transformation: given an APSR value,
+   set the NZCV thunk, the Q flag, and the GE flags. */
+static void desynthesise_APSR ( Bool write_nzcvq, Bool write_ge,
+                                IRTemp apsrT, IRTemp condT )
+{
+   vassert(write_nzcvq || write_ge);
+   if (write_nzcvq) {
+      // Do NZCV
+      IRTemp immT = newTemp(Ity_I32);
+      assign(immT, binop(Iop_And32, mkexpr(apsrT), mkU32(0xF0000000)) );
+      setFlags_D1(ARMG_CC_OP_COPY, immT, condT);
+      // Do Q
+      IRTemp qnewT = newTemp(Ity_I32);
+      assign(qnewT, binop(Iop_And32, mkexpr(apsrT), mkU32(ARMG_CC_MASK_Q)));
+      put_QFLAG32(qnewT, condT);
+   }
+   if (write_ge) {
+      // Do GE3..0
+      put_GEFLAG32(0, 0, binop(Iop_And32, mkexpr(apsrT), mkU32(1<<16)),
+                   condT);
+      put_GEFLAG32(1, 0, binop(Iop_And32, mkexpr(apsrT), mkU32(1<<17)),
+                   condT);
+      put_GEFLAG32(2, 0, binop(Iop_And32, mkexpr(apsrT), mkU32(1<<18)),
+                   condT);
+      put_GEFLAG32(3, 0, binop(Iop_And32, mkexpr(apsrT), mkU32(1<<19)),
+                   condT);
+   }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for saturation                               ---*/
+/*------------------------------------------------------------*/
+
+/* FIXME: armUnsignedSatQ and armSignedSatQ differ only in how the
+   floor is chosen: here it is fixed at zero, whereas armSignedSatQ
+   derives it from imm5.  The two functions (which also disagree on
+   parameter order) ought to be commoned up. */
+
+/* UnsignedSatQ(): clamp a value so it lies in 0 <= x <= (2^N)-1.
+   Optionally return a flag (resQ) saying whether saturation occurred.
+   See the definition in the manual, section A2.2.1, page 41:
+   (bits(N), boolean) UnsignedSatQ( integer i, integer N )
+   {
+     if ( i > (2^N)-1 ) { result = (2^N)-1; saturated = TRUE; }
+     elsif ( i < 0 )    { result = 0; saturated = TRUE; }
+     else               { result = i; saturated = FALSE; }
+     return ( result<N-1:0>, saturated );
+   }
+*/
+static void armUnsignedSatQ( IRTemp* res,  /* OUT - Ity_I32 */
+                             IRTemp* resQ, /* OUT - Ity_I32  */
+                             IRTemp regT,  /* value to clamp - Ity_I32 */
+                             UInt imm5 )   /* saturation ceiling */
+{
+   UInt ceil  = (1 << imm5) - 1;    // (2^imm5)-1
+   UInt floor = 0;
+
+   IRTemp valT     = newTemp(Ity_I32);
+   IRTemp ceilT    = newTemp(Ity_I32);
+   IRTemp aboveT   = newTemp(Ity_I1);
+   IRTemp clampHiT = newTemp(Ity_I32);
+   IRTemp floorT   = newTemp(Ity_I32);
+   IRTemp belowT   = newTemp(Ity_I1);
+   IRTemp clampedT = newTemp(Ity_I32);
+
+   /* Clamp against the ceiling first, then against the floor. */
+   assign( valT,     mkexpr(regT) );
+   assign( ceilT,    mkU32(ceil) );
+   assign( aboveT,   binop( Iop_CmpLT32S, mkexpr(ceilT), mkexpr(valT) ) );
+   assign( clampHiT, IRExpr_ITE(mkexpr(aboveT), mkexpr(ceilT), mkexpr(valT)) );
+   assign( floorT,   mkU32(floor) );
+   assign( belowT,   binop( Iop_CmpLT32S, mkexpr(clampHiT), mkexpr(floorT) ) );
+   assign( clampedT, IRExpr_ITE(mkexpr(belowT), mkexpr(floorT), mkexpr(clampHiT)) );
+   assign( *res,     mkexpr(clampedT) );
+
+   /* resQ, if requested, ends up nonzero iff the clamping changed the
+      value; it is guaranteed zero when saturation did not occur. */
+   if (resQ) {
+      assign( *resQ, binop(Iop_Xor32, mkexpr(*res), mkexpr(regT)) );
+   }
+}
+
+
+/* SignedSatQ(): clamp a value so it lies in
+   -(2^(N-1)) <= x <= (2^(N-1))-1.
+   Optionally return a flag (resQ) saying whether saturation occurred.
+   See the definition in the manual, section A2.2.1, page 41 (note the
+   floor case also sets saturated = TRUE):
+   (bits(N), boolean) SignedSatQ( integer i, integer N )
+   {
+     if ( i > 2^(N-1) - 1 )    { result = 2^(N-1) - 1; saturated = TRUE; }
+     elsif ( i < -(2^(N-1)) )  { result = -(2^(N-1));  saturated = TRUE; }
+     else                      { result = i;           saturated = FALSE; }
+     return ( result[N-1:0], saturated );
+   }
+*/
+static void armSignedSatQ( IRTemp regT,    /* value to clamp - Ity_I32 */
+                           UInt imm5,      /* saturation ceiling */
+                           IRTemp* res,    /* OUT - Ity_I32 */
+                           IRTemp* resQ )  /* OUT - Ity_I32  */
+{
+   Int ceil  =  (1 << (imm5-1)) - 1;  //  (2^(imm5-1))-1
+   Int floor = -(1 << (imm5-1));      // -(2^(imm5-1))
+
+   IRTemp valT     = newTemp(Ity_I32);
+   IRTemp ceilT    = newTemp(Ity_I32);
+   IRTemp aboveT   = newTemp(Ity_I1);
+   IRTemp clampHiT = newTemp(Ity_I32);
+   IRTemp floorT   = newTemp(Ity_I32);
+   IRTemp belowT   = newTemp(Ity_I1);
+   IRTemp clampedT = newTemp(Ity_I32);
+
+   /* Clamp against the ceiling first, then against the floor. */
+   assign( valT,     mkexpr(regT) );
+   assign( ceilT,    mkU32(ceil) );
+   assign( aboveT,   binop( Iop_CmpLT32S, mkexpr(ceilT), mkexpr(valT) ) );
+   assign( clampHiT, IRExpr_ITE( mkexpr(aboveT), mkexpr(ceilT), mkexpr(valT) ) );
+   assign( floorT,   mkU32(floor) );
+   assign( belowT,   binop( Iop_CmpLT32S, mkexpr(clampHiT), mkexpr(floorT) ) );
+   assign( clampedT, IRExpr_ITE( mkexpr(belowT), mkexpr(floorT), mkexpr(clampHiT) ) );
+   assign( *res,     mkexpr(clampedT) );
+
+   /* resQ, if requested, ends up nonzero iff the clamping changed the
+      value; it is guaranteed zero when saturation did not occur. */
+   if (resQ) {
+      assign( *resQ, binop(Iop_Xor32, mkexpr(*res), mkexpr(regT)) );
+   }
+}
+
+
+/* Compute a value 0 :: I32 or 1 :: I32, indicating whether signed
+   overflow occurred for 32-bit addition.  Needs both args and the
+   result.  HD p27. */
+static
+IRExpr* signed_overflow_after_Add32 ( IRExpr* resE,
+                                      IRTemp argL, IRTemp argR )
+{
+   IRTemp res = newTemp(Ity_I32);
+   assign(res, resE);
+   /* Overflow iff the result's sign differs from the sign of both
+      operands: ((res ^ argL) & (res ^ argR)) >> 31. */
+   IRExpr* signFlipL = binop( Iop_Xor32, mkexpr(res), mkexpr(argL) );
+   IRExpr* signFlipR = binop( Iop_Xor32, mkexpr(res), mkexpr(argR) );
+   return binop( Iop_Shr32,
+                 binop( Iop_And32, signFlipL, signFlipR ),
+                 mkU8(31) );
+}
+
+/* Compute a value 0 :: I32 or 1 :: I32, indicating whether signed
+   overflow occurred for 32-bit subtraction.  Also from HD p27. */
+static
+IRExpr* signed_overflow_after_Sub32 ( IRExpr* resE,
+                                      IRTemp argL, IRTemp argR )
+{
+   IRTemp res = newTemp(Ity_I32);
+   assign(res, resE);
+   /* Overflow iff the operands' signs differ and the result's sign
+      differs from argL's: ((argL ^ argR) & (res ^ argL)) >> 31. */
+   IRExpr* argsDiffer = binop( Iop_Xor32, mkexpr(argL), mkexpr(argR) );
+   IRExpr* resDiffers = binop( Iop_Xor32, mkexpr(res),  mkexpr(argL) );
+   return binop( Iop_Shr32,
+                 binop( Iop_And32, argsDiffer, resDiffers ),
+                 mkU8(31) );
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Larger helpers                                       ---*/
+/*------------------------------------------------------------*/
+
+/* Compute both the result and new C flag value for a LSL by an imm5
+   or by a register operand.  May generate reads of the old C value
+   (hence only safe to use before any writes to guest state happen).
+   Are factored out so can be used by both ARM and Thumb.
+
+   Note that in compute_result_and_C_after_{LSL,LSR,ASR}_by{imm5,reg},
+   "res" (the result)  is a.k.a. "shop", shifter operand
+   "newC" (the new C)  is a.k.a. "shco", shifter carry out
+
+   The calling convention for res and newC is a bit funny.  They could
+   be passed by value, but instead are passed by ref.
+
+   The C (shco) value computed must be zero in bits 31:1, as the IR
+   optimisations for flag handling (guest_arm_spechelper) rely on
+   that, and the slow-path handlers (armg_calculate_flags_nzcv) assert
+   for it.  Same applies to all these functions that compute shco
+   after a shift or rotate, not just this one.
+*/
+
+static void compute_result_and_C_after_LSL_by_imm5 (
+               /*OUT*/HChar* buf,
+               IRTemp* res,
+               IRTemp* newC,
+               IRTemp rMt, UInt shift_amt, /* operands */
+               UInt rM      /* only for debug printing */
+            )
+{
+   /* LSL #0 means "no shift": the result is Rm unchanged and the
+      shifter carry-out is the old C flag. */
+   if (shift_amt == 0) {
+      if (newC)
+         assign( *newC, mk_armg_calculate_flag_c() );
+      assign( *res, mkexpr(rMt) );
+      DIS(buf, "r%u", rM);
+      return;
+   }
+   /* Shift in 1..31: result is Rm << amt, carry-out is Rm[32-amt]. */
+   vassert(shift_amt >= 1 && shift_amt <= 31);
+   if (newC)
+      assign( *newC,
+              binop(Iop_And32,
+                    binop(Iop_Shr32, mkexpr(rMt), mkU8(32 - shift_amt)),
+                    mkU32(1)) );
+   assign( *res, binop(Iop_Shl32, mkexpr(rMt), mkU8(shift_amt)) );
+   DIS(buf, "r%u, LSL #%u", rM, shift_amt);
+}
+
+
+/* LSL by the low byte of a register.  Computes the shifted result
+   (*res) and, if newC is non-NULL, the shifter carry-out (*newC,
+   with bits 31:1 zero, as required -- see comment above).  Reads the
+   old C flag, so only safe before any writes to the guest state. */
+static void compute_result_and_C_after_LSL_by_reg (
+               /*OUT*/HChar* buf,
+               IRTemp* res,
+               IRTemp* newC,
+               IRTemp rMt, IRTemp rSt,  /* operands */
+               UInt rM,    UInt rS      /* only for debug printing */
+            )
+{
+   // shift left in range 0 .. 255
+   // amt  = rS & 255
+   // res  = amt < 32 ?  Rm << amt  : 0
+   // newC = amt == 0     ? oldC  :
+   //        amt in 1..32 ?  Rm[32-amt]  : 0
+   IRTemp amtT = newTemp(Ity_I32);
+   assign( amtT, binop(Iop_And32, mkexpr(rSt), mkU32(255)) );
+   if (newC) {
+      /* mux0X(amt == 0,
+               mux0X(amt < 32, 
+                     0,
+                     Rm[(32-amt) & 31]),
+               oldC)
+      */
+      /* About the best you can do is pray that iropt is able
+         to nuke most or all of the following junk. */
+      IRTemp oldC = newTemp(Ity_I32);
+      assign(oldC, mk_armg_calculate_flag_c() );
+      // (32-amt) & 31 keeps the IR shift amount in 0..31; for amt == 32
+      // it selects Rm[0], which is the required carry-out for LSL #32.
+      assign(
+         *newC,
+         IRExpr_ITE(
+            binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)),
+            mkexpr(oldC),
+            IRExpr_ITE(
+               binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32)),
+               binop(Iop_And32,
+                     binop(Iop_Shr32,
+                           mkexpr(rMt),
+                           unop(Iop_32to8,
+                                binop(Iop_And32,
+                                      binop(Iop_Sub32,
+                                            mkU32(32),
+                                            mkexpr(amtT)),
+                                      mkU32(31)
+                                )
+                           )
+                     ),
+                     mkU32(1)
+                     ),
+               mkU32(0)
+            )
+         )
+      );
+   }
+   // (Rm << (Rs & 31))  &  (((Rs & 255) - 32) >>s 31)
+   // Lhs of the & limits the shift to 31 bits, so as to
+   // give known IR semantics.  Rhs of the & is all 1s for
+   // Rs <= 31 and all 0s for Rs >= 32.
+   assign(
+      *res,
+      binop(
+         Iop_And32,
+         binop(Iop_Shl32,
+               mkexpr(rMt),
+               unop(Iop_32to8,
+                    binop(Iop_And32, mkexpr(rSt), mkU32(31)))),
+         binop(Iop_Sar32,
+               binop(Iop_Sub32,
+                     mkexpr(amtT),
+                     mkU32(32)),
+               mkU8(31))));
+    DIS(buf, "r%u, LSL r%u", rM, rS);
+}
+
+
+static void compute_result_and_C_after_LSR_by_imm5 (
+               /*OUT*/HChar* buf,
+               IRTemp* res,
+               IRTemp* newC,
+               IRTemp rMt, UInt shift_amt, /* operands */
+               UInt rM      /* only for debug printing */
+            )
+{
+   if (shift_amt == 0) {
+      /* An encoded amount of 0 denotes LSR #32: the result is zero
+         and the carry-out is Rm[31]. */
+      if (newC)
+         assign( *newC,
+                 binop(Iop_And32,
+                       binop(Iop_Shr32, mkexpr(rMt), mkU8(31)),
+                       mkU32(1)) );
+      assign( *res, mkU32(0) );
+      DIS(buf, "r%u, LSR #0(a.k.a. 32)", rM);
+      return;
+   }
+   /* Shift in 1..31: result is Rm >>u amt, carry-out is Rm[amt-1]. */
+   vassert(shift_amt >= 1 && shift_amt <= 31);
+   if (newC)
+      assign( *newC,
+              binop(Iop_And32,
+                    binop(Iop_Shr32, mkexpr(rMt), mkU8(shift_amt - 1)),
+                    mkU32(1)) );
+   assign( *res, binop(Iop_Shr32, mkexpr(rMt), mkU8(shift_amt)) );
+   DIS(buf, "r%u, LSR #%u", rM, shift_amt);
+}
+
+
+/* LSR by the low byte of a register.  Computes the shifted result
+   (*res) and, if newC is non-NULL, the shifter carry-out (*newC,
+   with bits 31:1 zero, as required -- see comment above).  Reads the
+   old C flag, so only safe before any writes to the guest state. */
+static void compute_result_and_C_after_LSR_by_reg (
+               /*OUT*/HChar* buf,
+               IRTemp* res,
+               IRTemp* newC,
+               IRTemp rMt, IRTemp rSt,  /* operands */
+               UInt rM,    UInt rS      /* only for debug printing */
+            )
+{
+   // shift right in range 0 .. 255
+   // amt = rS & 255
+   // res  = amt < 32 ?  Rm >>u amt  : 0
+   // newC = amt == 0     ? oldC  :
+   //        amt in 1..32 ?  Rm[amt-1]  : 0
+   IRTemp amtT = newTemp(Ity_I32);
+   assign( amtT, binop(Iop_And32, mkexpr(rSt), mkU32(255)) );
+   if (newC) {
+      /* mux0X(amt == 0,
+               mux0X(amt < 32, 
+                     0,
+                     Rm[(amt-1) & 31]),
+               oldC)
+      */
+      IRTemp oldC = newTemp(Ity_I32);
+      assign(oldC, mk_armg_calculate_flag_c() );
+      // (amt-1) & 31 keeps the IR shift amount in 0..31; for amt == 32
+      // it selects Rm[31], which is the required carry-out for LSR #32.
+      assign(
+         *newC,
+         IRExpr_ITE(
+            binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)), 
+            mkexpr(oldC),
+            IRExpr_ITE(
+               binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32)),
+               binop(Iop_And32,
+                     binop(Iop_Shr32,
+                           mkexpr(rMt),
+                           unop(Iop_32to8,
+                                binop(Iop_And32,
+                                      binop(Iop_Sub32,
+                                            mkexpr(amtT),
+                                            mkU32(1)),
+                                      mkU32(31)
+                                )
+                           )
+                     ),
+                     mkU32(1)
+                     ),
+               mkU32(0)
+            )
+         )
+      );
+   }
+   // (Rm >>u (Rs & 31))  &  (((Rs & 255) - 32) >>s 31)
+   // Lhs of the & limits the shift to 31 bits, so as to
+   // give known IR semantics.  Rhs of the & is all 1s for
+   // Rs <= 31 and all 0s for Rs >= 32.
+   assign(
+      *res,
+      binop(
+         Iop_And32,
+         binop(Iop_Shr32,
+               mkexpr(rMt),
+               unop(Iop_32to8,
+                    binop(Iop_And32, mkexpr(rSt), mkU32(31)))),
+         binop(Iop_Sar32,
+               binop(Iop_Sub32,
+                     mkexpr(amtT),
+                     mkU32(32)),
+               mkU8(31))));
+    DIS(buf, "r%u, LSR r%u", rM, rS);
+}
+
+
+static void compute_result_and_C_after_ASR_by_imm5 (
+               /*OUT*/HChar* buf,
+               IRTemp* res,
+               IRTemp* newC,
+               IRTemp rMt, UInt shift_amt, /* operands */
+               UInt rM      /* only for debug printing */
+            )
+{
+   if (shift_amt == 0) {
+      /* An encoded amount of 0 denotes ASR #32: every result bit is a
+         copy of Rm[31], and the carry-out is Rm[31] too. */
+      if (newC)
+         assign( *newC,
+                 binop(Iop_And32,
+                       binop(Iop_Shr32, mkexpr(rMt), mkU8(31)),
+                       mkU32(1)) );
+      assign( *res, binop(Iop_Sar32, mkexpr(rMt), mkU8(31)) );
+      DIS(buf, "r%u, ASR #0(a.k.a. 32)", rM);
+      return;
+   }
+   /* Shift in 1..31: result is Rm >>s amt, carry-out is Rm[amt-1]. */
+   vassert(shift_amt >= 1 && shift_amt <= 31);
+   if (newC)
+      assign( *newC,
+              binop(Iop_And32,
+                    binop(Iop_Shr32, mkexpr(rMt), mkU8(shift_amt - 1)),
+                    mkU32(1)) );
+   assign( *res, binop(Iop_Sar32, mkexpr(rMt), mkU8(shift_amt)) );
+   DIS(buf, "r%u, ASR #%u", rM, shift_amt);
+}
+
+
+/* ASR by the low byte of a register.  Computes the shifted result
+   (*res) and, if newC is non-NULL, the shifter carry-out (*newC,
+   with bits 31:1 zero, as required -- see comment above).  Reads the
+   old C flag, so only safe before any writes to the guest state. */
+static void compute_result_and_C_after_ASR_by_reg (
+               /*OUT*/HChar* buf,
+               IRTemp* res,
+               IRTemp* newC,
+               IRTemp rMt, IRTemp rSt,  /* operands */
+               UInt rM,    UInt rS      /* only for debug printing */
+            )
+{
+   // arithmetic shift right in range 0 .. 255
+   // amt = rS & 255
+   // res  = amt < 32 ?  Rm >>s amt  : Rm >>s 31
+   // newC = amt == 0     ? oldC  :
+   //        amt in 1..32 ?  Rm[amt-1]  : Rm[31]
+   IRTemp amtT = newTemp(Ity_I32);
+   assign( amtT, binop(Iop_And32, mkexpr(rSt), mkU32(255)) );
+   if (newC) {
+      /* mux0X(amt == 0,
+               mux0X(amt < 32, 
+                     Rm[31],
+                     Rm[(amt-1) & 31])
+               oldC)
+      */
+      IRTemp oldC = newTemp(Ity_I32);
+      assign(oldC, mk_armg_calculate_flag_c() );
+      // (amt-1) & 31 keeps the IR shift amount in 0..31; for amt > 32
+      // the final ITE arm extracts Rm[31] directly.
+      assign(
+         *newC,
+         IRExpr_ITE(
+            binop(Iop_CmpEQ32, mkexpr(amtT), mkU32(0)),
+            mkexpr(oldC),
+            IRExpr_ITE(
+               binop(Iop_CmpLE32U, mkexpr(amtT), mkU32(32)),
+               binop(Iop_And32,
+                     binop(Iop_Shr32,
+                           mkexpr(rMt),
+                           unop(Iop_32to8,
+                                binop(Iop_And32,
+                                      binop(Iop_Sub32,
+                                            mkexpr(amtT),
+                                            mkU32(1)),
+                                      mkU32(31)
+                                )
+                           )
+                     ),
+                     mkU32(1)
+                     ),
+               binop(Iop_And32,
+                     binop(Iop_Shr32,
+                           mkexpr(rMt),
+                           mkU8(31)
+                     ),
+                     mkU32(1)
+               )
+            )
+         )
+      );
+   }
+   // (Rm >>s (amt <u 32 ? amt : 31))
+   assign(
+      *res,
+      binop(
+         Iop_Sar32,
+         mkexpr(rMt),
+         unop(
+            Iop_32to8,
+            IRExpr_ITE(
+               binop(Iop_CmpLT32U, mkexpr(amtT), mkU32(32)),
+               mkexpr(amtT),
+               mkU32(31)))));
+    DIS(buf, "r%u, ASR r%u", rM, rS);
+}
+
+
+/* ROR by the low byte of a register.  Computes the rotated result
+   (*res) and, if newC is non-NULL, the shifter carry-out (*newC,
+   with bits 31:1 zero, as required -- see comment above).  May read
+   the old C flag, so only safe before writes to the guest state. */
+static void compute_result_and_C_after_ROR_by_reg (
+               /*OUT*/HChar* buf,
+               IRTemp* res,
+               IRTemp* newC,
+               IRTemp rMt, IRTemp rSt,  /* operands */
+               UInt rM,    UInt rS      /* only for debug printing */
+            )
+{
+   // rotate right in range 0 .. 255
+   // amt = rS & 255
+   // shop =  Rm `ror` (amt & 31)
+   // shco =  amt == 0 ? oldC : Rm[(amt-1) & 31]
+   IRTemp amtT = newTemp(Ity_I32);
+   assign( amtT, binop(Iop_And32, mkexpr(rSt), mkU32(255)) );
+   IRTemp amt5T = newTemp(Ity_I32);
+   assign( amt5T, binop(Iop_And32, mkexpr(rSt), mkU32(31)) );
+   if (newC) {
+      /* Only read the old C flag when the carry-out is actually
+         requested; previously it was read unconditionally even when
+         newC was NULL. */
+      IRTemp oldC = newTemp(Ity_I32);
+      assign(oldC, mk_armg_calculate_flag_c() );
+      assign(
+         *newC,
+         IRExpr_ITE(
+            binop(Iop_CmpNE32, mkexpr(amtT), mkU32(0)),
+            binop(Iop_And32,
+                  binop(Iop_Shr32,
+                        mkexpr(rMt), 
+                        unop(Iop_32to8,
+                             binop(Iop_And32,
+                                   binop(Iop_Sub32,
+                                         mkexpr(amtT), 
+                                         mkU32(1)
+                                   ),
+                                   mkU32(31)
+                             )
+                        )
+                  ),
+                  mkU32(1)
+            ),
+            mkexpr(oldC)
+         )
+      );
+   }
+   // res = amt5 == 0 ? Rm : (Rm >>u amt5) | (Rm << (32-amt5))
+   assign(
+      *res,
+      IRExpr_ITE(
+         binop(Iop_CmpNE32, mkexpr(amt5T), mkU32(0)),
+         binop(Iop_Or32,
+               binop(Iop_Shr32,
+                     mkexpr(rMt), 
+                     unop(Iop_32to8, mkexpr(amt5T))
+               ),
+               binop(Iop_Shl32,
+                     mkexpr(rMt),
+                     unop(Iop_32to8,
+                          binop(Iop_Sub32, mkU32(32), mkexpr(amt5T))
+                     )
+               )
+               ),
+         mkexpr(rMt)
+      )
+   );
+   /* Was "ROR r#%u": the '#' is wrong for a register operand and
+      inconsistent with the LSL/LSR/ASR by-reg variants above. */
+   DIS(buf, "r%u, ROR r%u", rM, rS);
+}
+
+
+/* Generate an expression corresponding to the immediate-shift case of
+   a shifter operand.  This is used both for ARM and Thumb2.
+
+   Bind it to a temporary, and return that via *res.  If newC is
+   non-NULL, also compute a value for the shifter's carry out (in the
+   LSB of a word), bind it to a temporary, and return that via *newC.
+
+   Generates GETs from the guest state and is therefore not safe to
+   use once we start doing PUTs to it, for any given instruction.
+
+   'how' is encoded thusly:
+      00b LSL,  01b LSR,  10b ASR,  11b ROR
+   Most but not all ARM and Thumb integer insns use this encoding.
+   Be careful to ensure the right value is passed here.
+*/
+static void compute_result_and_C_after_shift_by_imm5 (
+               /*OUT*/HChar* buf,
+               /*OUT*/IRTemp* res,
+               /*OUT*/IRTemp* newC,
+               IRTemp  rMt,       /* reg to shift */
+               UInt    how,       /* what kind of shift */
+               UInt    shift_amt, /* shift amount (0..31) */
+               UInt    rM         /* only for debug printing */
+            )
+{
+   vassert(shift_amt < 32);
+   vassert(how < 4);
+
+   /* Dispatch on 'how': 00=LSL, 01=LSR, 10=ASR, 11=ROR (where ROR
+      with a zero shift amount denotes RRX). */
+   switch (how) {
+
+      case 0:
+         compute_result_and_C_after_LSL_by_imm5(
+            buf, res, newC, rMt, shift_amt, rM
+         );
+         break;
+
+      case 1:
+         compute_result_and_C_after_LSR_by_imm5(
+            buf, res, newC, rMt, shift_amt, rM
+         );
+         break;
+
+      case 2:
+         compute_result_and_C_after_ASR_by_imm5(
+            buf, res, newC, rMt, shift_amt, rM
+         );
+         break;
+
+      case 3:
+         if (shift_amt == 0) {
+            IRTemp oldcT = newTemp(Ity_I32);
+            // rotate right 1 bit through carry (?)
+            // RRX -- described at ARM ARM A5-17
+            // res  = (oldC << 31) | (Rm >>u 1)
+            // newC = Rm[0]
+            if (newC) {
+               assign( *newC,
+                       binop(Iop_And32, mkexpr(rMt), mkU32(1)));
+            }
+            assign( oldcT, mk_armg_calculate_flag_c() );
+            assign( *res, 
+                    binop(Iop_Or32,
+                          binop(Iop_Shl32, mkexpr(oldcT), mkU8(31)),
+                          binop(Iop_Shr32, mkexpr(rMt), mkU8(1))) );
+            DIS(buf, "r%u, RRX", rM);
+         } else {
+            // rotate right in range 1..31
+            // res  = Rm `ror` shift_amt
+            // newC = Rm[shift_amt - 1]
+            vassert(shift_amt >= 1 && shift_amt <= 31);
+            if (newC) {
+               assign( *newC,
+                       binop(Iop_And32,
+                             binop(Iop_Shr32, mkexpr(rMt), 
+                                              mkU8(shift_amt - 1)),
+                             mkU32(1)));
+            }
+            // Rotate synthesised as (Rm >>u amt) | (Rm << (32-amt)).
+            assign( *res,
+                    binop(Iop_Or32,
+                          binop(Iop_Shr32, mkexpr(rMt), mkU8(shift_amt)),
+                          binop(Iop_Shl32, mkexpr(rMt),
+                                           mkU8(32-shift_amt))));
+            DIS(buf, "r%u, ROR #%u", rM, shift_amt);
+         }
+         break;
+
+      default:
+         /*NOTREACHED*/
+         vassert(0);
+   }
+}
+
+
+/* Generate an expression corresponding to the register-shift case of
+   a shifter operand.  This is used both for ARM and Thumb2.
+
+   Bind it to a temporary, and return that via *res.  If newC is
+   non-NULL, also compute a value for the shifter's carry out (in the
+   LSB of a word), bind it to a temporary, and return that via *newC.
+
+   Generates GETs from the guest state and is therefore not safe to
+   use once we start doing PUTs to it, for any given instruction.
+
+   'how' is encoded thusly:
+      00b LSL,  01b LSR,  10b ASR,  11b ROR
+   Most but not all ARM and Thumb integer insns use this encoding.
+   Be careful to ensure the right value is passed here.
+*/
+static void compute_result_and_C_after_shift_by_reg (
+               /*OUT*/HChar*  buf,
+               /*OUT*/IRTemp* res,
+               /*OUT*/IRTemp* newC,
+               IRTemp  rMt,       /* reg to shift */
+               UInt    how,       /* what kind of shift */
+               IRTemp  rSt,       /* shift amount */
+               UInt    rM,        /* only for debug printing */
+               UInt    rS         /* only for debug printing */
+            )
+{
+   /* Dispatch on the standard 2-bit shift-kind encoding:
+      00=LSL, 01=LSR, 10=ASR, 11=ROR. */
+   vassert(how < 4);
+   if (how == 0) {
+      compute_result_and_C_after_LSL_by_reg(
+         buf, res, newC, rMt, rSt, rM, rS
+      );
+   } else
+   if (how == 1) {
+      compute_result_and_C_after_LSR_by_reg(
+         buf, res, newC, rMt, rSt, rM, rS
+      );
+   } else
+   if (how == 2) {
+      compute_result_and_C_after_ASR_by_reg(
+         buf, res, newC, rMt, rSt, rM, rS
+      );
+   } else {
+      /* how == 3, guaranteed by the vassert above */
+      compute_result_and_C_after_ROR_by_reg(
+         buf, res, newC, rMt, rSt, rM, rS
+      );
+   }
+}
+
+
+/* Generate an expression corresponding to a shifter_operand, bind it
+   to a temporary, and return that via *shop.  If shco is non-NULL,
+   also compute a value for the shifter's carry out (in the LSB of a
+   word), bind it to a temporary, and return that via *shco.
+
+   If for some reason we can't come up with a shifter operand (missing
+   case?  not really a shifter operand?) return False.
+
+   Generates GETs from the guest state and is therefore not safe to
+   use once we start doing PUTs to it, for any given instruction.
+
+   For ARM insns only; not for Thumb.
+*/
+static Bool mk_shifter_operand ( UInt insn_25, UInt insn_11_0,
+                                 /*OUT*/IRTemp* shop,
+                                 /*OUT*/IRTemp* shco,
+                                 /*OUT*/HChar* buf )
+{
+   /* bit 4 distinguishes shift-by-immediate (0) from shift-by-register
+      (1); bit 7 must additionally be 0 in the shift-by-register form. */
+   UInt insn_4 = (insn_11_0 >> 4) & 1;
+   UInt insn_7 = (insn_11_0 >> 7) & 1;
+   vassert(insn_25 <= 0x1);
+   vassert(insn_11_0 <= 0xFFF);
+
+   /* Caller must pass fresh (INVALID) temps; we allocate them here. */
+   vassert(shop && *shop == IRTemp_INVALID);
+   *shop = newTemp(Ity_I32);
+
+   if (shco) {
+      vassert(*shco == IRTemp_INVALID);
+      *shco = newTemp(Ity_I32);
+   }
+
+   /* 32-bit immediate */
+
+   if (insn_25 == 1) {
+      /* immediate: (7:0) rotated right by 2 * (11:8) */
+      UInt imm = (insn_11_0 >> 0) & 0xFF;
+      UInt rot = 2 * ((insn_11_0 >> 8) & 0xF);
+      vassert(rot <= 30);
+      imm = ROR32(imm, rot);
+      /* Carry-out: old C if no rotation, else bit 31 of the rotated
+         immediate.  Both forms are known constants at translate time. */
+      if (shco) {
+         if (rot == 0) {
+            assign( *shco, mk_armg_calculate_flag_c() );
+         } else {
+            assign( *shco, mkU32( (imm >> 31) & 1 ) );
+         }
+      }
+      DIS(buf, "#0x%x", imm);
+      assign( *shop, mkU32(imm) );
+      return True;
+   }
+
+   /* Shift/rotate by immediate */
+
+   if (insn_25 == 0 && insn_4 == 0) {
+      /* Rm (3:0) shifted (6:5) by immediate (11:7) */
+      UInt shift_amt = (insn_11_0 >> 7) & 0x1F;
+      UInt rM        = (insn_11_0 >> 0) & 0xF;
+      UInt how       = (insn_11_0 >> 5) & 3;
+      /* how: 00 = Shl, 01 = Shr, 10 = Sar, 11 = Ror */
+      IRTemp rMt = newTemp(Ity_I32);
+      assign(rMt, getIRegA(rM));
+
+      vassert(shift_amt <= 31);
+
+      compute_result_and_C_after_shift_by_imm5(
+         buf, shop, shco, rMt, how, shift_amt, rM
+      );
+      return True;
+   }
+
+   /* Shift/rotate by register */
+   if (insn_25 == 0 && insn_4 == 1) {
+      /* Rm (3:0) shifted (6:5) by Rs (11:8) */
+      UInt rM  = (insn_11_0 >> 0) & 0xF;
+      UInt rS  = (insn_11_0 >> 8) & 0xF;
+      UInt how = (insn_11_0 >> 5) & 3;
+      /* how: 00 = Shl, 01 = Shr, 10 = Sar, 11 = Ror */
+      IRTemp rMt = newTemp(Ity_I32);
+      IRTemp rSt = newTemp(Ity_I32);
+
+      /* With bit 7 set this is not a shifter-operand encoding at all;
+         reject and let the caller deal with it. */
+      if (insn_7 == 1)
+         return False; /* not really a shifter operand */
+
+      assign(rMt, getIRegA(rM));
+      assign(rSt, getIRegA(rS));
+
+      compute_result_and_C_after_shift_by_reg(
+         buf, shop, shco, rMt, how, rSt, rM, rS
+      );
+      return True;
+   }
+
+   /* No decodable form: complain and fail. */
+   vex_printf("mk_shifter_operand(0x%x,0x%x)\n", insn_25, insn_11_0 );
+   return False;
+}
+
+
+/* ARM only.  Build an effective-address expression of the form
+   rN +/- imm12, and describe it in buf. */
+static 
+IRExpr* mk_EA_reg_plusminus_imm12 ( UInt rN, UInt bU, UInt imm12,
+                                    /*OUT*/HChar* buf )
+{
+   vassert(rN < 16);
+   vassert(bU < 2);
+   vassert(imm12 < 0x1000);
+   Bool  isAdd  = bU == 1;
+   HChar opChar = isAdd ? '+' : '-';
+   DIS(buf, "[r%u, #%c%u]", rN, opChar, imm12);
+   return binop( isAdd ? Iop_Add32 : Iop_Sub32,
+                 getIRegA(rN),
+                 mkU32(imm12) );
+}
+
+
+/* ARM only.
+   NB: This is "DecodeImmShift" in newer versions of the ARM ARM.
+   Builds an effective-address expression rN +/- (rM shifted by imm5
+   according to sh2), and describes it in buf.
+*/
+static
+IRExpr* mk_EA_reg_plusminus_shifted_reg ( UInt rN, UInt bU, UInt rM,
+                                          UInt sh2, UInt imm5,
+                                          /*OUT*/HChar* buf )
+{
+   vassert(rN < 16);
+   vassert(bU < 2);
+   vassert(rM < 16);
+   vassert(sh2 < 4);
+   vassert(imm5 < 32);
+   HChar   opChar = bU == 1 ? '+' : '-';
+   IRExpr* index  = NULL;
+   switch (sh2) {
+      case 0: /* LSL */
+         /* imm5 can be in the range 0 .. 31 inclusive. */
+         index = binop(Iop_Shl32, getIRegA(rM), mkU8(imm5));
+         DIS(buf, "[r%u, %c r%u LSL #%u]", rN, opChar, rM, imm5); 
+         break;
+      case 1: /* LSR */
+         if (imm5 == 0) {
+            /* LSR #0 encodes LSR #32, hence a zero index.
+               NOTE(review): the vassert(0) ("ATC") deliberately aborts
+               here -- presumably this path has never been exercised. */
+            index = mkU32(0);
+            vassert(0); // ATC
+         } else {
+            index = binop(Iop_Shr32, getIRegA(rM), mkU8(imm5));
+         }
+         DIS(buf, "[r%u, %cr%u, LSR #%u]",
+                  rN, opChar, rM, imm5 == 0 ? 32 : imm5); 
+         break;
+      case 2: /* ASR */
+         /* Doesn't this just mean that the behaviour with imm5 == 0
+            is the same as if it had been 31 ? */
+         if (imm5 == 0) {
+            /* ASR #0 encodes ASR #32; also an unexercised ("ATC") path. */
+            index = binop(Iop_Sar32, getIRegA(rM), mkU8(31));
+            vassert(0); // ATC
+         } else {
+            index = binop(Iop_Sar32, getIRegA(rM), mkU8(imm5));
+         }
+         DIS(buf, "[r%u, %cr%u, ASR #%u]",
+                  rN, opChar, rM, imm5 == 0 ? 32 : imm5); 
+         break;
+      case 3: /* ROR or RRX */
+         if (imm5 == 0) {
+            /* RRX: (oldC << 31) | (Rm >>u 1).  Reads the C flag. */
+            IRTemp rmT    = newTemp(Ity_I32);
+            IRTemp cflagT = newTemp(Ity_I32);
+            assign(rmT, getIRegA(rM));
+            assign(cflagT, mk_armg_calculate_flag_c());
+            index = binop(Iop_Or32, 
+                          binop(Iop_Shl32, mkexpr(cflagT), mkU8(31)),
+                          binop(Iop_Shr32, mkexpr(rmT), mkU8(1)));
+            DIS(buf, "[r%u, %cr%u, RRX]", rN, opChar, rM);
+         } else {
+            /* ROR: rotate synthesised as (Rm << (32-imm5)) | (Rm >>u imm5). */
+            IRTemp rmT = newTemp(Ity_I32);
+            assign(rmT, getIRegA(rM));
+            vassert(imm5 >= 1 && imm5 <= 31);
+            index = binop(Iop_Or32, 
+                          binop(Iop_Shl32, mkexpr(rmT), mkU8(32-imm5)),
+                          binop(Iop_Shr32, mkexpr(rmT), mkU8(imm5)));
+            DIS(buf, "[r%u, %cr%u, ROR #%u]", rN, opChar, rM, imm5); 
+         }
+         break;
+      default:
+         vassert(0);
+   }
+   vassert(index);
+   return binop(bU == 1 ? Iop_Add32 : Iop_Sub32,
+                getIRegA(rN), index);
+}
+
+
+/* ARM only.  Build an effective-address expression of the form
+   rN +/- imm8, and describe it in buf. */
+static 
+IRExpr* mk_EA_reg_plusminus_imm8 ( UInt rN, UInt bU, UInt imm8,
+                                   /*OUT*/HChar* buf )
+{
+   vassert(rN < 16);
+   vassert(bU < 2);
+   vassert(imm8 < 0x100);
+   Bool  isAdd  = bU == 1;
+   HChar opChar = isAdd ? '+' : '-';
+   DIS(buf, "[r%u, #%c%u]", rN, opChar, imm8);
+   return binop( isAdd ? Iop_Add32 : Iop_Sub32,
+                 getIRegA(rN),
+                 mkU32(imm8) );
+}
+
+
+/* ARM only.  Build an effective-address expression of the form
+   rN +/- rM, and describe it in buf. */
+static
+IRExpr* mk_EA_reg_plusminus_reg ( UInt rN, UInt bU, UInt rM,
+                                  /*OUT*/HChar* buf )
+{
+   vassert(rN < 16);
+   vassert(bU < 2);
+   vassert(rM < 16);
+   Bool isAdd = bU == 1;
+   DIS(buf, "[r%u, %c r%u]", rN, isAdd ? '+' : '-', rM); 
+   return binop(isAdd ? Iop_Add32 : Iop_Sub32,
+                getIRegA(rN), getIRegA(rM));
+}
+
+
+/* irRes :: Ity_I32 holds a floating point comparison result encoded
+   as an IRCmpF64Result.  Generate code to convert it to an
+   ARM-encoded (N,Z,C,V) group in the lowest 4 bits of an I32 value.
+   Assign a new temp to hold that value, and return the temp. */
+static
+IRTemp mk_convert_IRCmpF64Result_to_NZCV ( IRTemp irRes )
+{
+   IRTemp ix       = newTemp(Ity_I32);
+   IRTemp termL    = newTemp(Ity_I32);
+   IRTemp termR    = newTemp(Ity_I32);
+   IRTemp nzcv     = newTemp(Ity_I32);
+
+   /* This is where the fun starts.  We have to convert 'irRes' from
+      an IR-convention return result (IRCmpF64Result) to an
+      ARM-encoded (N,Z,C,V) group.  The final result is in the bottom
+      4 bits of 'nzcv'. */
+   /* Map compare result from IR to ARM(nzcv) */
+   /*
+      FP cmp result | IR   | ARM(nzcv)
+      --------------------------------
+      UN              0x45   0011
+      LT              0x01   1000
+      GT              0x00   0010
+      EQ              0x40   0110
+   */
+   /* Now since you're probably wondering WTF ..
+
+      ix fishes the useful bits out of the IR value, bits 6 and 0, and
+      places them side by side, giving a number which is 0, 1, 2 or 3.
+
+      termL is a sequence cooked up by GNU superopt.  It converts ix
+         into an almost correct value NZCV value (incredibly), except
+         for the case of UN, where it produces 0100 instead of the
+         required 0011.
+
+      termR is therefore a correction term, also computed from ix.  It
+         is 1 in the UN case and 0 for LT, GT and EQ.  Hence, to get
+         the final correct value, we subtract termR from termL.
+
+      Don't take my word for it.  There's a test program at the bottom
+      of this file, to try this out with.
+   */
+   /* ix = ((irRes >> 5) & 3) | (irRes & 1) -- i.e. bits 6:5 paired
+      with bit 0; bit 5 is zero for all four IR result values. */
+   assign(
+      ix,
+      binop(Iop_Or32,
+            binop(Iop_And32,
+                  binop(Iop_Shr32, mkexpr(irRes), mkU8(5)),
+                  mkU32(3)),
+            binop(Iop_And32, mkexpr(irRes), mkU32(1))));
+
+   assign(
+      termL,
+      binop(Iop_Add32,
+            binop(Iop_Shr32,
+                  binop(Iop_Sub32,
+                        binop(Iop_Shl32,
+                              binop(Iop_Xor32, mkexpr(ix), mkU32(1)),
+                              mkU8(30)),
+                        mkU32(1)),
+                  mkU8(29)),
+            mkU32(1)));
+
+   /* termR = ix & (ix >> 1) & 1, i.e. 1 only when ix == 3 (UN). */
+   assign(
+      termR,
+      binop(Iop_And32,
+            binop(Iop_And32,
+                  mkexpr(ix),
+                  binop(Iop_Shr32, mkexpr(ix), mkU8(1))),
+            mkU32(1)));
+
+   assign(nzcv, binop(Iop_Sub32, mkexpr(termL), mkexpr(termR)));
+   return nzcv;
+}
+
+
+/* Thumb32 only.  This is "ThumbExpandImm" in the ARM ARM.  If
+   updatesC is non-NULL, a boolean is written to it indicating whether
+   or not the C flag is updated, as per ARM ARM "ThumbExpandImm_C".
+*/
+static UInt thumbExpandImm ( Bool* updatesC,
+                             UInt imm1, UInt imm3, UInt imm8 )
+{
+   vassert(imm1 < (1<<1));
+   vassert(imm3 < (1<<3));
+   vassert(imm8 < (1<<8));
+   /* The 5-bit control field i:imm3:a selects the expansion scheme. */
+   UInt ctl = (imm1 << 4) | (imm3 << 1) | ((imm8 >> 7) & 1);
+   if (updatesC) {
+      /* Only the rotated-immediate forms (ctl >= 8) update C. */
+      *updatesC = ctl >= 8;
+   }
+   if (ctl >= 8) {
+      /* Rotated form: 1bcdefgh rotated right by ctl. */
+      return (imm8 | 0x80) << (32 - ctl);
+   }
+   switch (ctl >> 1) {
+      case 0: /* 00000000 00000000 00000000 abcdefgh */
+         return imm8;
+      case 1: /* 00000000 abcdefgh 00000000 abcdefgh */
+         return (imm8 << 16) | imm8;
+      case 2: /* abcdefgh 00000000 abcdefgh 00000000 */
+         return (imm8 << 24) | (imm8 << 8);
+      case 3: /* abcdefgh abcdefgh abcdefgh abcdefgh */
+         return (imm8 << 24) | (imm8 << 16) | (imm8 << 8) | imm8;
+      default:
+         break;
+   }
+   /*NOTREACHED*/vassert(0);
+}
+
+
+/* Version of thumbExpandImm where we simply feed it the
+   instruction halfwords (the lowest addressed one is I0). */
+static UInt thumbExpandImm_from_I0_I1 ( Bool* updatesC,
+                                        UShort i0s, UShort i1s )
+{
+   UInt hw0 = (UInt)i0s;
+   UInt hw1 = (UInt)i1s;
+   return thumbExpandImm( updatesC,
+                          SLICE_UInt(hw0,10,10),   /* imm1 */
+                          SLICE_UInt(hw1,14,12),   /* imm3 */
+                          SLICE_UInt(hw1,7,0) );   /* imm8 */
+}
+
+
+/* Thumb16 only.  Given the firstcond and mask fields from an IT
+   instruction, compute the 32-bit ITSTATE value implied, as described
+   in libvex_guest_arm.h.  This is not the ARM ARM representation.
+   Also produce the t/e chars for the 2nd, 3rd, 4th insns, for
+   disassembly printing.  Returns False if firstcond or mask
+   denote something invalid.
+
+   The number and conditions for the instructions to be
+   conditionalised depend on firstcond and mask:
+
+   mask      cond 1    cond 2      cond 3      cond 4
+
+   1000      fc[3:0]
+   x100      fc[3:0]   fc[3:1]:x
+   xy10      fc[3:0]   fc[3:1]:x   fc[3:1]:y
+   xyz1      fc[3:0]   fc[3:1]:x   fc[3:1]:y   fc[3:1]:z
+
+   The condition fields are assembled in *itstate backwards (cond 4 at
+   the top, cond 1 at the bottom).  Conditions are << 4'd and then
+   ^0xE'd, and those fields that correspond to instructions in the IT
+   block are tagged with a 1 bit.
+*/
+static Bool compute_ITSTATE ( /*OUT*/UInt*  itstate,
+                              /*OUT*/HChar* ch1,
+                              /*OUT*/HChar* ch2,
+                              /*OUT*/HChar* ch3,
+                              UInt firstcond, UInt mask )
+{
+   vassert(firstcond <= 0xF);
+   vassert(mask <= 0xF);
+   /* Default outputs: empty state, no trailing insns to print. */
+   *itstate = 0;
+   *ch1 = *ch2 = *ch3 = '.';
+   if (mask == 0)
+      return False; /* the logic below actually ensures this anyway,
+                       but clearer to make it explicit. */
+   if (firstcond == 0xF)
+      return False; /* NV is not allowed */
+   if (firstcond == 0xE && popcount32(mask) != 1) 
+      return False; /* if firstcond is AL then all the rest must be too */
+
+   UInt m3 = (mask >> 3) & 1;
+   UInt m2 = (mask >> 2) & 1;
+   UInt m1 = (mask >> 1) & 1;
+   UInt m0 = (mask >> 0) & 1;
+
+   /* fc: condition for an insn inside the IT block (tag bit set);
+      ni: AL condition for a slot not in the block (tag bit clear). */
+   UInt fc = (firstcond << 4) | 1/*in-IT-block*/;
+   UInt ni = (0xE/*AL*/ << 4) | 0/*not-in-IT-block*/;
+
+   /* mask == 1000: just one insn in the IT block. */
+   if (m3 == 1 && (m2|m1|m0) == 0) {
+      *itstate = (ni << 24) | (ni << 16) | (ni << 8) | fc;
+      *itstate ^= 0xE0E0E0E0;
+      return True;
+   }
+
+   /* mask == x100: two insns; x (m3) gives the 2nd insn's low cond bit. */
+   if (m2 == 1 && (m1|m0) == 0) {
+      *itstate = (ni << 24) | (ni << 16) | (setbit32(fc, 4, m3) << 8) | fc;
+      *itstate ^= 0xE0E0E0E0;
+      *ch1 = m3 == (firstcond & 1) ? 't' : 'e';
+      return True;
+   }
+
+   /* mask == xy10: three insns. */
+   if (m1 == 1 && m0 == 0) {
+      *itstate = (ni << 24)
+                 | (setbit32(fc, 4, m2) << 16)
+                 | (setbit32(fc, 4, m3) << 8) | fc;
+      *itstate ^= 0xE0E0E0E0;
+      *ch1 = m3 == (firstcond & 1) ? 't' : 'e';
+      *ch2 = m2 == (firstcond & 1) ? 't' : 'e';
+      return True;
+   }
+
+   /* mask == xyz1: four insns. */
+   if (m0 == 1) {
+      *itstate = (setbit32(fc, 4, m1) << 24)
+                 | (setbit32(fc, 4, m2) << 16)
+                 | (setbit32(fc, 4, m3) << 8) | fc;
+      *itstate ^= 0xE0E0E0E0;
+      *ch1 = m3 == (firstcond & 1) ? 't' : 'e';
+      *ch2 = m2 == (firstcond & 1) ? 't' : 'e';
+      *ch3 = m1 == (firstcond & 1) ? 't' : 'e';
+      return True;
+   }
+
+   /* Can't get here: all-zero mask was rejected at the top. */
+   return False;
+}
+
+
+/* Generate IR to do 32-bit bit reversal, a la Hacker's Delight
+   Chapter 7 Section 1.  Five rounds, each swapping adjacent groups
+   of 1, 2, 4, 8 and finally 16 bits. */
+static IRTemp gen_BITREV ( IRTemp x0 )
+{
+   /* Masks selecting the low half of each group pair per round. */
+   const UInt masks[5]
+      = { 0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF, 0x0000FFFF };
+   IRTemp acc = x0;
+   UInt   i;
+   for (i = 0; i < 5; i++) {
+      UInt   sh  = 1 << i;       /* group width for this round */
+      IRTemp nxt = newTemp(Ity_I32);
+      assign(nxt,
+             binop(Iop_Or32,
+                   binop(Iop_Shl32,
+                         binop(Iop_And32, mkexpr(acc), mkU32(masks[i])),
+                         mkU8(sh)),
+                   binop(Iop_Shr32,
+                         binop(Iop_And32, mkexpr(acc), mkU32(~masks[i])),
+                         mkU8(sh))));
+      acc = nxt;
+   }
+   return acc;
+}
+
+
+/* Generate IR to do rearrange bytes 3:2:1:0 in a word in to the order
+   0:1:2:3 (aka byte-swap). */
+static IRTemp gen_REV ( IRTemp arg )
+{
+   IRTemp res = newTemp(Ity_I32);
+   /* Move each byte to its mirrored position, then OR them together. */
+   IRExpr* b3 = binop(Iop_Shl32, mkexpr(arg), mkU8(24));
+   IRExpr* b2 = binop(Iop_And32, binop(Iop_Shl32, mkexpr(arg), mkU8(8)),
+                      mkU32(0x00FF0000));
+   IRExpr* b1 = binop(Iop_And32, binop(Iop_Shr32, mkexpr(arg), mkU8(8)),
+                      mkU32(0x0000FF00));
+   IRExpr* b0 = binop(Iop_And32, binop(Iop_Shr32, mkexpr(arg), mkU8(24)),
+                      mkU32(0x000000FF));
+   assign(res,
+          binop(Iop_Or32, b3,
+                binop(Iop_Or32, b2,
+                      binop(Iop_Or32, b1, b0))));
+   return res;
+}
+
+
+/* Generate IR to do rearrange bytes 3:2:1:0 in a word in to the order
+   2:3:0:1 (swap within lo and hi halves). */
+static IRTemp gen_REV16 ( IRTemp arg )
+{
+   IRTemp res = newTemp(Ity_I32);
+   /* Even bytes move up one lane, odd bytes move down one lane. */
+   IRExpr* up   = binop(Iop_And32,
+                        binop(Iop_Shl32, mkexpr(arg), mkU8(8)),
+                        mkU32(0xFF00FF00));
+   IRExpr* down = binop(Iop_And32,
+                        binop(Iop_Shr32, mkexpr(arg), mkU8(8)),
+                        mkU32(0x00FF00FF));
+   assign(res, binop(Iop_Or32, up, down));
+   return res;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Advanced SIMD (NEON) instructions                    ---*/
+/*------------------------------------------------------------*/
+
+/*------------------------------------------------------------*/
+/*--- NEON data processing                                 ---*/
+/*------------------------------------------------------------*/
+
+/* For all NEON DP ops, we use the normal scheme to handle conditional
+   writes to registers -- pass in condT and hand that on to the
+   put*Reg functions.  In ARM mode condT is always IRTemp_INVALID
+   since NEON is unconditional for ARM.  In Thumb mode condT is
+   derived from the ITSTATE shift register in the normal way. */
+
+static
+UInt get_neon_d_regno(UInt theInstr)
+{
+   /* D:Vd, i.e. instruction bit 22 on top of bits 15:12. */
+   UInt x = ((theInstr >> 18) & 0x10) | ((theInstr >> 12) & 0xF);
+   if (theInstr & 0x40) {
+      /* Q form: halve even numbers; push odd (invalid) encodings out
+         of range so callers can reject them (>= 0x100). */
+      x = (x & 1) ? x + 0x100 : x >> 1;
+   }
+   return x;
+}
+
+static
+UInt get_neon_n_regno(UInt theInstr)
+{
+   /* N:Vn, i.e. instruction bit 7 on top of bits 19:16. */
+   UInt x = ((theInstr >> 3) & 0x10) | ((theInstr >> 16) & 0xF);
+   if (theInstr & 0x40) {
+      /* Q form: halve even numbers; push odd (invalid) encodings out
+         of range so callers can reject them (>= 0x100). */
+      x = (x & 1) ? x + 0x100 : x >> 1;
+   }
+   return x;
+}
+
+static
+UInt get_neon_m_regno(UInt theInstr)
+{
+   /* M:Vm, i.e. instruction bit 5 on top of bits 3:0. */
+   UInt x = ((theInstr >> 1) & 0x10) | (theInstr & 0xF);
+   if (theInstr & 0x40) {
+      /* Q form: halve even numbers; push odd (invalid) encodings out
+         of range so callers can reject them (>= 0x100). */
+      x = (x & 1) ? x + 0x100 : x >> 1;
+   }
+   return x;
+}
+
+/* VEXT: extract a contiguous byte run straddling (Vm:Vn) starting at
+   byte imm4, into Vd.  Works on D or Q registers (Q bit).  Returns
+   True on successful decode.  Fix: the DIP format used %d for UInt
+   arguments; %u is required for unsigned values (and matches the
+   sibling decoders). */
+static
+Bool dis_neon_vext ( UInt theInstr, IRTemp condT )
+{
+   UInt dreg = get_neon_d_regno(theInstr);
+   UInt mreg = get_neon_m_regno(theInstr);
+   UInt nreg = get_neon_n_regno(theInstr);
+   UInt imm4 = (theInstr >> 8) & 0xf;
+   UInt Q = (theInstr >> 6) & 1;
+   HChar reg_t = Q ? 'q' : 'd';
+
+   if (Q) {
+      putQReg(dreg, triop(Iop_SliceV128, /*hiV128*/getQReg(mreg),
+                          /*loV128*/getQReg(nreg), mkU8(imm4)), condT);
+   } else {
+      putDRegI64(dreg, triop(Iop_Slice64, /*hiI64*/getDRegI64(mreg),
+                             /*loI64*/getDRegI64(nreg), mkU8(imm4)), condT);
+   }
+   DIP("vext.8 %c%u, %c%u, %c%u, #%u\n", reg_t, dreg, reg_t, nreg,
+                                         reg_t, mreg, imm4);
+   return True;
+}
+
+/* Generate specific vector FP binary ops, possibly with a fake
+   rounding mode as required by the primop. */
+static
+IRExpr* binop_w_fake_RM ( IROp op, IRExpr* argL, IRExpr* argR )
+{
+   Bool takesRM = False;
+   switch (op) {
+      /* These primops carry a rounding mode as their first arg. */
+      case Iop_Add32Fx4:
+      case Iop_Sub32Fx4:
+      case Iop_Mul32Fx4:
+         takesRM = True;
+         break;
+      /* Plain two-operand primops. */
+      case Iop_Add32x4: case Iop_Add16x8:
+      case Iop_Sub32x4: case Iop_Sub16x8:
+      case Iop_Mul32x4: case Iop_Mul16x8:
+      case Iop_Mul32x2: case Iop_Mul16x4:
+      case Iop_Add32Fx2:
+      case Iop_Sub32Fx2:
+      case Iop_Mul32Fx2:
+      case Iop_PwAdd32Fx2:
+         break;
+      default:
+         ppIROp(op);
+         vassert(0);
+   }
+   return takesRM ? triop(op, get_FAKE_roundingmode(), argL, argR)
+                  : binop(op, argL, argR);
+}
+
+/* VTBL, VTBX */
+static
+Bool dis_neon_vtb ( UInt theInstr, IRTemp condT )
+{
+   /* op == 0: VTBL (out-of-range indices yield 0)
+      op == 1: VTBX (out-of-range indices leave the dest byte alone) */
+   UInt op = (theInstr >> 6) & 1;
+   UInt dreg = get_neon_d_regno(theInstr & ~(1 << 6));
+   UInt nreg = get_neon_n_regno(theInstr & ~(1 << 6));
+   UInt mreg = get_neon_m_regno(theInstr & ~(1 << 6));
+   UInt len = (theInstr >> 8) & 3;   /* table spans len+1 D registers */
+   Int i;
+   IROp cmp;
+   ULong imm;
+   IRTemp arg_l;
+   IRTemp old_mask, new_mask, cur_mask;
+   IRTemp old_res, new_res;
+   IRTemp old_arg, new_arg;
+
+   /* The regno helpers return >= 0x100 for invalid (odd Q) encodings;
+      this insn only takes D registers. */
+   if (dreg >= 0x100 || mreg >= 0x100 || nreg >= 0x100)
+      return False;
+   if (nreg + len > 31)
+      return False;
+
+   cmp = Iop_CmpGT8Ux8;
+
+   old_mask = newTemp(Ity_I64);
+   old_res = newTemp(Ity_I64);
+   old_arg = newTemp(Ity_I64);
+   assign(old_mask, mkU64(0));
+   assign(old_res, mkU64(0));
+   assign(old_arg, getDRegI64(mreg));
+   /* imm = eight copies of the byte 8: one table register's worth of
+      indices, used both to range-check and to step indices down. */
+   imm = 8;
+   imm = (imm <<  8) | imm;
+   imm = (imm << 16) | imm;
+   imm = (imm << 32) | imm;
+
+   /* Walk the table registers, accumulating selected bytes. */
+   for (i = 0; i <= len; i++) {
+      arg_l = newTemp(Ity_I64);
+      new_mask = newTemp(Ity_I64);
+      cur_mask = newTemp(Ity_I64);
+      new_res = newTemp(Ity_I64);
+      new_arg = newTemp(Ity_I64);
+      assign(arg_l, getDRegI64(nreg+i));
+      /* Rebase the indices for the next table register. */
+      assign(new_arg, binop(Iop_Sub8x8, mkexpr(old_arg), mkU64(imm)));
+      /* Lanes whose index falls inside this register (index < 8). */
+      assign(cur_mask, binop(cmp, mkU64(imm), mkexpr(old_arg)));
+      assign(new_mask, binop(Iop_Or64, mkexpr(old_mask), mkexpr(cur_mask)));
+      /* Permute this register by the in-range indices and merge. */
+      assign(new_res, binop(Iop_Or64,
+                            mkexpr(old_res),
+                            binop(Iop_And64,
+                                  binop(Iop_Perm8x8,
+                                        mkexpr(arg_l),
+                                        binop(Iop_And64,
+                                              mkexpr(old_arg),
+                                              mkexpr(cur_mask))),
+                                  mkexpr(cur_mask))));
+
+      old_arg = new_arg;
+      old_mask = new_mask;
+      old_res = new_res;
+   }
+   if (op) {
+      /* VTBX: preserve dest bytes in lanes never matched by any
+         table register (old_mask is zero there). */
+      new_res = newTemp(Ity_I64);
+      assign(new_res, binop(Iop_Or64,
+                            binop(Iop_And64,
+                                  getDRegI64(dreg),
+                                  unop(Iop_Not64, mkexpr(old_mask))),
+                            mkexpr(old_res)));
+      old_res = new_res;
+   }
+
+   putDRegI64(dreg, mkexpr(old_res), condT);
+   DIP("vtb%c.8 d%u, {", op ? 'x' : 'l', dreg);
+   if (len > 0) {
+      DIP("d%u-d%u", nreg, nreg + len);
+   } else {
+      DIP("d%u", nreg);
+   }
+   DIP("}, d%u\n", mreg);
+   return True;
+}
+
+/* VDUP (scalar): replicate one element of Dm into every lane of the
+   destination D or Q register.  Returns True on successful decode.
+   Fix: the DIP format used %d for UInt arguments; %u is required for
+   unsigned values (and matches the sibling decoders). */
+static
+Bool dis_neon_vdup ( UInt theInstr, IRTemp condT )
+{
+   UInt Q = (theInstr >> 6) & 1;
+   UInt dreg = ((theInstr >> 18) & 0x10) | ((theInstr >> 12) & 0xF);
+   UInt mreg = ((theInstr >> 1) & 0x10) | (theInstr & 0xF);
+   UInt imm4 = (theInstr >> 16) & 0xF;
+   UInt index;
+   UInt size;
+   IRTemp arg_m;
+   IRTemp res;
+   IROp op, op2;
+
+   if ((imm4 == 0) || (imm4 == 8))
+      return False;
+   if ((Q == 1) && ((dreg & 1) == 1))
+      return False;
+   if (Q)
+      dreg >>= 1;
+   arg_m = newTemp(Ity_I64);
+   assign(arg_m, getDRegI64(mreg));
+   if (Q)
+      res = newTemp(Ity_V128);
+   else
+      res = newTemp(Ity_I64);
+   /* The lowest set bit of imm4 encodes the element size; the bits
+      above it give the element index. */
+   if ((imm4 & 1) == 1) {
+      op = Q ? Iop_Dup8x16 : Iop_Dup8x8;
+      op2 = Iop_GetElem8x8;
+      index = imm4 >> 1;
+      size = 8;
+   } else if ((imm4 & 3) == 2) {
+      op = Q ? Iop_Dup16x8 : Iop_Dup16x4;
+      op2 = Iop_GetElem16x4;
+      index = imm4 >> 2;
+      size = 16;
+   } else if ((imm4 & 7) == 4) {
+      op = Q ? Iop_Dup32x4 : Iop_Dup32x2;
+      op2 = Iop_GetElem32x2;
+      index = imm4 >> 3;
+      size = 32;
+   } else {
+      /* Unreachable: (imm4 & 7) == 0 implies imm4 is 0 or 8, both
+         rejected above. */
+      return False;
+   }
+   assign(res, unop(op, binop(op2, mkexpr(arg_m), mkU8(index))));
+   if (Q) {
+      putQReg(dreg, mkexpr(res), condT);
+   } else {
+      putDRegI64(dreg, mkexpr(res), condT);
+   }
+   DIP("vdup.%u %c%u, d%u[%u]\n", size, Q ? 'q' : 'd', dreg, mreg, index);
+   return True;
+}
+
+/* A7.4.1 Three registers of the same length */
+static
+Bool dis_neon_data_3same ( UInt theInstr, IRTemp condT )
+{
+   UInt Q = (theInstr >> 6) & 1;
+   UInt dreg = get_neon_d_regno(theInstr);
+   UInt nreg = get_neon_n_regno(theInstr);
+   UInt mreg = get_neon_m_regno(theInstr);
+   UInt A = (theInstr >> 8) & 0xF;
+   UInt B = (theInstr >> 4) & 1;
+   UInt C = (theInstr >> 20) & 0x3;
+   UInt U = (theInstr >> 24) & 1;
+   UInt size = C;
+
+   IRTemp arg_n;
+   IRTemp arg_m;
+   IRTemp res;
+
+   if (Q) {
+      arg_n = newTemp(Ity_V128);
+      arg_m = newTemp(Ity_V128);
+      res = newTemp(Ity_V128);
+      assign(arg_n, getQReg(nreg));
+      assign(arg_m, getQReg(mreg));
+   } else {
+      arg_n = newTemp(Ity_I64);
+      arg_m = newTemp(Ity_I64);
+      res = newTemp(Ity_I64);
+      assign(arg_n, getDRegI64(nreg));
+      assign(arg_m, getDRegI64(mreg));
+   }
+
+   switch(A) {
+      case 0:
+         if (B == 0) {
+            /* VHADD */
+            ULong imm = 0;
+            IRExpr *imm_val;
+            IROp addOp;
+            IROp andOp;
+            IROp shOp;
+            HChar regType = Q ? 'q' : 'd';
+
+            if (size == 3)
+               return False;
+            switch(size) {
+               case 0: imm = 0x101010101010101LL; break;
+               case 1: imm = 0x1000100010001LL; break;
+               case 2: imm = 0x100000001LL; break;
+               default: vassert(0);
+            }
+            if (Q) {
+               imm_val = binop(Iop_64HLtoV128, mkU64(imm), mkU64(imm));
+               andOp = Iop_AndV128;
+            } else {
+               imm_val = mkU64(imm);
+               andOp = Iop_And64;
+            }
+            if (U) {
+               switch(size) {
+                  case 0:
+                     addOp = Q ? Iop_Add8x16 : Iop_Add8x8;
+                     shOp = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     break;
+                  case 1:
+                     addOp = Q ? Iop_Add16x8 : Iop_Add16x4;
+                     shOp = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     break;
+                  case 2:
+                     addOp = Q ? Iop_Add32x4 : Iop_Add32x2;
+                     shOp = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch(size) {
+                  case 0:
+                     addOp = Q ? Iop_Add8x16 : Iop_Add8x8;
+                     shOp = Q ? Iop_SarN8x16 : Iop_SarN8x8;
+                     break;
+                  case 1:
+                     addOp = Q ? Iop_Add16x8 : Iop_Add16x4;
+                     shOp = Q ? Iop_SarN16x8 : Iop_SarN16x4;
+                     break;
+                  case 2:
+                     addOp = Q ? Iop_Add32x4 : Iop_Add32x2;
+                     shOp = Q ? Iop_SarN32x4 : Iop_SarN32x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            assign(res,
+                   binop(addOp,
+                         binop(addOp,
+                               binop(shOp, mkexpr(arg_m), mkU8(1)),
+                               binop(shOp, mkexpr(arg_n), mkU8(1))),
+                         binop(shOp,
+                               binop(addOp,
+                                     binop(andOp, mkexpr(arg_m), imm_val),
+                                     binop(andOp, mkexpr(arg_n), imm_val)),
+                               mkU8(1))));
+            DIP("vhadd.%c%d %c%d, %c%d, %c%d\n",
+                U ? 'u' : 's', 8 << size, regType,
+                dreg, regType, nreg, regType, mreg);
+         } else {
+            /* VQADD */
+            IROp op, op2;
+            IRTemp tmp;
+            HChar reg_t = Q ? 'q' : 'd';
+            if (Q) {
+               switch (size) {
+                  case 0:
+                     op = U ? Iop_QAdd8Ux16 : Iop_QAdd8Sx16;
+                     op2 = Iop_Add8x16;
+                     break;
+                  case 1:
+                     op = U ? Iop_QAdd16Ux8 : Iop_QAdd16Sx8;
+                     op2 = Iop_Add16x8;
+                     break;
+                  case 2:
+                     op = U ? Iop_QAdd32Ux4 : Iop_QAdd32Sx4;
+                     op2 = Iop_Add32x4;
+                     break;
+                  case 3:
+                     op = U ? Iop_QAdd64Ux2 : Iop_QAdd64Sx2;
+                     op2 = Iop_Add64x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     op = U ? Iop_QAdd8Ux8 : Iop_QAdd8Sx8;
+                     op2 = Iop_Add8x8;
+                     break;
+                  case 1:
+                     op = U ? Iop_QAdd16Ux4 : Iop_QAdd16Sx4;
+                     op2 = Iop_Add16x4;
+                     break;
+                  case 2:
+                     op = U ? Iop_QAdd32Ux2 : Iop_QAdd32Sx2;
+                     op2 = Iop_Add32x2;
+                     break;
+                  case 3:
+                     op = U ? Iop_QAdd64Ux1 : Iop_QAdd64Sx1;
+                     op2 = Iop_Add64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            if (Q) {
+               tmp = newTemp(Ity_V128);
+            } else {
+               tmp = newTemp(Ity_I64);
+            }
+            assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+            assign(tmp, binop(op2, mkexpr(arg_n), mkexpr(arg_m)));
+            setFlag_QC(mkexpr(res), mkexpr(tmp), Q, condT);
+            DIP("vqadd.%c%d %c%d, %c%d, %c%d\n",
+                U ? 'u' : 's',
+                8 << size, reg_t, dreg, reg_t, nreg, reg_t, mreg);
+         }
+         break;
+      case 1:
+         if (B == 0) {
+            /* VRHADD */
+            /* VRHADD C, A, B ::=
+                 C = (A >> 1) + (B >> 1) + (((A & 1) + (B & 1) + 1) >> 1) */
+            IROp shift_op, add_op;
+            IRTemp cc;
+            ULong one = 1;
+            HChar reg_t = Q ? 'q' : 'd';
+            switch (size) {
+               case 0: one = (one <<  8) | one; /* fall through */
+               case 1: one = (one << 16) | one; /* fall through */
+               case 2: one = (one << 32) | one; break;
+               case 3: return False;
+               default: vassert(0);
+            }
+            if (Q) {
+               switch (size) {
+                  case 0:
+                     shift_op = U ? Iop_ShrN8x16 : Iop_SarN8x16;
+                     add_op = Iop_Add8x16;
+                     break;
+                  case 1:
+                     shift_op = U ? Iop_ShrN16x8 : Iop_SarN16x8;
+                     add_op = Iop_Add16x8;
+                     break;
+                  case 2:
+                     shift_op = U ? Iop_ShrN32x4 : Iop_SarN32x4;
+                     add_op = Iop_Add32x4;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     shift_op = U ? Iop_ShrN8x8 : Iop_SarN8x8;
+                     add_op = Iop_Add8x8;
+                     break;
+                  case 1:
+                     shift_op = U ? Iop_ShrN16x4 : Iop_SarN16x4;
+                     add_op = Iop_Add16x4;
+                     break;
+                  case 2:
+                     shift_op = U ? Iop_ShrN32x2 : Iop_SarN32x2;
+                     add_op = Iop_Add32x2;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            }
+            if (Q) {
+               cc = newTemp(Ity_V128);
+               assign(cc, binop(shift_op,
+                                binop(add_op,
+                                      binop(add_op,
+                                            binop(Iop_AndV128,
+                                                  mkexpr(arg_n),
+                                                  binop(Iop_64HLtoV128,
+                                                        mkU64(one),
+                                                        mkU64(one))),
+                                            binop(Iop_AndV128,
+                                                  mkexpr(arg_m),
+                                                  binop(Iop_64HLtoV128,
+                                                        mkU64(one),
+                                                        mkU64(one)))),
+                                      binop(Iop_64HLtoV128,
+                                            mkU64(one),
+                                            mkU64(one))),
+                                mkU8(1)));
+               assign(res, binop(add_op,
+                                 binop(add_op,
+                                       binop(shift_op,
+                                             mkexpr(arg_n),
+                                             mkU8(1)),
+                                       binop(shift_op,
+                                             mkexpr(arg_m),
+                                             mkU8(1))),
+                                 mkexpr(cc)));
+            } else {
+               cc = newTemp(Ity_I64);
+               assign(cc, binop(shift_op,
+                                binop(add_op,
+                                      binop(add_op,
+                                            binop(Iop_And64,
+                                                  mkexpr(arg_n),
+                                                  mkU64(one)),
+                                            binop(Iop_And64,
+                                                  mkexpr(arg_m),
+                                                  mkU64(one))),
+                                      mkU64(one)),
+                                mkU8(1)));
+               assign(res, binop(add_op,
+                                 binop(add_op,
+                                       binop(shift_op,
+                                             mkexpr(arg_n),
+                                             mkU8(1)),
+                                       binop(shift_op,
+                                             mkexpr(arg_m),
+                                             mkU8(1))),
+                                 mkexpr(cc)));
+            }
+            DIP("vrhadd.%c%d %c%d, %c%d, %c%d\n",
+                U ? 'u' : 's',
+                8 << size, reg_t, dreg, reg_t, nreg, reg_t, mreg);
+         } else {
+            if (U == 0)  {
+               switch(C) {
+                  case 0: {
+                     /* VAND  */
+                     HChar reg_t = Q ? 'q' : 'd';
+                     if (Q) {
+                        assign(res, binop(Iop_AndV128, mkexpr(arg_n), 
+                                                       mkexpr(arg_m)));
+                     } else {
+                        assign(res, binop(Iop_And64, mkexpr(arg_n),
+                                                     mkexpr(arg_m)));
+                     }
+                     DIP("vand %c%d, %c%d, %c%d\n",
+                         reg_t, dreg, reg_t, nreg, reg_t, mreg);
+                     break;
+                  }
+                  case 1: {
+                     /* VBIC  */
+                     HChar reg_t = Q ? 'q' : 'd';
+                     if (Q) {
+                        assign(res, binop(Iop_AndV128,mkexpr(arg_n),
+                               unop(Iop_NotV128, mkexpr(arg_m))));
+                     } else {
+                        assign(res, binop(Iop_And64, mkexpr(arg_n),
+                               unop(Iop_Not64, mkexpr(arg_m))));
+                     }
+                     DIP("vbic %c%d, %c%d, %c%d\n",
+                         reg_t, dreg, reg_t, nreg, reg_t, mreg);
+                     break;
+                  }
+                  case 2:
+                     if ( nreg != mreg) {
+                        /* VORR  */
+                        HChar reg_t = Q ? 'q' : 'd';
+                        if (Q) {
+                           assign(res, binop(Iop_OrV128, mkexpr(arg_n),
+                                                         mkexpr(arg_m)));
+                        } else {
+                           assign(res, binop(Iop_Or64, mkexpr(arg_n),
+                                                       mkexpr(arg_m)));
+                        }
+                        DIP("vorr %c%d, %c%d, %c%d\n",
+                            reg_t, dreg, reg_t, nreg, reg_t, mreg);
+                     } else {
+                        /* VMOV  */
+                        HChar reg_t = Q ? 'q' : 'd';
+                        assign(res, mkexpr(arg_m));
+                        DIP("vmov %c%d, %c%d\n", reg_t, dreg, reg_t, mreg);
+                     }
+                     break;
+                  case 3:{
+                     /* VORN  */
+                     HChar reg_t = Q ? 'q' : 'd';
+                     if (Q) {
+                        assign(res, binop(Iop_OrV128,mkexpr(arg_n),
+                               unop(Iop_NotV128, mkexpr(arg_m))));
+                     } else {
+                        assign(res, binop(Iop_Or64, mkexpr(arg_n),
+                               unop(Iop_Not64, mkexpr(arg_m))));
+                     }
+                     DIP("vorn %c%d, %c%d, %c%d\n",
+                         reg_t, dreg, reg_t, nreg, reg_t, mreg);
+                     break;
+                  }
+               }
+            } else {
+               switch(C) {
+                  case 0:
+                     /* VEOR (XOR)  */
+                     if (Q) {
+                        assign(res, binop(Iop_XorV128, mkexpr(arg_n),
+                                                       mkexpr(arg_m)));
+                     } else {
+                        assign(res, binop(Iop_Xor64, mkexpr(arg_n),
+                                                     mkexpr(arg_m)));
+                     }
+                     DIP("veor %c%u, %c%u, %c%u\n", Q ? 'q' : 'd', dreg,
+                           Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+                     break;
+                  case 1:
+                     /* VBSL  */
+                     if (Q) {
+                        IRTemp reg_d = newTemp(Ity_V128);
+                        assign(reg_d, getQReg(dreg));
+                        assign(res,
+                               binop(Iop_OrV128,
+                                     binop(Iop_AndV128, mkexpr(arg_n),
+                                                        mkexpr(reg_d)),
+                                     binop(Iop_AndV128,
+                                           mkexpr(arg_m),
+                                           unop(Iop_NotV128,
+                                                 mkexpr(reg_d)) ) ) );
+                     } else {
+                        IRTemp reg_d = newTemp(Ity_I64);
+                        assign(reg_d, getDRegI64(dreg));
+                        assign(res,
+                               binop(Iop_Or64,
+                                     binop(Iop_And64, mkexpr(arg_n),
+                                                      mkexpr(reg_d)),
+                                     binop(Iop_And64,
+                                           mkexpr(arg_m),
+                                           unop(Iop_Not64, mkexpr(reg_d)))));
+                     }
+                     DIP("vbsl %c%u, %c%u, %c%u\n",
+                         Q ? 'q' : 'd', dreg,
+                         Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+                     break;
+                  case 2:
+                     /* VBIT  */
+                     if (Q) {
+                        IRTemp reg_d = newTemp(Ity_V128);
+                        assign(reg_d, getQReg(dreg));
+                        assign(res,
+                               binop(Iop_OrV128,
+                                     binop(Iop_AndV128, mkexpr(arg_n), 
+                                                        mkexpr(arg_m)),
+                                     binop(Iop_AndV128,
+                                           mkexpr(reg_d),
+                                           unop(Iop_NotV128, mkexpr(arg_m)))));
+                     } else {
+                        IRTemp reg_d = newTemp(Ity_I64);
+                        assign(reg_d, getDRegI64(dreg));
+                        assign(res,
+                               binop(Iop_Or64,
+                                     binop(Iop_And64, mkexpr(arg_n),
+                                                      mkexpr(arg_m)),
+                                     binop(Iop_And64,
+                                           mkexpr(reg_d),
+                                           unop(Iop_Not64, mkexpr(arg_m)))));
+                     }
+                     DIP("vbit %c%u, %c%u, %c%u\n",
+                         Q ? 'q' : 'd', dreg,
+                         Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+                     break;
+                  case 3:
+                     /* VBIF  */
+                     if (Q) {
+                        IRTemp reg_d = newTemp(Ity_V128);
+                        assign(reg_d, getQReg(dreg));
+                        assign(res,
+                               binop(Iop_OrV128,
+                                     binop(Iop_AndV128, mkexpr(reg_d),
+                                                        mkexpr(arg_m)),
+                                     binop(Iop_AndV128,
+                                           mkexpr(arg_n),
+                                           unop(Iop_NotV128, mkexpr(arg_m)))));
+                     } else {
+                        IRTemp reg_d = newTemp(Ity_I64);
+                        assign(reg_d, getDRegI64(dreg));
+                        assign(res,
+                               binop(Iop_Or64,
+                                     binop(Iop_And64, mkexpr(reg_d),
+                                                      mkexpr(arg_m)),
+                                     binop(Iop_And64,
+                                           mkexpr(arg_n),
+                                           unop(Iop_Not64, mkexpr(arg_m)))));
+                     }
+                     DIP("vbif %c%u, %c%u, %c%u\n",
+                         Q ? 'q' : 'd', dreg,
+                         Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+                     break;
+               }
+            }
+         }
+         break;
+      case 2:
+         if (B == 0) {
+            /* VHSUB */
+            /* (A >> 1) - (B >> 1) - (NOT (A) & B & 1)   */
+            ULong imm = 0;
+            IRExpr *imm_val;
+            IROp subOp;
+            IROp notOp;
+            IROp andOp;
+            IROp shOp;
+            if (size == 3)
+               return False;
+            switch(size) {
+               case 0: imm = 0x101010101010101LL; break;
+               case 1: imm = 0x1000100010001LL; break;
+               case 2: imm = 0x100000001LL; break;
+               default: vassert(0);
+            }
+            if (Q) {
+               imm_val = binop(Iop_64HLtoV128, mkU64(imm), mkU64(imm));
+               andOp = Iop_AndV128;
+               notOp = Iop_NotV128;
+            } else {
+               imm_val = mkU64(imm);
+               andOp = Iop_And64;
+               notOp = Iop_Not64;
+            }
+            if (U) {
+               switch(size) {
+                  case 0:
+                     subOp = Q ? Iop_Sub8x16 : Iop_Sub8x8;
+                     shOp = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     break;
+                  case 1:
+                     subOp = Q ? Iop_Sub16x8 : Iop_Sub16x4;
+                     shOp = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     break;
+                  case 2:
+                     subOp = Q ? Iop_Sub32x4 : Iop_Sub32x2;
+                     shOp = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch(size) {
+                  case 0:
+                     subOp = Q ? Iop_Sub8x16 : Iop_Sub8x8;
+                     shOp = Q ? Iop_SarN8x16 : Iop_SarN8x8;
+                     break;
+                  case 1:
+                     subOp = Q ? Iop_Sub16x8 : Iop_Sub16x4;
+                     shOp = Q ? Iop_SarN16x8 : Iop_SarN16x4;
+                     break;
+                  case 2:
+                     subOp = Q ? Iop_Sub32x4 : Iop_Sub32x2;
+                     shOp = Q ? Iop_SarN32x4 : Iop_SarN32x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            assign(res,
+                   binop(subOp,
+                         binop(subOp,
+                               binop(shOp, mkexpr(arg_n), mkU8(1)),
+                               binop(shOp, mkexpr(arg_m), mkU8(1))),
+                         binop(andOp,
+                               binop(andOp,
+                                     unop(notOp, mkexpr(arg_n)),
+                                     mkexpr(arg_m)),
+                               imm_val)));
+            DIP("vhsub.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                mreg);
+         } else {
+            /* VQSUB */
+            IROp op, op2;
+            IRTemp tmp;
+            if (Q) {
+               switch (size) {
+                  case 0:
+                     op = U ? Iop_QSub8Ux16 : Iop_QSub8Sx16;
+                     op2 = Iop_Sub8x16;
+                     break;
+                  case 1:
+                     op = U ? Iop_QSub16Ux8 : Iop_QSub16Sx8;
+                     op2 = Iop_Sub16x8;
+                     break;
+                  case 2:
+                     op = U ? Iop_QSub32Ux4 : Iop_QSub32Sx4;
+                     op2 = Iop_Sub32x4;
+                     break;
+                  case 3:
+                     op = U ? Iop_QSub64Ux2 : Iop_QSub64Sx2;
+                     op2 = Iop_Sub64x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     op = U ? Iop_QSub8Ux8 : Iop_QSub8Sx8;
+                     op2 = Iop_Sub8x8;
+                     break;
+                  case 1:
+                     op = U ? Iop_QSub16Ux4 : Iop_QSub16Sx4;
+                     op2 = Iop_Sub16x4;
+                     break;
+                  case 2:
+                     op = U ? Iop_QSub32Ux2 : Iop_QSub32Sx2;
+                     op2 = Iop_Sub32x2;
+                     break;
+                  case 3:
+                     op = U ? Iop_QSub64Ux1 : Iop_QSub64Sx1;
+                     op2 = Iop_Sub64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            if (Q)
+               tmp = newTemp(Ity_V128);
+            else
+               tmp = newTemp(Ity_I64);
+            assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+            assign(tmp, binop(op2, mkexpr(arg_n), mkexpr(arg_m)));
+            setFlag_QC(mkexpr(res), mkexpr(tmp), Q, condT);
+            DIP("vqsub.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                mreg);
+         }
+         break;
+      case 3: {
+            IROp op;
+            if (Q) {
+               switch (size) {
+                  case 0: op = U ? Iop_CmpGT8Ux16 : Iop_CmpGT8Sx16; break;
+                  case 1: op = U ? Iop_CmpGT16Ux8 : Iop_CmpGT16Sx8; break;
+                  case 2: op = U ? Iop_CmpGT32Ux4 : Iop_CmpGT32Sx4; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0: op = U ? Iop_CmpGT8Ux8 : Iop_CmpGT8Sx8; break;
+                  case 1: op = U ? Iop_CmpGT16Ux4 : Iop_CmpGT16Sx4; break;
+                  case 2: op = U ? Iop_CmpGT32Ux2: Iop_CmpGT32Sx2; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+            }
+            if (B == 0) {
+               /* VCGT  */
+               assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+               DIP("vcgt.%c%u %c%u, %c%u, %c%u\n",
+                   U ? 'u' : 's', 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                   mreg);
+            } else {
+               /* VCGE  */
+               /* VCGE res, argn, argm
+                    is equal to
+                  VCGT tmp, argm, argn
+                  VNOT res, tmp */
+               assign(res,
+                      unop(Q ? Iop_NotV128 : Iop_Not64,
+                           binop(op, mkexpr(arg_m), mkexpr(arg_n))));
+               DIP("vcge.%c%u %c%u, %c%u, %c%u\n",
+                   U ? 'u' : 's', 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                   mreg);
+            }
+         }
+         break;
+      case 4:
+         if (B == 0) {
+            /* VSHL */
+            IROp op = Iop_INVALID, sub_op = Iop_INVALID;
+            IRTemp tmp = IRTemp_INVALID;
+            if (U) {
+               switch (size) {
+                  case 0: op = Q ? Iop_Shl8x16 : Iop_Shl8x8; break;
+                  case 1: op = Q ? Iop_Shl16x8 : Iop_Shl16x4; break;
+                  case 2: op = Q ? Iop_Shl32x4 : Iop_Shl32x2; break;
+                  case 3: op = Q ? Iop_Shl64x2 : Iop_Shl64; break;
+                  default: vassert(0);
+               }
+            } else {
+               tmp = newTemp(Q ? Ity_V128 : Ity_I64);
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_Sar8x16 : Iop_Sar8x8;
+                     sub_op = Q ? Iop_Sub8x16 : Iop_Sub8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_Sar16x8 : Iop_Sar16x4;
+                     sub_op = Q ? Iop_Sub16x8 : Iop_Sub16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_Sar32x4 : Iop_Sar32x2;
+                     sub_op = Q ? Iop_Sub32x4 : Iop_Sub32x2;
+                     break;
+                  case 3:
+                     op = Q ? Iop_Sar64x2 : Iop_Sar64;
+                     sub_op = Q ? Iop_Sub64x2 : Iop_Sub64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            if (U) {
+               if (!Q && (size == 3))
+                  assign(res, binop(op, mkexpr(arg_m),
+                                        unop(Iop_64to8, mkexpr(arg_n))));
+               else
+                  assign(res, binop(op, mkexpr(arg_m), mkexpr(arg_n)));
+            } else {
+               if (Q)
+                  assign(tmp, binop(sub_op,
+                                    binop(Iop_64HLtoV128, mkU64(0), mkU64(0)),
+                                    mkexpr(arg_n)));
+               else
+                  assign(tmp, binop(sub_op, mkU64(0), mkexpr(arg_n)));
+               if (!Q && (size == 3))
+                  assign(res, binop(op, mkexpr(arg_m),
+                                        unop(Iop_64to8, mkexpr(tmp))));
+               else
+                  assign(res, binop(op, mkexpr(arg_m), mkexpr(tmp)));
+            }
+            DIP("vshl.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
+                nreg);
+         } else {
+            /* VQSHL */
+            IROp op, op_rev, op_shrn, op_shln, cmp_neq, cmp_gt;
+            IRTemp tmp, shval, mask, old_shval;
+            UInt i;
+            ULong esize;
+            cmp_neq = Q ? Iop_CmpNEZ8x16 : Iop_CmpNEZ8x8;
+            cmp_gt = Q ? Iop_CmpGT8Sx16 : Iop_CmpGT8Sx8;
+            if (U) {
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_QShl8x16 : Iop_QShl8x8;
+                     op_rev = Q ? Iop_Shr8x16 : Iop_Shr8x8;
+                     op_shrn = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     op_shln = Q ? Iop_ShlN8x16 : Iop_ShlN8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_QShl16x8 : Iop_QShl16x4;
+                     op_rev = Q ? Iop_Shr16x8 : Iop_Shr16x4;
+                     op_shrn = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     op_shln = Q ? Iop_ShlN16x8 : Iop_ShlN16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_QShl32x4 : Iop_QShl32x2;
+                     op_rev = Q ? Iop_Shr32x4 : Iop_Shr32x2;
+                     op_shrn = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     op_shln = Q ? Iop_ShlN32x4 : Iop_ShlN32x2;
+                     break;
+                  case 3:
+                     op = Q ? Iop_QShl64x2 : Iop_QShl64x1;
+                     op_rev = Q ? Iop_Shr64x2 : Iop_Shr64;
+                     op_shrn = Q ? Iop_ShrN64x2 : Iop_Shr64;
+                     op_shln = Q ? Iop_ShlN64x2 : Iop_Shl64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_QSal8x16 : Iop_QSal8x8;
+                     op_rev = Q ? Iop_Sar8x16 : Iop_Sar8x8;
+                     op_shrn = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     op_shln = Q ? Iop_ShlN8x16 : Iop_ShlN8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_QSal16x8 : Iop_QSal16x4;
+                     op_rev = Q ? Iop_Sar16x8 : Iop_Sar16x4;
+                     op_shrn = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     op_shln = Q ? Iop_ShlN16x8 : Iop_ShlN16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_QSal32x4 : Iop_QSal32x2;
+                     op_rev = Q ? Iop_Sar32x4 : Iop_Sar32x2;
+                     op_shrn = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     op_shln = Q ? Iop_ShlN32x4 : Iop_ShlN32x2;
+                     break;
+                  case 3:
+                     op = Q ? Iop_QSal64x2 : Iop_QSal64x1;
+                     op_rev = Q ? Iop_Sar64x2 : Iop_Sar64;
+                     op_shrn = Q ? Iop_ShrN64x2 : Iop_Shr64;
+                     op_shln = Q ? Iop_ShlN64x2 : Iop_Shl64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            if (Q) {
+               tmp = newTemp(Ity_V128);
+               shval = newTemp(Ity_V128);
+               mask = newTemp(Ity_V128);
+            } else {
+               tmp = newTemp(Ity_I64);
+               shval = newTemp(Ity_I64);
+               mask = newTemp(Ity_I64);
+            }
+            assign(res, binop(op, mkexpr(arg_m), mkexpr(arg_n)));
+            /* Only the least significant byte of the second argument is
+               used.  Copy this byte to the whole vector element. */
+            assign(shval, binop(op_shrn,
+                                binop(op_shln,
+                                       mkexpr(arg_n),
+                                       mkU8((8 << size) - 8)),
+                                mkU8((8 << size) - 8)));
+            for(i = 0; i < size; i++) {
+               old_shval = shval;
+               shval = newTemp(Q ? Ity_V128 : Ity_I64);
+               assign(shval, binop(Q ? Iop_OrV128 : Iop_Or64,
+                                   mkexpr(old_shval),
+                                   binop(op_shln,
+                                         mkexpr(old_shval),
+                                         mkU8(8 << i))));
+            }
+            /* If the shift is greater than or equal to the element size and
+               the element is non-zero, then the QC flag should be set. */
+            esize = (8 << size) - 1;
+            esize = (esize <<  8) | esize;
+            esize = (esize << 16) | esize;
+            esize = (esize << 32) | esize;
+            setFlag_QC(binop(Q ? Iop_AndV128 : Iop_And64,
+                             binop(cmp_gt, mkexpr(shval),
+                                           Q ? mkU128(esize) : mkU64(esize)),
+                             unop(cmp_neq, mkexpr(arg_m))),
+                       Q ? mkU128(0) : mkU64(0),
+                       Q, condT);
+            /* Otherwise the QC flag should be set if the shift value is
+               positive and the result, right-shifted by the same value, is
+               not equal to the left argument. */
+            assign(mask, binop(cmp_gt, mkexpr(shval),
+                                       Q ? mkU128(0) : mkU64(0)));
+            if (!Q && size == 3)
+               assign(tmp, binop(op_rev, mkexpr(res),
+                                         unop(Iop_64to8, mkexpr(arg_n))));
+            else
+               assign(tmp, binop(op_rev, mkexpr(res), mkexpr(arg_n)));
+            setFlag_QC(binop(Q ? Iop_AndV128 : Iop_And64,
+                             mkexpr(tmp), mkexpr(mask)),
+                       binop(Q ? Iop_AndV128 : Iop_And64,
+                             mkexpr(arg_m), mkexpr(mask)),
+                       Q, condT);
+            DIP("vqshl.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
+                nreg);
+         }
+         break;
+      case 5:
+         if (B == 0) {
+            /* VRSHL */
+            IROp op, op_shrn, op_shln, cmp_gt, op_add;
+            IRTemp shval, old_shval, imm_val, round;
+            UInt i;
+            ULong imm;
+            cmp_gt = Q ? Iop_CmpGT8Sx16 : Iop_CmpGT8Sx8;
+            imm = 1L;
+            switch (size) {
+               case 0: imm = (imm <<  8) | imm; /* fall through */
+               case 1: imm = (imm << 16) | imm; /* fall through */
+               case 2: imm = (imm << 32) | imm; /* fall through */
+               case 3: break;
+               default: vassert(0);
+            }
+            imm_val = newTemp(Q ? Ity_V128 : Ity_I64);
+            round = newTemp(Q ? Ity_V128 : Ity_I64);
+            assign(imm_val, Q ? mkU128(imm) : mkU64(imm));
+            if (U) {
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_Shl8x16 : Iop_Shl8x8;
+                     op_add = Q ? Iop_Add8x16 : Iop_Add8x8;
+                     op_shrn = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     op_shln = Q ? Iop_ShlN8x16 : Iop_ShlN8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_Shl16x8 : Iop_Shl16x4;
+                     op_add = Q ? Iop_Add16x8 : Iop_Add16x4;
+                     op_shrn = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     op_shln = Q ? Iop_ShlN16x8 : Iop_ShlN16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_Shl32x4 : Iop_Shl32x2;
+                     op_add = Q ? Iop_Add32x4 : Iop_Add32x2;
+                     op_shrn = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     op_shln = Q ? Iop_ShlN32x4 : Iop_ShlN32x2;
+                     break;
+                  case 3:
+                     op = Q ? Iop_Shl64x2 : Iop_Shl64;
+                     op_add = Q ? Iop_Add64x2 : Iop_Add64;
+                     op_shrn = Q ? Iop_ShrN64x2 : Iop_Shr64;
+                     op_shln = Q ? Iop_ShlN64x2 : Iop_Shl64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_Sal8x16 : Iop_Sal8x8;
+                     op_add = Q ? Iop_Add8x16 : Iop_Add8x8;
+                     op_shrn = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     op_shln = Q ? Iop_ShlN8x16 : Iop_ShlN8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_Sal16x8 : Iop_Sal16x4;
+                     op_add = Q ? Iop_Add16x8 : Iop_Add16x4;
+                     op_shrn = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     op_shln = Q ? Iop_ShlN16x8 : Iop_ShlN16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_Sal32x4 : Iop_Sal32x2;
+                     op_add = Q ? Iop_Add32x4 : Iop_Add32x2;
+                     op_shrn = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     op_shln = Q ? Iop_ShlN32x4 : Iop_ShlN32x2;
+                     break;
+                  case 3:
+                     op = Q ? Iop_Sal64x2 : Iop_Sal64x1;
+                     op_add = Q ? Iop_Add64x2 : Iop_Add64;
+                     op_shrn = Q ? Iop_ShrN64x2 : Iop_Shr64;
+                     op_shln = Q ? Iop_ShlN64x2 : Iop_Shl64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            if (Q) {
+               shval = newTemp(Ity_V128);
+            } else {
+               shval = newTemp(Ity_I64);
+            }
+            /* Only the least significant byte of the second argument is
+               used.  Copy this byte to the whole vector element. */
+            assign(shval, binop(op_shrn,
+                                binop(op_shln,
+                                       mkexpr(arg_n),
+                                       mkU8((8 << size) - 8)),
+                                mkU8((8 << size) - 8)));
+            for (i = 0; i < size; i++) {
+               old_shval = shval;
+               shval = newTemp(Q ? Ity_V128 : Ity_I64);
+               assign(shval, binop(Q ? Iop_OrV128 : Iop_Or64,
+                                   mkexpr(old_shval),
+                                   binop(op_shln,
+                                         mkexpr(old_shval),
+                                         mkU8(8 << i))));
+            }
+            /* Compute the result */
+            if (!Q && size == 3 && U) {
+               assign(round, binop(Q ? Iop_AndV128 : Iop_And64,
+                                   binop(op,
+                                         mkexpr(arg_m),
+                                         unop(Iop_64to8,
+                                              binop(op_add,
+                                                    mkexpr(arg_n),
+                                                    mkexpr(imm_val)))),
+                                   binop(Q ? Iop_AndV128 : Iop_And64,
+                                         mkexpr(imm_val),
+                                         binop(cmp_gt,
+                                               Q ? mkU128(0) : mkU64(0),
+                                               mkexpr(arg_n)))));
+               assign(res, binop(op_add,
+                                 binop(op,
+                                       mkexpr(arg_m),
+                                       unop(Iop_64to8, mkexpr(arg_n))),
+                                 mkexpr(round)));
+            } else {
+               assign(round, binop(Q ? Iop_AndV128 : Iop_And64,
+                                   binop(op,
+                                         mkexpr(arg_m),
+                                         binop(op_add,
+                                               mkexpr(arg_n),
+                                               mkexpr(imm_val))),
+                                   binop(Q ? Iop_AndV128 : Iop_And64,
+                                         mkexpr(imm_val),
+                                         binop(cmp_gt,
+                                               Q ? mkU128(0) : mkU64(0),
+                                               mkexpr(arg_n)))));
+               assign(res, binop(op_add,
+                                 binop(op, mkexpr(arg_m), mkexpr(arg_n)),
+                                 mkexpr(round)));
+            }
+            DIP("vrshl.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
+                nreg);
+         } else {
+            /* VQRSHL */
+            IROp op, op_rev, op_shrn, op_shln, cmp_neq, cmp_gt, op_add;
+            IRTemp tmp, shval, mask, old_shval, imm_val, round;
+            UInt i;
+            ULong esize, imm;
+            cmp_neq = Q ? Iop_CmpNEZ8x16 : Iop_CmpNEZ8x8;
+            cmp_gt = Q ? Iop_CmpGT8Sx16 : Iop_CmpGT8Sx8;
+            imm = 1L;
+            switch (size) {
+               case 0: imm = (imm <<  8) | imm; /* fall through */
+               case 1: imm = (imm << 16) | imm; /* fall through */
+               case 2: imm = (imm << 32) | imm; /* fall through */
+               case 3: break;
+               default: vassert(0);
+            }
+            imm_val = newTemp(Q ? Ity_V128 : Ity_I64);
+            round = newTemp(Q ? Ity_V128 : Ity_I64);
+            assign(imm_val, Q ? mkU128(imm) : mkU64(imm));
+            if (U) {
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_QShl8x16 : Iop_QShl8x8;
+                     op_add = Q ? Iop_Add8x16 : Iop_Add8x8;
+                     op_rev = Q ? Iop_Shr8x16 : Iop_Shr8x8;
+                     op_shrn = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     op_shln = Q ? Iop_ShlN8x16 : Iop_ShlN8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_QShl16x8 : Iop_QShl16x4;
+                     op_add = Q ? Iop_Add16x8 : Iop_Add16x4;
+                     op_rev = Q ? Iop_Shr16x8 : Iop_Shr16x4;
+                     op_shrn = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     op_shln = Q ? Iop_ShlN16x8 : Iop_ShlN16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_QShl32x4 : Iop_QShl32x2;
+                     op_add = Q ? Iop_Add32x4 : Iop_Add32x2;
+                     op_rev = Q ? Iop_Shr32x4 : Iop_Shr32x2;
+                     op_shrn = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     op_shln = Q ? Iop_ShlN32x4 : Iop_ShlN32x2;
+                     break;
+                  case 3:
+                     op = Q ? Iop_QShl64x2 : Iop_QShl64x1;
+                     op_add = Q ? Iop_Add64x2 : Iop_Add64;
+                     op_rev = Q ? Iop_Shr64x2 : Iop_Shr64;
+                     op_shrn = Q ? Iop_ShrN64x2 : Iop_Shr64;
+                     op_shln = Q ? Iop_ShlN64x2 : Iop_Shl64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_QSal8x16 : Iop_QSal8x8;
+                     op_add = Q ? Iop_Add8x16 : Iop_Add8x8;
+                     op_rev = Q ? Iop_Sar8x16 : Iop_Sar8x8;
+                     op_shrn = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     op_shln = Q ? Iop_ShlN8x16 : Iop_ShlN8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_QSal16x8 : Iop_QSal16x4;
+                     op_add = Q ? Iop_Add16x8 : Iop_Add16x4;
+                     op_rev = Q ? Iop_Sar16x8 : Iop_Sar16x4;
+                     op_shrn = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     op_shln = Q ? Iop_ShlN16x8 : Iop_ShlN16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_QSal32x4 : Iop_QSal32x2;
+                     op_add = Q ? Iop_Add32x4 : Iop_Add32x2;
+                     op_rev = Q ? Iop_Sar32x4 : Iop_Sar32x2;
+                     op_shrn = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     op_shln = Q ? Iop_ShlN32x4 : Iop_ShlN32x2;
+                     break;
+                  case 3:
+                     op = Q ? Iop_QSal64x2 : Iop_QSal64x1;
+                     op_add = Q ? Iop_Add64x2 : Iop_Add64;
+                     op_rev = Q ? Iop_Sar64x2 : Iop_Sar64;
+                     op_shrn = Q ? Iop_ShrN64x2 : Iop_Shr64;
+                     op_shln = Q ? Iop_ShlN64x2 : Iop_Shl64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            if (Q) {
+               tmp = newTemp(Ity_V128);
+               shval = newTemp(Ity_V128);
+               mask = newTemp(Ity_V128);
+            } else {
+               tmp = newTemp(Ity_I64);
+               shval = newTemp(Ity_I64);
+               mask = newTemp(Ity_I64);
+            }
+            /* Only the least significant byte of the second argument is
+               used.  Copy this byte to the whole vector element. */
+            assign(shval, binop(op_shrn,
+                                binop(op_shln,
+                                       mkexpr(arg_n),
+                                       mkU8((8 << size) - 8)),
+                                mkU8((8 << size) - 8)));
+            for (i = 0; i < size; i++) {
+               old_shval = shval;
+               shval = newTemp(Q ? Ity_V128 : Ity_I64);
+               assign(shval, binop(Q ? Iop_OrV128 : Iop_Or64,
+                                   mkexpr(old_shval),
+                                   binop(op_shln,
+                                         mkexpr(old_shval),
+                                         mkU8(8 << i))));
+            }
+            /* Compute the result */
+            assign(round, binop(Q ? Iop_AndV128 : Iop_And64,
+                                binop(op,
+                                      mkexpr(arg_m),
+                                      binop(op_add,
+                                            mkexpr(arg_n),
+                                            mkexpr(imm_val))),
+                                binop(Q ? Iop_AndV128 : Iop_And64,
+                                      mkexpr(imm_val),
+                                      binop(cmp_gt,
+                                            Q ? mkU128(0) : mkU64(0),
+                                            mkexpr(arg_n)))));
+            assign(res, binop(op_add,
+                              binop(op, mkexpr(arg_m), mkexpr(arg_n)),
+                              mkexpr(round)));
+            /* If the shift is greater than or equal to the element size and
+               the element is non-zero, then the QC flag should be set. */
+            esize = (8 << size) - 1;
+            esize = (esize <<  8) | esize;
+            esize = (esize << 16) | esize;
+            esize = (esize << 32) | esize;
+            setFlag_QC(binop(Q ? Iop_AndV128 : Iop_And64,
+                             binop(cmp_gt, mkexpr(shval),
+                                           Q ? mkU128(esize) : mkU64(esize)),
+                             unop(cmp_neq, mkexpr(arg_m))),
+                       Q ? mkU128(0) : mkU64(0),
+                       Q, condT);
+            /* Otherwise the QC flag should be set if the shift value is
+               positive and the result, right-shifted by the same value, is
+               not equal to the left argument. */
+            assign(mask, binop(cmp_gt, mkexpr(shval),
+                               Q ? mkU128(0) : mkU64(0)));
+            if (!Q && size == 3)
+               assign(tmp, binop(op_rev, mkexpr(res),
+                                         unop(Iop_64to8, mkexpr(arg_n))));
+            else
+               assign(tmp, binop(op_rev, mkexpr(res), mkexpr(arg_n)));
+            setFlag_QC(binop(Q ? Iop_AndV128 : Iop_And64,
+                             mkexpr(tmp), mkexpr(mask)),
+                       binop(Q ? Iop_AndV128 : Iop_And64,
+                             mkexpr(arg_m), mkexpr(mask)),
+                       Q, condT);
+            DIP("vqrshl.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, Q ? 'q' : 'd',
+                nreg);
+         }
+         break;
+      case 6:
+         /* VMAX, VMIN  */
+         if (B == 0) {
+            /* VMAX */
+            IROp op;
+            if (U == 0) {
+               switch (size) {
+                  case 0: op = Q ? Iop_Max8Sx16 : Iop_Max8Sx8; break;
+                  case 1: op = Q ? Iop_Max16Sx8 : Iop_Max16Sx4; break;
+                  case 2: op = Q ? Iop_Max32Sx4 : Iop_Max32Sx2; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0: op = Q ? Iop_Max8Ux16 : Iop_Max8Ux8; break;
+                  case 1: op = Q ? Iop_Max16Ux8 : Iop_Max16Ux4; break;
+                  case 2: op = Q ? Iop_Max32Ux4 : Iop_Max32Ux2; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+            }
+            assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+            DIP("vmax.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                mreg);
+         } else {
+            /* VMIN */
+            IROp op;
+            if (U == 0) {
+               switch (size) {
+                  case 0: op = Q ? Iop_Min8Sx16 : Iop_Min8Sx8; break;
+                  case 1: op = Q ? Iop_Min16Sx8 : Iop_Min16Sx4; break;
+                  case 2: op = Q ? Iop_Min32Sx4 : Iop_Min32Sx2; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0: op = Q ? Iop_Min8Ux16 : Iop_Min8Ux8; break;
+                  case 1: op = Q ? Iop_Min16Ux8 : Iop_Min16Ux4; break;
+                  case 2: op = Q ? Iop_Min32Ux4 : Iop_Min32Ux2; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+            }
+            assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+            DIP("vmin.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                mreg);
+         }
+         break;
+      case 7:
+         if (B == 0) {
+            /* VABD */
+            IROp op_cmp, op_sub;
+            IRTemp cond;
+            if ((theInstr >> 23) & 1) {
+               vpanic("VABDL should not be in dis_neon_data_3same\n");
+            }
+            if (Q) {
+               switch (size) {
+                  case 0:
+                     op_cmp = U ? Iop_CmpGT8Ux16 : Iop_CmpGT8Sx16;
+                     op_sub = Iop_Sub8x16;
+                     break;
+                  case 1:
+                     op_cmp = U ? Iop_CmpGT16Ux8 : Iop_CmpGT16Sx8;
+                     op_sub = Iop_Sub16x8;
+                     break;
+                  case 2:
+                     op_cmp = U ? Iop_CmpGT32Ux4 : Iop_CmpGT32Sx4;
+                     op_sub = Iop_Sub32x4;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     op_cmp = U ? Iop_CmpGT8Ux8 : Iop_CmpGT8Sx8;
+                     op_sub = Iop_Sub8x8;
+                     break;
+                  case 1:
+                     op_cmp = U ? Iop_CmpGT16Ux4 : Iop_CmpGT16Sx4;
+                     op_sub = Iop_Sub16x4;
+                     break;
+                  case 2:
+                     op_cmp = U ? Iop_CmpGT32Ux2 : Iop_CmpGT32Sx2;
+                     op_sub = Iop_Sub32x2;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            }
+            if (Q) {
+               cond = newTemp(Ity_V128);
+            } else {
+               cond = newTemp(Ity_I64);
+            }
+            assign(cond, binop(op_cmp, mkexpr(arg_n), mkexpr(arg_m)));
+            assign(res, binop(Q ? Iop_OrV128 : Iop_Or64,
+                              binop(Q ? Iop_AndV128 : Iop_And64,
+                                    binop(op_sub, mkexpr(arg_n),
+                                                  mkexpr(arg_m)),
+                                    mkexpr(cond)),
+                              binop(Q ? Iop_AndV128 : Iop_And64,
+                                    binop(op_sub, mkexpr(arg_m),
+                                                  mkexpr(arg_n)),
+                                    unop(Q ? Iop_NotV128 : Iop_Not64,
+                                         mkexpr(cond)))));
+            DIP("vabd.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                mreg);
+         } else {
+            /* VABA */
+            IROp op_cmp, op_sub, op_add;
+            IRTemp cond, acc, tmp;
+            if ((theInstr >> 23) & 1) {
+               vpanic("VABAL should not be in dis_neon_data_3same");
+            }
+            if (Q) {
+               switch (size) {
+                  case 0:
+                     op_cmp = U ? Iop_CmpGT8Ux16 : Iop_CmpGT8Sx16;
+                     op_sub = Iop_Sub8x16;
+                     op_add = Iop_Add8x16;
+                     break;
+                  case 1:
+                     op_cmp = U ? Iop_CmpGT16Ux8 : Iop_CmpGT16Sx8;
+                     op_sub = Iop_Sub16x8;
+                     op_add = Iop_Add16x8;
+                     break;
+                  case 2:
+                     op_cmp = U ? Iop_CmpGT32Ux4 : Iop_CmpGT32Sx4;
+                     op_sub = Iop_Sub32x4;
+                     op_add = Iop_Add32x4;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     op_cmp = U ? Iop_CmpGT8Ux8 : Iop_CmpGT8Sx8;
+                     op_sub = Iop_Sub8x8;
+                     op_add = Iop_Add8x8;
+                     break;
+                  case 1:
+                     op_cmp = U ? Iop_CmpGT16Ux4 : Iop_CmpGT16Sx4;
+                     op_sub = Iop_Sub16x4;
+                     op_add = Iop_Add16x4;
+                     break;
+                  case 2:
+                     op_cmp = U ? Iop_CmpGT32Ux2 : Iop_CmpGT32Sx2;
+                     op_sub = Iop_Sub32x2;
+                     op_add = Iop_Add32x2;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            }
+            if (Q) {
+               cond = newTemp(Ity_V128);
+               acc = newTemp(Ity_V128);
+               tmp = newTemp(Ity_V128);
+               assign(acc, getQReg(dreg));
+            } else {
+               cond = newTemp(Ity_I64);
+               acc = newTemp(Ity_I64);
+               tmp = newTemp(Ity_I64);
+               assign(acc, getDRegI64(dreg));
+            }
+            assign(cond, binop(op_cmp, mkexpr(arg_n), mkexpr(arg_m)));
+            assign(tmp, binop(Q ? Iop_OrV128 : Iop_Or64,
+                              binop(Q ? Iop_AndV128 : Iop_And64,
+                                    binop(op_sub, mkexpr(arg_n),
+                                                  mkexpr(arg_m)),
+                                    mkexpr(cond)),
+                              binop(Q ? Iop_AndV128 : Iop_And64,
+                                    binop(op_sub, mkexpr(arg_m),
+                                                  mkexpr(arg_n)),
+                                    unop(Q ? Iop_NotV128 : Iop_Not64,
+                                         mkexpr(cond)))));
+            assign(res, binop(op_add, mkexpr(acc), mkexpr(tmp)));
+            DIP("vaba.%c%u %c%u, %c%u, %c%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                mreg);
+         }
+         break;
+      case 8:
+         if (B == 0) {
+            IROp op;
+            if (U == 0) {
+               /* VADD  */
+               switch (size) {
+                  case 0: op = Q ? Iop_Add8x16 : Iop_Add8x8; break;
+                  case 1: op = Q ? Iop_Add16x8 : Iop_Add16x4; break;
+                  case 2: op = Q ? Iop_Add32x4 : Iop_Add32x2; break;
+                  case 3: op = Q ? Iop_Add64x2 : Iop_Add64; break;
+                  default: vassert(0);
+               }
+               DIP("vadd.i%u %c%u, %c%u, %c%u\n",
+                   8 << size, Q ? 'q' : 'd',
+                   dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+            } else {
+               /* VSUB  */
+               switch (size) {
+                  case 0: op = Q ? Iop_Sub8x16 : Iop_Sub8x8; break;
+                  case 1: op = Q ? Iop_Sub16x8 : Iop_Sub16x4; break;
+                  case 2: op = Q ? Iop_Sub32x4 : Iop_Sub32x2; break;
+                  case 3: op = Q ? Iop_Sub64x2 : Iop_Sub64; break;
+                  default: vassert(0);
+               }
+               DIP("vsub.i%u %c%u, %c%u, %c%u\n",
+                   8 << size, Q ? 'q' : 'd',
+                   dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+            }
+            assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+         } else {
+            IROp op;
+            switch (size) {
+               case 0: op = Q ? Iop_CmpNEZ8x16 : Iop_CmpNEZ8x8; break;
+               case 1: op = Q ? Iop_CmpNEZ16x8 : Iop_CmpNEZ16x4; break;
+               case 2: op = Q ? Iop_CmpNEZ32x4 : Iop_CmpNEZ32x2; break;
+               case 3: op = Q ? Iop_CmpNEZ64x2 : Iop_CmpwNEZ64; break;
+               default: vassert(0);
+            }
+            if (U == 0) {
+               /* VTST  */
+               assign(res, unop(op, binop(Q ? Iop_AndV128 : Iop_And64,
+                                          mkexpr(arg_n),
+                                          mkexpr(arg_m))));
+               DIP("vtst.%u %c%u, %c%u, %c%u\n",
+                   8 << size, Q ? 'q' : 'd',
+                   dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+            } else {
+               /* VCEQ  */
+               assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
+                                unop(op,
+                                     binop(Q ? Iop_XorV128 : Iop_Xor64,
+                                           mkexpr(arg_n),
+                                           mkexpr(arg_m)))));
+               DIP("vceq.i%u %c%u, %c%u, %c%u\n",
+                   8 << size, Q ? 'q' : 'd',
+                   dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+            }
+         }
+         break;
+      case 9:
+         if (B == 0) {
+            /* VMLA, VMLS (integer) */
+            IROp op, op2;
+            UInt P = (theInstr >> 24) & 1;
+            if (P) {
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_Mul8x16 : Iop_Mul8x8;
+                     op2 = Q ? Iop_Sub8x16 : Iop_Sub8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_Mul16x8 : Iop_Mul16x4;
+                     op2 = Q ? Iop_Sub16x8 : Iop_Sub16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_Mul32x4 : Iop_Mul32x2;
+                     op2 = Q ? Iop_Sub32x4 : Iop_Sub32x2;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_Mul8x16 : Iop_Mul8x8;
+                     op2 = Q ? Iop_Add8x16 : Iop_Add8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_Mul16x8 : Iop_Mul16x4;
+                     op2 = Q ? Iop_Add16x8 : Iop_Add16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_Mul32x4 : Iop_Mul32x2;
+                     op2 = Q ? Iop_Add32x4 : Iop_Add32x2;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            }
+            assign(res, binop(op2,
+                              Q ? getQReg(dreg) : getDRegI64(dreg),
+                              binop(op, mkexpr(arg_n), mkexpr(arg_m))));
+            DIP("vml%c.i%u %c%u, %c%u, %c%u\n",
+                P ? 's' : 'a', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                mreg);
+         } else {
+            /* VMUL */
+            IROp op;
+            UInt P = (theInstr >> 24) & 1;
+            if (P) {
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_PolynomialMul8x16 : Iop_PolynomialMul8x8;
+                     break;
+                  case 1: case 2: case 3: return False;
+                  default: vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0: op = Q ? Iop_Mul8x16 : Iop_Mul8x8; break;
+                  case 1: op = Q ? Iop_Mul16x8 : Iop_Mul16x4; break;
+                  case 2: op = Q ? Iop_Mul32x4 : Iop_Mul32x2; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+            }
+            assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+            DIP("vmul.%c%u %c%u, %c%u, %c%u\n",
+                P ? 'p' : 'i', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd',
+                mreg);
+         }
+         break;
+      case 10: {
+         /* VPMAX, VPMIN  */
+         UInt P = (theInstr >> 4) & 1;
+         IROp op;
+         if (Q)
+            return False;
+         if (P) {
+            switch (size) {
+               case 0: op = U ? Iop_PwMin8Ux8  : Iop_PwMin8Sx8; break;
+               case 1: op = U ? Iop_PwMin16Ux4 : Iop_PwMin16Sx4; break;
+               case 2: op = U ? Iop_PwMin32Ux2 : Iop_PwMin32Sx2; break;
+               case 3: return False;
+               default: vassert(0);
+            }
+         } else {
+            switch (size) {
+               case 0: op = U ? Iop_PwMax8Ux8  : Iop_PwMax8Sx8; break;
+               case 1: op = U ? Iop_PwMax16Ux4 : Iop_PwMax16Sx4; break;
+               case 2: op = U ? Iop_PwMax32Ux2 : Iop_PwMax32Sx2; break;
+               case 3: return False;
+               default: vassert(0);
+            }
+         }
+         assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+         DIP("vp%s.%c%u %c%u, %c%u, %c%u\n",
+             P ? "min" : "max", U ? 'u' : 's',
+             8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg,
+             Q ? 'q' : 'd', mreg);
+         break;
+      }
+      case 11:
+         if (B == 0) {
+            if (U == 0) {
+               /* VQDMULH  */
+               IROp op ,op2;
+               ULong imm;
+               switch (size) {
+                  case 0: case 3:
+                     return False;
+                  case 1:
+                     op = Q ? Iop_QDMulHi16Sx8 : Iop_QDMulHi16Sx4;
+                     op2 = Q ? Iop_CmpEQ16x8 : Iop_CmpEQ16x4;
+                     imm = 1LL << 15;
+                     imm = (imm << 16) | imm;
+                     imm = (imm << 32) | imm;
+                     break;
+                  case 2:
+                     op = Q ? Iop_QDMulHi32Sx4 : Iop_QDMulHi32Sx2;
+                     op2 = Q ? Iop_CmpEQ32x4 : Iop_CmpEQ32x2;
+                     imm = 1LL << 31;
+                     imm = (imm << 32) | imm;
+                     break;
+                  default:
+                     vassert(0);
+               }
+               assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+               setFlag_QC(binop(Q ? Iop_AndV128 : Iop_And64,
+                                binop(op2, mkexpr(arg_n),
+                                           Q ? mkU128(imm) : mkU64(imm)),
+                                binop(op2, mkexpr(arg_m),
+                                           Q ? mkU128(imm) : mkU64(imm))),
+                          Q ? mkU128(0) : mkU64(0),
+                          Q, condT);
+               DIP("vqdmulh.s%u %c%u, %c%u, %c%u\n",
+                   8 << size, Q ? 'q' : 'd',
+                   dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+            } else {
+               /* VQRDMULH */
+               IROp op ,op2;
+               ULong imm;
+               switch(size) {
+                  case 0: case 3:
+                     return False;
+                  case 1:
+                     imm = 1LL << 15;
+                     imm = (imm << 16) | imm;
+                     imm = (imm << 32) | imm;
+                     op = Q ? Iop_QRDMulHi16Sx8 : Iop_QRDMulHi16Sx4;
+                     op2 = Q ? Iop_CmpEQ16x8 : Iop_CmpEQ16x4;
+                     break;
+                  case 2:
+                     imm = 1LL << 31;
+                     imm = (imm << 32) | imm;
+                     op = Q ? Iop_QRDMulHi32Sx4 : Iop_QRDMulHi32Sx2;
+                     op2 = Q ? Iop_CmpEQ32x4 : Iop_CmpEQ32x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+               assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+               setFlag_QC(binop(Q ? Iop_AndV128 : Iop_And64,
+                                binop(op2, mkexpr(arg_n),
+                                           Q ? mkU128(imm) : mkU64(imm)),
+                                binop(op2, mkexpr(arg_m),
+                                           Q ? mkU128(imm) : mkU64(imm))),
+                          Q ? mkU128(0) : mkU64(0),
+                          Q, condT);
+               DIP("vqrdmulh.s%u %c%u, %c%u, %c%u\n",
+                   8 << size, Q ? 'q' : 'd',
+                   dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+            }
+         } else {
+            if (U == 0) {
+               /* VPADD */
+               IROp op;
+               if (Q)
+                  return False;
+               switch (size) {
+                  case 0: op = Q ? Iop_PwAdd8x16 : Iop_PwAdd8x8;  break;
+                  case 1: op = Q ? Iop_PwAdd16x8 : Iop_PwAdd16x4; break;
+                  case 2: op = Q ? Iop_PwAdd32x4 : Iop_PwAdd32x2; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+               assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+               DIP("vpadd.i%d %c%u, %c%u, %c%u\n",
+                   8 << size, Q ? 'q' : 'd',
+                   dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+            }
+         }
+         break;
+      /* Starting from here these are FP SIMD cases */
+      case 13:
+         if (B == 0) {
+            IROp op;
+            if (U == 0) {
+               if ((C >> 1) == 0) {
+                  /* VADD  */
+                  op = Q ? Iop_Add32Fx4 : Iop_Add32Fx2 ;
+                  DIP("vadd.f32 %c%u, %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+               } else {
+                  /* VSUB  */
+                  op = Q ? Iop_Sub32Fx4 : Iop_Sub32Fx2 ;
+                  DIP("vsub.f32 %c%u, %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+               }
+            } else {
+               if ((C >> 1) == 0) {
+                  /* VPADD */
+                  if (Q)
+                     return False;
+                  op = Iop_PwAdd32Fx2;
+                  DIP("vpadd.f32 d%u, d%u, d%u\n", dreg, nreg, mreg);
+               } else {
+                  /* VABD  */
+                  if (Q) {
+                     assign(res, unop(Iop_Abs32Fx4,
+                                      triop(Iop_Sub32Fx4,
+                                            get_FAKE_roundingmode(),
+                                            mkexpr(arg_n),
+                                            mkexpr(arg_m))));
+                  } else {
+                     assign(res, unop(Iop_Abs32Fx2,
+                                      binop(Iop_Sub32Fx2,
+                                            mkexpr(arg_n),
+                                            mkexpr(arg_m))));
+                  }
+                  DIP("vabd.f32 %c%u, %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+                  break;
+               }
+            }
+            assign(res, binop_w_fake_RM(op, mkexpr(arg_n), mkexpr(arg_m)));
+         } else {
+            if (U == 0) {
+               /* VMLA, VMLS  */
+               IROp op, op2;
+               UInt P = (theInstr >> 21) & 1;
+               if (P) {
+                  switch (size & 1) {
+                     case 0:
+                        op = Q ? Iop_Mul32Fx4 : Iop_Mul32Fx2;
+                        op2 = Q ? Iop_Sub32Fx4 : Iop_Sub32Fx2;
+                        break;
+                     case 1: return False;
+                     default: vassert(0);
+                  }
+               } else {
+                  switch (size & 1) {
+                     case 0:
+                        op = Q ? Iop_Mul32Fx4 : Iop_Mul32Fx2;
+                        op2 = Q ? Iop_Add32Fx4 : Iop_Add32Fx2;
+                        break;
+                     case 1: return False;
+                     default: vassert(0);
+                  }
+               }
+               assign(res, binop_w_fake_RM(
+                              op2,
+                              Q ? getQReg(dreg) : getDRegI64(dreg),
+                              binop_w_fake_RM(op, mkexpr(arg_n),
+                                                  mkexpr(arg_m))));
+
+               DIP("vml%c.f32 %c%u, %c%u, %c%u\n",
+                   P ? 's' : 'a', Q ? 'q' : 'd',
+                   dreg, Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+            } else {
+               /* VMUL  */
+               IROp op;
+               if ((C >> 1) != 0)
+                  return False;
+               op = Q ? Iop_Mul32Fx4 : Iop_Mul32Fx2 ;
+               assign(res, binop_w_fake_RM(op, mkexpr(arg_n), mkexpr(arg_m)));
+               DIP("vmul.f32 %c%u, %c%u, %c%u\n",
+                   Q ? 'q' : 'd', dreg,
+                   Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+            }
+         }
+         break;
+      case 14:
+         if (B == 0) {
+            if (U == 0) {
+               if ((C >> 1) == 0) {
+                  /* VCEQ  */
+                  IROp op;
+                  if ((theInstr >> 20) & 1)
+                     return False;
+                  op = Q ? Iop_CmpEQ32Fx4 : Iop_CmpEQ32Fx2;
+                  assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+                  DIP("vceq.f32 %c%u, %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+               } else {
+                  return False;
+               }
+            } else {
+               if ((C >> 1) == 0) {
+                  /* VCGE  */
+                  IROp op;
+                  if ((theInstr >> 20) & 1)
+                     return False;
+                  op = Q ? Iop_CmpGE32Fx4 : Iop_CmpGE32Fx2;
+                  assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+                  DIP("vcge.f32 %c%u, %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+               } else {
+                  /* VCGT  */
+                  IROp op;
+                  if ((theInstr >> 20) & 1)
+                     return False;
+                  op = Q ? Iop_CmpGT32Fx4 : Iop_CmpGT32Fx2;
+                  assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+                  DIP("vcgt.f32 %c%u, %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+               }
+            }
+         } else {
+            if (U == 1) {
+               /* VACGE, VACGT */
+               UInt op_bit = (theInstr >> 21) & 1;
+               IROp op, op2;
+               op2 = Q ? Iop_Abs32Fx4 : Iop_Abs32Fx2;
+               if (op_bit) {
+                  op = Q ? Iop_CmpGT32Fx4 : Iop_CmpGT32Fx2;
+                  assign(res, binop(op,
+                                    unop(op2, mkexpr(arg_n)),
+                                    unop(op2, mkexpr(arg_m))));
+               } else {
+                  op = Q ? Iop_CmpGE32Fx4 : Iop_CmpGE32Fx2;
+                  assign(res, binop(op,
+                                    unop(op2, mkexpr(arg_n)),
+                                    unop(op2, mkexpr(arg_m))));
+               }
+               DIP("vacg%c.f32 %c%u, %c%u, %c%u\n", op_bit ? 't' : 'e',
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg,
+                   Q ? 'q' : 'd', mreg);
+            }
+         }
+         break;
+      case 15:
+         if (B == 0) {
+            if (U == 0) {
+               /* VMAX, VMIN  */
+               IROp op;
+               if ((theInstr >> 20) & 1)
+                  return False;
+               if ((theInstr >> 21) & 1) {
+                  op = Q ? Iop_Min32Fx4 : Iop_Min32Fx2;
+                  DIP("vmin.f32 %c%u, %c%u, %c%u\n", Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+               } else {
+                  op = Q ? Iop_Max32Fx4 : Iop_Max32Fx2;
+                  DIP("vmax.f32 %c%u, %c%u, %c%u\n", Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+               }
+               assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+            } else {
+               /* VPMAX, VPMIN   */
+               IROp op;
+               if (Q)
+                  return False;
+               if ((theInstr >> 20) & 1)
+                  return False;
+               if ((theInstr >> 21) & 1) {
+                  op = Iop_PwMin32Fx2;
+                  DIP("vpmin.f32 d%u, d%u, d%u\n", dreg, nreg, mreg);
+               } else {
+                  op = Iop_PwMax32Fx2;
+                  DIP("vpmax.f32 d%u, d%u, d%u\n", dreg, nreg, mreg);
+               }
+               assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+            }
+         } else {
+            if (U == 0) {
+               if ((C >> 1) == 0) {
+                  /* VRECPS */
+                  if ((theInstr >> 20) & 1)
+                     return False;
+                  assign(res, binop(Q ? Iop_RecipStep32Fx4
+                                      : Iop_RecipStep32Fx2,
+                                    mkexpr(arg_n),
+                                    mkexpr(arg_m)));
+                  DIP("vrecps.f32 %c%u, %c%u, %c%u\n", Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+               } else {
+                  /* VRSQRTS  */
+                  if ((theInstr >> 20) & 1)
+                     return False;
+                  assign(res, binop(Q ? Iop_RSqrtStep32Fx4
+                                      : Iop_RSqrtStep32Fx2,
+                                    mkexpr(arg_n),
+                                    mkexpr(arg_m)));
+                  DIP("vrsqrts.f32 %c%u, %c%u, %c%u\n", Q ? 'q' : 'd', dreg,
+                      Q ? 'q' : 'd', nreg, Q ? 'q' : 'd', mreg);
+               }
+            }
+         }
+         break;
+   }
+
+   if (Q) {
+      putQReg(dreg, mkexpr(res), condT);
+   } else {
+      putDRegI64(dreg, mkexpr(res), condT);
+   }
+
+   return True;
+}
+
+/* A7.4.2 Three registers of different length */
+static
+Bool dis_neon_data_3diff ( UInt theInstr, IRTemp condT )
+{
+   UInt A = (theInstr >> 8) & 0xf;
+   UInt B = (theInstr >> 20) & 3;
+   UInt U = (theInstr >> 24) & 1;
+   UInt P = (theInstr >> 9) & 1;
+   UInt mreg = get_neon_m_regno(theInstr);
+   UInt nreg = get_neon_n_regno(theInstr);
+   UInt dreg = get_neon_d_regno(theInstr);
+   UInt size = B;
+   ULong imm;
+   IRTemp res, arg_m, arg_n, cond, tmp;
+   IROp cvt, cvt2, cmp, op, op2, sh, add;
+   switch (A) {
+      case 0: case 1: case 2: case 3:
+         /* VADDL, VADDW, VSUBL, VSUBW */
+         if (dreg & 1)
+            return False;
+         dreg >>= 1;
+         size = B;
+         switch (size) {
+            case 0:
+               cvt = U ? Iop_Widen8Uto16x8 : Iop_Widen8Sto16x8;
+               op = (A & 2) ? Iop_Sub16x8 : Iop_Add16x8;
+               break;
+            case 1:
+               cvt = U ? Iop_Widen16Uto32x4 : Iop_Widen16Sto32x4;
+               op = (A & 2) ? Iop_Sub32x4 : Iop_Add32x4;
+               break;
+            case 2:
+               cvt = U ? Iop_Widen32Uto64x2 : Iop_Widen32Sto64x2;
+               op = (A & 2) ? Iop_Sub64x2 : Iop_Add64x2;
+               break;
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         arg_n = newTemp(Ity_V128);
+         arg_m = newTemp(Ity_V128);
+         if (A & 1) {
+            if (nreg & 1)
+               return False;
+            nreg >>= 1;
+            assign(arg_n, getQReg(nreg));
+         } else {
+            assign(arg_n, unop(cvt, getDRegI64(nreg)));
+         }
+         assign(arg_m, unop(cvt, getDRegI64(mreg)));
+         putQReg(dreg, binop(op, mkexpr(arg_n), mkexpr(arg_m)),
+                       condT);
+         DIP("v%s%c.%c%u q%u, %c%u, d%u\n", (A & 2) ? "sub" : "add",
+             (A & 1) ? 'w' : 'l', U ? 'u' : 's', 8 << size, dreg,
+             (A & 1) ? 'q' : 'd', nreg, mreg);
+         return True;
+      case 4:
+         /* VADDHN, VRADDHN */
+         if (mreg & 1)
+            return False;
+         mreg >>= 1;
+         if (nreg & 1)
+            return False;
+         nreg >>= 1;
+         size = B;
+         switch (size) {
+            case 0:
+               op = Iop_Add16x8;
+               cvt = Iop_NarrowUn16to8x8;
+               sh = Iop_ShrN16x8;
+               imm = 1U << 7;
+               imm = (imm << 16) | imm;
+               imm = (imm << 32) | imm;
+               break;
+            case 1:
+               op = Iop_Add32x4;
+               cvt = Iop_NarrowUn32to16x4;
+               sh = Iop_ShrN32x4;
+               imm = 1U << 15;
+               imm = (imm << 32) | imm;
+               break;
+            case 2:
+               op = Iop_Add64x2;
+               cvt = Iop_NarrowUn64to32x2;
+               sh = Iop_ShrN64x2;
+               imm = 1U << 31;
+               break;
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         tmp = newTemp(Ity_V128);
+         res = newTemp(Ity_V128);
+         assign(tmp, binop(op, getQReg(nreg), getQReg(mreg)));
+         if (U) {
+            /* VRADDHN */
+            assign(res, binop(op, mkexpr(tmp),
+                     binop(Iop_64HLtoV128, mkU64(imm), mkU64(imm))));
+         } else {
+            assign(res, mkexpr(tmp));
+         }
+         putDRegI64(dreg, unop(cvt, binop(sh, mkexpr(res), mkU8(8 << size))),
+                    condT);
+         DIP("v%saddhn.i%u d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
+             nreg, mreg);
+         return True;
+      case 5:
+         /* VABAL */
+         if (!((theInstr >> 23) & 1)) {
+            vpanic("VABA should not be in dis_neon_data_3diff\n");
+         }
+         if (dreg & 1)
+            return False;
+         dreg >>= 1;
+         switch (size) {
+            case 0:
+               cmp = U ? Iop_CmpGT8Ux8 : Iop_CmpGT8Sx8;
+               cvt = U ? Iop_Widen8Uto16x8 : Iop_Widen8Sto16x8;
+               cvt2 = Iop_Widen8Sto16x8;
+               op = Iop_Sub16x8;
+               op2 = Iop_Add16x8;
+               break;
+            case 1:
+               cmp = U ? Iop_CmpGT16Ux4 : Iop_CmpGT16Sx4;
+               cvt = U ? Iop_Widen16Uto32x4 : Iop_Widen16Sto32x4;
+               cvt2 = Iop_Widen16Sto32x4;
+               op = Iop_Sub32x4;
+               op2 = Iop_Add32x4;
+               break;
+            case 2:
+               cmp = U ? Iop_CmpGT32Ux2 : Iop_CmpGT32Sx2;
+               cvt = U ? Iop_Widen32Uto64x2 : Iop_Widen32Sto64x2;
+               cvt2 = Iop_Widen32Sto64x2;
+               op = Iop_Sub64x2;
+               op2 = Iop_Add64x2;
+               break;
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         arg_n = newTemp(Ity_V128);
+         arg_m = newTemp(Ity_V128);
+         cond = newTemp(Ity_V128);
+         res = newTemp(Ity_V128);
+         assign(arg_n, unop(cvt, getDRegI64(nreg)));
+         assign(arg_m, unop(cvt, getDRegI64(mreg)));
+         assign(cond, unop(cvt2, binop(cmp, getDRegI64(nreg),
+                                            getDRegI64(mreg))));
+         assign(res, binop(op2,
+                           binop(Iop_OrV128,
+                                 binop(Iop_AndV128,
+                                       binop(op, mkexpr(arg_n), mkexpr(arg_m)),
+                                       mkexpr(cond)),
+                                 binop(Iop_AndV128,
+                                       binop(op, mkexpr(arg_m), mkexpr(arg_n)),
+                                       unop(Iop_NotV128, mkexpr(cond)))),
+                           getQReg(dreg)));
+         putQReg(dreg, mkexpr(res), condT);
+         DIP("vabal.%c%u q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
+             nreg, mreg);
+         return True;
+      case 6:
+         /* VSUBHN, VRSUBHN */
+         if (mreg & 1)
+            return False;
+         mreg >>= 1;
+         if (nreg & 1)
+            return False;
+         nreg >>= 1;
+         size = B;
+         switch (size) {
+            case 0:
+               op = Iop_Sub16x8;
+               op2 = Iop_Add16x8;
+               cvt = Iop_NarrowUn16to8x8;
+               sh = Iop_ShrN16x8;
+               imm = 1U << 7;
+               imm = (imm << 16) | imm;
+               imm = (imm << 32) | imm;
+               break;
+            case 1:
+               op = Iop_Sub32x4;
+               op2 = Iop_Add32x4;
+               cvt = Iop_NarrowUn32to16x4;
+               sh = Iop_ShrN32x4;
+               imm = 1U << 15;
+               imm = (imm << 32) | imm;
+               break;
+            case 2:
+               op = Iop_Sub64x2;
+               op2 = Iop_Add64x2;
+               cvt = Iop_NarrowUn64to32x2;
+               sh = Iop_ShrN64x2;
+               imm = 1U << 31;
+               break;
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         tmp = newTemp(Ity_V128);
+         res = newTemp(Ity_V128);
+         assign(tmp, binop(op, getQReg(nreg), getQReg(mreg)));
+         if (U) {
+            /* VRSUBHN */
+            assign(res, binop(op2, mkexpr(tmp),
+                     binop(Iop_64HLtoV128, mkU64(imm), mkU64(imm))));
+         } else {
+            assign(res, mkexpr(tmp));
+         }
+         putDRegI64(dreg, unop(cvt, binop(sh, mkexpr(res), mkU8(8 << size))),
+                    condT);
+         DIP("v%ssubhn.i%u d%u, q%u, q%u\n", U ? "r" : "", 16 << size, dreg,
+             nreg, mreg);
+         return True;
+      case 7:
+         /* VABDL */
+         if (!((theInstr >> 23) & 1)) {
+            vpanic("VABL should not be in dis_neon_data_3diff\n");
+         }
+         if (dreg & 1)
+            return False;
+         dreg >>= 1;
+         switch (size) {
+            case 0:
+               cmp = U ? Iop_CmpGT8Ux8 : Iop_CmpGT8Sx8;
+               cvt = U ? Iop_Widen8Uto16x8 : Iop_Widen8Sto16x8;
+               cvt2 = Iop_Widen8Sto16x8;
+               op = Iop_Sub16x8;
+               break;
+            case 1:
+               cmp = U ? Iop_CmpGT16Ux4 : Iop_CmpGT16Sx4;
+               cvt = U ? Iop_Widen16Uto32x4 : Iop_Widen16Sto32x4;
+               cvt2 = Iop_Widen16Sto32x4;
+               op = Iop_Sub32x4;
+               break;
+            case 2:
+               cmp = U ? Iop_CmpGT32Ux2 : Iop_CmpGT32Sx2;
+               cvt = U ? Iop_Widen32Uto64x2 : Iop_Widen32Sto64x2;
+               cvt2 = Iop_Widen32Sto64x2;
+               op = Iop_Sub64x2;
+               break;
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         arg_n = newTemp(Ity_V128);
+         arg_m = newTemp(Ity_V128);
+         cond = newTemp(Ity_V128);
+         res = newTemp(Ity_V128);
+         assign(arg_n, unop(cvt, getDRegI64(nreg)));
+         assign(arg_m, unop(cvt, getDRegI64(mreg)));
+         assign(cond, unop(cvt2, binop(cmp, getDRegI64(nreg),
+                                            getDRegI64(mreg))));
+         assign(res, binop(Iop_OrV128,
+                           binop(Iop_AndV128,
+                                 binop(op, mkexpr(arg_n), mkexpr(arg_m)),
+                                 mkexpr(cond)),
+                           binop(Iop_AndV128,
+                                 binop(op, mkexpr(arg_m), mkexpr(arg_n)),
+                                 unop(Iop_NotV128, mkexpr(cond)))));
+         putQReg(dreg, mkexpr(res), condT);
+         DIP("vabdl.%c%u q%u, d%u, d%u\n", U ? 'u' : 's', 8 << size, dreg,
+             nreg, mreg);
+         return True;
+      case 8:
+      case 10:
+         /* VMLAL, VMLSL (integer) */
+         if (dreg & 1)
+            return False;
+         dreg >>= 1;
+         size = B;
+         switch (size) {
+            case 0:
+               op = U ? Iop_Mull8Ux8 : Iop_Mull8Sx8;
+               op2 = P ? Iop_Sub16x8 : Iop_Add16x8;
+               break;
+            case 1:
+               op = U ? Iop_Mull16Ux4 : Iop_Mull16Sx4;
+               op2 = P ? Iop_Sub32x4 : Iop_Add32x4;
+               break;
+            case 2:
+               op = U ? Iop_Mull32Ux2 : Iop_Mull32Sx2;
+               op2 = P ? Iop_Sub64x2 : Iop_Add64x2;
+               break;
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         res = newTemp(Ity_V128);
+         assign(res, binop(op, getDRegI64(nreg),getDRegI64(mreg)));
+         putQReg(dreg, binop(op2, getQReg(dreg), mkexpr(res)), condT);
+         DIP("vml%cl.%c%u q%u, d%u, d%u\n", P ? 's' : 'a', U ? 'u' : 's',
+             8 << size, dreg, nreg, mreg);
+         return True;
+      case 9:
+      case 11:
+         /* VQDMLAL, VQDMLSL */
+         if (U)
+            return False;
+         if (dreg & 1)
+            return False;
+         dreg >>= 1;
+         size = B;
+         switch (size) {
+            case 0: case 3:
+               return False;
+            case 1:
+               op = Iop_QDMull16Sx4;
+               cmp = Iop_CmpEQ16x4;
+               add = P ? Iop_QSub32Sx4 : Iop_QAdd32Sx4;
+               op2 = P ? Iop_Sub32x4 : Iop_Add32x4;
+               imm = 1LL << 15;
+               imm = (imm << 16) | imm;
+               imm = (imm << 32) | imm;
+               break;
+            case 2:
+               op = Iop_QDMull32Sx2;
+               cmp = Iop_CmpEQ32x2;
+               add = P ? Iop_QSub64Sx2 : Iop_QAdd64Sx2;
+               op2 = P ? Iop_Sub64x2 : Iop_Add64x2;
+               imm = 1LL << 31;
+               imm = (imm << 32) | imm;
+               break;
+            default:
+               vassert(0);
+         }
+         res = newTemp(Ity_V128);
+         tmp = newTemp(Ity_V128);
+         assign(res, binop(op, getDRegI64(nreg), getDRegI64(mreg)));
+         assign(tmp, binop(op2, getQReg(dreg), mkexpr(res)));
+         setFlag_QC(mkexpr(tmp), binop(add, getQReg(dreg), mkexpr(res)),
+                    True, condT);
+         setFlag_QC(binop(Iop_And64,
+                          binop(cmp, getDRegI64(nreg), mkU64(imm)),
+                          binop(cmp, getDRegI64(mreg), mkU64(imm))),
+                    mkU64(0),
+                    False, condT);
+         putQReg(dreg, binop(add, getQReg(dreg), mkexpr(res)), condT);
+         DIP("vqdml%cl.s%u q%u, d%u, d%u\n", P ? 's' : 'a', 8 << size, dreg,
+             nreg, mreg);
+         return True;
+      case 12:
+      case 14:
+         /* VMULL (integer or polynomial) */
+         if (dreg & 1)
+            return False;
+         dreg >>= 1;
+         size = B;
+         switch (size) {
+            case 0:
+               op = (U) ? Iop_Mull8Ux8 : Iop_Mull8Sx8;
+               if (P)
+                  op = Iop_PolynomialMull8x8;
+               break;
+            case 1:
+               op = (U) ? Iop_Mull16Ux4 : Iop_Mull16Sx4;
+               break;
+            case 2:
+               op = (U) ? Iop_Mull32Ux2 : Iop_Mull32Sx2;
+               break;
+            default:
+               vassert(0);
+         }
+         putQReg(dreg, binop(op, getDRegI64(nreg),
+                                 getDRegI64(mreg)), condT);
+         DIP("vmull.%c%u q%u, d%u, d%u\n", P ? 'p' : (U ? 'u' : 's'),
+               8 << size, dreg, nreg, mreg);
+         return True;
+      case 13:
+         /* VQDMULL */
+         if (U)
+            return False;
+         if (dreg & 1)
+            return False;
+         dreg >>= 1;
+         size = B;
+         switch (size) {
+            case 0:
+            case 3:
+               return False;
+            case 1:
+               op = Iop_QDMull16Sx4;
+               op2 = Iop_CmpEQ16x4;
+               imm = 1LL << 15;
+               imm = (imm << 16) | imm;
+               imm = (imm << 32) | imm;
+               break;
+            case 2:
+               op = Iop_QDMull32Sx2;
+               op2 = Iop_CmpEQ32x2;
+               imm = 1LL << 31;
+               imm = (imm << 32) | imm;
+               break;
+            default:
+               vassert(0);
+         }
+         putQReg(dreg, binop(op, getDRegI64(nreg), getDRegI64(mreg)),
+               condT);
+         setFlag_QC(binop(Iop_And64,
+                          binop(op2, getDRegI64(nreg), mkU64(imm)),
+                          binop(op2, getDRegI64(mreg), mkU64(imm))),
+                    mkU64(0),
+                    False, condT);
+         DIP("vqdmull.s%u q%u, d%u, d%u\n", 8 << size, dreg, nreg, mreg);
+         return True;
+      default:
+         return False;
+   }
+   return False;
+}
+
+/* A7.4.3 Two registers and a scalar.
+   Decode the NEON "two registers and a scalar" group: VMLA/VMLS,
+   VMLAL/VMLSL, VQDMLAL/VQDMLSL, VMUL, VMULL, VQDMULL, VQDMULH and
+   VQRDMULH, each taking an indexed element of a doubleword register
+   as the second operand.  Emits IR guarded by condT and returns True
+   on a successful decode; returns False to let the caller try other
+   decoders for this encoding. */
+static
+Bool dis_neon_data_2reg_and_scalar ( UInt theInstr, IRTemp condT )
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(theInstr, (_bMax), (_bMin))
+   UInt U = INSN(24,24);
+   /* Bit 6 is masked off before extracting register numbers: in this
+      group it is a fixed '1' (checked below), not a register bit. */
+   UInt dreg = get_neon_d_regno(theInstr & ~(1 << 6));
+   UInt nreg = get_neon_n_regno(theInstr & ~(1 << 6));
+   UInt mreg = get_neon_m_regno(theInstr & ~(1 << 6));
+   UInt size = INSN(21,20);
+   UInt index;
+   /* NB: U and Q are the same instruction bit (24).  The widening
+      (long) forms read it as the signedness flag U; the same-width
+      forms read it as the quadword flag Q. */
+   UInt Q = INSN(24,24);
+
+   if (INSN(27,25) != 1 || INSN(23,23) != 1
+       || INSN(6,6) != 1 || INSN(4,4) != 0)
+      return False;
+
+   /* VMLA, VMLS (scalar)  */
+   if ((INSN(11,8) & BITS4(1,0,1,0)) == BITS4(0,0,0,0)) {
+      IRTemp res, arg_m, arg_n;
+      IROp dup, get, op, op2, add, sub;
+      if (Q) {
+         /* Quadword form: D and N register numbers must be even. */
+         if ((dreg & 1) || (nreg & 1))
+            return False;
+         dreg >>= 1;
+         nreg >>= 1;
+         res = newTemp(Ity_V128);
+         arg_m = newTemp(Ity_V128);
+         arg_n = newTemp(Ity_V128);
+         assign(arg_n, getQReg(nreg));
+         switch(size) {
+            case 1:
+               /* 16-bit elements: low 3 bits of the M field select the
+                  register, the remaining high bits the element index. */
+               dup = Iop_Dup16x8;
+               get = Iop_GetElem16x4;
+               index = mreg >> 3;
+               mreg &= 7;
+               break;
+            case 2:
+               /* 32-bit elements: low 4 bits select the register, the
+                  top bit the element index. */
+               dup = Iop_Dup32x4;
+               get = Iop_GetElem32x2;
+               index = mreg >> 4;
+               mreg &= 0xf;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         /* Splat the selected scalar across all lanes. */
+         assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      } else {
+         res = newTemp(Ity_I64);
+         arg_m = newTemp(Ity_I64);
+         arg_n = newTemp(Ity_I64);
+         assign(arg_n, getDRegI64(nreg));
+         switch(size) {
+            case 1:
+               dup = Iop_Dup16x4;
+               get = Iop_GetElem16x4;
+               index = mreg >> 3;
+               mreg &= 7;
+               break;
+            case 2:
+               dup = Iop_Dup32x2;
+               get = Iop_GetElem32x2;
+               index = mreg >> 4;
+               mreg &= 0xf;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      }
+      /* Bit 8 selects the floating-point (F32) variant. */
+      if (INSN(8,8)) {
+         switch (size) {
+            case 2:
+               op = Q ? Iop_Mul32Fx4 : Iop_Mul32Fx2;
+               add = Q ? Iop_Add32Fx4 : Iop_Add32Fx2;
+               sub = Q ? Iop_Sub32Fx4 : Iop_Sub32Fx2;
+               break;
+            case 0:
+            case 1:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+      } else {
+         switch (size) {
+            case 1:
+               op = Q ? Iop_Mul16x8 : Iop_Mul16x4;
+               add = Q ? Iop_Add16x8 : Iop_Add16x4;
+               sub = Q ? Iop_Sub16x8 : Iop_Sub16x4;
+               break;
+            case 2:
+               op = Q ? Iop_Mul32x4 : Iop_Mul32x2;
+               add = Q ? Iop_Add32x4 : Iop_Add32x2;
+               sub = Q ? Iop_Sub32x4 : Iop_Sub32x2;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+      }
+      /* Bit 10 distinguishes VMLS (subtract) from VMLA (add). */
+      op2 = INSN(10,10) ? sub : add;
+      assign(res, binop_w_fake_RM(op, mkexpr(arg_n), mkexpr(arg_m)));
+      if (Q)
+         putQReg(dreg, binop_w_fake_RM(op2, getQReg(dreg), mkexpr(res)),
+                 condT);
+      else
+         putDRegI64(dreg, binop(op2, getDRegI64(dreg), mkexpr(res)),
+                    condT);
+      DIP("vml%c.%c%u %c%u, %c%u, d%u[%u]\n", INSN(10,10) ? 's' : 'a',
+            INSN(8,8) ? 'f' : 'i', 8 << size,
+            Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', nreg, mreg, index);
+      return True;
+   }
+
+   /* VMLAL, VMLSL (scalar)   */
+   if ((INSN(11,8) & BITS4(1,0,1,1)) == BITS4(0,0,1,0)) {
+      IRTemp res, arg_m, arg_n;
+      IROp dup, get, op, op2, add, sub;
+      /* Widening form: destination is a Q register, so D must be even. */
+      if (dreg & 1)
+         return False;
+      dreg >>= 1;
+      res = newTemp(Ity_V128);
+      arg_m = newTemp(Ity_I64);
+      arg_n = newTemp(Ity_I64);
+      assign(arg_n, getDRegI64(nreg));
+      switch(size) {
+         case 1:
+            dup = Iop_Dup16x4;
+            get = Iop_GetElem16x4;
+            index = mreg >> 3;
+            mreg &= 7;
+            break;
+         case 2:
+            dup = Iop_Dup32x2;
+            get = Iop_GetElem32x2;
+            index = mreg >> 4;
+            mreg &= 0xf;
+            break;
+         case 0:
+         case 3:
+            return False;
+         default:
+            vassert(0);
+      }
+      assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      switch (size) {
+         case 1:
+            op = U ? Iop_Mull16Ux4 : Iop_Mull16Sx4;
+            add = Iop_Add32x4;
+            sub = Iop_Sub32x4;
+            break;
+         case 2:
+            op = U ? Iop_Mull32Ux2 : Iop_Mull32Sx2;
+            add = Iop_Add64x2;
+            sub = Iop_Sub64x2;
+            break;
+         case 0:
+         case 3:
+            return False;
+         default:
+            vassert(0);
+      }
+      op2 = INSN(10,10) ? sub : add;
+      assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+      putQReg(dreg, binop(op2, getQReg(dreg), mkexpr(res)), condT);
+      DIP("vml%cl.%c%u q%u, d%u, d%u[%u]\n",
+          INSN(10,10) ? 's' : 'a', U ? 'u' : 's',
+          8 << size, dreg, nreg, mreg, index);
+      return True;
+   }
+
+   /* VQDMLAL, VQDMLSL (scalar)  */
+   if ((INSN(11,8) & BITS4(1,0,1,1)) == BITS4(0,0,1,1) && !U) {
+      IRTemp res, arg_m, arg_n, tmp;
+      IROp dup, get, op, op2, add, cmp;
+      UInt P = INSN(10,10);
+      ULong imm;
+      if (dreg & 1)
+         return False;
+      dreg >>= 1;
+      res = newTemp(Ity_V128);
+      arg_m = newTemp(Ity_I64);
+      arg_n = newTemp(Ity_I64);
+      assign(arg_n, getDRegI64(nreg));
+      switch(size) {
+         case 1:
+            dup = Iop_Dup16x4;
+            get = Iop_GetElem16x4;
+            index = mreg >> 3;
+            mreg &= 7;
+            break;
+         case 2:
+            dup = Iop_Dup32x2;
+            get = Iop_GetElem32x2;
+            index = mreg >> 4;
+            mreg &= 0xf;
+            break;
+         case 0:
+         case 3:
+            return False;
+         default:
+            vassert(0);
+      }
+      assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      switch (size) {
+         case 0:
+         case 3:
+            return False;
+         case 1:
+            op = Iop_QDMull16Sx4;
+            cmp = Iop_CmpEQ16x4;
+            add = P ? Iop_QSub32Sx4 : Iop_QAdd32Sx4;
+            op2 = P ? Iop_Sub32x4 : Iop_Add32x4;
+            /* imm = 0x8000800080008000: the most negative 16-bit value
+               replicated into every lane. */
+            imm = 1LL << 15;
+            imm = (imm << 16) | imm;
+            imm = (imm << 32) | imm;
+            break;
+         case 2:
+            op = Iop_QDMull32Sx2;
+            cmp = Iop_CmpEQ32x2;
+            add = P ? Iop_QSub64Sx2 : Iop_QAdd64Sx2;
+            op2 = P ? Iop_Sub64x2 : Iop_Add64x2;
+            imm = 1LL << 31;
+            imm = (imm << 32) | imm;
+            break;
+         default:
+            vassert(0);
+      }
+      /* NOTE(review): 'res' was already allocated with newTemp above;
+         this second allocation merely wastes an IR temp (harmless). */
+      res = newTemp(Ity_V128);
+      tmp = newTemp(Ity_V128);
+      assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+      assign(tmp, binop(op2, getQReg(dreg), mkexpr(res)));
+      /* Set QC if any lane has both multiplicands equal to the most
+         negative value, the case where the doubling multiply itself
+         saturates.  setFlag_QC presumably sets FPSCR.QC when its two
+         argument expressions differ -- confirm against the helper. */
+      setFlag_QC(binop(Iop_And64,
+                       binop(cmp, mkexpr(arg_n), mkU64(imm)),
+                       binop(cmp, mkexpr(arg_m), mkU64(imm))),
+                 mkU64(0),
+                 False, condT);
+      /* Set QC if the saturating accumulate differs from the plain
+         (wrapping) accumulate. */
+      setFlag_QC(mkexpr(tmp), binop(add, getQReg(dreg), mkexpr(res)),
+                 True, condT);
+      putQReg(dreg, binop(add, getQReg(dreg), mkexpr(res)), condT);
+      DIP("vqdml%cl.s%u q%u, d%u, d%u[%u]\n", P ? 's' : 'a', 8 << size,
+          dreg, nreg, mreg, index);
+      return True;
+   }
+
+   /* VMUL (by scalar)  */
+   if ((INSN(11,8) & BITS4(1,1,1,0)) == BITS4(1,0,0,0)) {
+      IRTemp res, arg_m, arg_n;
+      IROp dup, get, op;
+      if (Q) {
+         if ((dreg & 1) || (nreg & 1))
+            return False;
+         dreg >>= 1;
+         nreg >>= 1;
+         res = newTemp(Ity_V128);
+         arg_m = newTemp(Ity_V128);
+         arg_n = newTemp(Ity_V128);
+         assign(arg_n, getQReg(nreg));
+         switch(size) {
+            case 1:
+               dup = Iop_Dup16x8;
+               get = Iop_GetElem16x4;
+               index = mreg >> 3;
+               mreg &= 7;
+               break;
+            case 2:
+               dup = Iop_Dup32x4;
+               get = Iop_GetElem32x2;
+               index = mreg >> 4;
+               mreg &= 0xf;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      } else {
+         res = newTemp(Ity_I64);
+         arg_m = newTemp(Ity_I64);
+         arg_n = newTemp(Ity_I64);
+         assign(arg_n, getDRegI64(nreg));
+         switch(size) {
+            case 1:
+               dup = Iop_Dup16x4;
+               get = Iop_GetElem16x4;
+               index = mreg >> 3;
+               mreg &= 7;
+               break;
+            case 2:
+               dup = Iop_Dup32x2;
+               get = Iop_GetElem32x2;
+               index = mreg >> 4;
+               mreg &= 0xf;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      }
+      /* Bit 8 selects the floating-point (F32) variant. */
+      if (INSN(8,8)) {
+         switch (size) {
+            case 2:
+               op = Q ? Iop_Mul32Fx4 : Iop_Mul32Fx2;
+               break;
+            case 0:
+            case 1:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+      } else {
+         switch (size) {
+            case 1:
+               op = Q ? Iop_Mul16x8 : Iop_Mul16x4;
+               break;
+            case 2:
+               op = Q ? Iop_Mul32x4 : Iop_Mul32x2;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+      }
+      assign(res, binop_w_fake_RM(op, mkexpr(arg_n), mkexpr(arg_m)));
+      if (Q)
+         putQReg(dreg, mkexpr(res), condT);
+      else
+         putDRegI64(dreg, mkexpr(res), condT);
+      DIP("vmul.%c%u %c%u, %c%u, d%u[%u]\n", INSN(8,8) ? 'f' : 'i',
+          8 << size, Q ? 'q' : 'd', dreg,
+          Q ? 'q' : 'd', nreg, mreg, index);
+      return True;
+   }
+
+   /* VMULL (scalar) */
+   if (INSN(11,8) == BITS4(1,0,1,0)) {
+      IRTemp res, arg_m, arg_n;
+      IROp dup, get, op;
+      if (dreg & 1)
+         return False;
+      dreg >>= 1;
+      res = newTemp(Ity_V128);
+      arg_m = newTemp(Ity_I64);
+      arg_n = newTemp(Ity_I64);
+      assign(arg_n, getDRegI64(nreg));
+      switch(size) {
+         case 1:
+            dup = Iop_Dup16x4;
+            get = Iop_GetElem16x4;
+            index = mreg >> 3;
+            mreg &= 7;
+            break;
+         case 2:
+            dup = Iop_Dup32x2;
+            get = Iop_GetElem32x2;
+            index = mreg >> 4;
+            mreg &= 0xf;
+            break;
+         case 0:
+         case 3:
+            return False;
+         default:
+            vassert(0);
+      }
+      assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      switch (size) {
+         case 1: op = U ? Iop_Mull16Ux4 : Iop_Mull16Sx4; break;
+         case 2: op = U ? Iop_Mull32Ux2 : Iop_Mull32Sx2; break;
+         case 0: case 3: return False;
+         default: vassert(0);
+      }
+      assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+      putQReg(dreg, mkexpr(res), condT);
+      DIP("vmull.%c%u q%u, d%u, d%u[%u]\n", U ? 'u' : 's', 8 << size, dreg,
+          nreg, mreg, index);
+      return True;
+   }
+
+   /* VQDMULL */
+   if (INSN(11,8) == BITS4(1,0,1,1) && !U) {
+      IROp op ,op2, dup, get;
+      ULong imm;
+      IRTemp arg_m, arg_n;
+      if (dreg & 1)
+         return False;
+      dreg >>= 1;
+      arg_m = newTemp(Ity_I64);
+      arg_n = newTemp(Ity_I64);
+      assign(arg_n, getDRegI64(nreg));
+      switch(size) {
+         case 1:
+            dup = Iop_Dup16x4;
+            get = Iop_GetElem16x4;
+            index = mreg >> 3;
+            mreg &= 7;
+            break;
+         case 2:
+            dup = Iop_Dup32x2;
+            get = Iop_GetElem32x2;
+            index = mreg >> 4;
+            mreg &= 0xf;
+            break;
+         case 0:
+         case 3:
+            return False;
+         default:
+            vassert(0);
+      }
+      assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      switch (size) {
+         case 0:
+         case 3:
+            return False;
+         case 1:
+            op = Iop_QDMull16Sx4;
+            op2 = Iop_CmpEQ16x4;
+            /* Most negative element value replicated across lanes;
+               used below to detect the saturating multiply case. */
+            imm = 1LL << 15;
+            imm = (imm << 16) | imm;
+            imm = (imm << 32) | imm;
+            break;
+         case 2:
+            op = Iop_QDMull32Sx2;
+            op2 = Iop_CmpEQ32x2;
+            imm = 1LL << 31;
+            imm = (imm << 32) | imm;
+            break;
+         default:
+            vassert(0);
+      }
+      putQReg(dreg, binop(op, mkexpr(arg_n), mkexpr(arg_m)),
+            condT);
+      setFlag_QC(binop(Iop_And64,
+                       binop(op2, mkexpr(arg_n), mkU64(imm)),
+                       binop(op2, mkexpr(arg_m), mkU64(imm))),
+                 mkU64(0),
+                 False, condT);
+      DIP("vqdmull.s%u q%u, d%u, d%u[%u]\n", 8 << size, dreg, nreg, mreg,
+          index);
+      return True;
+   }
+
+   /* VQDMULH */
+   if (INSN(11,8) == BITS4(1,1,0,0)) {
+      IROp op ,op2, dup, get;
+      ULong imm;
+      IRTemp res, arg_m, arg_n;
+      if (Q) {
+         if ((dreg & 1) || (nreg & 1))
+            return False;
+         dreg >>= 1;
+         nreg >>= 1;
+         res = newTemp(Ity_V128);
+         arg_m = newTemp(Ity_V128);
+         arg_n = newTemp(Ity_V128);
+         assign(arg_n, getQReg(nreg));
+         switch(size) {
+            case 1:
+               dup = Iop_Dup16x8;
+               get = Iop_GetElem16x4;
+               index = mreg >> 3;
+               mreg &= 7;
+               break;
+            case 2:
+               dup = Iop_Dup32x4;
+               get = Iop_GetElem32x2;
+               index = mreg >> 4;
+               mreg &= 0xf;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      } else {
+         res = newTemp(Ity_I64);
+         arg_m = newTemp(Ity_I64);
+         arg_n = newTemp(Ity_I64);
+         assign(arg_n, getDRegI64(nreg));
+         switch(size) {
+            case 1:
+               dup = Iop_Dup16x4;
+               get = Iop_GetElem16x4;
+               index = mreg >> 3;
+               mreg &= 7;
+               break;
+            case 2:
+               dup = Iop_Dup32x2;
+               get = Iop_GetElem32x2;
+               index = mreg >> 4;
+               mreg &= 0xf;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      }
+      switch (size) {
+         case 0:
+         case 3:
+            return False;
+         case 1:
+            op = Q ? Iop_QDMulHi16Sx8 : Iop_QDMulHi16Sx4;
+            op2 = Q ? Iop_CmpEQ16x8 : Iop_CmpEQ16x4;
+            imm = 1LL << 15;
+            imm = (imm << 16) | imm;
+            imm = (imm << 32) | imm;
+            break;
+         case 2:
+            op = Q ? Iop_QDMulHi32Sx4 : Iop_QDMulHi32Sx2;
+            op2 = Q ? Iop_CmpEQ32x4 : Iop_CmpEQ32x2;
+            imm = 1LL << 31;
+            imm = (imm << 32) | imm;
+            break;
+         default:
+            vassert(0);
+      }
+      assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+      /* QC is flagged when both operands are the most negative value
+         (both equal to 'imm') in some lane. */
+      setFlag_QC(binop(Q ? Iop_AndV128 : Iop_And64,
+                       binop(op2, mkexpr(arg_n),
+                                  Q ? mkU128(imm) : mkU64(imm)),
+                       binop(op2, mkexpr(arg_m),
+                             Q ? mkU128(imm) : mkU64(imm))),
+                 Q ? mkU128(0) : mkU64(0),
+                 Q, condT);
+      if (Q)
+         putQReg(dreg, mkexpr(res), condT);
+      else
+         putDRegI64(dreg, mkexpr(res), condT);
+      DIP("vqdmulh.s%u %c%u, %c%u, d%u[%u]\n",
+          8 << size, Q ? 'q' : 'd', dreg,
+          Q ? 'q' : 'd', nreg, mreg, index);
+      return True;
+   }
+
+   /* VQRDMULH (scalar) */
+   if (INSN(11,8) == BITS4(1,1,0,1)) {
+      IROp op ,op2, dup, get;
+      ULong imm;
+      IRTemp res, arg_m, arg_n;
+      if (Q) {
+         if ((dreg & 1) || (nreg & 1))
+            return False;
+         dreg >>= 1;
+         nreg >>= 1;
+         res = newTemp(Ity_V128);
+         arg_m = newTemp(Ity_V128);
+         arg_n = newTemp(Ity_V128);
+         assign(arg_n, getQReg(nreg));
+         switch(size) {
+            case 1:
+               dup = Iop_Dup16x8;
+               get = Iop_GetElem16x4;
+               index = mreg >> 3;
+               mreg &= 7;
+               break;
+            case 2:
+               dup = Iop_Dup32x4;
+               get = Iop_GetElem32x2;
+               index = mreg >> 4;
+               mreg &= 0xf;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      } else {
+         res = newTemp(Ity_I64);
+         arg_m = newTemp(Ity_I64);
+         arg_n = newTemp(Ity_I64);
+         assign(arg_n, getDRegI64(nreg));
+         switch(size) {
+            case 1:
+               dup = Iop_Dup16x4;
+               get = Iop_GetElem16x4;
+               index = mreg >> 3;
+               mreg &= 7;
+               break;
+            case 2:
+               dup = Iop_Dup32x2;
+               get = Iop_GetElem32x2;
+               index = mreg >> 4;
+               mreg &= 0xf;
+               break;
+            case 0:
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         assign(arg_m, unop(dup, binop(get, getDRegI64(mreg), mkU8(index))));
+      }
+      switch (size) {
+         case 0:
+         case 3:
+            return False;
+         case 1:
+            op = Q ? Iop_QRDMulHi16Sx8 : Iop_QRDMulHi16Sx4;
+            op2 = Q ? Iop_CmpEQ16x8 : Iop_CmpEQ16x4;
+            imm = 1LL << 15;
+            imm = (imm << 16) | imm;
+            imm = (imm << 32) | imm;
+            break;
+         case 2:
+            op = Q ? Iop_QRDMulHi32Sx4 : Iop_QRDMulHi32Sx2;
+            op2 = Q ? Iop_CmpEQ32x4 : Iop_CmpEQ32x2;
+            imm = 1LL << 31;
+            imm = (imm << 32) | imm;
+            break;
+         default:
+            vassert(0);
+      }
+      assign(res, binop(op, mkexpr(arg_n), mkexpr(arg_m)));
+      setFlag_QC(binop(Q ? Iop_AndV128 : Iop_And64,
+                       binop(op2, mkexpr(arg_n),
+                                  Q ? mkU128(imm) : mkU64(imm)),
+                       binop(op2, mkexpr(arg_m),
+                                  Q ? mkU128(imm) : mkU64(imm))),
+                 Q ? mkU128(0) : mkU64(0),
+                 Q, condT);
+      if (Q)
+         putQReg(dreg, mkexpr(res), condT);
+      else
+         putDRegI64(dreg, mkexpr(res), condT);
+      DIP("vqrdmulh.s%u %c%u, %c%u, d%u[%u]\n",
+          8 << size, Q ? 'q' : 'd', dreg,
+          Q ? 'q' : 'd', nreg, mreg, index);
+      return True;
+   }
+
+   return False;
+#  undef INSN
+}
+
+/* A7.4.4 Two registers and a shift amount */
+
+/* Disassemble a NEON "two registers and a shift amount" instruction
+   (ARM ARM section A7.4.4) and emit IR for it.
+
+   theInstr  the 32-bit instruction word
+   condT     IRTemp_INVALID for an unconditional encoding, otherwise a
+             temp whose value guards the register writes and the QC
+             (cumulative saturation) flag updates performed below.
+
+   Returns True iff the encoding was recognised and translated; False
+   means "not decoded here" and the caller treats it as undefined.  */
+static
+Bool dis_neon_data_2reg_and_shift ( UInt theInstr, IRTemp condT )
+{
+   UInt A = (theInstr >> 8) & 0xf;
+   UInt B = (theInstr >> 6) & 1;
+   UInt L = (theInstr >> 7) & 1;
+   UInt U = (theInstr >> 24) & 1;
+   UInt Q = B;
+   UInt imm6 = (theInstr >> 16) & 0x3f;
+   UInt shift_imm;
+   UInt size = 4;
+   UInt tmp;
+   UInt mreg = get_neon_m_regno(theInstr);
+   UInt dreg = get_neon_d_regno(theInstr);
+   ULong imm = 0;
+   IROp op, cvt, add = Iop_INVALID, cvt2, op_rev;
+   IRTemp reg_m, res, mask;
+
+   if (L == 0 && ((theInstr >> 19) & 7) == 0)
+      /* It is "one register and a modified immediate" (A7.4.6),
+         handled elsewhere. */
+      return False;
+
+   /* The position of the most significant set bit of L:imm6 selects
+      the element size; the raw shift amount is recovered from imm6 by
+      the subtractions below (right-shift forms encode width-shift). */
+   tmp = (L << 6) | imm6;
+   if (tmp & 0x40) {
+      size = 3;
+      shift_imm = 64 - imm6;
+   } else if (tmp & 0x20) {
+      size = 2;
+      shift_imm = 64 - imm6;
+   } else if (tmp & 0x10) {
+      size = 1;
+      shift_imm = 32 - imm6;
+   } else if (tmp & 0x8) {
+      size = 0;
+      shift_imm = 16 - imm6;
+   } else {
+      return False;
+   }
+
+   /* A selects the operation:
+        0,1   VSHR, VSRA           2,3   VRSHR, VRSRA
+        4     VSRI                 5     VSLI / VSHL #imm
+        6,7   VQSHL, VQSHLU        8,9   narrowing shifts
+        10    VSHLL (VMOVL)        14,15 VCVT fixed<->fp       */
+   switch (A) {
+      case 3:
+      case 2:
+         /* VRSHR, VRSRA */
+         if (shift_imm > 0) {
+            IRExpr *imm_val;
+            /* Build a 64-bit pattern with a 1 in the LSB of every
+               lane; the deliberate fall-throughs widen the pattern to
+               fill 64 bits.  It is used to isolate the rounding bit. */
+            imm = 1L;
+            switch (size) {
+               case 0:
+                  imm = (imm << 8) | imm;
+                  /* fall through */
+               case 1:
+                  imm = (imm << 16) | imm;
+                  /* fall through */
+               case 2:
+                  imm = (imm << 32) | imm;
+                  /* fall through */
+               case 3:
+                  break;
+               default:
+                  vassert(0);
+            }
+            if (Q) {
+               reg_m = newTemp(Ity_V128);
+               res = newTemp(Ity_V128);
+               imm_val = binop(Iop_64HLtoV128, mkU64(imm), mkU64(imm));
+               assign(reg_m, getQReg(mreg));
+               switch (size) {
+                  case 0:
+                     add = Iop_Add8x16;
+                     op = U ? Iop_ShrN8x16 : Iop_SarN8x16;
+                     break;
+                  case 1:
+                     add = Iop_Add16x8;
+                     op = U ? Iop_ShrN16x8 : Iop_SarN16x8;
+                     break;
+                  case 2:
+                     add = Iop_Add32x4;
+                     op = U ? Iop_ShrN32x4 : Iop_SarN32x4;
+                     break;
+                  case 3:
+                     add = Iop_Add64x2;
+                     op = U ? Iop_ShrN64x2 : Iop_SarN64x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               reg_m = newTemp(Ity_I64);
+               res = newTemp(Ity_I64);
+               imm_val = mkU64(imm);
+               assign(reg_m, getDRegI64(mreg));
+               switch (size) {
+                  case 0:
+                     add = Iop_Add8x8;
+                     op = U ? Iop_ShrN8x8 : Iop_SarN8x8;
+                     break;
+                  case 1:
+                     add = Iop_Add16x4;
+                     op = U ? Iop_ShrN16x4 : Iop_SarN16x4;
+                     break;
+                  case 2:
+                     add = Iop_Add32x2;
+                     op = U ? Iop_ShrN32x2 : Iop_SarN32x2;
+                     break;
+                  case 3:
+                     add = Iop_Add64;
+                     op = U ? Iop_Shr64 : Iop_Sar64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            /* Rounded shift: (m >> n) + ((m >> (n-1)) & 1) per lane;
+               the second term is the last bit shifted out. */
+            assign(res,
+                   binop(add,
+                         binop(op,
+                               mkexpr(reg_m),
+                               mkU8(shift_imm)),
+                         binop(Q ? Iop_AndV128 : Iop_And64,
+                               binop(op,
+                                     mkexpr(reg_m),
+                                     mkU8(shift_imm - 1)),
+                               imm_val)));
+         } else {
+            /* Shift by zero: result is just the source operand. */
+            if (Q) {
+               res = newTemp(Ity_V128);
+               assign(res, getQReg(mreg));
+            } else {
+               res = newTemp(Ity_I64);
+               assign(res, getDRegI64(mreg));
+            }
+         }
+         if (A == 3) {
+            /* VRSRA additionally accumulates into the destination. */
+            if (Q) {
+               putQReg(dreg, binop(add, mkexpr(res), getQReg(dreg)),
+                             condT);
+            } else {
+               putDRegI64(dreg, binop(add, mkexpr(res), getDRegI64(dreg)),
+                                condT);
+            }
+            DIP("vrsra.%c%u %c%u, %c%u, #%u\n",
+                U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
+         } else {
+            if (Q) {
+               putQReg(dreg, mkexpr(res), condT);
+            } else {
+               putDRegI64(dreg, mkexpr(res), condT);
+            }
+            DIP("vrshr.%c%u %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
+         }
+         return True;
+      case 1:
+      case 0:
+         /* VSHR, VSRA */
+         if (Q) {
+            reg_m = newTemp(Ity_V128);
+            assign(reg_m, getQReg(mreg));
+            res = newTemp(Ity_V128);
+         } else {
+            reg_m = newTemp(Ity_I64);
+            assign(reg_m, getDRegI64(mreg));
+            res = newTemp(Ity_I64);
+         }
+         /* U selects logical (unsigned) vs arithmetic (signed) shift. */
+         if (Q) {
+            switch (size) {
+               case 0:
+                  op = U ? Iop_ShrN8x16 : Iop_SarN8x16;
+                  add = Iop_Add8x16;
+                  break;
+               case 1:
+                  op = U ? Iop_ShrN16x8 : Iop_SarN16x8;
+                  add = Iop_Add16x8;
+                  break;
+               case 2:
+                  op = U ? Iop_ShrN32x4 : Iop_SarN32x4;
+                  add = Iop_Add32x4;
+                  break;
+               case 3:
+                  op = U ? Iop_ShrN64x2 : Iop_SarN64x2;
+                  add = Iop_Add64x2;
+                  break;
+               default:
+                  vassert(0);
+            }
+         } else {
+            switch (size) {
+               case 0:
+                  op =  U ? Iop_ShrN8x8 : Iop_SarN8x8;
+                  add = Iop_Add8x8;
+                  break;
+               case 1:
+                  op = U ? Iop_ShrN16x4 : Iop_SarN16x4;
+                  add = Iop_Add16x4;
+                  break;
+               case 2:
+                  op = U ? Iop_ShrN32x2 : Iop_SarN32x2;
+                  add = Iop_Add32x2;
+                  break;
+               case 3:
+                  op = U ? Iop_Shr64 : Iop_Sar64;
+                  add = Iop_Add64;
+                  break;
+               default:
+                  vassert(0);
+            }
+         }
+         assign(res, binop(op, mkexpr(reg_m), mkU8(shift_imm)));
+         if (A == 1) {
+            /* VSRA accumulates the shifted value into the destination. */
+            if (Q) {
+               putQReg(dreg, binop(add, mkexpr(res), getQReg(dreg)),
+                             condT);
+            } else {
+               putDRegI64(dreg, binop(add, mkexpr(res), getDRegI64(dreg)),
+                                condT);
+            }
+            DIP("vsra.%c%u %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
+                  Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
+         } else {
+            if (Q) {
+               putQReg(dreg, mkexpr(res), condT);
+            } else {
+               putDRegI64(dreg, mkexpr(res), condT);
+            }
+            DIP("vshr.%c%u %c%u, %c%u, #%u\n", U ? 'u' : 's', 8 << size,
+                  Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
+         }
+         return True;
+      case 4:
+         /* VSRI : shift-right and insert.  Only the U==1 encoding
+            exists. */
+         if (!U)
+            return False;
+         if (Q) {
+            res = newTemp(Ity_V128);
+            mask = newTemp(Ity_V128);
+         } else {
+            res = newTemp(Ity_I64);
+            mask = newTemp(Ity_I64);
+         }
+         switch (size) {
+            case 0: op = Q ? Iop_ShrN8x16 : Iop_ShrN8x8; break;
+            case 1: op = Q ? Iop_ShrN16x8 : Iop_ShrN16x4; break;
+            case 2: op = Q ? Iop_ShrN32x4 : Iop_ShrN32x2; break;
+            case 3: op = Q ? Iop_ShrN64x2 : Iop_Shr64; break;
+            default: vassert(0);
+         }
+         /* mask = all-ones shifted right; the destination keeps its
+            bits outside the mask and takes the shifted source within
+            it. */
+         if (Q) {
+            assign(mask, binop(op, binop(Iop_64HLtoV128,
+                                         mkU64(0xFFFFFFFFFFFFFFFFLL),
+                                         mkU64(0xFFFFFFFFFFFFFFFFLL)),
+                               mkU8(shift_imm)));
+            assign(res, binop(Iop_OrV128,
+                              binop(Iop_AndV128,
+                                    getQReg(dreg),
+                                    unop(Iop_NotV128,
+                                         mkexpr(mask))),
+                              binop(op,
+                                    getQReg(mreg),
+                                    mkU8(shift_imm))));
+            putQReg(dreg, mkexpr(res), condT);
+         } else {
+            assign(mask, binop(op, mkU64(0xFFFFFFFFFFFFFFFFLL),
+                               mkU8(shift_imm)));
+            assign(res, binop(Iop_Or64,
+                              binop(Iop_And64,
+                                    getDRegI64(dreg),
+                                    unop(Iop_Not64,
+                                         mkexpr(mask))),
+                              binop(op,
+                                    getDRegI64(mreg),
+                                    mkU8(shift_imm))));
+            putDRegI64(dreg, mkexpr(res), condT);
+         }
+         DIP("vsri.%u %c%u, %c%u, #%u\n",
+             8 << size, Q ? 'q' : 'd', dreg,
+             Q ? 'q' : 'd', mreg, shift_imm);
+         return True;
+      case 5:
+         if (U) {
+            /* VSLI : shift-left and insert. */
+            /* Left-shift forms encode shift as (imm6 - width); convert
+               to the actual left-shift amount. */
+            shift_imm = 8 * (1 << size) - shift_imm;
+            if (Q) {
+               res = newTemp(Ity_V128);
+               mask = newTemp(Ity_V128);
+            } else {
+               res = newTemp(Ity_I64);
+               mask = newTemp(Ity_I64);
+            }
+            switch (size) {
+               case 0: op = Q ? Iop_ShlN8x16 : Iop_ShlN8x8; break;
+               case 1: op = Q ? Iop_ShlN16x8 : Iop_ShlN16x4; break;
+               case 2: op = Q ? Iop_ShlN32x4 : Iop_ShlN32x2; break;
+               case 3: op = Q ? Iop_ShlN64x2 : Iop_Shl64; break;
+               default: vassert(0);
+            }
+            /* Same keep-outside/insert-inside scheme as VSRI, with a
+               left-shifted all-ones mask. */
+            if (Q) {
+               assign(mask, binop(op, binop(Iop_64HLtoV128,
+                                            mkU64(0xFFFFFFFFFFFFFFFFLL),
+                                            mkU64(0xFFFFFFFFFFFFFFFFLL)),
+                                  mkU8(shift_imm)));
+               assign(res, binop(Iop_OrV128,
+                                 binop(Iop_AndV128,
+                                       getQReg(dreg),
+                                       unop(Iop_NotV128,
+                                            mkexpr(mask))),
+                                 binop(op,
+                                       getQReg(mreg),
+                                       mkU8(shift_imm))));
+               putQReg(dreg, mkexpr(res), condT);
+            } else {
+               assign(mask, binop(op, mkU64(0xFFFFFFFFFFFFFFFFLL),
+                                  mkU8(shift_imm)));
+               assign(res, binop(Iop_Or64,
+                                 binop(Iop_And64,
+                                       getDRegI64(dreg),
+                                       unop(Iop_Not64,
+                                            mkexpr(mask))),
+                                 binop(op,
+                                       getDRegI64(mreg),
+                                       mkU8(shift_imm))));
+               putDRegI64(dreg, mkexpr(res), condT);
+            }
+            DIP("vsli.%u %c%u, %c%u, #%u\n",
+                8 << size, Q ? 'q' : 'd', dreg,
+                Q ? 'q' : 'd', mreg, shift_imm);
+            return True;
+         } else {
+            /* VSHL #imm */
+            /* Convert encoded value to the actual left-shift amount. */
+            shift_imm = 8 * (1 << size) - shift_imm;
+            if (Q) {
+               res = newTemp(Ity_V128);
+            } else {
+               res = newTemp(Ity_I64);
+            }
+            switch (size) {
+               case 0: op = Q ? Iop_ShlN8x16 : Iop_ShlN8x8; break;
+               case 1: op = Q ? Iop_ShlN16x8 : Iop_ShlN16x4; break;
+               case 2: op = Q ? Iop_ShlN32x4 : Iop_ShlN32x2; break;
+               case 3: op = Q ? Iop_ShlN64x2 : Iop_Shl64; break;
+               default: vassert(0);
+            }
+            assign(res, binop(op, Q ? getQReg(mreg) : getDRegI64(mreg),
+                     mkU8(shift_imm)));
+            if (Q) {
+               putQReg(dreg, mkexpr(res), condT);
+            } else {
+               putDRegI64(dreg, mkexpr(res), condT);
+            }
+            DIP("vshl.i%u %c%u, %c%u, #%u\n",
+                8 << size, Q ? 'q' : 'd', dreg,
+                Q ? 'q' : 'd', mreg, shift_imm);
+            return True;
+         }
+         break;
+      case 6:
+      case 7:
+         /* VQSHL, VQSHLU : saturating left shifts.  op is the
+            saturating shift; op_rev is the plain reverse shift used
+            below to detect whether saturation occurred. */
+         shift_imm = 8 * (1 << size) - shift_imm;
+         if (U) {
+            if (A & 1) {
+               /* VQSHL.U<size> : unsigned-in, unsigned-saturate-out. */
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_QShlNsatUU8x16 : Iop_QShlNsatUU8x8;
+                     op_rev = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_QShlNsatUU16x8 : Iop_QShlNsatUU16x4;
+                     op_rev = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_QShlNsatUU32x4 : Iop_QShlNsatUU32x2;
+                     op_rev = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     break;
+                  case 3:
+                     op = Q ? Iop_QShlNsatUU64x2 : Iop_QShlNsatUU64x1;
+                     op_rev = Q ? Iop_ShrN64x2 : Iop_Shr64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+               DIP("vqshl.u%u %c%u, %c%u, #%u\n",
+                   8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
+            } else {
+               /* VQSHLU : signed-in, unsigned-saturate-out. */
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_QShlNsatSU8x16 : Iop_QShlNsatSU8x8;
+                     op_rev = Q ? Iop_ShrN8x16 : Iop_ShrN8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_QShlNsatSU16x8 : Iop_QShlNsatSU16x4;
+                     op_rev = Q ? Iop_ShrN16x8 : Iop_ShrN16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_QShlNsatSU32x4 : Iop_QShlNsatSU32x2;
+                     op_rev = Q ? Iop_ShrN32x4 : Iop_ShrN32x2;
+                     break;
+                  case 3:
+                     op = Q ? Iop_QShlNsatSU64x2 : Iop_QShlNsatSU64x1;
+                     op_rev = Q ? Iop_ShrN64x2 : Iop_Shr64;
+                     break;
+                  default:
+                     vassert(0);
+               }
+               DIP("vqshlu.s%u %c%u, %c%u, #%u\n",
+                   8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
+            }
+         } else {
+            if (!(A & 1))
+               return False;
+            /* VQSHL.S<size> : signed-in, signed-saturate-out. */
+            switch (size) {
+               case 0:
+                  op = Q ? Iop_QShlNsatSS8x16 : Iop_QShlNsatSS8x8;
+                  op_rev = Q ? Iop_SarN8x16 : Iop_SarN8x8;
+                  break;
+               case 1:
+                  op = Q ? Iop_QShlNsatSS16x8 : Iop_QShlNsatSS16x4;
+                  op_rev = Q ? Iop_SarN16x8 : Iop_SarN16x4;
+                  break;
+               case 2:
+                  op = Q ? Iop_QShlNsatSS32x4 : Iop_QShlNsatSS32x2;
+                  op_rev = Q ? Iop_SarN32x4 : Iop_SarN32x2;
+                  break;
+               case 3:
+                  op = Q ? Iop_QShlNsatSS64x2 : Iop_QShlNsatSS64x1;
+                  op_rev = Q ? Iop_SarN64x2 : Iop_Sar64;
+                  break;
+               default:
+                  vassert(0);
+            }
+            DIP("vqshl.s%u %c%u, %c%u, #%u\n",
+                8 << size,
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg, shift_imm);
+         }
+         /* NOTE(review): 'tmp' was declared UInt but is assigned an
+            IRTemp here; this relies on IRTemp being an integral
+            typedef -- confirm against libvex_ir.h. */
+         if (Q) {
+            tmp = newTemp(Ity_V128);
+            res = newTemp(Ity_V128);
+            reg_m = newTemp(Ity_V128);
+            assign(reg_m, getQReg(mreg));
+         } else {
+            tmp = newTemp(Ity_I64);
+            res = newTemp(Ity_I64);
+            reg_m = newTemp(Ity_I64);
+            assign(reg_m, getDRegI64(mreg));
+         }
+         /* Saturation check: shift the result back and compare with
+            the source; a mismatch sets the QC flag. */
+         assign(res, binop(op, mkexpr(reg_m), mkU8(shift_imm)));
+         assign(tmp, binop(op_rev, mkexpr(res), mkU8(shift_imm)));
+         setFlag_QC(mkexpr(tmp), mkexpr(reg_m), Q, condT);
+         if (Q)
+            putQReg(dreg, mkexpr(res), condT);
+         else
+            putDRegI64(dreg, mkexpr(res), condT);
+         return True;
+      case 8:
+         if (!U) {
+            if (L == 1)
+               return False;
+            /* Narrowing: source is a Q register, so element size is
+               one step wider than encoded, and mreg must be an even
+               D number. */
+            size++;
+            dreg = ((theInstr >> 18) & 0x10) | ((theInstr >> 12) & 0xF);
+            mreg = ((theInstr >> 1) & 0x10) | (theInstr & 0xF);
+            if (mreg & 1)
+               return False;
+            mreg >>= 1;
+            if (!B) {
+               /* VSHRN*/
+               IROp narOp;
+               reg_m = newTemp(Ity_V128);
+               assign(reg_m, getQReg(mreg));
+               res = newTemp(Ity_I64);
+               switch (size) {
+                  case 1:
+                     op = Iop_ShrN16x8;
+                     narOp = Iop_NarrowUn16to8x8;
+                     break;
+                  case 2:
+                     op = Iop_ShrN32x4;
+                     narOp = Iop_NarrowUn32to16x4;
+                     break;
+                  case 3:
+                     op = Iop_ShrN64x2;
+                     narOp = Iop_NarrowUn64to32x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+               assign(res, unop(narOp,
+                                binop(op,
+                                      mkexpr(reg_m),
+                                      mkU8(shift_imm))));
+               putDRegI64(dreg, mkexpr(res), condT);
+               DIP("vshrn.i%u d%u, q%u, #%u\n", 8 << size, dreg, mreg,
+                   shift_imm);
+               return True;
+            } else {
+               /* VRSHRN   */
+               IROp addOp, shOp, narOp;
+               IRExpr *imm_val;
+               reg_m = newTemp(Ity_V128);
+               assign(reg_m, getQReg(mreg));
+               res = newTemp(Ity_I64);
+               /* Per-lane LSB mask, as in VRSHR above (deliberate
+                  fall-throughs). */
+               imm = 1L;
+               switch (size) {
+                  case 0: imm = (imm <<  8) | imm; /* fall through */
+                  case 1: imm = (imm << 16) | imm; /* fall through */
+                  case 2: imm = (imm << 32) | imm; /* fall through */
+                  case 3: break;
+                  default: vassert(0);
+               }
+               imm_val = binop(Iop_64HLtoV128, mkU64(imm), mkU64(imm));
+               switch (size) {
+                  case 1:
+                     addOp = Iop_Add16x8;
+                     shOp = Iop_ShrN16x8;
+                     narOp = Iop_NarrowUn16to8x8;
+                     break;
+                  case 2:
+                     addOp = Iop_Add32x4;
+                     shOp = Iop_ShrN32x4;
+                     narOp = Iop_NarrowUn32to16x4;
+                     break;
+                  case 3:
+                     addOp = Iop_Add64x2;
+                     shOp = Iop_ShrN64x2;
+                     narOp = Iop_NarrowUn64to32x2;
+                     break;
+                  default:
+                     vassert(0);
+               }
+               /* Rounded shift then narrow each lane. */
+               assign(res, unop(narOp,
+                                binop(addOp,
+                                      binop(shOp,
+                                            mkexpr(reg_m),
+                                            mkU8(shift_imm)),
+                                      binop(Iop_AndV128,
+                                            binop(shOp,
+                                                  mkexpr(reg_m),
+                                                  mkU8(shift_imm - 1)),
+                                            imm_val))));
+               putDRegI64(dreg, mkexpr(res), condT);
+               if (shift_imm == 0) {
+                  DIP("vmov%u d%u, q%u, #%u\n", 8 << size, dreg, mreg,
+                      shift_imm);
+               } else {
+                  DIP("vrshrn.i%u d%u, q%u, #%u\n", 8 << size, dreg, mreg,
+                      shift_imm);
+               }
+               return True;
+            }
+         } else {
+            /* fall through */
+         }
+      /* Reached directly (A==9) or by falling through from case 8
+         when U==1: saturating narrowing shifts. */
+      case 9:
+         dreg = ((theInstr >> 18) & 0x10) | ((theInstr >> 12) & 0xF);
+         mreg = ((theInstr >>  1) & 0x10) | (theInstr & 0xF);
+         if (mreg & 1)
+            return False;
+         mreg >>= 1;
+         size++;
+         if ((theInstr >> 8) & 1) {
+            /* VQSHRN / VQRSHRN : saturate to the input's signedness.
+               cvt narrows, cvt2 re-widens for the saturation check. */
+            switch (size) {
+               case 1:
+                  op = U ? Iop_ShrN16x8 : Iop_SarN16x8;
+                  cvt = U ? Iop_QNarrowUn16Uto8Ux8 : Iop_QNarrowUn16Sto8Sx8;
+                  cvt2 = U ? Iop_Widen8Uto16x8 : Iop_Widen8Sto16x8;
+                  break;
+               case 2:
+                  op = U ? Iop_ShrN32x4 : Iop_SarN32x4;
+                  cvt = U ? Iop_QNarrowUn32Uto16Ux4 : Iop_QNarrowUn32Sto16Sx4;
+                  cvt2 = U ? Iop_Widen16Uto32x4 : Iop_Widen16Sto32x4;
+                  break;
+               case 3:
+                  op = U ? Iop_ShrN64x2 : Iop_SarN64x2;
+                  cvt = U ? Iop_QNarrowUn64Uto32Ux2 : Iop_QNarrowUn64Sto32Sx2;
+                  cvt2 = U ? Iop_Widen32Uto64x2 : Iop_Widen32Sto64x2;
+                  break;
+               default:
+                  vassert(0);
+            }
+            DIP("vq%sshrn.%c%u d%u, q%u, #%u\n", B ? "r" : "",
+                U ? 'u' : 's', 8 << size, dreg, mreg, shift_imm);
+         } else {
+            /* VQSHRUN / VQRSHRUN : signed input, unsigned saturated
+               output; only the U==1 encoding reaches here. */
+            vassert(U);
+            switch (size) {
+               case 1:
+                  op = Iop_SarN16x8;
+                  cvt = Iop_QNarrowUn16Sto8Ux8;
+                  cvt2 = Iop_Widen8Uto16x8;
+                  break;
+               case 2:
+                  op = Iop_SarN32x4;
+                  cvt = Iop_QNarrowUn32Sto16Ux4;
+                  cvt2 = Iop_Widen16Uto32x4;
+                  break;
+               case 3:
+                  op = Iop_SarN64x2;
+                  cvt = Iop_QNarrowUn64Sto32Ux2;
+                  cvt2 = Iop_Widen32Uto64x2;
+                  break;
+               default:
+                  vassert(0);
+            }
+            DIP("vq%sshrun.s%u d%u, q%u, #%u\n", B ? "r" : "",
+                8 << size, dreg, mreg, shift_imm);
+         }
+         if (B) {
+            /* Rounding variant: per-lane LSB mask and lane-wise add,
+               as in VRSHRN above. */
+            if (shift_imm > 0) {
+               imm = 1;
+               switch (size) {
+                  case 1: imm = (imm << 16) | imm; /* fall through */
+                  case 2: imm = (imm << 32) | imm; /* fall through */
+                  case 3: break;
+                  case 0: default: vassert(0);
+               }
+               switch (size) {
+                  case 1: add = Iop_Add16x8; break;
+                  case 2: add = Iop_Add32x4; break;
+                  case 3: add = Iop_Add64x2; break;
+                  case 0: default: vassert(0);
+               }
+            }
+         }
+         reg_m = newTemp(Ity_V128);
+         res = newTemp(Ity_V128);
+         assign(reg_m, getQReg(mreg));
+         if (B) {
+            /* VQRSHRN, VQRSHRUN */
+            assign(res, binop(add,
+                              binop(op, mkexpr(reg_m), mkU8(shift_imm)),
+                              binop(Iop_AndV128,
+                                    binop(op,
+                                          mkexpr(reg_m),
+                                          mkU8(shift_imm - 1)),
+                                    mkU128(imm))));
+         } else {
+            /* VQSHRN, VQSHRUN */
+            assign(res, binop(op, mkexpr(reg_m), mkU8(shift_imm)));
+         }
+         /* QC is set if narrow-then-rewiden does not round-trip,
+            i.e. saturation occurred in some lane. */
+         setFlag_QC(unop(cvt2, unop(cvt, mkexpr(res))), mkexpr(res),
+                    True, condT);
+         putDRegI64(dreg, unop(cvt, mkexpr(res)), condT);
+         return True;
+      case 10:
+         /* VSHLL
+            VMOVL ::= VSHLL #0 */
+         if (B)
+            return False;
+         /* Destination is a Q register: its D number must be even. */
+         if (dreg & 1)
+            return False;
+         dreg >>= 1;
+         shift_imm = (8 << size) - shift_imm;
+         res = newTemp(Ity_V128);
+         switch (size) {
+            case 0:
+               op = Iop_ShlN16x8;
+               cvt = U ? Iop_Widen8Uto16x8 : Iop_Widen8Sto16x8;
+               break;
+            case 1:
+               op = Iop_ShlN32x4;
+               cvt = U ? Iop_Widen16Uto32x4 : Iop_Widen16Sto32x4;
+               break;
+            case 2:
+               op = Iop_ShlN64x2;
+               cvt = U ? Iop_Widen32Uto64x2 : Iop_Widen32Sto64x2;
+               break;
+            case 3:
+               return False;
+            default:
+               vassert(0);
+         }
+         /* Widen each lane first, then shift left in the wide type. */
+         assign(res, binop(op, unop(cvt, getDRegI64(mreg)), mkU8(shift_imm)));
+         putQReg(dreg, mkexpr(res), condT);
+         if (shift_imm == 0) {
+            DIP("vmovl.%c%u q%u, d%u\n", U ? 'u' : 's', 8 << size,
+                dreg, mreg);
+         } else {
+            DIP("vshll.%c%u q%u, d%u, #%u\n", U ? 'u' : 's', 8 << size,
+                dreg, mreg, shift_imm);
+         }
+         return True;
+      case 14:
+      case 15:
+         /* VCVT floating-point <-> fixed-point */
+         if ((theInstr >> 8) & 1) {
+            /* float -> fixed, round towards zero. */
+            if (U) {
+               op = Q ? Iop_F32ToFixed32Ux4_RZ : Iop_F32ToFixed32Ux2_RZ;
+            } else {
+               op = Q ? Iop_F32ToFixed32Sx4_RZ : Iop_F32ToFixed32Sx2_RZ;
+            }
+            DIP("vcvt.%c32.f32 %c%u, %c%u, #%u\n", U ? 'u' : 's',
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg,
+                64 - ((theInstr >> 16) & 0x3f));
+         } else {
+            /* fixed -> float, round to nearest. */
+            if (U) {
+               op = Q ? Iop_Fixed32UToF32x4_RN : Iop_Fixed32UToF32x2_RN;
+            } else {
+               op = Q ? Iop_Fixed32SToF32x4_RN : Iop_Fixed32SToF32x2_RN;
+            }
+            DIP("vcvt.f32.%c32 %c%u, %c%u, #%u\n", U ? 'u' : 's',
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg,
+                64 - ((theInstr >> 16) & 0x3f));
+         }
+         /* Only imm6 with bit 5 set (32-bit element) is defined. */
+         if (((theInstr >> 21) & 1) == 0)
+            return False;
+         if (Q) {
+            putQReg(dreg, binop(op, getQReg(mreg),
+                     mkU8(64 - ((theInstr >> 16) & 0x3f))), condT);
+         } else {
+            putDRegI64(dreg, binop(op, getDRegI64(mreg),
+                       mkU8(64 - ((theInstr >> 16) & 0x3f))), condT);
+         }
+         return True;
+      default:
+         return False;
+
+   }
+   return False;
+}
+
+/* A7.4.5 Two registers, miscellaneous */
+static
+Bool dis_neon_data_2reg_misc ( UInt theInstr, IRTemp condT )
+{
+   UInt A = (theInstr >> 16) & 3;
+   UInt B = (theInstr >> 6) & 0x1f;
+   UInt Q = (theInstr >> 6) & 1;
+   UInt U = (theInstr >> 24) & 1;
+   UInt size = (theInstr >> 18) & 3;
+   UInt dreg = get_neon_d_regno(theInstr);
+   UInt mreg = get_neon_m_regno(theInstr);
+   UInt F = (theInstr >> 10) & 1;
+   IRTemp arg_d = IRTemp_INVALID;
+   IRTemp arg_m = IRTemp_INVALID;
+   IRTemp res = IRTemp_INVALID;
+   switch (A) {
+      case 0:
+         if (Q) {
+            arg_m = newTemp(Ity_V128);
+            res = newTemp(Ity_V128);
+            assign(arg_m, getQReg(mreg));
+         } else {
+            arg_m = newTemp(Ity_I64);
+            res = newTemp(Ity_I64);
+            assign(arg_m, getDRegI64(mreg));
+         }
+         switch (B >> 1) {
+            case 0: {
+               /* VREV64 */
+               IROp op;
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_Reverse8sIn64_x2 : Iop_Reverse8sIn64_x1;
+                     break;
+                  case 1:
+                     op = Q ? Iop_Reverse16sIn64_x2 : Iop_Reverse16sIn64_x1;
+                     break;
+                  case 2:
+                     op = Q ? Iop_Reverse32sIn64_x2 : Iop_Reverse32sIn64_x1;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+               assign(res, unop(op, mkexpr(arg_m)));
+               DIP("vrev64.%u %c%u, %c%u\n", 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 1: {
+               /* VREV32 */
+               IROp op;
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_Reverse8sIn32_x4 : Iop_Reverse8sIn32_x2;
+                     break;
+                  case 1:
+                     op = Q ? Iop_Reverse16sIn32_x4 : Iop_Reverse16sIn32_x2;
+                     break;
+                  case 2:
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+               assign(res, unop(op, mkexpr(arg_m)));
+               DIP("vrev32.%u %c%u, %c%u\n", 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 2: {
+               /* VREV16 */
+               IROp op;
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_Reverse8sIn16_x8 : Iop_Reverse8sIn16_x4;
+                     break;
+                  case 1:
+                  case 2:
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+               assign(res, unop(op, mkexpr(arg_m)));
+               DIP("vrev16.%u %c%u, %c%u\n", 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 3:
+               return False;
+            case 4:
+            case 5: {
+               /* VPADDL */
+               IROp op;
+               U = (theInstr >> 7) & 1;
+               if (Q) {
+                  switch (size) {
+                     case 0: op = U ? Iop_PwAddL8Ux16 : Iop_PwAddL8Sx16; break;
+                     case 1: op = U ? Iop_PwAddL16Ux8 : Iop_PwAddL16Sx8; break;
+                     case 2: op = U ? Iop_PwAddL32Ux4 : Iop_PwAddL32Sx4; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+               } else {
+                  switch (size) {
+                     case 0: op = U ? Iop_PwAddL8Ux8  : Iop_PwAddL8Sx8;  break;
+                     case 1: op = U ? Iop_PwAddL16Ux4 : Iop_PwAddL16Sx4; break;
+                     case 2: op = U ? Iop_PwAddL32Ux2 : Iop_PwAddL32Sx2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+               }
+               assign(res, unop(op, mkexpr(arg_m)));
+               DIP("vpaddl.%c%u %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 6:
+            case 7:
+               return False;
+            case 8: {
+               /* VCLS */
+               IROp op;
+               switch (size) {
+                  case 0: op = Q ? Iop_Cls8x16 : Iop_Cls8x8; break;
+                  case 1: op = Q ? Iop_Cls16x8 : Iop_Cls16x4; break;
+                  case 2: op = Q ? Iop_Cls32x4 : Iop_Cls32x2; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+               assign(res, unop(op, mkexpr(arg_m)));
+               DIP("vcls.s%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+                   Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 9: {
+               /* VCLZ */
+               IROp op;
+               switch (size) {
+                  case 0: op = Q ? Iop_Clz8x16 : Iop_Clz8x8; break;
+                  case 1: op = Q ? Iop_Clz16x8 : Iop_Clz16x4; break;
+                  case 2: op = Q ? Iop_Clz32x4 : Iop_Clz32x2; break;
+                  case 3: return False;
+                  default: vassert(0);
+               }
+               assign(res, unop(op, mkexpr(arg_m)));
+               DIP("vclz.i%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+                   Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 10:
+               /* VCNT */
+               assign(res, unop(Q ? Iop_Cnt8x16 : Iop_Cnt8x8, mkexpr(arg_m)));
+               DIP("vcnt.8 %c%u, %c%u\n", Q ? 'q' : 'd', dreg, Q ? 'q' : 'd',
+                   mreg);
+               break;
+            case 11:
+               /* VMVN */
+               if (Q)
+                  assign(res, unop(Iop_NotV128, mkexpr(arg_m)));
+               else
+                  assign(res, unop(Iop_Not64, mkexpr(arg_m)));
+               DIP("vmvn %c%u, %c%u\n", Q ? 'q' : 'd', dreg, Q ? 'q' : 'd',
+                   mreg);
+               break;
+            case 12:
+            case 13: {
+               /* VPADAL */
+               IROp op, add_op;
+               U = (theInstr >> 7) & 1;
+               if (Q) {
+                  switch (size) {
+                     case 0:
+                        op = U ? Iop_PwAddL8Ux16 : Iop_PwAddL8Sx16;
+                        add_op = Iop_Add16x8;
+                        break;
+                     case 1:
+                        op = U ? Iop_PwAddL16Ux8 : Iop_PwAddL16Sx8;
+                        add_op = Iop_Add32x4;
+                        break;
+                     case 2:
+                        op = U ? Iop_PwAddL32Ux4 : Iop_PwAddL32Sx4;
+                        add_op = Iop_Add64x2;
+                        break;
+                     case 3:
+                        return False;
+                     default:
+                        vassert(0);
+                  }
+               } else {
+                  switch (size) {
+                     case 0:
+                        op = U ? Iop_PwAddL8Ux8 : Iop_PwAddL8Sx8;
+                        add_op = Iop_Add16x4;
+                        break;
+                     case 1:
+                        op = U ? Iop_PwAddL16Ux4 : Iop_PwAddL16Sx4;
+                        add_op = Iop_Add32x2;
+                        break;
+                     case 2:
+                        op = U ? Iop_PwAddL32Ux2 : Iop_PwAddL32Sx2;
+                        add_op = Iop_Add64;
+                        break;
+                     case 3:
+                        return False;
+                     default:
+                        vassert(0);
+                  }
+               }
+               if (Q) {
+                  arg_d = newTemp(Ity_V128);
+                  assign(arg_d, getQReg(dreg));
+               } else {
+                  arg_d = newTemp(Ity_I64);
+                  assign(arg_d, getDRegI64(dreg));
+               }
+               assign(res, binop(add_op, unop(op, mkexpr(arg_m)),
+                                         mkexpr(arg_d)));
+               DIP("vpadal.%c%u %c%u, %c%u\n", U ? 'u' : 's', 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 14: {
+               /* VQABS */
+               IROp op_sub, op_qsub, op_cmp;
+               IRTemp mask, tmp;
+               IRExpr *zero1, *zero2;
+               IRExpr *neg, *neg2;
+               if (Q) {
+                  zero1 = binop(Iop_64HLtoV128, mkU64(0), mkU64(0));
+                  zero2 = binop(Iop_64HLtoV128, mkU64(0), mkU64(0));
+                  mask = newTemp(Ity_V128);
+                  tmp = newTemp(Ity_V128);
+               } else {
+                  zero1 = mkU64(0);
+                  zero2 = mkU64(0);
+                  mask = newTemp(Ity_I64);
+                  tmp = newTemp(Ity_I64);
+               }
+               switch (size) {
+                  case 0:
+                     op_sub = Q ? Iop_Sub8x16 : Iop_Sub8x8;
+                     op_qsub = Q ? Iop_QSub8Sx16 : Iop_QSub8Sx8;
+                     op_cmp = Q ? Iop_CmpGT8Sx16 : Iop_CmpGT8Sx8;
+                     break;
+                  case 1:
+                     op_sub = Q ? Iop_Sub16x8 : Iop_Sub16x4;
+                     op_qsub = Q ? Iop_QSub16Sx8 : Iop_QSub16Sx4;
+                     op_cmp = Q ? Iop_CmpGT16Sx8 : Iop_CmpGT16Sx4;
+                     break;
+                  case 2:
+                     op_sub = Q ? Iop_Sub32x4 : Iop_Sub32x2;
+                     op_qsub = Q ? Iop_QSub32Sx4 : Iop_QSub32Sx2;
+                     op_cmp = Q ? Iop_CmpGT32Sx4 : Iop_CmpGT32Sx2;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+               assign(mask, binop(op_cmp, mkexpr(arg_m), zero1));
+               neg = binop(op_qsub, zero2, mkexpr(arg_m));
+               neg2 = binop(op_sub, zero2, mkexpr(arg_m));
+               assign(res, binop(Q ? Iop_OrV128 : Iop_Or64,
+                                 binop(Q ? Iop_AndV128 : Iop_And64,
+                                       mkexpr(mask),
+                                       mkexpr(arg_m)),
+                                 binop(Q ? Iop_AndV128 : Iop_And64,
+                                       unop(Q ? Iop_NotV128 : Iop_Not64,
+                                            mkexpr(mask)),
+                                       neg)));
+               assign(tmp, binop(Q ? Iop_OrV128 : Iop_Or64,
+                                 binop(Q ? Iop_AndV128 : Iop_And64,
+                                       mkexpr(mask),
+                                       mkexpr(arg_m)),
+                                 binop(Q ? Iop_AndV128 : Iop_And64,
+                                       unop(Q ? Iop_NotV128 : Iop_Not64,
+                                            mkexpr(mask)),
+                                       neg2)));
+               setFlag_QC(mkexpr(res), mkexpr(tmp), Q, condT);
+               DIP("vqabs.s%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+                   Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 15: {
+               /* VQNEG */
+               IROp op, op2;
+               IRExpr *zero;
+               if (Q) {
+                  zero = binop(Iop_64HLtoV128, mkU64(0), mkU64(0));
+               } else {
+                  zero = mkU64(0);
+               }
+               switch (size) {
+                  case 0:
+                     op = Q ? Iop_QSub8Sx16 : Iop_QSub8Sx8;
+                     op2 = Q ? Iop_Sub8x16 : Iop_Sub8x8;
+                     break;
+                  case 1:
+                     op = Q ? Iop_QSub16Sx8 : Iop_QSub16Sx4;
+                     op2 = Q ? Iop_Sub16x8 : Iop_Sub16x4;
+                     break;
+                  case 2:
+                     op = Q ? Iop_QSub32Sx4 : Iop_QSub32Sx2;
+                     op2 = Q ? Iop_Sub32x4 : Iop_Sub32x2;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+               assign(res, binop(op, zero, mkexpr(arg_m)));
+               setFlag_QC(mkexpr(res), binop(op2, zero, mkexpr(arg_m)),
+                          Q, condT);
+               DIP("vqneg.s%u %c%u, %c%u\n", 8 << size, Q ? 'q' : 'd', dreg,
+                   Q ? 'q' : 'd', mreg);
+               break;
+            }
+            default:
+               vassert(0);
+         }
+         if (Q) {
+            putQReg(dreg, mkexpr(res), condT);
+         } else {
+            putDRegI64(dreg, mkexpr(res), condT);
+         }
+         return True;
+      case 1:
+         if (Q) {
+            arg_m = newTemp(Ity_V128);
+            res = newTemp(Ity_V128);
+            assign(arg_m, getQReg(mreg));
+         } else {
+            arg_m = newTemp(Ity_I64);
+            res = newTemp(Ity_I64);
+            assign(arg_m, getDRegI64(mreg));
+         }
+         switch ((B >> 1) & 0x7) {
+            case 0: {
+               /* VCGT #0 */
+               IRExpr *zero;
+               IROp op;
+               if (Q) {
+                  zero = binop(Iop_64HLtoV128, mkU64(0), mkU64(0));
+               } else {
+                  zero = mkU64(0);
+               }
+               if (F) {
+                  switch (size) {
+                     case 0: case 1: case 3: return False;
+                     case 2: op = Q ? Iop_CmpGT32Fx4 : Iop_CmpGT32Fx2; break;
+                     default: vassert(0);
+                  }
+               } else {
+                  switch (size) {
+                     case 0: op = Q ? Iop_CmpGT8Sx16 : Iop_CmpGT8Sx8; break;
+                     case 1: op = Q ? Iop_CmpGT16Sx8 : Iop_CmpGT16Sx4; break;
+                     case 2: op = Q ? Iop_CmpGT32Sx4 : Iop_CmpGT32Sx2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+               }
+               assign(res, binop(op, mkexpr(arg_m), zero));
+               DIP("vcgt.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 1: {
+               /* VCGE #0 */
+               IROp op;
+               IRExpr *zero;
+               if (Q) {
+                  zero = binop(Iop_64HLtoV128, mkU64(0), mkU64(0));
+               } else {
+                  zero = mkU64(0);
+               }
+               if (F) {
+                  switch (size) {
+                     case 0: case 1: case 3: return False;
+                     case 2: op = Q ? Iop_CmpGE32Fx4 : Iop_CmpGE32Fx2; break;
+                     default: vassert(0);
+                  }
+                  assign(res, binop(op, mkexpr(arg_m), zero));
+               } else {
+                  switch (size) {
+                     case 0: op = Q ? Iop_CmpGT8Sx16 : Iop_CmpGT8Sx8; break;
+                     case 1: op = Q ? Iop_CmpGT16Sx8 : Iop_CmpGT16Sx4; break;
+                     case 2: op = Q ? Iop_CmpGT32Sx4 : Iop_CmpGT32Sx2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+                  assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
+                                   binop(op, zero, mkexpr(arg_m))));
+               }
+               DIP("vcge.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 2: {
+               /* VCEQ #0 */
+               IROp op;
+               IRExpr *zero;
+               if (F) {
+                  if (Q) {
+                     zero = binop(Iop_64HLtoV128, mkU64(0), mkU64(0));
+                  } else {
+                     zero = mkU64(0);
+                  }
+                  switch (size) {
+                     case 0: case 1: case 3: return False;
+                     case 2: op = Q ? Iop_CmpEQ32Fx4 : Iop_CmpEQ32Fx2; break;
+                     default: vassert(0);
+                  }
+                  assign(res, binop(op, zero, mkexpr(arg_m)));
+               } else {
+                  switch (size) {
+                     case 0: op = Q ? Iop_CmpNEZ8x16 : Iop_CmpNEZ8x8; break;
+                     case 1: op = Q ? Iop_CmpNEZ16x8 : Iop_CmpNEZ16x4; break;
+                     case 2: op = Q ? Iop_CmpNEZ32x4 : Iop_CmpNEZ32x2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+                  assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
+                                   unop(op, mkexpr(arg_m))));
+               }
+               DIP("vceq.%c%u %c%u, %c%u, #0\n", F ? 'f' : 'i', 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 3: {
+               /* VCLE #0 */
+               IRExpr *zero;
+               IROp op;
+               if (Q) {
+                  zero = binop(Iop_64HLtoV128, mkU64(0), mkU64(0));
+               } else {
+                  zero = mkU64(0);
+               }
+               if (F) {
+                  switch (size) {
+                     case 0: case 1: case 3: return False;
+                     case 2: op = Q ? Iop_CmpGE32Fx4 : Iop_CmpGE32Fx2; break;
+                     default: vassert(0);
+                  }
+                  assign(res, binop(op, zero, mkexpr(arg_m)));
+               } else {
+                  switch (size) {
+                     case 0: op = Q ? Iop_CmpGT8Sx16 : Iop_CmpGT8Sx8; break;
+                     case 1: op = Q ? Iop_CmpGT16Sx8 : Iop_CmpGT16Sx4; break;
+                     case 2: op = Q ? Iop_CmpGT32Sx4 : Iop_CmpGT32Sx2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+                  assign(res, unop(Q ? Iop_NotV128 : Iop_Not64,
+                                   binop(op, mkexpr(arg_m), zero)));
+               }
+               DIP("vcle.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 4: {
+               /* VCLT #0 */
+               IROp op;
+               IRExpr *zero;
+               if (Q) {
+                  zero = binop(Iop_64HLtoV128, mkU64(0), mkU64(0));
+               } else {
+                  zero = mkU64(0);
+               }
+               if (F) {
+                  switch (size) {
+                     case 0: case 1: case 3: return False;
+                     case 2: op = Q ? Iop_CmpGT32Fx4 : Iop_CmpGT32Fx2; break;
+                     default: vassert(0);
+                  }
+                  assign(res, binop(op, zero, mkexpr(arg_m)));
+               } else {
+                  switch (size) {
+                     case 0: op = Q ? Iop_CmpGT8Sx16 : Iop_CmpGT8Sx8; break;
+                     case 1: op = Q ? Iop_CmpGT16Sx8 : Iop_CmpGT16Sx4; break;
+                     case 2: op = Q ? Iop_CmpGT32Sx4 : Iop_CmpGT32Sx2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+                  assign(res, binop(op, zero, mkexpr(arg_m)));
+               }
+               DIP("vclt.%c%u %c%u, %c%u, #0\n", F ? 'f' : 's', 8 << size,
+                   Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 5:
+               return False;
+            case 6: {
+               /* VABS */
+               if (!F) {
+                  IROp op;
+                  switch(size) {
+                     case 0: op = Q ? Iop_Abs8x16 : Iop_Abs8x8; break;
+                     case 1: op = Q ? Iop_Abs16x8 : Iop_Abs16x4; break;
+                     case 2: op = Q ? Iop_Abs32x4 : Iop_Abs32x2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+                  assign(res, unop(op, mkexpr(arg_m)));
+               } else {
+                  assign(res, unop(Q ? Iop_Abs32Fx4 : Iop_Abs32Fx2,
+                                   mkexpr(arg_m)));
+               }
+               DIP("vabs.%c%u %c%u, %c%u\n",
+                   F ? 'f' : 's', 8 << size, Q ? 'q' : 'd', dreg,
+                   Q ? 'q' : 'd', mreg);
+               break;
+            }
+            case 7: {
+               /* VNEG */
+               IROp op;
+               IRExpr *zero;
+               if (F) {
+                  switch (size) {
+                     case 0: case 1: case 3: return False;
+                     case 2: op = Q ? Iop_Neg32Fx4 : Iop_Neg32Fx2; break;
+                     default: vassert(0);
+                  }
+                  assign(res, unop(op, mkexpr(arg_m)));
+               } else {
+                  if (Q) {
+                     zero = binop(Iop_64HLtoV128, mkU64(0), mkU64(0));
+                  } else {
+                     zero = mkU64(0);
+                  }
+                  switch (size) {
+                     case 0: op = Q ? Iop_Sub8x16 : Iop_Sub8x8; break;
+                     case 1: op = Q ? Iop_Sub16x8 : Iop_Sub16x4; break;
+                     case 2: op = Q ? Iop_Sub32x4 : Iop_Sub32x2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+                  assign(res, binop(op, zero, mkexpr(arg_m)));
+               }
+               DIP("vneg.%c%u %c%u, %c%u\n",
+                   F ? 'f' : 's', 8 << size, Q ? 'q' : 'd', dreg,
+                   Q ? 'q' : 'd', mreg);
+               break;
+            }
+            default:
+               vassert(0);
+         }
+         if (Q) {
+            putQReg(dreg, mkexpr(res), condT);
+         } else {
+            putDRegI64(dreg, mkexpr(res), condT);
+         }
+         return True;
+      case 2:
+         if ((B >> 1) == 0) {
+            /* VSWP */
+            if (Q) {
+               arg_m = newTemp(Ity_V128);
+               assign(arg_m, getQReg(mreg));
+               putQReg(mreg, getQReg(dreg), condT);
+               putQReg(dreg, mkexpr(arg_m), condT);
+            } else {
+               arg_m = newTemp(Ity_I64);
+               assign(arg_m, getDRegI64(mreg));
+               putDRegI64(mreg, getDRegI64(dreg), condT);
+               putDRegI64(dreg, mkexpr(arg_m), condT);
+            }
+            DIP("vswp %c%u, %c%u\n",
+                Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+            return True;
+         } else if ((B >> 1) == 1) {
+            /* VTRN */
+            IROp op_odd = Iop_INVALID, op_even = Iop_INVALID;
+            IRTemp old_m, old_d, new_d, new_m;
+            if (Q) {
+               old_m = newTemp(Ity_V128);
+               old_d = newTemp(Ity_V128);
+               new_m = newTemp(Ity_V128);
+               new_d = newTemp(Ity_V128);
+               assign(old_m, getQReg(mreg));
+               assign(old_d, getQReg(dreg));
+            } else {
+               old_m = newTemp(Ity_I64);
+               old_d = newTemp(Ity_I64);
+               new_m = newTemp(Ity_I64);
+               new_d = newTemp(Ity_I64);
+               assign(old_m, getDRegI64(mreg));
+               assign(old_d, getDRegI64(dreg));
+            }
+            if (Q) {
+               switch (size) {
+                  case 0:
+                     op_odd  = Iop_InterleaveOddLanes8x16;
+                     op_even = Iop_InterleaveEvenLanes8x16;
+                     break;
+                  case 1:
+                     op_odd  = Iop_InterleaveOddLanes16x8;
+                     op_even = Iop_InterleaveEvenLanes16x8;
+                     break;
+                  case 2:
+                     op_odd  = Iop_InterleaveOddLanes32x4;
+                     op_even = Iop_InterleaveEvenLanes32x4;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            } else {
+               switch (size) {
+                  case 0:
+                     op_odd  = Iop_InterleaveOddLanes8x8;
+                     op_even = Iop_InterleaveEvenLanes8x8;
+                     break;
+                  case 1:
+                     op_odd  = Iop_InterleaveOddLanes16x4;
+                     op_even = Iop_InterleaveEvenLanes16x4;
+                     break;
+                  case 2:
+                     op_odd  = Iop_InterleaveHI32x2;
+                     op_even = Iop_InterleaveLO32x2;
+                     break;
+                  case 3:
+                     return False;
+                  default:
+                     vassert(0);
+               }
+            }
+            assign(new_d, binop(op_even, mkexpr(old_m), mkexpr(old_d)));
+            assign(new_m, binop(op_odd, mkexpr(old_m), mkexpr(old_d)));
+            if (Q) {
+               putQReg(dreg, mkexpr(new_d), condT);
+               putQReg(mreg, mkexpr(new_m), condT);
+            } else {
+               putDRegI64(dreg, mkexpr(new_d), condT);
+               putDRegI64(mreg, mkexpr(new_m), condT);
+            }
+            DIP("vtrn.%u %c%u, %c%u\n",
+                8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+            return True;
+         } else if ((B >> 1) == 2) {
+            /* VUZP */
+            IROp op_even, op_odd;
+            IRTemp old_m, old_d, new_m, new_d;
+            if (!Q && size == 2)
+               return False;
+            if (Q) {
+               old_m = newTemp(Ity_V128);
+               old_d = newTemp(Ity_V128);
+               new_m = newTemp(Ity_V128);
+               new_d = newTemp(Ity_V128);
+               assign(old_m, getQReg(mreg));
+               assign(old_d, getQReg(dreg));
+            } else {
+               old_m = newTemp(Ity_I64);
+               old_d = newTemp(Ity_I64);
+               new_m = newTemp(Ity_I64);
+               new_d = newTemp(Ity_I64);
+               assign(old_m, getDRegI64(mreg));
+               assign(old_d, getDRegI64(dreg));
+            }
+            switch (size) {
+               case 0:
+                  op_odd  = Q ? Iop_CatOddLanes8x16 : Iop_CatOddLanes8x8;
+                  op_even = Q ? Iop_CatEvenLanes8x16 : Iop_CatEvenLanes8x8;
+                  break;
+               case 1:
+                  op_odd  = Q ? Iop_CatOddLanes16x8 : Iop_CatOddLanes16x4;
+                  op_even = Q ? Iop_CatEvenLanes16x8 : Iop_CatEvenLanes16x4;
+                  break;
+               case 2:
+                  op_odd  = Iop_CatOddLanes32x4;
+                  op_even = Iop_CatEvenLanes32x4;
+                  break;
+               case 3:
+                  return False;
+               default:
+                  vassert(0);
+            }
+            assign(new_d, binop(op_even, mkexpr(old_m), mkexpr(old_d)));
+            assign(new_m, binop(op_odd,  mkexpr(old_m), mkexpr(old_d)));
+            if (Q) {
+               putQReg(dreg, mkexpr(new_d), condT);
+               putQReg(mreg, mkexpr(new_m), condT);
+            } else {
+               putDRegI64(dreg, mkexpr(new_d), condT);
+               putDRegI64(mreg, mkexpr(new_m), condT);
+            }
+            DIP("vuzp.%u %c%u, %c%u\n",
+                8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+            return True;
+         } else if ((B >> 1) == 3) {
+            /* VZIP */
+            IROp op_lo, op_hi;
+            IRTemp old_m, old_d, new_m, new_d;
+            if (!Q && size == 2)
+               return False;
+            if (Q) {
+               old_m = newTemp(Ity_V128);
+               old_d = newTemp(Ity_V128);
+               new_m = newTemp(Ity_V128);
+               new_d = newTemp(Ity_V128);
+               assign(old_m, getQReg(mreg));
+               assign(old_d, getQReg(dreg));
+            } else {
+               old_m = newTemp(Ity_I64);
+               old_d = newTemp(Ity_I64);
+               new_m = newTemp(Ity_I64);
+               new_d = newTemp(Ity_I64);
+               assign(old_m, getDRegI64(mreg));
+               assign(old_d, getDRegI64(dreg));
+            }
+            switch (size) {
+               case 0:
+                  op_hi = Q ? Iop_InterleaveHI8x16 : Iop_InterleaveHI8x8;
+                  op_lo = Q ? Iop_InterleaveLO8x16 : Iop_InterleaveLO8x8;
+                  break;
+               case 1:
+                  op_hi = Q ? Iop_InterleaveHI16x8 : Iop_InterleaveHI16x4;
+                  op_lo = Q ? Iop_InterleaveLO16x8 : Iop_InterleaveLO16x4;
+                  break;
+               case 2:
+                  op_hi = Iop_InterleaveHI32x4;
+                  op_lo = Iop_InterleaveLO32x4;
+                  break;
+               case 3:
+                  return False;
+               default:
+                  vassert(0);
+            }
+            assign(new_d, binop(op_lo, mkexpr(old_m), mkexpr(old_d)));
+            assign(new_m, binop(op_hi, mkexpr(old_m), mkexpr(old_d)));
+            if (Q) {
+               putQReg(dreg, mkexpr(new_d), condT);
+               putQReg(mreg, mkexpr(new_m), condT);
+            } else {
+               putDRegI64(dreg, mkexpr(new_d), condT);
+               putDRegI64(mreg, mkexpr(new_m), condT);
+            }
+            DIP("vzip.%u %c%u, %c%u\n",
+                8 << size, Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+            return True;
+         } else if (B == 8) {
+            /* VMOVN */
+            IROp op;
+            mreg >>= 1;
+            switch (size) {
+               case 0: op = Iop_NarrowUn16to8x8;  break;
+               case 1: op = Iop_NarrowUn32to16x4; break;
+               case 2: op = Iop_NarrowUn64to32x2; break;
+               case 3: return False;
+               default: vassert(0);
+            }
+            putDRegI64(dreg, unop(op, getQReg(mreg)), condT);
+            DIP("vmovn.i%u d%u, q%u\n", 16 << size, dreg, mreg);
+            return True;
+         } else if (B == 9 || (B >> 1) == 5) {
+            /* VQMOVN, VQMOVUN */
+            IROp op, op2;
+            IRTemp tmp;
+            dreg = ((theInstr >> 18) & 0x10) | ((theInstr >> 12) & 0xF);
+            mreg = ((theInstr >> 1) & 0x10) | (theInstr & 0xF);
+            if (mreg & 1)
+               return False;
+            mreg >>= 1;
+            switch (size) {
+               case 0: op2 = Iop_NarrowUn16to8x8;  break;
+               case 1: op2 = Iop_NarrowUn32to16x4; break;
+               case 2: op2 = Iop_NarrowUn64to32x2; break;
+               case 3: return False;
+               default: vassert(0);
+            }
+            switch (B & 3) {
+               case 0:
+                  vassert(0);
+               case 1:
+                  switch (size) {
+                     case 0: op = Iop_QNarrowUn16Sto8Ux8;  break;
+                     case 1: op = Iop_QNarrowUn32Sto16Ux4; break;
+                     case 2: op = Iop_QNarrowUn64Sto32Ux2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+                  DIP("vqmovun.s%u d%u, q%u\n", 16 << size, dreg, mreg);
+                  break;
+               case 2:
+                  switch (size) {
+                     case 0: op = Iop_QNarrowUn16Sto8Sx8;  break;
+                     case 1: op = Iop_QNarrowUn32Sto16Sx4; break;
+                     case 2: op = Iop_QNarrowUn64Sto32Sx2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+                  DIP("vqmovn.s%u d%u, q%u\n", 16 << size, dreg, mreg);
+                  break;
+               case 3:
+                  switch (size) {
+                     case 0: op = Iop_QNarrowUn16Uto8Ux8;  break;
+                     case 1: op = Iop_QNarrowUn32Uto16Ux4; break;
+                     case 2: op = Iop_QNarrowUn64Uto32Ux2; break;
+                     case 3: return False;
+                     default: vassert(0);
+                  }
+                  DIP("vqmovn.u%u d%u, q%u\n", 16 << size, dreg, mreg);
+                  break;
+               default:
+                  vassert(0);
+            }
+            res = newTemp(Ity_I64);
+            tmp = newTemp(Ity_I64);
+            assign(res, unop(op, getQReg(mreg)));
+            assign(tmp, unop(op2, getQReg(mreg)));
+            setFlag_QC(mkexpr(res), mkexpr(tmp), False, condT);
+            putDRegI64(dreg, mkexpr(res), condT);
+            return True;
+         } else if (B == 12) {
+            /* VSHLL (maximum shift) */
+            IROp op, cvt;
+            UInt shift_imm;
+            if (Q)
+               return False;
+            if (dreg & 1)
+               return False;
+            dreg >>= 1;
+            shift_imm = 8 << size;
+            res = newTemp(Ity_V128);
+            switch (size) {
+               case 0: op = Iop_ShlN16x8; cvt = Iop_Widen8Uto16x8;  break;
+               case 1: op = Iop_ShlN32x4; cvt = Iop_Widen16Uto32x4; break;
+               case 2: op = Iop_ShlN64x2; cvt = Iop_Widen32Uto64x2; break;
+               case 3: return False;
+               default: vassert(0);
+            }
+            assign(res, binop(op, unop(cvt, getDRegI64(mreg)),
+                                  mkU8(shift_imm)));
+            putQReg(dreg, mkexpr(res), condT);
+            DIP("vshll.i%u q%u, d%u, #%u\n", 8 << size, dreg, mreg, 8 << size);
+            return True;
+         } else if ((B >> 3) == 3 && (B & 3) == 0) {
+            /* VCVT (half<->single) */
+            /* Half-precision extensions are needed to run this */
+            vassert(0); // ATC
+            if (((theInstr >> 18) & 3) != 1)
+               return False;
+            if ((theInstr >> 8) & 1) {
+               if (dreg & 1)
+                  return False;
+               dreg >>= 1;
+               putQReg(dreg, unop(Iop_F16toF32x4, getDRegI64(mreg)),
+                     condT);
+               DIP("vcvt.f32.f16 q%u, d%u\n", dreg, mreg);
+            } else {
+               if (mreg & 1)
+                  return False;
+               mreg >>= 1;
+               putDRegI64(dreg, unop(Iop_F32toF16x4, getQReg(mreg)),
+                                condT);
+               DIP("vcvt.f16.f32 d%u, q%u\n", dreg, mreg);
+            }
+            return True;
+         } else {
+            return False;
+         }
+         vassert(0);
+         return True;
+      case 3:
+         if (((B >> 1) & BITS4(1,1,0,1)) == BITS4(1,0,0,0)) {
+            /* VRECPE */
+            IROp op;
+            F = (theInstr >> 8) & 1;
+            if (size != 2)
+               return False;
+            if (Q) {
+               op = F ? Iop_RecipEst32Fx4 : Iop_RecipEst32Ux4;
+               putQReg(dreg, unop(op, getQReg(mreg)), condT);
+               DIP("vrecpe.%c32 q%u, q%u\n", F ? 'f' : 'u', dreg, mreg);
+            } else {
+               op = F ? Iop_RecipEst32Fx2 : Iop_RecipEst32Ux2;
+               putDRegI64(dreg, unop(op, getDRegI64(mreg)), condT);
+               DIP("vrecpe.%c32 d%u, d%u\n", F ? 'f' : 'u', dreg, mreg);
+            }
+            return True;
+         } else if (((B >> 1) & BITS4(1,1,0,1)) == BITS4(1,0,0,1)) {
+            /* VRSQRTE */
+            IROp op;
+            F = (B >> 2) & 1;
+            if (size != 2)
+               return False;
+            if (F) {
+               /* fp */
+               op = Q ? Iop_RSqrtEst32Fx4 : Iop_RSqrtEst32Fx2;
+            } else {
+               /* unsigned int */
+               op = Q ? Iop_RSqrtEst32Ux4 : Iop_RSqrtEst32Ux2;
+            }
+            if (Q) {
+               putQReg(dreg, unop(op, getQReg(mreg)), condT);
+               DIP("vrsqrte.%c32 q%u, q%u\n", F ? 'f' : 'u', dreg, mreg);
+            } else {
+               putDRegI64(dreg, unop(op, getDRegI64(mreg)), condT);
+               DIP("vrsqrte.%c32 d%u, d%u\n", F ? 'f' : 'u', dreg, mreg);
+            }
+            return True;
+         } else if ((B >> 3) == 3) {
+            /* VCVT (fp<->integer) */
+            IROp op;
+            if (size != 2)
+               return False;
+            switch ((B >> 1) & 3) {
+               case 0:
+                  op = Q ? Iop_I32StoFx4 : Iop_I32StoFx2;
+                  DIP("vcvt.f32.s32 %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+                  break;
+               case 1:
+                  op = Q ? Iop_I32UtoFx4 : Iop_I32UtoFx2;
+                  DIP("vcvt.f32.u32 %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+                  break;
+               case 2:
+                  op = Q ? Iop_FtoI32Sx4_RZ : Iop_FtoI32Sx2_RZ;
+                  DIP("vcvt.s32.f32 %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+                  break;
+               case 3:
+                  op = Q ? Iop_FtoI32Ux4_RZ : Iop_FtoI32Ux2_RZ;
+                  DIP("vcvt.u32.f32 %c%u, %c%u\n",
+                      Q ? 'q' : 'd', dreg, Q ? 'q' : 'd', mreg);
+                  break;
+               default:
+                  vassert(0);
+            }
+            if (Q) {
+               putQReg(dreg, unop(op, getQReg(mreg)), condT);
+            } else {
+               putDRegI64(dreg, unop(op, getDRegI64(mreg)), condT);
+            }
+            return True;
+         } else {
+            return False;
+         }
+         vassert(0);
+         return True;
+      default:
+         vassert(0);
+   }
+   return False;
+}
+
+/* A7.4.6 One register and a modified immediate value */
+static
+void ppNeonImm(UInt imm, UInt cmode, UInt op)
+{
+   int i;
+   switch (cmode) {
+      case 0: case 1: case 8: case 9:
+         vex_printf("0x%x", imm);
+         break;
+      case 2: case 3: case 10: case 11:
+         vex_printf("0x%x00", imm);
+         break;
+      case 4: case 5:
+         vex_printf("0x%x0000", imm);
+         break;
+      case 6: case 7:
+         vex_printf("0x%x000000", imm);
+         break;
+      case 12:
+         vex_printf("0x%xff", imm);
+         break;
+      case 13:
+         vex_printf("0x%xffff", imm);
+         break;
+      case 14:
+         if (op) {
+            vex_printf("0x");
+            for (i = 7; i >= 0; i--)
+               vex_printf("%s", (imm & (1 << i)) ? "ff" : "00");
+         } else {
+            vex_printf("0x%x", imm);
+         }
+         break;
+      case 15:
+         vex_printf("0x%x", imm);
+         break;
+   }
+}
+
+static
+const char *ppNeonImmType(UInt cmode, UInt op)
+{
+   switch (cmode) {
+      case 0 ... 7:
+      case 12: case 13:
+         return "i32";
+      case 8 ... 11:
+         return "i16";
+      case 14:
+         if (op)
+            return "i64";
+         else
+            return "i8";
+      case 15:
+         if (op)
+            vassert(0);
+         else
+            return "f32";
+      default:
+         vassert(0);
+   }
+}
+
+static
+void DIPimm(UInt imm, UInt cmode, UInt op,
+            const char *instr, UInt Q, UInt dreg)
+{
+   if (vex_traceflags & VEX_TRACE_FE) {
+      vex_printf("%s.%s %c%u, #", instr,
+                 ppNeonImmType(cmode, op), Q ? 'q' : 'd', dreg);
+      ppNeonImm(imm, cmode, op);
+      vex_printf("\n");
+   }
+}
+
static
Bool dis_neon_data_1reg_and_imm ( UInt theInstr, IRTemp condT )
{
   /* A7.4.6 One register and a modified immediate value.
      Decodes the immediate forms of VMOV/VMVN/VORR/VBIC.  Returns
      True if handled, False for undecodable encodings.  condT guards
      the register writes (IRTemp_INVALID => unconditional). */
   UInt dreg = get_neon_d_regno(theInstr);
   /* imm_raw: the 8-bit a:bcd:efgh immediate, gathered from
      instruction bits 24, 18:16 and 3:0. */
   ULong imm_raw = ((theInstr >> 17) & 0x80) | ((theInstr >> 12) & 0x70) |
                  (theInstr & 0xf);
   ULong imm_raw_pp = imm_raw;    /* unexpanded copy, kept for tracing */
   UInt cmode = (theInstr >> 8) & 0xf;
   UInt op_bit = (theInstr >> 5) & 1;
   ULong imm = 0;                 /* expanded 64-bit immediate */
   UInt Q = (theInstr >> 6) & 1;  /* 1 => 128-bit destination */
   int i, j;
   UInt tmp;
   IRExpr *imm_val;
   IRExpr *expr;
   IRTemp tmp_var;
   /* Expand imm_raw to 64 bits according to cmode/op (the ARM ARM's
      AdvSIMDExpandImm).  The fallthroughs accumulate byte shifts. */
   switch(cmode) {
      /* 32-bit lanes: place the byte at position 3, 2, 1 or 0, then
         replicate the 32-bit value into both halves. */
      case 7: case 6:
         imm_raw = imm_raw << 8;
         /* fallthrough */
      case 5: case 4:
         imm_raw = imm_raw << 8;
         /* fallthrough */
      case 3: case 2:
         imm_raw = imm_raw << 8;
         /* fallthrough */
      case 0: case 1:
         imm = (imm_raw << 32) | imm_raw;
         break;
      /* 16-bit lanes: byte at position 1 or 0 of each halfword,
         replicated four times. */
      case 11: case 10:
         imm_raw = imm_raw << 8;
         /* fallthrough */
      case 9: case 8:
         imm_raw = (imm_raw << 16) | imm_raw;
         imm = (imm_raw << 32) | imm_raw;
         break;
      /* 32-bit "ones-extended" forms: low 8 (cmode 12) or low 16
         (cmode 13) bits are forced to one. */
      case 13:
         imm_raw = (imm_raw << 8) | 0xff;
         /* fallthrough */
      case 12:
         imm_raw = (imm_raw << 8) | 0xff;
         imm = (imm_raw << 32) | imm_raw;
         break;
      case 14:
         if (! op_bit) {
            /* i8: replicate the byte into all 8 byte lanes. */
            for(i = 0; i < 8; i++) {
               imm = (imm << 8) | imm_raw;
            }
         } else {
            /* i64: each bit of imm_raw expands into a full 0x00 or
               0xff byte of the result, MSB first. */
            for(i = 7; i >= 0; i--) {
               tmp = 0;
               for(j = 0; j < 8; j++) {
                  tmp = (tmp << 1) | ((imm_raw >> i) & 1);
               }
               imm = (imm << 8) | tmp;
            }
         }
         break;
      case 15:
         /* f32: build a:~b:bbbb:bcdefgh in bits 12:0, shift left 19
            to form the single-precision pattern, then replicate to
            both 32-bit halves. */
         imm = (imm_raw & 0x80) << 5;
         imm |= ((~imm_raw & 0x40) << 5);
         for(i = 1; i <= 4; i++)
            imm |= (imm_raw & 0x40) << i;
         imm |= (imm_raw & 0x7f);
         imm = imm << 19;
         imm = (imm << 32) | imm;
         break;
      default:
         return False;
   }
   /* Materialise the expanded immediate at the destination width. */
   if (Q) {
      imm_val = binop(Iop_64HLtoV128, mkU64(imm), mkU64(imm));
   } else {
      imm_val = mkU64(imm);
   }
   /* VMOV: op=0 with cmode 0xx0 / 10x0 / 11xx, or op=1 with cmode
      1110. */
   if (((op_bit == 0) &&
      (((cmode & 9) == 0) || ((cmode & 13) == 8) || ((cmode & 12) == 12))) ||
      ((op_bit == 1) && (cmode == 14))) {
      /* VMOV (immediate) */
      if (Q) {
         putQReg(dreg, imm_val, condT);
      } else {
         putDRegI64(dreg, imm_val, condT);
      }
      DIPimm(imm_raw_pp, cmode, op_bit, "vmov", Q, dreg);
      return True;
   }
   /* VMVN: op=1 with cmode 0xx0 / 10x0 / 110x. */
   if ((op_bit == 1) &&
      (((cmode & 9) == 0) || ((cmode & 13) == 8) || ((cmode & 14) == 12))) {
      /* VMVN (immediate) */
      if (Q) {
         putQReg(dreg, unop(Iop_NotV128, imm_val), condT);
      } else {
         putDRegI64(dreg, unop(Iop_Not64, imm_val), condT);
      }
      DIPimm(imm_raw_pp, cmode, op_bit, "vmvn", Q, dreg);
      return True;
   }
   /* The remaining forms (VORR/VBIC) read-modify-write dreg, so
      snapshot its current value first. */
   if (Q) {
      tmp_var = newTemp(Ity_V128);
      assign(tmp_var, getQReg(dreg));
   } else {
      tmp_var = newTemp(Ity_I64);
      assign(tmp_var, getDRegI64(dreg));
   }
   if ((op_bit == 0) && (((cmode & 9) == 1) || ((cmode & 13) == 9))) {
      /* VORR (immediate) */
      if (Q)
         expr = binop(Iop_OrV128, mkexpr(tmp_var), imm_val);
      else
         expr = binop(Iop_Or64, mkexpr(tmp_var), imm_val);
      DIPimm(imm_raw_pp, cmode, op_bit, "vorr", Q, dreg);
   } else if ((op_bit == 1) && (((cmode & 9) == 1) || ((cmode & 13) == 9))) {
      /* VBIC (immediate): AND with the inverted immediate. */
      if (Q)
         expr = binop(Iop_AndV128, mkexpr(tmp_var),
                                   unop(Iop_NotV128, imm_val));
      else
         expr = binop(Iop_And64, mkexpr(tmp_var), unop(Iop_Not64, imm_val));
      DIPimm(imm_raw_pp, cmode, op_bit, "vbic", Q, dreg);
   } else {
      return False;
   }
   if (Q)
      putQReg(dreg, expr, condT);
   else
      putDRegI64(dreg, expr, condT);
   return True;
}
+
/* A7.4 Advanced SIMD data-processing instructions */
static
Bool dis_neon_data_processing ( UInt theInstr, IRTemp condT )
{
   /* Top-level NEON data-processing decoder: extract the A/B/C/U
      selector fields and dispatch to the per-group decoders.
      Returns False for encodings no group accepts. */
   UInt A = (theInstr >> 19) & 0x1F;
   UInt B = (theInstr >>  8) & 0xF;
   UInt C = (theInstr >>  4) & 0xF;
   UInt U = (theInstr >> 24) & 0x1;

   /* Three registers of the same length. */
   if (! (A & 0x10)) {
      return dis_neon_data_3same(theInstr, condT);
   }
   /* One register and a modified immediate value. */
   if (((A & 0x17) == 0x10) && ((C & 0x9) == 0x1)) {
      return dis_neon_data_1reg_and_imm(theInstr, condT);
   }
   /* Two registers and a shift amount. */
   if ((C & 1) == 1) {
      return dis_neon_data_2reg_and_shift(theInstr, condT);
   }
   /* Three registers of different lengths. */
   if (((C & 5) == 0) && (((A & 0x14) == 0x10) || ((A & 0x16) == 0x14))) {
      return dis_neon_data_3diff(theInstr, condT);
   }
   /* Two registers and a scalar. */
   if (((C & 5) == 4) && (((A & 0x14) == 0x10) || ((A & 0x16) == 0x14))) {
      return dis_neon_data_2reg_and_scalar(theInstr, condT);
   }
   /* Remaining group: VEXT, two-register misc, VTBL/VTBX, VDUP. */
   if ((A & 0x16) == 0x16) {
      if ((U == 0) && ((C & 1) == 0)) {
         return dis_neon_vext(theInstr, condT);
      }
      if ((U != 1) || ((C & 1) == 1))
         return False;
      if ((B & 8) == 0) {
         return dis_neon_data_2reg_misc(theInstr, condT);
      }
      if ((B & 12) == 8) {
         return dis_neon_vtb(theInstr, condT);
      }
      if ((B == 12) && ((C & 9) == 0)) {
         return dis_neon_vdup(theInstr, condT);
      }
   }
   return False;
}
+
+
+/*------------------------------------------------------------*/
+/*--- NEON loads and stores                                ---*/
+/*------------------------------------------------------------*/
+
+/* For NEON memory operations, we use the standard scheme to handle
+   conditionalisation: generate a jump around the instruction if the
+   condition is false.  That's only necessary in Thumb mode, however,
+   since in ARM mode NEON instructions are unconditional. */
+
+/* A helper function for what follows.  It assumes we already went
+   uncond as per comments at the top of this section. */
+static
+void mk_neon_elem_load_to_one_lane( UInt rD, UInt inc, UInt index,
+                                    UInt N, UInt size, IRTemp addr )
+{
+   UInt i;
+   switch (size) {
+      case 0:
+         putDRegI64(rD, triop(Iop_SetElem8x8, getDRegI64(rD), mkU8(index),
+                    loadLE(Ity_I8, mkexpr(addr))), IRTemp_INVALID);
+         break;
+      case 1:
+         putDRegI64(rD, triop(Iop_SetElem16x4, getDRegI64(rD), mkU8(index),
+                    loadLE(Ity_I16, mkexpr(addr))), IRTemp_INVALID);
+         break;
+      case 2:
+         putDRegI64(rD, triop(Iop_SetElem32x2, getDRegI64(rD), mkU8(index),
+                    loadLE(Ity_I32, mkexpr(addr))), IRTemp_INVALID);
+         break;
+      default:
+         vassert(0);
+   }
+   for (i = 1; i <= N; i++) {
+      switch (size) {
+         case 0:
+            putDRegI64(rD + i * inc,
+                       triop(Iop_SetElem8x8,
+                             getDRegI64(rD + i * inc),
+                             mkU8(index),
+                             loadLE(Ity_I8, binop(Iop_Add32,
+                                                  mkexpr(addr),
+                                                  mkU32(i * 1)))),
+                       IRTemp_INVALID);
+            break;
+         case 1:
+            putDRegI64(rD + i * inc,
+                       triop(Iop_SetElem16x4,
+                             getDRegI64(rD + i * inc),
+                             mkU8(index),
+                             loadLE(Ity_I16, binop(Iop_Add32,
+                                                   mkexpr(addr),
+                                                   mkU32(i * 2)))),
+                       IRTemp_INVALID);
+            break;
+         case 2:
+            putDRegI64(rD + i * inc,
+                       triop(Iop_SetElem32x2,
+                             getDRegI64(rD + i * inc),
+                             mkU8(index),
+                             loadLE(Ity_I32, binop(Iop_Add32,
+                                                   mkexpr(addr),
+                                                   mkU32(i * 4)))),
+                       IRTemp_INVALID);
+            break;
+         default:
+            vassert(0);
+      }
+   }
+}
+
+/* A(nother) helper function for what follows.  It assumes we already
+   went uncond as per comments at the top of this section. */
+static
+void mk_neon_elem_store_from_one_lane( UInt rD, UInt inc, UInt index,
+                                       UInt N, UInt size, IRTemp addr )
+{
+   UInt i;
+   switch (size) {
+      case 0:
+         storeLE(mkexpr(addr),
+                 binop(Iop_GetElem8x8, getDRegI64(rD), mkU8(index)));
+         break;
+      case 1:
+         storeLE(mkexpr(addr),
+                 binop(Iop_GetElem16x4, getDRegI64(rD), mkU8(index)));
+         break;
+      case 2:
+         storeLE(mkexpr(addr),
+                 binop(Iop_GetElem32x2, getDRegI64(rD), mkU8(index)));
+         break;
+      default:
+         vassert(0);
+   }
+   for (i = 1; i <= N; i++) {
+      switch (size) {
+         case 0:
+            storeLE(binop(Iop_Add32, mkexpr(addr), mkU32(i * 1)),
+                    binop(Iop_GetElem8x8, getDRegI64(rD + i * inc),
+                                          mkU8(index)));
+            break;
+         case 1:
+            storeLE(binop(Iop_Add32, mkexpr(addr), mkU32(i * 2)),
+                    binop(Iop_GetElem16x4, getDRegI64(rD + i * inc),
+                                           mkU8(index)));
+            break;
+         case 2:
+            storeLE(binop(Iop_Add32, mkexpr(addr), mkU32(i * 4)),
+                    binop(Iop_GetElem32x2, getDRegI64(rD + i * inc),
+                                           mkU8(index)));
+            break;
+         default:
+            vassert(0);
+      }
+   }
+}
+
+/* Generate 2x64 -> 2x64 deinterleave code, for VLD2.  Caller must
+   make *u0 and *u1 be valid IRTemps before the call. */
+static void math_DEINTERLEAVE_2 (/*OUT*/IRTemp* u0, /*OUT*/IRTemp* u1,
+                                 IRTemp i0, IRTemp i1, Int laneszB)
+{
+   /* The following assumes that the guest is little endian, and hence
+      that the memory-side (interleaved) data is stored
+      little-endianly. */
+   vassert(u0 && u1);
+   /* This is pretty easy, since we have primitives directly to
+      hand. */
+   if (laneszB == 4) {
+      // memLE(128 bits) == A0 B0 A1 B1
+      // i0 == B0 A0, i1 == B1 A1
+      // u0 == A1 A0, u1 == B1 B0
+      assign(*u0, binop(Iop_InterleaveLO32x2, mkexpr(i1), mkexpr(i0)));
+      assign(*u1, binop(Iop_InterleaveHI32x2, mkexpr(i1), mkexpr(i0)));
+   } else if (laneszB == 2) {
+      // memLE(128 bits) == A0 B0 A1 B1 A2 B2 A3 B3
+      // i0 == B1 A1 B0 A0, i1 == B3 A3 B2 A2
+      // u0 == A3 A2 A1 A0, u1 == B3 B2 B1 B0
+      assign(*u0, binop(Iop_CatEvenLanes16x4, mkexpr(i1), mkexpr(i0)));
+      assign(*u1, binop(Iop_CatOddLanes16x4,  mkexpr(i1), mkexpr(i0)));
+   } else if (laneszB == 1) {
+      // memLE(128 bits) == A0 B0 A1 B1 A2 B2 A3 B3 A4 B4 A5 B5 A6 B6 A7 B7
+      // i0 == B3 A3 B2 A2 B1 A1 B0 A0, i1 == B7 A7 B6 A6 B5 A5 B4 A4
+      // u0 == A7 A6 A5 A4 A3 A2 A1 A0, u1 == B7 B6 B5 B4 B3 B2 B1 B0
+      assign(*u0, binop(Iop_CatEvenLanes8x8, mkexpr(i1), mkexpr(i0)));
+      assign(*u1, binop(Iop_CatOddLanes8x8,  mkexpr(i1), mkexpr(i0)));
+   } else {
+      // Can never happen, since VLD2 only has valid lane widths of 32,
+      // 16 or 8 bits.
+      vpanic("math_DEINTERLEAVE_2");
+   }
+}
+
+/* Generate 2x64 -> 2x64 interleave code, for VST2.  Caller must make
+   *u0 and *u1 be valid IRTemps before the call. */
+static void math_INTERLEAVE_2 (/*OUT*/IRTemp* i0, /*OUT*/IRTemp* i1,
+                               IRTemp u0, IRTemp u1, Int laneszB)
+{
+   /* The following assumes that the guest is little endian, and hence
+      that the memory-side (interleaved) data is stored
+      little-endianly. */
+   vassert(i0 && i1);
+   /* This is pretty easy, since we have primitives directly to
+      hand. */
+   if (laneszB == 4) {
+      // memLE(128 bits) == A0 B0 A1 B1
+      // i0 == B0 A0, i1 == B1 A1
+      // u0 == A1 A0, u1 == B1 B0
+      assign(*i0, binop(Iop_InterleaveLO32x2, mkexpr(u1), mkexpr(u0)));
+      assign(*i1, binop(Iop_InterleaveHI32x2, mkexpr(u1), mkexpr(u0)));
+   } else if (laneszB == 2) {
+      // memLE(128 bits) == A0 B0 A1 B1 A2 B2 A3 B3
+      // i0 == B1 A1 B0 A0, i1 == B3 A3 B2 A2
+      // u0 == A3 A2 A1 A0, u1 == B3 B2 B1 B0
+      assign(*i0, binop(Iop_InterleaveLO16x4, mkexpr(u1), mkexpr(u0)));
+      assign(*i1, binop(Iop_InterleaveHI16x4, mkexpr(u1), mkexpr(u0)));
+   } else if (laneszB == 1) {
+      // memLE(128 bits) == A0 B0 A1 B1 A2 B2 A3 B3 A4 B4 A5 B5 A6 B6 A7 B7
+      // i0 == B3 A3 B2 A2 B1 A1 B0 A0, i1 == B7 A7 B6 A6 B5 A5 B4 A4
+      // u0 == A7 A6 A5 A4 A3 A2 A1 A0, u1 == B7 B6 B5 B4 B3 B2 B1 B0
+      assign(*i0, binop(Iop_InterleaveLO8x8, mkexpr(u1), mkexpr(u0)));
+      assign(*i1, binop(Iop_InterleaveHI8x8, mkexpr(u1), mkexpr(u0)));
+   } else {
+      // Can never happen, since VST2 only has valid lane widths of 32,
+      // 16 or 8 bits.
+      vpanic("math_INTERLEAVE_2");
+   }
+}
+
+// Helper function for generating arbitrary slicing 'n' dicing of
+// 3 8x8 vectors, as needed for VLD3.8 and VST3.8.
+static IRExpr* math_PERM_8x8x3(const UChar* desc,
+                               IRTemp s0, IRTemp s1, IRTemp s2)
+{
+   // desc is an array of 8 pairs, encoded as 16 bytes,
+   // that describe how to assemble the result lanes, starting with
+   // lane 7.  Each pair is: first component (0..2) says which of 
+   // s0/s1/s2 to use.  Second component (0..7) is the lane number
+   // in the source to use.
+   UInt si;
+   for (si = 0; si < 7; si++) {
+      vassert(desc[2 * si + 0] <= 2);
+      vassert(desc[2 * si + 1] <= 7);
+   }
+   IRTemp h3 = newTemp(Ity_I64);
+   IRTemp h2 = newTemp(Ity_I64);
+   IRTemp h1 = newTemp(Ity_I64);
+   IRTemp h0 = newTemp(Ity_I64);
+   IRTemp srcs[3] = {s0, s1, s2};
+#  define SRC_VEC(_lane)   mkexpr(srcs[desc[2 * (7-(_lane)) + 0]])
+#  define SRC_SHIFT(_lane) mkU8(56-8*(desc[2 * (7-(_lane)) + 1]))
+   assign(h3, binop(Iop_InterleaveHI8x8,
+                    binop(Iop_Shl64, SRC_VEC(7), SRC_SHIFT(7)),
+                    binop(Iop_Shl64, SRC_VEC(6), SRC_SHIFT(6))));
+   assign(h2, binop(Iop_InterleaveHI8x8,
+                    binop(Iop_Shl64, SRC_VEC(5), SRC_SHIFT(5)),
+                    binop(Iop_Shl64, SRC_VEC(4), SRC_SHIFT(4))));
+   assign(h1, binop(Iop_InterleaveHI8x8,
+                    binop(Iop_Shl64, SRC_VEC(3), SRC_SHIFT(3)),
+                    binop(Iop_Shl64, SRC_VEC(2), SRC_SHIFT(2))));
+   assign(h0, binop(Iop_InterleaveHI8x8,
+                    binop(Iop_Shl64, SRC_VEC(1), SRC_SHIFT(1)),
+                    binop(Iop_Shl64, SRC_VEC(0), SRC_SHIFT(0))));
+#  undef SRC_VEC
+#  undef SRC_SHIFT
+   // Now h3..h0 are 64 bit vectors with useful information only
+   // in the top 16 bits.  We now concatentate those four 16-bit
+   // groups so as to produce the final result.
+   IRTemp w1 = newTemp(Ity_I64);
+   IRTemp w0 = newTemp(Ity_I64);
+   assign(w1, binop(Iop_InterleaveHI16x4, mkexpr(h3), mkexpr(h2)));
+   assign(w0, binop(Iop_InterleaveHI16x4, mkexpr(h1), mkexpr(h0)));
+   return binop(Iop_InterleaveHI32x2, mkexpr(w1), mkexpr(w0));
+}
+
+/* Generate 3x64 -> 3x64 deinterleave code, for VLD3.  Caller must
+   make *u0, *u1 and *u2 be valid IRTemps before the call. */
static void math_DEINTERLEAVE_3 (
               /*OUT*/IRTemp* u0, /*OUT*/IRTemp* u1, /*OUT*/IRTemp* u2,
               IRTemp i0, IRTemp i1, IRTemp i2, Int laneszB
            )
{
   /* Generate IR that deinterleaves the three memory-side vectors
      i0..i2 (as loaded by VLD3) into the register-side vectors
      *u0..*u2.  laneszB is the element size in bytes (4, 2 or 1). */
#  define IHI32x2(_e1, _e2) binop(Iop_InterleaveHI32x2, (_e1), (_e2))
#  define IHI16x4(_e1, _e2) binop(Iop_InterleaveHI16x4, (_e1), (_e2))
#  define SHL64(_tmp, _amt) binop(Iop_Shl64, mkexpr(_tmp), mkU8(_amt))
   /* The following assumes that the guest is little endian, and hence
      that the memory-side (interleaved) data is stored
      little-endianly. */
   vassert(u0 && u1 && u2);
   if (laneszB == 4) {
      // memLE(192 bits) == A0 B0 C0 A1 B1 C1
      // i0 == B0 A0, i1 == A1 C0, i2 == C1 B1
      // u0 == A1 A0, u1 == B1 B0, u2 == C1 C0
      assign(*u0, IHI32x2(SHL64(i1,  0), SHL64(i0, 32)));
      assign(*u1, IHI32x2(SHL64(i2, 32), SHL64(i0,  0)));
      assign(*u2, IHI32x2(SHL64(i2,  0), SHL64(i1, 32)));
   } else if (laneszB == 2) {
      // memLE(192 bits) == A0 B0 C0 A1, B1 C1 A2 B2, C2 A3 B3 C3
      // i0 == A1 C0 B0 A0, i1 == B2 A2 C1 B1, i2 == C3 B3 A3 C2
      // u0 == A3 A2 A1 A0, u1 == B3 B2 B1 B0, u2 == C3 C2 C1 C0
      // XXX selects four 16-bit lanes (vector _tmpK, lane _laK, from
      // highest result lane down): each SHL64 moves the chosen lane
      // to the top 16 bits, and the interleaves gather the four tops
      // into one 64-bit result.
#     define XXX(_tmp3,_la3,_tmp2,_la2,_tmp1,_la1,_tmp0,_la0) \
                IHI32x2(                                      \
                   IHI16x4(SHL64((_tmp3),48-16*(_la3)),       \
                           SHL64((_tmp2),48-16*(_la2))),      \
                   IHI16x4(SHL64((_tmp1),48-16*(_la1)),       \
                           SHL64((_tmp0),48-16*(_la0))))
      assign(*u0, XXX(i2,1, i1,2, i0,3, i0,0));
      assign(*u1, XXX(i2,2, i1,3, i1,0, i0,1));
      assign(*u2, XXX(i2,3, i2,0, i1,1, i0,2));
#     undef XXX
   } else if (laneszB == 1) {
      // These describe how the result vectors [7..0] are
      // assembled from the source vectors.  Each pair is
      // (source vector number, lane number).
      static const UChar de0[16] = {2,5, 2,2, 1,7, 1,4, 1,1, 0,6, 0,3, 0,0};
      static const UChar de1[16] = {2,6, 2,3, 2,0, 1,5, 1,2, 0,7, 0,4, 0,1};
      static const UChar de2[16] = {2,7, 2,4, 2,1, 1,6, 1,3, 1,0, 0,5, 0,2};
      assign(*u0, math_PERM_8x8x3(de0, i0, i1, i2));
      assign(*u1, math_PERM_8x8x3(de1, i0, i1, i2));
      assign(*u2, math_PERM_8x8x3(de2, i0, i1, i2));
   } else {
      // Can never happen, since VLD3 only has valid lane widths of 32,
      // 16 or 8 bits.
      vpanic("math_DEINTERLEAVE_3");
   }
#  undef SHL64
#  undef IHI16x4
#  undef IHI32x2
}
+
+/* Generate 3x64 -> 3x64 interleave code, for VST3.  Caller must
+   make *i0, *i1 and *i2 be valid IRTemps before the call. */
static void math_INTERLEAVE_3 (
               /*OUT*/IRTemp* i0, /*OUT*/IRTemp* i1, /*OUT*/IRTemp* i2,
               IRTemp u0, IRTemp u1, IRTemp u2, Int laneszB
            )
{
   /* Generate IR that interleaves the three register-side vectors
      u0..u2 into the memory-side vectors *i0..*i2 (as stored by
      VST3).  laneszB is the element size in bytes (4, 2 or 1). */
#  define IHI32x2(_e1, _e2) binop(Iop_InterleaveHI32x2, (_e1), (_e2))
#  define IHI16x4(_e1, _e2) binop(Iop_InterleaveHI16x4, (_e1), (_e2))
#  define SHL64(_tmp, _amt) binop(Iop_Shl64, mkexpr(_tmp), mkU8(_amt))
   /* The following assumes that the guest is little endian, and hence
      that the memory-side (interleaved) data is stored
      little-endianly. */
   vassert(i0 && i1 && i2);
   if (laneszB == 4) {
      // memLE(192 bits) == A0 B0 C0 A1 B1 C1
      // i0 == B0 A0, i1 == A1 C0, i2 == C1 B1
      // u0 == A1 A0, u1 == B1 B0, u2 == C1 C0
      assign(*i0, IHI32x2(SHL64(u1, 32), SHL64(u0, 32)));
      assign(*i1, IHI32x2(SHL64(u0,  0), SHL64(u2, 32)));
      assign(*i2, IHI32x2(SHL64(u2,  0), SHL64(u1,  0)));
   } else if (laneszB == 2) {
      // memLE(192 bits) == A0 B0 C0 A1, B1 C1 A2 B2, C2 A3 B3 C3
      // i0 == A1 C0 B0 A0, i1 == B2 A2 C1 B1, i2 == C3 B3 A3 C2
      // u0 == A3 A2 A1 A0, u1 == B3 B2 B1 B0, u2 == C3 C2 C1 C0
      // XXX selects four 16-bit lanes (vector _tmpK, lane _laK, from
      // highest result lane down): each SHL64 moves the chosen lane
      // to the top 16 bits, and the interleaves gather the four tops
      // into one 64-bit result.
#     define XXX(_tmp3,_la3,_tmp2,_la2,_tmp1,_la1,_tmp0,_la0) \
                IHI32x2(                                      \
                   IHI16x4(SHL64((_tmp3),48-16*(_la3)),       \
                           SHL64((_tmp2),48-16*(_la2))),      \
                   IHI16x4(SHL64((_tmp1),48-16*(_la1)),       \
                           SHL64((_tmp0),48-16*(_la0))))
      assign(*i0, XXX(u0,1, u2,0, u1,0, u0,0));
      assign(*i1, XXX(u1,2, u0,2, u2,1, u1,1));
      assign(*i2, XXX(u2,3, u1,3, u0,3, u2,2));
#     undef XXX
   } else if (laneszB == 1) {
      // These describe how the result vectors [7..0] are
      // assembled from the source vectors.  Each pair is
      // (source vector number, lane number).
      static const UChar in0[16] = {1,2, 0,2, 2,1, 1,1, 0,1, 2,0, 1,0, 0,0};
      static const UChar in1[16] = {0,5, 2,4, 1,4, 0,4, 2,3, 1,3, 0,3, 2,2};
      static const UChar in2[16] = {2,7, 1,7, 0,7, 2,6, 1,6, 0,6, 2,5, 1,5};
      assign(*i0, math_PERM_8x8x3(in0, u0, u1, u2));
      assign(*i1, math_PERM_8x8x3(in1, u0, u1, u2));
      assign(*i2, math_PERM_8x8x3(in2, u0, u1, u2));
   } else {
      // Can never happen, since VST3 only has valid lane widths of 32,
      // 16 or 8 bits.
      vpanic("math_INTERLEAVE_3");
   }
#  undef SHL64
#  undef IHI16x4
#  undef IHI32x2
}
+
+/* Generate 4x64 -> 4x64 deinterleave code, for VLD4.  Caller must
+   make *u0, *u1, *u2 and *u3 be valid IRTemps before the call. */
static void math_DEINTERLEAVE_4 (
               /*OUT*/IRTemp* u0, /*OUT*/IRTemp* u1,
               /*OUT*/IRTemp* u2, /*OUT*/IRTemp* u3,
               IRTemp i0, IRTemp i1, IRTemp i2, IRTemp i3, Int laneszB
            )
{
   /* Generate IR that deinterleaves the four memory-side vectors
      i0..i3 (as loaded by VLD4) into the register-side vectors
      *u0..*u3.  laneszB is the element size in bytes (4, 2 or 1). */
#  define IHI32x2(_t1, _t2) \
             binop(Iop_InterleaveHI32x2, mkexpr(_t1), mkexpr(_t2))
#  define ILO32x2(_t1, _t2) \
             binop(Iop_InterleaveLO32x2, mkexpr(_t1), mkexpr(_t2))
#  define IHI16x4(_t1, _t2) \
             binop(Iop_InterleaveHI16x4, mkexpr(_t1), mkexpr(_t2))
#  define ILO16x4(_t1, _t2) \
             binop(Iop_InterleaveLO16x4, mkexpr(_t1), mkexpr(_t2))
#  define IHI8x8(_t1, _e2) \
             binop(Iop_InterleaveHI8x8, mkexpr(_t1), _e2)
#  define SHL64(_tmp, _amt) \
             binop(Iop_Shl64, mkexpr(_tmp), mkU8(_amt))
   /* The following assumes that the guest is little endian, and hence
      that the memory-side (interleaved) data is stored
      little-endianly. */
   vassert(u0 && u1 && u2 && u3);
   if (laneszB == 4) {
      /* 32-bit lanes: single-level interleaves suffice. */
      assign(*u0, ILO32x2(i2, i0));
      assign(*u1, IHI32x2(i2, i0));
      assign(*u2, ILO32x2(i3, i1));
      assign(*u3, IHI32x2(i3, i1));
   } else if (laneszB == 2) {
      /* 16-bit lanes: first gather even/odd 16-bit pairs, then apply
         the 32-bit scheme to the partial results. */
      IRTemp b1b0a1a0 = newTemp(Ity_I64);
      IRTemp b3b2a3a2 = newTemp(Ity_I64);
      IRTemp d1d0c1c0 = newTemp(Ity_I64);
      IRTemp d3d2c3c2 = newTemp(Ity_I64);
      assign(b1b0a1a0, ILO16x4(i1, i0));
      assign(b3b2a3a2, ILO16x4(i3, i2));
      assign(d1d0c1c0, IHI16x4(i1, i0));
      assign(d3d2c3c2, IHI16x4(i3, i2));
      // And now do what we did for the 32-bit case.
      assign(*u0, ILO32x2(b3b2a3a2, b1b0a1a0));
      assign(*u1, IHI32x2(b3b2a3a2, b1b0a1a0));
      assign(*u2, ILO32x2(d3d2c3c2, d1d0c1c0));
      assign(*u3, IHI32x2(d3d2c3c2, d1d0c1c0));
   } else if (laneszB == 1) {
      // Deinterleave into 16-bit chunks, then do as the 16-bit case.
      IRTemp i0x = newTemp(Ity_I64);
      IRTemp i1x = newTemp(Ity_I64);
      IRTemp i2x = newTemp(Ity_I64);
      IRTemp i3x = newTemp(Ity_I64);
      assign(i0x, IHI8x8(i0, SHL64(i0, 32)));
      assign(i1x, IHI8x8(i1, SHL64(i1, 32)));
      assign(i2x, IHI8x8(i2, SHL64(i2, 32)));
      assign(i3x, IHI8x8(i3, SHL64(i3, 32)));
      // From here on is like the 16 bit case.
      IRTemp b1b0a1a0 = newTemp(Ity_I64);
      IRTemp b3b2a3a2 = newTemp(Ity_I64);
      IRTemp d1d0c1c0 = newTemp(Ity_I64);
      IRTemp d3d2c3c2 = newTemp(Ity_I64);
      assign(b1b0a1a0, ILO16x4(i1x, i0x));
      assign(b3b2a3a2, ILO16x4(i3x, i2x));
      assign(d1d0c1c0, IHI16x4(i1x, i0x));
      assign(d3d2c3c2, IHI16x4(i3x, i2x));
      // And now do what we did for the 32-bit case.
      assign(*u0, ILO32x2(b3b2a3a2, b1b0a1a0));
      assign(*u1, IHI32x2(b3b2a3a2, b1b0a1a0));
      assign(*u2, ILO32x2(d3d2c3c2, d1d0c1c0));
      assign(*u3, IHI32x2(d3d2c3c2, d1d0c1c0));
   } else {
      // Can never happen, since VLD4 only has valid lane widths of 32,
      // 16 or 8 bits.
      vpanic("math_DEINTERLEAVE_4");
   }
#  undef SHL64
#  undef IHI8x8
#  undef ILO16x4
#  undef IHI16x4
#  undef ILO32x2
#  undef IHI32x2
}
+
+/* Generate 4x64 -> 4x64 interleave code, for VST4.  Caller must
+   make *i0, *i1, *i2 and *i3 be valid IRTemps before the call. */
+static void math_INTERLEAVE_4 (
+               /*OUT*/IRTemp* i0, /*OUT*/IRTemp* i1,
+               /*OUT*/IRTemp* i2, /*OUT*/IRTemp* i3,
+               IRTemp u0, IRTemp u1, IRTemp u2, IRTemp u3, Int laneszB
+            )
+{
+#  define IHI32x2(_t1, _t2) \
+             binop(Iop_InterleaveHI32x2, mkexpr(_t1), mkexpr(_t2))
+#  define ILO32x2(_t1, _t2) \
+             binop(Iop_InterleaveLO32x2, mkexpr(_t1), mkexpr(_t2))
+#  define CEV16x4(_t1, _t2) \
+             binop(Iop_CatEvenLanes16x4, mkexpr(_t1), mkexpr(_t2))
+#  define COD16x4(_t1, _t2) \
+             binop(Iop_CatOddLanes16x4, mkexpr(_t1), mkexpr(_t2))
+#  define COD8x8(_t1, _e2) \
+             binop(Iop_CatOddLanes8x8, mkexpr(_t1), _e2)
+#  define SHL64(_tmp, _amt) \
+             binop(Iop_Shl64, mkexpr(_tmp), mkU8(_amt))
+   /* The following assumes that the guest is little endian, and hence
+      that the memory-side (interleaved) data is stored
+      little-endianly. */
+   vassert(u0 && u1 && u2 && u3);
+   if (laneszB == 4) {
+      assign(*i0, ILO32x2(u1, u0));
+      assign(*i1, ILO32x2(u3, u2));
+      assign(*i2, IHI32x2(u1, u0));
+      assign(*i3, IHI32x2(u3, u2));
+   } else if (laneszB == 2) {
+      // First, interleave at the 32-bit lane size.
+      IRTemp b1b0a1a0 = newTemp(Ity_I64);
+      IRTemp b3b2a3a2 = newTemp(Ity_I64);
+      IRTemp d1d0c1c0 = newTemp(Ity_I64);
+      IRTemp d3d2c3c2 = newTemp(Ity_I64);
+      assign(b1b0a1a0, ILO32x2(u1, u0));
+      assign(b3b2a3a2, IHI32x2(u1, u0));
+      assign(d1d0c1c0, ILO32x2(u3, u2));
+      assign(d3d2c3c2, IHI32x2(u3, u2));
+      // And interleave (cat) at the 16 bit size.
+      assign(*i0, CEV16x4(d1d0c1c0, b1b0a1a0));
+      assign(*i1, COD16x4(d1d0c1c0, b1b0a1a0));
+      assign(*i2, CEV16x4(d3d2c3c2, b3b2a3a2));
+      assign(*i3, COD16x4(d3d2c3c2, b3b2a3a2));
+   } else if (laneszB == 1) {
+      // First, interleave at the 32-bit lane size.
+      IRTemp b1b0a1a0 = newTemp(Ity_I64);
+      IRTemp b3b2a3a2 = newTemp(Ity_I64);
+      IRTemp d1d0c1c0 = newTemp(Ity_I64);
+      IRTemp d3d2c3c2 = newTemp(Ity_I64);
+      assign(b1b0a1a0, ILO32x2(u1, u0));
+      assign(b3b2a3a2, IHI32x2(u1, u0));
+      assign(d1d0c1c0, ILO32x2(u3, u2));
+      assign(d3d2c3c2, IHI32x2(u3, u2));
+      // And interleave (cat) at the 16 bit size.
+      IRTemp i0x = newTemp(Ity_I64);
+      IRTemp i1x = newTemp(Ity_I64);
+      IRTemp i2x = newTemp(Ity_I64);
+      IRTemp i3x = newTemp(Ity_I64);
+      assign(i0x, CEV16x4(d1d0c1c0, b1b0a1a0));
+      assign(i1x, COD16x4(d1d0c1c0, b1b0a1a0));
+      assign(i2x, CEV16x4(d3d2c3c2, b3b2a3a2));
+      assign(i3x, COD16x4(d3d2c3c2, b3b2a3a2));
+      // And rearrange within each word, to get the right 8 bit lanes.
+      assign(*i0, COD8x8(i0x, SHL64(i0x, 8)));
+      assign(*i1, COD8x8(i1x, SHL64(i1x, 8)));
+      assign(*i2, COD8x8(i2x, SHL64(i2x, 8)));
+      assign(*i3, COD8x8(i3x, SHL64(i3x, 8)));
+   } else {
+      // Can never happen, since VLD4 only has valid lane widths of 32,
+      // 16 or 8 bits.
+      vpanic("math_DEINTERLEAVE_4");
+   }
+#  undef SHL64
+#  undef COD8x8
+#  undef COD16x4
+#  undef CEV16x4
+#  undef ILO32x2
+#  undef IHI32x2
+}
+
+/* A7.7 Advanced SIMD element or structure load/store instructions */
+
+/* Disassemble one NEON element/structure load/store (VLDn / VSTn).
+   'theInstr' must already be in the ARM encoding (the Thumb caller
+   swizzles the top bits first -- see decode_NEON_instruction).
+   'isT' says whether we are in Thumb mode; 'condT' is the gating
+   condition: a valid temp in Thumb code, IRTemp_INVALID in ARM code,
+   since NEON insns are unconditional in ARM.  Returns True (and
+   emits IR) if the instruction was handled, False (no IR) if not. */
+static
+Bool dis_neon_load_or_store ( UInt theInstr,
+                              Bool isT, IRTemp condT )
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(theInstr, (_bMax), (_bMin))
+   UInt bA = INSN(23,23);  // 1 => single-lane/all-lanes forms, 0 => multiple structures
+   UInt fB = INSN(11,8);   // sub-form selector
+   UInt bL = INSN(21,21);  // 1 => load (VLDn), 0 => store (VSTn)
+   UInt rD = (INSN(22,22) << 4) | INSN(15,12);  // first D register, D:Vd
+   UInt rN = INSN(19,16);  // base address register
+   UInt rM = INSN(3,0);    // writeback selector: 15 none, 13 by xfer size, else by reg rM
+   UInt N, size, i, j;
+   UInt inc;
+   UInt regs = 1;
+
+   if (isT) {
+      vassert(condT != IRTemp_INVALID);
+   } else {
+      vassert(condT == IRTemp_INVALID);
+   }
+   /* So now, if condT is not IRTemp_INVALID, we know we're
+      dealing with Thumb code. */
+
+   if (INSN(20,20) != 0)
+      return False;
+
+   /* Snapshot Rn and Rm now, before any register writes, so the
+      writeback computation below uses the original values. */
+   IRTemp initialRn = newTemp(Ity_I32);
+   assign(initialRn, isT ? getIRegT(rN) : getIRegA(rN));
+
+   IRTemp initialRm = newTemp(Ity_I32);
+   assign(initialRm, isT ? getIRegT(rM) : getIRegA(rM));
+
+   /* There are 3 cases:
+      (1) VSTn / VLDn (n-element structure from/to one lane)
+      (2) VLDn (single element to all lanes)
+      (3) VSTn / VLDn (multiple n-element structures)
+   */
+   if (bA) {
+      N = fB & 3;
+      if ((fB >> 2) < 3) {
+         /* ------------ Case (1) ------------
+            VSTn / VLDn (n-element structure from/to one lane) */
+
+         size = fB >> 2;
+
+         /* Decode the lane index 'i' and the register step 'inc'
+            from the size-dependent bit positions. */
+         switch (size) {
+            case 0: i = INSN(7,5); inc = 1; break;
+            case 1: i = INSN(7,6); inc = INSN(5,5) ? 2 : 1; break;
+            case 2: i = INSN(7,7); inc = INSN(6,6) ? 2 : 1; break;
+            case 3: return False;
+            default: vassert(0);
+         }
+
+         IRTemp addr = newTemp(Ity_I32);
+         assign(addr, mkexpr(initialRn));
+
+         // go uncond
+         if (condT != IRTemp_INVALID)
+            mk_skip_over_T32_if_cond_is_false(condT);
+         // now uncond
+
+         if (bL)
+            mk_neon_elem_load_to_one_lane(rD, inc, i, N, size, addr);
+         else
+            mk_neon_elem_store_from_one_lane(rD, inc, i, N, size, addr);
+         DIP("v%s%u.%u {", bL ? "ld" : "st", N + 1, 8 << size);
+         for (j = 0; j <= N; j++) {
+            if (j)
+               DIP(", ");
+            DIP("d%u[%u]", rD + j * inc, i);
+         }
+         DIP("}, [r%u]", rN);
+         if (rM != 13 && rM != 15) {
+            DIP(", r%u\n", rM);
+         } else {
+            DIP("%s\n", (rM != 15) ? "!" : "");
+         }
+      } else {
+         /* ------------ Case (2) ------------ 
+            VLDn (single element to all lanes) */
+         UInt r;
+         if (bL == 0)
+            return False;
+
+         inc = INSN(5,5) + 1;
+         size = INSN(7,6);
+
+         /* size == 3 and size == 2 cases differ in alignment constraints */
+         if (size == 3 && N == 3 && INSN(4,4) == 1)
+            size = 2;
+
+         if (size == 0 && N == 0 && INSN(4,4) == 1)
+            return False;
+         if (N == 2 && INSN(4,4) == 1)
+            return False;
+         if (size == 3)
+            return False;
+
+         // go uncond
+         if (condT != IRTemp_INVALID)
+            mk_skip_over_T32_if_cond_is_false(condT);
+         // now uncond
+
+         IRTemp addr = newTemp(Ity_I32);
+         assign(addr, mkexpr(initialRn));
+
+         /* VLD1 with bit 5 set duplicates into two registers. */
+         if (N == 0 && INSN(5,5))
+            regs = 2;
+
+         /* For each destination register, load one element and
+            replicate it across all lanes with a Dup op. */
+         for (r = 0; r < regs; r++) {
+            switch (size) {
+               case 0:
+                  putDRegI64(rD + r, unop(Iop_Dup8x8,
+                                          loadLE(Ity_I8, mkexpr(addr))),
+                             IRTemp_INVALID);
+                  break;
+               case 1:
+                  putDRegI64(rD + r, unop(Iop_Dup16x4,
+                                          loadLE(Ity_I16, mkexpr(addr))),
+                             IRTemp_INVALID);
+                  break;
+               case 2:
+                  putDRegI64(rD + r, unop(Iop_Dup32x2,
+                                          loadLE(Ity_I32, mkexpr(addr))),
+                             IRTemp_INVALID);
+                  break;
+               default:
+                  vassert(0);
+            }
+            /* Remaining structure elements go to registers spaced
+               'inc' apart, from consecutive memory elements. */
+            for (i = 1; i <= N; i++) {
+               switch (size) {
+                  case 0:
+                     putDRegI64(rD + r + i * inc,
+                                unop(Iop_Dup8x8,
+                                     loadLE(Ity_I8, binop(Iop_Add32,
+                                                          mkexpr(addr),
+                                                          mkU32(i * 1)))),
+                                IRTemp_INVALID);
+                     break;
+                  case 1:
+                     putDRegI64(rD + r + i * inc,
+                                unop(Iop_Dup16x4,
+                                     loadLE(Ity_I16, binop(Iop_Add32,
+                                                           mkexpr(addr),
+                                                           mkU32(i * 2)))),
+                                IRTemp_INVALID);
+                     break;
+                  case 2:
+                     putDRegI64(rD + r + i * inc,
+                                unop(Iop_Dup32x2,
+                                     loadLE(Ity_I32, binop(Iop_Add32,
+                                                           mkexpr(addr),
+                                                           mkU32(i * 4)))),
+                                IRTemp_INVALID);
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+         }
+         DIP("vld%u.%u {", N + 1, 8 << size);
+         for (r = 0; r < regs; r++) {
+            for (i = 0; i <= N; i++) {
+               if (i || r)
+                  DIP(", ");
+               DIP("d%u[]", rD + r + i * inc);
+            }
+         }
+         DIP("}, [r%u]", rN);
+         if (rM != 13 && rM != 15) {
+            DIP(", r%u\n", rM);
+         } else {
+            DIP("%s\n", (rM != 15) ? "!" : "");
+         }
+      }
+      /* Writeback.  We're uncond here, so no condT-ing. */
+      /* rM == 15: no writeback; rM == 13: post-increment Rn by the
+         number of bytes transferred; otherwise post-increment by
+         the (original) value of register rM. */
+      if (rM != 15) {
+         if (rM == 13) {
+            IRExpr* e = binop(Iop_Add32,
+                              mkexpr(initialRn),
+                              mkU32((1 << size) * (N + 1)));
+            if (isT)
+               putIRegT(rN, e, IRTemp_INVALID);
+            else
+               putIRegA(rN, e, IRTemp_INVALID, Ijk_Boring);
+         } else {
+            IRExpr* e = binop(Iop_Add32,
+                              mkexpr(initialRn),
+                              mkexpr(initialRm));
+            if (isT)
+               putIRegT(rN, e, IRTemp_INVALID);
+            else
+               putIRegA(rN, e, IRTemp_INVALID, Ijk_Boring);
+         }
+      }
+      return True;
+   } else {
+      /* ------------ Case (3) ------------
+         VSTn / VLDn (multiple n-element structures) */
+      inc = (fB & 1) + 1;
+
+      if (fB == BITS4(0,0,1,0)       // Dd, Dd+1, Dd+2, Dd+3  inc = 1  regs = 4
+          || fB == BITS4(0,1,1,0)    // Dd, Dd+1, Dd+2        inc = 1  regs = 3
+          || fB == BITS4(0,1,1,1)    // Dd                    inc = 2  regs = 1
+          || fB == BITS4(1,0,1,0)) { // Dd, Dd+1              inc = 1  regs = 2
+         N = 0; // VLD1/VST1.  'inc' does not appear to have any
+                // meaning for the VLD1/VST1 cases.  'regs' is the number of
+                // registers involved.
+         if (rD + regs > 32) return False;
+      } 
+      else 
+      if (fB == BITS4(0,0,1,1)       // Dd, Dd+1, Dd+2, Dd+3  inc=2  regs = 2
+          || fB == BITS4(1,0,0,0)    // Dd, Dd+1              inc=1  regs = 1
+          || fB == BITS4(1,0,0,1)) { // Dd, Dd+2              inc=2  regs = 1
+         N = 1; // VLD2/VST2.  'regs' is the number of register-pairs involved
+         if (regs == 1 && inc == 1 && rD + 1 >= 32) return False;
+         if (regs == 1 && inc == 2 && rD + 2 >= 32) return False;
+         if (regs == 2 && inc == 2 && rD + 3 >= 32) return False;
+      } else if (fB == BITS4(0,1,0,0) || fB == BITS4(0,1,0,1)) {
+         N = 2; // VLD3/VST3
+         if (inc == 1 && rD + 2 >= 32) return False;
+         if (inc == 2 && rD + 4 >= 32) return False;
+      } else if (fB == BITS4(0,0,0,0) || fB == BITS4(0,0,0,1)) {
+         N = 3; // VLD4/VST4
+         if (inc == 1 && rD + 3 >= 32) return False;
+         if (inc == 2 && rD + 6 >= 32) return False;
+      } else {
+         return False;
+      }
+
+      /* Fix up 'regs' (default 1) for the multi-register forms. */
+      if (N == 1 && fB == BITS4(0,0,1,1)) {
+         regs = 2;
+      } else if (N == 0) {
+         if (fB == BITS4(1,0,1,0)) {
+            regs = 2;
+         } else if (fB == BITS4(0,1,1,0)) {
+            regs = 3;
+         } else if (fB == BITS4(0,0,1,0)) {
+            regs = 4;
+         }
+      }
+
+      size = INSN(7,6);
+      if (N == 0 && size == 3)
+         size = 2;
+      if (size == 3)
+         return False;
+
+      // go uncond
+      if (condT != IRTemp_INVALID)
+         mk_skip_over_T32_if_cond_is_false(condT);
+      // now uncond
+
+      IRTemp addr = newTemp(Ity_I32);
+      assign(addr, mkexpr(initialRn));
+
+      if (N == 0 /* No interleaving -- VLD1/VST1 */) {
+         UInt r;
+         vassert(regs == 1 || regs == 2 || regs == 3 || regs == 4);
+         /* inc has no relevance here */
+         for (r = 0; r < regs; r++) {
+            if (bL)
+               putDRegI64(rD+r, loadLE(Ity_I64, mkexpr(addr)), IRTemp_INVALID);
+            else
+               storeLE(mkexpr(addr), getDRegI64(rD+r));
+            IRTemp tmp = newTemp(Ity_I32);
+            assign(tmp, binop(Iop_Add32, mkexpr(addr), mkU32(8)));
+            addr = tmp;
+         }
+      }
+      else
+      if (N == 1 /* 2-interleaving -- VLD2/VST2 */) {
+         vassert( (regs == 1 && (inc == 1 || inc == 2))
+                   || (regs == 2 && inc == 2) );
+         // Make 'nregs' be the number of registers and 'regstep'
+         // equal the actual register-step.  The ARM encoding, using 'regs'
+         // and 'inc', is bizarre.  After this, we have:
+         // Dd, Dd+1              regs = 1, inc = 1,   nregs = 2, regstep = 1
+         // Dd, Dd+2              regs = 1, inc = 2,   nregs = 2, regstep = 2
+         // Dd, Dd+1, Dd+2, Dd+3  regs = 2, inc = 2,   nregs = 4, regstep = 1
+         UInt nregs   = 2;
+         UInt regstep = 1;
+         if (regs == 1 && inc == 1) {
+            /* nothing */
+         } else if (regs == 1 && inc == 2) {
+            regstep = 2;
+         } else if (regs == 2 && inc == 2) {
+            nregs = 4;
+         } else {
+            vassert(0);
+         }
+         // 'a' is address,
+         // 'di' is interleaved data, 'du' is uninterleaved data
+         if (nregs == 2) {
+            IRExpr* a0  = binop(Iop_Add32, mkexpr(addr), mkU32(0));
+            IRExpr* a1  = binop(Iop_Add32, mkexpr(addr), mkU32(8));
+            IRTemp  di0 = newTemp(Ity_I64);
+            IRTemp  di1 = newTemp(Ity_I64);
+            IRTemp  du0 = newTemp(Ity_I64); 
+            IRTemp  du1 = newTemp(Ity_I64);
+            if (bL) {
+               assign(di0, loadLE(Ity_I64, a0));
+               assign(di1, loadLE(Ity_I64, a1));
+               math_DEINTERLEAVE_2(&du0, &du1, di0, di1, 1 << size);
+               putDRegI64(rD + 0 * regstep, mkexpr(du0), IRTemp_INVALID);
+               putDRegI64(rD + 1 * regstep, mkexpr(du1), IRTemp_INVALID);
+            } else {
+               assign(du0, getDRegI64(rD + 0 * regstep));
+               assign(du1, getDRegI64(rD + 1 * regstep));
+               math_INTERLEAVE_2(&di0, &di1, du0, du1, 1 << size);
+               storeLE(a0, mkexpr(di0));
+               storeLE(a1, mkexpr(di1));
+            }
+            IRTemp tmp = newTemp(Ity_I32);
+            assign(tmp, binop(Iop_Add32, mkexpr(addr), mkU32(16)));
+            addr = tmp;
+         } else {
+            vassert(nregs == 4);
+            vassert(regstep == 1);
+            IRExpr* a0  = binop(Iop_Add32, mkexpr(addr), mkU32(0));
+            IRExpr* a1  = binop(Iop_Add32, mkexpr(addr), mkU32(8));
+            IRExpr* a2  = binop(Iop_Add32, mkexpr(addr), mkU32(16));
+            IRExpr* a3  = binop(Iop_Add32, mkexpr(addr), mkU32(24));
+            IRTemp  di0 = newTemp(Ity_I64);
+            IRTemp  di1 = newTemp(Ity_I64);
+            IRTemp  di2 = newTemp(Ity_I64);
+            IRTemp  di3 = newTemp(Ity_I64);
+            IRTemp  du0 = newTemp(Ity_I64); 
+            IRTemp  du1 = newTemp(Ity_I64);
+            IRTemp  du2 = newTemp(Ity_I64); 
+            IRTemp  du3 = newTemp(Ity_I64);
+            if (bL) {
+               assign(di0, loadLE(Ity_I64, a0));
+               assign(di1, loadLE(Ity_I64, a1));
+               assign(di2, loadLE(Ity_I64, a2));
+               assign(di3, loadLE(Ity_I64, a3));
+               // Note spooky interleaving: du0, du2, di0, di1 etc
+               math_DEINTERLEAVE_2(&du0, &du2, di0, di1, 1 << size);
+               math_DEINTERLEAVE_2(&du1, &du3, di2, di3, 1 << size);
+               putDRegI64(rD + 0 * regstep, mkexpr(du0), IRTemp_INVALID);
+               putDRegI64(rD + 1 * regstep, mkexpr(du1), IRTemp_INVALID);
+               putDRegI64(rD + 2 * regstep, mkexpr(du2), IRTemp_INVALID);
+               putDRegI64(rD + 3 * regstep, mkexpr(du3), IRTemp_INVALID);
+            } else {
+               assign(du0, getDRegI64(rD + 0 * regstep));
+               assign(du1, getDRegI64(rD + 1 * regstep));
+               assign(du2, getDRegI64(rD + 2 * regstep));
+               assign(du3, getDRegI64(rD + 3 * regstep));
+               // Note spooky interleaving: du0, du2, di0, di1 etc
+               math_INTERLEAVE_2(&di0, &di1, du0, du2, 1 << size);
+               math_INTERLEAVE_2(&di2, &di3, du1, du3, 1 << size);
+               storeLE(a0, mkexpr(di0));
+               storeLE(a1, mkexpr(di1));
+               storeLE(a2, mkexpr(di2));
+               storeLE(a3, mkexpr(di3));
+            }
+
+            IRTemp tmp = newTemp(Ity_I32);
+            assign(tmp, binop(Iop_Add32, mkexpr(addr), mkU32(32)));
+            addr = tmp;
+         }
+      }
+      else
+      if (N == 2 /* 3-interleaving -- VLD3/VST3 */) {
+         // Dd, Dd+1, Dd+2   regs = 1, inc = 1
+         // Dd, Dd+2, Dd+4   regs = 1, inc = 2
+         vassert(regs == 1 && (inc == 1 || inc == 2));
+         IRExpr* a0  = binop(Iop_Add32, mkexpr(addr), mkU32(0));
+         IRExpr* a1  = binop(Iop_Add32, mkexpr(addr), mkU32(8));
+         IRExpr* a2  = binop(Iop_Add32, mkexpr(addr), mkU32(16));
+         IRTemp  di0 = newTemp(Ity_I64);
+         IRTemp  di1 = newTemp(Ity_I64);
+         IRTemp  di2 = newTemp(Ity_I64);
+         IRTemp  du0 = newTemp(Ity_I64); 
+         IRTemp  du1 = newTemp(Ity_I64);
+         IRTemp  du2 = newTemp(Ity_I64);
+         if (bL) {
+            assign(di0, loadLE(Ity_I64, a0));
+            assign(di1, loadLE(Ity_I64, a1));
+            assign(di2, loadLE(Ity_I64, a2));
+            math_DEINTERLEAVE_3(&du0, &du1, &du2, di0, di1, di2, 1 << size);
+            putDRegI64(rD + 0 * inc, mkexpr(du0), IRTemp_INVALID);
+            putDRegI64(rD + 1 * inc, mkexpr(du1), IRTemp_INVALID);
+            putDRegI64(rD + 2 * inc, mkexpr(du2), IRTemp_INVALID);
+         } else {
+            assign(du0, getDRegI64(rD + 0 * inc));
+            assign(du1, getDRegI64(rD + 1 * inc));
+            assign(du2, getDRegI64(rD + 2 * inc));
+            math_INTERLEAVE_3(&di0, &di1, &di2, du0, du1, du2, 1 << size);
+            storeLE(a0, mkexpr(di0));
+            storeLE(a1, mkexpr(di1));
+            storeLE(a2, mkexpr(di2));
+         }
+         IRTemp tmp = newTemp(Ity_I32);
+         assign(tmp, binop(Iop_Add32, mkexpr(addr), mkU32(24)));
+         addr = tmp;
+      }
+      else 
+      if (N == 3 /* 4-interleaving -- VLD4/VST4 */) {
+         // Dd, Dd+1, Dd+2, Dd+3   regs = 1, inc = 1
+         // Dd, Dd+2, Dd+4, Dd+6   regs = 1, inc = 2
+         vassert(regs == 1 && (inc == 1 || inc == 2));
+         IRExpr* a0  = binop(Iop_Add32, mkexpr(addr), mkU32(0));
+         IRExpr* a1  = binop(Iop_Add32, mkexpr(addr), mkU32(8));
+         IRExpr* a2  = binop(Iop_Add32, mkexpr(addr), mkU32(16));
+         IRExpr* a3  = binop(Iop_Add32, mkexpr(addr), mkU32(24));
+         IRTemp  di0 = newTemp(Ity_I64);
+         IRTemp  di1 = newTemp(Ity_I64);
+         IRTemp  di2 = newTemp(Ity_I64);
+         IRTemp  di3 = newTemp(Ity_I64);
+         IRTemp  du0 = newTemp(Ity_I64); 
+         IRTemp  du1 = newTemp(Ity_I64);
+         IRTemp  du2 = newTemp(Ity_I64);
+         IRTemp  du3 = newTemp(Ity_I64);
+         if (bL) {
+            assign(di0, loadLE(Ity_I64, a0));
+            assign(di1, loadLE(Ity_I64, a1));
+            assign(di2, loadLE(Ity_I64, a2));
+            assign(di3, loadLE(Ity_I64, a3));
+            math_DEINTERLEAVE_4(&du0, &du1, &du2, &du3,
+                                di0, di1, di2, di3, 1 << size);
+            putDRegI64(rD + 0 * inc, mkexpr(du0), IRTemp_INVALID);
+            putDRegI64(rD + 1 * inc, mkexpr(du1), IRTemp_INVALID);
+            putDRegI64(rD + 2 * inc, mkexpr(du2), IRTemp_INVALID);
+            putDRegI64(rD + 3 * inc, mkexpr(du3), IRTemp_INVALID);
+         } else {
+            assign(du0, getDRegI64(rD + 0 * inc));
+            assign(du1, getDRegI64(rD + 1 * inc));
+            assign(du2, getDRegI64(rD + 2 * inc));
+            assign(du3, getDRegI64(rD + 3 * inc));
+            math_INTERLEAVE_4(&di0, &di1, &di2, &di3,
+                              du0, du1, du2, du3, 1 << size);
+            storeLE(a0, mkexpr(di0));
+            storeLE(a1, mkexpr(di1));
+            storeLE(a2, mkexpr(di2));
+            storeLE(a3, mkexpr(di3));
+         }
+         IRTemp tmp = newTemp(Ity_I32);
+         assign(tmp, binop(Iop_Add32, mkexpr(addr), mkU32(32)));
+         addr = tmp;
+      }
+      else {
+         vassert(0);
+      }
+
+      /* Writeback */
+      /* rM == 15: none; rM == 13: advance Rn by the bytes transferred
+         (8 bytes per D register); otherwise advance Rn by reg rM. */
+      if (rM != 15) {
+         IRExpr* e;
+         if (rM == 13) {
+            e = binop(Iop_Add32, mkexpr(initialRn),
+                                 mkU32(8 * (N + 1) * regs));
+         } else {
+            e = binop(Iop_Add32, mkexpr(initialRn),
+                                 mkexpr(initialRm));
+         }
+         if (isT)
+            putIRegT(rN, e, IRTemp_INVALID);
+         else
+            putIRegA(rN, e, IRTemp_INVALID, Ijk_Boring);
+      }
+
+      DIP("v%s%u.%u {", bL ? "ld" : "st", N + 1, 8 << INSN(7,6));
+      if ((inc == 1 && regs * (N + 1) > 1)
+          || (inc == 2 && regs > 1 && N > 0)) {
+         DIP("d%u-d%u", rD, rD + regs * (N + 1) - 1);
+      } else {
+         UInt r;
+         for (r = 0; r < regs; r++) {
+            for (i = 0; i <= N; i++) {
+               if (i || r)
+                  DIP(", ");
+               DIP("d%u", rD + r + i * inc);
+            }
+         }
+      }
+      DIP("}, [r%u]", rN);
+      if (rM != 13 && rM != 15) {
+         DIP(", r%u\n", rM);
+      } else {
+         DIP("%s\n", (rM != 15) ? "!" : "");
+      }
+      return True;
+   }
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- NEON, top level control                              ---*/
+/*------------------------------------------------------------*/
+
+/* Both ARM and Thumb */
+
+/* Translate a NEON instruction.  If successful, returns
+   True, and *dres may or may not be updated.  On failure, returns
+   False and neither changes *dres nor creates any IR.
+
+   The Thumb and ARM encodings are similar for the 24 bottom bits, but
+   the top 8 bits are slightly different.  In both cases, the caller
+   must pass the entire 32 bits.  Callers may pass any instruction;
+   this ignores non-NEON ones.
+
+   Caller must supply an IRTemp 'condT' holding the gating condition,
+   or IRTemp_INVALID indicating the insn is always executed.  In ARM
+   code, this must always be IRTemp_INVALID because NEON insns are
+   unconditional for ARM.
+
+   Finally, the caller must indicate whether this occurs in ARM or in
+   Thumb code.
+*/
+static Bool decode_NEON_instruction (
+               /*MOD*/DisResult* dres,
+               UInt              insn32,
+               IRTemp            condT,
+               Bool              isT
+            )
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn32, (_bMax), (_bMin))
+
+   /* Two instruction families are handled here: data processing and
+      load/store.  ARM encodings are passed straight through to the
+      relevant sub-handler; Thumb encodings first have their top bits
+      rewritten into the equivalent ARM layout.
+
+      Data processing:
+         Thumb: 111U 1111 AAAA Axxx xxxx BBBB CCCC xxxx
+         ARM:   1111 001U AAAA Axxx xxxx BBBB CCCC xxxx
+      Load/store:
+         Thumb: 1111 1001 AxL0 xxxx xxxx BBBB xxxx xxxx
+         ARM:   1111 0100 AxL0 xxxx xxxx BBBB xxxx xxxx
+   */
+
+   /* NEON insns are never conditional in ARM mode. */
+   if (!isT)
+      vassert(condT == IRTemp_INVALID);
+
+   if (isT) {
+      /* Thumb: swizzle into the ARM layout, then dispatch. */
+      if (INSN(31,29) == BITS3(1,1,1)
+          && INSN(27,24) == BITS4(1,1,1,1)) {
+         // data processing: keep bits 23:0, move the U bit (28)
+         // down to position 24, and install the ARM DP prefix
+         UInt asARM = (BITS7(1,1,1,1,0,0,1) << 25)
+                      | (INSN(28,28) << 24)
+                      | INSN(23,0);
+         return dis_neon_data_processing(asARM, condT);
+      }
+      if (INSN(31,24) == BITS8(1,1,1,1,1,0,0,1)) {
+         // load/store: keep bits 23:0, install the ARM prefix
+         UInt asARM = (BITS8(1,1,1,1,0,1,0,0) << 24)
+                      | INSN(23,0);
+         return dis_neon_load_or_store(asARM, isT, condT);
+      }
+   } else {
+      /* ARM: already in the canonical layout; hand over as-is. */
+      if (INSN(31,25) == BITS7(1,1,1,1,0,0,1))
+         return dis_neon_data_processing(INSN(31,0), condT);
+      if (INSN(31,24) == BITS8(1,1,1,1,0,1,0,0))
+         return dis_neon_load_or_store(INSN(31,0), isT, condT);
+   }
+
+   /* Not a NEON instruction we recognise. */
+   return False;
+
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- V6 MEDIA instructions                                ---*/
+/*------------------------------------------------------------*/
+
+/* Both ARM and Thumb */
+
+/* Translate a V6 media instruction.  If successful, returns
+   True, and *dres may or may not be updated.  On failure, returns
+   False and neither changes *dres nor creates any IR.
+
+   The Thumb and ARM encodings are completely different.  In Thumb
+   mode, the caller must pass the entire 32 bits.  In ARM mode it must
+   pass the lower 28 bits.  Apart from that, callers may pass any
+   instruction; this function ignores anything it doesn't recognise.
+
+   Caller must supply an IRTemp 'condT' holding the gating condition,
+   or IRTemp_INVALID indicating the insn is always executed.
+
+   Caller must also supply an ARMCondcode 'cond'.  This is only used
+   for debug printing, no other purpose.  For ARM, this is simply the
+   top 4 bits of the original instruction.  For Thumb, the condition
+   is not (really) known until run time, and so ARMCondAL should be
+   passed, only so that printing of these instructions does not show
+   any condition.
+
+   Finally, the caller must indicate whether this occurs in ARM or in
+   Thumb code.
+*/
+static Bool decode_V6MEDIA_instruction (
+               /*MOD*/DisResult* dres,
+               UInt              insnv6m,
+               IRTemp            condT,
+               ARMCondcode       conq,
+               Bool              isT
+            )
+{
+#  define INSNA(_bMax,_bMin)   SLICE_UInt(insnv6m, (_bMax), (_bMin))
+#  define INSNT0(_bMax,_bMin)  SLICE_UInt( ((insnv6m >> 16) & 0xFFFF), \
+                                           (_bMax), (_bMin) )
+#  define INSNT1(_bMax,_bMin)  SLICE_UInt( ((insnv6m >> 0)  & 0xFFFF), \
+                                           (_bMax), (_bMin) )
+   HChar dis_buf[128];
+   dis_buf[0] = 0;
+
+   if (isT) {
+      vassert(conq == ARMCondAL);
+   } else {
+      vassert(INSNA(31,28) == BITS4(0,0,0,0)); // caller's obligation
+      vassert(conq >= ARMCondEQ && conq <= ARMCondAL);
+   }
+
+   /* ----------- smulbb, smulbt, smultb, smultt ----------- */
+   {
+     UInt regD = 99, regM = 99, regN = 99, bitM = 0, bitN = 0;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFB1 && INSNT1(15,12) == BITS4(1,1,1,1)
+            && INSNT1(7,6) == BITS2(0,0)) {
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           regN = INSNT0(3,0);
+           bitM = INSNT1(4,4);
+           bitN = INSNT1(5,5);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (BITS8(0,0,0,1,0,1,1,0) == INSNA(27,20) &&
+            BITS4(0,0,0,0)         == INSNA(15,12) &&
+            BITS4(1,0,0,0)         == (INSNA(7,4) & BITS4(1,0,0,1)) ) {
+           regD = INSNA(19,16);
+           regM = INSNA(11,8);
+           regN = INSNA(3,0);
+           bitM = INSNA(6,6);
+           bitN = INSNA(5,5);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp srcN = newTemp(Ity_I32);
+        IRTemp srcM = newTemp(Ity_I32);
+        IRTemp res  = newTemp(Ity_I32);
+
+        assign( srcN, binop(Iop_Sar32,
+                            binop(Iop_Shl32,
+                                  isT ? getIRegT(regN) : getIRegA(regN),
+                                  mkU8(bitN ? 0 : 16)), mkU8(16)) );
+        assign( srcM, binop(Iop_Sar32,
+                            binop(Iop_Shl32,
+                                  isT ? getIRegT(regM) : getIRegA(regM),
+                                  mkU8(bitM ? 0 : 16)), mkU8(16)) );
+        assign( res, binop(Iop_Mul32, mkexpr(srcN), mkexpr(srcM)) );
+
+        if (isT)
+           putIRegT( regD, mkexpr(res), condT );
+        else
+           putIRegA( regD, mkexpr(res), condT, Ijk_Boring );
+
+        DIP( "smul%c%c%s r%u, r%u, r%u\n", bitN ? 't' : 'b', bitM ? 't' : 'b',
+             nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------ smulwb<y><c> <Rd>,<Rn>,<Rm> ------------- */
+   /* ------------ smulwt<y><c> <Rd>,<Rn>,<Rm> ------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99, bitM = 0;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFB3 && INSNT1(15,12) == BITS4(1,1,1,1)
+            && INSNT1(7,5) == BITS3(0,0,0)) {
+          regN = INSNT0(3,0);
+          regD = INSNT1(11,8);
+          regM = INSNT1(3,0);
+          bitM = INSNT1(4,4);
+          if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+             gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,0,0,1,0,0,1,0) && 
+            INSNA(15,12) == BITS4(0,0,0,0)         &&
+            (INSNA(7,4) & BITS4(1,0,1,1)) == BITS4(1,0,1,0)) {
+           regD = INSNA(19,16);
+           regN = INSNA(3,0);
+           regM = INSNA(11,8);
+           bitM = INSNA(6,6);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_prod = newTemp(Ity_I64);
+
+        assign( irt_prod, 
+                binop(Iop_MullS32,
+                      isT ? getIRegT(regN) : getIRegA(regN),
+                      binop(Iop_Sar32, 
+                            binop(Iop_Shl32,
+                                  isT ? getIRegT(regM) : getIRegA(regM),
+                                  mkU8(bitM ? 0 : 16)), 
+                            mkU8(16))) );
+
+        IRExpr* ire_result = binop(Iop_Or32, 
+                                   binop( Iop_Shl32, 
+                                          unop(Iop_64HIto32, mkexpr(irt_prod)), 
+                                          mkU8(16) ), 
+                                   binop( Iop_Shr32, 
+                                          unop(Iop_64to32, mkexpr(irt_prod)), 
+                                          mkU8(16) ) );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP("smulw%c%s r%u, r%u, r%u\n",
+            bitM ? 't' : 'b', nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------ pkhbt<c> Rd, Rn, Rm {,LSL #imm} ------------- */
+   /* ------------ pkhtb<c> Rd, Rn, Rm {,ASR #imm} ------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99, imm5 = 99, shift_type = 99;
+     Bool tbform = False;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xEAC 
+            && INSNT1(15,15) == 0 && INSNT1(4,4) == 0) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           imm5 = (INSNT1(14,12) << 2) | INSNT1(7,6);
+           shift_type = (INSNT1(5,5) << 1) | 0;
+           tbform = (INSNT1(5,5) == 0) ? False : True;
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,1,0,0,0) &&
+            INSNA(5,4)   == BITS2(0,1)             &&
+            (INSNA(6,6)  == 0 || INSNA(6,6) == 1) ) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           imm5 = INSNA(11,7);
+           shift_type = (INSNA(6,6) << 1) | 0;
+           tbform = (INSNA(6,6) == 0) ? False : True;
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regM       = newTemp(Ity_I32);
+        IRTemp irt_regM_shift = newTemp(Ity_I32);
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+        compute_result_and_C_after_shift_by_imm5(
+           dis_buf, &irt_regM_shift, NULL, irt_regM, shift_type, imm5, regM );
+
+        UInt mask = (tbform == True) ? 0x0000FFFF : 0xFFFF0000;
+        IRExpr* ire_result 
+          = binop( Iop_Or32, 
+                   binop(Iop_And32, mkexpr(irt_regM_shift), mkU32(mask)), 
+                   binop(Iop_And32, isT ? getIRegT(regN) : getIRegA(regN),
+                                    unop(Iop_Not32, mkU32(mask))) );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "pkh%s%s r%u, r%u, r%u %s\n", tbform ? "tb" : "bt", 
+             nCC(conq), regD, regN, regM, dis_buf );
+
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ---------- usat<c> <Rd>,#<imm5>,<Rn>{,<shift>} ----------- */
+   {
+     UInt regD = 99, regN = 99, shift_type = 99, imm5 = 99, sat_imm = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,6) == BITS10(1,1,1,1,0,0,1,1,1,0)
+            && INSNT0(4,4) == 0
+            && INSNT1(15,15) == 0 && INSNT1(5,5) == 0) {
+           regD       = INSNT1(11,8);
+           regN       = INSNT0(3,0);
+           shift_type = (INSNT0(5,5) << 1) | 0;
+           imm5       = (INSNT1(14,12) << 2) | INSNT1(7,6);
+           sat_imm    = INSNT1(4,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN))
+              gate = True;
+           if (shift_type == BITS2(1,0) && imm5 == 0)
+              gate = False;
+        }
+     } else {
+        if (INSNA(27,21) == BITS7(0,1,1,0,1,1,1) &&
+            INSNA(5,4)   == BITS2(0,1)) {
+           regD       = INSNA(15,12);
+           regN       = INSNA(3,0);
+           shift_type = (INSNA(6,6) << 1) | 0;
+           imm5       = INSNA(11,7);
+           sat_imm    = INSNA(20,16);
+           if (regD != 15 && regN != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN       = newTemp(Ity_I32);
+        IRTemp irt_regN_shift = newTemp(Ity_I32);
+        IRTemp irt_sat_Q      = newTemp(Ity_I32);
+        IRTemp irt_result     = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        compute_result_and_C_after_shift_by_imm5(
+                dis_buf, &irt_regN_shift, NULL,
+                irt_regN, shift_type, imm5, regN );
+
+        armUnsignedSatQ( &irt_result, &irt_sat_Q, irt_regN_shift, sat_imm );
+        or_into_QFLAG32( mkexpr(irt_sat_Q), condT );
+
+        if (isT)
+           putIRegT( regD, mkexpr(irt_result), condT );
+        else
+           putIRegA( regD, mkexpr(irt_result), condT, Ijk_Boring );
+
+        DIP("usat%s r%u, #0x%04x, %s\n",
+            nCC(conq), regD, imm5, dis_buf);
+        return True;
+     }
+     /* fall through */
+   }
+
+  /* ----------- ssat<c> <Rd>,#<imm5>,<Rn>{,<shift>} ----------- */
+   {
+     UInt regD = 99, regN = 99, shift_type = 99, imm5 = 99, sat_imm = 99;
+     Bool gate = False;
+
+     /* Decode SSAT for Thumb-2 and ARM.  The encoded saturate field is
+        sat_imm-1, hence the "+ 1" below (saturate-to width 1..32 bits). */
+     if (isT) {
+        if (INSNT0(15,6) == BITS10(1,1,1,1,0,0,1,1,0,0)
+            && INSNT0(4,4) == 0
+            && INSNT1(15,15) == 0 && INSNT1(5,5) == 0) {
+           regD       = INSNT1(11,8);
+           regN       = INSNT0(3,0);
+           shift_type = (INSNT0(5,5) << 1) | 0;
+           imm5       = (INSNT1(14,12) << 2) | INSNT1(7,6);
+           sat_imm    = INSNT1(4,0) + 1;
+           if (!isBadRegT(regD) && !isBadRegT(regN))
+              gate = True;
+           /* sh==1 with imm5==0 is not SSAT (different encoding); reject. */
+           if (shift_type == BITS2(1,0) && imm5 == 0)
+              gate = False;
+        }
+     } else {
+        if (INSNA(27,21) == BITS7(0,1,1,0,1,0,1) &&
+            INSNA(5,4)   == BITS2(0,1)) {
+           regD       = INSNA(15,12);
+           regN       = INSNA(3,0);
+           shift_type = (INSNA(6,6) << 1) | 0;
+           imm5       = INSNA(11,7);
+           sat_imm    = INSNA(20,16) + 1;
+           if (regD != 15 && regN != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN       = newTemp(Ity_I32);
+        IRTemp irt_regN_shift = newTemp(Ity_I32);
+        IRTemp irt_sat_Q      = newTemp(Ity_I32);
+        IRTemp irt_result     = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        /* Shift Rn by imm5 as requested (C-out not needed: NULL), then
+           signed-saturate to sat_imm bits and OR any saturation
+           indication into APSR.Q. */
+        compute_result_and_C_after_shift_by_imm5(
+                dis_buf, &irt_regN_shift, NULL,
+                irt_regN, shift_type, imm5, regN );
+
+        armSignedSatQ( irt_regN_shift, sat_imm, &irt_result, &irt_sat_Q );
+        or_into_QFLAG32( mkexpr(irt_sat_Q), condT );
+
+        if (isT)
+           putIRegT( regD, mkexpr(irt_result), condT );
+        else
+           putIRegA( regD, mkexpr(irt_result), condT, Ijk_Boring );
+
+        /* NOTE(review): the immediate printed here is imm5 (the shift
+           amount), not sat_imm; ARM assembly syntax shows the saturation
+           bound.  Confirm against upstream before changing. */
+        DIP( "ssat%s r%u, #0x%04x, %s\n",
+             nCC(conq), regD, imm5, dis_buf);
+        return True;
+    }
+    /* fall through */
+  }
+
+   /* ----------- ssat16<c> <Rd>,#<imm>,<Rn> ----------- */
+   {
+     UInt regD = 99, regN = 99, sat_imm = 99;
+     Bool gate = False;
+
+     /* Decode SSAT16; encoded field is sat_imm-1 (width 1..16 bits). */
+     if (isT) {
+        if (INSNT0(15,6) == BITS10(1,1,1,1,0,0,1,1,0,0)
+            && INSNT0(5,4) == BITS2(1,0)
+            && INSNT1(15,12) == BITS4(0,0,0,0)
+            && INSNT1(7,4) == BITS4(0,0,0,0)) {
+           regD       = INSNT1(11,8);
+           regN       = INSNT0(3,0);
+           sat_imm    = INSNT1(3,0) + 1;
+           if (!isBadRegT(regD) && !isBadRegT(regN))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,1,0,1,0) &&
+            INSNA(11,4)   == BITS8(1,1,1,1,0,0,1,1)) {
+           regD       = INSNA(15,12);
+           regN       = INSNA(3,0);
+           sat_imm    = INSNA(19,16) + 1;
+           if (regD != 15 && regN != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN    = newTemp(Ity_I32);
+        IRTemp irt_regN_lo = newTemp(Ity_I32);
+        IRTemp irt_regN_hi = newTemp(Ity_I32);
+        IRTemp irt_Q_lo    = newTemp(Ity_I32);
+        IRTemp irt_Q_hi    = newTemp(Ity_I32);
+        IRTemp irt_res_lo  = newTemp(Ity_I32);
+        IRTemp irt_res_hi  = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        /* Sign-extend each 16-bit half of Rn into its own 32-bit temp
+           (shl 16 then sar 16 for the low half; sar 16 for the high),
+           saturate each to sat_imm bits updating Q on either, then
+           repack lo into bits 15:0 and hi into bits 31:16. */
+        assign( irt_regN_lo,
+                binop( Iop_Sar32,
+                       binop(Iop_Shl32, mkexpr(irt_regN), mkU8(16)),
+                       mkU8(16)) );
+        assign( irt_regN_hi, binop(Iop_Sar32, mkexpr(irt_regN), mkU8(16)) );
+
+        armSignedSatQ( irt_regN_lo, sat_imm, &irt_res_lo, &irt_Q_lo );
+        or_into_QFLAG32( mkexpr(irt_Q_lo), condT );
+
+        armSignedSatQ( irt_regN_hi, sat_imm, &irt_res_hi, &irt_Q_hi );
+        or_into_QFLAG32( mkexpr(irt_Q_hi), condT );
+
+        IRExpr* ire_result 
+           = binop(Iop_Or32, 
+                   binop(Iop_And32, mkexpr(irt_res_lo), mkU32(0xFFFF)),
+                   binop(Iop_Shl32, mkexpr(irt_res_hi), mkU8(16)));
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "ssat16%s r%u, #0x%04x, r%u\n", nCC(conq), regD, sat_imm, regN );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* -------------- usat16<c> <Rd>,#<imm4>,<Rn> --------------- */
+   {
+     UInt regD = 99, regN = 99, sat_imm = 99;
+     Bool gate = False;
+
+     /* Decode USAT16; unlike SSAT16 the field is used directly (no +1),
+        giving an unsigned saturation bound of 0..15 bits. */
+     if (isT) {
+        if (INSNT0(15,4) == 0xF3A && (INSNT1(15,0) & 0xF0F0) == 0x0000) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           sat_imm = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN))
+              gate = True;
+       }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,1,1,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,1,1)) {
+           regD    = INSNA(15,12);
+           regN    = INSNA(3,0);
+           sat_imm = INSNA(19,16);
+           if (regD != 15 && regN != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN    = newTemp(Ity_I32);
+        IRTemp irt_regN_lo = newTemp(Ity_I32);
+        IRTemp irt_regN_hi = newTemp(Ity_I32);
+        IRTemp irt_Q_lo    = newTemp(Ity_I32);
+        IRTemp irt_Q_hi    = newTemp(Ity_I32);
+        IRTemp irt_res_lo  = newTemp(Ity_I32);
+        IRTemp irt_res_hi  = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        /* Sign-extend each 16-bit half, then unsigned-saturate each to
+           sat_imm bits (Q updated on either half); repack hi:lo.  No
+           mask on the low half is needed: the unsigned-saturated value
+           already fits in 16 bits. */
+        assign( irt_regN_lo, binop( Iop_Sar32, 
+                                    binop(Iop_Shl32, mkexpr(irt_regN), mkU8(16)), 
+                                    mkU8(16)) );
+        assign( irt_regN_hi, binop(Iop_Sar32, mkexpr(irt_regN), mkU8(16)) );
+
+        armUnsignedSatQ( &irt_res_lo, &irt_Q_lo, irt_regN_lo, sat_imm );
+        or_into_QFLAG32( mkexpr(irt_Q_lo), condT );
+
+        armUnsignedSatQ( &irt_res_hi, &irt_Q_hi, irt_regN_hi, sat_imm );
+        or_into_QFLAG32( mkexpr(irt_Q_hi), condT );
+
+        IRExpr* ire_result = binop( Iop_Or32, 
+                                    binop(Iop_Shl32, mkexpr(irt_res_hi), mkU8(16)),
+                                    mkexpr(irt_res_lo) );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "usat16%s r%u, #0x%04x, r%u\n", nCC(conq), regD, sat_imm, regN );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* -------------- uadd16<c> <Rd>,<Rn>,<Rm> -------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA9 && (INSNT1(15,0) & 0xF0F0) == 0xF040) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,0,1) && 
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt  = newTemp(Ity_I32);
+        IRTemp rMt  = newTemp(Ity_I32);
+        IRTemp res  = newTemp(Ity_I32);
+        IRTemp reso = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Result: lane-wise 16-bit wrapping add. */
+        assign(res, binop(Iop_Add16x2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res), condT );
+        else
+           putIRegA( regD, mkexpr(res), condT, Ijk_Boring );
+
+        /* GE flags: in each lane of the halving add ((a+b)>>1), the top
+           bit equals the carry-out of the full unsigned 16-bit add; the
+           helper copies bits 31 and 15 into GE[3:2] and GE[1:0]. */
+        assign(reso, binop(Iop_HAdd16Ux2, mkexpr(rNt), mkexpr(rMt)));
+        set_GE_32_10_from_bits_31_15(reso, condT);
+
+        DIP("uadd16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* -------------- sadd16<c> <Rd>,<Rn>,<Rm> -------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA9 && (INSNT1(15,0) & 0xF0F0) == 0xF000) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,0,1) && 
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt  = newTemp(Ity_I32);
+        IRTemp rMt  = newTemp(Ity_I32);
+        IRTemp res  = newTemp(Ity_I32);
+        IRTemp reso = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Result: lane-wise 16-bit wrapping add. */
+        assign(res, binop(Iop_Add16x2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res), condT );
+        else
+           putIRegA( regD, mkexpr(res), condT, Ijk_Boring );
+
+        /* GE flags: the top bit of each halved signed sum is the sign of
+           the full 16-bit result; Not32 inverts it so bit==1 means the
+           lane sum is >= 0, which is what GE requires. */
+        assign(reso, unop(Iop_Not32,
+                          binop(Iop_HAdd16Sx2, mkexpr(rNt), mkexpr(rMt))));
+        set_GE_32_10_from_bits_31_15(reso, condT);
+
+        DIP("sadd16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ---------------- usub16<c> <Rd>,<Rn>,<Rm> ---------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAD && (INSNT1(15,0) & 0xF0F0) == 0xF040) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,0,1) && 
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+             gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt  = newTemp(Ity_I32);
+        IRTemp rMt  = newTemp(Ity_I32);
+        IRTemp res  = newTemp(Ity_I32);
+        IRTemp reso = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Result: lane-wise 16-bit wrapping subtract. */
+        assign(res, binop(Iop_Sub16x2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res), condT );
+        else
+           putIRegA( regD, mkexpr(res), condT, Ijk_Boring );
+
+        /* GE flags: the top bit of each halved unsigned difference is
+           the borrow; Not32 inverts it so bit==1 means no borrow, i.e.
+           Rn lane >= Rm lane. */
+        assign(reso, unop(Iop_Not32,
+                          binop(Iop_HSub16Ux2, mkexpr(rNt), mkexpr(rMt))));
+        set_GE_32_10_from_bits_31_15(reso, condT);
+
+        DIP("usub16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* -------------- ssub16<c> <Rd>,<Rn>,<Rm> -------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAD && (INSNT1(15,0) & 0xF0F0) == 0xF000) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,0,1) && 
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt  = newTemp(Ity_I32);
+        IRTemp rMt  = newTemp(Ity_I32);
+        IRTemp res  = newTemp(Ity_I32);
+        IRTemp reso = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Result: lane-wise 16-bit wrapping subtract. */
+        assign(res, binop(Iop_Sub16x2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res), condT );
+        else
+           putIRegA( regD, mkexpr(res), condT, Ijk_Boring );
+
+        /* GE flags: top bit of each halved signed difference is the sign
+           of the full result; Not32 makes bit==1 mean diff >= 0. */
+        assign(reso, unop(Iop_Not32,
+                          binop(Iop_HSub16Sx2, mkexpr(rNt), mkexpr(rMt))));
+        set_GE_32_10_from_bits_31_15(reso, condT);
+
+        DIP("ssub16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- uadd8<c> <Rd>,<Rn>,<Rm> ---------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF040) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,0,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            (INSNA(7,4)  == BITS4(1,0,0,1))) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt  = newTemp(Ity_I32);
+        IRTemp rMt  = newTemp(Ity_I32);
+        IRTemp res  = newTemp(Ity_I32);
+        IRTemp reso = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Result: lane-wise 8-bit wrapping add. */
+        assign(res, binop(Iop_Add8x4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res), condT );
+        else
+           putIRegA( regD, mkexpr(res), condT, Ijk_Boring );
+
+        /* GE flags: top bit of each halved unsigned sum is the lane's
+           carry-out; the helper moves bits 31/23/15/7 into GE[3..0]. */
+        assign(reso, binop(Iop_HAdd8Ux4, mkexpr(rNt), mkexpr(rMt)));
+        set_GE_3_2_1_0_from_bits_31_23_15_7(reso, condT);
+
+        DIP("uadd8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- sadd8<c> <Rd>,<Rn>,<Rm> ------------------ */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF000) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,0,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            (INSNA(7,4)  == BITS4(1,0,0,1))) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt  = newTemp(Ity_I32);
+        IRTemp rMt  = newTemp(Ity_I32);
+        IRTemp res  = newTemp(Ity_I32);
+        IRTemp reso = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Result: lane-wise 8-bit wrapping add. */
+        assign(res, binop(Iop_Add8x4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res), condT );
+        else
+           putIRegA( regD, mkexpr(res), condT, Ijk_Boring );
+
+        /* GE flags: top bit of each halved signed sum is the sign of the
+           full 8-bit result; Not32 makes bit==1 mean sum >= 0. */
+        assign(reso, unop(Iop_Not32,
+                          binop(Iop_HAdd8Sx4, mkexpr(rNt), mkexpr(rMt))));
+        set_GE_3_2_1_0_from_bits_31_23_15_7(reso, condT);
+
+        DIP("sadd8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- usub8<c> <Rd>,<Rn>,<Rm> ------------------ */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAC && (INSNT1(15,0) & 0xF0F0) == 0xF040) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,0,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            (INSNA(7,4)  == BITS4(1,1,1,1))) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+             gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt  = newTemp(Ity_I32);
+        IRTemp rMt  = newTemp(Ity_I32);
+        IRTemp res  = newTemp(Ity_I32);
+        IRTemp reso = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Result: lane-wise 8-bit wrapping subtract. */
+        assign(res, binop(Iop_Sub8x4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res), condT );
+        else
+           putIRegA( regD, mkexpr(res), condT, Ijk_Boring );
+
+        /* GE flags: top bit of each halved unsigned difference is the
+           borrow; Not32 makes bit==1 mean no borrow (Rn >= Rm lane). */
+        assign(reso, unop(Iop_Not32,
+                          binop(Iop_HSub8Ux4, mkexpr(rNt), mkexpr(rMt))));
+        set_GE_3_2_1_0_from_bits_31_23_15_7(reso, condT);
+
+        DIP("usub8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- ssub8<c> <Rd>,<Rn>,<Rm> ------------------ */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAC && (INSNT1(15,0) & 0xF0F0) == 0xF000) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,0,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(1,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt  = newTemp(Ity_I32);
+        IRTemp rMt  = newTemp(Ity_I32);
+        IRTemp res  = newTemp(Ity_I32);
+        IRTemp reso = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Result: lane-wise 8-bit wrapping subtract. */
+        assign(res, binop(Iop_Sub8x4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res), condT );
+        else
+           putIRegA( regD, mkexpr(res), condT, Ijk_Boring );
+
+        /* GE flags: top bit of each halved signed difference is the sign
+           of the full result; Not32 makes bit==1 mean diff >= 0. */
+        assign(reso, unop(Iop_Not32,
+                          binop(Iop_HSub8Sx4, mkexpr(rNt), mkexpr(rMt))));
+        set_GE_3_2_1_0_from_bits_31_23_15_7(reso, condT);
+
+        DIP("ssub8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ qadd8<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF010) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(1,0,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise 8-bit signed saturating add.  Note: no Q-flag
+           update is performed (no or_into_QFLAG32 call). */
+        assign(res_q, binop(Iop_QAdd8Sx4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("qadd8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ qsub8<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAC && (INSNT1(15,0) & 0xF0F0) == 0xF010) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(1,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise 8-bit signed saturating subtract.  Note: no Q-flag
+           update is performed (no or_into_QFLAG32 call). */
+        assign(res_q, binop(Iop_QSub8Sx4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("qsub8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ uqadd8<c> <Rd>,<Rn>,<Rm> ------------------ */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF050) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            (INSNA(7,4)  == BITS4(1,0,0,1))) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise 8-bit unsigned saturating add; Q flag untouched. */
+        assign(res_q, binop(Iop_QAdd8Ux4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uqadd8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ uqsub8<c> <Rd>,<Rn>,<Rm> ------------------ */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAC && (INSNT1(15,0) & 0xF0F0) == 0xF050) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            (INSNA(7,4)  == BITS4(1,1,1,1))) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+             gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise 8-bit unsigned saturating subtract; Q untouched. */
+        assign(res_q, binop(Iop_QSub8Ux4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uqsub8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- uhadd8<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF060) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(1,0,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Unsigned halving add: each 8-bit lane gets (a+b) >> 1.
+           No flags are set. */
+        assign(res_q, binop(Iop_HAdd8Ux4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uhadd8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- uhadd16<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA9 && (INSNT1(15,0) & 0xF0F0) == 0xF060) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Unsigned halving add: each 16-bit lane gets (a+b) >> 1.
+           No flags are set. */
+        assign(res_q, binop(Iop_HAdd16Ux2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uhadd16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- shadd8<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF020) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(1,0,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Signed halving add: each 8-bit lane gets (a+b) >> 1 with
+           signed rounding toward minus infinity.  No flags are set. */
+        assign(res_q, binop(Iop_HAdd8Sx4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("shadd8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ qadd16<c> <Rd>,<Rn>,<Rm> ------------------ */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA9 && (INSNT1(15,0) & 0xF0F0) == 0xF010) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise 16-bit signed saturating add.  Note: no Q-flag
+           update is performed (no or_into_QFLAG32 call). */
+        assign(res_q, binop(Iop_QAdd16Sx2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("qadd16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ qsub16<c> <Rd>,<Rn>,<Rm> ------------------ */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+      if (isT) {
+        if (INSNT0(15,4) == 0xFAD && (INSNT1(15,0) & 0xF0F0) == 0xF010) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+             gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise 16-bit signed saturating subtract.  Note: no Q-flag
+           update is performed (no or_into_QFLAG32 call). */
+        assign(res_q, binop(Iop_QSub16Sx2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("qsub16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- qsax<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* Saturating subtract-add with exchange:
+      Rd.hi = sat16(Rn.hi - Rm.lo), Rd.lo = sat16(Rn.lo + Rm.hi).
+      note: the hardware seems to construct the result differently
+      from what the manual says. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAE && (INSNT1(15,0) & 0xF0F0) == 0xF010) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN     = newTemp(Ity_I32);
+        IRTemp irt_regM     = newTemp(Ity_I32);
+        IRTemp irt_sum      = newTemp(Ity_I32);
+        IRTemp irt_diff     = newTemp(Ity_I32);
+        IRTemp irt_sum_res  = newTemp(Ity_I32);
+        IRTemp irt_diff_res = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* diff = Rn.hi - sx16(Rm.lo); the Shl/Sar-by-16 pair
+           sign-extends the low halfword to 32 bits. */
+        assign( irt_diff, 
+                binop( Iop_Sub32, 
+                       binop( Iop_Sar32, mkexpr(irt_regN), mkU8(16) ),
+                       binop( Iop_Sar32, 
+                              binop(Iop_Shl32, mkexpr(irt_regM), mkU8(16)), 
+                              mkU8(16) ) ) );
+        armSignedSatQ( irt_diff, 0x10, &irt_diff_res, NULL);
+
+        /* sum = sx16(Rn.lo) + Rm.hi */
+        assign( irt_sum, 
+                binop( Iop_Add32, 
+                       binop( Iop_Sar32, 
+                              binop( Iop_Shl32, mkexpr(irt_regN), mkU8(16) ), 
+                              mkU8(16) ), 
+                       binop( Iop_Sar32, mkexpr(irt_regM), mkU8(16) )) );
+        armSignedSatQ( irt_sum, 0x10, &irt_sum_res, NULL );
+
+        /* Pack: saturated diff in the high halfword, saturated sum in
+           the low halfword. */
+        IRExpr* ire_result = binop( Iop_Or32, 
+                                    binop( Iop_Shl32, mkexpr(irt_diff_res), 
+                                           mkU8(16) ), 
+                                    binop( Iop_And32, mkexpr(irt_sum_res), 
+                                           mkU32(0xFFFF)) );
+
+        if (isT) 
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "qsax%s r%u, r%u, r%u\n", nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- qasx<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* Saturating add-subtract with exchange:
+      Rd.hi = sat16(Rn.hi + Rm.lo), Rd.lo = sat16(Rn.lo - Rm.hi). */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAA && (INSNT1(15,0) & 0xF0F0) == 0xF010) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN     = newTemp(Ity_I32);
+        IRTemp irt_regM     = newTemp(Ity_I32);
+        IRTemp irt_sum      = newTemp(Ity_I32);
+        IRTemp irt_diff     = newTemp(Ity_I32);
+        IRTemp irt_res_sum  = newTemp(Ity_I32);
+        IRTemp irt_res_diff = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* diff = sx16(Rn.lo) - Rm.hi; Shl/Sar by 16 sign-extends the
+           low halfword. */
+        assign( irt_diff,  
+                binop( Iop_Sub32, 
+                       binop( Iop_Sar32, 
+                              binop( Iop_Shl32, mkexpr(irt_regN), mkU8(16) ), 
+                              mkU8(16) ), 
+                       binop( Iop_Sar32, mkexpr(irt_regM), mkU8(16) ) ) );
+        armSignedSatQ( irt_diff, 0x10, &irt_res_diff, NULL );
+
+        /* sum = Rn.hi + sx16(Rm.lo) */
+        assign( irt_sum, 
+                binop( Iop_Add32, 
+                       binop( Iop_Sar32, mkexpr(irt_regN), mkU8(16) ), 
+                       binop( Iop_Sar32, 
+                              binop( Iop_Shl32, mkexpr(irt_regM), mkU8(16) ), 
+                              mkU8(16) ) ) );
+        armSignedSatQ( irt_sum, 0x10, &irt_res_sum, NULL );
+       
+        /* Pack: saturated sum high, saturated diff low. */
+        IRExpr* ire_result 
+          = binop( Iop_Or32, 
+                   binop( Iop_Shl32, mkexpr(irt_res_sum), mkU8(16) ), 
+                   binop( Iop_And32, mkexpr(irt_res_diff), mkU32(0xFFFF) ) );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "qasx%s r%u, r%u, r%u\n", nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- sasx<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* Signed add-subtract with exchange (no saturation):
+      Rd.hi = Rn.hi + Rm.lo, Rd.lo = Rn.lo - Rm.hi, and the GE flags
+      are set from the sign of each partial result. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAA && (INSNT1(15,0) & 0xF0F0) == 0xF000) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,0,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN = newTemp(Ity_I32);
+        IRTemp irt_regM = newTemp(Ity_I32);
+        IRTemp irt_sum  = newTemp(Ity_I32);
+        IRTemp irt_diff = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* diff = sx16(Rn.lo) - Rm.hi */
+        assign( irt_diff,  
+                binop( Iop_Sub32, 
+                       binop( Iop_Sar32, 
+                              binop( Iop_Shl32, mkexpr(irt_regN), mkU8(16) ), 
+                              mkU8(16) ), 
+                       binop( Iop_Sar32, mkexpr(irt_regM), mkU8(16) ) ) );
+
+        /* sum = Rn.hi + sx16(Rm.lo) */
+        assign( irt_sum, 
+                binop( Iop_Add32, 
+                       binop( Iop_Sar32, mkexpr(irt_regN), mkU8(16) ), 
+                       binop( Iop_Sar32, 
+                              binop( Iop_Shl32, mkexpr(irt_regM), mkU8(16) ), 
+                              mkU8(16) ) ) );
+       
+        IRExpr* ire_result 
+          = binop( Iop_Or32, 
+                   binop( Iop_Shl32, mkexpr(irt_sum), mkU8(16) ), 
+                   binop( Iop_And32, mkexpr(irt_diff), mkU32(0xFFFF) ) );
+
+        /* GE[1:0] <- (diff >= 0): bit 31 of ~diff is 1 iff diff >= 0. */
+        IRTemp ge10 = newTemp(Ity_I32);
+        assign(ge10, unop(Iop_Not32, mkexpr(irt_diff)));
+        put_GEFLAG32( 0, 31, mkexpr(ge10), condT );
+        put_GEFLAG32( 1, 31, mkexpr(ge10), condT );
+
+        /* GE[3:2] <- (sum >= 0), same trick. */
+        IRTemp ge32 = newTemp(Ity_I32);
+        assign(ge32, unop(Iop_Not32, mkexpr(irt_sum)));
+        put_GEFLAG32( 2, 31, mkexpr(ge32), condT );
+        put_GEFLAG32( 3, 31, mkexpr(ge32), condT );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "sasx%s r%u, r%u, r%u\n", nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* --------------- smuad, smuadx<c><Rd>,<Rn>,<Rm> --------------- */
+   /* --------------- smsad, smsadx<c><Rd>,<Rn>,<Rm> --------------- */
+   /* Dual 16x16 signed multiply, then add (smuad) or subtract (smusd)
+      the two products.  The 'x' variant swaps Rm's halfwords first. */
+   {
+     UInt regD = 99, regN = 99, regM = 99, bitM = 99;
+     Bool gate = False, isAD = False;
+
+     if (isT) {
+        if ((INSNT0(15,4) == 0xFB2 || INSNT0(15,4) == 0xFB4)
+            && (INSNT1(15,0) & 0xF0E0) == 0xF000) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           bitM = INSNT1(4,4);
+           isAD = INSNT0(15,4) == 0xFB2;
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,1,0,0,0,0) &&
+            INSNA(15,12) == BITS4(1,1,1,1)         &&
+            (INSNA(7,4) & BITS4(1,0,0,1)) == BITS4(0,0,0,1) ) {
+           regD = INSNA(19,16);
+           regN = INSNA(3,0);
+           regM = INSNA(11,8);
+           bitM = INSNA(5,5);
+           isAD = INSNA(6,6) == 0;
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN    = newTemp(Ity_I32);
+        IRTemp irt_regM    = newTemp(Ity_I32);
+        IRTemp irt_prod_lo = newTemp(Ity_I32);
+        IRTemp irt_prod_hi = newTemp(Ity_I32);
+        IRTemp tmpM        = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+
+        /* For the 'x' form, rotate Rm by 16 to exchange its halves. */
+        assign( tmpM, isT ? getIRegT(regM) : getIRegA(regM) );
+        assign( irt_regM, genROR32(tmpM, (bitM & 1) ? 16 : 0) );
+
+        /* prod_lo = sx16(Rn.lo) * sx16(Rm.lo) */
+        assign( irt_prod_lo, 
+                binop( Iop_Mul32, 
+                       binop( Iop_Sar32, 
+                              binop(Iop_Shl32, mkexpr(irt_regN), mkU8(16)), 
+                              mkU8(16) ), 
+                       binop( Iop_Sar32, 
+                              binop(Iop_Shl32, mkexpr(irt_regM), mkU8(16)), 
+                              mkU8(16) ) ) );
+        /* prod_hi = Rn.hi * Rm.hi (sign-extended) */
+        assign( irt_prod_hi, binop(Iop_Mul32, 
+                                   binop(Iop_Sar32, mkexpr(irt_regN), mkU8(16)), 
+                                   binop(Iop_Sar32, mkexpr(irt_regM), mkU8(16))) );
+        IRExpr* ire_result 
+           = binop( isAD ? Iop_Add32 : Iop_Sub32,
+                    mkexpr(irt_prod_lo), mkexpr(irt_prod_hi) );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        /* Only the add form can overflow and hence set Q; the subtract
+           form leaves Q untouched. */
+        if (isAD) {
+           or_into_QFLAG32(
+              signed_overflow_after_Add32( ire_result,
+                                           irt_prod_lo, irt_prod_hi ),
+              condT
+           );
+        }
+
+        DIP("smu%cd%s%s r%u, r%u, r%u\n",
+            isAD ? 'a' : 's',
+            bitM ? "x" : "", nCC(conq), regD, regN, regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* --------------- smlad{X}<c> <Rd>,<Rn>,<Rm>,<Ra> -------------- */
+   /* --------------- smlsd{X}<c> <Rd>,<Rn>,<Rm>,<Ra> -------------- */
+   /* Like smuad/smusd but with a 32-bit accumulate from Ra; the 'X'
+      variant swaps Rm's halfwords before multiplying. */
+   {
+     UInt regD = 99, regN = 99, regM = 99, regA = 99, bitM = 99;
+     Bool gate = False, isAD = False;
+
+     if (isT) {
+       if ((INSNT0(15,4) == 0xFB2 || INSNT0(15,4) == 0xFB4)
+           && INSNT1(7,5) == BITS3(0,0,0)) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           regA = INSNT1(15,12);
+           bitM = INSNT1(4,4);
+           isAD = INSNT0(15,4) == 0xFB2;
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM)
+               && !isBadRegT(regA))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,1,0,0,0,0) &&
+            (INSNA(7,4) & BITS4(1,0,0,1)) == BITS4(0,0,0,1)) {
+           regD = INSNA(19,16);
+           regA = INSNA(15,12);
+           regN = INSNA(3,0);
+           regM = INSNA(11,8);
+           bitM = INSNA(5,5);
+           isAD = INSNA(6,6) == 0;
+           if (regD != 15 && regN != 15 && regM != 15 && regA != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN    = newTemp(Ity_I32);
+        IRTemp irt_regM    = newTemp(Ity_I32);
+        IRTemp irt_regA    = newTemp(Ity_I32);
+        IRTemp irt_prod_lo = newTemp(Ity_I32);
+        IRTemp irt_prod_hi = newTemp(Ity_I32);
+        IRTemp irt_sum     = newTemp(Ity_I32);
+        IRTemp tmpM        = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( irt_regA, isT ? getIRegT(regA) : getIRegA(regA) );
+
+        /* 'X' form: exchange Rm's halves via a 16-bit rotate. */
+        assign( tmpM, isT ? getIRegT(regM) : getIRegA(regM) );
+        assign( irt_regM, genROR32(tmpM, (bitM & 1) ? 16 : 0) );
+
+        /* prod_lo = sx16(Rn.lo) * sx16(Rm.lo) */
+        assign( irt_prod_lo, 
+                binop(Iop_Mul32, 
+                      binop(Iop_Sar32, 
+                            binop( Iop_Shl32, mkexpr(irt_regN), mkU8(16) ), 
+                            mkU8(16)), 
+                      binop(Iop_Sar32, 
+                            binop( Iop_Shl32, mkexpr(irt_regM), mkU8(16) ), 
+                            mkU8(16))) );
+        /* prod_hi = Rn.hi * Rm.hi */
+        assign( irt_prod_hi, 
+                binop( Iop_Mul32, 
+                       binop( Iop_Sar32, mkexpr(irt_regN), mkU8(16) ), 
+                       binop( Iop_Sar32, mkexpr(irt_regM), mkU8(16) ) ) );
+        assign( irt_sum, binop( isAD ? Iop_Add32 : Iop_Sub32, 
+                                mkexpr(irt_prod_lo), mkexpr(irt_prod_hi) ) );
+
+        IRExpr* ire_result = binop(Iop_Add32, mkexpr(irt_sum), mkexpr(irt_regA));
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        /* Q is set if the product sum overflowed (add form only) ... */
+        if (isAD) {
+           or_into_QFLAG32(
+              signed_overflow_after_Add32( mkexpr(irt_sum),
+                                           irt_prod_lo, irt_prod_hi ),
+              condT
+           );
+        }
+
+        /* ... and also if the final accumulate overflowed. */
+        or_into_QFLAG32(
+           signed_overflow_after_Add32( ire_result, irt_sum, irt_regA ),
+           condT
+        );
+
+        DIP("sml%cd%s%s r%u, r%u, r%u, r%u\n",
+            isAD ? 'a' : 's',
+            bitM ? "x" : "", nCC(conq), regD, regN, regM, regA);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----- smlabb, smlabt, smlatb, smlatt <Rd>,<Rn>,<Rm>,<Ra> ----- */
+   /* 16x16 + 32 signed multiply-accumulate.  bitN/bitM select the
+      top ('t') or bottom ('b') halfword of Rn/Rm respectively. */
+   {
+     UInt regD = 99, regN = 99, regM = 99, regA = 99, bitM = 99, bitN = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFB1 && INSNT1(7,6) == BITS2(0,0)) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           regA = INSNT1(15,12);
+           bitM = INSNT1(4,4);
+           bitN = INSNT1(5,5);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM)
+               && !isBadRegT(regA))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,0,0,1,0,0,0,0) &&
+            (INSNA(7,4) & BITS4(1,0,0,1)) == BITS4(1,0,0,0)) {
+           regD = INSNA(19,16);
+           regN = INSNA(3,0);
+           regM = INSNA(11,8);
+           regA = INSNA(15,12);
+           bitM = INSNA(6,6);
+           bitN = INSNA(5,5);
+           if (regD != 15 && regN != 15 && regM != 15 && regA != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regA = newTemp(Ity_I32);
+        IRTemp irt_prod = newTemp(Ity_I32);
+
+        /* Shl by 0 (top half) or 16 (bottom half), then Sar 16:
+           sign-extends the selected halfword to 32 bits. */
+        assign( irt_prod, 
+                binop(Iop_Mul32, 
+                      binop(Iop_Sar32, 
+                            binop(Iop_Shl32,
+                                  isT ? getIRegT(regN) : getIRegA(regN),
+                                  mkU8(bitN ? 0 : 16)),
+                            mkU8(16)), 
+                      binop(Iop_Sar32, 
+                            binop(Iop_Shl32,
+                                  isT ? getIRegT(regM) : getIRegA(regM),
+                                  mkU8(bitM ? 0 : 16)), 
+                            mkU8(16))) );
+
+        assign( irt_regA, isT ? getIRegT(regA) : getIRegA(regA) );
+
+        IRExpr* ire_result = binop(Iop_Add32, mkexpr(irt_prod), mkexpr(irt_regA));
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        /* Q is set if the accumulate overflowed. */
+        or_into_QFLAG32(
+           signed_overflow_after_Add32( ire_result, irt_prod, irt_regA ),
+           condT
+        );
+
+        DIP( "smla%c%c%s r%u, r%u, r%u, r%u\n", 
+             bitN ? 't' : 'b', bitM ? 't' : 'b', 
+             nCC(conq), regD, regN, regM, regA );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----- smlalbb, smlalbt, smlaltb, smlaltt <Rd>,<Rn>,<Rm>,<Ra> ----- */
+   /* 16x16 signed multiply, 64-bit accumulate into RdHi:RdLo.  No
+      status flags are updated by this instruction. */
+   {
+     UInt regDHi = 99, regN = 99, regM = 99, regDLo = 99, bitM = 99, bitN = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFBC && INSNT1(7,6) == BITS2(1,0)) {
+           regN   = INSNT0(3,0);
+           regDHi = INSNT1(11,8);
+           regM   = INSNT1(3,0);
+           regDLo = INSNT1(15,12);
+           bitM   = INSNT1(4,4);
+           bitN   = INSNT1(5,5);
+           if (!isBadRegT(regDHi) && !isBadRegT(regN) && !isBadRegT(regM)
+               && !isBadRegT(regDLo) && regDHi != regDLo)
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,0,0,1,0,1,0,0) &&
+            (INSNA(7,4) & BITS4(1,0,0,1)) == BITS4(1,0,0,0)) {
+           regDHi = INSNA(19,16);
+           regN   = INSNA(3,0);
+           regM   = INSNA(11,8);
+           regDLo = INSNA(15,12);
+           bitM   = INSNA(6,6);
+           bitN   = INSNA(5,5);
+           if (regDHi != 15 && regN != 15 && regM != 15 && regDLo != 15 &&
+               regDHi != regDLo)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regD  = newTemp(Ity_I64);
+        IRTemp irt_prod  = newTemp(Ity_I64);
+        IRTemp irt_res   = newTemp(Ity_I64);
+        IRTemp irt_resHi = newTemp(Ity_I32);
+        IRTemp irt_resLo = newTemp(Ity_I32);
+
+        /* Select and sign-extend the chosen halfword of each operand
+           (Shl by 0 for top / 16 for bottom, then Sar 16), then do a
+           widening signed multiply to 64 bits. */
+        assign( irt_prod,
+                binop(Iop_MullS32,
+                      binop(Iop_Sar32,
+                            binop(Iop_Shl32,
+                                  isT ? getIRegT(regN) : getIRegA(regN),
+                                  mkU8(bitN ? 0 : 16)),
+                            mkU8(16)),
+                      binop(Iop_Sar32,
+                            binop(Iop_Shl32,
+                                  isT ? getIRegT(regM) : getIRegA(regM),
+                                  mkU8(bitM ? 0 : 16)),
+                            mkU8(16))) );
+
+        /* Assemble the 64-bit accumulator from RdHi:RdLo, add the
+           product, and split the result back into two registers. */
+        assign( irt_regD, binop(Iop_32HLto64,
+                                isT ? getIRegT(regDHi) : getIRegA(regDHi),
+                                isT ? getIRegT(regDLo) : getIRegA(regDLo)) );
+        assign( irt_res, binop(Iop_Add64, mkexpr(irt_regD), mkexpr(irt_prod)) );
+        assign( irt_resHi, unop(Iop_64HIto32, mkexpr(irt_res)) );
+        assign( irt_resLo, unop(Iop_64to32, mkexpr(irt_res)) );
+
+        if (isT) {
+           putIRegT( regDHi, mkexpr(irt_resHi), condT );
+           putIRegT( regDLo, mkexpr(irt_resLo), condT );
+        } else {
+           putIRegA( regDHi, mkexpr(irt_resHi), condT, Ijk_Boring );
+           putIRegA( regDLo, mkexpr(irt_resLo), condT, Ijk_Boring );
+        }
+
+        DIP( "smlal%c%c%s r%u, r%u, r%u, r%u\n",
+             bitN ? 't' : 'b', bitM ? 't' : 'b',
+             nCC(conq), regDHi, regN, regM, regDLo );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----- smlawb, smlawt <Rd>,<Rn>,<Rm>,<Ra> ----- */
+   /* 32x16 signed multiply, take the top 32 bits of the 48-bit
+      product (i.e. (Rn * sx16(Rm half)) >> 16), then accumulate Ra. */
+   {
+     UInt regD = 99, regN = 99, regM = 99, regA = 99, bitM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFB3 && INSNT1(7,5) == BITS3(0,0,0)) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           regA = INSNT1(15,12);
+           bitM = INSNT1(4,4);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM)
+               && !isBadRegT(regA))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,0,0,1,0,0,1,0) &&
+            (INSNA(7,4) & BITS4(1,0,1,1)) == BITS4(1,0,0,0)) {
+           regD = INSNA(19,16);
+           regN = INSNA(3,0);
+           regM = INSNA(11,8);
+           regA = INSNA(15,12);
+           bitM = INSNA(6,6);
+           if (regD != 15 && regN != 15 && regM != 15 && regA != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regA = newTemp(Ity_I32);
+        IRTemp irt_prod = newTemp(Ity_I64);
+
+        /* 64-bit product of Rn and the sign-extended selected
+           halfword of Rm (bitM: 1 = top, 0 = bottom). */
+        assign( irt_prod, 
+                binop(Iop_MullS32, 
+                      isT ? getIRegT(regN) : getIRegA(regN),
+                      binop(Iop_Sar32, 
+                            binop(Iop_Shl32,
+                                  isT ? getIRegT(regM) : getIRegA(regM),
+                                  mkU8(bitM ? 0 : 16)), 
+                            mkU8(16))) );
+
+        assign( irt_regA, isT ? getIRegT(regA) : getIRegA(regA) );
+
+        /* prod32 = bits [47:16] of the 64-bit product, assembled from
+           the high half shifted up and the low half shifted down. */
+        IRTemp prod32 = newTemp(Ity_I32);
+        assign(prod32,
+               binop(Iop_Or32,
+                     binop(Iop_Shl32, unop(Iop_64HIto32, mkexpr(irt_prod)), mkU8(16)),
+                     binop(Iop_Shr32, unop(Iop_64to32, mkexpr(irt_prod)), mkU8(16))
+        ));
+
+        IRExpr* ire_result = binop(Iop_Add32, mkexpr(prod32), mkexpr(irt_regA));
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        /* Q is set if the accumulate overflowed. */
+        or_into_QFLAG32(
+           signed_overflow_after_Add32( ire_result, prod32, irt_regA ),
+           condT
+        );
+
+        DIP( "smlaw%c%s r%u, r%u, r%u, r%u\n", 
+             bitM ? 't' : 'b', 
+             nCC(conq), regD, regN, regM, regA );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- sel<c> <Rd>,<Rn>,<Rm> -------------------- */
+   /* Byte-wise select: each byte of Rd comes from Rn if the
+      corresponding GE flag is set, else from Rm. */
+   /* fixme: fix up the test in v6media.c so that we can pass the ge
+      flags as part of the test. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAA && (INSNT1(15,0) & 0xF0F0) == 0xF080) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,1,0,0,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(1,0,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_ge_flag0 = newTemp(Ity_I32);
+        IRTemp irt_ge_flag1 = newTemp(Ity_I32);
+        IRTemp irt_ge_flag2 = newTemp(Ity_I32);
+        IRTemp irt_ge_flag3 = newTemp(Ity_I32);
+
+        assign( irt_ge_flag0, get_GEFLAG32(0) );
+        assign( irt_ge_flag1, get_GEFLAG32(1) );
+        assign( irt_ge_flag2, get_GEFLAG32(2) );
+        assign( irt_ge_flag3, get_GEFLAG32(3) );
+
+        /* f | -f has its sign bit set iff f is nonzero; Sar by 31
+           below turns that into an all-ones / all-zeroes mask. */
+        IRExpr* ire_ge_flag0_or 
+          = binop(Iop_Or32, mkexpr(irt_ge_flag0), 
+                  binop(Iop_Sub32, mkU32(0), mkexpr(irt_ge_flag0)));
+        IRExpr* ire_ge_flag1_or 
+          = binop(Iop_Or32, mkexpr(irt_ge_flag1), 
+                  binop(Iop_Sub32, mkU32(0), mkexpr(irt_ge_flag1)));
+        IRExpr* ire_ge_flag2_or 
+          = binop(Iop_Or32, mkexpr(irt_ge_flag2), 
+                  binop(Iop_Sub32, mkU32(0), mkexpr(irt_ge_flag2)));
+        IRExpr* ire_ge_flag3_or 
+          = binop(Iop_Or32, mkexpr(irt_ge_flag3), 
+                  binop(Iop_Sub32, mkU32(0), mkexpr(irt_ge_flag3)));
+
+        /* Build a 32-bit mask with 0xFF in byte lane i iff GE[i]. */
+        IRExpr* ire_ge_flags 
+          = binop( Iop_Or32, 
+                   binop(Iop_Or32, 
+                         binop(Iop_And32, 
+                               binop(Iop_Sar32, ire_ge_flag0_or, mkU8(31)), 
+                               mkU32(0x000000ff)), 
+                         binop(Iop_And32, 
+                               binop(Iop_Sar32, ire_ge_flag1_or, mkU8(31)), 
+                               mkU32(0x0000ff00))), 
+                   binop(Iop_Or32, 
+                         binop(Iop_And32, 
+                               binop(Iop_Sar32, ire_ge_flag2_or, mkU8(31)), 
+                               mkU32(0x00ff0000)), 
+                         binop(Iop_And32, 
+                               binop(Iop_Sar32, ire_ge_flag3_or, mkU8(31)), 
+                               mkU32(0xff000000))) );
+
+        /* Rd = (Rn & mask) | (Rm & ~mask) */
+        IRExpr* ire_result 
+          = binop(Iop_Or32, 
+                  binop(Iop_And32,
+                        isT ? getIRegT(regN) : getIRegA(regN),
+                        ire_ge_flags ), 
+                  binop(Iop_And32,
+                        isT ? getIRegT(regM) : getIRegA(regM),
+                        unop(Iop_Not32, ire_ge_flags)));
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP("sel%s r%u, r%u, r%u\n", nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- uxtab16<c> Rd,Rn,Rm{,rot} ------------------ */
+   /* Rotate Rm by 0/8/16/24, zero-extend bytes 0 and 2 of the rotated
+      value to halfwords, and add them to the two halfwords of Rn. */
+   {
+     UInt regD = 99, regN = 99, regM = 99, rotate = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA3 && (INSNT1(15,0) & 0xF0C0) == 0xF080) {
+           regN   = INSNT0(3,0);
+           regD   = INSNT1(11,8);
+           regM   = INSNT1(3,0);
+           rotate = INSNT1(5,4);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,1,1,0,0) &&
+            INSNA(9,4)   == BITS6(0,0,0,1,1,1) ) {
+           regD   = INSNA(15,12);
+           regN   = INSNA(19,16);
+           regM   = INSNA(3,0);
+           rotate = INSNA(11,10);
+           if (regD != 15 && regN != 15 && regM != 15)
+             gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN = newTemp(Ity_I32);
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+
+        IRTemp irt_regM = newTemp(Ity_I32);
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Rotate, then keep only bytes 0 and 2: each selected byte is
+           now a zero-extended 16-bit lane. */
+        IRTemp irt_rot = newTemp(Ity_I32);
+        assign( irt_rot, binop(Iop_And32,
+                               genROR32(irt_regM, 8 * rotate),
+                               mkU32(0x00FF00FF)) );
+
+        /* Low lane: add then mask, so any carry out of bit 15 is
+           discarded rather than leaking into the high lane. */
+        IRExpr* resLo
+           = binop(Iop_And32,
+                   binop(Iop_Add32, mkexpr(irt_regN), mkexpr(irt_rot)),
+                   mkU32(0x0000FFFF));
+
+        /* High lane: mask both operands first so the low halves can't
+           contribute a carry. */
+        IRExpr* resHi
+           = binop(Iop_Add32, 
+                   binop(Iop_And32, mkexpr(irt_regN), mkU32(0xFFFF0000)),
+                   binop(Iop_And32, mkexpr(irt_rot),  mkU32(0xFFFF0000)));
+
+        IRExpr* ire_result 
+           = binop( Iop_Or32, resHi, resLo );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "uxtab16%s r%u, r%u, r%u, ROR #%u\n", 
+             nCC(conq), regD, regN, regM, 8 * rotate );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* --------------- usad8  Rd,Rn,Rm    ---------------- */
+   /* --------------- usada8 Rd,Rn,Rm,Ra ---------------- */
+   /* Sum of absolute byte differences of Rn and Rm, optionally
+      accumulated with Ra (usada8). */
+   {
+     UInt rD = 99, rN = 99, rM = 99, rA = 99;
+     Bool gate = False;
+
+     if (isT) {
+       if (INSNT0(15,4) == 0xFB7 && INSNT1(7,4) == BITS4(0,0,0,0)) {
+           rN = INSNT0(3,0);
+           rA = INSNT1(15,12);
+           rD = INSNT1(11,8);
+           rM = INSNT1(3,0);
+           if (!isBadRegT(rD) && !isBadRegT(rN) && !isBadRegT(rM) && rA != 13)
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,1,1,0,0,0) &&
+            INSNA(7,4)   == BITS4(0,0,0,1) ) {
+           rD = INSNA(19,16);
+           rA = INSNA(15,12);
+           rM = INSNA(11,8);
+           rN = INSNA(3,0);
+           if (rD != 15 && rN != 15 && rM != 15 /* but rA can be 15 */)
+              gate = True;
+        }
+     }
+     /* We allow rA == 15, to denote the usad8 (no accumulator) case. */
+
+     if (gate) {
+        IRExpr* rNe = isT ? getIRegT(rN) : getIRegA(rN);
+        IRExpr* rMe = isT ? getIRegT(rM) : getIRegA(rM);
+        /* rA == 15 means "no accumulator": add zero instead. */
+        IRExpr* rAe = rA == 15 ? mkU32(0)
+                               : (isT ? getIRegT(rA) : getIRegA(rA)); 
+        IRExpr* res = binop(Iop_Add32,
+                            binop(Iop_Sad8Ux4, rNe, rMe),
+                            rAe);
+        if (isT)
+           putIRegT( rD, res, condT );
+        else
+           putIRegA( rD, res, condT, Ijk_Boring );
+
+        if (rA == 15) {
+           DIP( "usad8%s r%u, r%u, r%u\n", 
+                nCC(conq), rD, rN, rM );
+        } else {
+           DIP( "usada8%s r%u, r%u, r%u, r%u\n", 
+                nCC(conq), rD, rN, rM, rA );
+        }
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ qadd<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* 32-bit signed saturating add: Rd = sat32(Rm + Rn); Q is set if
+      the non-saturating add would have overflowed. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF080) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,0,0,1,0,0,0,0) &&
+            INSNA(11,8)  == BITS4(0,0,0,0)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign(res_q, binop(Iop_QAdd32S, mkexpr(rMt), mkexpr(rNt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        /* Compare against the plain (wrapping) add to detect the
+           overflow that triggered saturation. */
+        or_into_QFLAG32(
+           signed_overflow_after_Add32(
+              binop(Iop_Add32, mkexpr(rMt), mkexpr(rNt)), rMt, rNt),
+           condT
+        );
+
+        DIP("qadd%s r%u, r%u, r%u\n", nCC(conq),regD,regM,regN);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ qdadd<c> <Rd>,<Rm>,<Rn> ------------------- */
+   /* Saturating "double and add": Rd = sat32(Rm + sat32(2*Rn)).
+      Q is set if either the doubling or the final add saturates. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF090) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,0,0,1,0,1,0,0) &&
+            INSNA(11,8)  == BITS4(0,0,0,0)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp rN_d  = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Q update for the doubling step (Rn + Rn overflow). */
+        or_into_QFLAG32(
+           signed_overflow_after_Add32(
+              binop(Iop_Add32, mkexpr(rNt), mkexpr(rNt)), rNt, rNt),
+           condT
+        );
+
+        /* rN_d = sat32(2*Rn); result = sat32(Rm + rN_d). */
+        assign(rN_d,  binop(Iop_QAdd32S, mkexpr(rNt), mkexpr(rNt)));
+        assign(res_q, binop(Iop_QAdd32S, mkexpr(rMt), mkexpr(rN_d)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        /* Q update for the final (saturating) add. */
+        or_into_QFLAG32(
+           signed_overflow_after_Add32(
+              binop(Iop_Add32, mkexpr(rMt), mkexpr(rN_d)), rMt, rN_d),
+           condT
+        );
+
+        DIP("qdadd%s r%u, r%u, r%u\n", nCC(conq),regD,regM,regN);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ qsub<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* Rd = signed-saturated (Rm - Rn); sets Q on saturation. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF0A0) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,0,0,1,0,0,1,0) &&
+            INSNA(11,8)  == BITS4(0,0,0,0)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign(res_q, binop(Iop_QSub32S, mkexpr(rMt), mkexpr(rNt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        /* Q is set when the plain (non-saturating) subtract would overflow. */
+        or_into_QFLAG32(
+           signed_overflow_after_Sub32(
+              binop(Iop_Sub32, mkexpr(rMt), mkexpr(rNt)), rMt, rNt),
+           condT
+        );
+
+        DIP("qsub%s r%u, r%u, r%u\n", nCC(conq),regD,regM,regN);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ qdsub<c> <Rd>,<Rm>,<Rn> ------------------- */
+   /* Rd = sat(Rm - sat(2*Rn)); either saturation may set Q. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA8 && (INSNT1(15,0) & 0xF0F0) == 0xF0B0) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,0,0,1,0,1,1,0) &&
+            INSNA(11,8)  == BITS4(0,0,0,0)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp rN_d  = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Set Q if the doubling (Rn + Rn) overflows as a signed 32-bit add. */
+        or_into_QFLAG32(
+           signed_overflow_after_Add32(
+              binop(Iop_Add32, mkexpr(rNt), mkexpr(rNt)), rNt, rNt),
+           condT
+        );
+
+        assign(rN_d,  binop(Iop_QAdd32S, mkexpr(rNt), mkexpr(rNt)));
+        assign(res_q, binop(Iop_QSub32S, mkexpr(rMt), mkexpr(rN_d)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        /* Set Q also if the final subtract Rm - sat(2*Rn) overflows. */
+        or_into_QFLAG32(
+           signed_overflow_after_Sub32(
+              binop(Iop_Sub32, mkexpr(rMt), mkexpr(rN_d)), rMt, rN_d),
+           condT
+        );
+
+        DIP("qdsub%s r%u, r%u, r%u\n", nCC(conq),regD,regM,regN);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ uqsub16<c> <Rd>,<Rn>,<Rm> ------------------ */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAD && (INSNT1(15,0) & 0xF0F0) == 0xF050) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+             gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise unsigned saturating subtract of the two 16-bit halves;
+           no flags are written here. */
+        assign(res_q, binop(Iop_QSub16Ux2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uqsub16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- shadd16<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA9 && (INSNT1(15,0) & 0xF0F0) == 0xF020) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise signed halving add: (a + b) >> 1 on each 16-bit half. */
+        assign(res_q, binop(Iop_HAdd16Sx2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("shadd16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- uhsub8<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAC && (INSNT1(15,0) & 0xF0F0) == 0xF060) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(1,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise unsigned halving subtract on each of the four bytes. */
+        assign(res_q, binop(Iop_HSub8Ux4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uhsub8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- uhsub16<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAD && (INSNT1(15,0) & 0xF0F0) == 0xF060) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise unsigned halving subtract of the two 16-bit halves. */
+        assign(res_q, binop(Iop_HSub16Ux2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uhsub16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------ uqadd16<c> <Rd>,<Rn>,<Rm> ------------------ */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA9 && (INSNT1(15,0) & 0xF0F0) == 0xF050) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise unsigned saturating add of the two 16-bit halves;
+           no flags are written here. */
+        assign(res_q, binop(Iop_QAdd16Ux2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uqadd16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- uqsax<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* Exchange form: lo(Rd) = usat16(lo(Rn) + hi(Rm)),
+      hi(Rd) = usat16(hi(Rn) - lo(Rm)). */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAE && (INSNT1(15,0) & 0xF0F0) == 0xF050) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN     = newTemp(Ity_I32);
+        IRTemp irt_regM     = newTemp(Ity_I32);
+        IRTemp irt_sum      = newTemp(Ity_I32);
+        IRTemp irt_diff     = newTemp(Ity_I32);
+        IRTemp irt_sum_res  = newTemp(Ity_I32);
+        IRTemp irt_diff_res = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign( irt_diff, 
+                binop( Iop_Sub32, 
+                       binop( Iop_Shr32, mkexpr(irt_regN), mkU8(16) ),
+                       binop( Iop_Shr32, 
+                              binop(Iop_Shl32, mkexpr(irt_regM), mkU8(16)), 
+                              mkU8(16) ) ) );
+        /* NULL second arg: result saturates but the Q flag is not written. */
+        armUnsignedSatQ( &irt_diff_res, NULL, irt_diff, 0x10);
+
+        assign( irt_sum, 
+                binop( Iop_Add32, 
+                       binop( Iop_Shr32, 
+                              binop( Iop_Shl32, mkexpr(irt_regN), mkU8(16) ), 
+                              mkU8(16) ), 
+                       binop( Iop_Shr32, mkexpr(irt_regM), mkU8(16) )) );
+        armUnsignedSatQ( &irt_sum_res, NULL, irt_sum, 0x10 );
+
+        IRExpr* ire_result = binop( Iop_Or32, 
+                                    binop( Iop_Shl32, mkexpr(irt_diff_res), 
+                                           mkU8(16) ), 
+                                    binop( Iop_And32, mkexpr(irt_sum_res), 
+                                           mkU32(0xFFFF)) );
+
+        if (isT) 
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "uqsax%s r%u, r%u, r%u\n", nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- uqasx<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* Exchange form: hi(Rd) = usat16(hi(Rn) + lo(Rm)),
+      lo(Rd) = usat16(lo(Rn) - hi(Rm)). */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAA && (INSNT1(15,0) & 0xF0F0) == 0xF050) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,0) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN     = newTemp(Ity_I32);
+        IRTemp irt_regM     = newTemp(Ity_I32);
+        IRTemp irt_sum      = newTemp(Ity_I32);
+        IRTemp irt_diff     = newTemp(Ity_I32);
+        IRTemp irt_res_sum  = newTemp(Ity_I32);
+        IRTemp irt_res_diff = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign( irt_diff,  
+                binop( Iop_Sub32,
+                       binop( Iop_Shr32, 
+                              binop( Iop_Shl32, mkexpr(irt_regN), mkU8(16) ), 
+                              mkU8(16) ), 
+                       binop( Iop_Shr32, mkexpr(irt_regM), mkU8(16) ) ) );
+        /* NULL second arg: result saturates but the Q flag is not written. */
+        armUnsignedSatQ( &irt_res_diff, NULL, irt_diff, 0x10 );
+
+        assign( irt_sum, 
+                binop( Iop_Add32, 
+                       binop( Iop_Shr32, mkexpr(irt_regN), mkU8(16) ), 
+                       binop( Iop_Shr32, 
+                              binop( Iop_Shl32, mkexpr(irt_regM), mkU8(16) ), 
+                              mkU8(16) ) ) );
+        armUnsignedSatQ( &irt_res_sum, NULL, irt_sum, 0x10 );
+       
+        IRExpr* ire_result 
+          = binop( Iop_Or32, 
+                   binop( Iop_Shl32, mkexpr(irt_res_sum), mkU8(16) ), 
+                   binop( Iop_And32, mkexpr(irt_res_diff), mkU32(0xFFFF) ) );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "uqasx%s r%u, r%u, r%u\n", nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- usax<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* lo(Rd) = lo(Rn) + hi(Rm) (mod 2^16); hi(Rd) = hi(Rn) - lo(Rm);
+      updates the four GE flags, no saturation. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAE && (INSNT1(15,0) & 0xF0F0) == 0xF040) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,0,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN = newTemp(Ity_I32);
+        IRTemp irt_regM = newTemp(Ity_I32);
+        IRTemp irt_sum  = newTemp(Ity_I32);
+        IRTemp irt_diff = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign( irt_sum,  
+                binop( Iop_Add32, 
+                       unop( Iop_16Uto32,
+                             unop( Iop_32to16, mkexpr(irt_regN) )
+                       ),  
+                       binop( Iop_Shr32, mkexpr(irt_regM), mkU8(16) ) ) );
+
+        assign( irt_diff, 
+                binop( Iop_Sub32, 
+                       binop( Iop_Shr32, mkexpr(irt_regN), mkU8(16) ), 
+                       unop( Iop_16Uto32, 
+                             unop( Iop_32to16, mkexpr(irt_regM) )
+                       )
+                )
+        );
+       
+        IRExpr* ire_result 
+          = binop( Iop_Or32, 
+                   binop( Iop_Shl32, mkexpr(irt_diff), mkU8(16) ), 
+                   binop( Iop_And32, mkexpr(irt_sum), mkU32(0xFFFF) ) );
+
+        /* GE[1:0] = carry-out of the 16-bit add, i.e. sum >= 0x10000. */
+        IRTemp ge10 = newTemp(Ity_I32);
+        assign( ge10, IRExpr_ITE( binop( Iop_CmpLE32U, 
+                                         mkU32(0x10000), mkexpr(irt_sum) ),
+                                  mkU32(1), mkU32(0) ) );
+        put_GEFLAG32( 0, 0, mkexpr(ge10), condT );
+        put_GEFLAG32( 1, 0, mkexpr(ge10), condT );
+
+        /* GE[3:2] = 1 iff diff >= 0 (inverted sign bit of diff). */
+        IRTemp ge32 = newTemp(Ity_I32);
+        assign(ge32, unop(Iop_Not32, mkexpr(irt_diff)));
+        put_GEFLAG32( 2, 31, mkexpr(ge32), condT );
+        put_GEFLAG32( 3, 31, mkexpr(ge32), condT );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "usax%s r%u, r%u, r%u\n", nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- uasx<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* lo(Rd) = lo(Rn) - hi(Rm); hi(Rd) = hi(Rn) + lo(Rm) (mod 2^16);
+      updates the four GE flags, no saturation. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAA && (INSNT1(15,0) & 0xF0F0) == 0xF040) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,0,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN = newTemp(Ity_I32);
+        IRTemp irt_regM = newTemp(Ity_I32);
+        IRTemp irt_sum  = newTemp(Ity_I32);
+        IRTemp irt_diff = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign( irt_diff,  
+                binop( Iop_Sub32, 
+                       unop( Iop_16Uto32, 
+                             unop( Iop_32to16, mkexpr(irt_regN) )
+                       ), 
+                       binop( Iop_Shr32, mkexpr(irt_regM), mkU8(16) ) ) );
+
+        assign( irt_sum, 
+                binop( Iop_Add32, 
+                       binop( Iop_Shr32, mkexpr(irt_regN), mkU8(16) ), 
+                       unop( Iop_16Uto32, 
+                             unop( Iop_32to16, mkexpr(irt_regM) )
+                       ) ) );
+       
+        IRExpr* ire_result 
+          = binop( Iop_Or32, 
+                   binop( Iop_Shl32, mkexpr(irt_sum), mkU8(16) ), 
+                   binop( Iop_And32, mkexpr(irt_diff), mkU32(0xFFFF) ) );
+
+        /* GE[1:0] = 1 iff diff >= 0 (inverted sign bit of diff). */
+        IRTemp ge10 = newTemp(Ity_I32);
+        assign(ge10, unop(Iop_Not32, mkexpr(irt_diff)));
+        put_GEFLAG32( 0, 31, mkexpr(ge10), condT );
+        put_GEFLAG32( 1, 31, mkexpr(ge10), condT );
+
+        /* GE[3:2] = carry-out of the 16-bit add, i.e. sum >= 0x10000. */
+        IRTemp ge32 = newTemp(Ity_I32);
+        assign( ge32, IRExpr_ITE( binop( Iop_CmpLE32U,
+                                         mkU32(0x10000), mkexpr(irt_sum) ),
+                                  mkU32(1), mkU32(0) ) );
+        put_GEFLAG32( 2, 0, mkexpr(ge32), condT );
+        put_GEFLAG32( 3, 0, mkexpr(ge32), condT );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "uasx%s r%u, r%u, r%u\n", nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ------------------- ssax<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* Signed exchange form: lo(Rd) = lo(Rn) + hi(Rm);
+      hi(Rd) = hi(Rn) - lo(Rm); updates the four GE flags. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAE && (INSNT1(15,0) & 0xF0F0) == 0xF000) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,0,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN = newTemp(Ity_I32);
+        IRTemp irt_regM = newTemp(Ity_I32);
+        IRTemp irt_sum  = newTemp(Ity_I32);
+        IRTemp irt_diff = newTemp(Ity_I32);
+
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign( irt_sum,  
+                binop( Iop_Add32, 
+                       binop( Iop_Sar32, 
+                              binop( Iop_Shl32, mkexpr(irt_regN), mkU8(16) ), 
+                              mkU8(16) ), 
+                       binop( Iop_Sar32, mkexpr(irt_regM), mkU8(16) ) ) );
+
+        assign( irt_diff, 
+                binop( Iop_Sub32, 
+                       binop( Iop_Sar32, mkexpr(irt_regN), mkU8(16) ), 
+                       binop( Iop_Sar32, 
+                              binop( Iop_Shl32, mkexpr(irt_regM), mkU8(16) ), 
+                              mkU8(16) ) ) );
+       
+        IRExpr* ire_result 
+          = binop( Iop_Or32, 
+                   binop( Iop_Shl32, mkexpr(irt_diff), mkU8(16) ), 
+                   binop( Iop_And32, mkexpr(irt_sum), mkU32(0xFFFF) ) );
+
+        /* GE[1:0] = 1 iff sum >= 0; GE[3:2] = 1 iff diff >= 0
+           (inverted sign bit in each case). */
+        IRTemp ge10 = newTemp(Ity_I32);
+        assign(ge10, unop(Iop_Not32, mkexpr(irt_sum)));
+        put_GEFLAG32( 0, 31, mkexpr(ge10), condT );
+        put_GEFLAG32( 1, 31, mkexpr(ge10), condT );
+
+        IRTemp ge32 = newTemp(Ity_I32);
+        assign(ge32, unop(Iop_Not32, mkexpr(irt_diff)));
+        put_GEFLAG32( 2, 31, mkexpr(ge32), condT );
+        put_GEFLAG32( 3, 31, mkexpr(ge32), condT );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "ssax%s r%u, r%u, r%u\n", nCC(conq), regD, regN, regM );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- shsub8<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAC && (INSNT1(15,0) & 0xF0F0) == 0xF020) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(1,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* Lane-wise signed halving subtract on each of the four bytes. */
+        assign(res_q, binop(Iop_HSub8Sx4, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("shsub8%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- sxtab16<c> Rd,Rn,Rm{,rot} ------------------ */
+   /* Rotate Rm right by 0/8/16/24 bits, sign-extend bytes 0 and 2 of
+      the rotated value to 16 bits, and add each to the corresponding
+      16-bit half of Rn. */
+   {
+     UInt regD = 99, regN = 99, regM = 99, rotate = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFA2 && (INSNT1(15,0) & 0xF0C0) == 0xF080) {
+           regN   = INSNT0(3,0);
+           regD   = INSNT1(11,8);
+           regM   = INSNT1(3,0);
+           rotate = INSNT1(5,4);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,1,0,0,0) &&
+            INSNA(9,4)   == BITS6(0,0,0,1,1,1) ) {
+           regD   = INSNA(15,12);
+           regN   = INSNA(19,16);
+           regM   = INSNA(3,0);
+           rotate = INSNA(11,10);
+           if (regD != 15 && regN != 15 && regM != 15)
+             gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_regN = newTemp(Ity_I32);
+        assign( irt_regN, isT ? getIRegT(regN) : getIRegA(regN) );
+
+        IRTemp irt_regM = newTemp(Ity_I32);
+        assign( irt_regM, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        IRTemp irt_rot = newTemp(Ity_I32);
+        assign( irt_rot, genROR32(irt_regM, 8 * rotate) );
+
+        /* FIXME Maybe we can write this arithmetic in shorter form. */
+        /* Low half: Rn + sext8to16(byte 0 of rotated Rm), truncated. */
+        IRExpr* resLo
+           = binop(Iop_And32,
+                   binop(Iop_Add32,
+                         mkexpr(irt_regN),
+                         unop(Iop_16Uto32,
+                              unop(Iop_8Sto16,
+                                   unop(Iop_32to8, mkexpr(irt_rot))))),
+                   mkU32(0x0000FFFF));
+
+        /* High half: hi(Rn) + sext8to16(byte 2 of rotated Rm), in place. */
+        IRExpr* resHi
+           = binop(Iop_And32,
+                   binop(Iop_Add32,
+                         mkexpr(irt_regN),
+                         binop(Iop_Shl32,
+                               unop(Iop_16Uto32,
+                                    unop(Iop_8Sto16,
+                                         unop(Iop_32to8,
+                                              binop(Iop_Shr32,
+                                                    mkexpr(irt_rot),
+                                                    mkU8(16))))),
+                               mkU8(16))),
+                   mkU32(0xFFFF0000));
+
+        IRExpr* ire_result 
+           = binop( Iop_Or32, resHi, resLo );
+
+        if (isT)
+           putIRegT( regD, ire_result, condT );
+        else
+           putIRegA( regD, ire_result, condT, Ijk_Boring );
+
+        DIP( "sxtab16%s r%u, r%u, r%u, ROR #%u\n", 
+             nCC(conq), regD, regN, regM, 8 * rotate );
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- shasx<c> <Rd>,<Rn>,<Rm> ------------------- */
+   /* Halving add/subtract with exchange, 16-bit halves treated as
+      signed: lo(Rd) = (lo(Rn) - hi(Rm)) >> 1;
+      hi(Rd) = (hi(Rn) + lo(Rm)) >> 1. */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAA && (INSNT1(15,0) & 0xF0F0) == 0xF020) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp irt_diff  = newTemp(Ity_I32);
+        IRTemp irt_sum   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        /* diff = sext16(lo(Rn)) - sext16(hi(Rm)); fits in 17 bits. */
+        assign( irt_diff,
+                binop(Iop_Sub32,
+                      unop(Iop_16Sto32,
+                           unop(Iop_32to16,
+                                mkexpr(rNt)
+                           )
+                      ),
+                      unop(Iop_16Sto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(rMt), mkU8(16)
+                                )
+                           )
+                      )
+                )
+        );
+
+        /* sum = sext16(hi(Rn)) + sext16(lo(Rm)); fits in 17 bits. */
+        assign( irt_sum,
+                binop(Iop_Add32,
+                      unop(Iop_16Sto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(rNt), mkU8(16)
+                                )
+                           )
+                      ),
+                      unop(Iop_16Sto32,
+                           unop(Iop_32to16, mkexpr(rMt)
+                           )
+                      )
+                )
+        );
+
+        /* Pack halved results: low half = diff>>1, high half = sum>>1. */
+        assign( res_q,
+                binop(Iop_Or32, 
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(irt_diff), mkU8(1)
+                                )
+                           )
+                      ),
+                      binop(Iop_Shl32,
+                            binop(Iop_Shr32,
+                                  mkexpr(irt_sum), mkU8(1)
+                            ),
+                            mkU8(16)
+                     )
+                )
+        );
+
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("shasx%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- uhasx<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAA && (INSNT1(15,0) & 0xF0F0) == 0xF060) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,0,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp irt_diff  = newTemp(Ity_I32);
+        IRTemp irt_sum   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign( irt_diff,
+                binop(Iop_Sub32,
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                mkexpr(rNt)
+                           )
+                      ),
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(rMt), mkU8(16)
+                                )
+                           )
+                      )
+                )
+        );
+
+        assign( irt_sum,
+                binop(Iop_Add32,
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(rNt), mkU8(16)
+                                )
+                           )
+                      ),
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16, mkexpr(rMt)
+                           )
+                      )
+                )
+        );
+
+        assign( res_q,
+                binop(Iop_Or32, 
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(irt_diff), mkU8(1)
+                                )
+                           )
+                      ),
+                      binop(Iop_Shl32,
+                            binop(Iop_Shr32,
+                                  mkexpr(irt_sum), mkU8(1)
+                            ),
+                            mkU8(16)
+                     )
+                )
+        );
+
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uhasx%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- shsax<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAE && (INSNT1(15,0) & 0xF0F0) == 0xF020) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp irt_diff  = newTemp(Ity_I32);
+        IRTemp irt_sum   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign( irt_sum,
+                binop(Iop_Add32,
+                      unop(Iop_16Sto32,
+                           unop(Iop_32to16,
+                                mkexpr(rNt)
+                           )
+                      ),
+                      unop(Iop_16Sto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(rMt), mkU8(16)
+                                )
+                           )
+                      )
+                )
+        );
+
+        assign( irt_diff,
+                binop(Iop_Sub32,
+                      unop(Iop_16Sto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(rNt), mkU8(16)
+                                )
+                           )
+                      ),
+                      unop(Iop_16Sto32,
+                           unop(Iop_32to16, mkexpr(rMt)
+                           )
+                      )
+                )
+        );
+
+        assign( res_q,
+                binop(Iop_Or32, 
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(irt_sum), mkU8(1)
+                                )
+                           )
+                      ),
+                      binop(Iop_Shl32,
+                            binop(Iop_Shr32,
+                                  mkexpr(irt_diff), mkU8(1)
+                            ),
+                            mkU8(16)
+                     )
+                )
+        );
+
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("shsax%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- uhsax<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAE && (INSNT1(15,0) & 0xF0F0) == 0xF060) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,1,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,0,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp irt_diff  = newTemp(Ity_I32);
+        IRTemp irt_sum   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign( irt_sum,
+                binop(Iop_Add32,
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                mkexpr(rNt)
+                           )
+                      ),
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(rMt), mkU8(16)
+                                )
+                           )
+                      )
+                )
+        );
+
+        assign( irt_diff,
+                binop(Iop_Sub32,
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(rNt), mkU8(16)
+                                )
+                           )
+                      ),
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16, mkexpr(rMt)
+                           )
+                      )
+                )
+        );
+
+        assign( res_q,
+                binop(Iop_Or32, 
+                      unop(Iop_16Uto32,
+                           unop(Iop_32to16,
+                                binop(Iop_Shr32,
+                                      mkexpr(irt_sum), mkU8(1)
+                                )
+                           )
+                      ),
+                      binop(Iop_Shl32,
+                            binop(Iop_Shr32,
+                                  mkexpr(irt_diff), mkU8(1)
+                            ),
+                            mkU8(16)
+                     )
+                )
+        );
+
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("uhsax%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- shsub16<c> <Rd>,<Rn>,<Rm> ------------------- */
+   {
+     UInt regD = 99, regN = 99, regM = 99;
+     Bool gate = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFAD && (INSNT1(15,0) & 0xF0F0) == 0xF020) {
+           regN = INSNT0(3,0);
+           regD = INSNT1(11,8);
+           regM = INSNT1(3,0);
+           if (!isBadRegT(regD) && !isBadRegT(regN) && !isBadRegT(regM))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,0,0,0,1,1) &&
+            INSNA(11,8)  == BITS4(1,1,1,1)         &&
+            INSNA(7,4)   == BITS4(0,1,1,1)) {
+           regD = INSNA(15,12);
+           regN = INSNA(19,16);
+           regM = INSNA(3,0);
+           if (regD != 15 && regN != 15 && regM != 15)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp rNt   = newTemp(Ity_I32);
+        IRTemp rMt   = newTemp(Ity_I32);
+        IRTemp res_q = newTemp(Ity_I32);
+
+        assign( rNt, isT ? getIRegT(regN) : getIRegA(regN) );
+        assign( rMt, isT ? getIRegT(regM) : getIRegA(regM) );
+
+        assign(res_q, binop(Iop_HSub16Sx2, mkexpr(rNt), mkexpr(rMt)));
+        if (isT)
+           putIRegT( regD, mkexpr(res_q), condT );
+        else
+           putIRegA( regD, mkexpr(res_q), condT, Ijk_Boring );
+
+        DIP("shsub16%s r%u, r%u, r%u\n", nCC(conq),regD,regN,regM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ----------------- smmls{r}<c> <Rd>,<Rn>,<Rm>,<Ra> ------------------- */
+   {
+     UInt rD = 99, rN = 99, rM = 99, rA = 99;
+     Bool round  = False;
+     Bool gate   = False;
+
+     if (isT) {
+        if (INSNT0(15,7) == BITS9(1,1,1,1,1,0,1,1,0)
+            && INSNT0(6,4) == BITS3(1,1,0)
+            && INSNT1(7,5) == BITS3(0,0,0)) {
+           round = INSNT1(4,4);
+           rA    = INSNT1(15,12);
+           rD    = INSNT1(11,8);
+           rM    = INSNT1(3,0);
+           rN    = INSNT0(3,0);
+           if (!isBadRegT(rD)
+               && !isBadRegT(rN) && !isBadRegT(rM) && !isBadRegT(rA))
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,1,0,1,0,1)
+            && INSNA(15,12) != BITS4(1,1,1,1)
+            && (INSNA(7,4) & BITS4(1,1,0,1)) == BITS4(1,1,0,1)) {
+           round = INSNA(5,5);
+           rD    = INSNA(19,16);
+           rA    = INSNA(15,12);
+           rM    = INSNA(11,8);
+           rN    = INSNA(3,0);
+           if (rD != 15 && rM != 15 && rN != 15)
+              gate = True;
+        }
+     }
+     if (gate) {
+        IRTemp irt_rA   = newTemp(Ity_I32);
+        IRTemp irt_rN   = newTemp(Ity_I32);
+        IRTemp irt_rM   = newTemp(Ity_I32);
+        assign( irt_rA, isT ? getIRegT(rA) : getIRegA(rA) );
+        assign( irt_rN, isT ? getIRegT(rN) : getIRegA(rN) );
+        assign( irt_rM, isT ? getIRegT(rM) : getIRegA(rM) );
+        IRExpr* res
+        = unop(Iop_64HIto32,
+               binop(Iop_Add64,
+                     binop(Iop_Sub64,
+                           binop(Iop_32HLto64, mkexpr(irt_rA), mkU32(0)),
+                           binop(Iop_MullS32, mkexpr(irt_rN), mkexpr(irt_rM))),
+                     mkU64(round ? 0x80000000ULL : 0ULL)));
+        if (isT)
+           putIRegT( rD, res, condT );
+        else
+           putIRegA(rD, res, condT, Ijk_Boring);
+        DIP("smmls%s%s r%u, r%u, r%u, r%u\n",
+            round ? "r" : "", nCC(conq), rD, rN, rM, rA);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* -------------- smlald{x}<c> <RdLo>,<RdHi>,<Rn>,<Rm> ---------------- */
+   {
+     UInt rN = 99, rDlo = 99, rDhi = 99, rM = 99;
+     Bool m_swap = False;
+     Bool gate   = False;
+
+     if (isT) {
+        if (INSNT0(15,4) == 0xFBC &&
+            (INSNT1(7,4) & BITS4(1,1,1,0)) == BITS4(1,1,0,0)) {
+           rN     = INSNT0(3,0);
+           rDlo   = INSNT1(15,12);
+           rDhi   = INSNT1(11,8);
+           rM     = INSNT1(3,0);
+           m_swap = (INSNT1(4,4) & 1) == 1;
+           if (!isBadRegT(rDlo) && !isBadRegT(rDhi) && !isBadRegT(rN)
+               && !isBadRegT(rM) && rDhi != rDlo)
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,1,0,1,0,0)
+            && (INSNA(7,4) & BITS4(1,1,0,1)) == BITS4(0,0,0,1)) {
+           rN     = INSNA(3,0);
+           rDlo   = INSNA(15,12);
+           rDhi   = INSNA(19,16);
+           rM     = INSNA(11,8);
+           m_swap = ( INSNA(5,5) & 1 ) == 1;
+           if (rDlo != 15 && rDhi != 15
+               && rN != 15 && rM != 15 && rDlo != rDhi)
+              gate = True;
+        }
+     }
+
+     if (gate) {
+        IRTemp irt_rM   = newTemp(Ity_I32);
+        IRTemp irt_rN   = newTemp(Ity_I32);
+        IRTemp irt_rDhi = newTemp(Ity_I32);
+        IRTemp irt_rDlo = newTemp(Ity_I32);
+        IRTemp op_2     = newTemp(Ity_I32);
+        IRTemp pr_1     = newTemp(Ity_I64);
+        IRTemp pr_2     = newTemp(Ity_I64);
+        IRTemp result   = newTemp(Ity_I64);
+        IRTemp resHi    = newTemp(Ity_I32);
+        IRTemp resLo    = newTemp(Ity_I32);
+        assign( irt_rM, isT ? getIRegT(rM) : getIRegA(rM));
+        assign( irt_rN, isT ? getIRegT(rN) : getIRegA(rN));
+        assign( irt_rDhi, isT ? getIRegT(rDhi) : getIRegA(rDhi));
+        assign( irt_rDlo, isT ? getIRegT(rDlo) : getIRegA(rDlo));
+        assign( op_2, genROR32(irt_rM, m_swap ? 16 : 0) );
+        assign( pr_1, binop(Iop_MullS32,
+                            unop(Iop_16Sto32,
+                                 unop(Iop_32to16, mkexpr(irt_rN))
+                            ),
+                            unop(Iop_16Sto32,
+                                 unop(Iop_32to16, mkexpr(op_2))
+                            )
+                      )
+        );
+        assign( pr_2, binop(Iop_MullS32,
+                            binop(Iop_Sar32, mkexpr(irt_rN), mkU8(16)),
+                            binop(Iop_Sar32, mkexpr(op_2), mkU8(16))
+                      )
+        );
+        assign( result, binop(Iop_Add64,
+                              binop(Iop_Add64,
+                                    mkexpr(pr_1),
+                                    mkexpr(pr_2)
+                              ),
+                              binop(Iop_32HLto64,
+                                    mkexpr(irt_rDhi),
+                                    mkexpr(irt_rDlo)
+                              )
+                        )
+        );
+        assign( resHi, unop(Iop_64HIto32, mkexpr(result)) );
+        assign( resLo, unop(Iop_64to32, mkexpr(result)) );
+        if (isT) {
+           putIRegT( rDhi, mkexpr(resHi), condT );
+           putIRegT( rDlo, mkexpr(resLo), condT );
+        } else {
+           putIRegA( rDhi, mkexpr(resHi), condT, Ijk_Boring );
+           putIRegA( rDlo, mkexpr(resLo), condT, Ijk_Boring );
+        }
+        DIP("smlald%c%s r%u, r%u, r%u, r%u\n",
+            m_swap ? 'x' : ' ', nCC(conq), rDlo, rDhi, rN, rM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* -------------- smlsld{x}<c> <RdLo>,<RdHi>,<Rn>,<Rm> ---------------- */
+   {
+     UInt rN = 99, rDlo = 99, rDhi = 99, rM = 99;
+     Bool m_swap = False;
+     Bool gate   = False;
+
+     if (isT) {
+        if ((INSNT0(15,4) == 0xFBD &&
+            (INSNT1(7,4) & BITS4(1,1,1,0)) == BITS4(1,1,0,0))) {
+           rN     = INSNT0(3,0);
+           rDlo   = INSNT1(15,12);
+           rDhi   = INSNT1(11,8);
+           rM     = INSNT1(3,0);
+           m_swap = (INSNT1(4,4) & 1) == 1;
+           if (!isBadRegT(rDlo) && !isBadRegT(rDhi) && !isBadRegT(rN) &&
+               !isBadRegT(rM) && rDhi != rDlo)
+              gate = True;
+        }
+     } else {
+        if (INSNA(27,20) == BITS8(0,1,1,1,0,1,0,0) &&
+            (INSNA(7,4) & BITS4(1,1,0,1)) == BITS4(0,1,0,1)) {
+           rN     = INSNA(3,0);
+           rDlo   = INSNA(15,12);
+           rDhi   = INSNA(19,16);
+           rM     = INSNA(11,8);
+           m_swap = (INSNA(5,5) & 1) == 1;
+           if (rDlo != 15 && rDhi != 15 &&
+               rN != 15 && rM != 15 && rDlo != rDhi)
+              gate = True;
+        }
+     }
+     if (gate) {
+        IRTemp irt_rM   = newTemp(Ity_I32);
+        IRTemp irt_rN   = newTemp(Ity_I32);
+        IRTemp irt_rDhi = newTemp(Ity_I32);
+        IRTemp irt_rDlo = newTemp(Ity_I32);
+        IRTemp op_2     = newTemp(Ity_I32);
+        IRTemp pr_1     = newTemp(Ity_I64);
+        IRTemp pr_2     = newTemp(Ity_I64);
+        IRTemp result   = newTemp(Ity_I64);
+        IRTemp resHi    = newTemp(Ity_I32);
+        IRTemp resLo    = newTemp(Ity_I32);
+        assign( irt_rM, isT ? getIRegT(rM) : getIRegA(rM) );
+        assign( irt_rN, isT ? getIRegT(rN) : getIRegA(rN) );
+        assign( irt_rDhi, isT ? getIRegT(rDhi) : getIRegA(rDhi) );
+        assign( irt_rDlo, isT ? getIRegT(rDlo) : getIRegA(rDlo) );
+        assign( op_2, genROR32(irt_rM, m_swap ? 16 : 0) );
+        assign( pr_1, binop(Iop_MullS32,
+                            unop(Iop_16Sto32,
+                                 unop(Iop_32to16, mkexpr(irt_rN))
+                            ),
+                            unop(Iop_16Sto32,
+                                 unop(Iop_32to16, mkexpr(op_2))
+                            )
+                      )
+        );
+        assign( pr_2, binop(Iop_MullS32,
+                            binop(Iop_Sar32, mkexpr(irt_rN), mkU8(16)),
+                            binop(Iop_Sar32, mkexpr(op_2), mkU8(16))
+                      )
+        );
+        assign( result, binop(Iop_Add64,
+                              binop(Iop_Sub64,
+                                    mkexpr(pr_1),
+                                    mkexpr(pr_2)
+                              ),
+                              binop(Iop_32HLto64,
+                                    mkexpr(irt_rDhi),
+                                    mkexpr(irt_rDlo)
+                              )
+                        )
+        );
+        assign( resHi, unop(Iop_64HIto32, mkexpr(result)) );
+        assign( resLo, unop(Iop_64to32, mkexpr(result)) );
+        if (isT) {
+           putIRegT( rDhi, mkexpr(resHi), condT );
+           putIRegT( rDlo, mkexpr(resLo), condT );
+        } else {
+           putIRegA( rDhi, mkexpr(resHi), condT, Ijk_Boring );
+           putIRegA( rDlo, mkexpr(resLo), condT, Ijk_Boring );
+        }
+        DIP("smlsld%c%s r%u, r%u, r%u, r%u\n",
+            m_swap ? 'x' : ' ', nCC(conq), rDlo, rDhi, rN, rM);
+        return True;
+     }
+     /* fall through */
+   }
+
+   /* ---------- Doesn't match anything. ---------- */
+   return False;
+
+#  undef INSNA
+#  undef INSNT0
+#  undef INSNT1
+}
+
+
+/*------------------------------------------------------------*/
+/*--- LDMxx/STMxx helper (both ARM and Thumb32)            ---*/
+/*------------------------------------------------------------*/
+
+/* Generate IR for LDMxx and STMxx.  This is complex.  Assumes it's
+   unconditional, so the caller must produce a jump-around before
+   calling this, if the insn is to be conditional.  Caller is
+   responsible for all validation of parameters.  For LDMxx, if PC is
+   amongst the values loaded, caller is also responsible for
+   generating the jump. */
+static void mk_ldm_stm ( Bool arm,     /* True: ARM, False: Thumb */
+                         UInt rN,      /* base reg */
+                         UInt bINC,    /* 1: inc,  0: dec */
+                         UInt bBEFORE, /* 1: inc/dec before, 0: after */
+                         UInt bW,      /* 1: writeback to Rn */
+                         UInt bL,      /* 1: load, 0: store */
+                         UInt regList )
+{
+   Int i, r, m, nRegs;
+   /* NOTE(review): 'jk' carries an IRJumpKind value (Ijk_Boring or
+      Ijk_Ret below) but is declared as IRTemp, relying on implicit
+      enum<->UInt conversion.  Legal C, but IRJumpKind would state the
+      intent -- confirm against upstream before changing. */
+   IRTemp jk = Ijk_Boring;
+
+   /* Get hold of the old Rn value.  We might need to write its value
+      to memory during a store, and if it's also the writeback
+      register then we need to get its value now.  We can't treat it
+      exactly like the other registers we're going to transfer,
+      because for xxMDA and xxMDB writeback forms, the generated IR
+      updates Rn in the guest state before any transfers take place.
+      We have to do this as per comments below, in order that if Rn is
+      the stack pointer then it always has a value that is below or
+      equal to any of the transfer addresses.  Ick. */
+   IRTemp oldRnT = newTemp(Ity_I32);
+   assign(oldRnT, arm ? getIRegA(rN) : getIRegT(rN));
+
+   IRTemp anchorT = newTemp(Ity_I32);
+   /* The old (Addison-Wesley) ARM ARM seems to say that LDMxx/STMxx
+      ignore the bottom two bits of the address.  However, Cortex-A8
+      doesn't seem to care.  Hence: */
+   /* No .. don't force alignment .. */
+   /* assign(anchorT, binop(Iop_And32, mkexpr(oldRnT), mkU32(~3U))); */
+   /* Instead, use the potentially misaligned address directly. */
+   assign(anchorT, mkexpr(oldRnT));
+
+   IROp opADDorSUB = bINC ? Iop_Add32 : Iop_Sub32;
+   // bINC == 1:  xxMIA, xxMIB
+   // bINC == 0:  xxMDA, xxMDB
+
+   // For xxMDA and xxMDB, update Rn first if necessary.  We have
+   // to do this first so that, for the common idiom of the transfers
+   // faulting because we're pushing stuff onto a stack and the stack
+   // is growing down onto allocate-on-fault pages (as Valgrind simulates),
+   // we need to have the SP up-to-date "covering" (pointing below) the
+   // transfer area.  For the same reason, if we are doing xxMIA or xxMIB,
+   // do the transfer first, and then update rN afterwards.
+   /* Count the registers named in the 16-bit transfer list. */
+   nRegs = 0;
+   for (i = 0; i < 16; i++) {
+     if ((regList & (1 << i)) != 0)
+         nRegs++;
+   }
+   /* Decrementing forms (xxMDA/xxMDB) with writeback: move Rn down
+      BEFORE any transfer, per the rationale above. */
+   if (bW == 1 && !bINC) {
+      IRExpr* e = binop(opADDorSUB, mkexpr(oldRnT), mkU32(4*nRegs));
+      if (arm)
+         putIRegA( rN, e, IRTemp_INVALID, Ijk_Boring );
+      else
+         putIRegT( rN, e, IRTemp_INVALID );
+   }
+
+   // Make up a list of the registers to transfer, and their offsets
+   // in memory relative to the anchor.  If the base reg (Rn) is part
+   // of the transfer, then do it last for a load and first for a store.
+   UInt xReg[16], xOff[16];
+   Int  nX = 0;
+   m = 0;
+   /* Walk the list low-to-high for increment forms, high-to-low for
+      decrement forms, so that xOff always grows away from the anchor
+      in the direction of opADDorSUB. */
+   for (i = 0; i < 16; i++) {
+      r = bINC ? i : (15-i);
+      if (0 == (regList & (1<<r)))
+         continue;
+      if (bBEFORE)
+         m++;
+      /* paranoia: check we aren't transferring the writeback
+         register during a load. Should be assured by decode-point
+         check above. */
+      if (bW == 1 && bL == 1)
+         vassert(r != rN);
+
+      xOff[nX] = 4 * m;
+      xReg[nX] = r;
+      nX++;
+
+      if (!bBEFORE)
+         m++;
+   }
+   vassert(m == nRegs);
+   vassert(nX == nRegs);
+   vassert(nX <= 16);
+
+   if (bW == 0 && (regList & (1<<rN)) != 0) {
+      /* Non-writeback, and basereg is to be transferred.  Do its
+         transfer last for a load and first for a store.  Requires
+         reordering xOff/xReg. */
+      if (0) {
+         vex_printf("\nREG_LIST_PRE: (rN=%d)\n", rN);
+         for (i = 0; i < nX; i++)
+            vex_printf("reg %d   off %d\n", xReg[i], xOff[i]);
+         vex_printf("\n");
+      }
+
+      /* Locate Rn's slot in the transfer list. */
+      vassert(nX > 0);
+      for (i = 0; i < nX; i++) {
+         if (xReg[i] == rN)
+             break;
+      }
+      vassert(i < nX); /* else we didn't find it! */
+      UInt tReg = xReg[i];
+      UInt tOff = xOff[i];
+      if (bL == 1) {
+         /* load; make this transfer happen last */
+         if (i < nX-1) {
+            /* shuffle the entries after slot i down by one, then put
+               (tReg,tOff) at the end */
+            for (m = i+1; m < nX; m++) {
+               xReg[m-1] = xReg[m];
+               xOff[m-1] = xOff[m];
+            }
+            vassert(m == nX);
+            xReg[m-1] = tReg;
+            xOff[m-1] = tOff;
+         }
+      } else {
+         /* store; make this transfer happen first */
+         if (i > 0) {
+            /* shuffle the entries before slot i up by one, then put
+               (tReg,tOff) at the front */
+            for (m = i-1; m >= 0; m--) {
+               xReg[m+1] = xReg[m];
+               xOff[m+1] = xOff[m];
+            }
+            vassert(m == -1);
+            xReg[0] = tReg;
+            xOff[0] = tOff;
+         }
+      }
+
+      if (0) {
+         vex_printf("REG_LIST_POST:\n");
+         for (i = 0; i < nX; i++)
+            vex_printf("reg %d   off %d\n", xReg[i], xOff[i]);
+         vex_printf("\n");
+      }
+   }
+
+   /* According to the Cortex A8 TRM Sec. 5.2.1, LDM(1) with r13 as the base
+       register and PC in the register list is a return for purposes of branch
+       prediction.
+      The ARM ARM Sec. C9.10.1 further specifies that writeback must be enabled
+       to be counted in event 0x0E (Procedure return).*/
+   if (rN == 13 && bL == 1 && bINC && !bBEFORE && bW == 1) {
+      jk = Ijk_Ret;
+   }
+
+   /* Actually generate the transfers */
+   for (i = 0; i < nX; i++) {
+      r = xReg[i];
+      if (bL == 1) {
+         IRExpr* e = loadLE(Ity_I32,
+                            binop(opADDorSUB, mkexpr(anchorT),
+                                  mkU32(xOff[i])));
+         if (arm) {
+            putIRegA( r, e, IRTemp_INVALID, jk );
+         } else {
+            // no: putIRegT( r, e, IRTemp_INVALID );
+            // putIRegT refuses to write to R15.  But that might happen.
+            // Since this is uncond, and we need to be able to
+            // write the PC, just use the low level put:
+            llPutIReg( r, e );
+         }
+      } else {
+         /* if we're storing Rn, make sure we use the correct
+            value, as per extensive comments above */
+         storeLE( binop(opADDorSUB, mkexpr(anchorT), mkU32(xOff[i])),
+                  r == rN ? mkexpr(oldRnT) 
+                          : (arm ? getIRegA(r) : getIRegT(r) ) );
+      }
+   }
+
+   // If we are doing xxMIA or xxMIB,
+   // do the transfer first, and then update rN afterwards.
+   /* Incrementing forms with writeback: move Rn up AFTER all
+      transfers, mirroring the pre-update done for decrementing
+      forms above. */
+   if (bW == 1 && bINC) {
+      IRExpr* e = binop(opADDorSUB, mkexpr(oldRnT), mkU32(4*nRegs));
+      if (arm)
+         putIRegA( rN, e, IRTemp_INVALID, Ijk_Boring );
+      else
+         putIRegT( rN, e, IRTemp_INVALID );
+   }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- VFP (CP 10 and 11) instructions                      ---*/
+/*------------------------------------------------------------*/
+
+/* Both ARM and Thumb */
+
+/* Translate a CP10 or CP11 instruction.  If successful, returns
+   True and *dres may or may not be updated.  If failure, returns
+   False and doesn't change *dres nor create any IR.
+
+   The ARM and Thumb encodings are identical for the low 28 bits of
+   the insn (yay!) and that's what the caller must supply, iow, imm28
+   has the top 4 bits masked out.  Caller is responsible for
+   determining whether the masked-out bits are valid for a CP10/11
+   insn.  The rules for the top 4 bits are:
+
+     ARM: 0000 to 1110 allowed, and this is the gating condition.
+     1111 (NV) is not allowed.
+
+     Thumb: must be 1110.  The gating condition is taken from
+     ITSTATE in the normal way.
+
+   Conditionalisation:
+
+   Caller must supply an IRTemp 'condT' holding the gating condition,
+   or IRTemp_INVALID indicating the insn is always executed.
+
+   Caller must also supply an ARMCondcode 'cond'.  This is only used
+   for debug printing, no other purpose.  For ARM, this is simply the
+   top 4 bits of the original instruction.  For Thumb, the condition
+   is not (really) known until run time, and so ARMCondAL should be
+   passed, only so that printing of these instructions does not show
+   any condition.
+
+   Finally, the caller must indicate whether this occurs in ARM or
+   Thumb code.
+*/
+static Bool decode_CP10_CP11_instruction (
+               /*MOD*/DisResult* dres,
+               UInt              insn28,
+               IRTemp            condT,
+               ARMCondcode       conq,
+               Bool              isT
+            )
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn28, (_bMax), (_bMin))
+
+   vassert(INSN(31,28) == BITS4(0,0,0,0)); // caller's obligation
+
+   if (isT) {
+      vassert(conq == ARMCondAL);
+   } else {
+      vassert(conq >= ARMCondEQ && conq <= ARMCondAL);
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- VFP instructions -- double precision (mostly)         -- */
+   /* ----------------------------------------------------------- */
+
+   /* --------------------- fldmx, fstmx --------------------- */
+   /*
+                                 31   27   23   19 15 11   7   0
+                                         P U WL
+      C4-100, C5-26  1  FSTMX    cond 1100 1000 Rn Dd 1011 offset
+      C4-100, C5-28  2  FSTMIAX  cond 1100 1010 Rn Dd 1011 offset
+      C4-100, C5-30  3  FSTMDBX  cond 1101 0010 Rn Dd 1011 offset
+
+      C4-42, C5-26   1  FLDMX    cond 1100 1001 Rn Dd 1011 offset
+      C4-42, C5-28   2  FLDMIAX  cond 1100 1011 Rn Dd 1011 offset
+      C4-42, C5-30   3  FLDMDBX  cond 1101 0011 Rn Dd 1011 offset
+
+      Regs transferred: Dd .. D(d + (offset-3)/2)
+      offset must be odd, must not imply a reg > 15
+      IA/DB: Rn is changed by (4 + 8 x # regs transferred)
+
+      case coding:
+         1  at-Rn   (access at Rn)
+         2  ia-Rn   (access at Rn, then Rn += 4+8n)
+         3  db-Rn   (Rn -= 4+8n,   then access at Rn)
+   */
+   if (BITS8(1,1,0,0,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,0,0,0,0,0))
+       && INSN(11,8) == BITS4(1,0,1,1)) {
+      UInt bP      = (insn28 >> 24) & 1;
+      UInt bU      = (insn28 >> 23) & 1;
+      UInt bW      = (insn28 >> 21) & 1;
+      UInt bL      = (insn28 >> 20) & 1;
+      UInt offset  = (insn28 >> 0) & 0xFF;
+      UInt rN      = INSN(19,16);
+      UInt dD      = (INSN(22,22) << 4) | INSN(15,12);
+      UInt nRegs   = (offset - 1) / 2;
+      UInt summary = 0;
+      Int  i;
+
+      /**/ if (bP == 0 && bU == 1 && bW == 0) {
+         summary = 1;
+      }
+      else if (bP == 0 && bU == 1 && bW == 1) {
+         summary = 2;
+      }
+      else if (bP == 1 && bU == 0 && bW == 1) {
+         summary = 3;
+      }
+      else goto after_vfp_fldmx_fstmx;
+
+      /* no writebacks to r15 allowed.  No use of r15 in thumb mode. */
+      if (rN == 15 && (summary == 2 || summary == 3 || isT))
+         goto after_vfp_fldmx_fstmx;
+
+      /* offset must be odd, and specify at least one register */
+      if (0 == (offset & 1) || offset < 3)
+         goto after_vfp_fldmx_fstmx;
+
+      /* can't transfer regs after D15 */
+      if (dD + nRegs - 1 >= 32)
+         goto after_vfp_fldmx_fstmx;
+
+      /* Now, we can't do a conditional load or store, since that very
+         likely will generate an exception.  So we have to take a side
+         exit at this point if the condition is false. */
+      if (condT != IRTemp_INVALID) {
+         if (isT)
+            mk_skip_over_T32_if_cond_is_false( condT );
+         else
+            mk_skip_over_A32_if_cond_is_false( condT );
+         condT = IRTemp_INVALID;
+      }
+      /* Ok, now we're unconditional.  Do the load or store. */
+
+      /* get the old Rn value */
+      IRTemp rnT = newTemp(Ity_I32);
+      assign(rnT, align4if(isT ? getIRegT(rN) : getIRegA(rN),
+                           rN == 15));
+
+      /* make a new value for Rn, post-insn */
+      IRTemp rnTnew = IRTemp_INVALID;
+      if (summary == 2 || summary == 3) {
+         rnTnew = newTemp(Ity_I32);
+         assign(rnTnew, binop(summary == 2 ? Iop_Add32 : Iop_Sub32,
+                              mkexpr(rnT),
+                              mkU32(4 + 8 * nRegs)));
+      }
+
+      /* decide on the base transfer address */
+      IRTemp taT = newTemp(Ity_I32);
+      assign(taT,  summary == 3 ? mkexpr(rnTnew) : mkexpr(rnT));
+
+      /* update Rn if necessary -- in case 3, we're moving it down, so
+         update before any memory reference, in order to keep Memcheck
+         and V's stack-extending logic (on linux) happy */
+      if (summary == 3) {
+         if (isT)
+            putIRegT(rN, mkexpr(rnTnew), IRTemp_INVALID);
+         else
+            putIRegA(rN, mkexpr(rnTnew), IRTemp_INVALID, Ijk_Boring);
+      }
+
+      /* generate the transfers */
+      for (i = 0; i < nRegs; i++) {
+         IRExpr* addr = binop(Iop_Add32, mkexpr(taT), mkU32(8*i));
+         if (bL) {
+            putDReg(dD + i, loadLE(Ity_F64, addr), IRTemp_INVALID);
+         } else {
+            storeLE(addr, getDReg(dD + i));
+         }
+      }
+
+      /* update Rn if necessary -- in case 2, we're moving it up, so
+         update after any memory reference, in order to keep Memcheck
+         and V's stack-extending logic (on linux) happy */
+      if (summary == 2) {
+         if (isT)
+            putIRegT(rN, mkexpr(rnTnew), IRTemp_INVALID);
+         else
+            putIRegA(rN, mkexpr(rnTnew), IRTemp_INVALID, Ijk_Boring);
+      }
+
+      const HChar* nm = bL==1 ? "ld" : "st";
+      switch (summary) {
+         case 1:  DIP("f%smx%s r%u, {d%u-d%u}\n", 
+                      nm, nCC(conq), rN, dD, dD + nRegs - 1);
+                  break;
+         case 2:  DIP("f%smiax%s r%u!, {d%u-d%u}\n", 
+                      nm, nCC(conq), rN, dD, dD + nRegs - 1);
+                  break;
+         case 3:  DIP("f%smdbx%s r%u!, {d%u-d%u}\n", 
+                      nm, nCC(conq), rN, dD, dD + nRegs - 1);
+                  break;
+         default: vassert(0);
+      }
+
+      goto decode_success_vfp;
+      /* FIXME alignment constraints? */
+   }
+
+  after_vfp_fldmx_fstmx:
+
+   /* --------------------- fldmd, fstmd --------------------- */
+   /*
+                                 31   27   23   19 15 11   7   0
+                                         P U WL
+      C4-96, C5-26   1  FSTMD    cond 1100 1000 Rn Dd 1011 offset
+      C4-96, C5-28   2  FSTMDIA  cond 1100 1010 Rn Dd 1011 offset
+      C4-96, C5-30   3  FSTMDDB  cond 1101 0010 Rn Dd 1011 offset
+
+      C4-38, C5-26   1  FLDMD    cond 1100 1001 Rn Dd 1011 offset
+      C4-38, C5-28   2  FLDMIAD  cond 1100 1011 Rn Dd 1011 offset
+      C4-38, C5-30   3  FLDMDBD  cond 1101 0011 Rn Dd 1011 offset
+
+      Regs transferred: Dd .. D(d + (offset-2)/2)
+      offset must be even, must not imply a reg > 15
+      IA/DB: Rn is changed by (8 x # regs transferred)
+
+      case coding:
+         1  at-Rn   (access at Rn)
+         2  ia-Rn   (access at Rn, then Rn += 8n)
+         3  db-Rn   (Rn -= 8n,     then access at Rn)
+   */
+   if (BITS8(1,1,0,0,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,0,0,0,0,0))
+       && INSN(11,8) == BITS4(1,0,1,1)) {
+      UInt bP      = (insn28 >> 24) & 1;
+      UInt bU      = (insn28 >> 23) & 1;
+      UInt bW      = (insn28 >> 21) & 1;
+      UInt bL      = (insn28 >> 20) & 1;
+      UInt offset  = (insn28 >> 0) & 0xFF;
+      UInt rN      = INSN(19,16);
+      UInt dD      = (INSN(22,22) << 4) | INSN(15,12);
+      UInt nRegs   = offset / 2;
+      UInt summary = 0;
+      Int  i;
+
+      /**/ if (bP == 0 && bU == 1 && bW == 0) {
+         summary = 1;
+      }
+      else if (bP == 0 && bU == 1 && bW == 1) {
+         summary = 2;
+      }
+      else if (bP == 1 && bU == 0 && bW == 1) {
+         summary = 3;
+      }
+      else goto after_vfp_fldmd_fstmd;
+
+      /* no writebacks to r15 allowed.  No use of r15 in thumb mode. */
+      if (rN == 15 && (summary == 2 || summary == 3 || isT))
+         goto after_vfp_fldmd_fstmd;
+
+      /* offset must be even, and specify at least one register */
+      if (1 == (offset & 1) || offset < 2)
+         goto after_vfp_fldmd_fstmd;
+
+      /* can't transfer regs after D15 */
+      if (dD + nRegs - 1 >= 32)
+         goto after_vfp_fldmd_fstmd;
+
+      /* Now, we can't do a conditional load or store, since that very
+         likely will generate an exception.  So we have to take a side
+         exit at this point if the condition is false. */
+      if (condT != IRTemp_INVALID) {
+         if (isT)
+            mk_skip_over_T32_if_cond_is_false( condT );
+         else
+            mk_skip_over_A32_if_cond_is_false( condT );
+         condT = IRTemp_INVALID;
+      }
+      /* Ok, now we're unconditional.  Do the load or store. */
+
+      /* get the old Rn value */
+      IRTemp rnT = newTemp(Ity_I32);
+      assign(rnT, align4if(isT ? getIRegT(rN) : getIRegA(rN),
+                           rN == 15));
+
+      /* make a new value for Rn, post-insn */
+      IRTemp rnTnew = IRTemp_INVALID;
+      if (summary == 2 || summary == 3) {
+         rnTnew = newTemp(Ity_I32);
+         assign(rnTnew, binop(summary == 2 ? Iop_Add32 : Iop_Sub32,
+                              mkexpr(rnT),
+                              mkU32(8 * nRegs)));
+      }
+
+      /* decide on the base transfer address */
+      IRTemp taT = newTemp(Ity_I32);
+      assign(taT, summary == 3 ? mkexpr(rnTnew) : mkexpr(rnT));
+
+      /* update Rn if necessary -- in case 3, we're moving it down, so
+         update before any memory reference, in order to keep Memcheck
+         and V's stack-extending logic (on linux) happy */
+      if (summary == 3) {
+         if (isT)
+            putIRegT(rN, mkexpr(rnTnew), IRTemp_INVALID);
+         else
+            putIRegA(rN, mkexpr(rnTnew), IRTemp_INVALID, Ijk_Boring);
+      }
+
+      /* generate the transfers */
+      for (i = 0; i < nRegs; i++) {
+         IRExpr* addr = binop(Iop_Add32, mkexpr(taT), mkU32(8*i));
+         if (bL) {
+            putDReg(dD + i, loadLE(Ity_F64, addr), IRTemp_INVALID);
+         } else {
+            storeLE(addr, getDReg(dD + i));
+         }
+      }
+
+      /* update Rn if necessary -- in case 2, we're moving it up, so
+         update after any memory reference, in order to keep Memcheck
+         and V's stack-extending logic (on linux) happy */
+      if (summary == 2) {
+         if (isT)
+            putIRegT(rN, mkexpr(rnTnew), IRTemp_INVALID);
+         else
+            putIRegA(rN, mkexpr(rnTnew), IRTemp_INVALID, Ijk_Boring);
+      }
+
+      const HChar* nm = bL==1 ? "ld" : "st";
+      switch (summary) {
+         case 1:  DIP("f%smd%s r%u, {d%u-d%u}\n", 
+                      nm, nCC(conq), rN, dD, dD + nRegs - 1);
+                  break;
+         case 2:  DIP("f%smiad%s r%u!, {d%u-d%u}\n", 
+                      nm, nCC(conq), rN, dD, dD + nRegs - 1);
+                  break;
+         case 3:  DIP("f%smdbd%s r%u!, {d%u-d%u}\n", 
+                      nm, nCC(conq), rN, dD, dD + nRegs - 1);
+                  break;
+         default: vassert(0);
+      }
+
+      goto decode_success_vfp;
+      /* FIXME alignment constraints? */
+   }
+
+  after_vfp_fldmd_fstmd:
+
+   /* ------------------- fmrx, fmxr ------------------- */
+   if (BITS8(1,1,1,0,1,1,1,1) == INSN(27,20)
+       && BITS4(1,0,1,0) == INSN(11,8)
+       && BITS8(0,0,0,1,0,0,0,0) == (insn28 & 0xFF)) {
+      UInt rD  = INSN(15,12);
+      UInt reg = INSN(19,16);
+      if (reg == BITS4(0,0,0,1)) {
+         if (rD == 15) {
+            IRTemp nzcvT = newTemp(Ity_I32);
+            /* When rD is 15, we are copying the top 4 bits of FPSCR
+               into CPSR.  That is, set the flags thunk to COPY and
+               install FPSCR[31:28] as the value to copy. */
+            assign(nzcvT, binop(Iop_And32,
+                                IRExpr_Get(OFFB_FPSCR, Ity_I32),
+                                mkU32(0xF0000000)));
+            setFlags_D1(ARMG_CC_OP_COPY, nzcvT, condT);
+            DIP("fmstat%s\n", nCC(conq));
+         } else {
+            /* Otherwise, merely transfer FPSCR to r0 .. r14. */
+            IRExpr* e = IRExpr_Get(OFFB_FPSCR, Ity_I32);
+            if (isT)
+               putIRegT(rD, e, condT);
+            else
+               putIRegA(rD, e, condT, Ijk_Boring);
+            DIP("fmrx%s r%u, fpscr\n", nCC(conq), rD);
+         }
+         goto decode_success_vfp;
+      }
+      /* fall through */
+   }
+
+   if (BITS8(1,1,1,0,1,1,1,0) == INSN(27,20)
+       && BITS4(1,0,1,0) == INSN(11,8)
+       && BITS8(0,0,0,1,0,0,0,0) == (insn28 & 0xFF)) {
+      UInt rD  = INSN(15,12);
+      UInt reg = INSN(19,16);
+      if (reg == BITS4(0,0,0,1)) {
+         putMiscReg32(OFFB_FPSCR,
+                      isT ? getIRegT(rD) : getIRegA(rD), condT);
+         DIP("fmxr%s fpscr, r%u\n", nCC(conq), rD);
+         goto decode_success_vfp;
+      }
+      /* fall through */
+   }
+
+   /* --------------------- vmov --------------------- */
+   // VMOV dM, rD, rN
+   if (0x0C400B10 == (insn28 & 0x0FF00FD0)) {
+      UInt dM = INSN(3,0) | (INSN(5,5) << 4);
+      UInt rD = INSN(15,12); /* lo32 */
+      UInt rN = INSN(19,16); /* hi32 */
+      if (rD == 15 || rN == 15 || (isT && (rD == 13 || rN == 13))) {
+         /* fall through */
+      } else {
+         putDReg(dM,
+                 unop(Iop_ReinterpI64asF64,
+                      binop(Iop_32HLto64,
+                            isT ? getIRegT(rN) : getIRegA(rN),
+                            isT ? getIRegT(rD) : getIRegA(rD))),
+                 condT);
+         DIP("vmov%s d%u, r%u, r%u\n", nCC(conq), dM, rD, rN);
+         goto decode_success_vfp;
+      }
+      /* fall through */
+   }
+
+   // VMOV rD, rN, dM
+   if (0x0C500B10 == (insn28 & 0x0FF00FD0)) {
+      UInt dM = INSN(3,0) | (INSN(5,5) << 4);
+      UInt rD = INSN(15,12); /* lo32 */
+      UInt rN = INSN(19,16); /* hi32 */
+      if (rD == 15 || rN == 15 || (isT && (rD == 13 || rN == 13))
+          || rD == rN) {
+         /* fall through */
+      } else {
+         IRTemp i64 = newTemp(Ity_I64);
+         assign(i64, unop(Iop_ReinterpF64asI64, getDReg(dM)));
+         IRExpr* hi32 = unop(Iop_64HIto32, mkexpr(i64));
+         IRExpr* lo32 = unop(Iop_64to32,   mkexpr(i64));
+         if (isT) {
+            putIRegT(rN, hi32, condT);
+            putIRegT(rD, lo32, condT);
+         } else {
+            putIRegA(rN, hi32, condT, Ijk_Boring);
+            putIRegA(rD, lo32, condT, Ijk_Boring);
+         }
+         DIP("vmov%s r%u, r%u, d%u\n", nCC(conq), rD, rN, dM);
+         goto decode_success_vfp;
+      }
+      /* fall through */
+   }
+
+   // VMOV sD, sD+1, rN, rM
+   if (0x0C400A10 == (insn28 & 0x0FF00FD0)) {
+      UInt sD = (INSN(3,0) << 1) | INSN(5,5);
+      UInt rN = INSN(15,12);
+      UInt rM = INSN(19,16);
+      if (rM == 15 || rN == 15 || (isT && (rM == 13 || rN == 13))
+          || sD == 31) {
+         /* fall through */
+      } else {
+         putFReg(sD,
+                 unop(Iop_ReinterpI32asF32, isT ? getIRegT(rN) : getIRegA(rN)),
+                 condT);
+         putFReg(sD+1,
+                 unop(Iop_ReinterpI32asF32, isT ? getIRegT(rM) : getIRegA(rM)),
+                 condT);
+         DIP("vmov%s, s%u, s%u, r%u, r%u\n",
+              nCC(conq), sD, sD + 1, rN, rM);
+         goto decode_success_vfp;
+      }
+   }
+
+   // VMOV rN, rM, sD, sD+1
+   if (0x0C500A10 == (insn28 & 0x0FF00FD0)) {
+      UInt sD = (INSN(3,0) << 1) | INSN(5,5);
+      UInt rN = INSN(15,12);
+      UInt rM = INSN(19,16);
+      if (rM == 15 || rN == 15 || (isT && (rM == 13 || rN == 13))
+          || sD == 31 || rN == rM) {
+         /* fall through */
+      } else {
+         IRExpr* res0 = unop(Iop_ReinterpF32asI32, getFReg(sD));
+         IRExpr* res1 = unop(Iop_ReinterpF32asI32, getFReg(sD+1));
+         if (isT) {
+            putIRegT(rN, res0, condT);
+            putIRegT(rM, res1, condT);
+         } else {
+            putIRegA(rN, res0, condT, Ijk_Boring);
+            putIRegA(rM, res1, condT, Ijk_Boring);
+         }
+         DIP("vmov%s, r%u, r%u, s%u, s%u\n",
+             nCC(conq), rN, rM, sD, sD + 1);
+         goto decode_success_vfp;
+      }
+   }
+
+   // VMOV rD[x], rT  (ARM core register to scalar)
+   if (0x0E000B10 == (insn28 & 0x0F900F1F)) {
+      UInt rD  = (INSN(7,7) << 4) | INSN(19,16);
+      UInt rT  = INSN(15,12);
+      UInt opc = (INSN(22,21) << 2) | INSN(6,5);
+      UInt index;
+      if (rT == 15 || (isT && rT == 13)) {
+         /* fall through */
+      } else {
+         if ((opc & BITS4(1,0,0,0)) == BITS4(1,0,0,0)) {
+            index = opc & 7;
+            putDRegI64(rD, triop(Iop_SetElem8x8,
+                                 getDRegI64(rD),
+                                 mkU8(index),
+                                 unop(Iop_32to8,
+                                      isT ? getIRegT(rT) : getIRegA(rT))),
+                           condT);
+            DIP("vmov%s.8 d%u[%u], r%u\n", nCC(conq), rD, index, rT);
+            goto decode_success_vfp;
+         }
+         else if ((opc & BITS4(1,0,0,1)) == BITS4(0,0,0,1)) {
+            index = (opc >> 1) & 3;
+            putDRegI64(rD, triop(Iop_SetElem16x4,
+                                 getDRegI64(rD),
+                                 mkU8(index),
+                                 unop(Iop_32to16,
+                                      isT ? getIRegT(rT) : getIRegA(rT))),
+                           condT);
+            DIP("vmov%s.16 d%u[%u], r%u\n", nCC(conq), rD, index, rT);
+            goto decode_success_vfp;
+         }
+         else if ((opc & BITS4(1,0,1,1)) == BITS4(0,0,0,0)) {
+            index = (opc >> 2) & 1;
+            putDRegI64(rD, triop(Iop_SetElem32x2,
+                                 getDRegI64(rD),
+                                 mkU8(index),
+                                 isT ? getIRegT(rT) : getIRegA(rT)),
+                           condT);
+            DIP("vmov%s.32 d%u[%u], r%u\n", nCC(conq), rD, index, rT);
+            goto decode_success_vfp;
+         } else {
+            /* fall through */
+         }
+      }
+   }
+
+   // VMOV (scalar to ARM core register)
+   // VMOV rT, rD[x]
+   if (0x0E100B10 == (insn28 & 0x0F100F1F)) {
+      UInt rN  = (INSN(7,7) << 4) | INSN(19,16);
+      UInt rT  = INSN(15,12);
+      UInt U   = INSN(23,23);
+      UInt opc = (INSN(22,21) << 2) | INSN(6,5);
+      UInt index;
+      if (rT == 15 || (isT && rT == 13)) {
+         /* fall through */
+      } else {
+         if ((opc & BITS4(1,0,0,0)) == BITS4(1,0,0,0)) {
+            index = opc & 7;
+            IRExpr* e = unop(U ? Iop_8Uto32 : Iop_8Sto32,
+                             binop(Iop_GetElem8x8,
+                                   getDRegI64(rN),
+                                   mkU8(index)));
+            if (isT)
+               putIRegT(rT, e, condT);
+            else
+               putIRegA(rT, e, condT, Ijk_Boring);
+            DIP("vmov%s.%c8 r%u, d%u[%u]\n", nCC(conq), U ? 'u' : 's',
+                  rT, rN, index);
+            goto decode_success_vfp;
+         }
+         else if ((opc & BITS4(1,0,0,1)) == BITS4(0,0,0,1)) {
+            index = (opc >> 1) & 3;
+            IRExpr* e = unop(U ? Iop_16Uto32 : Iop_16Sto32,
+                             binop(Iop_GetElem16x4,
+                                   getDRegI64(rN),
+                                   mkU8(index)));
+            if (isT)
+               putIRegT(rT, e, condT);
+            else
+               putIRegA(rT, e, condT, Ijk_Boring);
+            DIP("vmov%s.%c16 r%u, d%u[%u]\n", nCC(conq), U ? 'u' : 's',
+                  rT, rN, index);
+            goto decode_success_vfp;
+         }
+         else if ((opc & BITS4(1,0,1,1)) == BITS4(0,0,0,0) && U == 0) {
+            index = (opc >> 2) & 1;
+            IRExpr* e = binop(Iop_GetElem32x2, getDRegI64(rN), mkU8(index));
+            if (isT)
+               putIRegT(rT, e, condT);
+            else
+               putIRegA(rT, e, condT, Ijk_Boring);
+            DIP("vmov%s.32 r%u, d%u[%u]\n", nCC(conq), rT, rN, index);
+            goto decode_success_vfp;
+         } else {
+            /* fall through */
+         }
+      }
+   }
+
+   // VMOV.F32 sD, #imm
+   // FCONSTS sD, #imm
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,0,0,0) == INSN(7,4) && INSN(11,8) == BITS4(1,0,1,0)) {
+      UInt rD   = (INSN(15,12) << 1) | INSN(22,22);
+      UInt imm8 = (INSN(19,16) << 4) | INSN(3,0);
+      UInt b    = (imm8 >> 6) & 1;
+      UInt imm;
+      imm = (BITS8((imm8 >> 7) & 1,(~b) & 1,b,b,b,b,b,(imm8 >> 5) & 1) << 8)
+             | ((imm8 & 0x1f) << 3);
+      imm <<= 16;
+      putFReg(rD, unop(Iop_ReinterpI32asF32, mkU32(imm)), condT);
+      DIP("fconsts%s s%u #%u", nCC(conq), rD, imm8);
+      goto decode_success_vfp;
+   }
+
+   // VMOV.F64 dD, #imm
+   // FCONSTD dD, #imm
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,0,0,0) == INSN(7,4) && INSN(11,8) == BITS4(1,0,1,1)) {
+      UInt rD   = INSN(15,12) | (INSN(22,22) << 4);
+      UInt imm8 = (INSN(19,16) << 4) | INSN(3,0);
+      UInt b    = (imm8 >> 6) & 1;
+      ULong imm;
+      imm = (BITS8((imm8 >> 7) & 1,(~b) & 1,b,b,b,b,b,b) << 8)
+             | BITS8(b,b,0,0,0,0,0,0) | (imm8 & 0x3f);
+      imm <<= 48;
+      putDReg(rD, unop(Iop_ReinterpI64asF64, mkU64(imm)), condT);
+      DIP("fconstd%s d%u #%u", nCC(conq), rD, imm8);
+      goto decode_success_vfp;
+   }
+
+   /* ---------------------- vdup ------------------------- */
+   // VDUP dD, rT
+   // VDUP qD, rT
+   if (BITS8(1,1,1,0,1,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,1,0,0,1))
+       && BITS4(1,0,1,1) == INSN(11,8) && INSN(6,6) == 0 && INSN(4,4) == 1) {
+      UInt rD   = (INSN(7,7) << 4) | INSN(19,16);
+      UInt rT   = INSN(15,12);
+      UInt Q    = INSN(21,21);
+      UInt size = (INSN(22,22) << 1) | INSN(5,5);
+      if (rT == 15 || (isT && rT == 13) || size == 3 || (Q && (rD & 1))) {
+         /* fall through */
+      } else {
+         IRExpr* e = isT ? getIRegT(rT) : getIRegA(rT);
+         if (Q) {
+            rD >>= 1;
+            switch (size) {
+               case 0:
+                  putQReg(rD, unop(Iop_Dup32x4, e), condT);
+                  break;
+               case 1:
+                  putQReg(rD, unop(Iop_Dup16x8, unop(Iop_32to16, e)),
+                              condT);
+                  break;
+               case 2:
+                  putQReg(rD, unop(Iop_Dup8x16, unop(Iop_32to8, e)),
+                              condT);
+                  break;
+               default:
+                  vassert(0);
+            }
+            DIP("vdup.%u q%u, r%u\n", 32 / (1<<size), rD, rT);
+         } else {
+            switch (size) {
+               case 0:
+                  putDRegI64(rD, unop(Iop_Dup32x2, e), condT);
+                  break;
+               case 1:
+                  putDRegI64(rD, unop(Iop_Dup16x4, unop(Iop_32to16, e)),
+                               condT);
+                  break;
+               case 2:
+                  putDRegI64(rD, unop(Iop_Dup8x8, unop(Iop_32to8, e)),
+                               condT);
+                  break;
+               default:
+                  vassert(0);
+            }
+            DIP("vdup.%u d%u, r%u\n", 32 / (1<<size), rD, rT);
+         }
+         goto decode_success_vfp;
+      }
+   }
+
+   /* --------------------- f{ld,st}d --------------------- */
+   // FLDD, FSTD
+   if (BITS8(1,1,0,1,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,0,0,1,0))
+       && BITS4(1,0,1,1) == INSN(11,8)) {
+      UInt dD     = INSN(15,12) | (INSN(22,22) << 4);
+      UInt rN     = INSN(19,16);
+      UInt offset = (insn28 & 0xFF) << 2;
+      UInt bU     = (insn28 >> 23) & 1; /* 1: +offset  0: -offset */
+      UInt bL     = (insn28 >> 20) & 1; /* 1: load  0: store */
+      /* make unconditional */
+      if (condT != IRTemp_INVALID) {
+         if (isT)
+            mk_skip_over_T32_if_cond_is_false( condT );
+         else
+            mk_skip_over_A32_if_cond_is_false( condT );
+         condT = IRTemp_INVALID;
+      }
+      IRTemp ea = newTemp(Ity_I32);
+      assign(ea, binop(bU ? Iop_Add32 : Iop_Sub32,
+                       align4if(isT ? getIRegT(rN) : getIRegA(rN),
+                                rN == 15),
+                       mkU32(offset)));
+      if (bL) {
+         putDReg(dD, loadLE(Ity_F64,mkexpr(ea)), IRTemp_INVALID);
+      } else {
+         storeLE(mkexpr(ea), getDReg(dD));
+      }
+      DIP("f%sd%s d%u, [r%u, %c#%u]\n",
+          bL ? "ld" : "st", nCC(conq), dD, rN,
+          bU ? '+' : '-', offset);
+      goto decode_success_vfp;
+   }
+
+   /* --------------------- dp insns (D) --------------------- */
+   if (BITS8(1,1,1,0,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,0,0,0,0))
+       && BITS4(1,0,1,1) == INSN(11,8)
+       && BITS4(0,0,0,0) == (INSN(7,4) & BITS4(0,0,0,1))) {
+      UInt    dM  = INSN(3,0)   | (INSN(5,5) << 4);       /* argR */
+      UInt    dD  = INSN(15,12) | (INSN(22,22) << 4);   /* dst/acc */
+      UInt    dN  = INSN(19,16) | (INSN(7,7) << 4);     /* argL */
+      UInt    bP  = (insn28 >> 23) & 1;
+      UInt    bQ  = (insn28 >> 21) & 1;
+      UInt    bR  = (insn28 >> 20) & 1;
+      UInt    bS  = (insn28 >> 6) & 1;
+      UInt    opc = (bP << 3) | (bQ << 2) | (bR << 1) | bS;
+      IRExpr* rm  = get_FAKE_roundingmode(); /* XXXROUNDINGFIXME */
+      switch (opc) {
+         case BITS4(0,0,0,0): /* MAC: d + n * m */
+            putDReg(dD, triop(Iop_AddF64, rm,
+                              getDReg(dD),
+                              triop(Iop_MulF64, rm, getDReg(dN),
+                                                    getDReg(dM))),
+                        condT);
+            DIP("fmacd%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(0,0,0,1): /* NMAC: d + -(n * m) */
+            putDReg(dD, triop(Iop_AddF64, rm,
+                              getDReg(dD),
+                              unop(Iop_NegF64,
+                                   triop(Iop_MulF64, rm, getDReg(dN),
+                                                         getDReg(dM)))),
+                        condT);
+            DIP("fnmacd%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(0,0,1,0): /* MSC: - d + n * m */
+            putDReg(dD, triop(Iop_AddF64, rm,
+                              unop(Iop_NegF64, getDReg(dD)),
+                              triop(Iop_MulF64, rm, getDReg(dN),
+                                                    getDReg(dM))),
+                        condT);
+            DIP("fmscd%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(0,0,1,1): /* NMSC: - d + -(n * m) */
+            putDReg(dD, triop(Iop_AddF64, rm,
+                              unop(Iop_NegF64, getDReg(dD)),
+                              unop(Iop_NegF64,
+                                   triop(Iop_MulF64, rm, getDReg(dN),
+                                                         getDReg(dM)))),
+                        condT);
+            DIP("fnmscd%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(0,1,0,0): /* MUL: n * m */
+            putDReg(dD, triop(Iop_MulF64, rm, getDReg(dN), getDReg(dM)),
+                        condT);
+            DIP("fmuld%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(0,1,0,1): /* NMUL: - n * m */
+            putDReg(dD, unop(Iop_NegF64,
+                             triop(Iop_MulF64, rm, getDReg(dN),
+                                                   getDReg(dM))),
+                    condT);
+            DIP("fnmuld%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(0,1,1,0): /* ADD: n + m */
+            putDReg(dD, triop(Iop_AddF64, rm, getDReg(dN), getDReg(dM)),
+                        condT);
+            DIP("faddd%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(0,1,1,1): /* SUB: n - m */
+            putDReg(dD, triop(Iop_SubF64, rm, getDReg(dN), getDReg(dM)),
+                        condT);
+            DIP("fsubd%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(1,0,0,0): /* DIV: n / m */
+            putDReg(dD, triop(Iop_DivF64, rm, getDReg(dN), getDReg(dM)),
+                        condT);
+            DIP("fdivd%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(1,0,1,0): /* VNFMS: -(d - n * m) (fused) */
+            /* XXXROUNDINGFIXME look up ARM reference for fused
+               multiply-add rounding */
+            putDReg(dD, triop(Iop_AddF64, rm,
+                              unop(Iop_NegF64, getDReg(dD)),
+                              triop(Iop_MulF64, rm,
+                                                getDReg(dN),
+                                                getDReg(dM))),
+                        condT);
+            DIP("vfnmsd%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(1,0,1,1): /* VNFMA: -(d + n * m) (fused) */
+            /* XXXROUNDINGFIXME look up ARM reference for fused
+               multiply-add rounding */
+            putDReg(dD, triop(Iop_AddF64, rm,
+                              unop(Iop_NegF64, getDReg(dD)),
+                              triop(Iop_MulF64, rm,
+                                                unop(Iop_NegF64, getDReg(dN)),
+                                                getDReg(dM))),
+                        condT);
+            DIP("vfnmad%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(1,1,0,0): /* VFMA: d + n * m (fused) */
+            /* XXXROUNDINGFIXME look up ARM reference for fused
+               multiply-add rounding */
+            putDReg(dD, triop(Iop_AddF64, rm,
+                              getDReg(dD),
+                              triop(Iop_MulF64, rm, getDReg(dN),
+                                                    getDReg(dM))),
+                        condT);
+            DIP("vfmad%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         case BITS4(1,1,0,1): /* VFMS: d + (-n * m) (fused) */
+            /* XXXROUNDINGFIXME look up ARM reference for fused
+               multiply-add rounding */
+            putDReg(dD, triop(Iop_AddF64, rm,
+                              getDReg(dD),
+                              triop(Iop_MulF64, rm,
+                                    unop(Iop_NegF64, getDReg(dN)),
+                                    getDReg(dM))),
+                        condT);
+            DIP("vfmsd%s d%u, d%u, d%u\n", nCC(conq), dD, dN, dM);
+            goto decode_success_vfp;
+         default:
+            break;
+      }
+   }
+
+   /* --------------------- compares (D) --------------------- */
+   /*          31   27   23   19   15 11   7    3
+                 28   24   20   16 12    8    4    0 
+      FCMPD    cond 1110 1D11 0100 Dd 1011 0100 Dm
+      FCMPED   cond 1110 1D11 0100 Dd 1011 1100 Dm
+      FCMPZD   cond 1110 1D11 0101 Dd 1011 0100 0000
+      FCMPZED  cond 1110 1D11 0101 Dd 1011 1100 0000
+                                 Z         N
+
+      Z=0 Compare Dd vs Dm     and set FPSCR 31:28 accordingly
+      Z=1 Compare Dd vs zero
+
+      N=1 generates Invalid Operation exn if either arg is any kind of NaN
+      N=0 generates Invalid Operation exn if either arg is a signalling NaN
+      (Not that we pay any attention to N here)
+   */
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,1,0,0) == (INSN(19,16) & BITS4(1,1,1,0))
+       && BITS4(1,0,1,1) == INSN(11,8)
+       && BITS4(0,1,0,0) == (INSN(7,4) & BITS4(0,1,0,1))) {
+      UInt bZ = (insn28 >> 16) & 1;
+      UInt bN = (insn28 >> 7) & 1;
+      UInt dD = INSN(15,12) | (INSN(22,22) << 4);
+      UInt dM = INSN(3,0) | (INSN(5,5) << 4);
+      if (bZ && INSN(3,0) != 0) {
+         /* does not decode; fall through */
+      } else {
+         IRTemp argL = newTemp(Ity_F64);
+         IRTemp argR = newTemp(Ity_F64);
+         IRTemp irRes = newTemp(Ity_I32);
+         assign(argL, getDReg(dD));
+         assign(argR, bZ ? IRExpr_Const(IRConst_F64i(0)) : getDReg(dM));
+         assign(irRes, binop(Iop_CmpF64, mkexpr(argL), mkexpr(argR)));
+
+         IRTemp nzcv     = IRTemp_INVALID;
+         IRTemp oldFPSCR = newTemp(Ity_I32);
+         IRTemp newFPSCR = newTemp(Ity_I32);
+
+         /* This is where the fun starts.  We have to convert 'irRes'
+            from an IR-convention return result (IRCmpF64Result) to an
+            ARM-encoded (N,Z,C,V) group.  The final result is in the
+            bottom 4 bits of 'nzcv'. */
+         /* Map compare result from IR to ARM(nzcv) */
+         /*
+            FP cmp result | IR   | ARM(nzcv)
+            --------------------------------
+            UN              0x45   0011
+            LT              0x01   1000
+            GT              0x00   0010
+            EQ              0x40   0110
+         */
+         nzcv = mk_convert_IRCmpF64Result_to_NZCV(irRes);
+
+         /* And update FPSCR accordingly */
+         assign(oldFPSCR, IRExpr_Get(OFFB_FPSCR, Ity_I32));
+         assign(newFPSCR, 
+                binop(Iop_Or32, 
+                      binop(Iop_And32, mkexpr(oldFPSCR), mkU32(0x0FFFFFFF)),
+                      binop(Iop_Shl32, mkexpr(nzcv), mkU8(28))));
+
+         putMiscReg32(OFFB_FPSCR, mkexpr(newFPSCR), condT);
+
+         if (bZ) {
+            DIP("fcmpz%sd%s d%u\n", bN ? "e" : "", nCC(conq), dD);
+         } else {
+            DIP("fcmp%sd%s d%u, d%u\n", bN ? "e" : "", nCC(conq), dD, dM);
+         }
+         goto decode_success_vfp;
+      }
+      /* fall through */
+   }  
+
+   /* --------------------- unary (D) --------------------- */
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,0,0,0) == (INSN(19,16) & BITS4(1,1,1,0))
+       && BITS4(1,0,1,1) == INSN(11,8)
+       && BITS4(0,1,0,0) == (INSN(7,4) & BITS4(0,1,0,1))) {
+      UInt dD  = INSN(15,12) | (INSN(22,22) << 4);
+      UInt dM  = INSN(3,0) | (INSN(5,5) << 4);
+      UInt b16 = (insn28 >> 16) & 1;
+      UInt b7  = (insn28 >> 7) & 1;
+      /**/ if (b16 == 0 && b7 == 0) {
+         // FCPYD
+         putDReg(dD, getDReg(dM), condT);
+         DIP("fcpyd%s d%u, d%u\n", nCC(conq), dD, dM);
+         goto decode_success_vfp;
+      }
+      else if (b16 == 0 && b7 == 1) {
+         // FABSD
+         putDReg(dD, unop(Iop_AbsF64, getDReg(dM)), condT);
+         DIP("fabsd%s d%u, d%u\n", nCC(conq), dD, dM);
+         goto decode_success_vfp;
+      }
+      else if (b16 == 1 && b7 == 0) {
+         // FNEGD
+         putDReg(dD, unop(Iop_NegF64, getDReg(dM)), condT);
+         DIP("fnegd%s d%u, d%u\n", nCC(conq), dD, dM);
+         goto decode_success_vfp;
+      }
+      else if (b16 == 1 && b7 == 1) {
+         // FSQRTD
+         IRExpr* rm = get_FAKE_roundingmode(); /* XXXROUNDINGFIXME */
+         putDReg(dD, binop(Iop_SqrtF64, rm, getDReg(dM)), condT);
+         DIP("fsqrtd%s d%u, d%u\n", nCC(conq), dD, dM);
+         goto decode_success_vfp;
+      }
+      else
+         vassert(0);
+
+      /* fall through */
+   }
+
+   /* ----------------- I <-> D conversions ----------------- */
+
+   // F{S,U}ITOD dD, fM
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(1,0,0,0) == (INSN(19,16) & BITS4(1,1,1,1))
+       && BITS4(1,0,1,1) == INSN(11,8)
+       && BITS4(0,1,0,0) == (INSN(7,4) & BITS4(0,1,0,1))) {
+      UInt bM    = (insn28 >> 5) & 1;
+      UInt fM    = (INSN(3,0) << 1) | bM;
+      UInt dD    = INSN(15,12) | (INSN(22,22) << 4);
+      UInt syned = (insn28 >> 7) & 1;
+      if (syned) {
+         // FSITOD
+         putDReg(dD, unop(Iop_I32StoF64,
+                          unop(Iop_ReinterpF32asI32, getFReg(fM))),
+                 condT);
+         DIP("fsitod%s d%u, s%u\n", nCC(conq), dD, fM);
+      } else {
+         // FUITOD
+         putDReg(dD, unop(Iop_I32UtoF64,
+                          unop(Iop_ReinterpF32asI32, getFReg(fM))),
+                 condT);
+         DIP("fuitod%s d%u, s%u\n", nCC(conq), dD, fM);
+      }
+      goto decode_success_vfp;
+   }
+
+   // FTO{S,U}ID fD, dM
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(1,1,0,0) == (INSN(19,16) & BITS4(1,1,1,0))
+       && BITS4(1,0,1,1) == INSN(11,8)
+       && BITS4(0,1,0,0) == (INSN(7,4) & BITS4(0,1,0,1))) {
+      UInt   bD    = (insn28 >> 22) & 1;
+      UInt   fD    = (INSN(15,12) << 1) | bD;
+      UInt   dM    = INSN(3,0) | (INSN(5,5) << 4);
+      UInt   bZ    = (insn28 >> 7) & 1;
+      UInt   syned = (insn28 >> 16) & 1;
+      IRTemp rmode = newTemp(Ity_I32);
+      assign(rmode, bZ ? mkU32(Irrm_ZERO)
+                       : mkexpr(mk_get_IR_rounding_mode()));
+      if (syned) {
+         // FTOSID
+         putFReg(fD, unop(Iop_ReinterpI32asF32,
+                          binop(Iop_F64toI32S, mkexpr(rmode),
+                                getDReg(dM))),
+                 condT);
+         DIP("ftosi%sd%s s%u, d%u\n", bZ ? "z" : "",
+             nCC(conq), fD, dM);
+      } else {
+         // FTOUID
+         putFReg(fD, unop(Iop_ReinterpI32asF32,
+                          binop(Iop_F64toI32U, mkexpr(rmode),
+                                getDReg(dM))),
+                 condT);
+         DIP("ftoui%sd%s s%u, d%u\n", bZ ? "z" : "",
+             nCC(conq), fD, dM);
+      }
+      goto decode_success_vfp;
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- VFP instructions -- single precision                  -- */
+   /* ----------------------------------------------------------- */
+
+   /* --------------------- fldms, fstms --------------------- */
+   /*
+                                 31   27   23   19 15 11   7   0
+                                         P UDWL
+      C4-98, C5-26   1  FSTMS    cond 1100 1x00 Rn Fd 1010 offset
+      C4-98, C5-28   2  FSTMIAS  cond 1100 1x10 Rn Fd 1010 offset
+      C4-98, C5-30   3  FSTMDBS  cond 1101 0x10 Rn Fd 1010 offset
+
+      C4-40, C5-26   1  FLDMS    cond 1100 1x01 Rn Fd 1010 offset
+      C4-40, C5-26   2  FLDMIAS  cond 1100 1x11 Rn Fd 1010 offset
+      C4-40, C5-26   3  FLDMDBS  cond 1101 0x11 Rn Fd 1010 offset
+
+      Regs transferred: S(Fd:D) .. S(Fd:D + offset - 1)
+      offset must not imply a reg > 31
+      IA/DB: Rn is changed by (4 x # regs transferred)
+
+      case coding:
+         1  at-Rn   (access at Rn)
+         2  ia-Rn   (access at Rn, then Rn += 4n)
+         3  db-Rn   (Rn -= 4n,     then access at Rn)
+   */
+   if (BITS8(1,1,0,0,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,0,0,0,0,0))
+       && INSN(11,8) == BITS4(1,0,1,0)) {
+      UInt bP      = (insn28 >> 24) & 1;
+      UInt bU      = (insn28 >> 23) & 1;
+      UInt bW      = (insn28 >> 21) & 1;
+      UInt bL      = (insn28 >> 20) & 1;
+      UInt bD      = (insn28 >> 22) & 1;
+      UInt offset  = (insn28 >> 0) & 0xFF;
+      UInt rN      = INSN(19,16);
+      UInt fD      = (INSN(15,12) << 1) | bD;
+      UInt nRegs   = offset;
+      UInt summary = 0;
+      Int  i;
+
+      /**/ if (bP == 0 && bU == 1 && bW == 0) {
+         summary = 1;
+      }
+      else if (bP == 0 && bU == 1 && bW == 1) {
+         summary = 2;
+      }
+      else if (bP == 1 && bU == 0 && bW == 1) {
+         summary = 3;
+      }
+      else goto after_vfp_fldms_fstms;
+
+      /* no writebacks to r15 allowed.  No use of r15 in thumb mode. */
+      if (rN == 15 && (summary == 2 || summary == 3 || isT))
+         goto after_vfp_fldms_fstms;
+
+      /* offset must specify at least one register */
+      if (offset < 1)
+         goto after_vfp_fldms_fstms;
+
+      /* can't transfer regs after S31 */
+      if (fD + nRegs - 1 >= 32)
+         goto after_vfp_fldms_fstms;
+
+      /* Now, we can't do a conditional load or store, since that very
+         likely will generate an exception.  So we have to take a side
+         exit at this point if the condition is false. */
+      if (condT != IRTemp_INVALID) {
+         if (isT)
+            mk_skip_over_T32_if_cond_is_false( condT );
+         else
+            mk_skip_over_A32_if_cond_is_false( condT );
+         condT = IRTemp_INVALID;
+      }
+      /* Ok, now we're unconditional.  Do the load or store. */
+
+      /* get the old Rn value */
+      IRTemp rnT = newTemp(Ity_I32);
+      assign(rnT, align4if(isT ? getIRegT(rN) : getIRegA(rN),
+                           rN == 15));
+
+      /* make a new value for Rn, post-insn */
+      IRTemp rnTnew = IRTemp_INVALID;
+      if (summary == 2 || summary == 3) {
+         rnTnew = newTemp(Ity_I32);
+         assign(rnTnew, binop(summary == 2 ? Iop_Add32 : Iop_Sub32,
+                              mkexpr(rnT),
+                              mkU32(4 * nRegs)));
+      }
+
+      /* decide on the base transfer address */
+      IRTemp taT = newTemp(Ity_I32);
+      assign(taT, summary == 3 ? mkexpr(rnTnew) : mkexpr(rnT));
+
+      /* update Rn if necessary -- in case 3, we're moving it down, so
+         update before any memory reference, in order to keep Memcheck
+         and V's stack-extending logic (on linux) happy */
+      if (summary == 3) {
+         if (isT)
+            putIRegT(rN, mkexpr(rnTnew), IRTemp_INVALID);
+         else
+            putIRegA(rN, mkexpr(rnTnew), IRTemp_INVALID, Ijk_Boring);
+      }
+
+      /* generate the transfers */
+      for (i = 0; i < nRegs; i++) {
+         IRExpr* addr = binop(Iop_Add32, mkexpr(taT), mkU32(4*i));
+         if (bL) {
+            putFReg(fD + i, loadLE(Ity_F32, addr), IRTemp_INVALID);
+         } else {
+            storeLE(addr, getFReg(fD + i));
+         }
+      }
+
+      /* update Rn if necessary -- in case 2, we're moving it up, so
+         update after any memory reference, in order to keep Memcheck
+         and V's stack-extending logic (on linux) happy */
+      if (summary == 2) {
+         if (isT)
+            putIRegT(rN, mkexpr(rnTnew), IRTemp_INVALID);
+         else
+            putIRegA(rN, mkexpr(rnTnew), IRTemp_INVALID, Ijk_Boring);
+      }
+
+      const HChar* nm = bL==1 ? "ld" : "st";
+      switch (summary) {
+         case 1:  DIP("f%sms%s r%u, {s%u-s%u}\n", 
+                      nm, nCC(conq), rN, fD, fD + nRegs - 1);
+                  break;
+         case 2:  DIP("f%smias%s r%u!, {s%u-s%u}\n", 
+                      nm, nCC(conq), rN, fD, fD + nRegs - 1);
+                  break;
+         case 3:  DIP("f%smdbs%s r%u!, {s%u-s%u}\n", 
+                      nm, nCC(conq), rN, fD, fD + nRegs - 1);
+                  break;
+         default: vassert(0);
+      }
+
+      goto decode_success_vfp;
+      /* FIXME alignment constraints? */
+   }
+
+  after_vfp_fldms_fstms:
+
+   /* --------------------- fmsr, fmrs --------------------- */
+   if (BITS8(1,1,1,0,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,1,1,1,0))
+       && BITS4(1,0,1,0) == INSN(11,8)
+       && BITS4(0,0,0,0) == INSN(3,0)
+       && BITS4(0,0,0,1) == (INSN(7,4) & BITS4(0,1,1,1))) {
+      UInt rD  = INSN(15,12);
+      UInt b7  = (insn28 >> 7) & 1;
+      UInt fN  = (INSN(19,16) << 1) | b7;
+      UInt b20 = (insn28 >> 20) & 1;
+      if (rD == 15) {
+         /* fall through */
+         /* Let's assume that no sane person would want to do
+            floating-point transfers to or from the program counter,
+            and simply decline to decode the instruction.  The ARM ARM
+            doesn't seem to explicitly disallow this case, though. */
+      } else {
+         if (b20) {
+            IRExpr* res = unop(Iop_ReinterpF32asI32, getFReg(fN));
+            if (isT)
+               putIRegT(rD, res, condT);
+            else
+               putIRegA(rD, res, condT, Ijk_Boring);
+            DIP("fmrs%s r%u, s%u\n", nCC(conq), rD, fN);
+         } else {
+            putFReg(fN, unop(Iop_ReinterpI32asF32,
+                             isT ? getIRegT(rD) : getIRegA(rD)),
+                        condT);
+            DIP("fmsr%s s%u, r%u\n", nCC(conq), fN, rD);
+         }
+         goto decode_success_vfp;
+      }
+      /* fall through */
+   }
+
+   /* --------------------- f{ld,st}s --------------------- */
+   // FLDS, FSTS
+   if (BITS8(1,1,0,1,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,0,0,1,0))
+       && BITS4(1,0,1,0) == INSN(11,8)) {
+      UInt bD     = (insn28 >> 22) & 1;
+      UInt fD     = (INSN(15,12) << 1) | bD;
+      UInt rN     = INSN(19,16);
+      UInt offset = (insn28 & 0xFF) << 2;
+      UInt bU     = (insn28 >> 23) & 1; /* 1: +offset  0: -offset */
+      UInt bL     = (insn28 >> 20) & 1; /* 1: load  0: store */
+      /* make unconditional */
+      if (condT != IRTemp_INVALID) {
+         if (isT)
+            mk_skip_over_T32_if_cond_is_false( condT );
+         else
+            mk_skip_over_A32_if_cond_is_false( condT );
+         condT = IRTemp_INVALID;
+      }
+      IRTemp ea = newTemp(Ity_I32);
+      assign(ea, binop(bU ? Iop_Add32 : Iop_Sub32,
+                       align4if(isT ? getIRegT(rN) : getIRegA(rN),
+                                rN == 15),
+                       mkU32(offset)));
+      if (bL) {
+         putFReg(fD, loadLE(Ity_F32,mkexpr(ea)), IRTemp_INVALID);
+      } else {
+         storeLE(mkexpr(ea), getFReg(fD));
+      }
+      DIP("f%ss%s s%u, [r%u, %c#%u]\n",
+          bL ? "ld" : "st", nCC(conq), fD, rN,
+          bU ? '+' : '-', offset);
+      goto decode_success_vfp;
+   }
+
+   /* --------------------- dp insns (F) --------------------- */
+   if (BITS8(1,1,1,0,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,0,0,0,0))
+       && BITS4(1,0,1,0) == (INSN(11,8) & BITS4(1,1,1,0))
+       && BITS4(0,0,0,0) == (INSN(7,4) & BITS4(0,0,0,1))) {
+      UInt    bM  = (insn28 >> 5) & 1;
+      UInt    bD  = (insn28 >> 22) & 1;
+      UInt    bN  = (insn28 >> 7) & 1;
+      UInt    fM  = (INSN(3,0) << 1) | bM;   /* argR */
+      UInt    fD  = (INSN(15,12) << 1) | bD; /* dst/acc */
+      UInt    fN  = (INSN(19,16) << 1) | bN; /* argL */
+      UInt    bP  = (insn28 >> 23) & 1;
+      UInt    bQ  = (insn28 >> 21) & 1;
+      UInt    bR  = (insn28 >> 20) & 1;
+      UInt    bS  = (insn28 >> 6) & 1;
+      UInt    opc = (bP << 3) | (bQ << 2) | (bR << 1) | bS;
+      IRExpr* rm  = get_FAKE_roundingmode(); /* XXXROUNDINGFIXME */
+      switch (opc) {
+         case BITS4(0,0,0,0): /* MAC: d + n * m */
+            putFReg(fD, triop(Iop_AddF32, rm,
+                              getFReg(fD),
+                              triop(Iop_MulF32, rm, getFReg(fN), getFReg(fM))),
+                        condT);
+            DIP("fmacs%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(0,0,0,1): /* NMAC: d + -(n * m) */
+            putFReg(fD, triop(Iop_AddF32, rm,
+                              getFReg(fD),
+                              unop(Iop_NegF32,
+                                   triop(Iop_MulF32, rm, getFReg(fN),
+                                                         getFReg(fM)))),
+                        condT);
+            DIP("fnmacs%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(0,0,1,0): /* MSC: - d + n * m */
+            putFReg(fD, triop(Iop_AddF32, rm,
+                              unop(Iop_NegF32, getFReg(fD)),
+                              triop(Iop_MulF32, rm, getFReg(fN), getFReg(fM))),
+                        condT);
+            DIP("fmscs%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(0,0,1,1): /* NMSC: - d + -(n * m) */
+            putFReg(fD, triop(Iop_AddF32, rm,
+                              unop(Iop_NegF32, getFReg(fD)),
+                              unop(Iop_NegF32,
+                                   triop(Iop_MulF32, rm,
+                                                     getFReg(fN),
+                                                    getFReg(fM)))),
+                        condT);
+            DIP("fnmscs%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(0,1,0,0): /* MUL: n * m */
+            putFReg(fD, triop(Iop_MulF32, rm, getFReg(fN), getFReg(fM)),
+                        condT);
+            DIP("fmuls%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(0,1,0,1): /* NMUL: - n * m */
+            putFReg(fD, unop(Iop_NegF32,
+                             triop(Iop_MulF32, rm, getFReg(fN),
+                                                   getFReg(fM))),
+                    condT);
+            DIP("fnmuls%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(0,1,1,0): /* ADD: n + m */
+            putFReg(fD, triop(Iop_AddF32, rm, getFReg(fN), getFReg(fM)),
+                        condT);
+            DIP("fadds%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(0,1,1,1): /* SUB: n - m */
+            putFReg(fD, triop(Iop_SubF32, rm, getFReg(fN), getFReg(fM)),
+                        condT);
+            DIP("fsubs%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(1,0,0,0): /* DIV: n / m */
+            putFReg(fD, triop(Iop_DivF32, rm, getFReg(fN), getFReg(fM)),
+                        condT);
+            DIP("fdivs%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(1,0,1,0): /* VNFMS: -(d - n * m) (fused) */
+            /* XXXROUNDINGFIXME look up ARM reference for fused
+               multiply-add rounding */
+            putFReg(fD, triop(Iop_AddF32, rm,
+                              unop(Iop_NegF32, getFReg(fD)),
+                              triop(Iop_MulF32, rm,
+                                                getFReg(fN),
+                                                getFReg(fM))),
+                        condT);
+            DIP("vfnmss%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(1,0,1,1): /* VNFMA: -(d + n * m) (fused) */
+            /* XXXROUNDINGFIXME look up ARM reference for fused
+               multiply-add rounding */
+            putFReg(fD, triop(Iop_AddF32, rm,
+                              unop(Iop_NegF32, getFReg(fD)),
+                              triop(Iop_MulF32, rm,
+                                                unop(Iop_NegF32, getFReg(fN)),
+                                                getFReg(fM))),
+                        condT);
+            DIP("vfnmas%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(1,1,0,0): /* VFMA: d + n * m (fused) */
+            /* XXXROUNDINGFIXME look up ARM reference for fused
+               multiply-add rounding */
+            putFReg(fD, triop(Iop_AddF32, rm,
+                              getFReg(fD),
+                              triop(Iop_MulF32, rm, getFReg(fN),
+                                                    getFReg(fM))),
+                        condT);
+            DIP("vfmas%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         case BITS4(1,1,0,1): /* VFMS: d + (-n * m) (fused) */
+            /* XXXROUNDINGFIXME look up ARM reference for fused
+               multiply-add rounding */
+            putFReg(fD, triop(Iop_AddF32, rm,
+                              getFReg(fD),
+                              triop(Iop_MulF32, rm,
+                                    unop(Iop_NegF32, getFReg(fN)),
+                                    getFReg(fM))),
+                        condT);
+            DIP("vfmss%s s%u, s%u, s%u\n", nCC(conq), fD, fN, fM);
+            goto decode_success_vfp;
+         default:
+            break;
+      }
+   }
+
+   /* --------------------- compares (S) --------------------- */
+   /*          31   27   23   19   15 11   7    3
+                 28   24   20   16 12    8    4    0 
+      FCMPS    cond 1110 1D11 0100 Fd 1010 01M0 Fm
+      FCMPES   cond 1110 1D11 0100 Fd 1010 11M0 Fm
+      FCMPZS   cond 1110 1D11 0101 Fd 1010 0100 0000
+      FCMPZES  cond 1110 1D11 0101 Fd 1010 1100 0000
+                                 Z         N
+
+      Z=0 Compare Fd:D vs Fm:M     and set FPSCR 31:28 accordingly
+      Z=1 Compare Fd:D vs zero
+
+      N=1 generates Invalid Operation exn if either arg is any kind of NaN
+      N=0 generates Invalid Operation exn if either arg is a signalling NaN
+      (Not that we pay any attention to N here)
+   */
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,1,0,0) == (INSN(19,16) & BITS4(1,1,1,0))
+       && BITS4(1,0,1,0) == INSN(11,8)
+       && BITS4(0,1,0,0) == (INSN(7,4) & BITS4(0,1,0,1))) {
+      UInt bZ = (insn28 >> 16) & 1;
+      UInt bN = (insn28 >> 7) & 1;
+      UInt bD = (insn28 >> 22) & 1;
+      UInt bM = (insn28 >> 5) & 1;
+      UInt fD = (INSN(15,12) << 1) | bD;
+      UInt fM = (INSN(3,0) << 1) | bM;
+      if (bZ && (INSN(3,0) != 0 || (INSN(7,4) & 3) != 0)) {
+         /* does not decode; fall through */
+      } else {
+         IRTemp argL = newTemp(Ity_F64);
+         IRTemp argR = newTemp(Ity_F64);
+         IRTemp irRes = newTemp(Ity_I32);
+
+         assign(argL, unop(Iop_F32toF64, getFReg(fD)));
+         assign(argR, bZ ? IRExpr_Const(IRConst_F64i(0))
+                         : unop(Iop_F32toF64, getFReg(fM)));
+         assign(irRes, binop(Iop_CmpF64, mkexpr(argL), mkexpr(argR)));
+
+         IRTemp nzcv     = IRTemp_INVALID;
+         IRTemp oldFPSCR = newTemp(Ity_I32);
+         IRTemp newFPSCR = newTemp(Ity_I32);
+
+         /* This is where the fun starts.  We have to convert 'irRes'
+            from an IR-convention return result (IRCmpF64Result) to an
+            ARM-encoded (N,Z,C,V) group.  The final result is in the
+            bottom 4 bits of 'nzcv'. */
+         /* Map compare result from IR to ARM(nzcv) */
+         /*
+            FP cmp result | IR   | ARM(nzcv)
+            --------------------------------
+            UN              0x45   0011
+            LT              0x01   1000
+            GT              0x00   0010
+            EQ              0x40   0110
+         */
+         nzcv = mk_convert_IRCmpF64Result_to_NZCV(irRes);
+
+         /* And update FPSCR accordingly */
+         assign(oldFPSCR, IRExpr_Get(OFFB_FPSCR, Ity_I32));
+         assign(newFPSCR, 
+                binop(Iop_Or32, 
+                      binop(Iop_And32, mkexpr(oldFPSCR), mkU32(0x0FFFFFFF)),
+                      binop(Iop_Shl32, mkexpr(nzcv), mkU8(28))));
+
+         putMiscReg32(OFFB_FPSCR, mkexpr(newFPSCR), condT);
+
+         if (bZ) {
+            DIP("fcmpz%ss%s s%u\n", bN ? "e" : "", nCC(conq), fD);
+         } else {
+            DIP("fcmp%ss%s s%u, s%u\n", bN ? "e" : "",
+                nCC(conq), fD, fM);
+         }
+         goto decode_success_vfp;
+      }
+      /* fall through */
+   }  
+
+   /* --------------------- unary (S) --------------------- */
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,0,0,0) == (INSN(19,16) & BITS4(1,1,1,0))
+       && BITS4(1,0,1,0) == INSN(11,8)
+       && BITS4(0,1,0,0) == (INSN(7,4) & BITS4(0,1,0,1))) {
+      UInt bD = (insn28 >> 22) & 1;
+      UInt bM = (insn28 >> 5) & 1;
+      UInt fD  = (INSN(15,12) << 1) | bD;
+      UInt fM  = (INSN(3,0) << 1) | bM;
+      UInt b16 = (insn28 >> 16) & 1;
+      UInt b7  = (insn28 >> 7) & 1;
+      /**/ if (b16 == 0 && b7 == 0) {
+         // FCPYS
+         putFReg(fD, getFReg(fM), condT);
+         DIP("fcpys%s s%u, s%u\n", nCC(conq), fD, fM);
+         goto decode_success_vfp;
+      }
+      else if (b16 == 0 && b7 == 1) {
+         // FABSS
+         putFReg(fD, unop(Iop_AbsF32, getFReg(fM)), condT);
+         DIP("fabss%s s%u, s%u\n", nCC(conq), fD, fM);
+         goto decode_success_vfp;
+      }
+      else if (b16 == 1 && b7 == 0) {
+         // FNEGS
+         putFReg(fD, unop(Iop_NegF32, getFReg(fM)), condT);
+         DIP("fnegs%s s%u, s%u\n", nCC(conq), fD, fM);
+         goto decode_success_vfp;
+      }
+      else if (b16 == 1 && b7 == 1) {
+         // FSQRTS
+         IRExpr* rm = get_FAKE_roundingmode(); /* XXXROUNDINGFIXME */
+         putFReg(fD, binop(Iop_SqrtF32, rm, getFReg(fM)), condT);
+         DIP("fsqrts%s s%u, s%u\n", nCC(conq), fD, fM);
+         goto decode_success_vfp;
+      }
+      else
+         vassert(0);
+
+      /* fall through */
+   }
+
+   /* ----------------- I <-> S conversions ----------------- */
+
+   // F{S,U}ITOS fD, fM
+   /* These are more complex than FSITOD/FUITOD.  In the D cases, a 32
+      bit int will always fit within the 53 bit mantissa, so there's
+      no possibility of a loss of precision, but that's obviously not
+      the case here.  Hence this case possibly requires rounding, and
+      so it drags in the current rounding mode. */
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(1,0,0,0) == INSN(19,16)
+       && BITS4(1,0,1,0) == (INSN(11,8) & BITS4(1,1,1,0))
+       && BITS4(0,1,0,0) == (INSN(7,4) & BITS4(0,1,0,1))) {
+      UInt bM    = (insn28 >> 5) & 1;
+      UInt bD    = (insn28 >> 22) & 1;
+      UInt fM    = (INSN(3,0) << 1) | bM;
+      UInt fD    = (INSN(15,12) << 1) | bD;
+      UInt syned = (insn28 >> 7) & 1;
+      IRTemp rmode = newTemp(Ity_I32);
+      assign(rmode, mkexpr(mk_get_IR_rounding_mode()));
+      if (syned) {
+         // FSITOS
+         putFReg(fD, binop(Iop_F64toF32,
+                           mkexpr(rmode),
+                           unop(Iop_I32StoF64,
+                                unop(Iop_ReinterpF32asI32, getFReg(fM)))),
+                 condT);
+         DIP("fsitos%s s%u, s%u\n", nCC(conq), fD, fM);
+      } else {
+         // FUITOS
+         putFReg(fD, binop(Iop_F64toF32,
+                           mkexpr(rmode),
+                           unop(Iop_I32UtoF64,
+                                unop(Iop_ReinterpF32asI32, getFReg(fM)))),
+                 condT);
+         DIP("fuitos%s s%u, s%u\n", nCC(conq), fD, fM);
+      }
+      goto decode_success_vfp;
+   }
+
+   // FTO{S,U}IS fD, fM
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(1,1,0,0) == (INSN(19,16) & BITS4(1,1,1,0))
+       && BITS4(1,0,1,0) == INSN(11,8)
+       && BITS4(0,1,0,0) == (INSN(7,4) & BITS4(0,1,0,1))) {
+      UInt   bM    = (insn28 >> 5) & 1;
+      UInt   bD    = (insn28 >> 22) & 1;
+      UInt   fD    = (INSN(15,12) << 1) | bD;
+      UInt   fM    = (INSN(3,0) << 1) | bM;
+      UInt   bZ    = (insn28 >> 7) & 1;
+      UInt   syned = (insn28 >> 16) & 1;
+      IRTemp rmode = newTemp(Ity_I32);
+      assign(rmode, bZ ? mkU32(Irrm_ZERO)
+                       : mkexpr(mk_get_IR_rounding_mode()));
+      if (syned) {
+         // FTOSIS
+         putFReg(fD, unop(Iop_ReinterpI32asF32,
+                          binop(Iop_F64toI32S, mkexpr(rmode),
+                                unop(Iop_F32toF64, getFReg(fM)))),
+                 condT);
+         DIP("ftosi%ss%s s%u, d%u\n", bZ ? "z" : "",
+             nCC(conq), fD, fM);
+         goto decode_success_vfp;
+      } else {
+         // FTOUIS
+         putFReg(fD, unop(Iop_ReinterpI32asF32,
+                          binop(Iop_F64toI32U, mkexpr(rmode),
+                                unop(Iop_F32toF64, getFReg(fM)))),
+                 condT);
+         DIP("ftoui%ss%s s%u, d%u\n", bZ ? "z" : "",
+             nCC(conq), fD, fM);
+         goto decode_success_vfp;
+      }
+   }
+
+   /* ----------------- S <-> D conversions ----------------- */
+
+   // FCVTDS
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,1,1,1) == INSN(19,16)
+       && BITS4(1,0,1,0) == INSN(11,8)
+       && BITS4(1,1,0,0) == (INSN(7,4) & BITS4(1,1,0,1))) {
+      UInt dD = INSN(15,12) | (INSN(22,22) << 4);
+      UInt bM = (insn28 >> 5) & 1;
+      UInt fM = (INSN(3,0) << 1) | bM;
+      putDReg(dD, unop(Iop_F32toF64, getFReg(fM)), condT);
+      DIP("fcvtds%s d%u, s%u\n", nCC(conq), dD, fM);
+      goto decode_success_vfp;
+   }
+
+   // FCVTSD
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,1,1,1) == INSN(19,16)
+       && BITS4(1,0,1,1) == INSN(11,8)
+       && BITS4(1,1,0,0) == (INSN(7,4) & BITS4(1,1,0,1))) {
+      UInt   bD    = (insn28 >> 22) & 1;
+      UInt   fD    = (INSN(15,12) << 1) | bD;
+      UInt   dM    = INSN(3,0) | (INSN(5,5) << 4);
+      IRTemp rmode = newTemp(Ity_I32);
+      assign(rmode, mkexpr(mk_get_IR_rounding_mode()));
+      putFReg(fD, binop(Iop_F64toF32, mkexpr(rmode), getDReg(dM)),
+                  condT);
+      DIP("fcvtsd%s s%u, d%u\n", nCC(conq), fD, dM);
+      goto decode_success_vfp;
+   }
+
+   /* --------------- VCVT fixed<->floating, VFP --------------- */
+   /*          31   27   23   19   15 11   7    3
+                 28   24   20   16 12    8    4    0 
+
+               cond 1110 1D11 1p1U Vd 101f x1i0 imm4
+
+      VCVT<c>.<Td>.F64 <Dd>, <Dd>, #fbits
+      VCVT<c>.<Td>.F32 <Dd>, <Dd>, #fbits
+      VCVT<c>.F64.<Td> <Dd>, <Dd>, #fbits
+      VCVT<c>.F32.<Td> <Dd>, <Dd>, #fbits
+      are of this form.  We only handle a subset of the cases though.
+   */
+   if (BITS8(1,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(1,0,1,0) == (INSN(19,16) & BITS4(1,0,1,0))
+       && BITS3(1,0,1) == INSN(11,9)
+       && BITS3(1,0,0) == (INSN(6,4) & BITS3(1,0,1))) {
+      UInt bD        = INSN(22,22);
+      UInt bOP       = INSN(18,18);
+      UInt bU        = INSN(16,16);
+      UInt Vd        = INSN(15,12);
+      UInt bSF       = INSN(8,8);
+      UInt bSX       = INSN(7,7);
+      UInt bI        = INSN(5,5);
+      UInt imm4      = INSN(3,0);
+      Bool to_fixed  = bOP == 1;
+      Bool dp_op     = bSF == 1;
+      Bool unsyned   = bU == 1;
+      UInt size      = bSX == 0 ? 16 : 32;
+      Int  frac_bits = size - ((imm4 << 1) | bI);
+      UInt d         = dp_op  ? ((bD << 4) | Vd)  : ((Vd << 1) | bD);
+      if (frac_bits >= 1 && frac_bits <= 32 && !to_fixed && !dp_op
+                                            && size == 32) {
+         /* VCVT.F32.{S,U}32 S[d], S[d], #frac_bits */
+         /* This generates really horrible code.  We could potentially
+            do much better. */
+         IRTemp rmode = newTemp(Ity_I32);
+         assign(rmode, mkU32(Irrm_NEAREST)); // per the spec
+         IRTemp src32 = newTemp(Ity_I32);
+         assign(src32,  unop(Iop_ReinterpF32asI32, getFReg(d)));
+         IRExpr* as_F64 = unop( unsyned ? Iop_I32UtoF64 : Iop_I32StoF64,
+                                mkexpr(src32 ) );
+         IRTemp scale = newTemp(Ity_F64);
+         assign(scale, unop(Iop_I32UtoF64, mkU32( 1 << (frac_bits-1) )));
+         IRExpr* rm     = mkU32(Irrm_NEAREST);
+         IRExpr* resF64 = triop(Iop_DivF64,
+                                rm, as_F64, 
+                                triop(Iop_AddF64, rm, mkexpr(scale),
+                                                      mkexpr(scale)));
+         IRExpr* resF32 = binop(Iop_F64toF32, mkexpr(rmode), resF64);
+         putFReg(d, resF32, condT);
+         DIP("vcvt.f32.%c32, s%u, s%u, #%d\n",
+             unsyned ? 'u' : 's', d, d, frac_bits);
+         goto decode_success_vfp;
+      }
+      if (frac_bits >= 1 && frac_bits <= 32 && !to_fixed && dp_op
+                                            && size == 32) {
+         /* VCVT.F64.{S,U}32 D[d], D[d], #frac_bits */
+         /* This generates really horrible code.  We could potentially
+            do much better. */
+         IRTemp src32 = newTemp(Ity_I32);
+         assign(src32, unop(Iop_64to32, getDRegI64(d)));
+         IRExpr* as_F64 = unop( unsyned ? Iop_I32UtoF64 : Iop_I32StoF64,
+                                mkexpr(src32 ) );
+         IRTemp scale = newTemp(Ity_F64);
+         assign(scale, unop(Iop_I32UtoF64, mkU32( 1 << (frac_bits-1) )));
+         IRExpr* rm     = mkU32(Irrm_NEAREST);
+         IRExpr* resF64 = triop(Iop_DivF64,
+                                rm, as_F64, 
+                                triop(Iop_AddF64, rm, mkexpr(scale),
+                                                      mkexpr(scale)));
+         putDReg(d, resF64, condT);
+         DIP("vcvt.f64.%c32, d%u, d%u, #%d\n",
+             unsyned ? 'u' : 's', d, d, frac_bits);
+         goto decode_success_vfp;
+      }
+      if (frac_bits >= 1 && frac_bits <= 32 && to_fixed && dp_op
+                                            && size == 32) {
+         /* VCVT.{S,U}32.F64 D[d], D[d], #frac_bits */
+         IRTemp srcF64 = newTemp(Ity_F64);
+         assign(srcF64, getDReg(d));
+         IRTemp scale = newTemp(Ity_F64);
+         assign(scale, unop(Iop_I32UtoF64, mkU32( 1 << (frac_bits-1) )));
+         IRTemp scaledF64 = newTemp(Ity_F64);
+         IRExpr* rm = mkU32(Irrm_NEAREST);
+         assign(scaledF64, triop(Iop_MulF64,
+                                 rm, mkexpr(srcF64),
+                                 triop(Iop_AddF64, rm, mkexpr(scale),
+                                                       mkexpr(scale))));
+         IRTemp rmode = newTemp(Ity_I32);
+         assign(rmode, mkU32(Irrm_ZERO)); // as per the spec
+         IRTemp asI32 = newTemp(Ity_I32);
+         assign(asI32, binop(unsyned ? Iop_F64toI32U : Iop_F64toI32S,
+                             mkexpr(rmode), mkexpr(scaledF64)));
+         putDRegI64(d, unop(unsyned ? Iop_32Uto64 : Iop_32Sto64,
+                            mkexpr(asI32)), condT);
+         goto decode_success_vfp;
+      }
+      /* fall through */
+   }
+
+   /* FAILURE */
+   return False;
+
+  decode_success_vfp:
+   /* Check that any accepted insn really is a CP10 or CP11 insn, iow,
+      assert that we aren't accepting, in this fn, insns that actually
+      should be handled somewhere else. */
+   vassert(INSN(11,9) == BITS3(1,0,1)); // 11:8 = 1010 or 1011
+   return True;  
+
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Instructions in NV (never) space                     ---*/
+/*------------------------------------------------------------*/
+
+/* ARM only */
+/* Translate a NV space instruction.  If successful, returns True and
+   *dres may or may not be updated.  If failure, returns False and
+   doesn't change *dres nor create any IR.
+
+   Note that all NEON instructions (in ARM mode) are handled through
+   here, since they are all in NV space.
+*/
+static Bool decode_NV_instruction ( /*MOD*/DisResult* dres,
+                                    const VexArchInfo* archinfo,
+                                    UInt insn )
+{
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+#  define INSN_COND          SLICE_UInt(insn, 31, 28)
+
+   HChar dis_buf[128];
+
+   // Should only be called for NV instructions
+   vassert(BITS4(1,1,1,1) == INSN_COND);
+
+   /* ------------------------ pld{w} ------------------------ */
+   /* PLD/PLDW, immediate-offset form.  These are pure prefetch hints;
+      we decode them (for disassembly output) but emit no IR. */
+   if (BITS8(0,1,0,1, 0,0, 0,1) == (INSN(27,20) & BITS8(1,1,1,1, 0,0, 1,1))
+       && BITS4(1,1,1,1) == INSN(15,12)) {
+      UInt rN    = INSN(19,16);
+      UInt imm12 = INSN(11,0);
+      UInt bU    = INSN(23,23);
+      UInt bR    = INSN(22,22);
+      DIP("pld%c [r%u, #%c%u]\n", bR ? ' ' : 'w', rN, bU ? '+' : '-', imm12);
+      return True;
+   }
+
+   /* PLD/PLDW, register-offset form (bit 4 must be 0).  Also a hint;
+      the only IR produced is a dead temp used for decode sanity. */
+   if (BITS8(0,1,1,1, 0,0, 0,1) == (INSN(27,20) & BITS8(1,1,1,1, 0,0, 1,1))
+       && BITS4(1,1,1,1) == INSN(15,12)
+       && 0 == INSN(4,4)) {
+      UInt rN   = INSN(19,16);
+      UInt rM   = INSN(3,0);
+      UInt imm5 = INSN(11,7);
+      UInt sh2  = INSN(6,5);
+      UInt bU   = INSN(23,23);
+      UInt bR   = INSN(22,22);
+      /* rM == 15 is UNPREDICTABLE; rN == 15 is only allowed for PLD
+         (literal form), not PLDW. */
+      if (rM != 15 && (rN != 15 || bR)) {
+         IRExpr* eaE = mk_EA_reg_plusminus_shifted_reg(rN, bU, rM,
+                                                       sh2, imm5, dis_buf);
+         IRTemp eaT = newTemp(Ity_I32);
+         /* Bind eaE to a temp merely for debugging-vex purposes, so we
+            can check it's a plausible decoding.  It will get removed
+            by iropt a little later on. */
+         vassert(eaE);
+         assign(eaT, eaE);
+         DIP("pld%c %s\n", bR ? ' ' : 'w', dis_buf);
+         return True;
+      }
+      /* fall through */
+   }
+
+   /* ------------------------ pli ------------------------ */
+   /* PLI (instruction-side prefetch hint), immediate form; no IR. */
+   if (BITS8(0,1,0,0, 0, 1,0,1) == (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1))
+       && BITS4(1,1,1,1) == INSN(15,12)) {
+      UInt rN    = INSN(19,16);
+      UInt imm12 = INSN(11,0);
+      UInt bU    = INSN(23,23);
+      DIP("pli [r%u, #%c%u]\n", rN, bU ? '+' : '-', imm12);
+      return True;
+   }
+
+   /* --------------------- Interworking branches --------------------- */
+
+   // BLX (1), viz, unconditional branch and link to R15+simm24
+   // and set CPSR.T = 1, that is, switch to Thumb mode
+   if (INSN(31,25) == BITS7(1,1,1,1,1,0,1)) {
+      UInt bitH   = INSN(24,24);
+      Int  uimm24 = INSN(23,0);
+      /* NOTE(review): (((uimm24 << 8) >> 8) << 2) sign-extends the
+         24-bit immediate and scales by 4; bitH supplies the halfword
+         offset.  This relies on arithmetic right shift of a signed
+         int, which is implementation-defined in ISO C but assumed
+         throughout VEX. */
+      Int  simm24 = (((uimm24 << 8) >> 8) << 2) + (bitH << 1);
+      /* Now this is a bit tricky.  Since we're decoding an ARM insn,
+         it is implies that CPSR.T == 0.  Hence the current insn's
+         address is guaranteed to be of the form X--(30)--X00.  So, no
+         need to mask any bits off it.  But need to set the lowest bit
+         to 1 to denote we're in Thumb mode after this, since
+         guest_R15T has CPSR.T as the lowest bit.  And we can't chase
+         into the call, so end the block at this point. */
+      UInt dst = guest_R15_curr_instr_notENC + 8 + (simm24 | 1);
+      putIRegA( 14, mkU32(guest_R15_curr_instr_notENC + 4),
+                    IRTemp_INVALID/*because AL*/, Ijk_Boring );
+      llPutIReg(15, mkU32(dst));
+      dres->jk_StopHere = Ijk_Call;
+      dres->whatNext    = Dis_StopHere;
+      DIP("blx 0x%x (and switch to Thumb mode)\n", dst - 1);
+      return True;
+   }
+
+   /* ------------------- v7 barrier insns ------------------- */
+   /* All ISB/DSB/DMB variants are modelled conservatively as a full
+      IR fence, regardless of the barrier's domain/type option bits. */
+   switch (insn) {
+      case 0xF57FF06F: /* ISB */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("ISB\n");
+         return True;
+      case 0xF57FF04F: /* DSB sy */
+      case 0xF57FF04E: /* DSB st */
+      case 0xF57FF04B: /* DSB ish */
+      case 0xF57FF04A: /* DSB ishst */
+      case 0xF57FF047: /* DSB nsh */
+      case 0xF57FF046: /* DSB nshst */
+      case 0xF57FF043: /* DSB osh */
+      case 0xF57FF042: /* DSB oshst */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("DSB\n");
+         return True;
+      case 0xF57FF05F: /* DMB sy */
+      case 0xF57FF05E: /* DMB st */
+      case 0xF57FF05B: /* DMB ish */
+      case 0xF57FF05A: /* DMB ishst */
+      case 0xF57FF057: /* DMB nsh */
+      case 0xF57FF056: /* DMB nshst */
+      case 0xF57FF053: /* DMB osh */
+      case 0xF57FF052: /* DMB oshst */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("DMB\n");
+         return True;
+      default:
+         break;
+   }
+
+   /* ------------------- CLREX ------------------ */
+   if (insn == 0xF57FF01F) {
+      /* AFAICS, this simply cancels a (all?) reservations made by a
+         (any?) preceding LDREX(es).  Arrange to hand it through to
+         the back end. */
+      stmt( IRStmt_MBE(Imbe_CancelReservation) );
+      DIP("clrex\n");
+      return True;
+   }
+
+   /* ------------------- NEON ------------------- */
+   /* All ARM-mode NEON insns live in NV space; hand off to the NEON
+      decoder only if the host caps say NEON is available. */
+   if (archinfo->hwcaps & VEX_HWCAPS_ARM_NEON) {
+      Bool ok_neon = decode_NEON_instruction(
+                        dres, insn, IRTemp_INVALID/*unconditional*/, 
+                        False/*!isT*/
+                     );
+      if (ok_neon)
+         return True;
+   }
+
+   // unrecognised
+   return False;
+
+#  undef INSN_COND
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Disassemble a single ARM instruction                 ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single ARM instruction into IR.  The instruction is
+   located in host memory at guest_instr, and has (decoded) guest IP
+   of guest_R15_curr_instr_notENC, which will have been set before the
+   call here. */
+
+static
+DisResult disInstr_ARM_WRK (
+             Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+             Bool         resteerCisOk,
+             void*        callback_opaque,
+             const UChar* guest_instr,
+             const VexArchInfo* archinfo,
+             const VexAbiInfo*  abiinfo,
+             Bool         sigill_diag
+          )
+{
+   // A macro to fish bits out of 'insn'.
+#  define INSN(_bMax,_bMin)  SLICE_UInt(insn, (_bMax), (_bMin))
+#  define INSN_COND          SLICE_UInt(insn, 31, 28)
+
+   DisResult dres;
+   UInt      insn;
+   //Bool      allow_VFP = False;
+   //UInt      hwcaps = archinfo->hwcaps;
+   IRTemp    condT; /* :: Ity_I32 */
+   UInt      summary;
+   HChar     dis_buf[128];  // big enough to hold LDMIA etc text
+
+   /* What insn variants are we supporting today? */
+   //allow_VFP  = (0 != (hwcaps & VEX_HWCAPS_ARM_VFP));
+   // etc etc
+
+   /* Set result defaults. */
+   dres.whatNext    = Dis_Continue;
+   dres.len         = 4;
+   dres.continueAt  = 0;
+   dres.jk_StopHere = Ijk_INVALID;
+
+   /* Set default actions for post-insn handling of writes to r15, if
+      required. */
+   r15written = False;
+   r15guard   = IRTemp_INVALID; /* unconditional */
+   r15kind    = Ijk_Boring;
+
+   /* At least this is simple on ARM: insns are all 4 bytes long, and
+      4-aligned.  So just fish the whole thing out of memory right now
+      and have done. */
+   insn = getUIntLittleEndianly( guest_instr );
+
+   if (0) vex_printf("insn: 0x%x\n", insn);
+
+   DIP("\t(arm) 0x%x:  ", (UInt)guest_R15_curr_instr_notENC);
+
+   vassert(0 == (guest_R15_curr_instr_notENC & 3));
+
+   /* ----------------------------------------------------------- */
+
+   /* Spot "Special" instructions (see comment at top of file). */
+   {
+      const UChar* code = guest_instr;
+      /* Spot the 16-byte preamble: 
+
+         e1a0c1ec  mov r12, r12, ROR #3
+         e1a0c6ec  mov r12, r12, ROR #13
+         e1a0ceec  mov r12, r12, ROR #29
+         e1a0c9ec  mov r12, r12, ROR #19
+      */
+      UInt word1 = 0xE1A0C1EC;
+      UInt word2 = 0xE1A0C6EC;
+      UInt word3 = 0xE1A0CEEC;
+      UInt word4 = 0xE1A0C9EC;
+      if (getUIntLittleEndianly(code+ 0) == word1 &&
+          getUIntLittleEndianly(code+ 4) == word2 &&
+          getUIntLittleEndianly(code+ 8) == word3 &&
+          getUIntLittleEndianly(code+12) == word4) {
+         /* Got a "Special" instruction preamble.  Which one is it? */
+         if (getUIntLittleEndianly(code+16) == 0xE18AA00A
+                                               /* orr r10,r10,r10 */) {
+            /* R3 = client_request ( R4 ) */
+            DIP("r3 = client_request ( %%r4 )\n");
+            llPutIReg(15, mkU32( guest_R15_curr_instr_notENC + 20 ));
+            dres.jk_StopHere = Ijk_ClientReq;
+            dres.whatNext    = Dis_StopHere;
+            goto decode_success;
+         }
+         else
+         if (getUIntLittleEndianly(code+16) == 0xE18BB00B
+                                               /* orr r11,r11,r11 */) {
+            /* R3 = guest_NRADDR */
+            DIP("r3 = guest_NRADDR\n");
+            dres.len = 20;
+            llPutIReg(3, IRExpr_Get( OFFB_NRADDR, Ity_I32 ));
+            goto decode_success;
+         }
+         else
+         if (getUIntLittleEndianly(code+16) == 0xE18CC00C
+                                               /* orr r12,r12,r12 */) {
+            /*  branch-and-link-to-noredir R4 */
+            DIP("branch-and-link-to-noredir r4\n");
+            llPutIReg(14, mkU32( guest_R15_curr_instr_notENC + 20) );
+            llPutIReg(15, llGetIReg(4));
+            dres.jk_StopHere = Ijk_NoRedir;
+            dres.whatNext    = Dis_StopHere;
+            goto decode_success;
+         }
+         else
+         if (getUIntLittleEndianly(code+16) == 0xE1899009
+                                               /* orr r9,r9,r9 */) {
+            /* IR injection */
+            DIP("IR injection\n");
+            vex_inject_ir(irsb, Iend_LE);
+            // Invalidate the current insn. The reason is that the IRop we're
+            // injecting here can change. In which case the translation has to
+            // be redone. For ease of handling, we simply invalidate all the
+            // time.
+            stmt(IRStmt_Put(OFFB_CMSTART, mkU32(guest_R15_curr_instr_notENC)));
+            stmt(IRStmt_Put(OFFB_CMLEN,   mkU32(20)));
+            llPutIReg(15, mkU32( guest_R15_curr_instr_notENC + 20 ));
+            dres.whatNext    = Dis_StopHere;
+            dres.jk_StopHere = Ijk_InvalICache;
+            goto decode_success;
+         }
+         /* We don't know what it is.  Set opc1/opc2 so decode_failure
+            can print the insn following the Special-insn preamble. */
+         insn = getUIntLittleEndianly(code+16);
+         goto decode_failure;
+         /*NOTREACHED*/
+      }
+
+   }
+
+   /* ----------------------------------------------------------- */
+
+   /* Main ARM instruction decoder starts here. */
+
+   /* Deal with the condition.  Strategy is to merely generate a
+      condition temporary at this point (or IRTemp_INVALID, meaning
+      unconditional).  We leave it to lower-level instruction decoders
+      to decide whether they can generate straight-line code, or
+      whether they must generate a side exit before the instruction.
+      condT :: Ity_I32 and is always either zero or one. */
+   condT = IRTemp_INVALID;
+   switch ( (ARMCondcode)INSN_COND ) {
+      case ARMCondNV: {
+         // Illegal instruction prior to v5 (see ARM ARM A3-5), but
+         // some cases are acceptable
+         Bool ok = decode_NV_instruction(&dres, archinfo, insn);
+         if (ok)
+            goto decode_success;
+         else
+            goto decode_failure;
+      }
+      case ARMCondAL: // Always executed
+         break;
+      case ARMCondEQ: case ARMCondNE: case ARMCondHS: case ARMCondLO:
+      case ARMCondMI: case ARMCondPL: case ARMCondVS: case ARMCondVC:
+      case ARMCondHI: case ARMCondLS: case ARMCondGE: case ARMCondLT:
+      case ARMCondGT: case ARMCondLE:
+         condT = newTemp(Ity_I32);
+         assign( condT, mk_armg_calculate_condition( INSN_COND ));
+         break;
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- ARMv5 integer instructions                            -- */
+   /* ----------------------------------------------------------- */
+
+   /* ---------------- Data processing ops ------------------- */
+
+   if (0 == (INSN(27,20) & BITS8(1,1,0,0,0,0,0,0))
+       && !(INSN(25,25) == 0 && INSN(7,7) == 1 && INSN(4,4) == 1)) {
+      IRTemp  shop = IRTemp_INVALID; /* shifter operand */
+      IRTemp  shco = IRTemp_INVALID; /* shifter carry out */
+      UInt    rD   = (insn >> 12) & 0xF; /* 15:12 */
+      UInt    rN   = (insn >> 16) & 0xF; /* 19:16 */
+      UInt    bitS = (insn >> 20) & 1; /* 20:20 */
+      IRTemp  rNt  = IRTemp_INVALID;
+      IRTemp  res  = IRTemp_INVALID;
+      IRTemp  oldV = IRTemp_INVALID;
+      IRTemp  oldC = IRTemp_INVALID;
+      const HChar*  name = NULL;
+      IROp    op   = Iop_INVALID;
+      Bool    ok;
+
+      switch (INSN(24,21)) {
+
+         /* --------- ADD, SUB, AND, OR --------- */
+         case BITS4(0,1,0,0): /* ADD:  Rd = Rn + shifter_operand */
+            name = "add"; op = Iop_Add32; goto rd_eq_rn_op_SO;
+         case BITS4(0,0,1,0): /* SUB:  Rd = Rn - shifter_operand */
+            name = "sub"; op = Iop_Sub32; goto rd_eq_rn_op_SO;
+         case BITS4(0,0,1,1): /* RSB:  Rd = shifter_operand - Rn */
+            name = "rsb"; op = Iop_Sub32; goto rd_eq_rn_op_SO;
+         case BITS4(0,0,0,0): /* AND:  Rd = Rn & shifter_operand */
+            name = "and"; op = Iop_And32; goto rd_eq_rn_op_SO;
+         case BITS4(1,1,0,0): /* OR:   Rd = Rn | shifter_operand */
+            name = "orr"; op = Iop_Or32; goto rd_eq_rn_op_SO;
+         case BITS4(0,0,0,1): /* EOR:  Rd = Rn ^ shifter_operand */
+            name = "eor"; op = Iop_Xor32; goto rd_eq_rn_op_SO;
+         case BITS4(1,1,1,0): /* BIC:  Rd = Rn & ~shifter_operand */
+            name = "bic"; op = Iop_And32; goto rd_eq_rn_op_SO;
+         rd_eq_rn_op_SO: {
+            Bool isRSB = False;
+            Bool isBIC = False;
+            switch (INSN(24,21)) {
+               case BITS4(0,0,1,1):
+                  vassert(op == Iop_Sub32); isRSB = True; break;
+               case BITS4(1,1,1,0):
+                  vassert(op == Iop_And32); isBIC = True; break;
+               default:
+                  break;
+            }
+            rNt = newTemp(Ity_I32);
+            assign(rNt, getIRegA(rN));
+            ok = mk_shifter_operand(
+                    INSN(25,25), INSN(11,0), 
+                    &shop, bitS ? &shco : NULL, dis_buf
+                 );
+            if (!ok)
+               break;
+            res = newTemp(Ity_I32);
+            // compute the main result
+            if (isRSB) {
+               // reverse-subtract: shifter_operand - Rn
+               vassert(op == Iop_Sub32);
+               assign(res, binop(op, mkexpr(shop), mkexpr(rNt)) );
+            } else if (isBIC) {
+               // andn: shifter_operand & ~Rn
+               vassert(op == Iop_And32);
+               assign(res, binop(op, mkexpr(rNt),
+                                     unop(Iop_Not32, mkexpr(shop))) );
+            } else {
+               // normal: Rn op shifter_operand
+               assign(res, binop(op, mkexpr(rNt), mkexpr(shop)) );
+            }
+            // but don't commit it until after we've finished
+            // all necessary reads from the guest state
+            if (bitS
+                && (op == Iop_And32 || op == Iop_Or32 || op == Iop_Xor32)) {
+               oldV = newTemp(Ity_I32);
+               assign( oldV, mk_armg_calculate_flag_v() );
+            }
+            // can't safely read guest state after here
+            // now safe to put the main result
+            putIRegA( rD, mkexpr(res), condT, Ijk_Boring );
+            // XXXX!! not safe to read any guest state after
+            // this point (I think the code below doesn't do that).
+            if (!bitS)
+               vassert(shco == IRTemp_INVALID);
+            /* Update the flags thunk if necessary */
+            if (bitS) {
+               vassert(shco != IRTemp_INVALID);
+               switch (op) {
+                  case Iop_Add32:
+                     setFlags_D1_D2( ARMG_CC_OP_ADD, rNt, shop, condT );
+                     break;
+                  case Iop_Sub32:
+                     if (isRSB) {
+                        setFlags_D1_D2( ARMG_CC_OP_SUB, shop, rNt, condT );
+                     } else {
+                        setFlags_D1_D2( ARMG_CC_OP_SUB, rNt, shop, condT );
+                     }
+                     break;
+                  case Iop_And32: /* BIC and AND set the flags the same */
+                  case Iop_Or32:
+                  case Iop_Xor32:
+                     // oldV has been read just above
+                     setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC,
+                                        res, shco, oldV, condT );
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            DIP("%s%s%s r%u, r%u, %s\n",
+                name, nCC(INSN_COND), bitS ? "s" : "", rD, rN, dis_buf );
+            goto decode_success;
+         }
+
+         /* --------- MOV, MVN --------- */
+         case BITS4(1,1,0,1):   /* MOV: Rd = shifter_operand */
+         case BITS4(1,1,1,1): { /* MVN: Rd = not(shifter_operand) */
+            Bool isMVN = INSN(24,21) == BITS4(1,1,1,1);
+            IRTemp jk = Ijk_Boring;
+            if (rN != 0)
+               break; /* rN must be zero */
+            ok = mk_shifter_operand(
+                    INSN(25,25), INSN(11,0), 
+                    &shop, bitS ? &shco : NULL, dis_buf
+                 );
+            if (!ok)
+               break;
+            res = newTemp(Ity_I32);
+            assign( res, isMVN ? unop(Iop_Not32, mkexpr(shop))
+                               : mkexpr(shop) );
+            if (bitS) {
+               vassert(shco != IRTemp_INVALID);
+               oldV = newTemp(Ity_I32);
+               assign( oldV, mk_armg_calculate_flag_v() );
+            } else {
+               vassert(shco == IRTemp_INVALID);
+            }
+            /* According to the Cortex A8 TRM Sec. 5.2.1, MOV PC, r14 is a
+                return for purposes of branch prediction. */
+            if (!isMVN && INSN(11,0) == 14) {
+              jk = Ijk_Ret;
+            }
+            // can't safely read guest state after here
+            putIRegA( rD, mkexpr(res), condT, jk );
+            /* Update the flags thunk if necessary */
+            if (bitS) {
+               setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, 
+                                  res, shco, oldV, condT );
+            }
+            DIP("%s%s%s r%u, %s\n",
+                isMVN ? "mvn" : "mov",
+                nCC(INSN_COND), bitS ? "s" : "", rD, dis_buf );
+            goto decode_success;
+         }
+
+         /* --------- CMP --------- */
+         case BITS4(1,0,1,0):   /* CMP:  (void) Rn - shifter_operand */
+         case BITS4(1,0,1,1): { /* CMN:  (void) Rn + shifter_operand */
+            Bool isCMN = INSN(24,21) == BITS4(1,0,1,1);
+            if (rD != 0)
+               break; /* rD must be zero */
+            if (bitS == 0)
+               break; /* if S (bit 20) is not set, it's not CMP/CMN */
+            rNt = newTemp(Ity_I32);
+            assign(rNt, getIRegA(rN));
+            ok = mk_shifter_operand(
+                    INSN(25,25), INSN(11,0), 
+                    &shop, NULL, dis_buf
+                 );
+            if (!ok)
+               break;
+            // can't safely read guest state after here
+            /* Update the flags thunk. */
+            setFlags_D1_D2( isCMN ? ARMG_CC_OP_ADD : ARMG_CC_OP_SUB,
+                            rNt, shop, condT );
+            DIP("%s%s r%u, %s\n",
+                isCMN ? "cmn" : "cmp",
+                nCC(INSN_COND), rN, dis_buf );
+            goto decode_success;
+         }
+
+         /* --------- TST --------- */
+         case BITS4(1,0,0,0):   /* TST:  (void) Rn & shifter_operand */
+         case BITS4(1,0,0,1): { /* TEQ:  (void) Rn ^ shifter_operand */
+            Bool isTEQ = INSN(24,21) == BITS4(1,0,0,1);
+            if (rD != 0)
+               break; /* rD must be zero */
+            if (bitS == 0)
+               break; /* if S (bit 20) is not set, it's not TST/TEQ */
+            rNt = newTemp(Ity_I32);
+            assign(rNt, getIRegA(rN));
+            ok = mk_shifter_operand(
+                    INSN(25,25), INSN(11,0), 
+                    &shop, &shco, dis_buf
+                 );
+            if (!ok)
+               break;
+            /* Update the flags thunk. */
+            res = newTemp(Ity_I32);
+            assign( res, binop(isTEQ ? Iop_Xor32 : Iop_And32, 
+                               mkexpr(rNt), mkexpr(shop)) );
+            oldV = newTemp(Ity_I32);
+            assign( oldV, mk_armg_calculate_flag_v() );
+            // can't safely read guest state after here
+            setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC,
+                               res, shco, oldV, condT );
+            DIP("%s%s r%u, %s\n",
+                isTEQ ? "teq" : "tst",
+                nCC(INSN_COND), rN, dis_buf );
+            goto decode_success;
+         }
+
+         /* --------- ADC, SBC, RSC --------- */
+         case BITS4(0,1,0,1): /* ADC:  Rd = Rn + shifter_operand + oldC */
+            name = "adc"; goto rd_eq_rn_op_SO_op_oldC;
+         case BITS4(0,1,1,0): /* SBC:  Rd = Rn - shifter_operand - (oldC ^ 1) */
+            name = "sbc"; goto rd_eq_rn_op_SO_op_oldC;
+         case BITS4(0,1,1,1): /* RSC:  Rd = shifter_operand - Rn - (oldC ^ 1) */
+            name = "rsc"; goto rd_eq_rn_op_SO_op_oldC;
+         rd_eq_rn_op_SO_op_oldC: {
+            // FIXME: shco isn't used for anything.  Get rid of it.
+            rNt = newTemp(Ity_I32);
+            assign(rNt, getIRegA(rN));
+            ok = mk_shifter_operand(
+                    INSN(25,25), INSN(11,0), 
+                    &shop, bitS ? &shco : NULL, dis_buf
+                 );
+            if (!ok)
+               break;
+            oldC = newTemp(Ity_I32);
+            assign( oldC, mk_armg_calculate_flag_c() );
+            res = newTemp(Ity_I32);
+            // compute the main result
+            switch (INSN(24,21)) {
+               case BITS4(0,1,0,1): /* ADC */
+                  assign(res,
+                         binop(Iop_Add32,
+                               binop(Iop_Add32, mkexpr(rNt), mkexpr(shop)),
+                               mkexpr(oldC) ));
+                  break;
+               case BITS4(0,1,1,0): /* SBC */
+                  assign(res,
+                         binop(Iop_Sub32,
+                               binop(Iop_Sub32, mkexpr(rNt), mkexpr(shop)),
+                               binop(Iop_Xor32, mkexpr(oldC), mkU32(1)) ));
+                  break;
+               case BITS4(0,1,1,1): /* RSC */
+                  assign(res,
+                         binop(Iop_Sub32,
+                               binop(Iop_Sub32, mkexpr(shop), mkexpr(rNt)),
+                               binop(Iop_Xor32, mkexpr(oldC), mkU32(1)) ));
+                  break;
+               default:
+                  vassert(0);
+            }
+            // but don't commit it until after we've finished
+            // all necessary reads from the guest state
+            // now safe to put the main result
+            putIRegA( rD, mkexpr(res), condT, Ijk_Boring );
+            // XXXX!! not safe to read any guest state after
+            // this point (I think the code below doesn't do that).
+            if (!bitS)
+               vassert(shco == IRTemp_INVALID);
+            /* Update the flags thunk if necessary */
+            if (bitS) {
+               vassert(shco != IRTemp_INVALID);
+               switch (INSN(24,21)) {
+                  case BITS4(0,1,0,1): /* ADC */
+                     setFlags_D1_D2_ND( ARMG_CC_OP_ADC,
+                                        rNt, shop, oldC, condT );
+                     break;
+                  case BITS4(0,1,1,0): /* SBC */
+                     setFlags_D1_D2_ND( ARMG_CC_OP_SBB,
+                                        rNt, shop, oldC, condT );
+                     break;
+                  case BITS4(0,1,1,1): /* RSC */
+                     setFlags_D1_D2_ND( ARMG_CC_OP_SBB,
+                                        shop, rNt, oldC, condT );
+                     break;
+                  default:
+                     vassert(0);
+               }
+            }
+            DIP("%s%s%s r%u, r%u, %s\n",
+                name, nCC(INSN_COND), bitS ? "s" : "", rD, rN, dis_buf );
+            goto decode_success;
+         }
+
+         default:
+            vassert(0);
+      }
+   } /* if (0 == (INSN(27,20) & BITS8(1,1,0,0,0,0,0,0)) */
+
+   /* --------------------- Load/store (ubyte & word) -------- */
+   // LDR STR LDRB STRB
+   /*                 31   27   23   19 15 11    6   4 3  # highest bit
+                        28   24   20 16 12
+      A5-20   1 | 16  cond 0101 UB0L Rn Rd imm12
+      A5-22   1 | 32  cond 0111 UBOL Rn Rd imm5  sh2 0 Rm
+      A5-24   2 | 16  cond 0101 UB1L Rn Rd imm12
+      A5-26   2 | 32  cond 0111 UB1L Rn Rd imm5  sh2 0 Rm
+      A5-28   3 | 16  cond 0100 UB0L Rn Rd imm12
+      A5-32   3 | 32  cond 0110 UB0L Rn Rd imm5  sh2 0 Rm
+   */
+   /* case coding:
+             1   at-ea               (access at ea)
+             2   at-ea-then-upd      (access at ea, then Rn = ea)
+             3   at-Rn-then-upd      (access at Rn, then Rn = ea)
+      ea coding
+             16  Rn +/- imm12
+             32  Rn +/- Rm sh2 imm5
+   */
+   /* Quickly skip over all of this for hopefully most instructions */
+   if ((INSN(27,24) & BITS4(1,1,0,0)) != BITS4(0,1,0,0))
+      goto after_load_store_ubyte_or_word;
+
+   summary = 0;
+   
+   /**/ if (INSN(27,24) == BITS4(0,1,0,1) && INSN(21,21) == 0) {
+      summary = 1 | 16;
+   }
+   else if (INSN(27,24) == BITS4(0,1,1,1) && INSN(21,21) == 0
+                                          && INSN(4,4) == 0) {
+      summary = 1 | 32;
+   }
+   else if (INSN(27,24) == BITS4(0,1,0,1) && INSN(21,21) == 1) {
+      summary = 2 | 16;
+   }
+   else if (INSN(27,24) == BITS4(0,1,1,1) && INSN(21,21) == 1
+                                          && INSN(4,4) == 0) {
+      summary = 2 | 32;
+   }
+   else if (INSN(27,24) == BITS4(0,1,0,0) && INSN(21,21) == 0) {
+      summary = 3 | 16;
+   }
+   else if (INSN(27,24) == BITS4(0,1,1,0) && INSN(21,21) == 0
+                                          && INSN(4,4) == 0) {
+      summary = 3 | 32;
+   }
+   else goto after_load_store_ubyte_or_word;
+
+   { UInt rN = (insn >> 16) & 0xF; /* 19:16 */
+     UInt rD = (insn >> 12) & 0xF; /* 15:12 */
+     UInt rM = (insn >> 0)  & 0xF; /*  3:0  */
+     UInt bU = (insn >> 23) & 1;      /* 23 */
+     UInt bB = (insn >> 22) & 1;      /* 22 */
+     UInt bL = (insn >> 20) & 1;      /* 20 */
+     UInt imm12 = (insn >> 0) & 0xFFF; /* 11:0 */
+     UInt imm5  = (insn >> 7) & 0x1F;  /* 11:7 */
+     UInt sh2   = (insn >> 5) & 3;     /* 6:5 */
+
+     /* Skip some invalid cases, which would lead to two competing
+        updates to the same register, or which are otherwise
+        disallowed by the spec. */
+     switch (summary) {
+        case 1 | 16:
+           break;
+        case 1 | 32: 
+           if (rM == 15) goto after_load_store_ubyte_or_word;
+           break;
+        case 2 | 16: case 3 | 16:
+           if (rN == 15) goto after_load_store_ubyte_or_word;
+           if (bL == 1 && rN == rD) goto after_load_store_ubyte_or_word;
+           break;
+        case 2 | 32: case 3 | 32:
+           if (rM == 15) goto after_load_store_ubyte_or_word;
+           if (rN == 15) goto after_load_store_ubyte_or_word;
+           if (rN == rM) goto after_load_store_ubyte_or_word;
+           if (bL == 1 && rN == rD) goto after_load_store_ubyte_or_word;
+           break;
+        default:
+           vassert(0);
+     }
+
+     /* compute the effective address.  Bind it to a tmp since we
+        may need to use it twice. */
+     IRExpr* eaE = NULL;
+     switch (summary & 0xF0) {
+        case 16:
+           eaE = mk_EA_reg_plusminus_imm12( rN, bU, imm12, dis_buf );
+           break;
+        case 32:
+           eaE = mk_EA_reg_plusminus_shifted_reg( rN, bU, rM, sh2, imm5,
+                                                  dis_buf );
+           break;
+     }
+     vassert(eaE);
+     IRTemp eaT = newTemp(Ity_I32);
+     assign(eaT, eaE);
+
+     /* get the old Rn value */
+     IRTemp rnT = newTemp(Ity_I32);
+     assign(rnT, getIRegA(rN));
+
+     /* decide on the transfer address */
+     IRTemp taT = IRTemp_INVALID;
+     switch (summary & 0x0F) {
+        case 1: case 2: taT = eaT; break;
+        case 3:         taT = rnT; break;
+     }
+     vassert(taT != IRTemp_INVALID);
+
+     if (bL == 0) {
+       /* Store.  If necessary, update the base register before the
+          store itself, so that the common idiom of "str rX, [sp,
+          #-4]!" (store rX at sp-4, then do new sp = sp-4, a.k.a "push
+          rX") doesn't cause Memcheck to complain that the access is
+          below the stack pointer.  Also, not updating sp before the
+          store confuses Valgrind's dynamic stack-extending logic.  So
+          do it before the store.  Hence we need to snarf the store
+          data before doing the basereg update. */
+
+        /* get hold of the data to be stored */
+        IRTemp rDt = newTemp(Ity_I32);
+        assign(rDt, getIRegA(rD));
+
+        /* Update Rn if necessary. */
+        switch (summary & 0x0F) {
+           case 2: case 3:
+              putIRegA( rN, mkexpr(eaT), condT, Ijk_Boring );
+              break;
+        }
+
+        /* generate the transfer */
+        if (bB == 0) { // word store
+           storeGuardedLE( mkexpr(taT), mkexpr(rDt), condT );
+        } else { // byte store
+           vassert(bB == 1);
+           storeGuardedLE( mkexpr(taT), unop(Iop_32to8, mkexpr(rDt)), condT );
+        }
+
+     } else {
+        /* Load */
+        vassert(bL == 1);
+
+        /* generate the transfer */
+        if (bB == 0) { // word load
+           IRTemp jk = Ijk_Boring;
+           /* According to the Cortex A8 TRM Sec. 5.2.1, LDR(1) with r13 as the
+               base register and PC as the destination register is a return for
+               purposes of branch prediction.
+              The ARM ARM Sec. C9.10.1 further specifies that it must use a
+               post-increment by immediate addressing mode to be counted in
+               event 0x0E (Procedure return).*/
+           if (rN == 13 && summary == (3 | 16) && bB == 0) {
+              jk = Ijk_Ret;
+           }
+           IRTemp tD = newTemp(Ity_I32);
+           loadGuardedLE( tD, ILGop_Ident32,
+                          mkexpr(taT), llGetIReg(rD), condT );
+           /* "rD == 15 ? condT : IRTemp_INVALID": simply
+              IRTemp_INVALID would be correct in all cases here, and
+              for the non-r15 case it generates better code, by
+              avoiding two tests of the cond (since it is already
+              tested by loadGuardedLE).  However, the logic at the end
+              of this function, that deals with writes to r15, has an
+              optimisation which depends on seeing whether or not the
+              write is conditional.  Hence in this particular case we
+              let it "see" the guard condition. */
+           putIRegA( rD, mkexpr(tD),
+                     rD == 15 ? condT : IRTemp_INVALID, jk );
+        } else { // byte load
+           vassert(bB == 1);
+           IRTemp tD = newTemp(Ity_I32);
+           loadGuardedLE( tD, ILGop_8Uto32, mkexpr(taT), llGetIReg(rD), condT );
+           /* No point in similar 3rd arg complexity here, since we
+              can't sanely write anything to r15 like this. */
+           putIRegA( rD, mkexpr(tD), IRTemp_INVALID, Ijk_Boring );
+        }
+
+        /* Update Rn if necessary. */
+        switch (summary & 0x0F) {
+           case 2: case 3:
+              // should be assured by logic above:
+              if (bL == 1)
+                 vassert(rD != rN); /* since we just wrote rD */
+              putIRegA( rN, mkexpr(eaT), condT, Ijk_Boring );
+              break;
+        }
+     }
+ 
+     switch (summary & 0x0F) {
+        case 1:  DIP("%sr%s%s r%u, %s\n",
+                     bL == 0 ? "st" : "ld",
+                     bB == 0 ? "" : "b", nCC(INSN_COND), rD, dis_buf);
+                 break;
+        case 2:  DIP("%sr%s%s r%u, %s! (at-EA-then-Rn=EA)\n",
+                     bL == 0 ? "st" : "ld",
+                     bB == 0 ? "" : "b", nCC(INSN_COND), rD, dis_buf);
+                 break;
+        case 3:  DIP("%sr%s%s r%u, %s! (at-Rn-then-Rn=EA)\n",
+                     bL == 0 ? "st" : "ld",
+                     bB == 0 ? "" : "b", nCC(INSN_COND), rD, dis_buf);
+                 break;
+        default: vassert(0);
+     }
+
+     /* XXX deal with alignment constraints */
+
+     goto decode_success;
+
+     /* Complications:
+
+        For all loads: if the Amode specifies base register
+        writeback, and the same register is specified for Rd and Rn,
+        the results are UNPREDICTABLE.
+
+        For all loads and stores: if R15 is written, branch to
+        that address afterwards.
+
+        STRB: straightforward
+        LDRB: loaded data is zero extended
+        STR:  lowest 2 bits of address are ignored
+        LDR:  if the lowest 2 bits of the address are nonzero
+              then the loaded value is rotated right by 8 * the lowest 2 bits
+     */
+   }
+
+  after_load_store_ubyte_or_word:
+
+   /* --------------------- Load/store (sbyte & hword) -------- */
+   // LDRH LDRSH STRH LDRSB
+   /*                 31   27   23   19 15 11   7    3     # highest bit
+                        28   24   20 16 12    8    4    0
+      A5-36   1 | 16  cond 0001 U10L Rn Rd im4h 1SH1 im4l
+      A5-38   1 | 32  cond 0001 U00L Rn Rd 0000 1SH1 Rm
+      A5-40   2 | 16  cond 0001 U11L Rn Rd im4h 1SH1 im4l
+      A5-42   2 | 32  cond 0001 U01L Rn Rd 0000 1SH1 Rm
+      A5-44   3 | 16  cond 0000 U10L Rn Rd im4h 1SH1 im4l
+      A5-46   3 | 32  cond 0000 U00L Rn Rd 0000 1SH1 Rm
+   */
+   /* case coding:
+             1   at-ea               (access at ea)
+             2   at-ea-then-upd      (access at ea, then Rn = ea)
+             3   at-Rn-then-upd      (access at Rn, then Rn = ea)
+      ea coding
+             16  Rn +/- imm8
+             32  Rn +/- Rm
+   */
+   /* Quickly skip over all of this for hopefully most instructions */
+   if ((INSN(27,24) & BITS4(1,1,1,0)) != BITS4(0,0,0,0))
+      goto after_load_store_sbyte_or_hword;
+
+   /* Check the "1SH1" thing. */
+   if ((INSN(7,4) & BITS4(1,0,0,1)) != BITS4(1,0,0,1))
+      goto after_load_store_sbyte_or_hword;
+
+   summary = 0;
+
+   /**/ if (INSN(27,24) == BITS4(0,0,0,1) && INSN(22,21) == BITS2(1,0)) {
+      summary = 1 | 16;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,1) && INSN(22,21) == BITS2(0,0)) {
+      summary = 1 | 32;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,1) && INSN(22,21) == BITS2(1,1)) {
+      summary = 2 | 16;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,1) && INSN(22,21) == BITS2(0,1)) {
+      summary = 2 | 32;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,0) && INSN(22,21) == BITS2(1,0)) {
+      summary = 3 | 16;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,0) && INSN(22,21) == BITS2(0,0)) {
+      summary = 3 | 32;
+   }
+   else goto after_load_store_sbyte_or_hword;
+
+   { UInt rN   = (insn >> 16) & 0xF; /* 19:16 */
+     UInt rD   = (insn >> 12) & 0xF; /* 15:12 */
+     UInt rM   = (insn >> 0)  & 0xF; /*  3:0  */
+     UInt bU   = (insn >> 23) & 1;   /* 23 U=1 offset+, U=0 offset- */
+     UInt bL   = (insn >> 20) & 1;   /* 20 L=1 load, L=0 store */
+     UInt bH   = (insn >> 5) & 1;    /* H=1 halfword, H=0 byte */
+     UInt bS   = (insn >> 6) & 1;    /* S=1 signed, S=0 unsigned */
+     UInt imm8 = ((insn >> 4) & 0xF0) | (insn & 0xF); /* 11:8, 3:0 */
+
+     /* Skip combinations that are either meaningless or already
+        handled by main word-or-unsigned-byte load-store
+        instructions. */
+     if (bS == 0 && bH == 0) /* "unsigned byte" */
+        goto after_load_store_sbyte_or_hword;
+     if (bS == 1 && bL == 0) /* "signed store" */
+        goto after_load_store_sbyte_or_hword;
+
+     /* Require 11:8 == 0 for Rn +/- Rm cases */
+     if ((summary & 32) != 0 && (imm8 & 0xF0) != 0)
+        goto after_load_store_sbyte_or_hword;
+
+     /* Skip some invalid cases, which would lead to two competing
+        updates to the same register, or which are otherwise
+        disallowed by the spec. */
+     switch (summary) {
+        case 1 | 16:
+           break;
+        case 1 | 32: 
+           if (rM == 15) goto after_load_store_sbyte_or_hword;
+           break;
+        case 2 | 16: case 3 | 16:
+           if (rN == 15) goto after_load_store_sbyte_or_hword;
+           if (bL == 1 && rN == rD) goto after_load_store_sbyte_or_hword;
+           break;
+        case 2 | 32: case 3 | 32:
+           if (rM == 15) goto after_load_store_sbyte_or_hword;
+           if (rN == 15) goto after_load_store_sbyte_or_hword;
+           if (rN == rM) goto after_load_store_sbyte_or_hword;
+           if (bL == 1 && rN == rD) goto after_load_store_sbyte_or_hword;
+           break;
+        default:
+           vassert(0);
+     }
+
+     /* If this is a branch, make it unconditional at this point.
+        Doing conditional branches in-line is too complex (for now).
+        Note that you'd have to be insane to use any of these loads to
+        do a branch, since they only load 16 bits at most, but we
+        handle it just in case. */
+     if (bL == 1 && rD == 15 && condT != IRTemp_INVALID) {
+        // go uncond
+        mk_skip_over_A32_if_cond_is_false( condT );
+        condT = IRTemp_INVALID;
+        // now uncond
+     }
+
+     /* compute the effective address.  Bind it to a tmp since we
+        may need to use it twice. */
+     IRExpr* eaE = NULL;
+     switch (summary & 0xF0) {
+        case 16:
+           eaE = mk_EA_reg_plusminus_imm8( rN, bU, imm8, dis_buf );
+           break;
+        case 32:
+           eaE = mk_EA_reg_plusminus_reg( rN, bU, rM, dis_buf );
+           break;
+     }
+     vassert(eaE);
+     IRTemp eaT = newTemp(Ity_I32);
+     assign(eaT, eaE);
+
+     /* get the old Rn value */
+     IRTemp rnT = newTemp(Ity_I32);
+     assign(rnT, getIRegA(rN));
+
+     /* decide on the transfer address */
+     IRTemp taT = IRTemp_INVALID;
+     switch (summary & 0x0F) {
+        case 1: case 2: taT = eaT; break;
+        case 3:         taT = rnT; break;
+     }
+     vassert(taT != IRTemp_INVALID);
+
+     /* ll previous value of rD, for dealing with conditional loads */
+     IRTemp llOldRd = newTemp(Ity_I32);
+     assign(llOldRd, llGetIReg(rD));
+
+     /* halfword store  H 1  L 0  S 0
+        uhalf load      H 1  L 1  S 0
+        shalf load      H 1  L 1  S 1
+        sbyte load      H 0  L 1  S 1
+     */
+     const HChar* name = NULL;
+     /* generate the transfer */
+     /**/ if (bH == 1 && bL == 0 && bS == 0) { // halfword store
+        storeGuardedLE( mkexpr(taT),
+                        unop(Iop_32to16, getIRegA(rD)), condT );
+        name = "strh";
+     }
+     else if (bH == 1 && bL == 1 && bS == 0) { // uhalf load
+        IRTemp newRd = newTemp(Ity_I32);
+        loadGuardedLE( newRd, ILGop_16Uto32, 
+                       mkexpr(taT), mkexpr(llOldRd), condT );
+        putIRegA( rD, mkexpr(newRd), IRTemp_INVALID, Ijk_Boring );
+        name = "ldrh";
+     }
+     else if (bH == 1 && bL == 1 && bS == 1) { // shalf load
+        IRTemp newRd = newTemp(Ity_I32);
+        loadGuardedLE( newRd, ILGop_16Sto32, 
+                       mkexpr(taT), mkexpr(llOldRd), condT );
+        putIRegA( rD, mkexpr(newRd), IRTemp_INVALID, Ijk_Boring );
+        name = "ldrsh";
+     }
+     else if (bH == 0 && bL == 1 && bS == 1) { // sbyte load
+        IRTemp newRd = newTemp(Ity_I32);
+        loadGuardedLE( newRd, ILGop_8Sto32, 
+                       mkexpr(taT), mkexpr(llOldRd), condT );
+        putIRegA( rD, mkexpr(newRd), IRTemp_INVALID, Ijk_Boring );
+        name = "ldrsb";
+     }
+     else
+        vassert(0); // should be assured by logic above
+
+     /* Update Rn if necessary. */
+     switch (summary & 0x0F) {
+        case 2: case 3:
+           // should be assured by logic above:
+           if (bL == 1)
+              vassert(rD != rN); /* since we just wrote rD */
+           putIRegA( rN, mkexpr(eaT), condT, Ijk_Boring );
+           break;
+     }
+
+     switch (summary & 0x0F) {
+        case 1:  DIP("%s%s r%u, %s\n", name, nCC(INSN_COND), rD, dis_buf);
+                 break;
+        case 2:  DIP("%s%s r%u, %s! (at-EA-then-Rn=EA)\n",
+                     name, nCC(INSN_COND), rD, dis_buf);
+                 break;
+        case 3:  DIP("%s%s r%u, %s! (at-Rn-then-Rn=EA)\n",
+                     name, nCC(INSN_COND), rD, dis_buf);
+                 break;
+        default: vassert(0);
+     }
+
+     /* XXX deal with alignment constraints */
+
+     goto decode_success;
+
+     /* Complications:
+
+        For all loads: if the Amode specifies base register
+        writeback, and the same register is specified for Rd and Rn,
+        the results are UNPREDICTABLE.
+
+        For all loads and stores: if R15 is written, branch to
+        that address afterwards.
+
+        Misaligned halfword stores => Unpredictable
+        Misaligned halfword loads  => Unpredictable
+     */
+   }
+
+  after_load_store_sbyte_or_hword:
+
+   /* --------------------- Load/store multiple -------------- */
+   // LD/STMIA LD/STMIB LD/STMDA LD/STMDB
+   // Remarkably complex and difficult to get right
+   // match 27:20 as 100XX0WL
+   if (BITS8(1,0,0,0,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,0,0,1,0,0))) {
+      // A5-50 LD/STMIA  cond 1000 10WL Rn RegList
+      // A5-51 LD/STMIB  cond 1001 10WL Rn RegList
+      // A5-53 LD/STMDA  cond 1000 00WL Rn RegList
+      // A5-53 LD/STMDB  cond 1001 00WL Rn RegList
+      //                   28   24   20 16       0
+
+      UInt bINC    = (insn >> 23) & 1;
+      UInt bBEFORE = (insn >> 24) & 1;
+
+      UInt bL      = (insn >> 20) & 1;  /* load=1, store=0 */
+      UInt bW      = (insn >> 21) & 1;  /* Rn wback=1, no wback=0 */
+      UInt rN      = (insn >> 16) & 0xF;
+      UInt regList = insn & 0xFFFF;
+      /* Skip some invalid cases, which would lead to two competing
+         updates to the same register, or which are otherwise
+         disallowed by the spec.  Note the test above has required
+         that S == 0, since that looks like a kernel-mode only thing.
+         Done by forcing the real pattern, viz 100XXSWL to actually be
+         100XX0WL. */
+      if (rN == 15) goto after_load_store_multiple;
+      // reglist can't be empty
+      if (regList == 0) goto after_load_store_multiple;
+      // if requested to writeback Rn, and this is a load instruction,
+      // then Rn can't appear in RegList, since we'd have two competing
+      // new values for Rn.  We do however accept this case for store
+      // instructions.
+      if (bW == 1 && bL == 1 && ((1 << rN) & regList) > 0)
+         goto after_load_store_multiple;
+
+      /* Now, we can't do a conditional load or store, since that very
+         likely will generate an exception.  So we have to take a side
+         exit at this point if the condition is false. */
+      if (condT != IRTemp_INVALID) {
+         mk_skip_over_A32_if_cond_is_false( condT );
+         condT = IRTemp_INVALID;
+      }
+
+      /* Ok, now we're unconditional.  Generate the IR. */
+      mk_ldm_stm( True/*arm*/, rN, bINC, bBEFORE, bW, bL, regList );
+
+      DIP("%sm%c%c%s r%u%s, {0x%04x}\n",
+          bL == 1 ? "ld" : "st", bINC ? 'i' : 'd', bBEFORE ? 'b' : 'a',
+          nCC(INSN_COND),
+          rN, bW ? "!" : "", regList);
+
+      goto decode_success;
+   }
+
+  after_load_store_multiple:
+
+   /* --------------------- Control flow --------------------- */
+   // B, BL (Branch, or Branch-and-Link, to immediate offset)
+   //
+   if (BITS8(1,0,1,0,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,0,0,0,0,0))) {
+      UInt link   = (insn >> 24) & 1;
+      UInt uimm24 = insn & ((1<<24)-1);
+      Int  simm24 = (Int)uimm24;
+      UInt dst    = guest_R15_curr_instr_notENC + 8
+                    + (((simm24 << 8) >> 8) << 2);
+      IRJumpKind jk = link ? Ijk_Call : Ijk_Boring;
+      if (link) {
+         putIRegA(14, mkU32(guest_R15_curr_instr_notENC + 4),
+                      condT, Ijk_Boring);
+      }
+      if (condT == IRTemp_INVALID) {
+         /* unconditional transfer to 'dst'.  See if we can simply
+            continue tracing at the destination. */
+         if (resteerOkFn( callback_opaque, dst )) {
+            /* yes */
+            dres.whatNext   = Dis_ResteerU;
+            dres.continueAt = dst;
+         } else {
+            /* no; terminate the SB at this point. */
+            llPutIReg(15, mkU32(dst));
+            dres.jk_StopHere = jk;
+            dres.whatNext    = Dis_StopHere;
+         }
+         DIP("b%s 0x%x\n", link ? "l" : "", dst);
+      } else {
+         /* conditional transfer to 'dst' */
+         const HChar* comment = "";
+
+         /* First see if we can do some speculative chasing into one
+            arm or the other.  Be conservative and only chase if
+            !link, that is, this is a normal conditional branch to a
+            known destination. */
+         if (!link
+             && resteerCisOk
+             && vex_control.guest_chase_cond
+             && dst < guest_R15_curr_instr_notENC
+             && resteerOkFn( callback_opaque, dst) ) {
+            /* Speculation: assume this backward branch is taken.  So
+               we need to emit a side-exit to the insn following this
+               one, on the negation of the condition, and continue at
+               the branch target address (dst). */
+            stmt( IRStmt_Exit( unop(Iop_Not1,
+                                    unop(Iop_32to1, mkexpr(condT))),
+                               Ijk_Boring,
+                               IRConst_U32(guest_R15_curr_instr_notENC+4),
+                               OFFB_R15T ));
+            dres.whatNext   = Dis_ResteerC;
+            dres.continueAt = (Addr32)dst;
+            comment = "(assumed taken)";
+         }
+         else
+         if (!link
+             && resteerCisOk
+             && vex_control.guest_chase_cond
+             && dst >= guest_R15_curr_instr_notENC
+             && resteerOkFn( callback_opaque, 
+                             guest_R15_curr_instr_notENC+4) ) {
+            /* Speculation: assume this forward branch is not taken.
+               So we need to emit a side-exit to dst (the dest) and
+               continue disassembling at the insn immediately
+               following this one. */
+            stmt( IRStmt_Exit( unop(Iop_32to1, mkexpr(condT)),
+                               Ijk_Boring,
+                               IRConst_U32(dst),
+                               OFFB_R15T ));
+            dres.whatNext   = Dis_ResteerC;
+            dres.continueAt = guest_R15_curr_instr_notENC+4;
+            comment = "(assumed not taken)";
+         }
+         else {
+            /* Conservative default translation - end the block at
+               this point. */
+            stmt( IRStmt_Exit( unop(Iop_32to1, mkexpr(condT)),
+                               jk, IRConst_U32(dst), OFFB_R15T ));
+            llPutIReg(15, mkU32(guest_R15_curr_instr_notENC + 4));
+            dres.jk_StopHere = Ijk_Boring;
+            dres.whatNext    = Dis_StopHere;
+         }
+         DIP("b%s%s 0x%x %s\n", link ? "l" : "", nCC(INSN_COND),
+             dst, comment);
+      }
+      goto decode_success;
+   }
+
+   // B, BL (Branch, or Branch-and-Link, to a register)
+   // NB: interworking branch
+   if (INSN(27,20) == BITS8(0,0,0,1,0,0,1,0)
+       && INSN(19,12) == BITS8(1,1,1,1,1,1,1,1)
+       && (INSN(11,4) == BITS8(1,1,1,1,0,0,1,1)
+           || INSN(11,4) == BITS8(1,1,1,1,0,0,0,1))) {
+      IRTemp  dst = newTemp(Ity_I32);
+      UInt    link = (INSN(11,4) >> 1) & 1;
+      UInt    rM   = INSN(3,0);
+      // we don't decode the case (link && rM == 15), as that's
+      // Unpredictable.
+      if (!(link && rM == 15)) {
+         if (condT != IRTemp_INVALID) {
+            mk_skip_over_A32_if_cond_is_false( condT );
+         }
+         // rM contains an interworking address exactly as we require
+         // (with continuation CPSR.T in bit 0), so we can use it
+         // as-is, with no masking.
+         assign( dst, getIRegA(rM) );
+         if (link) {
+            putIRegA( 14, mkU32(guest_R15_curr_instr_notENC + 4),
+                      IRTemp_INVALID/*because AL*/, Ijk_Boring );
+         }
+         llPutIReg(15, mkexpr(dst));
+         dres.jk_StopHere = link ? Ijk_Call
+                                 : (rM == 14 ? Ijk_Ret : Ijk_Boring);
+         dres.whatNext    = Dis_StopHere;
+         if (condT == IRTemp_INVALID) {
+            DIP("b%sx r%u\n", link ? "l" : "", rM);
+         } else {
+            DIP("b%sx%s r%u\n", link ? "l" : "", nCC(INSN_COND), rM);
+         }
+         goto decode_success;
+      }
+      /* else: (link && rM == 15): just fall through */
+   }
+
+   /* --- NB: ARM interworking branches are in NV space, hence
+      are handled elsewhere by decode_NV_instruction.
+      ---
+   */
+
+   /* --------------------- Clz --------------------- */
+   // CLZ
+   if (INSN(27,20) == BITS8(0,0,0,1,0,1,1,0)
+       && INSN(19,16) == BITS4(1,1,1,1)
+       && INSN(11,4) == BITS8(1,1,1,1,0,0,0,1)) {
+      UInt rD = INSN(15,12);
+      UInt rM = INSN(3,0);
+      IRTemp arg = newTemp(Ity_I32);
+      IRTemp res = newTemp(Ity_I32);
+      assign(arg, getIRegA(rM));
+      assign(res, IRExpr_ITE(
+                     binop(Iop_CmpEQ32, mkexpr(arg), mkU32(0)),
+                     mkU32(32),
+                     unop(Iop_Clz32, mkexpr(arg))
+            ));
+      putIRegA(rD, mkexpr(res), condT, Ijk_Boring);
+      DIP("clz%s r%u, r%u\n", nCC(INSN_COND), rD, rM);
+      goto decode_success;
+   }
+
+   /* --------------------- Mul etc --------------------- */
+   // MUL
+   if (BITS8(0,0,0,0,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,1,1,1,0))
+       && INSN(15,12) == BITS4(0,0,0,0)
+       && INSN(7,4) == BITS4(1,0,0,1)) {
+      UInt bitS = (insn >> 20) & 1; /* 20:20 */
+      UInt rD = INSN(19,16);
+      UInt rS = INSN(11,8);
+      UInt rM = INSN(3,0);
+      if (rD == 15 || rM == 15 || rS == 15) {
+         /* Unpredictable; don't decode; fall through */
+      } else {
+         IRTemp argL = newTemp(Ity_I32);
+         IRTemp argR = newTemp(Ity_I32);
+         IRTemp res  = newTemp(Ity_I32);
+         IRTemp oldC = IRTemp_INVALID;
+         IRTemp oldV = IRTemp_INVALID;
+         assign( argL, getIRegA(rM));
+         assign( argR, getIRegA(rS));
+         assign( res, binop(Iop_Mul32, mkexpr(argL), mkexpr(argR)) );
+         if (bitS) {
+            oldC = newTemp(Ity_I32);
+            assign(oldC, mk_armg_calculate_flag_c());
+            oldV = newTemp(Ity_I32);
+            assign(oldV, mk_armg_calculate_flag_v());
+         }
+         // now update guest state
+         putIRegA( rD, mkexpr(res), condT, Ijk_Boring );
+         if (bitS) {
+            IRTemp pair = newTemp(Ity_I32);
+            assign( pair, binop(Iop_Or32,
+                                binop(Iop_Shl32, mkexpr(oldC), mkU8(1)),
+                                mkexpr(oldV)) );
+            setFlags_D1_ND( ARMG_CC_OP_MUL, res, pair, condT );
+         }
+         DIP("mul%c%s r%u, r%u, r%u\n",
+             bitS ? 's' : ' ', nCC(INSN_COND), rD, rM, rS);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* --------------------- Integer Divides --------------------- */
+   // SDIV
+   if (BITS8(0,1,1,1,0,0,0,1) == INSN(27,20)
+       && INSN(15,12) == BITS4(1,1,1,1)
+       && INSN(7,4) == BITS4(0,0,0,1)) {
+      UInt rD = INSN(19,16);
+      UInt rM = INSN(11,8);
+      UInt rN = INSN(3,0);
+      if (rD == 15 || rM == 15 || rN == 15) {
+         /* Unpredictable; don't decode; fall through */
+      } else {
+         IRTemp res  = newTemp(Ity_I32);
+         IRTemp argL = newTemp(Ity_I32);
+         IRTemp argR = newTemp(Ity_I32);
+         assign(argL, getIRegA(rN));
+         assign(argR, getIRegA(rM));
+         assign(res, binop(Iop_DivS32, mkexpr(argL), mkexpr(argR)));
+         putIRegA(rD, mkexpr(res), condT, Ijk_Boring);
+         DIP("sdiv r%u, r%u, r%u\n", rD, rN, rM);
+         goto decode_success;
+      }
+    }
+
+   // UDIV
+   if (BITS8(0,1,1,1,0,0,1,1) == INSN(27,20)
+       && INSN(15,12) == BITS4(1,1,1,1)
+       && INSN(7,4) == BITS4(0,0,0,1)) {
+      UInt rD = INSN(19,16);
+      UInt rM = INSN(11,8);
+      UInt rN = INSN(3,0);
+      if (rD == 15 || rM == 15 || rN == 15) {
+         /* Unpredictable; don't decode; fall through */
+      } else {
+         IRTemp res  = newTemp(Ity_I32);
+         IRTemp argL = newTemp(Ity_I32);
+         IRTemp argR = newTemp(Ity_I32);
+         assign(argL, getIRegA(rN));
+         assign(argR, getIRegA(rM));
+         assign(res, binop(Iop_DivU32, mkexpr(argL), mkexpr(argR)));
+         putIRegA(rD, mkexpr(res), condT, Ijk_Boring);
+         DIP("udiv r%u, r%u, r%u\n", rD, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   // MLA, MLS
+   if (BITS8(0,0,0,0,0,0,1,0) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,0))
+       && INSN(7,4) == BITS4(1,0,0,1)) {
+      UInt bitS  = (insn >> 20) & 1; /* 20:20 */
+      UInt isMLS = (insn >> 22) & 1; /* 22:22 */
+      UInt rD = INSN(19,16);
+      UInt rN = INSN(15,12);
+      UInt rS = INSN(11,8);
+      UInt rM = INSN(3,0);
+      if (bitS == 1 && isMLS == 1) {
+         /* This isn't allowed (MLS that sets flags).  don't decode;
+            fall through */
+      }
+      else
+      if (rD == 15 || rM == 15 || rS == 15 || rN == 15) {
+         /* Unpredictable; don't decode; fall through */
+      } else {
+         IRTemp argL = newTemp(Ity_I32);
+         IRTemp argR = newTemp(Ity_I32);
+         IRTemp argP = newTemp(Ity_I32);
+         IRTemp res  = newTemp(Ity_I32);
+         IRTemp oldC = IRTemp_INVALID;
+         IRTemp oldV = IRTemp_INVALID;
+         assign( argL, getIRegA(rM));
+         assign( argR, getIRegA(rS));
+         assign( argP, getIRegA(rN));
+         assign( res, binop(isMLS ? Iop_Sub32 : Iop_Add32,
+                            mkexpr(argP),
+                            binop(Iop_Mul32, mkexpr(argL), mkexpr(argR)) ));
+         if (bitS) {
+            vassert(!isMLS); // guaranteed above
+            oldC = newTemp(Ity_I32);
+            assign(oldC, mk_armg_calculate_flag_c());
+            oldV = newTemp(Ity_I32);
+            assign(oldV, mk_armg_calculate_flag_v());
+         }
+         // now update guest state
+         putIRegA( rD, mkexpr(res), condT, Ijk_Boring );
+         if (bitS) {
+            IRTemp pair = newTemp(Ity_I32);
+            assign( pair, binop(Iop_Or32,
+                                binop(Iop_Shl32, mkexpr(oldC), mkU8(1)),
+                                mkexpr(oldV)) );
+            setFlags_D1_ND( ARMG_CC_OP_MUL, res, pair, condT );
+         }
+         DIP("ml%c%c%s r%u, r%u, r%u, r%u\n",
+             isMLS ? 's' : 'a', bitS ? 's' : ' ',
+             nCC(INSN_COND), rD, rM, rS, rN);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   // SMULL, UMULL
+   if (BITS8(0,0,0,0,1,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,0))
+       && INSN(7,4) == BITS4(1,0,0,1)) {
+      UInt bitS = (insn >> 20) & 1; /* 20:20 */
+      UInt rDhi = INSN(19,16);
+      UInt rDlo = INSN(15,12);
+      UInt rS   = INSN(11,8);
+      UInt rM   = INSN(3,0);
+      UInt isS  = (INSN(27,20) >> 2) & 1; /* 22:22 */
+      if (rDhi == 15 || rDlo == 15 || rM == 15 || rS == 15 || rDhi == rDlo)  {
+         /* Unpredictable; don't decode; fall through */
+      } else {
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         IRTemp res   = newTemp(Ity_I64);
+         IRTemp resHi = newTemp(Ity_I32);
+         IRTemp resLo = newTemp(Ity_I32);
+         IRTemp oldC  = IRTemp_INVALID;
+         IRTemp oldV  = IRTemp_INVALID;
+         IROp   mulOp = isS ? Iop_MullS32 : Iop_MullU32;
+         assign( argL, getIRegA(rM));
+         assign( argR, getIRegA(rS));
+         assign( res, binop(mulOp, mkexpr(argL), mkexpr(argR)) );
+         assign( resHi, unop(Iop_64HIto32, mkexpr(res)) );
+         assign( resLo, unop(Iop_64to32, mkexpr(res)) );
+         if (bitS) {
+            oldC = newTemp(Ity_I32);
+            assign(oldC, mk_armg_calculate_flag_c());
+            oldV = newTemp(Ity_I32);
+            assign(oldV, mk_armg_calculate_flag_v());
+         }
+         // now update guest state
+         putIRegA( rDhi, mkexpr(resHi), condT, Ijk_Boring );
+         putIRegA( rDlo, mkexpr(resLo), condT, Ijk_Boring );
+         if (bitS) {
+            IRTemp pair = newTemp(Ity_I32);
+            assign( pair, binop(Iop_Or32,
+                                binop(Iop_Shl32, mkexpr(oldC), mkU8(1)),
+                                mkexpr(oldV)) );
+            setFlags_D1_D2_ND( ARMG_CC_OP_MULL, resLo, resHi, pair, condT );
+         }
+         DIP("%cmull%c%s r%u, r%u, r%u, r%u\n",
+             isS ? 's' : 'u', bitS ? 's' : ' ',
+             nCC(INSN_COND), rDlo, rDhi, rM, rS);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   // SMLAL, UMLAL
+   if (BITS8(0,0,0,0,1,0,1,0) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,0))
+       && INSN(7,4) == BITS4(1,0,0,1)) {
+      UInt bitS = (insn >> 20) & 1; /* 20:20 */
+      UInt rDhi = INSN(19,16);
+      UInt rDlo = INSN(15,12);
+      UInt rS   = INSN(11,8);
+      UInt rM   = INSN(3,0);
+      UInt isS  = (INSN(27,20) >> 2) & 1; /* 22:22 */
+      if (rDhi == 15 || rDlo == 15 || rM == 15 || rS == 15 || rDhi == rDlo)  {
+         /* Unpredictable; don't decode; fall through */
+      } else {
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         IRTemp old   = newTemp(Ity_I64);
+         IRTemp res   = newTemp(Ity_I64);
+         IRTemp resHi = newTemp(Ity_I32);
+         IRTemp resLo = newTemp(Ity_I32);
+         IRTemp oldC  = IRTemp_INVALID;
+         IRTemp oldV  = IRTemp_INVALID;
+         IROp   mulOp = isS ? Iop_MullS32 : Iop_MullU32;
+         assign( argL, getIRegA(rM));
+         assign( argR, getIRegA(rS));
+         assign( old, binop(Iop_32HLto64, getIRegA(rDhi), getIRegA(rDlo)) );
+         assign( res, binop(Iop_Add64,
+                            mkexpr(old),
+                            binop(mulOp, mkexpr(argL), mkexpr(argR))) );
+         assign( resHi, unop(Iop_64HIto32, mkexpr(res)) );
+         assign( resLo, unop(Iop_64to32, mkexpr(res)) );
+         if (bitS) {
+            oldC = newTemp(Ity_I32);
+            assign(oldC, mk_armg_calculate_flag_c());
+            oldV = newTemp(Ity_I32);
+            assign(oldV, mk_armg_calculate_flag_v());
+         }
+         // now update guest state
+         putIRegA( rDhi, mkexpr(resHi), condT, Ijk_Boring );
+         putIRegA( rDlo, mkexpr(resLo), condT, Ijk_Boring );
+         if (bitS) {
+            IRTemp pair = newTemp(Ity_I32);
+            assign( pair, binop(Iop_Or32,
+                                binop(Iop_Shl32, mkexpr(oldC), mkU8(1)),
+                                mkexpr(oldV)) );
+            setFlags_D1_D2_ND( ARMG_CC_OP_MULL, resLo, resHi, pair, condT );
+         }
+         DIP("%cmlal%c%s r%u, r%u, r%u, r%u\n",
+             isS ? 's' : 'u', bitS ? 's' : ' ', nCC(INSN_COND),
+             rDlo, rDhi, rM, rS);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   // UMAAL
+   if (BITS8(0,0,0,0,0,1,0,0) == INSN(27,20) && INSN(7,4) == BITS4(1,0,0,1)) {
+      UInt rDhi = INSN(19,16);
+      UInt rDlo = INSN(15,12);
+      UInt rM   = INSN(11,8);
+      UInt rN   = INSN(3,0);
+      if (rDlo == 15 || rDhi == 15 || rN == 15 || rM == 15 || rDhi == rDlo)  {
+         /* Unpredictable; don't decode; fall through */
+      } else {
+         IRTemp argN   = newTemp(Ity_I32);
+         IRTemp argM   = newTemp(Ity_I32);
+         IRTemp argDhi = newTemp(Ity_I32);
+         IRTemp argDlo = newTemp(Ity_I32);
+         IRTemp res    = newTemp(Ity_I64);
+         IRTemp resHi  = newTemp(Ity_I32);
+         IRTemp resLo  = newTemp(Ity_I32);
+         assign( argN,   getIRegA(rN) );
+         assign( argM,   getIRegA(rM) );
+         assign( argDhi, getIRegA(rDhi) );
+         assign( argDlo, getIRegA(rDlo) );
+         assign( res, 
+                 binop(Iop_Add64,
+                       binop(Iop_Add64,
+                             binop(Iop_MullU32, mkexpr(argN), mkexpr(argM)),
+                             unop(Iop_32Uto64, mkexpr(argDhi))),
+                       unop(Iop_32Uto64, mkexpr(argDlo))) );
+         assign( resHi, unop(Iop_64HIto32, mkexpr(res)) );
+         assign( resLo, unop(Iop_64to32, mkexpr(res)) );
+         // now update guest state
+         putIRegA( rDhi, mkexpr(resHi), condT, Ijk_Boring );
+         putIRegA( rDlo, mkexpr(resLo), condT, Ijk_Boring );
+         DIP("umaal %s r%u, r%u, r%u, r%u\n",
+             nCC(INSN_COND), rDlo, rDhi, rN, rM);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* --------------------- Msr etc --------------------- */
+
+   // MSR apsr, #imm
+   if (INSN(27,20) == BITS8(0,0,1,1,0,0,1,0)
+       && INSN(17,12) == BITS6(0,0,1,1,1,1)) {
+      UInt write_ge    = INSN(18,18);
+      UInt write_nzcvq = INSN(19,19);
+      if (write_nzcvq || write_ge) {
+         UInt   imm = (INSN(11,0) >> 0) & 0xFF;
+         UInt   rot = 2 * ((INSN(11,0) >> 8) & 0xF);
+         IRTemp immT = newTemp(Ity_I32);
+         vassert(rot <= 30);
+         imm = ROR32(imm, rot);
+         assign(immT, mkU32(imm));
+         desynthesise_APSR( write_nzcvq, write_ge, immT, condT );
+         DIP("msr%s cpsr%s%sf, #0x%08x\n", nCC(INSN_COND),
+             write_nzcvq ? "f" : "", write_ge ? "g" : "", imm);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   // MSR apsr, reg
+   if (INSN(27,20) == BITS8(0,0,0,1,0,0,1,0) 
+       && INSN(17,12) == BITS6(0,0,1,1,1,1)
+       && INSN(11,4) == BITS8(0,0,0,0,0,0,0,0)) {
+      UInt rN          = INSN(3,0);
+      UInt write_ge    = INSN(18,18);
+      UInt write_nzcvq = INSN(19,19);
+      if (rN != 15 && (write_nzcvq || write_ge)) {
+         IRTemp rNt = newTemp(Ity_I32);
+         assign(rNt, getIRegA(rN));
+         desynthesise_APSR( write_nzcvq, write_ge, rNt, condT );
+         DIP("msr%s cpsr_%s%s, r%u\n", nCC(INSN_COND),
+             write_nzcvq ? "f" : "", write_ge ? "g" : "", rN);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   // MRS rD, cpsr
+   if ((insn & 0x0FFF0FFF) == 0x010F0000) {
+      UInt rD   = INSN(15,12);
+      if (rD != 15) {
+         IRTemp apsr = synthesise_APSR();
+         putIRegA( rD, mkexpr(apsr), condT, Ijk_Boring );
+         DIP("mrs%s r%u, cpsr\n", nCC(INSN_COND), rD);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* --------------------- Svc --------------------- */
+   if (BITS8(1,1,1,1,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,0,0,0,0))) {
+      UInt imm24 = (insn >> 0) & 0xFFFFFF;
+      if (imm24 == 0) {
+         /* A syscall.  We can't do this conditionally, hence: */
+         if (condT != IRTemp_INVALID) {
+            mk_skip_over_A32_if_cond_is_false( condT );
+         }
+         // AL after here
+         llPutIReg(15, mkU32( guest_R15_curr_instr_notENC + 4 ));
+         dres.jk_StopHere = Ijk_Sys_syscall;
+         dres.whatNext    = Dis_StopHere;
+         DIP("svc%s #0x%08x\n", nCC(INSN_COND), imm24);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* ------------------------ swp ------------------------ */
+
+   // SWP, SWPB
+   if (BITS8(0,0,0,1,0,0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,0,0,0) == INSN(11,8)
+       && BITS4(1,0,0,1) == INSN(7,4)) {
+      UInt   rN   = INSN(19,16);
+      UInt   rD   = INSN(15,12);
+      UInt   rM   = INSN(3,0);
+      IRTemp tRn  = newTemp(Ity_I32);
+      IRTemp tNew = newTemp(Ity_I32);
+      IRTemp tOld = IRTemp_INVALID;
+      IRTemp tSC1 = newTemp(Ity_I1);
+      UInt   isB  = (insn >> 22) & 1;
+
+      if (rD == 15 || rN == 15 || rM == 15 || rN == rM || rN == rD) {
+         /* undecodable; fall through */
+      } else {
+         /* make unconditional */
+         if (condT != IRTemp_INVALID) {
+            mk_skip_over_A32_if_cond_is_false( condT );
+            condT = IRTemp_INVALID;
+         }
+         /* Ok, now we're unconditional.  Generate a LL-SC loop. */
+         assign(tRn, getIRegA(rN));
+         assign(tNew, getIRegA(rM));
+         if (isB) {
+            /* swpb */
+            tOld = newTemp(Ity_I8);
+            stmt( IRStmt_LLSC(Iend_LE, tOld, mkexpr(tRn),
+                              NULL/*=>isLL*/) );
+            stmt( IRStmt_LLSC(Iend_LE, tSC1, mkexpr(tRn),
+                              unop(Iop_32to8, mkexpr(tNew))) );
+         } else {
+            /* swp */
+            tOld = newTemp(Ity_I32);
+            stmt( IRStmt_LLSC(Iend_LE, tOld, mkexpr(tRn),
+                              NULL/*=>isLL*/) );
+            stmt( IRStmt_LLSC(Iend_LE, tSC1, mkexpr(tRn),
+                              mkexpr(tNew)) );
+         }
+         stmt( IRStmt_Exit(unop(Iop_Not1, mkexpr(tSC1)),
+                           /*Ijk_NoRedir*/Ijk_Boring,
+                           IRConst_U32(guest_R15_curr_instr_notENC),
+                           OFFB_R15T ));
+         putIRegA(rD, isB ? unop(Iop_8Uto32, mkexpr(tOld)) : mkexpr(tOld),
+                      IRTemp_INVALID, Ijk_Boring);
+         DIP("swp%s%s r%u, r%u, [r%u]\n",
+             isB ? "b" : "", nCC(INSN_COND), rD, rM, rN);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- ARMv6 instructions                                    -- */
+   /* ----------------------------------------------------------- */
+
+   /* ------------------- {ldr,str}ex{,b,h,d} ------------------- */
+
+   // LDREXD, LDREX, LDREXH, LDREXB
+   if (0x01900F9F == (insn & 0x0F900FFF)) {
+      UInt   rT    = INSN(15,12);
+      UInt   rN    = INSN(19,16);
+      IRType ty    = Ity_INVALID;
+      IROp   widen = Iop_INVALID;
+      const HChar* nm = NULL;
+      Bool   valid = True;
+      switch (INSN(22,21)) {
+         case 0: nm = "";  ty = Ity_I32; break;
+         case 1: nm = "d"; ty = Ity_I64; break;
+         case 2: nm = "b"; ty = Ity_I8;  widen = Iop_8Uto32; break;
+         case 3: nm = "h"; ty = Ity_I16; widen = Iop_16Uto32; break;
+         default: vassert(0);
+      }
+      if (ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) {
+         if (rT == 15 || rN == 15)
+            valid = False;
+      } else {
+         vassert(ty == Ity_I64);
+         if ((rT & 1) == 1 || rT == 14 || rN == 15)
+            valid = False;
+      }
+      if (valid) {
+         IRTemp res;
+         /* make unconditional */
+         if (condT != IRTemp_INVALID) {
+           mk_skip_over_A32_if_cond_is_false( condT );
+           condT = IRTemp_INVALID;
+         }
+         /* Ok, now we're unconditional.  Do the load. */
+         res = newTemp(ty);
+         // FIXME: assumes little-endian guest
+         stmt( IRStmt_LLSC(Iend_LE, res, getIRegA(rN),
+                           NULL/*this is a load*/) );
+         if (ty == Ity_I64) {
+            // FIXME: assumes little-endian guest
+            putIRegA(rT+0, unop(Iop_64to32, mkexpr(res)),
+                           IRTemp_INVALID, Ijk_Boring);
+            putIRegA(rT+1, unop(Iop_64HIto32, mkexpr(res)),
+                           IRTemp_INVALID, Ijk_Boring);
+            DIP("ldrex%s%s r%u, r%u, [r%u]\n",
+                nm, nCC(INSN_COND), rT+0, rT+1, rN);
+         } else {
+            putIRegA(rT, widen == Iop_INVALID
+                            ? mkexpr(res) : unop(widen, mkexpr(res)),
+                     IRTemp_INVALID, Ijk_Boring);
+            DIP("ldrex%s%s r%u, [r%u]\n", nm, nCC(INSN_COND), rT, rN);
+         }
+         goto decode_success;
+      }
+      /* undecodable; fall through */
+   }
+
+   // STREXD, STREX, STREXH, STREXB
+   if (0x01800F90 == (insn & 0x0F900FF0)) {
+      UInt   rT     = INSN(3,0);
+      UInt   rN     = INSN(19,16);
+      UInt   rD     = INSN(15,12);
+      IRType ty     = Ity_INVALID;
+      IROp   narrow = Iop_INVALID;
+      const HChar* nm = NULL;
+      Bool   valid  = True;
+      switch (INSN(22,21)) {
+         case 0: nm = "";  ty = Ity_I32; break;
+         case 1: nm = "d"; ty = Ity_I64; break;
+         case 2: nm = "b"; ty = Ity_I8;  narrow = Iop_32to8; break;
+         case 3: nm = "h"; ty = Ity_I16; narrow = Iop_32to16; break;
+         default: vassert(0);
+      }
+      if (ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) {
+         if (rD == 15 || rN == 15 || rT == 15
+             || rD == rN || rD == rT)
+            valid = False;
+      } else {
+         vassert(ty == Ity_I64);
+         if (rD == 15 || (rT & 1) == 1 || rT == 14 || rN == 15
+             || rD == rN || rD == rT || rD == rT+1)
+            valid = False;
+      }
+      if (valid) {
+         IRTemp resSC1, resSC32, data;
+         /* make unconditional */
+         if (condT != IRTemp_INVALID) {
+            mk_skip_over_A32_if_cond_is_false( condT );
+            condT = IRTemp_INVALID;
+         }
+         /* Ok, now we're unconditional.  Do the store. */
+         data = newTemp(ty);
+         assign(data,
+                ty == Ity_I64
+                   // FIXME: assumes little-endian guest
+                   ? binop(Iop_32HLto64, getIRegA(rT+1), getIRegA(rT+0))
+                   : narrow == Iop_INVALID
+                      ? getIRegA(rT)
+                      : unop(narrow, getIRegA(rT)));
+         resSC1 = newTemp(Ity_I1);
+         // FIXME: assumes little-endian guest
+         stmt( IRStmt_LLSC(Iend_LE, resSC1, getIRegA(rN), mkexpr(data)) );
+
+         /* Set rD to 1 on failure, 0 on success.  Currently we have
+            resSC1 == 0 on failure, 1 on success. */
+         resSC32 = newTemp(Ity_I32);
+         assign(resSC32,
+                unop(Iop_1Uto32, unop(Iop_Not1, mkexpr(resSC1))));
+
+         putIRegA(rD, mkexpr(resSC32),
+                      IRTemp_INVALID, Ijk_Boring);
+         if (ty == Ity_I64) {
+            DIP("strex%s%s r%u, r%u, r%u, [r%u]\n",
+                nm, nCC(INSN_COND), rD, rT, rT+1, rN);
+         } else {
+            DIP("strex%s%s r%u, r%u, [r%u]\n",
+                nm, nCC(INSN_COND), rD, rT, rN);
+         }
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* --------------------- movw, movt --------------------- */
+   if (0x03000000 == (insn & 0x0FF00000)
+       || 0x03400000 == (insn & 0x0FF00000)) /* pray for CSE */ {
+      UInt rD    = INSN(15,12);
+      UInt imm16 = (insn & 0xFFF) | ((insn >> 4) & 0x0000F000);
+      UInt isT   = (insn >> 22) & 1;
+      if (rD == 15) {
+         /* forget it */
+      } else {
+         if (isT) {
+            putIRegA(rD,
+                     binop(Iop_Or32,
+                           binop(Iop_And32, getIRegA(rD), mkU32(0xFFFF)),
+                           mkU32(imm16 << 16)),
+                     condT, Ijk_Boring);
+            DIP("movt%s r%u, #0x%04x\n", nCC(INSN_COND), rD, imm16);
+            goto decode_success;
+         } else {
+            putIRegA(rD, mkU32(imm16), condT, Ijk_Boring);
+            DIP("movw%s r%u, #0x%04x\n", nCC(INSN_COND), rD, imm16);
+            goto decode_success;
+         }
+      }
+      /* fall through */
+   }
+
+   /* ----------- uxtb, sxtb, uxth, sxth, uxtb16, sxtb16 ----------- */
+   /* FIXME: this is an exact duplicate of the Thumb version.  They
+      should be commoned up. */
+   if (BITS8(0,1,1,0,1, 0,0,0) == (INSN(27,20) & BITS8(1,1,1,1,1,0,0,0))
+       && BITS4(1,1,1,1) == INSN(19,16)
+       && BITS4(0,1,1,1) == INSN(7,4)
+       && BITS4(0,0, 0,0) == (INSN(11,8) & BITS4(0,0,1,1))) {
+      UInt subopc = INSN(27,20) & BITS8(0,0,0,0,0, 1,1,1);
+      if (subopc != BITS4(0,0,0,1) && subopc != BITS4(0,1,0,1)) {
+         Int    rot  = (INSN(11,8) >> 2) & 3;
+         UInt   rM   = INSN(3,0);
+         UInt   rD   = INSN(15,12);
+         IRTemp srcT = newTemp(Ity_I32);
+         IRTemp rotT = newTemp(Ity_I32);
+         IRTemp dstT = newTemp(Ity_I32);
+         const HChar* nm = "???";
+         assign(srcT, getIRegA(rM));
+         assign(rotT, genROR32(srcT, 8 * rot)); /* 0, 8, 16 or 24 only */
+         switch (subopc) {
+            case BITS4(0,1,1,0): // UXTB
+               assign(dstT, unop(Iop_8Uto32, unop(Iop_32to8, mkexpr(rotT))));
+               nm = "uxtb";
+               break;
+            case BITS4(0,0,1,0): // SXTB
+               assign(dstT, unop(Iop_8Sto32, unop(Iop_32to8, mkexpr(rotT))));
+               nm = "sxtb";
+               break;
+            case BITS4(0,1,1,1): // UXTH
+               assign(dstT, unop(Iop_16Uto32, unop(Iop_32to16, mkexpr(rotT))));
+               nm = "uxth";
+               break;
+            case BITS4(0,0,1,1): // SXTH
+               assign(dstT, unop(Iop_16Sto32, unop(Iop_32to16, mkexpr(rotT))));
+               nm = "sxth";
+               break;
+            case BITS4(0,1,0,0): // UXTB16
+               assign(dstT, binop(Iop_And32, mkexpr(rotT), mkU32(0x00FF00FF)));
+               nm = "uxtb16";
+               break;
+            case BITS4(0,0,0,0): { // SXTB16
+               IRTemp lo32 = newTemp(Ity_I32);
+               IRTemp hi32 = newTemp(Ity_I32);
+               assign(lo32, binop(Iop_And32, mkexpr(rotT), mkU32(0xFF)));
+               assign(hi32, binop(Iop_Shr32, mkexpr(rotT), mkU8(16)));
+               assign(
+                  dstT,
+                  binop(Iop_Or32,
+                        binop(Iop_And32,
+                              unop(Iop_8Sto32,
+                                   unop(Iop_32to8, mkexpr(lo32))),
+                              mkU32(0xFFFF)),
+                        binop(Iop_Shl32,
+                              unop(Iop_8Sto32,
+                                   unop(Iop_32to8, mkexpr(hi32))),
+                              mkU8(16))
+               ));
+               nm = "sxtb16";
+               break;
+            }
+            default:
+               vassert(0); // guarded by "if" above
+         }
+         putIRegA(rD, mkexpr(dstT), condT, Ijk_Boring);
+         DIP("%s%s r%u, r%u, ROR #%u\n", nm, nCC(INSN_COND), rD, rM, rot);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* ------------------- bfi, bfc ------------------- */
+   if (BITS8(0,1,1,1,1,1,0, 0) == (INSN(27,20) & BITS8(1,1,1,1,1,1,1,0))
+       && BITS4(0, 0,0,1) == (INSN(7,4) & BITS4(0,1,1,1))) {
+      UInt rD  = INSN(15,12);
+      UInt rN  = INSN(3,0);
+      UInt msb = (insn >> 16) & 0x1F; /* 20:16 */
+      UInt lsb = (insn >> 7) & 0x1F;  /* 11:7 */
+      if (rD == 15 || msb < lsb) {
+         /* undecodable; fall through */
+      } else {
+         IRTemp src    = newTemp(Ity_I32);
+         IRTemp olddst = newTemp(Ity_I32);
+         IRTemp newdst = newTemp(Ity_I32);
+         UInt   mask = 1 << (msb - lsb);
+         mask = (mask - 1) + mask;
+         vassert(mask != 0); // guaranteed by "msb < lsb" check above
+         mask <<= lsb;
+
+         assign(src, rN == 15 ? mkU32(0) : getIRegA(rN));
+         assign(olddst, getIRegA(rD));
+         assign(newdst,
+                binop(Iop_Or32,
+                   binop(Iop_And32,
+                         binop(Iop_Shl32, mkexpr(src), mkU8(lsb)), 
+                         mkU32(mask)),
+                   binop(Iop_And32,
+                         mkexpr(olddst),
+                         mkU32(~mask)))
+               );
+
+         putIRegA(rD, mkexpr(newdst), condT, Ijk_Boring);
+
+         if (rN == 15) {
+            DIP("bfc%s r%u, #%u, #%u\n",
+                nCC(INSN_COND), rD, lsb, msb-lsb+1);
+         } else {
+            DIP("bfi%s r%u, r%u, #%u, #%u\n",
+                nCC(INSN_COND), rD, rN, lsb, msb-lsb+1);
+         }
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* ------------------- {u,s}bfx ------------------- */
+   if (BITS8(0,1,1,1,1,0,1,0) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,0))
+       && BITS4(0,1,0,1) == (INSN(7,4) & BITS4(0,1,1,1))) {
+      UInt rD  = INSN(15,12);
+      UInt rN  = INSN(3,0);
+      UInt wm1 = (insn >> 16) & 0x1F; /* 20:16 */
+      UInt lsb = (insn >> 7) & 0x1F;  /* 11:7 */
+      UInt msb = lsb + wm1;
+      UInt isU = (insn >> 22) & 1;    /* 22:22 */
+      if (rD == 15 || rN == 15 || msb >= 32) {
+         /* undecodable; fall through */
+      } else {
+         IRTemp src  = newTemp(Ity_I32);
+         IRTemp tmp  = newTemp(Ity_I32);
+         IRTemp res  = newTemp(Ity_I32);
+         UInt   mask = ((1 << wm1) - 1) + (1 << wm1);
+         vassert(msb >= 0 && msb <= 31);
+         vassert(mask != 0); // guaranteed by msb being in 0 .. 31 inclusive
+
+         assign(src, getIRegA(rN));
+         assign(tmp, binop(Iop_And32,
+                           binop(Iop_Shr32, mkexpr(src), mkU8(lsb)),
+                           mkU32(mask)));
+         assign(res, binop(isU ? Iop_Shr32 : Iop_Sar32,
+                           binop(Iop_Shl32, mkexpr(tmp), mkU8(31-wm1)),
+                           mkU8(31-wm1)));
+
+         putIRegA(rD, mkexpr(res), condT, Ijk_Boring);
+
+         DIP("%s%s r%u, r%u, #%u, #%u\n",
+             isU ? "ubfx" : "sbfx",
+             nCC(INSN_COND), rD, rN, lsb, wm1 + 1);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* --------------------- Load/store doubleword ------------- */
+   // LDRD STRD
+   /*                 31   27   23   19 15 11   7    3     # highest bit
+                        28   24   20 16 12    8    4    0
+      A5-36   1 | 16  cond 0001 U100 Rn Rd im4h 11S1 im4l
+      A5-38   1 | 32  cond 0001 U000 Rn Rd 0000 11S1 Rm
+      A5-40   2 | 16  cond 0001 U110 Rn Rd im4h 11S1 im4l
+      A5-42   2 | 32  cond 0001 U010 Rn Rd 0000 11S1 Rm
+      A5-44   3 | 16  cond 0000 U100 Rn Rd im4h 11S1 im4l
+      A5-46   3 | 32  cond 0000 U000 Rn Rd 0000 11S1 Rm
+   */
+   /* case coding:
+             1   at-ea               (access at ea)
+             2   at-ea-then-upd      (access at ea, then Rn = ea)
+             3   at-Rn-then-upd      (access at Rn, then Rn = ea)
+      ea coding
+             16  Rn +/- imm8
+             32  Rn +/- Rm
+   */
+   /* Quickly skip over all of this for hopefully most instructions */
+   if ((INSN(27,24) & BITS4(1,1,1,0)) != BITS4(0,0,0,0))
+      goto after_load_store_doubleword;
+
+   /* Check the "11S1" thing. */
+   if ((INSN(7,4) & BITS4(1,1,0,1)) != BITS4(1,1,0,1))
+      goto after_load_store_doubleword;
+
+   summary = 0;
+
+   /**/ if (INSN(27,24) == BITS4(0,0,0,1) && INSN(22,20) == BITS3(1,0,0)) {
+      summary = 1 | 16;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,1) && INSN(22,20) == BITS3(0,0,0)) {
+      summary = 1 | 32;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,1) && INSN(22,20) == BITS3(1,1,0)) {
+      summary = 2 | 16;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,1) && INSN(22,20) == BITS3(0,1,0)) {
+      summary = 2 | 32;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,0) && INSN(22,20) == BITS3(1,0,0)) {
+      summary = 3 | 16;
+   }
+   else if (INSN(27,24) == BITS4(0,0,0,0) && INSN(22,20) == BITS3(0,0,0)) {
+      summary = 3 | 32;
+   }
+   else goto after_load_store_doubleword;
+
+   { UInt rN   = (insn >> 16) & 0xF; /* 19:16 */
+     UInt rD   = (insn >> 12) & 0xF; /* 15:12 */
+     UInt rM   = (insn >> 0)  & 0xF; /*  3:0  */
+     UInt bU   = (insn >> 23) & 1;   /* 23 U=1 offset+, U=0 offset- */
+     UInt bS   = (insn >> 5) & 1;    /* S=1 store, S=0 load */
+     UInt imm8 = ((insn >> 4) & 0xF0) | (insn & 0xF); /* 11:8, 3:0 */
+
+     /* Require rD to be an even numbered register */
+     if ((rD & 1) != 0)
+        goto after_load_store_doubleword;
+
+     /* Require 11:8 == 0 for Rn +/- Rm cases */
+     if ((summary & 32) != 0 && (imm8 & 0xF0) != 0)
+        goto after_load_store_doubleword;
+
+     /* Skip some invalid cases, which would lead to two competing
+        updates to the same register, or which are otherwise
+        disallowed by the spec. */
+     switch (summary) {
+        case 1 | 16:
+           break;
+        case 1 | 32: 
+           if (rM == 15) goto after_load_store_doubleword;
+           break;
+        case 2 | 16: case 3 | 16:
+           if (rN == 15) goto after_load_store_doubleword;
+           if (bS == 0 && (rN == rD || rN == rD+1))
+              goto after_load_store_doubleword;
+           break;
+        case 2 | 32: case 3 | 32:
+           if (rM == 15) goto after_load_store_doubleword;
+           if (rN == 15) goto after_load_store_doubleword;
+           if (rN == rM) goto after_load_store_doubleword;
+           if (bS == 0 && (rN == rD || rN == rD+1))
+              goto after_load_store_doubleword;
+           break;
+        default:
+           vassert(0);
+     }
+
+     /* If this is a branch, make it unconditional at this point.
+        Doing conditional branches in-line is too complex (for
+        now). */
+     vassert((rD & 1) == 0); /* from tests above */
+     if (bS == 0 && rD+1 == 15 && condT != IRTemp_INVALID) {
+        // go uncond
+        mk_skip_over_A32_if_cond_is_false( condT );
+        condT = IRTemp_INVALID;
+        // now uncond
+     }
+
+     /* compute the effective address.  Bind it to a tmp since we
+        may need to use it twice. */
+     IRExpr* eaE = NULL;
+     switch (summary & 0xF0) {
+        case 16:
+           eaE = mk_EA_reg_plusminus_imm8( rN, bU, imm8, dis_buf );
+           break;
+        case 32:
+           eaE = mk_EA_reg_plusminus_reg( rN, bU, rM, dis_buf );
+           break;
+     }
+     vassert(eaE);
+     IRTemp eaT = newTemp(Ity_I32);
+     assign(eaT, eaE);
+
+     /* get the old Rn value */
+     IRTemp rnT = newTemp(Ity_I32);
+     assign(rnT, getIRegA(rN));
+
+     /* decide on the transfer address */
+     IRTemp taT = IRTemp_INVALID;
+     switch (summary & 0x0F) {
+        case 1: case 2: taT = eaT; break;
+        case 3:         taT = rnT; break;
+     }
+     vassert(taT != IRTemp_INVALID);
+
+     /* XXX deal with alignment constraints */
+     /* XXX: but the A8 doesn't seem to trap for misaligned loads, so,
+        ignore alignment issues for the time being. */
+
+     /* For almost all cases, we do the writeback after the transfers.
+        However, that leaves the stack "uncovered" in this case:
+           strd    rD, [sp, #-8]
+        In which case, do the writeback to SP now, instead of later.
+        This is bad in that it makes the insn non-restartable if the
+        accesses fault, but at least keeps Memcheck happy. */
+     Bool writeback_already_done = False;
+     if (bS == 1 /*store*/ && summary == (2 | 16)
+         && rN == 13 && rN != rD && rN != rD+1
+         && bU == 0/*minus*/ && imm8 == 8) {
+        putIRegA( rN, mkexpr(eaT), condT, Ijk_Boring );
+        writeback_already_done = True;
+     }
+
+     /* doubleword store  S 1
+        doubleword load   S 0
+     */
+     const HChar* name = NULL;
+     /* generate the transfers */
+     if (bS == 1) { // doubleword store
+        storeGuardedLE( binop(Iop_Add32, mkexpr(taT), mkU32(0)),
+                        getIRegA(rD+0), condT );
+        storeGuardedLE( binop(Iop_Add32, mkexpr(taT), mkU32(4)),
+                        getIRegA(rD+1), condT );
+        name = "strd";
+     } else { // doubleword load
+        IRTemp oldRd0 = newTemp(Ity_I32);
+        IRTemp oldRd1 = newTemp(Ity_I32);
+        assign(oldRd0, llGetIReg(rD+0));
+        assign(oldRd1, llGetIReg(rD+1));
+        IRTemp newRd0 = newTemp(Ity_I32);
+        IRTemp newRd1 = newTemp(Ity_I32);
+        loadGuardedLE( newRd0, ILGop_Ident32,
+                       binop(Iop_Add32, mkexpr(taT), mkU32(0)),
+                       mkexpr(oldRd0), condT );
+        putIRegA( rD+0, mkexpr(newRd0), IRTemp_INVALID, Ijk_Boring );
+        loadGuardedLE( newRd1, ILGop_Ident32,
+                       binop(Iop_Add32, mkexpr(taT), mkU32(4)),
+                       mkexpr(oldRd1), condT );
+        putIRegA( rD+1, mkexpr(newRd1), IRTemp_INVALID, Ijk_Boring );
+        name = "ldrd";
+     }
+
+     /* Update Rn if necessary. */
+     switch (summary & 0x0F) {
+        case 2: case 3:
+           // should be assured by logic above:
+           vassert(rN != 15); /* from checks above */
+           if (bS == 0) {
+              vassert(rD+0 != rN); /* since we just wrote rD+0 */
+              vassert(rD+1 != rN); /* since we just wrote rD+1 */
+           }
+           if (!writeback_already_done)
+              putIRegA( rN, mkexpr(eaT), condT, Ijk_Boring );
+           break;
+     }
+
+     switch (summary & 0x0F) {
+        case 1:  DIP("%s%s r%u, %s\n", name, nCC(INSN_COND), rD, dis_buf);
+                 break;
+        case 2:  DIP("%s%s r%u, %s! (at-EA-then-Rn=EA)\n",
+                     name, nCC(INSN_COND), rD, dis_buf);
+                 break;
+        case 3:  DIP("%s%s r%u, %s! (at-Rn-then-Rn=EA)\n",
+                     name, nCC(INSN_COND), rD, dis_buf);
+                 break;
+        default: vassert(0);
+     }
+
+     goto decode_success;
+   }
+
+  after_load_store_doubleword:
+
+   /* ------------------- {s,u}xtab ------------- */
+   if (BITS8(0,1,1,0,1,0,1,0) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,0,0,0) == (INSN(11,8) & BITS4(0,0,1,1))
+       && BITS4(0,1,1,1) == INSN(7,4)) {
+      UInt rN  = INSN(19,16);
+      UInt rD  = INSN(15,12);
+      UInt rM  = INSN(3,0);
+      UInt rot = (insn >> 10) & 3;
+      UInt isU = INSN(22,22);
+      if (rN == 15/*it's {S,U}XTB*/ || rD == 15 || rM == 15) {
+         /* undecodable; fall through */
+      } else {
+         IRTemp srcL = newTemp(Ity_I32);
+         IRTemp srcR = newTemp(Ity_I32);
+         IRTemp res  = newTemp(Ity_I32);
+         assign(srcR, getIRegA(rM));
+         assign(srcL, getIRegA(rN));
+         assign(res,  binop(Iop_Add32,
+                            mkexpr(srcL),
+                            unop(isU ? Iop_8Uto32 : Iop_8Sto32,
+                                 unop(Iop_32to8, 
+                                      genROR32(srcR, 8 * rot)))));
+         putIRegA(rD, mkexpr(res), condT, Ijk_Boring);
+         DIP("%cxtab%s r%u, r%u, r%u, ror #%u\n",
+             isU ? 'u' : 's', nCC(INSN_COND), rD, rN, rM, rot);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* ------------------- {s,u}xtah ------------- */
+   if (BITS8(0,1,1,0,1,0,1,1) == (INSN(27,20) & BITS8(1,1,1,1,1,0,1,1))
+       && BITS4(0,0,0,0) == (INSN(11,8) & BITS4(0,0,1,1))
+       && BITS4(0,1,1,1) == INSN(7,4)) {
+      UInt rN  = INSN(19,16);
+      UInt rD  = INSN(15,12);
+      UInt rM  = INSN(3,0);
+      UInt rot = (insn >> 10) & 3;
+      UInt isU = INSN(22,22);
+      if (rN == 15/*it's {S,U}XTH*/ || rD == 15 || rM == 15) {
+         /* undecodable; fall through */
+      } else {
+         IRTemp srcL = newTemp(Ity_I32);
+         IRTemp srcR = newTemp(Ity_I32);
+         IRTemp res  = newTemp(Ity_I32);
+         assign(srcR, getIRegA(rM));
+         assign(srcL, getIRegA(rN));
+         assign(res,  binop(Iop_Add32,
+                            mkexpr(srcL),
+                            unop(isU ? Iop_16Uto32 : Iop_16Sto32,
+                                 unop(Iop_32to16, 
+                                      genROR32(srcR, 8 * rot)))));
+         putIRegA(rD, mkexpr(res), condT, Ijk_Boring);
+
+         DIP("%cxtah%s r%u, r%u, r%u, ror #%u\n",
+             isU ? 'u' : 's', nCC(INSN_COND), rD, rN, rM, rot);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* ------------------- rev16, rev ------------------ */
+   if (INSN(27,16) == 0x6BF
+       && (INSN(11,4) == 0xFB/*rev16*/ || INSN(11,4) == 0xF3/*rev*/)) {
+      Bool isREV = INSN(11,4) == 0xF3;
+      UInt rM    = INSN(3,0);
+      UInt rD    = INSN(15,12);
+      if (rM != 15 && rD != 15) {
+         IRTemp rMt = newTemp(Ity_I32);
+         assign(rMt, getIRegA(rM));
+         IRTemp res = isREV ? gen_REV(rMt) : gen_REV16(rMt);
+         putIRegA(rD, mkexpr(res), condT, Ijk_Boring);
+         DIP("rev%s%s r%u, r%u\n", isREV ? "" : "16",
+             nCC(INSN_COND), rD, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- revsh ----------------------- */
+   if (INSN(27,16) == 0x6FF && INSN(11,4) == 0xFB) {
+      UInt rM = INSN(3,0);
+      UInt rD = INSN(15,12);
+      if (rM != 15 && rD != 15) {
+         IRTemp irt_rM  = newTemp(Ity_I32);
+         IRTemp irt_hi  = newTemp(Ity_I32);
+         IRTemp irt_low = newTemp(Ity_I32);
+         IRTemp irt_res = newTemp(Ity_I32);
+         assign(irt_rM, getIRegA(rM));
+         assign(irt_hi,
+                binop(Iop_Sar32,
+                      binop(Iop_Shl32, mkexpr(irt_rM), mkU8(24)),
+                      mkU8(16)
+                )
+         );
+         assign(irt_low,
+                binop(Iop_And32,
+                      binop(Iop_Shr32, mkexpr(irt_rM), mkU8(8)),
+                      mkU32(0xFF)
+                )
+         );
+         assign(irt_res,
+                binop(Iop_Or32, mkexpr(irt_hi), mkexpr(irt_low))
+         );
+         putIRegA(rD, mkexpr(irt_res), condT, Ijk_Boring);
+         DIP("revsh%s r%u, r%u\n", nCC(INSN_COND), rD, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- rbit ------------------ */
+   if (INSN(27,16) == 0x6FF && INSN(11,4) == 0xF3) {
+      UInt rD = INSN(15,12);
+      UInt rM = INSN(3,0);
+      if (rD != 15 && rM != 15) {
+         IRTemp arg = newTemp(Ity_I32);
+         assign(arg, getIRegA(rM));
+         IRTemp res = gen_BITREV(arg);
+         putIRegA(rD, mkexpr(res), condT, Ijk_Boring);
+         DIP("rbit r%u, r%u\n", rD, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- smmul ------------------ */
+   if (INSN(27,20) == BITS8(0,1,1,1,0,1,0,1)
+       && INSN(15,12) == BITS4(1,1,1,1)
+       && (INSN(7,4) & BITS4(1,1,0,1)) == BITS4(0,0,0,1)) {
+      UInt bitR = INSN(5,5);
+      UInt rD = INSN(19,16);
+      UInt rM = INSN(11,8);
+      UInt rN = INSN(3,0);
+      if (rD != 15 && rM != 15 && rN != 15) {
+         IRExpr* res
+         = unop(Iop_64HIto32,
+                binop(Iop_Add64,
+                      binop(Iop_MullS32, getIRegA(rN), getIRegA(rM)),
+                      mkU64(bitR ? 0x80000000ULL : 0ULL)));
+         putIRegA(rD, res, condT, Ijk_Boring);
+         DIP("smmul%s%s r%u, r%u, r%u\n",
+             nCC(INSN_COND), bitR ? "r" : "", rD, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- smmla ------------------ */
+   if (INSN(27,20) == BITS8(0,1,1,1,0,1,0,1)
+       && INSN(15,12) != BITS4(1,1,1,1)
+       && (INSN(7,4) & BITS4(1,1,0,1)) == BITS4(0,0,0,1)) {
+      UInt bitR = INSN(5,5);
+      UInt rD = INSN(19,16);
+      UInt rA = INSN(15,12);
+      UInt rM = INSN(11,8);
+      UInt rN = INSN(3,0);
+      if (rD != 15 && rM != 15 && rN != 15) {
+         IRExpr* res
+         = unop(Iop_64HIto32,
+                binop(Iop_Add64,
+                      binop(Iop_Add64,
+                            binop(Iop_32HLto64, getIRegA(rA), mkU32(0)),
+                            binop(Iop_MullS32, getIRegA(rN), getIRegA(rM))),
+                      mkU64(bitR ? 0x80000000ULL : 0ULL)));
+         putIRegA(rD, res, condT, Ijk_Boring);
+         DIP("smmla%s%s r%u, r%u, r%u, r%u\n",
+             nCC(INSN_COND), bitR ? "r" : "", rD, rN, rM, rA);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- NOP ------------------ */
+   if (0x0320F000 == (insn & 0x0FFFFFFF)) {
+      DIP("nop%s\n", nCC(INSN_COND));
+      goto decode_success;
+   }
+
+   /* -------------- (A1) LDRT reg+/-#imm12 -------------- */
+   /* Load Register Unprivileged:
+      ldrt<c> Rt, [Rn] {, #+/-imm12}
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,1,0,0,0,0,1,1) ) {
+      UInt rT     = INSN(15,12);
+      UInt rN     = INSN(19,16);
+      UInt imm12  = INSN(11,0);
+      UInt bU     = INSN(23,23);
+      Bool valid  = True;
+      if (rT == 15 || rN == 15 || rN == rT) valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_Ident32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         IRExpr* erN = binop(bU ? Iop_Add32 : Iop_Sub32,
+                             getIRegA(rN), mkU32(imm12));
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrt%s r%u, [r%u], #%c%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', imm12);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A2) LDRT reg+/-reg with shift -------------- */
+   /* Load Register Unprivileged:
+      ldrt<c> Rt, [Rn], +/-Rm{, shift}
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,1,1,0,0,0,1,1)
+        && INSN(4,4) == 0 ) {
+      UInt rT     = INSN(15,12);
+      UInt rN     = INSN(19,16);
+      UInt rM     = INSN(3,0);
+      UInt imm5   = INSN(11,7);
+      UInt bU     = INSN(23,23);
+      UInt type   = INSN(6,5);
+      Bool valid  = True;
+      if (rT == 15 || rN == 15 || rN == rT || rM == 15
+          /* || (ArchVersion() < 6 && rM == rN) */)
+         valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_Ident32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         // dis_buf generated is slightly bogus, in fact.
+         IRExpr* erN = mk_EA_reg_plusminus_shifted_reg(rN, bU, rM,
+                                                       type, imm5, dis_buf);
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrt%s r%u, %s\n", nCC(INSN_COND), rT, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A1) LDRBT reg+/-#imm12 -------------- */
+   /* Load Register Byte Unprivileged:
+      ldrbt<c> Rt, [Rn], #+/-imm12
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,1,0,0,0,1,1,1) ) {
+      UInt rT     = INSN(15,12);
+      UInt rN     = INSN(19,16);
+      UInt imm12  = INSN(11,0);
+      UInt bU     = INSN(23,23);
+      Bool valid  = True;
+      if (rT == 15 || rN == 15 || rN == rT) valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_8Uto32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         IRExpr* erN = binop(bU ? Iop_Add32 : Iop_Sub32,
+                             getIRegA(rN), mkU32(imm12));
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrbt%s r%u, [r%u], #%c%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', imm12);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A2) LDRBT reg+/-reg with shift -------------- */
+   /* Load Register Byte Unprivileged:
+      ldrbt<c> Rt, [Rn], +/-Rm{, shift}
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,1,1,0,0,1,1,1)
+        && INSN(4,4) == 0 ) {
+      UInt rT     = INSN(15,12);
+      UInt rN     = INSN(19,16);
+      UInt rM     = INSN(3,0);
+      UInt imm5   = INSN(11,7);
+      UInt bU     = INSN(23,23);
+      UInt type   = INSN(6,5);
+      Bool valid  = True;
+      if (rT == 15 || rN == 15 || rN == rT || rM == 15
+          /* || (ArchVersion() < 6 && rM == rN) */)
+         valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_8Uto32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         // dis_buf generated is slightly bogus, in fact.
+         IRExpr* erN = mk_EA_reg_plusminus_shifted_reg(rN, bU, rM,
+                                                       type, imm5, dis_buf);
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrbt%s r%u, %s\n", nCC(INSN_COND), rT, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A1) LDRHT reg+#imm8 -------------- */
+   /* Load Register Halfword Unprivileged:
+      ldrht<c> Rt, [Rn] {, #+/-imm8}
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,0,0,0,0,1,1,1)
+       && INSN(7,4) == BITS4(1,0,1,1) ) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt bU    = INSN(23,23);
+      UInt imm4H = INSN(11,8);
+      UInt imm4L = INSN(3,0);
+      UInt imm8  = (imm4H << 4) | imm4L;
+      Bool valid = True;
+      if (rT == 15 || rN == 15 || rN == rT)
+         valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_16Uto32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         IRExpr* erN = binop(bU ? Iop_Add32 : Iop_Sub32,
+                             getIRegA(rN), mkU32(imm8));
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrht%s r%u, [r%u], #%c%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A2) LDRHT reg+/-reg -------------- */
+   /* Load Register Halfword Unprivileged:
+      ldrht<c> Rt, [Rn], +/-Rm
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,0,0,0,0,0,1,1)
+       && INSN(11,4) == BITS8(0,0,0,0,1,0,1,1) ) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt rM    = INSN(3,0);
+      UInt bU    = INSN(23,23);
+      Bool valid = True;
+      if (rT == 15 || rN == 15 || rN == rT || rM == 15)
+         valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_16Uto32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         IRExpr* erN = binop(bU ? Iop_Add32 : Iop_Sub32,
+                             getIRegA(rN), getIRegA(rM));
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrht%s r%u, [r%u], %cr%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', rM);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A1) LDRSHT reg+#imm8 -------------- */
+   /* Load Register Signed Halfword Unprivileged:
+      ldrsht<c> Rt, [Rn] {, #+/-imm8}
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,0,0,0,0,1,1,1)
+       && INSN(7,4) == BITS4(1,1,1,1)) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt bU    = INSN(23,23);
+      UInt imm4H = INSN(11,8);
+      UInt imm4L = INSN(3,0);
+      UInt imm8  = (imm4H << 4) | imm4L;
+      Bool valid = True;
+      if (rN == 15 || rT == 15 || rN == rT)
+         valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_16Sto32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         IRExpr* erN = binop(bU ? Iop_Add32 : Iop_Sub32,
+                             getIRegA(rN), mkU32(imm8));
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrsht%s r%u, [r%u], #%c%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A2) LDRSHT reg+/-reg -------------- */
+   /* Load Register Signed Halfword Unprivileged:
+      ldrsht<c> Rt, [Rn], +/-Rm
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,0,0,0,0,0,1,1)
+       && INSN(11,4) == BITS8(0,0,0,0,1,1,1,1)) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt rM    = INSN(3,0);
+      UInt bU    = INSN(23,23);
+      Bool valid = True;
+      if (rN == 15 || rT == 15 || rN == rT || rM == 15)
+         valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_16Sto32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         IRExpr* erN = binop(bU ? Iop_Add32 : Iop_Sub32,
+                             getIRegA(rN), getIRegA(rM));
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrsht%s r%u, [r%u], %cr%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', rM);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A1) LDRSBT reg+#imm8 -------------- */
+   /* Load Register Signed Byte Unprivileged:
+      ldrsbt<c> Rt, [Rn] {, #+/-imm8}
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,0,0,0,0,1,1,1)
+       && INSN(7,4) == BITS4(1,1,0,1)) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt bU    = INSN(23,23);
+      UInt imm4H = INSN(11,8);
+      UInt imm4L = INSN(3,0);
+      UInt imm8  = (imm4H << 4) | imm4L;
+      Bool valid = True;
+      if (rT == 15 || rN == 15 || rN == rT)
+         valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_8Sto32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         IRExpr* erN = binop(bU ? Iop_Add32 : Iop_Sub32,
+                             getIRegA(rN), mkU32(imm8));
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrsbt%s r%u, [r%u], #%c%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A2) LDRSBT reg+/-reg -------------- */
+   /* Load Register Signed Byte Unprivileged:
+      ldrsbt<c> Rt, [Rn], +/-Rm
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,0,0,0,0,0,1,1)
+       && INSN(11,4) == BITS8(0,0,0,0,1,1,0,1)) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt bU    = INSN(23,23);
+      UInt rM    = INSN(3,0);
+      Bool valid = True;
+      if (rT == 15 || rN == 15 || rN == rT || rM == 15)
+         valid = False;
+      if (valid) {
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt,
+                        ILGop_8Sto32, getIRegA(rN), getIRegA(rT), condT );
+         putIRegA(rT, mkexpr(newRt), IRTemp_INVALID, Ijk_Boring);
+         IRExpr* erN = binop(bU ? Iop_Add32 : Iop_Sub32,
+                             getIRegA(rN), getIRegA(rM));
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("ldrsbt%s r%u, [r%u], %cr%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', rM);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A1) STRBT reg+#imm12 -------------- */
+   /* Store Register Byte Unprivileged:
+      strbt<c> Rt, [Rn], #+/-imm12
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,1,0,0,0,1,1,0) ) {
+      UInt rT     = INSN(15,12);
+      UInt rN     = INSN(19,16);
+      UInt imm12  = INSN(11,0);
+      UInt bU     = INSN(23,23);
+      Bool valid = True;
+      if (rT == 15 || rN == 15 || rN == rT) valid = False;
+      if (valid) {
+         IRExpr* address = getIRegA(rN);
+         IRExpr* data = unop(Iop_32to8, getIRegA(rT));
+         storeGuardedLE( address, data, condT);
+         IRExpr* newRn = binop(bU ? Iop_Add32 : Iop_Sub32,
+                               getIRegA(rN), mkU32(imm12));
+         putIRegA(rN, newRn, condT, Ijk_Boring);
+         DIP("strbt%s r%u, [r%u], #%c%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', imm12);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A2) STRBT reg+/-reg -------------- */
+   /* Store Register Byte Unprivileged:
+      strbt<c> Rt, [Rn], +/-Rm{, shift}
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,1,1,0,0,1,1,0)
+       && INSN(4,4) == 0) {
+      UInt rT     = INSN(15,12);
+      UInt rN     = INSN(19,16);
+      UInt imm5   = INSN(11,7);
+      UInt type   = INSN(6,5);
+      UInt rM     = INSN(3,0);
+      UInt bU     = INSN(23,23);
+      Bool valid  = True;
+      if (rT == 15 || rN == 15 || rN == rT || rM == 15) valid = False;
+      if (valid) {
+         IRExpr* address = getIRegA(rN);
+         IRExpr* data = unop(Iop_32to8, getIRegA(rT));
+         storeGuardedLE( address, data, condT);
+         // dis_buf generated is slightly bogus, in fact.
+         IRExpr* erN = mk_EA_reg_plusminus_shifted_reg(rN, bU, rM,
+                                                       type, imm5, dis_buf);
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("strbt%s r%u, %s\n", nCC(INSN_COND), rT, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A1) STRHT reg+#imm8 -------------- */
+   /* Store Register Halfword Unprivileged:
+      strht<c> Rt, [Rn], #+/-imm8
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,0,0,0,0,1,1,0)
+       && INSN(7,4) == BITS4(1,0,1,1) ) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt imm4H = INSN(11,8);
+      UInt imm4L = INSN(3,0);
+      UInt imm8  = (imm4H << 4) | imm4L;
+      UInt bU    = INSN(23,23);
+      Bool valid = True;
+      if (rT == 15 || rN == 15 || rN == rT) valid = False;
+      if (valid) {
+         IRExpr* address = getIRegA(rN);
+         IRExpr* data = unop(Iop_32to16, getIRegA(rT));
+         storeGuardedLE( address, data, condT);
+         IRExpr* newRn = binop(bU ? Iop_Add32 : Iop_Sub32,
+                               getIRegA(rN), mkU32(imm8));
+         putIRegA(rN, newRn, condT, Ijk_Boring);
+         DIP("strht%s r%u, [r%u], #%c%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A2) STRHT reg+reg -------------- */
+   /* Store Register Halfword Unprivileged:
+      strht<c> Rt, [Rn], +/-Rm
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,0,0,0,0,0,1,0)
+       && INSN(11,4) == BITS8(0,0,0,0,1,0,1,1) ) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt rM    = INSN(3,0);
+      UInt bU    = INSN(23,23);
+      Bool valid = True;
+      if (rT == 15 || rN == 15 || rN == rT || rM == 15) valid = False;
+      if (valid) {
+         IRExpr* address = getIRegA(rN);
+         IRExpr* data = unop(Iop_32to16, getIRegA(rT));
+         storeGuardedLE( address, data, condT);
+         IRExpr* newRn = binop(bU ? Iop_Add32 : Iop_Sub32,
+                               getIRegA(rN), getIRegA(rM));
+         putIRegA(rN, newRn, condT, Ijk_Boring);
+         DIP("strht%s r%u, [r%u], %cr%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', rM);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A1) STRT reg+imm12 -------------- */
+   /* Store Register Unprivileged:
+      strt<c> Rt, [Rn], #+/-imm12
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,1,0,0,0,0,1,0) ) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt imm12 = INSN(11,0);
+      UInt bU    = INSN(23,23);
+      Bool valid = True;
+      if (rN == 15 || rN == rT) valid = False;
+      if (valid) {
+         IRExpr* address = getIRegA(rN);
+         storeGuardedLE( address, getIRegA(rT), condT);
+         IRExpr* newRn = binop(bU ? Iop_Add32 : Iop_Sub32,
+                               getIRegA(rN), mkU32(imm12));
+         putIRegA(rN, newRn, condT, Ijk_Boring);
+         DIP("strt%s r%u, [r%u], %c%u\n",
+             nCC(INSN_COND), rT, rN, bU ? '+' : '-', imm12);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (A2) STRT reg+reg -------------- */
+   /* Store Register Unprivileged:
+      strt<c> Rt, [Rn], +/-Rm{, shift}
+   */
+   if ( (INSN(27,20) & BITS8(1,1,1,1,0,1,1,1)) == BITS8(0,1,1,0,0,0,1,0)
+       && INSN(4,4) == 0 ) {
+      UInt rT    = INSN(15,12);
+      UInt rN    = INSN(19,16);
+      UInt rM    = INSN(3,0);
+      UInt type  = INSN(6,5);
+      UInt imm5  = INSN(11,7);
+      UInt bU    = INSN(23,23);
+      Bool valid = True;
+      if (rN == 15 || rN == rT || rM == 15) valid = False;
+      /* FIXME We didn't do:
+         if ArchVersion() < 6 && rM == rN then UNPREDICTABLE */
+      if (valid) {
+         storeGuardedLE( getIRegA(rN), getIRegA(rT), condT);
+         // dis_buf generated is slightly bogus, in fact.
+         IRExpr* erN = mk_EA_reg_plusminus_shifted_reg(rN, bU, rM,
+                                                       type, imm5, dis_buf);
+         putIRegA(rN, erN, condT, Ijk_Boring);
+         DIP("strt%s r%u, %s\n", nCC(INSN_COND), rT, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- ARMv7 instructions                                    -- */
+   /* ----------------------------------------------------------- */
+
+   /* -------------- read CP15 TPIDRURO register ------------- */
+   /* mrc     p15, 0, r0, c13, c0, 3  up to
+      mrc     p15, 0, r14, c13, c0, 3
+   */
+   /* I don't know whether this is really v7-only.  But anyway, we
+      have to support it since arm-linux uses TPIDRURO as a thread
+      state register. */
+   if (0x0E1D0F70 == (insn & 0x0FFF0FFF)) {
+      UInt rD = INSN(15,12);
+      if (rD <= 14) {
+         /* skip r15, that's too stupid to handle */
+         putIRegA(rD, IRExpr_Get(OFFB_TPIDRURO, Ity_I32),
+                      condT, Ijk_Boring);
+         DIP("mrc%s p15,0, r%u, c13, c0, 3\n", nCC(INSN_COND), rD);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* Handle various kinds of barriers.  This is rather indiscriminate
+      in the sense that they are all turned into an IR Fence, which
+      means we don't know which they are, so the back end has to
+      re-emit them all when it comes across an IR Fence.
+   */
+   /* v6 */ /* mcr 15, 0, rT, c7, c10, 5 */
+   if (0xEE070FBA == (insn & 0xFFFF0FFF)) {
+      UInt rT = INSN(15,12);
+      if (rT <= 14) {
+         /* mcr 15, 0, rT, c7, c10, 5 (v6) equiv to DMB (v7).  Data
+            Memory Barrier -- ensures ordering of memory accesses. */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("mcr 15, 0, r%u, c7, c10, 5 (data memory barrier)\n", rT);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+   /* other flavours of barrier */
+   switch (insn) {
+      case 0xEE070F9A: /* v6 */
+         /* mcr 15, 0, r0, c7, c10, 4 (v6) equiv to DSB (v7).  Data
+            Synch Barrier -- ensures completion of memory accesses. */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("mcr 15, 0, r0, c7, c10, 4 (data synch barrier)\n");
+         goto decode_success;
+      case 0xEE070F95: /* v6 */
+         /* mcr 15, 0, r0, c7, c5, 4 (v6) equiv to ISB (v7).
+            Instruction Synchronisation Barrier (or Flush Prefetch
+            Buffer) -- a pipe flush, I think.  I suspect we could
+            ignore those, but to be on the safe side emit a fence
+            anyway. */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("mcr 15, 0, r0, c7, c5, 4 (insn synch barrier)\n");
+         goto decode_success;
+      default:
+         break;
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- VFP (CP 10, CP 11) instructions (in ARM mode)         -- */
+   /* ----------------------------------------------------------- */
+
+   if (INSN_COND != ARMCondNV) {
+      Bool ok_vfp = decode_CP10_CP11_instruction (
+                       &dres, INSN(27,0), condT, INSN_COND,
+                       False/*!isT*/
+                    );
+      if (ok_vfp)
+         goto decode_success;
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- NEON instructions (in ARM mode)                       -- */
+   /* ----------------------------------------------------------- */
+
+   /* These are all in NV space, and so are taken care of (far) above,
+      by a call from this function to decode_NV_instruction(). */
+
+   /* ----------------------------------------------------------- */
+   /* -- v6 media instructions (in ARM mode)                   -- */
+   /* ----------------------------------------------------------- */
+
+   { Bool ok_v6m = decode_V6MEDIA_instruction(
+                       &dres, INSN(27,0), condT, INSN_COND,
+                       False/*!isT*/
+                   );
+     if (ok_v6m)
+        goto decode_success;
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- Undecodable                                           -- */
+   /* ----------------------------------------------------------- */
+
+   goto decode_failure;
+   /*NOTREACHED*/
+
+  decode_failure:
+   /* All decode failures end up here. */
+   if (sigill_diag) {
+      vex_printf("disInstr(arm): unhandled instruction: "
+                 "0x%x\n", insn);
+      vex_printf("                 cond=%d(0x%x) 27:20=%u(0x%02x) "
+                                   "4:4=%d "
+                                   "3:0=%u(0x%x)\n",
+                 (Int)INSN_COND, (UInt)INSN_COND,
+                 (Int)INSN(27,20), (UInt)INSN(27,20),
+                 (Int)INSN(4,4),
+                 (Int)INSN(3,0), (UInt)INSN(3,0) );
+   }
+
+   /* Tell the dispatcher that this insn cannot be decoded, and so has
+      not been executed, and (is currently) the next to be executed.
+      R15 should be up-to-date since it made so at the start of each
+      insn, but nevertheless be paranoid and update it again right
+      now. */
+   vassert(0 == (guest_R15_curr_instr_notENC & 3));
+   llPutIReg( 15, mkU32(guest_R15_curr_instr_notENC) );
+   dres.len         = 0;
+   dres.whatNext    = Dis_StopHere;
+   dres.jk_StopHere = Ijk_NoDecode;
+   dres.continueAt  = 0;
+   return dres;
+
+  decode_success:
+   /* All decode successes end up here. */
+   DIP("\n");
+
+   vassert(dres.len == 4 || dres.len == 20);
+
+   /* Now then.  Do we have an implicit jump to r15 to deal with? */
+   if (r15written) {
+      /* If we get a jump to deal with, we assume that there's been no
+         other competing branch stuff previously generated for this
+         insn.  That's reasonable, in the sense that the ARM insn set
+         appears to declare as "Unpredictable" any instruction which
+         generates more than one possible new value for r15.  Hence
+         just assert.  The decoders themselves should check against
+         all such instructions which are thusly Unpredictable, and
+         decline to decode them.  Hence we should never get here if we
+         have competing new values for r15, and hence it is safe to
+         assert here. */
+      vassert(dres.whatNext == Dis_Continue);
+      vassert(irsb->next == NULL);
+      vassert(irsb->jumpkind == Ijk_Boring);
+      /* If r15 is unconditionally written, terminate the block by
+         jumping to it.  If it's conditionally written, still
+         terminate the block (a shame, but we can't do side exits to
+         arbitrary destinations), but first jump to the next
+         instruction if the condition doesn't hold. */
+      /* We can't use getIReg(15) to get the destination, since that
+         will produce r15+8, which isn't what we want.  Must use
+         llGetIReg(15) instead. */
+      if (r15guard == IRTemp_INVALID) {
+         /* unconditional */
+      } else {
+         /* conditional */
+         stmt( IRStmt_Exit(
+                  unop(Iop_32to1,
+                       binop(Iop_Xor32,
+                             mkexpr(r15guard), mkU32(1))),
+                  r15kind,
+                  IRConst_U32(guest_R15_curr_instr_notENC + 4),
+                  OFFB_R15T
+         ));
+      }
+      /* This seems crazy, but we're required to finish the insn with
+         a write to the guest PC.  As usual we rely on ir_opt to tidy
+         up later. */
+      llPutIReg(15, llGetIReg(15));
+      dres.whatNext    = Dis_StopHere;
+      dres.jk_StopHere = r15kind;
+   } else {
+      /* Set up the end-state in the normal way. */
+      switch (dres.whatNext) {
+         case Dis_Continue:
+            llPutIReg(15, mkU32(dres.len + guest_R15_curr_instr_notENC));
+            break;
+         case Dis_ResteerU:
+         case Dis_ResteerC:
+            llPutIReg(15, mkU32(dres.continueAt));
+            break;
+         case Dis_StopHere:
+            break;
+         default:
+            vassert(0);
+      }
+   }
+
+   return dres;
+
+#  undef INSN_COND
+#  undef INSN
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Disassemble a single Thumb2 instruction              ---*/
+/*------------------------------------------------------------*/
+
+/* Forward declaration.  Maps the low 8 bits of an IT instruction
+   encoding (0xBFxy) to the number of following insns (1 .. 4) that
+   the IT guards; entries for UNPREDICTABLE (x,y) combinations
+   conservatively hold 4.  Used by the ITxxx backwards analysis in
+   disInstr_THUMB_WRK; the table itself is defined later in this
+   file. */
+static const UChar it_length_table[256]; /* fwds */
+
+/* NB: in Thumb mode we do fetches of regs with getIRegT, which
+   automagically adds 4 to fetches of r15.  However, writes to regs
+   are done with putIRegT, which disallows writes to r15.  Hence any
+   r15 writes and associated jumps have to be done "by hand". */
+
+/* Disassemble a single Thumb instruction into IR.  The instruction is
+   located in host memory at guest_instr, and has (decoded) guest IP
+   of guest_R15_curr_instr_notENC, which will have been set before the
+   call here. */
+
+static   
+DisResult disInstr_THUMB_WRK (
+             Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+             Bool         resteerCisOk,
+             void*        callback_opaque,
+             const UChar* guest_instr,
+             const VexArchInfo* archinfo,
+             const VexAbiInfo*  abiinfo,
+             Bool         sigill_diag
+          )
+{
+   /* A macro to fish bits out of insn0.  There's also INSN1, to fish
+      bits out of insn1, but that's defined only after the end of the
+      16-bit insn decoder, so as to stop it mistakenly being used
+      therein. */
+#  define INSN0(_bMax,_bMin)  SLICE_UInt(((UInt)insn0), (_bMax), (_bMin))
+
+   DisResult dres;
+   UShort    insn0; /*  first 16 bits of the insn */
+   UShort    insn1; /* second 16 bits of the insn */
+   //Bool      allow_VFP = False;
+   //UInt      hwcaps = archinfo->hwcaps;
+   HChar     dis_buf[128];  // big enough to hold LDMIA etc text
+
+   /* Summary result of the ITxxx backwards analysis: False == safe
+      but suboptimal. */
+   Bool guaranteedUnconditional = False;
+
+   /* What insn variants are we supporting today? */
+   //allow_VFP  = (0 != (hwcaps & VEX_HWCAPS_ARM_VFP));
+   // etc etc
+
+   /* Set result defaults. */
+   dres.whatNext    = Dis_Continue;
+   dres.len         = 2;
+   dres.continueAt  = 0;
+   dres.jk_StopHere = Ijk_INVALID;
+
+   /* Set default actions for post-insn handling of writes to r15, if
+      required. */
+   r15written = False;
+   r15guard   = IRTemp_INVALID; /* unconditional */
+   r15kind    = Ijk_Boring;
+
+   /* Insns could be 2 or 4 bytes long.  Just get the first 16 bits at
+      this point.  If we need the second 16, get them later.  We can't
+      get them both out immediately because it risks a fault (very
+      unlikely, but ..) if the second 16 bits aren't actually
+      necessary. */
+   insn0 = getUShortLittleEndianly( guest_instr );
+   insn1 = 0; /* We'll get it later, once we know we need it. */
+
+   /* Similarly, will set this later. */
+   IRTemp old_itstate = IRTemp_INVALID;
+
+   if (0) vex_printf("insn: 0x%x\n", insn0);
+
+   DIP("\t(thumb) 0x%x:  ", (UInt)guest_R15_curr_instr_notENC);
+
+   vassert(0 == (guest_R15_curr_instr_notENC & 1));
+
+   /* ----------------------------------------------------------- */
+   /* Spot "Special" instructions (see comment at top of file). */
+   {
+      const UChar* code = guest_instr;
+      /* Spot the 16-byte preamble: 
+
+         ea4f 0cfc  mov.w   ip, ip, ror #3
+         ea4f 3c7c  mov.w   ip, ip, ror #13
+         ea4f 7c7c  mov.w   ip, ip, ror #29
+         ea4f 4cfc  mov.w   ip, ip, ror #19
+      */
+      UInt word1 = 0x0CFCEA4F;
+      UInt word2 = 0x3C7CEA4F;
+      UInt word3 = 0x7C7CEA4F;
+      UInt word4 = 0x4CFCEA4F;
+      if (getUIntLittleEndianly(code+ 0) == word1 &&
+          getUIntLittleEndianly(code+ 4) == word2 &&
+          getUIntLittleEndianly(code+ 8) == word3 &&
+          getUIntLittleEndianly(code+12) == word4) {
+         /* Got a "Special" instruction preamble.  Which one is it? */
+         // 0x 0A 0A EA 4A
+         if (getUIntLittleEndianly(code+16) == 0x0A0AEA4A
+                                               /* orr.w r10,r10,r10 */) {
+            /* R3 = client_request ( R4 ) */
+            DIP("r3 = client_request ( %%r4 )\n");
+            llPutIReg(15, mkU32( (guest_R15_curr_instr_notENC + 20) | 1 ));
+            dres.jk_StopHere = Ijk_ClientReq;
+            dres.whatNext    = Dis_StopHere;
+            goto decode_success;
+         }
+         else
+         // 0x 0B 0B EA 4B
+         if (getUIntLittleEndianly(code+16) == 0x0B0BEA4B
+                                               /* orr r11,r11,r11 */) {
+            /* R3 = guest_NRADDR */
+            DIP("r3 = guest_NRADDR\n");
+            dres.len = 20;
+            llPutIReg(3, IRExpr_Get( OFFB_NRADDR, Ity_I32 ));
+            goto decode_success;
+         }
+         else
+         // 0x 0C 0C EA 4C
+         if (getUIntLittleEndianly(code+16) == 0x0C0CEA4C
+                                               /* orr r12,r12,r12 */) {
+            /*  branch-and-link-to-noredir R4 */
+            DIP("branch-and-link-to-noredir r4\n");
+            llPutIReg(14, mkU32( (guest_R15_curr_instr_notENC + 20) | 1 ));
+            llPutIReg(15, getIRegT(4));
+            dres.jk_StopHere = Ijk_NoRedir;
+            dres.whatNext    = Dis_StopHere;
+            goto decode_success;
+         }
+         else
+         // 0x 09 09 EA 49
+         if (getUIntLittleEndianly(code+16) == 0x0909EA49
+                                               /* orr r9,r9,r9 */) {
+            /* IR injection */
+            DIP("IR injection\n");
+            vex_inject_ir(irsb, Iend_LE);
+            // Invalidate the current insn. The reason is that the IRop we're
+            // injecting here can change. In which case the translation has to
+            // be redone. For ease of handling, we simply invalidate all the
+            // time.
+            stmt(IRStmt_Put(OFFB_CMSTART, mkU32(guest_R15_curr_instr_notENC)));
+            stmt(IRStmt_Put(OFFB_CMLEN,   mkU32(20)));
+            llPutIReg(15, mkU32( (guest_R15_curr_instr_notENC + 20) | 1 ));
+            dres.whatNext    = Dis_StopHere;
+            dres.jk_StopHere = Ijk_InvalICache;
+            goto decode_success;
+         }
+         /* We don't know what it is.  Set insn0 so decode_failure
+            can print the insn following the Special-insn preamble. */
+         insn0 = getUShortLittleEndianly(code+16);
+         goto decode_failure;
+         /*NOTREACHED*/
+      }
+
+   }
+
+   /* ----------------------------------------------------------- */
+
+   /* Main Thumb instruction decoder starts here.  It's a series of
+      switches which examine ever longer bit sequences at the MSB of
+      the instruction word, first for 16-bit insns, then for 32-bit
+      insns. */
+
+   /* --- BEGIN ITxxx optimisation analysis --- */
+   /* This is a crucial optimisation for the ITState boilerplate that
+      follows.  Examine the 9 halfwords preceding this instruction,
+      and if we are absolutely sure that none of them constitute an
+      'it' instruction, then we can be sure that this instruction is
+      not under the control of any 'it' instruction, and so
+      guest_ITSTATE must be zero.  So write zero into ITSTATE right
+      now, so that iropt can fold out almost all of the resulting
+      junk.
+
+      If we aren't sure, we can always safely skip this step.  So be a
+      bit conservative about it: only poke around in the same page as
+      this instruction, lest we get a fault from the previous page
+      that would not otherwise have happened.  The saving grace is
+      that such skipping is pretty rare -- it only happens,
+      statistically, 18/4096ths of the time, so is judged unlikely to
+      be a performance problem.
+
+      FIXME: do better.  Take into account the number of insns covered
+      by any IT insns we find, to rule out cases where an IT clearly
+      cannot cover this instruction.  This would improve behaviour for
+      branch targets immediately following an IT-guarded group that is
+      not of full length.  Eg, (and completely ignoring issues of 16-
+      vs 32-bit insn length):
+
+             ite cond
+             insn1
+             insn2
+      label: insn3
+             insn4
+
+      The 'it' only conditionalises insn1 and insn2.  However, the
+      current analysis is conservative and considers insn3 and insn4
+      also possibly guarded.  Hence if 'label:' is the start of a hot
+      loop we will get a big performance hit.
+   */
+   {
+      /* Summary result of this analysis: False == safe but
+         suboptimal. */
+      vassert(guaranteedUnconditional == False);
+
+      UInt pc = guest_R15_curr_instr_notENC;
+      vassert(0 == (pc & 1));
+
+      UInt pageoff = pc & 0xFFF;
+      if (pageoff >= 18) {
+         /* It's safe to poke about in the 9 halfwords preceding this
+            insn.  So, have a look at them. */
+         guaranteedUnconditional = True; /* assume no 'it' insn found,
+                                            till we do */
+         UShort* hwp = (UShort*)(HWord)pc;
+         Int i;
+         for (i = -1; i >= -9; i--) {
+            /* We're in the same page.  (True, but commented out due
+               to expense.) */
+            /*
+            vassert( ( ((UInt)(&hwp[i])) & 0xFFFFF000 )
+                      == ( pc & 0xFFFFF000 ) );
+            */
+            /* All valid IT instructions must have the form 0xBFxy,
+               where x can be anything, but y must be nonzero.  Find
+               the number of insns covered by it (1 .. 4) and check to
+               see if it can possibly reach up to the instruction in
+               question.  Some (x,y) combinations mean UNPREDICTABLE,
+               and the table is constructed to be conservative by
+               returning 4 for those cases, so the analysis is safe
+               even if the code uses unpredictable IT instructions (in
+               which case its authors are nuts, but hey.)  */
+            UShort hwp_i = hwp[i];
+            if (UNLIKELY((hwp_i & 0xFF00) == 0xBF00 && (hwp_i & 0xF) != 0)) {
+               /* might be an 'it' insn. */
+               /* # guarded insns */
+               Int n_guarded = (Int)it_length_table[hwp_i & 0xFF];
+               vassert(n_guarded >= 1 && n_guarded <= 4);
+               if (n_guarded * 2 /* # guarded HWs, worst case */
+                   > (-(i+1)))   /* -(i+1): # remaining HWs after the IT */
+                   /* -(i+0) also seems to work, even though I think
+                      it's wrong.  I don't understand that. */
+                  guaranteedUnconditional = False;
+               break;
+            }
+         }
+      }
+   }
+   /* --- END ITxxx optimisation analysis --- */
+
+   /* Generate the guarding condition for this insn, by examining
+      ITSTATE.  Assign it to condT.  Also, generate new
+      values for ITSTATE ready for stuffing back into the
+      guest state, but don't actually do the Put yet, since it will
+      need to be stuffed back in only after the instruction gets to a
+      point where it is sure to complete.  Mostly we let the code at
+      decode_success handle this, but in cases where the insn contains
+      a side exit, we have to update them before the exit. */
+
+   /* If the ITxxx optimisation analysis above could not prove that
+      this instruction is guaranteed unconditional, we insert a
+      lengthy IR preamble to compute the guarding condition at
+      runtime.  If it can prove it (which obviously we hope is the
+      normal case) then we insert a minimal preamble, which is
+      equivalent to setting guest_ITSTATE to zero and then folding
+      that through the full preamble (which completely disappears). */
+
+   IRTemp condT              = IRTemp_INVALID;
+   IRTemp cond_AND_notInIT_T = IRTemp_INVALID;
+
+   IRTemp new_itstate        = IRTemp_INVALID;
+   vassert(old_itstate == IRTemp_INVALID);
+
+   if (guaranteedUnconditional) {
+      /* BEGIN "partial eval { ITSTATE = 0; STANDARD_PREAMBLE; }" */
+
+      // ITSTATE = 0 :: I32
+      IRTemp z32 = newTemp(Ity_I32);
+      assign(z32, mkU32(0));
+      put_ITSTATE(z32);
+
+      // old_itstate = 0 :: I32
+      //
+      // old_itstate = get_ITSTATE();
+      old_itstate = z32; /* 0 :: I32 */
+
+      // new_itstate = old_itstate >> 8
+      //             = 0 >> 8
+      //             = 0 :: I32
+      //
+      // new_itstate = newTemp(Ity_I32);
+      // assign(new_itstate,
+      //        binop(Iop_Shr32, mkexpr(old_itstate), mkU8(8)));
+      new_itstate = z32;
+
+      // ITSTATE = 0 :: I32(again)
+      //
+      // put_ITSTATE(new_itstate);
+
+      // condT1 = calc_cond_dyn( xor(and(old_istate,0xF0), 0xE0) )
+      //        = calc_cond_dyn( xor(0,0xE0) )
+      //        = calc_cond_dyn ( 0xE0 )
+      //        = 1 :: I32
+      // Not that this matters, since the computed value is not used:
+      // see condT folding below
+      //
+      // IRTemp condT1 = newTemp(Ity_I32);
+      // assign(condT1,
+      //        mk_armg_calculate_condition_dyn(
+      //           binop(Iop_Xor32,
+      //                 binop(Iop_And32, mkexpr(old_itstate), mkU32(0xF0)),
+      //                 mkU32(0xE0))
+      //       )
+      // );
+
+      // condT = 32to8(and32(old_itstate,0xF0)) == 0  ? 1  : condT1
+      //       = 32to8(and32(0,0xF0)) == 0  ? 1  : condT1
+      //       = 32to8(0) == 0  ? 1  : condT1
+      //       = 0 == 0  ? 1  : condT1
+      //       = 1
+      //
+      // condT = newTemp(Ity_I32);
+      // assign(condT, IRExpr_ITE(
+      //                  unop(Iop_32to8, binop(Iop_And32,
+      //                                        mkexpr(old_itstate),
+      //                                        mkU32(0xF0))),
+      //                  mkexpr(condT1),
+      //                  mkU32(1))
+      //       ));
+      condT = newTemp(Ity_I32);
+      assign(condT, mkU32(1));
+
+      // notInITt = xor32(and32(old_itstate, 1), 1)
+      //          = xor32(and32(0, 1), 1)
+      //          = xor32(0, 1)
+      //          = 1 :: I32
+      //
+      // IRTemp notInITt = newTemp(Ity_I32);
+      // assign(notInITt,
+      //        binop(Iop_Xor32,
+      //              binop(Iop_And32, mkexpr(old_itstate), mkU32(1)),
+      //              mkU32(1)));
+
+      // cond_AND_notInIT_T = and32(notInITt, condT)
+      //                    = and32(1, 1)
+      //                    = 1
+      //
+      // cond_AND_notInIT_T = newTemp(Ity_I32);
+      // assign(cond_AND_notInIT_T,
+      //        binop(Iop_And32, mkexpr(notInITt), mkexpr(condT)));
+      cond_AND_notInIT_T = condT; /* 1 :: I32 */
+
+      /* END "partial eval { ITSTATE = 0; STANDARD_PREAMBLE; }" */
+   } else {
+      /* BEGIN { STANDARD PREAMBLE; } */
+
+      old_itstate = get_ITSTATE();
+
+      new_itstate = newTemp(Ity_I32);
+      assign(new_itstate,
+             binop(Iop_Shr32, mkexpr(old_itstate), mkU8(8)));
+
+      put_ITSTATE(new_itstate);
+
+      /* Same strategy as for ARM insns: generate a condition
+         temporary at this point (or IRTemp_INVALID, meaning
+         unconditional).  We leave it to lower-level instruction
+         decoders to decide whether they can generate straight-line
+         code, or whether they must generate a side exit before the
+         instruction.  condT :: Ity_I32 and is always either zero or
+         one. */
+      IRTemp condT1 = newTemp(Ity_I32);
+      assign(condT1,
+             mk_armg_calculate_condition_dyn(
+                binop(Iop_Xor32,
+                      binop(Iop_And32, mkexpr(old_itstate), mkU32(0xF0)),
+                      mkU32(0xE0))
+            )
+      );
+
+      /* This is a bit complex, but needed to make Memcheck understand
+         that, if the condition in old_itstate[7:4] denotes AL (that
+         is, if this instruction is to be executed unconditionally),
+         then condT does not depend on the results of calling the
+         helper.
+
+         We test explicitly for old_itstate[7:4] == AL ^ 0xE, and in
+         that case set condT directly to 1.  Else we use the results
+         of the helper.  Since old_itstate is always defined and
+         because Memcheck does lazy V-bit propagation through ITE,
+         this will cause condT to always be a defined 1 if the
+         condition is 'AL'.  From an execution semantics point of view
+         this is irrelevant since we're merely duplicating part of the
+         behaviour of the helper.  But it makes it clear to Memcheck,
+         in this case, that condT does not in fact depend on the
+         contents of the condition code thunk.  Without it, we get
+         quite a lot of false errors.
+
+         So, just to clarify: from a straight semantics point of view,
+         we can simply do "assign(condT, mkexpr(condT1))", and the
+         simulator still runs fine.  It's just that we get loads of
+         false errors from Memcheck. */
+      condT = newTemp(Ity_I32);
+      assign(condT, IRExpr_ITE(
+                       binop(Iop_CmpNE32, binop(Iop_And32,
+                                                mkexpr(old_itstate),
+                                                mkU32(0xF0)),
+                                          mkU32(0)),
+                       mkexpr(condT1),
+                       mkU32(1)
+            ));
+
+      /* Something we don't have in ARM: generate a 0 or 1 value
+         indicating whether or not we are in an IT block (NB: 0 = in
+         IT block, 1 = not in IT block).  This is used to gate
+         condition code updates in 16-bit Thumb instructions. */
+      IRTemp notInITt = newTemp(Ity_I32);
+      assign(notInITt,
+             binop(Iop_Xor32,
+                   binop(Iop_And32, mkexpr(old_itstate), mkU32(1)),
+                   mkU32(1)));
+
+      /* Compute 'condT && notInITt' -- that is, the instruction is
+         going to execute, and we're not in an IT block.  This is the
+         gating condition for updating condition codes in 16-bit Thumb
+         instructions, except for CMP, CMN and TST. */
+      cond_AND_notInIT_T = newTemp(Ity_I32);
+      assign(cond_AND_notInIT_T,
+             binop(Iop_And32, mkexpr(notInITt), mkexpr(condT)));
+      /* END { STANDARD PREAMBLE; } */
+   }
+
+
+   /* At this point:
+      * ITSTATE has been updated
+      * condT holds the guarding condition for this instruction (0 or 1),
+      * notInITt is 1 if we're in "normal" code, 0 if in an IT block
+      * cond_AND_notInIT_T is the AND of the above two.
+
+      If the instruction proper can't trap, then there's nothing else
+      to do w.r.t. ITSTATE -- just go and generate IR for the
+      insn, taking into account the guarding condition.
+
+      If, however, the instruction might trap, then we must back up
+      ITSTATE to the old value, and re-update it after the potentially
+      trapping IR section.  A trap can happen either via a memory
+      reference or because we need to throw SIGILL.
+
+      If an instruction has a side exit, we need to be sure that any
+      ITSTATE backup is re-updated before the side exit.
+   */
+
+   /* ----------------------------------------------------------- */
+   /* --                                                       -- */
+   /* -- Thumb 16-bit integer instructions                     -- */
+   /* --                                                       -- */
+   /* -- IMPORTANT: references to insn1 or INSN1 are           -- */
+   /* --            not allowed in this section                -- */
+   /* --                                                       -- */
+   /* ----------------------------------------------------------- */
+
+   /* 16-bit instructions inside an IT block, apart from CMP, CMN and
+      TST, do not set the condition codes.  Hence we must dynamically
+      test for this case for every condition code update. */
+
+   IROp   anOp   = Iop_INVALID;
+   const HChar* anOpNm = NULL;
+
+   /* ================ 16-bit 15:6 cases ================ */
+
+   switch (INSN0(15,6)) {
+
+   case 0x10a:   // CMP
+   case 0x10b: { // CMN
+      /* ---------------- CMP Rn, Rm ---------------- */
+      Bool   isCMN = INSN0(15,6) == 0x10b;
+      UInt   rN    = INSN0(2,0);
+      UInt   rM    = INSN0(5,3);
+      IRTemp argL  = newTemp(Ity_I32);
+      IRTemp argR  = newTemp(Ity_I32);
+      assign( argL, getIRegT(rN) );
+      assign( argR, getIRegT(rM) );
+      /* Update flags regardless of whether in an IT block or not. */
+      setFlags_D1_D2( isCMN ? ARMG_CC_OP_ADD : ARMG_CC_OP_SUB,
+                      argL, argR, condT );
+      DIP("%s r%u, r%u\n", isCMN ? "cmn" : "cmp", rN, rM);
+      goto decode_success;
+   }
+
+   case 0x108: {
+      /* ---------------- TST Rn, Rm ---------------- */
+      UInt   rN   = INSN0(2,0);
+      UInt   rM   = INSN0(5,3);
+      IRTemp oldC = newTemp(Ity_I32);
+      IRTemp oldV = newTemp(Ity_I32);
+      IRTemp res  = newTemp(Ity_I32);
+      assign( oldC, mk_armg_calculate_flag_c() );
+      assign( oldV, mk_armg_calculate_flag_v() );
+      assign( res,  binop(Iop_And32, getIRegT(rN), getIRegT(rM)) );
+      /* Update flags regardless of whether in an IT block or not. */
+      setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV, condT );
+      DIP("tst r%u, r%u\n", rN, rM);
+      goto decode_success;
+   }
+
+   case 0x109: {
+      /* ---------------- NEGS Rd, Rm ---------------- */
+      /* Rd = -Rm */
+      UInt   rM   = INSN0(5,3);
+      UInt   rD   = INSN0(2,0);
+      IRTemp arg  = newTemp(Ity_I32);
+      IRTemp zero = newTemp(Ity_I32);
+      assign(arg, getIRegT(rM));
+      assign(zero, mkU32(0));
+      // rD can never be r15
+      putIRegT(rD, binop(Iop_Sub32, mkexpr(zero), mkexpr(arg)), condT);
+      setFlags_D1_D2( ARMG_CC_OP_SUB, zero, arg, cond_AND_notInIT_T);
+      DIP("negs r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x10F: {
+      /* ---------------- MVNS Rd, Rm ---------------- */
+      /* Rd = ~Rm */
+      UInt   rM   = INSN0(5,3);
+      UInt   rD   = INSN0(2,0);
+      IRTemp oldV = newTemp(Ity_I32);
+      IRTemp oldC = newTemp(Ity_I32);
+      IRTemp res  = newTemp(Ity_I32);
+      assign( oldV, mk_armg_calculate_flag_v() );
+      assign( oldC, mk_armg_calculate_flag_c() );
+      assign(res, unop(Iop_Not32, getIRegT(rM)));
+      // rD can never be r15
+      putIRegT(rD, mkexpr(res), condT);
+      setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV,
+                         cond_AND_notInIT_T );
+      DIP("mvns r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x10C:
+      /* ---------------- ORRS Rd, Rm ---------------- */
+      anOp = Iop_Or32; anOpNm = "orr"; goto and_orr_eor_mul;
+   case 0x100:
+      /* ---------------- ANDS Rd, Rm ---------------- */
+      anOp = Iop_And32; anOpNm = "and"; goto and_orr_eor_mul;
+   case 0x101:
+      /* ---------------- EORS Rd, Rm ---------------- */
+      anOp = Iop_Xor32; anOpNm = "eor"; goto and_orr_eor_mul;
+   case 0x10d:
+      /* ---------------- MULS Rd, Rm ---------------- */
+      anOp = Iop_Mul32; anOpNm = "mul"; goto and_orr_eor_mul;
+   and_orr_eor_mul: {
+      /* Rd = Rd `op` Rm */
+      UInt   rM   = INSN0(5,3);
+      UInt   rD   = INSN0(2,0);
+      IRTemp res  = newTemp(Ity_I32);
+      IRTemp oldV = newTemp(Ity_I32);
+      IRTemp oldC = newTemp(Ity_I32);
+      assign( oldV, mk_armg_calculate_flag_v() );
+      assign( oldC, mk_armg_calculate_flag_c() );
+      assign( res, binop(anOp, getIRegT(rD), getIRegT(rM) ));
+      // not safe to read guest state after here
+      // rD can never be r15
+      putIRegT(rD, mkexpr(res), condT);
+      setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV,
+                         cond_AND_notInIT_T );
+      DIP("%s r%u, r%u\n", anOpNm, rD, rM);
+      goto decode_success;
+   }
+
+   case 0x10E: {
+      /* ---------------- BICS Rd, Rm ---------------- */
+      /* Rd = Rd & ~Rm */
+      UInt   rM   = INSN0(5,3);
+      UInt   rD   = INSN0(2,0);
+      IRTemp res  = newTemp(Ity_I32);
+      IRTemp oldV = newTemp(Ity_I32);
+      IRTemp oldC = newTemp(Ity_I32);
+      assign( oldV, mk_armg_calculate_flag_v() );
+      assign( oldC, mk_armg_calculate_flag_c() );
+      assign( res, binop(Iop_And32, getIRegT(rD),
+                                    unop(Iop_Not32, getIRegT(rM) )));
+      // not safe to read guest state after here
+      // rD can never be r15
+      putIRegT(rD, mkexpr(res), condT);
+      setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV,
+                         cond_AND_notInIT_T );
+      DIP("bics r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x105: {
+      /* ---------------- ADCS Rd, Rm ---------------- */
+      /* Rd = Rd + Rm + oldC */
+      UInt   rM   = INSN0(5,3);
+      UInt   rD   = INSN0(2,0);
+      IRTemp argL = newTemp(Ity_I32);
+      IRTemp argR = newTemp(Ity_I32);
+      IRTemp oldC = newTemp(Ity_I32);
+      IRTemp res  = newTemp(Ity_I32);
+      assign(argL, getIRegT(rD));
+      assign(argR, getIRegT(rM));
+      assign(oldC, mk_armg_calculate_flag_c());
+      assign(res, binop(Iop_Add32,
+                        binop(Iop_Add32, mkexpr(argL), mkexpr(argR)),
+                        mkexpr(oldC)));
+      // rD can never be r15
+      putIRegT(rD, mkexpr(res), condT);
+      setFlags_D1_D2_ND( ARMG_CC_OP_ADC, argL, argR, oldC,
+                         cond_AND_notInIT_T );
+      DIP("adcs r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x106: {
+      /* ---------------- SBCS Rd, Rm ---------------- */
+      /* Rd = Rd - Rm - (oldC ^ 1) */
+      UInt   rM   = INSN0(5,3);
+      UInt   rD   = INSN0(2,0);
+      IRTemp argL = newTemp(Ity_I32);
+      IRTemp argR = newTemp(Ity_I32);
+      IRTemp oldC = newTemp(Ity_I32);
+      IRTemp res  = newTemp(Ity_I32);
+      assign(argL, getIRegT(rD));
+      assign(argR, getIRegT(rM));
+      assign(oldC, mk_armg_calculate_flag_c());
+      assign(res, binop(Iop_Sub32,
+                        binop(Iop_Sub32, mkexpr(argL), mkexpr(argR)),
+                        binop(Iop_Xor32, mkexpr(oldC), mkU32(1))));
+      // rD can never be r15
+      putIRegT(rD, mkexpr(res), condT);
+      setFlags_D1_D2_ND( ARMG_CC_OP_SBB, argL, argR, oldC,
+                         cond_AND_notInIT_T );
+      DIP("sbcs r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x2CB: {
+      /* ---------------- UXTB Rd, Rm ---------------- */
+      /* Rd = 8Uto32(Rm) */
+      UInt rM = INSN0(5,3);
+      UInt rD = INSN0(2,0);
+      putIRegT(rD, binop(Iop_And32, getIRegT(rM), mkU32(0xFF)),
+                   condT);
+      DIP("uxtb r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x2C9: {
+      /* ---------------- SXTB Rd, Rm ---------------- */
+      /* Rd = 8Sto32(Rm) */
+      UInt rM = INSN0(5,3);
+      UInt rD = INSN0(2,0);
+      putIRegT(rD, binop(Iop_Sar32,
+                         binop(Iop_Shl32, getIRegT(rM), mkU8(24)),
+                         mkU8(24)),
+                   condT);
+      DIP("sxtb r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x2CA: {
+      /* ---------------- UXTH Rd, Rm ---------------- */
+      /* Rd = 16Uto32(Rm) */
+      UInt rM = INSN0(5,3);
+      UInt rD = INSN0(2,0);
+      putIRegT(rD, binop(Iop_And32, getIRegT(rM), mkU32(0xFFFF)),
+                   condT);
+      DIP("uxth r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x2C8: {
+      /* ---------------- SXTH Rd, Rm ---------------- */
+      /* Rd = 16Sto32(Rm) */
+      UInt rM = INSN0(5,3);
+      UInt rD = INSN0(2,0);
+      putIRegT(rD, binop(Iop_Sar32,
+                         binop(Iop_Shl32, getIRegT(rM), mkU8(16)),
+                         mkU8(16)),
+                   condT);
+      DIP("sxth r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x102:   // LSLS
+   case 0x103:   // LSRS
+   case 0x104:   // ASRS
+   case 0x107: { // RORS
+      /* ---------------- LSLS Rs, Rd ---------------- */
+      /* ---------------- LSRS Rs, Rd ---------------- */
+      /* ---------------- ASRS Rs, Rd ---------------- */
+      /* ---------------- RORS Rs, Rd ---------------- */
+      /* Rd = Rd `op` Rs, and set flags */
+      UInt   rS   = INSN0(5,3);
+      UInt   rD   = INSN0(2,0);
+      IRTemp oldV = newTemp(Ity_I32);
+      IRTemp rDt  = newTemp(Ity_I32);
+      IRTemp rSt  = newTemp(Ity_I32);
+      IRTemp res  = newTemp(Ity_I32);
+      IRTemp resC = newTemp(Ity_I32);
+      const HChar* wot  = "???";
+      assign(rSt, getIRegT(rS));
+      assign(rDt, getIRegT(rD));
+      assign(oldV, mk_armg_calculate_flag_v());
+      /* Does not appear to be the standard 'how' encoding. */
+      switch (INSN0(15,6)) {
+         case 0x102:
+            compute_result_and_C_after_LSL_by_reg(
+               dis_buf, &res, &resC, rDt, rSt, rD, rS
+            );
+            wot = "lsl";
+            break;
+         case 0x103:
+            compute_result_and_C_after_LSR_by_reg(
+               dis_buf, &res, &resC, rDt, rSt, rD, rS
+            );
+            wot = "lsr";
+            break;
+         case 0x104:
+            compute_result_and_C_after_ASR_by_reg(
+               dis_buf, &res, &resC, rDt, rSt, rD, rS
+            );
+            wot = "asr";
+            break;
+         case 0x107:
+            compute_result_and_C_after_ROR_by_reg(
+               dis_buf, &res, &resC, rDt, rSt, rD, rS
+            );
+            wot = "ror";
+            break;
+         default:
+            /*NOTREACHED*/vassert(0);
+      }
+      // not safe to read guest state after this point
+      putIRegT(rD, mkexpr(res), condT);
+      setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, resC, oldV,
+                         cond_AND_notInIT_T );
+      DIP("%ss r%u, r%u\n", wot, rS, rD);
+      goto decode_success;
+   }
+
+   case 0x2E8:   // REV
+   case 0x2E9: { // REV16
+      /* ---------------- REV   Rd, Rm ---------------- */
+      /* ---------------- REV16 Rd, Rm ---------------- */
+      UInt rM = INSN0(5,3);
+      UInt rD = INSN0(2,0);
+      Bool isREV = INSN0(15,6) == 0x2E8;
+      IRTemp arg = newTemp(Ity_I32);
+      assign(arg, getIRegT(rM));
+      IRTemp res = isREV ? gen_REV(arg) : gen_REV16(arg);
+      putIRegT(rD, mkexpr(res), condT);
+      DIP("rev%s r%u, r%u\n", isREV ? "" : "16", rD, rM);
+      goto decode_success;
+   }
+
+   case 0x2EB: { // REVSH
+      /* ---------------- REVSH Rd, Rn ---------------- */
+      UInt rM = INSN0(5,3);
+      UInt rD = INSN0(2,0);
+      IRTemp irt_rM  = newTemp(Ity_I32);
+      IRTemp irt_hi  = newTemp(Ity_I32);
+      IRTemp irt_low = newTemp(Ity_I32);
+      IRTemp irt_res = newTemp(Ity_I32);
+      assign(irt_rM, getIRegT(rM));
+      assign(irt_hi,
+             binop(Iop_Sar32,
+                   binop(Iop_Shl32, mkexpr(irt_rM), mkU8(24)),
+                   mkU8(16)
+             )
+      );
+      assign(irt_low,
+             binop(Iop_And32,
+                   binop(Iop_Shr32, mkexpr(irt_rM), mkU8(8)),
+                   mkU32(0xFF)
+             )
+      );
+      assign(irt_res,
+             binop(Iop_Or32, mkexpr(irt_hi), mkexpr(irt_low))
+      );
+      putIRegT(rD, mkexpr(irt_res), condT);
+      DIP("revsh r%u, r%u\n", rD, rM);
+      goto decode_success;
+   }
+
+   default:
+      break; /* examine the next shortest prefix */
+
+   }
+
+
+   /* ================ 16-bit 15:7 cases ================ */
+
+   switch (INSN0(15,7)) {
+
+   case BITS9(1,0,1,1,0,0,0,0,0): {
+      /* ------------ ADD SP, #imm7 * 4 ------------ */
+      UInt uimm7 = INSN0(6,0);
+      putIRegT(13, binop(Iop_Add32, getIRegT(13), mkU32(uimm7 * 4)),
+                   condT);
+      DIP("add sp, #%u\n", uimm7 * 4);
+      goto decode_success;
+   }
+
+   case BITS9(1,0,1,1,0,0,0,0,1): {
+      /* ------------ SUB SP, #imm7 * 4 ------------ */
+      UInt uimm7 = INSN0(6,0);
+      putIRegT(13, binop(Iop_Sub32, getIRegT(13), mkU32(uimm7 * 4)),
+                   condT);
+      DIP("sub sp, #%u\n", uimm7 * 4);
+      goto decode_success;
+   }
+
+   case BITS9(0,1,0,0,0,1,1,1,0): {
+      /* ---------------- BX rM ---------------- */
+      /* Branch to reg, and optionally switch modes.  Reg contains a
+         suitably encoded address therefore (w CPSR.T at the bottom).
+         Have to special-case r15, as usual. */
+      UInt rM = (INSN0(6,6) << 3) | INSN0(5,3);
+      if (BITS3(0,0,0) == INSN0(2,0)) {
+         IRTemp dst = newTemp(Ity_I32);
+         gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+         mk_skip_over_T16_if_cond_is_false(condT);
+         condT = IRTemp_INVALID;
+         // now uncond
+         if (rM <= 14) {
+            assign( dst, getIRegT(rM) );
+         } else {
+            vassert(rM == 15);
+            assign( dst, mkU32(guest_R15_curr_instr_notENC + 4) );
+         }
+         llPutIReg(15, mkexpr(dst));
+         dres.jk_StopHere = rM == 14 ? Ijk_Ret : Ijk_Boring;
+         dres.whatNext    = Dis_StopHere;
+         DIP("bx r%u (possibly switch to ARM mode)\n", rM);
+         goto decode_success;
+      }
+      break;
+   }
+
+   /* ---------------- BLX rM ---------------- */
+   /* Branch and link to interworking address in rM. */
+   case BITS9(0,1,0,0,0,1,1,1,1): {
+      if (BITS3(0,0,0) == INSN0(2,0)) {
+         UInt rM = (INSN0(6,6) << 3) | INSN0(5,3);
+         IRTemp dst = newTemp(Ity_I32);
+         if (rM <= 14) {
+            gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+            mk_skip_over_T16_if_cond_is_false(condT);
+            condT = IRTemp_INVALID;
+            // now uncond
+            /* We're returning to Thumb code, hence "| 1" */
+            assign( dst, getIRegT(rM) );
+            putIRegT( 14, mkU32( (guest_R15_curr_instr_notENC + 2) | 1 ),
+                          IRTemp_INVALID );
+            llPutIReg(15, mkexpr(dst));
+            dres.jk_StopHere = Ijk_Call;
+            dres.whatNext    = Dis_StopHere;
+            DIP("blx r%u (possibly switch to ARM mode)\n", rM);
+            goto decode_success;
+         }
+         /* else unpredictable, fall through */
+      }
+      break;
+   }
+
+   default:
+      break; /* examine the next shortest prefix */
+
+   }
+
+
+   /* ================ 16-bit 15:8 cases ================ */
+
+   switch (INSN0(15,8)) {
+
+   case BITS8(1,1,0,1,1,1,1,1): {
+      /* ---------------- SVC ---------------- */
+      UInt imm8 = INSN0(7,0);
+      if (imm8 == 0) {
+         /* A syscall.  We can't do this conditionally, hence: */
+         mk_skip_over_T16_if_cond_is_false( condT );
+         // FIXME: what if we have to back up and restart this insn?
+         // then ITSTATE will be wrong (we'll have it as "used")
+         // when it isn't.  Correct is to save ITSTATE in a 
+         // stash pseudo-reg, and back up from that if we have to
+         // restart.
+         // uncond after here
+         llPutIReg(15, mkU32( (guest_R15_curr_instr_notENC + 2) | 1 ));
+         dres.jk_StopHere = Ijk_Sys_syscall;
+         dres.whatNext    = Dis_StopHere;
+         DIP("svc #0x%08x\n", imm8);
+         goto decode_success;
+      }
+      /* else fall through */
+      break;
+   }
+
+   case BITS8(0,1,0,0,0,1,0,0): {
+      /* ---------------- ADD(HI) Rd, Rm ---------------- */
+      UInt h1 = INSN0(7,7);
+      UInt h2 = INSN0(6,6);
+      UInt rM = (h2 << 3) | INSN0(5,3);
+      UInt rD = (h1 << 3) | INSN0(2,0);
+      //if (h1 == 0 && h2 == 0) { // Original T1 was more restrictive
+      if (rD == 15 && rM == 15) {
+         // then it's invalid
+      } else {
+         IRTemp res = newTemp(Ity_I32);
+         assign( res, binop(Iop_Add32, getIRegT(rD), getIRegT(rM) ));
+         if (rD != 15) {
+            putIRegT( rD, mkexpr(res), condT );
+         } else {
+            /* Only allowed outside or last-in IT block; SIGILL if not so. */
+            gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+            /* jump over insn if not selected */
+            mk_skip_over_T16_if_cond_is_false(condT);
+            condT = IRTemp_INVALID;
+            // now uncond
+            /* non-interworking branch */
+            llPutIReg(15, binop(Iop_Or32, mkexpr(res), mkU32(1)));
+            dres.jk_StopHere = Ijk_Boring;
+            dres.whatNext    = Dis_StopHere;
+         }
+         DIP("add(hi) r%u, r%u\n", rD, rM);
+         goto decode_success;
+      }
+      break;
+   }
+
+   case BITS8(0,1,0,0,0,1,0,1): {
+      /* ---------------- CMP(HI) Rd, Rm ---------------- */
+      UInt h1 = INSN0(7,7);
+      UInt h2 = INSN0(6,6);
+      UInt rM = (h2 << 3) | INSN0(5,3);
+      UInt rN = (h1 << 3) | INSN0(2,0);
+      if (h1 != 0 || h2 != 0) {
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         assign( argL, getIRegT(rN) );
+         assign( argR, getIRegT(rM) );
+         /* Update flags regardless of whether in an IT block or not. */
+         setFlags_D1_D2( ARMG_CC_OP_SUB, argL, argR, condT );
+         DIP("cmphi r%u, r%u\n", rN, rM);
+         goto decode_success;
+      }
+      break;
+   }
+
+   case BITS8(0,1,0,0,0,1,1,0): {
+      /* ---------------- MOV(HI) Rd, Rm ---------------- */
+      UInt h1 = INSN0(7,7);
+      UInt h2 = INSN0(6,6);
+      UInt rM = (h2 << 3) | INSN0(5,3);
+      UInt rD = (h1 << 3) | INSN0(2,0);
+      /* The old ARM ARM seems to disallow the case where both Rd and
+         Rm are "low" registers, but newer versions allow it. */
+      if (1 /*h1 != 0 || h2 != 0*/) {
+         IRTemp val = newTemp(Ity_I32);
+         assign( val, getIRegT(rM) );
+         if (rD != 15) {
+            putIRegT( rD, mkexpr(val), condT );
+         } else {
+            /* Only allowed outside or last-in IT block; SIGILL if not so. */
+            gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+            /* jump over insn if not selected */
+            mk_skip_over_T16_if_cond_is_false(condT);
+            condT = IRTemp_INVALID;
+            // now uncond
+            /* non-interworking branch */
+            llPutIReg(15, binop(Iop_Or32, mkexpr(val), mkU32(1)));
+            dres.jk_StopHere = rM == 14 ? Ijk_Ret : Ijk_Boring;
+            dres.whatNext    = Dis_StopHere;
+         }
+         DIP("mov r%u, r%u\n", rD, rM);
+         goto decode_success;
+      }
+      break;
+   }
+
+   case BITS8(1,0,1,1,1,1,1,1): {
+      /* ---------------- IT (if-then) ---------------- */
+      UInt firstcond = INSN0(7,4);
+      UInt mask = INSN0(3,0);
+      UInt newITSTATE = 0;
+      /* This is the ITSTATE represented as described in
+         libvex_guest_arm.h.  It is not the ARM ARM representation. */
+      HChar c1 = '.';
+      HChar c2 = '.';
+      HChar c3 = '.';
+      Bool valid = compute_ITSTATE( &newITSTATE, &c1, &c2, &c3,
+                                    firstcond, mask );
+      if (valid && firstcond != 0xF/*NV*/) {
+         /* Not allowed in an IT block; SIGILL if so. */
+         gen_SIGILL_T_if_in_ITBlock(old_itstate, new_itstate);
+
+         IRTemp t = newTemp(Ity_I32);
+         assign(t, mkU32(newITSTATE));
+         put_ITSTATE(t);
+
+         DIP("it%c%c%c %s\n", c1, c2, c3, nCC(firstcond));
+         goto decode_success;
+      }
+      break;
+   }
+
+   case BITS8(1,0,1,1,0,0,0,1):
+   case BITS8(1,0,1,1,0,0,1,1):
+   case BITS8(1,0,1,1,1,0,0,1):
+   case BITS8(1,0,1,1,1,0,1,1): {
+      /* ---------------- CB{N}Z ---------------- */
+      UInt rN    = INSN0(2,0);
+      UInt bOP   = INSN0(11,11);
+      UInt imm32 = (INSN0(9,9) << 6) | (INSN0(7,3) << 1);
+      gen_SIGILL_T_if_in_ITBlock(old_itstate, new_itstate);
+      /* It's a conditional branch forward. */
+      IRTemp kond = newTemp(Ity_I1);
+      assign( kond, binop(bOP ? Iop_CmpNE32 : Iop_CmpEQ32,
+                          getIRegT(rN), mkU32(0)) );
+
+      vassert(0 == (guest_R15_curr_instr_notENC & 1));
+      /* Looks like the nearest insn we can branch to is the one after
+         next.  That makes sense, as there's no point in being able to
+         encode a conditional branch to the next instruction. */
+      UInt dst = (guest_R15_curr_instr_notENC + 4 + imm32) | 1;
+      stmt(IRStmt_Exit( mkexpr(kond),
+                        Ijk_Boring,
+                        IRConst_U32(toUInt(dst)),
+                        OFFB_R15T ));
+      DIP("cb%s r%u, 0x%x\n", bOP ? "nz" : "z", rN, dst - 1);
+      goto decode_success;
+   }
+
+   default:
+      break; /* examine the next shortest prefix */
+
+   }
+
+
+   /* ================ 16-bit 15:9 cases ================ */
+
+   switch (INSN0(15,9)) {
+
+   case BITS7(1,0,1,1,0,1,0): {
+      /* ---------------- PUSH ---------------- */
+      /* This is a bit like STMxx, but way simpler. Complications we
+         don't have to deal with:
+         * SP being one of the transferred registers
+         * direction (increment vs decrement)
+         * before-vs-after-ness
+      */
+      Int  i, nRegs;
+      UInt bitR    = INSN0(8,8);
+      UInt regList = INSN0(7,0);
+      if (bitR) regList |= (1 << 14);
+   
+      /* At least one register must be transferred, else result is
+         UNPREDICTABLE. */
+      if (regList != 0) {
+         /* Since we can't generate a guaranteed non-trapping IR
+            sequence, (1) jump over the insn if it is gated false, and
+            (2) back out the ITSTATE update. */
+         mk_skip_over_T16_if_cond_is_false(condT);
+         condT = IRTemp_INVALID;
+         put_ITSTATE(old_itstate);
+         // now uncond
+
+         nRegs = 0;
+         for (i = 0; i < 16; i++) {
+            if ((regList & (1 << i)) != 0)
+               nRegs++;
+         }
+         vassert(nRegs >= 1 && nRegs <= 9);
+
+         /* Move SP down first of all, so we're "covered".  And don't
+            mess with its alignment. */
+         IRTemp newSP = newTemp(Ity_I32);
+         assign(newSP, binop(Iop_Sub32, getIRegT(13), mkU32(4 * nRegs)));
+         putIRegT(13, mkexpr(newSP), IRTemp_INVALID);
+
+         /* Generate a transfer base address as a forced-aligned
+            version of the final SP value. */
+         IRTemp base = newTemp(Ity_I32);
+         assign(base, binop(Iop_And32, mkexpr(newSP), mkU32(~3)));
+
+         /* Now the transfers */
+         nRegs = 0;
+         for (i = 0; i < 16; i++) {
+            if ((regList & (1 << i)) != 0) {
+               storeLE( binop(Iop_Add32, mkexpr(base), mkU32(4 * nRegs)),
+                        getIRegT(i) );
+               nRegs++;
+            }
+         }
+
+         /* Reinstate the ITSTATE update. */
+         put_ITSTATE(new_itstate);
+
+         DIP("push {%s0x%04x}\n", bitR ? "lr," : "", regList & 0xFF);
+         goto decode_success;
+      }
+      break;
+   }
+
+   case BITS7(1,0,1,1,1,1,0): {
+      /* ---------------- POP ---------------- */
+      Int  i, nRegs;
+      UInt bitR    = INSN0(8,8);
+      UInt regList = INSN0(7,0);
+   
+      /* At least one register must be transferred, else result is
+         UNPREDICTABLE. */
+      if (regList != 0 || bitR) {
+         /* Since we can't generate a guaranteed non-trapping IR
+            sequence, (1) jump over the insn if it is gated false, and
+            (2) back out the ITSTATE update. */
+         mk_skip_over_T16_if_cond_is_false(condT);
+         condT = IRTemp_INVALID;
+         put_ITSTATE(old_itstate);
+         // now uncond
+
+         nRegs = 0;
+         for (i = 0; i < 8; i++) {
+            if ((regList & (1 << i)) != 0)
+               nRegs++;
+         }
+         vassert(nRegs >= 0 && nRegs <= 8);
+         vassert(bitR == 0 || bitR == 1);
+
+         IRTemp oldSP = newTemp(Ity_I32);
+         assign(oldSP, getIRegT(13));
+
+         /* Generate a transfer base address as a forced-aligned
+            version of the original SP value. */
+         IRTemp base = newTemp(Ity_I32);
+         assign(base, binop(Iop_And32, mkexpr(oldSP), mkU32(~3)));
+
+         /* Compute a new value for SP, but don't install it yet, so
+            that we're "covered" until all the transfers are done.
+            And don't mess with its alignment. */
+         IRTemp newSP = newTemp(Ity_I32);
+         assign(newSP, binop(Iop_Add32, mkexpr(oldSP),
+                                        mkU32(4 * (nRegs + bitR))));
+
+         /* Now the transfers, not including PC */
+         nRegs = 0;
+         for (i = 0; i < 8; i++) {
+            if ((regList & (1 << i)) != 0) {
+               putIRegT(i, loadLE( Ity_I32,
+                                   binop(Iop_Add32, mkexpr(base),
+                                                    mkU32(4 * nRegs))),
+                           IRTemp_INVALID );
+               nRegs++;
+            }
+         }
+
+         IRTemp newPC = IRTemp_INVALID;
+         if (bitR) {
+            newPC = newTemp(Ity_I32);
+            assign( newPC, loadLE( Ity_I32,
+                                   binop(Iop_Add32, mkexpr(base),
+                                                    mkU32(4 * nRegs))));
+         }
+
+         /* Now we can safely install the new SP value */
+         putIRegT(13, mkexpr(newSP), IRTemp_INVALID);
+
+         /* Reinstate the ITSTATE update. */
+         put_ITSTATE(new_itstate);
+
+         /* now, do we also have to do a branch?  If so, it turns out
+            that the new PC value is encoded exactly as we need it to
+            be -- with CPSR.T in the bottom bit.  So we can simply use
+            it as is, no need to mess with it.  Note, therefore, this
+            is an interworking return. */
+         if (bitR) {
+            llPutIReg(15, mkexpr(newPC));
+            dres.jk_StopHere = Ijk_Ret;
+            dres.whatNext    = Dis_StopHere;
+         }
+
+         DIP("pop {%s0x%04x}\n", bitR ? "pc," : "", regList & 0xFF);
+         goto decode_success;
+      }
+      break;
+   }
+
+   case BITS7(0,0,0,1,1,1,0):   /* ADDS */
+   case BITS7(0,0,0,1,1,1,1): { /* SUBS */
+      /* ---------------- ADDS Rd, Rn, #uimm3 ---------------- */
+      /* ---------------- SUBS Rd, Rn, #uimm3 ---------------- */
+      UInt   uimm3 = INSN0(8,6);
+      UInt   rN    = INSN0(5,3);
+      UInt   rD    = INSN0(2,0);
+      UInt   isSub = INSN0(9,9);
+      IRTemp argL  = newTemp(Ity_I32);
+      IRTemp argR  = newTemp(Ity_I32);
+      assign( argL, getIRegT(rN) );
+      assign( argR, mkU32(uimm3) );
+      putIRegT(rD, binop(isSub ? Iop_Sub32 : Iop_Add32,
+                         mkexpr(argL), mkexpr(argR)),
+                   condT);
+      setFlags_D1_D2( isSub ? ARMG_CC_OP_SUB : ARMG_CC_OP_ADD,
+                      argL, argR, cond_AND_notInIT_T );
+      DIP("%s r%u, r%u, #%u\n", isSub ? "subs" : "adds", rD, rN, uimm3);
+      goto decode_success;
+   }
+
+   case BITS7(0,0,0,1,1,0,0):   /* ADDS */
+   case BITS7(0,0,0,1,1,0,1): { /* SUBS */
+      /* ---------------- ADDS Rd, Rn, Rm ---------------- */
+      /* ---------------- SUBS Rd, Rn, Rm ---------------- */
+      UInt   rM    = INSN0(8,6);
+      UInt   rN    = INSN0(5,3);
+      UInt   rD    = INSN0(2,0);
+      UInt   isSub = INSN0(9,9);
+      IRTemp argL  = newTemp(Ity_I32);
+      IRTemp argR  = newTemp(Ity_I32);
+      assign( argL, getIRegT(rN) );
+      assign( argR, getIRegT(rM) );
+      putIRegT( rD, binop(isSub ? Iop_Sub32 : Iop_Add32,
+                          mkexpr(argL), mkexpr(argR)),
+                    condT );
+      setFlags_D1_D2( isSub ? ARMG_CC_OP_SUB : ARMG_CC_OP_ADD,
+                      argL, argR, cond_AND_notInIT_T );
+      DIP("%s r%u, r%u, r%u\n", isSub ? "subs" : "adds", rD, rN, rM);
+      goto decode_success;
+   }
+
+   case BITS7(0,1,0,1,0,0,0):   /* STR */
+   case BITS7(0,1,0,1,1,0,0): { /* LDR */
+      /* ------------- LDR Rd, [Rn, Rm] ------------- */
+      /* ------------- STR Rd, [Rn, Rm] ------------- */
+      /* LDR/STR Rd, [Rn + Rm] */
+      UInt    rD   = INSN0(2,0);
+      UInt    rN   = INSN0(5,3);
+      UInt    rM   = INSN0(8,6);
+      UInt    isLD = INSN0(11,11);
+
+      IRExpr* ea = binop(Iop_Add32, getIRegT(rN), getIRegT(rM));
+      put_ITSTATE(old_itstate); // backout
+      if (isLD) {
+         IRTemp tD = newTemp(Ity_I32);
+         loadGuardedLE( tD, ILGop_Ident32, ea, llGetIReg(rD), condT );
+         putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      } else {
+         storeGuardedLE(ea, getIRegT(rD), condT);
+      }
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("%s r%u, [r%u, r%u]\n", isLD ? "ldr" : "str", rD, rN, rM);
+      goto decode_success;
+   }
+
+   case BITS7(0,1,0,1,0,0,1):
+   case BITS7(0,1,0,1,1,0,1): {
+      /* ------------- LDRH Rd, [Rn, Rm] ------------- */
+      /* ------------- STRH Rd, [Rn, Rm] ------------- */
+      /* LDRH/STRH Rd, [Rn + Rm] */
+      UInt    rD   = INSN0(2,0);
+      UInt    rN   = INSN0(5,3);
+      UInt    rM   = INSN0(8,6);
+      UInt    isLD = INSN0(11,11);
+
+      IRExpr* ea = binop(Iop_Add32, getIRegT(rN), getIRegT(rM));
+      put_ITSTATE(old_itstate); // backout
+      if (isLD) {
+         IRTemp tD = newTemp(Ity_I32);
+         loadGuardedLE(tD, ILGop_16Uto32, ea, llGetIReg(rD), condT);
+         putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      } else {
+         storeGuardedLE( ea, unop(Iop_32to16, getIRegT(rD)), condT );
+      }
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("%sh r%u, [r%u, r%u]\n", isLD ? "ldr" : "str", rD, rN, rM);
+      goto decode_success;
+   }
+
+   case BITS7(0,1,0,1,1,1,1): {
+      /* ------------- LDRSH Rd, [Rn, Rm] ------------- */
+      /* LDRSH Rd, [Rn + Rm] */
+      UInt    rD = INSN0(2,0);
+      UInt    rN = INSN0(5,3);
+      UInt    rM = INSN0(8,6);
+
+      IRExpr* ea = binop(Iop_Add32, getIRegT(rN), getIRegT(rM));
+      put_ITSTATE(old_itstate); // backout
+      IRTemp tD = newTemp(Ity_I32);
+      loadGuardedLE(tD, ILGop_16Sto32, ea, llGetIReg(rD), condT);
+      putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("ldrsh r%u, [r%u, r%u]\n", rD, rN, rM);
+      goto decode_success;
+   }
+
+   case BITS7(0,1,0,1,0,1,1): {
+      /* ------------- LDRSB Rd, [Rn, Rm] ------------- */
+      /* LDRSB Rd, [Rn + Rm] */
+      UInt    rD = INSN0(2,0);
+      UInt    rN = INSN0(5,3);
+      UInt    rM = INSN0(8,6);
+
+      IRExpr* ea = binop(Iop_Add32, getIRegT(rN), getIRegT(rM));
+      put_ITSTATE(old_itstate); // backout
+      IRTemp tD = newTemp(Ity_I32);
+      loadGuardedLE(tD, ILGop_8Sto32, ea, llGetIReg(rD), condT);
+      putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("ldrsb r%u, [r%u, r%u]\n", rD, rN, rM);
+      goto decode_success;
+   }
+
+   case BITS7(0,1,0,1,0,1,0):
+   case BITS7(0,1,0,1,1,1,0): {
+      /* ------------- LDRB Rd, [Rn, Rm] ------------- */
+      /* ------------- STRB Rd, [Rn, Rm] ------------- */
+      /* LDRB/STRB Rd, [Rn + Rm] */
+      UInt    rD   = INSN0(2,0);
+      UInt    rN   = INSN0(5,3);
+      UInt    rM   = INSN0(8,6);
+      UInt    isLD = INSN0(11,11);
+
+      IRExpr* ea = binop(Iop_Add32, getIRegT(rN), getIRegT(rM));
+      put_ITSTATE(old_itstate); // backout
+      if (isLD) {
+         IRTemp tD = newTemp(Ity_I32);
+         loadGuardedLE(tD, ILGop_8Uto32, ea, llGetIReg(rD), condT);
+         putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      } else {
+         storeGuardedLE( ea, unop(Iop_32to8, getIRegT(rD)), condT );
+      }
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("%sb r%u, [r%u, r%u]\n", isLD ? "ldr" : "str", rD, rN, rM);
+      goto decode_success;
+   }
+
+   default:
+      break; /* examine the next shortest prefix */
+
+   }
+
+
+   /* ================ 16-bit 15:11 cases ================ */
+
+   switch (INSN0(15,11)) {
+
+   case BITS5(0,0,1,1,0):
+   case BITS5(0,0,1,1,1): {
+      /* ---------------- ADDS Rn, #uimm8 ---------------- */
+      /* ---------------- SUBS Rn, #uimm8 ---------------- */
+      UInt   isSub = INSN0(11,11);
+      UInt   rN    = INSN0(10,8);
+      UInt   uimm8 = INSN0(7,0);
+      IRTemp argL  = newTemp(Ity_I32);
+      IRTemp argR  = newTemp(Ity_I32);
+      assign( argL, getIRegT(rN) );
+      assign( argR, mkU32(uimm8) );
+      putIRegT( rN, binop(isSub ? Iop_Sub32 : Iop_Add32,
+                          mkexpr(argL), mkexpr(argR)), condT );
+      setFlags_D1_D2( isSub ? ARMG_CC_OP_SUB : ARMG_CC_OP_ADD,
+                      argL, argR, cond_AND_notInIT_T );
+      DIP("%s r%u, #%u\n", isSub ? "subs" : "adds", rN, uimm8);
+      goto decode_success;
+   }
+
+   case BITS5(1,0,1,0,0): {
+      /* ---------------- ADD rD, PC, #imm8 * 4 ---------------- */
+      /* a.k.a. ADR */
+      /* rD = align4(PC) + imm8 * 4 */
+      UInt rD   = INSN0(10,8);
+      UInt imm8 = INSN0(7,0);
+      putIRegT(rD, binop(Iop_Add32, 
+                         binop(Iop_And32, getIRegT(15), mkU32(~3U)),
+                         mkU32(imm8 * 4)),
+                   condT);
+      DIP("add r%u, pc, #%u\n", rD, imm8 * 4);
+      goto decode_success;
+   }
+
+   case BITS5(1,0,1,0,1): {
+      /* ---------------- ADD rD, SP, #imm8 * 4 ---------------- */
+      UInt rD   = INSN0(10,8);
+      UInt imm8 = INSN0(7,0);
+      putIRegT(rD, binop(Iop_Add32, getIRegT(13), mkU32(imm8 * 4)),
+                   condT);
+      DIP("add r%u, r13, #%u\n", rD, imm8 * 4);
+      goto decode_success;
+   }
+
+   case BITS5(0,0,1,0,1): {
+      /* ---------------- CMP Rn, #uimm8 ---------------- */
+      UInt   rN    = INSN0(10,8);
+      UInt   uimm8 = INSN0(7,0);
+      IRTemp argL  = newTemp(Ity_I32);
+      IRTemp argR  = newTemp(Ity_I32);
+      assign( argL, getIRegT(rN) );
+      assign( argR, mkU32(uimm8) );
+      /* Update flags regardless of whether in an IT block or not. */
+      setFlags_D1_D2( ARMG_CC_OP_SUB, argL, argR, condT );
+      DIP("cmp r%u, #%u\n", rN, uimm8);
+      goto decode_success;
+   }
+
+   case BITS5(0,0,1,0,0): {
+      /* -------------- (T1) MOVS Rn, #uimm8 -------------- */
+      UInt   rD    = INSN0(10,8);
+      UInt   uimm8 = INSN0(7,0);
+      IRTemp oldV  = newTemp(Ity_I32);
+      IRTemp oldC  = newTemp(Ity_I32);
+      IRTemp res   = newTemp(Ity_I32);
+      assign( oldV, mk_armg_calculate_flag_v() );
+      assign( oldC, mk_armg_calculate_flag_c() );
+      assign( res, mkU32(uimm8) );
+      putIRegT(rD, mkexpr(res), condT);
+      setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV,
+                         cond_AND_notInIT_T );
+      DIP("movs r%u, #%u\n", rD, uimm8);
+      goto decode_success;
+   }
+
+   case BITS5(0,1,0,0,1): {
+      /* ------------- LDR Rd, [PC, #imm8 * 4] ------------- */
+      /* LDR Rd, [align4(PC) + imm8 * 4] */
+      UInt   rD   = INSN0(10,8);
+      UInt   imm8 = INSN0(7,0);
+      IRTemp ea   = newTemp(Ity_I32);
+
+      assign(ea, binop(Iop_Add32, 
+                       binop(Iop_And32, getIRegT(15), mkU32(~3U)),
+                       mkU32(imm8 * 4)));
+      put_ITSTATE(old_itstate); // backout
+      IRTemp tD = newTemp(Ity_I32);
+      loadGuardedLE( tD, ILGop_Ident32, mkexpr(ea), llGetIReg(rD), condT );
+      putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("ldr r%u, [pc, #%u]\n", rD, imm8 * 4);
+      goto decode_success;
+   }
+
+   case BITS5(0,1,1,0,0):   /* STR */
+   case BITS5(0,1,1,0,1): { /* LDR */
+      /* ------------- LDR Rd, [Rn, #imm5 * 4] ------------- */
+      /* ------------- STR Rd, [Rn, #imm5 * 4] ------------- */
+      /* LDR/STR Rd, [Rn + imm5 * 4] */
+      UInt    rD   = INSN0(2,0);
+      UInt    rN   = INSN0(5,3);
+      UInt    imm5 = INSN0(10,6);
+      UInt    isLD = INSN0(11,11);
+
+      IRExpr* ea = binop(Iop_Add32, getIRegT(rN), mkU32(imm5 * 4));
+      put_ITSTATE(old_itstate); // backout
+      if (isLD) {
+         IRTemp tD = newTemp(Ity_I32);
+         loadGuardedLE( tD, ILGop_Ident32, ea, llGetIReg(rD), condT );
+         putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      } else {
+         storeGuardedLE( ea, getIRegT(rD), condT );
+      }
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("%s r%u, [r%u, #%u]\n", isLD ? "ldr" : "str", rD, rN, imm5 * 4);
+      goto decode_success;
+   }
+
+   case BITS5(1,0,0,0,0):   /* STRH */
+   case BITS5(1,0,0,0,1): { /* LDRH */
+      /* ------------- LDRH Rd, [Rn, #imm5 * 2] ------------- */
+      /* ------------- STRH Rd, [Rn, #imm5 * 2] ------------- */
+      /* LDRH/STRH Rd, [Rn + imm5 * 2] */
+      UInt    rD   = INSN0(2,0);
+      UInt    rN   = INSN0(5,3);
+      UInt    imm5 = INSN0(10,6);
+      UInt    isLD = INSN0(11,11);
+
+      IRExpr* ea = binop(Iop_Add32, getIRegT(rN), mkU32(imm5 * 2));
+      put_ITSTATE(old_itstate); // backout
+      if (isLD) {
+         IRTemp tD = newTemp(Ity_I32);
+         loadGuardedLE( tD, ILGop_16Uto32, ea, llGetIReg(rD), condT );
+         putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      } else {
+         storeGuardedLE( ea, unop(Iop_32to16, getIRegT(rD)), condT );
+      }
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("%sh r%u, [r%u, #%u]\n", isLD ? "ldr" : "str", rD, rN, imm5 * 2);
+      goto decode_success;
+   }
+
+   case BITS5(0,1,1,1,0):   /* STRB */
+   case BITS5(0,1,1,1,1): { /* LDRB */
+      /* ------------- LDRB Rd, [Rn, #imm5] ------------- */
+      /* ------------- STRB Rd, [Rn, #imm5] ------------- */
+      /* LDRB/STRB Rd, [Rn + imm5] */
+      UInt    rD   = INSN0(2,0);
+      UInt    rN   = INSN0(5,3);
+      UInt    imm5 = INSN0(10,6);
+      UInt    isLD = INSN0(11,11);
+
+      IRExpr* ea = binop(Iop_Add32, getIRegT(rN), mkU32(imm5));
+      put_ITSTATE(old_itstate); // backout
+      if (isLD) {
+         IRTemp tD = newTemp(Ity_I32);
+         loadGuardedLE( tD, ILGop_8Uto32, ea, llGetIReg(rD), condT );
+         putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      } else {
+         storeGuardedLE( ea, unop(Iop_32to8, getIRegT(rD)), condT );
+      }
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("%sb r%u, [r%u, #%u]\n", isLD ? "ldr" : "str", rD, rN, imm5);
+      goto decode_success;
+   }
+
+   case BITS5(1,0,0,1,0):   /* STR */
+   case BITS5(1,0,0,1,1): { /* LDR */
+      /* ------------- LDR Rd, [SP, #imm8 * 4] ------------- */
+      /* ------------- STR Rd, [SP, #imm8 * 4] ------------- */
+      /* LDR/STR Rd, [SP + imm8 * 4] */
+      UInt rD    = INSN0(10,8);
+      UInt imm8  = INSN0(7,0);
+      UInt isLD  = INSN0(11,11);
+
+      IRExpr* ea = binop(Iop_Add32, getIRegT(13), mkU32(imm8 * 4));
+      put_ITSTATE(old_itstate); // backout
+      if (isLD) {
+         IRTemp tD = newTemp(Ity_I32);
+         loadGuardedLE( tD, ILGop_Ident32, ea, llGetIReg(rD), condT );
+         putIRegT(rD, mkexpr(tD), IRTemp_INVALID);
+      } else {
+         storeGuardedLE(ea, getIRegT(rD), condT);
+      }
+      put_ITSTATE(new_itstate); // restore
+
+      DIP("%s r%u, [sp, #%u]\n", isLD ? "ldr" : "str", rD, imm8 * 4);
+      goto decode_success;
+   }
+
+   case BITS5(1,1,0,0,1): {
+      /* ------------- LDMIA Rn!, {reglist} ------------- */
+      Int i, nRegs = 0;
+      UInt rN   = INSN0(10,8);
+      UInt list = INSN0(7,0);
+      /* Empty lists aren't allowed. */
+      if (list != 0) {
+         mk_skip_over_T16_if_cond_is_false(condT);
+         condT = IRTemp_INVALID;
+         put_ITSTATE(old_itstate);
+         // now uncond
+
+         IRTemp oldRn = newTemp(Ity_I32);
+         IRTemp base  = newTemp(Ity_I32);
+         assign(oldRn, getIRegT(rN));
+         assign(base, binop(Iop_And32, mkexpr(oldRn), mkU32(~3U)));
+         for (i = 0; i < 8; i++) {
+            if (0 == (list & (1 << i)))
+               continue;
+            nRegs++;
+            putIRegT(
+               i, loadLE(Ity_I32,
+                         binop(Iop_Add32, mkexpr(base),
+                                          mkU32(nRegs * 4 - 4))),
+               IRTemp_INVALID
+            );
+         }
+         /* Only do the writeback for rN if it isn't in the list of
+            registers to be transferred. */
+         if (0 == (list & (1 << rN))) {
+            putIRegT(rN,
+                     binop(Iop_Add32, mkexpr(oldRn),
+                                      mkU32(nRegs * 4)),
+                     IRTemp_INVALID
+            );
+         }
+
+         /* Reinstate the ITSTATE update. */
+         put_ITSTATE(new_itstate);
+
+         DIP("ldmia r%u!, {0x%04x}\n", rN, list);
+         goto decode_success;
+      }
+      break;
+   }
+
+   case BITS5(1,1,0,0,0): {
+      /* ------------- STMIA Rn!, {reglist} ------------- */
+      Int i, nRegs = 0;
+      UInt rN   = INSN0(10,8);
+      UInt list = INSN0(7,0);
+      /* Empty lists aren't allowed.  Also, if rN is in the list then
+         it must be the lowest numbered register in the list. */
+      Bool valid = list != 0;
+      if (valid && 0 != (list & (1 << rN))) {
+         for (i = 0; i < rN; i++) {
+            if (0 != (list & (1 << i)))
+               valid = False;
+         }
+      }
+      if (valid) {
+         mk_skip_over_T16_if_cond_is_false(condT);
+         condT = IRTemp_INVALID;
+         put_ITSTATE(old_itstate);
+         // now uncond
+
+         IRTemp oldRn = newTemp(Ity_I32);
+         IRTemp base = newTemp(Ity_I32);
+         assign(oldRn, getIRegT(rN));
+         assign(base, binop(Iop_And32, mkexpr(oldRn), mkU32(~3U)));
+         for (i = 0; i < 8; i++) {
+            if (0 == (list & (1 << i)))
+               continue;
+            nRegs++;
+            storeLE( binop(Iop_Add32, mkexpr(base), mkU32(nRegs * 4 - 4)),
+                     getIRegT(i) );
+         }
+         /* Always do the writeback. */
+         putIRegT(rN,
+                  binop(Iop_Add32, mkexpr(oldRn),
+                                   mkU32(nRegs * 4)),
+                  IRTemp_INVALID);
+
+         /* Reinstate the ITSTATE update. */
+         put_ITSTATE(new_itstate);
+
+         DIP("stmia r%u!, {0x%04x}\n", rN, list);
+         goto decode_success;
+      }
+      break;
+   }
+
+   case BITS5(0,0,0,0,0):   /* LSLS */
+   case BITS5(0,0,0,0,1):   /* LSRS */
+   case BITS5(0,0,0,1,0): { /* ASRS */
+      /* ---------------- LSLS Rd, Rm, #imm5 ---------------- */
+      /* ---------------- LSRS Rd, Rm, #imm5 ---------------- */
+      /* ---------------- ASRS Rd, Rm, #imm5 ---------------- */
+      UInt   rD   = INSN0(2,0);
+      UInt   rM   = INSN0(5,3);
+      UInt   imm5 = INSN0(10,6);
+      IRTemp res  = newTemp(Ity_I32);
+      IRTemp resC = newTemp(Ity_I32);
+      IRTemp rMt  = newTemp(Ity_I32);
+      IRTemp oldV = newTemp(Ity_I32);
+      const HChar* wot  = "???";
+      assign(rMt, getIRegT(rM));
+      assign(oldV, mk_armg_calculate_flag_v());
+      /* Looks like INSN0(12,11) are the standard 'how' encoding.
+         Could compactify if the ROR case later appears. */
+      switch (INSN0(15,11)) {
+         case BITS5(0,0,0,0,0):
+            compute_result_and_C_after_LSL_by_imm5(
+               dis_buf, &res, &resC, rMt, imm5, rM
+            );
+            wot = "lsl";
+            break;
+         case BITS5(0,0,0,0,1):
+            compute_result_and_C_after_LSR_by_imm5(
+               dis_buf, &res, &resC, rMt, imm5, rM
+            );
+            wot = "lsr";
+            break;
+         case BITS5(0,0,0,1,0):
+            compute_result_and_C_after_ASR_by_imm5(
+               dis_buf, &res, &resC, rMt, imm5, rM
+            );
+            wot = "asr";
+            break;
+         default:
+            /*NOTREACHED*/vassert(0);
+      }
+      // not safe to read guest state after this point
+      putIRegT(rD, mkexpr(res), condT);
+      setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, resC, oldV,
+                         cond_AND_notInIT_T );
+      /* ignore buf and roll our own output */
+      DIP("%ss r%u, r%u, #%u\n", wot, rD, rM, imm5);
+      goto decode_success;
+   }
+
+   case BITS5(1,1,1,0,0): {
+      /* ---------------- B #simm11 ---------------- */
+      Int  simm11 = INSN0(10,0);
+           simm11 = (simm11 << 21) >> 20;
+      UInt dst    = simm11 + guest_R15_curr_instr_notENC + 4;
+      /* Only allowed outside or last-in IT block; SIGILL if not so. */
+      gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+      // and skip this insn if not selected; being cleverer is too
+      // difficult
+      mk_skip_over_T16_if_cond_is_false(condT);
+      condT = IRTemp_INVALID;
+      // now uncond
+      llPutIReg(15, mkU32( dst | 1 /*CPSR.T*/ ));
+      dres.jk_StopHere = Ijk_Boring;
+      dres.whatNext    = Dis_StopHere;
+      DIP("b 0x%x\n", dst);
+      goto decode_success;
+   }
+
+   default:
+      break; /* examine the next shortest prefix */
+
+   }
+
+
+   /* ================ 16-bit 15:12 cases ================ */
+
+   switch (INSN0(15,12)) {
+
+   case BITS4(1,1,0,1): {
+      /* ---------------- Bcond #simm8 ---------------- */
+      UInt cond  = INSN0(11,8);
+      Int  simm8 = INSN0(7,0);
+           simm8 = (simm8 << 24) >> 23;
+      UInt dst   = simm8 + guest_R15_curr_instr_notENC + 4;
+      if (cond != ARMCondAL && cond != ARMCondNV) {
+         /* Not allowed in an IT block; SIGILL if so. */
+         gen_SIGILL_T_if_in_ITBlock(old_itstate, new_itstate);
+
+         IRTemp kondT = newTemp(Ity_I32);
+         assign( kondT, mk_armg_calculate_condition(cond) );
+         stmt( IRStmt_Exit( unop(Iop_32to1, mkexpr(kondT)),
+                            Ijk_Boring,
+                            IRConst_U32(dst | 1/*CPSR.T*/),
+                            OFFB_R15T ));
+         llPutIReg(15, mkU32( (guest_R15_curr_instr_notENC + 2) 
+                              | 1 /*CPSR.T*/ ));
+         dres.jk_StopHere = Ijk_Boring;
+         dres.whatNext    = Dis_StopHere;
+         DIP("b%s 0x%x\n", nCC(cond), dst);
+         goto decode_success;
+      }
+      break;
+   }
+
+   default:
+      break; /* hmm, nothing matched */
+
+   }
+
+   /* ================ 16-bit misc cases ================ */
+
+   switch (INSN0(15,0)) {
+      case 0xBF00:
+         /* ------ NOP ------ */
+         DIP("nop\n");
+         goto decode_success;
+      case 0xBF20:
+         /* ------ WFE ------ */
+         /* WFE gets used as a spin-loop hint.  Do the usual thing,
+            which is to continue after yielding. */
+         stmt( IRStmt_Exit( unop(Iop_32to1, mkexpr(condT)),
+                            Ijk_Yield,
+                            IRConst_U32((guest_R15_curr_instr_notENC + 2) 
+                                        | 1 /*CPSR.T*/),
+                            OFFB_R15T ));
+         DIP("wfe\n");
+         goto decode_success;
+      case 0xBF40:
+         /* ------ SEV ------ */
+         /* Treat this as a no-op.  Any matching WFEs won't really
+            cause the host CPU to snooze; they just cause V to try to
+            run some other thread for a while.  So there's no point in
+            really doing anything for SEV. */
+         DIP("sev\n");
+         goto decode_success;
+      default:
+         break; /* fall through */
+   }
+
+   /* ----------------------------------------------------------- */
+   /* --                                                       -- */
+   /* -- Thumb 32-bit integer instructions                     -- */
+   /* --                                                       -- */
+   /* ----------------------------------------------------------- */
+
+#  define INSN1(_bMax,_bMin)  SLICE_UInt(((UInt)insn1), (_bMax), (_bMin))
+
+   /* second 16 bits of the instruction, if any */
+   vassert(insn1 == 0);
+   insn1 = getUShortLittleEndianly( guest_instr+2 );
+
+   anOp   = Iop_INVALID; /* paranoia */
+   anOpNm = NULL;        /* paranoia */
+
+   /* Change result defaults to suit 32-bit insns. */
+   vassert(dres.whatNext   == Dis_Continue);
+   vassert(dres.len        == 2);
+   vassert(dres.continueAt == 0);
+   dres.len = 4;
+
+   /* ---------------- BL/BLX simm26 ---------------- */
+   if (BITS5(1,1,1,1,0) == INSN0(15,11) && BITS2(1,1) == INSN1(15,14)) {
+      UInt isBL = INSN1(12,12);
+      UInt bS   = INSN0(10,10);
+      UInt bJ1  = INSN1(13,13);
+      UInt bJ2  = INSN1(11,11);
+      UInt bI1  = 1 ^ (bJ1 ^ bS);
+      UInt bI2  = 1 ^ (bJ2 ^ bS);
+      Int simm25
+         =   (bS          << (1 + 1 + 10 + 11 + 1))
+           | (bI1         << (1 + 10 + 11 + 1))
+           | (bI2         << (10 + 11 + 1))
+           | (INSN0(9,0)  << (11 + 1))
+           | (INSN1(10,0) << 1);
+      simm25 = (simm25 << 7) >> 7;
+
+      vassert(0 == (guest_R15_curr_instr_notENC & 1));
+      UInt dst = simm25 + guest_R15_curr_instr_notENC + 4;
+
+      /* One further validity case to check: in the case of BLX
+         (not-BL), that insn1[0] must be zero. */
+      Bool valid = True;
+      if (isBL == 0 && INSN1(0,0) == 1) valid = False;
+      if (valid) {
+         /* Only allowed outside or last-in IT block; SIGILL if not so. */
+         gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+         // and skip this insn if not selected; being cleverer is too
+         // difficult
+         mk_skip_over_T32_if_cond_is_false(condT);
+         condT = IRTemp_INVALID;
+         // now uncond
+
+         /* We're returning to Thumb code, hence "| 1" */
+         putIRegT( 14, mkU32( (guest_R15_curr_instr_notENC + 4) | 1 ),
+                   IRTemp_INVALID);
+         if (isBL) {
+            /* BL: unconditional T -> T call */
+            /* we're calling Thumb code, hence "| 1" */
+            llPutIReg(15, mkU32( dst | 1 ));
+            DIP("bl 0x%x (stay in Thumb mode)\n", dst);
+         } else {
+            /* BLX: unconditional T -> A call */
+            /* we're calling ARM code, hence "& 3" to align to a
+               valid ARM insn address */
+            llPutIReg(15, mkU32( dst & ~3 ));
+            DIP("blx 0x%x (switch to ARM mode)\n", dst & ~3);
+         }
+         dres.whatNext    = Dis_StopHere;
+         dres.jk_StopHere = Ijk_Call;
+         goto decode_success;
+      }
+   }
+
+   /* ---------------- {LD,ST}M{IA,DB} ---------------- */
+   if (0x3a2 == INSN0(15,6) // {LD,ST}MIA
+       || 0x3a4 == INSN0(15,6)) { // {LD,ST}MDB
+      UInt bW      = INSN0(5,5); /* writeback Rn ? */
+      UInt bL      = INSN0(4,4);
+      UInt rN      = INSN0(3,0);
+      UInt bP      = INSN1(15,15); /* reglist entry for r15 */
+      UInt bM      = INSN1(14,14); /* reglist entry for r14 */
+      UInt rLmost  = INSN1(12,0);  /* reglist entry for r0 .. 12 */
+      UInt rL13    = INSN1(13,13); /* must be zero */
+      UInt regList = 0;
+      Bool valid   = True;
+
+      UInt bINC    = 1;
+      UInt bBEFORE = 0;
+      if (INSN0(15,6) == 0x3a4) {
+         bINC    = 0;
+         bBEFORE = 1;
+      }
+
+      /* detect statically invalid cases, and construct the final
+         reglist */
+      if (rL13 == 1)
+         valid = False;
+
+      if (bL == 1) {
+         regList = (bP << 15) | (bM << 14) | rLmost;
+         if (rN == 15)                       valid = False;
+         if (popcount32(regList) < 2)        valid = False;
+         if (bP == 1 && bM == 1)             valid = False;
+         if (bW == 1 && (regList & (1<<rN))) valid = False;
+      } else {
+         regList = (bM << 14) | rLmost;
+         if (bP == 1)                        valid = False;
+         if (rN == 15)                       valid = False;
+         if (popcount32(regList) < 2)        valid = False;
+         if (bW == 1 && (regList & (1<<rN))) valid = False;
+      }
+
+      if (valid) {
+         if (bL == 1 && bP == 1) {
+            // We'll be writing the PC.  Hence:
+            /* Only allowed outside or last-in IT block; SIGILL if not so. */
+            gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+         }
+
+         /* Go uncond: */
+         mk_skip_over_T32_if_cond_is_false(condT);
+         condT = IRTemp_INVALID;
+         // now uncond
+
+         /* Generate the IR.  This might generate a write to R15. */
+         mk_ldm_stm(False/*!arm*/, rN, bINC, bBEFORE, bW, bL, regList);
+
+         if (bL == 1 && (regList & (1<<15))) {
+            // If we wrote to R15, we have an interworking return to
+            // deal with.
+            llPutIReg(15, llGetIReg(15));
+            dres.jk_StopHere = Ijk_Ret;
+            dres.whatNext    = Dis_StopHere;
+         }
+
+         DIP("%sm%c%c r%u%s, {0x%04x}\n",
+              bL == 1 ? "ld" : "st", bINC ? 'i' : 'd', bBEFORE ? 'b' : 'a',
+              rN, bW ? "!" : "", regList);
+
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T3) ADD{S}.W Rd, Rn, #constT -------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && INSN0(9,5) == BITS5(0,1,0,0,0)
+       && INSN1(15,15) == 0) {
+      UInt bS = INSN0(4,4);
+      UInt rN = INSN0(3,0);
+      UInt rD = INSN1(11,8);
+      Bool valid = !isBadRegT(rN) && !isBadRegT(rD);
+      /* but allow "add.w reg, sp, #constT" for reg != PC */ 
+      if (!valid && rD <= 14 && rN == 13)
+         valid = True;
+      if (valid) {
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         IRTemp res   = newTemp(Ity_I32);
+         UInt   imm32 = thumbExpandImm_from_I0_I1(NULL, insn0, insn1);
+         assign(argL, getIRegT(rN));
+         assign(argR, mkU32(imm32));
+         assign(res,  binop(Iop_Add32, mkexpr(argL), mkexpr(argR)));
+         putIRegT(rD, mkexpr(res), condT);
+         if (bS == 1)
+            setFlags_D1_D2( ARMG_CC_OP_ADD, argL, argR, condT );
+         DIP("add%s.w r%u, r%u, #%u\n",
+             bS == 1 ? "s" : "", rD, rN, imm32);
+         goto decode_success;
+      }
+   }
+
+   /* ---------------- (T4) ADDW Rd, Rn, #uimm12 -------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && INSN0(9,4) == BITS6(1,0,0,0,0,0)
+       && INSN1(15,15) == 0) {
+      UInt rN = INSN0(3,0);
+      UInt rD = INSN1(11,8);
+      Bool valid = !isBadRegT(rN) && !isBadRegT(rD);
+      /* but allow "addw reg, sp, #uimm12" for reg != PC */
+      if (!valid && rD <= 14 && rN == 13)
+         valid = True;
+      if (valid) {
+         IRTemp argL = newTemp(Ity_I32);
+         IRTemp argR = newTemp(Ity_I32);
+         IRTemp res  = newTemp(Ity_I32);
+         UInt imm12  = (INSN0(10,10) << 11) | (INSN1(14,12) << 8) | INSN1(7,0);
+         assign(argL, getIRegT(rN));
+         assign(argR, mkU32(imm12));
+         assign(res,  binop(Iop_Add32, mkexpr(argL), mkexpr(argR)));
+         putIRegT(rD, mkexpr(res), condT);
+         DIP("addw r%u, r%u, #%u\n", rD, rN, imm12);
+         goto decode_success;
+      }
+   }
+
+   /* ---------------- (T2) CMP.W Rn, #constT ---------------- */
+   /* ---------------- (T2) CMN.W Rn, #constT ---------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && (   INSN0(9,4) == BITS6(0,1,1,0,1,1)  // CMP
+           || INSN0(9,4) == BITS6(0,1,0,0,0,1)) // CMN
+       && INSN1(15,15) == 0
+       && INSN1(11,8) == BITS4(1,1,1,1)) {
+      UInt rN = INSN0(3,0);
+      if (rN != 15) {
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         Bool   isCMN = INSN0(9,4) == BITS6(0,1,0,0,0,1);
+         UInt   imm32 = thumbExpandImm_from_I0_I1(NULL, insn0, insn1);
+         assign(argL, getIRegT(rN));
+         assign(argR, mkU32(imm32));
+         setFlags_D1_D2( isCMN ? ARMG_CC_OP_ADD : ARMG_CC_OP_SUB,
+                         argL, argR, condT );
+         DIP("%s.w r%u, #%u\n", isCMN ? "cmn" : "cmp", rN, imm32);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) TST.W Rn, #constT -------------- */
+   /* -------------- (T1) TEQ.W Rn, #constT -------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && (   INSN0(9,4) == BITS6(0,0,0,0,0,1)  // TST
+           || INSN0(9,4) == BITS6(0,0,1,0,0,1)) // TEQ
+       && INSN1(15,15) == 0
+       && INSN1(11,8) == BITS4(1,1,1,1)) {
+      UInt rN = INSN0(3,0);
+      if (!isBadRegT(rN)) { // yes, really, it's inconsistent with CMP.W
+         Bool  isTST  = INSN0(9,4) == BITS6(0,0,0,0,0,1);
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         IRTemp res   = newTemp(Ity_I32);
+         IRTemp oldV  = newTemp(Ity_I32);
+         IRTemp oldC  = newTemp(Ity_I32);
+         Bool   updC  = False;
+         UInt   imm32 = thumbExpandImm_from_I0_I1(&updC, insn0, insn1);
+         assign(argL, getIRegT(rN));
+         assign(argR, mkU32(imm32));
+         assign(res,  binop(isTST ? Iop_And32 : Iop_Xor32,
+                            mkexpr(argL), mkexpr(argR)));
+         assign( oldV, mk_armg_calculate_flag_v() );
+         assign( oldC, updC 
+                       ? mkU32((imm32 >> 31) & 1)
+                       : mk_armg_calculate_flag_c() );
+         setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV, condT );
+         DIP("%s.w r%u, #%u\n", isTST ? "tst" : "teq", rN, imm32);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T3) SUB{S}.W Rd, Rn, #constT -------------- */
+   /* -------------- (T3) RSB{S}.W Rd, Rn, #constT -------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && (INSN0(9,5) == BITS5(0,1,1,0,1) // SUB
+           || INSN0(9,5) == BITS5(0,1,1,1,0)) // RSB
+       && INSN1(15,15) == 0) {
+      Bool isRSB = INSN0(9,5) == BITS5(0,1,1,1,0);
+      UInt bS    = INSN0(4,4);
+      UInt rN    = INSN0(3,0);
+      UInt rD    = INSN1(11,8);
+      Bool valid = !isBadRegT(rN) && !isBadRegT(rD);
+      /* but allow "sub{s}.w reg, sp, #constT 
+         this is (T2) of "SUB (SP minus immediate)" */
+      if (!valid && !isRSB && rN == 13 && rD != 15)
+         valid = True;
+      if (valid) {
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         IRTemp res   = newTemp(Ity_I32);
+         UInt   imm32 = thumbExpandImm_from_I0_I1(NULL, insn0, insn1);
+         assign(argL, getIRegT(rN));
+         assign(argR, mkU32(imm32));
+         assign(res,  isRSB
+                      ? binop(Iop_Sub32, mkexpr(argR), mkexpr(argL))
+                      : binop(Iop_Sub32, mkexpr(argL), mkexpr(argR)));
+         putIRegT(rD, mkexpr(res), condT);
+         if (bS == 1) {
+            if (isRSB)
+               setFlags_D1_D2( ARMG_CC_OP_SUB, argR, argL, condT );
+            else
+               setFlags_D1_D2( ARMG_CC_OP_SUB, argL, argR, condT );
+         }
+         DIP("%s%s.w r%u, r%u, #%u\n",
+             isRSB ? "rsb" : "sub", bS == 1 ? "s" : "", rD, rN, imm32);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T4) SUBW Rd, Rn, #uimm12 ------------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && INSN0(9,4) == BITS6(1,0,1,0,1,0)
+       && INSN1(15,15) == 0) {
+      UInt rN = INSN0(3,0);
+      UInt rD = INSN1(11,8);
+      Bool valid = !isBadRegT(rN) && !isBadRegT(rD);
+      /* but allow "subw sp, sp, #uimm12" */
+      if (!valid && rD == 13 && rN == 13)
+         valid = True;
+      if (valid) {
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         IRTemp res   = newTemp(Ity_I32);
+         UInt imm12   = (INSN0(10,10) << 11) | (INSN1(14,12) << 8) | INSN1(7,0);
+         assign(argL, getIRegT(rN));
+         assign(argR, mkU32(imm12));
+         assign(res,  binop(Iop_Sub32, mkexpr(argL), mkexpr(argR)));
+         putIRegT(rD, mkexpr(res), condT);
+         DIP("subw r%u, r%u, #%u\n", rD, rN, imm12);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) ADC{S}.W Rd, Rn, #constT -------------- */
+   /* -------------- (T1) SBC{S}.W Rd, Rn, #constT -------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && (   INSN0(9,5) == BITS5(0,1,0,1,0)  // ADC
+           || INSN0(9,5) == BITS5(0,1,0,1,1)) // SBC
+       && INSN1(15,15) == 0) {
+      /* ADC:  Rd = Rn + constT + oldC */
+      /* SBC:  Rd = Rn - constT - (oldC ^ 1) */
+      UInt bS    = INSN0(4,4);
+      UInt rN    = INSN0(3,0);
+      UInt rD    = INSN1(11,8);
+      if (!isBadRegT(rN) && !isBadRegT(rD)) {
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         IRTemp res   = newTemp(Ity_I32);
+         IRTemp oldC  = newTemp(Ity_I32);
+         UInt   imm32 = thumbExpandImm_from_I0_I1(NULL, insn0, insn1);
+         assign(argL, getIRegT(rN));
+         assign(argR, mkU32(imm32));
+         assign(oldC, mk_armg_calculate_flag_c() );
+         const HChar* nm  = "???";
+         switch (INSN0(9,5)) {
+            case BITS5(0,1,0,1,0): // ADC
+               nm = "adc";
+               assign(res,
+                      binop(Iop_Add32,
+                            binop(Iop_Add32, mkexpr(argL), mkexpr(argR)),
+                            mkexpr(oldC) ));
+               putIRegT(rD, mkexpr(res), condT);
+               if (bS)
+                  setFlags_D1_D2_ND( ARMG_CC_OP_ADC,
+                                     argL, argR, oldC, condT );
+               break;
+            case BITS5(0,1,0,1,1): // SBC
+               nm = "sbc";
+               assign(res,
+                      binop(Iop_Sub32,
+                            binop(Iop_Sub32, mkexpr(argL), mkexpr(argR)),
+                            binop(Iop_Xor32, mkexpr(oldC), mkU32(1)) ));
+               putIRegT(rD, mkexpr(res), condT);
+               if (bS)
+                  setFlags_D1_D2_ND( ARMG_CC_OP_SBB,
+                                     argL, argR, oldC, condT );
+               break;
+            default:
+              vassert(0);
+         }
+         DIP("%s%s.w r%u, r%u, #%u\n",
+             nm, bS == 1 ? "s" : "", rD, rN, imm32);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) ORR{S}.W Rd, Rn, #constT -------------- */
+   /* -------------- (T1) AND{S}.W Rd, Rn, #constT -------------- */
+   /* -------------- (T1) BIC{S}.W Rd, Rn, #constT -------------- */
+   /* -------------- (T1) EOR{S}.W Rd, Rn, #constT -------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && (   INSN0(9,5) == BITS5(0,0,0,1,0)  // ORR
+           || INSN0(9,5) == BITS5(0,0,0,0,0)  // AND
+           || INSN0(9,5) == BITS5(0,0,0,0,1)  // BIC
+           || INSN0(9,5) == BITS5(0,0,1,0,0)  // EOR
+           || INSN0(9,5) == BITS5(0,0,0,1,1)) // ORN
+       && INSN1(15,15) == 0) {
+      UInt bS = INSN0(4,4);
+      UInt rN = INSN0(3,0);
+      UInt rD = INSN1(11,8);
+      if (!isBadRegT(rN) && !isBadRegT(rD)) {
+         Bool   notArgR = False;
+         IROp   op      = Iop_INVALID;
+         const HChar* nm = "???";
+         switch (INSN0(9,5)) {
+            case BITS5(0,0,0,1,0): op = Iop_Or32;  nm = "orr"; break;
+            case BITS5(0,0,0,0,0): op = Iop_And32; nm = "and"; break;
+            case BITS5(0,0,0,0,1): op = Iop_And32; nm = "bic";
+                                   notArgR = True; break;
+            case BITS5(0,0,1,0,0): op = Iop_Xor32; nm = "eor"; break;
+            case BITS5(0,0,0,1,1): op = Iop_Or32;  nm = "orn";
+                                   notArgR = True; break;
+            default: vassert(0);
+         }
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         IRTemp res   = newTemp(Ity_I32);
+         Bool   updC  = False;
+         UInt   imm32 = thumbExpandImm_from_I0_I1(&updC, insn0, insn1);
+         assign(argL, getIRegT(rN));
+         assign(argR, mkU32(notArgR ? ~imm32 : imm32));
+         assign(res,  binop(op, mkexpr(argL), mkexpr(argR)));
+         putIRegT(rD, mkexpr(res), condT);
+         if (bS) {
+            IRTemp oldV = newTemp(Ity_I32);
+            IRTemp oldC = newTemp(Ity_I32);
+            assign( oldV, mk_armg_calculate_flag_v() );
+            assign( oldC, updC 
+                          ? mkU32((imm32 >> 31) & 1)
+                          : mk_armg_calculate_flag_c() );
+            setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV,
+                               condT );
+         }
+         DIP("%s%s.w r%u, r%u, #%u\n",
+             nm, bS == 1 ? "s" : "", rD, rN, imm32);
+         goto decode_success;
+      }
+   }
+
+   /* ---------- (T3) ADD{S}.W Rd, Rn, Rm, {shift} ---------- */
+   /* ---------- (T3) SUB{S}.W Rd, Rn, Rm, {shift} ---------- */
+   /* ---------- (T3) RSB{S}.W Rd, Rn, Rm, {shift} ---------- */
+   if (INSN0(15,9) == BITS7(1,1,1,0,1,0,1)
+       && (   INSN0(8,5) == BITS4(1,0,0,0)  // add subopc
+           || INSN0(8,5) == BITS4(1,1,0,1)  // sub subopc
+           || INSN0(8,5) == BITS4(1,1,1,0)) // rsb subopc
+       && INSN1(15,15) == 0) {
+      UInt rN   = INSN0(3,0);
+      UInt rD   = INSN1(11,8);
+      UInt rM   = INSN1(3,0);
+      UInt bS   = INSN0(4,4);
+      UInt imm5 = (INSN1(14,12) << 2) | INSN1(7,6);
+      UInt how  = INSN1(5,4);
+
+      Bool valid = !isBadRegT(rD) && !isBadRegT(rN) && !isBadRegT(rM);
+      /* but allow "add.w reg, sp, reg, lsl #N for N=0,1,2 or 3
+         (T3) "ADD (SP plus register) */
+      if (!valid && INSN0(8,5) == BITS4(1,0,0,0) // add
+          && rD != 15 && rN == 13 && imm5 <= 3 && how == 0) {
+         valid = True;
+      }
+      /* also allow "sub.w reg, sp, reg   w/ no shift
+         (T1) "SUB (SP minus register) */
+      if (!valid && INSN0(8,5) == BITS4(1,1,0,1) // sub
+          && rD != 15 && rN == 13 && imm5 == 0 && how == 0) {
+         valid = True;
+      }
+      if (valid) {
+         Bool   swap = False;
+         IROp   op   = Iop_INVALID;
+         const HChar* nm = "???";
+         switch (INSN0(8,5)) {
+            case BITS4(1,0,0,0): op = Iop_Add32; nm = "add"; break;
+            case BITS4(1,1,0,1): op = Iop_Sub32; nm = "sub"; break;
+            case BITS4(1,1,1,0): op = Iop_Sub32; nm = "rsb"; 
+                                 swap = True; break;
+            default: vassert(0);
+         }
+
+         IRTemp argL = newTemp(Ity_I32);
+         assign(argL, getIRegT(rN));
+
+         IRTemp rMt = newTemp(Ity_I32);
+         assign(rMt, getIRegT(rM));
+
+         IRTemp argR = newTemp(Ity_I32);
+         compute_result_and_C_after_shift_by_imm5(
+            dis_buf, &argR, NULL, rMt, how, imm5, rM
+         );
+
+         IRTemp res = newTemp(Ity_I32);
+         assign(res, swap 
+                     ? binop(op, mkexpr(argR), mkexpr(argL))
+                     : binop(op, mkexpr(argL), mkexpr(argR)));
+
+         putIRegT(rD, mkexpr(res), condT);
+         if (bS) {
+            switch (op) {
+               case Iop_Add32:
+                  setFlags_D1_D2( ARMG_CC_OP_ADD, argL, argR, condT );
+                  break;
+               case Iop_Sub32:
+                  if (swap)
+                     setFlags_D1_D2( ARMG_CC_OP_SUB, argR, argL, condT );
+                  else
+                     setFlags_D1_D2( ARMG_CC_OP_SUB, argL, argR, condT );
+                  break;
+               default:
+                  vassert(0);
+            }
+         }
+
+         DIP("%s%s.w r%u, r%u, %s\n",
+             nm, bS ? "s" : "", rD, rN, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* ---------- (T3) ADC{S}.W Rd, Rn, Rm, {shift} ---------- */
+   /* ---------- (T2) SBC{S}.W Rd, Rn, Rm, {shift} ---------- */
+   if (INSN0(15,9) == BITS7(1,1,1,0,1,0,1)
+       && (   INSN0(8,5) == BITS4(1,0,1,0)   // adc subopc
+           || INSN0(8,5) == BITS4(1,0,1,1))  // sbc subopc
+       && INSN1(15,15) == 0) {
+      /* ADC:  Rd = Rn + shifter_operand + oldC */
+      /* SBC:  Rd = Rn - shifter_operand - (oldC ^ 1) */
+      UInt rN = INSN0(3,0);
+      UInt rD = INSN1(11,8);
+      UInt rM = INSN1(3,0);
+      if (!isBadRegT(rD) && !isBadRegT(rN) && !isBadRegT(rM)) {
+         UInt bS   = INSN0(4,4);
+         UInt imm5 = (INSN1(14,12) << 2) | INSN1(7,6);
+         UInt how  = INSN1(5,4);
+
+         IRTemp argL = newTemp(Ity_I32);
+         assign(argL, getIRegT(rN));
+
+         IRTemp rMt = newTemp(Ity_I32);
+         assign(rMt, getIRegT(rM));
+
+         IRTemp oldC = newTemp(Ity_I32);
+         assign(oldC, mk_armg_calculate_flag_c());
+
+         IRTemp argR = newTemp(Ity_I32);
+         compute_result_and_C_after_shift_by_imm5(
+            dis_buf, &argR, NULL, rMt, how, imm5, rM
+         );
+
+         const HChar* nm  = "???";
+         IRTemp res = newTemp(Ity_I32);
+         switch (INSN0(8,5)) {
+            case BITS4(1,0,1,0): // ADC
+               nm = "adc";
+               assign(res,
+                      binop(Iop_Add32,
+                            binop(Iop_Add32, mkexpr(argL), mkexpr(argR)),
+                            mkexpr(oldC) ));
+               putIRegT(rD, mkexpr(res), condT);
+               if (bS)
+                  setFlags_D1_D2_ND( ARMG_CC_OP_ADC,
+                                     argL, argR, oldC, condT );
+               break;
+            case BITS4(1,0,1,1): // SBC
+               nm = "sbc";
+               assign(res,
+                      binop(Iop_Sub32,
+                            binop(Iop_Sub32, mkexpr(argL), mkexpr(argR)),
+                            binop(Iop_Xor32, mkexpr(oldC), mkU32(1)) ));
+               putIRegT(rD, mkexpr(res), condT);
+               if (bS)
+                  setFlags_D1_D2_ND( ARMG_CC_OP_SBB,
+                                     argL, argR, oldC, condT );
+               break;
+            default:
+               vassert(0);
+         }
+
+         DIP("%s%s.w r%u, r%u, %s\n",
+             nm, bS ? "s" : "", rD, rN, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* ---------- (T3) AND{S}.W Rd, Rn, Rm, {shift} ---------- */
+   /* ---------- (T3) ORR{S}.W Rd, Rn, Rm, {shift} ---------- */
+   /* ---------- (T3) EOR{S}.W Rd, Rn, Rm, {shift} ---------- */
+   /* ---------- (T3) BIC{S}.W Rd, Rn, Rm, {shift} ---------- */
+   /* ---------- (T1) ORN{S}.W Rd, Rn, Rm, {shift} ---------- */
+   if (INSN0(15,9) == BITS7(1,1,1,0,1,0,1)
+       && (   INSN0(8,5) == BITS4(0,0,0,0)  // and subopc
+           || INSN0(8,5) == BITS4(0,0,1,0)  // orr subopc
+           || INSN0(8,5) == BITS4(0,1,0,0)  // eor subopc
+           || INSN0(8,5) == BITS4(0,0,0,1)  // bic subopc
+           || INSN0(8,5) == BITS4(0,0,1,1)) // orn subopc
+       && INSN1(15,15) == 0) {
+      UInt rN = INSN0(3,0);
+      UInt rD = INSN1(11,8);
+      UInt rM = INSN1(3,0);
+      if (!isBadRegT(rD) && !isBadRegT(rN) && !isBadRegT(rM)) {
+         Bool notArgR = False;
+         IROp op      = Iop_INVALID;
+         const HChar* nm  = "???";
+         switch (INSN0(8,5)) {
+            case BITS4(0,0,0,0): op = Iop_And32; nm = "and"; break;
+            case BITS4(0,0,1,0): op = Iop_Or32;  nm = "orr"; break;
+            case BITS4(0,1,0,0): op = Iop_Xor32; nm = "eor"; break;
+            case BITS4(0,0,0,1): op = Iop_And32; nm = "bic";
+                                 notArgR = True; break;
+            case BITS4(0,0,1,1): op = Iop_Or32; nm = "orn";
+                                 notArgR = True; break;
+            default: vassert(0);
+         }
+         UInt bS   = INSN0(4,4);
+         UInt imm5 = (INSN1(14,12) << 2) | INSN1(7,6);
+         UInt how  = INSN1(5,4);
+
+         IRTemp rNt = newTemp(Ity_I32);
+         assign(rNt, getIRegT(rN));
+
+         IRTemp rMt = newTemp(Ity_I32);
+         assign(rMt, getIRegT(rM));
+
+         IRTemp argR = newTemp(Ity_I32);
+         IRTemp oldC = bS ? newTemp(Ity_I32) : IRTemp_INVALID;
+
+         compute_result_and_C_after_shift_by_imm5(
+            dis_buf, &argR, bS ? &oldC : NULL, rMt, how, imm5, rM
+         );
+
+         IRTemp res = newTemp(Ity_I32);
+         if (notArgR) {
+            vassert(op == Iop_And32 || op == Iop_Or32);
+            assign(res, binop(op, mkexpr(rNt),
+                                  unop(Iop_Not32, mkexpr(argR))));
+         } else {
+            assign(res, binop(op, mkexpr(rNt), mkexpr(argR)));
+         }
+
+         putIRegT(rD, mkexpr(res), condT);
+         if (bS) {
+            IRTemp oldV = newTemp(Ity_I32);
+            assign( oldV, mk_armg_calculate_flag_v() );
+            setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV,
+                               condT );
+         }
+
+         DIP("%s%s.w r%u, r%u, %s\n",
+             nm, bS ? "s" : "", rD, rN, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T?) LSL{S}.W Rd, Rn, Rm -------------- */
+   /* -------------- (T?) LSR{S}.W Rd, Rn, Rm -------------- */
+   /* -------------- (T?) ASR{S}.W Rd, Rn, Rm -------------- */
+   /* -------------- (T?) ROR{S}.W Rd, Rn, Rm -------------- */
+   if (INSN0(15,7) == BITS9(1,1,1,1,1,0,1,0,0)
+       && INSN1(15,12) == BITS4(1,1,1,1)
+       && INSN1(7,4) == BITS4(0,0,0,0)) {
+      UInt how = INSN0(6,5); // standard encoding
+      UInt rN  = INSN0(3,0);
+      UInt rD  = INSN1(11,8);
+      UInt rM  = INSN1(3,0);
+      UInt bS  = INSN0(4,4);
+      Bool valid = !isBadRegT(rN) && !isBadRegT(rM) && !isBadRegT(rD);
+      if (valid) {
+         IRTemp rNt    = newTemp(Ity_I32);
+         IRTemp rMt    = newTemp(Ity_I32);
+         IRTemp res    = newTemp(Ity_I32);
+         IRTemp oldC   = bS ? newTemp(Ity_I32) : IRTemp_INVALID;
+         IRTemp oldV   = bS ? newTemp(Ity_I32) : IRTemp_INVALID;
+         const HChar* nms[4] = { "lsl", "lsr", "asr", "ror" };
+         const HChar* nm     = nms[how];
+         assign(rNt, getIRegT(rN));
+         assign(rMt, getIRegT(rM));
+         compute_result_and_C_after_shift_by_reg(
+            dis_buf, &res, bS ? &oldC : NULL,
+            rNt, how, rMt, rN, rM
+         );
+         if (bS)
+            assign(oldV, mk_armg_calculate_flag_v());
+         putIRegT(rD, mkexpr(res), condT);
+         if (bS) {
+            setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV,
+                               condT );
+         }
+         DIP("%s%s.w r%u, r%u, r%u\n",
+             nm, bS ? "s" : "", rD, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------ (T?) MOV{S}.W Rd, Rn, {shift} ------------ */
+   /* ------------ (T?) MVN{S}.W Rd, Rn, {shift} ------------ */
+   if ((INSN0(15,0) & 0xFFCF) == 0xEA4F
+       && INSN1(15,15) == 0) {
+      UInt rD = INSN1(11,8);
+      UInt rN = INSN1(3,0);
+      if (!isBadRegT(rD) && !isBadRegT(rN)) {
+         UInt bS    = INSN0(4,4);
+         UInt isMVN = INSN0(5,5);
+         UInt imm5  = (INSN1(14,12) << 2) | INSN1(7,6);
+         UInt how   = INSN1(5,4);
+
+         IRTemp rNt = newTemp(Ity_I32);
+         assign(rNt, getIRegT(rN));
+
+         IRTemp oldRn = newTemp(Ity_I32);
+         IRTemp oldC  = bS ? newTemp(Ity_I32) : IRTemp_INVALID;
+         compute_result_and_C_after_shift_by_imm5(
+            dis_buf, &oldRn, bS ? &oldC : NULL, rNt, how, imm5, rN
+         );
+
+         IRTemp res = newTemp(Ity_I32);
+         assign(res, isMVN ? unop(Iop_Not32, mkexpr(oldRn))
+                           : mkexpr(oldRn));
+
+         putIRegT(rD, mkexpr(res), condT);
+         if (bS) {
+            IRTemp oldV = newTemp(Ity_I32);
+            assign( oldV, mk_armg_calculate_flag_v() );
+            setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV, condT);
+         }
+         DIP("%s%s.w r%u, %s\n",
+             isMVN ? "mvn" : "mov", bS ? "s" : "", rD, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T?) TST.W Rn, Rm, {shift} -------------- */
+   /* -------------- (T?) TEQ.W Rn, Rm, {shift} -------------- */
+   if (INSN0(15,9) == BITS7(1,1,1,0,1,0,1)
+       && (   INSN0(8,4) == BITS5(0,0,0,0,1)  // TST
+           || INSN0(8,4) == BITS5(0,1,0,0,1)) // TEQ
+       && INSN1(15,15) == 0
+       && INSN1(11,8) == BITS4(1,1,1,1)) {
+      UInt rN = INSN0(3,0);
+      UInt rM = INSN1(3,0);
+      if (!isBadRegT(rN) && !isBadRegT(rM)) {
+         Bool isTST = INSN0(8,4) == BITS5(0,0,0,0,1);
+
+         UInt how  = INSN1(5,4);
+         UInt imm5 = (INSN1(14,12) << 2) | INSN1(7,6);
+
+         IRTemp argL = newTemp(Ity_I32);
+         assign(argL, getIRegT(rN));
+
+         IRTemp rMt = newTemp(Ity_I32);
+         assign(rMt, getIRegT(rM));
+
+         IRTemp argR = newTemp(Ity_I32);
+         IRTemp oldC = newTemp(Ity_I32);
+         compute_result_and_C_after_shift_by_imm5(
+            dis_buf, &argR, &oldC, rMt, how, imm5, rM
+         );
+
+         IRTemp oldV = newTemp(Ity_I32);
+         assign( oldV, mk_armg_calculate_flag_v() );
+
+         IRTemp res = newTemp(Ity_I32);
+         assign(res, binop(isTST ? Iop_And32 : Iop_Xor32,
+                           mkexpr(argL), mkexpr(argR)));
+
+         setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV,
+                            condT );
+         DIP("%s.w r%u, %s\n", isTST ? "tst" : "teq", rN, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T3) CMP.W Rn, Rm, {shift} -------------- */
+   /* -------------- (T2) CMN.W Rn, Rm, {shift} -------------- */
+   if (INSN0(15,9) == BITS7(1,1,1,0,1,0,1)
+       && (   INSN0(8,4) == BITS5(1,1,0,1,1)  // CMP
+           || INSN0(8,4) == BITS5(1,0,0,0,1)) // CMN
+       && INSN1(15,15) == 0
+       && INSN1(11,8) == BITS4(1,1,1,1)) {
+      UInt rN = INSN0(3,0);
+      UInt rM = INSN1(3,0);
+      if (!isBadRegT(rN) && !isBadRegT(rM)) {
+         Bool isCMN = INSN0(8,4) == BITS5(1,0,0,0,1);
+         UInt how   = INSN1(5,4);
+         UInt imm5  = (INSN1(14,12) << 2) | INSN1(7,6);
+
+         IRTemp argL = newTemp(Ity_I32);
+         assign(argL, getIRegT(rN));
+
+         IRTemp rMt = newTemp(Ity_I32);
+         assign(rMt, getIRegT(rM));
+
+         IRTemp argR = newTemp(Ity_I32);
+         compute_result_and_C_after_shift_by_imm5(
+            dis_buf, &argR, NULL, rMt, how, imm5, rM
+         );
+
+         setFlags_D1_D2( isCMN ? ARMG_CC_OP_ADD : ARMG_CC_OP_SUB,
+                         argL, argR, condT );
+
+         DIP("%s.w r%u, %s\n", isCMN ? "cmn" : "cmp", rN, dis_buf);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T2) MOV{S}.W Rd, #constT -------------- */
+   /* -------------- (T2) MVN{S}.W Rd, #constT -------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && (   INSN0(9,5) == BITS5(0,0,0,1,0)  // MOV
+           || INSN0(9,5) == BITS5(0,0,0,1,1)) // MVN
+       && INSN0(3,0) == BITS4(1,1,1,1)
+       && INSN1(15,15) == 0) {
+      UInt rD = INSN1(11,8);
+      if (!isBadRegT(rD)) {
+         Bool   updC  = False;
+         UInt   bS    = INSN0(4,4);
+         Bool   isMVN = INSN0(5,5) == 1;
+         UInt   imm32 = thumbExpandImm_from_I0_I1(&updC, insn0, insn1);
+         IRTemp res   = newTemp(Ity_I32);
+         assign(res, mkU32(isMVN ? ~imm32 : imm32));
+         putIRegT(rD, mkexpr(res), condT);
+         if (bS) {
+            IRTemp oldV = newTemp(Ity_I32);
+            IRTemp oldC = newTemp(Ity_I32);
+            assign( oldV, mk_armg_calculate_flag_v() );
+            assign( oldC, updC 
+                          ? mkU32((imm32 >> 31) & 1)
+                          : mk_armg_calculate_flag_c() );
+            setFlags_D1_D2_ND( ARMG_CC_OP_LOGIC, res, oldC, oldV,
+                               condT );
+         }
+         DIP("%s%s.w r%u, #%u\n",
+             isMVN ? "mvn" : "mov", bS ? "s" : "", rD, imm32);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T3) MOVW Rd, #imm16 -------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && INSN0(9,4) == BITS6(1,0,0,1,0,0)
+       && INSN1(15,15) == 0) {
+      UInt rD = INSN1(11,8);
+      if (!isBadRegT(rD)) {
+         UInt imm16 = (INSN0(3,0) << 12) | (INSN0(10,10) << 11)
+                      | (INSN1(14,12) << 8) | INSN1(7,0);
+         putIRegT(rD, mkU32(imm16), condT);
+         DIP("movw r%u, #%u\n", rD, imm16);
+         goto decode_success;
+      }
+   }
+
+   /* ---------------- MOVT Rd, #imm16 ---------------- */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && INSN0(9,4) == BITS6(1,0,1,1,0,0)
+       && INSN1(15,15) == 0) {
+      UInt rD = INSN1(11,8);
+      if (!isBadRegT(rD)) {
+         UInt imm16 = (INSN0(3,0) << 12) | (INSN0(10,10) << 11)
+                      | (INSN1(14,12) << 8) | INSN1(7,0);
+         IRTemp res = newTemp(Ity_I32);
+         assign(res,
+                binop(Iop_Or32,
+                      binop(Iop_And32, getIRegT(rD), mkU32(0xFFFF)),
+                      mkU32(imm16 << 16)));
+         putIRegT(rD, mkexpr(res), condT);
+         DIP("movt r%u, #%u\n", rD, imm16);
+         goto decode_success;
+      }
+   }
+
+   /* ---------------- LD/ST reg+/-#imm8 ---------------- */
+   /* Loads and stores of the form:
+         op  Rt, [Rn, #-imm8]      or
+         op  Rt, [Rn], #+/-imm8    or
+         op  Rt, [Rn, #+/-imm8]!  
+      where op is one of
+         ldrb ldrh ldr  ldrsb ldrsh
+         strb strh str
+   */
+   if (INSN0(15,9) == BITS7(1,1,1,1,1,0,0) && INSN1(11,11) == 1) {
+      Bool   valid  = True;
+      Bool   syned  = False;
+      Bool   isST   = False;
+      IRType ty     = Ity_I8;
+      const HChar* nm = "???";
+
+      switch (INSN0(8,4)) {
+         case BITS5(0,0,0,0,0):   // strb
+            nm = "strb"; isST = True; break;
+         case BITS5(0,0,0,0,1):   // ldrb
+            nm = "ldrb"; break;
+         case BITS5(1,0,0,0,1):   // ldrsb
+            nm = "ldrsb"; syned = True; break;
+         case BITS5(0,0,0,1,0):   // strh
+            nm = "strh"; ty = Ity_I16; isST = True; break;
+         case BITS5(0,0,0,1,1):   // ldrh
+            nm = "ldrh"; ty = Ity_I16; break;
+         case BITS5(1,0,0,1,1):   // ldrsh
+            nm = "ldrsh"; ty = Ity_I16; syned = True; break;
+         case BITS5(0,0,1,0,0):   // str
+            nm = "str"; ty = Ity_I32; isST = True; break;
+         case BITS5(0,0,1,0,1):
+            nm = "ldr"; ty = Ity_I32; break;  // ldr
+         default:
+            valid = False; break;
+      }
+
+      UInt rN      = INSN0(3,0);
+      UInt rT      = INSN1(15,12);
+      UInt bP      = INSN1(10,10);
+      UInt bU      = INSN1(9,9);
+      UInt bW      = INSN1(8,8);
+      UInt imm8    = INSN1(7,0);
+      Bool loadsPC = False;
+
+      if (valid) {
+         if (bP == 1 && bU == 1 && bW == 0)
+            valid = False;
+         if (bP == 0 && bW == 0)
+            valid = False;
+         if (rN == 15)
+            valid = False;
+         if (bW == 1 && rN == rT)
+            valid = False;
+         if (ty == Ity_I8 || ty == Ity_I16) {
+            if (isBadRegT(rT))
+               valid = False;
+         } else {
+            /* ty == Ity_I32 */
+            if (isST && rT == 15)
+               valid = False;
+            if (!isST && rT == 15)
+               loadsPC = True;
+         }
+      }
+
+      if (valid) {
+         // if it's a branch, it can't happen in the middle of an IT block
+         // Also, if it is a branch, make it unconditional at this point.
+         // Doing conditional branches in-line is too complex (for now)
+         if (loadsPC) {
+            gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+            // go uncond
+            mk_skip_over_T32_if_cond_is_false(condT);
+            condT = IRTemp_INVALID;
+            // now uncond
+         }
+
+         IRTemp preAddr = newTemp(Ity_I32);
+         assign(preAddr, getIRegT(rN));
+
+         IRTemp postAddr = newTemp(Ity_I32);
+         assign(postAddr, binop(bU == 1 ? Iop_Add32 : Iop_Sub32,
+                                mkexpr(preAddr), mkU32(imm8)));
+
+         IRTemp transAddr = bP == 1 ? postAddr : preAddr;
+
+         if (isST) {
+
+            /* Store.  If necessary, update the base register before
+               the store itself, so that the common idiom of "str rX,
+               [sp, #-4]!" (store rX at sp-4, then do new sp = sp-4,
+               a.k.a "push rX") doesn't cause Memcheck to complain
+               that the access is below the stack pointer.  Also, not
+               updating sp before the store confuses Valgrind's
+               dynamic stack-extending logic.  So do it before the
+               store.  Hence we need to snarf the store data before
+               doing the basereg update. */
+
+            /* get hold of the data to be stored */
+            IRTemp oldRt = newTemp(Ity_I32);
+            assign(oldRt, getIRegT(rT));
+
+            /* Update Rn if necessary. */
+            if (bW == 1) {
+               vassert(rN != rT); // assured by validity check above
+               putIRegT(rN, mkexpr(postAddr), condT);
+            }
+
+            /* generate the transfer */
+            IRExpr* data = NULL;
+            switch (ty) {
+               case Ity_I8:
+                  data = unop(Iop_32to8, mkexpr(oldRt));
+                  break;
+               case Ity_I16:
+                  data = unop(Iop_32to16, mkexpr(oldRt));
+                  break;
+               case Ity_I32:
+                  data = mkexpr(oldRt);
+                  break;
+               default:
+                  vassert(0);
+            }
+            storeGuardedLE(mkexpr(transAddr), data, condT);
+
+         } else {
+
+            /* Load. */
+            IRTemp llOldRt = newTemp(Ity_I32);
+            assign(llOldRt, llGetIReg(rT));
+
+            /* generate the transfer */
+            IRTemp    newRt = newTemp(Ity_I32);
+            IRLoadGOp widen = ILGop_INVALID;
+            switch (ty) {
+               case Ity_I8:
+                  widen = syned ? ILGop_8Sto32 : ILGop_8Uto32; break;
+               case Ity_I16:
+                  widen = syned ? ILGop_16Sto32 : ILGop_16Uto32; break;
+               case Ity_I32:
+                  widen = ILGop_Ident32; break;
+               default:
+                  vassert(0);
+            }
+            loadGuardedLE(newRt, widen,
+                          mkexpr(transAddr), mkexpr(llOldRt), condT);
+            if (rT == 15) {
+               vassert(loadsPC);
+               /* We'll do the write to the PC just below */
+            } else {
+               vassert(!loadsPC);
+               /* IRTemp_INVALID is OK here because in the case where
+                  condT is false at run time, we're just putting the
+                  old rT value back. */
+               putIRegT(rT, mkexpr(newRt), IRTemp_INVALID);
+            }
+
+            /* Update Rn if necessary. */
+            if (bW == 1) {
+               vassert(rN != rT); // assured by validity check above
+               putIRegT(rN, mkexpr(postAddr), condT);
+            }
+
+            if (loadsPC) {
+               /* Presumably this is an interworking branch. */
+               vassert(rN != 15); // assured by validity check above
+               vassert(rT == 15);
+               vassert(condT == IRTemp_INVALID); /* due to check above */
+               llPutIReg(15, mkexpr(newRt));
+               dres.jk_StopHere = Ijk_Boring;  /* or _Ret ? */
+               dres.whatNext    = Dis_StopHere;
+            }
+         }
+
+         if (bP == 1 && bW == 0) {
+            DIP("%s.w r%u, [r%u, #%c%u]\n",
+                nm, rT, rN, bU ? '+' : '-', imm8);
+         }
+         else if (bP == 1 && bW == 1) {
+            DIP("%s.w r%u, [r%u, #%c%u]!\n",
+                nm, rT, rN, bU ? '+' : '-', imm8);
+         }
+         else {
+            vassert(bP == 0 && bW == 1);
+            DIP("%s.w r%u, [r%u], #%c%u\n",
+                nm, rT, rN, bU ? '+' : '-', imm8);
+         }
+
+         goto decode_success;
+      }
+   }
+
+   /* ------------- LD/ST reg+(reg<<imm2) ------------- */
+   /* Loads and stores of the form:
+         op  Rt, [Rn, Rm, LSL #imm2]
+      where op is one of
+         ldrb ldrh ldr  ldrsb ldrsh
+         strb strh str
+   */
+   if (INSN0(15,9) == BITS7(1,1,1,1,1,0,0)
+       && INSN1(11,6) == BITS6(0,0,0,0,0,0)) {
+      Bool   valid  = True;
+      Bool   syned  = False;   // sign-extend the loaded value?
+      Bool   isST   = False;   // store (True) vs load (False)
+      IRType ty     = Ity_I8;  // transfer size
+      const HChar* nm = "???";
+
+      switch (INSN0(8,4)) {
+         case BITS5(0,0,0,0,0):   // strb
+            nm = "strb"; isST = True; break;
+         case BITS5(0,0,0,0,1):   // ldrb
+            nm = "ldrb"; break;
+         case BITS5(1,0,0,0,1):   // ldrsb
+            nm = "ldrsb"; syned = True; break;
+         case BITS5(0,0,0,1,0):   // strh
+            nm = "strh"; ty = Ity_I16; isST = True; break;
+         case BITS5(0,0,0,1,1):   // ldrh
+            nm = "ldrh"; ty = Ity_I16; break;
+         case BITS5(1,0,0,1,1):   // ldrsh
+            nm = "ldrsh"; ty = Ity_I16; syned = True; break;
+         case BITS5(0,0,1,0,0):   // str
+            nm = "str"; ty = Ity_I32; isST = True; break;
+         case BITS5(0,0,1,0,1):
+            nm = "ldr"; ty = Ity_I32; break;  // ldr
+         default:
+            valid = False; break;
+      }
+
+      UInt rN      = INSN0(3,0);
+      UInt rM      = INSN1(3,0);
+      UInt rT      = INSN1(15,12);
+      UInt imm2    = INSN1(5,4);  // shift amount, 0 .. 3
+      Bool loadsPC = False;
+
+      if (ty == Ity_I8 || ty == Ity_I16) {
+         /* all 8- and 16-bit load and store cases have the
+            same exclusion set. */
+         if (rN == 15 || isBadRegT(rT) || isBadRegT(rM))
+            valid = False;
+      } else {
+         vassert(ty == Ity_I32);
+         if (rN == 15 || isBadRegT(rM))
+            valid = False;
+         if (isST && rT == 15)
+            valid = False;
+         /* If it is a load and rT is 15, that's only allowable if we
+            not in an IT block, or are the last in it.  Need to insert
+            a dynamic check for that. */
+         if (!isST && rT == 15)
+            loadsPC = True;
+      }
+
+      if (valid) {
+         // if it's a branch, it can't happen in the middle of an IT block
+         // Also, if it is a branch, make it unconditional at this point.
+         // Doing conditional branches in-line is too complex (for now)
+         if (loadsPC) {
+            gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+            // go uncond
+            mk_skip_over_T32_if_cond_is_false(condT);
+            condT = IRTemp_INVALID;
+            // now uncond
+         }
+
+         /* EA = Rn + (Rm << imm2) */
+         IRTemp transAddr = newTemp(Ity_I32);
+         assign(transAddr,
+                binop( Iop_Add32,
+                       getIRegT(rN),
+                       binop(Iop_Shl32, getIRegT(rM), mkU8(imm2)) ));
+
+         if (isST) {
+
+            /* get hold of the data to be stored */
+            IRTemp oldRt = newTemp(Ity_I32);
+            assign(oldRt, getIRegT(rT));
+
+            /* generate the transfer: narrow Rt to the transfer size
+               and do a guarded (conditional-on-condT) store */
+            IRExpr* data = NULL;
+            switch (ty) {
+               case Ity_I8:
+                  data = unop(Iop_32to8, mkexpr(oldRt));
+                  break;
+               case Ity_I16:
+                  data = unop(Iop_32to16, mkexpr(oldRt));
+                  break;
+              case Ity_I32:
+                  data = mkexpr(oldRt);
+                  break;
+              default:
+                 vassert(0);
+            }
+            storeGuardedLE(mkexpr(transAddr), data, condT);
+
+         } else {
+
+            /* Load. */
+            IRTemp llOldRt = newTemp(Ity_I32);
+            assign(llOldRt, llGetIReg(rT));
+
+            /* generate the transfer: widen to 32 bits, keeping the
+               old Rt value if condT turns out false at run time */
+            IRTemp    newRt = newTemp(Ity_I32);
+            IRLoadGOp widen = ILGop_INVALID;
+            switch (ty) {
+               case Ity_I8:
+                  widen = syned ? ILGop_8Sto32 : ILGop_8Uto32; break;
+               case Ity_I16:
+                  widen = syned ? ILGop_16Sto32 : ILGop_16Uto32; break;
+               case Ity_I32:
+                  widen = ILGop_Ident32; break;
+               default:
+                  vassert(0);
+            }
+            loadGuardedLE(newRt, widen,
+                          mkexpr(transAddr), mkexpr(llOldRt), condT);
+
+            if (rT == 15) {
+               vassert(loadsPC);
+               /* We'll do the write to the PC just below */
+            } else {
+               vassert(!loadsPC);
+               /* IRTemp_INVALID is OK here because in the case where
+                  condT is false at run time, we're just putting the
+                  old rT value back. */
+               putIRegT(rT, mkexpr(newRt), IRTemp_INVALID);
+            }
+
+            if (loadsPC) {
+               /* Presumably this is an interworking branch. */
+               vassert(rN != 15); // assured by validity check above
+               vassert(rT == 15);
+               vassert(condT == IRTemp_INVALID); /* due to check above */
+               llPutIReg(15, mkexpr(newRt));
+               dres.jk_StopHere = Ijk_Boring;  /* or _Ret ? */
+               dres.whatNext    = Dis_StopHere;
+            }
+         }
+
+         DIP("%s.w r%u, [r%u, r%u, LSL #%u]\n",
+             nm, rT, rN, rM, imm2);
+
+         goto decode_success;
+      }
+   }
+
+   /* --------------- LD/ST reg+imm12 --------------- */
+   /* Loads and stores of the form:
+         op  Rt, [Rn, #+-imm12]
+      where op is one of
+         ldrb ldrh ldr  ldrsb ldrsh
+         strb strh str
+      Negative immediates (bU == 0) are only accepted for the
+      PC-relative (literal) load forms; see the validity checks
+      below. */
+   if (INSN0(15,9) == BITS7(1,1,1,1,1,0,0)) {
+      Bool   valid  = True;
+      Bool   syned  = INSN0(8,8) == 1;
+      Bool   isST   = False;
+      IRType ty     = Ity_I8;
+      UInt   bU     = INSN0(7,7); // 1: +imm   0: -imm
+                                  // -imm is only supported by literal versions
+      const HChar* nm = "???";
+
+      switch (INSN0(6,4)) {
+         case BITS3(0,0,0):   // strb
+            nm = "strb"; isST = True; break;
+         case BITS3(0,0,1):   // ldrb
+            nm = syned ? "ldrsb" : "ldrb"; break;
+         case BITS3(0,1,0):   // strh
+            nm = "strh"; ty = Ity_I16; isST = True; break;
+         case BITS3(0,1,1):   // ldrh
+            nm = syned ? "ldrsh" : "ldrh"; ty = Ity_I16; break;
+         case BITS3(1,0,0):   // str
+            nm = "str"; ty = Ity_I32; isST = True; break;
+         case BITS3(1,0,1):
+            nm = "ldr"; ty = Ity_I32; break;  // ldr
+         default:
+            valid = False; break;
+      }
+
+      UInt rN      = INSN0(3,0);
+      UInt rT      = INSN1(15,12);
+      UInt imm12   = INSN1(11,0);
+      Bool loadsPC = False;
+
+      if (rN != 15 && bU == 0) {
+         // only pc supports #-imm12
+         valid = False;
+      }
+
+      if (isST) {
+         if (syned) valid = False;   // "strsb"/"strsh" don't exist
+         if (rN == 15 || rT == 15)
+            valid = False;
+      } else {
+         /* For a 32-bit load, rT == 15 is only allowable if we are not
+            in an IT block, or are the last in it.  Need to insert
+            a dynamic check for that.  Also, in this particular
+            case, rN == 15 is allowable.  In this case however, the
+            value obtained for rN is (apparently)
+            "word-align(address of current insn + 4)". */
+         if (rT == 15) {
+            if (ty == Ity_I32)
+               loadsPC = True;
+            else // Can't do it for B/H loads
+               valid = False;
+         }
+      }
+
+      if (valid) {
+         // if it's a branch, it can't happen in the middle of an IT block
+         // Also, if it is a branch, make it unconditional at this point.
+         // Doing conditional branches in-line is too complex (for now)
+         if (loadsPC) {
+            gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+            // go uncond
+            mk_skip_over_T32_if_cond_is_false(condT);
+            condT = IRTemp_INVALID;
+            // now uncond
+         }
+
+         /* Base: Rn, or word-aligned PC for the literal forms. */
+         IRTemp rNt = newTemp(Ity_I32);
+         if (rN == 15) {
+            vassert(!isST);
+            assign(rNt, binop(Iop_And32, getIRegT(15), mkU32(~3)));
+         } else {
+            assign(rNt, getIRegT(rN));
+         }
+
+         /* EA = base +/- imm12 */
+         IRTemp transAddr = newTemp(Ity_I32);
+         assign(transAddr,
+                binop(bU == 1 ? Iop_Add32 : Iop_Sub32,
+                      mkexpr(rNt), mkU32(imm12)));
+
+         IRTemp oldRt = newTemp(Ity_I32);
+         assign(oldRt, getIRegT(rT));
+
+         IRTemp llOldRt = newTemp(Ity_I32);
+         assign(llOldRt, llGetIReg(rT));
+
+         if (isST) {
+            /* Guarded store: narrow Rt to the transfer size first. */
+            IRExpr* data = NULL;
+            switch (ty) {
+               case Ity_I8:
+                  data = unop(Iop_32to8, mkexpr(oldRt));
+                  break;
+               case Ity_I16:
+                  data = unop(Iop_32to16, mkexpr(oldRt));
+                  break;
+               case Ity_I32:
+                  data = mkexpr(oldRt);
+                  break;
+               default:
+                  vassert(0);
+            }
+            storeGuardedLE(mkexpr(transAddr), data, condT);
+         } else {
+            /* Guarded load: widen to 32 bits, keeping the old Rt
+               value if condT turns out false at run time. */
+            IRTemp    newRt = newTemp(Ity_I32);
+            IRLoadGOp widen = ILGop_INVALID;
+            switch (ty) {
+               case Ity_I8:
+                  widen = syned ? ILGop_8Sto32 : ILGop_8Uto32; break;
+               case Ity_I16:
+                  widen = syned ? ILGop_16Sto32 : ILGop_16Uto32; break;
+               case Ity_I32:
+                  widen = ILGop_Ident32; break;
+               default:
+                  vassert(0);
+            }
+            loadGuardedLE(newRt, widen,
+                          mkexpr(transAddr), mkexpr(llOldRt), condT);
+            if (rT == 15) {
+               vassert(loadsPC);
+               /* We'll do the write to the PC just below */
+            } else {
+               vassert(!loadsPC);
+               /* IRTemp_INVALID is OK here because in the case where
+                  condT is false at run time, we're just putting the
+                  old rT value back. */
+               putIRegT(rT, mkexpr(newRt), IRTemp_INVALID);
+            }
+
+            if (loadsPC) {
+               /* Presumably this is an interworking branch. */
+               vassert(rT == 15);
+               vassert(condT == IRTemp_INVALID); /* due to check above */
+               llPutIReg(15, mkexpr(newRt));
+               dres.jk_StopHere = Ijk_Boring;
+               dres.whatNext    = Dis_StopHere;
+            }
+         }
+
+         /* Show the actual sign of the immediate: it is negative for
+            the PC-relative (literal) forms with bU == 0, so don't
+            unconditionally print "+". */
+         DIP("%s.w r%u, [r%u, #%c%u]\n",
+             nm, rT, rN, bU ? '+' : '-', imm12);
+
+         goto decode_success;
+      }
+   }
+
+   /* -------------- LDRD/STRD reg+/-#imm8 -------------- */
+   /* Doubleword loads and stores of the form:
+         ldrd/strd  Rt, Rt2, [Rn, #+/-imm8]    or
+         ldrd/strd  Rt, Rt2, [Rn], #+/-imm8    or
+         ldrd/strd  Rt, Rt2, [Rn, #+/-imm8]!  
+   */
+   if (INSN0(15,9) == BITS7(1,1,1,0,1,0,0) && INSN0(6,6) == 1) {
+      UInt bP   = INSN0(8,8);  // offset applied before transfer?
+      UInt bU   = INSN0(7,7);  // 1: add offset  0: subtract offset
+      UInt bW   = INSN0(5,5);  // writeback to Rn?
+      UInt bL   = INSN0(4,4);  // 1: load  0: store
+      UInt rN   = INSN0(3,0);
+      UInt rT   = INSN1(15,12);
+      UInt rT2  = INSN1(11,8);
+      UInt imm8 = INSN1(7,0);  // actual byte offset is imm8 * 4
+
+      Bool valid = True;
+      if (bP == 0 && bW == 0)                 valid = False;
+      if (bW == 1 && (rN == rT || rN == rT2)) valid = False;
+      if (isBadRegT(rT) || isBadRegT(rT2))    valid = False;
+      if (bL == 1 && rT == rT2)               valid = False;
+      /* It's OK to use PC as the base register only in the
+         following case: ldrd Rt, Rt2, [PC, #+/-imm8] */
+      if (rN == 15 && (bL == 0/*store*/
+                       || bW == 1/*wb*/))     valid = False;
+
+      if (valid) {
+         /* Base address; PC reads word-aligned for the literal form. */
+         IRTemp preAddr = newTemp(Ity_I32);
+         assign(preAddr, 15 == rN
+                           ? binop(Iop_And32, getIRegT(15), mkU32(~3U))
+                           : getIRegT(rN));
+
+         IRTemp postAddr = newTemp(Ity_I32);
+         assign(postAddr, binop(bU == 1 ? Iop_Add32 : Iop_Sub32,
+                                mkexpr(preAddr), mkU32(imm8 << 2)));
+
+         /* Transfer address: includes the offset iff pre-indexed. */
+         IRTemp transAddr = bP == 1 ? postAddr : preAddr;
+
+         /* For almost all cases, we do the writeback after the transfers.
+            However, that leaves the stack "uncovered" in this case:
+               strd    rD, [sp, #-8]
+            In which case, do the writeback to SP now, instead of later.
+            This is bad in that it makes the insn non-restartable if the
+            accesses fault, but at least keeps Memcheck happy. */
+         Bool writeback_already_done = False;
+         if (bL == 0/*store*/ && bW == 1/*wb*/
+             && rN == 13 && rN != rT && rN != rT2
+             && bU == 0/*minus*/ && (imm8 << 2) == 8) {
+            putIRegT(rN, mkexpr(postAddr), condT);
+            writeback_already_done = True;
+         }
+
+         if (bL == 0) {
+            /* Store: two guarded 32-bit stores, Rt at [addr] and Rt2
+               at [addr+4]. */
+            IRTemp oldRt  = newTemp(Ity_I32);
+            IRTemp oldRt2 = newTemp(Ity_I32);
+            assign(oldRt,  getIRegT(rT));
+            assign(oldRt2, getIRegT(rT2));
+            storeGuardedLE( mkexpr(transAddr),
+                            mkexpr(oldRt), condT );
+            storeGuardedLE( binop(Iop_Add32, mkexpr(transAddr), mkU32(4)),
+                            mkexpr(oldRt2), condT );
+         } else {
+            /* Load: two guarded 32-bit loads; if condT is false at run
+               time the old Rt/Rt2 values are retained. */
+            IRTemp oldRt  = newTemp(Ity_I32);
+            IRTemp oldRt2 = newTemp(Ity_I32);
+            IRTemp newRt  = newTemp(Ity_I32);
+            IRTemp newRt2 = newTemp(Ity_I32);
+            assign(oldRt,  llGetIReg(rT));
+            assign(oldRt2, llGetIReg(rT2));
+            loadGuardedLE( newRt, ILGop_Ident32,
+                           mkexpr(transAddr),
+                           mkexpr(oldRt), condT );
+            loadGuardedLE( newRt2, ILGop_Ident32,
+                           binop(Iop_Add32, mkexpr(transAddr), mkU32(4)),
+                           mkexpr(oldRt2), condT );
+            /* Put unconditionally, since we already switched on the condT
+               in the guarded loads. */ 
+            putIRegT(rT,  mkexpr(newRt),  IRTemp_INVALID);
+            putIRegT(rT2, mkexpr(newRt2), IRTemp_INVALID);
+         }
+
+         if (bW == 1 && !writeback_already_done) {
+            putIRegT(rN, mkexpr(postAddr), condT);
+         }
+
+         const HChar* nm = bL ? "ldrd" : "strd";
+
+         if (bP == 1 && bW == 0) {
+            DIP("%s.w r%u, r%u, [r%u, #%c%u]\n",
+                nm, rT, rT2, rN, bU ? '+' : '-', imm8 << 2);
+         }
+         else if (bP == 1 && bW == 1) {
+            DIP("%s.w r%u, r%u, [r%u, #%c%u]!\n",
+                nm, rT, rT2, rN, bU ? '+' : '-', imm8 << 2);
+         }
+         else {
+            vassert(bP == 0 && bW == 1);
+            DIP("%s.w r%u, r%u, [r%u], #%c%u\n",
+                nm, rT, rT2, rN, bU ? '+' : '-', imm8 << 2);
+         }
+
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T3) Bcond.W label -------------- */
+   /* This variant carries its own condition, so can't be part of an
+      IT block ... */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && INSN1(15,14) == BITS2(1,0)
+       && INSN1(12,12) == 0) {
+      UInt cond = INSN0(9,6);
+      if (cond != ARMCondAL && cond != ARMCondNV) {
+         /* Reassemble the branch offset from its scattered fields
+            (sign bit, J1, J2, imm6, imm11), with a zero LSB. */
+         Int simm21
+            =   (INSN0(10,10) << (1 + 1 + 6 + 11 + 1))
+              | (INSN1(11,11) << (1 + 6 + 11 + 1))
+              | (INSN1(13,13) << (6 + 11 + 1))
+              | (INSN0(5,0)   << (11 + 1))
+              | (INSN1(10,0)  << 1);
+         simm21 = (simm21 << 11) >> 11;  // sign-extend 21 -> 32 bits
+
+         vassert(0 == (guest_R15_curr_instr_notENC & 1));
+         UInt dst = simm21 + guest_R15_curr_instr_notENC + 4;
+
+         /* Not allowed in an IT block; SIGILL if so. */
+         gen_SIGILL_T_if_in_ITBlock(old_itstate, new_itstate);
+
+         /* Conditional side-exit to dst; otherwise fall through to
+            the next instruction. */
+         IRTemp kondT = newTemp(Ity_I32);
+         assign( kondT, mk_armg_calculate_condition(cond) );
+         stmt( IRStmt_Exit( unop(Iop_32to1, mkexpr(kondT)),
+                            Ijk_Boring,
+                            IRConst_U32(dst | 1/*CPSR.T*/),
+                            OFFB_R15T ));
+         llPutIReg(15, mkU32( (guest_R15_curr_instr_notENC + 4) 
+                              | 1 /*CPSR.T*/ ));
+         dres.jk_StopHere = Ijk_Boring;
+         dres.whatNext    = Dis_StopHere;
+         DIP("b%s.w 0x%x\n", nCC(cond), dst);
+         goto decode_success;
+      }
+   }
+
+   /* ---------------- (T4) B.W label ---------------- */
+   /* ... whereas this variant doesn't carry its own condition, so it
+      has to be either unconditional or the conditional by virtue of
+      being the last in an IT block.  The upside is that there's 4
+      more bits available for the jump offset, so it has a 16-times
+      greater branch range than the T3 variant. */
+   if (INSN0(15,11) == BITS5(1,1,1,1,0)
+       && INSN1(15,14) == BITS2(1,0)
+       && INSN1(12,12) == 1) {
+      if (1) {
+         UInt bS  = INSN0(10,10);  // sign bit of the offset
+         UInt bJ1 = INSN1(13,13);
+         UInt bJ2 = INSN1(11,11);
+         /* Un-scramble J1/J2: I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S). */
+         UInt bI1 = 1 ^ (bJ1 ^ bS);
+         UInt bI2 = 1 ^ (bJ2 ^ bS);
+         Int simm25
+            =   (bS          << (1 + 1 + 10 + 11 + 1))
+              | (bI1         << (1 + 10 + 11 + 1))
+              | (bI2         << (10 + 11 + 1))
+              | (INSN0(9,0)  << (11 + 1))
+              | (INSN1(10,0) << 1);
+         simm25 = (simm25 << 7) >> 7;  // sign-extend 25 -> 32 bits
+
+         vassert(0 == (guest_R15_curr_instr_notENC & 1));
+         UInt dst = simm25 + guest_R15_curr_instr_notENC + 4;
+
+         /* If in an IT block, must be the last insn. */
+         gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+
+         // go uncond
+         mk_skip_over_T32_if_cond_is_false(condT);
+         condT = IRTemp_INVALID;
+         // now uncond
+
+         // branch to dst
+         llPutIReg(15, mkU32( dst | 1 /*CPSR.T*/ ));
+         dres.jk_StopHere = Ijk_Boring;
+         dres.whatNext    = Dis_StopHere;
+         DIP("b.w 0x%x\n", dst);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------ TBB, TBH ------------------ */
+   /* Table branch: new PC = PC + 2 * table-entry, where the table
+      holds byte (TBB) or halfword (TBH) entries at Rn + Rm(<<1). */
+   if (INSN0(15,4) == 0xE8D && INSN1(15,5) == 0x780) {
+      UInt rN = INSN0(3,0);
+      UInt rM = INSN1(3,0);
+      UInt bH = INSN1(4,4);  // 1: TBH (halfword table)  0: TBB (byte table)
+      if (bH/*ATC*/ || (rN != 13 && !isBadRegT(rM))) {
+         /* Must be last or not-in IT block */
+         gen_SIGILL_T_if_in_but_NLI_ITBlock(old_itstate, new_itstate);
+         /* Go uncond */
+         mk_skip_over_T32_if_cond_is_false(condT);
+         condT = IRTemp_INVALID;
+
+         /* Address of the table entry: Rn + Rm (TBB) or
+            Rn + Rm * 2 (TBH). */
+         IRExpr* ea
+             = binop(Iop_Add32,
+                     getIRegT(rN),
+                     bH ? binop(Iop_Shl32, getIRegT(rM), mkU8(1))
+                        : getIRegT(rM));
+
+         /* Zero-extended table entry. */
+         IRTemp delta = newTemp(Ity_I32);
+         if (bH) {
+            assign(delta, unop(Iop_16Uto32, loadLE(Ity_I16, ea)));
+         } else {
+            assign(delta, unop(Iop_8Uto32, loadLE(Ity_I8, ea)));
+         }
+
+         /* PC := (PC + 2*delta) | 1, staying in Thumb state. */
+         llPutIReg(
+            15,
+            binop(Iop_Or32,
+                  binop(Iop_Add32,
+                        getIRegT(15),
+                        binop(Iop_Shl32, mkexpr(delta), mkU8(1))
+                  ),
+                  mkU32(1)
+         ));
+         dres.jk_StopHere = Ijk_Boring;
+         dres.whatNext    = Dis_StopHere;
+         DIP("tb%c [r%u, r%u%s]\n",
+             bH ? 'h' : 'b', rN, rM, bH ? ", LSL #1" : "");
+         goto decode_success;
+      }
+   }
+
+   /* ------------------ UBFX ------------------ */
+   /* ------------------ SBFX ------------------ */
+   /* Bitfield extract: rD = the (wm1+1)-bit field of rN starting at
+      bit lsb, zero-extended (UBFX) or sign-extended (SBFX) to 32
+      bits.
+      There's also ARM versions of same, but it doesn't seem worth the
+      hassle to common up the handling (it's only a couple of C
+      statements). */
+   if ((INSN0(15,4) == 0xF3C // UBFX
+        || INSN0(15,4) == 0xF34) // SBFX
+       && INSN1(15,15) == 0 && INSN1(5,5) == 0) {
+      UInt rN  = INSN0(3,0);
+      UInt rD  = INSN1(11,8);
+      UInt lsb = (INSN1(14,12) << 2) | INSN1(7,6);
+      UInt wm1 = INSN1(4,0);   // width minus one, 0 .. 31
+      UInt msb =  lsb + wm1;
+      if (!isBadRegT(rD) && !isBadRegT(rN) && msb <= 31) {
+         Bool   isU  = INSN0(15,4) == 0xF3C;
+         IRTemp src  = newTemp(Ity_I32);
+         IRTemp tmp  = newTemp(Ity_I32);
+         IRTemp res  = newTemp(Ity_I32);
+         /* Shift an unsigned constant: wm1 may be as large as 31,
+            and (1 << 31) on a signed int is undefined behaviour. */
+         UInt   mask = ((1U << wm1) - 1) + (1U << wm1);
+         vassert(msb <= 31); // 'msb >= 0' would be vacuous: it's unsigned
+         vassert(mask != 0); // guaranteed by msb being in 0 .. 31 inclusive
+
+         /* tmp = field moved down to bit 0 and masked */
+         assign(src, getIRegT(rN));
+         assign(tmp, binop(Iop_And32,
+                           binop(Iop_Shr32, mkexpr(src), mkU8(lsb)),
+                           mkU32(mask)));
+         /* Move the field's top bit up to bit 31, then shift back
+            down logically (UBFX) or arithmetically (SBFX) to
+            zero-/sign-extend it. */
+         assign(res, binop(isU ? Iop_Shr32 : Iop_Sar32,
+                           binop(Iop_Shl32, mkexpr(tmp), mkU8(31-wm1)),
+                           mkU8(31-wm1)));
+
+         putIRegT(rD, mkexpr(res), condT);
+
+         DIP("%s r%u, r%u, #%u, #%u\n",
+             isU ? "ubfx" : "sbfx", rD, rN, lsb, wm1 + 1);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------ UXTB ------------------ */
+   /* ------------------ UXTH ------------------ */
+   /* ------------------ SXTB ------------------ */
+   /* ------------------ SXTH ------------------ */
+   /* ----------------- UXTB16 ----------------- */
+   /* ----------------- SXTB16 ----------------- */
+   /* Zero/sign extension of a byte or halfword of Rm (or of both
+      bytes of both halves, for the B16 forms), after an optional
+      rotate of Rm. */
+   /* FIXME: this is an exact duplicate of the ARM version.  They
+      should be commoned up. */
+   if ((INSN0(15,0) == 0xFA5F     // UXTB
+        || INSN0(15,0) == 0xFA1F  // UXTH
+        || INSN0(15,0) == 0xFA4F  // SXTB
+        || INSN0(15,0) == 0xFA0F  // SXTH
+        || INSN0(15,0) == 0xFA3F  // UXTB16
+        || INSN0(15,0) == 0xFA2F) // SXTB16
+       && INSN1(15,12) == BITS4(1,1,1,1)
+       && INSN1(7,6) == BITS2(1,0)) {
+      UInt rD = INSN1(11,8);
+      UInt rM = INSN1(3,0);
+      UInt rot = INSN1(5,4);  // rotation of Rm, in units of 8 bits
+      if (!isBadRegT(rD) && !isBadRegT(rM)) {
+         const HChar* nm = "???";
+         IRTemp srcT = newTemp(Ity_I32);
+         IRTemp rotT = newTemp(Ity_I32);
+         IRTemp dstT = newTemp(Ity_I32);
+         assign(srcT, getIRegT(rM));
+         assign(rotT, genROR32(srcT, 8 * rot));  // Rm rotated right
+         switch (INSN0(15,0)) {
+            case 0xFA5F: // UXTB
+               nm = "uxtb";
+               assign(dstT, unop(Iop_8Uto32,
+                                 unop(Iop_32to8, mkexpr(rotT))));
+               break;
+            case 0xFA1F: // UXTH
+               nm = "uxth";
+               assign(dstT, unop(Iop_16Uto32,
+                                 unop(Iop_32to16, mkexpr(rotT))));
+               break;
+            case 0xFA4F: // SXTB
+               nm = "sxtb";
+               assign(dstT, unop(Iop_8Sto32,
+                                 unop(Iop_32to8, mkexpr(rotT))));
+               break;
+            case 0xFA0F: // SXTH
+               nm = "sxth";
+               assign(dstT, unop(Iop_16Sto32,
+                                 unop(Iop_32to16, mkexpr(rotT))));
+               break;
+            case 0xFA3F: // UXTB16
+               nm = "uxtb16";
+               assign(dstT, binop(Iop_And32, mkexpr(rotT),
+                                             mkU32(0x00FF00FF)));
+               break;
+            case 0xFA2F: { // SXTB16
+               nm = "sxtb16";
+               /* Sign-extend bytes 0 and 2 independently within
+                  their respective 16-bit halves. */
+               IRTemp lo32 = newTemp(Ity_I32);
+               IRTemp hi32 = newTemp(Ity_I32);
+               assign(lo32, binop(Iop_And32, mkexpr(rotT), mkU32(0xFF)));
+               assign(hi32, binop(Iop_Shr32, mkexpr(rotT), mkU8(16)));
+               assign(
+                  dstT,
+                  binop(Iop_Or32,
+                        binop(Iop_And32,
+                              unop(Iop_8Sto32,
+                                   unop(Iop_32to8, mkexpr(lo32))),
+                              mkU32(0xFFFF)),
+                        binop(Iop_Shl32,
+                              unop(Iop_8Sto32,
+                                   unop(Iop_32to8, mkexpr(hi32))),
+                              mkU8(16))
+               ));
+               break;
+            }
+            default:
+               vassert(0);
+         }
+         putIRegT(rD, mkexpr(dstT), condT);
+         DIP("%s r%u, r%u, ror #%u\n", nm, rD, rM, 8 * rot);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- MUL.W Rd, Rn, Rm -------------- */
+   /* 32 x 32 -> 32 multiply; low 32 bits of the product go to Rd,
+      conditionally on condT. */
+   if (INSN0(15,4) == 0xFB0
+       && (INSN1(15,0) & 0xF0F0) == 0xF000) {
+      UInt rD = INSN1(11,8);
+      UInt rM = INSN1(3,0);
+      UInt rN = INSN0(3,0);
+      Bool regsOK = !isBadRegT(rD) && !isBadRegT(rN) && !isBadRegT(rM);
+      if (regsOK) {
+         IRTemp product = newTemp(Ity_I32);
+         assign(product,
+                binop(Iop_Mul32, getIRegT(rN), getIRegT(rM)));
+         putIRegT(rD, mkexpr(product), condT);
+         DIP("mul.w r%u, r%u, r%u\n", rD, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- SDIV.W Rd, Rn, Rm -------------- */
+   /* Signed 32-bit division: Rd = Rn / Rm, conditionally on condT. */
+   if (INSN0(15,4) == 0xFB9
+       && (INSN1(15,0) & 0xF0F0) == 0xF0F0) {
+      UInt rD = INSN1(11,8);
+      UInt rM = INSN1(3,0);
+      UInt rN = INSN0(3,0);
+      if (!(isBadRegT(rD) || isBadRegT(rN) || isBadRegT(rM))) {
+         IRTemp numer = newTemp(Ity_I32);
+         IRTemp denom = newTemp(Ity_I32);
+         IRTemp quot  = newTemp(Ity_I32);
+         assign(numer, getIRegT(rN));
+         assign(denom, getIRegT(rM));
+         assign(quot, binop(Iop_DivS32, mkexpr(numer), mkexpr(denom)));
+         putIRegT(rD, mkexpr(quot), condT);
+         DIP("sdiv.w r%u, r%u, r%u\n", rD, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- UDIV.W Rd, Rn, Rm -------------- */
+   /* Unsigned 32-bit division: Rd = Rn / Rm, conditionally on condT. */
+   if (INSN0(15,4) == 0xFBB
+       && (INSN1(15,0) & 0xF0F0) == 0xF0F0) {
+      UInt rD = INSN1(11,8);
+      UInt rM = INSN1(3,0);
+      UInt rN = INSN0(3,0);
+      if (!(isBadRegT(rD) || isBadRegT(rN) || isBadRegT(rM))) {
+         IRTemp numer = newTemp(Ity_I32);
+         IRTemp denom = newTemp(Ity_I32);
+         IRTemp quot  = newTemp(Ity_I32);
+         assign(numer, getIRegT(rN));
+         assign(denom, getIRegT(rM));
+         assign(quot, binop(Iop_DivU32, mkexpr(numer), mkexpr(denom)));
+         putIRegT(rD, mkexpr(quot), condT);
+         DIP("udiv.w r%u, r%u, r%u\n", rD, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------ {U,S}MULL ------------------ */
+   /* 32 x 32 -> 64 multiply; rDhi:rDlo receives the full product. */
+   if ((INSN0(15,4) == 0xFB8 || INSN0(15,4) == 0xFBA)
+       && INSN1(7,4) == BITS4(0,0,0,0)) {
+      UInt isU  = INSN0(5,5);  // 0: SMULL (0xFB8)  1: UMULL (0xFBA)
+      UInt rN   = INSN0(3,0);
+      UInt rDlo = INSN1(15,12);
+      UInt rDhi = INSN1(11,8);
+      UInt rM   = INSN1(3,0);
+      if (!isBadRegT(rDhi) && !isBadRegT(rDlo)
+          && !isBadRegT(rN) && !isBadRegT(rM) && rDlo != rDhi) {
+         IRTemp res   = newTemp(Ity_I64);
+         assign(res, binop(isU ? Iop_MullU32 : Iop_MullS32,
+                           getIRegT(rN), getIRegT(rM)));
+         putIRegT( rDhi, unop(Iop_64HIto32, mkexpr(res)), condT );
+         putIRegT( rDlo, unop(Iop_64to32, mkexpr(res)), condT );
+         DIP("%cmull r%u, r%u, r%u, r%u\n",
+             isU ? 'u' : 's', rDlo, rDhi, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------ ML{A,S} ------------------ */
+   /* rD = rA +/- (rN * rM): multiply-accumulate (MLA) or
+      multiply-subtract (MLS). */
+   if (INSN0(15,4) == 0xFB0
+       && (   INSN1(7,4) == BITS4(0,0,0,0)    // MLA
+           || INSN1(7,4) == BITS4(0,0,0,1))) { // MLS
+      UInt rN = INSN0(3,0);
+      UInt rA = INSN1(15,12);
+      UInt rD = INSN1(11,8);
+      UInt rM = INSN1(3,0);
+      if (!isBadRegT(rD) && !isBadRegT(rN)
+          && !isBadRegT(rM) && !isBadRegT(rA)) {
+         Bool   isMLA = INSN1(7,4) == BITS4(0,0,0,0);
+         IRTemp res   = newTemp(Ity_I32);
+         assign(res,
+                binop(isMLA ? Iop_Add32 : Iop_Sub32,
+                      getIRegT(rA),
+                      binop(Iop_Mul32, getIRegT(rN), getIRegT(rM))));
+         putIRegT(rD, mkexpr(res), condT);
+         DIP("%s r%u, r%u, r%u, r%u\n",
+             isMLA ? "mla" : "mls", rD, rN, rM, rA);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------ (T3) ADR ------------------ */
+   if ((INSN0(15,0) == 0xF20F || INSN0(15,0) == 0xF60F)
+       && INSN1(15,15) == 0) {
+      /* rD = align4(PC) + imm32 */
+      UInt rD = INSN1(11,8);
+      if (!isBadRegT(rD)) {
+         UInt imm32 = (INSN0(10,10) << 11)
+                      | (INSN1(14,12) << 8) | INSN1(7,0);
+         putIRegT(rD, binop(Iop_Add32, 
+                            binop(Iop_And32, getIRegT(15), mkU32(~3U)),
+                            mkU32(imm32)),
+                      condT);
+         DIP("add r%u, pc, #%u\n", rD, imm32);
+         goto decode_success;
+      }
+   }
+
+   /* ----------------- (T1) UMLAL ----------------- */
+   /* ----------------- (T1) SMLAL ----------------- */
+   /* rDhi:rDlo += rN * rM: 64-bit accumulate of a 32x32 product. */
+   if ((INSN0(15,4) == 0xFBE // UMLAL
+        || INSN0(15,4) == 0xFBC) // SMLAL
+       && INSN1(7,4) == BITS4(0,0,0,0)) {
+      UInt rN   = INSN0(3,0);
+      UInt rDlo = INSN1(15,12);
+      UInt rDhi = INSN1(11,8);
+      UInt rM   = INSN1(3,0);
+      if (!isBadRegT(rDlo) && !isBadRegT(rDhi) && !isBadRegT(rN)
+          && !isBadRegT(rM) && rDhi != rDlo) {
+         Bool   isS   = INSN0(15,4) == 0xFBC;  // signed variant?
+         IRTemp argL  = newTemp(Ity_I32);
+         IRTemp argR  = newTemp(Ity_I32);
+         IRTemp old   = newTemp(Ity_I64);
+         IRTemp res   = newTemp(Ity_I64);
+         IRTemp resHi = newTemp(Ity_I32);
+         IRTemp resLo = newTemp(Ity_I32);
+         IROp   mulOp = isS ? Iop_MullS32 : Iop_MullU32;
+         assign( argL, getIRegT(rM));
+         assign( argR, getIRegT(rN));
+         /* old = rDhi:rDlo viewed as one 64-bit value */
+         assign( old, binop(Iop_32HLto64, getIRegT(rDhi), getIRegT(rDlo)) );
+         assign( res, binop(Iop_Add64,
+                            mkexpr(old),
+                            binop(mulOp, mkexpr(argL), mkexpr(argR))) );
+         assign( resHi, unop(Iop_64HIto32, mkexpr(res)) );
+         assign( resLo, unop(Iop_64to32, mkexpr(res)) );
+         putIRegT( rDhi, mkexpr(resHi), condT );
+         putIRegT( rDlo, mkexpr(resLo), condT );
+         DIP("%cmlal r%u, r%u, r%u, r%u\n",
+             isS ? 's' : 'u', rDlo, rDhi, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------ (T1) UMAAL ------------------ */
+   /* rDhi:rDlo = rN * rM + rDhi + rDlo (all unsigned). */
+   if (INSN0(15,4) == 0xFBE && INSN1(7,4) == BITS4(0,1,1,0)) {
+      UInt rN   = INSN0(3,0);
+      UInt rDlo = INSN1(15,12);
+      UInt rDhi = INSN1(11,8);
+      UInt rM   = INSN1(3,0);
+      if (!isBadRegT(rDlo) && !isBadRegT(rDhi) && !isBadRegT(rN)
+          && !isBadRegT(rM) && rDhi != rDlo) {
+         IRTemp argN   = newTemp(Ity_I32);
+         IRTemp argM   = newTemp(Ity_I32);
+         IRTemp argDhi = newTemp(Ity_I32);
+         IRTemp argDlo = newTemp(Ity_I32);
+         IRTemp res    = newTemp(Ity_I64);
+         IRTemp resHi  = newTemp(Ity_I32);
+         IRTemp resLo  = newTemp(Ity_I32);
+         assign( argN,   getIRegT(rN) );
+         assign( argM,   getIRegT(rM) );
+         assign( argDhi, getIRegT(rDhi) );
+         assign( argDlo, getIRegT(rDlo) );
+         /* The 64-bit sum cannot wrap: max is
+            0xFFFFFFFE00000001 + 2 * 0xFFFFFFFF = 0xFFFFFFFFFFFFFFFF. */
+         assign( res, 
+                 binop(Iop_Add64,
+                       binop(Iop_Add64,
+                             binop(Iop_MullU32, mkexpr(argN), mkexpr(argM)),
+                             unop(Iop_32Uto64, mkexpr(argDhi))),
+                       unop(Iop_32Uto64, mkexpr(argDlo))) );
+         assign( resHi, unop(Iop_64HIto32, mkexpr(res)) );
+         assign( resLo, unop(Iop_64to32, mkexpr(res)) );
+         putIRegT( rDhi, mkexpr(resHi), condT );
+         putIRegT( rDlo, mkexpr(resLo), condT );
+         DIP("umaal r%u, r%u, r%u, r%u\n", rDlo, rDhi, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- (T1) SMMUL{R} ------------------ */
+   if (INSN0(15,7) == BITS9(1,1,1,1,1,0,1,1,0)
+       && INSN0(6,4) == BITS3(1,0,1)
+       && INSN1(15,12) == BITS4(1,1,1,1)
+       && INSN1(7,5) == BITS3(0,0,0)) {
+      UInt bitR = INSN1(4,4);
+      UInt rD = INSN1(11,8);
+      UInt rM = INSN1(3,0);
+      UInt rN = INSN0(3,0);
+      if (!isBadRegT(rD) && !isBadRegT(rN) && !isBadRegT(rM)) {
+         IRExpr* res
+         = unop(Iop_64HIto32,
+                binop(Iop_Add64,
+                      binop(Iop_MullS32, getIRegT(rN), getIRegT(rM)),
+                      mkU64(bitR ? 0x80000000ULL : 0ULL)));
+         putIRegT(rD, res, condT);
+         DIP("smmul%s r%u, r%u, r%u\n",
+             bitR ? "r" : "", rD, rN, rM);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- (T1) SMMLA{R} ------------------ */
+   if (INSN0(15,7) == BITS9(1,1,1,1,1,0,1,1,0)
+       && INSN0(6,4) == BITS3(1,0,1)
+       && INSN1(7,5) == BITS3(0,0,0)) {
+      UInt bitR = INSN1(4,4);
+      UInt rA = INSN1(15,12);
+      UInt rD = INSN1(11,8);
+      UInt rM = INSN1(3,0);
+      UInt rN = INSN0(3,0);
+      if (!isBadRegT(rD) && !isBadRegT(rN) && !isBadRegT(rM) && (rA != 13)) {
+         IRExpr* res
+         = unop(Iop_64HIto32,
+                binop(Iop_Add64,
+                      binop(Iop_Add64,
+                            binop(Iop_32HLto64, getIRegT(rA), mkU32(0)),
+                            binop(Iop_MullS32, getIRegT(rN), getIRegT(rM))),
+                      mkU64(bitR ? 0x80000000ULL : 0ULL)));
+         putIRegT(rD, res, condT);
+         DIP("smmla%s r%u, r%u, r%u, r%u\n",
+             bitR ? "r" : "", rD, rN, rM, rA);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------ (T2) ADR ------------------ */
+   if ((INSN0(15,0) == 0xF2AF || INSN0(15,0) == 0xF6AF)
+       && INSN1(15,15) == 0) {
+      /* rD = align4(PC) - imm32 */
+      UInt rD = INSN1(11,8);
+      if (!isBadRegT(rD)) {
+         UInt imm32 = (INSN0(10,10) << 11)
+                      | (INSN1(14,12) << 8) | INSN1(7,0);
+         putIRegT(rD, binop(Iop_Sub32, 
+                            binop(Iop_And32, getIRegT(15), mkU32(~3U)),
+                            mkU32(imm32)),
+                      condT);
+         DIP("sub r%u, pc, #%u\n", rD, imm32);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- (T1) BFI ------------------- */
+   /* ------------------- (T1) BFC ------------------- */
+   if (INSN0(15,4) == 0xF36 && INSN1(15,15) == 0 && INSN1(5,5) == 0) {
+      UInt rD  = INSN1(11,8);
+      UInt rN  = INSN0(3,0);
+      UInt msb = INSN1(4,0);
+      UInt lsb = (INSN1(14,12) << 2) | INSN1(7,6);
+      if (isBadRegT(rD) || rN == 13 || msb < lsb) {
+         /* undecodable; fall through */
+      } else {
+         IRTemp src    = newTemp(Ity_I32);
+         IRTemp olddst = newTemp(Ity_I32);
+         IRTemp newdst = newTemp(Ity_I32);
+         UInt   mask = 1 << (msb - lsb);
+         mask = (mask - 1) + mask;
+         vassert(mask != 0); // guaranteed by "msb < lsb" check above
+         mask <<= lsb;
+
+         assign(src, rN == 15 ? mkU32(0) : getIRegT(rN));
+         assign(olddst, getIRegT(rD));
+         assign(newdst,
+                binop(Iop_Or32,
+                   binop(Iop_And32,
+                         binop(Iop_Shl32, mkexpr(src), mkU8(lsb)), 
+                         mkU32(mask)),
+                   binop(Iop_And32,
+                         mkexpr(olddst),
+                         mkU32(~mask)))
+               );
+
+         putIRegT(rD, mkexpr(newdst), condT);
+
+         if (rN == 15) {
+            DIP("bfc r%u, #%u, #%u\n",
+                rD, lsb, msb-lsb+1);
+         } else {
+            DIP("bfi r%u, r%u, #%u, #%u\n",
+                rD, rN, lsb, msb-lsb+1);
+         }
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- (T1) SXTAH ------------------- */
+   /* ------------------- (T1) UXTAH ------------------- */
+   if ((INSN0(15,4) == 0xFA1      // UXTAH
+        || INSN0(15,4) == 0xFA0)  // SXTAH
+       && INSN1(15,12) == BITS4(1,1,1,1)
+       && INSN1(7,6) == BITS2(1,0)) {
+      Bool isU = INSN0(15,4) == 0xFA1;
+      UInt rN  = INSN0(3,0);
+      UInt rD  = INSN1(11,8);
+      UInt rM  = INSN1(3,0);
+      UInt rot = INSN1(5,4);
+      if (!isBadRegT(rD) && !isBadRegT(rN) && !isBadRegT(rM)) {
+         IRTemp srcL = newTemp(Ity_I32);
+         IRTemp srcR = newTemp(Ity_I32);
+         IRTemp res  = newTemp(Ity_I32);
+         assign(srcR, getIRegT(rM));
+         assign(srcL, getIRegT(rN));
+         assign(res,  binop(Iop_Add32,
+                            mkexpr(srcL),
+                            unop(isU ? Iop_16Uto32 : Iop_16Sto32,
+                                 unop(Iop_32to16, 
+                                      genROR32(srcR, 8 * rot)))));
+         putIRegT(rD, mkexpr(res), condT);
+         DIP("%cxtah r%u, r%u, r%u, ror #%u\n",
+             isU ? 'u' : 's', rD, rN, rM, rot);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- (T1) SXTAB ------------------- */
+   /* ------------------- (T1) UXTAB ------------------- */
+   if ((INSN0(15,4) == 0xFA5      // UXTAB
+        || INSN0(15,4) == 0xFA4)  // SXTAB
+       && INSN1(15,12) == BITS4(1,1,1,1)
+       && INSN1(7,6) == BITS2(1,0)) {
+      Bool isU = INSN0(15,4) == 0xFA5;
+      UInt rN  = INSN0(3,0);
+      UInt rD  = INSN1(11,8);
+      UInt rM  = INSN1(3,0);
+      UInt rot = INSN1(5,4);
+      if (!isBadRegT(rD) && !isBadRegT(rN) && !isBadRegT(rM)) {
+         IRTemp srcL = newTemp(Ity_I32);
+         IRTemp srcR = newTemp(Ity_I32);
+         IRTemp res  = newTemp(Ity_I32);
+         assign(srcR, getIRegT(rM));
+         assign(srcL, getIRegT(rN));
+         assign(res,  binop(Iop_Add32,
+                            mkexpr(srcL),
+                            unop(isU ? Iop_8Uto32 : Iop_8Sto32,
+                                 unop(Iop_32to8, 
+                                      genROR32(srcR, 8 * rot)))));
+         putIRegT(rD, mkexpr(res), condT);
+         DIP("%cxtab r%u, r%u, r%u, ror #%u\n",
+             isU ? 'u' : 's', rD, rN, rM, rot);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- (T1) CLZ ------------------- */
+   if (INSN0(15,4) == 0xFAB
+       && INSN1(15,12) == BITS4(1,1,1,1)
+       && INSN1(7,4) == BITS4(1,0,0,0)) {
+      UInt rM1 = INSN0(3,0);
+      UInt rD  = INSN1(11,8);
+      UInt rM2 = INSN1(3,0);
+      if (!isBadRegT(rD) && !isBadRegT(rM1) && rM1 == rM2) {
+         IRTemp arg = newTemp(Ity_I32);
+         IRTemp res = newTemp(Ity_I32);
+         assign(arg, getIRegT(rM1));
+         assign(res, IRExpr_ITE(
+                        binop(Iop_CmpEQ32, mkexpr(arg), mkU32(0)),
+                        mkU32(32),
+                        unop(Iop_Clz32, mkexpr(arg))
+         ));
+         putIRegT(rD, mkexpr(res), condT);
+         DIP("clz r%u, r%u\n", rD, rM1);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- (T1) RBIT ------------------- */
+   if (INSN0(15,4) == 0xFA9
+       && INSN1(15,12) == BITS4(1,1,1,1)
+       && INSN1(7,4) == BITS4(1,0,1,0)) {
+      UInt rM1 = INSN0(3,0);
+      UInt rD  = INSN1(11,8);
+      UInt rM2 = INSN1(3,0);
+      if (!isBadRegT(rD) && !isBadRegT(rM1) && rM1 == rM2) {
+         IRTemp arg = newTemp(Ity_I32);
+         assign(arg, getIRegT(rM1));
+         IRTemp res = gen_BITREV(arg);
+         putIRegT(rD, mkexpr(res), condT);
+         DIP("rbit r%u, r%u\n", rD, rM1);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- (T2) REV   ------------------- */
+   /* ------------------- (T2) REV16 ------------------- */
+   if (INSN0(15,4) == 0xFA9
+       && INSN1(15,12) == BITS4(1,1,1,1)
+       && (   INSN1(7,4) == BITS4(1,0,0,0)     // REV
+           || INSN1(7,4) == BITS4(1,0,0,1))) { // REV16
+      UInt rM1   = INSN0(3,0);
+      UInt rD    = INSN1(11,8);
+      UInt rM2   = INSN1(3,0);
+      Bool isREV = INSN1(7,4) == BITS4(1,0,0,0);
+      if (!isBadRegT(rD) && !isBadRegT(rM1) && rM1 == rM2) {
+         IRTemp arg = newTemp(Ity_I32);
+         assign(arg, getIRegT(rM1));
+         IRTemp res = isREV ? gen_REV(arg) : gen_REV16(arg);
+         putIRegT(rD, mkexpr(res), condT);
+         DIP("rev%s r%u, r%u\n", isREV ? "" : "16", rD, rM1);
+         goto decode_success;
+      }
+   }
+
+   /* ------------------- (T2) REVSH ------------------ */
+   if (INSN0(15,4) == 0xFA9
+       && INSN1(15,12) == BITS4(1,1,1,1)
+       && INSN1(7,4) == BITS4(1,0,1,1)) {
+      UInt rM1 = INSN0(3,0);
+      UInt rM2 = INSN1(3,0);
+      UInt rD  = INSN1(11,8);
+      if (!isBadRegT(rD) && !isBadRegT(rM1) && rM1 == rM2) {
+         IRTemp irt_rM  = newTemp(Ity_I32);
+         IRTemp irt_hi  = newTemp(Ity_I32);
+         IRTemp irt_low = newTemp(Ity_I32);
+         IRTemp irt_res = newTemp(Ity_I32);
+         assign(irt_rM, getIRegT(rM1));
+         assign(irt_hi,
+                binop(Iop_Sar32,
+                      binop(Iop_Shl32, mkexpr(irt_rM), mkU8(24)),
+                      mkU8(16)
+                )
+         );
+         assign(irt_low,
+                binop(Iop_And32,
+                      binop(Iop_Shr32, mkexpr(irt_rM), mkU8(8)),
+                      mkU32(0xFF)
+                )
+         );
+         assign(irt_res,
+                binop(Iop_Or32, mkexpr(irt_hi), mkexpr(irt_low))
+         );
+         putIRegT(rD, mkexpr(irt_res), condT);
+         DIP("revsh r%u, r%u\n", rD, rM1);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) MSR apsr, reg -------------- */
+   if (INSN0(15,4) == 0xF38 
+       && INSN1(15,12) == BITS4(1,0,0,0) && INSN1(9,0) == 0x000) {
+      UInt rN          = INSN0(3,0);
+      UInt write_ge    = INSN1(10,10);
+      UInt write_nzcvq = INSN1(11,11);
+      if (!isBadRegT(rN) && (write_nzcvq || write_ge)) {
+         IRTemp rNt = newTemp(Ity_I32);
+         assign(rNt, getIRegT(rN));
+         desynthesise_APSR( write_nzcvq, write_ge, rNt, condT );
+         DIP("msr cpsr_%s%s, r%u\n",
+             write_nzcvq ? "f" : "", write_ge ? "g" : "", rN);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) MRS reg, apsr -------------- */
+   if (INSN0(15,0) == 0xF3EF
+       && INSN1(15,12) == BITS4(1,0,0,0) && INSN1(7,0) == 0x00) {
+      UInt rD = INSN1(11,8);
+      if (!isBadRegT(rD)) {
+         IRTemp apsr = synthesise_APSR();
+         putIRegT( rD, mkexpr(apsr), condT );
+         DIP("mrs r%u, cpsr\n", rD);
+         goto decode_success;
+      }
+   }
+
+   /* ----------------- (T1) LDREX ----------------- */
+   if (INSN0(15,4) == 0xE85 && INSN1(11,8) == BITS4(1,1,1,1)) {
+      UInt rN   = INSN0(3,0);
+      UInt rT   = INSN1(15,12);
+      UInt imm8 = INSN1(7,0);
+      if (!isBadRegT(rT) && rN != 15) {
+         IRTemp res;
+         // go uncond
+         mk_skip_over_T32_if_cond_is_false( condT );
+         // now uncond
+         res = newTemp(Ity_I32);
+         stmt( IRStmt_LLSC(Iend_LE,
+                           res,
+                           binop(Iop_Add32, getIRegT(rN), mkU32(imm8 * 4)),
+                           NULL/*this is a load*/ ));
+         putIRegT(rT, mkexpr(res), IRTemp_INVALID);
+         DIP("ldrex r%u, [r%u, #+%u]\n", rT, rN, imm8 * 4);
+         goto decode_success;
+      }
+   }
+
+   /* --------------- (T1) LDREX{B,H} --------------- */
+   if (INSN0(15,4) == 0xE8D
+       && (INSN1(11,0) == 0xF4F || INSN1(11,0) == 0xF5F)) {
+      UInt rN  = INSN0(3,0);
+      UInt rT  = INSN1(15,12);
+      Bool isH = INSN1(11,0) == 0xF5F;
+      if (!isBadRegT(rT) && rN != 15) {
+         IRTemp res;
+         // go uncond
+         mk_skip_over_T32_if_cond_is_false( condT );
+         // now uncond
+         res = newTemp(isH ? Ity_I16 : Ity_I8);
+         stmt( IRStmt_LLSC(Iend_LE, res, getIRegT(rN),
+                           NULL/*this is a load*/ ));
+         putIRegT(rT, unop(isH ? Iop_16Uto32 : Iop_8Uto32, mkexpr(res)),
+                      IRTemp_INVALID);
+         DIP("ldrex%c r%u, [r%u]\n", isH ? 'h' : 'b', rT, rN);
+         goto decode_success;
+      }
+   }
+
+   /* --------------- (T1) LDREXD --------------- */
+   if (INSN0(15,4) == 0xE8D && INSN1(7,0) == 0x7F) {
+      UInt rN  = INSN0(3,0);
+      UInt rT  = INSN1(15,12);
+      UInt rT2 = INSN1(11,8);
+      if (!isBadRegT(rT) && !isBadRegT(rT2) && rT != rT2 && rN != 15) {
+         IRTemp res;
+         // go uncond
+         mk_skip_over_T32_if_cond_is_false( condT );
+         // now uncond
+         res = newTemp(Ity_I64);
+         // FIXME: assumes little-endian guest
+         stmt( IRStmt_LLSC(Iend_LE, res, getIRegT(rN),
+                           NULL/*this is a load*/ ));
+         // FIXME: assumes little-endian guest
+         putIRegT(rT,  unop(Iop_64to32,   mkexpr(res)), IRTemp_INVALID);
+         putIRegT(rT2, unop(Iop_64HIto32, mkexpr(res)), IRTemp_INVALID);
+         DIP("ldrexd r%u, r%u, [r%u]\n", rT, rT2, rN);
+         goto decode_success;
+      }
+   }
+
+   /* ----------------- (T1) STREX ----------------- */
+   if (INSN0(15,4) == 0xE84) {
+      UInt rN   = INSN0(3,0);
+      UInt rT   = INSN1(15,12);
+      UInt rD   = INSN1(11,8);
+      UInt imm8 = INSN1(7,0);
+      if (!isBadRegT(rD) && !isBadRegT(rT) && rN != 15 
+          && rD != rN && rD != rT) {
+         IRTemp resSC1, resSC32;
+         // go uncond
+         mk_skip_over_T32_if_cond_is_false( condT );
+         // now uncond
+         /* Ok, now we're unconditional.  Do the store. */
+         resSC1 = newTemp(Ity_I1);
+         stmt( IRStmt_LLSC(Iend_LE,
+                           resSC1,
+                           binop(Iop_Add32, getIRegT(rN), mkU32(imm8 * 4)),
+                           getIRegT(rT)) );
+         /* Set rD to 1 on failure, 0 on success.  Currently we have
+            resSC1 == 0 on failure, 1 on success. */
+         resSC32 = newTemp(Ity_I32);
+         assign(resSC32,
+                unop(Iop_1Uto32, unop(Iop_Not1, mkexpr(resSC1))));
+         putIRegT(rD, mkexpr(resSC32), IRTemp_INVALID);
+         DIP("strex r%u, r%u, [r%u, #+%u]\n", rD, rT, rN, imm8 * 4);
+         goto decode_success;
+      }
+   }
+
+   /* --------------- (T1) STREX{B,H} --------------- */
+   if (INSN0(15,4) == 0xE8C
+       && (INSN1(11,4) == 0xF4 || INSN1(11,4) == 0xF5)) {
+      UInt rN  = INSN0(3,0);
+      UInt rT  = INSN1(15,12);
+      UInt rD  = INSN1(3,0);
+      Bool isH = INSN1(11,4) == 0xF5;
+      if (!isBadRegT(rD) && !isBadRegT(rT) && rN != 15 
+          && rD != rN && rD != rT) {
+         IRTemp resSC1, resSC32;
+         // go uncond
+         mk_skip_over_T32_if_cond_is_false( condT );
+         // now uncond
+         /* Ok, now we're unconditional.  Do the store. */
+         resSC1 = newTemp(Ity_I1);
+         stmt( IRStmt_LLSC(Iend_LE, resSC1, getIRegT(rN),
+                           unop(isH ? Iop_32to16 : Iop_32to8,
+                                getIRegT(rT))) );
+         /* Set rD to 1 on failure, 0 on success.  Currently we have
+            resSC1 == 0 on failure, 1 on success. */
+         resSC32 = newTemp(Ity_I32);
+         assign(resSC32,
+                unop(Iop_1Uto32, unop(Iop_Not1, mkexpr(resSC1))));
+         putIRegT(rD, mkexpr(resSC32), IRTemp_INVALID);
+         DIP("strex%c r%u, r%u, [r%u]\n", isH ? 'h' : 'b', rD, rT, rN);
+         goto decode_success;
+      }
+   }
+
+   /* ---------------- (T1) STREXD ---------------- */
+   if (INSN0(15,4) == 0xE8C && INSN1(7,4) == BITS4(0,1,1,1)) {
+      UInt rN  = INSN0(3,0);
+      UInt rT  = INSN1(15,12);
+      UInt rT2 = INSN1(11,8);
+      UInt rD  = INSN1(3,0);
+      if (!isBadRegT(rD) && !isBadRegT(rT) && !isBadRegT(rT2)
+          && rN != 15 && rD != rN && rD != rT && rD != rT2) {
+         IRTemp resSC1, resSC32, data;
+         // go uncond
+         mk_skip_over_T32_if_cond_is_false( condT );
+         // now uncond
+         /* Ok, now we're unconditional.  Do the store. */
+         resSC1 = newTemp(Ity_I1);
+         data = newTemp(Ity_I64);
+         // FIXME: assumes little-endian guest
+         assign(data, binop(Iop_32HLto64, getIRegT(rT2), getIRegT(rT)));
+         // FIXME: assumes little-endian guest
+         stmt( IRStmt_LLSC(Iend_LE, resSC1, getIRegT(rN), mkexpr(data)));
+         /* Set rD to 1 on failure, 0 on success.  Currently we have
+            resSC1 == 0 on failure, 1 on success. */
+         resSC32 = newTemp(Ity_I32);
+         assign(resSC32,
+                unop(Iop_1Uto32, unop(Iop_Not1, mkexpr(resSC1))));
+         putIRegT(rD, mkexpr(resSC32), IRTemp_INVALID);
+         DIP("strexd r%u, r%u, r%u, [r%u]\n", rD, rT, rT2, rN);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- v7 barrier insns -------------- */
+   if (INSN0(15,0) == 0xF3BF && (INSN1(15,0) & 0xFF00) == 0x8F00) {
+      /* FIXME: should this be unconditional? */
+      /* XXX this isn't really right, is it?  The generated IR does
+         them unconditionally.  I guess it doesn't matter since it
+         doesn't do any harm to do them even when the guarding
+         condition is false -- it's just a performance loss. */
+      switch (INSN1(7,0)) {
+         case 0x4F: /* DSB sy */
+         case 0x4E: /* DSB st */
+         case 0x4B: /* DSB ish */
+         case 0x4A: /* DSB ishst */
+         case 0x47: /* DSB nsh */
+         case 0x46: /* DSB nshst */
+         case 0x43: /* DSB osh */
+         case 0x42: /* DSB oshst */
+            stmt( IRStmt_MBE(Imbe_Fence) );
+            DIP("DSB\n");
+            goto decode_success;
+         case 0x5F: /* DMB sy */
+         case 0x5E: /* DMB st */
+         case 0x5B: /* DMB ish */
+         case 0x5A: /* DMB ishst */
+         case 0x57: /* DMB nsh */
+         case 0x56: /* DMB nshst */
+         case 0x53: /* DMB osh */
+         case 0x52: /* DMB oshst */
+            stmt( IRStmt_MBE(Imbe_Fence) );
+            DIP("DMB\n");
+            goto decode_success;
+         case 0x6F: /* ISB */
+            stmt( IRStmt_MBE(Imbe_Fence) );
+            DIP("ISB\n");
+            goto decode_success;
+         default:
+            break;
+      }
+   }
+
+   /* ---------------------- PLD{,W} ---------------------- */
+   if ((INSN0(15,4) & 0xFFD) == 0xF89 && INSN1(15,12) == 0xF) {
+      /* FIXME: should this be unconditional? */
+      /* PLD/PLDW immediate, encoding T1 */
+      UInt rN    = INSN0(3,0);
+      UInt bW    = INSN0(5,5);
+      UInt imm12 = INSN1(11,0);
+      DIP("pld%s [r%u, #%u]\n", bW ? "w" : "",  rN, imm12);
+      goto decode_success;
+   }
+
+   if ((INSN0(15,4) & 0xFFD) == 0xF81 && INSN1(15,8) == 0xFC) {
+      /* FIXME: should this be unconditional? */
+      /* PLD/PLDW immediate, encoding T2 */
+      UInt rN    = INSN0(3,0);
+      UInt bW    = INSN0(5,5);
+      UInt imm8  = INSN1(7,0);
+      DIP("pld%s [r%u, #-%u]\n", bW ? "w" : "",  rN, imm8);
+      goto decode_success;
+   }
+
+   if ((INSN0(15,4) & 0xFFD) == 0xF81 && INSN1(15,6) == 0x3C0) {
+      /* FIXME: should this be unconditional? */
+      /* PLD/PLDW register, encoding T1 */
+      UInt rN   = INSN0(3,0);
+      UInt rM   = INSN1(3,0);
+      UInt bW   = INSN0(5,5);
+      UInt imm2 = INSN1(5,4);
+      if (!isBadRegT(rM)) {
+         DIP("pld%s [r%u, r%u, lsl %d]\n", bW ? "w" : "", rN, rM, imm2);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* -------------- read CP15 TPIDRURO register ------------- */
+   /* mrc     p15, 0,  r0, c13, c0, 3  up to
+      mrc     p15, 0, r14, c13, c0, 3
+   */
+   /* I don't know whether this is really v7-only.  But anyway, we
+      have to support it since arm-linux uses TPIDRURO as a thread
+      state register. */
+   if ((INSN0(15,0) == 0xEE1D) && (INSN1(11,0) == 0x0F70)) {
+      /* FIXME: should this be unconditional? */
+      UInt rD = INSN1(15,12);
+      if (!isBadRegT(rD)) {
+         putIRegT(rD, IRExpr_Get(OFFB_TPIDRURO, Ity_I32), IRTemp_INVALID);
+         DIP("mrc p15,0, r%u, c13, c0, 3\n", rD);
+         goto decode_success;
+      }
+      /* fall through */
+   }
+
+   /* ------------------- CLREX ------------------ */
+   if (INSN0(15,0) == 0xF3BF && INSN1(15,0) == 0x8F2F) {
+      /* AFAICS, this simply cancels a (all?) reservations made by a
+         (any?) preceding LDREX(es).  Arrange to hand it through to
+         the back end. */
+      mk_skip_over_T32_if_cond_is_false( condT );
+      stmt( IRStmt_MBE(Imbe_CancelReservation) );
+      DIP("clrex\n");
+      goto decode_success;
+   }
+
+   /* ------------------- NOP ------------------ */
+   if (INSN0(15,0) == 0xF3AF && INSN1(15,0) == 0x8000) {
+      DIP("nop\n");
+      goto decode_success;
+   }
+
+   /* -------------- (T1) LDRT reg+#imm8 -------------- */
+   /* Load Register Unprivileged:
+      ldrt Rt, [Rn, #imm8]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,0,0,1) && INSN0(5,4) == BITS2(0,1)
+       && INSN1(11,8) == BITS4(1,1,1,0)) {
+      UInt rT    = INSN1(15,12);
+      UInt rN    = INSN0(3,0);
+      UInt imm8  = INSN1(7,0);
+      Bool valid = True;
+      if (rN == 15 || isBadRegT(rT)) valid = False;
+      if (valid) {
+         put_ITSTATE(old_itstate);
+         IRExpr* ea = binop(Iop_Add32, getIRegT(rN), mkU32(imm8));
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt, ILGop_Ident32, ea, llGetIReg(rT), condT );
+         putIRegT(rT, mkexpr(newRt), IRTemp_INVALID);
+         put_ITSTATE(new_itstate);
+         DIP("ldrt r%u, [r%u, #%u]\n", rT, rN, imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) STRT reg+#imm8 -------------- */
+   /* Store Register Unprivileged:
+      strt Rt, [Rn, #imm8]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,0,0,1) && INSN0(5,4) == BITS2(0,0)
+       && INSN1(11,8) == BITS4(1,1,1,0)) {
+      UInt rT    = INSN1(15,12);
+      UInt rN    = INSN0(3,0);
+      UInt imm8  = INSN1(7,0);
+      Bool valid = True;
+      if (rN == 15 || isBadRegT(rT)) valid = False;
+      if (valid) {
+         put_ITSTATE(old_itstate);
+         IRExpr* address = binop(Iop_Add32, getIRegT(rN), mkU32(imm8));
+         storeGuardedLE( address, llGetIReg(rT), condT );
+         put_ITSTATE(new_itstate);
+         DIP("strt r%u, [r%u, #%u]\n", rT, rN, imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) STRBT reg+#imm8 -------------- */
+   /* Store Register Byte Unprivileged:
+      strbt Rt, [Rn, #imm8]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,0,0,0) && INSN0(5,4) == BITS2(0,0)
+       && INSN1(11,8) == BITS4(1,1,1,0)) {
+      UInt rT    = INSN1(15,12);
+      UInt rN    = INSN0(3,0);
+      UInt imm8  = INSN1(7,0);
+      Bool valid = True;
+      if (rN == 15 || isBadRegT(rT)) valid = False;
+      if (valid) {
+         put_ITSTATE(old_itstate);
+         IRExpr* address = binop(Iop_Add32, getIRegT(rN), mkU32(imm8));
+         IRExpr* data = unop(Iop_32to8, llGetIReg(rT));
+         storeGuardedLE( address, data, condT );
+         put_ITSTATE(new_itstate);
+         DIP("strbt r%u, [r%u, #%u]\n", rT, rN, imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) LDRHT reg+#imm8 -------------- */
+   /* Load Register Halfword Unprivileged:
+      ldrht Rt, [Rn, #imm8]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,0,0,0) && INSN0(5,4) == BITS2(1,1)
+       && INSN1(11,8) == BITS4(1,1,1,0)) {
+      UInt rN    = INSN0(3,0);
+      Bool valid = True;
+      if (rN == 15) {
+         /* In this case our instruction is LDRH (literal), in fact:
+            LDRH (literal) was realized earlier, so we don't want to
+            make it twice. */
+         valid = False;
+      }
+      UInt rT    = INSN1(15,12);
+      UInt imm8  = INSN1(7,0);
+      if (isBadRegT(rT)) valid = False;
+      if (valid) {
+         put_ITSTATE(old_itstate);
+         IRExpr* ea = binop(Iop_Add32, getIRegT(rN), mkU32(imm8));
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt, ILGop_16Uto32, ea, llGetIReg(rT), condT );
+         putIRegT(rT, mkexpr(newRt), IRTemp_INVALID);
+         put_ITSTATE(new_itstate);
+         DIP("ldrht r%u, [r%u, #%u]\n", rT, rN, imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) LDRSHT reg+#imm8 -------------- */
+   /* Load Register Signed Halfword Unprivileged:
+      ldrsht Rt, [Rn, #imm8]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,1,0,0) && INSN0(5,4) == BITS2(1,1)
+       && INSN1(11,8) == BITS4(1,1,1,0)) {
+      UInt rN    = INSN0(3,0);
+      Bool valid = True;
+      if (rN == 15) {
+         /* In this case our instruction is LDRSH (literal), in fact:
+            LDRSH (literal) was realized earlier, so we don't want to
+            make it twice. */
+         valid = False;
+      }
+      UInt rT    = INSN1(15,12);
+      UInt imm8  = INSN1(7,0);
+      if (isBadRegT(rT)) valid = False;
+      if (valid) {
+         put_ITSTATE(old_itstate);
+         IRExpr* ea = binop(Iop_Add32, getIRegT(rN), mkU32(imm8));
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt, ILGop_16Sto32, ea, llGetIReg(rT), condT );
+         putIRegT(rT, mkexpr(newRt), IRTemp_INVALID);
+         put_ITSTATE(new_itstate);
+         DIP("ldrsht r%u, [r%u, #%u]\n", rT, rN, imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) STRHT reg+#imm8 -------------- */
+   /* Store Register Halfword Unprivileged:
+      strht Rt, [Rn, #imm8]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,0,0,0) && INSN0(5,4) == BITS2(1,0)
+       && INSN1(11,8) == BITS4(1,1,1,0)) {
+      UInt rT    = INSN1(15,12);
+      UInt rN    = INSN0(3,0);
+      UInt imm8  = INSN1(7,0);
+      Bool valid = True;
+      if (rN == 15 || isBadRegT(rT)) valid = False;
+      if (valid) {
+         put_ITSTATE(old_itstate);
+         IRExpr* address = binop(Iop_Add32, getIRegT(rN), mkU32(imm8));
+         IRExpr* data = unop(Iop_32to16, llGetIReg(rT));
+         storeGuardedLE( address, data, condT );
+         put_ITSTATE(new_itstate);
+         DIP("strht r%u, [r%u, #%u]\n", rT, rN, imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) LDRBT reg+#imm8 -------------- */
+   /* Load Register Byte Unprivileged:
+      ldrbt Rt, [Rn, #imm8]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,0,0,0) && INSN0(5,4) == BITS2(0,1)
+       && INSN1(11,8) == BITS4(1,1,1,0)) {
+      UInt rN    = INSN0(3,0);
+      UInt rT    = INSN1(15,12);
+      UInt imm8  = INSN1(7,0);
+      Bool valid = True;
+      if (rN == 15 /* insn is LDRB (literal) */) valid = False;
+      if (isBadRegT(rT)) valid = False;
+      if (valid) {
+         put_ITSTATE(old_itstate);
+         IRExpr* ea = binop(Iop_Add32, getIRegT(rN), mkU32(imm8));
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt, ILGop_8Uto32, ea, llGetIReg(rT), condT );
+         putIRegT(rT, mkexpr(newRt), IRTemp_INVALID);
+         put_ITSTATE(new_itstate);
+         DIP("ldrbt r%u, [r%u, #%u]\n", rT, rN, imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) LDRSBT reg+#imm8 -------------- */
+   /* Load Register Signed Byte Unprivileged:
+      ldrsbt Rt, [Rn, #imm8]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,1,0,0) && INSN0(5,4) == BITS2(0,1)
+       && INSN1(11,8) == BITS4(1,1,1,0)) {
+      UInt rN    = INSN0(3,0);
+      Bool valid = True;
+      UInt rT    = INSN1(15,12);
+      UInt imm8  = INSN1(7,0);
+      if (rN == 15 /* insn is LDRSB (literal) */) valid = False;
+      if (isBadRegT(rT)) valid = False;
+      if (valid) {
+         put_ITSTATE(old_itstate);
+         IRExpr* ea = binop(Iop_Add32, getIRegT(rN), mkU32(imm8));
+         IRTemp newRt = newTemp(Ity_I32);
+         loadGuardedLE( newRt, ILGop_8Sto32, ea, llGetIReg(rT), condT );
+         putIRegT(rT, mkexpr(newRt), IRTemp_INVALID);
+         put_ITSTATE(new_itstate);
+         DIP("ldrsbt r%u, [r%u, #%u]\n", rT, rN, imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T1) PLI reg+#imm12 -------------- */
+   /* Preload Instruction:
+      pli [Rn, #imm12]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,1,1,0) && INSN0(5,4) == BITS2(0,1)
+       && INSN1(15,12) == BITS4(1,1,1,1)) {
+      UInt rN    = INSN0(3,0);
+      UInt imm12 = INSN1(11,0);
+      if (rN != 15) {
+         DIP("pli [r%u, #%u]\n", rN, imm12);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T2) PLI reg-#imm8 -------------- */
+   /* Preload Instruction:
+      pli [Rn, #-imm8]
+   */
+   if (INSN0(15,6) == BITS10(1,1,1,1,1,0,0,1,0,0) && INSN0(5,4) == BITS2(0,1)
+       && INSN1(15,8) == BITS8(1,1,1,1,1,1,0,0)) {
+      UInt rN   = INSN0(3,0);
+      UInt imm8 = INSN1(7,0);
+      if (rN != 15) {
+         DIP("pli [r%u, #-%u]\n", rN, imm8);
+         goto decode_success;
+      }
+   }
+
+   /* -------------- (T3) PLI PC+/-#imm12 -------------- */
+   /* Preload Instruction:
+      pli [PC, #+/-imm12]
+   */
+   if (INSN0(15,8) == BITS8(1,1,1,1,1,0,0,1)
+       && INSN0(6,0) == BITS7(0,0,1,1,1,1,1)
+       && INSN1(15,12) == BITS4(1,1,1,1)) {
+      UInt imm12 = INSN1(11,0);
+      UInt bU    = INSN0(7,7);
+      DIP("pli [pc, #%c%u]\n", bU == 1 ? '+' : '-', imm12);
+      goto decode_success;
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- VFP (CP 10, CP 11) instructions (in Thumb mode)       -- */
+   /* ----------------------------------------------------------- */
+
+   if (INSN0(15,12) == BITS4(1,1,1,0)) {
+      UInt insn28 = (INSN0(11,0) << 16) | INSN1(15,0);
+      Bool ok_vfp = decode_CP10_CP11_instruction (
+                       &dres, insn28, condT, ARMCondAL/*bogus*/,
+                       True/*isT*/
+                    );
+      if (ok_vfp)
+         goto decode_success;
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- NEON instructions (in Thumb mode)                     -- */
+   /* ----------------------------------------------------------- */
+
+   if (archinfo->hwcaps & VEX_HWCAPS_ARM_NEON) {
+      UInt insn32 = (INSN0(15,0) << 16) | INSN1(15,0);
+      Bool ok_neon = decode_NEON_instruction(
+                        &dres, insn32, condT, True/*isT*/
+                     );
+      if (ok_neon)
+         goto decode_success;
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- v6 media instructions (in Thumb mode)                 -- */
+   /* ----------------------------------------------------------- */
+
+   { UInt insn32 = (INSN0(15,0) << 16) | INSN1(15,0);
+     Bool ok_v6m = decode_V6MEDIA_instruction(
+                      &dres, insn32, condT, ARMCondAL/*bogus*/,
+                      True/*isT*/
+                   );
+     if (ok_v6m)
+        goto decode_success;
+   }
+
+   /* ----------------------------------------------------------- */
+   /* -- Undecodable                                           -- */
+   /* ----------------------------------------------------------- */
+
+   goto decode_failure;
+   /*NOTREACHED*/
+
+  decode_failure:
+   /* All decode failures end up here. */
+   if (sigill_diag)
+      vex_printf("disInstr(thumb): unhandled instruction: "
+                 "0x%04x 0x%04x\n", (UInt)insn0, (UInt)insn1);
+
+   /* Back up ITSTATE to the initial value for this instruction.
+      If we don't do that, any subsequent restart of the instruction
+      will restart with the wrong value. */
+   if (old_itstate != IRTemp_INVALID)
+      put_ITSTATE(old_itstate);
+
+   /* Tell the dispatcher that this insn cannot be decoded, and so has
+      not been executed, and (is currently) the next to be executed.
+      R15 should be up-to-date since it made so at the start of each
+      insn, but nevertheless be paranoid and update it again right
+      now. */
+   vassert(0 == (guest_R15_curr_instr_notENC & 1));
+   llPutIReg( 15, mkU32(guest_R15_curr_instr_notENC | 1) );
+   dres.len         = 0;
+   dres.whatNext    = Dis_StopHere;
+   dres.jk_StopHere = Ijk_NoDecode;
+   dres.continueAt  = 0;
+   return dres;
+
+  decode_success:
+   /* All decode successes end up here. */
+   vassert(dres.len == 4 || dres.len == 2 || dres.len == 20);
+   switch (dres.whatNext) {
+      case Dis_Continue:
+         llPutIReg(15, mkU32(dres.len + (guest_R15_curr_instr_notENC | 1)));
+         break;
+      case Dis_ResteerU:
+      case Dis_ResteerC:
+         llPutIReg(15, mkU32(dres.continueAt));
+         break;
+      case Dis_StopHere:
+         break;
+      default:
+         vassert(0);
+   }
+
+   DIP("\n");
+
+   return dres;
+
+#  undef INSN0
+#  undef INSN1
+}
+
+#undef DIP
+#undef DIS
+
+
+/* Helper table for figuring out how many insns an IT insn
+   conditionalises.
+
+   An ITxyz instruction of the format "1011 1111 firstcond mask"
+   conditionalises some number of instructions, as indicated by the
+   following table.  A value of zero indicates the instruction is
+   invalid in some way.
+
+   mask = 0 means this isn't an IT instruction
+   fc = 15 (NV) means unpredictable
+
+   The line fc = 14 (AL) is different from the others; there are
+   additional constraints in this case.
+
+          mask(0 ..                   15)
+        +--------------------------------
+   fc(0 | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+   ..   | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 0 2 0 0 0 1 0 0 0 0 0 0 0 
+   15)  | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
+
+   To be conservative with the analysis, let's rule out the mask = 0
+   case, since that isn't an IT insn at all.  But for all the other
+   cases where the table contains zero, that means unpredictable, so
+   let's say 4 to be conservative.  Hence we have a safe value for any
+   IT (mask,fc) pair that the CPU would actually identify as an IT
+   instruction.  The final table is
+
+          mask(0 ..                   15)
+        +--------------------------------
+   fc(0 | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+   ..   | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 3 4 1 4 3 4 2 4 3 4 
+        | 0 4 3 4 2 4 4 4 1 4 4 4 4 4 4 4 
+   15)  | 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 
+*/
+static const UChar it_length_table[256]  /* index = (firstcond << 4) | mask, ie. INSN0(7,0) of the IT insn */
+   = { 0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4, 
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4,
+       0, 4, 3, 4, 2, 4, 4, 4, 1, 4, 4, 4, 4, 4, 4, 4,  /* fc = 14 (AL): unpredictable slots forced to 4 */
+       0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4   /* fc = 15 (NV): unpredictable, conservatively 4 */
+     };
+
+
+/*------------------------------------------------------------*/
+/*--- Top-level fn                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction
+   is located in host memory at &guest_code[delta]. */
+
+DisResult disInstr_ARM ( IRSB*        irsb_IN,
+                         Bool         (*resteerOkFn) ( void*, Addr ),
+                         Bool         resteerCisOk,
+                         void*        callback_opaque,
+                         const UChar* guest_code_IN,
+                         Long         delta_ENCODED,
+                         Addr         guest_IP_ENCODED,
+                         VexArch      guest_arch,
+                         const VexArchInfo* archinfo,
+                         const VexAbiInfo*  abiinfo,
+                         VexEndness   host_endness_IN,
+                         Bool         sigill_diag_IN )
+{
+   DisResult dres;
+   Bool isThumb = (Bool)(guest_IP_ENCODED & 1); /* bit 0 of the encoded IP is the Thumb (T) bit */
+
+   /* Set globals (see top of this file) */
+   vassert(guest_arch == VexArchARM);
+
+   irsb            = irsb_IN;
+   host_endness    = host_endness_IN;
+   __curr_is_Thumb = isThumb;
+
+   if (isThumb) {
+      guest_R15_curr_instr_notENC = (Addr32)guest_IP_ENCODED - 1; /* strip the T bit */
+   } else {
+      guest_R15_curr_instr_notENC = (Addr32)guest_IP_ENCODED;
+   }
+
+   if (isThumb) {
+      dres = disInstr_THUMB_WRK ( resteerOkFn,
+                                  resteerCisOk, callback_opaque,
+                                  &guest_code_IN[delta_ENCODED - 1], /* delta also carries the T bit */
+                                  archinfo, abiinfo, sigill_diag_IN );
+   } else {
+      dres = disInstr_ARM_WRK ( resteerOkFn,
+                                resteerCisOk, callback_opaque,
+                                &guest_code_IN[delta_ENCODED],
+                                archinfo, abiinfo, sigill_diag_IN );
+   }
+
+   return dres;
+}
+
+/* Test program for the conversion of IRCmpF64Result values to VFP
+   nzcv values.  See handling of FCMPD et al above. */
+/*
+UInt foo ( UInt x )
+{
+   UInt ix    = ((x >> 5) & 3) | (x & 1);
+   UInt termL = (((((ix ^ 1) << 30) - 1) >> 29) + 1);
+   UInt termR = (ix & (ix >> 1) & 1);
+   return termL  -  termR;
+}
+
+void try ( char* s, UInt ir, UInt req )
+{
+   UInt act = foo(ir);
+   printf("%s 0x%02x -> req %d%d%d%d act %d%d%d%d (0x%x)\n",
+          s, ir, (req >> 3) & 1, (req >> 2) & 1, 
+                 (req >> 1) & 1, (req >> 0) & 1, 
+                 (act >> 3) & 1, (act >> 2) & 1, 
+                 (act >> 1) & 1, (act >> 0) & 1, act);
+
+}
+
+int main ( void )
+{
+   printf("\n");
+   try("UN", 0x45, 0b0011);
+   try("LT", 0x01, 0b1000);
+   try("GT", 0x00, 0b0010);
+   try("EQ", 0x40, 0b0110);
+   printf("\n");
+   return 0;
+}
+*/
+
+/* Spare code for doing reference implementations of various 64-bit
+   SIMD interleaves/deinterleaves/concatenation ops. */
+/*
+// Split a 64 bit value into 4 16 bit ones, in 32-bit IRTemps with
+// the top halves guaranteed to be zero.
+static void break64to16s ( IRTemp* out3, IRTemp* out2, IRTemp* out1,
+                           IRTemp* out0, IRTemp v64 )
+{
+  if (out3) *out3 = newTemp(Ity_I32);
+  if (out2) *out2 = newTemp(Ity_I32);
+  if (out1) *out1 = newTemp(Ity_I32);
+  if (out0) *out0 = newTemp(Ity_I32);
+  IRTemp hi32 = newTemp(Ity_I32);
+  IRTemp lo32 = newTemp(Ity_I32);
+  assign(hi32, unop(Iop_64HIto32, mkexpr(v64)) );
+  assign(lo32, unop(Iop_64to32, mkexpr(v64)) );
+  if (out3) assign(*out3, binop(Iop_Shr32, mkexpr(hi32), mkU8(16)));
+  if (out2) assign(*out2, binop(Iop_And32, mkexpr(hi32), mkU32(0xFFFF)));
+  if (out1) assign(*out1, binop(Iop_Shr32, mkexpr(lo32), mkU8(16)));
+  if (out0) assign(*out0, binop(Iop_And32, mkexpr(lo32), mkU32(0xFFFF)));
+}
+
+// Make a 64 bit value from 4 16 bit ones, each of which is in a 32 bit
+// IRTemp.
+static IRTemp mk64from16s ( IRTemp in3, IRTemp in2, IRTemp in1, IRTemp in0 )
+{
+  IRTemp hi32 = newTemp(Ity_I32);
+  IRTemp lo32 = newTemp(Ity_I32);
+  assign(hi32,
+         binop(Iop_Or32,
+               binop(Iop_Shl32, mkexpr(in3), mkU8(16)),
+               binop(Iop_And32, mkexpr(in2), mkU32(0xFFFF))));
+  assign(lo32,
+         binop(Iop_Or32,
+               binop(Iop_Shl32, mkexpr(in1), mkU8(16)),
+               binop(Iop_And32, mkexpr(in0), mkU32(0xFFFF))));
+  IRTemp res = newTemp(Ity_I64);
+  assign(res, binop(Iop_32HLto64, mkexpr(hi32), mkexpr(lo32)));
+  return res;
+}
+
+static IRExpr* mk_InterleaveLO16x4 ( IRTemp a3210, IRTemp b3210 )
+{
+  // returns a1 b1 a0 b0
+  IRTemp a1, a0, b1, b0;
+  break64to16s(NULL, NULL, &a1, &a0, a3210);
+  break64to16s(NULL, NULL, &b1, &b0, b3210);
+  return mkexpr(mk64from16s(a1, b1, a0, b0));
+}
+
+static IRExpr* mk_InterleaveHI16x4 ( IRTemp a3210, IRTemp b3210 )
+{
+  // returns a3 b3 a2 b2
+  IRTemp a3, a2, b3, b2;
+  break64to16s(&a3, &a2, NULL, NULL, a3210);
+  break64to16s(&b3, &b2, NULL, NULL, b3210);
+  return mkexpr(mk64from16s(a3, b3, a2, b2));
+}
+
+static IRExpr* mk_CatEvenLanes16x4 ( IRTemp a3210, IRTemp b3210 )
+{
+  // returns a2 a0 b2 b0
+  IRTemp a2, a0, b2, b0;
+  break64to16s(NULL, &a2, NULL, &a0, a3210);
+  break64to16s(NULL, &b2, NULL, &b0, b3210);
+  return mkexpr(mk64from16s(a2, a0, b2, b0));
+}
+
+static IRExpr* mk_CatOddLanes16x4 ( IRTemp a3210, IRTemp b3210 )
+{
+  // returns a3 a1 b3 b1
+  IRTemp a3, a1, b3, b1;
+  break64to16s(&a3, NULL, &a1, NULL, a3210);
+  break64to16s(&b3, NULL, &b1, NULL, b3210);
+  return mkexpr(mk64from16s(a3, a1, b3, b1));
+}
+
+static IRExpr* mk_InterleaveOddLanes16x4 ( IRTemp a3210, IRTemp b3210 )
+{
+  // returns a3 b3 a1 b1
+  IRTemp a3, b3, a1, b1;
+  break64to16s(&a3, NULL, &a1, NULL, a3210);
+  break64to16s(&b3, NULL, &b1, NULL, b3210);
+  return mkexpr(mk64from16s(a3, b3, a1, b1));
+}
+
+static IRExpr* mk_InterleaveEvenLanes16x4 ( IRTemp a3210, IRTemp b3210 )
+{
+  // returns a2 b2 a0 b0
+  IRTemp a2, b2, a0, b0;
+  break64to16s(NULL, &a2, NULL, &a0, a3210);
+  break64to16s(NULL, &b2, NULL, &b0, b3210);
+  return mkexpr(mk64from16s(a2, b2, a0, b0));
+}
+
+static void break64to8s ( IRTemp* out7, IRTemp* out6, IRTemp* out5,
+                          IRTemp* out4, IRTemp* out3, IRTemp* out2,
+                          IRTemp* out1,IRTemp* out0, IRTemp v64 )
+{
+  if (out7) *out7 = newTemp(Ity_I32);
+  if (out6) *out6 = newTemp(Ity_I32);
+  if (out5) *out5 = newTemp(Ity_I32);
+  if (out4) *out4 = newTemp(Ity_I32);
+  if (out3) *out3 = newTemp(Ity_I32);
+  if (out2) *out2 = newTemp(Ity_I32);
+  if (out1) *out1 = newTemp(Ity_I32);
+  if (out0) *out0 = newTemp(Ity_I32);
+  IRTemp hi32 = newTemp(Ity_I32);
+  IRTemp lo32 = newTemp(Ity_I32);
+  assign(hi32, unop(Iop_64HIto32, mkexpr(v64)) );
+  assign(lo32, unop(Iop_64to32, mkexpr(v64)) );
+  if (out7)
+    assign(*out7, binop(Iop_And32,
+                        binop(Iop_Shr32, mkexpr(hi32), mkU8(24)),
+                        mkU32(0xFF)));
+  if (out6)
+    assign(*out6, binop(Iop_And32,
+                        binop(Iop_Shr32, mkexpr(hi32), mkU8(16)),
+                        mkU32(0xFF)));
+  if (out5)
+    assign(*out5, binop(Iop_And32,
+                        binop(Iop_Shr32, mkexpr(hi32), mkU8(8)),
+                        mkU32(0xFF)));
+  if (out4)
+    assign(*out4, binop(Iop_And32, mkexpr(hi32), mkU32(0xFF)));
+  if (out3)
+    assign(*out3, binop(Iop_And32,
+                        binop(Iop_Shr32, mkexpr(lo32), mkU8(24)),
+                        mkU32(0xFF)));
+  if (out2)
+    assign(*out2, binop(Iop_And32,
+                        binop(Iop_Shr32, mkexpr(lo32), mkU8(16)),
+                        mkU32(0xFF)));
+  if (out1)
+    assign(*out1, binop(Iop_And32,
+                        binop(Iop_Shr32, mkexpr(lo32), mkU8(8)),
+                        mkU32(0xFF)));
+  if (out0)
+    assign(*out0, binop(Iop_And32, mkexpr(lo32), mkU32(0xFF)));
+}
+
+static IRTemp mk64from8s ( IRTemp in7, IRTemp in6, IRTemp in5, IRTemp in4,
+                           IRTemp in3, IRTemp in2, IRTemp in1, IRTemp in0 )
+{
+  IRTemp hi32 = newTemp(Ity_I32);
+  IRTemp lo32 = newTemp(Ity_I32);
+  assign(hi32,
+         binop(Iop_Or32,
+               binop(Iop_Or32,
+                     binop(Iop_Shl32,
+                           binop(Iop_And32, mkexpr(in7), mkU32(0xFF)),
+                           mkU8(24)),
+                     binop(Iop_Shl32,
+                           binop(Iop_And32, mkexpr(in6), mkU32(0xFF)),
+                           mkU8(16))),
+               binop(Iop_Or32,
+                     binop(Iop_Shl32,
+                           binop(Iop_And32, mkexpr(in5), mkU32(0xFF)), mkU8(8)),
+                     binop(Iop_And32,
+                           mkexpr(in4), mkU32(0xFF)))));
+  assign(lo32,
+         binop(Iop_Or32,
+               binop(Iop_Or32,
+                     binop(Iop_Shl32,
+                           binop(Iop_And32, mkexpr(in3), mkU32(0xFF)),
+                           mkU8(24)),
+                     binop(Iop_Shl32,
+                           binop(Iop_And32, mkexpr(in2), mkU32(0xFF)),
+                           mkU8(16))),
+               binop(Iop_Or32,
+                     binop(Iop_Shl32,
+                           binop(Iop_And32, mkexpr(in1), mkU32(0xFF)), mkU8(8)),
+                     binop(Iop_And32,
+                           mkexpr(in0), mkU32(0xFF)))));
+  IRTemp res = newTemp(Ity_I64);
+  assign(res, binop(Iop_32HLto64, mkexpr(hi32), mkexpr(lo32)));
+  return res;
+}
+
+static IRExpr* mk_InterleaveLO8x8 ( IRTemp a76543210, IRTemp b76543210 )
+{
+  // returns a3 b3 a2 b2 a1 b1 a0 b0
+  IRTemp a3, b3, a2, b2, a1, a0, b1, b0;
+  break64to8s(NULL, NULL, NULL, NULL, &a3, &a2, &a1, &a0, a76543210);
+  break64to8s(NULL, NULL, NULL, NULL, &b3, &b2, &b1, &b0, b76543210);
+  return mkexpr(mk64from8s(a3, b3, a2, b2, a1, b1, a0, b0));
+}
+
+static IRExpr* mk_InterleaveHI8x8 ( IRTemp a76543210, IRTemp b76543210 )
+{
+  // returns a7 b7 a6 b6 a5 b5 a4 b4
+  IRTemp a7, b7, a6, b6, a5, b5, a4, b4;
+  break64to8s(&a7, &a6, &a5, &a4, NULL, NULL, NULL, NULL, a76543210);
+  break64to8s(&b7, &b6, &b5, &b4, NULL, NULL, NULL, NULL, b76543210);
+  return mkexpr(mk64from8s(a7, b7, a6, b6, a5, b5, a4, b4));
+}
+
+static IRExpr* mk_CatEvenLanes8x8 ( IRTemp a76543210, IRTemp b76543210 )
+{
+  // returns a6 a4 a2 a0 b6 b4 b2 b0
+  IRTemp a6, a4, a2, a0, b6, b4, b2, b0;
+  break64to8s(NULL, &a6, NULL, &a4, NULL, &a2, NULL, &a0, a76543210);
+  break64to8s(NULL, &b6, NULL, &b4, NULL, &b2, NULL, &b0, b76543210);
+  return mkexpr(mk64from8s(a6, a4, a2, a0, b6, b4, b2, b0));
+}
+
+static IRExpr* mk_CatOddLanes8x8 ( IRTemp a76543210, IRTemp b76543210 )
+{
+  // returns a7 a5 a3 a1 b7 b5 b3 b1
+  IRTemp a7, a5, a3, a1, b7, b5, b3, b1;
+  break64to8s(&a7, NULL, &a5, NULL, &a3, NULL, &a1, NULL, a76543210);
+  break64to8s(&b7, NULL, &b5, NULL, &b3, NULL, &b1, NULL, b76543210);
+  return mkexpr(mk64from8s(a7, a5, a3, a1, b7, b5, b3, b1));
+}
+
+static IRExpr* mk_InterleaveEvenLanes8x8 ( IRTemp a76543210, IRTemp b76543210 )
+{
+  // returns a6 b6 a4 b4 a2 b2 a0 b0
+  IRTemp a6, b6, a4, b4, a2, b2, a0, b0;
+  break64to8s(NULL, &a6, NULL, &a4, NULL, &a2, NULL, &a0, a76543210);
+  break64to8s(NULL, &b6, NULL, &b4, NULL, &b2, NULL, &b0, b76543210);
+  return mkexpr(mk64from8s(a6, b6, a4, b4, a2, b2, a0, b0));
+}
+
+static IRExpr* mk_InterleaveOddLanes8x8 ( IRTemp a76543210, IRTemp b76543210 )
+{
+  // returns a7 b7 a5 b5 a3 b3 a1 b1
+  IRTemp a7, b7, a5, b5, a3, b3, a1, b1;
+  break64to8s(&a7, NULL, &a5, NULL, &a3, NULL, &a1, NULL, a76543210);
+  break64to8s(&b7, NULL, &b5, NULL, &b3, NULL, &b1, NULL, b76543210);
+  return mkexpr(mk64from8s(a7, b7, a5, b5, a3, b3, a1, b1));
+}
+
+static IRExpr* mk_InterleaveLO32x2 ( IRTemp a10, IRTemp b10 )
+{
+  // returns a0 b0
+  return binop(Iop_32HLto64, unop(Iop_64to32, mkexpr(a10)),
+                             unop(Iop_64to32, mkexpr(b10)));
+}
+
+static IRExpr* mk_InterleaveHI32x2 ( IRTemp a10, IRTemp b10 )
+{
+  // returns a1 b1
+  return binop(Iop_32HLto64, unop(Iop_64HIto32, mkexpr(a10)),
+                             unop(Iop_64HIto32, mkexpr(b10)));
+}
+*/
+
+/*--------------------------------------------------------------------*/
+/*--- end                                         guest_arm_toIR.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/priv/guest_generic_bb_to_IR.c b/VEX/priv/guest_generic_bb_to_IR.c
new file mode 100644
index 0000000..ca3682e
--- /dev/null
+++ b/VEX/priv/guest_generic_bb_to_IR.c
@@ -0,0 +1,1294 @@
+
+/*--------------------------------------------------------------------*/
+/*--- begin                               guest_generic_bb_to_IR.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+
+
+/* Forwards .. */
+VEX_REGPARM(2)
+static UInt genericg_compute_checksum_4al ( HWord first_w32, HWord n_w32s );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_1 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_2 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_3 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_4 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_5 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_6 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_7 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_8 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_9 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_10 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_11 ( HWord first_w32 );
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_12 ( HWord first_w32 );
+
+VEX_REGPARM(2)
+static ULong genericg_compute_checksum_8al ( HWord first_w64, HWord n_w64s );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_1 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_2 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_3 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_4 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_5 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_6 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_7 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_8 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_9 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_10 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_11 ( HWord first_w64 );
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_12 ( HWord first_w64 );
+
+/* Small helpers */
+static Bool const_False ( void* callback_opaque, Addr a ) { 
+   return False; /* always-False chase predicate: used as resteerOKfn when chasing is disallowed */
+}
+
+/* Disassemble a complete basic block, starting at guest_IP_start, 
+   returning a new IRSB.  The disassembler may chase across basic
+   block boundaries if it wishes and if chase_into_ok allows it.
+   The precise guest address ranges from which code has been taken
+   are written into vge.  guest_IP_bbstart is taken to be the IP in
+   the guest's address space corresponding to the instruction at
+   &guest_code[0].  
+
+   dis_instr_fn is the arch-specific fn to disassemble one instruction; it
+   is this that does the real work.
+
+   needs_self_check is a callback used to ask the caller which of the
+   extents, if any, a self check is required for.  The returned value
+   is a bitmask with a 1 in position i indicating that the i'th extent
+   needs a check.  Since there can be at most 3 extents, the returned
+   values must be between 0 and 7.
+
+   The number of extents which did get a self check (0 to 3) is put in
+   n_sc_extents.  The caller already knows this because it told us
+   which extents to add checks for, via the needs_self_check callback,
+   but we ship the number back out here for the caller's convenience.
+
+   preamble_function is a callback which allows the caller to add
+   its own IR preamble (following the self-check, if any).  May be
+   NULL.  If non-NULL, the IRSB under construction is handed to 
+   this function, which presumably adds IR statements to it.  The
+   callback may optionally complete the block and direct bb_to_IR
+   not to disassemble any instructions into it; this is indicated
+   by the callback returning True.
+
+   offB_GUEST_CMSTART and offB_GUEST_CMLEN are the offsets of guest_CMSTART
+   and guest_CMLEN.  Since this routine has to work for any guest state,
+   without knowing what it is, those offsets have to be passed in.
+
+   callback_opaque is a caller-supplied pointer to data which the
+   callbacks may want to see.  Vex has no idea what it is.
+   (In fact it's a VgInstrumentClosure.)
+*/
+
+/* Regarding IP updating.  dis_instr_fn (that does the guest specific
+   work of disassembling an individual instruction) must finish the
+   resulting IR with "PUT(guest_IP) = ".  Hence in all cases it must
+   state the next instruction address.
+
+   If the block is to be ended at that point, then this routine
+   (bb_to_IR) will set up the next/jumpkind/offsIP fields so as to
+   make a transfer (of the right kind) to "GET(guest_IP)".  Hence if
+   dis_instr_fn generates incorrect IP updates we will see it
+   immediately (due to jumping to the wrong next guest address).
+
+   However it is also necessary to set this up so it can be optimised
+   nicely.  The IRSB exit is defined to update the guest IP, so that
+   chaining works -- since the chain_me stubs expect the chain-to
+   address to be in the guest state.  Hence what the IRSB next fields
+   will contain initially is (implicitly)
+
+   PUT(guest_IP) [implicitly] = GET(guest_IP) [explicit expr on ::next]
+
+   which looks pretty strange at first.  Eg so unconditional branch
+   to some address 0x123456 looks like this:
+
+   PUT(guest_IP) = 0x123456;  // dis_instr_fn generates this
+   // the exit
+   PUT(guest_IP) [implicitly] = GET(guest_IP); exit-Boring
+
+   after redundant-GET and -PUT removal by iropt, we get what we want:
+
+   // the exit
+   PUT(guest_IP) [implicitly] = 0x123456; exit-Boring
+
+   This makes the IRSB-end case the same as the side-exit case: update
+   IP, then transfer.  There is no redundancy of representation for
+   the destination, and we use the destination specified by
+   dis_instr_fn, so any errors it makes show up sooner.
+*/
+
+IRSB* bb_to_IR ( 
+         /*OUT*/VexGuestExtents* vge,
+         /*OUT*/UInt*            n_sc_extents,
+         /*OUT*/UInt*            n_guest_instrs, /* stats only */
+         /*MOD*/VexRegisterUpdates* pxControl,
+         /*IN*/ void*            callback_opaque,
+         /*IN*/ DisOneInstrFn    dis_instr_fn,
+         /*IN*/ const UChar*     guest_code,
+         /*IN*/ Addr             guest_IP_bbstart,
+         /*IN*/ Bool             (*chase_into_ok)(void*,Addr),
+         /*IN*/ VexEndness       host_endness,
+         /*IN*/ Bool             sigill_diag,
+         /*IN*/ VexArch          arch_guest,
+         /*IN*/ const VexArchInfo* archinfo_guest,
+         /*IN*/ const VexAbiInfo*  abiinfo_both,
+         /*IN*/ IRType           guest_word_type,
+         /*IN*/ UInt             (*needs_self_check)
+                                    (void*, /*MB_MOD*/VexRegisterUpdates*,
+                                            const VexGuestExtents*),
+         /*IN*/ Bool             (*preamble_function)(void*,IRSB*),
+         /*IN*/ Int              offB_GUEST_CMSTART,
+         /*IN*/ Int              offB_GUEST_CMLEN,
+         /*IN*/ Int              offB_GUEST_IP,
+         /*IN*/ Int              szB_GUEST_IP
+      )
+{
+   Long       delta;
+   Int        i, n_instrs, first_stmt_idx;
+   Bool       resteerOK, debug_print;
+   DisResult  dres;
+   IRStmt*    imark;
+   IRStmt*    nop;
+   static Int n_resteers = 0;
+   Int        d_resteers = 0;
+   Int        selfcheck_idx = 0;
+   IRSB*      irsb;
+   Addr       guest_IP_curr_instr;
+   IRConst*   guest_IP_bbstart_IRConst = NULL;
+   Int        n_cond_resteers_allowed = 2;
+
+   Bool (*resteerOKfn)(void*,Addr) = NULL;
+
+   debug_print = toBool(vex_traceflags & VEX_TRACE_FE);
+
+   /* check sanity .. */
+   vassert(sizeof(HWord) == sizeof(void*));
+   vassert(vex_control.guest_max_insns >= 1);
+   vassert(vex_control.guest_max_insns <= 100);
+   vassert(vex_control.guest_chase_thresh >= 0);
+   vassert(vex_control.guest_chase_thresh < vex_control.guest_max_insns);
+   vassert(guest_word_type == Ity_I32 || guest_word_type == Ity_I64);
+
+   if (guest_word_type == Ity_I32) {
+      vassert(szB_GUEST_IP == 4);
+      vassert((offB_GUEST_IP % 4) == 0);
+   } else {
+      vassert(szB_GUEST_IP == 8);
+      vassert((offB_GUEST_IP % 8) == 0);
+   }
+
+   /* Start a new, empty extent. */
+   vge->n_used  = 1;
+   vge->base[0] = guest_IP_bbstart;
+   vge->len[0]  = 0;
+   *n_sc_extents = 0;
+
+   /* And a new IR superblock to dump the result into. */
+   irsb = emptyIRSB();
+
+   /* Delta keeps track of how far along the guest_code array we have
+      so far gone. */
+   delta    = 0;
+   n_instrs = 0;
+   *n_guest_instrs = 0;
+
+   /* Guest addresses as IRConsts.  Used in self-checks to specify the
+      restart-after-discard point. */
+   guest_IP_bbstart_IRConst
+      = guest_word_type==Ity_I32 
+           ? IRConst_U32(toUInt(guest_IP_bbstart))
+           : IRConst_U64(guest_IP_bbstart);
+
+   /* Leave 15 spaces in which to put the check statements for a self
+      checking translation (up to 3 extents, and 5 stmts required for
+      each).  We won't know until later the extents and checksums of
+      the areas, if any, that need to be checked. */
+   nop = IRStmt_NoOp();
+   selfcheck_idx = irsb->stmts_used;
+   for (i = 0; i < 3 * 5; i++)
+      addStmtToIRSB( irsb, nop );
+
+   /* If the caller supplied a function to add its own preamble, use
+      it now. */
+   if (preamble_function) {
+      Bool stopNow = preamble_function( callback_opaque, irsb );
+      if (stopNow) {
+         /* The callback has completed the IR block without any guest
+            insns being disassembled into it, so just return it at
+            this point, even if a self-check was requested - as there
+            is nothing to self-check.  The 15 self-check no-ops will
+            still be in place, but they are harmless. */
+         return irsb;
+      }
+   }
+
+   /* Process instructions. */
+   while (True) {
+      vassert(n_instrs < vex_control.guest_max_insns);
+
+      /* Regardless of what chase_into_ok says, is chasing permissible
+         at all right now?  Set resteerOKfn accordingly. */
+      resteerOK 
+         = toBool(
+              n_instrs < vex_control.guest_chase_thresh
+              /* we can't afford to have a resteer once we're on the
+                 last extent slot. */
+              && vge->n_used < 3
+           );
+
+      resteerOKfn
+         = resteerOK ? chase_into_ok : const_False;
+
+      /* n_cond_resteers_allowed keeps track of whether we're still
+         allowing dis_instr_fn to chase conditional branches.  It
+         starts (at 2) and gets decremented each time dis_instr_fn
+         tells us it has chased a conditional branch.  We then
+         decrement it, and use it to tell later calls to dis_instr_fn
+         whether or not it is allowed to chase conditional
+         branches. */
+      vassert(n_cond_resteers_allowed >= 0 && n_cond_resteers_allowed <= 2);
+
+      /* This is the IP of the instruction we're just about to deal
+         with. */
+      guest_IP_curr_instr = guest_IP_bbstart + delta;
+
+      /* This is the irsb statement array index of the first stmt in
+         this insn.  That will always be the instruction-mark
+         descriptor. */
+      first_stmt_idx = irsb->stmts_used;
+
+      /* Add an instruction-mark statement.  We won't know until after
+         disassembling the instruction how long it instruction is, so
+         just put in a zero length and we'll fix it up later.
+
+         On ARM, the least significant bit of the instr address
+         distinguishes ARM vs Thumb instructions.  All instructions
+         actually start on at least 2-aligned addresses.  So we need
+         to ignore the bottom bit of the insn address when forming the
+         IMark's address field, but put that bottom bit in the delta
+         field, so that comparisons against guest_R15T for Thumb can
+         be done correctly.  By inspecting the delta field,
+         instruction processors can determine whether the instruction
+         was originally Thumb or ARM.  For more details of this
+         convention, see comments on definition of guest_R15T in
+         libvex_guest_arm.h. */
+      if (arch_guest == VexArchARM && (guest_IP_curr_instr & 1)) {
+         /* Thumb insn => mask out the T bit, but put it in delta */
+         addStmtToIRSB( irsb,
+                        IRStmt_IMark(guest_IP_curr_instr & ~(Addr)1,
+                                     0, /* len */
+                                     1  /* delta */
+                        )
+         );
+      } else {
+         /* All other targets: store IP as-is, and set delta to zero. */
+         addStmtToIRSB( irsb,
+                        IRStmt_IMark(guest_IP_curr_instr,
+                                     0, /* len */
+                                     0  /* delta */
+                        )
+         );
+      }
+
+      if (debug_print && n_instrs > 0)
+         vex_printf("\n");
+
+      /* Finally, actually disassemble an instruction. */
+      vassert(irsb->next == NULL);
+      dres = dis_instr_fn ( irsb,
+                            resteerOKfn,
+                            toBool(n_cond_resteers_allowed > 0),
+                            callback_opaque,
+                            guest_code,
+                            delta,
+                            guest_IP_curr_instr,
+                            arch_guest,
+                            archinfo_guest,
+                            abiinfo_both,
+                            host_endness,
+                            sigill_diag );
+
+      /* stay sane ... */
+      vassert(dres.whatNext == Dis_StopHere
+              || dres.whatNext == Dis_Continue
+              || dres.whatNext == Dis_ResteerU
+              || dres.whatNext == Dis_ResteerC);
+      /* ... disassembled insn length is sane ... */
+      vassert(dres.len >= 0 && dres.len <= 24);
+      /* ... continueAt is zero if no resteer requested ... */
+      if (dres.whatNext != Dis_ResteerU && dres.whatNext != Dis_ResteerC)
+         vassert(dres.continueAt == 0);
+      /* ... if we disallowed conditional resteers, check that one
+             didn't actually happen anyway ... */
+      if (n_cond_resteers_allowed == 0)
+         vassert(dres.whatNext != Dis_ResteerC);
+
+      /* Fill in the insn-mark length field. */
+      vassert(first_stmt_idx >= 0 && first_stmt_idx < irsb->stmts_used);
+      imark = irsb->stmts[first_stmt_idx];
+      vassert(imark);
+      vassert(imark->tag == Ist_IMark);
+      vassert(imark->Ist.IMark.len == 0);
+      imark->Ist.IMark.len = dres.len;
+
+      /* Print the resulting IR, if needed. */
+      if (vex_traceflags & VEX_TRACE_FE) {
+         for (i = first_stmt_idx; i < irsb->stmts_used; i++) {
+            vex_printf("              ");
+            ppIRStmt(irsb->stmts[i]);
+            vex_printf("\n");
+         }
+      }
+
+      /* Individual insn disassembly may not mess with irsb->next.
+         This function is the only place where it can be set. */
+      vassert(irsb->next == NULL);
+      vassert(irsb->jumpkind == Ijk_Boring);
+      vassert(irsb->offsIP == 0);
+
+      /* Individual insn disassembly must finish the IR for each
+         instruction with an assignment to the guest PC. */
+      vassert(first_stmt_idx < irsb->stmts_used);
+      /* it follows that irsb->stmts_used must be > 0 */
+      { IRStmt* st = irsb->stmts[irsb->stmts_used-1];
+        vassert(st);
+        vassert(st->tag == Ist_Put);
+        vassert(st->Ist.Put.offset == offB_GUEST_IP);
+        /* Really we should also check that the type of the Put'd data
+           == guest_word_type, but that's a bit expensive. */
+      }
+
+      /* Update the VexGuestExtents we are constructing. */
+      /* If vex_control.guest_max_insns is required to be < 100 and
+         each insn is at max 20 bytes long, this limit of 5000 then
+         seems reasonable since the max possible extent length will be
+         100 * 20 == 2000. */
+      vassert(vge->len[vge->n_used-1] < 5000);
+      vge->len[vge->n_used-1] 
+         = toUShort(toUInt( vge->len[vge->n_used-1] + dres.len ));
+      n_instrs++;
+
+      /* Advance delta (inconspicuous but very important :-) */
+      delta += (Long)dres.len;
+
+      switch (dres.whatNext) {
+         case Dis_Continue:
+            vassert(dres.continueAt == 0);
+            vassert(dres.jk_StopHere == Ijk_INVALID);
+            if (n_instrs < vex_control.guest_max_insns) {
+               /* keep going */
+            } else {
+               /* We have to stop.  See comment above re irsb field
+                  settings here. */
+               irsb->next = IRExpr_Get(offB_GUEST_IP, guest_word_type);
+               /* irsb->jumpkind must already be Ijk_Boring */
+               irsb->offsIP = offB_GUEST_IP;
+               goto done;
+            }
+            break;
+         case Dis_StopHere:
+            vassert(dres.continueAt == 0);
+            vassert(dres.jk_StopHere != Ijk_INVALID);
+            /* See comment above re irsb field settings here. */
+            irsb->next = IRExpr_Get(offB_GUEST_IP, guest_word_type);
+            irsb->jumpkind = dres.jk_StopHere;
+            irsb->offsIP = offB_GUEST_IP;
+            goto done;
+
+         case Dis_ResteerU:
+         case Dis_ResteerC:
+            /* Check that we actually allowed a resteer .. */
+            vassert(resteerOK);
+            if (dres.whatNext == Dis_ResteerC) {
+               vassert(n_cond_resteers_allowed > 0);
+               n_cond_resteers_allowed--;
+            }
+            /* figure out a new delta to continue at. */
+            vassert(resteerOKfn(callback_opaque,dres.continueAt));
+            delta = dres.continueAt - guest_IP_bbstart;
+            /* we now have to start a new extent slot. */
+            vge->n_used++;
+            vassert(vge->n_used <= 3);
+            vge->base[vge->n_used-1] = dres.continueAt;
+            vge->len[vge->n_used-1] = 0;
+            n_resteers++;
+            d_resteers++;
+            if (0 && (n_resteers & 0xFF) == 0)
+            vex_printf("resteer[%d,%d] to 0x%lx (delta = %lld)\n",
+                       n_resteers, d_resteers,
+                       dres.continueAt, delta);
+            break;
+         default:
+            vpanic("bb_to_IR");
+      }
+   }
+   /*NOTREACHED*/
+   vassert(0);
+
+  done:
+   /* We're done.  The only thing that might need attending to is that
+      a self-checking preamble may need to be created.  If so it gets
+      placed in the 15 slots reserved above.
+
+      The scheme is to compute a rather crude checksum of the code
+      we're making a translation of, and add to the IR a call to a
+      helper routine which recomputes the checksum every time the
+      translation is run, and requests a retranslation if it doesn't
+      match.  This is obviously very expensive and considerable
+      efforts are made to speed it up:
+
+      * the checksum is computed from all the naturally aligned
+        host-sized words that overlap the translated code.  That means
+        it could depend on up to 7 bytes before and 7 bytes after
+        which aren't part of the translated area, and so if those
+        change then we'll unnecessarily have to discard and
+        retranslate.  This seems like a pretty remote possibility and
+        it seems as if the benefit of not having to deal with the ends
+        of the range at byte precision far outweigh any possible extra
+        translations needed.
+
+      * there's a generic routine and 12 specialised cases, which
+        handle the cases of 1 through 12-word lengths respectively.
+        They seem to cover about 90% of the cases that occur in
+        practice.
+
+      We ask the caller, via needs_self_check, which of the 3 vge
+      extents needs a check, and only generate check code for those
+      that do.
+   */
+   {
+      Addr     base2check;
+      UInt     len2check;
+      HWord    expectedhW;
+      IRTemp   tistart_tmp, tilen_tmp;
+      HWord    VEX_REGPARM(2) (*fn_generic)(HWord, HWord);
+      HWord    VEX_REGPARM(1) (*fn_spec)(HWord);
+      const HChar* nm_generic;
+      const HChar* nm_spec;
+      HWord    fn_generic_entry = 0;
+      HWord    fn_spec_entry = 0;
+      UInt     host_word_szB = sizeof(HWord);
+      IRType   host_word_type = Ity_INVALID;
+
+      UInt extents_needing_check
+         = needs_self_check(callback_opaque, pxControl, vge);
+
+      if (host_word_szB == 4) host_word_type = Ity_I32;
+      if (host_word_szB == 8) host_word_type = Ity_I64;
+      vassert(host_word_type != Ity_INVALID);
+
+      vassert(vge->n_used >= 1 && vge->n_used <= 3);
+
+      /* Caller shouldn't claim that nonexistent extents need a
+         check. */
+      vassert((extents_needing_check >> vge->n_used) == 0);
+
+      for (i = 0; i < vge->n_used; i++) {
+
+         /* Do we need to generate a check for this extent? */
+         if ((extents_needing_check & (1 << i)) == 0)
+            continue;
+
+         /* Tell the caller */
+         (*n_sc_extents)++;
+
+         /* the extent we're generating a check for */
+         base2check = vge->base[i];
+         len2check  = vge->len[i];
+
+         /* stay sane */
+         vassert(len2check >= 0 && len2check < 1000/*arbitrary*/);
+
+         /* Skip the check if the translation involved zero bytes */
+         if (len2check == 0)
+            continue;
+
+         HWord first_hW = ((HWord)base2check)
+                          & ~(HWord)(host_word_szB-1);
+         HWord last_hW  = (((HWord)base2check) + len2check - 1)
+                          & ~(HWord)(host_word_szB-1);
+         vassert(first_hW <= last_hW);
+         HWord hW_diff = last_hW - first_hW;
+         vassert(0 == (hW_diff & (host_word_szB-1)));
+         HWord hWs_to_check = (hW_diff + host_word_szB) / host_word_szB;
+         vassert(hWs_to_check > 0
+                 && hWs_to_check < 1004/*arbitrary*/ / host_word_szB);
+
+         /* vex_printf("%lx %lx  %ld\n", first_hW, last_hW, hWs_to_check); */
+
+         if (host_word_szB == 8) {
+            fn_generic =  (VEX_REGPARM(2) HWord(*)(HWord, HWord))
+                          genericg_compute_checksum_8al;
+            nm_generic = "genericg_compute_checksum_8al";
+         } else {
+            fn_generic =  (VEX_REGPARM(2) HWord(*)(HWord, HWord))
+                          genericg_compute_checksum_4al;
+            nm_generic = "genericg_compute_checksum_4al";
+         }
+
+         fn_spec = NULL;
+         nm_spec = NULL;
+
+         if (host_word_szB == 8) {
+            const HChar* nm = NULL;
+            ULong  VEX_REGPARM(1) (*fn)(HWord)  = NULL;
+            switch (hWs_to_check) {
+               case 1:  fn =  genericg_compute_checksum_8al_1;
+                        nm = "genericg_compute_checksum_8al_1"; break;
+               case 2:  fn =  genericg_compute_checksum_8al_2;
+                        nm = "genericg_compute_checksum_8al_2"; break;
+               case 3:  fn =  genericg_compute_checksum_8al_3;
+                        nm = "genericg_compute_checksum_8al_3"; break;
+               case 4:  fn =  genericg_compute_checksum_8al_4;
+                        nm = "genericg_compute_checksum_8al_4"; break;
+               case 5:  fn =  genericg_compute_checksum_8al_5;
+                        nm = "genericg_compute_checksum_8al_5"; break;
+               case 6:  fn =  genericg_compute_checksum_8al_6;
+                        nm = "genericg_compute_checksum_8al_6"; break;
+               case 7:  fn =  genericg_compute_checksum_8al_7;
+                        nm = "genericg_compute_checksum_8al_7"; break;
+               case 8:  fn =  genericg_compute_checksum_8al_8;
+                        nm = "genericg_compute_checksum_8al_8"; break;
+               case 9:  fn =  genericg_compute_checksum_8al_9;
+                        nm = "genericg_compute_checksum_8al_9"; break;
+               case 10: fn =  genericg_compute_checksum_8al_10;
+                        nm = "genericg_compute_checksum_8al_10"; break;
+               case 11: fn =  genericg_compute_checksum_8al_11;
+                        nm = "genericg_compute_checksum_8al_11"; break;
+               case 12: fn =  genericg_compute_checksum_8al_12;
+                        nm = "genericg_compute_checksum_8al_12"; break;
+               default: break;
+            }
+            fn_spec = (VEX_REGPARM(1) HWord(*)(HWord)) fn;
+            nm_spec = nm;
+         } else {
+            const HChar* nm = NULL;
+            UInt   VEX_REGPARM(1) (*fn)(HWord) = NULL;
+            switch (hWs_to_check) {
+               case 1:  fn =  genericg_compute_checksum_4al_1;
+                        nm = "genericg_compute_checksum_4al_1"; break;
+               case 2:  fn =  genericg_compute_checksum_4al_2;
+                        nm = "genericg_compute_checksum_4al_2"; break;
+               case 3:  fn =  genericg_compute_checksum_4al_3;
+                        nm = "genericg_compute_checksum_4al_3"; break;
+               case 4:  fn =  genericg_compute_checksum_4al_4;
+                        nm = "genericg_compute_checksum_4al_4"; break;
+               case 5:  fn =  genericg_compute_checksum_4al_5;
+                        nm = "genericg_compute_checksum_4al_5"; break;
+               case 6:  fn =  genericg_compute_checksum_4al_6;
+                        nm = "genericg_compute_checksum_4al_6"; break;
+               case 7:  fn =  genericg_compute_checksum_4al_7;
+                        nm = "genericg_compute_checksum_4al_7"; break;
+               case 8:  fn =  genericg_compute_checksum_4al_8;
+                        nm = "genericg_compute_checksum_4al_8"; break;
+               case 9:  fn =  genericg_compute_checksum_4al_9;
+                        nm = "genericg_compute_checksum_4al_9"; break;
+               case 10: fn =  genericg_compute_checksum_4al_10;
+                        nm = "genericg_compute_checksum_4al_10"; break;
+               case 11: fn =  genericg_compute_checksum_4al_11;
+                        nm = "genericg_compute_checksum_4al_11"; break;
+               case 12: fn =  genericg_compute_checksum_4al_12;
+                        nm = "genericg_compute_checksum_4al_12"; break;
+               default: break;
+            }
+            fn_spec = (VEX_REGPARM(1) HWord(*)(HWord))fn;
+            nm_spec = nm;
+         }
+
+         expectedhW = fn_generic( first_hW, hWs_to_check );
+         /* If we got a specialised version, check it produces the same
+            result as the generic version! */
+         if (fn_spec) {
+            vassert(nm_spec);
+            vassert(expectedhW == fn_spec( first_hW ));
+         } else {
+            vassert(!nm_spec);
+         }
+
+         /* Set CMSTART and CMLEN.  These will describe to the despatcher
+            the area of guest code to invalidate should we exit with a
+            self-check failure. */
+
+         tistart_tmp = newIRTemp(irsb->tyenv, guest_word_type);
+         tilen_tmp   = newIRTemp(irsb->tyenv, guest_word_type);
+
+         IRConst* base2check_IRConst
+            = guest_word_type==Ity_I32 ? IRConst_U32(toUInt(base2check))
+                                       : IRConst_U64(base2check);
+         IRConst* len2check_IRConst
+            = guest_word_type==Ity_I32 ? IRConst_U32(len2check)
+                                       : IRConst_U64(len2check);
+
+         irsb->stmts[selfcheck_idx + i * 5 + 0]
+            = IRStmt_WrTmp(tistart_tmp, IRExpr_Const(base2check_IRConst) );
+
+         irsb->stmts[selfcheck_idx + i * 5 + 1]
+            = IRStmt_WrTmp(tilen_tmp, IRExpr_Const(len2check_IRConst) );
+
+         irsb->stmts[selfcheck_idx + i * 5 + 2]
+            = IRStmt_Put( offB_GUEST_CMSTART, IRExpr_RdTmp(tistart_tmp) );
+
+         irsb->stmts[selfcheck_idx + i * 5 + 3]
+            = IRStmt_Put( offB_GUEST_CMLEN, IRExpr_RdTmp(tilen_tmp) );
+
+         /* Generate the entry point descriptors */
+         if (abiinfo_both->host_ppc_calls_use_fndescrs) {
+            HWord* descr = (HWord*)fn_generic;
+            fn_generic_entry = descr[0];
+            if (fn_spec) {
+               descr = (HWord*)fn_spec;
+               fn_spec_entry = descr[0];
+            } else {
+               fn_spec_entry = (HWord)NULL;
+            }
+         } else {
+            fn_generic_entry = (HWord)fn_generic;
+            if (fn_spec) {
+               fn_spec_entry = (HWord)fn_spec;
+            } else {
+               fn_spec_entry = (HWord)NULL;
+            }
+         }
+
+         IRExpr* callexpr = NULL;
+         if (fn_spec) {
+            callexpr = mkIRExprCCall( 
+                          host_word_type, 1/*regparms*/, 
+                          nm_spec, (void*)fn_spec_entry,
+                          mkIRExprVec_1(
+                             mkIRExpr_HWord( (HWord)first_hW )
+                          )
+                       );
+         } else {
+            callexpr = mkIRExprCCall( 
+                          host_word_type, 2/*regparms*/, 
+                          nm_generic, (void*)fn_generic_entry,
+                          mkIRExprVec_2(
+                             mkIRExpr_HWord( (HWord)first_hW ),
+                             mkIRExpr_HWord( (HWord)hWs_to_check )
+                          )
+                       );
+         }
+
+         irsb->stmts[selfcheck_idx + i * 5 + 4]
+            = IRStmt_Exit( 
+                 IRExpr_Binop( 
+                    host_word_type==Ity_I64 ? Iop_CmpNE64 : Iop_CmpNE32,
+                    callexpr,
+                       host_word_type==Ity_I64
+                          ? IRExpr_Const(IRConst_U64(expectedhW))
+                          : IRExpr_Const(IRConst_U32(expectedhW))
+                 ),
+                 Ijk_InvalICache,
+                 /* Where we must restart if there's a failure: at the
+                    first extent, regardless of which extent the
+                    failure actually happened in. */
+                 guest_IP_bbstart_IRConst,
+                 offB_GUEST_IP
+              );
+      } /* for (i = 0; i < vge->n_used; i++) */
+   }
+
+   /* irsb->next must now be set, since we've finished the block.
+      Print it if necessary.*/
+   vassert(irsb->next != NULL);
+   if (debug_print) {
+      vex_printf("              ");
+      vex_printf( "PUT(%d) = ", irsb->offsIP);
+      ppIRExpr( irsb->next );
+      vex_printf( "; exit-");
+      ppIRJumpKind(irsb->jumpkind);
+      vex_printf( "\n");
+      vex_printf( "\n");
+   }
+
+   *n_guest_instrs = n_instrs;
+   return irsb;
+}
+
+
+/*-------------------------------------------------------------
+  A support routine for doing self-checking translations. 
+  -------------------------------------------------------------*/
+
+/* CLEAN HELPER */
+/* CALLED FROM GENERATED CODE */
+
+/* Compute a checksum of host memory at [addr .. addr+len-1], as fast
+   as possible.  All _4al versions assume that the supplied address is
+   4 aligned.  All length values are in 4-byte chunks.  These fns are
+   called once for every use of a self-checking translation, so they
+   need to be as fast as possible. */
+
+/* --- 32-bit versions, used only on 32-bit hosts --- */
+
+static inline UInt ROL32 ( UInt w, Int n ) {
+   w = (w << n) | (w >> (32-n));
+   return w;
+}
+
+VEX_REGPARM(2)
+static UInt genericg_compute_checksum_4al ( HWord first_w32, HWord n_w32s )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   /* unrolled */
+   while (n_w32s >= 4) {
+      UInt  w;
+      w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      p += 4;
+      n_w32s -= 4;
+      sum1 ^= sum2;
+   }
+   while (n_w32s >= 1) {
+      UInt  w;
+      w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      p += 1;
+      n_w32s -= 1;
+      sum1 ^= sum2;
+   }
+   return sum1 + sum2;
+}
+
+/* Specialised versions of the above function */
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_1 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_2 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_3 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_4 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_5 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_6 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[5];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_7 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[5];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[6];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_8 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[5];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[6];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[7];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_9 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[5];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[6];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[7];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[8];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_10 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[5];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[6];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[7];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[8];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[9];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_11 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[5];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[6];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[7];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[8];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[9];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[10]; sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static UInt genericg_compute_checksum_4al_12 ( HWord first_w32 )
+{
+   UInt  sum1 = 0, sum2 = 0;
+   UInt* p = (UInt*)first_w32;
+   UInt  w;
+   w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[5];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[6];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[7];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[8];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[9];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[10]; sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   w = p[11]; sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+
+/* --- 64-bit versions, used only on 64-bit hosts --- */
+
+static inline ULong ROL64 ( ULong w, Int n ) {
+   w = (w << n) | (w >> (64-n));
+   return w;
+}
+
+VEX_REGPARM(2)
+static ULong genericg_compute_checksum_8al ( HWord first_w64, HWord n_w64s )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   /* unrolled */
+   while (n_w64s >= 4) {
+      ULong  w;
+      w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+      w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+      w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+      w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+      p += 4;
+      n_w64s -= 4;
+      sum1 ^= sum2;
+   }
+   while (n_w64s >= 1) {
+      ULong  w;
+      w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+      p += 1;
+      n_w64s -= 1;
+      sum1 ^= sum2;
+   }
+   return sum1 + sum2;
+}
+
+/* Specialised versions of the above function */
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_1 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_2 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_3 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_4 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_5 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_6 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[5];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_7 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[5];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[6];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_8 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[5];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[6];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[7];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_9 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[5];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[6];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[7];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[8];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_10 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[5];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[6];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[7];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[8];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[9];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_11 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[5];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[6];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[7];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[8];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[9];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[10]; sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+VEX_REGPARM(1)
+static ULong genericg_compute_checksum_8al_12 ( HWord first_w64 )
+{
+   ULong  sum1 = 0, sum2 = 0;
+   ULong* p = (ULong*)first_w64;
+   ULong  w;
+   w = p[0];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[1];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[2];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[3];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[4];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[5];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[6];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[7];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   w = p[8];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[9];  sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[10]; sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   w = p[11]; sum1 = ROL64(sum1 ^ w, 63);  sum2 += w;
+   sum1 ^= sum2;
+   return sum1 + sum2;
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end                                 guest_generic_bb_to_IR.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/priv/guest_generic_bb_to_IR.h b/VEX/priv/guest_generic_bb_to_IR.h
new file mode 100644
index 0000000..49c94d7
--- /dev/null
+++ b/VEX/priv/guest_generic_bb_to_IR.h
@@ -0,0 +1,201 @@
+
+/*--------------------------------------------------------------------*/
+/*--- begin                               guest_generic_bb_to_IR.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __VEX_GUEST_GENERIC_BB_TO_IR_H
+#define __VEX_GUEST_GENERIC_BB_TO_IR_H
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"              // IRJumpKind
+#include "libvex.h"                 // VexArch
+
+/* This defines stuff needed by the guest insn disassemblers.
+   It's a bit circular; it is included by
+   - the guest-specific toIR.c files (guest-{x86,amd64,ppc,arm}/toIR.c)
+   - the generic disassembly driver (bb_to_IR.c)
+   - vex_main.c
+*/
+
+
+/* ---------------------------------------------------------------
+   Result of disassembling an instruction
+   --------------------------------------------------------------- */
+
+/* The results of disassembling an instruction.  There are three
+   possible outcomes.  For Dis_Resteer, the disassembler _must_
+   continue at the specified address.  For Dis_StopHere, the
+   disassembler _must_ terminate the BB.  For Dis_Continue, we may at
+   our option either disassemble the next insn, or terminate the BB;
+   but in the latter case we must set the bb's ->next field to point
+   to the next instruction.  */
+
+typedef
+
+   struct {
+
+      /* The disassembled insn has this length.  Must always be
+         set. */
+      UInt len;
+
+      /* What happens next?
+         Dis_StopHere:  this insn terminates the BB; we must stop.
+         Dis_Continue:  we can optionally continue into the next insn
+         Dis_ResteerU:  followed an unconditional branch; continue at 
+                        'continueAt'
+         Dis_ResteerC:  (speculatively, of course) followed a
+                        conditional branch; continue at 'continueAt'
+      */
+      enum { Dis_StopHere, Dis_Continue, 
+             Dis_ResteerU, Dis_ResteerC } whatNext;
+
+      /* For Dis_StopHere, we need to end the block and create a
+         transfer to whatever the NIA is.  That will have presumably
+         been set by the IR generated for this insn.  So we need to
+         know the jump kind to use.  Should be Ijk_INVALID in other
+         Dis_ cases. */
+      IRJumpKind jk_StopHere;
+
+      /* For Dis_ResteerU/Dis_ResteerC, this is the guest address we
+         should continue at.  Otherwise ignored (should be zero). */
+      Addr   continueAt;
+
+   }
+
+   DisResult;
+
+
+/* ---------------------------------------------------------------
+   The type of a function which disassembles one instruction.
+   C's function-type syntax is really astonishingly bizarre.
+   --------------------------------------------------------------- */
+
+/* A function of this type (DisOneInstrFn) disassembles an instruction
+   located at host address &guest_code[delta], whose guest IP is
+   guest_IP (this may be entirely unrelated to where the insn is
+   actually located in the host's address space.).  The returned
+   DisResult.len field carries its size.  If the returned
+   DisResult.whatNext field is Dis_Resteer then DisResult.continueAt
+   should hold the guest IP of the next insn to disassemble.
+
+   disInstr is not permitted to return Dis_Resteer if resteerOkFn,
+   when applied to the address which it wishes to resteer into,
+   returns False.  
+
+   The resulting IR is added to the end of irbb.
+*/
+
+/* Function-pointer type for the per-architecture single-instruction
+   disassemblers; the calling contract is described in the comment
+   immediately above. */
+typedef
+
+   DisResult (*DisOneInstrFn) ( 
+
+      /* This is the IRSB to which the resulting IR is to be appended. */
+      /*OUT*/ IRSB*        irbb,
+
+      /* Return True iff resteering to the given addr is allowed (for
+         branches/calls to destinations that are known at JIT-time) */
+      /*IN*/  Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+
+      /* Should we speculatively resteer across conditional branches?
+         (Experimental and not enabled by default).  The strategy is
+         to assume that backward branches are taken and forward
+         branches are not taken. */
+      /*IN*/  Bool         resteerCisOk,
+
+      /* Vex-opaque data passed to all caller (valgrind) supplied
+         callbacks. */
+      /*IN*/  void*        callback_opaque,
+
+      /* Where is the guest code? */
+      /*IN*/  const UChar* guest_code,
+
+      /* Where is the actual insn?  Note: it's at &guest_code[delta] */
+      /*IN*/  Long         delta,
+
+      /* What is the guest IP of the insn? */
+      /*IN*/  Addr         guest_IP,
+
+      /* Info about the guest architecture */
+      /*IN*/  VexArch      guest_arch,
+      /*IN*/  const VexArchInfo* archinfo,
+
+      /* ABI info for both guest and host */
+      /*IN*/  const VexAbiInfo*  abiinfo,
+
+      /* The endianness of the host */
+      /*IN*/  VexEndness   host_endness,
+
+      /* Should diagnostics be printed for illegal instructions? */
+      /*IN*/  Bool         sigill_diag
+
+   );
+
+
+/* ---------------------------------------------------------------
+   Top-level BB to IR conversion fn.
+   --------------------------------------------------------------- */
+
+/* Top-level driver: disassemble guest instructions starting at
+   guest_IP_bbstart into a single IRSB, using dis_instr_fn for each
+   insn.  See detailed comment in guest_generic_bb_to_IR.c. */
+extern
+IRSB* bb_to_IR ( 
+         /*OUT*/VexGuestExtents* vge,
+         /*OUT*/UInt*            n_sc_extents,
+         /*OUT*/UInt*            n_guest_instrs, /* stats only */
+         /*MOD*/VexRegisterUpdates* pxControl,
+         /*IN*/ void*            callback_opaque,
+         /*IN*/ DisOneInstrFn    dis_instr_fn,
+         /*IN*/ const UChar*     guest_code,
+         /*IN*/ Addr             guest_IP_bbstart,
+         /*IN*/ Bool             (*chase_into_ok)(void*,Addr),
+         /*IN*/ VexEndness       host_endness,
+         /*IN*/ Bool             sigill_diag,
+         /*IN*/ VexArch          arch_guest,
+         /*IN*/ const VexArchInfo* archinfo_guest,
+         /*IN*/ const VexAbiInfo*  abiinfo_both,
+         /*IN*/ IRType           guest_word_type,
+         /*IN*/ UInt             (*needs_self_check)
+                                    (void*, /*MB_MOD*/VexRegisterUpdates*,
+                                            const VexGuestExtents*),
+         /*IN*/ Bool             (*preamble_function)(void*,IRSB*),
+         /*IN*/ Int              offB_GUEST_CMSTART,
+         /*IN*/ Int              offB_GUEST_CMLEN,
+         /*IN*/ Int              offB_GUEST_IP,
+         /*IN*/ Int              szB_GUEST_IP
+      );
+
+
+#endif /* ndef __VEX_GUEST_GENERIC_BB_TO_IR_H */
+
+/*--------------------------------------------------------------------*/
+/*--- end                                 guest_generic_bb_to_IR.h ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/priv/guest_generic_x87.c b/VEX/priv/guest_generic_x87.c
new file mode 100644
index 0000000..0e36e94
--- /dev/null
+++ b/VEX/priv/guest_generic_x87.c
@@ -0,0 +1,1221 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                               guest_generic_x87.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* This file contains functions for doing some x87-specific
+   operations.  Both the amd64 and x86 front ends (guests) indirectly
+   call these functions via guest helper calls.  By putting them here,
+   code duplication is avoided.  Some of these functions are tricky
+   and hard to verify, so there is much to be said for only having one
+   copy thereof.
+*/
+
+#include "libvex_basictypes.h"
+
+#include "main_util.h"
+#include "guest_generic_x87.h"
+
+
+/* 80 and 64-bit floating point formats:
+
+   80-bit:
+
+    S  0       0-------0      zero
+    S  0       0X------X      denormals
+    S  1-7FFE  1X------X      normals (all normals have leading 1)
+    S  7FFF    10------0      infinity
+    S  7FFF    10X-----X      snan
+    S  7FFF    11X-----X      qnan
+
+   S is the sign bit.  For runs X----X, at least one of the Xs must be
+   nonzero.  Exponent is 15 bits, fractional part is 63 bits, and
+   there is an explicitly represented leading 1, and a sign bit,
+   giving 80 in total.
+
+   64-bit avoids the confusion of an explicitly represented leading 1
+   and so is simpler:
+
+    S  0      0------0   zero
+    S  0      X------X   denormals
+    S  1-7FE  any        normals
+    S  7FF    0------0   infinity
+    S  7FF    0X-----X   snan
+    S  7FF    1X-----X   qnan
+
+   Exponent is 11 bits, fractional part is 52 bits, and there is a 
+   sign bit, giving 64 in total.
+*/
+
+
+static inline UInt read_bit_array ( UChar* arr, UInt n )
+{
+   /* Return bit n of the bit array 'arr', LSB-first within each
+      byte: byte index is n/8, bit index within that byte is n%8. */
+   return (UInt)( (arr[n >> 3] >> (n & 7)) & 1 );
+}
+
+static inline void write_bit_array ( UChar* arr, UInt n, UInt b )
+{
+   /* Set bit n of 'arr' to the low bit of 'b', leaving all other
+      bits unchanged.  Same byte/bit addressing as read_bit_array. */
+   UInt  byteIx = n >> 3;
+   UInt  bitIx  = n & 7;
+   UChar cur    = arr[byteIx];
+   cur = toUChar( cur & ~(1 << bitIx) );      /* clear target bit   */
+   cur = toUChar( cur | ((b & 1) << bitIx) ); /* deposit new value  */
+   arr[byteIx] = cur;
+}
+
+/* Convert an IEEE754 double (64-bit) into an x87 extended double
+   (80-bit), mimicking the hardware fairly closely.  Both numbers are
+   stored little-endian.  Limitations, all of which could be fixed,
+   given some level of hassle:
+
+   * Identity of NaNs is not preserved.
+
+   See comments in the code for more details.
+*/
+void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )
+{
+   Bool  mantissaIsZero;
+   Int   bexp, i, j, shift;
+   UChar sign;
+
+   /* sign = bit 63 of the double; bexp = its 11-bit biased exponent,
+      bits 62:52. */
+   sign = toUChar( (f64[7] >> 7) & 1 );
+   bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);
+   bexp &= 0x7FF;
+
+   mantissaIsZero = False;
+   if (bexp == 0 || bexp == 0x7FF) {
+      /* We'll need to know whether or not the mantissa (bits 51:0) is
+         all zeroes in order to handle these cases.  So figure it
+         out. */
+      mantissaIsZero
+         = toBool( 
+              (f64[6] & 0x0F) == 0 
+              && f64[5] == 0 && f64[4] == 0 && f64[3] == 0 
+              && f64[2] == 0 && f64[1] == 0 && f64[0] == 0
+           );
+   }
+
+   /* If the exponent is zero, either we have a zero or a denormal.
+      Produce a zero.  This is a hack in that it forces denormals to
+      zero.  Could do better. */
+   if (bexp == 0) {
+      f80[9] = toUChar( sign << 7 );
+      f80[8] = f80[7] = f80[6] = f80[5] = f80[4]
+             = f80[3] = f80[2] = f80[1] = f80[0] = 0;
+
+      if (mantissaIsZero)
+         /* It really is zero, so that's all we can do. */
+         return;
+
+      /* There is at least one 1-bit in the mantissa.  So it's a
+         potentially denormalised double -- but we can produce a
+         normalised long double.  Count the leading zeroes in the
+         mantissa so as to decide how much to bump the exponent down
+         by.  Note, this is SLOW. */
+      shift = 0;
+      for (i = 51; i >= 0; i--) {
+        if (read_bit_array(f64, i))
+           break;
+        shift++;
+      }
+
+      /* and copy into place as many bits as we can get our hands on. */
+      j = 63;
+      for (i = 51 - shift; i >= 0; i--) {
+         write_bit_array( f80, j,
+     	 read_bit_array( f64, i ) );
+         j--;
+      }
+
+      /* Set the exponent appropriately, and we're done.  (16383-1023)
+         is the difference between the x87 and IEEE754 exponent
+         biases. */
+      bexp -= shift;
+      bexp += (16383 - 1023);
+      f80[9] = toUChar( (sign << 7) | ((bexp >> 8) & 0xFF) );
+      f80[8] = toUChar( bexp & 0xFF );
+      return;
+   }
+
+   /* If the exponent is 7FF, this is either an Infinity, a SNaN or
+      QNaN, as determined by examining bits 51:0, thus:
+          0  ... 0    Inf
+          0X ... X    SNaN
+          1X ... X    QNaN
+      where at least one of the Xs is not zero.
+   */
+   if (bexp == 0x7FF) {
+      if (mantissaIsZero) {
+         /* Produce an appropriately signed infinity:
+            S 1--1 (15)  1  0--0 (63)
+         */
+         f80[9] = toUChar( (sign << 7) | 0x7F );
+         f80[8] = 0xFF;
+         f80[7] = 0x80;
+         f80[6] = f80[5] = f80[4] = f80[3] 
+                = f80[2] = f80[1] = f80[0] = 0;
+         return;
+      }
+      /* So it's either a QNaN or SNaN.  Distinguish by considering
+         bit 51.  Note, this destroys all the trailing bits
+         (identity?) of the NaN.  IEEE754 doesn't require preserving
+         these (it only requires that there be one QNaN value and one
+         SNaN value), but x87 does seem to have some ability to
+         preserve them.  Anyway, here, the NaN's identity is
+         destroyed.  Could be improved. */
+      if (f64[6] & 8) {
+         /* QNaN.  Make a canonical QNaN:
+            S 1--1 (15)  1 1  0--0 (62) 
+         */
+         f80[9] = toUChar( (sign << 7) | 0x7F );
+         f80[8] = 0xFF;
+         f80[7] = 0xC0;
+         f80[6] = f80[5] = f80[4] = f80[3] 
+                = f80[2] = f80[1] = f80[0] = 0x00;
+      } else {
+         /* SNaN.  Make a SNaN:
+            S 1--1 (15)  1 0  1--1 (62) 
+         */
+         f80[9] = toUChar( (sign << 7) | 0x7F );
+         f80[8] = 0xFF;
+         f80[7] = 0xBF;
+         f80[6] = f80[5] = f80[4] = f80[3] 
+                = f80[2] = f80[1] = f80[0] = 0xFF;
+      }
+      return;
+   }
+
+   /* It's not a zero, denormal, infinity or nan.  So it must be a
+      normalised number.  Rebias the exponent and build the new
+      number, shifting the 52-bit mantissa up by 11 and adding the
+      explicit leading 1 that the 80-bit format requires. */
+   bexp += (16383 - 1023);
+
+   f80[9] = toUChar( (sign << 7) | ((bexp >> 8) & 0xFF) );
+   f80[8] = toUChar( bexp & 0xFF );
+   f80[7] = toUChar( (1 << 7) | ((f64[6] << 3) & 0x78) 
+                              | ((f64[5] >> 5) & 7) );
+   f80[6] = toUChar( ((f64[5] << 3) & 0xF8) | ((f64[4] >> 5) & 7) );
+   f80[5] = toUChar( ((f64[4] << 3) & 0xF8) | ((f64[3] >> 5) & 7) );
+   f80[4] = toUChar( ((f64[3] << 3) & 0xF8) | ((f64[2] >> 5) & 7) );
+   f80[3] = toUChar( ((f64[2] << 3) & 0xF8) | ((f64[1] >> 5) & 7) );
+   f80[2] = toUChar( ((f64[1] << 3) & 0xF8) | ((f64[0] >> 5) & 7) );
+   f80[1] = toUChar( ((f64[0] << 3) & 0xF8) );
+   f80[0] = toUChar( 0 );
+}
+
+
+/* Convert an x87 extended double (80-bit) into an IEEE 754 double
+   (64-bit), mimicking the hardware fairly closely.  Both numbers are
+   stored little-endian.  Limitations, both of which could be fixed,
+   given some level of hassle:
+
+   * Rounding following truncation could be a bit better.
+
+   * Identity of NaNs is not preserved.
+
+   See comments in the code for more details.
+*/
+void convert_f80le_to_f64le ( /*IN*/UChar* f80, /*OUT*/UChar* f64 )
+{
+   Bool  isInf;
+   Int   bexp, i, j;
+   UChar sign;
+
+   /* sign = bit 79; bexp = the 15-bit biased exponent, bits 78:64. */
+   sign = toUChar((f80[9] >> 7) & 1);
+   bexp = (((UInt)f80[9]) << 8) | (UInt)f80[8];
+   bexp &= 0x7FFF;
+
+   /* If the exponent is zero, either we have a zero or a denormal.
+      But an extended precision denormal becomes a double precision
+      zero, so in either case, just produce the appropriately signed
+      zero. */
+   if (bexp == 0) {
+      f64[7] = toUChar(sign << 7);
+      f64[6] = f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+      return;
+   }
+   
+   /* If the exponent is 7FFF, this is either an Infinity, a SNaN or
+      QNaN, as determined by examining bits 62:0, thus:
+          10  ... 0    Inf
+          10X ... X    SNaN
+          11X ... X    QNaN
+      where at least one of the Xs is not zero.
+   */
+   if (bexp == 0x7FFF) {
+      isInf = toBool(
+                 (f80[7] & 0x7F) == 0 
+                 && f80[6] == 0 && f80[5] == 0 && f80[4] == 0 
+                 && f80[3] == 0 && f80[2] == 0 && f80[1] == 0 
+                 && f80[0] == 0
+              );
+      if (isInf) {
+         /* Max exponent but explicit integer bit (bit 62) clear:
+            treat as the strange-NaN case below.  ("wierd" is sic --
+            it must match the label's spelling.) */
+         if (0 == (f80[7] & 0x80))
+            goto wierd_NaN;
+         /* Produce an appropriately signed infinity:
+            S 1--1 (11)  0--0 (52)
+         */
+         f64[7] = toUChar((sign << 7) | 0x7F);
+         f64[6] = 0xF0;
+         f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+         return;
+      }
+      /* So it's either a QNaN or SNaN.  Distinguish by considering
+         bit 61.  Note, this destroys all the trailing bits
+         (identity?) of the NaN.  IEEE754 doesn't require preserving
+         these (it only requires that there be one QNaN value and one
+         SNaN value), but x87 does seem to have some ability to
+         preserve them.  Anyway, here, the NaN's identity is
+         destroyed.  Could be improved. */
+      if (f80[7] & 0x40) {
+         /* QNaN.  Make a canonical QNaN:
+            S 1--1 (11)  1  0--0 (51) 
+         */
+         f64[7] = toUChar((sign << 7) | 0x7F);
+         f64[6] = 0xF8;
+         f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0x00;
+      } else {
+         /* SNaN.  Make a SNaN:
+            S 1--1 (11)  0  1--1 (51) 
+         */
+         f64[7] = toUChar((sign << 7) | 0x7F);
+         f64[6] = 0xF7;
+         f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0xFF;
+      }
+      return;
+   }
+
+   /* If it's not a Zero, NaN or Inf, and the integer part (bit 62) is
+      zero, the x87 FPU appears to consider the number denormalised
+      and converts it to a QNaN. */
+   if (0 == (f80[7] & 0x80)) {
+      wierd_NaN:
+      /* Strange hardware QNaN:
+         S 1--1 (11)  1  0--0 (51) 
+      */
+      /* On a PIII, these QNaNs always appear with sign==1.  I have
+         no idea why. */
+      f64[7] = (1 /*sign*/ << 7) | 0x7F;
+      f64[6] = 0xF8;
+      f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+      return;
+   }
+
+   /* It's not a zero, denormal, infinity or nan.  So it must be a 
+      normalised number.  Rebias the exponent and consider. */
+   bexp -= (16383 - 1023);
+   if (bexp >= 0x7FF) {
+      /* It's too big for a double.  Construct an infinity. */
+      f64[7] = toUChar((sign << 7) | 0x7F);
+      f64[6] = 0xF0;
+      f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+      return;
+   }
+
+   if (bexp <= 0) {
+      /* It's too small for a normalised double.  First construct a
+         zero and then see if it can be improved into a denormal.  */
+      f64[7] = toUChar(sign << 7);
+      f64[6] = f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+
+      if (bexp < -52)
+         /* Too small even for a denormal. */
+         return;
+
+      /* Ok, let's make a denormal.  Note, this is SLOW. */
+      /* Copy bits 63, 62, 61, etc of the src mantissa into the dst, 
+         indexes 52+bexp, 51+bexp, etc, until k+bexp < 0. */
+      /* bexp is in range -52 .. 0 inclusive */
+      for (i = 63; i >= 0; i--) {
+         j = i - 12 + bexp;
+         if (j < 0) break;
+         /* We shouldn't really call vassert from generated code. */
+         vassert(j >= 0 && j < 52);
+         write_bit_array ( f64,
+                           j,
+                           read_bit_array ( f80, i ) );
+      }
+      /* and now we might have to round ... */
+      if (read_bit_array(f80, 10+1 - bexp) == 1) 
+         goto do_rounding;
+
+      return;
+   }
+
+   /* Ok, it's a normalised number which is representable as a double.
+      Copy the exponent and mantissa into place.  (The commented-out
+      loop is the bit-by-bit equivalent of the byte-wise copy below.) */
+   /*
+   for (i = 0; i < 52; i++)
+      write_bit_array ( f64,
+                        i,
+                        read_bit_array ( f80, i+11 ) );
+   */
+   f64[0] = toUChar( (f80[1] >> 3) | (f80[2] << 5) );
+   f64[1] = toUChar( (f80[2] >> 3) | (f80[3] << 5) );
+   f64[2] = toUChar( (f80[3] >> 3) | (f80[4] << 5) );
+   f64[3] = toUChar( (f80[4] >> 3) | (f80[5] << 5) );
+   f64[4] = toUChar( (f80[5] >> 3) | (f80[6] << 5) );
+   f64[5] = toUChar( (f80[6] >> 3) | (f80[7] << 5) );
+
+   f64[6] = toUChar( ((bexp << 4) & 0xF0) | ((f80[7] >> 3) & 0x0F) );
+
+   f64[7] = toUChar( (sign << 7) | ((bexp >> 4) & 0x7F) );
+
+   /* Now consider any rounding that needs to happen as a result of
+      truncating the mantissa. */
+   if (f80[1] & 4) /* read_bit_array(f80, 10) == 1) */ {
+
+      /* If the bottom bits of f80 are "100 0000 0000", then the
+         infinitely precise value is deemed to be mid-way between the
+         two closest representable values.  Since we're doing
+         round-to-nearest (the default mode), in that case it is the
+         bit immediately above which indicates whether we should round
+         upwards or not -- if 0, we don't.  All that is encapsulated
+         in the following simple test. */
+      if ((f80[1] & 0xF) == 4/*0100b*/ && f80[0] == 0)
+         return;
+
+      do_rounding:
+      /* Round upwards.  This is a kludge.  Once in every 2^24
+         roundings (statistically) the bottom three bytes are all 0xFF
+         and so we don't round at all.  Could be improved. */
+      if (f64[0] != 0xFF) { 
+         f64[0]++; 
+      }
+      else 
+      if (f64[0] == 0xFF && f64[1] != 0xFF) {
+         f64[0] = 0;
+         f64[1]++;
+      }
+      else      
+      if (f64[0] == 0xFF && f64[1] == 0xFF && f64[2] != 0xFF) {
+         f64[0] = 0;
+         f64[1] = 0;
+         f64[2]++;
+      }
+      /* else we don't round, but we should. */
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Extract the signed significand or exponent component as per
+   fxtract.  Arg and result are doubles travelling under the guise of
+   ULongs.  Returns significand when getExp is zero and exponent
+   otherwise. */
+ULong x86amd64g_calculate_FXTRACT ( ULong arg, HWord getExp )
+{
+   ULong  uSig, uExp;
+   /* Long   sSig; */
+   Int    sExp, i;
+   UInt   sign, expExp;
+
+   /*
+    S  7FF    0------0   infinity
+    S  7FF    0X-----X   snan
+    S  7FF    1X-----X   qnan
+   */
+   const ULong posInf  = 0x7FF0000000000000ULL;
+   const ULong negInf  = 0xFFF0000000000000ULL;
+   const ULong nanMask = 0x7FF0000000000000ULL;
+   const ULong qNan    = 0x7FF8000000000000ULL;
+   const ULong posZero = 0x0000000000000000ULL;
+   const ULong negZero = 0x8000000000000000ULL;
+   const ULong bit51   = 1ULL << 51;
+   const ULong bit52   = 1ULL << 52;
+   const ULong sigMask = bit52 - 1;
+
+   /* Mimic Core i5 behaviour for special cases. */
+   /* NOTE(review): both arms of the first ternary are posInf --
+      presumably intentional, matching observed hardware; confirm. */
+   if (arg == posInf)
+      return getExp ? posInf : posInf;
+   if (arg == negInf)
+      return getExp ? posInf : negInf;
+   if ((arg & nanMask) == nanMask)
+      return qNan | (arg & (1ULL << 63));
+   if (arg == posZero)
+      return getExp ? negInf : posZero;
+   if (arg == negZero)
+      return getExp ? negInf : negZero;
+
+   /* Split into sign, exponent and significand. */
+   sign = ((UInt)(arg >> 63)) & 1;
+
+   /* Mask off exponent & sign. uSig is in range 0 .. 2^52-1. */
+   uSig = arg & sigMask;
+
+   /* Get the exponent. */
+   sExp = ((Int)(arg >> 52)) & 0x7FF;
+
+   /* Deal with denormals: if the exponent is zero, then the
+      significand cannot possibly be zero (negZero/posZero are handled
+      above).  Shift the significand left until bit 51 of it becomes
+      1, and decrease the exponent accordingly.
+   */
+   if (sExp == 0) {
+      for (i = 0; i < 52; i++) {
+         if (uSig & bit51)
+            break;
+         uSig <<= 1;
+         sExp--;
+      }
+      uSig <<= 1;
+   } else {
+      /* Add the implied leading-1 in the significand. */
+      uSig |= bit52;
+   }
+
+   /* Roll in the sign. */
+   /* sSig = uSig; */
+   /* if (sign) sSig =- sSig; */
+
+   /* Convert sig into a double.  This should be an exact conversion.
+      Then divide by 2^52, which should give a value in the range 1.0
+      to 2.0-epsilon, at least for normalised args. */
+   /* dSig = (Double)sSig; */
+   /* dSig /= 67108864.0;  */ /* 2^26 */
+   /* dSig /= 67108864.0;  */ /* 2^26 */
+   uSig &= sigMask;
+   uSig |= 0x3FF0000000000000ULL;
+   if (sign)
+      uSig ^= negZero;
+
+   /* Convert exp into a double.  Also an exact conversion. */
+   /* dExp = (Double)(sExp - 1023); */
+   sExp -= 1023;
+   if (sExp == 0) {
+      uExp = 0;
+   } else {
+      uExp   = sExp < 0 ? -sExp : sExp;
+      expExp = 0x3FF +52;
+      /* 1 <= uExp <= 1074 */
+      /* Skip first 42 iterations of normalisation loop as we know they
+         will always happen */
+      uExp <<= 42;
+      expExp -= 42;
+      for (i = 0; i < 52-42; i++) {
+         if (uExp & bit52)
+            break;
+         uExp <<= 1;
+         expExp--;
+      }
+      uExp &= sigMask;
+      uExp |= ((ULong)expExp) << 52;
+      if (sExp < 0) uExp ^= negZero;
+   }
+
+   return getExp ? uExp : uSig;
+}
+
+
+
+/*---------------------------------------------------------*/
+/*--- SSE4.2 PCMP{E,I}STR{I,M} helpers                  ---*/
+/*---------------------------------------------------------*/
+
+/* We need the definitions for OSZACP eflags/rflags offsets.
+   #including guest_{amd64,x86}_defs.h causes chaos, so just copy the
+   required values directly.  They are not going to change in the
+   foreseeable future :-)
+*/
+
+#define SHIFT_O   11
+#define SHIFT_S   7
+#define SHIFT_Z   6
+#define SHIFT_A   4
+#define SHIFT_C   0
+#define SHIFT_P   2
+
+/* Single-bit masks corresponding to the shift amounts above. */
+#define MASK_O    (1 << SHIFT_O)
+#define MASK_S    (1 << SHIFT_S)
+#define MASK_Z    (1 << SHIFT_Z)
+#define MASK_A    (1 << SHIFT_A)
+#define MASK_C    (1 << SHIFT_C)
+#define MASK_P    (1 << SHIFT_P)
+
+
+/* Count leading zeroes, with 0-produces-32 semantics: clz32(0) == 32,
+   clz32(0x80000000) == 0, clz32(1) == 31. */
+static UInt clz32 ( UInt x )
+{
+   UInt n = 0;
+   if (x == 0)
+      return 32;
+   /* Shift left until the top bit is set, counting the shifts. */
+   while ((x & 0x80000000u) == 0) {
+      x <<= 1;
+      n++;
+   }
+   return n;
+}
+
+/* Count trailing zeroes, with 0-produces-32 semantics. */
+static UInt ctz32 ( UInt x )
+{
+   UInt n = 0;
+   if (x == 0)
+      return 32;
+   /* Shift right until the bottom bit is set, counting the shifts. */
+   while ((x & 1) == 0) {
+      x >>= 1;
+      n++;
+   }
+   return n;
+}
+
+/* Expand a 4-bit value to 32 bits by replicating each bit 8 times:
+   bit i of 'bits4' fills byte i of the result. */
+static UInt bits4_to_bytes4 ( UInt bits4 )
+{
+   UInt i, r = 0;
+   for (i = 0; i < 4; i++) {
+      if (bits4 & (1u << i))
+         r |= 0xFFu << (8 * i);
+   }
+   return r;
+}
+
+
+/* Expand a 2-bit value to 32 bits by replicating each bit 16 times:
+   bit i of 'bits2' fills 16-bit lane i of the result. */
+static UInt bits2_to_bytes4 ( UInt bits2 )
+{
+   UInt i, r = 0;
+   for (i = 0; i < 2; i++) {
+      if (bits2 & (1u << i))
+         r |= 0xFFFFu << (16 * i);
+   }
+   return r;
+}
+
+
+/* Given partial results from a pcmpXstrX operation (intRes1,
+   basically), generate an I- or M-format output value, also the new
+   OSZACP flags.  */
+static
+void compute_PCMPxSTRx_gen_output (/*OUT*/V128* resV,
+                                   /*OUT*/UInt* resOSZACP,
+                                   UInt intRes1,
+                                   UInt zmaskL, UInt zmaskR,
+                                   UInt validL,
+                                   UInt pol, UInt idx,
+                                   Bool isxSTRM )
+{
+   /* pol is a 2-bit polarity selector, idx a 1-bit output-format
+      selector. */
+   vassert((pol >> 2) == 0);
+   vassert((idx >> 1) == 0);
+
+   UInt intRes2 = 0;
+   switch (pol) {
+      case 0: intRes2 = intRes1;          break; // pol +
+      case 1: intRes2 = ~intRes1;         break; // pol -
+      case 2: intRes2 = intRes1;          break; // pol m+
+      case 3: intRes2 = intRes1 ^ validL; break; // pol m-
+   }
+   intRes2 &= 0xFFFF;   /* only the low 16 bits are meaningful */
+
+   if (isxSTRM) {
+ 
+      // generate M-format output (a bit or byte mask in XMM0)
+      if (idx) {
+         resV->w32[0] = bits4_to_bytes4( (intRes2 >>  0) & 0xF );
+         resV->w32[1] = bits4_to_bytes4( (intRes2 >>  4) & 0xF );
+         resV->w32[2] = bits4_to_bytes4( (intRes2 >>  8) & 0xF );
+         resV->w32[3] = bits4_to_bytes4( (intRes2 >> 12) & 0xF );
+      } else {
+         resV->w32[0] = intRes2 & 0xFFFF;
+         resV->w32[1] = 0;
+         resV->w32[2] = 0;
+         resV->w32[3] = 0;
+      }
+
+   } else {
+
+      // generate I-format output (an index in ECX)
+      // generate ecx value; 16 means "no bit set"
+      UInt newECX = 0;
+      if (idx) {
+         // index of ms-1-bit
+         newECX = intRes2 == 0 ? 16 : (31 - clz32(intRes2));
+      } else {
+         // index of ls-1-bit
+         newECX = intRes2 == 0 ? 16 : ctz32(intRes2);
+      }
+
+      resV->w32[0] = newECX;
+      resV->w32[1] = 0;
+      resV->w32[2] = 0;
+      resV->w32[3] = 0;
+
+   }
+
+   // generate new flags, common to all ISTRI and ISTRM cases
+   *resOSZACP    // A, P are zero
+     = ((intRes2 == 0) ? 0 : MASK_C) // C == 0 iff intRes2 == 0
+     | ((zmaskL == 0)  ? 0 : MASK_Z) // Z == 1 iff any in argL is 0
+     | ((zmaskR == 0)  ? 0 : MASK_S) // S == 1 iff any in argR is 0
+     | ((intRes2 & 1) << SHIFT_O);   // O == IntRes2[0]
+}
+
+
+/* Given partial results from a 16-bit pcmpXstrX operation (intRes1,
+   basically), generate an I- or M-format output value, also the new
+   OSZACP flags.  Same structure as compute_PCMPxSTRx_gen_output, but
+   for 8 lanes of 16 bits rather than 16 lanes of 8 bits. */
+static
+void compute_PCMPxSTRx_gen_output_wide (/*OUT*/V128* resV,
+                                        /*OUT*/UInt* resOSZACP,
+                                        UInt intRes1,
+                                        UInt zmaskL, UInt zmaskR,
+                                        UInt validL,
+                                        UInt pol, UInt idx,
+                                        Bool isxSTRM )
+{
+   /* pol is a 2-bit polarity selector, idx a 1-bit output-format
+      selector. */
+   vassert((pol >> 2) == 0);
+   vassert((idx >> 1) == 0);
+
+   UInt intRes2 = 0;
+   switch (pol) {
+      case 0: intRes2 = intRes1;          break; // pol +
+      case 1: intRes2 = ~intRes1;         break; // pol -
+      case 2: intRes2 = intRes1;          break; // pol m+
+      case 3: intRes2 = intRes1 ^ validL; break; // pol m-
+   }
+   intRes2 &= 0xFF;   /* only the low 8 bits are meaningful */
+
+   if (isxSTRM) {
+ 
+      // generate M-format output (a bit or byte mask in XMM0)
+      if (idx) {
+         resV->w32[0] = bits2_to_bytes4( (intRes2 >> 0) & 0x3 );
+         resV->w32[1] = bits2_to_bytes4( (intRes2 >> 2) & 0x3 );
+         resV->w32[2] = bits2_to_bytes4( (intRes2 >> 4) & 0x3 );
+         resV->w32[3] = bits2_to_bytes4( (intRes2 >> 6) & 0x3 );
+      } else {
+         resV->w32[0] = intRes2 & 0xFF;
+         resV->w32[1] = 0;
+         resV->w32[2] = 0;
+         resV->w32[3] = 0;
+      }
+
+   } else {
+
+      // generate I-format output (an index in ECX)
+      // generate ecx value; 8 means "no bit set"
+      UInt newECX = 0;
+      if (idx) {
+         // index of ms-1-bit
+         newECX = intRes2 == 0 ? 8 : (31 - clz32(intRes2));
+      } else {
+         // index of ls-1-bit
+         newECX = intRes2 == 0 ? 8 : ctz32(intRes2);
+      }
+
+      resV->w32[0] = newECX;
+      resV->w32[1] = 0;
+      resV->w32[2] = 0;
+      resV->w32[3] = 0;
+
+   }
+
+   // generate new flags, common to all ISTRI and ISTRM cases
+   *resOSZACP    // A, P are zero
+     = ((intRes2 == 0) ? 0 : MASK_C) // C == 0 iff intRes2 == 0
+     | ((zmaskL == 0)  ? 0 : MASK_Z) // Z == 1 iff any in argL is 0
+     | ((zmaskR == 0)  ? 0 : MASK_S) // S == 1 iff any in argR is 0
+     | ((intRes2 & 1) << SHIFT_O);   // O == IntRes2[0]
+}
+
+
+/* Compute result and new OSZACP flags for all PCMP{E,I}STR{I,M}
+   variants on 8-bit data.
+
+   For xSTRI variants, the new ECX value is placed in the 32 bits
+   pointed to by *resV, and the top 96 bits are zeroed.  For xSTRM
+   variants, the result is a 128 bit value and is placed at *resV in
+   the obvious way.
+
+   For all variants, the new OSZACP value is placed at *resOSZACP.
+
+   argLV and argRV are the vector args.  The caller must prepare a
+   16-bit mask for each, zmaskL and zmaskR.  For ISTRx variants this
+   must be 1 for each zero byte of the respective arg.  For ESTRx
+   variants this is derived from the explicit length indication, and
+   must be 0 in all places except at the bit index corresponding to
+   the valid length (0 .. 16).  If the valid length is 16 then the
+   mask must be all zeroes.  In all cases, bits 31:16 must be zero.
+
+   imm8 is the original immediate from the instruction.  isxSTRM
+   indicates whether this is a xSTRM or xSTRI variant, which controls
+   how much of *res is written.
+
+   If the given imm8 case can be handled, the return value is True.
+   If not, False is returned, and neither *res nor *resOSZACP are
+   altered.
+*/
+
+Bool compute_PCMPxSTRx ( /*OUT*/V128* resV,
+                         /*OUT*/UInt* resOSZACP,
+                         V128* argLV,  V128* argRV,
+                         UInt zmaskL, UInt zmaskR,
+                         UInt imm8,   Bool isxSTRM )
+{
+   vassert(imm8 < 0x80);
+   vassert((zmaskL >> 16) == 0);
+   vassert((zmaskR >> 16) == 0);
+
+   /* Explicitly reject any imm8 values that haven't been validated,
+      even if they would probably work.  Life is too short to have
+      unvalidated cases in the code base. */
+   switch (imm8) {
+      case 0x00: case 0x02: case 0x08: case 0x0A: case 0x0C: case 0x0E:
+      case 0x12: case 0x14: case 0x1A:
+      case 0x30: case 0x34: case 0x38: case 0x3A:
+      case 0x40: case 0x44: case 0x46: case 0x4A:
+         break;
+      default:
+         return False;
+   }
+
+   // Decode the imm8 control fields.
+   UInt fmt = (imm8 >> 0) & 3; // imm8[1:0]  data format
+   UInt agg = (imm8 >> 2) & 3; // imm8[3:2]  aggregation fn
+   UInt pol = (imm8 >> 4) & 3; // imm8[5:4]  polarity
+   UInt idx = (imm8 >> 6) & 1; // imm8[6]    1==msb/bytemask
+
+   /*----------------------------------------*/
+   /*-- strcmp on byte data                --*/
+   /*----------------------------------------*/
+
+   if (agg == 2/*equal each, aka strcmp*/
+       && (fmt == 0/*ub*/ || fmt == 2/*sb*/)) {
+      Int    i;
+      UChar* argL = (UChar*)argLV;
+      UChar* argR = (UChar*)argRV;
+      // Compare all 16 byte lanes; bit i of boolResII is
+      // (argL[i] == argR[i]).
+      UInt boolResII = 0;
+      for (i = 15; i >= 0; i--) {
+         UChar cL  = argL[i];
+         UChar cR  = argR[i];
+         boolResII = (boolResII << 1) | (cL == cR ? 1 : 0);
+      }
+      // validL/validR: 1 for each lane strictly below the first set
+      // bit of the zmask (all 16 lanes valid if the zmask is zero).
+      UInt validL = ~(zmaskL | -zmaskL);  // not(left(zmaskL))
+      UInt validR = ~(zmaskR | -zmaskR);  // not(left(zmaskR))
+
+      // do invalidation, common to all equal-each cases
+      UInt intRes1
+         = (boolResII & validL & validR)  // if both valid, use cmpres
+           | (~ (validL | validR));       // if both invalid, force 1
+                                          // else force 0
+      intRes1 &= 0xFFFF;
+
+      // generate I-format output
+      compute_PCMPxSTRx_gen_output(
+         resV, resOSZACP,
+         intRes1, zmaskL, zmaskR, validL, pol, idx, isxSTRM
+      );
+
+      return True;
+   }
+
+   /*----------------------------------------*/
+   /*-- set membership on byte data        --*/
+   /*----------------------------------------*/
+
+   if (agg == 0/*equal any, aka find chars in a set*/
+       && (fmt == 0/*ub*/ || fmt == 2/*sb*/)) {
+      /* argL: the string,  argR: charset */
+      UInt   si, ci;
+      UChar* argL    = (UChar*)argLV;
+      UChar* argR    = (UChar*)argRV;
+      UInt   boolRes = 0;
+      UInt   validL  = ~(zmaskL | -zmaskL);  // not(left(zmaskL))
+      UInt   validR  = ~(zmaskR | -zmaskR);  // not(left(zmaskR))
+
+      // For each valid string byte, set its result bit if it matches
+      // any valid charset byte.
+      for (si = 0; si < 16; si++) {
+         if ((validL & (1 << si)) == 0)
+            // run off the end of the string.
+            break;
+         UInt m = 0;
+         for (ci = 0; ci < 16; ci++) {
+            if ((validR & (1 << ci)) == 0) break;
+            if (argR[ci] == argL[si]) { m = 1; break; }
+         }
+         boolRes |= (m << si);
+      }
+
+      // boolRes is "pre-invalidated"
+      UInt intRes1 = boolRes & 0xFFFF;
+   
+      // generate I-format output
+      compute_PCMPxSTRx_gen_output(
+         resV, resOSZACP,
+         intRes1, zmaskL, zmaskR, validL, pol, idx, isxSTRM
+      );
+
+      return True;
+   }
+
+   /*----------------------------------------*/
+   /*-- substring search on byte data      --*/
+   /*----------------------------------------*/
+
+   if (agg == 3/*equal ordered, aka substring search*/
+       && (fmt == 0/*ub*/ || fmt == 2/*sb*/)) {
+
+      /* argL: haystack,  argR: needle */
+      UInt   ni, hi;
+      UChar* argL    = (UChar*)argLV;
+      UChar* argR    = (UChar*)argRV;
+      UInt   boolRes = 0;
+      UInt   validL  = ~(zmaskL | -zmaskL);  // not(left(zmaskL))
+      UInt   validR  = ~(zmaskR | -zmaskR);  // not(left(zmaskR))
+      // Bit hi of boolRes: the needle matches the haystack starting
+      // at offset hi (comparison stops at the end of either vector).
+      for (hi = 0; hi < 16; hi++) {
+         UInt m = 1;
+         for (ni = 0; ni < 16; ni++) {
+            if ((validR & (1 << ni)) == 0) break;
+            UInt i = ni + hi;
+            if (i >= 16) break;
+            if (argL[i] != argR[ni]) { m = 0; break; }
+         }
+         boolRes |= (m << hi);
+         if ((validL & (1 << hi)) == 0)
+            // run off the end of the haystack
+            break;
+      }
+
+      // boolRes is "pre-invalidated"
+      UInt intRes1 = boolRes & 0xFFFF;
+
+      // generate I-format output
+      compute_PCMPxSTRx_gen_output(
+         resV, resOSZACP,
+         intRes1, zmaskL, zmaskR, validL, pol, idx, isxSTRM
+      );
+
+      return True;
+   }
+
+   /*----------------------------------------*/
+   /*-- ranges, unsigned byte data         --*/
+   /*----------------------------------------*/
+
+   if (agg == 1/*ranges*/
+       && fmt == 0/*ub*/) {
+
+      /* argL: string,  argR: range-pairs */
+      UInt   ri, si;
+      UChar* argL    = (UChar*)argLV;
+      UChar* argR    = (UChar*)argRV;
+      UInt   boolRes = 0;
+      UInt   validL  = ~(zmaskL | -zmaskL);  // not(left(zmaskL))
+      UInt   validR  = ~(zmaskR | -zmaskR);  // not(left(zmaskR))
+      // argR holds (lo,hi) pairs; a string byte matches if it falls
+      // inside any fully-valid pair (both lanes of the pair valid).
+      for (si = 0; si < 16; si++) {
+         if ((validL & (1 << si)) == 0)
+            // run off the end of the string
+            break;
+         UInt m = 0;
+         for (ri = 0; ri < 16; ri += 2) {
+            if ((validR & (3 << ri)) != (3 << ri)) break;
+            if (argR[ri] <= argL[si] && argL[si] <= argR[ri+1]) { 
+               m = 1; break;
+            }
+         }
+         boolRes |= (m << si);
+      }
+
+      // boolRes is "pre-invalidated"
+      UInt intRes1 = boolRes & 0xFFFF;
+
+      // generate I-format output
+      compute_PCMPxSTRx_gen_output(
+         resV, resOSZACP,
+         intRes1, zmaskL, zmaskR, validL, pol, idx, isxSTRM
+      );
+
+      return True;
+   }
+
+   /*----------------------------------------*/
+   /*-- ranges, signed byte data           --*/
+   /*----------------------------------------*/
+
+   if (agg == 1/*ranges*/
+       && fmt == 2/*sb*/) {
+
+      /* argL: string,  argR: range-pairs */
+      UInt   ri, si;
+      Char*  argL    = (Char*)argLV;
+      Char*  argR    = (Char*)argRV;
+      UInt   boolRes = 0;
+      UInt   validL  = ~(zmaskL | -zmaskL);  // not(left(zmaskL))
+      UInt   validR  = ~(zmaskR | -zmaskR);  // not(left(zmaskR))
+      // Same as the unsigned-ranges case, but Char lanes make the
+      // <= comparisons signed.
+      for (si = 0; si < 16; si++) {
+         if ((validL & (1 << si)) == 0)
+            // run off the end of the string
+            break;
+         UInt m = 0;
+         for (ri = 0; ri < 16; ri += 2) {
+            if ((validR & (3 << ri)) != (3 << ri)) break;
+            if (argR[ri] <= argL[si] && argL[si] <= argR[ri+1]) { 
+               m = 1; break;
+            }
+         }
+         boolRes |= (m << si);
+      }
+
+      // boolRes is "pre-invalidated"
+      UInt intRes1 = boolRes & 0xFFFF;
+
+      // generate I-format output
+      compute_PCMPxSTRx_gen_output(
+         resV, resOSZACP,
+         intRes1, zmaskL, zmaskR, validL, pol, idx, isxSTRM
+      );
+
+      return True;
+   }
+
+   // Unhandled fmt/agg combination (not in the validated imm8 set).
+   return False;
+}
+
+
+/* Compute result and new OSZACP flags for all PCMP{E,I}STR{I,M}
+   variants on 16-bit characters.
+
+   For xSTRI variants, the new ECX value is placed in the 32 bits
+   pointed to by *resV, and the top 96 bits are zeroed.  For xSTRM
+   variants, the result is a 128 bit value and is placed at *resV in
+   the obvious way.
+
+   For all variants, the new OSZACP value is placed at *resOSZACP.
+
+   argLV and argRV are the vector args.  The caller must prepare an
+   8-bit mask for each, zmaskL and zmaskR.  For ISTRx variants this
+   must be 1 for each zero 16-bit char of the respective arg.  For ESTRx
+   variants this is derived from the explicit length indication, and
+   must be 0 in all places except at the bit index corresponding to
+   the valid length (0 .. 8).  If the valid length is 8 then the
+   mask must be all zeroes.  In all cases, bits 31:8 must be zero.
+
+   imm8 is the original immediate from the instruction.  isxSTRM
+   indicates whether this is a xSTRM or xSTRI variant, which controls
+   how much of *res is written.
+
+   If the given imm8 case can be handled, the return value is True.
+   If not, False is returned, and neither *res nor *resOSZACP are
+   altered.
+*/
+
+Bool compute_PCMPxSTRx_wide ( /*OUT*/V128* resV,
+                              /*OUT*/UInt* resOSZACP,
+                              V128* argLV,  V128* argRV,
+                              UInt zmaskL, UInt zmaskR,
+                              UInt imm8,   Bool isxSTRM )
+{
+   vassert(imm8 < 0x80);
+   vassert((zmaskL >> 8) == 0);
+   vassert((zmaskR >> 8) == 0);
+
+   /* Explicitly reject any imm8 values that haven't been validated,
+      even if they would probably work.  Life is too short to have
+      unvalidated cases in the code base. */
+   switch (imm8) {
+      case 0x01: case 0x03: case 0x09: case 0x0B: case 0x0D:
+      case 0x13:            case 0x1B:
+                            case 0x39: case 0x3B:
+                 case 0x45:            case 0x4B:
+         break;
+      default:
+         return False;
+   }
+
+   // Decode the imm8 control fields.
+   UInt fmt = (imm8 >> 0) & 3; // imm8[1:0]  data format
+   UInt agg = (imm8 >> 2) & 3; // imm8[3:2]  aggregation fn
+   UInt pol = (imm8 >> 4) & 3; // imm8[5:4]  polarity
+   UInt idx = (imm8 >> 6) & 1; // imm8[6]    1==msb/bytemask
+
+   /*----------------------------------------*/
+   /*-- strcmp on wide data                --*/
+   /*----------------------------------------*/
+
+   if (agg == 2/*equal each, aka strcmp*/
+       && (fmt == 1/*uw*/ || fmt == 3/*sw*/)) {
+      Int     i;
+      UShort* argL = (UShort*)argLV;
+      UShort* argR = (UShort*)argRV;
+      // Compare all 8 16-bit lanes; bit i of boolResII is
+      // (argL[i] == argR[i]).
+      UInt boolResII = 0;
+      for (i = 7; i >= 0; i--) {
+         UShort cL  = argL[i];
+         UShort cR  = argR[i];
+         boolResII = (boolResII << 1) | (cL == cR ? 1 : 0);
+      }
+      // validL/validR: 1 for each lane strictly below the first set
+      // bit of the zmask (all 8 lanes valid if the zmask is zero).
+      UInt validL = ~(zmaskL | -zmaskL);  // not(left(zmaskL))
+      UInt validR = ~(zmaskR | -zmaskR);  // not(left(zmaskR))
+
+      // do invalidation, common to all equal-each cases
+      UInt intRes1
+         = (boolResII & validL & validR)  // if both valid, use cmpres
+           | (~ (validL | validR));       // if both invalid, force 1
+                                          // else force 0
+      intRes1 &= 0xFF;
+
+      // generate I-format output
+      compute_PCMPxSTRx_gen_output_wide(
+         resV, resOSZACP,
+         intRes1, zmaskL, zmaskR, validL, pol, idx, isxSTRM
+      );
+
+      return True;
+   }
+
+   /*----------------------------------------*/
+   /*-- set membership on wide data        --*/
+   /*----------------------------------------*/
+
+   if (agg == 0/*equal any, aka find chars in a set*/
+       && (fmt == 1/*uw*/ || fmt == 3/*sw*/)) {
+      /* argL: the string,  argR: charset */
+      UInt    si, ci;
+      UShort* argL    = (UShort*)argLV;
+      UShort* argR    = (UShort*)argRV;
+      UInt    boolRes = 0;
+      UInt    validL  = ~(zmaskL | -zmaskL);  // not(left(zmaskL))
+      UInt    validR  = ~(zmaskR | -zmaskR);  // not(left(zmaskR))
+
+      // For each valid string char, set its result bit if it matches
+      // any valid charset char.
+      for (si = 0; si < 8; si++) {
+         if ((validL & (1 << si)) == 0)
+            // run off the end of the string.
+            break;
+         UInt m = 0;
+         for (ci = 0; ci < 8; ci++) {
+            if ((validR & (1 << ci)) == 0) break;
+            if (argR[ci] == argL[si]) { m = 1; break; }
+         }
+         boolRes |= (m << si);
+      }
+
+      // boolRes is "pre-invalidated"
+      UInt intRes1 = boolRes & 0xFF;
+   
+      // generate I-format output
+      compute_PCMPxSTRx_gen_output_wide(
+         resV, resOSZACP,
+         intRes1, zmaskL, zmaskR, validL, pol, idx, isxSTRM
+      );
+
+      return True;
+   }
+
+   /*----------------------------------------*/
+   /*-- substring search on wide data      --*/
+   /*----------------------------------------*/
+
+   if (agg == 3/*equal ordered, aka substring search*/
+       && (fmt == 1/*uw*/ || fmt == 3/*sw*/)) {
+
+      /* argL: haystack,  argR: needle */
+      UInt    ni, hi;
+      UShort* argL    = (UShort*)argLV;
+      UShort* argR    = (UShort*)argRV;
+      UInt    boolRes = 0;
+      UInt    validL  = ~(zmaskL | -zmaskL);  // not(left(zmaskL))
+      UInt    validR  = ~(zmaskR | -zmaskR);  // not(left(zmaskR))
+      // Bit hi of boolRes: the needle matches the haystack starting
+      // at offset hi (comparison stops at the end of either vector).
+      for (hi = 0; hi < 8; hi++) {
+         UInt m = 1;
+         for (ni = 0; ni < 8; ni++) {
+            if ((validR & (1 << ni)) == 0) break;
+            UInt i = ni + hi;
+            if (i >= 8) break;
+            if (argL[i] != argR[ni]) { m = 0; break; }
+         }
+         boolRes |= (m << hi);
+         if ((validL & (1 << hi)) == 0)
+            // run off the end of the haystack
+            break;
+      }
+
+      // boolRes is "pre-invalidated"
+      UInt intRes1 = boolRes & 0xFF;
+
+      // generate I-format output
+      compute_PCMPxSTRx_gen_output_wide(
+         resV, resOSZACP,
+         intRes1, zmaskL, zmaskR, validL, pol, idx, isxSTRM
+      );
+
+      return True;
+   }
+
+   /*----------------------------------------*/
+   /*-- ranges, unsigned wide data         --*/
+   /*----------------------------------------*/
+
+   if (agg == 1/*ranges*/
+       && fmt == 1/*uw*/) {
+
+      /* argL: string,  argR: range-pairs */
+      UInt    ri, si;
+      UShort* argL    = (UShort*)argLV;
+      UShort* argR    = (UShort*)argRV;
+      UInt    boolRes = 0;
+      UInt    validL  = ~(zmaskL | -zmaskL);  // not(left(zmaskL))
+      UInt    validR  = ~(zmaskR | -zmaskR);  // not(left(zmaskR))
+      // argR holds (lo,hi) pairs; a string char matches if it falls
+      // inside any fully-valid pair (both lanes of the pair valid).
+      for (si = 0; si < 8; si++) {
+         if ((validL & (1 << si)) == 0)
+            // run off the end of the string
+            break;
+         UInt m = 0;
+         for (ri = 0; ri < 8; ri += 2) {
+            if ((validR & (3 << ri)) != (3 << ri)) break;
+            if (argR[ri] <= argL[si] && argL[si] <= argR[ri+1]) { 
+               m = 1; break;
+            }
+         }
+         boolRes |= (m << si);
+      }
+
+      // boolRes is "pre-invalidated"
+      UInt intRes1 = boolRes & 0xFF;
+
+      // generate I-format output
+      compute_PCMPxSTRx_gen_output_wide(
+         resV, resOSZACP,
+         intRes1, zmaskL, zmaskR, validL, pol, idx, isxSTRM
+      );
+
+      return True;
+   }
+
+   // Unhandled fmt/agg combination (not in the validated imm8 set).
+   // NOTE(review): unlike the byte version, there is no signed-ranges
+   // (agg==1, fmt==3) case here -- presumably not yet validated.
+   return False;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                 guest_generic_x87.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_generic_x87.h b/VEX/priv/guest_generic_x87.h
new file mode 100644
index 0000000..8ccdfd6
--- /dev/null
+++ b/VEX/priv/guest_generic_x87.h
@@ -0,0 +1,142 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                               guest_generic_x87.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* This file contains functions for doing some x87-specific
+   operations.  Both the amd64 and x86 front ends (guests) indirectly
+   call these functions via guest helper calls.  By putting them here,
+   code duplication is avoided.  Some of these functions are tricky
+   and hard to verify, so there is much to be said for only having one
+   copy thereof.
+*/
+
+#ifndef __VEX_GUEST_GENERIC_X87_H
+#define __VEX_GUEST_GENERIC_X87_H
+
+#include "libvex_basictypes.h"
+
+
+/* Convert an IEEE754 double (64-bit) into an x87 extended double
+   (80-bit), mimicing the hardware fairly closely.  Both numbers are
+   stored little-endian.  Limitations, all of which could be fixed,
+   given some level of hassle:
+
+   * Identity of NaNs is not preserved.
+
+   See comments in the code for more details.
+*/
+extern
+void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 );
+
+
+/* Convert an x87 extended double (80-bit) into an IEEE 754 double
+   (64-bit), mimicking the hardware fairly closely.  Both numbers are
+   stored little-endian.  Limitations, both of which could be fixed,
+   given some level of hassle:
+
+   * Rounding following truncation could be a bit better.
+
+   * Identity of NaNs is not preserved.
+
+   See comments in the code for more details.
+*/
+extern
+void convert_f80le_to_f64le ( /*IN*/UChar* f80, /*OUT*/UChar* f64 );
+
+
+/* Layout of the real x87 state. */
+typedef
+   struct {
+      UShort env[14];  /* 28-byte environment area */
+      UChar  reg[80];  /* 8 registers x 10 bytes each */
+   }
+   Fpu_State;
+
+/* Offsets, in 16-bit ints, into the FPU environment (env) area. */
+#define FP_ENV_CTRL   0
+#define FP_ENV_STAT   2
+#define FP_ENV_TAG    4
+#define FP_ENV_IP     6 /* and 7 */
+#define FP_ENV_CS     8
+#define FP_ENV_LSTOP  9
+#define FP_ENV_OPOFF  10 /* and 11 */
+#define FP_ENV_OPSEL  12
+#define FP_REG(ii)    (10*(7-(ii)))  /* byte offset of reg ii; stored in reverse order */
+
+
+/* Layout of the 16-bit FNSAVE x87 state. */
+typedef
+   struct {
+      UShort env[7];   /* 14-byte environment area (16-bit format) */
+      UChar  reg[80];  /* 8 registers x 10 bytes each */
+   }
+   Fpu_State_16;
+
+/* Offsets, in 16-bit ints, into the FPU environment (env) area. */
+#define FPS_ENV_CTRL   0
+#define FPS_ENV_STAT   1
+#define FPS_ENV_TAG    2
+#define FPS_ENV_IP     3
+#define FPS_ENV_CS     4
+#define FPS_ENV_OPOFF  5
+#define FPS_ENV_OPSEL  6
+
+
+/* Do the computations for x86/amd64 FXTRACT.  Called directly from
+   generated code.  CLEAN HELPER. */
+extern ULong x86amd64g_calculate_FXTRACT ( ULong arg, HWord getExp );
+
+/* Compute result and new OSZACP flags for all 8-bit PCMP{E,I}STR{I,M}
+   variants.  See bigger comment on implementation of this function
+   for details on call/return conventions. */
+extern Bool compute_PCMPxSTRx ( /*OUT*/V128* resV,
+                                /*OUT*/UInt* resOSZACP,
+                                V128* argLV,  V128* argRV,
+                                UInt zmaskL, UInt zmaskR,
+                                UInt imm8,   Bool isxSTRM );
+
+/* Compute result and new OSZACP flags for all 16-bit PCMP{E,I}STR{I,M}
+   variants.  See bigger comment on implementation of this function
+   for details on call/return conventions. */
+extern Bool compute_PCMPxSTRx_wide ( /*OUT*/V128* resV,
+                                     /*OUT*/UInt* resOSZACP,
+                                     V128* argLV,  V128* argRV,
+                                     UInt zmaskL, UInt zmaskR,
+                                     UInt imm8,   Bool isxSTRM );
+
+#endif /* ndef __VEX_GUEST_GENERIC_X87_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                 guest_generic_x87.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_mips_defs.h b/VEX/priv/guest_mips_defs.h
new file mode 100644
index 0000000..c60e026
--- /dev/null
+++ b/VEX/priv/guest_mips_defs.h
@@ -0,0 +1,147 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 guest_mips_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 RT-RK
+      mips-valgrind@rt-rk.com
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Only to be used within the guest-mips directory. */
+
+#ifndef __VEX_GUEST_MIPS_DEFS_H
+#define __VEX_GUEST_MIPS_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "guest_generic_bb_to_IR.h"  /* DisResult */
+
+/*---------------------------------------------------------*/
+/*---               mips to IR conversion               ---*/
+/*---------------------------------------------------------*/
+
+/* Convert one MIPS insn to IR. See the type DisOneInstrFn in bb_to_IR.h. */
+extern DisResult disInstr_MIPS ( IRSB*        irbb,
+                                 Bool         (*resteerOkFn) (void *, Addr),
+                                 Bool         resteerCisOk,
+                                 void*        callback_opaque,
+                                 const UChar* guest_code,
+                                 Long         delta,
+                                 Addr         guest_IP,
+                                 VexArch      guest_arch,
+                                 const VexArchInfo* archinfo,
+                                 const VexAbiInfo*  abiinfo,
+                                 VexEndness   host_endness,
+                                 Bool         sigill_diag );
+
+/* Used by the optimiser to specialise calls to helpers. */
+extern IRExpr *guest_mips32_spechelper ( const HChar * function_name,
+                                         IRExpr ** args,
+                                         IRStmt ** precedingStmts,
+                                         Int n_precedingStmts );
+
+extern IRExpr *guest_mips64_spechelper ( const HChar * function_name,
+                                         IRExpr ** args,
+                                         IRStmt ** precedingStmts,
+                                         Int n_precedingStmts);
+
+/* Describes to the optimser which part of the guest state require
+   precise memory exceptions.  This is logically part of the guest
+   state description. */
+extern
+Bool guest_mips32_state_requires_precise_mem_exns ( Int, Int,
+                                                    VexRegisterUpdates );
+
+extern
+Bool guest_mips64_state_requires_precise_mem_exns ( Int, Int,
+                                                    VexRegisterUpdates );
+
+extern VexGuestLayout mips32Guest_layout;
+extern VexGuestLayout mips64Guest_layout;
+
+/*---------------------------------------------------------*/
+/*---                mips guest helpers                 ---*/
+/*---------------------------------------------------------*/
+/* FP operation selector passed to the FCSR dirty helpers below
+   (mips_dirtyhelper_calculate_FCSR_fp32/fp64) to identify which
+   instruction's FCSR effect to compute.  Naming: e.g. CEILWS ==
+   CEIL.W.S, CVTDW == CVT.D.W -- presumably; confirm against users. */
+typedef enum {
+   CEILWS=0, CEILWD,  CEILLS,  CEILLD,
+   FLOORWS,  FLOORWD, FLOORLS, FLOORLD,
+   ROUNDWS,  ROUNDWD, ROUNDLS, ROUNDLD,
+   TRUNCWS,  TRUNCWD, TRUNCLS, TRUNCLD,
+   CVTDS,    CVTDW,   CVTSD,   CVTSW,
+   CVTWS,    CVTWD,   CVTDL,   CVTLS,
+   CVTLD,    CVTSL,   ADDS,    ADDD,
+   SUBS,     SUBD,    DIVS
+} flt_op;
+
+extern UInt mips32_dirtyhelper_mfc0 ( UInt rd, UInt sel );
+
+extern ULong mips64_dirtyhelper_dmfc0 ( UInt rd, UInt sel );
+
+
+#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
+extern UInt mips32_dirtyhelper_rdhwr ( UInt rt, UInt rd );
+extern ULong mips64_dirtyhelper_rdhwr ( ULong rt, ULong rd );
+#endif
+
+/* Calculate FCSR in fp32 mode. */
+extern UInt mips_dirtyhelper_calculate_FCSR_fp32 ( void* guest_state, UInt fs,
+                                                   UInt ft, flt_op op );
+/* Calculate FCSR in fp64 mode. */
+extern UInt mips_dirtyhelper_calculate_FCSR_fp64 ( void* guest_state, UInt fs,
+                                                   UInt ft, flt_op op );
+
+/*---------------------------------------------------------*/
+/*---               Condition code stuff                ---*/
+/*---------------------------------------------------------*/
+
+/* Condition codes.  NOTE(review): the per-value comments describe
+   N/Z/C/V flag tests in ARM style, although MIPS has no integer flags
+   register -- presumably inherited notation; confirm against users. */
+typedef enum {
+   MIPSCondEQ = 0,   /* equal                         : Z=1 */
+   MIPSCondNE = 1,   /* not equal                     : Z=0 */
+
+   MIPSCondHS = 2,   /* >=u (higher or same)          : C=1 */
+   MIPSCondLO = 3,   /* <u  (lower)                   : C=0 */
+
+   MIPSCondMI = 4,   /* minus (negative)              : N=1 */
+   MIPSCondPL = 5,   /* plus (zero or +ve)            : N=0 */
+
+   MIPSCondVS = 6,   /* overflow                      : V=1 */
+   MIPSCondVC = 7,   /* no overflow                   : V=0 */
+
+   MIPSCondHI = 8,   /* >u   (higher)                 : C=1 && Z=0 */
+   MIPSCondLS = 9,   /* <=u  (lower or same)          : C=0 || Z=1 */
+
+   MIPSCondGE = 10,  /* >=s (signed greater or equal) : N=V */
+   MIPSCondLT = 11,  /* <s  (signed less than)        : N!=V */
+
+   MIPSCondGT = 12,  /* >s  (signed greater)          : Z=0 && N=V */
+   MIPSCondLE = 13,  /* <=s (signed less or equal)    : Z=1 || N!=V */
+
+   MIPSCondAL = 14,  /* always (unconditional)        : 1 */
+   MIPSCondNV = 15   /* never (unconditional)         : 0 */
+} MIPSCondcode;
+
+#endif            /* __VEX_GUEST_MIPS_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                   guest_mips_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_mips_helpers.c b/VEX/priv/guest_mips_helpers.c
new file mode 100644
index 0000000..863efda
--- /dev/null
+++ b/VEX/priv/guest_mips_helpers.c
@@ -0,0 +1,1439 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                              guest_mips_helpers.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 RT-RK
+      mips-valgrind@rt-rk.com
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_mips32.h"
+#include "libvex_guest_mips64.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_mips_defs.h"
+
+/* This file contains helper functions for mips guest code.  Calls to
+   these functions are generated by the back end.
+*/
+
+/* Build an { offset, size } pair describing one guest-state field, for
+   use in the .alwaysDefd tables below ("always defined" as far as
+   Memcheck is concerned). */
+#define ALWAYSDEFD32(field)                            \
+    { offsetof(VexGuestMIPS32State, field),            \
+      (sizeof ((VexGuestMIPS32State*)0)->field) }
+
+/* Same, but for the 64-bit guest state. */
+#define ALWAYSDEFD64(field)                            \
+    { offsetof(VexGuestMIPS64State, field),            \
+      (sizeof ((VexGuestMIPS64State*)0)->field) }
+
+/* IR specialisation hook for MIPS32 clean-helper calls.  No
+   specialisations are implemented: always returns NULL, which
+   (presumably -- confirm against the caller in ir_opt) means "leave
+   the call unchanged". */
+IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args,
+                                IRStmt ** precedingStmts, Int n_precedingStmts)
+{
+   return NULL;
+}
+
+/* IR specialisation hook for MIPS64.  As with the 32-bit variant, no
+   specialisations are implemented; always returns NULL. */
+IRExpr *guest_mips64_spechelper ( const HChar * function_name, IRExpr ** args,
+                                  IRStmt ** precedingStmts,
+                                  Int n_precedingStmts )
+{
+   return NULL;
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Zero-initialise the whole MIPS32 guest state; FP registers are set
+   to a NaN bit pattern rather than zero. */
+void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state)
+{
+   vex_state->guest_r0 = 0;   /* Hardwired to 0 */
+   vex_state->guest_r1 = 0;   /* Assembler temporary */
+   vex_state->guest_r2 = 0;   /* Values for function returns ... */
+   vex_state->guest_r3 = 0;   /* ...and expression evaluation */
+   vex_state->guest_r4 = 0;   /* Function arguments */
+   vex_state->guest_r5 = 0;
+   vex_state->guest_r6 = 0;
+   vex_state->guest_r7 = 0;
+   vex_state->guest_r8 = 0;   /* Temporaries */
+   vex_state->guest_r9 = 0;
+   vex_state->guest_r10 = 0;
+   vex_state->guest_r11 = 0;
+   vex_state->guest_r12 = 0;
+   vex_state->guest_r13 = 0;
+   vex_state->guest_r14 = 0;
+   vex_state->guest_r15 = 0;
+   vex_state->guest_r16 = 0;  /* Saved temporaries */
+   vex_state->guest_r17 = 0;
+   vex_state->guest_r18 = 0;
+   vex_state->guest_r19 = 0;
+   vex_state->guest_r20 = 0;
+   vex_state->guest_r21 = 0;
+   vex_state->guest_r22 = 0;
+   vex_state->guest_r23 = 0;
+   vex_state->guest_r24 = 0;  /* Temporaries */
+   vex_state->guest_r25 = 0;
+   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
+   vex_state->guest_r27 = 0;
+   vex_state->guest_r28 = 0;  /* Global pointer */
+   vex_state->guest_r29 = 0;  /* Stack pointer */
+   vex_state->guest_r30 = 0;  /* Frame pointer */
+   vex_state->guest_r31 = 0;  /* Return address */
+   vex_state->guest_PC = 0;   /* Program counter */
+   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
+   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */
+
+   /* FPU Registers.  0x7ff800007ff80000 is a NaN bit pattern (in each
+      32-bit half, and also when viewed as one 64-bit double). */
+   vex_state->guest_f0 = 0x7ff800007ff80000ULL; /* Floating point GP registers */
+   vex_state->guest_f1 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f2 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f3 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f4 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f5 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f6 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f7 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f8 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f9 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f31 = 0x7ff800007ff80000ULL;
+
+   vex_state->guest_FIR = 0;  /* FP implementation and revision register */
+   vex_state->guest_FCCR = 0; /* FP condition codes register */
+   vex_state->guest_FEXR = 0; /* FP exceptions register */
+   vex_state->guest_FENR = 0; /* FP enables register */
+   vex_state->guest_FCSR = 0; /* FP control/status register */
+   vex_state->guest_ULR = 0; /* TLS */
+
+   /* Various pseudo-regs mandated by Vex or Valgrind. */
+   /* Emulation notes */
+   vex_state->guest_EMNOTE = 0;
+
+   /* For clflush: record start and length of area to invalidate */
+   vex_state->guest_CMSTART = 0;
+   vex_state->guest_CMLEN = 0;
+   vex_state->host_EvC_COUNTER = 0;
+   vex_state->host_EvC_FAILADDR = 0;
+
+   /* Used to record the unredirected guest address at the start of
+      a translation whose start has been redirected. By reading
+      this pseudo-register shortly afterwards, the translation can
+      find out what the corresponding no-redirection address was.
+      Note, this is only set for wrap-style redirects, not for
+      replace-style ones. */
+   vex_state->guest_NRADDR = 0;
+
+   vex_state->guest_COND = 0;
+
+   /* MIPS32 DSP ASE(r2) specific registers */
+   vex_state->guest_DSPControl = 0;   /* DSPControl register */
+   vex_state->guest_ac0 = 0;          /* Accumulator 0 */
+   vex_state->guest_ac1 = 0;          /* Accumulator 1 */
+   vex_state->guest_ac2 = 0;          /* Accumulator 2 */
+   vex_state->guest_ac3 = 0;          /* Accumulator 3 */
+}
+
+/* Zero-initialise the whole MIPS64 guest state; FP registers are set
+   to a NaN bit pattern rather than zero. */
+void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State * vex_state )
+{
+   vex_state->guest_r0 = 0;  /* Hardwired to 0 */
+   vex_state->guest_r1 = 0;  /* Assembler temporary */
+   vex_state->guest_r2 = 0;  /* Values for function returns ... */
+   vex_state->guest_r3 = 0;
+   vex_state->guest_r4 = 0;  /* Function arguments */
+   vex_state->guest_r5 = 0;
+   vex_state->guest_r6 = 0;
+   vex_state->guest_r7 = 0;
+   vex_state->guest_r8 = 0;
+   vex_state->guest_r9 = 0;
+   vex_state->guest_r10 = 0;
+   vex_state->guest_r11 = 0;
+   vex_state->guest_r12 = 0;  /* Temporaries */
+   vex_state->guest_r13 = 0;
+   vex_state->guest_r14 = 0;
+   vex_state->guest_r15 = 0;
+   vex_state->guest_r16 = 0;  /* Saved temporaries */
+   vex_state->guest_r17 = 0;
+   vex_state->guest_r18 = 0;
+   vex_state->guest_r19 = 0;
+   vex_state->guest_r20 = 0;
+   vex_state->guest_r21 = 0;
+   vex_state->guest_r22 = 0;
+   vex_state->guest_r23 = 0;
+   vex_state->guest_r24 = 0;  /* Temporaries */
+   vex_state->guest_r25 = 0;
+   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
+   vex_state->guest_r27 = 0;
+   vex_state->guest_r28 = 0;  /* Global pointer */
+   vex_state->guest_r29 = 0;  /* Stack pointer */
+   vex_state->guest_r30 = 0;  /* Frame pointer */
+   vex_state->guest_r31 = 0;  /* Return address */
+   vex_state->guest_PC = 0;   /* Program counter */
+   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
+   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */
+
+   /* FPU Registers.  0x7ff800007ff80000 is a NaN bit pattern (in each
+      32-bit half, and also when viewed as one 64-bit double). */
+   vex_state->guest_f0 =  0x7ff800007ff80000ULL;  /* Floating point registers */
+   vex_state->guest_f1 =  0x7ff800007ff80000ULL;
+   vex_state->guest_f2 =  0x7ff800007ff80000ULL;
+   vex_state->guest_f3 =  0x7ff800007ff80000ULL;
+   vex_state->guest_f4 =  0x7ff800007ff80000ULL;
+   vex_state->guest_f5 =  0x7ff800007ff80000ULL;
+   vex_state->guest_f6 =  0x7ff800007ff80000ULL;
+   vex_state->guest_f7 =  0x7ff800007ff80000ULL;
+   vex_state->guest_f8 =  0x7ff800007ff80000ULL;
+   vex_state->guest_f9 =  0x7ff800007ff80000ULL;
+   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
+   vex_state->guest_f31 = 0x7ff800007ff80000ULL;
+
+   vex_state->guest_FIR = 0;   /* FP implementation and revision register */
+   vex_state->guest_FCCR = 0;  /* FP condition codes register */
+   vex_state->guest_FEXR = 0;  /* FP exceptions register */
+   vex_state->guest_FENR = 0;  /* FP enables register */
+   vex_state->guest_FCSR = 0;  /* FP control/status register */
+
+   vex_state->guest_ULR = 0;   /* TLS */
+
+   /* Various pseudo-regs mandated by Vex or Valgrind. */
+   /* Emulation notes */
+   vex_state->guest_EMNOTE = 0;
+
+   /* For clflush: record start and length of area to invalidate */
+   vex_state->guest_CMSTART = 0;
+   vex_state->guest_CMLEN = 0;
+   vex_state->host_EvC_COUNTER = 0;
+   vex_state->host_EvC_FAILADDR = 0;
+
+   /* Used to record the unredirected guest address at the start of
+      a translation whose start has been redirected. By reading
+      this pseudo-register shortly afterwards, the translation can
+      find out what the corresponding no-redirection address was.
+      Note, this is only set for wrap-style redirects, not for
+      replace-style ones. */
+   vex_state->guest_NRADDR = 0;
+
+   vex_state->guest_COND = 0;
+}
+
+/*-----------------------------------------------------------*/
+/*--- Describing the mips guest state, for the benefit    ---*/
+/*--- of iropt and instrumenters.                         ---*/
+/*-----------------------------------------------------------*/
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions.  If in doubt return
+   True (but this generates significantly slower code).
+
+   We enforce precise exns for guest SP, PC.
+
+   Only SP is needed in mode VexRegUpdSpAtMemAccess.
+*/
+Bool guest_mips32_state_requires_precise_mem_exns (
+        Int minoff, Int maxoff, VexRegisterUpdates pxControl
+     )
+{
+   /* r29 is the MIPS stack pointer; each register is 4 bytes wide. */
+   Int sp_min = offsetof(VexGuestMIPS32State, guest_r29);
+   Int sp_max = sp_min + 4 - 1;
+   Int pc_min = offsetof(VexGuestMIPS32State, guest_PC);
+   Int pc_max = pc_min + 4 - 1;
+
+   if (maxoff < sp_min || minoff > sp_max) {
+      /* no overlap with sp */
+      if (pxControl == VexRegUpdSpAtMemAccess)
+         return False;  /* We only need to check stack pointer. */
+   } else {
+      return True;
+   }
+
+   if (maxoff < pc_min || minoff > pc_max) {
+      /* no overlap with pc */
+   } else {
+      return True;
+   }
+
+   /* We appear to need precise updates of the frame pointer (r30) in
+      order to get proper stacktraces from non-optimised code.  (The
+      register actually checked here is guest_r30; "R11" in the
+      original comment was an ARM copy/paste.) */
+   Int fp_min = offsetof(VexGuestMIPS32State, guest_r30);
+   Int fp_max = fp_min + 4 - 1;
+
+   if (maxoff < fp_min || minoff > fp_max) {
+      /* no overlap with fp */
+   } else {
+      return True;
+   }
+
+   return False;
+}
+
+Bool guest_mips64_state_requires_precise_mem_exns (
+        Int minoff, Int maxoff, VexRegisterUpdates pxControl
+     )
+{
+   /* r29 is the MIPS stack pointer; each register is 8 bytes wide. */
+   Int sp_min = offsetof(VexGuestMIPS64State, guest_r29);
+   Int sp_max = sp_min + 8 - 1;
+   Int pc_min = offsetof(VexGuestMIPS64State, guest_PC);
+   Int pc_max = pc_min + 8 - 1;
+
+   if ( maxoff < sp_min || minoff > sp_max ) {
+      /* no overlap with sp */
+      if (pxControl == VexRegUpdSpAtMemAccess)
+         return False;  /* We only need to check stack pointer. */
+   } else {
+      return True;
+   }
+
+   if ( maxoff < pc_min || minoff > pc_max ) {
+      /* no overlap with pc */
+   } else {
+      return True;
+   }
+
+   /* Also require precise updates of the frame pointer (r30) -- same
+      rationale as the 32-bit version: reliable stacktraces from
+      non-optimised code. */
+   Int fp_min = offsetof(VexGuestMIPS64State, guest_r30);
+   Int fp_max = fp_min + 8 - 1;
+
+   if ( maxoff < fp_min || minoff > fp_max ) {
+      /* no overlap with fp */
+   } else {
+      return True;
+   }
+
+   return False;
+}
+
+VexGuestLayout mips32Guest_layout = {
+   /* Total size of the guest state, in bytes. */
+   .total_sizeB = sizeof(VexGuestMIPS32State),
+   /* Describe the stack pointer. */
+   .offset_SP = offsetof(VexGuestMIPS32State, guest_r29),
+   .sizeof_SP = 4,
+   /* Describe the frame pointer. */
+   .offset_FP = offsetof(VexGuestMIPS32State, guest_r30),
+   .sizeof_FP = 4,
+   /* Describe the instruction pointer. */
+   .offset_IP = offsetof(VexGuestMIPS32State, guest_PC),
+   .sizeof_IP = 4,
+   /* Describe any sections to be regarded by Memcheck as
+      'always-defined'. */
+   .n_alwaysDefd = 8,
+   /* { offset, size } pairs; must match n_alwaysDefd above. */
+   .alwaysDefd = {
+             /* 0 */ ALWAYSDEFD32(guest_r0),
+             /* 1 */ ALWAYSDEFD32(guest_r1),
+             /* 2 */ ALWAYSDEFD32(guest_EMNOTE),
+             /* 3 */ ALWAYSDEFD32(guest_CMSTART),
+             /* 4 */ ALWAYSDEFD32(guest_CMLEN),
+             /* 5 */ ALWAYSDEFD32(guest_r29),
+             /* 6 */ ALWAYSDEFD32(guest_r31),
+             /* 7 */ ALWAYSDEFD32(guest_ULR)
+             }
+};
+
+VexGuestLayout mips64Guest_layout = {
+   /* Total size of the guest state, in bytes. */
+   .total_sizeB = sizeof(VexGuestMIPS64State),
+   /* Describe the stack pointer. */
+   .offset_SP = offsetof(VexGuestMIPS64State, guest_r29),
+   .sizeof_SP = 8,
+   /* Describe the frame pointer. */
+   .offset_FP = offsetof(VexGuestMIPS64State, guest_r30),
+   .sizeof_FP = 8,
+   /* Describe the instruction pointer. */
+   .offset_IP = offsetof(VexGuestMIPS64State, guest_PC),
+   .sizeof_IP = 8,
+   /* Describe any sections to be regarded by Memcheck as
+      'always-defined'. */
+   .n_alwaysDefd = 7,
+   /* { offset, size } pairs; must match n_alwaysDefd above.
+      NOTE(review): unlike the 32-bit layout, guest_r1 is not listed
+      here -- confirm whether that is intentional. */
+   .alwaysDefd = {
+                  /* 0 */ ALWAYSDEFD64 (guest_r0),
+                  /* 1 */ ALWAYSDEFD64 (guest_EMNOTE),
+                  /* 2 */ ALWAYSDEFD64 (guest_CMSTART),
+                  /* 3 */ ALWAYSDEFD64 (guest_CMLEN),
+                  /* 4 */ ALWAYSDEFD64 (guest_r29),
+                  /* 5 */ ALWAYSDEFD64 (guest_r31),
+                  /* 6 */ ALWAYSDEFD64 (guest_ULR)
+                  }
+};
+
+/* Expand to one switch case that reads COP0 register $rd, select
+   `sel`, into local variable x via mfc0.  rd and sel must be literal
+   constants because they are stringised into the asm template --
+   which is why the caller needs one case per (rd, sel) pair. */
+#define ASM_VOLATILE_CASE(rd, sel) \
+         case rd: \
+            asm volatile ("mfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \
+            break;
+
+/* DIRTY helper: read host coprocessor-0 register (rd, sel) and return
+   its value.  mfc0 only accepts literal register/select operands, so
+   each (rd, sel) pair needs its own asm statement -- hence the case
+   tables built with ASM_VOLATILE_CASE.  On non-MIPS hosts, or MIPS
+   ISAs before release 2, the whole body is compiled out and the
+   helper returns 0.
+
+   Bug fix: in the sel == 2 table, case rd == 3 previously expanded
+   ASM_VOLATILE_CASE(3, 1), i.e. it read $3 select 1 instead of
+   select 2.  Now ASM_VOLATILE_CASE(3, 2). */
+UInt mips32_dirtyhelper_mfc0(UInt rd, UInt sel)
+{
+   UInt x = 0;
+#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
+   switch (sel) {
+      case 0:
+         switch (rd) {
+            ASM_VOLATILE_CASE(0, 0);
+            ASM_VOLATILE_CASE(1, 0);
+            ASM_VOLATILE_CASE(2, 0);
+            ASM_VOLATILE_CASE(3, 0);
+            ASM_VOLATILE_CASE(4, 0);
+            ASM_VOLATILE_CASE(5, 0);
+            ASM_VOLATILE_CASE(6, 0);
+            ASM_VOLATILE_CASE(7, 0);
+            ASM_VOLATILE_CASE(8, 0);
+            ASM_VOLATILE_CASE(9, 0);
+            ASM_VOLATILE_CASE(10, 0);
+            ASM_VOLATILE_CASE(11, 0);
+            ASM_VOLATILE_CASE(12, 0);
+            ASM_VOLATILE_CASE(13, 0);
+            ASM_VOLATILE_CASE(14, 0);
+            ASM_VOLATILE_CASE(15, 0);
+            ASM_VOLATILE_CASE(16, 0);
+            ASM_VOLATILE_CASE(17, 0);
+            ASM_VOLATILE_CASE(18, 0);
+            ASM_VOLATILE_CASE(19, 0);
+            ASM_VOLATILE_CASE(20, 0);
+            ASM_VOLATILE_CASE(21, 0);
+            ASM_VOLATILE_CASE(22, 0);
+            ASM_VOLATILE_CASE(23, 0);
+            ASM_VOLATILE_CASE(24, 0);
+            ASM_VOLATILE_CASE(25, 0);
+            ASM_VOLATILE_CASE(26, 0);
+            ASM_VOLATILE_CASE(27, 0);
+            ASM_VOLATILE_CASE(28, 0);
+            ASM_VOLATILE_CASE(29, 0);
+            ASM_VOLATILE_CASE(30, 0);
+            ASM_VOLATILE_CASE(31, 0);
+            default:
+               break;
+         }
+         break;
+      case 1:
+         switch (rd) {
+            ASM_VOLATILE_CASE(0, 1);
+            ASM_VOLATILE_CASE(1, 1);
+            ASM_VOLATILE_CASE(2, 1);
+            ASM_VOLATILE_CASE(3, 1);
+            ASM_VOLATILE_CASE(4, 1);
+            ASM_VOLATILE_CASE(5, 1);
+            ASM_VOLATILE_CASE(6, 1);
+            ASM_VOLATILE_CASE(7, 1);
+            ASM_VOLATILE_CASE(8, 1);
+            ASM_VOLATILE_CASE(9, 1);
+            ASM_VOLATILE_CASE(10, 1);
+            ASM_VOLATILE_CASE(11, 1);
+            ASM_VOLATILE_CASE(12, 1);
+            ASM_VOLATILE_CASE(13, 1);
+            ASM_VOLATILE_CASE(14, 1);
+            ASM_VOLATILE_CASE(15, 1);
+            ASM_VOLATILE_CASE(16, 1);
+            ASM_VOLATILE_CASE(17, 1);
+            ASM_VOLATILE_CASE(18, 1);
+            ASM_VOLATILE_CASE(19, 1);
+            ASM_VOLATILE_CASE(20, 1);
+            ASM_VOLATILE_CASE(21, 1);
+            ASM_VOLATILE_CASE(22, 1);
+            ASM_VOLATILE_CASE(23, 1);
+            ASM_VOLATILE_CASE(24, 1);
+            ASM_VOLATILE_CASE(25, 1);
+            ASM_VOLATILE_CASE(26, 1);
+            ASM_VOLATILE_CASE(27, 1);
+            ASM_VOLATILE_CASE(28, 1);
+            ASM_VOLATILE_CASE(29, 1);
+            ASM_VOLATILE_CASE(30, 1);
+            ASM_VOLATILE_CASE(31, 1);
+            default:
+               break;
+         }
+         break;
+      case 2:
+         switch (rd) {
+            ASM_VOLATILE_CASE(0, 2);
+            ASM_VOLATILE_CASE(1, 2);
+            ASM_VOLATILE_CASE(2, 2);
+            ASM_VOLATILE_CASE(3, 2);  /* was (3, 1): wrong select */
+            ASM_VOLATILE_CASE(4, 2);
+            ASM_VOLATILE_CASE(5, 2);
+            ASM_VOLATILE_CASE(6, 2);
+            ASM_VOLATILE_CASE(7, 2);
+            ASM_VOLATILE_CASE(8, 2);
+            ASM_VOLATILE_CASE(9, 2);
+            ASM_VOLATILE_CASE(10, 2);
+            ASM_VOLATILE_CASE(11, 2);
+            ASM_VOLATILE_CASE(12, 2);
+            ASM_VOLATILE_CASE(13, 2);
+            ASM_VOLATILE_CASE(14, 2);
+            ASM_VOLATILE_CASE(15, 2);
+            ASM_VOLATILE_CASE(16, 2);
+            ASM_VOLATILE_CASE(17, 2);
+            ASM_VOLATILE_CASE(18, 2);
+            ASM_VOLATILE_CASE(19, 2);
+            ASM_VOLATILE_CASE(20, 2);
+            ASM_VOLATILE_CASE(21, 2);
+            ASM_VOLATILE_CASE(22, 2);
+            ASM_VOLATILE_CASE(23, 2);
+            ASM_VOLATILE_CASE(24, 2);
+            ASM_VOLATILE_CASE(25, 2);
+            ASM_VOLATILE_CASE(26, 2);
+            ASM_VOLATILE_CASE(27, 2);
+            ASM_VOLATILE_CASE(28, 2);
+            ASM_VOLATILE_CASE(29, 2);
+            ASM_VOLATILE_CASE(30, 2);
+            ASM_VOLATILE_CASE(31, 2);
+            default:
+               break;
+         }
+         break;
+      case 3:
+         switch (rd) {
+            ASM_VOLATILE_CASE(0, 3);
+            ASM_VOLATILE_CASE(1, 3);
+            ASM_VOLATILE_CASE(2, 3);
+            ASM_VOLATILE_CASE(3, 3);
+            ASM_VOLATILE_CASE(4, 3);
+            ASM_VOLATILE_CASE(5, 3);
+            ASM_VOLATILE_CASE(6, 3);
+            ASM_VOLATILE_CASE(7, 3);
+            ASM_VOLATILE_CASE(8, 3);
+            ASM_VOLATILE_CASE(9, 3);
+            ASM_VOLATILE_CASE(10, 3);
+            ASM_VOLATILE_CASE(11, 3);
+            ASM_VOLATILE_CASE(12, 3);
+            ASM_VOLATILE_CASE(13, 3);
+            ASM_VOLATILE_CASE(14, 3);
+            ASM_VOLATILE_CASE(15, 3);
+            ASM_VOLATILE_CASE(16, 3);
+            ASM_VOLATILE_CASE(17, 3);
+            ASM_VOLATILE_CASE(18, 3);
+            ASM_VOLATILE_CASE(19, 3);
+            ASM_VOLATILE_CASE(20, 3);
+            ASM_VOLATILE_CASE(21, 3);
+            ASM_VOLATILE_CASE(22, 3);
+            ASM_VOLATILE_CASE(23, 3);
+            ASM_VOLATILE_CASE(24, 3);
+            ASM_VOLATILE_CASE(25, 3);
+            ASM_VOLATILE_CASE(26, 3);
+            ASM_VOLATILE_CASE(27, 3);
+            ASM_VOLATILE_CASE(28, 3);
+            ASM_VOLATILE_CASE(29, 3);
+            ASM_VOLATILE_CASE(30, 3);
+            ASM_VOLATILE_CASE(31, 3);
+            default:
+               break;
+         }
+         break;
+      case 4:
+         switch (rd) {
+            ASM_VOLATILE_CASE(0, 4);
+            ASM_VOLATILE_CASE(1, 4);
+            ASM_VOLATILE_CASE(2, 4);
+            ASM_VOLATILE_CASE(3, 4);
+            ASM_VOLATILE_CASE(4, 4);
+            ASM_VOLATILE_CASE(5, 4);
+            ASM_VOLATILE_CASE(6, 4);
+            ASM_VOLATILE_CASE(7, 4);
+            ASM_VOLATILE_CASE(8, 4);
+            ASM_VOLATILE_CASE(9, 4);
+            ASM_VOLATILE_CASE(10, 4);
+            ASM_VOLATILE_CASE(11, 4);
+            ASM_VOLATILE_CASE(12, 4);
+            ASM_VOLATILE_CASE(13, 4);
+            ASM_VOLATILE_CASE(14, 4);
+            ASM_VOLATILE_CASE(15, 4);
+            ASM_VOLATILE_CASE(16, 4);
+            ASM_VOLATILE_CASE(17, 4);
+            ASM_VOLATILE_CASE(18, 4);
+            ASM_VOLATILE_CASE(19, 4);
+            ASM_VOLATILE_CASE(20, 4);
+            ASM_VOLATILE_CASE(21, 4);
+            ASM_VOLATILE_CASE(22, 4);
+            ASM_VOLATILE_CASE(23, 4);
+            ASM_VOLATILE_CASE(24, 4);
+            ASM_VOLATILE_CASE(25, 4);
+            ASM_VOLATILE_CASE(26, 4);
+            ASM_VOLATILE_CASE(27, 4);
+            ASM_VOLATILE_CASE(28, 4);
+            ASM_VOLATILE_CASE(29, 4);
+            ASM_VOLATILE_CASE(30, 4);
+            ASM_VOLATILE_CASE(31, 4);
+            default:
+               break;
+         }
+         break;
+      case 5:
+         switch (rd) {
+            ASM_VOLATILE_CASE(0, 5);
+            ASM_VOLATILE_CASE(1, 5);
+            ASM_VOLATILE_CASE(2, 5);
+            ASM_VOLATILE_CASE(3, 5);
+            ASM_VOLATILE_CASE(4, 5);
+            ASM_VOLATILE_CASE(5, 5);
+            ASM_VOLATILE_CASE(6, 5);
+            ASM_VOLATILE_CASE(7, 5);
+            ASM_VOLATILE_CASE(8, 5);
+            ASM_VOLATILE_CASE(9, 5);
+            ASM_VOLATILE_CASE(10, 5);
+            ASM_VOLATILE_CASE(11, 5);
+            ASM_VOLATILE_CASE(12, 5);
+            ASM_VOLATILE_CASE(13, 5);
+            ASM_VOLATILE_CASE(14, 5);
+            ASM_VOLATILE_CASE(15, 5);
+            ASM_VOLATILE_CASE(16, 5);
+            ASM_VOLATILE_CASE(17, 5);
+            ASM_VOLATILE_CASE(18, 5);
+            ASM_VOLATILE_CASE(19, 5);
+            ASM_VOLATILE_CASE(20, 5);
+            ASM_VOLATILE_CASE(21, 5);
+            ASM_VOLATILE_CASE(22, 5);
+            ASM_VOLATILE_CASE(23, 5);
+            ASM_VOLATILE_CASE(24, 5);
+            ASM_VOLATILE_CASE(25, 5);
+            ASM_VOLATILE_CASE(26, 5);
+            ASM_VOLATILE_CASE(27, 5);
+            ASM_VOLATILE_CASE(28, 5);
+            ASM_VOLATILE_CASE(29, 5);
+            ASM_VOLATILE_CASE(30, 5);
+            ASM_VOLATILE_CASE(31, 5);
+            default:
+               break;
+         }
+         break;
+      case 6:
+         switch (rd) {
+            ASM_VOLATILE_CASE(0, 6);
+            ASM_VOLATILE_CASE(1, 6);
+            ASM_VOLATILE_CASE(2, 6);
+            ASM_VOLATILE_CASE(3, 6);
+            ASM_VOLATILE_CASE(4, 6);
+            ASM_VOLATILE_CASE(5, 6);
+            ASM_VOLATILE_CASE(6, 6);
+            ASM_VOLATILE_CASE(7, 6);
+            ASM_VOLATILE_CASE(8, 6);
+            ASM_VOLATILE_CASE(9, 6);
+            ASM_VOLATILE_CASE(10, 6);
+            ASM_VOLATILE_CASE(11, 6);
+            ASM_VOLATILE_CASE(12, 6);
+            ASM_VOLATILE_CASE(13, 6);
+            ASM_VOLATILE_CASE(14, 6);
+            ASM_VOLATILE_CASE(15, 6);
+            ASM_VOLATILE_CASE(16, 6);
+            ASM_VOLATILE_CASE(17, 6);
+            ASM_VOLATILE_CASE(18, 6);
+            ASM_VOLATILE_CASE(19, 6);
+            ASM_VOLATILE_CASE(20, 6);
+            ASM_VOLATILE_CASE(21, 6);
+            ASM_VOLATILE_CASE(22, 6);
+            ASM_VOLATILE_CASE(23, 6);
+            ASM_VOLATILE_CASE(24, 6);
+            ASM_VOLATILE_CASE(25, 6);
+            ASM_VOLATILE_CASE(26, 6);
+            ASM_VOLATILE_CASE(27, 6);
+            ASM_VOLATILE_CASE(28, 6);
+            ASM_VOLATILE_CASE(29, 6);
+            ASM_VOLATILE_CASE(30, 6);
+            ASM_VOLATILE_CASE(31, 6);
+            default:
+               break;
+         }
+         break;
+      case 7:
+         switch (rd) {
+            ASM_VOLATILE_CASE(0, 7);
+            ASM_VOLATILE_CASE(1, 7);
+            ASM_VOLATILE_CASE(2, 7);
+            ASM_VOLATILE_CASE(3, 7);
+            ASM_VOLATILE_CASE(4, 7);
+            ASM_VOLATILE_CASE(5, 7);
+            ASM_VOLATILE_CASE(6, 7);
+            ASM_VOLATILE_CASE(7, 7);
+            ASM_VOLATILE_CASE(8, 7);
+            ASM_VOLATILE_CASE(9, 7);
+            ASM_VOLATILE_CASE(10, 7);
+            ASM_VOLATILE_CASE(11, 7);
+            ASM_VOLATILE_CASE(12, 7);
+            ASM_VOLATILE_CASE(13, 7);
+            ASM_VOLATILE_CASE(14, 7);
+            ASM_VOLATILE_CASE(15, 7);
+            ASM_VOLATILE_CASE(16, 7);
+            ASM_VOLATILE_CASE(17, 7);
+            ASM_VOLATILE_CASE(18, 7);
+            ASM_VOLATILE_CASE(19, 7);
+            ASM_VOLATILE_CASE(20, 7);
+            ASM_VOLATILE_CASE(21, 7);
+            ASM_VOLATILE_CASE(22, 7);
+            ASM_VOLATILE_CASE(23, 7);
+            ASM_VOLATILE_CASE(24, 7);
+            ASM_VOLATILE_CASE(25, 7);
+            ASM_VOLATILE_CASE(26, 7);
+            ASM_VOLATILE_CASE(27, 7);
+            ASM_VOLATILE_CASE(28, 7);
+            ASM_VOLATILE_CASE(29, 7);
+            ASM_VOLATILE_CASE(30, 7);
+            ASM_VOLATILE_CASE(31, 7);
+            default:
+               break;
+         }
+         break;
+      default:
+         /* sel out of range (valid CP0 selects are 0..7). */
+         break;
+   }
+#endif
+   return x;
+}
+
+#undef ASM_VOLATILE_CASE
+
+/* As the earlier ASM_VOLATILE_CASE, but using the 64-bit dmfc0
+   instruction; rd and sel must again be literal constants since they
+   are stringised into the asm template. */
+#define ASM_VOLATILE_CASE(rd, sel) \
+         case rd: \
+            asm volatile ("dmfc0 %0, $" #rd ", "#sel"\n\t" :"=r" (x) ); \
+            break;
+
+ULong mips64_dirtyhelper_dmfc0 ( UInt rd, UInt sel )
+{
+   ULong x = 0;
+#if defined(VGP_mips64_linux)
+   switch (sel) {
+     case 0:
+        /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+        switch (rd) {
+           ASM_VOLATILE_CASE (0, 0);
+           ASM_VOLATILE_CASE (1, 0);
+           ASM_VOLATILE_CASE (2, 0);
+           ASM_VOLATILE_CASE (3, 0);
+           ASM_VOLATILE_CASE (4, 0);
+           ASM_VOLATILE_CASE (5, 0);
+           ASM_VOLATILE_CASE (6, 0);
+           ASM_VOLATILE_CASE (7, 0);
+           ASM_VOLATILE_CASE (8, 0);
+           ASM_VOLATILE_CASE (9, 0);
+           ASM_VOLATILE_CASE (10, 0);
+           ASM_VOLATILE_CASE (11, 0);
+           ASM_VOLATILE_CASE (12, 0);
+           ASM_VOLATILE_CASE (13, 0);
+           ASM_VOLATILE_CASE (14, 0);
+           ASM_VOLATILE_CASE (15, 0);
+           ASM_VOLATILE_CASE (16, 0);
+           ASM_VOLATILE_CASE (17, 0);
+           ASM_VOLATILE_CASE (18, 0);
+           ASM_VOLATILE_CASE (19, 0);
+           ASM_VOLATILE_CASE (20, 0);
+           ASM_VOLATILE_CASE (21, 0);
+           ASM_VOLATILE_CASE (22, 0);
+           ASM_VOLATILE_CASE (23, 0);
+           ASM_VOLATILE_CASE (24, 0);
+           ASM_VOLATILE_CASE (25, 0);
+           ASM_VOLATILE_CASE (26, 0);
+           ASM_VOLATILE_CASE (27, 0);
+           ASM_VOLATILE_CASE (28, 0);
+           ASM_VOLATILE_CASE (29, 0);
+           ASM_VOLATILE_CASE (30, 0);
+           ASM_VOLATILE_CASE (31, 0);
+         default:
+           break;
+        }
+        break;
+     case 1:
+        /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+        switch (rd) {
+           ASM_VOLATILE_CASE (0, 1);
+           ASM_VOLATILE_CASE (1, 1);
+           ASM_VOLATILE_CASE (2, 1);
+           ASM_VOLATILE_CASE (3, 1);
+           ASM_VOLATILE_CASE (4, 1);
+           ASM_VOLATILE_CASE (5, 1);
+           ASM_VOLATILE_CASE (6, 1);
+           ASM_VOLATILE_CASE (7, 1);
+           ASM_VOLATILE_CASE (8, 1);
+           ASM_VOLATILE_CASE (9, 1);
+           ASM_VOLATILE_CASE (10, 1);
+           ASM_VOLATILE_CASE (11, 1);
+           ASM_VOLATILE_CASE (12, 1);
+           ASM_VOLATILE_CASE (13, 1);
+           ASM_VOLATILE_CASE (14, 1);
+           ASM_VOLATILE_CASE (15, 1);
+           ASM_VOLATILE_CASE (16, 1);
+           ASM_VOLATILE_CASE (17, 1);
+           ASM_VOLATILE_CASE (18, 1);
+           ASM_VOLATILE_CASE (19, 1);
+           ASM_VOLATILE_CASE (20, 1);
+           ASM_VOLATILE_CASE (21, 1);
+           ASM_VOLATILE_CASE (22, 1);
+           ASM_VOLATILE_CASE (23, 1);
+           ASM_VOLATILE_CASE (24, 1);
+           ASM_VOLATILE_CASE (25, 1);
+           ASM_VOLATILE_CASE (26, 1);
+           ASM_VOLATILE_CASE (27, 1);
+           ASM_VOLATILE_CASE (28, 1);
+           ASM_VOLATILE_CASE (29, 1);
+           ASM_VOLATILE_CASE (30, 1);
+           ASM_VOLATILE_CASE (31, 1);
+        default:
+           break;
+        }
+        break;
+     case 2:
+        /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+        switch (rd) {
+           ASM_VOLATILE_CASE (0, 2);
+           ASM_VOLATILE_CASE (1, 2);
+           ASM_VOLATILE_CASE (2, 2);
+           ASM_VOLATILE_CASE (3, 1);
+           ASM_VOLATILE_CASE (4, 2);
+           ASM_VOLATILE_CASE (5, 2);
+           ASM_VOLATILE_CASE (6, 2);
+           ASM_VOLATILE_CASE (7, 2);
+           ASM_VOLATILE_CASE (8, 2);
+           ASM_VOLATILE_CASE (9, 2);
+           ASM_VOLATILE_CASE (10, 2);
+           ASM_VOLATILE_CASE (11, 2);
+           ASM_VOLATILE_CASE (12, 2);
+           ASM_VOLATILE_CASE (13, 2);
+           ASM_VOLATILE_CASE (14, 2);
+           ASM_VOLATILE_CASE (15, 2);
+           ASM_VOLATILE_CASE (16, 2);
+           ASM_VOLATILE_CASE (17, 2);
+           ASM_VOLATILE_CASE (18, 2);
+           ASM_VOLATILE_CASE (19, 2);
+           ASM_VOLATILE_CASE (20, 2);
+           ASM_VOLATILE_CASE (21, 2);
+           ASM_VOLATILE_CASE (22, 2);
+           ASM_VOLATILE_CASE (23, 2);
+           ASM_VOLATILE_CASE (24, 2);
+           ASM_VOLATILE_CASE (25, 2);
+           ASM_VOLATILE_CASE (26, 2);
+           ASM_VOLATILE_CASE (27, 2);
+           ASM_VOLATILE_CASE (28, 2);
+           ASM_VOLATILE_CASE (29, 2);
+           ASM_VOLATILE_CASE (30, 2);
+           ASM_VOLATILE_CASE (31, 2);
+         default:
+           break;
+         }
+         break;
+     case 3:
+        /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+        switch (rd) {
+           ASM_VOLATILE_CASE (0, 3);
+           ASM_VOLATILE_CASE (1, 3);
+           ASM_VOLATILE_CASE (2, 3);
+           ASM_VOLATILE_CASE (3, 3);
+           ASM_VOLATILE_CASE (4, 3);
+           ASM_VOLATILE_CASE (5, 3);
+           ASM_VOLATILE_CASE (6, 3);
+           ASM_VOLATILE_CASE (7, 3);
+           ASM_VOLATILE_CASE (8, 3);
+           ASM_VOLATILE_CASE (9, 3);
+           ASM_VOLATILE_CASE (10, 3);
+           ASM_VOLATILE_CASE (11, 3);
+           ASM_VOLATILE_CASE (12, 3);
+           ASM_VOLATILE_CASE (13, 3);
+           ASM_VOLATILE_CASE (14, 3);
+           ASM_VOLATILE_CASE (15, 3);
+           ASM_VOLATILE_CASE (16, 3);
+           ASM_VOLATILE_CASE (17, 3);
+           ASM_VOLATILE_CASE (18, 3);
+           ASM_VOLATILE_CASE (19, 3);
+           ASM_VOLATILE_CASE (20, 3);
+           ASM_VOLATILE_CASE (21, 3);
+           ASM_VOLATILE_CASE (22, 3);
+           ASM_VOLATILE_CASE (23, 3);
+           ASM_VOLATILE_CASE (24, 3);
+           ASM_VOLATILE_CASE (25, 3);
+           ASM_VOLATILE_CASE (26, 3);
+           ASM_VOLATILE_CASE (27, 3);
+           ASM_VOLATILE_CASE (28, 3);
+           ASM_VOLATILE_CASE (29, 3);
+           ASM_VOLATILE_CASE (30, 3);
+           ASM_VOLATILE_CASE (31, 3);
+        default:
+           break;
+        }
+        break;
+     case 4:
+        /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+        switch (rd) {
+           ASM_VOLATILE_CASE (0, 4);
+           ASM_VOLATILE_CASE (1, 4);
+           ASM_VOLATILE_CASE (2, 4);
+           ASM_VOLATILE_CASE (3, 4);
+           ASM_VOLATILE_CASE (4, 4);
+           ASM_VOLATILE_CASE (5, 4);
+           ASM_VOLATILE_CASE (6, 4);
+           ASM_VOLATILE_CASE (7, 4);
+           ASM_VOLATILE_CASE (8, 4);
+           ASM_VOLATILE_CASE (9, 4);
+           ASM_VOLATILE_CASE (10, 4);
+           ASM_VOLATILE_CASE (11, 4);
+           ASM_VOLATILE_CASE (12, 4);
+           ASM_VOLATILE_CASE (13, 4);
+           ASM_VOLATILE_CASE (14, 4);
+           ASM_VOLATILE_CASE (15, 4);
+           ASM_VOLATILE_CASE (16, 4);
+           ASM_VOLATILE_CASE (17, 4);
+           ASM_VOLATILE_CASE (18, 4);
+           ASM_VOLATILE_CASE (19, 4);
+           ASM_VOLATILE_CASE (20, 4);
+           ASM_VOLATILE_CASE (21, 4);
+           ASM_VOLATILE_CASE (22, 4);
+           ASM_VOLATILE_CASE (23, 4);
+           ASM_VOLATILE_CASE (24, 4);
+           ASM_VOLATILE_CASE (25, 4);
+           ASM_VOLATILE_CASE (26, 4);
+           ASM_VOLATILE_CASE (27, 4);
+           ASM_VOLATILE_CASE (28, 4);
+           ASM_VOLATILE_CASE (29, 4);
+           ASM_VOLATILE_CASE (30, 4);
+           ASM_VOLATILE_CASE (31, 4);
+           default:
+              break;
+           }
+        break;
+     case 5:
+        /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+        switch (rd) {
+           ASM_VOLATILE_CASE (0, 5);
+           ASM_VOLATILE_CASE (1, 5);
+           ASM_VOLATILE_CASE (2, 5);
+           ASM_VOLATILE_CASE (3, 5);
+           ASM_VOLATILE_CASE (4, 5);
+           ASM_VOLATILE_CASE (5, 5);
+           ASM_VOLATILE_CASE (6, 5);
+           ASM_VOLATILE_CASE (7, 5);
+           ASM_VOLATILE_CASE (8, 5);
+           ASM_VOLATILE_CASE (9, 5);
+           ASM_VOLATILE_CASE (10, 5);
+           ASM_VOLATILE_CASE (11, 5);
+           ASM_VOLATILE_CASE (12, 5);
+           ASM_VOLATILE_CASE (13, 5);
+           ASM_VOLATILE_CASE (14, 5);
+           ASM_VOLATILE_CASE (15, 5);
+           ASM_VOLATILE_CASE (16, 5);
+           ASM_VOLATILE_CASE (17, 5);
+           ASM_VOLATILE_CASE (18, 5);
+           ASM_VOLATILE_CASE (19, 5);
+           ASM_VOLATILE_CASE (20, 5);
+           ASM_VOLATILE_CASE (21, 5);
+           ASM_VOLATILE_CASE (22, 5);
+           ASM_VOLATILE_CASE (23, 5);
+           ASM_VOLATILE_CASE (24, 5);
+           ASM_VOLATILE_CASE (25, 5);
+           ASM_VOLATILE_CASE (26, 5);
+           ASM_VOLATILE_CASE (27, 5);
+           ASM_VOLATILE_CASE (28, 5);
+           ASM_VOLATILE_CASE (29, 5);
+           ASM_VOLATILE_CASE (30, 5);
+           ASM_VOLATILE_CASE (31, 5);
+           default:
+              break;
+        }
+        break;
+     case 6:
+        /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+        switch (rd) {
+           ASM_VOLATILE_CASE (0, 6);
+           ASM_VOLATILE_CASE (1, 6);
+           ASM_VOLATILE_CASE (2, 6);
+           ASM_VOLATILE_CASE (3, 6);
+           ASM_VOLATILE_CASE (4, 6);
+           ASM_VOLATILE_CASE (5, 6);
+           ASM_VOLATILE_CASE (6, 6);
+           ASM_VOLATILE_CASE (7, 6);
+           ASM_VOLATILE_CASE (8, 6);
+           ASM_VOLATILE_CASE (9, 6);
+           ASM_VOLATILE_CASE (10, 6);
+           ASM_VOLATILE_CASE (11, 6);
+           ASM_VOLATILE_CASE (12, 6);
+           ASM_VOLATILE_CASE (13, 6);
+           ASM_VOLATILE_CASE (14, 6);
+           ASM_VOLATILE_CASE (15, 6);
+           ASM_VOLATILE_CASE (16, 6);
+           ASM_VOLATILE_CASE (17, 6);
+           ASM_VOLATILE_CASE (18, 6);
+           ASM_VOLATILE_CASE (19, 6);
+           ASM_VOLATILE_CASE (20, 6);
+           ASM_VOLATILE_CASE (21, 6);
+           ASM_VOLATILE_CASE (22, 6);
+           ASM_VOLATILE_CASE (23, 6);
+           ASM_VOLATILE_CASE (24, 6);
+           ASM_VOLATILE_CASE (25, 6);
+           ASM_VOLATILE_CASE (26, 6);
+           ASM_VOLATILE_CASE (27, 6);
+           ASM_VOLATILE_CASE (28, 6);
+           ASM_VOLATILE_CASE (29, 6);
+           ASM_VOLATILE_CASE (30, 6);
+           ASM_VOLATILE_CASE (31, 6);
+        default:
+           break;
+        }
+        break;
+     case 7:
+        /* __asm__("dmfc0 %0, $1, 0" :"=r" (x)); */
+        switch (rd) {
+           ASM_VOLATILE_CASE (0, 7);
+           ASM_VOLATILE_CASE (1, 7);
+           ASM_VOLATILE_CASE (2, 7);
+           ASM_VOLATILE_CASE (3, 7);
+           ASM_VOLATILE_CASE (4, 7);
+           ASM_VOLATILE_CASE (5, 7);
+           ASM_VOLATILE_CASE (6, 7);
+           ASM_VOLATILE_CASE (7, 7);
+           ASM_VOLATILE_CASE (8, 7);
+           ASM_VOLATILE_CASE (9, 7);
+           ASM_VOLATILE_CASE (10, 7);
+           ASM_VOLATILE_CASE (11, 7);
+           ASM_VOLATILE_CASE (12, 7);
+           ASM_VOLATILE_CASE (13, 7);
+           ASM_VOLATILE_CASE (14, 7);
+           ASM_VOLATILE_CASE (15, 7);
+           ASM_VOLATILE_CASE (16, 7);
+           ASM_VOLATILE_CASE (17, 7);
+           ASM_VOLATILE_CASE (18, 7);
+           ASM_VOLATILE_CASE (19, 7);
+           ASM_VOLATILE_CASE (20, 7);
+           ASM_VOLATILE_CASE (21, 7);
+           ASM_VOLATILE_CASE (22, 7);
+           ASM_VOLATILE_CASE (23, 7);
+           ASM_VOLATILE_CASE (24, 7);
+           ASM_VOLATILE_CASE (25, 7);
+           ASM_VOLATILE_CASE (26, 7);
+           ASM_VOLATILE_CASE (27, 7);
+           ASM_VOLATILE_CASE (28, 7);
+           ASM_VOLATILE_CASE (29, 7);
+           ASM_VOLATILE_CASE (30, 7);
+           ASM_VOLATILE_CASE (31, 7);
+         default:
+           break;
+         }
+       break;
+
+     default:
+       break;
+     }
+#endif
+   return x;
+}
+
+#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
+UInt mips32_dirtyhelper_rdhwr ( UInt rt, UInt rd )
+{
+   UInt x = 0;
+   switch (rd) {
+      case 1:  /* x = SYNCI_StepSize() */
+         __asm__ __volatile__("rdhwr %0, $1\n\t" : "=r" (x) );
+         break;
+
+      default:
+         vassert(0);
+         break;
+   }
+   return x;
+}
+
+ULong mips64_dirtyhelper_rdhwr ( ULong rt, ULong rd )
+{
+   ULong x = 0;
+   switch (rd) {
+      case 1:  /* x = SYNCI_StepSize() */
+         __asm__ __volatile__("rdhwr %0, $1\n\t" : "=r" (x) );
+         break;
+
+      default:
+         vassert(0);
+         break;
+   }
+   return x;
+}
+#endif
+
/* The macros below execute a single FP instruction on the host FPU in
   order to observe the FCSR value it produces.  Each expansion:
     - saves the host FCSR into $t0           (cfc1),
     - installs the guest FCSR                (ctc1),
     - loads the operand(s) into scratch FP registers,
     - executes the instruction,
     - reads the resulting FCSR into 'ret'    (cfc1),
     - restores the saved host FCSR           (ctc1).
   They expand in a context where 'ret', 'fcsr' and the operand
   variables (loFsVal/hiFsVal/loFtVal/hiFtVal, or addr/fs/ft) are in
   scope -- the names are bound textually, so those locals must not be
   renamed at the call sites.  Clobbered registers are declared in each
   asm's clobber list. */

/* Unary single-precision op; 32-bit operand in loFsVal. */
#define ASM_VOLATILE_UNARY32(inst)                                  \
   __asm__ volatile("cfc1  $t0,  $31"   "\n\t"                      \
                    "ctc1  %2,   $31"   "\n\t"                      \
                    "mtc1  %1,   $f20"  "\n\t"                      \
                    #inst" $f20, $f20"  "\n\t"                      \
                    "cfc1  %0,   $31"   "\n\t"                      \
                    "ctc1  $t0,  $31"   "\n\t"                      \
                    : "=r" (ret)                                    \
                    : "r" (loFsVal), "r" (fcsr)                     \
                    : "t0", "$f20"                                  \
                   );

/* Unary double-precision op in FP32 mode: the two 32-bit halves
   (loFsVal/hiFsVal) are placed in the even/odd pair $f20/$f21. */
#define ASM_VOLATILE_UNARY32_DOUBLE(inst)                           \
   __asm__ volatile("cfc1  $t0,  $31"   "\n\t"                      \
                    "ctc1  %3,   $31"   "\n\t"                      \
                    "mtc1  %1,   $f20"  "\n\t"                      \
                    "mtc1  %2,   $f21"  "\n\t"                      \
                    #inst" $f20, $f20"  "\n\t"                      \
                    "cfc1  %0,   $31"   "\n\t"                      \
                    "ctc1  $t0,  $31"   "\n\t"                      \
                    : "=r" (ret)                                    \
                    : "r" (loFsVal), "r" (hiFsVal), "r" (fcsr)      \
                    : "t0", "$f20", "$f21"                          \
                   );

/* Unary op in FP64 mode: loads the full 64-bit register straight from
   guest state memory (addr[fs]) with ldc1. */
#define ASM_VOLATILE_UNARY64(inst)                                  \
   __asm__ volatile("cfc1  $t0,  $31"    "\n\t"                     \
                    "ctc1  %2,   $31"    "\n\t"                     \
                    "ldc1  $f24, 0(%1)"  "\n\t"                     \
                    #inst" $f24, $f24"   "\n\t"                     \
                    "cfc1  %0,   $31"    "\n\t"                     \
                    "ctc1  $t0,  $31"    "\n\t"                     \
                    : "=r" (ret)                                    \
                    : "r" (&(addr[fs])), "r" (fcsr)                 \
                    : "t0", "$f24"                                  \
                   );

/* Binary single-precision op; operands in loFsVal and loFtVal. */
#define ASM_VOLATILE_BINARY32(inst)                                 \
   __asm__ volatile("cfc1  $t0,  $31"         "\n\t"                \
                    "ctc1  %3,   $31"         "\n\t"                \
                    "mtc1  %1,   $f20"        "\n\t"                \
                    "mtc1  %2,   $f22"        "\n\t"                \
                    #inst" $f20, $f20, $f22"  "\n\t"                \
                    "cfc1  %0,   $31"         "\n\t"                \
                    "ctc1  $t0,  $31"         "\n\t"                \
                    : "=r" (ret)                                    \
                    : "r" (loFsVal), "r" (loFtVal), "r" (fcsr)      \
                    : "t0", "$f20", "$f22"                          \
                   );

/* Binary double-precision op in FP32 mode: both operands assembled
   from their 32-bit halves into the pairs $f20/$f21 and $f22/$f23. */
#define ASM_VOLATILE_BINARY32_DOUBLE(inst)                          \
   __asm__ volatile("cfc1  $t0,  $31"         "\n\t"                \
                    "ctc1  %5,   $31"         "\n\t"                \
                    "mtc1  %1,   $f20"        "\n\t"                \
                    "mtc1  %2,   $f21"        "\n\t"                \
                    "mtc1  %3,   $f22"        "\n\t"                \
                    "mtc1  %4,   $f23"        "\n\t"                \
                    #inst" $f20, $f20, $f22"  "\n\t"                \
                    "cfc1  %0,   $31"         "\n\t"                \
                    "ctc1  $t0,  $31"         "\n\t"                \
                    : "=r" (ret)                                    \
                    : "r" (loFsVal), "r" (hiFsVal), "r" (loFtVal),  \
                      "r" (hiFtVal), "r" (fcsr)                     \
                    : "t0", "$f20", "$f21", "$f22", "$f23"          \
                   );

/* Binary op in FP64 mode: both 64-bit operands loaded from guest
   state memory with ldc1. */
#define ASM_VOLATILE_BINARY64(inst)                                     \
   __asm__ volatile("cfc1  $t0,  $31"         "\n\t"                    \
                    "ctc1  %3,   $31"         "\n\t"                    \
                    "ldc1  $f24, 0(%1)"       "\n\t"                    \
                    "ldc1  $f26, 0(%2)"       "\n\t"                    \
                    #inst" $f24, $f24, $f26"  "\n\t"                    \
                    "cfc1  %0,   $31"         "\n\t"                    \
                    "ctc1  $t0,  $31"         "\n\t"                    \
                    : "=r" (ret)                                        \
                    : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr)  \
                    : "t0", "$f24", "$f26"                              \
                   );
+
/* TODO: Add cases for all fpu instructions, since every fpu
         instruction changes the value of the FCSR register. */
/* Dirty helper: compute the FCSR value that results from executing
   the FP instruction 'inst' on the operands held in guest FP
   registers fs and ft (ft is only meaningful for the binary ops),
   with the guest's current FCSR installed.  Used when the guest FPU
   is in FP32 mode.  Returns the resulting FCSR, or 0 when not built
   for a MIPS host. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
   UInt loFsVal, hiFsVal, loFtVal, hiFtVal;
#if defined (_MIPSEL)
   /* Little-endian: view the FP register file as 64-bit slots; the
      low half of a double comes from slot fs, the high half from the
      paired slot fs+1. */
   ULong *addr = (ULong *)&guest_state->guest_f0;
   loFsVal     = (UInt)addr[fs];
   hiFsVal     = (UInt)addr[fs+1];
   loFtVal     = (UInt)addr[ft];
   hiFtVal     = (UInt)addr[ft+1];
#elif defined (_MIPSEB)
   /* Big-endian: view the register file as 32-bit words; register k's
      word is taken at index k*2 and the pair's other half two words
      further on.  NOTE(review): confirm these indices against the
      guest state layout -- k*2+1 might be expected here. */
   UInt *addr = (UInt *)&guest_state->guest_f0;
   loFsVal    = (UInt)addr[fs*2];
   hiFsVal    = (UInt)addr[fs*2+2];
   loFtVal    = (UInt)addr[ft*2];
   hiFtVal    = (UInt)addr[ft*2+2];
#endif
   UInt fcsr     = guest_state->guest_FCSR;
   /* The ASM_VOLATILE_* macros bind ret/fcsr and the operand names
      textually -- do not rename the locals above. */
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY32_DOUBLE(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY32(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY32_DOUBLE(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY32(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY32_DOUBLE(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY32(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY32_DOUBLE(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY32(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY32(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY32(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY32(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY32(round.w.s)
         break;
/* The 64-bit (".l") conversions only exist on MIPS32r2+ or MIPS64. */
#if ((__mips == 32) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) \
    || (__mips == 64)
      case CEILLS:
         ASM_VOLATILE_UNARY32(ceil.l.s)
         break;
      case CEILLD:
         ASM_VOLATILE_UNARY32_DOUBLE(ceil.l.d)
         break;
      case CVTDL:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.d.l)
         break;
      case CVTLS:
         ASM_VOLATILE_UNARY32(cvt.l.s)
         break;
      case CVTLD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.l.d)
         break;
      case CVTSL:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.l)
         break;
      case FLOORLS:
         ASM_VOLATILE_UNARY32(floor.l.s)
         break;
      case FLOORLD:
         ASM_VOLATILE_UNARY32_DOUBLE(floor.l.d)
         break;
      case ROUNDLS:
         ASM_VOLATILE_UNARY32(round.l.s)
         break;
      case ROUNDLD:
         ASM_VOLATILE_UNARY32_DOUBLE(round.l.d)
         break;
      case TRUNCLS:
         ASM_VOLATILE_UNARY32(trunc.l.s)
         break;
      case TRUNCLD:
         ASM_VOLATILE_UNARY32_DOUBLE(trunc.l.d)
         break;
#endif
      case ADDS:
          ASM_VOLATILE_BINARY32(add.s)
          break;
      case ADDD:
          ASM_VOLATILE_BINARY32_DOUBLE(add.d)
          break;
      case SUBS:
          ASM_VOLATILE_BINARY32(sub.s)
          break;
      case SUBD:
          ASM_VOLATILE_BINARY32_DOUBLE(sub.d)
          break;
      case DIVS:
          ASM_VOLATILE_BINARY32(div.s)
          break;
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}
+
/* TODO: Add cases for all fpu instructions, since every fpu
         instruction changes the value of the FCSR register. */
/* Dirty helper: compute the FCSR value that results from executing
   the FP instruction 'inst' on guest FP registers fs and ft (ft is
   only meaningful for the binary ops), with the guest's current FCSR
   installed.  Used when the guest FPU is in FP64 mode, where each FP
   register is a full 64-bit slot, so operands are loaded directly
   from guest state memory (see ASM_VOLATILE_UNARY64/BINARY64).
   Returns the resulting FCSR, or 0 when not built for a MIPS host. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__)
#if defined(VGA_mips32)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
#else
   VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
#endif
   /* 'addr' and 'fcsr' are bound textually by the ASM_VOLATILE_*64
      macros -- do not rename them. */
   ULong *addr = (ULong *)&guest_state->guest_f0;
   UInt fcsr   = guest_state->guest_FCSR;
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY64(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY64(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY64(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY64(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY64(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY64(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY64(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY64(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY64(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY64(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY64(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY64(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY64(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY64(round.w.s)
         break;
/* The 64-bit (".l") conversions only exist on MIPS32r2+ or MIPS64. */
#if ((__mips == 32) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) \
    || (__mips == 64)
      case CEILLS:
         ASM_VOLATILE_UNARY64(ceil.l.s)
         break;
      case CEILLD:
         ASM_VOLATILE_UNARY64(ceil.l.d)
         break;
      case CVTDL:
         ASM_VOLATILE_UNARY64(cvt.d.l)
         break;
      case CVTLS:
         ASM_VOLATILE_UNARY64(cvt.l.s)
         break;
      case CVTLD:
         ASM_VOLATILE_UNARY64(cvt.l.d)
         break;
      case CVTSL:
         ASM_VOLATILE_UNARY64(cvt.s.l)
         break;
      case FLOORLS:
         ASM_VOLATILE_UNARY64(floor.l.s)
         break;
      case FLOORLD:
         ASM_VOLATILE_UNARY64(floor.l.d)
         break;
      case ROUNDLS:
         ASM_VOLATILE_UNARY64(round.l.s)
         break;
      case ROUNDLD:
         ASM_VOLATILE_UNARY64(round.l.d)
         break;
      case TRUNCLS:
         ASM_VOLATILE_UNARY64(trunc.l.s)
         break;
      case TRUNCLD:
         ASM_VOLATILE_UNARY64(trunc.l.d)
         break;
#endif
      case ADDS:
          ASM_VOLATILE_BINARY64(add.s)
          break;
      case ADDD:
          ASM_VOLATILE_BINARY64(add.d)
          break;
      case SUBS:
          ASM_VOLATILE_BINARY64(sub.s)
          break;
      case SUBD:
          ASM_VOLATILE_BINARY64(sub.d)
          break;
      case DIVS:
          ASM_VOLATILE_BINARY64(div.s)
          break;
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}
+
+/*---------------------------------------------------------------*/
+/*--- end                                guest_mips_helpers.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_mips_toIR.c b/VEX/priv/guest_mips_toIR.c
new file mode 100644
index 0000000..8e94298
--- /dev/null
+++ b/VEX/priv/guest_mips_toIR.c
@@ -0,0 +1,17327 @@
+
+/*--------------------------------------------------------------------*/
+/*--- begin                                      guest_mips_toIR.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 RT-RK
+      mips-valgrind@rt-rk.com
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Translates MIPS code to IR. */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_guest_mips32.h"
+#include "libvex_guest_mips64.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_mips_defs.h"
+
/*------------------------------------------------------------*/
/*---                      Globals                         ---*/
/*------------------------------------------------------------*/

/* These are set at the start of the translation of an instruction, so
   that we don't have to pass them around endlessly.  CONST means does
   not change during translation of the instruction. */

/* CONST: what is the host's endianness?  It affects how 64-bit FP
   values are split across 32-bit FP register halves.  (This comment
   was inherited from the ARM/VFP front end; the analogous concern on
   MIPS is FP register pair handling.) */
static VexEndness host_endness;

/* Pointer to the guest code area (the bytes being disassembled). */
static const UChar *guest_code;

/* CONST: The guest address for the instruction currently being
   translated. */
#if defined(VGP_mips32_linux)
static Addr32 guest_PC_curr_instr;
#else
static Addr64 guest_PC_curr_instr;
#endif

/* MOD: The IRSB* into which we're generating code. */
static IRSB *irsb;

/* Is our guest binary 32 or 64bit?  Set at each call to
   disInstr_MIPS below. */
static Bool mode64 = False;

/* CPU has FPU and 32 dbl. prec. FP registers. */
static Bool fp_mode64 = False;

/* Define 1.0 in single and double precision (IEEE-754 bit patterns). */
#define ONE_SINGLE 0x3F800000
#define ONE_DOUBLE 0x3FF0000000000000ULL

/*------------------------------------------------------------*/
/*---                  Debugging output                    ---*/
/*------------------------------------------------------------*/

/* Print front-end trace output when VEX_TRACE_FE is enabled. */
#define DIP(format, args...)           \
   if (vex_traceflags & VEX_TRACE_FE)  \
      vex_printf(format, ## args)
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for deconstructing the        ---*/
+/*--- mips insn stream.                                    ---*/
+/*------------------------------------------------------------*/
+
+/* ---------------- Integer registers ---------------- */
+
+static UInt integerGuestRegOffset(UInt iregNo)
+{
+   /* Do we care about endianness here?  We do if sub-parts of integer
+      registers are accessed, but I don't think that ever happens on
+      MIPS. */
+   UInt ret;
+   if (!mode64)
+      switch (iregNo) {
+         case 0:
+            ret = offsetof(VexGuestMIPS32State, guest_r0); break;
+         case 1:
+            ret = offsetof(VexGuestMIPS32State, guest_r1); break;
+         case 2:
+            ret = offsetof(VexGuestMIPS32State, guest_r2); break;
+         case 3:
+            ret = offsetof(VexGuestMIPS32State, guest_r3); break;
+         case 4:
+            ret = offsetof(VexGuestMIPS32State, guest_r4); break;
+         case 5:
+            ret = offsetof(VexGuestMIPS32State, guest_r5); break;
+         case 6:
+            ret = offsetof(VexGuestMIPS32State, guest_r6); break;
+         case 7:
+            ret = offsetof(VexGuestMIPS32State, guest_r7); break;
+         case 8:
+            ret = offsetof(VexGuestMIPS32State, guest_r8); break;
+         case 9:
+            ret = offsetof(VexGuestMIPS32State, guest_r9); break;
+         case 10:
+            ret = offsetof(VexGuestMIPS32State, guest_r10); break;
+         case 11:
+            ret = offsetof(VexGuestMIPS32State, guest_r11); break;
+         case 12:
+            ret = offsetof(VexGuestMIPS32State, guest_r12); break;
+         case 13:
+            ret = offsetof(VexGuestMIPS32State, guest_r13); break;
+         case 14:
+            ret = offsetof(VexGuestMIPS32State, guest_r14); break;
+         case 15:
+            ret = offsetof(VexGuestMIPS32State, guest_r15); break;
+         case 16:
+            ret = offsetof(VexGuestMIPS32State, guest_r16); break;
+         case 17:
+            ret = offsetof(VexGuestMIPS32State, guest_r17); break;
+         case 18:
+            ret = offsetof(VexGuestMIPS32State, guest_r18); break;
+         case 19:
+            ret = offsetof(VexGuestMIPS32State, guest_r19); break;
+         case 20:
+            ret = offsetof(VexGuestMIPS32State, guest_r20); break;
+         case 21:
+            ret = offsetof(VexGuestMIPS32State, guest_r21); break;
+         case 22:
+            ret = offsetof(VexGuestMIPS32State, guest_r22); break;
+         case 23:
+            ret = offsetof(VexGuestMIPS32State, guest_r23); break;
+         case 24:
+            ret = offsetof(VexGuestMIPS32State, guest_r24); break;
+         case 25:
+            ret = offsetof(VexGuestMIPS32State, guest_r25); break;
+         case 26:
+            ret = offsetof(VexGuestMIPS32State, guest_r26); break;
+         case 27:
+            ret = offsetof(VexGuestMIPS32State, guest_r27); break;
+         case 28:
+            ret = offsetof(VexGuestMIPS32State, guest_r28); break;
+         case 29:
+            ret = offsetof(VexGuestMIPS32State, guest_r29); break;
+         case 30:
+            ret = offsetof(VexGuestMIPS32State, guest_r30); break;
+         case 31:
+            ret = offsetof(VexGuestMIPS32State, guest_r31); break;
+         default:
+            vassert(0);
+            break;
+      }
+   else
+      switch (iregNo) {
+         case 0:
+            ret = offsetof(VexGuestMIPS64State, guest_r0); break;
+         case 1:
+            ret = offsetof(VexGuestMIPS64State, guest_r1); break;
+         case 2:
+            ret = offsetof(VexGuestMIPS64State, guest_r2); break;
+         case 3:
+            ret = offsetof(VexGuestMIPS64State, guest_r3); break;
+         case 4:
+            ret = offsetof(VexGuestMIPS64State, guest_r4); break;
+         case 5:
+            ret = offsetof(VexGuestMIPS64State, guest_r5); break;
+         case 6:
+            ret = offsetof(VexGuestMIPS64State, guest_r6); break;
+         case 7:
+            ret = offsetof(VexGuestMIPS64State, guest_r7); break;
+         case 8:
+            ret = offsetof(VexGuestMIPS64State, guest_r8); break;
+         case 9:
+            ret = offsetof(VexGuestMIPS64State, guest_r9); break;
+         case 10:
+            ret = offsetof(VexGuestMIPS64State, guest_r10); break;
+         case 11:
+            ret = offsetof(VexGuestMIPS64State, guest_r11); break;
+         case 12:
+            ret = offsetof(VexGuestMIPS64State, guest_r12); break;
+         case 13:
+            ret = offsetof(VexGuestMIPS64State, guest_r13); break;
+         case 14:
+            ret = offsetof(VexGuestMIPS64State, guest_r14); break;
+         case 15:
+            ret = offsetof(VexGuestMIPS64State, guest_r15); break;
+         case 16:
+            ret = offsetof(VexGuestMIPS64State, guest_r16); break;
+         case 17:
+            ret = offsetof(VexGuestMIPS64State, guest_r17); break;
+         case 18:
+            ret = offsetof(VexGuestMIPS64State, guest_r18); break;
+         case 19:
+            ret = offsetof(VexGuestMIPS64State, guest_r19); break;
+         case 20:
+            ret = offsetof(VexGuestMIPS64State, guest_r20); break;
+         case 21:
+            ret = offsetof(VexGuestMIPS64State, guest_r21); break;
+         case 22:
+            ret = offsetof(VexGuestMIPS64State, guest_r22); break;
+         case 23:
+            ret = offsetof(VexGuestMIPS64State, guest_r23); break;
+         case 24:
+            ret = offsetof(VexGuestMIPS64State, guest_r24); break;
+         case 25:
+            ret = offsetof(VexGuestMIPS64State, guest_r25); break;
+         case 26:
+            ret = offsetof(VexGuestMIPS64State, guest_r26); break;
+         case 27:
+            ret = offsetof(VexGuestMIPS64State, guest_r27); break;
+         case 28:
+            ret = offsetof(VexGuestMIPS64State, guest_r28); break;
+         case 29:
+            ret = offsetof(VexGuestMIPS64State, guest_r29); break;
+         case 30:
+            ret = offsetof(VexGuestMIPS64State, guest_r30); break;
+         case 31:
+            ret = offsetof(VexGuestMIPS64State, guest_r31); break;
+         default:
+            vassert(0);
+            break;
+      }
+   return ret;
+}
+
/* Byte offset of the guest program counter within the guest state,
   selected for the platform this front end is built for. */
#if defined(VGP_mips32_linux)
#define OFFB_PC     offsetof(VexGuestMIPS32State, guest_PC)
#else
#define OFFB_PC     offsetof(VexGuestMIPS64State, guest_PC)
#endif
+
+/* ---------------- Floating point registers ---------------- */
+
+static UInt floatGuestRegOffset(UInt fregNo)
+{
+   vassert(fregNo < 32);
+   UInt ret;
+   if (!mode64)
+      switch (fregNo) {
+         case 0:
+            ret = offsetof(VexGuestMIPS32State, guest_f0); break;
+         case 1:
+            ret = offsetof(VexGuestMIPS32State, guest_f1); break;
+         case 2:
+            ret = offsetof(VexGuestMIPS32State, guest_f2); break;
+         case 3:
+            ret = offsetof(VexGuestMIPS32State, guest_f3); break;
+         case 4:
+            ret = offsetof(VexGuestMIPS32State, guest_f4); break;
+         case 5:
+            ret = offsetof(VexGuestMIPS32State, guest_f5); break;
+         case 6:
+            ret = offsetof(VexGuestMIPS32State, guest_f6); break;
+         case 7:
+            ret = offsetof(VexGuestMIPS32State, guest_f7); break;
+         case 8:
+            ret = offsetof(VexGuestMIPS32State, guest_f8); break;
+         case 9:
+            ret = offsetof(VexGuestMIPS32State, guest_f9); break;
+         case 10:
+            ret = offsetof(VexGuestMIPS32State, guest_f10); break;
+         case 11:
+            ret = offsetof(VexGuestMIPS32State, guest_f11); break;
+         case 12:
+            ret = offsetof(VexGuestMIPS32State, guest_f12); break;
+         case 13:
+            ret = offsetof(VexGuestMIPS32State, guest_f13); break;
+         case 14:
+            ret = offsetof(VexGuestMIPS32State, guest_f14); break;
+         case 15:
+            ret = offsetof(VexGuestMIPS32State, guest_f15); break;
+         case 16:
+            ret = offsetof(VexGuestMIPS32State, guest_f16); break;
+         case 17:
+            ret = offsetof(VexGuestMIPS32State, guest_f17); break;
+         case 18:
+            ret = offsetof(VexGuestMIPS32State, guest_f18); break;
+         case 19:
+            ret = offsetof(VexGuestMIPS32State, guest_f19); break;
+         case 20:
+            ret = offsetof(VexGuestMIPS32State, guest_f20); break;
+         case 21:
+            ret = offsetof(VexGuestMIPS32State, guest_f21); break;
+         case 22:
+            ret = offsetof(VexGuestMIPS32State, guest_f22); break;
+         case 23:
+            ret = offsetof(VexGuestMIPS32State, guest_f23); break;
+         case 24:
+            ret = offsetof(VexGuestMIPS32State, guest_f24); break;
+         case 25:
+            ret = offsetof(VexGuestMIPS32State, guest_f25); break;
+         case 26:
+            ret = offsetof(VexGuestMIPS32State, guest_f26); break;
+         case 27:
+            ret = offsetof(VexGuestMIPS32State, guest_f27); break;
+         case 28:
+            ret = offsetof(VexGuestMIPS32State, guest_f28); break;
+         case 29:
+            ret = offsetof(VexGuestMIPS32State, guest_f29); break;
+         case 30:
+            ret = offsetof(VexGuestMIPS32State, guest_f30); break;
+         case 31:
+            ret = offsetof(VexGuestMIPS32State, guest_f31); break;
+         default:
+            vassert(0);
+            break;
+      }
+   else
+      switch (fregNo) {
+         case 0:
+            ret = offsetof(VexGuestMIPS64State, guest_f0); break;
+         case 1:
+            ret = offsetof(VexGuestMIPS64State, guest_f1); break;
+         case 2:
+            ret = offsetof(VexGuestMIPS64State, guest_f2); break;
+         case 3:
+            ret = offsetof(VexGuestMIPS64State, guest_f3); break;
+         case 4:
+            ret = offsetof(VexGuestMIPS64State, guest_f4); break;
+         case 5:
+            ret = offsetof(VexGuestMIPS64State, guest_f5); break;
+         case 6:
+            ret = offsetof(VexGuestMIPS64State, guest_f6); break;
+         case 7:
+            ret = offsetof(VexGuestMIPS64State, guest_f7); break;
+         case 8:
+            ret = offsetof(VexGuestMIPS64State, guest_f8); break;
+         case 9:
+            ret = offsetof(VexGuestMIPS64State, guest_f9); break;
+         case 10:
+            ret = offsetof(VexGuestMIPS64State, guest_f10); break;
+         case 11:
+            ret = offsetof(VexGuestMIPS64State, guest_f11); break;
+         case 12:
+            ret = offsetof(VexGuestMIPS64State, guest_f12); break;
+         case 13:
+            ret = offsetof(VexGuestMIPS64State, guest_f13); break;
+         case 14:
+            ret = offsetof(VexGuestMIPS64State, guest_f14); break;
+         case 15:
+            ret = offsetof(VexGuestMIPS64State, guest_f15); break;
+         case 16:
+            ret = offsetof(VexGuestMIPS64State, guest_f16); break;
+         case 17:
+            ret = offsetof(VexGuestMIPS64State, guest_f17); break;
+         case 18:
+            ret = offsetof(VexGuestMIPS64State, guest_f18); break;
+         case 19:
+            ret = offsetof(VexGuestMIPS64State, guest_f19); break;
+         case 20:
+            ret = offsetof(VexGuestMIPS64State, guest_f20); break;
+         case 21:
+            ret = offsetof(VexGuestMIPS64State, guest_f21); break;
+         case 22:
+            ret = offsetof(VexGuestMIPS64State, guest_f22); break;
+         case 23:
+            ret = offsetof(VexGuestMIPS64State, guest_f23); break;
+         case 24:
+            ret = offsetof(VexGuestMIPS64State, guest_f24); break;
+         case 25:
+            ret = offsetof(VexGuestMIPS64State, guest_f25); break;
+         case 26:
+            ret = offsetof(VexGuestMIPS64State, guest_f26); break;
+         case 27:
+            ret = offsetof(VexGuestMIPS64State, guest_f27); break;
+         case 28:
+            ret = offsetof(VexGuestMIPS64State, guest_f28); break;
+         case 29:
+            ret = offsetof(VexGuestMIPS64State, guest_f29); break;
+         case 30:
+            ret = offsetof(VexGuestMIPS64State, guest_f30); break;
+         case 31:
+            ret = offsetof(VexGuestMIPS64State, guest_f31); break;
+         default:
+            vassert(0);
+            break;
+      }
+   return ret;
+}
+
+/* ---------------- MIPS32 DSP ASE(r2) accumulators ---------------- */
+
+static UInt accumulatorGuestRegOffset(UInt acNo)
+{
+   vassert(!mode64);
+   vassert(acNo <= 3);
+   UInt ret;
+   switch (acNo) {
+      case 0:
+         ret = offsetof(VexGuestMIPS32State, guest_ac0); break;
+      case 1:
+         ret = offsetof(VexGuestMIPS32State, guest_ac1); break;
+      case 2:
+         ret = offsetof(VexGuestMIPS32State, guest_ac2); break;
+      case 3:
+         ret = offsetof(VexGuestMIPS32State, guest_ac3); break;
+      default:
+         vassert(0);
+    break;
+   }
+   return ret;
+}
+
+/* Do a endian load of a 32-bit word, regardless of the endianness of the
+   underlying host. */
+static inline UInt getUInt(const UChar * p)
+{
+   UInt w = 0;
+#if defined (_MIPSEL)
+   w = (w << 8) | p[3];
+   w = (w << 8) | p[2];
+   w = (w << 8) | p[1];
+   w = (w << 8) | p[0];
+#elif defined (_MIPSEB)
+   w = (w << 8) | p[0];
+   w = (w << 8) | p[1];
+   w = (w << 8) | p[2];
+   w = (w << 8) | p[3];
+#endif
+   return w;
+}
+
/* Assemble a small constant from individual bit values: BITSn packs its
   arguments MSB-first.  Each successive macro adds one high bit on top
   of the next-smaller one. */
#define BITS2(_b1,_b0) \
   (((_b1) << 1) | (_b0))

#define BITS3(_b2,_b1,_b0) \
   (((_b2) << 2) | BITS2((_b1),(_b0)))

#define BITS4(_b3,_b2,_b1,_b0) \
   (((_b3) << 3) | BITS3((_b2),(_b1),(_b0)))

#define BITS5(_b4,_b3,_b2,_b1,_b0) \
   (((_b4) << 4) | BITS4((_b3),(_b2),(_b1),(_b0)))

#define BITS6(_b5,_b4,_b3,_b2,_b1,_b0) \
   (((_b5) << 5) | BITS5((_b4),(_b3),(_b2),(_b1),(_b0)))

#define BITS8(_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0) \
   ((BITS4((_b7),(_b6),(_b5),(_b4)) << 4) | BITS4((_b3),(_b2),(_b1),(_b0)))
+
/* The following pattern macros expand to IR-emission statements inside the
   disassembler.  They rely on locals (t1..t4, rs, rt, rd, sa, imm, regRs,
   regRt, cc, ty, dres, guest_PC_curr_instr) being in scope at the expansion
   site.  NOTE(review): they are multi-statement and not do{}while(0)
   wrapped, so they must only be expanded where a plain statement sequence
   is syntactically valid. */

/* Effective address for load/store: t1 = rs + sign_extend(imm16),
   as I32 in 32-bit mode, I64 in 64-bit mode. */
#define LOAD_STORE_PATTERN \
   t1 = newTemp(mode64 ? Ity_I64 : Ity_I32); \
      if(!mode64) \
         assign(t1, binop(Iop_Add32, getIReg(rs), \
                                     mkU32(extend_s_16to32(imm)))); \
      else \
         assign(t1, binop(Iop_Add64, getIReg(rs), \
                                     mkU64(extend_s_16to64(imm)))); \

/* Indexed effective address: t1 = regRs + regRt. */
#define LOADX_STORE_PATTERN \
   t1 = newTemp(mode64 ? Ity_I64 : Ity_I32); \
      if(!mode64) \
         assign(t1, binop(Iop_Add32, getIReg(regRs), getIReg(regRt))); \
      else \
         assign(t1, binop(Iop_Add64, getIReg(regRs), getIReg(regRt)));

/* Split a 64-bit EA in t1 into word-aligned base (t2) and the low two
   byte-offset bits narrowed to I32 (t4), for LWL/LWR-style accesses. */
#define LWX_SWX_PATTERN64 \
   t2 = newTemp(Ity_I64); \
   assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFFCULL))); \
   t4 = newTemp(Ity_I32); \
   assign(t4, mkNarrowTo32( ty, binop(Iop_And64, \
                                      mkexpr(t1), mkU64(0x3))));

/* As above but doubleword-aligned base (t2) and three offset bits (t4). */
#define LWX_SWX_PATTERN64_1 \
   t2 = newTemp(Ity_I64); \
   assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFF8ULL))); \
   t4 = newTemp(Ity_I64); \
   assign(t4, binop(Iop_And64, mkexpr(t1), mkU64(0x7)));

/* 32-bit variant: word-aligned base in t2, byte offset in t4. */
#define LWX_SWX_PATTERN \
   t2 = newTemp(Ity_I32); \
   assign(t2, binop(Iop_And32, mkexpr(t1), mkU32(0xFFFFFFFC))); \
   t4 = newTemp(Ity_I32); \
   assign(t4, binop(Iop_And32, mkexpr(t1), mkU32(0x00000003)))

/* Variable shift: rd = rt <op> (rs & 31). */
#define SXXV_PATTERN(op) \
   putIReg(rd, binop(op, \
         getIReg(rt), \
            unop(Iop_32to8, \
               binop(Iop_And32, \
                  getIReg(rs), \
                  mkU32(0x0000001F) \
               ) \
            ) \
         ) \
      )

/* Variable shift on the low 32 bits, result sign-widened to reg width. */
#define SXXV_PATTERN64(op) \
   putIReg(rd, mkWidenFrom32(ty, binop(op, \
           mkNarrowTo32(ty, getIReg(rt)), \
             unop(Iop_32to8, \
                binop(Iop_And32, \
                   mkNarrowTo32(ty, getIReg(rs)), \
                   mkU32(0x0000001F) \
                ) \
             ) \
          ), True \
       ))

/* Immediate shift: rd = rt <op> sa. */
#define SXX_PATTERN(op) \
   putIReg(rd, binop(op, getIReg(rt), mkU8(sa)));

/* Three-register ALU op: rd = rs <op> rt. */
#define ALU_PATTERN(op) \
   putIReg(rd, binop(op, getIReg(rs), getIReg(rt)));

/* ALU op with immediate: rt = rs <op> imm (32-bit form). */
#define ALUI_PATTERN(op) \
   putIReg(rt, binop(op, getIReg(rs), mkU32(imm)));

/* ALU op with immediate: rt = rs <op> imm (64-bit form). */
#define ALUI_PATTERN64(op) \
   putIReg(rt, binop(op, getIReg(rs), mkU64(imm)));

/* 32-bit ALU op on the low halves, result sign-widened to reg width. */
#define ALU_PATTERN64(op) \
   putIReg(rd, mkWidenFrom32(ty, binop(op, \
                             mkNarrowTo32(ty, getIReg(rs)), \
                             mkNarrowTo32(ty, getIReg(rt))), True));

/* Extract FP condition code `cc` from FCSR into t3 as 0 or 1:
   cc 0 lives at FCSR bit 23, cc 1..7 at bit 24+cc. */
#define FP_CONDITIONAL_CODE \
   t3 = newTemp(Ity_I32);   \
   assign(t3, binop(Iop_And32, \
                 IRExpr_ITE( binop(Iop_CmpEQ32, mkU32(cc), mkU32(0)), \
                             binop(Iop_Shr32, getFCSR(), mkU8(23)), \
                             binop(Iop_Shr32, getFCSR(), mkU8(24+cc))), \
                 mkU32(0x1)));

/* Terminate the block with a SIGILL at the next instruction.
   NOTE(review): the macro name is misspelled ("INSTRUCTON"); it is kept
   as-is because call sites elsewhere in this file use this spelling. */
#define ILLEGAL_INSTRUCTON \
   putPC(mkU32(guest_PC_curr_instr + 4)); \
   dres.jk_StopHere = Ijk_SigILL; \
   dres.whatNext    = Dis_StopHere;
+
+/*------------------------------------------------------------*/
+/*---                  Field helpers                       ---*/
+/*------------------------------------------------------------*/
+
+static UInt get_opcode(UInt mipsins)
+{
+   return (0xFC000000 & mipsins) >> 26;
+}
+
+static UInt get_rs(UInt mipsins)
+{
+   return (0x03E00000 & mipsins) >> 21;
+}
+
+static UInt get_rt(UInt mipsins)
+{
+   return (0x001F0000 & mipsins) >> 16;
+}
+
+static UInt get_imm(UInt mipsins)
+{
+   return (0x0000FFFF & mipsins);
+}
+
+static UInt get_instr_index(UInt mipsins)
+{
+   return (0x03FFFFFF & mipsins);
+}
+
+static UInt get_rd(UInt mipsins)
+{
+   return (0x0000F800 & mipsins) >> 11;
+}
+
+static UInt get_sa(UInt mipsins)
+{
+   return (0x000007C0 & mipsins) >> 6;
+}
+
+static UInt get_function(UInt mipsins)
+{
+   return (0x0000003F & mipsins);
+}
+
+static UInt get_ft(UInt mipsins)
+{
+   return (0x001F0000 & mipsins) >> 16;
+}
+
+static UInt get_fs(UInt mipsins)
+{
+   return (0x0000F800 & mipsins) >> 11;
+}
+
+static UInt get_fd(UInt mipsins)
+{
+   return (0x000007C0 & mipsins) >> 6;
+}
+
+static UInt get_mov_cc(UInt mipsins)
+{
+   return (0x001C0000 & mipsins) >> 18;
+}
+
+static UInt get_bc1_cc(UInt mipsins)
+{
+   return (0x001C0000 & mipsins) >> 18;
+}
+
+static UInt get_fpc_cc(UInt mipsins)
+{
+   return (0x00000700 & mipsins) >> 8;
+}
+
+static UInt get_tf(UInt mipsins)
+{
+   return (0x00010000 & mipsins) >> 16;
+}
+
+static UInt get_nd(UInt mipsins)
+{
+   return (0x00020000 & mipsins) >> 17;
+}
+
+static UInt get_fmt(UInt mipsins)
+{
+   return (0x03E00000 & mipsins) >> 21;
+}
+
+static UInt get_FC(UInt mipsins)
+{
+   return (0x000000F0 & mipsins) >> 4;
+}
+
+static UInt get_cond(UInt mipsins)
+{
+   return (0x0000000F & mipsins);
+}
+
+/* for break & syscall */
+static UInt get_code(UInt mipsins)
+{
+   return (0xFFC0 & mipsins) >> 6;
+}
+
+static UInt get_lsb(UInt mipsins)
+{
+   return (0x7C0 & mipsins) >> 6;
+}
+
+static UInt get_msb(UInt mipsins)
+{
+   return (0x0000F800 & mipsins) >> 11;
+}
+
+static UInt get_rot(UInt mipsins)
+{
+   return (0x00200000 & mipsins) >> 21;
+}
+
+static UInt get_rotv(UInt mipsins)
+{
+   return (0x00000040 & mipsins) >> 6;
+}
+
+static UInt get_sel(UInt mipsins)
+{
+   return (0x00000007 & mipsins);
+}
+
+/* Get acc number for all MIPS32 DSP ASE(r2) instructions that use them,
+   except for MFHI and MFLO. */
+static UInt get_acNo(UInt mipsins)
+{
+   return (0x00001800 & mipsins) >> 11;
+}
+
+/* Get accumulator number for MIPS32 DSP ASEr2 MFHI and MFLO instructions. */
+static UInt get_acNo_mfhilo(UInt mipsins)
+{
+   return (0x00600000 & mipsins) >> 21;
+}
+
+/* Get mask field (helper function for wrdsp instruction). */
+static UInt get_wrdspMask(UInt mipsins)
+{
+   return (0x001ff800 & mipsins) >> 11;
+}
+
+/* Get mask field (helper function for rddsp instruction). */
+static UInt get_rddspMask(UInt mipsins)
+{
+   return (0x03ff0000 & mipsins) >> 16;
+}
+
+/* Get shift field (helper function for DSP ASE instructions). */
+static UInt get_shift(UInt mipsins)
+{
+   return (0x03f00000 & mipsins) >> 20;
+}
+
+/* Get immediate field for DSP ASE instructions. */
+static UInt get_dspImm(UInt mipsins)
+{
+   return (0x03ff0000 & mipsins) >> 16;
+}
+
+static Bool branch_or_jump(const UChar * addr)
+{
+   UInt fmt;
+   UInt cins = getUInt(addr);
+
+   UInt opcode = get_opcode(cins);
+   UInt rt = get_rt(cins);
+   UInt function = get_function(cins);
+
+   /* bgtz, blez, bne, beq, jal */
+   if (opcode == 0x07 || opcode == 0x06 || opcode == 0x05 || opcode == 0x04
+       || opcode == 0x03 || opcode == 0x02) {
+      return True;
+   }
+
+   /* bgez */
+   if (opcode == 0x01 && rt == 0x01) {
+      return True;
+   }
+
+   /* bgezal */
+   if (opcode == 0x01 && rt == 0x11) {
+      return True;
+   }
+
+   /* bltzal */
+   if (opcode == 0x01 && rt == 0x10) {
+      return True;
+   }
+
+   /* bltz */
+   if (opcode == 0x01 && rt == 0x00) {
+      return True;
+   }
+
+   /* jalr */
+   if (opcode == 0x00 && function == 0x09) {
+      return True;
+   }
+
+   /* jr */
+   if (opcode == 0x00 && function == 0x08) {
+      return True;
+   }
+
+   if (opcode == 0x11) {
+      /*bc1f & bc1t */
+      fmt = get_fmt(cins);
+      if (fmt == 0x08) {
+         return True;
+      }
+   }
+
+   /* bposge32 */
+   if (opcode == 0x01 && rt == 0x1c) {
+      return True;
+   }
+
+   /* Cavium Specific instructions. */
+   if (opcode == 0x32 || opcode == 0x3A || opcode == 0x36 || opcode == 0x3E) {
+       /* BBIT0, BBIT1, BBIT032, BBIT132 */
+      return True;
+   }
+
+   return False;
+}
+
+static Bool is_Branch_or_Jump_and_Link(const UChar * addr)
+{
+   UInt cins = getUInt(addr);
+
+   UInt opcode = get_opcode(cins);
+   UInt rt = get_rt(cins);
+   UInt function = get_function(cins);
+
+   /* jal */
+   if (opcode == 0x02) {
+      return True;
+   }
+
+   /* bgezal */
+   if (opcode == 0x01 && rt == 0x11) {
+      return True;
+   }
+
+   /* bltzal */
+   if (opcode == 0x01 && rt == 0x10) {
+      return True;
+   }
+
+   /* jalr */
+   if (opcode == 0x00 && function == 0x09) {
+      return True;
+   }
+
+   return False;
+}
+
+static Bool branch_or_link_likely(const UChar * addr)
+{
+   UInt cins = getUInt(addr);
+   UInt opcode = get_opcode(cins);
+   UInt rt = get_rt(cins);
+
+   /* bgtzl, blezl, bnel, beql */
+   if (opcode == 0x17 || opcode == 0x16 || opcode == 0x15 || opcode == 0x14)
+      return True;
+
+   /* bgezl */
+   if (opcode == 0x01 && rt == 0x03)
+      return True;
+
+   /* bgezall */
+   if (opcode == 0x01 && rt == 0x13)
+      return True;
+
+   /* bltzall */
+   if (opcode == 0x01 && rt == 0x12)
+      return True;
+
+   /* bltzl */
+   if (opcode == 0x01 && rt == 0x02)
+      return True;
+
+   return False;
+}
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for creating IR fragments.    ---*/
+/*------------------------------------------------------------*/
+
+static IRExpr *mkU8(UInt i)
+{
+   vassert(i < 256);
+   return IRExpr_Const(IRConst_U8((UChar) i));
+}
+
+/* Create an expression node for a 16-bit integer constant. */
+static IRExpr *mkU16(UInt i)
+{
+   return IRExpr_Const(IRConst_U16(i));
+}
+
+/* Create an expression node for a 32-bit integer constant. */
+static IRExpr *mkU32(UInt i)
+{
+   return IRExpr_Const(IRConst_U32(i));
+}
+
+/* Create an expression node for a 64-bit integer constant. */
+static IRExpr *mkU64(ULong i)
+{
+   return IRExpr_Const(IRConst_U64(i));
+}
+
+static IRExpr *mkexpr(IRTemp tmp)
+{
+   return IRExpr_RdTmp(tmp);
+}
+
+static IRExpr *unop(IROp op, IRExpr * a)
+{
+   return IRExpr_Unop(op, a);
+}
+
+static IRExpr *binop(IROp op, IRExpr * a1, IRExpr * a2)
+{
+   return IRExpr_Binop(op, a1, a2);
+}
+
+static IRExpr *triop(IROp op, IRExpr * a1, IRExpr * a2, IRExpr * a3)
+{
+   return IRExpr_Triop(op, a1, a2, a3);
+}
+
+static IRExpr *qop ( IROp op, IRExpr * a1, IRExpr * a2, IRExpr * a3,
+                     IRExpr * a4 )
+{
+   return IRExpr_Qop(op, a1, a2, a3, a4);
+}
+
+static IRExpr *load(IRType ty, IRExpr * addr)
+{
+   IRExpr *load1 = NULL;
+#if defined (_MIPSEL)
+   load1 = IRExpr_Load(Iend_LE, ty, addr);
+#elif defined (_MIPSEB)
+   load1 = IRExpr_Load(Iend_BE, ty, addr);
+#endif
+   return load1;
+}
+
+/* Add a statement to the list held by "irsb". */
+static void stmt(IRStmt * st)
+{
+   addStmtToIRSB(irsb, st);
+}
+
+static void assign(IRTemp dst, IRExpr * e)
+{
+   stmt(IRStmt_WrTmp(dst, e));
+}
+
+static void store(IRExpr * addr, IRExpr * data)
+{
+#if defined (_MIPSEL)
+   stmt(IRStmt_Store(Iend_LE, addr, data));
+#elif defined (_MIPSEB)
+   stmt(IRStmt_Store(Iend_BE, addr, data));
+#endif
+}
+
+/* Generate a new temporary of the given type. */
+static IRTemp newTemp(IRType ty)
+{
+   vassert(isPlausibleIRType(ty));
+   return newIRTemp(irsb->tyenv, ty);
+}
+
+/* Generate an expression for SRC rotated right by ROT. */
+static IRExpr *genROR32(IRExpr * src, Int rot)
+{
+   vassert(rot >= 0 && rot < 32);
+   if (rot == 0)
+      return src;
+   return binop(Iop_Or32, binop(Iop_Shl32, src, mkU8(32 - rot)),
+                          binop(Iop_Shr32, src, mkU8(rot)));
+}
+
+static IRExpr *genRORV32(IRExpr * src, IRExpr * rs)
+{
+   IRTemp t0 = newTemp(Ity_I8);
+   IRTemp t1 = newTemp(Ity_I8);
+
+   assign(t0, unop(Iop_32to8, binop(Iop_And32, rs, mkU32(0x0000001F))));
+   assign(t1, binop(Iop_Sub8, mkU8(32), mkexpr(t0)));
+   return binop(Iop_Or32, binop(Iop_Shl32, src, mkexpr(t1)),
+                          binop(Iop_Shr32, src, mkexpr(t0)));
+}
+
+static UShort extend_s_10to16(UInt x)
+{
+   return (UShort) ((((Int) x) << 22) >> 22);
+}
+
+static ULong extend_s_10to32(UInt x)
+{
+   return (ULong)((((Long) x) << 22) >> 22);
+}
+
+static ULong extend_s_10to64(UInt x)
+{
+   return (ULong)((((Long) x) << 54) >> 54);
+}
+
+static UInt extend_s_16to32(UInt x)
+{
+   return (UInt) ((((Int) x) << 16) >> 16);
+}
+
+static UInt extend_s_18to32(UInt x)
+{
+   return (UInt) ((((Int) x) << 14) >> 14);
+}
+
+static ULong extend_s_16to64 ( UInt x )
+{
+   return (ULong) ((((Long) x) << 48) >> 48);
+}
+
+static ULong extend_s_18to64 ( UInt x )
+{
+   return (ULong) ((((Long) x) << 46) >> 46);
+}
+
+static ULong extend_s_32to64 ( UInt x )
+{
+   return (ULong) ((((Long) x) << 32) >> 32);
+}
+
+static void jmp_lit32 ( /*MOD*/ DisResult* dres, IRJumpKind kind, Addr32 d32 )
+{
+   vassert(dres->whatNext    == Dis_Continue);
+   vassert(dres->len         == 0);
+   vassert(dres->continueAt  == 0);
+   vassert(dres->jk_StopHere == Ijk_INVALID);
+   dres->whatNext    = Dis_StopHere;
+   dres->jk_StopHere = kind;
+   stmt( IRStmt_Put( OFFB_PC, mkU32(d32) ) );
+}
+
+static void jmp_lit64 ( /*MOD*/ DisResult* dres, IRJumpKind kind, Addr64 d64 )
+{
+   vassert(dres->whatNext    == Dis_Continue);
+   vassert(dres->len         == 0);
+   vassert(dres->continueAt  == 0);
+   vassert(dres->jk_StopHere == Ijk_INVALID);
+   dres->whatNext    = Dis_StopHere;
+   dres->jk_StopHere = kind;
+   stmt(IRStmt_Put(OFFB_PC, mkU64(d64)));
+}
+
+/* Get value from accumulator (helper function for MIPS32 DSP ASE instructions).
+   This function should be called before any other operation if widening
+   multiplications are used. */
+static IRExpr *getAcc(UInt acNo)
+{
+   vassert(!mode64);
+   vassert(acNo <= 3);
+   return IRExpr_Get(accumulatorGuestRegOffset(acNo), Ity_I64);
+}
+
+/* Get value from DSPControl register (helper function for MIPS32 DSP ASE
+   instructions). */
+static IRExpr *getDSPControl(void)
+{
+   vassert(!mode64);
+   return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_DSPControl), Ity_I32);
+}
+
+/* Put value to DSPControl register. Expression e is written to DSPControl as
+   is. If only certain bits of DSPControl need to be changed, it should be done
+   before calling putDSPControl(). It could be done by reading DSPControl and
+   ORing it with appropriate mask. */
+static void putDSPControl(IRExpr * e)
+{
+   vassert(!mode64);
+   stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_DSPControl), e));
+}
+
+/* Fetch a byte from the guest insn stream. */
+static UChar getIByte(Int delta)
+{
+   return guest_code[delta];
+}
+
+static IRExpr *getIReg(UInt iregNo)
+{
+   if (0 == iregNo) {
+      return mode64 ? mkU64(0x0) : mkU32(0x0);
+   } else {
+      IRType ty = mode64 ? Ity_I64 : Ity_I32;
+      vassert(iregNo < 32);
+      return IRExpr_Get(integerGuestRegOffset(iregNo), ty);
+   }
+}
+
+static IRExpr *getHI(void)
+{
+   if (mode64)
+      return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_HI), Ity_I64);
+   else
+      return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_HI), Ity_I32);
+}
+
+static IRExpr *getLO(void)
+{
+   if (mode64)
+      return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_LO), Ity_I64);
+   else
+      return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_LO), Ity_I32);
+}
+
+static IRExpr *getFCSR(void)
+{
+   if (mode64)
+      return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_FCSR), Ity_I32);
+   else
+      return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_FCSR), Ity_I32);
+}
+
+/* Get byte from register reg, byte pos from 0 to 3 (or 7 for MIPS64) . */
+static IRExpr *getByteFromReg(UInt reg, UInt byte_pos)
+{
+  UInt pos = byte_pos * 8;
+  if (mode64)
+      return unop(Iop_64to8, binop(Iop_And64,
+                                   binop(Iop_Shr64, getIReg(reg), mkU8(pos)),
+                                   mkU64(0xFF)));
+   else
+      return unop(Iop_32to8, binop(Iop_And32,
+                                   binop(Iop_Shr32, getIReg(reg), mkU8(pos)),
+                                   mkU32(0xFF)));
+}
+
+static void putFCSR(IRExpr * e)
+{
+   if (mode64)
+      stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_FCSR), e));
+   else
+      stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_FCSR), e));
+}
+
/* Emit a dirty call that executes FP instruction `inst` via a native
   helper in order to obtain the resulting FCSR value, then write that
   value back to the guest FCSR.
   fs   - fpu source register number.
   ft   - second fpu source register number (read only for opN == 2).
   inst - fpu instruction that needs to be executed.
   sz32 - True if the source register(s) are 32 bits wide.
   opN  - number of operands:
          1 - unary operation.
          2 - binary operation. */
static void calculateFCSR(UInt fs, UInt ft, UInt inst, Bool sz32, UInt opN)
{
   IRDirty *d;
   IRTemp fcsr = newTemp(Ity_I32);
   /* IRExpr_BBPTR() => Need to pass pointer to guest state to helper. */
   if (fp_mode64)
      d = unsafeIRDirty_1_N(fcsr, 0,
                            "mips_dirtyhelper_calculate_FCSR_fp64",
                            &mips_dirtyhelper_calculate_FCSR_fp64,
                            mkIRExprVec_4(IRExpr_BBPTR(),
                                          mkU32(fs),
                                          mkU32(ft),
                                          mkU32(inst)));
   else
      d = unsafeIRDirty_1_N(fcsr, 0,
                            "mips_dirtyhelper_calculate_FCSR_fp32",
                            &mips_dirtyhelper_calculate_FCSR_fp32,
                            mkIRExprVec_4(IRExpr_BBPTR(),
                                          mkU32(fs),
                                          mkU32(ft),
                                          mkU32(inst)));

   if (opN == 1) {  /* Unary operation. */
      /* Declare we're reading guest state: FCSR plus the source
         register (and its odd pair partner when a 64-bit value lives in
         a 32-bit register pair). */
      if (sz32 || fp_mode64)
         d->nFxState = 2;
      else
         d->nFxState = 3;
      vex_bzero(&d->fxState, sizeof(d->fxState));

      d->fxState[0].fx     = Ifx_Read;  /* read */
      if (mode64)
         d->fxState[0].offset = offsetof(VexGuestMIPS64State, guest_FCSR);
      else
         d->fxState[0].offset = offsetof(VexGuestMIPS32State, guest_FCSR);
      d->fxState[0].size   = sizeof(UInt);
      d->fxState[1].fx     = Ifx_Read;  /* read */
      d->fxState[1].offset = floatGuestRegOffset(fs);
      d->fxState[1].size   = sizeof(ULong);

      if (!(sz32 || fp_mode64)) {
         /* fp32 double: the other half lives in register fs+1. */
         d->fxState[2].fx     = Ifx_Read;  /* read */
         d->fxState[2].offset = floatGuestRegOffset(fs+1);
         d->fxState[2].size   = sizeof(ULong);
      }
   } else if (opN == 2) {  /* Binary operation. */
      /* Declare we're reading guest state: FCSR plus both source
         registers (and their pair partners in fp32 double mode). */
      if (sz32 || fp_mode64)
         d->nFxState = 3;
      else
         d->nFxState = 5;
      vex_bzero(&d->fxState, sizeof(d->fxState));

      d->fxState[0].fx     = Ifx_Read;  /* read */
      if (mode64)
         d->fxState[0].offset = offsetof(VexGuestMIPS64State, guest_FCSR);
      else
         d->fxState[0].offset = offsetof(VexGuestMIPS32State, guest_FCSR);
      d->fxState[0].size   = sizeof(UInt);
      d->fxState[1].fx     = Ifx_Read;  /* read */
      d->fxState[1].offset = floatGuestRegOffset(fs);
      d->fxState[1].size   = sizeof(ULong);
      d->fxState[2].fx     = Ifx_Read;  /* read */
      d->fxState[2].offset = floatGuestRegOffset(ft);
      d->fxState[2].size   = sizeof(ULong);

      if (!(sz32 || fp_mode64)) {
         /* fp32 doubles: the other halves live in fs+1 and ft+1. */
         d->fxState[3].fx     = Ifx_Read;  /* read */
         d->fxState[3].offset = floatGuestRegOffset(fs+1);
         d->fxState[3].size   = sizeof(ULong);
         d->fxState[4].fx     = Ifx_Read;  /* read */
         d->fxState[4].offset = floatGuestRegOffset(ft+1);
         d->fxState[4].size   = sizeof(ULong);
      }
   }

   stmt(IRStmt_Dirty(d));

   /* Commit the helper's result to the guest FCSR. */
   putFCSR(mkexpr(fcsr));
}
+
+static IRExpr *getULR(void)
+{
+   if (mode64)
+      return IRExpr_Get(offsetof(VexGuestMIPS64State, guest_ULR), Ity_I64);
+   else
+      return IRExpr_Get(offsetof(VexGuestMIPS32State, guest_ULR), Ity_I32);
+}
+
+static void putIReg(UInt archreg, IRExpr * e)
+{
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   vassert(archreg < 32);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == ty);
+   if (archreg != 0)
+      stmt(IRStmt_Put(integerGuestRegOffset(archreg), e));
+}
+
+static IRExpr *mkNarrowTo32(IRType ty, IRExpr * src)
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ty == Ity_I64 ? unop(Iop_64to32, src) : src;
+}
+
/* Write e to the LO special register.  In 32-bit mode LO aliases the
   low half of DSP accumulator ac0, so the value is also mirrored there
   with ac0's high half preserved. */
static void putLO(IRExpr * e)
{
   if (mode64) {
      stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_LO), e));
   } else {
      stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_LO), e));
      /* Add value to lower 32 bits of ac0 to maintain compatibility
         between the regular MIPS32 instruction set and the MIPS DSP ASE.
         Keep the higher 32 bits unchanged. */
      IRTemp t_lo = newTemp(Ity_I32);
      IRTemp t_hi = newTemp(Ity_I32);
      assign(t_lo, e);
      assign(t_hi, unop(Iop_64HIto32, getAcc(0)));
      stmt(IRStmt_Put(accumulatorGuestRegOffset(0),
           binop(Iop_32HLto64, mkexpr(t_hi), mkexpr(t_lo))));
   }
}
+
/* Write e to the HI special register.  In 32-bit mode HI aliases the
   high half of DSP accumulator ac0, so the value is also mirrored there
   with ac0's low half preserved. */
static void putHI(IRExpr * e)
{
   if (mode64) {
      stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_HI), e));
   } else {
      stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_HI), e));
      /* Add value to higher 32 bits of ac0 to maintain compatibility
         between the regular MIPS32 instruction set and the MIPS DSP ASE.
         Keep the lower 32 bits unchanged. */
      IRTemp t_lo = newTemp(Ity_I32);
      IRTemp t_hi = newTemp(Ity_I32);
      assign(t_hi, e);
      assign(t_lo, unop(Iop_64to32, getAcc(0)));
      stmt(IRStmt_Put(accumulatorGuestRegOffset(0),
           binop(Iop_32HLto64, mkexpr(t_hi), mkexpr(t_lo))));
   }
}
+
/* Write the I64 expression e to accumulator acNo (helper function for
   MIPS32 DSP ASE instructions only). */
static void putAcc(UInt acNo, IRExpr * e)
{
   vassert(!mode64);
   vassert(acNo <= 3);
   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I64);
   stmt(IRStmt_Put(accumulatorGuestRegOffset(acNo), e));
/* If acNo == 0, also split the value into the HI and LO registers (via
   putLO/putHI, which in turn re-mirror into ac0) in order to maintain
   compatibility between the MIPS32 and MIPS DSP ASE insn sets. */
   if (0 == acNo) {
     putLO(unop(Iop_64to32, e));
     putHI(unop(Iop_64HIto32, e));
   }
}
+
+static IRExpr *mkNarrowTo8 ( IRType ty, IRExpr * src )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ty == Ity_I64 ? unop(Iop_64to8, src) : unop(Iop_32to8, src);
+}
+
+static void putPC(IRExpr * e)
+{
+   stmt(IRStmt_Put(OFFB_PC, e));
+}
+
+static IRExpr *mkWidenFrom32(IRType ty, IRExpr * src, Bool sined)
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   if (ty == Ity_I32)
+      return src;
+   return (sined) ? unop(Iop_32Sto64, src) : unop(Iop_32Uto64, src);
+}
+
+/* Narrow 8/16/32 bit int expr to 8/16/32.  Clearly only some
+   of these combinations make sense. */
+static IRExpr *narrowTo(IRType dst_ty, IRExpr * e)
+{
+   IRType src_ty = typeOfIRExpr(irsb->tyenv, e);
+   if (src_ty == dst_ty)
+      return e;
+   if (src_ty == Ity_I32 && dst_ty == Ity_I16)
+      return unop(Iop_32to16, e);
+   if (src_ty == Ity_I32 && dst_ty == Ity_I8)
+      return unop(Iop_32to8, e);
+   if (src_ty == Ity_I64 && dst_ty == Ity_I8) {
+      vassert(mode64);
+      return unop(Iop_64to8, e);
+   }
+   if (src_ty == Ity_I64 && dst_ty == Ity_I16) {
+      vassert(mode64);
+      return unop(Iop_64to16, e);
+   }
+   vpanic("narrowTo(mips)");
+   return 0;
+}
+
+static IRExpr *getLoFromF64(IRType ty, IRExpr * src)
+{
+   vassert(ty == Ity_F32 || ty == Ity_F64);
+   if (ty == Ity_F64) {
+      IRTemp t0, t1;
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I32);
+      assign(t0, unop(Iop_ReinterpF64asI64, src));
+      assign(t1, unop(Iop_64to32, mkexpr(t0)));
+      return unop(Iop_ReinterpI32asF32, mkexpr(t1));
+   } else
+      return src;
+}
+
+static IRExpr *mkWidenFromF32(IRType ty, IRExpr * src)
+{
+   vassert(ty == Ity_F32 || ty == Ity_F64);
+   if (ty == Ity_F64) {
+      IRTemp t0 = newTemp(Ity_I32);
+      IRTemp t1 = newTemp(Ity_I64);
+      assign(t0, unop(Iop_ReinterpF32asI32, src));
+      assign(t1, binop(Iop_32HLto64, mkU32(0x0), mkexpr(t0)));
+      return unop(Iop_ReinterpI64asF64, mkexpr(t1));
+   } else
+      return src;
+}
+
/* Emit IR for a branch-likely instruction: returns the expression for
   the taken-branch target address, and emits a side exit which — when
   `guard` evaluates true — resumes at guest_PC_curr_instr + 8, i.e.
   skips the delay slot.  NOTE(review): `guard` is therefore the
   branch-NOT-taken condition; confirm callers pass the inverted branch
   condition. */
static IRExpr *dis_branch_likely(IRExpr * guard, UInt imm)
{
   ULong branch_offset;
   IRTemp t0;

   /* PC = PC + (SignExtend(signed_immed_24) << 2)
      An 18-bit signed offset (the 16-bit offset field shifted left 2 bits)
      is added to the address of the instruction following
      the branch (not the branch itself), in the branch delay slot, to form
      a PC-relative effective target address. */
   if (mode64)
      branch_offset = extend_s_18to64(imm << 2);
   else
      branch_offset = extend_s_18to32(imm << 2);

   t0 = newTemp(Ity_I1);
   assign(t0, guard);

   /* Side exit past the delay slot (nullifying it) when guard holds. */
   if (mode64)
      stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring,
                       IRConst_U64(guest_PC_curr_instr + 8), OFFB_PC));
   else
      stmt(IRStmt_Exit(mkexpr(t0), Ijk_Boring,
                       IRConst_U32(guest_PC_curr_instr + 8), OFFB_PC));

   irsb->jumpkind = Ijk_Boring;

   /* Taken target: address of the delay slot (PC+4) plus the offset. */
   if (mode64)
      return mkU64(guest_PC_curr_instr + 4 + branch_offset);
   else
      return mkU32(guest_PC_curr_instr + 4 + branch_offset);
}
+
/* Build the conditional side exit for an ordinary branch.  The exit
   statement is returned through *set (not emitted here) so the caller
   can place it after the delay slot's IR.  When `link` is set, GPR31 is
   unconditionally written with the return address (PC + 8, the address
   of the second instruction after the branch) and the exit uses
   Ijk_Call. */
static void dis_branch(Bool link, IRExpr * guard, UInt imm, IRStmt ** set)
{
   ULong branch_offset;
   IRTemp t0;

   if (link) {  /* LR (GPR31) = addr of the 2nd instr after branch instr */
      if (mode64)
         putIReg(31, mkU64(guest_PC_curr_instr + 8));
      else
         putIReg(31, mkU32(guest_PC_curr_instr + 8));
   }

   /* PC = PC + (SignExtend(signed_immed_24) << 2)
      An 18-bit signed offset (the 16-bit offset field shifted left 2 bits)
      is added to the address of the instruction following
      the branch (not the branch itself), in the branch delay slot, to form
      a PC-relative effective target address. */

   if (mode64)
      branch_offset = extend_s_18to64(imm << 2);
   else
      branch_offset = extend_s_18to32(imm << 2);

   t0 = newTemp(Ity_I1);
   assign(t0, guard);
   /* Exit taken when guard holds; target is delay slot (PC+4) + offset. */
   if (mode64)
      *set = IRStmt_Exit(mkexpr(t0), link ? Ijk_Call : Ijk_Boring,
                         IRConst_U64(guest_PC_curr_instr + 4 + branch_offset),
                         OFFB_PC);
   else
      *set = IRStmt_Exit(mkexpr(t0), link ? Ijk_Call : Ijk_Boring,
                         IRConst_U32(guest_PC_curr_instr + 4 +
                                     (UInt) branch_offset), OFFB_PC);
}
+
+static IRExpr *getFReg(UInt fregNo)
+{
+   vassert(fregNo < 32);
+   IRType ty = fp_mode64 ? Ity_F64 : Ity_F32;
+   return IRExpr_Get(floatGuestRegOffset(fregNo), ty);
+}
+
/* Read double-precision FP register dregNo.  NOTE(review): in fp32 mode
   this reads the pair (dregNo, dregNo+1) — presumably dregNo must be
   even; not asserted here, confirm at call sites. */
static IRExpr *getDReg(UInt dregNo)
{
   vassert(dregNo < 32);
   if (fp_mode64) {
      /* 64-bit FPU registers: read the double directly. */
      return IRExpr_Get(floatGuestRegOffset(dregNo), Ity_F64);
   } else {
      /* Read a floating point register pair and combine their contents
         into a 64-bit value: f[dregNo] supplies the low 32 bits,
         f[dregNo+1] the high 32 bits. */
      IRTemp t0 = newTemp(Ity_F32);   /* f[dregNo]            */
      IRTemp t1 = newTemp(Ity_F32);   /* f[dregNo+1]          */
      IRTemp t2 = newTemp(Ity_F64);   /* combined result      */
      IRTemp t3 = newTemp(Ity_I32);   /* low half as bits     */
      IRTemp t4 = newTemp(Ity_I32);   /* high half as bits    */
      IRTemp t5 = newTemp(Ity_I64);   /* combined bit pattern */

      assign(t0, getFReg(dregNo));
      assign(t1, getFReg(dregNo + 1));

      assign(t3, unop(Iop_ReinterpF32asI32, mkexpr(t0)));
      assign(t4, unop(Iop_ReinterpF32asI32, mkexpr(t1)));
      assign(t5, binop(Iop_32HLto64, mkexpr(t4), mkexpr(t3)));
      assign(t2, unop(Iop_ReinterpI64asF64, mkexpr(t5)));

      return mkexpr(t2);
   }
}
+
+/* Write e into single-precision FP register dregNo.  e must already have
+   the register width for the current FP mode (F64 when fp_mode64, F32
+   otherwise); the type is asserted, not converted. */
+static void putFReg(UInt dregNo, IRExpr * e)
+{
+   vassert(dregNo < 32);
+   IRType ty = fp_mode64 ? Ity_F64 : Ity_F32;
+   vassert(typeOfIRExpr(irsb->tyenv, e) == ty);
+   stmt(IRStmt_Put(floatGuestRegOffset(dregNo), e));
+}
+
+/* Write the F64 expression e to double-precision register dregNo.
+   With 64-bit FP registers this is a single put; otherwise the value is
+   split and stored into the pair (dregNo = low word, dregNo+1 = high
+   word), mirroring getDReg. */
+static void putDReg(UInt dregNo, IRExpr * e)
+{
+   if (fp_mode64) {
+      vassert(dregNo < 32);
+      IRType ty = Ity_F64;
+      vassert(typeOfIRExpr(irsb->tyenv, e) == ty);
+      stmt(IRStmt_Put(floatGuestRegOffset(dregNo), e));
+   } else {
+      vassert(dregNo < 32);
+      vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64);
+      IRTemp t1 = newTemp(Ity_F64);
+      IRTemp t4 = newTemp(Ity_I32);
+      IRTemp t5 = newTemp(Ity_I32);
+      IRTemp t6 = newTemp(Ity_I64);
+      assign(t1, e);
+      /* Reinterpret as I64, split into halves, and store each half as an
+         F32 into the corresponding register of the pair. */
+      assign(t6, unop(Iop_ReinterpF64asI64, mkexpr(t1)));
+      assign(t4, unop(Iop_64HIto32, mkexpr(t6)));  /* hi */
+      assign(t5, unop(Iop_64to32, mkexpr(t6)));    /* lo */
+      putFReg(dregNo, unop(Iop_ReinterpI32asF32, mkexpr(t5)));
+      putFReg(dregNo + 1, unop(Iop_ReinterpI32asF32, mkexpr(t4)));
+   }
+}
+
+/* Set FPU condition-code bit cc in the FCSR to the value of e.
+   FCSR layout: FCC0 lives at bit 23; FCC1..FCC7 live at bits 25..31,
+   i.e. bit (24 + cc) for cc >= 1.  The target bit is first cleared,
+   then OR-ed with e shifted into position.
+   NOTE(review): assumes e is an Ity_I32 expression with value 0 or 1;
+   a wider value would corrupt neighbouring FCSR bits -- confirm at
+   call sites. */
+static void setFPUCondCode(IRExpr * e, UInt cc)
+{
+   if (cc == 0) {
+      /* Clear bit 23 (mask 0xFF7FFFFF), then deposit e there. */
+      putFCSR(binop(Iop_And32, getFCSR(), mkU32(0xFF7FFFFF)));
+      putFCSR(binop(Iop_Or32, getFCSR(), binop(Iop_Shl32, e, mkU8(23))));
+   } else {
+      /* Clear bit (24+cc): 0x01000000 << cc selects it. */
+      putFCSR(binop(Iop_And32, getFCSR(), unop(Iop_Not32,
+                               binop(Iop_Shl32, mkU32(0x01000000), mkU8(cc)))));
+      putFCSR(binop(Iop_Or32, getFCSR(), binop(Iop_Shl32, e, mkU8(24 + cc))));
+   }
+}
+
+/* Translate the guest FCSR rounding-mode field (FCSR bits 1:0) into the
+   IR rounding-mode encoding.  The two encodings differ only in that the
+   values 01 and 11 are swapped, which the XOR trick below performs
+   branch-free. */
+static IRExpr* get_IR_roundingmode ( void )
+{
+/*
+   rounding mode | MIPS | IR
+   ------------------------
+   to nearest    | 00  | 00
+   to zero       | 01  | 11
+   to +infinity  | 10  | 10
+   to -infinity  | 11  | 01
+*/
+   IRTemp rm_MIPS = newTemp(Ity_I32);
+   /* Last two bits in FCSR are rounding mode. */
+
+   if (mode64)
+      assign(rm_MIPS, binop(Iop_And32, IRExpr_Get(offsetof(VexGuestMIPS64State,
+                                       guest_FCSR), Ity_I32), mkU32(3)));
+   else
+      assign(rm_MIPS, binop(Iop_And32, IRExpr_Get(offsetof(VexGuestMIPS32State,
+                                       guest_FCSR), Ity_I32), mkU32(3)));
+
+   /* rm_IR = XOR( rm_MIPS32, (rm_MIPS32 << 1) & 2) */
+
+   return binop(Iop_Xor32, mkexpr(rm_MIPS), binop(Iop_And32,
+                binop(Iop_Shl32, mkexpr(rm_MIPS), mkU8(1)), mkU32(2)));
+}
+
+/* sz, ULong -> IRExpr */
+/* sz, ULong -> IRExpr */
+/* Build an immediate of width ty (I32 or I64 only) from imm64,
+   truncating to 32 bits in the I32 case. */
+static IRExpr *mkSzImm ( IRType ty, ULong imm64 )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ty == Ity_I64 ? mkU64(imm64) : mkU32((UInt) imm64);
+}
+
+/* Build an IRConst of width ty (I32 or I64 only) from imm64,
+   truncating to 32 bits in the I32 case. */
+static IRConst *mkSzConst ( IRType ty, ULong imm64 )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return (ty == Ity_I64 ? IRConst_U64(imm64) : IRConst_U32((UInt) imm64));
+}
+
+/* Make sure we get valid 32 and 64bit addresses */
+static Addr64 mkSzAddr ( IRType ty, Addr64 addr )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return (ty == Ity_I64 ? (Addr64) addr :
+                           (Addr64) extend_s_32to64(toUInt(addr)));
+}
+
+/* Shift and Rotate instructions for MIPS64 */
+static Bool dis_instr_shrt ( UInt theInstr )
+{
+   UInt opc2 = get_function(theInstr);
+   UChar regRs = get_rs(theInstr);
+   UChar regRt = get_rt(theInstr);
+   UChar regRd = get_rd(theInstr);
+   UChar uImmsa = get_sa(theInstr);
+   Long sImmsa = extend_s_16to64(uImmsa);
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp tmp = newTemp(ty);
+   IRTemp tmpOr = newTemp(ty);
+   IRTemp tmpRt = newTemp(ty);
+   IRTemp tmpRs = newTemp(ty);
+   IRTemp tmpRd = newTemp(ty);
+
+   assign(tmpRs, getIReg(regRs));
+   assign(tmpRt, getIReg(regRt));
+
+   switch (opc2) {
+      case 0x3A:
+         if ((regRs & 0x01) == 0) {
+            /* Doubleword Shift Right Logical - DSRL; MIPS64 */
+            DIP("dsrl r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+            assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa)));
+            putIReg(regRd, mkexpr(tmpRd));
+         } else if ((regRs & 0x01) == 1) {
+            /* Doubleword Rotate Right - DROTR; MIPS64r2 */
+            vassert(mode64);
+            DIP("drotr r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+            IRTemp tmpL = newTemp(ty);
+            IRTemp tmpR = newTemp(ty);
+            assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa)));
+            assign(tmp, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(63 - uImmsa)));
+            assign(tmpL, binop(Iop_Shl64, mkexpr(tmp), mkU8(1)));
+            assign(tmpRd, binop(Iop_Or64, mkexpr(tmpL), mkexpr(tmpR)));
+            putIReg(regRd, mkexpr(tmpRd));
+         } else
+            return False;
+         break;
+
+      case 0x3E:
+         if ((regRs & 0x01) == 0) {
+            /* Doubleword Shift Right Logical Plus 32 - DSRL32; MIPS64 */
+            DIP("dsrl32 r%u, r%u, %d", regRd, regRt, (Int)(sImmsa + 32));
+            assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
+            putIReg(regRd, mkexpr(tmpRd));
+         } else if ((regRs & 0x01) == 1) {
+            /* Doubleword Rotate Right Plus 32 - DROTR32; MIPS64r2 */
+            DIP("drotr32 r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+            vassert(mode64);
+            IRTemp tmpL = newTemp(ty);
+            IRTemp tmpR = newTemp(ty);
+            /* (tmpRt >> sa) | (tmpRt << (64 - sa)) */
+            assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
+            assign(tmp, binop(Iop_Shl64, mkexpr(tmpRt),
+                              mkU8(63 - (uImmsa + 32))));
+            assign(tmpL, binop(Iop_Shl64, mkexpr(tmp), mkU8(1)));
+            assign(tmpRd, binop(Iop_Or64, mkexpr(tmpL), mkexpr(tmpR)));
+            putIReg(regRd, mkexpr(tmpRd));
+         } else
+            return False;
+         break;
+
+      case 0x16:
+         if ((uImmsa & 0x01) == 0) {
+            /* Doubleword Shift Right Logical Variable - DSRLV; MIPS64 */
+            DIP("dsrlv r%u, r%u, r%u", regRd, regRt, regRs);
+            IRTemp tmpRs8 = newTemp(Ity_I8);
+            /* s = tmpRs[5..0] */
+            assign(tmp, binop(Iop_And64, mkexpr(tmpRs), mkU64(63)));
+            assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp)));
+            assign(tmpRd, binop(Iop_Shr64, mkexpr(tmpRt), mkexpr(tmpRs8)));
+            putIReg(regRd, mkexpr(tmpRd));
+         } else if ((uImmsa & 0x01) == 1) {
+            /* Doubleword Rotate Right Variable - DROTRV; MIPS64r2 */
+            DIP("drotrv r%u, r%u, r%u", regRd, regRt, regRs);
+            IRTemp tmpL = newTemp(ty);
+            IRTemp tmpR = newTemp(ty);
+            IRTemp tmpRs8 = newTemp(Ity_I8);
+            IRTemp tmpLs8 = newTemp(Ity_I8);
+            IRTemp tmp64 = newTemp(ty);
+            /* s = tmpRs[5...0]
+               m = 64 - s
+               (tmpRt << s) | (tmpRt >> m) */
+
+            assign(tmp64, binop(Iop_And64, mkexpr(tmpRs), mkSzImm(ty, 63)));
+            assign(tmp, binop(Iop_Sub64, mkU64(63), mkexpr(tmp64)));
+
+            assign(tmpLs8, mkNarrowTo8(ty, mkexpr(tmp)));
+            assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp64)));
+
+            assign(tmpR, binop(Iop_Shr64, mkexpr(tmpRt), mkexpr(tmpRs8)));
+            assign(tmpL, binop(Iop_Shl64, mkexpr(tmpRt), mkexpr(tmpLs8)));
+            assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpL), mkU8(1)));
+            assign(tmpOr, binop(Iop_Or64, mkexpr(tmpRd), mkexpr(tmpR)));
+
+            putIReg(regRd, mkexpr(tmpOr));
+         } else
+            return False;
+         break;
+
+      case 0x38:  /* Doubleword Shift Left Logical - DSLL; MIPS64 */
+         DIP("dsll r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+         vassert(mode64);
+         assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa)));
+         putIReg(regRd, mkexpr(tmpRd));
+         break;
+
+      case 0x3C:  /* Doubleword Shift Left Logical Plus 32 - DSLL32; MIPS64 */
+         DIP("dsll32 r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+         assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
+         putIReg(regRd, mkexpr(tmpRd));
+         break;
+
+      case 0x14: {  /* Doubleword Shift Left Logical Variable - DSLLV; MIPS64 */
+         DIP("dsllv r%u, r%u, r%u", regRd, regRt, regRs);
+         IRTemp tmpRs8 = newTemp(Ity_I8);
+
+         assign(tmp, binop(Iop_And64, mkexpr(tmpRs), mkSzImm(ty, 63)));
+         assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp)));
+         assign(tmpRd, binop(Iop_Shl64, mkexpr(tmpRt), mkexpr(tmpRs8)));
+         putIReg(regRd, mkexpr(tmpRd));
+         break;
+      }
+
+      case 0x3B:  /* Doubleword Shift Right Arithmetic - DSRA; MIPS64 */
+         DIP("dsra r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+         assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkU8(uImmsa)));
+         putIReg(regRd, mkexpr(tmpRd));
+         break;
+
+      case 0x3F:  /* Doubleword Shift Right Arithmetic Plus 32 - DSRA32;
+                     MIPS64 */
+         DIP("dsra32 r%u, r%u, %d", regRd, regRt, (Int)sImmsa);
+         assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkU8(uImmsa + 32)));
+         putIReg(regRd, mkexpr(tmpRd));
+         break;
+
+      case 0x17: {  /* Doubleword Shift Right Arithmetic Variable - DSRAV;
+                       MIPS64 */
+         DIP("dsrav r%u, r%u, r%u", regRd, regRt, regRs);
+         IRTemp tmpRs8 = newTemp(Ity_I8);
+         assign(tmp, binop(Iop_And64, mkexpr(tmpRs), mkSzImm(ty, 63)));
+         assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp)));
+         assign(tmpRd, binop(Iop_Sar64, mkexpr(tmpRt), mkexpr(tmpRs8)));
+         putIReg(regRd, mkexpr(tmpRd));
+         break;
+
+      }
+
+      default:
+         return False;
+
+   }
+   return True;
+}
+
+/* Widen an 8-bit IROp to the operation of width ty (I8/I16/I32/I64).
+   Relies on the IROp enumeration laying out each op family as four
+   consecutive values ordered 8, 16, 32, 64, so the widened op is simply
+   op8 + adj. */
+static IROp mkSzOp ( IRType ty, IROp op8 )
+{
+   Int adj;
+   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ty == Ity_I64);
+   vassert(op8 == Iop_Add8 || op8 == Iop_Sub8 || op8 == Iop_Mul8
+           || op8 == Iop_Or8 || op8 == Iop_And8 || op8 == Iop_Xor8
+           || op8 == Iop_Shl8 || op8 == Iop_Shr8 || op8 == Iop_Sar8
+           || op8 == Iop_CmpEQ8 || op8 == Iop_CmpNE8 || op8 == Iop_Not8);
+   adj = ty == Ity_I8 ? 0 : (ty == Ity_I16 ? 1 : (ty == Ity_I32 ? 2 : 3));
+   return adj + op8;
+}
+
+/*********************************************************/
+/*---             Floating Point Compare              ---*/
+/*********************************************************/
+/* Function that returns a string that represent mips cond
+   mnemonic for the input code. */
+/* Map a C.cond.fmt condition-field value (0..15) to its MIPS assembly
+   mnemonic suffix.  Panics on any other value. */
+static const HChar* showCondCode(UInt code) {
+   const HChar* ret;
+   switch (code) {
+      case 0: ret = "f"; break;
+      case 1: ret = "un"; break;
+      case 2: ret = "eq"; break;
+      case 3: ret = "ueq"; break;
+      case 4: ret = "olt"; break;
+      case 5: ret = "ult"; break;
+      case 6: ret = "ole"; break;
+      case 7: ret = "ule"; break;
+      case 8: ret = "sf"; break;
+      case 9: ret = "ngle"; break;
+      case 10: ret = "seq"; break;
+      case 11: ret = "ngl"; break;
+      case 12: ret = "lt"; break;
+      case 13: ret = "nge"; break;
+      case 14: ret = "le"; break;
+      case 15: ret = "ngt"; break;
+      default: vpanic("showCondCode"); break;
+   }
+   return ret;
+}
+
+/* Translate C.cond.fmt (floating-point compare) for single precision
+   (fmt 0x10) and double precision (fmt 0x11).  The IR CmpF64 result is
+   remapped to the MIPS one-hot encoding (UN=1, EQ=2, GT=4, LT=8) and the
+   condition selected by `cond` is written into FCSR condition bit fpc_cc
+   via setFPUCondCode.  Returns True on successful decode, False
+   otherwise. */
+static Bool dis_instr_CCondFmt ( UInt cins )
+{
+   IRTemp t0, t1, t2, t3, tmp5, tmp6;
+   IRTemp ccIR = newTemp(Ity_I32);
+   IRTemp ccMIPS = newTemp(Ity_I32);
+   UInt FC = get_FC(cins);
+   UInt fmt = get_fmt(cins);
+   UInt fs = get_fs(cins);
+   UInt ft = get_ft(cins);
+   UInt cond = get_cond(cins);
+
+   if (FC == 0x3) {  /* C.cond.fmt */
+      UInt fpc_cc = get_fpc_cc(cins);
+      switch (fmt) {
+         case 0x10: {  /* C.cond.S */
+            DIP("c.%s.s %d, f%d, f%d", showCondCode(cond), fpc_cc, fs, ft);
+            if (fp_mode64) {
+               t0 = newTemp(Ity_I32);
+               t1 = newTemp(Ity_I32);
+               t2 = newTemp(Ity_I32);
+               t3 = newTemp(Ity_I32);
+
+               tmp5 = newTemp(Ity_F64);
+               tmp6 = newTemp(Ity_F64);
+
+               /* Widen the F32 operands (low half of the 64-bit FP regs)
+                  to F64 so a single CmpF64 covers both precisions. */
+               assign(tmp5, unop(Iop_F32toF64, getLoFromF64(Ity_F64,
+                                 getFReg(fs))));
+               assign(tmp6, unop(Iop_F32toF64, getLoFromF64(Ity_F64,
+                                 getFReg(ft))));
+
+               assign(ccIR, binop(Iop_CmpF64, mkexpr(tmp5), mkexpr(tmp6)));
+               /* NOTE(review): the raw IR compare result (and, below, the
+                  MIPS-mapped result) are also written into HI/LO here --
+                  unusual for a compare, and only done on the fp_mode64
+                  path; confirm this is intentional. */
+               putHI(mkWidenFrom32(mode64 ? Ity_I64: Ity_I32,
+                                   mkexpr(ccIR), True));
+               /* Map compare result from IR to MIPS
+                  FP cmp result | MIPS | IR
+                  --------------------------
+                  UN            | 0x1 | 0x45
+                  EQ            | 0x2 | 0x40
+                  GT            | 0x4 | 0x00
+                  LT            | 0x8 | 0x01
+                */
+
+               /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
+               assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,
+                              binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32,
+                              binop(Iop_Shr32, mkexpr(ccIR),mkU8(5))),mkU32(2)),
+                              binop(Iop_And32, binop(Iop_Xor32, mkexpr(ccIR),
+                              binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))),
+                              mkU32(1))))));
+               putLO(mkWidenFrom32(mode64 ? Ity_I64: Ity_I32,
+                                   mkexpr(ccMIPS), True));
+
+               /* UN */
+               assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1)));
+               /* EQ */
+               assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+                                                 mkU8(0x1)), mkU32(0x1)));
+               /* NGT */
+               assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32,
+                                 mkexpr(ccMIPS), mkU8(0x2))),mkU32(0x1)));
+               /* LT */
+               assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+                                                 mkU8(0x3)), mkU32(0x1)));
+               /* cond 0x0-0x7 and 0x8-0xF pairs map to the same predicate;
+                  they differ only in signalling behaviour, which is not
+                  modelled here. */
+               switch (cond) {
+                  case 0x0:
+                     setFPUCondCode(mkU32(0), fpc_cc);
+                     break;
+                  case 0x1:
+                     setFPUCondCode(mkexpr(t0), fpc_cc);
+                     break;
+                  case 0x2:
+                     setFPUCondCode(mkexpr(t1), fpc_cc);
+                     break;
+                  case 0x3:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                                          fpc_cc);
+                     break;
+                  case 0x4:
+                     setFPUCondCode(mkexpr(t3), fpc_cc);
+                     break;
+                  case 0x5:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+                                          fpc_cc);
+                     break;
+                  case 0x6:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+                                          fpc_cc);
+                     break;
+                  case 0x7:
+                     setFPUCondCode(mkexpr(t2), fpc_cc);
+                     break;
+                  case 0x8:
+                     setFPUCondCode(mkU32(0), fpc_cc);
+                     break;
+                  case 0x9:
+                     setFPUCondCode(mkexpr(t0), fpc_cc);
+                     break;
+                  case 0xA:
+                     setFPUCondCode(mkexpr(t1), fpc_cc);
+                     break;
+                  case 0xB:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                                          fpc_cc);
+                     break;
+                  case 0xC:
+                     setFPUCondCode(mkexpr(t3), fpc_cc);
+                     break;
+                  case 0xD:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+                                          fpc_cc);
+                     break;
+                  case 0xE:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+                                          fpc_cc);
+                     break;
+                  case 0xF:
+                     setFPUCondCode(mkexpr(t2), fpc_cc);
+                     break;
+
+                  default:
+                     return False;
+               }
+
+            } else {
+               t0 = newTemp(Ity_I32);
+               t1 = newTemp(Ity_I32);
+               t2 = newTemp(Ity_I32);
+               t3 = newTemp(Ity_I32);
+
+               assign(ccIR, binop(Iop_CmpF64, unop(Iop_F32toF64, getFReg(fs)),
+                                  unop(Iop_F32toF64, getFReg(ft))));
+               /* Map compare result from IR to MIPS
+                  FP cmp result | MIPS | IR
+                  --------------------------
+                  UN            | 0x1 | 0x45
+                  EQ            | 0x2 | 0x40
+                  GT            | 0x4 | 0x00
+                  LT            | 0x8 | 0x01
+                */
+
+               /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
+               assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,
+                              binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32,
+                              binop(Iop_Shr32, mkexpr(ccIR), mkU8(5))),
+                                    mkU32(2)), binop(Iop_And32,
+                              binop(Iop_Xor32, mkexpr(ccIR),
+                              binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))),
+                              mkU32(1))))));
+               /* UN */
+               assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1)));
+               /* EQ */
+               assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+                      mkU8(0x1)), mkU32(0x1)));
+               /* NGT */
+               assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32,
+                      mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1)));
+               /* LT */
+               assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+                      mkU8(0x3)), mkU32(0x1)));
+
+               switch (cond) {
+                  case 0x0:
+                     setFPUCondCode(mkU32(0), fpc_cc);
+                     break;
+                  case 0x1:
+                     setFPUCondCode(mkexpr(t0), fpc_cc);
+                     break;
+                  case 0x2:
+                     setFPUCondCode(mkexpr(t1), fpc_cc);
+                     break;
+                  case 0x3:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                                          fpc_cc);
+                     break;
+                  case 0x4:
+                     setFPUCondCode(mkexpr(t3), fpc_cc);
+                     break;
+                  case 0x5:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+                                          fpc_cc);
+                     break;
+                  case 0x6:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+                                          fpc_cc);
+                     break;
+                  case 0x7:
+                     setFPUCondCode(mkexpr(t2), fpc_cc);
+                     break;
+                  case 0x8:
+                     setFPUCondCode(mkU32(0), fpc_cc);
+                     break;
+                  case 0x9:
+                     setFPUCondCode(mkexpr(t0), fpc_cc);
+                     break;
+                  case 0xA:
+                     setFPUCondCode(mkexpr(t1), fpc_cc);
+                     break;
+                  case 0xB:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                                          fpc_cc);
+                     break;
+                  case 0xC:
+                     setFPUCondCode(mkexpr(t3), fpc_cc);
+                     break;
+                  case 0xD:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+                                          fpc_cc);
+                     break;
+                  case 0xE:
+                     setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+                                          fpc_cc);
+                     break;
+                  case 0xF:
+                     setFPUCondCode(mkexpr(t2), fpc_cc);
+                     break;
+
+                  default:
+                     return False;
+               }
+            }
+         }
+            break;
+
+         case 0x11: {  /* C.cond.D */
+            DIP("c.%s.d %d, f%d, f%d", showCondCode(cond), fpc_cc, fs, ft);
+            t0 = newTemp(Ity_I32);
+            t1 = newTemp(Ity_I32);
+            t2 = newTemp(Ity_I32);
+            t3 = newTemp(Ity_I32);
+            assign(ccIR, binop(Iop_CmpF64, getDReg(fs), getDReg(ft)));
+            /* Map compare result from IR to MIPS
+               FP cmp result | MIPS | IR
+               --------------------------
+               UN            | 0x1 | 0x45
+               EQ            | 0x2 | 0x40
+               GT            | 0x4 | 0x00
+               LT            | 0x8 | 0x01
+             */
+
+            /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
+            assign(ccMIPS, binop(Iop_Shl32, mkU32(1), unop(Iop_32to8,
+                           binop(Iop_Or32, binop(Iop_And32, unop(Iop_Not32,
+                           binop(Iop_Shr32, mkexpr(ccIR), mkU8(5))), mkU32(2)),
+                           binop(Iop_And32, binop(Iop_Xor32, mkexpr(ccIR),
+                           binop(Iop_Shr32, mkexpr(ccIR), mkU8(6))),
+                           mkU32(1))))));
+
+            /* UN */
+            assign(t0, binop(Iop_And32, mkexpr(ccMIPS), mkU32(0x1)));
+            /* EQ */
+            assign(t1, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+                   mkU8(0x1)), mkU32(0x1)));
+            /* NGT */
+            assign(t2, binop(Iop_And32, unop(Iop_Not32, binop(Iop_Shr32,
+                   mkexpr(ccMIPS), mkU8(0x2))), mkU32(0x1)));
+            /* LT */
+            assign(t3, binop(Iop_And32, binop(Iop_Shr32, mkexpr(ccMIPS),
+                   mkU8(0x3)), mkU32(0x1)));
+
+            switch (cond) {
+               case 0x0:
+                  setFPUCondCode(mkU32(0), fpc_cc);
+                  break;
+               case 0x1:
+                  setFPUCondCode(mkexpr(t0), fpc_cc);
+                  break;
+               case 0x2:
+                  setFPUCondCode(mkexpr(t1), fpc_cc);
+                  break;
+               case 0x3:
+                  setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                                       fpc_cc);
+                  break;
+               case 0x4:
+                  setFPUCondCode(mkexpr(t3), fpc_cc);
+                  break;
+               case 0x5:
+                  setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+                                       fpc_cc);
+                  break;
+               case 0x6:
+                  setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+                                       fpc_cc);
+                  break;
+               case 0x7:
+                  setFPUCondCode(mkexpr(t2), fpc_cc);
+                  break;
+               case 0x8:
+                  setFPUCondCode(mkU32(0), fpc_cc);
+                  break;
+               case 0x9:
+                  setFPUCondCode(mkexpr(t0), fpc_cc);
+                  break;
+               case 0xA:
+                  setFPUCondCode(mkexpr(t1), fpc_cc);
+                  break;
+               case 0xB:
+                  setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                                       fpc_cc);
+                  break;
+               case 0xC:
+                  setFPUCondCode(mkexpr(t3), fpc_cc);
+                  break;
+               case 0xD:
+                  setFPUCondCode(binop(Iop_Or32, mkexpr(t0), mkexpr(t3)),
+                                       fpc_cc);
+                  break;
+               case 0xE:
+                  setFPUCondCode(binop(Iop_Or32, mkexpr(t3), mkexpr(t1)),
+                                       fpc_cc);
+                  break;
+               case 0xF:
+                  setFPUCondCode(mkexpr(t2), fpc_cc);
+                  break;
+               default:
+                  return False;
+            }
+         }
+         break;
+
+         default:
+            return False;
+      }
+   } else {
+      return False;
+   }
+
+   return True;
+}
+
+/*********************************************************/
+/*---        Branch Instructions for mips64           ---*/
+/*********************************************************/
+/* Disassemble REGIMM conditional branch instructions (BLTZ, BGEZ,
+   BLTZAL, BGEZAL).  On success the conditional exit is returned through
+   *set (the caller schedules it after the delay slot) and True is
+   returned; False means the instruction was not decoded and *set is
+   untouched.  For the -AL forms, r31 is unconditionally set to the
+   address of the second instruction after the branch (cia + 8).
+   NOTE(review): dres, resteerOkFn and callback_opaque are currently
+   unused; kept for signature compatibility with the other dis_* helpers.
+
+   Fix: the inner switch over the rt field previously had no default
+   case, so undecoded rt encodings (e.g. 0x02 BLTZL, 0x12 BLTZALL) fell
+   through and emitted an IRStmt_Exit with jmpKind still 0 and target
+   address 0.  They now correctly return False. */
+static Bool dis_instr_branch ( UInt theInstr, DisResult * dres,
+                               Bool(*resteerOkFn) (void *, Addr),
+                               void *callback_opaque, IRStmt ** set )
+{
+   UInt jmpKind = 0;
+   UChar opc1 = get_opcode(theInstr);
+   UChar regRs = get_rs(theInstr);
+   UChar regRt = get_rt(theInstr);
+   UInt offset = get_imm(theInstr);
+   Long sOffset = extend_s_16to64(offset);
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   IROp opSlt = mode64 ? Iop_CmpLT64S : Iop_CmpLT32S;
+
+   IRTemp tmp = newTemp(ty);
+   IRTemp tmpRs = newTemp(ty);
+   IRTemp tmpRt = newTemp(ty);
+   IRTemp tmpLt = newTemp(ty);
+   IRTemp tmpReg0 = newTemp(ty);
+
+   UChar regLnk = 31;   /* reg 31 is link reg in MIPS */
+   Addr64 addrTgt = 0;
+   Addr64 cia = guest_PC_curr_instr;
+
+   IRExpr *eConst0 = mkSzImm(ty, (UInt) 0);
+   IRExpr *eNia = mkSzImm(ty, cia + 8);
+   IRExpr *eCond = NULL;
+
+   assign(tmpRs, getIReg(regRs));
+   assign(tmpRt, getIReg(regRt));
+   assign(tmpReg0, getIReg(0));
+
+   /* Default condition: r0 != r0, i.e. never taken; every decoded case
+      below overwrites it. */
+   eCond = binop(mkSzOp(ty, Iop_CmpNE8), mkexpr(tmpReg0), mkexpr(tmpReg0));
+
+   switch (opc1) {
+      case 0x01:
+         switch (regRt) {
+            case 0x00: {  /* BLTZ rs, offset */
+               /* Target = address of delay slot + (sign-extended offset
+                  << 2). */
+               addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2));
+               IRTemp tmpLtRes = newTemp(Ity_I1);
+
+               assign(tmp, eConst0);
+               assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), mkexpr(tmp)));
+               assign(tmpLt, mode64 ? unop(Iop_1Uto64, mkexpr(tmpLtRes)) :
+                      unop(Iop_1Uto32, mkexpr(tmpLtRes)));
+
+               /* Taken when (rs < 0) != 0. */
+               eCond = binop(mkSzOp(ty, Iop_CmpNE8), mkexpr(tmpLt),
+                             mkexpr(tmpReg0));
+
+               jmpKind = Ijk_Boring;
+               break;
+            }
+
+            case 0x01: {  /* BGEZ rs, offset */
+               IRTemp tmpLtRes = newTemp(Ity_I1);
+               addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2));
+
+               assign(tmp, eConst0);
+               assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), mkexpr(tmp)));
+               assign(tmpLt, mode64 ? unop(Iop_1Uto64, mkexpr(tmpLtRes)) :
+                                      unop(Iop_1Uto32, mkexpr(tmpLtRes)));
+               /* Taken when (rs < 0) == 0, i.e. rs >= 0. */
+               eCond = binop(mkSzOp(ty, Iop_CmpEQ8), mkexpr(tmpLt),
+                                    mkexpr(tmpReg0));
+
+               jmpKind = Ijk_Boring;
+               break;
+            }
+
+            case 0x11: {  /* BGEZAL rs, offset */
+               addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2));
+               /* Link is written unconditionally, per the ISA. */
+               putIReg(regLnk, eNia);
+               IRTemp tmpLtRes = newTemp(Ity_I1);
+
+               assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), eConst0));
+               assign(tmpLt, mode64 ? unop(Iop_1Uto64, mkexpr(tmpLtRes)) :
+                                      unop(Iop_1Uto32, mkexpr(tmpLtRes)));
+
+               eCond = binop(mkSzOp(ty, Iop_CmpEQ8), mkexpr(tmpLt),
+                                    mkexpr(tmpReg0));
+
+               jmpKind = Ijk_Call;
+               break;
+            }
+
+            case 0x10: {  /* BLTZAL rs, offset */
+               IRTemp tmpLtRes = newTemp(Ity_I1);
+               IRTemp tmpRes = newTemp(ty);
+
+               addrTgt = mkSzAddr(ty, cia + 4 + (sOffset << 2));
+               putIReg(regLnk, eNia);
+
+               assign(tmp, eConst0);
+               assign(tmpLtRes, binop(opSlt, mkexpr(tmpRs), mkexpr(tmp)));
+               assign(tmpRes, mode64 ? unop(Iop_1Uto64,
+                      mkexpr(tmpLtRes)) : unop(Iop_1Uto32, mkexpr(tmpLtRes)));
+               eCond = binop(mkSzOp(ty, Iop_CmpNE8), mkexpr(tmpRes),
+                                                     mkexpr(tmpReg0));
+
+               jmpKind = Ijk_Call;
+               break;
+            }
+
+            default:
+               /* Undecoded rt encoding (likelies, traps, ...): refuse
+                  rather than emit a bogus exit with jmpKind 0 and
+                  target 0. */
+               return False;
+         }
+         break;
+      default:
+         return False;
+   }
+   *set = IRStmt_Exit(eCond, jmpKind, mkSzConst(ty, addrTgt), OFFB_PC);
+   return True;
+}
+
+/*********************************************************/
+/*---         Cavium Specific Instructions            ---*/
+/*********************************************************/
+
+/* Convenience function to yield to thread scheduler */
+static void jump_back(IRExpr *condition)
+{
+   stmt( IRStmt_Exit(condition,
+                     Ijk_Yield,
+                     IRConst_U64( guest_PC_curr_instr ),
+                     OFFB_PC) );
+}
+
+/* Based on s390_irgen_load_and_add32. */
+static void mips_irgen_load_and_add32(IRTemp op1addr, IRTemp new_val,
+                                      UChar rd, Bool putIntoRd)
+{
+   IRCAS *cas;
+   IRTemp old_mem = newTemp(Ity_I32);
+   IRTemp expd    = newTemp(Ity_I32);
+
+   assign(expd, load(Ity_I32, mkexpr(op1addr)));
+
+   cas = mkIRCAS(IRTemp_INVALID, old_mem,
+                 Iend_LE, mkexpr(op1addr),
+                 NULL, mkexpr(expd), /* expected value */
+                 NULL, mkexpr(new_val)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* If old_mem contains the expected value, then the CAS succeeded.
+      Otherwise, it did not */
+   jump_back(binop(Iop_CmpNE32, mkexpr(old_mem), mkexpr(expd)));
+   if (putIntoRd)
+      putIReg(rd, mkWidenFrom32(Ity_I64, mkexpr(old_mem), True));
+}
+
+/* Based on s390_irgen_load_and_add64. */
+static void mips_irgen_load_and_add64(IRTemp op1addr, IRTemp new_val,
+                                      UChar rd, Bool putIntoRd)
+{
+   IRCAS *cas;
+   IRTemp old_mem = newTemp(Ity_I64);
+   IRTemp expd    = newTemp(Ity_I64);
+
+   assign(expd, load(Ity_I64, mkexpr(op1addr)));
+
+   cas = mkIRCAS(IRTemp_INVALID, old_mem,
+                 Iend_LE, mkexpr(op1addr),
+                 NULL, mkexpr(expd), /* expected value */
+                 NULL, mkexpr(new_val)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* If old_mem contains the expected value, then the CAS succeeded.
+      Otherwise, it did not */
+   jump_back(binop(Iop_CmpNE64, mkexpr(old_mem), mkexpr(expd)));
+   if (putIntoRd)
+      putIReg(rd, mkexpr(old_mem));
+}
+
+/* Disassemble a single Cavium (CVM / OCTEON) specific instruction.
+   theInstr is the raw 32-bit instruction word.  Decodes OCTEON
+   SPECIAL2 (opc1 0x1C: DMUL, SAA/SAAD, LAx atomics, BADDU, POP/DPOP,
+   CINS/CINS32, EXTS/EXTS32, SEQ/SNE and immediate forms) and SPECIAL3
+   (opc1 0x1F: indexed loads) instructions, emitting IR for each.
+   Returns True if the instruction was decoded, False if it is not
+   recognised (the caller then treats it as undecodable). */
+static Bool dis_instr_CVM ( UInt theInstr )
+{
+   UChar  opc2     = get_function(theInstr);
+   UChar  opc1     = get_opcode(theInstr);
+   UChar  regRs    = get_rs(theInstr);
+   UChar  regRt    = get_rt(theInstr);
+   UChar  regRd    = get_rd(theInstr);
+   /* MIPS trap instructions extract code from theInstr[15:6].
+      Cavium OCTEON instructions SNEI, SEQI extract immediate operands
+      from the same bit field [15:6]. */
+   UInt   imm      = get_code(theInstr);
+   UChar  lenM1    = get_msb(theInstr);  /* field length minus one */
+   UChar  p        = get_lsb(theInstr);  /* bit position */
+   IRType ty       = mode64? Ity_I64 : Ity_I32;
+   IRTemp tmp      = newTemp(ty);
+   IRTemp tmpRs    = newTemp(ty);
+   IRTemp tmpRt    = newTemp(ty);
+   IRTemp t1       = newTemp(ty);
+   UInt size;
+   assign(tmpRs, getIReg(regRs));
+
+   switch(opc1) {
+      case 0x1C: {  /* SPECIAL2 opcode space */
+         switch(opc2) {
+            case 0x03: {  /* DMUL rd, rs, rt */
+               DIP("dmul r%d, r%d, r%d", regRd, regRs, regRt);
+               IRTemp t0 = newTemp(Ity_I128);
+               /* Full 128-bit product; only the low 64 bits are kept. */
+               assign(t0, binop(Iop_MullU64, getIReg(regRs), getIReg(regRt)));
+               putIReg(regRd, unop(Iop_128to64, mkexpr(t0)));
+               break;
+            }
+
+            case 0x18: {  /* Store Atomic Add Word - SAA; Cavium OCTEON */
+               DIP("saa r%u, (r%u)", regRt, regRs);
+               IRTemp addr = newTemp(Ity_I64);
+               IRTemp new  = newTemp(Ity_I32);
+               assign (addr, getIReg(regRs));
+               assign(new, binop(Iop_Add32,
+                                 load(Ity_I32, mkexpr(addr)),
+                                 mkNarrowTo32(ty, getIReg(regRt))));
+               /* rd unused: SAA does not write a register. */
+               mips_irgen_load_and_add32(addr, new, 0, False);
+               break;
+            }
+
+            /* Store Atomic Add Doubleword - SAAD; Cavium OCTEON */
+            case 0x19: {
+               DIP( "saad r%u, (r%u)", regRt, regRs);
+               IRTemp addr = newTemp(Ity_I64);
+               IRTemp new  = newTemp(Ity_I64);
+               assign (addr, getIReg(regRs));
+               assign(new, binop(Iop_Add64,
+                                 load(Ity_I64, mkexpr(addr)),
+                                 getIReg(regRt)));
+               mips_irgen_load_and_add64(addr, new, 0, False);
+               break;
+            }
+
+            /* LAI, LAID, LAD, LADD, LAS, LASD,
+               LAC, LACD, LAA, LAAD, LAW, LAWD */
+            case 0x1f: {
+               UInt opc3 = get_sa(theInstr);
+               IRTemp addr = newTemp(Ity_I64);
+               switch (opc3) {
+                  /* Load Atomic Increment Word - LAI; Cavium OCTEON2 */
+                  case 0x02: {
+                     DIP("lai r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I32);
+                     assign(addr, getIReg(regRs));
+                     assign(new, binop(Iop_Add32,
+                                       load(Ity_I32, mkexpr(addr)),
+                                       mkU32(1)));
+                     mips_irgen_load_and_add32(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Increment Doubleword - LAID; Cavium OCTEON2 */
+                  case 0x03: {
+                     DIP("laid r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I64);
+                     assign(addr, getIReg(regRs));
+                     assign(new, binop(Iop_Add64,
+                                       load(Ity_I64, mkexpr(addr)),
+                                       mkU64(1)));
+                     mips_irgen_load_and_add64(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Decrement Word - LAD; Cavium OCTEON2 */
+                  case 0x06: {
+                     DIP("lad r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I32);
+                     assign(addr, getIReg(regRs));
+                     assign(new, binop(Iop_Sub32,
+                                       load(Ity_I32, mkexpr(addr)),
+                                       mkU32(1)));
+                     mips_irgen_load_and_add32(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Decrement Doubleword - LADD; Cavium OCTEON2 */
+                  case 0x07: {
+                     DIP("ladd r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I64);
+                     assign (addr, getIReg(regRs));
+                     assign(new, binop(Iop_Sub64,
+                                       load(Ity_I64, mkexpr(addr)),
+                                       mkU64(1)));
+                     mips_irgen_load_and_add64(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Set Word - LAS; Cavium OCTEON2 */
+                  case 0x0a: {
+                     DIP("las r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I32);
+                     assign(addr, getIReg(regRs));
+                     assign(new, mkU32(0xffffffff));
+                     mips_irgen_load_and_add32(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Set Doubleword - LASD; Cavium OCTEON2 */
+                  case 0x0b: {
+                     DIP("lasd r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I64);
+                     assign (addr, getIReg(regRs));
+                     assign(new, mkU64(0xffffffffffffffffULL));
+                     mips_irgen_load_and_add64(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Clear Word - LAC; Cavium OCTEON2 */
+                  case 0x0e: {
+                     DIP("lac r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I32);
+                     assign (addr, getIReg(regRs));
+                     assign(new, mkU32(0));
+                     mips_irgen_load_and_add32(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Clear Doubleword - LACD; Cavium OCTEON2 */
+                  case 0x0f: {
+                     DIP("lacd r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I64);
+                     assign(addr, getIReg(regRs));
+                     assign(new, mkU64(0));
+                     mips_irgen_load_and_add64(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Add Word - LAA; Cavium OCTEON2 */
+                  case 0x12: {
+                     DIP("laa r%u,(r%u),r%u\n", regRd, regRs, regRt);
+                     IRTemp new  = newTemp(Ity_I32);
+                     assign(addr, getIReg(regRs));
+                     assign(new, binop(Iop_Add32,
+                                       load(Ity_I32, mkexpr(addr)),
+                                       mkNarrowTo32(ty, getIReg(regRt))));
+                     mips_irgen_load_and_add32(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Add Doubleword - LAAD; Cavium OCTEON2 */
+                  case 0x13: {
+                     DIP("laad r%u,(r%u),r%u\n", regRd, regRs, regRt);
+                     IRTemp new  = newTemp(Ity_I64);
+                     assign (addr, getIReg(regRs));
+                     assign(new, binop(Iop_Add64,
+                                       load(Ity_I64, mkexpr(addr)),
+                                       getIReg(regRt)));
+                     mips_irgen_load_and_add64(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Swap Word - LAW; Cavium OCTEON2 */
+                  case 0x16: {
+                     DIP("law r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I32);
+                     assign(addr, getIReg(regRs));
+                     assign(new, mkNarrowTo32(ty, getIReg(regRt)));
+                     mips_irgen_load_and_add32(addr, new, regRd, True);
+                     break;
+                  }
+                  /* Load Atomic Swap Doubleword - LAWD; Cavium OCTEON2 */
+                  case 0x17: {
+                     DIP("lawd r%u,(r%u)\n", regRd, regRs);
+                     IRTemp new  = newTemp(Ity_I64);
+                     assign(addr, getIReg(regRs));
+                     assign(new, getIReg(regRt));
+                     mips_irgen_load_and_add64(addr, new, regRd, True);
+                     break;
+                  }
+                  default:
+                     vex_printf("Unknown laxx instruction, opc3=0x%x\n", opc3);
+                     vex_printf("Instruction=0x%08x\n", theInstr);
+                     return False;
+               }
+               break;
+            }
+
+            /* Unsigned Byte Add - BADDU rd, rs, rt; Cavium OCTEON */
+            case 0x28: {
+               DIP("BADDU r%d, r%d, r%d", regRs, regRt, regRd);
+               IRTemp t0 = newTemp(Ity_I8);
+
+               /* 8-bit add of the low bytes; result zero-extended to
+                  register width and masked back to one byte. */
+               assign(t0, binop(Iop_Add8,
+                                mkNarrowTo8(ty, getIReg(regRs)),
+                                mkNarrowTo8(ty, getIReg(regRt))));
+
+               if (mode64)
+                  putIReg(regRd, binop(mkSzOp(ty, Iop_And8),
+                                       unop(Iop_8Uto64, mkexpr(t0)),
+                                       mkSzImm(ty, 0xFF)));
+               else
+                  putIReg(regRd, binop(mkSzOp(ty, Iop_And8),
+                                       unop(Iop_8Uto32, mkexpr(t0)),
+                                       mkSzImm(ty, 0xFF)));
+               break;
+            }
+
+            case 0x2c: {  /* Count Ones in a Word - POP; Cavium OCTEON */
+               /* Classic parallel popcount: five mask/shift/add rounds,
+                  doubling the field width each round. */
+               int i, shift[5];
+               IRTemp mask[5];
+               IRTemp old = newTemp(ty);
+               IRTemp nyu = IRTemp_INVALID;
+               assign(old, getIReg(regRs));
+               DIP("pop r%d, r%d", regRd, regRs);
+
+               for (i = 0; i < 5; i++) {
+                  mask[i] = newTemp(ty);
+                  shift[i] = 1 << i;
+               }
+               if(mode64) {
+                  assign(mask[0], mkU64(0x0000000055555555));
+                  assign(mask[1], mkU64(0x0000000033333333));
+                  assign(mask[2], mkU64(0x000000000F0F0F0F));
+                  assign(mask[3], mkU64(0x0000000000FF00FF));
+                  assign(mask[4], mkU64(0x000000000000FFFF));
+
+                  for (i = 0; i < 5; i++) {
+                     nyu = newTemp(ty);
+                     assign(nyu,
+                            binop(Iop_Add64,
+                                  binop(Iop_And64,
+                                        mkexpr(old), mkexpr(mask[i])),
+                                  binop(Iop_And64,
+                                        binop(Iop_Shr64,
+                                              mkexpr(old), mkU8(shift[i])),
+                                        mkexpr(mask[i]))));
+                     old = nyu;
+                  }
+               } else {
+                  assign(mask[0], mkU32(0x55555555));
+                  assign(mask[1], mkU32(0x33333333));
+                  assign(mask[2], mkU32(0x0F0F0F0F));
+                  assign(mask[3], mkU32(0x00FF00FF));
+                  assign(mask[4], mkU32(0x0000FFFF));
+                  /* Fixed: 'old' was already assigned above.  IR temps
+                     are single-assignment, so re-assigning it here would
+                     fail IR sanity checking in 32-bit mode. */
+
+                  for (i = 0; i < 5; i++) {
+                     nyu = newTemp(ty);
+                     assign(nyu,
+                            binop(Iop_Add32,
+                                  binop(Iop_And32,
+                                        mkexpr(old), mkexpr(mask[i])),
+                                  binop(Iop_And32,
+                                        binop(Iop_Shr32,
+                                              mkexpr(old), mkU8(shift[i])),
+                                        mkexpr(mask[i]))));
+                     old = nyu;
+                  }
+               }
+               putIReg(regRd, mkexpr(nyu));
+               break;
+            }
+
+            /* Count Ones in a Doubleword - DPOP; Cavium OCTEON */
+            case 0x2d: {
+               /* Same parallel popcount as POP, with a sixth round for
+                  64-bit input. */
+               int i, shift[6];
+               IRTemp mask[6];
+               IRTemp old = newTemp(ty);
+               IRTemp nyu = IRTemp_INVALID;
+               DIP("dpop r%d, r%d", regRd, regRs);
+
+               for (i = 0; i < 6; i++) {
+                  mask[i] = newTemp(ty);
+                  shift[i] = 1 << i;
+               }
+               vassert(mode64); /*Caution! Only for Mode 64*/
+               assign(mask[0], mkU64(0x5555555555555555ULL));
+               assign(mask[1], mkU64(0x3333333333333333ULL));
+               assign(mask[2], mkU64(0x0F0F0F0F0F0F0F0FULL));
+               assign(mask[3], mkU64(0x00FF00FF00FF00FFULL));
+               assign(mask[4], mkU64(0x0000FFFF0000FFFFULL));
+               assign(mask[5], mkU64(0x00000000FFFFFFFFULL));
+               assign(old, getIReg(regRs));
+               for (i = 0; i < 6; i++) {
+                  nyu = newTemp(Ity_I64);
+                  assign(nyu,
+                         binop(Iop_Add64,
+                               binop(Iop_And64,
+                                     mkexpr(old), mkexpr(mask[i])),
+                               binop(Iop_And64,
+                                     binop(Iop_Shr64,
+                                           mkexpr(old), mkU8(shift[i])),
+                                     mkexpr(mask[i]))));
+                  old = nyu;
+               }
+               putIReg(regRd, mkexpr(nyu));
+               break;
+            }
+
+            case 0x32:  /* 5. CINS rd, rs, p, lenm1 */
+               /* Extract low (lenM1+1) bits of rs and deposit them at
+                  bit position p, zeroing all other bits. */
+               DIP("cins r%u, r%u, %d, %d\n", regRt, regRs, p, lenM1);
+               assign ( tmp  , binop(Iop_Shl64, mkexpr(tmpRs),
+                                     mkU8(64-( lenM1+1 ))));
+               assign ( tmpRt, binop(Iop_Shr64, mkexpr( tmp ),
+                                     mkU8(64-(p+lenM1+1))));
+               putIReg( regRt, mkexpr(tmpRt));
+               break;
+
+            case 0x33:  /* 6. CINS32 rd, rs, p+32, lenm1 */
+               /* As CINS but with an implicit +32 on the position, so
+                  the right shift is 32-(p+lenM1+1). */
+               DIP("cins32 r%u, r%u, %d, %d\n", regRt, regRs, p+32, lenM1);
+               assign ( tmp  , binop(Iop_Shl64, mkexpr(tmpRs),
+                                     mkU8(64-( lenM1+1 ))));
+               assign ( tmpRt, binop(Iop_Shr64, mkexpr( tmp ),
+                                     mkU8(32-(p+lenM1+1))));
+               putIReg( regRt, mkexpr(tmpRt));
+               break;
+
+            case 0x3A:  /* 3. EXTS rt, rs, p len */
+               /* Sign-extract bit field [p+lenM1 .. p]: shift it up to
+                  the top, then arithmetic-shift it back down. */
+               DIP("exts r%u, r%u, %d, %d\n", regRt, regRs, p, lenM1);
+               size = lenM1 + 1;  /* lenm1+1 */
+               UChar lsAmt = 64 - (p + size);  /* p+lenm1+1 */
+               UChar rsAmt = 64 - size;  /* lenm1+1 */
+               tmp = newTemp(Ity_I64);
+               assign(tmp, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt)));
+               putIReg(regRt, binop(Iop_Sar64, mkexpr(tmp), mkU8(rsAmt)));
+               break;
+
+            case 0x3B:  /* 4. EXTS32 rt, rs, p len */
+               /* As EXTS but the field position has an implicit +32. */
+               DIP("exts32 r%u, r%u, %d, %d\n", regRt, regRs, p, lenM1);
+               assign ( tmp  , binop(Iop_Shl64, mkexpr(tmpRs),
+                                     mkU8(32-(p+lenM1+1))));
+               assign ( tmpRt, binop(Iop_Sar64, mkexpr(tmp),
+                                     mkU8(64-(lenM1+1))) );
+               putIReg( regRt, mkexpr(tmpRt));
+               break;
+
+            case 0x2B:  /* 20. SNE rd, rs, rt */
+               DIP("sne r%d, r%d, r%d", regRd,regRs, regRt);
+               if (mode64)
+                  putIReg(regRd, unop(Iop_1Uto64, binop(Iop_CmpNE64,
+                                                        getIReg(regRs),
+                                                        getIReg(regRt))));
+               else
+                  putIReg(regRd,unop(Iop_1Uto32, binop(Iop_CmpNE32,
+                                                       getIReg(regRs),
+                                                       getIReg(regRt))));
+               break;
+
+            case 0x2A:  /* Set Equals - SEQ; Cavium OCTEON */
+               /* Fixed debug string: rt is a register operand, so print
+                  it as r%d, not as a bare immediate. */
+               DIP("seq r%d, r%d, r%d", regRd, regRs, regRt);
+               if (mode64)
+                  putIReg(regRd, unop(Iop_1Uto64,
+                                      binop(Iop_CmpEQ64, getIReg(regRs),
+                                            getIReg(regRt))));
+               else
+                  putIReg(regRd, unop(Iop_1Uto32,
+                                      binop(Iop_CmpEQ32, getIReg(regRs),
+                                            getIReg(regRt))));
+               break;
+
+            case 0x2E:  /* Set Equals Immediate - SEQI; Cavium OCTEON */
+               DIP("seqi r%d, r%d, %d", regRt, regRs, imm);
+               if (mode64)
+                  putIReg(regRt, unop(Iop_1Uto64,
+                                      binop(Iop_CmpEQ64, getIReg(regRs),
+                                            mkU64(extend_s_10to64(imm)))));
+               else
+                  putIReg(regRt, unop(Iop_1Uto32,
+                                      binop(Iop_CmpEQ32, getIReg(regRs),
+                                            mkU32(extend_s_10to32(imm)))));
+               break;
+
+            case 0x2F:  /* Set Not Equals Immediate - SNEI; Cavium OCTEON */
+               DIP("snei r%d, r%d, %d", regRt, regRs, imm);
+               if (mode64)
+                  putIReg(regRt, unop(Iop_1Uto64,
+                                   binop(Iop_CmpNE64,
+                                         getIReg(regRs),
+                                         mkU64(extend_s_10to64(imm)))));
+               else
+                  putIReg(regRt, unop(Iop_1Uto32,
+                                   binop(Iop_CmpNE32,
+                                         getIReg(regRs),
+                                         mkU32(extend_s_10to32(imm)))));
+               break;
+
+            default:
+               return False;
+         }
+         break;
+      } /* opc1 0x1C ends here*/
+      case 0x1F: {  /* SPECIAL3 opcode space */
+         switch(opc2) {
+            case 0x0A: {  // lx - Load indexed instructions
+               switch (get_sa(theInstr)) {
+                  case 0x00: {  // LWX rd, index(base)
+                     DIP("lwx r%d, r%d(r%d)", regRd, regRt, regRs);
+                     LOADX_STORE_PATTERN;
+                     putIReg(regRd, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)),
+                                                  True));
+                     break;
+                  }
+                  case 0x08: {  // LDX rd, index(base)
+                     DIP("ldx r%d, r%d(r%d)", regRd, regRt, regRs);
+                     vassert(mode64); /* Currently Implemented only for n64 */
+                     LOADX_STORE_PATTERN;
+                     putIReg(regRd, load(Ity_I64, mkexpr(t1)));
+                     break;
+                  }
+                  case 0x06: {  // LBUX rd, index(base)
+                     DIP("lbux r%d, r%d(r%d)", regRd, regRt, regRs);
+                     LOADX_STORE_PATTERN;
+                     if (mode64)
+                        putIReg(regRd, unop(Iop_8Uto64, load(Ity_I8,
+                                                             mkexpr(t1))));
+                     else
+                        putIReg(regRd, unop(Iop_8Uto32, load(Ity_I8,
+                                                             mkexpr(t1))));
+                     break;
+                  }
+                  case 0x10: {  // LWUX rd, index(base) (Cavium OCTEON)
+                     DIP("lwux r%d, r%d(r%d)", regRd, regRt, regRs);
+                     LOADX_STORE_PATTERN; /* same for both 32 and 64 modes*/
+                     putIReg(regRd, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)),
+                                                  False));
+                     break;
+                  }
+                  case 0x14: {  // LHUX rd, index(base) (Cavium OCTEON)
+                     DIP("lhux r%d, r%d(r%d)", regRd, regRt, regRs);
+                     LOADX_STORE_PATTERN;
+                     if (mode64)
+                        putIReg(regRd,
+                                unop(Iop_16Uto64, load(Ity_I16, mkexpr(t1))));
+                     else
+                        putIReg(regRd,
+                                unop(Iop_16Uto32, load(Ity_I16, mkexpr(t1))));
+                     break;
+                  }
+                  case 0x16: {  // LBX rd, index(base) (Cavium OCTEON)
+                     DIP("lbx r%d, r%d(r%d)", regRd, regRs, regRt);
+                     LOADX_STORE_PATTERN;
+                     if (mode64)
+                        putIReg(regRd,
+                                unop(Iop_8Sto64, load(Ity_I8, mkexpr(t1))));
+                     else
+                        putIReg(regRd,
+                                unop(Iop_8Sto32, load(Ity_I8, mkexpr(t1))));
+                     break;
+                  }
+                  default:
+                     vex_printf("\nUnhandled LX instruction opc3 = %x\n",
+                                get_sa(theInstr));
+                     return False;
+               }
+               break;
+            }
+            default:
+               /* Fixed: unrecognised opc2 values previously fell out of
+                  the switch and were accepted as a silent no-op (the
+                  function returned True without emitting IR).  Reject
+                  them so the caller reports an undecodable insn. */
+               return False;
+         } /* opc1 = 0x1F & opc2 = 0xA (LX) ends here*/
+         break;
+      } /* opc1 = 0x1F ends here*/
+      default:
+         return False;
+   } /* main opc1 switch ends here */
+   return True;
+}
+
+/*------------------------------------------------------------*/
+/*---       Disassemble a single DSP ASE instruction       ---*/
+/*------------------------------------------------------------*/
+
+static UInt disDSPInstr_MIPS_WRK ( UInt cins )
+{
+   IRTemp t0, t1 = 0, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14,
+          t15, t16, t17;
+   UInt opcode, rs, rt, rd, sa, function, ac, ac_mfhilo, rddsp_mask,
+        wrdsp_mask, dsp_imm, shift;
+
+   opcode = get_opcode(cins);
+   rs = get_rs(cins);
+   rt = get_rt(cins);
+   rd = get_rd(cins);
+   sa = get_sa(cins);
+   function = get_function(cins);
+   ac = get_acNo(cins);
+   ac_mfhilo = get_acNo_mfhilo(cins);
+   rddsp_mask = get_rddspMask(cins);
+   wrdsp_mask = get_wrdspMask(cins);
+   dsp_imm = get_dspImm(cins);
+   shift = get_shift(cins);
+
+   switch (opcode) {
+      case 0x00: {  /* Special */
+         switch (function) {
+            case 0x10: {  /* MFHI */
+               DIP("mfhi ac%d r%d", ac_mfhilo, rd);
+               putIReg(rd, unop(Iop_64HIto32, getAcc(ac_mfhilo)));
+               break;
+            }
+
+            case 0x11: {  /* MTHI */
+               DIP("mthi ac%d r%d", ac, rs);
+               t1 = newTemp(Ity_I32);
+               assign(t1, unop(Iop_64to32, getAcc(ac)));
+               putAcc(ac, binop(Iop_32HLto64, getIReg(rs), mkexpr(t1)));
+               break;
+            }
+
+            case 0x12: {  /* MFLO */
+               DIP("mflo ac%d r%d", ac_mfhilo, rd);
+               putIReg(rd, unop(Iop_64to32, getAcc(ac_mfhilo)));
+               break;
+            }
+
+            case 0x13: {  /* MTLO */
+               DIP("mtlo ac%d r%d", ac, rs);
+               t1 = newTemp(Ity_I32);
+               assign(t1, unop(Iop_64HIto32, getAcc(ac)));
+               putAcc(ac, binop(Iop_32HLto64, mkexpr(t1), getIReg(rs)));
+               break;
+            }
+
+            case 0x18: {  /* MULT */
+               DIP("mult ac%d r%d, r%d", ac, rs, rt);
+               t1 = newTemp(Ity_I64);
+               assign(t1, binop(Iop_MullS32, mkNarrowTo32(Ity_I32, getIReg(rs)),
+                                mkNarrowTo32(Ity_I32, getIReg(rt))));
+               putAcc(ac, mkexpr(t1));
+               break;
+            }
+
+            case 0x19: {  /* MULTU */
+               DIP("multu ac%d r%d, r%d", ac, rs, rt);
+               t1 = newTemp(Ity_I64);
+               assign(t1, binop(Iop_MullU32, mkNarrowTo32(Ity_I32, getIReg(rs)),
+                                             mkNarrowTo32(Ity_I32,
+                                                          getIReg(rt))));
+               putAcc(ac, mkexpr(t1));
+            break;
+            }
+         }
+         break;
+      }
+      case 0x1C: {  /* Special2 */
+         switch (function) {
+            case 0x00: {  /* MADD */
+               DIP("madd ac%d, r%d, r%d", ac, rs, rt);
+               t1 = newTemp(Ity_I64);
+               t2 = newTemp(Ity_I64);
+               t3 = newTemp(Ity_I64);
+
+               assign(t1, getAcc(ac));
+               assign(t2, binop(Iop_MullS32, getIReg(rs), getIReg(rt)));
+               assign(t3, binop(Iop_Add64, mkexpr(t1), mkexpr(t2)));
+
+               putAcc(ac, mkexpr(t3));
+               break;
+            }
+            case 0x01: {  /* MADDU */
+               DIP("maddu ac%d r%d, r%d", ac, rs, rt);
+               t1 = newTemp(Ity_I64);
+               t2 = newTemp(Ity_I64);
+               t3 = newTemp(Ity_I64);
+
+               assign(t1, getAcc(ac));
+               assign(t2, binop(Iop_MullU32, getIReg(rs), getIReg(rt)));
+               assign(t3, binop(Iop_Add64, mkexpr(t2), mkexpr(t1)));
+
+               putAcc(ac, mkexpr(t3));
+               break;
+            }
+            case 0x04: {  /* MSUB */
+               DIP("msub ac%d r%d, r%d", ac, rs, rt);
+               t1 = newTemp(Ity_I64);
+               t2 = newTemp(Ity_I64);
+               t3 = newTemp(Ity_I64);
+
+               assign(t1, getAcc(ac));
+               assign(t2, binop(Iop_MullS32, getIReg(rs), getIReg(rt)));
+               assign(t3, binop(Iop_Sub64, mkexpr(t1), mkexpr(t2)));
+
+               putAcc(ac, mkexpr(t3));
+               break;
+            }
+            case 0x05: {  /* MSUBU */
+               DIP("msubu ac%d r%d, r%d", ac, rs, rt);
+               t1 = newTemp(Ity_I64);
+               t2 = newTemp(Ity_I64);
+               t3 = newTemp(Ity_I64);
+
+               assign(t1, getAcc(ac));
+               assign(t2, binop(Iop_MullU32, getIReg(rs), getIReg(rt)));
+               assign(t3, binop(Iop_Sub64, mkexpr(t1), mkexpr(t2)));
+
+               putAcc(ac, mkexpr(t3));
+               break;
+            }
+         }
+         break;
+      }
+      case 0x1F: {  /* Special3 */
+         switch (function) {
+            case 0x12: {  /* ABSQ_S.PH */
+               switch (sa) {
+                  case 0x1: {  /* ABSQ_S.QB */
+                     DIP("absq_s.qb r%d, r%d", rd, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I8);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I8);
+                     t4 = newTemp(Ity_I8);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I8);
+                     t8 = newTemp(Ity_I8);
+                     t9 = newTemp(Ity_I1);
+                     t10 = newTemp(Ity_I1);
+                     t11 = newTemp(Ity_I8);
+                     t12 = newTemp(Ity_I8);
+                     t13 = newTemp(Ity_I1);
+                     t14 = newTemp(Ity_I1);
+                     t15 = newTemp(Ity_I8);
+                     t16 = newTemp(Ity_I32);
+                     t17 = newTemp(Ity_I32);
+
+                     /* Absolute value of the rightmost byte (bits 7-0). */
+                     /* t0 - rightmost byte. */
+                     assign(t0, unop(Iop_16to8, unop(Iop_32to16, getIReg(rt))));
+                     /* t1 holds 1 if t0 is equal to 0x80, or 0 otherwise. */
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32, mkexpr(t0)),
+                                      mkU32(0x00000080)));
+                     /* t2 holds 1 if value in t0 is negative, 0 otherwise. */
+                     assign(t2, unop(Iop_32to1,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 getIReg(rt),
+                                                 mkU32(0x00000080)),
+                                           mkU8(0x7))));
+                     /* t3 holds abs(t0). */
+                     assign(t3, IRExpr_ITE(mkexpr(t1),
+                                           mkU8(0x7F),
+                                           IRExpr_ITE(mkexpr(t2),
+                                                      binop(Iop_Add8,
+                                                            unop(Iop_Not8,
+                                                                 mkexpr(t0)),
+                                                            mkU8(0x1)),
+                                                      mkexpr(t0))));
+
+                     /* Absolute value of bits 15-8. */
+                     /* t4 - input byte. */
+                     assign(t4,
+                            unop(Iop_16HIto8, unop(Iop_32to16, getIReg(rt))));
+                     /* t5 holds 1 if t4 is equal to 0x80, or 0 otherwise. */
+                     assign(t5, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32, mkexpr(t4)),
+                                      mkU32(0x00000080)));
+                     /* t6 holds 1 if value in t4 is negative, 0 otherwise. */
+                     assign(t6, unop(Iop_32to1,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 getIReg(rt),
+                                                 mkU32(0x00008000)),
+                                           mkU8(15))));
+                     /* t7 holds abs(t4). */
+                     assign(t7, IRExpr_ITE(mkexpr(t5),
+                                           mkU8(0x7F),
+                                           IRExpr_ITE(mkexpr(t6),
+                                                      binop(Iop_Add8,
+                                                            unop(Iop_Not8,
+                                                                 mkexpr(t4)),
+                                                            mkU8(0x1)),
+                                                      mkexpr(t4))));
+
+                     /* Absolute value of bits 23-16. */
+                     /* t8 - input byte. */
+                     assign(t8,
+                            unop(Iop_16to8, unop(Iop_32HIto16, getIReg(rt))));
+                     /* t9 holds 1 if t8 is equal to 0x80, or 0 otherwise. */
+                     assign(t9, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32, mkexpr(t8)),
+                                      mkU32(0x00000080)));
+                     /* t10 holds 1 if value in t8 is negative, 0 otherwise. */
+                     assign(t10, unop(Iop_32to1,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  getIReg(rt),
+                                                  mkU32(0x00800000)),
+                                            mkU8(23))));
+                     /* t11 holds abs(t8). */
+                     assign(t11, IRExpr_ITE(mkexpr(t9),
+                                            mkU8(0x7F),
+                                            IRExpr_ITE(mkexpr(t10),
+                                                       binop(Iop_Add8,
+                                                             unop(Iop_Not8,
+                                                                  mkexpr(t8)),
+                                                             mkU8(0x1)),
+                                                       mkexpr(t8))));
+
+                     /* Absolute value of bits 31-24. */
+                     /* t12 - input byte. */
+                     assign(t12,
+                            unop(Iop_16HIto8, unop(Iop_32HIto16, getIReg(rt))));
+                     /* t13 holds 1 if t12 is equal to 0x80, or 0 otherwise. */
+                     assign(t13, binop(Iop_CmpEQ32,
+                                       unop(Iop_8Uto32, mkexpr(t12)),
+                                       mkU32(0x00000080)));
+                     /* t14 holds 1 if value in t12 is negative, 0 otherwise. */
+                     assign(t14, unop(Iop_32to1,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  getIReg(rt),
+                                                  mkU32(0x80000000)),
+                                            mkU8(31))));
+                     /* t15 holds abs(t12). */
+                     assign(t15, IRExpr_ITE(mkexpr(t13),
+                                            mkU8(0x7F),
+                                            IRExpr_ITE(mkexpr(t14),
+                                                       binop(Iop_Add8,
+                                                             unop(Iop_Not8,
+                                                                  mkexpr(t12)),
+                                                             mkU8(0x1)),
+                                                       mkexpr(t12))));
+
+                     /* t16 holds a non-zero value if any of the input bytes
+                        is 0x80, or 0 otherwise. */
+                     assign(t16,
+                            binop(Iop_Or32,
+                                  binop(Iop_Or32,
+                                        binop(Iop_Or32,
+                                              unop(Iop_1Sto32, mkexpr(t13)),
+                                              unop(Iop_1Sto32, mkexpr(t9))),
+                                        unop(Iop_1Sto32, mkexpr(t5))),
+                                  unop(Iop_1Sto32, mkexpr(t1))));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    mkexpr(t16),
+                                                    mkU32(0x0)),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000))));
+
+                     /* t17 = t15|t11|t7|t3 */
+                     assign(t17,
+                            binop(Iop_16HLto32,
+                                  binop(Iop_8HLto16, mkexpr(t15), mkexpr(t11)),
+                                  binop(Iop_8HLto16, mkexpr(t7), mkexpr(t3))));
+
+                     putIReg(rd, mkexpr(t17));
+                     break;
+                  }
+                  case 0x2: {  /* REPL.QB */
+                     DIP("repl.qb r%d, %d", rd, dsp_imm);
+                     vassert(!mode64);
+
+                     putIReg(rd, mkU32((dsp_imm << 24) | (dsp_imm << 16) |
+                                       (dsp_imm << 8) | (dsp_imm)));
+                     break;
+                  }
+                  case 0x3: {  /* REPLV.QB */
+                     DIP("replv.qb r%d, r%d", rd, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I8);
+
+                     assign(t0, unop(Iop_32to8,
+                                binop(Iop_And32, getIReg(rt), mkU32(0xff))));
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   binop(Iop_8HLto16, mkexpr(t0), mkexpr(t0)),
+                                   binop(Iop_8HLto16, mkexpr(t0), mkexpr(t0))));
+                     break;
+                  }
+                  case 0x4: {  /* PRECEQU.PH.QBL */
+                     DIP("precequ.ph.qbl r%d, r%d", rd, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0xff000000)),
+                                             mkU8(1)),
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0x00ff0000)),
+                                             mkU8(9))));
+                     break;
+                  }
+                  case 0x5: {  /* PRECEQU.PH.QBR */
+                     DIP("precequ.ph.qbr r%d, r%d", rd, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Shl32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0x0000ff00)),
+                                             mkU8(15)),
+                                       binop(Iop_Shl32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0x000000ff)),
+                                             mkU8(7))));
+                     break;
+                  }
+                  case 0x6: {  /* PRECEQU.PH.QBLA */
+                     DIP("precequ.ph.qbla r%d, r%d", rd, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0xff000000)),
+                                             mkU8(1)),
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0x0000ff00)),
+                                             mkU8(1))));
+                     break;
+                  }
+                  case 0x7: {  /* PRECEQU.PH.QBRA */
+                     DIP("precequ.ph.qbra r%d, r%d", rd, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Shl32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0x00ff0000)),
+                                             mkU8(7)),
+                                       binop(Iop_Shl32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0x000000ff)),
+                                             mkU8(7))));
+                     break;
+                  }
+                  case 0x9: {  /* ABSQ_S.PH */
+                     DIP("absq_s.ph r%d, r%d", rd, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I16);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I16);
+                     t4 = newTemp(Ity_I16);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I16);
+                     t8 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I32);
+
+                     /* t0 holds lower 16 bits of value in rt. */
+                     assign(t0, unop(Iop_32to16, getIReg(rt)));
+                     /* t1 holds 1 if t0 is equal to 0x8000. */
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32, mkexpr(t0)),
+                                      mkU32(0x00008000)));
+                     /* t2 holds 1 if value in t0 is negative, 0 otherwise. */
+                     assign(t2, unop(Iop_32to1,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 getIReg(rt),
+                                                 mkU32(0x00008000)),
+                                           mkU8(15))));
+                     /* t3 holds abs(t0). */
+                     assign(t3, IRExpr_ITE(mkexpr(t1),
+                                           mkU16(0x7FFF),
+                                           IRExpr_ITE(mkexpr(t2),
+                                                      binop(Iop_Add16,
+                                                            unop(Iop_Not16,
+                                                                 mkexpr(t0)),
+                                                            mkU16(0x1)),
+                                                      mkexpr(t0))));
+
+                     /* t4 holds upper 16 bits of value in rt. */
+                     assign(t4, unop(Iop_32HIto16, getIReg(rt)));
+                     /* t5 holds 1 if t4 is equal to 0x8000. */
+                     assign(t5, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32, mkexpr(t4)),
+                                      mkU32(0x00008000)));
+                     /* t6 holds 1 if value in t4 is negative, 0 otherwise. */
+                     assign(t6, unop(Iop_32to1,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 getIReg(rt),
+                                                 mkU32(0x80000000)),
+                                           mkU8(31))));
+                     /* t7 holds abs(t4). */
+                     assign(t7, IRExpr_ITE(mkexpr(t5),
+                                           mkU16(0x7FFF),
+                                           IRExpr_ITE(mkexpr(t6),
+                                                      binop(Iop_Add16,
+                                                            unop(Iop_Not16,
+                                                                 mkexpr(t4)),
+                                                            mkU16(0x1)),
+                                                      mkexpr(t4))));
+                     /* If any of the two input halfwords is equal 0x8000,
+                        set bit 20 in DSPControl register. */
+                     assign(t8, binop(Iop_Or32,
+                                      unop(Iop_1Sto32, mkexpr(t5)),
+                                      unop(Iop_1Sto32, mkexpr(t1))));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    mkexpr(t8),
+                                                    mkU32(0x0)),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000))));
+
+                     /* t9 = t7|t3 */
+                     assign(t9, binop(Iop_16HLto32, mkexpr(t7), mkexpr(t3)));
+
+                     putIReg(rd, mkexpr(t9));
+                     break;
+                  }
+                  case 0xA: {  /* REPL.PH */
+                     DIP("repl.ph r%d, %d", rd, dsp_imm);
+                     vassert(!mode64);
+                     UShort immediate = extend_s_10to16(dsp_imm);
+
+                     putIReg(rd, mkU32(immediate << 16 | immediate));
+                     break;
+                  }
+                  case 0xB: {  /* REPLV.PH */
+                     DIP("replv.ph r%d, r%d", rd, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32to16, getIReg(rt)),
+                                       unop(Iop_32to16, getIReg(rt))));
+                     break;
+                  }
+                  case 0xC: {  /* PRECEQ.W.PHL */
+                     DIP("preceq.w.phl r%d, r%d", rd, rt);
+                     vassert(!mode64);
+                     putIReg(rd, binop(Iop_And32,
+                                       getIReg(rt),
+                                       mkU32(0xffff0000)));
+                     break;
+                  }
+                  case 0xD: {  /* PRECEQ.W.PHR */
+                     DIP("preceq.w.phr r%d, r%d", rd, rt);
+                     vassert(!mode64);
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32to16, getIReg(rt)),
+                                       mkU16(0x0)));
+                     break;
+                  }
+                  case 0x11: {  /* ABSQ_S.W */
+                     DIP("absq_s.w r%d, r%d", rd, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I1);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+
+                     assign(t0,
+                            binop(Iop_CmpEQ32, getIReg(rt), mkU32(0x80000000)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t0),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     assign(t1, binop(Iop_CmpLT32S, getIReg(rt), mkU32(0x0)));
+
+                     assign(t2, IRExpr_ITE(mkexpr(t0),
+                                           mkU32(0x7FFFFFFF),
+                                           IRExpr_ITE(mkexpr(t1),
+                                                      binop(Iop_Add32,
+                                                            unop(Iop_Not32,
+                                                                 getIReg(rt)),
+                                                            mkU32(0x1)),
+                                                      getIReg(rt))));
+                     putIReg(rd, mkexpr(t2));
+                     break;
+                  }
+                  case 0x1B: {  /* BITREV */
+                     DIP("bitrev r%d, r%d", rd, rt);
+                     vassert(!mode64);
+                     /* 32bit reversal as seen on Bit Twiddling Hacks site
+                        http://graphics.stanford.edu/~seander/bithacks.html
+                        section ReverseParallel */
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);
+
+                     assign(t1, binop(Iop_Or32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  getIReg(rt),
+                                                  mkU32(0xaaaaaaaa)),
+                                            mkU8(0x1)),
+                                      binop(Iop_Shl32,
+                                            binop(Iop_And32,
+                                                  getIReg(rt),
+                                                  mkU32(0x55555555)),
+                                            mkU8(0x1))));
+                     assign(t2, binop(Iop_Or32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t1),
+                                                  mkU32(0xcccccccc)),
+                                            mkU8(0x2)),
+                                      binop(Iop_Shl32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t1),
+                                                  mkU32(0x33333333)),
+                                            mkU8(0x2))));
+                     assign(t3, binop(Iop_Or32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t2),
+                                                  mkU32(0xf0f0f0f0)),
+                                            mkU8(0x4)),
+                                      binop(Iop_Shl32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t2),
+                                                  mkU32(0x0f0f0f0f)),
+                                            mkU8(0x4))));
+                     assign(t4, binop(Iop_Or32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t3),
+                                                  mkU32(0xff00ff00)),
+                                            mkU8(0x8)),
+                                      binop(Iop_Shl32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t3),
+                                                  mkU32(0x00ff00ff)),
+                                            mkU8(0x8))));
+                     assign(t5, binop(Iop_Or32,
+                                      binop(Iop_Shr32,
+                                            mkexpr(t4),
+                                            mkU8(0x10)),
+                                      binop(Iop_Shl32,
+                                            mkexpr(t4),
+                                            mkU8(0x10))));
+                     putIReg(rd, binop(Iop_Shr32,
+                                       mkexpr(t5),
+                                       mkU8(16)));
+                     break;
+                  }
+                  case 0x1C: {  /* PRECEU.PH.QBL */
+                     DIP("preceu.ph.qbl r%d, r%d", rd, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0xff000000)),
+                                             mkU8(8)),
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0x00ff0000)),
+                                             mkU8(16))));
+                     break;
+                  }
+                  case 0x1E: {  /* PRECEU.PH.QBLA */
+                     DIP("preceu.ph.qbla r%d, r%d", rd, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0xff000000)),
+                                             mkU8(8)),
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0x0000ff00)),
+                                             mkU8(8))));
+                     break;
+                  }
+                  case 0x1D: {  /* PRECEU.PH.QBR */
+                     DIP("preceu.ph.qbr r%d, r%d", rd, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Shl32,
+                                             binop(Iop_And32,
+                                                   getIReg(rt),
+                                                   mkU32(0x0000ff00)),
+                                             mkU8(8)),
+                                       binop(Iop_And32,
+                                             getIReg(rt),
+                                             mkU32(0x000000ff))));
+                     break;
+                  }
+                  case 0x1F: {  /* PRECEU.PH.QBRA */
+                     DIP("preceu.ph.qbra r%d, r%d", rd, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_And32,
+                                             getIReg(rt),
+                                             mkU32(0x00ff0000)),
+                                       binop(Iop_And32,
+                                             getIReg(rt),
+                                             mkU32(0x000000ff))));
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of ABSQ_S.PH */
+            }
+            case 0x38: {  /* EXTR.W */
+               switch(sa) {
+                  case 0x0: {  /* EXTR.W */
+                     DIP("extr.w r%d, ac%d, %d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+                     t10 = newTemp(Ity_I1);
+                     t11 = newTemp(Ity_I1);
+                     t12 = newTemp(Ity_I1);
+                     t13 = newTemp(Ity_I1);
+                     t14 = newTemp(Ity_I32);
+
+                     assign(t0, getAcc(ac));
+                     if (0 == rs) {
+                        assign(t1, mkexpr(t0));
+                     } else {
+                        assign(t1, binop(Iop_Sar64, mkexpr(t0), mkU8(rs)));
+                     }
+                     /* Check if bits 63..31 of the result in t1 aren't 0. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0)));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+                     /* Check if bits 63..31 of the result in t1 aren't
+                        0x1ffffffff. */
+                     assign(t5, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0xffffffff)));
+                     assign(t6, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+                     /* If bits 63..31 aren't 0 nor 0x1ffffffff, set DSP
+                        control register. */
+                     assign(t7, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t3)),
+                                            unop(Iop_1Sto32, mkexpr(t4))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t5)),
+                                            unop(Iop_1Sto32, mkexpr(t6)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t7),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+
+                     /* If the last discarded bit is 1, there would be carry
+                        when rounding, otherwise there wouldn't. We use that
+                        fact and just add the value of the last discarded bit
+                        to the least significant bit of the shifted value
+                        from acc. */
+                     if (0 == rs) {
+                        assign(t8, mkU64(0x0ULL));
+                     } else {
+                        assign(t8, binop(Iop_And64,
+                                         binop(Iop_Shr64,
+                                               mkexpr(t0),
+                                               mkU8(rs-1)),
+                                         mkU64(0x1ULL)));
+                     }
+                     assign(t9, binop(Iop_Add64, mkexpr(t1), mkexpr(t8)));
+
+                     /* Repeat previous steps for the rounded value. */
+                     assign(t10, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0)));
+                     assign(t11, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+
+                     assign(t12, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0xffffffff)));
+                     assign(t13, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+
+                     assign(t14, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t10)),
+                                            unop(Iop_1Sto32, mkexpr(t11))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t12)),
+                                            unop(Iop_1Sto32, mkexpr(t13)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t14),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+                     if (0 == rs) {
+                        putIReg(rt, unop(Iop_64to32, mkexpr(t0)));
+                     } else {
+                        putIReg(rt, unop(Iop_64to32, mkexpr(t1)));
+                     }
+                     break;
+                  }
+                  case 0x1: {  /* EXTRV.W */
+                     DIP("extrv.w r%d, ac%d, r%d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+                     t10 = newTemp(Ity_I1);
+                     t11 = newTemp(Ity_I1);
+                     t12 = newTemp(Ity_I1);
+                     t13 = newTemp(Ity_I1);
+                     t14 = newTemp(Ity_I32);
+                     t15 = newTemp(Ity_I8);
+
+                     assign(t15, unop(Iop_32to8,
+                                      binop(Iop_And32,
+                                            getIReg(rs),
+                                            mkU32(0x1f))));
+                     assign(t0, getAcc(ac));
+                     assign(t1, binop(Iop_Sar64, mkexpr(t0), mkexpr(t15)));
+                     putIReg(rt, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                        unop(Iop_8Uto32,
+                                                             mkexpr(t15)),
+                                                        mkU32(0)),
+                                                  unop(Iop_64to32, mkexpr(t0)),
+                                                  unop(Iop_64to32, mkexpr(t1))));
+
+                     /* Check if bits 63..31 of the result in t1 aren't 0. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0)));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+                     /* Check if bits 63..31 of the result in t1 aren't
+                        0x1ffffffff. */
+                     assign(t5, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0xffffffff)));
+                     assign(t6, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+                     /* If bits 63..31 aren't 0 nor 0x1ffffffff, set DSP
+                        control register. */
+                     assign(t7, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t3)),
+                                            unop(Iop_1Sto32, mkexpr(t4))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t5)),
+                                            unop(Iop_1Sto32, mkexpr(t6)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t7),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+
+                     /* If the last discarded bit is 1, there would be carry
+                        when rounding, otherwise there wouldn't. We use that
+                        fact and just add the value of the last discarded bit
+                        to the least sifgnificant bit of the shifted value
+                        from acc. */
+                     assign(t8,
+                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                             unop(Iop_8Uto32,
+                                                  mkexpr(t15)),
+                                             mkU32(0)),
+                                       mkU64(0x0ULL),
+                                       binop(Iop_And64,
+                                             binop(Iop_Shr64,
+                                                   mkexpr(t0),
+                                                   unop(Iop_32to8,
+                                                        binop(Iop_Sub32,
+                                                              unop(Iop_8Uto32,
+                                                                   mkexpr(t15)),
+                                                                   mkU32(1)))),
+                                             mkU64(0x1ULL))));
+
+                     assign(t9, binop(Iop_Add64, mkexpr(t1), mkexpr(t8)));
+
+                     /* Repeat previous steps for the rounded value. */
+                     assign(t10, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0)));
+                     assign(t11, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+
+                     assign(t12, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0xffffffff)));
+                     assign(t13, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+
+                     assign(t14, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t10)),
+                                            unop(Iop_1Sto32, mkexpr(t11))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t12)),
+                                            unop(Iop_1Sto32, mkexpr(t13)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t14),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+                     break;
+                  }
+                  case 0x2: {  /* EXTP */
+                     DIP("extp r%d, ac%d, %d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I8);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I64);
+                     t7 = newTemp(Ity_I32);
+
+                     assign(t0, getAcc(ac));
+                     /* t1 = pos field (bits 5..0) of DSPControl register. */
+                     assign(t1, binop(Iop_And32, getDSPControl(), mkU32(0x3f)));
+
+                     /* Check if (pos - size) >= 0 [size <= pos]
+                        if (pos < size)
+                           put 1 to EFI field of DSPControl register
+                        else
+                           extract bits from acc and put 0 to EFI field of
+                           DSPCtrl */
+                     assign(t2, binop(Iop_CmpLT32U, mkexpr(t1), mkU32(rs)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    binop(Iop_And32,
+                                                          getDSPControl(),
+                                                          mkU32(0xffffbfff)),
+                                                    mkU32(0x4000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xffffbfff))));
+
+                     /* If pos <= 31, shift right the value from the acc
+                        (pos-size) times and take (size+1) bits from the least
+                        significant positions. Otherwise, shift left the value
+                        (63-pos) times, take (size+1) bits from the most
+                        significant positions and shift right (31-size) times.*/
+                     assign(t3, binop(Iop_CmpLE32U, mkexpr(t1), mkU32(31)));
+
+                     assign(t4,
+                           IRExpr_ITE(mkexpr(t3),
+                                      unop(Iop_32to8,
+                                           binop(Iop_Sub32,
+                                                 mkexpr(t1), mkU32(rs))),
+                                      unop(Iop_32to8,
+                                           binop(Iop_Sub32,
+                                                 mkU32(63), mkexpr(t1)))));
+
+                     assign(t5, IRExpr_ITE(mkexpr(t3),
+                                           binop(Iop_Shr64,
+                                                 mkexpr(t0), mkexpr(t4)),
+                                           binop(Iop_Shl64,
+                                                 mkexpr(t0), mkexpr(t4))));
+
+                     /* t6 = (size+1)-bit wide mask used for bit extraction. */
+                     assign(t6,
+                            IRExpr_ITE(mkexpr(t3),
+                                       unop(Iop_Not64,
+                                            binop(Iop_Shl64,
+                                                  mkU64(0xffffffffffffffffULL),
+                                                  mkU8(rs+1))),
+                                       unop(Iop_Not64,
+                                            binop(Iop_Shr64,
+                                                  mkU64(0xffffffffffffffffULL),
+                                                  mkU8(rs+1)))));
+
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           unop(Iop_64to32,
+                                                binop(Iop_And64,
+                                                      mkexpr(t5),
+                                                      mkexpr(t6))),
+                                           binop(Iop_Shr32,
+                                                 unop(Iop_64HIto32,
+                                                      binop(Iop_And64,
+                                                            mkexpr(t5),
+                                                            mkexpr(t6))),
+                                                 mkU8(31-rs))));
+
+                     putIReg(rt, mkexpr(t7));
+                     break;
+                  }
+                  case 0x3: {  /* EXTPV */
+                     DIP("extpv r%d, ac%d, r%d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I8);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I64);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     assign(t8, binop(Iop_And32, getIReg(rs), mkU32(0x1f)));
+                     assign(t0, getAcc(ac));
+                     /* t1 = pos field (bits 5..0) of DSPControl register. */
+                     assign(t1, binop(Iop_And32, getDSPControl(), mkU32(0x3f)));
+
+                     /* Check if (pos - size) >= 0 [size <= pos]
+                        if (pos < size)
+                           put 1 to EFI field of DSPControl register
+                        else
+                           extract bits from acc and put 0 to EFI field of
+                           DSPCtrl */
+                     assign(t2, binop(Iop_CmpLT32U, mkexpr(t1), mkexpr(t8)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    binop(Iop_And32,
+                                                          getDSPControl(),
+                                                          mkU32(0xffffbfff)),
+                                                    mkU32(0x4000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xffffbfff))));
+
+                     /* If pos <= 31, shift right the value from the acc
+                        (pos-size) times and take (size+1) bits from the least
+                        significant positions. Otherwise, shift left the value
+                        (63-pos) times, take (size+1) bits from the most
+                        significant positions and shift right (31-size)
+                        times. */
+                     assign(t3, binop(Iop_CmpLE32U, mkexpr(t1), mkU32(31)));
+
+                     assign(t4,
+                           IRExpr_ITE(mkexpr(t3),
+                                      unop(Iop_32to8,
+                                           binop(Iop_Sub32,
+                                                 mkexpr(t1), mkexpr(t8))),
+                                      unop(Iop_32to8,
+                                           binop(Iop_Sub32,
+                                                 mkU32(63), mkexpr(t1)))));
+
+                     assign(t5, IRExpr_ITE(mkexpr(t3),
+                                           binop(Iop_Shr64,
+                                                 mkexpr(t0), mkexpr(t4)),
+                                           binop(Iop_Shl64,
+                                                 mkexpr(t0), mkexpr(t4))));
+
+                     /* t6 = (size+1)-bit wide mask used for bit extraction. */
+                     assign(t6,
+                            IRExpr_ITE(mkexpr(t3),
+                                       unop(Iop_Not64,
+                                            binop(Iop_Shl64,
+                                                  mkU64(0xffffffffffffffffULL),
+                                                  unop(Iop_32to8,
+                                                       binop(Iop_Add32,
+                                                             mkexpr(t8),
+                                                             mkU32(1))))),
+                                       unop(Iop_Not64,
+                                            binop(Iop_Shr64,
+                                                  mkU64(0xffffffffffffffffULL),
+                                                  unop(Iop_32to8,
+                                                       binop(Iop_Add32,
+                                                             mkexpr(t8),
+                                                             mkU32(1)))))));
+
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           unop(Iop_64to32,
+                                                binop(Iop_And64,
+                                                      mkexpr(t5),
+                                                      mkexpr(t6))),
+                                           binop(Iop_Shr32,
+                                                 unop(Iop_64HIto32,
+                                                      binop(Iop_And64,
+                                                            mkexpr(t5),
+                                                            mkexpr(t6))),
+                                                 unop(Iop_32to8,
+                                                      binop(Iop_Sub32,
+                                                            mkU32(31),
+                                                            mkexpr(t8))))));
+
+                     putIReg(rt, mkexpr(t7));
+                     break;
+                  }
+                  case 0x4: {  /* EXTR_R.W */
+                     DIP("extr_r.w r%d, ac%d, %d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+                     t10 = newTemp(Ity_I1);
+                     t11 = newTemp(Ity_I1);
+                     t12 = newTemp(Ity_I1);
+                     t13 = newTemp(Ity_I1);
+                     t14 = newTemp(Ity_I32);
+                     t15 = newTemp(Ity_I64);
+                     t16 = newTemp(Ity_I1);
+
+                     assign(t0, getAcc(ac));
+                     assign(t16, binop(Iop_CmpEQ32,
+                                       mkU32(rs),
+                                       mkU32(0)));
+                     assign(t1, IRExpr_ITE(mkexpr(t16),
+                                           mkexpr(t0),
+                                           binop(Iop_Sar64,
+                                                 mkexpr(t0),
+                                                 mkU8(rs))));
+                     /* If the last discarded bit is 1, there would be carry
+                        when rounding, otherwise there wouldn't. We use that
+                        fact and just add the value of the last discarded bit
+                        to the least significant bit of the shifted value
+                        from acc. */
+                     assign(t15, binop(Iop_Shr64,
+                                       mkexpr(t0),
+                                       unop(Iop_32to8,
+                                            binop(Iop_Sub32,
+                                                  binop(Iop_And32,
+                                                        mkU32(rs),
+                                                        mkU32(0x1f)),
+                                                  mkU32(1)))));
+
+                     assign(t8,
+                            IRExpr_ITE(mkexpr(t16),
+                                       mkU64(0x0ULL),
+                                       binop(Iop_And64,
+                                             mkexpr(t15),
+                                             mkU64(0x0000000000000001ULL))));
+                     assign(t9, binop(Iop_Add64, mkexpr(t1), mkexpr(t8)));
+                     putIReg(rt, unop(Iop_64to32, mkexpr(t9)));
+
+                     /* Check if bits 63..31 of the result in t1 aren't 0. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0)));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+
+                     /* Check if bits 63..31 of the result in t1 aren't
+                        0x1ffffffff. */
+                     assign(t5, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0xffffffff)));
+                     assign(t6, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+                     /* Bits 63..31 neither all-0 nor all-1 means the result
+                        does not fit in 32 signed bits: set DSPControl. */
+                     assign(t7, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t3)),
+                                            unop(Iop_1Sto32, mkexpr(t4))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t5)),
+                                            unop(Iop_1Sto32, mkexpr(t6)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t7),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+
+                     /* Repeat previous steps for the rounded value. */
+                     assign(t10, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0)));
+                     assign(t11, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+
+                     assign(t12, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0xffffffff)));
+                     assign(t13, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+
+                     assign(t14, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t10)),
+                                            unop(Iop_1Sto32, mkexpr(t11))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t12)),
+                                            unop(Iop_1Sto32, mkexpr(t13)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t14),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+                     break;
+                  }
+                  case 0x5: {  /* EXTRV_R.W */
+                     DIP("extrv_r.w r%d, ac%d, r%d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+                     t10 = newTemp(Ity_I1);
+                     t11 = newTemp(Ity_I1);
+                     t12 = newTemp(Ity_I1);
+                     t13 = newTemp(Ity_I1);
+                     t14 = newTemp(Ity_I32);
+                     t15 = newTemp(Ity_I8);
+
+                     assign(t15, unop(Iop_32to8,
+                                      binop(Iop_And32,
+                                            getIReg(rs),
+                                            mkU32(0x1f))));
+                     assign(t0, getAcc(ac));
+                     assign(t1, binop(Iop_Sar64, mkexpr(t0), mkexpr(t15)));
+
+                     /* Check if bits 63..31 of the result in t1 aren't 0. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0)));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+                     /* Check if bits 63..31 of the result in t1 aren't
+                        0x1ffffffff. */
+                     assign(t5, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0xffffffff)));
+                     assign(t6, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+                     /* Bits 63..31 neither all-0 nor all-1 means the result
+                        does not fit in 32 signed bits: set DSPControl. */
+                     assign(t7, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t3)),
+                                            unop(Iop_1Sto32, mkexpr(t4))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t5)),
+                                            unop(Iop_1Sto32, mkexpr(t6)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t7),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+
+                     /* If the last discarded bit is 1, there would be carry
+                        when rounding, otherwise there wouldn't. We use that
+                        fact and just add the value of the last discarded bit
+                        to the least significant bit of the shifted value
+                        from acc. */
+                     assign(t8,
+                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                             unop(Iop_8Uto32,
+                                                  mkexpr(t15)),
+                                             mkU32(0)),
+                                       mkU64(0x0ULL),
+                                       binop(Iop_And64,
+                                             binop(Iop_Shr64,
+                                                   mkexpr(t0),
+                                                   unop(Iop_32to8,
+                                                        binop(Iop_Sub32,
+                                                              unop(Iop_8Uto32,
+                                                                   mkexpr(t15)),
+                                                                   mkU32(1)))),
+                                             mkU64(0x1ULL))));
+
+                     assign(t9, binop(Iop_Add64, mkexpr(t1), mkexpr(t8)));
+                     /* Put rounded value in destination register. */
+                     putIReg(rt, unop(Iop_64to32, mkexpr(t9)));
+
+                     /* Repeat previous steps for the rounded value. */
+                     assign(t10, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0)));
+                     assign(t11, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+
+                     assign(t12, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0xffffffff)));
+                     assign(t13, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+
+                     assign(t14, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t10)),
+                                            unop(Iop_1Sto32, mkexpr(t11))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t12)),
+                                            unop(Iop_1Sto32, mkexpr(t13)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t14),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+                     break;
+                  }
+                  case 0x6: {  /* EXTR_RS.W */
+                     DIP("extr_rs.w r%d, ac%d, %d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+                     t10 = newTemp(Ity_I1);
+                     t11 = newTemp(Ity_I1);
+                     t12 = newTemp(Ity_I1);
+                     t13 = newTemp(Ity_I1);
+                     t14 = newTemp(Ity_I32);
+                     t16 = newTemp(Ity_I32);
+
+                     assign(t0, getAcc(ac));
+                     if (0 == rs) {
+                        assign(t1, mkexpr(t0));
+                     } else {
+                        assign(t1, binop(Iop_Sar64, mkexpr(t0), mkU8(rs)));
+                     }
+
+                     /* Check if bits 63..31 of the result in t1 aren't 0. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0)));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+                     /* Check if bits 63..31 of the result in t1 aren't
+                        0x1ffffffff. */
+                     assign(t5, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0xffffffff)));
+                     assign(t6, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+                     /* If bits 63..31 aren't 0 nor 0x1ffffffff, set DSP
+                        control register. */
+                     assign(t7, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t3)),
+                                            unop(Iop_1Sto32, mkexpr(t4))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t5)),
+                                            unop(Iop_1Sto32, mkexpr(t6)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t7),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+
+                     /* If the last discarded bit is 1, there would be carry
+                        when rounding, otherwise there wouldn't. We use that
+                        fact and just add the value of the last discarded bit
+                        to the least sifgnificant bit of the shifted value
+                        from acc. */
+                     if (0 == rs) {
+                        assign(t8, mkU64(0x0ULL));
+                     } else {
+                        assign(t8, binop(Iop_And64,
+                                         binop(Iop_Shr64,
+                                               mkexpr(t0),
+                                               mkU8(rs-1)),
+                                         mkU64(0x1ULL)));
+                     }
+
+                     assign(t9, binop(Iop_Add64, mkexpr(t1), mkexpr(t8)));
+
+                     /* Repeat previous steps for the rounded value. */
+                     assign(t10, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0)));
+                     assign(t11, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+
+                     assign(t12, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0xffffffff)));
+                     assign(t13, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+
+                     assign(t14, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t10)),
+                                            unop(Iop_1Sto32, mkexpr(t11))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t12)),
+                                            unop(Iop_1Sto32, mkexpr(t13)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t14),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+
+                     assign(t16, binop(Iop_And32,
+                                       unop(Iop_64HIto32,
+                                            mkexpr(t9)),
+                                       mkU32(0x80000000)));
+                     putIReg(rt, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                  mkexpr(t14),
+                                                  mkU32(0)),
+                                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                             mkexpr(t16),
+                                                             mkU32(0)),
+                                                       mkU32(0x7fffffff),
+                                                       mkU32(0x80000000)),
+                                            unop(Iop_64to32, mkexpr(t9))));
+                     break;
+                  }
+                  case 0x7: {  /* EXTRV_RS.W */
+                     /* Extract a 32-bit word from acc, shifted right by a
+                        variable amount (low 5 bits of rs), with rounding and
+                        signed saturation; sets DSPControl ouflag (bit 23) on
+                        overflow of either the raw or the rounded value. */
+                     DIP("extrv_rs.w r%d, ac%d, r%d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+                     t10 = newTemp(Ity_I1);
+                     t11 = newTemp(Ity_I1);
+                     t12 = newTemp(Ity_I1);
+                     t13 = newTemp(Ity_I1);
+                     t14 = newTemp(Ity_I32);
+                     t15 = newTemp(Ity_I32);
+                     t16 = newTemp(Ity_I32);
+                     t17 = newTemp(Ity_I1);
+
+                     /* t15 = shift amount: low 5 bits of rs. */
+                     assign(t15, binop(Iop_And32,
+                                       getIReg(rs),
+                                       mkU32(0x1f)));
+                     /* t17 = 1 iff the shift amount is zero; used to bypass
+                        both the shift and the rounding-bit extraction below
+                        (a zero shift discards no bits). */
+                     assign(t17, binop(Iop_CmpEQ32,
+                                       mkexpr(t15),
+                                       mkU32(0)));
+                     assign(t0, getAcc(ac));
+                     /* t1 = acc arithmetically shifted right by t15. */
+                     assign(t1, IRExpr_ITE(mkexpr(t17),
+                                           mkexpr(t0),
+                                           binop(Iop_Sar64,
+                                                 mkexpr(t0),
+                                                 unop(Iop_32to8,
+                                                      mkexpr(t15)))));
+
+                     /* Check if bits 63..31 of the result in t1 aren't 0. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0)));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+                     /* Check if bits 63..31 of the result in t1 aren't
+                        0x1ffffffff. */
+                     assign(t5, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t1)),
+                                      mkU32(0xffffffff)));
+                     assign(t6, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+                     /* If bits 63..31 aren't 0 nor 0x1ffffffff, set DSP
+                        control register. */
+                     assign(t7, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t3)),
+                                            unop(Iop_1Sto32, mkexpr(t4))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t5)),
+                                            unop(Iop_1Sto32, mkexpr(t6)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t7),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+
+                     /* If the last discarded bit is 1, there would be carry
+                        when rounding, otherwise there wouldn't. We use that
+                        fact and just add the value of the last discarded bit
+                        to the least significant bit of the shifted value
+                        from acc. */
+                     assign(t8,
+                            IRExpr_ITE(mkexpr(t17),
+                                       mkU64(0x0ULL),
+                                       binop(Iop_And64,
+                                             binop(Iop_Shr64,
+                                                   mkexpr(t0),
+                                                   unop(Iop_32to8,
+                                                        binop(Iop_Sub32,
+                                                              mkexpr(t15),
+                                                              mkU32(1)))),
+                                             mkU64(0x1ULL))));
+
+                     /* t9 = rounded result. */
+                     assign(t9, binop(Iop_Add64, mkexpr(t1), mkexpr(t8)));
+
+                     /* Repeat previous steps for the rounded value. */
+                     assign(t10, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0)));
+                     assign(t11, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0)));
+
+                     assign(t12, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32,
+                                           mkexpr(t9)),
+                                      mkU32(0xffffffff)));
+                     assign(t13, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32,
+                                                 mkexpr(t9)),
+                                            mkU32(0x80000000)),
+                                      mkU32(0x80000000)));
+
+                     /* t14 != 0 iff the rounded value overflows 32 bits. */
+                     assign(t14, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t10)),
+                                            unop(Iop_1Sto32, mkexpr(t11))),
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32, mkexpr(t12)),
+                                            unop(Iop_1Sto32, mkexpr(t13)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t14),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+
+                     /* t16 = sign bit of the rounded value; selects which
+                        saturation constant to use on overflow. */
+                     assign(t16, binop(Iop_And32,
+                                       unop(Iop_64HIto32,
+                                            mkexpr(t9)),
+                                       mkU32(0x80000000)));
+                     /* On overflow saturate to 0x7fffffff / 0x80000000,
+                        otherwise take the low word of the rounded value. */
+                     putIReg(rt, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                  mkexpr(t14),
+                                                  mkU32(0)),
+                                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                             mkexpr(t16),
+                                                             mkU32(0)),
+                                                       mkU32(0x7fffffff),
+                                                       mkU32(0x80000000)),
+                                            unop(Iop_64to32, mkexpr(t9))));
+                     break;
+                  }
+                  case 0xA: {  /* EXTPDP */
+                     /* Extract (size+1) bits from acc at the position given
+                        by DSPControl.pos, then decrement pos past the
+                        extracted field.  In this encoding the rs field holds
+                        the size immediate. */
+                     DIP("extpdp r%d, ac%d, %d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I8);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I64);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     assign(t0, getAcc(ac));
+                     /* Extract pos field of DSPControl register. */
+                     assign(t1, binop(Iop_And32, getDSPControl(), mkU32(0x3f)));
+
+                     /* Check if (pos - size) >= 0 [size <= pos]
+                        if (pos < size)
+                           put 1 to EFI field of DSPControl register
+                        else
+                           extract bits from acc and put 0 to EFI field of
+                           DSPCtrl */
+                     assign(t2, binop(Iop_CmpLT32U, mkexpr(t1), mkU32(rs)));
+
+                     /* t8 = DSPControl with the pos field updated to
+                        pos - (size+1), i.e. pos moved past the extracted
+                        bits; 0xffffbfc0 clears pos and EFI. */
+                     assign(t8, binop(Iop_Or32,
+                                      binop(Iop_And32,
+                                            getDSPControl(),
+                                            mkU32(0xffffbfc0)),
+                                      binop(Iop_And32,
+                                            binop(Iop_Sub32,
+                                                  binop(Iop_And32,
+                                                        getDSPControl(),
+                                                        mkU32(0x3f)),
+                                                  mkU32(rs+1)),
+                                            mkU32(0x3f))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                     binop(Iop_And32,
+                                                           getDSPControl(),
+                                                           mkU32(0xffffbfff)),
+                                                     mkU32(0x4000)),
+                                              mkexpr(t8)));
+
+                     /* If pos <= 31, shift right the value from the acc
+                        (pos-size) times and take (size+1) bits from the least
+                        significant positions. Otherwise, shift left the value
+                        (63-pos) times, take (size+1) bits from the most
+                        significant positions and shift right (31-size) times.
+                     */
+                     assign(t3, binop(Iop_CmpLE32U, mkexpr(t1), mkU32(31)));
+
+                     /* t4 = shift count for either of the two cases above. */
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t3),
+                                       unop(Iop_32to8,
+                                            binop(Iop_Sub32,
+                                                  mkexpr(t1), mkU32(rs))),
+                                       unop(Iop_32to8,
+                                            binop(Iop_Sub32,
+                                                  mkU32(63), mkexpr(t1)))));
+
+                     assign(t5, IRExpr_ITE(mkexpr(t3),
+                                           binop(Iop_Shr64,
+                                                 mkexpr(t0), mkexpr(t4)),
+                                           binop(Iop_Shl64,
+                                                 mkexpr(t0), mkexpr(t4))));
+
+                     /* t6 holds a mask for bit extraction ((size+1) ones at
+                        the low or high end, matching the shift direction). */
+                     assign(t6,
+                            IRExpr_ITE(mkexpr(t3),
+                                       unop(Iop_Not64,
+                                            binop(Iop_Shl64,
+                                                  mkU64(0xffffffffffffffffULL),
+                                                  mkU8(rs+1))),
+                                       unop(Iop_Not64,
+                                            binop(Iop_Shr64,
+                                                  mkU64(0xffffffffffffffffULL),
+                                                  mkU8(rs+1)))));
+
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           unop(Iop_64to32,
+                                                binop(Iop_And64,
+                                                      mkexpr(t5),
+                                                      mkexpr(t6))),
+                                           binop(Iop_Shr32,
+                                                 unop(Iop_64HIto32,
+                                                      binop(Iop_And64,
+                                                            mkexpr(t5),
+                                                            mkexpr(t6))),
+                                                 mkU8(31-rs))));
+
+                     putIReg(rt, mkexpr(t7));
+                     break;
+                  }
+                  case 0xB: {  /* EXTPDPV */
+                     /* Variable-size variant of EXTPDP: the size comes from
+                        register rs instead of an immediate.  Extracts
+                        (size+1) bits from acc at DSPControl.pos and
+                        decrements pos past the extracted field. */
+                     DIP("extpdpv r%d, ac%d, r%d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I8);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I64);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I32);
+
+                     /* t8 = size: low 5 bits of rs. */
+                     assign(t8, binop(Iop_And32, getIReg(rs), mkU32(0x1f)));
+                     assign(t0, getAcc(ac));
+                     /* Extract pos field of DSPControl register. */
+                     assign(t1, binop(Iop_And32, getDSPControl(), mkU32(0x3f)));
+
+                     /* Check if (pos - size) >= 0 [size <= pos]
+                        if (pos < size)
+                           put 1 to EFI field of DSPControl register
+                        else
+                           extract bits from acc and put 0 to EFI field of
+                           DSPCtrl */
+                     assign(t2, binop(Iop_CmpLT32U, mkexpr(t1), mkexpr(t8)));
+
+                     /* t9 = DSPControl with the pos field updated to
+                        pos - (size+1); 0xffffbfc0 clears pos and EFI. */
+                     assign(t9, binop(Iop_Or32,
+                                      binop(Iop_And32,
+                                            getDSPControl(),
+                                            mkU32(0xffffbfc0)),
+                                      binop(Iop_And32,
+                                            binop(Iop_Sub32,
+                                                  binop(Iop_And32,
+                                                        getDSPControl(),
+                                                        mkU32(0x3f)),
+                                                  binop(Iop_Add32,
+                                                        mkexpr(t8),
+                                                        mkU32(0x1))),
+                                            mkU32(0x3f))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    binop(Iop_And32,
+                                                          getDSPControl(),
+                                                          mkU32(0xffffbfff)),
+                                                    mkU32(0x4000)),
+                                              mkexpr(t9)));
+
+                     /* If pos <= 31, shift right the value from the acc
+                        (pos-size) times and take (size+1) bits from the least
+                        significant positions. Otherwise, shift left the value
+                        (63-pos) times, take (size+1) bits from the most
+                        significant positions and shift right (31-size) times.
+                     */
+                     assign(t3, binop(Iop_CmpLE32U, mkexpr(t1), mkU32(31)));
+
+                     /* t4 = shift count for either of the two cases above. */
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t3),
+                                      unop(Iop_32to8,
+                                           binop(Iop_Sub32,
+                                                 mkexpr(t1), mkexpr(t8))),
+                                      unop(Iop_32to8,
+                                           binop(Iop_Sub32,
+                                                 mkU32(63), mkexpr(t1)))));
+
+                     assign(t5, IRExpr_ITE(mkexpr(t3),
+                                           binop(Iop_Shr64,
+                                                 mkexpr(t0), mkexpr(t4)),
+                                           binop(Iop_Shl64,
+                                                 mkexpr(t0), mkexpr(t4))));
+
+                     /* t6 holds a mask for bit extraction ((size+1) ones at
+                        the low or high end, matching the shift direction). */
+                     assign(t6,
+                            IRExpr_ITE(mkexpr(t3),
+                                       unop(Iop_Not64,
+                                            binop(Iop_Shl64,
+                                                  mkU64(0xffffffffffffffffULL),
+                                                  unop(Iop_32to8,
+                                                       binop(Iop_Add32,
+                                                             mkexpr(t8),
+                                                             mkU32(1))))),
+                                       unop(Iop_Not64,
+                                            binop(Iop_Shr64,
+                                                  mkU64(0xffffffffffffffffULL),
+                                                  unop(Iop_32to8,
+                                                       binop(Iop_Add32,
+                                                             mkexpr(t8),
+                                                             mkU32(1)))))));
+
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           unop(Iop_64to32,
+                                                binop(Iop_And64,
+                                                      mkexpr(t5),
+                                                      mkexpr(t6))),
+                                           binop(Iop_Shr32,
+                                                 unop(Iop_64HIto32,
+                                                      binop(Iop_And64,
+                                                            mkexpr(t5),
+                                                            mkexpr(t6))),
+                                                 unop(Iop_32to8,
+                                                      binop(Iop_Sub32,
+                                                            mkU32(31),
+                                                            mkexpr(t8))))));
+
+                     putIReg(rt, mkexpr(t7));
+                     break;
+                  }
<parameter>
+                  case 0xE: {  /* EXTR_S.H */
+                     /* Extract a halfword from acc, shifted right by the
+                        immediate in the rs field, saturating the result to
+                        the signed 16-bit range; sets DSPControl ouflag
+                        (bit 23) when saturation (or a sign change caused by
+                        the shift) occurs. */
+                     DIP("extr_s.h r%d, ac%d, %d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I64);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);  /* NOTE(review): allocated but never used */
+                     t6 = newTemp(Ity_I64);
+                     t7 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I32);
+
+                     assign(t0, getAcc(ac));
+
+                     /* t1 = acc arithmetically shifted right by rs. */
+                     assign(t1, binop(Iop_Sar64, mkexpr(t0), mkU8(rs)));
+
+                     /* t2 = DSPControl with the ouflag bit (bit 23) set. */
+                     assign(t2, binop(Iop_Or32,
+                                      getDSPControl(), mkU32(0x00800000)));
+
+                     /* Set ouflag if the shift changed the sign: bit 31 of
+                        the shifted value differs from bit 63 of acc. */
+                     assign(t9, binop(Iop_And32,
+                                      unop(Iop_64to32,
+                                           mkexpr(t1)),
+                                      mkU32(0x80000000)));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t9),
+                                                    binop(Iop_And32,
+                                                          unop(Iop_64HIto32,
+                                                               mkexpr(t0)),
+                                                          mkU32(0x80000000))),
+                                              mkexpr(t2),
+                                              getDSPControl()));
+
+                     /* Check if t1 > 0x7fff ((t1 - 0x7fff) > 0)
+                        1. subtract 0x7fff from t1
+                        2. if the resulting number is positive (sign bit = 0)
+                           and any of the other bits is 1, the value is > 0. */
+                     assign(t3, binop(Iop_Sub64,
+                                      mkexpr(t1),
+                                      mkU64(0x0000000000007fffULL)));
+                     assign(t4, binop(Iop_And32,
+                                       binop(Iop_Or32,
+                                            unop(Iop_1Sto32,
+                                                 binop(Iop_CmpNE32,
+                                                       mkU32(0),
+                                                       binop(Iop_And32,
+                                                             unop(Iop_64HIto32,
+                                                                  mkexpr(t3)),
+                                                             mkU32(0x7fffffff)))),
+                                            unop(Iop_1Sto32,
+                                                 binop(Iop_CmpNE32,
+                                                       mkU32(0),
+                                                       unop(Iop_64to32,
+                                                            mkexpr(t3))))),
+                                       unop(Iop_1Sto32,
+                                            binop(Iop_CmpEQ32,
+                                                  binop(Iop_And32,
+                                                        unop(Iop_64HIto32,
+                                                                  mkexpr(t3)),
+                                                             mkU32(0x80000000)),
+                                                  mkU32(0)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkU32(0),
+                                                    mkexpr(t4)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+                     /* Check if t1<0xffffffffffff8000 (0xffffffffffff8000-t1)>0
+                        1. subtract t1 from 0xffffffffffff8000
+                        2. if the resulting number is positive (sign bit = 0)
+                            and any of the other bits is 1, the value is > 0 */
+                     assign(t6, binop(Iop_Sub64,
+                                       mkU64(0xffffffffffff8000ULL),
+                                       mkexpr(t1)));
+                     assign(t7, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32,
+                                                 binop(Iop_CmpNE32,
+                                                       mkU32(0),
+                                                       binop(Iop_And32,
+                                                             unop(Iop_64HIto32,
+                                                                  mkexpr(t6)),
+                                                             mkU32(0x7fffffff)))),
+                                            unop(Iop_1Sto32,
+                                                 binop(Iop_CmpNE32,
+                                                       mkU32(0),
+                                                       unop(Iop_64to32,
+                                                            mkexpr(t6))))),
+                                      unop(Iop_1Sto32,
+                                            binop(Iop_CmpEQ32,
+                                                  binop(Iop_And32,
+                                                        unop(Iop_64HIto32,
+                                                                  mkexpr(t6)),
+                                                             mkU32(0x80000000)),
+                                                  mkU32(0)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkU32(0),
+                                                    mkexpr(t7)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+                     /* Saturate to 0x7fff (t1 > 0x7fff) or 0xffff8000
+                        (t1 < -0x8000); otherwise take the low 32 bits. */
+                     putIReg(rt, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkU32(0),
+                                                    mkexpr(t4)),
+                                            mkU32(0x00007fff),
+                                            IRExpr_ITE(binop(Iop_CmpNE32,
+                                                             mkU32(0),
+                                                             mkexpr(t7)),
+                                                       mkU32(0xffff8000),
+                                                       unop(Iop_64to32,
+                                                            mkexpr(t1)))));
+                     break;
+                  }
</parameter>
+                  case 0xF: {  /* EXTRV_S.H */
+                     DIP("extrv_s.h r%d, ac%d, %d", rt, ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I64);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I64);
+                     t7 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I32);
+
+                     assign(t0, getAcc(ac));
+
+                     assign(t1, binop(Iop_Sar64,
+                                      mkexpr(t0),
+                                      unop(Iop_32to8,
+                                           binop(Iop_And32,
+                                                 getIReg(rs),
+                                                 mkU32(0x1f)))));
+
+                     assign(t2, binop(Iop_Or32,
+                                      getDSPControl(), mkU32(0x00800000)));
+
+                     assign(t9, binop(Iop_And32,
+                                      unop(Iop_64to32,
+                                           mkexpr(t1)),
+                                      mkU32(0x80000000)));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t9),
+                                                    binop(Iop_And32,
+                                                          unop(Iop_64HIto32,
+                                                               mkexpr(t0)),
+                                                          mkU32(0x80000000))),
+                                              mkexpr(t2),
+                                              getDSPControl()));
+
+                     /* Check if t1 > 0x7fff ((t1 - 0x7fff) > 0)
+                        1. subtract 0x7fff from t1
+                        2. if the resulting number is positive (sign bit = 0)
+                           and any of the other bits is 1, the value is > 0. */
+                     assign(t3, binop(Iop_Sub64,
+                                      mkexpr(t1),
+                                      mkU64(0x0000000000007fffULL)));
+                     assign(t4, binop(Iop_And32,
+                                       binop(Iop_Or32,
+                                            unop(Iop_1Sto32,
+                                                 binop(Iop_CmpNE32,
+                                                       mkU32(0),
+                                                       binop(Iop_And32,
+                                                             unop(Iop_64HIto32,
+                                                                  mkexpr(t3)),
+                                                             mkU32(0x7fffffff)))),
+                                            unop(Iop_1Sto32,
+                                                 binop(Iop_CmpNE32,
+                                                       mkU32(0),
+                                                       unop(Iop_64to32,
+                                                            mkexpr(t3))))),
+                                       unop(Iop_1Sto32,
+                                            binop(Iop_CmpEQ32,
+                                                  binop(Iop_And32,
+                                                        unop(Iop_64HIto32,
+                                                                  mkexpr(t3)),
+                                                             mkU32(0x80000000)),
+                                                  mkU32(0)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkU32(0),
+                                                    mkexpr(t4)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+                     /* Check if t1<0xffffffffffff8000 (0xffffffffffff8000-t1)>0
+                        1. subtract t1 from 0xffffffffffff8000
+                        2. if the resulting number is positive (sign bit = 0)
+                            and any of the other bits is 1, the value is > 0 */
+                     assign(t6, binop(Iop_Sub64,
+                                       mkU64(0xffffffffffff8000ULL),
+                                       mkexpr(t1)));
+                     assign(t7, binop(Iop_And32,
+                                      binop(Iop_Or32,
+                                            unop(Iop_1Sto32,
+                                                 binop(Iop_CmpNE32,
+                                                       mkU32(0),
+                                                       binop(Iop_And32,
+                                                             unop(Iop_64HIto32,
+                                                                  mkexpr(t6)),
+                                                             mkU32(0x7fffffff)))),
+                                            unop(Iop_1Sto32,
+                                                 binop(Iop_CmpNE32,
+                                                       mkU32(0),
+                                                       unop(Iop_64to32,
+                                                            mkexpr(t6))))),
+                                      unop(Iop_1Sto32,
+                                            binop(Iop_CmpEQ32,
+                                                  binop(Iop_And32,
+                                                        unop(Iop_64HIto32,
+                                                                  mkexpr(t6)),
+                                                             mkU32(0x80000000)),
+                                                  mkU32(0)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkU32(0),
+                                                    mkexpr(t7)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00800000)),
+                                              getDSPControl()));
+                     putIReg(rt, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkU32(0),
+                                                    mkexpr(t4)),
+                                            mkU32(0x00007fff),
+                                            IRExpr_ITE(binop(Iop_CmpNE32,
+                                                             mkU32(0),
+                                                             mkexpr(t7)),
+                                                       mkU32(0xffff8000),
+                                                       unop(Iop_64to32,
+                                                            mkexpr(t1)))));
+                     break;
+                  }
+                  case 0x12: {  /* RDDSP*/
+                     DIP("rddsp r%d, mask 0x%x", rd, rddsp_mask);
+                     vassert(!mode64);
+
+                     putIReg(rd, mkU32(0x0));
+
+                     if ((rddsp_mask & 0x1) == 0x1) {
+                        /* Read pos field (bits 5-0) of DSPControl register. */
+                        putIReg(rd, binop(Iop_Or32,
+                                          getIReg(rd),
+                                          binop(Iop_And32,
+                                                getDSPControl(),
+                                                mkU32(0x0000003F))));
+                     }
+
+                     if ((rddsp_mask & 0x2) == 0x2) {
+                        /* Read scount field (bits 12-7) of DSPControl
+                           register. */
+                        putIReg(rd, binop(Iop_Or32,
+                                          getIReg(rd),
+                                          binop(Iop_And32,
+                                                getDSPControl(),
+                                                mkU32(0x00001F80))));
+                     }
+
+                     if ((rddsp_mask & 0x4) == 0x4) {
+                        /* Read C field (bit 13) of DSPControl register. */
+                        putIReg(rd, binop(Iop_Or32,
+                                          getIReg(rd),
+                                          binop(Iop_And32,
+                                                getDSPControl(),
+                                                mkU32(0x00002000))));
+                     }
+
+                     if ((rddsp_mask & 0x8) == 0x8) {
+                        /* Read outflag field (bits 23-16) of DSPControl
+                           register. */
+                        putIReg(rd, binop(Iop_Or32,
+                                          getIReg(rd),
+                                          binop(Iop_And32,
+                                                getDSPControl(),
+                                                mkU32(0x00FF0000))));
+                     }
+
+                     if ((rddsp_mask & 0x10) == 0x10) {
+                        /* Read ccond field (bits 31-24) of DSPControl
+                           register. */
+                        putIReg(rd, binop(Iop_Or32,
+                                          getIReg(rd),
+                                          binop(Iop_And32,
+                                                getDSPControl(),
+                                                mkU32(0xFF000000))));
+                     }
+
+                     if ((rddsp_mask & 0x20) == 0x20) {
+                        /* Read EFI field (bit 14) of DSPControl register. */
+                        putIReg(rd, binop(Iop_Or32,
+                                          getIReg(rd),
+                                          binop(Iop_And32,
+                                                getDSPControl(),
+                                                mkU32(0x00004000))));
+                     }
+
+                     if ((rddsp_mask & 0x3f) == 0x3f) {
+                        /* Read all fields of DSPControl register. */
+                        putIReg(rd, getDSPControl());
+                     }
+                     break;
+                  }
+                  case 0x13: {  /* WRDSP */
+                     DIP("wrdsp r%d, mask 0x%x", rs, wrdsp_mask);
+                     vassert(!mode64);
+
+                     if ((wrdsp_mask & 0x3f) == 0x3f) {
+                        /* If mips64 put all fields of rs, except bit 15 and bit
+                           6, to DSPControl register, otherwise put all except
+                           bits 15, 6 and bits 31..28. */
+                        putDSPControl(mode64 ?
+                                      binop(Iop_And32,
+                                            getIReg(rs),
+                                            mkU32(0xffff7fbf)) :
+                                      binop(Iop_And32,
+                                            getIReg(rs),
+                                            mkU32(0x0fff7fbf)));
+                     } else {
+                        if ((wrdsp_mask & 0x1) == 0x1) {
+                           /* Put bits 5-0 of rs to DSPControl register pos
+                              field. */
+                           putDSPControl(binop(Iop_Or32,
+                                               binop(Iop_And32,
+                                                     getDSPControl(),
+                                                     mkU32(0xFFFF7F40)),
+                                               binop(Iop_And32,
+                                                     getIReg(rs),
+                                                     mkU32(0x0000003F))));
+                        }
+
+                        if ((wrdsp_mask & 0x2) == 0x2) {
+                           /* Put bits 12-7 of rs to DSPControl scount field. */
+                           putDSPControl(binop(Iop_Or32,
+                                               binop(Iop_And32,
+                                                     getDSPControl(),
+                                                     mkU32(0xFFFFE03F)),
+                                               binop(Iop_And32,
+                                                     getIReg(rs),
+                                                     mkU32(0x00001F80))));
+                        }
+
+                        if ((wrdsp_mask & 0x4) == 0x4) {
+                           /* Put bit 13 of rs to DSPControl register C
+                              field. */
+                           putDSPControl(binop(Iop_Or32,
+                                               binop(Iop_And32,
+                                                     getDSPControl(),
+                                                     mkU32(0xFFFF5FBF)),
+                                               binop(Iop_And32,
+                                                     getIReg(rs),
+                                                     mkU32(0x00002000))));
+                        }
+
+                        if ((wrdsp_mask & 0x8) == 0x8) {
+                           /* Put bits 23-16 of rs to DSPControl reg outflag
+                              field. */
+                           putDSPControl(binop(Iop_Or32,
+                                               binop(Iop_And32,
+                                                     getDSPControl(),
+                                                     mkU32(0xFF007FBF)),
+                                               binop(Iop_And32,
+                                                     getIReg(rs),
+                                                     mkU32(0x00FF0000))));
+                        }
+
+                        if ((wrdsp_mask & 0x10) == 0x10) {
+                           /* Put bits 31-24 of rs to DSPControl reg ccond
+                              field. */
+                           putDSPControl(binop(Iop_Or32,
+                                               binop(Iop_And32,
+                                                     getDSPControl(),
+                                                     mkU32(0x00FF7FBF)),
+                                               binop(Iop_And32,
+                                                     getIReg(rs),
+                                                     mode64 ? mkU32(0xFF000000)
+                                                            : mkU32(0x0F000000))
+                                               )
+                                        );
+                        }
+
+                        if ((wrdsp_mask & 0x20) == 0x20) {
+                           /* Put bit 14 of rs to DSPControl register EFI
+                              field. */
+                           putDSPControl(binop(Iop_Or32,
+                                               binop(Iop_And32,
+                                                     getDSPControl(),
+                                                     mkU32(0xFFFF3FBF)),
+                                               binop(Iop_And32,
+                                                     getIReg(rs),
+                                                     mkU32(0x00004000))));
+                        }
+                     }
+                     break;
+                  }
+                  case 0x1A: {  /* SHILO */
+                     DIP("shilo ac%d, %d", ac, shift);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+
+                     assign(t0, getAcc(ac));
+
+                     putAcc(ac, mkexpr(t0));
+
+                     if (0x20 == (shift & 0x3f)) {
+                        putAcc(ac, binop(Iop_32HLto64,
+                                         unop(Iop_64to32, mkexpr(t0)),
+                                         mkU32(0x0)));
+                     } else if (0x20 == (shift & 0x20)) {
+                        assign(t1, binop(Iop_Shl64,
+                                         mkexpr(t0),
+                                         unop(Iop_32to8,
+                                              binop(Iop_Add32,
+                                                    unop(Iop_Not32,
+                                                         mkU32(shift)),
+                                                    mkU32(0x1)))));
+
+                        putAcc(ac, mkexpr(t1));
+                     } else {
+                        assign(t1, binop(Iop_Shr64, mkexpr(t0), mkU8(shift)));
+
+                        putAcc(ac, mkexpr(t1));
+                     }
+                     break;
+                  }
+                  case 0x1B: {  /* SHILOV */
+                     DIP("shilov ac%d, r%d", ac, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I64);
+                     t4 = newTemp(Ity_I64);
+
+                     assign(t0, getAcc(ac));
+                     assign(t1, binop(Iop_And32, getIReg(rs), mkU32(0x3f)));
+                     assign(t2, binop(Iop_CmpEQ32, mkexpr(t1), mkU32(0x20)));
+                     assign(t3, binop(Iop_Shl64,
+                                      mkexpr(t0),
+                                      unop(Iop_32to8,
+                                           binop(Iop_Add32,
+                                                 unop(Iop_Not32,
+                                                      mkexpr(t1)),
+                                                 mkU32(0x1)))));
+                     assign(t4, binop(Iop_Shr64,
+                                      mkexpr(t0),
+                                      unop(Iop_32to8,
+                                           mkexpr(t1))));
+
+                     putAcc(ac,
+                            IRExpr_ITE(mkexpr(t2),
+                                       binop(Iop_32HLto64,
+                                             unop(Iop_64to32, mkexpr(t0)),
+                                             mkU32(0x0)),
+                                       IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                        binop(Iop_And32,
+                                                              mkexpr(t1),
+                                                              mkU32(0x20)),
+                                                        mkU32(0x20)),
+                                                  mkexpr(t3),
+                                                  mkexpr(t4))));
+                     break;
+                  }
+                  case 0x1F: {  /* MTHLIP */
+                     DIP("mthlip r%d, ac%d", rs, ac);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+
+                     assign(t0, getAcc(ac));
+                     putAcc(ac, binop(Iop_32HLto64,
+                                      unop(Iop_64to32, mkexpr(t0)),
+                                      getIReg(rs)));
+                     assign(t1, binop(Iop_And32, getDSPControl(), mkU32(0x3f)));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpLE32U,
+                                                    mkU32(32),
+                                                    mkexpr(t1)),
+                                              binop(Iop_Or32,
+                                                    binop(Iop_Sub32,
+                                                          mkexpr(t1),
+                                                          mkU32(32)),
+                                                   binop(Iop_And32,
+                                                         getDSPControl(),
+                                                         mkU32(0xffffffc0))),
+                                              binop(Iop_Or32,
+                                                    binop(Iop_Add32,
+                                                          mkexpr(t1),
+                                                          mkU32(32)),
+                                                    binop(Iop_And32,
+                                                          getDSPControl(),
+                                                          mkU32(0xffffffc0)))));
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of EXTR.W */
+            }
+            case 0xA: {  /* LX */
+               switch(sa) {
+                  case 0x0: {  /* LWX */
+                     DIP("lwx r%d, r%d(r%d)", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+
+                     assign(t0, binop(Iop_Add32, getIReg(rt), getIReg(rs)));
+
+                     putIReg(rd, load(Ity_I32, mkexpr(t0)));
+                     break;
+                  }
+                  case 0x4: {  /* LHX */
+                     DIP("lhx r%d, r%d(r%d)", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+
+                     assign(t0, binop(Iop_Add32, getIReg(rt), getIReg(rs)));
+
+                     putIReg(rd, unop(Iop_16Sto32, load(Ity_I16, mkexpr(t0))));
+                     break;
+                  }
+                  case 0x6: {  /* LBUX */
+                     DIP("lbux r%d, r%d(r%d)", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+
+                     assign(t0, binop(Iop_Add32, getIReg(rt), getIReg(rs)));
+
+                     putIReg(rd, unop(Iop_8Uto32, load(Ity_I8, mkexpr(t0))));
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of LX */
+            }
+            case 0xC: {  /* INSV */
+               switch(sa) {
+                  case 0x0: {  /* INSV */
+                     DIP("insv r%d, r%d", rt, rs);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I8);
+                     t3 = newTemp(Ity_I8);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I32);
+
+                     /* t0 <- pos field of DSPControl register. */
+                     assign(t0, binop(Iop_And32, getDSPControl(), mkU32(0x3f)));
+                     /* t1 <- scount field of DSPControl register. */
+                     assign(t1, binop(Iop_Shr32,
+                                      binop(Iop_And32,
+                                            getDSPControl(),
+                                            mkU32(0x1f80)),
+                                      mkU8(7)));
+
+                     assign(t2, unop(Iop_32to8,
+                                     binop(Iop_Add32,
+                                           mkexpr(t1),
+                                           mkexpr(t0))));
+
+                     /* 32-(pos+size) most significant bits of rt. */
+                     assign(t6, binop(Iop_Shl32,
+                                      binop(Iop_Shr32,
+                                            getIReg(rt),
+                                            mkexpr(t2)),
+                                      mkexpr(t2)));
+
+                     assign(t3, unop(Iop_32to8,
+                                     binop(Iop_Sub32,
+                                           mkU32(32),
+                                           mkexpr(t0))));
+                     /* Pos least significant bits of rt. */
+                     assign(t7, binop(Iop_Shr32,
+                                      binop(Iop_Shl32,
+                                            getIReg(rt),
+                                            mkexpr(t3)),
+                                      mkexpr(t3)));
+
+                     /* Size least significant bits of rs,
+                        shifted to appropriate position. */
+                     assign(t8, binop(Iop_Shl32,
+                                      binop(Iop_And32,
+                                            getIReg(rs),
+                                            unop(Iop_Not32,
+                                                 binop(Iop_Shl32,
+                                                       mkU32(0xffffffff),
+                                                       unop(Iop_32to8,
+                                                            mkexpr(t1))))),
+                                      unop(Iop_32to8,
+                                           mkexpr(t0))));
+
+                     putIReg(rt, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                  mkexpr(t0),
+                                                  mkU32(0)),
+                                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                             mkexpr(t1),
+                                                             mkU32(32)),
+                                                       getIReg(rs),
+                                                       binop(Iop_Or32,
+                                                             mkexpr(t6),
+                                                             mkexpr(t8))),
+                                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                             unop(Iop_8Uto32,
+                                                                  mkexpr(t2)),
+                                                             mkU32(32)),
+                                                       binop(Iop_Or32,
+                                                             mkexpr(t7),
+                                                             mkexpr(t8)),
+                                                       binop(Iop_Or32,
+                                                             binop(Iop_Or32,
+                                                                   mkexpr(t6),
+                                                                   mkexpr(t7)),
+                                                             mkexpr(t8)))));
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of INSV */
+            }
+            case 0x10: {  /* ADDU.QB */
+               switch(sa) {
+                  case 0x00: {  /* ADDU.QB */
+                     DIP("addu.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I32);
+
+                     /* Add rightmost bytes of rs and rt. */
+                     assign(t0,
+                            binop(Iop_Add32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t1 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t0),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+
+                     /* Add bits 15-8 of rs and rt. */
+                     assign(t2,
+                            binop(Iop_Add32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t3 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t2),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+
+                     /* Add bits 23-16 of rs and rt. */
+                     assign(t4,
+                            binop(Iop_Add32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t5 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t5, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t4),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+
+                     /* Add bits 31-24 of rs and rt. */
+                     assign(t6,
+                            binop(Iop_Add32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t7 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t6),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+
+                     assign(t8,
+                            binop(Iop_Or32,
+                                  binop(Iop_Or32,
+                                        binop(Iop_Or32,
+                                              unop(Iop_1Sto32, mkexpr(t7)),
+                                              unop(Iop_1Sto32,  mkexpr(t5))),
+                                        unop(Iop_1Sto32, mkexpr(t3))),
+                                  unop(Iop_1Sto32, mkexpr(t1))));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    mkexpr(t8),
+                                                    mkU32(0x0)),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000))));
+
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       binop(Iop_8HLto16,
+                                             unop(Iop_32to8, mkexpr(t6)),
+                                             unop(Iop_32to8, mkexpr(t4))),
+                                       binop(Iop_8HLto16,
+                                             unop(Iop_32to8, mkexpr(t2)),
+                                             unop(Iop_32to8, mkexpr(t0)))));
+                     break;
+                  }
+                  case 0x1: {  /* SUBU.QB */
+                     DIP("subu.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I32);
+
+                     /* Subtract rightmost bytes of rs and rt. */
+                     assign(t0,
+                            binop(Iop_Sub32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t1 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t0),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+
+                     /* Subtract bits 15-8 of rs and rt. */
+                     assign(t2,
+                            binop(Iop_Sub32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t3 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t2),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+
+                     /* Subtract bits 23-16 of rs and rt. */
+                     assign(t4,
+                            binop(Iop_Sub32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t5 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t5, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t4),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+
+                     /* Subtract bits 31-24 of rs and rt. */
+                     assign(t6,
+                            binop(Iop_Sub32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t7 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t6),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+
+                     assign(t8, binop(Iop_Or32,
+                                      binop(Iop_Or32,
+                                            binop(Iop_Or32,
+                                                  unop(Iop_1Sto32, mkexpr(t7)),
+                                                  unop(Iop_1Sto32, mkexpr(t5))),
+                                            unop(Iop_1Sto32, mkexpr(t3))),
+                                      unop(Iop_1Sto32, mkexpr(t1))));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                     mkexpr(t8),
+                                                     mkU32(0x0)),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000))));
+
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       binop(Iop_8HLto16,
+                                             unop(Iop_32to8, mkexpr(t6)),
+                                             unop(Iop_32to8, mkexpr(t4))),
+                                       binop(Iop_8HLto16,
+                                             unop(Iop_32to8, mkexpr(t2)),
+                                             unop(Iop_32to8, mkexpr(t0)))));
+                     break;
+                  }
+                  case 0x04: {  /* ADDU_S.QB */
+                     DIP("addu_s.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I8);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I8);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I8);
+                     t9 = newTemp(Ity_I32);
+                     t10 = newTemp(Ity_I1);
+                     t11 = newTemp(Ity_I8);
+                     t12 = newTemp(Ity_I32);
+
+                     /* Add rightmost bytes of rs and rt. */
+                     assign(t0,
+                            binop(Iop_Add32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t1 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t0),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+                     /* Saturate if necessary. */
+                     assign(t2, IRExpr_ITE(mkexpr(t1),
+                                           mkU8(0xff),
+                                           unop(Iop_32to8, mkexpr(t0))));
+
+                     /* Add bits 15-8 of rs and rt. */
+                     assign(t3,
+                            binop(Iop_Add32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t4 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t4, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t3),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+                     /* Saturate if necessary. */
+                     assign(t5, IRExpr_ITE(mkexpr(t4),
+                                           mkU8(0xff),
+                                           unop(Iop_32to8, mkexpr(t3))));
+
+                     /* Add bits 23-16 of rs and rt. */
+                     assign(t6,
+                            binop(Iop_Add32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t7 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t6),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+                     /* Saturate if necessary. */
+                     assign(t8, IRExpr_ITE(mkexpr(t7),
+                                           mkU8(0xff),
+                                           unop(Iop_32to8, mkexpr(t6))));
+
+                     /* Add bits 31-24 of rs and rt. */
+                     assign(t9,
+                            binop(Iop_Add32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t10 will be 1 if there is overflow, 0 otherwise. */
+                     assign(t10, binop(Iop_CmpEQ32,
+                                       binop(Iop_And32,
+                                             mkexpr(t9),
+                                             mkU32(0x00000100)),
+                                       mkU32(0x00000100)));
+                     /* Saturate if necessary. */
+                     assign(t11, IRExpr_ITE(mkexpr(t10),
+                                            mkU8(0xff),
+                                            unop(Iop_32to8, mkexpr(t9))));
+
+                     assign(t12,
+                            binop(Iop_Or32,
+                                  binop(Iop_Or32,
+                                        binop(Iop_Or32,
+                                              unop(Iop_1Sto32, mkexpr(t10)),
+                                              unop(Iop_1Sto32, mkexpr(t7))),
+                                        unop(Iop_1Sto32, mkexpr(t4))),
+                                  unop(Iop_1Sto32, mkexpr(t1))));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    mkexpr(t12),
+                                                    mkU32(0x0)),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000))));
+
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   binop(Iop_8HLto16, mkexpr(t11), mkexpr(t8)),
+                                   binop(Iop_8HLto16, mkexpr(t5), mkexpr(t2))));
+                     break;
+                  }
+                  case 0x05: {  /* SUBU_S.QB */
+                     DIP("subu_s.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I32);
+
+                     /* Compute the saturated result directly with the
+                        QSub8Ux4 IR primitive; underflow (for DSPControl)
+                        is detected below by per-byte subtraction. */
+                     assign(t1, binop(Iop_QSub8Ux4, getIReg(rs), getIReg(rt)));
+
+                     /* Subtract each byte of rs and rt. */
+                     assign(t6,
+                            binop(Iop_Sub32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     assign(t7,
+                            binop(Iop_Sub32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     assign(t8,
+                            binop(Iop_Sub32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     assign(t9,
+                            binop(Iop_Sub32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+
+                     /* Put 1 to bit 20 in DSPControl if there is underflow
+                        in either byte. */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t6),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t7),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+                     assign(t4, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t8),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t4),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+                     assign(t5, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            mkexpr(t9),
+                                            mkU32(0x00000100)),
+                                      mkU32(0x00000100)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t5),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+                     putIReg(rd, mkexpr(t1));
+                     break;
+                  }
+                  case 0x6: {  /* MULEU_S.PH.QBL */
+                     DIP("muleu_s.ph.qbl r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+
+                     assign(t0,
+                            unop(Iop_64to32,
+                                 binop(Iop_MullU32,
+                                       unop(Iop_8Uto32,
+                                            unop(Iop_16HIto8,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rs)))),
+                                       unop(Iop_16Uto32,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     assign(t1,
+                            unop(Iop_64to32,
+                                 binop(Iop_MullU32,
+                                       unop(Iop_8Uto32,
+                                            unop(Iop_16to8,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rs)))),
+                                       unop(Iop_16Uto32,
+                                            unop(Iop_32to16, getIReg(rt))))));
+
+                     assign(t2, binop(Iop_CmpNE32,
+                                      mkU32(0x0),
+                                      binop(Iop_And32,
+                                            mkexpr(t0),
+                                            mkU32(0x03ff0000))));
+                     assign(t3, binop(Iop_CmpNE32,
+                                      mkU32(0x0),
+                                      binop(Iop_And32,
+                                            mkexpr(t1),
+                                            mkU32(0x03ff0000))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x200000)),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x200000)),
+                                                         getDSPControl())));
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   IRExpr_ITE(mkexpr(t2),
+                                              mkU16(0xffff),
+                                              unop(Iop_32to16, mkexpr(t0))),
+                                   IRExpr_ITE(mkexpr(t3),
+                                              mkU16(0xffff),
+                                              unop(Iop_32to16, mkexpr(t1)))));
+                     break;
+                  }
+                  case 0x7: {  /* MULEU_S.PH.QBR */
+                     DIP("muleu_s.ph.qbr r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+
+                     assign(t0, unop(Iop_64to32,
+                                     binop(Iop_MullU32,
+                                           unop(Iop_8Uto32,
+                                                unop(Iop_16HIto8,
+                                                     unop(Iop_32to16,
+                                                          getIReg(rs)))),
+                                           unop(Iop_16Uto32,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t1, unop(Iop_64to32,
+                                     binop(Iop_MullU32,
+                                           unop(Iop_8Uto32,
+                                                unop(Iop_16to8,
+                                                     unop(Iop_32to16,
+                                                          getIReg(rs)))),
+                                           unop(Iop_16Uto32,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+
+                     assign(t2, binop(Iop_CmpNE32,
+                                      mkU32(0x0),
+                                      binop(Iop_And32,
+                                            mkexpr(t0),
+                                            mkU32(0x03ff0000))));
+                     assign(t3, binop(Iop_CmpNE32,
+                                      mkU32(0x0),
+                                      binop(Iop_And32,
+                                            mkexpr(t1),
+                                            mkU32(0x03ff0000))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x200000)),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x200000)),
+                                                         getDSPControl())));
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       IRExpr_ITE(mkexpr(t2),
+                                                  mkU16(0xffff),
+                                                  unop(Iop_32to16,
+                                                       mkexpr(t0))),
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU16(0xffff),
+                                                  unop(Iop_32to16,
+                                                       mkexpr(t1)))));
+                     break;
+                  }
+                  case 0x08: {  /* ADDU.PH */
+                     DIP("addu.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+
+                     /* Add lower halves. */
+                     assign(t0, binop(Iop_Add32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+
+                     /* Detect overflow. */
+                     assign(t1, binop(Iop_CmpLT32U,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, mkexpr(t0))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs)))));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     /* Add higher halves. */
+                     assign(t2, binop(Iop_Add32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* Detect overflow. */
+                     assign(t3, binop(Iop_CmpLT32U,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, mkexpr(t2))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16,
+                                                getIReg(rs)))));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32to16, mkexpr(t2)),
+                                       unop(Iop_32to16, mkexpr(t0))));
+                     break;
+                  }
+                  case 0x9: {  /* SUBU.PH */
+                     DIP("subu.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+
+                     /* Subtract lower halves. */
+                     assign(t0, binop(Iop_Sub32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+
+                     /* Detect underflow. */
+                     assign(t1, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            mkexpr(t0),
+                                            mkU32(0x00010000)),
+                                      mkU32(0x0)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     /* Subtract higher halves. */
+                     assign(t2, binop(Iop_Sub32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* Detect underflow. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            mkexpr(t2),
+                                            mkU32(0x00010000)),
+                                      mkU32(0x0)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32to16, mkexpr(t2)),
+                                       unop(Iop_32to16, mkexpr(t0))));
+                     break;
+                  }
+                  case 0xA: {  /* ADDQ.PH */
+                     DIP("addq.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+
+                     /* Add lower halves. */
+                     assign(t0, binop(Iop_Add32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+
+                     /* Bit 16 of the result. */
+                     assign(t6, binop(Iop_And32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, mkexpr(t0))),
+                                      mkU32(0x1)));
+                     /* Detect overflow. */
+                     assign(t1, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t0),
+                                                  mkU32(0x8000)),
+                                            mkU8(15)),
+                                      mkexpr(t6)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     /* Add higher halves. */
+                     assign(t2, binop(Iop_Add32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* Bit 16 of the result. */
+                     assign(t7, binop(Iop_And32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, mkexpr(t2))),
+                                      mkU32(0x1)));
+                     /* Detect overflow. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t2),
+                                                  mkU32(0x00008000)),
+                                            mkU8(15)),
+                                      mkexpr(t7)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32to16, mkexpr(t2)),
+                                       unop(Iop_32to16, mkexpr(t0))));
+                     break;
+                  }
+                  case 0xB: {  /* SUBQ.PH */
+                     /* Per-lane 16-bit signed (Q15) subtract: each halfword
+                        lane of rd = rs - rt.  Results wrap (no saturation);
+                        any lane that overflows ORs bit 20 into DSPControl.
+                        Overflow in a lane is detected as bit 15 != bit 16 of
+                        the sign-extended 32-bit difference. */
+                     DIP("subq.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+
+                     /* Subtract lower halves. */
+                     assign(t0, binop(Iop_Sub32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+
+                     /* Bit 16 of the result. */
+                     assign(t6, binop(Iop_And32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, mkexpr(t0))),
+                                      mkU32(0x1)));
+                     /* Compare the signs of input value and the result. */
+                     assign(t1, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t0),
+                                                  mkU32(0x8000)),
+                                            mkU8(15)),
+                                      mkexpr(t6)));
+
+                     /* On overflow, set DSPControl bit 20. */
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     /* Subtract higher halves. */
+                     assign(t2, binop(Iop_Sub32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* Bit 16 of the result. */
+                     assign(t7, binop(Iop_And32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, mkexpr(t2))),
+                                      mkU32(0x1)));
+                     /* Compare the signs of input value and the result. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t2),
+                                                  mkU32(0x00008000)),
+                                            mkU8(15)),
+                                      mkexpr(t7)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     /* Repack the two (wrapped) 16-bit lane results. */
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32to16, mkexpr(t2)),
+                                       unop(Iop_32to16, mkexpr(t0))));
+                     break;
+                  }
+                  case 0xC: {  /* ADDU_S.PH */
+                     /* Per-lane 16-bit unsigned saturating add: each halfword
+                        lane of rd = rs + rt, clamped to 0xffff on carry-out.
+                        A carrying lane also ORs bit 20 into DSPControl.
+                        Carry in a lane is detected by the wrap test
+                        (16-bit sum) < (16-bit operand from rs). */
+                     DIP("addu_s.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+
+                     /* Add lower halves. */
+                     assign(t0, binop(Iop_Add32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+
+                     /* Detect overflow. */
+                     assign(t1, binop(Iop_CmpLT32U,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, mkexpr(t0))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs)))));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     /* Add higher halves. */
+                     assign(t2, binop(Iop_Add32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* Detect overflow. */
+                     assign(t3, binop(Iop_CmpLT32U,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, mkexpr(t2))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs)))));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     /* Repack, saturating each overflowed lane to 0xffff. */
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU16(0xffff),
+                                                  unop(Iop_32to16,
+                                                       mkexpr(t2))),
+                                       IRExpr_ITE(mkexpr(t1),
+                                                  mkU16(0xffff),
+                                                  unop(Iop_32to16,
+                                                       mkexpr(t0)))));
+                     break;
+                  }
+                  case 0xD: {  /* SUBU_S.PH */
+                     /* Per-lane 16-bit unsigned saturating subtract: each
+                        halfword lane of rd = rs - rt, clamped to 0x0000 on
+                        borrow.  A borrowing lane also ORs bit 20 into
+                        DSPControl.  Borrow is detected as bit 16 set in the
+                        32-bit difference of the zero-extended operands. */
+                     DIP("subu_s.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+
+                     /* Subtract lower halves. */
+                     assign(t0, binop(Iop_Sub32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+
+                     /* Detect underflow. */
+                     assign(t1, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            mkexpr(t0), mkU32(0x00010000)),
+                                      mkU32(0x0)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     /* Subtract higher halves. */
+                     assign(t2, binop(Iop_Sub32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* Detect underflow. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            mkexpr(t2), mkU32(0x00010000)),
+                                      mkU32(0x0)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     /* Repack, clamping each underflowed lane to 0x0000. */
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   IRExpr_ITE(mkexpr(t3),
+                                              mkU16(0x0000),
+                                              unop(Iop_32to16, mkexpr(t2))),
+                                   IRExpr_ITE(mkexpr(t1),
+                                              mkU16(0x0000),
+                                              unop(Iop_32to16, mkexpr(t0)))));
+                     break;
+                  }
+                  case 0xE: {  /* ADDQ_S.PH */
+                     /* Per-lane 16-bit signed (Q15) saturating add.  A lane
+                        overflows when bit 15 and bit 16 of the sign-extended
+                        32-bit sum differ; it then clamps to 0x7fff (positive
+                        overflow, bit 16 clear) or 0x8000 (negative overflow,
+                        bit 16 set) and ORs bit 20 into DSPControl. */
+                     DIP("addq_s.ph r%d r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I16);
+                     t5 = newTemp(Ity_I16);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+
+                     /* Add lower halves. */
+                     assign(t0, binop(Iop_Add32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+
+                     /* Bit 16 of the result. */
+                     assign(t6, binop(Iop_And32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, mkexpr(t0))),
+                                      mkU32(0x1)));
+                     /* Detect overflow. */
+                     assign(t1, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t0),
+                                                  mkU32(0x8000)),
+                                            mkU8(15)),
+                                      mkexpr(t6)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+                     /* Saturate if needed. */
+                     assign(t4, IRExpr_ITE(mkexpr(t1),
+                                           IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                            mkexpr(t6),
+                                                            mkU32(0x0)),
+                                                      mkU16(0x7fff),
+                                                      mkU16(0x8000)),
+                                           unop(Iop_32to16, mkexpr(t0))));
+
+                     /* Add higher halves. */
+                     assign(t2, binop(Iop_Add32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* Bit 16 of the result. */
+                     assign(t7, binop(Iop_And32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, mkexpr(t2))),
+                                      mkU32(0x1)));
+                     /* Detect overflow. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t2),
+                                                  mkU32(0x00008000)),
+                                            mkU8(15)),
+                                      mkexpr(t7)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+                     /* Saturate if needed. */
+                     assign(t5, IRExpr_ITE(mkexpr(t3),
+                                           IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                            mkexpr(t7),
+                                                            mkU32(0x0)),
+                                                      mkU16(0x7fff),
+                                                      mkU16(0x8000)),
+                                           unop(Iop_32to16, mkexpr(t2))));
+
+                     /* t5 = saturated hi lane, t4 = saturated lo lane. */
+                     putIReg(rd, binop(Iop_16HLto32, mkexpr(t5), mkexpr(t4)));
+                     break;
+                  }
+                  case 0xF: {  /* SUBQ_S.PH */
+                     /* Per-lane 16-bit signed (Q15) saturating subtract.
+                        Same lane-overflow test as ADDQ_S.PH (bit 15 vs bit 16
+                        of the sign-extended 32-bit difference); an overflowed
+                        lane clamps to 0x7fff or 0x8000 by sign and ORs bit 20
+                        into DSPControl. */
+                     DIP("subq_s.ph r%d r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I16);
+                     t5 = newTemp(Ity_I16);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+
+                     /* Subtract lower halves. */
+                     assign(t0, binop(Iop_Sub32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+
+                     /* Bit 16 of the result. */
+                     assign(t6, binop(Iop_And32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, mkexpr(t0))),
+                                      mkU32(0x1)));
+                     /* Detect overflow or underflow. */
+                     assign(t1, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t0),
+                                                  mkU32(0x8000)),
+                                            mkU8(15)),
+                                      mkexpr(t6)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+                     /* Saturate if needed. */
+                     assign(t4, IRExpr_ITE(mkexpr(t1),
+                                           IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                            mkexpr(t6),
+                                                            mkU32(0x0)),
+                                                      mkU16(0x7fff),
+                                                      mkU16(0x8000)),
+                                           unop(Iop_32to16, mkexpr(t0))));
+
+                     /* Subtract higher halves. */
+                     assign(t2, binop(Iop_Sub32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* Bit 16 of the result. */
+                     assign(t7, binop(Iop_And32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, mkexpr(t2))),
+                                      mkU32(0x1)));
+                     /* Detect overflow or underflow. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  mkexpr(t2),
+                                                  mkU32(0x00008000)),
+                                            mkU8(15)),
+                                      mkexpr(t7)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+                     /* Saturate if needed. */
+                     assign(t5, IRExpr_ITE(mkexpr(t3),
+                                           IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                            mkexpr(t7),
+                                                            mkU32(0x0)),
+                                                      mkU16(0x7fff),
+                                                      mkU16(0x8000)),
+                                           unop(Iop_32to16, mkexpr(t2))));
+
+                     /* t5 = saturated hi lane, t4 = saturated lo lane. */
+                     putIReg(rd, binop(Iop_16HLto32, mkexpr(t5), mkexpr(t4)));
+                     break;
+                  }
+                  case 0x10: {  /* ADDSC */
+                     /* 32-bit add with carry-out capture: rd = rs + rt; the
+                        carry (bit 32 of the 64-bit zero-extended sum) is
+                        written into DSPControl bit 13 (the c field),
+                        overwriting — not ORing — the previous value. */
+                     DIP("addsc r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I1);
+
+                     /* The carry bit result out of the addition operation is
+                        written to bit 13(the c field) of the DSPControl reg. */
+                     assign(t0, binop(Iop_Add64,
+                                      unop(Iop_32Uto64, getIReg(rs)),
+                                      unop(Iop_32Uto64, getIReg(rt))));
+
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64HIto32, mkexpr(t0)),
+                                            mkU32(0x1)),
+                                      mkU32(0x1)));
+                     /* Set bit 13 on carry, otherwise clear it. */
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x2000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xffffdfff))));
+
+                     putIReg(rd, unop(Iop_64to32, mkexpr(t0)));
+                     break;
+                  }
+                  case 0x11: {  /* ADDWC */
+                     /* 32-bit add with carry-in: rd = rs + rt + DSPControl.c
+                        (bit 13).  Signed overflow — detected as bit 32 of the
+                        64-bit signed sum differing from bit 31 of its low
+                        word — ORs bit 20 into DSPControl.
+                        NOTE(review): the carry is added to rt with 32-bit
+                        Iop_Add32 *before* sign-extension; confirm the corner
+                        case rt == 0x7fffffff with carry == 1 against the
+                        architected ADDWC behaviour. */
+                     DIP("addwc r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I1);
+
+                     /* Get carry bit from DSPControl register. */
+                     assign(t0, binop(Iop_Shr32,
+                                       binop(Iop_And32,
+                                             getDSPControl(),
+                                             mkU32(0x2000)),
+                                       mkU8(0xd)));
+                     assign(t1, binop(Iop_Add64,
+                                      unop(Iop_32Sto64, getIReg(rs)),
+                                      unop(Iop_32Sto64,
+                                           binop(Iop_Add32,
+                                                 getIReg(rt),
+                                                 mkexpr(t0)))));
+
+                     /* Extract bits 32 and 31. */
+                     assign(t2, binop(Iop_And32,
+                                      unop(Iop_64HIto32, mkexpr(t1)),
+                                      mkU32(0x1)));
+                     assign(t3, binop(Iop_Shr32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64to32, mkexpr(t1)),
+                                            mkU32(0x80000000)),
+                                      mkU8(31)));
+                     assign(t4, binop(Iop_CmpNE32, mkexpr(t2), mkexpr(t3)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t4),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+                     putIReg(rd, unop(Iop_64to32, mkexpr(t1)));
+                     break;
+                  }
<br>+                  case 0x12: {  /* MODSUB */
+                     /* Modular decrement for circular-buffer indexing:
+                        rt packs decr in bits 7..0 and lastindex in bits 23..8.
+                        rd = (rs == 0) ? lastindex : rs - decr. */
+                     DIP("modsub r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+
+                     /* decr_7..0 */
+                     assign(t0,
+                            unop(Iop_8Uto32,
+                                 unop(Iop_16to8,
+                                      unop(Iop_32to16, getIReg(rt)))));
+
+                     /* lastindex_15..0: reassemble rt[23:16] and rt[15:8]. */
+                     assign(t1,
+                            unop(Iop_16Uto32,
+                                 binop(Iop_8HLto16,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rt))),
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* temp_15..0 */
+                     assign(t2,
+                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                             getIReg(rs),
+                                             mkU32(0x00000000)),
+                                       mkexpr(t1),
+                                       binop(Iop_Sub32,
+                                             getIReg(rs), mkexpr(t0))));
+                     putIReg(rd, mkexpr(t2));
+                     break;
+                  }
+                  case 0x14: {  /* RADDU.W.QB */
+                     /* Horizontal reduction: rd = sum of the four unsigned
+                        bytes of rs (each zero-extended to 32 bits).  rt is
+                        not used by this instruction. */
+                     DIP("raddu.w.qb r%d, r%d", rd, rs);
+                     vassert(!mode64);
+                     putIReg(rd, binop(Iop_Add32,
+                                       binop(Iop_Add32,
+                                             unop(Iop_8Uto32,
+                                                  unop(Iop_16to8,
+                                                       unop(Iop_32to16,
+                                                            getIReg(rs)))),
+                                             unop(Iop_8Uto32,
+                                                  unop(Iop_16HIto8,
+                                                       unop(Iop_32to16,
+                                                            getIReg(rs))))),
+                                       binop(Iop_Add32,
+                                             unop(Iop_8Uto32,
+                                                  unop(Iop_16to8,
+                                                       unop(Iop_32HIto16,
+                                                            getIReg(rs)))),
+                                             unop(Iop_8Uto32,
+                                                  unop(Iop_16HIto8,
+                                                       unop(Iop_32HIto16,
+                                                            getIReg(rs)))))));
+                     break;
+                  }
+                  case 0x16: {  /* ADDQ_S.W */
+                     /* 32-bit signed (Q31) saturating add.  Overflow is
+                        detected as bit 32 of the 64-bit signed sum differing
+                        from bit 31; on overflow rd clamps to 0x7fffffff
+                        (bit 32 clear) or 0x80000000 (bit 32 set) and bit 20
+                        is ORed into DSPControl.
+                        NOTE(review): t2 is allocated but never used here. */
+                     DIP("addq_s.w r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+
+                     assign(t0, binop(Iop_Add64,
+                                      unop(Iop_32Sto64, getIReg(rs)),
+                                      unop(Iop_32Sto64, getIReg(rt))));
+
+                     assign(t3, binop(Iop_And32,
+                                      unop(Iop_64HIto32, mkexpr(t0)),
+                                      mkU32(0x1)));
+                     assign(t1, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  unop(Iop_64to32, mkexpr(t0)),
+                                                  mkU32(0x80000000)),
+                                            mkU8(31)),
+                                      mkexpr(t3)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                             mkexpr(t3),
+                                                             mkU32(0x0)),
+                                                       mkU32(0x7fffffff),
+                                                       mkU32(0x80000000)),
+                                            unop(Iop_64to32, mkexpr(t0))));
+                     break;
+                  }
+                  case 0x17: {  /* SUBQ_S.W */
+                     /* 32-bit signed (Q31) saturating subtract.  Identical
+                        overflow test and saturation scheme to ADDQ_S.W above
+                        (bit 32 vs bit 31 of the 64-bit signed difference);
+                        overflow ORs bit 20 into DSPControl.
+                        NOTE(review): t2 is allocated but never used here. */
+                     DIP("subq_s.w r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+
+                     assign(t0, binop(Iop_Sub64,
+                                      unop(Iop_32Sto64, getIReg(rs)),
+                                      unop(Iop_32Sto64, getIReg(rt))));
+
+                     assign(t3, binop(Iop_And32,
+                                      unop(Iop_64HIto32, mkexpr(t0)),
+                                      mkU32(0x1)));
+                     assign(t1, binop(Iop_CmpNE32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  unop(Iop_64to32, mkexpr(t0)),
+                                                  mkU32(0x80000000)),
+                                            mkU8(31)),
+                                      mkexpr(t3)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00100000)),
+                                              getDSPControl()));
+
+                     putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                             mkexpr(t3),
+                                                             mkU32(0x0)),
+                                                       mkU32(0x7fffffff),
+                                                       mkU32(0x80000000)),
+                                            unop(Iop_64to32, mkexpr(t0))));
+                     break;
+                  }
+                  case 0x1C: {  /* MULEQ_S.W.PHL */
+                     /* rd = saturating ((rs.hi16 * rt.hi16) << 1), a Q15*Q15
+                        -> Q31 multiply of the *upper* halfwords.  The only
+                        overflow case is 0x8000 * 0x8000 (both operands the
+                        most negative Q15 value); then rd = 0x7fffffff and
+                        bit 21 is ORed into DSPControl. */
+                     DIP("muleq_s.w.phl r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I32);
+
+                     assign(t0,
+                            binop(Iop_Shl32,
+                                  binop(Iop_Mul32,
+                                        unop(Iop_16Sto32,
+                                             unop(Iop_32HIto16, getIReg(rt))),
+                                        unop(Iop_16Sto32,
+                                             unop(Iop_32HIto16, getIReg(rs)))),
+                                  mkU8(0x1)));
+                     /* t1: rt's upper halfword == 0x8000. */
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            getIReg(rt),
+                                            mkU32(0xffff0000)),
+                                      mkU32(0x80000000)));
+                     /* t2: rs's upper halfword == 0x8000. */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            getIReg(rs),
+                                            mkU32(0xffff0000)),
+                                      mkU32(0x80000000)));
+                     assign(t3, IRExpr_ITE(mkexpr(t1),
+                                           IRExpr_ITE(mkexpr(t2),
+                                                      binop(Iop_Or32,
+                                                            getDSPControl(),
+                                                            mkU32(0x00200000)),
+                                                      getDSPControl()),
+                                           getDSPControl()));
+                     putDSPControl(mkexpr(t3));
+
+                     putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                            IRExpr_ITE(mkexpr(t2),
+                                                       mkU32(0x7fffffff),
+                                                       mkexpr(t0)),
+                                            mkexpr(t0)));
+                     break;
+                  }
+                  case 0x1D: {  /* MULEQ_S.W.PHR */
+                     DIP("muleq_s.w.phr r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+
+                     assign(t0,
+                            binop(Iop_Shl32,
+                                  binop(Iop_Mul32,
+                                        unop(Iop_16Sto32,
+                                             unop(Iop_32to16, getIReg(rt))),
+                                        unop(Iop_16Sto32,
+                                             unop(Iop_32to16, getIReg(rs)))),
+                                  mkU8(0x1)));
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            getIReg(rt),
+                                            mkU32(0xffff)),
+                                      mkU32(0x8000)));
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            getIReg(rs),
+                                            mkU32(0xffff)),
+                                      mkU32(0x8000)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              IRExpr_ITE(mkexpr(t2),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x00200000)
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+                     putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                            IRExpr_ITE(mkexpr(t2),
+                                                       mkU32(0x7fffffff),
+                                                       mkexpr(t0)),
+                                            mkexpr(t0)));
+                     break;
+                  }
+                  case 0x1E: {  /* MULQ_S.PH */
+                     DIP("mulq_s.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I16);
+                     t3 = newTemp(Ity_I16);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     assign(t5,
+                            unop(Iop_16Sto32, unop(Iop_32to16, getIReg(rs))));
+                     assign(t6,
+                            unop(Iop_16Sto32, unop(Iop_32to16, getIReg(rt))));
+
+                     assign(t7,
+                            unop(Iop_16Sto32, unop(Iop_32HIto16, getIReg(rs))));
+                     assign(t8,
+                            unop(Iop_16Sto32, unop(Iop_32HIto16, getIReg(rt))));
+
+                     assign(t0, binop(Iop_And32,
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t5),
+                                                       mkU32(0xffff)),
+                                                 mkU32(0x8000))),
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t6),
+                                                       mkU32(0xffff)),
+                                                 mkU32(0x8000)))));
+                     assign(t1, binop(Iop_And32,
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t7),
+                                                       mkU32(0xffff)),
+                                                 mkU32(0x8000))),
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t8),
+                                                       mkU32(0xffff)),
+                                                 mkU32(0x8000)))));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    binop(Iop_Or32,
+                                                          mkexpr(t0),
+                                                          mkexpr(t1)),
+                                                    mkU32(0x0)),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x200000))));
+
+                     assign(t2, unop(Iop_32HIto16,
+                                     binop(Iop_Shl32,
+                                           unop(Iop_64to32,
+                                                binop(Iop_MullS32,
+                                                      mkexpr(t7),
+                                                      mkexpr(t8))),
+                                           mkU8(0x1))));
+                     assign(t3, unop(Iop_32HIto16,
+                                     binop(Iop_Shl32,
+                                           unop(Iop_64to32,
+                                                binop(Iop_MullS32,
+                                                      mkexpr(t5),
+                                                      mkexpr(t6))),
+                                           mkU8(0x1))));
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                        mkexpr(t1),
+                                                        mkU32(0x0)),
+                                                  mkexpr(t2),
+                                                  mkU16(0x7fff)),
+                                       IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                        mkexpr(t0),
+                                                        mkU32(0x0)),
+                                                  mkexpr(t3),
+                                                  mkU16(0x7fff))));
+                     break;
+                  }
+                  case 0x1F: {  /* MULQ_RS.PH */
+                     DIP("mulq_rs.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I16);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I16);
+
+                     /* Multiply and round lower halfwords. */
+                     assign(t0, binop(Iop_Add32,
+                                      binop(Iop_Shl32,
+                                            binop(Iop_Mul32,
+                                                  unop(Iop_16Sto32,
+                                                       unop(Iop_32to16,
+                                                            getIReg(rt))),
+                                                  unop(Iop_16Sto32,
+                                                       unop(Iop_32to16,
+                                                            getIReg(rs)))),
+                                            mkU8(0x1)),
+                                      mkU32(0x00008000)));
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            getIReg(rt), mkU32(0xffff)),
+                                      mkU32(0x8000)));
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            getIReg(rs), mkU32(0xffff)),
+                                      mkU32(0x8000)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              IRExpr_ITE(mkexpr(t2),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x00200000)
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+                     assign(t3, IRExpr_ITE(mkexpr(t1),
+                                           IRExpr_ITE(mkexpr(t2),
+                                                      mkU16(0x7fff),
+                                                      unop(Iop_32HIto16,
+                                                           mkexpr(t0))),
+                                           unop(Iop_32HIto16, mkexpr(t0))));
+
+                     /* Multiply and round higher halfwords. */
+                     assign(t4, binop(Iop_Add32,
+                                      binop(Iop_Shl32,
+                                            binop(Iop_Mul32,
+                                                  unop(Iop_16Sto32,
+                                                       unop(Iop_32HIto16,
+                                                            getIReg(rt))),
+                                                  unop(Iop_16Sto32,
+                                                       unop(Iop_32HIto16,
+                                                            getIReg(rs)))),
+                                            mkU8(0x1)),
+                                      mkU32(0x00008000)));
+                     assign(t5, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            getIReg(rt),
+                                            mkU32(0xffff0000)),
+                                      mkU32(0x80000000)));
+                     assign(t6, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            getIReg(rs),
+                                            mkU32(0xffff0000)),
+                                      mkU32(0x80000000)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t5),
+                                             IRExpr_ITE(mkexpr(t6),
+                                                        binop(Iop_Or32,
+                                                             getDSPControl(),
+                                                             mkU32(0x00200000)),
+                                                        getDSPControl()),
+                                             getDSPControl()));
+                     assign(t7, IRExpr_ITE(mkexpr(t5),
+                                           IRExpr_ITE(mkexpr(t6),
+                                                      mkU16(0x7fff),
+                                                      unop(Iop_32HIto16,
+                                                           mkexpr(t4))),
+                                           unop(Iop_32HIto16, mkexpr(t4))));
+
+                     putIReg(rd, binop(Iop_16HLto32, mkexpr(t7), mkexpr(t3)));
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of ADDU.QB */
+            }
+            case 0x11: {  /* CMPU.EQ.QB */
+               switch(sa) {
+                  case 0x0: {  /* CMPU.EQ.QB */
+                     DIP("cmpu.eq.qb r%d, r%d", rs, rt);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+
+                     assign(t1,
+                            binop(Iop_CmpEQ32,
+                                  binop(Iop_And32, getIReg(rs), mkU32(0xff)),
+                                  binop(Iop_And32, getIReg(rt), mkU32(0xff))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x01000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfeffffff))));
+
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x02000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfdffffff))));
+
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x04000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfbffffff))));
+
+                     assign(t4, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t4),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x08000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xf7ffffff))));
+                     break;
+                  }
+                  case 0x1: {  /* CMPU.LT.QB */
+                     DIP("cmpu.lt.qb r%d, r%d", rs, rt);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+
+                     assign(t1, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x01000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfeffffff))));
+
+                     assign(t2, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x02000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfdffffff))));
+
+                     assign(t3, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x04000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfbffffff))));
+
+                     assign(t4, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t4),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x08000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xf7ffffff))));
+                     break;
+                  }
+                  case 0x2: {  /* CMPU.LE.QB */
+                     DIP("cmpu.le.qb r%d, r%d", rs, rt);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+
+                     assign(t1, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x01000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfeffffff))));
+
+                     assign(t2, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x02000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfdffffff))));
+
+                     assign(t3, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x04000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfbffffff))));
+
+                     assign(t4, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t4),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x08000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xf7ffffff))));
+                     break;
+                  }
+                  case 0x3: {  /* PICK.QB */
+                     DIP("pick.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I8);
+                     t2 = newTemp(Ity_I8);
+                     t3 = newTemp(Ity_I8);
+                     t4 = newTemp(Ity_I8);
+
+                     assign(t0, getDSPControl());
+                     assign(t1, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t0),
+                                                       mkU32(0x01000000)),
+                                                 mkU32(0x0)),
+                                           unop(Iop_16to8,
+                                                 unop(Iop_32to16,
+                                                      getIReg(rs))),
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt)))));
+                     assign(t2, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t0),
+                                                       mkU32(0x02000000)),
+                                                 mkU32(0x0)),
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16, getIReg(rs))),
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt)))));
+                     assign(t3, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t0),
+                                                       mkU32(0x04000000)),
+                                                 mkU32(0x0)),
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs))),
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt)))));
+                     assign(t4, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t0),
+                                                       mkU32(0x08000000)),
+                                                 mkU32(0x0)),
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs))),
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt)))));
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   binop(Iop_8HLto16, mkexpr(t4), mkexpr(t3)),
+                                   binop(Iop_8HLto16, mkexpr(t2), mkexpr(t1))));
+                     break;
+                  }
+                  case 0x4: {  /* CMPGU.EQ.QB */
+                     DIP("cmpgu.eq.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t5, IRExpr_ITE(mkexpr(t1),
+                                           mkU32(0x00000001), mkU32(0)));
+
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t6, IRExpr_ITE(mkexpr(t2),
+                                           mkU32(0x00000002), mkU32(0)));
+
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           mkU32(0x00000004), mkU32(0)));
+
+                     assign(t4, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t8, IRExpr_ITE(mkexpr(t4),
+                                           mkU32(0x00000008), mkU32(0)));
+
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Or32,
+                                             binop(Iop_Or32,
+                                                   mkexpr(t5), mkexpr(t6)),
+                                             mkexpr(t7)),
+                                       mkexpr(t8)));
+                     break;
+                  }
+                  case 0x5: {  /* CMPGU.LT.QB */
+                     /* Compare each of the four unsigned bytes of rs with
+                        the corresponding byte of rt; bit i of rd is set
+                        iff rs.byte[i] < rt.byte[i] (unsigned compare),
+                        the upper 28 bits of rd end up zero. */
+                     DIP("cmpgu.lt.qb r%d, r%d, r%d", rd, rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+                     /* t1..t4: per-byte compare results (I1);
+                        t5..t8: corresponding result bit as an I32 mask. */
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     /* Byte 0 (bits 7..0) -> result bit 0. */
+                     assign(t1, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t5, IRExpr_ITE(mkexpr(t1),
+                                           mkU32(0x00000001), mkU32(0)));
+
+                     /* Byte 1 (bits 15..8) -> result bit 1. */
+                     assign(t2, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t6, IRExpr_ITE(mkexpr(t2),
+                                           mkU32(0x00000002), mkU32(0)));
+
+                     /* Byte 2 (bits 23..16) -> result bit 2. */
+                     assign(t3, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           mkU32(0x00000004), mkU32(0)));
+
+                     /* Byte 3 (bits 31..24) -> result bit 3. */
+                     assign(t4, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t8, IRExpr_ITE(mkexpr(t4),
+                                           mkU32(0x00000008), mkU32(0)));
+                     /* Merge the four flag bits into rd. */
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Or32,
+                                             binop(Iop_Or32,
+                                                   mkexpr(t5), mkexpr(t6)),
+                                             mkexpr(t7)),
+                                       mkexpr(t8)));
+                     break;
+                  }
+                  case 0x6: {  /* CMPGU.LE.QB */
+                     /* Compare each of the four unsigned bytes of rs with
+                        the corresponding byte of rt; bit i of rd is set
+                        iff rs.byte[i] <= rt.byte[i] (unsigned compare),
+                        the upper 28 bits of rd end up zero. */
+                     DIP("cmpgu.le.qb r%d, r%d, r%d", rd, rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+                     /* t1..t4: per-byte compare results (I1);
+                        t5..t8: corresponding result bit as an I32 mask. */
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     /* Byte 0 (bits 7..0) -> result bit 0. */
+                     assign(t1, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t5, IRExpr_ITE(mkexpr(t1),
+                                           mkU32(0x00000001), mkU32(0)));
+
+                     /* Byte 1 (bits 15..8) -> result bit 1. */
+                     assign(t2, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t6, IRExpr_ITE(mkexpr(t2),
+                                           mkU32(0x00000002), mkU32(0)));
+
+                     /* Byte 2 (bits 23..16) -> result bit 2. */
+                     assign(t3, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           mkU32(0x00000004), mkU32(0)));
+
+                     /* Byte 3 (bits 31..24) -> result bit 3. */
+                     assign(t4, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t8, IRExpr_ITE(mkexpr(t4),
+                                           mkU32(0x00000008), mkU32(0)));
+                     /* Merge the four flag bits into rd. */
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Or32,
+                                             binop(Iop_Or32,
+                                                   mkexpr(t5), mkexpr(t6)),
+                                             mkexpr(t7)),
+                                       mkexpr(t8)));
+                     break;
+                  }
+                  case 0x8: {  /* CMP.EQ.PH */
+                     /* Compare the two 16-bit halves of rs and rt for
+                        equality; the results go to DSPControl (bit 24 for
+                        the low half, bit 25 for the high half), not to a
+                        general register. */
+                     DIP("cmp.eq.ph r%d, r%d", rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+
+                     /* Low halfwords: set/clear DSPControl bit 24. */
+                     assign(t1, binop(Iop_CmpEQ16,
+                                      unop(Iop_32to16, getIReg(rs)),
+                                      unop(Iop_32to16, getIReg(rt))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x01000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfeffffff))));
+                     /* High halfwords: set/clear DSPControl bit 25. */
+                     assign(t2, binop(Iop_CmpEQ16,
+                                      unop(Iop_32HIto16, getIReg(rs)),
+                                      unop(Iop_32HIto16, getIReg(rt))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x02000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfdffffff))));
+                     break;
+                  }
+                  case 0x9: {  /* CMP.LT.PH */
+                     /* Signed compare of the two 16-bit halves of rs
+                        against rt (rs < rt); results recorded in
+                        DSPControl bit 24 (low half) and bit 25 (high
+                        half).  Halves are sign-extended to 32 bits so a
+                        32-bit signed compare can be used. */
+                     DIP("cmp.lt.ph r%d, r%d", rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+
+                     /* Low halfwords: set/clear DSPControl bit 24. */
+                     assign(t1, binop(Iop_CmpLT32S,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x01000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfeffffff))));
+
+                     /* High halfwords: set/clear DSPControl bit 25. */
+                     assign(t2, binop(Iop_CmpLT32S,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x02000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfdffffff))));
+                     break;
+                  }
+                  case 0xA: {  /* CMP.LE.PH */
+                     /* Signed compare of the two 16-bit halves of rs
+                        against rt (rs <= rt); results recorded in
+                        DSPControl bit 24 (low half) and bit 25 (high
+                        half).  Halves are sign-extended to 32 bits so a
+                        32-bit signed compare can be used. */
+                     DIP("cmp.le.ph r%d, r%d", rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+
+                     /* Low halfwords: set/clear DSPControl bit 24. */
+                     assign(t1, binop(Iop_CmpLE32S,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x01000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfeffffff))));
+
+                     /* High halfwords: set/clear DSPControl bit 25. */
+                     assign(t2, binop(Iop_CmpLE32S,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x02000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfdffffff))));
+                     break;
+                  }
+                  case 0xB: {  /* PICK.PH */
+                     /* For each 16-bit half, select the half from rs when
+                        the matching DSPControl condition bit is set (bit
+                        24 for the low half, bit 25 for the high half),
+                        otherwise the half from rt; recombine into rd. */
+                     /* Fix: the DIP string previously printed the wrong
+                        mnemonic "pick.qb" for this PICK.PH case. */
+                     DIP("pick.ph r%d, r%d, r%d", rd, rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I16);
+                     t2 = newTemp(Ity_I16);
+
+                     /* Snapshot DSPControl once; both selects read it. */
+                     assign(t0, getDSPControl());
+
+                     /* Low half: DSPControl bit 24 picks rs vs rt. */
+                     assign(t1, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t0),
+                                                       mkU32(0x01000000)),
+                                                 mkU32(0x0)),
+                                           unop(Iop_32to16, getIReg(rs)),
+                                           unop(Iop_32to16, getIReg(rt))));
+
+                     /* High half: DSPControl bit 25 picks rs vs rt. */
+                     assign(t2, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t0),
+                                                       mkU32(0x02000000)),
+                                                 mkU32(0x0)),
+                                           unop(Iop_32HIto16, getIReg(rs)),
+                                           unop(Iop_32HIto16, getIReg(rt))));
+
+                     putIReg(rd, binop(Iop_16HLto32, mkexpr(t2), mkexpr(t1)));
+                     break;
+                  }
+                  case 0xC: {  /* PRECRQ.QB.PH */
+                     /* Precision-reduce: take the upper byte of each of
+                        the four halfwords (two from rs, two from rt) and
+                        pack them into rd, rs supplying the high 16 bits. */
+                     /* Fix: DIP format used "%d" for rt where every other
+                        three-register DIP in this decoder uses "r%d". */
+                     DIP("precrq.qb.ph r%d, r%d, r%d", rd, rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   binop(Iop_8HLto16,
+                                         unop(Iop_16HIto8,
+                                              unop(Iop_32HIto16, getIReg(rs))),
+                                         unop(Iop_16HIto8,
+                                              unop(Iop_32to16, getIReg(rs)))),
+                                   binop(Iop_8HLto16,
+                                         unop(Iop_16HIto8,
+                                              unop(Iop_32HIto16, getIReg(rt))),
+                                         unop(Iop_16HIto8,
+                                              unop(Iop_32to16, getIReg(rt))))));
+                     break;
+                  }
+                  case 0xD: {  /* PRECR.QB.PH */
+                     /* Precision-reduce: take the LOWER byte of each of
+                        the four halfwords (two from rs, two from rt) and
+                        pack them into rd, rs supplying the high 16 bits
+                        (contrast PRECRQ.QB.PH, which takes upper bytes). */
+                     DIP("precr.qb.ph r%d, r%d, r%d", rd, rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   binop(Iop_8HLto16,
+                                         unop(Iop_16to8,
+                                              unop(Iop_32HIto16, getIReg(rs))),
+                                         unop(Iop_16to8,
+                                              unop(Iop_32to16, getIReg(rs)))),
+                                   binop(Iop_8HLto16,
+                                         unop(Iop_16to8,
+                                              unop(Iop_32HIto16, getIReg(rt))),
+                                         unop(Iop_16to8,
+                                              unop(Iop_32to16, getIReg(rt))))));
+                     break;
+                  }
+                  case 0xF: {  /* PRECRQU_S.QB.PH */
+                     /* Precision-reduce with unsigned saturation: each of
+                        the four 16-bit halves (two from rs, two from rt)
+                        is reduced to an 8-bit value and packed into rd.
+                        Per half: if the sign bit (0x8000) is set the
+                        result byte is 0; else if the magnitude masked to
+                        0x7fff exceeds 0x7f80 the result saturates to
+                        0xff; else the byte is bits 14..7 (extracted via
+                        a left-shift by 1 then taking the high byte).
+                        Whenever clamping occurs, DSPControl bit 22 is
+                        set.  t0..t3 are the four result bytes (rs-lo,
+                        rs-hi, rt-lo, rt-hi). */
+                     /* Fix: DIP format used "%d" for rt where every other
+                        three-register DIP in this decoder uses "r%d". */
+                     DIP("precrqu_s.qb.ph r%d, r%d, r%d", rd, rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I8);
+                     t1 = newTemp(Ity_I8);
+                     t2 = newTemp(Ity_I8);
+                     t3 = newTemp(Ity_I8);
+                     t4 = newTemp(Ity_I8);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I8);
+                     t8 = newTemp(Ity_I1);
+                     t9 = newTemp(Ity_I32);
+                     t10 = newTemp(Ity_I8);
+                     t11 = newTemp(Ity_I1);
+                     t12 = newTemp(Ity_I32);
+                     t13 = newTemp(Ity_I8);
+                     t14 = newTemp(Ity_I1);
+                     t15 = newTemp(Ity_I32);
+
+                     /* --- rs low halfword -> result byte t0 --- */
+                     /* t4: 0xff if magnitude saturates, else bits 14..7. */
+                     assign(t4, IRExpr_ITE(binop(Iop_CmpLT32U,
+                                                 mkU32(0x7f80),
+                                                 binop(Iop_And32,
+                                                       unop(Iop_16Uto32,
+                                                            unop(Iop_32to16,
+                                                            getIReg(rs))),
+                                                       mkU32(0x7fff))),
+                                           mkU8(0xff),
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     binop(Iop_Shl32,
+                                                           getIReg(rs),
+                                                           mkU8(1))))));
+                     /* Negative input (bit 15 set) clamps to 0. */
+                     assign(t0, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       unop(Iop_16Uto32,
+                                                            unop(Iop_32to16,
+                                                                 getIReg(rs))),
+                                                       mkU32(0x00008000)),
+                                                 mkU32(0x0)),
+                                           mkexpr(t4),
+                                           mkU8(0x0)));
+                     /* t5/t6: the two clamp conditions, re-evaluated for
+                        the DSPControl update below. */
+                     assign(t5, binop(Iop_And32,
+                                      unop(Iop_16Uto32,
+                                            unop(Iop_32to16,
+                                                 getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t6, binop(Iop_CmpLT32U,
+                                      mkU32(0x7f80),
+                                      binop(Iop_And32,
+                                            unop(Iop_16Uto32,
+                                                 unop(Iop_32to16,
+                                                 getIReg(rs))),
+                                            mkU32(0x7fff))));
+                     /* Set DSPControl bit 22 if either clamp fired. */
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    mkexpr(t5),
+                                                    mkU32(0x0)),
+                                              IRExpr_ITE(mkexpr(t6),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x00400000)
+                                                              ),
+                                                         getDSPControl()),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00400000))));
+
+                     /* --- rs high halfword -> result byte t1 --- */
+                     assign(t7, IRExpr_ITE(binop(Iop_CmpLT32U,
+                                                 mkU32(0x7f80),
+                                                 binop(Iop_And32,
+                                                       unop(Iop_16Uto32,
+                                                            unop(Iop_32HIto16,
+                                                                 getIReg(rs))),
+                                                       mkU32(0x7fff))),
+                                           mkU8(0xff),
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     binop(Iop_Shl32,
+                                                           getIReg(rs),
+                                                           mkU8(1))))));
+                     assign(t1, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       unop(Iop_16Uto32,
+                                                            unop(Iop_32HIto16,
+                                                                 getIReg(rs))),
+                                                       mkU32(0x00008000)),
+                                                 mkU32(0x0)),
+                                           mkexpr(t7),
+                                           mkU8(0x0)));
+                     assign(t8, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            unop(Iop_16Uto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rs))),
+                                            mkU32(0x00008000)),
+                                      mkU32(0x0)));
+                     assign(t9, IRExpr_ITE(binop(Iop_CmpLT32U,
+                                                 mkU32(0x7f80),
+                                                 binop(Iop_And32,
+                                                       unop(Iop_16Uto32,
+                                                            unop(Iop_32HIto16,
+                                                                 getIReg(rs))),
+                                                       mkU32(0x7fff))),
+                                           binop(Iop_Or32,
+                                                 getDSPControl(),
+                                                 mkU32(0x00400000)),
+                                           getDSPControl()));
+                     putDSPControl(IRExpr_ITE(mkexpr(t8),
+                                              mkexpr(t9),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00400000))));
+
+                     /* --- rt low halfword -> result byte t2 --- */
+                     assign(t10, IRExpr_ITE(binop(Iop_CmpLT32U,
+                                                  mkU32(0x7f80),
+                                                  binop(Iop_And32,
+                                                        unop(Iop_16Uto32,
+                                                             unop(Iop_32to16,
+                                                             getIReg(rt))),
+                                                        mkU32(0x7fff))),
+                                            mkU8(0xff),
+                                            unop(Iop_16HIto8,
+                                                 unop(Iop_32to16,
+                                                      binop(Iop_Shl32,
+                                                            getIReg(rt),
+                                                            mkU8(1))))));
+                     assign(t2, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       unop(Iop_16Uto32,
+                                                            unop(Iop_32to16,
+                                                                 getIReg(rt))),
+                                                       mkU32(0x00008000)),
+                                                 mkU32(0x0)),
+                                           mkexpr(t10),
+                                           mkU8(0x0)));
+                     assign(t11, binop(Iop_CmpEQ32,
+                                       binop(Iop_And32,
+                                             unop(Iop_16Uto32,
+                                                  unop(Iop_32to16,
+                                                       getIReg(rt))),
+                                             mkU32(0x00008000)),
+                                       mkU32(0x0)));
+                     assign(t12, IRExpr_ITE(binop(Iop_CmpLT32U,
+                                                  mkU32(0x7f80),
+                                                  binop(Iop_And32,
+                                                        unop(Iop_16Uto32,
+                                                             unop(Iop_32to16,
+                                                             getIReg(rt))),
+                                                        mkU32(0x7fff))),
+                                            binop(Iop_Or32,
+                                                  getDSPControl(),
+                                                  mkU32(0x00400000)),
+                                            getDSPControl()));
+                     putDSPControl(IRExpr_ITE(mkexpr(t11),
+                                              mkexpr(t12),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00400000))));
+
+                     /* --- rt high halfword -> result byte t3 --- */
+                     assign(t13, IRExpr_ITE(binop(Iop_CmpLT32U,
+                                                  mkU32(0x7f80),
+                                                  binop(Iop_And32,
+                                                        unop(Iop_16Uto32,
+                                                             unop(Iop_32HIto16,
+                                                                  getIReg(rt))),
+                                                        mkU32(0x7fff))),
+                                            mkU8(0xff),
+                                            unop(Iop_16HIto8,
+                                                 unop(Iop_32HIto16,
+                                                      binop(Iop_Shl32,
+                                                            getIReg(rt),
+                                                            mkU8(1))))));
+                     assign(t3, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       unop(Iop_16Uto32,
+                                                            unop(Iop_32HIto16,
+                                                                 getIReg(rt))),
+                                                       mkU32(0x00008000)),
+                                                 mkU32(0x0)),
+                                           mkexpr(t13),
+                                           mkU8(0x0)));
+                     assign(t14, binop(Iop_CmpEQ32,
+                                       binop(Iop_And32,
+                                             unop(Iop_16Uto32,
+                                                  unop(Iop_32HIto16,
+                                                       getIReg(rt))),
+                                             mkU32(0x00008000)),
+                                       mkU32(0x0)));
+                     assign(t15, IRExpr_ITE(binop(Iop_CmpLT32U,
+                                                  mkU32(0x7f80),
+                                                  binop(Iop_And32,
+                                                        unop(Iop_16Uto32,
+                                                             unop(Iop_32HIto16,
+                                                                  getIReg(rt))),
+                                                        mkU32(0x7fff))),
+                                            binop(Iop_Or32,
+                                                  getDSPControl(),
+                                                  mkU32(0x00400000)),
+                                            getDSPControl()));
+                     putDSPControl(IRExpr_ITE(mkexpr(t14),
+                                              mkexpr(t15),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00400000))));
+
+                     /* Pack the four saturated bytes: rs halves form the
+                        high 16 bits of rd, rt halves the low 16 bits. */
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       binop(Iop_8HLto16,
+                                             mkexpr(t1), mkexpr(t0)),
+                                       binop(Iop_8HLto16,
+                                             mkexpr(t3), mkexpr(t2))));
+                     break;
+                  }
+                  case 0x14: {  /* PRECRQ.PH.W */
+                     /* Precision-reduce words to halfwords: rd is the
+                        high halfword of rs concatenated (as the upper 16
+                        bits) with the high halfword of rt. */
+                     /* Fix: DIP format used "%d" for rt where every other
+                        three-register DIP in this decoder uses "r%d". */
+                     DIP("precrq.ph.w r%d, r%d, r%d", rd, rs, rt);
+                     /* This DSP ASE case is only decoded in 32-bit mode. */
+                     vassert(!mode64);
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32HIto16, getIReg(rs)),
+                                       unop(Iop_32HIto16, getIReg(rt))));
+                     break;
+                  }
+                  case 0x15: {  /* PRECRQ_RS.PH.W */
+                     /* Precision Reduce with Rounding and Saturation:
+                        round each of rs, rt by adding 0x8000 (computed in
+                        33 bits so the carry out of bit 31 survives); on
+                        signed overflow saturate to 0x7fffffff and set the
+                        DSPControl ouflag bit 22; finally pack the two
+                        upper halfwords into rd.  rt is a GPR, so it is
+                        printed with an "r" prefix like rd and rs. */
+                     DIP("precrq_rs.ph.w r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);  /* DSP ASE: MIPS32 only. */
+                     t0 = newTemp(Ity_I64);  /* 33-bit rs + round const */
+                     t1 = newTemp(Ity_I1);   /* rs overflowed? */
+                     t2 = newTemp(Ity_I32);  /* rounded/saturated rs */
+                     t3 = newTemp(Ity_I64);  /* 33-bit rt + round const */
+                     t4 = newTemp(Ity_I1);   /* rt overflowed? */
+                     t5 = newTemp(Ity_I32);  /* rounded/saturated rt */
+
+                     /* t0 = signbit(rs):rs + 0x8000, kept in 64 bits. */
+                     assign(t0, binop(Iop_Add64,
+                                      binop(Iop_32HLto64,
+                                            binop(Iop_Shr32,
+                                                  binop(Iop_And32,
+                                                        getIReg(rs),
+                                                        mkU32(0x80000000)),
+                                                  mkU8(31)),
+                                            getIReg(rs)),
+                                      mkU64(0x0000000000008000ULL)));
+                     /* Overflow iff bit 32 (carry) != bit 31 (sign). */
+                     assign(t1, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64HIto32, mkexpr(t0)),
+                                            mkU32(0x1)),
+                                      binop(Iop_And32,
+                                            binop(Iop_Shr32,
+                                                  unop(Iop_64to32, mkexpr(t0)),
+                                                  mkU8(31)),
+                                            mkU32(0x1))));
+                     assign(t2, IRExpr_ITE(mkexpr(t1),
+                                           mkU32(0x7fffffff),
+                                           unop(Iop_64to32, mkexpr(t0))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x400000)),
+                                              getDSPControl()));
+                     /* Same round/saturate sequence for rt. */
+                     assign(t3, binop(Iop_Add64,
+                                      binop(Iop_32HLto64,
+                                            binop(Iop_Shr32,
+                                                  binop(Iop_And32,
+                                                        getIReg(rt),
+                                                        mkU32(0x80000000)),
+                                                  mkU8(31)),
+                                            getIReg(rt)),
+                                      mkU64(0x0000000000008000ULL)));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64HIto32, mkexpr(t3)),
+                                            mkU32(0x1)),
+                                      binop(Iop_And32,
+                                            binop(Iop_Shr32,
+                                                  unop(Iop_64to32, mkexpr(t3)),
+                                                  mkU8(31)),
+                                            mkU32(0x1))));
+                     assign(t5, IRExpr_ITE(mkexpr(t4),
+                                           mkU32(0x7fffffff),
+                                           unop(Iop_64to32, mkexpr(t3))));
+                     putDSPControl(IRExpr_ITE(mkexpr(t4),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x400000)),
+                                              getDSPControl()));
+                     /* rd = hi16(rounded rs) ## hi16(rounded rt). */
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32HIto16, mkexpr(t2)),
+                                       unop(Iop_32HIto16, mkexpr(t5))));
+                     break;
+                  }
+                  case 0x1E: {  /* PRECR_SRA.PH.W */
+                     /* Precision Reduce with Shift Right Arithmetic:
+                        destination is rt; the rd field holds the shift
+                        amount (an immediate, hence plain "%d" in DIP).
+                        rt = lo16(rt >>s rd) ## lo16(rs >>s rd). */
+                     DIP("precr_sra.ph.w r%d, r%d, %d", rt, rs, rd);
+                     vassert(!mode64);
+
+                     if (0 == rd) {
+                        /* Shift by 0: just take the low halfwords. */
+                        putIReg(rt, binop(Iop_16HLto32,
+                                          unop(Iop_32to16, getIReg(rt)),
+                                          unop(Iop_32to16, getIReg(rs))));
+                     } else {
+                        putIReg(rt, binop(Iop_16HLto32,
+                                          unop(Iop_32to16, binop(Iop_Sar32,
+                                                                 getIReg(rt),
+                                                                 mkU8(rd))),
+                                          unop(Iop_32to16, binop(Iop_Sar32,
+                                                                 getIReg(rs),
+                                                                 mkU8(rd)))));
+                     }
+                     break;
+                  }
+                  case 0x1F: {  /* PRECR_SRA_R.PH.W */
+                     /* Precision Reduce with Shift Right Arithmetic and
+                        Rounding: destination is rt; the rd field holds
+                        the shift amount (immediate, hence plain "%d").
+                        Each word is shifted by rd-1, incremented, then
+                        shifted once more, i.e. rounded to nearest. */
+                     DIP("precr_sra_r.ph.w r%d, r%d, %d", rt, rs, rd);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I32);  /* rounded, shifted rt */
+                     t1 = newTemp(Ity_I32);  /* rounded, shifted rs */
+
+                     if (0 == rd) {
+                        /* Shift by 0: no rounding, take low halfwords. */
+                        putIReg(rt, binop(Iop_16HLto32,
+                                          unop(Iop_32to16, getIReg(rt)),
+                                          unop(Iop_32to16, getIReg(rs))));
+                     } else {
+                        /* t0 = ((rt >>s (rd-1)) + 1) >>u 1 */
+                        assign(t0, binop(Iop_Shr32,
+                                         binop(Iop_Add32,
+                                               binop(Iop_Sar32,
+                                                     getIReg(rt),
+                                                     mkU8(rd-1)),
+                                               mkU32(0x1)),
+                                         mkU8(0x1)));
+                        /* t1 = ((rs >>s (rd-1)) + 1) >>u 1 */
+                        assign(t1, binop(Iop_Shr32,
+                                         binop(Iop_Add32,
+                                               binop(Iop_Sar32,
+                                                     getIReg(rs),
+                                                     mkU8(rd-1)),
+                                               mkU32(0x1)),
+                                         mkU8(0x1)));
+                        putIReg(rt, binop(Iop_16HLto32,
+                                          unop(Iop_32to16, mkexpr(t0)),
+                                          unop(Iop_32to16, mkexpr(t1))));
+                     }  /* stray ';' after this brace removed */
+                     break;
+                  }
+                  case 0xE: {  /* PACKRL.PH */
+                     /* Pack a Pair of Halfwords, Right/Left:
+                        rd = lo16(rs) ## hi16(rt). */
+                     DIP("packrl.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32to16, getIReg(rs)),
+                                       unop(Iop_32HIto16, getIReg(rt))));
+                     break;
+                  }
+                  case 0x18: {  /* CMPGDU.EQ.QB */
+                     /* Compare unsigned bytes of rs and rt for equality.
+                        For byte lane i (0 = least significant): set bit i
+                        of rd to the comparison result, and mirror that
+                        result into DSPControl ccond bit 24+i (set on true,
+                        cleared on false).  t1..t4 are the per-lane
+                        comparison bits, t5..t8 the corresponding rd bits. */
+                     DIP("cmpgdu.eq.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     /* Lane 0: bits 7..0. */
+                     assign(t1,
+                            binop(Iop_CmpEQ32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     assign(t5, IRExpr_ITE(mkexpr(t1),
+                                           mkU32(0x00000001), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x01000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfeffffff))));
+
+                     /* Lane 1: bits 15..8. */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t6, IRExpr_ITE(mkexpr(t2),
+                                           mkU32(0x00000002), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x02000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfdffffff))));
+
+                     /* Lane 2: bits 23..16. */
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           mkU32(0x00000004), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x04000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfbffffff))));
+
+                     /* Lane 3: bits 31..24. */
+                     assign(t4, binop(Iop_CmpEQ32,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t8, IRExpr_ITE(mkexpr(t4),
+                                           mkU32(0x00000008), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t4),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x08000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xf7ffffff))));
+
+                     /* rd = the four lane-result bits combined. */
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Or32,
+                                             binop(Iop_Or32,
+                                                   mkexpr(t5), mkexpr(t6)),
+                                             mkexpr(t7)),
+                                       mkexpr(t8)));
+                     break;
+                  }
+                  case 0x19: {  /* CMPGDU.LT.QB */
+                     /* Compare unsigned bytes: rs < rt per lane.  Same
+                        structure as CMPGDU.EQ.QB above, with CmpLT32U:
+                        bit i of rd and DSPControl ccond bit 24+i record
+                        each lane's result. */
+                     DIP("cmpgdu.lt.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     /* Lane 0: bits 7..0. */
+                     assign(t1, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t5, IRExpr_ITE(mkexpr(t1),
+                                           mkU32(0x00000001), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x01000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfeffffff))));
+
+                     /* Lane 1: bits 15..8. */
+                     assign(t2, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t6, IRExpr_ITE(mkexpr(t2),
+                                           mkU32(0x00000002), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x02000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfdffffff))));
+
+                     /* Lane 2: bits 23..16. */
+                     assign(t3, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           mkU32(0x00000004), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x04000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfbffffff))));
+
+                     /* Lane 3: bits 31..24. */
+                     assign(t4, binop(Iop_CmpLT32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t8, IRExpr_ITE(mkexpr(t4),
+                                           mkU32(0x00000008), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t4),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x08000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xf7ffffff))));
+
+                     /* rd = the four lane-result bits combined. */
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Or32,
+                                             binop(Iop_Or32,
+                                                   mkexpr(t5), mkexpr(t6)),
+                                             mkexpr(t7)),
+                                       mkexpr(t8)));
+                     break;
+                  }
+                  case 0x1A: {  /* CMPGDU.LE.QB */
+                     /* Compare unsigned bytes: rs <= rt per lane.  Same
+                        structure as CMPGDU.EQ.QB above, with CmpLE32U:
+                        bit i of rd and DSPControl ccond bit 24+i record
+                        each lane's result. */
+                     DIP("cmpgdu.le.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     /* Lane 0: bits 7..0. */
+                     assign(t1, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t5, IRExpr_ITE(mkexpr(t1),
+                                           mkU32(0x00000001),
+                                           mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x01000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfeffffff))));
+
+                     /* Lane 1: bits 15..8. */
+                     assign(t2, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16, getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32to16,
+                                                     getIReg(rt))))));
+                     assign(t6, IRExpr_ITE(mkexpr(t2),
+                                           mkU32(0x00000002), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x02000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfdffffff))));
+
+                     /* Lane 2: bits 23..16. */
+                     assign(t3, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16to8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t7, IRExpr_ITE(mkexpr(t3),
+                                           mkU32(0x00000004), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x04000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xfbffffff))));
+
+                     /* Lane 3: bits 31..24. */
+                     assign(t4, binop(Iop_CmpLE32U,
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rs)))),
+                                      unop(Iop_8Uto32,
+                                           unop(Iop_16HIto8,
+                                                unop(Iop_32HIto16,
+                                                     getIReg(rt))))));
+                     assign(t8, IRExpr_ITE(mkexpr(t4),
+                                           mkU32(0x00000008), mkU32(0)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t4),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x08000000)),
+                                              binop(Iop_And32,
+                                                    getDSPControl(),
+                                                    mkU32(0xf7ffffff))));
+
+                     /* rd = the four lane-result bits combined. */
+                     putIReg(rd, binop(Iop_Or32,
+                                       binop(Iop_Or32,
+                                             binop(Iop_Or32,
+                                                   mkexpr(t5), mkexpr(t6)),
+                                             mkexpr(t7)),
+                                       mkexpr(t8)));
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of CMPU.EQ.QB */
+            }
+            case 0x13: {  /* SHLL.QB */
+               switch(sa) {
+                  case 0x0: {  /* SHLL.QB */
+                     /* Shift Left Logical each unsigned byte of rt by the
+                        immediate in the rs field.  The even bytes (7..0,
+                        23..16) and odd bytes (15..8, 31..24) are shifted
+                        in two separate masked 32-bit operations; if any
+                        significant bit is shifted out of a byte, set
+                        DSPControl ouflag bit 22.  (The mask literal that
+                        was written "0x000ff00" is normalized below to
+                        "0x0000ff00" - same value, consistent spelling.) */
+                     DIP("shll.qb r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);  /* shifted even bytes */
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I32);  /* shifted odd bytes */
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I32);  /* combined overflow */
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I1);
+                     t9 = newTemp(Ity_I1);
+                     t10 = newTemp(Ity_I1);
+
+                     if (0 == rs) {
+                        /* Shift by 0: plain move, no flag update. */
+                        putIReg(rd, getIReg(rt));
+                     } else {
+                        /* Shift bits 7..0 and 23..16. */
+                        assign(t0, binop(Iop_Shl32,
+                                         binop(Iop_And32,
+                                               getIReg(rt),
+                                               mkU32(0x00ff00ff)),
+                                         mkU8(rs)));
+                        /* Overflow per byte: the bits shifted into the
+                           neighbouring byte are neither all 0 nor all 1. */
+                        assign(t1, binop(Iop_CmpNE32,
+                                        binop(Iop_And32,
+                                              mkexpr(t0),
+                                              mkU32(0xff000000)),
+                                        mkU32(0x00000000)));
+                        assign(t2, binop(Iop_CmpNE32,
+                                        binop(Iop_And32,
+                                              mkexpr(t0),
+                                              mkU32(0xff000000)),
+                                        mkU32(0xff000000)));
+                        assign(t7, binop(Iop_CmpNE32,
+                                        binop(Iop_And32,
+                                              mkexpr(t0),
+                                              mkU32(0x0000ff00)),
+                                        mkU32(0x00000000)));
+                        assign(t8, binop(Iop_CmpNE32,
+                                        binop(Iop_And32,
+                                              mkexpr(t0),
+                                              mkU32(0x0000ff00)),
+                                        mkU32(0x0000ff00)));
+                        /* Shift bits 15..8 and 31..24. */
+                        assign(t3, binop(Iop_Shl32,
+                                         binop(Iop_Shr32,
+                                               binop(Iop_And32,
+                                                     getIReg(rt),
+                                                     mkU32(0xff00ff00)),
+                                               mkU8(8)),
+                                         mkU8(rs)));
+                        assign(t4, binop(Iop_CmpNE32,
+                                        binop(Iop_And32,
+                                              mkexpr(t3),
+                                              mkU32(0xff000000)),
+                                        mkU32(0x00000000)));
+                        assign(t5, binop(Iop_CmpNE32,
+                                        binop(Iop_And32,
+                                              mkexpr(t3),
+                                              mkU32(0xff000000)),
+                                        mkU32(0xff000000)));
+                        assign(t9, binop(Iop_CmpNE32,
+                                        binop(Iop_And32,
+                                              mkexpr(t3),
+                                              mkU32(0x0000ff00)),
+                                        mkU32(0x00000000)));
+                        assign(t10, binop(Iop_CmpNE32,
+                                        binop(Iop_And32,
+                                              mkexpr(t3),
+                                              mkU32(0x0000ff00)),
+                                        mkU32(0x0000ff00)));
+
+                        /* t6 != 0 iff any of the four bytes overflowed. */
+                        assign(t6, binop(Iop_Or32,
+                                         binop(Iop_Or32,
+                                               binop(Iop_And32,
+                                                     unop(Iop_1Uto32,
+                                                          mkexpr(t1)),
+                                                     unop(Iop_1Uto32,
+                                                          mkexpr(t2))),
+                                               binop(Iop_And32,
+                                                     unop(Iop_1Uto32,
+                                                          mkexpr(t7)),
+                                                     unop(Iop_1Uto32,
+                                                          mkexpr(t8)))),
+                                         binop(Iop_Or32,
+                                               binop(Iop_And32,
+                                                     unop(Iop_1Uto32,
+                                                          mkexpr(t4)),
+                                                     unop(Iop_1Uto32,
+                                                          mkexpr(t5))),
+                                               binop(Iop_And32,
+                                                     unop(Iop_1Uto32,
+                                                          mkexpr(t9)),
+                                                     unop(Iop_1Uto32,
+                                                          mkexpr(t10))))));
+
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                       mkexpr(t6),
+                                                       mkU32(0x0)),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000)),
+                                                 getDSPControl()));
+                        /* Recombine even and odd bytes into rd. */
+                        putIReg(rd, binop(Iop_Or32,
+                                          binop(Iop_Shl32,
+                                                binop(Iop_And32,
+                                                      mkexpr(t3),
+                                                      mkU32(0x00ff00ff)),
+                                                mkU8(8)),
+                                          binop(Iop_And32,
+                                                mkexpr(t0),
+                                                mkU32(0x00ff00ff))));
+                     }
+                     break;
+                  }
+                  case 0x3: {  /* SHRL.QB */
+                     /* Shift Right Logical each unsigned byte of rt by
+                        the low 3 bits of rs; zero-extend each byte to 32
+                        bits, shift, then repack.  A shift amount of 0
+                        passes rt through unchanged. */
+                     DIP("shrl.qb r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);  /* byte 0 widened */
+                     t1 = newTemp(Ity_I8);   /* byte 0 shifted */
+                     t2 = newTemp(Ity_I32);  /* byte 1 widened */
+                     t3 = newTemp(Ity_I8);   /* byte 1 shifted */
+                     t4 = newTemp(Ity_I32);  /* byte 2 widened */
+                     t5 = newTemp(Ity_I8);   /* byte 2 shifted */
+                     t6 = newTemp(Ity_I32);  /* byte 3 widened */
+                     t7 = newTemp(Ity_I8);   /* byte 3 shifted */
+                     t9 = newTemp(Ity_I32);  /* shift amount, rs & 7 */
+
+                     assign(t9, binop(Iop_And32, getIReg(rs), mkU32(0x7)));
+                     assign(t0, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32to16, getIReg(rt)))));
+                     assign(t1, unop(Iop_32to8,
+                                     binop(Iop_Shr32,
+                                           mkexpr(t0),
+                                           unop(Iop_32to8, mkexpr(t9)))));
+
+                     assign(t2, unop(Iop_8Uto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32to16, getIReg(rt)))));
+                     assign(t3, unop(Iop_32to8,
+                                     binop(Iop_Shr32,
+                                           mkexpr(t2),
+                                           unop(Iop_32to8, mkexpr(t9)))));
+
+                     assign(t4, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t5, unop(Iop_32to8,
+                                     binop(Iop_Shr32,
+                                           mkexpr(t4),
+                                           unop(Iop_32to8, mkexpr(t9)))));
+
+                     assign(t6, unop(Iop_8Uto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t7, unop(Iop_32to8,
+                                     binop(Iop_Shr32,
+                                           mkexpr(t6),
+                                           unop(Iop_32to8, mkexpr(t9)))));
+                     /* Repack; if shift amount is 0, keep rt verbatim. */
+                     putIReg(rd, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                  mkexpr(t9),
+                                                  mkU32(0x0)),
+                                            getIReg(rt),
+                                            binop(Iop_16HLto32,
+                                                  binop(Iop_8HLto16,
+                                                        mkexpr(t7),
+                                                        mkexpr(t5)),
+                                                  binop(Iop_8HLto16,
+                                                        mkexpr(t3),
+                                                        mkexpr(t1)))));
+                     break;
+                  }
+                  case 0x2: {  /* SHLLV.QB */
+                     DIP("shllv.qb r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     /* Per-byte left shift of GPR[rt] by the run-time
+                        amount GPR[rs] & 0x7.  Sets DSPControl ouflag
+                        (bit 22) when, for any byte, the bits shifted out
+                        are neither all zeros nor all ones.
+                        NOTE(review): the all-ones exemption suits signed
+                        data -- confirm against the SHLLV.QB spec, which
+                        treats the bytes as unsigned. */
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I1);
+                     t9 = newTemp(Ity_I1);
+                     t10 = newTemp(Ity_I1);
+                     t11 = newTemp(Ity_I8);
+
+                     /* t11 = shift amount = GPR[rs] & 7. */
+                     assign(t11, unop(Iop_32to8,
+                                      binop(Iop_And32,
+                                            getIReg(rs),
+                                            mkU32(0x7))));
+                     /* Shift bits 7..0 and 23..16. */
+                     assign(t0, binop(Iop_Shl32,
+                                      binop(Iop_And32,
+                                            getIReg(rt),
+                                            mkU32(0x00ff00ff)),
+                                      mkexpr(t11)));
+                     /* t1 && t2: bits spilled from byte 2 into 31..24
+                        are neither all-0 nor all-1; t7 && t8: likewise
+                        for bits spilled from byte 0 into 15..8. */
+                     assign(t1, binop(Iop_CmpNE32,
+                                     binop(Iop_And32,
+                                           mkexpr(t0),
+                                           mkU32(0xff000000)),
+                                     mkU32(0x00000000)));
+                     assign(t2, binop(Iop_CmpNE32,
+                                     binop(Iop_And32,
+                                           mkexpr(t0),
+                                           mkU32(0xff000000)),
+                                     mkU32(0xff000000)));
+                     assign(t7, binop(Iop_CmpNE32,
+                                     binop(Iop_And32,
+                                           mkexpr(t0),
+                                           mkU32(0x0000ff00)),
+                                     mkU32(0x00000000)));
+                     /* 0x000ff00 below == 0x0000ff00; only a leading
+                        zero is missing, the value is unchanged. */
+                     assign(t8, binop(Iop_CmpNE32,
+                                     binop(Iop_And32,
+                                           mkexpr(t0),
+                                           mkU32(0x0000ff00)),
+                                     mkU32(0x000ff00)));
+                     /* Shift bits 15..8 and 31..24. */
+                     assign(t3, binop(Iop_Shl32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  getIReg(rt),
+                                                  mkU32(0xff00ff00)),
+                                            mkU8(8)),
+                                      mkexpr(t11)));
+                     /* t4/t5 and t9/t10: same spill tests for the two
+                        odd-positioned bytes (held in t3, moved down 8). */
+                     assign(t4, binop(Iop_CmpNE32,
+                                     binop(Iop_And32,
+                                           mkexpr(t3),
+                                           mkU32(0xff000000)),
+                                     mkU32(0x00000000)));
+                     assign(t5, binop(Iop_CmpNE32,
+                                     binop(Iop_And32,
+                                           mkexpr(t3),
+                                           mkU32(0xff000000)),
+                                     mkU32(0xff000000)));
+                     assign(t9, binop(Iop_CmpNE32,
+                                     binop(Iop_And32,
+                                           mkexpr(t3),
+                                           mkU32(0x0000ff00)),
+                                     mkU32(0x00000000)));
+                     assign(t10, binop(Iop_CmpNE32,
+                                     binop(Iop_And32,
+                                           mkexpr(t3),
+                                           mkU32(0x0000ff00)),
+                                     mkU32(0x0000ff00)));
+
+                     /* t6 != 0 iff any of the four bytes overflowed. */
+                     assign(t6, binop(Iop_Or32,
+                                      binop(Iop_Or32,
+                                            binop(Iop_And32,
+                                                  unop(Iop_1Uto32,
+                                                       mkexpr(t1)),
+                                                  unop(Iop_1Uto32,
+                                                       mkexpr(t2))),
+                                            binop(Iop_And32,
+                                                  unop(Iop_1Uto32,
+                                                       mkexpr(t7)),
+                                                  unop(Iop_1Uto32,
+                                                       mkexpr(t8)))),
+                                      binop(Iop_Or32,
+                                            binop(Iop_And32,
+                                                  unop(Iop_1Uto32,
+                                                       mkexpr(t4)),
+                                                  unop(Iop_1Uto32,
+                                                       mkexpr(t5))),
+                                            binop(Iop_And32,
+                                                  unop(Iop_1Uto32,
+                                                       mkexpr(t9)),
+                                                  unop(Iop_1Uto32,
+                                                       mkexpr(t10))))));
+
+                     /* Set DSPControl ouflag (bit 22) on overflow. */
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t6),
+                                                    mkU32(0x0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x400000)),
+                                              getDSPControl()));
+                     /* shift == 0: GPR[rt] unchanged; otherwise merge
+                        the odd bytes (t3, moved back up 8) with the even
+                        bytes (t0). */
+                     putIReg(rd, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                  unop(Iop_8Uto32, mkexpr(t11)),
+                                                  mkU32(0)),
+                                            getIReg(rt),
+                                            binop(Iop_Or32,
+                                                  binop(Iop_Shl32,
+                                                        binop(Iop_And32,
+                                                              mkexpr(t3),
+                                                              mkU32(0xff00ff)),
+                                                        mkU8(8)),
+                                                  binop(Iop_And32,
+                                                        mkexpr(t0),
+                                                        mkU32(0x00ff00ff)))));
+                     break;
+                  }
+                  case 0x1: {  /* SHRLV.QB */
+                     DIP("shrlv.qb r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I8);
+                     t1 = newTemp(Ity_I8);
+                     t2 = newTemp(Ity_I8);
+                     t3 = newTemp(Ity_I8);
+                     t9 = newTemp(Ity_I8);
+
+                     /* SHRLV.QB is the register-variable form: the shift
+                        amount is the low three bits of the *value* of
+                        GPR[rs], read at run time.  Fixed: the previous
+                        code shifted by mkU8(rs), i.e. by the register
+                        number, a compile-time constant. */
+                     assign(t9, unop(Iop_32to8,
+                                     binop(Iop_And32,
+                                           getIReg(rs),
+                                           mkU32(0x7))));
+                     /* Logical right shift of each of the four bytes of
+                        GPR[rt]; each byte is zero-extended first, so
+                        zeros are shifted in from the left. */
+                     assign(t0, unop(Iop_32to8,
+                                     binop(Iop_Shr32,
+                                           unop(Iop_8Uto32,
+                                                unop(Iop_32to8, getIReg(rt))),
+                                           mkexpr(t9))));
+                     assign(t1, unop(Iop_32to8,
+                                     binop(Iop_Shr32,
+                                           unop(Iop_8Uto32,
+                                                unop(Iop_16HIto8,
+                                                     unop(Iop_32to16,
+                                                          getIReg(rt)))),
+                                           mkexpr(t9))));
+                     assign(t2, unop(Iop_32to8,
+                                      binop(Iop_Shr32,
+                                            unop(Iop_8Uto32,
+                                                 unop(Iop_16to8,
+                                                      unop(Iop_32HIto16,
+                                                           getIReg(rt)))),
+                                            mkexpr(t9))));
+                     assign(t3, unop(Iop_32to8,
+                                     binop(Iop_Shr32,
+                                           unop(Iop_8Uto32,
+                                                unop(Iop_16HIto8,
+                                                     unop(Iop_32HIto16,
+                                                          getIReg(rt)))),
+                                           mkexpr(t9))));
+                     /* Repack the four shifted bytes, t3 highest. */
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   binop(Iop_8HLto16, mkexpr(t3), mkexpr(t2)),
+                                   binop(Iop_8HLto16, mkexpr(t1), mkexpr(t0))));
+                     break;
+                  }
+                  case 0x4: {  /* SHRA.QB */
+                     DIP("shra.qb r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     /* Immediate form: the rs field holds the 3-bit
+                        shift amount sa, so mkU8(rs) is correct here.
+                        Arithmetic right shift of each byte of GPR[rt]:
+                        zero-extend, logical shift, then OR in the sign
+                        fill (all-ones << (8 - sa)) when the byte's sign
+                        bit (0x80) is set.  For sa == 0 the fill is
+                        shifted left by 8 and discarded by the final
+                        32to8 truncation, so the result stays correct. */
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I32);
+                     t10 = newTemp(Ity_I32);
+                     t11 = newTemp(Ity_I32);
+
+                     /* ========== GPR[rt]_31..24 ========== */
+                     assign(t1,
+                            unop(Iop_8Uto32,
+                                 unop(Iop_16HIto8,
+                                      unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t2,
+                            binop(Iop_Shr32, mkexpr(t1), mkU8(rs)));
+                     /* tempD_7..0 */
+                     assign(t0,
+                            binop(Iop_Or32,
+                                  mkexpr(t2),
+                                  binop(Iop_Shl32,
+                                        IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                         binop(Iop_And32,
+                                                               mkexpr(t1),
+                                                               mkU32(0x00000080)
+                                                              ),
+                                                         mkU32(0x00000080)),
+                                                   mkU32(0xFFFFFFFF),
+                                                   mkU32(0x00000000)),
+                                        binop(Iop_Sub8, mkU8(0x8), mkU8(rs)))));
+
+                     /* ========== GPR[rt]_23..16 ========== */
+                     assign(t4,
+                            unop(Iop_8Uto32,
+                                 unop(Iop_16to8,
+                                      unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t5, binop(Iop_Shr32, mkexpr(t4), mkU8(rs)));
+                     /* tempC_7..0 */
+                     assign(t3,
+                            binop(Iop_Or32,
+                                  mkexpr(t5),
+                                  binop(Iop_Shl32,
+                                        IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                         binop(Iop_And32,
+                                                               mkexpr(t4),
+                                                               mkU32(0x00000080)
+                                                              ),
+                                                         mkU32(0x00000080)),
+                                                   mkU32(0xFFFFFFFF),
+                                                   mkU32(0x00000000)),
+                                        binop(Iop_Sub8, mkU8(0x8), mkU8(rs)))));
+
+                     /* ========== GPR[rt]_15..8 ========== */
+                     assign(t7,
+                            unop(Iop_8Uto32,
+                                 unop(Iop_16HIto8,
+                                      unop(Iop_32to16, getIReg(rt)))));
+                     assign(t8, binop(Iop_Shr32, mkexpr(t7), mkU8(rs)));
+                     /* tempB_7..0 */
+                     assign(t6,
+                            binop(Iop_Or32,
+                                  mkexpr(t8),
+                                  binop(Iop_Shl32,
+                                        IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                         binop(Iop_And32,
+                                                               mkexpr(t7),
+                                                               mkU32(0x00000080)
+                                                              ),
+                                                         mkU32(0x00000080)),
+                                                   mkU32(0xFFFFFFFF),
+                                                   mkU32(0x00000000)),
+                                        binop(Iop_Sub8, mkU8(0x8), mkU8(rs)))));
+
+                     /* ========== GPR[rt]_7..0 ========== */
+                     assign(t10,
+                            unop(Iop_8Uto32,
+                                 unop(Iop_16to8,
+                                      unop(Iop_32to16, getIReg(rt)))));
+                     assign(t11, binop(Iop_Shr32, mkexpr(t10), mkU8(rs)));
+                     /* tempA_7..0 (was mislabelled tempB) */
+                     assign(t9,
+                            binop(Iop_Or32,
+                                  mkexpr(t11),
+                                  binop(Iop_Shl32,
+                                        IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                         binop(Iop_And32,
+                                                               mkexpr(t10),
+                                                               mkU32(0x00000080)
+                                                              ),
+                                                         mkU32(0x00000080)),
+                                                   mkU32(0xFFFFFFFF),
+                                                   mkU32(0x00000000)),
+                                        binop(Iop_Sub8, mkU8(0x8), mkU8(rs)))));
+
+                     /* Repack: t0 (tempD) highest, t9 (tempA) lowest. */
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   binop(Iop_8HLto16,
+                                         unop(Iop_32to8, mkexpr(t0)),
+                                         unop(Iop_32to8, mkexpr(t3))),
+                                   binop(Iop_8HLto16,
+                                         unop(Iop_32to8, mkexpr(t6)),
+                                         unop(Iop_32to8, mkexpr(t9)))));
+                     break;
+                  }
+                  case 0x5: {  /* SHRA_R.QB */
+                     DIP("shra_r.qb r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     /* Immediate arithmetic right shift with rounding:
+                        each byte of GPR[rt] is sign-extended, 2^(sa-1)
+                        is added (round to nearest), then shifted right
+                        arithmetically by sa.  The rs field is sa.
+                        sa == 0 is special-cased both because the result
+                        is trivially GPR[rt] and because mkU8(rs-1)
+                        would wrap below. */
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I8);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I8);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I8);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I8);
+
+                     if (0 == rs) {
+                        putIReg(rd, getIReg(rt));
+                     } else {
+                        /* Byte 0 (bits 7..0). */
+                        assign(t0, unop(Iop_8Sto32,
+                                        unop(Iop_16to8,
+                                             unop(Iop_32to16, getIReg(rt)))));
+                        assign(t1, unop(Iop_32to8,
+                                        binop(Iop_Sar32,
+                                              binop(Iop_Add32,
+                                                    mkexpr(t0),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(rs-1))),
+                                              mkU8(rs))));
+
+                        /* Byte 1 (bits 15..8). */
+                        assign(t2, unop(Iop_8Sto32,
+                                        unop(Iop_16HIto8,
+                                             unop(Iop_32to16, getIReg(rt)))));
+                        assign(t3, unop(Iop_32to8,
+                                        binop(Iop_Sar32,
+                                              binop(Iop_Add32,
+                                                    mkexpr(t2),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(rs-1))),
+                                              mkU8(rs))));
+
+                        /* Byte 2 (bits 23..16). */
+                        assign(t4, unop(Iop_8Sto32,
+                                        unop(Iop_16to8,
+                                             unop(Iop_32HIto16, getIReg(rt)))));
+                        assign(t5, unop(Iop_32to8,
+                                        binop(Iop_Sar32,
+                                              binop(Iop_Add32,
+                                                    mkexpr(t4),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(rs-1))),
+                                              mkU8(rs))));
+
+                        /* Byte 3 (bits 31..24). */
+                        assign(t6, unop(Iop_8Sto32,
+                                        unop(Iop_16HIto8,
+                                             unop(Iop_32HIto16, getIReg(rt)))));
+                        assign(t7, unop(Iop_32to8,
+                                        binop(Iop_Sar32,
+                                              binop(Iop_Add32,
+                                                    mkexpr(t6),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(rs-1))),
+                                              mkU8(rs))));
+                        /* Repack, t7 highest. */
+                        putIReg(rd, binop(Iop_16HLto32,
+                                         binop(Iop_8HLto16,
+                                               mkexpr(t7), mkexpr(t5)),
+                                         binop(Iop_8HLto16,
+                                               mkexpr(t3), mkexpr(t1))));
+                     }
+                     break;
+                  }
+                  case 0x6: {  /* SHRAV.QB */
+                     DIP("shrav.qb r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);
+
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+
+                     t9 = newTemp(Ity_I32);
+                     t10 = newTemp(Ity_I32);
+                     t11 = newTemp(Ity_I32);
+
+                     /* Register-variable arithmetic right shift of each
+                        byte of GPR[rt] by GPR[rs] & 0x7: zero-extend,
+                        logical shift, then OR in the sign fill
+                        (all-ones << (8 - sa)) when the byte's sign bit
+                        is set; the final 32to8 truncation keeps only the
+                        low byte, so the fill is harmless when sa == 0.
+                        Fixed: the result selection previously tested
+                        mkU32(rs) -- the register *number*, a
+                        compile-time constant -- instead of the register
+                        value, and its shift==0 branches picked
+                        temporaries from the wrong byte group.  The DIP
+                        format also printed rs as an immediate. */
+                     /* ========== GPR[rt]_31..24 ========== */
+                     assign(t1,
+                            unop(Iop_8Uto32,
+                                 unop(Iop_16HIto8,
+                                      unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t2,
+                            binop(Iop_Shr32,
+                                  mkexpr(t1),
+                                  unop(Iop_32to8, binop(Iop_And32,
+                                                        getIReg(rs),
+                                                        mkU32(0x7)))));
+                     /* tempD_7..0 */
+                     assign(t0,
+                            binop(Iop_Or32,
+                                  mkexpr(t2),
+                                  binop(Iop_Shl32,
+                                        IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                         binop(Iop_And32,
+                                                               mkexpr(t1),
+                                                               mkU32(0x00000080)
+                                                              ),
+                                                         mkU32(0x00000080)),
+                                                   mkU32(0xFFFFFFFF),
+                                                   mkU32(0x00000000)),
+                                        binop(Iop_Sub8,
+                                              mkU8(0x8),
+                                              unop(Iop_32to8, binop(Iop_And32,
+                                                                    getIReg(rs),
+                                                                    mkU32(0x7)))
+                                              ))));
+
+                     /* ========== GPR[rt]_23..16 ========== */
+                     assign(t4,
+                            unop(Iop_8Uto32,
+                                 unop(Iop_16to8,
+                                      unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t5,
+                            binop(Iop_Shr32,
+                                  mkexpr(t4),
+                                  unop(Iop_32to8, binop(Iop_And32,
+                                                        getIReg(rs),
+                                                        mkU32(0x7)))));
+                     /* tempC_7..0 */
+                     assign(t3,
+                            binop(Iop_Or32,
+                                  mkexpr(t5),
+                                  binop(Iop_Shl32,
+                                        IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                         binop(Iop_And32,
+                                                               mkexpr(t4),
+                                                               mkU32(0x00000080)
+                                                              ),
+                                                         mkU32(0x00000080)),
+                                                   mkU32(0xFFFFFFFF),
+                                                   mkU32(0x00000000)),
+                                        binop(Iop_Sub8,
+                                              mkU8(0x8),
+                                              unop(Iop_32to8, binop(Iop_And32,
+                                                                    getIReg(rs),
+                                                                    mkU32(0x7)))
+                                              ))));
+
+                     /* ========== GPR[rt]_15..8 ========== */
+                     assign(t7,
+                            unop(Iop_8Uto32,
+                                 unop(Iop_16HIto8,
+                                      unop(Iop_32to16, getIReg(rt)))));
+                     assign(t8,
+                            binop(Iop_Shr32,
+                                  mkexpr(t7),
+                                  unop(Iop_32to8, binop(Iop_And32,
+                                                        getIReg(rs),
+                                                        mkU32(0x7)))));
+                     /* tempB_7..0 */
+                     assign(t6,
+                            binop(Iop_Or32,
+                                  mkexpr(t8),
+                                  binop(Iop_Shl32,
+                                        IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                         binop(Iop_And32,
+                                                               mkexpr(t7),
+                                                               mkU32(0x00000080)
+                                                              ),
+                                                         mkU32(0x00000080)),
+                                                   mkU32(0xFFFFFFFF),
+                                                   mkU32(0x00000000)),
+                                        binop(Iop_Sub8,
+                                              mkU8(0x8),
+                                              unop(Iop_32to8, binop(Iop_And32,
+                                                                    getIReg(rs),
+                                                                    mkU32(0x7)))
+                                              ))));
+
+                     /* ========== GPR[rt]_7..0 ========== */
+                     assign(t10,
+                            unop(Iop_8Uto32,
+                                 unop(Iop_16to8,
+                                      unop(Iop_32to16, getIReg(rt)))));
+                     assign(t11,
+                            binop(Iop_Shr32,
+                                  mkexpr(t10),
+                                  unop(Iop_32to8, binop(Iop_And32,
+                                                        getIReg(rs),
+                                                        mkU32(0x7)))));
+                     /* tempA_7..0 */
+                     assign(t9,
+                            binop(Iop_Or32,
+                                  mkexpr(t11),
+                                  binop(Iop_Shl32,
+                                        IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                         binop(Iop_And32,
+                                                               mkexpr(t10),
+                                                               mkU32(0x00000080)
+                                                              ),
+                                                         mkU32(0x00000080)),
+                                                   mkU32(0xFFFFFFFF),
+                                                   mkU32(0x00000000)),
+                                        binop(Iop_Sub8,
+                                              mkU8(0x8),
+                                              unop(Iop_32to8, binop(Iop_And32,
+                                                                    getIReg(rs),
+                                                                    mkU32(0x7)))
+                                              ))));
+
+                     /* If GPR[rs] & 0x7 is zero at run time, the result
+                        is GPR[rt] unchanged; otherwise repack the four
+                        shifted, sign-filled bytes, tempD highest. */
+                     putIReg(rd,
+                             IRExpr_ITE(binop(Iop_CmpEQ32,
+                                              binop(Iop_And32,
+                                                    getIReg(rs),
+                                                    mkU32(0x7)),
+                                              mkU32(0x0)),
+                                        getIReg(rt),
+                                        binop(Iop_16HLto32,
+                                              binop(Iop_8HLto16,
+                                                    unop(Iop_32to8,
+                                                         mkexpr(t0)),
+                                                    unop(Iop_32to8,
+                                                         mkexpr(t3))),
+                                              binop(Iop_8HLto16,
+                                                    unop(Iop_32to8,
+                                                         mkexpr(t6)),
+                                                    unop(Iop_32to8,
+                                                         mkexpr(t9))))));
+                     break;
+                  }
+                  case 0x7: {  /* SHRAV_R.QB */
+                     /* Shift Right Arithmetic Variable with Rounding on
+                        four packed bytes: each byte of rt is sign-extended
+                        to 32 bits, a rounding constant 1 << (shift-1) is
+                        added, and the sum is shifted right arithmetically
+                        by the shift amount taken from rs[2:0].  A shift
+                        amount of 0 selects rt unchanged. */
+                     DIP("shrav_r.qb r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I8);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I8);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I8);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I8);
+                     t8 = newTemp(Ity_I8);
+                     t9 = newTemp(Ity_I32);
+
+                     /* t9 = shift amount (rs & 7); t8 = shift - 1, the
+                        bit position of the rounding constant. */
+                     assign(t9, binop(Iop_And32, getIReg(rs), mkU32(0x7)));
+                     assign(t8, unop(Iop_32to8,
+                                     binop(Iop_Sub32, mkexpr(t9), mkU32(0x1))));
+                     /* Byte 0 (bits 7..0): sign-extend, round, shift. */
+                     assign(t0, unop(Iop_8Sto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32to16, getIReg(rt)))));
+                     assign(t1, unop(Iop_32to8,
+                                     binop(Iop_Sar32,
+                                           binop(Iop_Add32,
+                                                 mkexpr(t0),
+                                                 binop(Iop_Shl32,
+                                                       mkU32(0x1),
+                                                       mkexpr(t8))),
+                                           unop(Iop_32to8,
+                                                mkexpr(t9)))));
+
+                     /* Byte 1 (bits 15..8). */
+                     assign(t2, unop(Iop_8Sto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32to16, getIReg(rt)))));
+                     assign(t3, unop(Iop_32to8,
+                                     binop(Iop_Sar32,
+                                           binop(Iop_Add32,
+                                                 mkexpr(t2),
+                                                 binop(Iop_Shl32,
+                                                       mkU32(0x1),
+                                                       mkexpr(t8))),
+                                           unop(Iop_32to8, mkexpr(t9)))));
+
+                     /* Byte 2 (bits 23..16). */
+                     assign(t4, unop(Iop_8Sto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t5, unop(Iop_32to8,
+                                     binop(Iop_Sar32,
+                                           binop(Iop_Add32,
+                                                 mkexpr(t4),
+                                                 binop(Iop_Shl32,
+                                                       mkU32(0x1),
+                                                       mkexpr(t8))),
+                                           unop(Iop_32to8, mkexpr(t9)))));
+
+                     /* Byte 3 (bits 31..24). */
+                     assign(t6, unop(Iop_8Sto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t7, unop(Iop_32to8,
+                                     binop(Iop_Sar32,
+                                           binop(Iop_Add32,
+                                                 mkexpr(t6),
+                                                 binop(Iop_Shl32,
+                                                       mkU32(0x1),
+                                                       mkexpr(t8))),
+                                           unop(Iop_32to8, mkexpr(t9)))));
+                     /* When the shift amount is zero, t8 above is -1 and
+                        the rounded bytes are meaningless, so the ITE
+                        selects rt unchanged.  NOTE(review): the t1..t7
+                        expressions still appear in the IR when the guard
+                        is true -- presumably an out-of-range Iop_Shl32
+                        amount is tolerated in the unused arm; confirm. */
+                     putIReg(rd, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                  mkexpr(t9),
+                                                  mkU32(0x0)),
+                                            getIReg(rt),
+                                            binop(Iop_16HLto32,
+                                                  binop(Iop_8HLto16,
+                                                        mkexpr(t7),
+                                                        mkexpr(t5)),
+                                                  binop(Iop_8HLto16,
+                                                        mkexpr(t3),
+                                                        mkexpr(t1)))));
+                     break;
+                  }
+                  case 0x8: {  /* SHLL.PH */
+                     /* Shift Left Logical on two packed halfwords: each
+                        halfword of rt is shifted left by the immediate rs.
+                        If significant bits (including the sign bit) are
+                        shifted out of either halfword, bit 22 (ouflag) of
+                        DSPControl is set. */
+                     DIP("shll.ph r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+
+                     if (0 == rs) {
+                        /* Shift by zero: rt passes through, no flags. */
+                        putIReg(rd, getIReg(rt));
+                     } else {
+                        /* Shift lower 16 bits. */
+                        assign(t0, binop(Iop_Shl32,
+                                         unop(Iop_16Sto32,
+                                              unop(Iop_32to16, getIReg(rt))),
+                                         mkU8(rs)));
+
+                        /* t3 == 1 iff the bits shifted above bit 15 are
+                           neither all zeroes (t1) nor all ones (t2), i.e.
+                           the shifted-out bits were significant. */
+                        assign(t1, unop(Iop_1Uto32,
+                                        binop(Iop_CmpNE32,
+                                               binop(Iop_Sar32,
+                                                     mkexpr(t0),
+                                                     mkU8(16)),
+                                               mkU32(0))));
+                        assign(t2, unop(Iop_1Uto32,
+                                        binop(Iop_CmpNE32,
+                                              binop(Iop_Sar32,
+                                                    mkexpr(t0),
+                                                    mkU8(16)),
+                                              mkU32(0xffffffff))));
+                        assign(t3, binop(Iop_And32,
+                                         mkexpr(t1),
+                                         mkexpr(t2)));
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                       mkexpr(t3),
+                                                       mkU32(0x1)),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000)),
+                                                 getDSPControl()));
+                        /* Also set ouflag if the sign bit of the lower
+                           halfword changed under the shift. */
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                       binop(Iop_And32,
+                                                             getIReg(rt),
+                                                             mkU32(0x00008000)),
+                                                       binop(Iop_And32,
+                                                             mkexpr(t0),
+                                                             mkU32(0x00008000))
+                                                      ),
+                                                 getDSPControl(),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000))));
+                        /* Shift higher 16 bits. */
+                        assign(t4, binop(Iop_Shl32,
+                                         unop(Iop_16Sto32,
+                                              unop(Iop_32HIto16, getIReg(rt))),
+                                         mkU8(rs)));
+
+                        /* Same significant-bits test for the upper half. */
+                        assign(t5, unop(Iop_1Uto32,
+                                        binop(Iop_CmpNE32,
+                                               binop(Iop_Sar32,
+                                                     mkexpr(t4),
+                                                     mkU8(16)),
+                                               mkU32(0))));
+                        assign(t6, unop(Iop_1Uto32,
+                                        binop(Iop_CmpNE32,
+                                              binop(Iop_Sar32,
+                                                    mkexpr(t4),
+                                                    mkU8(16)),
+                                              mkU32(0xffffffff))));
+                        assign(t7, binop(Iop_And32,
+                                         mkexpr(t5),
+                                         mkexpr(t6)));
+                        /* Note: this update was accidentally emitted twice
+                           in the original (identical copy-paste); setting
+                           the ouflag once is sufficient. */
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                       mkexpr(t7),
+                                                       mkU32(0x1)),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000)),
+                                                 getDSPControl()));
+                        /* Set ouflag if the sign bit of the upper halfword
+                           (rt bit 31 vs result bit 15, realigned) changed
+                           under the shift. */
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                       binop(Iop_And32,
+                                                             getIReg(rt),
+                                                             mkU32(0x80000000)),
+                                                       binop(Iop_Shl32,
+                                                             binop(Iop_And32,
+                                                                   mkexpr(t4),
+                                                                   mkU32(0x00008000)),
+                                                             mkU8(16))
+                                                      ),
+                                                 getDSPControl(),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000))));
+                        /* Repack the two shifted low halves. */
+                        putIReg(rd, binop(Iop_16HLto32,
+                                          unop(Iop_32to16, mkexpr(t4)),
+                                          unop(Iop_32to16, mkexpr(t0))));
+                     }
+                     break;
+                  }
+                  case 0x9: {  /* SHRA.PH */
+                     /* Shift Right Arithmetic on two packed halfwords:
+                        each halfword of rt is sign-extended and shifted
+                        right arithmetically by the immediate rs, and the
+                        two low halves are repacked.  rs == 0 copies rt
+                        through unchanged.  No DSPControl flags are set. */
+                     DIP("shra.ph r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     if (0 == rs) {
+                        putIReg(rd, getIReg(rt));
+                     } else {
+                        /* t0 = shifted lower halfword. */
+                        assign(t0, binop(Iop_Sar32,
+                                         unop(Iop_16Sto32,
+                                              unop(Iop_32to16, getIReg(rt))),
+                                         mkU8(rs)));
+                        /* t1 = shifted upper halfword. */
+                        assign(t1, binop(Iop_Sar32,
+                                         unop(Iop_16Sto32,
+                                              unop(Iop_32HIto16, getIReg(rt))),
+                                         mkU8(rs)));
+                        putIReg(rd, binop(Iop_16HLto32,
+                                          unop(Iop_32to16, mkexpr(t1)),
+                                          unop(Iop_32to16, mkexpr(t0))));
+                     }
+                     break;
+                  }
+                  case 0xA: {  /* SHLLV.PH */
+                     /* Shift Left Logical Variable on two packed
+                        halfwords: each halfword of rt is shifted left by
+                        the amount in rs[3:0].  If significant bits
+                        (including the sign bit) are shifted out of either
+                        halfword, bit 22 (ouflag) of DSPControl is set. */
+                     DIP("shllv.ph r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I1);
+                     t9 = newTemp(Ity_I32);
+                     t10 = newTemp(Ity_I32);
+                     t11 = newTemp(Ity_I32);
+                     t12 = newTemp(Ity_I1);
+                     t13 = newTemp(Ity_I1);
+
+                     /* t0 = shift amount (rs & 0xf). */
+                     assign(t0, binop(Iop_And32, getIReg(rs), mkU32(0x0f)));
+
+                     /* Shift lower 16 bits. */
+                     assign(t2, binop(Iop_Shl32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+
+                     /* t10 != 0 iff the result's upper half (sign-
+                        extended from bit 15) is neither all zeroes (t3)
+                        nor all ones (t4): significant bits shifted out. */
+                     assign(t3, binop(Iop_CmpNE32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, mkexpr(t2))),
+                                      mkU32(0x00000000)));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, mkexpr(t2))),
+                                      mkU32(0xffffffff)));
+                     assign(t10, binop(Iop_And32,
+                                       unop(Iop_1Sto32, mkexpr(t3)),
+                                       unop(Iop_1Sto32, mkexpr(t4))));
+                     /* t5 = sign bit of the input lower halfword;
+                        t12 = (input sign == result sign). */
+                     assign(t5, binop(Iop_Shr32,
+                                      binop(Iop_And32,
+                                            getIReg(rt),
+                                            mkU32(0x00008000)),
+                                      mkU8(15)));
+                     assign(t12, binop(Iop_CmpEQ32,
+                                       mkexpr(t5),
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   mkexpr(t2),
+                                                   mkU32(0x00008000)),
+                                             mkU8(15))));
+
+                     /* Set ouflag on overflow or on sign change. */
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t10),
+                                                    mkU32(0x0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x400000)),
+                                              IRExpr_ITE(mkexpr(t12),
+                                                         getDSPControl(),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x400000)))
+                                             ));
+                     /* Shift higher 16 bits. */
+                     assign(t6, binop(Iop_Shl32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+
+                     /* Same significant-bits test for the upper half. */
+                     assign(t7, binop(Iop_CmpNE32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, mkexpr(t6))),
+                                      mkU32(0x00000000)));
+                     assign(t8, binop(Iop_CmpNE32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, mkexpr(t6))),
+                                      mkU32(0xffffffff)));
+                     assign(t11, binop(Iop_And32,
+                                       unop(Iop_1Sto32, mkexpr(t7)),
+                                       unop(Iop_1Sto32, mkexpr(t8))));
+
+                     /* t9 = sign bit of the input upper halfword (rt bit
+                        31); t13 = (input sign == result sign). */
+                     assign(t9, binop(Iop_Shr32,
+                                      binop(Iop_And32,
+                                            getIReg(rt),
+                                            mkU32(0x80000000)),
+                                      mkU8(31)));
+                     assign(t13, binop(Iop_CmpEQ32,
+                                       mkexpr(t9),
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   mkexpr(t6),
+                                                   mkU32(0x00008000)),
+                                             mkU8(15))));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t11),
+                                                    mkU32(0x0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x400000)),
+                                              IRExpr_ITE(mkexpr(t13),
+                                                         getDSPControl(),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x400000)))
+                                             ));
+
+                     /* Repack the two shifted low halves. */
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32to16, mkexpr(t6)),
+                                       unop(Iop_32to16, mkexpr(t2))));
+                     break;
+                  }
+                  case 0xB: {  /* SHRAV.PH */
+                     /* Shift Right Arithmetic Variable on two packed
+                        halfwords: each halfword of rt is sign-extended
+                        and shifted right arithmetically by the amount in
+                        rs[3:0].  A shift amount of 0 selects the original
+                        halfwords.  No DSPControl flags are set. */
+                     DIP("shrav.ph r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+
+                     /* t0 = shift amount; t1 = (shift amount == 0). */
+                     assign(t0, binop(Iop_And32, getIReg(rs), mkU32(0x0f)));
+                     assign(t1, binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x0)));
+                     /* t2 = shifted lower halfword; t3 = shifted upper. */
+                     assign(t2, binop(Iop_Sar32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+                     assign(t3, binop(Iop_Sar32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+                     /* Per-halfword select: original on zero shift. */
+                     putIReg(rd,
+                             binop(Iop_16HLto32,
+                                   IRExpr_ITE(mkexpr(t1),
+                                              unop(Iop_32HIto16, getIReg(rt)),
+                                              unop(Iop_32to16, mkexpr(t3))),
+                                   IRExpr_ITE(mkexpr(t1),
+                                              unop(Iop_32to16, getIReg(rt)),
+                                              unop(Iop_32to16, mkexpr(t2)))));
+                     break;
+                  }
+                  case 0xC: {  /* SHLL_S.PH */
+                     /* Shift Left Logical with Saturation on two packed
+                        halfwords: each halfword of rt is shifted left by
+                        the immediate rs.  On overflow the halfword
+                        saturates to 0x7fff (positive input) or 0x8000
+                        (negative input) and bit 22 (ouflag) of DSPControl
+                        is set. */
+                     DIP("shll_s.ph r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I32);
+                     t10 = newTemp(Ity_I32);
+                     t11 = newTemp(Ity_I32);
+                     t12 = newTemp(Ity_I32);
+                     t13 = newTemp(Ity_I32);
+                     t14 = newTemp(Ity_I32);
+
+                     if (0 == rs) {
+                        /* Shift by zero: rt passes through, no flags. */
+                        putIReg(rd, getIReg(rt));
+                     } else {
+                        /* Shift lower 16 bits. */
+                        assign(t0, binop(Iop_Shl32,
+                                         unop(Iop_16Sto32,
+                                              unop(Iop_32to16, getIReg(rt))),
+                                         mkU8(rs)));
+
+                        /* t3 == 1 iff the bits shifted above bit 15 are
+                           neither all zeroes (t1) nor all ones (t2):
+                           significant bits shifted out => overflow. */
+                        assign(t1, unop(Iop_1Uto32,
+                                        binop(Iop_CmpNE32,
+                                               binop(Iop_Sar32,
+                                                     mkexpr(t0),
+                                                     mkU8(16)),
+                                               mkU32(0))));
+                        assign(t2, unop(Iop_1Uto32,
+                                        binop(Iop_CmpNE32,
+                                              binop(Iop_Sar32,
+                                                    mkexpr(t0),
+                                                    mkU8(16)),
+                                              mkU32(0xffffffff))));
+                        assign(t3, binop(Iop_And32,
+                                         mkexpr(t1),
+                                         mkexpr(t2)));
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                       mkexpr(t3),
+                                                       mkU32(0x1)),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000)),
+                                                 getDSPControl()));
+                        /* Also set ouflag if the sign bit of the lower
+                           halfword changed under the shift. */
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                       binop(Iop_And32,
+                                                             getIReg(rt),
+                                                             mkU32(0x00008000)),
+                                                       binop(Iop_And32,
+                                                             mkexpr(t0),
+                                                             mkU32(0x00008000))
+                                                      ),
+                                                 getDSPControl(),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000))));
+                        /* t8 = lower result: saturate on overflow
+                           (0x7fff for positive input, 0x8000 for
+                           negative), else the shifted value. */
+                        assign(t8,
+                               IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                mkexpr(t3),
+                                                mkU32(0x1)),
+                                          IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                           binop(Iop_And32,
+                                                                 getIReg(rt),
+                                                                 mkU32(0x8000)),
+                                                           mkU32(0)),
+                                                     mkU32(0x00007fff),
+                                                     mkU32(0x00008000)),
+                                          binop(Iop_And32,
+                                                mkexpr(t0),
+                                                mkU32(0x0000ffff))));
+                        /* t10 = final lower halfword: if the sign bit
+                           changed under the shift, saturate regardless. */
+                        assign(t10,
+                               IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                binop(Iop_And32,
+                                                      getIReg(rt),
+                                                      mkU32(0x00008000)),
+                                                binop(Iop_And32,
+                                                      mkexpr(t0),
+                                                      mkU32(0x00008000))),
+                                          mkexpr(t8),
+                                          IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                           binop(Iop_And32,
+                                                                 getIReg(rt),
+                                                                 mkU32(0x8000)),
+                                                           mkU32(0)),
+                                                     mkU32(0x00007fff),
+                                                     mkU32(0x00008000))));
+                        /* Shift higher 16 bits. */
+                        assign(t4, binop(Iop_Shl32,
+                                         unop(Iop_16Sto32,
+                                              unop(Iop_32HIto16, getIReg(rt))),
+                                         mkU8(rs)));
+
+                        /* Same significant-bits test for the upper half. */
+                        assign(t5, unop(Iop_1Uto32,
+                                        binop(Iop_CmpNE32,
+                                               binop(Iop_Sar32,
+                                                     mkexpr(t4),
+                                                     mkU8(16)),
+                                               mkU32(0))));
+                        assign(t6, unop(Iop_1Uto32,
+                                        binop(Iop_CmpNE32,
+                                              binop(Iop_Sar32,
+                                                    mkexpr(t4),
+                                                    mkU8(16)),
+                                              mkU32(0xffffffff))));
+                        assign(t7, binop(Iop_And32,
+                                         mkexpr(t5),
+                                         mkexpr(t6)));
+                        /* Note: this update was accidentally emitted twice
+                           in the original (identical copy-paste); setting
+                           the ouflag once is sufficient. */
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                       mkexpr(t7),
+                                                       mkU32(0x1)),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000)),
+                                                 getDSPControl()));
+                        /* t12 = result sign bit realigned to bit 31;
+                           set ouflag if it differs from rt bit 31. */
+                        assign(t12, binop(Iop_Shl32,
+                                          binop(Iop_And32,
+                                                mkexpr(t4),
+                                                mkU32(0x8000)),
+                                          mkU8(16)));
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                       binop(Iop_And32,
+                                                             getIReg(rt),
+                                                             mkU32(0x80000000)),
+                                                       mkexpr(t12)),
+                                                 getDSPControl(),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000))));
+                        /* t13/t14 = saturated upper halfword, already in
+                           bits 31..16 (0x7fff0000 / 0x80000000). */
+                        assign(t13, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                     binop(Iop_And32,
+                                                           getIReg(rt),
+                                                           mkU32(0x80000000)),
+                                                     mkU32(0)),
+                                               mkU32(0x7fff0000),
+                                               mkU32(0x80000000)));
+                        /* t9 = upper result in bits 31..16: saturated on
+                           overflow, else the shifted value. */
+                        assign(t9,
+                               IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                mkexpr(t7),
+                                                mkU32(0x1)),
+                                          mkexpr(t13),
+                                          binop(Iop_Shl32,
+                                                binop(Iop_And32,
+                                                      mkexpr(t4),
+                                                      mkU32(0x0000ffff)),
+                                                mkU8(16))));
+                        assign(t14, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                     binop(Iop_And32,
+                                                           getIReg(rt),
+                                                           mkU32(0x80000000)),
+                                                     mkU32(0)),
+                                               mkU32(0x7fff0000),
+                                               mkU32(0x80000000)));
+                        /* t11 = final upper halfword: if the sign bit
+                           changed under the shift, saturate regardless. */
+                        assign(t11,
+                               IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                binop(Iop_And32,
+                                                      getIReg(rt),
+                                                      mkU32(0x80000000)),
+                                                binop(Iop_Shl32,
+                                                      binop(Iop_And32,
+                                                            mkexpr(t4),
+                                                            mkU32(0x00008000)),
+                                                      mkU8(16))),
+                                          mkexpr(t9),
+                                          mkexpr(t14)));
+                        putIReg(rd, binop(Iop_Or32,
+                                          mkexpr(t10),
+                                          mkexpr(t11)));
+                     }
+                     break;
+                  }
+                  case 0xD: {  /* SHRA_R.PH */
+                     /* Shift Right Arithmetic with Rounding on two packed
+                        halfwords: each halfword of rt is sign-extended, a
+                        rounding constant 1 << (rs-1) is added, and the sum
+                        is shifted right arithmetically by the immediate
+                        rs.  rs == 0 copies rt through unchanged.
+                        Fix: the trace previously printed "shra.ph" (the
+                        mnemonic of the non-rounding variant). */
+                     DIP("shra_r.ph r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     if (0 == rs) {
+                        putIReg(rd, getIReg(rt));
+                     } else {
+                        /* t0 = lower halfword: round, then shift. */
+                        assign(t0, binop(Iop_Sar32,
+                                         binop(Iop_Add32,
+                                               unop(Iop_16Sto32,
+                                                    unop(Iop_32to16,
+                                                         getIReg(rt))),
+                                               binop(Iop_Shl32,
+                                                     mkU32(0x1),
+                                                     mkU8(rs-1))),
+                                         mkU8(rs)));
+                        /* t1 = upper halfword: round, then shift. */
+                        assign(t1, binop(Iop_Sar32,
+                                         binop(Iop_Add32,
+                                               unop(Iop_16Sto32,
+                                                    unop(Iop_32HIto16,
+                                                         getIReg(rt))),
+                                               binop(Iop_Shl32,
+                                                     mkU32(0x1),
+                                                     mkU8(rs-1))),
+                                         mkU8(rs)));
+                        putIReg(rd, binop(Iop_16HLto32,
+                                          unop(Iop_32to16, mkexpr(t1)),
+                                          unop(Iop_32to16, mkexpr(t0))));
+                     }
+                     break;
+                  }
+                  case 0xE: {  /* SHLLV_S.PH */
+                     DIP("shllv_s.ph r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I1);
+                     t9 = newTemp(Ity_I32);
+                     t10 = newTemp(Ity_I32);
+                     t11 = newTemp(Ity_I32);
+                     t12 = newTemp(Ity_I1);
+                     t13 = newTemp(Ity_I1);
+                     t14 = newTemp(Ity_I16);
+                     t15 = newTemp(Ity_I16);
+                     t16 = newTemp(Ity_I16);
+                     t17 = newTemp(Ity_I16);
+
+                     assign(t0, binop(Iop_And32, getIReg(rs), mkU32(0x0f)));
+
+                     /* Shift lower 16 bits. */
+                     assign(t2, binop(Iop_Shl32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+
+                     assign(t3, binop(Iop_CmpNE32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, mkexpr(t2))),
+                                      mkU32(0x00000000)));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, mkexpr(t2))),
+                                      mkU32(0xffffffff)));
+                     assign(t10, binop(Iop_And32,
+                                       unop(Iop_1Sto32, mkexpr(t3)),
+                                       unop(Iop_1Sto32, mkexpr(t4))));
+                     assign(t5, binop(Iop_Shr32,
+                                       binop(Iop_And32,
+                                             getIReg(rt),
+                                             mkU32(0x00008000)),
+                                       mkU8(15)));
+                     assign(t12, binop(Iop_CmpEQ32,
+                                       mkexpr(t5),
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   mkexpr(t2),
+                                                   mkU32(0x00008000)),
+                                             mkU8(15))));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t10),
+                                                    mkU32(0x0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x400000)),
+                                              IRExpr_ITE(mkexpr(t12),
+                                                         getDSPControl(),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x400000)))
+                                             ));
+                     assign(t14, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                  mkexpr(t5),
+                                                  mkU32(0x0)),
+                                            mkU16(0x8000),
+                                            mkU16(0x7fff)));
+                     assign(t15, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                  mkexpr(t10),
+                                                  mkU32(0x0)),
+                                            mkexpr(t14),
+                                            IRExpr_ITE(mkexpr(t12),
+                                                       unop(Iop_32to16,
+                                                            mkexpr(t2)),
+                                                       mkexpr(t14))));
+                     /* Shift higher 16 bits. */
+                     assign(t6, binop(Iop_Shl32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+
+                     assign(t7, binop(Iop_CmpNE32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, mkexpr(t6))),
+                                      mkU32(0x00000000)));
+                     assign(t8, binop(Iop_CmpNE32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, mkexpr(t6))),
+                                      mkU32(0xffffffff)));
+                     assign(t11, binop(Iop_And32,
+                                       unop(Iop_1Sto32, mkexpr(t7)),
+                                       unop(Iop_1Sto32, mkexpr(t8))));
+
+                     assign(t9, binop(Iop_Shr32,
+                                      binop(Iop_And32,
+                                            getIReg(rt),
+                                            mkU32(0x80000000)),
+                                      mkU8(31)));
+                     assign(t13, binop(Iop_CmpEQ32,
+                                       mkexpr(t9),
+                                       binop(Iop_Shr32,
+                                             binop(Iop_And32,
+                                                   mkexpr(t6),
+                                                   mkU32(0x00008000)),
+                                             mkU8(15))));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t11),
+                                                    mkU32(0x0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x400000)),
+                                              IRExpr_ITE(mkexpr(t13),
+                                                         getDSPControl(),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x400000)))
+                                             ));
+
+                     assign(t16, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                  mkexpr(t9),
+                                                  mkU32(0x0)),
+                                            mkU16(0x8000),
+                                            mkU16(0x7fff)));
+                     assign(t17, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                  mkexpr(t11),
+                                                  mkU32(0x0)),
+                                            mkexpr(t16),
+                                            IRExpr_ITE(mkexpr(t13),
+                                                       unop(Iop_32to16,
+                                                            mkexpr(t6)),
+                                                       mkexpr(t16))));
+
+                     putIReg(rd, binop(Iop_16HLto32, mkexpr(t17), mkexpr(t15)));
+                     break;
+                  }
+                  case 0xF: {  /* SHRAV_R.PH */
+                     DIP("shrav_r.ph r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I8);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+
+                     assign(t0, binop(Iop_And32, getIReg(rs), mkU32(0x0f)));
+                     assign(t1, binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x0)));
+                     assign(t2, unop(Iop_32to8,
+                                     binop(Iop_Sub32, mkexpr(t0), mkU32(1))));
+
+                     assign(t3, binop(Iop_Sar32,
+                                      binop(Iop_Add32,
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32to16, getIReg(rt))),
+                                            binop(Iop_Shl32,
+                                                  mkU32(0x1),
+                                                  mkexpr(t2))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+                     assign(t4, binop(Iop_Sar32,
+                                      binop(Iop_Add32,
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rt))),
+                                            binop(Iop_Shl32,
+                                                  mkU32(0x1),
+                                                  mkexpr(t2))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       IRExpr_ITE(mkexpr(t1),
+                                                  unop(Iop_32HIto16,
+                                                       getIReg(rt)),
+                                                  unop(Iop_32to16,
+                                                       mkexpr(t4))),
+                                       IRExpr_ITE(mkexpr(t1),
+                                                  unop(Iop_32to16, getIReg(rt)),
+                                                  unop(Iop_32to16,
+                                                       mkexpr(t3)))));
+                     break;
+                  }
+                  case 0x14: {  /* SHLL_S.W */
+                     DIP("shll_s.w r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);
+
+                     if (0 == rs) {
+                        putIReg(rd, getIReg(rt));
+                     } else {
+                        /* t0-bits that will be discarded, sign extended to
+                           32bits. */
+                        assign(t0, binop(Iop_Sar32,
+                                         binop(Iop_And32,
+                                               getIReg(rt),
+                                               binop(Iop_Sar32,
+                                                     mkU32(0x80000000),
+                                                     mkU8(rs-1))),
+                                         mkU8(32-rs)));
+
+                        assign(t1, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    binop(Iop_And32,
+                                                          getIReg(rt),
+                                                          mkU32(0x80000000)),
+                                                    mkU32(0x0)),
+                                              mkU32(0x7fffffff),
+                                              mkU32(0x80000000)));
+
+                        assign(t2, binop(Iop_Shl32, getIReg(rt), mkU8(rs)));
+                        assign(t3, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    binop(Iop_And32,
+                                                          getIReg(rt),
+                                                          mkU32(0x80000000)),
+                                                    binop(Iop_And32,
+                                                          mkexpr(t2),
+                                                          mkU32(0x80000000))),
+                                              mkexpr(t2),
+                                              mkexpr(t1)));
+
+                        assign(t4, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t0),
+                                                    mkU32(0x0)),
+                                              IRExpr_ITE(binop(Iop_CmpNE32,
+                                                               mkexpr(t0),
+                                                               mkU32(0xffffffff)
+                                                              ),
+                                                         mkexpr(t1),
+                                                         mkexpr(t3)),
+                                              mkexpr(t3)));
+                        assign(t5, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t0),
+                                                    mkU32(0xffffffff)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x400000)),
+                                              getDSPControl()));
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                       mkexpr(t0),
+                                                       mkU32(0x0)),
+                                                 mkexpr(t5),
+                                                 getDSPControl()));
+                        putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                       binop(Iop_And32,
+                                                             getIReg(rt),
+                                                             mkU32(0x80000000)),
+                                                       binop(Iop_And32,
+                                                             mkexpr(t2),
+                                                             mkU32(0x80000000))
+                                                            ),
+                                                 getDSPControl(),
+                                                 binop(Iop_Or32,
+                                                       getDSPControl(),
+                                                       mkU32(0x400000))));
+                        putIReg(rd, mkexpr(t4));
+                     }
+                     break;
+                  }
+                  case 0x15: {  /* SHRA_R.W */
+                     DIP("shra_r.w r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     if (0 == rs) {
+                        putIReg(rd, getIReg(rt));
+                     } else {
+                        putIReg(rd, binop(Iop_Add32,
+                                          binop(Iop_Sar32,
+                                                getIReg(rt), mkU8(rs)),
+                                          binop(Iop_Shr32,
+                                                binop(Iop_And32,
+                                                      getIReg(rt),
+                                                      binop(Iop_Shl32,
+                                                            mkU32(0x1),
+                                                            mkU8(rs-1))),
+                                                mkU8(rs-1))));
+                     }
+                     break;
+                  }
+                  case 0x16: {  /* SHLLV_S.W */
+                     DIP("shllv_s.w r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I64);
+                     t4 = newTemp(Ity_I1);
+                     t5 = newTemp(Ity_I1);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I32);
+
+                     /* Check if shift amount is zero. */
+                     assign(t0, binop(Iop_And32, getIReg(rs), mkU32(0x1f)));
+                     assign(t1, binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x0)));
+
+                     /* t2 = sign of the input value. */
+                     assign(t2, binop(Iop_Shr32,
+                                      binop(Iop_And32,
+                                            getIReg(rt),
+                                            mkU32(0x80000000)),
+                                      mkU8(31)));
+                     /* Shift left input value and check for overflow. */
+                     assign(t3, binop(Iop_Shl64,
+                                      unop(Iop_32Sto64, getIReg(rt)),
+                                      unop(Iop_32to8, mkexpr(t0))));
+                     assign(t4, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32, mkexpr(t3)),
+                                      mkU32(0x00000000)));
+                     assign(t5, binop(Iop_CmpNE32,
+                                      unop(Iop_64HIto32, mkexpr(t3)),
+                                      mkU32(0xffffffff)));
+                     assign(t6, binop(Iop_And32,
+                                      unop(Iop_1Uto32, mkexpr(t4)),
+                                      unop(Iop_1Uto32, mkexpr(t5))));
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  getIReg(rt),
+                                                  mkU32(0x80000000)),
+                                            mkU8(31)),
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  unop(Iop_64to32, mkexpr(t3)),
+                                                  mkU32(0x80000000)),
+                                            mkU8(31))));
+
+                     putDSPControl(IRExpr_ITE(unop(Iop_32to1, mkexpr(t6)),
+                                                   binop(Iop_Or32,
+                                                         getDSPControl(),
+                                                         mkU32(0x400000)),
+                                              IRExpr_ITE(mkexpr(t7),
+                                                         getDSPControl(),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x400000)))
+                                             ));
+
+                     assign(t8, IRExpr_ITE(unop(Iop_32to1,
+                                                mkexpr(t2)),
+                                           mkU32(0x80000000),
+                                           mkU32(0x7fffffff)));
+                     putIReg(rd, IRExpr_ITE(unop(Iop_32to1, mkexpr(t6)),
+                                            IRExpr_ITE(unop(Iop_32to1,
+                                                            mkexpr(t2)),
+                                                       mkU32(0x80000000),
+                                                       mkU32(0x7fffffff)),
+                                            IRExpr_ITE(mkexpr(t7),
+                                                       unop(Iop_64to32,
+                                                            mkexpr(t3)),
+                                                       mkexpr(t8))));
+                     break;
+                  }
+                  case 0x17: {  /* SHRAV_R.W */
+                     DIP("shrav_r.w r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I8);
+                     t3 = newTemp(Ity_I32);
+
+                     assign(t0, binop(Iop_And32, getIReg(rs), mkU32(0x1f)));
+                     assign(t1, binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x0)));
+                     assign(t2, unop(Iop_32to8,
+                                     binop(Iop_Sub32, mkexpr(t0), mkU32(1))));
+
+                     putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                            getIReg(rt),
+                                            binop(Iop_Sar32,
+                                                  binop(Iop_Add32,
+                                                        binop(Iop_Sar32,
+                                                              getIReg(rt),
+                                                              mkexpr(t2)),
+                                                        mkU32(0x1)),
+                                                  mkU8(1))));
+                     break;
+                  }
+                  case 0x19: {  /* SHRL.PH */
+                     DIP("shrl.ph r%d, r%d, %d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     assign(t0, binop(Iop_Shr32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      mkU8(rs)));
+                     assign(t1, binop(Iop_Shr32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      mkU8(rs)));
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       unop(Iop_32to16, mkexpr(t1)),
+                                       unop(Iop_32to16, mkexpr(t0))));
+                     break;
+                  }
+                  case 0x1B: {  /* SHRLV.PH */
+                     DIP("shrlv.ph r%d, r%d, r%d", rd, rt, rs);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I16);
+                     t5 = newTemp(Ity_I16);
+
+                     /* Get shift amount from lower 4 bits of rs
+                        and check if it is zero. */
+                     assign(t0, binop(Iop_And32, getIReg(rs), mkU32(0x0f)));
+                     assign(t1, binop(Iop_CmpEQ32, mkexpr(t0), mkU32(0x0)));
+
+                     assign(t2, binop(Iop_Shr32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+                     assign(t3, binop(Iop_Shr32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      unop(Iop_32to8, mkexpr(t0))));
+
+                     assign(t4, IRExpr_ITE(mkexpr(t1),
+                                           unop(Iop_32HIto16, getIReg(rt)),
+                                           unop(Iop_32to16, mkexpr(t3))));
+                     assign(t5, IRExpr_ITE(mkexpr(t1),
+                                           unop(Iop_32to16, getIReg(rt)),
+                                           unop(Iop_32to16, mkexpr(t2))));
+                     putIReg(rd, binop(Iop_16HLto32, mkexpr(t4), mkexpr(t5)));
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of SHLL.QB */
+            }
+            case 0x18: {  /* ADDUH.QB/MUL.PH */
+               switch(sa) {
+                  case 0x00: {  /* ADDUH.QB */
+                     DIP("adduh.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+
+                     assign(t0, binop(Iop_HAdd8Ux4, getIReg(rs), getIReg(rt)));
+
+                     putIReg(rd, mkexpr(t0));
+                     break;
+                  }
+                  case 0x1: {  /* SUBUH.QB */
+                     DIP("subuh.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+
+                     assign(t0, binop(Iop_HSub8Ux4, getIReg(rs), getIReg(rt)));
+
+                     putIReg(rd, mkexpr(t0));
+                     break;
+                  }
+                  case 0x02: {  /* ADDUH_R.QB */
+                     DIP("adduh_r.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I8);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I8);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I8);
+                     t9 = newTemp(Ity_I32);
+                     t10 = newTemp(Ity_I32);
+                     t11 = newTemp(Ity_I8);
+
+                     /* Extract input bytes, add values, add 1 and half the
+                        result. */
+                     assign(t0, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32to16, getIReg(rs)))));
+                     assign(t1, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32to16, getIReg(rt)))));
+                     assign(t2, unop(Iop_16to8,
+                                     unop(Iop_32to16,
+                                          binop(Iop_Shr32,
+                                                binop(Iop_Add32,
+                                                      binop(Iop_Add32,
+                                                            mkexpr(t0),
+                                                            mkexpr(t1)),
+                                                      mkU32(0x00000001)),
+                                                mkU8(0x01)))));
+
+                     assign(t3, unop(Iop_8Uto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32to16, getIReg(rs)))));
+                     assign(t4, unop(Iop_8Uto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32to16, getIReg(rt)))));
+                     assign(t5, unop(Iop_16to8,
+                                     unop(Iop_32to16,
+                                          binop(Iop_Shr32,
+                                                binop(Iop_Add32,
+                                                      binop(Iop_Add32,
+                                                            mkexpr(t3),
+                                                            mkexpr(t4)),
+                                                      mkU32(0x00000001)),
+                                                mkU8(0x01)))));
+
+                     assign(t6, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32HIto16, getIReg(rs)))));
+                     assign(t7, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t8, unop(Iop_16to8,
+                                     unop(Iop_32to16,
+                                          binop(Iop_Shr32,
+                                                binop(Iop_Add32,
+                                                      binop(Iop_Add32,
+                                                            mkexpr(t7),
+                                                            mkexpr(t6)),
+                                                      mkU32(0x00000001)),
+                                                mkU8(0x01)))));
+
+                     assign(t9, unop(Iop_8Uto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32HIto16, getIReg(rs)))));
+                     assign(t10, unop(Iop_8Uto32,
+                                      unop(Iop_16HIto8,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t11, unop(Iop_16to8,
+                                      unop(Iop_32to16,
+                                           binop(Iop_Shr32,
+                                                 binop(Iop_Add32,
+                                                       binop(Iop_Add32,
+                                                             mkexpr(t9),
+                                                             mkexpr(t10)),
+                                                       mkU32(0x00000001)),
+                                                 mkU8(0x01)))));
+
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       binop(Iop_8HLto16,
+                                             mkexpr(t11), mkexpr(t8)),
+                                       binop(Iop_8HLto16,
+                                             mkexpr(t5), mkexpr(t2))));
+                     break;
+                  }
+                  case 0x3: {  /* SUBUH_R.QB */
+                     /* Per-byte rounded halving subtract of unsigned bytes:
+                        each result byte is (rs.byte - rt.byte + 1) >> 1. */
+                     DIP("subuh_r.qb r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I32);
+                     t8 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I8);
+                     t10 = newTemp(Ity_I8);
+                     t11 = newTemp(Ity_I8);
+                     t12 = newTemp(Ity_I8);
+
+                     /* Extract each byte of rs and rt, zero-extended to
+                        32 bits (t1..t4 = rs bytes, t5..t8 = rt bytes,
+                        lowest byte first). */
+                     assign(t1, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32to16, getIReg(rs)))));
+                     assign(t2, unop(Iop_8Uto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32to16, getIReg(rs)))));
+                     assign(t3, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32HIto16, getIReg(rs)))));
+                     assign(t4, unop(Iop_8Uto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32HIto16, getIReg(rs)))));
+
+                     assign(t5, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32to16, getIReg(rt)))));
+                     assign(t6, unop(Iop_8Uto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32to16, getIReg(rt)))));
+                     assign(t7, unop(Iop_8Uto32,
+                                     unop(Iop_16to8,
+                                          unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t8, unop(Iop_8Uto32,
+                                     unop(Iop_16HIto8,
+                                          unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* Subtract byte-wise, add 1 for rounding, and halve
+                        each result (the original comment said "Add", but
+                        the operation is a subtract). */
+                     assign(t9, unop(Iop_16to8,
+                                     unop(Iop_32to16,
+                                          binop(Iop_Shr32,
+                                                binop(Iop_Add32,
+                                                      binop(Iop_Sub32,
+                                                            mkexpr(t1),
+                                                            mkexpr(t5)),
+                                                      mkU32(0x00000001)),
+                                                mkU8(0x01)))));
+                     assign(t10, unop(Iop_16to8,
+                                      unop(Iop_32to16,
+                                           binop(Iop_Shr32,
+                                                 binop(Iop_Add32,
+                                                       binop(Iop_Sub32,
+                                                             mkexpr(t2),
+                                                             mkexpr(t6)),
+                                                       mkU32(0x00000001)),
+                                                 mkU8(0x01)))));
+                     assign(t11, unop(Iop_16to8,
+                                      unop(Iop_32to16,
+                                            binop(Iop_Shr32,
+                                                  binop(Iop_Add32,
+                                                        binop(Iop_Sub32,
+                                                              mkexpr(t3),
+                                                              mkexpr(t7)),
+                                                        mkU32(0x00000001)),
+                                                  mkU8(0x01)))));
+                     assign(t12, unop(Iop_16to8,
+                                      unop(Iop_32to16,
+                                           binop(Iop_Shr32,
+                                                 binop(Iop_Add32,
+                                                       binop(Iop_Sub32,
+                                                             mkexpr(t4),
+                                                             mkexpr(t8)),
+                                                       mkU32(0x00000001)),
+                                                 mkU8(0x01)))));
+
+                     /* Repack the four result bytes, t12 highest. */
+                     putIReg(rd, binop(Iop_16HLto32,
+                                       binop(Iop_8HLto16,
+                                             mkexpr(t12), mkexpr(t11)),
+                                       binop(Iop_8HLto16,
+                                             mkexpr(t10), mkexpr(t9))));
+                     break;
+                  }
+                  case 0x8: {  /* ADDQH.PH */
+                     /* Pairwise halving add of signed halfwords:
+                        each result half is (rs.half + rt.half) >> 1. */
+                     DIP("addqh.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I16);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I16);
+
+                     /* Add lower halves of rs and rt
+                        and right shift the result by 1. */
+                     assign(t0, binop(Iop_Add32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+                     /* Mask 0x0001fffe keeps bits 1..16 of the 17-bit
+                        signed sum; the shift then yields the halved
+                        16-bit result. */
+                     assign(t1, unop(Iop_32to16,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 mkexpr(t0),
+                                                 mkU32(0x0001fffe)),
+                                           mkU8(0x1))));
+                     /* Add higher halves of rs and rt
+                        and right shift the result by 1. */
+                     assign(t2, binop(Iop_Add32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t3, unop(Iop_32to16,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 mkexpr(t2),
+                                                 mkU32(0x0001fffe)),
+                                           mkU8(0x1))));
+                     putIReg(rd, binop(Iop_16HLto32, mkexpr(t3), mkexpr(t1)));
+                     break;
+                  }
+                  case 0x9: {  /* SUBQH.PH */
+                     /* Pairwise halving subtract of signed halfwords,
+                        expressed directly with the vector halving-subtract
+                        IR op: each half is (rs.half - rt.half) >> 1. */
+                     DIP("subqh.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+
+                     putIReg(rd, binop(Iop_HSub16Sx2,
+                                       getIReg(rs), getIReg(rt)));
+                     break;
+                  }
+                  case 0xA: {/* ADDQH_R.PH */
+                     /* Pairwise rounded halving add of signed halfwords:
+                        each result half is (rs.half + rt.half + 1) >> 1. */
+                     DIP("addqh_r.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I16);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I16);
+
+                     /* Add lower halves of rs and rt, add 1
+                        and right shift the result by 1. */
+                     assign(t0, binop(Iop_Add32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+                     /* Mask 0x0001fffe isolates bits 1..16 of the
+                        rounded 17-bit sum before halving. */
+                     assign(t1, unop(Iop_32to16,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 binop(Iop_Add32,
+                                                       mkexpr(t0),
+                                                       mkU32(0x1)),
+                                                 mkU32(0x0001fffe)),
+                                           mkU8(0x1))));
+                     /* Add higher halves of rs and rt, add 1
+                        and right shift the result by 1. */
+                     assign(t2, binop(Iop_Add32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t3, unop(Iop_32to16,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 binop(Iop_Add32,
+                                                       mkexpr(t2),
+                                                       mkU32(0x1)),
+                                                 mkU32(0x0001fffe)),
+                                           mkU8(0x1))));
+
+                     putIReg(rd, binop(Iop_16HLto32, mkexpr(t3), mkexpr(t1)));
+                     break;
+                  }
+                  case 0xB: {  /* SUBQH_R.PH */
+                     /* Pairwise rounded halving subtract of signed
+                        halfwords: each half is (rs.half - rt.half + 1) >> 1. */
+                     DIP("subqh_r.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I16);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I16);
+
+                     /* Sub lower halves of rs and rt, add 1
+                        and right shift the result by 1. */
+                     assign(t0, binop(Iop_Sub32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+                     /* Mask 0x0001fffe isolates bits 1..16 of the
+                        rounded 17-bit difference before halving. */
+                     assign(t1, unop(Iop_32to16,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 binop(Iop_Add32,
+                                                       mkexpr(t0),
+                                                       mkU32(0x1)),
+                                                 mkU32(0x0001fffe)),
+                                           mkU8(0x1))));
+                     /* Sub higher halves of rs and rt, add 1
+                        and right shift the result by 1. */
+                     assign(t2, binop(Iop_Sub32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt)))));
+                     assign(t3, unop(Iop_32to16,
+                                     binop(Iop_Shr32,
+                                           binop(Iop_And32,
+                                                 binop(Iop_Add32,
+                                                       mkexpr(t2),
+                                                       mkU32(0x1)),
+                                                 mkU32(0x0001fffe)),
+                                           mkU8(0x1))));
+
+                     putIReg(rd, binop(Iop_16HLto32, mkexpr(t3), mkexpr(t1)));
+                     break;
+                  }
+                  case 0xC: {  /* MUL.PH */
+                     /* Pairwise multiply of signed halfwords; rd gets the
+                        low 16 bits of each 32-bit product.  DSPControl
+                        bit 21 is set if either product overflows the
+                        signed 16-bit range. */
+                     DIP("mul.ph r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+
+                     /* t0 = full product of the high halfwords. */
+                     assign(t0,
+                            binop(Iop_Mul32,
+                                  unop(Iop_16Sto32,
+                                       unop(Iop_32HIto16, getIReg(rs))),
+                                  unop(Iop_16Sto32,
+                                       unop(Iop_32HIto16, getIReg(rt)))));
+                     /* DSP Control flag: set bit 21 if t0 > 0x7FFF or
+                        t0 < -0x8000 (product does not fit in 16 bits). */
+                     putDSPControl(IRExpr_ITE(unop(Iop_Not1,
+                                                   binop(Iop_CmpLE32S,
+                                                         mkexpr(t0),
+                                                         mkU32(0x7FFF))),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00200000)),
+                                              IRExpr_ITE(binop(Iop_CmpLT32S,
+                                                               mkexpr(t0),
+                                                               mkU32(0xFFFF8000)
+                                                             ),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x00200000)
+                                                              ),
+                                                         getDSPControl())));
+
+                     /* t1 = full product of the low halfwords. */
+                     assign(t1,
+                            binop(Iop_Mul32,
+                                  unop(Iop_16Sto32,
+                                       unop(Iop_32to16, getIReg(rs))),
+                                  unop(Iop_16Sto32,
+                                       unop(Iop_32to16, getIReg(rt)))));
+                     /* DSP Control flag: same overflow check for t1. */
+                     putDSPControl(IRExpr_ITE(unop(Iop_Not1,
+                                                   binop(Iop_CmpLE32S,
+                                                         mkexpr(t1),
+                                                         mkU32(0x7FFF))),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00200000)),
+                                              IRExpr_ITE(binop(Iop_CmpLT32S,
+                                                               mkexpr(t1),
+                                                               mkU32(0xFFFF8000)
+                                                              ),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x00200000)
+                                                              ),
+                                                         getDSPControl())));
+
+                     /* Pack the truncated products: high-half product in
+                        bits 31..16, low-half product in bits 15..0. */
+                     assign(t2, binop(Iop_16HLto32,
+                                      unop(Iop_32to16, mkexpr(t0)),
+                                      unop(Iop_32to16, mkexpr(t1))));
+                     putIReg(rd, mkexpr(t2));
+                     break;
+                  }
+                  case 0xE: {  /* MUL_S.PH */
+                     /* Pairwise saturating multiply of signed halfwords:
+                        each product is clamped to [-0x8000, 0x7FFF] before
+                        packing, and DSPControl bit 21 records overflow. */
+                     DIP("mul_s.ph r%d r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+
+                     /* t0 - signed intermediate result
+                        (product of the high halfwords). */
+                     assign(t0,
+                           binop(Iop_Mul32,
+                                 unop(Iop_16Sto32,
+                                      unop(Iop_32HIto16, getIReg(rs))),
+                                 unop(Iop_16Sto32,
+                                      unop(Iop_32HIto16, getIReg(rt)))));
+
+                     /* t1 = t0 saturated to the signed 16-bit range. */
+                     assign(t1,
+                            IRExpr_ITE(unop(Iop_Not1,
+                                            binop(Iop_CmpLE32S,
+                                                  mkexpr(t0),
+                                                  mkU32(0x7FFF))),
+                                       mkU32(0x00007FFF),
+                                       IRExpr_ITE(binop(Iop_CmpLT32S,
+                                                        mkexpr(t0),
+                                                        mkU32(0xFFFF8000)),
+                                                  mkU32(0xFFFF8000),
+                                                  mkexpr(t0))));
+
+                     /* DSP Control flag: set bit 21 if t0 saturated. */
+                     putDSPControl(IRExpr_ITE(unop(Iop_Not1,
+                                                   binop(Iop_CmpLE32S,
+                                                         mkexpr(t0),
+                                                         mkU32(0x7FFF))),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00200000)),
+                                              IRExpr_ITE(binop(Iop_CmpLT32S,
+                                                               mkexpr(t0),
+                                                               mkU32(0xFFFF8000)
+                                                              ),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x00200000)
+                                                              ),
+                                                         getDSPControl())));
+
+                     /* t2 - signed intermediate result
+                        (product of the low halfwords). */
+                     assign(t2, binop(Iop_Mul32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt)))));
+
+                     /* t3 = t2 saturated to the signed 16-bit range. */
+                     assign(t3, IRExpr_ITE(unop(Iop_Not1,
+                                                binop(Iop_CmpLE32S,
+                                                      mkexpr(t2),
+                                                      mkU32(0x7FFF))),
+                                           mkU32(0x00007FFF),
+                                           IRExpr_ITE(binop(Iop_CmpLT32S,
+                                                            mkexpr(t2),
+                                                            mkU32(0xFFFF8000)),
+                                                      mkU32(0xFFFF8000),
+                                                      mkexpr(t2))));
+
+                     /* DSP Control flag: set bit 21 if t2 saturated. */
+                     putDSPControl(IRExpr_ITE(unop(Iop_Not1,
+                                                   binop(Iop_CmpLE32S,
+                                                         mkexpr(t2),
+                                                         mkU32(0x7FFF))),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    mkU32(0x00200000)),
+                                              IRExpr_ITE(binop(Iop_CmpLT32S,
+                                                               mkexpr(t2),
+                                                               mkU32(0xFFFF8000)
+                                                              ),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x00200000)
+                                                              ),
+                                                         getDSPControl())));
+
+                     /* Pack the saturated halfword results. */
+                     assign(t4, binop(Iop_16HLto32,
+                                      unop(Iop_32to16, mkexpr(t1)),
+                                      unop(Iop_32to16, mkexpr(t3))));
+                     putIReg(rd, mkexpr(t4));
+                     break;
+                  }
<br>
+                  case 0x10: {  /* ADDQH.W */
+                     /* Halving add of signed words: rd = (rs + rt) >> 1,
+                        computed in 64 bits so the 33-bit sum cannot
+                        overflow.  Mask 0x1fffffffe keeps bits 1..32 of the
+                        sum before the shift. */
+                     DIP("addqh.w r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+
+                     assign(t0, binop(Iop_Add64,
+                                      unop(Iop_32Sto64, getIReg(rs)),
+                                      unop(Iop_32Sto64, getIReg(rt))));
+                     assign(t1, binop(Iop_And64,
+                                      mkexpr(t0),
+                                      mkU64(0x00000001fffffffeULL)));
+                     putIReg(rd, unop(Iop_64to32,
+                                      binop(Iop_Shr64, mkexpr(t1), mkU8(0x1))));
+                     break;
+                  }
+                  case 0x11: {  /* SUBQH.W */
+                     /* Halving subtract of signed words:
+                        rd = (rs - rt) >> 1, computed in 64 bits to avoid
+                        overflow of the 33-bit difference. */
+                     DIP("subqh.w r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+
+                     assign(t0, binop(Iop_Sub64,
+                                      unop(Iop_32Sto64, getIReg(rs)),
+                                      unop(Iop_32Sto64, getIReg(rt))));
+                     assign(t1, binop(Iop_And64,
+                                      mkexpr(t0),
+                                      mkU64(0x00000001fffffffeULL)));
+                     putIReg(rd, unop(Iop_64to32,
+                                      binop(Iop_Shr64, mkexpr(t1), mkU8(0x1))));
+                     break;
+                  }
+                  case 0x12: {  /* ADDQH_R.W */
+                     /* Rounded halving add of signed words:
+                        rd = (rs + rt + 1) >> 1, computed in 64 bits to
+                        avoid overflow of the 33-bit sum. */
+                     DIP("addqh_r.w r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I64);
+
+                     assign(t0, binop(Iop_Add64,
+                                      unop(Iop_32Sto64, getIReg(rs)),
+                                      unop(Iop_32Sto64, getIReg(rt))));
+                     /* Rounding: add 1 before halving. */
+                     assign(t1, binop(Iop_Add64,
+                                      mkexpr(t0),
+                                      mkU64(0x0000000000000001ULL)));
+                     assign(t2, binop(Iop_And64,
+                                      mkexpr(t1),
+                                      mkU64(0x00000001fffffffeULL)));
+                     putIReg(rd, unop(Iop_64to32,
+                                      binop(Iop_Shr64, mkexpr(t2), mkU8(0x1))));
+                     break;
+                  }
+                  case 0x13: {  /* SUBQH_R.W */
+                     /* Rounded halving subtract of signed words:
+                        rd = (rs - rt + 1) >> 1, computed in 64 bits to
+                        avoid overflow of the 33-bit difference. */
+                     DIP("subqh_r.w r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I64);
+
+                     assign(t0, binop(Iop_Sub64,
+                                      unop(Iop_32Sto64, getIReg(rs)),
+                                      unop(Iop_32Sto64, getIReg(rt))));
+                     /* Rounding: add 1 before halving. */
+                     assign(t1, binop(Iop_Add64,
+                                      mkexpr(t0),
+                                      mkU64(0x0000000000000001ULL)));
+                     assign(t2, binop(Iop_And64,
+                                      mkexpr(t1),
+                                      mkU64(0x00000001fffffffeULL)));
+                     putIReg(rd, unop(Iop_64to32,
+                                      binop(Iop_Shr64, mkexpr(t2), mkU8(0x1))));
+                     break;
+                  }
+                  case 0x16: {  /* MULQ_S.W */
+                     /* Fractional multiply with saturation:
+                        rd = high word of (rs * rt) << 1.  The only case
+                        that can overflow is 0x80000000 * 0x80000000; then
+                        rd saturates to 0x7fffffff and DSPControl bit 21
+                        is set. */
+                     DIP("mulq_s.w r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+
+                     assign(t0, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            getIReg(rt), getIReg(rs)),
+                                      mkU8(0x1)));
+                     /* t1, t2: does either operand equal 0x80000000? */
+                     assign(t1, binop(Iop_CmpEQ32,
+                                      getIReg(rt), mkU32(0x80000000)));
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      getIReg(rs), mkU32(0x80000000)));
+
+                     /* Set DSPControl bit 21 only when both operands are
+                        0x80000000 (the saturating case). */
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              IRExpr_ITE(mkexpr(t2),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x00200000)
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+                     putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                            IRExpr_ITE(mkexpr(t2),
+                                                       mkU32(0x7fffffff),
+                                                       unop(Iop_64HIto32,
+                                                            mkexpr(t0))),
+                                            unop(Iop_64HIto32, mkexpr(t0))));
+                     break;
+                  }
+                  case 0x17: {  /* MULQ_RS.W */
+                     /* Fractional multiply with rounding and saturation:
+                        like MULQ_S.W but adds 0x80000000 to the doubled
+                        64-bit product before taking the high word, so the
+                        result is rounded rather than truncated. */
+                     DIP("mulq_rs.w r%d, r%d, r%d", rd, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I1);
+
+                     assign(t0, binop(Iop_Add64,
+                                      binop(Iop_Shl64,
+                                            binop(Iop_MullS32,
+                                                  getIReg(rt),
+                                                  getIReg(rs)),
+                                            mkU8(0x1)),
+                                      mkU64(0x0000000080000000ULL)));
+                     /* t1, t2: does either operand equal 0x80000000? */
+                     assign(t1,
+                            binop(Iop_CmpEQ32, getIReg(rt), mkU32(0x80000000)));
+                     assign(t2,
+                            binop(Iop_CmpEQ32, getIReg(rs), mkU32(0x80000000)));
+                     /* Set DSPControl bit 21 only when both operands are
+                        0x80000000 (the saturating case). */
+                     putDSPControl(IRExpr_ITE(mkexpr(t1),
+                                              IRExpr_ITE(mkexpr(t2),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               mkU32(0x00200000)
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+                     putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                            IRExpr_ITE(mkexpr(t2),
+                                                       mkU32(0x7fffffff),
+                                                       unop(Iop_64HIto32,
+                                                            mkexpr(t0))),
+                                            unop(Iop_64HIto32, mkexpr(t0))));
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of ADDUH.QB/MUL.PH */
+            }
+            case 0x30: {  /* DPAQ.W.PH */
+               switch(sa) {
+                  case 0x0: {  /* DPA.W.PH */
+                     /* Dot-product accumulate:
+                        ac += rs.hi*rt.hi + rs.lo*rt.lo, with signed
+                        halfword operands and sign-extended products. */
+                     DIP("dpa.w.ph ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I64);
+
+                     /* t0 = product of the high halfwords. */
+                     assign(t0,
+                            unop(Iop_32Sto64,
+                                 binop(Iop_Mul32,
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32HIto16, getIReg(rs))),
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t1 = product of the low halfwords. */
+                     assign(t1,
+                            unop(Iop_32Sto64,
+                                 binop(Iop_Mul32,
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32to16, getIReg(rs))),
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     assign(t2,
+                            binop(Iop_Add64,
+                                  getAcc(ac),
+                                  binop(Iop_Add64, mkexpr(t0), mkexpr(t1))));
+                     putAcc(ac, mkexpr(t2));
+                     break;
+                  }
+                  case 0x1: {  /* DPS.W.PH */
+                     /* Dot-product subtract:
+                        ac -= rs.hi*rt.hi + rs.lo*rt.lo, with signed
+                        halfword operands and sign-extended products. */
+                     DIP("dps.w.ph ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I64);
+
+                     /* t0 = product of the high halfwords. */
+                     assign(t0,
+                            unop(Iop_32Sto64,
+                                 binop(Iop_Mul32,
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32HIto16, getIReg(rs))),
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t1 = product of the low halfwords. */
+                     assign(t1,
+                            unop(Iop_32Sto64,
+                                 binop(Iop_Mul32,
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32to16, getIReg(rs))),
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     assign(t2,
+                            binop(Iop_Sub64,
+                                  getAcc(ac),
+                                  binop(Iop_Add64, mkexpr(t0), mkexpr(t1))));
+                     putAcc(ac, mkexpr(t2));
+                     break;
+                  }
+                  case 0x2: {  /* MULSA.W.PH */
+                     /* Multiply-subtract and accumulate:
+                        ac += (rs.hi*rt.hi) - (rs.lo*rt.lo), with signed
+                        halfword operands; the 32-bit difference is
+                        sign-extended before being added to ac. */
+                     DIP("mulsa.w.ph ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+
+                     /* Read the accumulator before modifying it. */
+                     assign(t4, getAcc(ac));
+                     /* t0 = product of the low halfwords. */
+                     assign(t0, binop(Iop_Mul32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32to16, getIReg(rs)))));
+                     /* t1 = product of the high halfwords. */
+                     assign(t1, binop(Iop_Mul32,
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      unop(Iop_16Sto32,
+                                           unop(Iop_32HIto16, getIReg(rs)))));
+                     assign(t2, binop(Iop_Sub32, mkexpr(t1), mkexpr(t0)));
+                     putAcc(ac, binop(Iop_Add64,
+                                      mkexpr(t4),
+                                      unop(Iop_32Sto64, mkexpr(t2))));
+                     break;
+                  }
+                  case 0x3: {  /* DPAU.H.QBL */
+                     /* Dot-product accumulate on the two left-most
+                        (high) unsigned bytes of rs and rt:
+                        ac += rs.b3*rt.b3 + rs.b2*rt.b2, with the 32-bit
+                        sum zero-extended before accumulation. */
+                     DIP("dpau.h.qbl ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I64);
+                     t3 = newTemp(Ity_I64);
+
+                     /* t0 = product of the top bytes (byte 3). */
+                     assign(t0,
+                            binop(Iop_Mul32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t1 = product of byte 2. */
+                     assign(t1,
+                            binop(Iop_Mul32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     assign(t2,
+                            unop(Iop_32Uto64,
+                                 binop(Iop_Add32,
+                                       mkexpr(t0),
+                                       mkexpr(t1))));
+                     assign(t3,
+                            binop(Iop_Add64, getAcc(ac), mkexpr(t2)));
+                     putAcc(ac, mkexpr(t3));
+                     break;
+                  }
+                  case 0x4: {  /* DPAQ_S.W.PH */
+                     /* Dot-product accumulate, fractional Q15, paired
+                        halfwords.  Each halfword pair yields the fractional
+                        product (a*b)<<1; the only overflow case is
+                        0x8000 * 0x8000 (-1.0 * -1.0), which is saturated to
+                        0x7fffffff and records the event in the
+                        DSPControl.ouflag bit for this accumulator (16+ac).
+                        Finally acc(ac) += hiProduct + loProduct. */
+                     DIP("dpaq_s.w.ph ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+
+                     assign(t0, getAcc(ac));
+
+                     /* t1 = (rs.hi16 * rt.hi16) << 1 (signed, 64-bit). */
+                     assign(t1, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rs))),
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rt)))),
+                                      mkU8(0x1)));
+                     /* t2/t3: is the high halfword of rs/rt exactly 0x8000? */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+                     /* t4 = hi product, saturated when both were 0x8000. */
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t2),
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t1)),
+                                       mkexpr(t1)));
+
+                     /* On saturation, set DSPControl ouflag bit 16+ac. */
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     /* Same sequence for the low halfword pair. */
+                     assign(t5, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32to16, getIReg(rs))),
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32to16, getIReg(rt)))
+                                           ),
+                                      mkU8(0x1)));
+                     assign(t6, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+                     assign(t8,
+                            IRExpr_ITE(mkexpr(t6),
+                                       IRExpr_ITE(mkexpr(t7),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t5)),
+                                       mkexpr(t5)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t6),
+                                              IRExpr_ITE(mkexpr(t7),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     /* acc += hiProduct + loProduct (64-bit, wrapping). */
+                     assign(t9, binop(Iop_Add64,
+                                      binop(Iop_Add64, mkexpr(t4), mkexpr(t8)),
+                                      mkexpr(t0)));
+                     putAcc(ac, mkexpr(t9));
+                     break;
+                  }
+                  case 0x5: {  /* DPSQ_S.W.PH */
+                     /* Dot-product SUBTRACT, fractional Q15, paired
+                        halfwords.  Identical product/saturation handling to
+                        DPAQ_S.W.PH above (0x8000*0x8000 saturates to
+                        0x7fffffff and sets DSPControl.ouflag bit 16+ac), but
+                        the final step is acc(ac) -= hiProduct + loProduct. */
+                     DIP("dpsq_s.w.ph ac%d r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+
+                     assign(t0, getAcc(ac));
+
+                     /* t1 = (rs.hi16 * rt.hi16) << 1 (signed, 64-bit). */
+                     assign(t1, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rs))),
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rt)))),
+                                      mkU8(0x1)));
+                     /* t2/t3: is the high halfword of rs/rt exactly 0x8000? */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+                     /* t4 = hi product, saturated when both were 0x8000. */
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t2),
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t1)),
+                                       mkexpr(t1)));
+
+                     /* On saturation, set DSPControl ouflag bit 16+ac. */
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     /* Same sequence for the low halfword pair. */
+                     assign(t5,
+                            binop(Iop_Shl64,
+                                  binop(Iop_MullS32,
+                                        unop(Iop_16Sto32,
+                                             unop(Iop_32to16, getIReg(rs))),
+                                        unop(Iop_16Sto32,
+                                             unop(Iop_32to16, getIReg(rt)))),
+                                  mkU8(0x1)));
+                     assign(t6, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+                     assign(t8,
+                            IRExpr_ITE(mkexpr(t6),
+                                       IRExpr_ITE(mkexpr(t7),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t5)),
+                                       mkexpr(t5)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t6),
+                                              IRExpr_ITE(mkexpr(t7),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     /* acc -= hiProduct + loProduct (64-bit, wrapping). */
+                     assign(t9,
+                            binop(Iop_Sub64,
+                                  mkexpr(t0),
+                                  binop(Iop_Add64, mkexpr(t4), mkexpr(t8))));
+                     putAcc(ac, mkexpr(t9));
+                     break;
+                  }
+                  case 0x6: {  /* MULSAQ_S.W.PH */
+                     /* Multiply, saturate (Q15), and subtract, paired
+                        halfwords:
+                          acc(ac) += satQ15(rs.hi16*rt.hi16)
+                                     - satQ15(rs.lo16*rt.lo16)
+                        where each fractional product is (a*b)<<1 and the sole
+                        saturating case, 0x8000*0x8000, clamps to 0x7FFFFFFF
+                        and sets the DSPControl ouflag bit for this
+                        accumulator. */
+                     DIP("mulsaq_s.w.ph ac%d r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I32);
+                     t6 = newTemp(Ity_I64);
+                     t7 = newTemp(Ity_I64);
+                     t8 = newTemp(Ity_I32);
+                     t9 = newTemp(Ity_I32);
+
+                     /* t0/t1 = sign-extended high halfwords of rs/rt. */
+                     assign(t0, unop(Iop_16Sto32,
+                                     unop(Iop_32HIto16, getIReg(rs))));
+                     assign(t1, unop(Iop_16Sto32,
+                                     unop(Iop_32HIto16, getIReg(rt))));
+
+                     /* t8 = all-ones mask iff BOTH high halfwords are
+                        0x8000 (the saturating product). */
+                     assign(t8, binop(Iop_And32,
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 unop(Iop_16Uto32,
+                                                      unop(Iop_32HIto16,
+                                                           getIReg(rs))),
+                                                 mkU32(0x8000))),
+                                    unop(Iop_1Sto32,
+                                         binop(Iop_CmpEQ32,
+                                               unop(Iop_16Uto32,
+                                                    unop(Iop_32HIto16,
+                                                         getIReg(rt))),
+                                               mkU32(0x8000)))));
+                     /* DSPControl_outflag:16+acc <- 1 */
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t8),
+                                                    mkU32(0x0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x00010000),
+                                                          mkU8(ac))),
+                                              getDSPControl()));
+
+                     /* tempB_31..0 */
+                     assign(t2,
+                            IRExpr_ITE(binop(Iop_CmpNE32,
+                                             mkexpr(t8), mkU32(0x0)),
+                                       mkU32(0x7FFFFFFF),
+                                       binop(Iop_Shl32,
+                                             binop(Iop_Mul32,
+                                                   mkexpr(t0), mkexpr(t1)),
+                                             mkU8(1))));
+
+                     /* t3/t4 = sign-extended low halfwords of rs/rt. */
+                     assign(t3, unop(Iop_16Sto32,
+                                     unop(Iop_32to16, getIReg(rs))));
+                     assign(t4, unop(Iop_16Sto32,
+                                     unop(Iop_32to16, getIReg(rt))));
+
+                     /* t9 = all-ones mask iff BOTH low halfwords are 0x8000. */
+                     assign(t9, binop(Iop_And32,
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 unop(Iop_16Uto32,
+                                                      unop(Iop_32to16,
+                                                           getIReg(rs))),
+                                                 mkU32(0x8000))),
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 unop(Iop_16Uto32,
+                                                      unop(Iop_32to16,
+                                                           getIReg(rt))),
+                                                 mkU32(0x8000)))));
+                     /* DSPControl_outflag:16+acc <- 1 */
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    mkexpr(t9),
+                                                    mkU32(0x0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x00010000),
+                                                          mkU8(ac))),
+                                              getDSPControl()));
+                     /* tempA_31..0 */
+                     assign(t5,
+                            IRExpr_ITE(binop(Iop_CmpNE32,
+                                             mkexpr(t9),
+                                             mkU32(0x0)),
+                                       mkU32(0x7FFFFFFF),
+                                       binop(Iop_Shl32,
+                                             binop(Iop_Mul32,
+                                                   mkexpr(t3),
+                                                   mkexpr(t4)),
+                                             mkU8(1))));
+                     /* dotp_63..0 */
+                     assign(t6,
+                            binop(Iop_Sub64,
+                                  unop(Iop_32Sto64, mkexpr(t2)),
+                                  unop(Iop_32Sto64, mkexpr(t5))));
+                     /* tempC_63..0 */
+                     assign(t7, binop(Iop_Add64, getAcc(ac), mkexpr(t6)));
+
+                     putAcc(ac, mkexpr(t7));
+                     break;
+                  }
+                  case 0x7: {  /* DPAU.H.QBR */
+                     /* Dot-product accumulate, unsigned bytes, Right: the
+                        mirror of DPAU.H.QBL but operating on the two bytes
+                        of each operand's LOWER halfword:
+                          acc(ac) += rs[15:8]*rt[15:8] + rs[7:0]*rt[7:0]
+                        with the 32-bit sum zero-extended before the 64-bit
+                        accumulate. */
+                     DIP("dpau.h.qbr ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I64);
+                     t3 = newTemp(Ity_I64);
+
+                     /* t0 = product of byte 1 of rs and rt. */
+                     assign(t0,
+                            binop(Iop_Mul32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t1 = product of byte 0 (LSB) of rs and rt. */
+                     assign(t1,
+                            binop(Iop_Mul32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     assign(t2, unop(Iop_32Uto64,
+                                     binop(Iop_Add32, mkexpr(t0), mkexpr(t1))));
+                     assign(t3, binop(Iop_Add64, getAcc(ac), mkexpr(t2)));
+                     putAcc(ac, mkexpr(t3));
+                     break;
+                  }
<onboarding_keeping>
+                  case 0x8: {  /* DPAX.W.PH */
+                     /* Cross dot-product accumulate, paired halfwords:
+                          acc(ac) += rs.hi16*rt.lo16 + rs.lo16*rt.hi16
+                        with signed 16x16->32 products, each sign-extended
+                        to 64 bits before the accumulate. */
+                     DIP("dpax.w.ph ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I64);
+
+                     /* t0 = rs.hi16 * rt.lo16 (crossed pair). */
+                     assign(t0,
+                            unop(Iop_32Sto64,
+                                 binop(Iop_Mul32,
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32HIto16, getIReg(rs))),
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t1 = rs.lo16 * rt.hi16 (crossed pair). */
+                     assign(t1,
+                            unop(Iop_32Sto64,
+                                 binop(Iop_Mul32,
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32to16, getIReg(rs))),
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     assign(t2,
+                            binop(Iop_Add64,
+                                  getAcc(ac),
+                                  binop(Iop_Add64, mkexpr(t0), mkexpr(t1))));
+                     putAcc(ac, mkexpr(t2));
+                     break;
+                  }
</onboarding_keeping>
+                  case 0x9: {  /* DPSX.W.PH */
+                     /* Cross dot-product SUBTRACT, paired halfwords: the
+                        mirror of DPAX.W.PH above, but the crossed sum is
+                        subtracted from the accumulator:
+                          acc(ac) -= rs.hi16*rt.lo16 + rs.lo16*rt.hi16 */
+                     DIP("dpsx.w.ph ac%d r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I64);
+
+                     /* t0 = rs.hi16 * rt.lo16 (crossed pair). */
+                     assign(t0,
+                            unop(Iop_32Sto64,
+                                 binop(Iop_Mul32,
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32HIto16, getIReg(rs))),
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t1 = rs.lo16 * rt.hi16 (crossed pair). */
+                     assign(t1,
+                            unop(Iop_32Sto64,
+                                 binop(Iop_Mul32,
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32to16, getIReg(rs))),
+                                       unop(Iop_16Sto32,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     assign(t2,
+                            binop(Iop_Sub64,
+                                  getAcc(ac),
+                                  binop(Iop_Add64, mkexpr(t0), mkexpr(t1))));
+                     putAcc(ac, mkexpr(t2));
+                     break;
+                  }
+                  case 0xB: {  /* DPSU.H.QBL */
+                     /* Dot-product SUBTRACT, unsigned bytes, Left: same byte
+                        products as DPAU.H.QBL (the two bytes of each
+                        operand's upper halfword, zero-extended), but the
+                        zero-extended 32-bit sum is subtracted from the
+                        accumulator. */
+                     DIP("dpsu.h.qbl ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I64);
+                     t3 = newTemp(Ity_I64);
+
+                     /* t0 = product of byte 3 (MSB) of rs and rt. */
+                     assign(t0,
+                            binop(Iop_Mul32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     /* t1 = product of byte 2 of rs and rt. */
+                     assign(t1,
+                            binop(Iop_Mul32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32HIto16, getIReg(rt))))));
+                     assign(t2,
+                            unop(Iop_32Uto64,
+                                 binop(Iop_Add32, mkexpr(t0), mkexpr(t1))));
+                     assign(t3,
+                            binop(Iop_Sub64, getAcc(ac), mkexpr(t2)));
+                     putAcc(ac, mkexpr(t3));
+                     break;
+                  }
+                  case 0xC: {  /* DPAQ_SA.L.W */
+                     /* Dot-product accumulate with saturation, fractional
+                        words:  acc(ac) = sat64(acc + satProd) where
+                        satProd = (rs*rt)<<1, clamped to 0x7fff..f when
+                        rs == rt == 0x80000000 (the only overflowing product;
+                        this also sets the ouflag bit 16+ac).  The 64-bit add
+                        is performed in 32-bit halves so the carry out of the
+                        low half (t5) and the signed high-half sum (t6) can
+                        be inspected for overflow. */
+                     DIP("dpaq_sa.l.w ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I64);
+                     t7 = newTemp(Ity_I64);
+                     t8 = newTemp(Ity_I1);
+                     t9 = newTemp(Ity_I1);
+
+                     assign(t0, getAcc(ac));
+
+                     /* t1 = (rs * rt) << 1, signed 32x32->64. */
+                     assign(t1, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            getIReg(rs), getIReg(rt)),
+                                      mkU8(0x1)));
+
+                     /* t2/t3: is rs/rt exactly 0x80000000 (-1.0 in Q31)? */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      getIReg(rs),
+                                      mkU32(0x80000000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      getIReg(rt),
+                                      mkU32(0x80000000)));
+
+                     /* t4 = product, saturated iff both operands were
+                        0x80000000. */
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t2),
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU64(0x7fffffffffffffffULL),
+                                                  mkexpr(t1)),
+                                       mkexpr(t1)));
+
+                     /* On product saturation, set ouflag bit 16+ac. */
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     /* t5 = zero-extended sum of the low 32-bit halves;
+                        bit 32 of t5 is the carry into the high half. */
+                     assign(t5, binop(Iop_Add64,
+                                      unop(Iop_32Uto64,
+                                           unop(Iop_64to32, mkexpr(t0))),
+                                      unop(Iop_32Uto64,
+                                           unop(Iop_64to32, mkexpr(t4)))));
+                     /* t6 = signed sum of the high halves plus that carry. */
+                     assign(t6,
+                            binop(Iop_Add64,
+                                  binop(Iop_Add64,
+                                        unop(Iop_32Sto64,
+                                             unop(Iop_64HIto32, mkexpr(t0))),
+                                        unop(Iop_32Sto64,
+                                             unop(Iop_64HIto32, mkexpr(t4)))),
+                                  unop(Iop_32Uto64,
+                                       binop(Iop_And32,
+                                             unop(Iop_64HIto32, mkexpr(t5)),
+                                             mkU32(0x1)))));
+                     /* t7 = reassembled 64-bit result. */
+                     assign(t7, binop(Iop_32HLto64,
+                                      unop(Iop_64to32, mkexpr(t6)),
+                                      unop(Iop_64to32, mkexpr(t5))));
+                     /* t8: no signed overflow iff the result's sign bit
+                        (bit 31 of t6's low word) matches the carry/sign out
+                        (bit 0 of t6's high word). */
+                     assign(t8, binop(Iop_CmpEQ32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  unop(Iop_64to32, mkexpr(t6)),
+                                                  mkU32(0x80000000)),
+                                            mkU8(31)),
+                                      binop(Iop_And32,
+                                            unop(Iop_64HIto32, mkexpr(t6)),
+                                            mkU32(0x00000001))));
+                     /* t9: overflow direction — carry-out set means the true
+                        result was negative, so saturate to INT64_MIN. */
+                     assign(t9, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64HIto32,
+                                                 mkexpr(t6)),
+                                            mkU32(0x00000001)),
+                                      mkU32(0x1)));
+                     /* On accumulate overflow, set ouflag bit 16+ac. */
+                     putDSPControl(IRExpr_ITE(mkexpr(t8),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16)))));
+                     putAcc(ac,
+                            IRExpr_ITE(mkexpr(t8),
+                                       mkexpr(t7),
+                                       IRExpr_ITE(mkexpr(t9),
+                                                  mkU64(0x8000000000000000ULL),
+                                                  mkU64(0x7fffffffffffffffULL)))
+                           );
+                     break;
+                  }
+                  case 0xD: {  /* DPSQ_SA.L.W */
+                     DIP("dpsq_sa.l.w ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I64);
+                     t7 = newTemp(Ity_I64);
+                     t8 = newTemp(Ity_I1);
+                     t9 = newTemp(Ity_I1);
+
+                     assign(t0, getAcc(ac));
+
+                     assign(t1, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            getIReg(rs), getIReg(rt)),
+                                      mkU8(0x1)));
+
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      getIReg(rs),
+                                      mkU32(0x80000000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      getIReg(rt),
+                                      mkU32(0x80000000)));
+
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t2),
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU64(0x7fffffffffffffffULL),
+                                                  mkexpr(t1)),
+                                       mkexpr(t1)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     assign(t5, binop(Iop_Sub64,
+                                      unop(Iop_32Uto64,
+                                           unop(Iop_64to32, mkexpr(t0))),
+                                      unop(Iop_32Uto64,
+                                           unop(Iop_64to32, mkexpr(t4)))));
+                     assign(t6, binop(Iop_Sub64,
+                                      binop(Iop_Add64,
+                                            unop(Iop_32Sto64,
+                                                 unop(Iop_64HIto32, mkexpr(t0))
+                                                ),
+                                            unop(Iop_32Sto64,
+                                                 unop(Iop_1Sto32,
+                                                      binop(Iop_CmpLT32U,
+                                                            unop(Iop_64to32,
+                                                                 mkexpr(t0)),
+                                                            unop(Iop_64to32,
+                                                                mkexpr(t4)))))),
+                                      unop(Iop_32Sto64,
+                                           unop(Iop_64HIto32, mkexpr(t4)))));
+                     assign(t7, binop(Iop_32HLto64,
+                                      unop(Iop_64to32, mkexpr(t6)),
+                                      unop(Iop_64to32, mkexpr(t5))));
+                     assign(t8, binop(Iop_CmpEQ32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  unop(Iop_64to32, mkexpr(t6)),
+                                                  mkU32(0x80000000)),
+                                            mkU8(31)),
+                                      binop(Iop_And32,
+                                            unop(Iop_64HIto32, mkexpr(t6)),
+                                            mkU32(0x00000001))));
+                     assign(t9, binop(Iop_CmpEQ32,
+                                      binop(Iop_And32,
+                                            unop(Iop_64HIto32, mkexpr(t6)),
+                                            mkU32(0x00000001)),
+                                      mkU32(0x1)));
+                     putDSPControl(IRExpr_ITE(mkexpr(t8),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16)))));
+                     putAcc(ac,
+                            IRExpr_ITE(mkexpr(t8),
+                                       mkexpr(t7),
+                                       IRExpr_ITE(mkexpr(t9),
+                                                  mkU64(0x8000000000000000ULL),
+                                                  mkU64(0x7fffffffffffffffULL)))
+                           );
+                     break;
+                  }
+                  case 0xF: {  /* DPSU.H.QBR */
+                     /* Dot-product with subtraction, unsigned bytes, right
+                        pair: multiply the two least-significant byte pairs
+                        of rs and rt, sum the two products, and subtract the
+                        sum from accumulator ac.  No saturation or
+                        DSPControl update is performed for this variant. */
+                     DIP("dpsu.h.qbr ac%d r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I64);
+                     t3 = newTemp(Ity_I64);
+
+                     /* t0 = rs[15:8] * rt[15:8], both zero-extended. */
+                     assign(t0,
+                            binop(Iop_Mul32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16HIto8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* t1 = rs[7:0] * rt[7:0], both zero-extended. */
+                     assign(t1,
+                            binop(Iop_Mul32,
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rs)))),
+                                  unop(Iop_8Uto32,
+                                       unop(Iop_16to8,
+                                            unop(Iop_32to16, getIReg(rt))))));
+                     /* Zero-extend the 32-bit sum of products to 64 bits
+                        so it can be subtracted from the accumulator. */
+                     assign(t2, unop(Iop_32Uto64,
+                                     binop(Iop_Add32, mkexpr(t0), mkexpr(t1))));
+                     assign(t3, binop(Iop_Sub64, getAcc(ac), mkexpr(t2)));
+                     putAcc(ac, mkexpr(t3));
+
+                     break;
+                  }
+                  case 0x10: {  /* MAQ_SA.W.PHL */
+                     /* Multiply the left (most-significant) halfwords of rs
+                        and rt as Q15 fractions (product doubled by a 1-bit
+                        left shift), add the result to accumulator ac, and
+                        saturate the sum to the signed 32-bit range.
+                        Overflow at either step sets DSPControl ouflag bit
+                        (ac+16). */
+                     DIP("maq_sa.w.phl ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I64);
+
+                     assign(t0, getAcc(ac));
+                     /* Fractional product: (rs_hi * rt_hi) << 1, sign
+                        extended to 64 bits. */
+                     assign(t1, unop(Iop_32Sto64,
+                                     binop(Iop_Shl32,
+                                           binop(Iop_Mul32,
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32HIto16,
+                                                           getIReg(rs))),
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32HIto16,
+                                                           getIReg(rt)))),
+                                           mkU8(0x1))));
+
+                     /* If both input arguments are equal 0x8000, saturate
+                        intermediate product and write to DSPControl register.
+                     */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+
+                     /* 0x8000 * 0x8000 is the only overflowing product;
+                        replace it with the saturated value 0x7fffffff. */
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t2),
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t1)),
+                                       mkexpr(t1)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+                     /* Add intermediate product and value in the
+                        accumulator. */
+                     assign(t5, binop(Iop_Add64, mkexpr(t0), mkexpr(t4)));
+
+                     /* Compare bits 31 and 32 of the value in t5. */
+                     assign(t6, binop(Iop_CmpEQ32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  unop(Iop_64to32, mkexpr(t5)),
+                                                  mkU32(0x80000000)),
+                                            mkU8(31)),
+                                      binop(Iop_And32,
+                                            unop(Iop_64HIto32, mkexpr(t5)),
+                                            mkU32(1))));
+                     /* Bits 31 and 32 differ => sum overflowed the signed
+                        32-bit range: set the ouflag bit for this ac. */
+                     putDSPControl(IRExpr_ITE(mkexpr(t6),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16)))));
+                     /* On overflow saturate, keyed on bit 32 of the sum:
+                        0 => positive overflow (0x7fffffff), 1 => negative
+                        overflow (sign-extended 0x80000000). */
+                     assign(t7,
+                            IRExpr_ITE(mkexpr(t6),
+                                       mkexpr(t5),
+                                       IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                        binop(Iop_And32,
+                                                              unop(Iop_64HIto32,
+                                                                   mkexpr(t5)),
+                                                              mkU32(1)),
+                                                        mkU32(0x0)),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkU64(0xffffffff80000000ULL)))
+                           );
+                     putAcc(ac, mkexpr(t7));
+                     break;
+                  }
+                  case 0x12: {  /* MAQ_SA.W.PHR */
+                     /* Same as MAQ_SA.W.PHL above, but operating on the
+                        right (least-significant) halfwords of rs and rt:
+                        Q15 fractional multiply (product << 1), accumulate
+                        into ac, saturate to the signed 32-bit range, and
+                        set DSPControl ouflag bit (ac+16) on overflow. */
+                     DIP("maq_sa.w.phr ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I64);
+
+                     assign(t0, getAcc(ac));
+                     /* Fractional product: (rs_lo * rt_lo) << 1, sign
+                        extended to 64 bits. */
+                     assign(t1, unop(Iop_32Sto64,
+                                     binop(Iop_Shl32,
+                                           binop(Iop_Mul32,
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32to16,
+                                                           getIReg(rs))),
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32to16,
+                                                           getIReg(rt)))),
+                                           mkU8(0x1))));
+
+                     /* If both input arguments are equal 0x8000, saturate
+                        intermediate product and write to DSPControl
+                        register. */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+
+                     /* 0x8000 * 0x8000 is the only overflowing product;
+                        replace it with the saturated value 0x7fffffff. */
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t2),
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t1)),
+                                       mkexpr(t1)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+                     /* Add intermediate product and value in the
+                        accumulator. */
+                     assign(t5, binop(Iop_Add64, mkexpr(t0), mkexpr(t4)));
+
+                     /* Compare bits 31 and 32 of the value in t5. */
+                     assign(t6, binop(Iop_CmpEQ32,
+                                      binop(Iop_Shr32,
+                                            binop(Iop_And32,
+                                                  unop(Iop_64to32, mkexpr(t5)),
+                                                  mkU32(0x80000000)),
+                                            mkU8(31)),
+                                      binop(Iop_And32,
+                                            unop(Iop_64HIto32, mkexpr(t5)),
+                                            mkU32(1))));
+                     /* Bits 31 and 32 differ => sum overflowed the signed
+                        32-bit range: set the ouflag bit for this ac. */
+                     putDSPControl(IRExpr_ITE(mkexpr(t6),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16)))));
+                     /* On overflow saturate, keyed on bit 32 of the sum:
+                        0 => positive overflow (0x7fffffff), 1 => negative
+                        overflow (sign-extended 0x80000000). */
+                     assign(t7,
+                            IRExpr_ITE(mkexpr(t6),
+                                       mkexpr(t5),
+                                       IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                        binop(Iop_And32,
+                                                              unop(Iop_64HIto32,
+                                                                   mkexpr(t5)),
+                                                              mkU32(1)),
+                                                        mkU32(0x0)),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkU64(0xffffffff80000000ULL)))
+                           );
+                     putAcc(ac, mkexpr(t7));
+                     break;
+                  }
<span class="recalled-info">+                  case 0x14: {  /* MAQ_S.W.PHL */
+                     /* Multiply the left (most-significant) halfwords of
+                        rs and rt as Q15 fractions (product << 1) and add
+                        the result to accumulator ac.  When both operands
+                        equal 0x8000 — the only product that overflows —
+                        the saturated product 0x7fffffff is accumulated
+                        instead and DSPControl ouflag bit (ac+16) is set. */
+                     DIP("maq_s.w.phl ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I64);
+
+                     assign(t5, getAcc(ac));
+
+                     /* Sign-extended high halfwords of the two sources. */
+                     assign(t0, unop(Iop_16Sto32,
+                                     unop(Iop_32HIto16, getIReg(rs))));
+                     assign(t1, unop(Iop_16Sto32,
+                                     unop(Iop_32HIto16, getIReg(rt))));
+
+                     /* t2 is all-ones iff both halfword operands equal
+                        0x8000. */
+                     assign(t2, binop(Iop_And32,
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t0),
+                                                       mkU32(0xffff)),
+                                                 mkU32(0x8000))),
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t1),
+                                                       mkU32(0xffff)),
+                                                 mkU32(0x8000)))));
+
+                     /* t3 <=> no saturation needed. */
+                     assign(t3, binop(Iop_CmpEQ32, mkexpr(t2), mkU32(0x0)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16)))));
+
+                     /* Low 32 bits of the full signed product. */
+                     assign(t4, unop(Iop_64to32,
+                                     binop(Iop_MullS32,
+                                           mkexpr(t0), mkexpr(t1))));
+                     /* Accumulate (product << 1) normally, or the
+                        saturated value 0x7fffffff when both inputs were
+                        0x8000. */
+                     putAcc(ac, IRExpr_ITE(mkexpr(t3),
+                                           binop(Iop_Add64,
+                                                 unop(Iop_32Sto64,
+                                                      binop(Iop_Shl32,
+                                                            mkexpr(t4),
+                                                            mkU8(0x1))),
+                                                 mkexpr(t5)),
+                                           binop(Iop_Add64,
+                                                 mkexpr(t5),
+                                                 unop(Iop_32Sto64,
+                                                      mkU32(0x7fffffff)))));
+                     break;
+                  }</span>
+                  case 0x16: {  /* MAQ_S.W.PHR */
+                     /* Same as MAQ_S.W.PHL above, but operating on the
+                        right (least-significant) halfwords of rs and rt:
+                        Q15 fractional multiply-accumulate into ac with
+                        saturation of the 0x8000 * 0x8000 product and the
+                        corresponding DSPControl ouflag (ac+16) update. */
+                     DIP("maq_s.w.phr ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I64);
+
+                     assign(t5, getAcc(ac));
+
+                     /* Sign-extended low halfwords of the two sources. */
+                     assign(t0, unop(Iop_16Sto32,
+                                     unop(Iop_32to16, getIReg(rs))));
+                     assign(t1, unop(Iop_16Sto32,
+                                     unop(Iop_32to16, getIReg(rt))));
+
+                     /* t2 is all-ones iff both halfword operands equal
+                        0x8000. */
+                     assign(t2, binop(Iop_And32,
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t0),
+                                                       mkU32(0xffff)),
+                                                 mkU32(0x8000))),
+                                      unop(Iop_1Sto32,
+                                           binop(Iop_CmpEQ32,
+                                                 binop(Iop_And32,
+                                                       mkexpr(t1),
+                                                       mkU32(0xffff)),
+                                                 mkU32(0x8000)))));
+
+                     /* t3 <=> no saturation needed. */
+                     assign(t3, binop(Iop_CmpEQ32, mkexpr(t2), mkU32(0x0)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t3),
+                                              getDSPControl(),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16)))));
+
+                     /* Low 32 bits of the full signed product. */
+                     assign(t4, unop(Iop_64to32,
+                                     binop(Iop_MullS32,
+                                           mkexpr(t0), mkexpr(t1))));
+                     /* Accumulate (product << 1) normally, or the
+                        saturated value 0x7fffffff when both inputs were
+                        0x8000. */
+                     putAcc(ac, IRExpr_ITE(mkexpr(t3),
+                                           binop(Iop_Add64,
+                                                 unop(Iop_32Sto64,
+                                                      binop(Iop_Shl32,
+                                                            mkexpr(t4),
+                                                            mkU8(0x1))),
+                                                 mkexpr(t5)),
+                                           binop(Iop_Add64,
+                                                 mkexpr(t5),
+                                                 unop(Iop_32Sto64,
+                                                      mkU32(0x7fffffff)))));
+                     break;
+                  }
+                  case 0x18: {  /* DPAQX_S.W.PH */
+                     /* Cross dot-product with saturation and accumulate:
+                        form the two Q15 fractional cross products
+                        (rs_hi * rt_lo) << 1 and (rs_lo * rt_hi) << 1.
+                        Each product saturates to 0x7fffffff — setting
+                        DSPControl ouflag bit (ac+16) — when both of its
+                        operands equal 0x8000.  Both (possibly saturated)
+                        products are then added to accumulator ac. */
+                     DIP("dpaqx_s.w.ph ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+
+                     assign(t0, getAcc(ac));
+
+                     /* First cross product: (rs_hi * rt_lo) << 1. */
+                     assign(t1, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rs))),
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32to16,
+                                                      getIReg(rt)))),
+                                      mkU8(0x1)));
+                     /* t2/t3: does either operand of the first product
+                        equal 0x8000 (the only overflowing case)? */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t2),
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t1)),
+                                       mkexpr(t1)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                  mkU32(0x1),
+                                                                  mkU8(ac+16))),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     /* Second cross product: (rs_lo * rt_hi) << 1. */
+                     assign(t5, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32to16,
+                                                      getIReg(rs))),
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rt)))),
+                                      mkU8(0x1)));
+                     /* t6/t7: 0x8000 check for the second product. */
+                     assign(t6, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+                     assign(t8,
+                            IRExpr_ITE(mkexpr(t6),
+                                       IRExpr_ITE(mkexpr(t7),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t5)),
+                                       mkexpr(t5)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t6),
+                                              IRExpr_ITE(mkexpr(t7),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     /* acc = acc + saturated_product1 + saturated_product2 */
+                     assign(t9, binop(Iop_Add64,
+                                      binop(Iop_Add64, mkexpr(t4), mkexpr(t8)),
+                                      mkexpr(t0)));
+                     putAcc(ac, mkexpr(t9));
+                     break;
+                  }
+                  case 0x19: {  /* DPSQX_S.W.PH */
+                     /* Cross dot-product with saturation and subtraction:
+                        same two Q15 fractional cross products as
+                        DPAQX_S.W.PH — (rs_hi * rt_lo) << 1 and
+                        (rs_lo * rt_hi) << 1, each saturated to 0x7fffffff
+                        (setting DSPControl ouflag bit ac+16) when both of
+                        its operands equal 0x8000 — but their sum is
+                        subtracted from accumulator ac. */
+                     DIP("dpsqx_s.w.ph ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+
+                     assign(t0, getAcc(ac));
+
+                     /* First cross product: (rs_hi * rt_lo) << 1. */
+                     assign(t1, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rs))),
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32to16,
+                                                      getIReg(rt)))),
+                                      mkU8(0x1)));
+                     /* t2/t3: does either operand of the first product
+                        equal 0x8000 (the only overflowing case)? */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+                     assign(t4,
+                            IRExpr_ITE(mkexpr(t2),
+                                       IRExpr_ITE(mkexpr(t3),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t1)),
+                                       mkexpr(t1)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t2),
+                                              IRExpr_ITE(mkexpr(t3),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     /* Second cross product: (rs_lo * rt_hi) << 1. */
+                     assign(t5, binop(Iop_Shl64,
+                                      binop(Iop_MullS32,
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32to16,
+                                                      getIReg(rs))),
+                                            unop(Iop_16Sto32,
+                                                 unop(Iop_32HIto16,
+                                                      getIReg(rt)))),
+                                      mkU8(0x1)));
+                     /* t6/t7: 0x8000 check for the second product. */
+                     assign(t6, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+                     assign(t8,
+                            IRExpr_ITE(mkexpr(t6),
+                                       IRExpr_ITE(mkexpr(t7),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t5)),
+                                       mkexpr(t5)));
+
+                     putDSPControl(IRExpr_ITE(mkexpr(t6),
+                                              IRExpr_ITE(mkexpr(t7),
+                                                         binop(Iop_Or32,
+                                                               getDSPControl(),
+                                                               binop(Iop_Shl32,
+                                                                     mkU32(0x1),
+                                                                     mkU8(ac+16)
+                                                                    )
+                                                              ),
+                                                         getDSPControl()),
+                                              getDSPControl()));
+
+                     /* acc = acc - (saturated_product1 + saturated_product2) */
+                     assign(t9, binop(Iop_Sub64,
+                                     mkexpr(t0),
+                                     binop(Iop_Add64, mkexpr(t4), mkexpr(t8))));
+                     putAcc(ac, mkexpr(t9));
+                     break;
+                  }
+                  case 0x1A: {  /* DPAQX_SA.W.PH */
+                     DIP("dpaqx_sa.w.ph ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+                     t10 = newTemp(Ity_I32);
+
+                     assign(t0, getAcc(ac));
+                     /* Calculate the first cross dot product and saturate if
+                        needed. */
+                     assign(t1, unop(Iop_32Sto64,
+                                     binop(Iop_Shl32,
+                                           binop(Iop_Mul32,
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32HIto16,
+                                                           getIReg(rs))),
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32to16,
+                                                           getIReg(rt)))),
+                                           mkU8(0x1))));
+
+                     /* If both input arguments are equal 0x8000, saturate
+                        intermediate product and write to DSPControl
+                        register. */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+
+                     assign(t4, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       unop(Iop_1Sto32,
+                                                            mkexpr(t2)),
+                                                       unop(Iop_1Sto32,
+                                                            mkexpr(t3))),
+                                                 mkU32(0)),
+                                           mkU64(0x000000007fffffffULL),
+                                           mkexpr(t1)));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    binop(Iop_And32,
+                                                          unop(Iop_1Sto32,
+                                                               mkexpr(t2)),
+                                                          unop(Iop_1Sto32,
+                                                               mkexpr(t3))),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16))),
+                                              getDSPControl()));
+                     /* Calculate second cross dot product and saturate if
+                        needed. */
+                     assign(t5, unop(Iop_32Sto64,
+                                     binop(Iop_Shl32,
+                                           binop(Iop_Mul32,
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32to16,
+                                                           getIReg(rs))),
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32HIto16,
+                                                           getIReg(rt)))),
+                                           mkU8(0x1))));
+
+                     /* If both input arguments are equal 0x8000, saturate
+                        intermediate product and write to DSPControl
+                        register. */
+                     assign(t6, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+
+                     assign(t8, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       unop(Iop_1Sto32,
+                                                            mkexpr(t6)),
+                                                       unop(Iop_1Sto32,
+                                                            mkexpr(t7))),
+                                                 mkU32(0)),
+                                           mkU64(0x000000007fffffffULL),
+                                           mkexpr(t5)));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    binop(Iop_And32,
+                                                          unop(Iop_1Sto32,
+                                                               mkexpr(t6)),
+                                                          unop(Iop_1Sto32,
+                                                               mkexpr(t7))),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16))),
+                                              getDSPControl()));
+                     /* Add the intermediate products to the value in the
+                        accumulator. */
+                     assign(t9,
+                            binop(Iop_Add64,
+                                  mkexpr(t0),
+                                  binop(Iop_Add64, mkexpr(t8), mkexpr(t4))));
+
+                     putAcc(ac,
+                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                             binop(Iop_And32,
+                                                   unop(Iop_64HIto32,
+                                                        mkexpr(t9)),
+                                                   mkU32(0x80000000)),
+                                             mkU32(0x0)),
+                                       IRExpr_ITE(binop(Iop_CmpNE32,
+                                                        unop(Iop_64HIto32,
+                                                             binop(Iop_Shl64,
+                                                                   mkexpr(t9),
+                                                                   mkU8(1))),
+                                                        mkU32(0x0)),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t9)),
+                                       IRExpr_ITE(binop(Iop_CmpNE32,
+                                                        unop(Iop_64HIto32,
+                                                             binop(Iop_Shl64,
+                                                                   mkexpr(t9),
+                                                                   mkU8(1))),
+                                                        mkU32(0xffffffff)),
+                                                  mkU64(0xffffffff80000000ULL),
+                                                  mkexpr(t9))));
+                     assign(t10, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                  unop(Iop_64to32,
+                                                       mkexpr(t9)),
+                                                  unop(Iop_64to32,
+                                                       getAcc(ac))),
+                                           getDSPControl(),
+                                           binop(Iop_Or32,
+                                                 getDSPControl(),
+                                                 binop(Iop_Shl32,
+                                                       mkU32(0x1),
+                                                       mkU8(ac+16)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    unop(Iop_64HIto32,
+                                                         mkexpr(t9)),
+                                                    unop(Iop_64HIto32,
+                                                         getAcc(ac))),
+                                              mkexpr(t10),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16)))));
+                     break;
+                  }
+                  case 0x1B: {  /* DPSQX_SA.W.PH */
+                     DIP("dpsqx_sa.w.ph ac%d, r%d, r%d", ac, rs, rt);
+                     vassert(!mode64);
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I64);
+                     t2 = newTemp(Ity_I1);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_I64);
+                     t5 = newTemp(Ity_I64);
+                     t6 = newTemp(Ity_I1);
+                     t7 = newTemp(Ity_I1);
+                     t8 = newTemp(Ity_I64);
+                     t9 = newTemp(Ity_I64);
+                     t10 = newTemp(Ity_I32);
+
+                     assign(t0, getAcc(ac));
+                     /* Calculate the first cross dot product and saturate if
+                        needed. */
+                     assign(t1, unop(Iop_32Sto64,
+                                     binop(Iop_Shl32,
+                                           binop(Iop_Mul32,
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32HIto16,
+                                                           getIReg(rs))),
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32to16,
+                                                           getIReg(rt)))),
+                                           mkU8(0x1))));
+
+                     /* If both input arguments are equal 0x8000, saturate
+                        intermediate product and write to DSPControl
+                        register. */
+                     assign(t2, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t3, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+
+                     assign(t4, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       unop(Iop_1Sto32,
+                                                            mkexpr(t2)),
+                                                       unop(Iop_1Sto32,
+                                                            mkexpr(t3))),
+                                                 mkU32(0)),
+                                           mkU64(0x000000007fffffffULL),
+                                           mkexpr(t1)));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    binop(Iop_And32,
+                                                          unop(Iop_1Sto32,
+                                                               mkexpr(t2)),
+                                                          unop(Iop_1Sto32,
+                                                               mkexpr(t3))),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16))),
+                                              getDSPControl()));
+                     /* Calculate second cross dot product and saturate if
+                        needed. */
+                     assign(t5, unop(Iop_32Sto64,
+                                     binop(Iop_Shl32,
+                                           binop(Iop_Mul32,
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32to16,
+                                                           getIReg(rs))),
+                                                 unop(Iop_16Sto32,
+                                                      unop(Iop_32HIto16,
+                                                           getIReg(rt)))),
+                                           mkU8(0x1))));
+
+                     /* If both input arguments are equal 0x8000, saturate
+                        intermediate product and write to DSPControl
+                        register. */
+                     assign(t6, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32to16, getIReg(rs))),
+                                      mkU32(0x00008000)));
+                     assign(t7, binop(Iop_CmpEQ32,
+                                      unop(Iop_16Uto32,
+                                           unop(Iop_32HIto16, getIReg(rt))),
+                                      mkU32(0x00008000)));
+
+                     assign(t8, IRExpr_ITE(binop(Iop_CmpNE32,
+                                                 binop(Iop_And32,
+                                                       unop(Iop_1Sto32,
+                                                            mkexpr(t6)),
+                                                       unop(Iop_1Sto32,
+                                                            mkexpr(t7))),
+                                                 mkU32(0)),
+                                           mkU64(0x000000007fffffffULL),
+                                           mkexpr(t5)));
+
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpNE32,
+                                                    binop(Iop_And32,
+                                                          unop(Iop_1Sto32,
+                                                               mkexpr(t6)),
+                                                          unop(Iop_1Sto32,
+                                                               mkexpr(t7))),
+                                                    mkU32(0)),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16))),
+                                              getDSPControl()));
+                     /* Subtract intermediate products from value in the
+                        accumulator. */
+                     assign(t9,
+                            binop(Iop_Sub64,
+                                  mkexpr(t0),
+                                  binop(Iop_Add64, mkexpr(t8), mkexpr(t4))));
+
+                     putAcc(ac,
+                            IRExpr_ITE(binop(Iop_CmpEQ32,
+                                             binop(Iop_And32,
+                                                   unop(Iop_64HIto32,
+                                                        mkexpr(t9)),
+                                                   mkU32(0x80000000)),
+                                             mkU32(0x0)),
+                                       IRExpr_ITE(binop(Iop_CmpNE32,
+                                                        unop(Iop_64HIto32,
+                                                             binop(Iop_Shl64,
+                                                                   mkexpr(t9),
+                                                                   mkU8(1))),
+                                                        mkU32(0x0)),
+                                                  mkU64(0x000000007fffffffULL),
+                                                  mkexpr(t9)),
+                                       IRExpr_ITE(binop(Iop_CmpNE32,
+                                                        unop(Iop_64HIto32,
+                                                             binop(Iop_Shl64,
+                                                                   mkexpr(t9),
+                                                                   mkU8(1))),
+                                                        mkU32(0xffffffff)),
+                                                  mkU64(0xffffffff80000000ULL),
+                                                  mkexpr(t9))));
+                     assign(t10, IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                  unop(Iop_64to32,
+                                                       mkexpr(t9)),
+                                                  unop(Iop_64to32,
+                                                       getAcc(ac))),
+                                           getDSPControl(),
+                                           binop(Iop_Or32,
+                                                 getDSPControl(),
+                                                 binop(Iop_Shl32,
+                                                       mkU32(0x1),
+                                                       mkU8(ac+16)))));
+                     putDSPControl(IRExpr_ITE(binop(Iop_CmpEQ32,
+                                                    unop(Iop_64HIto32,
+                                                         mkexpr(t9)),
+                                                    unop(Iop_64HIto32,
+                                                         getAcc(ac))),
+                                              mkexpr(t10),
+                                              binop(Iop_Or32,
+                                                    getDSPControl(),
+                                                    binop(Iop_Shl32,
+                                                          mkU32(0x1),
+                                                          mkU8(ac+16)))));
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of DPAQ.W.PH */
+            }
+            case 0x31: {  /* APPEND */
+               switch(sa) {
+                  case 0x0: {  /* APPEND */
+                     DIP("append r%d, r%d, %d", rt, rs, rd);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+
+                     assign(t1, binop(Iop_Shl32, getIReg(rt), mkU8(rd)));
+
+                     if (31 == rd) {
+                        putIReg(rt, binop(Iop_Or32,
+                                          mkexpr(t1),
+                                          binop(Iop_And32,
+                                                getIReg(rs),
+                                                mkU32(0x7fffffff))));
+                     } else if (1 == rd) {
+                        putIReg(rt,
+                                binop(Iop_Or32,
+                                      mkexpr(t1),
+                                      binop(Iop_And32,
+                                            getIReg(rs), mkU32(0x1))));
+                     } else {
+                        assign(t2,
+                               unop(Iop_Not32,
+                                    binop(Iop_Shl32,
+                                          mkU32(0xffffffff), mkU8(rd))));
+
+                        putIReg(rt, binop(Iop_Or32,
+                                          mkexpr(t1),
+                                          binop(Iop_And32,
+                                                getIReg(rs), mkexpr(t2))));
+                     }
+                     break;
+                  }
+                  case 0x1: {  /* PREPEND */
+                     DIP("prepend r%d, r%d, %d", rt, rs, rd);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+
+                     if (0 != rd) {
+                        assign(t1, binop(Iop_Shr32, getIReg(rt), mkU8(rd)));
+
+                        if (31 == rd) {
+                           putIReg(rt, binop(Iop_Or32,
+                                             mkexpr(t1),
+                                             binop(Iop_Shl32,
+                                                   binop(Iop_And32,
+                                                         getIReg(rs),
+                                                         mkU32(0x7fffffff)),
+                                                   mkU8(1))));
+                        } else if (1 == rd) {
+                           putIReg(rt, binop(Iop_Or32,
+                                             mkexpr(t1),
+                                             binop(Iop_Shl32,
+                                                   binop(Iop_And32,
+                                                         getIReg(rs),
+                                                         mkU32(0x1)),
+                                                   mkU8(31))));
+                        } else {
+                           assign(t2, binop(Iop_Add32, mkU32(rd), mkU32(0x1)));
+
+                           assign(t3, unop(Iop_Not32,
+                                           binop(Iop_Shl32,
+                                                 mkU32(0xffffffff),
+                                                 unop(Iop_32to8, mkexpr(t2)))));
+
+                           putIReg(rt, binop(Iop_Or32,
+                                             mkexpr(t1),
+                                             binop(Iop_Shl32,
+                                                   binop(Iop_And32,
+                                                         getIReg(rs),
+                                                         mkexpr(t3)),
+                                                   mkU8(32-rd))));
+                        }
+                     }
+                     break;
+                  }
+                  case 0x10: {  /* BALIGN */
+                     DIP("balign r%d, r%d, %d", rt, rs, rd);
+                     vassert(!mode64);
+                     t1 = newTemp(Ity_I32);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I32);
+
+                     if ((2 != rd) && (0 != rd)) {
+                        assign(t1, binop(Iop_Shl32,
+                                         binop(Iop_And32,
+                                               mkU32(rd), mkU32(0x3)),
+                                         mkU8(0x3)));
+                        assign(t2, binop(Iop_Shl32,
+                                         getIReg(rt),
+                                         unop(Iop_32to8, mkexpr(t1))));
+                        assign(t3, binop(Iop_Shr32,
+                                         getIReg(rs),
+                                         unop(Iop_32to8,
+                                              binop(Iop_Shl32,
+                                                    binop(Iop_Sub32,
+                                                          mkU32(0x4),
+                                                          binop(Iop_And32,
+                                                                mkU32(rd),
+                                                                mkU32(0x3))),
+                                                    mkU8(0x3)))));
+                        putIReg(rt, binop(Iop_Or32, mkexpr(t2), mkexpr(t3)));
+                     }
+                     break;
+                  }
+                  default:
+                     return -1;
+               }
+               break;  /* end of APPEND */
+            }
+            default:
+               return -1;
+         }
+         break;
+      }
+      default:
+            return -1;
+   }
+   return 0;
+}
+
+/*------------------------------------------------------------*/
+/*---          Disassemble a single instruction            ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR. The instruction is
+   located in host memory at guest_instr, and has guest IP of
+   guest_PC_curr_instr, which will have been set before the call
+   here. */
+
+static DisResult disInstr_MIPS_WRK ( Bool(*resteerOkFn) (/*opaque */void *,
+                                                                    Addr),
+                                     Bool         resteerCisOk,
+                                     void*        callback_opaque,
+                                     Long         delta64,
+                                     const VexArchInfo* archinfo,
+                                     const VexAbiInfo*  abiinfo,
+                                     Bool         sigill_diag )
+{
+   IRTemp t0, t1 = 0, t2, t3, t4, t5, t6, t7;
+
+   UInt opcode, cins, rs, rt, rd, sa, ft, fs, fd, fmt, tf, nd, function,
+        trap_code, imm, instr_index, p, msb, lsb, size, rot, sel;
+   /* Additional variables for instruction fields in DSP ASE instructions */
+   UInt ac;
+
+   DisResult dres;
+
+   static IRExpr *lastn = NULL;  /* last jump addr */
+   static IRStmt *bstmt = NULL;  /* branch (Exit) stmt */
+
+   /* The running delta */
+   Int delta = (Int) delta64;
+
+   /* Holds the delta (guest-PC offset) at the start of the insn, so that
+      we can print consistent error messages for unimplemented insns. */
+   Int delta_start = delta;
+
+   /* Are we in a delay slot ? */
+   Bool delay_slot_branch, likely_delay_slot, delay_slot_jump;
+
+   /* Set result defaults. */
+   dres.whatNext = Dis_Continue;
+   dres.len = 0;
+   dres.continueAt = 0;
+   dres.jk_StopHere = Ijk_INVALID;
+
+   delay_slot_branch = likely_delay_slot = delay_slot_jump = False;
+
+   const UChar *code = guest_code + delta;
+   cins = getUInt(code);
+   DIP("\t0x%lx:\t0x%08x\t", (long)guest_PC_curr_instr, cins);
+
+   if (delta != 0) {
+      if (branch_or_jump(guest_code + delta - 4)) {
+         if (lastn == NULL && bstmt == NULL) {
+            vassert(0);
+         } else {
+            dres.whatNext = Dis_StopHere;
+            if (lastn != NULL) {
+               delay_slot_jump = True;
+            } else if (bstmt != NULL) {
+               delay_slot_branch = True;
+            }
+         }
+      }
+
+      if (branch_or_link_likely(guest_code + delta - 4)) {
+         likely_delay_slot = True;
+      }
+   }
+
+   /* Spot "Special" instructions (see comment at top of file). */
+   {
+      /* Spot the 16-byte preamble:
+       ****mips32****
+       srl $0, $0, 13
+       srl $0, $0, 29
+       srl $0, $0, 3
+       srl $0, $0, 19
+
+       ****mips64****
+       dsll $0, $0, 3
+       dsll $0, $0, 13
+       dsll $0, $0, 29
+       dsll $0, $0, 19 */
+
+      UInt word1 = mode64 ? 0xF8  : 0x342;
+      UInt word2 = mode64 ? 0x378 : 0x742;
+      UInt word3 = mode64 ? 0x778 : 0xC2;
+      UInt word4 = mode64 ? 0x4F8 : 0x4C2;
+      if (getUInt(code + 0) == word1 && getUInt(code + 4) == word2 &&
+          getUInt(code + 8) == word3 && getUInt(code + 12) == word4) {
+         /* Got a "Special" instruction preamble. Which one is it? */
+         if (getUInt(code + 16) == 0x01ad6825 /* or $13, $13, $13 */ ) {
+            /* $11 = client_request ( $12 ) */
+            DIP("$11 = client_request ( $12 )");
+            if (mode64)
+               putPC(mkU64(guest_PC_curr_instr + 20));
+            else
+               putPC(mkU32(guest_PC_curr_instr + 20));
+            dres.jk_StopHere = Ijk_ClientReq;
+            dres.whatNext    = Dis_StopHere;
+
+            goto decode_success;
+         } else if (getUInt(code + 16) == 0x01ce7025 /* or $14, $14, $14 */ ) {
+            /* $11 = guest_NRADDR */
+            DIP("$11 = guest_NRADDR");
+            dres.len = 20;
+            delta += 20;
+            if (mode64)
+               putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS64State,
+                                               guest_NRADDR), Ity_I64));
+            else
+               putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS32State,
+                                               guest_NRADDR), Ity_I32));
+            goto decode_success;
+         } else if (getUInt(code + 16) == 0x01ef7825 /* or $15, $15, $15 */ ) {
+            /*  branch-and-link-to-noredir $25 */
+            DIP("branch-and-link-to-noredir $25");
+            if (mode64)
+               putIReg(31, mkU64(guest_PC_curr_instr + 20));
+            else
+               putIReg(31, mkU32(guest_PC_curr_instr + 20));
+            putPC(getIReg(25));
+            dres.jk_StopHere = Ijk_NoRedir;
+            dres.whatNext    = Dis_StopHere;
+            goto decode_success;
+         } else if (getUInt(code + 16) == 0x016b5825 /* or $11,$11,$11 */ ) {
+           /* IR injection */
+            DIP("IR injection");
+#if defined (_MIPSEL)
+            vex_inject_ir(irsb, Iend_LE);
+#elif defined (_MIPSEB)
+            vex_inject_ir(irsb, Iend_BE);
+#endif
+            if (mode64) {
+               stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_CMSTART),
+                               mkU64(guest_PC_curr_instr)));
+               stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_CMLEN),
+                               mkU64(20)));
+
+               putPC(mkU64(guest_PC_curr_instr + 20));
+            } else {
+               stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_CMSTART),
+                               mkU32(guest_PC_curr_instr)));
+               stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_CMLEN),
+                               mkU32(20)));
+
+               putPC(mkU32(guest_PC_curr_instr + 20));
+            }
+            dres.whatNext    = Dis_StopHere;
+            dres.jk_StopHere = Ijk_InvalICache;
+            dres.len = 20;
+            delta += 20;
+            goto decode_success;
+         }
+
+         /* We don't know what it is.  Set opc1/opc2 so decode_failure
+            can print the insn following the Special-insn preamble. */
+         delta += 16;
+         goto decode_failure;
+       /*NOTREACHED*/}
+   }
+
+   opcode = get_opcode(cins);
+   imm = get_imm(cins);
+   rs = get_rs(cins);
+   rt = get_rt(cins);
+   rd = get_rd(cins);
+   sa = get_sa(cins);
+   fs = get_fs(cins);
+   fd = get_fd(cins);
+   ft = get_ft(cins);
+   tf = get_tf(cins);
+   nd = get_nd(cins);
+   sel = get_sel(cins);
+   fmt = get_fmt(cins);
+   instr_index = get_instr_index(cins);
+   trap_code = get_code(cins);
+   function = get_function(cins);
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   IRType tyF = fp_mode64 ? Ity_F64 : Ity_F32;
+
+   ac = get_acNo(cins);
+
+   switch (opcode) {
+
+   case 0x03:     /* JAL */
+      DIP("jal 0x%x", instr_index);
+      if (mode64) {
+         putIReg(31, mkU64(guest_PC_curr_instr + 8));
+         t0 = newTemp(ty);
+         assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000ULL) |
+                          (instr_index << 2)));
+      } else {
+         putIReg(31, mkU32(guest_PC_curr_instr + 8));
+         t0 = newTemp(ty);
+         assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) |
+                          (instr_index << 2)));
+      }
+      lastn = mkexpr(t0);
+      break;
+   case 0x02:     /* J */
+      DIP("j 0x%x", instr_index);
+      t0 = newTemp(ty);
+      if (mode64)
+         assign(t0, mkU64((guest_PC_curr_instr & 0xFFFFFFFFF0000000ULL) |
+                          (instr_index << 2)));
+      else
+         assign(t0, mkU32((guest_PC_curr_instr & 0xF0000000) |
+                          (instr_index << 2)));
+      lastn = mkexpr(t0);
+      break;
+
+   case 0x11: {  /* COP1 */
+      if (fmt == 0x3 && fd == 0 && function == 0) {  /* MFHC1 */
+         DIP("mfhc1 r%d, f%d", rt, fs);
+         if (fp_mode64) {
+            t0 = newTemp(Ity_I64);
+            t1 = newTemp(Ity_I32);
+            assign(t0, unop(Iop_ReinterpF64asI64, getDReg(fs)));
+            assign(t1, unop(Iop_64HIto32, mkexpr(t0)));
+            putIReg(rt, mkWidenFrom32(ty, mkexpr(t1), True));
+         } else {
+            ILLEGAL_INSTRUCTON;
+         }
+         break;
+      } else if (fmt == 0x7 && fd == 0 && function == 0) {  /* MTHC1 */
+         DIP("mthc1 r%d, f%d", rt, fs);
+         if (fp_mode64) {
+            t0 = newTemp(Ity_I64);
+            assign(t0, binop(Iop_32HLto64, getIReg(rt),
+                             unop(Iop_ReinterpF32asI32,
+                                  getLoFromF64(Ity_F64 /* 32FPR mode. */,
+                                               getDReg(fs)))));
+            putDReg(fs, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+         } else {
+            ILLEGAL_INSTRUCTON;
+         }
+         break;
+      } else if (fmt == 0x8) {  /* BC */
+         /* FcConditionalCode(bc1_cc) */
+         UInt bc1_cc = get_bc1_cc(cins);
+         t1 = newTemp(Ity_I1);
+         t2 = newTemp(Ity_I32);
+         t3 = newTemp(Ity_I1);
+
+         assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(bc1_cc)));
+         assign(t2, IRExpr_ITE(mkexpr(t1),
+                               binop(Iop_And32,
+                                     binop(Iop_Shr32, getFCSR(), mkU8(23)),
+                                     mkU32(0x1)),
+                               binop(Iop_And32,
+                                     binop(Iop_Shr32, getFCSR(),
+                                           mkU8(24 + bc1_cc)),
+                                     mkU32(0x1))));
+
+         if (tf == 1 && nd == 0) {
+            /* branch on true */
+            DIP("bc1t %d, %d", bc1_cc, imm);
+            assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
+            dis_branch(False, mkexpr(t3), imm, &bstmt);
+            break;
+         } else if (tf == 0 && nd == 0) {
+            /* branch on false */
+            DIP("bc1f %d, %d", bc1_cc, imm);
+            assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
+            dis_branch(False, mkexpr(t3), imm, &bstmt);
+            break;
+         } else if (nd == 1 && tf == 0) {
+            DIP("bc1fl %d, %d", bc1_cc, imm);
+            lastn = dis_branch_likely(binop(Iop_CmpNE32, mkexpr(t2),
+                                            mkU32(0x0)), imm);
+            break;
+         } else if (nd == 1 && tf == 1) {
+            DIP("bc1tl %d, %d", bc1_cc, imm);
+            lastn = dis_branch_likely(binop(Iop_CmpEQ32, mkexpr(t2),
+                                            mkU32(0x0)), imm);
+            break;
+         } else
+            goto decode_failure;
+      } else {
+         switch (function) {
+            case 0x4: {  /* SQRT.fmt */
+               switch (fmt) {
+                  case 0x10: {  /* S */
+                     IRExpr *rm = get_IR_roundingmode();
+                     putFReg(fd, mkWidenFromF32(tyF, binop(Iop_SqrtF32, rm,
+                                 getLoFromF64(tyF, getFReg(fs)))));
+                     break;
+                  }
+                  case 0x11: {  /* D */
+                     IRExpr *rm = get_IR_roundingmode();
+                     putDReg(fd, binop(Iop_SqrtF64, rm, getDReg(fs)));
+                     break;
+                  }
+                  default:
+                     goto decode_failure;
+                  }
+               }
+               break;
+            case 0x5:  /* abs.fmt */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("abs.s f%d, f%d", fd, fs);
+                     putFReg(fd, mkWidenFromF32(tyF, unop(Iop_AbsF32,
+                                 getLoFromF64(tyF, getFReg(fs)))));
+                     break;
+                  case 0x11:  /* D  */
+                     DIP("abs.d f%d, f%d", fd, fs);
+                     putDReg(fd, unop(Iop_AbsF64, getDReg(fs)));
+                     break;
+                  default:
+                     goto decode_failure;
+               }
+               break;  /* case 0x5 */
+
+            case 0x02:  /* MUL.fmt */
+               switch (fmt) {
+                  case 0x11: {  /* D */
+                     DIP("mul.d f%d, f%d, f%d", fd, fs, ft);
+                     IRExpr *rm = get_IR_roundingmode();
+                     putDReg(fd, triop(Iop_MulF64, rm, getDReg(fs),
+                                       getDReg(ft)));
+                     break;
+                  }
+                  case 0x10: {  /* S */
+                     DIP("mul.s f%d, f%d, f%d", fd, fs, ft);
+                     IRExpr *rm = get_IR_roundingmode();
+                     putFReg(fd, mkWidenFromF32(tyF, triop(Iop_MulF32, rm,
+                                 getLoFromF64(tyF, getFReg(fs)),
+                                 getLoFromF64(tyF, getFReg(ft)))));
+                     break;
+                  }
+                  default:
+                     goto decode_failure;
+               }
+               break;  /* MUL.fmt */
+
+            case 0x03:  /* DIV.fmt */
+               switch (fmt) {
+                  case 0x11: {  /* D */
+                     DIP("div.d f%d, f%d, f%d", fd, fs, ft);
+                     IRExpr *rm = get_IR_roundingmode();
+                     putDReg(fd, triop(Iop_DivF64, rm, getDReg(fs),
+                                 getDReg(ft)));
+                     break;
+                  }
+                  case 0x10: {  /* S */
+                     DIP("div.s f%d, f%d, f%d", fd, fs, ft);
+                     calculateFCSR(fs, ft, DIVS, False, 2);
+                     IRExpr *rm = get_IR_roundingmode();
+                     putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32, rm,
+                                 getLoFromF64(tyF, getFReg(fs)),
+                                 getLoFromF64(tyF, getFReg(ft)))));
+                     break;
+                  }
+                  default:
+                     goto decode_failure;
+               }
+               break;  /* DIV.fmt */
+
+            case 0x01:  /* SUB.fmt */
+               switch (fmt) {
+                  case 0x11: {  /* D */
+                     DIP("sub.d f%d, f%d, f%d", fd, fs, ft);
+                     calculateFCSR(fs, ft, SUBD, False, 2);
+                     IRExpr *rm = get_IR_roundingmode();
+                     putDReg(fd, triop(Iop_SubF64, rm, getDReg(fs),
+                                       getDReg(ft)));
+                     break;
+                  }
+                  case 0x10: {  /* S */
+                     DIP("sub.s f%d, f%d, f%d", fd, fs, ft);
+                     calculateFCSR(fs, ft, SUBS, True, 2);
+                     IRExpr *rm = get_IR_roundingmode();
+                     putFReg(fd, mkWidenFromF32(tyF, triop(Iop_SubF32, rm,
+                                 getLoFromF64(tyF, getFReg(fs)),
+                                 getLoFromF64(tyF, getFReg(ft)))));
+                     break;
+                  }
+                  default:
+                     goto decode_failure;
+               }
+               break;  /* SUB.fmt */
+
+            case 0x06:  /* MOV.fmt */
+               switch (fmt) {
+                  case 0x11:  /* D */
+                     DIP("mov.d f%d, f%d", fd, fs);
+                     if (fp_mode64) {
+                        putDReg(fd, getDReg(fs));
+                     } else {
+                        putFReg(fd, getFReg(fs));
+                        putFReg(fd + 1, getFReg(fs + 1));
+                     }
+                     break;
+                  case 0x10:  /* S */
+                     DIP("mov.s f%d, f%d", fd, fs);
+                     putFReg(fd, getFReg(fs));
+                     break;
+                  default:
+                     goto decode_failure;
+               }
+               break;  /* MOV.fmt */
+
+            case 0x7:  /* neg.fmt */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("neg.s f%d, f%d", fd, fs);
+                     putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32,
+                                 getLoFromF64(tyF, getFReg(fs)))));
+                     break;
+                  case 0x11:  /* D */
+                     DIP("neg.d f%d, f%d", fd, fs);
+                     putDReg(fd, unop(Iop_NegF64, getDReg(fs)));
+                     break;
+                  default:
+                     goto decode_failure;
+               }
+               break;  /* case 0x7 */
+
+            case 0x08:  /* ROUND.L.fmt */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("round.l.s f%d, f%d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, ROUNDLS, True, 1);
+                        t0 = newTemp(Ity_I64);
+
+                        assign(t0, binop(Iop_F32toI64S, mkU32(0x0),
+                                         getLoFromF64(Ity_F64, getFReg(fs))));
+
+                        putDReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+                  case 0x11:  /* D */
+                     DIP("round.l.d f%d, f%d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, ROUNDLD, False, 1);
+                        putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x0),
+                                          getDReg(fs)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+                  default:
+                    goto decode_failure;
+
+               }
+               break;  /* ROUND.L.fmt */
+
+            case 0x09:  /* TRUNC.L.fmt */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("trunc.l.s f%d, f%d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, TRUNCLS, True, 1);
+                        t0 = newTemp(Ity_I64);
+                        assign(t0, binop(Iop_F32toI64S, mkU32(0x3),
+                                         getLoFromF64(Ity_F64, getFReg(fs))));
+
+                        putDReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+                  case 0x11:  /* D */
+                     DIP("trunc.l.d f%d, f%d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, TRUNCLD, False, 1);
+                        putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x3),
+                                          getDReg(fs)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+                  default:
+                     goto decode_failure;
+                 }
+              break;  /* TRUNC.L.fmt */
+
+            case 0x15:  /* RECIP.fmt */
+               switch (fmt) {
+                  case 0x10: {  /* S */
+                     DIP("recip.s f%d, f%d", fd, fs);
+                     IRExpr *rm = get_IR_roundingmode();
+                     putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32,
+                                 rm, unop(Iop_ReinterpI32asF32,
+                                 mkU32(ONE_SINGLE)), getLoFromF64(tyF,
+                                 getFReg(fs)))));
+                     break;
+                  }
+                  case 0x11: {  /* D */
+                     DIP("recip.d f%d, f%d", fd, fs);
+                     IRExpr *rm = get_IR_roundingmode();
+                     /* putDReg(fd, 1.0/getDreg(fs)); */
+                     putDReg(fd, triop(Iop_DivF64, rm,
+                                 unop(Iop_ReinterpI64asF64,
+                                 mkU64(ONE_DOUBLE)), getDReg(fs)));
+                     break;
+                  }
+               default:
+                  goto decode_failure;
+
+               }
+               break;  /* case 0x15 */
+
+            case 0x13:  /* MOVN.fmt */
+               switch (fmt) {
+               case 0x10:  /* S */
+                  DIP("movn.s f%d, f%d, r%d", fd, fs, rt);
+                  t1 = newTemp(Ity_F64);
+                  t2 = newTemp(Ity_F64);
+                  t3 = newTemp(Ity_I1);
+                  t4 = newTemp(Ity_F64);
+                  if (mode64) {
+                     assign(t1, getFReg(fs));
+                     assign(t2, getFReg(fd));
+                     assign(t3, binop(Iop_CmpNE64, mkU64(0), getIReg(rt)));
+                  } else {
+                     if (fp_mode64) {
+                        assign(t1, getFReg(fs));
+                        assign(t2, getFReg(fd));
+                        assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt)));
+                     } else {
+                        assign(t1, unop(Iop_F32toF64, getFReg(fs)));
+                        assign(t2, unop(Iop_F32toF64, getFReg(fd)));
+                        assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt)));
+                     }
+                  }
+
+                  assign(t4, IRExpr_ITE(mkexpr(t3), mkexpr(t1), mkexpr(t2)));
+                  if (fp_mode64) {
+                     IRTemp f = newTemp(Ity_F64);
+                     IRTemp fd_hi = newTemp(Ity_I32);
+                     t5 = newTemp(Ity_I64);
+                     assign(f, getFReg(fd));
+                     assign(fd_hi, unop(Iop_64HIto32, unop(Iop_ReinterpF64asI64,
+                                        mkexpr(f))));
+
+                     assign(t5, mkWidenFrom32(Ity_I64, unop(Iop_64to32,
+                                unop(Iop_ReinterpF64asI64, mkexpr(t4))), True));
+
+                     putFReg(fd, unop (Iop_ReinterpI64asF64, mkexpr(t5)));
+                  } else
+                     putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
+                                       mkexpr(t4)));
+                  break;
+               case 0x11:  /* D */
+                  DIP("movn.d f%d, f%d, r%d", fd, fs, rt);
+
+                  t3 = newTemp(Ity_I1);
+                  t4 = newTemp(Ity_F64);
+
+                  if (mode64)
+                     assign(t3, binop(Iop_CmpNE64, mkU64(0), getIReg(rt)));
+                  else
+                     assign(t3, binop(Iop_CmpNE32, mkU32(0), getIReg(rt)));
+
+                  putDReg(fd, IRExpr_ITE(mkexpr(t3), getDReg(fs), getDReg(fd)));
+                  break;
+               default:
+                  goto decode_failure;
+               }
+               break;  /* MOVN.fmt */
+
+            case 0x12:  /* MOVZ.fmt */
+               switch (fmt) {
+               case 0x10:  /* S */
+                  DIP("movz.s f%d, f%d, r%d", fd, fs, rt);
+
+                  t1 = newTemp(Ity_F64);
+                  t2 = newTemp(Ity_F64);
+                  t3 = newTemp(Ity_I1);
+                  t4 = newTemp(Ity_F64);
+                  if (fp_mode64) {
+                     assign(t1, getFReg(fs));
+                     assign(t2, getFReg(fd));
+                     if (mode64)
+                        assign(t3, binop(Iop_CmpEQ64, mkU64(0), getIReg(rt)));
+                     else
+                        assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt)));
+                  } else {
+                     assign(t1, unop(Iop_F32toF64, getFReg(fs)));
+                     assign(t2, unop(Iop_F32toF64, getFReg(fd)));
+                     assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt)));
+                  }
+                  assign(t4, IRExpr_ITE(mkexpr(t3), mkexpr(t1), mkexpr(t2)));
+
+                 if (fp_mode64) {
+                     IRTemp f = newTemp(Ity_F64);
+                     IRTemp fd_hi = newTemp(Ity_I32);
+                     t7 = newTemp(Ity_I64);
+                     assign(f, getFReg(fd));
+                     assign(fd_hi, unop(Iop_64HIto32,
+                                   unop(Iop_ReinterpF64asI64, mkexpr(f))));
+                     assign(t7, mkWidenFrom32(Ity_I64, unop(Iop_64to32,
+                                unop(Iop_ReinterpF64asI64, mkexpr(t4))), True));
+
+                     putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t7)));
+                  } else
+                     putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
+                                       mkexpr(t4)));
+
+                  break;
+               case 0x11:  /* D */
+                  DIP("movz.d f%d, f%d, r%d", fd, fs, rt);
+                  t3 = newTemp(Ity_I1);
+                  t4 = newTemp(Ity_F64);
+                  if (mode64)
+                     assign(t3, binop(Iop_CmpEQ64, mkU64(0), getIReg(rt)));
+                  else
+                     assign(t3, binop(Iop_CmpEQ32, mkU32(0), getIReg(rt)));
+
+                  putDReg(fd, IRExpr_ITE(mkexpr(t3), getDReg(fs), getDReg(fd)));
+                  break;
+               default:
+                  goto decode_failure;
+               }
+               break;  /* MOVZ.fmt */
+
+            case 0x11:  /* MOVT.fmt */
+               if (tf == 1) {
+                  UInt mov_cc = get_mov_cc(cins);
+                  switch (fmt) {  /* MOVCF = 010001 */
+                  case 0x11:  /* D */
+                     DIP("movt.d f%d, f%d, %d", fd, fs, mov_cc);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_F64);
+
+                     assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
+                     assign(t2, IRExpr_ITE(mkexpr(t1),
+                                           binop(Iop_And32,
+                                                 binop(Iop_Shr32, getFCSR(),
+                                                       mkU8(23)),
+                                                 mkU32(0x1)),
+                                           binop(Iop_And32,
+                                                 binop(Iop_Shr32, getFCSR(),
+                                                       mkU8(24 + mov_cc)),
+                                                 mkU32(0x1))
+                                           ));
+
+                     assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
+                     assign(t4, IRExpr_ITE(mkexpr(t3),
+                                           getDReg(fs), getDReg(fd)));
+                     putDReg(fd, mkexpr(t4));
+                     break;
+                  case 0x10:  /* S */
+                     DIP("movt.s f%d, f%d, %d", fd, fs, mov_cc);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_F64);
+                     t5 = newTemp(Ity_F64);
+                     t6 = newTemp(Ity_F64);
+                     t7 = newTemp(Ity_I64);
+
+                     if (fp_mode64) {
+                        assign(t5, getFReg(fs));
+                        assign(t6, getFReg(fd));
+                     } else {
+                        assign(t5, unop(Iop_F32toF64, getFReg(fs)));
+                        assign(t6, unop(Iop_F32toF64, getFReg(fd)));
+                     }
+
+                     assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
+                     assign(t2, IRExpr_ITE(mkexpr(t1),
+                                           binop(Iop_And32,
+                                                 binop(Iop_Shr32, getFCSR(),
+                                                       mkU8(23)),
+                                                 mkU32(0x1)),
+                                           binop(Iop_And32,
+                                                 binop(Iop_Shr32, getFCSR(),
+                                                       mkU8(24 + mov_cc)),
+                                                 mkU32(0x1))
+                                           ));
+
+                     assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
+                     assign(t4, IRExpr_ITE(mkexpr(t3),
+                                           mkexpr(t5), mkexpr(t6)));
+
+                     if (fp_mode64) {
+                        IRTemp f = newTemp(Ity_F64);
+                        IRTemp fd_hi = newTemp(Ity_I32);
+                        assign(f, getFReg(fd));
+                        assign(fd_hi, unop(Iop_64HIto32,
+                                      unop(Iop_ReinterpF64asI64, mkexpr(f))));
+                        assign(t7, mkWidenFrom32(Ity_I64, unop(Iop_64to32,
+                                      unop(Iop_ReinterpF64asI64, mkexpr(t4))),
+                                      True));
+
+                        putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t7)));
+                     } else
+                        putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
+                                          mkexpr(t4)));
+                     break;
+                  default:
+                     goto decode_failure;
+                  }
+               } else if (tf == 0)  /* movf.fmt */
+               {
+                  UInt mov_cc = get_mov_cc(cins);
+                  switch (fmt)  /* MOVCF = 010001 */
+                  {
+                  case 0x11:  /* D */
+                     DIP("movf.d f%d, f%d, %d", fd, fs, mov_cc);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_F64);
+
+                     assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
+                     assign(t2, IRExpr_ITE(mkexpr(t1),
+                                           binop(Iop_And32,
+                                                 binop(Iop_Shr32, getFCSR(),
+                                                       mkU8(23)),
+                                                 mkU32(0x1)),
+                                           binop(Iop_And32,
+                                                 binop(Iop_Shr32, getFCSR(),
+                                                       mkU8(24 + mov_cc)),
+                                                 mkU32(0x1))
+                                           ));
+
+                     assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
+                     assign(t4, IRExpr_ITE(mkexpr(t3),
+                                           getDReg(fs), getDReg(fd)));
+                     putDReg(fd, mkexpr(t4));
+                     break;
+                  case 0x10:  /* S */
+                     DIP("movf.s f%d, f%d, %d", fd, fs, mov_cc);
+                     t1 = newTemp(Ity_I1);
+                     t2 = newTemp(Ity_I32);
+                     t3 = newTemp(Ity_I1);
+                     t4 = newTemp(Ity_F64);
+                     t5 = newTemp(Ity_F64);
+                     t6 = newTemp(Ity_F64);
+
+                     if (fp_mode64) {
+                        assign(t5, getFReg(fs));
+                        assign(t6, getFReg(fd));
+                     } else {
+                        assign(t5, unop(Iop_F32toF64, getFReg(fs)));
+                        assign(t6, unop(Iop_F32toF64, getFReg(fd)));
+                     }
+
+                     assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
+                     assign(t2, IRExpr_ITE(mkexpr(t1),
+                                           binop(Iop_And32,
+                                                 binop(Iop_Shr32, getFCSR(),
+                                                       mkU8(23)),
+                                                 mkU32(0x1)),
+                                           binop(Iop_And32,
+                                                 binop(Iop_Shr32, getFCSR(),
+                                                       mkU8(24 + mov_cc)),
+                                                 mkU32(0x1))
+                                           ));
+
+                     assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
+                     assign(t4, IRExpr_ITE(mkexpr(t3),
+                                           mkexpr(t5), mkexpr(t6)));
+
+                     if (fp_mode64) {
+                        IRTemp f = newTemp(Ity_F64);
+                        IRTemp fd_hi = newTemp(Ity_I32);
+                        t7 = newTemp(Ity_I64);
+                        assign(f, getFReg(fd));
+                        assign(fd_hi, unop(Iop_64HIto32,
+                                      unop(Iop_ReinterpF64asI64, mkexpr(f))));
+                        assign(t7, mkWidenFrom32(Ity_I64, unop(Iop_64to32,
+                                   unop(Iop_ReinterpF64asI64, mkexpr(t4))),
+                                   True));
+
+                        putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t7)));
+                     } else
+                        putFReg(fd, binop(Iop_F64toF32, get_IR_roundingmode(),
+                                          mkexpr(t4)));
+                     break;
+                  default:
+                     goto decode_failure;
+                  }
+               }
+
+               break;  /* MOVT.fmt */
+
+            case 0x0:  /* add.fmt */
+               switch (fmt) {
+               case 0x10: {  /* S */
+                  DIP("add.s f%d, f%d, f%d", fd, fs, ft);
+                  calculateFCSR(fs, ft, ADDS, True, 2);
+                  IRExpr *rm = get_IR_roundingmode();
+                  putFReg(fd, mkWidenFromF32(tyF, triop(Iop_AddF32, rm,
+                              getLoFromF64(tyF, getFReg(fs)),
+                              getLoFromF64(tyF, getFReg(ft)))));
+                  break;
+               }
+               case 0x11: {  /* D */
+                  DIP("add.d f%d, f%d, f%d", fd, fs, ft);
+                  calculateFCSR(fs, ft, ADDD, False, 2);
+                  IRExpr *rm = get_IR_roundingmode();
+                  putDReg(fd, triop(Iop_AddF64, rm, getDReg(fs), getDReg(ft)));
+                  break;
+               }
+
+               case 0x4:  /* MTC1 (Move Word to Floating Point) */
+                  DIP("mtc1 r%d, f%d", rt, fs);
+                  if (fp_mode64) {
+                     t0 = newTemp(Ity_I32);
+                     t1 = newTemp(Ity_F32);
+                     assign(t0, mkNarrowTo32(ty, getIReg(rt)));
+                     assign(t1, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+
+                     putFReg(fs, mkWidenFromF32(tyF, mkexpr(t1)));
+                  } else
+                     putFReg(fs, unop(Iop_ReinterpI32asF32, getIReg(rt)));
+                  break;
+
+               case 0x5:  /* Doubleword Move to Floating Point DMTC1; MIPS64 */
+                  DIP("dmtc1 r%d, f%d", rt, fs);
+                  vassert(mode64);
+                  putFReg(fs, unop(Iop_ReinterpI64asF64, getIReg(rt)));
+                  break;
+
+               case 0x0:  /* MFC1 */
+                  DIP("mfc1 r%d, f%d", rt, fs);
+                  if (fp_mode64) {
+                     t0 = newTemp(Ity_I64);
+                     t1 = newTemp(Ity_I32);
+                     assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+                     assign(t1, unop(Iop_64to32, mkexpr(t0)));
+                     putIReg(rt, mkWidenFrom32(ty, mkexpr(t1), True));
+                  } else
+                     putIReg(rt, unop(Iop_ReinterpF32asI32, getFReg(fs)));
+                  break;
+
+               case 0x1:  /* Doubleword Move from Floating Point DMFC1;
+                             MIPS64 */
+                  DIP("dmfc1 r%d, f%d", rt, fs);
+                  putIReg(rt, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+                  break;
+
+               case 0x6:  /* CTC1 */
+                  DIP("ctc1 r%d, f%d", rt, fs);
+                  t0 = newTemp(Ity_I32);
+                  t1 = newTemp(Ity_I32);
+                  t2 = newTemp(Ity_I32);
+                  t3 = newTemp(Ity_I32);
+                  t4 = newTemp(Ity_I32);
+                  t5 = newTemp(Ity_I32);
+                  t6 = newTemp(Ity_I32);
+                  assign(t0, mkNarrowTo32(ty, getIReg(rt)));
+                  if (fs == 25) {  /* FCCR */
+                     assign(t1, binop(Iop_Shl32, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x000000FE)), mkU8(24)));
+                     assign(t2, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x01000000)));
+                     assign(t3, binop(Iop_Shl32, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x00000001)), mkU8(23)));
+                     assign(t4, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x007FFFFF)));
+                     putFCSR(binop(Iop_Or32, binop(Iop_Or32, mkexpr(t1),
+                                   mkexpr(t2)), binop(Iop_Or32, mkexpr(t3),
+                                   mkexpr(t4))));
+                  } else if (fs == 26) {  /* FEXR */
+                     assign(t1, binop(Iop_And32, getFCSR(), mkU32(0xFFFC0000)));
+                     assign(t2, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x0003F000)));
+                     assign(t3, binop(Iop_And32, getFCSR(), mkU32(0x00000F80)));
+                     assign(t4, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x0000007C)));
+                     assign(t5, binop(Iop_And32, getFCSR(), mkU32(0x00000003)));
+                     putFCSR(binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32,
+                                   mkexpr(t1), mkexpr(t2)), binop(Iop_Or32,
+                                   mkexpr(t3), mkexpr(t4))), mkexpr(t5)));
+                  } else if (fs == 28) {
+                     assign(t1, binop(Iop_And32, getFCSR(), mkU32(0xFE000000)));
+                     assign(t2, binop(Iop_Shl32, binop(Iop_And32, mkexpr(t0),
+                                mkU32(0x00000002)), mkU8(22)));
+                     assign(t3, binop(Iop_And32, getFCSR(), mkU32(0x00FFF000)));
+                     assign(t4, binop(Iop_And32, mkexpr(t0),
+                                mkU32(0x00000F80)));
+                     assign(t5, binop(Iop_And32, getFCSR(), mkU32(0x0000007C)));
+                     assign(t6, binop(Iop_And32, mkexpr(t0),
+                                mkU32(0x00000003)));
+                     putFCSR(binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32,
+                                   mkexpr(t1), mkexpr(t2)), binop(Iop_Or32,
+                                   mkexpr(t3), mkexpr(t4))), binop(Iop_Or32,
+                                   mkexpr(t5), mkexpr(t6))));
+                  } else if (fs == 31) {
+                     putFCSR(mkexpr(t0));
+                  }
+                  break;
+               case 0x2:  /* CFC1 */
+                  DIP("cfc1 r%d, f%d", rt, fs);
+                  t0 = newTemp(Ity_I32);
+                  t1 = newTemp(Ity_I32);
+                  t2 = newTemp(Ity_I32);
+                  t3 = newTemp(Ity_I32);
+                  t4 = newTemp(Ity_I32);
+                  t5 = newTemp(Ity_I32);
+                  t6 = newTemp(Ity_I32);
+                  assign(t0, getFCSR());
+                  if (fs == 0) {
+                     putIReg(rt, mkWidenFrom32(ty,
+                             IRExpr_Get(offsetof(VexGuestMIPS32State,
+                                                 guest_FIR),
+                                       Ity_I32),
+                             False));
+                  } else if (fs == 25) {
+                     assign(t1, mkU32(0x000000FF));
+                     assign(t2, binop(Iop_Shr32, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0xFE000000)), mkU8(25)));
+                     assign(t3, binop(Iop_Shr32, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x00800000)), mkU8(23)));
+                     putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32,
+                                 binop(Iop_Or32, mkexpr(t1), mkexpr(t2)),
+                                 mkexpr(t3)), False));
+                  } else if (fs == 26) {
+                     assign(t1, mkU32(0xFFFFF07C));
+                     assign(t2, binop(Iop_And32, mkexpr(t0),
+                                mkU32(0x0003F000)));
+                     assign(t3, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x0000007C)));
+                     putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32,
+                                 binop(Iop_Or32, mkexpr(t1), mkexpr(t2)),
+                                 mkexpr(t3)), False));
+                  } else if (fs == 28) {
+                     assign(t1, mkU32(0x00000F87));
+                     assign(t2, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x00000F83)));
+                     assign(t3, binop(Iop_Shr32, binop(Iop_And32, mkexpr(t0),
+                                      mkU32(0x01000000)), mkU8(22)));
+                     putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32,
+                                 binop(Iop_Or32, mkexpr(t1), mkexpr(t2)),
+                                 mkexpr(t3)), False));
+                  } else if (fs == 31) {
+                     putIReg(rt, mkWidenFrom32(ty, getFCSR(), False));
+                  }
+                  break;
+               default:
+                  goto decode_failure;
+               }
+               break;
+
+            case 0x21:  /* CVT.D */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("cvt.d.s f%d, f%d", fd, fs);
+                     calculateFCSR(fs, 0, CVTDS, True, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I64);
+                        t1 = newTemp(Ity_I32);
+                        t3 = newTemp(Ity_F32);
+                        t4 = newTemp(Ity_F32);
+                        /* get lo half of FPR */
+                        assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+                        assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+                        assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+                        putFReg(fd, unop(Iop_F32toF64, mkexpr(t3)));
+                     } else
+                        putDReg(fd, unop(Iop_F32toF64, getFReg(fs)));
+                     break;
+
+                  case 0x14:
+                     DIP("cvt.d.w %d, %d", fd, fs);
+                     calculateFCSR(fs, 0, CVTDW, True, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I64);
+                        t1 = newTemp(Ity_I32);
+                        t3 = newTemp(Ity_F32);
+                        t4 = newTemp(Ity_F32);
+                        /* get lo half of FPR */
+                        assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+                        assign(t1, unop(Iop_64to32, mkexpr(t0)));
+                        putDReg(fd,unop(Iop_I32StoF64, mkexpr(t1)));
+                        break;
+                     } else {
+                        t0 = newTemp(Ity_I32);
+                        assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs)));
+                        putDReg(fd, unop(Iop_I32StoF64, mkexpr(t0)));
+                        break;
+                     }
+
+                  case 0x15: {  /* L */
+                     if (fp_mode64) {
+                        DIP("cvt.d.l %d, %d", fd, fs);
+                        calculateFCSR(fs, 0, CVTDL, False, 1);
+                        t0 = newTemp(Ity_I64);
+                        assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+                        putFReg(fd, binop(Iop_I64StoF64,
+                                          get_IR_roundingmode(), mkexpr(t0)));
+                        break;
+                     } else
+                        goto decode_failure;
+                  }
+                  default:
+                     goto decode_failure;
+               }
+               break;  /* CVT.D */
+
+            case 0x20:  /* cvt.s */
+               switch (fmt) {
+                  case 0x14:  /* W */
+                     DIP("cvt.s.w %d, %d", fd, fs);
+                     calculateFCSR(fs, 0, CVTSW, True, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I64);
+                        t1 = newTemp(Ity_I32);
+                        t3 = newTemp(Ity_F32);
+                        t4 = newTemp(Ity_F32);
+                        /* get lo half of FPR */
+                        assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+                        assign(t1, unop(Iop_64to32, mkexpr(t0)));
+                        putFReg(fd, mkWidenFromF32(tyF, binop(Iop_I32StoF32,
+                                    get_IR_roundingmode(), mkexpr(t1))));
+                     } else {
+                        t0 = newTemp(Ity_I32);
+                        assign(t0, unop(Iop_ReinterpF32asI32, getFReg(fs)));
+                        putFReg(fd, binop(Iop_I32StoF32, get_IR_roundingmode(),
+                                    mkexpr(t0)));
+                     }
+                     break;
+
+                  case 0x11:  /* D */
+                     DIP("cvt.s.d %d, %d", fd, fs);
+                     calculateFCSR(fs, 0, CVTSD, False, 1);
+                     t0 = newTemp(Ity_F32);
+                     assign(t0, binop(Iop_F64toF32, get_IR_roundingmode(),
+                                      getDReg(fs)));
+                     putFReg(fd, mkWidenFromF32(tyF, mkexpr(t0)));
+                     break;
+
+                  case 0x15:  /* L */
+                     DIP("cvt.s.l %d, %d", fd, fs);
+                     calculateFCSR(fs, 0, CVTSL, False, 1);
+                     t0 = newTemp(Ity_I64);
+                     assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+                     putFReg(fd, mkWidenFromF32(tyF, binop(Iop_I64StoF32,
+                                 get_IR_roundingmode(), mkexpr(t0))));
+                     break;
+
+                  default:
+                     goto decode_failure;
+               }
+               break;  /* cvt.s */
+
+            case 0x24:  /* cvt.w */
+               switch (fmt) {
+               case 0x10:  /* S */
+                  DIP("cvt.w.s %d, %d", fd, fs);
+                  calculateFCSR(fs, 0, CVTWS, True, 1);
+                  putFReg(fd,
+                          mkWidenFromF32(tyF,
+                                         binop(Iop_RoundF32toInt,
+                                               get_IR_roundingmode(),
+                                               getLoFromF64(tyF, getFReg(fs))))
+                         );
+                  break;
+
+               case 0x11:
+                  DIP("cvt.w.d %d, %d", fd, fs);
+                  calculateFCSR(fs, 0, CVTWD, False, 1);
+                  t0 = newTemp(Ity_I32);
+                  t1 = newTemp(Ity_F32);
+                  assign(t0, binop(Iop_F64toI32S, get_IR_roundingmode(),
+                                   getDReg(fs)));
+                  assign(t1, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+                  putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1)));
+                  break;
+
+               default:
+                  goto decode_failure;
+
+               }
+               break;
+
+            case 0x25:  /* cvt.l */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("cvt.l.s %d, %d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, CVTLS, True, 1);
+                        t0 = newTemp(Ity_I64);
+
+                        assign(t0, binop(Iop_F32toI64S, get_IR_roundingmode(),
+                                         getLoFromF64(tyF, getFReg(fs))));
+
+                        putDReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+
+                  case 0x11: {  /* D */
+                     DIP("cvt.l.d %d, %d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, CVTLD, False, 1);
+                        putDReg(fd, binop(Iop_RoundF64toInt,
+                                get_IR_roundingmode(), getDReg(fs)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+                  }
+
+                  default:
+                     goto decode_failure;
+               }
+               break;
+
+            case 0x0B:  /* FLOOR.L.fmt */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("floor.l.s %d, %d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, FLOORLS, True, 1);
+                        t0 = newTemp(Ity_I64);
+
+                        assign(t0, binop(Iop_F32toI64S, mkU32(0x1),
+                                         getLoFromF64(tyF, getFReg(fs))));
+
+                        putDReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+
+                  case 0x11:  /* D */
+                     DIP("floor.l.d %d, %d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, FLOORLD, False, 1);
+                        putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x1),
+                                          getDReg(fs)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+                  default:
+                     goto decode_failure;
+               }
+               break;
+
+            case 0x0C:  /* ROUND.W.fmt */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("round.w.s f%d, f%d", fd, fs);
+                     calculateFCSR(fs, 0, ROUNDWS, True, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I64);
+                        t1 = newTemp(Ity_I32);
+                        t3 = newTemp(Ity_F32);
+                        t4 = newTemp(Ity_F32);
+                        /* get lo half of FPR */
+                        assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+                        assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+                        assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+                        assign(t4, binop(Iop_RoundF32toInt, mkU32(0x0),
+                                         mkexpr(t3)));
+
+                        putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
+                     } else
+                        putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x0),
+                                          getFReg(fs)));
+                     break;
+
+                  case 0x11:  /* D */
+                     DIP("round.w.d f%d, f%d", fd, fs);
+                     calculateFCSR(fs, 0, ROUNDWD, False, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I32);
+                        assign(t0, binop(Iop_F64toI32S, mkU32(0x0),
+                                         getDReg(fs)));
+                        putFReg(fd, mkWidenFromF32(tyF,
+                                    unop(Iop_ReinterpI32asF32, mkexpr(t0))));
+                     } else {
+                        t0 = newTemp(Ity_I32);
+
+                        assign(t0, binop(Iop_F64toI32S, mkU32(0x0),
+                                         getDReg(fs)));
+
+                        putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+                     }
+                     break;
+                  default:
+                     goto decode_failure;
+
+                  }
+                  break;  /* ROUND.W.fmt */
+
+            case 0x0F:  /* FLOOR.W.fmt */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("floor.w.s f%d, f%d", fd, fs);
+                     calculateFCSR(fs, 0, FLOORWS, True, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I64);
+                        t1 = newTemp(Ity_I32);
+                        t3 = newTemp(Ity_F32);
+                        t4 = newTemp(Ity_F32);
+                        /* get lo half of FPR */
+                        assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+                        assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+                        assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+                        assign(t4, binop(Iop_RoundF32toInt, mkU32(0x1),
+                                         mkexpr(t3)));
+
+                        putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
+                     } else
+                        putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x1),
+                                         getFReg(fs)));
+                     break;
+
+                  case 0x11:  /* D */
+                     DIP("floor.w.d f%d, f%d", fd, fs);
+                     calculateFCSR(fs, 0, FLOORWD, False, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I32);
+                        assign(t0, binop(Iop_F64toI32S, mkU32(0x1),
+                                         getDReg(fs)));
+                        putFReg(fd, mkWidenFromF32(tyF,
+                                    unop(Iop_ReinterpI32asF32, mkexpr(t0))));
+                        break;
+                     } else {
+                        t0 = newTemp(Ity_I32);
+
+                        assign(t0, binop(Iop_F64toI32S, mkU32(0x1),
+                                         getDReg(fs)));
+
+                        putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+                        break;
+                     }
+                  default:
+                     goto decode_failure;
+
+               }
+               break;  /* FLOOR.W.fmt */
+
+            case 0x0D:  /* TRUNC.W */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("trunc.w.s %d, %d", fd, fs);
+                     calculateFCSR(fs, 0, TRUNCWS, True, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I64);
+                        t1 = newTemp(Ity_I32);
+                        t3 = newTemp(Ity_F32);
+                        t4 = newTemp(Ity_F32);
+                        /* get lo half of FPR */
+                        assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+                        assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+                        assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+                        assign(t4, binop(Iop_RoundF32toInt, mkU32(0x3),
+                                         mkexpr(t3)));
+
+                        putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
+                     } else
+                        putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x3),
+                                       getFReg(fs)));
+                     break;
+                  case 0x11:  /* D */
+                     DIP("trunc.w.d %d, %d", fd, fs);
+                     calculateFCSR(fs, 0, TRUNCWD, False, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I32);
+
+                        assign(t0, binop(Iop_F64toI32S, mkU32(0x3),
+                                         getFReg(fs)));
+
+                        putFReg(fd, mkWidenFromF32(tyF,
+                                    unop(Iop_ReinterpI32asF32, mkexpr(t0))));
+                     } else {
+                        t0 = newTemp(Ity_I32);
+
+                        assign(t0, binop(Iop_F64toI32S, mkU32(0x3),
+                                         getDReg(fs)));
+
+                        putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+                     }
+                     break;
+                  default:
+                     goto decode_failure;
+
+               }
+               break;
+
+            case 0x0E:  /* CEIL.W.fmt */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("ceil.w.s %d, %d", fd, fs);
+                     calculateFCSR(fs, 0, CEILWS, True, 1);
+                     if (fp_mode64) {
+                        t0 = newTemp(Ity_I64);
+                        t1 = newTemp(Ity_I32);
+                        t3 = newTemp(Ity_F32);
+                        t4 = newTemp(Ity_F32);
+                        /* get lo half of FPR */
+                        assign(t0, unop(Iop_ReinterpF64asI64, getFReg(fs)));
+
+                        assign(t1, unop(Iop_64to32, mkexpr(t0)));
+
+                        assign(t3, unop(Iop_ReinterpI32asF32, mkexpr(t1)));
+
+                        assign(t4, binop(Iop_RoundF32toInt, mkU32(0x2),
+                                         mkexpr(t3)));
+
+                        putFReg(fd, mkWidenFromF32(tyF, mkexpr(t4)));
+                     } else
+                        putFReg(fd, binop(Iop_RoundF32toInt, mkU32(0x2),
+                                          getFReg(fs)));
+                     break;
+
+                  case 0x11:  /* D */
+                     DIP("ceil.w.d %d, %d", fd, fs);
+                     calculateFCSR(fs, 0, CEILWD, False, 1);
+                     if (!fp_mode64) {
+                        t0 = newTemp(Ity_I32);
+                        assign(t0, binop(Iop_F64toI32S, mkU32(0x2),
+                                         getDReg(fs)));
+                        putFReg(fd, unop(Iop_ReinterpI32asF32, mkexpr(t0)));
+                     } else {
+                        t0 = newTemp(Ity_I32);
+                        assign(t0, binop(Iop_F64toI32S, mkU32(0x2),
+                                         getDReg(fs)));
+                        putFReg(fd, mkWidenFromF32(tyF,
+                                    unop(Iop_ReinterpI32asF32, mkexpr(t0))));
+                     }
+                     break;
+                  default:
+                     goto decode_failure;
+
+               }
+               break;
+
+            case 0x0A:  /* CEIL.L.fmt */
+               switch (fmt) {
+                  case 0x10:  /* S */
+                     DIP("ceil.l.s %d, %d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, CEILLS, True, 1);
+                        t0 = newTemp(Ity_I64);
+
+                        assign(t0, binop(Iop_F32toI64S, mkU32(0x2),
+                                   getLoFromF64(tyF, getFReg(fs))));
+
+                        putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t0)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+
+                  case 0x11:  /* D */
+                     DIP("ceil.l.d %d, %d", fd, fs);
+                     if (fp_mode64) {
+                        calculateFCSR(fs, 0, CEILLD, False, 1);
+                        putDReg(fd, binop(Iop_RoundF64toInt, mkU32(0x2),
+                                          getDReg(fs)));
+                     } else {
+                        ILLEGAL_INSTRUCTON;
+                     }
+                     break;
+
+                  default:
+                     goto decode_failure;
+
+               }
+               break;
+
+            case 0x16:  /* RSQRT.fmt */
+               switch (fmt) {
+                  case 0x10: {  /* S */
+                     DIP("rsqrt.s %d, %d", fd, fs);
+                     IRExpr *rm = get_IR_roundingmode();
+                     putFReg(fd, mkWidenFromF32(tyF, triop(Iop_DivF32, rm,
+                                 unop(Iop_ReinterpI32asF32, mkU32(ONE_SINGLE)),
+                                 binop(Iop_SqrtF32, rm, getLoFromF64(tyF,
+                                 getFReg(fs))))));
+                     break;
+                  }
+                  case 0x11: {  /* D */
+                     DIP("rsqrt.d %d, %d", fd, fs);
+                     IRExpr *rm = get_IR_roundingmode();
+                     putDReg(fd, triop(Iop_DivF64, rm,
+                                 unop(Iop_ReinterpI64asF64,
+                                 mkU64(ONE_DOUBLE)),
+                                 binop(Iop_SqrtF64, rm, getDReg(fs))));
+                     break;
+                  }
+                  default:
+                     goto decode_failure;
+
+               }
+               break;
+
+            default:
+               if (dis_instr_CCondFmt(cins))
+                  break;
+               goto decode_failure;
+
+            }
+
+         }
+      }
+      break;  /* COP1 */
+   case 0x10:  /* COP0 */
+      if (rs == 0) {  /* MFC0 */
+         DIP("mfc0 r%d, r%d, %d", rt, rd, sel);
+         IRTemp   val  = newTemp(Ity_I32);
+         IRExpr** args = mkIRExprVec_3 (IRExpr_BBPTR(), mkU32(rd), mkU32(sel));
+         IRDirty *d = unsafeIRDirty_1_N(val,
+                                        0,
+                                        "mips32_dirtyhelper_mfc0",
+                                        &mips32_dirtyhelper_mfc0,
+                                        args);
+         stmt(IRStmt_Dirty(d));
+         putIReg(rt, mkexpr(val));
+      } else if (rs == 1) {
+         /* Doubleword Move from Coprocessor 0 - DMFC0; MIPS64 */
+         DIP("dmfc0 r%d, r%d, %d", rt, rd, sel);
+         IRTemp   val  = newTemp(Ity_I64);
+         IRExpr** args = mkIRExprVec_3 (IRExpr_BBPTR(), mkU64(rd), mkU64(sel));
+         IRDirty *d = unsafeIRDirty_1_N(val,
+                                        0,
+                                        "mips64_dirtyhelper_dmfc0",
+                                        &mips64_dirtyhelper_dmfc0,
+                                        args);
+         stmt(IRStmt_Dirty(d));
+         putDReg(rt, mkexpr(val));
+      } else
+         goto decode_failure;
+      break;
+
+   case 0x31:  /* LWC1 */
+      /* Load Word to Floating Point - LWC1 (MIPS32) */
+      DIP("lwc1 f%d, %d(r%d)", ft, imm, rs);
+      if (fp_mode64) {
+         t1 = newTemp(Ity_F32);
+         t2 = newTemp(Ity_I64);
+         if (mode64) {
+            t0 = newTemp(Ity_I64);
+            /* new LO */
+            assign(t0, binop(Iop_Add64, getIReg(rs),
+                             mkU64(extend_s_16to64(imm))));
+         } else {
+            t0 = newTemp(Ity_I32);
+            /* new LO */
+            assign(t0, binop(Iop_Add32, getIReg(rs),
+                             mkU32(extend_s_16to32(imm))));
+         }
+         assign(t1, load(Ity_F32, mkexpr(t0)));
+         assign(t2, mkWidenFrom32(Ity_I64, unop(Iop_ReinterpF32asI32,
+                                                mkexpr(t1)), True));
+         putDReg(ft, unop(Iop_ReinterpI64asF64, mkexpr(t2)));
+      } else {
+         t0 = newTemp(Ity_I32);
+         assign(t0, binop(Iop_Add32, getIReg(rs),
+                           mkU32(extend_s_16to32(imm))));
+         putFReg(ft, load(Ity_F32, mkexpr(t0)));
+      }
+      break;
+
+   case 0x39:  /* SWC1 */
+      DIP("swc1 f%d, %d(r%d)", ft, imm, rs);
+      if (fp_mode64) {
+         t0 = newTemp(Ity_I64);
+         t2 = newTemp(Ity_I32);
+         LOAD_STORE_PATTERN;
+         assign(t0, unop(Iop_ReinterpF64asI64, getFReg(ft)));
+         assign(t2, unop(Iop_64to32, mkexpr(t0)));
+         store(mkexpr(t1), unop(Iop_ReinterpI32asF32, mkexpr(t2)));
+      } else {
+         LOAD_STORE_PATTERN;
+         store(mkexpr(t1), getFReg(ft));
+      }
+      break;
+
+   case 0x33:  /* PREF */
+      DIP("pref");
+      break;
+
+   case 0x35:
+      /* Load Doubleword to Floating Point - LDC1 (MIPS32) */
+      DIP("ldc1 f%d, %d(%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      putDReg(ft, load(Ity_F64, mkexpr(t1)));
+      break;
+
+   case 0x3D:
+      /* Store Doubleword from Floating Point - SDC1 */
+      DIP("sdc1 f%d, %d(%d)", ft, imm, rs);
+      LOAD_STORE_PATTERN;
+      store(mkexpr(t1), getDReg(ft));
+      break;
+
+   case 0x23:  /* LW */
+      DIP("lw r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      putIReg(rt, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)), True));
+      break;
+
+   case 0x20:  /* LB */
+      DIP("lb r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      if (mode64)
+         putIReg(rt, unop(Iop_8Sto64, load(Ity_I8, mkexpr(t1))));
+      else
+         putIReg(rt, unop(Iop_8Sto32, load(Ity_I8, mkexpr(t1))));
+      break;
+
+   case 0x24:  /* LBU */
+      DIP("lbu r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      if (mode64)
+         putIReg(rt, unop(Iop_8Uto64, load(Ity_I8, mkexpr(t1))));
+      else
+         putIReg(rt, unop(Iop_8Uto32, load(Ity_I8, mkexpr(t1))));
+      break;
+
+   case 0x21:  /* LH */
+      DIP("lh r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      if (mode64)
+         putIReg(rt, unop(Iop_16Sto64, load(Ity_I16, mkexpr(t1))));
+      else
+         putIReg(rt, unop(Iop_16Sto32, load(Ity_I16, mkexpr(t1))));
+      break;
+
+   case 0x25:  /* LHU */
+      DIP("lhu r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      if (mode64)
+         putIReg(rt, unop(Iop_16Uto64, load(Ity_I16, mkexpr(t1))));
+      else
+         putIReg(rt, unop(Iop_16Uto32, load(Ity_I16, mkexpr(t1))));
+      break;
+
+   case 0x0F:  /* LUI */
+      p = (imm << 16);
+      DIP("lui r%d, imm: 0x%x", rt, imm);
+      if (mode64)
+         putIReg(rt, mkU64(extend_s_32to64(p)));
+      else
+         putIReg(rt, mkU32(p));
+      break;
+
+   case 0x13:  /* COP1X */
+      switch (function) {
+      case 0x0: {  /* LWXC1 */
+         /* Load Word  Indexed to Floating Point - LWXC1 (MIPS32r2) */
+         DIP("lwxc1 f%d, r%d(r%d)", fd, rt, rs);
+         if (fp_mode64) {
+            t0 = newTemp(Ity_I64);
+            t1 = newTemp(Ity_I32);
+            t3 = newTemp(Ity_F32);
+            t4 = newTemp(Ity_I64);
+
+            t2 = newTemp(ty);
+            /* new LO */
+            assign(t2, binop(mode64 ? Iop_Add64 : Iop_Add32, getIReg(rs),
+                             getIReg(rt)));
+            assign(t3, load(Ity_F32, mkexpr(t2)));
+
+            assign(t4, mkWidenFrom32(Ity_I64, unop(Iop_ReinterpF32asI32,
+                                                   mkexpr(t3)), True));
+
+            putFReg(fd, unop(Iop_ReinterpI64asF64, mkexpr(t4)));
+         } else {
+            t0 = newTemp(Ity_I32);
+            assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
+            putFReg(fd, load(Ity_F32, mkexpr(t0)));
+         }
+         break;
+      }
+
+      case 0x1: {  /* LDXC1 */
+         /* Load Doubleword  Indexed to Floating Point
+            LDXC1 (MIPS32r2 and MIPS64) */
+         if (fp_mode64) {
+            DIP("ldxc1 f%d, r%d(r%d)", fd, rt, rs);
+            t0 = newTemp(ty);
+            assign(t0, binop(mode64 ? Iop_Add64 : Iop_Add32, getIReg(rs),
+                             getIReg(rt)));
+            putFReg(fd, load(Ity_F64, mkexpr(t0)));
+            break;
+         } else {
+            t0 = newTemp(Ity_I32);
+            assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
+
+            t1 = newTemp(Ity_I32);
+            assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4)));
+
+#if defined (_MIPSEL)
+            putFReg(fd, load(Ity_F32, mkexpr(t0)));
+            putFReg(fd + 1, load(Ity_F32, mkexpr(t1)));
+#elif defined (_MIPSEB)
+            putFReg(fd + 1, load(Ity_F32, mkexpr(t0)));
+            putFReg(fd, load(Ity_F32, mkexpr(t1)));
+#endif
+            break;
+         }
+      }
+
+      case 0x5:  /* Load Doubleword Indexed Unaligned to Floating Point - LUXC1;
+                    MIPS32r2 */
+         DIP("luxc1 f%d, r%d(r%d)", fd, rt, rs);
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I64);
+         assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
+         assign(t1, binop(Iop_And64, mkexpr(t0),
+                                     mkU64(0xfffffffffffffff8ULL)));
+         putFReg(fd, load(Ity_F64, mkexpr(t1)));
+         break;
+
+      case 0x8: {  /* Store Word Indexed from Floating Point - SWXC1 */
+         DIP("swxc1 f%d, r%d(r%d)", ft, rt, rs);
+         if (fp_mode64) {
+            t0 = newTemp(ty);
+            assign(t0, binop(mode64 ? Iop_Add64 : Iop_Add32, getIReg(rs),
+                             getIReg(rt)));
+            store(mkexpr(t0), getLoFromF64(tyF, getFReg(fs)));
+
+         } else {
+            t0 = newTemp(Ity_I32);
+            assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
+
+            store(mkexpr(t0), getFReg(fs));
+         }
+         break;
+      }
+      case 0x9: {  /* Store Doubleword Indexed from Floating Point - SDXC1 */
+         DIP("sdc1 f%d, %d(%d)", ft, imm, rs);
+         if (fp_mode64) {
+            t0 = newTemp(ty);
+            assign(t0, binop(mode64 ? Iop_Add64 : Iop_Add32, getIReg(rs),
+                             getIReg(rt)));
+            store(mkexpr(t0), getFReg(fs));
+         } else {
+            t0 = newTemp(Ity_I32);
+            assign(t0, binop(Iop_Add32, getIReg(rs), getIReg(rt)));
+
+            t1 = newTemp(Ity_I32);
+            assign(t1, binop(Iop_Add32, mkexpr(t0), mkU32(4)));
+
+#if defined (_MIPSEL)
+            store(mkexpr(t0), getFReg(fs));
+            store(mkexpr(t1), getFReg(fs + 1));
+#elif defined (_MIPSEB)
+            store(mkexpr(t0), getFReg(fs + 1));
+            store(mkexpr(t1), getFReg(fs));
+#endif
+         }
+         break;
+      }
+      case 0xD:  /* Store Doubleword Indexed Unaligned from Floating Point -
+                    SUXC1; MIPS64 MIPS32r2 */
+         DIP("suxc1 f%d, r%d(r%d)", fd, rt, rs);
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I64);
+         assign(t0, binop(Iop_Add64, getIReg(rs), getIReg(rt)));
+         assign(t1, binop(Iop_And64, mkexpr(t0), mkU64(0xfffffffffffffff8ULL)));
+         store(mkexpr(t1), getFReg(fs));
+         break;
+
+      case 0x0F: {
+         DIP("prefx");
+         break;
+      }
+      case 0x20:  {  /* MADD.S */
+         DIP("madd.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+         IRExpr *rm = get_IR_roundingmode();
+         t1 = newTemp(Ity_F32);
+         assign(t1, qop(Iop_MAddF32, rm,
+                        getLoFromF64(tyF, getFReg(fmt)),
+                        getLoFromF64(tyF, getFReg(fs)),
+                        getLoFromF64(tyF, getFReg(ft))));
+         putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1)));
+         break;  /* MADD.S */
+      }
+      case 0x21: {  /* MADD.D */
+         DIP("madd.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+         IRExpr *rm = get_IR_roundingmode();
+         putDReg(fd, qop(Iop_MAddF64, rm, getDReg(fmt), getDReg(fs),
+                         getDReg(ft)));
+         break;  /* MADD.D */
+      }
+      case 0x28: {  /* MSUB.S */
+         DIP("msub.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+         IRExpr *rm = get_IR_roundingmode();
+         t1 = newTemp(Ity_F32);
+         assign(t1, qop(Iop_MSubF32, rm,
+                        getLoFromF64(tyF, getFReg(fmt)),
+                        getLoFromF64(tyF, getFReg(fs)),
+                        getLoFromF64(tyF, getFReg(ft))));
+         putFReg(fd, mkWidenFromF32(tyF, mkexpr(t1)));
+         break;  /* MSUB.S */
+      }
+      case 0x29: {  /* MSUB.D */
+         DIP("msub.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+         IRExpr *rm = get_IR_roundingmode();
+         putDReg(fd, qop(Iop_MSubF64, rm, getDReg(fmt), getDReg(fs),
+                         getDReg(ft)));
+         break;  /* MSUB.D */
+      }
+      case 0x30: {  /* NMADD.S */
+         DIP("nmadd.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+         IRExpr *rm = get_IR_roundingmode();
+         t1 = newTemp(Ity_F32);
+         assign(t1, qop(Iop_MAddF32, rm,
+                        getLoFromF64(tyF, getFReg(fmt)),
+                        getLoFromF64(tyF, getFReg(fs)),
+                        getLoFromF64(tyF, getFReg(ft))));
+
+         putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t1))));
+         break;  /* NMADD.S */
+      }
+      case 0x31: {  /* NMADD.D */
+         DIP("nmadd.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+         IRExpr *rm = get_IR_roundingmode();
+         t1 = newTemp(Ity_F64);
+         assign(t1, qop(Iop_MAddF64, rm, getDReg(fmt), getDReg(fs),
+                        getDReg(ft)));
+         putDReg(fd, unop(Iop_NegF64, mkexpr(t1)));
+         break;  /* NMADD.D */
+      }
+      case 0x38: {  /* NMSUB.S */
+         DIP("nmsub.s f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+         IRExpr *rm = get_IR_roundingmode();
+         t1 = newTemp(Ity_F32);
+         assign(t1, qop(Iop_MSubF32, rm,
+                        getLoFromF64(tyF, getFReg(fmt)),
+                        getLoFromF64(tyF, getFReg(fs)),
+                        getLoFromF64(tyF, getFReg(ft))));
+
+         putFReg(fd, mkWidenFromF32(tyF, unop(Iop_NegF32, mkexpr(t1))));
+         break;  /* NMSUB.S */
+      }
+      case 0x39: {  /* NMSUB.D */
+         DIP("nmsub.d f%d, f%d, f%d, f%d", fd, fmt, fs, ft);
+         IRExpr *rm = get_IR_roundingmode();
+         t1 = newTemp(Ity_F64);
+         assign(t1, qop(Iop_MSubF64, rm, getDReg(fmt), getDReg(fs),
+                        getDReg(ft)));
+         putDReg(fd, unop(Iop_NegF64, mkexpr(t1)));
+         break;  /* NMSUB.D */
+      }
+
+      default:
+         goto decode_failure;
+      }
+      break;
+
+   case 0x22:  /* LWL */
+      DIP("lwl r%d, %d(r%d)", rt, imm, rs);
+      if (mode64) {
+         /* t1 = addr */
+         t1 = newTemp(Ity_I64);
+#if defined (_MIPSEL)
+         assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+         /* t2 = word addr */
+         /* t4 = addr mod 4 */
+         LWX_SWX_PATTERN64;
+
+         /* t3 = word content - shifted */
+         t3 = newTemp(Ity_I32);
+         assign(t3, binop(Iop_Shl32, mkNarrowTo32(ty, load(Ity_I64,
+                          mkexpr(t2))), narrowTo(Ity_I8, binop(Iop_Shl32,
+                    binop(Iop_Sub32, mkU32(0x03), mkexpr(t4)), mkU8(3)))));
+
+         /* rt content - adjusted */
+         t5 = newTemp(Ity_I32);
+         assign(t5, binop(Iop_And32,
+                          mkNarrowTo32(ty, getIReg(rt)),
+                          binop(Iop_Shr32,
+                                mkU32(0x00FFFFFF),
+                                      narrowTo(Ity_I8, binop(Iop_Mul32,
+                                                             mkU32(0x08),
+                                                             mkexpr(t4))))));
+
+         putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t5),
+                                             mkexpr(t3)), True));
+#elif defined (_MIPSEB)
+         assign(t1, binop(Iop_Xor64, mkU64(0x3),
+                binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm)))));
+         /* t2 = word addr */
+         /* t4 = addr mod 4 */
+         LWX_SWX_PATTERN64;
+
+         /* t3 = word content - shifted */
+         t3 = newTemp(Ity_I32);
+         assign(t3, binop(Iop_Shl32, unop(Iop_64HIto32, load(Ity_I64,
+                          mkexpr(t2))), narrowTo(Ity_I8, binop(Iop_Shl32,
+                    binop(Iop_Sub32, mkU32(0x03), mkexpr(t4)), mkU8(3)))));
+
+         /* rt content - adjusted */
+         t5 = newTemp(Ity_I32);
+         assign(t5, binop(Iop_And32,
+                          mkNarrowTo32(ty, getIReg(rt)),
+                          binop(Iop_Shr32,
+                                mkU32(0x00FFFFFF),
+                                      narrowTo(Ity_I8, binop(Iop_Mul32,
+                                                             mkU32(0x08),
+                                                             mkexpr(t4))))));
+
+         putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t5),
+                                             mkexpr(t3)), True));
+#endif
+      } else {
+         /* t1 = addr */
+         t1 = newTemp(Ity_I32);
+#if defined (_MIPSEL)
+         assign(t1, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm))));
+#elif defined (_MIPSEB)
+         assign(t1, binop(Iop_Xor32, mkU32(0x3), binop(Iop_Add32, getIReg(rs),
+                                     mkU32(extend_s_16to32(imm)))));
+#endif
+
+         /* t2 = word addr */
+         /* t4 = addr mod 4 */
+         LWX_SWX_PATTERN;
+
+         /* t3 = word content - shifted */
+         t3 = newTemp(Ity_I32);
+         assign(t3, binop(Iop_Shl32, load(Ity_I32, mkexpr(t2)), narrowTo(Ity_I8,
+                    binop(Iop_Shl32, binop(Iop_Sub32, mkU32(0x03), mkexpr(t4)),
+                    mkU8(3)))));
+
+         /* rt content  - adjusted */
+         t5 = newTemp(Ity_I32);
+         assign(t5, binop(Iop_And32,
+                          getIReg(rt),
+                          binop(Iop_Shr32,
+                                mkU32(0x00FFFFFF),
+                                      narrowTo(Ity_I8, binop(Iop_Mul32,
+                                                             mkU32(0x08),
+                                                             mkexpr(t4))))));
+
+         putIReg(rt, binop(Iop_Or32, mkexpr(t5), mkexpr(t3)));
+      }
+      break;
+
+   case 0x26:  /* LWR */
+      DIP("lwr r%d, %d(r%d)", rt, imm, rs);
+      if (mode64) {
+         /* t1 = addr */
+         t1 = newTemp(Ity_I64);
+#if defined (_MIPSEL)
+         assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+         /* t2 = word addr */
+         /* t4 = addr mod 4 */
+         LWX_SWX_PATTERN64;
+
+         /* t3 = word content - shifted */
+         t3 = newTemp(Ity_I32);
+         assign(t3, binop(Iop_Shr32, mkNarrowTo32(ty, load(Ity_I64,mkexpr(t2))),
+                    narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(3)))));
+
+         /* rt content  - adjusted */
+         t5 = newTemp(Ity_I32);
+         assign(t5, binop(Iop_And32, mkNarrowTo32(ty, getIReg(rt)),
+                unop(Iop_Not32, binop(Iop_Shr32, mkU32(0xFFFFFFFF),
+                narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(0x3)))))));
+
+         putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t5),
+                                       mkexpr(t3)), True));
+#elif defined (_MIPSEB)
+         assign(t1, binop(Iop_Xor64, mkU64(0x3), binop(Iop_Add64, getIReg(rs),
+                          mkU64(extend_s_16to64(imm)))));
+         /* t2 = word addr */
+         /* t4 = addr mod 4 */
+         LWX_SWX_PATTERN64;
+
+         /* t3 = word content - shifted */
+         t3 = newTemp(Ity_I32);
+         assign(t3, binop(Iop_Shr32, unop(Iop_64HIto32, load(Ity_I64,mkexpr(t2))),
+                    narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(3)))));
+
+         /* rt content  - adjusted */
+         t5 = newTemp(Ity_I32);
+         assign(t5, binop(Iop_And32, mkNarrowTo32(ty, getIReg(rt)),
+                unop(Iop_Not32, binop(Iop_Shr32, mkU32(0xFFFFFFFF),
+                narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4), mkU8(0x3)))))));
+
+         putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t5),
+                                       mkexpr(t3)), True));
+#endif
+
+      } else {
+         /* t1 = addr */
+         t1 = newTemp(Ity_I32);
+#if defined (_MIPSEL)
+         assign(t1, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm))));
+#elif defined (_MIPSEB)
+         assign(t1, binop(Iop_Xor32, mkU32(0x3), binop(Iop_Add32, getIReg(rs),
+                                     mkU32(extend_s_16to32(imm)))));
+#endif
+
+         /* t2 = word addr */
+         /* t4 = addr mod 4 */
+         LWX_SWX_PATTERN;
+
+         /* t3 = word content - shifted */
+         t3 = newTemp(Ity_I32);
+         assign(t3, binop(Iop_Shr32, load(Ity_I32, mkexpr(t2)),
+                    narrowTo(Ity_I8, binop(Iop_Shl32, mkexpr(t4),
+                    mkU8(3)))));
+
+         /* rt content  - adjusted */
+         t5 = newTemp(Ity_I32);
+         assign(t5, binop(Iop_And32, getIReg(rt), unop(Iop_Not32,
+                    binop(Iop_Shr32, mkU32(0xFFFFFFFF), narrowTo(Ity_I8,
+                          binop(Iop_Shl32, mkexpr(t4), mkU8(0x3)))))));
+
+         putIReg(rt, binop(Iop_Or32, mkexpr(t5), mkexpr(t3)));
+      }
+      break;
+
+   case 0x2B:  /* SW */
+      DIP("sw r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      store(mkexpr(t1), mkNarrowTo32(ty, getIReg(rt)));
+      break;
+
+   case 0x2C: {  /* SDL rt, offset(base) MIPS64 */
+      DIP("sdl r%u, %d(r%u)", rt, (Int) imm, rs);
+      vassert(mode64);
+      IRTemp A_byte = newTemp(Ity_I8);
+      IRTemp B_byte = newTemp(Ity_I8);
+      IRTemp C_byte = newTemp(Ity_I8);
+      IRTemp D_byte = newTemp(Ity_I8);
+      IRTemp E_byte = newTemp(Ity_I8);
+      IRTemp F_byte = newTemp(Ity_I8);
+      IRTemp G_byte = newTemp(Ity_I8);
+      IRTemp H_byte = newTemp(Ity_I8);
+      IRTemp B_pos  = newTemp(Ity_I64);
+      IRTemp C_pos  = newTemp(Ity_I64);
+      IRTemp D_pos  = newTemp(Ity_I64);
+      IRTemp E_pos  = newTemp(Ity_I64);
+      IRTemp F_pos  = newTemp(Ity_I64);
+      IRTemp G_pos  = newTemp(Ity_I64);
+
+      /* H byte */
+      assign(H_byte, getByteFromReg(rt, 0));
+      /* G byte */
+      assign(G_byte, getByteFromReg(rt, 1));
+      /* F byte */
+      assign(F_byte, getByteFromReg(rt, 2));
+      /* E byte */
+      assign(E_byte, getByteFromReg(rt, 3));
+      /* D byte */
+      assign(D_byte, getByteFromReg(rt, 4));
+      /* C byte */
+      assign(C_byte, getByteFromReg(rt, 5));
+      /* B byte */
+      assign(B_byte, getByteFromReg(rt, 6));
+      /* A byte */
+      assign(A_byte, getByteFromReg(rt, 7));
+
+      /* t1 = addr */
+      t1 = newTemp(Ity_I64);
+      assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+
+      /* t2 = word addr */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFF8ULL)));
+
+      /* t3 = addr mod 8 */
+      t3 = newTemp(Ity_I64);
+      assign(t3, binop(Iop_And64, mkexpr(t1), mkU64(0x7)));
+
+#if defined (_MIPSEL)
+      /* Calculate X_byte position. */
+      assign(B_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkexpr(t3), mkU64(0x1)),
+                               mkU64(0x0),
+                               mkU64(0x1)));
+
+      assign(C_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkexpr(t3), mkU64(0x2)),
+                               mkU64(0x0),
+                               mkU64(0x2)));
+
+      assign(D_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkexpr(t3), mkU64(0x3)),
+                               mkU64(0x0),
+                               mkU64(0x3)));
+
+      assign(E_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkexpr(t3), mkU64(0x4)),
+                               mkU64(0x0),
+                               mkU64(0x4)));
+
+      assign(F_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkexpr(t3), mkU64(0x5)),
+                               mkU64(0x0),
+                               mkU64(0x5)));
+
+      assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x7)),
+                               mkU64(0x1),
+                               mkU64(0x0)));
+
+      /* Store X_byte on the right place. */
+      store(mkexpr(t2), mkexpr(H_byte));
+      store(binop(Iop_Add64, mkexpr(t2), mkexpr(G_pos)), mkexpr(G_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(F_pos)), mkexpr(F_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(E_pos)), mkexpr(E_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(D_pos)), mkexpr(D_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(C_pos)), mkexpr(C_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(B_pos)), mkexpr(B_byte));
+      store(mkexpr(t1), mkexpr(A_byte));
+
+#else /* _MIPSEB */
+      /* Calculate X_byte position. */
+      assign(B_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x7)),
+                               mkU64(0x0),
+                               mkU64(0x1)));
+
+      assign(C_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkexpr(t3), mkU64(0x6)),
+                               mkU64(0x2),
+                               mkU64(0x0)));
+
+      assign(D_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkexpr(t3), mkU64(0x5)),
+                               mkU64(0x3),
+                               mkU64(0x0)));
+
+      assign(E_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkexpr(t3), mkU64(0x4)),
+                               mkU64(0x4),
+                               mkU64(0x0)));
+
+      assign(F_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkexpr(t3), mkU64(0x3)),
+                               mkU64(0x5),
+                               mkU64(0x0)));
+
+      assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x0)),
+                               mkU64(0x6),
+                               mkU64(0x7)));
+
+      /* Store X_byte on the right place. */
+      store(binop(Iop_Add64, mkexpr(t2), mkU64(0x7)), mkexpr(H_byte));
+      store(binop(Iop_Add64, mkexpr(t2), mkexpr(G_pos)), mkexpr(G_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(F_pos)), mkexpr(F_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(E_pos)), mkexpr(E_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(D_pos)), mkexpr(D_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(C_pos)), mkexpr(C_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(B_pos)), mkexpr(B_byte));
+      store(mkexpr(t1), mkexpr(A_byte));
+#endif
+
+      break;
+   }
+
+   case 0x2D: {
+      /* SDR rt, offset(base) - MIPS64 */
+      vassert(mode64);
+      DIP("sdr r%u, %d(r%u)", rt, imm, rs);
+      IRTemp A_byte = newTemp(Ity_I8);
+      IRTemp B_byte = newTemp(Ity_I8);
+      IRTemp C_byte = newTemp(Ity_I8);
+      IRTemp D_byte = newTemp(Ity_I8);
+      IRTemp E_byte = newTemp(Ity_I8);
+      IRTemp F_byte = newTemp(Ity_I8);
+      IRTemp G_byte = newTemp(Ity_I8);
+      IRTemp H_byte = newTemp(Ity_I8);
+      IRTemp B_pos  = newTemp(Ity_I64);
+      IRTemp C_pos  = newTemp(Ity_I64);
+      IRTemp D_pos  = newTemp(Ity_I64);
+      IRTemp E_pos  = newTemp(Ity_I64);
+      IRTemp F_pos  = newTemp(Ity_I64);
+      IRTemp G_pos  = newTemp(Ity_I64);
+
+      /* H byte */
+      assign(H_byte, getByteFromReg(rt, 0));
+      /* G byte */
+      assign(G_byte, getByteFromReg(rt, 1));
+      /* F byte */
+      assign(F_byte, getByteFromReg(rt, 2));
+      /* E byte */
+      assign(E_byte, getByteFromReg(rt, 3));
+      /* D byte */
+      assign(D_byte, getByteFromReg(rt, 4));
+      /* C byte */
+      assign(C_byte, getByteFromReg(rt, 5));
+      /* B byte */
+      assign(B_byte, getByteFromReg(rt, 6));
+      /* A byte */
+      assign(A_byte, getByteFromReg(rt, 7));
+
+      /* t1 = addr */
+      t1 = newTemp(Ity_I64);
+      assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+
+      /* t2 = word addr */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFF8ULL)));
+
+      /* t3 = addr mod 8 */
+      t3 = newTemp(Ity_I64);
+      assign(t3, binop(Iop_And64, mkexpr(t1), mkU64(0x7)));
+
+#if defined (_MIPSEL)
+      /* Calculate X_byte position. */
+      assign(B_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x1), mkexpr(t3)),
+                               mkU64(0x0),
+                               mkU64(0x6)));
+
+      assign(C_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x2), mkexpr(t3)),
+                               mkU64(0x0),
+                               mkU64(0x5)));
+
+      assign(D_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x3), mkexpr(t3)),
+                               mkU64(0x0),
+                               mkU64(0x4)));
+
+      assign(E_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x4), mkexpr(t3)),
+                               mkU64(0x0),
+                               mkU64(0x3)));
+
+      assign(F_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x5), mkexpr(t3)),
+                               mkU64(0x0),
+                               mkU64(0x2)));
+
+      assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x7)),
+                               mkU64(0x0),
+                               mkU64(0x1)));
+
+      /* Store X_byte on the right place. */
+      store(binop(Iop_Add64, mkexpr(t2), mkU64(0x7)), mkexpr(A_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(B_pos)), mkexpr(B_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(C_pos)), mkexpr(C_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(D_pos)), mkexpr(D_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(E_pos)), mkexpr(E_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(F_pos)), mkexpr(F_byte));
+      store(binop(Iop_Add64, mkexpr(t1), mkexpr(G_pos)), mkexpr(G_byte));
+      store(mkexpr(t1), mkexpr(H_byte));
+
+#else /* _MIPSEB */
+      /* Calculate X_byte position. */
+      assign(B_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x5), mkexpr(t3)),
+                               mkU64(0x6),
+                               mkU64(0x0)));
+
+      assign(C_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x4), mkexpr(t3)),
+                               mkU64(0x5),
+                               mkU64(0x0)));
+
+      assign(D_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x3), mkexpr(t3)),
+                               mkU64(0x4),
+                               mkU64(0x0)));
+
+      assign(E_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x2), mkexpr(t3)),
+                               mkU64(0x3),
+                               mkU64(0x0)));
+
+      assign(F_pos, IRExpr_ITE(binop(Iop_CmpLT64U, mkU64(0x1), mkexpr(t3)),
+                               mkU64(0x2),
+                               mkU64(0x0)));
+
+      assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x0)),
+                               mkU64(0x0),
+                               mkU64(0x1)));
+
+      /* Store X_byte on the right place. */
+      store(mkexpr(t2), mkexpr(A_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(B_pos)), mkexpr(B_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(C_pos)), mkexpr(C_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(D_pos)), mkexpr(D_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(E_pos)), mkexpr(E_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(F_pos)), mkexpr(F_byte));
+      store(binop(Iop_Sub64, mkexpr(t1), mkexpr(G_pos)), mkexpr(G_byte));
+      store(mkexpr(t1), mkexpr(H_byte));
+#endif
+      break;
+   }
+
+   case 0x28:  /* SB */
+      DIP("sb r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      store(mkexpr(t1), narrowTo(Ity_I8, getIReg(rt)));
+      break;
+
+   case 0x29:  /* SH */
+      DIP("sh r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      store(mkexpr(t1), narrowTo(Ity_I16, getIReg(rt)));
+      break;
+
+   case 0x2A:  /* SWL */
+      DIP("swl r%d, %d(r%d)", rt, imm, rs);
+      if (mode64) {
+         IRTemp E_byte = newTemp(Ity_I8);
+         IRTemp F_byte = newTemp(Ity_I8);
+         IRTemp G_byte = newTemp(Ity_I8);
+         IRTemp H_byte = newTemp(Ity_I8);
+         IRTemp F_pos  = newTemp(Ity_I64);
+         IRTemp G_pos  = newTemp(Ity_I64);
+
+         /* H byte */
+         assign(H_byte, getByteFromReg(rt, 0));
+         /* G byte */
+         assign(G_byte, getByteFromReg(rt, 1));
+         /* F byte */
+         assign(F_byte, getByteFromReg(rt, 2));
+         /* E byte */
+         assign(E_byte, getByteFromReg(rt, 3));
+
+         /* t1 = addr */
+         t1 = newTemp(Ity_I64);
+         assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+
+         /* t2 = word addr */
+         t2 = newTemp(Ity_I64);
+         assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFFCULL)));
+
+         /* t3 = addr mod 4 */
+         t3 = newTemp(Ity_I64);
+         assign(t3, binop(Iop_And64, mkexpr(t1), mkU64(0x3)));
+
+#if defined (_MIPSEL)
+         /* Calculate X_byte position. */
+         assign(F_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x0)),
+                                  mkU64(0x0),
+                                  mkU64(0x1)));
+
+         assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x3)),
+                                  mkU64(0x1),
+                                  mkU64(0x0)));
+
+         /* Store X_byte on the right place. */
+         store(mkexpr(t2), mkexpr(H_byte));
+         store(binop(Iop_Add64, mkexpr(t2), mkexpr(G_pos)), mkexpr(G_byte));
+         store(binop(Iop_Sub64, mkexpr(t1), mkexpr(F_pos)), mkexpr(F_byte));
+         store(mkexpr(t1), mkexpr(E_byte));
+
+#else    /* _MIPSEB */
+         /* Calculate X_byte position. */
+         assign(F_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x3)),
+                                  mkU64(0x0),
+                                  mkU64(0x1)));
+
+         assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x0)),
+                                  mkU64(0x2),
+                                  mkU64(0x3)));
+
+         store(binop(Iop_Add64, mkexpr(t2), mkU64(3)), mkexpr(H_byte));
+         store(binop(Iop_Add64, mkexpr(t2), mkexpr(G_pos)), mkexpr(G_byte));
+         store(binop(Iop_Add64, mkexpr(t1), mkexpr(F_pos)), mkexpr(F_byte));
+         store(mkexpr(t1), mkexpr(E_byte));
+
+#endif
+      } else {
+         IRTemp E_byte = newTemp(Ity_I8);
+         IRTemp F_byte = newTemp(Ity_I8);
+         IRTemp G_byte = newTemp(Ity_I8);
+         IRTemp H_byte = newTemp(Ity_I8);
+         IRTemp F_pos  = newTemp(Ity_I32);
+         IRTemp G_pos  = newTemp(Ity_I32);
+
+         /* H byte */
+         assign(H_byte, getByteFromReg(rt, 0));
+         /* G byte */
+         assign(G_byte, getByteFromReg(rt, 1));
+         /* F byte */
+         assign(F_byte, getByteFromReg(rt, 2));
+         /* E byte */
+         assign(E_byte, getByteFromReg(rt, 3));
+
+         /* t1 = addr */
+         t1 = newTemp(Ity_I32);
+         assign(t1, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm))));
+
+         /* t2 = word addr */
+         t2 = newTemp(Ity_I32);
+         assign(t2, binop(Iop_And32, mkexpr(t1), mkU32(0xFFFFFFFCULL)));
+
+         /* t3 = addr mod 4 */
+         t3 = newTemp(Ity_I32);
+         assign(t3, binop(Iop_And32, mkexpr(t1), mkU32(0x3)));
+
+#if defined (_MIPSEL)
+         /* Calculate X_byte position. */
+         assign(F_pos, IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t3), mkU32(0x0)),
+                                  mkU32(0x0),
+                                  mkU32(0x1)));
+
+         assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t3), mkU32(0x3)),
+                                  mkU32(0x1),
+                                  mkU32(0x0)));
+
+         /* Store X_byte on the right place. */
+         store(mkexpr(t2), mkexpr(H_byte));
+         store(binop(Iop_Add32, mkexpr(t2), mkexpr(G_pos)), mkexpr(G_byte));
+         store(binop(Iop_Sub32, mkexpr(t1), mkexpr(F_pos)), mkexpr(F_byte));
+         store(mkexpr(t1), mkexpr(E_byte));
+
+#else    /* _MIPSEB */
+         /* Calculate X_byte position. */
+         assign(F_pos, IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t3), mkU32(0x3)),
+                                  mkU32(0x0),
+                                  mkU32(0x1)));
+
+         assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t3), mkU32(0x0)),
+                                  mkU32(0x2),
+                                  mkU32(0x3)));
+
+         store(binop(Iop_Add32, mkexpr(t2), mkU32(3)), mkexpr(H_byte));
+         store(binop(Iop_Add32, mkexpr(t2), mkexpr(G_pos)), mkexpr(G_byte));
+         store(binop(Iop_Add32, mkexpr(t1), mkexpr(F_pos)), mkexpr(F_byte));
+         store(mkexpr(t1), mkexpr(E_byte));
+
+#endif
+      }
+      break;
+
+   case 0x2E:  /* SWR */
+      DIP("swr r%d, %d(r%d)", rt, imm, rs);
+      if (mode64) {
+         IRTemp E_byte = newTemp(Ity_I8);
+         IRTemp F_byte = newTemp(Ity_I8);
+         IRTemp G_byte = newTemp(Ity_I8);
+         IRTemp H_byte = newTemp(Ity_I8);
+         IRTemp F_pos  = newTemp(Ity_I64);
+         IRTemp G_pos  = newTemp(Ity_I64);
+
+         /* H byte */
+         assign(H_byte, getByteFromReg(rt, 0));
+         /* G byte */
+         assign(G_byte, getByteFromReg(rt, 1));
+         /* F byte */
+         assign(F_byte, getByteFromReg(rt, 2));
+         /* E byte */
+         assign(E_byte, getByteFromReg(rt, 3));
+
+         /* t1 = addr */
+         t1 = newTemp(Ity_I64);
+         assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+
+         /* t2 = word addr */
+         t2 = newTemp(Ity_I64);
+         assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFFCULL)));
+
+         /* t3 = addr mod 4 */
+         t3 = newTemp(Ity_I64);
+         assign(t3, binop(Iop_And64, mkexpr(t1), mkU64(0x3)));
+
+#if defined (_MIPSEL)
+         /* Calculate X_byte position. */
+         assign(F_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x0)),
+                                  mkU64(0x2),
+                                  mkU64(0x3)));
+
+         assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x3)),
+                                  mkU64(0x0),
+                                  mkU64(0x1)));
+
+         /* Store X_byte on the right place. */
+         store(binop(Iop_Add64, mkexpr(t2), mkU64(0x3)), mkexpr(E_byte));
+         store(binop(Iop_Add64, mkexpr(t2), mkexpr(F_pos)), mkexpr(F_byte));
+         store(binop(Iop_Add64, mkexpr(t1), mkexpr(G_pos)), mkexpr(G_byte));
+         store(mkexpr(t1), mkexpr(H_byte));
+
+#else    /* _MIPSEB */
+         /* Calculate X_byte position. */
+         assign(F_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x3)),
+                                  mkU64(0x1),
+                                  mkU64(0x0)));
+
+         assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ64, mkexpr(t3), mkU64(0x0)),
+                                  mkU64(0x0),
+                                  mkU64(0x1)));
+
+         /* Store X_byte on the right place. */
+         store(mkexpr(t2), mkexpr(E_byte));
+         store(binop(Iop_Add64, mkexpr(t2), mkexpr(F_pos)), mkexpr(F_byte));
+         store(binop(Iop_Sub64, mkexpr(t1), mkexpr(G_pos)), mkexpr(G_byte));
+         store(mkexpr(t1), mkexpr(H_byte));
+#endif
+      } else {
+         IRTemp E_byte = newTemp(Ity_I8);
+         IRTemp F_byte = newTemp(Ity_I8);
+         IRTemp G_byte = newTemp(Ity_I8);
+         IRTemp H_byte = newTemp(Ity_I8);
+         IRTemp F_pos  = newTemp(Ity_I32);
+         IRTemp G_pos  = newTemp(Ity_I32);
+
+         /* H byte */
+         assign(H_byte, getByteFromReg(rt, 0));
+         /* G byte */
+         assign(G_byte, getByteFromReg(rt, 1));
+         /* F byte */
+         assign(F_byte, getByteFromReg(rt, 2));
+         /* E byte */
+         assign(E_byte, getByteFromReg(rt, 3));
+
+         /* t1 = addr */
+         t1 = newTemp(Ity_I32);
+         assign(t1, binop(Iop_Add32, getIReg(rs), mkU32(extend_s_16to32(imm))));
+
+         /* t2 = word addr */
+         t2 = newTemp(Ity_I32);
+         assign(t2, binop(Iop_And32, mkexpr(t1), mkU32(0xFFFFFFFCULL)));
+
+         /* t3 = addr mod 4 */
+         t3 = newTemp(Ity_I32);
+         assign(t3, binop(Iop_And32, mkexpr(t1), mkU32(0x3)));
+
+#if defined (_MIPSEL)
+         /* Calculate X_byte position. */
+         assign(F_pos, IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t3), mkU32(0x0)),
+                                  mkU32(0x2),
+                                  mkU32(0x3)));
+
+         assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t3), mkU32(0x3)),
+                                  mkU32(0x0),
+                                  mkU32(0x1)));
+
+         /* Store X_byte on the right place. */
+         store(binop(Iop_Add32, mkexpr(t2), mkU32(0x3)), mkexpr(E_byte));
+         store(binop(Iop_Add32, mkexpr(t2), mkexpr(F_pos)), mkexpr(F_byte));
+         store(binop(Iop_Add32, mkexpr(t1), mkexpr(G_pos)), mkexpr(G_byte));
+         store(mkexpr(t1), mkexpr(H_byte));
+
+#else    /* _MIPSEB */
+         /* Calculate X_byte position. */
+         assign(F_pos, IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t3), mkU32(0x3)),
+                                  mkU32(0x1),
+                                  mkU32(0x0)));
+
+         assign(G_pos, IRExpr_ITE(binop(Iop_CmpEQ32, mkexpr(t3), mkU32(0x0)),
+                                  mkU32(0x0),
+                                  mkU32(0x1)));
+
+         /* Store X_byte on the right place. */
+         store(mkexpr(t2), mkexpr(E_byte));
+         store(binop(Iop_Add32, mkexpr(t2), mkexpr(F_pos)), mkexpr(F_byte));
+         store(binop(Iop_Sub32, mkexpr(t1), mkexpr(G_pos)), mkexpr(G_byte));
+         store(mkexpr(t1), mkexpr(H_byte));
+#endif
+      }
+      break;
+
+   case 0x1C:  /* Special2 */
+      switch (function) {
+         /* Cavium Specific instructions */
+         case 0x03: case 0x32: case 0x33:  /* DMUL, CINS , CINS32 */
+         case 0x3A: case 0x3B: case 0x2B:  /* EXT,  EXT32, SNE    */
+         /* CVM Compare Instructions */
+         case 0x2A: case 0x2E: case 0x2F:  /* SEQ,  SEQI,  SNEI   */
+         /* CPU Load, Store, Memory, and Control Instructions */
+         case 0x18: case 0x19:             /* SAA, SAAD */
+         case 0x1F:                        /* LAA, LAAD, LAI, LAID */
+         case 0x28: case 0x2C: case 0x2D:  /* BADDU, POP, DPOP */
+            if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
+               if (dis_instr_CVM(cins))
+                  break;
+               goto decode_failure;
+            } else {
+               goto decode_failure;
+            }
+         break;
+
+         case 0x02: {  /* MUL */
+            DIP("mul r%d, r%d, r%d", rd, rs, rt);
+            if (mode64) {
+               IRTemp tmpRs32 = newTemp(Ity_I32);
+               IRTemp tmpRt32 = newTemp(Ity_I32);
+               IRTemp tmpRes = newTemp(Ity_I32);
+
+               assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+               assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+               assign(tmpRes, binop(Iop_Mul32,
+                                    mkexpr(tmpRs32), mkexpr(tmpRt32)));
+               putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpRes), True));
+            } else
+               putIReg(rd, binop(Iop_Mul32, getIReg(rs), getIReg(rt)));
+            break;
+         }
+
+         case 0x00: {  /* MADD */
+            if (mode64) {
+               DIP("madd r%d, r%d", rs, rt);
+               t1 = newTemp(Ity_I32);
+               t2 = newTemp(Ity_I32);
+               t3 = newTemp(Ity_I64);
+               t4 = newTemp(Ity_I64);
+               t5 = newTemp(Ity_I64);
+               t6 = newTemp(Ity_I32);
+
+               assign(t1, mkNarrowTo32(ty, getHI()));
+               assign(t2, mkNarrowTo32(ty, getLO()));
+
+               assign(t3, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)),
+                                             mkNarrowTo32(ty, getIReg(rt))));
+
+               assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2)));
+               assign(t5, binop(Iop_Add64, mkexpr(t3), mkexpr(t4)));
+
+               putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True));
+               putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True));
+            } else {
+               if ( (1 <= ac) && ( 3 >= ac) ) {
+                  if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+                     /* If DSP is present -> DSP ASE MADD */
+                     UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                     if (0 != retVal ) {
+                        goto decode_failure_dsp;
+                     }
+                     break;
+                  } else {
+                     goto decode_failure_dsp;
+                  }
+               } else {
+                  DIP("madd r%d, r%d", rs, rt);
+                  t1 = newTemp(Ity_I32);
+                  t2 = newTemp(Ity_I32);
+                  t3 = newTemp(Ity_I64);
+                  t4 = newTemp(Ity_I32);
+                  t5 = newTemp(Ity_I32);
+                  t6 = newTemp(Ity_I32);
+
+                  assign(t1, getHI());
+                  assign(t2, getLO());
+
+                  assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt)));
+
+                  assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32,
+                                                               mkexpr(t3))));
+
+                  assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4),
+                                              unop(Iop_64to32, mkexpr(t3)))));
+                  assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1)));
+
+                  putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32,
+                                                          mkexpr(t3))));
+                  putLO(mkexpr(t4));
+                  break;
+               }
+            }
+            break;
+         }
+
+      case 0x01: {  /* MADDU */
+         if (mode64) {
+            DIP("maddu r%d, r%d", rs, rt);
+            t1 = newTemp(Ity_I32);
+            t2 = newTemp(Ity_I32);
+            t3 = newTemp(Ity_I64);
+            t4 = newTemp(Ity_I64);
+            t5 = newTemp(Ity_I64);
+            t6 = newTemp(Ity_I32);
+
+            assign(t1, mkNarrowTo32(ty, getHI()));
+            assign(t2, mkNarrowTo32(ty, getLO()));
+
+            assign(t3, binop(Iop_MullU32, mkNarrowTo32(ty, getIReg(rs)),
+                                          mkNarrowTo32(ty, getIReg(rt))));
+
+            assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2)));
+            assign(t5, binop(Iop_Add64, mkexpr(t3), mkexpr(t4)));
+
+            putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True));
+            putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True));
+         } else {
+            if ( (1 <= ac) && ( 3 >= ac) ) {
+               if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+                  /* If DSP is present -> DSP ASE MADDU */
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+            } else {
+               DIP("maddu r%d, r%d", rs, rt);
+               t1 = newTemp(Ity_I32);
+               t2 = newTemp(Ity_I32);
+               t3 = newTemp(Ity_I64);
+               t4 = newTemp(Ity_I32);
+               t5 = newTemp(Ity_I32);
+               t6 = newTemp(Ity_I32);
+
+               assign(t1, getHI());
+               assign(t2, getLO());
+
+               assign(t3, binop(Iop_MullU32, getIReg(rs), getIReg(rt)));
+
+               assign(t4, binop(Iop_Add32, mkexpr(t2), unop(Iop_64to32,
+                                                            mkexpr(t3))));
+               assign(t5, unop(Iop_1Uto32, binop(Iop_CmpLT32U, mkexpr(t4),
+                                           unop(Iop_64to32, mkexpr(t3)))));
+               assign(t6, binop(Iop_Add32, mkexpr(t5), mkexpr(t1)));
+
+               putHI(binop(Iop_Add32, mkexpr(t6), unop(Iop_64HIto32,
+                                                      mkexpr(t3))));
+               putLO(mkexpr(t4));
+               break;
+            }
+         }
+         break;
+      }
+
+      case 0x04: {  /* MSUB */
+         if (mode64) {
+            DIP("msub r%d, r%d", rs, rt);
+            t1 = newTemp(Ity_I32);
+            t2 = newTemp(Ity_I32);
+            t3 = newTemp(Ity_I64);
+            t4 = newTemp(Ity_I64);
+            t5 = newTemp(Ity_I64);
+            t6 = newTemp(Ity_I32);
+
+            assign(t1, mkNarrowTo32(ty, getHI()));
+            assign(t2, mkNarrowTo32(ty, getLO()));
+
+            assign(t3, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)),
+                                          mkNarrowTo32(ty, getIReg(rt))));
+
+            assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2)));
+            assign(t5, binop(Iop_Sub64, mkexpr(t4), mkexpr(t3)));
+
+            putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True));
+            putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True));
+         } else {
+            if ( (1 <= ac) && ( 3 >= ac) ) {
+               if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+                  /* If DSP is present -> DSP ASE MSUB */
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+            } else {
+               DIP("msub r%d, r%d", rs, rt);
+               t1 = newTemp(Ity_I32);
+               t2 = newTemp(Ity_I32);
+               t3 = newTemp(Ity_I64);
+               t4 = newTemp(Ity_I32);
+               t5 = newTemp(Ity_I1);
+               t6 = newTemp(Ity_I32);
+
+               assign(t1, getHI());
+               assign(t2, getLO());
+
+               assign(t3, binop(Iop_MullS32, getIReg(rs), getIReg(rt)));
+               assign(t4, unop(Iop_64to32, mkexpr(t3)));  /* new lo */
+
+               /* if lo<lo(mul) hi = hi - 1 */
+               assign(t5, binop(Iop_CmpLT32U,
+                                 mkexpr(t2),
+                                 mkexpr(t4)));
+
+               assign(t6, IRExpr_ITE(mkexpr(t5),
+                                       binop(Iop_Sub32, mkexpr(t1), mkU32(0x1)),
+                                       mkexpr(t1)));
+
+               putHI(binop(Iop_Sub32, mkexpr(t6), unop(Iop_64HIto32,
+                                                      mkexpr(t3))));
+               putLO(binop(Iop_Sub32, mkexpr(t2), mkexpr(t4)));
+               break;
+            }
+         }
+         break;
+      }
+
+      case 0x05: {  /* MSUBU */
+         if (mode64) {
+            DIP("msubu r%d, r%d", rs, rt);
+            t1 = newTemp(Ity_I32);
+            t2 = newTemp(Ity_I32);
+            t3 = newTemp(Ity_I64);
+            t4 = newTemp(Ity_I64);
+            t5 = newTemp(Ity_I64);
+            t6 = newTemp(Ity_I32);
+
+            assign(t1, mkNarrowTo32(ty, getHI()));
+            assign(t2, mkNarrowTo32(ty, getLO()));
+
+            assign(t3, binop(Iop_MullU32, mkNarrowTo32(ty, getIReg(rs)),
+                                          mkNarrowTo32(ty, getIReg(rt))));
+
+            assign(t4, binop(Iop_32HLto64, mkexpr(t1), mkexpr(t2)));
+            assign(t5, binop(Iop_Sub64, mkexpr(t4), mkexpr(t3)));
+
+            putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t5)), True));
+            putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t5)), True));
+         } else {
+            if ( (1 <= ac) && ( 3 >= ac) ) {
+               if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+                  /* If DSP is present -> DSP ASE MSUBU */
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+            } else {
+               DIP("msubu r%d, r%d", rs, rt);
+               t1 = newTemp(Ity_I32);
+               t2 = newTemp(Ity_I32);
+               t3 = newTemp(Ity_I64);
+               t4 = newTemp(Ity_I32);
+               t5 = newTemp(Ity_I1);
+               t6 = newTemp(Ity_I32);
+
+               assign(t1, getHI());
+               assign(t2, getLO());
+
+               assign(t3, binop(Iop_MullU32, getIReg(rs), getIReg(rt)));
+               assign(t4, unop(Iop_64to32, mkexpr(t3)));  /* new lo */
+
+               /* if lo<lo(mul) hi = hi - 1 */
+               assign(t5, binop(Iop_CmpLT32U,
+                                 mkexpr(t2),
+                                 mkexpr(t4)));
+
+               assign(t6, IRExpr_ITE(mkexpr(t5),
+                                    binop(Iop_Sub32,
+                                          mkexpr(t1),
+                                          mkU32(0x1)),
+                                    mkexpr(t1)));
+
+               putHI(binop(Iop_Sub32, mkexpr(t6), unop(Iop_64HIto32,
+                                                      mkexpr(t3))));
+               putLO(binop(Iop_Sub32, mkexpr(t2), mkexpr(t4)));
+               break;
+            }
+         }
+         break;
+      }
+
+      case 0x6:  /* dmul MIPS64 - Netlogic */
+         DIP("dmul r%u, r%u, r%u", rd, rs, rt);
+         t0 = newTemp(Ity_I128);
+
+         assign(t0, binop(Iop_MullU64, getIReg(rs), getIReg(rt)));
+
+         putIReg(rd, unop(Iop_128to64, mkexpr(t0)));
+         break;
+
+      case 0x10:  /* LDADDW - Swap Word - Netlogic */
+         DIP("ldaddw r%u, r%u", rt, rs);
+         t0 = newTemp(Ity_I32);
+         t1 = newTemp(Ity_I32);
+         t2 = newTemp(Ity_I32);
+         t3 = newTemp(Ity_I64);
+         t4 = newTemp(Ity_I32);
+         t5 = newTemp(Ity_I32);
+         t6 = newTemp(Ity_I32);
+
+         /* v = GPR[rt] */
+         assign(t0, mkNarrowTo32(ty, getIReg(rt)));
+
+         /* GPR[rt] = memory[base]; */
+         assign(t1, load(Ity_I32, getIReg(rs)));
+         putIReg(rt, mkWidenFrom32(ty, mkexpr(t1), True));
+
+         /* memory[base] = memory[base] + v; */
+         store(getIReg(rs), binop(Iop_Add32, mkexpr(t0), mkexpr(t1)));
+         break;
+
+      case 0x12:  /* LDADDD - Swap Word - Netlogic */
+         DIP("ldaddw r%u, r%u", rt, rs);
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I64);
+
+         /*  v = GPR[rt] */
+         assign(t0, getIReg(rt));
+
+         /* GPR[rt] = memory[base]; */
+         assign(t1, load(Ity_I64, getIReg(rs)));
+         putIReg(rt, mkexpr(t1));
+
+         /* memory[base] = memory[base] + v; */
+         store(getIReg(rs), binop(Iop_Add64, mkexpr(t0), mkexpr(t1)));
+         break;
+
+      case 0x14:  /* SWAPW - Swap Word - Netlogic */
+         DIP("swapw r%u, r%u", rt, rs);
+         t0 = newTemp(Ity_I32);
+         t1 = newTemp(Ity_I32);
+         assign(t0, mkNarrowTo32(ty, getIReg(rt)));
+         assign(t1, load(Ity_I32, getIReg(rs)));
+         putIReg(rt, mkWidenFrom32(ty, mkexpr(t1), True));
+         store(getIReg(rs), mkexpr(t0));
+         break;
+
+      case 0x16:  /* SWAPD - Swap Double - Netlogic */
+         DIP("swapw r%u, r%u", rt, rs);
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I64);
+         assign(t0, getIReg(rt));
+         assign(t1, load(Ity_I64, getIReg(rs)));
+         putIReg(rt, mkexpr(t1));
+         store(getIReg(rs), mkexpr(t0));
+         break;
+
+      case 0x20: {  /* CLZ */
+         DIP("clz r%d, r%d", rd, rs);
+         if (mode64) {
+            IRTemp tmpClz32 = newTemp(Ity_I32);
+            IRTemp tmpRs32 = newTemp(Ity_I32);
+
+            assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+            assign(tmpClz32, unop(Iop_Clz32, mkexpr(tmpRs32)));
+            putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpClz32), True));
+         } else {
+            t1 = newTemp(Ity_I1);
+            assign(t1, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0)));
+            putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                   mkU32(0x00000020),
+                                   unop(Iop_Clz32, getIReg(rs))));
+         }
+         break;
+      }
+
+      case 0x21: {  /* CLO */
+         DIP("clo r%d, r%d", rd, rs);
+         if (mode64) {
+            IRTemp tmpClo32 = newTemp(Ity_I32);
+            IRTemp tmpRs32 = newTemp(Ity_I32);
+            assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+
+            t1 = newTemp(Ity_I1);
+            assign(t1, binop(Iop_CmpEQ32, mkexpr(tmpRs32), mkU32(0xffffffff)));
+            assign(tmpClo32, IRExpr_ITE(mkexpr(t1),
+                      mkU32(0x00000020),
+                      unop(Iop_Clz32, unop(Iop_Not32, mkexpr(tmpRs32)))));
+
+            putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpClo32), True));
+            break;
+         } else {
+            t1 = newTemp(Ity_I1);
+            assign(t1, binop(Iop_CmpEQ32, getIReg(rs), mkU32(0xffffffff)));
+            putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                   mkU32(0x00000020),
+                                   unop(Iop_Clz32,
+                                        unop(Iop_Not32, getIReg(rs)))));
+            break;
+         }
+      }
+
+      case 0x24:  /* Count Leading Zeros in Doubleword - DCLZ; MIPS64 */
+         DIP("dclz r%d, r%d", rd, rs);
+         t1 = newTemp(Ity_I1);
+         assign(t1, binop(Iop_CmpEQ64, getIReg(rs), mkU64(0)));
+         putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                     mkU64(0x00000040),
+                     unop(Iop_Clz64, getIReg(rs))));
+         break;
+
+      case 0x25:  /* Count Leading Ones in Doubleword - DCLO; MIPS64 */
+         DIP("dclo r%d, r%d", rd, rs);
+         t1 = newTemp(Ity_I1);
+         assign(t1, binop(Iop_CmpEQ64, getIReg(rs),
+                                        mkU64(0xffffffffffffffffULL)));
+         putIReg(rd, IRExpr_ITE(mkexpr(t1),
+                                mkU64(0x40),
+                                unop(Iop_Clz64, unop(Iop_Not64,
+                                                     getIReg(rs)))));
+         break;
+
+      default:
+         goto decode_failure;
+      }
+      break;
+
+   case 0x1F:  /* Special3 */
+      switch (function) {
+         case 0x01: {
+            /* Doubleword Extract Bit Field - DEXTM; MIPS64r2 */
+            msb = get_msb(cins);
+            lsb = get_lsb(cins);
+            size = msb + 1;
+            UInt srcPos = lsb;
+            UInt dstSz = msb + 33;
+            t1 = newTemp(Ity_I64);
+            DIP("dextm r%u, r%u, %d, %d", rt, rs, lsb, msb + 1);
+
+            UChar lsAmt = 64 - (srcPos + dstSz);  /* left shift amount; */
+            UChar rsAmt = 64 - dstSz;  /* right shift amount; */
+
+            assign(t1, binop(Iop_Shl64, getIReg(rs), mkU8(lsAmt)));
+            putIReg(rt, binop(Iop_Shr64, mkexpr(t1), mkU8(rsAmt)));
+
+            break;
+         }
+         case 0x02: {
+            /* Doubleword Extract Bit Field Upper - DEXTU; MIPS64r2 */
+            msb = get_msb(cins);
+            lsb = get_lsb(cins);
+            size = msb + 1;
+            UInt srcPos = lsb + 32;
+            UInt dstSz = msb + 1;
+            DIP("dextu r%u, r%u, %d, %d", rt, rs, srcPos, dstSz);
+            t1 = newTemp(Ity_I64);
+
+            vassert(srcPos >= 32 && srcPos < 64);
+            vassert(dstSz > 0 && dstSz <= 32);
+            vassert((srcPos + dstSz) > 32 && (srcPos + dstSz) <= 64);
+
+            UChar lsAmt = 64 - (srcPos + dstSz);  /* left shift amount; */
+            UChar rsAmt = 64 - dstSz;  /* right shift amount; */
+
+            assign(t1, binop(Iop_Shl64, getIReg(rs), mkU8(lsAmt)));
+            putIReg(rt, binop(Iop_Shr64, mkexpr(t1), mkU8(rsAmt)));
+            break;
+         }
+         case 0x05: {
+            /* Doubleword Insert Bit Field Middle - DINSM; MIPS64r2 */
+            msb = get_msb(cins);
+            lsb = get_lsb(cins);
+            size = msb + 1;
+            UInt dstPos = lsb;
+            UInt srcSz = msb - lsb + 33;
+            t1 = newTemp(ty);
+            t2 = newTemp(ty);
+            t3 = newTemp(ty);
+            t4 = newTemp(ty);
+            IRTemp tmpT1 = newTemp(ty);
+            IRTemp tmpT2 = newTemp(ty);
+            IRTemp tmpT3 = newTemp(ty);
+            IRTemp tmpT4 = newTemp(ty);
+            IRTemp tmpT5 = newTemp(ty);
+            IRTemp tmpT6 = newTemp(ty);
+            IRTemp tmpT7 = newTemp(ty);
+            IRTemp tmpRs = newTemp(ty);
+            IRTemp tmpRt = newTemp(ty);
+            IRTemp tmpRd = newTemp(ty);
+
+            assign(tmpRs, getIReg(rs));
+            assign(tmpRt, getIReg(rt));
+            DIP("dinsm r%u, r%u, %d, %d", rt, rs, lsb, msb);
+
+            UChar lsAmt = dstPos + srcSz - 1;   /* left shift amount; */
+            UChar rsAmt = dstPos + srcSz - 1;   /* right shift amount; */
+
+            assign(t1, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(rsAmt)));
+            assign(tmpT1, binop(Iop_Shr64, mkexpr(t1), mkU8(1)));
+            assign(t2, binop(Iop_Shl64, mkexpr(tmpT1), mkU8(lsAmt)));
+            assign(tmpT2, binop(Iop_Shl64, mkexpr(t2), mkU8(1)));
+
+            lsAmt = 63 - dstPos; /* left shift amount; */
+            rsAmt = 63 - dstPos; /* right shift amount; */
+
+            assign(t3, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(lsAmt)));
+            assign(tmpT3, binop(Iop_Shl64, mkexpr(t3), mkU8(1)));
+            assign(t4, binop(Iop_Shr64, mkexpr(tmpT3), mkU8(rsAmt)));
+            assign(tmpT4, binop(Iop_Shr64, mkexpr(t4), mkU8(1)));
+
+            /* extract size from src register */
+            lsAmt = 64 - srcSz;  /* left shift amount; */
+            rsAmt = 64 - (lsb + srcSz);   /* right shift amount; */
+
+            assign(tmpT5, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt)));
+            assign(tmpT6, binop(Iop_Shr64, mkexpr(tmpT5), mkU8(rsAmt)));
+
+            assign(tmpT7, binop(Iop_Or64, mkexpr(tmpT2), mkexpr(tmpT4)));
+            assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT6), mkexpr(tmpT7)));
+            putIReg(rt, mkexpr(tmpRd));
+            break;
+         }
+         case 0x06: {
+            /* Doubleword Insert Bit Field Upper - DINSU; MIPS64r2 */
+            msb = get_msb(cins);
+            lsb = get_lsb(cins);
+            size = msb + 1;
+            UInt dstPos = lsb + 32;
+            UInt srcSz = msb - lsb + 1;
+            IRTemp tmpT1 = newTemp(ty);
+            IRTemp tmpT2 = newTemp(ty);
+            IRTemp tmpT3 = newTemp(ty);
+            IRTemp tmpT4 = newTemp(ty);
+            IRTemp tmpT5 = newTemp(ty);
+            IRTemp tmpT6 = newTemp(ty);
+            IRTemp tmpT7 = newTemp(ty);
+            IRTemp tmpT8 = newTemp(ty);
+            IRTemp tmpT9 = newTemp(ty);
+            IRTemp tmpRs = newTemp(ty);
+            IRTemp tmpRt = newTemp(ty);
+            IRTemp tmpRd = newTemp(ty);
+
+            assign(tmpRs, getIReg(rs));
+            assign(tmpRt, getIReg(rt));
+            DIP("dinsu r%u, r%u, %d, %d", rt, rs, lsb, msb);
+
+            UChar lsAmt = 64 - srcSz;  /* left shift amount; */
+            UChar rsAmt = 64 - (dstPos + srcSz);  /* right shift amount; */
+            assign(tmpT1, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt)));
+            assign(tmpT2, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(rsAmt)));
+
+            lsAmt = 64 - dstPos;  /* left shift amount; */
+            rsAmt = 64 - dstPos;  /* right shift amount; */
+            assign(tmpT3, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(lsAmt)));
+            assign(tmpT4, binop(Iop_Shr64, mkexpr(tmpT3), mkU8(rsAmt)));
+
+            lsAmt = dstPos;  /* left shift amount; */
+            rsAmt = srcSz;  /* right shift amount; */
+            assign(tmpT5, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(rsAmt)));
+            assign(tmpT6, binop(Iop_Shr64, mkexpr(tmpT5), mkU8(lsAmt)));
+
+            assign(tmpT7, binop(Iop_Shl64, mkexpr(tmpT6), mkU8(rsAmt)));
+            assign(tmpT8, binop(Iop_Shl64, mkexpr(tmpT7), mkU8(lsAmt)));
+
+            assign(tmpT9, binop(Iop_Or64, mkexpr(tmpT8), mkexpr(tmpT4)));
+            assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT2), mkexpr(tmpT9)));
+            putIReg(rt, mkexpr(tmpRd));
+            break;
+         }
+         case 0x07: {
+            /* Doubleword Insert Bit Field - DINS; MIPS64r2 */
+            IRTemp tmp1 = newTemp(ty);
+            IRTemp tmpT1 = newTemp(ty);
+            IRTemp tmpT2 = newTemp(ty);
+            IRTemp tmpT3 = newTemp(ty);
+            IRTemp tmpT4 = newTemp(ty);
+            IRTemp tmpT5 = newTemp(ty);
+            IRTemp tmpT6 = newTemp(ty);
+            IRTemp tmpT7 = newTemp(ty);
+            IRTemp tmpT8 = newTemp(ty);
+            IRTemp tmpT9 = newTemp(ty);
+            IRTemp tmp = newTemp(ty);
+            IRTemp tmpRs = newTemp(ty);
+            IRTemp tmpRt = newTemp(ty);
+            IRTemp tmpRd = newTemp(ty);
+
+            assign(tmpRs, getIReg(rs));
+            assign(tmpRt, getIReg(rt));
+
+            msb = get_msb(cins);
+            lsb = get_lsb(cins);
+            size = msb + 1;
+            DIP("dins r%u, r%u, %d, %d", rt, rs, lsb,
+                msb - lsb + 1);
+            UChar lsAmt = 63 - lsb;  /* left shift amount; */
+            UChar rsAmt = 63 - lsb;  /* right shift amount; */
+            assign(tmp, binop(Iop_Shl64, mkexpr(tmpRt), mkU8(lsAmt)));
+            assign(tmpT1, binop(Iop_Shl64, mkexpr(tmp), mkU8(1)));
+            assign(tmp1, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(rsAmt)));
+            assign(tmpT2, binop(Iop_Shr64, mkexpr(tmp1), mkU8(1)));
+
+            lsAmt = msb;  /* left shift amount; */
+            rsAmt = 1;  /* right shift amount; */
+            assign(tmpT3, binop(Iop_Shr64, mkexpr(tmpRt), mkU8(rsAmt)));
+            assign(tmpT4, binop(Iop_Shr64, mkexpr(tmpT3), mkU8(lsAmt)));
+            assign(tmpT5, binop(Iop_Shl64, mkexpr(tmpT4), mkU8(rsAmt)));
+            assign(tmpT6, binop(Iop_Shl64, mkexpr(tmpT5), mkU8(lsAmt)));
+
+            lsAmt = 64 - (msb - lsb + 1);  /* left shift amount; */
+            rsAmt = 64 - (msb + 1);  /* right shift amount; */
+            assign(tmpT7, binop(Iop_Shl64, mkexpr(tmpRs), mkU8(lsAmt)));
+            assign(tmpT8, binop(Iop_Shr64, mkexpr(tmpT7), mkU8(rsAmt)));
+
+            assign(tmpT9, binop(Iop_Or64, mkexpr(tmpT2), mkexpr(tmpT8)));
+            assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT6), mkexpr(tmpT9)));
+            putIReg(rt, mkexpr(tmpRd));
+            break;
+         }
+      case 0x24:  /* DBSHFL */
+         lsb = get_lsb(cins);
+         IRTemp tmpRs = newTemp(ty);
+         IRTemp tmpRt = newTemp(ty);
+         IRTemp tmpRd = newTemp(ty);
+         assign(tmpRs, getIReg(rs));
+         assign(tmpRt, getIReg(rt));
+         switch (lsb) {
+            case 0x02: {  /* DSBH */
+               DIP("dsbh r%u, r%u", rd, rt);
+               IRTemp tmpT1 = newTemp(ty);
+               IRTemp tmpT2 = newTemp(ty);
+               IRTemp tmpT3 = newTemp(ty);
+               IRTemp tmpT4 = newTemp(ty);
+               IRTemp tmpT5 = newTemp(Ity_I64);
+               IRTemp tmpT6 = newTemp(ty);
+               assign(tmpT5, mkU64(0xFF00FF00FF00FF00ULL));
+               assign(tmpT6, mkU64(0x00FF00FF00FF00FFULL));
+               assign(tmpT1, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT5)));
+               assign(tmpT2, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(8)));
+               assign(tmpT3, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT6)));
+               assign(tmpT4, binop(Iop_Shl64, mkexpr(tmpT3), mkU8(8)));
+               assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT4), mkexpr(tmpT2)));
+               putIReg(rd, mkexpr(tmpRd));
+               break;
+            }
+            case 0x05: {  /* DSHD */
+               DIP("dshd r%u, r%u\n", rd, rt);
+               IRTemp tmpT1 = newTemp(ty);
+               IRTemp tmpT2 = newTemp(ty);
+               IRTemp tmpT3 = newTemp(ty);
+               IRTemp tmpT4 = newTemp(ty);
+               IRTemp tmpT5 = newTemp(Ity_I64);
+               IRTemp tmpT6 = newTemp(ty);
+               IRTemp tmpT7 = newTemp(ty);
+               IRTemp tmpT8 = newTemp(ty);
+               IRTemp tmpT9 = newTemp(ty);
+               assign(tmpT5, mkU64(0xFFFF0000FFFF0000ULL));
+               assign(tmpT6, mkU64(0x0000FFFF0000FFFFULL));
+               assign(tmpT1, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT5)));
+               assign(tmpT2, binop(Iop_Shr64, mkexpr(tmpT1), mkU8(16)));
+               assign(tmpT3, binop(Iop_And64, mkexpr(tmpRt), mkexpr(tmpT6)));
+               assign(tmpT4, binop(Iop_Shl64, mkexpr(tmpT3), mkU8(16)));
+               assign(tmpT7, binop(Iop_Or64, mkexpr(tmpT4), mkexpr(tmpT2)));
+               assign(tmpT8, binop(Iop_Shl64, mkexpr(tmpT7), mkU8(32)));
+               assign(tmpT9, binop(Iop_Shr64, mkexpr(tmpT7), mkU8(32)));
+               assign(tmpRd, binop(Iop_Or64, mkexpr(tmpT8), mkexpr(tmpT9)));
+               putIReg(rd, mkexpr(tmpRd));
+               break;
+            }
+         default:
+            vex_printf("\nop6o10 = %d", lsb);
+            goto decode_failure;;
+         }
+         break;
+      case 0x3B: {  /* RDHWR */
+         DIP("rdhwr r%d, r%d", rt, rd);
+            if (rd == 29) {
+               putIReg(rt, getULR());
+#if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 2))
+            } else if (rd == 1) {
+               if (mode64) {
+                  IRTemp   val  = newTemp(Ity_I64);
+                  IRExpr** args = mkIRExprVec_2 (mkU64(rt), mkU64(rd));
+                  IRDirty *d = unsafeIRDirty_1_N(val,
+                                                 0,
+                                                 "mips64_dirtyhelper_rdhwr",
+                                                 &mips64_dirtyhelper_rdhwr,
+                                                 args);
+                  stmt(IRStmt_Dirty(d));
+                  putIReg(rt, mkexpr(val));
+               } else {
+                  IRTemp   val  = newTemp(Ity_I32);
+                  IRExpr** args = mkIRExprVec_2 (mkU32(rt), mkU32(rd));
+                  IRDirty *d = unsafeIRDirty_1_N(val,
+                                                 0,
+                                                 "mips32_dirtyhelper_rdhwr",
+                                                 &mips32_dirtyhelper_rdhwr,
+                                                 args);
+                  stmt(IRStmt_Dirty(d));
+                  putIReg(rt, mkexpr(val));
+               }
+#endif
+            } else
+               goto decode_failure;
+            break;
+         }
+      case 0x04:  /* INS */
+         msb = get_msb(cins);
+         lsb = get_lsb(cins);
+         size = msb - lsb + 1;
+         DIP("ins size:%d msb:%d lsb:%d", size, msb, lsb);
+
+         vassert(lsb + size <= 32);
+         vassert(lsb + size > 0);
+
+         /* put size bits from rs at the pos in temporary */
+         t0 = newTemp(Ity_I32);
+         t3 = newTemp(Ity_I32);
+         /* shift left for 32 - size to clear leading bits and get zeros
+            at the end */
+         assign(t0, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rs)),
+                          mkU8(32 - size)));
+         /* now set it at pos */
+         t1 = newTemp(Ity_I32);
+         assign(t1, binop(Iop_Shr32, mkexpr(t0), mkU8(32 - size - lsb)));
+
+         if (lsb > 0) {
+            t2 = newTemp(Ity_I32);
+            /* clear everything but lower pos bits from rt */
+            assign(t2, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rt)),
+                             mkU8(32 - lsb)));
+            assign(t3, binop(Iop_Shr32, mkexpr(t2), mkU8(32 - lsb)));
+         } else
+            assign(t3, mkU32(0));
+
+         if (msb < 31) {
+            t4 = newTemp(Ity_I32);
+            /* clear everything but upper msb + 1 bits from rt */
+            assign(t4, binop(Iop_Shr32, mkNarrowTo32(ty, getIReg(rt)),
+                             mkU8(msb + 1)));
+            t5 = newTemp(Ity_I32);
+            assign(t5, binop(Iop_Shl32, mkexpr(t4), mkU8(msb + 1)));
+
+            /* now combine these registers */
+            if (lsb > 0) {
+               t6 = newTemp(Ity_I32);
+               assign(t6, binop(Iop_Or32, mkexpr(t5), mkexpr(t1)));
+               putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t6),
+                                                   mkexpr(t3)), True));
+            } else {
+               putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t1),
+                                                   mkexpr(t5)), True));
+            }
+         } else {
+            putIReg(rt, mkWidenFrom32(ty, binop(Iop_Or32, mkexpr(t1),
+                                                mkexpr(t3)), True));
+         }
+         break;
+
+      case 0x00:  /* EXT */
+         msb = get_msb(cins);
+         lsb = get_lsb(cins);
+         size = msb + 1;
+         DIP("ext size:%d msb:%d lsb:%d", size, msb, lsb);
+         vassert(lsb + size <= 32);
+         vassert(lsb + size > 0);
+         /* put size bits from rs at the top of in temporary */
+         if (lsb + size < 32) {
+            t0 = newTemp(Ity_I32);
+            assign(t0, binop(Iop_Shl32, mkNarrowTo32(ty, getIReg(rs)),
+                             mkU8(32 - lsb - size)));
+
+            putIReg(rt, mkWidenFrom32(ty, binop(Iop_Shr32, mkexpr(t0),
+                                                mkU8(32 - size)), True));
+         } else {
+            putIReg(rt, mkWidenFrom32(ty, binop(Iop_Shr32,
+                                                mkNarrowTo32(ty, getIReg(rs)),
+                                                mkU8(32 - size)), True));
+         }
+         break;
+
+      case 0x03:  /* Doubleword Extract Bit Field - DEXT; MIPS64r2 */
+         msb = get_msb(cins);
+         lsb = get_lsb(cins);
+         size = msb + 1;
+         DIP("dext r%u, r%u, %d, %d", rt, rs, lsb, msb + 1);
+         t1 = newTemp(Ity_I64);
+         vassert(lsb >= 0 && lsb < 32);
+         vassert(size > 0 && size <= 32);
+         vassert((lsb + size) > 0 && (lsb + size) <= 63);
+
+         UChar lsAmt = 63 - (lsb + msb);  /* left shift amount; */
+         UChar rsAmt = 63 - msb;  /* right shift amount; */
+
+         assign(t1, binop(Iop_Shl64, getIReg(rs), mkU8(lsAmt)));
+         putIReg(rt, binop(Iop_Shr64, mkexpr(t1), mkU8(rsAmt)));
+
+         break;
+
+      case 0x20:  /* BSHFL */
+         switch (sa) {
+            case 0x02:  /* WSBH */
+               DIP("wsbh r%d, r%d", rd, rt);
+               t0 = newTemp(Ity_I32);
+               t1 = newTemp(Ity_I32);
+               t2 = newTemp(Ity_I32);
+               t3 = newTemp(Ity_I32);
+               assign(t0, binop(Iop_Shl32, binop(Iop_And32, mkNarrowTo32(ty,
+                                           getIReg(rt)), mkU32(0x00FF0000)),
+                                           mkU8(0x8)));
+               assign(t1, binop(Iop_Shr32, binop(Iop_And32, mkNarrowTo32(ty,
+                                getIReg(rt)), mkU32(0xFF000000)), mkU8(0x8)));
+               assign(t2, binop(Iop_Shl32, binop(Iop_And32, mkNarrowTo32(ty,
+                                getIReg(rt)), mkU32(0x000000FF)), mkU8(0x8)));
+               assign(t3, binop(Iop_Shr32, binop(Iop_And32, mkNarrowTo32(ty,
+                                getIReg(rt)), mkU32(0x0000FF00)), mkU8(0x8)));
+               putIReg(rd, mkWidenFrom32(ty, binop(Iop_Or32, binop(Iop_Or32,
+                                         mkexpr(t0), mkexpr(t1)),
+                                         binop(Iop_Or32, mkexpr(t2),
+                                         mkexpr(t3))), True));
+               break;
+
+            case 0x10:  /* SEB */
+               DIP("seb r%d, r%d", rd, rt);
+               if (mode64)
+                  putIReg(rd, unop(Iop_8Sto64, unop(Iop_64to8, getIReg(rt))));
+               else
+                  putIReg(rd, unop(Iop_8Sto32, unop(Iop_32to8, getIReg(rt))));
+               break;
+
+            case 0x18:  /* SEH */
+               DIP("seh r%d, r%d", rd, rt);
+               if (mode64)
+                  putIReg(rd, unop(Iop_16Sto64, unop(Iop_64to16, getIReg(rt))));
+               else
+                  putIReg(rd, unop(Iop_16Sto32, unop(Iop_32to16, getIReg(rt))));
+               break;
+
+            default:
+               goto decode_failure;
+
+         }
+         break;  /* BSHFL */
+
+      /* --- MIPS32(r2) DSP ASE(r2) / Cavium Specific (LX) instructions --- */
+      case 0xA:  /* LX */
+         if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
+            if (dis_instr_CVM(cins))
+               break;
+            goto decode_failure;
+         }
+      case 0xC:  /* INSV */
+      case 0x38: {  /* EXTR.W */
+         if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+            UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+            if (0 != retVal ) {
+               goto decode_failure_dsp;
+            }
+            break;
+         } else {
+            goto decode_failure_dsp;
+         }
+         break;
+      }
+      case 0x10: {  /* ADDU.QB */
+         switch(sa) {
+            case  0xC:  /* SUBU_S.PH */
+            case  0xD:  /* ADDU_S.PH */
+            case 0x1E: {  /* MULQ_S.PH */
+               if (VEX_MIPS_PROC_DSP2(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+            default: {
+               if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+         }
+         break;
+      }
+      case 0x11: {  /* CMPU.EQ.QB */
+         switch(sa) {
+            case 0x18:  /* CMPGDU.EQ.QB */
+            case 0x19:  /* CMPGDU.LT.QB */
+            case 0x1A:  /* CMPGDU.LE.QB */
+            case 0x0D:  /* PRECR.QB.PH */
+            case 0x1E:  /* PRECR_SRA.PH.W */
+            case 0x1F: {  /* PRECR_SRA_R.PH.W */
+               if (VEX_MIPS_PROC_DSP2(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+            default: {
+               if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+         }
+         break;
+      }
+      case 0x12: {  /* ABSQ_S.PH */
+         switch(sa){
+            case 0x1: {  /* ABSQ_S.QB */
+               if (VEX_MIPS_PROC_DSP2(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+            default: {
+               if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+         }
+         break;
+      }
+      case 0x13: {  /* SHLL.QB */
+         switch(sa) {
+            case 0x04:  /* SHRA.QB */
+            case 0x05:  /* SHRA_R.QB */
+            case 0x06:  /* SHRAV.QB */
+            case 0x07:  /* SHRAV_R.QB */
+            case 0x19:  /* SHLR.PH */
+            case 0x1B: {  /* SHLRV.PH */
+               if (VEX_MIPS_PROC_DSP2(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+            default: {
+               if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+         }
+         break;
+      }
+      case 0x30: {  /* DPAQ.W.PH */
+         switch(sa) {
+            case  0x0:  /* DPA.W.PH */
+            case 0x18:  /* DPAQX_S.W.PH */
+            case 0x1A:  /* DPAQX_SA.W.PH */
+            case  0x8:  /* DPAX.W.PH */
+            case  0x1:  /* DPS.W.PH */
+            case 0x19:  /* DPSQX_S.W.PH */
+            case 0x1B:  /* DPSQX_SA.W.PH */
+            case  0x9:  /* DPSX.W.PH */
+            case  0x2: {  /* MULSA.W.PH */
+               if (VEX_MIPS_PROC_DSP2(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+            default: {
+               if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+                  UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+                  if (0 != retVal ) {
+                     goto decode_failure_dsp;
+                  }
+                  break;
+               } else {
+                  goto decode_failure_dsp;
+               }
+               break;
+            }
+         }
+         break;
+      }
+      case 0x18:  /* ADDUH.QB/MUL.PH */
+      case 0x31: {  /* APPEND */
+         if (VEX_MIPS_PROC_DSP2(archinfo->hwcaps)) {
+            UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+            if (0 != retVal ) {
+               goto decode_failure_dsp;
+            }
+            break;
+         } else {
+            goto decode_failure_dsp;
+         }
+      }
+      default:
+         goto decode_failure;
+
+   }
+      break;  /* Special3 */
+
+   case 0x3B:
+      if (0x3B == function &&
+          (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_BROADCOM)) {
+         /*RDHWR*/
+         DIP("rdhwr r%d, r%d", rt, rd);
+         if (rd == 29) {
+            putIReg(rt, getULR());
+         } else
+            goto decode_failure;
+         break;
+      } else {
+         goto decode_failure;
+      }
+
+   case 0x00:  /* Special */
+
+      switch (function) {
+      case 0x1: {
+         UInt mov_cc = get_mov_cc(cins);
+         if (tf == 0) {  /* MOVF */
+            DIP("movf r%d, r%d, %d", rd, rs, mov_cc);
+            t1 = newTemp(Ity_I1);
+            t2 = newTemp(Ity_I32);
+            t3 = newTemp(Ity_I1);
+
+            assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
+            assign(t2, IRExpr_ITE(mkexpr(t1),
+                                  binop(Iop_And32,
+                                        binop(Iop_Shr32, getFCSR(),
+                                              mkU8(23)),
+                                        mkU32(0x1)),
+                                  binop(Iop_And32,
+                                        binop(Iop_Shr32, getFCSR(),
+                                              mkU8(24 + mov_cc)),
+                                        mkU32(0x1))
+                                  ));
+            assign(t3, binop(Iop_CmpEQ32, mkU32(0), mkexpr(t2)));
+            putIReg(rd, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd)));
+         } else if (tf == 1) {  /* MOVT */
+            DIP("movt r%d, r%d, %d", rd, rs, mov_cc);
+            t1 = newTemp(Ity_I1);
+            t2 = newTemp(Ity_I32);
+            t3 = newTemp(Ity_I1);
+
+            assign(t1, binop(Iop_CmpEQ32, mkU32(0), mkU32(mov_cc)));
+            assign(t2, IRExpr_ITE(mkexpr(t1),
+                                  binop(Iop_And32,
+                                        binop(Iop_Shr32, getFCSR(),
+                                              mkU8(23)),
+                                        mkU32(0x1)),
+                                  binop(Iop_And32,
+                                        binop(Iop_Shr32, getFCSR(),
+                                              mkU8(24 + mov_cc)),
+                                        mkU32(0x1))
+                                  ));
+            assign(t3, binop(Iop_CmpEQ32, mkU32(1), mkexpr(t2)));
+            putIReg(rd, IRExpr_ITE(mkexpr(t3), getIReg(rs), getIReg(rd)));
+         }
+         break;
+      }
+      case 0x0A: {  /* MOVZ */
+         DIP("movz r%d, r%d, r%d", rd, rs, rt);
+         t1 = newTemp(ty);
+         t2 = newTemp(ty);
+         if (mode64) {
+            assign(t1, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpEQ64,
+                            getIReg(rt), mkU64(0x0)))));
+            assign(t2, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpNE64,
+                            getIReg(rt), mkU64(0x0)))));
+            putIReg(rd, binop(Iop_Add64, binop(Iop_And64, getIReg(rs),
+                        mkexpr(t1)), binop(Iop_And64, getIReg(rd),mkexpr(t2))));
+         } else {
+            assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, getIReg(rt),
+                                              mkU32(0x0))));
+            assign(t2, unop(Iop_1Sto32, binop(Iop_CmpNE32, getIReg(rt),
+                                              mkU32(0x0))));
+            putIReg(rd, binop(Iop_Add32, binop(Iop_And32, getIReg(rs),
+                        mkexpr(t1)), binop(Iop_And32, getIReg(rd),
+                        mkexpr(t2))));
+         }
+         break;
+      }
+
+      case 0x0B: {  /* MOVN */
+         DIP("movn r%d, r%d, r%d", rd, rs, rt);
+         t1 = newTemp(ty);
+         t2 = newTemp(ty);
+         if (mode64) {
+            assign(t1, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpEQ64,
+                            getIReg(rt), mkU64(0x0)))));
+            assign(t2, unop(Iop_32Sto64, unop(Iop_1Sto32, binop(Iop_CmpNE64,
+                            getIReg(rt), mkU64(0x0)))));
+            putIReg(rd, binop(Iop_Add64, binop(Iop_And64, getIReg(rs),
+                        mkexpr(t2)), binop(Iop_And64, getIReg(rd),
+                                           mkexpr(t1))));
+         } else {
+            assign(t1, unop(Iop_1Sto32, binop(Iop_CmpEQ32, getIReg(rt),
+                                              mkU32(0x0))));
+            assign(t2, unop(Iop_1Sto32, binop(Iop_CmpNE32, getIReg(rt),
+                                              mkU32(0x0))));
+            putIReg(rd, binop(Iop_Add32, binop(Iop_And32, getIReg(rs),
+                        mkexpr(t2)), binop(Iop_And32, getIReg(rd),
+                        mkexpr(t1))));
+         }
+         break;
+      }
+
+      case 0x18:  {  /* MULT */
+         if ( (1 <= ac) && ( 3 >= ac) ) {
+            if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+               /* If DSP is present -> DSP ASE MULT */
+               UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+               if (0 != retVal ) {
+                  goto decode_failure_dsp;
+               }
+               break;
+            } else {
+               goto decode_failure_dsp;
+            }
+         } else {
+            DIP("mult r%d, r%d", rs, rt);
+            t2 = newTemp(Ity_I64);
+
+            assign(t2, binop(Iop_MullS32, mkNarrowTo32(ty, getIReg(rs)),
+                                          mkNarrowTo32(ty, getIReg(rt))));
+
+            putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
+            putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
+            break;
+         }
+      }
+      case 0x19:  {  /* MULTU */
+         if ( (1 <= ac) && ( 3 >= ac) ) {
+            if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+               /* If DSP is present -> DSP ASE MULTU */
+               UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+               if (0 != retVal ) {
+                  goto decode_failure_dsp;
+               }
+               break;
+            } else {
+               goto decode_failure_dsp;
+            }
+         } else {
+            DIP("multu r%d, r%d", rs, rt);
+            t2 = newTemp(Ity_I64);
+
+            assign(t2, binop(Iop_MullU32, mkNarrowTo32(ty, getIReg(rs)),
+                                          mkNarrowTo32(ty, getIReg(rt))));
+
+            putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
+            putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
+            break;
+         }
+      }
+      case 0x20: {  /* ADD */
+         DIP("add r%d, r%d, r%d", rd, rs, rt);
+         IRTemp tmpRs32 = newTemp(Ity_I32);
+         IRTemp tmpRt32 = newTemp(Ity_I32);
+
+         assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+         assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+
+         t0 = newTemp(Ity_I32);
+         t1 = newTemp(Ity_I32);
+         t2 = newTemp(Ity_I32);
+         t3 = newTemp(Ity_I32);
+         t4 = newTemp(Ity_I32);
+         /* dst = src0 + src1
+            if (sign(src0 ) != sign(src1 ))
+            goto no overflow;
+            if (sign(dst) == sign(src0 ))
+            goto no overflow;
+            we have overflow! */
+
+         assign(t0, binop(Iop_Add32, mkexpr(tmpRs32), mkexpr(tmpRt32)));
+         assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32), mkexpr(tmpRt32)));
+         assign(t2, unop(Iop_1Uto32,
+                         binop(Iop_CmpEQ32,
+                               binop(Iop_And32, mkexpr(t1), mkU32(0x80000000)),
+                               mkU32(0x80000000))));
+
+         assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32)));
+         assign(t4, unop(Iop_1Uto32,
+                         binop(Iop_CmpNE32,
+                               binop(Iop_And32, mkexpr(t3), mkU32(0x80000000)),
+                               mkU32(0x80000000))));
+
+         stmt(IRStmt_Exit(binop(Iop_CmpEQ32,
+                                binop(Iop_Or32, mkexpr(t2), mkexpr(t4)),
+                                mkU32(0)),
+                          Ijk_SigFPE_IntOvf,
+                          mode64 ? IRConst_U64(guest_PC_curr_instr + 4) :
+                                   IRConst_U32(guest_PC_curr_instr + 4),
+                          OFFB_PC));
+
+         putIReg(rd,  mkWidenFrom32(ty, mkexpr(t0), True));
+         break;
+      }
+      case 0x1A:  /* DIV */
+         DIP("div r%d, r%d", rs, rt);
+         if (mode64) {
+            t2 = newTemp(Ity_I64);
+
+            assign(t2, binop(Iop_DivModS64to32,
+                             getIReg(rs), mkNarrowTo32(ty, getIReg(rt))));
+
+            putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
+            putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
+         } else {
+            t1 = newTemp(Ity_I64);
+            t2 = newTemp(Ity_I64);
+
+            assign(t1, unop(Iop_32Sto64, getIReg(rs)));
+            assign(t2, binop(Iop_DivModS64to32, mkexpr(t1), getIReg(rt)));
+
+            putHI(unop(Iop_64HIto32, mkexpr(t2)));
+            putLO(unop(Iop_64to32, mkexpr(t2)));
+         }
+         break;
+
+      case 0x1B:  /* DIVU */
+         DIP("divu r%d, r%d", rs, rt);
+         if (mode64) {
+            t2 = newTemp(Ity_I64);
+
+            assign(t2, binop(Iop_DivModU64to32,
+                             getIReg(rs), mkNarrowTo32(ty, getIReg(rt))));
+
+            putHI(mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(t2)), True));
+            putLO(mkWidenFrom32(ty, unop(Iop_64to32, mkexpr(t2)), True));
+         } else {
+            t1 = newTemp(Ity_I64);
+            t2 = newTemp(Ity_I64);
+            assign(t1, unop(Iop_32Uto64, getIReg(rs)));
+            assign(t2, binop(Iop_DivModU64to32, mkexpr(t1), getIReg(rt)));
+            putHI(unop(Iop_64HIto32, mkexpr(t2)));
+            putLO(unop(Iop_64to32, mkexpr(t2)));
+         }
+         break;
+
+      case 0x1C:  /* Doubleword Multiply - DMULT; MIPS64 */
+         DIP("dmult r%u, r%u", rs, rt);
+         t0 = newTemp(Ity_I128);
+
+         assign(t0, binop(Iop_MullS64, getIReg(rs), getIReg(rt)));
+
+         putHI(unop(Iop_128HIto64, mkexpr(t0)));
+         putLO(unop(Iop_128to64, mkexpr(t0)));
+         break;
+
+      case 0x1D:  /* Doubleword Multiply Unsigned - DMULTU; MIPS64 */
+         DIP("dmultu r%u, r%u", rs, rt);
+         t0 = newTemp(Ity_I128);
+
+         assign(t0, binop(Iop_MullU64, getIReg(rs), getIReg(rt)));
+
+         putHI(unop(Iop_128HIto64, mkexpr(t0)));
+         putLO(unop(Iop_128to64, mkexpr(t0)));
+         break;
+
+      case 0x1E:  /* Doubleword Divide DDIV; MIPS64 */
+         DIP("ddiv r%u, r%u", rs, rt);
+         t1 = newTemp(Ity_I128);
+
+         assign(t1, binop(Iop_DivModS64to64, getIReg(rs), getIReg(rt)));
+
+         putHI(unop(Iop_128HIto64, mkexpr(t1)));
+         putLO(unop(Iop_128to64, mkexpr(t1)));
+         break;
+
+      case 0x1F:  /* Doubleword Divide Unsigned DDIVU; MIPS64 check this */
+         DIP("ddivu r%u, r%u", rs, rt);
+         t1 = newTemp(Ity_I128);
+         t2 = newTemp(Ity_I128);
+
+         assign(t1, binop(Iop_64HLto128, mkU64(0), getIReg(rs)));
+
+         assign(t2, binop(Iop_DivModU128to64, mkexpr(t1), getIReg(rt)));
+
+         putHI(unop(Iop_128HIto64, mkexpr(t2)));
+         putLO(unop(Iop_128to64, mkexpr(t2)));
+         break;
+
+      case 0x10: {  /* MFHI */
+         if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+            /* If DSP is present -> DSP ASE MFHI */
+            UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+            if (0 != retVal ) {
+               goto decode_failure;
+            }
+            break;
+         } else {
+            DIP("mfhi r%d", rd);
+            putIReg(rd, getHI());
+            break;
+         }
+      }
+
+      case 0x11:  {  /* MTHI */
+         if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+            /* If DSP is present -> DSP ASE MTHI */
+            UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+            if (0 != retVal ) {
+               goto decode_failure;
+            }
+            break;
+         } else {
+            DIP("mthi r%d", rs);
+            putHI(getIReg(rs));
+            break;
+         }
+      }
+
+      case 0x12:  {  /* MFLO */
+         if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+            /* If DSP is present -> DSP ASE MFLO */
+            UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+            if (0 != retVal ) {
+               goto decode_failure;
+            }
+            break;
+         } else {
+            DIP("mflo r%d", rd);
+            putIReg(rd, getLO());
+            break;
+         }
+      }
+
+      case 0x13:  {  /* MTLO */
+         if (VEX_MIPS_PROC_DSP(archinfo->hwcaps)) {
+            /* If DSP is present -> DSP ASE MTLO */
+            UInt retVal = disDSPInstr_MIPS_WRK ( cins );
+            if (0 != retVal ) {
+               goto decode_failure;
+            }
+            break;
+         } else {
+            DIP("mtlo r%d", rs);
+            putLO(getIReg(rs));
+            break;
+         }
+      }
+
+      case 0x21:  /* ADDU */
+         DIP("addu r%d, r%d, r%d", rd, rs, rt);
+         if (mode64) {
+            ALU_PATTERN64(Iop_Add32);
+         } else {
+            ALU_PATTERN(Iop_Add32);
+         }
+         break;
+
+      case 0x22: {  /* SUB */
+         DIP("sub r%d, r%d, r%d", rd, rs, rt);
+         IRTemp tmpRs32 = newTemp(Ity_I32);
+         IRTemp tmpRt32 = newTemp(Ity_I32);
+
+         assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+         assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+         t0 = newTemp(Ity_I32);
+         t1 = newTemp(Ity_I32);
+         t2 = newTemp(Ity_I32);
+         t3 = newTemp(Ity_I32);
+         t4 = newTemp(Ity_I32);
+         t5 = newTemp(Ity_I32);
+         /* dst = src0 + (-1 * src1)
+            if(sign(src0 ) != sign((-1 * src1) ))
+            goto no overflow;
+            if(sign(dst) == sign(src0 ))
+            goto no overflow;
+            we have overflow! */
+
+         assign(t5, binop(Iop_Mul32, mkexpr(tmpRt32), mkU32(-1)));
+         assign(t0, binop(Iop_Add32, mkexpr(tmpRs32), mkexpr(t5)));
+         assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32), mkexpr(t5)));
+         assign(t2, unop(Iop_1Sto32, binop(Iop_CmpEQ32, binop(Iop_And32,
+                         mkexpr(t1), mkU32(0x80000000)), mkU32(0x80000000))));
+
+         assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32)));
+         assign(t4, unop(Iop_1Sto32, binop(Iop_CmpNE32, binop(Iop_And32,
+                         mkexpr(t3), mkU32(0x80000000)), mkU32(0x80000000))));
+
+         stmt(IRStmt_Exit(binop(Iop_CmpEQ32, binop(Iop_Or32, mkexpr(t2),
+                                mkexpr(t4)), mkU32(0)), Ijk_SigFPE_IntOvf,
+                          mode64 ? IRConst_U64(guest_PC_curr_instr + 4) :
+                                   IRConst_U32(guest_PC_curr_instr + 4),
+                          OFFB_PC));
+
+         putIReg(rd, mkWidenFrom32(ty, mkexpr(t0), True));
+         break;
+      }
+      case 0x23:  /* SUBU */
+         DIP("subu r%d, r%d, r%d", rd, rs, rt);
+         if (mode64) {
+            ALU_PATTERN64(Iop_Sub32);
+         } else {
+            ALU_PATTERN(Iop_Sub32);
+         }
+         break;
+
+      case 0x24:  /* AND */
+         DIP("and r%d, r%d, r%d", rd, rs, rt);
+         if (mode64) {
+            ALU_PATTERN(Iop_And64);
+         } else {
+            ALU_PATTERN(Iop_And32);
+         }
+         break;
+
+      case 0x25:  /* OR */
+         DIP("or r%d, r%d, r%d", rd, rs, rt);
+         if (mode64) {
+            ALU_PATTERN(Iop_Or64);
+         } else {
+            ALU_PATTERN(Iop_Or32);
+         }
+         break;
+
+      case 0x26:  /* XOR */
+         DIP("xor r%d, r%d, r%d", rd, rs, rt);
+         if (mode64) {
+            ALU_PATTERN(Iop_Xor64);
+         } else {
+            ALU_PATTERN(Iop_Xor32);
+         }
+         break;
+
+      case 0x27:  /* NOR */
+         DIP("nor r%d, r%d, r%d", rd, rs, rt);
+         if (mode64)
+            putIReg(rd, unop(Iop_Not64, binop(Iop_Or64, getIReg(rs),
+                                              getIReg(rt))));
+         else
+            putIReg(rd, unop(Iop_Not32, binop(Iop_Or32, getIReg(rs),
+                                              getIReg(rt))));
+         break;
+
+      case 0x08:  /* JR */
+         DIP("jr r%d", rs);
+         t0 = newTemp(ty);
+         assign(t0, getIReg(rs));
+         lastn = mkexpr(t0);
+         break;
+
+      case 0x09:  /* JALR */
+         DIP("jalr r%d r%d", rd, rs);
+         if (mode64) {
+            putIReg(rd, mkU64(guest_PC_curr_instr + 8));
+            t0 = newTemp(Ity_I64);
+            assign(t0, getIReg(rs));
+            lastn = mkexpr(t0);
+         } else {
+            putIReg(rd, mkU32(guest_PC_curr_instr + 8));
+            t0 = newTemp(Ity_I32);
+            assign(t0, getIReg(rs));
+            lastn = mkexpr(t0);
+         }
+         break;
+
+      case 0x0C:  /* SYSCALL */
+         DIP("syscall");
+         if (mode64)
+            putPC(mkU64(guest_PC_curr_instr + 4));
+         else
+            putPC(mkU32(guest_PC_curr_instr + 4));
+         dres.jk_StopHere = Ijk_Sys_syscall;
+         dres.whatNext    = Dis_StopHere;
+         break;
+
+      case 0x2A:  /* SLT */
+         DIP("slt r%d, r%d, r%d", rd, rs, rt);
+         if (mode64)
+            putIReg(rd, unop(Iop_1Uto64, binop(Iop_CmpLT64S, getIReg(rs),
+                                               getIReg(rt))));
+         else
+            putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs),
+                                               getIReg(rt))));
+         break;
+
+      case 0x2B:  /* SLTU */
+         DIP("sltu r%d, r%d, r%d", rd, rs, rt);
+         if (mode64)
+            putIReg(rd, unop(Iop_1Uto64, binop(Iop_CmpLT64U, getIReg(rs),
+                                         getIReg(rt))));
+         else
+            putIReg(rd, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs),
+                                         getIReg(rt))));
+         break;
+
+      case 0x00: {  /* SLL */
+         DIP("sll r%d, r%d, %d", rd, rt, sa);
+         IRTemp tmpRt32 = newTemp(Ity_I32);
+         IRTemp tmpSh32 = newTemp(Ity_I32);
+         IRTemp tmpRd = newTemp(Ity_I64);
+         if (mode64) {
+            assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+            assign(tmpSh32, binop(Iop_Shl32, mkexpr(tmpRt32), mkU8(sa)));
+            assign(tmpRd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+            putIReg(rd, mkexpr(tmpRd));
+         } else
+            SXX_PATTERN(Iop_Shl32);
+         break;
+      }
+
+      case 0x04: {  /* SLLV */
+         DIP("sllv r%d, r%d, r%d", rd, rt, rs);
+         if (mode64) {
+            IRTemp tmpRs8 = newTemp(Ity_I8);
+            IRTemp tmpRt32 = newTemp(Ity_I32);
+            IRTemp tmpSh32 = newTemp(Ity_I32);
+            IRTemp tmp = newTemp(ty);
+            assign(tmp, binop(mkSzOp(ty, Iop_And8), getIReg(rs),
+                              mkSzImm(ty, 31)));
+            assign(tmpRs8, mkNarrowTo8(ty, mkexpr(tmp)));
+            assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+            assign(tmpSh32, binop(Iop_Shl32, mkexpr(tmpRt32), mkexpr(tmpRs8)));
+            putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+         } else {
+            SXXV_PATTERN(Iop_Shl32);
+         }
+         break;
+      }
+
+      case 0x03:  /* SRA */
+         DIP("sra r%d, r%d, %d", rd, rt, sa);
+         if (mode64) {
+            IRTemp tmpRt32 = newTemp(Ity_I32);
+            IRTemp tmpSh32 = newTemp(Ity_I32);
+
+            t1 = newTemp(Ity_I64);
+            t2 = newTemp(Ity_I64);
+            t3 = newTemp(Ity_I64);
+
+            assign(t1, binop(Iop_And64, getIReg(rt),  /* hi */
+                             mkU64(0xFFFFFFFF00000000ULL)));
+
+            assign(t2, binop(Iop_Sar64, mkexpr(t1), mkU8(sa)));
+
+            assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+            assign(tmpSh32, binop(Iop_Sar32, mkexpr(tmpRt32), mkU8(sa)));
+
+            putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+         } else {
+            SXX_PATTERN(Iop_Sar32);
+         }
+         break;
+
+      case 0x07:  /* SRAV */
+         DIP("srav r%d, r%d, r%d", rd, rt, rs);
+         if (mode64) {
+            IRTemp tmpRt32 = newTemp(Ity_I32);
+            IRTemp tmpSh32 = newTemp(Ity_I32);
+
+            t1 = newTemp(Ity_I64);
+            t2 = newTemp(Ity_I64);
+            t3 = newTemp(Ity_I64);
+            t4 = newTemp(Ity_I8);
+
+            assign(t4, unop(Iop_32to8, binop(Iop_And32,
+                       mkNarrowTo32(ty, getIReg(rs)), mkU32(0x0000001F))));
+
+            assign(t1, binop(Iop_And64, getIReg(rt),  /* hi */
+                   mkU64(0xFFFFFFFF00000000ULL)));
+
+            assign(t2, binop(Iop_Sar64, mkexpr(t1), mkexpr(t4)));
+
+            assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+            assign(tmpSh32, binop(Iop_Sar32, mkexpr(tmpRt32), mkexpr(t4)));
+
+            putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+         } else {
+            SXXV_PATTERN(Iop_Sar32);
+         }
+         break;
+
+      case 0x02: {  /* SRL */
+         rot = get_rot(cins);
+         if (rot) {
+            DIP("rotr r%d, r%d, %d", rd, rt, sa);
+            putIReg(rd, mkWidenFrom32(ty, genROR32(mkNarrowTo32(ty,
+                        getIReg(rt)), sa), True));
+         } else {
+            DIP("srl r%d, r%d, %d", rd, rt, sa);
+            if (mode64) {
+               IRTemp tmpSh32 = newTemp(Ity_I32);
+               IRTemp tmpRt32 = newTemp(Ity_I32);
+
+               assign(tmpRt32, mkNarrowTo32(ty, getIReg(rt)));
+               assign(tmpSh32, binop(Iop_Shr32, mkexpr(tmpRt32), mkU8(sa)));
+               putIReg(rd, mkWidenFrom32(ty, mkexpr(tmpSh32), True));
+            } else {
+               SXX_PATTERN(Iop_Shr32);
+            }
+         }
+      break;
+      }
+
+      case 0x06: {
+         rot = get_rotv(cins);
+         if (rot) {
+            DIP("rotrv r%d, r%d, r%d", rd, rt, rs);
+            putIReg(rd, mkWidenFrom32(ty, genRORV32(mkNarrowTo32(ty,
+                        getIReg(rt)), mkNarrowTo32(ty, getIReg(rs))), True));
+            break;
+         } else {  /* SRLV */
+            DIP("srlv r%d, r%d, r%d", rd, rt, rs);
+            if (mode64) {
+               SXXV_PATTERN64(Iop_Shr32);
+            } else {
+               SXXV_PATTERN(Iop_Shr32);
+            }
+            break;
+         }
+      }
+
+      case 0x0D:  /* BREAK */
+         DIP("break 0x%x", trap_code);
+         if (mode64)
+            jmp_lit64(&dres, Ijk_SigTRAP, (guest_PC_curr_instr + 4));
+         else
+            jmp_lit32(&dres, Ijk_SigTRAP, (guest_PC_curr_instr + 4));
+         vassert(dres.whatNext == Dis_StopHere);
+         break;
+
+      case 0x30: {  /* TGE */
+         DIP("tge r%d, r%d %d", rs, rt, trap_code);
+         if (mode64) {
+            if (trap_code == 7)
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT64S,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                Ijk_SigFPE_IntDiv,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else if (trap_code == 6)
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT64S,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                Ijk_SigFPE_IntOvf,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT64S,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                Ijk_SigTRAP,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+         } else {
+            if (trap_code == 7)
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT32S,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                  Ijk_SigFPE_IntDiv,
+                                  IRConst_U32(guest_PC_curr_instr + 4),
+                                  OFFB_PC));
+            else if (trap_code == 6)
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT32S,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                  Ijk_SigFPE_IntOvf,
+                                  IRConst_U32(guest_PC_curr_instr + 4),
+                                  OFFB_PC));
+            else
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT32S,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                  Ijk_SigTRAP,
+                                  IRConst_U32(guest_PC_curr_instr + 4),
+                                  OFFB_PC));
+         }
+         break;
+      }
+      case 0x31: {  /* TGEU */
+         DIP("tgeu r%d, r%d %d", rs, rt, trap_code);
+         if (mode64) {
+            if (trap_code == 7)
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT64U,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                  Ijk_SigFPE_IntDiv,
+                                  IRConst_U64(guest_PC_curr_instr + 4),
+                                  OFFB_PC));
+            else if (trap_code == 6)
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT64U,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                  Ijk_SigFPE_IntOvf,
+                                  IRConst_U64(guest_PC_curr_instr + 4),
+                                  OFFB_PC));
+            else
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT64U,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                  Ijk_SigTRAP,
+                                  IRConst_U64(guest_PC_curr_instr + 4),
+                                  OFFB_PC));
+         } else {
+            if (trap_code == 7)
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT32U,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                  Ijk_SigFPE_IntDiv,
+                                  IRConst_U32(guest_PC_curr_instr + 4),
+                                  OFFB_PC));
+            else if (trap_code == 6)
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT32U,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                  Ijk_SigFPE_IntOvf,
+                                  IRConst_U32(guest_PC_curr_instr + 4),
+                                  OFFB_PC));
+            else
+               stmt (IRStmt_Exit (unop (Iop_Not1,
+                                        binop (Iop_CmpLT32U,
+                                               getIReg (rs),
+                                               getIReg (rt))),
+                                  Ijk_SigTRAP,
+                                  IRConst_U32(guest_PC_curr_instr + 4),
+                                  OFFB_PC));
+         }
+         break;
+      }
+      case 0x32: {  /* TLT */
+         DIP("tlt r%d, r%d %d", rs, rt, trap_code);
+         if (mode64) {
+            if (trap_code == 7)
+               stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntDiv,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else if (trap_code == 6)
+               stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntOvf,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else
+               stmt(IRStmt_Exit(binop(Iop_CmpLT64S, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigTRAP,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+         } else {
+            if (trap_code == 7)
+               stmt(IRStmt_Exit(binop(Iop_CmpLT32S, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntDiv,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else if (trap_code == 6)
+               stmt(IRStmt_Exit(binop(Iop_CmpLT32S, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntOvf,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else
+               stmt(IRStmt_Exit(binop(Iop_CmpLT32S, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigTRAP,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+         }
+         break;
+      }
+      case 0x33: {  /* TLTU */
+         DIP("tltu r%d, r%d %d", rs, rt, trap_code);
+         if (mode64) {
+            if (trap_code == 7)
+               stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntDiv,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else if (trap_code == 6)
+               stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntOvf,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else
+               stmt(IRStmt_Exit(binop(Iop_CmpLT64U, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigTRAP,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+         } else {
+            if (trap_code == 7)
+               stmt(IRStmt_Exit(binop(Iop_CmpLT32U, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntDiv,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else if (trap_code == 6)
+               stmt(IRStmt_Exit(binop(Iop_CmpLT32U, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntOvf,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else
+               stmt(IRStmt_Exit(binop(Iop_CmpLT32U, getIReg(rs),
+                                      getIReg (rt)), Ijk_SigTRAP,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+         }
+         break;
+      }
+      case 0x34: {  /* TEQ */
+         DIP("teq r%d, r%d, %d", rs, rt, trap_code);
+         if (mode64) {
+            if (trap_code == 7)
+               stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntDiv,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else if (trap_code == 6)
+               stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntOvf,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else
+               stmt(IRStmt_Exit(binop(Iop_CmpEQ64, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigTRAP,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+         } else {
+            if (trap_code == 7)
+               stmt(IRStmt_Exit(binop(Iop_CmpEQ32, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntDiv,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else if (trap_code == 6)
+               stmt(IRStmt_Exit(binop(Iop_CmpEQ32, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntOvf,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else
+               stmt(IRStmt_Exit(binop(Iop_CmpEQ32, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigTRAP,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+         }
+         break;
+      }
+      case 0x36: {  /* TNE */
+         DIP("tne r%d, r%d %d", rs, rt, trap_code);
+         if (mode64) {
+            if (trap_code == 7)
+               stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntDiv,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else if (trap_code == 6)
+               stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntOvf,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else
+               stmt(IRStmt_Exit(binop(Iop_CmpNE64, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigTRAP,
+                                IRConst_U64(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+         } else {
+            if (trap_code == 7)
+               stmt(IRStmt_Exit(binop(Iop_CmpNE32, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntDiv,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else if (trap_code == 6)
+               stmt(IRStmt_Exit(binop(Iop_CmpNE32, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigFPE_IntOvf,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+            else
+               stmt(IRStmt_Exit(binop(Iop_CmpNE32, getIReg(rs),
+                                      getIReg(rt)), Ijk_SigTRAP,
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                                OFFB_PC));
+         }
+         break;
+      }
+      case 0x14:
+      case 0x16:
+      case 0x17:  /* DSLLV, DROTRV:DSRLV, DSRAV */
+      case 0x38:
+      case 0x3A:
+      case 0x3B:  /* DSLL, DROTL:DSRL, DSRA  */
+      case 0x3C:
+      case 0x3E:
+      case 0x3F:  /* DSLL32, DROTR32:DSRL32, DSRA32 */
+         if (dis_instr_shrt(cins))
+            break;
+         goto decode_failure;
+
+      case 0x0F:  /* SYNC */
+         DIP("sync 0x%x", sel);
+         /* Just ignore it. */
+         break;
+
+      case 0x2C: {  /* Doubleword Add - DADD; MIPS64 */
+         DIP("dadd r%d, r%d, r%d", rd, rs, rt);
+         IRTemp tmpRs64 = newTemp(Ity_I64);
+         IRTemp tmpRt64 = newTemp(Ity_I64);
+
+         assign(tmpRs64, getIReg(rs));
+         assign(tmpRt64, getIReg(rt));
+
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I64);
+         t2 = newTemp(Ity_I64);
+         t3 = newTemp(Ity_I64);
+         t4 = newTemp(Ity_I64);
+         /* dst = src0 + src1
+            if(sign(src0 ) != sign(src1 ))
+            goto no overflow;
+            if(sign(dst) == sign(src0 ))
+            goto no overflow;
+            we have overflow! */
+
+         assign(t0, binop(Iop_Add64, mkexpr(tmpRs64), mkexpr(tmpRt64)));
+         assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64), mkexpr(tmpRt64)));
+         assign(t2, unop(Iop_1Uto64,
+                         binop(Iop_CmpEQ64,
+                               binop(Iop_And64, mkexpr(t1),
+                                     mkU64(0x8000000000000000ULL)),
+                               mkU64(0x8000000000000000ULL))));
+
+         assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64)));
+         assign(t4, unop(Iop_1Uto64,
+                         binop(Iop_CmpNE64,
+                               binop(Iop_And64, mkexpr(t3),
+                                     mkU64(0x8000000000000000ULL)),
+                               mkU64(0x8000000000000000ULL))));
+
+         stmt(IRStmt_Exit(binop(Iop_CmpEQ64,
+                                binop(Iop_Or64, mkexpr(t2), mkexpr(t4)),
+                                mkU64(0)),
+                          Ijk_SigFPE_IntOvf,
+                          IRConst_U64(guest_PC_curr_instr + 4),
+                          OFFB_PC));
+
+         putIReg(rd,  mkexpr(t0));
+         break;
+      }
+
+      case 0x2D:  /* Doubleword Add Unsigned - DADDU; MIPS64 */
+         DIP("daddu r%d, r%d, r%d", rd, rs, rt);
+         ALU_PATTERN(Iop_Add64);
+         break;
+
+      case 0x2E: {  /* Doubleword Subtract - DSUB; MIPS64 */
+         DIP("dsub r%u, r%u, r%u", rd, rs, rt);
+         IRTemp tmpRs64 = newTemp(Ity_I64);
+         IRTemp tmpRt64 = newTemp(Ity_I64);
+
+         assign(tmpRs64, getIReg(rs));
+         assign(tmpRt64, getIReg(rt));
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I64);
+         t2 = newTemp(Ity_I64);
+         t3 = newTemp(Ity_I64);
+         t4 = newTemp(Ity_I64);
+         t5 = newTemp(Ity_I64);
+         /* dst = src0 + (-1 * src1)
+            if(sign(src0 ) != sign((-1 * src1) ))
+            goto no overflow;
+            if(sign(dst) == sign(src0 ))
+            goto no overflow;
+            we have overflow! */
+
+         assign(t5, binop(Iop_Mul64,
+                          mkexpr(tmpRt64),
+                          mkU64(0xffffffffffffffffULL)));
+         assign(t0, binop(Iop_Add64, mkexpr(tmpRs64), mkexpr(t5)));
+         assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64), mkexpr(t5)));
+         assign(t2, unop(Iop_1Sto64,
+                         binop(Iop_CmpEQ64,
+                               binop(Iop_And64,
+                                     mkexpr(t1),
+                                     mkU64(0x8000000000000000ULL)),
+                               mkU64(0x8000000000000000ULL))));
+
+         assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64)));
+         assign(t4, unop(Iop_1Sto64,
+                         binop(Iop_CmpNE64,
+                               binop(Iop_And64,
+                                     mkexpr(t3),
+                                     mkU64(0x8000000000000000ULL)),
+                               mkU64(0x8000000000000000ULL))));
+
+         stmt(IRStmt_Exit(binop(Iop_CmpEQ64, binop(Iop_Or64, mkexpr(t2),
+                                mkexpr(t4)), mkU64(0)), Ijk_SigFPE_IntOvf,
+                          IRConst_U64(guest_PC_curr_instr + 4),
+                          OFFB_PC));
+
+         putIReg(rd, binop(Iop_Sub64, getIReg(rs), getIReg(rt)));
+         break;
+      }
+
+      case 0x2F:  /* Doubleword Subtract Unsigned - DSUBU; MIPS64 */
+         DIP("dsubu r%u, r%u, r%u", rd, rs, rt);
+         ALU_PATTERN(Iop_Sub64);
+         break;
+
+      default:
+         goto decode_failure;
+      }
+      break;
+
+   case 0x01:  /* Regimm */
+
+      switch (rt) {
+      case 0x00:  /* BLTZ */
+         DIP("bltz r%d, %d", rs, imm);
+         if (mode64) {
+            if (!dis_instr_branch(cins, &dres, resteerOkFn,
+                        callback_opaque, &bstmt))
+               goto decode_failure;
+         } else
+            dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
+                       mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt);
+         break;
+
+      case 0x01:  /* BGEZ */
+         DIP("bgez r%d, %d", rs, imm);
+         if (mode64) {
+            if (!dis_instr_branch(cins, &dres, resteerOkFn,
+                                  callback_opaque, &bstmt))
+               goto decode_failure;
+         } else
+            dis_branch(False, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
+                              mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt);
+         break;
+
+      case 0x02:  /* BLTZL */
+         DIP("bltzl r%d, %d", rs, imm);
+         lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
+                     binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
+                     mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+                     mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+                     imm);
+         break;
+
+      case 0x03:  /* BGEZL */
+         DIP("bgezl r%d, %d", rs, imm);
+         lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
+                     binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
+                     mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+                     mode64 ? mkU64(0x0) : mkU32(0x0)), imm);
+         break;
+
+      case 0x10:  /* BLTZAL */
+         DIP("bltzal r%d, %d", rs, imm);
+         if (mode64) {
+            if (!dis_instr_branch(cins, &dres, resteerOkFn,
+                        callback_opaque, &bstmt))
+               goto decode_failure;
+         } else
+            dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
+                       mkU32(0x80000000)), mkU32(0x80000000)), imm, &bstmt);
+         break;
+
+      case 0x12:  /* BLTZALL */
+         DIP("bltzall r%d, %d", rs, imm);
+         putIReg(31, mode64 ? mkU64(guest_PC_curr_instr + 8) :
+                              mkU32(guest_PC_curr_instr + 8));
+         lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
+                     binop(mode64 ? Iop_And64 : Iop_And32, getIReg(rs),
+                     mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+                     mode64 ? mkU64(0x8000000000000000ULL) : mkU32(0x80000000)),
+                     imm);
+         break;
+
+      case 0x11:  /* BGEZAL */
+         DIP("bgezal r%d, %d", rs, imm);
+         if (mode64) {
+            if (!dis_instr_branch(cins, &dres, resteerOkFn,
+                        callback_opaque, &bstmt))
+               goto decode_failure;
+         } else
+            dis_branch(True, binop(Iop_CmpEQ32, binop(Iop_And32, getIReg(rs),
+                       mkU32(0x80000000)), mkU32(0x0)), imm, &bstmt);
+         break;
+
+      case 0x13:  /* BGEZALL */
+         DIP("bgezall r%d, %d", rs, imm);
+         if (mode64) {
+            putIReg(31, mkU64(guest_PC_curr_instr + 8));
+            lastn = dis_branch_likely(binop(Iop_CmpNE64,
+                                            binop(Iop_And64,
+                                                  getIReg(rs),
+                                                  mkU64(0x8000000000000000ULL)),
+                                            mkU64(0x0)),
+                                      imm);
+         } else {
+            putIReg(31, mkU32(guest_PC_curr_instr + 8));
+            lastn = dis_branch_likely(binop(Iop_CmpNE32, binop(Iop_And32,
+                                      getIReg(rs), mkU32(0x80000000)),
+                                      mkU32(0x0)), imm);
+         }
+         break;
+
+      case 0x08:  /* TGEI */
+         DIP("tgei r%d, %d %d", rs, imm, trap_code);
+         if (mode64) {
+            stmt (IRStmt_Exit (unop (Iop_Not1,
+                                     binop (Iop_CmpLT64S,
+                                            getIReg (rs),
+                                            mkU64 (extend_s_16to64 (imm)))),
+                             Ijk_SigTRAP,
+                             IRConst_U64(guest_PC_curr_instr + 4),
+                             OFFB_PC));
+         } else {
+            stmt (IRStmt_Exit (unop (Iop_Not1,
+                                     binop (Iop_CmpLT32S,
+                                     getIReg (rs),
+                                     mkU32 (extend_s_16to32 (imm)))),
+                             Ijk_SigTRAP,
+                             IRConst_U32(guest_PC_curr_instr + 4),
+                             OFFB_PC));
+         }
+         break;
+
+      case 0x09: {  /* TGEIU */
+         DIP("tgeiu r%d, %d %d", rs, imm, trap_code);
+         if (mode64) {
+            stmt (IRStmt_Exit (unop (Iop_Not1,
+                                     binop (Iop_CmpLT64U,
+                                            getIReg (rs),
+                                            mkU64 (extend_s_16to64 (imm)))),
+                             Ijk_SigTRAP,
+                             IRConst_U64(guest_PC_curr_instr + 4),
+                             OFFB_PC));
+         } else {
+            stmt (IRStmt_Exit (unop (Iop_Not1,
+                                     binop (Iop_CmpLT32U,
+                                            getIReg (rs),
+                                            mkU32 (extend_s_16to32 (imm)))),
+                               Ijk_SigTRAP,
+                               IRConst_U32(guest_PC_curr_instr + 4),
+                               OFFB_PC));
+         }
+         break;
+      }
+      case 0x0A: {  /* TLTI */
+         DIP("tlti r%d, %d %d", rs, imm, trap_code);
+         if (mode64) {
+            stmt (IRStmt_Exit (binop (Iop_CmpLT64S, getIReg (rs),
+                                      mkU64 (extend_s_16to64 (imm))),
+                             Ijk_SigTRAP,
+                             IRConst_U64(guest_PC_curr_instr + 4),
+                             OFFB_PC));
+         } else {
+            stmt (IRStmt_Exit (binop (Iop_CmpLT32S, getIReg (rs),
+                                      mkU32 (extend_s_16to32 (imm))),
+                               Ijk_SigTRAP,
+                               IRConst_U32(guest_PC_curr_instr + 4),
+                               OFFB_PC));
+         }
+         break;
+      }
+      case 0x0B: {  /* TLTIU */
+         DIP("tltiu r%d, %d %d", rs, imm, trap_code);
+         if (mode64) {
+            stmt (IRStmt_Exit (binop (Iop_CmpLT64U, getIReg (rs),
+                                      mkU64 (extend_s_16to64 (imm))),
+                             Ijk_SigTRAP,
+                             IRConst_U64(guest_PC_curr_instr + 4),
+                             OFFB_PC));
+         } else {
+            stmt (IRStmt_Exit (binop (Iop_CmpLT32U, getIReg (rs),
+                                      mkU32 (extend_s_16to32 (imm))),
+                               Ijk_SigTRAP,
+                               IRConst_U32(guest_PC_curr_instr + 4),
+                               OFFB_PC));
+         }
+         break;
+      }
+      case 0x0C: {  /* TEQI */
+          DIP("teqi r%d, %d %d", rs, imm, trap_code);
+         if (mode64) {
+            stmt (IRStmt_Exit (binop (Iop_CmpEQ64, getIReg (rs),
+                                      mkU64 (extend_s_16to64 (imm))),
+                               Ijk_SigTRAP,
+                               IRConst_U64(guest_PC_curr_instr + 4),
+                               OFFB_PC));
+         } else {
+            stmt (IRStmt_Exit (binop (Iop_CmpEQ32, getIReg (rs),
+                                      mkU32 (extend_s_16to32 (imm))),
+                               Ijk_SigTRAP,
+                               IRConst_U32(guest_PC_curr_instr + 4),
+                               OFFB_PC));
+         }
+         break;
+      }
+      case 0x0E: {  /* TNEI */
+         DIP("tnei r%d, %d %d", rs, imm, trap_code);
+         if (mode64) {
+            stmt (IRStmt_Exit (binop (Iop_CmpNE64, getIReg (rs),
+                                      mkU64 (extend_s_16to64 (imm))),
+                               Ijk_SigTRAP,
+                               IRConst_U64(guest_PC_curr_instr + 4),
+                               OFFB_PC));
+         } else {
+            stmt (IRStmt_Exit (binop (Iop_CmpNE32, getIReg (rs),
+                                      mkU32 (extend_s_16to32 (imm))),
+                               Ijk_SigTRAP,
+                               IRConst_U32(guest_PC_curr_instr + 4),
+                               OFFB_PC));
+         }
+         break;
+      }
+      case 0x1C: {  /* BPOSGE32 */
+         DIP("bposge32 %d", imm);
+         vassert(!mode64);
+         t0 = newTemp(Ity_I32);
+         /* Get pos field from DSPControl register. */
+         assign(t0, binop(Iop_And32, getDSPControl(), mkU32(0x3f)));
+         dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLT32U, mkexpr(t0),
+                                mkU32(32))), imm, &bstmt);
+      }
+      case 0x1F:
+         /* SYNCI */
+         /* Just ignore it */
+         break;
+
+      default:
+         goto decode_failure;
+      }
+      break;
+
+   case 0x04:
+      DIP("beq r%d, r%d, %d", rs, rt, imm);
+      if (mode64)
+         dis_branch(False, binop(Iop_CmpEQ64, getIReg(rs), getIReg(rt)),
+                                 imm, &bstmt);
+      else
+         dis_branch(False, binop(Iop_CmpEQ32, getIReg(rs), getIReg(rt)),
+                                 imm, &bstmt);
+      break;
+
+   case 0x14:
+      DIP("beql r%d, r%d, %d", rs, rt, imm);
+      lastn = dis_branch_likely(binop(mode64 ? Iop_CmpNE64 : Iop_CmpNE32,
+                                getIReg(rs), getIReg(rt)), imm);
+      break;
+
+   case 0x05:
+      DIP("bne r%d, r%d, %d", rs, rt, imm);
+      if (mode64)
+         dis_branch(False, binop(Iop_CmpNE64, getIReg(rs), getIReg(rt)),
+                                 imm, &bstmt);
+      else
+         dis_branch(False, binop(Iop_CmpNE32, getIReg(rs), getIReg(rt)),
+                                 imm, &bstmt);
+      break;
+
+   case 0x15:
+      DIP("bnel r%d, r%d, %d", rs, rt, imm);
+      lastn = dis_branch_likely(binop(mode64 ? Iop_CmpEQ64 : Iop_CmpEQ32,
+                                      getIReg(rs), getIReg(rt)), imm);
+      break;
+
+   case 0x07:  /* BGTZ */
+      DIP("bgtz r%d, %d", rs, imm);
+      if (mode64)
+         dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE64S, getIReg(rs),
+                                mkU64(0x00))), imm, &bstmt);
+      else
+         dis_branch(False, unop(Iop_Not1, binop(Iop_CmpLE32S, getIReg(rs),
+                                mkU32(0x00))), imm, &bstmt);
+      break;
+
+   case 0x17:  /* BGTZL */
+      DIP("bgtzl r%d, %d", rs, imm);
+      if (mode64)
+         lastn = dis_branch_likely(binop(Iop_CmpLE64S, getIReg(rs),
+                                         mkU64(0x00)), imm);
+      else
+         lastn = dis_branch_likely(binop(Iop_CmpLE32S, getIReg(rs),
+                                         mkU32(0x00)), imm);
+      break;
+
+   case 0x06:  /* BLEZ */
+      DIP("blez r%d, %d", rs, imm);
+      if (mode64)
+         dis_branch(False, binop(Iop_CmpLE64S, getIReg(rs), mkU64(0x0)),
+                                imm, &bstmt);
+      else
+         dis_branch(False,binop(Iop_CmpLE32S, getIReg(rs), mkU32(0x0)), imm,
+                                &bstmt);
+      break;
+
+   case 0x16:  /* BLEZL */
+      DIP("blezl r%d, %d", rs, imm);
+      lastn = dis_branch_likely(unop(Iop_Not1, (binop(mode64 ? Iop_CmpLE64S :
+                                     Iop_CmpLE32S, getIReg(rs), mode64 ?
+                                     mkU64(0x0) : mkU32(0x0)))), imm);
+      break;
+
+   case 0x08: {  /* ADDI */
+      DIP("addi r%d, r%d, %d", rt, rs, imm);
+      IRTemp tmpRs32 = newTemp(Ity_I32);
+      assign(tmpRs32, mkNarrowTo32(ty, getIReg(rs)));
+
+      t0 = newTemp(Ity_I32);
+      t1 = newTemp(Ity_I32);
+      t2 = newTemp(Ity_I32);
+      t3 = newTemp(Ity_I32);
+      t4 = newTemp(Ity_I32);
+      /* dst = src0 + sign(imm)
+         if(sign(src0 ) != sign(imm ))
+         goto no overflow;
+         if(sign(dst) == sign(src0 ))
+         goto no overflow;
+         we have overflow! */
+
+      assign(t0, binop(Iop_Add32, mkexpr(tmpRs32),
+                       mkU32(extend_s_16to32(imm))));
+      assign(t1, binop(Iop_Xor32, mkexpr(tmpRs32),
+                       mkU32(extend_s_16to32(imm))));
+      assign(t2, unop(Iop_1Sto32, binop(Iop_CmpEQ32, binop(Iop_And32,
+                      mkexpr(t1), mkU32(0x80000000)), mkU32(0x80000000))));
+
+      assign(t3, binop(Iop_Xor32, mkexpr(t0), mkexpr(tmpRs32)));
+      assign(t4, unop(Iop_1Sto32, binop(Iop_CmpNE32, binop(Iop_And32,
+                      mkexpr(t3), mkU32(0x80000000)), mkU32(0x80000000))));
+
+      stmt(IRStmt_Exit(binop(Iop_CmpEQ32, binop(Iop_Or32, mkexpr(t2),
+                             mkexpr(t4)), mkU32(0)), Ijk_SigFPE_IntOvf,
+                       mode64 ? IRConst_U64(guest_PC_curr_instr + 4) :
+                                IRConst_U32(guest_PC_curr_instr + 4),
+                       OFFB_PC));
+
+      putIReg(rt,  mkWidenFrom32(ty, mkexpr(t0), True));
+      break;
+   }
+   case 0x09:  /* ADDIU */
+      DIP("addiu r%d, r%d, %d", rt, rs, imm);
+      if (mode64) {
+         putIReg(rt, mkWidenFrom32(ty, binop(Iop_Add32,
+                     mkNarrowTo32(ty, getIReg(rs)),mkU32(extend_s_16to32(imm))),
+                     True));
+      } else
+         putIReg(rt, binop(Iop_Add32, getIReg(rs),mkU32(extend_s_16to32(imm))));
+      break;
+
+   case 0x0C:  /* ANDI */
+      DIP("andi r%d, r%d, %d", rt, rs, imm);
+      if (mode64) {
+         ALUI_PATTERN64(Iop_And64);
+      } else {
+         ALUI_PATTERN(Iop_And32);
+      }
+      break;
+
+   case 0x0E:  /* XORI */
+      DIP("xori r%d, r%d, %d", rt, rs, imm);
+      if (mode64) {
+         ALUI_PATTERN64(Iop_Xor64);
+      } else {
+         ALUI_PATTERN(Iop_Xor32);
+      }
+      break;
+
+   case 0x0D:  /* ORI */
+      DIP("ori r%d, r%d, %d", rt, rs, imm);
+      if (mode64) {
+         ALUI_PATTERN64(Iop_Or64);
+      } else {
+         ALUI_PATTERN(Iop_Or32);
+      }
+      break;
+
+   case 0x0A:  /* SLTI */
+      DIP("slti r%d, r%d, %d", rt, rs, imm);
+      if (mode64)
+         putIReg(rt, unop(Iop_1Uto64, binop(Iop_CmpLT64S, getIReg(rs),
+                                            mkU64(extend_s_16to64(imm)))));
+      else
+         putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32S, getIReg(rs),
+                                            mkU32(extend_s_16to32(imm)))));
+      break;
+
+   case 0x0B:  /* SLTIU */
+      DIP("sltiu r%d, r%d, %d", rt, rs, imm);
+      if (mode64)
+         putIReg(rt, unop(Iop_1Uto64, binop(Iop_CmpLT64U, getIReg(rs),
+                                            mkU64(extend_s_16to64(imm)))));
+      else
+         putIReg(rt, unop(Iop_1Uto32, binop(Iop_CmpLT32U, getIReg(rs),
+                                            mkU32(extend_s_16to32(imm)))));
+      break;
+
+   case 0x18: {  /* Doubleword Add Immediate - DADDI; MIPS64 */
+      DIP("daddi r%d, r%d, %d", rt, rs, imm);
+      IRTemp tmpRs64 = newTemp(Ity_I64);
+      assign(tmpRs64, getIReg(rs));
+
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+      t4 = newTemp(Ity_I64);
+      /* dst = src0 + sign(imm)
+         if(sign(src0 ) != sign(imm ))
+         goto no overflow;
+         if(sign(dst) == sign(src0 ))
+         goto no overflow;
+         we have overflow! */
+
+      assign(t0, binop(Iop_Add64, mkexpr(tmpRs64),
+                       mkU64(extend_s_16to64(imm))));
+      assign(t1, binop(Iop_Xor64, mkexpr(tmpRs64),
+                       mkU64(extend_s_16to64(imm))));
+      assign(t2, unop(Iop_1Sto64, binop(Iop_CmpEQ64, binop(Iop_And64,
+                      mkexpr(t1), mkU64(0x8000000000000000ULL)),
+                                        mkU64(0x8000000000000000ULL))));
+
+      assign(t3, binop(Iop_Xor64, mkexpr(t0), mkexpr(tmpRs64)));
+      assign(t4, unop(Iop_1Sto64, binop(Iop_CmpNE64, binop(Iop_And64,
+                      mkexpr(t3), mkU64(0x8000000000000000ULL)),
+                                        mkU64(0x8000000000000000ULL))));
+
+      stmt(IRStmt_Exit(binop(Iop_CmpEQ64, binop(Iop_Or64, mkexpr(t2),
+                             mkexpr(t4)), mkU64(0)), Ijk_SigFPE_IntOvf,
+                       IRConst_U64(guest_PC_curr_instr + 4),
+                       OFFB_PC));
+
+      putIReg(rt,  mkexpr(t0));
+      break;
+   }
+
+   case 0x19:  /* Doubleword Add Immediate Unsigned - DADDIU; MIPS64 */
+      DIP("daddiu r%d, r%d, %d", rt, rs, imm);
+      putIReg(rt, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+      break;
+
+   case 0x1A: {
+      /* Load Doubleword Left - LDL; MIPS64 */
+      vassert(mode64);
+      DIP("ldl r%u, %d(r%u)", rt, imm, rs);
+      /* t1 = addr */
+#if defined (_MIPSEL)
+      t1 = newTemp(Ity_I64);
+      assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+      t1 = newTemp(Ity_I64);
+      assign(t1, binop(Iop_Xor64, mkU64(0x7), binop(Iop_Add64, getIReg(rs),
+                                  mkU64(extend_s_16to64(imm)))));
+#endif
+      /* t2 = word addr */
+      /* t4 = addr mod 8 */
+      LWX_SWX_PATTERN64_1;
+
+      /* t3 = word content - shifted */
+      t3 = newTemp(Ity_I64);
+      assign(t3, binop(Iop_Shl64, load(Ity_I64, mkexpr(t2)),
+                 narrowTo(Ity_I8, binop(Iop_Shl64, binop(Iop_Sub64, mkU64(0x07),
+                 mkexpr(t4)), mkU8(3)))));
+
+      /* rt content  - adjusted */
+      t5 = newTemp(Ity_I64);
+      t6 = newTemp(Ity_I64);
+      t7 = newTemp(Ity_I64);
+
+      assign(t5, binop(Iop_Mul64, mkexpr(t4), mkU64(0x8)));
+
+      assign(t6, binop(Iop_Shr64, mkU64(0x00FFFFFFFFFFFFFFULL),
+                       narrowTo(Ity_I8, mkexpr(t5))));
+
+      assign(t7, binop(Iop_And64, getIReg(rt), mkexpr(t6)));
+
+      putIReg(rt, binop(Iop_Or64, mkexpr(t7), mkexpr(t3)));
+      break;
+   }
+
+   case 0x1B: {
+      /* Load Doubleword Right - LDR; MIPS64 */
+      vassert(mode64);
+      DIP("ldr r%u,%d(r%u)", rt, imm, rs);
+      /* t1 = addr */
+#if defined (_MIPSEL)
+      t1 = newTemp(Ity_I64);
+      assign(t1, binop(Iop_Add64, getIReg(rs), mkU64(extend_s_16to64(imm))));
+#elif defined (_MIPSEB)
+      t1 = newTemp(Ity_I64);
+      assign(t1, binop(Iop_Xor64, mkU64(0x7), binop(Iop_Add64, getIReg(rs),
+                                  mkU64(extend_s_16to64(imm)))));
+#endif
+      /* t2 = word addr */
+      /* t4 = addr mod 8 */
+      LWX_SWX_PATTERN64_1;
+
+      /* t3 = word content - shifted */
+      t3 = newTemp(Ity_I64);
+      assign(t3, binop(Iop_Shr64, load(Ity_I64, mkexpr(t2)),
+                 narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(3)))));
+
+      /* rt content  - adjusted */
+      t5 = newTemp(Ity_I64);
+      assign(t5, binop(Iop_And64, getIReg(rt), unop(Iop_Not64,
+                 binop(Iop_Shr64, mkU64(0xFFFFFFFFFFFFFFFFULL),
+                 narrowTo(Ity_I8, binop(Iop_Shl64, mkexpr(t4), mkU8(0x3)))))));
+
+      putIReg(rt, binop(Iop_Or64, mkexpr(t5), mkexpr(t3)));
+      break;
+   }
+
+   case 0x27:  /* Load Word unsigned - LWU; MIPS64 */
+      DIP("lwu r%u,%d(r%u)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+
+      putIReg(rt, mkWidenFrom32(ty, load(Ity_I32, mkexpr(t1)), False));
+      break;
+
+   case 0x30:  /* LL / LWC0 */
+      DIP("ll r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+
+      t2 = newTemp(Ity_I32);
+#if defined (_MIPSEL)
+      stmt(IRStmt_LLSC(Iend_LE, t2, mkexpr(t1), NULL /* this is a load */ ));
+#elif defined (_MIPSEB)
+      stmt(IRStmt_LLSC(Iend_BE, t2, mkexpr(t1), NULL /* this is a load */ ));
+#endif
+      if (mode64)
+         putIReg(rt, unop(Iop_32Sto64, mkexpr(t2)));
+      else
+         putIReg(rt, mkexpr(t2));
+      break;
+
+   case 0x34:  /* Load Linked Doubleword - LLD; MIPS64 */
+      DIP("lld r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+
+      t2 = newTemp(Ity_I64);
+#if defined (_MIPSEL)
+      stmt(IRStmt_LLSC
+           (Iend_LE, t2, mkexpr(t1), NULL /* this is a load */ ));
+#elif defined (_MIPSEB)
+      stmt(IRStmt_LLSC
+           (Iend_BE, t2, mkexpr(t1), NULL /* this is a load */ ));
+#endif
+
+      putIReg(rt, mkexpr(t2));
+      break;
+
+   case 0x38:  /* SC / SWC0 */
+      DIP("sc r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+
+      t2 = newTemp(Ity_I1);
+#if defined (_MIPSEL)
+      stmt(IRStmt_LLSC(Iend_LE, t2, mkexpr(t1), mkNarrowTo32(ty, getIReg(rt))));
+#elif defined (_MIPSEB)
+      stmt(IRStmt_LLSC(Iend_BE, t2, mkexpr(t1), mkNarrowTo32(ty, getIReg(rt))));
+#endif
+
+      putIReg(rt, unop(mode64 ? Iop_1Uto64 : Iop_1Uto32, mkexpr(t2)));
+      break;
+
+   case 0x3C:  /* Store Conditional Doubleword - SCD; MIPS64 */
+      DIP("sdc r%d, %d(r%d)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+
+      t2 = newTemp(Ity_I1);
+#if defined (_MIPSEL)
+      stmt(IRStmt_LLSC(Iend_LE, t2, mkexpr(t1), getIReg(rt)));
+#elif defined (_MIPSEB)
+      stmt(IRStmt_LLSC(Iend_BE, t2, mkexpr(t1), getIReg(rt)));
+#endif
+
+      putIReg(rt, unop(Iop_1Uto64, mkexpr(t2)));
+      break;
+
+   case 0x37:  /* Load Doubleword - LD; MIPS64 */
+      DIP("ld r%u, %d(r%u)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      putIReg(rt, load(Ity_I64, mkexpr(t1)));
+      break;
+
+   case 0x3F:  /* Store Doubleword - SD; MIPS64 */
+      DIP("sd r%u, %d(r%u)", rt, imm, rs);
+      LOAD_STORE_PATTERN;
+      store(mkexpr(t1), getIReg(rt));
+      break;
+
+   case 0x32:  /* Branch on Bit Clear - BBIT0; Cavium OCTEON */
+      /* Cavium Specific instructions. */
+      if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
+         DIP("bbit0 r%d, 0x%x, %x", rs, rt, imm);
+         t0 = newTemp(Ity_I32);
+         t1 = newTemp(Ity_I32);
+         assign(t0, mkU32(0x1));
+         assign(t1, binop(Iop_Shl32, mkexpr(t0), mkU8(rt)));
+         dis_branch(False, binop(Iop_CmpEQ32,
+                                 binop(Iop_And32,
+                                       mkexpr(t1),
+                                       mkNarrowTo32(ty, getIReg(rs))),
+                                 mkU32(0x0)),
+                    imm, &bstmt);
+         break;
+      } else {
+         goto decode_failure;
+      }
+
+   case 0x36:  /* Branch on Bit Clear Plus 32 - BBIT032; Cavium OCTEON */
+      /* Cavium Specific instructions. */
+      if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
+         DIP("bbit032 r%d, 0x%x, %x", rs, rt, imm);
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I8);  /* Shift. */
+         t2 = newTemp(Ity_I64);
+         assign(t0, mkU64(0x1));
+         assign(t1, binop(Iop_Add8, mkU8(rt), mkU8(32)));
+         assign(t2, binop(Iop_Shl64, mkexpr(t0), mkexpr(t1)));
+         dis_branch(False, binop(Iop_CmpEQ64,
+                                 binop(Iop_And64,
+                                       mkexpr(t2),
+                                       getIReg(rs)),
+                                 mkU64(0x0)),
+                    imm, &bstmt);
+         break;
+      } else {
+         goto decode_failure;
+      }
+
+   case 0x3A:  /* Branch on Bit Set - BBIT1; Cavium OCTEON */
+      /* Cavium Specific instructions. */
+      if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
+         DIP("bbit1 r%d, 0x%x, %x", rs, rt, imm);
+         t0 = newTemp(Ity_I32);
+         t1 = newTemp(Ity_I32);
+         assign(t0, mkU32(0x1));
+         assign(t1, binop(Iop_Shl32, mkexpr(t0), mkU8(rt)));
+         dis_branch(False, binop(Iop_CmpNE32,
+                                 binop(Iop_And32,
+                                       mkexpr(t1),
+                                       mkNarrowTo32(ty, getIReg(rs))),
+                                 mkU32(0x0)),
+                    imm, &bstmt);
+         break;
+      } else {
+         goto decode_failure;
+      }
+
+   case 0x3E:  /* Branch on Bit Set Plus 32 - BBIT132; Cavium OCTEON */
+      /* Cavium Specific instructions. */
+      if (VEX_MIPS_COMP_ID(archinfo->hwcaps) == VEX_PRID_COMP_CAVIUM) {
+         DIP("bbit132 r%d, 0x%x, %x", rs, rt, imm);
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I8);  /* Shift. */
+         t2 = newTemp(Ity_I64);
+         assign(t0, mkU64(0x1));
+         assign(t1, binop(Iop_Add8, mkU8(rt), mkU8(32)));
+         assign(t2, binop(Iop_Shl64, mkexpr(t0), mkexpr(t1)));
+         dis_branch(False, binop(Iop_CmpNE64,
+                                 binop(Iop_And64,
+                                       mkexpr(t2),
+                                       getIReg(rs)),
+                                 mkU64(0x0)),
+                    imm, &bstmt);
+         break;
+      } else {
+         goto decode_failure;
+      }
+
+   default:
+      goto decode_failure;
+
+   decode_failure_dsp:
+      vex_printf("Error occured while trying to decode MIPS32 DSP "
+                 "instruction.\nYour platform probably doesn't support "
+                 "MIPS32 DSP ASE.\n");
+   decode_failure:
+      /* All decode failures end up here. */
+      if (sigill_diag)
+         vex_printf("vex mips->IR: unhandled instruction bytes: "
+                    "0x%x 0x%x 0x%x 0x%x\n",
+                    (Int) getIByte(delta_start + 0),
+                    (Int) getIByte(delta_start + 1),
+                    (Int) getIByte(delta_start + 2),
+                    (Int) getIByte(delta_start + 3));
+
+      /* Tell the dispatcher that this insn cannot be decoded, and so has
+         not been executed, and (is currently) the next to be executed.
+         EIP should be up-to-date since it was made so at the start of each
+         insn, but nevertheless be paranoid and update it again right
+         now. */
+      if (mode64) {
+         stmt(IRStmt_Put(offsetof(VexGuestMIPS64State, guest_PC),
+              mkU64(guest_PC_curr_instr)));
+         jmp_lit64(&dres, Ijk_NoDecode, guest_PC_curr_instr);
+      } else {
+         stmt(IRStmt_Put(offsetof(VexGuestMIPS32State, guest_PC),
+              mkU32(guest_PC_curr_instr)));
+         jmp_lit32(&dres, Ijk_NoDecode, guest_PC_curr_instr);
+      }
+      dres.whatNext = Dis_StopHere;
+      dres.len = 0;
+      return dres;
+   }  /* switch (opc) for the main (primary) opcode switch. */
+
+   /* All MIPS insn have 4 bytes */
+
+   if (delay_slot_branch) {
+      delay_slot_branch = False;
+      stmt(bstmt);
+      bstmt = NULL;
+      if (mode64)
+         putPC(mkU64(guest_PC_curr_instr + 4));
+      else
+         putPC(mkU32(guest_PC_curr_instr + 4));
+      dres.jk_StopHere = is_Branch_or_Jump_and_Link(guest_code + delta - 4) ?
+                         Ijk_Call : Ijk_Boring;
+   }
+
+   if (likely_delay_slot) {
+      dres.jk_StopHere = Ijk_Boring;
+      dres.whatNext = Dis_StopHere;
+      putPC(lastn);
+      lastn = NULL;
+   }
+   if (delay_slot_jump) {
+      putPC(lastn);
+      lastn = NULL;
+      dres.jk_StopHere = is_Branch_or_Jump_and_Link(guest_code + delta - 4) ?
+                         Ijk_Call : Ijk_Boring;
+   }
+
+ decode_success:
+   /* All decode successes end up here. */
+   switch (dres.whatNext) {
+      case Dis_Continue:
+         if (mode64)
+            putPC(mkU64(guest_PC_curr_instr + 4));
+         else
+            putPC(mkU32(guest_PC_curr_instr + 4));
+         break;
+      case Dis_ResteerU:
+      case Dis_ResteerC:
+         putPC(mkU32(dres.continueAt));
+         break;
+      case Dis_StopHere:
+         break;
+      default:
+         vassert(0);
+         break;
+   }
+
+   /* On MIPS we need to check if the last instruction in block is branch or
+      jump. */
+   if (((vex_control.guest_max_insns - 1) == (delta + 4) / 4)
+       &&  (dres.whatNext != Dis_StopHere))
+      if (branch_or_jump(guest_code + delta + 4)) {
+         dres.whatNext = Dis_StopHere;
+         dres.jk_StopHere = Ijk_Boring;
+         if (mode64)
+            putPC(mkU64(guest_PC_curr_instr + 4));
+         else
+            putPC(mkU32(guest_PC_curr_instr + 4));
+      }
+   dres.len = 4;
+
+   DIP("\n");
+
+   return dres;
+
+}
+
+/*------------------------------------------------------------*/
+/*--- Top-level fn                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction
+   is located in host memory at &guest_code[delta]. */
+DisResult disInstr_MIPS( IRSB*        irsb_IN,
+                         Bool         (*resteerOkFn) ( void *, Addr ),
+                         Bool         resteerCisOk,
+                         void*        callback_opaque,
+                         const UChar* guest_code_IN,
+                         Long         delta,
+                         Addr         guest_IP,
+                         VexArch      guest_arch,
+                         const VexArchInfo* archinfo,
+                         const VexAbiInfo*  abiinfo,
+                         VexEndness   host_endness_IN,
+                         Bool         sigill_diag_IN )
+{
+   DisResult dres;
+   /* Set globals (see top of this file) */
+   vassert(guest_arch == VexArchMIPS32 || guest_arch == VexArchMIPS64);
+
+   mode64 = guest_arch != VexArchMIPS32;
+#if (__mips_fpr==64)
+   fp_mode64 = ((VEX_MIPS_REV(archinfo->hwcaps) == VEX_PRID_CPU_32FPR)
+                || guest_arch == VexArchMIPS64);
+#endif
+
+   guest_code = guest_code_IN;
+   irsb = irsb_IN;
+   host_endness = host_endness_IN;
+#if defined(VGP_mips32_linux)
+   guest_PC_curr_instr = (Addr32)guest_IP;
+#elif defined(VGP_mips64_linux)
+   guest_PC_curr_instr = (Addr64)guest_IP;
+#endif
+
+   dres = disInstr_MIPS_WRK(resteerOkFn, resteerCisOk, callback_opaque,
+                            delta, archinfo, abiinfo, sigill_diag_IN);
+
+   return dres;
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end                                        guest_mips_toIR.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/priv/guest_ppc_defs.h b/VEX/priv/guest_ppc_defs.h
new file mode 100644
index 0000000..8ef1706
--- /dev/null
+++ b/VEX/priv/guest_ppc_defs.h
@@ -0,0 +1,173 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                  guest_ppc_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* Only to be used within the guest-ppc directory. */
+
+
+#ifndef __VEX_GUEST_PPC_DEFS_H
+#define __VEX_GUEST_PPC_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex_guest_ppc32.h"         // VexGuestPPC32State
+#include "libvex_guest_ppc64.h"         // VexGuestPPC64State
+#include "guest_generic_bb_to_IR.h"     // DisResult
+
+/*---------------------------------------------------------*/
+/*--- ppc to IR conversion                              ---*/
+/*---------------------------------------------------------*/
+
+/* Convert one ppc insn to IR.  See the type DisOneInstrFn in
+   bb_to_IR.h. */
+extern
+DisResult disInstr_PPC ( IRSB*        irbb,
+                         Bool         (*resteerOkFn) ( void*, Addr ),
+                         Bool         resteerCisOk,
+                         void*        callback_opaque,
+                         const UChar* guest_code,
+                         Long         delta,
+                         Addr         guest_IP,
+                         VexArch      guest_arch,
+                         const VexArchInfo* archinfo,
+                         const VexAbiInfo*  abiinfo,
+                         VexEndness   host_endness,
+                         Bool         sigill_diag );
+
+/* Used by the optimiser to specialise calls to helpers. */
+extern
+IRExpr* guest_ppc32_spechelper ( const HChar* function_name,
+                                 IRExpr** args,
+                                 IRStmt** precedingStmts,
+                                 Int      n_precedingStmts );
+
+extern
+IRExpr* guest_ppc64_spechelper ( const HChar* function_name,
+                                 IRExpr** args,
+                                 IRStmt** precedingStmts,
+                                 Int      n_precedingStmts );
+
+/* Describes to the optimiser which parts of the guest state require
+   precise memory exceptions.  This is logically part of the guest
+   state description. */
+extern 
+Bool guest_ppc32_state_requires_precise_mem_exns ( Int, Int,
+                                                   VexRegisterUpdates );
+
+extern 
+Bool guest_ppc64_state_requires_precise_mem_exns ( Int, Int,
+                                                   VexRegisterUpdates );
+
+extern
+VexGuestLayout ppc32Guest_layout;
+
+extern
+VexGuestLayout ppc64Guest_layout;
+
+
+/* FP Rounding mode - different encoding to IR */
+typedef
+   enum {
+      PPCrm_NEAREST = 0,
+      PPCrm_NegINF  = 1,
+      PPCrm_PosINF  = 2,
+      PPCrm_ZERO    = 3
+   } PPCRoundingMode;
+
+/* Floating point comparison values - different encoding to IR */
+typedef
+   enum {
+      PPCcr_LT = 0x8,
+      PPCcr_GT = 0x4,
+      PPCcr_EQ = 0x2,
+      PPCcr_UN = 0x1
+   }
+   PPCCmpF64Result;
+
+/*
+  Enumeration for xer_ca/ov calculation helper functions
+*/
+enum {
+   /* 0  */ PPCG_FLAG_OP_ADD=0,   // addc[o], addic
+   /* 1  */ PPCG_FLAG_OP_ADDE,    // adde[o], addme[o], addze[o]
+   /* 2  */ PPCG_FLAG_OP_DIVW,    // divwo
+   /* 3  */ PPCG_FLAG_OP_DIVWU,   // divwuo
+   /* 4  */ PPCG_FLAG_OP_MULLW,   // mullwo
+   /* 5  */ PPCG_FLAG_OP_NEG,     // nego
+   /* 6  */ PPCG_FLAG_OP_SUBF,    // subfo
+   /* 7  */ PPCG_FLAG_OP_SUBFC,   // subfc[o]
+   /* 8  */ PPCG_FLAG_OP_SUBFE,   // subfe[o], subfme[o], subfze[o]
+   /* 9  */ PPCG_FLAG_OP_SUBFI,   // subfic
+   /* 10 */ PPCG_FLAG_OP_SRAW,    // sraw
+   /* 11 */ PPCG_FLAG_OP_SRAWI,   // srawi
+   /* 12 */ PPCG_FLAG_OP_SRAD,    // srad
+   /* 13 */ PPCG_FLAG_OP_SRADI,   // sradi
+   /* 14 */ PPCG_FLAG_OP_DIVDE,   // divdeo
+   /* 15 */ PPCG_FLAG_OP_DIVWEU,  // divweuo
+   /* 16 */ PPCG_FLAG_OP_DIVWE,   // divweo
+   /* 17 */ PPCG_FLAG_OP_DIVDEU,  // divdeuo
+   /* 18 */ PPCG_FLAG_OP_MULLD,   // mulldo
+   PPCG_FLAG_OP_NUMBER
+};
+
+
+/*---------------------------------------------------------*/
+/*--- ppc guest helpers                                 ---*/
+/*---------------------------------------------------------*/
+
+/* --- CLEAN HELPERS --- */
+
+/* none, right now */
+
+/* --- DIRTY HELPERS --- */
+
+extern ULong ppcg_dirtyhelper_MFTB ( void );
+
+extern UInt ppc32g_dirtyhelper_MFSPR_268_269 ( UInt );
+
+extern UInt ppc32g_dirtyhelper_MFSPR_287 ( void );
+
+extern void ppc32g_dirtyhelper_LVS ( VexGuestPPC32State* gst,
+                                     UInt vD_idx, UInt sh,
+                                     UInt shift_right );
+
+extern void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst,
+                                     UInt vD_idx, UInt sh,
+                                     UInt shift_right,
+                                     UInt endness );
+
+#endif /* ndef __VEX_GUEST_PPC_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                    guest_ppc_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_ppc_helpers.c b/VEX/priv/guest_ppc_helpers.c
new file mode 100644
index 0000000..dc36818
--- /dev/null
+++ b/VEX/priv/guest_ppc_helpers.c
@@ -0,0 +1,879 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                               guest_ppc_helpers.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_ppc32.h"
+#include "libvex_guest_ppc64.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_ppc_defs.h"
+
+
+/* This file contains helper functions for ppc32 and ppc64 guest code.
+   Calls to these functions are generated by the back end.  These
+   calls are of course in the host machine code and this file will be
+   compiled to host machine code, so that all makes sense.
+
+   Only change the signatures of these helper functions very
+   carefully.  If you change the signature here, you'll have to change
+   the parameters passed to it in the IR calls constructed by
+   guest-ppc/toIR.c.
+*/
+
+
+/*---------------------------------------------------------------*/
+/*--- Misc integer helpers.                                   ---*/
+/*---------------------------------------------------------------*/
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (non-referentially-transparent) */
+/* Horrible hack.  On non-ppc platforms, return 1. */
+/* Reads a complete, consistent 64-bit TB value. */
+ULong ppcg_dirtyhelper_MFTB ( void )
+{
+#  if defined(__powerpc__)
+   ULong res;
+   UInt  lo, hi1, hi2;
+   while (1) {
+      __asm__ __volatile__ ("\n"
+         "\tmftbu %0\n"
+         "\tmftb %1\n"
+         "\tmftbu %2\n"
+         : "=r" (hi1), "=r" (lo), "=r" (hi2)
+      );
+      if (hi1 == hi2) break;
+   }
+   res = ((ULong)hi1) << 32;
+   res |= (ULong)lo;
+   return res;
+#  else
+   return 1ULL;
+#  endif
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (non-referentially transparent) */
+UInt ppc32g_dirtyhelper_MFSPR_268_269 ( UInt r269 )
+{
+#  if defined(__powerpc__)
+   UInt spr;
+   if (r269) {
+      __asm__ __volatile__("mfspr %0,269" : "=b"(spr));
+   } else {
+      __asm__ __volatile__("mfspr %0,268" : "=b"(spr));
+   }
+   return spr;
+#  else
+   return 0;
+#  endif
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (I'm not really sure what the side effects are) */
+UInt ppc32g_dirtyhelper_MFSPR_287 ( void )
+{
+#  if defined(__powerpc__)
+   UInt spr;
+   __asm__ __volatile__("mfspr %0,287" : "=b"(spr));
+   return spr;
+#  else
+   return 0;
+#  endif
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest state, writes guest mem) */
+void ppc32g_dirtyhelper_LVS ( VexGuestPPC32State* gst,
+                              UInt vD_off, UInt sh, UInt shift_right )
+{
+  static
+  UChar ref[32] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                    0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+                    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+                    0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F };
+  U128* pU128_src;
+  U128* pU128_dst;
+
+  vassert( vD_off       <= sizeof(VexGuestPPC32State)-8 );
+  vassert( sh           <= 15 );
+  vassert( shift_right  <=  1 );
+  if (shift_right)
+     sh = 16-sh;
+  /* else shift left  */
+
+  pU128_src = (U128*)&ref[sh];
+  pU128_dst = (U128*)( ((UChar*)gst) + vD_off );
+
+  (*pU128_dst)[0] = (*pU128_src)[0];
+  (*pU128_dst)[1] = (*pU128_src)[1];
+  (*pU128_dst)[2] = (*pU128_src)[2];
+  (*pU128_dst)[3] = (*pU128_src)[3];
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest state, writes guest mem) */
+void ppc64g_dirtyhelper_LVS ( VexGuestPPC64State* gst,
+                              UInt vD_off, UInt sh, UInt shift_right,
+                              UInt endness )
+{
+  UChar ref[32];
+  ULong i;
+  Int k;
+  /* ref[] used to be a static const array, but this doesn't work on
+     ppc64 because VEX doesn't load the TOC pointer for the call here,
+     and so we wind up picking up some totally random other data.
+     (It's a wonder we don't segfault.)  So, just to be clear, this
+     "fix" (vex r2073) is really a kludgearound for the fact that
+     VEX's 64-bit ppc code generation doesn't provide a valid TOC
+     pointer for helper function calls.  Ick.  (Bug 250038) */
+  for (i = 0; i < 32; i++) ref[i] = i;
+
+  U128* pU128_src;
+  U128* pU128_dst;
+
+  vassert( vD_off       <= sizeof(VexGuestPPC64State)-8 );
+  vassert( sh           <= 15 );
+  vassert( shift_right  <=  1 );
+  if (shift_right)
+     sh = 16-sh;
+  /* else shift left  */
+
+  pU128_src = (U128*)&ref[sh];
+  pU128_dst = (U128*)( ((UChar*)gst) + vD_off );
+
+  if ((0x1 & endness) == 0x0) {
+     /* Little endian */
+     unsigned char *srcp, *dstp;
+     srcp = (unsigned char *)pU128_src;
+     dstp = (unsigned char *)pU128_dst;
+     for (k = 15; k >= 0; k--, srcp++)
+        dstp[k] = *srcp;
+  } else {
+     (*pU128_dst)[0] = (*pU128_src)[0];
+     (*pU128_dst)[1] = (*pU128_src)[1];
+     (*pU128_dst)[2] = (*pU128_src)[2];
+     (*pU128_dst)[3] = (*pU128_src)[3];
+  }
+}
+
+
+/* Helper-function specialiser. */
+
+IRExpr* guest_ppc32_spechelper ( const HChar* function_name,
+                                 IRExpr** args,
+                                 IRStmt** precedingStmts,
+                                 Int      n_precedingStmts )
+{
+   return NULL;
+}
+
+IRExpr* guest_ppc64_spechelper ( const HChar* function_name,
+                                 IRExpr** args,
+                                 IRStmt** precedingStmts,
+                                 Int      n_precedingStmts )
+{
+   return NULL;
+}
+
+
+/*----------------------------------------------*/
+/*--- The exported fns ..                    ---*/
+/*----------------------------------------------*/
+
+/* VISIBLE TO LIBVEX CLIENT */
+UInt LibVEX_GuestPPC32_get_CR ( /*IN*/const VexGuestPPC32State* vex_state )
+{
+#  define FIELD(_n)                                    \
+      ( ( (UInt)                                       \
+           ( (vex_state->guest_CR##_n##_321 & (7<<1))  \
+             | (vex_state->guest_CR##_n##_0 & 1)       \
+           )                                           \
+        )                                              \
+        << (4 * (7-(_n)))                              \
+      )
+
+   return 
+      FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
+      | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);
+
+#  undef FIELD
+}
+
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Note: %CR is 32 bits even for ppc64 */
+UInt LibVEX_GuestPPC64_get_CR ( /*IN*/const VexGuestPPC64State* vex_state )
+{
+#  define FIELD(_n)                                    \
+      ( ( (UInt)                                       \
+           ( (vex_state->guest_CR##_n##_321 & (7<<1))  \
+             | (vex_state->guest_CR##_n##_0 & 1)       \
+           )                                           \
+        )                                              \
+        << (4 * (7-(_n)))                              \
+      )
+
+   return 
+      FIELD(0) | FIELD(1) | FIELD(2) | FIELD(3)
+      | FIELD(4) | FIELD(5) | FIELD(6) | FIELD(7);
+
+#  undef FIELD
+}
+
+
+/* VISIBLE TO LIBVEX CLIENT */
+void LibVEX_GuestPPC32_put_CR ( UInt cr_native,
+                                /*OUT*/VexGuestPPC32State* vex_state )
+{
+   UInt t;
+
+#  define FIELD(_n)                                           \
+      do {                                                    \
+         t = cr_native >> (4*(7-(_n)));                       \
+         vex_state->guest_CR##_n##_0 = toUChar(t & 1);        \
+         vex_state->guest_CR##_n##_321 = toUChar(t & (7<<1)); \
+      } while (0)
+
+   FIELD(0);
+   FIELD(1);
+   FIELD(2);
+   FIELD(3);
+   FIELD(4);
+   FIELD(5);
+   FIELD(6);
+   FIELD(7);
+
+#  undef FIELD
+}
+
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Note: %CR is 32 bits even for ppc64 */
+void LibVEX_GuestPPC64_put_CR ( UInt cr_native,
+                                /*OUT*/VexGuestPPC64State* vex_state )
+{
+   UInt t;
+
+#  define FIELD(_n)                                           \
+      do {                                                    \
+         t = cr_native >> (4*(7-(_n)));                       \
+         vex_state->guest_CR##_n##_0 = toUChar(t & 1);        \
+         vex_state->guest_CR##_n##_321 = toUChar(t & (7<<1)); \
+      } while (0)
+
+   FIELD(0);
+   FIELD(1);
+   FIELD(2);
+   FIELD(3);
+   FIELD(4);
+   FIELD(5);
+   FIELD(6);
+   FIELD(7);
+
+#  undef FIELD
+}
+
+
+/* VISIBLE TO LIBVEX CLIENT */
+UInt LibVEX_GuestPPC32_get_XER ( /*IN*/const VexGuestPPC32State* vex_state )
+{
+   UInt w = 0;
+   w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
+   w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
+   w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
+   w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
+   return w;
+}
+
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Note: %XER is 32 bits even for ppc64 */
+UInt LibVEX_GuestPPC64_get_XER ( /*IN*/const VexGuestPPC64State* vex_state )
+{
+   UInt w = 0;
+   w |= ( ((UInt)vex_state->guest_XER_BC) & 0xFF );
+   w |= ( (((UInt)vex_state->guest_XER_SO) & 0x1) << 31 );
+   w |= ( (((UInt)vex_state->guest_XER_OV) & 0x1) << 30 );
+   w |= ( (((UInt)vex_state->guest_XER_CA) & 0x1) << 29 );
+   return w;
+}
+
+
+/* VISIBLE TO LIBVEX CLIENT */
+void LibVEX_GuestPPC32_put_XER ( UInt xer_native,
+                                 /*OUT*/VexGuestPPC32State* vex_state )
+{
+   vex_state->guest_XER_BC = toUChar(xer_native & 0xFF);
+   vex_state->guest_XER_SO = toUChar((xer_native >> 31) & 0x1);
+   vex_state->guest_XER_OV = toUChar((xer_native >> 30) & 0x1);
+   vex_state->guest_XER_CA = toUChar((xer_native >> 29) & 0x1);
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Note: %XER is 32 bits even for ppc64 */
+void LibVEX_GuestPPC64_put_XER ( UInt xer_native,
+                                 /*OUT*/VexGuestPPC64State* vex_state )
+{
+   vex_state->guest_XER_BC = toUChar(xer_native & 0xFF);
+   vex_state->guest_XER_SO = toUChar((xer_native >> 31) & 0x1);
+   vex_state->guest_XER_OV = toUChar((xer_native >> 30) & 0x1);
+   vex_state->guest_XER_CA = toUChar((xer_native >> 29) & 0x1);
+}
+
/* VISIBLE TO LIBVEX CLIENT */
/* Set every field of a PPC32 guest state to its reset value: all
   registers zero, rounding modes round-to-nearest, VSCR in non-Java
   mode, and the redirection stack empty (REDIR_SP == -1).  Padding
   fields are zeroed too so the whole state is deterministic. */
void LibVEX_GuestPPC32_initialise ( /*OUT*/VexGuestPPC32State* vex_state )
{
   Int i;
   /* Event-check fields used by the dispatcher, plus padding. */
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER  = 0;
   vex_state->pad3 = 0;
   vex_state->pad4 = 0;

   /* Zero the integer register file. */
   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
   /* Each VSR is an array of 4 words; zero all four. */
#  define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0 );
   VECZERO(vex_state->guest_VSR1 );
   VECZERO(vex_state->guest_VSR2 );
   VECZERO(vex_state->guest_VSR3 );
   VECZERO(vex_state->guest_VSR4 );
   VECZERO(vex_state->guest_VSR5 );
   VECZERO(vex_state->guest_VSR6 );
   VECZERO(vex_state->guest_VSR7 );
   VECZERO(vex_state->guest_VSR8 );
   VECZERO(vex_state->guest_VSR9 );
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

#  undef VECZERO

   /* Control-flow registers. */
   vex_state->guest_CIA  = 0;
   vex_state->guest_LR   = 0;
   vex_state->guest_CTR  = 0;

   /* XER, stored as four separate fields. */
   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   /* Condition register, stored as a (bits 3:1, bit 0) pair per field. */
   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   /* FP and DFP rounding modes default to round-to-nearest. */
   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->pad1 = 0;
   vex_state->pad2 = 0;

   vex_state->guest_VRSAVE = 0;

   vex_state->guest_VSCR = 0x0;  // Non-Java mode = 0

   vex_state->guest_EMNOTE = EmNote_NONE;

   /* Translation-cache invalidation range. */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   /* Function-redirection stack: -1 marks it empty. */
   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC32_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;

   vex_state->padding1 = 0;
   vex_state->padding2 = 0;
}
+
+
/* VISIBLE TO LIBVEX CLIENT */
/* Set every field of a PPC64 guest state to its reset value: all
   registers zero, rounding modes round-to-nearest, VSCR in non-Java
   mode, the redirection stack empty (REDIR_SP == -1), and the
   transactional-memory SPRs (TFHAR/TFIAR/TEXASR) cleared. */
void LibVEX_GuestPPC64_initialise ( /*OUT*/VexGuestPPC64State* vex_state )
{
   Int i;
   /* Event-check fields used by the dispatcher, plus padding. */
   vex_state->host_EvC_FAILADDR = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->pad0 = 0;
   /* Zero the integer register file. */
   vex_state->guest_GPR0  = 0;
   vex_state->guest_GPR1  = 0;
   vex_state->guest_GPR2  = 0;
   vex_state->guest_GPR3  = 0;
   vex_state->guest_GPR4  = 0;
   vex_state->guest_GPR5  = 0;
   vex_state->guest_GPR6  = 0;
   vex_state->guest_GPR7  = 0;
   vex_state->guest_GPR8  = 0;
   vex_state->guest_GPR9  = 0;
   vex_state->guest_GPR10 = 0;
   vex_state->guest_GPR11 = 0;
   vex_state->guest_GPR12 = 0;
   vex_state->guest_GPR13 = 0;
   vex_state->guest_GPR14 = 0;
   vex_state->guest_GPR15 = 0;
   vex_state->guest_GPR16 = 0;
   vex_state->guest_GPR17 = 0;
   vex_state->guest_GPR18 = 0;
   vex_state->guest_GPR19 = 0;
   vex_state->guest_GPR20 = 0;
   vex_state->guest_GPR21 = 0;
   vex_state->guest_GPR22 = 0;
   vex_state->guest_GPR23 = 0;
   vex_state->guest_GPR24 = 0;
   vex_state->guest_GPR25 = 0;
   vex_state->guest_GPR26 = 0;
   vex_state->guest_GPR27 = 0;
   vex_state->guest_GPR28 = 0;
   vex_state->guest_GPR29 = 0;
   vex_state->guest_GPR30 = 0;
   vex_state->guest_GPR31 = 0;

   /* Initialise the vector state. */
   /* Each VSR is an array of 4 words; zero all four. */
#  define VECZERO(_vr) _vr[0]=_vr[1]=_vr[2]=_vr[3] = 0;

   VECZERO(vex_state->guest_VSR0 );
   VECZERO(vex_state->guest_VSR1 );
   VECZERO(vex_state->guest_VSR2 );
   VECZERO(vex_state->guest_VSR3 );
   VECZERO(vex_state->guest_VSR4 );
   VECZERO(vex_state->guest_VSR5 );
   VECZERO(vex_state->guest_VSR6 );
   VECZERO(vex_state->guest_VSR7 );
   VECZERO(vex_state->guest_VSR8 );
   VECZERO(vex_state->guest_VSR9 );
   VECZERO(vex_state->guest_VSR10);
   VECZERO(vex_state->guest_VSR11);
   VECZERO(vex_state->guest_VSR12);
   VECZERO(vex_state->guest_VSR13);
   VECZERO(vex_state->guest_VSR14);
   VECZERO(vex_state->guest_VSR15);
   VECZERO(vex_state->guest_VSR16);
   VECZERO(vex_state->guest_VSR17);
   VECZERO(vex_state->guest_VSR18);
   VECZERO(vex_state->guest_VSR19);
   VECZERO(vex_state->guest_VSR20);
   VECZERO(vex_state->guest_VSR21);
   VECZERO(vex_state->guest_VSR22);
   VECZERO(vex_state->guest_VSR23);
   VECZERO(vex_state->guest_VSR24);
   VECZERO(vex_state->guest_VSR25);
   VECZERO(vex_state->guest_VSR26);
   VECZERO(vex_state->guest_VSR27);
   VECZERO(vex_state->guest_VSR28);
   VECZERO(vex_state->guest_VSR29);
   VECZERO(vex_state->guest_VSR30);
   VECZERO(vex_state->guest_VSR31);
   VECZERO(vex_state->guest_VSR32);
   VECZERO(vex_state->guest_VSR33);
   VECZERO(vex_state->guest_VSR34);
   VECZERO(vex_state->guest_VSR35);
   VECZERO(vex_state->guest_VSR36);
   VECZERO(vex_state->guest_VSR37);
   VECZERO(vex_state->guest_VSR38);
   VECZERO(vex_state->guest_VSR39);
   VECZERO(vex_state->guest_VSR40);
   VECZERO(vex_state->guest_VSR41);
   VECZERO(vex_state->guest_VSR42);
   VECZERO(vex_state->guest_VSR43);
   VECZERO(vex_state->guest_VSR44);
   VECZERO(vex_state->guest_VSR45);
   VECZERO(vex_state->guest_VSR46);
   VECZERO(vex_state->guest_VSR47);
   VECZERO(vex_state->guest_VSR48);
   VECZERO(vex_state->guest_VSR49);
   VECZERO(vex_state->guest_VSR50);
   VECZERO(vex_state->guest_VSR51);
   VECZERO(vex_state->guest_VSR52);
   VECZERO(vex_state->guest_VSR53);
   VECZERO(vex_state->guest_VSR54);
   VECZERO(vex_state->guest_VSR55);
   VECZERO(vex_state->guest_VSR56);
   VECZERO(vex_state->guest_VSR57);
   VECZERO(vex_state->guest_VSR58);
   VECZERO(vex_state->guest_VSR59);
   VECZERO(vex_state->guest_VSR60);
   VECZERO(vex_state->guest_VSR61);
   VECZERO(vex_state->guest_VSR62);
   VECZERO(vex_state->guest_VSR63);

#  undef VECZERO

   /* Control-flow registers. */
   vex_state->guest_CIA  = 0;
   vex_state->guest_LR   = 0;
   vex_state->guest_CTR  = 0;

   /* XER, stored as four separate fields. */
   vex_state->guest_XER_SO = 0;
   vex_state->guest_XER_OV = 0;
   vex_state->guest_XER_CA = 0;
   vex_state->guest_XER_BC = 0;

   /* Condition register, stored as a (bits 3:1, bit 0) pair per field. */
   vex_state->guest_CR0_321 = 0;
   vex_state->guest_CR0_0   = 0;
   vex_state->guest_CR1_321 = 0;
   vex_state->guest_CR1_0   = 0;
   vex_state->guest_CR2_321 = 0;
   vex_state->guest_CR2_0   = 0;
   vex_state->guest_CR3_321 = 0;
   vex_state->guest_CR3_0   = 0;
   vex_state->guest_CR4_321 = 0;
   vex_state->guest_CR4_0   = 0;
   vex_state->guest_CR5_321 = 0;
   vex_state->guest_CR5_0   = 0;
   vex_state->guest_CR6_321 = 0;
   vex_state->guest_CR6_0   = 0;
   vex_state->guest_CR7_321 = 0;
   vex_state->guest_CR7_0   = 0;

   /* FP and DFP rounding modes default to round-to-nearest. */
   vex_state->guest_FPROUND  = PPCrm_NEAREST;
   vex_state->guest_DFPROUND = PPCrm_NEAREST;
   vex_state->pad1 = 0;
   vex_state->pad2 = 0;

   vex_state->guest_VRSAVE = 0;

   vex_state->guest_VSCR = 0x0;  // Non-Java mode = 0

   vex_state->guest_EMNOTE = EmNote_NONE;

   vex_state->padding = 0;

   /* Translation-cache invalidation range. */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN   = 0;

   vex_state->guest_NRADDR = 0;
   vex_state->guest_NRADDR_GPR2 = 0;

   /* Function-redirection stack: -1 marks it empty. */
   vex_state->guest_REDIR_SP = -1;
   for (i = 0; i < VEX_GUEST_PPC64_REDIR_STACK_SIZE; i++)
      vex_state->guest_REDIR_STACK[i] = 0;

   vex_state->guest_IP_AT_SYSCALL = 0;
   vex_state->guest_SPRG3_RO = 0;
   /* Transactional-memory SPRs. */
   vex_state->guest_TFHAR  = 0;
   vex_state->guest_TFIAR  = 0;
   vex_state->guest_TEXASR = 0;
}
+
+
+/*-----------------------------------------------------------*/
+/*--- Describing the ppc guest state, for the benefit     ---*/
+/*--- of iropt and instrumenters.                         ---*/
+/*-----------------------------------------------------------*/
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).  
+
+   By default we enforce precise exns for guest R1 (stack pointer),
+   CIA (current insn address) and LR (link register).  These are the
+   minimum needed to extract correct stack backtraces from ppc
+   code. [[NB: not sure if keeping LR up to date is actually
+   necessary.]]
+
+   Only R1 is needed in mode VexRegUpdSpAtMemAccess.   
+*/
+Bool guest_ppc32_state_requires_precise_mem_exns (
+        Int minoff, Int maxoff, VexRegisterUpdates pxControl
+     )
+{
+   Int lr_min  = offsetof(VexGuestPPC32State, guest_LR);
+   Int lr_max  = lr_min + 4 - 1;
+   Int r1_min  = offsetof(VexGuestPPC32State, guest_GPR1);
+   Int r1_max  = r1_min + 4 - 1;
+   Int cia_min = offsetof(VexGuestPPC32State, guest_CIA);
+   Int cia_max = cia_min + 4 - 1;
+
+   if (maxoff < r1_min || minoff > r1_max) {
+      /* no overlap with R1 */
+      if (pxControl == VexRegUpdSpAtMemAccess)
+         return False; // We only need to check stack pointer.
+   } else {
+      return True;
+   }
+
+   if (maxoff < lr_min || minoff > lr_max) {
+      /* no overlap with LR */
+   } else {
+      return True;
+   }
+
+   if (maxoff < cia_min || minoff > cia_max) {
+      /* no overlap with CIA */
+   } else {
+      return True;
+   }
+
+   return False;
+}
+
+Bool guest_ppc64_state_requires_precise_mem_exns (
+        Int minoff, Int maxoff, VexRegisterUpdates pxControl
+     )
+{
+   /* Given that R2 is a Big Deal in the ELF ppc64 ABI, it seems
+      prudent to be conservative with it, even though thus far there
+      is no evidence to suggest that it actually needs to be kept up
+      to date wrt possible exceptions. */
+   Int lr_min  = offsetof(VexGuestPPC64State, guest_LR);
+   Int lr_max  = lr_min + 8 - 1;
+   Int r1_min  = offsetof(VexGuestPPC64State, guest_GPR1);
+   Int r1_max  = r1_min + 8 - 1;
+   Int r2_min  = offsetof(VexGuestPPC64State, guest_GPR2);
+   Int r2_max  = r2_min + 8 - 1;
+   Int cia_min = offsetof(VexGuestPPC64State, guest_CIA);
+   Int cia_max = cia_min + 8 - 1;
+
+   if (maxoff < r1_min || minoff > r1_max) {
+      /* no overlap with R1 */
+      if (pxControl == VexRegUpdSpAtMemAccess)
+         return False; // We only need to check stack pointer.
+   } else {
+      return True;
+   }
+
+   if (maxoff < lr_min || minoff > lr_max) {
+      /* no overlap with LR */
+   } else {
+      return True;
+   }
+
+   if (maxoff < r2_min || minoff > r2_max) {
+      /* no overlap with R2 */
+   } else {
+      return True;
+   }
+
+   if (maxoff < cia_min || minoff > cia_max) {
+      /* no overlap with CIA */
+   } else {
+      return True;
+   }
+
+   return False;
+}
+
+
/* Expands to an { offset, size } pair for 'field' of the PPC32 guest
   state, for use in the alwaysDefd table below. */
#define ALWAYSDEFD32(field)                           \
    { offsetof(VexGuestPPC32State, field),            \
      (sizeof ((VexGuestPPC32State*)0)->field) }

/* Layout descriptor handed to iropt and instrumenters (e.g. Memcheck)
   so they know where SP/FP/IP live and which state regions are always
   defined. */
VexGuestLayout
   ppc32Guest_layout 
      = { 
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestPPC32State),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestPPC32State,guest_GPR1),
          .sizeof_SP = 4,

          /* Describe the frame pointer. */
          .offset_FP = offsetof(VexGuestPPC32State,guest_GPR1),
          .sizeof_FP = 4,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestPPC32State,guest_CIA),
          .sizeof_IP = 4,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'. */
          .n_alwaysDefd = 11,

          .alwaysDefd 
	  = { /*  0 */ ALWAYSDEFD32(guest_CIA),
	      /*  1 */ ALWAYSDEFD32(guest_EMNOTE),
	      /*  2 */ ALWAYSDEFD32(guest_CMSTART),
	      /*  3 */ ALWAYSDEFD32(guest_CMLEN),
	      /*  4 */ ALWAYSDEFD32(guest_VSCR),
	      /*  5 */ ALWAYSDEFD32(guest_FPROUND),
              /*  6 */ ALWAYSDEFD32(guest_NRADDR),
	      /*  7 */ ALWAYSDEFD32(guest_NRADDR_GPR2),
	      /*  8 */ ALWAYSDEFD32(guest_REDIR_SP),
	      /*  9 */ ALWAYSDEFD32(guest_REDIR_STACK),
	      /* 10 */ ALWAYSDEFD32(guest_IP_AT_SYSCALL)
            }
        };
+
/* Expands to an { offset, size } pair for 'field' of the PPC64 guest
   state, for use in the alwaysDefd table below. */
#define ALWAYSDEFD64(field)                           \
    { offsetof(VexGuestPPC64State, field),            \
      (sizeof ((VexGuestPPC64State*)0)->field) }

/* Layout descriptor handed to iropt and instrumenters (e.g. Memcheck)
   so they know where SP/FP/IP live and which state regions are always
   defined. */
VexGuestLayout
   ppc64Guest_layout 
      = { 
          /* Total size of the guest state, in bytes. */
          .total_sizeB = sizeof(VexGuestPPC64State),

          /* Describe the stack pointer. */
          .offset_SP = offsetof(VexGuestPPC64State,guest_GPR1),
          .sizeof_SP = 8,

          /* Describe the frame pointer. */
          .offset_FP = offsetof(VexGuestPPC64State,guest_GPR1),
          .sizeof_FP = 8,

          /* Describe the instruction pointer. */
          .offset_IP = offsetof(VexGuestPPC64State,guest_CIA),
          .sizeof_IP = 8,

          /* Describe any sections to be regarded by Memcheck as
             'always-defined'. */
          .n_alwaysDefd = 11,

          .alwaysDefd 
	  = { /*  0 */ ALWAYSDEFD64(guest_CIA),
	      /*  1 */ ALWAYSDEFD64(guest_EMNOTE),
	      /*  2 */ ALWAYSDEFD64(guest_CMSTART),
	      /*  3 */ ALWAYSDEFD64(guest_CMLEN),
	      /*  4 */ ALWAYSDEFD64(guest_VSCR),
	      /*  5 */ ALWAYSDEFD64(guest_FPROUND),
	      /*  6 */ ALWAYSDEFD64(guest_NRADDR),
	      /*  7 */ ALWAYSDEFD64(guest_NRADDR_GPR2),
	      /*  8 */ ALWAYSDEFD64(guest_REDIR_SP),
	      /*  9 */ ALWAYSDEFD64(guest_REDIR_STACK),
	      /* 10 */ ALWAYSDEFD64(guest_IP_AT_SYSCALL)
            }
        };
+
+/*---------------------------------------------------------------*/
+/*--- end                                 guest_ppc_helpers.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_ppc_toIR.c b/VEX/priv/guest_ppc_toIR.c
new file mode 100644
index 0000000..f15e166
--- /dev/null
+++ b/VEX/priv/guest_ppc_toIR.c
@@ -0,0 +1,20498 @@
+
+/*--------------------------------------------------------------------*/
+/*--- begin                                       guest_ppc_toIR.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* TODO 18/Nov/05:
+
+   Spot rld... cases which are simply left/right shifts and emit
+   Shl64/Shr64 accordingly.
+
+   Altivec
+   - datastream insns
+   - lvxl,stvxl: load/store with 'least recently used' hint
+   - vexptefp, vlogefp
+
+   LIMITATIONS:
+
+   Various, including:
+
+   - Some invalid forms of lswi and lswx are accepted when they should
+     not be.
+
+   - Floating Point:
+     - All exceptions disabled in FPSCR
+     - condition codes not set in FPSCR
+
+   - Altivec floating point:
+     - vmaddfp, vnmsubfp
+       Because we're using Java/IEEE mode (FPSCR[NJ]), rather than the
+       system default of Non-Java mode, we get some small errors
+       (lowest bit only).
+       This is because Non-Java mode brutally hacks denormalised results
+       to zero, whereas we keep maximum accuracy.  However, using
+       Non-Java mode would give us more inaccuracy, as our intermediate
+       results would then be zeroed, too.
+
+   - AbiHints for the stack red zone are only emitted for
+       unconditional calls and returns (bl, blr).  They should also be
+       emitted for conditional calls and returns, but we don't have a 
+       way to express that right now.  Ah well.
+
+   - Uses of Iop_{Add,Sub,Mul}32Fx4: the backend (host_ppc_isel.c)
+       ignores the rounding mode, and generates code that assumes
+       round-to-nearest.  This means V will compute incorrect results
+       for uses of these IROps when the rounding mode (first) arg is
+       not mkU32(Irrm_NEAREST).
+*/
+
+/* "Special" instructions.
+
+   This instruction decoder can decode four special instructions
+   which mean nothing natively (are no-ops as far as regs/mem are
+   concerned) but have meaning for supporting Valgrind.  A special
+   instruction is flagged by a 16-byte preamble:
+
+      32-bit mode: 5400183E 5400683E 5400E83E 5400983E
+                   (rlwinm 0,0,3,0,31; rlwinm 0,0,13,0,31; 
+                    rlwinm 0,0,29,0,31; rlwinm 0,0,19,0,31)
+
+      64-bit mode: 78001800 78006800 7800E802 78009802
+                   (rotldi 0,0,3; rotldi 0,0,13;
+                    rotldi 0,0,61; rotldi 0,0,51)
+
   Following that, one of the following 5 are allowed
+   (standard interpretation in parentheses):
+
+      7C210B78 (or 1,1,1)   %R3 = client_request ( %R4 )
+      7C421378 (or 2,2,2)   %R3 = guest_NRADDR
+      7C631B78 (or 3,3,3)   branch-and-link-to-noredir %R11  Big endian
+      7C631B78 (or 3,3,3)   branch-and-link-to-noredir %R12  Little endian
+      7C842378 (or 4,4,4)   %R3 = guest_NRADDR_GPR2
+      7CA52B78 (or 5,5,5)   IR injection
+
+   Any other bytes following the 16-byte preamble are illegal and
+   constitute a failure in instruction decoding.  This all assumes
+   that the preamble will never occur except in specific code
+   fragments designed for Valgrind to catch.
+*/
+
+/*  Little Endian notes  */
+/*
+ * Vector operations in little Endian mode behave in non-obvious ways at times.
+ * Below is an attempt at explaining this.
+ *
+ * LE/BE vector example
+ *   With a vector of unsigned ints declared as follows:
+ *     vector unsigned int vec_inA =
+                            { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
+ *   The '0x11111111' word is word zero in both LE and BE format.  But the
+ *   loaded vector register will have word zero on the far left in BE mode and
+ *   on the far right in LE mode. The lvx and stvx instructions work naturally
+ *   for whatever endianness is in effect.  For example, in LE mode, the stvx
+ *   stores word zero (far right word) of the vector at the lowest memory
+ *   address of the EA; in BE mode, stvx still stores word zero at the lowest
+ *   memory address, but with word zero interpreted as the one at the far left
+ *   of the register.
+ *
+ *   The lxvd2x and stxvd2x instructions are not so well suited for LE mode.
+ *   When the compiler generates an lxvd2x instruction to load the
+ *   above-declared vector of unsigned integers, it loads the vector as two
+ *   double words, but they are in BE word-wise format.  To put the vector in
+ *   the right order for LE, the compiler also generates an xxswapd after the
+ *   load, which puts it in proper LE format.  Similarly, the stxvd2x
+ *   instruction has a BE bias, storing the vector in BE word-wise format. But
+ *   the compiler also generates an xxswapd prior to the store, thus ensuring
+ *   the vector is stored in memory in the correct LE order.
+ *
 *   Vector-flavored Iops, such as Iop_V128Hito64, reference the hi and lo parts
 *   of double words and words within a vector.  Because of the reverse order
+ *   of numbering for LE as described above, the high part refers to word 1 in
+ *   LE format. When input data is saved to a guest state vector register
+ *   (e.g., via Iop_64HLtoV128), it is first saved to memory and then the
+ *   register is loaded via PPCInstr_AvLdSt, which does an lvx instruction.
+ *   The saving of the data to memory must be done in proper LE order.  For the
+ *   inverse operation of extracting data from a vector register (e.g.,
+ *   Iop_V128Hito64), the register is first saved (by PPCInstr_AvLdSt resulting
+ *   in stvx), and then integer registers are loaded from the memory location
+ *   from where the vector register was saved.  Again, this must be done in
+ *   proper LE order.  So for these various vector Iops, we have LE-specific
+ *   code in host_ppc_isel.c
+ *
+ *   Another unique behavior of vectors in LE mode is with the vector scalar
+ *   (VSX) operations that operate on "double word 0" of the source register,
+ *   storing the result in "double word 0" of the output vector register.  For
+ *   these operations, "double word 0" is interpreted as "high half of the
+ *   register" (i.e, the part on the left side).
+ *
+ */
+/* Translates PPC32/64 code to IR. */
+
+/* References
+
+#define PPC32
+   "PowerPC Microprocessor Family:
+    The Programming Environments Manual for 32-Bit Microprocessors"
+    02/21/2000
+    http://www-3.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600719DF2
+
+#define PPC64
+   "PowerPC Microprocessor Family:
+    Programming Environments Manual for 64-Bit Microprocessors"
+    06/10/2003
+   http://www-3.ibm.com/chips/techlib/techlib.nsf/techdocs/F7E732FF811F783187256FDD004D3797
+
+#define AV
+   "PowerPC Microprocessor Family:
+    AltiVec(TM) Technology Programming Environments Manual"
+    07/10/2003
+   http://www-3.ibm.com/chips/techlib/techlib.nsf/techdocs/FBFA164F824370F987256D6A006F424D
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_ppc32.h"
+#include "libvex_guest_ppc64.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_ppc_defs.h"
+
+/*------------------------------------------------------------*/
+/*--- Globals                                              ---*/
+/*------------------------------------------------------------*/
+
/* These are set at the start of the translation of an insn, right
   down in disInstr_PPC, so that we don't have to pass them around
   endlessly.  They are all constant during the translation of any
   given insn. */

/* We need to know this to do sub-register accesses correctly. */
static VexEndness host_endness;

/* Pointer to the guest code area. */
static const UChar* guest_code;

/* The guest address corresponding to guest_code[0]. */
static Addr64 guest_CIA_bbstart;

/* The guest address for the instruction currently being
   translated. */
static Addr64 guest_CIA_curr_instr;

/* The IRSB* into which we're generating code. */
static IRSB* irsb;

/* Is our guest binary 32 or 64bit?  Set at each call to
   disInstr_PPC below.  Also consulted by offsetofPPCGuestState
   further down, to select the 32- or 64-bit state layout. */
static Bool mode64 = False;
+
+// Given a pointer to a function as obtained by "& functionname" in C,
+// produce a pointer to the actual entry point for the function.  For
+// most platforms it's the identity function.  Unfortunately, on
+// ppc64-linux it isn't (sigh)
+static void* fnptr_to_fnentry( const VexAbiInfo* vbi, void* f )
+{
+   if (vbi->host_ppc_calls_use_fndescrs) {
+      /* f is a pointer to a 3-word function descriptor, of which the
+         first word is the entry address. */
+      /* note, this is correct even with cross-jitting, since this is
+         purely a host issue, not a guest one. */
+      HWord* fdescr = (HWord*)f;
+      return (void*)(fdescr[0]);
+   } else {
+      /* Simple; "& f" points directly at the code for f. */
+      return f;
+   }
+}
+
/* Sign-bit and magnitude masks for 64-bit and 32-bit values, used by
   the arithmetic helpers in this file. */
#define SIGN_BIT  0x8000000000000000ULL
#define SIGN_MASK 0x7fffffffffffffffULL
#define SIGN_BIT32  0x80000000
#define SIGN_MASK32 0x7fffffff
+
+
+/*------------------------------------------------------------*/
+/*--- Debugging output                                     ---*/
+/*------------------------------------------------------------*/
+
/* Emit a disassembly-trace line when front-end tracing is enabled. */
#define DIP(format, args...)           \
   if (vex_traceflags & VEX_TRACE_FE)  \
      vex_printf(format, ## args)

/* Format into 'buf' when front-end tracing is enabled. */
#define DIS(buf, format, args...)      \
   if (vex_traceflags & VEX_TRACE_FE)  \
      vex_sprintf(buf, format, ## args)
+
+
+/*------------------------------------------------------------*/
+/*--- Offsets of various parts of the ppc32/64 guest state ---*/
+/*------------------------------------------------------------*/
+
/* Offset of guest-state field _x for the current guest word size;
   relies on the file-scope 'mode64' flag to pick the 32- or 64-bit
   state layout. */
#define offsetofPPCGuestState(_x) \
   (mode64 ? offsetof(VexGuestPPC64State, _x) : \
             offsetof(VexGuestPPC32State, _x))

/* Shorthand offsets for the individual guest-state fields. */
#define OFFB_CIA         offsetofPPCGuestState(guest_CIA)
#define OFFB_IP_AT_SYSCALL offsetofPPCGuestState(guest_IP_AT_SYSCALL)
#define OFFB_SPRG3_RO    offsetofPPCGuestState(guest_SPRG3_RO)
#define OFFB_LR          offsetofPPCGuestState(guest_LR)
#define OFFB_CTR         offsetofPPCGuestState(guest_CTR)
#define OFFB_XER_SO      offsetofPPCGuestState(guest_XER_SO)
#define OFFB_XER_OV      offsetofPPCGuestState(guest_XER_OV)
#define OFFB_XER_CA      offsetofPPCGuestState(guest_XER_CA)
#define OFFB_XER_BC      offsetofPPCGuestState(guest_XER_BC)
#define OFFB_FPROUND     offsetofPPCGuestState(guest_FPROUND)
#define OFFB_DFPROUND    offsetofPPCGuestState(guest_DFPROUND)
#define OFFB_VRSAVE      offsetofPPCGuestState(guest_VRSAVE)
#define OFFB_VSCR        offsetofPPCGuestState(guest_VSCR)
#define OFFB_EMNOTE      offsetofPPCGuestState(guest_EMNOTE)
#define OFFB_CMSTART     offsetofPPCGuestState(guest_CMSTART)
#define OFFB_CMLEN       offsetofPPCGuestState(guest_CMLEN)
#define OFFB_NRADDR      offsetofPPCGuestState(guest_NRADDR)
#define OFFB_NRADDR_GPR2 offsetofPPCGuestState(guest_NRADDR_GPR2)
#define OFFB_TFHAR       offsetofPPCGuestState(guest_TFHAR)
#define OFFB_TEXASR      offsetofPPCGuestState(guest_TEXASR)
#define OFFB_TEXASRU     offsetofPPCGuestState(guest_TEXASRU)
#define OFFB_TFIAR       offsetofPPCGuestState(guest_TFIAR)
+
+
+/*------------------------------------------------------------*/
+/*--- Extract instruction fields                          --- */
+/*------------------------------------------------------------*/
+
/* Extract field from insn, given idx (zero = lsb) and field length.
   All macro arguments are parenthesised so the macro expands
   correctly for expression arguments (e.g. IFIELD(x, i+1, 4)), and
   the mask is built from an unsigned constant so the left shift
   never overflows into the sign bit (UB for len == 31). */
#define IFIELD( insn, idx, len ) (((insn) >> (idx)) & ((1u << (len)) - 1))
+
+/* NB: in all these helpers, bit numbering is little-endian style:
+   bit 0 is the least significant bit of the 32-bit instruction word.
+   The XT/XA/XB/XC variants build 6-bit VSX register numbers by gluing
+   an extension bit above the classic 5-bit register field. */
+
+/* Extract primary opcode, instr[31:26] */
+static UChar ifieldOPC( UInt instr ) {
+   return toUChar( IFIELD( instr, 26, 6 ) );
+}
+
+/* Extract 10-bit secondary opcode, instr[10:1] */
+static UInt ifieldOPClo10 ( UInt instr) {
+   return IFIELD( instr, 1, 10 );
+}
+
+/* Extract 9-bit secondary opcode, instr[9:1] */
+static UInt ifieldOPClo9 ( UInt instr) {
+   return IFIELD( instr, 1, 9 );
+}
+
+/* Extract 8-bit secondary opcode, instr[8:1] */
+static UInt ifieldOPClo8 ( UInt instr) {
+   return IFIELD( instr, 1, 8 );
+}
+
+/* Extract 5-bit secondary opcode, instr[5:1] */
+static UInt ifieldOPClo5 ( UInt instr) {
+   return IFIELD( instr, 1, 5 );
+}
+
+/* Extract RD (destination register) field, instr[25:21] */
+static UChar ifieldRegDS( UInt instr ) {
+   return toUChar( IFIELD( instr, 21, 5 ) );
+}
+
+/* Extract XT (destination register) field, instr[0,25:21] */
+static UChar ifieldRegXT ( UInt instr )
+{
+  UChar upper_bit = toUChar (IFIELD (instr, 0, 1));
+  UChar lower_bits = toUChar (IFIELD (instr, 21, 5));
+  return (upper_bit << 5) | lower_bits;
+}
+
+/* Extract XS (store source register) field, instr[0,25:21] */
+/* (XS occupies the same bits as XT, hence the alias.) */
+static inline UChar ifieldRegXS ( UInt instr )
+{
+  return ifieldRegXT ( instr );
+}
+
+/* Extract RA (1st source register) field, instr[20:16] */
+static UChar ifieldRegA ( UInt instr ) {
+   return toUChar( IFIELD( instr, 16, 5 ) );
+}
+
+/* Extract XA (1st source register) field, instr[2,20:16] */
+static UChar ifieldRegXA ( UInt instr )
+{
+  UChar upper_bit = toUChar (IFIELD (instr, 2, 1));
+  UChar lower_bits = toUChar (IFIELD (instr, 16, 5));
+  return (upper_bit << 5) | lower_bits;
+}
+
+/* Extract RB (2nd source register) field, instr[15:11] */
+static UChar ifieldRegB ( UInt instr ) {
+   return toUChar( IFIELD( instr, 11, 5 ) );
+}
+
+/* Extract XB (2nd source register) field, instr[1,15:11] */
+static UChar ifieldRegXB ( UInt instr )
+{
+  UChar upper_bit = toUChar (IFIELD (instr, 1, 1));
+  UChar lower_bits = toUChar (IFIELD (instr, 11, 5));
+  return (upper_bit << 5) | lower_bits;
+}
+
+/* Extract RC (3rd source register) field, instr[10:6] */
+static UChar ifieldRegC ( UInt instr ) {
+   return toUChar( IFIELD( instr, 6, 5 ) );
+}
+
+/* Extract XC (3rd source register) field, instr[3,10:6] */
+static UChar ifieldRegXC ( UInt instr )
+{
+  UChar upper_bit = toUChar (IFIELD (instr, 3, 1));
+  UChar lower_bits = toUChar (IFIELD (instr, 6, 5));
+  return (upper_bit << 5) | lower_bits;
+}
+
+/* Extract bit 10, instr[10] */
+static UChar ifieldBIT10 ( UInt instr ) {
+   return toUChar( IFIELD( instr, 10, 1 ) );
+}
+
+/* Extract 2nd lowest bit, instr[1] */
+static UChar ifieldBIT1 ( UInt instr ) {
+   return toUChar( IFIELD( instr, 1, 1 ) );
+}
+
+/* Extract lowest bit, instr[0] */
+static UChar ifieldBIT0 ( UInt instr ) {
+   return toUChar( instr & 0x1 );
+}
+
+/* Extract unsigned bottom half, instr[15:0] */
+static UInt ifieldUIMM16 ( UInt instr ) {
+   return instr & 0xFFFF;
+}
+
+/* Extract unsigned bottom 26 bits, instr[25:0] */
+static UInt ifieldUIMM26 ( UInt instr ) {
+   return instr & 0x3FFFFFF;
+}
+
+/* Extract DM field, instr[9:8] */
+static UChar ifieldDM ( UInt instr ) {
+   return toUChar( IFIELD( instr, 8, 2 ) );
+}
+
+/* Extract SHW field, instr[9:8] */
+/* (SHW occupies the same bits as DM, hence the alias.) */
+static inline UChar ifieldSHW ( UInt instr )
+{
+  return ifieldDM ( instr );
+}
+
+/*------------------------------------------------------------*/
+/*--- Guest-state identifiers                              ---*/
+/*------------------------------------------------------------*/
+
+/* Symbolic names for the guest registers the front end reads/writes
+   via the getGST/putGST style helpers (rather than raw offsets). */
+typedef enum {
+    PPC_GST_CIA,    // Current Instruction Address
+    PPC_GST_LR,     // Link Register
+    PPC_GST_CTR,    // Count Register
+    PPC_GST_XER,    // Overflow, carry flags, byte count
+    PPC_GST_CR,     // Condition Register
+    PPC_GST_FPSCR,  // Floating Point Status/Control Register
+    PPC_GST_VRSAVE, // Vector Save/Restore Register
+    PPC_GST_VSCR,   // Vector Status and Control Register
+    PPC_GST_EMWARN, // Emulation warnings
+    PPC_GST_CMSTART,// For icbi: start of area to invalidate
+    PPC_GST_CMLEN,  // For icbi: length of area to invalidate
+    PPC_GST_IP_AT_SYSCALL, // the CIA of the most recently executed SC insn
+    PPC_GST_SPRG3_RO, // SPRG3
+    PPC_GST_TFHAR,  // Transactional Failure Handler Address Register
+    PPC_GST_TFIAR,  // Transactional Failure Instruction Address Register
+    PPC_GST_TEXASR, // Transactional EXception And Summary Register
+    PPC_GST_TEXASRU, // Transactional EXception And Summary Register Upper
+    PPC_GST_MAX
+} PPC_GST;
+
+#define MASK_FPSCR_RN   0x3ULL  // Binary floating point rounding mode
+#define MASK_FPSCR_DRN  0x700000000ULL // Decimal floating point rounding mode
+// Bits 16 and 0 of VSCR (presumably NJ and SAT — confirm against the
+// Power ISA VSCR layout); only these bits are treated as valid here.
+#define MASK_VSCR_VALID 0x00010001
+
+
+/*------------------------------------------------------------*/
+/*---  FP Helpers                                          ---*/
+/*------------------------------------------------------------*/
+
+/* Reinterpret the bits of a 32-bit float as a UInt, with no numeric
+   conversion (type-pun via a union). */
+static UInt float_to_bits ( Float f )
+{
+   union { UInt bits; Float flt; } both;
+   vassert(4 == sizeof(UInt));
+   vassert(4 == sizeof(Float));
+   vassert(4 == sizeof(both));
+   both.flt = f;
+   return both.bits;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Misc Helpers                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Build a 32-bit mask with 1-bits in positions begin..end inclusive,
+   counting from the lsb (bit 0).  If begin > end the mask wraps
+   around, PPC rotate-mask style. */
+static UInt MASK32( UInt begin, UInt end )
+{
+   UInt from_begin, above_end, result;
+   vassert(begin < 32);
+   vassert(end < 32);
+   from_begin = ((UInt)(-1)) << begin;
+   /* two shifts, so end==31 never shifts by the full word width */
+   above_end  = ((UInt)(-1)) << end << 1;
+   result     = from_begin ^ above_end;
+   if (begin > end) {
+      result = ~result;   /* wrapped mask */
+   }
+   return result;
+}
+
+/* 64-bit version of MASK32: 1-bits in positions begin..end inclusive
+   (0 = lsb), wrapping if begin > end. */
+static ULong MASK64( UInt begin, UInt end )
+{
+   ULong from_begin, above_end, result;
+   vassert(begin < 64);
+   vassert(end < 64);
+   from_begin = ((ULong)(-1)) << begin;
+   /* two shifts, so end==63 never shifts by the full word width */
+   above_end  = ((ULong)(-1)) << end << 1;
+   result     = from_begin ^ above_end;
+   if (begin > end) {
+      result = ~result;   /* wrapped mask */
+   }
+   return result;
+}
+
+/* Guest address of the instruction following the current one
+   (all PPC instructions are 4 bytes). */
+static Addr64 nextInsnAddr( void )
+{
+   return 4 + guest_CIA_curr_instr;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for deconstructing the        ---*/
+/*--- ppc32/64 insn stream.                                ---*/
+/*------------------------------------------------------------*/
+
+/* Add a statement to the list held by "irsb". */
+static void stmt ( IRStmt* st )
+{
+   addStmtToIRSB( irsb, st );
+}
+
+/* Generate a new temporary of the given type, registered in irsb's
+   type environment. */
+static IRTemp newTemp ( IRType ty )
+{
+   vassert(isPlausibleIRType(ty));
+   return newIRTemp( irsb->tyenv, ty );
+}
+
+/* Various simple conversions */
+
+/* These sign-extend by shifting the field's top bit into the sign bit
+   and arithmetic-shifting back down.  NOTE(review): right shift of a
+   negative signed value is implementation-defined in C; this relies on
+   the compiler implementing it as an arithmetic shift, as VEX assumes
+   throughout. */
+
+static UChar extend_s_5to8 ( UChar x )
+{
+   return toUChar((((Int)x) << 27) >> 27);
+}
+
+static UInt extend_s_8to32( UChar x )
+{
+   return (UInt)((((Int)x) << 24) >> 24);
+}
+
+static UInt extend_s_16to32 ( UInt x )
+{
+   return (UInt)((((Int)x) << 16) >> 16);
+}
+
+static ULong extend_s_16to64 ( UInt x )
+{
+   return (ULong)((((Long)x) << 48) >> 48);
+}
+
+static ULong extend_s_26to64 ( UInt x )
+{
+   return (ULong)((((Long)x) << 38) >> 38);
+}
+
+static ULong extend_s_32to64 ( UInt x )
+{
+   return (ULong)((((Long)x) << 32) >> 32);
+}
+
+/* Do a proper-endian load of a 32-bit word, regardless of the
+   endianness of the underlying host: bytes are folded in msb-first
+   order on BE hosts and lsb-first order on LE hosts. */
+static UInt getUIntPPCendianly ( const UChar* p )
+{
+   UInt w = 0;
+   Int  i;
+   if (host_endness == VexEndnessBE) {
+      for (i = 0; i < 4; i++)
+         w = (w << 8) | p[i];
+   } else {
+      for (i = 3; i >= 0; i--)
+         w = (w << 8) | p[i];
+   }
+   return w;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for constructing IR.                         ---*/
+/*------------------------------------------------------------*/
+
+/* Emit an assignment of expression 'e' to temporary 'dst'. */
+static void assign ( IRTemp dst, IRExpr* e )
+{
+   stmt( IRStmt_WrTmp(dst, e) );
+}
+
+/* This generates a normal (non store-conditional) store, using the
+   host's endianness for the IR store. */
+static void store ( IRExpr* addr, IRExpr* data )
+{
+   IRType tyA = typeOfIRExpr(irsb->tyenv, addr);
+   vassert(tyA == Ity_I32 || tyA == Ity_I64);
+   stmt( IRStmt_Store(host_endness == VexEndnessBE ? Iend_BE : Iend_LE,
+                      addr, data) );
+}
+
+/* Thin constructor wrappers around the IR expression builders, to keep
+   the decoder bodies compact. */
+
+static IRExpr* unop ( IROp op, IRExpr* a )
+{
+   return IRExpr_Unop(op, a);
+}
+
+static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
+{
+   return IRExpr_Binop(op, a1, a2);
+}
+
+static IRExpr* triop ( IROp op, IRExpr* a1, IRExpr* a2, IRExpr* a3 )
+{
+   return IRExpr_Triop(op, a1, a2, a3);
+}
+
+static IRExpr* qop ( IROp op, IRExpr* a1, IRExpr* a2, 
+                              IRExpr* a3, IRExpr* a4 )
+{
+   return IRExpr_Qop(op, a1, a2, a3, a4);
+}
+
+/* Read the given temporary. */
+static IRExpr* mkexpr ( IRTemp tmp )
+{
+   return IRExpr_RdTmp(tmp);
+}
+
+/* Integer constants of the various widths. */
+static IRExpr* mkU8 ( UChar i )
+{
+   return IRExpr_Const(IRConst_U8(i));
+}
+
+static IRExpr* mkU16 ( UInt i )
+{
+   return IRExpr_Const(IRConst_U16(i));
+}
+
+static IRExpr* mkU32 ( UInt i )
+{
+   return IRExpr_Const(IRConst_U32(i));
+}
+
+static IRExpr* mkU64 ( ULong i )
+{
+   return IRExpr_Const(IRConst_U64(i));
+}
+
+/* 128-bit vector constant; only all-zeroes / all-ones supported. */
+static IRExpr* mkV128 ( UShort i )
+{
+   vassert(i == 0 || i == 0xffff);
+   return IRExpr_Const(IRConst_V128(i));
+}
+
+/* This generates a normal (non load-linked) load, using the host's
+   endianness for the IR load. */
+static IRExpr* load ( IRType ty, IRExpr* addr )
+{
+   IREndness end = host_endness == VexEndnessBE ? Iend_BE : Iend_LE;
+   return IRExpr_Load(end, ty, addr);
+}
+
+/* Build an LLSC (load-linked / store-conditional) statement, using the
+   host's endianness. */
+static IRStmt* stmt_load ( IRTemp result,
+                           IRExpr* addr, IRExpr* storedata )
+{
+   IREndness end = host_endness == VexEndnessBE ? Iend_BE : Iend_LE;
+   return IRStmt_LLSC(end, result, addr, storedata);
+}
+
+/* Logical OR of two Ity_I1 values, computed by widening to I32,
+   combining, and narrowing back to I1. */
+static IRExpr* mkOR1 ( IRExpr* arg1, IRExpr* arg2 )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, arg1) == Ity_I1);
+   vassert(typeOfIRExpr(irsb->tyenv, arg2) == Ity_I1);
+   return unop(Iop_32to1, binop(Iop_Or32, unop(Iop_1Uto32, arg1), 
+                                          unop(Iop_1Uto32, arg2)));
+}
+
+/* Logical AND of two Ity_I1 values, same widen/narrow scheme. */
+static IRExpr* mkAND1 ( IRExpr* arg1, IRExpr* arg2 )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, arg1) == Ity_I1);
+   vassert(typeOfIRExpr(irsb->tyenv, arg2) == Ity_I1);
+   return unop(Iop_32to1, binop(Iop_And32, unop(Iop_1Uto32, arg1), 
+                                           unop(Iop_1Uto32, arg2)));
+}
+
+/* expand V128_8Ux16 to 2x V128_16Ux8's */
+/* The widening trick: multiplying by a vector of 1s with MullEven
+   widens alternate lanes; shifting vIn right by one lane width first
+   exposes the other set of lanes to the same multiply.  Callers must
+   pass vEvn/vOdd initialised to IRTemp_INVALID. */
+static void expand8Ux16( IRExpr* vIn,
+                         /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
+{
+   IRTemp ones8x16 = newTemp(Ity_V128);
+
+   vassert(typeOfIRExpr(irsb->tyenv, vIn) == Ity_V128);
+   vassert(vEvn && *vEvn == IRTemp_INVALID);
+   vassert(vOdd && *vOdd == IRTemp_INVALID);
+   *vEvn = newTemp(Ity_V128);
+   *vOdd = newTemp(Ity_V128);
+
+   assign( ones8x16, unop(Iop_Dup8x16, mkU8(0x1)) );
+   assign( *vOdd, binop(Iop_MullEven8Ux16, mkexpr(ones8x16), vIn) );
+   assign( *vEvn, binop(Iop_MullEven8Ux16, mkexpr(ones8x16), 
+                        binop(Iop_ShrV128, vIn, mkU8(8))) );
+}
+
+/* expand V128_8Sx16 to 2x V128_16Sx8's (signed variant of the above) */
+static void expand8Sx16( IRExpr* vIn,
+                         /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
+{
+   IRTemp ones8x16 = newTemp(Ity_V128);
+
+   vassert(typeOfIRExpr(irsb->tyenv, vIn) == Ity_V128);
+   vassert(vEvn && *vEvn == IRTemp_INVALID);
+   vassert(vOdd && *vOdd == IRTemp_INVALID);
+   *vEvn = newTemp(Ity_V128);
+   *vOdd = newTemp(Ity_V128);
+
+   assign( ones8x16, unop(Iop_Dup8x16, mkU8(0x1)) );
+   assign( *vOdd, binop(Iop_MullEven8Sx16, mkexpr(ones8x16), vIn) );
+   assign( *vEvn, binop(Iop_MullEven8Sx16, mkexpr(ones8x16), 
+                        binop(Iop_ShrV128, vIn, mkU8(8))) );
+}
+
+/* expand V128_16Uto8 to 2x V128_32Ux4's (16-bit lanes, shift by 16) */
+static void expand16Ux8( IRExpr* vIn,
+                         /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
+{
+   IRTemp ones16x8 = newTemp(Ity_V128);
+
+   vassert(typeOfIRExpr(irsb->tyenv, vIn) == Ity_V128);
+   vassert(vEvn && *vEvn == IRTemp_INVALID);
+   vassert(vOdd && *vOdd == IRTemp_INVALID);
+   *vEvn = newTemp(Ity_V128);
+   *vOdd = newTemp(Ity_V128);
+
+   assign( ones16x8, unop(Iop_Dup16x8, mkU16(0x1)) );
+   assign( *vOdd, binop(Iop_MullEven16Ux8, mkexpr(ones16x8), vIn) );
+   assign( *vEvn, binop(Iop_MullEven16Ux8, mkexpr(ones16x8), 
+                        binop(Iop_ShrV128, vIn, mkU8(16))) );
+}
+
+/* expand V128_16Sto8 to 2x V128_32Sx4's (signed 16-bit variant) */
+static void expand16Sx8( IRExpr* vIn,
+                         /*OUTs*/ IRTemp* vEvn, IRTemp* vOdd )
+{
+   IRTemp ones16x8 = newTemp(Ity_V128);
+
+   vassert(typeOfIRExpr(irsb->tyenv, vIn) == Ity_V128);
+   vassert(vEvn && *vEvn == IRTemp_INVALID);
+   vassert(vOdd && *vOdd == IRTemp_INVALID);
+   *vEvn = newTemp(Ity_V128);
+   *vOdd = newTemp(Ity_V128);
+
+   assign( ones16x8, unop(Iop_Dup16x8, mkU16(0x1)) );
+   assign( *vOdd, binop(Iop_MullEven16Sx8, mkexpr(ones16x8), vIn) );
+   assign( *vEvn, binop(Iop_MullEven16Sx8, mkexpr(ones16x8), 
+                       binop(Iop_ShrV128, vIn, mkU8(16))) );
+}
+
+/* break V128 to 4xF64's*/
+/* Each 32-bit lane is reinterpreted as an F32 and widened to F64.
+   t3 is the most significant lane, t0 the least.  The out-temps must
+   arrive initialised to IRTemp_INVALID. */
+static void breakV128to4xF64( IRExpr* t128,
+                              /*OUTs*/
+                              IRTemp* t3, IRTemp* t2,
+                              IRTemp* t1, IRTemp* t0 )
+{
+   IRTemp hi64 = newTemp(Ity_I64);
+   IRTemp lo64 = newTemp(Ity_I64);
+
+   vassert(typeOfIRExpr(irsb->tyenv, t128) == Ity_V128);
+   vassert(t0 && *t0 == IRTemp_INVALID);
+   vassert(t1 && *t1 == IRTemp_INVALID);
+   vassert(t2 && *t2 == IRTemp_INVALID);
+   vassert(t3 && *t3 == IRTemp_INVALID);
+   *t0 = newTemp(Ity_F64);
+   *t1 = newTemp(Ity_F64);
+   *t2 = newTemp(Ity_F64);
+   *t3 = newTemp(Ity_F64);
+
+   assign( hi64, unop(Iop_V128HIto64, t128) );
+   assign( lo64, unop(Iop_V128to64,   t128) );
+   assign( *t3,
+           unop( Iop_F32toF64,
+                 unop( Iop_ReinterpI32asF32,
+                       unop( Iop_64HIto32, mkexpr( hi64 ) ) ) ) );
+   assign( *t2,
+           unop( Iop_F32toF64,
+                 unop( Iop_ReinterpI32asF32, unop( Iop_64to32, mkexpr( hi64 ) ) ) ) );
+   assign( *t1,
+           unop( Iop_F32toF64,
+                 unop( Iop_ReinterpI32asF32,
+                       unop( Iop_64HIto32, mkexpr( lo64 ) ) ) ) );
+   assign( *t0,
+           unop( Iop_F32toF64,
+                 unop( Iop_ReinterpI32asF32, unop( Iop_64to32, mkexpr( lo64 ) ) ) ) );
+}
+
+
+/* break V128 to 4xI32's, then sign-extend to I64's */
+/* t3 receives the most significant 32-bit lane, t0 the least.
+   Out-temps must arrive initialised to IRTemp_INVALID. */
+static void breakV128to4x64S( IRExpr* t128,
+                              /*OUTs*/
+                              IRTemp* t3, IRTemp* t2,
+                              IRTemp* t1, IRTemp* t0 )
+{
+   IRTemp hi64 = newTemp(Ity_I64);
+   IRTemp lo64 = newTemp(Ity_I64);
+
+   vassert(typeOfIRExpr(irsb->tyenv, t128) == Ity_V128);
+   vassert(t0 && *t0 == IRTemp_INVALID);
+   vassert(t1 && *t1 == IRTemp_INVALID);
+   vassert(t2 && *t2 == IRTemp_INVALID);
+   vassert(t3 && *t3 == IRTemp_INVALID);
+   *t0 = newTemp(Ity_I64);
+   *t1 = newTemp(Ity_I64);
+   *t2 = newTemp(Ity_I64);
+   *t3 = newTemp(Ity_I64);
+
+   assign( hi64, unop(Iop_V128HIto64, t128) );
+   assign( lo64, unop(Iop_V128to64,   t128) );
+   assign( *t3, unop(Iop_32Sto64, unop(Iop_64HIto32, mkexpr(hi64))) );
+   assign( *t2, unop(Iop_32Sto64, unop(Iop_64to32,   mkexpr(hi64))) );
+   assign( *t1, unop(Iop_32Sto64, unop(Iop_64HIto32, mkexpr(lo64))) );
+   assign( *t0, unop(Iop_32Sto64, unop(Iop_64to32,   mkexpr(lo64))) );
+}
+
+/* break V128 to 4xI32's, then zero-extend to I64's */
+static void breakV128to4x64U ( IRExpr* t128,
+                               /*OUTs*/
+                               IRTemp* t3, IRTemp* t2,
+                               IRTemp* t1, IRTemp* t0 )
+{
+   IRTemp hi64 = newTemp(Ity_I64);
+   IRTemp lo64 = newTemp(Ity_I64);
+
+   vassert(typeOfIRExpr(irsb->tyenv, t128) == Ity_V128);
+   vassert(t0 && *t0 == IRTemp_INVALID);
+   vassert(t1 && *t1 == IRTemp_INVALID);
+   vassert(t2 && *t2 == IRTemp_INVALID);
+   vassert(t3 && *t3 == IRTemp_INVALID);
+   *t0 = newTemp(Ity_I64);
+   *t1 = newTemp(Ity_I64);
+   *t2 = newTemp(Ity_I64);
+   *t3 = newTemp(Ity_I64);
+
+   assign( hi64, unop(Iop_V128HIto64, t128) );
+   assign( lo64, unop(Iop_V128to64,   t128) );
+   assign( *t3, unop(Iop_32Uto64, unop(Iop_64HIto32, mkexpr(hi64))) );
+   assign( *t2, unop(Iop_32Uto64, unop(Iop_64to32,   mkexpr(hi64))) );
+   assign( *t1, unop(Iop_32Uto64, unop(Iop_64HIto32, mkexpr(lo64))) );
+   assign( *t0, unop(Iop_32Uto64, unop(Iop_64to32,   mkexpr(lo64))) );
+}
+
+/* break V128 into its four 32-bit lanes, no extension */
+static void breakV128to4x32( IRExpr* t128,
+                              /*OUTs*/
+                              IRTemp* t3, IRTemp* t2,
+                              IRTemp* t1, IRTemp* t0 )
+{
+   IRTemp hi64 = newTemp(Ity_I64);
+   IRTemp lo64 = newTemp(Ity_I64);
+
+   vassert(typeOfIRExpr(irsb->tyenv, t128) == Ity_V128);
+   vassert(t0 && *t0 == IRTemp_INVALID);
+   vassert(t1 && *t1 == IRTemp_INVALID);
+   vassert(t2 && *t2 == IRTemp_INVALID);
+   vassert(t3 && *t3 == IRTemp_INVALID);
+   *t0 = newTemp(Ity_I32);
+   *t1 = newTemp(Ity_I32);
+   *t2 = newTemp(Ity_I32);
+   *t3 = newTemp(Ity_I32);
+
+   assign( hi64, unop(Iop_V128HIto64, t128) );
+   assign( lo64, unop(Iop_V128to64,   t128) );
+   assign( *t3, unop(Iop_64HIto32, mkexpr(hi64)) );
+   assign( *t2, unop(Iop_64to32,   mkexpr(hi64)) );
+   assign( *t1, unop(Iop_64HIto32, mkexpr(lo64)) );
+   assign( *t0, unop(Iop_64to32,   mkexpr(lo64)) );
+}
+
+/* Reassemble four 32-bit temps into one V128; t3 becomes the most
+   significant lane, t0 the least (inverse of breakV128to4x32). */
+static IRExpr* mkV128from32( IRTemp t3, IRTemp t2,
+                               IRTemp t1, IRTemp t0 )
+{
+   return
+      binop( Iop_64HLtoV128,
+             binop(Iop_32HLto64, mkexpr(t3), mkexpr(t2)),
+             binop(Iop_32HLto64, mkexpr(t1), mkexpr(t0))
+   );
+}
+
+
+/* Signed saturating narrow 64S to 32 */
+static IRExpr* mkQNarrow64Sto32 ( IRExpr* t64 )
+{
+   IRTemp hi32 = newTemp(Ity_I32);
+   IRTemp lo32 = newTemp(Ity_I32);
+
+   vassert(typeOfIRExpr(irsb->tyenv, t64) == Ity_I64);
+
+   assign( hi32, unop(Iop_64HIto32, t64));
+   assign( lo32, unop(Iop_64to32,   t64));
+
+   return IRExpr_ITE(
+             /* if (hi32 == (lo32 >>s 31)) */
+             binop(Iop_CmpEQ32, mkexpr(hi32),
+                   binop( Iop_Sar32, mkexpr(lo32), mkU8(31))),
+             /* then: within signed-32 range: lo half good enough */
+             mkexpr(lo32),
+             /* else: sign dep saturate: 1->0x80000000, 0->0x7FFFFFFF */
+             /* (0x7FFFFFFF + (hi32 >>u 31) gives 0x80000000 when the
+                source is negative and 0x7FFFFFFF when positive) */
+             binop(Iop_Add32, mkU32(0x7FFFFFFF),
+                   binop(Iop_Shr32, mkexpr(hi32), mkU8(31))));
+}
+
+/* Unsigned saturating narrow 64S to 32 */
+static IRExpr* mkQNarrow64Uto32 ( IRExpr* t64 )
+{
+   IRTemp hi32 = newTemp(Ity_I32);
+   IRTemp lo32 = newTemp(Ity_I32);
+
+   vassert(typeOfIRExpr(irsb->tyenv, t64) == Ity_I64);
+
+   assign( hi32, unop(Iop_64HIto32, t64));
+   assign( lo32, unop(Iop_64to32,   t64));
+
+   return IRExpr_ITE(
+            /* if (top 32 bits of t64 are 0) */
+            binop(Iop_CmpEQ32, mkexpr(hi32), mkU32(0)),
+            /* then: within unsigned-32 range: lo half good enough */
+            mkexpr(lo32),
+            /* else: positive saturate -> 0xFFFFFFFF */
+            mkU32(0xFFFFFFFF));
+}
+
+/* Signed saturate narrow 64->32, combining to V128 */
+/* t3 maps to the most significant 32-bit lane, t0 to the least. */
+static IRExpr* mkV128from4x64S ( IRExpr* t3, IRExpr* t2,
+                                 IRExpr* t1, IRExpr* t0 )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, t3) == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv, t2) == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv, t1) == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv, t0) == Ity_I64);
+   return binop(Iop_64HLtoV128,
+                binop(Iop_32HLto64,
+                      mkQNarrow64Sto32( t3 ),
+                      mkQNarrow64Sto32( t2 )),
+                binop(Iop_32HLto64,
+                      mkQNarrow64Sto32( t1 ),
+                      mkQNarrow64Sto32( t0 )));
+}
+
+/* Unsigned saturate narrow 64->32, combining to V128 */
+static IRExpr* mkV128from4x64U ( IRExpr* t3, IRExpr* t2,
+                                 IRExpr* t1, IRExpr* t0 )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, t3) == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv, t2) == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv, t1) == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv, t0) == Ity_I64);
+   return binop(Iop_64HLtoV128,
+                binop(Iop_32HLto64,
+                      mkQNarrow64Uto32( t3 ),
+                      mkQNarrow64Uto32( t2 )),
+                binop(Iop_32HLto64,
+                      mkQNarrow64Uto32( t1 ),
+                      mkQNarrow64Uto32( t0 )));
+}
+
+/* Simulate irops Iop_MullOdd*, since we don't have them  */
+/* Shifting both operands right by one lane width moves the odd lanes
+   into even positions, so MullEven then multiplies the original odd
+   lanes.  NOTE: each argument expression is expanded twice — pass only
+   side-effect-free IRExpr builders. */
+#define MK_Iop_MullOdd8Ux16( expr_vA, expr_vB ) \
+      binop(Iop_MullEven8Ux16, \
+            binop(Iop_ShrV128, expr_vA, mkU8(8)), \
+            binop(Iop_ShrV128, expr_vB, mkU8(8)))
+
+#define MK_Iop_MullOdd8Sx16( expr_vA, expr_vB ) \
+      binop(Iop_MullEven8Sx16, \
+            binop(Iop_ShrV128, expr_vA, mkU8(8)), \
+            binop(Iop_ShrV128, expr_vB, mkU8(8)))
+
+#define MK_Iop_MullOdd16Ux8( expr_vA, expr_vB ) \
+      binop(Iop_MullEven16Ux8, \
+            binop(Iop_ShrV128, expr_vA, mkU8(16)), \
+            binop(Iop_ShrV128, expr_vB, mkU8(16)))
+
+#define MK_Iop_MullOdd32Ux4( expr_vA, expr_vB ) \
+      binop(Iop_MullEven32Ux4, \
+            binop(Iop_ShrV128, expr_vA, mkU8(32)), \
+            binop(Iop_ShrV128, expr_vB, mkU8(32)))
+
+#define MK_Iop_MullOdd16Sx8( expr_vA, expr_vB ) \
+      binop(Iop_MullEven16Sx8, \
+            binop(Iop_ShrV128, expr_vA, mkU8(16)), \
+            binop(Iop_ShrV128, expr_vB, mkU8(16)))
+
+#define MK_Iop_MullOdd32Sx4( expr_vA, expr_vB ) \
+      binop(Iop_MullEven32Sx4, \
+            binop(Iop_ShrV128, expr_vA, mkU8(32)), \
+            binop(Iop_ShrV128, expr_vB, mkU8(32)))
+
+
+/* Sign-extend the low 32 bits of an I64 back to I64. */
+static IRExpr* /* :: Ity_I64 */ mk64lo32Sto64 ( IRExpr* src )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, src) == Ity_I64);
+   return unop(Iop_32Sto64, unop(Iop_64to32, src));
+}
+
+/* Zero-extend the low 32 bits of an I64 back to I64. */
+static IRExpr* /* :: Ity_I64 */ mk64lo32Uto64 ( IRExpr* src )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, src) == Ity_I64);
+   return unop(Iop_32Uto64, unop(Iop_64to32, src));
+}
+
+/* Widen an 8-bit IROp to the variant matching 'ty' by adding an
+   offset to the enum value.  NOTE(review): this assumes the 8/16/32/64
+   -bit variants of each listed op are declared consecutively in the
+   IROp enumeration (see libvex_ir.h) — confirm if that enum changes. */
+static IROp mkSzOp ( IRType ty, IROp op8 )
+{
+   Int adj;
+   vassert(ty == Ity_I8  || ty == Ity_I16 ||
+           ty == Ity_I32 || ty == Ity_I64);
+   vassert(op8 == Iop_Add8   || op8 == Iop_Sub8   || op8 == Iop_Mul8 ||
+           op8 == Iop_Or8    || op8 == Iop_And8   || op8 == Iop_Xor8 ||
+           op8 == Iop_Shl8   || op8 == Iop_Shr8   || op8 == Iop_Sar8 ||
+           op8 == Iop_CmpEQ8 || op8 == Iop_CmpNE8 ||
+           op8 == Iop_Not8 );
+   adj = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : (ty==Ity_I32 ? 2 : 3));
+   return adj + op8;
+}
+
+/* Make sure we get valid 32 and 64bit addresses */
+/* In 32-bit mode the address is the sign-extension of its low word. */
+static Addr64 mkSzAddr ( IRType ty, Addr64 addr )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ( ty == Ity_I64 ?
+            (Addr64)addr :
+            (Addr64)extend_s_32to64( toUInt(addr) ) );
+}
+
+/* sz, ULong -> IRExpr */
+/* Immediate constant of the guest word size (truncates in 32-bit mode). */
+static IRExpr* mkSzImm ( IRType ty, ULong imm64 )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ty == Ity_I64 ? mkU64(imm64) : mkU32((UInt)imm64);
+}
+
+/* sz, ULong -> IRConst */
+static IRConst* mkSzConst ( IRType ty, ULong imm64 )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ( ty == Ity_I64 ?
+            IRConst_U64(imm64) :
+            IRConst_U32((UInt)imm64) );
+}
+
+/* Sign extend imm16 -> IRExpr* */
+static IRExpr* mkSzExtendS16 ( IRType ty, UInt imm16 )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ( ty == Ity_I64 ?
+            mkU64(extend_s_16to64(imm16)) :
+            mkU32(extend_s_16to32(imm16)) );
+}
+
+/* Sign extend imm32 -> IRExpr* */
+static IRExpr* mkSzExtendS32 ( IRType ty, UInt imm32 )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ( ty == Ity_I64 ?
+            mkU64(extend_s_32to64(imm32)) :
+            mkU32(imm32) );
+}
+
+/* IR narrows I32/I64 -> I8/I16/I32 */
+/* 'ty' is the type of 'src' (the guest word size), not the target. */
+static IRExpr* mkNarrowTo8 ( IRType ty, IRExpr* src )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ty == Ity_I64 ? unop(Iop_64to8, src) : unop(Iop_32to8, src);
+}
+
+static IRExpr* mkNarrowTo16 ( IRType ty, IRExpr* src )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ty == Ity_I64 ? unop(Iop_64to16, src) : unop(Iop_32to16, src);
+}
+
+/* Identity when src is already 32 bits wide. */
+static IRExpr* mkNarrowTo32 ( IRType ty, IRExpr* src )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   return ty == Ity_I64 ? unop(Iop_64to32, src) : src;
+}
+
+/* Signed/Unsigned IR widens I8/I16/I32 -> I32/I64 */
+/* 'ty' is the target (guest word) type; 'sined' [sic] selects signed
+   vs unsigned extension. */
+static IRExpr* mkWidenFrom8 ( IRType ty, IRExpr* src, Bool sined )
+{
+   IROp op;
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   if (sined) op = (ty==Ity_I32) ? Iop_8Sto32 : Iop_8Sto64;
+   else       op = (ty==Ity_I32) ? Iop_8Uto32 : Iop_8Uto64;
+   return unop(op, src);
+}
+
+static IRExpr* mkWidenFrom16 ( IRType ty, IRExpr* src, Bool sined )
+{
+   IROp op;
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   if (sined) op = (ty==Ity_I32) ? Iop_16Sto32 : Iop_16Sto64;
+   else       op = (ty==Ity_I32) ? Iop_16Uto32 : Iop_16Uto64;
+   return unop(op, src);
+}
+
+/* Identity when the target is 32 bits. */
+static IRExpr* mkWidenFrom32 ( IRType ty, IRExpr* src, Bool sined )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I64);
+   if (ty == Ity_I32)
+      return src;
+   return (sined) ? unop(Iop_32Sto64, src) : unop(Iop_32Uto64, src);
+}
+
+
+/* Byte offset of general-purpose register 'archreg' (0..31) within the
+   guest state. */
+static Int integerGuestRegOffset ( UInt archreg )
+{
+   vassert(archreg < 32);
+   
+   // jrs: probably not necessary; only matters if we reference sub-parts
+   // of the ppc registers, but that isn't the case
+   // later: this might affect Altivec though?
+
+   switch (archreg) {
+   case  0: return offsetofPPCGuestState(guest_GPR0);
+   case  1: return offsetofPPCGuestState(guest_GPR1);
+   case  2: return offsetofPPCGuestState(guest_GPR2);
+   case  3: return offsetofPPCGuestState(guest_GPR3);
+   case  4: return offsetofPPCGuestState(guest_GPR4);
+   case  5: return offsetofPPCGuestState(guest_GPR5);
+   case  6: return offsetofPPCGuestState(guest_GPR6);
+   case  7: return offsetofPPCGuestState(guest_GPR7);
+   case  8: return offsetofPPCGuestState(guest_GPR8);
+   case  9: return offsetofPPCGuestState(guest_GPR9);
+   case 10: return offsetofPPCGuestState(guest_GPR10);
+   case 11: return offsetofPPCGuestState(guest_GPR11);
+   case 12: return offsetofPPCGuestState(guest_GPR12);
+   case 13: return offsetofPPCGuestState(guest_GPR13);
+   case 14: return offsetofPPCGuestState(guest_GPR14);
+   case 15: return offsetofPPCGuestState(guest_GPR15);
+   case 16: return offsetofPPCGuestState(guest_GPR16);
+   case 17: return offsetofPPCGuestState(guest_GPR17);
+   case 18: return offsetofPPCGuestState(guest_GPR18);
+   case 19: return offsetofPPCGuestState(guest_GPR19);
+   case 20: return offsetofPPCGuestState(guest_GPR20);
+   case 21: return offsetofPPCGuestState(guest_GPR21);
+   case 22: return offsetofPPCGuestState(guest_GPR22);
+   case 23: return offsetofPPCGuestState(guest_GPR23);
+   case 24: return offsetofPPCGuestState(guest_GPR24);
+   case 25: return offsetofPPCGuestState(guest_GPR25);
+   case 26: return offsetofPPCGuestState(guest_GPR26);
+   case 27: return offsetofPPCGuestState(guest_GPR27);
+   case 28: return offsetofPPCGuestState(guest_GPR28);
+   case 29: return offsetofPPCGuestState(guest_GPR29);
+   case 30: return offsetofPPCGuestState(guest_GPR30);
+   case 31: return offsetofPPCGuestState(guest_GPR31);
+   default: break;
+   }
+   vpanic("integerGuestRegOffset(ppc,be)"); /*notreached*/
+}
+
+/* Read GPR 'archreg' at the guest word size (I32 or I64 per mode64). */
+static IRExpr* getIReg ( UInt archreg )
+{
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   vassert(archreg < 32);
+   return IRExpr_Get( integerGuestRegOffset(archreg), ty );
+}
+
+/* Ditto, but write to a reg instead. */
+static void putIReg ( UInt archreg, IRExpr* e )
+{
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   vassert(archreg < 32);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == ty );
+   stmt( IRStmt_Put(integerGuestRegOffset(archreg), e) );
+}
+
+
+/* Floating point egisters are mapped to VSX registers[0..31]. */
+static Int floatGuestRegOffset ( UInt archreg )
+{
+   vassert(archreg < 32);
+   
+   if (host_endness == VexEndnessLE) {
+      switch (archreg) {
+         case  0: return offsetofPPCGuestState(guest_VSR0) + 8;
+         case  1: return offsetofPPCGuestState(guest_VSR1) + 8;
+         case  2: return offsetofPPCGuestState(guest_VSR2) + 8;
+         case  3: return offsetofPPCGuestState(guest_VSR3) + 8;
+         case  4: return offsetofPPCGuestState(guest_VSR4) + 8;
+         case  5: return offsetofPPCGuestState(guest_VSR5) + 8;
+         case  6: return offsetofPPCGuestState(guest_VSR6) + 8;
+         case  7: return offsetofPPCGuestState(guest_VSR7) + 8;
+         case  8: return offsetofPPCGuestState(guest_VSR8) + 8;
+         case  9: return offsetofPPCGuestState(guest_VSR9) + 8;
+         case 10: return offsetofPPCGuestState(guest_VSR10) + 8;
+         case 11: return offsetofPPCGuestState(guest_VSR11) + 8;
+         case 12: return offsetofPPCGuestState(guest_VSR12) + 8;
+         case 13: return offsetofPPCGuestState(guest_VSR13) + 8;
+         case 14: return offsetofPPCGuestState(guest_VSR14) + 8;
+         case 15: return offsetofPPCGuestState(guest_VSR15) + 8;
+         case 16: return offsetofPPCGuestState(guest_VSR16) + 8;
+         case 17: return offsetofPPCGuestState(guest_VSR17) + 8;
+         case 18: return offsetofPPCGuestState(guest_VSR18) + 8;
+         case 19: return offsetofPPCGuestState(guest_VSR19) + 8;
+         case 20: return offsetofPPCGuestState(guest_VSR20) + 8;
+         case 21: return offsetofPPCGuestState(guest_VSR21) + 8;
+         case 22: return offsetofPPCGuestState(guest_VSR22) + 8;
+         case 23: return offsetofPPCGuestState(guest_VSR23) + 8;
+         case 24: return offsetofPPCGuestState(guest_VSR24) + 8;
+         case 25: return offsetofPPCGuestState(guest_VSR25) + 8;
+         case 26: return offsetofPPCGuestState(guest_VSR26) + 8;
+         case 27: return offsetofPPCGuestState(guest_VSR27) + 8;
+         case 28: return offsetofPPCGuestState(guest_VSR28) + 8;
+         case 29: return offsetofPPCGuestState(guest_VSR29) + 8;
+         case 30: return offsetofPPCGuestState(guest_VSR30) + 8;
+         case 31: return offsetofPPCGuestState(guest_VSR31) + 8;
+         default: break;
+      }
+   } else {
+      switch (archreg) {
+         case  0: return offsetofPPCGuestState(guest_VSR0);
+         case  1: return offsetofPPCGuestState(guest_VSR1);
+         case  2: return offsetofPPCGuestState(guest_VSR2);
+         case  3: return offsetofPPCGuestState(guest_VSR3);
+         case  4: return offsetofPPCGuestState(guest_VSR4);
+         case  5: return offsetofPPCGuestState(guest_VSR5);
+         case  6: return offsetofPPCGuestState(guest_VSR6);
+         case  7: return offsetofPPCGuestState(guest_VSR7);
+         case  8: return offsetofPPCGuestState(guest_VSR8);
+         case  9: return offsetofPPCGuestState(guest_VSR9);
+         case 10: return offsetofPPCGuestState(guest_VSR10);
+         case 11: return offsetofPPCGuestState(guest_VSR11);
+         case 12: return offsetofPPCGuestState(guest_VSR12);
+         case 13: return offsetofPPCGuestState(guest_VSR13);
+         case 14: return offsetofPPCGuestState(guest_VSR14);
+         case 15: return offsetofPPCGuestState(guest_VSR15);
+         case 16: return offsetofPPCGuestState(guest_VSR16);
+         case 17: return offsetofPPCGuestState(guest_VSR17);
+         case 18: return offsetofPPCGuestState(guest_VSR18);
+         case 19: return offsetofPPCGuestState(guest_VSR19);
+         case 20: return offsetofPPCGuestState(guest_VSR20);
+         case 21: return offsetofPPCGuestState(guest_VSR21);
+         case 22: return offsetofPPCGuestState(guest_VSR22);
+         case 23: return offsetofPPCGuestState(guest_VSR23);
+         case 24: return offsetofPPCGuestState(guest_VSR24);
+         case 25: return offsetofPPCGuestState(guest_VSR25);
+         case 26: return offsetofPPCGuestState(guest_VSR26);
+         case 27: return offsetofPPCGuestState(guest_VSR27);
+         case 28: return offsetofPPCGuestState(guest_VSR28);
+         case 29: return offsetofPPCGuestState(guest_VSR29);
+         case 30: return offsetofPPCGuestState(guest_VSR30);
+         case 31: return offsetofPPCGuestState(guest_VSR31);
+         default: break;
+      }
+   }
+   vpanic("floatGuestRegOffset(ppc)"); /*notreached*/
+}
+
+/* Read FP register 'archreg' as an F64. */
+static IRExpr* getFReg ( UInt archreg )
+{
+   vassert(archreg < 32);
+   return IRExpr_Get( floatGuestRegOffset(archreg), Ity_F64 );
+}
+
+/* Ditto, but write to a reg instead. */
+static void putFReg ( UInt archreg, IRExpr* e )
+{
+   vassert(archreg < 32);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_F64);
+   stmt( IRStmt_Put(floatGuestRegOffset(archreg), e) );
+}
+
+/* get Decimal float value.  Note, they share floating point register file. */
+static IRExpr* getDReg(UInt archreg) {
+   /* Read FP register 'archreg' viewed as a 64-bit DFP value. */
+   vassert( archreg < 32 );
+   return IRExpr_Get( floatGuestRegOffset( archreg ), Ity_D64 );
+}
+static IRExpr* getDReg32(UInt archreg) {
+   /* Read FP register 'archreg' viewed as a 32-bit DFP value. */
+   vassert( archreg < 32 );
+   return IRExpr_Get( floatGuestRegOffset( archreg ), Ity_D32 );
+}
+
+/* Read a floating point register pair and combine their contents into a
+ 128-bit value */
+static IRExpr *getDReg_pair(UInt archreg) {
+   /* Reg 'archreg' supplies the high half, 'archreg+1' the low half. */
+   IRExpr *hi = getDReg( archreg );
+   IRExpr *lo = getDReg( archreg + 1 );
+   return binop( Iop_D64HLtoD128, hi, lo );
+}
+
+/* Ditto, but write to a reg instead. */
+static void putDReg32(UInt archreg, IRExpr* e) {
+   /* Write a D32 expression into FP register 'archreg'. */
+   vassert( typeOfIRExpr(irsb->tyenv, e) == Ity_D32 );
+   vassert( archreg < 32 );
+   stmt( IRStmt_Put( floatGuestRegOffset( archreg ), e ) );
+}
+
+static void putDReg(UInt archreg, IRExpr* e) {
+   /* Write a D64 expression into FP register 'archreg'. */
+   vassert( typeOfIRExpr(irsb->tyenv, e) == Ity_D64 );
+   vassert( archreg < 32 );
+   stmt( IRStmt_Put( floatGuestRegOffset( archreg ), e ) );
+}
+
+/* Write a 128-bit floating point value into a register pair. */
+static void putDReg_pair(UInt archreg, IRExpr *e) {
+   /* Split the D128: high half -> reg 'archreg', low half -> 'archreg+1'. */
+   IRTemp lo = newTemp( Ity_D64 );
+   IRTemp hi = newTemp( Ity_D64 );
+
+   vassert( archreg < 32 );
+   vassert( typeOfIRExpr(irsb->tyenv, e) == Ity_D128 );
+
+   assign( lo, unop( Iop_D128LOtoD64, e ) );
+   assign( hi, unop( Iop_D128HItoD64, e ) );
+
+   stmt( IRStmt_Put( floatGuestRegOffset( archreg ), mkexpr( hi ) ) );
+   stmt( IRStmt_Put( floatGuestRegOffset( archreg + 1 ), mkexpr( lo ) ) );
+}
+
+/* Return the guest-state offset of VSX register 'archreg' (0..63).
+   VSR0..31 occupy the same guest-state slots as the FP registers
+   (see floatGuestRegOffset above) and VSR32..63 the same slots as
+   the vector registers (see vectorGuestRegOffset below). */
+static Int vsxGuestRegOffset ( UInt archreg )
+{
+   vassert(archreg < 64);
+   switch (archreg) {
+   case  0: return offsetofPPCGuestState(guest_VSR0);
+   case  1: return offsetofPPCGuestState(guest_VSR1);
+   case  2: return offsetofPPCGuestState(guest_VSR2);
+   case  3: return offsetofPPCGuestState(guest_VSR3);
+   case  4: return offsetofPPCGuestState(guest_VSR4);
+   case  5: return offsetofPPCGuestState(guest_VSR5);
+   case  6: return offsetofPPCGuestState(guest_VSR6);
+   case  7: return offsetofPPCGuestState(guest_VSR7);
+   case  8: return offsetofPPCGuestState(guest_VSR8);
+   case  9: return offsetofPPCGuestState(guest_VSR9);
+   case 10: return offsetofPPCGuestState(guest_VSR10);
+   case 11: return offsetofPPCGuestState(guest_VSR11);
+   case 12: return offsetofPPCGuestState(guest_VSR12);
+   case 13: return offsetofPPCGuestState(guest_VSR13);
+   case 14: return offsetofPPCGuestState(guest_VSR14);
+   case 15: return offsetofPPCGuestState(guest_VSR15);
+   case 16: return offsetofPPCGuestState(guest_VSR16);
+   case 17: return offsetofPPCGuestState(guest_VSR17);
+   case 18: return offsetofPPCGuestState(guest_VSR18);
+   case 19: return offsetofPPCGuestState(guest_VSR19);
+   case 20: return offsetofPPCGuestState(guest_VSR20);
+   case 21: return offsetofPPCGuestState(guest_VSR21);
+   case 22: return offsetofPPCGuestState(guest_VSR22);
+   case 23: return offsetofPPCGuestState(guest_VSR23);
+   case 24: return offsetofPPCGuestState(guest_VSR24);
+   case 25: return offsetofPPCGuestState(guest_VSR25);
+   case 26: return offsetofPPCGuestState(guest_VSR26);
+   case 27: return offsetofPPCGuestState(guest_VSR27);
+   case 28: return offsetofPPCGuestState(guest_VSR28);
+   case 29: return offsetofPPCGuestState(guest_VSR29);
+   case 30: return offsetofPPCGuestState(guest_VSR30);
+   case 31: return offsetofPPCGuestState(guest_VSR31);
+   case 32: return offsetofPPCGuestState(guest_VSR32);
+   case 33: return offsetofPPCGuestState(guest_VSR33);
+   case 34: return offsetofPPCGuestState(guest_VSR34);
+   case 35: return offsetofPPCGuestState(guest_VSR35);
+   case 36: return offsetofPPCGuestState(guest_VSR36);
+   case 37: return offsetofPPCGuestState(guest_VSR37);
+   case 38: return offsetofPPCGuestState(guest_VSR38);
+   case 39: return offsetofPPCGuestState(guest_VSR39);
+   case 40: return offsetofPPCGuestState(guest_VSR40);
+   case 41: return offsetofPPCGuestState(guest_VSR41);
+   case 42: return offsetofPPCGuestState(guest_VSR42);
+   case 43: return offsetofPPCGuestState(guest_VSR43);
+   case 44: return offsetofPPCGuestState(guest_VSR44);
+   case 45: return offsetofPPCGuestState(guest_VSR45);
+   case 46: return offsetofPPCGuestState(guest_VSR46);
+   case 47: return offsetofPPCGuestState(guest_VSR47);
+   case 48: return offsetofPPCGuestState(guest_VSR48);
+   case 49: return offsetofPPCGuestState(guest_VSR49);
+   case 50: return offsetofPPCGuestState(guest_VSR50);
+   case 51: return offsetofPPCGuestState(guest_VSR51);
+   case 52: return offsetofPPCGuestState(guest_VSR52);
+   case 53: return offsetofPPCGuestState(guest_VSR53);
+   case 54: return offsetofPPCGuestState(guest_VSR54);
+   case 55: return offsetofPPCGuestState(guest_VSR55);
+   case 56: return offsetofPPCGuestState(guest_VSR56);
+   case 57: return offsetofPPCGuestState(guest_VSR57);
+   case 58: return offsetofPPCGuestState(guest_VSR58);
+   case 59: return offsetofPPCGuestState(guest_VSR59);
+   case 60: return offsetofPPCGuestState(guest_VSR60);
+   case 61: return offsetofPPCGuestState(guest_VSR61);
+   case 62: return offsetofPPCGuestState(guest_VSR62);
+   case 63: return offsetofPPCGuestState(guest_VSR63);
+   default: break;
+   }
+   vpanic("vsxGuestRegOffset(ppc)"); /*notreached*/
+}
+
+/* Vector registers are mapped to VSX registers[32..63]. */
+static Int vectorGuestRegOffset ( UInt archreg )
+{
+   /* Return the guest-state offset of vector register 'archreg'
+      (0..31); vector reg N shares the guest_VSR(32+N) slot. */
+   vassert(archreg < 32);
+
+   switch (archreg) {
+   case  0: return offsetofPPCGuestState(guest_VSR32);
+   case  1: return offsetofPPCGuestState(guest_VSR33);
+   case  2: return offsetofPPCGuestState(guest_VSR34);
+   case  3: return offsetofPPCGuestState(guest_VSR35);
+   case  4: return offsetofPPCGuestState(guest_VSR36);
+   case  5: return offsetofPPCGuestState(guest_VSR37);
+   case  6: return offsetofPPCGuestState(guest_VSR38);
+   case  7: return offsetofPPCGuestState(guest_VSR39);
+   case  8: return offsetofPPCGuestState(guest_VSR40);
+   case  9: return offsetofPPCGuestState(guest_VSR41);
+   case 10: return offsetofPPCGuestState(guest_VSR42);
+   case 11: return offsetofPPCGuestState(guest_VSR43);
+   case 12: return offsetofPPCGuestState(guest_VSR44);
+   case 13: return offsetofPPCGuestState(guest_VSR45);
+   case 14: return offsetofPPCGuestState(guest_VSR46);
+   case 15: return offsetofPPCGuestState(guest_VSR47);
+   case 16: return offsetofPPCGuestState(guest_VSR48);
+   case 17: return offsetofPPCGuestState(guest_VSR49);
+   case 18: return offsetofPPCGuestState(guest_VSR50);
+   case 19: return offsetofPPCGuestState(guest_VSR51);
+   case 20: return offsetofPPCGuestState(guest_VSR52);
+   case 21: return offsetofPPCGuestState(guest_VSR53);
+   case 22: return offsetofPPCGuestState(guest_VSR54);
+   case 23: return offsetofPPCGuestState(guest_VSR55);
+   case 24: return offsetofPPCGuestState(guest_VSR56);
+   case 25: return offsetofPPCGuestState(guest_VSR57);
+   case 26: return offsetofPPCGuestState(guest_VSR58);
+   case 27: return offsetofPPCGuestState(guest_VSR59);
+   case 28: return offsetofPPCGuestState(guest_VSR60);
+   case 29: return offsetofPPCGuestState(guest_VSR61);
+   case 30: return offsetofPPCGuestState(guest_VSR62);
+   case 31: return offsetofPPCGuestState(guest_VSR63);
+   default: break;
+   }
+   /* Fixed typo in the panic message (was "vextorGuestRegOffset"). */
+   vpanic("vectorGuestRegOffset(ppc)"); /*notreached*/
+}
+
+static IRExpr* getVReg ( UInt archreg )
+{
+   /* Read vector register 'archreg' (0..31) as a V128. */
+   Int off;
+   vassert(archreg < 32);
+   off = vectorGuestRegOffset(archreg);
+   return IRExpr_Get( off, Ity_V128 );
+}
+
+/* Ditto, but write to a reg instead. */
+static void putVReg ( UInt archreg, IRExpr* e )
+{
+   /* Write the V128 expression 'e' into vector register 'archreg'. */
+   Int off;
+   vassert(archreg < 32);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_V128);
+   off = vectorGuestRegOffset(archreg);
+   stmt( IRStmt_Put(off, e) );
+}
+
+/* Get contents of VSX guest register */
+static IRExpr* getVSReg ( UInt archreg )
+{
+   /* Read VSX register 'archreg' (0..63) as a V128. */
+   Int off;
+   vassert(archreg < 64);
+   off = vsxGuestRegOffset(archreg);
+   return IRExpr_Get( off, Ity_V128 );
+}
+
+/* Ditto, but write to a VSX reg instead. */
+static void putVSReg ( UInt archreg, IRExpr* e )
+{
+   /* Write the V128 expression 'e' into VSX register 'archreg'. */
+   Int off;
+   vassert(archreg < 64);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_V128);
+   off = vsxGuestRegOffset(archreg);
+   stmt( IRStmt_Put(off, e) );
+}
+
+
+/* Guest-state offset of the CRn_321 slot for CR field 'cr' (0..7).
+   Per the layout comment further down, that slot holds the <, > and
+   == bits of the field in bits 3, 2 and 1. */
+static Int guestCR321offset ( UInt cr )
+{
+   switch (cr) {
+   case 0: return offsetofPPCGuestState(guest_CR0_321 );
+   case 1: return offsetofPPCGuestState(guest_CR1_321 );
+   case 2: return offsetofPPCGuestState(guest_CR2_321 );
+   case 3: return offsetofPPCGuestState(guest_CR3_321 );
+   case 4: return offsetofPPCGuestState(guest_CR4_321 );
+   case 5: return offsetofPPCGuestState(guest_CR5_321 );
+   case 6: return offsetofPPCGuestState(guest_CR6_321 );
+   case 7: return offsetofPPCGuestState(guest_CR7_321 );
+   default: vpanic("guestCR321offset(ppc)");
+   }
+} 
+
+/* Guest-state offset of the CRn_0 slot (the SO bit, in bit 0) for CR
+   field 'cr' (0..7). */
+static Int guestCR0offset ( UInt cr )
+{
+   switch (cr) {
+   case 0: return offsetofPPCGuestState(guest_CR0_0 );
+   case 1: return offsetofPPCGuestState(guest_CR1_0 );
+   case 2: return offsetofPPCGuestState(guest_CR2_0 );
+   case 3: return offsetofPPCGuestState(guest_CR3_0 );
+   case 4: return offsetofPPCGuestState(guest_CR4_0 );
+   case 5: return offsetofPPCGuestState(guest_CR5_0 );
+   case 6: return offsetofPPCGuestState(guest_CR6_0 );
+   case 7: return offsetofPPCGuestState(guest_CR7_0 );
+   /* Fixed: the panic message previously read "guestCR3offset(ppc)". */
+   default: vpanic("guestCR0offset(ppc)");
+   }
+}
+
+typedef enum {
+   /* The three placeholders pad the enum so that each named constant
+      equals log2 of its lane width in bits, as required by
+      gen_POPCOUNT's staged accumulation loop. */
+   _placeholder0,
+   _placeholder1,
+   _placeholder2,
+   BYTE,    /* 3: count within  8-bit lanes */
+   HWORD,   /* 4: count within 16-bit lanes */
+   WORD,    /* 5: count within 32-bit lanes */
+   DWORD    /* 6: count within 64-bit lanes */
+} _popcount_data_type;
+
+/* Generate an IR sequence to do a popcount operation on the supplied
+   IRTemp, and return a new IRTemp holding the result.  'ty' may be
+   Ity_I32 or Ity_I64 only. */
+static IRTemp gen_POPCOUNT ( IRType ty, IRTemp src, _popcount_data_type data_type )
+{
+  /* Do count across 2^data_type bits,
+     byte:        data_type = 3
+     half word:   data_type = 4
+     word:        data_type = 5
+     double word: data_type = 6  (not supported for 32-bit type)
+    */
+   /* Classic SWAR divide-and-conquer bit count: stage i adds adjacent
+      (1<<i)-bit fields, with mask[i] isolating the fields. */
+   Int shift[6];
+   _popcount_data_type idx, i;
+   IRTemp mask[6];
+   IRTemp old = IRTemp_INVALID;
+   IRTemp nyu = IRTemp_INVALID;
+
+   vassert(ty == Ity_I64 || ty == Ity_I32);
+
+   if (ty == Ity_I32) {
+
+      /* 32-bit path: only mask[0..4] are assigned, so data_type must
+         be <= WORD here (DWORD is ruled out by the comment above). */
+      for (idx = 0; idx < WORD; idx++) {
+         mask[idx]  = newTemp(ty);
+         shift[idx] = 1 << idx;
+      }
+      assign(mask[0], mkU32(0x55555555));
+      assign(mask[1], mkU32(0x33333333));
+      assign(mask[2], mkU32(0x0F0F0F0F));
+      assign(mask[3], mkU32(0x00FF00FF));
+      assign(mask[4], mkU32(0x0000FFFF));
+      old = src;
+      /* 'data_type' stages of pairwise accumulation. */
+      for (i = 0; i < data_type; i++) {
+         nyu = newTemp(ty);
+         assign(nyu,
+                binop(Iop_Add32,
+                      binop(Iop_And32,
+                            mkexpr(old),
+                            mkexpr(mask[i])),
+                      binop(Iop_And32,
+                            binop(Iop_Shr32, mkexpr(old), mkU8(shift[i])),
+                            mkexpr(mask[i]))));
+         old = nyu;
+      }
+      return nyu;
+   }
+
+// else, ty == Ity_I64
+   vassert(mode64);
+
+   /* 64-bit path: six masks, lane-pair widths 1..32 bits. */
+   for (i = 0; i < DWORD; i++) {
+      mask[i] = newTemp( Ity_I64 );
+      shift[i] = 1 << i;
+   }
+   assign( mask[0], mkU64( 0x5555555555555555ULL ) );
+   assign( mask[1], mkU64( 0x3333333333333333ULL ) );
+   assign( mask[2], mkU64( 0x0F0F0F0F0F0F0F0FULL ) );
+   assign( mask[3], mkU64( 0x00FF00FF00FF00FFULL ) );
+   assign( mask[4], mkU64( 0x0000FFFF0000FFFFULL ) );
+   assign( mask[5], mkU64( 0x00000000FFFFFFFFULL ) );
+   old = src;
+   for (i = 0; i < data_type; i++) {
+      nyu = newTemp( Ity_I64 );
+      assign( nyu,
+              binop( Iop_Add64,
+                     binop( Iop_And64, mkexpr( old ), mkexpr( mask[i] ) ),
+                     binop( Iop_And64,
+                            binop( Iop_Shr64, mkexpr( old ), mkU8( shift[i] ) ),
+                            mkexpr( mask[i] ) ) ) );
+      old = nyu;
+   }
+   return nyu;
+}
+
+/* Special purpose population count function for
+ * vpopcntd in 32-bit mode.
+ */
+static IRTemp gen_vpopcntd_mode32 ( IRTemp src1, IRTemp src2 )
+{
+   /* Popcounts the 64-bit value whose two 32-bit halves are src1 and
+      src2: each half is counted with the same SWAR scheme as
+      gen_POPCOUNT uses, then the two counts are added and widened to
+      an Ity_I64 result. */
+   Int i, shift[6];
+   IRTemp mask[6];
+   IRTemp old = IRTemp_INVALID;
+   IRTemp nyu1 = IRTemp_INVALID;
+   IRTemp nyu2 = IRTemp_INVALID;
+   IRTemp retval = newTemp(Ity_I64);
+
+   vassert(!mode64);
+
+   for (i = 0; i < WORD; i++) {
+      mask[i]  = newTemp(Ity_I32);
+      shift[i] = 1 << i;
+   }
+   assign(mask[0], mkU32(0x55555555));
+   assign(mask[1], mkU32(0x33333333));
+   assign(mask[2], mkU32(0x0F0F0F0F));
+   assign(mask[3], mkU32(0x00FF00FF));
+   assign(mask[4], mkU32(0x0000FFFF));
+   /* Full popcount (all WORD stages) of the first half ... */
+   old = src1;
+   for (i = 0; i < WORD; i++) {
+      nyu1 = newTemp(Ity_I32);
+      assign(nyu1,
+             binop(Iop_Add32,
+                   binop(Iop_And32,
+                         mkexpr(old),
+                         mkexpr(mask[i])),
+                   binop(Iop_And32,
+                         binop(Iop_Shr32, mkexpr(old), mkU8(shift[i])),
+                         mkexpr(mask[i]))));
+      old = nyu1;
+   }
+
+   /* ... and of the second half. */
+   old = src2;
+   for (i = 0; i < WORD; i++) {
+      nyu2 = newTemp(Ity_I32);
+      assign(nyu2,
+             binop(Iop_Add32,
+                   binop(Iop_And32,
+                         mkexpr(old),
+                         mkexpr(mask[i])),
+                   binop(Iop_And32,
+                         binop(Iop_Shr32, mkexpr(old), mkU8(shift[i])),
+                         mkexpr(mask[i]))));
+      old = nyu2;
+   }
+   assign(retval, unop(Iop_32Uto64, binop(Iop_Add32, mkexpr(nyu1), mkexpr(nyu2))));
+   return retval;
+}
+
+
+// ROTL(src32/64, rot_amt5/6)
+static IRExpr* /* :: Ity_I32/64 */ ROTL ( IRExpr* src,
+                                          IRExpr* rot_amt )
+{
+   /* Rotate 'src' left by 'rot_amt' (an I8); 'mask' is the amount
+      reduced modulo the word size. */
+   IRExpr *mask, *rot;
+   vassert(typeOfIRExpr(irsb->tyenv,rot_amt) == Ity_I8);
+
+   if (typeOfIRExpr(irsb->tyenv,src) == Ity_I64) {
+      // rot = (src << rot_amt) | (src >> (64-rot_amt))
+      mask = binop(Iop_And8, rot_amt, mkU8(63));
+      rot  = binop(Iop_Or64,
+                binop(Iop_Shl64, src, mask),
+                binop(Iop_Shr64, src, binop(Iop_Sub8, mkU8(64), mask)));
+   } else {
+      // rot = (src << rot_amt) | (src >> (32-rot_amt))
+      mask = binop(Iop_And8, rot_amt, mkU8(31));
+      rot  = binop(Iop_Or32,
+                binop(Iop_Shl32, src, mask),
+                binop(Iop_Shr32, src, binop(Iop_Sub8, mkU8(32), mask)));
+   }
+   /* Note: the ITE not merely an optimisation; it's needed
+      because otherwise the Shr is a shift by the word size when
+      mask denotes zero.  For rotates by immediates, a lot of
+      this junk gets folded out. */
+   return IRExpr_ITE( binop(Iop_CmpNE8, mask, mkU8(0)),
+                      /* non-zero rotate */ rot,
+                      /*     zero rotate */ src);
+}
+
+/* Standard effective address calc: (rA + rB) */
+static IRExpr* ea_rA_idxd ( UInt rA, UInt rB )
+{
+   IRType ty;
+   vassert(rA < 32);
+   vassert(rB < 32);
+   ty = mode64 ? Ity_I64 : Ity_I32;
+   return binop(mkSzOp(ty, Iop_Add8), getIReg(rA), getIReg(rB));
+}
+
+/* Standard effective address calc: (rA + simm) */
+static IRExpr* ea_rA_simm ( UInt rA, UInt simm16 )
+{
+   IRType ty;
+   vassert(rA < 32);
+   ty = mode64 ? Ity_I64 : Ity_I32;
+   return binop(mkSzOp(ty, Iop_Add8), getIReg(rA),
+                mkSzExtendS16(ty, simm16));
+}
+
+/* Standard effective address calc: (rA|0) */
+static IRExpr* ea_rAor0 ( UInt rA )
+{
+   /* r0 reads as literal zero in this addressing form. */
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   vassert(rA < 32);
+   return (rA == 0) ? mkSzImm(ty, 0) : getIReg(rA);
+}
+
+/* Standard effective address calc: (rA|0) + rB */
+static IRExpr* ea_rAor0_idxd ( UInt rA, UInt rB )
+{
+   vassert(rA < 32);
+   vassert(rB < 32);
+   if (rA == 0) {
+      return getIReg(rB);
+   }
+   return ea_rA_idxd( rA, rB );
+}
+
+/* Standard effective address calc: (rA|0) + simm16 */
+static IRExpr* ea_rAor0_simm ( UInt rA, UInt simm16 )
+{
+   /* r0 reads as literal zero, leaving just the sign-extended simm. */
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   vassert(rA < 32);
+   return (rA == 0) ? mkSzExtendS16(ty, simm16)
+                    : ea_rA_simm( rA, simm16 );
+}
+
+
+/* Align effective address */
+static IRExpr* addr_align( IRExpr* addr, UChar align )
+{
+   /* Round 'addr' down to a multiple of 'align'.  Only 1, 2, 4 and 16
+      are handled; NOTE(review): there is no case for align==8 --
+      confirm callers never pass it, since any other value panics. */
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   Long mask;
+   switch (align) {
+   case 1:  return addr;                    // byte aligned
+   case 2:  mask = ((Long)-1) << 1; break;  // half-word aligned
+   case 4:  mask = ((Long)-1) << 2; break;  // word aligned
+   case 16: mask = ((Long)-1) << 4; break;  // quad-word aligned
+   default:
+      vex_printf("addr_align: align = %u\n", align);
+      vpanic("addr_align(ppc)");
+   }
+
+   vassert(typeOfIRExpr(irsb->tyenv,addr) == ty);
+   return binop( mkSzOp(ty, Iop_And8), addr, mkSzImm(ty, mask) );
+}
+
+
+/* Exit the trace if ADDR (intended to be a guest memory address) is
+   not ALIGN-aligned, generating a request for a SIGBUS followed by a
+   restart of the current insn. */
+static void gen_SIGBUS_if_misaligned ( IRTemp addr, UChar align )
+{
+   /* 'align' must be a power of two so that (align-1) is a mask of
+      the low bits whose non-zeroness means misalignment. */
+   vassert(align == 2 || align == 4 || align == 8 || align == 16);
+   if (mode64) {
+      vassert(typeOfIRTemp(irsb->tyenv, addr) == Ity_I64);
+      stmt(
+         IRStmt_Exit(
+            binop(Iop_CmpNE64,
+                  binop(Iop_And64, mkexpr(addr), mkU64(align-1)),
+                  mkU64(0)),
+            Ijk_SigBUS,
+            IRConst_U64( guest_CIA_curr_instr ), OFFB_CIA
+         )
+      );
+   } else {
+      vassert(typeOfIRTemp(irsb->tyenv, addr) == Ity_I32);
+      stmt(
+         IRStmt_Exit(
+            binop(Iop_CmpNE32,
+                  binop(Iop_And32, mkexpr(addr), mkU32(align-1)),
+                  mkU32(0)),
+            Ijk_SigBUS,
+            IRConst_U32( guest_CIA_curr_instr ), OFFB_CIA
+         )
+      );
+   }
+}
+
+
+/* Generate AbiHints which mark points at which the ELF or PowerOpen
+   ABIs say that the stack red zone (viz, -N(r1) .. -1(r1), for some
+   N) becomes undefined.  That is at function calls and returns.  ELF
+   ppc32 doesn't have this "feature" (how fortunate for it).  nia is
+   the address of the next instruction to be executed.
+*/
+static void make_redzone_AbiHint ( const VexAbiInfo* vbi, 
+                                   IRTemp nia, const HChar* who )
+{
+   /* 'who' names the call site, for the optional debug printf only. */
+   Int szB = vbi->guest_stack_redzone_size;
+   if (0) vex_printf("AbiHint: %s\n", who);
+   vassert(szB >= 0);
+   if (szB > 0) {
+      /* The hint covers [r1 - szB, r1), i.e. the redzone below the
+         stack pointer (r1). */
+      if (mode64) {
+         vassert(typeOfIRTemp(irsb->tyenv, nia) == Ity_I64);
+         stmt( IRStmt_AbiHint( 
+                  binop(Iop_Sub64, getIReg(1), mkU64(szB)), 
+                  szB,
+                  mkexpr(nia)
+         ));
+      } else {
+         vassert(typeOfIRTemp(irsb->tyenv, nia) == Ity_I32);
+         stmt( IRStmt_AbiHint( 
+                  binop(Iop_Sub32, getIReg(1), mkU32(szB)), 
+                  szB,
+                  mkexpr(nia)
+         ));
+      }
+   }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for condition codes.                         ---*/
+/*------------------------------------------------------------*/
+
+/* Condition register layout. 
+
+   In the hardware, CR is laid out like this.  The leftmost end is the
+   most significant bit in the register; however the IBM documentation
+   numbers the bits backwards for some reason.
+
+   CR0      CR1    ..........   CR6       CR7
+   0 .. 3   .......................  28 .. 31    (IBM bit numbering)
+   31  28                             3    0     (normal bit numbering)
+
+   Each CR field is 4 bits:  [<,>,==,SO]
+
+   Hence in IBM's notation, BI=0 is CR7[SO], BI=1 is CR7[==], etc.
+
+   Indexing from BI to guest state:
+
+     let    n = BI / 4
+          off = BI % 4
+     this references CR n:
+
+        off==0   ->  guest_CRn_321 >> 3
+        off==1   ->  guest_CRn_321 >> 2
+        off==2   ->  guest_CRn_321 >> 1
+        off==3   ->  guest_CRn_SO
+
+   Bear in mind the only significant bit in guest_CRn_SO is bit 0
+   (normal notation) and in guest_CRn_321 the significant bits are
+   3, 2 and 1 (normal notation).
+*/
+
+static void putCR321 ( UInt cr, IRExpr* e )
+{
+   /* Write the packed <,>,== bits (an I8) of CR field 'cr'. */
+   Int off;
+   vassert(cr < 8);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
+   off = guestCR321offset(cr);
+   stmt( IRStmt_Put(off, e) );
+}
+
+static void putCR0 ( UInt cr, IRExpr* e )
+{
+   /* Write the SO-bit slot (an I8) of CR field 'cr'. */
+   Int off;
+   vassert(cr < 8);
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
+   off = guestCR0offset(cr);
+   stmt( IRStmt_Put(off, e) );
+}
+
+static IRExpr* /* :: Ity_I8 */ getCR0 ( UInt cr )
+{
+   /* Read the SO-bit slot of CR field 'cr'. */
+   Int off;
+   vassert(cr < 8);
+   off = guestCR0offset(cr);
+   return IRExpr_Get(off, Ity_I8);
+}
+
+static IRExpr* /* :: Ity_I8 */ getCR321 ( UInt cr )
+{
+   /* Read the packed <,>,== bits of CR field 'cr'. */
+   Int off;
+   vassert(cr < 8);
+   off = guestCR321offset(cr);
+   return IRExpr_Get(off, Ity_I8);
+}
+
+/* Fetch the specified CR bit (as per IBM/hardware notation) and
+   return it at the bottom of an I32; the top 31 bits are guaranteed
+   to be zero. */
+static IRExpr* /* :: Ity_I32 */ getCRbit ( UInt bi )
+{
+   UInt n   = bi / 4;   /* which CR field */
+   UInt off = bi % 4;   /* 0..2 -> <,>,== ; 3 -> SO (see layout above) */
+   vassert(bi < 32);
+   if (off == 3) {
+      /* Fetch the SO bit for this CR field */
+      /* Note: And32 is redundant paranoia iff guest state only has 0
+         or 1 in that slot. */
+      return binop(Iop_And32, unop(Iop_8Uto32, getCR0(n)), mkU32(1));
+   } else {
+      /* Fetch the <, > or == bit for this CR field */
+      return binop( Iop_And32, 
+                    binop( Iop_Shr32, 
+                           unop(Iop_8Uto32, getCR321(n)),
+                           mkU8(toUChar(3-off)) ),
+                    mkU32(1) );
+   }
+}
+
+/* Dually, write the least significant bit of BIT to the specified CR
+   bit.  Indexing as per getCRbit. */
+static void putCRbit ( UInt bi, IRExpr* bit )
+{
+   UInt    n, off;
+   IRExpr* safe;
+   vassert(typeOfIRExpr(irsb->tyenv,bit) == Ity_I32);
+   /* 'safe' is BIT reduced to its low bit, so the merge below cannot
+      corrupt neighbouring bits. */
+   safe = binop(Iop_And32, bit, mkU32(1));
+   n   = bi / 4;
+   off = bi % 4;
+   vassert(bi < 32);
+   if (off == 3) {
+      /* This is the SO bit for this CR field */
+      putCR0(n, unop(Iop_32to8, safe));
+   } else {
+      /* Convert field position to the bit's shift within CRn_321. */
+      off = 3 - off;
+      vassert(off == 1 || off == 2 || off == 3);
+      putCR321(
+         n,
+         unop( Iop_32to8,
+               binop( Iop_Or32,
+                      /* old value with field masked out */
+                      binop(Iop_And32, unop(Iop_8Uto32, getCR321(n)),
+                                       mkU32(~(1 << off))),
+                      /* new value in the right place */
+                      binop(Iop_Shl32, safe, mkU8(toUChar(off)))
+               )
+         )
+      );
+   }
+}
+
+/* Fetch the specified CR bit (as per IBM/hardware notation) and
+   return it somewhere in an I32; it does not matter where, but
+   whichever bit it is, all other bits are guaranteed to be zero.  In
+   other words, the I32-typed expression will be zero if the bit is
+   zero and nonzero if the bit is 1.  Write into *where the index
+   of where the bit will be. */
+
+static
+IRExpr* /* :: Ity_I32 */ getCRbit_anywhere ( UInt bi, Int* where )
+{
+   UInt n   = bi / 4;   /* which CR field */
+   UInt off = bi % 4;   /* 0..2 -> <,>,== ; 3 -> SO */
+   vassert(bi < 32);
+   if (off == 3) {
+      /* Fetch the SO bit for this CR field */
+      /* Note: And32 is redundant paranoia iff guest state only has 0
+         or 1 in that slot. */
+      *where = 0;
+      return binop(Iop_And32, unop(Iop_8Uto32, getCR0(n)), mkU32(1));
+   } else {
+      /* Fetch the <, > or == bit for this CR field; unlike getCRbit,
+         leave it in place (bit 3-off) rather than shifting it down. */
+      *where = 3-off;
+      return binop( Iop_And32, 
+                    unop(Iop_8Uto32, getCR321(n)),
+                    mkU32(1 << (3-off)) );
+   }
+}
+
+/* Set the CR0 flags following an arithmetic operation.
+   (Condition Register CR0 Field Definition, PPC32 p60)
+*/
+static IRExpr* getXER_SO ( void );
+static void set_CR0 ( IRExpr* result )
+{
+   /* CmpORD{32,64}S against zero yields the LT/GT/EQ flags in bits
+      3..1 (see libvex_ir.h), matching the guest_CR0_321 layout; the
+      SO slot is refreshed from XER.SO. */
+   vassert(typeOfIRExpr(irsb->tyenv,result) == Ity_I32 ||
+           typeOfIRExpr(irsb->tyenv,result) == Ity_I64);
+   if (mode64) {
+      putCR321( 0, unop(Iop_64to8,
+                        binop(Iop_CmpORD64S, result, mkU64(0))) );
+   } else {
+      putCR321( 0, unop(Iop_32to8,
+                        binop(Iop_CmpORD32S, result, mkU32(0))) );
+   }
+   putCR0( 0, getXER_SO() );
+}
+
+
+/* Set the CR6 flags following an AltiVec compare operation.
+ * NOTE: This also works for VSX single-precision compares.
+ * */
+static void set_AV_CR6 ( IRExpr* result, Bool test_all_ones )
+{
+   /* CR6[0:3] = {all_ones, 0, all_zeros, 0}
+      all_ones  = (v[0] && v[1] && v[2] && v[3])
+      all_zeros = ~(v[0] || v[1] || v[2] || v[3])
+   */
+   IRTemp v0 = newTemp(Ity_V128);
+   IRTemp v1 = newTemp(Ity_V128);
+   IRTemp v2 = newTemp(Ity_V128);
+   IRTemp v3 = newTemp(Ity_V128);
+   IRTemp rOnes  = newTemp(Ity_I8);
+   IRTemp rZeros = newTemp(Ity_I8);
+
+   vassert(typeOfIRExpr(irsb->tyenv,result) == Ity_V128);
+
+   /* Shift each 32-bit lane of the compare result down so it can be
+      examined via V128to32. */
+   assign( v0, result );
+   assign( v1, binop(Iop_ShrV128, result, mkU8(32)) );
+   assign( v2, binop(Iop_ShrV128, result, mkU8(64)) );
+   assign( v3, binop(Iop_ShrV128, result, mkU8(96)) );
+
+   /* all_zeros: OR the lanes together and check the low 32 bits are 0. */
+   assign( rZeros, unop(Iop_1Uto8,
+       binop(Iop_CmpEQ32, mkU32(0xFFFFFFFF),
+             unop(Iop_Not32,
+                  unop(Iop_V128to32,
+                       binop(Iop_OrV128,
+                             binop(Iop_OrV128, mkexpr(v0), mkexpr(v1)),
+                             binop(Iop_OrV128, mkexpr(v2), mkexpr(v3))))
+                  ))) );
+
+   if (test_all_ones) {
+      /* all_ones: AND the lanes and check the low 32 bits are all set. */
+      assign( rOnes, unop(Iop_1Uto8,
+         binop(Iop_CmpEQ32, mkU32(0xFFFFFFFF),
+               unop(Iop_V128to32,
+                    binop(Iop_AndV128,
+                          binop(Iop_AndV128, mkexpr(v0), mkexpr(v1)),
+                          binop(Iop_AndV128, mkexpr(v2), mkexpr(v3)))
+                    ))) );
+      putCR321( 6, binop(Iop_Or8,
+                         binop(Iop_Shl8, mkexpr(rOnes),  mkU8(3)),
+                         binop(Iop_Shl8, mkexpr(rZeros), mkU8(1))) );
+   } else {
+      putCR321( 6, binop(Iop_Shl8, mkexpr(rZeros), mkU8(1)) );
+   }
+   putCR0( 6, mkU8(0) );
+} 
+
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for XER flags.                               ---*/
+/*------------------------------------------------------------*/
+
+static void putXER_SO ( IRExpr* e )
+{
+   /* Store bit 0 of the I8 expression 'e' as XER.SO. */
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
+   stmt( IRStmt_Put( OFFB_XER_SO, binop(Iop_And8, e, mkU8(1)) ) );
+}
+
+static void putXER_OV ( IRExpr* e )
+{
+   /* Store bit 0 of the I8 expression 'e' as XER.OV. */
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
+   stmt( IRStmt_Put( OFFB_XER_OV, binop(Iop_And8, e, mkU8(1)) ) );
+}
+
+static void putXER_CA ( IRExpr* e )
+{
+   /* Store bit 0 of the I8 expression 'e' as XER.CA. */
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
+   stmt( IRStmt_Put( OFFB_XER_CA, binop(Iop_And8, e, mkU8(1)) ) );
+}
+
+static void putXER_BC ( IRExpr* e )
+{
+   /* Store the low 7 bits of the I8 expression 'e' as XER byte count. */
+   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
+   stmt( IRStmt_Put( OFFB_XER_BC, binop(Iop_And8, e, mkU8(0x7F)) ) );
+}
+
+static IRExpr* /* :: Ity_I8 */ getXER_SO ( void )
+{
+   /* Read the XER summary-overflow byte from the guest state. */
+   return IRExpr_Get( OFFB_XER_SO, Ity_I8 );
+}
+
+static IRExpr* /* :: Ity_I32 */ getXER_SO32 ( void )
+{
+   /* XER.SO widened to I32 with only bit 0 retained. */
+   IRExpr* so = getXER_SO();
+   return binop( Iop_And32, unop(Iop_8Uto32, so), mkU32(1) );
+}
+
+static IRExpr* /* :: Ity_I8 */ getXER_OV ( void )
+{
+   /* Read the XER overflow byte from the guest state. */
+   return IRExpr_Get( OFFB_XER_OV, Ity_I8 );
+}
+
+static IRExpr* /* :: Ity_I32 */ getXER_OV32 ( void )
+{
+   /* XER.OV widened to I32 with only bit 0 retained. */
+   IRExpr* ov = getXER_OV();
+   return binop( Iop_And32, unop(Iop_8Uto32, ov), mkU32(1) );
+}
+
+static IRExpr* /* :: Ity_I32 */ getXER_CA32 ( void )
+{
+   /* XER.CA widened to I32 with only bit 0 retained. */
+   return binop( Iop_And32,
+                 unop( Iop_8Uto32, IRExpr_Get( OFFB_XER_CA, Ity_I8 ) ),
+                 mkU32(1) );
+}
+
+static IRExpr* /* :: Ity_I8 */ getXER_BC ( void )
+{
+   /* Read the XER byte-count field from the guest state. */
+   return IRExpr_Get( OFFB_XER_BC, Ity_I8 );
+}
+
+static IRExpr* /* :: Ity_I32 */ getXER_BC32 ( void )
+{
+   /* XER byte count widened to I32, masked to its 7 valid bits. */
+   return binop( Iop_And32,
+                 unop( Iop_8Uto32, IRExpr_Get( OFFB_XER_BC, Ity_I8 ) ),
+                 mkU32(0x7F) );
+}
+
+
+/* RES is the result of doing OP on ARGL and ARGR.  Set %XER.OV and
+   %XER.SO accordingly. */
+
+/* 32-bit version: 'op' selects the PPCG_FLAG_OP_* overflow recipe;
+   res/argL/argR must all be Ity_I32.  The computed 0/1 OV value is
+   written to XER.OV and also OR'd into XER.SO. */
+static void set_XER_OV_32( UInt op, IRExpr* res,
+                           IRExpr* argL, IRExpr* argR )
+{
+   IRTemp  t64;
+   IRExpr* xer_ov;
+   vassert(op < PPCG_FLAG_OP_NUMBER);
+   vassert(typeOfIRExpr(irsb->tyenv,res)  == Ity_I32);
+   vassert(typeOfIRExpr(irsb->tyenv,argL) == Ity_I32);
+   vassert(typeOfIRExpr(irsb->tyenv,argR) == Ity_I32);
+
+   /* NOTE(review): this local macro would clash with <stdint.h>'s
+      INT32_MIN if that header were ever included here; it is
+      #undef'd at the end of the function. */
+#  define INT32_MIN 0x80000000
+
+#  define XOR2(_aa,_bb) \
+      binop(Iop_Xor32,(_aa),(_bb))
+
+#  define XOR3(_cc,_dd,_ee) \
+      binop(Iop_Xor32,binop(Iop_Xor32,(_cc),(_dd)),(_ee))
+
+#  define AND3(_ff,_gg,_hh) \
+      binop(Iop_And32,binop(Iop_And32,(_ff),(_gg)),(_hh))
+
+#define NOT(_jj) \
+      unop(Iop_Not32, (_jj))
+
+   switch (op) {
+   case /* 0  */ PPCG_FLAG_OP_ADD:
+   case /* 1  */ PPCG_FLAG_OP_ADDE:
+      /* (argL^argR^-1) & (argL^res) & (1<<31)  ?1:0 */
+      // i.e. ((both_same_sign) & (sign_changed) & (sign_mask))
+      xer_ov 
+         = AND3( XOR3(argL,argR,mkU32(-1)),
+                 XOR2(argL,res),
+                 mkU32(INT32_MIN) );
+      /* xer_ov can only be 0 or 1<<31 */
+      xer_ov 
+         = binop(Iop_Shr32, xer_ov, mkU8(31) );
+      break;
+      
+   case /* 2  */ PPCG_FLAG_OP_DIVW:
+      /* (argL == INT32_MIN && argR == -1) || argR == 0 */
+      xer_ov
+         = mkOR1(
+              mkAND1( 
+                 binop(Iop_CmpEQ32, argL, mkU32(INT32_MIN)),
+                 binop(Iop_CmpEQ32, argR, mkU32(-1)) 
+              ),
+              binop(Iop_CmpEQ32, argR, mkU32(0) ) 
+           );
+      xer_ov 
+         = unop(Iop_1Uto32, xer_ov);
+      break;
+      
+   case /* 3  */ PPCG_FLAG_OP_DIVWU:
+      /* argR == 0 */
+      xer_ov 
+         = unop(Iop_1Uto32, binop(Iop_CmpEQ32, argR, mkU32(0)));
+      break;
+      
+   case /* 4  */ PPCG_FLAG_OP_MULLW:
+      /* OV true if result can't be represented in 32 bits
+         i.e sHi != sign extension of sLo */
+      t64 = newTemp(Ity_I64);
+      assign( t64, binop(Iop_MullS32, argL, argR) );
+      xer_ov 
+         = binop( Iop_CmpNE32,
+                  unop(Iop_64HIto32, mkexpr(t64)),
+                  binop( Iop_Sar32, 
+                         unop(Iop_64to32, mkexpr(t64)), 
+                         mkU8(31))
+                  );
+      xer_ov
+         = unop(Iop_1Uto32, xer_ov);
+      break;
+      
+   case /* 5  */ PPCG_FLAG_OP_NEG:
+      /* argL == INT32_MIN */
+      xer_ov
+         = unop( Iop_1Uto32, 
+                 binop(Iop_CmpEQ32, argL, mkU32(INT32_MIN)) );
+      break;
+      
+   case /* 6  */ PPCG_FLAG_OP_SUBF:
+   case /* 7  */ PPCG_FLAG_OP_SUBFC:
+   case /* 8  */ PPCG_FLAG_OP_SUBFE:
+      /* ((~argL)^argR^-1) & ((~argL)^res) & (1<<31) ?1:0; */
+      xer_ov 
+         = AND3( XOR3(NOT(argL),argR,mkU32(-1)),
+                 XOR2(NOT(argL),res),
+                 mkU32(INT32_MIN) );
+      /* xer_ov can only be 0 or 1<<31 */
+      xer_ov 
+         = binop(Iop_Shr32, xer_ov, mkU8(31) );
+      break;
+      
+   case PPCG_FLAG_OP_DIVWEU:
+      /* Divide-extended unsigned: overflow if argR == 0 or the
+         quotient cannot fit (divisor <= high part of dividend). */
+      xer_ov
+               = binop( Iop_Or32,
+                        unop( Iop_1Uto32, binop( Iop_CmpEQ32, argR, mkU32( 0 ) ) ),
+                        unop( Iop_1Uto32, binop( Iop_CmpLT32U, argR, argL ) ) );
+      break;
+
+   case PPCG_FLAG_OP_DIVWE:
+
+      /* If argR == 0 of if the result cannot fit in the 32-bit destination register,
+       * then OV <- 1.   If dest reg is 0 AND both dividend and divisor are non-zero,
+       * an overflow is implied.
+       */
+      xer_ov = binop( Iop_Or32,
+                      unop( Iop_1Uto32, binop( Iop_CmpEQ32, argR, mkU32( 0 ) ) ),
+                      unop( Iop_1Uto32, mkAND1( binop( Iop_CmpEQ32, res, mkU32( 0 ) ),
+                              mkAND1( binop( Iop_CmpNE32, argL, mkU32( 0 ) ),
+                                      binop( Iop_CmpNE32, argR, mkU32( 0 ) ) ) ) ) );
+      break;
+
+
+
+   default: 
+      vex_printf("set_XER_OV: op = %u\n", op);
+      vpanic("set_XER_OV(ppc)");
+   }
+   
+   /* xer_ov MUST denote either 0 or 1, no other value allowed */
+   putXER_OV( unop(Iop_32to8, xer_ov) );
+
+   /* Update the summary overflow */
+   putXER_SO( binop(Iop_Or8, getXER_SO(), getXER_OV()) );
+
+#  undef INT32_MIN
+#  undef AND3
+#  undef XOR3
+#  undef XOR2
+#  undef NOT
+}
+
+static void set_XER_OV_64( UInt op, IRExpr* res,
+                           IRExpr* argL, IRExpr* argR )
+{
+   IRExpr* xer_ov;
+   vassert(op < PPCG_FLAG_OP_NUMBER);
+   vassert(typeOfIRExpr(irsb->tyenv,res)  == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv,argL) == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv,argR) == Ity_I64);
+
+#  define INT64_MIN 0x8000000000000000ULL
+
+#  define XOR2(_aa,_bb) \
+      binop(Iop_Xor64,(_aa),(_bb))
+
+#  define XOR3(_cc,_dd,_ee) \
+      binop(Iop_Xor64,binop(Iop_Xor64,(_cc),(_dd)),(_ee))
+
+#  define AND3(_ff,_gg,_hh) \
+      binop(Iop_And64,binop(Iop_And64,(_ff),(_gg)),(_hh))
+
+#define NOT(_jj) \
+      unop(Iop_Not64, (_jj))
+
+   switch (op) {
+   case /* 0  */ PPCG_FLAG_OP_ADD:
+   case /* 1  */ PPCG_FLAG_OP_ADDE:
+      /* (argL^argR^-1) & (argL^res) & (1<<63)  ? 1:0 */
+      // i.e. ((both_same_sign) & (sign_changed) & (sign_mask))
+      xer_ov 
+         = AND3( XOR3(argL,argR,mkU64(-1)),
+                 XOR2(argL,res),
+                 mkU64(INT64_MIN) );
+      /* xer_ov can only be 0 or 1<<63 */
+      xer_ov 
+         = unop(Iop_64to1, binop(Iop_Shr64, xer_ov, mkU8(63)));
+      break;
+      
+   case /* 2  */ PPCG_FLAG_OP_DIVW:
+      /* (argL == INT64_MIN && argR == -1) || argR == 0 */
+      xer_ov
+         = mkOR1(
+              mkAND1( 
+                 binop(Iop_CmpEQ64, argL, mkU64(INT64_MIN)),
+                 binop(Iop_CmpEQ64, argR, mkU64(-1)) 
+              ),
+              binop(Iop_CmpEQ64, argR, mkU64(0) ) 
+           );
+      break;
+
+   case /* 3  */ PPCG_FLAG_OP_DIVWU:
+      /* argR == 0 */
+      xer_ov 
+         = binop(Iop_CmpEQ64, argR, mkU64(0));
+      break;
+      
+   case /* 4  */ PPCG_FLAG_OP_MULLW: {
+      /* OV true if result can't be represented in 64 bits
+         i.e sHi != sign extension of sLo */
+      xer_ov 
+         = binop( Iop_CmpNE32,
+                  unop(Iop_64HIto32, res),
+                  binop( Iop_Sar32, 
+                         unop(Iop_64to32, res), 
+                         mkU8(31))
+                  );
+      break;
+   }
+      
+   case /* 5  */ PPCG_FLAG_OP_NEG:
+      /* argL == INT64_MIN */
+      xer_ov
+         = binop(Iop_CmpEQ64, argL, mkU64(INT64_MIN));
+      break;
+      
+   case /* 6  */ PPCG_FLAG_OP_SUBF:
+   case /* 7  */ PPCG_FLAG_OP_SUBFC:
+   case /* 8  */ PPCG_FLAG_OP_SUBFE:
+      /* ((~argL)^argR^-1) & ((~argL)^res) & (1<<63) ?1:0; */
+      xer_ov 
+         = AND3( XOR3(NOT(argL),argR,mkU64(-1)),
+                 XOR2(NOT(argL),res),
+                 mkU64(INT64_MIN) );
+      /* xer_ov can only be 0 or 1<<63 */
+      xer_ov 
+         = unop(Iop_64to1, binop(Iop_Shr64, xer_ov, mkU8(63)));
+      break;
+      
+   case PPCG_FLAG_OP_DIVDE:
+
+      /* If argR == 0, we must set the OV bit.  But there's another condition
+       * where we can get overflow set for divde . . . when the
+       * result cannot fit in the 64-bit destination register.  If dest reg is 0 AND
+       * both dividend and divisor are non-zero, it implies an overflow.
+       */
+      xer_ov
+                  = mkOR1( binop( Iop_CmpEQ64, argR, mkU64( 0 ) ),
+                           mkAND1( binop( Iop_CmpEQ64, res, mkU64( 0 ) ),
+                                   mkAND1( binop( Iop_CmpNE64, argL, mkU64( 0 ) ),
+                                           binop( Iop_CmpNE64, argR, mkU64( 0 ) ) ) ) );
+      break;
+
+   case PPCG_FLAG_OP_DIVDEU:
+     /* If argR == 0 or if argL >= argR, set OV. */
+     xer_ov = mkOR1( binop( Iop_CmpEQ64, argR, mkU64( 0 ) ),
+                         binop( Iop_CmpLE64U, argR, argL ) );
+     break;
+
+   case /* 18 */ PPCG_FLAG_OP_MULLD: {
+      IRTemp  t128;
+      /* OV true if result can't be represented in 64 bits
+         i.e sHi != sign extension of sLo */
+      t128 = newTemp(Ity_I128);
+      assign( t128, binop(Iop_MullS64, argL, argR) );
+      xer_ov 
+         = binop( Iop_CmpNE64,
+                  unop(Iop_128HIto64, mkexpr(t128)),
+                  binop( Iop_Sar64,
+                         unop(Iop_128to64, mkexpr(t128)),
+                         mkU8(63))
+                  );
+      break;
+   }
+      
+   default: 
+      vex_printf("set_XER_OV: op = %u\n", op);
+      vpanic("set_XER_OV(ppc64)");
+   }
+   
+   /* xer_ov MUST denote either 0 or 1, no other value allowed */
+   putXER_OV( unop(Iop_1Uto8, xer_ov) );
+
+   /* Update the summary overflow */
+   putXER_SO( binop(Iop_Or8, getXER_SO(), getXER_OV()) );
+
+#  undef INT64_MIN
+#  undef AND3
+#  undef XOR3
+#  undef XOR2
+#  undef NOT
+}
+
+static void set_XER_OV ( IRType ty, UInt op, IRExpr* res,
+                         IRExpr* argL, IRExpr* argR )
+{
+   if (ty == Ity_I32)
+      set_XER_OV_32( op, res, argL, argR );
+   else
+      set_XER_OV_64( op, res, argL, argR );
+}
+
+
+
+/* RES is the result of doing OP on ARGL and ARGR with the old %XER.CA
+   value being OLDCA.  Set %XER.CA accordingly. */
+
+static void set_XER_CA_32 ( UInt op, IRExpr* res,
+                            IRExpr* argL, IRExpr* argR, IRExpr* oldca )
+{
+   IRExpr* xer_ca;
+   vassert(op < PPCG_FLAG_OP_NUMBER);
+   vassert(typeOfIRExpr(irsb->tyenv,res)   == Ity_I32);
+   vassert(typeOfIRExpr(irsb->tyenv,argL)  == Ity_I32);
+   vassert(typeOfIRExpr(irsb->tyenv,argR)  == Ity_I32);
+   vassert(typeOfIRExpr(irsb->tyenv,oldca) == Ity_I32);
+
+   /* Incoming oldca is assumed to hold the values 0 or 1 only.  This
+      seems reasonable given that it's always generated by
+      getXER_CA32(), which masks it accordingly.  In any case it being
+      0 or 1 is an invariant of the ppc guest state representation;
+      if it has any other value, that invariant has been violated. */
+
+   switch (op) {
+   case /* 0 */ PPCG_FLAG_OP_ADD:
+      /* res <u argL */
+      xer_ca
+         = unop(Iop_1Uto32, binop(Iop_CmpLT32U, res, argL));
+      break;
+      
+   case /* 1 */ PPCG_FLAG_OP_ADDE:
+      /* res <u argL || (old_ca==1 && res==argL) */
+      xer_ca 
+         = mkOR1( 
+              binop(Iop_CmpLT32U, res, argL),
+              mkAND1( 
+                 binop(Iop_CmpEQ32, oldca, mkU32(1)),
+                 binop(Iop_CmpEQ32, res, argL) 
+              ) 
+           );
+      xer_ca 
+         = unop(Iop_1Uto32, xer_ca);
+      break;
+      
+   case /* 8 */ PPCG_FLAG_OP_SUBFE:
+      /* res <u argR || (old_ca==1 && res==argR) */
+      xer_ca 
+         = mkOR1( 
+              binop(Iop_CmpLT32U, res, argR),
+              mkAND1( 
+                 binop(Iop_CmpEQ32, oldca, mkU32(1)),
+                 binop(Iop_CmpEQ32, res, argR) 
+              ) 
+           );
+      xer_ca 
+         = unop(Iop_1Uto32, xer_ca);
+      break;
+      
+   case /* 7 */ PPCG_FLAG_OP_SUBFC:
+   case /* 9 */ PPCG_FLAG_OP_SUBFI:
+      /* res <=u argR */
+      xer_ca
+         = unop(Iop_1Uto32, binop(Iop_CmpLE32U, res, argR));
+      break;
+      
+   case /* 10 */ PPCG_FLAG_OP_SRAW:
+      /* The shift amount is guaranteed to be in 0 .. 63 inclusive.
+         If it is <= 31, behave like SRAWI; else XER.CA is the sign
+         bit of argL. */
+      /* This term valid for shift amount < 32 only */
+      xer_ca
+         = binop(
+              Iop_And32,
+              binop(Iop_Sar32, argL, mkU8(31)),
+              binop( Iop_And32,
+                     argL,
+                     binop( Iop_Sub32,
+                            binop(Iop_Shl32, mkU32(1),
+                                             unop(Iop_32to8,argR)),
+                            mkU32(1) )
+                     )
+              );
+      xer_ca 
+         = IRExpr_ITE(
+              /* shift amt > 31 ? */
+              binop(Iop_CmpLT32U, mkU32(31), argR),
+              /* yes -- get sign bit of argL */
+              binop(Iop_Shr32, argL, mkU8(31)),
+              /* no -- be like srawi */
+              unop(Iop_1Uto32, binop(Iop_CmpNE32, xer_ca, mkU32(0)))
+           );
+      break;
+
+   case /* 11 */ PPCG_FLAG_OP_SRAWI:
+      /* xer_ca is 1 iff src was negative and bits_shifted_out != 
+         0.  Since the shift amount is known to be in the range
+         0 .. 31 inclusive the following seems viable:
+         xer.ca == 1 iff the following is nonzero:
+         (argL >>s 31)           -- either all 0s or all 1s
+         & (argL & (1<<argR)-1)  -- the stuff shifted out */
+      xer_ca
+         = binop(
+              Iop_And32,
+              binop(Iop_Sar32, argL, mkU8(31)),
+              binop( Iop_And32,
+                     argL,
+                     binop( Iop_Sub32,
+                            binop(Iop_Shl32, mkU32(1),
+                                             unop(Iop_32to8,argR)),
+                            mkU32(1) )
+                     )
+              );
+      xer_ca 
+         = unop(Iop_1Uto32, binop(Iop_CmpNE32, xer_ca, mkU32(0)));
+      break;
+      
+   default: 
+      vex_printf("set_XER_CA: op = %u\n", op);
+      vpanic("set_XER_CA(ppc)");
+   }
+
+   /* xer_ca MUST denote either 0 or 1, no other value allowed */
+   putXER_CA( unop(Iop_32to8, xer_ca) );
+}
+
+static void set_XER_CA_64 ( UInt op, IRExpr* res,
+                            IRExpr* argL, IRExpr* argR, IRExpr* oldca )
+{
+   IRExpr* xer_ca;
+   vassert(op < PPCG_FLAG_OP_NUMBER);
+   vassert(typeOfIRExpr(irsb->tyenv,res)   == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv,argL)  == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv,argR)  == Ity_I64);
+   vassert(typeOfIRExpr(irsb->tyenv,oldca) == Ity_I64);
+
+   /* Incoming oldca is assumed to hold the values 0 or 1 only.  This
+      seems reasonable given that it's always generated by
+      getXER_CA32(), which masks it accordingly.  In any case it being
+      0 or 1 is an invariant of the ppc guest state representation;
+      if it has any other value, that invariant has been violated. */
+
+   switch (op) {
+   case /* 0 */ PPCG_FLAG_OP_ADD:
+      /* res <u argL */
+      xer_ca
+         = unop(Iop_1Uto32, binop(Iop_CmpLT64U, res, argL));
+      break;
+      
+   case /* 1 */ PPCG_FLAG_OP_ADDE:
+      /* res <u argL || (old_ca==1 && res==argL) */
+      xer_ca 
+         = mkOR1( 
+              binop(Iop_CmpLT64U, res, argL),
+              mkAND1( 
+                 binop(Iop_CmpEQ64, oldca, mkU64(1)),
+                 binop(Iop_CmpEQ64, res, argL) 
+                 ) 
+              );
+      xer_ca 
+         = unop(Iop_1Uto32, xer_ca);
+      break;
+      
+   case /* 8 */ PPCG_FLAG_OP_SUBFE:
+      /* res <u argR || (old_ca==1 && res==argR) */
+      xer_ca 
+         = mkOR1( 
+              binop(Iop_CmpLT64U, res, argR),
+              mkAND1( 
+                 binop(Iop_CmpEQ64, oldca, mkU64(1)),
+                 binop(Iop_CmpEQ64, res, argR) 
+              ) 
+           );
+      xer_ca 
+         = unop(Iop_1Uto32, xer_ca);
+      break;
+      
+   case /* 7 */ PPCG_FLAG_OP_SUBFC:
+   case /* 9 */ PPCG_FLAG_OP_SUBFI:
+      /* res <=u argR */
+      xer_ca
+         = unop(Iop_1Uto32, binop(Iop_CmpLE64U, res, argR));
+      break;
+      
+      
+   case /* 10 */ PPCG_FLAG_OP_SRAW:
+      /* The shift amount is guaranteed to be in 0 .. 63 inclusive.
+         If it is <= 31, behave like SRAWI; else XER.CA is the sign
+         bit of argL. */
+         /* This term valid for shift amount < 32 only */
+
+      xer_ca
+         = binop(
+              Iop_And64,
+              binop(Iop_Sar64, argL, mkU8(31)),
+              binop( Iop_And64,
+                     argL,
+                     binop( Iop_Sub64,
+                            binop(Iop_Shl64, mkU64(1),
+                                             unop(Iop_64to8,argR)),
+                            mkU64(1) )
+              )
+           );
+      xer_ca 
+         = IRExpr_ITE(
+              /* shift amt > 31 ? */
+              binop(Iop_CmpLT64U, mkU64(31), argR),
+              /* yes -- get sign bit of argL */
+              unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))),
+              /* no -- be like srawi */
+              unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)))
+          );
+      break;
+      
+   case /* 11 */ PPCG_FLAG_OP_SRAWI:
+      /* xer_ca is 1 iff src was negative and bits_shifted_out != 0.
+         Since the shift amount is known to be in the range 0 .. 31
+         inclusive the following seems viable:
+         xer.ca == 1 iff the following is nonzero:
+         (argL >>s 31)           -- either all 0s or all 1s
+         & (argL & (1<<argR)-1)  -- the stuff shifted out */
+
+      xer_ca
+         = binop(
+              Iop_And64,
+              binop(Iop_Sar64, argL, mkU8(31)),
+              binop( Iop_And64,
+                     argL,
+                     binop( Iop_Sub64,
+                            binop(Iop_Shl64, mkU64(1),
+                                             unop(Iop_64to8,argR)),
+                            mkU64(1) )
+              )
+           );
+      xer_ca 
+         = unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)));
+      break;
+      
+
+   case /* 12 */ PPCG_FLAG_OP_SRAD:
+      /* The shift amount is guaranteed to be in 0 .. 127 inclusive.
+         If it is <= 63, behave like SRADI; else XER.CA is the sign
+         bit of argL. */
+         /* This term valid for shift amount < 64 only */
+
+      xer_ca
+         = binop(
+              Iop_And64,
+              binop(Iop_Sar64, argL, mkU8(63)),
+              binop( Iop_And64,
+                     argL,
+                     binop( Iop_Sub64,
+                            binop(Iop_Shl64, mkU64(1),
+                                             unop(Iop_64to8,argR)),
+                            mkU64(1) )
+              )
+           );
+      xer_ca 
+         = IRExpr_ITE(
+              /* shift amt > 63 ? */
+              binop(Iop_CmpLT64U, mkU64(63), argR),
+              /* yes -- get sign bit of argL */
+              unop(Iop_64to32, binop(Iop_Shr64, argL, mkU8(63))),
+              /* no -- be like sradi */
+              unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)))
+           );
+      break;
+
+
+   case /* 13 */ PPCG_FLAG_OP_SRADI:
+      /* xer_ca is 1 iff src was negative and bits_shifted_out != 0.
+         Since the shift amount is known to be in the range 0 .. 63
+         inclusive, the following seems viable:
+         xer.ca == 1 iff the following is nonzero:
+         (argL >>s 63)           -- either all 0s or all 1s
+         & (argL & (1<<argR)-1)  -- the stuff shifted out */
+
+      xer_ca
+         = binop(
+              Iop_And64,
+              binop(Iop_Sar64, argL, mkU8(63)),
+              binop( Iop_And64,
+                     argL,
+                     binop( Iop_Sub64,
+                            binop(Iop_Shl64, mkU64(1),
+                                             unop(Iop_64to8,argR)),
+                            mkU64(1) )
+              )
+           );
+      xer_ca 
+         = unop(Iop_1Uto32, binop(Iop_CmpNE64, xer_ca, mkU64(0)));
+      break;
+
+   default: 
+      vex_printf("set_XER_CA: op = %u\n", op);
+      vpanic("set_XER_CA(ppc64)");
+   }
+
+   /* xer_ca MUST denote either 0 or 1, no other value allowed */
+   putXER_CA( unop(Iop_32to8, xer_ca) );
+}
+
+static void set_XER_CA ( IRType ty, UInt op, IRExpr* res,
+                         IRExpr* argL, IRExpr* argR, IRExpr* oldca )
+{
+   if (ty == Ity_I32)
+      set_XER_CA_32( op, res, argL, argR, oldca );
+   else
+      set_XER_CA_64( op, res, argL, argR, oldca );
+}
+
+
+
+/*------------------------------------------------------------*/
+/*--- Read/write to guest-state                           --- */
+/*------------------------------------------------------------*/
+
+static IRExpr* /* :: Ity_I32/64 */ getGST ( PPC_GST reg )
+{
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   switch (reg) {
+   case PPC_GST_SPRG3_RO:
+      return IRExpr_Get( OFFB_SPRG3_RO, ty );
+
+   case PPC_GST_CIA: 
+      return IRExpr_Get( OFFB_CIA, ty );
+
+   case PPC_GST_LR: 
+      return IRExpr_Get( OFFB_LR, ty );
+
+   case PPC_GST_CTR: 
+      return IRExpr_Get( OFFB_CTR, ty );
+
+   case PPC_GST_VRSAVE: 
+      return IRExpr_Get( OFFB_VRSAVE, Ity_I32 );
+
+   case PPC_GST_VSCR:
+      return binop(Iop_And32, IRExpr_Get( OFFB_VSCR,Ity_I32 ),
+                              mkU32(MASK_VSCR_VALID));
+
+   case PPC_GST_CR: {
+      /* Synthesise the entire CR into a single word.  Expensive. */
+#     define FIELD(_n)                                               \
+         binop(Iop_Shl32,                                            \
+               unop(Iop_8Uto32,                                      \
+                    binop(Iop_Or8,                                   \
+                          binop(Iop_And8, getCR321(_n), mkU8(7<<1)), \
+                          binop(Iop_And8, getCR0(_n), mkU8(1))       \
+                    )                                                \
+               ),                                                    \
+               mkU8(4 * (7-(_n)))                                    \
+         )
+      return binop(Iop_Or32,
+                   binop(Iop_Or32,
+                         binop(Iop_Or32, FIELD(0), FIELD(1)),
+                         binop(Iop_Or32, FIELD(2), FIELD(3))
+                         ),
+                   binop(Iop_Or32,
+                         binop(Iop_Or32, FIELD(4), FIELD(5)),
+                         binop(Iop_Or32, FIELD(6), FIELD(7))
+                         )
+                   );
+#     undef FIELD
+   }
+
+   case PPC_GST_XER:
+      return binop(Iop_Or32,
+                   binop(Iop_Or32,
+                         binop( Iop_Shl32, getXER_SO32(), mkU8(31)),
+                         binop( Iop_Shl32, getXER_OV32(), mkU8(30))),
+                   binop(Iop_Or32,
+                         binop( Iop_Shl32, getXER_CA32(), mkU8(29)),
+                         getXER_BC32()));
+
+   case PPC_GST_TFHAR:
+      return IRExpr_Get( OFFB_TFHAR, ty );
+
+   case PPC_GST_TEXASR:
+      return IRExpr_Get( OFFB_TEXASR, ty );
+
+   case PPC_GST_TEXASRU:
+      return IRExpr_Get( OFFB_TEXASRU, ty );
+
+   case PPC_GST_TFIAR:
+      return IRExpr_Get( OFFB_TFIAR, ty );
+
+   default:
+      vex_printf("getGST(ppc): reg = %u", reg);
+      vpanic("getGST(ppc)");
+   }
+}
+
+/* Get a masked word from the given reg */
+static IRExpr* /* ::Ity_I32 */ getGST_masked ( PPC_GST reg, UInt mask )
+{
+   IRTemp val = newTemp(Ity_I32);
+   vassert( reg < PPC_GST_MAX );
+    
+   switch (reg) {
+
+   case PPC_GST_FPSCR: {
+      /* Vex-generated code expects the FPSCR to be set as follows:
+         all exceptions masked, round-to-nearest.
+         This corresponds to a FPSCR value of 0x0. */
+
+      /* In the lower 32 bits of FPSCR, we're only keeping track of
+       * the binary floating point rounding mode, so if the mask isn't
+       * asking for this, just return 0x0.
+       */
+      if (mask & MASK_FPSCR_RN) {
+         assign( val, unop( Iop_8Uto32, IRExpr_Get( OFFB_FPROUND, Ity_I8 ) ) );
+      } else {
+         assign( val, mkU32(0x0) );
+      }
+      break;
+   }
+
+   default:
+      vex_printf("getGST_masked(ppc): reg = %u", reg);
+      vpanic("getGST_masked(ppc)");
+   }
+
+   if (mask != 0xFFFFFFFF) {
+      return binop(Iop_And32, mkexpr(val), mkU32(mask));
+   } else {
+      return mkexpr(val);
+   }
+}
+
+/* Get a masked word from the given reg */
+static IRExpr* /* ::Ity_I32 */getGST_masked_upper(PPC_GST reg, ULong mask) {
+   IRExpr * val;
+   vassert( reg < PPC_GST_MAX );
+
+   switch (reg) {
+
+   case PPC_GST_FPSCR: {
+      /* In the upper 32 bits of FPSCR, we're only keeping track
+       * of the decimal floating point rounding mode, so if the mask
+       * isn't asking for this, just return 0x0.
+       */
+      if (mask & MASK_FPSCR_DRN) {
+         val = binop( Iop_And32,
+                      unop( Iop_8Uto32, IRExpr_Get( OFFB_DFPROUND, Ity_I8 ) ),
+                      unop( Iop_64HIto32, mkU64( mask ) ) );
+      } else {
+         val = mkU32( 0x0ULL );
+      }
+      break;
+   }
+
+   default:
+      vex_printf( "getGST_masked_upper(ppc): reg = %u", reg );
+      vpanic( "getGST_masked_upper(ppc)" );
+   }
+   return val;
+}
+
+
+/* Fetch the specified REG[FLD] nibble (as per IBM/hardware notation)
+   and return it at the bottom of an I32; the top 28 bits are
+   guaranteed to be zero. */
+static IRExpr* /* ::Ity_I32 */ getGST_field ( PPC_GST reg, UInt fld )
+{
+   UInt shft, mask;
+
+   vassert( fld < 8 );
+   vassert( reg < PPC_GST_MAX );
+   
+   shft = 4*(7-fld);
+   mask = 0xF<<shft;
+
+   switch (reg) {
+   case PPC_GST_XER:
+      vassert(fld ==7);
+      return binop(Iop_Or32,
+                   binop(Iop_Or32,
+                         binop(Iop_Shl32, getXER_SO32(), mkU8(3)),
+                         binop(Iop_Shl32, getXER_OV32(), mkU8(2))),
+                   binop(      Iop_Shl32, getXER_CA32(), mkU8(1)));
+      break;
+
+   default:
+      if (shft == 0)
+         return getGST_masked( reg, mask );
+      else
+         return binop(Iop_Shr32,
+                      getGST_masked( reg, mask ),
+                      mkU8(toUChar( shft )));
+   }
+}
+
+static void putGST ( PPC_GST reg, IRExpr* src )
+{
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+   IRType ty_src = typeOfIRExpr(irsb->tyenv,src );
+   vassert( reg < PPC_GST_MAX );
+   switch (reg) {
+   case PPC_GST_IP_AT_SYSCALL: 
+      vassert( ty_src == ty );
+      stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL, src ) );
+      break;
+   case PPC_GST_CIA: 
+      vassert( ty_src == ty );
+      stmt( IRStmt_Put( OFFB_CIA, src ) );
+      break;
+   case PPC_GST_LR: 
+      vassert( ty_src == ty );
+      stmt( IRStmt_Put( OFFB_LR, src ) );
+      break;
+   case PPC_GST_CTR: 
+      vassert( ty_src == ty );
+      stmt( IRStmt_Put( OFFB_CTR, src ) );
+      break;
+   case PPC_GST_VRSAVE: 
+      vassert( ty_src == Ity_I32 );
+      stmt( IRStmt_Put( OFFB_VRSAVE,src));
+      break;
+   case PPC_GST_VSCR:
+      vassert( ty_src == Ity_I32 );
+      stmt( IRStmt_Put( OFFB_VSCR,
+                        binop(Iop_And32, src,
+                              mkU32(MASK_VSCR_VALID)) ) );
+      break;
+   case PPC_GST_XER:
+      vassert( ty_src == Ity_I32 );
+      putXER_SO( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(31))) );
+      putXER_OV( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(30))) );
+      putXER_CA( unop(Iop_32to8, binop(Iop_Shr32, src, mkU8(29))) );
+      putXER_BC( unop(Iop_32to8, src) );
+      break;
+      
+   case PPC_GST_EMWARN:
+      vassert( ty_src == Ity_I32 );
+      stmt( IRStmt_Put( OFFB_EMNOTE,src) );
+      break;
+      
+   case PPC_GST_CMSTART: 
+      vassert( ty_src == ty );
+      stmt( IRStmt_Put( OFFB_CMSTART, src) );
+      break;
+      
+   case PPC_GST_CMLEN: 
+      vassert( ty_src == ty );
+      stmt( IRStmt_Put( OFFB_CMLEN, src) );
+      break;
+      
+   case PPC_GST_TEXASR:
+      vassert( ty_src == Ity_I64 );
+      stmt( IRStmt_Put( OFFB_TEXASR, src ) );
+      break;
+
+   case PPC_GST_TEXASRU:
+      vassert( ty_src == Ity_I32 );
+      stmt( IRStmt_Put( OFFB_TEXASRU, src ) );
+      break;
+
+   case PPC_GST_TFIAR:
+      vassert( ty_src == Ity_I64 );
+      stmt( IRStmt_Put( OFFB_TFIAR, src ) );
+      break;
+   case PPC_GST_TFHAR:
+      vassert( ty_src == Ity_I64 );
+      stmt( IRStmt_Put( OFFB_TFHAR, src ) );
+      break;
+   default:
+      vex_printf("putGST(ppc): reg = %u", reg);
+      vpanic("putGST(ppc)");
+   }
+}
+
+/* Write masked src to the given reg */
+static void putGST_masked ( PPC_GST reg, IRExpr* src, ULong mask )
+{
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   vassert( reg < PPC_GST_MAX );
+   vassert( typeOfIRExpr( irsb->tyenv,src ) == Ity_I64 );
+
+   switch (reg) {
+   case PPC_GST_FPSCR: {
+      /* Allow writes to either binary or decimal floating point
+         Rounding Mode.
+      */
+      /* If any part of |mask| covers FPSCR.RN, update the bits of
+         FPSCR.RN by copying in |src| for locations where the
+         corresponding bit in |mask| is 1, and leaving it unchanged
+         for corresponding |mask| zero bits. */
+      if (mask & MASK_FPSCR_RN) {
+         stmt( 
+            IRStmt_Put(
+               OFFB_FPROUND,
+               unop(
+                  Iop_32to8,
+                  binop(
+                     Iop_Or32, 
+                     binop(
+                        Iop_And32,
+                        unop(Iop_64to32, src),
+                        mkU32(MASK_FPSCR_RN & mask)
+                     ),
+                     binop(
+                        Iop_And32, 
+                        unop(Iop_8Uto32, IRExpr_Get(OFFB_FPROUND,Ity_I8)),
+                        mkU32(MASK_FPSCR_RN & ~mask)
+                     )
+                  )
+               )
+            )
+         );
+      }
+      /* Similarly, update FPSCR.DRN if any bits of |mask|
+         corresponding to FPSCR.DRN are set. */
+      if (mask & MASK_FPSCR_DRN) {
+         stmt( 
+            IRStmt_Put(
+               OFFB_DFPROUND,
+               unop(
+                  Iop_32to8,
+                  binop(
+                     Iop_Or32, 
+                     binop(
+                        Iop_And32,
+                        unop(Iop_64HIto32, src),
+                        mkU32((MASK_FPSCR_DRN & mask) >> 32)
+                     ),
+                     binop(
+                        Iop_And32, 
+                        unop(Iop_8Uto32, IRExpr_Get(OFFB_DFPROUND,Ity_I8)),
+                        mkU32((MASK_FPSCR_DRN & ~mask) >> 32)
+                     )
+                  )
+               )
+            )
+         );
+      }
+
+      /* Give EmNote for attempted writes to:
+         - Exception Controls
+         - Non-IEEE Mode
+      */
+      if (mask & 0xFC) {  // Exception Control, Non-IEEE mode
+         VexEmNote ew = EmWarn_PPCexns;
+
+         /* If any of the src::exception_control bits are actually set,
+            side-exit to the next insn, reporting the warning,
+            so that Valgrind's dispatcher sees the warning. */
+         putGST( PPC_GST_EMWARN, mkU32(ew) );
+         stmt( 
+            IRStmt_Exit(
+               binop(Iop_CmpNE32, mkU32(ew), mkU32(EmNote_NONE)),
+               Ijk_EmWarn,
+               mkSzConst( ty, nextInsnAddr()), OFFB_CIA ));
+      }
+
+      /* Ignore all other writes */
+      break;
+   }
+
+   default:
+      vex_printf("putGST_masked(ppc): reg = %u", reg);
+      vpanic("putGST_masked(ppc)");
+   }
+}
+
+/* Write the least significant nibble of src to the specified
+   REG[FLD] (as per IBM/hardware notation). */
+static void putGST_field ( PPC_GST reg, IRExpr* src, UInt fld )
+{
+   UInt shft;
+   ULong mask;
+
+   vassert( typeOfIRExpr(irsb->tyenv,src ) == Ity_I32 );
+   vassert( fld < 16 );
+   vassert( reg < PPC_GST_MAX );
+   
+   if (fld < 8)
+      shft = 4*(7-fld);
+   else
+      shft = 4*(15-fld);
+   mask = 0xF;
+   mask = mask << shft;
+
+   switch (reg) {
+   case PPC_GST_CR:
+      putCR0  (fld, binop(Iop_And8, mkU8(1   ), unop(Iop_32to8, src)));
+      putCR321(fld, binop(Iop_And8, mkU8(7<<1), unop(Iop_32to8, src)));
+      break;
+
+   default:
+      {
+         IRExpr * src64 = unop( Iop_32Uto64, src );
+
+         if (shft == 0) {
+            putGST_masked( reg, src64, mask );
+         } else {
+            putGST_masked( reg,
+                           binop( Iop_Shl64, src64, mkU8( toUChar( shft ) ) ),
+                           mask );
+         }
+      }
+   }
+}
+
+/*------------------------------------------------------------*/
+/* Helpers for VSX instructions that do floating point
+ * operations and need to determine if a src contains a
+ * special FP value.
+ *
+ *------------------------------------------------------------*/
+
+#define NONZERO_FRAC_MASK 0x000fffffffffffffULL
+#define FP_FRAC_PART(x) binop( Iop_And64, \
+                               mkexpr( x ), \
+                               mkU64( NONZERO_FRAC_MASK ) )
+
+// Returns exponent part of a single precision floating point as I32
+static IRExpr * fp_exp_part_sp(IRTemp src)
+{
+   return binop( Iop_And32,
+                 binop( Iop_Shr32, mkexpr( src ), mkU8( 23 ) ),
+                 mkU32( 0xff ) );
+}
+
+// Returns exponent part of floating point as I32
+static IRExpr * fp_exp_part(IRTemp src, Bool sp)
+{
+   IRExpr * exp;
+   if (sp)
+      return fp_exp_part_sp(src);
+
+   if (!mode64)
+      exp = binop( Iop_And32, binop( Iop_Shr32, unop( Iop_64HIto32,
+                                                      mkexpr( src ) ),
+                                     mkU8( 20 ) ), mkU32( 0x7ff ) );
+   else
+      exp = unop( Iop_64to32,
+                  binop( Iop_And64,
+                         binop( Iop_Shr64, mkexpr( src ), mkU8( 52 ) ),
+                         mkU64( 0x7ff ) ) );
+   return exp;
+}
+
+static IRExpr * is_Inf_sp(IRTemp src)
+{
+   IRTemp frac_part = newTemp(Ity_I32);
+   IRExpr * Inf_exp;
+
+   assign( frac_part, binop( Iop_And32, mkexpr(src), mkU32(0x007fffff)) );
+   Inf_exp = binop( Iop_CmpEQ32, fp_exp_part( src, True /*single precision*/ ), mkU32( 0xff ) );
+   return mkAND1( Inf_exp, binop( Iop_CmpEQ32, mkexpr( frac_part ), mkU32( 0 ) ) );
+}
+
+
+// Infinity: exp = 7ff and fraction is zero; s = 0/1
+static IRExpr * is_Inf(IRTemp src, Bool sp)
+{
+   IRExpr * Inf_exp, * hi32, * low32;
+   IRTemp frac_part;
+
+   if (sp)
+      return is_Inf_sp(src);
+
+   frac_part = newTemp(Ity_I64);
+   assign( frac_part, FP_FRAC_PART(src) );
+   Inf_exp = binop( Iop_CmpEQ32, fp_exp_part( src, False /*not single precision*/  ), mkU32( 0x7ff ) );
+   hi32 = unop( Iop_64HIto32, mkexpr( frac_part ) );
+   low32 = unop( Iop_64to32, mkexpr( frac_part ) );
+   return mkAND1( Inf_exp, binop( Iop_CmpEQ32, binop( Iop_Or32, low32, hi32 ),
+                                  mkU32( 0 ) ) );
+}
+
+static IRExpr * is_Zero_sp(IRTemp src)
+{
+   IRTemp sign_less_part = newTemp(Ity_I32);
+   assign( sign_less_part, binop( Iop_And32, mkexpr( src ), mkU32( SIGN_MASK32 ) ) );
+   return binop( Iop_CmpEQ32, mkexpr( sign_less_part ), mkU32( 0 ) );
+}
+
+// Zero: exp is zero and fraction is zero; s = 0/1
+static IRExpr * is_Zero(IRTemp src, Bool sp)
+{
+   IRExpr * hi32, * low32;
+   IRTemp sign_less_part;
+   if (sp)
+      return is_Zero_sp(src);
+
+   sign_less_part = newTemp(Ity_I64);
+
+   assign( sign_less_part, binop( Iop_And64, mkexpr( src ), mkU64( SIGN_MASK ) ) );
+   hi32 = unop( Iop_64HIto32, mkexpr( sign_less_part ) );
+   low32 = unop( Iop_64to32, mkexpr( sign_less_part ) );
+   return binop( Iop_CmpEQ32, binop( Iop_Or32, low32, hi32 ),
+                              mkU32( 0 ) );
+}
+
+/*  SNAN: s = 1/0; exp = 0x7ff; fraction is nonzero, with highest bit '0'
+ *  QNAN: s = 1/0; exp = 0x7ff; fraction is nonzero, with highest bit '1'
+ *  This function returns an IRExpr value of '1' for any type of NaN.
+ */
+static IRExpr * is_NaN(IRTemp src)
+{
+   IRExpr * NaN_exp, * hi32, * low32;
+   IRTemp frac_part = newTemp(Ity_I64);
+
+   assign( frac_part, FP_FRAC_PART(src) );
+   hi32 = unop( Iop_64HIto32, mkexpr( frac_part ) );
+   low32 = unop( Iop_64to32, mkexpr( frac_part ) );
+   NaN_exp = binop( Iop_CmpEQ32, fp_exp_part( src, False /*not single precision*/ ),
+                    mkU32( 0x7ff ) );
+
+   return mkAND1( NaN_exp, binop( Iop_CmpNE32, binop( Iop_Or32, low32, hi32 ),
+                                               mkU32( 0 ) ) );
+}
+
+/* This function returns an IRExpr value of '1' for any type of NaN.
+ * The passed 'src' argument is assumed to be Ity_I32, holding the raw
+ * bits of a single-precision value (8-bit exponent at bits 30:23,
+ * 23-bit fraction at bits 22:0).
+ */
+static IRExpr * is_NaN_32(IRTemp src)
+{
+/* NOTE(review): these macros are deliberately not #undef'd here;
+ * later code in this file may rely on them staying defined. */
+#define NONZERO_FRAC_MASK32 0x007fffffULL
+#define FP_FRAC_PART32(x) binop( Iop_And32, \
+                                 mkexpr( x ), \
+                                 mkU32( NONZERO_FRAC_MASK32 ) )
+
+   IRExpr * frac_part = FP_FRAC_PART32(src);
+   IRExpr * exp_part = binop( Iop_And32,
+                              binop( Iop_Shr32, mkexpr( src ), mkU8( 23 ) ),
+                              mkU32( 0x0ff ) );
+   IRExpr * NaN_exp = binop( Iop_CmpEQ32, exp_part, mkU32( 0xff ) );
+
+   /* NaN <=> (exponent == 0xff) && (fraction != 0). */
+   return mkAND1( NaN_exp, binop( Iop_CmpNE32, frac_part, mkU32( 0 ) ) );
+}
+
+/* This function takes an Ity_I32 input argument interpreted
+ * as a single-precision floating point value. If src is a
+ * SNaN, it is changed to a QNaN and returned; otherwise,
+ * the original value is returned.
+ *
+ * A SNaN is converted to a QNaN by setting the quiet bit
+ * (bit 22, SNAN_MASK32); no other bits are modified.
+ */
+static IRExpr * handle_SNaN_to_QNaN_32(IRExpr * src)
+{
+#define SNAN_MASK32 0x00400000
+   IRTemp tmp = newTemp(Ity_I32);
+   IRTemp mask = newTemp(Ity_I32);
+   IRTemp is_SNAN = newTemp(Ity_I1);
+
+   vassert( typeOfIRExpr(irsb->tyenv, src ) == Ity_I32 );
+   assign(tmp, src);
+
+   /* check if input is SNaN, if it is convert to QNaN */
+   /* SNaN <=> NaN with the quiet bit clear. */
+   assign( is_SNAN,
+           mkAND1( is_NaN_32( tmp ),
+                   binop( Iop_CmpEQ32,
+                          binop( Iop_And32, mkexpr( tmp ),
+                                 mkU32( SNAN_MASK32 ) ),
+                          mkU32( 0 ) ) ) );
+   /* create mask with QNaN bit set to make it a QNaN if tmp is SNaN */
+   assign ( mask, binop( Iop_And32,
+                         unop( Iop_1Sto32, mkexpr( is_SNAN ) ),
+                         mkU32( SNAN_MASK32 ) ) );
+   return binop( Iop_Or32, mkexpr( mask ), mkexpr( tmp) );
+}
+
+
+/* This helper function performs the negation part of operations of the form:
+ *    "Negate Multiply-<op>"
+ *  where "<op>" is either "Add" or "Sub".
+ *
+ * This function takes one argument -- the floating point intermediate result (converted to
+ * Ity_I64 via Iop_ReinterpF64asI64) that was obtained from the "Multiply-<op>" part of
+ * the operation described above.
+ *
+ * Returns an Ity_I64 temp holding the intermediate result with its sign
+ * bit flipped -- unless the intermediate result is a NaN, in which case
+ * the sign bit is left unchanged.
+ */
+static IRTemp getNegatedResult(IRTemp intermediateResult)
+{
+   ULong signbit_mask = 0x8000000000000000ULL;
+   IRTemp signbit_32 = newTemp(Ity_I32);
+   IRTemp resultantSignbit = newTemp(Ity_I1);
+   IRTemp negatedResult = newTemp(Ity_I64);
+   /* signbit_32 = 1 if the sign bit of the input is set, else 0. */
+   assign( signbit_32, binop( Iop_Shr32,
+                          unop( Iop_64HIto32,
+                                 binop( Iop_And64, mkexpr( intermediateResult ),
+                                        mkU64( signbit_mask ) ) ),
+                                 mkU8( 31 ) ) );
+   /* We negate the signbit if and only if the intermediate result from the
+    * multiply-<op> was NOT a NaN.  This is an XNOR predicate.
+    */
+   assign( resultantSignbit,
+        unop( Iop_Not1,
+              binop( Iop_CmpEQ32,
+                     binop( Iop_Xor32,
+                            mkexpr( signbit_32 ),
+                            unop( Iop_1Uto32, is_NaN( intermediateResult ) ) ),
+                     mkU32( 1 ) ) ) );
+
+   /* Splice the computed sign bit onto the magnitude bits. */
+   assign( negatedResult,
+        binop( Iop_Or64,
+               binop( Iop_And64,
+                      mkexpr( intermediateResult ),
+                      mkU64( ~signbit_mask ) ),
+               binop( Iop_32HLto64,
+                      binop( Iop_Shl32,
+                             unop( Iop_1Uto32, mkexpr( resultantSignbit ) ),
+                             mkU8( 31 ) ),
+                      mkU32( 0 ) ) ) );
+
+   return negatedResult;
+}
+
+/* This helper function performs the negation part of operations of the form:
+ *    "Negate Multiply-<op>"
+ *  where "<op>" is either "Add" or "Sub".
+ *
+ * This function takes one argument -- the floating point intermediate result (converted to
+ * Ity_I32 via Iop_ReinterpF32asI32) that was obtained from the "Multiply-<op>" part of
+ * the operation described above.
+ *
+ * Single-precision analogue of getNegatedResult: returns an Ity_I32 temp
+ * holding the input with its sign bit flipped, unless the input is a NaN,
+ * in which case the sign bit is left unchanged.
+ */
+static IRTemp getNegatedResult_32(IRTemp intermediateResult)
+{
+   UInt signbit_mask = 0x80000000;
+   IRTemp signbit_32 = newTemp(Ity_I32);
+   IRTemp resultantSignbit = newTemp(Ity_I1);
+   IRTemp negatedResult = newTemp(Ity_I32);
+   /* signbit_32 = 1 if the sign bit of the input is set, else 0. */
+   assign( signbit_32, binop( Iop_Shr32,
+                                 binop( Iop_And32, mkexpr( intermediateResult ),
+                                        mkU32( signbit_mask ) ),
+                                 mkU8( 31 ) ) );
+   /* We negate the signbit if and only if the intermediate result from the
+    * multiply-<op> was NOT a NaN.  This is an XNOR predicate.
+    */
+   assign( resultantSignbit,
+        unop( Iop_Not1,
+              binop( Iop_CmpEQ32,
+                     binop( Iop_Xor32,
+                            mkexpr( signbit_32 ),
+                            unop( Iop_1Uto32, is_NaN_32( intermediateResult ) ) ),
+                     mkU32( 1 ) ) ) );
+
+   /* Splice the computed sign bit onto the magnitude bits. */
+   assign( negatedResult,
+           binop( Iop_Or32,
+                  binop( Iop_And32,
+                         mkexpr( intermediateResult ),
+                         mkU32( ~signbit_mask ) ),
+                  binop( Iop_Shl32,
+                         unop( Iop_1Uto32, mkexpr( resultantSignbit ) ),
+                         mkU8( 31 ) ) ) );
+
+   return negatedResult;
+}
+
+/*------------------------------------------------------------*/
+/* Transactional memory helpers
+ *
+ *------------------------------------------------------------*/
+
+/* Build a TEXASR failure-reason word from its component fields.  Bit
+ * positions follow the Power ISA TEXASR layout with IBM bit numbering
+ * (bit 0 = MSB), hence the (63-n) shift amounts.  Fields the caller
+ * does not supply are encoded as zero.
+ *
+ *   failure_code  - bits 0:6, the failure code
+ *   persistant    - bit 7, failure cause is persistent
+ *   nest_overflow - bit 9, transaction nesting overflow
+ *   tm_exact      - bit 37, TFIAR is exact
+ *
+ * Returns the assembled 64-bit reason word.
+ *
+ * FIX: the 'failure_code' parameter was previously ignored (the
+ * "Failure code" field was hard-wired to 0); it is now encoded.
+ */
+static ULong generate_TMreason( UInt failure_code,
+                                             UInt persistant,
+                                             UInt nest_overflow,
+                                             UInt tm_exact )
+{
+   ULong tm_err_code =
+     ( (ULong) failure_code) << (63-6)   /* Failure code */
+     | ( (ULong) persistant) << (63-7)     /* Failure persistent */
+     | ( (ULong) 0) << (63-8)   /* Disallowed */
+     | ( (ULong) nest_overflow) << (63-9)   /* Nesting Overflow */
+     | ( (ULong) 0) << (63-10)  /* Footprint Overflow */
+     | ( (ULong) 0) << (63-11)  /* Self-Induced Conflict */
+     | ( (ULong) 0) << (63-12)  /* Non-Transactional Conflict */
+     | ( (ULong) 0) << (63-13)  /* Transactional Conflict */
+     | ( (ULong) 0) << (63-14)  /* Translation Invalidation Conflict */
+     | ( (ULong) 0) << (63-15)  /* Implementation-specific */
+     | ( (ULong) 0) << (63-16)  /* Instruction Fetch Conflict */
+     | ( (ULong) 0) << (63-30)  /* Reserved */
+     | ( (ULong) 0) << (63-31)  /* Abort */
+     | ( (ULong) 0) << (63-32)  /* Suspend */
+     | ( (ULong) 0) << (63-33)  /* Reserved */
+     | ( (ULong) 0) << (63-35)  /* Privilege */
+     | ( (ULong) 0) << (63-36)  /* Failure Summary */
+     | ( (ULong) tm_exact) << (63-37)  /* TFIAR Exact */
+     | ( (ULong) 0) << (63-38)  /* ROT */
+     | ( (ULong) 0) << (63-51)  /* Reserved */
+     | ( (ULong) 0) << (63-63);  /* Transaction Level */
+
+     return tm_err_code;
+}
+
+/* Record a transaction-memory failure in the guest state:
+ *   TFIAR   <- address of the failing instruction
+ *   TEXASR  <- failure reason word (see generate_TMreason)
+ *   TEXASRU <- cleared
+ *   TFHAR   <- failure-handler address supplied by the caller
+ */
+static void storeTMfailure( Addr64 err_address, ULong tm_reason,
+                            Addr64 handler_address )
+{
+   putGST( PPC_GST_TFIAR,   mkU64( err_address ) );
+   putGST( PPC_GST_TEXASR,  mkU64( tm_reason ) );
+   putGST( PPC_GST_TEXASRU, mkU32( 0 ) );
+   putGST( PPC_GST_TFHAR,   mkU64( handler_address ) );
+}
+
+/*------------------------------------------------------------*/
+/*--- Integer Instruction Translation                     --- */
+/*------------------------------------------------------------*/
+
+/*
+  Integer Arithmetic Instructions
+
+  Decode and translate one D-Form or XO-Form integer arithmetic
+  instruction (the add*, subf*, mul*, div* families and neg).  The
+  result is written to rD; where the instruction requires it, XER[CA]
+  and/or XER[OV] are updated, and when Rc=1 (for the cases that set
+  do_rc) CR0 is set from the result.  Returns False if the instruction
+  fields cannot be decoded, True otherwise.
+*/
+static Bool dis_int_arith ( UInt theInstr )
+{
+   /* D-Form, XO-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UChar rD_addr = ifieldRegDS(theInstr);
+   UChar rA_addr = ifieldRegA(theInstr);
+   UInt  uimm16  = ifieldUIMM16(theInstr);
+   UChar rB_addr = ifieldRegB(theInstr);
+   UChar flag_OE = ifieldBIT10(theInstr);
+   UInt  opc2    = ifieldOPClo9(theInstr);
+   UChar flag_rC = ifieldBIT0(theInstr);
+
+   Long   simm16 = extend_s_16to64(uimm16);
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp rA     = newTemp(ty);
+   IRTemp rB     = newTemp(ty);
+   IRTemp rD     = newTemp(ty);
+
+   /* Set by cases that honour Rc: CR0 is updated at the end iff
+    * do_rc && flag_rC. */
+   Bool do_rc = False;
+
+   assign( rA, getIReg(rA_addr) );
+   assign( rB, getIReg(rB_addr) );         // XO-Form: rD, rA, rB
+
+   switch (opc1) {
+   /* D-Form */
+   case 0x0C: // addic  (Add Immediate Carrying, PPC32 p351)
+      DIP("addic r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
+      assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
+                         mkSzExtendS16(ty, uimm16) ) );
+      set_XER_CA( ty, PPCG_FLAG_OP_ADD, 
+                  mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16),
+                  mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
+      break;
+    
+   case 0x0D: // addic. (Add Immediate Carrying and Record, PPC32 p352)
+      DIP("addic. r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
+      assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
+                         mkSzExtendS16(ty, uimm16) ) );
+      set_XER_CA( ty, PPCG_FLAG_OP_ADD, 
+                  mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16),
+                  mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
+      do_rc = True;  // Always record to CR
+      flag_rC = 1;
+      break;
+
+   case 0x0E: // addi   (Add Immediate, PPC32 p350)
+      // li rD,val   == addi rD,0,val
+      // la disp(rA) == addi rD,rA,disp
+      if ( rA_addr == 0 ) {
+         DIP("li r%u,%d\n", rD_addr, (Int)simm16);
+         assign( rD, mkSzExtendS16(ty, uimm16) );
+      } else {
+         DIP("addi r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
+         assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
+                            mkSzExtendS16(ty, uimm16) ) );
+      }
+      break;
+
+   case 0x0F: // addis  (Add Immediate Shifted, PPC32 p353)
+      // lis rD,val == addis rD,0,val
+      if ( rA_addr == 0 ) {
+         DIP("lis r%u,%d\n", rD_addr, (Int)simm16);
+         assign( rD, mkSzExtendS32(ty, uimm16 << 16) );
+      } else {
+         DIP("addis r%u,r%u,0x%x\n", rD_addr, rA_addr, (Int)simm16);
+         assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
+                            mkSzExtendS32(ty, uimm16 << 16) ) );
+      }
+      break;
+
+   case 0x07: // mulli    (Multiply Low Immediate, PPC32 p490)
+      DIP("mulli r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
+      if (mode64)
+         assign( rD, unop(Iop_128to64,
+                          binop(Iop_MullS64, mkexpr(rA),
+                                mkSzExtendS16(ty, uimm16))) );
+      else
+         assign( rD, unop(Iop_64to32,
+                          binop(Iop_MullS32, mkexpr(rA),
+                                mkSzExtendS16(ty, uimm16))) );
+      break;
+
+   case 0x08: // subfic   (Subtract from Immediate Carrying, PPC32 p540)
+      DIP("subfic r%u,r%u,%d\n", rD_addr, rA_addr, (Int)simm16);
+      // rD = simm16 - rA
+      assign( rD, binop( mkSzOp(ty, Iop_Sub8),
+                         mkSzExtendS16(ty, uimm16),
+                         mkexpr(rA)) );
+      set_XER_CA( ty, PPCG_FLAG_OP_SUBFI, 
+                  mkexpr(rD), mkexpr(rA), mkSzExtendS16(ty, uimm16),
+                  mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
+      break;
+
+   /* XO-Form */
+   case 0x1F:
+      do_rc = True;    // All below record to CR
+      
+      switch (opc2) {
+      case 0x10A: // add  (Add, PPC32 p347)
+         DIP("add%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         assign( rD, binop( mkSzOp(ty, Iop_Add8),
+                            mkexpr(rA), mkexpr(rB) ) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_ADD,
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+
+      case 0x00A: // addc      (Add Carrying, PPC32 p348)
+         DIP("addc%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         assign( rD, binop( mkSzOp(ty, Iop_Add8),
+                            mkexpr(rA), mkexpr(rB)) );
+         set_XER_CA( ty, PPCG_FLAG_OP_ADD, 
+                     mkexpr(rD), mkexpr(rA), mkexpr(rB),
+                     mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_ADD, 
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+         
+      case 0x08A: { // adde      (Add Extended, PPC32 p349)
+         IRTemp old_xer_ca = newTemp(ty);
+         DIP("adde%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         // rD = rA + rB + XER[CA]
+         assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
+         assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
+                            binop( mkSzOp(ty, Iop_Add8),
+                                   mkexpr(rB), mkexpr(old_xer_ca))) );
+         set_XER_CA( ty, PPCG_FLAG_OP_ADDE, 
+                     mkexpr(rD), mkexpr(rA), mkexpr(rB),
+                     mkexpr(old_xer_ca) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_ADDE, 
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+      }
+
+      case 0x0EA: { // addme     (Add to Minus One Extended, PPC32 p354)
+         IRTemp old_xer_ca = newTemp(ty);
+         IRExpr *min_one;
+         if (rB_addr != 0) {
+            vex_printf("dis_int_arith(ppc)(addme,rB_addr)\n");
+            return False;
+         }
+         DIP("addme%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         // rD = rA + (-1) + XER[CA]
+         // => Just another form of adde
+         assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
+         min_one = mkSzImm(ty, (Long)-1);
+         assign( rD, binop( mkSzOp(ty, Iop_Add8), mkexpr(rA),
+                            binop( mkSzOp(ty, Iop_Add8),
+                                   min_one, mkexpr(old_xer_ca)) ));
+         set_XER_CA( ty, PPCG_FLAG_OP_ADDE,
+                     mkexpr(rD), mkexpr(rA), min_one,
+                     mkexpr(old_xer_ca) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_ADDE, 
+                        mkexpr(rD), mkexpr(rA), min_one );
+         }
+         break;
+      }
+
+      case 0x0CA: { // addze      (Add to Zero Extended, PPC32 p355)
+         IRTemp old_xer_ca = newTemp(ty);
+         if (rB_addr != 0) {
+            vex_printf("dis_int_arith(ppc)(addze,rB_addr)\n");
+            return False;
+         }
+         DIP("addze%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         // rD = rA + (0) + XER[CA]
+         // => Just another form of adde
+         assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
+         assign( rD, binop( mkSzOp(ty, Iop_Add8),
+                            mkexpr(rA), mkexpr(old_xer_ca)) );
+         set_XER_CA( ty, PPCG_FLAG_OP_ADDE, 
+                     mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0), 
+                     mkexpr(old_xer_ca) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_ADDE, 
+                        mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0) );
+         }
+         break;
+      }
+
+      case 0x1EB: // divw       (Divide Word, PPC32 p388)
+         DIP("divw%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         if (mode64) {
+            /* Note:
+               XER settings are mode independent, and reflect the 
+               overflow of the low-order 32bit result
+               CR0[LT|GT|EQ] are undefined if flag_rC && mode64
+            */
+            /* rD[hi32] are undefined: setting them to sign of lo32
+                - makes set_CR0 happy */
+            IRExpr* dividend = mk64lo32Sto64( mkexpr(rA) );
+            IRExpr* divisor  = mk64lo32Sto64( mkexpr(rB) );
+            assign( rD, mk64lo32Uto64( binop(Iop_DivS64, dividend,
+                                                         divisor) ) );
+            if (flag_OE) {
+               set_XER_OV( ty, PPCG_FLAG_OP_DIVW, 
+                           mkexpr(rD), dividend, divisor );
+            }
+         } else {
+            assign( rD, binop(Iop_DivS32, mkexpr(rA), mkexpr(rB)) );
+            if (flag_OE) {
+               set_XER_OV( ty, PPCG_FLAG_OP_DIVW, 
+                           mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+            }
+         }
+         /* Note:
+            if (0x8000_0000 / -1) or (x / 0)
+            => rD=undef, if(flag_rC) CR7=undef, if(flag_OE) XER_OV=1
+            => But _no_ exception raised. */
+         break;
+
+      case 0x1CB: // divwu      (Divide Word Unsigned, PPC32 p389)
+         DIP("divwu%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         if (mode64) {
+            /* Note:
+               XER settings are mode independent, and reflect the 
+               overflow of the low-order 32bit result
+               CR0[LT|GT|EQ] are undefined if flag_rC && mode64
+            */
+            IRExpr* dividend = mk64lo32Uto64( mkexpr(rA) );
+            IRExpr* divisor  = mk64lo32Uto64( mkexpr(rB) );
+            assign( rD, mk64lo32Uto64( binop(Iop_DivU64, dividend,
+                                                         divisor) ) );
+            if (flag_OE) {
+               set_XER_OV( ty, PPCG_FLAG_OP_DIVWU, 
+                           mkexpr(rD), dividend, divisor );
+            }
+         } else {
+            assign( rD, binop(Iop_DivU32, mkexpr(rA), mkexpr(rB)) );
+            if (flag_OE) {
+               set_XER_OV( ty, PPCG_FLAG_OP_DIVWU, 
+                           mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+            }
+         }
+         /* Note: ditto comment divw, for (x / 0) */
+         break;
+
+      case 0x04B: // mulhw      (Multiply High Word, PPC32 p488)
+         if (flag_OE != 0) {
+            vex_printf("dis_int_arith(ppc)(mulhw,flag_OE)\n");
+            return False;
+         }
+         DIP("mulhw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         if (mode64) {
+            /* rD[hi32] are undefined: setting them to sign of lo32
+                - makes set_CR0 happy */
+            assign( rD, binop(Iop_Sar64,
+                           binop(Iop_Mul64,
+                                 mk64lo32Sto64( mkexpr(rA) ),
+                                 mk64lo32Sto64( mkexpr(rB) )),
+                              mkU8(32)) );
+         } else {
+            assign( rD, unop(Iop_64HIto32,
+                             binop(Iop_MullS32,
+                                   mkexpr(rA), mkexpr(rB))) );
+         }
+         break;
+
+      case 0x00B: // mulhwu    (Multiply High Word Unsigned, PPC32 p489)
+         if (flag_OE != 0) {
+            vex_printf("dis_int_arith(ppc)(mulhwu,flag_OE)\n");
+            return False;
+         }
+         DIP("mulhwu%s r%u,r%u,r%u\n", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         if (mode64) {
+            /* rD[hi32] are undefined: setting them to sign of lo32
+                - makes set_CR0 happy */
+            assign( rD, binop(Iop_Sar64,
+                           binop(Iop_Mul64,
+                                 mk64lo32Uto64( mkexpr(rA) ),
+                                 mk64lo32Uto64( mkexpr(rB) ) ),
+                              mkU8(32)) );
+         } else {
+            assign( rD, unop(Iop_64HIto32, 
+                             binop(Iop_MullU32,
+                                   mkexpr(rA), mkexpr(rB))) );
+         }
+         break;
+         
+      case 0x0EB: // mullw      (Multiply Low Word, PPC32 p491)
+         DIP("mullw%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         if (mode64) {
+            /* rD[hi32] are undefined: setting them to sign of lo32
+                - set_XER_OV() and set_CR0() depend on this */
+            IRExpr *a = unop(Iop_64to32, mkexpr(rA) );
+            IRExpr *b = unop(Iop_64to32, mkexpr(rB) );
+            assign( rD, binop(Iop_MullS32, a, b) );
+            if (flag_OE) {
+               set_XER_OV( ty, PPCG_FLAG_OP_MULLW, 
+                           mkexpr(rD),
+                           unop(Iop_32Uto64, a), unop(Iop_32Uto64, b) );
+            }
+         } else {
+            assign( rD, unop(Iop_64to32,
+                             binop(Iop_MullU32,
+                                   mkexpr(rA), mkexpr(rB))) );
+            if (flag_OE) {
+               set_XER_OV( ty, PPCG_FLAG_OP_MULLW, 
+                           mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+            }
+         }
+         break;
+
+      case 0x068: // neg        (Negate, PPC32 p493)
+         if (rB_addr != 0) {
+            vex_printf("dis_int_arith(ppc)(neg,rB_addr)\n");
+            return False;
+         }
+         DIP("neg%s%s r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr);
+         // rD = (~rA) + 1   (two's complement negation)
+         assign( rD, binop( mkSzOp(ty, Iop_Add8),
+                            unop( mkSzOp(ty, Iop_Not8), mkexpr(rA) ),
+                            mkSzImm(ty, 1)) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_NEG, 
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+
+      case 0x028: // subf       (Subtract From, PPC32 p537)
+         DIP("subf%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         // rD = rB - rA
+         assign( rD, binop( mkSzOp(ty, Iop_Sub8),
+                            mkexpr(rB), mkexpr(rA)) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_SUBF, 
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+
+      case 0x008: // subfc      (Subtract from Carrying, PPC32 p538)
+         DIP("subfc%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         // rD = rB - rA
+         assign( rD, binop( mkSzOp(ty, Iop_Sub8),
+                            mkexpr(rB), mkexpr(rA)) );
+         set_XER_CA( ty, PPCG_FLAG_OP_SUBFC, 
+                     mkexpr(rD), mkexpr(rA), mkexpr(rB),
+                     mkSzImm(ty, 0)/*old xer.ca, which is ignored*/ );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_SUBFC, 
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+         
+      case 0x088: {// subfe      (Subtract from Extended, PPC32 p539)
+         IRTemp old_xer_ca = newTemp(ty);
+         DIP("subfe%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         // rD = (log not)rA + rB + XER[CA]
+         assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
+         assign( rD, binop( mkSzOp(ty, Iop_Add8),
+                            unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)),
+                            binop( mkSzOp(ty, Iop_Add8),
+                                   mkexpr(rB), mkexpr(old_xer_ca))) );
+         set_XER_CA( ty, PPCG_FLAG_OP_SUBFE, 
+                     mkexpr(rD), mkexpr(rA), mkexpr(rB), 
+                     mkexpr(old_xer_ca) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_SUBFE, 
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+      }
+
+      case 0x0E8: { // subfme    (Subtract from -1 Extended, PPC32 p541)
+         IRTemp old_xer_ca = newTemp(ty);
+         IRExpr *min_one;
+         if (rB_addr != 0) {
+            vex_printf("dis_int_arith(ppc)(subfme,rB_addr)\n");
+            return False;
+         }
+         DIP("subfme%s%s r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr);
+         // rD = (log not)rA + (-1) + XER[CA]
+         // => Just another form of subfe
+         assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
+         min_one = mkSzImm(ty, (Long)-1);
+         assign( rD, binop( mkSzOp(ty, Iop_Add8),
+                            unop( mkSzOp(ty, Iop_Not8), mkexpr(rA)),
+                            binop( mkSzOp(ty, Iop_Add8),
+                                   min_one, mkexpr(old_xer_ca))) );
+         set_XER_CA( ty, PPCG_FLAG_OP_SUBFE,
+                     mkexpr(rD), mkexpr(rA), min_one,
+                     mkexpr(old_xer_ca) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_SUBFE, 
+                        mkexpr(rD), mkexpr(rA), min_one );
+         }
+         break;
+      }
+
+      case 0x0C8: { // subfze  (Subtract from Zero Extended, PPC32 p542)
+         IRTemp old_xer_ca = newTemp(ty);
+         if (rB_addr != 0) {
+            vex_printf("dis_int_arith(ppc)(subfze,rB_addr)\n");
+            return False;
+         }
+         DIP("subfze%s%s r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr);
+         // rD = (log not)rA + (0) + XER[CA]
+         // => Just another form of subfe
+         assign( old_xer_ca, mkWidenFrom32(ty, getXER_CA32(), False) );
+         assign( rD, binop( mkSzOp(ty, Iop_Add8),
+                           unop( mkSzOp(ty, Iop_Not8),
+                                 mkexpr(rA)), mkexpr(old_xer_ca)) );
+         set_XER_CA( ty, PPCG_FLAG_OP_SUBFE,
+                     mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0), 
+                     mkexpr(old_xer_ca) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_SUBFE,
+                        mkexpr(rD), mkexpr(rA), mkSzImm(ty, 0) );
+         }
+         break;
+      }
+
+
+      /* 64bit Arithmetic */
+      case 0x49:  // mulhd (Multiply High DWord, PPC64 p539)
+         if (flag_OE != 0) {
+            vex_printf("dis_int_arith(ppc)(mulhd,flagOE)\n");
+            return False;
+         }
+         DIP("mulhd%s r%u,r%u,r%u\n", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         assign( rD, unop(Iop_128HIto64, 
+                          binop(Iop_MullS64,
+                                mkexpr(rA), mkexpr(rB))) );
+
+         break;
+
+      case 0x9:   // mulhdu  (Multiply High DWord Unsigned, PPC64 p540)
+         if (flag_OE != 0) {
+            vex_printf("dis_int_arith(ppc)(mulhdu,flagOE)\n");
+            return False;
+         }
+         DIP("mulhdu%s r%u,r%u,r%u\n", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         assign( rD, unop(Iop_128HIto64, 
+                          binop(Iop_MullU64,
+                                mkexpr(rA), mkexpr(rB))) );
+         break;
+
+      case 0xE9:  // mulld (Multiply Low DWord, PPC64 p543)
+         DIP("mulld%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         assign( rD, binop(Iop_Mul64, mkexpr(rA), mkexpr(rB)) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_MULLD, 
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+
+      case 0x1E9: // divd (Divide DWord, PPC64 p419)
+         DIP("divd%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         assign( rD, binop(Iop_DivS64, mkexpr(rA), mkexpr(rB)) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_DIVW, 
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+         /* Note:
+            if (0x8000_0000_0000_0000 / -1) or (x / 0)
+            => rD=undef, if(flag_rC) CR7=undef, if(flag_OE) XER_OV=1
+            => But _no_ exception raised. */
+
+      case 0x1C9: // divdu (Divide DWord Unsigned, PPC64 p420)
+         DIP("divdu%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         assign( rD, binop(Iop_DivU64, mkexpr(rA), mkexpr(rB)) );
+         if (flag_OE) {
+            set_XER_OV( ty, PPCG_FLAG_OP_DIVWU, 
+                        mkexpr(rD), mkexpr(rA), mkexpr(rB) );
+         }
+         break;
+         /* Note: ditto comment divd, for (x / 0) */
+
+      case 0x18B: // divweu (Divide Word Extended Unsigned)
+      {
+        /*
+         *  If (RA) >= (RB), or if an attempt is made to perform the division
+         *         <anything> / 0
+         * then the contents of register RD are undefined as are (if Rc=1) the contents of
+         * the LT, GT, and EQ bits of CR Field 0. In these cases, if OE=1 then OV is set
+         * to 1.
+         */
+         IRTemp res = newTemp(Ity_I32);
+         IRExpr * dividend, * divisor;
+         DIP("divweu%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+                                         rD_addr, rA_addr, rB_addr);
+         if (mode64) {
+            dividend = unop( Iop_64to32, mkexpr( rA ) );
+            divisor = unop( Iop_64to32, mkexpr( rB ) );
+            assign( res, binop( Iop_DivU32E, dividend, divisor ) );
+            assign( rD, binop( Iop_32HLto64, mkU32( 0 ), mkexpr( res ) ) );
+         } else {
+            dividend = mkexpr( rA );
+            divisor =  mkexpr( rB );
+            assign( res, binop( Iop_DivU32E, dividend, divisor ) );
+            assign( rD, mkexpr( res) );
+         }
+
+         if (flag_OE) {
+            set_XER_OV_32( PPCG_FLAG_OP_DIVWEU,
+                           mkexpr(res), dividend, divisor );
+         }
+         break;
+      }
+
+      case 0x1AB: // divwe (Divide Word Extended)
+      {
+         /*
+          * If the quotient cannot be represented in 32 bits, or if an
+          * attempt is made to perform the division
+          *      <anything> / 0
+          * then the contents of register RD are undefined as are (if
+          * Rc=1) the contents of the LT, GT, and EQ bits of CR
+          * Field 0. In these cases, if OE=1 then OV is set to 1.
+          */
+
+         IRTemp res = newTemp(Ity_I32);
+         IRExpr * dividend, * divisor;
+         DIP("divwe%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+                                         rD_addr, rA_addr, rB_addr);
+         if (mode64) {
+            dividend = unop( Iop_64to32, mkexpr( rA ) );
+            divisor = unop( Iop_64to32, mkexpr( rB ) );
+            assign( res, binop( Iop_DivS32E, dividend, divisor ) );
+            assign( rD, binop( Iop_32HLto64, mkU32( 0 ), mkexpr( res ) ) );
+         } else {
+            dividend = mkexpr( rA );
+            divisor =  mkexpr( rB );
+            assign( res, binop( Iop_DivS32E, dividend, divisor ) );
+            assign( rD, mkexpr( res) );
+         }
+
+         if (flag_OE) {
+            set_XER_OV_32( PPCG_FLAG_OP_DIVWE,
+                           mkexpr(res), dividend, divisor );
+         }
+         break;
+      }
+
+
+      case 0x1A9: // divde (Divide Doubleword Extended)
+        /*
+         * If the quotient cannot be represented in 64 bits, or if an
+         * attempt is made to perform the division
+         *      <anything> / 0
+         * then the contents of register RD are undefined as are (if
+         * Rc=1) the contents of the LT, GT, and EQ bits of CR
+         * Field 0. In these cases, if OE=1 then OV is set to 1.
+         */
+         DIP("divde%s%s r%u,r%u,r%u\n",
+             flag_OE ? "o" : "", flag_rC ? ".":"",
+             rD_addr, rA_addr, rB_addr);
+         assign( rD, binop(Iop_DivS64E, mkexpr(rA), mkexpr(rB)) );
+         if (flag_OE) {
+            set_XER_OV_64( PPCG_FLAG_OP_DIVDE, mkexpr( rD ),
+                           mkexpr( rA ), mkexpr( rB ) );
+         }
+         break;
+
+      case 0x189: //  divdeuo (Divide Doubleword Extended Unsigned)
+        // Same CR and OV rules as given for divweu above
+        DIP("divdeu%s%s r%u,r%u,r%u\n",
+            flag_OE ? "o" : "", flag_rC ? ".":"",
+            rD_addr, rA_addr, rB_addr);
+        assign( rD, binop(Iop_DivU64E, mkexpr(rA), mkexpr(rB)) );
+        if (flag_OE) {
+           set_XER_OV_64( PPCG_FLAG_OP_DIVDEU, mkexpr( rD ),
+                          mkexpr( rA ), mkexpr( rB ) );
+        }
+        break;
+
+      default:
+         vex_printf("dis_int_arith(ppc)(opc2)\n");
+         return False;
+      }
+      break;
+
+   default:
+      vex_printf("dis_int_arith(ppc)(opc1)\n");
+      return False;
+   }
+
+   /* Commit the result and, if requested, record it in CR0. */
+   putIReg( rD_addr, mkexpr(rD) );
+
+   if (do_rc && flag_rC) {
+      set_CR0( mkexpr(rD) );
+   }
+   return True;
+}
+
+
+
+/*
+  Integer Compare Instructions
+*/
+/* Decode one integer compare instruction (cmpi, cmpli, cmp, cmpl) and
+   emit IR that sets condition-register field crfD from the result.
+   Returns False if the instruction's fields are invalid or the opcode
+   is unrecognised, True otherwise. */
+static Bool dis_int_cmp ( UInt theInstr )
+{
+   /* D-Form, X-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UChar crfD    = toUChar( IFIELD( theInstr, 23, 3 ) );
+   UChar b22     = toUChar( IFIELD( theInstr, 22, 1 ) );
+   UChar flag_L  = toUChar( IFIELD( theInstr, 21, 1 ) );  // 1 => full 64-bit compare
+   UChar rA_addr = ifieldRegA(theInstr);
+   UInt  uimm16  = ifieldUIMM16(theInstr);
+   UChar rB_addr = ifieldRegB(theInstr);
+   UInt  opc2    = ifieldOPClo10(theInstr);
+   UChar b0      = ifieldBIT0(theInstr);
+
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   IRExpr *a = getIReg(rA_addr);
+   IRExpr *b;
+
+   if (!mode64 && flag_L==1) {  // L==1 invalid for 32 bit.
+      vex_printf("dis_int_cmp(ppc)(flag_L)\n");
+      return False;
+   }
+   
+   if (b22 != 0) {
+      vex_printf("dis_int_cmp(ppc)(b22)\n");
+      return False;
+   }
+   
+   switch (opc1) {
+   case 0x0B: // cmpi (Compare Immediate, PPC32 p368)
+      DIP("cmpi cr%u,%u,r%u,%d\n", crfD, flag_L, rA_addr,
+          (Int)extend_s_16to32(uimm16));
+      b = mkSzExtendS16( ty, uimm16 );  // signed compare: sign-extend the immediate
+      if (flag_L == 1) {
+         putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64S, a, b)));
+      } else {
+         a = mkNarrowTo32( ty, a );
+         b = mkNarrowTo32( ty, b );
+         putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32S, a, b)));
+      }
+      putCR0( crfD, getXER_SO() );  // bit 0 of the CR field is a copy of XER[SO]
+      break;
+      
+   case 0x0A: // cmpli (Compare Logical Immediate, PPC32 p370)
+      DIP("cmpli cr%u,%u,r%u,0x%x\n", crfD, flag_L, rA_addr, uimm16);
+      b = mkSzImm( ty, uimm16 );  // unsigned compare: immediate is zero-extended
+      if (flag_L == 1) {
+         putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64U, a, b)));
+      } else {
+         a = mkNarrowTo32( ty, a );
+         b = mkNarrowTo32( ty, b );
+         putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32U, a, b)));
+      }
+      putCR0( crfD, getXER_SO() );
+      break;
+      
+   /* X Form */
+   case 0x1F:
+      if (b0 != 0) {
+         vex_printf("dis_int_cmp(ppc)(0x1F,b0)\n");
+         return False;
+      }
+      b = getIReg(rB_addr);
+
+      switch (opc2) {
+      case 0x000: // cmp (Compare, PPC32 p367)
+         DIP("cmp cr%u,%u,r%u,r%u\n", crfD, flag_L, rA_addr, rB_addr);
+         /* Comparing a reg with itself produces a result which
+            doesn't depend on the contents of the reg.  Therefore
+            remove the false dependency, which has been known to cause
+            memcheck to produce false errors. */
+         if (rA_addr == rB_addr)
+            a = b = typeOfIRExpr(irsb->tyenv,a) == Ity_I64
+                    ? mkU64(0)  : mkU32(0);
+         if (flag_L == 1) {
+            putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64S, a, b)));
+         } else {
+            a = mkNarrowTo32( ty, a );
+            b = mkNarrowTo32( ty, b );
+            putCR321(crfD, unop(Iop_32to8,binop(Iop_CmpORD32S, a, b)));
+         }
+         putCR0( crfD, getXER_SO() );
+         break;
+         
+      case 0x020: // cmpl (Compare Logical, PPC32 p369)
+         DIP("cmpl cr%u,%u,r%u,r%u\n", crfD, flag_L, rA_addr, rB_addr);
+         /* Comparing a reg with itself produces a result which
+            doesn't depend on the contents of the reg.  Therefore
+            remove the false dependency, which has been known to cause
+            memcheck to produce false errors. */
+         if (rA_addr == rB_addr)
+            a = b = typeOfIRExpr(irsb->tyenv,a) == Ity_I64
+                    ? mkU64(0)  : mkU32(0);
+         if (flag_L == 1) {
+            putCR321(crfD, unop(Iop_64to8, binop(Iop_CmpORD64U, a, b)));
+         } else {
+            a = mkNarrowTo32( ty, a );
+            b = mkNarrowTo32( ty, b );
+            putCR321(crfD, unop(Iop_32to8, binop(Iop_CmpORD32U, a, b)));
+         }
+         putCR0( crfD, getXER_SO() );
+         break;
+
+      default:
+         vex_printf("dis_int_cmp(ppc)(opc2)\n");
+         return False;
+      }
+      break;
+      
+   default:
+      vex_printf("dis_int_cmp(ppc)(opc1)\n");
+      return False;
+   }
+   
+   return True;
+}
+
+
+/*
+  Integer Logical Instructions
+*/
+/* Decode one integer logical instruction (andi./oris/xor/eqv/cntlz*,
+   extsb/extsh/extsw, nand/nor/orc, cmpb, popcnt*, bpermd, and the
+   mftgpr/mffgpr moves) and emit the corresponding IR.  Most cases fall
+   through to the common rA write-back at the bottom; the popcnt*,
+   bpermd, mftgpr and mffgpr cases write their destination themselves
+   and return early.  Returns False on unrecognised/invalid encodings. */
+static Bool dis_int_logic ( UInt theInstr )
+{
+   /* D-Form, X-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UChar rS_addr = ifieldRegDS(theInstr);
+   UChar rA_addr = ifieldRegA(theInstr);
+   UInt  uimm16  = ifieldUIMM16(theInstr);
+   UChar rB_addr = ifieldRegB(theInstr);
+   UInt  opc2    = ifieldOPClo10(theInstr);
+   UChar flag_rC = ifieldBIT0(theInstr);
+   
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp rS     = newTemp(ty);
+   IRTemp rA     = newTemp(ty);
+   IRTemp rB     = newTemp(ty);
+   IRExpr* irx;
+   Bool do_rc    = False;   // True => set CR0 from the result when Rc=1
+
+   assign( rS, getIReg(rS_addr) );
+   assign( rB, getIReg(rB_addr) );
+   
+   switch (opc1) {
+   case 0x1C: // andi. (AND Immediate, PPC32 p358)
+      DIP("andi. r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
+      assign( rA, binop( mkSzOp(ty, Iop_And8), mkexpr(rS),
+                         mkSzImm(ty, uimm16)) );
+      do_rc = True;  // Always record to CR
+      flag_rC = 1;
+      break;
+      
+   case 0x1D: // andis. (AND Immediate Shifted, PPC32 p359)
+      DIP("andis r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
+      assign( rA, binop( mkSzOp(ty, Iop_And8), mkexpr(rS),
+                         mkSzImm(ty, uimm16 << 16)) );
+      do_rc = True;  // Always record to CR
+      flag_rC = 1;
+      break;
+
+   case 0x18: // ori (OR Immediate, PPC32 p497)
+      DIP("ori r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
+      assign( rA, binop( mkSzOp(ty, Iop_Or8), mkexpr(rS),
+                         mkSzImm(ty, uimm16)) );
+      break;
+
+   case 0x19: // oris (OR Immediate Shifted, PPC32 p498)
+      DIP("oris r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
+      assign( rA, binop( mkSzOp(ty, Iop_Or8), mkexpr(rS),
+                         mkSzImm(ty, uimm16 << 16)) );
+      break;
+
+   case 0x1A: // xori (XOR Immediate, PPC32 p550)
+      DIP("xori r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
+      assign( rA, binop( mkSzOp(ty, Iop_Xor8), mkexpr(rS),
+                         mkSzImm(ty, uimm16)) );
+      break;
+
+   case 0x1B: // xoris (XOR Immediate Shifted, PPC32 p551)
+      DIP("xoris r%u,r%u,0x%x\n", rA_addr, rS_addr, uimm16);
+      assign( rA, binop( mkSzOp(ty, Iop_Xor8), mkexpr(rS),
+                         mkSzImm(ty, uimm16 << 16)) );
+      break;
+
+   /* X Form */
+   case 0x1F:
+      do_rc = True; // All below record to CR, except for where we return at case end.
+
+      switch (opc2) {
+      case 0x01C: // and (AND, PPC32 p356)
+         DIP("and%s r%u,r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
+         assign(rA, binop( mkSzOp(ty, Iop_And8),
+                           mkexpr(rS), mkexpr(rB)));
+         break;
+         
+      case 0x03C: // andc (AND with Complement, PPC32 p357)
+         DIP("andc%s r%u,r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
+         assign(rA, binop( mkSzOp(ty, Iop_And8), mkexpr(rS),
+                           unop( mkSzOp(ty, Iop_Not8),
+                                 mkexpr(rB))));
+         break;
+         
+      case 0x01A: { // cntlzw (Count Leading Zeros Word, PPC32 p371)
+         IRExpr* lo32;
+         if (rB_addr!=0) {
+            vex_printf("dis_int_logic(ppc)(cntlzw,rB_addr)\n");
+            return False;
+         }
+         DIP("cntlzw%s r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr);
+         
+         // mode64: count in low word only
+         lo32 = mode64 ? unop(Iop_64to32, mkexpr(rS)) : mkexpr(rS);
+         
+         // Iop_Clz32 undefined for arg==0, so deal with that case:
+         irx =  binop(Iop_CmpNE32, lo32, mkU32(0));
+         assign(rA, mkWidenFrom32(ty,
+                         IRExpr_ITE( irx,
+                                     unop(Iop_Clz32, lo32),
+                                     mkU32(32)),
+                         False));
+
+         // TODO: alternatively: assign(rA, verbose_Clz32(rS));
+         break;
+      }
+         
+      case 0x11C: // eqv (Equivalent, PPC32 p396)
+         DIP("eqv%s r%u,r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
+         // eqv == NOT(XOR)
+         assign( rA, unop( mkSzOp(ty, Iop_Not8),
+                           binop( mkSzOp(ty, Iop_Xor8),
+                                  mkexpr(rS), mkexpr(rB))) );
+         break;
+
+      case 0x3BA: // extsb (Extend Sign Byte, PPC32 p397)
+         if (rB_addr!=0) {
+            vex_printf("dis_int_logic(ppc)(extsb,rB_addr)\n");
+            return False;
+         }
+         DIP("extsb%s r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr);
+         if (mode64)
+            assign( rA, unop(Iop_8Sto64, unop(Iop_64to8, mkexpr(rS))) );
+         else
+            assign( rA, unop(Iop_8Sto32, unop(Iop_32to8, mkexpr(rS))) );
+         break;
+
+      case 0x39A: // extsh (Extend Sign Half Word, PPC32 p398)
+         if (rB_addr!=0) {
+            vex_printf("dis_int_logic(ppc)(extsh,rB_addr)\n");
+            return False;
+         }
+         DIP("extsh%s r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr);
+         if (mode64)
+            assign( rA, unop(Iop_16Sto64,
+                             unop(Iop_64to16, mkexpr(rS))) );
+         else
+            assign( rA, unop(Iop_16Sto32,
+                             unop(Iop_32to16, mkexpr(rS))) );
+         break;
+
+      case 0x1DC: // nand (NAND, PPC32 p492)
+         DIP("nand%s r%u,r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
+         assign( rA, unop( mkSzOp(ty, Iop_Not8),
+                           binop( mkSzOp(ty, Iop_And8),
+                                  mkexpr(rS), mkexpr(rB))) );
+         break;
+         
+      case 0x07C: // nor (NOR, PPC32 p494)
+         DIP("nor%s r%u,r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
+         assign( rA, unop( mkSzOp(ty, Iop_Not8),
+                           binop( mkSzOp(ty, Iop_Or8),
+                                  mkexpr(rS), mkexpr(rB))) );
+         break;
+
+      case 0x1BC: // or (OR, PPC32 p495)
+         // "or rA,rS,rS" with Rc=0 is the canonical register move "mr"
+         if ((!flag_rC) && rS_addr == rB_addr) {
+            DIP("mr r%u,r%u\n", rA_addr, rS_addr);
+            assign( rA, mkexpr(rS) );
+         } else {
+            DIP("or%s r%u,r%u,r%u\n",
+                flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
+            assign( rA, binop( mkSzOp(ty, Iop_Or8),
+                               mkexpr(rS), mkexpr(rB)) );
+         }
+         break;
+
+      case 0x19C: // orc  (OR with Complement, PPC32 p496)
+         DIP("orc%s r%u,r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
+         assign( rA, binop( mkSzOp(ty, Iop_Or8), mkexpr(rS),
+                            unop(mkSzOp(ty, Iop_Not8), mkexpr(rB))));
+         break;
+         
+      case 0x13C: // xor (XOR, PPC32 p549)
+         DIP("xor%s r%u,r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
+         assign( rA, binop( mkSzOp(ty, Iop_Xor8),
+                            mkexpr(rS), mkexpr(rB)) );
+         break;
+
+
+      /* 64bit Integer Logical Instructions */
+      case 0x3DA: // extsw (Extend Sign Word, PPC64 p430)
+         if (rB_addr!=0) {
+            vex_printf("dis_int_logic(ppc)(extsw,rB_addr)\n");
+            return False;
+         }
+         DIP("extsw%s r%u,r%u\n", flag_rC ? ".":"", rA_addr, rS_addr);
+         assign(rA, unop(Iop_32Sto64, unop(Iop_64to32, mkexpr(rS))));
+         break;
+
+      case 0x03A: // cntlzd (Count Leading Zeros DWord, PPC64 p401)
+         if (rB_addr!=0) {
+            vex_printf("dis_int_logic(ppc)(cntlzd,rB_addr)\n");
+            return False;
+         }
+         DIP("cntlzd%s r%u,r%u\n",
+             flag_rC ? ".":"", rA_addr, rS_addr);
+         // Iop_Clz64 undefined for arg==0, so deal with that case:
+         irx =  binop(Iop_CmpNE64, mkexpr(rS), mkU64(0));
+         assign(rA, IRExpr_ITE( irx,
+                                unop(Iop_Clz64, mkexpr(rS)),
+                                mkU64(64) ));
+         // TODO: alternatively: assign(rA, verbose_Clz64(rS));
+         break;
+
+      case 0x1FC: // cmpb (Power6: compare bytes)
+         DIP("cmpb r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
+
+         // Byte-wise equality done via the vector CmpEQ8x16 op on
+         // zero-extended 128-bit copies of the operands.
+         if (mode64)
+            assign( rA, unop( Iop_V128to64,
+                              binop( Iop_CmpEQ8x16,
+                                     binop( Iop_64HLtoV128, mkU64(0), mkexpr(rS) ),
+                                     binop( Iop_64HLtoV128, mkU64(0), mkexpr(rB) )
+                                     )) );
+         else
+            assign( rA, unop( Iop_V128to32,
+                              binop( Iop_CmpEQ8x16,
+                                     unop( Iop_32UtoV128, mkexpr(rS) ),
+                                     unop( Iop_32UtoV128, mkexpr(rB) )
+                                     )) );
+         break;
+
+      case 0x2DF: { // mftgpr (move floating-point to general purpose register)
+         IRTemp frB = newTemp(Ity_F64);
+         DIP("mftgpr r%u,fr%u\n", rS_addr, rB_addr);
+
+         assign( frB, getFReg(rB_addr));  // always F64
+         if (mode64)
+            assign( rA, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
+         else
+            assign( rA, unop( Iop_64to32, unop( Iop_ReinterpF64asI64, mkexpr(frB))) );
+
+         // Destination is the RT field (rS_addr here); returns directly,
+         // bypassing the common write-back/CR0 path below.
+         putIReg( rS_addr, mkexpr(rA));
+         return True;
+      }
+
+      case 0x25F: { // mffgpr (move floating-point from general purpose register)
+         IRTemp frA = newTemp(Ity_F64);
+         DIP("mffgpr fr%u,r%u\n", rS_addr, rB_addr);
+
+         if (mode64)
+            assign( frA, unop( Iop_ReinterpI64asF64, mkexpr(rB)) );
+         else
+            assign( frA, unop( Iop_ReinterpI64asF64, unop( Iop_32Uto64, mkexpr(rB))) );
+
+         // Writes an FPR, not a GPR; returns directly (no CR0 update).
+         putFReg( rS_addr, mkexpr(frA));
+         return True;
+      }
+      case 0x1FA: // popcntd (population count doubleword)
+      {
+    	  DIP("popcntd r%u,r%u\n", rA_addr, rS_addr);
+    	  IRTemp result = gen_POPCOUNT(ty, rS, DWORD);
+    	  putIReg( rA_addr, mkexpr(result) );
+    	  return True;
+      }
+      case 0x17A: // popcntw (Population Count Words)
+      {
+         DIP("popcntw r%u,r%u\n", rA_addr, rS_addr);
+         if (mode64) {
+            // Count each 32-bit half separately, then reassemble.
+            IRTemp resultHi, resultLo;
+            IRTemp argLo = newTemp(Ity_I32);
+            IRTemp argHi = newTemp(Ity_I32);
+            assign(argLo, unop(Iop_64to32, mkexpr(rS)));
+            assign(argHi, unop(Iop_64HIto32, mkexpr(rS)));
+            resultLo = gen_POPCOUNT(Ity_I32, argLo, WORD);
+            resultHi = gen_POPCOUNT(Ity_I32, argHi, WORD);
+            putIReg( rA_addr, binop(Iop_32HLto64, mkexpr(resultHi), mkexpr(resultLo)));
+         } else {
+            IRTemp result = gen_POPCOUNT(ty, rS, WORD);
+            putIReg( rA_addr, mkexpr(result) );
+         }
+         return True;
+      }
+      case 0x7A: // popcntb (Population Count Byte)
+      {
+         DIP("popcntb r%u,r%u\n", rA_addr, rS_addr);
+
+         if (mode64) {
+            // Per-byte counts computed 32 bits at a time, then reassembled.
+            IRTemp resultHi, resultLo;
+            IRTemp argLo = newTemp(Ity_I32);
+            IRTemp argHi = newTemp(Ity_I32);
+            assign(argLo, unop(Iop_64to32, mkexpr(rS)));
+            assign(argHi, unop(Iop_64HIto32, mkexpr(rS)));
+            resultLo = gen_POPCOUNT(Ity_I32, argLo, BYTE);
+            resultHi = gen_POPCOUNT(Ity_I32, argHi, BYTE);
+            putIReg( rA_addr, binop(Iop_32HLto64, mkexpr(resultHi),
+                                    mkexpr(resultLo)));
+         } else {
+            IRTemp result = gen_POPCOUNT(ty, rS, BYTE);
+            putIReg( rA_addr, mkexpr(result) );
+         }
+         return True;
+      }
+       case 0x0FC: // bpermd (Bit Permute Doubleword)
+       {
+          /* This is a lot of rigmarole to emulate bpermd like this, as it
+           * could be done much faster by implementing a call to the native
+           * instruction.  However, where possible I want to avoid using new
+           * native instructions so that we can use valgrind to emulate those
+           * instructions on older PPC64 hardware.
+           */
+ #define BPERMD_IDX_MASK 0x00000000000000FFULL
+ #define BPERMD_BIT_MASK 0x8000000000000000ULL
+          int i;
+          IRExpr * rS_expr = mkexpr(rS);
+          // res starts as a 64-bit zero (0 & 0); each iteration ORs in
+          // one permuted bit at position i.
+          IRExpr * res = binop(Iop_And64, mkU64(0), mkU64(0));
+          DIP("bpermd r%u,r%u,r%u\n", rA_addr, rS_addr, rB_addr);
+          for (i = 0; i < 8; i++) {
+             IRTemp idx_tmp = newTemp( Ity_I64 );
+             IRTemp perm_bit = newTemp( Ity_I64 );
+             IRTemp idx = newTemp( Ity_I8 );
+             IRTemp idx_LT64 = newTemp( Ity_I1 );
+             IRTemp idx_LT64_ity64 = newTemp( Ity_I64 );
+
+             // Current index is the low byte of rS_expr (shifted down
+             // by 8 each iteration at the bottom of the loop).
+             assign( idx_tmp,
+                     binop( Iop_And64, mkU64( BPERMD_IDX_MASK ), rS_expr ) );
+             assign( idx_LT64,
+                           binop( Iop_CmpLT64U, mkexpr( idx_tmp ), mkU64( 64 ) ) );
+             assign( idx,
+                           binop( Iop_And8,
+                                  unop( Iop_1Sto8,
+                                        mkexpr(idx_LT64) ),
+                                  unop( Iop_64to8, mkexpr( idx_tmp ) ) ) );
+             /* If idx_LT64 == 0, we must force the perm bit to '0'. Below, we use idx
+              * to determine which bit of rB to use for the perm bit, and then we shift
+              * that bit to the MSB position.  We AND that with a 64-bit-ized idx_LT64
+              * to set the final perm bit.
+              */
+             assign( idx_LT64_ity64,
+                           unop( Iop_32Uto64, unop( Iop_1Uto32, mkexpr(idx_LT64 ) ) ) );
+             assign( perm_bit,
+                           binop( Iop_And64,
+                                  mkexpr( idx_LT64_ity64 ),
+                                  binop( Iop_Shr64,
+                                         binop( Iop_And64,
+                                                mkU64( BPERMD_BIT_MASK ),
+                                                binop( Iop_Shl64,
+                                                       mkexpr( rB ),
+                                                       mkexpr( idx ) ) ),
+                                         mkU8( 63 ) ) ) );
+             res = binop( Iop_Or64,
+                                res,
+                                binop( Iop_Shl64,
+                                       mkexpr( perm_bit ),
+                                       mkU8( i ) ) );
+             rS_expr = binop( Iop_Shr64, rS_expr, mkU8( 8 ) );
+          }
+          putIReg(rA_addr, res);
+          return True;
+       }
+
+      default:
+         vex_printf("dis_int_logic(ppc)(opc2)\n");
+         return False;
+      }
+      break;
+      
+   default:
+      vex_printf("dis_int_logic(ppc)(opc1)\n");
+      return False;
+   }
+
+   putIReg( rA_addr, mkexpr(rA) );
+
+   if (do_rc && flag_rC) {
+      set_CR0( mkexpr(rA) );
+   }
+   return True;
+}
+
+/*
+  Integer Parity Instructions
+*/
+/* Decode the parity instructions prtyd (Parity Doubleword) and prtyw
+   (Parity Word), ISA 2.05.  Each byte of rS contributes its least
+   significant bit; the low bit of the sum of those contributions (the
+   parity) is written to rA.  prtyw produces an independent parity per
+   32-bit word.  Returns False on invalid/unrecognised encodings.
+
+   Fixes vs previous version: prtyw was missing its DIP() debug print
+   (prtyd had one); the iHi/rA assigns were joined by a comma operator
+   (now two statements); removed the redundant OR-with-zero on the
+   32-bit prtyw result. */
+static Bool dis_int_parity ( UInt theInstr )
+{
+   /* X-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UChar rS_addr = ifieldRegDS(theInstr);
+   UChar rA_addr = ifieldRegA(theInstr);
+   UChar rB_addr = ifieldRegB(theInstr);
+   UInt  opc2    = ifieldOPClo10(theInstr);
+   UChar b0      = ifieldBIT0(theInstr);
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+
+   IRTemp rS     = newTemp(ty);
+   IRTemp rA     = newTemp(ty);
+   /* iTotN: running sum of byte-LSBs after N bytes; rSN: rS shifted
+      right by N bytes. */
+   IRTemp iTot1  = newTemp(Ity_I32);
+   IRTemp iTot2  = newTemp(Ity_I32);
+   IRTemp iTot3  = newTemp(Ity_I32);
+   IRTemp iTot4  = newTemp(Ity_I32);
+   IRTemp iTot5  = newTemp(Ity_I32);
+   IRTemp iTot6  = newTemp(Ity_I32);
+   IRTemp iTot7  = newTemp(Ity_I32);
+   IRTemp iTot8  = newTemp(Ity_I32);
+   IRTemp rS1    = newTemp(ty);
+   IRTemp rS2    = newTemp(ty);
+   IRTemp rS3    = newTemp(ty);
+   IRTemp rS4    = newTemp(ty);
+   IRTemp rS5    = newTemp(ty);
+   IRTemp rS6    = newTemp(ty);
+   IRTemp rS7    = newTemp(ty);
+   IRTemp iHi    = newTemp(Ity_I32);
+   IRTemp iLo    = newTemp(Ity_I32);
+   IROp to_bit   = (mode64 ? Iop_64to1 : Iop_32to1);   // extract LSB
+   IROp shr_op   = (mode64 ? Iop_Shr64 : Iop_Shr32);
+
+   // Both insns are X-form under opc1 0x1F with rB and bit 0 zero.
+   if (opc1 != 0x1f || rB_addr || b0) {
+      vex_printf("dis_int_parity(ppc)(0x1F,opc1:rB|b0)\n");
+      return False;
+   }
+
+   assign( rS, getIReg(rS_addr) );
+
+   switch (opc2) {
+   case 0xba:  // prtyd (Parity Doubleword, ISA 2.05 p320)
+      DIP("prtyd r%u,r%u\n", rA_addr, rS_addr);
+      // Sum the LSB of every byte of rS.
+      assign( iTot1, unop(Iop_1Uto32, unop(to_bit, mkexpr(rS))) );
+      assign( rS1, binop(shr_op, mkexpr(rS), mkU8(8)) );
+      assign( iTot2, binop(Iop_Add32,
+                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS1))),
+                           mkexpr(iTot1)) );
+      assign( rS2, binop(shr_op, mkexpr(rS1), mkU8(8)) );
+      assign( iTot3, binop(Iop_Add32,
+                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS2))),
+                           mkexpr(iTot2)) );
+      assign( rS3, binop(shr_op, mkexpr(rS2), mkU8(8)) );
+      assign( iTot4, binop(Iop_Add32,
+                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS3))),
+                           mkexpr(iTot3)) );
+      if (mode64) {
+         assign( rS4, binop(shr_op, mkexpr(rS3), mkU8(8)) );
+         assign( iTot5, binop(Iop_Add32,
+                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS4))),
+                              mkexpr(iTot4)) );
+         assign( rS5, binop(shr_op, mkexpr(rS4), mkU8(8)) );
+         assign( iTot6, binop(Iop_Add32,
+                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS5))),
+                              mkexpr(iTot5)) );
+         assign( rS6, binop(shr_op, mkexpr(rS5), mkU8(8)) );
+         assign( iTot7, binop(Iop_Add32,
+                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS6))),
+                              mkexpr(iTot6)) );
+         assign( rS7, binop(shr_op, mkexpr(rS6), mkU8(8)) );
+         assign( iTot8, binop(Iop_Add32,
+                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS7))),
+                              mkexpr(iTot7)) );
+         // Result is the parity (low bit of the 8-byte sum), zero-extended.
+         assign( rA, unop(Iop_32Uto64,
+                          binop(Iop_And32, mkexpr(iTot8), mkU32(1))) );
+      } else
+         /* NOTE(review): prtyd is a 64-bit instruction; in 32-bit mode
+            this writes the raw 4-byte bit count (0..4) rather than its
+            low bit.  Presumably this path is unreachable in 32-bit
+            mode -- confirm before relying on it. */
+         assign( rA, mkexpr(iTot4) );
+
+      break;
+   case 0x9a:  // prtyw (Parity Word, ISA 2.05 p320)
+      DIP("prtyw r%u,r%u\n", rA_addr, rS_addr);
+      // Low word: sum the LSBs of bytes 0..3, parity -> iLo.
+      assign( iTot1, unop(Iop_1Uto32, unop(to_bit, mkexpr(rS))) );
+      assign( rS1, binop(shr_op, mkexpr(rS), mkU8(8)) );
+      assign( iTot2, binop(Iop_Add32,
+                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS1))),
+                           mkexpr(iTot1)) );
+      assign( rS2, binop(shr_op, mkexpr(rS1), mkU8(8)) );
+      assign( iTot3, binop(Iop_Add32,
+                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS2))),
+                           mkexpr(iTot2)) );
+      assign( rS3, binop(shr_op, mkexpr(rS2), mkU8(8)) );
+      assign( iTot4, binop(Iop_Add32,
+                           unop(Iop_1Uto32, unop(to_bit, mkexpr(rS3))),
+                           mkexpr(iTot3)) );
+      assign( iLo, unop(Iop_1Uto32, unop(Iop_32to1, mkexpr(iTot4) )) );
+
+      if (mode64) {
+         // High word: independent sum over bytes 4..7, parity -> iHi.
+         assign( rS4, binop(shr_op, mkexpr(rS3), mkU8(8)) );
+         assign( iTot5, unop(Iop_1Uto32, unop(to_bit, mkexpr(rS4))) );
+         assign( rS5, binop(shr_op, mkexpr(rS4), mkU8(8)) );
+         assign( iTot6, binop(Iop_Add32,
+                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS5))),
+                              mkexpr(iTot5)) );
+         assign( rS6, binop(shr_op, mkexpr(rS5), mkU8(8)) );
+         assign( iTot7, binop(Iop_Add32,
+                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS6))),
+                              mkexpr(iTot6)) );
+         assign( rS7, binop(shr_op, mkexpr(rS6), mkU8(8)));
+         assign( iTot8, binop(Iop_Add32,
+                              unop(Iop_1Uto32, unop(to_bit, mkexpr(rS7))),
+                              mkexpr(iTot7)) );
+         assign( iHi, binop(Iop_And32, mkU32(1), mkexpr(iTot8)) );
+         assign( rA, binop(Iop_32HLto64, mkexpr(iHi), mkexpr(iLo)) );
+      } else
+         assign( rA, mkexpr(iLo) );
+      break;
+   default:
+      vex_printf("dis_int_parity(ppc)(opc2)\n");
+      return False;
+   }
+
+   putIReg( rA_addr, mkexpr(rA) );
+
+   return True;
+}
+
+
+/*
+  Integer Rotate Instructions
+*/
+static Bool dis_int_rot ( UInt theInstr )
+{
+   /* M-Form, MDS-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UChar rS_addr = ifieldRegDS(theInstr);
+   UChar rA_addr = ifieldRegA(theInstr);
+   UChar rB_addr = ifieldRegB(theInstr);
+   UChar sh_imm  = rB_addr;
+   UChar MaskBeg = toUChar( IFIELD( theInstr, 6, 5 ) );
+   UChar MaskEnd = toUChar( IFIELD( theInstr, 1, 5 ) );
+   UChar msk_imm = toUChar( IFIELD( theInstr, 5, 6 ) );
+   UChar opc2    = toUChar( IFIELD( theInstr, 2, 3 ) );
+   UChar b1      = ifieldBIT1(theInstr);
+   UChar flag_rC = ifieldBIT0(theInstr);
+
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp rS     = newTemp(ty);
+   IRTemp rA     = newTemp(ty);
+   IRTemp rB     = newTemp(ty);
+   IRTemp rot    = newTemp(ty);
+   IRExpr *r;
+   UInt   mask32;
+   ULong  mask64;
+
+   assign( rS, getIReg(rS_addr) );
+   assign( rB, getIReg(rB_addr) );
+
+   switch (opc1) {
+   case 0x14: {
+      // rlwimi (Rotate Left Word Imm then Mask Insert, PPC32 p500)
+      DIP("rlwimi%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"",
+          rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
+      if (mode64) {
+         // tmp32 = (ROTL(rS_Lo32, Imm)
+         // rA = ((tmp32 || tmp32) & mask64) | (rA & ~mask64)
+         mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
+         r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
+         r = unop(Iop_32Uto64, r);
+         assign( rot, binop(Iop_Or64, r,
+                            binop(Iop_Shl64, r, mkU8(32))) );
+         assign( rA,
+            binop(Iop_Or64,
+                  binop(Iop_And64, mkexpr(rot), mkU64(mask64)),
+                  binop(Iop_And64, getIReg(rA_addr), mkU64(~mask64))) );
+      }
+      else {
+         // rA = (ROTL(rS, Imm) & mask) | (rA & ~mask);
+         mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
+         r = ROTL(mkexpr(rS), mkU8(sh_imm));
+         assign( rA,
+            binop(Iop_Or32,
+                  binop(Iop_And32, mkU32(mask32), r),
+                  binop(Iop_And32, getIReg(rA_addr), mkU32(~mask32))) );
+      }
+      break;
+   }
+
+   case 0x15: {
+      // rlwinm (Rotate Left Word Imm then AND with Mask, PPC32 p501)
+      vassert(MaskBeg < 32);
+      vassert(MaskEnd < 32);
+      vassert(sh_imm  < 32);
+
+      if (mode64) {
+         IRTemp rTmp = newTemp(Ity_I64);
+         mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
+         DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"",
+             rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
+         // tmp32 = (ROTL(rS_Lo32, Imm)
+         // rA = ((tmp32 || tmp32) & mask64)
+         r = ROTL( unop(Iop_64to32, mkexpr(rS) ), mkU8(sh_imm) );
+         r = unop(Iop_32Uto64, r);
+         assign( rTmp, r );
+         r = NULL;
+         assign( rot, binop(Iop_Or64, mkexpr(rTmp),
+                            binop(Iop_Shl64, mkexpr(rTmp), mkU8(32))) );
+         assign( rA, binop(Iop_And64, mkexpr(rot), mkU64(mask64)) );
+      }
+      else {
+         if (MaskBeg == 0 && sh_imm+MaskEnd == 31) {
+            /* Special-case the ,n,0,31-n form as that is just n-bit
+               shift left, PPC32 p501 */
+            DIP("slwi%s r%u,r%u,%d\n", flag_rC ? ".":"",
+                rA_addr, rS_addr, sh_imm);
+            assign( rA, binop(Iop_Shl32, mkexpr(rS), mkU8(sh_imm)) );
+         }
+         else if (MaskEnd == 31 && sh_imm+MaskBeg == 32) {
+            /* Special-case the ,32-n,n,31 form as that is just n-bit
+               unsigned shift right, PPC32 p501 */
+            DIP("srwi%s r%u,r%u,%d\n", flag_rC ? ".":"",
+                rA_addr, rS_addr, MaskBeg);
+            assign( rA, binop(Iop_Shr32, mkexpr(rS), mkU8(MaskBeg)) );
+         }
+         else {
+            /* General case. */
+            mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
+            DIP("rlwinm%s r%u,r%u,%d,%d,%d\n", flag_rC ? ".":"",
+                rA_addr, rS_addr, sh_imm, MaskBeg, MaskEnd);
+            // rA = ROTL(rS, Imm) & mask
+            assign( rA, binop(Iop_And32,
+                              ROTL(mkexpr(rS), mkU8(sh_imm)), 
+                              mkU32(mask32)) );
+         }
+      }
+      break;
+   }
+
+   case 0x17: {
+      // rlwnm (Rotate Left Word then AND with Mask, PPC32 p503
+      DIP("rlwnm%s r%u,r%u,r%u,%d,%d\n", flag_rC ? ".":"",
+          rA_addr, rS_addr, rB_addr, MaskBeg, MaskEnd);
+      if (mode64) {
+         mask64 = MASK64(31-MaskEnd, 31-MaskBeg);
+         /* weird insn alert!
+            tmp32 = (ROTL(rS_Lo32, rB[0-4])
+            rA = ((tmp32 || tmp32) & mask64)
+         */
+         // note, ROTL does the masking, so we don't do it here
+         r = ROTL( unop(Iop_64to32, mkexpr(rS)),
+                   unop(Iop_64to8, mkexpr(rB)) );
+         r = unop(Iop_32Uto64, r);
+         assign(rot, binop(Iop_Or64, r, binop(Iop_Shl64, r, mkU8(32))));
+         assign( rA, binop(Iop_And64, mkexpr(rot), mkU64(mask64)) );
+      } else {
+         mask32 = MASK32(31-MaskEnd, 31-MaskBeg);
+         // rA = ROTL(rS, rB[0-4]) & mask
+         // note, ROTL does the masking, so we don't do it here
+         assign( rA, binop(Iop_And32,
+                           ROTL(mkexpr(rS),
+                                unop(Iop_32to8, mkexpr(rB))),
+                           mkU32(mask32)) );
+      }
+      break;
+   }
+
+   /* 64bit Integer Rotates */
+   case 0x1E: {
+      msk_imm = ((msk_imm & 1) << 5) | (msk_imm >> 1);
+      sh_imm |= b1 << 5;
+
+      vassert( msk_imm < 64 );
+      vassert( sh_imm < 64 );
+
+      switch (opc2) {
+      case 0x4: {
+         /* r = ROTL64( rS, rB_lo6) */
+         r = ROTL( mkexpr(rS), unop(Iop_64to8, mkexpr(rB)) );
+
+         if (b1 == 0) { // rldcl (Rotl DWord, Clear Left, PPC64 p555)
+            DIP("rldcl%s r%u,r%u,r%u,%u\n", flag_rC ? ".":"",
+                rA_addr, rS_addr, rB_addr, msk_imm);
+            // note, ROTL does the masking, so we don't do it here
+            mask64 = MASK64(0, 63-msk_imm);
+            assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+            break;
+         } else {       // rldcr (Rotl DWord, Clear Right, PPC64 p556)
+            DIP("rldcr%s r%u,r%u,r%u,%u\n", flag_rC ? ".":"",
+                rA_addr, rS_addr, rB_addr, msk_imm);
+            mask64 = MASK64(63-msk_imm, 63);
+            assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+            break;
+         }
+         break;
+      }
+      case 0x2: // rldic (Rotl DWord Imm, Clear, PPC64 p557)
+         DIP("rldic%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
+             rA_addr, rS_addr, sh_imm, msk_imm);
+         r = ROTL(mkexpr(rS), mkU8(sh_imm));
+         mask64 = MASK64(sh_imm, 63-msk_imm);
+         assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+         break;
+         // later: deal with special case: (msk_imm==0) => SHL(sh_imm)
+         /*
+           Hmm... looks like this'll do the job more simply:
+           r = SHL(rS, sh_imm)
+           m = ~(1 << (63-msk_imm))
+           assign(rA, r & m);
+         */
+         
+      case 0x0: // rldicl (Rotl DWord Imm, Clear Left, PPC64 p558)
+         if (mode64
+             && sh_imm + msk_imm == 64 && msk_imm >= 1 && msk_imm <= 63) {
+            /* special-case the ,64-n,n form as that is just
+               unsigned shift-right by n */
+            DIP("srdi%s r%u,r%u,%u\n",
+                flag_rC ? ".":"", rA_addr, rS_addr, msk_imm);
+            assign( rA, binop(Iop_Shr64, mkexpr(rS), mkU8(msk_imm)) );
+         } else {
+            DIP("rldicl%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
+                rA_addr, rS_addr, sh_imm, msk_imm);
+            r = ROTL(mkexpr(rS), mkU8(sh_imm));
+            mask64 = MASK64(0, 63-msk_imm);
+            assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+         }
+         break;
+         
+      case 0x1: // rldicr (Rotl DWord Imm, Clear Right, PPC64 p559)
+         if (mode64 
+             && sh_imm + msk_imm == 63 && sh_imm >= 1 && sh_imm <= 63) {
+            /* special-case the ,n,63-n form as that is just
+               shift-left by n */
+            DIP("sldi%s r%u,r%u,%u\n",
+                flag_rC ? ".":"", rA_addr, rS_addr, sh_imm);
+            assign( rA, binop(Iop_Shl64, mkexpr(rS), mkU8(sh_imm)) );
+         } else {
+            DIP("rldicr%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
+                rA_addr, rS_addr, sh_imm, msk_imm);
+            r = ROTL(mkexpr(rS), mkU8(sh_imm));
+            mask64 = MASK64(63-msk_imm, 63);
+            assign( rA, binop(Iop_And64, r, mkU64(mask64)) );
+         }
+         break;
+         
+      case 0x3: { // rldimi (Rotl DWord Imm, Mask Insert, PPC64 p560)
+         IRTemp rA_orig = newTemp(ty);
+         DIP("rldimi%s r%u,r%u,%u,%u\n", flag_rC ? ".":"",
+             rA_addr, rS_addr, sh_imm, msk_imm);
+         r = ROTL(mkexpr(rS), mkU8(sh_imm));
+         mask64 = MASK64(sh_imm, 63-msk_imm);
+         assign( rA_orig, getIReg(rA_addr) );
+         assign( rA, binop(Iop_Or64,
+                           binop(Iop_And64, mkU64(mask64),  r),
+                           binop(Iop_And64, mkU64(~mask64),
+                                            mkexpr(rA_orig))) );
+         break;
+      }
+      default:
+         vex_printf("dis_int_rot(ppc)(opc2)\n");
+         return False;
+      }
+      break;         
+   }
+
+   default:
+      vex_printf("dis_int_rot(ppc)(opc1)\n");
+      return False;
+   }
+
+   putIReg( rA_addr, mkexpr(rA) );
+
+   if (flag_rC) {
+      set_CR0( mkexpr(rA) );
+   }
+   return True;
+}
+
+
+/*
+  Integer Load Instructions
+*/
+static Bool dis_int_load ( UInt theInstr )
+{
+   /* D-Form, X-Form, DS-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar rD_addr  = ifieldRegDS(theInstr);
+   UChar rA_addr  = ifieldRegA(theInstr);
+   UInt  uimm16   = ifieldUIMM16(theInstr);
+   UChar rB_addr  = ifieldRegB(theInstr);
+   UInt  opc2     = ifieldOPClo10(theInstr);
+   UChar b1       = ifieldBIT1(theInstr);
+   UChar b0       = ifieldBIT0(theInstr);
+
+   Int     simm16 = extend_s_16to32(uimm16);
+   IRType  ty     = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp  EA     = newTemp(ty);
+   IRExpr* val;
+
+   switch (opc1) {
+   case 0x1F: // register offset
+      assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
+      break;
+   case 0x38: // immediate offset: 64bit: lq: maskoff
+              // lowest 4 bits of immediate before forming EA
+      simm16 = simm16 & 0xFFFFFFF0;
+      assign( EA, ea_rAor0_simm( rA_addr, simm16  ) );
+      break;
+   case 0x3A: // immediate offset: 64bit: ld/ldu/lwa: mask off
+              // lowest 2 bits of immediate before forming EA
+      simm16 = simm16 & 0xFFFFFFFC;
+      assign( EA, ea_rAor0_simm( rA_addr, simm16  ) );
+      break;
+   default:   // immediate offset
+      assign( EA, ea_rAor0_simm( rA_addr, simm16  ) );
+      break;
+   }
+
+   switch (opc1) {
+   case 0x22: // lbz (Load B & Zero, PPC32 p433)
+      DIP("lbz r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
+      val = load(Ity_I8, mkexpr(EA));
+      putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
+      break;
+      
+   case 0x23: // lbzu (Load B & Zero, Update, PPC32 p434)
+      if (rA_addr == 0 || rA_addr == rD_addr) {
+         vex_printf("dis_int_load(ppc)(lbzu,rA_addr|rD_addr)\n");
+         return False;
+      }
+      DIP("lbzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
+      val = load(Ity_I8, mkexpr(EA));
+      putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
+      putIReg( rA_addr, mkexpr(EA) );
+      break;
+      
+   case 0x2A: // lha (Load HW Alg, PPC32 p445)
+      DIP("lha r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
+      val = load(Ity_I16, mkexpr(EA));
+      putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
+      break;
+
+   case 0x2B: // lhau (Load HW Alg, Update, PPC32 p446)
+      if (rA_addr == 0 || rA_addr == rD_addr) {
+         vex_printf("dis_int_load(ppc)(lhau,rA_addr|rD_addr)\n");
+         return False;
+      }
+      DIP("lhau r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
+      val = load(Ity_I16, mkexpr(EA));
+      putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
+      putIReg( rA_addr, mkexpr(EA) );
+      break;
+      
+   case 0x28: // lhz (Load HW & Zero, PPC32 p450)
+      DIP("lhz r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
+      val = load(Ity_I16, mkexpr(EA));
+      putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
+      break;
+      
+   case 0x29: // lhzu (Load HW & and Zero, Update, PPC32 p451)
+      if (rA_addr == 0 || rA_addr == rD_addr) {
+         vex_printf("dis_int_load(ppc)(lhzu,rA_addr|rD_addr)\n");
+         return False;
+      }
+      DIP("lhzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
+      val = load(Ity_I16, mkexpr(EA));
+      putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
+      putIReg( rA_addr, mkexpr(EA) );
+      break;
+
+   case 0x20: // lwz (Load W & Zero, PPC32 p460)
+      DIP("lwz r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
+      val = load(Ity_I32, mkexpr(EA));
+      putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
+      break;
+      
+   case 0x21: // lwzu (Load W & Zero, Update, PPC32 p461))
+      if (rA_addr == 0 || rA_addr == rD_addr) {
+         vex_printf("dis_int_load(ppc)(lwzu,rA_addr|rD_addr)\n");
+         return False;
+      }
+      DIP("lwzu r%u,%d(r%u)\n", rD_addr, (Int)simm16, rA_addr);
+      val = load(Ity_I32, mkexpr(EA));
+      putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
+      putIReg( rA_addr, mkexpr(EA) );
+      break;
+      
+   /* X Form */
+   case 0x1F:
+      if (b0 != 0) {
+         vex_printf("dis_int_load(ppc)(Ox1F,b0)\n");
+         return False;
+      }
+
+      switch (opc2) {
+      case 0x077: // lbzux (Load B & Zero, Update Indexed, PPC32 p435)
+         DIP("lbzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         if (rA_addr == 0 || rA_addr == rD_addr) {
+            vex_printf("dis_int_load(ppc)(lwzux,rA_addr|rD_addr)\n");
+            return False;
+         }
+         val = load(Ity_I8, mkexpr(EA));
+         putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+         
+      case 0x057: // lbzx (Load B & Zero, Indexed, PPC32 p436)
+         DIP("lbzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         val = load(Ity_I8, mkexpr(EA));
+         putIReg( rD_addr, mkWidenFrom8(ty, val, False) );
+         break;
+         
+      case 0x177: // lhaux (Load HW Alg, Update Indexed, PPC32 p447)
+         if (rA_addr == 0 || rA_addr == rD_addr) {
+            vex_printf("dis_int_load(ppc)(lhaux,rA_addr|rD_addr)\n");
+            return False;
+         }
+         DIP("lhaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         val = load(Ity_I16, mkexpr(EA));
+         putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+         
+      case 0x157: // lhax (Load HW Alg, Indexed, PPC32 p448)
+         DIP("lhax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         val = load(Ity_I16, mkexpr(EA));
+         putIReg( rD_addr, mkWidenFrom16(ty, val, True) );
+         break;
+         
+      case 0x137: // lhzux (Load HW & Zero, Update Indexed, PPC32 p452)
+         if (rA_addr == 0 || rA_addr == rD_addr) {
+            vex_printf("dis_int_load(ppc)(lhzux,rA_addr|rD_addr)\n");
+            return False;
+         }
+         DIP("lhzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         val = load(Ity_I16, mkexpr(EA));
+         putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+         
+      case 0x117: // lhzx (Load HW & Zero, Indexed, PPC32 p453)
+         DIP("lhzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         val = load(Ity_I16, mkexpr(EA));
+         putIReg( rD_addr, mkWidenFrom16(ty, val, False) );
+         break;
+
+      case 0x037: // lwzux (Load W & Zero, Update Indexed, PPC32 p462)
+         if (rA_addr == 0 || rA_addr == rD_addr) {
+            vex_printf("dis_int_load(ppc)(lwzux,rA_addr|rD_addr)\n");
+            return False;
+         }
+         DIP("lwzux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         val = load(Ity_I32, mkexpr(EA));
+         putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+         
+      case 0x017: // lwzx (Load W & Zero, Indexed, PPC32 p463)
+         DIP("lwzx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         val = load(Ity_I32, mkexpr(EA));
+         putIReg( rD_addr, mkWidenFrom32(ty, val, False) );
+         break;
+
+
+      /* 64bit Loads */
+      case 0x035: // ldux (Load DWord, Update Indexed, PPC64 p475)
+         if (rA_addr == 0 || rA_addr == rD_addr) {
+            vex_printf("dis_int_load(ppc)(ldux,rA_addr|rD_addr)\n");
+            return False;
+         }
+         DIP("ldux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         putIReg( rD_addr, load(Ity_I64, mkexpr(EA)) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+
+      case 0x015: // ldx (Load DWord, Indexed, PPC64 p476)
+         DIP("ldx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         putIReg( rD_addr, load(Ity_I64, mkexpr(EA)) );
+         break;
+
+      case 0x175: // lwaux (Load W Alg, Update Indexed, PPC64 p501)
+         if (rA_addr == 0 || rA_addr == rD_addr) {
+            vex_printf("dis_int_load(ppc)(lwaux,rA_addr|rD_addr)\n");
+            return False;
+         }
+         DIP("lwaux r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         putIReg( rD_addr,
+                  unop(Iop_32Sto64, load(Ity_I32, mkexpr(EA))) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+
+      case 0x155: // lwax (Load W Alg, Indexed, PPC64 p502)
+         DIP("lwax r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
+         putIReg( rD_addr,
+                  unop(Iop_32Sto64, load(Ity_I32, mkexpr(EA))) );
+         break;
+
+      default:
+         vex_printf("dis_int_load(ppc)(opc2)\n");
+         return False;
+      }
+      break;
+
+   /* DS Form - 64bit Loads.  In each case EA will have been formed
+      with the lowest 2 bits masked off the immediate offset. */
+   case 0x3A:
+      switch ((b1<<1) | b0) {
+      case 0x0: // ld (Load DWord, PPC64 p472)
+         DIP("ld r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
+         putIReg( rD_addr, load(Ity_I64, mkexpr(EA)) );
+         break;
+
+      case 0x1: // ldu (Load DWord, Update, PPC64 p474)
+         if (rA_addr == 0 || rA_addr == rD_addr) {
+            vex_printf("dis_int_load(ppc)(ldu,rA_addr|rD_addr)\n");
+            return False;
+         }
+         DIP("ldu r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
+         putIReg( rD_addr, load(Ity_I64, mkexpr(EA)) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+
+      case 0x2: // lwa (Load Word Alg, PPC64 p499)
+         DIP("lwa r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
+         putIReg( rD_addr,
+                  unop(Iop_32Sto64, load(Ity_I32, mkexpr(EA))) );
+         break;
+
+      default:
+         vex_printf("dis_int_load(ppc)(0x3A, opc2)\n");
+         return False;
+      }
+      break;
+
+   case 0x38: {
+      IRTemp  high = newTemp(ty);
+      IRTemp  low  = newTemp(ty);
+      /* DQ Form - 128bit Loads. Lowest bits [1:0] are the PT field. */
+      DIP("lq r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
+      /* NOTE: there are some changes to XER[41:42] that have not been
+       * implemented.
+       */
+      // trap if EA misaligned on 16 byte address
+      if (mode64) {
+         if (host_endness == VexEndnessBE) {
+            assign(high, load(ty, mkexpr( EA ) ) );
+            assign(low, load(ty, binop( Iop_Add64,
+                                        mkexpr( EA ),
+                                        mkU64( 8 ) ) ) );
+	 } else {
+            assign(low, load(ty, mkexpr( EA ) ) );
+            assign(high, load(ty, binop( Iop_Add64,
+                                         mkexpr( EA ),
+                                         mkU64( 8 ) ) ) );
+	 }
+      } else {
+         assign(high, load(ty, binop( Iop_Add32,
+                                      mkexpr( EA ),
+                                      mkU32( 4 ) ) ) );
+         assign(low, load(ty, binop( Iop_Add32,
+                                      mkexpr( EA ),
+                                      mkU32( 12 ) ) ) );
+      }
+      gen_SIGBUS_if_misaligned( EA, 16 );
+      putIReg( rD_addr,  mkexpr( high) );
+      putIReg( rD_addr+1,  mkexpr( low) );
+      break;
+   }
+   default:
+      vex_printf("dis_int_load(ppc)(opc1)\n");
+      return False;
+   }
+   return True;
+}
+
+
+
+/*
+  Integer Store Instructions
+*/
+static Bool dis_int_store ( UInt theInstr, const VexAbiInfo* vbi )
+{
+   /* D-Form, X-Form, DS-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UInt  rS_addr = ifieldRegDS(theInstr);
+   UInt  rA_addr = ifieldRegA(theInstr);
+   UInt  uimm16  = ifieldUIMM16(theInstr);
+   UInt  rB_addr = ifieldRegB(theInstr);
+   UInt  opc2    = ifieldOPClo10(theInstr);
+   UChar b1      = ifieldBIT1(theInstr);
+   UChar b0      = ifieldBIT0(theInstr);
+
+   Int    simm16 = extend_s_16to32(uimm16);
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp rS     = newTemp(ty);
+   IRTemp rB     = newTemp(ty);
+   IRTemp EA     = newTemp(ty);
+   
+   assign( rB, getIReg(rB_addr) );
+   assign( rS, getIReg(rS_addr) );
+   
+   switch (opc1) {
+   case 0x1F: // register offset
+      assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
+      break;
+   case 0x3E: // immediate offset: 64bit: std/stdu/stq: mask off
+              // lowest 2 bits of immediate before forming EA
+      simm16 = simm16 & 0xFFFFFFFC;
+   default:   // immediate offset
+      assign( EA, ea_rAor0_simm( rA_addr, simm16  ) );
+      break;
+   }
+
+   switch (opc1) {
+   case 0x26: // stb (Store B, PPC32 p509)
+      DIP("stb r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+      store( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
+      break;
+       
+   case 0x27: // stbu (Store B, Update, PPC32 p510)
+      if (rA_addr == 0 ) {
+         vex_printf("dis_int_store(ppc)(stbu,rA_addr)\n");
+         return False;
+      }
+      DIP("stbu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+      putIReg( rA_addr, mkexpr(EA) );
+      store( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
+      break;
+
+   case 0x2C: // sth (Store HW, PPC32 p522)
+      DIP("sth r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+      store( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
+      break;
+      
+   case 0x2D: // sthu (Store HW, Update, PPC32 p524)
+      if (rA_addr == 0) {
+         vex_printf("dis_int_store(ppc)(sthu,rA_addr)\n");
+         return False;
+      }
+      DIP("sthu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+      putIReg( rA_addr, mkexpr(EA) );
+      store( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
+      break;
+
+   case 0x24: // stw (Store W, PPC32 p530)
+      DIP("stw r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+      store( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
+      break;
+
+   case 0x25: // stwu (Store W, Update, PPC32 p534)
+      if (rA_addr == 0) {
+         vex_printf("dis_int_store(ppc)(stwu,rA_addr)\n");
+         return False;
+      }
+      DIP("stwu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+      putIReg( rA_addr, mkexpr(EA) );
+      store( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
+      break;
+      
+   /* X Form : all these use EA_indexed */
+   case 0x1F:
+      if (b0 != 0) {
+         vex_printf("dis_int_store(ppc)(0x1F,b0)\n");
+         return False;
+      }
+
+      switch (opc2) {
+      case 0x0F7: // stbux (Store B, Update Indexed, PPC32 p511)
+         if (rA_addr == 0) {
+            vex_printf("dis_int_store(ppc)(stbux,rA_addr)\n");
+            return False;
+         }
+         DIP("stbux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+         putIReg( rA_addr, mkexpr(EA) );
+         store( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
+         break;
+         
+      case 0x0D7: // stbx (Store B Indexed, PPC32 p512)
+         DIP("stbx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+         store( mkexpr(EA), mkNarrowTo8(ty, mkexpr(rS)) );
+         break;
+         
+      case 0x1B7: // sthux (Store HW, Update Indexed, PPC32 p525)
+         if (rA_addr == 0) {
+            vex_printf("dis_int_store(ppc)(sthux,rA_addr)\n");
+            return False;
+         }
+         DIP("sthux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+         putIReg( rA_addr, mkexpr(EA) );
+         store( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
+         break;
+         
+      case 0x197: // sthx (Store HW Indexed, PPC32 p526)
+         DIP("sthx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+         store( mkexpr(EA), mkNarrowTo16(ty, mkexpr(rS)) );
+         break;
+         
+      case 0x0B7: // stwux (Store W, Update Indexed, PPC32 p535)
+         if (rA_addr == 0) {
+            vex_printf("dis_int_store(ppc)(stwux,rA_addr)\n");
+            return False;
+         }
+         DIP("stwux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+         putIReg( rA_addr, mkexpr(EA) );
+         store( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
+         break;
+
+      case 0x097: // stwx (Store W Indexed, PPC32 p536)
+         DIP("stwx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+         store( mkexpr(EA), mkNarrowTo32(ty, mkexpr(rS)) );
+         break;
+         
+
+      /* 64bit Stores */
+      case 0x0B5: // stdux (Store DWord, Update Indexed, PPC64 p584)
+         if (rA_addr == 0) {
+            vex_printf("dis_int_store(ppc)(stdux,rA_addr)\n");
+            return False;
+         }
+         DIP("stdux r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+         putIReg( rA_addr, mkexpr(EA) );
+         store( mkexpr(EA), mkexpr(rS) );
+         break;
+
+      case 0x095: // stdx (Store DWord Indexed, PPC64 p585)
+         DIP("stdx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+         store( mkexpr(EA), mkexpr(rS) );
+         break;
+
+      default:
+         vex_printf("dis_int_store(ppc)(opc2)\n");
+         return False;
+      }
+      break;
+
+   /* DS Form - 64bit Stores.  In each case EA will have been formed
+      with the lowest 2 bits masked off the immediate offset. */
+   case 0x3E:
+      switch ((b1<<1) | b0) {
+      case 0x0: // std (Store DWord, PPC64 p580)
+         if (!mode64)
+            return False;
+
+         DIP("std r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+         store( mkexpr(EA), mkexpr(rS) );
+         break;
+
+      case 0x1: // stdu (Store DWord, Update, PPC64 p583)
+         if (!mode64)
+            return False;
+
+         DIP("stdu r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+         putIReg( rA_addr, mkexpr(EA) );
+         store( mkexpr(EA), mkexpr(rS) );
+         break;
+
+      case 0x2: { // stq (Store QuadWord, Update, PPC64 p583)
+         IRTemp EA_hi = newTemp(ty);
+         IRTemp EA_lo = newTemp(ty);
+         DIP("stq r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+
+         if (mode64) {
+            if (host_endness == VexEndnessBE) {
+
+               /* upper 64-bits */
+               assign( EA_hi, ea_rAor0_simm( rA_addr, simm16 ) );
+
+               /* lower 64-bits */
+               assign( EA_lo, ea_rAor0_simm( rA_addr, simm16+8 ) );
+	    } else {
+               /* upper 64-bits */
+               assign( EA_hi, ea_rAor0_simm( rA_addr, simm16+8 ) );
+
+               /* lower 64-bits */
+               assign( EA_lo, ea_rAor0_simm( rA_addr, simm16 ) );
+	    }
+         } else {
+            /* upper half of upper 64-bits */
+            assign( EA_hi, ea_rAor0_simm( rA_addr, simm16+4 ) );
+
+            /* lower half of upper 64-bits */
+            assign( EA_lo, ea_rAor0_simm( rA_addr, simm16+12 ) );
+         }
+         store( mkexpr(EA_hi), mkexpr(rS) );
+         store( mkexpr(EA_lo), getIReg( rS_addr+1 ) );
+         break;
+      }
+      default:
+         vex_printf("dis_int_load(ppc)(0x3A, opc2)\n");
+         return False;
+      }
+      break;
+
+   default:
+      vex_printf("dis_int_store(ppc)(opc1)\n");
+      return False;
+   }
+   return True;
+}
+
+
+
+/*
+  Integer Load/Store Multiple Instructions
+*/
+static Bool dis_int_ldst_mult ( UInt theInstr )
+{
+   /* D-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar rD_addr  = ifieldRegDS(theInstr);
+   UChar rS_addr  = rD_addr;
+   UChar rA_addr  = ifieldRegA(theInstr);
+   UInt  uimm16   = ifieldUIMM16(theInstr);
+
+   Int     simm16 = extend_s_16to32(uimm16);
+   IRType  ty     = mode64 ? Ity_I64 : Ity_I32;
+   IROp    mkAdd  = mode64 ? Iop_Add64 : Iop_Add32;
+   IRTemp  EA     = newTemp(ty);
+   UInt    r      = 0;
+   UInt    ea_off = 0;
+   IRExpr* irx_addr;
+
+   assign( EA, ea_rAor0_simm( rA_addr, simm16 ) );
+
+   switch (opc1) {
+   case 0x2E: // lmw (Load Multiple Word, PPC32 p454)
+      if (rA_addr >= rD_addr) {
+         vex_printf("dis_int_ldst_mult(ppc)(lmw,rA_addr)\n");
+         return False;
+      }
+      DIP("lmw r%u,%d(r%u)\n", rD_addr, simm16, rA_addr);
+      for (r = rD_addr; r <= 31; r++) {
+         irx_addr = binop(mkAdd, mkexpr(EA), mode64 ? mkU64(ea_off) : mkU32(ea_off));
+         putIReg( r, mkWidenFrom32(ty, load(Ity_I32, irx_addr ),
+                                       False) );
+         ea_off += 4;
+      }
+      break;
+      
+   case 0x2F: // stmw (Store Multiple Word, PPC32 p527)
+      DIP("stmw r%u,%d(r%u)\n", rS_addr, simm16, rA_addr);
+      for (r = rS_addr; r <= 31; r++) {
+         irx_addr = binop(mkAdd, mkexpr(EA), mode64 ? mkU64(ea_off) : mkU32(ea_off));
+         store( irx_addr, mkNarrowTo32(ty, getIReg(r)) );
+         ea_off += 4;
+      }
+      break;
+      
+   default:
+      vex_printf("dis_int_ldst_mult(ppc)(opc1)\n");
+      return False;
+   }
+   return True;
+}
+
+
+
/*
  Integer Load/Store String Instructions
*/
/* Emit an unrolled byte-by-byte load-string sequence (for lswi/lswx).
   Up to maxBytes iterations are generated; each one is guarded by a
   conditional exit to the next instruction, so at run time only the
   first tNBytes bytes are actually transferred.  Destination registers
   are filled 4 bytes at a time, big-endian-wise (MSB first), wrapping
   from r31 back to r0. */
static 
void generate_lsw_sequence ( IRTemp tNBytes,   // # bytes, :: Ity_I32
                             IRTemp EA,        // EA
                             Int    rD,        // first dst register
                             Int    maxBytes ) // 32 or 128
{
   Int     i, shift = 24;
   IRExpr* e_nbytes = mkexpr(tNBytes);
   IRExpr* e_EA     = mkexpr(EA);
   IRType  ty       = mode64 ? Ity_I64 : Ity_I32;

   vassert(rD >= 0 && rD < 32);
   /* Back up one register (wrapping 0 -> 31); the loop below
      re-increments on its first iteration, so the first byte really
      lands in the caller's rD. */
   rD--; if (rD < 0) rD = 31;

   for (i = 0; i < maxBytes; i++) {
      /* if (nBytes < (i+1)) goto NIA; */
      stmt( IRStmt_Exit( binop(Iop_CmpLT32U, e_nbytes, mkU32(i+1)),
                         Ijk_Boring, 
                         mkSzConst( ty, nextInsnAddr()), OFFB_CIA ));
      /* when crossing into a new dest register, set it to zero. */
      if ((i % 4) == 0) {
         rD++; if (rD == 32) rD = 0;
         putIReg(rD, mkSzImm(ty, 0));
         shift = 24;   /* next byte goes into the most significant position */
      }
      /* rD |=  (8Uto32(*(EA+i))) << shift */
      vassert(shift == 0 || shift == 8 || shift == 16 || shift == 24);
      putIReg( 
         rD, 
         mkWidenFrom32(
            ty, 
            binop(
               Iop_Or32, 
               mkNarrowTo32(ty, getIReg(rD)),
               binop(
                  Iop_Shl32, 
                  unop(
                     Iop_8Uto32, 
                     load( Ity_I8,
                           binop( mkSzOp(ty,Iop_Add8),
                                  e_EA, mkSzImm(ty,i)))
                  ), 
                  mkU8(toUChar(shift))
               )
            ),
            /*Signed*/False
         ) 
      ); 
      shift -= 8;
   }
}
+
/* Emit an unrolled byte-by-byte store-string sequence (for stswi/stswx),
   the mirror image of generate_lsw_sequence: each of the up-to-maxBytes
   iterations is guarded by an exit to the next instruction, so only the
   first tNBytes bytes are stored at run time.  Source registers are
   consumed 4 bytes at a time, MSB first, wrapping from r31 to r0. */
static 
void generate_stsw_sequence ( IRTemp tNBytes,   // # bytes, :: Ity_I32
                              IRTemp EA,        // EA
                              Int    rS,        // first src register
                              Int    maxBytes ) // 32 or 128
{
   Int     i, shift = 24;
   IRExpr* e_nbytes = mkexpr(tNBytes);
   IRExpr* e_EA     = mkexpr(EA);
   IRType  ty       = mode64 ? Ity_I64 : Ity_I32;

   vassert(rS >= 0 && rS < 32);
   /* Back up one register (wrapping 0 -> 31); the loop re-increments
      on its first iteration so the first byte comes from rS proper. */
   rS--; if (rS < 0) rS = 31;

   for (i = 0; i < maxBytes; i++) {
      /* if (nBytes < (i+1)) goto NIA; */
      stmt( IRStmt_Exit( binop(Iop_CmpLT32U, e_nbytes, mkU32(i+1)),
                         Ijk_Boring, 
                         mkSzConst( ty, nextInsnAddr() ), OFFB_CIA ));
      /* check for crossing into a new src register. */
      if ((i % 4) == 0) {
         rS++; if (rS == 32) rS = 0;
         shift = 24;   /* restart at the most significant byte */
      }
      /* *(EA+i) = 32to8(rS >> shift) */
      vassert(shift == 0 || shift == 8 || shift == 16 || shift == 24);
      store(
            binop( mkSzOp(ty,Iop_Add8), e_EA, mkSzImm(ty,i)),
            unop( Iop_32to8,
                  binop( Iop_Shr32,
                         mkNarrowTo32( ty, getIReg(rS) ),
                         mkU8( toUChar(shift) )))
      );
      shift -= 8;
   }
}
+
/* Disassemble the load/store-string instructions lswi/lswx/stswi/stswx.
   The generated byte-copy sequences contain mid-block exits, so on the
   general path *stopHere is set True to tell the caller to end the
   superblock after this instruction.  Returns True on successful
   decode, False for invalid/unknown forms. */
static Bool dis_int_ldst_str ( UInt theInstr, /*OUT*/Bool* stopHere )
{
   /* X-Form */
   UChar opc1     = ifieldOPC(theInstr);
   UChar rD_addr  = ifieldRegDS(theInstr);
   UChar rS_addr  = rD_addr;
   UChar rA_addr  = ifieldRegA(theInstr);
   UChar rB_addr  = ifieldRegB(theInstr);
   UChar NumBytes = rB_addr;   // immediate forms encode the count in the rB field
   UInt  opc2     = ifieldOPClo10(theInstr);
   UChar b0       = ifieldBIT0(theInstr);

   IRType ty      = mode64 ? Ity_I64 : Ity_I32;
   IRTemp t_EA    = newTemp(ty);
   IRTemp t_nbytes = IRTemp_INVALID;

   *stopHere = False;

   if (opc1 != 0x1F || b0 != 0) {
      vex_printf("dis_int_ldst_str(ppc)(opc1)\n");
      return False;
   }

   switch (opc2) {
   case 0x255: // lswi (Load String Word Immediate, PPC32 p455)
      /* NB: does not reject the case where RA is in the range of
         registers to be loaded.  It should. */
      DIP("lswi r%u,r%u,%d\n", rD_addr, rA_addr, NumBytes);
      assign( t_EA, ea_rAor0(rA_addr) );
      if (NumBytes == 8 && !mode64) {
         /* Special case hack */
         /* rD = Mem[EA]; (rD+1)%32 = Mem[EA+4] */
         putIReg( rD_addr,          
                  load(Ity_I32, mkexpr(t_EA)) );
         putIReg( (rD_addr+1) % 32, 
                  load(Ity_I32,
                       binop(Iop_Add32, mkexpr(t_EA), mkU32(4))) );
      } else {
         /* NumBytes == 0 encodes a 32-byte transfer. */
         t_nbytes = newTemp(Ity_I32);
         assign( t_nbytes, mkU32(NumBytes==0 ? 32 : NumBytes) );
         generate_lsw_sequence( t_nbytes, t_EA, rD_addr, 32 );
         *stopHere = True;
      }
      return True;

   case 0x215: // lswx (Load String Word Indexed, PPC32 p456)
      /* NB: does not reject the case where RA is in the range of
         registers to be loaded.  It should.  Although considering
         that that can only be detected at run time, it's not easy to
         do so. */
      if (rD_addr == rA_addr || rD_addr == rB_addr)
         return False;
      if (rD_addr == 0 && rA_addr == 0)
         return False;
      DIP("lswx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
      t_nbytes = newTemp(Ity_I32);
      assign( t_EA, ea_rAor0_idxd(rA_addr,rB_addr) );
      /* byte count comes from XER[BC] at run time */
      assign( t_nbytes, unop( Iop_8Uto32, getXER_BC() ) );
      generate_lsw_sequence( t_nbytes, t_EA, rD_addr, 128 );
      *stopHere = True;
      return True;

   case 0x2D5: // stswi (Store String Word Immediate, PPC32 p528)
      DIP("stswi r%u,r%u,%d\n", rS_addr, rA_addr, NumBytes);
      assign( t_EA, ea_rAor0(rA_addr) );
      if (NumBytes == 8 && !mode64) {
         /* Special case hack */
         /* Mem[EA] = rD; Mem[EA+4] = (rD+1)%32 */
         store( mkexpr(t_EA),
                getIReg(rD_addr) );
         store( binop(Iop_Add32, mkexpr(t_EA), mkU32(4)),
                getIReg((rD_addr+1) % 32) );
      } else {
         /* NumBytes == 0 encodes a 32-byte transfer. */
         t_nbytes = newTemp(Ity_I32);
         assign( t_nbytes, mkU32(NumBytes==0 ? 32 : NumBytes) );
         generate_stsw_sequence( t_nbytes, t_EA, rD_addr, 32 );
         *stopHere = True;
      }
      return True;

   case 0x295: // stswx (Store String Word Indexed, PPC32 p529)
      DIP("stswx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
      t_nbytes = newTemp(Ity_I32);
      assign( t_EA, ea_rAor0_idxd(rA_addr,rB_addr) );
      /* byte count comes from XER[BC] at run time */
      assign( t_nbytes, unop( Iop_8Uto32, getXER_BC() ) );
      generate_stsw_sequence( t_nbytes, t_EA, rS_addr, 128 );
      *stopHere = True;
      return True;

   default:
      vex_printf("dis_int_ldst_str(ppc)(opc2)\n");
      return False;
   }
   /* not reached: every case above returns */
   return True;
}
+
+
+/* ------------------------------------------------------------------
+   Integer Branch Instructions
+   ------------------------------------------------------------------ */
+
+/*
+  Branch helper function
+  ok = BO[2] | ((CTR[0] != 0) ^ BO[1])
+  Returns an I32 which is 0x00000000 if the ctr condition failed
+  and 0xFFFFFFFF otherwise.
+*/
+static IRExpr* /* :: Ity_I32 */ branch_ctr_ok( UInt BO )
+{
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp ok = newTemp(Ity_I32);
+
+   if ((BO >> 2) & 1) {     // independent of ctr
+      assign( ok, mkU32(0xFFFFFFFF) );
+   } else {
+      if ((BO >> 1) & 1) {  // ctr == 0 ?
+         assign( ok, unop( Iop_1Sto32,
+                           binop( mkSzOp(ty, Iop_CmpEQ8),
+                                  getGST( PPC_GST_CTR ),
+                                  mkSzImm(ty,0))) );
+      } else {              // ctr != 0 ?
+         assign( ok, unop( Iop_1Sto32,
+                           binop( mkSzOp(ty, Iop_CmpNE8),
+                                  getGST( PPC_GST_CTR ),
+                                  mkSzImm(ty,0))) );
+      }
+   }
+   return mkexpr(ok);
+}
+
+
+/*
+  Branch helper function cond_ok = BO[4] | (CR[BI] == BO[3])
+  Returns an I32 which is either 0 if the condition failed or 
+  some arbitrary nonzero value otherwise. */
+
+static IRExpr* /* :: Ity_I32 */ branch_cond_ok( UInt BO, UInt BI )
+{
+   Int where;
+   IRTemp res   = newTemp(Ity_I32);
+   IRTemp cr_bi = newTemp(Ity_I32);
+   
+   if ((BO >> 4) & 1) {
+      assign( res, mkU32(1) );
+   } else {
+      // ok = (CR[BI] == BO[3]) Note, the following relies on
+      // getCRbit_anywhere returning a value which
+      // is either zero or has exactly 1 bit set.  
+      assign( cr_bi, getCRbit_anywhere( BI, &where ) );
+
+      if ((BO >> 3) & 1) {
+         /* We can use cr_bi as-is. */
+         assign( res, mkexpr(cr_bi) );
+      } else {
+         /* We have to invert the sense of the information held in
+            cr_bi.  For that we need to know which bit
+            getCRbit_anywhere regards as significant. */
+         assign( res, binop(Iop_Xor32, mkexpr(cr_bi),
+                                       mkU32(1<<where)) );
+      }
+   }
+   return mkexpr(res);
+}
+
+
+/*
+  Integer Branch Instructions
+*/
+static Bool dis_branch ( UInt theInstr, 
+                         const VexAbiInfo* vbi,
+                         /*OUT*/DisResult* dres,
+                         Bool (*resteerOkFn)(void*,Addr),
+                         void* callback_opaque )
+{
+   UChar opc1    = ifieldOPC(theInstr);
+   UChar BO      = ifieldRegDS(theInstr);
+   UChar BI      = ifieldRegA(theInstr);
+   UInt  BD_u16  = ifieldUIMM16(theInstr) & 0xFFFFFFFC; /* mask off AA/LK */
+   UChar b11to15 = ifieldRegB(theInstr);
+   UInt  opc2    = ifieldOPClo10(theInstr);
+   UInt  LI_u26  = ifieldUIMM26(theInstr) & 0xFFFFFFFC; /* mask off AA/LK */
+   UChar flag_AA = ifieldBIT1(theInstr);
+   UChar flag_LK = ifieldBIT0(theInstr);
+
+   IRType   ty        = mode64 ? Ity_I64 : Ity_I32;
+   Addr64   tgt       = 0;
+   Int      BD        = extend_s_16to32(BD_u16);
+   IRTemp   do_branch = newTemp(Ity_I32);
+   IRTemp   ctr_ok    = newTemp(Ity_I32);
+   IRTemp   cond_ok   = newTemp(Ity_I32);
+   IRExpr*  e_nia     = mkSzImm(ty, nextInsnAddr());
+   IRConst* c_nia     = mkSzConst(ty, nextInsnAddr());
+   IRTemp   lr_old    = newTemp(ty);
+
+   /* Hack to pass through code that just wants to read the PC */
+   if (theInstr == 0x429F0005) {
+      DIP("bcl 0x%x, 0x%x (a.k.a mr lr,cia+4)\n", BO, BI);
+      putGST( PPC_GST_LR, e_nia );
+      return True;
+   }
+
+   /* The default what-next.  Individual cases can override it. */
+   dres->whatNext = Dis_StopHere;
+   vassert(dres->jk_StopHere == Ijk_INVALID);
+
+   switch (opc1) {
+   case 0x12: // b     (Branch, PPC32 p360)
+      if (flag_AA) {
+         tgt = mkSzAddr( ty, extend_s_26to64(LI_u26) );
+      } else {
+         tgt = mkSzAddr( ty, guest_CIA_curr_instr +
+                             (Long)extend_s_26to64(LI_u26) );
+      }
+      if (mode64) {
+         DIP("b%s%s 0x%llx\n",
+             flag_LK ? "l" : "", flag_AA ? "a" : "", tgt);
+      } else {
+         DIP("b%s%s 0x%x\n",
+             flag_LK ? "l" : "", flag_AA ? "a" : "", (Addr32)tgt);
+      }
+
+      if (flag_LK) {
+         putGST( PPC_GST_LR, e_nia );
+         if (vbi->guest_ppc_zap_RZ_at_bl
+             && vbi->guest_ppc_zap_RZ_at_bl( (ULong)tgt) ) {
+            IRTemp t_tgt = newTemp(ty);
+            assign(t_tgt, mode64 ? mkU64(tgt) : mkU32(tgt) );
+            make_redzone_AbiHint( vbi, t_tgt,
+                                  "branch-and-link (unconditional call)" );
+         }
+      }
+
+      if (resteerOkFn( callback_opaque, tgt )) {
+         dres->whatNext   = Dis_ResteerU;
+         dres->continueAt = tgt;
+      } else {
+         dres->jk_StopHere = flag_LK ? Ijk_Call : Ijk_Boring;
+         putGST( PPC_GST_CIA, mkSzImm(ty, tgt) );
+      }
+      break;
+
+   case 0x10: // bc    (Branch Conditional, PPC32 p361)
+      DIP("bc%s%s 0x%x, 0x%x, 0x%x\n",
+          flag_LK ? "l" : "", flag_AA ? "a" : "", BO, BI, BD);
+
+      /* BO[2] clear means "decrement CTR first". */
+      if (!(BO & 0x4)) {
+         putGST( PPC_GST_CTR,
+                 binop(mkSzOp(ty, Iop_Sub8),
+                       getGST( PPC_GST_CTR ), mkSzImm(ty, 1)) );
+      }
+
+      /* This is a bit subtle.  ctr_ok is either all 0s or all 1s.
+         cond_ok is either zero or nonzero, since that's the cheapest
+         way to compute it.  Anding them together gives a value which
+         is either zero or non zero and so that's what we must test
+         for in the IRStmt_Exit. */
+      assign( ctr_ok,  branch_ctr_ok( BO ) );
+      assign( cond_ok, branch_cond_ok( BO, BI ) );
+      assign( do_branch,
+              binop(Iop_And32, mkexpr(cond_ok), mkexpr(ctr_ok)) );
+
+      if (flag_AA) {
+         tgt = mkSzAddr(ty, extend_s_16to64(BD_u16));
+      } else {
+         tgt = mkSzAddr(ty, guest_CIA_curr_instr +
+                            (Long)extend_s_16to64(BD_u16));
+      }
+      if (flag_LK)
+         putGST( PPC_GST_LR, e_nia );
+
+      stmt( IRStmt_Exit(
+               binop(Iop_CmpNE32, mkexpr(do_branch), mkU32(0)),
+               flag_LK ? Ijk_Call : Ijk_Boring,
+               mkSzConst(ty, tgt), OFFB_CIA ) );
+
+      dres->jk_StopHere = Ijk_Boring;
+      putGST( PPC_GST_CIA, e_nia );
+      break;
+
+   case 0x13:
+      /* For bclr and bcctr, it appears that the lowest two bits of
+         b11to15 are a branch hint, and so we only need to ensure it's
+         of the form 000XX. */
+      if ((b11to15 & ~3) != 0) {
+         vex_printf("dis_int_branch(ppc)(0x13,b11to15)(%d)\n", (Int)b11to15);
+         return False;
+      }
+
+      switch (opc2) {
+      case 0x210: // bcctr (Branch Cond. to Count Register, PPC32 p363) 
+         if ((BO & 0x4) == 0) { // "decr and test CTR" option invalid
+            vex_printf("dis_int_branch(ppc)(bcctr,BO)\n");
+            return False;
+         }
+         DIP("bcctr%s 0x%x, 0x%x\n", flag_LK ? "l" : "", BO, BI);
+
+         assign( cond_ok, branch_cond_ok( BO, BI ) );
+
+         /* FIXME: this is confusing.  lr_old holds the old value
+            of ctr, not lr :-) */
+         assign( lr_old, addr_align( getGST( PPC_GST_CTR ), 4 ));
+
+         if (flag_LK)
+            putGST( PPC_GST_LR, e_nia );
+
+         stmt( IRStmt_Exit(
+                  binop(Iop_CmpEQ32, mkexpr(cond_ok), mkU32(0)),
+                  Ijk_Boring,
+                  c_nia, OFFB_CIA ));
+
+         if (flag_LK && vbi->guest_ppc_zap_RZ_at_bl) {
+            make_redzone_AbiHint( vbi, lr_old,
+                                  "b-ctr-l (indirect call)" );
+         }
+
+         dres->jk_StopHere = flag_LK ? Ijk_Call : Ijk_Boring;
+         putGST( PPC_GST_CIA, mkexpr(lr_old) );
+         break;
+
+      case 0x010: { // bclr (Branch Cond. to Link Register, PPC32 p365) 
+         Bool vanilla_return = False;
+         if ((BO & 0x14 /* 1z1zz */) == 0x14 && flag_LK == 0) {
+            DIP("blr\n");
+            vanilla_return = True;
+         } else {
+            DIP("bclr%s 0x%x, 0x%x\n", flag_LK ? "l" : "", BO, BI);
+         }
+
+         /* BO[2] clear means "decrement CTR first". */
+         if (!(BO & 0x4)) {
+            putGST( PPC_GST_CTR,
+                    binop(mkSzOp(ty, Iop_Sub8),
+                          getGST( PPC_GST_CTR ), mkSzImm(ty, 1)) );
+         }
+
+         /* See comments above for 'bc' about this */
+         assign( ctr_ok,  branch_ctr_ok( BO ) );
+         assign( cond_ok, branch_cond_ok( BO, BI ) );
+         assign( do_branch,
+                 binop(Iop_And32, mkexpr(cond_ok), mkexpr(ctr_ok)) );
+
+         assign( lr_old, addr_align( getGST( PPC_GST_LR ), 4 ));
+
+         if (flag_LK)
+            putGST( PPC_GST_LR,  e_nia );
+
+         stmt( IRStmt_Exit(
+                  binop(Iop_CmpEQ32, mkexpr(do_branch), mkU32(0)),
+                  Ijk_Boring,
+                  c_nia, OFFB_CIA ));
+
+         if (vanilla_return && vbi->guest_ppc_zap_RZ_at_blr) {
+            make_redzone_AbiHint( vbi, lr_old,
+                                  "branch-to-lr (unconditional return)" );
+         }
+
+         /* blrl is pretty strange; it's like a return that sets the
+            return address of its caller to the insn following this
+            one.  Mark it as a return. */
+         dres->jk_StopHere = Ijk_Ret;  /* was flag_LK ? Ijk_Call : Ijk_Ret; */
+         putGST( PPC_GST_CIA, mkexpr(lr_old) );
+         break;
+      }
+      default:
+         vex_printf("dis_int_branch(ppc)(opc2)\n");
+         return False;
+      }
+      break;
+
+   default:
+      vex_printf("dis_int_branch(ppc)(opc1)\n");
+      return False;
+   }
+
+   return True;
+}
+
+
+
+/*
+  Condition Register Logical Instructions
+*/
+static Bool dis_cond_logic ( UInt theInstr )
+{
+   /* XL-Form */
+   UChar opc1      = ifieldOPC(theInstr);
+   UChar crbD_addr = ifieldRegDS(theInstr);
+   UChar crfD_addr = toUChar( IFIELD(theInstr, 23, 3) );
+   UChar crbA_addr = ifieldRegA(theInstr);
+   UChar crfS_addr = toUChar( IFIELD(theInstr, 18, 3) );
+   UChar crbB_addr = ifieldRegB(theInstr);
+   UInt  opc2      = ifieldOPClo10(theInstr);
+   UChar b0        = ifieldBIT0(theInstr);
+
+   IRTemp  crbD = newTemp(Ity_I32);
+   IRTemp  crbA = newTemp(Ity_I32);
+   IRTemp  crbB = newTemp(Ity_I32);
+   IRExpr* bitD = NULL;
+
+   /* Every CR-logical op lives under primary opcode 19 with bit 0
+      clear. */
+   if (opc1 != 19 || b0 != 0) {
+      vex_printf("dis_cond_logic(ppc)(opc1)\n");
+      return False;
+   }
+
+   if (opc2 == 0) {  // mcrf    (Move Cond Reg Field, PPC32 p464)
+      /* Field-move form: bit addresses must be field-aligned and the
+         B field must be zero. */
+      if (((crbD_addr & 0x3) != 0) ||
+          ((crbA_addr & 0x3) != 0) || (crbB_addr != 0)) {
+         vex_printf("dis_cond_logic(ppc)(crbD|crbA|crbB != 0)\n");
+         return False;
+      }
+      DIP("mcrf cr%u,cr%u\n", crfD_addr, crfS_addr);
+      putCR0(   crfD_addr, getCR0(  crfS_addr) );
+      putCR321( crfD_addr, getCR321(crfS_addr) );
+      return True;
+   }
+
+   assign( crbA, getCRbit(crbA_addr) );
+   /* When both sources name the same CR bit, share the temp so the
+      bit is only fetched once. */
+   if (crbA_addr == crbB_addr)
+      crbB = crbA;
+   else
+      assign( crbB, getCRbit(crbB_addr) );
+
+   /* Compute the destination bit as an expression; the single assign
+      and write-back happen after the switch. */
+   switch (opc2) {
+   case 0x021: // crnor   (Cond Reg NOR, PPC32 p376)
+      DIP("crnor crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
+      bitD = unop(Iop_Not32, binop(Iop_Or32, mkexpr(crbA), mkexpr(crbB)));
+      break;
+   case 0x081: // crandc  (Cond Reg AND w. Complement, PPC32 p373)
+      DIP("crandc crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
+      bitD = binop(Iop_And32, mkexpr(crbA), unop(Iop_Not32, mkexpr(crbB)));
+      break;
+   case 0x0C1: // crxor   (Cond Reg XOR, PPC32 p379)
+      DIP("crxor crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
+      bitD = binop(Iop_Xor32, mkexpr(crbA), mkexpr(crbB));
+      break;
+   case 0x0E1: // crnand  (Cond Reg NAND, PPC32 p375)
+      DIP("crnand crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
+      bitD = unop(Iop_Not32, binop(Iop_And32, mkexpr(crbA), mkexpr(crbB)));
+      break;
+   case 0x101: // crand   (Cond Reg AND, PPC32 p372)
+      DIP("crand crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
+      bitD = binop(Iop_And32, mkexpr(crbA), mkexpr(crbB));
+      break;
+   case 0x121: // creqv   (Cond Reg Equivalent, PPC32 p374)
+      DIP("creqv crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
+      bitD = unop(Iop_Not32, binop(Iop_Xor32, mkexpr(crbA), mkexpr(crbB)));
+      break;
+   case 0x1A1: // crorc   (Cond Reg OR w. Complement, PPC32 p378)
+      DIP("crorc crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
+      bitD = binop(Iop_Or32, mkexpr(crbA), unop(Iop_Not32, mkexpr(crbB)));
+      break;
+   case 0x1C1: // cror    (Cond Reg OR, PPC32 p377)
+      DIP("cror crb%d,crb%d,crb%d\n", crbD_addr, crbA_addr, crbB_addr);
+      bitD = binop(Iop_Or32, mkexpr(crbA), mkexpr(crbB));
+      break;
+   default:
+      vex_printf("dis_cond_logic(ppc)(opc2)\n");
+      return False;
+   }
+
+   assign( crbD, bitD );
+   putCRbit( crbD_addr, mkexpr(crbD) );
+   return True;
+}
+
+
+/* 
+  Trap instructions
+*/
+
+/* Do the code generation for a trap.  Returned Bool is true iff
+   this is an unconditional trap.  If the two arg IRExpr*s are 
+   Ity_I32s then the comparison is 32-bit.  If they are Ity_I64s
+   then they are 64-bit, and we must be disassembling 64-bit
+   instructions. */
+static Bool do_trap ( UChar TO, 
+                      IRExpr* argL0, IRExpr* argR0, Addr64 cia )
+{
+   IRTemp argL, argR;
+   IRExpr *argLe, *argRe, *cond, *tmp;
+
+   Bool    is32bit = typeOfIRExpr(irsb->tyenv, argL0 ) == Ity_I32;
+
+   /* Select 32- or 64-bit flavours of every op used below, based on
+      the width of the incoming comparison arguments. */
+   IROp    opAND     = is32bit ? Iop_And32     : Iop_And64;
+   IROp    opOR      = is32bit ? Iop_Or32      : Iop_Or64;
+   IROp    opCMPORDS = is32bit ? Iop_CmpORD32S : Iop_CmpORD64S;
+   IROp    opCMPORDU = is32bit ? Iop_CmpORD32U : Iop_CmpORD64U;
+   IROp    opCMPNE   = is32bit ? Iop_CmpNE32   : Iop_CmpNE64;
+   IROp    opCMPEQ   = is32bit ? Iop_CmpEQ32   : Iop_CmpEQ64;
+   /* The CmpORD ops deliver their verdict as 8 (LT), 4 (GT) or 2
+      (EQ); const8/const4/const2 below are the masks that select each
+      outcome. */
+   IRExpr* const0    = is32bit ? mkU32(0)      : mkU64(0);
+   IRExpr* const2    = is32bit ? mkU32(2)      : mkU64(2);
+   IRExpr* const4    = is32bit ? mkU32(4)      : mkU64(4);
+   IRExpr* const8    = is32bit ? mkU32(8)      : mkU64(8);
+
+   /* TO = 0b11100 selects all three signed outcomes (LT|GT|EQ), and
+      TO = 0b00111 selects EQ plus both unsigned outcomes; in either
+      case one of the selected comparisons must hold, so the trap
+      always fires. */
+   const UChar b11100 = 0x1C;
+   const UChar b00111 = 0x07;
+
+   if (is32bit) {
+      vassert( typeOfIRExpr(irsb->tyenv, argL0) == Ity_I32 );
+      vassert( typeOfIRExpr(irsb->tyenv, argR0) == Ity_I32 );
+   } else {
+      vassert( typeOfIRExpr(irsb->tyenv, argL0) == Ity_I64 );
+      vassert( typeOfIRExpr(irsb->tyenv, argR0) == Ity_I64 );
+      vassert( mode64 );
+   }
+
+   if ((TO & b11100) == b11100 || (TO & b00111) == b00111) {
+      /* Unconditional trap.  Just do the exit without 
+         testing the arguments. */
+      /* The guard (0 == 0) is always true. */
+      stmt( IRStmt_Exit( 
+               binop(opCMPEQ, const0, const0), 
+               Ijk_SigTRAP,
+               mode64 ? IRConst_U64(cia) : IRConst_U32((UInt)cia),
+               OFFB_CIA
+      ));
+      return True; /* unconditional trap */
+   }
+
+   if (is32bit) {
+      argL = newTemp(Ity_I32);
+      argR = newTemp(Ity_I32);
+   } else {
+      argL = newTemp(Ity_I64);
+      argR = newTemp(Ity_I64);
+   }
+
+   /* Force the arguments into temps, since each is referenced by
+      several of the comparisons below. */
+   assign( argL, argL0 );
+   assign( argR, argR0 );
+
+   argLe = mkexpr(argL);
+   argRe = mkexpr(argR);
+
+   /* OR in a nonzero value for each comparison that TO selects and
+      that holds for these arguments. */
+   cond = const0;
+   if (TO & 16) { // L <s R
+      tmp = binop(opAND, binop(opCMPORDS, argLe, argRe), const8);
+      cond = binop(opOR, tmp, cond);
+   }
+   if (TO & 8) { // L >s R
+      tmp = binop(opAND, binop(opCMPORDS, argLe, argRe), const4);
+      cond = binop(opOR, tmp, cond);
+   }
+   if (TO & 4) { // L == R
+      tmp = binop(opAND, binop(opCMPORDS, argLe, argRe), const2);
+      cond = binop(opOR, tmp, cond);
+   }
+   if (TO & 2) { // L <u R
+      tmp = binop(opAND, binop(opCMPORDU, argLe, argRe), const8);
+      cond = binop(opOR, tmp, cond);
+   }
+   if (TO & 1) { // L >u R
+      tmp = binop(opAND, binop(opCMPORDU, argLe, argRe), const4);
+      cond = binop(opOR, tmp, cond);
+   }
+   /* Trap iff any selected comparison succeeded. */
+   stmt( IRStmt_Exit( 
+            binop(opCMPNE, cond, const0), 
+            Ijk_SigTRAP,
+            mode64 ? IRConst_U64(cia) : IRConst_U32((UInt)cia),
+            OFFB_CIA
+   ));
+   return False; /* not an unconditional trap */
+}
+
+static Bool dis_trapi ( UInt theInstr,
+                        /*OUT*/DisResult* dres )
+{
+   /* D-Form */
+   UChar  opc1    = ifieldOPC(theInstr);
+   UChar  TO      = ifieldRegDS(theInstr);
+   UChar  rA_addr = ifieldRegA(theInstr);
+   UInt   uimm16  = ifieldUIMM16(theInstr);
+   ULong  simm16  = extend_s_16to64(uimm16);
+   Addr64 cia     = guest_CIA_curr_instr;
+   IRType ty      = mode64 ? Ity_I64 : Ity_I32;
+   Bool   uncond  = False;
+
+   if (opc1 == 0x03) {
+      /* twi  (Trap Word Immediate, PPC32 p548): 32-bit compare of rA
+         against the sign-extended immediate; in 64-bit mode the
+         register is narrowed to its low word first. */
+      uncond = do_trap( TO, 
+                        mode64 ? unop(Iop_64to32, getIReg(rA_addr)) 
+                               : getIReg(rA_addr),
+                        mkU32( (UInt)simm16 ),
+                        cia );
+      if (TO == 4)
+         DIP("tweqi r%u,%d\n", (UInt)rA_addr, (Int)simm16);
+      else
+         DIP("tw%di r%u,%d\n", (Int)TO, (UInt)rA_addr, (Int)simm16);
+   }
+   else if (opc1 == 0x02 && mode64) {
+      /* tdi: 64-bit compare, only available in 64-bit mode. */
+      uncond = do_trap( TO, getIReg(rA_addr), mkU64( (ULong)simm16 ), cia );
+      if (TO == 4)
+         DIP("tdeqi r%u,%d\n", (UInt)rA_addr, (Int)simm16);
+      else
+         DIP("td%di r%u,%d\n", (Int)TO, (UInt)rA_addr, (Int)simm16);
+   }
+   else {
+      return False;
+   }
+
+   if (uncond) {
+      /* If the trap shows signs of being unconditional, don't
+         continue decoding past it. */
+      putGST( PPC_GST_CIA, mkSzImm( ty, nextInsnAddr() ));
+      dres->jk_StopHere = Ijk_Boring;
+      dres->whatNext    = Dis_StopHere;
+   }
+
+   return True;
+}
+
+static Bool dis_trap ( UInt theInstr,
+                        /*OUT*/DisResult* dres )
+{
+   /* X-Form */
+   UInt   opc2    = ifieldOPClo10(theInstr);
+   UChar  TO      = ifieldRegDS(theInstr);
+   UChar  rA_addr = ifieldRegA(theInstr);
+   UChar  rB_addr = ifieldRegB(theInstr);
+   Addr64 cia     = guest_CIA_curr_instr;
+   IRType ty      = mode64 ? Ity_I64 : Ity_I32;
+   Bool   uncond  = False;
+
+   /* Bit 0 must be clear in both encodings. */
+   if (ifieldBIT0(theInstr) != 0)
+      return False;
+
+   if (opc2 == 0x004) {
+      /* tw  (Trap Word, PPC64 p540): compare the low 32 bits of both
+         registers; in 64-bit mode they are narrowed first. */
+      uncond = do_trap( TO, 
+                        mode64 ? unop(Iop_64to32, getIReg(rA_addr)) 
+                               : getIReg(rA_addr),
+                        mode64 ? unop(Iop_64to32, getIReg(rB_addr)) 
+                               : getIReg(rB_addr),
+                        cia );
+      if (TO == 4)
+         DIP("tweq r%u,r%u\n", (UInt)rA_addr, (UInt)rB_addr);
+      else
+         DIP("tw%d r%u,r%u\n", (Int)TO, (UInt)rA_addr, (UInt)rB_addr);
+   }
+   else if (opc2 == 0x044 && mode64) {
+      /* td (Trap Doubleword, PPC64 p534): 64-bit compare, only
+         available in 64-bit mode. */
+      uncond = do_trap( TO, getIReg(rA_addr), getIReg(rB_addr), cia );
+      if (TO == 4)
+         DIP("tdeq r%u,r%u\n", (UInt)rA_addr, (UInt)rB_addr);
+      else
+         DIP("td%d r%u,r%u\n", (Int)TO, (UInt)rA_addr, (UInt)rB_addr);
+   }
+   else {
+      return False;
+   }
+
+   if (uncond) {
+      /* If the trap shows signs of being unconditional, don't
+         continue decoding past it. */
+      putGST( PPC_GST_CIA, mkSzImm( ty, nextInsnAddr() ));
+      dres->jk_StopHere = Ijk_Boring;
+      dres->whatNext    = Dis_StopHere;
+   }
+
+   return True;
+}
+
+
+/*
+  System Linkage Instructions
+*/
+static Bool dis_syslink ( UInt theInstr, 
+                          const VexAbiInfo* abiinfo, DisResult* dres )
+{
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+
+   /* Only the canonical "sc" encoding is accepted. */
+   if (theInstr != 0x44000002) {
+      vex_printf("dis_syslink(ppc)(theInstr)\n");
+      return False;
+   }
+
+   // sc  (System Call, PPC32 p504)
+   DIP("sc\n");
+
+   /* Record this instruction's address in the IP_AT_SYSCALL
+      pseudo-register, so that on Darwin Valgrind can back the guest
+      up to here if the syscall needs restarting. */
+   putGST( PPC_GST_IP_AT_SYSCALL, getGST( PPC_GST_CIA ) );
+
+   /* Declare an end-of-block so any TempRegs caching ArchRegs get
+      flushed; every ArchReg must carry its up-to-date value at the
+      syscall boundary. */
+   putGST( PPC_GST_CIA, mkSzImm( ty, nextInsnAddr() ));
+
+   dres->whatNext    = Dis_StopHere;
+   dres->jk_StopHere = Ijk_Sys_syscall;
+   return True;
+}
+
+
+/*
+  Memory Synchronization Instructions
+
+  Note on Reservations:
+  We rely on the assumption that V will in fact only allow one thread at
+  once to run.  In effect, a thread can make a reservation, but we don't
+  check any stores it does.  Instead, the reservation is cancelled when
+  the scheduler switches to another thread (run_thread_for_a_while()).
+*/
+static Bool dis_memsync ( UInt theInstr )
+{
+   /* X-Form, XL-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UInt  b11to25 = IFIELD(theInstr, 11, 15);
+   UChar flag_L  = ifieldRegDS(theInstr);
+   UInt  b11to20 = IFIELD(theInstr, 11, 10);
+   UChar rD_addr = ifieldRegDS(theInstr);
+   UChar rS_addr = rD_addr;
+   UChar rA_addr = ifieldRegA(theInstr);
+   UChar rB_addr = ifieldRegB(theInstr);
+   UInt  opc2    = ifieldOPClo10(theInstr);
+   UChar b0      = ifieldBIT0(theInstr);
+
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp EA     = newTemp(ty);
+
+   assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
+
+   switch (opc1) {
+   /* XL-Form */
+   case 0x13:   // isync (Instruction Synchronize, PPC32 p432)
+      if (opc2 != 0x096) {
+         vex_printf("dis_memsync(ppc)(0x13,opc2)\n");
+         return False;
+      }
+      if (b11to25 != 0 || b0 != 0) {
+         vex_printf("dis_memsync(ppc)(0x13,b11to25|b0)\n");
+         return False;
+      }
+      DIP("isync\n");
+      stmt( IRStmt_MBE(Imbe_Fence) );
+      break;
+
+   /* X-Form */
+   case 0x1F:
+      switch (opc2) {
+      case 0x356: // eieio (Enforce In-Order Exec of I/O, PPC32 p394)
+         if (b11to25 != 0 || b0 != 0) {
+            vex_printf("dis_memsync(ppc)(eiei0,b11to25|b0)\n");
+            return False;
+         }
+         DIP("eieio\n");
+         /* Insert a memory fence, just to be on the safe side. */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         break;
+
+      case 0x014: { // lwarx (Load Word and Reserve Indexed, PPC32 p458)
+         IRTemp res;
+         /* According to the PowerPC ISA version 2.05, b0 (called EH
+            in the documentation) is merely a hint bit to the
+            hardware, I think as to whether or not contention is
+            likely.  So we can just ignore it. */
+         DIP("lwarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+
+         // trap if misaligned
+         gen_SIGBUS_if_misaligned( EA, 4 );
+
+         // and actually do the load
+         res = newTemp(Ity_I32);
+         stmt( stmt_load(res, mkexpr(EA), NULL/*this is a load*/) );
+
+         putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(res), False) );
+         break;
+      }
+
+      case 0x034: { // lbarx (Load Word and Reserve Indexed)
+         IRTemp res;
+         /* According to the PowerPC ISA version 2.05, b0 (called EH
+            in the documentation) is merely a hint bit to the
+            hardware, I think as to whether or not contention is
+            likely.  So we can just ignore it. */
+         DIP("lbarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+
+         // and actually do the load
+         res = newTemp(Ity_I8);
+         stmt( stmt_load(res, mkexpr(EA), NULL/*this is a load*/) );
+
+         putIReg( rD_addr, mkWidenFrom8(ty, mkexpr(res), False) );
+         break;
+     }
+
+      case 0x074: { // lharx (Load Word and Reserve Indexed)
+         IRTemp res;
+         /* According to the PowerPC ISA version 2.05, b0 (called EH
+            in the documentation) is merely a hint bit to the
+            hardware, I think as to whether or not contention is
+            likely.  So we can just ignore it. */
+         DIP("lharx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+
+         // trap if misaligned
+         gen_SIGBUS_if_misaligned( EA, 2 );
+
+         // and actually do the load
+         res = newTemp(Ity_I16);
+         stmt( stmt_load(res, mkexpr(EA), NULL/*this is a load*/) );
+
+         putIReg( rD_addr, mkWidenFrom16(ty, mkexpr(res), False) );
+         break;
+      }
+
+      case 0x096: { 
+         // stwcx. (Store Word Conditional Indexed, PPC32 p532)
+         // Note this has to handle stwcx. in both 32- and 64-bit modes,
+         // so isn't quite as straightforward as it might otherwise be.
+         IRTemp rS = newTemp(Ity_I32);
+         IRTemp resSC;
+         if (b0 != 1) {
+            vex_printf("dis_memsync(ppc)(stwcx.,b0)\n");
+            return False;
+         }
+         DIP("stwcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+
+         // trap if misaligned
+         gen_SIGBUS_if_misaligned( EA, 4 );
+
+         // Get the data to be stored, and narrow to 32 bits if necessary
+         assign( rS, mkNarrowTo32(ty, getIReg(rS_addr)) );
+
+         // Do the store, and get success/failure bit into resSC
+         resSC = newTemp(Ity_I1);
+         stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS)) );
+
+         // Set CR0[LT GT EQ S0] = 0b000 || XER[SO]  on failure
+         // Set CR0[LT GT EQ S0] = 0b001 || XER[SO]  on success
+         putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
+         putCR0(0, getXER_SO());
+
+         /* Note:
+            If resaddr != lwarx_resaddr, CR0[EQ] is undefined, and
+            whether rS is stored is dependent on that value. */
+         /* So I guess we can just ignore this case? */
+         break;
+      }
+
+      case 0x2B6: {
+         // stbcx. (Store Byte Conditional Indexed)
+         // Note this has to handle stbcx. in both 32- and 64-bit modes,
+         // so isn't quite as straightforward as it might otherwise be.
+         IRTemp rS = newTemp(Ity_I8);
+         IRTemp resSC;
+         if (b0 != 1) {
+            vex_printf("dis_memsync(ppc)(stbcx.,b0)\n");
+            return False;
+         }
+         DIP("stbcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+
+         // Get the data to be stored, and narrow to 32 bits if necessary
+         assign( rS, mkNarrowTo8(ty, getIReg(rS_addr)) );
+
+         // Do the store, and get success/failure bit into resSC
+         resSC = newTemp(Ity_I1);
+         stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS)) );
+
+         // Set CR0[LT GT EQ S0] = 0b000 || XER[SO]  on failure
+         // Set CR0[LT GT EQ S0] = 0b001 || XER[SO]  on success
+         putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
+         putCR0(0, getXER_SO());
+
+         /* Note:
+            If resaddr != lbarx_resaddr, CR0[EQ] is undefined, and
+            whether rS is stored is dependent on that value. */
+         /* So I guess we can just ignore this case? */
+         break;
+      }
+
+      case 0x2D6: {
+         // sthcx. (Store Word Conditional Indexed, PPC32 p532)
+         // Note this has to handle sthcx. in both 32- and 64-bit modes,
+         // so isn't quite as straightforward as it might otherwise be.
+         IRTemp rS = newTemp(Ity_I16);
+         IRTemp resSC;
+         if (b0 != 1) {
+            vex_printf("dis_memsync(ppc)(stwcx.,b0)\n");
+            return False;
+         }
+         DIP("sthcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+
+         // trap if misaligned
+         gen_SIGBUS_if_misaligned( EA, 2 );
+
+         // Get the data to be stored, and narrow to 16 bits if necessary
+         assign( rS, mkNarrowTo16(ty, getIReg(rS_addr)) );
+
+         // Do the store, and get success/failure bit into resSC
+         resSC = newTemp(Ity_I1);
+         stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS)) );
+
+         // Set CR0[LT GT EQ S0] = 0b000 || XER[SO]  on failure
+         // Set CR0[LT GT EQ S0] = 0b001 || XER[SO]  on success
+         putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
+         putCR0(0, getXER_SO());
+
+         /* Note:
+            If resaddr != lharx_resaddr, CR0[EQ] is undefined, and
+            whether rS is stored is dependent on that value. */
+         /* So I guess we can just ignore this case? */
+         break;
+      }
+
+      case 0x256: // sync (Synchronize, PPC32 p543), 
+                  // also lwsync (L==1), ptesync (L==2)
+         /* http://sources.redhat.com/ml/binutils/2000-12/msg00311.html
+
+            The PowerPC architecture used in IBM chips has expanded
+            the sync instruction into two variants: lightweight sync
+            and heavyweight sync.  The original sync instruction is
+            the new heavyweight sync and lightweight sync is a strict
+            subset of the heavyweight sync functionality. This allows
+            the programmer to specify a less expensive operation on
+            high-end systems when the full sync functionality is not
+            necessary.
+
+            The basic "sync" mnemonic now utilizes an operand. "sync"
+            without an operand now becomes a extended mnemonic for
+            heavyweight sync.  Processors without the lwsync
+            instruction will not decode the L field and will perform a
+            heavyweight sync.  Everything is backward compatible.
+
+            sync    =       sync 0
+            lwsync  =       sync 1
+            ptesync =       sync 2    *** TODO - not implemented ***
+         */
+         if (b11to20 != 0 || b0 != 0) {
+            vex_printf("dis_memsync(ppc)(sync/lwsync,b11to20|b0)\n");
+            return False;
+         }
+         if (flag_L != 0/*sync*/ && flag_L != 1/*lwsync*/) {
+            vex_printf("dis_memsync(ppc)(sync/lwsync,flag_L)\n");
+            return False;
+         }
+         DIP("%ssync\n", flag_L == 1 ? "lw" : "");
+         /* Insert a memory fence.  It's sometimes important that these
+            are carried through to the generated code. */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         break;
+
+      /* 64bit Memsync */
+      case 0x054: { // ldarx (Load DWord and Reserve Indexed, PPC64 p473)
+         IRTemp res;
+         /* According to the PowerPC ISA version 2.05, b0 (called EH
+            in the documentation) is merely a hint bit to the
+            hardware, I think as to whether or not contention is
+            likely.  So we can just ignore it. */
+         if (!mode64)
+            return False;
+         DIP("ldarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+
+         // trap if misaligned
+         gen_SIGBUS_if_misaligned( EA, 8 );
+
+         // and actually do the load
+         res = newTemp(Ity_I64);
+         stmt( stmt_load( res, mkexpr(EA), NULL/*this is a load*/) );
+
+         putIReg( rD_addr, mkexpr(res) );
+         break;
+      }
+      
+      case 0x0D6: { // stdcx. (Store DWord Condition Indexd, PPC64 p581)
+         // A marginally simplified version of the stwcx. case
+         IRTemp rS = newTemp(Ity_I64);
+         IRTemp resSC;
+         if (b0 != 1) {
+            vex_printf("dis_memsync(ppc)(stdcx.,b0)\n");
+            return False;
+         }
+         if (!mode64)
+            return False;
+         DIP("stdcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+
+         // trap if misaligned
+         gen_SIGBUS_if_misaligned( EA, 8 );
+
+         // Get the data to be stored
+         assign( rS, getIReg(rS_addr) );
+
+         // Do the store, and get success/failure bit into resSC
+         resSC = newTemp(Ity_I1);
+         stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS)) );
+
+         // Set CR0[LT GT EQ S0] = 0b000 || XER[SO]  on failure
+         // Set CR0[LT GT EQ S0] = 0b001 || XER[SO]  on success
+         putCR321(0, binop(Iop_Shl8, unop(Iop_1Uto8, mkexpr(resSC)), mkU8(1)));
+         putCR0(0, getXER_SO());
+
+         /* Note:
+            If resaddr != lwarx_resaddr, CR0[EQ] is undefined, and
+            whether rS is stored is dependent on that value. */
+         /* So I guess we can just ignore this case? */
+         break;
+      }
+
+      /* 128bit Memsync */
+      case 0x114: { // lqarx (Load QuadWord and Reserve Indexed)
+         IRTemp res_hi = newTemp(ty);
+         IRTemp res_lo = newTemp(ty);
+
+         /* According to the PowerPC ISA version 2.07, b0 (called EH
+            in the documentation) is merely a hint bit to the
+            hardware, I think as to whether or not contention is
+            likely.  So we can just ignore it. */
+         DIP("lqarx r%u,r%u,r%u,EH=%u\n", rD_addr, rA_addr, rB_addr, (UInt)b0);
+
+         // trap if misaligned
+         gen_SIGBUS_if_misaligned( EA, 16 );
+
+         // and actually do the load
+         if (mode64) {
+            if (host_endness == VexEndnessBE) {
+               stmt( stmt_load( res_hi,
+                                mkexpr(EA), NULL/*this is a load*/) );
+               stmt( stmt_load( res_lo,
+                                binop(Iop_Add64, mkexpr(EA), mkU64(8) ),
+                                NULL/*this is a load*/) );
+	    } else {
+               stmt( stmt_load( res_lo,
+                                mkexpr(EA), NULL/*this is a load*/) );
+               stmt( stmt_load( res_hi,
+                                binop(Iop_Add64, mkexpr(EA), mkU64(8) ),
+                                NULL/*this is a load*/) );
+            }
+         } else {
+            stmt( stmt_load( res_hi,
+                             binop( Iop_Add32, mkexpr(EA), mkU32(4) ),
+                             NULL/*this is a load*/) );
+            stmt( stmt_load( res_lo,
+                             binop( Iop_Add32, mkexpr(EA), mkU32(12) ),
+                             NULL/*this is a load*/) );
+         }
+         putIReg( rD_addr,   mkexpr(res_hi) );
+         putIReg( rD_addr+1, mkexpr(res_lo) );
+         break;
+      }
+
+      case 0x0B6: { // stqcx. (Store QuadWord Condition Indexd, PPC64)
+         // A marginally simplified version of the stwcx. case
+         IRTemp rS_hi = newTemp(ty);
+         IRTemp rS_lo = newTemp(ty);
+         IRTemp resSC;
+         if (b0 != 1) {
+            vex_printf("dis_memsync(ppc)(stqcx.,b0)\n");
+            return False;
+         }
+
+         DIP("stqcx. r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
+
+         // trap if misaligned
+         gen_SIGBUS_if_misaligned( EA, 16 );
+         // Get the data to be stored
+         assign( rS_hi, getIReg(rS_addr) );
+         assign( rS_lo, getIReg(rS_addr+1) );
+
+         // Do the store, and get success/failure bit into resSC
+         resSC = newTemp(Ity_I1);
+
+         if (mode64) {
+            if (host_endness == VexEndnessBE) {
+               stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS_hi) ) );
+               store( binop( Iop_Add64, mkexpr(EA), mkU64(8) ),
+                      mkexpr(rS_lo) );
+	    } else {
+               stmt( stmt_load( resSC, mkexpr(EA), mkexpr(rS_lo) ) );
+               store( binop( Iop_Add64, mkexpr(EA), mkU64(8) ),
+                      mkexpr(rS_hi) );
+	    }
+         } else {
+            stmt( stmt_load( resSC, binop( Iop_Add32,
+                                           mkexpr(EA),
+                                           mkU32(4) ),
+                                           mkexpr(rS_hi) ) );
+            store( binop(Iop_Add32, mkexpr(EA), mkU32(12) ), mkexpr(rS_lo) );
+         }
+
+         // Set CR0[LT GT EQ S0] = 0b000 || XER[SO]  on failure
+         // Set CR0[LT GT EQ S0] = 0b001 || XER[SO]  on success
+         putCR321(0, binop( Iop_Shl8,
+                            unop(Iop_1Uto8, mkexpr(resSC) ),
+                            mkU8(1)));
+         putCR0(0, getXER_SO());
+         break;
+      }
+
+      default:
+         vex_printf("dis_memsync(ppc)(opc2)\n");
+         return False;
+      }
+      break;
+
+   default:
+      vex_printf("dis_memsync(ppc)(opc1)\n");
+      return False;
+   }
+   return True;
+}
+
+
+
+/*
+  Integer Shift Instructions
+*/
/* Disassemble the integer shift instructions: slw, sraw, srawi, srw
   (32-bit) and sld, srad, sradi, srd (64-bit).  All are X-Form except
   sradi, which is XS-Form.  Returns True on a successful decode,
   False if the instruction is not recognised. */
static Bool dis_int_shift ( UInt theInstr )
{
   /* X-Form, XS-Form */
   UChar opc1    = ifieldOPC(theInstr);
   UChar rS_addr = ifieldRegDS(theInstr);
   UChar rA_addr = ifieldRegA(theInstr);
   UChar rB_addr = ifieldRegB(theInstr);
   UChar sh_imm  = rB_addr;  /* immediate forms reuse the rB field as shift count */
   UInt  opc2    = ifieldOPClo10(theInstr);
   UChar b1      = ifieldBIT1(theInstr);  /* sradi: 6th (top) bit of shift amount */
   UChar flag_rC = ifieldBIT0(theInstr);  /* Rc bit: record result in CR0 */

   IRType  ty         = mode64 ? Ity_I64 : Ity_I32;
   IRTemp  rA         = newTemp(ty);
   IRTemp  rS         = newTemp(ty);
   IRTemp  rB         = newTemp(ty);
   IRTemp  outofrange = newTemp(Ity_I1);  /* shift amount exceeds operand width? */
   IRTemp  rS_lo32    = newTemp(Ity_I32);
   IRTemp  rB_lo32    = newTemp(Ity_I32);
   IRExpr* e_tmp;

   assign( rS, getIReg(rS_addr) );
   assign( rB, getIReg(rB_addr) );
   /* 32-bit forms operate only on the low halves of rS and rB. */
   assign( rS_lo32, mkNarrowTo32(ty, mkexpr(rS)) );
   assign( rB_lo32, mkNarrowTo32(ty, mkexpr(rB)) );

   if (opc1 == 0x1F) {
      switch (opc2) {
      case 0x018: { // slw (Shift Left Word, PPC32 p505)
         DIP("slw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
             rA_addr, rS_addr, rB_addr);
         /* rA = rS << rB */
         /* ppc32 semantics are: 
            slw(x,y) = (x << (y & 31))         -- primary result
                       & ~((y << 26) >>s 31)   -- make result 0 
                                                  for y in 32 .. 63
         */
         e_tmp =
            binop( Iop_And32,
               binop( Iop_Shl32,
                      mkexpr(rS_lo32), 
                      unop( Iop_32to8,
                            binop(Iop_And32,
                                  mkexpr(rB_lo32), mkU32(31)))),
               unop( Iop_Not32,
                     /* bit 5 of the shift amount, replicated across all 32
                        bits: all-ones iff the shift amount is >= 32 */
                     binop( Iop_Sar32,
                            binop(Iop_Shl32, mkexpr(rB_lo32), mkU8(26)),
                            mkU8(31))) );
         assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */False) );
         break;
      }

      case 0x318: { // sraw (Shift Right Alg Word, PPC32 p506)
         IRTemp sh_amt = newTemp(Ity_I32);
         DIP("sraw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
             rA_addr, rS_addr, rB_addr);
         /* JRS: my reading of the (poorly worded) PPC32 doc p506 is:
            amt = rB & 63
            rA = Sar32( rS, amt > 31 ? 31 : amt )
            XER.CA = amt > 31 ? sign-of-rS : (computation as per srawi)
         */
         assign( sh_amt, binop(Iop_And32, mkU32(0x3F),
                                          mkexpr(rB_lo32)) );
         assign( outofrange,
                 binop(Iop_CmpLT32U, mkU32(31), mkexpr(sh_amt)) );
         /* Clamp the shift to 31, so an out-of-range amount smears the
            sign bit across the whole result. */
         e_tmp = binop( Iop_Sar32, 
                        mkexpr(rS_lo32), 
                        unop( Iop_32to8, 
                              IRExpr_ITE( mkexpr(outofrange), 
                                          mkU32(31),
                                          mkexpr(sh_amt)) ) );
         assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */True) );

         set_XER_CA( ty, PPCG_FLAG_OP_SRAW,
                     mkexpr(rA),
                     mkWidenFrom32(ty, mkexpr(rS_lo32), True),
                     mkWidenFrom32(ty, mkexpr(sh_amt), True ),
                     mkWidenFrom32(ty, getXER_CA32(), True) );
         break;
      }

      case 0x338: // srawi (Shift Right Alg Word Immediate, PPC32 p507)
         DIP("srawi%s r%u,r%u,%d\n", flag_rC ? ".":"",
             rA_addr, rS_addr, sh_imm);
         vassert(sh_imm < 32);
         if (mode64) {
            /* In 64-bit mode, shift left 32 then arithmetic-shift right
               (32 + sh_imm): performs the 32-bit shift and sign-extends
               the result to 64 bits in one go. */
            assign( rA, binop(Iop_Sar64,
                              binop(Iop_Shl64, getIReg(rS_addr),
                                               mkU8(32)),
                              mkU8(32 + sh_imm)) );
         } else {
            assign( rA, binop(Iop_Sar32, mkexpr(rS_lo32),
                                         mkU8(sh_imm)) );
         }

         set_XER_CA( ty, PPCG_FLAG_OP_SRAWI, 
                     mkexpr(rA),
                     mkWidenFrom32(ty, mkexpr(rS_lo32), /* Signed */True),
                     mkSzImm(ty, sh_imm),
                     mkWidenFrom32(ty, getXER_CA32(), /* Signed */False) );
         break;

      case 0x218: // srw (Shift Right Word, PPC32 p508)
         DIP("srw%s r%u,r%u,r%u\n", flag_rC ? ".":"",
             rA_addr, rS_addr, rB_addr);
         /* rA = rS >>u rB */
         /* ppc32 semantics are: 
            srw(x,y) = (x >>u (y & 31))        -- primary result
                       & ~((y << 26) >>s 31)   -- make result 0 
                                                  for y in 32 .. 63
         */
         e_tmp = 
            binop(
               Iop_And32,
               binop( Iop_Shr32, 
                      mkexpr(rS_lo32), 
                      unop( Iop_32to8, 
                            binop(Iop_And32, mkexpr(rB_lo32),
                                             mkU32(31)))),
               unop( Iop_Not32, 
                     binop( Iop_Sar32, 
                            binop(Iop_Shl32, mkexpr(rB_lo32),
                                             mkU8(26)), 
                            mkU8(31))));
         assign( rA, mkWidenFrom32(ty, e_tmp, /* Signed */False) );
         break;


      /* 64bit Shifts */
      case 0x01B: // sld (Shift Left DWord, PPC64 p568)
         DIP("sld%s r%u,r%u,r%u\n",
             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
         /* rA = rS << rB */
         /* ppc64 semantics are: 
            slw(x,y) = (x << (y & 63))         -- primary result
                       & ~((y << 57) >>s 63)   -- make result 0 
                                                  for y in 64 .. 127
         */
         assign( rA,
            binop(
               Iop_And64,
               binop( Iop_Shl64,
                      mkexpr(rS), 
                      unop( Iop_64to8, 
                            binop(Iop_And64, mkexpr(rB), mkU64(63)))),
               unop( Iop_Not64,
                     binop( Iop_Sar64,
                            binop(Iop_Shl64, mkexpr(rB), mkU8(57)), 
                            mkU8(63)))) );
         break;

      case 0x31A: { // srad (Shift Right Alg DWord, PPC64 p570)
         IRTemp sh_amt = newTemp(Ity_I64);
         DIP("srad%s r%u,r%u,r%u\n",
             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
         /* amt = rB & 127
            rA = Sar64( rS, amt > 63 ? 63 : amt )
            XER.CA = amt > 63 ? sign-of-rS : (computation as per srawi)
         */
         assign( sh_amt, binop(Iop_And64, mkU64(0x7F), mkexpr(rB)) );
         assign( outofrange,
                 binop(Iop_CmpLT64U, mkU64(63), mkexpr(sh_amt)) );
         /* Clamp the shift to 63, as with sraw above. */
         assign( rA,
                 binop( Iop_Sar64, 
                        mkexpr(rS), 
                        unop( Iop_64to8, 
                              IRExpr_ITE( mkexpr(outofrange), 
                                          mkU64(63),
                                          mkexpr(sh_amt)) ))
               );
         set_XER_CA( ty, PPCG_FLAG_OP_SRAD,
                     mkexpr(rA), mkexpr(rS), mkexpr(sh_amt),
                     mkWidenFrom32(ty, getXER_CA32(), /* Signed */False) );
         break;
      }

      case 0x33A: case 0x33B: // sradi (Shr Alg DWord Imm, PPC64 p571)
         /* XS-Form: the 6-bit shift amount's top bit lives in bit 1 of
            the instruction, which is why two opc2 values decode here. */
         sh_imm |= b1<<5;
         vassert(sh_imm < 64);
         DIP("sradi%s r%u,r%u,%u\n",
             flag_rC ? ".":"", rA_addr, rS_addr, sh_imm);
         assign( rA, binop(Iop_Sar64, getIReg(rS_addr), mkU8(sh_imm)) );

         set_XER_CA( ty, PPCG_FLAG_OP_SRADI, 
                     mkexpr(rA),
                     getIReg(rS_addr),
                     mkU64(sh_imm), 
                     mkWidenFrom32(ty, getXER_CA32(), /* Signed */False) );
         break;

      case 0x21B: // srd (Shift Right DWord, PPC64 p574)
         DIP("srd%s r%u,r%u,r%u\n",
             flag_rC ? ".":"", rA_addr, rS_addr, rB_addr);
         /* rA = rS >>u rB */
         /* ppc semantics are: 
            srw(x,y) = (x >>u (y & 63))        -- primary result
                       & ~((y << 57) >>s 63)   -- make result 0 
                                                  for y in 64 .. 127
         */
         assign( rA,
            binop(
               Iop_And64,
               binop( Iop_Shr64, 
                      mkexpr(rS), 
                      unop( Iop_64to8, 
                            binop(Iop_And64, mkexpr(rB), mkU64(63)))),
               unop( Iop_Not64, 
                     binop( Iop_Sar64, 
                            binop(Iop_Shl64, mkexpr(rB), mkU8(57)), 
                            mkU8(63)))) );
         break;
     
      default:
         vex_printf("dis_int_shift(ppc)(opc2)\n");
         return False;
      }
   } else {
      vex_printf("dis_int_shift(ppc)(opc1)\n");
      return False;
   }

   /* Common tail: write the result back, and set CR0 if Rc=1. */
   putIReg( rA_addr, mkexpr(rA) );
   
   if (flag_rC) {
      set_CR0( mkexpr(rA) );
   }
   return True;
}
+
+
+
+/*
+  Integer Load/Store Reverse Instructions
+*/
+/* Generates code to swap the byte order in an Ity_I32. */
+static IRExpr* /* :: Ity_I32 */ gen_byterev32 ( IRTemp t )
+{
+   vassert(typeOfIRTemp(irsb->tyenv, t) == Ity_I32);
+   return
+      binop(Iop_Or32,
+         binop(Iop_Shl32, mkexpr(t), mkU8(24)),
+      binop(Iop_Or32,
+         binop(Iop_And32, binop(Iop_Shl32, mkexpr(t), mkU8(8)), 
+                          mkU32(0x00FF0000)),
+      binop(Iop_Or32,
+         binop(Iop_And32, binop(Iop_Shr32, mkexpr(t), mkU8(8)),
+                          mkU32(0x0000FF00)),
+         binop(Iop_And32, binop(Iop_Shr32, mkexpr(t), mkU8(24)),
+                          mkU32(0x000000FF) )
+      )));
+}
+
+/* Generates code to swap the byte order in the lower half of an Ity_I32,
+   and zeroes the upper half. */
+static IRExpr* /* :: Ity_I32 */ gen_byterev16 ( IRTemp t )
+{
+   vassert(typeOfIRTemp(irsb->tyenv, t) == Ity_I32);
+   return
+      binop(Iop_Or32,
+         binop(Iop_And32, binop(Iop_Shl32, mkexpr(t), mkU8(8)),
+                          mkU32(0x0000FF00)),
+         binop(Iop_And32, binop(Iop_Shr32, mkexpr(t), mkU8(8)),
+                          mkU32(0x000000FF))
+      );
+}
+
/* Disassemble the byte-reversed load/store instructions: lhbrx, lwbrx,
   ldbrx, sthbrx, stwbrx, stdbrx.  All are X-Form with opc1 == 0x1F and
   bit 0 clear.  The effective address is (rA|0) + rB.  Returns True on
   a successful decode, False otherwise. */
static Bool dis_int_ldst_rev ( UInt theInstr )
{
   /* X-Form */
   UChar opc1    = ifieldOPC(theInstr);
   UChar rD_addr = ifieldRegDS(theInstr);
   UChar rS_addr = rD_addr;  /* same field names the source for stores */
   UChar rA_addr = ifieldRegA(theInstr);
   UChar rB_addr = ifieldRegB(theInstr);
   UInt  opc2    = ifieldOPClo10(theInstr);
   UChar b0      = ifieldBIT0(theInstr);

   IRType ty = mode64 ? Ity_I64 : Ity_I32;
   IRTemp EA = newTemp(ty);
   IRTemp w1 = newTemp(Ity_I32);  /* raw value */
   IRTemp w2 = newTemp(Ity_I32);  /* byte-reversed value */

   if (opc1 != 0x1F || b0 != 0) {
      vex_printf("dis_int_ldst_rev(ppc)(opc1|b0)\n");
      return False;
   }

   assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
   
   switch (opc2) {

      case 0x316: // lhbrx (Load Halfword Byte-Reverse Indexed, PPC32 p449)
         DIP("lhbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
         assign( w1, unop(Iop_16Uto32, load(Ity_I16, mkexpr(EA))) );
         assign( w2, gen_byterev16(w1) );
         putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(w2),
                                         /* Signed */False) );
         break;

      case 0x216: // lwbrx (Load Word Byte-Reverse Indexed, PPC32 p459)
         DIP("lwbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
         assign( w1, load(Ity_I32, mkexpr(EA)) );
         assign( w2, gen_byterev32(w1) );
         putIReg( rD_addr, mkWidenFrom32(ty, mkexpr(w2),
                                         /* Signed */False) );
         break;

      case 0x214: // ldbrx (Load Doubleword Byte-Reverse Indexed)
      {
         /* Load two 32-bit words, byte-reverse each, and join them in
            the order dictated by host endianness so the net effect is a
            full 64-bit byte reversal. */
         IRExpr * nextAddr;
         IRTemp w3 = newTemp( Ity_I32 );
         IRTemp w4 = newTemp( Ity_I32 );
         DIP("ldbrx r%u,r%u,r%u\n", rD_addr, rA_addr, rB_addr);
         assign( w1, load( Ity_I32, mkexpr( EA ) ) );
         assign( w2, gen_byterev32( w1 ) );
         nextAddr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
                           ty == Ity_I64 ? mkU64( 4 ) : mkU32( 4 ) );
         assign( w3, load( Ity_I32, nextAddr ) );
         assign( w4, gen_byterev32( w3 ) );
         if (host_endness == VexEndnessLE)
            putIReg( rD_addr, binop( Iop_32HLto64, mkexpr( w2 ), mkexpr( w4 ) ) );
         else
            putIReg( rD_addr, binop( Iop_32HLto64, mkexpr( w4 ), mkexpr( w2 ) ) );
         break;
      }

      case 0x396: // sthbrx (Store Half Word Byte-Reverse Indexed, PPC32 p523)
         DIP("sthbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
         assign( w1, mkNarrowTo32(ty, getIReg(rS_addr)) );
         store( mkexpr(EA), unop(Iop_32to16, gen_byterev16(w1)) );
         break;
      
      case 0x296: // stwbrx (Store Word Byte-Reverse Indxd, PPC32 p531)
         DIP("stwbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
         assign( w1, mkNarrowTo32(ty, getIReg(rS_addr)) );
         store( mkexpr(EA), gen_byterev32(w1) );
         break;

      case 0x294: // stdbrx (Store Doubleword Byte-Reverse Indexed)
      {
         /* NOTE(review): the names are swapped relative to content —
            'lo' holds the HIGH 32 bits of rS and 'hi' holds the LOW 32
            bits.  The composition below, byterev(low32):byterev(high32),
            is nonetheless the correct full 64-bit byte reversal. */
         IRTemp lo = newTemp(Ity_I32);
         IRTemp hi = newTemp(Ity_I32);
         IRTemp rS = newTemp(Ity_I64);
         assign( rS, getIReg( rS_addr ) );
         DIP("stdbrx r%u,r%u,r%u\n", rS_addr, rA_addr, rB_addr);
         assign(lo, unop(Iop_64HIto32, mkexpr(rS)));
         assign(hi, unop(Iop_64to32, mkexpr(rS)));
         store( mkexpr( EA ),
                binop( Iop_32HLto64, gen_byterev32( hi ),
                       gen_byterev32( lo ) ) );
         break;
      }

      default:
         vex_printf("dis_int_ldst_rev(ppc)(opc2)\n");
         return False;
   }
   return True;
}
+
+
+
+/*
+  Processor Control Instructions
+*/
+static Bool dis_proc_ctl ( const VexAbiInfo* vbi, UInt theInstr )
+{
+   UChar opc1     = ifieldOPC(theInstr);
+   
+   /* X-Form */
+   UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+   UChar b21to22  = toUChar( IFIELD( theInstr, 21, 2 ) );
+   UChar rD_addr  = ifieldRegDS(theInstr);
+   UInt  b11to20  = IFIELD( theInstr, 11, 10 );
+
+   /* XFX-Form */
+   UChar rS_addr  = rD_addr;
+   UInt  SPR      = b11to20;
+   UInt  TBR      = b11to20;
+   UChar b20      = toUChar( IFIELD( theInstr, 20, 1 ) );
+   UInt  CRM      = IFIELD( theInstr, 12, 8 );
+   UChar b11      = toUChar( IFIELD( theInstr, 11, 1 ) );
+
+   UInt  opc2     = ifieldOPClo10(theInstr);
+   UChar b0       = ifieldBIT0(theInstr);
+
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp rS = newTemp(ty);
+   assign( rS, getIReg(rS_addr) );
+
+   /* Reorder SPR field as per PPC32 p470 */
+   SPR = ((SPR & 0x1F) << 5) | ((SPR >> 5) & 0x1F);
+   /* Reorder TBR field as per PPC32 p475 */
+   TBR = ((TBR & 31) << 5) | ((TBR >> 5) & 31);
+   
+   /* b0 = 0, inst is treated as floating point inst for reservation purposes
+    * b0 = 1, inst is treated as vector inst for reservation purposes
+    */
+   if (opc1 != 0x1F) {
+      vex_printf("dis_proc_ctl(ppc)(opc1|b%d)\n", b0);
+      return False;
+   }
+   
+   switch (opc2) {
+   /* X-Form */
+   case 0x200: { // mcrxr (Move to Cond Register from XER, PPC32 p466)
+      if (b21to22 != 0 || b11to20 != 0) {
+         vex_printf("dis_proc_ctl(ppc)(mcrxr,b21to22|b11to20)\n");
+         return False;
+      }
+      DIP("mcrxr crf%d\n", crfD);
+      /* Move XER[0-3] (the top 4 bits of XER) to CR[crfD] */
+      putGST_field( PPC_GST_CR,
+                    getGST_field( PPC_GST_XER, 7 ),
+                    crfD );
+
+      // Clear XER[0-3]
+      putXER_SO( mkU8(0) );
+      putXER_OV( mkU8(0) );
+      putXER_CA( mkU8(0) );
+      break;
+   }
+      
+   case 0x013: 
+      // b11to20==0:      mfcr (Move from Cond Register, PPC32 p467)
+      // b20==1 & b11==0: mfocrf (Move from One CR Field)
+      // However it seems that the 'mfcr' behaviour is an acceptable
+      // implementation of mfocr (from the 2.02 arch spec)
+      if (b11to20 == 0) {
+         DIP("mfcr r%u\n", rD_addr);
+         putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_CR ),
+                                         /* Signed */False) );
+         break;
+      }
+      if (b20 == 1 && b11 == 0) {
+         DIP("mfocrf r%u,%u\n", rD_addr, CRM);
+         putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_CR ),
+                                         /* Signed */False) );
+         break;
+      }
+      /* not decodable */
+      return False;
+
+   /* XFX-Form */
+   case 0x153: // mfspr (Move from Special-Purpose Register, PPC32 p470)
+      
+      switch (SPR) {  // Choose a register...
+      case 0x1:
+         DIP("mfxer r%u\n", rD_addr);
+         putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_XER ),
+                                         /* Signed */False) );
+         break;
+      case 0x8:
+         DIP("mflr r%u\n", rD_addr);
+         putIReg( rD_addr, getGST( PPC_GST_LR ) ); 
+         break;
+      case 0x9:
+         DIP("mfctr r%u\n", rD_addr);
+         putIReg( rD_addr, getGST( PPC_GST_CTR ) ); 
+         break;
+      case 0x80:  // 128
+         DIP("mfspr r%u (TFHAR)\n", rD_addr);
+         putIReg( rD_addr, getGST( PPC_GST_TFHAR) );
+         break;
+      case 0x81:  // 129
+         DIP("mfspr r%u (TFIAR)\n", rD_addr);
+         putIReg( rD_addr, getGST( PPC_GST_TFIAR) );
+         break;
+      case 0x82:  // 130
+         DIP("mfspr r%u (TEXASR)\n", rD_addr);
+         putIReg( rD_addr, getGST( PPC_GST_TEXASR) );
+         break;
+      case 0x83:  // 131
+         DIP("mfspr r%u (TEXASRU)\n", rD_addr);
+         putIReg( rD_addr, getGST( PPC_GST_TEXASRU) );
+         break;
+      case 0x100: 
+         DIP("mfvrsave r%u\n", rD_addr);
+         putIReg( rD_addr, mkWidenFrom32(ty, getGST( PPC_GST_VRSAVE ),
+                                         /* Signed */False) );
+         break;
+
+      case 0x103:
+         DIP("mfspr r%u, SPRG3(readonly)\n", rD_addr);
+         putIReg( rD_addr, getGST( PPC_GST_SPRG3_RO ) );
+         break;
+
+      /* Even a lowly PPC7400 can run the associated helper, so no
+         obvious need for feature testing at this point. */
+      case 268 /* 0x10C */:
+      case 269 /* 0x10D */: {
+         UInt     arg  = SPR==268 ? 0 : 1;
+         IRTemp   val  = newTemp(Ity_I32);
+         IRExpr** args = mkIRExprVec_1( mkU32(arg) );
+         IRDirty* d    = unsafeIRDirty_1_N(
+                            val,
+                            0/*regparms*/,
+                            "ppc32g_dirtyhelper_MFSPR_268_269",
+                            fnptr_to_fnentry
+                               (vbi, &ppc32g_dirtyhelper_MFSPR_268_269),
+                            args
+                         );
+         /* execute the dirty call, dumping the result in val. */
+         stmt( IRStmt_Dirty(d) );
+         putIReg( rD_addr,
+                  mkWidenFrom32(ty, mkexpr(val), False/*unsigned*/) );
+         DIP("mfspr r%u,%u", rD_addr, (UInt)SPR);
+         break;
+      }
+
+      /* Again, runs natively on PPC7400 (7447, really).  Not
+         bothering with a feature test. */
+      case 287: /* 0x11F */ {
+         IRTemp   val  = newTemp(Ity_I32);
+         IRExpr** args = mkIRExprVec_0();
+         IRDirty* d    = unsafeIRDirty_1_N(
+                            val,
+                            0/*regparms*/,
+                            "ppc32g_dirtyhelper_MFSPR_287",
+                            fnptr_to_fnentry
+                               (vbi, &ppc32g_dirtyhelper_MFSPR_287),
+                            args
+                         );
+         /* execute the dirty call, dumping the result in val. */
+         stmt( IRStmt_Dirty(d) );
+         putIReg( rD_addr,
+                  mkWidenFrom32(ty, mkexpr(val), False/*unsigned*/) );
+         DIP("mfspr r%u,%u", rD_addr, (UInt)SPR);
+         break;
+      }
+
+      default:
+         vex_printf("dis_proc_ctl(ppc)(mfspr,SPR)(0x%x)\n", SPR);
+         return False;
+      }
+      break;
+      
+   case 0x173: { // mftb (Move from Time Base, PPC32 p475)
+      IRTemp   val  = newTemp(Ity_I64);
+      IRExpr** args = mkIRExprVec_0();
+      IRDirty* d    = unsafeIRDirty_1_N(
+                              val, 
+                              0/*regparms*/, 
+                              "ppcg_dirtyhelper_MFTB", 
+                              fnptr_to_fnentry(vbi, &ppcg_dirtyhelper_MFTB), 
+                              args );
+      /* execute the dirty call, dumping the result in val. */
+      stmt( IRStmt_Dirty(d) );
+
+      switch (TBR) {
+      case 269: 
+         DIP("mftbu r%u", rD_addr);
+         putIReg( rD_addr,
+                  mkWidenFrom32(ty, unop(Iop_64HIto32, mkexpr(val)),
+                                /* Signed */False) );
+         break;
+      case 268: 
+         DIP("mftb r%u", rD_addr);
+         putIReg( rD_addr, (mode64) ? mkexpr(val) :
+                                      unop(Iop_64to32, mkexpr(val)) );
+         break;
+      default:
+         return False; /* illegal instruction */
+      }
+      break;
+   }
+
+   case 0x090: { 
+      // b20==0: mtcrf (Move to Cond Register Fields, PPC32 p477)
+      // b20==1: mtocrf (Move to One Cond Reg Field)
+      Int   cr;
+      UChar shft;
+      if (b11 != 0)
+         return False;
+      if (b20 == 1) {
+         /* ppc64 v2.02 spec says mtocrf gives undefined outcome if >
+            1 field is written.  It seems more robust to decline to
+            decode the insn if so. */
+         switch (CRM) {
+            case 0x01: case 0x02: case 0x04: case 0x08:
+            case 0x10: case 0x20: case 0x40: case 0x80:
+               break;
+            default: 
+               return False; 
+         }
+      }
+      DIP("%s 0x%x,r%u\n", b20==1 ? "mtocrf" : "mtcrf", 
+                           CRM, rS_addr);
+      /* Write to each field specified by CRM */
+      for (cr = 0; cr < 8; cr++) {
+         if ((CRM & (1 << (7-cr))) == 0)
+            continue;
+         shft = 4*(7-cr);
+         putGST_field( PPC_GST_CR,
+                       binop(Iop_Shr32,
+                             mkNarrowTo32(ty, mkexpr(rS)),
+                             mkU8(shft)), cr );
+      }
+      break;
+   }
+
+   case 0x1D3: // mtspr (Move to Special-Purpose Register, PPC32 p483)
+      
+      switch (SPR) {  // Choose a register...
+      case 0x1:
+         DIP("mtxer r%u\n", rS_addr);
+         putGST( PPC_GST_XER, mkNarrowTo32(ty, mkexpr(rS)) );
+         break;
+      case 0x8:
+         DIP("mtlr r%u\n", rS_addr);
+         putGST( PPC_GST_LR, mkexpr(rS) ); 
+         break;
+      case 0x9:
+         DIP("mtctr r%u\n", rS_addr);
+         putGST( PPC_GST_CTR, mkexpr(rS) ); 
+         break;
+      case 0x100:
+         DIP("mtvrsave r%u\n", rS_addr);
+         putGST( PPC_GST_VRSAVE, mkNarrowTo32(ty, mkexpr(rS)) );
+         break;
+      case 0x80:  // 128
+         DIP("mtspr r%u (TFHAR)\n", rS_addr);
+         putGST( PPC_GST_TFHAR, mkexpr(rS) );
+         break;
+      case 0x81:  // 129
+         DIP("mtspr r%u (TFIAR)\n", rS_addr);
+         putGST( PPC_GST_TFIAR, mkexpr(rS) );
+         break;
+      case 0x82:  // 130
+         DIP("mtspr r%u (TEXASR)\n", rS_addr);
+         putGST( PPC_GST_TEXASR, mkexpr(rS) );
+         break;
+      default:
+         vex_printf("dis_proc_ctl(ppc)(mtspr,SPR)(%u)\n", SPR);
+         return False;
+      }
+      break;
+
+   case 0x33:                // mfvsrd
+   {
+      UChar XS = ifieldRegXS( theInstr );
+      UChar rA_addr = ifieldRegA(theInstr);
+      IRExpr * high64;
+      IRTemp vS = newTemp( Ity_V128 );
+      DIP("mfvsrd r%u,vsr%d\n", rA_addr, (UInt)XS);
+
+      /*  XS = SX || S
+       *  For SX=0, mfvsrd is treated as a Floating-Point
+       *            instruction in terms of resource availability.
+       *  For SX=1, mfvsrd is treated as a Vector instruction in
+       *            terms of resource availability.
+       * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
+       */
+      assign( vS, getVSReg( XS ) );
+      high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
+      putIReg( rA_addr, (mode64) ? high64 :
+      unop( Iop_64to32, high64 ) );
+      break;
+   }
+
+   case 0x73:                // mfvsrwz
+   {
+      UChar XS = ifieldRegXS( theInstr );
+      UChar rA_addr = ifieldRegA(theInstr);
+      IRExpr * high64;
+      IRTemp vS = newTemp( Ity_V128 );
+      DIP("mfvsrwz r%u,vsr%d\n", rA_addr, (UInt)XS);
+      /*  XS = SX || S
+       *  For SX=0, mfvsrwz is treated as a Floating-Point
+       *            instruction in terms of resource availability.
+       *  For SX=1, mfvsrwz is treated as a Vector instruction in
+       *            terms of resource availability.
+       * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
+       */
+
+      assign( vS, getVSReg( XS ) );
+      high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
+      /* move value to the destination setting the upper 32-bits to zero */
+      putIReg( rA_addr, (mode64) ?
+                                  binop( Iop_And64, high64, mkU64( 0xFFFFFFFF ) ) :
+                                  unop(  Iop_64to32,
+                                         binop( Iop_And64, high64, mkU64( 0xFFFFFFFF ) ) ) );
+      break;
+   }
+
+   case 0xB3:                // mtvsrd
+   {
+      UChar XT = ifieldRegXT( theInstr );
+      UChar rA_addr = ifieldRegA(theInstr);
+      IRTemp rA = newTemp(ty);
+      DIP("mtvsrd vsr%d,r%u\n", (UInt)XT, rA_addr);
+      /*  XS = SX || S
+       *  For SX=0, mfvsrd is treated as a Floating-Point
+       *            instruction in terms of resource availability.
+       *  For SX=1, mfvsrd is treated as a Vector instruction in
+       *            terms of resource availability.
+       * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
+       */
+      assign( rA, getIReg(rA_addr) );
+
+      if (mode64)
+         putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( rA ), mkU64( 0 ) ) );
+      else
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              binop( Iop_32HLto64,
+                                     mkU32( 0 ),
+                                     mkexpr( rA ) ),
+                                     mkU64( 0 ) ) );
+      break;
+   }
+
+   case 0xD3:                // mtvsrwa
+   {
+      UChar XT = ifieldRegXT( theInstr );
+      UChar rA_addr = ifieldRegA(theInstr);
+      IRTemp rA = newTemp( Ity_I32 );
+      DIP("mtvsrwa vsr%d,r%u\n", (UInt)XT, rA_addr);
+      /*  XS = SX || S
+       *  For SX=0, mtvsrwa is treated as a Floating-Point
+       *            instruction in terms of resource availability.
+       *  For SX=1, mtvsrwa is treated as a Vector instruction in
+       *            terms of resource availability.
+       * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
+       */
+      if (mode64)
+         assign( rA, unop( Iop_64to32, getIReg( rA_addr ) ) );
+      else
+         assign( rA, getIReg(rA_addr) );
+
+      putVSReg( XT, binop( Iop_64HLtoV128,
+                           unop( Iop_32Sto64, mkexpr( rA ) ),
+                           mkU64( 0 ) ) );
+      break;
+   }
+
+   case 0xF3:                // mtvsrwz
+      {
+         UChar XT = ifieldRegXT( theInstr );
+         UChar rA_addr = ifieldRegA(theInstr);
+         IRTemp rA = newTemp( Ity_I32 );
+         DIP("mtvsrwz vsr%d,r%u\n", rA_addr, (UInt)XT);
+         /*  XS = SX || S
+          *  For SX=0, mtvsrwz is treated as a Floating-Point
+          *            instruction in terms of resource availability.
+          *  For SX=1, mtvsrwz is treated as a Vector instruction in
+          *            terms of resource availability.
+          * FIXME: NEED TO FIGURE OUT HOW TO IMPLEMENT THE RESOURCE AVAILABILITY PART
+          */
+         if (mode64)
+             assign( rA, unop( Iop_64to32, getIReg( rA_addr ) ) );
+         else
+             assign( rA, getIReg(rA_addr) );
+
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              binop( Iop_32HLto64, mkU32( 0 ), mkexpr ( rA ) ),
+                              mkU64( 0 ) ) );
+         break;
+      }
+
+   default:
+      vex_printf("dis_proc_ctl(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+
+/*
+  Cache Management Instructions
+
+  Decodes the X-form cache-management instructions: dcbf, dcbst, dcbt,
+  dcbtst, dcbz/dcbzl and icbi.  The flush/touch ops are no-ops as far
+  as VEX is concerned; dcbz[l] is modelled by storing zeroes over the
+  whole block; icbi invalidates existing translations for the block
+  via the CMSTART/CMLEN guest-state fields and stops the superblock.
+  Returns False if the instruction cannot be decoded.
+*/
+static Bool dis_cache_manage ( UInt         theInstr, 
+                               DisResult*   dres,
+                               const VexArchInfo* guest_archinfo )
+{
+   /* X-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UChar b21to25 = ifieldRegDS(theInstr);
+   UChar rA_addr = ifieldRegA(theInstr);
+   UChar rB_addr = ifieldRegB(theInstr);
+   UInt  opc2    = ifieldOPClo10(theInstr);
+   UChar b0      = ifieldBIT0(theInstr);
+   UInt  lineszB = guest_archinfo->ppc_icache_line_szB;
+   Bool  is_dcbzl = False;
+
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+
+   // Check for valid hint values for dcbt and dcbtst as currently described in
+   // ISA 2.07.  If valid, then we simply set b21to25 to zero since we have no
+   // means of modeling the hint anyway.
+   if (opc1 == 0x1F && ((opc2 == 0x116) || (opc2 == 0xF6))) {
+      if (b21to25 == 0x10 || b21to25 < 0x10)
+         b21to25 = 0;
+   }
+   if (opc1 == 0x1F && opc2 == 0x116 && b21to25 == 0x11)
+      b21to25 = 0;
+
+   if (opc1 == 0x1F && opc2 == 0x3F6) { // dcbz
+      if (b21to25 == 1) {
+         /* TH field == 1 selects the dcbzl variant (bug#135264); only
+            legal if the host told us its dcbzl block size. */
+         is_dcbzl = True;
+         b21to25 = 0;
+         if (!(guest_archinfo->ppc_dcbzl_szB)) {
+            vex_printf("dis_cache_manage(ppc)(dcbzl not supported by host)\n");
+            return False;
+         }
+      }
+   }
+
+   if (opc1 != 0x1F || b21to25 != 0 || b0 != 0) {
+      if (0) vex_printf("dis_cache_manage %d %d %d\n", 
+                        (Int)opc1, (Int)b21to25, (Int)b0);
+      vex_printf("dis_cache_manage(ppc)(opc1|b21to25|b0)\n");
+      return False;
+   }
+
+   /* stay sane .. */
+   vassert(lineszB == 16 || lineszB == 32 || lineszB == 64 || lineszB == 128);
+   
+   switch (opc2) {
+//zz    case 0x2F6: // dcba (Data Cache Block Allocate, PPC32 p380)
+//zz       vassert(0); /* AWAITING TEST CASE */
+//zz       DIP("dcba r%u,r%u\n", rA_addr, rB_addr);
+//zz       if (0) vex_printf("vex ppc->IR: kludged dcba\n");
+//zz       break;
+      
+   case 0x056: // dcbf (Data Cache Block Flush, PPC32 p382)
+      DIP("dcbf r%u,r%u\n", rA_addr, rB_addr);
+      /* nop as far as vex is concerned */
+      break;
+      
+   case 0x036: // dcbst (Data Cache Block Store, PPC32 p384)
+      DIP("dcbst r%u,r%u\n", rA_addr, rB_addr);
+      /* nop as far as vex is concerned */
+      break;
+
+   case 0x116: // dcbt (Data Cache Block Touch, PPC32 p385)
+      DIP("dcbt r%u,r%u\n", rA_addr, rB_addr);
+      /* nop as far as vex is concerned */
+      break;
+      
+   case 0x0F6: // dcbtst (Data Cache Block Touch for Store, PPC32 p386)
+      DIP("dcbtst r%u,r%u\n", rA_addr, rB_addr);
+      /* nop as far as vex is concerned */
+      break;
+      
+   case 0x3F6: { // dcbz (Data Cache Block Clear to Zero, PPC32 p387)
+                 // dcbzl (Data Cache Block Clear to Zero Long, bug#135264)
+      /* Clear all bytes in cache block at (rA|0) + rB. */
+      IRTemp  EA   = newTemp(ty);
+      IRTemp  addr = newTemp(ty);
+      IRExpr* irx_addr;
+      UInt    i;
+      UInt clearszB;
+      if (is_dcbzl) {
+          clearszB = guest_archinfo->ppc_dcbzl_szB;
+          DIP("dcbzl r%u,r%u\n", rA_addr, rB_addr);
+      }
+      else {
+          clearszB = guest_archinfo->ppc_dcbz_szB;
+          DIP("dcbz r%u,r%u\n", rA_addr, rB_addr);
+      }
+
+      assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
+
+      if (mode64) {
+         /* Round EA down to the start of the containing block. */
+         assign( addr, binop( Iop_And64,
+                              mkexpr(EA),
+                              mkU64( ~((ULong)clearszB-1) )) );
+         
+         /* Zero the block one 8-byte store at a time. */
+         for (i = 0; i < clearszB / 8; i++) {
+            irx_addr = binop( Iop_Add64, mkexpr(addr), mkU64(i*8) );
+            store( irx_addr, mkU64(0) );
+         }
+      } else {
+         /* Round EA down to the start of the containing block. */
+         assign( addr, binop( Iop_And32,
+                              mkexpr(EA),
+                              mkU32( ~(clearszB-1) )) );
+         
+         /* Zero the block one 4-byte store at a time. */
+         for (i = 0; i < clearszB / 4; i++) {
+            irx_addr = binop( Iop_Add32, mkexpr(addr), mkU32(i*4) );
+            store( irx_addr, mkU32(0) );
+         }
+      }
+      break;
+   }
+
+   case 0x3D6: { 
+      // icbi (Instruction Cache Block Invalidate, PPC32 p431)
+      /* Invalidate all translations containing code from the cache
+         block at (rA|0) + rB. */
+      IRTemp EA   = newTemp(ty);
+      IRTemp addr = newTemp(ty);
+      DIP("icbi r%u,r%u\n", rA_addr, rB_addr);
+      assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
+
+      /* Round EA down to the start of the containing block. */
+      assign( addr, binop( mkSzOp(ty, Iop_And8),
+                           mkexpr(EA),
+                           mkSzImm(ty, ~(((ULong)lineszB)-1) )) );
+      putGST( PPC_GST_CMSTART, mkexpr(addr) );
+      putGST( PPC_GST_CMLEN, mkSzImm(ty, lineszB) );
+
+      /* be paranoid ... */
+      stmt( IRStmt_MBE(Imbe_Fence) );
+
+      /* End the superblock here so the invalidation takes effect
+         before the next instruction is translated. */
+      putGST( PPC_GST_CIA, mkSzImm(ty, nextInsnAddr()));
+      dres->jk_StopHere = Ijk_InvalICache;
+      dres->whatNext    = Dis_StopHere;
+      break;
+   }
+
+   default:
+      vex_printf("dis_cache_manage(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Floating Point Helpers                               ---*/
+/*------------------------------------------------------------*/
+
+/* --------- Synthesise a 2-bit FPU rounding mode. --------- */
+/* Produces a value in 0 .. 3, which is encoded as per the type
+   IRRoundingMode.  PPCRoundingMode encoding is different to
+   IRRoundingMode, so need to map it.  The returned expression is
+   suitable as the rounding-mode argument of the Iop_*F64 ops.
+*/
+static IRExpr* /* :: Ity_I32 */ get_IR_roundingmode ( void )
+{
+/* 
+   rounding mode | PPC | IR
+   ------------------------
+   to nearest    | 00  | 00
+   to zero       | 01  | 11
+   to +infinity  | 10  | 10
+   to -infinity  | 11  | 01
+*/
+   /* Read the RN field out of the FPSCR. */
+   IRTemp rm_PPC32 = newTemp(Ity_I32);
+   assign( rm_PPC32, getGST_masked( PPC_GST_FPSCR, MASK_FPSCR_RN ) );
+
+   // rm_IR = XOR( rm_PPC32, (rm_PPC32 << 1) & 2)
+   // i.e. flip bit 1 whenever bit 0 is set, which realises the
+   // table above (00->00, 01->11, 10->10, 11->01).
+   return binop( Iop_Xor32, 
+                 mkexpr(rm_PPC32),
+                 binop( Iop_And32, 
+                        binop(Iop_Shl32, mkexpr(rm_PPC32), mkU8(1)),
+                        mkU32(2) ));
+}
+
+/* The DFP IR rounding modes were chosen such that the existing PPC to IR
+ * mapping would still work with the extended three bit DFP rounding 
+ * mode designator.
+
+ *  rounding mode                     | PPC  |  IR
+ *  -----------------------------------------------
+ *  to nearest, ties to even          | 000  | 000
+ *  to zero                           | 001  | 011
+ *  to +infinity                      | 010  | 010
+ *  to -infinity                      | 011  | 001
+ *  to nearest, ties away from 0      | 100  | 100
+ *  to nearest, ties toward 0         | 101  | 111
+ *  to away from 0                    | 110  | 110
+ *  to prepare for shorter precision  | 111  | 101
+ */
+static IRExpr* /* :: Ity_I32 */ get_IR_roundingmode_DFP( void )
+{
+   /* Fetch the 3-bit DRN field from the FPSCR, then remap it to the
+      IR encoding using the same transform as get_IR_roundingmode:
+      rm_IR = rm_PPC ^ ((rm_PPC << 1) & 2), i.e. flip bit 1 whenever
+      bit 0 is set.  Bit 2 passes through unchanged. */
+   IRTemp  rm_DFP = newTemp( Ity_I32 );
+   IRExpr* bit1_flip;
+
+   assign( rm_DFP, getGST_masked_upper( PPC_GST_FPSCR, MASK_FPSCR_DRN ) );
+
+   /* (rm << 1) & 2 : copy bit 0 of rm up into bit 1. */
+   bit1_flip = binop( Iop_And32,
+                      binop( Iop_Shl32, mkexpr( rm_DFP ), mkU8( 1 ) ),
+                      mkU32( 2 ) );
+
+   return binop( Iop_Xor32, mkexpr( rm_DFP ), bit1_flip );
+}
+
+#define NANmaskSingle   0x7F800000
+#define NANmaskDouble   0x7FF00000
+
+/* Test whether the F64 expression 'value' is a NaN (quiet or
+ * signalling), using 'Hi32Mask' (NANmaskSingle or NANmaskDouble) to
+ * locate the exponent field in the upper 32 bits. */
+static IRExpr * Check_NaN( IRExpr * value, IRExpr * Hi32Mask )
+{
+   IRTemp exp_zero  = newTemp(Ity_I8);
+   IRTemp frac_mask = newTemp(Ity_I32);
+   IRTemp frac_not_zero = newTemp(Ity_I8);
+
+   /* Check if the result is QNAN or SNAN and not +infinity or -infinity.
+    * The input value is always 64-bits, for single precision values, the
+    * lower 32 bits must be zero.
+    *
+    * Single Precision 
+    *  [62:54] exponent field is equal to 0xFF for NAN and Infinity.
+    *  [53:32] fraction field is zero for Infinity and non-zero for NAN
+    *  [31:0]  unused for single precision representation
+    *
+    * Double Precision 
+    *  [62:51] exponent field is equal to 0xFF for NAN and Infinity.
+    *  [50:0]  fraction field is zero for Infinity and non-zero for NAN
+    *
+    * NOTE(review): the bit ranges quoted above look off by one vs the
+    * NANmask constants actually used (which select [62:55] / [62:52]);
+    * the code works off the masks, so this affects the comment only.
+    *
+    * Returned result is a U32 value of 0xFFFFFFFF for NaN and 0 otherwise.
+    */
+   /* frac_mask = upper-32 bits that are neither sign nor exponent. */
+   assign( frac_mask, unop( Iop_Not32,
+                            binop( Iop_Or32,
+                                   mkU32( 0x80000000ULL ), Hi32Mask) ) );
+
+   /* exp_zero = all-ones iff the exponent field is all-ones. */
+   assign( exp_zero,
+           unop( Iop_1Sto8,
+                 binop( Iop_CmpEQ32,
+                        binop( Iop_And32,
+                               unop( Iop_64HIto32,
+                                     unop( Iop_ReinterpF64asI64,
+                                           value ) ),
+                               Hi32Mask ),
+                        Hi32Mask ) ) );
+   /* frac_not_zero = all-ones iff any fraction bit (upper or lower
+      word) is set; this distinguishes NaN from infinity. */
+   assign( frac_not_zero,
+           binop( Iop_Or8,
+                  unop( Iop_1Sto8,
+                        binop( Iop_CmpNE32,
+                               binop( Iop_And32,
+                                      unop( Iop_64HIto32,
+                                            unop( Iop_ReinterpF64asI64,
+                                                  value ) ),
+                                      mkexpr( frac_mask ) ),
+                               mkU32( 0x0 ) ) ),
+                  unop( Iop_1Sto8,
+                        binop( Iop_CmpNE32,
+                               binop( Iop_And32,
+                                      unop( Iop_64to32,
+                                            unop( Iop_ReinterpF64asI64,
+                                                  value ) ),
+                                      mkU32( 0xFFFFFFFF ) ),
+                               mkU32( 0x0 ) ) ) ) );
+   return unop( Iop_8Sto32,
+                binop( Iop_And8,
+                       mkexpr( exp_zero ),
+                       mkexpr( frac_not_zero ) ) );
+}
+
+static IRExpr * Complement_non_NaN( IRExpr * value, IRExpr * nan_mask )
+{
+   /* This function will only complement the 64-bit floating point value if
+    * it is not NaN (NaN is not a signed value).  'nan_mask' is all-ones for
+    * NaN and all-zeros otherwise; it selects between the original and the
+    * negated upper word.  All computation is done with 32-bit operands so
+    * it also runs in 32-bit mode.
+    */
+   IRExpr* hi_orig = unop( Iop_64HIto32,
+                           unop( Iop_ReinterpF64asI64, value ) );
+   IRExpr* hi_neg  = unop( Iop_64HIto32,
+                           unop( Iop_ReinterpF64asI64,
+                                 unop( Iop_NegF64, value ) ) );
+   IRExpr* lo      = unop( Iop_64to32,
+                           unop( Iop_ReinterpF64asI64, value ) );
+
+   /* hi = (nan_mask & hi_orig) | (~nan_mask & hi_neg) */
+   return binop( Iop_32HLto64,
+                 binop( Iop_Or32,
+                        binop( Iop_And32, nan_mask, hi_orig ),
+                        binop( Iop_And32,
+                               unop( Iop_Not32, nan_mask ),
+                               hi_neg ) ),
+                 lo );
+}
+
+/*------------------------------------------------------------*/
+/*--- Floating Point Instruction Translation               ---*/
+/*------------------------------------------------------------*/
+
+/*
+  Floating Point Load Instructions
+
+  Decodes the D-form (lfs/lfsu/lfd/lfdu) and X-form (lfsx/lfsux/lfdx/
+  lfdux/lfiwax/lfiwzx) floating-point loads.  Single-precision loads
+  are widened to F64 on their way into the FPR; the "u" (update) forms
+  additionally write the effective address back to rA.  Returns False
+  for undecodable encodings.
+*/
+static Bool dis_fp_load ( UInt theInstr )
+{
+   /* X-Form, D-Form */
+   UChar opc1      = ifieldOPC(theInstr);
+   UChar frD_addr  = ifieldRegDS(theInstr);
+   UChar rA_addr   = ifieldRegA(theInstr);
+   UChar rB_addr   = ifieldRegB(theInstr);
+   UInt  opc2      = ifieldOPClo10(theInstr);
+   UChar b0        = ifieldBIT0(theInstr);
+   UInt  uimm16    = ifieldUIMM16(theInstr);
+
+   Int    simm16 = extend_s_16to32(uimm16);
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp EA     = newTemp(ty);
+   IRTemp rA     = newTemp(ty);
+   IRTemp rB     = newTemp(ty);
+   IRTemp iHi    = newTemp(Ity_I32);
+   IRTemp iLo    = newTemp(Ity_I32);
+
+   assign( rA, getIReg(rA_addr) );
+   assign( rB, getIReg(rB_addr) );
+
+   /* These are completely straightforward from a rounding and status
+      bits perspective: no rounding involved and no funny status or CR
+      bits affected. */
+
+   switch (opc1) {
+   case 0x30: // lfs (Load Float Single, PPC32 p441)
+      DIP("lfs fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
+      assign( EA, ea_rAor0_simm(rA_addr, simm16) );
+      putFReg( frD_addr,
+               unop(Iop_F32toF64, load(Ity_F32, mkexpr(EA))) );
+      break;
+
+   case 0x31: // lfsu (Load Float Single, Update, PPC32 p442)
+      if (rA_addr == 0)
+         return False;
+      DIP("lfsu fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
+      assign( EA, ea_rA_simm(rA_addr, simm16) );
+      putFReg( frD_addr,
+               unop(Iop_F32toF64, load(Ity_F32, mkexpr(EA))) );
+      putIReg( rA_addr, mkexpr(EA) );
+      break;
+      
+   case 0x32: // lfd (Load Float Double, PPC32 p437)
+      DIP("lfd fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
+      assign( EA, ea_rAor0_simm(rA_addr, simm16) );
+      putFReg( frD_addr, load(Ity_F64, mkexpr(EA)) );
+      break;
+
+   case 0x33: // lfdu (Load Float Double, Update, PPC32 p438)
+      if (rA_addr == 0)
+         return False;
+      DIP("lfdu fr%u,%d(r%u)\n", frD_addr, simm16, rA_addr);
+      assign( EA, ea_rA_simm(rA_addr, simm16) );
+      putFReg( frD_addr, load(Ity_F64, mkexpr(EA)) );
+      putIReg( rA_addr, mkexpr(EA) );
+      break;
+
+   case 0x1F:
+      if (b0 != 0) {
+         vex_printf("dis_fp_load(ppc)(instr,b0)\n");
+         return False;
+      }
+
+      switch(opc2) {
+      case 0x217: // lfsx (Load Float Single Indexed, PPC32 p444)
+         DIP("lfsx fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
+         assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
+         putFReg( frD_addr, unop( Iop_F32toF64, 
+                                  load(Ity_F32, mkexpr(EA))) );
+         break;
+         
+      case 0x237: // lfsux (Load Float Single, Update Indxd, PPC32 p443)
+         if (rA_addr == 0)
+            return False;
+         DIP("lfsux fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
+         assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
+         putFReg( frD_addr,
+                  unop(Iop_F32toF64, load(Ity_F32, mkexpr(EA))) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+         
+      case 0x257: // lfdx (Load Float Double Indexed, PPC32 p440)
+         DIP("lfdx fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
+         assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
+         putFReg( frD_addr, load(Ity_F64, mkexpr(EA)) );
+         break;
+         
+      case 0x277: // lfdux (Load Float Double, Update Indxd, PPC32 p439)
+         if (rA_addr == 0)
+            return False;
+         DIP("lfdux fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
+         assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
+         putFReg( frD_addr, load(Ity_F64, mkexpr(EA)) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+         
+      case 0x357: // lfiwax (Load Float As Integer, Indxd, ISA 2.05 p120)
+         DIP("lfiwax fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
+         assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
+         assign( iLo, load(Ity_I32, mkexpr(EA)) );
+         /* Sign-extend the loaded word: iHi = 0 - (iLo >> 31). */
+         assign( iHi, binop(Iop_Sub32,
+                            mkU32(0),
+                            binop(Iop_Shr32, mkexpr(iLo), mkU8(31)))  );
+         putFReg( frD_addr, unop(Iop_ReinterpI64asF64,
+                                 binop(Iop_32HLto64, mkexpr(iHi), mkexpr(iLo))) );
+         break;
+
+      case 0x377: // lfiwzx (Load floating-point as integer word, zero indexed)
+      {
+         IRTemp dw = newTemp( Ity_I64 );
+         DIP("lfiwzx fr%u,r%u,r%u\n", frD_addr, rA_addr, rB_addr);
+         assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
+         assign( iLo, load(Ity_I32, mkexpr(EA)) );
+         /* Zero-extend the loaded word into the FPR. */
+         assign( dw, binop( Iop_32HLto64, mkU32( 0 ), mkexpr( iLo ) ) );
+         putFReg( frD_addr, unop( Iop_ReinterpI64asF64, mkexpr( dw ) ) );
+         break;
+      }
+
+      default:
+         vex_printf("dis_fp_load(ppc)(opc2)\n");
+         return False;
+      }
+      break;
+
+   default:
+      vex_printf("dis_fp_load(ppc)(opc1)\n");
+      return False;
+   }
+   return True;
+}
+
+
+
+/*
+  Floating Point Store Instructions
+
+  Decodes the D-form (stfs/stfsu/stfd/stfdu) and X-form (stfsx/stfsux/
+  stfdx/stfdux/stfiwx) floating-point stores.  Single-precision stores
+  truncate/denormalise (not round) the F64 register value; the "u"
+  (update) forms additionally write the effective address back to rA.
+  Returns False for undecodable encodings.
+*/
+static Bool dis_fp_store ( UInt theInstr )
+{
+   /* X-Form, D-Form */
+   UChar opc1      = ifieldOPC(theInstr);
+   UChar frS_addr  = ifieldRegDS(theInstr);
+   UChar rA_addr   = ifieldRegA(theInstr);
+   UChar rB_addr   = ifieldRegB(theInstr);
+   UInt  opc2      = ifieldOPClo10(theInstr);
+   UChar b0        = ifieldBIT0(theInstr);
+   Int   uimm16    = ifieldUIMM16(theInstr);
+
+   Int    simm16 = extend_s_16to32(uimm16);
+   IRTemp frS    = newTemp(Ity_F64);
+   IRType ty     = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp EA     = newTemp(ty);
+   IRTemp rA     = newTemp(ty);
+   IRTemp rB     = newTemp(ty);
+
+   assign( frS, getFReg(frS_addr) );
+   assign( rA,  getIReg(rA_addr) );
+   assign( rB,  getIReg(rB_addr) );
+
+   /* These are straightforward from a status bits perspective: no
+      funny status or CR bits affected.  For single precision stores,
+      the values are truncated and denormalised (not rounded) to turn
+      them into single precision values. */
+
+   switch (opc1) {
+
+   case 0x34: // stfs (Store Float Single, PPC32 p518)
+      DIP("stfs fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr);
+      assign( EA, ea_rAor0_simm(rA_addr, simm16) );
+      /* Use Iop_TruncF64asF32 to truncate and possible denormalise
+         the value to be stored in the correct way, without any
+         rounding. */
+      store( mkexpr(EA), unop(Iop_TruncF64asF32, mkexpr(frS)) );
+      break;
+
+   case 0x35: // stfsu (Store Float Single, Update, PPC32 p519)
+      if (rA_addr == 0)
+         return False;
+      DIP("stfsu fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr);
+      assign( EA, ea_rA_simm(rA_addr, simm16) );
+      /* See comment for stfs */
+      store( mkexpr(EA), unop(Iop_TruncF64asF32, mkexpr(frS)) );
+      putIReg( rA_addr, mkexpr(EA) );
+      break;
+
+   case 0x36: // stfd (Store Float Double, PPC32 p513)
+      DIP("stfd fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr);
+      assign( EA, ea_rAor0_simm(rA_addr, simm16) );
+      store( mkexpr(EA), mkexpr(frS) );
+      break;
+
+   case 0x37: // stfdu (Store Float Double, Update, PPC32 p514)
+      if (rA_addr == 0)
+         return False;
+      DIP("stfdu fr%u,%d(r%u)\n", frS_addr, simm16, rA_addr);
+      assign( EA, ea_rA_simm(rA_addr, simm16) );
+      store( mkexpr(EA), mkexpr(frS) );
+      putIReg( rA_addr, mkexpr(EA) );
+      break;
+
+   case 0x1F:
+      if (b0 != 0) {
+         vex_printf("dis_fp_store(ppc)(instr,b0)\n");
+         return False;
+      }
+      switch(opc2) {
+      case 0x297: // stfsx (Store Float Single Indexed, PPC32 p521)
+         DIP("stfsx fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
+         assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
+         /* See note for stfs */
+         store( mkexpr(EA),
+                unop(Iop_TruncF64asF32, mkexpr(frS)) );
+         break;
+         
+      case 0x2B7: // stfsux (Store Float Sgl, Update Indxd, PPC32 p520)
+         if (rA_addr == 0)
+            return False;
+         DIP("stfsux fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
+         assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
+         /* See note for stfs */
+         store( mkexpr(EA), unop(Iop_TruncF64asF32, mkexpr(frS)) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+
+      case 0x2D7: // stfdx (Store Float Double Indexed, PPC32 p516)
+         DIP("stfdx fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
+         assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
+         store( mkexpr(EA), mkexpr(frS) );
+         break;
+         
+      case 0x2F7: // stfdux (Store Float Dbl, Update Indxd, PPC32 p515)
+         if (rA_addr == 0)
+            return False;
+         DIP("stfdux fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
+         assign( EA, ea_rA_idxd(rA_addr, rB_addr) );
+         store( mkexpr(EA), mkexpr(frS) );
+         putIReg( rA_addr, mkexpr(EA) );
+         break;
+
+      case 0x3D7: // stfiwx (Store Float as Int, Indexed, PPC32 p517)
+         // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
+         DIP("stfiwx fr%u,r%u,r%u\n", frS_addr, rA_addr, rB_addr);
+         assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
+         /* Store the low 32 bits of the raw FPR image, unconverted. */
+         store( mkexpr(EA),
+                unop(Iop_64to32, unop(Iop_ReinterpF64asI64, mkexpr(frS))) );
+         break;
+
+      default:
+         vex_printf("dis_fp_store(ppc)(opc2)\n");
+         return False;
+      }
+      break;
+
+   default:
+      vex_printf("dis_fp_store(ppc)(opc1)\n");
+      return False;
+   }
+   return True;
+}
+
+
+
+/*
+  Floating Point Arith Instructions
+
+  Decodes the A-form FP arithmetic instructions: the single-precision
+  group (opc1 == 0x3B: fdivs/fsubs/fadds/fsqrts/fres/fmuls/frsqrtes)
+  and the double-precision group (opc1 == 0x3F: fdiv/fsub/fadd/fsqrt/
+  fsel/fre/fmul/frsqrte).  The ".", i.e. Rc=1, forms clear CR1 since
+  FP exceptions are not modelled.  Returns False for undecodable
+  encodings.
+*/
+static Bool dis_fp_arith ( UInt theInstr )
+{
+   /* A-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar frD_addr = ifieldRegDS(theInstr);
+   UChar frA_addr = ifieldRegA(theInstr);
+   UChar frB_addr = ifieldRegB(theInstr);
+   UChar frC_addr = ifieldRegC(theInstr);
+   UChar opc2     = ifieldOPClo5(theInstr);
+   UChar flag_rC  = ifieldBIT0(theInstr);
+
+   IRTemp  frD = newTemp(Ity_F64);
+   IRTemp  frA = newTemp(Ity_F64);
+   IRTemp  frB = newTemp(Ity_F64);
+   IRTemp  frC = newTemp(Ity_F64);
+   IRExpr* rm  = get_IR_roundingmode();
+
+   /* By default, we will examine the results of the operation and set
+      fpscr[FPRF] accordingly. */
+   Bool set_FPRF = True;
+
+   /* By default, if flag_RC is set, we will clear cr1 after the
+      operation.  In reality we should set cr1 to indicate the
+      exception status of the operation, but since we're not
+      simulating exceptions, the exception status will appear to be
+      zero.  Hence cr1 should be cleared if this is a . form insn. */
+   Bool clear_CR1 = True;
+
+   assign( frA, getFReg(frA_addr));
+   assign( frB, getFReg(frB_addr));
+   assign( frC, getFReg(frC_addr));
+
+   switch (opc1) {
+   case 0x3B:
+      switch (opc2) {
+      case 0x12: // fdivs (Floating Divide Single, PPC32 p407)
+         if (frC_addr != 0)
+            return False;
+         DIP("fdivs%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frB_addr);
+         assign( frD, triop( Iop_DivF64r32, 
+                             rm, mkexpr(frA), mkexpr(frB) ));
+         break;
+
+      case 0x14: // fsubs (Floating Subtract Single, PPC32 p430)
+         if (frC_addr != 0)
+            return False;
+         DIP("fsubs%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frB_addr);
+         assign( frD, triop( Iop_SubF64r32, 
+                             rm, mkexpr(frA), mkexpr(frB) ));
+         break;
+
+      case 0x15: // fadds (Floating Add Single, PPC32 p401)
+         if (frC_addr != 0)
+            return False;
+         DIP("fadds%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frB_addr);
+         assign( frD, triop( Iop_AddF64r32, 
+                             rm, mkexpr(frA), mkexpr(frB) ));
+         break;
+
+      case 0x16: // fsqrts (Floating SqRt (Single-Precision), PPC32 p428)
+         // NOTE: POWERPC OPTIONAL, "General-Purpose Group" (PPC32_FX)
+         if (frA_addr != 0 || frC_addr != 0)
+            return False;
+         DIP("fsqrts%s fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frB_addr);
+         // however illogically, on ppc970 this insn behaves identically
+         // to fsqrt (double-precision).  So use SqrtF64, not SqrtF64r32.
+         assign( frD, binop( Iop_SqrtF64, rm, mkexpr(frB) ));
+         break;
+
+      case 0x18: // fres (Floating Reciprocal Estimate Single, PPC32 p421)
+         // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
+         if (frA_addr != 0 || frC_addr != 0)
+            return False;
+         DIP("fres%s fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frB_addr);
+         /* Modelled exactly, as 1.0 / frB, not as an estimate. */
+         { IRExpr* ieee_one
+              = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
+           assign( frD, triop( Iop_DivF64r32, 
+                               rm,
+                               ieee_one, mkexpr(frB) ));
+         }
+         break;
+
+      case 0x19: // fmuls (Floating Multiply Single, PPC32 p414)
+         if (frB_addr != 0)
+            return False;
+         DIP("fmuls%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frC_addr);
+         assign( frD, triop( Iop_MulF64r32,
+                             rm, mkexpr(frA), mkexpr(frC) ));
+         break;
+
+      case 0x1A: // frsqrtes (Floating Recip SqRt Est Single)
+         // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
+         // Undocumented instruction?
+         if (frA_addr != 0 || frC_addr != 0)
+            return False;
+         DIP("frsqrtes%s fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frB_addr);
+         assign( frD, unop(Iop_RSqrtEst5GoodF64, mkexpr(frB)) );
+         break;
+
+      default:
+         vex_printf("dis_fp_arith(ppc)(3B: opc2)\n");
+         return False;
+      }
+      break;
+
+   case 0x3F:
+      switch (opc2) {           
+      case 0x12: // fdiv (Floating Div (Double-Precision), PPC32 p406)
+         if (frC_addr != 0)
+            return False;
+         DIP("fdiv%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frB_addr);
+         assign( frD, triop(Iop_DivF64, rm, mkexpr(frA), mkexpr(frB)) );
+         break;
+
+      case 0x14: // fsub (Floating Sub (Double-Precision), PPC32 p429)
+         if (frC_addr != 0)
+            return False;
+         DIP("fsub%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frB_addr);
+         assign( frD, triop(Iop_SubF64, rm, mkexpr(frA), mkexpr(frB)) );
+         break;
+
+      case 0x15: // fadd (Floating Add (Double-Precision), PPC32 p400)
+         if (frC_addr != 0)
+            return False;
+         DIP("fadd%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frB_addr);
+         assign( frD, triop(Iop_AddF64, rm, mkexpr(frA), mkexpr(frB)) );
+         break;
+
+      case 0x16: // fsqrt (Floating SqRt (Double-Precision), PPC32 p427)
+         // NOTE: POWERPC OPTIONAL, "General-Purpose Group" (PPC32_FX)
+         if (frA_addr != 0 || frC_addr != 0)
+            return False;
+         DIP("fsqrt%s fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frB_addr);
+         assign( frD, binop(Iop_SqrtF64, rm, mkexpr(frB)) );
+         break;
+
+      case 0x17: { // fsel (Floating Select, PPC32 p426)
+         // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
+         IRTemp cc    = newTemp(Ity_I32);
+         IRTemp cc_b0 = newTemp(Ity_I32);
+
+         DIP("fsel%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frC_addr, frB_addr);
+
+         // cc: UN == 0x41, LT == 0x01, GT == 0x00, EQ == 0x40
+         // => GT|EQ == (cc & 0x1 == 0)
+         assign( cc, binop(Iop_CmpF64, mkexpr(frA),
+                                       IRExpr_Const(IRConst_F64(0))) );
+         assign( cc_b0, binop(Iop_And32, mkexpr(cc), mkU32(1)) );
+
+         // frD = (frA >= 0.0) ? frC : frB
+         //     = (cc_b0 == 0) ? frC : frB
+         assign( frD,
+                 IRExpr_ITE(
+                    binop(Iop_CmpEQ32, mkexpr(cc_b0), mkU32(0)),
+                    mkexpr(frC),
+                    mkexpr(frB) ));
+
+         /* One of the rare ones which don't mess with FPRF */
+         set_FPRF = False;
+         break;
+      }
+
+      case 0x18: // fre (Floating Reciprocal Estimate)
+         // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
+         // Note: unclear whether this insn really exists or not
+         // ppc970 doesn't have it, but POWER5 does
+         if (frA_addr != 0 || frC_addr != 0)
+            return False;
+         DIP("fre%s fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frB_addr);
+         /* Modelled exactly, as 1.0 / frB, not as an estimate. */
+         { IRExpr* ieee_one
+              = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
+           assign( frD, triop( Iop_DivF64, 
+                               rm,
+                               ieee_one, mkexpr(frB) ));
+         }
+         break;
+
+      case 0x19: // fmul (Floating Mult (Double Precision), PPC32 p413)
+         if (frB_addr != 0)
+            vex_printf("dis_fp_arith(ppc)(instr,fmul)\n");
+         DIP("fmul%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frC_addr);
+         assign( frD, triop(Iop_MulF64, rm, mkexpr(frA), mkexpr(frC)) );
+         break;
+
+      case 0x1A: // frsqrte (Floating Recip SqRt Est., PPC32 p424)
+         // NOTE: POWERPC OPTIONAL, "Graphics Group" (PPC32_GX)
+         if (frA_addr != 0 || frC_addr != 0)
+            return False;
+         DIP("frsqrte%s fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frB_addr);
+         assign( frD, unop(Iop_RSqrtEst5GoodF64, mkexpr(frB)) );
+         break;
+
+      default:
+         vex_printf("dis_fp_arith(ppc)(3F: opc2)\n");
+         return False;
+      }
+      break;
+
+   default:
+      vex_printf("dis_fp_arith(ppc)(opc1)\n");
+      return False;
+   }
+
+   putFReg( frD_addr, mkexpr(frD) );
+
+   if (set_FPRF) {
+      // XXX XXX XXX FIXME
+      // set FPRF from frD
+   }
+
+   /* Rc=1: clear CR1, since we model no FP exceptions. */
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8(0) );
+      putCR0( 1, mkU8(0) );
+   }
+
+   return True;
+}
+
+
+
+/*
+  Floating Point Mult-Add Instructions
+*/
+static Bool dis_fp_multadd ( UInt theInstr )
+{
+   /* A-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar frD_addr = ifieldRegDS(theInstr);
+   UChar frA_addr = ifieldRegA(theInstr);
+   UChar frB_addr = ifieldRegB(theInstr);
+   UChar frC_addr = ifieldRegC(theInstr);
+   UChar opc2     = ifieldOPClo5(theInstr);
+   UChar flag_rC  = ifieldBIT0(theInstr);
+
+   IRTemp  frD = newTemp(Ity_F64);
+   IRTemp  frA = newTemp(Ity_F64);
+   IRTemp  frB = newTemp(Ity_F64);
+   IRTemp  frC = newTemp(Ity_F64);
+   IRTemp  rmt = newTemp(Ity_I32);
+   IRTemp  tmp = newTemp(Ity_F64);
+   IRTemp  sign_tmp = newTemp(Ity_I64);
+   IRTemp  nan_mask = newTemp(Ity_I32);
+   IRExpr* rm;
+
+   /* By default, we will examine the results of the operation and set
+      fpscr[FPRF] accordingly. */
+   Bool set_FPRF = True;
+
+   /* By default, if flag_RC is set, we will clear cr1 after the
+      operation.  In reality we should set cr1 to indicate the
+      exception status of the operation, but since we're not
+      simulating exceptions, the exception status will appear to be
+      zero.  Hence cr1 should be cleared if this is a . form insn. */
+   Bool clear_CR1 = True;
+
+   /* Bind the rounding mode expression to a temp; there's no
+      point in creating gratuitous CSEs, as we know we'll need 
+      to use it twice. */
+   assign( rmt, get_IR_roundingmode() );
+   rm = mkexpr(rmt);
+
+   assign( frA, getFReg(frA_addr));
+   assign( frB, getFReg(frB_addr));
+   assign( frC, getFReg(frC_addr));
+
+   /* The rounding in this is all a bit dodgy.  The idea is to only do
+      one rounding.  That clearly isn't achieveable without dedicated
+      four-input IR primops, although in the single precision case we
+      can sort-of simulate it by doing the inner multiply in double
+      precision. 
+
+      In the negated cases, the negation happens after rounding. */
+
+   switch (opc1) {
+   case 0x3B:
+      switch (opc2) {
+      case 0x1C: // fmsubs (Floating Mult-Subtr Single, PPC32 p412)
+         DIP("fmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frC_addr, frB_addr);
+         assign( frD, qop( Iop_MSubF64r32, rm,
+                           mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
+         break;
+
+      case 0x1D: // fmadds (Floating Mult-Add Single, PPC32 p409)
+         DIP("fmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frC_addr, frB_addr);
+         assign( frD, qop( Iop_MAddF64r32, rm,
+                           mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
+         break;
+
+      case 0x1E: // fnmsubs (Float Neg Mult-Subtr Single, PPC32 p420)
+      case 0x1F: // fnmadds (Floating Negative Multiply-Add Single, PPC32 p418)
+
+         if (opc2 == 0x1E) {
+            DIP("fnmsubs%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+                     frD_addr, frA_addr, frC_addr, frB_addr);
+            assign( tmp, qop( Iop_MSubF64r32, rm,
+                              mkexpr(frA), mkexpr(frC), mkexpr(frB) ) );
+         } else {
+            DIP("fnmadds%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+                     frD_addr, frA_addr, frC_addr, frB_addr);
+            assign( tmp, qop( Iop_MAddF64r32, rm,
+                              mkexpr(frA), mkexpr(frC), mkexpr(frB) ) );
+         }
+
+         assign( nan_mask, Check_NaN( mkexpr( tmp ),
+                                      mkU32( NANmaskSingle ) ) );
+         assign( sign_tmp, Complement_non_NaN( mkexpr( tmp ),
+                                               mkexpr( nan_mask ) ) );
+         assign( frD, unop( Iop_ReinterpI64asF64, mkexpr( sign_tmp ) ) ); 
+         break;
+
+      default:
+         vex_printf("dis_fp_multadd(ppc)(3B: opc2)\n");
+         return False;
+      }
+      break;
+
+   case 0x3F:
+      switch (opc2) {           
+      case 0x1C: // fmsub (Float Mult-Sub (Dbl Precision), PPC32 p411)
+         DIP("fmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frC_addr, frB_addr);
+         assign( frD, qop( Iop_MSubF64, rm,
+                           mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
+         break;
+
+      case 0x1D: // fmadd (Float Mult-Add (Dbl Precision), PPC32 p408)
+         DIP("fmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+             frD_addr, frA_addr, frC_addr, frB_addr);
+         assign( frD, qop( Iop_MAddF64, rm,
+                           mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
+         break;
+
+      case 0x1E: // fnmsub (Float Neg Mult-Subtr (Dbl Precision), PPC32 p419)
+      case 0x1F: // fnmadd (Float Neg Mult-Add (Dbl Precision), PPC32 p417)
+
+         if (opc2 == 0x1E) {
+            DIP("fnmsub%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+                     frD_addr, frA_addr, frC_addr, frB_addr);
+            assign( tmp, qop( Iop_MSubF64, rm,
+                              mkexpr(frA), mkexpr(frC), mkexpr(frB) ) );
+         } else {
+            DIP("fnmadd%s fr%u,fr%u,fr%u,fr%u\n", flag_rC ? ".":"",
+                     frD_addr, frA_addr, frC_addr, frB_addr);
+            assign( tmp, qop( Iop_MAddF64, rm,
+                              mkexpr(frA), mkexpr(frC), mkexpr(frB) ));
+         }
+
+         assign( nan_mask, Check_NaN( mkexpr( tmp ),
+                                      mkU32( NANmaskDouble ) ) );
+         assign( sign_tmp, Complement_non_NaN( mkexpr( tmp ),
+                                               mkexpr( nan_mask ) ) );
+         assign( frD, unop( Iop_ReinterpI64asF64, mkexpr( sign_tmp ) ) ); 
+         break;
+
+      default:
+         vex_printf("dis_fp_multadd(ppc)(3F: opc2)\n");
+         return False;
+      }
+      break;
+
+   default:
+      vex_printf("dis_fp_multadd(ppc)(opc1)\n");
+      return False;
+   }
+
+   putFReg( frD_addr, mkexpr(frD) );
+
+   if (set_FPRF) {
+      // XXX XXX XXX FIXME
+      // set FPRF from frD
+   }
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8(0) );
+      putCR0( 1, mkU8(0) );
+   }
+
+   return True;
+}
+
+/*
+ * fe_flag is set to 1 if any of the following conditions occurs:
+ *  - The floating-point operand in register FRB is a Zero, a
+ *    NaN, an Infinity, or a negative value.
+ *  - e_b is less than or equal to: -970 for double precision; -103 for single precision
+ *  Otherwise fe_flag is set to 0.
+ *
+ * fg_flag is set to 1 if the following condition occurs:
+ *   - The floating-point operand in register FRB is a Zero, an
+ *     Infinity, or a denormalized value.
+ *  Otherwise fg_flag is set to 0.
+ *
+ */
+/* Emit IR computing the ftsqrt fe_flag/fg_flag pair described in the
+ * comment above.  frB_Int holds the raw bits of FRB (Ity_I64 when sp
+ * is False, Ity_I32 when sp is True).  On return, *fe_flag_tmp and
+ * *fg_flag_tmp are Ity_I32 temps whose run-time value is 0 or 1.
+ */
+static void do_fp_tsqrt(IRTemp frB_Int, Bool sp, IRTemp * fe_flag_tmp, IRTemp * fg_flag_tmp)
+{
+   // The following temps are for holding intermediate results
+   IRTemp e_b = newTemp(Ity_I32);
+   IRExpr * fe_flag,  * fg_flag;
+   IRTemp frB_exp_shR = newTemp(Ity_I32);
+   UInt bias = sp? 127 : 1023;   // IEEE754 exponent bias
+   IRExpr * frbNaN, * frbDenorm, * frBNeg;
+   IRExpr * eb_LTE;
+   IRTemp  frbZero_tmp = newTemp(Ity_I1);
+   IRTemp  frbInf_tmp = newTemp(Ity_I1);
+   *fe_flag_tmp = newTemp(Ity_I32);
+   *fg_flag_tmp = newTemp(Ity_I32);
+   // frB_exp_shR = biased exponent of FRB; e_b = unbiased exponent.
+   assign( frB_exp_shR, fp_exp_part( frB_Int, sp ) );
+   assign(e_b, binop( Iop_Sub32, mkexpr(frB_exp_shR), mkU32( bias ) ));
+
+   //////////////////  fe_flag tests BEGIN //////////////////////
+   /* We first do all tests that may result in setting fe_flag to '1'.
+    * (NOTE: These tests are similar to those used for ftdiv.  See do_fp_tdiv()
+    * for details.)
+    */
+   frbNaN = sp ? is_NaN_32(frB_Int) : is_NaN(frB_Int);
+   assign( frbInf_tmp, is_Inf(frB_Int, sp) );
+   assign( frbZero_tmp, is_Zero(frB_Int, sp ) );
+   {
+      // Test_value = -970 for double precision
+      // (0xffffff99 == -103, 0xfffffc36 == -970 as 32-bit two's complement)
+      UInt test_value = sp ? 0xffffff99 : 0xfffffc36;
+      eb_LTE = binop( Iop_CmpLE32S, mkexpr( e_b ), mkU32( test_value ) );
+   }
+   // FRB is negative iff the sign bit (bit 31 of the relevant word) is set.
+   frBNeg = binop( Iop_CmpEQ32,
+                   binop( Iop_Shr32,
+                          sp ? mkexpr( frB_Int ) : unop( Iop_64HIto32, mkexpr( frB_Int ) ),
+                          mkU8( 31 ) ),
+                   mkU32( 1 ) );
+   //////////////////  fe_flag tests END //////////////////////
+
+   //////////////////  fg_flag tests BEGIN //////////////////////
+   /*
+    * The following tests were already performed above in the fe_flag
+    * tests.  So these conditions will result in both fe_ and fg_ flags
+    * being set.
+    *   - Test if FRB is Zero
+    *   - Test if FRB is an Infinity
+    */
+
+   /*
+    * Test if FRB holds a denormalized value.  A denormalized value is one where
+    * the exp is 0 and the fraction is non-zero.
+    */
+   if (sp) {
+      IRTemp frac_part = newTemp(Ity_I32);
+      assign( frac_part, binop( Iop_And32, mkexpr(frB_Int), mkU32(0x007fffff)) );
+      frbDenorm
+               = mkAND1( binop( Iop_CmpEQ32, mkexpr( frB_exp_shR ), mkU32( 0 ) ),
+                         binop( Iop_CmpNE32, mkexpr( frac_part ), mkU32( 0 ) ) );
+   } else {
+      IRExpr * hi32, * low32, * fraction_is_nonzero;
+      IRTemp frac_part = newTemp(Ity_I64);
+
+      // 64-bit fraction: OR the two 32-bit halves and compare with zero.
+      assign( frac_part, FP_FRAC_PART(frB_Int) );
+      hi32 = unop( Iop_64HIto32, mkexpr( frac_part ) );
+      low32 = unop( Iop_64to32, mkexpr( frac_part ) );
+      fraction_is_nonzero = binop( Iop_CmpNE32, binop( Iop_Or32, low32, hi32 ),
+                                                mkU32( 0 ) );
+      frbDenorm
+               = mkAND1( binop( Iop_CmpEQ32, mkexpr( frB_exp_shR ), mkU32( 0 ) ),
+                         fraction_is_nonzero );
+   }
+   //////////////////  fg_flag tests END //////////////////////
+
+   /////////////////////////
+   // fe_flag = frbZero | frbNaN | frbInf | frBNeg | eb_LTE
+   fe_flag = mkOR1( mkexpr( frbZero_tmp ),
+                    mkOR1( frbNaN,
+                           mkOR1( mkexpr( frbInf_tmp ),
+                                  mkOR1( frBNeg, eb_LTE ) ) ) );
+
+   fe_flag = unop(Iop_1Uto32, fe_flag);
+
+   // fg_flag = frbZero | frbInf | frbDenorm
+   fg_flag = mkOR1( mkexpr( frbZero_tmp ),
+                    mkOR1( mkexpr( frbInf_tmp ), frbDenorm ) );
+   fg_flag = unop(Iop_1Uto32, fg_flag);
+   assign (*fg_flag_tmp, fg_flag);
+   assign (*fe_flag_tmp, fe_flag);
+}
+/*
+ * fe_flag is set to 1 if any of the following conditions occurs:
+ *  - The double-precision floating-point operand in register FRA is a NaN or an
+ *    Infinity.
+ *  - The double-precision floating-point operand in register FRB is a Zero, a
+ *    NaN, or an Infinity.
+ *  - e_b is less than or equal to -1022.
+ *  - e_b is greater than or equal to 1021.
+ *  - The double-precision floating-point operand in register FRA is not a zero
+ *    and the difference, e_a - e_b, is greater than or equal to 1023.
+ *  - The double-precision floating-point operand in register FRA is not a zero
+ *    and the difference, e_a - e_b, is less than or equal to -1021.
+ *  - The double-precision floating-point operand in register FRA is not a zero
+ *    and e_a is less than or equal to -970
+ *  Otherwise fe_flag is set to 0.
+ *
+ * fg_flag is set to 1 if either of the following conditions occurs.
+ *   - The double-precision floating-point operand in register FRA is an Infinity.
+ *   - The double-precision floating-point operand in register FRB is a Zero, an
+ *     Infinity, or a denormalized value.
+ *  Otherwise fg_flag is set to 0.
+ *
+ */
+/* Emit IR computing the ftdiv fe_flag/fg_flag pair described in the
+ * comment above.  frA_int/frB_int hold the raw bits of FRA/FRB
+ * (Ity_I64 when sp is False, Ity_I32 when sp is True).  On return,
+ * *fe_flag_tmp and *fg_flag_tmp are Ity_I32 temps whose run-time
+ * value is 0 or 1.
+ */
+static void _do_fp_tdiv(IRTemp frA_int, IRTemp frB_int, Bool sp, IRTemp * fe_flag_tmp, IRTemp * fg_flag_tmp)
+{
+   // The following temps are for holding intermediate results
+   IRTemp e_a = newTemp(Ity_I32);
+   IRTemp e_b = newTemp(Ity_I32);
+   IRTemp frA_exp_shR = newTemp(Ity_I32);
+   IRTemp frB_exp_shR = newTemp(Ity_I32);
+
+   UInt bias = sp? 127 : 1023;   // IEEE754 exponent bias
+   *fe_flag_tmp = newTemp(Ity_I32);
+   *fg_flag_tmp = newTemp(Ity_I32);
+
+   /* The following variables hold boolean results from tests
+    * that are OR'ed together for setting the fe_ and fg_ flags.
+    * For some cases, the booleans are used more than once, so
+    * I make those IRTemp's instead of IRExpr's.
+    */
+   IRExpr * fraNaN, * frbNaN, * frbDenorm;
+   IRExpr * eb_LTE, * eb_GTE, * ea_eb_GTE, * ea_eb_LTE, * ea_LTE;
+   IRTemp  fraInf_tmp = newTemp(Ity_I1);
+   IRTemp  frbZero_tmp = newTemp(Ity_I1);
+   IRTemp  frbInf_tmp = newTemp(Ity_I1);
+   IRTemp  fraNotZero_tmp = newTemp(Ity_I1);
+
+/* The following are the flags that are set by OR'ing the results of
+ * all the tests done for tdiv.  These flags are the input to the specified CR.
+ */
+   IRExpr * fe_flag, * fg_flag;
+
+   // Create temps that will be used throughout the following tests.
+   assign( frA_exp_shR, fp_exp_part( frA_int, sp ) );
+   assign( frB_exp_shR, fp_exp_part( frB_int, sp ) );
+   /* Let e_[a|b] be the unbiased exponent: i.e. exp - 1023. */
+   assign(e_a, binop( Iop_Sub32, mkexpr(frA_exp_shR), mkU32( bias ) ));
+   assign(e_b, binop( Iop_Sub32, mkexpr(frB_exp_shR), mkU32( bias ) ));
+
+
+   //////////////////  fe_flag tests BEGIN //////////////////////
+   /* We first do all tests that may result in setting fe_flag to '1'. */
+
+   /*
+    * Test if the double-precision floating-point operand in register FRA is
+    * a NaN:
+    */
+   fraNaN = sp ? is_NaN_32(frA_int) : is_NaN(frA_int);
+   /*
+    * Test if the double-precision floating-point operand in register FRA is
+    * an Infinity.
+    */
+   assign(fraInf_tmp, is_Inf(frA_int, sp));
+
+   /*
+    * Test if the double-precision floating-point operand in register FRB is
+    * a NaN:
+    */
+   frbNaN = sp ? is_NaN_32(frB_int) : is_NaN(frB_int);
+   /*
+    * Test if the double-precision floating-point operand in register FRB is
+    * an Infinity.
+    */
+   assign( frbInf_tmp, is_Inf(frB_int, sp) );
+   /*
+    * Test if the double-precision floating-point operand in register FRB is
+    * a Zero.
+    */
+   assign( frbZero_tmp, is_Zero(frB_int, sp) );
+
+   /*
+    * Test if e_b <= -1022 for double precision;
+    * or e_b <= -126 for single precision
+    */
+   {
+      // 0xffffff82 == -126, 0xfffffc02 == -1022 as 32-bit two's complement
+      UInt test_value = sp ? 0xffffff82 : 0xfffffc02;
+      eb_LTE = binop(Iop_CmpLE32S, mkexpr(e_b), mkU32(test_value));
+   }
+
+   /*
+    * Test if e_b >= 1021 (i.e., 1021 < e_b) for double precision;
+    * or e_b >= -125 (125 < e_b) for single precision
+    */
+   {
+      Int test_value = sp ? 125 : 1021;
+      eb_GTE = binop(Iop_CmpLT32S, mkU32(test_value), mkexpr(e_b));
+   }
+
+   /*
+    * Test if FRA != Zero and (e_a - e_b) >= bias
+    */
+   assign( fraNotZero_tmp, unop( Iop_Not1, is_Zero( frA_int, sp ) ) );
+   ea_eb_GTE = mkAND1( mkexpr( fraNotZero_tmp ),
+                       binop( Iop_CmpLT32S, mkU32( bias ),
+                              binop( Iop_Sub32, mkexpr( e_a ),
+                                     mkexpr( e_b ) ) ) );
+
+   /*
+    * Test if FRA != Zero and (e_a - e_b) <= [-1021 (double precision) or -125 (single precision)]
+    */
+   {
+      // 0xffffff83 == -125, 0xfffffc03 == -1021 as 32-bit two's complement
+      UInt test_value = sp ? 0xffffff83 : 0xfffffc03;
+
+      ea_eb_LTE = mkAND1( mkexpr( fraNotZero_tmp ),
+                          binop( Iop_CmpLE32S,
+                                 binop( Iop_Sub32,
+                                        mkexpr( e_a ),
+                                        mkexpr( e_b ) ),
+                                        mkU32( test_value ) ) );
+   }
+
+   /*
+    * Test if FRA != Zero and e_a <= [-970 (double precision) or -103 (single precision)]
+    */
+   {
+      UInt test_value = 0xfffffc36;  //Int test_value = -970;
+      // NOTE(review): the comment above says -103 should apply for single
+      // precision, but -970 is used for both sp and dp here -- confirm intent.
+
+      ea_LTE = mkAND1( mkexpr( fraNotZero_tmp ), binop( Iop_CmpLE32S,
+                                                        mkexpr( e_a ),
+                                                        mkU32( test_value ) ) );
+   }
+   //////////////////  fe_flag tests END //////////////////////
+
+   //////////////////  fg_flag tests BEGIN //////////////////////
+   /*
+    * The following tests were already performed above in the fe_flag
+    * tests.  So these conditions will result in both fe_ and fg_ flags
+    * being set.
+    *   - Test if FRA is an Infinity
+    *   - Test if FRB is Zero
+    *   - Test if FRB is an Infinity
+    */
+
+   /*
+    * Test if FRB holds a denormalized value.  A denormalized value is one where
+    * the exp is 0 and the fraction is non-zero.
+    */
+   {
+      IRExpr * fraction_is_nonzero;
+
+      if (sp) {
+         fraction_is_nonzero = binop( Iop_CmpNE32, FP_FRAC_PART32(frB_int),
+                                      mkU32( 0 ) );
+      } else {
+         IRExpr * hi32, * low32;
+         IRTemp frac_part = newTemp(Ity_I64);
+         assign( frac_part, FP_FRAC_PART(frB_int) );
+
+         // 64-bit fraction: OR the two 32-bit halves and compare with zero.
+         hi32 = unop( Iop_64HIto32, mkexpr( frac_part ) );
+         low32 = unop( Iop_64to32, mkexpr( frac_part ) );
+         fraction_is_nonzero = binop( Iop_CmpNE32, binop( Iop_Or32, low32, hi32 ),
+                                      mkU32( 0 ) );
+      }
+      frbDenorm = mkAND1( binop( Iop_CmpEQ32, mkexpr( frB_exp_shR ),
+                                 mkU32( 0x0 ) ), fraction_is_nonzero );
+
+   }
+   //////////////////  fg_flag tests END //////////////////////
+
+   // fe_flag = fraNaN | fraInf | frbZero | frbNaN | frbInf
+   //           | eb_LTE | eb_GTE | ea_eb_GTE | ea_eb_LTE | ea_LTE
+   fe_flag
+   = mkOR1(
+            fraNaN,
+            mkOR1(
+                   mkexpr( fraInf_tmp ),
+                   mkOR1(
+                          mkexpr( frbZero_tmp ),
+                          mkOR1(
+                                 frbNaN,
+                                 mkOR1(
+                                        mkexpr( frbInf_tmp ),
+                                        mkOR1( eb_LTE,
+                                               mkOR1( eb_GTE,
+                                                      mkOR1( ea_eb_GTE,
+                                                             mkOR1( ea_eb_LTE,
+                                                                    ea_LTE ) ) ) ) ) ) ) ) );
+
+   fe_flag = unop(Iop_1Uto32, fe_flag);
+
+   // fg_flag = fraInf | frbZero | frbInf | frbDenorm
+   fg_flag = mkOR1( mkexpr( fraInf_tmp ), mkOR1( mkexpr( frbZero_tmp ),
+                                                 mkOR1( mkexpr( frbInf_tmp ),
+                                                        frbDenorm ) ) );
+   fg_flag = unop(Iop_1Uto32, fg_flag);
+   assign(*fe_flag_tmp, fe_flag);
+   assign(*fg_flag_tmp, fg_flag);
+}
+
+/* Wrapper around _do_fp_tdiv() (see its description above): run the
+   double-precision tests and pack the flags into the 4-bit CR value
+   fl_flag || fg_flag || fe_flag || 0b0, where fl_flag == 1 on ppc64. */
+static IRExpr * do_fp_tdiv(IRTemp frA_int, IRTemp frB_int)
+{
+   IRTemp fe_flag = IRTemp_INVALID;
+   IRTemp fg_flag = IRTemp_INVALID;
+   IRExpr * fl_flag = unop(Iop_Not32, mkU32(0xFFFFFE));
+
+   _do_fp_tdiv(frA_int, frB_int, False/*not single precision*/, &fe_flag, &fg_flag);
+
+   IRExpr * fl_bit = binop( Iop_Shl32, fl_flag,           mkU8( 3 ) );
+   IRExpr * fg_bit = binop( Iop_Shl32, mkexpr(fg_flag), mkU8( 2 ) );
+   IRExpr * fe_bit = binop( Iop_Shl32, mkexpr(fe_flag), mkU8( 1 ) );
+   return binop( Iop_Or32, binop( Iop_Or32, fl_bit, fg_bit ), fe_bit );
+}
+
+/* Disassemble the FP test instructions ftdiv and ftsqrt (ISA 2.06).
+   Both compute a 4-bit fl||fg||fe||0 field and write it into CR
+   field crfD.  Returns True iff the instruction was decoded. */
+static Bool dis_fp_tests ( UInt theInstr )
+{
+   /* X-Form */
+   UChar  opc1     = ifieldOPC(theInstr);
+   UChar  crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+   UChar  frB_addr = ifieldRegB(theInstr);
+   UChar  b0       = ifieldBIT0(theInstr);
+   UInt   opc2     = ifieldOPClo10(theInstr);
+   IRTemp frB_I64  = newTemp(Ity_I64);
+
+   if (opc1 != 0x3F || b0 != 0 ){
+      vex_printf("dis_fp_tests(ppc)(ftdiv)\n");
+      return False;
+   }
+   /* Raw bits of FRB, needed by both instructions. */
+   assign( frB_I64, unop( Iop_ReinterpF64asI64, getFReg( frB_addr ) ) );
+
+   switch (opc2) {
+      case 0x080: { // ftdiv
+         UChar  frA_addr = ifieldRegA(theInstr);
+         IRTemp frA_I64  = newTemp(Ity_I64);
+         UChar  b21to22  = toUChar( IFIELD( theInstr, 21, 2 ) );
+         if (b21to22 != 0 ) {
+            vex_printf("dis_fp_tests(ppc)(ftdiv)\n");
+            return False;
+         }
+
+         assign( frA_I64, unop( Iop_ReinterpF64asI64, getFReg( frA_addr ) ) );
+         putGST_field( PPC_GST_CR, do_fp_tdiv(frA_I64, frB_I64), crfD );
+
+         DIP("ftdiv crf%d,fr%u,fr%u\n", crfD, frA_addr, frB_addr);
+         break;
+      }
+      case 0x0A0: { // ftsqrt
+         IRTemp cr_val  = newTemp(Ity_I32);
+         IRTemp fe_flag = IRTemp_INVALID;
+         IRTemp fg_flag = IRTemp_INVALID;
+         UChar  b18to22 = toUChar( IFIELD( theInstr, 18, 5 ) );
+         if ( b18to22 != 0) {
+            vex_printf("dis_fp_tests(ppc)(ftsqrt)\n");
+            return False;
+         }
+         DIP("ftsqrt crf%d,fr%u\n", crfD, frB_addr);
+         do_fp_tsqrt(frB_I64, False /* not single precision*/, &fe_flag, &fg_flag);
+         /* CR field = fl_flag || fg_flag || fe_flag || 0b0; fl_flag is
+            always 1 on ppc64, hence the constant 8 below. */
+         assign( cr_val,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag), mkU8( 1 ) ) ) );
+         putGST_field( PPC_GST_CR, mkexpr(cr_val), crfD );
+         break;
+      }
+      default:
+         vex_printf("dis_fp_tests(ppc)(opc2)\n");
+         return False;
+   }
+   return True;
+}
+
+/*
+  Floating Point Compare Instructions
+*/
+/* Disassemble fcmpu/fcmpo: compare frA with frB and deposit the
+   4-bit LT/GT/EQ/UN result into CR field crfD. */
+static Bool dis_fp_cmp ( UInt theInstr )
+{
+   /* X-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+   UChar b21to22  = toUChar( IFIELD( theInstr, 21, 2 ) );
+   UChar frA_addr = ifieldRegA(theInstr);
+   UChar frB_addr = ifieldRegB(theInstr);
+   UInt  opc2     = ifieldOPClo10(theInstr);
+   UChar b0       = ifieldBIT0(theInstr);
+
+   IRTemp ccIR    = newTemp(Ity_I32);
+   IRTemp ccPPC32 = newTemp(Ity_I32);
+
+   IRTemp frA     = newTemp(Ity_F64);
+   IRTemp frB     = newTemp(Ity_F64);
+
+   IRExpr* hiBit;   /* (~(ccIR >> 5)) & 2 */
+   IRExpr* loBit;   /* (ccIR ^ (ccIR >> 6)) & 1 */
+
+   if (opc1 != 0x3F || b21to22 != 0 || b0 != 0) {
+      vex_printf("dis_fp_cmp(ppc)(instr)\n");
+      return False;
+   }
+
+   assign( frA, getFReg(frA_addr));
+   assign( frB, getFReg(frB_addr));
+
+   assign( ccIR, binop(Iop_CmpF64, mkexpr(frA), mkexpr(frB)) );
+
+   /* Map the IR comparison result onto the PPC encoding:
+
+        FP cmp result | PPC | IR
+        --------------------------
+        UN            | 0x1 | 0x45
+        EQ            | 0x2 | 0x40
+        GT            | 0x4 | 0x00
+        LT            | 0x8 | 0x01
+
+      ccPPC32 = 1 << ( (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) )
+   */
+   hiBit = binop( Iop_And32,
+                  unop( Iop_Not32,
+                        binop( Iop_Shr32, mkexpr(ccIR), mkU8(5) ) ),
+                  mkU32(2) );
+   loBit = binop( Iop_And32,
+                  binop( Iop_Xor32,
+                         mkexpr(ccIR),
+                         binop( Iop_Shr32, mkexpr(ccIR), mkU8(6) ) ),
+                  mkU32(1) );
+   assign( ccPPC32,
+           binop( Iop_Shl32,
+                  mkU32(1),
+                  unop( Iop_32to8,
+                        binop( Iop_Or32, hiBit, loBit ) ) ) );
+
+   putGST_field( PPC_GST_CR, mkexpr(ccPPC32), crfD );
+
+   /* CAB: TODO?: Support writing cc to FPSCR->FPCC ?
+      putGST_field( PPC_GST_FPSCR, mkexpr(ccPPC32), 4 );
+   */
+   // XXX XXX XXX FIXME
+   // Also write the result into FPRF (it's not entirely clear how)
+
+   /* fcmpu and fcmpo differ only in exception flag settings, which
+      are not modelled here. */
+   switch (opc2) {
+   case 0x000: // fcmpu (Floating Compare Unordered, PPC32 p403)
+      DIP("fcmpu crf%d,fr%u,fr%u\n", crfD, frA_addr, frB_addr);
+      break;
+   case 0x020: // fcmpo (Floating Compare Ordered, PPC32 p402)
+      DIP("fcmpo crf%d,fr%u,fr%u\n", crfD, frA_addr, frB_addr);
+      break;
+   default:
+      vex_printf("dis_fp_cmp(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+
+
+/*
+  Floating Point Rounding/Conversion Instructions
+*/
+/* Disassemble the FP rounding/conversion instructions: frsp, the
+ * fcti* (FP -> integer) family, the fcfid* (integer -> FP) family and
+ * the frin/friz/frip/frim round-to-FP-integer group.  Emits IR and
+ * returns True iff the instruction was recognised. */
+static Bool dis_fp_round ( UInt theInstr )
+{
+   /* X-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar b16to20  = ifieldRegA(theInstr);
+   UChar frD_addr = ifieldRegDS(theInstr);
+   UChar frB_addr = ifieldRegB(theInstr);
+   UInt  opc2     = ifieldOPClo10(theInstr);
+   UChar flag_rC  = ifieldBIT0(theInstr);
+
+   IRTemp  frD     = newTemp(Ity_F64);
+   IRTemp  frB     = newTemp(Ity_F64);
+   IRTemp  r_tmp32 = newTemp(Ity_I32);
+   IRTemp  r_tmp64 = newTemp(Ity_I64);
+   IRExpr* rm      = get_IR_roundingmode();
+
+   /* By default, we will examine the results of the operation and set
+      fpscr[FPRF] accordingly. */
+   Bool set_FPRF = True;
+
+   /* By default, if flag_RC is set, we will clear cr1 after the
+      operation.  In reality we should set cr1 to indicate the
+      exception status of the operation, but since we're not
+      simulating exceptions, the exception status will appear to be
+      zero.  Hence cr1 should be cleared if this is a . form insn. */
+   Bool clear_CR1 = True;
+   if ((!(opc1 == 0x3F || opc1 == 0x3B)) || b16to20 != 0) {
+      vex_printf("dis_fp_round(ppc)(instr)\n");
+      return False;
+   }
+
+   assign( frB, getFReg(frB_addr));
+   if (opc1 == 0x3B) {
+      /* The fcfid[u]s instructions (from ISA 2.06) are a bit odd because
+       * they're very similar to the other instructions handled here, but have
+       * a different primary opcode.
+       */
+      switch (opc2) {
+         case 0x34E: // fcfids (Float convert from signed DWord to single precision)
+            DIP("fcfids%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+            assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
+            assign( frD, binop( Iop_RoundF64toF32, rm, binop( Iop_I64StoF64, rm,
+                                                              mkexpr( r_tmp64 ) ) ) );
+            goto putFR;
+
+         case 0x3Ce: // fcfidus (Float convert from unsigned DWord to single precision)
+            DIP("fcfidus%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+            assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
+            assign( frD, unop( Iop_F32toF64, binop( Iop_I64UtoF32, rm, mkexpr( r_tmp64 ) ) ) );
+            goto putFR;
+      }
+      /* Any other opc2 with opc1 == 0x3B falls through to the switch
+         below, and will normally land in its default case. */
+   }
+
+
+   switch (opc2) {
+   case 0x00C: // frsp (Float Round to Single, PPC32 p423)
+      DIP("frsp%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( frD, binop( Iop_RoundF64toF32, rm, mkexpr(frB) ));
+      break;
+
+   case 0x00E: // fctiw (Float Conv to Int, PPC32 p404)
+      DIP("fctiw%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      // The 32-bit result goes in the low word of frD; high word zeroed.
+      assign( r_tmp32,
+              binop(Iop_F64toI32S, rm, mkexpr(frB)) );
+      assign( frD, unop( Iop_ReinterpI64asF64,
+                         unop( Iop_32Uto64, mkexpr(r_tmp32))));
+      /* FPRF is undefined after fctiw.  Leave unchanged. */
+      set_FPRF = False;
+      break;
+
+   case 0x00F: // fctiwz (Float Conv to Int, Round to Zero, PPC32 p405)
+      DIP("fctiwz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( r_tmp32,
+              binop(Iop_F64toI32S, mkU32(Irrm_ZERO), mkexpr(frB) ));
+      assign( frD, unop( Iop_ReinterpI64asF64,
+                         unop( Iop_32Uto64, mkexpr(r_tmp32))));
+      /* FPRF is undefined after fctiwz.  Leave unchanged. */
+      set_FPRF = False;
+      break;
+
+   case 0x08F: case 0x08E: // fctiwu[z]
+      DIP("fctiwu%s%s fr%u,fr%u\n", opc2 == 0x08F ? "z" : "",
+               flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( r_tmp32,
+              binop( Iop_F64toI32U,
+                     opc2 == 0x08F ? mkU32( Irrm_ZERO ) : rm,
+                     mkexpr( frB ) ) );
+      assign( frD, unop( Iop_ReinterpI64asF64,
+                         unop( Iop_32Uto64, mkexpr(r_tmp32))));
+      /* FPRF is undefined after fctiwu[z].  Leave unchanged. */
+      set_FPRF = False;
+      break;
+
+
+   case 0x32E: // fctid (Float Conv to Int DWord, PPC64 p437)
+      DIP("fctid%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( r_tmp64,
+              binop(Iop_F64toI64S, rm, mkexpr(frB)) );
+      assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) );
+      /* FPRF is undefined after fctid.  Leave unchanged. */
+      set_FPRF = False;
+      break;
+
+   case 0x32F: // fctidz (Float Conv to Int DWord, Round to Zero, PPC64 p437)
+      DIP("fctidz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( r_tmp64,
+              binop(Iop_F64toI64S, mkU32(Irrm_ZERO), mkexpr(frB)) );
+      assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) );
+      /* FPRF is undefined after fctidz.  Leave unchanged. */
+      set_FPRF = False;
+      break;
+
+   case 0x3AE: case 0x3AF: // fctidu[z] (Float Conv to Int DWord Unsigned [Round to Zero])
+   {
+      DIP("fctidu%s%s fr%u,fr%u\n", opc2 == 0x3AE ? "" : "z",
+               flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( r_tmp64,
+              binop(Iop_F64toI64U, opc2 == 0x3AE ? rm : mkU32(Irrm_ZERO), mkexpr(frB)) );
+      assign( frD, unop( Iop_ReinterpI64asF64, mkexpr(r_tmp64)) );
+      /* FPRF is undefined after fctidu[z].  Leave unchanged. */
+      set_FPRF = False;
+      break;
+   }
+   case 0x34E: // fcfid (Float Conv from Int DWord, PPC64 p434)
+      DIP("fcfid%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
+      assign( frD,
+              binop(Iop_I64StoF64, rm, mkexpr(r_tmp64)) );
+      break;
+
+   case 0x3CE: // fcfidu (Float convert from unsigned DWord)
+      DIP("fcfidu%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( r_tmp64, unop( Iop_ReinterpF64asI64, mkexpr(frB)) );
+      assign( frD, binop( Iop_I64UtoF64, rm, mkexpr( r_tmp64 ) ) );
+      break;
+
+   case 0x188: case 0x1A8: case 0x1C8: case 0x1E8: // frin, friz, frip, frim
+      switch(opc2) {
+      case 0x188: // frin (Floating Round to Integer Nearest)
+         DIP("frin%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+         assign( r_tmp64,
+                 binop(Iop_F64toI64S, mkU32(Irrm_NEAREST), mkexpr(frB)) );
+         break;
+      case 0x1A8: // friz (Floating Round to Integer Toward Zero)
+         DIP("friz%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+         assign( r_tmp64,
+                 binop(Iop_F64toI64S, mkU32(Irrm_ZERO), mkexpr(frB)) );
+         break;
+      case 0x1C8: // frip (Floating Round to Integer Plus)
+         DIP("frip%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+         assign( r_tmp64,
+                 binop(Iop_F64toI64S, mkU32(Irrm_PosINF), mkexpr(frB)) );
+         break;
+      case 0x1E8: // frim (Floating Round to Integer Minus)
+         DIP("frim%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+         assign( r_tmp64,
+                 binop(Iop_F64toI64S, mkU32(Irrm_NegINF), mkexpr(frB)) );
+         break;
+      }
+
+      /* don't use the rounded integer if frB is outside -9e18..9e18 */
+      /* F64 has only log10(2**52) significant digits anyway */
+      /* need to preserve sign of zero */
+      /*   frD = (fabs(frB) > 9e18) ? frB :
+               (sign(frB)) ? -fabs((double)r_tmp64) : (double)r_tmp64  */
+      assign(frD, IRExpr_ITE(
+                     binop(Iop_CmpNE8,
+                           unop(Iop_32to8,
+                                binop(Iop_CmpF64,
+                                      IRExpr_Const(IRConst_F64(9e18)),
+                                      unop(Iop_AbsF64, mkexpr(frB)))),
+                           mkU8(0)),
+                     mkexpr(frB),
+                     IRExpr_ITE(
+                        binop(Iop_CmpNE32,
+                              binop(Iop_Shr32,
+                                    unop(Iop_64HIto32,
+                                         unop(Iop_ReinterpF64asI64,
+                                              mkexpr(frB))),
+                                    mkU8(31)),
+                              mkU32(0)),
+                        unop(Iop_NegF64,
+                             unop( Iop_AbsF64,
+                                   binop(Iop_I64StoF64, mkU32(0),
+                                         mkexpr(r_tmp64)) )),
+                        binop(Iop_I64StoF64, mkU32(0), mkexpr(r_tmp64) )
+                     )
+      ));
+      break;
+
+   default:
+      vex_printf("dis_fp_round(ppc)(opc2)\n");
+      return False;
+   }
+putFR:
+   /* fcfids/fcfidus jump straight here from the early opc1==0x3B switch. */
+   putFReg( frD_addr, mkexpr(frD) );
+
+   if (set_FPRF) {
+      // XXX XXX XXX FIXME
+      // set FPRF from frD
+   }
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8(0) );
+      putCR0( 1, mkU8(0) );
+   }
+
+   return True;
+}
+
+/*
+  Floating Point Pair Instructions
+*/
+/* Disassemble the FP load/store double-pair instructions (ISA 2.05):
+ * lfdpx/stfdpx (X-form, opc1 0x1F) and lfdp/stfdp (DS-form, opc1
+ * 0x39/0x3d).  The even/odd register pair (frTp, frTp+1) is
+ * transferred to/from EA and EA+8.  Returns True iff decoded.
+ *
+ * Fix vs previous revision: the DIP disassembly printout showed the
+ * wrong mnemonics ("ldpx"/"stdpx") for lfdpx/stfdpx.
+ */
+static Bool dis_fp_pair ( UInt theInstr )
+{
+   /* X-Form/DS-Form */
+   UChar  opc1         = ifieldOPC(theInstr);
+   UChar  frT_hi_addr  = ifieldRegDS(theInstr);
+   UChar  frT_lo_addr  = frT_hi_addr + 1;
+   UChar  rA_addr      = ifieldRegA(theInstr);
+   UChar  rB_addr      = ifieldRegB(theInstr);
+   UInt   uimm16       = ifieldUIMM16(theInstr);
+   /* NOTE(review): for the DS-form cases the two low-order bits of this
+      immediate are really the XO field; they are neither masked off nor
+      validated here -- confirm the dispatcher only routes lfdp/stfdp
+      (XO == 0) this way. */
+   Int    simm16       = extend_s_16to32(uimm16);
+   UInt   opc2         = ifieldOPClo10(theInstr);
+   IRType ty           = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp EA_hi        = newTemp(ty);
+   IRTemp EA_lo        = newTemp(ty);
+   IRTemp frT_hi       = newTemp(Ity_F64);
+   IRTemp frT_lo       = newTemp(Ity_F64);
+   UChar  b0           = ifieldBIT0(theInstr);
+   Bool   is_load      = False;
+
+   /* The pair register number must be even. */
+   if ((frT_hi_addr %2) != 0) {
+      vex_printf("dis_fp_pair(ppc) : odd frT register\n");
+      return False;
+   }
+
+   switch (opc1) {
+   case 0x1F: // register offset
+      switch(opc2) {
+      case 0x317:     // lfdpx (FP Load Double Pair X-form, ISA 2.05  p125)
+         DIP("lfdpx fr%u,r%u,r%u\n", frT_hi_addr, rA_addr, rB_addr);
+         is_load = True;
+         break;
+      case 0x397:     // stfdpx (FP STORE Double Pair X-form, ISA 2.05  p125)
+         DIP("stfdpx fr%u,r%u,r%u\n", frT_hi_addr, rA_addr, rB_addr);
+         break;
+      default:
+         vex_printf("dis_fp_pair(ppc) : X-form wrong opc2\n");
+         return False;
+      }
+
+      if (b0 != 0) {
+         vex_printf("dis_fp_pair(ppc)(0x1F,b0)\n");
+         return False;
+      }
+      assign( EA_hi, ea_rAor0_idxd( rA_addr, rB_addr ) );
+      break;
+   case 0x39: // lfdp (FP Load Double Pair DS-form, ISA 2.05  p125)
+      DIP("lfdp fr%u,%d(r%u)\n", frT_hi_addr, simm16, rA_addr);
+      assign( EA_hi, ea_rAor0_simm( rA_addr, simm16  ) );
+      is_load = True;
+      break;
+   case 0x3d: // stfdp (FP Store Double Pair DS-form, ISA 2.05  p125)
+      DIP("stfdp fr%u,%d(r%u)\n", frT_hi_addr, simm16, rA_addr);
+      assign( EA_hi, ea_rAor0_simm( rA_addr, simm16  ) );
+      break;
+   default:   // immediate offset
+      vex_printf("dis_fp_pair(ppc)(instr)\n");
+      return False;
+   }
+
+   /* The second doubleword lives 8 bytes past the first. */
+   if (mode64)
+      assign( EA_lo, binop(Iop_Add64, mkexpr(EA_hi), mkU64(8)) );
+   else
+      assign( EA_lo, binop(Iop_Add32, mkexpr(EA_hi), mkU32(8)) );
+
+   /* Read the register pair unconditionally; for loads the values are
+      simply left unused. */
+   assign( frT_hi, getFReg(frT_hi_addr) );
+   assign( frT_lo, getFReg(frT_lo_addr) );
+
+   if (is_load) {
+      putFReg( frT_hi_addr, load(Ity_F64, mkexpr(EA_hi)) );
+      putFReg( frT_lo_addr, load(Ity_F64, mkexpr(EA_lo)) );
+   } else {
+      store( mkexpr(EA_hi), mkexpr(frT_hi) );
+      store( mkexpr(EA_lo), mkexpr(frT_lo) );
+   }
+
+   return True;
+}
+
+
+/*
+  Floating Point Merge Instructions
+*/
+/* Disassemble fmrgew/fmrgow: build frD from the even (high) or odd
+   (low) 32-bit words of frA and frB respectively. */
+static Bool dis_fp_merge ( UInt theInstr )
+{
+   /* X-Form */
+   UInt  opc2     = ifieldOPClo10(theInstr);
+   UChar frD_addr = ifieldRegDS(theInstr);
+   UChar frA_addr = ifieldRegA(theInstr);
+   UChar frB_addr = ifieldRegB(theInstr);
+
+   IRTemp frD = newTemp(Ity_F64);
+   IRTemp frA = newTemp(Ity_F64);
+   IRTemp frB = newTemp(Ity_F64);
+   IROp   sel;   /* selects the even (high) or odd (low) word */
+
+   assign( frA, getFReg(frA_addr));
+   assign( frB, getFReg(frB_addr));
+
+   switch (opc2) {
+   case 0x3c6: // fmrgew floating merge even word
+      DIP("fmrgew fr%u,fr%u,fr%u\n", frD_addr, frA_addr, frB_addr);
+      sel = Iop_64HIto32;
+      break;
+   case 0x346: // fmrgow floating merge odd word
+      DIP("fmrgow fr%u,fr%u,fr%u\n", frD_addr, frA_addr, frB_addr);
+      sel = Iop_64to32;
+      break;
+   default:
+      vex_printf("dis_fp_merge(ppc)(opc2)\n");
+      return False;
+   }
+
+   /* frD = selected-word(frA) ## selected-word(frB), reinterpreted
+      as a 64-bit FP value. */
+   assign( frD, unop( Iop_ReinterpI64asF64,
+                      binop( Iop_32HLto64,
+                             unop( sel, unop( Iop_ReinterpF64asI64,
+                                              mkexpr(frA) ) ),
+                             unop( sel, unop( Iop_ReinterpF64asI64,
+                                              mkexpr(frB) ) ) ) ) );
+
+   putFReg( frD_addr, mkexpr(frD) );
+   return True;
+}
+
+/*
+  Floating Point Move Instructions
+*/
+static Bool dis_fp_move ( UInt theInstr )
+{
+   /* X-Form.  FP register move / sign-manipulation instructions:
+      fcpsgn, fneg, fmr, fnabs, fabs.  None of these change FPRF;
+      CR1 is set in the usual way if Rc (bit 0) is set. */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar frD_addr = ifieldRegDS(theInstr);
+   UChar frA_addr = ifieldRegA(theInstr);
+   UChar frB_addr = ifieldRegB(theInstr);
+   UInt  opc2     = ifieldOPClo10(theInstr);
+   UChar flag_rC  = ifieldBIT0(theInstr);
+
+   IRTemp frD = newTemp(Ity_F64);
+   IRTemp frB = newTemp(Ity_F64);
+   /* These four are only used by fcpsgn and are allocated there.
+      (itmpB was previously eagerly allocated as an F64 temp and then
+      re-allocated as I64 in the fcpsgn case, leaving a dead temp in
+      the IRSB for every other opcode.) */
+   IRTemp itmpB;
+   IRTemp frA;
+   IRTemp signA;
+   IRTemp hiD;
+
+   /* Only fcpsgn (opc2 == 0x008) uses the frA field; for all the other
+      forms it must be zero. */
+   if (opc1 != 0x3F || (frA_addr != 0 && opc2 != 0x008)) {
+      vex_printf("dis_fp_move(ppc)(instr)\n");
+      return False;
+   }
+
+   assign( frB, getFReg(frB_addr));
+
+   switch (opc2) {
+   case 0x008: // fcpsgn (Floating Copy Sign, ISA_V2.05 p126)
+      DIP("fcpsgn%s fr%u,fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frA_addr,
+          frB_addr);
+      signA = newTemp(Ity_I32);
+      hiD = newTemp(Ity_I32);
+      itmpB = newTemp(Ity_I64);
+      frA = newTemp(Ity_F64);
+      assign( frA, getFReg(frA_addr) );
+
+      /* get A's sign bit */
+      assign(signA, binop(Iop_And32,
+                          unop(Iop_64HIto32, unop(Iop_ReinterpF64asI64,
+                                                  mkexpr(frA))),
+                          mkU32(0x80000000)) );
+
+      assign( itmpB, unop(Iop_ReinterpF64asI64, mkexpr(frB)) );
+
+      /* mask off B's sign bit and or in A's sign bit */
+      assign(hiD, binop(Iop_Or32,
+                        binop(Iop_And32,
+                              unop(Iop_64HIto32,
+                                   mkexpr(itmpB)),  /* frB's high 32 bits */
+                              mkU32(0x7fffffff)),
+                        mkexpr(signA)) );
+
+      /* combine hiD/loB into frD */
+      assign( frD, unop(Iop_ReinterpI64asF64,
+                        binop(Iop_32HLto64,
+                              mkexpr(hiD),
+                              unop(Iop_64to32,
+                                   mkexpr(itmpB)))) );   /* frB's low 32 bits */
+      break;
+
+   case 0x028: // fneg (Floating Negate, PPC32 p416)
+      DIP("fneg%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( frD, unop( Iop_NegF64, mkexpr(frB) ));
+      break;
+
+   case 0x048: // fmr (Floating Move Register, PPC32 p410)
+      DIP("fmr%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( frD, mkexpr(frB) );
+      break;
+
+   case 0x088: // fnabs (Floating Negative Absolute Value, PPC32 p415)
+      DIP("fnabs%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( frD, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr(frB) )));
+      break;
+
+   case 0x108: // fabs (Floating Absolute Value, PPC32 p399)
+      DIP("fabs%s fr%u,fr%u\n", flag_rC ? ".":"", frD_addr, frB_addr);
+      assign( frD, unop( Iop_AbsF64, mkexpr(frB) ));
+      break;
+
+   default:
+      vex_printf("dis_fp_move(ppc)(opc2)\n");
+      return False;
+   }
+
+   putFReg( frD_addr, mkexpr(frD) );
+
+   /* None of these change FPRF.  cr1 is set in the usual way though,
+      if flag_rC is set. */
+
+   if (flag_rC) {
+      putCR321( 1, mkU8(0) );
+      putCR0( 1, mkU8(0) );
+   }
+
+   return True;
+}
+
+
+
+/*
+  Floating Point Status/Control Register Instructions
+*/
+static Bool dis_fp_scr ( UInt theInstr, Bool GX_level )
+{
+   /* Decode the FPSCR move/manipulation instructions (all under major
+      opcode 0x3F).  GX_level indicates Decimal Floating Point support,
+      in which case the FPSCR is managed as a 64-bit register and the
+      W bit selects between its upper and lower halves.
+      Many forms - see each switch case. */
+   UChar opc1    = ifieldOPC(theInstr);
+   UInt  opc2    = ifieldOPClo10(theInstr);
+   UChar flag_rC = ifieldBIT0(theInstr);
+
+   if (opc1 != 0x3F) {
+      vex_printf("dis_fp_scr(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   case 0x026: { // mtfsb1 (Move to FPSCR Bit 1, PPC32 p479)
+      // Bit crbD of the FPSCR is set.
+      UChar crbD    = ifieldRegDS(theInstr);
+      UInt  b11to20 = IFIELD(theInstr, 11, 10);
+
+      if (b11to20 != 0) {
+         vex_printf("dis_fp_scr(ppc)(instr,mtfsb1)\n");
+         return False;
+      }
+      DIP("mtfsb1%s crb%d \n", flag_rC ? ".":"", crbD);
+      /* Use 1ULL for the value as well as the mask: for crbD == 0,
+         "1 << 31" is undefined behaviour on signed int and would
+         sign-extend to 0xFFFFFFFF80000000 when widened for mkU64. */
+      putGST_masked( PPC_GST_FPSCR, mkU64( 1ULL << ( 31 - crbD ) ),
+                     1ULL << ( 31 - crbD ) );
+      break;
+   }
+
+   case 0x040: { // mcrfs (Move to Condition Register from FPSCR, PPC32 p465)
+      UChar   crfD    = toUChar( IFIELD( theInstr, 23, 3 ) );
+      UChar   b21to22 = toUChar( IFIELD( theInstr, 21, 2 ) );
+      UChar   crfS    = toUChar( IFIELD( theInstr, 18, 3 ) );
+      UChar   b11to17 = toUChar( IFIELD( theInstr, 11, 7 ) );
+      IRTemp  tmp     = newTemp(Ity_I32);
+      IRExpr* fpscr_all;
+      if (b21to22 != 0 || b11to17 != 0 || flag_rC != 0) {
+         vex_printf("dis_fp_scr(ppc)(instr,mcrfs)\n");
+         return False;
+      }
+      DIP("mcrfs crf%d,crf%d\n", crfD, crfS);
+      vassert(crfD < 8);
+      vassert(crfS < 8);
+      /* Pull field crfS (4 bits) out of the modelled FPSCR and copy it
+         into CR field crfD. */
+      fpscr_all = getGST_masked( PPC_GST_FPSCR, MASK_FPSCR_RN );
+      assign( tmp, binop(Iop_And32,
+                         binop(Iop_Shr32,fpscr_all,mkU8(4 * (7-crfS))),
+                         mkU32(0xF)) );
+      putGST_field( PPC_GST_CR, mkexpr(tmp), crfD );
+      break;
+   }
+
+   case 0x046: { // mtfsb0 (Move to FPSCR Bit 0, PPC32 p478)
+      // Bit crbD of the FPSCR is cleared.
+      UChar crbD    = ifieldRegDS(theInstr);
+      UInt  b11to20 = IFIELD(theInstr, 11, 10);
+
+      if (b11to20 != 0) {
+         vex_printf("dis_fp_scr(ppc)(instr,mtfsb0)\n");
+         return False;
+      }
+      DIP("mtfsb0%s crb%d\n", flag_rC ? ".":"", crbD);
+      putGST_masked( PPC_GST_FPSCR, mkU64( 0 ), 1ULL << ( 31 - crbD ) );
+      break;
+   }
+
+   case 0x086: { // mtfsfi (Move to FPSCR Field Immediate, PPC32 p481)
+      UInt crfD     = IFIELD( theInstr, 23, 3 );
+      UChar b16to22 = toUChar( IFIELD( theInstr, 16, 7 ) );
+      UChar IMM     = toUChar( IFIELD( theInstr, 12, 4 ) );
+      UChar b11     = toUChar( IFIELD( theInstr, 11, 1 ) );
+      UChar Wbit;
+
+      if (b16to22 != 0 || b11 != 0) {
+         vex_printf("dis_fp_scr(ppc)(instr,mtfsfi)\n");
+         return False;
+      }
+      DIP("mtfsfi%s crf%u,%d\n", flag_rC ? ".":"", crfD, IMM);
+      if (GX_level) {
+         /* This implies that Decimal Floating Point is supported, and the
+          * FPSCR must be managed as a 64-bit register.
+          */
+         Wbit = toUChar( IFIELD(theInstr, 16, 1) );
+      } else {
+         Wbit = 0;
+      }
+      /* W=0 addresses the lower (field 8..15) half of the 64-bit FPSCR. */
+      crfD = crfD + (8 * (1 - Wbit) );
+      putGST_field( PPC_GST_FPSCR, mkU32( IMM ), crfD );
+      break;
+   }
+
+   case 0x247: { // mffs (Move from FPSCR, PPC32 p468)
+      UChar   frD_addr  = ifieldRegDS(theInstr);
+      UInt    b11to20   = IFIELD(theInstr, 11, 10);
+      IRExpr* fpscr_lower = getGST_masked( PPC_GST_FPSCR, MASK_FPSCR_RN );
+      IRExpr* fpscr_upper = getGST_masked_upper( PPC_GST_FPSCR,
+                                                 MASK_FPSCR_DRN );
+
+      if (b11to20 != 0) {
+         vex_printf("dis_fp_scr(ppc)(instr,mffs)\n");
+         return False;
+      }
+      DIP("mffs%s fr%u\n", flag_rC ? ".":"", frD_addr);
+      /* Reassemble the (modelled) 64-bit FPSCR into frD. */
+      putFReg( frD_addr,
+          unop( Iop_ReinterpI64asF64,
+                binop( Iop_32HLto64, fpscr_upper, fpscr_lower ) ) );
+      break;
+   }
+
+   case 0x2C7: { // mtfsf (Move to FPSCR Fields, PPC32 p480)
+      UChar b25      = toUChar( IFIELD(theInstr, 25, 1) );
+      UChar FM       = toUChar( IFIELD(theInstr, 17, 8) );
+      UChar frB_addr = ifieldRegB(theInstr);
+      IRTemp frB   = newTemp(Ity_F64);
+      IRTemp rB_64 = newTemp( Ity_I64 );
+      Int i;
+      ULong mask;
+      UChar Wbit;
+#define BFP_MASK_SEED 0x3000000000000000ULL
+#define DFP_MASK_SEED 0x7000000000000000ULL
+
+      if (GX_level) {
+         /* This implies that Decimal Floating Point is supported, and the
+          * FPSCR must be managed as a 64-bit register.
+          */
+         Wbit = toUChar( IFIELD(theInstr, 16, 1) );
+      } else {
+         Wbit = 0;
+      }
+
+      if (b25 == 1) {
+         /* new 64 bit move variant for power 6.  If L field (bit 25) is
+          * a one do a full 64 bit move.  Note, the FPSCR is not really
+          * properly modeled.  This instruciton only changes the value of
+          * the rounding mode.  The HW exception bits do not get set in
+          * the simulator.  1/12/09
+          */
+         DIP("mtfsf%s %d,fr%u (L=1)\n", flag_rC ? ".":"", FM, frB_addr);
+         mask = 0xFF;
+
+      } else {
+         DIP("mtfsf%s %d,fr%u\n", flag_rC ? ".":"", FM, frB_addr);
+         // Build 32bit mask from FM:
+         mask = 0;
+         for (i=0; i<8; i++) {
+            /* Test the FM bit itself; the previous "== 1" comparison
+               matched only field 7, since for i < 7 the masked value is
+               a power of two larger than 1. */
+            if ((FM & (1<<(7-i))) != 0) {
+               /* FPSCR field k is set to the contents of the corresponding
+                * field of register FRB, where k = i+8x(1-W).  In the Power
+                * ISA, register field numbering is from left to right, so field
+                * 15 is the least significant field in a 64-bit register.  To
+                * generate the mask, we set all the appropriate rounding mode
+                * bits in the highest order nibble (field 0) and shift right 
+                * 'k x nibble length'.
+                */
+               if (Wbit)
+                  mask |= DFP_MASK_SEED >> ( 4 * ( i + 8 * ( 1 - Wbit ) ) );
+               else
+                  mask |= BFP_MASK_SEED >> ( 4 * ( i + 8 * ( 1 - Wbit ) ) );
+            }
+         }
+      }
+      assign( frB, getFReg(frB_addr));
+      assign( rB_64, unop( Iop_ReinterpF64asI64, mkexpr( frB ) ) );
+      putGST_masked( PPC_GST_FPSCR, mkexpr( rB_64 ), mask );
+      break;
+   }
+
+   default:
+      vex_printf("dis_fp_scr(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+/*------------------------------------------------------------*/
+/*--- Decimal Floating Point (DFP)  Helper functions       ---*/
+/*------------------------------------------------------------*/
+/* Format selectors for the DFP helpers (long = 64-bit double form,
+   extended = 128-bit quad form). */
+#define DFP_LONG  1
+#define DFP_EXTND 2
+/* Exponent biases and encoded-field masks for the two DFP formats. */
+#define DFP_LONG_BIAS   398
+#define DFP_LONG_ENCODED_FIELD_MASK  0x1F00
+#define DFP_EXTND_BIAS  6176
+#define DFP_EXTND_ENCODED_FIELD_MASK 0x1F000
+#define DFP_LONG_EXP_MSK   0XFF
+#define DFP_EXTND_EXP_MSK  0XFFF
+
+/* G-field / T-field extraction constants.  The shifts are adjusted for
+   operating on the upper 32 bits of the 64-bit value only. */
+#define DFP_G_FIELD_LONG_MASK     0x7FFC0000  // upper 32-bits only
+#define DFP_LONG_GFIELD_RT_SHIFT  (63 - 13 - 32) // adj for upper 32-bits 
+#define DFP_G_FIELD_EXTND_MASK    0x7FFFC000  // upper 32-bits only
+#define DFP_EXTND_GFIELD_RT_SHIFT (63 - 17 - 32) //adj for upper 32 bits
+#define DFP_T_FIELD_LONG_MASK     0x3FFFF  // mask for upper 32-bits
+#define DFP_T_FIELD_EXTND_MASK    0x03FFFF // mask for upper 32-bits
+#define DFP_LONG_EXP_MAX          369      // biased max
+#define DFP_LONG_EXP_MIN          0        // biased min
+#define DFP_EXTND_EXP_MAX         6111     // biased max
+#define DFP_EXTND_EXP_MIN         0        // biased min
+#define DFP_LONG_MAX_SIG_DIGITS   16
+#define DFP_EXTND_MAX_SIG_DIGITS  34
+#define MAX_DIGITS_IN_STRING      8
+
+
+/* Shorthands for building 32-bit logical IR expressions.  NOTE: unlike
+   the others, NOT takes an IRTemp argument (it wraps it in mkexpr). */
+#define  AND(x, y) binop( Iop_And32, x, y )
+#define AND4(w, x, y, z) AND( AND( w, x ), AND( y, z ) )
+#define   OR(x, y) binop( Iop_Or32,  x, y )
+#define  OR3(x, y, z)    OR( x, OR( y, z ) )
+#define  OR4(w, x, y, z) OR( OR( w, x ), OR( y, z ) )
+#define  NOT(x) unop( Iop_1Uto32, unop( Iop_Not1, unop( Iop_32to1,  mkexpr( x ) ) ) )
+
+#define  SHL(value, by) binop( Iop_Shl32, value, mkU8( by ) )
+#define  SHR(value, by) binop( Iop_Shr32, value, mkU8( by ) )
+
+/* Build a 5-bit constant from individual bits, MSB first. */
+#define BITS5(_b4,_b3,_b2,_b1,_b0) \
+   (((_b4) << 4) | ((_b3) << 3) | ((_b2) << 2) | \
+    ((_b1) << 1) | ((_b0) << 0))
+
+/* Build the 5-bit DPD G-field encoding from the left-most two exponent
+   bits (lmexp, 0..2) and the left-most BCD digit (lmd32, 0..9).  Both
+   arguments are Ity_I32 IRExprs; the result is an Ity_I32 IRExpr.  The
+   three possible encodings are computed unconditionally and merged with
+   mutually-exclusive all-ones/all-zeros masks (branch-free selection). */
+static IRExpr * Gfield_encoding( IRExpr * lmexp, IRExpr * lmd32 )
+{
+   IRTemp lmd_07_mask   = newTemp( Ity_I32 );
+   IRTemp lmd_8_mask    = newTemp( Ity_I32 );
+   IRTemp lmd_9_mask    = newTemp( Ity_I32 );
+   IRTemp lmexp_00_mask = newTemp( Ity_I32 );
+   IRTemp lmexp_01_mask = newTemp( Ity_I32 );
+   IRTemp lmexp_10_mask = newTemp( Ity_I32 );
+   IRTemp lmd_07_val    = newTemp( Ity_I32 );
+   IRTemp lmd_8_val     = newTemp( Ity_I32 );
+   IRTemp lmd_9_val     = newTemp( Ity_I32 );
+
+   /* The encoding is as follows:
+    * lmd - left most digit
+    * lme - left most 2-bits of the exponent
+    *
+    *    lmd
+    *   0 - 7    (lmexp << 3) | lmd
+    *     8      0b11000 (24 decimal) if lme=0b00;
+    *            0b11010 (26 decimal) if lme=0b01;
+    *            0b11100 (28 decimal) if lme=0b10;
+    *     9      0b11001 (25 decimal) if lme=0b00;
+    *            0b11011 (27 decimal) if lme=0b01;
+    *            0b11101 (29 decimal) if lme=0b10;
+    */
+
+   /* Generate the masks for each condition (all-ones when true) */
+   assign( lmd_07_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpLE32U, lmd32, mkU32( 7 ) ) ) );
+   assign( lmd_8_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmd32, mkU32( 8 ) ) ) );
+   assign( lmd_9_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmd32, mkU32( 9 ) ) ) );
+   assign( lmexp_00_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmexp, mkU32( 0 ) ) ) );
+   assign( lmexp_01_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmexp, mkU32( 1 ) ) ) );
+   assign( lmexp_10_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32, lmexp, mkU32( 2 ) ) ) );
+
+   /* Generate the values for each LMD condition, assuming the condition
+    * is TRUE.
+    */
+   assign( lmd_07_val,
+           binop( Iop_Or32, binop( Iop_Shl32, lmexp, mkU8( 3 ) ), lmd32 ) );
+   assign( lmd_8_val,
+           binop( Iop_Or32,
+                  binop( Iop_Or32,
+                         binop( Iop_And32,
+                                mkexpr( lmexp_00_mask ),
+                                mkU32( 24 ) ),
+                         binop( Iop_And32,
+                                mkexpr( lmexp_01_mask ),
+                                mkU32( 26 ) ) ),
+                  binop( Iop_And32, mkexpr( lmexp_10_mask ), mkU32( 28 ) ) ) );
+   assign( lmd_9_val,
+           binop( Iop_Or32,
+                  binop( Iop_Or32,
+                         binop( Iop_And32,
+                                mkexpr( lmexp_00_mask ),
+                                mkU32( 25 ) ),
+                         binop( Iop_And32,
+                                mkexpr( lmexp_01_mask ),
+                                mkU32( 27 ) ) ),
+                  binop( Iop_And32, mkexpr( lmexp_10_mask ), mkU32( 29 ) ) ) );
+
+   /* generate the result from the possible LMD values */
+   return binop( Iop_Or32,
+                 binop( Iop_Or32,
+                        binop( Iop_And32,
+                               mkexpr( lmd_07_mask ),
+                               mkexpr( lmd_07_val ) ),
+                        binop( Iop_And32,
+                               mkexpr( lmd_8_mask ),
+                               mkexpr( lmd_8_val ) ) ),
+                 binop( Iop_And32, mkexpr( lmd_9_mask ), mkexpr( lmd_9_val ) ) );
+}
+
+/* Decode the left-most BCD digit (0..9) from G-field bits [0:4].
+   gfield_0_4 is an Ity_I32 IRExpr holding the 5 G-field bits in its low
+   bits; *lmd is an already-allocated Ity_I32 IRTemp that this function
+   assigns (out-parameter).  The exponent bits embedded in the G field
+   are only used here to disambiguate the digit-8/digit-9 encodings. */
+static void Get_lmd( IRTemp * lmd, IRExpr * gfield_0_4 )
+{
+   /* Extract the left most digit of the mantissa from the G field
+    * bits [0:4].
+    */
+   IRTemp lmd_07_mask   = newTemp( Ity_I32 );
+   IRTemp lmd_8_00_mask = newTemp( Ity_I32 );
+   IRTemp lmd_8_01_mask = newTemp( Ity_I32 );
+   IRTemp lmd_8_10_mask = newTemp( Ity_I32 );
+   IRTemp lmd_9_00_mask = newTemp( Ity_I32 );
+   IRTemp lmd_9_01_mask = newTemp( Ity_I32 );
+   IRTemp lmd_9_10_mask = newTemp( Ity_I32 );
+
+   IRTemp lmd_07_val = newTemp( Ity_I32 );
+   IRTemp lmd_8_val  = newTemp( Ity_I32 );
+   IRTemp lmd_9_val  = newTemp( Ity_I32 );
+
+   /* The left most digit (LMD) encoding is as follows:
+    *    lmd
+    *   0 - 7    (lmexp << 3) | lmd
+    *     8      0b11000 (24 decimal) if lme=0b00;
+    *            0b11010 (26 decimal) if lme=0b01;
+    *            0b11100 (28 decimal) if lme=0b10
+    *     9      0b11001 (25 decimal) if lme=0b00;
+    *            0b11011 (27 decimal) if lme=0b01;
+    *            0b11101 (29 decimal) if lme=0b10;
+    */
+
+   /* Generate the masks for each condition of LMD and exponent bits
+    * (all-ones when the condition holds, all-zeros otherwise).
+    */
+   assign( lmd_07_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpLE32U,
+                                    gfield_0_4,
+                                    mkU32( BITS5(1,0,1,1,1) ) ) ) );
+   assign( lmd_8_00_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
+                                    gfield_0_4,
+                                    mkU32( BITS5(1,1,0,0,0) ) ) ) );
+   assign( lmd_8_01_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
+                                    gfield_0_4,
+                                    mkU32( BITS5(1,1,0,1,0) ) ) ) );
+   assign( lmd_8_10_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
+                                    gfield_0_4,
+                                    mkU32( BITS5(1,1,1,0,0) ) ) ) );
+   assign( lmd_9_00_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
+                                    gfield_0_4,
+                                    mkU32( BITS5(1,1,0,0,1) ) ) ) );
+   assign( lmd_9_01_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
+                                    gfield_0_4,
+                                    mkU32( BITS5(1,1,0,1,1) ) ) ) );
+   assign( lmd_9_10_mask,
+           unop( Iop_1Sto32, binop( Iop_CmpEQ32,
+                                    gfield_0_4,
+                                    mkU32( BITS5(1,1,1,0,1) ) ) ) );
+
+   /* Generate the values for each LMD condition, assuming the condition
+    * is TRUE.
+    */
+   assign( lmd_07_val, binop( Iop_And32, gfield_0_4, mkU32( 0x7 ) ) );
+   assign( lmd_8_val, mkU32( 0x8 ) );
+   assign( lmd_9_val, mkU32( 0x9 ) );
+
+   /* Merge: exactly one mask is all-ones, so the ORs select one value. */
+   assign( *lmd,
+           OR( OR3 ( AND( mkexpr( lmd_07_mask ), mkexpr( lmd_07_val ) ),
+                     AND( mkexpr( lmd_8_00_mask ), mkexpr( lmd_8_val ) ),
+                     AND( mkexpr( lmd_8_01_mask ), mkexpr( lmd_8_val ) )),
+                     OR4( AND( mkexpr( lmd_8_10_mask ), mkexpr( lmd_8_val ) ),
+                          AND( mkexpr( lmd_9_00_mask ), mkexpr( lmd_9_val ) ),
+                          AND( mkexpr( lmd_9_01_mask ), mkexpr( lmd_9_val ) ),
+                          AND( mkexpr( lmd_9_10_mask ), mkexpr( lmd_9_val ) )
+                     ) ) );
+}
+
+#define DIGIT1_SHR 4    // shift digit 1 to bottom 4 bits
+#define DIGIT2_SHR 8    // shift digit 2 to bottom 4 bits
+#define DIGIT3_SHR 12
+#define DIGIT4_SHR 16
+#define DIGIT5_SHR 20
+#define DIGIT6_SHR 24
+#define DIGIT7_SHR 28
+
+/* Validate a BCD string held in two 32-bit halves (bcd_u = upper word,
+   bcd_l = lower word).  Each of the 16 nibbles is checked to be a valid
+   BCD digit, i.e. no greater than 9.  Returns an Ity_I32 IRExpr that is
+   all ones if ANY digit is invalid, and zero if all digits are valid.
+   (The comment below says 60-bit; all 16 nibbles of the two words are
+   in fact checked, which is harmless since a 60-bit string's unused top
+   nibble is zero.) */
+static IRExpr * bcd_digit_inval( IRExpr * bcd_u, IRExpr * bcd_l )
+{
+   /* 60-bit BCD string stored in two 32-bit values.  Check that each
+    * digit is a valid BCD number, i.e. no greater than 9.
+    */
+   IRTemp valid = newTemp( Ity_I32 );
+
+   /* "valid" is the AND of one all-ones/all-zeros comparison mask per
+      nibble: all-ones iff every nibble is <= 9. */
+   assign( valid,
+           AND4( AND4 ( unop( Iop_1Sto32,
+                              binop( Iop_CmpLE32U,
+                                     binop( Iop_And32,
+                                            bcd_l,
+                                            mkU32 ( 0xF ) ),
+                                      mkU32( 0x9 ) ) ),
+                        unop( Iop_1Sto32,
+                              binop( Iop_CmpLE32U,
+                                     binop( Iop_And32,
+                                            binop( Iop_Shr32,
+                                                   bcd_l,
+                                                   mkU8 ( DIGIT1_SHR ) ),
+                                             mkU32 ( 0xF ) ),
+                                      mkU32( 0x9 ) ) ),
+                        unop( Iop_1Sto32,
+                              binop( Iop_CmpLE32U,
+                                     binop( Iop_And32,
+                                            binop( Iop_Shr32,
+                                                   bcd_l,
+                                                   mkU8 ( DIGIT2_SHR ) ),
+                                            mkU32 ( 0xF ) ),
+                                      mkU32( 0x9 ) ) ),
+                        unop( Iop_1Sto32,
+                              binop( Iop_CmpLE32U,
+                                     binop( Iop_And32,
+                                            binop( Iop_Shr32,
+                                                   bcd_l,
+                                                   mkU8 ( DIGIT3_SHR ) ),
+                                             mkU32 ( 0xF ) ),
+                                      mkU32( 0x9 ) ) ) ),
+                 AND4 ( unop( Iop_1Sto32,
+                              binop( Iop_CmpLE32U,
+                                     binop( Iop_And32,
+                                            binop( Iop_Shr32,
+                                                   bcd_l,
+                                                   mkU8 ( DIGIT4_SHR ) ),
+                                            mkU32 ( 0xF ) ),
+                                     mkU32( 0x9 ) ) ),
+                        unop( Iop_1Sto32,
+                              binop( Iop_CmpLE32U,
+                                     binop( Iop_And32,
+                                            binop( Iop_Shr32,
+                                                   bcd_l,
+                                                   mkU8 ( DIGIT5_SHR ) ),
+                                            mkU32 ( 0xF ) ),
+                                     mkU32( 0x9 ) ) ),
+                        unop( Iop_1Sto32,
+                              binop( Iop_CmpLE32U,
+                                     binop( Iop_And32,
+                                            binop( Iop_Shr32,
+                                                   bcd_l,
+                                                   mkU8 ( DIGIT6_SHR ) ),
+                                            mkU32 ( 0xF ) ),
+                                     mkU32( 0x9 ) ) ),
+                        unop( Iop_1Sto32,
+                              binop( Iop_CmpLE32U,
+                                     binop( Iop_And32,
+                                            binop( Iop_Shr32,
+                                                   bcd_l,
+                                                   mkU8 ( DIGIT7_SHR ) ),
+                                            mkU32 ( 0xF ) ),
+                                     mkU32( 0x9 ) ) ) ),
+                 AND4( unop( Iop_1Sto32,
+                             binop( Iop_CmpLE32U,
+                                    binop( Iop_And32,
+                                           bcd_u,
+                                           mkU32 ( 0xF ) ),
+                                    mkU32( 0x9 ) ) ),
+                       unop( Iop_1Sto32,
+                             binop( Iop_CmpLE32U,
+                                    binop( Iop_And32,
+                                           binop( Iop_Shr32,
+                                                  bcd_u,
+                                                  mkU8 ( DIGIT1_SHR ) ),
+                                           mkU32 ( 0xF ) ),
+                                    mkU32( 0x9 ) ) ),
+                       unop( Iop_1Sto32,
+                             binop( Iop_CmpLE32U,
+                                    binop( Iop_And32,
+                                           binop( Iop_Shr32,
+                                                  bcd_u,
+                                                  mkU8 ( DIGIT2_SHR ) ),
+                                           mkU32 ( 0xF ) ),
+                                    mkU32( 0x9 ) ) ),
+                       unop( Iop_1Sto32,
+                             binop( Iop_CmpLE32U,
+                                    binop( Iop_And32,
+                                           binop( Iop_Shr32,
+                                                  bcd_u,
+                                                  mkU8 ( DIGIT3_SHR ) ),
+                                           mkU32 ( 0xF ) ),
+                                    mkU32( 0x9 ) ) ) ),
+                 AND4( unop( Iop_1Sto32,
+                             binop( Iop_CmpLE32U,
+                                    binop( Iop_And32,
+                                           binop( Iop_Shr32,
+                                                  bcd_u,
+                                                  mkU8 ( DIGIT4_SHR ) ),
+                                           mkU32 ( 0xF ) ),
+                                    mkU32( 0x9 ) ) ),
+                       unop( Iop_1Sto32,
+                             binop( Iop_CmpLE32U,
+                                    binop( Iop_And32,
+                                           binop( Iop_Shr32,
+                                                  bcd_u,
+                                                  mkU8 ( DIGIT5_SHR ) ),
+                                           mkU32 ( 0xF ) ),
+                                    mkU32( 0x9 ) ) ),
+                       unop( Iop_1Sto32,
+                             binop( Iop_CmpLE32U,
+                                    binop( Iop_And32,
+                                           binop( Iop_Shr32,
+                                                  bcd_u,
+                                                  mkU8 ( DIGIT6_SHR ) ),
+                                           mkU32 ( 0xF ) ),
+                                    mkU32( 0x9 ) ) ),
+                       unop( Iop_1Sto32,
+                             binop( Iop_CmpLE32U,
+                                    binop( Iop_And32,
+                                           binop( Iop_Shr32,
+                                                  bcd_u,
+                                                  mkU8 ( DIGIT7_SHR ) ),
+                                           mkU32 ( 0xF ) ),
+                                    mkU32( 0x9 ) ) ) ) ) );
+
+   /* Invert: zero when every digit was valid, all ones otherwise. */
+   return unop( Iop_Not32, mkexpr( valid ) );
+}
+#undef DIGIT1_SHR
+#undef DIGIT2_SHR
+#undef DIGIT3_SHR
+#undef DIGIT4_SHR
+#undef DIGIT5_SHR
+#undef DIGIT6_SHR
+#undef DIGIT7_SHR
+
+static IRExpr * Generate_neg_sign_mask( IRExpr * sign )
+{
+   /* All ones if the BCD sign nibble is one of the negative encodings
+      (0xB or 0xD); all zeros otherwise. */
+   IRExpr * is_0xB
+      = unop( Iop_1Sto32, binop( Iop_CmpEQ32, sign, mkU32( 0xB ) ) );
+   IRExpr * is_0xD
+      = unop( Iop_1Sto32, binop( Iop_CmpEQ32, sign, mkU32( 0xD ) ) );
+   return binop( Iop_Or32, is_0xB, is_0xD );
+}
+
+static IRExpr * Generate_pos_sign_mask( IRExpr * sign )
+{
+   /* All ones if the BCD sign nibble is one of the positive encodings
+      (0xA, 0xC, 0xE or 0xF); all zeros otherwise. */
+   IRExpr * is_0xA
+      = unop( Iop_1Sto32, binop( Iop_CmpEQ32, sign, mkU32( 0xA ) ) );
+   IRExpr * is_0xC
+      = unop( Iop_1Sto32, binop( Iop_CmpEQ32, sign, mkU32( 0xC ) ) );
+   IRExpr * is_0xE
+      = unop( Iop_1Sto32, binop( Iop_CmpEQ32, sign, mkU32( 0xE ) ) );
+   IRExpr * is_0xF
+      = unop( Iop_1Sto32, binop( Iop_CmpEQ32, sign, mkU32( 0xF ) ) );
+   return binop( Iop_Or32,
+                 binop( Iop_Or32, is_0xA, is_0xC ),
+                 binop( Iop_Or32, is_0xE, is_0xF ) );
+}
+
+static IRExpr * Generate_sign_bit( IRExpr * pos_sign_mask,
+                                   IRExpr * neg_sign_mask )
+{
+   /* Bit 31 of the result is set iff neg_sign_mask is set.  The AND of
+      pos_sign_mask with 0x00000000 always yields zero; it is kept only
+      for symmetry with the negative case (iropt folds it away). */
+   return binop( Iop_Or32,
+                 binop( Iop_And32, neg_sign_mask, mkU32( 0x80000000 ) ),
+                 binop( Iop_And32, pos_sign_mask, mkU32( 0x00000000 ) ) );
+}
+
+static IRExpr * Generate_inv_mask( IRExpr * invalid_bcd_mask,
+                                   IRExpr * pos_sign_mask,
+                                   IRExpr * neg_sign_mask )
+{
+   /* All ones if the BCD string had an invalid digit in it (first
+      argument), or if the sign nibble matched neither a valid positive
+      nor a valid negative encoding. */
+   IRExpr * no_valid_sign
+      = unop( Iop_1Sto32,
+              binop( Iop_CmpEQ32,
+                     binop( Iop_Or32, pos_sign_mask, neg_sign_mask ),
+                     mkU32( 0x0 ) ) );
+   return binop( Iop_Or32, invalid_bcd_mask, no_valid_sign );
+}
+
+/* Expand a 110-bit densely-packed-decimal (DPD) significand, held in
+   frBI64_hi:frBI64_lo, into BCD pieces via three Iop_DPBtoBCD
+   conversions.  All five IRTemp out-parameters must already be
+   allocated (Ity_I32) by the caller; each receives one 32-bit slice of
+   the resulting BCD string. */
+static void Generate_132_bit_bcd_string( IRExpr * frBI64_hi, IRExpr * frBI64_lo,
+                                         IRTemp * top_12_l, IRTemp * mid_60_u,
+                                         IRTemp * mid_60_l, IRTemp * low_60_u,
+                                         IRTemp * low_60_l)
+{
+   IRTemp tmplow60 = newTemp( Ity_I64 );
+   IRTemp tmpmid60 = newTemp( Ity_I64 );
+   IRTemp tmptop12 = newTemp( Ity_I64 );
+   IRTemp low_50   = newTemp( Ity_I64 );
+   IRTemp mid_50   = newTemp( Ity_I64 );
+   IRTemp top_10   = newTemp( Ity_I64 );
+   IRTemp top_12_u = newTemp( Ity_I32 ); // only needed for a dummy arg
+
+   /* Convert the 110-bit densely packed BCD string to a 128-bit BCD string */
+
+   /* low_50[49:0] = ((frBI64_lo[49:32]  << 14) | frBI64_lo[31:0]) */
+   assign( low_50,
+           binop( Iop_32HLto64,
+                  binop( Iop_And32,
+                         unop( Iop_64HIto32, frBI64_lo ),
+                         mkU32( 0x3FFFF ) ),
+                         unop( Iop_64to32, frBI64_lo ) ) );
+
+   /* Convert the 50 bit densely packed BCD string to a 60 bit
+    * BCD string.
+    */
+   assign( tmplow60, unop( Iop_DPBtoBCD, mkexpr( low_50 ) ) );
+   assign( *low_60_u, unop( Iop_64HIto32, mkexpr( tmplow60 ) ) );
+   assign( *low_60_l, unop( Iop_64to32, mkexpr( tmplow60 ) ) );
+
+   /* mid_50[49:0] =  ((frBI64_hi[35:32] << 14) | frBI64_hi[31:18]) |
+    *                 ((frBI64_hi[17:0]  << 14) | frBI64_lo[63:50])
+    */
+   assign( mid_50,
+           binop( Iop_32HLto64,
+                  binop( Iop_Or32,
+                         binop( Iop_Shl32,
+                                binop( Iop_And32,
+                                       unop( Iop_64HIto32, frBI64_hi ),
+                                       mkU32( 0xF ) ),
+                                mkU8( 14 ) ),
+                         binop( Iop_Shr32,
+                                unop( Iop_64to32, frBI64_hi ),
+                                mkU8( 18 ) ) ),
+                  binop( Iop_Or32,
+                         binop( Iop_Shl32,
+                                unop( Iop_64to32, frBI64_hi ),
+                                mkU8( 14 ) ),
+                         binop( Iop_Shr32,
+                                unop( Iop_64HIto32, frBI64_lo ),
+                                mkU8( 18 ) ) ) ) );
+
+   /* Convert the 50 bit densely packed BCD string to a 60 bit
+    * BCD string.
+    */
+   assign( tmpmid60, unop( Iop_DPBtoBCD, mkexpr( mid_50 ) ) );
+   assign( *mid_60_u, unop( Iop_64HIto32, mkexpr( tmpmid60 ) ) );
+   assign( *mid_60_l, unop( Iop_64to32, mkexpr( tmpmid60 ) ) );
+
+   /* top_10[9:0] = frBI64_hi[45:36] */
+   assign( top_10,
+           binop( Iop_32HLto64,
+                  mkU32( 0 ),
+                  binop( Iop_And32,
+                         binop( Iop_Shr32,
+                                unop( Iop_64HIto32, frBI64_hi ),
+                                mkU8( 4 ) ),
+                         mkU32( 0x3FF ) ) ) );
+
+   /* Convert the 10 bit densely packed BCD string to a 12 bit
+    * BCD string.  The upper half of the result goes to the local dummy
+    * temp top_12_u; only the low 32 bits are handed back to the caller.
+    */
+   assign( tmptop12, unop( Iop_DPBtoBCD, mkexpr( top_10 ) ) );
+   assign( top_12_u, unop( Iop_64HIto32, mkexpr( tmptop12 ) ) );
+   assign( *top_12_l, unop( Iop_64to32, mkexpr( tmptop12 ) ) );
+}
+
+/* Count the number of leading zero BCD digits (4-bit nibbles) in 'string',
+ * scanning digits 'start' .. MAX_DIGITS_IN_STRING (digit 1 is the most
+ * significant nibble of the 32-bit value).
+ *
+ * init_cnt / init_flag seed the running count and "seen a non-zero digit"
+ * flag from a previous call, so a wide string can be scanned in pieces.
+ * The flag is all-ones once a non-zero digit has been encountered; ANDing
+ * with its complement gates the counter so that only *leading* zeros are
+ * counted.  The final count and flag are returned as IRTemps through
+ * *final_cnt / *final_flag.
+ *
+ * NOTE(review): the IRExpr 'string' is re-used in every loop iteration,
+ * so it must be a side-effect-free expression -- confirm at call sites.
+ */
+static void Count_zeros( int start, IRExpr * init_cnt, IRExpr * init_flag,
+                         IRTemp * final_cnt, IRTemp * final_flag,
+                         IRExpr * string )
+{
+   IRTemp cnt[MAX_DIGITS_IN_STRING + 1];IRTemp flag[MAX_DIGITS_IN_STRING+1];
+   int digits = MAX_DIGITS_IN_STRING;
+   int i;
+
+   /* Entry [start-1] holds the caller-supplied starting state. */
+   cnt[start-1] = newTemp( Ity_I8 );
+   flag[start-1] = newTemp( Ity_I8 );
+   assign( cnt[start-1], init_cnt);
+   assign( flag[start-1], init_flag);
+
+   for ( i = start; i <= digits; i++) {
+      cnt[i] = newTemp( Ity_I8 );
+      flag[i] = newTemp( Ity_I8 );
+      /* cnt[i] = cnt[i-1] + 1 iff digit i is zero AND no earlier digit
+       * was non-zero (flag still clear); otherwise cnt[i] = cnt[i-1]. */
+      assign( cnt[i],
+              binop( Iop_Add8,
+                     mkexpr( cnt[i-1] ),
+                     binop(Iop_And8,
+                           unop( Iop_1Uto8,
+                                 binop(Iop_CmpEQ32,
+                                       binop(Iop_And32,
+                                             string,
+                                             mkU32( 0xF <<
+                                                    ( ( digits - i ) * 4) ) ),
+                                       mkU32( 0 ) ) ),
+                           binop( Iop_Xor8, /* complement flag */
+                                  mkexpr( flag[i - 1] ),
+                                  mkU8( 0xFF ) ) ) ) );
+
+      /* set flag to 1 if digit was not a zero */
+      assign( flag[i],
+              binop(Iop_Or8,
+                    unop( Iop_1Sto8,
+                          binop(Iop_CmpNE32,
+                                binop(Iop_And32,
+                                      string,
+                                      mkU32( 0xF <<
+                                             ( (digits - i) * 4) ) ),
+                                mkU32( 0 ) ) ),
+                    mkexpr( flag[i - 1] ) ) );
+   }
+
+   *final_cnt = cnt[digits];
+   *final_flag = flag[digits];
+}
+
+/* Count the leading zero digits of a 15-digit BCD value presented as:
+ *   lmd      -- the left-most digit (digit 16 position, low 4 bits of I32),
+ *   upper_28 -- the next 7 digits (28 bits),
+ *   low_32   -- the final 8 digits (32 bits).
+ * The three pieces are scanned in order, each Count_zeros call seeded with
+ * the count/flag from the previous piece, so counting stops permanently at
+ * the first non-zero digit.  Returns an Ity_I8 expression with the count.
+ */
+static IRExpr * Count_leading_zeros_60( IRExpr * lmd, IRExpr * upper_28,
+                                        IRExpr * low_32 )
+{
+   IRTemp num_lmd    = newTemp( Ity_I8 );
+   IRTemp num_upper  = newTemp( Ity_I8 );
+   IRTemp num_low    = newTemp( Ity_I8 );
+   IRTemp lmd_flag   = newTemp( Ity_I8 );
+   IRTemp upper_flag = newTemp( Ity_I8 );
+   IRTemp low_flag   = newTemp( Ity_I8 );
+
+   /* Seed: count is 1 if the left-most digit is zero; flag (non-zero seen)
+    * is the complement of that. */
+   assign( num_lmd, unop( Iop_1Uto8, binop( Iop_CmpEQ32, lmd, mkU32( 0 ) ) ) );
+   assign( lmd_flag, unop( Iop_Not8, mkexpr( num_lmd ) ) );
+
+   /* upper_28 holds 7 digits, so start scanning at digit 2 of 8. */
+   Count_zeros( 2,
+                mkexpr( num_lmd ),
+                mkexpr( lmd_flag ),
+                &num_upper,
+                &upper_flag,
+                upper_28 );
+
+   Count_zeros( 1,
+                mkexpr( num_upper ),
+                mkexpr( upper_flag ),
+                &num_low,
+                &low_flag,
+                low_32 );
+
+   return mkexpr( num_low );
+}
+
+/* Count the leading zero digits of a 34-digit (quad) BCD value.  The value
+ * arrives as the left-most digit (lmd), a 12-bit top piece, and two 60-bit
+ * BCD pieces each split into upper/lower 32-bit words.  The 60-bit pieces
+ * are rebuilt into aligned 32-bit windows (shift-left 2 / shift-right 30)
+ * before scanning.  Each Count_zeros call is seeded with the count/flag of
+ * the previous piece, so the count stops at the first non-zero digit.
+ * Returns an Ity_I8 expression holding the total count.
+ */
+static IRExpr * Count_leading_zeros_128( IRExpr * lmd, IRExpr * top_12_l,
+                                         IRExpr * mid_60_u, IRExpr * mid_60_l,
+                                         IRExpr * low_60_u, IRExpr * low_60_l)
+{
+   IRTemp num_lmd   = newTemp( Ity_I8 );
+   IRTemp num_top   = newTemp( Ity_I8 );
+   IRTemp num_mid_u = newTemp( Ity_I8 );
+   IRTemp num_mid_l = newTemp( Ity_I8 );
+   IRTemp num_low_u = newTemp( Ity_I8 );
+   IRTemp num_low_l = newTemp( Ity_I8 );
+
+   IRTemp lmd_flag   = newTemp( Ity_I8 );
+   IRTemp top_flag   = newTemp( Ity_I8 );
+   IRTemp mid_u_flag = newTemp( Ity_I8 );
+   IRTemp mid_l_flag = newTemp( Ity_I8 );
+   IRTemp low_u_flag = newTemp( Ity_I8 );
+   IRTemp low_l_flag = newTemp( Ity_I8 );
+
+   /* Check the LMD, digit 16, to see if it is zero. */
+   assign( num_lmd, unop( Iop_1Uto8, binop( Iop_CmpEQ32, lmd, mkU32( 0 ) ) ) );
+
+   assign( lmd_flag, unop( Iop_Not8, mkexpr( num_lmd ) ) );
+
+   /* top_12_l holds only 3 digits, so start scanning at digit 6 of 8. */
+   Count_zeros( 6,
+                mkexpr( num_lmd ),
+                mkexpr( lmd_flag ),
+                &num_top,
+                &top_flag,
+                top_12_l );
+
+   /* Window the upper 8 digits of the middle 60-bit piece: top 2 bits of
+    * mid_60_l are folded in below the 30 bits of mid_60_u. */
+   Count_zeros( 1,
+                mkexpr( num_top ),
+                mkexpr( top_flag ),
+                &num_mid_u,
+                &mid_u_flag,
+                binop( Iop_Or32,
+                       binop( Iop_Shl32, mid_60_u, mkU8( 2 ) ),
+                       binop( Iop_Shr32, mid_60_l, mkU8( 30 ) ) ) );
+
+   /* Remaining 7 digits of mid_60_l: start at digit 2 of 8. */
+   Count_zeros( 2,
+                mkexpr( num_mid_u ),
+                mkexpr( mid_u_flag ),
+                &num_mid_l,
+                &mid_l_flag,
+                mid_60_l );
+
+   /* Same windowing for the low 60-bit piece. */
+   Count_zeros( 1,
+                mkexpr( num_mid_l ),
+                mkexpr( mid_l_flag ),
+                &num_low_u,
+                &low_u_flag,
+                binop( Iop_Or32,
+                       binop( Iop_Shl32, low_60_u, mkU8( 2 ) ),
+                       binop( Iop_Shr32, low_60_l, mkU8( 30 ) ) ) );
+
+   Count_zeros( 2,
+                mkexpr( num_low_u ),
+                mkexpr( low_u_flag ),
+                &num_low_l,
+                &low_l_flag,
+                low_60_l );
+
+   return mkexpr( num_low_l );
+}
+
+/* Return an Ity_I32 expression that is all-ones if the DFP value whose
+ * 64-bit image is 'val' (an Ity_I64 expression) is a NaN -- i.e. the
+ * comparison would be "unordered" -- and zero otherwise.  A combination
+ * (G) field of 0x1E marks a QNaN and 0x1F an SNaN.
+ */
+static IRExpr * Check_unordered(IRExpr * val)
+{
+   IRTemp gfield0to5 = newTemp( Ity_I32 );
+
+   /* Extract the 5-bit combination field G[0:4] from the top word. */
+   assign( gfield0to5,
+           binop( Iop_And32,
+                  binop( Iop_Shr32, unop( Iop_64HIto32, val ), mkU8( 26 ) ),
+                  mkU32( 0x1F ) ) );
+
+   /* Check for unordered; result is all 1's if true. */
+   return binop( Iop_Or32, /* QNaN check */
+                 unop( Iop_1Sto32,
+                       binop( Iop_CmpEQ32,
+                              mkexpr( gfield0to5 ),
+                              mkU32( 0x1E ) ) ),
+                              unop( Iop_1Sto32, /* SNaN check */
+                                    binop( Iop_CmpEQ32,
+                                           mkexpr( gfield0to5 ),
+                                           mkU32( 0x1F ) ) ) );
+}
+
+#undef AND
+#undef AND4
+#undef OR
+#undef OR3
+#undef OR4
+#undef NOT
+#undef SHR
+#undef SHL
+#undef BITS5
+
+/*------------------------------------------------------------*/
+/*--- Decimal Floating Point (DFP) instruction translation ---*/
+/*------------------------------------------------------------*/
+
+/* DFP Arithmetic instructions */
+/* DFP 64-bit arithmetic: dadd, dsub, dmul, ddiv (and their '.' forms).
+ * Decodes the X-form fields from theInstr, applies the current DFP
+ * rounding mode, writes the result to frS, and clears CR1 for '.' forms
+ * (exception status is not modelled).  Returns True on success, False if
+ * opc2 does not name a known instruction.
+ */
+static Bool dis_dfp_arith(UInt theInstr)
+{
+   UInt opc2 = ifieldOPClo10( theInstr );
+   UChar frS_addr = ifieldRegDS( theInstr );
+   UChar frA_addr = ifieldRegA( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   UChar flag_rC = ifieldBIT0( theInstr );
+
+   IRTemp frA = newTemp( Ity_D64 );
+   IRTemp frB = newTemp( Ity_D64 );
+   IRTemp frS = newTemp( Ity_D64 );
+   IRExpr* round = get_IR_roundingmode_DFP();
+
+   /* By default, if flag_RC is set, we will clear cr1 after the
+    * operation.  In reality we should set cr1 to indicate the
+    * exception status of the operation, but since we're not
+    * simulating exceptions, the exception status will appear to be
+    * zero.  Hence cr1 should be cleared if this is a . form insn.
+    */
+   Bool clear_CR1 = True;
+
+   assign( frA, getDReg( frA_addr ) );
+   assign( frB, getDReg( frB_addr ) );
+
+   switch (opc2) {
+   case 0x2: // dadd
+      DIP( "dadd%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frS, triop( Iop_AddD64, round, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   case 0x202: // dsub
+      DIP( "dsub%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frS, triop( Iop_SubD64, round, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   case 0x22: // dmul
+      DIP( "dmul%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frS, triop( Iop_MulD64, round, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   case 0x222: // ddiv
+      DIP( "ddiv%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frS, triop( Iop_DivD64, round, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   default:
+      /* Without this default, an unrecognised opc2 would fall through to
+       * putDReg with frS never assigned, producing malformed IR.  Bail
+       * out like dis_dfp_round does. */
+      vex_printf("dis_dfp_arith(ppc)(opc2)\n");
+      return False;
+   }
+
+   putDReg( frS_addr, mkexpr( frS ) );
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* Quad DFP Arithmetic instructions */
+/* Quad (128-bit) DFP arithmetic: daddq, dsubq, dmulq, ddivq ('.' forms
+ * included).  Operands live in even/odd FPR pairs.  Clears CR1 for '.'
+ * forms since exception status is not modelled.  Returns True on
+ * success, False for an unknown opc2.
+ */
+static Bool dis_dfp_arithq(UInt theInstr)
+{
+   UInt opc2 = ifieldOPClo10( theInstr );
+   UChar frS_addr = ifieldRegDS( theInstr );
+   UChar frA_addr = ifieldRegA( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   UChar flag_rC = ifieldBIT0( theInstr );
+
+   IRTemp frA = newTemp( Ity_D128 );
+   IRTemp frB = newTemp( Ity_D128 );
+   IRTemp frS = newTemp( Ity_D128 );
+   IRExpr* round = get_IR_roundingmode_DFP();
+
+   /* By default, if flag_RC is set, we will clear cr1 after the
+    * operation.  In reality we should set cr1 to indicate the
+    * exception status of the operation, but since we're not
+    * simulating exceptions, the exception status will appear to be
+    * zero.  Hence cr1 should be cleared if this is a . form insn.
+    */
+   Bool clear_CR1 = True;
+
+   assign( frA, getDReg_pair( frA_addr ) );
+   assign( frB, getDReg_pair( frB_addr ) );
+
+   switch (opc2) {
+   case 0x2: // daddq
+      DIP( "daddq%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frS, triop( Iop_AddD128, round, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   case 0x202: // dsubq
+      DIP( "dsubq%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frS, triop( Iop_SubD128, round, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   case 0x22: // dmulq
+      DIP( "dmulq%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frS, triop( Iop_MulD128, round, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   case 0x222: // ddivq
+      DIP( "ddivq%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frS, triop( Iop_DivD128, round, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   default:
+      /* Guard against an unrecognised opc2: previously this fell through
+       * to putDReg_pair with frS unassigned (malformed IR). */
+      vex_printf("dis_dfp_arithq(ppc)(opc2)\n");
+      return False;
+   }
+
+   putDReg_pair( frS_addr, mkexpr( frS ) );
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* DFP 64-bit logical shift instructions  */
+static Bool dis_dfp_shift(UInt theInstr) {
+   UInt opc2       = ifieldOPClo9( theInstr );
+   UChar frS_addr  = ifieldRegDS( theInstr );
+   UChar frA_addr  = ifieldRegA( theInstr );
+   UChar shift_val = IFIELD(theInstr, 10, 6);
+   UChar flag_rC   = ifieldBIT0( theInstr );
+
+   IRTemp frA = newTemp( Ity_D64 );
+   IRTemp frS = newTemp( Ity_D64 );
+   Bool clear_CR1 = True;
+
+   assign( frA, getDReg( frA_addr ) );
+
+   switch (opc2) {
+   case 0x42: // dscli
+      DIP( "dscli%s fr%u,fr%u,%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, shift_val );
+      assign( frS, binop( Iop_ShlD64, mkexpr( frA ), mkU8( shift_val ) ) );
+      break;
+   case 0x62: // dscri
+      DIP( "dscri%s fr%u,fr%u,%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, shift_val );
+      assign( frS, binop( Iop_ShrD64, mkexpr( frA ), mkU8( shift_val ) ) );
+      break;
+   }
+
+   putDReg( frS_addr, mkexpr( frS ) );
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* Quad DFP  logical shift instructions  */
+static Bool dis_dfp_shiftq(UInt theInstr) {
+   UInt opc2       = ifieldOPClo9( theInstr );
+   UChar frS_addr  = ifieldRegDS( theInstr );
+   UChar frA_addr  = ifieldRegA( theInstr );
+   UChar shift_val = IFIELD(theInstr, 10, 6);
+   UChar flag_rC   = ifieldBIT0( theInstr );
+
+   IRTemp frA = newTemp( Ity_D128 );
+   IRTemp frS = newTemp( Ity_D128 );
+   Bool clear_CR1 = True;
+
+   assign( frA, getDReg_pair( frA_addr ) );
+
+   switch (opc2) {
+   case 0x42: // dscliq
+      DIP( "dscliq%s fr%u,fr%u,%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, shift_val );
+      assign( frS, binop( Iop_ShlD128, mkexpr( frA ), mkU8( shift_val ) ) );
+      break;
+   case 0x62: // dscriq
+      DIP( "dscriq%s fr%u,fr%u,%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, shift_val );
+      assign( frS, binop( Iop_ShrD128, mkexpr( frA ), mkU8( shift_val ) ) );
+      break;
+   }
+
+   putDReg_pair( frS_addr, mkexpr( frS ) );
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* DFP 64-bit format conversion instructions */
+static Bool dis_dfp_fmt_conv(UInt theInstr) {
+   UInt opc2      = ifieldOPClo10( theInstr );
+   UChar frS_addr = ifieldRegDS( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   IRExpr* round  = get_IR_roundingmode_DFP();
+   UChar flag_rC  = ifieldBIT0( theInstr );
+   IRTemp frB;
+   IRTemp frS;
+   Bool clear_CR1 = True;
+
+   switch (opc2) {
+   case 0x102: //dctdp
+      DIP( "dctdp%s fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frB_addr );
+
+      frB = newTemp( Ity_D32 );
+      frS = newTemp( Ity_D64 );
+      assign( frB, getDReg32( frB_addr ) );
+      assign( frS, unop( Iop_D32toD64, mkexpr( frB ) ) );
+      putDReg( frS_addr, mkexpr( frS ) );
+      break;
+   case 0x302: // drsp
+      DIP( "drsp%s fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frB_addr );
+      frB = newTemp( Ity_D64 );
+      frS = newTemp( Ity_D32 );
+      assign( frB, getDReg( frB_addr ) );
+      assign( frS, binop( Iop_D64toD32, round, mkexpr( frB ) ) );
+      putDReg32( frS_addr, mkexpr( frS ) );
+      break;
+   case 0x122: // dctfix
+      {
+         IRTemp tmp = newTemp( Ity_I64 );
+
+         DIP( "dctfix%s fr%u,fr%u\n",
+              flag_rC ? ".":"", frS_addr, frB_addr );
+         frB = newTemp( Ity_D64 );
+         frS = newTemp( Ity_D64 );
+         assign( frB, getDReg( frB_addr ) );
+         assign( tmp, binop( Iop_D64toI64S, round, mkexpr( frB ) ) );
+         assign( frS, unop( Iop_ReinterpI64asD64, mkexpr( tmp ) ) );
+         putDReg( frS_addr, mkexpr( frS ) );
+      }
+      break;
+   case 0x322: // dcffix
+      DIP( "dcffix%s fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frB_addr );
+      frB = newTemp( Ity_D64 );
+      frS = newTemp( Ity_D64 );
+      assign( frB, getDReg( frB_addr ) );
+      assign( frS, binop( Iop_I64StoD64,
+                          round,
+                          unop( Iop_ReinterpD64asI64, mkexpr( frB ) ) ) );
+      putDReg( frS_addr, mkexpr( frS ) );
+      break;
+   }
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* Quad DFP format conversion instructions */
+static Bool dis_dfp_fmt_convq(UInt theInstr) {
+   UInt opc2      = ifieldOPClo10( theInstr );
+   UChar frS_addr = ifieldRegDS( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   IRExpr* round  = get_IR_roundingmode_DFP();
+   IRTemp frB64   = newTemp( Ity_D64 );
+   IRTemp frB128  = newTemp( Ity_D128 );
+   IRTemp frS64   = newTemp( Ity_D64 );
+   IRTemp frS128  = newTemp( Ity_D128 );
+   UChar flag_rC  = ifieldBIT0( theInstr );
+   Bool clear_CR1 = True;
+
+   switch (opc2) {
+   case 0x102: // dctqpq
+      DIP( "dctqpq%s fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frB_addr );
+      assign( frB64, getDReg( frB_addr ) );
+      assign( frS128, unop( Iop_D64toD128, mkexpr( frB64 ) ) );
+      putDReg_pair( frS_addr, mkexpr( frS128 ) );
+      break;
+   case 0x122: // dctfixq
+      {
+         IRTemp tmp = newTemp( Ity_I64 );
+
+         DIP( "dctfixq%s fr%u,fr%u\n",
+              flag_rC ? ".":"", frS_addr, frB_addr );
+         assign( frB128, getDReg_pair( frB_addr ) );
+         assign( tmp, binop( Iop_D128toI64S, round, mkexpr( frB128 ) ) );
+         assign( frS64, unop( Iop_ReinterpI64asD64, mkexpr( tmp ) ) );
+         putDReg( frS_addr, mkexpr( frS64 ) );
+      }
+      break;
+   case 0x302: //drdpq
+      DIP( "drdpq%s fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frB_addr );
+      assign( frB128, getDReg_pair( frB_addr ) );
+      assign( frS64, binop( Iop_D128toD64, round, mkexpr( frB128 ) ) );
+      putDReg( frS_addr, mkexpr( frS64 ) );
+      break;
+   case 0x322: // dcffixq
+     {
+      /* Have to introduce an IOP for this instruction so it will work
+       * on POWER 6 because emulating the instruction requires a POWER 7
+       * DFP instruction in the emulation code.
+       */
+      DIP( "dcffixq%s fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frB_addr );
+      assign( frB64, getDReg( frB_addr ) );
+      assign( frS128, unop( Iop_I64StoD128,
+                            unop( Iop_ReinterpD64asI64,
+                                  mkexpr( frB64 ) ) ) );
+      putDReg_pair( frS_addr, mkexpr( frS128 ) );
+      break;
+     }
+   }
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* DFP round-to-integer instructions drintx / drintn (Z23-form).
+ * R selects the rounding-mode group and RMC the mode within it; both are
+ * packed into one immediate for Iop_RoundD64toInt.  Clears CR1 for '.'
+ * forms.  Returns True on success, False for an unknown opc2.
+ */
+static Bool dis_dfp_round( UInt theInstr ) {
+   UChar frS_addr = ifieldRegDS(theInstr);
+   UChar R        = IFIELD(theInstr, 16, 1);
+   UChar RMC      = IFIELD(theInstr, 9, 2);
+   UChar frB_addr = ifieldRegB( theInstr );
+   UChar flag_rC  = ifieldBIT0( theInstr );
+   IRTemp frB     = newTemp( Ity_D64 );
+   IRTemp frS     = newTemp( Ity_D64 );
+   UInt opc2      = ifieldOPClo8( theInstr );
+   Bool clear_CR1 = True;
+
+   switch (opc2) {
+   /* drintn, is the same as drintx.  The only difference is this
+    * instruction does not generate an exception for an inexact operation.
+    * Currently not supporting inexact exceptions.
+    */
+   case 0x63: // drintx
+   case 0xE3: // drintn
+      DIP( "drintx/drintn%s fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frB_addr );
+
+      /* NOTE, this instruction takes a DFP value and rounds to the
+       * nearest floating point integer value, i.e. fractional part
+       * is zero.  The result is a floating point number.
+       */
+      /* pass the value of R and RMC in the same field */
+      assign( frB, getDReg( frB_addr ) );
+      assign( frS, binop( Iop_RoundD64toInt,
+                          mkU32( ( R << 3 ) | RMC ),
+                          mkexpr( frB ) ) );
+      putDReg( frS_addr, mkexpr( frS ) );
+      break;
+   default:
+      vex_printf("dis_dfp_round(ppc)(opc2)\n");
+      return False;
+   }
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* Quad DFP round-to-integer instructions drintxq / drintnq.  Same shape
+ * as dis_dfp_round but on 128-bit operands held in FPR pairs.  R and RMC
+ * are packed into one immediate for Iop_RoundD128toInt.  Clears CR1 for
+ * '.' forms.  Returns True on success, False for an unknown opc2.
+ */
+static Bool dis_dfp_roundq(UInt theInstr) {
+   UChar frS_addr = ifieldRegDS( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   UChar R = IFIELD(theInstr, 16, 1);
+   UChar RMC = IFIELD(theInstr, 9, 2);
+   UChar flag_rC = ifieldBIT0( theInstr );
+   IRTemp frB = newTemp( Ity_D128 );
+   IRTemp frS = newTemp( Ity_D128 );
+   Bool clear_CR1 = True;
+   UInt opc2 = ifieldOPClo8( theInstr );
+
+   switch (opc2) {
+   /* drintnq, is the same as drintxq.  The only difference is this
+    * instruction does not generate an exception for an inexact operation.
+    * Currently not supporting inexact exceptions.
+    */
+   case 0x63: // drintxq
+   case 0xE3: // drintnq
+      DIP( "drintxq/drintnq%s fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frB_addr );
+
+      /* pass the value of R and RMC in the same field */
+      assign( frB, getDReg_pair( frB_addr ) );
+      assign( frS, binop( Iop_RoundD128toInt,
+                          mkU32( ( R << 3 ) | RMC ),
+                          mkexpr( frB ) ) );
+      putDReg_pair( frS_addr, mkexpr( frS ) );
+      break;
+   default:
+      vex_printf("dis_dfp_roundq(ppc)(opc2)\n");
+      return False;
+   }
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* DFP 64-bit quantize / reround instructions:
+ *   dquai -- quantize frB to the immediate exponent TE,
+ *   dqua  -- quantize frB to the exponent of frA,
+ *   drrnd -- reround frB to the number of digits given by frA.
+ * RMC selects the rounding mode.  Clears CR1 for '.' forms.  Returns
+ * True on success, False for an unknown opc2.
+ */
+static Bool dis_dfp_quantize_sig_rrnd(UInt theInstr) {
+   UInt opc2 = ifieldOPClo8( theInstr );
+   UChar frS_addr = ifieldRegDS( theInstr );
+   UChar frA_addr = ifieldRegA( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   UChar flag_rC = ifieldBIT0( theInstr );
+   UInt TE_value = IFIELD(theInstr, 16, 4);     // low 4 bits of the TE immediate
+   UInt TE_sign  = IFIELD(theInstr, 20, 1);     // sign bit of the TE immediate
+   UInt RMC = IFIELD(theInstr, 9, 2);
+   IRTemp frA = newTemp( Ity_D64 );
+   IRTemp frB = newTemp( Ity_D64 );
+   IRTemp frS = newTemp( Ity_D64 );
+   Bool clear_CR1 = True;
+
+   assign( frB, getDReg( frB_addr ) );
+
+   switch (opc2) {
+   case 0x43: // dquai
+      DIP( "dquai%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      IRTemp TE_I64 = newTemp( Ity_I64 );
+
+      /* Generate a reference DFP value frA with the desired exponent
+       * given by TE using significand from frB.  Need to add the bias
+       * 398 to TE.  TE is stored as a 2's complement number.
+       */
+      if (TE_sign == 1) {
+         /* Take 2's complement of the 5-bit value and subtract from bias. 
+          *  Bias is adjusted for the +1 required when taking 2's complement.
+          */
+         assign( TE_I64,
+                 unop( Iop_32Uto64,
+                       binop( Iop_Sub32, mkU32( 397 ),
+                              binop( Iop_And32, mkU32( 0xF ),
+                                     unop( Iop_Not32, mkU32( TE_value ) )
+                                     ) ) ) );
+
+      } else {
+          assign( TE_I64,
+                  unop( Iop_32Uto64,
+                        binop( Iop_Add32, mkU32( 398 ), mkU32( TE_value ) )
+                        ) );
+      }
+
+      /* Reference value: significand 1 with the biased exponent TE. */
+      assign( frA, binop( Iop_InsertExpD64, mkexpr( TE_I64 ),
+                          unop( Iop_ReinterpI64asD64, mkU64( 1 ) ) ) );
+
+      assign( frS, triop( Iop_QuantizeD64,
+                          mkU32( RMC ),
+                          mkexpr( frA ),
+                          mkexpr( frB ) ) );
+      break;
+
+   case 0x3: // dqua
+      DIP( "dqua%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frA, getDReg( frA_addr ) );
+      assign( frS, triop( Iop_QuantizeD64,
+                          mkU32( RMC ),
+                          mkexpr( frA ),
+                          mkexpr( frB ) ) );
+      break;
+   case 0x23: // drrnd
+      {
+         IRTemp tmp = newTemp( Ity_I8 );
+
+         DIP( "drrnd%s fr%u,fr%u,fr%u\n",
+              flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+         assign( frA, getDReg( frA_addr ) );
+         /* Iop_64to8 not supported in 32 bit mode, do it in two steps. */
+         assign( tmp, unop( Iop_32to8,
+                            unop( Iop_64to32,
+                                  unop( Iop_ReinterpD64asI64,
+                                        mkexpr( frA ) ) ) ) );
+         assign( frS, triop( Iop_SignificanceRoundD64,
+                             mkU32( RMC ),
+                             mkexpr( tmp ),
+                             mkexpr( frB ) ) );
+      }
+      break;
+   default:
+      vex_printf("dis_dfp_quantize_sig_rrnd(ppc)(opc2)\n");
+      return False;
+   }
+   putDReg( frS_addr, mkexpr( frS ) );
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* Quad DFP quantize / reround instructions:
+ *   dquaiq -- quantize frB to the immediate exponent TE,
+ *   dquaq  -- quantize frB to the exponent of frA,
+ *   drrndq -- reround frB to the number of digits given by frA.
+ * 128-bit operands live in FPR pairs; RMC selects the rounding mode.
+ * Clears CR1 for '.' forms.  Returns True on success, False for an
+ * unknown opc2.
+ */
+static Bool dis_dfp_quantize_sig_rrndq(UInt theInstr) {
+   UInt opc2 = ifieldOPClo8( theInstr );
+   UChar frS_addr = ifieldRegDS( theInstr );
+   UChar frA_addr = ifieldRegA( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   UChar flag_rC = ifieldBIT0( theInstr );
+   UInt TE_value = IFIELD(theInstr, 16, 4);
+   UInt TE_sign  = IFIELD(theInstr, 20, 1);
+   UInt RMC = IFIELD(theInstr, 9, 2);
+   IRTemp frA = newTemp( Ity_D128 );
+   IRTemp frB = newTemp( Ity_D128 );
+   IRTemp frS = newTemp( Ity_D128 );
+   Bool clear_CR1 = True;
+
+   assign( frB, getDReg_pair( frB_addr ) );
+
+   switch (opc2) {
+   case 0x43: // dquaiq
+      DIP( "dquaiq%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      IRTemp TE_I64 = newTemp( Ity_I64 );
+
+      /* Generate a reference DFP value frA with the desired exponent
+       * given by TE using significand of 1.  Need to add the bias
+       * 6176 to TE.
+       */
+      if (TE_sign == 1) {
+         /* Take 2's complement of the 5-bit value and subtract from bias. 
+          *  Bias adjusted for the +1 required when taking 2's complement.
+          */
+         assign( TE_I64,
+                 unop( Iop_32Uto64,
+                       binop( Iop_Sub32, mkU32( 6175 ),
+                              binop( Iop_And32, mkU32( 0xF ),
+                                     unop( Iop_Not32, mkU32( TE_value ) )
+                                     ) ) ) );
+
+      } else {
+         assign( TE_I64,
+                 unop( Iop_32Uto64,
+                       binop( Iop_Add32,
+                             mkU32( 6176 ),
+                             mkU32( TE_value ) ) ) );
+      }
+
+      assign( frA,
+              binop( Iop_InsertExpD128, mkexpr( TE_I64 ),
+                     unop( Iop_D64toD128,
+                           unop( Iop_ReinterpI64asD64, mkU64( 1 ) ) ) ) );
+      assign( frS, triop( Iop_QuantizeD128,
+                          mkU32( RMC ),
+                          mkexpr( frA ),
+                          mkexpr( frB ) ) );
+      break;
+   case 0x3: // dquaq
+      /* Fix: was printing "dquaiq" for this instruction. */
+      DIP( "dquaq%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frA, getDReg_pair( frA_addr ) );
+      assign( frS, triop( Iop_QuantizeD128,
+                          mkU32( RMC ),
+                          mkexpr( frA ),
+                          mkexpr( frB ) ) );
+      break;
+   case 0x23: // drrndq
+      {
+         IRTemp tmp = newTemp( Ity_I8 );
+
+         DIP( "drrndq%s fr%u,fr%u,fr%u\n",
+              flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+         assign( frA, getDReg_pair( frA_addr ) );
+         /* Reference significance comes from the low byte of frA's high
+          * doubleword; Iop_64to8 is unavailable in 32-bit mode, so
+          * narrow in two steps. */
+         assign( tmp, unop( Iop_32to8,
+                            unop( Iop_64to32,
+                                  unop( Iop_ReinterpD64asI64,
+                                        unop( Iop_D128HItoD64,
+                                              mkexpr( frA ) ) ) ) ) );
+         assign( frS, triop( Iop_SignificanceRoundD128,
+                             mkU32( RMC ),
+                             mkexpr( tmp ),
+                             mkexpr( frB ) ) );
+      }
+      break;
+   default:
+      vex_printf("dis_dfp_quantize_sig_rrndq(ppc)(opc2)\n");
+      return False;
+   }
+   putDReg_pair( frS_addr, mkexpr( frS ) );
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* DFP 64-bit exponent extract/insert:
+ *   dxex -- extract the biased exponent of frB into frS (as an integer
+ *           bit pattern held in the FP register),
+ *   diex -- insert the exponent given by frA (integer bit pattern) into
+ *           the significand of frB.
+ * Clears CR1 for '.' forms.  Returns True on success, False for an
+ * unknown opc2.
+ */
+static Bool dis_dfp_extract_insert(UInt theInstr) {
+   UInt opc2 = ifieldOPClo10( theInstr );
+   UChar frS_addr = ifieldRegDS( theInstr );
+   UChar frA_addr = ifieldRegA( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   UChar flag_rC = ifieldBIT0( theInstr );
+   Bool clear_CR1 = True;
+
+   IRTemp frA = newTemp( Ity_D64 );
+   IRTemp frB = newTemp( Ity_D64 );
+   IRTemp frS = newTemp( Ity_D64 );
+   IRTemp tmp = newTemp( Ity_I64 );
+
+   assign( frA, getDReg( frA_addr ) );
+   assign( frB, getDReg( frB_addr ) );
+
+   switch (opc2) {
+   case 0x162: // dxex
+      DIP( "dxex%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( tmp, unop( Iop_ExtractExpD64, mkexpr( frB ) ) );
+      assign( frS, unop( Iop_ReinterpI64asD64, mkexpr( tmp ) ) );
+      break;
+   case 0x362: // diex
+      DIP( "diex%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frS, binop( Iop_InsertExpD64,
+                          unop( Iop_ReinterpD64asI64,
+                                mkexpr( frA ) ),
+                          mkexpr( frB ) ) );
+      break;
+   default:
+      vex_printf("dis_dfp_extract_insert(ppc)(opc2)\n");
+      return False;
+   }
+
+   putDReg( frS_addr, mkexpr( frS ) );
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* Quad DFP exponent extract/insert:
+ *   dxexq -- extract the biased exponent of the D128 in frB into a single
+ *            64-bit FP register,
+ *   diexq -- insert the exponent held in (D64) frA into the D128
+ *            significand of frB, writing a D128 result to an FPR pair.
+ * Note the asymmetric register widths: dxexq writes one register while
+ * diexq writes a pair, so each case does its own register write.
+ * Clears CR1 for '.' forms.  Returns True on success, False for an
+ * unknown opc2.
+ */
+static Bool dis_dfp_extract_insertq(UInt theInstr) {
+   UInt opc2 = ifieldOPClo10( theInstr );
+   UChar frS_addr = ifieldRegDS( theInstr );
+   UChar frA_addr = ifieldRegA( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   UChar flag_rC = ifieldBIT0( theInstr );
+
+   IRTemp frA   = newTemp( Ity_D64 );
+   IRTemp frB   = newTemp( Ity_D128 );
+   IRTemp frS64 = newTemp( Ity_D64 );
+   IRTemp frS   = newTemp( Ity_D128 );
+   IRTemp tmp   = newTemp( Ity_I64 );
+   Bool clear_CR1 = True;
+
+   assign( frB, getDReg_pair( frB_addr ) );
+
+   switch (opc2) {
+   case 0x162:  // dxexq
+      DIP( "dxexq%s fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr,  frB_addr );
+      /* Instruction actually returns a 64-bit result.  So as to be
+       * consistent and not have to add a new struct, the emulation returns
+       * the 64-bit result in the upper and lower register.
+       */
+      assign( tmp, unop( Iop_ExtractExpD128, mkexpr( frB ) ) );
+      assign( frS64, unop( Iop_ReinterpI64asD64, mkexpr( tmp ) ) );
+      putDReg( frS_addr, mkexpr( frS64 ) );
+      break;
+   case 0x362:  // diexq
+      DIP( "diexq%s fr%u,fr%u,fr%u\n",
+           flag_rC ? ".":"", frS_addr, frA_addr, frB_addr );
+      assign( frA, getDReg( frA_addr ) );
+      assign( frS, binop( Iop_InsertExpD128,
+                          unop( Iop_ReinterpD64asI64, mkexpr( frA ) ),
+                          mkexpr( frB ) ) );
+      putDReg_pair( frS_addr, mkexpr( frS ) );
+      break;
+   default:
+      vex_printf("dis_dfp_extract_insertq(ppc)(opc2)\n");
+      return False;
+   }
+
+   if (flag_rC && clear_CR1) {
+      putCR321( 1, mkU8( 0 ) );
+      putCR0( 1, mkU8( 0 ) );
+   }
+
+   return True;
+}
+
+/* DFP 64-bit comparison instructions */
+static Bool dis_dfp_compare(UInt theInstr) {
+   /* X-Form */
+   UChar crfD = toUChar( IFIELD( theInstr, 23, 3 ) ); // AKA BF
+   UChar frA_addr = ifieldRegA( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   UInt opc1 = ifieldOPC( theInstr );
+   IRTemp frA;
+   IRTemp frB;
+
+   IRTemp ccIR = newTemp( Ity_I32 );
+   IRTemp ccPPC32 = newTemp( Ity_I32 );
+
+
+   /* Note: Differences between dcmpu and dcmpo are only in exception
+    flag settings, which aren't supported anyway. */
+   switch (opc1) {
+   case 0x3B: /* dcmpo and dcmpu, DFP 64-bit */
+      DIP( "dcmpo %u,fr%u,fr%u\n", crfD, frA_addr, frB_addr );
+      frA = newTemp( Ity_D64 );
+      frB = newTemp( Ity_D64 );
+
+      assign( frA, getDReg( frA_addr ) );
+      assign( frB, getDReg( frB_addr ) );
+
+      assign( ccIR, binop( Iop_CmpD64, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   case 0x3F: /* dcmpoq and dcmpuq,DFP 128-bit */
+      DIP( "dcmpoq %u,fr%u,fr%u\n", crfD, frA_addr, frB_addr );
+      frA = newTemp( Ity_D128 );
+      frB = newTemp( Ity_D128 );
+
+      assign( frA, getDReg_pair( frA_addr ) );
+      assign( frB, getDReg_pair( frB_addr ) );
+      assign( ccIR, binop( Iop_CmpD128, mkexpr( frA ), mkexpr( frB ) ) );
+      break;
+   default:
+      vex_printf("dis_dfp_compare(ppc)(opc2)\n");
+      return False;
+   }
+
+   /* Map compare result from IR to PPC32 */
+   /*
+    FP cmp result | PPC | IR
+    --------------------------
+    UN            | 0x1 | 0x45
+    EQ            | 0x2 | 0x40
+    GT            | 0x4 | 0x00
+    LT            | 0x8 | 0x01
+    */
+
+   assign( ccPPC32,
+           binop( Iop_Shl32,
+                  mkU32( 1 ),
+                  unop( Iop_32to8,
+                        binop( Iop_Or32,
+                               binop( Iop_And32,
+                                      unop( Iop_Not32,
+                                            binop( Iop_Shr32,
+                                                   mkexpr( ccIR ),
+                                                   mkU8( 5 ) ) ),
+                                      mkU32( 2 ) ),
+                               binop( Iop_And32,
+                                      binop( Iop_Xor32,
+                                             mkexpr( ccIR ),
+                                             binop( Iop_Shr32,
+                                                    mkexpr( ccIR ),
+                                                    mkU8( 6 ) ) ),
+                                      mkU32( 1 ) ) ) ) ) );
+
+   putGST_field( PPC_GST_CR, mkexpr( ccPPC32 ), crfD );
+   return True;
+}
+
+/* DFP test-exponent instructions: dtstex (DFP 64-bit, opc1 0x3b) and
+   dtstexq (DFP 128-bit, opc1 0x3F).  Compares the biased exponents of
+   frA and frB and writes a 4-bit result (LT/GT/EQ/unordered) into CR
+   field crfD.  Returns False iff the instruction could not be decoded. */
+static Bool dis_dfp_exponent_test ( UInt theInstr )
+{
+   UChar frA_addr   = ifieldRegA( theInstr );
+   UChar frB_addr   = ifieldRegB( theInstr );
+   UChar crfD       = toUChar( IFIELD( theInstr, 23, 3 ) );
+   IRTemp frA       = newTemp( Ity_D64 );
+   IRTemp frB       = newTemp( Ity_D64 );
+   IRTemp frA128    = newTemp( Ity_D128 );
+   IRTemp frB128    = newTemp( Ity_D128 );
+   UInt opc1        = ifieldOPC( theInstr );
+   IRTemp gfield_A  = newTemp( Ity_I32 );
+   IRTemp gfield_B  = newTemp( Ity_I32 );
+   IRTemp gfield_mask   = newTemp( Ity_I32 );
+   IRTemp exponent_A    = newTemp( Ity_I32 );
+   IRTemp exponent_B    = newTemp( Ity_I32 );
+   /* The *_true temps below are lane masks: all-ones (1Sto32) when the
+      condition holds, zero otherwise. */
+   IRTemp A_NaN_true    = newTemp( Ity_I32 );
+   IRTemp B_NaN_true    = newTemp( Ity_I32 );
+   IRTemp A_inf_true    = newTemp( Ity_I32 );
+   IRTemp B_inf_true    = newTemp( Ity_I32 );
+   IRTemp A_equals_B    = newTemp( Ity_I32 );
+   IRTemp finite_number = newTemp( Ity_I32 );
+   IRTemp cc0 = newTemp( Ity_I32 );
+   IRTemp cc1 = newTemp( Ity_I32 );
+   IRTemp cc2 = newTemp( Ity_I32 );
+   IRTemp cc3 = newTemp( Ity_I32 );
+
+   /* The dtstex and dtstexg instructions only differ in the size of the
+    * exponent field.  The following switch statement takes care of the size
+    * specific setup.  Once the value of the exponents, the G-field shift
+    * and mask is setup the remaining code is identical.
+    */
+   switch (opc1) {
+   case 0x3b: // dtstex       Extended instruction setup
+      DIP("dtstex %u,r%u,r%d\n", crfD, frA_addr, frB_addr);
+      assign( frA, getDReg( frA_addr ) );
+      assign( frB, getDReg( frB_addr ) );
+      assign( gfield_mask, mkU32( DFP_G_FIELD_LONG_MASK ) );
+      assign(exponent_A, unop( Iop_64to32,
+                               unop( Iop_ExtractExpD64,
+                                     mkexpr( frA ) ) ) );
+      assign(exponent_B, unop( Iop_64to32,
+                               unop( Iop_ExtractExpD64,
+                                     mkexpr( frB ) ) ) );
+      break;
+
+   case 0x3F: //  dtstexq      Quad instruction setup
+      DIP("dtstexq %u,r%u,r%d\n", crfD, frA_addr, frB_addr);
+      assign( frA128, getDReg_pair( frA_addr ) );
+      assign( frB128, getDReg_pair( frB_addr ) );
+      /* The classification below only needs the G-field, which lives in
+         the high 64 bits of the 128-bit value. */
+      assign( frA, unop( Iop_D128HItoD64, mkexpr( frA128 ) ) );
+      assign( frB, unop( Iop_D128HItoD64, mkexpr( frB128 ) ) );
+      assign( gfield_mask, mkU32( DFP_G_FIELD_EXTND_MASK ) );
+      assign( exponent_A, unop( Iop_64to32,
+                                unop( Iop_ExtractExpD128,
+                                      mkexpr( frA128 ) ) ) );
+      assign( exponent_B, unop( Iop_64to32,
+                                unop( Iop_ExtractExpD128,
+                                      mkexpr( frB128 ) ) ) );
+      break;
+   default:
+      vex_printf("dis_dfp_exponent_test(ppc)(opc2)\n");
+      return False;
+   }
+
+   /* Extract the Gfield */
+   assign( gfield_A, binop( Iop_And32,
+                            mkexpr( gfield_mask ),
+                            unop( Iop_64HIto32,
+                                  unop( Iop_ReinterpD64asI64,
+                                        mkexpr(frA) ) ) ) );
+
+   assign( gfield_B, binop( Iop_And32,
+                            mkexpr( gfield_mask ),
+                            unop( Iop_64HIto32,
+                                  unop( Iop_ReinterpD64asI64,
+                                        mkexpr(frB) ) ) ) );
+
+   /* check for NAN: G-field pattern 0x7C000000 is QNaN, 0x7E000000 is
+      SNaN (after masking with gfield_mask above). */
+   assign( A_NaN_true, binop(Iop_Or32,
+                             unop( Iop_1Sto32,
+                                   binop( Iop_CmpEQ32,
+                                          mkexpr( gfield_A ),
+                                          mkU32( 0x7C000000 ) ) ),
+                             unop( Iop_1Sto32,
+                                   binop( Iop_CmpEQ32,
+                                          mkexpr( gfield_A ),
+                                          mkU32( 0x7E000000 ) )
+                                   ) ) );
+   assign( B_NaN_true, binop(Iop_Or32,
+                             unop( Iop_1Sto32,
+                                   binop( Iop_CmpEQ32,
+                                          mkexpr( gfield_B ),
+                                          mkU32( 0x7C000000 ) ) ),
+                             unop( Iop_1Sto32,
+                                   binop( Iop_CmpEQ32,
+                                          mkexpr( gfield_B ),
+                                          mkU32( 0x7E000000 ) )
+                             ) ) );
+
+   /* check for infinity: G-field pattern 0x78000000 */
+   assign( A_inf_true,
+           unop( Iop_1Sto32,
+                 binop( Iop_CmpEQ32,
+                        mkexpr( gfield_A ),
+                        mkU32( 0x78000000 ) ) ) );
+
+   assign( B_inf_true,
+           unop( Iop_1Sto32,
+                 binop( Iop_CmpEQ32,
+                        mkexpr( gfield_B ),
+                        mkU32( 0x78000000 ) ) ) );
+
+   /* all-ones iff neither operand is NaN or infinity */
+   assign( finite_number,
+           unop( Iop_Not32,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32,
+                               mkexpr( A_NaN_true ),
+                               mkexpr( B_NaN_true ) ),
+                        binop( Iop_Or32,
+                               mkexpr( A_inf_true ),
+                               mkexpr( B_inf_true ) ) ) ) );
+
+   /* Calculate the condition code bits
+    * If QNaN,SNaN, +infinity, -infinity then cc0, cc1 and cc2 are zero
+    * regardless of the value of the comparisons and cc3 is 1.  Otherwise,
+    * cc0, cc1 and cc0 reflect the results of the comparisons.
+    */
+   /* "equal" here means: same exponent, or both infinity, or both NaN.
+      Note bit 0 of this mask is what cc2 uses; the upper bits are
+      don't-care. */
+   assign( A_equals_B,
+           binop( Iop_Or32,
+                  unop( Iop_1Uto32,
+                  binop( Iop_CmpEQ32,
+                         mkexpr( exponent_A ),
+                         mkexpr( exponent_B ) ) ),
+                  binop( Iop_Or32,
+                         binop( Iop_And32,
+                                mkexpr( A_inf_true ),
+                                mkexpr( B_inf_true ) ),
+                         binop( Iop_And32,
+                                mkexpr( A_NaN_true ),
+                                mkexpr( B_NaN_true ) ) ) ) );
+
+   /* cc0 (CR bit 3): exp(A) < exp(B), only for finite operands */
+   assign( cc0, binop( Iop_And32,
+                       mkexpr( finite_number ),
+                       binop( Iop_Shl32,
+                              unop( Iop_1Uto32,
+                                    binop( Iop_CmpLT32U,
+                                           mkexpr( exponent_A ),
+                                           mkexpr( exponent_B ) ) ),
+                                           mkU8( 3 ) ) ) );
+
+   /* cc1 (CR bit 2): exp(A) > exp(B), only for finite operands */
+   assign( cc1, binop( Iop_And32,
+                       mkexpr( finite_number ),
+                       binop( Iop_Shl32,
+                              unop( Iop_1Uto32,
+                                    binop( Iop_CmpLT32U,
+                                           mkexpr( exponent_B ),
+                                           mkexpr( exponent_A ) ) ),
+                                           mkU8( 2 ) ) ) );
+
+   /* cc2 (CR bit 1): operands compare "equal" as defined above */
+   assign( cc2, binop( Iop_Shl32, 
+                       binop( Iop_And32,
+                              mkexpr( A_equals_B ),
+                              mkU32( 1 ) ),
+                              mkU8( 1 ) ) );
+
+   /* cc3 (CR bit 0): unordered -- at least one operand is inf/NaN and
+      the operands are not "equal" */
+   assign( cc3, binop( Iop_And32,
+                       unop( Iop_Not32, mkexpr( A_equals_B ) ),
+                       binop( Iop_And32,
+                              mkU32( 0x1 ),
+                              binop( Iop_Or32,
+                                     binop( Iop_Or32,
+                                            mkexpr ( A_inf_true ),
+                                            mkexpr ( B_inf_true ) ),
+                                            binop( Iop_Or32,
+                                                   mkexpr ( A_NaN_true ),
+                                                   mkexpr ( B_NaN_true ) ) )
+                              ) ) );
+
+   /* store the condition code */
+   putGST_field( PPC_GST_CR,
+                 binop( Iop_Or32,
+                        mkexpr( cc0 ),
+                        binop( Iop_Or32,
+                               mkexpr( cc1 ),
+                               binop( Iop_Or32,
+                                      mkexpr( cc2 ),
+                                      mkexpr( cc3 ) ) ) ),
+                 crfD );
+   return True;
+}
+
+/* DFP test-data-class / test-data-group instructions:
+   dtstdc/dtstdg (DFP 64-bit, opc1 0x3b) and dtstdcq/dtstdgq
+   (DFP 128-bit, opc1 0x3F); dtstdc* vs dtstdg* is selected by opc2
+   (0xC2 = class, 0xE2 = group).  Classifies frA, ANDs the computed
+   6-bit class/group mask with the DCM immediate, and writes a 4-bit
+   sign+match result into CR field crfD (BF).  Returns False iff the
+   instruction could not be decoded. */
+static Bool dis_dfp_class_test ( UInt theInstr )
+{
+   UChar frA_addr   = ifieldRegA( theInstr );
+   IRTemp frA       = newTemp( Ity_D64 );
+   IRTemp abs_frA   = newTemp( Ity_D64 );
+   IRTemp frAI64_hi = newTemp( Ity_I64 );
+   IRTemp frAI64_lo = newTemp( Ity_I64 );
+   UInt opc1        = ifieldOPC( theInstr );
+   UInt opc2        = ifieldOPClo9( theInstr );
+   UChar crfD       = toUChar( IFIELD( theInstr, 23, 3 ) );  // AKA BF
+   UInt DCM         = IFIELD( theInstr, 10, 6 );
+   IRTemp DCM_calc  = newTemp( Ity_I32 );
+   UInt max_exp     = 0;
+   UInt min_exp     = 0;
+   IRTemp min_subnormalD64  = newTemp( Ity_D64 );
+   IRTemp min_subnormalD128 = newTemp( Ity_D128 );
+   IRTemp significand64  = newTemp( Ity_D64 );
+   IRTemp significand128 = newTemp( Ity_D128 );
+   IRTemp exp_min_normal = newTemp( Ity_I64 );
+   IRTemp exponent       = newTemp( Ity_I32 );
+
+   /* The *_true temps are lane masks: all-ones when the condition
+      holds, zero otherwise. */
+   IRTemp infinity_true  = newTemp( Ity_I32 );
+   IRTemp SNaN_true      = newTemp( Ity_I32 );
+   IRTemp QNaN_true      = newTemp( Ity_I32 );
+   IRTemp subnormal_true = newTemp( Ity_I32 );
+   IRTemp normal_true    = newTemp( Ity_I32 );
+   IRTemp extreme_true   = newTemp( Ity_I32 );
+   IRTemp lmd            = newTemp( Ity_I32 );
+   IRTemp lmd_zero_true  = newTemp( Ity_I32 );
+   IRTemp zero_true      = newTemp( Ity_I32 );
+   IRTemp sign           = newTemp( Ity_I32 );
+   IRTemp field          = newTemp( Ity_I32 );
+   IRTemp ccIR_zero      = newTemp( Ity_I32 );
+   IRTemp ccIR_subnormal = newTemp( Ity_I32 );
+
+   /* UInt size     = DFP_LONG;  JRS:unused */
+   IRTemp gfield = newTemp( Ity_I32 );
+   IRTemp gfield_0_4_shift  = newTemp( Ity_I8 );
+   IRTemp gfield_mask       = newTemp( Ity_I32 );
+   /* dcm0..dcm5 are the six DCM result bits (bit 5 down to bit 0) */
+   IRTemp dcm0 = newTemp( Ity_I32 );
+   IRTemp dcm1 = newTemp( Ity_I32 );
+   IRTemp dcm2 = newTemp( Ity_I32 );
+   IRTemp dcm3 = newTemp( Ity_I32 );
+   IRTemp dcm4 = newTemp( Ity_I32 );
+   IRTemp dcm5 = newTemp( Ity_I32 );
+
+   /* The only difference between the dtstdc and dtstdcq instructions is
+    * size of the T and G fields.  The calculation of the 4 bit field
+    * is the same.  Setup the parameters and values that are DFP size
+    * specific.  The rest of the code is independent of the DFP size.
+    *
+    * The Io_CmpD64 is used below.  The instruction sets the ccIR values.
+    * The interpretation of the ccIR values is as follows:
+    *
+    *    DFP cmp result | IR
+    * --------------------------
+    *	 UN             | 0x45
+    *	 EQ             | 0x40
+    *	 GT             | 0x00
+    *	 LT             | 0x01
+    */
+
+   assign( frA, getDReg( frA_addr ) );
+   assign( frAI64_hi, unop( Iop_ReinterpD64asI64, mkexpr( frA ) ) );
+
+   /* clear the sign bit to get |frA| */
+   assign( abs_frA, unop( Iop_ReinterpI64asD64,
+                          binop( Iop_And64,
+                                 unop( Iop_ReinterpD64asI64,
+                                       mkexpr( frA ) ),
+                                 mkU64( 0x7FFFFFFFFFFFFFFFULL ) ) ) );
+   assign( gfield_0_4_shift, mkU8( 31 - 5 ) );  // G-field[0:4]
+   switch (opc1) {
+   case 0x3b: // dtstdc, dtstdg
+      DIP("dtstd%s %u,r%u,%d\n", opc2 == 0xc2 ? "c" : "g",
+               crfD, frA_addr, DCM);
+      /* setup the parameters for the long format of the two instructions */
+      assign( frAI64_lo, mkU64( 0 ) );
+      assign( gfield_mask, mkU32( DFP_G_FIELD_LONG_MASK ) );
+      max_exp = DFP_LONG_EXP_MAX;
+      min_exp = DFP_LONG_EXP_MIN;
+
+      assign( exponent, unop( Iop_64to32,
+                              unop( Iop_ExtractExpD64,
+                                    mkexpr( frA ) ) ) );
+      assign( significand64,
+              unop( Iop_ReinterpI64asD64,
+                    mkU64( 0x2234000000000001ULL ) ) );  // dfp 1.0
+      /* biased exponent of the smallest normal D64 number */
+      assign( exp_min_normal,mkU64( 398 - 383 ) );
+      assign( min_subnormalD64,
+              binop( Iop_InsertExpD64,
+                     mkexpr( exp_min_normal ),
+                     mkexpr( significand64 ) ) );
+
+      /* compare |frA| against the smallest normal number; LT or EQ
+         below is used to detect subnormals */
+      assign( ccIR_subnormal,
+              binop( Iop_CmpD64,
+                     mkexpr( abs_frA ),
+                     mkexpr( min_subnormalD64 ) ) );
+
+      /* compare absolute value of frA with zero */
+      assign( ccIR_zero,
+              binop( Iop_CmpD64,
+                     mkexpr( abs_frA ),
+                     unop( Iop_ReinterpI64asD64,
+                           mkU64( 0x2238000000000000ULL ) ) ) );
+
+      /* size = DFP_LONG; JRS: unused */
+      break;
+
+   case 0x3F:   // dtstdcq, dtstdgq
+      DIP("dtstd%sq %u,r%u,%d\n", opc2 == 0xc2 ? "c" : "g",
+               crfD, frA_addr, DCM);
+      /* setup the parameters for the extended format of the
+       * two instructions
+       */
+      assign( frAI64_lo, unop( Iop_ReinterpD64asI64,
+                               getDReg( frA_addr+1 ) ) );
+
+      assign( gfield_mask, mkU32( DFP_G_FIELD_EXTND_MASK ) );
+      max_exp = DFP_EXTND_EXP_MAX;
+      min_exp = DFP_EXTND_EXP_MIN;
+      assign( exponent, unop( Iop_64to32, 
+                              unop( Iop_ExtractExpD128,
+                                    getDReg_pair( frA_addr) ) ) );
+
+      /* create quand exponent for minimum normal number */
+      assign( exp_min_normal, mkU64( 6176 - 6143 ) );
+      assign( significand128,
+              unop( Iop_D64toD128,
+                    unop( Iop_ReinterpI64asD64,
+                          mkU64( 0x2234000000000001ULL ) ) ) );  // dfp 1.0
+
+      assign( min_subnormalD128,
+              binop( Iop_InsertExpD128,
+                     mkexpr( exp_min_normal ),
+                     mkexpr( significand128 ) ) );
+
+      /* compare |frA:frA+1| against the smallest normal D128 number */
+      assign( ccIR_subnormal, 
+              binop( Iop_CmpD128,
+                     binop( Iop_D64HLtoD128,
+                            unop( Iop_ReinterpI64asD64,
+                                  binop( Iop_And64,
+                                         unop( Iop_ReinterpD64asI64,
+                                               mkexpr( frA ) ),
+                                         mkU64( 0x7FFFFFFFFFFFFFFFULL ) ) ),
+                            getDReg( frA_addr+1 ) ),
+                     mkexpr( min_subnormalD128 ) ) );
+      /* compare |frA:frA+1| against (128-bit) zero */
+      assign( ccIR_zero,
+              binop( Iop_CmpD128,
+                     binop( Iop_D64HLtoD128,
+                            mkexpr( abs_frA ),
+                            getDReg( frA_addr+1 ) ),
+                     unop( Iop_D64toD128,
+                           unop( Iop_ReinterpI64asD64,
+                                 mkU64( 0x0ULL ) ) ) ) );
+
+      /* size = DFP_EXTND; JRS:unused */
+      break;
+   default:
+      vex_printf("dis_dfp_class_test(ppc)(opc2)\n");
+      return False;
+   }
+
+   /* The G-field is in the upper 32-bits.  The I64 logical operations
+    * do not seem to be supported in 32-bit mode so keep things as 32-bit
+    * operations.
+    */
+   assign( gfield, binop( Iop_And32,
+                          mkexpr( gfield_mask ),
+                          unop( Iop_64HIto32,
+                                mkexpr(frAI64_hi) ) ) );
+
+   /* There is a lot of code that is the same to do the class and group
+    * instructions.  Later there is an if statement to handle the specific
+    * instruction.
+    *
+    * Will be using I32 values, compares, shifts and logical operations for
+    * this code as the 64-bit compare, shifts, logical operations are not 
+    * supported in 32-bit mode.
+    */
+
+   /* Check the bits for Infinity, QNaN or Signaling NaN.
+      G-field top bits: 11110 = infinity, 111110 = QNaN, 111111 = SNaN. */
+   assign( infinity_true,
+           unop( Iop_1Sto32,
+                 binop( Iop_CmpEQ32,
+                        binop( Iop_And32,
+                               mkU32( 0x7C000000 ),
+                               mkexpr( gfield ) ),
+                        mkU32( 0x78000000 ) ) ) );
+
+   assign( SNaN_true,
+           unop( Iop_1Sto32,
+                 binop( Iop_CmpEQ32,
+                        binop( Iop_And32,
+                               mkU32( 0x7E000000 ),
+                               mkexpr( gfield ) ),
+                        mkU32( 0x7E000000 ) ) ) );
+
+   /* QNaN: NaN bits set but not the SNaN pattern */
+   assign( QNaN_true,
+           binop( Iop_And32,
+                  unop( Iop_1Sto32,
+                       binop( Iop_CmpEQ32,
+                              binop( Iop_And32,
+                                     mkU32( 0x7E000000 ),
+                                     mkexpr( gfield ) ),
+                              mkU32( 0x7C000000 ) ) ),
+                  unop( Iop_Not32,
+                        mkexpr( SNaN_true ) ) ) );
+
+   /* zero: compares equal to zero and is not a special value */
+   assign( zero_true,
+           binop( Iop_And32,
+                  unop(Iop_1Sto32,
+                       binop( Iop_CmpEQ32,
+                              mkexpr( ccIR_zero ),
+                              mkU32( 0x40 ) ) ),  // ccIR code for Equal
+                  unop( Iop_Not32,
+                        binop( Iop_Or32,
+                               mkexpr( infinity_true ),
+                               binop( Iop_Or32,
+                                      mkexpr( QNaN_true ),
+                                      mkexpr( SNaN_true ) ) ) ) ) );
+
+   /* Do compare of frA the minimum normal value.  Comparison is size
+    * depenent and was done above to get the ccIR value.
+    */
+   /* subnormal: |frA| <= min normal, and not zero/infinity/NaN */
+   assign( subnormal_true, 
+           binop( Iop_And32,
+                  binop( Iop_Or32,
+                         unop( Iop_1Sto32,
+                               binop( Iop_CmpEQ32,
+                                      mkexpr( ccIR_subnormal ),
+                                      mkU32( 0x40 ) ) ), // ccIR code for Equal
+                         unop( Iop_1Sto32,
+                               binop( Iop_CmpEQ32,
+                                      mkexpr( ccIR_subnormal ),
+                                      mkU32( 0x1 ) ) ) ), // ccIR code for LT
+           unop( Iop_Not32,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32,
+                               mkexpr( infinity_true ),
+                               mkexpr( zero_true) ),
+                        binop( Iop_Or32,
+                               mkexpr( QNaN_true ),
+                               mkexpr( SNaN_true ) ) ) ) ) );
+
+   /* Normal number is not subnormal, infinity, NaN or Zero */
+   assign( normal_true,
+           unop( Iop_Not32,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32,
+                               mkexpr( infinity_true ),
+                               mkexpr( zero_true ) ),
+                        binop( Iop_Or32,
+                               mkexpr( subnormal_true ),
+                               binop( Iop_Or32,
+                                      mkexpr( QNaN_true ),
+                                      mkexpr( SNaN_true ) ) ) ) ) );
+
+   /* Calculate the DCM bit field based on the tests for the specific
+    * instruction
+    */
+   if (opc2 == 0xC2) {    // dtstdc, dtstdcq
+      /* DCM[0:5] Bit   Data Class definition
+       *   0   Zero
+       *   1   Subnormal
+       *   2   Normal
+       *   3   Infinity
+       *   4   Quiet NaN
+       *   5   Signaling NaN
+       */
+
+      /* NOTE(review): unlike dcm1..dcm5, dcm0 is not masked with 1
+         before the shift, so it carries the full sign-extended mask
+         in bits 5..31.  Looks harmless because `field` below ANDs
+         DCM_calc with the 6-bit DCM immediate -- confirm. */
+      assign( dcm0, binop( Iop_Shl32,
+                           mkexpr( zero_true ),
+                           mkU8( 5 ) ) );
+      assign( dcm1, binop( Iop_Shl32,
+                           binop( Iop_And32,
+                                  mkexpr( subnormal_true ),
+                                  mkU32( 1 ) ),
+                           mkU8( 4 ) ) );
+      assign( dcm2, binop( Iop_Shl32,
+                           binop( Iop_And32,
+                                  mkexpr( normal_true ),
+                                  mkU32( 1 ) ),
+                           mkU8( 3 ) ) );
+      assign( dcm3, binop( Iop_Shl32,
+                           binop( Iop_And32,
+                                  mkexpr( infinity_true),
+                                  mkU32( 1 ) ),
+                           mkU8( 2 ) ) );
+      assign( dcm4, binop( Iop_Shl32,
+                           binop( Iop_And32,
+                                  mkexpr( QNaN_true ),
+                                  mkU32( 1 ) ),
+                           mkU8( 1 ) ) );
+      assign( dcm5, binop( Iop_And32, mkexpr( SNaN_true), mkU32( 1 ) ) );
+
+   } else if (opc2 == 0xE2) {   // dtstdg, dtstdgq
+      /* check if the exponent is extreme */
+      assign( extreme_true, binop( Iop_Or32,
+                                   unop( Iop_1Sto32,
+                                         binop( Iop_CmpEQ32,
+                                                mkexpr( exponent ),
+                                                mkU32( max_exp ) ) ),
+                                   unop( Iop_1Sto32,
+                                         binop( Iop_CmpEQ32,
+                                                mkexpr( exponent ),
+                                                mkU32( min_exp ) ) ) ) );
+
+      /* Check if LMD (left-most digit of the significand) is zero */
+      Get_lmd( &lmd, binop( Iop_Shr32,
+                            mkexpr( gfield ), mkU8( 31 - 5 ) ) );
+
+      assign( lmd_zero_true, unop( Iop_1Sto32,
+                                   binop( Iop_CmpEQ32,
+                                          mkexpr( lmd ),
+                                          mkU32( 0 ) ) ) );
+
+      /* DCM[0:5] Bit   Data Class definition
+       *  0   Zero with non-extreme exponent
+       *  1   Zero with extreme exponent
+       *  2   Subnormal or (Normal with extreme exponent)
+       *  3   Normal with non-extreme exponent and
+       *      leftmost zero digit in significand
+       *  4   Normal with non-extreme exponent and
+       *      leftmost nonzero digit in significand
+       *  5   Special symbol (Infinity, QNaN, or SNaN)
+       */
+      assign( dcm0, binop( Iop_Shl32,
+                           binop( Iop_And32,
+                                  binop( Iop_And32,
+                                         unop( Iop_Not32,
+                                               mkexpr( extreme_true ) ),
+                                         mkexpr( zero_true ) ),
+                                  mkU32( 0x1 ) ),
+                           mkU8( 5 ) ) );
+
+      assign( dcm1, binop( Iop_Shl32,
+                           binop( Iop_And32,
+                                  binop( Iop_And32,
+                                         mkexpr( extreme_true ),
+                                         mkexpr( zero_true ) ),
+                                  mkU32( 0x1 ) ),
+                           mkU8( 4 ) ) );
+
+      assign( dcm2, binop( Iop_Shl32,
+                           binop( Iop_And32,
+                                  binop( Iop_Or32,
+                                         binop( Iop_And32,
+                                                mkexpr( extreme_true ),
+                                                mkexpr( normal_true ) ),
+                                         mkexpr( subnormal_true ) ),
+                                  mkU32( 0x1 ) ),
+                           mkU8( 3 ) ) );
+
+      assign( dcm3, binop( Iop_Shl32,
+                           binop( Iop_And32,
+                                  binop( Iop_And32,
+                                         binop( Iop_And32,
+                                                unop( Iop_Not32,
+                                                      mkexpr( extreme_true ) ),
+                                                      mkexpr( normal_true ) ),
+                                         unop( Iop_1Sto32,
+                                               binop( Iop_CmpEQ32,
+                                                      mkexpr( lmd ),
+                                                      mkU32( 0 ) ) ) ),
+                                  mkU32( 0x1 ) ),
+                           mkU8( 2 ) ) );
+
+      assign( dcm4, binop( Iop_Shl32,
+                           binop( Iop_And32,
+                                  binop( Iop_And32,
+                                         binop( Iop_And32,
+                                                unop( Iop_Not32,
+                                                      mkexpr( extreme_true ) ),
+                                                mkexpr( normal_true ) ),
+                                          unop( Iop_1Sto32,
+                                                binop( Iop_CmpNE32,
+                                                       mkexpr( lmd ),
+                                                       mkU32( 0 ) ) ) ),
+                                  mkU32( 0x1 ) ),
+                           mkU8( 1 ) ) );
+
+      assign( dcm5, binop( Iop_And32,
+                           binop( Iop_Or32,
+                                  mkexpr( SNaN_true),
+                                  binop( Iop_Or32,
+                                         mkexpr( QNaN_true),
+                                         mkexpr( infinity_true) ) ),
+                           mkU32( 0x1 ) ) );
+   }
+
+   /* create DCM field */
+   assign( DCM_calc,
+           binop( Iop_Or32,
+                  mkexpr( dcm0 ),
+                  binop( Iop_Or32,
+                         mkexpr( dcm1 ),
+                         binop( Iop_Or32,
+                                mkexpr( dcm2 ),
+                                binop( Iop_Or32,
+                                       mkexpr( dcm3 ),
+                                       binop( Iop_Or32,
+                                              mkexpr( dcm4 ),
+                                              mkexpr( dcm5 ) ) ) ) ) ) );
+
+   /* Get the sign of the DFP number, ignore sign for QNaN.
+      The sign is bit 63 of the raw value, i.e. bit 31 of the high
+      32-bit half. */
+   assign( sign,
+           unop( Iop_1Uto32,
+                 binop( Iop_CmpEQ32,
+                        binop( Iop_Shr32,
+                               unop( Iop_64HIto32, mkexpr( frAI64_hi ) ),
+                               mkU8( 63 - 32 ) ),
+                        mkU32( 1 ) ) ) );
+
+   /* This instruction generates a four bit field to be stored in the
+    * condition code register.  The condition code register consists of 7
+    * fields.  The field to be written to is specified by the BF (AKA crfD)
+    * field.
+    *
+    * The field layout is as follows:
+    *
+    *      Field          Meaning
+    *      0000           Operand positive with no match
+    *      0100           Operand positive with at least one match
+    *      0001           Operand negative with no match
+    *      0101           Operand negative with at least one match
+    */
+   /* bit 3 = sign; bit 1 = (DCM & DCM_calc) != 0, i.e. "match" */
+   assign( field, binop( Iop_Or32,
+                         binop( Iop_Shl32,
+                                mkexpr( sign ),
+                                mkU8( 3 ) ),
+                                binop( Iop_Shl32,
+                                       unop( Iop_1Uto32,
+                                             binop( Iop_CmpNE32,
+                                                    binop( Iop_And32,
+                                                           mkU32( DCM ),
+                                                           mkexpr( DCM_calc ) ),
+                                                     mkU32( 0 ) ) ),
+                                       mkU8( 1 ) ) ) );
+
+   putGST_field( PPC_GST_CR, mkexpr( field ), crfD );
+   return True;
+}
+
+static Bool dis_dfp_bcd(UInt theInstr) {
+   UInt opc2        = ifieldOPClo10( theInstr );
+   ULong sp         = IFIELD(theInstr, 19, 2);
+   ULong s          = IFIELD(theInstr, 20, 1);
+   UChar frT_addr   = ifieldRegDS( theInstr );
+   UChar frB_addr   = ifieldRegB( theInstr );
+   IRTemp frB       = newTemp( Ity_D64 );
+   IRTemp frBI64    = newTemp( Ity_I64 );
+   IRTemp result    = newTemp( Ity_I64 );
+   IRTemp resultD64 = newTemp( Ity_D64 );
+   IRTemp bcd64     = newTemp( Ity_I64 );
+   IRTemp bcd_u     = newTemp( Ity_I32 );
+   IRTemp bcd_l     = newTemp( Ity_I32 );
+   IRTemp dbcd_u    = newTemp( Ity_I32 );
+   IRTemp dbcd_l    = newTemp( Ity_I32 );
+   IRTemp lmd       = newTemp( Ity_I32 );
+
+   assign( frB, getDReg( frB_addr ) );
+   assign( frBI64, unop( Iop_ReinterpD64asI64, mkexpr( frB ) ) );
+
+   switch ( opc2 ) {
+   case 0x142: // ddedpd   DFP Decode DPD to BCD
+      DIP( "ddedpd %llu,r%u,r%u\n", sp, frT_addr, frB_addr );
+
+         assign( bcd64, unop( Iop_DPBtoBCD, mkexpr( frBI64 ) ) );
+         assign( bcd_u, unop( Iop_64HIto32, mkexpr( bcd64 ) ) );
+         assign( bcd_l, unop( Iop_64to32, mkexpr( bcd64 ) ) );
+
+      if ( ( sp == 0 ) || ( sp == 1 ) ) {
+         /* Unsigned BCD string */
+         Get_lmd( &lmd,
+                  binop( Iop_Shr32,
+                         unop( Iop_64HIto32, mkexpr( frBI64 ) ),
+                         mkU8( 31 - 5 ) ) ); // G-field[0:4]
+
+         assign( result,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_Shl32, mkexpr( lmd ), mkU8( 28 ) ),
+                               mkexpr( bcd_u ) ),
+                        mkexpr( bcd_l ) ) );
+
+      } else {
+         /* Signed BCD string, the cases for sp 2 and 3 only differ in how
+          * the positive and negative values are encoded in the least
+          * significant bits.
+          */
+         IRTemp sign = newTemp( Ity_I32 );
+
+         if (sp == 2) {
+            /* Positive sign = 0xC, negative sign = 0xD */
+
+            assign( sign,
+                    binop( Iop_Or32,
+                           binop( Iop_Shr32,
+                                  unop( Iop_64HIto32, mkexpr( frBI64 ) ),
+                                  mkU8( 31 ) ),
+                           mkU32( 0xC ) ) );
+
+         } else if ( sp == 3 ) {
+            /* Positive sign = 0xF, negative sign = 0xD */
+            IRTemp tmp32 = newTemp( Ity_I32 );
+
+            /* Complement sign bit then OR into bit position 1 */
+            assign( tmp32,
+                    binop( Iop_Xor32,
+                           binop( Iop_Shr32,
+                                  unop( Iop_64HIto32, mkexpr( frBI64 ) ),
+                                  mkU8( 30 ) ),
+                           mkU32( 0x2 ) ) );
+
+            assign( sign, binop( Iop_Or32, mkexpr( tmp32 ), mkU32( 0xD ) ) );
+
+         } else {
+            vpanic( "The impossible happened: dis_dfp_bcd(ppc), undefined SP field" );
+         }
+
+         /* Put sign in bottom 4 bits, move most significant 4-bits from
+          * bcd_l to bcd_u.
+          */
+         assign( result,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_Shr32,
+                                      mkexpr( bcd_l ),
+                                      mkU8( 28 ) ),
+                               binop( Iop_Shl32,
+                                      mkexpr( bcd_u ),
+                                      mkU8( 4 ) ) ),
+                        binop( Iop_Or32,
+                                      mkexpr( sign ),
+                               binop( Iop_Shl32,
+                                      mkexpr( bcd_l ),
+                                      mkU8( 4 ) ) ) ) );
+      }
+
+      putDReg( frT_addr, unop( Iop_ReinterpI64asD64, mkexpr( result ) ) );
+      break;
+
+   case 0x342: // denbcd   DFP Encode BCD to DPD
+   {
+      IRTemp valid_mask   = newTemp( Ity_I32 );
+      IRTemp invalid_mask = newTemp( Ity_I32 );
+      IRTemp without_lmd  = newTemp( Ity_I64 );
+      IRTemp tmp64        = newTemp( Ity_I64 );
+      IRTemp dbcd64       = newTemp( Ity_I64 );
+      IRTemp left_exp     = newTemp( Ity_I32 );
+      IRTemp g0_4         = newTemp( Ity_I32 );
+
+      DIP( "denbcd %llu,r%u,r%u\n", s, frT_addr, frB_addr );
+
+      if ( s == 0 ) {
+         /* Unsigned BCD string */
+         assign( dbcd64, unop( Iop_BCDtoDPB, mkexpr(frBI64 ) ) );
+         assign( dbcd_u, unop( Iop_64HIto32, mkexpr( dbcd64 ) ) );
+         assign( dbcd_l, unop( Iop_64to32, mkexpr( dbcd64 ) ) );
+
+         assign( lmd,
+                 binop( Iop_Shr32,
+                        binop( Iop_And32,
+                               unop( Iop_64HIto32, mkexpr( frBI64 ) ),
+                               mkU32( 0xF0000000 ) ),
+                        mkU8( 28 ) ) );
+
+         assign( invalid_mask,
+                 bcd_digit_inval( unop( Iop_64HIto32, mkexpr( frBI64 ) ),
+                                  unop( Iop_64to32, mkexpr( frBI64 ) ) ) );
+         assign( valid_mask, unop( Iop_Not32, mkexpr( invalid_mask ) ) );
+
+         assign( without_lmd,
+                 unop( Iop_ReinterpD64asI64,
+                       binop( Iop_InsertExpD64,
+                              mkU64( DFP_LONG_BIAS ),
+                              unop( Iop_ReinterpI64asD64,
+                                    binop( Iop_32HLto64,
+                                           mkexpr( dbcd_u ),
+                                           mkexpr( dbcd_l ) ) ) ) ) );
+         assign( left_exp,
+                 binop( Iop_Shr32,
+                        binop( Iop_And32,
+                               unop( Iop_64HIto32, mkexpr( without_lmd ) ),
+                               mkU32( 0x60000000 ) ),
+                        mkU8( 29 ) ) );
+
+         assign( g0_4,
+                 binop( Iop_Shl32,
+                        Gfield_encoding( mkexpr( left_exp ), mkexpr( lmd ) ),
+                        mkU8( 26 ) ) );
+
+         assign( tmp64,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_And32,
+                                      unop( Iop_64HIto32,
+                                            mkexpr( without_lmd ) ),
+                                      mkU32( 0x83FFFFFF ) ),
+                               mkexpr( g0_4 ) ),
+                        unop( Iop_64to32, mkexpr( without_lmd ) ) ) );
+
+      } else if ( s == 1 ) {
+         IRTemp sign = newTemp( Ity_I32 );
+         IRTemp sign_bit = newTemp( Ity_I32 );
+         IRTemp pos_sign_mask = newTemp( Ity_I32 );
+         IRTemp neg_sign_mask = newTemp( Ity_I32 );
+         IRTemp tmp = newTemp( Ity_I64 );
+
+         /* Signed BCD string, least significant 4 bits are sign bits
+          * positive sign = 0xC, negative sign = 0xD
+          */
+         assign( tmp, unop( Iop_BCDtoDPB,
+                            binop( Iop_32HLto64,
+                                   binop( Iop_Shr32,
+                                          unop( Iop_64HIto32,
+                                                mkexpr( frBI64 ) ),
+                                                mkU8( 4 ) ),
+                                   binop( Iop_Or32,
+                                          binop( Iop_Shr32,
+                                                 unop( Iop_64to32,
+                                                       mkexpr( frBI64 ) ),
+                                                  mkU8( 4 ) ),
+                                          binop( Iop_Shl32,
+                                                 unop( Iop_64HIto32,
+                                                       mkexpr( frBI64 ) ),
+                                                       mkU8( 28 ) ) ) ) ) );
+
+         assign( dbcd_u, unop( Iop_64HIto32, mkexpr( tmp ) ) );
+         assign( dbcd_l, unop( Iop_64to32, mkexpr( tmp ) ) );
+
+         /* Get the sign of the BCD string. */
+         assign( sign,
+                 binop( Iop_And32,
+                        unop( Iop_64to32, mkexpr( frBI64 ) ),
+                        mkU32( 0xF ) ) );
+
+         assign( neg_sign_mask, Generate_neg_sign_mask( mkexpr( sign ) ) );
+         assign( pos_sign_mask, Generate_pos_sign_mask( mkexpr( sign ) ) );
+         assign( sign_bit,
+                 Generate_sign_bit( mkexpr( pos_sign_mask ),
+                                    mkexpr( neg_sign_mask ) ) );
+
+         /* Check for invalid sign and BCD digit.  Don't check the bottom
+          * four bits of bcd_l as that is the sign value.
+          */
+         assign( invalid_mask,
+                 Generate_inv_mask(
+                                   bcd_digit_inval( unop( Iop_64HIto32,
+                                                          mkexpr( frBI64 ) ),
+                                                    binop( Iop_Shr32,
+                                                           unop( Iop_64to32,
+                                                                 mkexpr( frBI64 ) ),
+                                                           mkU8( 4 ) ) ),
+                                   mkexpr( pos_sign_mask ),
+                                   mkexpr( neg_sign_mask ) ) );
+
+         assign( valid_mask, unop( Iop_Not32, mkexpr( invalid_mask ) ) );
+
+         /* Generate the result assuming the sign value was valid. */
+         assign( tmp64,
+                 unop( Iop_ReinterpD64asI64,
+                       binop( Iop_InsertExpD64,
+                              mkU64( DFP_LONG_BIAS ),
+                              unop( Iop_ReinterpI64asD64,
+                                    binop( Iop_32HLto64,
+                                           binop( Iop_Or32,
+                                                  mkexpr( dbcd_u ),
+                                                  mkexpr( sign_bit ) ),
+                                           mkexpr( dbcd_l ) ) ) ) ) );
+      }
+
+      /* Generate the value to store depending on the validity of the
+       * sign value and the validity of the BCD digits.
+       */
+      assign( resultD64,
+              unop( Iop_ReinterpI64asD64,
+                    binop( Iop_32HLto64,
+                           binop( Iop_Or32,
+                                  binop( Iop_And32,
+                                         mkexpr( valid_mask ),
+                                         unop( Iop_64HIto32,
+                                               mkexpr( tmp64 ) ) ),
+                                  binop( Iop_And32,
+                                         mkU32( 0x7C000000 ),
+                                         mkexpr( invalid_mask ) ) ),
+                           binop( Iop_Or32,
+                                  binop( Iop_And32,
+                                         mkexpr( valid_mask ),
+                                         unop( Iop_64to32, mkexpr( tmp64 ) ) ),
+                                  binop( Iop_And32,
+                                         mkU32( 0x0 ),
+                                         mkexpr( invalid_mask ) ) ) ) ) );
+      putDReg( frT_addr, mkexpr( resultD64 ) );
+   }
+   break;
+   default:
+      vpanic( "ERROR: dis_dfp_bcd(ppc), undefined opc2 case " );
+      return False;
+   }
+   return True;
+}
+
+/* Disassemble the quad (128-bit) DFP BCD conversion instructions:
+ *   ddedpdq (opc2 0x142) - decode the DPD value in frB:frB+1 into a BCD
+ *                          string stored in frT:frT+1;
+ *   denbcdq (opc2 0x342) - encode the BCD string in frB:frB+1 into a DPD
+ *                          value stored in frT:frT+1.
+ * The SP field (sp) selects unsigned (0,1) vs signed (2,3) BCD encoding
+ * for ddedpdq; the S field (s) selects unsigned vs signed for denbcdq.
+ * Returns True if the instruction was recognized and translated.
+ */
+static Bool dis_dfp_bcdq( UInt theInstr )
+{
+   UInt opc2        = ifieldOPClo10( theInstr );
+   ULong sp         = IFIELD(theInstr, 19, 2);
+   ULong s          = IFIELD(theInstr, 20, 1);
+   IRTemp frB_hi    = newTemp( Ity_D64 );
+   IRTemp frB_lo    = newTemp( Ity_D64 );
+   IRTemp frBI64_hi = newTemp( Ity_I64 );
+   IRTemp frBI64_lo = newTemp( Ity_I64 );
+   UChar frT_addr   = ifieldRegDS( theInstr );
+   UChar frB_addr   = ifieldRegB( theInstr );
+
+   IRTemp lmd       = newTemp( Ity_I32 );
+   IRTemp result_hi = newTemp( Ity_I64 );
+   IRTemp result_lo = newTemp( Ity_I64 );
+
+   /* The 128-bit source operand lives in the register pair frB:frB+1 */
+   assign( frB_hi, getDReg( frB_addr ) );
+   assign( frB_lo, getDReg( frB_addr + 1 ) );
+   assign( frBI64_hi, unop( Iop_ReinterpD64asI64, mkexpr( frB_hi ) ) );
+   assign( frBI64_lo, unop( Iop_ReinterpD64asI64, mkexpr( frB_lo ) ) );
+
+   switch ( opc2 ) {
+   case 0x142: // ddedpdq   DFP Decode DPD to BCD
+   {
+      IRTemp low_60_u = newTemp( Ity_I32 );
+      IRTemp low_60_l = newTemp( Ity_I32 );
+      IRTemp mid_60_u = newTemp( Ity_I32 );
+      IRTemp mid_60_l = newTemp( Ity_I32 );
+      IRTemp top_12_l = newTemp( Ity_I32 );
+
+      DIP( "ddedpdq %llu,r%u,r%u\n", sp, frT_addr, frB_addr );
+
+      /* Note, instruction only stores the lower 32 BCD digits in
+       * the result
+       */
+      Generate_132_bit_bcd_string( mkexpr( frBI64_hi ),
+                                   mkexpr( frBI64_lo ),
+                                   &top_12_l,
+                                   &mid_60_u,
+                                   &mid_60_l,
+                                   &low_60_u,
+                                   &low_60_l );
+
+      if ( ( sp == 0 ) || ( sp == 1 ) ) {
+         /* Unsigned BCD string */
+         assign( result_hi,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_Shl32,
+                                      mkexpr( top_12_l ),
+                                      mkU8( 24 ) ),
+                               binop( Iop_Shr32,
+                                      mkexpr( mid_60_u ),
+                                      mkU8( 4 ) ) ),
+                        binop( Iop_Or32,
+                               binop( Iop_Shl32,
+                                      mkexpr( mid_60_u ),
+                                      mkU8( 28 ) ),
+                               binop( Iop_Shr32,
+                                      mkexpr( mid_60_l ),
+                                      mkU8( 4 ) ) ) ) );
+
+         assign( result_lo,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_Shl32,
+                                      mkexpr( mid_60_l ),
+                                      mkU8( 28 ) ),
+                               mkexpr( low_60_u ) ),
+                        mkexpr( low_60_l ) ) );
+
+      } else {
+         /* Signed BCD string, the cases for sp 2 and 3 only differ in how
+          * the positive and negative values are encoded in the least
+          * significant bits.
+          */
+         IRTemp sign = newTemp( Ity_I32 );
+
+         if ( sp == 2 ) {
+            /* Positive sign = 0xC, negative sign = 0xD */
+            assign( sign,
+                    binop( Iop_Or32,
+                           binop( Iop_Shr32,
+                                  unop( Iop_64HIto32, mkexpr( frBI64_hi ) ),
+                                  mkU8( 31 ) ),
+                           mkU32( 0xC ) ) );
+
+         } else if ( sp == 3 ) {
+            IRTemp tmp32 = newTemp( Ity_I32 );
+
+            /* Positive sign = 0xF, negative sign = 0xD.
+             * Need to complement sign bit then OR into bit position 1.
+             */
+            assign( tmp32,
+                    binop( Iop_Xor32,
+                           binop( Iop_Shr32,
+                                  unop( Iop_64HIto32, mkexpr( frBI64_hi ) ),
+                                  mkU8( 30 ) ),
+                           mkU32( 0x2 ) ) );
+
+            assign( sign, binop( Iop_Or32, mkexpr( tmp32 ), mkU32( 0xD ) ) );
+
+         } else {
+            vpanic( "The impossible happened: dis_dfp_bcdq(ppc), undefined SP field" );
+         }
+
+         /* Shift the BCD string left one nibble and insert the sign code
+          * in the bottom four bits.
+          */
+         assign( result_hi,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_Shl32,
+                                      mkexpr( top_12_l ),
+                                      mkU8( 28 ) ),
+                               mkexpr( mid_60_u ) ),
+                        mkexpr( mid_60_l ) ) );
+
+         assign( result_lo,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_Shl32,
+                                      mkexpr( low_60_u ),
+                                      mkU8( 4 ) ),
+                               binop( Iop_Shr32,
+                                      mkexpr( low_60_l ),
+                                      mkU8( 28 ) ) ),
+                        binop( Iop_Or32,
+                               binop( Iop_Shl32,
+                                      mkexpr( low_60_l ),
+                                      mkU8( 4 ) ),
+                               mkexpr( sign ) ) ) );
+      }
+
+      putDReg( frT_addr, unop( Iop_ReinterpI64asD64, mkexpr( result_hi ) ) );
+      putDReg( frT_addr + 1,
+               unop( Iop_ReinterpI64asD64, mkexpr( result_lo ) ) );
+   }
+   break;
+   case 0x342: // denbcdq   DFP Encode BCD to DPD
+   {
+      IRTemp valid_mask      = newTemp( Ity_I32 );
+      IRTemp invalid_mask    = newTemp( Ity_I32 );
+      IRTemp result128       = newTemp( Ity_D128 );
+      IRTemp dfp_significand = newTemp( Ity_D128 );
+      IRTemp tmp_hi          = newTemp( Ity_I64 );
+      IRTemp tmp_lo          = newTemp( Ity_I64 );
+      IRTemp dbcd_top_l      = newTemp( Ity_I32 );
+      IRTemp dbcd_mid_u      = newTemp( Ity_I32 );
+      IRTemp dbcd_mid_l      = newTemp( Ity_I32 );
+      IRTemp dbcd_low_u      = newTemp( Ity_I32 );
+      IRTemp dbcd_low_l      = newTemp( Ity_I32 );
+      IRTemp bcd_top_8       = newTemp( Ity_I64 );
+      IRTemp bcd_mid_60      = newTemp( Ity_I64 );
+      IRTemp bcd_low_60      = newTemp( Ity_I64 );
+      IRTemp sign_bit        = newTemp( Ity_I32 );
+      IRTemp tmptop10        = newTemp( Ity_I64 );
+      IRTemp tmpmid50        = newTemp( Ity_I64 );
+      IRTemp tmplow50        = newTemp( Ity_I64 );
+      IRTemp inval_bcd_digit_mask = newTemp( Ity_I32 );
+
+      /* Fix: print the quad mnemonic "denbcdq", not "denbcd", so the
+       * disassembly trace matches the instruction being translated.
+       */
+      DIP( "denbcdq %llu,r%u,r%u\n", s, frT_addr, frB_addr );
+
+      if ( s == 0 ) {
+         /* Unsigned BCD string */
+         assign( sign_bit, mkU32( 0 ) ); // set to zero for unsigned string
+
+         assign( bcd_top_8,
+                 binop( Iop_32HLto64,
+                        mkU32( 0 ),
+                        binop( Iop_And32,
+                               binop( Iop_Shr32,
+                                      unop( Iop_64HIto32,
+                                            mkexpr( frBI64_hi ) ),
+                                      mkU8( 24 ) ),
+                               mkU32( 0xFF ) ) ) );
+         assign( bcd_mid_60,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_Shr32,
+                                      unop( Iop_64to32,
+                                            mkexpr( frBI64_hi ) ),
+                                      mkU8( 28 ) ),
+                               binop( Iop_Shl32,
+                                      unop( Iop_64HIto32,
+                                            mkexpr( frBI64_hi ) ),
+                                      mkU8( 4 ) ) ),
+                        binop( Iop_Or32,
+                               binop( Iop_Shl32,
+                                      unop( Iop_64to32,
+                                            mkexpr( frBI64_hi ) ),
+                                      mkU8( 4 ) ),
+                               binop( Iop_Shr32,
+                                      unop( Iop_64HIto32,
+                                            mkexpr( frBI64_lo ) ),
+                                      mkU8( 28 ) ) ) ) );
+
+         /* Note, the various helper functions ignore the top 4 bits */
+         assign( bcd_low_60, mkexpr( frBI64_lo ) );
+
+         assign( tmptop10, unop( Iop_BCDtoDPB, mkexpr( bcd_top_8 ) ) );
+         assign( dbcd_top_l, unop( Iop_64to32, mkexpr( tmptop10 ) ) );
+
+         assign( tmpmid50, unop( Iop_BCDtoDPB, mkexpr( bcd_mid_60 ) ) );
+         assign( dbcd_mid_u, unop( Iop_64HIto32, mkexpr( tmpmid50 ) ) );
+         assign( dbcd_mid_l, unop( Iop_64to32, mkexpr( tmpmid50 ) ) );
+
+         assign( tmplow50, unop( Iop_BCDtoDPB, mkexpr( bcd_low_60 ) ) );
+         assign( dbcd_low_u, unop( Iop_64HIto32, mkexpr( tmplow50 ) ) );
+         assign( dbcd_low_l, unop( Iop_64to32, mkexpr( tmplow50 ) ) );
+
+         /* The entire BCD string fits in lower 110-bits.  The LMD = 0,
+          * value is not part of the final result. Only the right most
+          * BCD digits are stored.
+          */
+         assign( lmd, mkU32( 0 ) );
+
+         assign( invalid_mask,
+                 binop( Iop_Or32,
+                        bcd_digit_inval( mkU32( 0 ),
+                                         unop( Iop_64to32,
+                                               mkexpr( bcd_top_8 ) ) ),
+                        binop( Iop_Or32,
+                               bcd_digit_inval( unop( Iop_64HIto32,
+                                                      mkexpr( bcd_mid_60 ) ),
+                                                unop( Iop_64to32,
+                                                      mkexpr( bcd_mid_60 ) ) ),
+                               bcd_digit_inval( unop( Iop_64HIto32,
+                                                      mkexpr( bcd_low_60 ) ),
+                                                unop( Iop_64to32,
+                                                      mkexpr( bcd_low_60 ) )
+                                                ) ) ) );
+
+      } else if ( s == 1 ) {
+         IRTemp sign          = newTemp( Ity_I32 );
+         IRTemp zero          = newTemp( Ity_I32 );
+         IRTemp pos_sign_mask = newTemp( Ity_I32 );
+         IRTemp neg_sign_mask = newTemp( Ity_I32 );
+
+         /* The sign of the BCD string is stored in lower 4 bits */
+         assign( sign,
+                 binop( Iop_And32,
+                        unop( Iop_64to32, mkexpr( frBI64_lo ) ),
+                        mkU32( 0xF ) ) );
+         assign( neg_sign_mask, Generate_neg_sign_mask( mkexpr( sign ) ) );
+         assign( pos_sign_mask, Generate_pos_sign_mask( mkexpr( sign ) ) );
+         assign( sign_bit,
+                 Generate_sign_bit( mkexpr( pos_sign_mask ),
+                                    mkexpr( neg_sign_mask ) ) );
+
+         /* Generate the value assuming the sign and BCD digits are valid */
+         assign( bcd_top_8,
+                 binop( Iop_32HLto64,
+                        mkU32( 0x0 ),
+                        binop( Iop_Shr32,
+                               unop( Iop_64HIto32, mkexpr( frBI64_hi ) ),
+                               mkU8( 28 ) ) ) );
+
+         /* The various helper routines ignore the upper 4-bits */
+         assign( bcd_mid_60, mkexpr( frBI64_hi ) );
+
+         /* Remove bottom four sign bits */
+         assign( bcd_low_60,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Shr32,
+                               unop( Iop_64HIto32,
+                                     mkexpr( frBI64_lo ) ),
+                               mkU8( 4 ) ),
+                               binop( Iop_Or32,
+                                      binop( Iop_Shl32,
+                                             unop( Iop_64HIto32,
+                                                   mkexpr( frBI64_lo ) ),
+                                             mkU8( 28 ) ),
+                                      binop( Iop_Shr32,
+                                             unop( Iop_64to32,
+                                                   mkexpr( frBI64_lo ) ),
+                                             mkU8( 4 ) ) ) ) );
+         assign( tmptop10, unop( Iop_BCDtoDPB, mkexpr(bcd_top_8 ) ) );
+         assign( dbcd_top_l, unop( Iop_64to32, mkexpr( tmptop10 ) ) );
+
+         assign( tmpmid50, unop( Iop_BCDtoDPB, mkexpr(bcd_mid_60 ) ) );
+         assign( dbcd_mid_u, unop( Iop_64HIto32, mkexpr( tmpmid50 ) ) );
+         assign( dbcd_mid_l, unop( Iop_64to32, mkexpr( tmpmid50 ) ) );
+
+         assign( tmplow50, unop( Iop_BCDtoDPB, mkexpr( bcd_low_60 ) ) );
+         assign( dbcd_low_u, unop( Iop_64HIto32, mkexpr( tmplow50 ) ) );
+         assign( dbcd_low_l, unop( Iop_64to32, mkexpr( tmplow50 ) ) );
+
+         /* The entire BCD string fits in lower 110-bits.  The LMD value
+          * is not stored in the final result for the DFP Long instruction.
+          */
+         assign( lmd, mkU32( 0 ) );
+
+         /* Check for invalid sign and invalid BCD digit.  Don't check the
+          * bottom four bits of frBI64_lo as that is the sign value.
+          */
+         assign( zero, mkU32( 0 ) );
+         assign( inval_bcd_digit_mask,
+                 binop( Iop_Or32,
+                        bcd_digit_inval( mkexpr( zero ),
+                                         unop( Iop_64to32,
+                                               mkexpr( bcd_top_8 ) ) ),
+                        binop( Iop_Or32,
+                               bcd_digit_inval( unop( Iop_64HIto32,
+                                                     mkexpr( bcd_mid_60 ) ),
+                                               unop( Iop_64to32,
+                                                     mkexpr( bcd_mid_60 ) ) ),
+                               bcd_digit_inval( unop( Iop_64HIto32,
+                                                     mkexpr( frBI64_lo ) ),
+                                               binop( Iop_Shr32,
+                                                      unop( Iop_64to32,
+                                                            mkexpr( frBI64_lo ) ),
+                                                        mkU8( 4 ) ) ) ) ) );
+         assign( invalid_mask,
+                 Generate_inv_mask( mkexpr( inval_bcd_digit_mask ),
+                                    mkexpr( pos_sign_mask ),
+                                    mkexpr( neg_sign_mask ) ) );
+
+      }
+
+      assign( valid_mask, unop( Iop_Not32, mkexpr( invalid_mask ) ) );
+
+      /* Calculate the value of the result assuming sign and BCD digits
+       * are all valid.
+       */
+      assign( dfp_significand,
+              binop( Iop_D64HLtoD128,
+                     unop( Iop_ReinterpI64asD64,
+                           binop( Iop_32HLto64,
+                                  binop( Iop_Or32,
+                                         mkexpr( sign_bit ),
+                                         mkexpr( dbcd_top_l ) ),
+                                  binop( Iop_Or32,
+                                         binop( Iop_Shl32,
+                                                mkexpr( dbcd_mid_u ),
+                                                mkU8( 18 ) ),
+                                         binop( Iop_Shr32,
+                                                mkexpr( dbcd_mid_l ),
+                                                mkU8( 14 ) ) ) ) ),
+                     unop( Iop_ReinterpI64asD64,
+                           binop( Iop_32HLto64,
+                                  binop( Iop_Or32,
+                                         mkexpr( dbcd_low_u ),
+                                         binop( Iop_Shl32,
+                                                mkexpr( dbcd_mid_l ),
+                                                mkU8( 18 ) ) ),
+                                  mkexpr( dbcd_low_l ) ) ) ) );
+
+      /* Break the result back down to 32-bit chunks and replace chunks.
+       * If there was an invalid BCD digit or invalid sign value, replace
+       * the calculated result with the invalid bit string.
+       */
+      assign( result128,
+              binop( Iop_InsertExpD128,
+                     mkU64( DFP_EXTND_BIAS ),
+                     mkexpr( dfp_significand ) ) );
+
+      assign( tmp_hi,
+              unop( Iop_ReinterpD64asI64,
+                    unop( Iop_D128HItoD64, mkexpr( result128 ) ) ) );
+
+      assign( tmp_lo,
+              unop( Iop_ReinterpD64asI64,
+                    unop( Iop_D128LOtoD64, mkexpr( result128 ) ) ) );
+
+      assign( result_hi,
+              binop( Iop_32HLto64,
+                     binop( Iop_Or32,
+                            binop( Iop_And32,
+                                   mkexpr( valid_mask ),
+                                   unop( Iop_64HIto32, mkexpr( tmp_hi ) ) ),
+                            binop( Iop_And32,
+                                   mkU32( 0x7C000000 ),
+                                   mkexpr( invalid_mask ) ) ),
+                     binop( Iop_Or32,
+                            binop( Iop_And32,
+                                   mkexpr( valid_mask ),
+                                   unop( Iop_64to32, mkexpr( tmp_hi ) ) ),
+                            binop( Iop_And32,
+                                   mkU32( 0x0 ),
+                                   mkexpr( invalid_mask ) ) ) ) );
+
+      assign( result_lo,
+              binop( Iop_32HLto64,
+                     binop( Iop_Or32,
+                            binop( Iop_And32,
+                                   mkexpr( valid_mask ),
+                                   unop( Iop_64HIto32, mkexpr( tmp_lo ) ) ),
+                            binop( Iop_And32,
+                                   mkU32( 0x0 ),
+                                   mkexpr( invalid_mask ) ) ),
+                     binop( Iop_Or32,
+                            binop( Iop_And32,
+                                   mkexpr( valid_mask ),
+                                   unop( Iop_64to32, mkexpr( tmp_lo ) ) ),
+                            binop( Iop_And32,
+                                   mkU32( 0x0 ),
+                                   mkexpr( invalid_mask ) ) ) ) );
+
+      putDReg( frT_addr, unop( Iop_ReinterpI64asD64, mkexpr( result_hi ) ) );
+      putDReg( frT_addr + 1,
+               unop( Iop_ReinterpI64asD64, mkexpr( result_lo ) ) );
+
+   }
+   break;
+   default:
+      vpanic( "ERROR: dis_dfp_bcdq(ppc), undefined opc2 case " );
+      break;
+   }
+   return True;
+}
+
+static Bool dis_dfp_significant_digits( UInt theInstr )
+{
+   UChar frA_addr = ifieldRegA( theInstr );
+   UChar frB_addr = ifieldRegB( theInstr );
+   IRTemp frA     = newTemp( Ity_D64 );
+   UInt opc1      = ifieldOPC( theInstr );
+   IRTemp B_sig   = newTemp( Ity_I8 );
+   IRTemp K       = newTemp( Ity_I8 );
+   IRTemp lmd_B   = newTemp( Ity_I32 );
+   IRTemp field   = newTemp( Ity_I32 );
+   UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) ); // AKA BF
+   IRTemp Unordered_true     = newTemp( Ity_I32 );
+   IRTemp Eq_true_mask       = newTemp( Ity_I32 );
+   IRTemp Lt_true_mask       = newTemp( Ity_I32 );
+   IRTemp Gt_true_mask       = newTemp( Ity_I32 );
+   IRTemp KisZero_true_mask  = newTemp( Ity_I32 );
+   IRTemp KisZero_false_mask = newTemp( Ity_I32 );
+
+   /* Get the reference significance stored in frA */
+   assign( frA, getDReg( frA_addr ) );
+
+   /* Convert from 64 bit to 8 bits in two steps.  The Iop_64to8 is not 
+    * supported in 32-bit mode.
+    */
+   assign( K, unop( Iop_32to8,
+                    binop( Iop_And32,
+                           unop( Iop_64to32,
+                                 unop( Iop_ReinterpD64asI64,
+                                       mkexpr( frA ) ) ),
+                           mkU32( 0x3F ) ) ) );
+
+   switch ( opc1 ) {
+   case 0x3b: // dtstsf   DFP Test Significance
+   {
+      IRTemp frB     = newTemp( Ity_D64 );
+      IRTemp frBI64  = newTemp( Ity_I64 );
+      IRTemp B_bcd_u = newTemp( Ity_I32 );
+      IRTemp B_bcd_l = newTemp( Ity_I32 );
+      IRTemp tmp64   = newTemp( Ity_I64 );
+
+      DIP( "dtstsf %u,r%u,r%u\n", crfD, frA_addr, frB_addr );
+
+      assign( frB, getDReg( frB_addr ) );
+      assign( frBI64, unop( Iop_ReinterpD64asI64, mkexpr( frB ) ) );
+
+      /* Get the BCD string for the value stored in a series of I32 values.
+       * Count the number of leading zeros.  Subtract the number of leading
+       * zeros from 16 (maximum number of significant digits in DFP
+       * Long).
+       */
+      Get_lmd( &lmd_B,
+               binop( Iop_Shr32,
+                      unop( Iop_64HIto32, mkexpr( frBI64 ) ),
+                      mkU8( 31 - 5 ) ) ); // G-field[0:4]
+
+      assign( tmp64, unop( Iop_DPBtoBCD, mkexpr( frBI64 ) ) );
+      assign( B_bcd_u, unop( Iop_64HIto32, mkexpr( tmp64 ) ) );
+      assign( B_bcd_l, unop( Iop_64to32, mkexpr( tmp64 ) ) );
+
+      assign( B_sig,
+              binop( Iop_Sub8,
+                     mkU8( DFP_LONG_MAX_SIG_DIGITS ),
+                     Count_leading_zeros_60( mkexpr( lmd_B ),
+                                             mkexpr( B_bcd_u ),
+                                             mkexpr( B_bcd_l ) ) ) );
+      assign( Unordered_true, Check_unordered( mkexpr( frBI64 ) ) );
+   }
+   break;
+   case 0x3F: // dtstsfq     DFP Test Significance
+   {
+      IRTemp frB_hi     = newTemp( Ity_D64 );
+      IRTemp frB_lo     = newTemp( Ity_D64 );
+      IRTemp frBI64_hi  = newTemp( Ity_I64 );
+      IRTemp frBI64_lo  = newTemp( Ity_I64 );
+      IRTemp B_low_60_u = newTemp( Ity_I32 );
+      IRTemp B_low_60_l = newTemp( Ity_I32 );
+      IRTemp B_mid_60_u = newTemp( Ity_I32 );
+      IRTemp B_mid_60_l = newTemp( Ity_I32 );
+      IRTemp B_top_12_l = newTemp( Ity_I32 );
+
+      DIP( "dtstsfq %u,r%u,r%u\n", crfD, frA_addr, frB_addr );
+
+      assign( frB_hi, getDReg( frB_addr ) );
+      assign( frB_lo, getDReg( frB_addr + 1 ) );
+
+      assign( frBI64_hi, unop( Iop_ReinterpD64asI64, mkexpr( frB_hi ) ) );
+      assign( frBI64_lo, unop( Iop_ReinterpD64asI64, mkexpr( frB_lo ) ) );
+
+      /* Get the BCD string for the value stored in a series of I32 values.
+       * Count the number of leading zeros.  Subtract the number of leading
+       * zeros from 32 (maximum number of significant digits in DFP
+       * extended).
+       */
+      Get_lmd( &lmd_B,
+               binop( Iop_Shr32,
+                      unop( Iop_64HIto32, mkexpr( frBI64_hi ) ),
+                      mkU8( 31 - 5 ) ) ); // G-field[0:4]
+
+      Generate_132_bit_bcd_string( mkexpr( frBI64_hi ),
+                                   mkexpr( frBI64_lo ),
+                                   &B_top_12_l,
+                                   &B_mid_60_u,
+                                   &B_mid_60_l,
+                                   &B_low_60_u,
+                                   &B_low_60_l );
+
+      assign( B_sig,
+              binop( Iop_Sub8,
+                     mkU8( DFP_EXTND_MAX_SIG_DIGITS ),
+                     Count_leading_zeros_128( mkexpr( lmd_B ),
+                                              mkexpr( B_top_12_l ),
+                                              mkexpr( B_mid_60_u ),
+                                              mkexpr( B_mid_60_l ),
+                                              mkexpr( B_low_60_u ),
+                                              mkexpr( B_low_60_l ) ) ) );
+
+      assign( Unordered_true, Check_unordered( mkexpr( frBI64_hi ) ) );
+   }
+   break;
+   }
+
+   /* Compare (16 - cnt[0]) against K and set the condition code field
+    * accordingly.
+    *
+    * The field layout is as follows:
+    *
+    * bit[3:0]    Description
+    *    3     K != 0 and K < Number of significant digits of FRB
+    *    2     K != 0 and K > Number of significant digits of FRB, OR K = 0
+    *    1     K != 0 and K = Number of significant digits of FRB
+    *    0     FRB is unordered (per Unordered_true / Check_unordered)
+    */
+   assign( Eq_true_mask,
+           unop( Iop_1Sto32,
+                 binop( Iop_CmpEQ32,
+                        unop( Iop_8Uto32, mkexpr( K ) ),
+                        unop( Iop_8Uto32, mkexpr( B_sig ) ) ) ) );
+   assign( Lt_true_mask,
+           unop( Iop_1Sto32,
+                 binop( Iop_CmpLT32U,
+                        unop( Iop_8Uto32, mkexpr( K ) ),
+                        unop( Iop_8Uto32, mkexpr( B_sig ) ) ) ) );
+   assign( Gt_true_mask,
+           unop( Iop_1Sto32,
+                 binop( Iop_CmpLT32U,
+                        unop( Iop_8Uto32, mkexpr( B_sig ) ),
+                        unop( Iop_8Uto32, mkexpr( K ) ) ) ) );
+
+   assign( KisZero_true_mask,
+           unop( Iop_1Sto32,
+                 binop( Iop_CmpEQ32,
+                        unop( Iop_8Uto32, mkexpr( K ) ),
+                        mkU32( 0 ) ) ) );
+   assign( KisZero_false_mask,
+           unop( Iop_1Sto32,
+                 binop( Iop_CmpNE32,
+                        unop( Iop_8Uto32, mkexpr( K ) ),
+                        mkU32( 0 ) ) ) );
+
+   assign( field,
+           binop( Iop_Or32,
+                  binop( Iop_And32,
+                         mkexpr( KisZero_false_mask ),
+                         binop( Iop_Or32,
+                                binop( Iop_And32,
+                                       mkexpr( Lt_true_mask ),
+                                       mkU32( 0x8 ) ),
+                                binop( Iop_Or32,
+                                       binop( Iop_And32,
+                                              mkexpr( Gt_true_mask ),
+                                              mkU32( 0x4 ) ),
+                                       binop( Iop_And32,
+                                              mkexpr( Eq_true_mask ),
+                                              mkU32( 0x2 ) ) ) ) ),
+                  binop( Iop_And32,
+                         mkexpr( KisZero_true_mask ),
+                         mkU32( 0x4 ) ) ) );
+
+   putGST_field( PPC_GST_CR,
+                 binop( Iop_Or32,
+                        binop( Iop_And32,
+                               mkexpr( Unordered_true ),
+                               mkU32( 0x1 ) ),
+                        binop( Iop_And32,
+                               unop( Iop_Not32, mkexpr( Unordered_true ) ),
+                               mkexpr( field ) ) ),
+                 crfD );
+
+   return True;
+}
+
+/*------------------------------------------------------------*/
+/*--- AltiVec Instruction Translation                      ---*/
+/*------------------------------------------------------------*/
+
+/*
+  Altivec Cache Control Instructions (Data Streams)
+*/
+static Bool dis_av_datastream ( UInt theInstr )
+{
+   /* X-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar flag_T   = toUChar( IFIELD( theInstr, 25, 1 ) );
+   UChar flag_A   = flag_T;
+   UChar b23to24  = toUChar( IFIELD( theInstr, 23, 2 ) );
+   UChar STRM     = toUChar( IFIELD( theInstr, 21, 2 ) );
+   UChar rA_addr  = ifieldRegA(theInstr);
+   UChar rB_addr  = ifieldRegB(theInstr);
+   UInt  opc2     = ifieldOPClo10(theInstr);
+   UChar b0       = ifieldBIT0(theInstr);
+
+   if (opc1 != 0x1F || b23to24 != 0 || b0 != 0) {
+      vex_printf("dis_av_datastream(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   case 0x156: // dst (Data Stream Touch, AV p115)
+      DIP("dst%s r%u,r%u,%d\n", flag_T ? "t" : "",
+                                rA_addr, rB_addr, STRM);
+      break;
+
+   case 0x176: // dstst (Data Stream Touch for Store, AV p117)
+      DIP("dstst%s r%u,r%u,%d\n", flag_T ? "t" : "",
+                                  rA_addr, rB_addr, STRM);
+      break;
+
+   case 0x336: // dss (Data Stream Stop, AV p114)
+      if (rA_addr != 0 || rB_addr != 0) {
+         vex_printf("dis_av_datastream(ppc)(opc2,dst)\n");
+         return False;
+      }
+      if (flag_A == 0) {
+         DIP("dss %d\n", STRM);
+      } else {
+         DIP("dssall\n");
+      }
+      break;
+
+   default:
+      vex_printf("dis_av_datastream(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Processor Control Instructions
+*/
+static Bool dis_av_procctl ( UInt theInstr )
+{
+   /* VX-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UChar vD_addr = ifieldRegDS(theInstr);
+   UChar vA_addr = ifieldRegA(theInstr);
+   UChar vB_addr = ifieldRegB(theInstr);
+   UInt  opc2    = IFIELD( theInstr, 0, 11 );
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_procctl(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   case 0x604: // mfvscr (Move from VSCR, AV p129)
+      if (vA_addr != 0 || vB_addr != 0) {
+         vex_printf("dis_av_procctl(ppc)(opc2,dst)\n");
+         return False;
+      }
+      DIP("mfvscr v%d\n", vD_addr);
+      putVReg( vD_addr, unop(Iop_32UtoV128, getGST( PPC_GST_VSCR )) ); 
+      break;
+
+   case 0x644: { // mtvscr (Move to VSCR, AV p130)
+      IRTemp vB = newTemp(Ity_V128);
+      if (vD_addr != 0 || vA_addr != 0) {
+         vex_printf("dis_av_procctl(ppc)(opc2,dst)\n");
+         return False;
+      }
+      DIP("mtvscr v%d\n", vB_addr);
+      assign( vB, getVReg(vB_addr));
+      putGST( PPC_GST_VSCR, unop(Iop_V128to32, mkexpr(vB)) ); 
+      break;
+   }
+   default:
+      vex_printf("dis_av_procctl(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+/*
+ * VSX scalar and vector convert instructions
+ */
+static Bool
+dis_vx_conv ( UInt theInstr, UInt opc2 )
+{
+   /* XX2-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT( theInstr );
+   UChar XB = ifieldRegXB( theInstr );
+   IRTemp xB, xB2;
+   IRTemp b3, b2, b1, b0;
+   xB = xB2 = IRTemp_INVALID;
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vx_conv(ppc)(instr)\n" );
+      return False;
+   }
+
+   /* Create and assign temps only as needed for the given instruction. */
+   switch (opc2) {
+      // scalar double-precision floating point argument
+      case 0x2B0: case 0x0b0: case 0x290: case 0x212: case 0x216: case 0x090:
+         xB = newTemp(Ity_F64);
+         assign( xB,
+                 unop( Iop_ReinterpI64asF64,
+                       unop( Iop_V128HIto64, getVSReg( XB ) ) ) );
+         break;
+      // vector double-precision floating point arguments
+      case 0x1b0: case 0x312: case 0x390: case 0x190: case 0x3B0:
+
+         xB = newTemp(Ity_F64);
+         xB2 = newTemp(Ity_F64);
+         assign( xB,
+                 unop( Iop_ReinterpI64asF64,
+                       unop( Iop_V128HIto64, getVSReg( XB ) ) ) );
+         assign( xB2,
+                 unop( Iop_ReinterpI64asF64,
+                       unop( Iop_V128to64, getVSReg( XB ) ) ) );
+         break;
+      // vector single precision or [un]signed integer word arguments
+      case 0x130: case 0x392: case 0x330: case 0x310: case 0x110:
+      case 0x1f0: case 0x1d0:
+         b3 = b2 = b1 = b0 = IRTemp_INVALID;
+         breakV128to4x32(getVSReg(XB), &b3, &b2, &b1, &b0);
+         break;
+         // vector [un]signed integer doubleword argument
+      case 0x3f0: case 0x370: case 0x3d0: case 0x350:
+         xB = newTemp(Ity_I64);
+         assign( xB, unop( Iop_V128HIto64, getVSReg( XB ) ) );
+         xB2 = newTemp(Ity_I64);
+         assign( xB2, unop( Iop_V128to64, getVSReg( XB ) ) );
+         break;
+      // scalar [un]signed integer doubleword argument
+      case 0x250: case 0x270: case 0x2D0: case 0x2F0:
+         xB = newTemp(Ity_I64);
+         assign( xB, unop( Iop_V128HIto64, getVSReg( XB ) ) );
+         break;
+      // scalar single precision argument
+      case 0x292: // xscvspdp
+         xB  = newTemp(Ity_I32);
+
+         assign( xB, handle_SNaN_to_QNaN_32(unop( Iop_64HIto32,
+                                                  unop( Iop_V128HIto64,
+                                                        getVSReg( XB ) ) ) ) );
+         break;
+      case 0x296: // xscvspdpn (non signaling version of xscvpdp)
+         xB = newTemp(Ity_I32);
+         assign( xB,
+                 unop( Iop_64HIto32, unop( Iop_V128HIto64, getVSReg( XB ) ) ) );
+         break;
+
+      /* Certain instructions have their complete implementation in the main switch statement
+       * that follows this one; thus we have a "do nothing" case for those instructions here.
+       */
+      case 0x170: case 0x150:
+         break; // do nothing
+
+      default:
+         vex_printf( "dis_vx_conv(ppc)(opc2)\n" );
+         return False;
+   }
+
+
+   switch (opc2) {
+      case 0x2B0:
+         // xscvdpsxds (VSX Scalar truncate Double-Precision to integer and Convert
+         //             to Signed Integer Doubleword format with Saturate)
+         DIP("xscvdpsxds v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128, binop( Iop_F64toI64S,
+                                                 mkU32( Irrm_ZERO ),
+                                                 mkexpr( xB ) ), mkU64( 0 ) ) );
+         break;
+      case 0x0b0: // xscvdpsxws (VSX Scalar truncate Double-Precision to integer and
+                  //             Convert to Signed Integer Word format with Saturate)
+         DIP("xscvdpsxws v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_32Sto64,
+                                binop( Iop_F64toI32S,
+                                       mkU32( Irrm_ZERO ),
+                                       mkexpr( xB ) ) ),
+                                       mkU64( 0ULL ) ) );
+         break;
+      case 0x290: // xscvdpuxds (VSX Scalar truncate Double-Precision integer and Convert
+                  //             to Unsigned Integer Doubleword format with Saturate)
+         DIP("xscvdpuxds v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_F64toI64U,
+                                 mkU32( Irrm_ZERO ),
+                                 mkexpr( xB ) ),
+                                 mkU64( 0ULL ) ) );
+         break;
+      case 0x270:
+         // xscvsxdsp (VSX Scalar Convert and round Signed Integer Doubleword
+         //             to Single-Precision format)
+         DIP("xscvsxdsp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_RoundF64toF32,
+                                       get_IR_roundingmode(),
+                                       binop( Iop_I64StoF64,
+                                              get_IR_roundingmode(),
+                                              mkexpr( xB ) ) ) ),
+                          mkU64( 0 ) ) );
+         break;
+      case 0x2F0:
+         // xscvsxddp (VSX Scalar Convert and round Signed Integer Doubleword to
+         //            Double-Precision format)
+         DIP("xscvsxddp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
+                                                binop( Iop_I64StoF64, get_IR_roundingmode(),
+                                                       mkexpr( xB ) ) ),
+                                                       mkU64( 0 ) ) );
+         break;
+      case 0x250:
+         // xscvuxdsp (VSX Scalar Convert and round Unsigned Integer
+         //            Doubleword to Singel-Precision format)
+         DIP("xscvuxdsp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_RoundF64toF32,
+                                       get_IR_roundingmode(),
+                                       binop( Iop_I64UtoF64,
+                                              get_IR_roundingmode(),
+                                              mkexpr( xB ) ) ) ),
+                          mkU64( 0 ) ) );
+         break;
+      case 0x2D0:
+         // xscvuxddp (VSX Scalar Convert and round Unsigned Integer Doubleword to
+         //            Double-Precision format)
+         DIP("xscvuxddp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
+                                                binop( Iop_I64UtoF64, get_IR_roundingmode(),
+                                                       mkexpr( xB ) ) ),
+                                                       mkU64( 0 ) ) );
+         break;
+      case 0x1b0: // xvcvdpsxws (VSX Vector truncate Double-Precision to integer and Convert
+                  //             to Signed Integer Word format with Saturate)
+      {
+         IRTemp hiResult_32 = newTemp(Ity_I32);
+         IRTemp loResult_32 = newTemp(Ity_I32);
+         IRExpr* rmZero = mkU32(Irrm_ZERO);
+
+         DIP("xvcvdpsxws v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         assign(hiResult_32, binop(Iop_F64toI32S, rmZero, mkexpr(xB)));
+         assign(loResult_32, binop(Iop_F64toI32S, rmZero, mkexpr(xB2)));
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_32Sto64, mkexpr( hiResult_32 ) ),
+                          unop( Iop_32Sto64, mkexpr( loResult_32 ) ) ) );
+         break;
+      }
+      case 0x130: case 0x110: // xvcvspsxws, xvcvspuxws
+         //  (VSX Vector truncate Single-Precision to integer and
+         //   Convert to [Un]signed Integer Word format with Saturate)
+      {
+         IRExpr * b0_result, * b1_result, * b2_result, * b3_result;
+         IRTemp tempResult = newTemp(Ity_V128);
+         IRTemp res0 = newTemp(Ity_I32);
+         IRTemp res1 = newTemp(Ity_I32);
+         IRTemp res2 = newTemp(Ity_I32);
+         IRTemp res3 = newTemp(Ity_I32);
+         IRTemp hi64 = newTemp(Ity_I64);
+         IRTemp lo64 = newTemp(Ity_I64);
+         Bool un_signed = (opc2 == 0x110);
+         IROp op = un_signed ? Iop_QFtoI32Ux4_RZ : Iop_QFtoI32Sx4_RZ;
+
+         DIP("xvcvsp%sxws v%u,v%u\n", un_signed ? "u" : "s", (UInt)XT, (UInt)XB);
+         /* The xvcvsp{s|u}xws instruction is similar to vct{s|u}xs, except if src is a NaN,
+          * then result is set to 0x80000000.  */
+         assign(tempResult, unop(op, getVSReg(XB)));
+         assign( hi64, unop(Iop_V128HIto64, mkexpr(tempResult)) );
+         assign( lo64, unop(Iop_V128to64,   mkexpr(tempResult)) );
+         assign( res3, unop(Iop_64HIto32, mkexpr(hi64)) );
+         assign( res2, unop(Iop_64to32,   mkexpr(hi64)) );
+         assign( res1, unop(Iop_64HIto32, mkexpr(lo64)) );
+         assign( res0, unop(Iop_64to32,   mkexpr(lo64)) );
+
+         b3_result = IRExpr_ITE(is_NaN_32(b3),
+                                // then: result is 0x{8|0}80000000
+                                mkU32(un_signed ? 0x00000000 : 0x80000000),
+                                // else: result is from the Iop_QFtoI32{s|u}x4_RZ
+                                mkexpr(res3));
+         b2_result = IRExpr_ITE(is_NaN_32(b2),
+                                // then: result is 0x{8|0}80000000
+                                mkU32(un_signed ? 0x00000000 : 0x80000000),
+                                // else: result is from the Iop_QFtoI32{s|u}x4_RZ
+                                mkexpr(res2));
+         b1_result = IRExpr_ITE(is_NaN_32(b1),
+                                // then: result is 0x{8|0}80000000
+                                mkU32(un_signed ? 0x00000000 : 0x80000000),
+                                // else: result is from the Iop_QFtoI32{s|u}x4_RZ
+                                mkexpr(res1));
+         b0_result = IRExpr_ITE(is_NaN_32(b0),
+                                // then: result is 0x{8|0}80000000
+                                mkU32(un_signed ? 0x00000000 : 0x80000000),
+                                // else: result is from the Iop_QFtoI32{s|u}x4_RZ
+                                mkexpr(res0));
+
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64, b3_result, b2_result ),
+                          binop( Iop_32HLto64, b1_result, b0_result ) ) );
+         break;
+      }
+      case 0x212: // xscvdpsp (VSX Scalar round Double-Precision to single-precision and
+                  //           Convert to Single-Precision format
+         DIP("xscvdpsp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64,
+                                 unop( Iop_ReinterpF32asI32,
+                                       unop( Iop_TruncF64asF32,
+                                             binop( Iop_RoundF64toF32,
+                                                    get_IR_roundingmode(),
+                                                    mkexpr( xB ) ) ) ),
+                                 mkU32( 0 ) ),
+                          mkU64( 0ULL ) ) );
+         break;
+      case 0x216: /* xscvdpspn (VSX Scalar convert scalar Single-Precision to
+                              vector Single-Precision non-signalling */
+         DIP("xscvdpspn v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64,
+                                 unop( Iop_ReinterpF32asI32,
+                                       unop( Iop_TruncF64asF32,
+                                             mkexpr( xB ) ) ),
+                                 mkU32( 0 ) ),
+                          mkU64( 0ULL ) ) );
+         break;
+      case 0x090: // xscvdpuxws (VSX Scalar truncate Double-Precision to integer
+                  //             and Convert to Unsigned Integer Word format with Saturate)
+         DIP("xscvdpuxws v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64,
+                                 mkU32( 0 ),
+                                 binop( Iop_F64toI32U,
+                                        mkU32( Irrm_ZERO ),
+                                        mkexpr( xB ) ) ),
+                          mkU64( 0ULL ) ) );
+         break;
+      case 0x292: // xscvspdp (VSX Scalar Convert Single-Precision to Double-Precision format, signaling)
+         DIP("xscvspdp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                unop( Iop_F32toF64,
+                                      unop( Iop_ReinterpI32asF32, mkexpr( xB ) ) ) ),
+                          mkU64( 0ULL ) ) );
+         break;
+      case 0x296: // xscvspdpn (VSX Scalar Convert Single-Precision to Double-Precision format Non signaling)
+         DIP("xscvspdpn v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                unop( Iop_F32toF64,
+                                      unop( Iop_ReinterpI32asF32, mkexpr( xB ) ) ) ),
+                                      mkU64( 0ULL ) ) );
+         break;
+      case 0x312: // xvcvdpsp (VSX Vector round Double-Precision to single-precision
+                  //           and Convert to Single-Precision format)
+         DIP("xvcvdpsp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64,
+                                 unop( Iop_ReinterpF32asI32,
+                                       unop( Iop_TruncF64asF32,
+                                             binop( Iop_RoundF64toF32,
+                                                    get_IR_roundingmode(),
+                                                    mkexpr( xB ) ) ) ),
+                                 mkU32( 0 ) ),
+                          binop( Iop_32HLto64,
+                                 unop( Iop_ReinterpF32asI32,
+                                       unop( Iop_TruncF64asF32,
+                                             binop( Iop_RoundF64toF32,
+                                                    get_IR_roundingmode(),
+                                                    mkexpr( xB2 ) ) ) ),
+                                 mkU32( 0 ) ) ) );
+         break;
+      case 0x390: // xvcvdpuxds (VSX Vector truncate Double-Precision to integer
+                  //             and Convert to Unsigned Integer Doubleword format
+                  //             with Saturate)
+         DIP("xvcvdpuxds v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_F64toI64U, mkU32( Irrm_ZERO ), mkexpr( xB ) ),
+                          binop( Iop_F64toI64U, mkU32( Irrm_ZERO ), mkexpr( xB2 ) ) ) );
+         break;
+      case 0x190: // xvcvdpuxws (VSX Vector truncate Double-Precision to integer and
+                  //             Convert to Unsigned Integer Word format with Saturate)
+         DIP("xvcvdpuxws v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64,
+                                 binop( Iop_F64toI32U,
+                                        mkU32( Irrm_ZERO ),
+                                        mkexpr( xB ) ),
+                                 mkU32( 0 ) ),
+                          binop( Iop_32HLto64,
+                                 binop( Iop_F64toI32U,
+                                        mkU32( Irrm_ZERO ),
+                                        mkexpr( xB2 ) ),
+                                 mkU32( 0 ) ) ) );
+         break;
+      case 0x392: // xvcvspdp (VSX Vector Convert Single-Precision to Double-Precision format)
+         DIP("xvcvspdp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                unop( Iop_F32toF64,
+                                      unop( Iop_ReinterpI32asF32,
+                                            handle_SNaN_to_QNaN_32( mkexpr( b3 ) ) ) ) ),
+                          unop( Iop_ReinterpF64asI64,
+                                unop( Iop_F32toF64,
+                                      unop( Iop_ReinterpI32asF32,
+                                            handle_SNaN_to_QNaN_32( mkexpr( b1 ) ) ) ) ) ) );
+         break;
+      case 0x330: // xvcvspsxds (VSX Vector truncate Single-Precision to integer and
+                  //           Convert to Signed Integer Doubleword format with Saturate)
+         DIP("xvcvspsxds v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_F64toI64S,
+                                 mkU32( Irrm_ZERO ),
+                                 unop( Iop_F32toF64,
+                                       unop( Iop_ReinterpI32asF32, mkexpr( b3 ) ) ) ),
+                          binop( Iop_F64toI64S,
+                                 mkU32( Irrm_ZERO ),
+                                 unop( Iop_F32toF64,
+                                       unop( Iop_ReinterpI32asF32, mkexpr( b1 ) ) ) ) ) );
+         break;
+      case 0x310: // xvcvspuxds (VSX Vector truncate Single-Precision to integer and
+                  //            Convert to Unsigned Integer Doubleword format with Saturate)
+         DIP("xvcvspuxds v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_F64toI64U,
+                                 mkU32( Irrm_ZERO ),
+                                 unop( Iop_F32toF64,
+                                       unop( Iop_ReinterpI32asF32, mkexpr( b3 ) ) ) ),
+                          binop( Iop_F64toI64U,
+                                 mkU32( Irrm_ZERO ),
+                                 unop( Iop_F32toF64,
+                                       unop( Iop_ReinterpI32asF32, mkexpr( b1 ) ) ) ) ) );
+         break;
+      case 0x3B0: // xvcvdpsxds (VSX Vector truncate Double-Precision to integer and
+                  //             Convert to Signed Integer Doubleword format with Saturate)
+         DIP("xvcvdpsxds v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_F64toI64S, mkU32( Irrm_ZERO ), mkexpr( xB ) ),
+                          binop( Iop_F64toI64S, mkU32( Irrm_ZERO ), mkexpr( xB2 ) ) ) );
+         break;
+      case 0x3f0: // xvcvsxddp (VSX Vector Convert and round Signed Integer Doubleword
+                  //            to Double-Precision format)
+         DIP("xvcvsxddp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_I64StoF64,
+                                       get_IR_roundingmode(),
+                                       mkexpr( xB ) ) ),
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_I64StoF64,
+                                       get_IR_roundingmode(),
+                                       mkexpr( xB2 ) ) ) ) );
+         break;
+      case 0x3d0: // xvcvuxddp (VSX Vector Convert and round Unsigned Integer Doubleword
+                  //            to Double-Precision format)
+         DIP("xvcvuxddp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_I64UtoF64,
+                                       get_IR_roundingmode(),
+                                       mkexpr( xB ) ) ),
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_I64UtoF64,
+                                       get_IR_roundingmode(),
+                                       mkexpr( xB2 ) ) ) ) );
+
+         break;
+      case 0x370: // xvcvsxdsp (VSX Vector Convert and round Signed Integer Doubleword
+                  //            to Single-Precision format)
+         DIP("xvcvsxddp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64,
+                                 unop( Iop_ReinterpF32asI32,
+                                       unop( Iop_TruncF64asF32,
+                                             binop( Iop_RoundF64toF32,
+                                                    get_IR_roundingmode(),
+                                                    binop( Iop_I64StoF64,
+                                                           get_IR_roundingmode(),
+                                                           mkexpr( xB ) ) ) ) ),
+                                 mkU32( 0 ) ),
+                          binop( Iop_32HLto64,
+                                 unop( Iop_ReinterpF32asI32,
+                                       unop( Iop_TruncF64asF32,
+                                             binop( Iop_RoundF64toF32,
+                                                    get_IR_roundingmode(),
+                                                    binop( Iop_I64StoF64,
+                                                           get_IR_roundingmode(),
+                                                           mkexpr( xB2 ) ) ) ) ),
+                                 mkU32( 0 ) ) ) );
+         break;
+      case 0x350: // xvcvuxdsp (VSX Vector Convert and round Unsigned Integer Doubleword
+                  //            to Single-Precision format)
+         DIP("xvcvuxddp v%u,v%u\n", (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64,
+                                 unop( Iop_ReinterpF32asI32,
+                                       unop( Iop_TruncF64asF32,
+                                             binop( Iop_RoundF64toF32,
+                                                    get_IR_roundingmode(),
+                                                    binop( Iop_I64UtoF64,
+                                                           get_IR_roundingmode(),
+                                                           mkexpr( xB ) ) ) ) ),
+                                 mkU32( 0 ) ),
+                          binop( Iop_32HLto64,
+                                 unop( Iop_ReinterpF32asI32,
+                                       unop( Iop_TruncF64asF32,
+                                             binop( Iop_RoundF64toF32,
+                                                    get_IR_roundingmode(),
+                                                    binop( Iop_I64UtoF64,
+                                                           get_IR_roundingmode(),
+                                                           mkexpr( xB2 ) ) ) ) ),
+                                 mkU32( 0 ) ) ) );
+         break;
+
+      case 0x1f0: // xvcvsxwdp (VSX Vector Convert Signed Integer Word to Double-Precision format)
+         DIP("xvcvsxwdp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_I64StoF64, get_IR_roundingmode(),
+                                       unop( Iop_32Sto64, mkexpr( b3 ) ) ) ),
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_I64StoF64, get_IR_roundingmode(),
+                                       unop( Iop_32Sto64, mkexpr( b1 ) ) ) ) ) );
+         break;
+      case 0x1d0: // xvcvuxwdp (VSX Vector Convert Unsigned Integer Word to Double-Precision format)
+         DIP("xvcvuxwdp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_I64UtoF64, get_IR_roundingmode(),
+                                       unop( Iop_32Uto64, mkexpr( b3 ) ) ) ),
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_I64UtoF64, get_IR_roundingmode(),
+                                       unop( Iop_32Uto64, mkexpr( b1 ) ) ) ) ) );
+         break;
+      case 0x170: // xvcvsxwsp (VSX Vector Convert Signed Integer Word to Single-Precision format)
+         DIP("xvcvsxwsp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT, unop( Iop_I32StoFx4, getVSReg( XB ) ) );
+         break;
+      case 0x150: // xvcvuxwsp (VSX Vector Convert Unsigned Integer Word to Single-Precision format)
+         DIP("xvcvuxwsp v%u,v%u\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT, unop( Iop_I32UtoFx4, getVSReg( XB ) ) );
+         break;
+
+      default:
+         vex_printf( "dis_vx_conv(ppc)(opc2)\n" );
+         return False;
+   }
+   return True;
+}
+
+/*
+ * VSX vector Double Precision Floating Point Arithmetic Instructions
+ *
+ * Decodes the XX3-form VSX double-precision vector arithmetic group:
+ * add/sub/mul/div, sqrt, the fused multiply-add/sub family (plus the
+ * negated variants), and the software test instructions xvtsqrtdp /
+ * xvtdivdp.  Returns True if the instruction was handled, False on an
+ * unrecognised opcode (caller emits the decode failure).
+ */
+static Bool
+dis_vxv_dp_arith ( UInt theInstr, UInt opc2 )
+{
+   /* XX3-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT( theInstr );
+   UChar XA = ifieldRegXA( theInstr );
+   UChar XB = ifieldRegXB( theInstr );
+   IRExpr* rm = get_IR_roundingmode();
+   /* High doubleword (frA/frB) and low doubleword (frA2/frB2) lanes of
+    * the source registers, reinterpreted as F64.
+    */
+   IRTemp frA = newTemp(Ity_F64);
+   IRTemp frB = newTemp(Ity_F64);
+   IRTemp frA2 = newTemp(Ity_F64);
+   IRTemp frB2 = newTemp(Ity_F64);
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vxv_dp_arith(ppc)(instr)\n" );
+      return False;
+   }
+
+   assign(frA,  unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XA ))));
+   assign(frB,  unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
+   assign(frA2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg( XA ))));
+   assign(frB2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg( XB ))));
+
+   switch (opc2) {
+      case 0x1E0: // xvdivdp (VSX Vector Divide Double-Precision)
+      case 0x1C0: // xvmuldp (VSX Vector Multiply Double-Precision)
+      case 0x180: // xvadddp (VSX Vector Add Double-Precision)
+      case 0x1A0: // xvsubdp (VSX Vector Subtract Double-Precision)
+      {
+         IROp mOp;
+         const HChar * oper_name;
+         switch (opc2) {
+            case 0x1E0:
+               mOp = Iop_DivF64;
+               oper_name = "div";
+               break;
+            case 0x1C0:
+               mOp = Iop_MulF64;
+               oper_name = "mul";
+               break;
+            case 0x180:
+               mOp = Iop_AddF64;
+               oper_name = "add";
+               break;
+            case 0x1A0:
+               mOp = Iop_SubF64;
+               oper_name = "sub";
+               break;
+
+            default:
+               vpanic("The impossible happened: dis_vxv_dp_arith(ppc)");
+         }
+         IRTemp hiResult = newTemp(Ity_I64);
+         IRTemp loResult = newTemp(Ity_I64);
+         DIP("xv%sdp v%d,v%d,v%d\n", oper_name, (UInt)XT, (UInt)XA, (UInt)XB);
+
+         /* Apply the op per 64-bit lane, then repack the two lanes. */
+         assign( hiResult,
+                 unop( Iop_ReinterpF64asI64,
+                       triop( mOp, rm, mkexpr( frA ), mkexpr( frB ) ) ) );
+         assign( loResult,
+                 unop( Iop_ReinterpF64asI64,
+                       triop( mOp, rm, mkexpr( frA2 ), mkexpr( frB2 ) ) ) );
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128, mkexpr( hiResult ), mkexpr( loResult ) ) );
+         break;
+      }
+      case 0x196: // xvsqrtdp
+      {
+         IRTemp hiResult = newTemp(Ity_I64);
+         IRTemp loResult = newTemp(Ity_I64);
+         DIP("xvsqrtdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+
+         assign( hiResult,
+                 unop( Iop_ReinterpF64asI64,
+                       binop( Iop_SqrtF64, rm, mkexpr( frB ) ) ) );
+         assign( loResult,
+                 unop( Iop_ReinterpF64asI64,
+                       binop( Iop_SqrtF64, rm, mkexpr( frB2 ) ) ) );
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128, mkexpr( hiResult ), mkexpr( loResult ) ) );
+         break;
+      }
+      case 0x184: case 0x1A4: // xvmaddadp, xvmaddmdp (VSX Vector Multiply-Add Double-Precision)
+      case 0x1C4: case 0x1E4: // xvmsubadp, xvmsubmdp (VSX Vector Multiply-Subtract Double-Precision)
+      case 0x384: case 0x3A4: // xvnmaddadp, xvnmaddmdp (VSX Vector Negate Multiply-Add Double-Precision)
+      case 0x3C4: case 0x3E4: // xvnmsubadp, xvnmsubmdp (VSX Vector Negate Multiply-Subtract Double-Precision)
+      {
+         /* xvm{add|sub}mdp XT,XA,XB is element-wise equivalent to fm{add|sub} FRT,FRA,FRC,FRB with . . .
+          *    XT == FRC
+          *    XA == FRA
+          *    XB == FRB
+          *
+          * and for xvm{add|sub}adp . . .
+          *    XT == FRB
+          *    XA == FRA
+          *    XB == FRC
+          */
+         Bool negate;
+         IROp mOp = Iop_INVALID;
+         const HChar * oper_name = NULL;
+         Bool mdp = False;  // True for the "m" (multiply-destructive) form
+
+         switch (opc2) {
+            case 0x184: case 0x1A4:
+            case 0x384: case 0x3A4:
+               mOp = Iop_MAddF64;
+               oper_name = "add";
+               mdp = (opc2 & 0x0FF) == 0x0A4;
+               break;
+
+            case 0x1C4: case 0x1E4:
+            case 0x3C4: case 0x3E4:
+               mOp = Iop_MSubF64;
+               oper_name = "sub";
+               mdp = (opc2 & 0x0FF) == 0x0E4;
+               break;
+
+            default:
+               /* Fixed: the panic previously named dis_vxv_sp_arith. */
+               vpanic("The impossible happened: dis_vxv_dp_arith(ppc)");
+         }
+
+         /* Bit 0x200 in opc2 selects the negated ("xvn...") variants. */
+         switch (opc2) {
+            case 0x384: case 0x3A4:
+            case 0x3C4: case 0x3E4:
+               negate = True;
+               break;
+            default:
+               negate = False;
+         }
+         IRTemp hiResult = newTemp(Ity_I64);
+         IRTemp loResult = newTemp(Ity_I64);
+         IRTemp frT = newTemp(Ity_F64);
+         IRTemp frT2 = newTemp(Ity_F64);
+         DIP("xv%sm%s%s v%d,v%d,v%d\n", negate ? "n" : "", oper_name, mdp ? "mdp" : "adp",
+             (UInt)XT, (UInt)XA, (UInt)XB);
+         assign(frT,  unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XT ) ) ) );
+         assign(frT2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg( XT ) ) ) );
+
+         /* The "m" form multiplies by XT and adds/subtracts XB; the "a"
+          * form multiplies by XB and adds/subtracts XT.
+          */
+         assign( hiResult,
+                 unop( Iop_ReinterpF64asI64,
+                       qop( mOp,
+                            rm,
+                            mkexpr( frA ),
+                            mkexpr( mdp ? frT : frB ),
+                            mkexpr( mdp ? frB : frT ) ) ) );
+         assign( loResult,
+                 unop( Iop_ReinterpF64asI64,
+                       qop( mOp,
+                            rm,
+                            mkexpr( frA2 ),
+                            mkexpr( mdp ? frT2 : frB2 ),
+                            mkexpr( mdp ? frB2 : frT2 ) ) ) );
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          mkexpr( negate ? getNegatedResult( hiResult )
+                                         : hiResult ),
+                          mkexpr( negate ? getNegatedResult( loResult )
+                                         : loResult ) ) );
+         break;
+      }
+      case 0x1D4: // xvtsqrtdp (VSX Vector Test for software Square Root Double-Precision)
+      {
+         IRTemp frBHi_I64 = newTemp(Ity_I64);
+         IRTemp frBLo_I64 = newTemp(Ity_I64);
+         IRTemp flagsHi = newTemp(Ity_I32);
+         IRTemp flagsLo = newTemp(Ity_I32);
+         UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+         IRTemp  fe_flagHi, fg_flagHi, fe_flagLo, fg_flagLo;
+         fe_flagHi = fg_flagHi = fe_flagLo = fg_flagLo = IRTemp_INVALID;
+
+         DIP("xvtsqrtdp cr%d,v%d\n", (UInt)crfD, (UInt)XB);
+         assign( frBHi_I64, unop(Iop_V128HIto64, getVSReg( XB )) );
+         assign( frBLo_I64, unop(Iop_V128to64, getVSReg( XB )) );
+         do_fp_tsqrt(frBHi_I64, False /*not single precision*/, &fe_flagHi, &fg_flagHi);
+         do_fp_tsqrt(frBLo_I64, False /*not single precision*/, &fe_flagLo, &fg_flagLo);
+         /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
+          * where fl_flag == 1 on ppc64.
+          */
+         assign( flagsHi,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flagHi), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flagHi), mkU8( 1 ) ) ) );
+         assign( flagsLo,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flagLo), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flagLo), mkU8( 1 ) ) ) );
+         /* Both lanes' flags are OR'd into the single target CR field. */
+         putGST_field( PPC_GST_CR,
+                       binop( Iop_Or32, mkexpr( flagsHi ), mkexpr( flagsLo ) ),
+                       crfD );
+         break;
+      }
+      case 0x1F4: // xvtdivdp (VSX Vector Test for software Divide Double-Precision)
+      {
+         IRTemp frBHi_I64 = newTemp(Ity_I64);
+         IRTemp frBLo_I64 = newTemp(Ity_I64);
+         IRTemp frAHi_I64 = newTemp(Ity_I64);
+         IRTemp frALo_I64 = newTemp(Ity_I64);
+         IRTemp flagsHi = newTemp(Ity_I32);
+         IRTemp flagsLo = newTemp(Ity_I32);
+         UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+         IRTemp  fe_flagHi, fg_flagHi, fe_flagLo, fg_flagLo;
+         fe_flagHi = fg_flagHi = fe_flagLo = fg_flagLo = IRTemp_INVALID;
+
+         DIP("xvtdivdp cr%d,v%d,v%d\n", (UInt)crfD, (UInt)XA, (UInt)XB);
+         assign( frAHi_I64, unop(Iop_V128HIto64, getVSReg( XA )) );
+         assign( frALo_I64, unop(Iop_V128to64, getVSReg( XA )) );
+         assign( frBHi_I64, unop(Iop_V128HIto64, getVSReg( XB )) );
+         assign( frBLo_I64, unop(Iop_V128to64, getVSReg( XB )) );
+
+         _do_fp_tdiv(frAHi_I64, frBHi_I64, False/*dp*/, &fe_flagHi, &fg_flagHi);
+         _do_fp_tdiv(frALo_I64, frBLo_I64, False/*dp*/, &fe_flagLo, &fg_flagLo);
+         /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
+          * where fl_flag == 1 on ppc64.
+          */
+         assign( flagsHi,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flagHi), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flagHi), mkU8( 1 ) ) ) );
+         assign( flagsLo,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flagLo), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flagLo), mkU8( 1 ) ) ) );
+         putGST_field( PPC_GST_CR,
+                       binop( Iop_Or32, mkexpr( flagsHi ), mkexpr( flagsLo ) ),
+                       crfD );
+         break;
+      }
+
+      default:
+         vex_printf( "dis_vxv_dp_arith(ppc)(opc2)\n" );
+         return False;
+   }
+   return True;
+}
+
+/*
+ * VSX vector Single Precision Floating Point Arithmetic Instructions
+ *
+ * Decodes the XX3-form VSX single-precision vector arithmetic group:
+ * add/sub/mul (mapped to 4x32 SIMD ops), div and sqrt (done lane-wise
+ * via F64 ops with a trailing truncation to F32), the fused
+ * multiply-add/sub family, and the software test instructions
+ * xvtsqrtsp / xvtdivsp.  Returns True if handled, False otherwise.
+ */
+static Bool
+dis_vxv_sp_arith ( UInt theInstr, UInt opc2 )
+{
+   /* XX3-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT( theInstr );
+   UChar XA = ifieldRegXA( theInstr );
+   UChar XB = ifieldRegXB( theInstr );
+   IRExpr* rm = get_IR_roundingmode();
+   /* Per-lane views of XA (a3..a0) and XB (b3..b0); filled lazily by
+    * breakV128to4xF64 / breakV128to4x32 in the cases that need them.
+    */
+   IRTemp a3, a2, a1, a0;
+   IRTemp b3, b2, b1, b0;
+   IRTemp res0 = newTemp(Ity_I32);
+   IRTemp res1 = newTemp(Ity_I32);
+   IRTemp res2 = newTemp(Ity_I32);
+   IRTemp res3 = newTemp(Ity_I32);
+
+   a3 = a2 = a1 = a0 = IRTemp_INVALID;
+   b3 = b2 = b1 = b0 = IRTemp_INVALID;
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vxv_sp_arith(ppc)(instr)\n" );
+      return False;
+   }
+
+   switch (opc2) {
+      case 0x100: // xvaddsp (VSX Vector Add Single-Precision)
+         DIP("xvaddsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         // WARNING: BOGUS! The backend ignores rm on Iop_Add32Fx4
+         putVSReg( XT, triop(Iop_Add32Fx4, rm,
+                             getVSReg( XA ), getVSReg( XB )) );
+         break;
+
+      case 0x140: // xvmulsp (VSX Vector Multiply Single-Precision)
+         DIP("xvmulsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         // WARNING: BOGUS! The backend ignores rm on Iop_Mul32Fx4
+         putVSReg( XT, triop(Iop_Mul32Fx4, rm,
+                             getVSReg( XA ), getVSReg( XB )) );
+         break;
+
+      case 0x120: // xvsubsp (VSX Vector Subtract Single-Precision)
+         DIP("xvsubsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         // WARNING: BOGUS! The backend ignores rm on Iop_Sub32Fx4
+         putVSReg( XT, triop(Iop_Sub32Fx4, rm,
+                             getVSReg( XA ), getVSReg( XB )) );
+         break;
+
+      case 0x160: // xvdivsp (VSX Vector Divide Single-Precision)
+      {
+         /* Iop_Div32Fx4 is not implemented for ppc64 (in host_ppc_{isel|defs}.c.
+          * So there are two choices:
+          *   1. Implement the xvdivsp with a native insn; or
+          *   2. Extract the 4 single precision floats from each vector
+          *      register inputs and perform fdivs on each pair
+          * I will do the latter, due to the general philosophy of
+          * reusing existing implementations when practical.
+          */
+         DIP("xvdivsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         breakV128to4xF64( getVSReg( XA ), &a3, &a2, &a1, &a0 );
+         breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
+
+         assign( res0,
+              unop( Iop_ReinterpF32asI32,
+                    unop( Iop_TruncF64asF32,
+                          triop( Iop_DivF64r32, rm, mkexpr( a0 ), mkexpr( b0 ) ) ) ) );
+         assign( res1,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             triop( Iop_DivF64r32, rm, mkexpr( a1 ), mkexpr( b1 ) ) ) ) );
+         assign( res2,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             triop( Iop_DivF64r32, rm, mkexpr( a2 ), mkexpr( b2 ) ) ) ) );
+         assign( res3,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             triop( Iop_DivF64r32, rm, mkexpr( a3 ), mkexpr( b3 ) ) ) ) );
+
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64, mkexpr( res3 ), mkexpr( res2 ) ),
+                          binop( Iop_32HLto64, mkexpr( res1 ), mkexpr( res0 ) ) ) );
+         break;
+      }
+      case 0x116: // xvsqrtsp (VSX Vector Square Root Single-Precision)
+      {
+         DIP("xvsqrtsp v%d,v%d\n", (UInt)XT, (UInt)XB);
+         breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
+         /* Note: The native xvsqrtsp insruction does not always give the same precision
+          * as what we get with Iop_SqrtF64.  But it doesn't seem worthwhile to implement
+          * an Iop_SqrtF32 that would give us a lower precision result, albeit more true
+          * to the actual instruction.
+          */
+
+         assign( res0,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             binop(Iop_SqrtF64, rm, mkexpr( b0 ) ) ) ) );
+         assign( res1,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             binop(Iop_SqrtF64, rm, mkexpr( b1 ) ) ) ) );
+         assign( res2,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             binop(Iop_SqrtF64, rm, mkexpr( b2 ) ) ) ) );
+         assign( res3,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             binop(Iop_SqrtF64, rm, mkexpr( b3 ) ) ) ) );
+
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64, mkexpr( res3 ), mkexpr( res2 ) ),
+                          binop( Iop_32HLto64, mkexpr( res1 ), mkexpr( res0 ) ) ) );
+         break;
+      }
+
+      case 0x104: case 0x124: // xvmaddasp, xvmaddmsp (VSX Vector Multiply-Add Single-Precision)
+      case 0x144: case 0x164: // xvmsubasp, xvmsubmsp (VSX Vector Multiply-Subtract Single-Precision)
+      case 0x304: case 0x324: // xvnmaddasp, xvnmaddmsp (VSX Vector Negate Multiply-Add Single-Precision)
+      case 0x344: case 0x364: // xvnmsubasp, xvnmsubmsp (VSX Vector Negate Multiply-Subtract Single-Precision)
+      {
+         IRTemp t3, t2, t1, t0;
+         Bool msp = False;  // True for the "m" (multiply-destructive) form
+         Bool negate;
+         const HChar * oper_name = NULL;
+         IROp mOp = Iop_INVALID;
+         switch (opc2) {
+            case 0x104: case 0x124:
+            case 0x304: case 0x324:
+               msp = (opc2 & 0x0FF) == 0x024;
+               mOp = Iop_MAddF64r32;
+               /* Fixed: was "madd", which made the DIP format below print
+                * "xvmmaddasp" (doubled 'm').  "add" matches the DP twin
+                * and yields the correct mnemonic "xvmaddasp" etc.
+                */
+               oper_name = "add";
+               break;
+
+            case 0x144: case 0x164:
+            case 0x344: case 0x364:
+               msp = (opc2 & 0x0FF) == 0x064;
+               mOp = Iop_MSubF64r32;
+               oper_name = "sub";
+               break;
+
+            default:
+               vpanic("The impossible happened: dis_vxv_sp_arith(ppc)");
+         }
+
+         /* Bit 0x200 in opc2 selects the negated ("xvn...") variants. */
+         switch (opc2) {
+            case 0x304: case 0x324:
+            case 0x344: case 0x364:
+               negate = True;
+               break;
+
+            default:
+               negate = False;
+         }
+
+         DIP("xv%sm%s%s v%d,v%d,v%d\n", negate ? "n" : "", oper_name, msp ? "msp" : "asp",
+             (UInt)XT, (UInt)XA, (UInt)XB);
+
+         t3 = t2 = t1 = t0 = IRTemp_INVALID;
+         breakV128to4xF64( getVSReg( XA ), &a3, &a2, &a1, &a0 );
+         breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
+         breakV128to4xF64( getVSReg( XT ), &t3, &t2, &t1, &t0 );
+
+         /* The "m" form multiplies by XT and adds/subtracts XB; the "a"
+          * form multiplies by XB and adds/subtracts XT.
+          */
+         assign( res0,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             qop( mOp,
+                                  rm,
+                                  mkexpr( a0 ),
+                                  mkexpr( msp ? t0 : b0 ),
+                                  mkexpr( msp ? b0 : t0 ) ) ) ) );
+         assign( res1,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             qop( mOp,
+                                  rm,
+                                  mkexpr( a1 ),
+                                  mkexpr( msp ? t1 : b1 ),
+                                  mkexpr( msp ? b1 : t1 ) ) ) ) );
+         assign( res2,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             qop( mOp,
+                                  rm,
+                                  mkexpr( a2 ),
+                                  mkexpr( msp ? t2 : b2 ),
+                                  mkexpr( msp ? b2 : t2 ) ) ) ) );
+         assign( res3,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             qop( mOp,
+                                  rm,
+                                  mkexpr( a3 ),
+                                  mkexpr( msp ? t3 : b3 ),
+                                  mkexpr( msp ? b3 : t3 ) ) ) ) );
+
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64, mkexpr( negate ? getNegatedResult_32( res3 ) : res3 ),
+                                 mkexpr( negate ? getNegatedResult_32( res2 ) : res2 ) ),
+                          binop( Iop_32HLto64, mkexpr( negate ? getNegatedResult_32( res1 ) : res1 ),
+                                 mkexpr( negate ? getNegatedResult_32( res0 ) : res0 ) ) ) );
+
+         break;
+      }
+      case 0x154: // xvtsqrtsp (VSX Vector Test for software Square Root Single-Precision)
+      {
+         IRTemp flags0 = newTemp(Ity_I32);
+         IRTemp flags1 = newTemp(Ity_I32);
+         IRTemp flags2 = newTemp(Ity_I32);
+         IRTemp flags3 = newTemp(Ity_I32);
+         UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+         IRTemp  fe_flag0, fg_flag0, fe_flag1, fg_flag1;
+         IRTemp  fe_flag2, fg_flag2, fe_flag3, fg_flag3;
+         fe_flag0 = fg_flag0 = fe_flag1 = fg_flag1 = IRTemp_INVALID;
+         fe_flag2 = fg_flag2 = fe_flag3 = fg_flag3 = IRTemp_INVALID;
+         DIP("xvtsqrtsp cr%d,v%d\n", (UInt)crfD, (UInt)XB);
+
+         breakV128to4x32( getVSReg( XB ), &b3, &b2, &b1, &b0 );
+         do_fp_tsqrt(b0, True /* single precision*/, &fe_flag0, &fg_flag0);
+         do_fp_tsqrt(b1, True /* single precision*/, &fe_flag1, &fg_flag1);
+         do_fp_tsqrt(b2, True /* single precision*/, &fe_flag2, &fg_flag2);
+         do_fp_tsqrt(b3, True /* single precision*/, &fe_flag3, &fg_flag3);
+
+         /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
+          * where fl_flag == 1 on ppc64.
+          */
+         assign( flags0,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag0), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag0), mkU8( 1 ) ) ) );
+         assign( flags1,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag1), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag1), mkU8( 1 ) ) ) );
+         assign( flags2,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag2), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag2), mkU8( 1 ) ) ) );
+         assign( flags3,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag3), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag3), mkU8( 1 ) ) ) );
+         /* All four lanes' flags are OR'd into the single target CR field. */
+         putGST_field( PPC_GST_CR,
+                       binop( Iop_Or32,
+                              mkexpr( flags0 ),
+                              binop( Iop_Or32,
+                                     mkexpr( flags1 ),
+                                     binop( Iop_Or32,
+                                            mkexpr( flags2 ),
+                                            mkexpr( flags3 ) ) ) ),
+                       crfD );
+
+         break;
+      }
+      case 0x174: // xvtdivsp (VSX Vector Test for software Divide Single-Precision)
+      {
+         IRTemp flags0 = newTemp(Ity_I32);
+         IRTemp flags1 = newTemp(Ity_I32);
+         IRTemp flags2 = newTemp(Ity_I32);
+         IRTemp flags3 = newTemp(Ity_I32);
+         UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+         IRTemp  fe_flag0, fg_flag0, fe_flag1, fg_flag1;
+         IRTemp  fe_flag2, fg_flag2, fe_flag3, fg_flag3;
+         fe_flag0 = fg_flag0 = fe_flag1 = fg_flag1 = IRTemp_INVALID;
+         fe_flag2 = fg_flag2 = fe_flag3 = fg_flag3 = IRTemp_INVALID;
+         DIP("xvtdivsp cr%d,v%d,v%d\n", (UInt)crfD, (UInt)XA, (UInt)XB);
+
+         breakV128to4x32( getVSReg( XA ), &a3, &a2, &a1, &a0 );
+         breakV128to4x32( getVSReg( XB ), &b3, &b2, &b1, &b0 );
+         _do_fp_tdiv(a0, b0, True /* single precision*/, &fe_flag0, &fg_flag0);
+         _do_fp_tdiv(a1, b1, True /* single precision*/, &fe_flag1, &fg_flag1);
+         _do_fp_tdiv(a2, b2, True /* single precision*/, &fe_flag2, &fg_flag2);
+         _do_fp_tdiv(a3, b3, True /* single precision*/, &fe_flag3, &fg_flag3);
+
+         /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
+          * where fl_flag == 1 on ppc64.
+          */
+         assign( flags0,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag0), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag0), mkU8( 1 ) ) ) );
+         assign( flags1,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag1), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag1), mkU8( 1 ) ) ) );
+         assign( flags2,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag2), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag2), mkU8( 1 ) ) ) );
+         assign( flags3,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag3), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag3), mkU8( 1 ) ) ) );
+         putGST_field( PPC_GST_CR,
+                       binop( Iop_Or32,
+                              mkexpr( flags0 ),
+                              binop( Iop_Or32,
+                                     mkexpr( flags1 ),
+                                     binop( Iop_Or32,
+                                            mkexpr( flags2 ),
+                                            mkexpr( flags3 ) ) ) ),
+                       crfD );
+
+         break;
+      }
+
+      default:
+         vex_printf( "dis_vxv_sp_arith(ppc)(opc2)\n" );
+         return False;
+   }
+   return True;
+}
+
+/*
+ * Vector Population Count/bit matrix transpose
+ *
+ * Decodes the AltiVec count-leading-zeros family (vclzb/h/w/d), the
+ * population-count family (vpopcntb/h/w/d) and vgbbd.  Returns True if
+ * the instruction was handled, False on an unrecognised opcode.
+ */
+static Bool
+dis_av_count_bitTranspose ( UInt theInstr, UInt opc2 )
+{
+   UChar vRB_addr = ifieldRegB(theInstr);
+   UChar vRT_addr = ifieldRegDS(theInstr);
+   UChar opc1 = ifieldOPC( theInstr );
+   IRTemp vB = newTemp(Ity_V128);
+   assign( vB, getVReg(vRB_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf( "dis_av_count_bitTranspose(ppc)(instr)\n" );
+      return False;
+   }
+
+   switch (opc2) {
+      case 0x702:    // vclzb
+         DIP("vclzb v%d,v%d\n", vRT_addr, vRB_addr);
+         putVReg( vRT_addr, unop(Iop_Clz8x16, mkexpr( vB ) ) );
+         break;
+
+      case 0x742:    // vclzh
+         DIP("vclzh v%d,v%d\n", vRT_addr, vRB_addr);
+         putVReg( vRT_addr, unop(Iop_Clz16x8, mkexpr( vB ) ) );
+         break;
+
+      case 0x782:    // vclzw
+         DIP("vclzw v%d,v%d\n", vRT_addr, vRB_addr);
+         putVReg( vRT_addr, unop(Iop_Clz32x4, mkexpr( vB ) ) );
+         break;
+
+      case 0x7C2:    // vclzd
+         DIP("vclzd v%d,v%d\n", vRT_addr, vRB_addr);
+         putVReg( vRT_addr, unop(Iop_Clz64x2, mkexpr( vB ) ) );
+         break;
+
+      case 0x703:    // vpopcntb
+      {
+         /* Break vector into 32-bit words and do the population count
+          * on byte in the words
+          */
+         IRType ty = Ity_I32;
+         IRTemp bits0_31, bits32_63, bits64_95, bits96_127;
+         bits0_31 = bits32_63 = bits64_95 = bits96_127 = IRTemp_INVALID;
+         IRTemp cnt_bits0_31, cnt_bits32_63, cnt_bits64_95, cnt_bits96_127;
+         cnt_bits0_31 = cnt_bits32_63 = cnt_bits64_95 = cnt_bits96_127 = IRTemp_INVALID;
+
+         DIP("vpopcntb v%d,v%d\n", vRT_addr, vRB_addr);
+         breakV128to4x32(mkexpr( vB), &bits96_127, &bits64_95, &bits32_63, &bits0_31 );
+         cnt_bits0_31   = gen_POPCOUNT(ty, bits0_31,   BYTE);
+         cnt_bits32_63  = gen_POPCOUNT(ty, bits32_63,  BYTE);
+         cnt_bits64_95  = gen_POPCOUNT(ty, bits64_95,  BYTE);
+         cnt_bits96_127 = gen_POPCOUNT(ty, bits96_127, BYTE);
+
+         putVReg( vRT_addr, mkV128from32(cnt_bits96_127, cnt_bits64_95,
+                                         cnt_bits32_63, cnt_bits0_31) );
+         break;
+      }
+
+      case 0x743:    // vpopcnth
+      {
+         /* Break vector into 32-bit words and do the population count
+          * for each half word
+          */
+         IRType ty = Ity_I32;
+         IRTemp bits0_31, bits32_63, bits64_95, bits96_127;
+         bits0_31 = bits32_63 = bits64_95 = bits96_127 = IRTemp_INVALID;
+         IRTemp cnt_bits0_31, cnt_bits32_63, cnt_bits64_95, cnt_bits96_127;
+         cnt_bits0_31 = cnt_bits32_63 = cnt_bits64_95 = cnt_bits96_127 = IRTemp_INVALID;
+
+         DIP("vpopcnth v%d,v%d\n", vRT_addr, vRB_addr);
+         breakV128to4x32(mkexpr( vB), &bits96_127, &bits64_95, &bits32_63, &bits0_31 );
+
+         cnt_bits0_31   = gen_POPCOUNT(ty, bits0_31,   HWORD);
+         cnt_bits32_63  = gen_POPCOUNT(ty, bits32_63,  HWORD);
+         cnt_bits64_95  = gen_POPCOUNT(ty, bits64_95,  HWORD);
+         cnt_bits96_127 = gen_POPCOUNT(ty, bits96_127, HWORD);
+
+         putVReg( vRT_addr, mkV128from32(cnt_bits96_127, cnt_bits64_95,
+                                         cnt_bits32_63, cnt_bits0_31) );
+         break;
+      }
+
+      case 0x783:    // vpopcntw
+      {
+         /* Break vector into 32-bit words and do the population count
+          * on each word.
+          */
+         IRType ty = Ity_I32;
+         IRTemp bits0_31, bits32_63, bits64_95, bits96_127;
+         bits0_31 = bits32_63 = bits64_95 = bits96_127 = IRTemp_INVALID;
+         IRTemp cnt_bits0_31, cnt_bits32_63, cnt_bits64_95, cnt_bits96_127;
+         cnt_bits0_31 = cnt_bits32_63 = cnt_bits64_95 = cnt_bits96_127 = IRTemp_INVALID;
+
+         DIP("vpopcntw v%d,v%d\n", vRT_addr, vRB_addr);
+         breakV128to4x32(mkexpr( vB), &bits96_127, &bits64_95, &bits32_63, &bits0_31 );
+
+         cnt_bits0_31   = gen_POPCOUNT(ty, bits0_31,   WORD);
+         cnt_bits32_63  = gen_POPCOUNT(ty, bits32_63,  WORD);
+         cnt_bits64_95  = gen_POPCOUNT(ty, bits64_95,  WORD);
+         cnt_bits96_127 = gen_POPCOUNT(ty, bits96_127, WORD);
+
+         putVReg( vRT_addr, mkV128from32(cnt_bits96_127, cnt_bits64_95,
+                                         cnt_bits32_63, cnt_bits0_31) );
+         break;
+      }
+
+      case 0x7C3:    // vpopcntd
+      {
+         if (mode64) {
+            /* Break vector into 64-bit double words and do the population count
+             * on each double word.
+             */
+            IRType ty = Ity_I64;
+            IRTemp bits0_63   = newTemp(Ity_I64);
+            IRTemp bits64_127 = newTemp(Ity_I64);
+            IRTemp cnt_bits0_63   = newTemp(Ity_I64);
+            IRTemp cnt_bits64_127 = newTemp(Ity_I64);
+
+            DIP("vpopcntd v%d,v%d\n", vRT_addr, vRB_addr);
+
+            assign(bits0_63,   unop( Iop_V128to64,   mkexpr( vB ) ) );
+            assign(bits64_127, unop( Iop_V128HIto64, mkexpr( vB ) ) );
+            cnt_bits0_63   = gen_POPCOUNT(ty, bits0_63,   DWORD);
+            cnt_bits64_127 = gen_POPCOUNT(ty, bits64_127, DWORD);
+
+            putVReg( vRT_addr, binop( Iop_64HLtoV128,
+                                      mkexpr( cnt_bits64_127 ),
+                                      mkexpr( cnt_bits0_63 ) ) );
+         } else {
+            /* 32-bit host: no 64-bit popcount available, so split into
+             * 32-bit words and combine the per-word counts per doubleword.
+             */
+            IRTemp bits0_31, bits32_63, bits64_95, bits96_127;
+            bits0_31 = bits32_63 = bits64_95 = bits96_127 = IRTemp_INVALID;
+            IRTemp cnt_bits0_63   = newTemp(Ity_I64);
+            IRTemp cnt_bits64_127  = newTemp(Ity_I64);
+
+            DIP("vpopcntd v%d,v%d\n", vRT_addr, vRB_addr);
+            breakV128to4x32(mkexpr( vB), &bits96_127, &bits64_95, &bits32_63, &bits0_31 );
+
+            cnt_bits0_63   = gen_vpopcntd_mode32(bits0_31, bits32_63);
+            cnt_bits64_127 = gen_vpopcntd_mode32(bits64_95, bits96_127);
+
+            putVReg( vRT_addr, binop( Iop_64HLtoV128,
+                                      mkexpr( cnt_bits64_127 ),
+                                      mkexpr( cnt_bits0_63 ) ) );
+         }
+         break;
+      }
+
+      case 0x50C:  // vgbbd Vector Gather Bits by Bytes by Doubleword
+         DIP("vgbbd v%d,v%d\n", vRT_addr, vRB_addr);
+         putVReg( vRT_addr, unop( Iop_PwBitMtxXpose64x2, mkexpr( vB ) ) );
+         break;
+
+      default:
+         vex_printf("dis_av_count_bitTranspose(ppc)(opc2)\n");
+         return False;
+   }
+   return True;
+}
+
+/* Comparison selector used by the FP compare helpers.
+ * NOTE(review): the values look like one-hot CR-style bits with
+ * GE == GT | EQ; the consumers of this type are outside this chunk,
+ * so confirm against them before relying on that reading.
+ */
+typedef enum {
+   PPC_CMP_EQ = 2,
+   PPC_CMP_GT = 4,
+   PPC_CMP_GE = 6,   // == PPC_CMP_GT | PPC_CMP_EQ
+   PPC_CMP_LT = 8
+} ppc_cmp_t;
+
+
+/*
+  Convert the result of a binop( Iop_CmpF64, fpA, fpB ) from IR
+  encoding to the PPC condition-register encoding, as IR.
+
+  Mapping from IR compare result to PPC:
+
+  FP cmp result | PPC | IR
+  --------------------------
+  UN            | 0x1 | 0x45
+  EQ            | 0x2 | 0x40
+  GT            | 0x4 | 0x00
+  LT            | 0x8 | 0x01
+
+  which is realised as:
+     condcode = 1 << ( (~(ccIR >> 5) & 2) | ((ccIR ^ (ccIR >> 6)) & 1) )
+*/
+static IRTemp
+get_fp_cmp_CR_val (IRExpr * ccIR_expr)
+{
+   IRTemp ccIR     = newTemp( Ity_I32 );
+   IRTemp condcode = newTemp( Ity_I32 );
+
+   assign( ccIR, ccIR_expr );
+
+   /* High shift amount bit of the result: ~(ccIR >> 5) & 2 */
+   IRExpr* shiftBit1 =
+      binop( Iop_And32,
+             unop( Iop_Not32,
+                   binop( Iop_Shr32, mkexpr( ccIR ), mkU8( 5 ) ) ),
+             mkU32( 2 ) );
+
+   /* Low shift amount bit of the result: (ccIR ^ (ccIR >> 6)) & 1 */
+   IRExpr* shiftBit0 =
+      binop( Iop_And32,
+             binop( Iop_Xor32,
+                    mkexpr( ccIR ),
+                    binop( Iop_Shr32, mkexpr( ccIR ), mkU8( 6 ) ) ),
+             mkU32( 1 ) );
+
+   /* condcode = 1 << (shiftBit1 | shiftBit0) */
+   assign( condcode,
+           binop( Iop_Shl32,
+                  mkU32( 1 ),
+                  unop( Iop_32to8,
+                        binop( Iop_Or32, shiftBit1, shiftBit0 ) ) ) );
+   return condcode;
+}
+
+/*
+ * Helper function for get_max_min_fp for ascertaining the max or min between two doubles
+ * following these special rules:
+ *   - The max/min of a QNaN and any value is that value
+ *     (When two QNaNs are being compared, the frA QNaN is the return value.)
+ *   - The max/min of any value and an SNaN is that SNaN converted to a QNaN
+ *     (When two SNaNs are being compared, the frA SNaN is converted to a QNaN.)
+ * Both arguments are Ity_I64 bit patterns of doubles; at least one of
+ * them is a NaN (callers only invoke this in that case).
+ */
+static IRExpr * _get_maxmin_fp_NaN(IRTemp frA_I64, IRTemp frB_I64)
+{
+   IRTemp frA_isNaN = newTemp(Ity_I1);
+   IRTemp frB_isNaN = newTemp(Ity_I1);
+   IRTemp frA_isSNaN = newTemp(Ity_I1);
+   IRTemp frB_isSNaN = newTemp(Ity_I1);
+   IRTemp frA_isQNaN = newTemp(Ity_I1);
+   IRTemp frB_isQNaN = newTemp(Ity_I1);
+
+   assign( frA_isNaN, is_NaN( frA_I64 ) );
+   assign( frB_isNaN, is_NaN( frB_I64 ) );
+   // If operand is a NAN and bit 12 is '0', then it's an SNaN
+   // (0x00080000 in the high word is the first fraction bit, IBM bit 12).
+   assign( frA_isSNaN,
+           mkAND1( mkexpr(frA_isNaN),
+                   binop( Iop_CmpEQ32,
+                          binop( Iop_And32,
+                                 unop( Iop_64HIto32, mkexpr( frA_I64 ) ),
+                                 mkU32( 0x00080000 ) ),
+                          mkU32( 0 ) ) ) );
+   assign( frB_isSNaN,
+           mkAND1( mkexpr(frB_isNaN),
+                   binop( Iop_CmpEQ32,
+                          binop( Iop_And32,
+                                 unop( Iop_64HIto32, mkexpr( frB_I64 ) ),
+                                 mkU32( 0x00080000 ) ),
+                          mkU32( 0 ) ) ) );
+   assign( frA_isQNaN,
+           mkAND1( mkexpr( frA_isNaN ), unop( Iop_Not1, mkexpr( frA_isSNaN ) ) ) );
+   assign( frB_isQNaN,
+           mkAND1( mkexpr( frB_isNaN ), unop( Iop_Not1, mkexpr( frB_isSNaN ) ) ) );
+
+   /* Based on the rules specified in the function prologue, the algorithm is as follows:
+    *  <<<<<<<<<>>>>>>>>>>>>>>>>>>
+    *   if frA is a SNaN
+    *     result = frA converted to QNaN
+    *   else if frB is a SNaN
+    *     result = frB converted to QNaN
+    *   else if frB is a QNaN
+    *     result = frA
+    *   // One of frA or frB was a NaN in order for this function to be called, so
+    *   // if we get to this point, we KNOW that frA must be a QNaN.
+    *   else // frA is a QNaN
+    *     result = frB
+    *  <<<<<<<<<>>>>>>>>>>>>>>>>>>
+    */
+
+/* OR-ing SNAN_MASK into the bit pattern sets bit 12, converting an SNaN
+   into the corresponding QNaN.  NOTE(review): the macro is deliberately
+   not #undef'd; later code in this file redefines it identically --
+   confirm before adding an #undef here. */
+#define SNAN_MASK 0x0008000000000000ULL
+   return
+   IRExpr_ITE(mkexpr(frA_isSNaN),
+              /* then: result = frA converted to QNaN */
+              binop(Iop_Or64, mkexpr(frA_I64), mkU64(SNAN_MASK)),
+              /* else:  if frB is a SNaN */
+              IRExpr_ITE(mkexpr(frB_isSNaN),
+                         /* then: result = frB converted to QNaN */
+                         binop(Iop_Or64, mkexpr(frB_I64), mkU64(SNAN_MASK)),
+                         /* else:  if frB is a QNaN */
+                         IRExpr_ITE(mkexpr(frB_isQNaN),
+                                    /* then: result = frA */
+                                    mkexpr(frA_I64),
+                                    /* else:  frA is a QNaN, so result = frB */
+                                    mkexpr(frB_I64))));
+}
+
+/*
+ * Helper function for get_max_min_fp.  src1 and src2 are Ity_I64 bit
+ * patterns of doubles.  Compares them with Iop_CmpF64 and returns src1
+ * when the PPC-encoded compare result is LT (isMin) / GT (!isMin),
+ * otherwise src2 (so an EQ compare yields src2).  get_max_min_fp only
+ * reaches this on its non-NaN path, so the unordered case does not
+ * arise here.
+ */
+static IRExpr * _get_maxmin_fp_cmp(IRTemp src1, IRTemp src2, Bool isMin)
+{
+   IRTemp src1cmpsrc2 = get_fp_cmp_CR_val( binop( Iop_CmpF64,
+                                                  unop( Iop_ReinterpI64asF64,
+                                                        mkexpr( src1 ) ),
+                                                  unop( Iop_ReinterpI64asF64,
+                                                        mkexpr( src2 ) ) ) );
+
+   return IRExpr_ITE( binop( Iop_CmpEQ32,
+                               mkexpr( src1cmpsrc2 ),
+                               mkU32( isMin ? PPC_CMP_LT : PPC_CMP_GT ) ),
+                      /* then: use src1 */
+                      mkexpr( src1 ),
+                      /* else: use src2 */
+                      mkexpr( src2 ) );
+}
+
+/*
+ * Helper function for "Maximum/Minimum Double Precision" operations.
+ * Arguments: frA_I64 and frB_I64 are Ity_I64 bit patterns of doubles.
+ * Returns an Ity_I64 IRExpr holding the maximum (isMin == False) or
+ * minimum (isMin == True) of the two values.
+ */
+static IRExpr * get_max_min_fp(IRTemp frA_I64, IRTemp frB_I64, Bool isMin)
+{
+   /* There are three special cases where get_fp_cmp_CR_val is not helpful
+    * for ascertaining the maximum between two doubles:
+    *   1. The max/min of +0 and -0 is +0.
+    *   2. The max/min of a QNaN and any value is that value.
+    *   3. The max/min of any value and an SNaN is that SNaN converted to a QNaN.
+    * We perform the check for [+/-]0 here in this function and use the
+    * _get_maxmin_fp_NaN helper for the two NaN cases; otherwise we call _get_maxmin_fp_cmp
+    * to do the standard comparison function.
+    */
+   IRTemp anyNaN = newTemp(Ity_I1);
+   IRTemp frA_isZero = newTemp(Ity_I1);
+   IRTemp frB_isZero = newTemp(Ity_I1);
+   assign(frA_isZero, is_Zero(frA_I64, False /*not single precision*/ ));
+   assign(frB_isZero, is_Zero(frB_I64, False /*not single precision*/ ));
+   assign(anyNaN, mkOR1(is_NaN(frA_I64), is_NaN(frB_I64)));
+/* NOTE(review): MINUS_ZERO is not #undef'd after use -- confirm no later
+   code depends on the leaked definition before scoping it. */
+#define MINUS_ZERO 0x8000000000000000ULL
+
+   return IRExpr_ITE( /* If both arguments are zero . . . */
+                     mkAND1( mkexpr( frA_isZero ), mkexpr( frB_isZero ) ),
+                     /* then: if frA is -0 and isMin==True, return -0;
+                      *     else if frA is +0 and isMin==False; return +0;
+                      *     otherwise, simply return frB. */
+                     IRExpr_ITE( binop( Iop_CmpEQ32,
+                                        unop( Iop_64HIto32,
+                                              mkexpr( frA_I64 ) ),
+                                        mkU32( isMin ? 0x80000000 : 0 ) ),
+                                 mkU64( isMin ? MINUS_ZERO : 0ULL ),
+                                 mkexpr( frB_I64 ) ),
+                     /* else: check if either input is a NaN*/
+                     IRExpr_ITE( mkexpr( anyNaN ),
+                                 /* then: use "NaN helper" */
+                                 _get_maxmin_fp_NaN( frA_I64, frB_I64 ),
+                                 /* else: use "comparison helper"; note the
+                                  * operand order (frB, frA): on an EQ
+                                  * compare the helper returns its second
+                                  * argument, i.e. frA. */
+                                 _get_maxmin_fp_cmp( frB_I64, frA_I64, isMin ) ));
+}
+
+/* Map the low seven bits of a VSX round-to-integer opcode onto the
+   mnemonic suffix used when disassembling the xvrdpi family. */
+static const HChar * _get_vsx_rdpi_suffix(UInt opc2)
+{
+   UInt key = opc2 & 0x7F;
+
+   if (key == 0x12) return "";   /* round to nearest away */
+   if (key == 0x32) return "z";  /* round toward zero */
+   if (key == 0x52) return "p";  /* round toward +infinity */
+   if (key == 0x56) return "c";  /* current rounding mode */
+   if (key == 0x72) return "m";  /* round toward -infinity */
+
+   // Impossible to get here
+   vex_printf("Unrecognized opcode %x\n", opc2);
+   vpanic("_get_vsx_rdpi_suffix(ppc)(opc2)");
+}
+
+/*
+ * Helper for the vector/scalar double-precision round-to-integer
+ * instructions (x{s|v}rdpi{m|p|c|z}).  frB_I64 is the raw 64-bit source
+ * operand; the low 7 bits of opc2 select the rounding mode.  Returns an
+ * Ity_F64 expression holding the rounded value, preserving the sign of
+ * zero, passing through values outside +/-9e18 unchanged, and turning
+ * an SNaN input into its QNaN form per the ISA's "Floating-Point Round
+ * to Integer Model".
+ */
+static IRExpr * _do_vsx_fp_roundToInt(IRTemp frB_I64, UInt opc2)
+{
+
+   /* The same rules apply for x{s|v}rdpi{m|p|c|z} as for floating point round operations (fri{m|n|p|z}). */
+   IRTemp frB = newTemp(Ity_F64);
+   IRTemp frD = newTemp(Ity_F64);
+   IRTemp intermediateResult = newTemp(Ity_I64);
+   IRTemp is_SNAN = newTemp(Ity_I1);
+   IRExpr * hi32;
+   IRExpr * rxpi_rm;
+   /* Select the IR rounding mode from the opcode's low 7 bits. */
+   switch (opc2 & 0x7F) {
+      case 0x72:
+         rxpi_rm = mkU32(Irrm_NegINF);
+         break;
+      case 0x52:
+         rxpi_rm = mkU32(Irrm_PosINF);
+         break;
+      case 0x56:
+         rxpi_rm = get_IR_roundingmode();
+         break;
+      case 0x32:
+         rxpi_rm = mkU32(Irrm_ZERO);
+         break;
+      case 0x12:
+         rxpi_rm = mkU32(Irrm_NEAREST);
+         break;
+
+      default: // Impossible to get here
+         vex_printf("Unrecognized opcode %x\n", opc2);
+         vpanic("_do_vsx_fp_roundToInt(ppc)(opc2)");
+   }
+   assign(frB, unop(Iop_ReinterpI64asF64, mkexpr(frB_I64)));
+   assign( intermediateResult,
+           binop( Iop_F64toI64S, rxpi_rm,
+                  mkexpr( frB ) ) );
+
+   /* don't use the rounded integer if frB is outside -9e18..9e18 */
+   /* F64 has only log10(2**52) significant digits anyway */
+   /* need to preserve sign of zero */
+   /*   frD = (fabs(frB) > 9e18) ? frB :
+            (sign(frB)) ? -fabs((double)intermediateResult) : (double)intermediateResult  */
+   assign( frD,
+           IRExpr_ITE(
+              binop( Iop_CmpNE8,
+                     unop( Iop_32to8,
+                           binop( Iop_CmpF64,
+                                  IRExpr_Const( IRConst_F64( 9e18 ) ),
+                                  unop( Iop_AbsF64, mkexpr( frB ) ) ) ),
+                     mkU8(0) ),
+              mkexpr( frB ),
+              IRExpr_ITE(
+                 /* sign bit of frB set -> negate the magnitude */
+                 binop( Iop_CmpNE32,
+                        binop( Iop_Shr32,
+                               unop( Iop_64HIto32,
+                                     mkexpr( frB_I64 ) ),
+                               mkU8( 31 ) ),
+                        mkU32(0) ),
+                 unop( Iop_NegF64,
+                       unop( Iop_AbsF64,
+                             binop( Iop_I64StoF64,
+                                    mkU32( 0 ),
+                                    mkexpr( intermediateResult ) ) ) ),
+                 binop( Iop_I64StoF64,
+                        mkU32( 0 ),
+                        mkexpr( intermediateResult ) )
+              )
+           )
+   );
+
+   /* See Appendix "Floating-Point Round to Integer Model" in ISA doc.
+    * If frB is a SNAN, then frD <- frB, with bit 12 set to '1'.
+    */
+#define SNAN_MASK 0x0008000000000000ULL
+   hi32 = unop( Iop_64HIto32, mkexpr(frB_I64) );
+   /* SNaN <=> NaN with bit 12 (first fraction bit, 0x00080000 in the
+      high word) clear. */
+   assign( is_SNAN,
+           mkAND1( is_NaN( frB_I64 ),
+                   binop( Iop_CmpEQ32,
+                          binop( Iop_And32, hi32, mkU32( 0x00080000 ) ),
+                          mkU32( 0 ) ) ) );
+
+   /* XOR with SNAN_MASK sets bit 12 (known clear for an SNaN), i.e.
+      converts the SNaN to its quiet form. */
+   return IRExpr_ITE( mkexpr( is_SNAN ),
+                        unop( Iop_ReinterpI64asF64,
+                              binop( Iop_Xor64,
+                                     mkU64( SNAN_MASK ),
+                                     mkexpr( frB_I64 ) ) ),
+                      mkexpr( frD ));
+}
+
+/*
+ * Miscellaneous VSX vector instructions: reciprocal / reciprocal-sqrt
+ * estimates, max/min, copy-sign, abs/neg, and the round-to-integer
+ * forms.  Returns False if (opc1, opc2) is not handled here.
+ */
+static Bool
+dis_vxv_misc ( UInt theInstr, UInt opc2 )
+{
+   /* XX3-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT( theInstr );
+   UChar XB = ifieldRegXB( theInstr );
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vxv_misc(ppc)(instr)\n" );
+      return False;
+   }
+
+   switch (opc2) {
+      case 0x1B4:  // xvredp (VSX Vector Reciprocal Estimate Double-Precision)
+      case 0x194:  // xvrsqrtedp (VSX Vector Reciprocal Square Root Estimate
+                   //             Double-Precision)
+      {
+         IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
+         IRExpr* rm  = get_IR_roundingmode();
+         IRTemp frB = newTemp(Ity_I64);
+         IRTemp frB2 = newTemp(Ity_I64);
+         Bool redp = opc2 == 0x1B4;
+         IRTemp sqrtHi = newTemp(Ity_F64);
+         IRTemp sqrtLo = newTemp(Ity_F64);
+         assign(frB,  unop(Iop_V128HIto64, getVSReg( XB )));
+         assign(frB2, unop(Iop_V128to64, getVSReg( XB )));
+
+         DIP("%s v%d,v%d\n", redp ? "xvredp" : "xvrsqrtedp", (UInt)XT, (UInt)XB);
+         /* sqrtHi/sqrtLo are only assigned (and only used) on the
+          * xvrsqrtedp path. */
+         if (!redp) {
+            assign( sqrtHi,
+                    binop( Iop_SqrtF64,
+                           rm,
+                           unop( Iop_ReinterpI64asF64, mkexpr( frB ) ) ) );
+            assign( sqrtLo,
+                    binop( Iop_SqrtF64,
+                           rm,
+                           unop( Iop_ReinterpI64asF64, mkexpr( frB2 ) ) ) );
+         }
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                triop( Iop_DivF64,
+                                       rm,
+                                       ieee_one,
+                                       redp ? unop( Iop_ReinterpI64asF64,
+                                                    mkexpr( frB ) )
+                                            : mkexpr( sqrtHi ) ) ),
+                          unop( Iop_ReinterpF64asI64,
+                                triop( Iop_DivF64,
+                                       rm,
+                                       ieee_one,
+                                       redp ? unop( Iop_ReinterpI64asF64,
+                                                    mkexpr( frB2 ) )
+                                            : mkexpr( sqrtLo ) ) ) ) );
+         break;
+
+      }
+      case 0x134: // xvresp (VSX Vector Reciprocal Estimate Single-Precision)
+      case 0x114: // xvrsqrtesp (VSX Vector Reciprocal Square Root Estimate Single-Precision)
+      {
+         IRTemp b3, b2, b1, b0;
+         IRTemp res0 = newTemp(Ity_I32);
+         IRTemp res1 = newTemp(Ity_I32);
+         IRTemp res2 = newTemp(Ity_I32);
+         IRTemp res3 = newTemp(Ity_I32);
+         IRTemp sqrt3 = newTemp(Ity_F64);
+         IRTemp sqrt2 = newTemp(Ity_F64);
+         IRTemp sqrt1 = newTemp(Ity_F64);
+         IRTemp sqrt0 = newTemp(Ity_F64);
+         IRExpr* rm  = get_IR_roundingmode();
+         Bool resp = opc2 == 0x134;
+
+         IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
+
+         b3 = b2 = b1 = b0 = IRTemp_INVALID;
+         DIP("%s v%d,v%d\n", resp ? "xvresp" : "xvrsqrtesp", (UInt)XT, (UInt)XB);
+         breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
+
+         if (!resp) {
+            assign( sqrt3, binop( Iop_SqrtF64, rm, mkexpr( b3 ) ) );
+            assign( sqrt2, binop( Iop_SqrtF64, rm, mkexpr( b2 ) ) );
+            assign( sqrt1, binop( Iop_SqrtF64, rm, mkexpr( b1 ) ) );
+            assign( sqrt0, binop( Iop_SqrtF64, rm, mkexpr( b0 ) ) );
+         }
+
+         assign( res0,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             triop( Iop_DivF64r32,
+                                    rm,
+                                    ieee_one,
+                                    resp ? mkexpr( b0 ) : mkexpr( sqrt0 ) ) ) ) );
+         assign( res1,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             triop( Iop_DivF64r32,
+                                    rm,
+                                    ieee_one,
+                                    resp ? mkexpr( b1 ) : mkexpr( sqrt1 ) ) ) ) );
+         assign( res2,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             triop( Iop_DivF64r32,
+                                    rm,
+                                    ieee_one,
+                                    resp ? mkexpr( b2 ) : mkexpr( sqrt2 ) ) ) ) );
+         assign( res3,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             triop( Iop_DivF64r32,
+                                    rm,
+                                    ieee_one,
+                                    resp ? mkexpr( b3 ) : mkexpr( sqrt3 ) ) ) ) );
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64, mkexpr( res3 ), mkexpr( res2 ) ),
+                          binop( Iop_32HLto64, mkexpr( res1 ), mkexpr( res0 ) ) ) );
+         break;
+      }
+      case 0x300: // xvmaxsp (VSX Vector Maximum Single-Precision)
+      case 0x320: // xvminsp (VSX Vector Minimum Single-Precision)
+      {
+         UChar XA = ifieldRegXA( theInstr );
+         IRTemp a3, a2, a1, a0;
+         IRTemp b3, b2, b1, b0;
+         IRTemp res0 = newTemp( Ity_I32 );
+         IRTemp res1 = newTemp( Ity_I32 );
+         IRTemp res2 = newTemp( Ity_I32 );
+         IRTemp res3 = newTemp( Ity_I32 );
+         IRTemp a0_I64 = newTemp( Ity_I64 );
+         IRTemp a1_I64 = newTemp( Ity_I64 );
+         IRTemp a2_I64 = newTemp( Ity_I64 );
+         IRTemp a3_I64 = newTemp( Ity_I64 );
+         IRTemp b0_I64 = newTemp( Ity_I64 );
+         IRTemp b1_I64 = newTemp( Ity_I64 );
+         IRTemp b2_I64 = newTemp( Ity_I64 );
+         IRTemp b3_I64 = newTemp( Ity_I64 );
+
+         Bool isMin = opc2 == 0x320 ? True : False;
+
+         a3 = a2 = a1 = a0 = IRTemp_INVALID;
+         b3 = b2 = b1 = b0 = IRTemp_INVALID;
+         DIP("%s v%d,v%d v%d\n", isMin ? "xvminsp" : "xvmaxsp", (UInt)XT, (UInt)XA, (UInt)XB);
+         /* Widen each 32-bit lane to F64, take the max/min in double
+          * precision, then truncate back to single precision. */
+         breakV128to4xF64( getVSReg( XA ), &a3, &a2, &a1, &a0 );
+         breakV128to4xF64( getVSReg( XB ), &b3, &b2, &b1, &b0 );
+         assign( a0_I64, unop( Iop_ReinterpF64asI64, mkexpr( a0 ) ) );
+         assign( b0_I64, unop( Iop_ReinterpF64asI64, mkexpr( b0 ) ) );
+         assign( a1_I64, unop( Iop_ReinterpF64asI64, mkexpr( a1 ) ) );
+         assign( b1_I64, unop( Iop_ReinterpF64asI64, mkexpr( b1 ) ) );
+         assign( a2_I64, unop( Iop_ReinterpF64asI64, mkexpr( a2 ) ) );
+         assign( b2_I64, unop( Iop_ReinterpF64asI64, mkexpr( b2 ) ) );
+         assign( a3_I64, unop( Iop_ReinterpF64asI64, mkexpr( a3 ) ) );
+         assign( b3_I64, unop( Iop_ReinterpF64asI64, mkexpr( b3 ) ) );
+         assign( res0,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             unop( Iop_ReinterpI64asF64,
+                                   get_max_min_fp( a0_I64, b0_I64, isMin ) ) ) ) );
+         assign( res1,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             unop( Iop_ReinterpI64asF64,
+                                   get_max_min_fp( a1_I64, b1_I64, isMin ) ) ) ) );
+         assign( res2,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             unop( Iop_ReinterpI64asF64,
+                                   get_max_min_fp( a2_I64, b2_I64, isMin ) ) ) ) );
+         assign( res3,
+                 unop( Iop_ReinterpF32asI32,
+                       unop( Iop_TruncF64asF32,
+                             unop( Iop_ReinterpI64asF64,
+                                   get_max_min_fp( a3_I64, b3_I64, isMin ) ) ) ) );
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_32HLto64, mkexpr( res3 ), mkexpr( res2 ) ),
+                          binop( Iop_32HLto64, mkexpr( res1 ), mkexpr( res0 ) ) ) );
+         break;
+      }
+      case 0x380: // xvmaxdp (VSX Vector Maximum Double-Precision)
+      case 0x3A0: // xvmindp (VSX Vector Minimum Double-Precision)
+      {
+         UChar XA = ifieldRegXA( theInstr );
+         IRTemp frA = newTemp(Ity_I64);
+         IRTemp frB = newTemp(Ity_I64);
+         IRTemp frA2 = newTemp(Ity_I64);
+         IRTemp frB2 = newTemp(Ity_I64);
+         Bool isMin = opc2 == 0x3A0 ? True : False;
+
+         assign(frA,  unop(Iop_V128HIto64, getVSReg( XA )));
+         assign(frB,  unop(Iop_V128HIto64, getVSReg( XB )));
+         assign(frA2, unop(Iop_V128to64, getVSReg( XA )));
+         assign(frB2, unop(Iop_V128to64, getVSReg( XB )));
+         DIP("%s v%d,v%d v%d\n", isMin ? "xvmindp" : "xvmaxdp", (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, binop( Iop_64HLtoV128, get_max_min_fp(frA, frB, isMin), get_max_min_fp(frA2, frB2, isMin) ) );
+
+         break;
+      }
+      case 0x3c0: // xvcpsgndp (VSX Vector Copy Sign Double-Precision)
+      {
+         UChar XA = ifieldRegXA( theInstr );
+         IRTemp frA = newTemp(Ity_I64);
+         IRTemp frB = newTemp(Ity_I64);
+         IRTemp frA2 = newTemp(Ity_I64);
+         IRTemp frB2 = newTemp(Ity_I64);
+         assign(frA,  unop(Iop_V128HIto64, getVSReg( XA )));
+         assign(frB,  unop(Iop_V128HIto64, getVSReg( XB )));
+         assign(frA2, unop(Iop_V128to64, getVSReg( XA )));
+         assign(frB2, unop(Iop_V128to64, getVSReg( XB )));
+
+         DIP("xvcpsgndp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         /* Per lane: sign bit from frA, exponent and fraction from frB. */
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          binop( Iop_Or64,
+                                 binop( Iop_And64,
+                                        mkexpr( frA ),
+                                        mkU64( SIGN_BIT ) ),
+                                 binop( Iop_And64,
+                                        mkexpr( frB ),
+                                        mkU64( SIGN_MASK ) ) ),
+                          binop( Iop_Or64,
+                                 binop( Iop_And64,
+                                        mkexpr( frA2 ),
+                                        mkU64( SIGN_BIT ) ),
+                                 binop( Iop_And64,
+                                        mkexpr( frB2 ),
+                                        mkU64( SIGN_MASK ) ) ) ) );
+         break;
+      }
+      case 0x340: // xvcpsgnsp
+      {
+         UChar XA = ifieldRegXA( theInstr );
+         IRTemp a3_I64, a2_I64, a1_I64, a0_I64;
+         IRTemp b3_I64, b2_I64, b1_I64, b0_I64;
+         IRTemp resHi = newTemp(Ity_I64);
+         IRTemp resLo = newTemp(Ity_I64);
+
+         a3_I64 = a2_I64 = a1_I64 = a0_I64 = IRTemp_INVALID;
+         b3_I64 = b2_I64 = b1_I64 = b0_I64 = IRTemp_INVALID;
+         DIP("xvcpsgnsp v%d,v%d v%d\n",(UInt)XT, (UInt)XA, (UInt)XB);
+         breakV128to4x64U( getVSReg( XA ), &a3_I64, &a2_I64, &a1_I64, &a0_I64 );
+         breakV128to4x64U( getVSReg( XB ), &b3_I64, &b2_I64, &b1_I64, &b0_I64 );
+
+         /* Per 32-bit lane: sign bit from XA, magnitude bits from XB. */
+         assign( resHi,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_And32,
+                                      unop(Iop_64to32, mkexpr( a3_I64 ) ),
+                                      mkU32( SIGN_BIT32 ) ),
+                               binop( Iop_And32,
+                                      unop(Iop_64to32, mkexpr( b3_I64 ) ),
+                                      mkU32( SIGN_MASK32) ) ),
+
+                        binop( Iop_Or32,
+                               binop( Iop_And32,
+                                      unop(Iop_64to32, mkexpr( a2_I64 ) ),
+                                      mkU32( SIGN_BIT32 ) ),
+                               binop( Iop_And32,
+                                      unop(Iop_64to32, mkexpr( b2_I64 ) ),
+                                      mkU32( SIGN_MASK32 ) ) ) ) );
+         assign( resLo,
+                 binop( Iop_32HLto64,
+                        binop( Iop_Or32,
+                               binop( Iop_And32,
+                                      unop(Iop_64to32, mkexpr( a1_I64 ) ),
+                                      mkU32( SIGN_BIT32 ) ),
+                               binop( Iop_And32,
+                                      unop(Iop_64to32, mkexpr( b1_I64 ) ),
+                                      mkU32( SIGN_MASK32 ) ) ),
+
+                        binop( Iop_Or32,
+                               binop( Iop_And32,
+                                      unop(Iop_64to32, mkexpr( a0_I64 ) ),
+                                      mkU32( SIGN_BIT32 ) ),
+                               binop( Iop_And32,
+                                      unop(Iop_64to32, mkexpr( b0_I64 ) ),
+                                      mkU32( SIGN_MASK32 ) ) ) ) );
+         putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( resHi ), mkexpr( resLo ) ) );
+         break;
+      }
+      case 0x3B2: // xvabsdp (VSX Vector Absolute Value Double-Precision)
+      case 0x3D2: // xvnabsdp VSX Vector Negative Absolute Value Double-Precision)
+      {
+         IRTemp frB = newTemp(Ity_F64);
+         IRTemp frB2 = newTemp(Ity_F64);
+         IRTemp abs_resultHi = newTemp(Ity_F64);
+         IRTemp abs_resultLo = newTemp(Ity_F64);
+         Bool make_negative = (opc2 == 0x3D2) ? True : False;
+         assign(frB,  unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
+         assign(frB2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg(XB))));
+
+         DIP("xv%sabsdp v%d,v%d\n", make_negative ? "n" : "", (UInt)XT, (UInt)XB);
+         if (make_negative) {
+            assign(abs_resultHi, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr( frB ) ) ) );
+            assign(abs_resultLo, unop( Iop_NegF64, unop( Iop_AbsF64, mkexpr( frB2 ) ) ) );
+
+         } else {
+            assign(abs_resultHi, unop( Iop_AbsF64, mkexpr( frB ) ) );
+            assign(abs_resultLo, unop( Iop_AbsF64, mkexpr( frB2 ) ) );
+         }
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              unop( Iop_ReinterpF64asI64, mkexpr( abs_resultHi ) ),
+                              unop( Iop_ReinterpF64asI64, mkexpr( abs_resultLo ) ) ) );
+         break;
+      }
+      case 0x332: // xvabssp (VSX Vector Absolute Value Single-Precision)
+      case 0x352: // xvnabssp (VSX Vector Negative Absolute Value Single-Precision)
+      {
+         /*
+          * The Iop_AbsF32 IRop is not implemented for ppc64 since, up until introduction
+          * of xvabssp, there has not been an abs(sp) type of instruction.  But since emulation
+          * of this function is so easy using shifts, I choose to emulate this instruction that
+          * way versus a native instruction method of implementation.
+          */
+         Bool make_negative = (opc2 == 0x352) ? True : False;
+         IRTemp shiftVector = newTemp(Ity_V128);
+         IRTemp absVal_vector = newTemp(Ity_V128);
+         /* Clear each lane's sign bit by shifting left then right by 1. */
+         assign( shiftVector,
+                 binop( Iop_64HLtoV128,
+                        binop( Iop_32HLto64, mkU32( 1 ), mkU32( 1 ) ),
+                        binop( Iop_32HLto64, mkU32( 1 ), mkU32( 1 ) ) ) );
+         assign( absVal_vector,
+                   binop( Iop_Shr32x4,
+                          binop( Iop_Shl32x4,
+                                 getVSReg( XB ),
+                                 mkexpr( shiftVector ) ),
+                          mkexpr( shiftVector ) ) );
+         if (make_negative) {
+            IRTemp signBit_vector = newTemp(Ity_V128);
+            assign( signBit_vector,
+                    binop( Iop_64HLtoV128,
+                           binop( Iop_32HLto64,
+                                  mkU32( 0x80000000 ),
+                                  mkU32( 0x80000000 ) ),
+                           binop( Iop_32HLto64,
+                                  mkU32( 0x80000000 ),
+                                  mkU32( 0x80000000 ) ) ) );
+            putVSReg( XT,
+                      binop( Iop_OrV128,
+                             mkexpr( absVal_vector ),
+                             mkexpr( signBit_vector ) ) );
+         } else {
+            putVSReg( XT, mkexpr( absVal_vector ) );
+         }
+         break;
+      }
+      case 0x3F2: // xvnegdp (VSX Vector Negate Double-Precision)
+      {
+         IRTemp frB = newTemp(Ity_F64);
+         IRTemp frB2 = newTemp(Ity_F64);
+         assign(frB,  unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
+         assign(frB2, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, getVSReg(XB))));
+         DIP("xvnegdp v%d,v%d\n",  (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                unop( Iop_NegF64, mkexpr( frB ) ) ),
+                          unop( Iop_ReinterpF64asI64,
+                                unop( Iop_NegF64, mkexpr( frB2 ) ) ) ) );
+         break;
+      }
+      case 0x192: // xvrdpi  (VSX Vector Round to Double-Precision Integer using round toward Nearest Away)
+      case 0x1D6: // xvrdpic (VSX Vector Round to Double-Precision Integer using Current rounding mode)
+      case 0x1F2: // xvrdpim (VSX Vector Round to Double-Precision Integer using round toward -Infinity)
+      case 0x1D2: // xvrdpip (VSX Vector Round to Double-Precision Integer using round toward +Infinity)
+      case 0x1B2: // xvrdpiz (VSX Vector Round to Double-Precision Integer using round toward Zero)
+      {
+         IRTemp frBHi_I64 = newTemp(Ity_I64);
+         IRTemp frBLo_I64 = newTemp(Ity_I64);
+         IRExpr * frD_fp_roundHi = NULL;
+         IRExpr * frD_fp_roundLo = NULL;
+
+         assign( frBHi_I64, unop( Iop_V128HIto64, getVSReg( XB ) ) );
+         frD_fp_roundHi = _do_vsx_fp_roundToInt(frBHi_I64, opc2);
+         assign( frBLo_I64, unop( Iop_V128to64, getVSReg( XB ) ) );
+         frD_fp_roundLo = _do_vsx_fp_roundToInt(frBLo_I64, opc2);
+
+         DIP("xvrdpi%s v%d,v%d\n", _get_vsx_rdpi_suffix(opc2), (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64, frD_fp_roundHi ),
+                          unop( Iop_ReinterpF64asI64, frD_fp_roundLo ) ) );
+         break;
+      }
+      case 0x112: // xvrspi  (VSX Vector Round to Single-Precision Integer using round toward Nearest Away)
+      case 0x156: // xvrspic (VSX Vector Round to SinglePrecision Integer using Current rounding mode)
+      case 0x172: // xvrspim (VSX Vector Round to SinglePrecision Integer using round toward -Infinity)
+      case 0x152: // xvrspip (VSX Vector Round to SinglePrecision Integer using round toward +Infinity)
+      case 0x132: // xvrspiz (VSX Vector Round to SinglePrecision Integer using round toward Zero)
+      {
+         const HChar * insn_suffix = NULL;
+         IROp op;
+         if (opc2 != 0x156) {
+            // Use pre-defined IRop's for vrfi{m|n|p|z}
+            switch (opc2) {
+               case 0x112:
+                  insn_suffix = "";
+                  op = Iop_RoundF32x4_RN;
+                  break;
+               case 0x172:
+                  insn_suffix = "m";
+                  op = Iop_RoundF32x4_RM;
+                  break;
+               case 0x152:
+                  insn_suffix = "p";
+                  op = Iop_RoundF32x4_RP;
+                  break;
+               case 0x132:
+                  insn_suffix = "z";
+                  op = Iop_RoundF32x4_RZ;
+                  break;
+
+               default:
+                  vex_printf("Unrecognized opcode %x\n", opc2);
+                  vpanic("dis_vxv_misc(ppc)(vrspi<x>)(opc2)\n");
+            }
+            DIP("xvrspi%s v%d,v%d\n", insn_suffix, (UInt)XT, (UInt)XB);
+            putVSReg( XT, unop( op, getVSReg(XB) ) );
+         } else {
+            // Handle xvrspic.  Unfortunately there is no corresponding "vfric" instruction.
+            IRExpr * frD_fp_roundb3, * frD_fp_roundb2, * frD_fp_roundb1, * frD_fp_roundb0;
+            IRTemp b3_F64, b2_F64, b1_F64, b0_F64;
+            IRTemp b3_I64 = newTemp(Ity_I64);
+            IRTemp b2_I64 = newTemp(Ity_I64);
+            IRTemp b1_I64 = newTemp(Ity_I64);
+            IRTemp b0_I64 = newTemp(Ity_I64);
+
+            b3_F64 = b2_F64 = b1_F64 = b0_F64 = IRTemp_INVALID;
+            frD_fp_roundb3 = frD_fp_roundb2 = frD_fp_roundb1 = frD_fp_roundb0 = NULL;
+            breakV128to4xF64( getVSReg(XB), &b3_F64, &b2_F64, &b1_F64, &b0_F64);
+            assign(b3_I64, unop(Iop_ReinterpF64asI64, mkexpr(b3_F64)));
+            assign(b2_I64, unop(Iop_ReinterpF64asI64, mkexpr(b2_F64)));
+            assign(b1_I64, unop(Iop_ReinterpF64asI64, mkexpr(b1_F64)));
+            assign(b0_I64, unop(Iop_ReinterpF64asI64, mkexpr(b0_F64)));
+            frD_fp_roundb3 = unop(Iop_TruncF64asF32,
+                                  _do_vsx_fp_roundToInt(b3_I64, opc2));
+            frD_fp_roundb2 = unop(Iop_TruncF64asF32,
+                                  _do_vsx_fp_roundToInt(b2_I64, opc2));
+            frD_fp_roundb1 = unop(Iop_TruncF64asF32,
+                                  _do_vsx_fp_roundToInt(b1_I64, opc2));
+            frD_fp_roundb0 = unop(Iop_TruncF64asF32,
+                                  _do_vsx_fp_roundToInt(b0_I64, opc2));
+            DIP("xvrspic v%d,v%d\n", (UInt)XT, (UInt)XB);
+            putVSReg( XT,
+                      binop( Iop_64HLtoV128,
+                             binop( Iop_32HLto64,
+                                    unop( Iop_ReinterpF32asI32, frD_fp_roundb3 ),
+                                    unop( Iop_ReinterpF32asI32, frD_fp_roundb2 ) ),
+                             binop( Iop_32HLto64,
+                                    unop( Iop_ReinterpF32asI32, frD_fp_roundb1 ),
+                                    unop( Iop_ReinterpF32asI32, frD_fp_roundb0 ) ) ) );
+         }
+         break;
+      }
+
+      default:
+         vex_printf( "dis_vxv_misc(ppc)(opc2)\n" );
+         return False;
+   }
+   return True;
+}
+
+
+/*
+ * VSX Scalar Floating Point Arithmetic Instructions
+ */
+static Bool
+dis_vxs_arith ( UInt theInstr, UInt opc2 )
+{
+   /* XX3-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT( theInstr );
+   UChar XA = ifieldRegXA( theInstr );
+   UChar XB = ifieldRegXB( theInstr );
+   IRExpr* rm = get_IR_roundingmode();
+   IRTemp frA = newTemp(Ity_F64);
+   IRTemp frB = newTemp(Ity_F64);
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vxs_arith(ppc)(instr)\n" );
+      return False;
+   }
+
+   assign(frA, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XA ))));
+   assign(frB, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
+
+   /* For all the VSX sclar arithmetic instructions, the contents of doubleword element 1
+    * of VSX[XT] are undefined after the operation; therefore, we can simply set
+    * element to zero where it makes sense to do so.
+    */
+   switch (opc2) {
+      case 0x000: // xsaddsp  (VSX Scalar Add Single-Precision)
+         DIP("xsaddsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              unop( Iop_ReinterpF64asI64,
+                                    binop( Iop_RoundF64toF32, rm,
+                                           triop( Iop_AddF64, rm,
+                                                  mkexpr( frA ),
+                                                  mkexpr( frB ) ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+      case 0x020: // xssubsp  (VSX Scalar Subtract Single-Precision)
+         DIP("xssubsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              unop( Iop_ReinterpF64asI64,
+                                    binop( Iop_RoundF64toF32, rm,
+                                           triop( Iop_SubF64, rm,
+                                                  mkexpr( frA ),
+                                                  mkexpr( frB ) ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+      case 0x080: // xsadddp (VSX scalar add double-precision)
+         DIP("xsadddp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
+                                                    triop( Iop_AddF64, rm,
+                                                           mkexpr( frA ),
+                                                           mkexpr( frB ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+      case 0x060: // xsdivsp (VSX scalar divide single-precision)
+         DIP("xsdivsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              unop( Iop_ReinterpF64asI64,
+                                    binop( Iop_RoundF64toF32, rm,
+                                           triop( Iop_DivF64, rm,
+                                                  mkexpr( frA ),
+                                                  mkexpr( frB ) ) ) ),
+                               mkU64( 0 ) ) );
+         break;
+      case 0x0E0: // xsdivdp (VSX scalar divide double-precision)
+         DIP("xsdivdp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
+                                                    triop( Iop_DivF64, rm,
+                                                           mkexpr( frA ),
+                                                           mkexpr( frB ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+      case 0x004: case 0x024: /* xsmaddasp, xsmaddmsp (VSX scalar multiply-add
+                               * single-precision)
+                               */
+      {
+         IRTemp frT = newTemp(Ity_F64);
+         Bool mdp = opc2 == 0x024;
+         DIP("xsmadd%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
+                                                        getVSReg( XT ) ) ) );
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_RoundF64toF32, rm,
+                                       qop( Iop_MAddF64, rm,
+                                            mkexpr( frA ),
+                                            mkexpr( mdp ? frT : frB ),
+                                            mkexpr( mdp ? frB : frT ) ) ) ),
+                          mkU64( 0 ) ) );
+         break;
+      }
+      case 0x084: case 0x0A4: // xsmaddadp, xsmaddmdp (VSX scalar multiply-add double-precision)
+      {
+         IRTemp frT = newTemp(Ity_F64);
+         Bool mdp = opc2 == 0x0A4;
+         DIP("xsmadd%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
+                                                        getVSReg( XT ) ) ) );
+         putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
+                                                    qop( Iop_MAddF64, rm,
+                                                         mkexpr( frA ),
+                                                         mkexpr( mdp ? frT : frB ),
+                                                         mkexpr( mdp ? frB : frT ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+      }
+      case 0x044: case 0x064: /* xsmsubasp, xsmsubmsp (VSX scalar
+                               * multiply-subtract single-precision)
+			       */
+      {
+         IRTemp frT = newTemp(Ity_F64);
+         Bool mdp = opc2 == 0x064;
+         DIP("xsmsub%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
+                                                        getVSReg( XT ) ) ) );
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_RoundF64toF32, rm,
+                                       qop( Iop_MSubF64, rm,
+                                            mkexpr( frA ),
+                                            mkexpr( mdp ? frT : frB ),
+                                            mkexpr( mdp ? frB : frT ) ) ) ),
+                          mkU64( 0 ) ) );
+         break;
+      }
+      case 0x0C4: case 0x0E4: // xsmsubadp, xsmsubmdp (VSX scalar multiply-subtract double-precision)
+      {
+         IRTemp frT = newTemp(Ity_F64);
+         Bool mdp = opc2 == 0x0E4;
+         DIP("xsmsub%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
+                                                        getVSReg( XT ) ) ) );
+         putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
+                                                    qop( Iop_MSubF64, rm,
+                                                         mkexpr( frA ),
+                                                         mkexpr( mdp ? frT : frB ),
+                                                         mkexpr( mdp ? frB : frT ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+      }
+      case 0x284: case 0x2A4: // xsnmaddadp, xsnmaddmdp (VSX scalar multiply-add double-precision)
+      {
+         /* TODO: mpj -- Naturally, I expected to be able to leverage the implementation
+          * of fnmadd and use pretty much the same code. However, that code has a bug in the
+          * way it blindly negates the signbit, even if the floating point result is a NaN.
+          * So, the TODO is to fix fnmadd (which I'll do in a different patch).
+          * FIXED 7/1/2012: carll fnmadd and fnmsubs fixed to not negate sign
+          * bit for NaN result.
+          */
+         Bool mdp = opc2 == 0x2A4;
+         IRTemp frT = newTemp(Ity_F64);
+         IRTemp maddResult = newTemp(Ity_I64);
+
+         DIP("xsnmadd%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
+                                                        getVSReg( XT ) ) ) );
+         assign( maddResult, unop( Iop_ReinterpF64asI64, qop( Iop_MAddF64, rm,
+                                                              mkexpr( frA ),
+                                                              mkexpr( mdp ? frT : frB ),
+                                                              mkexpr( mdp ? frB : frT ) ) ) );
+
+         putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( getNegatedResult(maddResult) ),
+                              mkU64( 0 ) ) );
+         break;
+      }
+      case 0x204: case 0x224: /* xsnmaddasp, xsnmaddmsp (VSX scalar
+                               * multiply-add single-precision)
+                               */
+      {
+         Bool mdp = opc2 == 0x224;
+         IRTemp frT = newTemp(Ity_F64);
+         IRTemp maddResult = newTemp(Ity_I64);
+
+         DIP("xsnmadd%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
+                                                        getVSReg( XT ) ) ) );
+         assign( maddResult,
+                 unop( Iop_ReinterpF64asI64,
+                       binop( Iop_RoundF64toF32, rm,
+                              qop( Iop_MAddF64, rm,
+                                   mkexpr( frA ),
+                                   mkexpr( mdp ? frT : frB ),
+                                   mkexpr( mdp ? frB : frT ) ) ) ) );
+
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              mkexpr( getNegatedResult(maddResult) ),
+                              mkU64( 0 ) ) );
+         break;
+      }
+      case 0x244: case 0x264: /* xsnmsubasp, xsnmsubmsp (VSX Scalar Negative
+                               * Multiply-Subtract Single-Precision)
+                               */
+      {
+         IRTemp frT = newTemp(Ity_F64);
+         Bool mdp = opc2 == 0x264;
+         IRTemp msubResult = newTemp(Ity_I64);
+
+         DIP("xsnmsub%ssp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
+                                                        getVSReg( XT ) ) ) );
+         assign( msubResult,
+                 unop( Iop_ReinterpF64asI64,
+                       binop( Iop_RoundF64toF32, rm,
+                              qop( Iop_MSubF64, rm,
+                                   mkexpr( frA ),
+                                   mkexpr( mdp ? frT : frB ),
+                                   mkexpr( mdp ? frB : frT ) ) ) ) );
+
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              mkexpr( getNegatedResult(msubResult) ),
+                              mkU64( 0 ) ) );
+
+         break;
+      }
+
+      case 0x2C4: case 0x2E4: // xsnmsubadp, xsnmsubmdp (VSX Scalar Negative Multiply-Subtract Double-Precision)
+      {
+         IRTemp frT = newTemp(Ity_F64);
+         Bool mdp = opc2 == 0x2E4;
+         IRTemp msubResult = newTemp(Ity_I64);
+
+         DIP("xsnmsub%sdp v%d,v%d,v%d\n", mdp ? "m" : "a", (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( frT, unop( Iop_ReinterpI64asF64, unop( Iop_V128HIto64,
+                                                        getVSReg( XT ) ) ) );
+         assign(msubResult, unop( Iop_ReinterpF64asI64,
+                                      qop( Iop_MSubF64,
+                                           rm,
+                                           mkexpr( frA ),
+                                           mkexpr( mdp ? frT : frB ),
+                                           mkexpr( mdp ? frB : frT ) ) ));
+
+         putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( getNegatedResult(msubResult) ), mkU64( 0 ) ) );
+
+         break;
+      }
+
+      case 0x040: // xsmulsp (VSX Scalar Multiply Single-Precision)
+         DIP("xsmulsp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              unop( Iop_ReinterpF64asI64,
+                                    binop( Iop_RoundF64toF32, rm,
+                                           triop( Iop_MulF64, rm,
+                                                   mkexpr( frA ),
+                                                   mkexpr( frB ) ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+
+      case 0x0C0: // xsmuldp (VSX Scalar Multiply Double-Precision)
+         DIP("xsmuldp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
+                                                    triop( Iop_MulF64, rm,
+                                                           mkexpr( frA ),
+                                                           mkexpr( frB ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+      case 0x0A0: // xssubdp (VSX Scalar Subtract Double-Precision)
+         DIP("xssubdp v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
+                                                    triop( Iop_SubF64, rm,
+                                                           mkexpr( frA ),
+                                                           mkexpr( frB ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+
+      case 0x016: // xssqrtsp (VSX Scalar Square Root Single-Precision)
+         DIP("xssqrtsp v%d,v%d\n", (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64,
+                                binop( Iop_RoundF64toF32, rm,
+                                       binop( Iop_SqrtF64, rm,
+                                              mkexpr( frB ) ) ) ),
+                          mkU64( 0 ) ) );
+         break;
+
+      case 0x096: // xssqrtdp (VSX Scalar Square Root Double-Precision)
+         DIP("xssqrtdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+         putVSReg( XT,  binop( Iop_64HLtoV128, unop( Iop_ReinterpF64asI64,
+                                                     binop( Iop_SqrtF64, rm,
+                                                            mkexpr( frB ) ) ),
+                               mkU64( 0 ) ) );
+         break;
+
+      case 0x0F4: // xstdivdp (VSX Scalar Test for software Divide Double-Precision)
+      {
+         UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+         IRTemp frA_I64 = newTemp(Ity_I64);
+         IRTemp frB_I64 = newTemp(Ity_I64);
+         DIP("xstdivdp crf%d,v%d,v%d\n", crfD, (UInt)XA, (UInt)XB);
+         assign( frA_I64, unop( Iop_ReinterpF64asI64, mkexpr( frA ) ) );
+         assign( frB_I64, unop( Iop_ReinterpF64asI64, mkexpr( frB ) ) );
+         putGST_field( PPC_GST_CR, do_fp_tdiv(frA_I64, frB_I64), crfD );
+         break;
+      }
+      case 0x0D4: // xstsqrtdp (VSX Vector Test for software Square Root Double-Precision)
+      {
+         IRTemp frB_I64 = newTemp(Ity_I64);
+         UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+         IRTemp flags = newTemp(Ity_I32);
+         IRTemp  fe_flag, fg_flag;
+         fe_flag = fg_flag = IRTemp_INVALID;
+         DIP("xstsqrtdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+         assign( frB_I64, unop(Iop_V128HIto64, getVSReg( XB )) );
+         do_fp_tsqrt(frB_I64, False /*not single precision*/, &fe_flag, &fg_flag);
+         /* The CR field consists of fl_flag || fg_flag || fe_flag || 0b0
+          * where fl_flag == 1 on ppc64.
+          */
+         assign( flags,
+                 binop( Iop_Or32,
+                        binop( Iop_Or32, mkU32( 8 ), // fl_flag
+                               binop( Iop_Shl32, mkexpr(fg_flag), mkU8( 2 ) ) ),
+                        binop( Iop_Shl32, mkexpr(fe_flag), mkU8( 1 ) ) ) );
+         putGST_field( PPC_GST_CR, mkexpr(flags), crfD );
+         break;
+      }
+
+      default:
+         vex_printf( "dis_vxs_arith(ppc)(opc2)\n" );
+         return False;
+   }
+
+   return True;
+}
+
+
+/*
+ * VSX Floating Point Compare Instructions
+ */
+static Bool
+dis_vx_cmp( UInt theInstr, UInt opc2 )
+{
+   /* XX3-Form and XX2-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar crfD     = toUChar( IFIELD( theInstr, 23, 3 ) );
+   IRTemp ccPPC32;
+   UChar XA       = ifieldRegXA ( theInstr );
+   UChar XB       = ifieldRegXB ( theInstr );
+   IRTemp frA     = newTemp(Ity_F64);
+   IRTemp frB     = newTemp(Ity_F64);
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vx_cmp(ppc)(instr)\n" );
+      return False;
+   }
+
+   assign(frA, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XA ))));
+   assign(frB, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, getVSReg( XB ))));
+   switch (opc2) {
+      case 0x08C: case 0x0AC: // xscmpudp, xscmpodp
+         /* Note: Differences between xscmpudp and xscmpodp are only in
+          * exception flag settings, which aren't supported anyway. */
+         DIP("xscmp%sdp crf%d,fr%u,fr%u\n", opc2 == 0x08c ? "u" : "o",
+                                           crfD, (UInt)XA, (UInt)XB);
+         ccPPC32 = get_fp_cmp_CR_val( binop(Iop_CmpF64, mkexpr(frA), mkexpr(frB)));
+         putGST_field( PPC_GST_CR, mkexpr(ccPPC32), crfD );
+         break;
+
+      default:
+         vex_printf( "dis_vx_cmp(ppc)(opc2)\n" );
+         return False;
+   }
+   return True;
+}
+
/* Helper for the VSX vector double-precision compares
 * (xvcmpeqdp/xvcmpgedp/xvcmpgtdp).  Compares the two F64 lanes of vA
 * and vB, writes an all-ones / all-zeros 64-bit mask per lane into
 * VSR[XT], and, when flag_rC (Rc) is set, records the all-true /
 * all-false summary bits in CR field 6.
 */
static void
do_vvec_fp_cmp ( IRTemp vA, IRTemp vB, UChar XT, UChar flag_rC,
                 ppc_cmp_t cmp_type )
{
   IRTemp frA_hi     = newTemp(Ity_F64);
   IRTemp frB_hi     = newTemp(Ity_F64);
   IRTemp frA_lo     = newTemp(Ity_F64);
   IRTemp frB_lo     = newTemp(Ity_F64);
   IRTemp ccPPC32    = newTemp(Ity_I32);
   IRTemp ccIR_hi;
   IRTemp ccIR_lo;

   IRTemp hiResult = newTemp(Ity_I64);
   IRTemp loResult = newTemp(Ity_I64);
   IRTemp hiEQlo = newTemp(Ity_I1);
   IRTemp all_elem_true = newTemp(Ity_I32);
   IRTemp all_elem_false = newTemp(Ity_I32);

   /* Split each V128 operand into its high and low F64 lanes. */
   assign(frA_hi, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, mkexpr( vA ))));
   assign(frB_hi, unop(Iop_ReinterpI64asF64, unop(Iop_V128HIto64, mkexpr( vB ))));
   assign(frA_lo, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, mkexpr( vA ))));
   assign(frB_lo, unop(Iop_ReinterpI64asF64, unop(Iop_V128to64, mkexpr( vB ))));

   /* Per-lane compare results; in this encoding 2 means "equal to" and
    * 4 means "greater than" (see the PPC_CMP_GE handling below). */
   ccIR_hi = get_fp_cmp_CR_val( binop( Iop_CmpF64,
                                       mkexpr( frA_hi ),
                                       mkexpr( frB_hi ) ) );
   ccIR_lo = get_fp_cmp_CR_val( binop( Iop_CmpF64,
                                       mkexpr( frA_lo ),
                                       mkexpr( frB_lo ) ) );

   if (cmp_type != PPC_CMP_GE) {
      /* EQ/GT: the lane is true iff the compare result equals the
       * requested cmp_type value exactly. */
      assign( hiResult,
              unop( Iop_1Sto64,
                    binop( Iop_CmpEQ32, mkexpr( ccIR_hi ), mkU32( cmp_type ) ) ) );
      assign( loResult,
              unop( Iop_1Sto64,
                    binop( Iop_CmpEQ32, mkexpr( ccIR_lo ), mkU32( cmp_type ) ) ) );
   } else {
      // For PPC_CMP_GE, one element compare may return "4" (for "greater than") and
      // the other element compare may return "2" (for "equal to").
      IRTemp lo_GE = newTemp(Ity_I1);
      IRTemp hi_GE = newTemp(Ity_I1);

      assign(hi_GE, mkOR1( binop( Iop_CmpEQ32, mkexpr( ccIR_hi ), mkU32( 2 ) ),
                           binop( Iop_CmpEQ32, mkexpr( ccIR_hi ), mkU32( 4 ) ) ) );
      assign( hiResult,unop( Iop_1Sto64, mkexpr( hi_GE ) ) );

      assign(lo_GE, mkOR1( binop( Iop_CmpEQ32, mkexpr( ccIR_lo ), mkU32( 2 ) ),
                           binop( Iop_CmpEQ32, mkexpr( ccIR_lo ), mkU32( 4 ) ) ) );
      assign( loResult, unop( Iop_1Sto64, mkexpr( lo_GE ) ) );
   }

   // The [hi/lo]Result will be all 1's or all 0's.  We just look at the lower word.
   assign( hiEQlo,
           binop( Iop_CmpEQ32,
                  unop( Iop_64to32, mkexpr( hiResult ) ),
                  unop( Iop_64to32, mkexpr( loResult ) ) ) );
   putVSReg( XT,
             binop( Iop_64HLtoV128, mkexpr( hiResult ), mkexpr( loResult ) ) );

   assign( all_elem_true,
           unop( Iop_1Uto32,
                 mkAND1( mkexpr( hiEQlo ),
                         binop( Iop_CmpEQ32,
                                mkU32( 0xffffffff ),
                                unop( Iop_64to32,
                                mkexpr( hiResult ) ) ) ) ) );

   assign( all_elem_false,
           unop( Iop_1Uto32,
                 mkAND1( mkexpr( hiEQlo ),
                         binop( Iop_CmpEQ32,
                                mkU32( 0 ),
                                unop( Iop_64to32,
                                mkexpr( hiResult ) ) ) ) ) );
   /* CR6-style summary: bit 3 = all lanes true, bit 1 = all lanes false. */
   assign( ccPPC32,
           binop( Iop_Or32,
                  binop( Iop_Shl32, mkexpr( all_elem_false ), mkU8( 1 ) ),
                  binop( Iop_Shl32, mkexpr( all_elem_true ), mkU8( 3 ) ) ) );

   if (flag_rC) {
      putGST_field( PPC_GST_CR, mkexpr(ccPPC32), 6 );
   }
}
+
+/*
+ * VSX Vector Compare Instructions
+ */
+static Bool
+dis_vvec_cmp( UInt theInstr, UInt opc2 )
+{
+   /* XX3-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT ( theInstr );
+   UChar XA = ifieldRegXA ( theInstr );
+   UChar XB = ifieldRegXB ( theInstr );
+   UChar flag_rC  = ifieldBIT10(theInstr);
+   IRTemp vA = newTemp( Ity_V128 );
+   IRTemp vB = newTemp( Ity_V128 );
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vvec_cmp(ppc)(instr)\n" );
+      return False;
+   }
+
+   assign( vA, getVSReg( XA ) );
+   assign( vB, getVSReg( XB ) );
+
+   switch (opc2) {
+      case 0x18C: case 0x38C:  // xvcmpeqdp[.] (VSX Vector Compare Equal To Double-Precision [ & Record ])
+      {
+         DIP("xvcmpeqdp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
+             (UInt)XT, (UInt)XA, (UInt)XB);
+         do_vvec_fp_cmp(vA, vB, XT, flag_rC, PPC_CMP_EQ);
+         break;
+      }
+
+      case 0x1CC: case 0x3CC: // xvcmpgedp[.] (VSX Vector Compare Greater Than or Equal To Double-Precision [ & Record ])
+      {
+         DIP("xvcmpgedp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
+             (UInt)XT, (UInt)XA, (UInt)XB);
+         do_vvec_fp_cmp(vA, vB, XT, flag_rC, PPC_CMP_GE);
+         break;
+      }
+
+      case 0x1AC: case 0x3AC: // xvcmpgtdp[.] (VSX Vector Compare Greater Than Double-Precision [ & Record ])
+      {
+         DIP("xvcmpgtdp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
+             (UInt)XT, (UInt)XA, (UInt)XB);
+         do_vvec_fp_cmp(vA, vB, XT, flag_rC, PPC_CMP_GT);
+         break;
+      }
+
+      case 0x10C: case 0x30C: // xvcmpeqsp[.] (VSX Vector Compare Equal To Single-Precision [ & Record ])
+      {
+         IRTemp vD = newTemp(Ity_V128);
+
+         DIP("xvcmpeqsp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
+             (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( vD, binop(Iop_CmpEQ32Fx4, mkexpr(vA), mkexpr(vB)) );
+         putVSReg( XT, mkexpr(vD) );
+         if (flag_rC) {
+            set_AV_CR6( mkexpr(vD), True );
+         }
+         break;
+      }
+
+      case 0x14C: case 0x34C: // xvcmpgesp[.] (VSX Vector Compare Greater Than or Equal To Single-Precision [ & Record ])
+      {
+         IRTemp vD = newTemp(Ity_V128);
+
+         DIP("xvcmpgesp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
+             (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( vD, binop(Iop_CmpGE32Fx4, mkexpr(vA), mkexpr(vB)) );
+         putVSReg( XT, mkexpr(vD) );
+         if (flag_rC) {
+            set_AV_CR6( mkexpr(vD), True );
+         }
+         break;
+      }
+
+      case 0x12C: case 0x32C: //xvcmpgtsp[.] (VSX Vector Compare Greater Than Single-Precision [ & Record ])
+      {
+         IRTemp vD = newTemp(Ity_V128);
+
+         DIP("xvcmpgtsp%s crf%d,fr%u,fr%u\n", (flag_rC ? ".":""),
+             (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( vD, binop(Iop_CmpGT32Fx4, mkexpr(vA), mkexpr(vB)) );
+         putVSReg( XT, mkexpr(vD) );
+         if (flag_rC) {
+            set_AV_CR6( mkexpr(vD), True );
+         }
+         break;
+      }
+
+      default:
+         vex_printf( "dis_vvec_cmp(ppc)(opc2)\n" );
+         return False;
+   }
+   return True;
+}
+/*
+ * Miscellaneous VSX Scalar Instructions
+ */
+static Bool
+dis_vxs_misc( UInt theInstr, UInt opc2 )
+{
+#define VG_PPC_SIGN_MASK 0x7fffffffffffffffULL
+   /* XX3-Form and XX2-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT ( theInstr );
+   UChar XA = ifieldRegXA ( theInstr );
+   UChar XB = ifieldRegXB ( theInstr );
+   IRTemp vA = newTemp( Ity_V128 );
+   IRTemp vB = newTemp( Ity_V128 );
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vxs_misc(ppc)(instr)\n" );
+      return False;
+   }
+
+   assign( vA, getVSReg( XA ) );
+   assign( vB, getVSReg( XB ) );
+
+   /* For all the VSX move instructions, the contents of doubleword element 1
+    * of VSX[XT] are undefined after the operation; therefore, we can simply
+    * move the entire array element where it makes sense to do so.
+    */
+
+   switch (opc2) {
+      case 0x2B2: // xsabsdp (VSX scalar absolute value double-precision
+      {
+         /* Move abs val of dw 0 of VSX[XB] to dw 0 of VSX[XT]. */
+         IRTemp absVal = newTemp(Ity_V128);
+         if (host_endness == VexEndnessLE) {
+            IRTemp hi64 = newTemp(Ity_I64);
+            IRTemp lo64 = newTemp(Ity_I64);
+            assign( hi64, unop( Iop_V128HIto64, mkexpr(vB) ) );
+            assign( lo64, unop( Iop_V128to64, mkexpr(vB) ) );
+            assign( absVal, binop( Iop_64HLtoV128,
+                                   binop( Iop_And64, mkexpr(hi64),
+                                          mkU64(VG_PPC_SIGN_MASK) ),
+                                   mkexpr(lo64) ) );
+         } else {
+            assign(absVal, binop(Iop_ShrV128,
+                                 binop(Iop_ShlV128, mkexpr(vB),
+                                       mkU8(1)), mkU8(1)));
+         }
+         DIP("xsabsdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+         putVSReg(XT, mkexpr(absVal));
+         break;
+      }
+      case 0x2C0: // xscpsgndp
+      {
+         /* Scalar copy sign double-precision */
+         IRTemp vecA_signed = newTemp(Ity_I64);
+         IRTemp vecB_unsigned = newTemp(Ity_I64);
+         IRTemp vec_result = newTemp(Ity_V128);
+         DIP("xscpsgndp v%d,v%d v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         assign( vecA_signed, binop( Iop_And64,
+                                     unop( Iop_V128HIto64,
+                                           mkexpr(vA)),
+                                           mkU64(~VG_PPC_SIGN_MASK) ) );
+         assign( vecB_unsigned, binop( Iop_And64,
+                                       unop( Iop_V128HIto64,
+                                             mkexpr(vB) ),
+                                             mkU64(VG_PPC_SIGN_MASK) ) );
+         assign( vec_result, binop( Iop_64HLtoV128,
+                                    binop( Iop_Or64,
+                                           mkexpr(vecA_signed),
+                                           mkexpr(vecB_unsigned) ),
+                                    mkU64(0x0ULL)));
+         putVSReg(XT, mkexpr(vec_result));
+         break;
+      }
+      case 0x2D2: // xsnabsdp
+      {
+         /* Scalar negative absolute value double-precision */
+         IRTemp BHi_signed = newTemp(Ity_I64);
+         DIP("xsnabsdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+         assign( BHi_signed, binop( Iop_Or64,
+                                    unop( Iop_V128HIto64,
+                                          mkexpr(vB) ),
+                                          mkU64(~VG_PPC_SIGN_MASK) ) );
+         putVSReg(XT, binop( Iop_64HLtoV128,
+                             mkexpr(BHi_signed), mkU64(0x0ULL) ) );
+         break;
+      }
+      case 0x2F2: // xsnegdp
+      {
+         /* Scalar negate double-precision */
+         IRTemp BHi_signed = newTemp(Ity_I64);
+         IRTemp BHi_unsigned = newTemp(Ity_I64);
+         IRTemp BHi_negated = newTemp(Ity_I64);
+         IRTemp BHi_negated_signbit = newTemp(Ity_I1);
+         IRTemp vec_result = newTemp(Ity_V128);
+         DIP("xsnabsdp v%d,v%d\n", (UInt)XT, (UInt)XB);
+         assign( BHi_signed, unop( Iop_V128HIto64, mkexpr(vB) ) );
+         assign( BHi_unsigned, binop( Iop_And64, mkexpr(BHi_signed),
+                                      mkU64(VG_PPC_SIGN_MASK) ) );
+         assign( BHi_negated_signbit,
+                 unop( Iop_Not1,
+                       unop( Iop_32to1,
+                             binop( Iop_Shr32,
+                                    unop( Iop_64HIto32,
+                                          binop( Iop_And64,
+                                                 mkexpr(BHi_signed),
+                                                 mkU64(~VG_PPC_SIGN_MASK) )
+                                          ),
+                                    mkU8(31) ) ) ) );
+         assign( BHi_negated,
+                 binop( Iop_Or64,
+                        binop( Iop_32HLto64,
+                               binop( Iop_Shl32,
+                                      unop( Iop_1Uto32,
+                                            mkexpr(BHi_negated_signbit) ),
+                                      mkU8(31) ),
+                               mkU32(0) ),
+                        mkexpr(BHi_unsigned) ) );
+         assign( vec_result, binop( Iop_64HLtoV128, mkexpr(BHi_negated),
+                                    mkU64(0x0ULL)));
+         putVSReg( XT, mkexpr(vec_result));
+         break;
+      }
+      case 0x280: // xsmaxdp (VSX Scalar Maximum Double-Precision)
+      case 0x2A0: // xsmindp (VSX Scalar Minimum Double-Precision)
+      {
+         IRTemp frA     = newTemp(Ity_I64);
+         IRTemp frB     = newTemp(Ity_I64);
+         Bool isMin = opc2 == 0x2A0 ? True : False;
+         /* BUG FIX: the ternary operands were swapped -- when isMin is
+          * True (opc2 == 0x2A0) the instruction is xsmindp, so print
+          * "xsmindp", not "xsmaxdp". */
+         DIP("%s v%d,v%d v%d\n", isMin ? "xsmindp" : "xsmaxdp", (UInt)XT, (UInt)XA, (UInt)XB);
+
+         /* Operate on the high doublewords of the two source VSRs; the
+          * low doubleword of the result is zeroed. */
+         assign(frA, unop(Iop_V128HIto64, mkexpr( vA )));
+         assign(frB, unop(Iop_V128HIto64, mkexpr( vB )));
+         putVSReg( XT, binop( Iop_64HLtoV128, get_max_min_fp(frA, frB, isMin), mkU64( 0 ) ) );
+
+         break;
+      }
+      case 0x0F2: // xsrdpim (VSX Scalar Round to Double-Precision Integer using round toward -Infinity)
+      case 0x0D2: // xsrdpip (VSX Scalar Round to Double-Precision Integer using round toward +Infinity)
+      case 0x0D6: // xsrdpic (VSX Scalar Round to Double-Precision Integer using Current rounding mode)
+      case 0x0B2: // xsrdpiz (VSX Scalar Round to Double-Precision Integer using round toward Zero)
+      case 0x092: // xsrdpi  (VSX Scalar Round to Double-Precision Integer using round toward Nearest Away)
+      {
+         IRTemp frB_I64 = newTemp(Ity_I64);
+         IRExpr * frD_fp_round = NULL;
+
+         assign(frB_I64, unop(Iop_V128HIto64, mkexpr( vB )));
+         frD_fp_round = _do_vsx_fp_roundToInt(frB_I64, opc2);
+
+         DIP("xsrdpi%s v%d,v%d\n", _get_vsx_rdpi_suffix(opc2), (UInt)XT, (UInt)XB);
+         putVSReg( XT,
+                   binop( Iop_64HLtoV128,
+                          unop( Iop_ReinterpF64asI64, frD_fp_round),
+                          mkU64( 0 ) ) );
+         break;
+      }
+      case 0x034: // xsresp (VSX Scalar Reciprocal Estimate single-Precision)
+      case 0x014: /* xsrsqrtesp (VSX Scalar Reciprocal Square Root Estimate
+                   * single-Precision)
+                   */
+      {
+         IRTemp frB = newTemp(Ity_F64);
+         IRTemp sqrt = newTemp(Ity_F64);
+         IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
+         IRExpr* rm  = get_IR_roundingmode();
+         Bool redp = opc2 == 0x034;
+         DIP("%s v%d,v%d\n", redp ? "xsresp" : "xsrsqrtesp", (UInt)XT,
+             (UInt)XB);
+
+         assign( frB,
+                 unop( Iop_ReinterpI64asF64,
+                       unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
+
+         if (!redp)
+            assign( sqrt,
+                    binop( Iop_SqrtF64,
+                           rm,
+                           mkexpr(frB) ) );
+         putVSReg( XT,
+                      binop( Iop_64HLtoV128,
+                             unop( Iop_ReinterpF64asI64,
+                                   binop( Iop_RoundF64toF32, rm,
+                                          triop( Iop_DivF64,
+                                                 rm,
+                                                 ieee_one,
+                                                 redp ? mkexpr( frB ) :
+                                                        mkexpr( sqrt ) ) ) ),
+                             mkU64( 0 ) ) );
+         break;
+      }
+
+      case 0x0B4: // xsredp (VSX Scalar Reciprocal Estimate Double-Precision)
+      case 0x094: // xsrsqrtedp (VSX Scalar Reciprocal Square Root Estimate Double-Precision)
+
+      {
+         IRTemp frB = newTemp(Ity_F64);
+         IRTemp sqrt = newTemp(Ity_F64);
+         IRExpr* ieee_one = IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL));
+         IRExpr* rm  = get_IR_roundingmode();
+         Bool redp = opc2 == 0x0B4;
+         DIP("%s v%d,v%d\n", redp ? "xsredp" : "xsrsqrtedp", (UInt)XT, (UInt)XB);
+         assign( frB,
+                 unop( Iop_ReinterpI64asF64,
+                       unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
+
+         if (!redp)
+            assign( sqrt,
+                    binop( Iop_SqrtF64,
+                           rm,
+                           mkexpr(frB) ) );
+         putVSReg( XT,
+                      binop( Iop_64HLtoV128,
+                             unop( Iop_ReinterpF64asI64,
+                                   triop( Iop_DivF64,
+                                          rm,
+                                          ieee_one,
+                                          redp ? mkexpr( frB ) : mkexpr( sqrt ) ) ),
+                             mkU64( 0 ) ) );
+         break;
+      }
+
+      case 0x232: // xsrsp (VSX Scalar Round to Single-Precision)
+      {
+         IRTemp frB = newTemp(Ity_F64);
+         IRExpr* rm  = get_IR_roundingmode();
+         DIP("xsrsp v%d, v%d\n", (UInt)XT, (UInt)XB);
+         assign( frB,
+                 unop( Iop_ReinterpI64asF64,
+                       unop( Iop_V128HIto64, mkexpr( vB ) ) ) );
+
+         putVSReg( XT, binop( Iop_64HLtoV128,
+                              unop( Iop_ReinterpF64asI64,
+                                    binop( Iop_RoundF64toF32,
+                                           rm,
+                                           mkexpr( frB ) ) ),
+                              mkU64( 0 ) ) );
+         break;
+      }
+
+      default:
+         vex_printf( "dis_vxs_misc(ppc)(opc2)\n" );
+         return False;
+   }
+   return True;
+}
+
+/*
+ * VSX Logical Instructions
+ */
+/* Disassemble one VSX 128-bit logical instruction (XX3-form, opc1 must
+ * be 0x3C).  Each case builds the result expression; a single putVSReg
+ * after the switch writes it to VSR[XT].  Returns False if opc1/opc2
+ * are not recognised. */
+static Bool
+dis_vx_logic ( UInt theInstr, UInt opc2 )
+{
+   /* XX3-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT ( theInstr );
+   UChar XA = ifieldRegXA ( theInstr );
+   UChar XB = ifieldRegXB ( theInstr );
+   IRTemp srcA = newTemp( Ity_V128 );
+   IRTemp srcB = newTemp( Ity_V128 );
+   IRExpr* res = NULL;    /* value destined for VSR[XT] */
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vx_logic(ppc)(instr)\n" );
+      return False;
+   }
+
+   assign( srcA, getVSReg( XA ) );
+   assign( srcB, getVSReg( XB ) );
+
+   switch (opc2) {
+      case 0x268: // xxlxor
+         DIP("xxlxor v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         res = binop( Iop_XorV128, mkexpr( srcA ), mkexpr( srcB ) );
+         break;
+      case 0x248: // xxlor
+         DIP("xxlor v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         res = binop( Iop_OrV128, mkexpr( srcA ), mkexpr( srcB ) );
+         break;
+      case 0x288: // xxlnor
+         DIP("xxlnor v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         res = unop( Iop_NotV128,
+                     binop( Iop_OrV128, mkexpr( srcA ), mkexpr( srcB ) ) );
+         break;
+      case 0x208: // xxland
+         DIP("xxland v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         res = binop( Iop_AndV128, mkexpr( srcA ), mkexpr( srcB ) );
+         break;
+      case 0x228: // xxlandc (VSX Logical AND with complement)
+         DIP("xxlandc v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         res = binop( Iop_AndV128, mkexpr( srcA ),
+                      unop( Iop_NotV128, mkexpr( srcB ) ) );
+         break;
+      case 0x2A8: // xxlorc (VSX Logical OR with complement)
+         DIP("xxlorc v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         res = binop( Iop_OrV128, mkexpr( srcA ),
+                      unop( Iop_NotV128, mkexpr( srcB ) ) );
+         break;
+      case 0x2C8: // xxlnand (VSX Logical NAND)
+         DIP("xxlnand v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         res = unop( Iop_NotV128,
+                     binop( Iop_AndV128, mkexpr( srcA ), mkexpr( srcB ) ) );
+         break;
+      case 0x2E8: // xxleqv (VSX Logical Equivalence)
+         DIP("xxleqv v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB);
+         res = unop( Iop_NotV128,
+                     binop( Iop_XorV128, mkexpr( srcA ), mkexpr( srcB ) ) );
+         break;
+      default:
+         vex_printf( "dis_vx_logic(ppc)(opc2)\n" );
+         return False;
+   }
+   putVSReg( XT, res );
+   return True;
+}
+
+/*
+ * VSX Load Instructions
+ * NOTE: VSX supports word-aligned storage access.
+ */
+/* Disassemble one VSX indexed-form load (opc1 must be 0x1F).  The
+ * effective address is (rA|0) + rB, computed by ea_rAor0_idxd.
+ * Returns False for unrecognised opc2 values. */
+static Bool
+dis_vx_load ( UInt theInstr )
+{
+   /* XX1-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT ( theInstr );
+   UChar rA_addr = ifieldRegA( theInstr );
+   UChar rB_addr = ifieldRegB( theInstr );
+   UInt opc2 = ifieldOPClo10( theInstr );
+
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp EA = newTemp( ty );
+
+   if (opc1 != 0x1F) {
+      vex_printf( "dis_vx_load(ppc)(instr)\n" );
+      return False;
+   }
+
+   assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
+
+   switch (opc2) {
+   case 0x00C: // lxsiwzx (Load VSX Scalar as Integer Word and Zero Indexed)
+   {
+      IRExpr * exp;
+      DIP("lxsiwzx %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+
+      /* A full doubleword is loaded; the word of interest is the low
+       * half on LE hosts and the high half on BE hosts. */
+      if (host_endness == VexEndnessLE)
+         exp = unop( Iop_64to32, load( Ity_I64, mkexpr( EA ) ) );
+      else
+         exp = unop( Iop_64HIto32, load( Ity_I64, mkexpr( EA ) ) );
+
+      /* Zero-extend the word into the high doubleword of VSR[XT]. */
+      putVSReg( XT, binop( Iop_64HLtoV128,
+                           unop( Iop_32Uto64, exp),
+                           mkU64(0) ) );
+      break;
+   }
+   case 0x04C: // lxsiwax (Load VSX Scalar as Integer Word Algebraic Indexed)
+   {
+      IRExpr * exp;
+      DIP("lxsiwax %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+
+      /* Same word selection as lxsiwzx above ... */
+      if (host_endness == VexEndnessLE)
+         exp = unop( Iop_64to32, load( Ity_I64, mkexpr( EA ) ) );
+      else
+         exp = unop( Iop_64HIto32, load( Ity_I64, mkexpr( EA ) ) );
+
+      /* ... but sign-extended rather than zero-extended. */
+      putVSReg( XT, binop( Iop_64HLtoV128,
+                           unop( Iop_32Sto64, exp),
+                           mkU64(0) ) );
+      break;
+   }
+   case 0x20C: // lxsspx (Load VSX Scalar Single-Precision Indexed)
+   {
+      IRExpr * exp;
+      DIP("lxsspx %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+      /* Take 32-bit floating point value in the upper half of the fetched
+       * 64-bit value, convert to 64-bit floating point value and load into
+       * top word of V128.
+       */
+      exp = unop( Iop_ReinterpF64asI64,
+                  unop( Iop_F32toF64,
+                        unop( Iop_ReinterpI32asF32,
+                              load( Ity_I32, mkexpr( EA ) ) ) ) );
+
+      putVSReg( XT, binop( Iop_64HLtoV128, exp, mkU64( 0 ) ) );
+      break;
+   }
+   case 0x24C: // lxsdx (Load VSX Scalar Doubleword Indexed)
+   {
+      IRExpr * exp;
+      DIP("lxsdx %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+      exp = load( Ity_I64, mkexpr( EA ) );
+      // We need to pass an expression of type Ity_V128 with putVSReg, but the load
+      // we just performed is only a DW.  But since the contents of VSR[XT] element 1
+      // are undefined after this operation, we can just do a splat op.
+      putVSReg( XT, binop( Iop_64HLtoV128, exp, exp ) );
+      break;
+   }
+   case 0x34C: // lxvd2x (Load VSX Vector Doubleword*2 Indexed)
+   {
+      IROp addOp = ty == Ity_I64 ? Iop_Add64 : Iop_Add32;
+      IRExpr * high, *low;
+      ULong ea_off = 8;
+      IRExpr* high_addr;
+      DIP("lxvd2x %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+      /* Two doubleword loads: the one at EA becomes the high half of
+       * VSR[XT], the one at EA+8 the low half. */
+      high = load( Ity_I64, mkexpr( EA ) );
+      high_addr = binop( addOp, mkexpr( EA ), ty == Ity_I64 ? mkU64( ea_off )
+            : mkU32( ea_off ) );
+      low = load( Ity_I64, high_addr );
+      putVSReg( XT, binop( Iop_64HLtoV128, high, low ) );
+      break;
+   }
+   case 0x14C: // lxvdsx (Load VSX Vector Doubleword & Splat Indexed)
+   {
+      IRTemp data = newTemp(Ity_I64);
+      DIP("lxvdsx %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+      /* Splat the doubleword at EA into both halves of VSR[XT]. */
+      assign( data, load( Ity_I64, mkexpr( EA ) ) );
+      putVSReg( XT, binop( Iop_64HLtoV128, mkexpr( data ), mkexpr( data ) ) );
+      break;
+   }
+   case 0x30C: // lxvw4x (Load VSX Vector Word*4 Indexed)
+   {
+      IRExpr *t0;
+
+      DIP("lxvw4x %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
+
+      /* The load will result in the data being in BE order. */
+      if (host_endness == VexEndnessLE) {
+         IRExpr *t0_BE;
+         IRTemp perm_LE = newTemp(Ity_V128);
+
+         t0_BE = load( Ity_V128, mkexpr( EA ) );
+
+         /*  Permute the data to LE format */
+         assign( perm_LE, binop( Iop_64HLtoV128, mkU64(0x0c0d0e0f08090a0bULL),
+                                 mkU64(0x0405060700010203ULL)));
+
+         t0 = binop( Iop_Perm8x16, t0_BE, mkexpr(perm_LE) );
+      } else {
+         t0 = load( Ity_V128, mkexpr( EA ) );
+      }
+
+      putVSReg( XT, t0 );
+      break;
+   }
+   default:
+      vex_printf( "dis_vx_load(ppc)(opc2)\n" );
+      return False;
+   }
+   return True;
+}
+
+/*
+ * VSX Store Instructions
+ * NOTE: VSX supports word-aligned storage access.
+ */
+/* Disassemble one VSX indexed-form store (opc1 must be 0x1F).  The
+ * effective address is (rA|0) + rB.  Returns False for unrecognised
+ * opc2 values. */
+static Bool
+dis_vx_store ( UInt theInstr )
+{
+   /* XX1-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XS = ifieldRegXS( theInstr );
+   UChar rA_addr = ifieldRegA( theInstr );
+   UChar rB_addr = ifieldRegB( theInstr );
+   IRTemp vS = newTemp( Ity_V128 );
+   UInt opc2 = ifieldOPClo10( theInstr );
+
+   IRType ty = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp EA = newTemp( ty );
+
+   if (opc1 != 0x1F) {
+      vex_printf( "dis_vx_store(ppc)(instr)\n" );
+      return False;
+   }
+
+   assign( EA, ea_rAor0_idxd( rA_addr, rB_addr ) );
+   assign( vS, getVSReg( XS ) );
+
+   switch (opc2) {
+   case 0x08C: // stxsiwx (Store VSX Scalar as Integer Word Indexed)
+   {
+     /* Need the next to the most significant 32-bit word from
+      * the 128-bit vector.
+      */
+      IRExpr * high64, * low32;
+      DIP("stxsiwx %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+      high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
+      low32  = unop( Iop_64to32, high64 );
+      store( mkexpr( EA ), low32 );
+      break;
+   }
+   case 0x28C: // stxsspx (Store VSX Scalar Single-Precision Indexed)
+   {
+      IRTemp high64 = newTemp(Ity_F64);
+      IRTemp val32  = newTemp(Ity_I32);
+      DIP("stxsspx %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+      /* Narrow the F64 in the high doubleword of vS to F32
+       * (Iop_TruncF64asF32) and store its 32-bit image. */
+      assign(high64, unop( Iop_ReinterpI64asF64,
+                           unop( Iop_V128HIto64, mkexpr( vS ) ) ) );
+      assign(val32, unop( Iop_ReinterpF32asI32,
+                          unop( Iop_TruncF64asF32,
+                                mkexpr(high64) ) ) );
+      store( mkexpr( EA ), mkexpr( val32 ) );
+      break;
+   }
+   case 0x2CC: // stxsdx (Store VSX Scalar Doubleword Indexed)
+   {
+      IRExpr * high64;
+      DIP("stxsdx %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+      high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
+      store( mkexpr( EA ), high64 );
+      break;
+   }
+   case 0x3CC: // stxvd2x (Store VSX Vector Doubleword*2 Indexed)
+   {
+      IRExpr * high64, *low64;
+      DIP("stxvd2x %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+      /* High doubleword goes to EA, low doubleword to EA+8. */
+      high64 = unop( Iop_V128HIto64, mkexpr( vS ) );
+      low64 = unop( Iop_V128to64, mkexpr( vS ) );
+      store( mkexpr( EA ), high64 );
+      store( binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
+                    ty == Ity_I64 ? mkU64( 8 ) : mkU32( 8 ) ), low64 );
+      break;
+   }
+   case 0x38C: // stxvw4x (Store VSX Vector Word*4 Indexed)
+   {
+      UInt ea_off = 0;
+      IRExpr* irx_addr;
+      IRTemp hi64 = newTemp( Ity_I64 );
+      IRTemp lo64 = newTemp( Ity_I64 );
+
+      DIP("stxvw4x %d,r%u,r%u\n", (UInt)XS, rA_addr, rB_addr);
+
+      // This instruction supports word-aligned stores, so EA may not be
+      // quad-word aligned.  Therefore, do 4 individual word-size stores.
+      assign( hi64, unop( Iop_V128HIto64, mkexpr( vS ) ) );
+      assign( lo64, unop( Iop_V128to64, mkexpr( vS ) ) );
+      store( mkexpr( EA ), unop( Iop_64HIto32, mkexpr( hi64 ) ) );
+      ea_off += 4;
+      irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
+                        ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
+      store( irx_addr, unop( Iop_64to32, mkexpr( hi64 ) ) );
+      ea_off += 4;
+      irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
+                        ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
+      store( irx_addr, unop( Iop_64HIto32, mkexpr( lo64 ) ) );
+      ea_off += 4;
+      irx_addr = binop( mkSzOp( ty, Iop_Add8 ), mkexpr( EA ),
+                        ty == Ity_I64 ? mkU64( ea_off ) : mkU32( ea_off ) );
+      store( irx_addr, unop( Iop_64to32, mkexpr( lo64 ) ) );
+
+      break;
+   }
+   default:
+      vex_printf( "dis_vx_store(ppc)(opc2)\n" );
+      return False;
+   }
+   return True;
+}
+
+/*
+ * VSX permute and other miscellaneous instructions
+ */
+/* Disassemble VSX permute/shift/splat/select instructions (XX3-form,
+ * opc1 must be 0x3C).  Returns False for unrecognised opc2 values. */
+static Bool
+dis_vx_permute_misc( UInt theInstr, UInt opc2 )
+{
+   /* XX3-Form */
+   UChar opc1 = ifieldOPC( theInstr );
+   UChar XT = ifieldRegXT ( theInstr );
+   UChar XA = ifieldRegXA ( theInstr );
+   UChar XB = ifieldRegXB ( theInstr );
+   IRTemp vT = newTemp( Ity_V128 );
+   IRTemp vA = newTemp( Ity_V128 );
+   IRTemp vB = newTemp( Ity_V128 );
+
+   if (opc1 != 0x3C) {
+      vex_printf( "dis_vx_permute_misc(ppc)(instr)\n" );
+      return False;
+   }
+
+   assign( vA, getVSReg( XA ) );
+   assign( vB, getVSReg( XB ) );
+
+   switch (opc2) {
+      case 0x8: // xxsldwi (VSX Shift Left Double by Word Immediate)
+      {
+         UChar SHW = ifieldSHW ( theInstr );
+         IRTemp result = newTemp(Ity_V128);
+         /* Result is the 128-bit window starting SHW words into the
+          * vA:vB concatenation: vA << (SHW*32) OR'd with
+          * vB >> (128 - SHW*32).  SHW == 0 degenerates to vA. */
+         if ( SHW != 0 ) {
+             IRTemp hi = newTemp(Ity_V128);
+             IRTemp lo = newTemp(Ity_V128);
+             assign( hi, binop(Iop_ShlV128, mkexpr(vA), mkU8(SHW*32)) );
+             assign( lo, binop(Iop_ShrV128, mkexpr(vB), mkU8(128-SHW*32)) );
+             assign ( result, binop(Iop_OrV128, mkexpr(hi), mkexpr(lo)) );
+         } else
+             assign ( result, mkexpr(vA) );
+         DIP("xxsldwi v%d,v%d,v%d,%d\n", (UInt)XT, (UInt)XA, (UInt)XB, (UInt)SHW);
+         putVSReg( XT, mkexpr(result) );
+         break;
+      }
+      case 0x28: // xxpermdi (VSX Permute Doubleword Immediate)
+      {
+         UChar DM = ifieldDM ( theInstr );
+         IRTemp hi = newTemp(Ity_I64);
+         IRTemp lo = newTemp(Ity_I64);
+
+         /* DM bit 1 (0x2) selects which doubleword of vA supplies the
+          * result's high half; DM bit 0 (0x1) selects which doubleword
+          * of vB supplies the low half. */
+         if (DM & 0x2)
+           assign( hi, unop(Iop_V128to64, mkexpr(vA)) );
+         else
+           assign( hi, unop(Iop_V128HIto64, mkexpr(vA)) );
+
+         if (DM & 0x1)
+           assign( lo, unop(Iop_V128to64, mkexpr(vB)) );
+         else
+           assign( lo, unop(Iop_V128HIto64, mkexpr(vB)) );
+
+         assign( vT, binop(Iop_64HLtoV128, mkexpr(hi), mkexpr(lo)) );
+
+         DIP("xxpermdi v%d,v%d,v%d,0x%x\n", (UInt)XT, (UInt)XA, (UInt)XB, (UInt)DM);
+         putVSReg( XT, mkexpr( vT ) );
+         break;
+      }
+      case 0x48: // xxmrghw (VSX Merge High Word)
+      case 0xc8: // xxmrglw (VSX Merge Low Word)
+      {
+         /* Interleave the two words of the selected (high or low)
+          * doubleword of vA with the corresponding words of vB. */
+         const HChar type = (opc2 == 0x48) ? 'h' : 'l';
+         IROp word_op = (opc2 == 0x48) ? Iop_V128HIto64 : Iop_V128to64;
+         IRTemp a64 = newTemp(Ity_I64);
+         IRTemp ahi32 = newTemp(Ity_I32);
+         IRTemp alo32 = newTemp(Ity_I32);
+         IRTemp b64 = newTemp(Ity_I64);
+         IRTemp bhi32 = newTemp(Ity_I32);
+         IRTemp blo32 = newTemp(Ity_I32);
+
+         assign( a64, unop(word_op, mkexpr(vA)) );
+         assign( ahi32, unop(Iop_64HIto32, mkexpr(a64)) );
+         assign( alo32, unop(Iop_64to32, mkexpr(a64)) );
+
+         assign( b64, unop(word_op, mkexpr(vB)) );
+         assign( bhi32, unop(Iop_64HIto32, mkexpr(b64)) );
+         assign( blo32, unop(Iop_64to32, mkexpr(b64)) );
+
+         assign( vT, binop(Iop_64HLtoV128,
+                           binop(Iop_32HLto64, mkexpr(ahi32), mkexpr(bhi32)),
+                           binop(Iop_32HLto64, mkexpr(alo32), mkexpr(blo32))) );
+
+         DIP("xxmrg%cw v%d,v%d,v%d\n", type, (UInt)XT, (UInt)XA, (UInt)XB);
+         putVSReg( XT, mkexpr( vT ) );
+         break;
+      }
+      case 0x018: // xxsel (VSX Select)
+      {
+         UChar XC = ifieldRegXC(theInstr);
+         IRTemp vC = newTemp( Ity_V128 );
+         assign( vC, getVSReg( XC ) );
+         DIP("xxsel v%d,v%d,v%d,v%d\n", (UInt)XT, (UInt)XA, (UInt)XB, (UInt)XC);
+         /* vD = (vA & ~vC) | (vB & vC) */
+         putVSReg( XT, binop(Iop_OrV128,
+            binop(Iop_AndV128, mkexpr(vA), unop(Iop_NotV128, mkexpr(vC))),
+            binop(Iop_AndV128, mkexpr(vB), mkexpr(vC))) );
+         break;
+      }
+      case 0x148: // xxspltw (VSX Splat Word)
+      {
+         UChar UIM   = ifieldRegA(theInstr) & 3;
+         /* Shift vB right so that word UIM lands in the low 32 bits,
+          * then replicate that word into all four lanes. */
+         UChar sh_uim = (3 - (UIM)) * 32;
+         DIP("xxspltw v%d,v%d,%d\n", (UInt)XT, (UInt)XB, UIM);
+         putVSReg( XT,
+                   unop( Iop_Dup32x4,
+                         unop( Iop_V128to32,
+                               binop( Iop_ShrV128, mkexpr( vB ), mkU8( sh_uim ) ) ) ) );
+         break;
+      }
+
+      default:
+         vex_printf( "dis_vx_permute_misc(ppc)(opc2)\n" );
+         return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Load Instructions
+*/
+/* Disassemble one AltiVec load (X-form, opc1 must be 0x1F, bit 0 clear).
+ * EA is (rA|0) + rB; the element loads use EA rounded down to a 16-byte
+ * boundary.  lvsl/lvsr are implemented via a dirty helper that writes
+ * the permute-control vector directly into guest state.  Returns False
+ * for unrecognised opc2 values. */
+static Bool dis_av_load ( const VexAbiInfo* vbi, UInt theInstr )
+{
+   /* X-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar vD_addr  = ifieldRegDS(theInstr);
+   UChar rA_addr  = ifieldRegA(theInstr);
+   UChar rB_addr  = ifieldRegB(theInstr);
+   UInt  opc2     = ifieldOPClo10(theInstr);
+   UChar b0       = ifieldBIT0(theInstr);
+
+   IRType ty         = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp EA         = newTemp(ty);
+   IRTemp EA_align16 = newTemp(ty);
+
+   if (opc1 != 0x1F || b0 != 0) {
+      vex_printf("dis_av_load(ppc)(instr)\n");
+      return False;
+   }
+
+   assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
+   assign( EA_align16, addr_align( mkexpr(EA), 16 ) );
+
+   switch (opc2) {
+
+   case 0x006: { // lvsl (Load Vector for Shift Left, AV p123)
+      IRDirty* d;
+      UInt vD_off = vectorGuestRegOffset(vD_addr);
+      /* Helper args: guest-state ptr, dest reg offset, EA & 0xF,
+       * shift direction (0 = left), and host endianness flag. */
+      IRExpr** args_be = mkIRExprVec_5(
+                         IRExpr_BBPTR(),
+                         mkU32(vD_off),
+                         binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
+                                          mkU32(0xF)),
+                         mkU32(0)/*left*/,
+                         mkU32(1)/*Big Endian*/);
+      IRExpr** args_le = mkIRExprVec_5(
+                         IRExpr_BBPTR(),
+                         mkU32(vD_off),
+                         binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
+                                          mkU32(0xF)),
+                         mkU32(0)/*left*/,
+                         mkU32(0)/*Little Endian*/);
+      if (!mode64) {
+         d = unsafeIRDirty_0_N (
+                        0/*regparms*/, 
+                        "ppc32g_dirtyhelper_LVS",
+                        fnptr_to_fnentry(vbi, &ppc32g_dirtyhelper_LVS),
+                        args_be );
+      } else {
+         if (host_endness == VexEndnessBE)
+            d = unsafeIRDirty_0_N (
+                           0/*regparms*/,
+                           "ppc64g_dirtyhelper_LVS",
+                           fnptr_to_fnentry(vbi, &ppc64g_dirtyhelper_LVS),
+                           args_be );
+         else
+            /* NOTE(review): the LE branch passes the raw function pointer
+             * without fnptr_to_fnentry, unlike the BE branch -- presumably
+             * because ppc64le does not use function descriptors; confirm. */
+            d = unsafeIRDirty_0_N (
+                           0/*regparms*/,
+                           "ppc64g_dirtyhelper_LVS",
+                           &ppc64g_dirtyhelper_LVS,
+                           args_le );
+      }
+      DIP("lvsl v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
+      /* declare guest state effects */
+      d->nFxState = 1;
+      vex_bzero(&d->fxState, sizeof(d->fxState));
+      d->fxState[0].fx     = Ifx_Write;
+      d->fxState[0].offset = vD_off;
+      d->fxState[0].size   = sizeof(U128);
+
+      /* execute the dirty call, side-effecting guest state */
+      stmt( IRStmt_Dirty(d) );
+      break;
+   }
+   case 0x026: { // lvsr (Load Vector for Shift Right, AV p125)
+      IRDirty* d;
+      UInt vD_off = vectorGuestRegOffset(vD_addr);
+      /* Same helper as lvsl, with the direction argument set to 1. */
+      IRExpr** args_be = mkIRExprVec_5(
+                             IRExpr_BBPTR(),
+                             mkU32(vD_off),
+                             binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
+                                              mkU32(0xF)),
+                             mkU32(1)/*right*/,
+                             mkU32(1)/*Big Endian*/);
+      IRExpr** args_le = mkIRExprVec_5(
+                             IRExpr_BBPTR(),
+                             mkU32(vD_off),
+                             binop(Iop_And32, mkNarrowTo32(ty, mkexpr(EA)),
+                                              mkU32(0xF)),
+                             mkU32(1)/*right*/,
+                             mkU32(0)/*Little Endian*/);
+
+      if (!mode64) {
+         d = unsafeIRDirty_0_N (
+                        0/*regparms*/,
+                        "ppc32g_dirtyhelper_LVS",
+                        fnptr_to_fnentry(vbi, &ppc32g_dirtyhelper_LVS),
+                        args_be );
+      } else {
+         if (host_endness == VexEndnessBE)
+            d = unsafeIRDirty_0_N (
+                           0/*regparms*/,
+                           "ppc64g_dirtyhelper_LVS",
+                           fnptr_to_fnentry(vbi, &ppc64g_dirtyhelper_LVS),
+                           args_be );
+         else
+            /* NOTE(review): raw fn pointer in LE branch -- see lvsl above. */
+            d = unsafeIRDirty_0_N (
+                           0/*regparms*/,
+                           "ppc64g_dirtyhelper_LVS",
+                           &ppc64g_dirtyhelper_LVS,
+                           args_le );
+      }
+      DIP("lvsr v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
+      /* declare guest state effects */
+      d->nFxState = 1;
+      vex_bzero(&d->fxState, sizeof(d->fxState));
+      d->fxState[0].fx     = Ifx_Write;
+      d->fxState[0].offset = vD_off;
+      d->fxState[0].size   = sizeof(U128);
+
+      /* execute the dirty call, side-effecting guest state */
+      stmt( IRStmt_Dirty(d) );
+      break;
+   }
+   case 0x007: // lvebx (Load Vector Element Byte Indexed, AV p119)
+      DIP("lvebx v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
+      /* loads addressed byte into vector[EA[0:3]
+         since all other destination bytes are undefined,
+         can simply load entire vector from 16-aligned EA */
+      putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
+      break;
+
+   case 0x027: // lvehx (Load Vector Element Half Word Indexed, AV p121)
+      DIP("lvehx v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
+      /* see note for lvebx */
+      putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
+      break;
+
+   case 0x047: // lvewx (Load Vector Element Word Indexed, AV p122)
+      DIP("lvewx v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
+      /* see note for lvebx */
+      putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
+      break;
+
+   case 0x067: // lvx (Load Vector Indexed, AV p127)
+      DIP("lvx v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
+      putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
+      break;
+
+   case 0x167: // lvxl (Load Vector Indexed LRU, AV p128)
+      DIP("lvxl v%d,r%u,r%u\n", vD_addr, rA_addr, rB_addr);
+      putVReg( vD_addr, load(Ity_V128, mkexpr(EA_align16)) );
+      break;
+
+   default:
+      vex_printf("dis_av_load(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Store Instructions
+*/
+/* Disassemble one AltiVec store (X-form, opc1 must be 0x1F, bit 0 clear).
+ * For element stores, eb is the element's byte offset within the 16-byte
+ * block (EA & 0xF) and idx is the bit count by which vS is shifted right
+ * so the addressed element ends up in the low-order bits before the
+ * narrowing store.  Returns False for unrecognised opc2 values. */
+static Bool dis_av_store ( UInt theInstr )
+{
+   /* X-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar vS_addr  = ifieldRegDS(theInstr);
+   UChar rA_addr  = ifieldRegA(theInstr);
+   UChar rB_addr  = ifieldRegB(theInstr);
+   UInt  opc2     = ifieldOPClo10(theInstr);
+   UChar b0       = ifieldBIT0(theInstr);
+
+   IRType ty           = mode64 ? Ity_I64 : Ity_I32;
+   IRTemp EA           = newTemp(ty);
+   IRTemp addr_aligned = newTemp(ty);
+   IRTemp vS           = newTemp(Ity_V128);
+   IRTemp eb           = newTemp(Ity_I8);
+   IRTemp idx          = newTemp(Ity_I8);
+
+   if (opc1 != 0x1F || b0 != 0) {
+      vex_printf("dis_av_store(ppc)(instr)\n");
+      return False;
+   }
+
+   assign( vS, getVReg(vS_addr));
+   assign( EA, ea_rAor0_idxd(rA_addr, rB_addr) );
+
+   switch (opc2) {
+   case 0x087: { // stvebx (Store Vector Byte Indexed, AV p131)
+      DIP("stvebx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
+      assign( eb, binop(Iop_And8, mkU8(0xF),
+                        unop(Iop_32to8,
+                             mkNarrowTo32(ty, mkexpr(EA)) )) );
+     /* LE: byte eb counts from the low end of vS; BE: from the high
+      * end, hence the 15-eb mirror.  Shift by eb*8 bits either way. */
+     if (host_endness == VexEndnessLE) {
+         assign( idx, binop(Iop_Shl8, mkexpr(eb), mkU8(3)) );
+      } else {
+         assign( idx, binop(Iop_Shl8,
+                            binop(Iop_Sub8, mkU8(15), mkexpr(eb)),
+                            mkU8(3)) );
+      }
+      store( mkexpr(EA),
+             unop( Iop_32to8, unop(Iop_V128to32,
+                   binop(Iop_ShrV128, mkexpr(vS), mkexpr(idx)))) );
+      break;
+   }
+   case 0x0A7: { // stvehx (Store Vector Half Word Indexed, AV p132)
+      DIP("stvehx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
+      /* EA is rounded down to a halfword boundary first. */
+      assign( addr_aligned, addr_align(mkexpr(EA), 2) );
+      assign( eb, binop(Iop_And8, mkU8(0xF),
+                        mkNarrowTo8(ty, mkexpr(addr_aligned) )) );
+      if (host_endness == VexEndnessLE) {
+          assign( idx, binop(Iop_Shl8, mkexpr(eb), mkU8(3)) );
+      } else {
+         /* 14 = 16 - sizeof(halfword): mirror offset of a 2-byte element. */
+         assign( idx, binop(Iop_Shl8,
+                            binop(Iop_Sub8, mkU8(14), mkexpr(eb)),
+                            mkU8(3)) );
+      }
+      store( mkexpr(addr_aligned),
+             unop( Iop_32to16, unop(Iop_V128to32,
+                   binop(Iop_ShrV128, mkexpr(vS), mkexpr(idx)))) );
+      break;
+   }
+   case 0x0C7: { // stvewx (Store Vector Word Indexed, AV p133)
+      DIP("stvewx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
+      /* EA is rounded down to a word boundary first. */
+      assign( addr_aligned, addr_align(mkexpr(EA), 4) );
+      assign( eb, binop(Iop_And8, mkU8(0xF),
+                        mkNarrowTo8(ty, mkexpr(addr_aligned) )) );
+      if (host_endness == VexEndnessLE) {
+         assign( idx, binop(Iop_Shl8, mkexpr(eb), mkU8(3)) );
+      } else {
+         /* 12 = 16 - sizeof(word): mirror offset of a 4-byte element. */
+         assign( idx, binop(Iop_Shl8,
+                            binop(Iop_Sub8, mkU8(12), mkexpr(eb)),
+                            mkU8(3)) );
+      }
+      store( mkexpr( addr_aligned),
+             unop( Iop_V128to32,
+                   binop(Iop_ShrV128, mkexpr(vS), mkexpr(idx))) );
+      break;
+   }
+
+   case 0x0E7: // stvx (Store Vector Indexed, AV p134)
+      DIP("stvx v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
+      store( addr_align( mkexpr(EA), 16 ), mkexpr(vS) );
+      break;
+
+   case 0x1E7: // stvxl (Store Vector Indexed LRU, AV p135)
+      DIP("stvxl v%d,r%u,r%u\n", vS_addr, rA_addr, rB_addr);
+      store( addr_align( mkexpr(EA), 16 ), mkexpr(vS) );
+      break;
+
+   default:
+      vex_printf("dis_av_store(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Arithmetic Instructions
+*/
+static Bool dis_av_arith ( UInt theInstr )
+{
+   /* VX-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar vD_addr  = ifieldRegDS(theInstr);
+   UChar vA_addr  = ifieldRegA(theInstr);
+   UChar vB_addr  = ifieldRegB(theInstr);
+   UInt  opc2     = IFIELD( theInstr, 0, 11 );
+
+   IRTemp vA = newTemp(Ity_V128);
+   IRTemp vB = newTemp(Ity_V128);
+   IRTemp z3 = newTemp(Ity_I64);
+   IRTemp z2 = newTemp(Ity_I64);
+   IRTemp z1 = newTemp(Ity_I64);
+   IRTemp z0 = newTemp(Ity_I64);
+   IRTemp aEvn, aOdd;
+   IRTemp a15, a14, a13, a12, a11, a10, a9, a8;
+   IRTemp a7, a6, a5, a4, a3, a2, a1, a0;
+   IRTemp b3, b2, b1, b0;
+
+   aEvn = aOdd = IRTemp_INVALID;
+   a15 = a14 = a13 = a12 = a11 = a10 = a9 = a8 = IRTemp_INVALID;
+   a7 = a6 = a5 = a4 = a3 = a2 = a1 = a0 = IRTemp_INVALID;
+   b3 = b2 = b1 = b0 = IRTemp_INVALID;
+
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_arith(ppc)(opc1 != 0x4)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   /* Add */
+   case 0x180: { // vaddcuw (Add Carryout Unsigned Word, AV p136)
+      DIP("vaddcuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      /* unsigned_ov(x+y) = (y >u not(x)) */
+      putVReg( vD_addr, binop(Iop_ShrN32x4,
+                              binop(Iop_CmpGT32Ux4, mkexpr(vB),
+                                    unop(Iop_NotV128, mkexpr(vA))),
+                              mkU8(31)) );
+      break;
+   }
+   case 0x000: // vaddubm (Add Unsigned Byte Modulo, AV p141)
+      DIP("vaddubm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Add8x16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x040: // vadduhm (Add Unsigned Half Word Modulo, AV p143)
+      DIP("vadduhm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Add16x8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x080: // vadduwm (Add Unsigned Word Modulo, AV p145)
+      DIP("vadduwm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Add32x4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x0C0: // vaddudm (Add Unsigned Double Word Modulo)
+      DIP("vaddudm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Add64x2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x200: // vaddubs (Add Unsigned Byte Saturate, AV p142)
+      DIP("vaddubs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QAdd8Ux16, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT], perhaps via new primop: Iop_SatOfQAdd8Ux16
+      break;
+
+   case 0x240: // vadduhs (Add Unsigned Half Word Saturate, AV p144)
+      DIP("vadduhs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QAdd16Ux8, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+   case 0x280: // vadduws (Add Unsigned Word Saturate, AV p146)
+      DIP("vadduws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QAdd32Ux4, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+   case 0x300: // vaddsbs (Add Signed Byte Saturate, AV p138)
+      DIP("vaddsbs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QAdd8Sx16, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+   case 0x340: // vaddshs (Add Signed Half Word Saturate, AV p139)
+      DIP("vaddshs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QAdd16Sx8, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+   case 0x380: // vaddsws (Add Signed Word Saturate, AV p140)
+      DIP("vaddsws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QAdd32Sx4, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+
+   /* Subtract */
+   case 0x580: { // vsubcuw (Subtract Carryout Unsigned Word, AV p260)
+      DIP("vsubcuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      /* unsigned_ov(x-y) = (y >u x) */
+      putVReg( vD_addr, binop(Iop_ShrN32x4,
+                              unop(Iop_NotV128,
+                                   binop(Iop_CmpGT32Ux4, mkexpr(vB),
+                                         mkexpr(vA))),
+                              mkU8(31)) );
+      break;
+   }     
+   case 0x400: // vsububm (Subtract Unsigned Byte Modulo, AV p265)
+      DIP("vsububm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Sub8x16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x440: // vsubuhm (Subtract Unsigned Half Word Modulo, AV p267)
+      DIP("vsubuhm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Sub16x8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x480: // vsubuwm (Subtract Unsigned Word Modulo, AV p269)
+      DIP("vsubuwm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Sub32x4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x4C0: // vsubudm (Subtract Unsigned Double Word Modulo)
+      DIP("vsubudm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Sub64x2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x600: // vsububs (Subtract Unsigned Byte Saturate, AV p266)
+      DIP("vsububs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QSub8Ux16, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+   case 0x640: // vsubuhs (Subtract Unsigned HWord Saturate, AV p268)
+      DIP("vsubuhs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QSub16Ux8, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+   case 0x680: // vsubuws (Subtract Unsigned Word Saturate, AV p270)
+      DIP("vsubuws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QSub32Ux4, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+   case 0x700: // vsubsbs (Subtract Signed Byte Saturate, AV p262)
+      DIP("vsubsbs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QSub8Sx16, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+   case 0x740: // vsubshs (Subtract Signed Half Word Saturate, AV p263)
+      DIP("vsubshs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QSub16Sx8, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+   case 0x780: // vsubsws (Subtract Signed Word Saturate, AV p264)
+      DIP("vsubsws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_QSub32Sx4, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      break;
+
+
+   /* Maximum */
+   case 0x002: // vmaxub (Maximum Unsigned Byte, AV p182)
+      DIP("vmaxub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Max8Ux16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x042: // vmaxuh (Maximum Unsigned Half Word, AV p183)
+      DIP("vmaxuh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Max16Ux8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x082: // vmaxuw (Maximum Unsigned Word, AV p184)
+      DIP("vmaxuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Max32Ux4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x0C2: // vmaxud (Maximum Unsigned Double word)
+      DIP("vmaxud v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Max64Ux2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x102: // vmaxsb (Maximum Signed Byte, AV p179)
+      DIP("vmaxsb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Max8Sx16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x142: // vmaxsh (Maximum Signed Half Word, AV p180)
+      DIP("vmaxsh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Max16Sx8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x182: // vmaxsw (Maximum Signed Word, AV p181)
+      DIP("vmaxsw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Max32Sx4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x1C2: // vmaxsd (Maximum Signed Double word)
+      DIP("vmaxsd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Max64Sx2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   /* Minimum */
+   case 0x202: // vminub (Minimum Unsigned Byte, AV p191)
+      DIP("vminub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Min8Ux16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x242: // vminuh (Minimum Unsigned Half Word, AV p192)
+      DIP("vminuh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Min16Ux8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x282: // vminuw (Minimum Unsigned Word, AV p193)
+      DIP("vminuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Min32Ux4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x2C2: // vminud (Minimum Unsigned Double Word)
+      DIP("vminud v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Min64Ux2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x302: // vminsb (Minimum Signed Byte, AV p188)
+      DIP("vminsb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Min8Sx16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x342: // vminsh (Minimum Signed Half Word, AV p189)
+      DIP("vminsh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Min16Sx8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x382: // vminsw (Minimum Signed Word, AV p190)
+      DIP("vminsw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Min32Sx4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x3C2: // vminsd (Minimum Signed Double Word)
+      DIP("vminsd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Min64Sx2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+
+   /* Average */
+   case 0x402: // vavgub (Average Unsigned Byte, AV p152)
+      DIP("vavgub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Avg8Ux16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x442: // vavguh (Average Unsigned Half Word, AV p153)
+      DIP("vavguh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Avg16Ux8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x482: // vavguw (Average Unsigned Word, AV p154)
+      DIP("vavguw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Avg32Ux4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x502: // vavgsb (Average Signed Byte, AV p149)
+      DIP("vavgsb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Avg8Sx16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x542: // vavgsh (Average Signed Half Word, AV p150)
+      DIP("vavgsh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Avg16Sx8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x582: // vavgsw (Average Signed Word, AV p151)
+      DIP("vavgsw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Avg32Sx4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+
+   /* Multiply */
+   case 0x008: // vmuloub (Multiply Odd Unsigned Byte, AV p213)
+      DIP("vmuloub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_MullEven8Ux16, mkexpr(vA), mkexpr(vB)));
+      break;
+
+   case 0x048: // vmulouh (Multiply Odd Unsigned Half Word, AV p214)
+      DIP("vmulouh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)));
+      break;
+
+   case 0x088: // vmulouw (Multiply Odd Unsigned Word)
+      DIP("vmulouw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop( Iop_MullEven32Ux4, mkexpr(vA), mkexpr(vB) ) );
+      break;
+
+   case 0x089: // vmuluwm (Multiply Unsigned Word Modulo)
+      DIP("vmuluwm v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop( Iop_Mul32x4, mkexpr(vA), mkexpr(vB) ) );
+      break;
+
+   case 0x108: // vmulosb (Multiply Odd Signed Byte, AV p211)
+      DIP("vmulosb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_MullEven8Sx16, mkexpr(vA), mkexpr(vB)));
+      break;
+
+   case 0x148: // vmulosh (Multiply Odd Signed Half Word, AV p212)
+      DIP("vmulosh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)));
+      break;
+
+   case 0x188: // vmulosw (Multiply Odd Signed Word)
+      DIP("vmulosw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop( Iop_MullEven32Sx4, mkexpr(vA), mkexpr(vB) ) );
+      break;
+
+   case 0x208: // vmuleub (Multiply Even Unsigned Byte, AV p209)
+      DIP("vmuleub v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, MK_Iop_MullOdd8Ux16( mkexpr(vA), mkexpr(vB) ));
+      break;
+
+   case 0x248: // vmuleuh (Multiply Even Unsigned Half Word, AV p210)
+      DIP("vmuleuh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, MK_Iop_MullOdd16Ux8( mkexpr(vA), mkexpr(vB) ));
+      break;
+
+   case 0x288: // vmuleuw (Multiply Even Unsigned Word)
+      DIP("vmuleuw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, MK_Iop_MullOdd32Ux4( mkexpr(vA), mkexpr(vB) ) );
+      break;
+
+   case 0x308: // vmulesb (Multiply Even Signed Byte, AV p207)
+      DIP("vmulesb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, MK_Iop_MullOdd8Sx16( mkexpr(vA), mkexpr(vB) ));
+      break;
+
+   case 0x348: // vmulesh (Multiply Even Signed Half Word, AV p208)
+      DIP("vmulesh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) ));
+      break;
+
+   case 0x388: // vmulesw (Multiply Even Signed Word)
+      DIP("vmulesw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, MK_Iop_MullOdd32Sx4( mkexpr(vA), mkexpr(vB) ) );
+      break;
+
+   /* Sum Across Partial */
+   case 0x608: { // vsum4ubs (Sum Partial (1/4) UB Saturate, AV p275)
+      IRTemp aEE, aEO, aOE, aOO;
+      aEE = aEO = aOE = aOO = IRTemp_INVALID;
+      DIP("vsum4ubs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+
+      /* vA: V128_8Ux16 -> 4 x V128_32Ux4, sign-extended */
+      expand8Ux16( mkexpr(vA), &aEvn, &aOdd ); // (15,13...),(14,12...)
+      expand16Ux8( mkexpr(aEvn), &aEE, &aEO ); // (15,11...),(13, 9...)
+      expand16Ux8( mkexpr(aOdd), &aOE, &aOO ); // (14,10...),(12, 8...)
+
+      /* break V128 to 4xI32's, zero-extending to I64's */
+      breakV128to4x64U( mkexpr(aEE), &a15, &a11, &a7, &a3 );
+      breakV128to4x64U( mkexpr(aOE), &a14, &a10, &a6, &a2 );
+      breakV128to4x64U( mkexpr(aEO), &a13, &a9,  &a5, &a1 );
+      breakV128to4x64U( mkexpr(aOO), &a12, &a8,  &a4, &a0 );
+      breakV128to4x64U( mkexpr(vB),  &b3,  &b2,  &b1, &b0 );
+
+      /* add lanes */
+      assign( z3, binop(Iop_Add64, mkexpr(b3),
+                     binop(Iop_Add64,
+                        binop(Iop_Add64, mkexpr(a15), mkexpr(a14)),
+                        binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) );
+      assign( z2, binop(Iop_Add64, mkexpr(b2),
+                     binop(Iop_Add64,
+                         binop(Iop_Add64, mkexpr(a11), mkexpr(a10)),
+                         binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) );
+      assign( z1, binop(Iop_Add64, mkexpr(b1),
+                     binop(Iop_Add64,
+                         binop(Iop_Add64, mkexpr(a7), mkexpr(a6)),
+                         binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) );
+      assign( z0, binop(Iop_Add64, mkexpr(b0),
+                     binop(Iop_Add64,
+                         binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
+                         binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
+      
+      /* saturate-narrow to 32bit, and combine to V128 */
+      putVReg( vD_addr, mkV128from4x64U( mkexpr(z3), mkexpr(z2),
+                                         mkexpr(z1), mkexpr(z0)) );
+      break;
+   }
+   case 0x708: { // vsum4sbs (Sum Partial (1/4) SB Saturate, AV p273)
+      IRTemp aEE, aEO, aOE, aOO;
+      aEE = aEO = aOE = aOO = IRTemp_INVALID;
+      DIP("vsum4sbs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+
+      /* vA: V128_8Sx16 -> 4 x V128_32Sx4, sign-extended */
+      expand8Sx16( mkexpr(vA), &aEvn, &aOdd ); // (15,13...),(14,12...)
+      expand16Sx8( mkexpr(aEvn), &aEE, &aEO ); // (15,11...),(13, 9...)
+      expand16Sx8( mkexpr(aOdd), &aOE, &aOO ); // (14,10...),(12, 8...)
+
+      /* break V128 to 4xI32's, sign-extending to I64's */
+      breakV128to4x64S( mkexpr(aEE), &a15, &a11, &a7, &a3 );
+      breakV128to4x64S( mkexpr(aOE), &a14, &a10, &a6, &a2 );
+      breakV128to4x64S( mkexpr(aEO), &a13, &a9,  &a5, &a1 );
+      breakV128to4x64S( mkexpr(aOO), &a12, &a8,  &a4, &a0 );
+      breakV128to4x64S( mkexpr(vB),  &b3,  &b2,  &b1, &b0 );
+
+      /* add lanes */
+      assign( z3, binop(Iop_Add64, mkexpr(b3),
+                     binop(Iop_Add64,
+                        binop(Iop_Add64, mkexpr(a15), mkexpr(a14)),
+                        binop(Iop_Add64, mkexpr(a13), mkexpr(a12)))) );
+      assign( z2, binop(Iop_Add64, mkexpr(b2),
+                     binop(Iop_Add64,
+                        binop(Iop_Add64, mkexpr(a11), mkexpr(a10)),
+                        binop(Iop_Add64, mkexpr(a9), mkexpr(a8)))) );
+      assign( z1, binop(Iop_Add64, mkexpr(b1),
+                     binop(Iop_Add64,
+                        binop(Iop_Add64, mkexpr(a7), mkexpr(a6)),
+                        binop(Iop_Add64, mkexpr(a5), mkexpr(a4)))) );
+      assign( z0, binop(Iop_Add64, mkexpr(b0),
+                     binop(Iop_Add64,
+                        binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
+                        binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
+      
+      /* saturate-narrow to 32bit, and combine to V128 */
+      putVReg( vD_addr, mkV128from4x64S( mkexpr(z3), mkexpr(z2),
+                                         mkexpr(z1), mkexpr(z0)) );
+      break;
+   }
+   case 0x648: { // vsum4shs (Sum Partial (1/4) SHW Saturate, AV p274)
+      DIP("vsum4shs v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+
+      /* vA: V128_16Sx8 -> 2 x V128_32Sx4, sign-extended */
+      expand16Sx8( mkexpr(vA), &aEvn, &aOdd ); // (7,5...),(6,4...)
+
+      /* break V128 to 4xI32's, sign-extending to I64's */
+      breakV128to4x64S( mkexpr(aEvn), &a7, &a5, &a3, &a1 );
+      breakV128to4x64S( mkexpr(aOdd), &a6, &a4, &a2, &a0 );
+      breakV128to4x64S( mkexpr(vB),   &b3, &b2, &b1, &b0 );
+
+      /* add lanes */
+      assign( z3, binop(Iop_Add64, mkexpr(b3),
+                        binop(Iop_Add64, mkexpr(a7), mkexpr(a6))));
+      assign( z2, binop(Iop_Add64, mkexpr(b2),
+                        binop(Iop_Add64, mkexpr(a5), mkexpr(a4))));
+      assign( z1, binop(Iop_Add64, mkexpr(b1),
+                        binop(Iop_Add64, mkexpr(a3), mkexpr(a2))));
+      assign( z0, binop(Iop_Add64, mkexpr(b0),
+                        binop(Iop_Add64, mkexpr(a1), mkexpr(a0))));
+
+      /* saturate-narrow to 32bit, and combine to V128 */
+      putVReg( vD_addr, mkV128from4x64S( mkexpr(z3), mkexpr(z2),
+                                         mkexpr(z1), mkexpr(z0)) );
+      break;
+   }
+   case 0x688: { // vsum2sws (Sum Partial (1/2) SW Saturate, AV p272)
+      DIP("vsum2sws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+
+      /* break V128 to 4xI32's, sign-extending to I64's */
+      breakV128to4x64S( mkexpr(vA), &a3, &a2, &a1, &a0 );
+      breakV128to4x64S( mkexpr(vB), &b3, &b2, &b1, &b0 );
+
+      /* add lanes */
+      assign( z2, binop(Iop_Add64, mkexpr(b2),
+                        binop(Iop_Add64, mkexpr(a3), mkexpr(a2))) );
+      assign( z0, binop(Iop_Add64, mkexpr(b0),
+                        binop(Iop_Add64, mkexpr(a1), mkexpr(a0))) );
+
+      /* saturate-narrow to 32bit, and combine to V128 */
+      putVReg( vD_addr, mkV128from4x64S( mkU64(0), mkexpr(z2),
+                                         mkU64(0), mkexpr(z0)) );
+      break;
+   }
+   case 0x788: { // vsumsws  (Sum SW Saturate, AV p271)
+      DIP("vsumsws v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+
+      /* break V128 to 4xI32's, sign-extending to I64's */
+      breakV128to4x64S( mkexpr(vA), &a3, &a2, &a1, &a0 );
+      breakV128to4x64S( mkexpr(vB), &b3, &b2, &b1, &b0 );
+
+      /* add lanes */
+      assign( z0, binop(Iop_Add64, mkexpr(b0),
+                     binop(Iop_Add64,
+                        binop(Iop_Add64, mkexpr(a3), mkexpr(a2)),
+                        binop(Iop_Add64, mkexpr(a1), mkexpr(a0)))) );
+
+      /* saturate-narrow to 32bit, and combine to V128 */
+      putVReg( vD_addr, mkV128from4x64S( mkU64(0), mkU64(0),
+                                         mkU64(0), mkexpr(z0)) );
+      break;
+   }
+   default:
+      vex_printf("dis_av_arith(ppc)(opc2=0x%x)\n", opc2);
+      return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Logic Instructions
+*/
+/* Disassemble one AltiVec bitwise-logic instruction (VX-Form) and emit
+   the equivalent IR:  vD := vA <op> vB  (128-bit bitwise operation).
+   Returns True on success, False if the instruction is unrecognised
+   (wrong primary opcode, or an opc2 not handled here). */
+static Bool dis_av_logic ( UInt theInstr )
+{
+   /* VX-Form */
+   UChar opc1    = ifieldOPC(theInstr);       /* primary opcode field */
+   UChar vD_addr = ifieldRegDS(theInstr);     /* destination vector reg */
+   UChar vA_addr = ifieldRegA(theInstr);      /* source vector reg A */
+   UChar vB_addr = ifieldRegB(theInstr);      /* source vector reg B */
+   UInt  opc2    = IFIELD( theInstr, 0, 11 ); /* extended opcode, low 11 bits */
+
+   IRTemp vA = newTemp(Ity_V128);
+   IRTemp vB = newTemp(Ity_V128);
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+
+   /* All AltiVec instructions carry primary opcode 0x4. */
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_logic(ppc)(opc1 != 0x4)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   case 0x404: // vand (And, AV p147)
+      DIP("vand v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_AndV128, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x444: // vandc (And with Complement, AV p148): vD = vA & ~vB
+      DIP("vandc v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_AndV128, mkexpr(vA),
+                              unop(Iop_NotV128, mkexpr(vB))) );
+      break;
+
+   case 0x484: // vor (Or, AV p217)
+      DIP("vor v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_OrV128, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x4C4: // vxor (Xor, AV p282)
+      DIP("vxor v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_XorV128, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x504: // vnor (Nor, AV p216): vD = ~(vA | vB)
+      DIP("vnor v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+         unop(Iop_NotV128, binop(Iop_OrV128, mkexpr(vA), mkexpr(vB))) );
+      break;
+
+   case 0x544: // vorc (vA Or'd with complement of vB): vD = vA | ~vB
+      DIP("vorc v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop( Iop_OrV128,
+                               mkexpr( vA ),
+                               unop( Iop_NotV128, mkexpr( vB ) ) ) );
+      break;
+
+   case 0x584: // vnand (Nand): vD = ~(vA & vB)
+      DIP("vnand v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, unop( Iop_NotV128,
+                              binop(Iop_AndV128, mkexpr( vA ),
+                              mkexpr( vB ) ) ) );
+      break;
+
+   case 0x684: // veqv (complemented XOr): vD = ~(vA ^ vB)
+      DIP("veqv v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, unop( Iop_NotV128,
+                              binop( Iop_XorV128, mkexpr( vA ),
+                              mkexpr( vB ) ) ) );
+      break;
+
+   default:
+      vex_printf("dis_av_logic(ppc)(opc2=0x%x)\n", opc2);
+      return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Compare Instructions
+*/
+/* Disassemble one AltiVec vector-compare instruction (VXR-Form) and emit
+   the equivalent IR.  Each lane of vD is set to all-ones where the
+   comparison of the corresponding lanes of vA and vB holds, all-zeroes
+   otherwise.  If the Rc bit (flag_rC) is set, CR field 6 is additionally
+   updated from the result via set_AV_CR6.  Returns True on success,
+   False if the instruction is unrecognised. */
+static Bool dis_av_cmp ( UInt theInstr )
+{
+   /* VXR-Form */
+   UChar opc1     = ifieldOPC(theInstr);      /* primary opcode field */
+   UChar vD_addr  = ifieldRegDS(theInstr);    /* destination vector reg */
+   UChar vA_addr  = ifieldRegA(theInstr);     /* source vector reg A */
+   UChar vB_addr  = ifieldRegB(theInstr);     /* source vector reg B */
+   UChar flag_rC  = ifieldBIT10(theInstr);    /* Rc: record result in CR6 */
+   UInt  opc2     = IFIELD( theInstr, 0, 10 ); /* extended opcode, low 10 bits */
+
+   IRTemp vA = newTemp(Ity_V128);
+   IRTemp vB = newTemp(Ity_V128);
+   IRTemp vD = newTemp(Ity_V128);  /* comparison result, written back below */
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+
+   /* All AltiVec instructions carry primary opcode 0x4. */
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_cmp(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   case 0x006: // vcmpequb (Compare Equal-to Unsigned B, AV p160)
+      DIP("vcmpequb%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpEQ8x16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x046: // vcmpequh (Compare Equal-to Unsigned HW, AV p161)
+      DIP("vcmpequh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpEQ16x8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x086: // vcmpequw (Compare Equal-to Unsigned W, AV p162)
+      DIP("vcmpequw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpEQ32x4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x0C7: // vcmpequd (Compare Equal-to Unsigned Doubleword)
+      DIP("vcmpequd%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpEQ64x2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x206: // vcmpgtub (Compare Greater-than Unsigned B, AV p168)
+      DIP("vcmpgtub%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGT8Ux16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x246: // vcmpgtuh (Compare Greater-than Unsigned HW, AV p169)
+      DIP("vcmpgtuh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGT16Ux8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x286: // vcmpgtuw (Compare Greater-than Unsigned W, AV p170)
+      DIP("vcmpgtuw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                       vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGT32Ux4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x2C7: // vcmpgtud (Compare Greater-than Unsigned double)
+      DIP("vcmpgtud%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGT64Ux2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x306: // vcmpgtsb (Compare Greater-than Signed B, AV p165)
+      DIP("vcmpgtsb%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                       vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGT8Sx16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x346: // vcmpgtsh (Compare Greater-than Signed HW, AV p166)
+      DIP("vcmpgtsh%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGT16Sx8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x386: // vcmpgtsw (Compare Greater-than Signed W, AV p167)
+      DIP("vcmpgtsw%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGT32Sx4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x3C7: // vcmpgtsd (Compare Greater-than Signed double)
+      DIP("vcmpgtsd%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGT64Sx2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   default:
+      vex_printf("dis_av_cmp(ppc)(opc2)\n");
+      return False;
+   }
+
+   /* Common write-back of the compare result ... */
+   putVReg( vD_addr, mkexpr(vD) );
+
+   /* ... and, for the record (Rc=1) forms, set CR6 from the result. */
+   if (flag_rC) {
+      set_AV_CR6( mkexpr(vD), True );
+   }
+   return True;
+}
+
+/*
+  AltiVec Multiply-Sum Instructions
+*/
+static Bool dis_av_multarith ( UInt theInstr )
+{
+   /* VA-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar vD_addr  = ifieldRegDS(theInstr);
+   UChar vA_addr  = ifieldRegA(theInstr);
+   UChar vB_addr  = ifieldRegB(theInstr);
+   UChar vC_addr  = ifieldRegC(theInstr);
+   UChar opc2     = toUChar( IFIELD( theInstr, 0, 6 ) );
+
+   IRTemp vA    = newTemp(Ity_V128);
+   IRTemp vB    = newTemp(Ity_V128);
+   IRTemp vC    = newTemp(Ity_V128);
+   IRTemp zeros = newTemp(Ity_V128);
+   IRTemp aLo   = newTemp(Ity_V128);
+   IRTemp bLo   = newTemp(Ity_V128);
+   IRTemp cLo   = newTemp(Ity_V128);
+   IRTemp zLo   = newTemp(Ity_V128);
+   IRTemp aHi   = newTemp(Ity_V128);
+   IRTemp bHi   = newTemp(Ity_V128);
+   IRTemp cHi   = newTemp(Ity_V128);
+   IRTemp zHi   = newTemp(Ity_V128);
+   IRTemp abEvn = newTemp(Ity_V128);
+   IRTemp abOdd = newTemp(Ity_V128);
+   IRTemp z3    = newTemp(Ity_I64);
+   IRTemp z2    = newTemp(Ity_I64);
+   IRTemp z1    = newTemp(Ity_I64);
+   IRTemp z0    = newTemp(Ity_I64);
+   IRTemp ab7, ab6, ab5, ab4, ab3, ab2, ab1, ab0;
+   IRTemp c3, c2, c1, c0;
+
+   ab7 = ab6 = ab5 = ab4 = ab3 = ab2 = ab1 = ab0 = IRTemp_INVALID;
+   c3 = c2 = c1 = c0 = IRTemp_INVALID;
+
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+   assign( vC, getVReg(vC_addr));
+   assign( zeros, unop(Iop_Dup32x4, mkU32(0)) );
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_multarith(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   /* Multiply-Add */
+   case 0x20: { // vmhaddshs (Mult Hi, Add Signed HW Saturate, AV p185)
+      IRTemp cSigns = newTemp(Ity_V128);
+      DIP("vmhaddshs v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+      assign(cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)));
+      assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)));
+      assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)));
+      assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns),mkexpr(vC)));
+      assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)));
+      assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)));
+      assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns),mkexpr(vC)));
+
+      assign( zLo, binop(Iop_Add32x4, mkexpr(cLo),
+                         binop(Iop_SarN32x4,
+                               binop(Iop_MullEven16Sx8,
+                                     mkexpr(aLo), mkexpr(bLo)),
+                               mkU8(15))) );
+
+      assign( zHi, binop(Iop_Add32x4, mkexpr(cHi),
+                         binop(Iop_SarN32x4,
+                               binop(Iop_MullEven16Sx8,
+                                     mkexpr(aHi), mkexpr(bHi)),
+                               mkU8(15))) );
+
+      putVReg( vD_addr,
+               binop(Iop_QNarrowBin32Sto16Sx8, mkexpr(zHi), mkexpr(zLo)) );
+      break;
+   }
+   case 0x21: { // vmhraddshs (Mult High Round, Add Signed HW Saturate, AV p186)
+      IRTemp zKonst = newTemp(Ity_V128);
+      IRTemp cSigns = newTemp(Ity_V128);
+      DIP("vmhraddshs v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+      assign(cSigns, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vC)) );
+      assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)));
+      assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)));
+      assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(cSigns),mkexpr(vC)));
+      assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)));
+      assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)));
+      assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(cSigns),mkexpr(vC)));
+
+      /* shifting our const avoids store/load version of Dup */
+      assign( zKonst, binop(Iop_ShlN32x4, unop(Iop_Dup32x4, mkU32(0x1)),
+                            mkU8(14)) );
+
+      assign( zLo, binop(Iop_Add32x4, mkexpr(cLo),
+                         binop(Iop_SarN32x4,
+                               binop(Iop_Add32x4, mkexpr(zKonst),
+                                     binop(Iop_MullEven16Sx8,
+                                           mkexpr(aLo), mkexpr(bLo))),
+                               mkU8(15))) );
+
+      assign( zHi, binop(Iop_Add32x4, mkexpr(cHi),
+                         binop(Iop_SarN32x4,
+                               binop(Iop_Add32x4, mkexpr(zKonst),
+                                     binop(Iop_MullEven16Sx8,
+                                           mkexpr(aHi), mkexpr(bHi))),
+                               mkU8(15))) );
+
+      putVReg( vD_addr,
+               binop(Iop_QNarrowBin32Sto16Sx8, mkexpr(zHi), mkexpr(zLo)) );
+      break;
+   }
+   case 0x22: { // vmladduhm (Mult Low, Add Unsigned HW Modulo, AV p194)
+      DIP("vmladduhm v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+      assign(aLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vA)));
+      assign(bLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vB)));
+      assign(cLo, binop(Iop_InterleaveLO16x8, mkexpr(zeros), mkexpr(vC)));
+      assign(aHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vA)));
+      assign(bHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vB)));
+      assign(cHi, binop(Iop_InterleaveHI16x8, mkexpr(zeros), mkexpr(vC)));
+      assign(zLo, binop(Iop_Add32x4,
+                     binop(Iop_MullEven16Ux8, mkexpr(aLo), mkexpr(bLo)),
+                     mkexpr(cLo)) );
+      assign(zHi, binop(Iop_Add32x4,
+                     binop(Iop_MullEven16Ux8, mkexpr(aHi), mkexpr(bHi)),
+                     mkexpr(cHi)));
+      putVReg( vD_addr,
+               binop(Iop_NarrowBin32to16x8, mkexpr(zHi), mkexpr(zLo)) );
+      break;
+   }
+
+
+   /* Multiply-Sum */
+   case 0x24: { // vmsumubm (Multiply Sum Unsigned B Modulo, AV p204)
+      IRTemp abEE, abEO, abOE, abOO;
+      abEE = abEO = abOE = abOO = IRTemp_INVALID;
+      DIP("vmsumubm v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+
+      /* multiply vA,vB (unsigned, widening) */
+      assign( abEvn, MK_Iop_MullOdd8Ux16( mkexpr(vA), mkexpr(vB) ));
+      assign( abOdd, binop(Iop_MullEven8Ux16, mkexpr(vA), mkexpr(vB)) );
+      
+      /* evn,odd: V128_16Ux8 -> 2 x V128_32Ux4, zero-extended */
+      expand16Ux8( mkexpr(abEvn), &abEE, &abEO );
+      expand16Ux8( mkexpr(abOdd), &abOE, &abOO );
+      
+      putVReg( vD_addr,
+         binop(Iop_Add32x4, mkexpr(vC),
+               binop(Iop_Add32x4,
+                     binop(Iop_Add32x4, mkexpr(abEE), mkexpr(abEO)),
+                     binop(Iop_Add32x4, mkexpr(abOE), mkexpr(abOO)))) );
+      break;
+   }
+   case 0x25: { // vmsummbm (Multiply Sum Mixed-Sign B Modulo, AV p201)
+      IRTemp aEvn, aOdd, bEvn, bOdd;
+      IRTemp abEE = newTemp(Ity_V128);
+      IRTemp abEO = newTemp(Ity_V128);
+      IRTemp abOE = newTemp(Ity_V128);
+      IRTemp abOO = newTemp(Ity_V128);
+      aEvn = aOdd = bEvn = bOdd = IRTemp_INVALID;
+      DIP("vmsummbm v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+
+      /* sign-extend vA, zero-extend vB, for mixed-sign multiply
+         (separating out adjacent lanes to different vectors) */
+      expand8Sx16( mkexpr(vA), &aEvn, &aOdd );
+      expand8Ux16( mkexpr(vB), &bEvn, &bOdd );
+
+      /* multiply vA, vB, again separating adjacent lanes */
+      assign( abEE, MK_Iop_MullOdd16Sx8( mkexpr(aEvn), mkexpr(bEvn) ));
+      assign( abEO, binop(Iop_MullEven16Sx8, mkexpr(aEvn), mkexpr(bEvn)) );
+      assign( abOE, MK_Iop_MullOdd16Sx8( mkexpr(aOdd), mkexpr(bOdd) ));
+      assign( abOO, binop(Iop_MullEven16Sx8, mkexpr(aOdd), mkexpr(bOdd)) );
+
+      /* add results together, + vC */
+      putVReg( vD_addr,
+         binop(Iop_QAdd32Sx4, mkexpr(vC),
+               binop(Iop_QAdd32Sx4,
+                     binop(Iop_QAdd32Sx4, mkexpr(abEE), mkexpr(abEO)),
+                     binop(Iop_QAdd32Sx4, mkexpr(abOE), mkexpr(abOO)))) );
+      break;
+   }
+   case 0x26: { // vmsumuhm (Multiply Sum Unsigned HW Modulo, AV p205)
+      DIP("vmsumuhm v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+      assign( abEvn, MK_Iop_MullOdd16Ux8( mkexpr(vA), mkexpr(vB) ));
+      assign( abOdd, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)) );
+      putVReg( vD_addr,
+         binop(Iop_Add32x4, mkexpr(vC),
+               binop(Iop_Add32x4, mkexpr(abEvn), mkexpr(abOdd))) );
+      break;
+   }
+   case 0x27: { // vmsumuhs (Multiply Sum Unsigned HW Saturate, AV p206)
+      DIP("vmsumuhs v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+      /* widening multiply, separating lanes */
+      assign( abEvn, MK_Iop_MullOdd16Ux8(mkexpr(vA), mkexpr(vB) ));
+      assign( abOdd, binop(Iop_MullEven16Ux8, mkexpr(vA), mkexpr(vB)) );
+
+      /* break V128 to 4xI32's, zero-extending to I64's */
+      breakV128to4x64U( mkexpr(abEvn), &ab7, &ab5, &ab3, &ab1 );
+      breakV128to4x64U( mkexpr(abOdd), &ab6, &ab4, &ab2, &ab0 );
+      breakV128to4x64U( mkexpr(vC),    &c3,  &c2,  &c1,  &c0  );
+
+      /* add lanes */
+      assign( z3, binop(Iop_Add64, mkexpr(c3),
+                        binop(Iop_Add64, mkexpr(ab7), mkexpr(ab6))));
+      assign( z2, binop(Iop_Add64, mkexpr(c2),
+                        binop(Iop_Add64, mkexpr(ab5), mkexpr(ab4))));
+      assign( z1, binop(Iop_Add64, mkexpr(c1),
+                        binop(Iop_Add64, mkexpr(ab3), mkexpr(ab2))));
+      assign( z0, binop(Iop_Add64, mkexpr(c0),
+                        binop(Iop_Add64, mkexpr(ab1), mkexpr(ab0))));
+
+      /* saturate-narrow to 32bit, and combine to V128 */
+      putVReg( vD_addr, mkV128from4x64U( mkexpr(z3), mkexpr(z2),
+                                         mkexpr(z1), mkexpr(z0)) );
+
+      break;
+   }
+   case 0x28: { // vmsumshm (Multiply Sum Signed HW Modulo, AV p202)
+      DIP("vmsumshm v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+      assign( abEvn, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) ));
+      assign( abOdd, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)) );
+      putVReg( vD_addr,
+         binop(Iop_Add32x4, mkexpr(vC),
+               binop(Iop_Add32x4, mkexpr(abOdd), mkexpr(abEvn))) );
+      break;
+   }
+   case 0x29: { // vmsumshs (Multiply Sum Signed HW Saturate, AV p203)
+      DIP("vmsumshs v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+      /* widening multiply, separating lanes */
+      assign( abEvn, MK_Iop_MullOdd16Sx8( mkexpr(vA), mkexpr(vB) ));
+      assign( abOdd, binop(Iop_MullEven16Sx8, mkexpr(vA), mkexpr(vB)) );
+
+      /* break V128 to 4xI32's, sign-extending to I64's */
+      breakV128to4x64S( mkexpr(abEvn), &ab7, &ab5, &ab3, &ab1 );
+      breakV128to4x64S( mkexpr(abOdd), &ab6, &ab4, &ab2, &ab0 );
+      breakV128to4x64S( mkexpr(vC),    &c3,  &c2,  &c1,  &c0  );
+
+      /* add lanes */
+      assign( z3, binop(Iop_Add64, mkexpr(c3),
+                        binop(Iop_Add64, mkexpr(ab7), mkexpr(ab6))));
+      assign( z2, binop(Iop_Add64, mkexpr(c2),
+                        binop(Iop_Add64, mkexpr(ab5), mkexpr(ab4))));
+      assign( z1, binop(Iop_Add64, mkexpr(c1),
+                        binop(Iop_Add64, mkexpr(ab3), mkexpr(ab2))));
+      assign( z0, binop(Iop_Add64, mkexpr(c0),
+                        binop(Iop_Add64, mkexpr(ab1), mkexpr(ab0))));
+
+      /* saturate-narrow to 32bit, and combine to V128 */
+      putVReg( vD_addr, mkV128from4x64S( mkexpr(z3), mkexpr(z2),
+                                         mkexpr(z1), mkexpr(z0)) );
+      break;
+   }
+   default:
+      vex_printf("dis_av_multarith(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Polynomial Multiply-Sum Instructions
+*/
+/* Disassemble one AltiVec polynomial (carry-less) multiply-sum
+   instruction: vpmsumb / vpmsumh / vpmsumw / vpmsumd.  Each encoding
+   maps directly onto a single VEX IR primop
+   (Iop_PolynomialMulAdd{8x16,16x8,32x4,64x2}).  Returns True iff the
+   instruction was recognised and IR was emitted; False tells the
+   caller the instruction is not handled here. */
+static Bool dis_av_polymultarith ( UInt theInstr )
+{
+   /* VA-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar vD_addr  = ifieldRegDS(theInstr);
+   UChar vA_addr  = ifieldRegA(theInstr);
+   UChar vB_addr  = ifieldRegB(theInstr);
+   UChar vC_addr  = ifieldRegC(theInstr);
+   UInt  opc2     = IFIELD(theInstr, 0, 11);   // 11-bit secondary opcode
+   IRTemp vA    = newTemp(Ity_V128);
+   IRTemp vB    = newTemp(Ity_V128);
+   IRTemp vC    = newTemp(Ity_V128);
+
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+   /* NOTE(review): vC_addr/vC are decoded and assigned but no case
+      below references them — the vpmsum* cases use only vA and vB.
+      Presumably harmless dead IR; confirm before removing. */
+   assign( vC, getVReg(vC_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_polymultarith(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+      /* Polynomial Multiply-Sum */
+      case 0x408:  // vpmsumb   Vector Polynomial Multiply-sum Byte
+         DIP("vpmsumb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+         putVReg( vD_addr, binop(Iop_PolynomialMulAdd8x16,
+                                 mkexpr(vA), mkexpr(vB)) );
+         break;
+      case 0x448:  // vpmsumd   Vector Polynomial Multiply-sum Double Word
+         DIP("vpmsumd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+         putVReg( vD_addr, binop(Iop_PolynomialMulAdd64x2,
+                                 mkexpr(vA), mkexpr(vB)) );
+         break;
+      case 0x488:  // vpmsumw   Vector Polynomial Multiply-sum Word
+         DIP("vpmsumw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+         putVReg( vD_addr, binop(Iop_PolynomialMulAdd32x4,
+                                 mkexpr(vA), mkexpr(vB)) );
+         break;
+      case 0x4C8:  // vpmsumh   Vector Polynomial Multiply-sum Half Word
+         DIP("vpmsumh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+         putVReg( vD_addr, binop(Iop_PolynomialMulAdd16x8,
+                                 mkexpr(vA), mkexpr(vB)) );
+         break;
+      default:
+         vex_printf("dis_av_polymultarith(ppc)(opc2=0x%x)\n", opc2);
+         return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Shift/Rotate Instructions
+*/
+/* Disassemble one AltiVec rotate/shift instruction (VX-form, major
+   opcode 0x4): per-lane rotates (vrl{b,h,w,d}), per-lane shifts
+   (vsl{b,h,w,d}, vsr{b,h,w,d}, vsra{b,h,w,d}) and whole-register
+   shifts (vsl, vslo, vsr, vsro).  Returns True iff the instruction
+   was recognised and IR was emitted. */
+static Bool dis_av_shift ( UInt theInstr )
+{
+   /* VX-Form */
+   UChar opc1    = ifieldOPC(theInstr);
+   UChar vD_addr = ifieldRegDS(theInstr);
+   UChar vA_addr = ifieldRegA(theInstr);
+   UChar vB_addr = ifieldRegB(theInstr);
+   UInt  opc2    = IFIELD( theInstr, 0, 11 );  // 11-bit secondary opcode
+
+   IRTemp vA = newTemp(Ity_V128);
+   IRTemp vB = newTemp(Ity_V128);
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+
+   if (opc1 != 0x4){
+      vex_printf("dis_av_shift(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   /* Rotate: per-lane rotate left, count taken per-lane from vB */
+   case 0x004: // vrlb (Rotate Left Integer B, AV p234)
+      DIP("vrlb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Rol8x16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x044: // vrlh (Rotate Left Integer HW, AV p235)
+      DIP("vrlh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Rol16x8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x084: // vrlw (Rotate Left Integer W, AV p236)
+      DIP("vrlw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Rol32x4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x0C4: // vrld (Rotate Left Integer Double Word)
+      DIP("vrld v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Rol64x2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+
+   /* Shift Left: per-lane logical shifts, count taken per-lane from vB */
+   case 0x104: // vslb (Shift Left Integer B, AV p240)
+      DIP("vslb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Shl8x16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x144: // vslh (Shift Left Integer HW, AV p242)
+      DIP("vslh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Shl16x8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x184: // vslw (Shift Left Integer W, AV p244)
+      DIP("vslw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Shl32x4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x5C4: // vsld (Shift Left Integer Double Word)
+      DIP("vsld v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Shl64x2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x1C4: { // vsl (Shift Left, AV p239)
+      /* Whole-register shift left: bit count is bits [2:0] of vB's
+         least significant byte.  NOTE(review): the AV spec makes the
+         result boundedly undefined unless every byte of vB holds the
+         same value; that is presumed, not checked, here. */
+      IRTemp sh = newTemp(Ity_I8);
+      DIP("vsl v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      assign( sh, binop(Iop_And8, mkU8(0x7),
+                        unop(Iop_32to8,
+                             unop(Iop_V128to32, mkexpr(vB)))) );
+      putVReg( vD_addr,
+               binop(Iop_ShlV128, mkexpr(vA), mkexpr(sh)) );
+      break;
+   }
+   case 0x40C: { // vslo (Shift Left by Octet, AV p243)
+      /* Whole-register shift left by bytes: masking with 0x78 keeps
+         bits [6:3] of vB's low byte, i.e. (octet count * 8) as a bit
+         count that is always a multiple of 8. */
+      IRTemp sh = newTemp(Ity_I8);
+      DIP("vslo v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      assign( sh, binop(Iop_And8, mkU8(0x78),
+                        unop(Iop_32to8,
+                             unop(Iop_V128to32, mkexpr(vB)))) );
+      putVReg( vD_addr,
+               binop(Iop_ShlV128, mkexpr(vA), mkexpr(sh)) );
+      break;
+   }
+
+
+   /* Shift Right: per-lane logical/arithmetic shifts, count from vB */
+   case 0x204: // vsrb (Shift Right B, AV p256)
+      DIP("vsrb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Shr8x16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x244: // vsrh (Shift Right HW, AV p257)
+      DIP("vsrh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Shr16x8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x284: // vsrw (Shift Right W, AV p259)
+      DIP("vsrw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Shr32x4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x2C4: { // vsr (Shift Right, AV p251)
+      /* Whole-register shift right; same low-3-bits convention (and
+         same all-lanes-equal presumption) as vsl above. */
+      IRTemp sh = newTemp(Ity_I8);
+      DIP("vsr v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      assign( sh, binop(Iop_And8, mkU8(0x7),
+                        unop(Iop_32to8,
+                             unop(Iop_V128to32, mkexpr(vB)))) );
+      putVReg( vD_addr,
+               binop(Iop_ShrV128, mkexpr(vA), mkexpr(sh)) );
+      break;
+   }
+   case 0x304: // vsrab (Shift Right Alg B, AV p253)
+      DIP("vsrab v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Sar8x16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x344: // vsrah (Shift Right Alg HW, AV p254)
+      DIP("vsrah v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Sar16x8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x384: // vsraw (Shift Right Alg W, AV p255)
+      DIP("vsraw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Sar32x4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x3C4: // vsrad (Shift Right Alg Double Word)
+      DIP("vsrad v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Sar64x2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x44C: { // vsro (Shift Right by Octet, AV p258)
+      /* Whole-register shift right by bytes; 0x78 mask as in vslo. */
+      IRTemp sh = newTemp(Ity_I8);
+      DIP("vsro v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      assign( sh, binop(Iop_And8, mkU8(0x78),
+                        unop(Iop_32to8,
+                             unop(Iop_V128to32, mkexpr(vB)))) );
+      putVReg( vD_addr,
+               binop(Iop_ShrV128, mkexpr(vA), mkexpr(sh)) );
+      break;
+   }
+
+   case 0x6C4: // vsrd (Shift Right Double Word)
+      DIP("vsrd v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Shr64x2, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+
+   default:
+      vex_printf("dis_av_shift(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Permute Instructions
+*/
+/* Disassemble one AltiVec permute-class instruction (major opcode
+   0x4).  Decoding is two-stage: first the 6-bit VA-form secondary
+   opcode is tried (vsel, vperm, vsldoi, vpermxor); if that does not
+   match, the secondary opcode is re-read as the full 11-bit VX-form
+   field (merge, splat, splat-immediate, vmrgow/vmrgew).  Returns True
+   iff the instruction was recognised and IR was emitted. */
+static Bool dis_av_permute ( UInt theInstr )
+{
+   /* VA-Form, VX-Form */
+   UChar opc1      = ifieldOPC(theInstr);
+   UChar vD_addr   = ifieldRegDS(theInstr);
+   UChar vA_addr   = ifieldRegA(theInstr);
+   UChar UIMM_5    = vA_addr;   // splat forms reuse the vA field as an immediate
+   UChar vB_addr   = ifieldRegB(theInstr);
+   UChar vC_addr   = ifieldRegC(theInstr);
+   UChar b10       = ifieldBIT10(theInstr);
+   UChar SHB_uimm4 = toUChar( IFIELD( theInstr, 6, 4 ) );   // vsldoi shift count
+   UInt  opc2      = toUChar( IFIELD( theInstr, 0, 6 ) );   // 6-bit VA-form opcode
+
+   UChar SIMM_8 = extend_s_5to8(UIMM_5);   // sign-extended 5-bit immediate
+
+   IRTemp vA = newTemp(Ity_V128);
+   IRTemp vB = newTemp(Ity_V128);
+   IRTemp vC = newTemp(Ity_V128);
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+   assign( vC, getVReg(vC_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_permute(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   case 0x2A: // vsel (Conditional Select, AV p238)
+      DIP("vsel v%d,v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr, vC_addr);
+      /* vD = (vA & ~vC) | (vB & vC) -- bitwise select, vC is the mask */
+      putVReg( vD_addr, binop(Iop_OrV128,
+         binop(Iop_AndV128, mkexpr(vA), unop(Iop_NotV128, mkexpr(vC))),
+         binop(Iop_AndV128, mkexpr(vB), mkexpr(vC))) );
+      return True;
+     
+   case 0x2B: { // vperm (Permute, AV p218)
+      /* limited to two args for IR, so have to play games: permute vA
+         and vB separately with the low nibble of each vC byte, then
+         use bit 4 of each vC byte to select between the two results. */
+      IRTemp a_perm  = newTemp(Ity_V128);
+      IRTemp b_perm  = newTemp(Ity_V128);
+      IRTemp mask    = newTemp(Ity_V128);
+      IRTemp vC_andF = newTemp(Ity_V128);
+      DIP("vperm v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vB_addr, vC_addr);
+      /* Limit the Perm8x16 steering values to 0 .. 15 as that is what
+         IR specifies, and also to hide irrelevant bits from
+         memcheck */
+      assign( vC_andF,
+              binop(Iop_AndV128, mkexpr(vC),
+                                 unop(Iop_Dup8x16, mkU8(0xF))) );
+      assign( a_perm,
+              binop(Iop_Perm8x16, mkexpr(vA), mkexpr(vC_andF)) );
+      assign( b_perm,
+              binop(Iop_Perm8x16, mkexpr(vB), mkexpr(vC_andF)) );
+      // mask[i8] = (vC[i8]_4 == 1) ? 0xFF : 0x0
+      assign( mask, binop(Iop_SarN8x16,
+                          binop(Iop_ShlN8x16, mkexpr(vC), mkU8(3)),
+                          mkU8(7)) );
+      // dst = (a & ~mask) | (b & mask)
+      putVReg( vD_addr, binop(Iop_OrV128,
+                              binop(Iop_AndV128, mkexpr(a_perm),
+                                    unop(Iop_NotV128, mkexpr(mask))),
+                              binop(Iop_AndV128, mkexpr(b_perm),
+                                    mkexpr(mask))) );
+      return True;
+   }
+   case 0x2C: // vsldoi (Shift Left Double by Octet Imm, AV p241)
+      if (b10 != 0) {
+         vex_printf("dis_av_permute(ppc)(vsldoi)\n");
+         return False;
+      }
+      DIP("vsldoi v%d,v%d,v%d,%d\n",
+          vD_addr, vA_addr, vB_addr, SHB_uimm4);
+      /* SHB == 0 is special-cased: the general form would shift vB
+         right by (16-0)*8 == 128 bits, which ShrV128 cannot express. */
+      if (SHB_uimm4 == 0)
+         putVReg( vD_addr, mkexpr(vA) );
+      else
+         putVReg( vD_addr,
+            binop(Iop_OrV128,
+                  binop(Iop_ShlV128, mkexpr(vA), mkU8(SHB_uimm4*8)),
+                  binop(Iop_ShrV128, mkexpr(vB), mkU8((16-SHB_uimm4)*8))) );
+      return True;
+   case 0x2D: {  // vpermxor (Vector Permute and Exclusive-OR)
+      IRTemp a_perm  = newTemp(Ity_V128);
+      IRTemp b_perm  = newTemp(Ity_V128);
+      IRTemp vrc_a   = newTemp(Ity_V128);
+      IRTemp vrc_b   = newTemp(Ity_V128);
+
+      /* IBM index  is 0:7, Change index value to index 7:0 --
+         high nibble of each vC byte steers the vA permute, low nibble
+         steers the vB permute; results are XORed together. */
+      assign( vrc_b, binop( Iop_AndV128, mkexpr( vC ),
+                            unop( Iop_Dup8x16, mkU8( 0xF ) ) ) );
+      assign( vrc_a, binop( Iop_ShrV128,
+                            binop( Iop_AndV128, mkexpr( vC ),
+                                   unop( Iop_Dup8x16, mkU8( 0xF0 ) ) ),
+                            mkU8 ( 4 ) ) );
+      assign( a_perm, binop( Iop_Perm8x16, mkexpr( vA ), mkexpr( vrc_a ) ) );
+      assign( b_perm, binop( Iop_Perm8x16, mkexpr( vB ), mkexpr( vrc_b ) ) );
+      putVReg( vD_addr, binop( Iop_XorV128,
+                               mkexpr( a_perm ), mkexpr( b_perm) ) );
+      return True;
+   }
+   default:
+     break; // Fall through...
+   }
+
+   /* No VA-form match: re-decode the secondary opcode as the full
+      11-bit VX-form field and try the merge/splat encodings. */
+   opc2 = IFIELD( theInstr, 0, 11 );
+   switch (opc2) {
+
+   /* Merge */
+   case 0x00C: // vmrghb (Merge High B, AV p195)
+      DIP("vmrghb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_InterleaveHI8x16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x04C: // vmrghh (Merge High HW, AV p196)
+      DIP("vmrghh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_InterleaveHI16x8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x08C: // vmrghw (Merge High W, AV p197)
+      DIP("vmrghw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_InterleaveHI32x4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x10C: // vmrglb (Merge Low B, AV p198)
+      DIP("vmrglb v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_InterleaveLO8x16, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x14C: // vmrglh (Merge Low HW, AV p199)
+      DIP("vmrglh v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_InterleaveLO16x8, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x18C: // vmrglw (Merge Low W, AV p200)
+      DIP("vmrglw v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_InterleaveLO32x4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+
+   /* Splat: lane indices count from the most significant end, so the
+      shift distance below is (lanes-1 - index) * lane_width bits. */
+   case 0x20C: { // vspltb (Splat Byte, AV p245)
+      /* vD = Dup8x16( vB[UIMM_5] ) */
+      UChar sh_uimm = (15 - (UIMM_5 & 15)) * 8;
+      DIP("vspltb v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
+      putVReg( vD_addr, unop(Iop_Dup8x16,
+           unop(Iop_32to8, unop(Iop_V128to32, 
+                binop(Iop_ShrV128, mkexpr(vB), mkU8(sh_uimm))))) );
+      break;
+   }
+   case 0x24C: { // vsplth (Splat Half Word, AV p246)
+      UChar sh_uimm = (7 - (UIMM_5 & 7)) * 16;
+      DIP("vsplth v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
+      putVReg( vD_addr, unop(Iop_Dup16x8,
+           unop(Iop_32to16, unop(Iop_V128to32, 
+                binop(Iop_ShrV128, mkexpr(vB), mkU8(sh_uimm))))) );
+      break;
+   }
+   case 0x28C: { // vspltw (Splat Word, AV p250)
+      /* vD = Dup32x4( vB[UIMM_5] ) */
+      UChar sh_uimm = (3 - (UIMM_5 & 3)) * 32;
+      DIP("vspltw v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
+      putVReg( vD_addr, unop(Iop_Dup32x4,
+         unop(Iop_V128to32,
+              binop(Iop_ShrV128, mkexpr(vB), mkU8(sh_uimm)))) );
+      break;
+   }
+   case 0x30C: // vspltisb (Splat Immediate Signed B, AV p247)
+      DIP("vspltisb v%d,%d\n", vD_addr, (Char)SIMM_8);
+      putVReg( vD_addr, unop(Iop_Dup8x16, mkU8(SIMM_8)) );
+      break;
+
+   case 0x34C: // vspltish (Splat Immediate Signed HW, AV p248)
+      DIP("vspltish v%d,%d\n", vD_addr, (Char)SIMM_8);
+      putVReg( vD_addr,
+               unop(Iop_Dup16x8, mkU16(extend_s_8to32(SIMM_8))) );
+      break;
+
+   case 0x38C: // vspltisw (Splat Immediate Signed W, AV p249)
+      DIP("vspltisw v%d,%d\n", vD_addr, (Char)SIMM_8);
+      putVReg( vD_addr,
+               unop(Iop_Dup32x4, mkU32(extend_s_8to32(SIMM_8))) );
+      break;
+
+   case 0x68C: // vmrgow (Merge Odd Word)
+     DIP("vmrgow v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      /*   VD[0] <- VA[1]
+           VD[1] <- VB[1]
+           VD[2] <- VA[3]
+           VD[3] <- VB[3]
+      */
+      putVReg( vD_addr,
+               binop(Iop_CatOddLanes32x4, mkexpr(vA), mkexpr(vB) ) );
+      break;
+
+   case 0x78C: // vmrgew (Merge Even Word)
+      DIP("vmrgew v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      /*   VD[0] <- VA[0]
+           VD[1] <- VB[0]
+           VD[2] <- VA[2]
+           VD[3] <- VB[2]
+      */
+      putVReg( vD_addr,
+               binop(Iop_CatEvenLanes32x4, mkexpr(vA), mkexpr(vB) ) );
+      break;
+
+   default:
+      vex_printf("dis_av_permute(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Pack/Unpack Instructions
+*/
+static Bool dis_av_pack ( UInt theInstr )
+{
+   /* VX-Form */
+   UChar opc1     = ifieldOPC(theInstr);
+   UChar vD_addr  = ifieldRegDS(theInstr);
+   UChar vA_addr  = ifieldRegA(theInstr);
+   UChar vB_addr  = ifieldRegB(theInstr);
+   UInt  opc2     = IFIELD( theInstr, 0, 11 );
+
+   IRTemp signs = IRTemp_INVALID;
+   IRTemp zeros = IRTemp_INVALID;
+   IRTemp vA    = newTemp(Ity_V128);
+   IRTemp vB    = newTemp(Ity_V128);
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_pack(ppc)(instr)\n");
+      return False;
+   }
+   switch (opc2) {
+   /* Packing */
+   case 0x00E: // vpkuhum (Pack Unsigned HW Unsigned Modulo, AV p224)
+      DIP("vpkuhum v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_NarrowBin16to8x16, mkexpr(vA), mkexpr(vB)) );
+      return True;
+
+   case 0x04E: // vpkuwum (Pack Unsigned W Unsigned Modulo, AV p226)
+      DIP("vpkuwum v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_NarrowBin32to16x8, mkexpr(vA), mkexpr(vB)) );
+      return True;
+
+   case 0x08E: // vpkuhus (Pack Unsigned HW Unsigned Saturate, AV p225)
+      DIP("vpkuhus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_QNarrowBin16Uto8Ux16, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      return True;
+
+   case 0x0CE: // vpkuwus (Pack Unsigned W Unsigned Saturate, AV p227)
+      DIP("vpkuwus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_QNarrowBin32Uto16Ux8, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      return True;
+
+   case 0x10E: { // vpkshus (Pack Signed HW Unsigned Saturate, AV p221)
+      // This insn does a signed->unsigned saturating conversion.
+      // Conversion done here, then uses unsigned->unsigned vpk insn:
+      //  => UnsignedSaturatingNarrow( x & ~ (x >>s 15) )
+      IRTemp vA_tmp = newTemp(Ity_V128);
+      IRTemp vB_tmp = newTemp(Ity_V128);
+      DIP("vpkshus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      assign( vA_tmp, binop(Iop_AndV128, mkexpr(vA),
+                            unop(Iop_NotV128,
+                                 binop(Iop_SarN16x8,
+                                       mkexpr(vA), mkU8(15)))) );
+      assign( vB_tmp, binop(Iop_AndV128, mkexpr(vB),
+                            unop(Iop_NotV128,
+                                 binop(Iop_SarN16x8,
+                                       mkexpr(vB), mkU8(15)))) );
+      putVReg( vD_addr, binop(Iop_QNarrowBin16Uto8Ux16,
+                              mkexpr(vA_tmp), mkexpr(vB_tmp)) );
+      // TODO: set VSCR[SAT]
+      return True;
+   }
+   case 0x14E: { // vpkswus (Pack Signed W Unsigned Saturate, AV p223)
+      // This insn does a signed->unsigned saturating conversion.
+      // Conversion done here, then uses unsigned->unsigned vpk insn:
+      //  => UnsignedSaturatingNarrow( x & ~ (x >>s 31) )
+      IRTemp vA_tmp = newTemp(Ity_V128);
+      IRTemp vB_tmp = newTemp(Ity_V128);
+      DIP("vpkswus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      assign( vA_tmp, binop(Iop_AndV128, mkexpr(vA),
+                            unop(Iop_NotV128,
+                                 binop(Iop_SarN32x4,
+                                       mkexpr(vA), mkU8(31)))) );
+      assign( vB_tmp, binop(Iop_AndV128, mkexpr(vB),
+                            unop(Iop_NotV128,
+                                 binop(Iop_SarN32x4,
+                                       mkexpr(vB), mkU8(31)))) );
+      putVReg( vD_addr, binop(Iop_QNarrowBin32Uto16Ux8,
+                              mkexpr(vA_tmp), mkexpr(vB_tmp)) );
+      // TODO: set VSCR[SAT]
+      return True;
+   }
+   case 0x18E: // vpkshss (Pack Signed HW Signed Saturate, AV p220)
+      DIP("vpkshss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_QNarrowBin16Sto8Sx16, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      return True;
+
+   case 0x1CE: // vpkswss (Pack Signed W Signed Saturate, AV p222)
+      DIP("vpkswss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_QNarrowBin32Sto16Sx8, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      return True;
+
+   case 0x30E: { // vpkpx (Pack Pixel, AV p219)
+      /* CAB: Worth a new primop? */
+      /* Using shifts to compact pixel elements, then packing them */
+      IRTemp a1 = newTemp(Ity_V128);
+      IRTemp a2 = newTemp(Ity_V128);
+      IRTemp a3 = newTemp(Ity_V128);
+      IRTemp a_tmp = newTemp(Ity_V128);
+      IRTemp b1 = newTemp(Ity_V128);
+      IRTemp b2 = newTemp(Ity_V128);
+      IRTemp b3 = newTemp(Ity_V128);
+      IRTemp b_tmp = newTemp(Ity_V128);
+      DIP("vpkpx v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      assign( a1, binop(Iop_ShlN16x8,
+                        binop(Iop_ShrN32x4, mkexpr(vA), mkU8(19)),
+                        mkU8(10)) );
+      assign( a2, binop(Iop_ShlN16x8, 
+                        binop(Iop_ShrN16x8, mkexpr(vA), mkU8(11)),
+                        mkU8(5)) );
+      assign( a3,  binop(Iop_ShrN16x8, 
+                         binop(Iop_ShlN16x8, mkexpr(vA), mkU8(8)),
+                         mkU8(11)) );
+      assign( a_tmp, binop(Iop_OrV128, mkexpr(a1),
+                           binop(Iop_OrV128, mkexpr(a2), mkexpr(a3))) );
+
+      assign( b1, binop(Iop_ShlN16x8,
+                        binop(Iop_ShrN32x4, mkexpr(vB), mkU8(19)),
+                        mkU8(10)) );
+      assign( b2, binop(Iop_ShlN16x8, 
+                        binop(Iop_ShrN16x8, mkexpr(vB), mkU8(11)),
+                        mkU8(5)) );
+      assign( b3,  binop(Iop_ShrN16x8, 
+                         binop(Iop_ShlN16x8, mkexpr(vB), mkU8(8)),
+                         mkU8(11)) );
+      assign( b_tmp, binop(Iop_OrV128, mkexpr(b1),
+                           binop(Iop_OrV128, mkexpr(b2), mkexpr(b3))) );
+
+      putVReg( vD_addr, binop(Iop_NarrowBin32to16x8,
+                              mkexpr(a_tmp), mkexpr(b_tmp)) );
+      return True;
+   }
+
+   case 0x44E: // vpkudum (Pack Unsigned Double Word Unsigned Modulo)
+      DIP("vpkudum v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_NarrowBin64to32x4, mkexpr(vA), mkexpr(vB)) );
+      return True;
+
+   case 0x4CE: // vpkudus (Pack Unsigned Double Word Unsigned Saturate)
+      DIP("vpkudus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_QNarrowBin64Uto32Ux4, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      return True;
+
+   case 0x54E: { // vpksdus (Pack Signed Double Word Unsigned Saturate)
+      // This insn does a doubled signed->double unsigned saturating conversion
+      // Conversion done here, then uses unsigned->unsigned vpk insn:
+      //  => UnsignedSaturatingNarrow( x & ~ (x >>s 31) )
+      // This is similar to the technique used for vpkswus, except done
+      // with double word integers versus word integers.
+      IRTemp vA_tmp = newTemp(Ity_V128);
+      IRTemp vB_tmp = newTemp(Ity_V128);
+      DIP("vpksdus v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      assign( vA_tmp, binop(Iop_AndV128, mkexpr(vA),
+                            unop(Iop_NotV128,
+                                 binop(Iop_SarN64x2,
+                                       mkexpr(vA), mkU8(63)))) );
+      assign( vB_tmp, binop(Iop_AndV128, mkexpr(vB),
+                            unop(Iop_NotV128,
+                                 binop(Iop_SarN64x2,
+                                       mkexpr(vB), mkU8(63)))) );
+      putVReg( vD_addr, binop(Iop_QNarrowBin64Uto32Ux4,
+                              mkexpr(vA_tmp), mkexpr(vB_tmp)) );
+      // TODO: set VSCR[SAT]
+      return True;
+   }
+
+   case 0x5CE: // vpksdss (Pack Signed double word Signed Saturate)
+      DIP("vpksdss v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr,
+               binop(Iop_QNarrowBin64Sto32Sx4, mkexpr(vA), mkexpr(vB)) );
+      // TODO: set VSCR[SAT]
+      return True;
+   default:
+      break; // Fall through...
+   }
+
+
+   if (vA_addr != 0) {
+      vex_printf("dis_av_pack(ppc)(vA_addr)\n");
+      return False;
+   }
+
+   signs = newTemp(Ity_V128);
+   zeros = newTemp(Ity_V128);
+   assign( zeros, unop(Iop_Dup32x4, mkU32(0)) );
+
+   switch (opc2) {
+   /* Unpacking */
+   case 0x20E: { // vupkhsb (Unpack High Signed B, AV p277)
+      DIP("vupkhsb v%d,v%d\n", vD_addr, vB_addr);
+      assign( signs, binop(Iop_CmpGT8Sx16, mkexpr(zeros), mkexpr(vB)) );
+      putVReg( vD_addr,
+               binop(Iop_InterleaveHI8x16, mkexpr(signs), mkexpr(vB)) );
+      break;
+   }
+   case 0x24E: { // vupkhsh (Unpack High Signed HW, AV p278)
+      DIP("vupkhsh v%d,v%d\n", vD_addr, vB_addr);
+      assign( signs, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vB)) );
+      putVReg( vD_addr,
+               binop(Iop_InterleaveHI16x8, mkexpr(signs), mkexpr(vB)) );
+      break;
+   }
+   case 0x28E: { // vupklsb (Unpack Low Signed B, AV p280)
+      DIP("vupklsb v%d,v%d\n", vD_addr, vB_addr);
+      assign( signs, binop(Iop_CmpGT8Sx16, mkexpr(zeros), mkexpr(vB)) );
+      putVReg( vD_addr,
+               binop(Iop_InterleaveLO8x16, mkexpr(signs), mkexpr(vB)) );
+      break;
+   }
+   case 0x2CE: { // vupklsh (Unpack Low Signed HW, AV p281)
+      DIP("vupklsh v%d,v%d\n", vD_addr, vB_addr);
+      assign( signs, binop(Iop_CmpGT16Sx8, mkexpr(zeros), mkexpr(vB)) );
+      putVReg( vD_addr,
+               binop(Iop_InterleaveLO16x8, mkexpr(signs), mkexpr(vB)) );
+      break;
+   }
+   case 0x34E: { // vupkhpx (Unpack High Pixel16, AV p276)
+      /* CAB: Worth a new primop? */
+      /* Using shifts to isolate pixel elements, then expanding them */
+      IRTemp z0  = newTemp(Ity_V128);
+      IRTemp z1  = newTemp(Ity_V128);
+      IRTemp z01 = newTemp(Ity_V128);
+      IRTemp z2  = newTemp(Ity_V128);
+      IRTemp z3  = newTemp(Ity_V128);
+      IRTemp z23 = newTemp(Ity_V128);
+      DIP("vupkhpx v%d,v%d\n", vD_addr, vB_addr);
+      assign( z0,  binop(Iop_ShlN16x8,
+                         binop(Iop_SarN16x8, mkexpr(vB), mkU8(15)),
+                         mkU8(8)) );
+      assign( z1,  binop(Iop_ShrN16x8, 
+                         binop(Iop_ShlN16x8, mkexpr(vB), mkU8(1)),
+                         mkU8(11)) );
+      assign( z01, binop(Iop_InterleaveHI16x8, mkexpr(zeros),
+                         binop(Iop_OrV128, mkexpr(z0), mkexpr(z1))) );
+      assign( z2,  binop(Iop_ShrN16x8,
+                         binop(Iop_ShlN16x8, 
+                               binop(Iop_ShrN16x8, mkexpr(vB), mkU8(5)),
+                               mkU8(11)),
+                         mkU8(3)) );
+      assign( z3,  binop(Iop_ShrN16x8, 
+                         binop(Iop_ShlN16x8, mkexpr(vB), mkU8(11)),
+                         mkU8(11)) );
+      assign( z23, binop(Iop_InterleaveHI16x8, mkexpr(zeros),
+                         binop(Iop_OrV128, mkexpr(z2), mkexpr(z3))) );
+      putVReg( vD_addr,
+               binop(Iop_OrV128,
+                     binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)),
+                     mkexpr(z23)) );
+      break;
+   }
+   case 0x3CE: { // vupklpx (Unpack Low Pixel16, AV p279)
+      /* identical to vupkhpx, except interleaving LO */
+      IRTemp z0  = newTemp(Ity_V128);
+      IRTemp z1  = newTemp(Ity_V128);
+      IRTemp z01 = newTemp(Ity_V128);
+      IRTemp z2  = newTemp(Ity_V128);
+      IRTemp z3  = newTemp(Ity_V128);
+      IRTemp z23 = newTemp(Ity_V128);
+      DIP("vupklpx v%d,v%d\n", vD_addr, vB_addr);
+      assign( z0,  binop(Iop_ShlN16x8,
+                         binop(Iop_SarN16x8, mkexpr(vB), mkU8(15)),
+                         mkU8(8)) );
+      assign( z1,  binop(Iop_ShrN16x8, 
+                         binop(Iop_ShlN16x8, mkexpr(vB), mkU8(1)),
+                         mkU8(11)) );
+      assign( z01, binop(Iop_InterleaveLO16x8, mkexpr(zeros),
+                         binop(Iop_OrV128, mkexpr(z0), mkexpr(z1))) );
+      assign( z2,  binop(Iop_ShrN16x8,
+                         binop(Iop_ShlN16x8, 
+                               binop(Iop_ShrN16x8, mkexpr(vB), mkU8(5)),
+                               mkU8(11)),
+                         mkU8(3)) );
+      assign( z3,  binop(Iop_ShrN16x8, 
+                         binop(Iop_ShlN16x8, mkexpr(vB), mkU8(11)),
+                         mkU8(11)) );
+      assign( z23, binop(Iop_InterleaveLO16x8, mkexpr(zeros),
+                         binop(Iop_OrV128, mkexpr(z2), mkexpr(z3))) );
+      putVReg( vD_addr,
+               binop(Iop_OrV128,
+                     binop(Iop_ShlN32x4, mkexpr(z01), mkU8(16)),
+                     mkexpr(z23)) );
+      break;
+   }
+   case 0x64E: { // vupkhsw (Unpack High Signed Word)
+      DIP("vupkhsw v%d,v%d\n", vD_addr, vB_addr);
+      assign( signs, binop(Iop_CmpGT32Sx4, mkexpr(zeros), mkexpr(vB)) );
+      putVReg( vD_addr,
+               binop(Iop_InterleaveHI32x4, mkexpr(signs), mkexpr(vB)) );
+      break;
+   }
+   case 0x6CE: { // vupklsw (Unpack Low Signed Word)
+      DIP("vupklsw v%d,v%d\n", vD_addr, vB_addr);
+      assign( signs, binop(Iop_CmpGT32Sx4, mkexpr(zeros), mkexpr(vB)) );
+      putVReg( vD_addr,
+               binop(Iop_InterleaveLO32x4, mkexpr(signs), mkexpr(vB)) );
+      break;
+   }
+   default:
+      vex_printf("dis_av_pack(ppc)(opc2)\n");
+      return False;
+   }
+   return True;
+}
+
+/*
+  AltiVec Cipher Instructions (Power ISA 2.07 in-core AES acceleration)
+*/
+/* Disassemble one AltiVec AES instruction (vcipher, vcipherlast,
+   vncipher, vncipherlast, vsbox) and translate it to the matching
+   VEX Iop_*Cipher*V128 primop.  Returns True iff the instruction was
+   recognised and translated. */
+static Bool dis_av_cipher ( UInt theInstr )
+{
+   /* VX-Form */
+   UChar opc1     = ifieldOPC(theInstr);       // primary opcode; must be 4
+   UChar vD_addr  = ifieldRegDS(theInstr);     // destination vector reg
+   UChar vA_addr  = ifieldRegA(theInstr);
+   UChar vB_addr  = ifieldRegB(theInstr);
+   UInt  opc2     = IFIELD( theInstr, 0, 11 ); // extended opcode, bits [0:10]
+
+   IRTemp vA    = newTemp(Ity_V128);
+   IRTemp vB    = newTemp(Ity_V128);
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_cipher(ppc)(instr)\n");
+      return False;
+   }
+   switch (opc2) {
+      case 0x508: // vcipher (Vector AES Cipher -- one forward round)
+         DIP("vcipher v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+         putVReg( vD_addr,
+                  binop(Iop_CipherV128, mkexpr(vA), mkexpr(vB)) );
+         return True;
+
+      case 0x509: // vcipherlast (Vector AES Cipher Last -- final forward round)
+         DIP("vcipherlast v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+         putVReg( vD_addr,
+                  binop(Iop_CipherLV128, mkexpr(vA), mkexpr(vB)) );
+         return True;
+
+      case 0x548: // vncipher (Vector AES Inverse Cipher -- one decrypt round)
+         DIP("vncipher v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+         putVReg( vD_addr,
+                  binop(Iop_NCipherV128, mkexpr(vA), mkexpr(vB)) );
+         return True;
+
+      case 0x549: // vncipherlast (Vector AES Inverse Cipher Last)
+         DIP("vncipherlast v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+         putVReg( vD_addr,
+                  binop(Iop_NCipherLV128, mkexpr(vA), mkexpr(vB)) );
+         return True;
+
+      case 0x5C8: /* vsbox (Vector SubBytes, this does the cipher
+       * subBytes transform)
+       */
+         /* Unary: only vA is used; vB is ignored for this encoding. */
+         DIP("vsbox v%d,v%d\n", vD_addr, vA_addr);
+         putVReg( vD_addr,
+                  unop(Iop_CipherSV128, mkexpr(vA) ) );
+         return True;
+
+      default:
+         vex_printf("dis_av_cipher(ppc)(opc2)\n");
+         return False;
+   }
+   /* Not reached: every case above returns. */
+   return True;
+}
+
+/*
+  AltiVec Secure Hash Instructions (Power ISA 2.07 SHA-2 acceleration)
+*/
+/* Disassemble vshasigmaw / vshasigmad and translate them to the VEX
+   Iop_SHA256 / Iop_SHA512 primops.  The 5-bit s_field (bits [11:15])
+   packs the ST bit (bit 15) and the 4-bit SIX field (bits [11:14]);
+   it is passed through unchanged for the backend to interpret.
+   Returns True iff the instruction was recognised and translated. */
+static Bool dis_av_hash ( UInt theInstr )
+{
+   /* VX-Form */
+   UChar opc1     = ifieldOPC(theInstr);        // primary opcode; must be 4
+   UChar vRT_addr = ifieldRegDS(theInstr);
+   UChar vRA_addr  = ifieldRegA(theInstr);
+   UChar s_field  = IFIELD( theInstr, 11, 5 );  // st and six field
+   UChar st       = IFIELD( theInstr, 15, 1 );  // st
+   UChar six      = IFIELD( theInstr, 11, 4 );  // six field
+   UInt  opc2     = IFIELD( theInstr, 0, 11 );
+
+   IRTemp vA    = newTemp(Ity_V128);
+   IRTemp dst    = newTemp(Ity_V128);  // only used by the vshasigmaw arm
+   assign( vA, getVReg(vRA_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_hash(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+      case 0x682:  // vshasigmaw (SHA-256 sigma function on word elements)
+         DIP("vshasigmaw v%d,v%d,%u,%u\n", vRT_addr, vRA_addr, st, six);
+         assign( dst, binop( Iop_SHA256, mkexpr( vA ), mkU8( s_field) ) );
+         putVReg( vRT_addr, mkexpr(dst));
+         return True;
+
+      case 0x6C2:  // vshasigmad (SHA-512 sigma function on doubleword elements)
+         DIP("vshasigmad v%d,v%d,%u,%u\n", vRT_addr, vRA_addr, st, six);
+         putVReg( vRT_addr, binop( Iop_SHA512, mkexpr( vA ), mkU8( s_field) ) );
+         return True;
+
+      default:
+         vex_printf("dis_av_hash(ppc)(opc2)\n");
+         return False;
+   }
+   /* Not reached: every case above returns. */
+   return True;
+}
+
+/*
+ * This function is used by the Vector add/subtract [extended] modulo/carry
+ * instructions.  It performs the 128-bit addition vecA + vecB + cin as four
+ * 32-bit limbs with carry propagation, and returns (as an IRTemp) either
+ * the V128 modulo result (modulo == True) or the final carry-out as an
+ * Ity_I32 value of 0 or 1 (modulo == False).
+ *   - For the non-extended add instructions, the cin arg is set to zero.
+ *   - For the extended add instructions, cin is the integer value of
+ *     src3.bit[127].
+ *   - For the non-extended subtract instructions, src1 is added to the one's
+ *     complement of src2 + 1.  We re-use the cin argument to hold the '1'
+ *     value for this operation.
+ *   - For the extended subtract instructions, cin is the integer value of src3.bit[127].
+ *
+ * cin is assumed to evaluate to 0 or 1; all callers satisfy this.
+ */
+static IRTemp _get_quad_modulo_or_carry(IRExpr * vecA, IRExpr * vecB,
+                                        IRExpr * cin, Bool modulo)
+{
+   IRTemp _vecA_32   = IRTemp_INVALID;
+   IRTemp _vecB_32   = IRTemp_INVALID;
+   IRTemp res_64     = IRTemp_INVALID;
+   IRTemp res_32     = IRTemp_INVALID;
+   IRTemp result     = IRTemp_INVALID;
+   IRTemp tmp_result = IRTemp_INVALID;
+   IRTemp carry      = IRTemp_INVALID;
+   Int i;
+   IRExpr * _vecA_low64 =  unop( Iop_V128to64, vecA );
+   IRExpr * _vecB_low64 =  unop( Iop_V128to64, vecB );
+   IRExpr * _vecA_high64 = unop( Iop_V128HIto64, vecA );
+   IRExpr * _vecB_high64 = unop( Iop_V128HIto64, vecB );
+
+   /* Walk the four 32-bit limbs from least to most significant. */
+   for (i = 0; i < 4; i++) {
+      _vecA_32 = newTemp(Ity_I32);
+      _vecB_32 = newTemp(Ity_I32);
+      res_64   = newTemp(Ity_I64);
+      res_32   = newTemp(Ity_I32);
+      switch (i) {
+      case 0:
+         assign(_vecA_32, unop( Iop_64to32, _vecA_low64 ) );
+         assign(_vecB_32, unop( Iop_64to32, _vecB_low64 ) );
+         break;
+      case 1:
+         assign(_vecA_32, unop( Iop_64HIto32, _vecA_low64 ) );
+         assign(_vecB_32, unop( Iop_64HIto32, _vecB_low64 ) );
+         break;
+      case 2:
+         assign(_vecA_32, unop( Iop_64to32, _vecA_high64 ) );
+         assign(_vecB_32, unop( Iop_64to32, _vecB_high64 ) );
+         break;
+      case 3:
+         assign(_vecA_32, unop( Iop_64HIto32, _vecA_high64 ) );
+         assign(_vecB_32, unop( Iop_64HIto32, _vecB_high64 ) );
+         break;
+      }
+
+      /* Do the limb add in 64 bits so that the carry-out is simply bit 32
+       * of the sum.  The previous formulation, carry = (res < vecA), was
+       * wrong when the carry-in wrapped the limb exactly back to vecA
+       * (e.g. vecA = vecB = 0xFFFFFFFF with carry-in 1 gives
+       * res = 0xFFFFFFFF, which is not < vecA, yet carry-out is 1).
+       * The carry-in for limb 0 is the caller's cin; thereafter it is the
+       * carry-out of the previous limb.
+       */
+      assign(res_64,
+             binop( Iop_Add64,
+                    binop( Iop_Add64,
+                           unop( Iop_32Uto64, mkexpr(_vecA_32) ),
+                           unop( Iop_32Uto64, mkexpr(_vecB_32) ) ),
+                    unop( Iop_32Uto64,
+                          (i == 0) ? cin : mkexpr(carry) ) ) );
+      assign(res_32, unop( Iop_64to32, mkexpr(res_64) ) );
+      if (modulo) {
+         /* OR this limb, shifted into position, into the accumulated
+          * 128-bit result. */
+         result = newTemp(Ity_V128);
+         assign(result, binop( Iop_OrV128,
+                              (i == 0) ? binop( Iop_64HLtoV128,
+                                                mkU64(0),
+                                                mkU64(0) ) : mkexpr(tmp_result),
+                              binop( Iop_ShlV128,
+                                     binop( Iop_64HLtoV128,
+                                            mkU64(0),
+                                            binop( Iop_32HLto64,
+                                                   mkU32(0),
+                                                   mkexpr(res_32) ) ),
+                                     mkU8(i * 32) ) ) );
+         tmp_result = newTemp(Ity_V128);
+         assign(tmp_result, mkexpr(result));
+      }
+      carry = newTemp(Ity_I32);
+      /* Each operand is <= 0xFFFFFFFF and the carry-in is 0 or 1, so the
+       * high half of res_64 is exactly the carry-out: 0 or 1. */
+      assign(carry, unop( Iop_64HIto32, mkexpr(res_64) ) );
+   }
+   if (modulo)
+      return result;
+   else
+      return carry;
+}
+
+
+/* Disassemble the quadword (128-bit) integer add/subtract modulo/carry
+   instructions (vadduqm, vaddcuq, vsubuqm, vsubcuq, their extended
+   vadde*/vsube* forms) and vbpermq.  Subtraction is implemented as
+   vA + ~vB + 1 via _get_quad_modulo_or_carry.  Returns True iff the
+   instruction was recognised and translated. */
+static Bool dis_av_quad ( UInt theInstr )
+{
+   /* VX-Form */
+   UChar opc1     = ifieldOPC(theInstr);       // primary opcode; must be 4
+   UChar vRT_addr = ifieldRegDS(theInstr);
+   UChar vRA_addr = ifieldRegA(theInstr);
+   UChar vRB_addr = ifieldRegB(theInstr);
+   UChar vRC_addr;                             // decoded later, VA-form only
+   UInt  opc2     = IFIELD( theInstr, 0, 11 ); // first tried as 11-bit VX opc2
+
+   IRTemp vA    = newTemp(Ity_V128);
+   IRTemp vB    = newTemp(Ity_V128);
+   IRTemp vC    = IRTemp_INVALID;
+   IRTemp cin    = IRTemp_INVALID;
+   assign( vA, getVReg(vRA_addr));
+   assign( vB, getVReg(vRB_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_quad(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   case 0x140:  // vaddcuq
+     DIP("vaddcuq v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
+     putVReg( vRT_addr, unop( Iop_32UtoV128,
+                              mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
+                                                               mkexpr(vB),
+                                                               mkU32(0), False) ) ) );
+     return True;
+   case 0x100: // vadduqm
+      DIP("vadduqm v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
+      putVReg( vRT_addr, mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
+                                                          mkexpr(vB), mkU32(0), True) ) );
+      return True;
+   case 0x540: // vsubcuq
+      /* Subtract carry: vA + ~vB + 1, keep only the carry-out. */
+      DIP("vsubcuq v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
+      putVReg( vRT_addr,
+               unop( Iop_32UtoV128,
+                     mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
+                                                      unop( Iop_NotV128,
+                                                            mkexpr(vB) ),
+                                                      mkU32(1), False) ) ) );
+      return True;
+   case 0x500: // vsubuqm
+      DIP("vsubuqm v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
+      putVReg( vRT_addr,
+               mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
+                                                unop( Iop_NotV128, mkexpr(vB) ),
+                                                mkU32(1), True) ) );
+      return True;
+   case 0x054C: // vbpermq
+   {
+#define BPERMD_IDX_MASK 0x00000000000000FFULL
+#define BPERMD_BIT_MASK 0x8000000000000000ULL
+      int i;
+      IRExpr * vB_expr = mkexpr(vB);
+      IRExpr * res = binop(Iop_AndV128, mkV128(0), mkV128(0));
+      DIP("vbpermq v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr);
+      /* For each of the 16 bytes of vB (presumably a bit index into vA --
+       * see ISA 2.07 vbpermq), select that bit of vA; indices >= 128 yield
+       * a zero bit.  vB_expr is shifted right one byte per iteration so
+       * the current index is always in the low byte. */
+      for (i = 0; i < 16; i++) {
+         IRTemp idx_tmp = newTemp( Ity_V128 );
+         IRTemp perm_bit = newTemp( Ity_V128 );
+         IRTemp idx = newTemp( Ity_I8 );
+         IRTemp idx_LT127 = newTemp( Ity_I1 );
+         IRTemp idx_LT127_ity128 = newTemp( Ity_V128 );
+
+         assign( idx_tmp,
+                 binop( Iop_AndV128,
+                        binop( Iop_64HLtoV128,
+                               mkU64(0),
+                               mkU64(BPERMD_IDX_MASK) ),
+                        vB_expr ) );
+         assign( idx_LT127,
+                 binop( Iop_CmpEQ32,
+                        unop ( Iop_64to32,
+                               unop( Iop_V128to64, binop( Iop_ShrV128,
+                                                          mkexpr(idx_tmp),
+                                                          mkU8(7) ) ) ),
+                        mkU32(0) ) );
+
+         /* Below, we set idx to determine which bit of vA to use for the
+          * perm bit.  If idx_LT127 is 0, the perm bit is forced to '0'.
+          */
+         assign( idx,
+                 binop( Iop_And8,
+                        unop( Iop_1Sto8,
+                              mkexpr(idx_LT127) ),
+                        unop( Iop_32to8,
+                              unop( Iop_V128to32, mkexpr( idx_tmp ) ) ) ) );
+
+         assign( idx_LT127_ity128,
+                 binop( Iop_64HLtoV128,
+                        mkU64(0),
+                        unop( Iop_32Uto64,
+                              unop( Iop_1Uto32, mkexpr(idx_LT127 ) ) ) ) );
+         assign( perm_bit,
+                 binop( Iop_AndV128,
+                        mkexpr( idx_LT127_ity128 ),
+                        binop( Iop_ShrV128,
+                               binop( Iop_AndV128,
+                                      binop (Iop_64HLtoV128,
+                                             mkU64( BPERMD_BIT_MASK ),
+                                             mkU64(0)),
+                                      binop( Iop_ShlV128,
+                                             mkexpr( vA ),
+                                             mkexpr( idx ) ) ),
+                               mkU8( 127 ) ) ) );
+         /* Deposit the selected bit at position 64+i of the result. */
+         res = binop( Iop_OrV128,
+                      res,
+                      binop( Iop_ShlV128,
+                             mkexpr( perm_bit ),
+                             mkU8( i + 64 ) ) );
+         vB_expr = binop( Iop_ShrV128, vB_expr, mkU8( 8 ) );
+      }
+      putVReg( vRT_addr, res);
+      return True;
+#undef BPERMD_IDX_MASK
+#undef BPERMD_BIT_MASK
+   }
+
+   default:
+      break;  // fall through
+   }
+
+   /* Not a VX-form encoding: re-decode as VA-form (4 operands) with a
+    * 6-bit extended opcode and a third source register vRC, whose
+    * bit[127] supplies the carry-in for the extended add/sub forms. */
+   opc2     = IFIELD( theInstr, 0, 6 );
+   vRC_addr = ifieldRegC(theInstr);
+   vC = newTemp(Ity_V128);
+   cin = newTemp(Ity_I32);
+   switch (opc2) {
+      case 0x3D: // vaddecuq
+         assign( vC, getVReg(vRC_addr));
+         DIP("vaddecuq v%d,v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr,
+             vRC_addr);
+         assign(cin, binop( Iop_And32,
+                            unop( Iop_64to32,
+                                  unop( Iop_V128to64, mkexpr(vC) ) ),
+                            mkU32(1) ) );
+         putVReg( vRT_addr,
+                  unop( Iop_32UtoV128,
+                        mkexpr(_get_quad_modulo_or_carry(mkexpr(vA), mkexpr(vB),
+                                                         mkexpr(cin),
+                                                         False) ) ) );
+         return True;
+      case 0x3C: // vaddeuqm
+         assign( vC, getVReg(vRC_addr));
+         DIP("vaddeuqm v%d,v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr,
+             vRC_addr);
+         assign(cin, binop( Iop_And32,
+                            unop( Iop_64to32,
+                                  unop( Iop_V128to64, mkexpr(vC) ) ),
+                            mkU32(1) ) );
+         putVReg( vRT_addr,
+                  mkexpr(_get_quad_modulo_or_carry(mkexpr(vA), mkexpr(vB),
+                                                   mkexpr(cin),
+                                                   True) ) );
+         return True;
+      case 0x3F: // vsubecuq
+         assign( vC, getVReg(vRC_addr));
+         DIP("vsubecuq v%d,v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr,
+             vRC_addr);
+         assign(cin, binop( Iop_And32,
+                            unop( Iop_64to32,
+                                  unop( Iop_V128to64, mkexpr(vC) ) ),
+                            mkU32(1) ) );
+         putVReg( vRT_addr,
+                  unop( Iop_32UtoV128,
+                        mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
+                                                         unop( Iop_NotV128,
+                                                               mkexpr(vB) ),
+                                                         mkexpr(cin),
+                                                         False) ) ) );
+         return True;
+      case 0x3E: // vsubeuqm
+         assign( vC, getVReg(vRC_addr));
+         DIP("vsubeuqm v%d,v%d,v%d,v%d\n", vRT_addr, vRA_addr, vRB_addr,
+             vRC_addr);
+         assign(cin, binop( Iop_And32,
+                            unop( Iop_64to32,
+                                  unop( Iop_V128to64, mkexpr(vC) ) ),
+                            mkU32(1) ) );
+         putVReg( vRT_addr,
+                  mkexpr(_get_quad_modulo_or_carry(mkexpr(vA),
+                                                   unop( Iop_NotV128, mkexpr(vB) ),
+                                                   mkexpr(cin),
+                                                   True) ) );
+         return True;
+      default:
+         vex_printf("dis_av_quad(ppc)(opc2.2)\n");
+         return False;
+   }
+
+   /* Not reached: every case above returns. */
+   return True;
+}
+
+
+/*
+  AltiVec BCD Arithmetic instructions (bcdadd., bcdsub.).
+  Architecturally these also set CR6 from properties of the result,
+  including overflow.  Overflow cannot be detected accurately here, so
+  rather than model CR6 partially (and wrongly), it is not modelled at
+  all for these instructions.
+*/
+/* Translate bcdadd./bcdsub. to Iop_BCDAdd/Iop_BCDSub.  Returns True
+   iff the instruction was recognised and translated. */
+static Bool dis_av_bcd ( UInt theInstr )
+{
+   /* VX-Form decode */
+   UChar opc1     = ifieldOPC(theInstr);      /* primary opcode; must be 4 */
+   UChar vRT_addr = ifieldRegDS(theInstr);
+   UChar vRA_addr = ifieldRegA(theInstr);
+   UChar vRB_addr = ifieldRegB(theInstr);
+   UChar ps       = IFIELD( theInstr, 9, 1 ); /* preferred-sign bit */
+   UInt  opc2     = IFIELD( theInstr, 0, 9 );
+
+   IRTemp srcA = newTemp(Ity_V128);
+   IRTemp srcB = newTemp(Ity_V128);
+   IRTemp res  = newTemp(Ity_V128);
+   assign( srcA, getVReg(vRA_addr));
+   assign( srcB, getVReg(vRB_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_bcd(ppc)(instr)\n");
+      return False;
+   }
+
+   if (opc2 == 0x1) {         /* bcdadd. */
+      DIP("bcdadd. v%d,v%d,v%d,%u\n", vRT_addr, vRA_addr, vRB_addr, ps);
+      assign( res, triop( Iop_BCDAdd, mkexpr( srcA ),
+                          mkexpr( srcB ), mkU8( ps ) ) );
+      putVReg( vRT_addr, mkexpr(res));
+      return True;
+   }
+
+   if (opc2 == 0x41) {        /* bcdsub. */
+      DIP("bcdsub. v%d,v%d,v%d,%u\n", vRT_addr, vRA_addr, vRB_addr, ps);
+      assign( res, triop( Iop_BCDSub, mkexpr( srcA ),
+                          mkexpr( srcB ), mkU8( ps ) ) );
+      putVReg( vRT_addr, mkexpr(res));
+      return True;
+   }
+
+   vex_printf("dis_av_bcd(ppc)(opc2)\n");
+   return False;
+}
+
+/*
+  AltiVec Floating Point Arithmetic Instructions
+*/
+/* Disassemble an AltiVec FP arithmetic instruction.  These translations
+   round to nearest unconditionally (Irrm_NEAREST on every rounded
+   primop); the guest FPSCR rounding mode is not consulted.  Returns
+   True iff the instruction was recognised and translated. */
+static Bool dis_av_fp_arith ( UInt theInstr )
+{
+   /* VA-Form */
+   UChar opc1     = ifieldOPC(theInstr);   // primary opcode; must be 4
+   UChar vD_addr  = ifieldRegDS(theInstr);
+   UChar vA_addr  = ifieldRegA(theInstr);
+   UChar vB_addr  = ifieldRegB(theInstr);
+   UChar vC_addr  = ifieldRegC(theInstr);
+   UInt  opc2=0;
+
+   IRTemp vA = newTemp(Ity_V128);
+   IRTemp vB = newTemp(Ity_V128);
+   IRTemp vC = newTemp(Ity_V128);
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+   assign( vC, getVReg(vC_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_fp_arith(ppc)(instr)\n");
+      return False;
+   }
+
+   /* First try the VA-form (4-operand) encodings: opc2 = bits [0:5]. */
+   opc2 = IFIELD( theInstr, 0, 6 );
+   switch (opc2) {
+   case 0x2E: // vmaddfp (Multiply Add FP, AV p177): vD = vB + vA*vC
+      DIP("vmaddfp v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vC_addr, vB_addr);
+      putVReg( vD_addr,
+               triop(Iop_Add32Fx4, mkU32(Irrm_NEAREST),
+                     mkexpr(vB),
+                     triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
+                           mkexpr(vA), mkexpr(vC))) );
+      return True;
+
+   case 0x2F: { // vnmsubfp (Negative Multiply-Subtract FP, AV p215):
+                // vD = vB - vA*vC
+      DIP("vnmsubfp v%d,v%d,v%d,v%d\n",
+          vD_addr, vA_addr, vC_addr, vB_addr);
+      putVReg( vD_addr,
+               triop(Iop_Sub32Fx4, mkU32(Irrm_NEAREST),
+                     mkexpr(vB),
+                     triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
+                           mkexpr(vA), mkexpr(vC))) );
+      return True;
+   }
+
+   default:
+     break; // Fall through...
+   }
+
+   /* Now the VX-form (3-operand) encodings: opc2 = bits [0:10]. */
+   opc2 = IFIELD( theInstr, 0, 11 );
+   switch (opc2) {
+   case 0x00A: // vaddfp (Add FP, AV p137)
+      DIP("vaddfp v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, triop(Iop_Add32Fx4,
+                              mkU32(Irrm_NEAREST), mkexpr(vA), mkexpr(vB)) );
+      return True;
+
+   case 0x04A: // vsubfp (Subtract FP, AV p261)
+      DIP("vsubfp v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, triop(Iop_Sub32Fx4,
+                              mkU32(Irrm_NEAREST), mkexpr(vA), mkexpr(vB)) );
+      return True;
+
+   case 0x40A: // vmaxfp (Maximum FP, AV p178)
+      DIP("vmaxfp v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Max32Fx4, mkexpr(vA), mkexpr(vB)) );
+      return True;
+
+   case 0x44A: // vminfp (Minimum FP, AV p187)
+      DIP("vminfp v%d,v%d,v%d\n", vD_addr, vA_addr, vB_addr);
+      putVReg( vD_addr, binop(Iop_Min32Fx4, mkexpr(vA), mkexpr(vB)) );
+      return True;
+
+   default:
+      break; // Fall through...
+   }
+
+   /* Remaining encodings are unary estimates: vA must be 0. */
+   if (vA_addr != 0) {
+      vex_printf("dis_av_fp_arith(ppc)(vA_addr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   case 0x10A: // vrefp (Reciprocal Estimate FP, AV p228)
+      DIP("vrefp v%d,v%d\n", vD_addr, vB_addr);
+      putVReg( vD_addr, unop(Iop_RecipEst32Fx4, mkexpr(vB)) );
+      return True;
+
+   case 0x14A: // vrsqrtefp (Reciprocal Sqrt Estimate FP, AV p237)
+      DIP("vrsqrtefp v%d,v%d\n", vD_addr, vB_addr);
+      putVReg( vD_addr, unop(Iop_RSqrtEst32Fx4, mkexpr(vB)) );
+      return True;
+
+   case 0x18A: // vexptefp (2 Raised to the Exp Est FP, AV p173)
+      DIP("vexptefp v%d,v%d\n", vD_addr, vB_addr);
+      DIP(" => not implemented\n");
+      return False;
+
+   case 0x1CA: // vlogefp (Log2 Estimate FP, AV p175)
+      DIP("vlogefp v%d,v%d\n", vD_addr, vB_addr);
+      DIP(" => not implemented\n");
+      return False;
+
+   default:
+      vex_printf("dis_av_fp_arith(ppc)(opc2=0x%x)\n",opc2);
+      return False;
+   }
+   /* Not reached: every case above returns. */
+   return True;
+}
+
+/*
+  AltiVec Floating Point Compare Instructions
+*/
+/* Disassemble an AltiVec FP compare.  Each compare writes a per-lane
+   mask (all-ones for true, all-zeros for false) to vD; with Rc=1 the
+   mask is additionally summarised into CR6 via set_AV_CR6.  Returns
+   True iff the instruction was recognised and translated. */
+static Bool dis_av_fp_cmp ( UInt theInstr )
+{
+   /* VXR-Form */
+   UChar opc1     = ifieldOPC(theInstr);       // primary opcode; must be 4
+   UChar vD_addr  = ifieldRegDS(theInstr);
+   UChar vA_addr  = ifieldRegA(theInstr);
+   UChar vB_addr  = ifieldRegB(theInstr);
+   UChar flag_rC  = ifieldBIT10(theInstr);     // Rc: record result in CR6
+   UInt  opc2     = IFIELD( theInstr, 0, 10 );
+
+   Bool cmp_bounds = False;  // True only for vcmpbfp (changes CR6 meaning)
+
+   IRTemp vA = newTemp(Ity_V128);
+   IRTemp vB = newTemp(Ity_V128);
+   IRTemp vD = newTemp(Ity_V128);
+   assign( vA, getVReg(vA_addr));
+   assign( vB, getVReg(vB_addr));
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_fp_cmp(ppc)(instr)\n");
+      return False;
+   }
+
+   switch (opc2) {
+   case 0x0C6: // vcmpeqfp (Compare Equal-to FP, AV p159)
+      DIP("vcmpeqfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpEQ32Fx4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x1C6: // vcmpgefp (Compare Greater-than-or-Equal-to, AV p163)
+      DIP("vcmpgefp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGE32Fx4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x2C6: // vcmpgtfp (Compare Greater-than FP, AV p164)
+      DIP("vcmpgtfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                      vD_addr, vA_addr, vB_addr);
+      assign( vD, binop(Iop_CmpGT32Fx4, mkexpr(vA), mkexpr(vB)) );
+      break;
+
+   case 0x3C6: { // vcmpbfp (Compare Bounds FP, AV p157)
+      /* Per lane, set bit 31 if !(vA <= vB) and bit 30 if !(vA >= -vB),
+       * i.e. both bits clear iff vA is within [-vB, vB]. */
+      IRTemp gt      = newTemp(Ity_V128);
+      IRTemp lt      = newTemp(Ity_V128);
+      IRTemp zeros   = newTemp(Ity_V128);
+      DIP("vcmpbfp%s v%d,v%d,v%d\n", (flag_rC ? ".":""),
+                                     vD_addr, vA_addr, vB_addr);
+      cmp_bounds = True;
+      assign( zeros,   unop(Iop_Dup32x4, mkU32(0)) );
+
+      /* Note: making use of fact that the ppc backend for compare insns
+         return zero'd lanes if either of the corresponding arg lanes is
+         a nan.
+
+         Perhaps better to have an irop Iop_isNan32Fx4, but then we'd
+         need this for the other compares too (vcmpeqfp etc)...
+         Better still, tighten down the spec for compare irops.
+       */
+      assign( gt, unop(Iop_NotV128,
+                       binop(Iop_CmpLE32Fx4, mkexpr(vA), mkexpr(vB))) );
+      assign( lt, unop(Iop_NotV128,
+                       binop(Iop_CmpGE32Fx4, mkexpr(vA),
+                             triop(Iop_Sub32Fx4, mkU32(Irrm_NEAREST),
+                                   mkexpr(zeros),
+                                   mkexpr(vB)))) );
+
+      // finally, just shift gt,lt to correct position
+      assign( vD, binop(Iop_ShlN32x4,
+                        binop(Iop_OrV128,
+                              binop(Iop_AndV128, mkexpr(gt),
+                                    unop(Iop_Dup32x4, mkU32(0x2))),
+                              binop(Iop_AndV128, mkexpr(lt),
+                                    unop(Iop_Dup32x4, mkU32(0x1)))),
+                        mkU8(30)) );
+      break;
+   }
+
+   default:
+      vex_printf("dis_av_fp_cmp(ppc)(opc2)\n");
+      return False;
+   }
+
+   putVReg( vD_addr, mkexpr(vD) );
+
+   if (flag_rC) {
+      set_AV_CR6( mkexpr(vD), !cmp_bounds );
+   }
+   return True;
+}
+
+/*
+  AltiVec Floating Point Convert/Round Instructions
+*/
+/* Translate the AltiVec FP <-> fixed-point conversions (scaled by
+   2^UIMM) and the four round-to-FP-integer instructions.  Returns
+   True iff the instruction was recognised and translated. */
+static Bool dis_av_fp_convert ( UInt theInstr )
+{
+   /* VX-Form */
+   UChar opc1     = ifieldOPC(theInstr);     /* primary opcode; must be 4 */
+   UChar vD_addr  = ifieldRegDS(theInstr);
+   UChar UIMM_5   = ifieldRegA(theInstr);    /* scale exponent */
+   UChar vB_addr  = ifieldRegB(theInstr);
+   UInt  opc2     = IFIELD( theInstr, 0, 11 );
+
+   IRTemp vB        = newTemp(Ity_V128);
+   IRTemp scaleV    = newTemp(Ity_V128);
+   IRTemp invScaleV = newTemp(Ity_V128);
+
+   float scale, inv_scale;
+
+   assign( vB, getVReg(vB_addr));
+
+   /* scale = 2^UIMM as a float; splat its bit pattern across all four
+      lanes, and likewise for 1/scale. */
+   scale = (float)( (unsigned int) 1<<UIMM_5 );
+   assign( scaleV, unop(Iop_Dup32x4, mkU32( float_to_bits(scale) )) );
+   inv_scale = 1/scale;
+   assign( invScaleV,
+           unop(Iop_Dup32x4, mkU32( float_to_bits(inv_scale) )) );
+
+   if (opc1 != 0x4) {
+      vex_printf("dis_av_fp_convert(ppc)(instr)\n");
+      return False;
+   }
+
+   if (opc2 == 0x30A) { /* vcfux (Convert from Unsigned Fixed-Point W, AV p156) */
+      DIP("vcfux v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
+      putVReg( vD_addr, triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
+                              unop(Iop_I32UtoFx4, mkexpr(vB)),
+                              mkexpr(invScaleV)) );
+      return True;
+   }
+
+   if (opc2 == 0x34A) { /* vcfsx (Convert from Signed Fixed-Point W, AV p155) */
+      DIP("vcfsx v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
+      putVReg( vD_addr, triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
+                              unop(Iop_I32StoFx4, mkexpr(vB)),
+                              mkexpr(invScaleV)) );
+      return True;
+   }
+
+   if (opc2 == 0x38A) { /* vctuxs (Convert to Unsigned Fixed-Point W Saturate, AV p172) */
+      DIP("vctuxs v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
+      putVReg( vD_addr,
+               unop(Iop_QFtoI32Ux4_RZ,
+                    triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
+                          mkexpr(vB), mkexpr(scaleV))) );
+      return True;
+   }
+
+   if (opc2 == 0x3CA) { /* vctsxs (Convert to Signed Fixed-Point W Saturate, AV p171) */
+      DIP("vctsxs v%d,v%d,%d\n", vD_addr, vB_addr, UIMM_5);
+      putVReg( vD_addr,
+               unop(Iop_QFtoI32Sx4_RZ,
+                    triop(Iop_Mul32Fx4, mkU32(Irrm_NEAREST),
+                          mkexpr(vB), mkexpr(scaleV))) );
+      return True;
+   }
+
+   /* The remaining encodings are the vrfi* rounding forms, for which
+      the UIMM field must be zero. */
+   if (UIMM_5 != 0) {
+      vex_printf("dis_av_fp_convert(ppc)(UIMM_5)\n");
+      return False;
+   }
+
+   if (opc2 == 0x20A) { /* vrfin (Round to FP Integer Nearest, AV p231) */
+      DIP("vrfin v%d,v%d\n", vD_addr, vB_addr);
+      putVReg( vD_addr, unop(Iop_RoundF32x4_RN, mkexpr(vB)) );
+      return True;
+   }
+
+   if (opc2 == 0x24A) { /* vrfiz (Round to FP Integer toward zero, AV p233) */
+      DIP("vrfiz v%d,v%d\n", vD_addr, vB_addr);
+      putVReg( vD_addr, unop(Iop_RoundF32x4_RZ, mkexpr(vB)) );
+      return True;
+   }
+
+   if (opc2 == 0x28A) { /* vrfip (Round to FP Integer toward +inf, AV p232) */
+      DIP("vrfip v%d,v%d\n", vD_addr, vB_addr);
+      putVReg( vD_addr, unop(Iop_RoundF32x4_RP, mkexpr(vB)) );
+      return True;
+   }
+
+   if (opc2 == 0x2CA) { /* vrfim (Round to FP Integer toward -inf, AV p230) */
+      DIP("vrfim v%d,v%d\n", vD_addr, vB_addr);
+      putVReg( vD_addr, unop(Iop_RoundF32x4_RM, mkexpr(vB)) );
+      return True;
+   }
+
+   vex_printf("dis_av_fp_convert(ppc)(opc2)\n");
+   return False;
+}
+
+static Bool dis_transactional_memory ( UInt theInstr, UInt nextInstr,
+                                       const VexAbiInfo* vbi,
+                                       /*OUT*/DisResult* dres,
+                                       Bool (*resteerOkFn)(void*,Addr),
+                                       void* callback_opaque )
+{
+   UInt   opc2      = IFIELD( theInstr, 1, 10 );
+
+   switch (opc2) {
+   case 0x28E: {        //tbegin.
+      /* The current implementation is to just fail the tbegin and execute
+       * the failure path.  The failure path is assumed to be functionally
+       * equivalent to the transactional path with the needed data locking
+       * to ensure correctness.  The tend is just a noop and shouldn't
+       * actually get executed.
+       *   1) set cr0 to 0x2
+       *   2) Initialize TFHAR to CIA+4
+       *   3) Initialize TEXASR
+       *   4) Initialize TFIAR (probably to CIA, ie, the address of tbegin.)
+       *   5) Continue executing at the next instruction.
+       */
+      UInt R = IFIELD( theInstr, 21, 1 );
+
+      ULong tm_reason;
+      UInt failure_code = 0;  /* Forcing failure, will not be due to tabort
+                               * or treclaim.
+                               */
+      UInt persistant = 1;    /* set persistant since we are always failing
+                               * the tbegin.
+                               */
+      UInt nest_overflow = 1; /* Allowed nesting depth overflow, we use this
+                                 as the reason for failing the transaction */
+      UInt tm_exact   = 1;    /* have exact address for failure */
+
+      DIP("tbegin. %d\n", R);
+
+      /* Set the CR0 field to indicate the tbegin failed.  Then let
+       * the code do the branch to the failure path.
+       *
+       * 000 || 0  Transaction initiation successful,
+       *           unnested (Transaction state of
+       *           Non-transactional prior to tbegin.)
+       * 010 || 0  Transaction initiation successful, nested
+       *           (Transaction state of Transactional
+       *           prior to tbegin.)
+       * 001 || 0  Transaction initiation unsuccessful,
+       *           (Transaction state of Suspended prior
+       *           to tbegin.)
+       */
+      putCR321( 0, mkU8( 0x2 ) );
+
+      tm_reason = generate_TMreason( failure_code, persistant,
+                                     nest_overflow, tm_exact );
+
+      storeTMfailure( guest_CIA_curr_instr, tm_reason,
+                      guest_CIA_curr_instr+4 );
+
+      return True;
+
+      break;
+   }
+
+   case 0x2AE: {        //tend.
+      /* The tend. is just a noop.  Do nothing */
+      UInt A = IFIELD( theInstr, 25, 1 );
+
+      DIP("tend. %d\n", A);
+      break;
+   }
+
+   case 0x2EE: {        //tsr.
+      /* The tsr. is just a noop.  Do nothing */
+      UInt L = IFIELD( theInstr, 21, 1 );
+
+      DIP("tsr. %d\n", L);
+      break;
+   }
+
+   case 0x2CE: {        //tcheck.
+      /* The tcheck. is just a noop.  Do nothing */
+      UInt BF = IFIELD( theInstr, 25, 1 );
+
+      DIP("tcheck. %d\n", BF);
+      break;
+   }
+
+   case 0x30E: {        //tbortwc.
+      /* The tabortwc. is just a noop.  Do nothing */
+      UInt TO = IFIELD( theInstr, 25, 1 );
+      UInt RA = IFIELD( theInstr, 16, 5 );
+      UInt RB = IFIELD( theInstr, 11, 5 );
+
+      DIP("tabortwc. %d,%d,%d\n", TO, RA, RB);
+      break;
+   }
+
+   case 0x32E: {        //tbortdc.
+      /* The tabortdc. is just a noop.  Do nothing */
+      UInt TO = IFIELD( theInstr, 25, 1 );
+      UInt RA = IFIELD( theInstr, 16, 5 );
+      UInt RB = IFIELD( theInstr, 11, 5 );
+
+      DIP("tabortdc. %d,%d,%d\n", TO, RA, RB);
+      break;
+   }
+
+   case 0x34E: {        //tbortwci.
+      /* The tabortwci. is just a noop.  Do nothing */
+      UInt TO = IFIELD( theInstr, 25, 1 );
+      UInt RA = IFIELD( theInstr, 16, 5 );
+      UInt SI = IFIELD( theInstr, 11, 5 );
+
+      DIP("tabortwci. %d,%d,%d\n", TO, RA, SI);
+      break;
+   }
+
+   case 0x36E: {        //tbortdci.
+      /* The tabortdci. is just a noop.  Do nothing */
+      UInt TO = IFIELD( theInstr, 25, 1 );
+      UInt RA = IFIELD( theInstr, 16, 5 );
+      UInt SI = IFIELD( theInstr, 11, 5 );
+
+      DIP("tabortdci. %d,%d,%d\n", TO, RA, SI);
+      break;
+   }
+
+   case 0x38E: {        //tbort.
+      /* The tabort. is just a noop.  Do nothing */
+      UInt RA = IFIELD( theInstr, 16, 5 );
+
+      DIP("tabort. %d\n", RA);
+      break;
+   }
+
+   case 0x3AE: {        //treclaim.
+      /* The treclaim. is just a noop.  Do nothing */
+      UInt RA = IFIELD( theInstr, 16, 5 );
+
+      DIP("treclaim. %d\n", RA);
+      break;
+   }
+
+   case 0x3EE: {        //trechkpt.
+      /* The trechkpt. is just a noop.  Do nothing */
+      DIP("trechkpt.\n");
+      break;
+   }
+
+   default:
+      vex_printf("dis_transactional_memory(ppc): unrecognized instruction\n");
+      return False;
+   }
+
+   return True;
+}
+
+
+/* The 0x3C primary opcode (VSX category) uses several different forms of
+ * extended opcodes:
+ *   o XX2-form:
+ *      - [10:2] (IBM notation [21:29])
+ *   o XX3-form variants:
+ *       - variant 1: [10:3] (IBM notation [21:28])
+ *       - variant 2: [9:3] (IBM notation [22:28])
+ *       - variant 3: [7:3] (IBM notation [24:28])
+ *   o XX4-form:
+ *      - [10:6] (IBM notation [21:25])
+ *
+ * The XX2-form needs bit 0 masked from the standard extended opcode
+ * as returned by ifieldOPClo10; the XX3-form needs bits 0 and 1 masked;
+ * and the XX4-form needs bits 0, 1, and 2 masked.  Additionally, the
+ * XX4 and XX3 (variants 2 and 3) forms need certain bits masked on the
+ * front end since their encoding does not begin at bit 21 like the standard
+ * format.
+ *
+ * The get_VSX60_opc2() function uses the vsx_insn array below to obtain the
+ * secondary opcode for such VSX instructions.
+ *
+*/
+
+
/* One entry of the VSX extended-opcode lookup table (vsx_all below):
   pairs a normalized secondary opcode with its mnemonic. */
struct vsx_insn {
   UInt opcode;          /* normalized extended opcode (see get_VSX60_opc2) */
   const HChar * name;   /* instruction mnemonic, used for diagnostics */
};
+
+//  ATTENTION:  Keep this array sorted on the opcocde!!!
+static struct vsx_insn vsx_all[] = {
+      { 0x0, "xsaddsp" },
+      { 0x4, "xsmaddasp" },
+      { 0x8, "xxsldwi" },
+      { 0x14, "xsrsqrtesp" },
+      { 0x16, "xssqrtsp" },
+      { 0x18, "xxsel" },
+      { 0x20, "xssubsp" },
+      { 0x24, "xsmaddmsp" },
+      { 0x28, "xxpermdi" },
+      { 0x34, "xsresp" },
+      { 0x40, "xsmulsp" },
+      { 0x44, "xsmsubasp" },
+      { 0x48, "xxmrghw" },
+      { 0x60, "xsdivsp" },
+      { 0x64, "xsmsubmsp" },
+      { 0x80, "xsadddp" },
+      { 0x84, "xsmaddadp" },
+      { 0x8c, "xscmpudp" },
+      { 0x90, "xscvdpuxws" },
+      { 0x92, "xsrdpi" },
+      { 0x94, "xsrsqrtedp" },
+      { 0x96, "xssqrtdp" },
+      { 0xa0, "xssubdp" },
+      { 0xa4, "xsmaddmdp" },
+      { 0xac, "xscmpodp" },
+      { 0xb0, "xscvdpsxws" },
+      { 0xb2, "xsrdpiz" },
+      { 0xb4, "xsredp" },
+      { 0xc0, "xsmuldp" },
+      { 0xc4, "xsmsubadp" },
+      { 0xc8, "xxmrglw" },
+      { 0xd2, "xsrdpip" },
+      { 0xd4, "xstsqrtdp" },
+      { 0xd6, "xsrdpic" },
+      { 0xe0, "xsdivdp" },
+      { 0xe4, "xsmsubmdp" },
+      { 0xf2, "xsrdpim" },
+      { 0xf4, "xstdivdp" },
+      { 0x100, "xvaddsp" },
+      { 0x104, "xvmaddasp" },
+      { 0x10c, "xvcmpeqsp" },
+      { 0x110, "xvcvspuxws" },
+      { 0x112, "xvrspi" },
+      { 0x114, "xvrsqrtesp" },
+      { 0x116, "xvsqrtsp" },
+      { 0x120, "xvsubsp" },
+      { 0x124, "xvmaddmsp" },
+      { 0x12c, "xvcmpgtsp" },
+      { 0x130, "xvcvspsxws" },
+      { 0x132, "xvrspiz" },
+      { 0x134, "xvresp" },
+      { 0x140, "xvmulsp" },
+      { 0x144, "xvmsubasp" },
+      { 0x148, "xxspltw" },
+      { 0x14c, "xvcmpgesp" },
+      { 0x150, "xvcvuxwsp" },
+      { 0x152, "xvrspip" },
+      { 0x154, "xvtsqrtsp" },
+      { 0x156, "xvrspic" },
+      { 0x160, "xvdivsp" },
+      { 0x164, "xvmsubmsp" },
+      { 0x170, "xvcvsxwsp" },
+      { 0x172, "xvrspim" },
+      { 0x174, "xvtdivsp" },
+      { 0x180, "xvadddp" },
+      { 0x184, "xvmaddadp" },
+      { 0x18c, "xvcmpeqdp" },
+      { 0x190, "xvcvdpuxws" },
+      { 0x192, "xvrdpi" },
+      { 0x194, "xvrsqrtedp" },
+      { 0x196, "xvsqrtdp" },
+      { 0x1a0, "xvsubdp" },
+      { 0x1a4, "xvmaddmdp" },
+      { 0x1ac, "xvcmpgtdp" },
+      { 0x1b0, "xvcvdpsxws" },
+      { 0x1b2, "xvrdpiz" },
+      { 0x1b4, "xvredp" },
+      { 0x1c0, "xvmuldp" },
+      { 0x1c4, "xvmsubadp" },
+      { 0x1cc, "xvcmpgedp" },
+      { 0x1d0, "xvcvuxwdp" },
+      { 0x1d2, "xvrdpip" },
+      { 0x1d4, "xvtsqrtdp" },
+      { 0x1d6, "xvrdpic" },
+      { 0x1e0, "xvdivdp" },
+      { 0x1e4, "xvmsubmdp" },
+      { 0x1f0, "xvcvsxwdp" },
+      { 0x1f2, "xvrdpim" },
+      { 0x1f4, "xvtdivdp" },
+      { 0x204, "xsnmaddasp" },
+      { 0x208, "xxland" },
+      { 0x212, "xscvdpsp" },
+      { 0x216, "xscvdpspn" },
+      { 0x224, "xsnmaddmsp" },
+      { 0x228, "xxlandc" },
+      { 0x232, "xxrsp" },
+      { 0x244, "xsnmsubasp" },
+      { 0x248, "xxlor" },
+      { 0x250, "xscvuxdsp" },
+      { 0x264, "xsnmsubmsp" },
+      { 0x268, "xxlxor" },
+      { 0x270, "xscvsxdsp" },
+      { 0x280, "xsmaxdp" },
+      { 0x284, "xsnmaddadp" },
+      { 0x288, "xxlnor" },
+      { 0x290, "xscvdpuxds" },
+      { 0x292, "xscvspdp" },
+      { 0x296, "xscvspdpn" },
+      { 0x2a0, "xsmindp" },
+      { 0x2a4, "xsnmaddmdp" },
+      { 0x2a8, "xxlorc" },
+      { 0x2b0, "xscvdpsxds" },
+      { 0x2b2, "xsabsdp" },
+      { 0x2c0, "xscpsgndp" },
+      { 0x2c4, "xsnmsubadp" },
+      { 0x2c8, "xxlnand" },
+      { 0x2d0, "xscvuxddp" },
+      { 0x2d2, "xsnabsdp" },
+      { 0x2e4, "xsnmsubmdp" },
+      { 0x2e8, "xxleqv" },
+      { 0x2f0, "xscvsxddp" },
+      { 0x2f2, "xsnegdp" },
+      { 0x300, "xvmaxsp" },
+      { 0x304, "xvnmaddasp" },
+      { 0x30c, "xvcmpeqsp." },
+      { 0x310, "xvcvspuxds" },
+      { 0x312, "xvcvdpsp" },
+      { 0x320, "xvminsp" },
+      { 0x324, "xvnmaddmsp" },
+      { 0x32c, "xvcmpgtsp." },
+      { 0x330, "xvcvspsxds" },
+      { 0x332, "xvabssp" },
+      { 0x340, "xvcpsgnsp" },
+      { 0x344, "xvnmsubasp" },
+      { 0x34c, "xvcmpgesp." },
+      { 0x350, "xvcvuxdsp" },
+      { 0x352, "xvnabssp" },
+      { 0x364, "xvnmsubmsp" },
+      { 0x370, "xvcvsxdsp" },
+      { 0x372, "xvnegsp" },
+      { 0x380, "xvmaxdp" },
+      { 0x384, "xvnmaddadp" },
+      { 0x38c, "xvcmpeqdp." },
+      { 0x390, "xvcvdpuxds" },
+      { 0x392, "xvcvspdp" },
+      { 0x3a0, "xvmindp" },
+      { 0x3a4, "xvnmaddmdp" },
+      { 0x3ac, "xvcmpgtdp." },
+      { 0x3b0, "xvcvdpsxds" },
+      { 0x3b2, "xvabsdp" },
+      { 0x3c0, "xvcpsgndp" },
+      { 0x3c4, "xvnmsubadp" },
+      { 0x3cc, "xvcmpgedp." },
+      { 0x3d0, "xvcvuxddp" },
+      { 0x3d2, "xvnabsdp" },
+      { 0x3e4, "xvnmsubmdp" },
+      { 0x3f0, "xvcvsxddp" },
+      { 0x3f2, "xvnegdp" }
+};
+#define VSX_ALL_LEN (sizeof vsx_all / sizeof *vsx_all)
+
+
+// ATTENTION: This search function assumes vsx_all array is sorted.
+static Int findVSXextOpCode(UInt opcode)
+{
+   Int low, mid, high;
+   low = 0;
+   high = VSX_ALL_LEN - 1;
+   while (low <= high) {
+      mid = (low + high)/2;
+      if (opcode < vsx_all[mid].opcode)
+         high = mid - 1;
+      else if (opcode > vsx_all[mid].opcode)
+         low = mid + 1;
+      else
+         return mid;
+   }
+   return -1;
+}
+
+
+/* The full 10-bit extended opcode retrieved via ifieldOPClo10 is
+ * passed, and we then try to match it up with one of the VSX forms
+ * below.
+ */
+static UInt get_VSX60_opc2(UInt opc2_full)
+{
+#define XX2_MASK 0x000003FE
+#define XX3_1_MASK 0x000003FC
+#define XX3_2_MASK 0x000001FC
+#define XX3_3_MASK 0x0000007C
+#define XX4_MASK 0x00000018
+   Int ret;
+   UInt vsxExtOpcode = 0;
+
+   if (( ret = findVSXextOpCode(opc2_full & XX2_MASK)) >= 0)
+      vsxExtOpcode = vsx_all[ret].opcode;
+   else if (( ret = findVSXextOpCode(opc2_full & XX3_1_MASK)) >= 0)
+      vsxExtOpcode = vsx_all[ret].opcode;
+   else if (( ret = findVSXextOpCode(opc2_full & XX3_2_MASK)) >= 0)
+      vsxExtOpcode = vsx_all[ret].opcode;
+   else if (( ret = findVSXextOpCode(opc2_full & XX3_3_MASK)) >= 0)
+      vsxExtOpcode = vsx_all[ret].opcode;
+   else if (( ret = findVSXextOpCode(opc2_full & XX4_MASK)) >= 0)
+      vsxExtOpcode = vsx_all[ret].opcode;
+
+   return vsxExtOpcode;
+}
+
+/*------------------------------------------------------------*/
+/*--- Disassemble a single instruction                     ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction
+   is located in host memory at &guest_code[delta]. */
+
+static   
+DisResult disInstr_PPC_WRK ( 
+             Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+             Bool         resteerCisOk,
+             void*        callback_opaque,
+             Long         delta64,
+             const VexArchInfo* archinfo,
+             const VexAbiInfo*  abiinfo,
+             Bool         sigill_diag
+          )
+{
+   UChar     opc1;
+   UInt      opc2;
+   DisResult dres;
+   UInt      theInstr;
+   IRType    ty = mode64 ? Ity_I64 : Ity_I32;
+   Bool      allow_F  = False;
+   Bool      allow_V  = False;
+   Bool      allow_FX = False;
+   Bool      allow_GX = False;
+   Bool      allow_VX = False;  // Equates to "supports Power ISA 2.06
+   Bool      allow_DFP = False;
+   Bool      allow_isa_2_07 = False;
+   UInt      hwcaps = archinfo->hwcaps;
+   Long      delta;
+
+   /* What insn variants are we supporting today? */
+   if (mode64) {
+      allow_F  = True;
+      allow_V  = (0 != (hwcaps & VEX_HWCAPS_PPC64_V));
+      allow_FX = (0 != (hwcaps & VEX_HWCAPS_PPC64_FX));
+      allow_GX = (0 != (hwcaps & VEX_HWCAPS_PPC64_GX));
+      allow_VX = (0 != (hwcaps & VEX_HWCAPS_PPC64_VX));
+      allow_DFP = (0 != (hwcaps & VEX_HWCAPS_PPC64_DFP));
+      allow_isa_2_07 = (0 != (hwcaps & VEX_HWCAPS_PPC64_ISA2_07));
+   } else {
+      allow_F  = (0 != (hwcaps & VEX_HWCAPS_PPC32_F));
+      allow_V  = (0 != (hwcaps & VEX_HWCAPS_PPC32_V));
+      allow_FX = (0 != (hwcaps & VEX_HWCAPS_PPC32_FX));
+      allow_GX = (0 != (hwcaps & VEX_HWCAPS_PPC32_GX));
+      allow_VX = (0 != (hwcaps & VEX_HWCAPS_PPC32_VX));
+      allow_DFP = (0 != (hwcaps & VEX_HWCAPS_PPC32_DFP));
+      allow_isa_2_07 = (0 != (hwcaps & VEX_HWCAPS_PPC32_ISA2_07));
+   }
+
+   /* The running delta */
+   delta = (Long)mkSzAddr(ty, (ULong)delta64);
+
+   /* Set result defaults. */
+   dres.whatNext    = Dis_Continue;
+   dres.len         = 0;
+   dres.continueAt  = 0;
+   dres.jk_StopHere = Ijk_INVALID;
+
+   /* At least this is simple on PPC32: insns are all 4 bytes long, and
+      4-aligned.  So just fish the whole thing out of memory right now
+      and have done. */
+   theInstr = getUIntPPCendianly( &guest_code[delta] );
+
+   if (0) vex_printf("insn: 0x%x\n", theInstr);
+
+   DIP("\t0x%llx:  ", (ULong)guest_CIA_curr_instr);
+
+   /* Spot "Special" instructions (see comment at top of file). */
+   {
+      const UChar* code = guest_code + delta;
+      /* Spot the 16-byte preamble: 
+         32-bit mode:
+            5400183E  rlwinm 0,0,3,0,31
+            5400683E  rlwinm 0,0,13,0,31
+            5400E83E  rlwinm 0,0,29,0,31
+            5400983E  rlwinm 0,0,19,0,31
+         64-bit mode:
+            78001800  rotldi 0,0,3
+            78006800  rotldi 0,0,13
+            7800E802  rotldi 0,0,61
+            78009802  rotldi 0,0,51
+      */
+      UInt word1 = mode64 ? 0x78001800 : 0x5400183E;
+      UInt word2 = mode64 ? 0x78006800 : 0x5400683E;
+      UInt word3 = mode64 ? 0x7800E802 : 0x5400E83E;
+      UInt word4 = mode64 ? 0x78009802 : 0x5400983E;
+      Bool is_special_preamble = False;
+      if (getUIntPPCendianly(code+ 0) == word1 &&
+          getUIntPPCendianly(code+ 4) == word2 &&
+          getUIntPPCendianly(code+ 8) == word3 &&
+          getUIntPPCendianly(code+12) == word4) {
+         is_special_preamble = True;
+      } else if (! mode64 &&
+                 getUIntPPCendianly(code+ 0) == 0x54001800 &&
+                 getUIntPPCendianly(code+ 4) == 0x54006800 &&
+                 getUIntPPCendianly(code+ 8) == 0x5400E800 &&
+                 getUIntPPCendianly(code+12) == 0x54009800) {
+         static Bool reported = False;
+         if (!reported) {
+            vex_printf("disInstr(ppc): old ppc32 instruction magic detected. Code might clobber r0.\n");
+            vex_printf("disInstr(ppc): source needs to be recompiled against latest valgrind.h.\n");
+            reported = True;
+         }
+         is_special_preamble = True;
+      }
+      if (is_special_preamble) {
+         /* Got a "Special" instruction preamble.  Which one is it? */
+         if (getUIntPPCendianly(code+16) == 0x7C210B78 /* or 1,1,1 */) {
+            /* %R3 = client_request ( %R4 ) */
+            DIP("r3 = client_request ( %%r4 )\n");
+            delta += 20;
+            putGST( PPC_GST_CIA, mkSzImm( ty, guest_CIA_bbstart + delta ));
+            dres.jk_StopHere = Ijk_ClientReq;
+            dres.whatNext    = Dis_StopHere;
+            goto decode_success;
+         }
+         else
+         if (getUIntPPCendianly(code+16) == 0x7C421378 /* or 2,2,2 */) {
+            /* %R3 = guest_NRADDR */
+            DIP("r3 = guest_NRADDR\n");
+            delta += 20;
+            dres.len = 20;
+            putIReg(3, IRExpr_Get( OFFB_NRADDR, ty ));
+            goto decode_success;
+         }
+         else
+         if (getUIntPPCendianly(code+16) == 0x7C631B78 /* or 3,3,3 */) {
+            delta += 20;
+            if (host_endness == VexEndnessLE) {
+                /*  branch-and-link-to-noredir %R12 */
+                DIP("branch-and-link-to-noredir r12\n");
+                putGST( PPC_GST_LR,
+                        mkSzImm(ty, guest_CIA_bbstart + (Long)delta) );
+                putGST( PPC_GST_CIA, getIReg(12));
+            } else {
+                /*  branch-and-link-to-noredir %R11 */
+                DIP("branch-and-link-to-noredir r11\n");
+                putGST( PPC_GST_LR,
+                        mkSzImm(ty, guest_CIA_bbstart + (Long)delta) );
+                putGST( PPC_GST_CIA, getIReg(11));
+            }
+            dres.jk_StopHere = Ijk_NoRedir;
+            dres.whatNext    = Dis_StopHere;
+            goto decode_success;
+         }
+         else
+         if (getUIntPPCendianly(code+16) == 0x7C842378 /* or 4,4,4 */) {
+            /* %R3 = guest_NRADDR_GPR2 */
+            DIP("r3 = guest_NRADDR_GPR2\n");
+            delta += 20;
+            dres.len = 20;
+            putIReg(3, IRExpr_Get( OFFB_NRADDR_GPR2, ty ));
+            goto decode_success;
+         }
+         else
+         if (getUIntPPCendianly(code+16) == 0x7CA52B78 /* or 5,5,5 */) {
+            DIP("IR injection\n");
+            if (host_endness == VexEndnessBE)
+               vex_inject_ir(irsb, Iend_BE);
+            else
+               vex_inject_ir(irsb, Iend_LE);
+
+            delta += 20;
+            dres.len = 20;
+
+            // Invalidate the current insn. The reason is that the IRop we're
+            // injecting here can change. In which case the translation has to
+            // be redone. For ease of handling, we simply invalidate all the
+            // time.
+
+            stmt(IRStmt_Put(OFFB_CMSTART, mkSzImm(ty, guest_CIA_curr_instr)));
+            stmt(IRStmt_Put(OFFB_CMLEN,   mkSzImm(ty, 20)));
+   
+            putGST( PPC_GST_CIA, mkSzImm( ty, guest_CIA_bbstart + delta ));
+            dres.whatNext    = Dis_StopHere;
+            dres.jk_StopHere = Ijk_InvalICache;
+            goto decode_success;
+         }
+         /* We don't know what it is.  Set opc1/opc2 so decode_failure
+            can print the insn following the Special-insn preamble. */
+         theInstr = getUIntPPCendianly(code+16);
+         opc1     = ifieldOPC(theInstr);
+         opc2     = ifieldOPClo10(theInstr);
+         goto decode_failure;
+         /*NOTREACHED*/
+      }
+   }
+
+   opc1 = ifieldOPC(theInstr);
+   opc2 = ifieldOPClo10(theInstr);
+
+   // Note: all 'reserved' bits must be cleared, else invalid
+   switch (opc1) {
+
+   /* Integer Arithmetic Instructions */
+   case 0x0C: case 0x0D: case 0x0E:  // addic, addic., addi
+   case 0x0F: case 0x07: case 0x08:  // addis, mulli,  subfic
+      if (dis_int_arith( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* Integer Compare Instructions */
+   case 0x0B: case 0x0A: // cmpi, cmpli
+      if (dis_int_cmp( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* Integer Logical Instructions */
+   case 0x1C: case 0x1D: case 0x18: // andi., andis., ori
+   case 0x19: case 0x1A: case 0x1B: // oris,  xori,   xoris
+      if (dis_int_logic( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* Integer Rotate Instructions */
+   case 0x14: case 0x15:  case 0x17: // rlwimi, rlwinm, rlwnm
+      if (dis_int_rot( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* 64bit Integer Rotate Instructions */
+   case 0x1E: // rldcl, rldcr, rldic, rldicl, rldicr, rldimi
+      if (!mode64) goto decode_failure;
+      if (dis_int_rot( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* Integer Load Instructions */
+   case 0x22: case 0x23: case 0x2A: // lbz,  lbzu, lha
+   case 0x2B: case 0x28: case 0x29: // lhau, lhz,  lhzu
+   case 0x20: case 0x21:            // lwz,  lwzu
+      if (dis_int_load( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* Integer Store Instructions */
+   case 0x26: case 0x27: case 0x2C: // stb,  stbu, sth
+   case 0x2D: case 0x24: case 0x25: // sthu, stw,  stwu
+      if (dis_int_store( theInstr, abiinfo )) goto decode_success;
+      goto decode_failure;
+
+   /* Integer Load and Store Multiple Instructions */
+   case 0x2E: case 0x2F: // lmw, stmw
+      if (dis_int_ldst_mult( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* Branch Instructions */
+   case 0x12: case 0x10: // b, bc
+      if (dis_branch(theInstr, abiinfo, &dres, 
+                               resteerOkFn, callback_opaque)) 
+         goto decode_success;
+      goto decode_failure;
+
+   /* System Linkage Instructions */
+   case 0x11: // sc
+      if (dis_syslink(theInstr, abiinfo, &dres)) goto decode_success;
+      goto decode_failure;
+
+   /* Trap Instructions */
+   case 0x02:    // tdi
+      if (!mode64) goto decode_failure;
+      if (dis_trapi(theInstr, &dres)) goto decode_success;
+      goto decode_failure;
+
+   case 0x03:   // twi
+      if (dis_trapi(theInstr, &dres)) goto decode_success;
+      goto decode_failure;
+
+   /* Floating Point Load Instructions */
+   case 0x30: case 0x31: case 0x32: // lfs, lfsu, lfd
+   case 0x33:                       // lfdu
+      if (!allow_F) goto decode_noF;
+      if (dis_fp_load( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* Floating Point Store Instructions */
+   case 0x34: case 0x35: case 0x36: // stfsx, stfsux, stfdx
+   case 0x37:                       // stfdux
+      if (!allow_F) goto decode_noF;
+      if (dis_fp_store( theInstr )) goto decode_success;
+      goto decode_failure;
+
+      /* Floating Point Load Double Pair Instructions */
+   case 0x39: case 0x3D:
+      if (!allow_F) goto decode_noF;
+      if (dis_fp_pair( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* 128-bit Integer Load */
+   case 0x38:  // lq
+      if (dis_int_load( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   /* 64bit Integer Loads */
+   case 0x3A:  // ld, ldu, lwa
+      if (!mode64) goto decode_failure;
+      if (dis_int_load( theInstr )) goto decode_success;
+      goto decode_failure;
+
+   case 0x3B:
+      if (!allow_F) goto decode_noF;
+      opc2 = ifieldOPClo10(theInstr);
+
+      switch (opc2) {
+         case 0x2:    // dadd - DFP Add
+         case 0x202:  // dsub - DFP Subtract
+         case 0x22:   // dmul - DFP Mult
+         case 0x222:  // ddiv - DFP Divide
+            if (!allow_DFP) goto decode_noDFP;
+            if (dis_dfp_arith( theInstr ))
+               goto decode_success;
+         case 0x82:   // dcmpo, DFP comparison ordered instruction
+         case 0x282:  // dcmpu, DFP comparison unordered instruction
+            if (!allow_DFP) goto decode_noDFP;
+            if (dis_dfp_compare( theInstr ) )
+               goto decode_success;
+            goto decode_failure;
+         case 0x102: // dctdp  - DFP convert to DFP long
+         case 0x302: // drsp   - DFP round to dfp short
+         case 0x122: // dctfix - DFP convert to fixed
+            if (!allow_DFP) goto decode_noDFP;
+            if (dis_dfp_fmt_conv( theInstr ))
+               goto decode_success;
+            goto decode_failure;
+         case 0x322: // POWER 7 inst, dcffix - DFP convert from fixed
+            if (!allow_VX)
+               goto decode_failure;
+            if (dis_dfp_fmt_conv( theInstr ))
+               goto decode_success;
+            goto decode_failure;
+         case 0x2A2: // dtstsf - DFP number of significant digits
+            if (!allow_DFP) goto decode_noDFP;
+            if (dis_dfp_significant_digits(theInstr))
+               goto decode_success;
+            goto decode_failure;
+         case 0x142: // ddedpd   DFP Decode DPD to BCD
+         case 0x342: // denbcd   DFP Encode BCD to DPD
+            if (!allow_DFP) goto decode_noDFP;
+            if (dis_dfp_bcd(theInstr))
+               goto decode_success;
+            goto decode_failure;
+         case 0x162:  // dxex - Extract exponent 
+         case 0x362:  // diex - Insert exponent
+            if (!allow_DFP) goto decode_noDFP;
+            if (dis_dfp_extract_insert( theInstr ) )
+               goto decode_success;
+            goto decode_failure;
+         case 0x3CE: // fcfidus (implemented as native insn)
+            if (!allow_VX)
+               goto decode_noVX;
+            if (dis_fp_round( theInstr ))
+               goto decode_success;
+            goto decode_failure;
+         case 0x34E: // fcfids
+            if (dis_fp_round( theInstr ))
+               goto decode_success;
+            goto decode_failure;
+      }
+
+      opc2 = ifieldOPClo9( theInstr );
+      switch (opc2) {
+      case 0x42: // dscli, DFP shift left
+      case 0x62: // dscri, DFP shift right
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_shift( theInstr ))
+            goto decode_success;
+         goto decode_failure;
+      case 0xc2:  // dtstdc, DFP test data class
+      case 0xe2:  // dtstdg, DFP test data group
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_class_test( theInstr ))
+            goto decode_success;
+         goto decode_failure;
+      }
+
+      opc2 = ifieldOPClo8( theInstr );
+      switch (opc2) {
+      case 0x3:   // dqua  - DFP Quantize
+      case 0x23:  // drrnd - DFP Reround
+      case 0x43:  // dquai - DFP Quantize immediate
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_quantize_sig_rrnd( theInstr ) )
+            goto decode_success;
+         goto decode_failure;
+      case 0xA2: // dtstex - DFP Test exponent
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_exponent_test( theInstr ) )
+            goto decode_success;
+         goto decode_failure;
+      case 0x63: // drintx - Round to an integer value
+      case 0xE3: // drintn - Round to an integer value
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_round( theInstr ) ) {
+            goto decode_success;
+         }
+         goto decode_failure;
+      default:
+         break;  /* fall through to next opc2 check */
+      }
+
+      opc2 = IFIELD(theInstr, 1, 5);
+      switch (opc2) {
+      /* Floating Point Arith Instructions */
+      case 0x12: case 0x14: case 0x15: // fdivs,  fsubs, fadds
+      case 0x19:                       // fmuls
+         if (dis_fp_arith(theInstr)) goto decode_success;
+         goto decode_failure;
+      case 0x16:                       // fsqrts
+         if (!allow_FX) goto decode_noFX;
+         if (dis_fp_arith(theInstr)) goto decode_success;
+         goto decode_failure;
+      case 0x18:                       // fres
+         if (!allow_GX) goto decode_noGX;
+         if (dis_fp_arith(theInstr)) goto decode_success;
+         goto decode_failure;
+
+      /* Floating Point Mult-Add Instructions */
+      case 0x1C: case 0x1D: case 0x1E: // fmsubs, fmadds, fnmsubs
+      case 0x1F:                       // fnmadds
+         if (dis_fp_multadd(theInstr)) goto decode_success;
+         goto decode_failure;
+
+      case 0x1A:                       // frsqrtes
+         if (!allow_GX) goto decode_noGX;
+         if (dis_fp_arith(theInstr)) goto decode_success;
+         goto decode_failure;
+
+      default:
+         goto decode_failure;
+      }
+      break;
+
+   case 0x3C: // VSX instructions (except load/store)
+   {
+      // All of these VSX instructions use some VMX facilities, so
+      // if allow_V is not set, we'll skip trying to decode.
+      if (!allow_V) goto decode_noVX;
+
+      UInt vsxOpc2 = get_VSX60_opc2(opc2);
+      /* The vsxOpc2 returned is the "normalized" value, representing the
+       * instructions secondary opcode as taken from the standard secondary
+       * opcode field [21:30] (IBM notatition), even if the actual field
+       * is non-standard.  These normalized values are given in the opcode
+       * appendices of the ISA 2.06 document.
+       */
+
+      switch (vsxOpc2) {
+         case 0x8: case 0x28: case 0x48: case 0xc8: // xxsldwi, xxpermdi, xxmrghw, xxmrglw
+         case 0x018: case 0x148: // xxsel, xxspltw
+            if (dis_vx_permute_misc(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+         case 0x268: case 0x248: case 0x288: // xxlxor, xxlor, xxlnor,
+         case 0x208: case 0x228: case 0x2A8: // xxland, xxlandc, xxlorc
+         case 0x2C8: case 0x2E8: // xxlnand, xxleqv
+            if (dis_vx_logic(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+         case 0x2B2: case 0x2C0: // xsabsdp, xscpsgndp
+         case 0x2D2: case 0x2F2: // xsnabsdp, xsnegdp
+         case 0x280: case 0x2A0: // xsmaxdp, xsmindp
+         case 0x0F2: case 0x0D2: // xsrdpim, xsrdpip
+         case 0x034: case 0x014: // xsresp, xsrsqrtesp
+         case 0x0B4: case 0x094: // xsredp, xsrsqrtedp
+         case 0x0D6: case 0x0B2: // xsrdpic, xsrdpiz
+         case 0x092: case 0x232: // xsrdpi, xsrsp
+            if (dis_vxs_misc(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+         case 0x08C: case 0x0AC: // xscmpudp, xscmpodp
+            if (dis_vx_cmp(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+         case 0x0:   case 0x020: // xsaddsp, xssubsp
+         case 0x080:             // xsadddp
+         case 0x060: case 0x0E0: // xsdivsp, xsdivdp
+         case 0x004: case 0x024: // xsmaddasp, xsmaddmsp
+         case 0x084: case 0x0A4: // xsmaddadp, xsmaddmdp
+         case 0x044: case 0x064: // xsmsubasp, xsmsubmsp
+         case 0x0C4: case 0x0E4: // xsmsubadp, xsmsubmdp
+         case 0x204: case 0x224: // xsnmaddasp, xsnmaddmsp
+         case 0x284: case 0x2A4: // xsnmaddadp, xsnmaddmdp
+         case 0x244: case 0x264: // xsnmsubasp, xsnmsubmsp
+         case 0x2C4: case 0x2E4: // xsnmsubadp, xsnmsubmdp
+         case 0x040: case 0x0C0: // xsmulsp, xsmuldp
+         case 0x0A0:             // xssubdp
+         case 0x016: case 0x096: // xssqrtsp,xssqrtdp
+         case 0x0F4: case 0x0D4: // xstdivdp, xstsqrtdp
+            if (dis_vxs_arith(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+         case 0x180: // xvadddp
+         case 0x1E0: // xvdivdp
+         case 0x1C0: // xvmuldp
+         case 0x1A0: // xvsubdp
+         case 0x184: case 0x1A4: // xvmaddadp, xvmaddmdp
+         case 0x1C4: case 0x1E4: // xvmsubadp, xvmsubmdp
+         case 0x384: case 0x3A4: // xvnmaddadp, xvnmaddmdp
+         case 0x3C4: case 0x3E4: // xvnmsubadp, xvnmsubmdp
+         case 0x1D4: case 0x1F4: // xvtsqrtdp, xvtdivdp
+         case 0x196: // xvsqrtdp
+            if (dis_vxv_dp_arith(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+         case 0x100: // xvaddsp
+         case 0x160: // xvdivsp
+         case 0x140: // xvmulsp
+         case 0x120: // xvsubsp
+         case 0x104: case 0x124: // xvmaddasp, xvmaddmsp
+         case 0x144: case 0x164: // xvmsubasp, xvmsubmsp
+         case 0x304: case 0x324: // xvnmaddasp, xvnmaddmsp
+         case 0x344: case 0x364: // xvnmsubasp, xvnmsubmsp
+         case 0x154: case 0x174: // xvtsqrtsp, xvtdivsp
+         case 0x116: // xvsqrtsp
+            if (dis_vxv_sp_arith(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+
+         case 0x250:             // xscvuxdsp
+         case 0x2D0: case 0x3d0: // xscvuxddp, xvcvuxddp
+         case 0x350: case 0x1d0: // xvcvuxdsp, xvcvuxwdp
+         case 0x090: // xscvdpuxws
+            // The above VSX conversion instructions employ some ISA 2.06
+            // floating point conversion instructions under the covers,
+            // so if allow_VX (which means "supports ISA 2.06") is not set,
+            // we'll skip the decode.
+            if (!allow_VX) goto decode_noVX;
+            if (dis_vx_conv(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+
+         case 0x2B0: // xscvdpsxds
+         case 0x270: case 0x2F0: // xscvsxdsp, xscvsxddp
+         case 0x1b0: case 0x130: // xvcvdpsxws, xvcvspsxws
+         case 0x0b0: case 0x290: // xscvdpsxws, xscvdpuxds
+         case 0x212: case 0x216: // xscvdpsp, xscvdpspn
+         case 0x292: case 0x296: // xscvspdp, xscvspdpn
+         case 0x312: // xvcvdpsp
+         case 0x390: case 0x190: // xvcvdpuxds, xvcvdpuxws
+         case 0x3B0: case 0x310: // xvcvdpsxds, xvcvspuxds
+         case 0x392: case 0x330: // xvcvspdp, xvcvspsxds
+         case 0x110: case 0x3f0: // xvcvspuxws, xvcvsxddp
+         case 0x370: case 0x1f0: // xvcvsxdsp, xvcvsxwdp
+         case 0x170: case 0x150: // xvcvsxwsp, xvcvuxwsp
+            if (dis_vx_conv(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+
+         case 0x18C: case 0x38C: // xvcmpeqdp[.]
+         case 0x10C: case 0x30C: // xvcmpeqsp[.]
+         case 0x14C: case 0x34C: // xvcmpgesp[.]
+         case 0x12C: case 0x32C: // xvcmpgtsp[.]
+         case 0x1CC: case 0x3CC: // xvcmpgedp[.]
+         case 0x1AC: case 0x3AC: // xvcmpgtdp[.]
+             if (dis_vvec_cmp(theInstr, vsxOpc2)) goto decode_success;
+             goto decode_failure;
+
+         case 0x134:  // xvresp
+         case 0x1B4:  // xvredp
+         case 0x194: case 0x114: // xvrsqrtedp, xvrsqrtesp
+         case 0x380: case 0x3A0: // xvmaxdp, xvmindp
+         case 0x300: case 0x320: // xvmaxsp, xvminsp
+         case 0x3C0: case 0x340: // xvcpsgndp, xvcpsgnsp
+         case 0x3B2: case 0x332: // xvabsdp, xvabssp
+         case 0x3D2: case 0x352: // xvnabsdp, xvnabssp
+         case 0x192: case 0x1D6: // xvrdpi, xvrdpic
+         case 0x1F2: case 0x1D2: // xvrdpim, xvrdpip
+         case 0x1B2: case 0x3F2: // xvrdpiz, xvnegdp
+         case 0x112: case 0x156: // xvrspi, xvrspic
+         case 0x172: case 0x152: // xvrspim, xvrspip
+         case 0x132: // xvrspiz
+            if (dis_vxv_misc(theInstr, vsxOpc2)) goto decode_success;
+            goto decode_failure;
+
+         default:
+            goto decode_failure;
+      }
+      break;
+   }
+
+   /* 64bit Integer Stores */
+   case 0x3E:  // std, stdu, stq
+      if (dis_int_store( theInstr, abiinfo )) goto decode_success;
+      goto decode_failure;
+
+   case 0x3F:
+      if (!allow_F) goto decode_noF;
+      /* Instrs using opc[1:5] never overlap instrs using opc[1:10],
+         so we can simply fall through the first switch statement */
+
+      opc2 = IFIELD(theInstr, 1, 5);
+      switch (opc2) {
+      /* Floating Point Arith Instructions */
+      case 0x12: case 0x14: case 0x15: // fdiv, fsub, fadd
+      case 0x19:                       // fmul
+         if (dis_fp_arith(theInstr)) goto decode_success;
+         goto decode_failure;
+      case 0x16:                       // fsqrt
+         if (!allow_FX) goto decode_noFX;
+         if (dis_fp_arith(theInstr)) goto decode_success;
+         goto decode_failure;
+      case 0x17: case 0x1A:            // fsel, frsqrte
+         if (!allow_GX) goto decode_noGX;
+         if (dis_fp_arith(theInstr)) goto decode_success;
+         goto decode_failure;
+         
+      /* Floating Point Mult-Add Instructions */         
+      case 0x1C: case 0x1D: case 0x1E: // fmsub, fmadd, fnmsub
+      case 0x1F:                       // fnmadd
+         if (dis_fp_multadd(theInstr)) goto decode_success;
+         goto decode_failure;
+
+      case 0x18:                       // fre
+         if (!allow_GX) goto decode_noGX;
+         if (dis_fp_arith(theInstr)) goto decode_success;
+         goto decode_failure;
+
+      default:
+         break; // Fall through
+      }
+
+      opc2 = IFIELD(theInstr, 1, 10);
+      switch (opc2) {
+      /* 128-bit DFP instructions */
+      case 0x2:    // daddq - DFP Add
+      case 0x202:  // dsubq - DFP Subtract
+      case 0x22:   // dmulq - DFP Mult
+      case 0x222:  // ddivq - DFP Divide
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_arithq( theInstr ))
+            goto decode_success;
+         goto decode_failure;
+      case 0x162:  // dxexq - DFP Extract exponent
+      case 0x362:  // diexq - DFP Insert exponent
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_extract_insertq( theInstr ))
+            goto decode_success;
+         goto decode_failure;
+
+      case 0x82:   // dcmpoq, DFP comparison ordered instruction
+      case 0x282:  // dcmpuq, DFP comparison unordered instruction
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_compare( theInstr ) )
+            goto decode_success;
+         goto decode_failure;
+
+      case 0x102: // dctqpq  - DFP convert to DFP extended
+      case 0x302: // drdpq   - DFP round to dfp Long
+      case 0x122: // dctfixq - DFP convert to fixed quad
+      case 0x322: // dcffixq - DFP convert from fixed quad
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_fmt_convq( theInstr ))
+            goto decode_success;
+         goto decode_failure;
+
+      case 0x2A2: // dtstsfq - DFP number of significant digits
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_significant_digits(theInstr))
+            goto decode_success;
+         goto decode_failure;
+
+      case 0x142: // ddedpdq   DFP Decode DPD to BCD
+      case 0x342: // denbcdq   DFP Encode BCD to DPD
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_bcdq(theInstr))
+            goto decode_success;
+         goto decode_failure;
+
+      /* Floating Point Compare Instructions */         
+      case 0x000: // fcmpu
+      case 0x020: // fcmpo
+         if (dis_fp_cmp(theInstr)) goto decode_success;
+         goto decode_failure;
+         
+      case 0x080: // ftdiv
+      case 0x0A0: // ftsqrt
+         if (dis_fp_tests(theInstr)) goto decode_success;
+         goto decode_failure;
+
+      /* Floating Point Rounding/Conversion Instructions */         
+      case 0x00C: // frsp
+      case 0x00E: // fctiw
+      case 0x00F: // fctiwz
+      case 0x32E: // fctid
+      case 0x32F: // fctidz
+      case 0x34E: // fcfid
+         if (dis_fp_round(theInstr)) goto decode_success;
+         goto decode_failure;
+      case 0x3CE: case 0x3AE: case 0x3AF: // fcfidu, fctidu[z] (implemented as native insns)
+      case 0x08F: case 0x08E: // fctiwu[z] (implemented as native insns)
+         if (!allow_VX) goto decode_noVX;
+         if (dis_fp_round(theInstr)) goto decode_success;
+         goto decode_failure;
+
+      /* Power6 rounding stuff */
+      case 0x1E8: // frim
+      case 0x1C8: // frip
+      case 0x188: // frin
+      case 0x1A8: // friz
+         /* A hack to check for P6 capability . . . */
+         if ((allow_F && allow_V && allow_FX && allow_GX) &&
+             (dis_fp_round(theInstr)))
+            goto decode_success;
+         goto decode_failure;
+         
+      /* Floating Point Move Instructions */         
+      case 0x008: // fcpsgn
+      case 0x028: // fneg
+      case 0x048: // fmr
+      case 0x088: // fnabs
+      case 0x108: // fabs
+         if (dis_fp_move( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x3c6: case 0x346:          // fmrgew, fmrgow
+         if (dis_fp_merge( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* Floating Point Status/Control Register Instructions */         
+      case 0x026: // mtfsb1
+      case 0x040: // mcrfs
+      case 0x046: // mtfsb0
+      case 0x086: // mtfsfi
+      case 0x247: // mffs
+      case 0x2C7: // mtfsf
+         // Some of the above instructions need to know more about the
+         // ISA level supported by the host.
+         if (dis_fp_scr( theInstr, allow_GX )) goto decode_success;
+         goto decode_failure;
+
+      default:
+         break; // Fall through...
+      }
+
+      opc2 = ifieldOPClo9( theInstr );
+      switch (opc2) {
+      case 0x42: // dscli, DFP shift left
+      case 0x62: // dscri, DFP shift right
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_shiftq( theInstr ))
+            goto decode_success;
+         goto decode_failure;
+      case 0xc2:  // dtstdc, DFP test data class
+      case 0xe2:  // dtstdg, DFP test data group
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_class_test( theInstr ))
+            goto decode_success;
+         goto decode_failure;
+      default:
+         break;
+      }
+
+      opc2 = ifieldOPClo8( theInstr );
+      switch (opc2) {
+      case 0x3:   // dquaq  - DFP Quantize Quad
+      case 0x23:  // drrndq - DFP Reround Quad
+      case 0x43:  // dquaiq - DFP Quantize immediate Quad
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_quantize_sig_rrndq( theInstr ))
+            goto decode_success;
+         goto decode_failure;
+      case 0xA2: // dtstexq - DFP Test exponent Quad
+         if (dis_dfp_exponent_test( theInstr ) )
+            goto decode_success;
+         goto decode_failure;
+      case 0x63:  // drintxq - DFP Round to an integer value
+      case 0xE3:  // drintnq - DFP Round to an integer value
+         if (!allow_DFP) goto decode_noDFP;
+         if (dis_dfp_roundq( theInstr ))
+            goto decode_success;
+         goto decode_failure;
+
+      default:
+         goto decode_failure;
+      }
+      break;
+
+   case 0x13:
+      switch (opc2) {
+
+      /* Condition Register Logical Instructions */
+      case 0x101: case 0x081: case 0x121: // crand,  crandc, creqv
+      case 0x0E1: case 0x021: case 0x1C1: // crnand, crnor,  cror
+      case 0x1A1: case 0x0C1: case 0x000: // crorc,  crxor,  mcrf
+         if (dis_cond_logic( theInstr )) goto decode_success;
+         goto decode_failure;
+         
+      /* Branch Instructions */
+      case 0x210: case 0x010: // bcctr, bclr
+         if (dis_branch(theInstr, abiinfo, &dres, 
+                                  resteerOkFn, callback_opaque)) 
+            goto decode_success;
+         goto decode_failure;
+         
+      /* Memory Synchronization Instructions */
+      case 0x096: // isync
+         if (dis_memsync( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      default:
+         goto decode_failure;
+      }
+      break;
+
+
+   case 0x1F:
+
+      /* For arith instns, bit10 is the OE flag (overflow enable) */
+
+      opc2 = IFIELD(theInstr, 1, 9);
+      switch (opc2) {
+      /* Integer Arithmetic Instructions */
+      case 0x10A: case 0x00A: case 0x08A: // add,   addc,  adde
+      case 0x0EA: case 0x0CA: case 0x1EB: // addme, addze, divw
+      case 0x1CB: case 0x04B: case 0x00B: // divwu, mulhw, mulhwu
+      case 0x0EB: case 0x068: case 0x028: // mullw, neg,   subf
+      case 0x008: case 0x088: case 0x0E8: // subfc, subfe, subfme
+      case 0x0C8: // subfze
+         if (dis_int_arith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x18B: // divweu (implemented as native insn)
+      case 0x1AB: // divwe (implemented as native insn)
+         if (!allow_VX) goto decode_noVX;
+         if (dis_int_arith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* 64bit Integer Arithmetic */
+      case 0x009: case 0x049: case 0x0E9: // mulhdu, mulhd, mulld
+      case 0x1C9: case 0x1E9: // divdu, divd
+         if (!mode64) goto decode_failure;
+         if (dis_int_arith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x1A9: //  divde (implemented as native insn)
+      case 0x189: //  divdeuo (implemented as native insn)
+         if (!allow_VX) goto decode_noVX;
+         if (!mode64) goto decode_failure;
+         if (dis_int_arith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x1FC:                         // cmpb
+         if (dis_int_logic( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      default:
+         break;  // Fall through...
+      }
+
+      /* All remaining opcodes use full 10 bits. */
+
+      opc2 = IFIELD(theInstr, 1, 10);
+      switch (opc2) {
+      /* Integer Compare Instructions  */
+      case 0x000: case 0x020: // cmp, cmpl
+         if (dis_int_cmp( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* Integer Logical Instructions */
+      case 0x01C: case 0x03C: case 0x01A: // and,  andc,  cntlzw
+      case 0x11C: case 0x3BA: case 0x39A: // eqv,  extsb, extsh
+      case 0x1DC: case 0x07C: case 0x1BC: // nand, nor,   or
+      case 0x19C: case 0x13C:             // orc,  xor
+      case 0x2DF: case 0x25F:            // mftgpr, mffgpr
+         if (dis_int_logic( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x28E: case 0x2AE:             // tbegin., tend.
+      case 0x2EE: case 0x2CE: case 0x30E: // tsr., tcheck., tabortwc.
+      case 0x32E: case 0x34E: case 0x36E: // tabortdc., tabortwci., tabortdci.
+      case 0x38E: case 0x3AE: case 0x3EE: // tabort., treclaim., trechkpt.
+      if (dis_transactional_memory( theInstr,
+                                    getUIntPPCendianly( &guest_code[delta + 4]),
+                                    abiinfo, &dres,
+                                    resteerOkFn, callback_opaque))
+            goto decode_success;
+         goto decode_failure;
+
+      /* 64bit Integer Logical Instructions */
+      case 0x3DA: case 0x03A: // extsw, cntlzd
+         if (!mode64) goto decode_failure;
+         if (dis_int_logic( theInstr )) goto decode_success;
+         goto decode_failure;
+
+         /* 64bit Integer Parity Instructions */
+      case 0xba: // prtyd
+         if (!mode64) goto decode_failure;
+         if (dis_int_parity( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x9a: // prtyw
+         if (dis_int_parity( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* Integer Shift Instructions */
+      case 0x018: case 0x318: case 0x338: // slw, sraw, srawi
+      case 0x218:                         // srw
+         if (dis_int_shift( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* 64bit Integer Shift Instructions */
+      case 0x01B: case 0x31A: // sld, srad
+      case 0x33A: case 0x33B: // sradi
+      case 0x21B:             // srd
+         if (!mode64) goto decode_failure;
+         if (dis_int_shift( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* Integer Load Instructions */
+      case 0x057: case 0x077: case 0x157: // lbzx,  lbzux, lhax
+      case 0x177: case 0x117: case 0x137: // lhaux, lhzx,  lhzux
+      case 0x017: case 0x037:             // lwzx,  lwzux
+         if (dis_int_load( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* 64bit Integer Load Instructions */
+      case 0x035: case 0x015:             // ldux,  ldx
+      case 0x175: case 0x155:             // lwaux, lwax
+         if (!mode64) goto decode_failure;
+         if (dis_int_load( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* Integer Store Instructions */
+      case 0x0F7: case 0x0D7: case 0x1B7: // stbux, stbx,  sthux
+      case 0x197: case 0x0B7: case 0x097: // sthx,  stwux, stwx
+         if (dis_int_store( theInstr, abiinfo )) goto decode_success;
+         goto decode_failure;
+
+      /* 64bit Integer Store Instructions */
+      case 0x0B5: case 0x095: // stdux, stdx
+         if (!mode64) goto decode_failure;
+         if (dis_int_store( theInstr, abiinfo )) goto decode_success;
+         goto decode_failure;
+
+      /* Integer Load and Store with Byte Reverse Instructions */
+      case 0x214: case 0x294: // ldbrx, stdbrx
+         if (!mode64) goto decode_failure;
+         if (dis_int_ldst_rev( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x216: case 0x316: case 0x296:    // lwbrx, lhbrx, stwbrx
+      case 0x396:                            // sthbrx
+         if (dis_int_ldst_rev( theInstr )) goto decode_success;
+         goto decode_failure;
+         
+      /* Integer Load and Store String Instructions */
+      case 0x255: case 0x215: case 0x2D5: // lswi, lswx, stswi
+      case 0x295: {                       // stswx
+         Bool stopHere = False;
+         Bool ok = dis_int_ldst_str( theInstr, &stopHere );
+         if (!ok) goto decode_failure;
+         if (stopHere) {
+            putGST( PPC_GST_CIA, mkSzImm(ty, nextInsnAddr()) );
+            dres.jk_StopHere = Ijk_Boring;
+            dres.whatNext    = Dis_StopHere;
+         }
+         goto decode_success;
+      }
+
+      /* Memory Synchronization Instructions */
+      case 0x034: case 0x074:             // lbarx, lharx
+      case 0x2B6: case 0x2D6:             // stbcx, sthcx
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_memsync( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x356: case 0x014: case 0x096: // eieio, lwarx, stwcx.
+      case 0x256:                         // sync
+         if (dis_memsync( theInstr )) goto decode_success;
+         goto decode_failure;
+         
+      /* 64bit Memory Synchronization Instructions */
+      case 0x054: case 0x0D6: // ldarx, stdcx.
+         if (!mode64) goto decode_failure;
+         if (dis_memsync( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x114: case 0x0B6: // lqarx, stqcx.
+         if (dis_memsync( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* Processor Control Instructions */
+      case 0x33:  case 0x73: // mfvsrd, mfvsrwz
+      case 0xB3:  case 0xD3: case 0xF3: // mtvsrd, mtvsrwa, mtvsrwz
+      case 0x200: case 0x013: case 0x153: // mcrxr, mfcr,  mfspr
+      case 0x173: case 0x090: case 0x1D3: // mftb,  mtcrf, mtspr
+      case 0x220:                         // mcrxrt
+         if (dis_proc_ctl( abiinfo, theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* Cache Management Instructions */
+      case 0x2F6: case 0x056: case 0x036: // dcba, dcbf,   dcbst
+      case 0x116: case 0x0F6: case 0x3F6: // dcbt, dcbtst, dcbz
+      case 0x3D6:                         // icbi
+         if (dis_cache_manage( theInstr, &dres, archinfo ))
+            goto decode_success;
+         goto decode_failure;
+
+//zz       /* External Control Instructions */
+//zz       case 0x136: case 0x1B6: // eciwx, ecowx
+//zz          DIP("external control op => not implemented\n");
+//zz          goto decode_failure;
+
+      /* Trap Instructions */
+      case 0x004:             // tw
+         if (dis_trap(theInstr, &dres)) goto decode_success;
+         goto decode_failure;
+
+      case 0x044:             // td
+         if (!mode64) goto decode_failure;
+         if (dis_trap(theInstr, &dres)) goto decode_success;
+         goto decode_failure;
+
+      /* Floating Point Load Instructions */
+      case 0x217: case 0x237: case 0x257: // lfsx, lfsux, lfdx
+      case 0x277:                         // lfdux
+         if (!allow_F) goto decode_noF;
+         if (dis_fp_load( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* Floating Point Store Instructions */
+      case 0x297: case 0x2B7: case 0x2D7: // stfsx, stfsux, stfdx
+      case 0x2F7:                         // stfdux
+         if (!allow_F) goto decode_noF;
+         if (dis_fp_store( theInstr )) goto decode_success;
+         goto decode_failure;
+      case 0x3D7:                         // stfiwx
+         if (!allow_F) goto decode_noF;
+         if (!allow_GX) goto decode_noGX;
+         if (dis_fp_store( theInstr )) goto decode_success;
+         goto decode_failure;
+
+         /* Floating Point Double Pair Indexed Instructions */
+      case 0x317: // lfdpx (Power6)
+      case 0x397: // stfdpx (Power6)
+         if (!allow_F) goto decode_noF;
+         if (dis_fp_pair(theInstr)) goto decode_success;
+         goto decode_failure;
+
+      case 0x357:                         // lfiwax
+         if (!allow_F) goto decode_noF;
+         if (dis_fp_load( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x377:                         // lfiwzx
+         if (!allow_F) goto decode_noF;
+         if (dis_fp_load( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AltiVec instructions */
+
+      /* AV Cache Control - Data streams */
+      case 0x156: case 0x176: case 0x336: // dst, dstst, dss
+         if (!allow_V) goto decode_noV;
+         if (dis_av_datastream( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Load */
+      case 0x006: case 0x026:             // lvsl, lvsr
+      case 0x007: case 0x027: case 0x047: // lvebx, lvehx, lvewx
+      case 0x067: case 0x167:             // lvx, lvxl
+         if (!allow_V) goto decode_noV;
+         if (dis_av_load( abiinfo, theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Store */
+      case 0x087: case 0x0A7: case 0x0C7: // stvebx, stvehx, stvewx
+      case 0x0E7: case 0x1E7:             // stvx, stvxl
+         if (!allow_V) goto decode_noV;
+         if (dis_av_store( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* VSX Load */
+      case 0x00C: // lxsiwzx
+      case 0x04C: // lxsiwax
+      case 0x20C: // lxsspx
+      case 0x24C: // lxsdx
+      case 0x34C: // lxvd2x
+      case 0x14C: // lxvdsx
+      case 0x30C: // lxvw4x
+        // All of these VSX load instructions use some VMX facilities, so
+        // if allow_V is not set, we'll skip trying to decode.
+        if (!allow_V) goto decode_noV;
+
+	if (dis_vx_load( theInstr )) goto decode_success;
+          goto decode_failure;
+
+      /* VSX Store */
+      case 0x08C: // stxsiwx
+      case 0x28C: // stxsspx
+      case 0x2CC: // stxsdx
+      case 0x3CC: // stxvd2x
+      case 0x38C: // stxvw4x
+        // All of these VSX store instructions use some VMX facilities, so
+        // if allow_V is not set, we'll skip trying to decode.
+        if (!allow_V) goto decode_noV;
+
+	if (dis_vx_store( theInstr )) goto decode_success;
+    	  goto decode_failure;
+
+      /* Miscellaneous ISA 2.06 instructions */
+      case 0x1FA: // popcntd
+      case 0x17A: // popcntw
+      case 0x7A:  // popcntb
+	  if (dis_int_logic( theInstr )) goto decode_success;
+    	  goto decode_failure;
+
+      case 0x0FC: // bpermd
+         if (!mode64) goto decode_failure;
+         if (dis_int_logic( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      default:
+         /* Deal with some other cases that we would otherwise have
+            punted on. */
+         /* --- ISEL (PowerISA_V2.05.pdf, p74) --- */
+         /* only decode this insn when reserved bit 0 (31 in IBM's
+            notation) is zero */
+         if (IFIELD(theInstr, 0, 6) == (15<<1)) {
+            UInt rT = ifieldRegDS( theInstr );
+            UInt rA = ifieldRegA( theInstr );
+            UInt rB = ifieldRegB( theInstr );
+            UInt bi = ifieldRegC( theInstr );
+            putIReg(
+               rT,
+               IRExpr_ITE( binop(Iop_CmpNE32, getCRbit( bi ), mkU32(0)),
+                           rA == 0 ? (mode64 ? mkU64(0) : mkU32(0))
+                                   : getIReg(rA),
+                           getIReg(rB))
+
+            );
+            DIP("isel r%u,r%u,r%u,crb%u\n", rT,rA,rB,bi);
+            goto decode_success;
+         }
+         goto decode_failure;
+      }
+      break;
+
+
+   case 0x04:
+      /* AltiVec instructions */
+
+      opc2 = IFIELD(theInstr, 0, 6);
+      switch (opc2) {
+      /* AV Mult-Add, Mult-Sum */
+      case 0x20: case 0x21: case 0x22: // vmhaddshs, vmhraddshs, vmladduhm
+      case 0x24: case 0x25: case 0x26: // vmsumubm, vmsummbm, vmsumuhm
+      case 0x27: case 0x28: case 0x29: // vmsumuhs, vmsumshm, vmsumshs
+         if (!allow_V) goto decode_noV;
+         if (dis_av_multarith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Permutations */
+      case 0x2A:                       // vsel
+      case 0x2B:                       // vperm
+      case 0x2C:                       // vsldoi
+         if (!allow_V) goto decode_noV;
+         if (dis_av_permute( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x2D:                       // vpermxor
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_permute( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Floating Point Mult-Add/Sub */
+      case 0x2E: case 0x2F:            // vmaddfp, vnmsubfp
+         if (!allow_V) goto decode_noV;
+         if (dis_av_fp_arith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x3D: case 0x3C:            // vaddecuq, vaddeuqm
+      case 0x3F: case 0x3E:            // vsubecuq, vsubeuqm
+         if (!allow_V) goto decode_noV;
+         if (dis_av_quad( theInstr)) goto decode_success;
+         goto decode_failure;
+
+      default:
+         break;  // Fall through...
+      }
+
+      opc2 = IFIELD(theInstr, 0, 9);
+      switch (opc2) {
+      /* BCD arithmetic */
+      case 0x1: case 0x41:             // bcdadd, bcdsub
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_bcd( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      default:
+         break;  // Fall through...
+      }
+
+      opc2 = IFIELD(theInstr, 0, 11);
+      switch (opc2) {
+      /* AV Arithmetic */
+      case 0x180:                         // vaddcuw
+      case 0x000: case 0x040: case 0x080: // vaddubm, vadduhm, vadduwm
+      case 0x200: case 0x240: case 0x280: // vaddubs, vadduhs, vadduws
+      case 0x300: case 0x340: case 0x380: // vaddsbs, vaddshs, vaddsws
+      case 0x580:                         // vsubcuw
+      case 0x400: case 0x440: case 0x480: // vsububm, vsubuhm, vsubuwm
+      case 0x600: case 0x640: case 0x680: // vsububs, vsubuhs, vsubuws
+      case 0x700: case 0x740: case 0x780: // vsubsbs, vsubshs, vsubsws
+      case 0x402: case 0x442: case 0x482: // vavgub, vavguh, vavguw
+      case 0x502: case 0x542: case 0x582: // vavgsb, vavgsh, vavgsw
+      case 0x002: case 0x042: case 0x082: // vmaxub, vmaxuh, vmaxuw
+      case 0x102: case 0x142: case 0x182: // vmaxsb, vmaxsh, vmaxsw
+      case 0x202: case 0x242: case 0x282: // vminub, vminuh, vminuw
+      case 0x302: case 0x342: case 0x382: // vminsb, vminsh, vminsw
+      case 0x008: case 0x048:             // vmuloub, vmulouh
+      case 0x108: case 0x148:             // vmulosb, vmulosh
+      case 0x208: case 0x248:             // vmuleub, vmuleuh
+      case 0x308: case 0x348:             // vmulesb, vmulesh
+      case 0x608: case 0x708: case 0x648: // vsum4ubs, vsum4sbs, vsum4shs
+      case 0x688: case 0x788:             // vsum2sws, vsumsws
+         if (!allow_V) goto decode_noV;
+         if (dis_av_arith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x088: case 0x089:             // vmulouw, vmuluwm
+      case 0x0C0: case 0x0C2:             // vaddudm, vmaxud
+      case 0x1C2: case 0x2C2: case 0x3C2: // vmaxsd, vminud, vminsd
+      case 0x188: case 0x288: case 0x388: // vmulosw, vmuleuw, vmulesw
+      case 0x4C0:                         // vsubudm
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_arith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Polynomial Vector Multiply Add */
+      case 0x408: case 0x448:            // vpmsumb, vpmsumd
+      case 0x488: case 0x4C8:            // vpmsumw, vpmsumh
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_polymultarith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Rotate, Shift */
+      case 0x004: case 0x044: case 0x084: // vrlb, vrlh, vrlw
+      case 0x104: case 0x144: case 0x184: // vslb, vslh, vslw
+      case 0x204: case 0x244: case 0x284: // vsrb, vsrh, vsrw
+      case 0x304: case 0x344: case 0x384: // vsrab, vsrah, vsraw
+      case 0x1C4: case 0x2C4:             // vsl, vsr
+      case 0x40C: case 0x44C:             // vslo, vsro
+         if (!allow_V) goto decode_noV;
+         if (dis_av_shift( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x0C4:                         // vrld
+      case 0x3C4: case 0x5C4: case 0x6C4: // vsrad, vsld, vsrd
+          if (!allow_isa_2_07) goto decode_noP8;
+          if (dis_av_shift( theInstr )) goto decode_success;
+          goto decode_failure;
+
+      /* AV Logic */
+      case 0x404: case 0x444: case 0x484: // vand, vandc, vor
+      case 0x4C4: case 0x504:             // vxor, vnor
+         if (!allow_V) goto decode_noV;
+         if (dis_av_logic( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x544:                         // vorc
+      case 0x584: case 0x684:             // vnand, veqv
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_logic( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Processor Control */
+      case 0x604: case 0x644:             // mfvscr, mtvscr
+         if (!allow_V) goto decode_noV;
+         if (dis_av_procctl( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Floating Point Arithmetic */
+      case 0x00A: case 0x04A:             // vaddfp, vsubfp
+      case 0x10A: case 0x14A: case 0x18A: // vrefp, vrsqrtefp, vexptefp
+      case 0x1CA:                         // vlogefp
+      case 0x40A: case 0x44A:             // vmaxfp, vminfp
+         if (!allow_V) goto decode_noV;
+         if (dis_av_fp_arith( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Floating Point Round/Convert */
+      case 0x20A: case 0x24A: case 0x28A: // vrfin, vrfiz, vrfip
+      case 0x2CA:                         // vrfim
+      case 0x30A: case 0x34A: case 0x38A: // vcfux, vcfsx, vctuxs
+      case 0x3CA:                         // vctsxs
+         if (!allow_V) goto decode_noV;
+         if (dis_av_fp_convert( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      /* AV Merge, Splat */
+      case 0x00C: case 0x04C: case 0x08C: // vmrghb, vmrghh, vmrghw
+      case 0x10C: case 0x14C: case 0x18C: // vmrglb, vmrglh, vmrglw
+      case 0x20C: case 0x24C: case 0x28C: // vspltb, vsplth, vspltw
+      case 0x30C: case 0x34C: case 0x38C: // vspltisb, vspltish, vspltisw
+         if (!allow_V) goto decode_noV;
+         if (dis_av_permute( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x68C: case 0x78C:             // vmrgow, vmrgew
+          if (!allow_isa_2_07) goto decode_noP8;
+          if (dis_av_permute( theInstr )) goto decode_success;
+          goto decode_failure;
+
+      /* AV Pack, Unpack */
+      case 0x00E: case 0x04E: case 0x08E: // vpkuhum, vpkuwum, vpkuhus
+      case 0x0CE:                         // vpkuwus
+      case 0x10E: case 0x14E: case 0x18E: // vpkshus, vpkswus, vpkshss
+      case 0x1CE:                         // vpkswss
+      case 0x20E: case 0x24E: case 0x28E: // vupkhsb, vupkhsh, vupklsb
+      case 0x2CE:                         // vupklsh
+      case 0x30E: case 0x34E: case 0x3CE: // vpkpx, vupkhpx, vupklpx
+          if (!allow_V) goto decode_noV;
+          if (dis_av_pack( theInstr )) goto decode_success;
+          goto decode_failure;
+
+      case 0x44E: case 0x4CE: case 0x54E: // vpkudum, vpkudus, vpksdus
+      case 0x5CE: case 0x64E: case 0x6cE: // vpksdss, vupkhsw, vupklsw
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_pack( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x508: case 0x509:             // vcipher, vcipherlast
+      case 0x548: case 0x549:             // vncipher, vncipherlast
+      case 0x5C8:                         // vsbox
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_cipher( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x6C2: case 0x682:             // vshasigmaw, vshasigmad
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_hash( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x702: case 0x742:             // vclzb, vclzh
+      case 0x782: case 0x7c2:             // vclzw, vclzd
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_count_bitTranspose( theInstr, opc2 )) goto decode_success;
+         goto decode_failure;
+
+      case 0x703: case 0x743:             // vpopcntb, vpopcnth
+      case 0x783: case 0x7c3:             // vpopcntw, vpopcntd
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_count_bitTranspose( theInstr, opc2 )) goto decode_success;
+         goto decode_failure;
+
+      case 0x50c:                         // vgbbd
+         if (!allow_isa_2_07) goto decode_noP8;
+         if (dis_av_count_bitTranspose( theInstr, opc2 )) goto decode_success;
+         goto decode_failure;
+
+      case 0x140: case 0x100:             // vaddcuq, vadduqm
+      case 0x540: case 0x500:             // vsubcuq, vsubuqm
+      case 0x54C:                         // vbpermq
+         if (!allow_V) goto decode_noV;
+         if (dis_av_quad( theInstr)) goto decode_success;
+         goto decode_failure;
+
+      default:
+         break;  // Fall through...
+      }
+
+      opc2 = IFIELD(theInstr, 0, 10);
+      switch (opc2) {
+
+      /* AV Compare */
+      case 0x006: case 0x046: case 0x086: // vcmpequb, vcmpequh, vcmpequw
+      case 0x206: case 0x246: case 0x286: // vcmpgtub, vcmpgtuh, vcmpgtuw
+      case 0x306: case 0x346: case 0x386: // vcmpgtsb, vcmpgtsh, vcmpgtsw
+         if (!allow_V) goto decode_noV;
+         if (dis_av_cmp( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      case 0x0C7:                         // vcmpequd
+      case 0x2C7:                         // vcmpgtud
+      case 0x3C7:                         // vcmpgtsd
+          if (!allow_isa_2_07) goto decode_noP8;
+          if (dis_av_cmp( theInstr )) goto decode_success;
+          goto decode_failure;
+
+      /* AV Floating Point Compare */
+      case 0x0C6: case 0x1C6: case 0x2C6: // vcmpeqfp, vcmpgefp, vcmpgtfp
+      case 0x3C6:                         // vcmpbfp
+         if (!allow_V) goto decode_noV;
+         if (dis_av_fp_cmp( theInstr )) goto decode_success;
+         goto decode_failure;
+
+      default:
+         goto decode_failure;
+      }
+      break;
+
+   default:
+      goto decode_failure;
+
+   decode_noF:
+      vassert(!allow_F);
+      vex_printf("disInstr(ppc): found the Floating Point instruction 0x%x that\n"
+		 "can't be handled by Valgrind on this host.  This instruction\n"
+		 "requires a host that supports Floating Point instructions.\n",
+		 theInstr);
+      goto not_supported;
+   decode_noV:
+      vassert(!allow_V);
+      vex_printf("disInstr(ppc): found an AltiVec or an e500 instruction 0x%x\n"
+		 "that can't be handled by Valgrind.  If this instruction is an\n"
+		 "Altivec instruction, Valgrind must be run on a host that supports"
+		 "AltiVec instructions.  If the application was compiled for e500, then\n"
+		 "unfortunately Valgrind does not yet support e500 instructions.\n",
+		 theInstr);
+      goto not_supported;
+   decode_noVX:
+      vassert(!allow_VX);
+      vex_printf("disInstr(ppc): found the instruction 0x%x that is defined in the\n"
+		 "Power ISA 2.06 ABI but can't be handled by Valgrind on this host.\n"
+		 "This instruction \nrequires a host that supports the ISA 2.06 ABI.\n",
+		 theInstr);
+      goto not_supported;
+   decode_noFX:
+      vassert(!allow_FX);
+      vex_printf("disInstr(ppc): found the General Purpose-Optional instruction 0x%x\n"
+		 "that can't be handled by Valgrind on this host. This instruction\n"
+		 "requires a host that supports the General Purpose-Optional instructions.\n",
+		 theInstr);
+      goto not_supported;
+   decode_noGX:
+      vassert(!allow_GX);
+      vex_printf("disInstr(ppc): found the Graphics-Optional instruction 0x%x\n"
+		 "that can't be handled by Valgrind on this host. This instruction\n"
+		 "requires a host that supports the Graphic-Optional instructions.\n",
+		 theInstr);
+      goto not_supported;
+   decode_noDFP:
+      vassert(!allow_DFP);
+      vex_printf("disInstr(ppc): found the decimal floating point (DFP) instruction 0x%x\n"
+		 "that can't be handled by Valgrind on this host.  This instruction\n"
+		 "requires a host that supports DFP instructions.\n",
+		 theInstr);
+      goto not_supported;
+   decode_noP8:
+      vassert(!allow_isa_2_07);
+      vex_printf("disInstr(ppc): found the Power 8 instruction 0x%x that can't be handled\n"
+		 "by Valgrind on this host.  This instruction requires a host that\n"
+		 "supports Power 8 instructions.\n",
+		 theInstr);
+      goto not_supported;
+
+
+   decode_failure:
+   /* All decode failures end up here. */
+   opc2 = (theInstr) & 0x7FF;
+   if (sigill_diag) {
+      vex_printf("disInstr(ppc): unhandled instruction: "
+                 "0x%x\n", theInstr);
+      vex_printf("                 primary %d(0x%x), secondary %u(0x%x)\n", 
+                 opc1, opc1, opc2, opc2);
+   }
+
+   not_supported:
+   /* Tell the dispatcher that this insn cannot be decoded, and so has
+      not been executed, and (is currently) the next to be executed.
+      CIA should be up-to-date since it made so at the start of each
+      insn, but nevertheless be paranoid and update it again right
+      now. */
+   putGST( PPC_GST_CIA, mkSzImm(ty, guest_CIA_curr_instr) );
+   dres.len         = 0;
+   dres.whatNext    = Dis_StopHere;
+   dres.jk_StopHere = Ijk_NoDecode;
+   dres.continueAt  = 0;
+   return dres;
+   } /* switch (opc) for the main (primary) opcode switch. */
+
+  decode_success:
+   /* All decode successes end up here. */
+   switch (dres.whatNext) {
+      case Dis_Continue:
+         putGST( PPC_GST_CIA, mkSzImm(ty, guest_CIA_curr_instr + 4));
+         break;
+      case Dis_ResteerU:
+      case Dis_ResteerC:
+         putGST( PPC_GST_CIA, mkSzImm(ty, dres.continueAt));
+         break;
+      case Dis_StopHere:
+         break;
+      default:
+         vassert(0);
+   }
+   DIP("\n");
+
+   if (dres.len == 0) {
+      dres.len = 4;
+   } else {
+      vassert(dres.len == 20);
+   }
+   return dres;
+}
+
+#undef DIP
+#undef DIS
+
+
+/*------------------------------------------------------------*/
+/*--- Top-level fn                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction
+   is located in host memory at &guest_code[delta]. */
+
+DisResult disInstr_PPC ( IRSB*        irsb_IN,
+                         Bool         (*resteerOkFn) ( void*, Addr ),
+                         Bool         resteerCisOk,
+                         void*        callback_opaque,
+                         const UChar* guest_code_IN,
+                         Long         delta,
+                         Addr         guest_IP,
+                         VexArch      guest_arch,
+                         const VexArchInfo* archinfo,
+                         const VexAbiInfo*  abiinfo,
+                         VexEndness   host_endness_IN,
+                         Bool         sigill_diag_IN )
+{
+   /* Entry point for PPC disassembly: check the requested guest
+      configuration, install the file-scope translation state, then
+      delegate the real work to disInstr_PPC_WRK. */
+   IRType ty;
+   UInt   caps = archinfo->hwcaps;
+
+   vassert(guest_arch == VexArchPPC32 || guest_arch == VexArchPPC64);
+
+   /* global -- ick */
+   mode64 = (guest_arch == VexArchPPC64);
+   ty     = mode64 ? Ity_I64 : Ity_I32;
+
+   if (!mode64 && host_endness_IN == VexEndnessLE) {
+      /* 32-bit little-endian PPC is not a supported combination;
+         report a decode failure immediately. */
+      DisResult dres;
+      vex_printf("disInstr(ppc): Little Endian 32-bit mode is not supported\n");
+      dres.len         = 0;
+      dres.whatNext    = Dis_StopHere;
+      dres.jk_StopHere = Ijk_NoDecode;
+      dres.continueAt  = 0;
+      return dres;
+   }
+
+   /* Sanity check the hwcaps: a 64-bit guest must carry no 32-bit
+      capability bits, and vice versa. */
+   {
+      UInt m32 = VEX_HWCAPS_PPC32_F  | VEX_HWCAPS_PPC32_V
+                 | VEX_HWCAPS_PPC32_FX | VEX_HWCAPS_PPC32_GX
+                 | VEX_HWCAPS_PPC32_VX | VEX_HWCAPS_PPC32_DFP
+                 | VEX_HWCAPS_PPC32_ISA2_07;
+      UInt m64 = VEX_HWCAPS_PPC64_V  | VEX_HWCAPS_PPC64_FX
+                 | VEX_HWCAPS_PPC64_GX | VEX_HWCAPS_PPC64_VX
+                 | VEX_HWCAPS_PPC64_DFP | VEX_HWCAPS_PPC64_ISA2_07;
+      vassert((caps & (mode64 ? m32 : m64)) == 0);
+   }
+
+   /* Set globals (see top of this file) */
+   guest_code   = guest_code_IN;
+   irsb         = irsb_IN;
+   host_endness = host_endness_IN;
+
+   /* CIA of the instruction being decoded, and of the block start. */
+   guest_CIA_curr_instr = mkSzAddr(ty, guest_IP);
+   guest_CIA_bbstart    = mkSzAddr(ty, guest_IP - delta);
+
+   return disInstr_PPC_WRK ( resteerOkFn, resteerCisOk, callback_opaque,
+                             delta, archinfo, abiinfo, sigill_diag_IN );
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Unused stuff                                         ---*/
+/*------------------------------------------------------------*/
+
+///* A potentially more memcheck-friendly implementation of Clz32, with
+//   the boundary case Clz32(0) = 32, which is what ppc requires. */
+//
+//static IRExpr* /* :: Ity_I32 */ verbose_Clz32 ( IRTemp arg )
+//{
+//   /* Welcome ... to SSA R Us. */
+//   IRTemp n1  = newTemp(Ity_I32);
+//   IRTemp n2  = newTemp(Ity_I32);
+//   IRTemp n3  = newTemp(Ity_I32);
+//   IRTemp n4  = newTemp(Ity_I32);
+//   IRTemp n5  = newTemp(Ity_I32);
+//   IRTemp n6  = newTemp(Ity_I32);
+//   IRTemp n7  = newTemp(Ity_I32);
+//   IRTemp n8  = newTemp(Ity_I32);
+//   IRTemp n9  = newTemp(Ity_I32);
+//   IRTemp n10 = newTemp(Ity_I32);
+//   IRTemp n11 = newTemp(Ity_I32);
+//   IRTemp n12 = newTemp(Ity_I32);
+//
+//   /* First, propagate the most significant 1-bit into all lower
+//      positions in the word. */
+//   /* unsigned int clz ( unsigned int n )
+//      {
+//         n |= (n >> 1);
+//         n |= (n >> 2);
+//         n |= (n >> 4);
+//         n |= (n >> 8);
+//         n |= (n >> 16);
+//         return bitcount(~n);
+//      }
+//   */
+//   assign(n1, mkexpr(arg));
+//   assign(n2, binop(Iop_Or32, mkexpr(n1), binop(Iop_Shr32, mkexpr(n1), mkU8(1))));
+//   assign(n3, binop(Iop_Or32, mkexpr(n2), binop(Iop_Shr32, mkexpr(n2), mkU8(2))));
+//   assign(n4, binop(Iop_Or32, mkexpr(n3), binop(Iop_Shr32, mkexpr(n3), mkU8(4))));
+//   assign(n5, binop(Iop_Or32, mkexpr(n4), binop(Iop_Shr32, mkexpr(n4), mkU8(8))));
+//   assign(n6, binop(Iop_Or32, mkexpr(n5), binop(Iop_Shr32, mkexpr(n5), mkU8(16))));
+//   /* This gives a word of the form 0---01---1.  Now invert it, giving
+//      a word of the form 1---10---0, then do a population-count idiom
+//      (to count the 1s, which is the number of leading zeroes, or 32
+//      if the original word was 0. */
+//   assign(n7, unop(Iop_Not32, mkexpr(n6)));
+//
+//   /* unsigned int bitcount ( unsigned int n )
+//      {
+//         n = n - ((n >> 1) & 0x55555555);
+//         n = (n & 0x33333333) + ((n >> 2) & 0x33333333);
+//         n = (n + (n >> 4)) & 0x0F0F0F0F;
+//         n = n + (n >> 8);
+//         n = (n + (n >> 16)) & 0x3F;
+//         return n;
+//      }
+//   */
+//   assign(n8, 
+//          binop(Iop_Sub32, 
+//                mkexpr(n7),  
+//                binop(Iop_And32, 
+//                      binop(Iop_Shr32, mkexpr(n7), mkU8(1)),
+//                      mkU32(0x55555555))));
+//   assign(n9,
+//          binop(Iop_Add32,
+//                binop(Iop_And32, mkexpr(n8), mkU32(0x33333333)),
+//                binop(Iop_And32,
+//                      binop(Iop_Shr32, mkexpr(n8), mkU8(2)),
+//                      mkU32(0x33333333))));
+//   assign(n10,
+//          binop(Iop_And32,
+//                binop(Iop_Add32, 
+//                      mkexpr(n9), 
+//                      binop(Iop_Shr32, mkexpr(n9), mkU8(4))),
+//                mkU32(0x0F0F0F0F)));
+//   assign(n11,
+//          binop(Iop_Add32,
+//                mkexpr(n10),
+//                binop(Iop_Shr32, mkexpr(n10), mkU8(8))));
+//   assign(n12,
+//          binop(Iop_Add32,
+//                mkexpr(n11),
+//                binop(Iop_Shr32, mkexpr(n11), mkU8(16))));
+//   return
+//      binop(Iop_And32, mkexpr(n12), mkU32(0x3F));
+//}
+
+/*--------------------------------------------------------------------*/
+/*--- end                                         guest_ppc_toIR.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/priv/guest_s390_defs.h b/VEX/priv/guest_s390_defs.h
new file mode 100644
index 0000000..758cf91
--- /dev/null
+++ b/VEX/priv/guest_s390_defs.h
@@ -0,0 +1,261 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 guest_s390_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#ifndef __VEX_GUEST_S390_DEFS_H
+#define __VEX_GUEST_S390_DEFS_H
+
+#include "libvex_basictypes.h"        // offsetof
+#include "guest_generic_bb_to_IR.h"   // DisResult
+#include "libvex_guest_s390x.h"       // VexGuestS390XState
+
+
+/* Convert one s390 insn to IR.  See the type DisOneInstrFn in
+   bb_to_IR.h. */
+DisResult disInstr_S390 ( IRSB*        irbb,
+                          Bool         (*resteerOkFn) ( void*, Addr ),
+                          Bool         resteerCisOk,
+                          void*        callback_opaque,
+                          const UChar* guest_code,
+                          Long         delta,
+                          Addr         guest_IP,
+                          VexArch      guest_arch,
+                          const VexArchInfo* archinfo,
+                          const VexAbiInfo*  abiinfo,
+                          VexEndness   host_endness,
+                          Bool         sigill_diag );
+
+/* Used by the optimiser to specialise calls to helpers. */
+IRExpr* guest_s390x_spechelper ( const HChar *function_name,
+                                 IRExpr **args,
+                                 IRStmt **precedingStmts,
+                                 Int n_precedingStmts);
+
+
+/* Describes to the optimiser which part of the guest state require
+   precise memory exceptions.  This is logically part of the guest
+   state description. */
+Bool guest_s390x_state_requires_precise_mem_exns ( Int, Int,
+                                                   VexRegisterUpdates );
+
+extern VexGuestLayout s390xGuest_layout;
+
+
+#define S390X_GUEST_OFFSET(x)  offsetof(VexGuestS390XState, x)
+
+/*------------------------------------------------------------*/
+/*--- Helper functions.                                    ---*/
+/*------------------------------------------------------------*/
+void s390x_dirtyhelper_EX(ULong torun);
+ULong s390x_dirtyhelper_STCK(ULong *addr);
+ULong s390x_dirtyhelper_STCKF(ULong *addr);
+ULong s390x_dirtyhelper_STCKE(ULong *addr);
+ULong s390x_dirtyhelper_STFLE(VexGuestS390XState *guest_state, ULong *addr);
+void  s390x_dirtyhelper_CUxy(UChar *addr, ULong data, ULong num_bytes);
+
+ULong s390_do_cu12_cu14_helper1(UInt byte1, UInt etf3_and_m3_is_1);
+ULong s390_do_cu12_helper2(UInt byte1, UInt byte2, UInt byte3, UInt byte4,
+                           ULong stuff);
+ULong s390_do_cu14_helper2(UInt byte1, UInt byte2, UInt byte3, UInt byte4,
+                           ULong stuff);
+ULong s390_do_cu21(UInt srcvalue, UInt low_surrogate);
+ULong s390_do_cu24(UInt srcvalue, UInt low_surrogate);
+ULong s390_do_cu41(UInt srcvalue);
+ULong s390_do_cu42(UInt srcvalue);
+UInt  s390_do_cvb(ULong decimal);
+ULong s390_do_cvd(ULong binary);
+ULong s390_do_ecag(ULong op2addr);
+UInt  s390_do_pfpo(UInt gpr0);
+
+/* The various ways to compute the condition code. */
+/* NOTE: the numeric values are explicit and must stay stable — the
+   cc-computation helpers (s390_calculate_cc / s390_calculate_cond,
+   declared below) dispatch on these as their 'op' argument, and the
+   thunk-layout table further down documents the operand meaning for
+   each op.  Only append new ops at the end; never renumber. */
+enum {
+   S390_CC_OP_BITWISE = 0,
+   S390_CC_OP_SIGNED_COMPARE = 1,
+   S390_CC_OP_UNSIGNED_COMPARE = 2,
+   S390_CC_OP_SIGNED_ADD_32 = 3,
+   S390_CC_OP_SIGNED_ADD_64 = 4,
+   S390_CC_OP_UNSIGNED_ADD_32 = 5,
+   S390_CC_OP_UNSIGNED_ADD_64 = 6,
+   S390_CC_OP_UNSIGNED_ADDC_32 = 7,
+   S390_CC_OP_UNSIGNED_ADDC_64 = 8,
+   S390_CC_OP_SIGNED_SUB_32 = 9,
+   S390_CC_OP_SIGNED_SUB_64 = 10,
+   S390_CC_OP_UNSIGNED_SUB_32 = 11,
+   S390_CC_OP_UNSIGNED_SUB_64 = 12,
+   S390_CC_OP_UNSIGNED_SUBB_32 = 13,
+   S390_CC_OP_UNSIGNED_SUBB_64 = 14,
+   S390_CC_OP_LOAD_AND_TEST = 15,
+   S390_CC_OP_LOAD_POSITIVE_32 = 16,
+   S390_CC_OP_LOAD_POSITIVE_64 = 17,
+   S390_CC_OP_TEST_UNDER_MASK_8 = 18,
+   S390_CC_OP_TEST_UNDER_MASK_16 = 19,
+   S390_CC_OP_SHIFT_LEFT_32 = 20,
+   S390_CC_OP_SHIFT_LEFT_64 = 21,
+   S390_CC_OP_INSERT_CHAR_MASK_32 = 22,
+   S390_CC_OP_BFP_RESULT_32 = 23,
+   S390_CC_OP_BFP_RESULT_64 = 24,
+   S390_CC_OP_BFP_RESULT_128 = 25,
+   S390_CC_OP_BFP_32_TO_INT_32 = 26,
+   S390_CC_OP_BFP_64_TO_INT_32 = 27,
+   S390_CC_OP_BFP_128_TO_INT_32 = 28,
+   S390_CC_OP_BFP_32_TO_INT_64 = 29,
+   S390_CC_OP_BFP_64_TO_INT_64 = 30,
+   S390_CC_OP_BFP_128_TO_INT_64 = 31,
+   S390_CC_OP_BFP_TDC_32 = 32,
+   S390_CC_OP_BFP_TDC_64 = 33,
+   S390_CC_OP_BFP_TDC_128 = 34,
+   S390_CC_OP_SET = 35,
+   S390_CC_OP_BFP_32_TO_UINT_32 = 36,
+   S390_CC_OP_BFP_64_TO_UINT_32 = 37,
+   S390_CC_OP_BFP_128_TO_UINT_32 = 38,
+   S390_CC_OP_BFP_32_TO_UINT_64 = 39,
+   S390_CC_OP_BFP_64_TO_UINT_64 = 40,
+   S390_CC_OP_BFP_128_TO_UINT_64 = 41,
+   S390_CC_OP_DFP_RESULT_64 = 42,
+   S390_CC_OP_DFP_RESULT_128 = 43,
+   S390_CC_OP_DFP_TDC_32 = 44,
+   S390_CC_OP_DFP_TDC_64 = 45,
+   S390_CC_OP_DFP_TDC_128 = 46,
+   S390_CC_OP_DFP_TDG_32 = 47,
+   S390_CC_OP_DFP_TDG_64 = 48,
+   S390_CC_OP_DFP_TDG_128 = 49,
+   S390_CC_OP_DFP_64_TO_UINT_32 = 50,
+   S390_CC_OP_DFP_128_TO_UINT_32 = 51,
+   S390_CC_OP_DFP_64_TO_UINT_64 = 52,
+   S390_CC_OP_DFP_128_TO_UINT_64 = 53,
+   S390_CC_OP_DFP_64_TO_INT_32 = 54,
+   S390_CC_OP_DFP_128_TO_INT_32 = 55,
+   S390_CC_OP_DFP_64_TO_INT_64 = 56,
+   S390_CC_OP_DFP_128_TO_INT_64 = 57,
+   S390_CC_OP_PFPO_32 = 58,
+   S390_CC_OP_PFPO_64 = 59,
+   S390_CC_OP_PFPO_128 = 60
+};
+
+/*------------------------------------------------------------*/
+/*--- Thunk layout                                         ---*/
+/*------------------------------------------------------------*/
+
+/*
+   Z -- value is zero extended to 32 / 64 bit
+   S -- value is sign extended to 32 / 64 bit
+   F -- a binary floating point value
+   D -- a decimal floating point value
+
+   +--------------------------------+-----------------------+----------------------+-----------------+
+   | op                             |   cc_dep1             |   cc_dep2            |   cc_ndep       |
+   +--------------------------------+-----------------------+----------------------+-----------------+
+   | S390_CC_OP_BITWISE             | Z result              |                      |                 |
+   | S390_CC_OP_SIGNED_COMPARE      | S 1st operand         | S 2nd operand        |                 |
+   | S390_CC_OP_UNSIGNED_COMPARE    | Z 1st operand         | Z 2nd operand        |                 |
+   | S390_CC_OP_SIGNED_ADD_32       | S 1st operand         | S 2nd operand        |                 |
+   | S390_CC_OP_SIGNED_ADD_64       | S 1st operand         | S 2nd operand        |                 |
+   | S390_CC_OP_UNSIGNED_ADD_32     | Z 1st operand         | Z 2nd operand        |                 |
+   | S390_CC_OP_UNSIGNED_ADD_64     | Z 1st operand         | Z 2nd operand        |                 |
+   | S390_CC_OP_UNSIGNED_ADDC_32    | Z 1st operand         | Z 2nd operand        | Z carry in      |
+   | S390_CC_OP_UNSIGNED_ADDC_64    | Z 1st operand         | Z 2nd operand        | Z carry in      |
+   | S390_CC_OP_SIGNED_SUB_32       | S left operand        | S right operand      |                 |
+   | S390_CC_OP_SIGNED_SUB_64       | S left operand        | S right operand      |                 |
+   | S390_CC_OP_UNSIGNED_SUB_32     | Z left operand        | Z right operand      |                 |
+   | S390_CC_OP_UNSIGNED_SUB_64     | Z left operand        | Z right operand      |                 |
+   | S390_CC_OP_UNSIGNED_SUBB_32    | Z left operand        | Z right operand      | Z borrow in     |
+   | S390_CC_OP_UNSIGNED_SUBB_64    | Z left operand        | Z right operand      | Z borrow in     |
+   | S390_CC_OP_LOAD_AND_TEST       | S loaded value        |                      |                 |
+   | S390_CC_OP_LOAD_POSITIVE_32    | S loaded value        |                      |                 |
+   | S390_CC_OP_LOAD_POSITIVE_64    | S loaded value        |                      |                 |
+   | S390_CC_OP_TEST_UNDER_MASK_8   | Z tested value        | Z mask               |                 |
+   | S390_CC_OP_TEST_UNDER_MASK_16  | Z tested value        | Z mask               |                 |
+   | S390_CC_OP_SHIFT_LEFT_32       | Z value to be shifted | Z shift amount       |                 |
+   | S390_CC_OP_SHIFT_LEFT_64       | Z value to be shifted | Z shift amount       |                 |
+   | S390_CC_OP_INSERT_CHAR_MASK_32 | Z result              | Z mask               |                 |
+   | S390_CC_OP_BFP_RESULT_32       | F result              |                      |                 |
+   | S390_CC_OP_BFP_RESULT_64       | F result              |                      |                 |
+   | S390_CC_OP_BFP_RESULT_128      | F result hi 64 bits   | F result low 64 bits |                 |
+   | S390_CC_OP_BFP_32_TO_INT_32    | F source              | Z rounding mode      |                 |
+   | S390_CC_OP_BFP_64_TO_INT_32    | F source              | Z rounding mode      |                 |
+   | S390_CC_OP_BFP_128_TO_INT_32   | F source hi 64 bits   | F source low 64 bits | Z rounding mode |
+   | S390_CC_OP_BFP_32_TO_INT_64    | F source              | Z rounding mode      |                 |
+   | S390_CC_OP_BFP_64_TO_INT_64    | F source              | Z rounding mode      |                 |
+   | S390_CC_OP_BFP_128_TO_INT_64   | F source hi 64 bits   | F source low 64 bits | Z rounding mode |
+   | S390_CC_OP_BFP_TDC_32          | F value               | Z class              |                 |
+   | S390_CC_OP_BFP_TDC_64          | F value               | Z class              |                 |
+   | S390_CC_OP_BFP_TDC_128         | F value hi 64 bits    | F value low 64 bits  | Z class         |
+   | S390_CC_OP_SET                 | Z condition code      |                      |                 |
+   | S390_CC_OP_BFP_32_TO_UINT_32   | F source              | Z rounding mode      |                 |
+   | S390_CC_OP_BFP_64_TO_UINT_32   | F source              | Z rounding mode      |                 |
+   | S390_CC_OP_BFP_128_TO_UINT_32  | F source hi 64 bits   | F source low 64 bits | Z rounding mode |
+   | S390_CC_OP_BFP_32_TO_UINT_64   | F source              | Z rounding mode      |                 |
+   | S390_CC_OP_BFP_64_TO_UINT_64   | F source              | Z rounding mode      |                 |
+   | S390_CC_OP_BFP_128_TO_UINT_64  | F source hi 64 bits   | F source low 64 bits | Z rounding mode |
+   | S390_CC_OP_DFP_RESULT_64       | D result              |                      |                 |
+   | S390_CC_OP_DFP_RESULT_128      | D result hi 64 bits   | D result low 64 bits |                 |
+   | S390_CC_OP_DFP_TDC_32          | D value               | Z class              |                 |
+   | S390_CC_OP_DFP_TDC_64          | D value               | Z class              |                 |
+   | S390_CC_OP_DFP_TDC_128         | D value hi 64 bits    | D value low 64 bits  | Z class         |
+   | S390_CC_OP_DFP_TDG_32          | D value               | Z group              |                 |
+   | S390_CC_OP_DFP_TDG_64          | D value               | Z group              |                 |
+   | S390_CC_OP_DFP_TDG_128         | D value hi 64 bits    | D value low 64 bits  | Z group         |
+   | S390_CC_OP_DFP_64_TO_UINT_32   | D source              | Z rounding mode      |                 |
+   | S390_CC_OP_DFP_128_TO_UINT_32  | D source hi 64 bits   | D source low 64 bits | Z rounding mode |
+   | S390_CC_OP_DFP_64_TO_UINT_64   | D source              | Z rounding mode      |                 |
+   | S390_CC_OP_DFP_128_TO_UINT_64  | D source hi 64 bits   | D source low 64 bits | Z rounding mode |
+   | S390_CC_OP_DFP_64_TO_INT_32    | D source              | Z rounding mode      |                 |
+   | S390_CC_OP_DFP_128_TO_INT_32   | D source hi 64 bits   | D source low 64 bits | Z rounding mode |
+   | S390_CC_OP_DFP_64_TO_INT_64    | D source              | Z rounding mode      |                 |
+   | S390_CC_OP_DFP_128_TO_INT_64   | D source hi 64 bits   | D source low 64 bits | Z rounding mode |
+   | S390_CC_OP_PFPO_32             | F|D source            | Z GR0 low 32 bits    |                 |
+   | S390_CC_OP_PFPO_64             | F|D source            | Z GR0 low 32 bits    |                 |
+   | S390_CC_OP_PFPO_128            | F|D source hi 64 bits | F|D src low 64 bits  | Z GR0 low 32 bits |
+   +--------------------------------+-----------------------+----------------------+-----------------+
+*/
+
+/*------------------------------------------------------------*/
+/*--- Condition code helpers.                             ---*/
+/*------------------------------------------------------------*/
+UInt s390_calculate_cc(ULong cc_op, ULong cc_dep1, ULong cc_dep2,
+                       ULong cc_ndep);
+UInt s390_calculate_cond(ULong mask, ULong op, ULong dep1, ULong dep2,
+                         ULong ndep);
+
+/* Size of special instruction preamble */
+#define S390_SPECIAL_OP_PREAMBLE_SIZE 8
+
+/* Size of special instructions */
+#define S390_SPECIAL_OP_SIZE 2
+
+/* Last target instruction for the EX helper */
+extern ULong last_execute_target;
+
+/*---------------------------------------------------------------*/
+/*--- end                                   guest_s390_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+#endif /* __VEX_GUEST_S390_DEFS_H */
diff --git a/VEX/priv/guest_s390_helpers.c b/VEX/priv/guest_s390_helpers.c
new file mode 100644
index 0000000..622cdcc
--- /dev/null
+++ b/VEX/priv/guest_s390_helpers.c
@@ -0,0 +1,2403 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                              guest_s390_helpers.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_s390x.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_s390x_common.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_s390_defs.h"
+#include "s390_defs.h"               /* S390_BFP_ROUND_xyzzy */
+
+void
+LibVEX_GuestS390X_initialise(VexGuestS390XState *state)
+{
+/*------------------------------------------------------------*/
+/*--- Initialise ar registers                              ---*/
+/*------------------------------------------------------------*/
+
+   state->guest_a0 = 0;
+   state->guest_a1 = 0;
+   state->guest_a2 = 0;
+   state->guest_a3 = 0;
+   state->guest_a4 = 0;
+   state->guest_a5 = 0;
+   state->guest_a6 = 0;
+   state->guest_a7 = 0;
+   state->guest_a8 = 0;
+   state->guest_a9 = 0;
+   state->guest_a10 = 0;
+   state->guest_a11 = 0;
+   state->guest_a12 = 0;
+   state->guest_a13 = 0;
+   state->guest_a14 = 0;
+   state->guest_a15 = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise fpr registers                             ---*/
+/*------------------------------------------------------------*/
+
+   state->guest_f0 = 0;
+   state->guest_f1 = 0;
+   state->guest_f2 = 0;
+   state->guest_f3 = 0;
+   state->guest_f4 = 0;
+   state->guest_f5 = 0;
+   state->guest_f6 = 0;
+   state->guest_f7 = 0;
+   state->guest_f8 = 0;
+   state->guest_f9 = 0;
+   state->guest_f10 = 0;
+   state->guest_f11 = 0;
+   state->guest_f12 = 0;
+   state->guest_f13 = 0;
+   state->guest_f14 = 0;
+   state->guest_f15 = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise gpr registers                             ---*/
+/*------------------------------------------------------------*/
+
+   state->guest_r0 = 0;
+   state->guest_r1 = 0;
+   state->guest_r2 = 0;
+   state->guest_r3 = 0;
+   state->guest_r4 = 0;
+   state->guest_r5 = 0;
+   state->guest_r6 = 0;
+   state->guest_r7 = 0;
+   state->guest_r8 = 0;
+   state->guest_r9 = 0;
+   state->guest_r10 = 0;
+   state->guest_r11 = 0;
+   state->guest_r12 = 0;
+   state->guest_r13 = 0;
+   state->guest_r14 = 0;
+   state->guest_r15 = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise S390 miscellaneous registers              ---*/
+/*------------------------------------------------------------*/
+
+   state->guest_counter = 0;
+   state->guest_fpc = 0;
+   state->guest_IA = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise S390 pseudo registers                     ---*/
+/*------------------------------------------------------------*/
+
+   state->guest_SYSNO = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise generic pseudo registers                  ---*/
+/*------------------------------------------------------------*/
+
+   state->guest_NRADDR = 0;
+   state->guest_CMSTART = 0;
+   state->guest_CMLEN = 0;
+   state->guest_IP_AT_SYSCALL = 0;
+   state->guest_EMNOTE = EmNote_NONE;
+   state->host_EvC_COUNTER = 0;
+   state->host_EvC_FAILADDR = 0;
+
+/*------------------------------------------------------------*/
+/*--- Initialise thunk                                     ---*/
+/*------------------------------------------------------------*/
+
+   state->guest_CC_OP = 0;
+   state->guest_CC_DEP1 = 0;
+   state->guest_CC_DEP2 = 0;
+   state->guest_CC_NDEP = 0;
+
+   __builtin_memset(state->padding, 0x0, sizeof(state->padding));
+}
+
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions.  If in doubt return
+   True (but this generates significantly slower code).  */
+Bool
+guest_s390x_state_requires_precise_mem_exns (
+   Int minoff, Int maxoff, VexRegisterUpdates pxControl
+)
+{
+   Int lr_min = S390X_GUEST_OFFSET(guest_LR);
+   Int lr_max = lr_min + 8 - 1;
+   Int sp_min = S390X_GUEST_OFFSET(guest_SP);
+   Int sp_max = sp_min + 8 - 1;
+   Int fp_min = S390X_GUEST_OFFSET(guest_FP);
+   Int fp_max = fp_min + 8 - 1;
+   Int ia_min = S390X_GUEST_OFFSET(guest_IA);
+   Int ia_max = ia_min + 8 - 1;
+
+   if (maxoff < sp_min || minoff > sp_max) {
+      /* No overlap with SP */
+      if (pxControl == VexRegUpdSpAtMemAccess)
+         return False; // We only need to check stack pointer.
+   } else {
+      return True;
+   }
+
+   if (maxoff < lr_min || minoff > lr_max) {
+      /* No overlap with LR */
+   } else {
+      return True;
+   }
+
+   if (maxoff < fp_min || minoff > fp_max) {
+      /* No overlap with FP */
+   } else {
+      return True;
+   }
+
+   if (maxoff < ia_min || minoff > ia_max) {
+      /* No overlap with IA */
+   } else {
+      return True;
+   }
+
+   return False;
+}
+
+
+#define ALWAYSDEFD(field)                             \
+    { S390X_GUEST_OFFSET(field),            \
+      (sizeof ((VexGuestS390XState*)0)->field) }
+
+VexGuestLayout s390xGuest_layout = {
+
+   /* Total size of the guest state, in bytes. */
+   .total_sizeB = sizeof(VexGuestS390XState),
+
+   /* Describe the stack pointer. */
+   .offset_SP = S390X_GUEST_OFFSET(guest_SP),
+   .sizeof_SP = 8,
+
+   /* Describe the frame pointer. */
+   .offset_FP = S390X_GUEST_OFFSET(guest_FP),
+   .sizeof_FP = 8,
+
+   /* Describe the instruction pointer. */
+   .offset_IP = S390X_GUEST_OFFSET(guest_IA),
+   .sizeof_IP = 8,
+
+   /* Describe any sections to be regarded by Memcheck as
+      'always-defined'. */
+   .n_alwaysDefd = 9,
+
+   /* Flags thunk: OP and NDEP are always defined, whereas DEP1
+      and DEP2 have to be tracked.  See detailed comment in
+      gdefs.h on meaning of thunk fields. */
+   .alwaysDefd = {
+      /*  0 */ ALWAYSDEFD(guest_CC_OP),     /* generic */
+      /*  1 */ ALWAYSDEFD(guest_CC_NDEP),   /* generic */
+      /*  2 */ ALWAYSDEFD(guest_EMNOTE),    /* generic */
+      /*  3 */ ALWAYSDEFD(guest_CMSTART),   /* generic */
+      /*  4 */ ALWAYSDEFD(guest_CMLEN),     /* generic */
+      /*  5 */ ALWAYSDEFD(guest_IP_AT_SYSCALL), /* generic */
+      /*  6 */ ALWAYSDEFD(guest_IA),        /* control reg */
+      /*  7 */ ALWAYSDEFD(guest_fpc),       /* control reg */
+      /*  8 */ ALWAYSDEFD(guest_counter),   /* internal usage register */
+   }
+};
+
+/*------------------------------------------------------------*/
+/*--- Dirty helper for EXecute                             ---*/
+/*------------------------------------------------------------*/
+void
+s390x_dirtyhelper_EX(ULong torun)
+{
+   last_execute_target = torun;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Dirty helper for Clock instructions                  ---*/
+/*------------------------------------------------------------*/
+#if defined(VGA_s390x)
+ULong
+s390x_dirtyhelper_STCK(ULong *addr)
+{
+   UInt cc;
+
+   asm volatile("stck %0\n"
+                "ipm %1\n"
+                "srl %1,28\n"
+                : "+Q" (*addr), "=d" (cc) : : "cc");
+   return cc;
+}
+
+ULong
+s390x_dirtyhelper_STCKE(ULong *addr)
+{
+   UInt cc;
+
+   asm volatile("stcke %0\n"
+                "ipm %1\n"
+                "srl %1,28\n"
+                : "+Q" (*addr), "=d" (cc) : : "cc");
+   return cc;
+}
+
+ULong s390x_dirtyhelper_STCKF(ULong *addr)
+{
+   UInt cc;
+
+   asm volatile(".insn s,0xb27c0000,%0\n"
+                "ipm %1\n"
+                "srl %1,28\n"
+                : "+Q" (*addr), "=d" (cc) : : "cc");
+   return cc;
+}
+#else
+ULong s390x_dirtyhelper_STCK(ULong *addr)  {return 3;}
+ULong s390x_dirtyhelper_STCKF(ULong *addr) {return 3;}
+ULong s390x_dirtyhelper_STCKE(ULong *addr) {return 3;}
+#endif /* VGA_s390x */
+
+/*------------------------------------------------------------*/
+/*--- Dirty helper for Store Facility instruction          ---*/
+/*------------------------------------------------------------*/
+#if defined(VGA_s390x)
+static void
+s390_set_facility_bit(ULong *addr, UInt bitno, UInt value)
+{
+   addr  += bitno / 64;
+   bitno  = bitno % 64;
+
+   ULong mask = 1;
+   mask <<= (63 - bitno);
+
+   if (value == 1) {
+      *addr |= mask;   // set
+   } else {
+      *addr &= ~mask;  // clear
+   }
+}
+
+ULong
+s390x_dirtyhelper_STFLE(VexGuestS390XState *guest_state, ULong *addr)
+{
+   ULong hoststfle[S390_NUM_FACILITY_DW], cc, num_dw, i;
+   register ULong reg0 asm("0") = guest_state->guest_r0 & 0xF;  /* r0[56:63] */
+
+   /* We cannot store more than S390_NUM_FACILITY_DW
+      (and it does not make much sense to do so anyhow) */
+   if (reg0 > S390_NUM_FACILITY_DW - 1)
+      reg0 = S390_NUM_FACILITY_DW - 1;
+
+   num_dw = reg0 + 1;  /* number of double words written */
+
+   asm volatile(" .insn s,0xb2b00000,%0\n"   /* stfle */
+                "ipm    %2\n"
+                "srl    %2,28\n"
+                : "=m" (hoststfle), "+d"(reg0), "=d"(cc) : : "cc", "memory");
+
+   /* Update guest register 0  with what STFLE set r0 to */
+   guest_state->guest_r0 = reg0;
+
+   /* Set default: VM facilities = host facilities */
+   for (i = 0; i < num_dw; ++i)
+      addr[i] = hoststfle[i];
+
+   /* Now adjust the VM facilities according to what the VM supports */
+   s390_set_facility_bit(addr, S390_FAC_LDISP,  1);
+   s390_set_facility_bit(addr, S390_FAC_EIMM,   1);
+   s390_set_facility_bit(addr, S390_FAC_ETF2,   1);
+   s390_set_facility_bit(addr, S390_FAC_ETF3,   1);
+   s390_set_facility_bit(addr, S390_FAC_GIE,    1);
+   s390_set_facility_bit(addr, S390_FAC_EXEXT,  1);
+   s390_set_facility_bit(addr, S390_FAC_HIGHW,  1);
+
+   s390_set_facility_bit(addr, S390_FAC_HFPMAS, 0);
+   s390_set_facility_bit(addr, S390_FAC_HFPUNX, 0);
+   s390_set_facility_bit(addr, S390_FAC_XCPUT,  0);
+   s390_set_facility_bit(addr, S390_FAC_MSA,    0);
+   s390_set_facility_bit(addr, S390_FAC_PENH,   0);
+   s390_set_facility_bit(addr, S390_FAC_DFP,    0);
+   s390_set_facility_bit(addr, S390_FAC_PFPO,   0);
+   s390_set_facility_bit(addr, S390_FAC_DFPZC,  0);
+   s390_set_facility_bit(addr, S390_FAC_MISC,   0);
+   s390_set_facility_bit(addr, S390_FAC_CTREXE, 0);
+   s390_set_facility_bit(addr, S390_FAC_TREXE,  0);
+   s390_set_facility_bit(addr, S390_FAC_MSA4,   0);
+
+   return cc;
+}
+
+#else
+
+ULong
+s390x_dirtyhelper_STFLE(VexGuestS390XState *guest_state, ULong *addr)
+{
+   return 3;
+}
+#endif /* VGA_s390x */
+
+/*------------------------------------------------------------*/
+/*--- Dirty helper for the "convert unicode" insn family.  ---*/
+/*------------------------------------------------------------*/
+void
+s390x_dirtyhelper_CUxy(UChar *address, ULong data, ULong num_bytes)
+{
+   UInt i;
+
+   vassert(num_bytes >= 1 && num_bytes <= 4);
+
+   /* Store the least significant NUM_BYTES bytes in DATA left to right
+      at ADDRESS. */
+   for (i = 1; i <= num_bytes; ++i) {
+      address[num_bytes - i] = data & 0xff;
+      data >>= 8;
+   }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Clean helper for CU21.                               ---*/
+/*------------------------------------------------------------*/
+
+/* The function performs a CU21 operation. It returns three things
+   encoded in an ULong value:
+   - the converted bytes (at most 4)
+   - the number of converted bytes
+   - an indication whether LOW_SURROGATE, if any, is invalid
+
+   64      48                16           8                       0
+    +-------+-----------------+-----------+-----------------------+
+    |  0x0  | converted bytes | num_bytes | invalid_low_surrogate |
+    +-------+-----------------+-----------+-----------------------+
+*/
+ULong
+s390_do_cu21(UInt srcval, UInt low_surrogate)
+{
+   ULong retval = 0;   // shut up gcc
+   UInt b1, b2, b3, b4, num_bytes, invalid_low_surrogate = 0;
+
+   srcval &= 0xffff;
+
+   /* Determine the number of bytes in the converted value */
+   if (srcval <= 0x007f)
+      num_bytes = 1;
+   else if (srcval >= 0x0080 && srcval <= 0x07ff)
+      num_bytes = 2;
+   else if ((srcval >= 0x0800 && srcval <= 0xd7ff) ||
+            (srcval >= 0xdc00 && srcval <= 0xffff))
+      num_bytes = 3;
+   else
+      num_bytes = 4;
+
+   /* Determine UTF-8 bytes according to calculated num_bytes */
+   switch (num_bytes){
+   case 1:
+      retval = srcval;
+      break;
+
+   case 2:
+      /* order of bytes left to right: b1, b2 */
+      b1  = 0xc0;
+      b1 |= srcval >> 6;
+
+      b2  = 0x80;
+      b2 |= srcval & 0x3f;
+
+      retval = (b1 << 8) | b2;
+      break;
+
+   case 3:
+      /* order of bytes left to right: b1, b2, b3 */
+      b1  = 0xe0;
+      b1 |= srcval >> 12;
+
+      b2  = 0x80;
+      b2 |= (srcval >> 6) & 0x3f;
+
+      b3  = 0x80;
+      b3 |= srcval & 0x3f;
+
+      retval = (b1 << 16) | (b2 << 8) | b3;
+      break;
+
+   case 4: {
+      /* order of bytes left to right: b1, b2, b3, b4 */
+      UInt high_surrogate = srcval;
+      UInt uvwxy = ((high_surrogate >> 6) & 0xf) + 1;   // abcd + 1
+
+      b1  = 0xf0;
+      b1 |= uvwxy >> 2;     // uvw
+
+      b2  = 0x80;
+      b2 |= (uvwxy & 0x3) << 4;           // xy
+      b2 |= (high_surrogate >> 2) & 0xf;  // efgh
+
+      b3  = 0x80;
+      b3 |= (high_surrogate & 0x3) << 4;   // ij
+      b3 |= (low_surrogate >> 6) & 0xf;    // klmn
+
+      b4  = 0x80;
+      b4 |= low_surrogate & 0x3f;
+
+      retval = (b1 << 24) | (b2 << 16) | (b3 << 8) | b4;
+
+      invalid_low_surrogate = (low_surrogate & 0xfc00) != 0xdc00;
+      break;
+   }
+   }
+
+   /* At this point RETVAL contains the converted bytes.
+      Build up the final return value. */
+   return (retval << 16) | (num_bytes << 8) | invalid_low_surrogate;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Clean helper for CU24.                               ---*/
+/*------------------------------------------------------------*/
+
+/* The function performs a CU24 operation. It returns two things
+   encoded in an ULong value:
+   - the 4 converted bytes
+   - an indication whether LOW_SURROGATE, if any, is invalid
+
+   64     40                 8                       0
+    +------------------------+-----------------------+
+    |  0x0 | converted bytes | invalid_low_surrogate |
+    +------------------------+-----------------------+
+*/
+ULong
+s390_do_cu24(UInt srcval, UInt low_surrogate)
+{
+   ULong retval;
+   UInt invalid_low_surrogate = 0;
+
+   srcval &= 0xffff;
+
+   if ((srcval >= 0x0000 && srcval <= 0xd7ff) ||
+       (srcval >= 0xdc00 && srcval <= 0xffff)) {
+      retval = srcval;
+   } else {
+      /* D800 - DBFF */
+      UInt high_surrogate = srcval;
+      UInt uvwxy  = ((high_surrogate >> 6) & 0xf) + 1;   // abcd + 1
+      UInt efghij = high_surrogate & 0x3f;
+      UInt klmnoprst = low_surrogate & 0x3ff;
+
+      retval = (uvwxy << 16) | (efghij << 10) | klmnoprst;
+
+      invalid_low_surrogate = (low_surrogate & 0xfc00) != 0xdc00;
+   }
+
+   /* At this point RETVAL contains the converted bytes.
+      Build up the final return value. */
+   return (retval << 8) | invalid_low_surrogate;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Clean helper for CU42.                               ---*/
+/*------------------------------------------------------------*/
+
+/* The function performs a CU42 operation. It returns three things
+   encoded in an ULong value:
+   - the converted bytes (at most 4)
+   - the number of converted bytes (2 or 4; 0 if invalid character)
+   - an indication whether the UTF-32 character is invalid
+
+   64      48                16           8                   0
+    +-------+-----------------+-----------+-------------------+
+    |  0x0  | converted bytes | num_bytes | invalid_character |
+    +-------+-----------------+-----------+-------------------+
+*/
+ULong
+s390_do_cu42(UInt srcval)
+{
+   ULong retval;
+   UInt num_bytes, invalid_character = 0;
+
+   if ((srcval >= 0x0000 && srcval <= 0xd7ff) ||
+       (srcval >= 0xdc00 && srcval <= 0xffff)) {
+      retval = srcval;
+      num_bytes = 2;
+   } else if (srcval >= 0x00010000 && srcval <= 0x0010FFFF) {
+      UInt uvwxy  = srcval >> 16;
+      UInt abcd   = (uvwxy - 1) & 0xf;
+      UInt efghij = (srcval >> 10) & 0x3f;
+
+      UInt high_surrogate = (0xd8 << 8) | (abcd << 6) | efghij;
+      UInt low_surrogate  = (0xdc << 8) | (srcval & 0x3ff);
+
+      retval = (high_surrogate << 16) | low_surrogate;
+      num_bytes = 4;
+   } else {
+      /* D800 - DBFF or 00110000 - FFFFFFFF */
+      invalid_character = 1;
+      retval = num_bytes = 0;   /* does not matter; not used */
+   }
+
+   /* At this point RETVAL contains the converted bytes.
+      Build up the final return value. */
+   return (retval << 16) | (num_bytes << 8) | invalid_character;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Clean helper for CU41.                               ---*/
+/*------------------------------------------------------------*/
+
+/* The function performs a CU41 operation. It returns three things
+   encoded in an ULong value:
+   - the converted bytes (at most 4)
+   - the number of converted bytes (1, 2, 3, or 4; 0 if invalid character)
+   - an indication whether the UTF-32 character is invalid
+
+   64      48                16           8                   0
+    +-------+-----------------+-----------+-------------------+
+    |  0x0  | converted bytes | num_bytes | invalid_character |
+    +-------+-----------------+-----------+-------------------+
+*/
+ULong
+s390_do_cu41(UInt srcval)
+{
+   ULong retval;
+   UInt num_bytes, invalid_character = 0;
+
+   if (srcval <= 0x7f) {
+      retval = srcval;
+      num_bytes = 1;
+   } else if (srcval >= 0x80 && srcval <= 0x7ff) {
+      UInt fghij  = srcval >> 6;
+      UInt klmnop = srcval & 0x3f;
+      UInt byte1  = (0xc0 | fghij);
+      UInt byte2  = (0x80 | klmnop);
+
+      retval = (byte1 << 8) | byte2;
+      num_bytes = 2;
+   } else if ((srcval >= 0x800  && srcval <= 0xd7ff) ||
+              (srcval >= 0xdc00 && srcval <= 0xffff)) {
+      UInt abcd   = srcval >> 12;
+      UInt efghij = (srcval >> 6) & 0x3f;
+      UInt klmnop = srcval & 0x3f;
+      UInt byte1  = 0xe0 | abcd;
+      UInt byte2  = 0x80 | efghij;
+      UInt byte3  = 0x80 | klmnop;
+
+      retval = (byte1 << 16) | (byte2 << 8) | byte3;
+      num_bytes = 3;
+   } else if (srcval >= 0x10000 && srcval <= 0x10ffff) {
+      UInt uvw    = (srcval >> 18) & 0x7;
+      UInt xy     = (srcval >> 16) & 0x3;
+      UInt efgh   = (srcval >> 12) & 0xf;
+      UInt ijklmn = (srcval >>  6) & 0x3f;
+      UInt opqrst = srcval & 0x3f;
+      UInt byte1  = 0xf0 | uvw;
+      UInt byte2  = 0x80 | (xy << 4) | efgh;
+      UInt byte3  = 0x80 | ijklmn;
+      UInt byte4  = 0x80 | opqrst;
+
+      retval = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
+      num_bytes = 4;
+   } else {
+      /* d800 ... dbff or 00110000 ... ffffffff */
+      invalid_character = 1;
+
+      retval = 0;
+      num_bytes = 0;
+   }
+
+   /* At this point RETVAL contains the converted bytes.
+      Build up the final return value. */
+   return (retval << 16) | (num_bytes << 8) | invalid_character;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Clean helpers for CU12.                              ---*/
+/*------------------------------------------------------------*/
+
+/* The function looks at the first byte of an UTF-8 character and returns
+   two things encoded in an ULong value:
+
+   - the number of bytes that need to be read
+   - an indication whether the UTF-8 character is invalid
+
+   64      16           8                   0
+    +-------------------+-------------------+
+    |  0x0  | num_bytes | invalid_character |
+    +-------+-----------+-------------------+
+*/
+ULong
+s390_do_cu12_cu14_helper1(UInt byte, UInt etf3_and_m3_is_1)
+{
+   vassert(byte <= 0xff);
+
+   /* Check whether the character is invalid */
+   if (byte >= 0x80 && byte <= 0xbf) return 1;
+   if (byte >= 0xf8) return 1;
+
+   if (etf3_and_m3_is_1) {
+      if (byte == 0xc0 || byte == 0xc1) return 1;
+      if (byte >= 0xf5 && byte <= 0xf7) return 1;
+   }
+
+   /* Character is valid */
+   if (byte <= 0x7f) return 1 << 8;   // 1 byte
+   if (byte <= 0xdf) return 2 << 8;   // 2 bytes
+   if (byte <= 0xef) return 3 << 8;   // 3 bytes
+
+   return 4 << 8;  // 4 bytes
+}
+
+/* The function performs a CU12 or CU14 operation. BYTE1, BYTE2, etc are the
+   bytes as read from the input stream, left to right. BYTE1 is a valid
+   byte. The function returns three things encoded in an ULong value:
+
+   - the converted bytes
+   - the number of converted bytes (2 or 4; 0 if invalid character)
+   - an indication whether the UTF-16 character is invalid
+
+   64      48                16           8                   0
+    +-------+-----------------+-----------+-------------------+
+    |  0x0  | converted bytes | num_bytes | invalid_character |
+    +-------+-----------------+-----------+-------------------+
+*/
+static ULong
+s390_do_cu12_cu14_helper2(UInt byte1, UInt byte2, UInt byte3, UInt byte4,
+                          ULong stuff, Bool is_cu12)
+{
+   UInt num_src_bytes = stuff >> 1, etf3_and_m3_is_1 = stuff & 0x1;
+   UInt num_bytes = 0, invalid_character = 0;
+   ULong retval = 0;
+
+   vassert(num_src_bytes <= 4);
+
+   switch (num_src_bytes) {
+   case 1:
+      num_bytes = 2;
+      retval = byte1;
+      break;
+
+   case 2: {
+      /* Test validity */
+      if (etf3_and_m3_is_1) {
+         if (byte2 < 0x80 || byte2 > 0xbf) {
+            invalid_character = 1;
+            break;
+         }
+      }
+
+      /* OK */
+      UInt fghij  = byte1 & 0x1f;
+      UInt klmnop = byte2 & 0x3f;
+
+      num_bytes = 2;
+      retval = (fghij << 6) | klmnop;
+      break;
+   }
+
+   case 3: {
+      /* Test validity */
+      if (etf3_and_m3_is_1) {
+         if (byte1 == 0xe0) {
+            if ((byte2 < 0xa0 || byte2 > 0xbf) ||
+                (byte3 < 0x80 || byte3 > 0xbf)) {
+               invalid_character = 1;
+               break;
+            }
+         }
+         if ((byte1 >= 0xe1 && byte1 <= 0xec) ||
+             byte1 == 0xee || byte1 == 0xef) {
+            if ((byte2 < 0x80 || byte2 > 0xbf) ||
+                (byte3 < 0x80 || byte3 > 0xbf)) {
+               invalid_character = 1;
+               break;
+            }
+         }
+         if (byte1 == 0xed) {
+            if ((byte2 < 0x80 || byte2 > 0x9f) ||
+                (byte3 < 0x80 || byte3 > 0xbf)) {
+               invalid_character = 1;
+               break;
+            }
+         }
+      }
+
+      /* OK */
+      UInt abcd   = byte1 & 0xf;
+      UInt efghij = byte2 & 0x3f;
+      UInt klmnop = byte3 & 0x3f;
+
+      num_bytes = 2;
+      retval = (abcd << 12) | (efghij << 6) | klmnop;
+      break;
+   }
+
+   case 4: {
+      /* Test validity */
+      if (etf3_and_m3_is_1) {
+         if (byte1 == 0xf0) {
+            if ((byte2 < 0x90 || byte2 > 0xbf) ||
+                (byte3 < 0x80 || byte3 > 0xbf) ||
+                (byte4 < 0x80 || byte4 > 0xbf)) {
+               invalid_character = 1;
+               break;
+            }
+         }
+         if (byte1 == 0xf1 || byte1 == 0xf2 || byte1 == 0xf3) {
+            if ((byte2 < 0x80 || byte2 > 0xbf) ||
+                (byte3 < 0x80 || byte3 > 0xbf) ||
+                (byte4 < 0x80 || byte4 > 0xbf)) {
+               invalid_character = 1;
+               break;
+            }
+         }
+         if (byte1 == 0xf4) {
+            if ((byte2 < 0x80 || byte2 > 0x8f) ||
+                (byte3 < 0x80 || byte3 > 0xbf) ||
+                (byte4 < 0x80 || byte4 > 0xbf)) {
+               invalid_character = 1;
+               break;
+            }
+         }
+      }
+
+      /* OK */
+      UInt uvw    = byte1 & 0x7;
+      UInt xy     = (byte2 >> 4) & 0x3;
+      UInt uvwxy  = (uvw << 2) | xy;
+      UInt efgh   = byte2 & 0xf;
+      UInt ij     = (byte3 >> 4) & 0x3;
+      UInt klmn   = byte3 & 0xf;
+      UInt opqrst = byte4 & 0x3f;
+
+      if (is_cu12) {
+         UInt abcd = (uvwxy - 1) & 0xf;
+         UInt high_surrogate = (0xd8 << 8) | (abcd << 6) | (efgh << 2) | ij;
+         UInt low_surrogate  = (0xdc << 8) | (klmn << 6) | opqrst;
+
+         num_bytes = 4;
+         retval = (high_surrogate << 16) | low_surrogate;
+      } else {
+         num_bytes = 4;
+         retval =
+            (uvwxy << 16) | (efgh << 12) | (ij << 10) | (klmn << 6) | opqrst;
+      }
+      break;
+   }
+   }
+
+   if (! is_cu12) num_bytes = 4;   // for CU14, by definition
+
+   /* At this point RETVAL contains the converted bytes.
+      Build up the final return value. */
+   return (retval << 16) | (num_bytes << 8) | invalid_character;
+}
+
+ULong
+s390_do_cu12_helper2(UInt byte1, UInt byte2, UInt byte3, UInt byte4,
+                     ULong stuff)
+{
+   return s390_do_cu12_cu14_helper2(byte1, byte2, byte3, byte4, stuff,
+                                    /* is_cu12 = */ 1);
+}
+
+ULong
+s390_do_cu14_helper2(UInt byte1, UInt byte2, UInt byte3, UInt byte4,
+                     ULong stuff)
+{
+   return s390_do_cu12_cu14_helper2(byte1, byte2, byte3, byte4, stuff,
+                                    /* is_cu12 = */ 0);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Clean helper for "convert to binary".                ---*/
+/*------------------------------------------------------------*/
+#if defined(VGA_s390x)
+UInt
+s390_do_cvb(ULong decimal)
+{
+   UInt binary;
+
+   __asm__ volatile (
+        "cvb %[result],%[input]\n\t"
+          : [result] "=d"(binary)
+          : [input] "m"(decimal)
+   );
+
+   return binary;
+}
+
+#else
+UInt s390_do_cvb(ULong decimal) { return 0; }
+#endif
+
+
+/*------------------------------------------------------------*/
+/*--- Clean helper for "convert to decimal".                ---*/
+/*------------------------------------------------------------*/
+#if defined(VGA_s390x)
+ULong
+s390_do_cvd(ULong binary_in)
+{
+   UInt binary = binary_in & 0xffffffffULL;
+   ULong decimal;
+
+   __asm__ volatile (
+        "cvd %[input],%[result]\n\t"
+          : [result] "=m"(decimal)
+          : [input] "d"(binary)
+   );
+
+   return decimal;
+}
+
+#else
+ULong s390_do_cvd(ULong binary) { return 0; }
+#endif
+
+/*------------------------------------------------------------*/
+/*--- Clean helper for "Extract cache attribute".          ---*/
+/*------------------------------------------------------------*/
+#if defined(VGA_s390x)
+ULong
+s390_do_ecag(ULong op2addr)
+{
+   ULong result;
+
+   __asm__ volatile(".insn rsy,0xEB000000004C,%[out],0,0(%[in])\n\t"
+                    : [out] "=d"(result)
+                    : [in] "d"(op2addr));
+   return result;
+}
+
+#else
+ULong s390_do_ecag(ULong op2addr) { return 0; }
+#endif
+
+/*------------------------------------------------------------*/
+/*--- Clean helper for "Perform Floating Point Operation". ---*/
+/*------------------------------------------------------------*/
+#if defined(VGA_s390x)
+UInt
+s390_do_pfpo(UInt gpr0)
+{
+   UChar rm;
+   UChar op1_ty, op2_ty;
+
+   rm  = gpr0 & 0xf;
+   if (rm > 1 && rm < 8)
+      return EmFail_S390X_invalid_PFPO_rounding_mode;
+
+   op1_ty = (gpr0 >> 16) & 0xff; // gpr0[40:47]
+   op2_ty = (gpr0 >> 8)  & 0xff; // gpr0[48:55]
+   /* Operand type must be BFP 32, 64, 128 or DFP 32, 64, 128
+      which correspond to 0x5, 0x6, 0x7, 0x8, 0x9, 0xa respectively.
+      Any other operand type value is unsupported */
+   if ((op1_ty == op2_ty) ||
+       (op1_ty < 0x5 || op1_ty > 0xa) ||
+       (op2_ty < 0x5 || op2_ty > 0xa))
+      return EmFail_S390X_invalid_PFPO_function;
+
+   return EmNote_NONE;
+}
+#else
+UInt s390_do_pfpo(UInt gpr0) { return 0; }
+#endif
+
+/*------------------------------------------------------------*/
+/*--- Helper for condition code.                           ---*/
+/*------------------------------------------------------------*/
+
+/* Convert an IRRoundingMode value to s390_bfp_round_t */
+#if defined(VGA_s390x)
+static s390_bfp_round_t
+decode_bfp_rounding_mode(UInt irrm)
+{
+   switch (irrm) {
+   case Irrm_NEAREST: return S390_BFP_ROUND_NEAREST_EVEN;
+   case Irrm_NegINF:  return S390_BFP_ROUND_NEGINF;
+   case Irrm_PosINF:  return S390_BFP_ROUND_POSINF;
+   case Irrm_ZERO:    return S390_BFP_ROUND_ZERO;
+   }
+   vpanic("decode_bfp_rounding_mode");
+}
+#endif
+
+
+#define S390_CC_FOR_BINARY(opcode,cc_dep1,cc_dep2) \
+({ \
+   __asm__ volatile ( \
+        opcode " %[op1],%[op2]\n\t" \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw), [op1] "+d"(cc_dep1) \
+                                   : [op2] "d"(cc_dep2) \
+                                   : "cc");\
+   psw >> 28;   /* cc */ \
+})
+
+#define S390_CC_FOR_TERNARY_SUBB(opcode,cc_dep1,cc_dep2,cc_ndep) \
+({ \
+   /* Recover the original DEP2 value. See comment near s390_cc_thunk_put3 \
+      for rationale. */ \
+   cc_dep2 = cc_dep2 ^ cc_ndep; \
+   __asm__ volatile ( \
+	"lghi 0,1\n\t" \
+	"sr 0,%[op3]\n\t" /* borrow to cc */ \
+        opcode " %[op1],%[op2]\n\t" /* then redo the op */\
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw), [op1] "+&d"(cc_dep1) \
+                                   : [op2] "d"(cc_dep2), [op3] "d"(cc_ndep) \
+                                   : "0", "cc");\
+   psw >> 28;   /* cc */ \
+})
+
+#define S390_CC_FOR_TERNARY_ADDC(opcode,cc_dep1,cc_dep2,cc_ndep) \
+({ \
+   /* Recover the original DEP2 value. See comment near s390_cc_thunk_put3 \
+      for rationale. */ \
+   cc_dep2 = cc_dep2 ^ cc_ndep; \
+   __asm__ volatile ( \
+	"lgfr 0,%[op3]\n\t" /* first load cc_ndep */ \
+	"aghi 0,0\n\t" /* and convert it into a cc */ \
+        opcode " %[op1],%[op2]\n\t" /* then redo the op */\
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw), [op1] "+&d"(cc_dep1) \
+                                   : [op2] "d"(cc_dep2), [op3] "d"(cc_ndep) \
+                                   : "0", "cc");\
+   psw >> 28;   /* cc */ \
+})
+
+
+/* Compute the cc of a 32/64-bit BFP result by load-and-testing it into
+   FPR 0 (opcode is e.g. "ltebr" / "ltdbr") and reading the cc via IPM.
+   NOTE(review): expects a local 'psw' (UInt) in scope; clobbers FPR 0. */
+#define S390_CC_FOR_BFP_RESULT(opcode,cc_dep1) \
+({ \
+   __asm__ volatile ( \
+        opcode " 0,%[op]\n\t" \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw) \
+                                   : [op]  "f"(cc_dep1) \
+                                   : "cc", "f0");\
+   psw >> 28;   /* cc */ \
+})
+
+/* Compute the cc of a 128-bit BFP result.  The extended value is assembled
+   into the FPR pair 4/6 and load-and-tested (LTXBR) into pair 0/2.
+   NOTE(review): expects a local 'psw' (UInt) in scope; clobbers f0/f2/f4/f6. */
+#define S390_CC_FOR_BFP128_RESULT(hi,lo) \
+({ \
+   __asm__ volatile ( \
+        "ldr   4,%[high]\n\t" \
+        "ldr   6,%[low]\n\t" \
+        "ltxbr 0,4\n\t" \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw) \
+                                   : [high] "f"(hi), [low] "f"(lo) \
+                                   : "cc", "f0", "f2", "f4", "f6");\
+   psw >> 28;   /* cc */ \
+})
+
+/* Replay a BFP -> signed integer conversion (e.g. CFEBR) with a literal
+   rounding-mode field and extract the resulting cc.  rounding_mode must be
+   a literal token because it is stringified (#) into the insn text; hence
+   the switch-based dispatchers below.  Result lands in GPR 0 (clobbered).
+   NOTE(review): expects a local 'psw' (UInt) in scope. */
+#define S390_CC_FOR_BFP_CONVERT_AUX(opcode,cc_dep1,rounding_mode) \
+({ \
+   __asm__ volatile ( \
+        opcode " 0," #rounding_mode ",%[op]\n\t" \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw) \
+                                   : [op]  "f"(cc_dep1) \
+                                   : "cc", "r0");\
+   psw >> 28;   /* cc */ \
+})
+
+/* Dispatch on the decoded BFP rounding mode (from cc_dep2) to the AUX
+   macro with the matching literal rounding-mode field (4..7). */
+#define S390_CC_FOR_BFP_CONVERT(opcode,cc_dep1,cc_dep2)   \
+({                                                        \
+   UInt cc;                                               \
+   switch (decode_bfp_rounding_mode(cc_dep2)) {           \
+   case S390_BFP_ROUND_NEAREST_EVEN:                      \
+      cc = S390_CC_FOR_BFP_CONVERT_AUX(opcode,cc_dep1,4); \
+      break;                                              \
+   case S390_BFP_ROUND_ZERO:                              \
+      cc = S390_CC_FOR_BFP_CONVERT_AUX(opcode,cc_dep1,5); \
+      break;                                              \
+   case S390_BFP_ROUND_POSINF:                            \
+      cc = S390_CC_FOR_BFP_CONVERT_AUX(opcode,cc_dep1,6); \
+      break;                                              \
+   case S390_BFP_ROUND_NEGINF:                            \
+      cc = S390_CC_FOR_BFP_CONVERT_AUX(opcode,cc_dep1,7); \
+      break;                                              \
+   default:                                               \
+      vpanic("unexpected bfp rounding mode");             \
+   }                                                      \
+   cc;                                                    \
+})
+
+/* Like S390_CC_FOR_BFP_CONVERT_AUX but for the unsigned conversions, which
+   are emitted via ".insn rrf,..." so the opcode string supplies the mnemonic
+   and the operand list starts with a comma.  Clobbers GPR 0. */
+#define S390_CC_FOR_BFP_UCONVERT_AUX(opcode,cc_dep1,rounding_mode) \
+({ \
+   __asm__ volatile ( \
+        opcode ",0,%[op]," #rounding_mode ",0\n\t" \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw) \
+                                   : [op]  "f"(cc_dep1) \
+                                   : "cc", "r0");\
+   psw >> 28;   /* cc */ \
+})
+
+/* Rounding-mode dispatcher for the unsigned BFP conversions; mirrors
+   S390_CC_FOR_BFP_CONVERT above. */
+#define S390_CC_FOR_BFP_UCONVERT(opcode,cc_dep1,cc_dep2)   \
+({                                                         \
+   UInt cc;                                                \
+   switch (decode_bfp_rounding_mode(cc_dep2)) {            \
+   case S390_BFP_ROUND_NEAREST_EVEN:                       \
+      cc = S390_CC_FOR_BFP_UCONVERT_AUX(opcode,cc_dep1,4); \
+      break;                                               \
+   case S390_BFP_ROUND_ZERO:                               \
+      cc = S390_CC_FOR_BFP_UCONVERT_AUX(opcode,cc_dep1,5); \
+      break;                                               \
+   case S390_BFP_ROUND_POSINF:                             \
+      cc = S390_CC_FOR_BFP_UCONVERT_AUX(opcode,cc_dep1,6); \
+      break;                                               \
+   case S390_BFP_ROUND_NEGINF:                             \
+      cc = S390_CC_FOR_BFP_UCONVERT_AUX(opcode,cc_dep1,7); \
+      break;                                               \
+   default:                                                \
+      vpanic("unexpected bfp rounding mode");              \
+   }                                                       \
+   cc;                                                     \
+})
+
+/* 128-bit variant of S390_CC_FOR_BFP_CONVERT_AUX: the extended BFP value is
+   assembled into FPR pair 4/6, then the conversion (e.g. CFXBR) is replayed
+   with a literal rounding-mode field.  Clobbers GPR 0 and f4/f6.
+   NOTE(review): expects a local 'psw' (UInt) in scope. */
+#define S390_CC_FOR_BFP128_CONVERT_AUX(opcode,hi,lo,rounding_mode) \
+({ \
+   __asm__ volatile ( \
+        "ldr   4,%[high]\n\t" \
+        "ldr   6,%[low]\n\t" \
+        opcode " 0," #rounding_mode ",4\n\t" \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw) \
+                                   : [high] "f"(hi), [low] "f"(lo) \
+                                   : "cc", "r0", "f4", "f6");\
+   psw >> 28;   /* cc */ \
+})
+
+/* Rounding-mode dispatcher for the 128-bit signed BFP conversions.  The
+   rounding mode travels in NDEP here (DEP2 was xor-masked with it). */
+#define S390_CC_FOR_BFP128_CONVERT(opcode,cc_dep1,cc_dep2,cc_ndep)   \
+({                                                                   \
+   UInt cc;                                                          \
+   /* Recover the original DEP2 value. See comment near              \
+      s390_cc_thunk_put3 for rationale. */                           \
+   cc_dep2 = cc_dep2 ^ cc_ndep;                                      \
+   switch (decode_bfp_rounding_mode(cc_ndep)) {                      \
+   case S390_BFP_ROUND_NEAREST_EVEN:                                 \
+      cc = S390_CC_FOR_BFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,4); \
+      break;                                                         \
+   case S390_BFP_ROUND_ZERO:                                         \
+      cc = S390_CC_FOR_BFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,5); \
+      break;                                                         \
+   case S390_BFP_ROUND_POSINF:                                       \
+      cc = S390_CC_FOR_BFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,6); \
+      break;                                                         \
+   case S390_BFP_ROUND_NEGINF:                                       \
+      cc = S390_CC_FOR_BFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,7); \
+      break;                                                         \
+   default:                                                          \
+      vpanic("unexpected bfp rounding mode");                        \
+   }                                                                 \
+   cc;                                                               \
+})
+
+/* 128-bit unsigned-conversion variant; the opcode is an ".insn rrf,..."
+   string, so the operand list starts with a comma.  Clobbers GPR 0, f4/f6. */
+#define S390_CC_FOR_BFP128_UCONVERT_AUX(opcode,hi,lo,rounding_mode) \
+({ \
+   __asm__ volatile ( \
+        "ldr   4,%[high]\n\t" \
+        "ldr   6,%[low]\n\t" \
+        opcode ",0,4," #rounding_mode ",0\n\t" \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw) \
+                                   : [high] "f"(hi), [low] "f"(lo) \
+                                   : "cc", "r0", "f4", "f6");\
+   psw >> 28;   /* cc */ \
+})
+
+/* Rounding-mode dispatcher for the 128-bit unsigned BFP conversions;
+   mirrors S390_CC_FOR_BFP128_CONVERT above. */
+#define S390_CC_FOR_BFP128_UCONVERT(opcode,cc_dep1,cc_dep2,cc_ndep)   \
+({                                                                    \
+   UInt cc;                                                           \
+   /* Recover the original DEP2 value. See comment near               \
+      s390_cc_thunk_put3 for rationale. */                            \
+   cc_dep2 = cc_dep2 ^ cc_ndep;                                       \
+   switch (decode_bfp_rounding_mode(cc_ndep)) {                       \
+   case S390_BFP_ROUND_NEAREST_EVEN:                                  \
+      cc = S390_CC_FOR_BFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,4); \
+      break;                                                          \
+   case S390_BFP_ROUND_ZERO:                                          \
+      cc = S390_CC_FOR_BFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,5); \
+      break;                                                          \
+   case S390_BFP_ROUND_POSINF:                                        \
+      cc = S390_CC_FOR_BFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,6); \
+      break;                                                          \
+   case S390_BFP_ROUND_NEGINF:                                        \
+      cc = S390_CC_FOR_BFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,7); \
+      break;                                                          \
+   default:                                                           \
+      vpanic("unexpected bfp rounding mode");                         \
+   }                                                                  \
+   cc;                                                                \
+})
+
+/* Compute the cc of a BFP test-data-class insn (TCEB / TCDB).  cc_dep2 is
+   the class mask, fed through an address register so it forms the D2(B2)
+   second operand of the insn.
+   NOTE(review): expects a local 'psw' (UInt) in scope. */
+#define S390_CC_FOR_BFP_TDC(opcode,cc_dep1,cc_dep2) \
+({ \
+   __asm__ volatile ( \
+        opcode " %[value],0(%[class])\n\t" \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw) \
+                                   : [value] "f"(cc_dep1), \
+                                     [class] "a"(cc_dep2)  \
+                                   : "cc");\
+   psw >> 28;   /* cc */ \
+})
+
+/* 128-bit test-data-class (TCXB): the extended value is assembled into the
+   FPR pair 4/6; the class mask travels in NDEP (DEP2 was xor-masked).
+   Clobbers f4/f6. */
+#define S390_CC_FOR_BFP128_TDC(cc_dep1,cc_dep2,cc_ndep) \
+({ \
+   /* Recover the original DEP2 value. See comment near \
+      s390_cc_thunk_put1f128Z for rationale. */ \
+   cc_dep2 = cc_dep2 ^ cc_ndep; \
+   __asm__ volatile ( \
+        "ldr  4,%[high]\n\t" \
+        "ldr  6,%[low]\n\t" \
+        "tcxb 4,0(%[class])\n\t" \
+        "ipm  %[psw]\n\t"          : [psw] "=d"(psw) \
+                                   : [high] "f"(cc_dep1), [low] "f"(cc_dep2), \
+                                     [class] "a"(cc_ndep)  \
+                                   : "cc", "f4", "f6");\
+   psw >> 28;   /* cc */ \
+})
+
+/* Convert an IRRoundingMode value to s390_dfp_round_t */
+#if defined(VGA_s390x)
+/* Maps the IR-level rounding mode 'irrm' onto the s390-specific DFP
+   rounding-mode encoding.  Where the hardware offers two encodings for the
+   same mode, the canonical one is returned (e.g. NEAREST_EVEN_4, not _8).
+   Panics on an unrecognised value — every supported mode must be listed. */
+static s390_dfp_round_t
+decode_dfp_rounding_mode(UInt irrm)
+{
+   switch (irrm) {
+   case Irrm_NEAREST:
+      return S390_DFP_ROUND_NEAREST_EVEN_4;
+   case Irrm_NegINF:
+      return S390_DFP_ROUND_NEGINF_7;
+   case Irrm_PosINF:
+      return S390_DFP_ROUND_POSINF_6;
+   case Irrm_ZERO:
+      return S390_DFP_ROUND_ZERO_5;
+   case Irrm_NEAREST_TIE_AWAY_0:
+      return S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1;
+   case Irrm_PREPARE_SHORTER:
+      return S390_DFP_ROUND_PREPARE_SHORT_3;
+   case Irrm_AWAY_FROM_ZERO:
+      return S390_DFP_ROUND_AWAY_0;
+   case Irrm_NEAREST_TIE_TOWARD_0:
+      return S390_DFP_ROUND_NEAREST_TIE_TOWARD_0;
+   }
+   vpanic("decode_dfp_rounding_mode");
+}
+#endif
+
+/* Compute the cc of a 64-bit DFP result by load-and-testing it (LTDTR,
+   emitted as a raw ".insn rre" since the assembler may not know DFP
+   mnemonics) into FPR 0.  Clobbers f0.
+   NOTE(review): expects a local 'psw' (UInt) in scope. */
+#define S390_CC_FOR_DFP_RESULT(cc_dep1) \
+({ \
+   __asm__ volatile ( \
+        ".insn rre, 0xb3d60000,0,%[op]\n\t"              /* LTDTR */ \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw) \
+                                   : [op]  "f"(cc_dep1) \
+                                   : "cc", "f0"); \
+   psw >> 28;   /* cc */ \
+})
+
+/* Compute the cc of a 128-bit DFP result: assemble the value into FPR pair
+   4/6 and load-and-test (LTXTR) into pair 0/2.  Clobbers f0/f2/f4/f6. */
+#define S390_CC_FOR_DFP128_RESULT(hi,lo) \
+({ \
+   __asm__ volatile ( \
+        "ldr   4,%[high]\n\t"                                           \
+        "ldr   6,%[low]\n\t"                                            \
+        ".insn rre, 0xb3de0000,0,4\n\t"    /* LTXTR */                  \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw)                    \
+                                   : [high] "f"(hi), [low] "f"(lo)      \
+                                   : "cc", "f0", "f2", "f4", "f6");     \
+   psw >> 28;   /* cc */                                                \
+})
+
+/* Compute the cc of a DFP test-data-class/group insn; the opcode string
+   supplies the mnemonic (e.g. an ".insn" form), so the operand list begins
+   with a comma.  cc_dep2 is the class mask, passed via an address reg. */
+#define S390_CC_FOR_DFP_TD(opcode,cc_dep1,cc_dep2)                      \
+({                                                                      \
+   __asm__ volatile (                                                   \
+        opcode ",%[value],0(%[class])\n\t"                              \
+        "ipm %[psw]\n\t"           : [psw] "=d"(psw)                    \
+                                   : [value] "f"(cc_dep1),              \
+                                     [class] "a"(cc_dep2)               \
+                                   : "cc");                             \
+   psw >> 28;   /* cc */                                                \
+})
+
+/* 128-bit DFP test-data-class/group: value assembled into FPR pair 4/6,
+   class mask in NDEP (DEP2 was xor-masked).  Clobbers f4/f6. */
+#define S390_CC_FOR_DFP128_TD(opcode,cc_dep1,cc_dep2,cc_ndep)           \
+({                                                                      \
+   /* Recover the original DEP2 value. See comment near                 \
+      s390_cc_thunk_put1d128Z for rationale. */                         \
+   cc_dep2 = cc_dep2 ^ cc_ndep;                                         \
+   __asm__ volatile (                                                   \
+        "ldr  4,%[high]\n\t"                                            \
+        "ldr  6,%[low]\n\t"                                             \
+        opcode ",4,0(%[class])\n\t"                                     \
+        "ipm  %[psw]\n\t"          : [psw] "=d"(psw)                    \
+                                   : [high] "f"(cc_dep1), [low] "f"(cc_dep2), \
+                                     [class] "a"(cc_ndep)               \
+                                   : "cc", "f4", "f6");                 \
+   psw >> 28;   /* cc */                                                \
+})
+
+/* Replay a DFP -> integer conversion (opcode is an ".insn rrf" string, so
+   the operand list begins with a comma) with a literal rounding-mode field
+   and extract the resulting cc.  rounding_mode must be a literal token
+   because it is stringified into the insn; hence the dispatcher below.
+   Result lands in GPR 0 (clobbered).
+   NOTE(review): expects a local 'psw' (UInt) in scope. */
+#define S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,rounding_mode)       \
+   ({                                                                   \
+      __asm__ volatile (                                                \
+                        opcode ",0,%[op]," #rounding_mode ",0\n\t"      \
+                        "ipm %[psw]\n\t"           : [psw] "=d"(psw)    \
+                        : [op] "f"(cc_dep1)                             \
+                        : "cc", "r0");                                  \
+      psw >> 28;   /* cc */                                             \
+   })
+
+/* Dispatch on the decoded DFP rounding mode (from cc_dep2) to the AUX
+   macro with the matching literal rounding-mode field.  Both hardware
+   encodings of each mode collapse onto one field value. */
+#define S390_CC_FOR_DFP_CONVERT(opcode,cc_dep1,cc_dep2)                 \
+   ({                                                                   \
+      UInt cc;                                                          \
+      switch (decode_dfp_rounding_mode(cc_dep2)) {                      \
+      case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1:                         \
+      case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12:                        \
+         cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,1);            \
+         break;                                                         \
+      case S390_DFP_ROUND_PREPARE_SHORT_3:                              \
+      case S390_DFP_ROUND_PREPARE_SHORT_15:                             \
+         cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,3);            \
+         break;                                                         \
+      case S390_DFP_ROUND_NEAREST_EVEN_4:                               \
+      case S390_DFP_ROUND_NEAREST_EVEN_8:                               \
+         cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,4);            \
+         break;                                                         \
+      case S390_DFP_ROUND_ZERO_5:                                       \
+      case S390_DFP_ROUND_ZERO_9:                                       \
+         cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,5);            \
+         break;                                                         \
+      case S390_DFP_ROUND_POSINF_6:                                     \
+      case S390_DFP_ROUND_POSINF_10:                                    \
+         cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,6);            \
+         break;                                                         \
+      case S390_DFP_ROUND_NEGINF_7:                                     \
+      case S390_DFP_ROUND_NEGINF_11:                                    \
+         cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,7);            \
+         break;                                                         \
+      case S390_DFP_ROUND_NEAREST_TIE_TOWARD_0:                         \
+         cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,13);           \
+         break;                                                         \
+      case S390_DFP_ROUND_AWAY_0:                                       \
+         cc = S390_CC_FOR_DFP_CONVERT_AUX(opcode,cc_dep1,14);           \
+         break;                                                         \
+      default:                                                          \
+         vpanic("unexpected dfp rounding mode");                        \
+      }                                                                 \
+      cc;                                                               \
+   })
+
+/* Unsigned-conversion counterpart of S390_CC_FOR_DFP_CONVERT_AUX; the asm
+   template is identical in shape, only the supplied opcode differs.
+   Clobbers GPR 0. */
+#define S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,rounding_mode)      \
+   ({                                                                   \
+      __asm__ volatile (                                                \
+                        opcode ",0,%[op]," #rounding_mode ",0\n\t"      \
+                        "ipm %[psw]\n\t"           : [psw] "=d"(psw)    \
+                        : [op] "f"(cc_dep1)                             \
+                        : "cc", "r0");                                  \
+      psw >> 28;   /* cc */                                             \
+   })
+
+/* Rounding-mode dispatcher for the unsigned DFP conversions; mirrors
+   S390_CC_FOR_DFP_CONVERT above. */
+#define S390_CC_FOR_DFP_UCONVERT(opcode,cc_dep1,cc_dep2)                \
+   ({                                                                   \
+      UInt cc;                                                          \
+      switch (decode_dfp_rounding_mode(cc_dep2)) {                      \
+      case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1:                         \
+      case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12:                        \
+         cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,1);           \
+         break;                                                         \
+      case S390_DFP_ROUND_PREPARE_SHORT_3:                              \
+      case S390_DFP_ROUND_PREPARE_SHORT_15:                             \
+         cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,3);           \
+         break;                                                         \
+      case S390_DFP_ROUND_NEAREST_EVEN_4:                               \
+      case S390_DFP_ROUND_NEAREST_EVEN_8:                               \
+         cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,4);           \
+         break;                                                         \
+      case S390_DFP_ROUND_ZERO_5:                                       \
+      case S390_DFP_ROUND_ZERO_9:                                       \
+         cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,5);           \
+         break;                                                         \
+      case S390_DFP_ROUND_POSINF_6:                                     \
+      case S390_DFP_ROUND_POSINF_10:                                    \
+         cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,6);           \
+         break;                                                         \
+      case S390_DFP_ROUND_NEGINF_7:                                     \
+      case S390_DFP_ROUND_NEGINF_11:                                    \
+         cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,7);           \
+         break;                                                         \
+      case S390_DFP_ROUND_NEAREST_TIE_TOWARD_0:                         \
+         cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,13);          \
+         break;                                                         \
+      case S390_DFP_ROUND_AWAY_0:                                       \
+         cc = S390_CC_FOR_DFP_UCONVERT_AUX(opcode,cc_dep1,14);          \
+         break;                                                         \
+      default:                                                          \
+         vpanic("unexpected dfp rounding mode");                        \
+      }                                                                 \
+      cc;                                                               \
+   })
+
+/* 128-bit variant of S390_CC_FOR_DFP_CONVERT_AUX: the extended DFP value is
+   assembled into FPR pair 4/6 before the conversion is replayed with a
+   literal rounding-mode field.  Clobbers GPR 0 and f4/f6.
+   NOTE(review): expects a local 'psw' (UInt) in scope. */
+#define S390_CC_FOR_DFP128_CONVERT_AUX(opcode,hi,lo,rounding_mode)      \
+   ({                                                                   \
+      __asm__ volatile (                                                \
+                        "ldr   4,%[high]\n\t"                           \
+                        "ldr   6,%[low]\n\t"                            \
+                        opcode ",0,4," #rounding_mode ",0\n\t"          \
+                        "ipm %[psw]\n\t"           : [psw] "=d"(psw)    \
+                        : [high] "f"(hi), [low] "f"(lo)                 \
+                        : "cc", "r0", "f4", "f6");                      \
+      psw >> 28;   /* cc */                                             \
+   })
+
+/* Rounding-mode dispatcher for the 128-bit signed DFP conversions.  The
+   rounding mode travels in NDEP (DEP2 was xor-masked with it). */
+#define S390_CC_FOR_DFP128_CONVERT(opcode,cc_dep1,cc_dep2,cc_ndep)       \
+   ({                                                                    \
+      UInt cc;                                                           \
+      /* Recover the original DEP2 value. See comment near               \
+         s390_cc_thunk_put3 for rationale. */                            \
+      cc_dep2 = cc_dep2 ^ cc_ndep;                                       \
+      switch (decode_dfp_rounding_mode(cc_ndep)) {                       \
+      case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1:                          \
+      case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12:                         \
+         cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,1);  \
+         break;                                                          \
+      case S390_DFP_ROUND_PREPARE_SHORT_3:                               \
+      case S390_DFP_ROUND_PREPARE_SHORT_15:                              \
+         cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,3);  \
+         break;                                                          \
+      case S390_DFP_ROUND_NEAREST_EVEN_4:                                \
+      case S390_DFP_ROUND_NEAREST_EVEN_8:                                \
+         cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,4);  \
+         break;                                                          \
+      case S390_DFP_ROUND_ZERO_5:                                        \
+      case S390_DFP_ROUND_ZERO_9:                                        \
+         cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,5);  \
+         break;                                                          \
+      case S390_DFP_ROUND_POSINF_6:                                      \
+      case S390_DFP_ROUND_POSINF_10:                                     \
+         cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,6);  \
+         break;                                                          \
+      case S390_DFP_ROUND_NEGINF_7:                                      \
+      case S390_DFP_ROUND_NEGINF_11:                                     \
+         cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,7);  \
+         break;                                                          \
+      case S390_DFP_ROUND_NEAREST_TIE_TOWARD_0:                          \
+         cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,13); \
+         break;                                                          \
+      case S390_DFP_ROUND_AWAY_0:                                        \
+         cc = S390_CC_FOR_DFP128_CONVERT_AUX(opcode,cc_dep1,cc_dep2,14); \
+         break;                                                          \
+      default:                                                           \
+         vpanic("unexpected dfp rounding mode");                         \
+      }                                                                  \
+      cc;                                                                \
+   })
+
+/* 128-bit unsigned-conversion AUX; the asm template matches the signed
+   variant, only the supplied opcode differs.  Clobbers GPR 0 and f4/f6. */
+#define S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,hi,lo,rounding_mode)      \
+   ({                                                                    \
+      __asm__ volatile (                                                 \
+                        "ldr   4,%[high]\n\t"                            \
+                        "ldr   6,%[low]\n\t"                             \
+                        opcode ",0,4," #rounding_mode ",0\n\t"           \
+                        "ipm %[psw]\n\t"           : [psw] "=d"(psw)     \
+                        : [high] "f"(hi), [low] "f"(lo)                  \
+                        : "cc", "r0", "f4", "f6");                       \
+      psw >> 28;   /* cc */                                              \
+   })
+
+/* Rounding-mode dispatcher for the 128-bit unsigned DFP conversions;
+   mirrors S390_CC_FOR_DFP128_CONVERT above. */
+#define S390_CC_FOR_DFP128_UCONVERT(opcode,cc_dep1,cc_dep2,cc_ndep)       \
+   ({                                                                     \
+      UInt cc;                                                            \
+      /* Recover the original DEP2 value. See comment near                \
+         s390_cc_thunk_put3 for rationale. */                             \
+      cc_dep2 = cc_dep2 ^ cc_ndep;                                        \
+      switch (decode_dfp_rounding_mode(cc_ndep)) {                        \
+      case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1:                           \
+      case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12:                          \
+         cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,1);  \
+         break;                                                           \
+      case S390_DFP_ROUND_PREPARE_SHORT_3:                                \
+      case S390_DFP_ROUND_PREPARE_SHORT_15:                               \
+         cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,3);  \
+         break;                                                           \
+      case S390_DFP_ROUND_NEAREST_EVEN_4:                                 \
+      case S390_DFP_ROUND_NEAREST_EVEN_8:                                 \
+         cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,4);  \
+         break;                                                           \
+      case S390_DFP_ROUND_ZERO_5:                                         \
+      case S390_DFP_ROUND_ZERO_9:                                         \
+         cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,5);  \
+         break;                                                           \
+      case S390_DFP_ROUND_POSINF_6:                                       \
+      case S390_DFP_ROUND_POSINF_10:                                      \
+         cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,6);  \
+         break;                                                           \
+      case S390_DFP_ROUND_NEGINF_7:                                       \
+      case S390_DFP_ROUND_NEGINF_11:                                      \
+         cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,7);  \
+         break;                                                           \
+      case S390_DFP_ROUND_NEAREST_TIE_TOWARD_0:                           \
+         cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,13); \
+         break;                                                           \
+      case S390_DFP_ROUND_AWAY_0:                                         \
+         cc = S390_CC_FOR_DFP128_UCONVERT_AUX(opcode,cc_dep1,cc_dep2,14); \
+         break;                                                           \
+      default:                                                            \
+         vpanic("unexpected dfp rounding mode");                          \
+      }                                                                   \
+      cc;                                                                 \
+   })
+
+
+/* Return the value of the condition code from the supplied thunk parameters.
+   This is not the value of the PSW. It is the value of the 2 CC bits within
+   the PSW. The returned value is thusly in the interval [0:3]. */
+UInt
+s390_calculate_cc(ULong cc_op, ULong cc_dep1, ULong cc_dep2, ULong cc_ndep)
+{
+#if defined(VGA_s390x)
+   UInt psw;
+
+   switch (cc_op) {
+
+   case S390_CC_OP_BITWISE:
+      return S390_CC_FOR_BINARY("ogr", cc_dep1, (ULong)0);
+
+   case S390_CC_OP_SIGNED_COMPARE:
+      return S390_CC_FOR_BINARY("cgr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_UNSIGNED_COMPARE:
+      return S390_CC_FOR_BINARY("clgr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_SIGNED_ADD_64:
+      return S390_CC_FOR_BINARY("agr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_SIGNED_ADD_32:
+      return S390_CC_FOR_BINARY("ar", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_SIGNED_SUB_64:
+      return S390_CC_FOR_BINARY("sgr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_SIGNED_SUB_32:
+      return S390_CC_FOR_BINARY("sr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_UNSIGNED_ADD_64:
+      return S390_CC_FOR_BINARY("algr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_UNSIGNED_ADD_32:
+      return S390_CC_FOR_BINARY("alr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_UNSIGNED_ADDC_64:
+      return S390_CC_FOR_TERNARY_ADDC("alcgr", cc_dep1, cc_dep2, cc_ndep);
+
+   case S390_CC_OP_UNSIGNED_ADDC_32:
+      return S390_CC_FOR_TERNARY_ADDC("alcr", cc_dep1, cc_dep2, cc_ndep);
+
+   case S390_CC_OP_UNSIGNED_SUB_64:
+      return S390_CC_FOR_BINARY("slgr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_UNSIGNED_SUB_32:
+      return S390_CC_FOR_BINARY("slr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_UNSIGNED_SUBB_64:
+      return S390_CC_FOR_TERNARY_SUBB("slbgr", cc_dep1, cc_dep2, cc_ndep);
+
+   case S390_CC_OP_UNSIGNED_SUBB_32:
+      return S390_CC_FOR_TERNARY_SUBB("slbr", cc_dep1, cc_dep2, cc_ndep);
+
+   case S390_CC_OP_LOAD_AND_TEST:
+      /* Like signed comparison with 0 */
+      return S390_CC_FOR_BINARY("cgr", cc_dep1, (Long)0);
+
+   case S390_CC_OP_LOAD_POSITIVE_32:
+      __asm__ volatile (
+           "lpr  %[result],%[op]\n\t"
+           "ipm  %[psw]\n\t"         : [psw] "=d"(psw), [result] "=d"(cc_dep1)
+                                     : [op] "d"(cc_dep1)
+                                     : "cc");
+      return psw >> 28;   /* cc */
+
+   case S390_CC_OP_LOAD_POSITIVE_64:
+      __asm__ volatile (
+           "lpgr %[result],%[op]\n\t"
+           "ipm  %[psw]\n\t"         : [psw] "=d"(psw), [result] "=d"(cc_dep1)
+                                     : [op] "d"(cc_dep1)
+                                     : "cc");
+      return psw >> 28;   /* cc */
+
+   case S390_CC_OP_TEST_UNDER_MASK_8: {
+      UChar value  = cc_dep1;
+      UChar mask   = cc_dep2;
+
+      __asm__ volatile (
+           "bras %%r2,1f\n\t"             /* %r2 = address of next insn */
+           "tm %[value],0\n\t"            /* this is skipped, then EXecuted */
+           "1: ex %[mask],0(%%r2)\n\t"    /* EXecute TM after modifying mask */
+           "ipm %[psw]\n\t"             : [psw] "=d"(psw)
+                                        : [value] "m"(value), [mask] "a"(mask)
+                                        : "r2", "cc");
+      return psw >> 28;   /* cc */
+   }
+
+   case S390_CC_OP_TEST_UNDER_MASK_16: {
+      /* Create a TMLL insn with the mask as given by cc_dep2 */
+      UInt insn  = (0xA701u << 16) | cc_dep2;
+      UInt value = cc_dep1;
+
+      __asm__ volatile (
+           "lr   1,%[value]\n\t"
+           "lhi  2,0x10\n\t"
+           "ex   2,%[insn]\n\t"
+           "ipm  %[psw]\n\t"       : [psw] "=d"(psw)
+                                   : [value] "d"(value), [insn] "m"(insn)
+                                   : "r1", "r2", "cc");
+      return psw >> 28;   /* cc */
+   }
+
+   case S390_CC_OP_SHIFT_LEFT_32:
+      __asm__ volatile (
+           "sla  %[op],0(%[amount])\n\t"
+           "ipm  %[psw]\n\t"            : [psw] "=d"(psw), [op] "+d"(cc_dep1)
+                                        : [amount] "a"(cc_dep2)
+                                        : "cc");
+      return psw >> 28;   /* cc */
+
+   case S390_CC_OP_SHIFT_LEFT_64: {
+      Int high = (Int)(cc_dep1 >> 32);
+      Int low  = (Int)(cc_dep1 & 0xFFFFFFFF);
+
+      __asm__ volatile (
+           "lr   2,%[high]\n\t"
+           "lr   3,%[low]\n\t"
+           "slda 2,0(%[amount])\n\t"
+           "ipm %[psw]\n\t"             : [psw] "=d"(psw), [high] "+d"(high),
+                                          [low] "+d"(low)
+                                        : [amount] "a"(cc_dep2)
+                                        : "cc", "r2", "r3");
+      return psw >> 28;   /* cc */
+   }
+
+   case S390_CC_OP_INSERT_CHAR_MASK_32: {
+      Int inserted = 0;
+      Int msb = 0;
+
+      if (cc_dep2 & 1) {
+         inserted |= cc_dep1 & 0xff;
+         msb = 0x80;
+      }
+      if (cc_dep2 & 2) {
+         inserted |= cc_dep1 & 0xff00;
+         msb = 0x8000;
+      }
+      if (cc_dep2 & 4) {
+         inserted |= cc_dep1 & 0xff0000;
+         msb = 0x800000;
+      }
+      if (cc_dep2 & 8) {
+         inserted |= cc_dep1 & 0xff000000;
+         msb = 0x80000000;
+      }
+
+      if (inserted & msb)  // MSB is 1
+         return 1;
+      if (inserted > 0)
+         return 2;
+      return 0;
+   }
+
+   case S390_CC_OP_BFP_RESULT_32:
+      return S390_CC_FOR_BFP_RESULT("ltebr", cc_dep1);
+
+   case S390_CC_OP_BFP_RESULT_64:
+      return S390_CC_FOR_BFP_RESULT("ltdbr", cc_dep1);
+
+   case S390_CC_OP_BFP_RESULT_128:
+      return S390_CC_FOR_BFP128_RESULT(cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_32_TO_INT_32:
+      return S390_CC_FOR_BFP_CONVERT("cfebr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_64_TO_INT_32:
+      return S390_CC_FOR_BFP_CONVERT("cfdbr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_128_TO_INT_32:
+      return S390_CC_FOR_BFP128_CONVERT("cfxbr", cc_dep1, cc_dep2, cc_ndep);
+
+   case S390_CC_OP_BFP_32_TO_INT_64:
+      return S390_CC_FOR_BFP_CONVERT("cgebr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_64_TO_INT_64:
+      return S390_CC_FOR_BFP_CONVERT("cgdbr", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_128_TO_INT_64:
+      return S390_CC_FOR_BFP128_CONVERT("cgxbr", cc_dep1, cc_dep2, cc_ndep);
+
+   case S390_CC_OP_BFP_TDC_32:
+      return S390_CC_FOR_BFP_TDC("tceb", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_TDC_64:
+      return S390_CC_FOR_BFP_TDC("tcdb", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_TDC_128:
+      return S390_CC_FOR_BFP128_TDC(cc_dep1, cc_dep2, cc_ndep);
+
+   case S390_CC_OP_SET:
+      return cc_dep1;
+
+   case S390_CC_OP_BFP_32_TO_UINT_32:
+      return S390_CC_FOR_BFP_UCONVERT(".insn rrf,0xb39c0000", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_64_TO_UINT_32:
+      return S390_CC_FOR_BFP_UCONVERT(".insn rrf,0xb39d0000", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_128_TO_UINT_32:
+      return S390_CC_FOR_BFP128_UCONVERT(".insn rrf,0xb39e0000", cc_dep1,
+                                         cc_dep2, cc_ndep);
+
+   case S390_CC_OP_BFP_32_TO_UINT_64:
+      return S390_CC_FOR_BFP_UCONVERT(".insn rrf,0xb3ac0000", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_64_TO_UINT_64:
+      return S390_CC_FOR_BFP_UCONVERT(".insn rrf,0xb3ad0000", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_BFP_128_TO_UINT_64:
+      return S390_CC_FOR_BFP128_UCONVERT(".insn rrf,0xb3ae0000", cc_dep1,
+                                         cc_dep2, cc_ndep);
+
+   case S390_CC_OP_DFP_RESULT_64:
+      return S390_CC_FOR_DFP_RESULT(cc_dep1);
+
+   case S390_CC_OP_DFP_RESULT_128:
+      return S390_CC_FOR_DFP128_RESULT(cc_dep1, cc_dep2);
+
+   case S390_CC_OP_DFP_TDC_32:  /* TDCET */
+      return S390_CC_FOR_DFP_TD(".insn rxe, 0xed0000000050", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_DFP_TDC_64:  /* TDCDT */
+      return S390_CC_FOR_DFP_TD(".insn rxe, 0xed0000000054", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_DFP_TDC_128: /* TDCXT */
+      return S390_CC_FOR_DFP128_TD(".insn rxe, 0xed0000000058", cc_dep1,
+                                   cc_dep2, cc_ndep);
+
+   case S390_CC_OP_DFP_TDG_32:  /* TDGET */
+      return S390_CC_FOR_DFP_TD(".insn rxe, 0xed0000000051", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_DFP_TDG_64:  /* TDGDT */
+      return S390_CC_FOR_DFP_TD(".insn rxe, 0xed0000000055", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_DFP_TDG_128: /* TDGXT */
+      return S390_CC_FOR_DFP128_TD(".insn rxe, 0xed0000000059", cc_dep1,
+                                   cc_dep2, cc_ndep);
+
+   case S390_CC_OP_DFP_64_TO_INT_32: /* CFDTR */
+      return S390_CC_FOR_DFP_CONVERT(".insn rrf,0xb9410000", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_DFP_128_TO_INT_32: /* CFXTR */
+      return S390_CC_FOR_DFP128_CONVERT(".insn rrf,0xb9490000", cc_dep1,
+                                        cc_dep2, cc_ndep);
+
+   case S390_CC_OP_DFP_64_TO_INT_64: /* CGDTR */
+      return S390_CC_FOR_DFP_CONVERT(".insn rrf,0xb3e10000", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_DFP_128_TO_INT_64: /* CGXTR */
+      return S390_CC_FOR_DFP128_CONVERT(".insn rrf,0xb3e90000", cc_dep1,
+                                        cc_dep2, cc_ndep);
+
+   case S390_CC_OP_DFP_64_TO_UINT_32: /* CLFDTR */
+      return S390_CC_FOR_DFP_UCONVERT(".insn rrf,0xb9430000", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_DFP_128_TO_UINT_32: /* CLFXTR */
+      return S390_CC_FOR_DFP128_UCONVERT(".insn rrf,0xb94b0000", cc_dep1,
+                                         cc_dep2, cc_ndep);
+
+   case S390_CC_OP_DFP_64_TO_UINT_64: /* CLGDTR */
+      return S390_CC_FOR_DFP_UCONVERT(".insn rrf,0xb9420000", cc_dep1, cc_dep2);
+
+   case S390_CC_OP_DFP_128_TO_UINT_64: /* CLGXTR */
+      return S390_CC_FOR_DFP128_UCONVERT(".insn rrf,0xb94a0000", cc_dep1,
+                                         cc_dep2, cc_ndep);
+
+   case S390_CC_OP_PFPO_32: {
+      __asm__ volatile(
+           "ler 4, %[cc_dep1]\n\t"      /* 32 bit FR move */
+           "lr  0, %[cc_dep2]\n\t"      /* 32 bit GR move */
+           ".short 0x010a\n\t"          /* PFPO */
+           "ipm %[psw]\n\t"             : [psw] "=d"(psw)
+                                        : [cc_dep1] "f"(cc_dep1),
+                                          [cc_dep2] "d"(cc_dep2)
+                                        : "r0", "r1", "f4");
+      return psw >> 28;  /* cc */
+   }
+
+   case S390_CC_OP_PFPO_64: {
+      __asm__ volatile(
+           "ldr 4, %[cc_dep1]\n\t"
+           "lr  0, %[cc_dep2]\n\t"      /* 32 bit register move */
+           ".short 0x010a\n\t"          /* PFPO */
+           "ipm %[psw]\n\t"             : [psw] "=d"(psw)
+                                        : [cc_dep1] "f"(cc_dep1),
+                                          [cc_dep2] "d"(cc_dep2)
+                                        : "r0", "r1", "f4");
+      return psw >> 28;  /* cc */
+   }
+
+   case S390_CC_OP_PFPO_128: {
+      __asm__ volatile(
+           "ldr 4,%[cc_dep1]\n\t"
+           "ldr 6,%[cc_dep2]\n\t"
+           "lr  0,%[cc_ndep]\n\t"       /* 32 bit register move */
+           ".short 0x010a\n\t"          /* PFPO */
+           "ipm %[psw]\n\t"             : [psw] "=d"(psw)
+                                        : [cc_dep1] "f"(cc_dep1),
+                                          [cc_dep2] "f"(cc_dep2),
+                                          [cc_ndep] "d"(cc_ndep)
+                                        : "r0", "r1", "f0", "f2", "f4", "f6");
+      return psw >> 28;  /* cc */
+   }
+
+   default:
+      break;
+   }
+#endif
+   vpanic("s390_calculate_cc");
+}
+
+
+/* Evaluate an s390 branch condition: compute the condition code for the
+   given thunk (op, dep1, dep2, ndep) and test it against the 4-bit
+   condition mask MASK, where mask bit 8/4/2/1 selects cc 0/1/2/3.
+
+   Note that this does *not* return a Boolean value. The result needs to be
+   explicitly tested against zero. */
+UInt
+s390_calculate_cond(ULong mask, ULong op, ULong dep1, ULong dep2, ULong ndep)
+{
+   UInt cc = s390_calculate_cc(op, dep1, dep2, ndep);
+
+   /* Shift the mask left by cc; bit 3 (0x8) is then set iff the mask bit
+      corresponding to cc was set. */
+   return ((mask << cc) & 0x8);
+}
+
+/*------------------------------------------------------------*/
+/*--- spechelper for performance                           ---*/
+/*------------------------------------------------------------*/
+
+
+/* Convenience macros for building IR expression trees: unary/binary
+   operations and 64/32/8-bit integer constants. */
+#define unop(op,a1) IRExpr_Unop((op),(a1))
+#define binop(op,a1,a2) IRExpr_Binop((op),(a1),(a2))
+#define mkU64(v) IRExpr_Const(IRConst_U64(v))
+#define mkU32(v) IRExpr_Const(IRConst_U32(v))
+#define mkU8(v)  IRExpr_Const(IRConst_U8(v))
+
+
+/* True iff EXPR is a 64-bit integer constant (Iex_Const / Ico_U64). */
+static inline Bool
+isC64(const IRExpr *expr)
+{
+   return expr->tag == Iex_Const && expr->Iex.Const.con->tag == Ico_U64;
+}
+
+
+/* Specialiser for calls to the s390 condition-code helper functions.
+
+   The returned expression is NULL if no specialization was found. In that
+   case the helper function will be called. Otherwise, the expression has
+   type Ity_I32 and a Boolean value. */
+IRExpr *
+guest_s390x_spechelper(const HChar *function_name, IRExpr **args,
+                       IRStmt **precedingStmts, Int n_precedingStmts)
+{
+   UInt i, arity = 0;
+
+   /* ARGS is NULL-terminated; count the arguments. */
+   for (i = 0; args[i]; i++)
+      arity++;
+
+#  if 0
+   vex_printf("spec request:\n");
+   vex_printf("   %s  ", function_name);
+   for (i = 0; i < arity; i++) {
+      vex_printf("  ");
+      ppIRExpr(args[i]);
+   }
+   vex_printf("\n");
+#  endif
+
+   /* --------- Specialising "s390_calculate_cond" --------- */
+
+   if (vex_streq(function_name, "s390_calculate_cond")) {
+      IRExpr *cond_expr, *cc_op_expr, *cc_dep1, *cc_dep2;
+      ULong cond, cc_op;
+
+      vassert(arity == 5);
+
+      cond_expr  = args[0];
+      cc_op_expr = args[1];
+
+      /* The necessary requirement for all optimizations here is that the
+         condition and the cc_op are constant. So check that upfront. */
+      if (! isC64(cond_expr))  return NULL;
+      if (! isC64(cc_op_expr)) return NULL;
+
+      cond    = cond_expr->Iex.Const.con->Ico.U64;
+      cc_op   = cc_op_expr->Iex.Const.con->Ico.U64;
+
+      vassert(cond <= 15);
+
+      /* Mask bit selected by each condition code value:
+        +------+---+---+---+---+
+        | cc   | 0 | 1 | 2 | 3 |
+        | cond | 8 | 4 | 2 | 1 |
+        +------+---+---+---+---+
+      */
+      cc_dep1 = args[2];
+      cc_dep2 = args[3];
+
+      /* S390_CC_OP_SIGNED_COMPARE */
+      if (cc_op == S390_CC_OP_SIGNED_COMPARE) {
+         /*
+            cc == 0  --> cc_dep1 == cc_dep2   (cond == 8)
+            cc == 1  --> cc_dep1 <  cc_dep2   (cond == 4)
+            cc == 2  --> cc_dep1 >  cc_dep2   (cond == 2)
+
+            Because cc == 3 cannot occur the rightmost bit of cond is
+            a don't care.
+         */
+         if (cond == 8 || cond == 8 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, cc_dep2));
+         }
+         if (cond == 4 + 2 || cond == 4 + 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, cc_dep2));
+         }
+         if (cond == 4 || cond == 4 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLT64S, cc_dep1, cc_dep2));
+         }
+         if (cond == 8 + 4 || cond == 8 + 4 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLE64S, cc_dep1, cc_dep2));
+         }
+         /* cc_dep1 > cc_dep2  ---->  cc_dep2 < cc_dep1 */
+         if (cond == 2 || cond == 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLT64S, cc_dep2, cc_dep1));
+         }
+         if (cond == 8 + 2 || cond == 8 + 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLE64S, cc_dep2, cc_dep1));
+         }
+         if (cond == 8 + 4 + 2 || cond == 8 + 4 + 2 + 1) {
+            return mkU32(1);
+         }
+         /* Remaining case */
+         return mkU32(0);
+      }
+
+      /* S390_CC_OP_UNSIGNED_COMPARE */
+      if (cc_op == S390_CC_OP_UNSIGNED_COMPARE) {
+         /*
+            cc == 0  --> cc_dep1 == cc_dep2   (cond == 8)
+            cc == 1  --> cc_dep1 <  cc_dep2   (cond == 4)
+            cc == 2  --> cc_dep1 >  cc_dep2   (cond == 2)
+
+            Because cc == 3 cannot occur the rightmost bit of cond is
+            a don't care.
+         */
+         if (cond == 8 || cond == 8 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, cc_dep2));
+         }
+         if (cond == 4 + 2 || cond == 4 + 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, cc_dep2));
+         }
+         if (cond == 4 || cond == 4 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLT64U, cc_dep1, cc_dep2));
+         }
+         if (cond == 8 + 4 || cond == 8 + 4 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLE64U, cc_dep1, cc_dep2));
+         }
+         /* cc_dep1 > cc_dep2  ---->  cc_dep2 < cc_dep1 */
+         if (cond == 2 || cond == 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLT64U, cc_dep2, cc_dep1));
+         }
+         if (cond == 8 + 2 || cond == 8 + 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLE64U, cc_dep2, cc_dep1));
+         }
+         if (cond == 8 + 4 + 2 || cond == 8 + 4 + 2 + 1) {
+            return mkU32(1);
+         }
+         /* Remaining case */
+         return mkU32(0);
+      }
+
+      /* S390_CC_OP_LOAD_AND_TEST */
+      if (cc_op == S390_CC_OP_LOAD_AND_TEST) {
+         /*
+            cc == 0  --> cc_dep1 == 0   (cond == 8)
+            cc == 1  --> cc_dep1 <  0   (cond == 4)
+            cc == 2  --> cc_dep1 >  0   (cond == 2)
+
+            Because cc == 3 cannot occur the rightmost bit of cond is
+            a don't care.
+         */
+         if (cond == 8 || cond == 8 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
+         }
+         if (cond == 4 + 2 || cond == 4 + 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+         }
+         if (cond == 4 || cond == 4 + 1) {
+             /* Special case cc_dep < 0. Only check the MSB to avoid bogus
+               memcheck complaints due to gcc magic. Fixes 343802
+             */
+            return unop(Iop_64to32, binop(Iop_Shr64, cc_dep1, mkU8(63)));
+         }
+         if (cond == 8 + 4 || cond == 8 + 4 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLE64S, cc_dep1, mkU64(0)));
+         }
+         /* cc_dep1 > 0  ---->  0 < cc_dep1 */
+         if (cond == 2 || cond == 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLT64S, mkU64(0), cc_dep1));
+         }
+         if (cond == 8 + 2 || cond == 8 + 2 + 1) {
+            /* Special case cc_dep >= 0. Only check the MSB to avoid bogus
+               memcheck complaints due to gcc magic. Fixes 308427
+             */
+            return unop(Iop_64to32, binop(Iop_Xor64,
+                                          binop(Iop_Shr64, cc_dep1, mkU8(63)),
+                                          mkU64(1)));
+         }
+         if (cond == 8 + 4 + 2 || cond == 8 + 4 + 2 + 1) {
+            return mkU32(1);
+         }
+         /* Remaining case */
+         return mkU32(0);
+      }
+
+      /* S390_CC_OP_BITWISE */
+      if (cc_op == S390_CC_OP_BITWISE) {
+         /*
+            cc_dep1 is the result of the boolean operation.
+
+            cc == 0  --> cc_dep1 == 0   (cond == 8)
+            cc == 1  --> cc_dep1 != 0   (cond == 4)
+
+            Because cc == 2 and cc == 3 cannot occur the two rightmost bits of
+            cond are don't cares. Therefore:
+
+            cond == 00xx  -> always false
+            cond == 01xx  -> not equal
+            cond == 10xx  -> equal
+            cond == 11xx  -> always true
+         */
+         if ((cond & (8 + 4)) == 8 + 4) {
+            return mkU32(1);
+         }
+         if (cond & 8) {
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
+         }
+         if (cond & 4) {
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+         }
+         /* Remaining case */
+         return mkU32(0);
+      }
+
+      /* S390_CC_OP_INSERT_CHAR_MASK_32
+         Since the mask comes from an immediate field in the opcode, we
+         expect the mask to be a constant here. That simplifies matters. */
+      if (cc_op == S390_CC_OP_INSERT_CHAR_MASK_32) {
+         ULong mask;
+         UInt imask = 0, shift = 0;
+         IRExpr *word;
+
+         if (! isC64(cc_dep2)) goto missed;
+
+         mask = cc_dep2->Iex.Const.con->Ico.U64;
+
+         /* Extract the 32-bit value from the thunk */
+
+         word = unop(Iop_64to32, cc_dep1);
+
+         /* For each 4-bit mask value: SHIFT positions the leftmost
+            inserted byte at the MSB, IMASK selects the inserted bytes. */
+         switch (mask) {
+         case 0:  shift =  0; imask = 0x00000000; break;
+         case 1:  shift = 24; imask = 0x000000FF; break;
+         case 2:  shift = 16; imask = 0x0000FF00; break;
+         case 3:  shift = 16; imask = 0x0000FFFF; break;
+         case 4:  shift =  8; imask = 0x00FF0000; break;
+         case 5:  shift =  8; imask = 0x00FF00FF; break;
+         case 6:  shift =  8; imask = 0x00FFFF00; break;
+         case 7:  shift =  8; imask = 0x00FFFFFF; break;
+         case 8:  shift =  0; imask = 0xFF000000; break;
+         case 9:  shift =  0; imask = 0xFF0000FF; break;
+         case 10: shift =  0; imask = 0xFF00FF00; break;
+         case 11: shift =  0; imask = 0xFF00FFFF; break;
+         case 12: shift =  0; imask = 0xFFFF0000; break;
+         case 13: shift =  0; imask = 0xFFFF00FF; break;
+         case 14: shift =  0; imask = 0xFFFFFF00; break;
+         case 15: shift =  0; imask = 0xFFFFFFFF; break;
+         }
+
+         /* Select the bits that were inserted */
+         word = binop(Iop_And32, word, mkU32(imask));
+
+         /* cc == 0  --> all inserted bits zero or mask == 0   (cond == 8)
+            cc == 1  --> leftmost inserted bit is one          (cond == 4)
+            cc == 2  --> leftmost inserted bit is zero and not (cond == 2)
+                         all inserted bits are zero
+
+            Because cc == 0,1,2 the rightmost bit of the mask is a don't care */
+         if (cond == 8 || cond == 8 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ32, word, mkU32(0)));
+         }
+         if (cond == 4 + 2 || cond == 4 + 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpNE32, word, mkU32(0)));
+         }
+
+         /* Sign extend */
+         if (shift != 0) {
+            word = binop(Iop_Sar32, binop(Iop_Shl32, word, mkU8(shift)),
+                         mkU8(shift));
+         }
+
+         if (cond == 4 || cond == 4 + 1) {  /* word < 0 */
+            return unop(Iop_1Uto32, binop(Iop_CmpLT32S, word, mkU32(0)));
+         }
+         if (cond == 2 || cond == 2 + 1) {  /* word > 0 */
+            return unop(Iop_1Uto32, binop(Iop_CmpLT32S, mkU32(0), word));
+         }
+         if (cond == 8 + 4 || cond == 8 + 4 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLE32S, word, mkU32(0)));
+         }
+         if (cond == 8 + 2 || cond == 8 + 2 + 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpLE32S, mkU32(0), word));
+         }
+         if (cond == 8 + 4 + 2 || cond == 8 + 4 + 2 + 1) {
+            return mkU32(1);
+         }
+         /* Remaining case */
+         return mkU32(0);
+      }
+
+      /* S390_CC_OP_TEST_UNDER_MASK_8
+         Since the mask comes from an immediate field in the opcode, we
+         expect the mask to be a constant here. That simplifies matters. */
+      if (cc_op == S390_CC_OP_TEST_UNDER_MASK_8) {
+         ULong mask16;
+
+         if (! isC64(cc_dep2)) goto missed;
+
+         mask16 = cc_dep2->Iex.Const.con->Ico.U64;
+
+         /* Get rid of the mask16 == 0 case first. Some of the simplifications
+            below (e.g. for OVFL) only hold if mask16 == 0.  */
+         if (mask16 == 0) {   /* cc == 0 */
+            if (cond & 0x8) return mkU32(1);
+            return mkU32(0);
+         }
+
+         /* cc == 2 is a don't care */
+         if (cond == 8 || cond == 8 + 2) {
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+                                          binop(Iop_And64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         if (cond == 7 || cond == 7 - 2) {
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+                                          binop(Iop_And64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         if (cond == 1 || cond == 1 + 2) {
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+                                          binop(Iop_And64, cc_dep1, cc_dep2),
+                                          cc_dep2));
+         }
+         if (cond == 14 || cond == 14 - 2) {  /* ! OVFL */
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+                                          binop(Iop_And64, cc_dep1, cc_dep2),
+                                          cc_dep2));
+         }
+         goto missed;
+      }
+
+      /* S390_CC_OP_TEST_UNDER_MASK_16
+         Since the mask comes from an immediate field in the opcode, we
+         expect the mask to be a constant here. That simplifies matters. */
+      if (cc_op == S390_CC_OP_TEST_UNDER_MASK_16) {
+         ULong mask16;
+         UInt msb;
+
+         if (! isC64(cc_dep2)) goto missed;
+
+         mask16 = cc_dep2->Iex.Const.con->Ico.U64;
+
+         /* Get rid of the mask16 == 0 case first. Some of the simplifications
+            below (e.g. for OVFL) only hold if mask16 == 0.  */
+         if (mask16 == 0) {   /* cc == 0 */
+            if (cond & 0x8) return mkU32(1);
+            return mkU32(0);
+         }
+
+         if (cond == 8) {
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+                                          binop(Iop_And64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         if (cond == 7) {
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+                                          binop(Iop_And64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         if (cond == 1) {
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+                                          binop(Iop_And64, cc_dep1, cc_dep2),
+                                          mkU64(mask16)));
+         }
+         if (cond == 14) {  /* ! OVFL */
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+                                          binop(Iop_And64, cc_dep1, cc_dep2),
+                                          mkU64(mask16)));
+         }
+
+         /* Find MSB in mask */
+         msb = 0x8000;
+         while (msb > mask16)
+            msb >>= 1;
+
+         if (cond == 2) {  /* cc == 2 */
+            IRExpr *c1, *c2;
+
+            /* (cc_dep & msb) != 0 && (cc_dep & mask16) != mask16 */
+            c1 = binop(Iop_CmpNE64,
+                       binop(Iop_And64, cc_dep1, mkU64(msb)), mkU64(0));
+            c2 = binop(Iop_CmpNE64,
+                       binop(Iop_And64, cc_dep1, cc_dep2),
+                       mkU64(mask16));
+            return binop(Iop_And32, unop(Iop_1Uto32, c1),
+                         unop(Iop_1Uto32, c2));
+         }
+
+         if (cond == 4) {  /* cc == 1 */
+            IRExpr *c1, *c2;
+
+            /* (cc_dep & msb) == 0 && (cc_dep & mask16) != 0 */
+            c1 = binop(Iop_CmpEQ64,
+                       binop(Iop_And64, cc_dep1, mkU64(msb)), mkU64(0));
+            c2 = binop(Iop_CmpNE64,
+                       binop(Iop_And64, cc_dep1, cc_dep2),
+                       mkU64(0));
+            return binop(Iop_And32, unop(Iop_1Uto32, c1),
+                         unop(Iop_1Uto32, c2));
+         }
+
+         if (cond == 11) {  /* cc == 0,2,3 */
+            IRExpr *c1, *c2;
+
+            c1 = binop(Iop_CmpNE64,
+                       binop(Iop_And64, cc_dep1, mkU64(msb)), mkU64(0));
+            c2 = binop(Iop_CmpEQ64,
+                       binop(Iop_And64, cc_dep1, cc_dep2),
+                       mkU64(0));
+            return binop(Iop_Or32, unop(Iop_1Uto32, c1),
+                         unop(Iop_1Uto32, c2));
+         }
+
+         if (cond == 3) {  /* cc == 2 || cc == 3 */
+            return unop(Iop_1Uto32,
+                        binop(Iop_CmpNE64,
+                              binop(Iop_And64, cc_dep1, mkU64(msb)),
+                              mkU64(0)));
+         }
+         if (cond == 12) { /* cc == 0 || cc == 1 */
+            return unop(Iop_1Uto32,
+                        binop(Iop_CmpEQ64,
+                              binop(Iop_And64, cc_dep1, mkU64(msb)),
+                              mkU64(0)));
+         }
+         // vex_printf("TUM mask = 0x%llx\n", mask16);
+         goto missed;
+      }
+
+      /* S390_CC_OP_UNSIGNED_SUB_64/32 */
+      if (cc_op == S390_CC_OP_UNSIGNED_SUB_64 ||
+          cc_op == S390_CC_OP_UNSIGNED_SUB_32) {
+         /*
+            cc_dep1, cc_dep2 are the zero extended left and right operands
+
+            cc == 1  --> result != 0, borrow    (cond == 4)
+            cc == 2  --> result == 0, no borrow (cond == 2)
+            cc == 3  --> result != 0, no borrow (cond == 1)
+
+            cc = (cc_dep1 == cc_dep2) ? 2
+                                      : (cc_dep1 > cc_dep2) ? 3 : 1;
+
+            Because cc == 0 cannot occur the leftmost bit of cond is
+            a don't care.
+         */
+         if (cond == 1 || cond == 1 + 8) {  /* cc == 3   op2 < op1 */
+            return unop(Iop_1Uto32, binop(Iop_CmpLT64U, cc_dep2, cc_dep1));
+         }
+         if (cond == 2 || cond == 2 + 8) {  /* cc == 2 */
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64, cc_dep1, cc_dep2));
+         }
+         if (cond == 4 || cond == 4 + 8) {  /* cc == 1 */
+            return unop(Iop_1Uto32, binop(Iop_CmpLT64U, cc_dep1, cc_dep2));
+         }
+         if (cond == 3 || cond == 3 + 8) {  /* cc == 2 || cc == 3 */
+            return unop(Iop_1Uto32, binop(Iop_CmpLE64U, cc_dep2, cc_dep1));
+         }
+         if (cond == 6 || cond == 6 + 8) {  /* cc == 2 || cc == 1 */
+            return unop(Iop_1Uto32, binop(Iop_CmpLE64U, cc_dep1, cc_dep2));
+         }
+
+         if (cond == 5 || cond == 5 + 8) {  /* cc == 3 || cc == 1 */
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64, cc_dep1, cc_dep2));
+         }
+         if (cond == 7 || cond == 7 + 8) {
+            return mkU32(1);
+         }
+         /* Remaining case */
+         return mkU32(0);
+      }
+
+      /* S390_CC_OP_UNSIGNED_ADD_64 */
+      if (cc_op == S390_CC_OP_UNSIGNED_ADD_64) {
+         /*
+            cc_dep1, cc_dep2 are the zero extended left and right operands
+
+            cc == 0  --> result == 0, no carry  (cond == 8)
+            cc == 1  --> result != 0, no carry  (cond == 4)
+            cc == 2  --> result == 0, carry     (cond == 2)
+            cc == 3  --> result != 0, carry     (cond == 1)
+         */
+         if (cond == 8) { /* cc == 0 */
+            /* Both inputs are 0 */
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+                                          binop(Iop_Or64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         if (cond == 7) { /* cc == 1,2,3 */
+            /* Not both inputs are 0 */
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+                                          binop(Iop_Or64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         if (cond == 8 + 2) {  /* cc == 0,2  -> result is zero */
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+                                          binop(Iop_Add64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         if (cond == 4 + 1) {  /* cc == 1,3  -> result is not zero */
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+                                          binop(Iop_Add64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         goto missed;
+      }
+
+      /* S390_CC_OP_UNSIGNED_ADD_32 */
+      if (cc_op == S390_CC_OP_UNSIGNED_ADD_32) {
+         /*
+            cc_dep1, cc_dep2 are the zero extended left and right operands
+
+            cc == 0  --> result == 0, no carry  (cond == 8)
+            cc == 1  --> result != 0, no carry  (cond == 4)
+            cc == 2  --> result == 0, carry     (cond == 2)
+            cc == 3  --> result != 0, carry     (cond == 1)
+         */
+         if (cond == 8) { /* cc == 0 */
+            /* Both inputs are 0 */
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ64,
+                                          binop(Iop_Or64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         if (cond == 7) { /* cc == 1,2,3 */
+            /* Not both inputs are 0 */
+            return unop(Iop_1Uto32, binop(Iop_CmpNE64,
+                                          binop(Iop_Or64, cc_dep1, cc_dep2),
+                                          mkU64(0)));
+         }
+         if (cond == 8 + 2) {  /* cc == 0,2  -> result is zero */
+            return unop(Iop_1Uto32, binop(Iop_CmpEQ32,
+                                          binop(Iop_Add32,
+                                                unop(Iop_64to32, cc_dep1),
+                                                unop(Iop_64to32, cc_dep2)),
+                                          mkU32(0)));
+         }
+         if (cond == 4 + 1) {  /* cc == 1,3  -> result is not zero */
+            return unop(Iop_1Uto32, binop(Iop_CmpNE32,
+                                          binop(Iop_Add32,
+                                                unop(Iop_64to32, cc_dep1),
+                                                unop(Iop_64to32, cc_dep2)),
+                                          mkU32(0)));
+         }
+         goto missed;
+      }
+
+      /* S390_CC_OP_SET */
+      if (cc_op == S390_CC_OP_SET) {
+         /* cc_dep1 is the condition code
+
+            Return 1, if ((cond << cc_dep1) & 0x8) != 0 */
+
+        return unop(Iop_1Uto32,
+                    binop(Iop_CmpNE64,
+                          binop(Iop_And64,
+                                binop(Iop_Shl64, cond_expr,
+                                      unop(Iop_64to8, cc_dep1)),
+                                mkU64(8)),
+                          mkU64(0)));
+      }
+
+      goto missed;
+   }
+
+   /* --------- Specialising "s390_calculate_cc" --------- */
+
+   if (vex_streq(function_name, "s390_calculate_cc")) {
+      IRExpr *cc_op_expr, *cc_dep1;
+      ULong cc_op;
+
+      vassert(arity == 4);
+
+      cc_op_expr = args[0];
+
+      /* The necessary requirement for all optimizations here is that
+         cc_op is constant. So check that upfront. */
+      if (! isC64(cc_op_expr)) return NULL;
+
+      cc_op   = cc_op_expr->Iex.Const.con->Ico.U64;
+      cc_dep1 = args[1];
+
+      if (cc_op == S390_CC_OP_BITWISE) {
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+      }
+
+      if (cc_op == S390_CC_OP_SET) {
+         return unop(Iop_64to32, cc_dep1);
+      }
+
+      goto missed;
+   }
+
+/* No specialisation identified; caller falls back to the helper call. */
+missed:
+   return NULL;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                guest_s390_helpers.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_s390_toIR.c b/VEX/priv/guest_s390_toIR.c
new file mode 100644
index 0000000..023cb7d
--- /dev/null
+++ b/VEX/priv/guest_s390_toIR.c
@@ -0,0 +1,16681 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 guest_s390_toIR.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm and Christian Borntraeger */
+
+/* Translates s390 code to IR. */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex_emnote.h"
+#include "libvex_s390x_common.h"
+#include "main_util.h"               /* vassert */
+#include "main_globals.h"            /* vex_traceflags */
+#include "guest_generic_bb_to_IR.h"  /* DisResult */
+#include "guest_s390_defs.h"         /* prototypes for this file's functions */
+#include "s390_disasm.h"
+#include "s390_defs.h"               /* S390_BFP_ROUND_xyzzy */
+#include "host_s390_defs.h"          /* s390_host_has_xyzzy */
+
+
+/*------------------------------------------------------------*/
+/*--- Forward declarations                                 ---*/
+/*------------------------------------------------------------*/
+static UInt s390_decode_and_irgen(const UChar *, UInt, DisResult *);
+static void s390_irgen_xonc(IROp, IRTemp, IRTemp, IRTemp);
+static void s390_irgen_CLC_EX(IRTemp, IRTemp, IRTemp);
+
+
+/*------------------------------------------------------------*/
+/*--- Globals                                              ---*/
+/*------------------------------------------------------------*/
+
+/* The IRSB* into which we're generating code. */
+static IRSB *irsb;
+
+/* The guest address for the instruction currently being
+   translated. */
+static Addr64 guest_IA_curr_instr;
+
+/* The guest address for the instruction following the current instruction. */
+static Addr64 guest_IA_next_instr;
+
+/* Result of disassembly step. */
+static DisResult *dis_res;
+
+/* Resteer function and callback data */
+static Bool (*resteer_fn)(void *, Addr);
+static void *resteer_data;
+
+/* Whether to print diagnostics for illegal instructions. */
+static Bool sigill_diag;
+
+/* The last seen execute target instruction */
+ULong last_execute_target;
+
+/* The possible outcomes of a decoding operation */
+typedef enum {
+   S390_DECODE_OK,
+   S390_DECODE_UNKNOWN_INSN,
+   S390_DECODE_UNIMPLEMENTED_INSN,
+   S390_DECODE_UNKNOWN_SPECIAL_INSN,
+   S390_DECODE_ERROR
+} s390_decode_t;
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for constructing IR.                         ---*/
+/*------------------------------------------------------------*/
+
+/* Add a statement to the current irsb. */
+static __inline__ void
+stmt(IRStmt *st)
+{
+   addStmtToIRSB(irsb, st);
+}
+
+/* Allocate a new temporary of the given type. */
+static __inline__ IRTemp
+newTemp(IRType type)
+{
+   vassert(isPlausibleIRType(type));
+
+   return newIRTemp(irsb->tyenv, type);
+}
+
+/* Create an expression node for a temporary */
+static __inline__ IRExpr *
+mkexpr(IRTemp tmp)
+{
+   return IRExpr_RdTmp(tmp);
+}
+
+/* Generate an expression node for an address. */
+static __inline__ IRExpr *
+mkaddr_expr(Addr64 addr)
+{
+   return IRExpr_Const(IRConst_U64(addr));
+}
+
+/* Add a statement that assigns to a temporary */
+static __inline__ void
+assign(IRTemp dst, IRExpr *expr)
+{
+   stmt(IRStmt_WrTmp(dst, expr));
+}
+
+/* Write an address into the guest_IA */
+static __inline__ void
+put_IA(IRExpr *address)
+{
+   stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_IA), address));
+}
+
+/* Allocate a fresh temporary of TYPE, bind EXPR to it, and return it. */
+static __inline__ IRTemp
+mktemp(IRType type, IRExpr *expr)
+{
+   IRTemp result = newTemp(type);
+   assign(result, expr);
+   return result;
+}
+
+/* Create a unary expression */
+static __inline__ IRExpr *
+unop(IROp kind, IRExpr *op)
+{
+   return IRExpr_Unop(kind, op);
+}
+
+/* Create a binary expression */
+static __inline__ IRExpr *
+binop(IROp kind, IRExpr *op1, IRExpr *op2)
+{
+   return IRExpr_Binop(kind, op1, op2);
+}
+
+/* Create a ternary expression */
+static __inline__ IRExpr *
+triop(IROp kind, IRExpr *op1, IRExpr *op2, IRExpr *op3)
+{
+   return IRExpr_Triop(kind, op1, op2, op3);
+}
+
+/* Create a quaternary expression */
+static __inline__  IRExpr *
+qop(IROp kind, IRExpr *op1, IRExpr *op2, IRExpr *op3, IRExpr *op4)
+{
+   return IRExpr_Qop(kind, op1, op2, op3, op4);
+}
+
+/* Create an expression node for an 8-bit integer constant */
+static __inline__ IRExpr *
+mkU8(UInt value)
+{
+   vassert(value < 256);
+
+   return IRExpr_Const(IRConst_U8((UChar)value));
+}
+
+/* Create an expression node for a 16-bit integer constant */
+static __inline__ IRExpr *
+mkU16(UInt value)
+{
+   vassert(value < 65536);
+
+   return IRExpr_Const(IRConst_U16((UShort)value));
+}
+
+/* Create an expression node for a 32-bit integer constant */
+static __inline__ IRExpr *
+mkU32(UInt value)
+{
+   return IRExpr_Const(IRConst_U32(value));
+}
+
+/* Create an expression node for a 64-bit integer constant */
+static __inline__ IRExpr *
+mkU64(ULong value)
+{
+   return IRExpr_Const(IRConst_U64(value));
+}
+
+/* Create an expression node for a 32-bit floating point constant
+   whose value is given by a bit pattern. */
+static __inline__ IRExpr *
+mkF32i(UInt value)
+{
+   return IRExpr_Const(IRConst_F32i(value));
+}
+
+/* Create an expression node for a 64-bit floating point constant
+   whose value is given by a bit pattern. */
+static __inline__ IRExpr *
+mkF64i(ULong value)
+{
+   return IRExpr_Const(IRConst_F64i(value));
+}
+
+/* Little helper function for my sanity. ITE = if-then-else.
+   CONDITION must be a 1-bit expression (Ity_I1, asserted below);
+   IFTRUE / IFFALSE are the values selected when it is 1 resp. 0. */
+static IRExpr *
+mkite(IRExpr *condition, IRExpr *iftrue, IRExpr *iffalse)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+   return IRExpr_ITE(condition, iftrue, iffalse);
+}
+
+/* Add a statement that stores DATA at ADDR. This is a big-endian machine. */
+static __inline__ void
+store(IRExpr *addr, IRExpr *data)
+{
+   stmt(IRStmt_Store(Iend_BE, addr, data));
+}
+
+/* Create an expression that loads a TYPE sized value from ADDR.
+   This is a big-endian machine. */
+static __inline__ IRExpr *
+load(IRType type, IRExpr *addr)
+{
+   return IRExpr_Load(Iend_BE, type, addr);
+}
+
+/* Function call */
+static void
+call_function(IRExpr *callee_address)
+{
+   put_IA(callee_address);
+
+   dis_res->whatNext    = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_Call;
+}
+
+/* Function call with known target. If the resteer callback approves
+   CALLEE_ADDRESS, translation continues ("chases") directly into the
+   callee; otherwise the block ends here with an Ijk_Call jump. */
+static void
+call_function_and_chase(Addr64 callee_address)
+{
+   if (resteer_fn(resteer_data, callee_address)) {
+      dis_res->whatNext   = Dis_ResteerU;
+      dis_res->continueAt = callee_address;
+   } else {
+      put_IA(mkaddr_expr(callee_address));
+
+      dis_res->whatNext = Dis_StopHere;
+      dis_res->jk_StopHere = Ijk_Call;
+   }
+}
+
+/* Function return sequence */
+static void
+return_from_function(IRExpr *return_address)
+{
+   put_IA(return_address);
+
+   dis_res->whatNext    = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_Ret;
+}
+
+/* A conditional branch whose target is not known at instrumentation time.
+
+   if (condition) goto computed_target;
+
+   Needs to be represented as:
+
+   if (! condition) goto next_instruction;
+   goto computed_target;
+*/
+static void
+if_condition_goto_computed(IRExpr *condition, IRExpr *target)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+   /* Invert: the side exit is taken when the branch is NOT taken. */
+   condition = unop(Iop_Not1, condition);
+
+   stmt(IRStmt_Exit(condition, Ijk_Boring, IRConst_U64(guest_IA_next_instr),
+                    S390X_GUEST_OFFSET(guest_IA)));
+
+   put_IA(target);
+
+   dis_res->whatNext    = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_Boring;
+}
+
+/* A conditional branch whose target is known at instrumentation time.
+   Emits a side exit to TARGET taken when CONDITION is true; the
+   fall-through path continues at the next instruction and ends the
+   block. */
+static void
+if_condition_goto(IRExpr *condition, Addr64 target)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+   stmt(IRStmt_Exit(condition, Ijk_Boring, IRConst_U64(target),
+                    S390X_GUEST_OFFSET(guest_IA)));
+
+   put_IA(mkaddr_expr(guest_IA_next_instr));
+
+   dis_res->whatNext    = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_Boring;
+}
+
+/* An unconditional branch. Target may or may not be known at instrumentation
+   time. */
+static void
+always_goto(IRExpr *target)
+{
+   put_IA(target);
+
+   dis_res->whatNext    = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_Boring;
+}
+
+
+/* An unconditional branch to a known target. */
+static void
+always_goto_and_chase(Addr64 target)
+{
+   if (resteer_fn(resteer_data, target)) {
+      /* Follow into the target */
+      dis_res->whatNext   = Dis_ResteerU;
+      dis_res->continueAt = target;
+   } else {
+      put_IA(mkaddr_expr(target));
+
+      dis_res->whatNext    = Dis_StopHere;
+      dis_res->jk_StopHere = Ijk_Boring;
+   }
+}
+
+/* A system call. SYSNO is the expression computing the system call
+   number. Writes SYSNO to guest_SYSNO and the current insn address to
+   guest_IP_AT_SYSCALL, then ends the block with Ijk_Sys_syscall. */
+static void
+system_call(IRExpr *sysno)
+{
+   /* Store the system call number in the pseudo register. */
+   stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_SYSNO), sysno));
+
+   /* Store the current IA into guest_IP_AT_SYSCALL. libvex_ir.h says so. */
+   stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_IP_AT_SYSCALL),
+                   mkU64(guest_IA_curr_instr)));
+
+   put_IA(mkaddr_expr(guest_IA_next_instr));
+
+   /* It's important that all ArchRegs carry their up-to-date value
+      at this point.  So we declare an end-of-block here, which
+      forces any TempRegs caching ArchRegs to be flushed. */
+   dis_res->whatNext    = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_Sys_syscall;
+}
+
+/* A side exit that branches back to the current insn if CONDITION is
+   true. Does not set DisResult. */
+static void
+iterate_if(IRExpr *condition)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+   stmt(IRStmt_Exit(condition, Ijk_Boring, IRConst_U64(guest_IA_curr_instr),
+                    S390X_GUEST_OFFSET(guest_IA)));
+}
+
+/* A side exit that branches back to the current insn.
+   Does not set DisResult. */
+static __inline__ void
+iterate(void)
+{
+   iterate_if(IRExpr_Const(IRConst_U1(True)));
+}
+
+/* A side exit that branches back to the insn immediately following the
+   current insn if CONDITION is true. Does not set DisResult. */
+static void
+next_insn_if(IRExpr *condition)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+   stmt(IRStmt_Exit(condition, Ijk_Boring, IRConst_U64(guest_IA_next_instr),
+                    S390X_GUEST_OFFSET(guest_IA)));
+}
+
+/* Convenience function to restart the current insn */
+static void
+restart_if(IRExpr *condition)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+   stmt(IRStmt_Exit(condition, Ijk_InvalICache,
+                    IRConst_U64(guest_IA_curr_instr),
+                    S390X_GUEST_OFFSET(guest_IA)));
+}
+
+/* Convenience function to yield to thread scheduler. Emits a side exit
+   with jump kind Ijk_Yield to the insn following the current one, taken
+   when CONDITION is true. Does not set DisResult. */
+static void
+yield_if(IRExpr *condition)
+{
+   /* Every other condition-taking exit helper in this file (iterate_if,
+      next_insn_if, restart_if, ...) asserts the condition is Ity_I1;
+      do the same here for consistency and early error detection. */
+   vassert(typeOfIRExpr(irsb->tyenv, condition) == Ity_I1);
+
+   stmt(IRStmt_Exit(condition, Ijk_Yield, IRConst_U64(guest_IA_next_instr),
+                    S390X_GUEST_OFFSET(guest_IA)));
+}
+
+static __inline__ IRExpr *get_fpr_dw0(UInt);
+static __inline__ void    put_fpr_dw0(UInt, IRExpr *);
+static __inline__ IRExpr *get_dpr_dw0(UInt);
+static __inline__ void    put_dpr_dw0(UInt, IRExpr *);
+
+/* Read a floating point register pair and combine their contents into a
+   128-bit value. ARCHREG supplies the high half, ARCHREG + 2 the low. */
+static IRExpr *
+get_fpr_pair(UInt archreg)
+{
+   return binop(Iop_F64HLtoF128, get_fpr_dw0(archreg),
+                get_fpr_dw0(archreg + 2));
+}
+
+/* Write a 128-bit floating point value into a register pair. The high
+   half goes to ARCHREG, the low half to ARCHREG + 2. */
+static void
+put_fpr_pair(UInt archreg, IRExpr *expr)
+{
+   IRExpr *high = unop(Iop_F128HItoF64, expr);
+   IRExpr *low  = unop(Iop_F128LOtoF64, expr);
+
+   put_fpr_dw0(archreg,     high);
+   put_fpr_dw0(archreg + 2, low);
+}
+
+/* Read a floating point register pair containing a DFP value
+   and combine their contents into a 128-bit value */
+
+static IRExpr *
+get_dpr_pair(UInt archreg)
+{
+   IRExpr *high = get_dpr_dw0(archreg);
+   IRExpr *low  = get_dpr_dw0(archreg + 2);
+
+   return binop(Iop_D64HLtoD128, high, low);
+}
+
+/* Write a 128-bit decimal floating point value into a register pair. */
+static void
+put_dpr_pair(UInt archreg, IRExpr *expr)
+{
+   IRExpr *high = unop(Iop_D128HItoD64, expr);
+   IRExpr *low  = unop(Iop_D128LOtoD64, expr);
+
+   put_dpr_dw0(archreg,     high);
+   put_dpr_dw0(archreg + 2, low);
+}
+
+/* Terminate the current IRSB with an emulation failure. */
+static void
+emulation_failure_with_expr(IRExpr *emfailure)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, emfailure) == Ity_I32);
+
+   stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_EMNOTE), emfailure));
+   dis_res->whatNext = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_EmFail;
+}
+
+static void
+emulation_failure(VexEmNote fail_kind)
+{
+   emulation_failure_with_expr(mkU32(fail_kind));
+}
+
+/* Terminate the current IRSB with an emulation warning. */
+static void
+emulation_warning_with_expr(IRExpr *emwarning)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, emwarning) == Ity_I32);
+
+   stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_EMNOTE), emwarning));
+   dis_res->whatNext = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_EmWarn;
+}
+
+static void
+emulation_warning(VexEmNote warn_kind)
+{
+   emulation_warning_with_expr(mkU32(warn_kind));
+}
+
+/*------------------------------------------------------------*/
+/*--- IR Debugging aids.                                   ---*/
+/*------------------------------------------------------------*/
+#if 0
+
+static ULong
+s390_do_print(HChar *text, ULong value)
+{
+   vex_printf("%s %llu\n", text, value);
+   return 0;
+}
+
+static void
+s390_print(HChar *text, IRExpr *value)
+{
+   IRDirty *d;
+   
+   d = unsafeIRDirty_0_N(0 /* regparms */, "s390_do_print", &s390_do_print,
+                         mkIRExprVec_2(mkU64((ULong)text), value));
+   stmt(IRStmt_Dirty(d));
+}
+#endif
+
+
+/*------------------------------------------------------------*/
+/*--- Build the flags thunk.                               ---*/
+/*------------------------------------------------------------*/
+
+/* Completely fill the flags thunk. We're always filling all fields.
+   Apparently, that is better for redundant PUT elimination.
+   OP selects the condition-code computation; DEP1, DEP2 and NDEP are
+   its 64-bit operand expressions. */
+static void
+s390_cc_thunk_fill(IRExpr *op, IRExpr *dep1, IRExpr *dep2, IRExpr *ndep)
+{
+   UInt op_off, dep1_off, dep2_off, ndep_off;
+
+   op_off   = S390X_GUEST_OFFSET(guest_CC_OP);
+   dep1_off = S390X_GUEST_OFFSET(guest_CC_DEP1);
+   dep2_off = S390X_GUEST_OFFSET(guest_CC_DEP2);
+   ndep_off = S390X_GUEST_OFFSET(guest_CC_NDEP);
+
+   stmt(IRStmt_Put(op_off,   op));
+   stmt(IRStmt_Put(dep1_off, dep1));
+   stmt(IRStmt_Put(dep2_off, dep2));
+   stmt(IRStmt_Put(ndep_off, ndep));
+}
+
+
+/* Create an expression for V and widen the result to 64 bit.
+   SIGN_EXTEND chooses between sign- and zero-extension. Panics on
+   temporary types other than I8/I16/I32/I64. */
+static IRExpr *
+s390_cc_widen(IRTemp v, Bool sign_extend)
+{
+   IROp widen;
+
+   switch (typeOfIRTemp(irsb->tyenv, v)) {
+   case Ity_I64:
+      /* Already 64 bits wide -- nothing to do. */
+      return mkexpr(v);
+   case Ity_I32:
+      widen = sign_extend ? Iop_32Sto64 : Iop_32Uto64;
+      break;
+   case Ity_I16:
+      widen = sign_extend ? Iop_16Sto64 : Iop_16Uto64;
+      break;
+   case Ity_I8:
+      widen = sign_extend ? Iop_8Sto64 : Iop_8Uto64;
+      break;
+   default:
+      vpanic("s390_cc_widen");
+   }
+
+   return unop(widen, mkexpr(v));
+}
+
+/* Fill the flags thunk for an operation with a single operand. D1 is
+   widened to 64 bit; the DEP2 and NDEP fields are unused and zeroed. */
+static void
+s390_cc_thunk_put1(UInt opc, IRTemp d1, Bool sign_extend)
+{
+   s390_cc_thunk_fill(mkU64(opc), s390_cc_widen(d1, sign_extend),
+                      mkU64(0), mkU64(0));
+}
+
+
+/* Fill the flags thunk for an operation with two operands. Both D1 and
+   D2 are widened to 64 bit; the NDEP field is unused and zeroed. */
+static void
+s390_cc_thunk_put2(UInt opc, IRTemp d1, IRTemp d2, Bool sign_extend)
+{
+   s390_cc_thunk_fill(mkU64(opc), s390_cc_widen(d1, sign_extend),
+                      s390_cc_widen(d2, sign_extend), mkU64(0));
+}
+
+
+/* memcheck believes that the NDEP field in the flags thunk is always
+   defined. But for some flag computations (e.g. add with carry) that is
+   just not true. We therefore need to convey to memcheck that the value
+   of the ndep field does matter and therefore we make the DEP2 field
+   depend on it:
+
+   DEP2 = original_DEP2 ^ NDEP
+
+   In s390_calculate_cc we exploit that  (a^b)^b == a
+   I.e. we xor the DEP2 value with the NDEP value to recover the
+   original_DEP2 value. */
+static void
+s390_cc_thunk_put3(UInt opc, IRTemp d1, IRTemp d2, IRTemp nd, Bool sign_extend)
+{
+   IRExpr *op, *dep1, *dep2, *ndep, *dep2x;
+
+   op   = mkU64(opc);
+   dep1 = s390_cc_widen(d1, sign_extend);
+   dep2 = s390_cc_widen(d2, sign_extend);
+   ndep = s390_cc_widen(nd, sign_extend);
+
+   /* DEP2 = original_DEP2 ^ NDEP, as explained above. */
+   dep2x = binop(Iop_Xor64, dep2, ndep);
+
+   s390_cc_thunk_fill(op, dep1, dep2x, ndep);
+}
+
+
+/* Write one floating point value into the flags thunk. If D1 is only
+   32 bits wide, the 64-bit CC_DEP1 slot is zeroed first so the whole
+   slot appears defined to memcheck. */
+static void
+s390_cc_thunk_put1f(UInt opc, IRTemp d1)
+{
+   IRExpr *op, *dep1, *dep2, *ndep;
+
+   /* Make the CC_DEP1 slot appear completely defined.
+      Otherwise, assigning a 32-bit value will cause memcheck
+      to trigger an undefinedness error.
+   */
+   if (sizeofIRType(typeOfIRTemp(irsb->tyenv, d1)) == 4) {
+      UInt dep1_off = S390X_GUEST_OFFSET(guest_CC_DEP1);
+      stmt(IRStmt_Put(dep1_off, mkU64(0)));
+   }
+   op   = mkU64(opc);
+   dep1 = mkexpr(d1);
+   dep2 = mkU64(0);
+   ndep = mkU64(0);
+
+   s390_cc_thunk_fill(op, dep1, dep2, ndep);
+}
+
+
+/* Write a floating point value and an integer into the flags thunk. The
+   integer value is zero-extended first. */
+static void
+s390_cc_thunk_putFZ(UInt opc, IRTemp d1, IRTemp d2)
+{
+   IRExpr *op, *dep1, *dep2, *ndep;
+
+   /* Make the CC_DEP1 slot appear completely defined.
+      Otherwise, assigning a 32-bit value will cause memcheck
+      to trigger an undefinedness error.
+   */
+   if (sizeofIRType(typeOfIRTemp(irsb->tyenv, d1)) == 4) {
+      UInt dep1_off = S390X_GUEST_OFFSET(guest_CC_DEP1);
+      stmt(IRStmt_Put(dep1_off, mkU64(0)));
+   }
+   op   = mkU64(opc);
+   dep1 = mkexpr(d1);
+   dep2 = s390_cc_widen(d2, False);
+   ndep = mkU64(0);
+
+   s390_cc_thunk_fill(op, dep1, dep2, ndep);
+}
+
+
+/* Write a 128-bit floating point value into the flags thunk. This is
+   done by splitting the value into two 64-bits values. */
+static void
+s390_cc_thunk_put1f128(UInt opc, IRTemp d1)
+{
+   IRExpr *op, *hi, *lo, *ndep;
+
+   op   = mkU64(opc);
+   hi   = unop(Iop_F128HItoF64, mkexpr(d1));
+   lo   = unop(Iop_F128LOtoF64, mkexpr(d1));
+   ndep = mkU64(0);
+
+   s390_cc_thunk_fill(op, hi, lo, ndep);
+}
+
+
+/* Write a 128-bit floating point value and an integer into the flags thunk.
+   The integer value is zero-extended first. */
+static void
+s390_cc_thunk_put1f128Z(UInt opc, IRTemp d1, IRTemp nd)
+{
+   IRExpr *op, *hi, *lo, *lox, *ndep;
+
+   op   = mkU64(opc);
+   hi   = unop(Iop_F128HItoF64, mkexpr(d1));
+   lo   = unop(Iop_ReinterpF64asI64, unop(Iop_F128LOtoF64, mkexpr(d1)));
+   ndep = s390_cc_widen(nd, False);
+
+   lox = binop(Iop_Xor64, lo, ndep);  /* convey dependency */
+
+   s390_cc_thunk_fill(op, hi, lox, ndep);
+}
+
+
+/* Write a 128-bit decimal floating point value into the flags thunk.
+   This is done by splitting the value into two 64-bits values. */
+static void
+s390_cc_thunk_put1d128(UInt opc, IRTemp d1)
+{
+   IRExpr *op, *hi, *lo, *ndep;
+
+   op   = mkU64(opc);
+   hi   = unop(Iop_D128HItoD64, mkexpr(d1));
+   lo   = unop(Iop_D128LOtoD64, mkexpr(d1));
+   ndep = mkU64(0);
+
+   s390_cc_thunk_fill(op, hi, lo, ndep);
+}
+
+
+/* Write a 128-bit decimal floating point value and an integer into the flags
+   thunk. The integer value is zero-extended first. */
+static void
+s390_cc_thunk_put1d128Z(UInt opc, IRTemp d1, IRTemp nd)
+{
+   IRExpr *op, *hi, *lo, *lox, *ndep;
+
+   op   = mkU64(opc);
+   hi   = unop(Iop_D128HItoD64, mkexpr(d1));
+   lo   = unop(Iop_ReinterpD64asI64, unop(Iop_D128LOtoD64, mkexpr(d1)));
+   ndep = s390_cc_widen(nd, False);
+
+   lox = binop(Iop_Xor64, lo, ndep);  /* convey dependency */
+
+   s390_cc_thunk_fill(op, hi, lox, ndep);
+}
+
+
+static void
+s390_cc_set(UInt val)
+{
+   s390_cc_thunk_fill(mkU64(S390_CC_OP_SET),
+                      mkU64(val), mkU64(0), mkU64(0));
+}
+
+/* Build IR to calculate the condition code from flags thunk.
+   Returns an expression of type Ity_I32 */
+static IRExpr *
+s390_call_calculate_cc(void)
+{
+   IRExpr **args, *call, *op, *dep1, *dep2, *ndep;
+
+   op   = IRExpr_Get(S390X_GUEST_OFFSET(guest_CC_OP),   Ity_I64);
+   dep1 = IRExpr_Get(S390X_GUEST_OFFSET(guest_CC_DEP1), Ity_I64);
+   dep2 = IRExpr_Get(S390X_GUEST_OFFSET(guest_CC_DEP2), Ity_I64);
+   ndep = IRExpr_Get(S390X_GUEST_OFFSET(guest_CC_NDEP), Ity_I64);
+
+   args = mkIRExprVec_4(op, dep1, dep2, ndep);
+   call = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+                        "s390_calculate_cc", &s390_calculate_cc, args);
+
+   /* Exclude OP and NDEP from definedness checking.  We're only
+      interested in DEP1 and DEP2. Bit i of mcx_mask exempts args[i];
+      here args[0] = op and args[3] = ndep. */
+   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
+
+   return call;
+}
+
+/* Build IR to calculate the internal condition code for a "compare and branch"
+   insn. Returns an expression of type Ity_I32. M is the condition mask;
+   OPC must be S390_CC_OP_SIGNED_COMPARE or S390_CC_OP_UNSIGNED_COMPARE
+   (panics otherwise); OP1 and OP2 are the comparison operands. */
+static IRExpr *
+s390_call_calculate_icc(UInt m, UInt opc, IRTemp op1, IRTemp op2)
+{
+   IRExpr **args, *call, *op, *dep1, *dep2, *mask;
+
+   switch (opc) {
+   case S390_CC_OP_SIGNED_COMPARE:
+      dep1 = s390_cc_widen(op1, True);
+      dep2 = s390_cc_widen(op2, True);
+      break;
+
+   case S390_CC_OP_UNSIGNED_COMPARE:
+      dep1 = s390_cc_widen(op1, False);
+      dep2 = s390_cc_widen(op2, False);
+      break;
+
+   default:
+      vpanic("s390_call_calculate_icc");
+   }
+
+   mask = mkU64(m);
+   op   = mkU64(opc);
+
+   args = mkIRExprVec_5(mask, op, dep1, dep2, mkU64(0) /* unused */);
+   call = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+                        "s390_calculate_cond", &s390_calculate_cond, args);
+
+   /* Exclude the requested condition, OP and NDEP from definedness
+      checking.  We're only interested in DEP1 and DEP2. */
+   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<1) | (1<<4);
+
+   return call;
+}
+
+/* Build IR to calculate the condition code from flags thunk.
+   Returns an expression of type Ity_I32. M is the condition mask
+   against which the computed condition code is matched. */
+static IRExpr *
+s390_call_calculate_cond(UInt m)
+{
+   IRExpr **args, *call, *op, *dep1, *dep2, *ndep, *mask;
+
+   mask = mkU64(m);
+   op   = IRExpr_Get(S390X_GUEST_OFFSET(guest_CC_OP),   Ity_I64);
+   dep1 = IRExpr_Get(S390X_GUEST_OFFSET(guest_CC_DEP1), Ity_I64);
+   dep2 = IRExpr_Get(S390X_GUEST_OFFSET(guest_CC_DEP2), Ity_I64);
+   ndep = IRExpr_Get(S390X_GUEST_OFFSET(guest_CC_NDEP), Ity_I64);
+
+   args = mkIRExprVec_5(mask, op, dep1, dep2, ndep);
+   call = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+                        "s390_calculate_cond", &s390_calculate_cond, args);
+
+   /* Exclude the requested condition, OP and NDEP from definedness
+      checking.  We're only interested in DEP1 and DEP2. */
+   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<1) | (1<<4);
+
+   return call;
+}
+
+#define s390_cc_thunk_putZ(op,dep1)  s390_cc_thunk_put1(op,dep1,False)
+#define s390_cc_thunk_putS(op,dep1)  s390_cc_thunk_put1(op,dep1,True)
+#define s390_cc_thunk_putF(op,dep1)  s390_cc_thunk_put1f(op,dep1)
+#define s390_cc_thunk_putZZ(op,dep1,dep2) s390_cc_thunk_put2(op,dep1,dep2,False)
+#define s390_cc_thunk_putSS(op,dep1,dep2) s390_cc_thunk_put2(op,dep1,dep2,True)
+#define s390_cc_thunk_putFF(op,dep1,dep2) s390_cc_thunk_put2f(op,dep1,dep2)
+#define s390_cc_thunk_putZZZ(op,dep1,dep2,ndep) \
+        s390_cc_thunk_put3(op,dep1,dep2,ndep,False)
+#define s390_cc_thunk_putSSS(op,dep1,dep2,ndep) \
+        s390_cc_thunk_put3(op,dep1,dep2,ndep,True)
+
+
+
+
+/*------------------------------------------------------------*/
+/*--- Guest register access                                ---*/
+/*------------------------------------------------------------*/
+
+
+/*------------------------------------------------------------*/
+/*--- ar registers                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Return the guest state offset of an ar register.
+   ARCHREG must be in 0..15 (asserted). */
+static UInt
+ar_offset(UInt archreg)
+{
+   static const UInt offset[16] = {
+      S390X_GUEST_OFFSET(guest_a0),
+      S390X_GUEST_OFFSET(guest_a1),
+      S390X_GUEST_OFFSET(guest_a2),
+      S390X_GUEST_OFFSET(guest_a3),
+      S390X_GUEST_OFFSET(guest_a4),
+      S390X_GUEST_OFFSET(guest_a5),
+      S390X_GUEST_OFFSET(guest_a6),
+      S390X_GUEST_OFFSET(guest_a7),
+      S390X_GUEST_OFFSET(guest_a8),
+      S390X_GUEST_OFFSET(guest_a9),
+      S390X_GUEST_OFFSET(guest_a10),
+      S390X_GUEST_OFFSET(guest_a11),
+      S390X_GUEST_OFFSET(guest_a12),
+      S390X_GUEST_OFFSET(guest_a13),
+      S390X_GUEST_OFFSET(guest_a14),
+      S390X_GUEST_OFFSET(guest_a15),
+   };
+
+   vassert(archreg < 16);
+
+   return offset[archreg];
+}
+
+
+/* Return the guest state offset of word #0 of an ar register. */
+static __inline__ UInt
+ar_w0_offset(UInt archreg)
+{
+   return ar_offset(archreg) + 0;
+}
+
+/* Write word #0 of an ar to the guest state. */
+static __inline__ void
+put_ar_w0(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+   stmt(IRStmt_Put(ar_w0_offset(archreg), expr));
+}
+
+/* Read word #0 of an ar register. */
+static __inline__ IRExpr *
+get_ar_w0(UInt archreg)
+{
+   return IRExpr_Get(ar_w0_offset(archreg), Ity_I32);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- fpr registers                                        ---*/
+/*------------------------------------------------------------*/
+
+/* Return the guest state offset of a fpr register.
+   ARCHREG must be in 0..15 (asserted). */
+static UInt
+fpr_offset(UInt archreg)
+{
+   static const UInt offset[16] = {
+      S390X_GUEST_OFFSET(guest_f0),
+      S390X_GUEST_OFFSET(guest_f1),
+      S390X_GUEST_OFFSET(guest_f2),
+      S390X_GUEST_OFFSET(guest_f3),
+      S390X_GUEST_OFFSET(guest_f4),
+      S390X_GUEST_OFFSET(guest_f5),
+      S390X_GUEST_OFFSET(guest_f6),
+      S390X_GUEST_OFFSET(guest_f7),
+      S390X_GUEST_OFFSET(guest_f8),
+      S390X_GUEST_OFFSET(guest_f9),
+      S390X_GUEST_OFFSET(guest_f10),
+      S390X_GUEST_OFFSET(guest_f11),
+      S390X_GUEST_OFFSET(guest_f12),
+      S390X_GUEST_OFFSET(guest_f13),
+      S390X_GUEST_OFFSET(guest_f14),
+      S390X_GUEST_OFFSET(guest_f15),
+   };
+
+   vassert(archreg < 16);
+
+   return offset[archreg];
+}
+
+
+/* Return the guest state offset of word #0 of a fpr register. */
+static __inline__ UInt
+fpr_w0_offset(UInt archreg)
+{
+   return fpr_offset(archreg) + 0;
+}
+
+/* Write word #0 of a fpr to the guest state. */
+static __inline__ void
+put_fpr_w0(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_F32);
+
+   stmt(IRStmt_Put(fpr_w0_offset(archreg), expr));
+}
+
+/* Read word #0 of a fpr register. */
+static __inline__ IRExpr *
+get_fpr_w0(UInt archreg)
+{
+   return IRExpr_Get(fpr_w0_offset(archreg), Ity_F32);
+}
+
+/* Return the guest state offset of double word #0 of a fpr register. */
+static __inline__ UInt
+fpr_dw0_offset(UInt archreg)
+{
+   return fpr_offset(archreg) + 0;
+}
+
+/* Write double word #0 of a fpr to the guest state. */
+static __inline__ void
+put_fpr_dw0(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_F64);
+
+   stmt(IRStmt_Put(fpr_dw0_offset(archreg), expr));
+}
+
+/* Read double word #0 of a fpr register. */
+static __inline__ IRExpr *
+get_fpr_dw0(UInt archreg)
+{
+   return IRExpr_Get(fpr_dw0_offset(archreg), Ity_F64);
+}
+
+/* Write word #0 of a dpr to the guest state. */
+static __inline__ void
+put_dpr_w0(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_D32);
+
+   stmt(IRStmt_Put(fpr_w0_offset(archreg), expr));
+}
+
+/* Read word #0 of a dpr register. */
+static __inline__ IRExpr *
+get_dpr_w0(UInt archreg)
+{
+   return IRExpr_Get(fpr_w0_offset(archreg), Ity_D32);
+}
+
+/* Write double word #0 of a fpr containing a DFP value to the guest state. */
+static __inline__ void
+put_dpr_dw0(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_D64);
+
+   stmt(IRStmt_Put(fpr_dw0_offset(archreg), expr));
+}
+
+/* Read double word #0 of a fpr register containing DFP value. */
+static __inline__ IRExpr *
+get_dpr_dw0(UInt archreg)
+{
+   return IRExpr_Get(fpr_dw0_offset(archreg), Ity_D64);
+}
+
+/*------------------------------------------------------------*/
+/*--- gpr registers                                        ---*/
+/*------------------------------------------------------------*/
+
+/* Return the guest state offset of a gpr register.
+   ARCHREG must be in 0..15 (asserted). */
+static UInt
+gpr_offset(UInt archreg)
+{
+   static const UInt offset[16] = {
+      S390X_GUEST_OFFSET(guest_r0),
+      S390X_GUEST_OFFSET(guest_r1),
+      S390X_GUEST_OFFSET(guest_r2),
+      S390X_GUEST_OFFSET(guest_r3),
+      S390X_GUEST_OFFSET(guest_r4),
+      S390X_GUEST_OFFSET(guest_r5),
+      S390X_GUEST_OFFSET(guest_r6),
+      S390X_GUEST_OFFSET(guest_r7),
+      S390X_GUEST_OFFSET(guest_r8),
+      S390X_GUEST_OFFSET(guest_r9),
+      S390X_GUEST_OFFSET(guest_r10),
+      S390X_GUEST_OFFSET(guest_r11),
+      S390X_GUEST_OFFSET(guest_r12),
+      S390X_GUEST_OFFSET(guest_r13),
+      S390X_GUEST_OFFSET(guest_r14),
+      S390X_GUEST_OFFSET(guest_r15),
+   };
+
+   vassert(archreg < 16);
+
+   return offset[archreg];
+}
+
+
+/* ------------------------------------------------------------------
+   Accessors for slices of the 64-bit general purpose registers.
+   Slice numbers are byte offsets from the start of the register;
+   s390x is big-endian, so word #0 / half word #0 / byte #0 denote
+   the most significant parts.  Each slice has three helpers:
+     xxx_offset -- guest state offset of the slice
+     put_xxx    -- emit an IR Put statement (asserts the IR type)
+     get_xxx    -- build an IR Get expression
+   NOTE(review): these rely on the file-scope 'irsb', 'stmt' and
+   'gpr_offset' defined earlier in this file.
+   ------------------------------------------------------------------ */
+
+/* Return the guest state offset of word #0 of a gpr register. */
+static __inline__ UInt
+gpr_w0_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 0;
+}
+
+/* Write word #0 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_w0(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+   stmt(IRStmt_Put(gpr_w0_offset(archreg), expr));
+}
+
+/* Read word #0 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_w0(UInt archreg)
+{
+   return IRExpr_Get(gpr_w0_offset(archreg), Ity_I32);
+}
+
+/* Return the guest state offset of double word #0 of a gpr register. */
+static __inline__ UInt
+gpr_dw0_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 0;
+}
+
+/* Write double word #0 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_dw0(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I64);
+
+   stmt(IRStmt_Put(gpr_dw0_offset(archreg), expr));
+}
+
+/* Read double word #0 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_dw0(UInt archreg)
+{
+   return IRExpr_Get(gpr_dw0_offset(archreg), Ity_I64);
+}
+
+/* Return the guest state offset of half word #1 of a gpr register. */
+static __inline__ UInt
+gpr_hw1_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 2;
+}
+
+/* Write half word #1 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_hw1(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I16);
+
+   stmt(IRStmt_Put(gpr_hw1_offset(archreg), expr));
+}
+
+/* Read half word #1 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_hw1(UInt archreg)
+{
+   return IRExpr_Get(gpr_hw1_offset(archreg), Ity_I16);
+}
+
+/* Return the guest state offset of byte #6 of a gpr register. */
+static __inline__ UInt
+gpr_b6_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 6;
+}
+
+/* Write byte #6 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b6(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+   stmt(IRStmt_Put(gpr_b6_offset(archreg), expr));
+}
+
+/* Read byte #6 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b6(UInt archreg)
+{
+   return IRExpr_Get(gpr_b6_offset(archreg), Ity_I8);
+}
+
+/* GPR slice accessors for byte #3, byte #0, word #1 and half word #3.
+   Slice numbers are byte offsets into the big-endian 64-bit register,
+   so word #1 is the least significant 32 bits.  put_* helpers assert
+   the IR type of the stored expression. */
+
+/* Return the guest state offset of byte #3 of a gpr register. */
+static __inline__ UInt
+gpr_b3_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 3;
+}
+
+/* Write byte #3 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b3(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+   stmt(IRStmt_Put(gpr_b3_offset(archreg), expr));
+}
+
+/* Read byte #3 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b3(UInt archreg)
+{
+   return IRExpr_Get(gpr_b3_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of byte #0 of a gpr register. */
+static __inline__ UInt
+gpr_b0_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 0;
+}
+
+/* Write byte #0 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b0(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+   stmt(IRStmt_Put(gpr_b0_offset(archreg), expr));
+}
+
+/* Read byte #0 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b0(UInt archreg)
+{
+   return IRExpr_Get(gpr_b0_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of word #1 of a gpr register. */
+static __inline__ UInt
+gpr_w1_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 4;
+}
+
+/* Write word #1 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_w1(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+   stmt(IRStmt_Put(gpr_w1_offset(archreg), expr));
+}
+
+/* Read word #1 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_w1(UInt archreg)
+{
+   return IRExpr_Get(gpr_w1_offset(archreg), Ity_I32);
+}
+
+/* Return the guest state offset of half word #3 of a gpr register. */
+static __inline__ UInt
+gpr_hw3_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 6;
+}
+
+/* Write half word #3 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_hw3(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I16);
+
+   stmt(IRStmt_Put(gpr_hw3_offset(archreg), expr));
+}
+
+/* Read half word #3 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_hw3(UInt archreg)
+{
+   return IRExpr_Get(gpr_hw3_offset(archreg), Ity_I16);
+}
+
+/* GPR slice accessors for byte #7, half word #0, byte #4 and byte #1.
+   Slice numbers are byte offsets into the big-endian 64-bit register
+   (byte #7 is the least significant byte).  put_* helpers assert the
+   IR type of the stored expression. */
+
+/* Return the guest state offset of byte #7 of a gpr register. */
+static __inline__ UInt
+gpr_b7_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 7;
+}
+
+/* Write byte #7 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b7(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+   stmt(IRStmt_Put(gpr_b7_offset(archreg), expr));
+}
+
+/* Read byte #7 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b7(UInt archreg)
+{
+   return IRExpr_Get(gpr_b7_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of half word #0 of a gpr register. */
+static __inline__ UInt
+gpr_hw0_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 0;
+}
+
+/* Write half word #0 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_hw0(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I16);
+
+   stmt(IRStmt_Put(gpr_hw0_offset(archreg), expr));
+}
+
+/* Read half word #0 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_hw0(UInt archreg)
+{
+   return IRExpr_Get(gpr_hw0_offset(archreg), Ity_I16);
+}
+
+/* Return the guest state offset of byte #4 of a gpr register. */
+static __inline__ UInt
+gpr_b4_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 4;
+}
+
+/* Write byte #4 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b4(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+   stmt(IRStmt_Put(gpr_b4_offset(archreg), expr));
+}
+
+/* Read byte #4 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b4(UInt archreg)
+{
+   return IRExpr_Get(gpr_b4_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of byte #1 of a gpr register. */
+static __inline__ UInt
+gpr_b1_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 1;
+}
+
+/* Write byte #1 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b1(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+   stmt(IRStmt_Put(gpr_b1_offset(archreg), expr));
+}
+
+/* Read byte #1 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b1(UInt archreg)
+{
+   return IRExpr_Get(gpr_b1_offset(archreg), Ity_I8);
+}
+
+/* GPR slice accessors for half word #2, byte #5 and byte #2.  Slice
+   numbers are byte offsets into the big-endian 64-bit register.
+   put_* helpers assert the IR type of the stored expression. */
+
+/* Return the guest state offset of half word #2 of a gpr register. */
+static __inline__ UInt
+gpr_hw2_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 4;
+}
+
+/* Write half word #2 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_hw2(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I16);
+
+   stmt(IRStmt_Put(gpr_hw2_offset(archreg), expr));
+}
+
+/* Read half word #2 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_hw2(UInt archreg)
+{
+   return IRExpr_Get(gpr_hw2_offset(archreg), Ity_I16);
+}
+
+/* Return the guest state offset of byte #5 of a gpr register. */
+static __inline__ UInt
+gpr_b5_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 5;
+}
+
+/* Write byte #5 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b5(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+   stmt(IRStmt_Put(gpr_b5_offset(archreg), expr));
+}
+
+/* Read byte #5 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b5(UInt archreg)
+{
+   return IRExpr_Get(gpr_b5_offset(archreg), Ity_I8);
+}
+
+/* Return the guest state offset of byte #2 of a gpr register. */
+static __inline__ UInt
+gpr_b2_offset(UInt archreg)
+{
+   return gpr_offset(archreg) + 2;
+}
+
+/* Write byte #2 of a gpr to the guest state. */
+static __inline__ void
+put_gpr_b2(UInt archreg, IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I8);
+
+   stmt(IRStmt_Put(gpr_b2_offset(archreg), expr));
+}
+
+/* Read byte #2 of a gpr register. */
+static __inline__ IRExpr *
+get_gpr_b2(UInt archreg)
+{
+   return IRExpr_Get(gpr_b2_offset(archreg), Ity_I8);
+}
+
+/* Accessors for the guest_counter pseudo register in the guest state.
+   Like the GPR accessors, word #0 is the most significant 32 bits and
+   word #1 the least significant 32 bits (big-endian layout).
+   NOTE(review): the counter's use sites are elsewhere in this file;
+   confirm against them before relying on its semantics. */
+
+/* Return the guest state offset of the counter register. */
+static UInt
+counter_offset(void)
+{
+   return S390X_GUEST_OFFSET(guest_counter);
+}
+
+/* Return the guest state offset of double word #0 of the counter register. */
+static __inline__ UInt
+counter_dw0_offset(void)
+{
+   return counter_offset() + 0;
+}
+
+/* Write double word #0 of the counter to the guest state. */
+static __inline__ void
+put_counter_dw0(IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I64);
+
+   stmt(IRStmt_Put(counter_dw0_offset(), expr));
+}
+
+/* Read double word #0 of the counter register. */
+static __inline__ IRExpr *
+get_counter_dw0(void)
+{
+   return IRExpr_Get(counter_dw0_offset(), Ity_I64);
+}
+
+/* Return the guest state offset of word #0 of the counter register. */
+static __inline__ UInt
+counter_w0_offset(void)
+{
+   return counter_offset() + 0;
+}
+
+/* Return the guest state offset of word #1 of the counter register. */
+static __inline__ UInt
+counter_w1_offset(void)
+{
+   return counter_offset() + 4;
+}
+
+/* Write word #0 of the counter to the guest state. */
+static __inline__ void
+put_counter_w0(IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+   stmt(IRStmt_Put(counter_w0_offset(), expr));
+}
+
+/* Read word #0 of the counter register. */
+static __inline__ IRExpr *
+get_counter_w0(void)
+{
+   return IRExpr_Get(counter_w0_offset(), Ity_I32);
+}
+
+/* Write word #1 of the counter to the guest state. */
+static __inline__ void
+put_counter_w1(IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+   stmt(IRStmt_Put(counter_w1_offset(), expr));
+}
+
+/* Read word #1 of the counter register. */
+static __inline__ IRExpr *
+get_counter_w1(void)
+{
+   return IRExpr_Get(counter_w1_offset(), Ity_I32);
+}
+
+/* Accessors for the floating point control (FPC) register in the
+   guest state.  Word #0 carries the rounding mode bits that the
+   rounding-mode helpers below extract. */
+
+/* Return the guest state offset of the fpc register. */
+static UInt
+fpc_offset(void)
+{
+   return S390X_GUEST_OFFSET(guest_fpc);
+}
+
+/* Return the guest state offset of word #0 of the fpc register. */
+static __inline__ UInt
+fpc_w0_offset(void)
+{
+   return fpc_offset() + 0;
+}
+
+/* Write word #0 of the fpc to the guest state. */
+static __inline__ void
+put_fpc_w0(IRExpr *expr)
+{
+   vassert(typeOfIRExpr(irsb->tyenv, expr) == Ity_I32);
+
+   stmt(IRStmt_Put(fpc_w0_offset(), expr));
+}
+
+/* Read word #0 of the fpc register. */
+static __inline__ IRExpr *
+get_fpc_w0(void)
+{
+   return IRExpr_Get(fpc_w0_offset(), Ity_I32);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Rounding modes                                       ---*/
+/*------------------------------------------------------------*/
+
+/* Extract the bfp rounding mode from the guest FPC reg and encode it as an
+   IRRoundingMode:
+
+   rounding mode | s390 | IR
+   -------------------------
+   to nearest    |  00  | 00
+   to zero       |  01  | 11
+   to +infinity  |  10  | 10
+   to -infinity  |  11  | 01
+
+   So:  IR = (4 - s390) & 3
+*/
+static IRExpr *
+get_bfp_rounding_mode_from_fpc(void)
+{
+   IRTemp fpc_bits = newTemp(Ity_I32);
+
+   /* For z196 and later the bfp rounding mode is stored in bits [29:31].
+      Prior to that bits [30:31] contained the bfp rounding mode with
+      bit 29 being unused and having a value of 0. So we can always
+      extract the least significant 3 bits. */
+   assign(fpc_bits, binop(Iop_And32, get_fpc_w0(), mkU32(7)));
+
+   /* fixs390:
+
+
+      if (! s390_host_has_fpext && rounding_mode > 3) {
+         emulation warning @ runtime and
+         set fpc to round nearest
+      }
+   */
+
+   /* For now silently adjust an unsupported rounding mode to "nearest" */
+   /* The signed compare is safe: fpc_bits was masked to [0,7] above. */
+   IRExpr *rm_s390 = mkite(binop(Iop_CmpLE32S, mkexpr(fpc_bits), mkU32(3)),
+                           mkexpr(fpc_bits),
+                           mkU32(S390_FPC_BFP_ROUND_NEAREST_EVEN));
+
+   // rm_IR = (4 - rm_s390) & 3;
+   return binop(Iop_And32, binop(Iop_Sub32, mkU32(4), rm_s390), mkU32(3));
+}
+
+/* Encode the s390 rounding mode as it appears in the m3 field of certain
+   instructions to VEX's IRRoundingMode.  Rounding modes that cannot be
+   represented in VEX are mapped to Irrm_NEAREST: IEEE 754 (4.3.3) names
+   roundTiesToEven as the default rounding-direction attribute, so it is
+   the least surprising substitute.  S390_BFP_ROUND_PER_FPC defers to the
+   rounding mode currently held in the guest FPC register. */
+static IRTemp
+encode_bfp_rounding_mode(UChar mode)
+{
+   IRExpr *irrm;
+
+   switch (mode) {
+   case S390_BFP_ROUND_ZERO:
+      irrm = mkU32(Irrm_ZERO);
+      break;
+   case S390_BFP_ROUND_POSINF:
+      irrm = mkU32(Irrm_PosINF);
+      break;
+   case S390_BFP_ROUND_NEGINF:
+      irrm = mkU32(Irrm_NegINF);
+      break;
+   case S390_BFP_ROUND_PER_FPC:
+      irrm = get_bfp_rounding_mode_from_fpc();
+      break;
+   case S390_BFP_ROUND_NEAREST_AWAY:   /* not representable in VEX */
+   case S390_BFP_ROUND_PREPARE_SHORT:  /* not representable in VEX */
+   case S390_BFP_ROUND_NEAREST_EVEN:
+      irrm = mkU32(Irrm_NEAREST);
+      break;
+   default:
+      vpanic("encode_bfp_rounding_mode");
+   }
+
+   return mktemp(Ity_I32, irrm);
+}
+
+/* Extract the DFP rounding mode from the guest FPC reg and encode it as an
+   IRRoundingMode:
+
+   rounding mode                     | s390  | IR
+   ------------------------------------------------
+   to nearest, ties to even          |  000  | 000
+   to zero                           |  001  | 011
+   to +infinity                      |  010  | 010
+   to -infinity                      |  011  | 001
+   to nearest, ties away from 0      |  100  | 100
+   to nearest, ties toward 0         |  101  | 111
+   to away from 0                    |  110  | 110
+   to prepare for shorter precision  |  111  | 101
+
+   So:  IR = (s390 ^ ((s390 << 1) & 2))
+
+   (The xor flips bit 0 exactly when bit 1 of the s390 mode is set,
+   which realises the table above.)
+*/
+static IRExpr *
+get_dfp_rounding_mode_from_fpc(void)
+{
+   IRTemp fpc_bits = newTemp(Ity_I32);
+
+   /* The dfp rounding mode is stored in bits [25:27].
+      extract the bits at 25:27 and right shift 4 times. */
+   assign(fpc_bits, binop(Iop_Shr32,
+                          binop(Iop_And32, get_fpc_w0(), mkU32(0x70)),
+                          mkU8(4)));
+
+   IRExpr *rm_s390 = mkexpr(fpc_bits);
+   // rm_IR = (rm_s390 ^ ((rm_s390 << 1) & 2));
+
+   return binop(Iop_Xor32, rm_s390,
+                binop( Iop_And32,
+                       binop(Iop_Shl32, rm_s390, mkU8(1)),
+                       mkU32(2)));
+}
+
+/* Encode the s390 rounding mode as it appears in the m3 field of certain
+   instructions to VEX's IRRoundingMode.  Several m3 encodings (the
+   numeric suffixes in the enumerator names) map to the same VEX rounding
+   mode; the _PER_FPC_ encodings defer to the mode held in the FPC. */
+static IRTemp
+encode_dfp_rounding_mode(UChar mode)
+{
+   IRExpr *rm;
+
+   switch (mode) {
+   case S390_DFP_ROUND_PER_FPC_0:
+   case S390_DFP_ROUND_PER_FPC_2:
+      rm = get_dfp_rounding_mode_from_fpc(); break;
+   case S390_DFP_ROUND_NEAREST_EVEN_4:
+   case S390_DFP_ROUND_NEAREST_EVEN_8:
+      rm = mkU32(Irrm_NEAREST); break;
+   case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1:
+   case S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12:
+      rm = mkU32(Irrm_NEAREST_TIE_AWAY_0); break;
+   case S390_DFP_ROUND_PREPARE_SHORT_3:
+   case S390_DFP_ROUND_PREPARE_SHORT_15:
+      rm = mkU32(Irrm_PREPARE_SHORTER); break;
+   case S390_DFP_ROUND_ZERO_5:
+   case S390_DFP_ROUND_ZERO_9:
+      rm = mkU32(Irrm_ZERO ); break;
+   case S390_DFP_ROUND_POSINF_6:
+   case S390_DFP_ROUND_POSINF_10:
+      rm = mkU32(Irrm_PosINF); break;
+   case S390_DFP_ROUND_NEGINF_7:
+   case S390_DFP_ROUND_NEGINF_11:
+      rm = mkU32(Irrm_NegINF); break;
+   case S390_DFP_ROUND_NEAREST_TIE_TOWARD_0:
+      rm = mkU32(Irrm_NEAREST_TIE_TOWARD_0); break;
+   case S390_DFP_ROUND_AWAY_0:
+      rm = mkU32(Irrm_AWAY_FROM_ZERO); break;
+   default:
+      vpanic("encode_dfp_rounding_mode");
+   }
+
+   return mktemp(Ity_I32, rm);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Condition code helpers                               ---*/
+/*------------------------------------------------------------*/
+
+/* The result of a Iop_CmpFxx operation is a condition code. It is
+   encoded using the values defined in type IRCmpFxxResult.
+   Before we can store the condition code into the guest state (or do
+   anything else with it for that matter) we need to convert it to
+   the encoding that s390 uses. This is what this function does.
+
+   s390     VEX                b6 b2 b0   cc.1  cc.0
+   0      0x40 EQ             1  0  0     0     0
+   1      0x01 LT             0  0  1     0     1
+   2      0x00 GT             0  0  0     1     0
+   3      0x45 Unordered      1  1  1     1     1
+
+   The following bits from the VEX encoding are interesting:
+   b0, b2, b6  with b0 being the LSB. We observe:
+
+   cc.0 = b0;
+   cc.1 = b2 | (~b0 & ~b6)
+
+   with cc being the s390 condition code.
+*/
+static IRExpr *
+convert_vex_bfpcc_to_s390(IRTemp vex_cc)
+{
+   IRTemp cc0  = newTemp(Ity_I32);
+   IRTemp cc1  = newTemp(Ity_I32);
+   IRTemp b0   = newTemp(Ity_I32);
+   IRTemp b2   = newTemp(Ity_I32);
+   IRTemp b6   = newTemp(Ity_I32);
+
+   assign(b0, binop(Iop_And32, mkexpr(vex_cc), mkU32(1)));
+   assign(b2, binop(Iop_And32, binop(Iop_Shr32, mkexpr(vex_cc), mkU8(2)),
+                    mkU32(1)));
+   assign(b6, binop(Iop_And32, binop(Iop_Shr32, mkexpr(vex_cc), mkU8(6)),
+                    mkU32(1)));
+
+   assign(cc0, mkexpr(b0));
+   /* For a single-bit value b, (1 - b) computes its complement. */
+   assign(cc1, binop(Iop_Or32, mkexpr(b2),
+                     binop(Iop_And32,
+                           binop(Iop_Sub32, mkU32(1), mkexpr(b0)), /* ~b0 */
+                           binop(Iop_Sub32, mkU32(1), mkexpr(b6))  /* ~b6 */
+                           )));
+
+   /* Assemble cc = (cc1 << 1) | cc0. */
+   return binop(Iop_Or32, mkexpr(cc0), binop(Iop_Shl32, mkexpr(cc1), mkU8(1)));
+}
+
+
+/* The result of a Iop_CmpDxx operation is a condition code. It is
+   encoded using the values defined in type IRCmpDxxResult.
+   Before we can store the condition code into the guest state (or do
+   anything else with it for that matter) we need to convert it to
+   the encoding that s390 uses. This is what this function does. */
+static IRExpr *
+convert_vex_dfpcc_to_s390(IRTemp vex_cc)
+{
+   /* The VEX encodings for IRCmpDxxResult and IRCmpFxxResult are,
+      currently, the same -- so simply reuse the BFP conversion. */
+   return convert_vex_bfpcc_to_s390(vex_cc);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Build IR for formats                                 ---*/
+/*------------------------------------------------------------*/
+
+/* Each s390_format_XXX helper calls the per-insn IR generator 'irgen'
+   and, when front-end tracing is enabled (VEX_TRACE_FE), prints the
+   insn via s390_disasm using the operand encoding in the ENCn spec.
+   The suffix after the format name describes the traced operands:
+   R = GPR, F = FPR, A = access reg, U = unsigned, I = signed,
+   P = pc-relative, D = base+displacement address. */
+
+/* I format: mnemonic plus one unsigned immediate. */
+static void
+s390_format_I(const HChar *(*irgen)(UChar i),
+              UChar i)
+{
+   const HChar *mnm = irgen(i);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC2(MNM, UINT), mnm, i);
+}
+
+/* E format: mnemonic only, no operands. */
+static void
+s390_format_E(const HChar *(*irgen)(void))
+{
+   const HChar *mnm = irgen();
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC1(MNM), mnm);
+}
+
+/* RI format with no trace output; the _RU/_RI/_RP variants below
+   trace according to how i2 is to be interpreted. */
+static void
+s390_format_RI(const HChar *(*irgen)(UChar r1, UShort i2),
+               UChar r1, UShort i2)
+{
+   irgen(r1, i2);
+}
+
+/* RI format: GPR r1 and i2 traced as an unsigned immediate. */
+static void
+s390_format_RI_RU(const HChar *(*irgen)(UChar r1, UShort i2),
+                  UChar r1, UShort i2)
+{
+   const HChar *mnm = irgen(r1, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, UINT), mnm, r1, i2);
+}
+
+/* RI format: GPR r1 and i2 sign-extended from 16 bits, traced signed. */
+static void
+s390_format_RI_RI(const HChar *(*irgen)(UChar r1, UShort i2),
+                  UChar r1, UShort i2)
+{
+   const HChar *mnm = irgen(r1, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, INT), mnm, r1, (Int)(Short)i2);
+}
+
+/* RI format: GPR r1 and i2 traced as a pc-relative offset
+   (sign-extended from 16 bits). */
+static void
+s390_format_RI_RP(const HChar *(*irgen)(UChar r1, UShort i2),
+                  UChar r1, UShort i2)
+{
+   const HChar *mnm = irgen(r1, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, PCREL), mnm, r1, (Int)(Short)i2);
+}
+
+/* RIE format: two GPRs and a pc-relative offset. */
+static void
+s390_format_RIE_RRP(const HChar *(*irgen)(UChar r1, UChar r3, UShort i2),
+                    UChar r1, UChar r3, UShort i2)
+{
+   const HChar *mnm = irgen(r1, r3, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, GPR, GPR, PCREL), mnm, r1, r3, (Int)(Short)i2);
+}
+
+/* RIE format: two GPRs and a signed immediate. */
+static void
+s390_format_RIE_RRI0(const HChar *(*irgen)(UChar r1, UChar r3, UShort i2),
+                     UChar r1, UChar r3, UShort i2)
+{
+   const HChar *mnm = irgen(r1, r3, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, GPR, GPR, INT), mnm, r1, r3, (Int)(Short)i2);
+}
+
+/* RIE format: two GPRs and three unsigned immediates. */
+static void
+s390_format_RIE_RRUUU(const HChar *(*irgen)(UChar r1, UChar r2, UChar i3,
+                                            UChar i4, UChar i5),
+                      UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+   const HChar *mnm = irgen(r1, r2, i3, i4, i5);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC6(MNM, GPR, GPR, UINT, UINT, UINT), mnm, r1, r2, i3, i4,
+                  i5);
+}
+
+/* RIE compare-and-branch: traced as an extended mnemonic (CAB) derived
+   from mask m3, with GPRs r1, r2 and pc-relative target i4. */
+static void
+s390_format_RIE_RRPU(const HChar *(*irgen)(UChar r1, UChar r2, UShort i4,
+                                           UChar m3),
+                     UChar r1, UChar r2, UShort i4, UChar m3)
+{
+   const HChar *mnm = irgen(r1, r2, i4, m3);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(XMNM, GPR, GPR, CABM, PCREL), S390_XMNM_CAB, mnm, m3, r1,
+                  r2, m3, (Int)(Short)i4);
+}
+
+/* RIE compare-immediate-and-branch; i2 traced as unsigned. */
+static void
+s390_format_RIE_RUPU(const HChar *(*irgen)(UChar r1, UChar m3, UShort i4,
+                                           UChar i2),
+                     UChar r1, UChar m3, UShort i4, UChar i2)
+{
+   const HChar *mnm = irgen(r1, m3, i4, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(XMNM, GPR, UINT, CABM, PCREL), S390_XMNM_CAB, mnm, m3,
+                  r1, i2, m3, (Int)(Short)i4);
+}
+
+/* RIE compare-immediate-and-branch; i2 sign-extended from 8 bits. */
+static void
+s390_format_RIE_RUPI(const HChar *(*irgen)(UChar r1, UChar m3, UShort i4,
+                                           UChar i2),
+                     UChar r1, UChar m3, UShort i4, UChar i2)
+{
+   const HChar *mnm = irgen(r1, m3, i4, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(XMNM, GPR, INT, CABM, PCREL), S390_XMNM_CAB, mnm, m3, r1,
+                  (Int)(Char)i2, m3, (Int)(Short)i4);
+}
+
+/* RIL format with no trace output; see the _RU/_RI/_RP/_UP variants. */
+static void
+s390_format_RIL(const HChar *(*irgen)(UChar r1, UInt i2),
+                UChar r1, UInt i2)
+{
+   irgen(r1, i2);
+}
+
+/* RIL format: GPR r1 and 32-bit i2 traced as unsigned. */
+static void
+s390_format_RIL_RU(const HChar *(*irgen)(UChar r1, UInt i2),
+                   UChar r1, UInt i2)
+{
+   const HChar *mnm = irgen(r1, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, UINT), mnm, r1, i2);
+}
+
+/* RIL format: GPR r1 and 32-bit i2 traced as signed. */
+static void
+s390_format_RIL_RI(const HChar *(*irgen)(UChar r1, UInt i2),
+                   UChar r1, UInt i2)
+{
+   const HChar *mnm = irgen(r1, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, INT), mnm, r1, i2);
+}
+
+/* RIL format: GPR r1 and 32-bit i2 traced as a pc-relative offset. */
+static void
+s390_format_RIL_RP(const HChar *(*irgen)(UChar r1, UInt i2),
+                   UChar r1, UInt i2)
+{
+   const HChar *mnm = irgen(r1, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, PCREL), mnm, r1, i2);
+}
+
+/* RIL format where irgen takes no arguments; r1 is traced as an
+   unsigned value and i2 as a pc-relative offset. */
+static void
+s390_format_RIL_UP(const HChar *(*irgen)(void),
+                   UChar r1, UInt i2)
+{
+   const HChar *mnm = irgen();
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, UINT, PCREL), mnm, r1, i2);
+}
+
+/* RIS format: op4addr = d4 + (b4 != 0 ? GPR[b4] : 0); i2 sign-extended
+   from 8 bits; traced as an extended compare-and-branch mnemonic. */
+static void
+s390_format_RIS_RURDI(const HChar *(*irgen)(UChar r1, UChar m3, UChar i2,
+                      IRTemp op4addr),
+                      UChar r1, UChar m3, UChar b4, UShort d4, UChar i2)
+{
+   const HChar *mnm;
+   IRTemp op4addr = newTemp(Ity_I64);
+
+   assign(op4addr, binop(Iop_Add64, mkU64(d4), b4 != 0 ? get_gpr_dw0(b4) :
+          mkU64(0)));
+
+   mnm = irgen(r1, m3, i2, op4addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(XMNM, GPR, INT, CABM, UDXB), S390_XMNM_CAB, mnm, m3, r1,
+                  (Int)(Char)i2, m3, d4, 0, b4);
+}
+
+/* RIS format, like s390_format_RIS_RURDI but i2 traced as unsigned. */
+static void
+s390_format_RIS_RURDU(const HChar *(*irgen)(UChar r1, UChar m3, UChar i2,
+                      IRTemp op4addr),
+                      UChar r1, UChar m3, UChar b4, UShort d4, UChar i2)
+{
+   const HChar *mnm;
+   IRTemp op4addr = newTemp(Ity_I64);
+
+   assign(op4addr, binop(Iop_Add64, mkU64(d4), b4 != 0 ? get_gpr_dw0(b4) :
+          mkU64(0)));
+
+   mnm = irgen(r1, m3, i2, op4addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(XMNM, GPR, UINT, CABM, UDXB), S390_XMNM_CAB, mnm, m3, r1,
+                  i2, m3, d4, 0, b4);
+}
+
+/* RR format with no trace output. */
+static void
+s390_format_RR(const HChar *(*irgen)(UChar r1, UChar r2),
+               UChar r1, UChar r2)
+{
+   irgen(r1, r2);
+}
+
+/* RR format: two GPRs. */
+static void
+s390_format_RR_RR(const HChar *(*irgen)(UChar r1, UChar r2),
+                  UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, GPR), mnm, r1, r2);
+}
+
+/* RR format: two FPRs. */
+static void
+s390_format_RR_FF(const HChar *(*irgen)(UChar r1, UChar r2),
+                  UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, FPR, FPR), mnm, r1, r2);
+}
+
+/* RRE format with no trace output. */
+static void
+s390_format_RRE(const HChar *(*irgen)(UChar r1, UChar r2),
+                UChar r1, UChar r2)
+{
+   irgen(r1, r2);
+}
+
+/* RRE format: two GPRs. */
+static void
+s390_format_RRE_RR(const HChar *(*irgen)(UChar r1, UChar r2),
+                   UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, GPR), mnm, r1, r2);
+}
+
+/* RRE format: two FPRs. */
+static void
+s390_format_RRE_FF(const HChar *(*irgen)(UChar r1, UChar r2),
+                   UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, FPR, FPR), mnm, r1, r2);
+}
+
+/* RRE format: GPR r1, FPR r2. */
+static void
+s390_format_RRE_RF(const HChar *(*irgen)(UChar, UChar),
+                   UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, FPR), mnm, r1, r2);
+}
+
+/* RRE format: FPR r1, GPR r2. */
+static void
+s390_format_RRE_FR(const HChar *(*irgen)(UChar r1, UChar r2),
+                   UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, FPR, GPR), mnm, r1, r2);
+}
+
+/* RRE format: single GPR operand. */
+static void
+s390_format_RRE_R0(const HChar *(*irgen)(UChar r1),
+                   UChar r1)
+{
+   const HChar *mnm = irgen(r1);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC2(MNM, GPR), mnm, r1);
+}
+
+/* RRE format: single FPR operand. */
+static void
+s390_format_RRE_F0(const HChar *(*irgen)(UChar r1),
+                   UChar r1)
+{
+   const HChar *mnm = irgen(r1);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC2(MNM, FPR), mnm, r1);
+}
+
+/* RRF format: mask m3 is handed to irgen first but traced last. */
+static void
+s390_format_RRF_M0RERE(const HChar *(*irgen)(UChar m3, UChar r1, UChar r2),
+                       UChar m3, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(m3, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, GPR, GPR, UINT), mnm, r1, r2, m3);
+}
+
+/* RRF format: three FPRs; irgen receives (r1, r3, r2). */
+static void
+s390_format_RRF_F0FF(const HChar *(*irgen)(UChar, UChar, UChar),
+                     UChar r1, UChar r3, UChar r2)
+{
+   const HChar *mnm = irgen(r1, r3, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, FPR, FPR, FPR), mnm, r1, r3, r2);
+}
+
+/* RRF format: irgen receives (r3, r1, r2); traced as FPR r1, FPR r3,
+   GPR r2. */
+static void
+s390_format_RRF_F0FR(const HChar *(*irgen)(UChar, UChar, UChar),
+                     UChar r3, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r3, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, FPR, FPR, GPR), mnm, r1, r3, r2);
+}
+
+/* RRF format: two FPRs with masks m3 and m4, traced interleaved. */
+static void
+s390_format_RRF_UUFF(const HChar *(*irgen)(UChar m3, UChar m4, UChar r1,
+                                           UChar r2),
+                     UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(m3, m4, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(MNM, FPR, UINT, FPR, UINT), mnm, r1, m3, r2, m4);
+}
+
+/* RRF format: two FPRs with a single mask m4, traced last. */
+static void
+s390_format_RRF_0UFF(const HChar *(*irgen)(UChar m4, UChar r1, UChar r2),
+                     UChar m4, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(m4, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, FPR, FPR, UINT), mnm, r1, r2, m4);
+}
+
+/* RRF format: FPR r1, GPR r2, masks m3 and m4. */
+static void
+s390_format_RRF_UUFR(const HChar *(*irgen)(UChar m3, UChar m4, UChar r1,
+                                           UChar r2),
+                     UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(m3, m4, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), mnm, r1, m3, r2, m4);
+}
+
+/* RRF format: GPR r1, FPR r2, masks m3 and m4. */
+static void
+s390_format_RRF_UURF(const HChar *(*irgen)(UChar m3, UChar m4, UChar r1,
+                                           UChar r2),
+                     UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(m3, m4, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), mnm, r1, m3, r2, m4);
+}
+
+
+/* RRF format: two GPRs; traced via an extended mnemonic selected by
+   xmnm_kind and mask m3. */
+static void
+s390_format_RRF_U0RR(const HChar *(*irgen)(UChar m3, UChar r1, UChar r2),
+                     UChar m3, UChar r1, UChar r2, Int xmnm_kind)
+{
+   irgen(m3, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(XMNM, GPR, GPR), xmnm_kind, m3, r1, r2);
+}
+
+/* RRF format: three FPRs; irgen receives (r3, r1, r2). */
+static void
+s390_format_RRF_F0FF2(const HChar *(*irgen)(UChar, UChar, UChar),
+                      UChar r3, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r3, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, FPR, FPR, FPR), mnm, r1, r3, r2);
+}
+
+/* RRF format: two FPRs, a GPR, and mask m4. */
+static void
+s390_format_RRF_FFRU(const HChar *(*irgen)(UChar, UChar, UChar, UChar),
+                     UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r3, m4, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(MNM, FPR, FPR, GPR, UINT), mnm, r1, r3, r2, m4);
+}
+
+/* RRF format: three FPRs and mask m4; traced as r1, r3, r2. */
+static void
+s390_format_RRF_FUFF(const HChar *(*irgen)(UChar, UChar, UChar, UChar),
+                     UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r3, m4, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), mnm, r1, r3, r2, m4);
+}
+
+/* RRF format: like s390_format_RRF_FUFF but traced as r1, r2, r3. */
+static void
+s390_format_RRF_FUFF2(const HChar *(*irgen)(UChar, UChar, UChar, UChar),
+                      UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r3, m4, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), mnm, r1, r2, r3, m4);
+}
+
+/* RRF format: three GPRs; irgen receives (r3, r1, r2). */
+static void
+s390_format_RRF_R0RR2(const HChar *(*irgen)(UChar r3, UChar r1, UChar r2),
+                      UChar r3, UChar r1, UChar r2)
+{
+   const HChar *mnm = irgen(r3, r1, r2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, GPR, GPR, GPR), mnm, r1, r2, r3);
+}
+
+static void
+s390_format_RRS(const HChar *(*irgen)(UChar r1, UChar r2, UChar m3,
+                                      IRTemp op4addr),
+                UChar r1, UChar r2, UChar b4, UShort d4, UChar m3)
+{
+   const HChar *mnm;
+   IRTemp op4addr = newTemp(Ity_I64);
+
+   assign(op4addr, binop(Iop_Add64, mkU64(d4), b4 != 0 ? get_gpr_dw0(b4) :
+          mkU64(0)));
+
+   mnm = irgen(r1, r2, m3, op4addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC5(XMNM, GPR, GPR, CABM, UDXB), S390_XMNM_CAB, mnm, m3, r1,
+                  r2, m3, d4, 0, b4);
+}
+
+static void
+s390_format_RS_R0RD(const HChar *(*irgen)(UChar r1, IRTemp op2addr),
+                    UChar r1, UChar b2, UShort d2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, UDXB), mnm, r1, d2, 0, b2);
+}
+
+static void
+s390_format_RS_RRRD(const HChar *(*irgen)(UChar r1, UChar r3, IRTemp op2addr),
+                    UChar r1, UChar r3, UChar b2, UShort d2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, r3, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, GPR, GPR, UDXB), mnm, r1, r3, d2, 0, b2);
+}
+
+static void
+s390_format_RS_RURD(const HChar *(*irgen)(UChar r1, UChar r3, IRTemp op2addr),
+                    UChar r1, UChar r3, UChar b2, UShort d2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, r3, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, GPR, UINT, UDXB), mnm, r1, r3, d2, 0, b2);
+}
+
+/* Decode an RS-format insn operating on access registers (r1, r3 are
+   printed as ARs); operand address computed from b2/d2 as usual. */
+static void
+s390_format_RS_AARD(const HChar *(*irgen)(UChar, UChar, IRTemp),
+                    UChar r1, UChar r3, UChar b2, UShort d2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, r3, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, AR, AR, UDXB), mnm, r1, r3, d2, 0, b2);
+}
+
+/* Decode an RSI-format insn: two GPRs plus a 16-bit immediate that is
+   traced as a sign-extended PC-relative offset. */
+static void
+s390_format_RSI_RRP(const HChar *(*irgen)(UChar r1, UChar r3, UShort i2),
+                    UChar r1, UChar r3, UShort i2)
+{
+   const HChar *mnm = irgen(r1, r3, i2);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, GPR, GPR, PCREL), mnm, r1, r3, (Int)(Short)i2);
+}
+
+/* Decode an RSY-format insn: the 20-bit signed displacement is built by
+   sign-extending dh2 (high 8 bits) and OR-ing in dl2 (low 12 bits). */
+static void
+s390_format_RSY_RRRD(const HChar *(*irgen)(UChar r1, UChar r3, IRTemp op2addr),
+                     UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+   IRTemp d2 = newTemp(Ity_I64);
+
+   assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+   assign(op2addr, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, r3, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, GPR, GPR, SDXB), mnm, r1, r3, dh2, dl2, 0, b2);
+}
+
+/* Decode an RSY-format insn on access registers; 20-bit signed
+   displacement assembled from dh2 (sign-extended) and dl2. */
+static void
+s390_format_RSY_AARD(const HChar *(*irgen)(UChar, UChar, IRTemp),
+                     UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+   IRTemp d2 = newTemp(Ity_I64);
+
+   assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+   assign(op2addr, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, r3, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, AR, AR, SDXB), mnm, r1, r3, dh2, dl2, 0, b2);
+}
+
+/* Decode an RSY-format insn where r3 is printed as an unsigned integer;
+   20-bit signed displacement assembled from dh2/dl2. */
+static void
+s390_format_RSY_RURD(const HChar *(*irgen)(UChar r1, UChar r3, IRTemp op2addr),
+                     UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+   IRTemp d2 = newTemp(Ity_I64);
+
+   assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+   assign(op2addr, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, r3, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, GPR, UINT, SDXB), mnm, r1, r3, dh2, dl2, 0, b2);
+}
+
+/* Decode a conditional RSY-format insn (mask m3): first emit a guard that
+   skips to the next insn when condition m3 evaluates false, then let IRGEN
+   emit the guarded IR.  IRGEN must not end the block itself (asserted). */
+static void
+s390_format_RSY_RDRM(const HChar *(*irgen)(UChar r1, IRTemp op2addr),
+                     UChar r1, UChar m3, UChar b2, UShort dl2, UChar dh2,
+                     Int xmnm_kind)
+{
+   IRTemp op2addr = newTemp(Ity_I64);
+   IRTemp d2 = newTemp(Ity_I64);
+
+   /* Guard: if cond(m3) == 0, fall through to the next instruction. */
+   next_insn_if(binop(Iop_CmpEQ32, s390_call_calculate_cond(m3), mkU32(0)));
+
+   assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+   assign(op2addr, binop(Iop_Add64, mkexpr(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   irgen(r1, op2addr);
+
+   vassert(dis_res->whatNext == Dis_Continue);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(XMNM, GPR, SDXB), xmnm_kind, m3, r1, dh2, dl2, 0, b2);
+}
+
+/* Decode a generic RX-format insn: op2addr = d2 + base + index (b2/x2
+   treated as 0 when register 0).  Raw fields are forwarded to IRGEN,
+   which is responsible for its own trace output. */
+static void
+s390_format_RX(const HChar *(*irgen)(UChar r1, UChar x2, UChar b2, UShort d2,
+               IRTemp op2addr),
+               UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2),
+          b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) :
+          mkU64(0)));
+
+   irgen(r1, x2, b2, d2, op2addr);
+}
+
+/* Decode an RX-format insn with a GPR first operand: op2addr =
+   d2 + base + index; trace with the mnemonic returned by IRGEN. */
+static void
+s390_format_RX_RRRD(const HChar *(*irgen)(UChar r1, IRTemp op2addr),
+                    UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2),
+          b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, UDXB), mnm, r1, d2, x2, b2);
+}
+
+/* Decode an RX-format insn with an FPR first operand; address
+   computation identical to the GPR variant. */
+static void
+s390_format_RX_FRRD(const HChar *(*irgen)(UChar r1, IRTemp op2addr),
+                    UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2),
+          b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, FPR, UDXB), mnm, r1, d2, x2, b2);
+}
+
+/* Decode an RXE-format insn with an FPR first operand; same address
+   computation and trace encoding as the RX FPR helper. */
+static void
+s390_format_RXE_FRRD(const HChar *(*irgen)(UChar r1, IRTemp op2addr),
+                     UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2),
+          b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, FPR, UDXB), mnm, r1, d2, x2, b2);
+}
+
+/* Decode an RXF-format insn with two FPR operands (r1, r3).  Note the
+   argument order passed to IRGEN: (r3, op2addr, r1). */
+static void
+s390_format_RXF_FRRDF(const HChar *(*irgen)(UChar, IRTemp, UChar),
+                      UChar r3, UChar x2, UChar b2, UShort d2, UChar r1)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkU64(d2),
+          b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) :
+          mkU64(0)));
+
+   mnm = irgen(r3, op2addr, r1);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC4(MNM, FPR, FPR, UDXB), mnm, r1, r3, d2, x2, b2);
+}
+
+/* Decode an RXY-format insn with a GPR first operand: 20-bit signed
+   displacement from dh2/dl2, plus base and index registers. */
+static void
+s390_format_RXY_RRRD(const HChar *(*irgen)(UChar r1, IRTemp op2addr),
+                     UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+   IRTemp d2 = newTemp(Ity_I64);
+
+   assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+   assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkexpr(d2),
+          b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, SDXB), mnm, r1, dh2, dl2, x2, b2);
+}
+
+/* Decode an RXY-format insn with an FPR first operand; 20-bit signed
+   displacement, base and index as in the GPR variant. */
+static void
+s390_format_RXY_FRRD(const HChar *(*irgen)(UChar r1, IRTemp op2addr),
+                     UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+   IRTemp d2 = newTemp(Ity_I64);
+
+   assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+   assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkexpr(d2),
+          b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) :
+          mkU64(0)));
+
+   mnm = irgen(r1, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, FPR, SDXB), mnm, r1, dh2, dl2, x2, b2);
+}
+
+/* Decode an RXY-format insn whose r1 field is printed as an unsigned
+   integer; IRGEN takes no arguments here (op2addr is computed for the
+   address trace only). */
+static void
+s390_format_RXY_URRD(const HChar *(*irgen)(void),
+                     UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+   IRTemp d2 = newTemp(Ity_I64);
+
+   assign(d2, mkU64(((ULong)(Long)(Char)dh2 << 12) | ((ULong)dl2)));
+   assign(op2addr, binop(Iop_Add64, binop(Iop_Add64, mkexpr(d2),
+          b2 != 0 ? get_gpr_dw0(b2) : mkU64(0)), x2 != 0 ? get_gpr_dw0(x2) :
+          mkU64(0)));
+
+   mnm = irgen();
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, UINT, SDXB), mnm, r1, dh2, dl2, x2, b2);
+}
+
+/* Decode an S-format insn (single storage operand): op2addr = d2 + base. */
+static void
+s390_format_S_RD(const HChar *(*irgen)(IRTemp op2addr),
+                 UChar b2, UShort d2)
+{
+   const HChar *mnm;
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   mnm = irgen(op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC2(MNM, UDXB), mnm, d2, 0, b2);
+}
+
+/* Decode an SI-format insn: 8-bit immediate i2 plus a first-operand
+   storage address built from b1/d1. */
+static void
+s390_format_SI_URD(const HChar *(*irgen)(UChar i2, IRTemp op1addr),
+                   UChar i2, UChar b1, UShort d1)
+{
+   const HChar *mnm;
+   IRTemp op1addr = newTemp(Ity_I64);
+
+   assign(op1addr, binop(Iop_Add64, mkU64(d1), b1 != 0 ? get_gpr_dw0(b1) :
+          mkU64(0)));
+
+   mnm = irgen(i2, op1addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, UDXB, UINT), mnm, d1, 0, b1, i2);
+}
+
+/* Decode an SIY-format insn (unsigned immediate): 20-bit signed
+   displacement assembled from dh1 (sign-extended) and dl1. */
+static void
+s390_format_SIY_URD(const HChar *(*irgen)(UChar i2, IRTemp op1addr),
+                    UChar i2, UChar b1, UShort dl1, UChar dh1)
+{
+   const HChar *mnm;
+   IRTemp op1addr = newTemp(Ity_I64);
+   IRTemp d1 = newTemp(Ity_I64);
+
+   assign(d1, mkU64(((ULong)(Long)(Char)dh1 << 12) | ((ULong)dl1)));
+   assign(op1addr, binop(Iop_Add64, mkexpr(d1), b1 != 0 ? get_gpr_dw0(b1) :
+          mkU64(0)));
+
+   mnm = irgen(i2, op1addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, SDXB, UINT), mnm, dh1, dl1, 0, b1, i2);
+}
+
+/* Decode an SIY-format insn whose immediate is traced as a signed value
+   (i2 is sign-extended for display only). */
+static void
+s390_format_SIY_IRD(const HChar *(*irgen)(UChar i2, IRTemp op1addr),
+                    UChar i2, UChar b1, UShort dl1, UChar dh1)
+{
+   const HChar *mnm;
+   IRTemp op1addr = newTemp(Ity_I64);
+   IRTemp d1 = newTemp(Ity_I64);
+
+   assign(d1, mkU64(((ULong)(Long)(Char)dh1 << 12) | ((ULong)dl1)));
+   assign(op1addr, binop(Iop_Add64, mkexpr(d1), b1 != 0 ? get_gpr_dw0(b1) :
+          mkU64(0)));
+
+   mnm = irgen(i2, op1addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, SDXB, INT), mnm, dh1, dl1, 0, b1, (Int)(Char)i2);
+}
+
+/* Decode an SS-format insn with a single length field l: compute both
+   storage-operand addresses (b1/d1 and b2/d2) and dispatch to IRGEN. */
+static void
+s390_format_SS_L0RDRD(const HChar *(*irgen)(UChar, IRTemp, IRTemp),
+                      UChar l, UChar b1, UShort d1, UChar b2, UShort d2)
+{
+   const HChar *mnm;
+   IRTemp op1addr = newTemp(Ity_I64);
+   IRTemp op2addr = newTemp(Ity_I64);
+
+   assign(op1addr, binop(Iop_Add64, mkU64(d1), b1 != 0 ? get_gpr_dw0(b1) :
+          mkU64(0)));
+   assign(op2addr, binop(Iop_Add64, mkU64(d2), b2 != 0 ? get_gpr_dw0(b2) :
+          mkU64(0)));
+
+   mnm = irgen(l, op1addr, op2addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, UDLB, UDXB), mnm, d1, l, b1, d2, 0, b2);
+}
+
+/* Decode an SIL-format insn with a 16-bit immediate traced as signed. */
+static void
+s390_format_SIL_RDI(const HChar *(*irgen)(UShort i2, IRTemp op1addr),
+                    UChar b1, UShort d1, UShort i2)
+{
+   const HChar *mnm;
+   IRTemp op1addr = newTemp(Ity_I64);
+
+   assign(op1addr, binop(Iop_Add64, mkU64(d1), b1 != 0 ? get_gpr_dw0(b1) :
+          mkU64(0)));
+
+   mnm = irgen(i2, op1addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, UDXB, INT), mnm, d1, 0, b1, (Int)(Short)i2);
+}
+
+/* Decode an SIL-format insn with a 16-bit immediate traced as unsigned. */
+static void
+s390_format_SIL_RDU(const HChar *(*irgen)(UShort i2, IRTemp op1addr),
+                    UChar b1, UShort d1, UShort i2)
+{
+   const HChar *mnm;
+   IRTemp op1addr = newTemp(Ity_I64);
+
+   assign(op1addr, binop(Iop_Add64, mkU64(d1), b1 != 0 ? get_gpr_dw0(b1) :
+          mkU64(0)));
+
+   mnm = irgen(i2, op1addr);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, UDXB, UINT), mnm, d1, 0, b1, i2);
+}
+
+
+
+/*------------------------------------------------------------*/
+/*--- Build IR for opcodes                                 ---*/
+/*------------------------------------------------------------*/
+
+/* AR: 32-bit signed add, r1(low word) += r2(low word); CC via the
+   signed-add-32 thunk on the two operands. */
+static const HChar *
+s390_irgen_AR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "ar";
+}
+
+/* AGR: 64-bit signed add, r1 += r2; CC via the signed-add-64 thunk. */
+static const HChar *
+s390_irgen_AGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "agr";
+}
+
+/* AGFR: 64-bit add of the sign-extended low word of r2 to r1. */
+static const HChar *
+s390_irgen_AGFR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "agfr";
+}
+
+/* ARK: three-operand 32-bit signed add, r1 = r2 + r3. */
+static const HChar *
+s390_irgen_ARK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op2, op3);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "ark";
+}
+
+/* AGRK: three-operand 64-bit signed add, r1 = r2 + r3. */
+static const HChar *
+s390_irgen_AGRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Add64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op2, op3);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "agrk";
+}
+
+/* A: 32-bit signed add of the word at op2addr into r1(low word). */
+static const HChar *
+s390_irgen_A(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "a";
+}
+
+/* AY: long-displacement form of A; identical IR apart from mnemonic. */
+static const HChar *
+s390_irgen_AY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "ay";
+}
+
+/* AG: 64-bit signed add of the doubleword at op2addr into r1. */
+static const HChar *
+s390_irgen_AG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "ag";
+}
+
+/* AGF: 64-bit add of the sign-extended word at op2addr into r1. */
+static const HChar *
+s390_irgen_AGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "agf";
+}
+
+/* AFI: add 32-bit signed immediate i2 to r1(low word). */
+static const HChar *
+s390_irgen_AFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = (Int)i2;
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkU32((UInt)op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, mktemp(Ity_I32,
+                       mkU32((UInt)op2)));
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "afi";
+}
+
+/* AGFI: add the sign-extended 32-bit immediate i2 to 64-bit r1. */
+static const HChar *
+s390_irgen_AGFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Long op2;
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (Long)(Int)i2;
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkU64((ULong)op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, mktemp(Ity_I64,
+                       mkU64((ULong)op2)));
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "agfi";
+}
+
+/* AHIK: three-operand 32-bit add, r1 = sign-extended 16-bit i2 + r3. */
+static const HChar *
+s390_irgen_AHIK(UChar r1, UChar r3, UShort i2)
+{
+   Int op2;
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   op2 = (Int)(Short)i2;
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Add32, mkU32((UInt)op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, mktemp(Ity_I32, mkU32((UInt)
+                       op2)), op3);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "ahik";
+}
+
+/* AGHIK: three-operand 64-bit add, r1 = sign-extended 16-bit i2 + r3. */
+static const HChar *
+s390_irgen_AGHIK(UChar r1, UChar r3, UShort i2)
+{
+   Long op2;
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   op2 = (Long)(Short)i2;
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Add64, mkU64((ULong)op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, mktemp(Ity_I64, mkU64((ULong)
+                       op2)), op3);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "aghik";
+}
+
+/* ASI: add sign-extended 8-bit immediate i2 to the 32-bit word in
+   storage at op1addr; result stored back, CC from signed-add thunk. */
+static const HChar *
+s390_irgen_ASI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, load(Ity_I32, mkexpr(op1addr)));
+   op2 = (Int)(Char)i2;
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkU32((UInt)op2)));
+   store(mkexpr(op1addr), mkexpr(result));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, mktemp(Ity_I32,
+                       mkU32((UInt)op2)));
+
+   return "asi";
+}
+
+/* AGSI: add sign-extended 8-bit immediate i2 to the 64-bit doubleword
+   in storage at op1addr; result stored back. */
+static const HChar *
+s390_irgen_AGSI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Long op2;
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, load(Ity_I64, mkexpr(op1addr)));
+   op2 = (Long)(Char)i2;
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkU64((ULong)op2)));
+   store(mkexpr(op1addr), mkexpr(result));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, mktemp(Ity_I64,
+                       mkU64((ULong)op2)));
+
+   return "agsi";
+}
+
+/* AH: add the sign-extended halfword at op2addr to r1(low word). */
+static const HChar *
+s390_irgen_AH(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "ah";
+}
+
+/* AHY: long-displacement form of AH; identical IR apart from mnemonic. */
+static const HChar *
+s390_irgen_AHY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "ahy";
+}
+
+/* AHI: add sign-extended 16-bit immediate i2 to r1(low word). */
+static const HChar *
+s390_irgen_AHI(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = (Int)(Short)i2;
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkU32((UInt)op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, mktemp(Ity_I32,
+                       mkU32((UInt)op2)));
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "ahi";
+}
+
+/* AGHI: add sign-extended 16-bit immediate i2 to 64-bit r1. */
+static const HChar *
+s390_irgen_AGHI(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Long op2;
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (Long)(Short)i2;
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkU64((ULong)op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op1, mktemp(Ity_I64,
+                       mkU64((ULong)op2)));
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "aghi";
+}
+
+/* AHHHR: high-word add, r1(high) = r2(high) + r3(high), 32-bit signed. */
+static const HChar *
+s390_irgen_AHHHR(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w0(r2));
+   assign(op3, get_gpr_w0(r3));
+   assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op2, op3);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "ahhhr";
+}
+
+/* AHHLR: high-word add, r1(high) = r2(high) + r3(low), 32-bit signed. */
+static const HChar *
+s390_irgen_AHHLR(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w0(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op2, op3);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "ahhlr";
+}
+
+/* AIH: add 32-bit signed immediate i2 to the high word of r1. */
+static const HChar *
+s390_irgen_AIH(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = (Int)i2;
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkU32((UInt)op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op1, mktemp(Ity_I32,
+                       mkU32((UInt)op2)));
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "aih";
+}
+
+/* ALR: 32-bit unsigned (logical) add, r1 += r2; CC via the
+   unsigned-add-32 thunk. */
+static const HChar *
+s390_irgen_ALR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "alr";
+}
+
+/* ALGR: 64-bit unsigned (logical) add, r1 += r2. */
+static const HChar *
+s390_irgen_ALGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "algr";
+}
+
+/* ALGFR: 64-bit unsigned add of the zero-extended low word of r2. */
+static const HChar *
+s390_irgen_ALGFR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, get_gpr_w1(r2)));
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "algfr";
+}
+
+/* ALRK: three-operand 32-bit unsigned add, r1 = r2 + r3. */
+static const HChar *
+s390_irgen_ALRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op2, op3);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "alrk";
+}
+
+/* ALGRK: three-operand 64-bit unsigned add, r1 = r2 + r3. */
+static const HChar *
+s390_irgen_ALGRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Add64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op2, op3);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "algrk";
+}
+
+/* AL: 32-bit unsigned add of the word at op2addr into r1(low word). */
+static const HChar *
+s390_irgen_AL(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "al";
+}
+
+/* ALY: long-displacement form of AL; identical IR apart from mnemonic. */
+static const HChar *
+s390_irgen_ALY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "aly";
+}
+
+/* ALG: 64-bit unsigned add of the doubleword at op2addr into r1. */
+static const HChar *
+s390_irgen_ALG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "alg";
+}
+
+/* ALGF: 64-bit unsigned add of the zero-extended word at op2addr. */
+static const HChar *
+s390_irgen_ALGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkexpr(op2addr))));
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "algf";
+}
+
+/* ALFI: add 32-bit unsigned immediate i2 to r1(low word). */
+static const HChar *
+s390_irgen_ALFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, mktemp(Ity_I32,
+                       mkU32(op2)));
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "alfi";
+}
+
+/* ALGFI: add the zero-extended 32-bit immediate i2 to 64-bit r1. */
+static const HChar *
+s390_irgen_ALGFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (ULong)i2;
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkU64(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, mktemp(Ity_I64,
+                       mkU64(op2)));
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "algfi";
+}
+
+/* ALHHHR: high-word unsigned add, r1(high) = r2(high) + r3(high). */
+static const HChar *
+s390_irgen_ALHHHR(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w0(r2));
+   assign(op3, get_gpr_w0(r3));
+   assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op2, op3);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "alhhhr";
+}
+
+/* ALHHLR: high-word unsigned add, r1(high) = r2(high) + r3(low). */
+static const HChar *
+s390_irgen_ALHHLR(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w0(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op2, op3);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "alhhlr";
+}
+
+/* ALCR: 32-bit add with carry, r1 = r1 + r2 + carry.  The incoming
+   carry is bit 1 of the current CC (cc >> 1). */
+static const HChar *
+s390_irgen_ALCR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp carry_in = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(carry_in, binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1)));
+   assign(result, binop(Iop_Add32, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)),
+          mkexpr(carry_in)));
+   s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_ADDC_32, op1, op2, carry_in);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "alcr";
+}
+
+/* ALCGR: 64-bit add with carry, r1 = r1 + r2 + carry; carry is bit 1
+   of the current CC, widened to 64 bits. */
+static const HChar *
+s390_irgen_ALCGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp carry_in = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(carry_in, unop(Iop_32Uto64, binop(Iop_Shr32, s390_call_calculate_cc(),
+          mkU8(1))));
+   assign(result, binop(Iop_Add64, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)),
+          mkexpr(carry_in)));
+   s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_ADDC_64, op1, op2, carry_in);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "alcgr";
+}
+
+/* ALC: 32-bit add with carry of the word at op2addr; carry is bit 1
+   of the current CC. */
+static const HChar *
+s390_irgen_ALC(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp carry_in = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(carry_in, binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1)));
+   assign(result, binop(Iop_Add32, binop(Iop_Add32, mkexpr(op1), mkexpr(op2)),
+          mkexpr(carry_in)));
+   s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_ADDC_32, op1, op2, carry_in);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "alc";
+}
+
+/* ALCG: 64-bit add with carry of the doubleword at op2addr; carry is
+   bit 1 of the current CC, widened to 64 bits. */
+static const HChar *
+s390_irgen_ALCG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp carry_in = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(carry_in, unop(Iop_32Uto64, binop(Iop_Shr32, s390_call_calculate_cc(),
+          mkU8(1))));
+   assign(result, binop(Iop_Add64, binop(Iop_Add64, mkexpr(op1), mkexpr(op2)),
+          mkexpr(carry_in)));
+   s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_ADDC_64, op1, op2, carry_in);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "alcg";
+}
+
+/* ALSI: logical add of the sign-extended 8-bit immediate i2 to the
+   32-bit word in storage at op1addr; CC via the unsigned-add thunk. */
+static const HChar *
+s390_irgen_ALSI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, load(Ity_I32, mkexpr(op1addr)));
+   op2 = (UInt)(Int)(Char)i2;
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, mktemp(Ity_I32,
+                       mkU32(op2)));
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "alsi";
+}
+
+/* ALGSI: logical add of the sign-extended 8-bit immediate i2 to the
+   64-bit doubleword in storage at op1addr. */
+static const HChar *
+s390_irgen_ALGSI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, load(Ity_I64, mkexpr(op1addr)));
+   op2 = (ULong)(Long)(Char)i2;
+   assign(result, binop(Iop_Add64, mkexpr(op1), mkU64(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op1, mktemp(Ity_I64,
+                       mkU64(op2)));
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "algsi";
+}
+
+/* ALHSIK: three-operand 32-bit logical add, r1 = sign-extended 16-bit
+   i2 + r3. */
+static const HChar *
+s390_irgen_ALHSIK(UChar r1, UChar r3, UShort i2)
+{
+   UInt op2;
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   op2 = (UInt)(Int)(Short)i2;
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Add32, mkU32(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, mktemp(Ity_I32, mkU32(op2)),
+                       op3);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "alhsik";
+}
+
+/* ALGHSIK: three-operand 64-bit logical add, r1 = sign-extended 16-bit
+   i2 + r3. */
+static const HChar *
+s390_irgen_ALGHSIK(UChar r1, UChar r3, UShort i2)
+{
+   ULong op2;
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   op2 = (ULong)(Long)(Short)i2;
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Add64, mkU64(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, mktemp(Ity_I64, mkU64(op2)),
+                       op3);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "alghsik";
+}
+
+/* ALSIH: add 32-bit immediate i2 to the high word of r1; CC via the
+   unsigned-add thunk. */
+static const HChar *
+s390_irgen_ALSIH(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op1, mktemp(Ity_I32,
+                       mkU32(op2)));
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "alsih";
+}
+
+/* ALSIHN: like ALSIH but leaves the condition code untouched (no CC
+   thunk is written). */
+static const HChar *
+s390_irgen_ALSIHN(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Add32, mkexpr(op1), mkU32(op2)));
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "alsihn";
+}
+
+/* NR: r1 (low word) &= r2 (low word); CC thunk records the bitwise
+   result. */
+static const HChar *
+s390_irgen_NR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_And32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "nr";
+}
+
+/* NGR: 64-bit register AND, r1 &= r2. */
+static const HChar *
+s390_irgen_NGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, binop(Iop_And64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "ngr";
+}
+
+/* NRK: three-operand 32-bit AND, r1 = r2 & r3. */
+static const HChar *
+s390_irgen_NRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_And32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "nrk";
+}
+
+/* NGRK: three-operand 64-bit AND, r1 = r2 & r3. */
+static const HChar *
+s390_irgen_NGRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_And64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "ngrk";
+}
+
+/* N: r1 (low word) &= 32-bit operand loaded from storage. */
+static const HChar *
+s390_irgen_N(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_And32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "n";
+}
+
+/* NY: identical IR to N; differs only in instruction encoding
+   (long-displacement form). */
+static const HChar *
+s390_irgen_NY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_And32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "ny";
+}
+
+/* NG: r1 &= 64-bit operand loaded from storage. */
+static const HChar *
+s390_irgen_NG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_And64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "ng";
+}
+
+/* NI: AND an 8-bit immediate into a byte in storage. */
+static const HChar *
+s390_irgen_NI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+   IRTemp result = newTemp(Ity_I8);
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   assign(result, binop(Iop_And8, mkexpr(op1), mkU8(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "ni";
+}
+
+/* NIY: identical IR to NI (long-displacement encoding). */
+static const HChar *
+s390_irgen_NIY(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+   IRTemp result = newTemp(Ity_I8);
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   assign(result, binop(Iop_And8, mkexpr(op1), mkU8(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "niy";
+}
+
+/* NIHF: AND a 32-bit immediate into the high word of r1. */
+static const HChar *
+s390_irgen_NIHF(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = i2;
+   assign(result, binop(Iop_And32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "nihf";
+}
+
+/* NIHH: AND a 16-bit immediate into halfword 0 (bits 0-15) of r1. */
+static const HChar *
+s390_irgen_NIHH(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+   IRTemp result = newTemp(Ity_I16);
+
+   assign(op1, get_gpr_hw0(r1));
+   op2 = i2;
+   assign(result, binop(Iop_And16, mkexpr(op1), mkU16(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_hw0(r1, mkexpr(result));
+
+   return "nihh";
+}
+
+/* NIHL: AND a 16-bit immediate into halfword 1 of r1. */
+static const HChar *
+s390_irgen_NIHL(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+   IRTemp result = newTemp(Ity_I16);
+
+   assign(op1, get_gpr_hw1(r1));
+   op2 = i2;
+   assign(result, binop(Iop_And16, mkexpr(op1), mkU16(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_hw1(r1, mkexpr(result));
+
+   return "nihl";
+}
+
+/* NILF: AND a 32-bit immediate into the low word of r1. */
+static const HChar *
+s390_irgen_NILF(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = i2;
+   assign(result, binop(Iop_And32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "nilf";
+}
+
+/* NILH: AND a 16-bit immediate into halfword 2 of r1. */
+static const HChar *
+s390_irgen_NILH(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+   IRTemp result = newTemp(Ity_I16);
+
+   assign(op1, get_gpr_hw2(r1));
+   op2 = i2;
+   assign(result, binop(Iop_And16, mkexpr(op1), mkU16(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_hw2(r1, mkexpr(result));
+
+   return "nilh";
+}
+
+/* NILL: AND a 16-bit immediate into halfword 3 (lowest) of r1. */
+static const HChar *
+s390_irgen_NILL(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+   IRTemp result = newTemp(Ity_I16);
+
+   assign(op1, get_gpr_hw3(r1));
+   op2 = i2;
+   assign(result, binop(Iop_And16, mkexpr(op1), mkU16(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_hw3(r1, mkexpr(result));
+
+   return "nill";
+}
+
+/* BASR: branch and save, register form.  Stores the return address
+   (this 2-byte instruction's address + 2) in r1; if r2 != 0, branches
+   to the address in r2.  When r1 == r2 the target is snapshotted into a
+   temp first, because writing r1 would otherwise clobber it. */
+static const HChar *
+s390_irgen_BASR(UChar r1, UChar r2)
+{
+   IRTemp target = newTemp(Ity_I64);
+
+   if (r2 == 0) {
+      put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 2ULL));
+   } else {
+      if (r1 != r2) {
+         put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 2ULL));
+         call_function(get_gpr_dw0(r2));
+      } else {
+         assign(target, get_gpr_dw0(r2));
+         put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 2ULL));
+         call_function(mkexpr(target));
+      }
+   }
+
+   return "basr";
+}
+
+/* BAS: branch and save, storage-operand form.  Saves the return address
+   (instruction address + 4) in r1 and calls the effective address. */
+static const HChar *
+s390_irgen_BAS(UChar r1, IRTemp op2addr)
+{
+   IRTemp target = newTemp(Ity_I64);
+
+   put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 4ULL));
+   assign(target, mkexpr(op2addr));
+   call_function(mkexpr(target));
+
+   return "bas";
+}
+
+/* BCR: branch on condition, register form.  r2 == 0 with mask >= 14 is
+   the architected serialization idiom and emits a memory fence; r1 == 15
+   is an unconditional branch treated as a function return; otherwise the
+   branch is taken iff the computed condition for mask r1 is nonzero.
+   r1 == 0 or r2 == 0 is a no-op (never branch). */
+static const HChar *
+s390_irgen_BCR(UChar r1, UChar r2)
+{
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (r2 == 0 && (r1 >= 14)) {    /* serialization */
+      stmt(IRStmt_MBE(Imbe_Fence));
+   }
+
+   if ((r2 == 0) || (r1 == 0)) {
+   } else {
+      if (r1 == 15) {
+         return_from_function(get_gpr_dw0(r2));
+      } else {
+         assign(cond, s390_call_calculate_cond(r1));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                                    get_gpr_dw0(r2));
+      }
+   }
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC2(XMNM, GPR), S390_XMNM_BCR, r1, r2);
+
+   return "bcr";
+}
+
+/* BC: branch on condition to an effective address.  Mask 0 is a no-op,
+   mask 15 branches unconditionally; x2/b2/d2 are only used for the
+   trace-time disassembly. */
+static const HChar *
+s390_irgen_BC(UChar r1, UChar x2, UChar b2, UShort d2, IRTemp op2addr)
+{
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (r1 == 0) {
+   } else {
+      if (r1 == 15) {
+         always_goto(mkexpr(op2addr));
+      } else {
+         assign(cond, s390_call_calculate_cond(r1));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                                    mkexpr(op2addr));
+      }
+   }
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC2(XMNM, UDXB), S390_XMNM_BC, r1, d2, x2, b2);
+
+   return "bc";
+}
+
+/* BCTR: decrement the low word of r1; if r2 != 0, branch to r2's address
+   when the decremented value is nonzero.  The decrement happens even when
+   r2 == 0 (no branch). */
+static const HChar *
+s390_irgen_BCTR(UChar r1, UChar r2)
+{
+   put_gpr_w1(r1, binop(Iop_Sub32, get_gpr_w1(r1), mkU32(1)));
+   if (r2 != 0) {
+      if_condition_goto_computed(binop(Iop_CmpNE32, get_gpr_w1(r1), mkU32(0)),
+                                 get_gpr_dw0(r2));
+   }
+
+   return "bctr";
+}
+
+/* BCTGR: 64-bit variant of BCTR. */
+static const HChar *
+s390_irgen_BCTGR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, binop(Iop_Sub64, get_gpr_dw0(r1), mkU64(1)));
+   if (r2 != 0) {
+      if_condition_goto_computed(binop(Iop_CmpNE64, get_gpr_dw0(r1), mkU64(0)),
+                                 get_gpr_dw0(r2));
+   }
+
+   return "bctgr";
+}
+
+/* BCT: decrement the low word of r1 and branch to the effective address
+   when the result is nonzero. */
+static const HChar *
+s390_irgen_BCT(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, binop(Iop_Sub32, get_gpr_w1(r1), mkU32(1)));
+   if_condition_goto_computed(binop(Iop_CmpNE32, get_gpr_w1(r1), mkU32(0)),
+                              mkexpr(op2addr));
+
+   return "bct";
+}
+
+/* BCTG: 64-bit variant of BCT. */
+static const HChar *
+s390_irgen_BCTG(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, binop(Iop_Sub64, get_gpr_dw0(r1), mkU64(1)));
+   if_condition_goto_computed(binop(Iop_CmpNE64, get_gpr_dw0(r1), mkU64(0)),
+                              mkexpr(op2addr));
+
+   return "bctg";
+}
+
+/* BXH: branch on index high.  r1 += r3 (increment); the comparand is read
+   from the odd register of the r3 pair (r3 | 1) *before* the add; branch
+   if comparand < new r1 (signed). */
+static const HChar *
+s390_irgen_BXH(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp value = newTemp(Ity_I32);
+
+   assign(value, get_gpr_w1(r3 | 1));
+   put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
+   if_condition_goto_computed(binop(Iop_CmpLT32S, mkexpr(value),
+                                    get_gpr_w1(r1)), mkexpr(op2addr));
+
+   return "bxh";
+}
+
+/* BXHG: 64-bit variant of BXH. */
+static const HChar *
+s390_irgen_BXHG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp value = newTemp(Ity_I64);
+
+   assign(value, get_gpr_dw0(r3 | 1));
+   put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
+   if_condition_goto_computed(binop(Iop_CmpLT64S, mkexpr(value),
+                                    get_gpr_dw0(r1)), mkexpr(op2addr));
+
+   return "bxhg";
+}
+
+/* BXLE: branch on index low or equal; like BXH but branches when
+   new r1 <= comparand (signed). */
+static const HChar *
+s390_irgen_BXLE(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp value = newTemp(Ity_I32);
+
+   assign(value, get_gpr_w1(r3 | 1));
+   put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
+   if_condition_goto_computed(binop(Iop_CmpLE32S, get_gpr_w1(r1),
+                                    mkexpr(value)), mkexpr(op2addr));
+
+   return "bxle";
+}
+
+/* BXLEG: 64-bit variant of BXLE. */
+static const HChar *
+s390_irgen_BXLEG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp value = newTemp(Ity_I64);
+
+   assign(value, get_gpr_dw0(r3 | 1));
+   put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
+   if_condition_goto_computed(binop(Iop_CmpLE64S, get_gpr_dw0(r1),
+                                    mkexpr(value)), mkexpr(op2addr));
+
+   return "bxleg";
+}
+
+/* BRAS: branch relative and save.  Return address (instruction + 4) goes
+   into r1; target is the current address plus the sign-extended halfword
+   offset scaled by 2 (the "<< 1"). */
+static const HChar *
+s390_irgen_BRAS(UChar r1, UShort i2)
+{
+   put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 4ULL));
+   call_function_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+   return "bras";
+}
+
+/* BRASL: long-offset variant of BRAS; 6-byte instruction, 32-bit
+   sign-extended offset scaled by 2. */
+static const HChar *
+s390_irgen_BRASL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + 6ULL));
+   call_function_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1));
+
+   return "brasl";
+}
+
+/* BRC: branch relative on condition.  Mask 0 is a no-op, mask 15 is an
+   unconditional (chased) branch, anything else branches when the computed
+   condition is nonzero. */
+static const HChar *
+s390_irgen_BRC(UChar r1, UShort i2)
+{
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (r1 == 0) {
+   } else {
+      if (r1 == 15) {
+         always_goto_and_chase(
+               guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+      } else {
+         assign(cond, s390_call_calculate_cond(r1));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+      }
+   }
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC2(XMNM, PCREL), S390_XMNM_BRC, r1, (Int)(Short)i2);
+
+   return "brc";
+}
+
+/* BRCL: long-offset variant of BRC (32-bit relative offset). */
+static const HChar *
+s390_irgen_BRCL(UChar r1, UInt i2)
+{
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (r1 == 0) {
+   } else {
+      if (r1 == 15) {
+         always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1));
+      } else {
+         assign(cond, s390_call_calculate_cond(r1));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1));
+      }
+   }
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC2(XMNM, PCREL), S390_XMNM_BRCL, r1, i2);
+
+   return "brcl";
+}
+
+/* BRCT: decrement the low word of r1; branch relative when the result is
+   nonzero. */
+static const HChar *
+s390_irgen_BRCT(UChar r1, UShort i2)
+{
+   put_gpr_w1(r1, binop(Iop_Sub32, get_gpr_w1(r1), mkU32(1)));
+   if_condition_goto(binop(Iop_CmpNE32, get_gpr_w1(r1), mkU32(0)),
+                     guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+   return "brct";
+}
+
+/* BRCTG: 64-bit variant of BRCT. */
+static const HChar *
+s390_irgen_BRCTG(UChar r1, UShort i2)
+{
+   put_gpr_dw0(r1, binop(Iop_Sub64, get_gpr_dw0(r1), mkU64(1)));
+   if_condition_goto(binop(Iop_CmpNE64, get_gpr_dw0(r1), mkU64(0)),
+                     guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+   return "brctg";
+}
+
+/* BRXH: relative-branch form of BXH; comparand read from r3 | 1 before
+   the add, branch taken if comparand < new r1 (signed). */
+static const HChar *
+s390_irgen_BRXH(UChar r1, UChar r3, UShort i2)
+{
+   IRTemp value = newTemp(Ity_I32);
+
+   assign(value, get_gpr_w1(r3 | 1));
+   put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
+   if_condition_goto(binop(Iop_CmpLT32S, mkexpr(value), get_gpr_w1(r1)),
+                     guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+   return "brxh";
+}
+
+/* BRXHG: 64-bit variant of BRXH. */
+static const HChar *
+s390_irgen_BRXHG(UChar r1, UChar r3, UShort i2)
+{
+   IRTemp value = newTemp(Ity_I64);
+
+   assign(value, get_gpr_dw0(r3 | 1));
+   put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
+   if_condition_goto(binop(Iop_CmpLT64S, mkexpr(value), get_gpr_dw0(r1)),
+                     guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+   return "brxhg";
+}
+
+/* BRXLE: relative-branch form of BXLE; branch taken if new r1 <=
+   comparand (signed). */
+static const HChar *
+s390_irgen_BRXLE(UChar r1, UChar r3, UShort i2)
+{
+   IRTemp value = newTemp(Ity_I32);
+
+   assign(value, get_gpr_w1(r3 | 1));
+   put_gpr_w1(r1, binop(Iop_Add32, get_gpr_w1(r1), get_gpr_w1(r3)));
+   if_condition_goto(binop(Iop_CmpLE32S, get_gpr_w1(r1), mkexpr(value)),
+                     guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+   return "brxle";
+}
+
+/* BRXLG: 64-bit variant of BRXLE. */
+static const HChar *
+s390_irgen_BRXLG(UChar r1, UChar r3, UShort i2)
+{
+   IRTemp value = newTemp(Ity_I64);
+
+   assign(value, get_gpr_dw0(r3 | 1));
+   put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), get_gpr_dw0(r3)));
+   if_condition_goto(binop(Iop_CmpLE64S, get_gpr_dw0(r1), mkexpr(value)),
+                     guest_IA_curr_instr + ((ULong)(Long)(Short)i2 << 1));
+
+   return "brxlg";
+}
+
+/* CR: signed 32-bit register compare; only the CC thunk is updated, no
+   register is written. */
+static const HChar *
+s390_irgen_CR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cr";
+}
+
+/* CGR: signed 64-bit register compare. */
+static const HChar *
+s390_irgen_CGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cgr";
+}
+
+/* CGFR: signed compare of 64-bit r1 against the sign-extended low word
+   of r2. */
+static const HChar *
+s390_irgen_CGFR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cgfr";
+}
+
+/* C: signed 32-bit compare of r1 against a storage operand. */
+static const HChar *
+s390_irgen_C(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "c";
+}
+
+/* CY: identical IR to C (long-displacement encoding). */
+static const HChar *
+s390_irgen_CY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cy";
+}
+
+/* CG: signed 64-bit compare of r1 against a storage operand. */
+static const HChar *
+s390_irgen_CG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cg";
+}
+
+/* CGF: signed compare of 64-bit r1 against a sign-extended 32-bit
+   storage operand. */
+static const HChar *
+s390_irgen_CGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cgf";
+}
+
+/* CFI: signed compare of the low word of r1 against a 32-bit
+   immediate. */
+static const HChar *
+s390_irgen_CFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = (Int)i2;
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32,
+                       mkU32((UInt)op2)));
+
+   return "cfi";
+}
+
+/* CGFI: signed compare of 64-bit r1 against a sign-extended 32-bit
+   immediate. */
+static const HChar *
+s390_irgen_CGFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Long op2;
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (Long)(Int)i2;
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I64,
+                       mkU64((ULong)op2)));
+
+   return "cgfi";
+}
+
+/* CRL: signed 32-bit compare of r1 against a PC-relative storage operand
+   at current address + 2 * sign-extended offset. */
+static const HChar *
+s390_irgen_CRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
+          i2 << 1))));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "crl";
+}
+
+/* CGRL: 64-bit variant of CRL. */
+static const HChar *
+s390_irgen_CGRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
+          i2 << 1))));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cgrl";
+}
+
+/* CGFRL: compare 64-bit r1 against a sign-extended 32-bit PC-relative
+   storage operand. */
+static const HChar *
+s390_irgen_CGFRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkU64(guest_IA_curr_instr +
+          ((ULong)(Long)(Int)i2 << 1)))));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cgfrl";
+}
+
+/* CRB: compare (signed, 32-bit) and branch to an effective address.
+   Mask m3 == 0 never branches (no-op), m3 == 14 always branches;
+   otherwise branch iff the icc computed for the mask is nonzero.
+   The condition code itself is not updated by this instruction. */
+static const HChar *
+s390_irgen_CRB(UChar r1, UChar r2, UChar m3, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         assign(op2, get_gpr_w1(r2));
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE,
+                                              op1, op2));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond),
+                                          mkU32(0)), mkexpr(op4addr));
+      }
+   }
+
+   return "crb";
+}
+
+/* CGRB: 64-bit variant of CRB. */
+static const HChar *
+s390_irgen_CGRB(UChar r1, UChar r2, UChar m3, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         assign(op2, get_gpr_dw0(r2));
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE,
+                                              op1, op2));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond),
+                                          mkU32(0)), mkexpr(op4addr));
+      }
+   }
+
+   return "cgrb";
+}
+
+/* CRJ: compare (signed, 32-bit) and branch relative; same mask handling
+   as CRB, target = current address + 2 * sign-extended i4. */
+static const HChar *
+s390_irgen_CRJ(UChar r1, UChar r2, UShort i4, UChar m3)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto_and_chase(
+                guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         assign(op2, get_gpr_w1(r2));
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE,
+                                              op1, op2));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+
+      }
+   }
+
+   return "crj";
+}
+
+/* CGRJ: 64-bit variant of CRJ. */
+static const HChar *
+s390_irgen_CGRJ(UChar r1, UChar r2, UShort i4, UChar m3)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto_and_chase(
+                guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         assign(op2, get_gpr_dw0(r2));
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE,
+                                              op1, op2));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+
+      }
+   }
+
+   return "cgrj";
+}
+
+/* CIB: compare the low word of r1 against a sign-extended 8-bit
+   immediate and branch to an effective address; mask handling as in
+   CRB. */
+static const HChar *
+s390_irgen_CIB(UChar r1, UChar m3, UChar i2, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         op2 = (Int)(Char)i2;
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE, op1,
+                                              mktemp(Ity_I32, mkU32((UInt)op2))));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                                    mkexpr(op4addr));
+      }
+   }
+
+   return "cib";
+}
+
+/* CGIB: 64-bit variant of CIB. */
+static const HChar *
+s390_irgen_CGIB(UChar r1, UChar m3, UChar i2, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Long op2;
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         op2 = (Long)(Char)i2;
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE, op1,
+                                              mktemp(Ity_I64, mkU64((ULong)op2))));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                                    mkexpr(op4addr));
+      }
+   }
+
+   return "cgib";
+}
+
+/* CIJ: compare the low word of r1 against a sign-extended 8-bit
+   immediate and branch relative; mask handling as in CRJ. */
+static const HChar *
+s390_irgen_CIJ(UChar r1, UChar m3, UShort i4, UChar i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         op2 = (Int)(Char)i2;
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE, op1,
+                                              mktemp(Ity_I32, mkU32((UInt)op2))));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+
+      }
+   }
+
+   return "cij";
+}
+
+/* CGIJ: 64-bit variant of CIJ. */
+static const HChar *
+s390_irgen_CGIJ(UChar r1, UChar m3, UShort i4, UChar i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Long op2;
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         op2 = (Long)(Char)i2;
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_SIGNED_COMPARE, op1,
+                                              mktemp(Ity_I64, mkU64((ULong)op2))));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+
+      }
+   }
+
+   return "cgij";
+}
+
+/* CH: signed compare of the low word of r1 against a sign-extended
+   halfword from storage. */
+static const HChar *
+s390_irgen_CH(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "ch";
+}
+
+/* CHY: identical IR to CH (long-displacement encoding). */
+static const HChar *
+s390_irgen_CHY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "chy";
+}
+
+/* CGH: signed compare of 64-bit r1 against a sign-extended halfword from
+   storage. */
+static const HChar *
+s390_irgen_CGH(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_16Sto64, load(Ity_I16, mkexpr(op2addr))));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cgh";
+}
+
+/* CHI: signed compare of the low word of r1 against a sign-extended
+   16-bit immediate. */
+static const HChar *
+s390_irgen_CHI(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = (Int)(Short)i2;
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32,
+                       mkU32((UInt)op2)));
+
+   return "chi";
+}
+
+/* CGHI: 64-bit variant of CHI. */
+static const HChar *
+s390_irgen_CGHI(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Long op2;
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (Long)(Short)i2;
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I64,
+                       mkU64((ULong)op2)));
+
+   return "cghi";
+}
+
+/* CHHSI: signed compare of a halfword in storage against a 16-bit
+   immediate. */
+static const HChar *
+s390_irgen_CHHSI(UShort i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   Short op2;
+
+   assign(op1, load(Ity_I16, mkexpr(op1addr)));
+   op2 = (Short)i2;
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I16,
+                       mkU16((UShort)op2)));
+
+   return "chhsi";
+}
+
+/* CHSI: signed compare of a 32-bit storage operand against a
+   sign-extended 16-bit immediate. */
+static const HChar *
+s390_irgen_CHSI(UShort i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+
+   assign(op1, load(Ity_I32, mkexpr(op1addr)));
+   op2 = (Int)(Short)i2;
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32,
+                       mkU32((UInt)op2)));
+
+   return "chsi";
+}
+
+/* CGHSI: 64-bit variant of CHSI. */
+static const HChar *
+s390_irgen_CGHSI(UShort i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Long op2;
+
+   assign(op1, load(Ity_I64, mkexpr(op1addr)));
+   op2 = (Long)(Short)i2;
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I64,
+                       mkU64((ULong)op2)));
+
+   return "cghsi";
+}
+
+/* CHRL: signed compare of the low word of r1 against a sign-extended
+   halfword loaded PC-relative. */
+static const HChar *
+s390_irgen_CHRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkU64(guest_IA_curr_instr +
+          ((ULong)(Long)(Int)i2 << 1)))));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "chrl";
+}
+
+/* CGHRL: 64-bit variant of CHRL. */
+static const HChar *
+s390_irgen_CGHRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_16Sto64, load(Ity_I16, mkU64(guest_IA_curr_instr +
+          ((ULong)(Long)(Int)i2 << 1)))));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "cghrl";
+}
+
+/* CHHR: signed compare of the high words of r1 and r2. */
+static const HChar *
+s390_irgen_CHHR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   assign(op2, get_gpr_w0(r2));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "chhr";
+}
+
+/* CHLR: signed compare of the high word of r1 against the low word of
+   r2. */
+static const HChar *
+s390_irgen_CHLR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   assign(op2, get_gpr_w1(r2));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "chlr";
+}
+
+/* CHF: signed compare of the high word of r1 against a 32-bit storage
+   operand. */
+static const HChar *
+s390_irgen_CHF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, op2);
+
+   return "chf";
+}
+
+/* CIH: signed compare of the high word of r1 against a 32-bit
+   immediate. */
+static const HChar *
+s390_irgen_CIH(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = (Int)i2;
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_COMPARE, op1, mktemp(Ity_I32,
+                       mkU32((UInt)op2)));
+
+   return "cih";
+}
+
+static const HChar *
+s390_irgen_CLR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clr";
+}
+
+static const HChar *
+s390_irgen_CLGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgr";
+}
+
+static const HChar *
+s390_irgen_CLGFR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, get_gpr_w1(r2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgfr";
+}
+
+static const HChar *
+s390_irgen_CL(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "cl";
+}
+
+static const HChar *
+s390_irgen_CLY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "cly";
+}
+
+static const HChar *
+s390_irgen_CLG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clg";
+}
+
+static const HChar *
+s390_irgen_CLGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkexpr(op2addr))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgf";
+}
+
+static const HChar *
+s390_irgen_CLFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I32,
+                       mkU32(op2)));
+
+   return "clfi";
+}
+
+static const HChar *
+s390_irgen_CLGFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (ULong)i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I64,
+                       mkU64(op2)));
+
+   return "clgfi";
+}
+
+/* CLI: unsigned compare of one byte in storage against the immediate i2.
+   Note the operand roles: op1 is the storage byte, op2 the immediate. */
+static const HChar *
+s390_irgen_CLI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I8,
+                       mkU8(op2)));
+
+   return "cli";
+}
+
+/* CLIY: long-displacement form of CLI; generates identical IR. */
+static const HChar *
+s390_irgen_CLIY(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I8,
+                       mkU8(op2)));
+
+   return "cliy";
+}
+
+/* CLFHSI: unsigned compare of a 32-bit storage operand against the
+   16-bit immediate i2 zero-extended to 32 bits. */
+static const HChar *
+s390_irgen_CLFHSI(UShort i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+
+   assign(op1, load(Ity_I32, mkexpr(op1addr)));
+   op2 = (UInt)i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I32,
+                       mkU32(op2)));
+
+   return "clfhsi";
+}
+
+/* CLGHSI: unsigned compare of a 64-bit storage operand against i2
+   zero-extended to 64 bits. */
+static const HChar *
+s390_irgen_CLGHSI(UShort i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+
+   assign(op1, load(Ity_I64, mkexpr(op1addr)));
+   op2 = (ULong)i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I64,
+                       mkU64(op2)));
+
+   return "clghsi";
+}
+
+/* CLHHSI: unsigned compare of a 16-bit storage operand against the
+   16-bit immediate i2. */
+static const HChar *
+s390_irgen_CLHHSI(UShort i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+
+   assign(op1, load(Ity_I16, mkexpr(op1addr)));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I16,
+                       mkU16(op2)));
+
+   return "clhhsi";
+}
+
+/* CLRL: 32-bit unsigned compare, relative-long form.  The second operand
+   is loaded from guest_IA_curr_instr + sign-extended i2 halfwords. */
+static const HChar *
+s390_irgen_CLRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
+          i2 << 1))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clrl";
+}
+
+/* CLGRL: 64-bit unsigned compare against a relative-long doubleword. */
+static const HChar *
+s390_irgen_CLGRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
+          i2 << 1))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgrl";
+}
+
+/* CLGFRL: 64-bit unsigned compare against a relative-long 32-bit operand,
+   zero-extended to 64 bits. */
+static const HChar *
+s390_irgen_CLGFRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkU64(guest_IA_curr_instr +
+          ((ULong)(Long)(Int)i2 << 1)))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clgfrl";
+}
+
+/* CLHRL: 32-bit unsigned compare against a relative-long halfword,
+   zero-extended to 32 bits. */
+static const HChar *
+s390_irgen_CLHRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, unop(Iop_16Uto32, load(Ity_I16, mkU64(guest_IA_curr_instr +
+          ((ULong)(Long)(Int)i2 << 1)))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clhrl";
+}
+
+/* CLGHRL: 64-bit unsigned compare against a relative-long halfword,
+   zero-extended to 64 bits. */
+static const HChar *
+s390_irgen_CLGHRL(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_16Uto64, load(Ity_I16, mkU64(guest_IA_curr_instr +
+          ((ULong)(Long)(Int)i2 << 1)))));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clghrl";
+}
+
+/* CLRB: 32-bit unsigned compare-and-branch (register form, computed
+   branch target).  m3 == 0 never branches (no IR emitted); m3 == 14
+   branches unconditionally; otherwise branch when the mask condition
+   holds for the comparison. */
+static const HChar *
+s390_irgen_CLRB(UChar r1, UChar r2, UChar m3, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         assign(op2, get_gpr_w1(r2));
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE,
+                                              op1, op2));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                                    mkexpr(op4addr));
+      }
+   }
+
+   return "clrb";
+}
+
+/* CLGRB: 64-bit variant of CLRB. */
+static const HChar *
+s390_irgen_CLGRB(UChar r1, UChar r2, UChar m3, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         assign(op2, get_gpr_dw0(r2));
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE,
+                                              op1, op2));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                                    mkexpr(op4addr));
+      }
+   }
+
+   return "clgrb";
+}
+
+/* CLRJ: 32-bit unsigned compare-and-branch-relative.  The target is
+   guest_IA_curr_instr + sign-extended i4 halfwords; m3 == 0 never
+   branches, m3 == 14 branches unconditionally (with chasing). */
+static const HChar *
+s390_irgen_CLRJ(UChar r1, UChar r2, UShort i4, UChar m3)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         assign(op2, get_gpr_w1(r2));
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE,
+                                              op1, op2));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+
+      }
+   }
+
+   return "clrj";
+}
+
+/* CLGRJ: 64-bit variant of CLRJ. */
+static const HChar *
+s390_irgen_CLGRJ(UChar r1, UChar r2, UShort i4, UChar m3)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         assign(op2, get_gpr_dw0(r2));
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE,
+                                              op1, op2));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+
+      }
+   }
+
+   return "clgrj";
+}
+
+/* CLIB: 32-bit unsigned compare-immediate-and-branch (computed target).
+   The 8-bit immediate i2 is zero-extended; m3 semantics as in CLRB. */
+static const HChar *
+s390_irgen_CLIB(UChar r1, UChar m3, UChar i2, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         op2 = (UInt)i2;
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE, op1,
+                                              mktemp(Ity_I32, mkU32(op2))));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                                    mkexpr(op4addr));
+      }
+   }
+
+   return "clib";
+}
+
+/* CLGIB: 64-bit variant of CLIB; i2 is zero-extended to 64 bits. */
+static const HChar *
+s390_irgen_CLGIB(UChar r1, UChar m3, UChar i2, IRTemp op4addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto(mkexpr(op4addr));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         op2 = (ULong)i2;
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE, op1,
+                                              mktemp(Ity_I64, mkU64(op2))));
+         if_condition_goto_computed(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                                    mkexpr(op4addr));
+      }
+   }
+
+   return "clgib";
+}
+
+/* CLIJ: 32-bit unsigned compare-immediate-and-branch-relative.
+   Target is guest_IA_curr_instr + sign-extended i4 halfwords. */
+static const HChar *
+s390_irgen_CLIJ(UChar r1, UChar m3, UShort i4, UChar i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      } else {
+         assign(op1, get_gpr_w1(r1));
+         op2 = (UInt)i2;
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE, op1,
+                                              mktemp(Ity_I32, mkU32(op2))));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+
+      }
+   }
+
+   return "clij";
+}
+
+/* CLGIJ: 64-bit variant of CLIJ. */
+static const HChar *
+s390_irgen_CLGIJ(UChar r1, UChar m3, UShort i4, UChar i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+   IRTemp cond = newTemp(Ity_I32);
+
+   if (m3 == 0) {
+   } else {
+      if (m3 == 14) {
+         always_goto_and_chase(guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+      } else {
+         assign(op1, get_gpr_dw0(r1));
+         op2 = (ULong)i2;
+         assign(cond, s390_call_calculate_icc(m3, S390_CC_OP_UNSIGNED_COMPARE, op1,
+                                              mktemp(Ity_I64, mkU64(op2))));
+         if_condition_goto(binop(Iop_CmpNE32, mkexpr(cond), mkU32(0)),
+                           guest_IA_curr_instr + ((ULong)(Long)(Short)i4 << 1));
+
+      }
+   }
+
+   return "clgij";
+}
+
+/* CLM: compare logical under mask.  Each set bit in the 4-bit mask r3
+   selects one byte of r1's low word (b0..b3 = bytes 4..7 of the gpr) and
+   one consecutive byte from storage (c0..c3).  Unselected bytes are
+   zeroed on both sides, then both sides are packed into 32-bit words
+   and compared unsigned.  n counts storage bytes consumed so far. */
+static const HChar *
+s390_irgen_CLM(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp b0 = newTemp(Ity_I32);
+   IRTemp b1 = newTemp(Ity_I32);
+   IRTemp b2 = newTemp(Ity_I32);
+   IRTemp b3 = newTemp(Ity_I32);
+   IRTemp c0 = newTemp(Ity_I32);
+   IRTemp c1 = newTemp(Ity_I32);
+   IRTemp c2 = newTemp(Ity_I32);
+   IRTemp c3 = newTemp(Ity_I32);
+   UChar n;
+
+   n = 0;
+   if ((r3 & 8) != 0) {
+      assign(b0, unop(Iop_8Uto32, get_gpr_b4(r1)));
+      assign(c0, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+      n = n + 1;
+   } else {
+      assign(b0, mkU32(0));
+      assign(c0, mkU32(0));
+   }
+   if ((r3 & 4) != 0) {
+      assign(b1, unop(Iop_8Uto32, get_gpr_b5(r1)));
+      assign(c1, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr),
+             mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b1, mkU32(0));
+      assign(c1, mkU32(0));
+   }
+   if ((r3 & 2) != 0) {
+      assign(b2, unop(Iop_8Uto32, get_gpr_b6(r1)));
+      assign(c2, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr),
+             mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b2, mkU32(0));
+      assign(c2, mkU32(0));
+   }
+   if ((r3 & 1) != 0) {
+      assign(b3, unop(Iop_8Uto32, get_gpr_b7(r1)));
+      assign(c3, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr),
+             mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b3, mkU32(0));
+      assign(c3, mkU32(0));
+   }
+   /* Pack selected bytes big-endian into 32-bit comparands. */
+   assign(op1, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32,
+          mkexpr(b0), mkU8(24)), binop(Iop_Shl32, mkexpr(b1), mkU8(16))),
+          binop(Iop_Shl32, mkexpr(b2), mkU8(8))), mkexpr(b3)));
+   assign(op2, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32,
+          mkexpr(c0), mkU8(24)), binop(Iop_Shl32, mkexpr(c1), mkU8(16))),
+          binop(Iop_Shl32, mkexpr(c2), mkU8(8))), mkexpr(c3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clm";
+}
+
+/* CLMY: long-displacement form of CLM; generates identical IR. */
+static const HChar *
+s390_irgen_CLMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp b0 = newTemp(Ity_I32);
+   IRTemp b1 = newTemp(Ity_I32);
+   IRTemp b2 = newTemp(Ity_I32);
+   IRTemp b3 = newTemp(Ity_I32);
+   IRTemp c0 = newTemp(Ity_I32);
+   IRTemp c1 = newTemp(Ity_I32);
+   IRTemp c2 = newTemp(Ity_I32);
+   IRTemp c3 = newTemp(Ity_I32);
+   UChar n;
+
+   n = 0;
+   if ((r3 & 8) != 0) {
+      assign(b0, unop(Iop_8Uto32, get_gpr_b4(r1)));
+      assign(c0, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+      n = n + 1;
+   } else {
+      assign(b0, mkU32(0));
+      assign(c0, mkU32(0));
+   }
+   if ((r3 & 4) != 0) {
+      assign(b1, unop(Iop_8Uto32, get_gpr_b5(r1)));
+      assign(c1, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr),
+             mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b1, mkU32(0));
+      assign(c1, mkU32(0));
+   }
+   if ((r3 & 2) != 0) {
+      assign(b2, unop(Iop_8Uto32, get_gpr_b6(r1)));
+      assign(c2, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr),
+             mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b2, mkU32(0));
+      assign(c2, mkU32(0));
+   }
+   if ((r3 & 1) != 0) {
+      assign(b3, unop(Iop_8Uto32, get_gpr_b7(r1)));
+      assign(c3, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr),
+             mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b3, mkU32(0));
+      assign(c3, mkU32(0));
+   }
+   /* Pack selected bytes big-endian into 32-bit comparands. */
+   assign(op1, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32,
+          mkexpr(b0), mkU8(24)), binop(Iop_Shl32, mkexpr(b1), mkU8(16))),
+          binop(Iop_Shl32, mkexpr(b2), mkU8(8))), mkexpr(b3)));
+   assign(op2, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32,
+          mkexpr(c0), mkU8(24)), binop(Iop_Shl32, mkexpr(c1), mkU8(16))),
+          binop(Iop_Shl32, mkexpr(c2), mkU8(8))), mkexpr(c3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clmy";
+}
+
+/* CLMH: as CLM, but the mask selects bytes of r1's HIGH word
+   (get_gpr_b0..b3, i.e. bytes 0..3 of the 64-bit gpr). */
+static const HChar *
+s390_irgen_CLMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp b0 = newTemp(Ity_I32);
+   IRTemp b1 = newTemp(Ity_I32);
+   IRTemp b2 = newTemp(Ity_I32);
+   IRTemp b3 = newTemp(Ity_I32);
+   IRTemp c0 = newTemp(Ity_I32);
+   IRTemp c1 = newTemp(Ity_I32);
+   IRTemp c2 = newTemp(Ity_I32);
+   IRTemp c3 = newTemp(Ity_I32);
+   UChar n;
+
+   n = 0;
+   if ((r3 & 8) != 0) {
+      assign(b0, unop(Iop_8Uto32, get_gpr_b0(r1)));
+      assign(c0, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+      n = n + 1;
+   } else {
+      assign(b0, mkU32(0));
+      assign(c0, mkU32(0));
+   }
+   if ((r3 & 4) != 0) {
+      assign(b1, unop(Iop_8Uto32, get_gpr_b1(r1)));
+      assign(c1, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr),
+             mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b1, mkU32(0));
+      assign(c1, mkU32(0));
+   }
+   if ((r3 & 2) != 0) {
+      assign(b2, unop(Iop_8Uto32, get_gpr_b2(r1)));
+      assign(c2, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr),
+             mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b2, mkU32(0));
+      assign(c2, mkU32(0));
+   }
+   if ((r3 & 1) != 0) {
+      assign(b3, unop(Iop_8Uto32, get_gpr_b3(r1)));
+      assign(c3, unop(Iop_8Uto32, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr),
+             mkU64(n)))));
+      n = n + 1;
+   } else {
+      assign(b3, mkU32(0));
+      assign(c3, mkU32(0));
+   }
+   /* Pack selected bytes big-endian into 32-bit comparands. */
+   assign(op1, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32,
+          mkexpr(b0), mkU8(24)), binop(Iop_Shl32, mkexpr(b1), mkU8(16))),
+          binop(Iop_Shl32, mkexpr(b2), mkU8(8))), mkexpr(b3)));
+   assign(op2, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Or32, binop(Iop_Shl32,
+          mkexpr(c0), mkU8(24)), binop(Iop_Shl32, mkexpr(c1), mkU8(16))),
+          binop(Iop_Shl32, mkexpr(c2), mkU8(8))), mkexpr(c3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clmh";
+}
+
+/* CLHHR: unsigned compare of the high words (w0) of r1 and r2. */
+static const HChar *
+s390_irgen_CLHHR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   assign(op2, get_gpr_w0(r2));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clhhr";
+}
+
+/* CLHLR: unsigned compare of r1's high word against r2's low word. */
+static const HChar *
+s390_irgen_CLHLR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   assign(op2, get_gpr_w1(r2));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clhlr";
+}
+
+/* CLHF: unsigned compare of r1's high word against a 32-bit storage
+   operand. */
+static const HChar *
+s390_irgen_CLHF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, op2);
+
+   return "clhf";
+}
+
+/* CLIH: unsigned compare of r1's high word against the immediate i2. */
+static const HChar *
+s390_irgen_CLIH(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = i2;
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_COMPARE, op1, mktemp(Ity_I32,
+                       mkU32(op2)));
+
+   return "clih";
+}
+
+/* CPYA: copy access register r2 to access register r1.  Emits its own
+   disassembly line when front-end tracing is enabled. */
+static const HChar *
+s390_irgen_CPYA(UChar r1, UChar r2)
+{
+   put_ar_w0(r1, get_ar_w0(r2));
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, AR, AR), "cpya", r1, r2);
+
+   return "cpya";
+}
+
+/* XR: 32-bit exclusive OR of r2 into r1.  r1 == r2 is special-cased to
+   a constant zero so no stale temp is read; CC set via BITWISE thunk. */
+static const HChar *
+s390_irgen_XR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   if (r1 == r2) {
+      /* x XOR x == 0; avoid reading the register at all. */
+      assign(result, mkU32(0));
+   } else {
+      assign(op1, get_gpr_w1(r1));
+      assign(op2, get_gpr_w1(r2));
+      assign(result, binop(Iop_Xor32, mkexpr(op1), mkexpr(op2)));
+   }
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "xr";
+}
+
+/* XGR: 64-bit exclusive OR of r2 into r1; same zeroing special case. */
+static const HChar *
+s390_irgen_XGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   if (r1 == r2) {
+      assign(result, mkU64(0));
+   } else {
+      assign(op1, get_gpr_dw0(r1));
+      assign(op2, get_gpr_dw0(r2));
+      assign(result, binop(Iop_Xor64, mkexpr(op1), mkexpr(op2)));
+   }
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "xgr";
+}
+
+/* XRK: three-operand 32-bit XOR: r1 = r2 ^ r3 (distinct-operands form). */
+static const HChar *
+s390_irgen_XRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Xor32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "xrk";
+}
+
+/* XGRK: three-operand 64-bit XOR: r1 = r2 ^ r3. */
+static const HChar *
+s390_irgen_XGRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Xor64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "xgrk";
+}
+
+/* X: 32-bit XOR of a storage operand into r1 word 1; CC via BITWISE. */
+static const HChar *
+s390_irgen_X(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Xor32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "x";
+}
+
+/* XY: long-displacement form of X; generates identical IR. */
+static const HChar *
+s390_irgen_XY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Xor32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "xy";
+}
+
+/* XG: 64-bit XOR of a storage doubleword into r1. */
+static const HChar *
+s390_irgen_XG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_Xor64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "xg";
+}
+
+/* XI: XOR the immediate byte i2 into a byte in storage (read-modify-
+   write; not atomic in this IR). */
+static const HChar *
+s390_irgen_XI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+   IRTemp result = newTemp(Ity_I8);
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   assign(result, binop(Iop_Xor8, mkexpr(op1), mkU8(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "xi";
+}
+
+/* XIY: long-displacement form of XI; generates identical IR. */
+static const HChar *
+s390_irgen_XIY(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+   IRTemp result = newTemp(Ity_I8);
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   assign(result, binop(Iop_Xor8, mkexpr(op1), mkU8(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "xiy";
+}
+
+/* XIHF: XOR the 32-bit immediate into r1's high word. */
+static const HChar *
+s390_irgen_XIHF(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Xor32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "xihf";
+}
+
+/* XILF: XOR the 32-bit immediate into r1's low word. */
+static const HChar *
+s390_irgen_XILF(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Xor32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "xilf";
+}
+
+/* EAR: extract access register r2 into the low word of gpr r1. */
+static const HChar *
+s390_irgen_EAR(UChar r1, UChar r2)
+{
+   put_gpr_w1(r1, get_ar_w0(r2));
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, GPR, AR), "ear", r1, r2);
+
+   return "ear";
+}
+
+/* IC: insert one byte from storage into byte 7 (lowest) of gpr r1;
+   the remaining bytes of r1 are untouched. */
+static const HChar *
+s390_irgen_IC(UChar r1, IRTemp op2addr)
+{
+   put_gpr_b7(r1, load(Ity_I8, mkexpr(op2addr)));
+
+   return "ic";
+}
+
+/* ICY: long-displacement form of IC; generates identical IR. */
+static const HChar *
+s390_irgen_ICY(UChar r1, IRTemp op2addr)
+{
+   put_gpr_b7(r1, load(Ity_I8, mkexpr(op2addr)));
+
+   return "icy";
+}
+
+/* ICM: insert characters under mask.  For each set mask bit the next
+   consecutive storage byte replaces the corresponding byte (4..7) of
+   r1's low word.  CC is derived from the resulting word plus the mask
+   via the INSERT_CHAR_MASK_32 thunk; n counts consumed storage bytes. */
+static const HChar *
+s390_irgen_ICM(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar n;
+   IRTemp result = newTemp(Ity_I32);
+   UInt mask;
+
+   n = 0;
+   mask = (UInt)r3;
+   if ((mask & 8) != 0) {
+      put_gpr_b4(r1, load(Ity_I8, mkexpr(op2addr)));
+      n = n + 1;
+   }
+   if ((mask & 4) != 0) {
+      put_gpr_b5(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+
+      n = n + 1;
+   }
+   if ((mask & 2) != 0) {
+      put_gpr_b6(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+
+      n = n + 1;
+   }
+   if ((mask & 1) != 0) {
+      put_gpr_b7(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+
+      n = n + 1;
+   }
+   /* Re-read the updated word so the CC thunk sees the merged value. */
+   assign(result, get_gpr_w1(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_INSERT_CHAR_MASK_32, result, mktemp(Ity_I32,
+                       mkU32(mask)));
+
+   return "icm";
+}
+
+/* ICMY: long-displacement form of ICM; generates identical IR. */
+static const HChar *
+s390_irgen_ICMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar n;
+   IRTemp result = newTemp(Ity_I32);
+   UInt mask;
+
+   n = 0;
+   mask = (UInt)r3;
+   if ((mask & 8) != 0) {
+      put_gpr_b4(r1, load(Ity_I8, mkexpr(op2addr)));
+      n = n + 1;
+   }
+   if ((mask & 4) != 0) {
+      put_gpr_b5(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+
+      n = n + 1;
+   }
+   if ((mask & 2) != 0) {
+      put_gpr_b6(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+
+      n = n + 1;
+   }
+   if ((mask & 1) != 0) {
+      put_gpr_b7(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+
+      n = n + 1;
+   }
+   assign(result, get_gpr_w1(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_INSERT_CHAR_MASK_32, result, mktemp(Ity_I32,
+                       mkU32(mask)));
+
+   return "icmy";
+}
+
+/* ICMH: as ICM, but inserts into bytes 0..3 (the high word) of r1. */
+static const HChar *
+s390_irgen_ICMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar n;
+   IRTemp result = newTemp(Ity_I32);
+   UInt mask;
+
+   n = 0;
+   mask = (UInt)r3;
+   if ((mask & 8) != 0) {
+      put_gpr_b0(r1, load(Ity_I8, mkexpr(op2addr)));
+      n = n + 1;
+   }
+   if ((mask & 4) != 0) {
+      put_gpr_b1(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+
+      n = n + 1;
+   }
+   if ((mask & 2) != 0) {
+      put_gpr_b2(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+
+      n = n + 1;
+   }
+   if ((mask & 1) != 0) {
+      put_gpr_b3(r1, load(Ity_I8, binop(Iop_Add64, mkexpr(op2addr), mkU64(n))));
+
+      n = n + 1;
+   }
+   assign(result, get_gpr_w0(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_INSERT_CHAR_MASK_32, result, mktemp(Ity_I32,
+                       mkU32(mask)));
+
+   return "icmh";
+}
+
+/* IIHF: insert the 32-bit immediate into r1's high word. */
+static const HChar *
+s390_irgen_IIHF(UChar r1, UInt i2)
+{
+   put_gpr_w0(r1, mkU32(i2));
+
+   return "iihf";
+}
+
+/* IIHH: insert the 16-bit immediate into halfword 0 of r1. */
+static const HChar *
+s390_irgen_IIHH(UChar r1, UShort i2)
+{
+   put_gpr_hw0(r1, mkU16(i2));
+
+   return "iihh";
+}
+
+/* IIHL: insert the 16-bit immediate into halfword 1 of r1. */
+static const HChar *
+s390_irgen_IIHL(UChar r1, UShort i2)
+{
+   put_gpr_hw1(r1, mkU16(i2));
+
+   return "iihl";
+}
+
+/* IILF: insert the 32-bit immediate into r1's low word. */
+static const HChar *
+s390_irgen_IILF(UChar r1, UInt i2)
+{
+   put_gpr_w1(r1, mkU32(i2));
+
+   return "iilf";
+}
+
+/* IILH: insert the 16-bit immediate into halfword 2 of r1. */
+static const HChar *
+s390_irgen_IILH(UChar r1, UShort i2)
+{
+   put_gpr_hw2(r1, mkU16(i2));
+
+   return "iilh";
+}
+
+/* IILL: insert the 16-bit immediate into halfword 3 of r1. */
+static const HChar *
+s390_irgen_IILL(UChar r1, UShort i2)
+{
+   put_gpr_hw3(r1, mkU16(i2));
+
+   return "iill";
+}
+
+/* LR: copy r2's low word into r1's low word. */
+static const HChar *
+s390_irgen_LR(UChar r1, UChar r2)
+{
+   put_gpr_w1(r1, get_gpr_w1(r2));
+
+   return "lr";
+}
+
+/* LGR: copy the full 64-bit r2 into r1. */
+static const HChar *
+s390_irgen_LGR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, get_gpr_dw0(r2));
+
+   return "lgr";
+}
+
+/* LGFR: sign-extend r2's low word into the 64-bit r1. */
+static const HChar *
+s390_irgen_LGFR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, unop(Iop_32Sto64, get_gpr_w1(r2)));
+
+   return "lgfr";
+}
+
+/* L: load a 32-bit storage operand into r1's low word. */
+static const HChar *
+s390_irgen_L(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, load(Ity_I32, mkexpr(op2addr)));
+
+   return "l";
+}
+
+/* LY: long-displacement form of L; generates identical IR. */
+static const HChar *
+s390_irgen_LY(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, load(Ity_I32, mkexpr(op2addr)));
+
+   return "ly";
+}
+
+/* LG: load a 64-bit storage operand into r1. */
+static const HChar *
+s390_irgen_LG(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, load(Ity_I64, mkexpr(op2addr)));
+
+   return "lg";
+}
+
+/* LGF: load a 32-bit storage operand sign-extended into r1. */
+static const HChar *
+s390_irgen_LGF(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+
+   return "lgf";
+}
+
+/* LGFI: load the 32-bit immediate sign-extended into r1. */
+static const HChar *
+s390_irgen_LGFI(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, mkU64((ULong)(Long)(Int)i2));
+
+   return "lgfi";
+}
+
+/* LRL: load word, relative-long.  The operand address is
+   guest_IA_curr_instr + sign-extended i2 halfwords. */
+static const HChar *
+s390_irgen_LRL(UChar r1, UInt i2)
+{
+   put_gpr_w1(r1, load(Ity_I32, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
+              i2 << 1))));
+
+   return "lrl";
+}
+
+/* LGRL: load doubleword, relative-long. */
+static const HChar *
+s390_irgen_LGRL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, load(Ity_I64, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)
+               i2 << 1))));
+
+   return "lgrl";
+}
+
+/* LGFRL: load word, relative-long, sign-extended to 64 bits. */
+static const HChar *
+s390_irgen_LGFRL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, unop(Iop_32Sto64, load(Ity_I32, mkU64(guest_IA_curr_instr +
+               ((ULong)(Long)(Int)i2 << 1)))));
+
+   return "lgfrl";
+}
+
+/* LA: load the effective address (no storage access) into r1. */
+static const HChar *
+s390_irgen_LA(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, mkexpr(op2addr));
+
+   return "la";
+}
+
+/* LAY: long-displacement form of LA; generates identical IR. */
+static const HChar *
+s390_irgen_LAY(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, mkexpr(op2addr));
+
+   return "lay";
+}
+
+/* LAE: load address extended.  Only the address load is modeled here;
+   any access-register side effects are not represented in this IR —
+   NOTE(review): confirm against the surrounding decoder. */
+static const HChar *
+s390_irgen_LAE(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, mkexpr(op2addr));
+
+   return "lae";
+}
+
+/* LAEY: long-displacement form of LAE; generates identical IR. */
+static const HChar *
+s390_irgen_LAEY(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, mkexpr(op2addr));
+
+   return "laey";
+}
+
+/* LARL: load the address guest_IA_curr_instr + sign-extended i2
+   halfwords (PC-relative) into r1. */
+static const HChar *
+s390_irgen_LARL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)));
+
+   return "larl";
+}
+
+/* The IR representation of LAA and friends is an approximation of what
+   happens natively. Essentially a loop containing a compare-and-swap is
+   constructed which will iterate until the CAS succeeds. As a consequence,
+   instrumenters may see more memory accesses than happen natively. See also
+   discussion here: https://bugs.kde.org/show_bug.cgi?id=306035 */
+
+/* Common 32-bit helper for LAA/LAAL: atomically add gpr r3 to the word
+   at op2addr via CAS, set CC from the addition (signed or unsigned per
+   is_signed), and return the old memory value in r1. */
+static void
+s390_irgen_load_and_add32(UChar r1, UChar r3, IRTemp op2addr, Bool is_signed)
+{
+   IRCAS *cas;
+   IRTemp old_mem = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Add32, mkexpr(op2), mkexpr(op3)));
+
+   /* Place the sum of second operand and third operand at the
+      second-operand location every time */
+   cas = mkIRCAS(IRTemp_INVALID, old_mem,
+                 Iend_BE, mkexpr(op2addr),
+                 NULL, mkexpr(op2), /* expected value */
+                 NULL, mkexpr(result)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* Set CC according to 32-bit addition */
+   if (is_signed) {
+      s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_32, op2, op3);
+   } else {
+      s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_32, op2, op3);
+   }
+
+   /* If old_mem contains the expected value, then the CAS succeeded.
+      Otherwise, it did not */
+   yield_if(binop(Iop_CmpNE32, mkexpr(old_mem), mkexpr(op2)));
+   put_gpr_w1(r1, mkexpr(old_mem));
+}
+
+/* Common 64-bit helper for LAAG/LAALG: atomically add gpr r3 to the
+   doubleword at op2addr via CAS, set CC from the addition, and return
+   the old memory value in r1. */
+static void
+s390_irgen_load_and_add64(UChar r1, UChar r3, IRTemp op2addr, Bool is_signed)
+{
+   IRCAS *cas;
+   IRTemp old_mem = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Add64, mkexpr(op2), mkexpr(op3)));
+
+   /* Place the sum of second operand and third operand at the
+      second-operand location every time */
+   cas = mkIRCAS(IRTemp_INVALID, old_mem,
+                 Iend_BE, mkexpr(op2addr),
+                 NULL, mkexpr(op2), /* expected value */
+                 NULL, mkexpr(result)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* Set CC according to 64-bit addition */
+   if (is_signed) {
+      s390_cc_thunk_putSS(S390_CC_OP_SIGNED_ADD_64, op2, op3);
+   } else {
+      s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_ADD_64, op2, op3);
+   }
+
+   /* If old_mem contains the expected value, then the CAS succeeded.
+      Otherwise, it did not */
+   yield_if(binop(Iop_CmpNE64, mkexpr(old_mem), mkexpr(op2)));
+   put_gpr_dw0(r1, mkexpr(old_mem));
+}
+
+/* Common 32-bit helper for LAN/LAX/LAO-style ops: atomically combine
+   gpr r3 with the word at op2addr using the given bitwise IROp via CAS,
+   set CC from the result, and return the old memory value in r1. */
+static void
+s390_irgen_load_and_bitwise32(UChar r1, UChar r3, IRTemp op2addr, IROp op)
+{
+   IRCAS *cas;
+   IRTemp old_mem = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(op, mkexpr(op2), mkexpr(op3)));
+
+   /* Place the combination of second operand and third operand at the
+      second-operand location every time */
+   cas = mkIRCAS(IRTemp_INVALID, old_mem,
+                 Iend_BE, mkexpr(op2addr),
+                 NULL, mkexpr(op2), /* expected value */
+                 NULL, mkexpr(result)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* Set CC according to bitwise operation */
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+
+   /* If old_mem contains the expected value, then the CAS succeeded.
+      Otherwise, it did not */
+   yield_if(binop(Iop_CmpNE32, mkexpr(old_mem), mkexpr(op2)));
+   put_gpr_w1(r1, mkexpr(old_mem));
+}
+
+/* 64-bit counterpart of s390_irgen_load_and_bitwise32 (LANG/LAXG etc.). */
+static void
+s390_irgen_load_and_bitwise64(UChar r1, UChar r3, IRTemp op2addr, IROp op)
+{
+   IRCAS *cas;
+   IRTemp old_mem = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(op, mkexpr(op2), mkexpr(op3)));
+
+   /* Place the combination of second operand and third operand at the
+      second-operand location every time */
+   cas = mkIRCAS(IRTemp_INVALID, old_mem,
+                 Iend_BE, mkexpr(op2addr),
+                 NULL, mkexpr(op2), /* expected value */
+                 NULL, mkexpr(result)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* Set CC according to bitwise operation */
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+
+   /* If old_mem contains the expected value, then the CAS succeeded.
+      Otherwise, it did not */
+   yield_if(binop(Iop_CmpNE64, mkexpr(old_mem), mkexpr(op2)));
+   put_gpr_dw0(r1, mkexpr(old_mem));
+}
+
+static const HChar *
+s390_irgen_LAA(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_add32(r1, r3, op2addr, True /* is_signed */);
+
+   return "laa";
+}
+
+static const HChar *
+s390_irgen_LAAG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_add64(r1, r3, op2addr, True /* is_signed */);
+
+   return "laag";
+}
+
+static const HChar *
+s390_irgen_LAAL(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_add32(r1, r3, op2addr, False /* is_signed */);
+
+   return "laal";
+}
+
+static const HChar *
+s390_irgen_LAALG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_add64(r1, r3, op2addr, False /* is_signed */);
+
+   return "laalg";
+}
+
+static const HChar *
+s390_irgen_LAN(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_bitwise32(r1, r3, op2addr, Iop_And32);
+
+   return "lan";
+}
+
+static const HChar *
+s390_irgen_LANG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_bitwise64(r1, r3, op2addr, Iop_And64);
+
+   return "lang";
+}
+
+static const HChar *
+s390_irgen_LAX(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_bitwise32(r1, r3, op2addr, Iop_Xor32);
+
+   return "lax";
+}
+
+static const HChar *
+s390_irgen_LAXG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_bitwise64(r1, r3, op2addr, Iop_Xor64);
+
+   return "laxg";
+}
+
+static const HChar *
+s390_irgen_LAO(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_bitwise32(r1, r3, op2addr, Iop_Or32);
+
+   return "lao";
+}
+
+static const HChar *
+s390_irgen_LAOG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   s390_irgen_load_and_bitwise64(r1, r3, op2addr, Iop_Or64);
+
+   return "laog";
+}
+
+static const HChar *
+s390_irgen_LTR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   put_gpr_w1(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltr";
+}
+
+static const HChar *
+s390_irgen_LTGR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   put_gpr_dw0(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltgr";
+}
+
+static const HChar *
+s390_irgen_LTGFR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+   put_gpr_dw0(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltgfr";
+}
+
+static const HChar *
+s390_irgen_LT(UChar r1, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   put_gpr_w1(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "lt";
+}
+
+static const HChar *
+s390_irgen_LTG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   put_gpr_dw0(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltg";
+}
+
+static const HChar *
+s390_irgen_LTGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+   put_gpr_dw0(r1, mkexpr(op2));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, op2);
+
+   return "ltgf";
+}
+
+static const HChar *
+s390_irgen_LBR(UChar r1, UChar r2)
+{
+   put_gpr_w1(r1, unop(Iop_8Sto32, get_gpr_b7(r2)));
+
+   return "lbr";
+}
+
+static const HChar *
+s390_irgen_LGBR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, unop(Iop_8Sto64, get_gpr_b7(r2)));
+
+   return "lgbr";
+}
+
+static const HChar *
+s390_irgen_LB(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, unop(Iop_8Sto32, load(Ity_I8, mkexpr(op2addr))));
+
+   return "lb";
+}
+
+static const HChar *
+s390_irgen_LGB(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, unop(Iop_8Sto64, load(Ity_I8, mkexpr(op2addr))));
+
+   return "lgb";
+}
+
+static const HChar *
+s390_irgen_LBH(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w0(r1, unop(Iop_8Sto32, load(Ity_I8, mkexpr(op2addr))));
+
+   return "lbh";
+}
+
+static const HChar *
+s390_irgen_LCR(UChar r1, UChar r2)
+{
+   Int op1;
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   op1 = 0;
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_Sub32, mkU32((UInt)op1), mkexpr(op2)));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, mktemp(Ity_I32, mkU32((UInt)
+                       op1)), op2);
+
+   return "lcr";
+}
+
+static const HChar *
+s390_irgen_LCGR(UChar r1, UChar r2)
+{
+   Long op1;
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   op1 = 0ULL;
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, binop(Iop_Sub64, mkU64((ULong)op1), mkexpr(op2)));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, mktemp(Ity_I64, mkU64((ULong)
+                       op1)), op2);
+
+   return "lcgr";
+}
+
+static const HChar *
+s390_irgen_LCGFR(UChar r1, UChar r2)
+{
+   Long op1;
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   op1 = 0ULL;
+   assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+   assign(result, binop(Iop_Sub64, mkU64((ULong)op1), mkexpr(op2)));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, mktemp(Ity_I64, mkU64((ULong)
+                       op1)), op2);
+
+   return "lcgfr";
+}
+
+static const HChar *
+s390_irgen_LHR(UChar r1, UChar r2)
+{
+   put_gpr_w1(r1, unop(Iop_16Sto32, get_gpr_hw3(r2)));
+
+   return "lhr";
+}
+
+static const HChar *
+s390_irgen_LGHR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, unop(Iop_16Sto64, get_gpr_hw3(r2)));
+
+   return "lghr";
+}
+
+static const HChar *
+s390_irgen_LH(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+
+   return "lh";
+}
+
+static const HChar *
+s390_irgen_LHY(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+
+   return "lhy";
+}
+
+static const HChar *
+s390_irgen_LGH(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, unop(Iop_16Sto64, load(Ity_I16, mkexpr(op2addr))));
+
+   return "lgh";
+}
+
+static const HChar *
+s390_irgen_LHI(UChar r1, UShort i2)
+{
+   put_gpr_w1(r1, mkU32((UInt)(Int)(Short)i2));
+
+   return "lhi";
+}
+
+static const HChar *
+s390_irgen_LGHI(UChar r1, UShort i2)
+{
+   put_gpr_dw0(r1, mkU64((ULong)(Long)(Short)i2));
+
+   return "lghi";
+}
+
+static const HChar *
+s390_irgen_LHRL(UChar r1, UInt i2)
+{
+   put_gpr_w1(r1, unop(Iop_16Sto32, load(Ity_I16, mkU64(guest_IA_curr_instr +
+              ((ULong)(Long)(Int)i2 << 1)))));
+
+   return "lhrl";
+}
+
+static const HChar *
+s390_irgen_LGHRL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, unop(Iop_16Sto64, load(Ity_I16, mkU64(guest_IA_curr_instr +
+               ((ULong)(Long)(Int)i2 << 1)))));
+
+   return "lghrl";
+}
+
+static const HChar *
+s390_irgen_LHH(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w0(r1, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+
+   return "lhh";
+}
+
+static const HChar *
+s390_irgen_LFH(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w0(r1, load(Ity_I32, mkexpr(op2addr)));
+
+   return "lfh";
+}
+
+static const HChar *
+s390_irgen_LLGFR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, unop(Iop_32Uto64, get_gpr_w1(r2)));
+
+   return "llgfr";
+}
+
+static const HChar *
+s390_irgen_LLGF(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, unop(Iop_32Uto64, load(Ity_I32, mkexpr(op2addr))));
+
+   return "llgf";
+}
+
+static const HChar *
+s390_irgen_LLGFRL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, unop(Iop_32Uto64, load(Ity_I32, mkU64(guest_IA_curr_instr +
+               ((ULong)(Long)(Int)i2 << 1)))));
+
+   return "llgfrl";
+}
+
+static const HChar *
+s390_irgen_LLCR(UChar r1, UChar r2)
+{
+   put_gpr_w1(r1, unop(Iop_8Uto32, get_gpr_b7(r2)));
+
+   return "llcr";
+}
+
+static const HChar *
+s390_irgen_LLGCR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, unop(Iop_8Uto64, get_gpr_b7(r2)));
+
+   return "llgcr";
+}
+
+static const HChar *
+s390_irgen_LLC(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+
+   return "llc";
+}
+
+static const HChar *
+s390_irgen_LLGC(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, unop(Iop_8Uto64, load(Ity_I8, mkexpr(op2addr))));
+
+   return "llgc";
+}
+
+static const HChar *
+s390_irgen_LLCH(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w0(r1, unop(Iop_8Uto32, load(Ity_I8, mkexpr(op2addr))));
+
+   return "llch";
+}
+
+static const HChar *
+s390_irgen_LLHR(UChar r1, UChar r2)
+{
+   put_gpr_w1(r1, unop(Iop_16Uto32, get_gpr_hw3(r2)));
+
+   return "llhr";
+}
+
+static const HChar *
+s390_irgen_LLGHR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, unop(Iop_16Uto64, get_gpr_hw3(r2)));
+
+   return "llghr";
+}
+
+static const HChar *
+s390_irgen_LLH(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, unop(Iop_16Uto32, load(Ity_I16, mkexpr(op2addr))));
+
+   return "llh";
+}
+
+static const HChar *
+s390_irgen_LLGH(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, unop(Iop_16Uto64, load(Ity_I16, mkexpr(op2addr))));
+
+   return "llgh";
+}
+
+static const HChar *
+s390_irgen_LLHRL(UChar r1, UInt i2)
+{
+   put_gpr_w1(r1, unop(Iop_16Uto32, load(Ity_I16, mkU64(guest_IA_curr_instr +
+              ((ULong)(Long)(Int)i2 << 1)))));
+
+   return "llhrl";
+}
+
+static const HChar *
+s390_irgen_LLGHRL(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, unop(Iop_16Uto64, load(Ity_I16, mkU64(guest_IA_curr_instr +
+               ((ULong)(Long)(Int)i2 << 1)))));
+
+   return "llghrl";
+}
+
+static const HChar *
+s390_irgen_LLHH(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w0(r1, unop(Iop_16Uto32, load(Ity_I16, mkexpr(op2addr))));
+
+   return "llhh";
+}
+
+static const HChar *
+s390_irgen_LLIHF(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, mkU64(((ULong)i2) << 32));
+
+   return "llihf";
+}
+
+static const HChar *
+s390_irgen_LLIHH(UChar r1, UShort i2)
+{
+   put_gpr_dw0(r1, mkU64(((ULong)i2) << 48));
+
+   return "llihh";
+}
+
+static const HChar *
+s390_irgen_LLIHL(UChar r1, UShort i2)
+{
+   put_gpr_dw0(r1, mkU64(((ULong)i2) << 32));
+
+   return "llihl";
+}
+
+static const HChar *
+s390_irgen_LLILF(UChar r1, UInt i2)
+{
+   put_gpr_dw0(r1, mkU64(i2));
+
+   return "llilf";
+}
+
+static const HChar *
+s390_irgen_LLILH(UChar r1, UShort i2)
+{
+   put_gpr_dw0(r1, mkU64(((ULong)i2) << 16));
+
+   return "llilh";
+}
+
+static const HChar *
+s390_irgen_LLILL(UChar r1, UShort i2)
+{
+   put_gpr_dw0(r1, mkU64(i2));
+
+   return "llill";
+}
+
+static const HChar *
+s390_irgen_LLGTR(UChar r1, UChar r2)
+{
+   put_gpr_dw0(r1, unop(Iop_32Uto64, binop(Iop_And32, get_gpr_w1(r2),
+               mkU32(2147483647))));
+
+   return "llgtr";
+}
+
+static const HChar *
+s390_irgen_LLGT(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, unop(Iop_32Uto64, binop(Iop_And32, load(Ity_I32,
+               mkexpr(op2addr)), mkU32(2147483647))));
+
+   return "llgt";
+}
+
+static const HChar *
+s390_irgen_LNR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(result, mkite(binop(Iop_CmpLE32S, mkexpr(op2), mkU32(0)), mkexpr(op2),
+          binop(Iop_Sub32, mkU32(0), mkexpr(op2))));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putS(S390_CC_OP_BITWISE, result);
+
+   return "lnr";
+}
+
+static const HChar *
+s390_irgen_LNGR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, mkite(binop(Iop_CmpLE64S, mkexpr(op2), mkU64(0)), mkexpr(op2),
+          binop(Iop_Sub64, mkU64(0), mkexpr(op2))));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putS(S390_CC_OP_BITWISE, result);
+
+   return "lngr";
+}
+
+static const HChar *
+s390_irgen_LNGFR(UChar r1, UChar r2 __attribute__((unused)))
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, unop(Iop_32Sto64, get_gpr_w1(r1)));
+   assign(result, mkite(binop(Iop_CmpLE64S, mkexpr(op2), mkU64(0)), mkexpr(op2),
+          binop(Iop_Sub64, mkU64(0), mkexpr(op2))));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putS(S390_CC_OP_BITWISE, result);
+
+   return "lngfr";
+}
+
+static const HChar *
+s390_irgen_LOCR(UChar m3, UChar r1, UChar r2)
+{
+   next_insn_if(binop(Iop_CmpEQ32, s390_call_calculate_cond(m3), mkU32(0)));
+   put_gpr_w1(r1, get_gpr_w1(r2));
+
+   return "locr";
+}
+
+static const HChar *
+s390_irgen_LOCGR(UChar m3, UChar r1, UChar r2)
+{
+   next_insn_if(binop(Iop_CmpEQ32, s390_call_calculate_cond(m3), mkU32(0)));
+   put_gpr_dw0(r1, get_gpr_dw0(r2));
+
+   return "locgr";
+}
+
+static const HChar *
+s390_irgen_LOC(UChar r1, IRTemp op2addr)
+{
+   /* condition is checked in format handler */
+   put_gpr_w1(r1, load(Ity_I32, mkexpr(op2addr)));
+
+   return "loc";
+}
+
+static const HChar *
+s390_irgen_LOCG(UChar r1, IRTemp op2addr)
+{
+   /* condition is checked in format handler */
+   put_gpr_dw0(r1, load(Ity_I64, mkexpr(op2addr)));
+
+   return "locg";
+}
+
+static const HChar *
+s390_irgen_LPQ(UChar r1, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, load(Ity_I64, mkexpr(op2addr)));
+   put_gpr_dw0(r1 + 1, load(Ity_I64, binop(Iop_Add64, mkexpr(op2addr), mkU64(8))
+               ));
+
+   return "lpq";
+}
+
+static const HChar *
+s390_irgen_LPR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(result, mkite(binop(Iop_CmpLT32S, mkexpr(op2), mkU32(0)),
+          binop(Iop_Sub32, mkU32(0), mkexpr(op2)), mkexpr(op2)));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_POSITIVE_32, op2);
+
+   return "lpr";
+}
+
+static const HChar *
+s390_irgen_LPGR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, mkite(binop(Iop_CmpLT64S, mkexpr(op2), mkU64(0)),
+          binop(Iop_Sub64, mkU64(0), mkexpr(op2)), mkexpr(op2)));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_POSITIVE_64, op2);
+
+   return "lpgr";
+}
+
+static const HChar *
+s390_irgen_LPGFR(UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+   assign(result, mkite(binop(Iop_CmpLT64S, mkexpr(op2), mkU64(0)),
+          binop(Iop_Sub64, mkU64(0), mkexpr(op2)), mkexpr(op2)));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_POSITIVE_64, op2);
+
+   return "lpgfr";
+}
+
+static const HChar *
+s390_irgen_LRVR(UChar r1, UChar r2)
+{
+   IRTemp b0 = newTemp(Ity_I8);
+   IRTemp b1 = newTemp(Ity_I8);
+   IRTemp b2 = newTemp(Ity_I8);
+   IRTemp b3 = newTemp(Ity_I8);
+
+   assign(b3, get_gpr_b7(r2));
+   assign(b2, get_gpr_b6(r2));
+   assign(b1, get_gpr_b5(r2));
+   assign(b0, get_gpr_b4(r2));
+   put_gpr_b4(r1, mkexpr(b3));
+   put_gpr_b5(r1, mkexpr(b2));
+   put_gpr_b6(r1, mkexpr(b1));
+   put_gpr_b7(r1, mkexpr(b0));
+
+   return "lrvr";
+}
+
+static const HChar *
+s390_irgen_LRVGR(UChar r1, UChar r2)
+{
+   IRTemp b0 = newTemp(Ity_I8);
+   IRTemp b1 = newTemp(Ity_I8);
+   IRTemp b2 = newTemp(Ity_I8);
+   IRTemp b3 = newTemp(Ity_I8);
+   IRTemp b4 = newTemp(Ity_I8);
+   IRTemp b5 = newTemp(Ity_I8);
+   IRTemp b6 = newTemp(Ity_I8);
+   IRTemp b7 = newTemp(Ity_I8);
+
+   assign(b7, get_gpr_b7(r2));
+   assign(b6, get_gpr_b6(r2));
+   assign(b5, get_gpr_b5(r2));
+   assign(b4, get_gpr_b4(r2));
+   assign(b3, get_gpr_b3(r2));
+   assign(b2, get_gpr_b2(r2));
+   assign(b1, get_gpr_b1(r2));
+   assign(b0, get_gpr_b0(r2));
+   put_gpr_b0(r1, mkexpr(b7));
+   put_gpr_b1(r1, mkexpr(b6));
+   put_gpr_b2(r1, mkexpr(b5));
+   put_gpr_b3(r1, mkexpr(b4));
+   put_gpr_b4(r1, mkexpr(b3));
+   put_gpr_b5(r1, mkexpr(b2));
+   put_gpr_b6(r1, mkexpr(b1));
+   put_gpr_b7(r1, mkexpr(b0));
+
+   return "lrvgr";
+}
+
static const HChar *
s390_irgen_LRVH(UChar r1, IRTemp op2addr)
{
   /* LOAD REVERSED (16-bit): load the halfword at the second-operand
      address and store it byte-swapped into bytes 6-7 of r1; bytes
      0-5 of r1 are untouched. */
   IRTemp op2 = newTemp(Ity_I16);

   assign(op2, load(Ity_I16, mkexpr(op2addr)));
   /* Big-endian load: the high half of op2 is the first memory byte.
      Swapping means byte 6 of r1 gets the SECOND memory byte ... */
   put_gpr_b6(r1, unop(Iop_16to8, mkexpr(op2)));
   /* ... and byte 7 gets the FIRST one. */
   put_gpr_b7(r1, unop(Iop_16HIto8, mkexpr(op2)));

   return "lrvh";
}
+
static const HChar *
s390_irgen_LRV(UChar r1, IRTemp op2addr)
{
   /* LOAD REVERSED (32-bit): load the word at the second-operand
      address and store it byte-swapped into the low word of r1.
      The word is loaded big-endian, so the least significant byte of
      op2 is the LAST memory byte; writing it to byte 4 (the MSB of
      the low word) and so on performs the byte reversal. */
   IRTemp op2 = newTemp(Ity_I32);

   assign(op2, load(Ity_I32, mkexpr(op2addr)));
   put_gpr_b4(r1, unop(Iop_32to8, binop(Iop_And32, mkexpr(op2), mkU32(255))));
   put_gpr_b5(r1, unop(Iop_32to8, binop(Iop_And32, binop(Iop_Shr32, mkexpr(op2),
              mkU8(8)), mkU32(255))));
   put_gpr_b6(r1, unop(Iop_32to8, binop(Iop_And32, binop(Iop_Shr32, mkexpr(op2),
              mkU8(16)), mkU32(255))));
   put_gpr_b7(r1, unop(Iop_32to8, binop(Iop_And32, binop(Iop_Shr32, mkexpr(op2),
              mkU8(24)), mkU32(255))));

   return "lrv";
}
+
static const HChar *
s390_irgen_LRVG(UChar r1, IRTemp op2addr)
{
   /* LOAD REVERSED (64-bit): load the doubleword at the second-operand
      address and store it byte-swapped into r1.  As in LRV, byte N of
      r1 receives bits (8*N)..(8*N+7) of the big-endian-loaded value,
      which performs the byte reversal. */
   IRTemp op2 = newTemp(Ity_I64);

   assign(op2, load(Ity_I64, mkexpr(op2addr)));
   put_gpr_b0(r1, unop(Iop_64to8, binop(Iop_And64, mkexpr(op2), mkU64(255))));
   put_gpr_b1(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2),
              mkU8(8)), mkU64(255))));
   put_gpr_b2(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2),
              mkU8(16)), mkU64(255))));
   put_gpr_b3(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2),
              mkU8(24)), mkU64(255))));
   put_gpr_b4(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2),
              mkU8(32)), mkU64(255))));
   put_gpr_b5(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2),
              mkU8(40)), mkU64(255))));
   put_gpr_b6(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2),
              mkU8(48)), mkU64(255))));
   put_gpr_b7(r1, unop(Iop_64to8, binop(Iop_And64, binop(Iop_Shr64, mkexpr(op2),
              mkU8(56)), mkU64(255))));

   return "lrvg";
}
+
+static const HChar *
+s390_irgen_MVHHI(UShort i2, IRTemp op1addr)
+{
+   store(mkexpr(op1addr), mkU16(i2));
+
+   return "mvhhi";
+}
+
+static const HChar *
+s390_irgen_MVHI(UShort i2, IRTemp op1addr)
+{
+   store(mkexpr(op1addr), mkU32((UInt)(Int)(Short)i2));
+
+   return "mvhi";
+}
+
+static const HChar *
+s390_irgen_MVGHI(UShort i2, IRTemp op1addr)
+{
+   store(mkexpr(op1addr), mkU64((ULong)(Long)(Short)i2));
+
+   return "mvghi";
+}
+
+static const HChar *
+s390_irgen_MVI(UChar i2, IRTemp op1addr)
+{
+   store(mkexpr(op1addr), mkU8(i2));
+
+   return "mvi";
+}
+
+static const HChar *
+s390_irgen_MVIY(UChar i2, IRTemp op1addr)
+{
+   store(mkexpr(op1addr), mkU8(i2));
+
+   return "mviy";
+}
+
+static const HChar *
+s390_irgen_MR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1 + 1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+   return "mr";
+}
+
+static const HChar *
+s390_irgen_M(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1 + 1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+   return "m";
+}
+
+static const HChar *
+s390_irgen_MFY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1 + 1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+   return "mfy";
+}
+
+static const HChar *
+s390_irgen_MH(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I16);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I16, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullS32, mkexpr(op1), unop(Iop_16Sto32, mkexpr(op2))
+          ));
+   put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+   return "mh";
+}
+
+static const HChar *
+s390_irgen_MHY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I16);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I16, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullS32, mkexpr(op1), unop(Iop_16Sto32, mkexpr(op2))
+          ));
+   put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+   return "mhy";
+}
+
+static const HChar *
+s390_irgen_MHI(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Short op2;
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = (Short)i2;
+   assign(result, binop(Iop_MullS32, mkexpr(op1), unop(Iop_16Sto32,
+          mkU16((UShort)op2))));
+   put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+   return "mhi";
+}
+
+static const HChar *
+s390_irgen_MGHI(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Short op2;
+   IRTemp result = newTemp(Ity_I128);
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (Short)i2;
+   assign(result, binop(Iop_MullS64, mkexpr(op1), unop(Iop_16Sto64,
+          mkU16((UShort)op2))));
+   put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+   return "mghi";
+}
+
+static const HChar *
+s390_irgen_MLR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1 + 1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_MullU32, mkexpr(op1), mkexpr(op2)));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+   return "mlr";
+}
+
+static const HChar *
+s390_irgen_MLGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I128);
+
+   assign(op1, get_gpr_dw0(r1 + 1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, binop(Iop_MullU64, mkexpr(op1), mkexpr(op2)));
+   put_gpr_dw0(r1, unop(Iop_128HIto64, mkexpr(result)));
+   put_gpr_dw0(r1 + 1, unop(Iop_128to64, mkexpr(result)));
+
+   return "mlgr";
+}
+
+static const HChar *
+s390_irgen_ML(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1 + 1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullU32, mkexpr(op1), mkexpr(op2)));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+   return "ml";
+}
+
+static const HChar *
+s390_irgen_MLG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I128);
+
+   assign(op1, get_gpr_dw0(r1 + 1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullU64, mkexpr(op1), mkexpr(op2)));
+   put_gpr_dw0(r1, unop(Iop_128HIto64, mkexpr(result)));
+   put_gpr_dw0(r1 + 1, unop(Iop_128to64, mkexpr(result)));
+
+   return "mlg";
+}
+
+static const HChar *
+s390_irgen_MSR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+   put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+   return "msr";
+}
+
+static const HChar *
+s390_irgen_MSGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I128);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, binop(Iop_MullS64, mkexpr(op1), mkexpr(op2)));
+   put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+   return "msgr";
+}
+
+static const HChar *
+s390_irgen_MSGFR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I128);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_MullS64, mkexpr(op1), unop(Iop_32Sto64, mkexpr(op2))
+          ));
+   put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+   return "msgfr";
+}
+
+static const HChar *
+s390_irgen_MS(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+   put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+   return "ms";
+}
+
+static const HChar *
+s390_irgen_MSY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullS32, mkexpr(op1), mkexpr(op2)));
+   put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+   return "msy";
+}
+
+static const HChar *
+s390_irgen_MSG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I128);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullS64, mkexpr(op1), mkexpr(op2)));
+   put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+   return "msg";
+}
+
+static const HChar *
+s390_irgen_MSGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I128);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_MullS64, mkexpr(op1), unop(Iop_32Sto64, mkexpr(op2))
+          ));
+   put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+   return "msgf";
+}
+
+static const HChar *
+s390_irgen_MSFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   Int op2;
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = (Int)i2;
+   assign(result, binop(Iop_MullS32, mkexpr(op1), mkU32((UInt)op2)));
+   put_gpr_w1(r1, unop(Iop_64to32, mkexpr(result)));
+
+   return "msfi";
+}
+
+static const HChar *
+s390_irgen_MSGFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   Int op2;
+   IRTemp result = newTemp(Ity_I128);
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (Int)i2;
+   assign(result, binop(Iop_MullS64, mkexpr(op1), unop(Iop_32Sto64, mkU32((UInt)
+          op2))));
+   put_gpr_dw0(r1, unop(Iop_128to64, mkexpr(result)));
+
+   return "msgfi";
+}
+
+static const HChar *
+s390_irgen_OR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_Or32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "or";
+}
+
+static const HChar *
+s390_irgen_OGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, binop(Iop_Or64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "ogr";
+}
+
+static const HChar *
+s390_irgen_ORK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Or32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "ork";
+}
+
+static const HChar *
+s390_irgen_OGRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Or64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "ogrk";
+}
+
+static const HChar *
+s390_irgen_O(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Or32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "o";
+}
+
+static const HChar *
+s390_irgen_OY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Or32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "oy";
+}
+
+static const HChar *
+s390_irgen_OG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_Or64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "og";
+}
+
+static const HChar *
+s390_irgen_OI(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+   IRTemp result = newTemp(Ity_I8);
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   assign(result, binop(Iop_Or8, mkexpr(op1), mkU8(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "oi";
+}
+
+static const HChar *
+s390_irgen_OIY(UChar i2, IRTemp op1addr)
+{
+   IRTemp op1 = newTemp(Ity_I8);
+   UChar op2;
+   IRTemp result = newTemp(Ity_I8);
+
+   assign(op1, load(Ity_I8, mkexpr(op1addr)));
+   op2 = i2;
+   assign(result, binop(Iop_Or8, mkexpr(op1), mkU8(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   store(mkexpr(op1addr), mkexpr(result));
+
+   return "oiy";
+}
+
+static const HChar *
+s390_irgen_OIHF(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w0(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Or32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "oihf";
+}
+
+/* OIHH: OR the 16-bit immediate i2 into halfword 0 of GR r1;
+   condition code from the bitwise result. */
+static const HChar *
+s390_irgen_OIHH(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+   IRTemp result = newTemp(Ity_I16);
+
+   assign(op1, get_gpr_hw0(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Or16, mkexpr(op1), mkU16(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_hw0(r1, mkexpr(result));
+
+   return "oihh";
+}
+
+/* OIHL: OR the 16-bit immediate i2 into halfword 1 of GR r1;
+   condition code from the bitwise result. */
+static const HChar *
+s390_irgen_OIHL(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+   IRTemp result = newTemp(Ity_I16);
+
+   assign(op1, get_gpr_hw1(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Or16, mkexpr(op1), mkU16(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_hw1(r1, mkexpr(result));
+
+   return "oihl";
+}
+
+/* OILF: OR the 32-bit immediate i2 into word 1 (low word) of GR r1;
+   condition code from the bitwise result. */
+static const HChar *
+s390_irgen_OILF(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Or32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "oilf";
+}
+
+/* OILH: OR the 16-bit immediate i2 into halfword 2 of GR r1;
+   condition code from the bitwise result. */
+static const HChar *
+s390_irgen_OILH(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+   IRTemp result = newTemp(Ity_I16);
+
+   assign(op1, get_gpr_hw2(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Or16, mkexpr(op1), mkU16(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_hw2(r1, mkexpr(result));
+
+   return "oilh";
+}
+
+/* OILL: OR the 16-bit immediate i2 into halfword 3 of GR r1;
+   condition code from the bitwise result. */
+static const HChar *
+s390_irgen_OILL(UChar r1, UShort i2)
+{
+   IRTemp op1 = newTemp(Ity_I16);
+   UShort op2;
+   IRTemp result = newTemp(Ity_I16);
+
+   assign(op1, get_gpr_hw3(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Or16, mkexpr(op1), mkU16(op2)));
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+   put_gpr_hw3(r1, mkexpr(result));
+
+   return "oill";
+}
+
+/* PFD (prefetch data): performance hint only -- no architecturally
+   visible effect, so no IR is generated. */
+static const HChar *
+s390_irgen_PFD(void)
+{
+
+   return "pfd";
+}
+
+/* PFDRL (prefetch data relative long): hint only, like PFD -- no IR. */
+static const HChar *
+s390_irgen_PFDRL(void)
+{
+
+   return "pfdrl";
+}
+
+/* Build an IR expression for the rounding mode requested by the PFPO
+   instruction.  The mode is held in the low 4 bits of GR 0 (bits
+   [60:63]); this routine translates that field, via a chain of mkite
+   (if-then-else) expressions, into the corresponding VEX BFP/DFP
+   rounding-mode encoding.  Value 1 selects rounding per the FPC; values
+   8..15 select explicit DFP modes; 0 and the invalid values 2..7 fall
+   through to S390_DFP_ROUND_PER_FPC_0. */
+static IRExpr *
+get_rounding_mode_from_gr0(void)
+{
+   IRTemp rm_bits = newTemp(Ity_I32);
+   IRExpr *s390rm;
+   IRExpr *irrm;
+
+   /* The dfp/bfp rounding mode is stored in bits [60:63] of GR 0
+      when PFPO insn is called. So, extract the bits at [60:63] */
+   assign(rm_bits, binop(Iop_And32, get_gpr_w1(0), mkU32(0xf)));
+   s390rm = mkexpr(rm_bits);
+   irrm = mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0x1)),
+            mkexpr(encode_bfp_rounding_mode( S390_BFP_ROUND_PER_FPC)),
+            mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0x8)),
+              mkexpr(encode_dfp_rounding_mode(S390_DFP_ROUND_NEAREST_EVEN_8)),
+              mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0x9)),
+                mkexpr(encode_dfp_rounding_mode(S390_DFP_ROUND_ZERO_9)),
+                mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xa)),
+                  mkexpr(encode_dfp_rounding_mode(S390_DFP_ROUND_POSINF_10)),
+                  mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xb)),
+                    mkexpr(encode_dfp_rounding_mode(S390_DFP_ROUND_NEGINF_11)),
+                    mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xc)),
+                      mkexpr(encode_dfp_rounding_mode(
+                               S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12)),
+                      mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xd)),
+                        mkexpr(encode_dfp_rounding_mode(
+                                 S390_DFP_ROUND_NEAREST_TIE_TOWARD_0)),
+                        mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xe)),
+                          mkexpr(encode_dfp_rounding_mode(
+                                   S390_DFP_ROUND_AWAY_0)),
+                          mkite(binop(Iop_CmpEQ32, s390rm, mkU32(0xf)),
+                            mkexpr(encode_dfp_rounding_mode(
+                                     S390_DFP_ROUND_PREPARE_SHORT_15)),
+                                /* if rounding mode is 0 or invalid (2-7)
+                                   set S390_DFP_ROUND_PER_FPC_0 */
+                            mkexpr(encode_dfp_rounding_mode(
+                                     S390_DFP_ROUND_PER_FPC_0)))))))))));
+
+   return irrm;
+}
+
+/* Build a clean-helper call to s390_do_pfpo, passing the GR 0 contents.
+   The helper validates the PFPO function code; the call returns an I32
+   emulation-failure code (EmNote_NONE when the code is valid). */
+static IRExpr *
+s390_call_pfpo_helper(IRExpr *gr0)
+{
+   IRExpr **args, *call;
+
+   args = mkIRExprVec_1(gr0);
+   call = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+                        "s390_do_pfpo", &s390_do_pfpo, args);
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
+/* PFPO (perform floating point operation): convert between BFP and DFP
+   formats.  GR 0 holds a test bit (bit 32), the function code (bits
+   [33:55]) and the rounding mode (bits [60:63]).  The source is taken
+   from FPR 4 (and 6, for 128-bit pairs) and the result is written to
+   FPR 0 (and 2).  The IR below emits one conversion per supported
+   function code, each guarded by a next_insn_if on the decoded code.
+   If the test bit is set, only the validity check is performed. */
+static const HChar *
+s390_irgen_PFPO(void)
+{
+   IRTemp gr0 = newTemp(Ity_I32);     /* word 1 [32:63] of GR 0 */
+   IRTemp test_bit = newTemp(Ity_I32); /* bit 32 of GR 0 - test validity */
+   IRTemp fn = newTemp(Ity_I32);       /* [33:55] of GR 0 - function code */
+   IRTemp ef = newTemp(Ity_I32);       /* Emulation Failure */
+   IRTemp src1 = newTemp(Ity_F32);
+   IRTemp dst1 = newTemp(Ity_D32);
+   IRTemp src2 = newTemp(Ity_F32);
+   IRTemp dst2 = newTemp(Ity_D64);
+   IRTemp src3 = newTemp(Ity_F32);
+   IRTemp dst3 = newTemp(Ity_D128);
+   IRTemp src4 = newTemp(Ity_F64);
+   IRTemp dst4 = newTemp(Ity_D32);
+   IRTemp src5 = newTemp(Ity_F64);
+   IRTemp dst5 = newTemp(Ity_D64);
+   IRTemp src6 = newTemp(Ity_F64);
+   IRTemp dst6 = newTemp(Ity_D128);
+   IRTemp src7 = newTemp(Ity_F128);
+   IRTemp dst7 = newTemp(Ity_D32);
+   IRTemp src8 = newTemp(Ity_F128);
+   IRTemp dst8 = newTemp(Ity_D64);
+   IRTemp src9 = newTemp(Ity_F128);
+   IRTemp dst9 = newTemp(Ity_D128);
+   IRTemp src10 = newTemp(Ity_D32);
+   IRTemp dst10 = newTemp(Ity_F32);
+   IRTemp src11 = newTemp(Ity_D32);
+   IRTemp dst11 = newTemp(Ity_F64);
+   IRTemp src12 = newTemp(Ity_D32);
+   IRTemp dst12 = newTemp(Ity_F128);
+   IRTemp src13 = newTemp(Ity_D64);
+   IRTemp dst13 = newTemp(Ity_F32);
+   IRTemp src14 = newTemp(Ity_D64);
+   IRTemp dst14 = newTemp(Ity_F64);
+   IRTemp src15 = newTemp(Ity_D64);
+   IRTemp dst15 = newTemp(Ity_F128);
+   IRTemp src16 = newTemp(Ity_D128);
+   IRTemp dst16 = newTemp(Ity_F32);
+   IRTemp src17 = newTemp(Ity_D128);
+   IRTemp dst17 = newTemp(Ity_F64);
+   IRTemp src18 = newTemp(Ity_D128);
+   IRTemp dst18 = newTemp(Ity_F128);
+   IRExpr *irrm;
+
+   /* PFPO is an optional facility; fail the translation if the host
+      cannot execute it. */
+   if (! s390_host_has_pfpo) {
+      emulation_failure(EmFail_S390X_pfpo);
+      goto done;
+   }
+
+   assign(gr0, get_gpr_w1(0));
+   /* get function code */
+   assign(fn, binop(Iop_And32, binop(Iop_Shr32, mkexpr(gr0), mkU8(8)),
+                    mkU32(0x7fffff)));
+   /* get validity test bit */
+   assign(test_bit, binop(Iop_And32, binop(Iop_Shr32, mkexpr(gr0), mkU8(31)),
+                          mkU32(0x1)));
+   irrm = get_rounding_mode_from_gr0();
+
+   /* test_bit is 1 */
+   assign(src1, get_fpr_w0(4)); /* get source from FPR 4,6 */
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src1, gr0);
+
+   /* Return code set in GR1 is usually 0. Non-zero value is set only
+      when exceptions are raised. See Programming Notes point 5 in the
+      instruction description of pfpo in POP. Since valgrind does not
+      model exception, it might be safe to just set 0 to GR 1. */
+   put_gpr_w1(1, mkU32(0x0));
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(test_bit), mkU32(0x1)));
+
+   /* Check validity of function code in GR 0 */
+   assign(ef, s390_call_pfpo_helper(unop(Iop_32Uto64, mkexpr(gr0))));
+   emulation_failure_with_expr(mkexpr(ef));
+
+   /* Bail out of the translation if the helper reported an invalid
+      function code. */
+   stmt(
+        IRStmt_Exit(
+                    binop(Iop_CmpNE32, mkexpr(ef), mkU32(EmNote_NONE)),
+                    Ijk_EmFail,
+                    IRConst_U64(guest_IA_next_instr),
+                    S390X_GUEST_OFFSET(guest_IA)
+                    )
+        );
+
+   /* F32 -> D32 */
+   /* get source from FPR 4,6 - already set in src1 */
+   assign(dst1, binop(Iop_F32toD32, irrm, mkexpr(src1)));
+   put_dpr_w0(0, mkexpr(dst1)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_32, src1, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F32_TO_D32)));
+
+   /* F32 -> D64 */
+   assign(src2, get_fpr_w0(4)); /* get source from FPR 4,6 */
+   assign(dst2, binop(Iop_F32toD64, irrm, mkexpr(src2)));
+   put_dpr_dw0(0, mkexpr(dst2)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_32, src2, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F32_TO_D64)));
+
+   /* F32 -> D128 */
+   assign(src3, get_fpr_w0(4)); /* get source from FPR 4,6 */
+   assign(dst3, binop(Iop_F32toD128, irrm, mkexpr(src3)));
+   put_dpr_pair(0, mkexpr(dst3)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_32, src3, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F32_TO_D128)));
+
+   /* F64 -> D32 */
+   assign(src4, get_fpr_dw0(4)); /* get source from FPR 4,6 */
+   assign(dst4, binop(Iop_F64toD32, irrm, mkexpr(src4)));
+   put_dpr_w0(0, mkexpr(dst4)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src4, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F64_TO_D32)));
+
+   /* F64 -> D64 */
+   assign(src5, get_fpr_dw0(4)); /* get source from FPR 4,6 */
+   assign(dst5, binop(Iop_F64toD64, irrm, mkexpr(src5)));
+   put_dpr_dw0(0, mkexpr(dst5)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src5, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F64_TO_D64)));
+
+   /* F64 -> D128 */
+   assign(src6, get_fpr_dw0(4)); /* get source from FPR 4,6 */
+   assign(dst6, binop(Iop_F64toD128, irrm, mkexpr(src6)));
+   put_dpr_pair(0, mkexpr(dst6)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src6, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F64_TO_D128)));
+
+   /* F128 -> D32 */
+   assign(src7, get_fpr_pair(4)); /* get source from FPR 4,6 */
+   assign(dst7, binop(Iop_F128toD32, irrm, mkexpr(src7)));
+   put_dpr_w0(0, mkexpr(dst7)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_put1f128Z(S390_CC_OP_PFPO_128, src7, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F128_TO_D32)));
+
+   /* F128 -> D64 */
+   assign(src8, get_fpr_pair(4)); /* get source from FPR 4,6 */
+   assign(dst8, binop(Iop_F128toD64, irrm, mkexpr(src8)));
+   put_dpr_dw0(0, mkexpr(dst8)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_put1f128Z(S390_CC_OP_PFPO_128, src8, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F128_TO_D64)));
+
+   /* F128 -> D128 */
+   assign(src9, get_fpr_pair(4)); /* get source from FPR 4,6 */
+   assign(dst9, binop(Iop_F128toD128, irrm, mkexpr(src9)));
+   put_dpr_pair(0, mkexpr(dst9)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_put1f128Z(S390_CC_OP_PFPO_128, src9, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_F128_TO_D128)));
+
+   /* D32 -> F32 */
+   assign(src10, get_dpr_w0(4)); /* get source from FPR 4,6 */
+   assign(dst10, binop(Iop_D32toF32, irrm, mkexpr(src10)));
+   put_fpr_w0(0, mkexpr(dst10)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_32, src10, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D32_TO_F32)));
+
+   /* D32 -> F64 */
+   assign(src11, get_dpr_w0(4)); /* get source from FPR 4,6 */
+   assign(dst11, binop(Iop_D32toF64, irrm, mkexpr(src11)));
+   put_fpr_dw0(0, mkexpr(dst11)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_32, src11, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D32_TO_F64)));
+
+   /* D32 -> F128 */
+   assign(src12, get_dpr_w0(4)); /* get source from FPR 4,6 */
+   assign(dst12, binop(Iop_D32toF128, irrm, mkexpr(src12)));
+   put_fpr_pair(0, mkexpr(dst12)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_32, src12, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D32_TO_F128)));
+
+   /* D64 -> F32 */
+   assign(src13, get_dpr_dw0(4)); /* get source from FPR 4,6 */
+   assign(dst13, binop(Iop_D64toF32, irrm, mkexpr(src13)));
+   put_fpr_w0(0, mkexpr(dst13)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src13, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D64_TO_F32)));
+
+   /* D64 -> F64 */
+   assign(src14, get_dpr_dw0(4)); /* get source from FPR 4,6 */
+   assign(dst14, binop(Iop_D64toF64, irrm, mkexpr(src14)));
+   put_fpr_dw0(0, mkexpr(dst14)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src14, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D64_TO_F64)));
+
+   /* D64 -> F128 */
+   assign(src15, get_dpr_dw0(4)); /* get source from FPR 4,6 */
+   assign(dst15, binop(Iop_D64toF128, irrm, mkexpr(src15)));
+   put_fpr_pair(0, mkexpr(dst15)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_putFZ(S390_CC_OP_PFPO_64, src15, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D64_TO_F128)));
+
+   /* D128 -> F32 */
+   assign(src16, get_dpr_pair(4)); /* get source from FPR 4,6 */
+   assign(dst16, binop(Iop_D128toF32, irrm, mkexpr(src16)));
+   put_fpr_w0(0, mkexpr(dst16)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_put1d128Z(S390_CC_OP_PFPO_128, src16, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D128_TO_F32)));
+
+   /* D128 -> F64 */
+   assign(src17, get_dpr_pair(4)); /* get source from FPR 4,6 */
+   assign(dst17, binop(Iop_D128toF64, irrm, mkexpr(src17)));
+   put_fpr_dw0(0, mkexpr(dst17)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_put1d128Z(S390_CC_OP_PFPO_128, src17, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D128_TO_F64)));
+
+   /* D128 -> F128 */
+   assign(src18, get_dpr_pair(4)); /* get source from FPR 4,6 */
+   assign(dst18, binop(Iop_D128toF128, irrm, mkexpr(src18)));
+   put_fpr_pair(0, mkexpr(dst18)); /* put the result in FPR 0,2 */
+   put_gpr_w1(1, mkU32(0x0));
+   s390_cc_thunk_put1d128Z(S390_CC_OP_PFPO_128, src18, gr0);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(fn), mkU32(S390_PFPO_D128_TO_F128)));
+
+ done:
+   return "pfpo";
+}
+
+/* RLL: rotate word 1 of GR r3 left by op2addr & 31 bits, result into
+   word 1 of GR r1.  The rotate is built as (op << n) | (op >> (32-n)). */
+static const HChar *
+s390_irgen_RLL(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp amount = newTemp(Ity_I64);
+   IRTemp op = newTemp(Ity_I32);
+
+   assign(amount, binop(Iop_And64, mkexpr(op2addr), mkU64(31)));
+   assign(op, get_gpr_w1(r3));
+   put_gpr_w1(r1, binop(Iop_Or32, binop(Iop_Shl32, mkexpr(op), unop(Iop_64to8,
+              mkexpr(amount))), binop(Iop_Shr32, mkexpr(op), unop(Iop_64to8,
+              binop(Iop_Sub64, mkU64(32), mkexpr(amount))))));
+
+   return "rll";
+}
+
+/* RLLG: rotate the full 64-bit GR r3 left by op2addr & 63 bits,
+   result into GR r1.  Built as (op << n) | (op >> (64-n)). */
+static const HChar *
+s390_irgen_RLLG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp amount = newTemp(Ity_I64);
+   IRTemp op = newTemp(Ity_I64);
+
+   assign(amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+   assign(op, get_gpr_dw0(r3));
+   put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(op), unop(Iop_64to8,
+               mkexpr(amount))), binop(Iop_Shr64, mkexpr(op), unop(Iop_64to8,
+               binop(Iop_Sub64, mkU64(64), mkexpr(amount))))));
+
+   return "rllg";
+}
+
+/* RNSBG (rotate then AND selected bits): rotate GR r2 left by i5&63,
+   AND it with GR r1, and keep only the bits in the [from:to] range
+   selected by i3/i4 (range may wrap when from > to).  If the test bit
+   (bit 0 of i3) is clear, the selected bits are written back to r1;
+   either way the condition code is set from the selected bits. */
+static const HChar *
+s390_irgen_RNSBG(UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+   UChar from;
+   UChar to;
+   UChar rot;
+   UChar t_bit;
+   ULong mask;
+   ULong maskc;
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   from = i3 & 63;
+   to = i4 & 63;
+   rot = i5 & 63;
+   t_bit = i3 & 128;
+   /* Rotate r2 left by 'rot'; rot == 0 is special-cased to avoid an
+      undefined shift by 64 in the right-shift half. */
+   assign(op2, rot == 0 ? get_gpr_dw0(r2) : binop(Iop_Or64, binop(Iop_Shl64,
+          get_gpr_dw0(r2), mkU8(rot)), binop(Iop_Shr64, get_gpr_dw0(r2),
+          mkU8(64 - rot))));
+   /* Build the bit-selection mask (bits numbered MSB-first, 0..63);
+      a wrapping range selects both ends of the register. */
+   if (from <= to) {
+      mask = ~0ULL;
+      mask = (mask >> from) & (mask << (63 - to));
+      maskc = ~mask;
+   } else {
+      maskc = ~0ULL;
+      maskc = (maskc >> (to + 1)) & (maskc << (64 - from));
+      mask = ~maskc;
+   }
+   assign(result, binop(Iop_And64, binop(Iop_And64, get_gpr_dw0(r1), mkexpr(op2)
+          ), mkU64(mask)));
+   if (t_bit == 0) {
+      /* Merge the selected bits into r1, keeping the unselected bits. */
+      put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_And64, get_gpr_dw0(r1),
+                  mkU64(maskc)), mkexpr(result)));
+   }
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+
+   return "rnsbg";
+}
+
+/* RXSBG (rotate then XOR selected bits): same structure as RNSBG but
+   the combining operation is XOR instead of AND. */
+static const HChar *
+s390_irgen_RXSBG(UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+   UChar from;
+   UChar to;
+   UChar rot;
+   UChar t_bit;
+   ULong mask;
+   ULong maskc;
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   from = i3 & 63;
+   to = i4 & 63;
+   rot = i5 & 63;
+   t_bit = i3 & 128;
+   /* Rotate r2 left by 'rot' (rot == 0 special-cased). */
+   assign(op2, rot == 0 ? get_gpr_dw0(r2) : binop(Iop_Or64, binop(Iop_Shl64,
+          get_gpr_dw0(r2), mkU8(rot)), binop(Iop_Shr64, get_gpr_dw0(r2),
+          mkU8(64 - rot))));
+   /* Bit-selection mask, MSB-first numbering, possibly wrapping. */
+   if (from <= to) {
+      mask = ~0ULL;
+      mask = (mask >> from) & (mask << (63 - to));
+      maskc = ~mask;
+   } else {
+      maskc = ~0ULL;
+      maskc = (maskc >> (to + 1)) & (maskc << (64 - from));
+      mask = ~maskc;
+   }
+   assign(result, binop(Iop_And64, binop(Iop_Xor64, get_gpr_dw0(r1), mkexpr(op2)
+          ), mkU64(mask)));
+   if (t_bit == 0) {
+      put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_And64, get_gpr_dw0(r1),
+                  mkU64(maskc)), mkexpr(result)));
+   }
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+
+   return "rxsbg";
+}
+
+/* ROSBG (rotate then OR selected bits): same structure as RNSBG but
+   the combining operation is OR instead of AND. */
+static const HChar *
+s390_irgen_ROSBG(UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+   UChar from;
+   UChar to;
+   UChar rot;
+   UChar t_bit;
+   ULong mask;
+   ULong maskc;
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+
+   from = i3 & 63;
+   to = i4 & 63;
+   rot = i5 & 63;
+   t_bit = i3 & 128;
+   /* Rotate r2 left by 'rot' (rot == 0 special-cased). */
+   assign(op2, rot == 0 ? get_gpr_dw0(r2) : binop(Iop_Or64, binop(Iop_Shl64,
+          get_gpr_dw0(r2), mkU8(rot)), binop(Iop_Shr64, get_gpr_dw0(r2),
+          mkU8(64 - rot))));
+   /* Bit-selection mask, MSB-first numbering, possibly wrapping. */
+   if (from <= to) {
+      mask = ~0ULL;
+      mask = (mask >> from) & (mask << (63 - to));
+      maskc = ~mask;
+   } else {
+      maskc = ~0ULL;
+      maskc = (maskc >> (to + 1)) & (maskc << (64 - from));
+      mask = ~maskc;
+   }
+   assign(result, binop(Iop_And64, binop(Iop_Or64, get_gpr_dw0(r1), mkexpr(op2)
+          ), mkU64(mask)));
+   if (t_bit == 0) {
+      put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_And64, get_gpr_dw0(r1),
+                  mkU64(maskc)), mkexpr(result)));
+   }
+   s390_cc_thunk_putZ(S390_CC_OP_BITWISE, result);
+
+   return "rosbg";
+}
+
+/* RISBG (rotate then insert selected bits): rotate GR r2 left by i5&63
+   and insert the bits in the [from:to] range into GR r1.  If the zero
+   bit (bit 0 of i4) is set, the unselected bits of r1 are zeroed
+   instead of preserved.  The condition code is set from the final r1
+   value as for LOAD AND TEST (signed). */
+static const HChar *
+s390_irgen_RISBG(UChar r1, UChar r2, UChar i3, UChar i4, UChar i5)
+{
+   UChar from;
+   UChar to;
+   UChar rot;
+   UChar z_bit;
+   ULong mask;
+   ULong maskc;
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   from = i3 & 63;
+   to = i4 & 63;
+   rot = i5 & 63;
+   z_bit = i4 & 128;
+   /* Rotate r2 left by 'rot' (rot == 0 special-cased). */
+   assign(op2, rot == 0 ? get_gpr_dw0(r2) : binop(Iop_Or64, binop(Iop_Shl64,
+          get_gpr_dw0(r2), mkU8(rot)), binop(Iop_Shr64, get_gpr_dw0(r2),
+          mkU8(64 - rot))));
+   /* Bit-selection mask, MSB-first numbering, possibly wrapping. */
+   if (from <= to) {
+      mask = ~0ULL;
+      mask = (mask >> from) & (mask << (63 - to));
+      maskc = ~mask;
+   } else {
+      maskc = ~0ULL;
+      maskc = (maskc >> (to + 1)) & (maskc << (64 - from));
+      mask = ~maskc;
+   }
+   if (z_bit == 0) {
+      put_gpr_dw0(r1, binop(Iop_Or64, binop(Iop_And64, get_gpr_dw0(r1),
+                  mkU64(maskc)), binop(Iop_And64, mkexpr(op2), mkU64(mask))));
+   } else {
+      put_gpr_dw0(r1, binop(Iop_And64, mkexpr(op2), mkU64(mask)));
+   }
+   assign(result, get_gpr_dw0(r1));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+   return "risbg";
+}
+
+/* SAR: copy word 1 of GR r2 into access register r1.  The disassembly
+   trace is emitted here directly because the operand kinds (AR, GPR)
+   differ from what the generic format handler would print. */
+static const HChar *
+s390_irgen_SAR(UChar r1, UChar r2)
+{
+   put_ar_w0(r1, get_gpr_w1(r2));
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, AR, GPR), "sar", r1, r2);
+
+   return "sar";
+}
+
+/* SLDA: shift left double arithmetic.  Words 1 of the even/odd pair
+   r1,r1+1 are concatenated into a 64-bit value, shifted left by
+   op2addr & 63 with the sign bit preserved, and written back to the
+   pair.  Condition code from the 64-bit shift thunk. */
+static const HChar *
+s390_irgen_SLDA(UChar r1, IRTemp op2addr)
+{
+   IRTemp p1 = newTemp(Ity_I64);
+   IRTemp p2 = newTemp(Ity_I64);
+   IRTemp op = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+   ULong sign_mask;
+   IRTemp shift_amount = newTemp(Ity_I64);
+
+   assign(p1, unop(Iop_32Uto64, get_gpr_w1(r1)));
+   assign(p2, unop(Iop_32Uto64, get_gpr_w1(r1 + 1)));
+   assign(op, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(p1), mkU8(32)), mkexpr(p2)
+          ));
+   sign_mask = 1ULL << 63;
+   assign(shift_amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+   /* Shift, then splice the original sign bit back in. */
+   assign(result, binop(Iop_Or64, binop(Iop_And64, binop(Iop_Shl64, mkexpr(op),
+          unop(Iop_64to8, mkexpr(shift_amount))), mkU64(~sign_mask)),
+          binop(Iop_And64, mkexpr(op), mkU64(sign_mask))));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+   s390_cc_thunk_putZZ(S390_CC_OP_SHIFT_LEFT_64, op, shift_amount);
+
+   return "slda";
+}
+
+/* SLDL: shift left double logical.  The concatenated 64-bit value of
+   the register pair r1,r1+1 is shifted left by op2addr & 63 and
+   written back; no condition code change. */
+static const HChar *
+s390_irgen_SLDL(UChar r1, IRTemp op2addr)
+{
+   IRTemp p1 = newTemp(Ity_I64);
+   IRTemp p2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(p1, unop(Iop_32Uto64, get_gpr_w1(r1)));
+   assign(p2, unop(Iop_32Uto64, get_gpr_w1(r1 + 1)));
+   assign(result, binop(Iop_Shl64, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(p1),
+          mkU8(32)), mkexpr(p2)), unop(Iop_64to8, binop(Iop_And64,
+          mkexpr(op2addr), mkU64(63)))));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+   return "sldl";
+}
+
+/* SLA: shift left single arithmetic on word 1 of GR r1; the shift
+   amount is op2addr & 63 and the sign bit of the operand is preserved.
+   Condition code from the 32-bit shift thunk. */
+static const HChar *
+s390_irgen_SLA(UChar r1, IRTemp op2addr)
+{
+   IRTemp uop = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+   UInt sign_mask;
+   IRTemp shift_amount = newTemp(Ity_I64);
+   IRTemp op = newTemp(Ity_I32);
+
+   assign(op, get_gpr_w1(r1));
+   assign(uop, get_gpr_w1(r1));
+   sign_mask = 2147483648U;   /* 0x80000000: bit 32 (MSB of the word) */
+   assign(shift_amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+   /* Shift, then splice the original sign bit back in. */
+   assign(result, binop(Iop_Or32, binop(Iop_And32, binop(Iop_Shl32, mkexpr(uop),
+          unop(Iop_64to8, mkexpr(shift_amount))), mkU32(~sign_mask)),
+          binop(Iop_And32, mkexpr(uop), mkU32(sign_mask))));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putZZ(S390_CC_OP_SHIFT_LEFT_32, op, shift_amount);
+
+   return "sla";
+}
+
+/* SLAK: three-operand form of SLA.  Shift word 1 of GR r3 left
+   arithmetically (sign bit preserved) and put the result in GR r1. */
+static const HChar *
+s390_irgen_SLAK(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp uop = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+   UInt sign_mask;
+   IRTemp shift_amount = newTemp(Ity_I64);
+   IRTemp op = newTemp(Ity_I32);
+
+   assign(op, get_gpr_w1(r3));
+   assign(uop, get_gpr_w1(r3));
+   sign_mask = 2147483648U;   /* 0x80000000: MSB of the word */
+   assign(shift_amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+   assign(result, binop(Iop_Or32, binop(Iop_And32, binop(Iop_Shl32, mkexpr(uop),
+          unop(Iop_64to8, mkexpr(shift_amount))), mkU32(~sign_mask)),
+          binop(Iop_And32, mkexpr(uop), mkU32(sign_mask))));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putZZ(S390_CC_OP_SHIFT_LEFT_32, op, shift_amount);
+
+   return "slak";
+}
+
+/* SLAG: 64-bit three-operand shift left arithmetic.  Shift GR r3 left
+   by op2addr & 63 with the sign bit preserved; result into GR r1. */
+static const HChar *
+s390_irgen_SLAG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp uop = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+   ULong sign_mask;
+   IRTemp shift_amount = newTemp(Ity_I64);
+   IRTemp op = newTemp(Ity_I64);
+
+   assign(op, get_gpr_dw0(r3));
+   assign(uop, get_gpr_dw0(r3));
+   sign_mask = 9223372036854775808ULL;   /* 0x8000000000000000: bit 0 */
+   assign(shift_amount, binop(Iop_And64, mkexpr(op2addr), mkU64(63)));
+   assign(result, binop(Iop_Or64, binop(Iop_And64, binop(Iop_Shl64, mkexpr(uop),
+          unop(Iop_64to8, mkexpr(shift_amount))), mkU64(~sign_mask)),
+          binop(Iop_And64, mkexpr(uop), mkU64(sign_mask))));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putZZ(S390_CC_OP_SHIFT_LEFT_64, op, shift_amount);
+
+   return "slag";
+}
+
+/* SLL: shift word 1 of GR r1 left logically by op2addr & 63;
+   condition code unchanged. */
+static const HChar *
+s390_irgen_SLL(UChar r1, IRTemp op2addr)
+{
+   put_gpr_w1(r1, binop(Iop_Shl32, get_gpr_w1(r1), unop(Iop_64to8,
+              binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+
+   return "sll";
+}
+
+/* SLLK: three-operand SLL -- shift word 1 of GR r3 left logically,
+   result into GR r1; condition code unchanged. */
+static const HChar *
+s390_irgen_SLLK(UChar r1, UChar r3, IRTemp op2addr)
+{
+   put_gpr_w1(r1, binop(Iop_Shl32, get_gpr_w1(r3), unop(Iop_64to8,
+              binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+
+   return "sllk";
+}
+
+/* SLLG: shift the full 64-bit GR r3 left logically by op2addr & 63,
+   result into GR r1; condition code unchanged. */
+static const HChar *
+s390_irgen_SLLG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   put_gpr_dw0(r1, binop(Iop_Shl64, get_gpr_dw0(r3), unop(Iop_64to8,
+               binop(Iop_And64, mkexpr(op2addr), mkU64(63)))));
+
+   return "sllg";
+}
+
+/* SRDA: shift right double arithmetic.  The concatenated 64-bit value
+   of the pair r1,r1+1 is arithmetically shifted right by op2addr & 63
+   and written back; condition code from the signed result. */
+static const HChar *
+s390_irgen_SRDA(UChar r1, IRTemp op2addr)
+{
+   IRTemp p1 = newTemp(Ity_I64);
+   IRTemp p2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(p1, unop(Iop_32Uto64, get_gpr_w1(r1)));
+   assign(p2, unop(Iop_32Uto64, get_gpr_w1(r1 + 1)));
+   assign(result, binop(Iop_Sar64, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(p1),
+          mkU8(32)), mkexpr(p2)), unop(Iop_64to8, binop(Iop_And64,
+          mkexpr(op2addr), mkU64(63)))));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+   return "srda";
+}
+
+/* SRDL: shift right double logical.  Like SRDA but an unsigned shift
+   and no condition code change. */
+static const HChar *
+s390_irgen_SRDL(UChar r1, IRTemp op2addr)
+{
+   IRTemp p1 = newTemp(Ity_I64);
+   IRTemp p2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(p1, unop(Iop_32Uto64, get_gpr_w1(r1)));
+   assign(p2, unop(Iop_32Uto64, get_gpr_w1(r1 + 1)));
+   assign(result, binop(Iop_Shr64, binop(Iop_Or64, binop(Iop_Shl64, mkexpr(p1),
+          mkU8(32)), mkexpr(p2)), unop(Iop_64to8, binop(Iop_And64,
+          mkexpr(op2addr), mkU64(63)))));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(result)));
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(result)));
+
+   return "srdl";
+}
+
+/* SRA: shift word 1 of GR r1 right arithmetically by op2addr & 63;
+   condition code from the signed result. */
+static const HChar *
+s390_irgen_SRA(UChar r1, IRTemp op2addr)
+{
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp op = newTemp(Ity_I32);
+
+   assign(op, get_gpr_w1(r1));
+   assign(result, binop(Iop_Sar32, mkexpr(op), unop(Iop_64to8, binop(Iop_And64,
+          mkexpr(op2addr), mkU64(63)))));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+   return "sra";
+}
+
+/* SRAK: three-operand SRA -- shift word 1 of GR r3 right
+   arithmetically, result into GR r1; CC from the signed result. */
+static const HChar *
+s390_irgen_SRAK(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp op = newTemp(Ity_I32);
+
+   assign(op, get_gpr_w1(r3));
+   assign(result, binop(Iop_Sar32, mkexpr(op), unop(Iop_64to8, binop(Iop_And64,
+          mkexpr(op2addr), mkU64(63)))));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+   return "srak";
+}
+
+/* SRAG: 64-bit three-operand shift right arithmetic -- shift GR r3
+   right by op2addr & 63, result into GR r1; CC from signed result. */
+static const HChar *
+s390_irgen_SRAG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp op = newTemp(Ity_I64);
+
+   assign(op, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Sar64, mkexpr(op), unop(Iop_64to8, binop(Iop_And64,
+          mkexpr(op2addr), mkU64(63)))));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putS(S390_CC_OP_LOAD_AND_TEST, result);
+
+   return "srag";
+}
+
+/* SRL: shift word 1 of GR r1 right logically by op2addr & 63;
+   condition code unchanged. */
+static const HChar *
+s390_irgen_SRL(UChar r1, IRTemp op2addr)
+{
+   IRTemp op = newTemp(Ity_I32);
+
+   assign(op, get_gpr_w1(r1));
+   put_gpr_w1(r1, binop(Iop_Shr32, mkexpr(op), unop(Iop_64to8, binop(Iop_And64,
+              mkexpr(op2addr), mkU64(63)))));
+
+   return "srl";
+}
+
+/* SRLK: three-operand SRL -- shift word 1 of GR r3 right logically,
+   result into GR r1; condition code unchanged. */
+static const HChar *
+s390_irgen_SRLK(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op = newTemp(Ity_I32);
+
+   assign(op, get_gpr_w1(r3));
+   put_gpr_w1(r1, binop(Iop_Shr32, mkexpr(op), unop(Iop_64to8, binop(Iop_And64,
+              mkexpr(op2addr), mkU64(63)))));
+
+   return "srlk";
+}
+
+/* SRLG: shift the full 64-bit GR r3 right logically by op2addr & 63,
+   result into GR r1; condition code unchanged. */
+static const HChar *
+s390_irgen_SRLG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   IRTemp op = newTemp(Ity_I64);
+
+   assign(op, get_gpr_dw0(r3));
+   put_gpr_dw0(r1, binop(Iop_Shr64, mkexpr(op), unop(Iop_64to8, binop(Iop_And64,
+               mkexpr(op2addr), mkU64(63)))));
+
+   return "srlg";
+}
+
+/* ST: store word 1 of GR r1 at op2addr. */
+static const HChar *
+s390_irgen_ST(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_w1(r1));
+
+   return "st";
+}
+
+/* STY: same as ST (store word 1 of r1); long-displacement encoding. */
+static const HChar *
+s390_irgen_STY(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_w1(r1));
+
+   return "sty";
+}
+
+/* STG: store the full 64-bit GR r1 at op2addr. */
+static const HChar *
+s390_irgen_STG(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_dw0(r1));
+
+   return "stg";
+}
+
+/* STRL: store word 1 of GR r1 at the PC-relative address
+   current-insn + sign-extended(i2) * 2 (i2 counts halfwords). */
+static const HChar *
+s390_irgen_STRL(UChar r1, UInt i2)
+{
+   store(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)),
+         get_gpr_w1(r1));
+
+   return "strl";
+}
+
+/* STGRL: store the full 64-bit GR r1 at the PC-relative address
+   current-insn + sign-extended(i2) * 2 (i2 counts halfwords). */
+static const HChar *
+s390_irgen_STGRL(UChar r1, UInt i2)
+{
+   store(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)),
+         get_gpr_dw0(r1));
+
+   return "stgrl";
+}
+
+/* STC: store byte 7 (the lowest byte) of GR r1 at op2addr. */
+static const HChar *
+s390_irgen_STC(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_b7(r1));
+
+   return "stc";
+}
+
+/* STCY: same as STC (store byte 7 of r1); long-displacement encoding. */
+static const HChar *
+s390_irgen_STCY(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_b7(r1));
+
+   return "stcy";
+}
+
+/* STCH: store byte 3 of GR r1 (lowest byte of the high word) at
+   op2addr. */
+static const HChar *
+s390_irgen_STCH(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_b3(r1));
+
+   return "stch";
+}
+
+/* STCM: store characters under mask.  r3 is a 4-bit mask selecting
+   bytes 4..7 of GR r1 (MSB of the mask = byte 4); the selected bytes
+   are stored contiguously starting at op2addr. */
+static const HChar *
+s390_irgen_STCM(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar mask;
+   UChar n;
+
+   mask = (UChar)r3;
+   n = 0;   /* running offset into the destination */
+   if ((mask & 8) != 0) {
+      store(mkexpr(op2addr), get_gpr_b4(r1));
+      n = n + 1;
+   }
+   if ((mask & 4) != 0) {
+      store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b5(r1));
+      n = n + 1;
+   }
+   if ((mask & 2) != 0) {
+      store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b6(r1));
+      n = n + 1;
+   }
+   if ((mask & 1) != 0) {
+      store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b7(r1));
+   }
+
+   return "stcm";
+}
+
+/* STCMY: same as STCM (store bytes 4..7 of r1 under mask r3);
+   long-displacement encoding. */
+static const HChar *
+s390_irgen_STCMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar mask;
+   UChar n;
+
+   mask = (UChar)r3;
+   n = 0;   /* running offset into the destination */
+   if ((mask & 8) != 0) {
+      store(mkexpr(op2addr), get_gpr_b4(r1));
+      n = n + 1;
+   }
+   if ((mask & 4) != 0) {
+      store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b5(r1));
+      n = n + 1;
+   }
+   if ((mask & 2) != 0) {
+      store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b6(r1));
+      n = n + 1;
+   }
+   if ((mask & 1) != 0) {
+      store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b7(r1));
+   }
+
+   return "stcmy";
+}
+
+/* STCMH: store characters under mask, high.  Like STCM but the mask
+   selects bytes 0..3 of GR r1 (the high word). */
+static const HChar *
+s390_irgen_STCMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar mask;
+   UChar n;
+
+   mask = (UChar)r3;
+   n = 0;   /* running offset into the destination */
+   if ((mask & 8) != 0) {
+      store(mkexpr(op2addr), get_gpr_b0(r1));
+      n = n + 1;
+   }
+   if ((mask & 4) != 0) {
+      store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b1(r1));
+      n = n + 1;
+   }
+   if ((mask & 2) != 0) {
+      store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b2(r1));
+      n = n + 1;
+   }
+   if ((mask & 1) != 0) {
+      store(binop(Iop_Add64, mkexpr(op2addr), mkU64(n)), get_gpr_b3(r1));
+   }
+
+   return "stcmh";
+}
+
+/* STH: store halfword 3 (the lowest halfword) of GR r1 at op2addr. */
+static const HChar *
+s390_irgen_STH(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_hw3(r1));
+
+   return "sth";
+}
+
+/* STHY: same as STH (store halfword 3 of r1); long-displacement
+   encoding. */
+static const HChar *
+s390_irgen_STHY(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_hw3(r1));
+
+   return "sthy";
+}
+
+/* STHRL: store halfword 3 of GR r1 at the PC-relative address
+   current-insn + sign-extended(i2) * 2 (i2 counts halfwords). */
+static const HChar *
+s390_irgen_STHRL(UChar r1, UInt i2)
+{
+   store(mkU64(guest_IA_curr_instr + ((ULong)(Long)(Int)i2 << 1)),
+         get_gpr_hw3(r1));
+
+   return "sthrl";
+}
+
+/* STHH: store halfword 1 of GR r1 (low half of the high word) at
+   op2addr. */
+static const HChar *
+s390_irgen_STHH(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_hw1(r1));
+
+   return "sthh";
+}
+
+/* STFH: store word 0 (the high word) of GR r1 at op2addr. */
+static const HChar *
+s390_irgen_STFH(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_w0(r1));
+
+   return "stfh";
+}
+
+static const HChar *
+s390_irgen_STOC(UChar r1, IRTemp op2addr)
+{
+   /* condition is checked in format handler */
+   store(mkexpr(op2addr), get_gpr_w1(r1));
+
+   return "stoc";
+}
+
+static const HChar *
+s390_irgen_STOCG(UChar r1, IRTemp op2addr)
+{
+   /* condition is checked in format handler */
+   store(mkexpr(op2addr), get_gpr_dw0(r1));
+
+   return "stocg";
+}
+
+static const HChar *
+s390_irgen_STPQ(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_dw0(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(8)), get_gpr_dw0(r1 + 1));
+
+   return "stpq";
+}
+
+/* STRVH (store reversed halfword): write r1's low halfword with its two
+   bytes swapped (byte 7 first, then byte 6). */
+static const HChar *
+s390_irgen_STRVH(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_b7(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(1)), get_gpr_b6(r1));
+
+   return "strvh";
+}
+
+/* STRV (store reversed word): write r1's low word byte-swapped, one byte
+   store per memory location (bytes 7,6,5,4 of the register in that order). */
+static const HChar *
+s390_irgen_STRV(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_b7(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(1)), get_gpr_b6(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(2)), get_gpr_b5(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(3)), get_gpr_b4(r1));
+
+   return "strv";
+}
+
+/* STRVG (store reversed doubleword): write all 8 bytes of r1 in reversed
+   order (byte 7 first ... byte 0 last). */
+static const HChar *
+s390_irgen_STRVG(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_gpr_b7(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(1)), get_gpr_b6(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(2)), get_gpr_b5(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(3)), get_gpr_b4(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(4)), get_gpr_b3(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(5)), get_gpr_b2(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(6)), get_gpr_b1(r1));
+   store(binop(Iop_Add64, mkexpr(op2addr), mkU64(7)), get_gpr_b0(r1));
+
+   return "strvg";
+}
+
+/* SR: 32-bit signed subtract, r1.w1 -= r2.w1.  The operands are captured in
+   temps first so the cc thunk sees the pre-update values even though r1 is
+   overwritten afterwards. */
+static const HChar *
+s390_irgen_SR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "sr";
+}
+
+/* SGR: 64-bit signed subtract, r1 -= r2. */
+static const HChar *
+s390_irgen_SGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "sgr";
+}
+
+/* SGFR: 64-bit subtract of r2's low word sign-extended to 64 bits. */
+static const HChar *
+s390_irgen_SGFR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Sto64, get_gpr_w1(r2)));
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "sgfr";
+}
+
+/* SRK (distinct-operands form): r1 = r2 - r3, 32-bit signed. */
+static const HChar *
+s390_irgen_SRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op2, op3);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "srk";
+}
+
+/* SGRK (distinct-operands form): r1 = r2 - r3, 64-bit signed. */
+static const HChar *
+s390_irgen_SGRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Sub64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op2, op3);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "sgrk";
+}
+
+/* S: subtract 32-bit memory operand at op2addr from r1.w1; CC from signed
+   subtraction. */
+static const HChar *
+s390_irgen_S(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "s";
+}
+
+/* SY: long-displacement form of S; same IR. */
+static const HChar *
+s390_irgen_SY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "sy";
+}
+
+/* SG: subtract 64-bit memory operand from r1. */
+static const HChar *
+s390_irgen_SG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "sg";
+}
+
+/* SGF: subtract a 32-bit memory operand, sign-extended to 64 bits, from r1. */
+static const HChar *
+s390_irgen_SGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "sgf";
+}
+
+/* SH: subtract a halfword memory operand, sign-extended to 32 bits. */
+static const HChar *
+s390_irgen_SH(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "sh";
+}
+
+/* SHY: long-displacement form of SH; same IR. */
+static const HChar *
+s390_irgen_SHY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, unop(Iop_16Sto32, load(Ity_I16, mkexpr(op2addr))));
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "shy";
+}
+
+/* SHHHR (high-high subtract): r1.w0 = r1.w0 - r2.w0, signed 32-bit.  Note
+   the operands are read from the high words of r1 and r2; r3 is unused. */
+static const HChar *
+s390_irgen_SHHHR(UChar r3 __attribute__((unused)), UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w0(r1));
+   assign(op3, get_gpr_w0(r2));
+   assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op2, op3);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "shhhr";
+}
+
+/* SHHLR (high-low subtract): r1.w0 = r1.w0 - r2.w1, signed 32-bit. */
+static const HChar *
+s390_irgen_SHHLR(UChar r3 __attribute__((unused)), UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w0(r1));
+   assign(op3, get_gpr_w1(r2));
+   assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putSS(S390_CC_OP_SIGNED_SUB_32, op2, op3);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "shhlr";
+}
+
+/* SLR: 32-bit logical (unsigned) subtract, r1.w1 -= r2.w1; CC from the
+   unsigned-sub thunk. */
+static const HChar *
+s390_irgen_SLR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "slr";
+}
+
+/* SLGR: 64-bit logical subtract, r1 -= r2. */
+static const HChar *
+s390_irgen_SLGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "slgr";
+}
+
+/* SLGFR: 64-bit logical subtract of r2's low word zero-extended to 64 bits. */
+static const HChar *
+s390_irgen_SLGFR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, get_gpr_w1(r2)));
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "slgfr";
+}
+
+/* SLRK (distinct-operands form): r1 = r2 - r3, 32-bit logical. */
+static const HChar *
+s390_irgen_SLRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   assign(op3, get_gpr_w1(r3));
+   assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op2, op3);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "slrk";
+}
+
+/* SLGRK (distinct-operands form): r1 = r2 - r3, 64-bit logical. */
+static const HChar *
+s390_irgen_SLGRK(UChar r3, UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   assign(op3, get_gpr_dw0(r3));
+   assign(result, binop(Iop_Sub64, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op2, op3);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "slgrk";
+}
+
+/* SL: 32-bit logical subtract of the memory operand at op2addr from r1.w1. */
+static const HChar *
+s390_irgen_SL(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "sl";
+}
+
+/* SLY: long-displacement form of SL; same IR. */
+static const HChar *
+s390_irgen_SLY(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op1, op2);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "sly";
+}
+
+/* SLG: 64-bit logical subtract of the memory operand from r1. */
+static const HChar *
+s390_irgen_SLG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "slg";
+}
+
+/* SLGF: 64-bit logical subtract of a 32-bit memory operand zero-extended
+   to 64 bits. */
+static const HChar *
+s390_irgen_SLGF(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, unop(Iop_32Uto64, load(Ity_I32, mkexpr(op2addr))));
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, op2);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "slgf";
+}
+
+/* SLFI: 32-bit logical subtract of the immediate i2 from r1.w1.  The
+   immediate is wrapped in a temp (mktemp) solely to feed the cc thunk. */
+static const HChar *
+s390_irgen_SLFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   UInt op2;
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   op2 = i2;
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkU32(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op1, mktemp(Ity_I32,
+                       mkU32(op2)));
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "slfi";
+}
+
+/* SLGFI: 64-bit logical subtract of the zero-extended 32-bit immediate. */
+static const HChar *
+s390_irgen_SLGFI(UChar r1, UInt i2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   ULong op2;
+   IRTemp result = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   op2 = (ULong)i2;
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkU64(op2)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_64, op1, mktemp(Ity_I64,
+                       mkU64(op2)));
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "slgfi";
+}
+
+/* SLHHHR: r1.w0 = r1.w0 - r2.w0, 32-bit logical (high words); r3 unused. */
+static const HChar *
+s390_irgen_SLHHHR(UChar r3 __attribute__((unused)), UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w0(r1));
+   assign(op3, get_gpr_w0(r2));
+   assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op2, op3);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "slhhhr";
+}
+
+/* SLHHLR: r1.w0 = r1.w0 - r2.w1, 32-bit logical (high minus low). */
+static const HChar *
+s390_irgen_SLHHLR(UChar r3 __attribute__((unused)), UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w0(r1));
+   assign(op3, get_gpr_w1(r2));
+   assign(result, binop(Iop_Sub32, mkexpr(op2), mkexpr(op3)));
+   s390_cc_thunk_putZZ(S390_CC_OP_UNSIGNED_SUB_32, op2, op3);
+   put_gpr_w0(r1, mkexpr(result));
+
+   return "slhhlr";
+}
+
+/* SLBR (subtract logical with borrow, 32 bit): result = op1 - op2 -
+   borrow_in.  The incoming borrow is derived from the current condition
+   code as 1 - (cc >> 1), i.e. cc values with bit 1 set contribute no
+   borrow.  The thunk then records op1/op2/borrow_in for the next CC. */
+static const HChar *
+s390_irgen_SLBR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp borrow_in = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, get_gpr_w1(r2));
+   assign(borrow_in, binop(Iop_Sub32, mkU32(1), binop(Iop_Shr32,
+          s390_call_calculate_cc(), mkU8(1))));
+   assign(result, binop(Iop_Sub32, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)),
+          mkexpr(borrow_in)));
+   s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_SUBB_32, op1, op2, borrow_in);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "slbr";
+}
+
+/* SLBGR: 64-bit register form of SLBR; the borrow (computed in 32 bits) is
+   zero-extended to 64 bits before use. */
+static const HChar *
+s390_irgen_SLBGR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp borrow_in = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, get_gpr_dw0(r2));
+   assign(borrow_in, unop(Iop_32Uto64, binop(Iop_Sub32, mkU32(1),
+          binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1)))));
+   assign(result, binop(Iop_Sub64, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)),
+          mkexpr(borrow_in)));
+   s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_SUBB_64, op1, op2, borrow_in);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "slbgr";
+}
+
+/* SLB: memory form of SLBR (32 bit); op2 is loaded from op2addr. */
+static const HChar *
+s390_irgen_SLB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp op2 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp borrow_in = newTemp(Ity_I32);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op2, load(Ity_I32, mkexpr(op2addr)));
+   assign(borrow_in, binop(Iop_Sub32, mkU32(1), binop(Iop_Shr32,
+          s390_call_calculate_cc(), mkU8(1))));
+   assign(result, binop(Iop_Sub32, binop(Iop_Sub32, mkexpr(op1), mkexpr(op2)),
+          mkexpr(borrow_in)));
+   s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_SUBB_32, op1, op2, borrow_in);
+   put_gpr_w1(r1, mkexpr(result));
+
+   return "slb";
+}
+
+/* SLBG: memory form of SLBGR (64 bit). */
+static const HChar *
+s390_irgen_SLBG(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp op2 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp borrow_in = newTemp(Ity_I64);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op2, load(Ity_I64, mkexpr(op2addr)));
+   assign(borrow_in, unop(Iop_32Uto64, binop(Iop_Sub32, mkU32(1),
+          binop(Iop_Shr32, s390_call_calculate_cc(), mkU8(1)))));
+   assign(result, binop(Iop_Sub64, binop(Iop_Sub64, mkexpr(op1), mkexpr(op2)),
+          mkexpr(borrow_in)));
+   s390_cc_thunk_putZZZ(S390_CC_OP_UNSIGNED_SUBB_64, op1, op2, borrow_in);
+   put_gpr_dw0(r1, mkexpr(result));
+
+   return "slbg";
+}
+
+/* SVC (supervisor call): the system call number is the immediate i; for
+   SVC 0 the number is taken from the low word of general register 1. */
+static const HChar *
+s390_irgen_SVC(UChar i)
+{
+   IRTemp sysno = newTemp(Ity_I64);
+
+   if (i != 0) {
+      assign(sysno, mkU64(i));
+   } else {
+      assign(sysno, unop(Iop_32Uto64, get_gpr_w1(1)));
+   }
+   system_call(mkexpr(sysno));
+
+   return "svc";
+}
+
+/* TM (test under mask): load the byte at op1addr and set the CC thunk from
+   the (value, mask) pair.  No register or memory state is modified. */
+static const HChar *
+s390_irgen_TM(UChar i2, IRTemp op1addr)
+{
+   UChar mask;
+   IRTemp value = newTemp(Ity_I8);
+
+   mask = i2;
+   assign(value, load(Ity_I8, mkexpr(op1addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_8, value, mktemp(Ity_I8,
+                       mkU8(mask)));
+
+   return "tm";
+}
+
+/* TMY: long-displacement form of TM; same IR. */
+static const HChar *
+s390_irgen_TMY(UChar i2, IRTemp op1addr)
+{
+   UChar mask;
+   IRTemp value = newTemp(Ity_I8);
+
+   mask = i2;
+   assign(value, load(Ity_I8, mkexpr(op1addr)));
+   s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_8, value, mktemp(Ity_I8,
+                       mkU8(mask)));
+
+   return "tmy";
+}
+
+/* TMHH: test halfword 0 (bits 0-15) of r1 under the 16-bit mask i2. */
+static const HChar *
+s390_irgen_TMHH(UChar r1, UShort i2)
+{
+   UShort mask;
+   IRTemp value = newTemp(Ity_I16);
+
+   mask = i2;
+   assign(value, get_gpr_hw0(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_16, value, mktemp(Ity_I16,
+                       mkU16(mask)));
+
+   return "tmhh";
+}
+
+/* TMHL: test halfword 1 (bits 16-31) of r1 under mask. */
+static const HChar *
+s390_irgen_TMHL(UChar r1, UShort i2)
+{
+   UShort mask;
+   IRTemp value = newTemp(Ity_I16);
+
+   mask = i2;
+   assign(value, get_gpr_hw1(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_16, value, mktemp(Ity_I16,
+                       mkU16(mask)));
+
+   return "tmhl";
+}
+
+/* TMLH: test halfword 2 (bits 32-47) of r1 under mask. */
+static const HChar *
+s390_irgen_TMLH(UChar r1, UShort i2)
+{
+   UShort mask;
+   IRTemp value = newTemp(Ity_I16);
+
+   mask = i2;
+   assign(value, get_gpr_hw2(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_16, value, mktemp(Ity_I16,
+                       mkU16(mask)));
+
+   return "tmlh";
+}
+
+/* TMLL: test halfword 3 (bits 48-63) of r1 under mask. */
+static const HChar *
+s390_irgen_TMLL(UChar r1, UShort i2)
+{
+   UShort mask;
+   IRTemp value = newTemp(Ity_I16);
+
+   mask = i2;
+   assign(value, get_gpr_hw3(r1));
+   s390_cc_thunk_putZZ(S390_CC_OP_TEST_UNDER_MASK_16, value, mktemp(Ity_I16,
+                       mkU16(mask)));
+
+   return "tmll";
+}
+
+/* EFPC (extract FPC): copy the floating-point-control word into r1.w1. */
+static const HChar *
+s390_irgen_EFPC(UChar r1)
+{
+   put_gpr_w1(r1, get_fpc_w0());
+
+   return "efpc";
+}
+
+/* LER: copy the short (32-bit) part of FPR r2 to FPR r1. */
+static const HChar *
+s390_irgen_LER(UChar r1, UChar r2)
+{
+   put_fpr_w0(r1, get_fpr_w0(r2));
+
+   return "ler";
+}
+
+/* LDR: copy the long (64-bit) part of FPR r2 to FPR r1. */
+static const HChar *
+s390_irgen_LDR(UChar r1, UChar r2)
+{
+   put_fpr_dw0(r1, get_fpr_dw0(r2));
+
+   return "ldr";
+}
+
+/* LXR: copy an extended (128-bit) value held in the FPR pair r2/r2+2 to the
+   pair r1/r1+2. */
+static const HChar *
+s390_irgen_LXR(UChar r1, UChar r2)
+{
+   put_fpr_dw0(r1, get_fpr_dw0(r2));
+   put_fpr_dw0(r1 + 2, get_fpr_dw0(r2 + 2));
+
+   return "lxr";
+}
+
+/* LE: load a short BFP value from memory into FPR r1. */
+static const HChar *
+s390_irgen_LE(UChar r1, IRTemp op2addr)
+{
+   put_fpr_w0(r1, load(Ity_F32, mkexpr(op2addr)));
+
+   return "le";
+}
+
+/* LD: load a long BFP value from memory into FPR r1. */
+static const HChar *
+s390_irgen_LD(UChar r1, IRTemp op2addr)
+{
+   put_fpr_dw0(r1, load(Ity_F64, mkexpr(op2addr)));
+
+   return "ld";
+}
+
+/* LEY: long-displacement form of LE; same IR. */
+static const HChar *
+s390_irgen_LEY(UChar r1, IRTemp op2addr)
+{
+   put_fpr_w0(r1, load(Ity_F32, mkexpr(op2addr)));
+
+   return "ley";
+}
+
+/* LDY: long-displacement form of LD; same IR. */
+static const HChar *
+s390_irgen_LDY(UChar r1, IRTemp op2addr)
+{
+   put_fpr_dw0(r1, load(Ity_F64, mkexpr(op2addr)));
+
+   return "ldy";
+}
+
+/* LFPC: load the floating-point-control word from memory. */
+static const HChar *
+s390_irgen_LFPC(IRTemp op2addr)
+{
+   put_fpc_w0(load(Ity_I32, mkexpr(op2addr)));
+
+   return "lfpc";
+}
+
+/* LZER: load short +0.0 into FPR r1. */
+static const HChar *
+s390_irgen_LZER(UChar r1)
+{
+   put_fpr_w0(r1, mkF32i(0x0));
+
+   return "lzer";
+}
+
+/* LZDR: load long +0.0 into FPR r1. */
+static const HChar *
+s390_irgen_LZDR(UChar r1)
+{
+   put_fpr_dw0(r1, mkF64i(0x0));
+
+   return "lzdr";
+}
+
+/* LZXR: load extended +0.0 into the FPR pair r1/r1+2. */
+static const HChar *
+s390_irgen_LZXR(UChar r1)
+{
+   put_fpr_dw0(r1, mkF64i(0x0));
+   put_fpr_dw0(r1 + 2, mkF64i(0x0));
+
+   return "lzxr";
+}
+
+/* SRNM (set BFP rounding mode): replace the low rounding-mode bits of the
+   FPC with the low bits of op2addr.  Only 2 input bits are taken; with the
+   floating-point extension facility 3 FPC bits are cleared first, otherwise
+   only 2. */
+static const HChar *
+s390_irgen_SRNM(IRTemp op2addr)
+{
+   UInt input_mask, fpc_mask;
+
+   input_mask = 3;
+   fpc_mask = s390_host_has_fpext ? 7 : 3;
+
+   put_fpc_w0(binop(Iop_Or32,
+                    binop(Iop_And32, get_fpc_w0(), mkU32(~fpc_mask)),
+                    binop(Iop_And32, unop(Iop_64to32, mkexpr(op2addr)),
+                          mkU32(input_mask))));
+   return "srnm";
+}
+
+/* SRNMB: like SRNM but takes a full 3-bit rounding mode.  Operand
+   validation is done beforehand in s390_irgen_srnmb_wrapper. */
+static const HChar *
+s390_irgen_SRNMB(IRTemp op2addr)
+{
+   UInt input_mask, fpc_mask;
+
+   input_mask = 7;
+   fpc_mask = 7;
+
+   put_fpc_w0(binop(Iop_Or32,
+                    binop(Iop_And32, get_fpc_w0(), mkU32(~fpc_mask)),
+                    binop(Iop_And32, unop(Iop_64to32, mkexpr(op2addr)),
+                          mkU32(input_mask))));
+   return "srnmb";
+}
+
+/* Validate SRNMB's operand when it is a plain immediate (b2 == 0): values
+   above 3 are only valid as 7, and only with the fpext facility.  Invalid
+   modes raise an emulation warning and fall back to round-to-nearest-even.
+   Operands involving a base register (b2 != 0) are passed through
+   unchecked. */
+static void
+s390_irgen_srnmb_wrapper(UChar b2, UShort d2)
+{
+   if (b2 == 0) {  /* This is the typical case */
+      if (d2 > 3) {
+         if (s390_host_has_fpext && d2 == 7) {
+            /* ok */
+         } else {
+            emulation_warning(EmWarn_S390X_invalid_rounding);
+            d2 = S390_FPC_BFP_ROUND_NEAREST_EVEN;
+         }
+      }
+   }
+
+   s390_format_S_RD(s390_irgen_SRNMB, b2, d2);
+}
+
+/* Wrapper to validate the parameter as in SRNMB is not required, as all
+   the 8 values in op2addr[61:63] correspond to a valid DFP rounding mode */
+static const HChar *
+s390_irgen_SRNMT(IRTemp op2addr)
+{
+   UInt input_mask, fpc_mask;
+
+   input_mask = 7;
+   fpc_mask = 0x70;
+
+   /* fpc[25:27] <- op2addr[61:63]
+      fpc = (fpc & ~(0x70)) | ((op2addr & 7) << 4) */
+   put_fpc_w0(binop(Iop_Or32, binop(Iop_And32, get_fpc_w0(), mkU32(~fpc_mask)),
+                    binop(Iop_Shl32, binop(Iop_And32,
+                                           unop(Iop_64to32, mkexpr(op2addr)),
+                                           mkU32(input_mask)), mkU8(4))));
+   return "srnmt";
+}
+
+
+/* SFPC (set FPC): load the floating-point-control word from r1.w1. */
+static const HChar *
+s390_irgen_SFPC(UChar r1)
+{
+   put_fpc_w0(get_gpr_w1(r1));
+
+   return "sfpc";
+}
+
+/* STE: store the short (32-bit) part of FPR r1 to memory. */
+static const HChar *
+s390_irgen_STE(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_fpr_w0(r1));
+
+   return "ste";
+}
+
+/* STD: store the long (64-bit) part of FPR r1 to memory. */
+static const HChar *
+s390_irgen_STD(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_fpr_dw0(r1));
+
+   return "std";
+}
+
+/* STEY: long-displacement form of STE; same IR. */
+static const HChar *
+s390_irgen_STEY(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_fpr_w0(r1));
+
+   return "stey";
+}
+
+/* STDY: long-displacement form of STD; same IR. */
+static const HChar *
+s390_irgen_STDY(UChar r1, IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_fpr_dw0(r1));
+
+   return "stdy";
+}
+
+/* STFPC: store the floating-point-control word to memory. */
+static const HChar *
+s390_irgen_STFPC(IRTemp op2addr)
+{
+   store(mkexpr(op2addr), get_fpc_w0());
+
+   return "stfpc";
+}
+
+/* AEBR: short BFP add, r1 += r2, rounding per the FPC; CC derived from the
+   result via the BFP-result thunk. */
+static const HChar *
+s390_irgen_AEBR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_F32);
+   IRTemp op2 = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_F32);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_w0(r1));
+   assign(op2, get_fpr_w0(r2));
+   assign(result, triop(Iop_AddF32, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+   put_fpr_w0(r1, mkexpr(result));
+
+   return "aebr";
+}
+
+/* ADBR: long BFP add, r1 += r2, rounding per the FPC. */
+static const HChar *
+s390_irgen_ADBR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_F64);
+   IRTemp op2 = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_F64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_dw0(r1));
+   assign(op2, get_fpr_dw0(r2));
+   assign(result, triop(Iop_AddF64, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+   put_fpr_dw0(r1, mkexpr(result));
+
+   return "adbr";
+}
+
+/* AEB: short BFP add with the second operand loaded from memory. */
+static const HChar *
+s390_irgen_AEB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_F32);
+   IRTemp op2 = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_F32);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_w0(r1));
+   assign(op2, load(Ity_F32, mkexpr(op2addr)));
+   assign(result, triop(Iop_AddF32, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+   put_fpr_w0(r1, mkexpr(result));
+
+   return "aeb";
+}
+
+/* ADB: long BFP add with the second operand loaded from memory. */
+static const HChar *
+s390_irgen_ADB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_F64);
+   IRTemp op2 = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_F64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_dw0(r1));
+   assign(op2, load(Ity_F64, mkexpr(op2addr)));
+   assign(result, triop(Iop_AddF64, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+   put_fpr_dw0(r1, mkexpr(result));
+
+   return "adb";
+}
+
+/* CEFBR: convert signed int32 (r2.w1) to short BFP in r1.  A non-default
+   rounding mode m3 requires the fpext facility; otherwise warn and use the
+   FPC's mode. */
+static const HChar *
+s390_irgen_CEFBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext && m3 != S390_BFP_ROUND_PER_FPC) {
+      emulation_warning(EmWarn_S390X_fpext_rounding);
+      m3 = S390_BFP_ROUND_PER_FPC;
+   }
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   put_fpr_w0(r1, binop(Iop_I32StoF32, mkexpr(encode_bfp_rounding_mode(m3)),
+                        mkexpr(op2)));
+
+   return "cefbr";
+}
+
+/* CDFBR: convert signed int32 to long BFP.  The conversion is exact, so no
+   rounding mode is involved (unop form). */
+static const HChar *
+s390_irgen_CDFBR(UChar m3 __attribute__((unused)),
+                 UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   IRTemp op2 = newTemp(Ity_I32);
+
+   assign(op2, get_gpr_w1(r2));
+   put_fpr_dw0(r1, unop(Iop_I32StoF64, mkexpr(op2)));
+
+   return "cdfbr";
+}
+
+/* CEGBR: convert signed int64 to short BFP; same fpext rounding-mode rule
+   as CEFBR. */
+static const HChar *
+s390_irgen_CEGBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext && m3 != S390_BFP_ROUND_PER_FPC) {
+      emulation_warning(EmWarn_S390X_fpext_rounding);
+      m3 = S390_BFP_ROUND_PER_FPC;
+   }
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   put_fpr_w0(r1, binop(Iop_I64StoF32, mkexpr(encode_bfp_rounding_mode(m3)),
+                        mkexpr(op2)));
+
+   return "cegbr";
+}
+
+/* CDGBR: convert signed int64 to long BFP; same fpext rounding-mode rule. */
+static const HChar *
+s390_irgen_CDGBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext && m3 != S390_BFP_ROUND_PER_FPC) {
+      emulation_warning(EmWarn_S390X_fpext_rounding);
+      m3 = S390_BFP_ROUND_PER_FPC;
+   }
+   IRTemp op2 = newTemp(Ity_I64);
+
+   assign(op2, get_gpr_dw0(r2));
+   put_fpr_dw0(r1, binop(Iop_I64StoF64, mkexpr(encode_bfp_rounding_mode(m3)),
+                         mkexpr(op2)));
+
+   return "cdgbr";
+}
+
+/* CELFBR: convert unsigned int32 to short BFP.  The instruction itself
+   requires the fpext facility; without it, fail the emulation. */
+static const HChar *
+s390_irgen_CELFBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+   } else {
+      IRTemp op2 = newTemp(Ity_I32);
+
+      assign(op2, get_gpr_w1(r2));
+      put_fpr_w0(r1, binop(Iop_I32UtoF32, mkexpr(encode_bfp_rounding_mode(m3)),
+                           mkexpr(op2)));
+   }
+   return "celfbr";
+}
+
+/* CDLFBR: convert unsigned int32 to long BFP (exact, unop form); requires
+   fpext. */
+static const HChar *
+s390_irgen_CDLFBR(UChar m3 __attribute__((unused)),
+                  UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+   } else {
+      IRTemp op2 = newTemp(Ity_I32);
+
+      assign(op2, get_gpr_w1(r2));
+      put_fpr_dw0(r1, unop(Iop_I32UtoF64, mkexpr(op2)));
+   }
+   return "cdlfbr";
+}
+
+/* CELGBR: convert unsigned int64 to short BFP; requires fpext. */
+static const HChar *
+s390_irgen_CELGBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+   } else {
+      IRTemp op2 = newTemp(Ity_I64);
+
+      assign(op2, get_gpr_dw0(r2));
+      put_fpr_w0(r1, binop(Iop_I64UtoF32, mkexpr(encode_bfp_rounding_mode(m3)),
+                           mkexpr(op2)));
+   }
+   return "celgbr";
+}
+
+/* CDLGBR: convert unsigned int64 to long BFP; requires fpext. */
+static const HChar *
+s390_irgen_CDLGBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+   } else {
+      IRTemp op2 = newTemp(Ity_I64);
+
+      assign(op2, get_gpr_dw0(r2));
+      put_fpr_dw0(r1, binop(Iop_I64UtoF64,
+                            mkexpr(encode_bfp_rounding_mode(m3)),
+                            mkexpr(op2)));
+   }
+   return "cdlgbr";
+}
+
+/* CLFEBR: convert short BFP to unsigned int32 with rounding mode m3;
+   requires the fpext facility.  CC comes from the (operand, rounding-mode)
+   thunk. */
+static const HChar *
+s390_irgen_CLFEBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+   } else {
+      IRTemp op = newTemp(Ity_F32);
+      IRTemp result = newTemp(Ity_I32);
+      IRTemp rounding_mode = encode_bfp_rounding_mode(m3);
+
+      assign(op, get_fpr_w0(r2));
+      assign(result, binop(Iop_F32toI32U, mkexpr(rounding_mode),
+                           mkexpr(op)));
+      put_gpr_w1(r1, mkexpr(result));
+      s390_cc_thunk_putFZ(S390_CC_OP_BFP_32_TO_UINT_32, op, rounding_mode);
+   }
+   return "clfebr";
+}
+
+/* CLFDBR: convert long BFP to unsigned int32; requires fpext. */
+static const HChar *
+s390_irgen_CLFDBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+   } else {
+      IRTemp op = newTemp(Ity_F64);
+      IRTemp result = newTemp(Ity_I32);
+      IRTemp rounding_mode = encode_bfp_rounding_mode(m3);
+
+      assign(op, get_fpr_dw0(r2));
+      assign(result, binop(Iop_F64toI32U, mkexpr(rounding_mode),
+                           mkexpr(op)));
+      put_gpr_w1(r1, mkexpr(result));
+      s390_cc_thunk_putFZ(S390_CC_OP_BFP_64_TO_UINT_32, op, rounding_mode);
+   }
+   return "clfdbr";
+}
+
+/* CLGEBR: convert short BFP to unsigned int64; requires fpext. */
+static const HChar *
+s390_irgen_CLGEBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+   } else {
+      IRTemp op = newTemp(Ity_F32);
+      IRTemp result = newTemp(Ity_I64);
+      IRTemp rounding_mode = encode_bfp_rounding_mode(m3);
+
+      assign(op, get_fpr_w0(r2));
+      assign(result, binop(Iop_F32toI64U, mkexpr(rounding_mode),
+                           mkexpr(op)));
+      put_gpr_dw0(r1, mkexpr(result));
+      s390_cc_thunk_putFZ(S390_CC_OP_BFP_32_TO_UINT_64, op, rounding_mode);
+   }
+   return "clgebr";
+}
+
+/* CLGDBR: convert long BFP to unsigned int64; requires fpext. */
+static const HChar *
+s390_irgen_CLGDBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+   } else {
+      IRTemp op = newTemp(Ity_F64);
+      IRTemp result = newTemp(Ity_I64);
+      IRTemp rounding_mode = encode_bfp_rounding_mode(m3);
+
+      assign(op, get_fpr_dw0(r2));
+      assign(result, binop(Iop_F64toI64U, mkexpr(rounding_mode),
+                           mkexpr(op)));
+      put_gpr_dw0(r1, mkexpr(result));
+      s390_cc_thunk_putFZ(S390_CC_OP_BFP_64_TO_UINT_64, op, rounding_mode);
+   }
+   return "clgdbr";
+}
+
+/* CFEBR: convert short BFP to signed int32 with rounding mode m3; CC from
+   the (operand, rounding-mode) thunk. */
+static const HChar *
+s390_irgen_CFEBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(m3);
+
+   assign(op, get_fpr_w0(r2));
+   assign(result, binop(Iop_F32toI32S, mkexpr(rounding_mode),
+          mkexpr(op)));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putFZ(S390_CC_OP_BFP_32_TO_INT_32, op, rounding_mode);
+
+   return "cfebr";
+}
+
+/* CFDBR: convert long BFP to signed int32. */
+static const HChar *
+s390_irgen_CFDBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(m3);
+
+   assign(op, get_fpr_dw0(r2));
+   assign(result, binop(Iop_F64toI32S, mkexpr(rounding_mode),
+          mkexpr(op)));
+   put_gpr_w1(r1, mkexpr(result));
+   s390_cc_thunk_putFZ(S390_CC_OP_BFP_64_TO_INT_32, op, rounding_mode);
+
+   return "cfdbr";
+}
+
+/* CGEBR: convert short BFP to signed int64. */
+static const HChar *
+s390_irgen_CGEBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(m3);
+
+   assign(op, get_fpr_w0(r2));
+   assign(result, binop(Iop_F32toI64S, mkexpr(rounding_mode),
+          mkexpr(op)));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putFZ(S390_CC_OP_BFP_32_TO_INT_64, op, rounding_mode);
+
+   return "cgebr";
+}
+
+/* CGDBR: convert long BFP to signed int64. */
+static const HChar *
+s390_irgen_CGDBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(m3);
+
+   assign(op, get_fpr_dw0(r2));
+   assign(result, binop(Iop_F64toI64S, mkexpr(rounding_mode),
+          mkexpr(op)));
+   put_gpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putFZ(S390_CC_OP_BFP_64_TO_INT_64, op, rounding_mode);
+
+   return "cgdbr";
+}
+
+static const HChar *
+s390_irgen_DEBR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_F32);
+   IRTemp op2 = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_F32);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_w0(r1));
+   assign(op2, get_fpr_w0(r2));
+   assign(result, triop(Iop_DivF32, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   put_fpr_w0(r1, mkexpr(result));
+
+   return "debr";
+}
+
+static const HChar *
+s390_irgen_DDBR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_F64);
+   IRTemp op2 = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_F64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_dw0(r1));
+   assign(op2, get_fpr_dw0(r2));
+   assign(result, triop(Iop_DivF64, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   put_fpr_dw0(r1, mkexpr(result));
+
+   return "ddbr";
+}
+
+static const HChar *
+s390_irgen_DEB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_F32);
+   IRTemp op2 = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_F32);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_w0(r1));
+   assign(op2, load(Ity_F32, mkexpr(op2addr)));
+   assign(result, triop(Iop_DivF32, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   put_fpr_w0(r1, mkexpr(result));
+
+   return "deb";
+}
+
+static const HChar *
+s390_irgen_DDB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_F64);
+   IRTemp op2 = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_F64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_dw0(r1));
+   assign(op2, load(Ity_F64, mkexpr(op2addr)));
+   assign(result, triop(Iop_DivF64, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   put_fpr_dw0(r1, mkexpr(result));
+
+   return "ddb";
+}
+
+static const HChar *
+s390_irgen_LTEBR(UChar r1, UChar r2)
+{
+   IRTemp result = newTemp(Ity_F32);
+
+   assign(result, get_fpr_w0(r2));
+   put_fpr_w0(r1, mkexpr(result));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+
+   return "ltebr";
+}
+
+static const HChar *
+s390_irgen_LTDBR(UChar r1, UChar r2)
+{
+   IRTemp result = newTemp(Ity_F64);
+
+   assign(result, get_fpr_dw0(r2));
+   put_fpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+
+   return "ltdbr";
+}
+
+static const HChar *
+s390_irgen_LCEBR(UChar r1, UChar r2)
+{
+   IRTemp result = newTemp(Ity_F32);
+
+   assign(result, unop(Iop_NegF32, get_fpr_w0(r2)));
+   put_fpr_w0(r1, mkexpr(result));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+
+   return "lcebr";
+}
+
+static const HChar *
+s390_irgen_LCDBR(UChar r1, UChar r2)
+{
+   IRTemp result = newTemp(Ity_F64);
+
+   assign(result, unop(Iop_NegF64, get_fpr_dw0(r2)));
+   put_fpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+
+   return "lcdbr";
+}
+
+static const HChar *
+s390_irgen_LDEBR(UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_F32);
+
+   assign(op, get_fpr_w0(r2));
+   put_fpr_dw0(r1, unop(Iop_F32toF64, mkexpr(op)));
+
+   return "ldebr";
+}
+
+static const HChar *
+s390_irgen_LDEB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op = newTemp(Ity_F32);
+
+   assign(op, load(Ity_F32, mkexpr(op2addr)));
+   put_fpr_dw0(r1, unop(Iop_F32toF64, mkexpr(op)));
+
+   return "ldeb";
+}
+
+static const HChar *
+s390_irgen_LEDBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_fpext && m3 != S390_BFP_ROUND_PER_FPC) {
+      emulation_warning(EmWarn_S390X_fpext_rounding);
+      m3 = S390_BFP_ROUND_PER_FPC;
+   }
+   IRTemp op = newTemp(Ity_F64);
+
+   assign(op, get_fpr_dw0(r2));
+   put_fpr_w0(r1, binop(Iop_F64toF32, mkexpr(encode_bfp_rounding_mode(m3)),
+                        mkexpr(op)));
+
+   return "ledbr";
+}
+
+static const HChar *
+s390_irgen_MEEBR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_F32);
+   IRTemp op2 = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_F32);
+   IRRoundingMode rounding_mode =
+      encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_w0(r1));
+   assign(op2, get_fpr_w0(r2));
+   assign(result, triop(Iop_MulF32, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   put_fpr_w0(r1, mkexpr(result));
+
+   return "meebr";
+}
+
+static const HChar *
+s390_irgen_MDBR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_F64);
+   IRTemp op2 = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_F64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_dw0(r1));
+   assign(op2, get_fpr_dw0(r2));
+   assign(result, triop(Iop_MulF64, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   put_fpr_dw0(r1, mkexpr(result));
+
+   return "mdbr";
+}
+
+static const HChar *
+s390_irgen_MEEB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_F32);
+   IRTemp op2 = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_F32);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_w0(r1));
+   assign(op2, load(Ity_F32, mkexpr(op2addr)));
+   assign(result, triop(Iop_MulF32, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   put_fpr_w0(r1, mkexpr(result));
+
+   return "meeb";
+}
+
+static const HChar *
+s390_irgen_MDB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_F64);
+   IRTemp op2 = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_F64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_dw0(r1));
+   assign(op2, load(Ity_F64, mkexpr(op2addr)));
+   assign(result, triop(Iop_MulF64, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   put_fpr_dw0(r1, mkexpr(result));
+
+   return "mdb";
+}
+
+static const HChar *
+s390_irgen_SEBR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_F32);
+   IRTemp op2 = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_F32);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_w0(r1));
+   assign(op2, get_fpr_w0(r2));
+   assign(result, triop(Iop_SubF32, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+   put_fpr_w0(r1, mkexpr(result));
+
+   return "sebr";
+}
+
+static const HChar *
+s390_irgen_SDBR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_F64);
+   IRTemp op2 = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_F64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_dw0(r1));
+   assign(op2, get_fpr_dw0(r2));
+   assign(result, triop(Iop_SubF64, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+   put_fpr_dw0(r1, mkexpr(result));
+
+   return "sdbr";
+}
+
+static const HChar *
+s390_irgen_SEB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_F32);
+   IRTemp op2 = newTemp(Ity_F32);
+   IRTemp result = newTemp(Ity_F32);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_w0(r1));
+   assign(op2, load(Ity_F32, mkexpr(op2addr)));
+   assign(result, triop(Iop_SubF32, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_32, result);
+   put_fpr_w0(r1, mkexpr(result));
+
+   return "seb";
+}
+
+static const HChar *
+s390_irgen_SDB(UChar r1, IRTemp op2addr)
+{
+   IRTemp op1 = newTemp(Ity_F64);
+   IRTemp op2 = newTemp(Ity_F64);
+   IRTemp result = newTemp(Ity_F64);
+   IRTemp rounding_mode = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(op1, get_fpr_dw0(r1));
+   assign(op2, load(Ity_F64, mkexpr(op2addr)));
+   assign(result, triop(Iop_SubF64, mkexpr(rounding_mode), mkexpr(op1),
+          mkexpr(op2)));
+   s390_cc_thunk_putF(S390_CC_OP_BFP_RESULT_64, result);
+   put_fpr_dw0(r1, mkexpr(result));
+
+   return "sdb";
+}
+
+static const HChar *
+s390_irgen_ADTRA(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D64);
+      IRTemp op2 = newTemp(Ity_D64);
+      IRTemp result = newTemp(Ity_D64);
+      IRTemp rounding_mode;
+
+      if (! s390_host_has_fpext && m4 != S390_DFP_ROUND_PER_FPC_0) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_dw0(r2));
+      assign(op2, get_dpr_dw0(r3));
+      assign(result, triop(Iop_AddD64, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      s390_cc_thunk_putF(S390_CC_OP_DFP_RESULT_64, result);
+      put_dpr_dw0(r1, mkexpr(result));
+   }
+   return (m4 == 0) ? "adtr" : "adtra";
+}
+
+static const HChar *
+s390_irgen_AXTRA(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D128);
+      IRTemp op2 = newTemp(Ity_D128);
+      IRTemp result = newTemp(Ity_D128);
+      IRTemp rounding_mode;
+
+      if (! s390_host_has_fpext && m4 != S390_DFP_ROUND_PER_FPC_0) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_pair(r2));
+      assign(op2, get_dpr_pair(r3));
+      assign(result, triop(Iop_AddD128, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      put_dpr_pair(r1, mkexpr(result));
+
+      s390_cc_thunk_put1d128(S390_CC_OP_DFP_RESULT_128, result);
+   }
+   return (m4 == 0) ? "axtr" : "axtra";
+}
+
+static const HChar *
+s390_irgen_CDTR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_D64);
+   IRTemp op2 = newTemp(Ity_D64);
+   IRTemp cc_vex  = newTemp(Ity_I32);
+   IRTemp cc_s390 = newTemp(Ity_I32);
+
+   assign(op1, get_dpr_dw0(r1));
+   assign(op2, get_dpr_dw0(r2));
+   assign(cc_vex, binop(Iop_CmpD64, mkexpr(op1), mkexpr(op2)));
+
+   assign(cc_s390, convert_vex_dfpcc_to_s390(cc_vex));
+   s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+   return "cdtr";
+}
+
+static const HChar *
+s390_irgen_CXTR(UChar r1, UChar r2)
+{
+   IRTemp op1 = newTemp(Ity_D128);
+   IRTemp op2 = newTemp(Ity_D128);
+   IRTemp cc_vex  = newTemp(Ity_I32);
+   IRTemp cc_s390 = newTemp(Ity_I32);
+
+   assign(op1, get_dpr_pair(r1));
+   assign(op2, get_dpr_pair(r2));
+   assign(cc_vex, binop(Iop_CmpD128, mkexpr(op1), mkexpr(op2)));
+
+   assign(cc_s390, convert_vex_dfpcc_to_s390(cc_vex));
+   s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+   return "cxtr";
+}
+
+static const HChar *
+s390_irgen_CDFTR(UChar m3 __attribute__((unused)),
+                 UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op2 = newTemp(Ity_I32);
+
+         assign(op2, get_gpr_w1(r2));
+         put_dpr_dw0(r1, unop(Iop_I32StoD64, mkexpr(op2)));
+      }
+   }
+   return "cdftr";
+}
+
+static const HChar *
+s390_irgen_CXFTR(UChar m3 __attribute__((unused)),
+                 UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op2 = newTemp(Ity_I32);
+
+         assign(op2, get_gpr_w1(r2));
+         put_dpr_pair(r1, unop(Iop_I32StoD128, mkexpr(op2)));
+      }
+   }
+   return "cxftr";
+}
+
+static const HChar *
+s390_irgen_CDGTRA(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op2 = newTemp(Ity_I64);
+
+      if (! s390_host_has_fpext && m3 != S390_DFP_ROUND_PER_FPC_0) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m3 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      assign(op2, get_gpr_dw0(r2));
+      put_dpr_dw0(r1, binop(Iop_I64StoD64, mkexpr(encode_dfp_rounding_mode(m3)),
+                            mkexpr(op2)));
+   }
+   return (m3 == 0) ? "cdgtr" : "cdgtra";
+}
+
+static const HChar *
+s390_irgen_CXGTR(UChar m3 __attribute__((unused)),
+                 UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op2 = newTemp(Ity_I64);
+
+      /* No emulation warning here about an non-zero m3 on hosts without
+         floating point extension facility. No rounding is performed */
+
+      assign(op2, get_gpr_dw0(r2));
+      put_dpr_pair(r1, unop(Iop_I64StoD128, mkexpr(op2)));
+   }
+   return "cxgtr";
+}
+
+static const HChar *
+s390_irgen_CDLFTR(UChar m3 __attribute__((unused)),
+                  UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op2 = newTemp(Ity_I32);
+
+         assign(op2, get_gpr_w1(r2));
+         put_dpr_dw0(r1, unop(Iop_I32UtoD64, mkexpr(op2)));
+      }
+   }
+   return "cdlftr";
+}
+
+static const HChar *
+s390_irgen_CXLFTR(UChar m3 __attribute__((unused)),
+                  UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op2 = newTemp(Ity_I32);
+
+         assign(op2, get_gpr_w1(r2));
+         put_dpr_pair(r1, unop(Iop_I32UtoD128, mkexpr(op2)));
+      }
+   }
+   return "cxlftr";
+}
+
+static const HChar *
+s390_irgen_CDLGTR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op2 = newTemp(Ity_I64);
+
+         assign(op2, get_gpr_dw0(r2));
+         put_dpr_dw0(r1, binop(Iop_I64UtoD64,
+                               mkexpr(encode_dfp_rounding_mode(m3)),
+                               mkexpr(op2)));
+      }
+   }
+   return "cdlgtr";
+}
+
+static const HChar *
+s390_irgen_CXLGTR(UChar m3 __attribute__((unused)),
+                  UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op2 = newTemp(Ity_I64);
+
+         assign(op2, get_gpr_dw0(r2));
+         put_dpr_pair(r1, unop(Iop_I64UtoD128, mkexpr(op2)));
+      }
+   }
+   return "cxlgtr";
+}
+
+static const HChar *
+s390_irgen_CFDTR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op = newTemp(Ity_D64);
+         IRTemp result = newTemp(Ity_I32);
+         IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+         assign(op, get_dpr_dw0(r2));
+         assign(result, binop(Iop_D64toI32S, mkexpr(rounding_mode),
+                              mkexpr(op)));
+         put_gpr_w1(r1, mkexpr(result));
+         s390_cc_thunk_putFZ(S390_CC_OP_DFP_64_TO_INT_32, op, rounding_mode);
+      }
+   }
+   return "cfdtr";
+}
+
+static const HChar *
+s390_irgen_CFXTR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op = newTemp(Ity_D128);
+         IRTemp result = newTemp(Ity_I32);
+         IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+         assign(op, get_dpr_pair(r2));
+         assign(result, binop(Iop_D128toI32S, mkexpr(rounding_mode),
+                              mkexpr(op)));
+         put_gpr_w1(r1, mkexpr(result));
+         s390_cc_thunk_put1d128Z(S390_CC_OP_DFP_128_TO_INT_32, op,
+                                 rounding_mode);
+      }
+   }
+   return "cfxtr";
+}
+
+static const HChar *
+s390_irgen_CGDTR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op = newTemp(Ity_D64);
+      IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+      /* If fpext is not installed and m3 is in 1:7,
+         rounding mode performed is unpredictable */
+      if (! s390_host_has_fpext && m3 > 0 && m3 < 8) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m3 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      assign(op, get_dpr_dw0(r2));
+      put_gpr_dw0(r1, binop(Iop_D64toI64S, mkexpr(rounding_mode), mkexpr(op)));
+      s390_cc_thunk_putFZ(S390_CC_OP_DFP_64_TO_INT_64, op, rounding_mode);
+   }
+   return "cgdtr";
+}
+
+static const HChar *
+s390_irgen_CGXTR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op = newTemp(Ity_D128);
+      IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+      /* If fpext is not installed and m3 is in 1:7,
+         rounding mode performed is unpredictable */
+      if (! s390_host_has_fpext && m3 > 0 && m3 < 8) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m3 = S390_DFP_ROUND_PER_FPC_0;
+      }
+      assign(op, get_dpr_pair(r2));
+      put_gpr_dw0(r1, binop(Iop_D128toI64S, mkexpr(rounding_mode), mkexpr(op)));
+      s390_cc_thunk_put1d128Z(S390_CC_OP_DFP_128_TO_INT_64, op, rounding_mode);
+   }
+   return "cgxtr";
+}
+
+static const HChar *
+s390_irgen_CEDTR(UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D64);
+      IRTemp op2 = newTemp(Ity_D64);
+      IRTemp cc_vex  = newTemp(Ity_I32);
+      IRTemp cc_s390 = newTemp(Ity_I32);
+
+      assign(op1, get_dpr_dw0(r1));
+      assign(op2, get_dpr_dw0(r2));
+      assign(cc_vex, binop(Iop_CmpExpD64, mkexpr(op1), mkexpr(op2)));
+
+      assign(cc_s390, convert_vex_dfpcc_to_s390(cc_vex));
+      s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+   }
+   return "cedtr";
+}
+
+static const HChar *
+s390_irgen_CEXTR(UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D128);
+      IRTemp op2 = newTemp(Ity_D128);
+      IRTemp cc_vex  = newTemp(Ity_I32);
+      IRTemp cc_s390 = newTemp(Ity_I32);
+
+      assign(op1, get_dpr_pair(r1));
+      assign(op2, get_dpr_pair(r2));
+      assign(cc_vex, binop(Iop_CmpExpD128, mkexpr(op1), mkexpr(op2)));
+
+      assign(cc_s390, convert_vex_dfpcc_to_s390(cc_vex));
+      s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+   }
+   return "cextr";
+}
+
+static const HChar *
+s390_irgen_CLFDTR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op = newTemp(Ity_D64);
+         IRTemp result = newTemp(Ity_I32);
+         IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+         assign(op, get_dpr_dw0(r2));
+         assign(result, binop(Iop_D64toI32U, mkexpr(rounding_mode),
+                              mkexpr(op)));
+         put_gpr_w1(r1, mkexpr(result));
+         s390_cc_thunk_putFZ(S390_CC_OP_DFP_64_TO_UINT_32, op, rounding_mode);
+      }
+   }
+   return "clfdtr";
+}
+
+static const HChar *
+s390_irgen_CLFXTR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op = newTemp(Ity_D128);
+         IRTemp result = newTemp(Ity_I32);
+         IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+         assign(op, get_dpr_pair(r2));
+         assign(result, binop(Iop_D128toI32U, mkexpr(rounding_mode),
+                              mkexpr(op)));
+         put_gpr_w1(r1, mkexpr(result));
+         s390_cc_thunk_put1d128Z(S390_CC_OP_DFP_128_TO_UINT_32, op,
+                                 rounding_mode);
+      }
+   }
+   return "clfxtr";
+}
+
+static const HChar *
+s390_irgen_CLGDTR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op = newTemp(Ity_D64);
+         IRTemp result = newTemp(Ity_I64);
+         IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+         assign(op, get_dpr_dw0(r2));
+         assign(result, binop(Iop_D64toI64U, mkexpr(rounding_mode),
+                              mkexpr(op)));
+         put_gpr_dw0(r1, mkexpr(result));
+         s390_cc_thunk_putFZ(S390_CC_OP_DFP_64_TO_UINT_64, op, rounding_mode);
+      }
+   }
+   return "clgdtr";
+}
+
+static const HChar *
+s390_irgen_CLGXTR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      if (! s390_host_has_fpext) {
+         emulation_failure(EmFail_S390X_fpext);
+      } else {
+         IRTemp op = newTemp(Ity_D128);
+         IRTemp result = newTemp(Ity_I64);
+         IRTemp rounding_mode = encode_dfp_rounding_mode(m3);
+
+         assign(op, get_dpr_pair(r2));
+         assign(result, binop(Iop_D128toI64U, mkexpr(rounding_mode),
+                              mkexpr(op)));
+         put_gpr_dw0(r1, mkexpr(result));
+         s390_cc_thunk_put1d128Z(S390_CC_OP_DFP_128_TO_UINT_64, op,
+                                 rounding_mode);
+      }
+   }
+   return "clgxtr";
+}
+
+static const HChar *
+s390_irgen_DDTRA(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D64);
+      IRTemp op2 = newTemp(Ity_D64);
+      IRTemp result = newTemp(Ity_D64);
+      IRTemp rounding_mode;
+
+      if (! s390_host_has_fpext && m4 != S390_DFP_ROUND_PER_FPC_0) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_dw0(r2));
+      assign(op2, get_dpr_dw0(r3));
+      assign(result, triop(Iop_DivD64, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      put_dpr_dw0(r1, mkexpr(result));
+   }
+   return (m4 == 0) ? "ddtr" : "ddtra";
+}
+
+static const HChar *
+s390_irgen_DXTRA(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D128);
+      IRTemp op2 = newTemp(Ity_D128);
+      IRTemp result = newTemp(Ity_D128);
+      IRTemp rounding_mode;
+
+      if (! s390_host_has_fpext && m4 != S390_DFP_ROUND_PER_FPC_0) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_pair(r2));
+      assign(op2, get_dpr_pair(r3));
+      assign(result, triop(Iop_DivD128, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      put_dpr_pair(r1, mkexpr(result));
+   }
+   return (m4 == 0) ? "dxtr" : "dxtra";
+}
+
+static const HChar *
+s390_irgen_EEDTR(UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      put_gpr_dw0(r1, unop(Iop_ExtractExpD64, get_dpr_dw0(r2)));
+   }
+   return "eedtr";
+}
+
+static const HChar *
+s390_irgen_EEXTR(UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      put_gpr_dw0(r1, unop(Iop_ExtractExpD128, get_dpr_pair(r2)));
+   }
+   return "eextr";
+}
+
+static const HChar *
+s390_irgen_ESDTR(UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      put_gpr_dw0(r1, unop(Iop_ExtractSigD64, get_dpr_dw0(r2)));
+   }
+   return "esdtr";
+}
+
+static const HChar *
+s390_irgen_ESXTR(UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      put_gpr_dw0(r1, unop(Iop_ExtractSigD128, get_dpr_pair(r2)));
+   }
+   return "esxtr";
+}
+
+static const HChar *
+s390_irgen_IEDTR(UChar r3, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_I64);
+      IRTemp op2 = newTemp(Ity_D64);
+      IRTemp result = newTemp(Ity_D64);
+
+      assign(op1, get_gpr_dw0(r2));
+      assign(op2, get_dpr_dw0(r3));
+      assign(result, binop(Iop_InsertExpD64, mkexpr(op1), mkexpr(op2)));
+      put_dpr_dw0(r1, mkexpr(result));
+   }
+   return "iedtr";
+}
+
+static const HChar *
+s390_irgen_IEXTR(UChar r3, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_I64);
+      IRTemp op2 = newTemp(Ity_D128);
+      IRTemp result = newTemp(Ity_D128);
+
+      assign(op1, get_gpr_dw0(r2));
+      assign(op2, get_dpr_pair(r3));
+      assign(result, binop(Iop_InsertExpD128, mkexpr(op1), mkexpr(op2)));
+      put_dpr_pair(r1, mkexpr(result));
+   }
+   return "iextr";
+}
+
+static const HChar *
+s390_irgen_LDETR(UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op = newTemp(Ity_D32);
+
+      assign(op, get_dpr_w0(r2));
+      put_dpr_dw0(r1, unop(Iop_D32toD64, mkexpr(op)));
+   }
+   return "ldetr";
+}
+
+static const HChar *
+s390_irgen_LXDTR(UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   IRTemp op = newTemp(Ity_D64);
+
+   assign(op, get_dpr_dw0(r2));
+   put_dpr_pair(r1, unop(Iop_D64toD128, mkexpr(op)));
+
+   return "lxdtr";
+}
+
+static const HChar *
+s390_irgen_LDXTR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      /* If fpext is not installed and m3 is in 1:7,
+         rounding mode performed is unpredictable */
+      if (! s390_host_has_fpext && m3 > 0 && m3 < 8) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m3 = S390_DFP_ROUND_PER_FPC_0;
+      }
+      IRTemp result = newTemp(Ity_D64);
+
+      assign(result, binop(Iop_D128toD64, mkexpr(encode_dfp_rounding_mode(m3)),
+                           get_dpr_pair(r2)));
+      put_dpr_dw0(r1, mkexpr(result));
+   }
+   return "ldxtr";
+}
+
+static const HChar *
+s390_irgen_LEDTR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      /* If fpext is not installed and m3 is in 1:7,
+         rounding mode performed is unpredictable */
+      if (! s390_host_has_fpext && m3 > 0 && m3 < 8) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m3 = S390_DFP_ROUND_PER_FPC_0;
+      }
+      IRTemp op = newTemp(Ity_D64);
+
+      assign(op, get_dpr_dw0(r2));
+      put_dpr_w0(r1, binop(Iop_D64toD32, mkexpr(encode_dfp_rounding_mode(m3)),
+                           mkexpr(op)));
+   }
+   return "ledtr";
+}
+
+static const HChar *
+s390_irgen_LTDTR(UChar r1, UChar r2)
+{
+   IRTemp result = newTemp(Ity_D64);
+
+   assign(result, get_dpr_dw0(r2));
+   put_dpr_dw0(r1, mkexpr(result));
+   s390_cc_thunk_putF(S390_CC_OP_DFP_RESULT_64, result);
+
+   return "ltdtr";
+}
+
+static const HChar *
+s390_irgen_LTXTR(UChar r1, UChar r2)
+{
+   IRTemp result = newTemp(Ity_D128);
+
+   assign(result, get_dpr_pair(r2));
+   put_dpr_pair(r1, mkexpr(result));
+   s390_cc_thunk_put1d128(S390_CC_OP_DFP_RESULT_128, result);
+
+   return "ltxtr";
+}
+
+static const HChar *
+s390_irgen_MDTRA(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D64);
+      IRTemp op2 = newTemp(Ity_D64);
+      IRTemp result = newTemp(Ity_D64);
+      IRTemp rounding_mode;
+
+      if (! s390_host_has_fpext && m4 != S390_DFP_ROUND_PER_FPC_0) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_dw0(r2));
+      assign(op2, get_dpr_dw0(r3));
+      assign(result, triop(Iop_MulD64, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      put_dpr_dw0(r1, mkexpr(result));
+   }
+   return (m4 == 0) ? "mdtr" : "mdtra";
+}
+
+static const HChar *
+s390_irgen_MXTRA(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D128);
+      IRTemp op2 = newTemp(Ity_D128);
+      IRTemp result = newTemp(Ity_D128);
+      IRTemp rounding_mode;
+
+      if (! s390_host_has_fpext && m4 != S390_DFP_ROUND_PER_FPC_0) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_pair(r2));
+      assign(op2, get_dpr_pair(r3));
+      assign(result, triop(Iop_MulD128, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      put_dpr_pair(r1, mkexpr(result));
+   }
+   return (m4 == 0) ? "mxtr" : "mxtra";
+}
+
+static const HChar *
+s390_irgen_QADTR(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D64);
+      IRTemp op2 = newTemp(Ity_D64);
+      IRTemp result = newTemp(Ity_D64);
+      IRTemp rounding_mode;
+
+      /* If fpext is not installed and m4 is in 1:7,
+         rounding mode performed is unpredictable */
+      if (! s390_host_has_fpext && m4 > 0 && m4 < 8) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_dw0(r2));
+      assign(op2, get_dpr_dw0(r3));
+      assign(result, triop(Iop_QuantizeD64, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      put_dpr_dw0(r1, mkexpr(result));
+   }
+   return "qadtr";
+}
+
+static const HChar *
+s390_irgen_QAXTR(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D128);
+      IRTemp op2 = newTemp(Ity_D128);
+      IRTemp result = newTemp(Ity_D128);
+      IRTemp rounding_mode;
+
+      /* If fpext is not installed and m4 is in 1:7,
+         rounding mode performed is unpredictable */
+      if (! s390_host_has_fpext && m4 > 0 && m4 < 8) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_pair(r2));
+      assign(op2, get_dpr_pair(r3));
+      assign(result, triop(Iop_QuantizeD128, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      put_dpr_pair(r1, mkexpr(result));
+   }
+   return "qaxtr";
+}
+
+static const HChar *
+s390_irgen_RRDTR(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_I8);
+      IRTemp op2 = newTemp(Ity_D64);
+      IRTemp result = newTemp(Ity_D64);
+      IRTemp rounding_mode;
+
+      /* If fpext is not installed and m4 is in 1:7,
+         rounding mode performed is unpredictable */
+      if (! s390_host_has_fpext && m4 > 0 && m4 < 8) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_gpr_b7(r2));
+      assign(op2, get_dpr_dw0(r3));
+      assign(result, triop(Iop_SignificanceRoundD64, mkexpr(rounding_mode),
+                           mkexpr(op1), mkexpr(op2)));
+      put_dpr_dw0(r1, mkexpr(result));
+   }
+   return "rrdtr";
+}
+
+static const HChar *
+s390_irgen_RRXTR(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_I8);
+      IRTemp op2 = newTemp(Ity_D128);
+      IRTemp result = newTemp(Ity_D128);
+      IRTemp rounding_mode;
+
+      /* If fpext is not installed and m4 is in 1:7,
+         rounding mode performed is unpredictable */
+      if (! s390_host_has_fpext && m4 > 0 && m4 < 8) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_gpr_b7(r2));
+      assign(op2, get_dpr_pair(r3));
+      assign(result, triop(Iop_SignificanceRoundD128, mkexpr(rounding_mode),
+                           mkexpr(op1), mkexpr(op2)));
+      put_dpr_pair(r1, mkexpr(result));
+   }
+   return "rrxtr";
+}
+
+static const HChar *
+s390_irgen_SDTRA(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D64);
+      IRTemp op2 = newTemp(Ity_D64);
+      IRTemp result = newTemp(Ity_D64);
+      IRTemp rounding_mode;
+
+      if (! s390_host_has_fpext && m4 != S390_DFP_ROUND_PER_FPC_0) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_dw0(r2));
+      assign(op2, get_dpr_dw0(r3));
+      assign(result, triop(Iop_SubD64, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      s390_cc_thunk_putF(S390_CC_OP_DFP_RESULT_64, result);
+      put_dpr_dw0(r1, mkexpr(result));
+   }
+   return (m4 == 0) ? "sdtr" : "sdtra";
+}
+
+static const HChar *
+s390_irgen_SXTRA(UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op1 = newTemp(Ity_D128);
+      IRTemp op2 = newTemp(Ity_D128);
+      IRTemp result = newTemp(Ity_D128);
+      IRTemp rounding_mode;
+
+      if (! s390_host_has_fpext && m4 != S390_DFP_ROUND_PER_FPC_0) {
+         emulation_warning(EmWarn_S390X_fpext_rounding);
+         m4 = S390_DFP_ROUND_PER_FPC_0;
+      }
+
+      rounding_mode = encode_dfp_rounding_mode(m4);
+      assign(op1, get_dpr_pair(r2));
+      assign(op2, get_dpr_pair(r3));
+      assign(result, triop(Iop_SubD128, mkexpr(rounding_mode), mkexpr(op1),
+                           mkexpr(op2)));
+      put_dpr_pair(r1, mkexpr(result));
+
+      s390_cc_thunk_put1d128(S390_CC_OP_DFP_RESULT_128, result);
+   }
+   return (m4 == 0) ? "sxtr" : "sxtra";
+}
+
+static const HChar *
+s390_irgen_SLDT(UChar r3, IRTemp op2addr, UChar r1)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op = newTemp(Ity_D64);
+
+      assign(op, get_dpr_dw0(r3));
+      put_dpr_dw0(r1, binop(Iop_ShlD64, mkexpr(op),
+                            unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr),
+                                                  mkU64(63)))));
+   }
+   return "sldt";
+}
+
+static const HChar *
+s390_irgen_SLXT(UChar r3, IRTemp op2addr, UChar r1)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op = newTemp(Ity_D128);
+
+      assign(op, get_dpr_pair(r3));
+      put_dpr_pair(r1, binop(Iop_ShlD128, mkexpr(op),
+                             unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr),
+                                                   mkU64(63)))));
+   }
+   return "slxt";
+}
+
+static const HChar *
+s390_irgen_SRDT(UChar r3, IRTemp op2addr, UChar r1)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op = newTemp(Ity_D64);
+
+      assign(op, get_dpr_dw0(r3));
+      put_dpr_dw0(r1, binop(Iop_ShrD64, mkexpr(op),
+                            unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr),
+                                                  mkU64(63)))));
+   }
+   return "srdt";
+}
+
+static const HChar *
+s390_irgen_SRXT(UChar r3, IRTemp op2addr, UChar r1)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp op = newTemp(Ity_D128);
+
+      assign(op, get_dpr_pair(r3));
+      put_dpr_pair(r1, binop(Iop_ShrD128, mkexpr(op),
+                             unop(Iop_64to8, binop(Iop_And64, mkexpr(op2addr),
+                                                   mkU64(63)))));
+   }
+   return "srxt";
+}
+
+static const HChar *
+s390_irgen_TDCET(UChar r1, IRTemp op2addr)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp value = newTemp(Ity_D32);
+
+      assign(value, get_dpr_w0(r1));
+
+      s390_cc_thunk_putFZ(S390_CC_OP_DFP_TDC_32, value, op2addr);
+   }
+   return "tdcet";
+}
+
+static const HChar *
+s390_irgen_TDCDT(UChar r1, IRTemp op2addr)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp value = newTemp(Ity_D64);
+
+      assign(value, get_dpr_dw0(r1));
+
+      s390_cc_thunk_putFZ(S390_CC_OP_DFP_TDC_64, value, op2addr);
+   }
+   return "tdcdt";
+}
+
+static const HChar *
+s390_irgen_TDCXT(UChar r1, IRTemp op2addr)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp value = newTemp(Ity_D128);
+
+      assign(value, get_dpr_pair(r1));
+
+      s390_cc_thunk_put1d128Z(S390_CC_OP_DFP_TDC_128, value, op2addr);
+   }
+   return "tdcxt";
+}
+
+static const HChar *
+s390_irgen_TDGET(UChar r1, IRTemp op2addr)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp value = newTemp(Ity_D32);
+
+      assign(value, get_dpr_w0(r1));
+
+      s390_cc_thunk_putFZ(S390_CC_OP_DFP_TDG_32, value, op2addr);
+   }
+   return "tdget";
+}
+
+static const HChar *
+s390_irgen_TDGDT(UChar r1, IRTemp op2addr)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp value = newTemp(Ity_D64);
+
+      assign(value, get_dpr_dw0(r1));
+
+      s390_cc_thunk_putFZ(S390_CC_OP_DFP_TDG_64, value, op2addr);
+   }
+   return "tdgdt";
+}
+
+static const HChar *
+s390_irgen_TDGXT(UChar r1, IRTemp op2addr)
+{
+   if (! s390_host_has_dfp) {
+      emulation_failure(EmFail_S390X_DFP_insn);
+   } else {
+      IRTemp value = newTemp(Ity_D128);
+
+      assign(value, get_dpr_pair(r1));
+
+      s390_cc_thunk_put1d128Z(S390_CC_OP_DFP_TDG_128, value, op2addr);
+   }
+   return "tdgxt";
+}
+
+static const HChar *
+s390_irgen_CLC(UChar length, IRTemp start1, IRTemp start2)
+{
+   IRTemp len = newTemp(Ity_I64);
+
+   assign(len, mkU64(length));
+   s390_irgen_CLC_EX(len, start1, start2);
+
+   return "clc";
+}
+
/* CLCL - compare logical long.  R1/R1+1 and R2/R2+1 hold the addresses
   and 24-bit lengths of the two operands; the shorter operand is
   logically extended with the pad byte from bits 32-39 of R2+1.  One
   byte is compared per iteration and the instruction is re-executed
   (iterate()) until a mismatch is found or both lengths are zero. */
static const HChar *
s390_irgen_CLCL(UChar r1, UChar r2)
{
   IRTemp addr1 = newTemp(Ity_I64);
   IRTemp addr2 = newTemp(Ity_I64);
   IRTemp addr1_load = newTemp(Ity_I64);
   IRTemp addr2_load = newTemp(Ity_I64);
   IRTemp len1 = newTemp(Ity_I32);
   IRTemp len2 = newTemp(Ity_I32);
   IRTemp r1p1 = newTemp(Ity_I32);   /* contents of r1 + 1 */
   IRTemp r2p1 = newTemp(Ity_I32);   /* contents of r2 + 1 */
   IRTemp single1 = newTemp(Ity_I8);
   IRTemp single2 = newTemp(Ity_I8);
   IRTemp pad = newTemp(Ity_I8);

   assign(addr1, get_gpr_dw0(r1));
   assign(r1p1, get_gpr_w1(r1 + 1));
   assign(len1, binop(Iop_And32, mkexpr(r1p1), mkU32(0x00ffffff)));
   assign(addr2, get_gpr_dw0(r2));
   assign(r2p1, get_gpr_w1(r2 + 1));
   assign(len2, binop(Iop_And32, mkexpr(r2p1), mkU32(0x00ffffff)));
   assign(pad, get_gpr_b4(r2 + 1));

   /* len1 == 0 and len2 == 0? Exit */
   s390_cc_set(0);
   next_insn_if(binop(Iop_CmpEQ32, binop(Iop_Or32, mkexpr(len1),
                                         mkexpr(len2)), mkU32(0)));

   /* Because mkite evaluates both the then-clause and the else-clause
      we cannot load directly from addr1 here. If len1 is 0, then addr1
      may be NULL and loading from there would segfault. So we provide a
      valid dummy address in that case. Loading from there does no harm and
      the value will be discarded at runtime. */
   assign(addr1_load,
          mkite(binop(Iop_CmpEQ32, mkexpr(len1), mkU32(0)),
                mkU64(guest_IA_curr_instr), mkexpr(addr1)));
   assign(single1,
          mkite(binop(Iop_CmpEQ32, mkexpr(len1), mkU32(0)),
                mkexpr(pad), load(Ity_I8, mkexpr(addr1_load))));

   assign(addr2_load,
          mkite(binop(Iop_CmpEQ32, mkexpr(len2), mkU32(0)),
                mkU64(guest_IA_curr_instr), mkexpr(addr2)));
   assign(single2,
          mkite(binop(Iop_CmpEQ32, mkexpr(len2), mkU32(0)),
                mkexpr(pad), load(Ity_I8, mkexpr(addr2_load))));

   s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, single1, single2, False);
   /* Fields differ ? */
   next_insn_if(binop(Iop_CmpNE8, mkexpr(single1), mkexpr(single2)));

   /* Update len1 and addr1, unless len1 == 0. */
   put_gpr_dw0(r1,
               mkite(binop(Iop_CmpEQ32, mkexpr(len1), mkU32(0)),
                     mkexpr(addr1),
                     binop(Iop_Add64, mkexpr(addr1), mkU64(1))));

   /* When updating len1 we must not modify bits (r1+1)[0:39] */
   put_gpr_w1(r1 + 1,
              mkite(binop(Iop_CmpEQ32, mkexpr(len1), mkU32(0)),
                    binop(Iop_And32, mkexpr(r1p1), mkU32(0xFF000000u)),
                    binop(Iop_Sub32, mkexpr(r1p1), mkU32(1))));

   /* Update len2 and addr2, unless len2 == 0. */
   put_gpr_dw0(r2,
               mkite(binop(Iop_CmpEQ32, mkexpr(len2), mkU32(0)),
                     mkexpr(addr2),
                     binop(Iop_Add64, mkexpr(addr2), mkU64(1))));

   /* When updating len2 we must not modify bits (r2+1)[0:39] */
   put_gpr_w1(r2 + 1,
              mkite(binop(Iop_CmpEQ32, mkexpr(len2), mkU32(0)),
                    binop(Iop_And32, mkexpr(r2p1), mkU32(0xFF000000u)),
                    binop(Iop_Sub32, mkexpr(r2p1), mkU32(1))));

   iterate();

   return "clcl";
}
+
/* CLCLE - compare logical long extended.  R1/R1+1 and R3/R3+1 hold the
   addresses and 64-bit lengths of the operands; the shorter operand is
   logically extended with the pad byte (low byte of PAD2).  One byte is
   compared per iteration; the instruction is re-executed (iterate())
   until a mismatch is found or both lengths are zero. */
static const HChar *
s390_irgen_CLCLE(UChar r1, UChar r3, IRTemp pad2)
{
   IRTemp addr1, addr3, addr1_load, addr3_load, len1, len3, single1, single3;

   addr1 = newTemp(Ity_I64);
   addr3 = newTemp(Ity_I64);
   addr1_load = newTemp(Ity_I64);
   addr3_load = newTemp(Ity_I64);
   len1 = newTemp(Ity_I64);
   len3 = newTemp(Ity_I64);
   single1 = newTemp(Ity_I8);
   single3 = newTemp(Ity_I8);

   assign(addr1, get_gpr_dw0(r1));
   assign(len1, get_gpr_dw0(r1 + 1));
   assign(addr3, get_gpr_dw0(r3));
   assign(len3, get_gpr_dw0(r3 + 1));

   /* len1 == 0 and len3 == 0? Exit */
   s390_cc_set(0);
   next_insn_if(binop(Iop_CmpEQ64,binop(Iop_Or64, mkexpr(len1),
                                        mkexpr(len3)), mkU64(0)));

   /* A mux requires both ways to be possible. This is a way to prevent clcle
      from reading from addr1 if it should read from the pad. Since the pad
      has no address, just read from the instruction, we discard that anyway */
   assign(addr1_load,
          mkite(binop(Iop_CmpEQ64, mkexpr(len1), mkU64(0)),
                mkU64(guest_IA_curr_instr), mkexpr(addr1)));

   /* same for addr3 */
   assign(addr3_load,
          mkite(binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0)),
                mkU64(guest_IA_curr_instr), mkexpr(addr3)));

   assign(single1,
          mkite(binop(Iop_CmpEQ64, mkexpr(len1), mkU64(0)),
                unop(Iop_64to8, mkexpr(pad2)),
                load(Ity_I8, mkexpr(addr1_load))));

   assign(single3,
          mkite(binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0)),
                unop(Iop_64to8, mkexpr(pad2)),
                load(Ity_I8, mkexpr(addr3_load))));

   s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, single1, single3, False);
   /* Both fields differ ? */
   next_insn_if(binop(Iop_CmpNE8, mkexpr(single1), mkexpr(single3)));

   /* If a length is 0 we must not change this length and the address */
   put_gpr_dw0(r1,
               mkite(binop(Iop_CmpEQ64, mkexpr(len1), mkU64(0)),
                     mkexpr(addr1),
                     binop(Iop_Add64, mkexpr(addr1), mkU64(1))));

   put_gpr_dw0(r1 + 1,
               mkite(binop(Iop_CmpEQ64, mkexpr(len1), mkU64(0)),
                     mkU64(0), binop(Iop_Sub64, mkexpr(len1), mkU64(1))));

   put_gpr_dw0(r3,
               mkite(binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0)),
                     mkexpr(addr3),
                     binop(Iop_Add64, mkexpr(addr3), mkU64(1))));

   put_gpr_dw0(r3 + 1,
               mkite(binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0)),
                     mkU64(0), binop(Iop_Sub64, mkexpr(len3), mkU64(1))));

   iterate();

   return "clcle";
}
+
+
/* Byte-wise XOR of two storage fields; used for an EXECUTEd XC. */
static void
s390_irgen_XC_EX(IRTemp length, IRTemp start1, IRTemp start2)
{
   s390_irgen_xonc(Iop_Xor8, length, start1, start2);
}
+
+
/* Byte-wise AND of two storage fields; used for an EXECUTEd NC. */
static void
s390_irgen_NC_EX(IRTemp length, IRTemp start1, IRTemp start2)
{
   s390_irgen_xonc(Iop_And8, length, start1, start2);
}
+
+
/* Byte-wise OR of two storage fields; used for an EXECUTEd OC. */
static void
s390_irgen_OC_EX(IRTemp length, IRTemp start1, IRTemp start2)
{
   s390_irgen_xonc(Iop_Or8, length, start1, start2);
}
+
+
/* Compare logical, one byte per iteration.  The guest "counter"
   pseudo-register holds the current byte index; the instruction is
   re-executed (iterate_if) until the bytes differ or LENGTH+1 bytes
   have been compared.  CC comes from the last byte pair compared. */
static void
s390_irgen_CLC_EX(IRTemp length, IRTemp start1, IRTemp start2)
{
   IRTemp current1 = newTemp(Ity_I8);
   IRTemp current2 = newTemp(Ity_I8);
   IRTemp counter = newTemp(Ity_I64);

   assign(counter, get_counter_dw0());
   put_counter_dw0(mkU64(0));

   assign(current1, load(Ity_I8, binop(Iop_Add64, mkexpr(start1),
                                       mkexpr(counter))));
   assign(current2, load(Ity_I8, binop(Iop_Add64, mkexpr(start2),
                                       mkexpr(counter))));
   s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, current1, current2,
                      False);

   /* Both fields differ ? */
   next_insn_if(binop(Iop_CmpNE8, mkexpr(current1), mkexpr(current2)));

   /* Check for end of field */
   put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
   iterate_if(binop(Iop_CmpNE64, mkexpr(counter), mkexpr(length)));
   put_counter_dw0(mkU64(0));
}
+
/* Move one byte per iteration from START2 to START1, using the guest
   counter pseudo-register as byte index; re-executes (iterate_if)
   until LENGTH+1 bytes have been copied. */
static void
s390_irgen_MVC_EX(IRTemp length, IRTemp start1, IRTemp start2)
{
   IRTemp counter = newTemp(Ity_I64);

   assign(counter, get_counter_dw0());

   store(binop(Iop_Add64, mkexpr(start1), mkexpr(counter)),
         load(Ity_I8, binop(Iop_Add64, mkexpr(start2), mkexpr(counter))));

   /* Check for end of field */
   put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
   iterate_if(binop(Iop_CmpNE64, mkexpr(counter), mkexpr(length)));
   put_counter_dw0(mkU64(0));
}
+
/* Translate: replace each byte of the first operand with the byte
   found at START2 + <byte value>, one byte per iteration via the
   guest counter pseudo-register. */
static void
s390_irgen_TR_EX(IRTemp length, IRTemp start1, IRTemp start2)
{
   IRTemp op = newTemp(Ity_I8);
   IRTemp op1 = newTemp(Ity_I8);
   IRTemp result = newTemp(Ity_I64);
   IRTemp counter = newTemp(Ity_I64);

   assign(counter, get_counter_dw0());

   assign(op, load(Ity_I8, binop(Iop_Add64, mkexpr(start1), mkexpr(counter))));

   /* address of the translation-table entry for this byte */
   assign(result, binop(Iop_Add64, unop(Iop_8Uto64, mkexpr(op)), mkexpr(start2)));

   assign(op1, load(Ity_I8, mkexpr(result)));
   store(binop(Iop_Add64, mkexpr(start1), mkexpr(counter)), mkexpr(op1));

   put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
   iterate_if(binop(Iop_CmpNE64, mkexpr(counter), mkexpr(length)));
   put_counter_dw0(mkU64(0));
}
+
+
/* Translate an EXECUTEd SS-format instruction (MVC/CLC/XC/OC/NC/TR).
   The previously saved target bytes (last_execute_target) are decoded
   through the bitfield overlay below; effective addresses and the
   length byte (OR-ed with the low byte of register R unless R is 0)
   are computed, and IRGEN emits the IR for the operation.  A guarded
   dirty helper re-saves the target and restarts the translation if
   the bytes at ADDR2 no longer match the saved copy. */
static void
s390_irgen_EX_SS(UChar r, IRTemp addr2,
                 void (*irgen)(IRTemp length, IRTemp start1, IRTemp start2),
                 UInt lensize)
{
   /* Field layout of an SS-format instruction, used to pick apart the
      saved instruction bytes. */
   struct SS {
      unsigned int op :  8;
      unsigned int l  :  8;
      unsigned int b1 :  4;
      unsigned int d1 : 12;
      unsigned int b2 :  4;
      unsigned int d2 : 12;
   };
   union {
      struct SS dec;
      unsigned long bytes;
   } ss;
   IRTemp cond;
   IRDirty *d;
   IRTemp torun;

   IRTemp start1 = newTemp(Ity_I64);
   IRTemp start2 = newTemp(Ity_I64);
   IRTemp len = newTemp(lensize == 64 ? Ity_I64 : Ity_I32);
   cond = newTemp(Ity_I1);
   torun = newTemp(Ity_I64);

   assign(torun, load(Ity_I64, mkexpr(addr2)));
   /* Start with a check that the saved code is still correct */
   assign(cond, binop(Iop_CmpNE64, mkexpr(torun), mkU64(last_execute_target)));
   /* If not, save the new value */
   d = unsafeIRDirty_0_N (0, "s390x_dirtyhelper_EX", &s390x_dirtyhelper_EX,
                          mkIRExprVec_1(mkexpr(torun)));
   d->guard = mkexpr(cond);
   stmt(IRStmt_Dirty(d));

   /* and restart */
   stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_CMSTART),
                   mkU64(guest_IA_curr_instr)));
   stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_CMLEN), mkU64(4)));
   restart_if(mkexpr(cond));

   ss.bytes = last_execute_target;
   /* Effective addresses: displacement plus base register (0 = no base) */
   assign(start1, binop(Iop_Add64, mkU64(ss.dec.d1),
          ss.dec.b1 != 0 ? get_gpr_dw0(ss.dec.b1) : mkU64(0)));
   assign(start2, binop(Iop_Add64, mkU64(ss.dec.d2),
          ss.dec.b2 != 0 ? get_gpr_dw0(ss.dec.b2) : mkU64(0)));
   assign(len, unop(lensize == 64 ? Iop_8Uto64 : Iop_8Uto32, binop(Iop_Or8,
          r != 0 ? get_gpr_b7(r): mkU8(0), mkU8(ss.dec.l))));
   irgen(len, start1, start2);

   last_execute_target = 0;
}
+
/* EXECUTE.  Dispatch on the opcode byte of the saved target
   instruction: common SS-format targets get a specialised fast path,
   everything else is translated inline behind a self-checking prefix
   that re-saves and restarts when the target bytes (or the OR-ed
   register byte) change. */
static const HChar *
s390_irgen_EX(UChar r1, IRTemp addr2)
{
   switch(last_execute_target & 0xff00000000000000ULL) {
   case 0:
   {
      /* no code information yet */
      IRDirty *d;

      /* so save the code... */
      d = unsafeIRDirty_0_N (0, "s390x_dirtyhelper_EX", &s390x_dirtyhelper_EX,
                             mkIRExprVec_1(load(Ity_I64, mkexpr(addr2))));
      stmt(IRStmt_Dirty(d));
      /* and restart */
      stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_CMSTART),
                      mkU64(guest_IA_curr_instr)));
      stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_CMLEN), mkU64(4)));
      restart_if(IRExpr_Const(IRConst_U1(True)));

      /* we know that this will be invalidated */
      put_IA(mkaddr_expr(guest_IA_next_instr));
      dis_res->whatNext = Dis_StopHere;
      dis_res->jk_StopHere = Ijk_InvalICache;
      break;
   }

   case 0xd200000000000000ULL:
      /* special case MVC */
      s390_irgen_EX_SS(r1, addr2, s390_irgen_MVC_EX, 64);
      return "ex@mvc";

   case 0xd500000000000000ULL:
      /* special case CLC */
      s390_irgen_EX_SS(r1, addr2, s390_irgen_CLC_EX, 64);
      return "ex@clc";

   case 0xd700000000000000ULL:
      /* special case XC */
      s390_irgen_EX_SS(r1, addr2, s390_irgen_XC_EX, 32);
      return "ex@xc";

   case 0xd600000000000000ULL:
      /* special case OC */
      s390_irgen_EX_SS(r1, addr2, s390_irgen_OC_EX, 32);
      return "ex@oc";

   case 0xd400000000000000ULL:
      /* special case NC */
      s390_irgen_EX_SS(r1, addr2, s390_irgen_NC_EX, 32);
      return "ex@nc";

   case 0xdc00000000000000ULL:
      /* special case TR */
      s390_irgen_EX_SS(r1, addr2, s390_irgen_TR_EX, 64);
      return "ex@tr";

   default:
   {
      /* everything else will get a self checking prefix that also checks the
         register content */
      IRDirty *d;
      UChar *bytes;
      IRTemp cond;
      IRTemp orperand;
      IRTemp torun;

      cond = newTemp(Ity_I1);
      orperand = newTemp(Ity_I64);
      torun = newTemp(Ity_I64);

      if (r1 == 0)
         assign(orperand, mkU64(0));
      else
         assign(orperand, unop(Iop_8Uto64,get_gpr_b7(r1)));
      /* This code is going to be translated */
      assign(torun, binop(Iop_Or64, load(Ity_I64, mkexpr(addr2)),
             binop(Iop_Shl64, mkexpr(orperand), mkU8(48))));

      /* Start with a check that saved code is still correct */
      assign(cond, binop(Iop_CmpNE64, mkexpr(torun),
             mkU64(last_execute_target)));
      /* If not, save the new value */
      d = unsafeIRDirty_0_N (0, "s390x_dirtyhelper_EX", &s390x_dirtyhelper_EX,
                             mkIRExprVec_1(mkexpr(torun)));
      d->guard = mkexpr(cond);
      stmt(IRStmt_Dirty(d));

      /* and restart */
      stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_CMSTART), mkU64(guest_IA_curr_instr)));
      stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_CMLEN), mkU64(4)));
      restart_if(mkexpr(cond));

      /* Now comes the actual translation.  The instruction length is
         derived from the top two bits of the first opcode byte
         (00 -> 2 bytes, 01/10 -> 4, 11 -> 6). */
      bytes = (UChar *) &last_execute_target;
      s390_decode_and_irgen(bytes, ((((bytes[0] >> 6) + 1) >> 1) + 1) << 1,
                            dis_res);
      if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
         vex_printf("    which was executed by\n");
      /* don't make useless translations in the next execute */
      last_execute_target = 0;
   }
   }
   return "ex";
}
+
/* EXECUTE relative long.  The target lies at a fixed offset from this
   instruction, so its bytes can be read directly at translation time,
   possibly saving one save-and-restart round trip through EX. */
static const HChar *
s390_irgen_EXRL(UChar r1, UInt offset)
{
   IRTemp addr = newTemp(Ity_I64);
   /* we might save one round trip because we know the target */
   if (!last_execute_target)
      last_execute_target = *(ULong *)(HWord)
                             (guest_IA_curr_instr + offset * 2UL);
   assign(addr, mkU64(guest_IA_curr_instr + offset * 2UL));
   s390_irgen_EX(r1, addr);
   return "exrl";
}
+
/* Insert program mask: store CC (shifted into bits 2-3 of the byte)
   and the program mask into byte 4 of r1. */
static const HChar *
s390_irgen_IPM(UChar r1)
{
   // As long as we don't support SPM, let's just assume 0 as program mask
   put_gpr_b4(r1, unop(Iop_32to8, binop(Iop_Or32, mkU32(0 /* program mask */),
                       binop(Iop_Shl32, s390_call_calculate_cc(), mkU8(4)))));

   return "ipm";
}
+
+
/* Search string: scan from the address in R2 up to (but not including)
   the address in R1 for the delimiter byte in GR0, one byte per
   re-execution.  Note the guest-register puts before each
   next_insn_if() are unconditional; on the fall-through path they are
   overwritten by the puts further down. */
static const HChar *
s390_irgen_SRST(UChar r1, UChar r2)
{
   IRTemp address = newTemp(Ity_I64);
   IRTemp next = newTemp(Ity_I64);
   IRTemp delim = newTemp(Ity_I8);
   IRTemp counter = newTemp(Ity_I64);
   IRTemp byte = newTemp(Ity_I8);

   assign(address, get_gpr_dw0(r2));
   assign(next, get_gpr_dw0(r1));

   assign(counter, get_counter_dw0());
   put_counter_dw0(mkU64(0));

   // start = next?  CC=2 and out r1 and r2 unchanged
   s390_cc_set(2);
   put_gpr_dw0(r2, binop(Iop_Sub64, mkexpr(address), mkexpr(counter)));
   next_insn_if(binop(Iop_CmpEQ64, mkexpr(address), mkexpr(next)));

   assign(byte, load(Ity_I8, mkexpr(address)));
   assign(delim, get_gpr_b7(0));

   // byte = delim? CC=1, R1=address
   s390_cc_set(1);
   put_gpr_dw0(r1,  mkexpr(address));
   next_insn_if(binop(Iop_CmpEQ8, mkexpr(delim), mkexpr(byte)));

   // else: all equal, no end yet, loop
   put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
   put_gpr_dw0(r1, mkexpr(next));
   put_gpr_dw0(r2, binop(Iop_Add64, mkexpr(address), mkU64(1)));

   iterate();

   return "srst";
}
+
/* Compare logical string: compare the strings at R1 and R2 byte by
   byte, terminated by the byte in GR0; one byte per re-execution.
   Puts before each next_insn_if() are unconditional and overwritten
   on the fall-through path. */
static const HChar *
s390_irgen_CLST(UChar r1, UChar r2)
{
   IRTemp address1 = newTemp(Ity_I64);
   IRTemp address2 = newTemp(Ity_I64);
   IRTemp end = newTemp(Ity_I8);
   IRTemp counter = newTemp(Ity_I64);
   IRTemp byte1 = newTemp(Ity_I8);
   IRTemp byte2 = newTemp(Ity_I8);

   assign(address1, get_gpr_dw0(r1));
   assign(address2, get_gpr_dw0(r2));
   assign(end, get_gpr_b7(0));
   assign(counter, get_counter_dw0());
   put_counter_dw0(mkU64(0));
   assign(byte1, load(Ity_I8, mkexpr(address1)));
   assign(byte2, load(Ity_I8, mkexpr(address2)));

   // end in both? all equal, reset r1 and r2 to start values
   s390_cc_set(0);
   put_gpr_dw0(r1, binop(Iop_Sub64, mkexpr(address1), mkexpr(counter)));
   put_gpr_dw0(r2, binop(Iop_Sub64, mkexpr(address2), mkexpr(counter)));
   next_insn_if(binop(Iop_CmpEQ8, mkU8(0),
                      binop(Iop_Or8,
                            binop(Iop_Xor8, mkexpr(byte1), mkexpr(end)),
                            binop(Iop_Xor8, mkexpr(byte2), mkexpr(end)))));

   put_gpr_dw0(r1, mkexpr(address1));
   put_gpr_dw0(r2, mkexpr(address2));

   // End found in string1
   s390_cc_set(1);
   next_insn_if(binop(Iop_CmpEQ8, mkexpr(end), mkexpr(byte1)));

   // End found in string2
   s390_cc_set(2);
   next_insn_if(binop(Iop_CmpEQ8, mkexpr(end), mkexpr(byte2)));

   // string1 < string2
   s390_cc_set(1);
   next_insn_if(binop(Iop_CmpLT32U, unop(Iop_8Uto32, mkexpr(byte1)),
                      unop(Iop_8Uto32, mkexpr(byte2))));

   // string2 < string1
   s390_cc_set(2);
   next_insn_if(binop(Iop_CmpLT32U, unop(Iop_8Uto32, mkexpr(byte2)),
                      unop(Iop_8Uto32, mkexpr(byte1))));

   // else: all equal, no end yet, loop
   put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
   put_gpr_dw0(r1, binop(Iop_Add64, get_gpr_dw0(r1), mkU64(1)));
   put_gpr_dw0(r2, binop(Iop_Add64, get_gpr_dw0(r2), mkU64(1)));

   iterate();

   return "clst";
}
+
+static void
+s390_irgen_load_multiple_32bit(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar reg;
+   IRTemp addr = newTemp(Ity_I64);
+
+   assign(addr, mkexpr(op2addr));
+   reg = r1;
+   do {
+      IRTemp old = addr;
+
+      reg %= 16;
+      put_gpr_w1(reg, load(Ity_I32, mkexpr(addr)));
+      addr = newTemp(Ity_I64);
+      assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+      reg++;
+   } while (reg != (r3 + 1));
+}
+
/* LM - load multiple (32 bit). */
static const HChar *
s390_irgen_LM(UChar r1, UChar r3, IRTemp op2addr)
{
   s390_irgen_load_multiple_32bit(r1, r3, op2addr);

   return "lm";
}
+
/* LMY - load multiple (32 bit, long displacement). */
static const HChar *
s390_irgen_LMY(UChar r1, UChar r3, IRTemp op2addr)
{
   s390_irgen_load_multiple_32bit(r1, r3, op2addr);

   return "lmy";
}
+
+static const HChar *
+s390_irgen_LMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar reg;
+   IRTemp addr = newTemp(Ity_I64);
+
+   assign(addr, mkexpr(op2addr));
+   reg = r1;
+   do {
+      IRTemp old = addr;
+
+      reg %= 16;
+      put_gpr_w0(reg, load(Ity_I32, mkexpr(addr)));
+      addr = newTemp(Ity_I64);
+      assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+      reg++;
+   } while (reg != (r3 + 1));
+
+   return "lmh";
+}
+
+static const HChar *
+s390_irgen_LMG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar reg;
+   IRTemp addr = newTemp(Ity_I64);
+
+   assign(addr, mkexpr(op2addr));
+   reg = r1;
+   do {
+      IRTemp old = addr;
+
+      reg %= 16;
+      put_gpr_dw0(reg, load(Ity_I64, mkexpr(addr)));
+      addr = newTemp(Ity_I64);
+      assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(8)));
+      reg++;
+   } while (reg != (r3 + 1));
+
+   return "lmg";
+}
+
+static void
+s390_irgen_store_multiple_32bit(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar reg;
+   IRTemp addr = newTemp(Ity_I64);
+
+   assign(addr, mkexpr(op2addr));
+   reg = r1;
+   do {
+      IRTemp old = addr;
+
+      reg %= 16;
+      store(mkexpr(addr), get_gpr_w1(reg));
+      addr = newTemp(Ity_I64);
+      assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+      reg++;
+   } while( reg != (r3 + 1));
+}
+
/* STM - store multiple (32 bit). */
static const HChar *
s390_irgen_STM(UChar r1, UChar r3, IRTemp op2addr)
{
   s390_irgen_store_multiple_32bit(r1, r3, op2addr);

   return "stm";
}
+
/* STMY - store multiple (32 bit, long displacement). */
static const HChar *
s390_irgen_STMY(UChar r1, UChar r3, IRTemp op2addr)
{
   s390_irgen_store_multiple_32bit(r1, r3, op2addr);

   return "stmy";
}
+
+static const HChar *
+s390_irgen_STMH(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar reg;
+   IRTemp addr = newTemp(Ity_I64);
+
+   assign(addr, mkexpr(op2addr));
+   reg = r1;
+   do {
+      IRTemp old = addr;
+
+      reg %= 16;
+      store(mkexpr(addr), get_gpr_w0(reg));
+      addr = newTemp(Ity_I64);
+      assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+      reg++;
+   } while( reg != (r3 + 1));
+
+   return "stmh";
+}
+
+static const HChar *
+s390_irgen_STMG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   UChar reg;
+   IRTemp addr = newTemp(Ity_I64);
+
+   assign(addr, mkexpr(op2addr));
+   reg = r1;
+   do {
+      IRTemp old = addr;
+
+      reg %= 16;
+      store(mkexpr(addr), get_gpr_dw0(reg));
+      addr = newTemp(Ity_I64);
+      assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(8)));
+      reg++;
+   } while( reg != (r3 + 1));
+
+   return "stmg";
+}
+
/* Common body for XC, NC, OC (and their EXECUTEd forms): combine the
   bytes of the two operands with OP, one byte per re-execution,
   processing LENGTH+1 bytes in total.  The guest counter register is
   split: its high word (w0) is the byte index, its low word (w1)
   accumulates the OR of all result bytes so S390_CC_OP_BITWISE can
   distinguish a zero from a non-zero result at the end. */
static void
s390_irgen_xonc(IROp op, IRTemp length, IRTemp start1, IRTemp start2)
{
   IRTemp old1 = newTemp(Ity_I8);
   IRTemp old2 = newTemp(Ity_I8);
   IRTemp new1 = newTemp(Ity_I8);
   IRTemp counter = newTemp(Ity_I32);
   IRTemp addr1 = newTemp(Ity_I64);

   assign(counter, get_counter_w0());

   assign(addr1, binop(Iop_Add64, mkexpr(start1),
                       unop(Iop_32Uto64, mkexpr(counter))));

   assign(old1, load(Ity_I8, mkexpr(addr1)));
   assign(old2, load(Ity_I8, binop(Iop_Add64, mkexpr(start2),
                                   unop(Iop_32Uto64,mkexpr(counter)))));
   assign(new1, binop(op, mkexpr(old1), mkexpr(old2)));

   /* Special case: xc is used to zero memory */
   if (op == Iop_Xor8) {
      store(mkexpr(addr1),
            mkite(binop(Iop_CmpEQ64, mkexpr(start1), mkexpr(start2)),
                  mkU8(0), mkexpr(new1)));
   } else
      store(mkexpr(addr1), mkexpr(new1));
   put_counter_w1(binop(Iop_Or32, unop(Iop_8Uto32, mkexpr(new1)),
                        get_counter_w1()));

   /* Check for end of field */
   put_counter_w0(binop(Iop_Add32, mkexpr(counter), mkU32(1)));
   iterate_if(binop(Iop_CmpNE32, mkexpr(counter), mkexpr(length)));
   s390_cc_thunk_put1(S390_CC_OP_BITWISE, mktemp(Ity_I32, get_counter_w1()),
                      False);
   put_counter_dw0(mkU64(0));
}
+
+static const HChar *
+s390_irgen_XC(UChar length, IRTemp start1, IRTemp start2)
+{
+   /* XC: exclusive-OR the bytes at start2 into the field at start1;
+      the byte-loop lives in the shared xonc helper. */
+   IRTemp num_bytes = newTemp(Ity_I32);
+   assign(num_bytes, mkU32(length));
+   s390_irgen_xonc(Iop_Xor8, num_bytes, start1, start2);
+   return "xc";
+}
+
+static void
+s390_irgen_XC_sameloc(UChar length, UChar b, UShort d)
+{
+   /* Special-cased XC where both operands name the same location, so
+      the field is simply zeroed.  Short fields (< 8 bytes) are unrolled
+      into individual byte stores; longer fields use a counter loop. */
+   IRTemp counter = newTemp(Ity_I32);
+   IRTemp start = newTemp(Ity_I64);
+   IRTemp addr  = newTemp(Ity_I64);
+
+   /* Effective address d(b); base register 0 contributes no value. */
+   assign(start,
+          binop(Iop_Add64, mkU64(d), b != 0 ? get_gpr_dw0(b) : mkU64(0)));
+
+   if (length < 8) {
+      UInt i;
+
+      for (i = 0; i <= length; ++i) {
+         store(binop(Iop_Add64, mkexpr(start), mkU64(i)), mkU8(0));
+      }
+   } else {
+     assign(counter, get_counter_w0());
+
+     assign(addr, binop(Iop_Add64, mkexpr(start),
+                        unop(Iop_32Uto64, mkexpr(counter))));
+
+     store(mkexpr(addr), mkU8(0));
+
+     /* Check for end of field */
+     put_counter_w0(binop(Iop_Add32, mkexpr(counter), mkU32(1)));
+     iterate_if(binop(Iop_CmpNE32, mkexpr(counter), mkU32(length)));
+
+     /* Reset counter */
+     put_counter_dw0(mkU64(0));
+   }
+
+   /* The result is all zeroes, so the cc is computed from 0. */
+   s390_cc_thunk_put1(S390_CC_OP_BITWISE, mktemp(Ity_I32, mkU32(0)), False);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_FE))
+      s390_disasm(ENC3(MNM, UDLB, UDXB), "xc", d, length, b, d, 0, b);
+}
+
+static const HChar *
+s390_irgen_NC(UChar length, IRTemp start1, IRTemp start2)
+{
+   /* NC: AND the bytes at start2 into the field at start1;
+      the byte-loop lives in the shared xonc helper. */
+   IRTemp num_bytes = newTemp(Ity_I32);
+   assign(num_bytes, mkU32(length));
+   s390_irgen_xonc(Iop_And8, num_bytes, start1, start2);
+   return "nc";
+}
+
+static const HChar *
+s390_irgen_OC(UChar length, IRTemp start1, IRTemp start2)
+{
+   /* OC: OR the bytes at start2 into the field at start1;
+      the byte-loop lives in the shared xonc helper. */
+   IRTemp num_bytes = newTemp(Ity_I32);
+   assign(num_bytes, mkU32(length));
+   s390_irgen_xonc(Iop_Or8, num_bytes, start1, start2);
+   return "oc";
+}
+
+
+static const HChar *
+s390_irgen_MVC(UChar length, IRTemp start1, IRTemp start2)
+{
+   /* MVC: move characters; delegates to the shared MVC/EX helper
+      with the length widened to 64 bit. */
+   IRTemp num_bytes = newTemp(Ity_I64);
+   assign(num_bytes, mkU64(length));
+   s390_irgen_MVC_EX(num_bytes, start1, start2);
+   return "mvc";
+}
+
+static const HChar *
+s390_irgen_MVCL(UChar r1, UChar r2)
+{
+   /* MVCL: move long.  Copies one byte per IR iteration from the
+      operand described by the even-odd-ish pair (r2, r2+1) to the one
+      described by (r1, r1+1).  The 24-bit lengths live in the low
+      bytes of r1+1 / r2+1; once the source length is exhausted the
+      pad byte (byte 4 of r2+1) is stored instead. */
+   IRTemp addr1 = newTemp(Ity_I64);
+   IRTemp addr2 = newTemp(Ity_I64);
+   IRTemp addr2_load = newTemp(Ity_I64);
+   IRTemp r1p1 = newTemp(Ity_I32);   /* contents of r1 + 1 */
+   IRTemp r2p1 = newTemp(Ity_I32);   /* contents of r2 + 1 */
+   IRTemp len1 = newTemp(Ity_I32);
+   IRTemp len2 = newTemp(Ity_I32);
+   IRTemp pad = newTemp(Ity_I8);
+   IRTemp single = newTemp(Ity_I8);
+
+   assign(addr1, get_gpr_dw0(r1));
+   assign(r1p1, get_gpr_w1(r1 + 1));
+   assign(len1, binop(Iop_And32, mkexpr(r1p1), mkU32(0x00ffffff)));
+   assign(addr2, get_gpr_dw0(r2));
+   assign(r2p1, get_gpr_w1(r2 + 1));
+   assign(len2, binop(Iop_And32, mkexpr(r2p1), mkU32(0x00ffffff)));
+   assign(pad, get_gpr_b4(r2 + 1));   /* pad byte = bits 32-39 of r2+1 */
+
+   /* len1 == 0 ? */
+   s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, len1, len2, False);
+   next_insn_if(binop(Iop_CmpEQ32, mkexpr(len1), mkU32(0)));
+
+   /* Check for destructive overlap:
+      addr1 > addr2 && addr2 + len1 > addr1 && (addr2 + len2) > addr1 */
+   s390_cc_set(3);   /* destructive overlap yields cc 3 and no movement */
+   IRTemp cond1 = newTemp(Ity_I32);
+   assign(cond1, unop(Iop_1Uto32,
+                      binop(Iop_CmpLT64U, mkexpr(addr2), mkexpr(addr1))));
+   IRTemp cond2 = newTemp(Ity_I32);
+   assign(cond2, unop(Iop_1Uto32,
+                      binop(Iop_CmpLT64U, mkexpr(addr1),
+                            binop(Iop_Add64, mkexpr(addr2),
+                                  unop(Iop_32Uto64, mkexpr(len1))))));
+   IRTemp cond3 = newTemp(Ity_I32);
+   assign(cond3, unop(Iop_1Uto32,
+                      binop(Iop_CmpLT64U, 
+                            mkexpr(addr1),
+                            binop(Iop_Add64, mkexpr(addr2),
+                                  unop(Iop_32Uto64, mkexpr(len2))))));
+
+   next_insn_if(binop(Iop_CmpEQ32,
+                      binop(Iop_And32,
+                            binop(Iop_And32, mkexpr(cond1), mkexpr(cond2)),
+                            mkexpr(cond3)),
+                      mkU32(1)));
+
+   /* See s390_irgen_CLCL for explanation why we cannot load directly
+      and need two steps. */
+   assign(addr2_load,
+          mkite(binop(Iop_CmpEQ32, mkexpr(len2), mkU32(0)),
+                mkU64(guest_IA_curr_instr), mkexpr(addr2)));
+   assign(single,
+          mkite(binop(Iop_CmpEQ32, mkexpr(len2), mkU32(0)),
+                mkexpr(pad), load(Ity_I8, mkexpr(addr2_load))));
+
+   store(mkexpr(addr1), mkexpr(single));
+
+   /* Update addr1 and len1 */
+   put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(addr1), mkU64(1)));
+   put_gpr_w1(r1 + 1, binop(Iop_Sub32, mkexpr(r1p1), mkU32(1)));
+
+   /* Update addr2 and len2 */
+   put_gpr_dw0(r2,
+               mkite(binop(Iop_CmpEQ32, mkexpr(len2), mkU32(0)),
+                     mkexpr(addr2),
+                     binop(Iop_Add64, mkexpr(addr2), mkU64(1))));
+
+   /* When updating len2 we must not modify bits (r2+1)[0:39] */
+   put_gpr_w1(r2 + 1,
+              mkite(binop(Iop_CmpEQ32, mkexpr(len2), mkU32(0)),
+                    binop(Iop_And32, mkexpr(r2p1), mkU32(0xFF000000u)),
+                    binop(Iop_Sub32, mkexpr(r2p1), mkU32(1))));
+
+   s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, len1, len2, False);
+   /* Re-run this insn until the last destination byte has been written
+      (len1 == 1 on the iteration that copies the final byte). */
+   iterate_if(binop(Iop_CmpNE32, mkexpr(len1), mkU32(1)));
+
+   return "mvcl";
+}
+
+
+static const HChar *
+s390_irgen_MVCLE(UChar r1, UChar r3, IRTemp pad2)
+{
+   /* MVCLE: move long extended.  Like MVCL but with full 64-bit
+      addresses/lengths in the register pairs (r1, r1+1) and (r3, r3+1),
+      and the pad byte supplied separately in pad2.  One byte is moved
+      per IR iteration. */
+   IRTemp addr1, addr3, addr3_load, len1, len3, single;
+
+   addr1 = newTemp(Ity_I64);
+   addr3 = newTemp(Ity_I64);
+   addr3_load = newTemp(Ity_I64);
+   len1 = newTemp(Ity_I64);
+   len3 = newTemp(Ity_I64);
+   single = newTemp(Ity_I8);
+
+   assign(addr1, get_gpr_dw0(r1));
+   assign(len1, get_gpr_dw0(r1 + 1));
+   assign(addr3, get_gpr_dw0(r3));
+   assign(len3, get_gpr_dw0(r3 + 1));
+
+   // len1 == 0 ?
+   s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, len1, len3, False);
+   next_insn_if(binop(Iop_CmpEQ64,mkexpr(len1), mkU64(0)));
+
+   /* This is a hack to prevent mvcle from reading from addr3 if it
+      should read from the pad. Since the pad has no address, just
+      read from the instruction, we discard that anyway */
+   assign(addr3_load,
+          mkite(binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0)),
+                mkU64(guest_IA_curr_instr), mkexpr(addr3)));
+
+   assign(single,
+          mkite(binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0)),
+                unop(Iop_64to8, mkexpr(pad2)),
+                load(Ity_I8, mkexpr(addr3_load))));
+   store(mkexpr(addr1), mkexpr(single));
+
+   put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(addr1), mkU64(1)));
+
+   put_gpr_dw0(r1 + 1, binop(Iop_Sub64, mkexpr(len1), mkU64(1)));
+
+   /* Source address/length only advance while source bytes remain. */
+   put_gpr_dw0(r3,
+               mkite(binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0)),
+                     mkexpr(addr3),
+                     binop(Iop_Add64, mkexpr(addr3), mkU64(1))));
+
+   put_gpr_dw0(r3 + 1,
+               mkite(binop(Iop_CmpEQ64, mkexpr(len3), mkU64(0)),
+                     mkU64(0), binop(Iop_Sub64, mkexpr(len3), mkU64(1))));
+
+   s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, len1, len3, False);
+   /* Re-run this insn until the final destination byte is written. */
+   iterate_if(binop(Iop_CmpNE64, mkexpr(len1), mkU64(1)));
+
+   return "mvcle";
+}
+
+static const HChar *
+s390_irgen_MVST(UChar r1, UChar r2)
+{
+   /* MVST: move string.  Copies bytes from the address in r2 to the
+      address in r1 until the terminator byte (byte 7 of GPR 0) has
+      been copied, one byte per IR iteration; the guest counter holds
+      the running offset. */
+   IRTemp addr1 = newTemp(Ity_I64);
+   IRTemp addr2 = newTemp(Ity_I64);
+   IRTemp end = newTemp(Ity_I8);
+   IRTemp byte = newTemp(Ity_I8);
+   IRTemp counter = newTemp(Ity_I64);
+
+   assign(addr1, get_gpr_dw0(r1));
+   assign(addr2, get_gpr_dw0(r2));
+   assign(counter, get_counter_dw0());
+   assign(end, get_gpr_b7(0));   /* terminator byte from GPR 0 */
+   assign(byte, load(Ity_I8, binop(Iop_Add64, mkexpr(addr2),mkexpr(counter))));
+   store(binop(Iop_Add64,mkexpr(addr1),mkexpr(counter)), mkexpr(byte));
+
+   // We use unlimited as cpu-determined number
+   put_counter_dw0(binop(Iop_Add64, mkexpr(counter), mkU64(1)));
+   iterate_if(binop(Iop_CmpNE8, mkexpr(end), mkexpr(byte)));
+
+   // and always set cc=1 at the end + update r1
+   s390_cc_set(1);
+   put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(addr1), mkexpr(counter)));
+   put_counter_dw0(mkU64(0));   /* reset the counter for the next insn */
+
+   return "mvst";
+}
+
+static void
+s390_irgen_divide_64to32(IROp op, UChar r1, IRTemp op2)
+{
+   /* Divide the 64-bit dividend formed by the low words of the pair
+      (r1, r1+1) by op2 using "op" (signed or unsigned DivMod).
+      The remainder goes to r1, the quotient to r1+1. */
+   IRTemp dividend = newTemp(Ity_I64);
+   IRTemp divmod   = newTemp(Ity_I64);
+
+   assign(dividend, binop(Iop_32HLto64,
+                          get_gpr_w1(r1),        /* high 32 bits */
+                          get_gpr_w1(r1 + 1)));  /* low  32 bits */
+   assign(divmod, binop(op, mkexpr(dividend), mkexpr(op2)));
+   put_gpr_w1(r1, unop(Iop_64HIto32, mkexpr(divmod)));     /* remainder */
+   put_gpr_w1(r1 + 1, unop(Iop_64to32, mkexpr(divmod)));   /* quotient */
+}
+
+static void
+s390_irgen_divide_128to64(IROp op, UChar r1, IRTemp op2)
+{
+   /* Divide the 128-bit dividend formed by the pair (r1, r1+1) by op2
+      using "op".  The remainder goes to r1, the quotient to r1+1. */
+   IRTemp dividend = newTemp(Ity_I128);
+   IRTemp divmod   = newTemp(Ity_I128);
+
+   assign(dividend, binop(Iop_64HLto128,
+                          get_gpr_dw0(r1),        /* high 64 bits */
+                          get_gpr_dw0(r1 + 1)));  /* low  64 bits */
+   assign(divmod, binop(op, mkexpr(dividend), mkexpr(op2)));
+   put_gpr_dw0(r1, unop(Iop_128HIto64, mkexpr(divmod)));     /* remainder */
+   put_gpr_dw0(r1 + 1, unop(Iop_128to64, mkexpr(divmod)));   /* quotient */
+}
+
+static void
+s390_irgen_divide_64to64(IROp op, UChar r1, IRTemp op2)
+{
+   /* Divide the 64-bit dividend in r1+1 by op2 using "op", which
+      yields a 128-bit remainder:quotient pair.  The remainder goes
+      to r1, the quotient to r1+1. */
+   IRTemp dividend = newTemp(Ity_I64);
+   IRTemp divmod   = newTemp(Ity_I128);
+
+   assign(dividend, get_gpr_dw0(r1 + 1));
+   assign(divmod, binop(op, mkexpr(dividend), mkexpr(op2)));
+   put_gpr_dw0(r1, unop(Iop_128HIto64, mkexpr(divmod)));     /* remainder */
+   put_gpr_dw0(r1 + 1, unop(Iop_128to64, mkexpr(divmod)));   /* quotient */
+}
+
+static const HChar *
+s390_irgen_DR(UChar r1, UChar r2)
+{
+   /* DR: signed 32-bit divide; the divisor is the low word of r2. */
+   IRTemp divisor = newTemp(Ity_I32);
+   assign(divisor, get_gpr_w1(r2));
+   s390_irgen_divide_64to32(Iop_DivModS64to32, r1, divisor);
+   return "dr";
+}
+
+static const HChar *
+s390_irgen_D(UChar r1, IRTemp op2addr)
+{
+   /* D: signed 32-bit divide; the divisor is loaded from storage. */
+   IRTemp divisor = newTemp(Ity_I32);
+   assign(divisor, load(Ity_I32, mkexpr(op2addr)));
+   s390_irgen_divide_64to32(Iop_DivModS64to32, r1, divisor);
+   return "d";
+}
+
+static const HChar *
+s390_irgen_DLR(UChar r1, UChar r2)
+{
+   /* DLR: unsigned 32-bit divide; the divisor is the low word of r2. */
+   IRTemp divisor = newTemp(Ity_I32);
+   assign(divisor, get_gpr_w1(r2));
+   s390_irgen_divide_64to32(Iop_DivModU64to32, r1, divisor);
+   return "dlr";
+}
+
+static const HChar *
+s390_irgen_DL(UChar r1, IRTemp op2addr)
+{
+   /* DL: unsigned 32-bit divide; the divisor is loaded from storage. */
+   IRTemp divisor = newTemp(Ity_I32);
+   assign(divisor, load(Ity_I32, mkexpr(op2addr)));
+   s390_irgen_divide_64to32(Iop_DivModU64to32, r1, divisor);
+   return "dl";
+}
+
+static const HChar *
+s390_irgen_DLG(UChar r1, IRTemp op2addr)
+{
+   /* DLG: unsigned 64-bit divide of a 128-bit dividend; the divisor
+      is loaded from storage. */
+   IRTemp divisor = newTemp(Ity_I64);
+   assign(divisor, load(Ity_I64, mkexpr(op2addr)));
+   s390_irgen_divide_128to64(Iop_DivModU128to64, r1, divisor);
+   return "dlg";
+}
+
+static const HChar *
+s390_irgen_DLGR(UChar r1, UChar r2)
+{
+   /* DLGR: unsigned 64-bit divide of a 128-bit dividend; the divisor
+      comes from r2. */
+   IRTemp divisor = newTemp(Ity_I64);
+   assign(divisor, get_gpr_dw0(r2));
+   s390_irgen_divide_128to64(Iop_DivModU128to64, r1, divisor);
+   return "dlgr";
+}
+
+static const HChar *
+s390_irgen_DSGR(UChar r1, UChar r2)
+{
+   /* DSGR: signed 64-bit divide; the divisor comes from r2. */
+   IRTemp divisor = newTemp(Ity_I64);
+   assign(divisor, get_gpr_dw0(r2));
+   s390_irgen_divide_64to64(Iop_DivModS64to64, r1, divisor);
+   return "dsgr";
+}
+
+static const HChar *
+s390_irgen_DSG(UChar r1, IRTemp op2addr)
+{
+   /* DSG: signed 64-bit divide; the divisor is loaded from storage. */
+   IRTemp divisor = newTemp(Ity_I64);
+   assign(divisor, load(Ity_I64, mkexpr(op2addr)));
+   s390_irgen_divide_64to64(Iop_DivModS64to64, r1, divisor);
+   return "dsg";
+}
+
+static const HChar *
+s390_irgen_DSGFR(UChar r1, UChar r2)
+{
+   /* DSGFR: signed 64-bit divide; the divisor is the low word of r2,
+      sign-extended to 64 bit. */
+   IRTemp divisor = newTemp(Ity_I64);
+   assign(divisor, unop(Iop_32Sto64, get_gpr_w1(r2)));
+   s390_irgen_divide_64to64(Iop_DivModS64to64, r1, divisor);
+   return "dsgfr";
+}
+
+static const HChar *
+s390_irgen_DSGF(UChar r1, IRTemp op2addr)
+{
+   /* DSGF: signed 64-bit divide; the divisor is a 32-bit value from
+      storage, sign-extended to 64 bit. */
+   IRTemp divisor = newTemp(Ity_I64);
+   assign(divisor, unop(Iop_32Sto64, load(Ity_I32, mkexpr(op2addr))));
+   s390_irgen_divide_64to64(Iop_DivModS64to64, r1, divisor);
+   return "dsgf";
+}
+
+static void
+s390_irgen_load_ar_multiple(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* Shared body of LAM/LAMY: load access registers r1 .. r3 (register
+      numbers wrapping from 15 back to 0) from consecutive words
+      starting at op2addr. */
+   UChar reg;
+   IRTemp addr = newTemp(Ity_I64);
+
+   assign(addr, mkexpr(op2addr));
+   reg = r1;
+   do {
+      IRTemp old = addr;
+
+      reg %= 16;   /* wrap the register number after 15 */
+      put_ar_w0(reg, load(Ity_I32, mkexpr(addr)));
+      /* IR temps are write-once; use a fresh temp per iteration. */
+      addr = newTemp(Ity_I64);
+      assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+      reg++;
+   } while (reg != (r3 + 1));
+}
+
+static const HChar *
+s390_irgen_LAM(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* LAM: load access registers r1 .. r3 from storage at op2addr. */
+   s390_irgen_load_ar_multiple(r1, r3, op2addr);
+   return "lam";
+}
+
+static const HChar *
+s390_irgen_LAMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* LAMY: long-displacement form of LAM; same semantics. */
+   s390_irgen_load_ar_multiple(r1, r3, op2addr);
+   return "lamy";
+}
+
+static void
+s390_irgen_store_ar_multiple(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* Shared body of STAM/STAMY: store access registers r1 .. r3
+      (register numbers wrapping from 15 back to 0) into consecutive
+      words starting at op2addr. */
+   UChar reg;
+   IRTemp addr = newTemp(Ity_I64);
+
+   assign(addr, mkexpr(op2addr));
+   reg = r1;
+   do {
+      IRTemp old = addr;
+
+      reg %= 16;   /* wrap the register number after 15 */
+      store(mkexpr(addr), get_ar_w0(reg));
+      /* IR temps are write-once; use a fresh temp per iteration. */
+      addr = newTemp(Ity_I64);
+      assign(addr, binop(Iop_Add64, mkexpr(old), mkU64(4)));
+      reg++;
+   } while (reg != (r3 + 1));
+}
+
+static const HChar *
+s390_irgen_STAM(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* STAM: store access registers r1 .. r3 to storage at op2addr. */
+   s390_irgen_store_ar_multiple(r1, r3, op2addr);
+   return "stam";
+}
+
+static const HChar *
+s390_irgen_STAMY(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* STAMY: long-displacement form of STAM; same semantics. */
+   s390_irgen_store_ar_multiple(r1, r3, op2addr);
+   return "stamy";
+}
+
+
+/* Implementation for 32-bit compare-and-swap */
+static void
+s390_irgen_cas_32(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* Shared body of CS/CSY: atomically compare the low word of r1 with
+      the word at op2addr; if equal, store the low word of r3 there. */
+   IRCAS *cas;
+   IRTemp op1 = newTemp(Ity_I32);
+   IRTemp old_mem = newTemp(Ity_I32);
+   IRTemp op3 = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp nequal = newTemp(Ity_I1);
+
+   assign(op1, get_gpr_w1(r1));
+   assign(op3, get_gpr_w1(r3));
+
+   /* The first and second operands are compared. If they are equal,
+      the third operand is stored at the second- operand location. */
+   cas = mkIRCAS(IRTemp_INVALID, old_mem,
+                 Iend_BE, mkexpr(op2addr),
+                 NULL, mkexpr(op1), /* expected value */
+                 NULL, mkexpr(op3)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* Set CC. Operands compared equal -> 0, else 1. */
+   assign(result, binop(Iop_Sub32, mkexpr(op1), mkexpr(old_mem)));
+   s390_cc_thunk_put1(S390_CC_OP_BITWISE, result, False);
+
+   /* If operands were equal (cc == 0) just store the old value op1 in r1.
+      Otherwise, store the old_value from memory in r1 and yield. */
+   assign(nequal, binop(Iop_CmpNE32, s390_call_calculate_cc(), mkU32(0)));
+   put_gpr_w1(r1, mkite(mkexpr(nequal), mkexpr(old_mem), mkexpr(op1)));
+   yield_if(mkexpr(nequal));
+}
+
+static const HChar *
+s390_irgen_CS(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* CS: 32-bit compare and swap. */
+   s390_irgen_cas_32(r1, r3, op2addr);
+   return "cs";
+}
+
+static const HChar *
+s390_irgen_CSY(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* CSY: long-displacement form of CS; same semantics. */
+   s390_irgen_cas_32(r1, r3, op2addr);
+   return "csy";
+}
+
+static const HChar *
+s390_irgen_CSG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* CSG: 64-bit compare and swap.  Same structure as the 32-bit
+      variant in s390_irgen_cas_32, but on doublewords. */
+   IRCAS *cas;
+   IRTemp op1 = newTemp(Ity_I64);
+   IRTemp old_mem = newTemp(Ity_I64);
+   IRTemp op3 = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp nequal = newTemp(Ity_I1);
+
+   assign(op1, get_gpr_dw0(r1));
+   assign(op3, get_gpr_dw0(r3));
+
+   /* The first and second operands are compared. If they are equal,
+      the third operand is stored at the second- operand location. */
+   cas = mkIRCAS(IRTemp_INVALID, old_mem,
+                 Iend_BE, mkexpr(op2addr),
+                 NULL, mkexpr(op1), /* expected value */
+                 NULL, mkexpr(op3)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* Set CC. Operands compared equal -> 0, else 1. */
+   assign(result, binop(Iop_Sub64, mkexpr(op1), mkexpr(old_mem)));
+   s390_cc_thunk_put1(S390_CC_OP_BITWISE, result, False);
+
+   /* If operands were equal (cc == 0) just store the old value op1 in r1.
+      Otherwise, store the old_value from memory in r1 and yield. */
+   assign(nequal, binop(Iop_CmpNE32, s390_call_calculate_cc(), mkU32(0)));
+   put_gpr_dw0(r1, mkite(mkexpr(nequal), mkexpr(old_mem), mkexpr(op1)));
+   yield_if(mkexpr(nequal));
+
+   return "csg";
+}
+
+/* Implementation for 32-bit compare-double-and-swap */
+static void
+s390_irgen_cdas_32(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* Shared body of CDS/CDSY: atomically compare the pair of low words
+      in (r1, r1+1) with the doubleword at op2addr; if equal, store the
+      pair from (r3, r3+1) there. */
+   IRCAS *cas;
+   IRTemp op1_high = newTemp(Ity_I32);
+   IRTemp op1_low  = newTemp(Ity_I32);
+   IRTemp old_mem_high = newTemp(Ity_I32);
+   IRTemp old_mem_low  = newTemp(Ity_I32);
+   IRTemp op3_high = newTemp(Ity_I32);
+   IRTemp op3_low  = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp nequal = newTemp(Ity_I1);
+
+   assign(op1_high, get_gpr_w1(r1));
+   assign(op1_low,  get_gpr_w1(r1+1));
+   assign(op3_high, get_gpr_w1(r3));
+   assign(op3_low,  get_gpr_w1(r3+1));
+
+   /* The first and second operands are compared. If they are equal,
+      the third operand is stored at the second-operand location. */
+   cas = mkIRCAS(old_mem_high, old_mem_low,
+                 Iend_BE, mkexpr(op2addr),
+                 mkexpr(op1_high), mkexpr(op1_low), /* expected value */
+                 mkexpr(op3_high), mkexpr(op3_low)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* Set CC. Operands compared equal -> 0, else 1. */
+   /* result == 0 iff both halves matched (XOR/OR combine). */
+   assign(result, unop(Iop_1Uto32,
+          binop(Iop_CmpNE32,
+                binop(Iop_Or32,
+                      binop(Iop_Xor32, mkexpr(op1_high), mkexpr(old_mem_high)),
+                      binop(Iop_Xor32, mkexpr(op1_low), mkexpr(old_mem_low))),
+                mkU32(0))));
+
+   s390_cc_thunk_put1(S390_CC_OP_BITWISE, result, False);
+
+   /* If operands were equal (cc == 0) just store the old value op1 in r1.
+      Otherwise, store the old_value from memory in r1 and yield. */
+   assign(nequal, binop(Iop_CmpNE32, s390_call_calculate_cc(), mkU32(0)));
+   put_gpr_w1(r1,   mkite(mkexpr(nequal), mkexpr(old_mem_high), mkexpr(op1_high)));
+   put_gpr_w1(r1+1, mkite(mkexpr(nequal), mkexpr(old_mem_low),  mkexpr(op1_low)));
+   yield_if(mkexpr(nequal));
+}
+
+static const HChar *
+s390_irgen_CDS(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* CDS: 32-bit compare double and swap. */
+   s390_irgen_cdas_32(r1, r3, op2addr);
+   return "cds";
+}
+
+static const HChar *
+s390_irgen_CDSY(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* CDSY: long-displacement form of CDS; same semantics. */
+   s390_irgen_cdas_32(r1, r3, op2addr);
+   return "cdsy";
+}
+
+static const HChar *
+s390_irgen_CDSG(UChar r1, UChar r3, IRTemp op2addr)
+{
+   /* CDSG: 64-bit compare double and swap.  Same structure as the
+      32-bit variant in s390_irgen_cdas_32, but on a quadword built
+      from the doubleword pairs (r1, r1+1) and (r3, r3+1). */
+   IRCAS *cas;
+   IRTemp op1_high = newTemp(Ity_I64);
+   IRTemp op1_low  = newTemp(Ity_I64);
+   IRTemp old_mem_high = newTemp(Ity_I64);
+   IRTemp old_mem_low  = newTemp(Ity_I64);
+   IRTemp op3_high = newTemp(Ity_I64);
+   IRTemp op3_low  = newTemp(Ity_I64);
+   IRTemp result = newTemp(Ity_I64);
+   IRTemp nequal = newTemp(Ity_I1);
+
+   assign(op1_high, get_gpr_dw0(r1));
+   assign(op1_low,  get_gpr_dw0(r1+1));
+   assign(op3_high, get_gpr_dw0(r3));
+   assign(op3_low,  get_gpr_dw0(r3+1));
+
+   /* The first and second operands are compared. If they are equal,
+      the third operand is stored at the second-operand location. */
+   cas = mkIRCAS(old_mem_high, old_mem_low,
+                 Iend_BE, mkexpr(op2addr),
+                 mkexpr(op1_high), mkexpr(op1_low), /* expected value */
+                 mkexpr(op3_high), mkexpr(op3_low)  /* new value */);
+   stmt(IRStmt_CAS(cas));
+
+   /* Set CC. Operands compared equal -> 0, else 1. */
+   /* result == 0 iff both halves matched (XOR/OR combine). */
+   assign(result, unop(Iop_1Uto64,
+          binop(Iop_CmpNE64,
+                binop(Iop_Or64,
+                      binop(Iop_Xor64, mkexpr(op1_high), mkexpr(old_mem_high)),
+                      binop(Iop_Xor64, mkexpr(op1_low), mkexpr(old_mem_low))),
+                mkU64(0))));
+
+   s390_cc_thunk_put1(S390_CC_OP_BITWISE, result, False);
+
+   /* If operands were equal (cc == 0) just store the old value op1 in r1.
+      Otherwise, store the old_value from memory in r1 and yield. */
+   assign(nequal, binop(Iop_CmpNE32, s390_call_calculate_cc(), mkU32(0)));
+   put_gpr_dw0(r1,   mkite(mkexpr(nequal), mkexpr(old_mem_high), mkexpr(op1_high)));
+   put_gpr_dw0(r1+1, mkite(mkexpr(nequal), mkexpr(old_mem_low),  mkexpr(op1_low)));
+   yield_if(mkexpr(nequal));
+
+   return "cdsg";
+}
+
+
+/* Binary floating point */
+
+static const HChar *
+s390_irgen_AXBR(UChar r1, UChar r2)
+{
+   /* AXBR: 128-bit BFP add; r1 = r1 + r2, rounding per FPC; cc is
+      derived from the result. */
+   IRTemp augend = newTemp(Ity_F128);
+   IRTemp addend = newTemp(Ity_F128);
+   IRTemp sum    = newTemp(Ity_F128);
+   IRTemp rmode  = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(augend, get_fpr_pair(r1));
+   assign(addend, get_fpr_pair(r2));
+   assign(sum, triop(Iop_AddF128, mkexpr(rmode), mkexpr(augend),
+                     mkexpr(addend)));
+   put_fpr_pair(r1, mkexpr(sum));
+   s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, sum);
+
+   return "axbr";
+}
+
+static const HChar *
+s390_irgen_CEBR(UChar r1, UChar r2)
+{
+   /* CEBR: compare two 32-bit BFP registers and set the cc thunk. */
+   IRTemp left    = newTemp(Ity_F32);
+   IRTemp right   = newTemp(Ity_F32);
+   IRTemp cc_vex  = newTemp(Ity_I32);
+   IRTemp cc_s390 = newTemp(Ity_I32);
+
+   assign(left,  get_fpr_w0(r1));
+   assign(right, get_fpr_w0(r2));
+   assign(cc_vex, binop(Iop_CmpF32, mkexpr(left), mkexpr(right)));
+   /* Translate VEX's FP comparison encoding into an s390 cc value. */
+   assign(cc_s390, convert_vex_bfpcc_to_s390(cc_vex));
+   s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+   return "cebr";
+}
+
+static const HChar *
+s390_irgen_CDBR(UChar r1, UChar r2)
+{
+   /* CDBR: compare two 64-bit BFP registers and set the cc thunk. */
+   IRTemp left    = newTemp(Ity_F64);
+   IRTemp right   = newTemp(Ity_F64);
+   IRTemp cc_vex  = newTemp(Ity_I32);
+   IRTemp cc_s390 = newTemp(Ity_I32);
+
+   assign(left,  get_fpr_dw0(r1));
+   assign(right, get_fpr_dw0(r2));
+   assign(cc_vex, binop(Iop_CmpF64, mkexpr(left), mkexpr(right)));
+   /* Translate VEX's FP comparison encoding into an s390 cc value. */
+   assign(cc_s390, convert_vex_bfpcc_to_s390(cc_vex));
+   s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+   return "cdbr";
+}
+
+static const HChar *
+s390_irgen_CXBR(UChar r1, UChar r2)
+{
+   /* CXBR: compare two 128-bit BFP register pairs and set the cc. */
+   IRTemp left    = newTemp(Ity_F128);
+   IRTemp right   = newTemp(Ity_F128);
+   IRTemp cc_vex  = newTemp(Ity_I32);
+   IRTemp cc_s390 = newTemp(Ity_I32);
+
+   assign(left,  get_fpr_pair(r1));
+   assign(right, get_fpr_pair(r2));
+   assign(cc_vex, binop(Iop_CmpF128, mkexpr(left), mkexpr(right)));
+   /* Translate VEX's FP comparison encoding into an s390 cc value. */
+   assign(cc_s390, convert_vex_bfpcc_to_s390(cc_vex));
+   s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+   return "cxbr";
+}
+
+static const HChar *
+s390_irgen_CEB(UChar r1, IRTemp op2addr)
+{
+   /* CEB: compare 32-bit BFP register r1 with a value from storage. */
+   IRTemp left    = newTemp(Ity_F32);
+   IRTemp right   = newTemp(Ity_F32);
+   IRTemp cc_vex  = newTemp(Ity_I32);
+   IRTemp cc_s390 = newTemp(Ity_I32);
+
+   assign(left,  get_fpr_w0(r1));
+   assign(right, load(Ity_F32, mkexpr(op2addr)));
+   assign(cc_vex, binop(Iop_CmpF32, mkexpr(left), mkexpr(right)));
+   /* Translate VEX's FP comparison encoding into an s390 cc value. */
+   assign(cc_s390, convert_vex_bfpcc_to_s390(cc_vex));
+   s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+   return "ceb";
+}
+
+static const HChar *
+s390_irgen_CDB(UChar r1, IRTemp op2addr)
+{
+   /* CDB: compare 64-bit BFP register r1 with a value from storage. */
+   IRTemp left    = newTemp(Ity_F64);
+   IRTemp right   = newTemp(Ity_F64);
+   IRTemp cc_vex  = newTemp(Ity_I32);
+   IRTemp cc_s390 = newTemp(Ity_I32);
+
+   assign(left,  get_fpr_dw0(r1));
+   assign(right, load(Ity_F64, mkexpr(op2addr)));
+   assign(cc_vex, binop(Iop_CmpF64, mkexpr(left), mkexpr(right)));
+   /* Translate VEX's FP comparison encoding into an s390 cc value. */
+   assign(cc_s390, convert_vex_bfpcc_to_s390(cc_vex));
+   s390_cc_thunk_put1(S390_CC_OP_SET, cc_s390, False);
+
+   return "cdb";
+}
+
+static const HChar *
+s390_irgen_CXFBR(UChar m3 __attribute__((unused)),
+                 UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   /* CXFBR: convert the 32-bit signed integer in r2 to 128-bit BFP. */
+   IRTemp src = newTemp(Ity_I32);
+   assign(src, get_gpr_w1(r2));
+   put_fpr_pair(r1, unop(Iop_I32StoF128, mkexpr(src)));
+   return "cxfbr";
+}
+
+static const HChar *
+s390_irgen_CXLFBR(UChar m3 __attribute__((unused)),
+                  UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   /* CXLFBR: convert the 32-bit unsigned integer in r2 to 128-bit BFP.
+      Requires the floating-point-extension facility. */
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+      return "cxlfbr";
+   }
+   IRTemp src = newTemp(Ity_I32);
+   assign(src, get_gpr_w1(r2));
+   put_fpr_pair(r1, unop(Iop_I32UtoF128, mkexpr(src)));
+   return "cxlfbr";
+}
+
+
+static const HChar *
+s390_irgen_CXGBR(UChar m3 __attribute__((unused)),
+                 UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   /* CXGBR: convert the 64-bit signed integer in r2 to 128-bit BFP. */
+   IRTemp src = newTemp(Ity_I64);
+   assign(src, get_gpr_dw0(r2));
+   put_fpr_pair(r1, unop(Iop_I64StoF128, mkexpr(src)));
+   return "cxgbr";
+}
+
+static const HChar *
+s390_irgen_CXLGBR(UChar m3 __attribute__((unused)),
+                  UChar m4 __attribute__((unused)), UChar r1, UChar r2)
+{
+   /* CXLGBR: convert the 64-bit unsigned integer in r2 to 128-bit BFP.
+      Requires the floating-point-extension facility. */
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+      return "cxlgbr";
+   }
+   IRTemp src = newTemp(Ity_I64);
+   assign(src, get_gpr_dw0(r2));
+   put_fpr_pair(r1, unop(Iop_I64UtoF128, mkexpr(src)));
+   return "cxlgbr";
+}
+
+static const HChar *
+s390_irgen_CFXBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   /* CFXBR: convert 128-bit BFP in r2 to a 32-bit signed integer in r1
+      using rounding mode m3; the cc comes from source and rounding. */
+   IRTemp src   = newTemp(Ity_F128);
+   IRTemp conv  = newTemp(Ity_I32);
+   IRTemp rmode = encode_bfp_rounding_mode(m3);
+
+   assign(src, get_fpr_pair(r2));
+   assign(conv, binop(Iop_F128toI32S, mkexpr(rmode), mkexpr(src)));
+   put_gpr_w1(r1, mkexpr(conv));
+   s390_cc_thunk_put1f128Z(S390_CC_OP_BFP_128_TO_INT_32, src, rmode);
+
+   return "cfxbr";
+}
+
+static const HChar *
+s390_irgen_CLFXBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   /* CLFXBR: convert 128-bit BFP in r2 to a 32-bit unsigned integer in
+      r1 using rounding mode m3.  Requires the fpext facility. */
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+      return "clfxbr";
+   }
+   IRTemp src   = newTemp(Ity_F128);
+   IRTemp conv  = newTemp(Ity_I32);
+   IRTemp rmode = encode_bfp_rounding_mode(m3);
+
+   assign(src, get_fpr_pair(r2));
+   assign(conv, binop(Iop_F128toI32U, mkexpr(rmode), mkexpr(src)));
+   put_gpr_w1(r1, mkexpr(conv));
+   s390_cc_thunk_put1f128Z(S390_CC_OP_BFP_128_TO_UINT_32, src, rmode);
+   return "clfxbr";
+}
+
+
+static const HChar *
+s390_irgen_CGXBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   /* CGXBR: convert 128-bit BFP in r2 to a 64-bit signed integer in r1
+      using rounding mode m3; the cc comes from source and rounding. */
+   IRTemp src   = newTemp(Ity_F128);
+   IRTemp conv  = newTemp(Ity_I64);
+   IRTemp rmode = encode_bfp_rounding_mode(m3);
+
+   assign(src, get_fpr_pair(r2));
+   assign(conv, binop(Iop_F128toI64S, mkexpr(rmode), mkexpr(src)));
+   put_gpr_dw0(r1, mkexpr(conv));
+   s390_cc_thunk_put1f128Z(S390_CC_OP_BFP_128_TO_INT_64, src, rmode);
+
+   return "cgxbr";
+}
+
+static const HChar *
+s390_irgen_CLGXBR(UChar m3, UChar m4 __attribute__((unused)),
+                  UChar r1, UChar r2)
+{
+   /* CLGXBR: convert 128-bit BFP in r2 to a 64-bit unsigned integer in
+      r1 using rounding mode m3.  Requires the fpext facility. */
+   if (! s390_host_has_fpext) {
+      emulation_failure(EmFail_S390X_fpext);
+      return "clgxbr";
+   }
+   IRTemp src   = newTemp(Ity_F128);
+   IRTemp conv  = newTemp(Ity_I64);
+   IRTemp rmode = encode_bfp_rounding_mode(m3);
+
+   assign(src, get_fpr_pair(r2));
+   assign(conv, binop(Iop_F128toI64U, mkexpr(rmode), mkexpr(src)));
+   put_gpr_dw0(r1, mkexpr(conv));
+   s390_cc_thunk_put1f128Z(S390_CC_OP_BFP_128_TO_UINT_64, src, rmode);
+   return "clgxbr";
+}
+
+static const HChar *
+s390_irgen_DXBR(UChar r1, UChar r2)
+{
+   /* DXBR: 128-bit BFP divide; r1 = r1 / r2, rounding per FPC. */
+   IRTemp dividend = newTemp(Ity_F128);
+   IRTemp divisor  = newTemp(Ity_F128);
+   IRTemp quotient = newTemp(Ity_F128);
+   IRTemp rmode    = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(dividend, get_fpr_pair(r1));
+   assign(divisor,  get_fpr_pair(r2));
+   assign(quotient, triop(Iop_DivF128, mkexpr(rmode), mkexpr(dividend),
+                          mkexpr(divisor)));
+   put_fpr_pair(r1, mkexpr(quotient));
+
+   return "dxbr";
+}
+
+static const HChar *
+s390_irgen_LTXBR(UChar r1, UChar r2)
+{
+   /* LTXBR: copy the 128-bit BFP pair r2 to r1 and set the cc from
+      the value. */
+   IRTemp value = newTemp(Ity_F128);
+   assign(value, get_fpr_pair(r2));
+   put_fpr_pair(r1, mkexpr(value));
+   s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, value);
+   return "ltxbr";
+}
+
+static const HChar *
+s390_irgen_LCXBR(UChar r1, UChar r2)
+{
+   /* LCXBR: load the negation of the 128-bit BFP pair r2 into r1 and
+      set the cc from the result. */
+   IRTemp negated = newTemp(Ity_F128);
+   assign(negated, unop(Iop_NegF128, get_fpr_pair(r2)));
+   put_fpr_pair(r1, mkexpr(negated));
+   s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, negated);
+   return "lcxbr";
+}
+
+static const HChar *
+s390_irgen_LXDBR(UChar r1, UChar r2)
+{
+   /* LXDBR: widen the 64-bit BFP value in r2 to 128 bit in r1. */
+   IRTemp narrow = newTemp(Ity_F64);
+   assign(narrow, get_fpr_dw0(r2));
+   put_fpr_pair(r1, unop(Iop_F64toF128, mkexpr(narrow)));
+   return "lxdbr";
+}
+
+static const HChar *
+s390_irgen_LXEBR(UChar r1, UChar r2)
+{
+   /* LXEBR: widen the 32-bit BFP value in r2 to 128 bit in r1. */
+   IRTemp narrow = newTemp(Ity_F32);
+   assign(narrow, get_fpr_w0(r2));
+   put_fpr_pair(r1, unop(Iop_F32toF128, mkexpr(narrow)));
+   return "lxebr";
+}
+
+static const HChar *
+s390_irgen_LXDB(UChar r1, IRTemp op2addr)
+{
+   /* LXDB: load a 64-bit BFP value from storage, widened to 128 bit. */
+   IRTemp narrow = newTemp(Ity_F64);
+   assign(narrow, load(Ity_F64, mkexpr(op2addr)));
+   put_fpr_pair(r1, unop(Iop_F64toF128, mkexpr(narrow)));
+   return "lxdb";
+}
+
+static const HChar *
+s390_irgen_LXEB(UChar r1, IRTemp op2addr)
+{
+   /* LXEB: load a 32-bit BFP value from storage, widened to 128 bit. */
+   IRTemp narrow = newTemp(Ity_F32);
+   assign(narrow, load(Ity_F32, mkexpr(op2addr)));
+   put_fpr_pair(r1, unop(Iop_F32toF128, mkexpr(narrow)));
+   return "lxeb";
+}
+
+static const HChar *
+s390_irgen_LNEBR(UChar r1, UChar r2)
+{
+   /* LNEBR: load negative -- force the sign of the 32-bit BFP value
+      in r2 negative (neg of abs) and set the cc from the result. */
+   IRTemp negated = newTemp(Ity_F32);
+   assign(negated, unop(Iop_NegF32, unop(Iop_AbsF32, get_fpr_w0(r2))));
+   put_fpr_w0(r1, mkexpr(negated));
+   s390_cc_thunk_put1f(S390_CC_OP_BFP_RESULT_32, negated);
+   return "lnebr";
+}
+
+static const HChar *
+s390_irgen_LNDBR(UChar r1, UChar r2)
+{
+   /* LNDBR: load negative -- force the sign of the 64-bit BFP value
+      in r2 negative (neg of abs) and set the cc from the result. */
+   IRTemp negated = newTemp(Ity_F64);
+   assign(negated, unop(Iop_NegF64, unop(Iop_AbsF64, get_fpr_dw0(r2))));
+   put_fpr_dw0(r1, mkexpr(negated));
+   s390_cc_thunk_put1f(S390_CC_OP_BFP_RESULT_64, negated);
+   return "lndbr";
+}
+
+static const HChar *
+s390_irgen_LNXBR(UChar r1, UChar r2)
+{
+   /* LNXBR: load negative -- force the sign of the 128-bit BFP value
+      in r2 negative (neg of abs) and set the cc from the result. */
+   IRTemp negated = newTemp(Ity_F128);
+   assign(negated, unop(Iop_NegF128, unop(Iop_AbsF128, get_fpr_pair(r2))));
+   put_fpr_pair(r1, mkexpr(negated));
+   s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, negated);
+   return "lnxbr";
+}
+
+static const HChar *
+s390_irgen_LPEBR(UChar r1, UChar r2)
+{
+   /* LPEBR: load positive -- absolute value of the 32-bit BFP value
+      in r2; the cc comes from the result. */
+   IRTemp magnitude = newTemp(Ity_F32);
+   assign(magnitude, unop(Iop_AbsF32, get_fpr_w0(r2)));
+   put_fpr_w0(r1, mkexpr(magnitude));
+   s390_cc_thunk_put1f(S390_CC_OP_BFP_RESULT_32, magnitude);
+   return "lpebr";
+}
+
+static const HChar *
+s390_irgen_LPDBR(UChar r1, UChar r2)
+{
+   /* LPDBR: load positive -- absolute value of the 64-bit BFP value
+      in r2; the cc comes from the result. */
+   IRTemp magnitude = newTemp(Ity_F64);
+   assign(magnitude, unop(Iop_AbsF64, get_fpr_dw0(r2)));
+   put_fpr_dw0(r1, mkexpr(magnitude));
+   s390_cc_thunk_put1f(S390_CC_OP_BFP_RESULT_64, magnitude);
+   return "lpdbr";
+}
+
+static const HChar *
+s390_irgen_LPXBR(UChar r1, UChar r2)
+{
+   /* LPXBR: load positive -- absolute value of the 128-bit BFP value
+      in r2; the cc comes from the result. */
+   IRTemp magnitude = newTemp(Ity_F128);
+   assign(magnitude, unop(Iop_AbsF128, get_fpr_pair(r2)));
+   put_fpr_pair(r1, mkexpr(magnitude));
+   s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, magnitude);
+   return "lpxbr";
+}
+
+static const HChar *
+s390_irgen_LDXBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   /* LDXBR: round the 128-bit BFP value in r2 down to 64 bit in r1. */
+   if (! s390_host_has_fpext && m3 != S390_BFP_ROUND_PER_FPC) {
+      /* Without fpext only rounding per FPC is available; warn and
+         fall back. */
+      emulation_warning(EmWarn_S390X_fpext_rounding);
+      m3 = S390_BFP_ROUND_PER_FPC;
+   }
+   IRTemp rounded = newTemp(Ity_F64);
+   assign(rounded, binop(Iop_F128toF64, mkexpr(encode_bfp_rounding_mode(m3)),
+                         get_fpr_pair(r2)));
+   put_fpr_dw0(r1, mkexpr(rounded));
+   return "ldxbr";
+}
+
+static const HChar *
+s390_irgen_LEXBR(UChar m3, UChar m4 __attribute__((unused)),
+                 UChar r1, UChar r2)
+{
+   /* LEXBR: round the 128-bit BFP value in r2 down to 32 bit in r1. */
+   if (! s390_host_has_fpext && m3 != S390_BFP_ROUND_PER_FPC) {
+      /* Without fpext only rounding per FPC is available; warn and
+         fall back. */
+      emulation_warning(EmWarn_S390X_fpext_rounding);
+      m3 = S390_BFP_ROUND_PER_FPC;
+   }
+   IRTemp rounded = newTemp(Ity_F32);
+   assign(rounded, binop(Iop_F128toF32, mkexpr(encode_bfp_rounding_mode(m3)),
+                         get_fpr_pair(r2)));
+   put_fpr_w0(r1, mkexpr(rounded));
+   return "lexbr";
+}
+
+static const HChar *
+s390_irgen_MXBR(UChar r1, UChar r2)
+{
+   /* MXBR: 128-bit BFP multiply; r1 = r1 * r2, rounding per FPC. */
+   IRTemp multiplicand = newTemp(Ity_F128);
+   IRTemp multiplier   = newTemp(Ity_F128);
+   IRTemp product      = newTemp(Ity_F128);
+   IRTemp rmode        = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(multiplicand, get_fpr_pair(r1));
+   assign(multiplier,   get_fpr_pair(r2));
+   assign(product, triop(Iop_MulF128, mkexpr(rmode), mkexpr(multiplicand),
+                         mkexpr(multiplier)));
+   put_fpr_pair(r1, mkexpr(product));
+
+   return "mxbr";
+}
+
+static const HChar *
+s390_irgen_MAEBR(UChar r1, UChar r3, UChar r2)
+{
+   /* MULTIPLY AND ADD (short BFP): f[r1] := f[r3] * f[r2] + f[r1],
+      rounded per the current FPC rounding mode. */
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+   IRExpr *mul_l  = get_fpr_w0(r3);
+   IRExpr *mul_r  = get_fpr_w0(r2);
+   IRExpr *addend = get_fpr_w0(r1);
+
+   put_fpr_w0(r1, qop(Iop_MAddF32, mkexpr(rm), mul_l, mul_r, addend));
+
+   return "maebr";
+}
+
+static const HChar *
+s390_irgen_MADBR(UChar r1, UChar r3, UChar r2)
+{
+   /* MULTIPLY AND ADD (long BFP): f[r1] := f[r3] * f[r2] + f[r1],
+      rounded per the current FPC rounding mode. */
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+   IRExpr *mul_l  = get_fpr_dw0(r3);
+   IRExpr *mul_r  = get_fpr_dw0(r2);
+   IRExpr *addend = get_fpr_dw0(r1);
+
+   put_fpr_dw0(r1, qop(Iop_MAddF64, mkexpr(rm), mul_l, mul_r, addend));
+
+   return "madbr";
+}
+
+static const HChar *
+s390_irgen_MAEB(UChar r3, IRTemp op2addr, UChar r1)
+{
+   /* MULTIPLY AND ADD (short BFP, memory operand):
+      f[r1] := f[r3] * mem[op2addr] + f[r1], rounded per the FPC. */
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+   IRExpr *mem_op = load(Ity_F32, mkexpr(op2addr));
+
+   put_fpr_w0(r1, qop(Iop_MAddF32, mkexpr(rm), get_fpr_w0(r3), mem_op,
+                      get_fpr_w0(r1)));
+
+   return "maeb";
+}
+
+static const HChar *
+s390_irgen_MADB(UChar r3, IRTemp op2addr, UChar r1)
+{
+   /* MULTIPLY AND ADD (long BFP, memory operand):
+      f[r1] := f[r3] * mem[op2addr] + f[r1], rounded per the FPC. */
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+   IRExpr *mem_op = load(Ity_F64, mkexpr(op2addr));
+
+   put_fpr_dw0(r1, qop(Iop_MAddF64, mkexpr(rm), get_fpr_dw0(r3), mem_op,
+                       get_fpr_dw0(r1)));
+
+   return "madb";
+}
+
+static const HChar *
+s390_irgen_MSEBR(UChar r1, UChar r3, UChar r2)
+{
+   /* MULTIPLY AND SUBTRACT (short BFP): f[r1] := f[r3] * f[r2] - f[r1],
+      rounded per the current FPC rounding mode. */
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+   IRExpr *mul_l = get_fpr_w0(r3);
+   IRExpr *mul_r = get_fpr_w0(r2);
+   IRExpr *sub   = get_fpr_w0(r1);
+
+   put_fpr_w0(r1, qop(Iop_MSubF32, mkexpr(rm), mul_l, mul_r, sub));
+
+   return "msebr";
+}
+
+static const HChar *
+s390_irgen_MSDBR(UChar r1, UChar r3, UChar r2)
+{
+   /* MULTIPLY AND SUBTRACT (long BFP): f[r1] := f[r3] * f[r2] - f[r1],
+      rounded per the current FPC rounding mode. */
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+   IRExpr *mul_l = get_fpr_dw0(r3);
+   IRExpr *mul_r = get_fpr_dw0(r2);
+   IRExpr *sub   = get_fpr_dw0(r1);
+
+   put_fpr_dw0(r1, qop(Iop_MSubF64, mkexpr(rm), mul_l, mul_r, sub));
+
+   return "msdbr";
+}
+
+static const HChar *
+s390_irgen_MSEB(UChar r3, IRTemp op2addr, UChar r1)
+{
+   /* MULTIPLY AND SUBTRACT (short BFP, memory operand):
+      f[r1] := f[r3] * mem[op2addr] - f[r1], rounded per the FPC. */
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+   IRExpr *mem_op = load(Ity_F32, mkexpr(op2addr));
+
+   put_fpr_w0(r1, qop(Iop_MSubF32, mkexpr(rm), get_fpr_w0(r3), mem_op,
+                      get_fpr_w0(r1)));
+
+   return "mseb";
+}
+
+static const HChar *
+s390_irgen_MSDB(UChar r3, IRTemp op2addr, UChar r1)
+{
+   /* MULTIPLY AND SUBTRACT (long BFP, memory operand):
+      f[r1] := f[r3] * mem[op2addr] - f[r1], rounded per the FPC. */
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+   IRExpr *mem_op = load(Ity_F64, mkexpr(op2addr));
+
+   put_fpr_dw0(r1, qop(Iop_MSubF64, mkexpr(rm), get_fpr_dw0(r3), mem_op,
+                       get_fpr_dw0(r1)));
+
+   return "msdb";
+}
+
+static const HChar *
+s390_irgen_SQEBR(UChar r1, UChar r2)
+{
+   /* SQUARE ROOT (short BFP): f[r1] := sqrt(f[r2]), rounded per the
+      current FPC rounding mode. */
+   IRTemp res = newTemp(Ity_F32);
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(res, binop(Iop_SqrtF32, mkexpr(rm), get_fpr_w0(r2)));
+   put_fpr_w0(r1, mkexpr(res));
+
+   return "sqebr";
+}
+
+static const HChar *
+s390_irgen_SQDBR(UChar r1, UChar r2)
+{
+   /* SQUARE ROOT (long BFP): f[r1] := sqrt(f[r2]), rounded per the
+      current FPC rounding mode. */
+   IRTemp res = newTemp(Ity_F64);
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(res, binop(Iop_SqrtF64, mkexpr(rm), get_fpr_dw0(r2)));
+   put_fpr_dw0(r1, mkexpr(res));
+
+   return "sqdbr";
+}
+
+static const HChar *
+s390_irgen_SQXBR(UChar r1, UChar r2)
+{
+   /* SQUARE ROOT (extended BFP): f-pair[r1] := sqrt(f-pair[r2]),
+      rounded per the current FPC rounding mode. */
+   IRTemp res = newTemp(Ity_F128);
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(res, binop(Iop_SqrtF128, mkexpr(rm), get_fpr_pair(r2)));
+   put_fpr_pair(r1, mkexpr(res));
+
+   return "sqxbr";
+}
+
+static const HChar *
+s390_irgen_SQEB(UChar r1, IRTemp op2addr)
+{
+   /* SQUARE ROOT (short BFP, memory operand):
+      f[r1] := sqrt(mem[op2addr]), rounded per the FPC. */
+   IRTemp val = newTemp(Ity_F32);
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(val, load(Ity_F32, mkexpr(op2addr)));
+   put_fpr_w0(r1, binop(Iop_SqrtF32, mkexpr(rm), mkexpr(val)));
+
+   return "sqeb";
+}
+
+static const HChar *
+s390_irgen_SQDB(UChar r1, IRTemp op2addr)
+{
+   /* SQUARE ROOT (long BFP, memory operand):
+      f[r1] := sqrt(mem[op2addr]), rounded per the FPC. */
+   IRTemp val = newTemp(Ity_F64);
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(val, load(Ity_F64, mkexpr(op2addr)));
+   put_fpr_dw0(r1, binop(Iop_SqrtF64, mkexpr(rm), mkexpr(val)));
+
+   return "sqdb";
+}
+
+static const HChar *
+s390_irgen_SXBR(UChar r1, UChar r2)
+{
+   /* SUBTRACT (extended BFP): f-pair[r1] := f-pair[r1] - f-pair[r2],
+      rounded per the FPC; the cc thunk is seeded with the 128-bit
+      BFP result. */
+   IRTemp minuend = newTemp(Ity_F128);
+   IRTemp subtrahend = newTemp(Ity_F128);
+   IRTemp diff = newTemp(Ity_F128);
+   IRTemp rm = encode_bfp_rounding_mode(S390_BFP_ROUND_PER_FPC);
+
+   assign(minuend, get_fpr_pair(r1));
+   assign(subtrahend, get_fpr_pair(r2));
+   assign(diff, triop(Iop_SubF128, mkexpr(rm), mkexpr(minuend),
+                      mkexpr(subtrahend)));
+   put_fpr_pair(r1, mkexpr(diff));
+   s390_cc_thunk_put1f128(S390_CC_OP_BFP_RESULT_128, diff);
+
+   return "sxbr";
+}
+
+static const HChar *
+s390_irgen_TCEB(UChar r1, IRTemp op2addr)
+{
+   /* TEST DATA CLASS (short BFP): no register is modified; the cc is
+      computed lazily by the thunk from f[r1] and op2addr. */
+   IRTemp val = newTemp(Ity_F32);
+
+   assign(val, get_fpr_w0(r1));
+
+   s390_cc_thunk_putFZ(S390_CC_OP_BFP_TDC_32, val, op2addr);
+
+   return "tceb";
+}
+
+static const HChar *
+s390_irgen_TCDB(UChar r1, IRTemp op2addr)
+{
+   /* TEST DATA CLASS (long BFP): no register is modified; the cc is
+      computed lazily by the thunk from f[r1] and op2addr. */
+   IRTemp val = newTemp(Ity_F64);
+
+   assign(val, get_fpr_dw0(r1));
+
+   s390_cc_thunk_putFZ(S390_CC_OP_BFP_TDC_64, val, op2addr);
+
+   return "tcdb";
+}
+
+static const HChar *
+s390_irgen_TCXB(UChar r1, IRTemp op2addr)
+{
+   /* TEST DATA CLASS (extended BFP): no register is modified; the cc
+      is computed lazily by the thunk from f-pair[r1] and op2addr. */
+   IRTemp val = newTemp(Ity_F128);
+
+   assign(val, get_fpr_pair(r1));
+
+   s390_cc_thunk_put1f128Z(S390_CC_OP_BFP_TDC_128, val, op2addr);
+
+   return "tcxb";
+}
+
+static const HChar *
+s390_irgen_LCDFR(UChar r1, UChar r2)
+{
+   /* LOAD COMPLEMENT: f[r1] := -f[r2]. The cc is left unchanged. */
+   IRTemp res = newTemp(Ity_F64);
+   IRExpr *negated = unop(Iop_NegF64, get_fpr_dw0(r2));
+
+   assign(res, negated);
+   put_fpr_dw0(r1, mkexpr(res));
+
+   return "lcdfr";
+}
+
+static const HChar *
+s390_irgen_LNDFR(UChar r1, UChar r2)
+{
+   /* LOAD NEGATIVE: f[r1] := -|f[r2]|. The cc is left unchanged. */
+   IRTemp res = newTemp(Ity_F64);
+   IRExpr *neg_abs = unop(Iop_NegF64, unop(Iop_AbsF64, get_fpr_dw0(r2)));
+
+   assign(res, neg_abs);
+   put_fpr_dw0(r1, mkexpr(res));
+
+   return "lndfr";
+}
+
+static const HChar *
+s390_irgen_LPDFR(UChar r1, UChar r2)
+{
+   /* LOAD POSITIVE: f[r1] := |f[r2]|. The cc is left unchanged. */
+   IRTemp res = newTemp(Ity_F64);
+   IRExpr *absval = unop(Iop_AbsF64, get_fpr_dw0(r2));
+
+   assign(res, absval);
+   put_fpr_dw0(r1, mkexpr(res));
+
+   return "lpdfr";
+}
+
+static const HChar *
+s390_irgen_LDGR(UChar r1, UChar r2)
+{
+   /* LOAD FPR FROM GR: copy the raw 64-bit image of gpr r2 into
+      fpr r1 (bit-pattern reinterpretation, no value conversion). */
+   put_fpr_dw0(r1, unop(Iop_ReinterpI64asF64, get_gpr_dw0(r2)));
+
+   return "ldgr";
+}
+
+static const HChar *
+s390_irgen_LGDR(UChar r1, UChar r2)
+{
+   /* LOAD GR FROM FPR: copy the raw 64-bit image of fpr r2 into
+      gpr r1 (bit-pattern reinterpretation, no value conversion). */
+   put_gpr_dw0(r1, unop(Iop_ReinterpF64asI64, get_fpr_dw0(r2)));
+
+   return "lgdr";
+}
+
+
+static const HChar *
+s390_irgen_CPSDR(UChar r3, UChar r1, UChar r2)
+{
+   /* COPY SIGN: f[r1] receives the magnitude bits of f[r2] combined
+      with the sign bit of f[r3]. Implemented with integer bit ops on
+      the raw 64-bit images. */
+   IRTemp sign_bit  = newTemp(Ity_I64);
+   IRTemp magnitude = newTemp(Ity_I64);
+
+   /* Isolate bit 0 (the sign) of f[r3]. */
+   assign(sign_bit,
+          binop(Iop_And64, unop(Iop_ReinterpF64asI64, get_fpr_dw0(r3)),
+                mkU64(1ULL << 63)));
+   /* Isolate bits 1-63 (the magnitude) of f[r2]. */
+   assign(magnitude,
+          binop(Iop_And64, unop(Iop_ReinterpF64asI64, get_fpr_dw0(r2)),
+                mkU64((1ULL << 63) - 1)));
+   put_fpr_dw0(r1, unop(Iop_ReinterpI64asF64,
+                        binop(Iop_Or64, mkexpr(magnitude),
+                              mkexpr(sign_bit))));
+
+   return "cpsdr";
+}
+
+
+static IRExpr *
+s390_call_cvb(IRExpr *in)
+{
+   /* Build a clean-helper call to s390_do_cvb, which converts the
+      decimal operand IN to binary; the call yields a 32-bit result. */
+   IRExpr **args, *call;
+
+   args = mkIRExprVec_1(in);
+   call = mkIRExprCCall(Ity_I32, 0 /*regparm*/,
+                        "s390_do_cvb", &s390_do_cvb, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
+static const HChar *
+s390_irgen_CVB(UChar r1, IRTemp op2addr)
+{
+   /* CONVERT TO BINARY: convert the 8-byte decimal operand at op2addr
+      via the helper and write the result to bits 32-63 of gpr r1. */
+   put_gpr_w1(r1, s390_call_cvb(load(Ity_I64, mkexpr(op2addr))));
+
+   return "cvb";
+}
+
+static const HChar *
+s390_irgen_CVBY(UChar r1, IRTemp op2addr)
+{
+   /* CONVERT TO BINARY (long-displacement form); identical IR to CVB. */
+   put_gpr_w1(r1, s390_call_cvb(load(Ity_I64, mkexpr(op2addr))));
+
+   return "cvby";
+}
+
+
+static IRExpr *
+s390_call_cvd(IRExpr *in)
+{
+   /* Build a clean-helper call to s390_do_cvd, which converts the
+      binary operand IN to decimal; the call yields a 64-bit result. */
+   IRExpr **args, *call;
+
+   args = mkIRExprVec_1(in);
+   call = mkIRExprCCall(Ity_I64, 0 /*regparm*/,
+                        "s390_do_cvd", &s390_do_cvd, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
+static const HChar *
+s390_irgen_CVD(UChar r1, IRTemp op2addr)
+{
+   /* CONVERT TO DECIMAL: take bits 32-63 of gpr r1, zero-extend them
+      to the 64 bits the helper expects, and store the 8-byte decimal
+      result at op2addr. */
+   store(mkexpr(op2addr), s390_call_cvd(unop(Iop_32Uto64, get_gpr_w1(r1))));
+
+   return "cvd";
+}
+
+static const HChar *
+s390_irgen_CVDY(UChar r1, IRTemp op2addr)
+{
+   /* CONVERT TO DECIMAL (long-displacement form); identical IR to CVD.
+      Zero-extend the 32-bit register value to the Ity_I64 argument the
+      helper call expects -- previously the I32 expression was passed
+      directly, inconsistent with CVD and leaving the upper half of the
+      helper argument unspecified. */
+   store(mkexpr(op2addr), s390_call_cvd(unop(Iop_32Uto64, get_gpr_w1(r1))));
+
+   return "cvdy";
+}
+
+static const HChar *
+s390_irgen_FLOGR(UChar r1, UChar r2)
+{
+   /* FIND LEFTMOST ONE: gpr r1 receives the bit position of the
+      leftmost '1' bit in gpr r2 (64 if r2 == 0); gpr r1+1 receives r2
+      with that bit cleared. cc reflects an unsigned compare of the
+      input with 0. NOTE(review): r1+1 is written, so r1 is presumably
+      constrained to an even register -- enforced by the decoder. */
+   IRTemp input    = newTemp(Ity_I64);
+   IRTemp not_zero = newTemp(Ity_I64);
+   IRTemp tmpnum   = newTemp(Ity_I64);
+   IRTemp num      = newTemp(Ity_I64);
+   IRTemp shift_amount = newTemp(Ity_I8);
+
+   /* We use the "count leading zeroes" operator because the number of
+      leading zeroes is identical with the bit position of the first '1' bit.
+      However, that operator does not work when the input value is zero.
+      Therefore, we set the LSB of the input value to 1 and use Clz64 on
+      the modified value. If input == 0, then the result is 64. Otherwise,
+      the result of Clz64 is what we want. */
+
+   assign(input, get_gpr_dw0(r2));
+   assign(not_zero, binop(Iop_Or64, mkexpr(input), mkU64(1)));
+   assign(tmpnum, unop(Iop_Clz64, mkexpr(not_zero)));
+
+   /* num = (input == 0) ? 64 : tmpnum */
+   assign(num, mkite(binop(Iop_CmpEQ64, mkexpr(input), mkU64(0)),
+                     /* == 0 */ mkU64(64),
+                     /* != 0 */ mkexpr(tmpnum)));
+
+   put_gpr_dw0(r1, mkexpr(num));
+
+   /* Set the leftmost '1' bit of the input value to zero. The general scheme
+      is to first shift the input value by NUM + 1 bits to the left which
+      causes the leftmost '1' bit to disappear. Then we shift logically to
+      the right by NUM + 1 bits. Because the semantics of Iop_Shl64 and
+      Iop_Shr64 are undefined if the shift-amount is greater than or equal to
+      the width of the value-to-be-shifted, we need to special case
+      NUM + 1 >= 64. This is equivalent to INPUT == 0 || INPUT == 1.
+      For both such INPUT values the result will be 0. */
+
+   assign(shift_amount, unop(Iop_64to8, binop(Iop_Add64, mkexpr(num),
+                          mkU64(1))));
+
+   put_gpr_dw0(r1 + 1,
+               mkite(binop(Iop_CmpLE64U, mkexpr(input), mkU64(1)),
+                     /* == 0 || == 1*/ mkU64(0),
+                     /* otherwise */
+                     binop(Iop_Shr64,
+                           binop(Iop_Shl64, mkexpr(input),
+                                 mkexpr(shift_amount)),
+                           mkexpr(shift_amount))));
+
+   /* Compare the original value as an unsigned integer with 0. */
+   s390_cc_thunk_put2(S390_CC_OP_UNSIGNED_COMPARE, input,
+                      mktemp(Ity_I64, mkU64(0)), False);
+
+   return "flogr";
+}
+
+static const HChar *
+s390_irgen_STCK(IRTemp op2addr)
+{
+   /* STORE CLOCK: a dirty helper writes the 8-byte clock value to
+      op2addr; the helper's return value is installed as the cc. */
+   IRDirty *d;
+   IRTemp cc = newTemp(Ity_I64);
+
+   d = unsafeIRDirty_1_N(cc, 0, "s390x_dirtyhelper_STCK",
+                         &s390x_dirtyhelper_STCK,
+                         mkIRExprVec_1(mkexpr(op2addr)));
+   d->mFx   = Ifx_Write;
+   d->mAddr = mkexpr(op2addr);
+   d->mSize = 8;
+   stmt(IRStmt_Dirty(d));
+   s390_cc_thunk_fill(mkU64(S390_CC_OP_SET),
+                      mkexpr(cc), mkU64(0), mkU64(0));
+   return "stck";
+}
+
+static const HChar *
+s390_irgen_STCKF(IRTemp op2addr)
+{
+   /* STORE CLOCK FAST: requires the STCKF facility; without it an
+      emulation failure is raised. Otherwise a dirty helper writes the
+      8-byte clock value to op2addr and its return value becomes cc. */
+   if (! s390_host_has_stckf) {
+      emulation_failure(EmFail_S390X_stckf);
+   } else {
+      IRTemp cc = newTemp(Ity_I64);
+
+      IRDirty *d = unsafeIRDirty_1_N(cc, 0, "s390x_dirtyhelper_STCKF",
+                                     &s390x_dirtyhelper_STCKF,
+                                     mkIRExprVec_1(mkexpr(op2addr)));
+      d->mFx   = Ifx_Write;
+      d->mAddr = mkexpr(op2addr);
+      d->mSize = 8;
+      stmt(IRStmt_Dirty(d));
+      s390_cc_thunk_fill(mkU64(S390_CC_OP_SET),
+                         mkexpr(cc), mkU64(0), mkU64(0));
+   }
+   return "stckf";
+}
+
+static const HChar *
+s390_irgen_STCKE(IRTemp op2addr)
+{
+   /* STORE CLOCK EXTENDED: a dirty helper writes the 16-byte extended
+      clock value to op2addr; its return value becomes the cc. */
+   IRDirty *d;
+   IRTemp cc = newTemp(Ity_I64);
+
+   d = unsafeIRDirty_1_N(cc, 0, "s390x_dirtyhelper_STCKE",
+                         &s390x_dirtyhelper_STCKE,
+                         mkIRExprVec_1(mkexpr(op2addr)));
+   d->mFx   = Ifx_Write;
+   d->mAddr = mkexpr(op2addr);
+   d->mSize = 16;
+   stmt(IRStmt_Dirty(d));
+   s390_cc_thunk_fill(mkU64(S390_CC_OP_SET),
+                      mkexpr(cc), mkU64(0), mkU64(0));
+   return "stcke";
+}
+
+static const HChar *
+s390_irgen_STFLE(IRTemp op2addr)
+{
+   /* STORE FACILITY LIST EXTENDED: requires the STFLE facility. A
+      dirty helper (which needs the guest state pointer) writes the
+      facility list to op2addr, modifies guest r0, and returns the cc. */
+   if (! s390_host_has_stfle) {
+      emulation_failure(EmFail_S390X_stfle);
+      return "stfle";
+   }
+
+   IRDirty *d;
+   IRTemp cc = newTemp(Ity_I64);
+
+   /* IRExpr_BBPTR() => Need to pass pointer to guest state to helper */
+   d = unsafeIRDirty_1_N(cc, 0, "s390x_dirtyhelper_STFLE",
+                         &s390x_dirtyhelper_STFLE,
+                         mkIRExprVec_2(IRExpr_BBPTR(), mkexpr(op2addr)));
+
+   d->nFxState = 1;
+   vex_bzero(&d->fxState, sizeof(d->fxState));
+
+   /* r0 holds the number of doublewords; the helper reads and updates it. */
+   d->fxState[0].fx     = Ifx_Modify;  /* read then write */
+   d->fxState[0].offset = S390X_GUEST_OFFSET(guest_r0);
+   d->fxState[0].size   = sizeof(ULong);
+
+   d->mAddr = mkexpr(op2addr);
+   /* Pretend all double words are written */
+   d->mSize = S390_NUM_FACILITY_DW * sizeof(ULong);
+   d->mFx   = Ifx_Write;
+
+   stmt(IRStmt_Dirty(d));
+
+   s390_cc_thunk_fill(mkU64(S390_CC_OP_SET), mkexpr(cc), mkU64(0), mkU64(0));
+
+   return "stfle";
+}
+
+static const HChar *
+s390_irgen_CKSM(UChar r1,UChar r2)
+{
+   /* CHECKSUM: accumulate a 32-bit end-around-carry checksum of the
+      operand at address r2 / length r2+1 into gpr r1, processing up to
+      4 bytes per iteration of this insn. cc is always 0. */
+   IRTemp addr = newTemp(Ity_I64);
+   IRTemp op = newTemp(Ity_I32);
+   IRTemp len = newTemp(Ity_I64);
+   IRTemp oldval = newTemp(Ity_I32);
+   IRTemp mask = newTemp(Ity_I32);
+   IRTemp newop = newTemp(Ity_I32);
+   IRTemp result = newTemp(Ity_I32);
+   IRTemp result1 = newTemp(Ity_I32);
+   IRTemp inc = newTemp(Ity_I64);
+
+   assign(oldval, get_gpr_w1(r1));
+   assign(addr, get_gpr_dw0(r2));
+   assign(len, get_gpr_dw0(r2+1));
+
+   /* Condition code is always zero. */
+   s390_cc_set(0);
+
+   /* If length is zero, there is no need to calculate the checksum */
+   next_insn_if(binop(Iop_CmpEQ64, mkexpr(len), mkU64(0)));
+
+   /* Assigning the increment variable to adjust address and length
+      later on. */
+   assign(inc, mkite(binop(Iop_CmpLT64U, mkexpr(len), mkU64(4)),
+                           mkexpr(len), mkU64(4)));
+
+   /* If length < 4 the final 4-byte 2nd operand value is computed by 
+      appending the remaining bytes to the right with 0. This is done
+      by AND'ing the 4 bytes loaded from memory with an appropriate
+      mask. If length >= 4, that mask is simply 0xffffffff. */
+
+   assign(mask, mkite(binop(Iop_CmpLT64U, mkexpr(len), mkU64(4)),
+                      /* Mask computation when len < 4:
+                         0xffffffff << (32 - (len % 4)*8) */
+                      binop(Iop_Shl32, mkU32(0xffffffff),
+                            unop(Iop_32to8,
+                                 binop(Iop_Sub32, mkU32(32),
+                                       binop(Iop_Shl32,
+                                             unop(Iop_64to32,
+                                                  binop(Iop_And64,
+                                                        mkexpr(len), mkU64(3))),
+                                             mkU8(3))))),
+                      mkU32(0xffffffff)));
+
+   assign(op, load(Ity_I32, mkexpr(addr)));
+   assign(newop, binop(Iop_And32, mkexpr(op), mkexpr(mask)));
+   assign(result, binop(Iop_Add32, mkexpr(newop), mkexpr(oldval)));
+
+   /* Checking for carry */
+   assign(result1, mkite(binop(Iop_CmpLT32U, mkexpr(result), mkexpr(newop)),
+                         binop(Iop_Add32, mkexpr(result), mkU32(1)),
+                         mkexpr(result)));
+
+   put_gpr_w1(r1, mkexpr(result1));
+   put_gpr_dw0(r2, binop(Iop_Add64, mkexpr(addr), mkexpr(inc)));
+   put_gpr_dw0(r2+1, binop(Iop_Sub64, mkexpr(len), mkexpr(inc)));
+
+   /* NOTE(review): this tests the pre-decrement length; since len != 0
+      was guaranteed above, the insn re-executes and termination is
+      handled by the length check at the top on the next iteration. */
+   iterate_if(binop(Iop_CmpNE64, mkexpr(len), mkU64(0)));
+
+   return "cksm";
+}
+
+static const HChar *
+s390_irgen_TROO(UChar m3, UChar r1, UChar r2)
+{
+   /* TRANSLATE ONE TO ONE: translate one source byte per iteration via
+      the 1-byte-entry table addressed by gpr 1, stopping with cc=1 when
+      the translated byte equals the test byte in gpr 0 (unless the
+      ETF2-enhancement bit of m3 suppresses the test). */
+   IRTemp src_addr, des_addr, tab_addr, src_len, test_byte;
+   src_addr = newTemp(Ity_I64);
+   des_addr = newTemp(Ity_I64);
+   tab_addr = newTemp(Ity_I64);
+   test_byte = newTemp(Ity_I8);
+   src_len = newTemp(Ity_I64);
+
+   assign(src_addr, get_gpr_dw0(r2));
+   assign(des_addr, get_gpr_dw0(r1));
+   assign(tab_addr, get_gpr_dw0(1));
+   assign(src_len, get_gpr_dw0(r1+1));
+   assign(test_byte, get_gpr_b7(0));
+
+   IRTemp op = newTemp(Ity_I8);
+   IRTemp op1 = newTemp(Ity_I8);
+   IRTemp result = newTemp(Ity_I64);
+
+   /* End of source string? We're done; proceed to next insn */
+   s390_cc_set(0);
+   next_insn_if(binop(Iop_CmpEQ64, mkexpr(src_len), mkU64(0)));
+
+   /* Load character from source string, index translation table and
+      store translated character in op1. */
+   assign(op, load(Ity_I8, mkexpr(src_addr)));
+
+   assign(result, binop(Iop_Add64, unop(Iop_8Uto64, mkexpr(op)),
+                        mkexpr(tab_addr)));
+   assign(op1, load(Ity_I8, mkexpr(result)));
+
+   /* Test-character comparison is performed unless ETF2 is available
+      and bit 0 of m3 requests its suppression. */
+   if (! s390_host_has_etf2 || (m3 & 0x1) == 0) {
+      s390_cc_set(1);
+      next_insn_if(binop(Iop_CmpEQ8, mkexpr(op1), mkexpr(test_byte)));
+   }
+   store(get_gpr_dw0(r1), mkexpr(op1));
+
+   /* Advance destination/source addresses and shrink the length. */
+   put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(des_addr), mkU64(1)));
+   put_gpr_dw0(r2, binop(Iop_Add64, mkexpr(src_addr), mkU64(1)));
+   put_gpr_dw0(r1+1, binop(Iop_Sub64, mkexpr(src_len), mkU64(1)));
+
+   iterate();
+
+   return "troo";
+}
+
+static const HChar *
+s390_irgen_TRTO(UChar m3, UChar r1, UChar r2)
+{
+   /* TRANSLATE TWO TO ONE: translate one 2-byte source character per
+      iteration into one byte via the 1-byte-entry table addressed by
+      gpr 1, stopping with cc=1 when the translated byte equals the test
+      byte in gpr 0 (unless the ETF2-enhancement bit of m3 suppresses
+      the test). */
+   IRTemp src_addr, des_addr, tab_addr, src_len, test_byte;
+   src_addr = newTemp(Ity_I64);
+   des_addr = newTemp(Ity_I64);
+   tab_addr = newTemp(Ity_I64);
+   test_byte = newTemp(Ity_I8);
+   src_len = newTemp(Ity_I64);
+
+   assign(src_addr, get_gpr_dw0(r2));
+   assign(des_addr, get_gpr_dw0(r1));
+   assign(tab_addr, get_gpr_dw0(1));
+   assign(src_len, get_gpr_dw0(r1+1));
+   assign(test_byte, get_gpr_b7(0));
+
+   IRTemp op = newTemp(Ity_I16);
+   IRTemp op1 = newTemp(Ity_I8);
+   IRTemp result = newTemp(Ity_I64);
+
+   /* End of source string? We're done; proceed to next insn */
+   s390_cc_set(0);
+   next_insn_if(binop(Iop_CmpEQ64, mkexpr(src_len), mkU64(0)));
+
+   /* Load character from source string, index translation table and
+      store translated character in op1. */
+   assign(op, load(Ity_I16, mkexpr(src_addr)));
+
+   assign(result, binop(Iop_Add64, unop(Iop_16Uto64, mkexpr(op)),
+                        mkexpr(tab_addr)));
+
+   assign(op1, load(Ity_I8, mkexpr(result)));
+
+   /* Test-character comparison is performed unless ETF2 is available
+      and bit 0 of m3 requests its suppression. */
+   if (! s390_host_has_etf2 || (m3 & 0x1) == 0) {
+      s390_cc_set(1);
+      next_insn_if(binop(Iop_CmpEQ8, mkexpr(op1), mkexpr(test_byte)));
+   }
+   store(get_gpr_dw0(r1), mkexpr(op1));
+
+   /* Advance addresses (source by 2, destination by 1); shrink length. */
+   put_gpr_dw0(r2, binop(Iop_Add64, mkexpr(src_addr), mkU64(2)));
+   put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(des_addr), mkU64(1)));
+   put_gpr_dw0(r1+1, binop(Iop_Sub64, mkexpr(src_len), mkU64(2)));
+
+   iterate();
+
+   return "trto";
+}
+
+static const HChar *
+s390_irgen_TROT(UChar m3, UChar r1, UChar r2)
+{
+   /* TRANSLATE ONE TO TWO: translate one source byte per iteration
+      into a 2-byte character via the 2-byte-entry table addressed by
+      gpr 1, stopping with cc=1 when the translated character equals the
+      test character in gpr 0 (unless the ETF2-enhancement bit of m3
+      suppresses the test). */
+   IRTemp src_addr, des_addr, tab_addr, src_len, test_byte;
+   src_addr = newTemp(Ity_I64);
+   des_addr = newTemp(Ity_I64);
+   tab_addr = newTemp(Ity_I64);
+   test_byte = newTemp(Ity_I16);
+   src_len = newTemp(Ity_I64);
+
+   assign(src_addr, get_gpr_dw0(r2));
+   assign(des_addr, get_gpr_dw0(r1));
+   assign(tab_addr, get_gpr_dw0(1));
+   assign(src_len, get_gpr_dw0(r1+1));
+   assign(test_byte, get_gpr_hw3(0));
+
+   IRTemp op = newTemp(Ity_I8);
+   IRTemp op1 = newTemp(Ity_I16);
+   IRTemp result = newTemp(Ity_I64);
+
+   /* End of source string? We're done; proceed to next insn */
+   s390_cc_set(0);
+   next_insn_if(binop(Iop_CmpEQ64, mkexpr(src_len), mkU64(0)));
+
+   /* Load character from source string, index translation table and
+      store translated character in op1. Table entries are 2 bytes wide,
+      so the table index is the source byte scaled by 2. Widen to 64 bit
+      BEFORE shifting; shifting in 8 bits would lose the top bit for
+      source bytes >= 0x80 and index the wrong entry. */
+   assign(op, load(Ity_I8, mkexpr(src_addr)));
+
+   assign(result, binop(Iop_Add64,
+                        binop(Iop_Shl64, unop(Iop_8Uto64, mkexpr(op)),
+                              mkU8(1)),
+                        mkexpr(tab_addr)));
+   assign(op1, load(Ity_I16, mkexpr(result)));
+
+   /* Test-character comparison is performed unless ETF2 is available
+      and bit 0 of m3 requests its suppression. */
+   if (! s390_host_has_etf2 || (m3 & 0x1) == 0) {
+      s390_cc_set(1);
+      next_insn_if(binop(Iop_CmpEQ16, mkexpr(op1), mkexpr(test_byte)));
+   }
+   store(get_gpr_dw0(r1), mkexpr(op1));
+
+   /* Advance addresses (source by 1, destination by 2); shrink length. */
+   put_gpr_dw0(r2, binop(Iop_Add64, mkexpr(src_addr), mkU64(1)));
+   put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(des_addr), mkU64(2)));
+   put_gpr_dw0(r1+1, binop(Iop_Sub64, mkexpr(src_len), mkU64(1)));
+
+   iterate();
+
+   return "trot";
+}
+
+static const HChar *
+s390_irgen_TRTT(UChar m3, UChar r1, UChar r2)
+{
+   /* TRANSLATE TWO TO TWO: translate one 2-byte source character per
+      iteration via the 2-byte-entry table addressed by gpr 1, stopping
+      with cc=1 when the translated character equals the test character
+      in gpr 0 (unless the ETF2-enhancement bit of m3 suppresses the
+      test). */
+   IRTemp src_addr, des_addr, tab_addr, src_len, test_byte;
+   src_addr = newTemp(Ity_I64);
+   des_addr = newTemp(Ity_I64);
+   tab_addr = newTemp(Ity_I64);
+   test_byte = newTemp(Ity_I16);
+   src_len = newTemp(Ity_I64);
+
+   assign(src_addr, get_gpr_dw0(r2));
+   assign(des_addr, get_gpr_dw0(r1));
+   assign(tab_addr, get_gpr_dw0(1));
+   assign(src_len, get_gpr_dw0(r1+1));
+   assign(test_byte, get_gpr_hw3(0));
+
+   IRTemp op = newTemp(Ity_I16);
+   IRTemp op1 = newTemp(Ity_I16);
+   IRTemp result = newTemp(Ity_I64);
+
+   /* End of source string? We're done; proceed to next insn */
+   s390_cc_set(0);
+   next_insn_if(binop(Iop_CmpEQ64, mkexpr(src_len), mkU64(0)));
+
+   /* Load character from source string, index translation table and
+      store translated character in op1. Table entries are 2 bytes wide,
+      so the table index is the source character scaled by 2. Widen to
+      64 bit BEFORE shifting; shifting in 16 bits would lose the top bit
+      for source characters >= 0x8000 and index the wrong entry. */
+   assign(op, load(Ity_I16, mkexpr(src_addr)));
+
+   assign(result, binop(Iop_Add64,
+                        binop(Iop_Shl64, unop(Iop_16Uto64, mkexpr(op)),
+                              mkU8(1)),
+                        mkexpr(tab_addr)));
+   assign(op1, load(Ity_I16, mkexpr(result)));
+
+   /* Test-character comparison is performed unless ETF2 is available
+      and bit 0 of m3 requests its suppression. */
+   if (! s390_host_has_etf2 || (m3 & 0x1) == 0) {
+      s390_cc_set(1);
+      next_insn_if(binop(Iop_CmpEQ16, mkexpr(op1), mkexpr(test_byte)));
+   }
+
+   store(get_gpr_dw0(r1), mkexpr(op1));
+
+   /* Advance both addresses by 2; shrink the length by 2. */
+   put_gpr_dw0(r2, binop(Iop_Add64, mkexpr(src_addr), mkU64(2)));
+   put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(des_addr), mkU64(2)));
+   put_gpr_dw0(r1+1, binop(Iop_Sub64, mkexpr(src_len), mkU64(2)));
+
+   iterate();
+
+   return "trtt";
+}
+
+static const HChar *
+s390_irgen_TR(UChar length, IRTemp start1, IRTemp start2)
+{
+   /* TRANSLATE: delegate to the shared TR/EX implementation with the
+      immediate length operand materialised as an I64 temp. */
+   IRTemp len = newTemp(Ity_I64);
+
+   assign(len, mkU64(length));
+   s390_irgen_TR_EX(len, start1, start2);
+
+   return "tr";
+}
+
+static const HChar *
+s390_irgen_TRE(UChar r1,UChar r2)
+{
+   /* TRANSLATE EXTENDED: translate one source byte per iteration via
+      the table addressed by gpr r2, stopping with cc=1 when the SOURCE
+      byte (before translation) equals the test byte in gpr 0. */
+   IRTemp src_addr, tab_addr, src_len, test_byte;
+   src_addr = newTemp(Ity_I64);
+   tab_addr = newTemp(Ity_I64);
+   src_len = newTemp(Ity_I64);
+   test_byte = newTemp(Ity_I8);
+
+   assign(src_addr, get_gpr_dw0(r1));
+   assign(src_len, get_gpr_dw0(r1+1));
+   assign(tab_addr, get_gpr_dw0(r2));
+   assign(test_byte, get_gpr_b7(0));
+
+   IRTemp op = newTemp(Ity_I8);
+   IRTemp op1 = newTemp(Ity_I8);
+   IRTemp result = newTemp(Ity_I64);
+
+   /* End of source string? We're done; proceed to next insn */
+   s390_cc_set(0);
+   next_insn_if(binop(Iop_CmpEQ64, mkexpr(src_len), mkU64(0)));
+
+   /* Load character from source string and compare with test byte */
+   assign(op, load(Ity_I8, mkexpr(src_addr)));
+
+   s390_cc_set(1);
+   next_insn_if(binop(Iop_CmpEQ8, mkexpr(op), mkexpr(test_byte)));
+
+   /* Translate in place: the source operand is also the destination. */
+   assign(result, binop(Iop_Add64, unop(Iop_8Uto64, mkexpr(op)), 
+			mkexpr(tab_addr)));
+
+   assign(op1, load(Ity_I8, mkexpr(result)));
+
+   store(get_gpr_dw0(r1), mkexpr(op1));
+   put_gpr_dw0(r1, binop(Iop_Add64, mkexpr(src_addr), mkU64(1)));
+   put_gpr_dw0(r1+1, binop(Iop_Sub64, mkexpr(src_len), mkU64(1)));
+
+   iterate();
+
+   return "tre";
+}
+
+static IRExpr *
+s390_call_cu21(IRExpr *srcval, IRExpr *low_surrogate)
+{
+   /* Build a clean-helper call to s390_do_cu21, which converts one
+      UTF-16 code unit (plus an optional low surrogate) to UTF-8; the
+      call yields a packed 64-bit result (see s390_irgen_CU21). */
+   IRExpr **args, *call;
+   args = mkIRExprVec_2(srcval, low_surrogate);
+   call = mkIRExprCCall(Ity_I64, 0 /*regparm*/,
+                       "s390_do_cu21", &s390_do_cu21, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
+static const HChar *
+s390_irgen_CU21(UChar m3, UChar r1, UChar r2)
+{
+   /* CONVERT UTF-16 TO UTF-8: one UTF-16 code unit (or surrogate pair)
+      is converted per iteration. The extraction code below implies the
+      helper's packed result layout: bits 0-7 invalid-low-surrogate
+      flag, bits 8-15 number of UTF-8 bytes, bits 16+ the bytes. */
+   IRTemp addr1 = newTemp(Ity_I64);
+   IRTemp addr2 = newTemp(Ity_I64);
+   IRTemp len1 = newTemp(Ity_I64);
+   IRTemp len2 = newTemp(Ity_I64);
+
+   assign(addr1, get_gpr_dw0(r1));
+   assign(addr2, get_gpr_dw0(r2));
+   assign(len1, get_gpr_dw0(r1 + 1));
+   assign(len2, get_gpr_dw0(r2 + 1));
+
+   /* We're processing the 2nd operand 2 bytes at a time. Therefore, if
+      there are less than 2 bytes left, then the 2nd operand is exhausted
+      and we're done here. cc = 0 */
+   s390_cc_set(0);
+   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len2), mkU64(2)));
+
+   /* There are at least two bytes there. Read them. */
+   IRTemp srcval = newTemp(Ity_I32);
+   assign(srcval, unop(Iop_16Uto32, load(Ity_I16, mkexpr(addr2))));
+
+   /* Find out whether this is a high surrogate. I.e. SRCVAL lies
+      inside the interval [0xd800 - 0xdbff] */
+   IRTemp  is_high_surrogate = newTemp(Ity_I32);
+   IRExpr *flag1 = mkite(binop(Iop_CmpLE32U, mkU32(0xd800), mkexpr(srcval)),
+                         mkU32(1), mkU32(0));
+   IRExpr *flag2 = mkite(binop(Iop_CmpLE32U, mkexpr(srcval), mkU32(0xdbff)),
+                         mkU32(1), mkU32(0));
+   assign(is_high_surrogate, binop(Iop_And32, flag1, flag2));
+
+   /* If SRCVAL is a high surrogate and there are less than 4 bytes left,
+      then the 2nd operand is exhausted and we're done here. cc = 0 */
+   IRExpr *not_enough_bytes =
+      mkite(binop(Iop_CmpLT64U, mkexpr(len2), mkU64(4)), mkU32(1), mkU32(0));
+
+   next_insn_if(binop(Iop_CmpEQ32,
+                      binop(Iop_And32, mkexpr(is_high_surrogate),
+                            not_enough_bytes), mkU32(1)));
+
+   /* The 2nd operand is not exhausted. If the first 2 bytes are a high
+      surrogate, read the next two bytes (low surrogate). */
+   IRTemp  low_surrogate = newTemp(Ity_I32);
+   IRExpr *low_surrogate_addr = binop(Iop_Add64, mkexpr(addr2), mkU64(2));
+
+   assign(low_surrogate,
+          mkite(binop(Iop_CmpEQ32, mkexpr(is_high_surrogate), mkU32(1)),
+                unop(Iop_16Uto32, load(Ity_I16, low_surrogate_addr)),
+                mkU32(0)));  // any value is fine; it will not be used
+
+   /* Call the helper */
+   IRTemp retval = newTemp(Ity_I64);
+   assign(retval, s390_call_cu21(unop(Iop_32Uto64, mkexpr(srcval)),
+                                 unop(Iop_32Uto64, mkexpr(low_surrogate))));
+
+   /* Before we can test whether the 1st operand is exhausted we need to
+      test for an invalid low surrogate. Because cc=2 outranks cc=1. */
+   if (s390_host_has_etf3 && (m3 & 0x1) == 1) {
+      IRExpr *invalid_low_surrogate =
+         binop(Iop_And64, mkexpr(retval), mkU64(0xff));
+
+      s390_cc_set(2);
+      next_insn_if(binop(Iop_CmpEQ64, invalid_low_surrogate, mkU64(1)));
+   }
+
+   /* Now test whether the 1st operand is exhausted */
+   IRTemp num_bytes = newTemp(Ity_I64);
+   assign(num_bytes, binop(Iop_And64,
+                           binop(Iop_Shr64, mkexpr(retval), mkU8(8)),
+                           mkU64(0xff)));
+   s390_cc_set(1);
+   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len1), mkexpr(num_bytes)));
+
+   /* Extract the bytes to be stored at addr1 */
+   IRTemp data = newTemp(Ity_I64);
+   assign(data, binop(Iop_Shr64, mkexpr(retval), mkU8(16)));
+
+   /* To store the bytes construct 4 dirty helper calls. The helper calls
+      are guarded (num_bytes == 1, num_bytes == 2, etc) such that only
+      one of them will be called at runtime. */
+   UInt i;
+   for (i = 1; i <= 4; ++i) {
+      IRDirty *d;
+
+      d = unsafeIRDirty_0_N(0 /* regparms */, "s390x_dirtyhelper_CUxy",
+                            &s390x_dirtyhelper_CUxy,
+                            mkIRExprVec_3(mkexpr(addr1), mkexpr(data),
+                                          mkexpr(num_bytes)));
+      d->guard = binop(Iop_CmpEQ64, mkexpr(num_bytes), mkU64(i));
+      d->mFx   = Ifx_Write;
+      d->mAddr = mkexpr(addr1);
+      d->mSize = i;
+      stmt(IRStmt_Dirty(d));
+   }
+
+   /* Update source address and length */
+   IRTemp num_src_bytes = newTemp(Ity_I64);
+   assign(num_src_bytes,
+          mkite(binop(Iop_CmpEQ32, mkexpr(is_high_surrogate), mkU32(1)),
+                mkU64(4), mkU64(2)));
+   put_gpr_dw0(r2,     binop(Iop_Add64, mkexpr(addr2), mkexpr(num_src_bytes)));
+   put_gpr_dw0(r2 + 1, binop(Iop_Sub64, mkexpr(len2),  mkexpr(num_src_bytes)));
+
+   /* Update destination address and length */
+   put_gpr_dw0(r1,     binop(Iop_Add64, mkexpr(addr1), mkexpr(num_bytes)));
+   put_gpr_dw0(r1 + 1, binop(Iop_Sub64, mkexpr(len1),  mkexpr(num_bytes)));
+
+   iterate();
+
+   return "cu21";
+}
+
+static IRExpr *
+s390_call_cu24(IRExpr *srcval, IRExpr *low_surrogate)
+{
+   /* Build a clean-helper call to s390_do_cu24, which converts one
+      UTF-16 code unit (plus an optional low surrogate) to UTF-32; the
+      call yields a packed 64-bit result (see s390_irgen_CU24). */
+   IRExpr **args, *call;
+   args = mkIRExprVec_2(srcval, low_surrogate);
+   call = mkIRExprCCall(Ity_I64, 0 /*regparm*/,
+                       "s390_do_cu24", &s390_do_cu24, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
+static const HChar *
+s390_irgen_CU24(UChar m3, UChar r1, UChar r2)
+{
+   /* CONVERT UTF-16 TO UTF-32: one UTF-16 code unit (or surrogate
+      pair) is converted per iteration; each result is 4 bytes. The
+      extraction code below implies the helper's packed result layout:
+      bits 0-7 invalid-low-surrogate flag, bits 8-39 the UTF-32 value. */
+   IRTemp addr1 = newTemp(Ity_I64);
+   IRTemp addr2 = newTemp(Ity_I64);
+   IRTemp len1 = newTemp(Ity_I64);
+   IRTemp len2 = newTemp(Ity_I64);
+
+   assign(addr1, get_gpr_dw0(r1));
+   assign(addr2, get_gpr_dw0(r2));
+   assign(len1, get_gpr_dw0(r1 + 1));
+   assign(len2, get_gpr_dw0(r2 + 1));
+
+   /* We're processing the 2nd operand 2 bytes at a time. Therefore, if
+      there are less than 2 bytes left, then the 2nd operand is exhausted
+      and we're done here. cc = 0 */
+   s390_cc_set(0);
+   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len2), mkU64(2)));
+
+   /* There are at least two bytes there. Read them. */
+   IRTemp srcval = newTemp(Ity_I32);
+   assign(srcval, unop(Iop_16Uto32, load(Ity_I16, mkexpr(addr2))));
+
+   /* Find out whether this is a high surrogate. I.e. SRCVAL lies
+      inside the interval [0xd800 - 0xdbff] */
+   IRTemp  is_high_surrogate = newTemp(Ity_I32);
+   IRExpr *flag1 = mkite(binop(Iop_CmpLE32U, mkU32(0xd800), mkexpr(srcval)),
+                         mkU32(1), mkU32(0));
+   IRExpr *flag2 = mkite(binop(Iop_CmpLE32U, mkexpr(srcval), mkU32(0xdbff)),
+                         mkU32(1), mkU32(0));
+   assign(is_high_surrogate, binop(Iop_And32, flag1, flag2));
+
+   /* If SRCVAL is a high surrogate and there are less than 4 bytes left,
+      then the 2nd operand is exhausted and we're done here. cc = 0 */
+   IRExpr *not_enough_bytes =
+      mkite(binop(Iop_CmpLT64U, mkexpr(len2), mkU64(4)), mkU32(1), mkU32(0));
+
+   next_insn_if(binop(Iop_CmpEQ32,
+                      binop(Iop_And32, mkexpr(is_high_surrogate),
+                            not_enough_bytes),
+                      mkU32(1)));
+
+   /* The 2nd operand is not exhausted. If the first 2 bytes are a high
+      surrogate, read the next two bytes (low surrogate). */
+   IRTemp  low_surrogate = newTemp(Ity_I32);
+   IRExpr *low_surrogate_addr = binop(Iop_Add64, mkexpr(addr2), mkU64(2));
+
+   assign(low_surrogate,
+          mkite(binop(Iop_CmpEQ32, mkexpr(is_high_surrogate), mkU32(1)),
+                unop(Iop_16Uto32, load(Ity_I16, low_surrogate_addr)),
+                mkU32(0)));  // any value is fine; it will not be used
+
+   /* Call the helper */
+   IRTemp retval = newTemp(Ity_I64);
+   assign(retval, s390_call_cu24(unop(Iop_32Uto64, mkexpr(srcval)),
+                                 unop(Iop_32Uto64, mkexpr(low_surrogate))));
+
+   /* Before we can test whether the 1st operand is exhausted we need to
+      test for an invalid low surrogate. Because cc=2 outranks cc=1. */
+   if (s390_host_has_etf3 && (m3 & 0x1) == 1) {
+      IRExpr *invalid_low_surrogate =
+         binop(Iop_And64, mkexpr(retval), mkU64(0xff));
+
+      s390_cc_set(2);
+      next_insn_if(binop(Iop_CmpEQ64, invalid_low_surrogate, mkU64(1)));
+   }
+
+   /* Now test whether the 1st operand is exhausted; each result needs
+      4 bytes of destination space. */
+   s390_cc_set(1);
+   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len1), mkU64(4)));
+
+   /* Extract the bytes to be stored at addr1 */
+   IRExpr *data = unop(Iop_64to32, binop(Iop_Shr64, mkexpr(retval), mkU8(8)));
+
+   store(mkexpr(addr1), data);
+
+   /* Update source address and length */
+   IRTemp num_src_bytes = newTemp(Ity_I64);
+   assign(num_src_bytes,
+          mkite(binop(Iop_CmpEQ32, mkexpr(is_high_surrogate), mkU32(1)),
+                mkU64(4), mkU64(2)));
+   put_gpr_dw0(r2,     binop(Iop_Add64, mkexpr(addr2), mkexpr(num_src_bytes)));
+   put_gpr_dw0(r2 + 1, binop(Iop_Sub64, mkexpr(len2),  mkexpr(num_src_bytes)));
+
+   /* Update destination address and length */
+   put_gpr_dw0(r1,     binop(Iop_Add64, mkexpr(addr1), mkU64(4)));
+   put_gpr_dw0(r1 + 1, binop(Iop_Sub64, mkexpr(len1),  mkU64(4)));
+
+   iterate();
+
+   return "cu24";
+}
+
+static IRExpr *
+s390_call_cu42(IRExpr *srcval)
+{
+   IRExpr **args, *call;
+   args = mkIRExprVec_1(srcval);
+   call = mkIRExprCCall(Ity_I64, 0 /*regparm*/,
+                       "s390_do_cu42", &s390_do_cu42, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
/* CU42: Convert Unicode, UTF-32 (2nd operand) to UTF-16 (1st operand).
   r1/r1+1 hold the destination address/length, r2/r2+1 the source
   address/length.  One 4-byte source character is converted per pass;
   iterate() at the end re-executes the insn until one of the
   termination tests above it fires and sets the condition code. */
static const HChar *
s390_irgen_CU42(UChar r1, UChar r2)
{
   IRTemp addr1 = newTemp(Ity_I64);
   IRTemp addr2 = newTemp(Ity_I64);
   IRTemp len1 = newTemp(Ity_I64);
   IRTemp len2 = newTemp(Ity_I64);

   assign(addr1, get_gpr_dw0(r1));
   assign(addr2, get_gpr_dw0(r2));
   assign(len1, get_gpr_dw0(r1 + 1));
   assign(len2, get_gpr_dw0(r2 + 1));

   /* We're processing the 2nd operand 4 bytes at a time. Therefore, if
      there are less than 4 bytes left, then the 2nd operand is exhausted
      and we're done here. cc = 0 */
   s390_cc_set(0);
   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len2), mkU64(4)));

   /* Read the 2nd operand. */
   IRTemp srcval = newTemp(Ity_I32);
   assign(srcval, load(Ity_I32, mkexpr(addr2)));

   /* Call the helper.  The result is packed into one I64:
      byte 0 = invalid-character flag, byte 1 = number of output bytes,
      bytes 2.. = the converted data (see the extractions below). */
   IRTemp retval = newTemp(Ity_I64);
   assign(retval, s390_call_cu42(unop(Iop_32Uto64, mkexpr(srcval))));

   /* If the UTF-32 character was invalid, set cc=2 and we're done.
      cc=2 outranks cc=1 (1st operand exhausted) */
   IRExpr *invalid_character = binop(Iop_And64, mkexpr(retval), mkU64(0xff));

   s390_cc_set(2);
   next_insn_if(binop(Iop_CmpEQ64, invalid_character, mkU64(1)));

   /* Now test whether the 1st operand is exhausted */
   IRTemp num_bytes = newTemp(Ity_I64);
   assign(num_bytes, binop(Iop_And64,
                           binop(Iop_Shr64, mkexpr(retval), mkU8(8)),
                           mkU64(0xff)));
   s390_cc_set(1);
   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len1), mkexpr(num_bytes)));

   /* Extract the bytes to be stored at addr1 */
   IRTemp data = newTemp(Ity_I64);
   assign(data, binop(Iop_Shr64, mkexpr(retval), mkU8(16)));

   /* To store the bytes construct 2 dirty helper calls. The helper calls
      are guarded (num_bytes == 2 and num_bytes == 4, respectively) such
      that only one of them will be called at runtime. */

   Int i;
   for (i = 2; i <= 4; ++i) {
      IRDirty *d;

      if (i == 3) continue;  // skip this one; only 2 or 4 bytes possible

      d = unsafeIRDirty_0_N(0 /* regparms */, "s390x_dirtyhelper_CUxy",
                            &s390x_dirtyhelper_CUxy,
                            mkIRExprVec_3(mkexpr(addr1), mkexpr(data),
                                          mkexpr(num_bytes)));
      d->guard = binop(Iop_CmpEQ64, mkexpr(num_bytes), mkU64(i));
      d->mFx   = Ifx_Write;
      d->mAddr = mkexpr(addr1);
      d->mSize = i;
      stmt(IRStmt_Dirty(d));
   }

   /* Update source address and length */
   put_gpr_dw0(r2,     binop(Iop_Add64, mkexpr(addr2), mkU64(4)));
   put_gpr_dw0(r2 + 1, binop(Iop_Sub64, mkexpr(len2),  mkU64(4)));

   /* Update destination address and length */
   put_gpr_dw0(r1,     binop(Iop_Add64, mkexpr(addr1), mkexpr(num_bytes)));
   put_gpr_dw0(r1 + 1, binop(Iop_Sub64, mkexpr(len1),  mkexpr(num_bytes)));

   iterate();

   return "cu42";
}
+
+static IRExpr *
+s390_call_cu41(IRExpr *srcval)
+{
+   IRExpr **args, *call;
+   args = mkIRExprVec_1(srcval);
+   call = mkIRExprCCall(Ity_I64, 0 /*regparm*/,
+                       "s390_do_cu41", &s390_do_cu41, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
/* CU41: Convert Unicode, UTF-32 (2nd operand) to UTF-8 (1st operand).
   r1/r1+1 hold the destination address/length, r2/r2+1 the source
   address/length.  One 4-byte source character is converted per pass;
   iterate() at the end re-executes the insn until one of the
   termination tests above it fires and sets the condition code. */
static const HChar *
s390_irgen_CU41(UChar r1, UChar r2)
{
   IRTemp addr1 = newTemp(Ity_I64);
   IRTemp addr2 = newTemp(Ity_I64);
   IRTemp len1 = newTemp(Ity_I64);
   IRTemp len2 = newTemp(Ity_I64);

   assign(addr1, get_gpr_dw0(r1));
   assign(addr2, get_gpr_dw0(r2));
   assign(len1, get_gpr_dw0(r1 + 1));
   assign(len2, get_gpr_dw0(r2 + 1));

   /* We're processing the 2nd operand 4 bytes at a time. Therefore, if
      there are less than 4 bytes left, then the 2nd operand is exhausted
      and we're done here. cc = 0 */
   s390_cc_set(0);
   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len2), mkU64(4)));

   /* Read the 2nd operand. */
   IRTemp srcval = newTemp(Ity_I32);
   assign(srcval, load(Ity_I32, mkexpr(addr2)));

   /* Call the helper.  The result is packed into one I64:
      byte 0 = invalid-character flag, byte 1 = number of output bytes,
      bytes 2.. = the converted data (see the extractions below). */
   IRTemp retval = newTemp(Ity_I64);
   assign(retval, s390_call_cu41(unop(Iop_32Uto64, mkexpr(srcval))));

   /* If the UTF-32 character was invalid, set cc=2 and we're done.
      cc=2 outranks cc=1 (1st operand exhausted) */
   IRExpr *invalid_character = binop(Iop_And64, mkexpr(retval), mkU64(0xff));

   s390_cc_set(2);
   next_insn_if(binop(Iop_CmpEQ64, invalid_character, mkU64(1)));

   /* Now test whether the 1st operand is exhausted */
   IRTemp num_bytes = newTemp(Ity_I64);
   assign(num_bytes, binop(Iop_And64,
                           binop(Iop_Shr64, mkexpr(retval), mkU8(8)),
                           mkU64(0xff)));
   s390_cc_set(1);
   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len1), mkexpr(num_bytes)));

   /* Extract the bytes to be stored at addr1 */
   IRTemp data = newTemp(Ity_I64);
   assign(data, binop(Iop_Shr64, mkexpr(retval), mkU8(16)));

   /* To store the bytes construct 4 dirty helper calls. The helper calls
      are guarded (num_bytes == 1, num_bytes == 2, etc) such that only
      one of them will be called at runtime. */
   UInt i;
   for (i = 1; i <= 4; ++i) {
      IRDirty *d;

      d = unsafeIRDirty_0_N(0 /* regparms */, "s390x_dirtyhelper_CUxy",
                            &s390x_dirtyhelper_CUxy,
                            mkIRExprVec_3(mkexpr(addr1), mkexpr(data),
                                          mkexpr(num_bytes)));
      d->guard = binop(Iop_CmpEQ64, mkexpr(num_bytes), mkU64(i));
      d->mFx   = Ifx_Write;
      d->mAddr = mkexpr(addr1);
      d->mSize = i;
      stmt(IRStmt_Dirty(d));
   }

   /* Update source address and length */
   put_gpr_dw0(r2,     binop(Iop_Add64, mkexpr(addr2), mkU64(4)));
   put_gpr_dw0(r2 + 1, binop(Iop_Sub64, mkexpr(len2),  mkU64(4)));

   /* Update destination address and length */
   put_gpr_dw0(r1,     binop(Iop_Add64, mkexpr(addr1), mkexpr(num_bytes)));
   put_gpr_dw0(r1 + 1, binop(Iop_Sub64, mkexpr(len1),  mkexpr(num_bytes)));

   iterate();

   return "cu41";
}
+
+static IRExpr *
+s390_call_cu12_cu14_helper1(IRExpr *byte1, IRExpr *etf3_and_m3_is_1)
+{
+   IRExpr **args, *call;
+   args = mkIRExprVec_2(byte1, etf3_and_m3_is_1);
+   call = mkIRExprCCall(Ity_I64, 0 /*regparm*/, "s390_do_cu12_cu14_helper1",
+                        &s390_do_cu12_cu14_helper1, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
+static IRExpr *
+s390_call_cu12_helper2(IRExpr *byte1, IRExpr *byte2, IRExpr *byte3,
+                       IRExpr *byte4, IRExpr *stuff)
+{
+   IRExpr **args, *call;
+   args = mkIRExprVec_5(byte1, byte2, byte3, byte4, stuff);
+   call = mkIRExprCCall(Ity_I64, 0 /*regparm*/,
+                        "s390_do_cu12_helper2", &s390_do_cu12_helper2, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
+static IRExpr *
+s390_call_cu14_helper2(IRExpr *byte1, IRExpr *byte2, IRExpr *byte3,
+                       IRExpr *byte4, IRExpr *stuff)
+{
+   IRExpr **args, *call;
+   args = mkIRExprVec_5(byte1, byte2, byte3, byte4, stuff);
+   call = mkIRExprCCall(Ity_I64, 0 /*regparm*/,
+                        "s390_do_cu14_helper2", &s390_do_cu14_helper2, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
/* Common IR generation for CU12 (UTF-8 -> UTF-16) and CU14
   (UTF-8 -> UTF-32).  r1/r1+1 hold the destination address/length,
   r2/r2+1 the source address/length; m3 is the modifier field.
   One UTF-8 character (1 to 4 bytes) is converted per pass; iterate()
   at the end re-executes the insn until one of the termination tests
   above it fires and sets the condition code. */
static void
s390_irgen_cu12_cu14(UChar m3, UChar r1, UChar r2, Bool is_cu12)
{
   IRTemp addr1 = newTemp(Ity_I64);
   IRTemp addr2 = newTemp(Ity_I64);
   IRTemp len1 = newTemp(Ity_I64);
   IRTemp len2 = newTemp(Ity_I64);

   assign(addr1, get_gpr_dw0(r1));
   assign(addr2, get_gpr_dw0(r2));
   assign(len1, get_gpr_dw0(r1 + 1));
   assign(len2, get_gpr_dw0(r2 + 1));

   /* Extended (well-formedness) checking is in effect only when the ETF3
      facility is present and bit 0 of m3 is set. */
   UInt extended_checking = s390_host_has_etf3 && (m3 & 0x1) == 1;

   /* We're processing the 2nd operand 1 byte at a time. Therefore, if
      there is less than 1 byte left, then the 2nd operand is exhausted
      and we're done here. cc = 0 */
   s390_cc_set(0);
   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len2), mkU64(1)));

   /* There is at least one byte there. Read it. */
   IRTemp byte1 = newTemp(Ity_I64);
   assign(byte1, unop(Iop_8Uto64, load(Ity_I8, mkexpr(addr2))));

   /* Call the helper to get number of bytes and invalid byte indicator.
      RETVAL1 packing: bit 0 = invalid-byte flag; bits 8 and up = number
      of source bytes making up this character (extracted below). */
   IRTemp retval1 = newTemp(Ity_I64);
   assign(retval1, s390_call_cu12_cu14_helper1(mkexpr(byte1),
                                               mkU64(extended_checking)));

   /* Check for invalid 1st byte */
   IRExpr *is_invalid = unop(Iop_64to1, mkexpr(retval1));
   s390_cc_set(2);
   next_insn_if(is_invalid);

   /* How many bytes do we have to read? */
   IRTemp num_src_bytes = newTemp(Ity_I64);
   assign(num_src_bytes, binop(Iop_Shr64, mkexpr(retval1), mkU8(8)));

   /* Now test whether the 2nd operand is exhausted */
   s390_cc_set(0);
   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len2), mkexpr(num_src_bytes)));

   /* Read the remaining bytes.  Each load is conditional on
      NUM_SRC_BYTES being large enough; a byte that is not part of the
      character is replaced by 0 and ignored by the helper. */
   IRExpr *cond, *addr, *byte2, *byte3, *byte4;

   cond  = binop(Iop_CmpLE64U, mkU64(2), mkexpr(num_src_bytes));
   addr  = binop(Iop_Add64, mkexpr(addr2), mkU64(1));
   byte2 = mkite(cond, unop(Iop_8Uto64, load(Ity_I8, addr)), mkU64(0));
   cond  = binop(Iop_CmpLE64U, mkU64(3), mkexpr(num_src_bytes));
   addr  = binop(Iop_Add64, mkexpr(addr2), mkU64(2));
   byte3 = mkite(cond, unop(Iop_8Uto64, load(Ity_I8, addr)), mkU64(0));
   cond  = binop(Iop_CmpLE64U, mkU64(4), mkexpr(num_src_bytes));
   addr  = binop(Iop_Add64, mkexpr(addr2), mkU64(3));
   byte4 = mkite(cond, unop(Iop_8Uto64, load(Ity_I8, addr)), mkU64(0));

   /* Call the helper to get the converted value and invalid byte indicator.
      We can pass at most 5 arguments; therefore some encoding is needed
      here: STUFF = (num_src_bytes << 1) | extended_checking. */
   IRExpr *stuff = binop(Iop_Or64,
                         binop(Iop_Shl64, mkexpr(num_src_bytes), mkU8(1)),
                         mkU64(extended_checking));
   IRTemp retval2 = newTemp(Ity_I64);

   if (is_cu12) {
      assign(retval2, s390_call_cu12_helper2(mkexpr(byte1), byte2, byte3,
                                             byte4, stuff));
   } else {
      assign(retval2, s390_call_cu14_helper2(mkexpr(byte1), byte2, byte3,
                                             byte4, stuff));
   }

   /* Check for invalid character */
   s390_cc_set(2);
   is_invalid = unop(Iop_64to1, mkexpr(retval2));
   next_insn_if(is_invalid);

   /* Now test whether the 1st operand is exhausted.  RETVAL2 packing:
      bit 0 = invalid flag, byte 1 = number of output bytes, bytes 2
      and up = the converted data (extracted below). */
   IRTemp num_bytes = newTemp(Ity_I64);
   assign(num_bytes, binop(Iop_And64,
                           binop(Iop_Shr64, mkexpr(retval2), mkU8(8)),
                           mkU64(0xff)));
   s390_cc_set(1);
   next_insn_if(binop(Iop_CmpLT64U, mkexpr(len1), mkexpr(num_bytes)));

   /* Extract the bytes to be stored at addr1 */
   IRTemp data = newTemp(Ity_I64);
   assign(data, binop(Iop_Shr64, mkexpr(retval2), mkU8(16)));

   if (is_cu12) {
      /* To store the bytes construct 2 dirty helper calls. The helper calls
         are guarded (num_bytes == 2 and num_bytes == 4, respectively) such
         that only one of them will be called at runtime. */

      Int i;
      for (i = 2; i <= 4; ++i) {
         IRDirty *d;

         if (i == 3) continue;  // skip this one; only 2 or 4 bytes possible

         d = unsafeIRDirty_0_N(0 /* regparms */, "s390x_dirtyhelper_CUxy",
                               &s390x_dirtyhelper_CUxy,
                               mkIRExprVec_3(mkexpr(addr1), mkexpr(data),
                                             mkexpr(num_bytes)));
         d->guard = binop(Iop_CmpEQ64, mkexpr(num_bytes), mkU64(i));
         d->mFx   = Ifx_Write;
         d->mAddr = mkexpr(addr1);
         d->mSize = i;
         stmt(IRStmt_Dirty(d));
      }
   } else {
      // cu14: output is always a single 4-byte value
      store(mkexpr(addr1), unop(Iop_64to32, mkexpr(data)));
   }

   /* Update source address and length */
   put_gpr_dw0(r2,     binop(Iop_Add64, mkexpr(addr2), mkexpr(num_src_bytes)));
   put_gpr_dw0(r2 + 1, binop(Iop_Sub64, mkexpr(len2),  mkexpr(num_src_bytes)));

   /* Update destination address and length */
   put_gpr_dw0(r1,     binop(Iop_Add64, mkexpr(addr1), mkexpr(num_bytes)));
   put_gpr_dw0(r1 + 1, binop(Iop_Sub64, mkexpr(len1),  mkexpr(num_bytes)));

   iterate();
}
+
+static const HChar *
+s390_irgen_CU12(UChar m3, UChar r1, UChar r2)
+{
+   s390_irgen_cu12_cu14(m3, r1, r2, /* is_cu12 = */ 1);
+
+   return "cu12";
+}
+
+static const HChar *
+s390_irgen_CU14(UChar m3, UChar r1, UChar r2)
+{
+   s390_irgen_cu12_cu14(m3, r1, r2, /* is_cu12 = */ 0);
+
+   return "cu14";
+}
+
+static IRExpr *
+s390_call_ecag(IRExpr *op2addr)
+{
+   IRExpr **args, *call;
+
+   args = mkIRExprVec_1(op2addr);
+   call = mkIRExprCCall(Ity_I64, 0 /*regparm*/,
+                        "s390_do_ecag", &s390_do_ecag, args);
+
+   /* Nothing is excluded from definedness checking. */
+   call->Iex.CCall.cee->mcx_mask = 0;
+
+   return call;
+}
+
+static const HChar *
+s390_irgen_ECAG(UChar r1, UChar r3 __attribute__((unused)), IRTemp op2addr)
+{
+   if (! s390_host_has_gie) {
+      emulation_failure(EmFail_S390X_ecag);
+   } else {
+      put_gpr_dw0(r1, s390_call_ecag(mkexpr(op2addr)));
+   }
+
+   return "ecag";
+}
+
+
+/* New insns are added here.
+   If an insn is contingent on a facility being installed also
+   check whether the list of supported facilities in function
+   s390x_dirtyhelper_STFLE needs updating */
+
+/*------------------------------------------------------------*/
+/*--- Build IR for special instructions                    ---*/
+/*------------------------------------------------------------*/
+
+static void
+s390_irgen_client_request(void)
+{
+   if (0)
+      vex_printf("%%R3 = client_request ( %%R2 )\n");
+
+   Addr64 next = guest_IA_curr_instr + S390_SPECIAL_OP_PREAMBLE_SIZE
+                                     + S390_SPECIAL_OP_SIZE;
+
+   dis_res->jk_StopHere = Ijk_ClientReq;
+   dis_res->whatNext = Dis_StopHere;
+
+   put_IA(mkaddr_expr(next));
+}
+
+static void
+s390_irgen_guest_NRADDR(void)
+{
+   if (0)
+      vex_printf("%%R3 = guest_NRADDR\n");
+
+   put_gpr_dw0(3, IRExpr_Get(S390X_GUEST_OFFSET(guest_NRADDR), Ity_I64));
+}
+
+static void
+s390_irgen_call_noredir(void)
+{
+   Addr64 next = guest_IA_curr_instr + S390_SPECIAL_OP_PREAMBLE_SIZE
+                                     + S390_SPECIAL_OP_SIZE;
+
+   /* Continue after special op */
+   put_gpr_dw0(14, mkaddr_expr(next));
+
+   /* The address is in REG1, all parameters are in the right (guest) places */
+   put_IA(get_gpr_dw0(1));
+
+   dis_res->whatNext = Dis_StopHere;
+   dis_res->jk_StopHere = Ijk_NoRedir;
+}
+
/* Pack the instruction-format structures below without padding, so their
   bit-fields map directly onto the raw instruction bytes. */
+#pragma pack(1)
+
+
/* Decode a 2-byte s390 insn from BYTES and generate IR for it.
   The raw bytes are copied into a union overlaying the 2-byte
   instruction formats (E, I, RR); the bit-fields then deliver the
   individual instruction fields.  NOTE(review): this presumes the host
   compiler lays out bit-fields MSB-first, matching the big-endian
   instruction encoding -- confirm for new host compilers.
   Returns S390_DECODE_OK, S390_DECODE_UNIMPLEMENTED_INSN, or
   S390_DECODE_UNKNOWN_INSN. */
static s390_decode_t
s390_decode_2byte_and_irgen(const UChar *bytes)
{
   typedef union {
      struct {
         unsigned int op : 16;
      } E;
      struct {
         unsigned int op :  8;
         unsigned int i  :  8;
      } I;
      struct {
         unsigned int op :  8;
         unsigned int r1 :  4;
         unsigned int r2 :  4;
      } RR;
   } formats;
   union {
      formats fmt;
      UShort value;
   } ovl;

   vassert(sizeof(formats) == 2);

   ((UChar *)(&ovl.value))[0] = bytes[0];
   ((UChar *)(&ovl.value))[1] = bytes[1];

   /* First try the opcodes that occupy the full 16 bits. */
   switch (ovl.value & 0xffff) {
   case 0x0101: /* PR */ goto unimplemented;
   case 0x0102: /* UPT */ goto unimplemented;
   case 0x0104: /* PTFF */ goto unimplemented;
   case 0x0107: /* SCKPF */ goto unimplemented;
   case 0x010a: s390_format_E(s390_irgen_PFPO); goto ok;
   case 0x010b: /* TAM */ goto unimplemented;
   case 0x010c: /* SAM24 */ goto unimplemented;
   case 0x010d: /* SAM31 */ goto unimplemented;
   case 0x010e: /* SAM64 */ goto unimplemented;
   case 0x01ff: /* TRAP2 */ goto unimplemented;
   }

   /* Then the opcodes identified by the first byte only. */
   switch ((ovl.value & 0xff00) >> 8) {
   case 0x04: /* SPM */ goto unimplemented;
   case 0x05: /* BALR */ goto unimplemented;
   case 0x06: s390_format_RR_RR(s390_irgen_BCTR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x07: s390_format_RR(s390_irgen_BCR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                             goto ok;
   case 0x0a: s390_format_I(s390_irgen_SVC, ovl.fmt.I.i);  goto ok;
   case 0x0b: /* BSM */ goto unimplemented;
   case 0x0c: /* BASSM */ goto unimplemented;
   case 0x0d: s390_format_RR_RR(s390_irgen_BASR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x0e: s390_format_RR(s390_irgen_MVCL, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                             goto ok;
   case 0x0f: s390_format_RR(s390_irgen_CLCL, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                             goto ok;
   case 0x10: s390_format_RR_RR(s390_irgen_LPR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x11: s390_format_RR_RR(s390_irgen_LNR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x12: s390_format_RR_RR(s390_irgen_LTR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x13: s390_format_RR_RR(s390_irgen_LCR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x14: s390_format_RR_RR(s390_irgen_NR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x15: s390_format_RR_RR(s390_irgen_CLR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x16: s390_format_RR_RR(s390_irgen_OR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x17: s390_format_RR_RR(s390_irgen_XR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x18: s390_format_RR_RR(s390_irgen_LR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x19: s390_format_RR_RR(s390_irgen_CR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x1a: s390_format_RR_RR(s390_irgen_AR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x1b: s390_format_RR_RR(s390_irgen_SR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x1c: s390_format_RR_RR(s390_irgen_MR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x1d: s390_format_RR_RR(s390_irgen_DR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x1e: s390_format_RR_RR(s390_irgen_ALR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x1f: s390_format_RR_RR(s390_irgen_SLR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x20: /* LPDR */ goto unimplemented;
   case 0x21: /* LNDR */ goto unimplemented;
   case 0x22: /* LTDR */ goto unimplemented;
   case 0x23: /* LCDR */ goto unimplemented;
   case 0x24: /* HDR */ goto unimplemented;
   case 0x25: /* LDXR */ goto unimplemented;
   case 0x26: /* MXR */ goto unimplemented;
   case 0x27: /* MXDR */ goto unimplemented;
   case 0x28: s390_format_RR_FF(s390_irgen_LDR, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x29: /* CDR */ goto unimplemented;
   case 0x2a: /* ADR */ goto unimplemented;
   case 0x2b: /* SDR */ goto unimplemented;
   case 0x2c: /* MDR */ goto unimplemented;
   case 0x2d: /* DDR */ goto unimplemented;
   case 0x2e: /* AWR */ goto unimplemented;
   case 0x2f: /* SWR */ goto unimplemented;
   case 0x30: /* LPER */ goto unimplemented;
   case 0x31: /* LNER */ goto unimplemented;
   case 0x32: /* LTER */ goto unimplemented;
   case 0x33: /* LCER */ goto unimplemented;
   case 0x34: /* HER */ goto unimplemented;
   case 0x35: /* LEDR */ goto unimplemented;
   case 0x36: /* AXR */ goto unimplemented;
   case 0x37: /* SXR */ goto unimplemented;
   case 0x38: s390_format_RR_FF(s390_irgen_LER, ovl.fmt.RR.r1, ovl.fmt.RR.r2);
                                goto ok;
   case 0x39: /* CER */ goto unimplemented;
   case 0x3a: /* AER */ goto unimplemented;
   case 0x3b: /* SER */ goto unimplemented;
   case 0x3c: /* MDER */ goto unimplemented;
   case 0x3d: /* DER */ goto unimplemented;
   case 0x3e: /* AUR */ goto unimplemented;
   case 0x3f: /* SUR */ goto unimplemented;
   }

   return S390_DECODE_UNKNOWN_INSN;

ok:
   return S390_DECODE_OK;

unimplemented:
   return S390_DECODE_UNIMPLEMENTED_INSN;
}
+
+static s390_decode_t
+s390_decode_4byte_and_irgen(const UChar *bytes)
+{
+   typedef union {
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int op2 :  4;
+         unsigned int i2  : 16;
+      } RI;
+      struct {
+         unsigned int op : 16;
+         unsigned int    :  8;
+         unsigned int r1 :  4;
+         unsigned int r2 :  4;
+      } RRE;
+      struct {
+         unsigned int op : 16;
+         unsigned int r1 :  4;
+         unsigned int    :  4;
+         unsigned int r3 :  4;
+         unsigned int r2 :  4;
+      } RRF;
+      struct {
+         unsigned int op : 16;
+         unsigned int m3 :  4;
+         unsigned int m4 :  4;
+         unsigned int r1 :  4;
+         unsigned int r2 :  4;
+      } RRF2;
+      struct {
+         unsigned int op : 16;
+         unsigned int r3 :  4;
+         unsigned int    :  4;
+         unsigned int r1 :  4;
+         unsigned int r2 :  4;
+      } RRF3;
+      struct {
+         unsigned int op : 16;
+         unsigned int r3 :  4;
+         unsigned int    :  4;
+         unsigned int r1 :  4;
+         unsigned int r2 :  4;
+      } RRR;
+      struct {
+         unsigned int op : 16;
+         unsigned int r3 :  4;
+         unsigned int m4 :  4;
+         unsigned int r1 :  4;
+         unsigned int r2 :  4;
+      } RRF4;
+      struct {
+         unsigned int op : 16;
+         unsigned int    :  4;
+         unsigned int m4 :  4;
+         unsigned int r1 :  4;
+         unsigned int r2 :  4;
+      } RRF5;
+      struct {
+         unsigned int op :  8;
+         unsigned int r1 :  4;
+         unsigned int r3 :  4;
+         unsigned int b2 :  4;
+         unsigned int d2 : 12;
+      } RS;
+      struct {
+         unsigned int op :  8;
+         unsigned int r1 :  4;
+         unsigned int r3 :  4;
+         unsigned int i2 : 16;
+      } RSI;
+      struct {
+         unsigned int op :  8;
+         unsigned int r1 :  4;
+         unsigned int x2 :  4;
+         unsigned int b2 :  4;
+         unsigned int d2 : 12;
+      } RX;
+      struct {
+         unsigned int op : 16;
+         unsigned int b2 :  4;
+         unsigned int d2 : 12;
+      } S;
+      struct {
+         unsigned int op :  8;
+         unsigned int i2 :  8;
+         unsigned int b1 :  4;
+         unsigned int d1 : 12;
+      } SI;
+   } formats;
+   union {
+      formats fmt;
+      UInt value;
+   } ovl;
+
+   vassert(sizeof(formats) == 4);
+
+   ((UChar *)(&ovl.value))[0] = bytes[0];
+   ((UChar *)(&ovl.value))[1] = bytes[1];
+   ((UChar *)(&ovl.value))[2] = bytes[2];
+   ((UChar *)(&ovl.value))[3] = bytes[3];
+
+   switch ((ovl.value & 0xff0f0000) >> 16) {
+   case 0xa500: s390_format_RI_RU(s390_irgen_IIHH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa501: s390_format_RI_RU(s390_irgen_IIHL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa502: s390_format_RI_RU(s390_irgen_IILH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa503: s390_format_RI_RU(s390_irgen_IILL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa504: s390_format_RI_RU(s390_irgen_NIHH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa505: s390_format_RI_RU(s390_irgen_NIHL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa506: s390_format_RI_RU(s390_irgen_NILH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa507: s390_format_RI_RU(s390_irgen_NILL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa508: s390_format_RI_RU(s390_irgen_OIHH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa509: s390_format_RI_RU(s390_irgen_OIHL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa50a: s390_format_RI_RU(s390_irgen_OILH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa50b: s390_format_RI_RU(s390_irgen_OILL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa50c: s390_format_RI_RU(s390_irgen_LLIHH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa50d: s390_format_RI_RU(s390_irgen_LLIHL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa50e: s390_format_RI_RU(s390_irgen_LLILH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa50f: s390_format_RI_RU(s390_irgen_LLILL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa700: s390_format_RI_RU(s390_irgen_TMLH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa701: s390_format_RI_RU(s390_irgen_TMLL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa702: s390_format_RI_RU(s390_irgen_TMHH, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa703: s390_format_RI_RU(s390_irgen_TMHL, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa704: s390_format_RI(s390_irgen_BRC, ovl.fmt.RI.r1, ovl.fmt.RI.i2);
+                               goto ok;
+   case 0xa705: s390_format_RI_RP(s390_irgen_BRAS, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa706: s390_format_RI_RP(s390_irgen_BRCT, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa707: s390_format_RI_RP(s390_irgen_BRCTG, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa708: s390_format_RI_RI(s390_irgen_LHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2);
+                                  goto ok;
+   case 0xa709: s390_format_RI_RI(s390_irgen_LGHI, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa70a: s390_format_RI_RI(s390_irgen_AHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2);
+                                  goto ok;
+   case 0xa70b: s390_format_RI_RI(s390_irgen_AGHI, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa70c: s390_format_RI_RI(s390_irgen_MHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2);
+                                  goto ok;
+   case 0xa70d: s390_format_RI_RI(s390_irgen_MGHI, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   case 0xa70e: s390_format_RI_RI(s390_irgen_CHI, ovl.fmt.RI.r1, ovl.fmt.RI.i2);
+                                  goto ok;
+   case 0xa70f: s390_format_RI_RI(s390_irgen_CGHI, ovl.fmt.RI.r1,
+                                  ovl.fmt.RI.i2);  goto ok;
+   }
+
+   switch ((ovl.value & 0xffff0000) >> 16) {
+   case 0x8000: /* SSM */ goto unimplemented;
+   case 0x8200: /* LPSW */ goto unimplemented;
+   case 0x9300: /* TS */ goto unimplemented;
+   case 0xb202: /* STIDP */ goto unimplemented;
+   case 0xb204: /* SCK */ goto unimplemented;
+   case 0xb205: s390_format_S_RD(s390_irgen_STCK, ovl.fmt.S.b2, ovl.fmt.S.d2);
+                goto ok;
+   case 0xb206: /* SCKC */ goto unimplemented;
+   case 0xb207: /* STCKC */ goto unimplemented;
+   case 0xb208: /* SPT */ goto unimplemented;
+   case 0xb209: /* STPT */ goto unimplemented;
+   case 0xb20a: /* SPKA */ goto unimplemented;
+   case 0xb20b: /* IPK */ goto unimplemented;
+   case 0xb20d: /* PTLB */ goto unimplemented;
+   case 0xb210: /* SPX */ goto unimplemented;
+   case 0xb211: /* STPX */ goto unimplemented;
+   case 0xb212: /* STAP */ goto unimplemented;
+   case 0xb214: /* SIE */ goto unimplemented;
+   case 0xb218: /* PC */ goto unimplemented;
+   case 0xb219: /* SAC */ goto unimplemented;
+   case 0xb21a: /* CFC */ goto unimplemented;
+   case 0xb221: /* IPTE */ goto unimplemented;
+   case 0xb222: s390_format_RRE_R0(s390_irgen_IPM, ovl.fmt.RRE.r1);  goto ok;
+   case 0xb223: /* IVSK */ goto unimplemented;
+   case 0xb224: /* IAC */ goto unimplemented;
+   case 0xb225: /* SSAR */ goto unimplemented;
+   case 0xb226: /* EPAR */ goto unimplemented;
+   case 0xb227: /* ESAR */ goto unimplemented;
+   case 0xb228: /* PT */ goto unimplemented;
+   case 0xb229: /* ISKE */ goto unimplemented;
+   case 0xb22a: /* RRBE */ goto unimplemented;
+   case 0xb22b: /* SSKE */ goto unimplemented;
+   case 0xb22c: /* TB */ goto unimplemented;
+   case 0xb22d: /* DXR */ goto unimplemented;
+   case 0xb22e: /* PGIN */ goto unimplemented;
+   case 0xb22f: /* PGOUT */ goto unimplemented;
+   case 0xb230: /* CSCH */ goto unimplemented;
+   case 0xb231: /* HSCH */ goto unimplemented;
+   case 0xb232: /* MSCH */ goto unimplemented;
+   case 0xb233: /* SSCH */ goto unimplemented;
+   case 0xb234: /* STSCH */ goto unimplemented;
+   case 0xb235: /* TSCH */ goto unimplemented;
+   case 0xb236: /* TPI */ goto unimplemented;
+   case 0xb237: /* SAL */ goto unimplemented;
+   case 0xb238: /* RSCH */ goto unimplemented;
+   case 0xb239: /* STCRW */ goto unimplemented;
+   case 0xb23a: /* STCPS */ goto unimplemented;
+   case 0xb23b: /* RCHP */ goto unimplemented;
+   case 0xb23c: /* SCHM */ goto unimplemented;
+   case 0xb240: /* BAKR */ goto unimplemented;
+   case 0xb241: s390_format_RRE(s390_irgen_CKSM, ovl.fmt.RRE.r1,
+                                ovl.fmt.RRE.r2);  goto ok;
+   case 0xb244: /* SQDR */ goto unimplemented;
+   case 0xb245: /* SQER */ goto unimplemented;
+   case 0xb246: /* STURA */ goto unimplemented;
+   case 0xb247: /* MSTA */ goto unimplemented;
+   case 0xb248: /* PALB */ goto unimplemented;
+   case 0xb249: /* EREG */ goto unimplemented;
+   case 0xb24a: /* ESTA */ goto unimplemented;
+   case 0xb24b: /* LURA */ goto unimplemented;
+   case 0xb24c: /* TAR */ goto unimplemented;
+   case 0xb24d: s390_format_RRE(s390_irgen_CPYA, ovl.fmt.RRE.r1,
+                                ovl.fmt.RRE.r2);  goto ok;
+   case 0xb24e: s390_format_RRE(s390_irgen_SAR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2);
+                                goto ok;
+   case 0xb24f: s390_format_RRE(s390_irgen_EAR, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2);
+                                goto ok;
+   case 0xb250: /* CSP */ goto unimplemented;
+   case 0xb252: s390_format_RRE_RR(s390_irgen_MSR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb254: /* MVPG */ goto unimplemented;
+   case 0xb255: s390_format_RRE_RR(s390_irgen_MVST, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb257: /* CUSE */ goto unimplemented;
+   case 0xb258: /* BSG */ goto unimplemented;
+   case 0xb25a: /* BSA */ goto unimplemented;
+   case 0xb25d: s390_format_RRE_RR(s390_irgen_CLST, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb25e: s390_format_RRE_RR(s390_irgen_SRST, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb263: /* CMPSC */ goto unimplemented;
+   case 0xb274: /* SIGA */ goto unimplemented;
+   case 0xb276: /* XSCH */ goto unimplemented;
+   case 0xb277: /* RP */ goto unimplemented;
+   case 0xb278: s390_format_S_RD(s390_irgen_STCKE, ovl.fmt.S.b2, ovl.fmt.S.d2);goto ok;
+   case 0xb279: /* SACF */ goto unimplemented;
+   case 0xb27c: s390_format_S_RD(s390_irgen_STCKF, ovl.fmt.S.b2, ovl.fmt.S.d2);goto ok;
+   case 0xb27d: /* STSI */ goto unimplemented;
+   case 0xb280: /* LPP */ goto unimplemented;
+   case 0xb284: /* LCCTL */ goto unimplemented;
+   case 0xb285: /* LPCTL */ goto unimplemented;
+   case 0xb286: /* QSI */ goto unimplemented;
+   case 0xb287: /* LSCTL */ goto unimplemented;
+   case 0xb28e: /* QCTRI */ goto unimplemented;
+   case 0xb299: s390_format_S_RD(s390_irgen_SRNM, ovl.fmt.S.b2, ovl.fmt.S.d2);
+                                 goto ok;
+   case 0xb29c: s390_format_S_RD(s390_irgen_STFPC, ovl.fmt.S.b2, ovl.fmt.S.d2);
+                                 goto ok;
+   case 0xb29d: s390_format_S_RD(s390_irgen_LFPC, ovl.fmt.S.b2, ovl.fmt.S.d2);
+                                 goto ok;
+   case 0xb2a5: s390_format_RRE_FF(s390_irgen_TRE, ovl.fmt.RRE.r1, ovl.fmt.RRE.r2);  goto ok;
+   case 0xb2a6: s390_format_RRF_M0RERE(s390_irgen_CU21, ovl.fmt.RRF3.r3,
+                                       ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);
+      goto ok;
+   case 0xb2a7: s390_format_RRF_M0RERE(s390_irgen_CU12, ovl.fmt.RRF3.r3,
+                                       ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);
+      goto ok;
+   case 0xb2b0: s390_format_S_RD(s390_irgen_STFLE, ovl.fmt.S.b2, ovl.fmt.S.d2);
+                                 goto ok;
+   case 0xb2b1: /* STFL */ goto unimplemented;
+   case 0xb2b2: /* LPSWE */ goto unimplemented;
+   case 0xb2b8: s390_irgen_srnmb_wrapper(ovl.fmt.S.b2, ovl.fmt.S.d2);
+      goto ok;
+   case 0xb2b9: s390_format_S_RD(s390_irgen_SRNMT, ovl.fmt.S.b2, ovl.fmt.S.d2);
+      goto ok;
+   case 0xb2bd: /* LFAS */ goto unimplemented;
+   case 0xb2e0: /* SCCTR */ goto unimplemented;
+   case 0xb2e1: /* SPCTR */ goto unimplemented;
+   case 0xb2e4: /* ECCTR */ goto unimplemented;
+   case 0xb2e5: /* EPCTR */ goto unimplemented;
+   case 0xb2e8: /* PPA */ goto unimplemented;
+   case 0xb2ec: /* ETND */ goto unimplemented;
+   case 0xb2ed: /* ECPGA */ goto unimplemented;
+   case 0xb2f8: /* TEND */ goto unimplemented;
+   case 0xb2fa: /* NIAI */ goto unimplemented;
+   case 0xb2fc: /* TABORT */ goto unimplemented;
+   case 0xb2ff: /* TRAP4 */ goto unimplemented;
+   case 0xb300: s390_format_RRE_FF(s390_irgen_LPEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb301: s390_format_RRE_FF(s390_irgen_LNEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb302: s390_format_RRE_FF(s390_irgen_LTEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb303: s390_format_RRE_FF(s390_irgen_LCEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb304: s390_format_RRE_FF(s390_irgen_LDEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb305: s390_format_RRE_FF(s390_irgen_LXDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb306: s390_format_RRE_FF(s390_irgen_LXEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb307: /* MXDBR */ goto unimplemented;
+   case 0xb308: /* KEBR */ goto unimplemented;
+   case 0xb309: s390_format_RRE_FF(s390_irgen_CEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb30a: s390_format_RRE_FF(s390_irgen_AEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb30b: s390_format_RRE_FF(s390_irgen_SEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb30c: /* MDEBR */ goto unimplemented;
+   case 0xb30d: s390_format_RRE_FF(s390_irgen_DEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb30e: s390_format_RRF_F0FF(s390_irgen_MAEBR, ovl.fmt.RRF.r1,
+                                     ovl.fmt.RRF.r3, ovl.fmt.RRF.r2);  goto ok;
+   case 0xb30f: s390_format_RRF_F0FF(s390_irgen_MSEBR, ovl.fmt.RRF.r1,
+                                     ovl.fmt.RRF.r3, ovl.fmt.RRF.r2);  goto ok;
+   case 0xb310: s390_format_RRE_FF(s390_irgen_LPDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb311: s390_format_RRE_FF(s390_irgen_LNDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb312: s390_format_RRE_FF(s390_irgen_LTDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb313: s390_format_RRE_FF(s390_irgen_LCDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb314: s390_format_RRE_FF(s390_irgen_SQEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb315: s390_format_RRE_FF(s390_irgen_SQDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb316: s390_format_RRE_FF(s390_irgen_SQXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb317: s390_format_RRE_FF(s390_irgen_MEEBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb318: /* KDBR */ goto unimplemented;
+   case 0xb319: s390_format_RRE_FF(s390_irgen_CDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb31a: s390_format_RRE_FF(s390_irgen_ADBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb31b: s390_format_RRE_FF(s390_irgen_SDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb31c: s390_format_RRE_FF(s390_irgen_MDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb31d: s390_format_RRE_FF(s390_irgen_DDBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb31e: s390_format_RRF_F0FF(s390_irgen_MADBR, ovl.fmt.RRF.r1,
+                                     ovl.fmt.RRF.r3, ovl.fmt.RRF.r2);  goto ok;
+   case 0xb31f: s390_format_RRF_F0FF(s390_irgen_MSDBR, ovl.fmt.RRF.r1,
+                                     ovl.fmt.RRF.r3, ovl.fmt.RRF.r2);  goto ok;
+   case 0xb324: /* LDER */ goto unimplemented;
+   case 0xb325: /* LXDR */ goto unimplemented;
+   case 0xb326: /* LXER */ goto unimplemented;
+   case 0xb32e: /* MAER */ goto unimplemented;
+   case 0xb32f: /* MSER */ goto unimplemented;
+   case 0xb336: /* SQXR */ goto unimplemented;
+   case 0xb337: /* MEER */ goto unimplemented;
+   case 0xb338: /* MAYLR */ goto unimplemented;
+   case 0xb339: /* MYLR */ goto unimplemented;
+   case 0xb33a: /* MAYR */ goto unimplemented;
+   case 0xb33b: /* MYR */ goto unimplemented;
+   case 0xb33c: /* MAYHR */ goto unimplemented;
+   case 0xb33d: /* MYHR */ goto unimplemented;
+   case 0xb33e: /* MADR */ goto unimplemented;
+   case 0xb33f: /* MSDR */ goto unimplemented;
+   case 0xb340: s390_format_RRE_FF(s390_irgen_LPXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb341: s390_format_RRE_FF(s390_irgen_LNXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb342: s390_format_RRE_FF(s390_irgen_LTXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb343: s390_format_RRE_FF(s390_irgen_LCXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb344: s390_format_RRF_UUFF(s390_irgen_LEDBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb345: s390_format_RRF_UUFF(s390_irgen_LDXBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb346: s390_format_RRF_UUFF(s390_irgen_LEXBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb347: /* FIXBR */ goto unimplemented;
+   case 0xb348: /* KXBR */ goto unimplemented;
+   case 0xb349: s390_format_RRE_FF(s390_irgen_CXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb34a: s390_format_RRE_FF(s390_irgen_AXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb34b: s390_format_RRE_FF(s390_irgen_SXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb34c: s390_format_RRE_FF(s390_irgen_MXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb34d: s390_format_RRE_FF(s390_irgen_DXBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb350: /* TBEDR */ goto unimplemented;
+   case 0xb351: /* TBDR */ goto unimplemented;
+   case 0xb353: /* DIEBR */ goto unimplemented;
+   case 0xb357: /* FIEBR */ goto unimplemented;
+   case 0xb358: /* THDER */ goto unimplemented;
+   case 0xb359: /* THDR */ goto unimplemented;
+   case 0xb35b: /* DIDBR */ goto unimplemented;
+   case 0xb35f: /* FIDBR */ goto unimplemented;
+   case 0xb360: /* LPXR */ goto unimplemented;
+   case 0xb361: /* LNXR */ goto unimplemented;
+   case 0xb362: /* LTXR */ goto unimplemented;
+   case 0xb363: /* LCXR */ goto unimplemented;
+   case 0xb365: s390_format_RRE_FF(s390_irgen_LXR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb366: /* LEXR */ goto unimplemented;
+   case 0xb367: /* FIXR */ goto unimplemented;
+   case 0xb369: /* CXR */ goto unimplemented;
+   case 0xb370: s390_format_RRE_FF(s390_irgen_LPDFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb371: s390_format_RRE_FF(s390_irgen_LNDFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb372: s390_format_RRF_F0FF2(s390_irgen_CPSDR, ovl.fmt.RRF3.r3,
+                                      ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);
+                                      goto ok;
+   case 0xb373: s390_format_RRE_FF(s390_irgen_LCDFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb374: s390_format_RRE_F0(s390_irgen_LZER, ovl.fmt.RRE.r1);  goto ok;
+   case 0xb375: s390_format_RRE_F0(s390_irgen_LZDR, ovl.fmt.RRE.r1);  goto ok;
+   case 0xb376: s390_format_RRE_F0(s390_irgen_LZXR, ovl.fmt.RRE.r1);  goto ok;
+   case 0xb377: /* FIER */ goto unimplemented;
+   case 0xb37f: /* FIDR */ goto unimplemented;
+   case 0xb384: s390_format_RRE_R0(s390_irgen_SFPC, ovl.fmt.RRE.r1);  goto ok;
+   case 0xb385: /* SFASR */ goto unimplemented;
+   case 0xb38c: s390_format_RRE_R0(s390_irgen_EFPC, ovl.fmt.RRE.r1);  goto ok;
+   case 0xb390: s390_format_RRF_UUFR(s390_irgen_CELFBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb391: s390_format_RRF_UUFR(s390_irgen_CDLFBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb392: s390_format_RRF_UUFR(s390_irgen_CXLFBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb394: s390_format_RRF_UUFR(s390_irgen_CEFBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb395: s390_format_RRF_UUFR(s390_irgen_CDFBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb396: s390_format_RRF_UUFR(s390_irgen_CXFBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb398: s390_format_RRF_UURF(s390_irgen_CFEBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb399: s390_format_RRF_UURF(s390_irgen_CFDBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb39a: s390_format_RRF_UURF(s390_irgen_CFXBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb39c: s390_format_RRF_UURF(s390_irgen_CLFEBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb39d: s390_format_RRF_UURF(s390_irgen_CLFDBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb39e: s390_format_RRF_UURF(s390_irgen_CLFXBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3a0: s390_format_RRF_UUFR(s390_irgen_CELGBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3a1: s390_format_RRF_UUFR(s390_irgen_CDLGBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3a2: s390_format_RRF_UUFR(s390_irgen_CXLGBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3a4: s390_format_RRF_UUFR(s390_irgen_CEGBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3a5: s390_format_RRF_UUFR(s390_irgen_CDGBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3a6: s390_format_RRF_UUFR(s390_irgen_CXGBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3a8: s390_format_RRF_UURF(s390_irgen_CGEBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3a9: s390_format_RRF_UURF(s390_irgen_CGDBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3aa: s390_format_RRF_UURF(s390_irgen_CGXBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3ac: s390_format_RRF_UURF(s390_irgen_CLGEBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3ad: s390_format_RRF_UURF(s390_irgen_CLGDBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3ae: s390_format_RRF_UURF(s390_irgen_CLGXBR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3b4: /* CEFR */ goto unimplemented;
+   case 0xb3b5: /* CDFR */ goto unimplemented;
+   case 0xb3b6: /* CXFR */ goto unimplemented;
+   case 0xb3b8: /* CFER */ goto unimplemented;
+   case 0xb3b9: /* CFDR */ goto unimplemented;
+   case 0xb3ba: /* CFXR */ goto unimplemented;
+   case 0xb3c1: s390_format_RRE_FR(s390_irgen_LDGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3c4: /* CEGR */ goto unimplemented;
+   case 0xb3c5: /* CDGR */ goto unimplemented;
+   case 0xb3c6: /* CXGR */ goto unimplemented;
+   case 0xb3c8: /* CGER */ goto unimplemented;
+   case 0xb3c9: /* CGDR */ goto unimplemented;
+   case 0xb3ca: /* CGXR */ goto unimplemented;
+   case 0xb3cd: s390_format_RRE_RF(s390_irgen_LGDR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3d0: s390_format_RRF_FUFF2(s390_irgen_MDTRA, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                      ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3d1: s390_format_RRF_FUFF2(s390_irgen_DDTRA, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                      ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3d2: s390_format_RRF_FUFF2(s390_irgen_ADTRA, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                      ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3d3: s390_format_RRF_FUFF2(s390_irgen_SDTRA, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                      ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3d4: s390_format_RRF_0UFF(s390_irgen_LDETR, ovl.fmt.RRF5.m4,
+                                     ovl.fmt.RRF5.r1, ovl.fmt.RRF5.r2); goto ok;
+   case 0xb3d5: s390_format_RRF_UUFF(s390_irgen_LEDTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3d6: s390_format_RRE_FF(s390_irgen_LTDTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3d7: /* FIDTR */ goto unimplemented;
+   case 0xb3d8: s390_format_RRF_FUFF2(s390_irgen_MXTRA, ovl.fmt.RRF4.r3,
+                                     ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                     ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3d9: s390_format_RRF_FUFF2(s390_irgen_DXTRA, ovl.fmt.RRF4.r3,
+                                     ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                     ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3da: s390_format_RRF_FUFF2(s390_irgen_AXTRA, ovl.fmt.RRF4.r3,
+                                     ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                     ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3db: s390_format_RRF_FUFF2(s390_irgen_SXTRA, ovl.fmt.RRF4.r3,
+                                     ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                     ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3dc: s390_format_RRF_0UFF(s390_irgen_LXDTR, ovl.fmt.RRF5.m4,
+                                     ovl.fmt.RRF5.r1, ovl.fmt.RRF5.r2); goto ok;
+   case 0xb3dd: s390_format_RRF_UUFF(s390_irgen_LDXTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3de: s390_format_RRE_FF(s390_irgen_LTXTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3df: /* FIXTR */ goto unimplemented;
+   case 0xb3e0: /* KDTR */ goto unimplemented;
+   case 0xb3e1: s390_format_RRF_UURF(s390_irgen_CGDTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3e2: /* CUDTR */ goto unimplemented;
+   case 0xb3e3: /* CSDTR */ goto unimplemented;
+   case 0xb3e4: s390_format_RRE_FF(s390_irgen_CDTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3e5: s390_format_RRE_RF(s390_irgen_EEDTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3e7: s390_format_RRE_RF(s390_irgen_ESDTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3e8: /* KXTR */ goto unimplemented;
+   case 0xb3e9: s390_format_RRF_UURF(s390_irgen_CGXTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3ea: /* CUXTR */ goto unimplemented;
+   case 0xb3eb: /* CSXTR */ goto unimplemented;
+   case 0xb3ec: s390_format_RRE_FF(s390_irgen_CXTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3ed: s390_format_RRE_RF(s390_irgen_EEXTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3ef: s390_format_RRE_RF(s390_irgen_ESXTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3f1: s390_format_RRF_UUFR(s390_irgen_CDGTRA, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3f2: /* CDUTR */ goto unimplemented;
+   case 0xb3f3: /* CDSTR */ goto unimplemented;
+   case 0xb3f4: s390_format_RRE_FF(s390_irgen_CEDTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3f5: s390_format_RRF_FUFF(s390_irgen_QADTR, ovl.fmt.RRF4.r3,
+                                     ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                     ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3f6: s390_format_RRF_F0FR(s390_irgen_IEDTR, ovl.fmt.RRF3.r3,
+                                     ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2); goto ok;
+   case 0xb3f7: s390_format_RRF_FFRU(s390_irgen_RRDTR, ovl.fmt.RRF4.r3,
+                                     ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                     ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3f9: s390_format_RRF_UUFR(s390_irgen_CXGTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb3fa: /* CXUTR */ goto unimplemented;
+   case 0xb3fb: /* CXSTR */ goto unimplemented;
+   case 0xb3fc: s390_format_RRE_FF(s390_irgen_CEXTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb3fd: s390_format_RRF_FUFF(s390_irgen_QAXTR, ovl.fmt.RRF4.r3,
+                                     ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                     ovl.fmt.RRF4.r2); goto ok;
+   case 0xb3fe: s390_format_RRF_F0FR(s390_irgen_IEXTR, ovl.fmt.RRF3.r3,
+                                     ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2); goto ok;
+   case 0xb3ff: s390_format_RRF_FFRU(s390_irgen_RRXTR, ovl.fmt.RRF4.r3,
+                                     ovl.fmt.RRF4.m4, ovl.fmt.RRF4.r1,
+                                     ovl.fmt.RRF4.r2); goto ok;
+   case 0xb900: s390_format_RRE_RR(s390_irgen_LPGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb901: s390_format_RRE_RR(s390_irgen_LNGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb902: s390_format_RRE_RR(s390_irgen_LTGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb903: s390_format_RRE_RR(s390_irgen_LCGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb904: s390_format_RRE_RR(s390_irgen_LGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb905: /* LURAG */ goto unimplemented;
+   case 0xb906: s390_format_RRE_RR(s390_irgen_LGBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb907: s390_format_RRE_RR(s390_irgen_LGHR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb908: s390_format_RRE_RR(s390_irgen_AGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb909: s390_format_RRE_RR(s390_irgen_SGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb90a: s390_format_RRE_RR(s390_irgen_ALGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb90b: s390_format_RRE_RR(s390_irgen_SLGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb90c: s390_format_RRE_RR(s390_irgen_MSGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb90d: s390_format_RRE_RR(s390_irgen_DSGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb90e: /* EREGG */ goto unimplemented;
+   case 0xb90f: s390_format_RRE_RR(s390_irgen_LRVGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb910: s390_format_RRE_RR(s390_irgen_LPGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb911: s390_format_RRE_RR(s390_irgen_LNGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb912: s390_format_RRE_RR(s390_irgen_LTGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb913: s390_format_RRE_RR(s390_irgen_LCGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb914: s390_format_RRE_RR(s390_irgen_LGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb916: s390_format_RRE_RR(s390_irgen_LLGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb917: s390_format_RRE_RR(s390_irgen_LLGTR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb918: s390_format_RRE_RR(s390_irgen_AGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb919: s390_format_RRE_RR(s390_irgen_SGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb91a: s390_format_RRE_RR(s390_irgen_ALGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb91b: s390_format_RRE_RR(s390_irgen_SLGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb91c: s390_format_RRE_RR(s390_irgen_MSGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb91d: s390_format_RRE_RR(s390_irgen_DSGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb91e: /* KMAC */ goto unimplemented;
+   case 0xb91f: s390_format_RRE_RR(s390_irgen_LRVR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb920: s390_format_RRE_RR(s390_irgen_CGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb921: s390_format_RRE_RR(s390_irgen_CLGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb925: /* STURG */ goto unimplemented;
+   case 0xb926: s390_format_RRE_RR(s390_irgen_LBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb927: s390_format_RRE_RR(s390_irgen_LHR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb928: /* PCKMO */ goto unimplemented;
+   case 0xb92a: /* KMF */ goto unimplemented;
+   case 0xb92b: /* KMO */ goto unimplemented;
+   case 0xb92c: /* PCC */ goto unimplemented;
+   case 0xb92d: /* KMCTR */ goto unimplemented;
+   case 0xb92e: /* KM */ goto unimplemented;
+   case 0xb92f: /* KMC */ goto unimplemented;
+   case 0xb930: s390_format_RRE_RR(s390_irgen_CGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb931: s390_format_RRE_RR(s390_irgen_CLGFR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb93e: /* KIMD */ goto unimplemented;
+   case 0xb93f: /* KLMD */ goto unimplemented;
+   case 0xb941: s390_format_RRF_UURF(s390_irgen_CFDTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb942: s390_format_RRF_UURF(s390_irgen_CLGDTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb943: s390_format_RRF_UURF(s390_irgen_CLFDTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb946: s390_format_RRE_RR(s390_irgen_BCTGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb949: s390_format_RRF_UURF(s390_irgen_CFXTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb94a: s390_format_RRF_UURF(s390_irgen_CLGXTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb94b: s390_format_RRF_UURF(s390_irgen_CLFXTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb951: s390_format_RRF_UUFR(s390_irgen_CDFTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb952: s390_format_RRF_UUFR(s390_irgen_CDLGTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb953: s390_format_RRF_UUFR(s390_irgen_CDLFTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb959: s390_format_RRF_UUFR(s390_irgen_CXFTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb95a: s390_format_RRF_UUFR(s390_irgen_CXLGTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb95b: s390_format_RRF_UUFR(s390_irgen_CXLFTR, ovl.fmt.RRF2.m3,
+                                     ovl.fmt.RRF2.m4, ovl.fmt.RRF2.r1,
+                                     ovl.fmt.RRF2.r2);  goto ok;
+   case 0xb960: /* CGRT */ goto unimplemented;
+   case 0xb961: /* CLGRT */ goto unimplemented;
+   case 0xb972: /* CRT */ goto unimplemented;
+   case 0xb973: /* CLRT */ goto unimplemented;
+   case 0xb980: s390_format_RRE_RR(s390_irgen_NGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb981: s390_format_RRE_RR(s390_irgen_OGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb982: s390_format_RRE_RR(s390_irgen_XGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb983: s390_format_RRE_RR(s390_irgen_FLOGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb984: s390_format_RRE_RR(s390_irgen_LLGCR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb985: s390_format_RRE_RR(s390_irgen_LLGHR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb986: s390_format_RRE_RR(s390_irgen_MLGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb987: s390_format_RRE_RR(s390_irgen_DLGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb988: s390_format_RRE_RR(s390_irgen_ALCGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb989: s390_format_RRE_RR(s390_irgen_SLBGR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb98a: /* CSPG */ goto unimplemented;
+   case 0xb98d: /* EPSW */ goto unimplemented;
+   case 0xb98e: /* IDTE */ goto unimplemented;
+   case 0xb98f: /* CRDTE */ goto unimplemented;
+   case 0xb990: s390_format_RRF_M0RERE(s390_irgen_TRTT, ovl.fmt.RRF3.r3,
+                                   ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);  goto ok;
+   case 0xb991: s390_format_RRF_M0RERE(s390_irgen_TRTO, ovl.fmt.RRF3.r3,
+                                   ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);  goto ok;
+   case 0xb992: s390_format_RRF_M0RERE(s390_irgen_TROT, ovl.fmt.RRF3.r3,
+                                   ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);  goto ok;
+   case 0xb993: s390_format_RRF_M0RERE(s390_irgen_TROO, ovl.fmt.RRF3.r3,
+                                   ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);  goto ok;
+   case 0xb994: s390_format_RRE_RR(s390_irgen_LLCR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb995: s390_format_RRE_RR(s390_irgen_LLHR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb996: s390_format_RRE_RR(s390_irgen_MLR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb997: s390_format_RRE_RR(s390_irgen_DLR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb998: s390_format_RRE_RR(s390_irgen_ALCR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb999: s390_format_RRE_RR(s390_irgen_SLBR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb99a: /* EPAIR */ goto unimplemented;
+   case 0xb99b: /* ESAIR */ goto unimplemented;
+   case 0xb99d: /* ESEA */ goto unimplemented;
+   case 0xb99e: /* PTI */ goto unimplemented;
+   case 0xb99f: /* SSAIR */ goto unimplemented;
+   case 0xb9a2: /* PTF */ goto unimplemented;
+   case 0xb9aa: /* LPTEA */ goto unimplemented;
+   case 0xb9ae: /* RRBM */ goto unimplemented;
+   case 0xb9af: /* PFMF */ goto unimplemented;
+   case 0xb9b0: s390_format_RRF_M0RERE(s390_irgen_CU14, ovl.fmt.RRF3.r3,
+                                       ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);
+      goto ok;
+   case 0xb9b1: s390_format_RRF_M0RERE(s390_irgen_CU24, ovl.fmt.RRF3.r3,
+                                       ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2);
+      goto ok;
+   case 0xb9b2: s390_format_RRE_RR(s390_irgen_CU41, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb9b3: s390_format_RRE_RR(s390_irgen_CU42, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb9bd: /* TRTRE */ goto unimplemented;
+   case 0xb9be: /* SRSTU */ goto unimplemented;
+   case 0xb9bf: /* TRTE */ goto unimplemented;
+   case 0xb9c8: s390_format_RRF_R0RR2(s390_irgen_AHHHR, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9c9: s390_format_RRF_R0RR2(s390_irgen_SHHHR, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9ca: s390_format_RRF_R0RR2(s390_irgen_ALHHHR, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9cb: s390_format_RRF_R0RR2(s390_irgen_SLHHHR, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9cd: s390_format_RRE_RR(s390_irgen_CHHR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb9cf: s390_format_RRE_RR(s390_irgen_CLHHR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb9d8: s390_format_RRF_R0RR2(s390_irgen_AHHLR, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9d9: s390_format_RRF_R0RR2(s390_irgen_SHHLR, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9da: s390_format_RRF_R0RR2(s390_irgen_ALHHLR, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9db: s390_format_RRF_R0RR2(s390_irgen_SLHHLR, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9dd: s390_format_RRE_RR(s390_irgen_CHLR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb9df: s390_format_RRE_RR(s390_irgen_CLHLR, ovl.fmt.RRE.r1,
+                                   ovl.fmt.RRE.r2);  goto ok;
+   case 0xb9e1: /* POPCNT */ goto unimplemented;
+   case 0xb9e2: s390_format_RRF_U0RR(s390_irgen_LOCGR, ovl.fmt.RRF3.r3,
+                                     ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2,
+                                     S390_XMNM_LOCGR);  goto ok;
+   case 0xb9e4: s390_format_RRF_R0RR2(s390_irgen_NGRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9e6: s390_format_RRF_R0RR2(s390_irgen_OGRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9e7: s390_format_RRF_R0RR2(s390_irgen_XGRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9e8: s390_format_RRF_R0RR2(s390_irgen_AGRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9e9: s390_format_RRF_R0RR2(s390_irgen_SGRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9ea: s390_format_RRF_R0RR2(s390_irgen_ALGRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9eb: s390_format_RRF_R0RR2(s390_irgen_SLGRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9f2: s390_format_RRF_U0RR(s390_irgen_LOCR, ovl.fmt.RRF3.r3,
+                                     ovl.fmt.RRF3.r1, ovl.fmt.RRF3.r2,
+                                     S390_XMNM_LOCR);  goto ok;
+   case 0xb9f4: s390_format_RRF_R0RR2(s390_irgen_NRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9f6: s390_format_RRF_R0RR2(s390_irgen_ORK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9f7: s390_format_RRF_R0RR2(s390_irgen_XRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9f8: s390_format_RRF_R0RR2(s390_irgen_ARK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9f9: s390_format_RRF_R0RR2(s390_irgen_SRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9fa: s390_format_RRF_R0RR2(s390_irgen_ALRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   case 0xb9fb: s390_format_RRF_R0RR2(s390_irgen_SLRK, ovl.fmt.RRF4.r3,
+                                      ovl.fmt.RRF4.r1, ovl.fmt.RRF4.r2);
+                                      goto ok;
+   }
+
+   switch ((ovl.value & 0xff000000) >> 24) {
+   case 0x40: s390_format_RX_RRRD(s390_irgen_STH, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x41: s390_format_RX_RRRD(s390_irgen_LA, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x42: s390_format_RX_RRRD(s390_irgen_STC, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x43: s390_format_RX_RRRD(s390_irgen_IC, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x44: s390_format_RX_RRRD(s390_irgen_EX, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x45: /* BAL */ goto unimplemented;
+   case 0x46: s390_format_RX_RRRD(s390_irgen_BCT, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x47: s390_format_RX(s390_irgen_BC, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                             ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x48: s390_format_RX_RRRD(s390_irgen_LH, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x49: s390_format_RX_RRRD(s390_irgen_CH, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x4a: s390_format_RX_RRRD(s390_irgen_AH, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x4b: s390_format_RX_RRRD(s390_irgen_SH, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x4c: s390_format_RX_RRRD(s390_irgen_MH, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x4d: s390_format_RX_RRRD(s390_irgen_BAS, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x4e: s390_format_RX_RRRD(s390_irgen_CVD, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x4f: s390_format_RX_RRRD(s390_irgen_CVB, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x50: s390_format_RX_RRRD(s390_irgen_ST, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x51: s390_format_RX_RRRD(s390_irgen_LAE, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x54: s390_format_RX_RRRD(s390_irgen_N, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x55: s390_format_RX_RRRD(s390_irgen_CL, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x56: s390_format_RX_RRRD(s390_irgen_O, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x57: s390_format_RX_RRRD(s390_irgen_X, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x58: s390_format_RX_RRRD(s390_irgen_L, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x59: s390_format_RX_RRRD(s390_irgen_C, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x5a: s390_format_RX_RRRD(s390_irgen_A, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x5b: s390_format_RX_RRRD(s390_irgen_S, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x5c: s390_format_RX_RRRD(s390_irgen_M, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x5d: s390_format_RX_RRRD(s390_irgen_D, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x5e: s390_format_RX_RRRD(s390_irgen_AL, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x5f: s390_format_RX_RRRD(s390_irgen_SL, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x60: s390_format_RX_FRRD(s390_irgen_STD, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x67: /* MXD */ goto unimplemented;
+   case 0x68: s390_format_RX_FRRD(s390_irgen_LD, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x69: /* CD */ goto unimplemented;
+   case 0x6a: /* AD */ goto unimplemented;
+   case 0x6b: /* SD */ goto unimplemented;
+   case 0x6c: /* MD */ goto unimplemented;
+   case 0x6d: /* DD */ goto unimplemented;
+   case 0x6e: /* AW */ goto unimplemented;
+   case 0x6f: /* SW */ goto unimplemented;
+   case 0x70: s390_format_RX_FRRD(s390_irgen_STE, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x71: s390_format_RX_RRRD(s390_irgen_MS, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x78: s390_format_RX_FRRD(s390_irgen_LE, ovl.fmt.RX.r1, ovl.fmt.RX.x2,
+                                  ovl.fmt.RX.b2, ovl.fmt.RX.d2);  goto ok;
+   case 0x79: /* CE */ goto unimplemented;
+   case 0x7a: /* AE */ goto unimplemented;
+   case 0x7b: /* SE */ goto unimplemented;
+   case 0x7c: /* MDE */ goto unimplemented;
+   case 0x7d: /* DE */ goto unimplemented;
+   case 0x7e: /* AU */ goto unimplemented;
+   case 0x7f: /* SU */ goto unimplemented;
+   case 0x83: /* DIAG */ goto unimplemented;
+   case 0x84: s390_format_RSI_RRP(s390_irgen_BRXH, ovl.fmt.RSI.r1,
+                                  ovl.fmt.RSI.r3, ovl.fmt.RSI.i2);  goto ok;
+   case 0x85: s390_format_RSI_RRP(s390_irgen_BRXLE, ovl.fmt.RSI.r1,
+                                  ovl.fmt.RSI.r3, ovl.fmt.RSI.i2);  goto ok;
+   case 0x86: s390_format_RS_RRRD(s390_irgen_BXH, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0x87: s390_format_RS_RRRD(s390_irgen_BXLE, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0x88: s390_format_RS_R0RD(s390_irgen_SRL, ovl.fmt.RS.r1, ovl.fmt.RS.b2,
+                                  ovl.fmt.RS.d2);  goto ok;
+   case 0x89: s390_format_RS_R0RD(s390_irgen_SLL, ovl.fmt.RS.r1, ovl.fmt.RS.b2,
+                                  ovl.fmt.RS.d2);  goto ok;
+   case 0x8a: s390_format_RS_R0RD(s390_irgen_SRA, ovl.fmt.RS.r1, ovl.fmt.RS.b2,
+                                  ovl.fmt.RS.d2);  goto ok;
+   case 0x8b: s390_format_RS_R0RD(s390_irgen_SLA, ovl.fmt.RS.r1, ovl.fmt.RS.b2,
+                                  ovl.fmt.RS.d2);  goto ok;
+   case 0x8c: s390_format_RS_R0RD(s390_irgen_SRDL, ovl.fmt.RS.r1, ovl.fmt.RS.b2,
+                                  ovl.fmt.RS.d2);  goto ok;
+   case 0x8d: s390_format_RS_R0RD(s390_irgen_SLDL, ovl.fmt.RS.r1, ovl.fmt.RS.b2,
+                                  ovl.fmt.RS.d2);  goto ok;
+   case 0x8e: s390_format_RS_R0RD(s390_irgen_SRDA, ovl.fmt.RS.r1, ovl.fmt.RS.b2,
+                                  ovl.fmt.RS.d2);  goto ok;
+   case 0x8f: s390_format_RS_R0RD(s390_irgen_SLDA, ovl.fmt.RS.r1, ovl.fmt.RS.b2,
+                                  ovl.fmt.RS.d2);  goto ok;
+   case 0x90: s390_format_RS_RRRD(s390_irgen_STM, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0x91: s390_format_SI_URD(s390_irgen_TM, ovl.fmt.SI.i2, ovl.fmt.SI.b1,
+                                 ovl.fmt.SI.d1);  goto ok;
+   case 0x92: s390_format_SI_URD(s390_irgen_MVI, ovl.fmt.SI.i2, ovl.fmt.SI.b1,
+                                 ovl.fmt.SI.d1);  goto ok;
+   case 0x94: s390_format_SI_URD(s390_irgen_NI, ovl.fmt.SI.i2, ovl.fmt.SI.b1,
+                                 ovl.fmt.SI.d1);  goto ok;
+   case 0x95: s390_format_SI_URD(s390_irgen_CLI, ovl.fmt.SI.i2, ovl.fmt.SI.b1,
+                                 ovl.fmt.SI.d1);  goto ok;
+   case 0x96: s390_format_SI_URD(s390_irgen_OI, ovl.fmt.SI.i2, ovl.fmt.SI.b1,
+                                 ovl.fmt.SI.d1);  goto ok;
+   case 0x97: s390_format_SI_URD(s390_irgen_XI, ovl.fmt.SI.i2, ovl.fmt.SI.b1,
+                                 ovl.fmt.SI.d1);  goto ok;
+   case 0x98: s390_format_RS_RRRD(s390_irgen_LM, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0x99: /* TRACE */ goto unimplemented;
+   case 0x9a: s390_format_RS_AARD(s390_irgen_LAM, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0x9b: s390_format_RS_AARD(s390_irgen_STAM, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0xa8: s390_format_RS_RRRD(s390_irgen_MVCLE, ovl.fmt.RS.r1,
+                                  ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2);
+                                  goto ok;
+   case 0xa9: s390_format_RS_RRRD(s390_irgen_CLCLE, ovl.fmt.RS.r1,
+                                  ovl.fmt.RS.r3, ovl.fmt.RS.b2, ovl.fmt.RS.d2);
+                                  goto ok;
+   case 0xac: /* STNSM */ goto unimplemented;
+   case 0xad: /* STOSM */ goto unimplemented;
+   case 0xae: /* SIGP */ goto unimplemented;
+   case 0xaf: /* MC */ goto unimplemented;
+   case 0xb1: /* LRA */ goto unimplemented;
+   case 0xb6: /* STCTL */ goto unimplemented;
+   case 0xb7: /* LCTL */ goto unimplemented;
+   case 0xba: s390_format_RS_RRRD(s390_irgen_CS, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0xbb: s390_format_RS_RRRD(s390_irgen_CDS, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0xbd: s390_format_RS_RURD(s390_irgen_CLM, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0xbe: s390_format_RS_RURD(s390_irgen_STCM, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   case 0xbf: s390_format_RS_RURD(s390_irgen_ICM, ovl.fmt.RS.r1, ovl.fmt.RS.r3,
+                                  ovl.fmt.RS.b2, ovl.fmt.RS.d2);  goto ok;
+   }
+
+   return S390_DECODE_UNKNOWN_INSN;
+
+ok:
+   return S390_DECODE_OK;
+
+unimplemented:
+   return S390_DECODE_UNIMPLEMENTED_INSN;
+}
+
+static s390_decode_t
+s390_decode_6byte_and_irgen(const UChar *bytes)
+{
+   typedef union {
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int r3  :  4;
+         unsigned int i2  : 16;
+         unsigned int     :  8;
+         unsigned int op2 :  8;
+      } RIE;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int r2  :  4;
+         unsigned int i3  :  8;
+         unsigned int i4  :  8;
+         unsigned int i5  :  8;
+         unsigned int op2 :  8;
+      } RIE_RRUUU;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int     :  4;
+         unsigned int i2  : 16;
+         unsigned int m3  :  4;
+         unsigned int     :  4;
+         unsigned int op2 :  8;
+      } RIEv1;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int r2  :  4;
+         unsigned int i4  : 16;
+         unsigned int m3  :  4;
+         unsigned int     :  4;
+         unsigned int op2 :  8;
+      } RIE_RRPU;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int m3  :  4;
+         unsigned int i4  : 16;
+         unsigned int i2  :  8;
+         unsigned int op2 :  8;
+      } RIEv3;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int op2 :  4;
+         unsigned int i2  : 32;
+      } RIL;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int m3  :  4;
+         unsigned int b4  :  4;
+         unsigned int d4  : 12;
+         unsigned int i2  :  8;
+         unsigned int op2 :  8;
+      } RIS;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int r2  :  4;
+         unsigned int b4  :  4;
+         unsigned int d4  : 12;
+         unsigned int m3  :  4;
+         unsigned int     :  4;
+         unsigned int op2 :  8;
+      } RRS;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int l1  :  4;
+         unsigned int     :  4;
+         unsigned int b1  :  4;
+         unsigned int d1  : 12;
+         unsigned int     :  8;
+         unsigned int op2 :  8;
+      } RSL;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int r3  :  4;
+         unsigned int b2  :  4;
+         unsigned int dl2 : 12;
+         unsigned int dh2 :  8;
+         unsigned int op2 :  8;
+      } RSY;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int x2  :  4;
+         unsigned int b2  :  4;
+         unsigned int d2  : 12;
+         unsigned int     :  8;
+         unsigned int op2 :  8;
+      } RXE;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r3  :  4;
+         unsigned int x2  :  4;
+         unsigned int b2  :  4;
+         unsigned int d2  : 12;
+         unsigned int r1  :  4;
+         unsigned int     :  4;
+         unsigned int op2 :  8;
+      } RXF;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r1  :  4;
+         unsigned int x2  :  4;
+         unsigned int b2  :  4;
+         unsigned int dl2 : 12;
+         unsigned int dh2 :  8;
+         unsigned int op2 :  8;
+      } RXY;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int i2  :  8;
+         unsigned int b1  :  4;
+         unsigned int dl1 : 12;
+         unsigned int dh1 :  8;
+         unsigned int op2 :  8;
+      } SIY;
+      struct {
+         unsigned int op :  8;
+         unsigned int l  :  8;
+         unsigned int b1 :  4;
+         unsigned int d1 : 12;
+         unsigned int b2 :  4;
+         unsigned int d2 : 12;
+      } SS;
+      struct {
+         unsigned int op :  8;
+         unsigned int l1 :  4;
+         unsigned int l2 :  4;
+         unsigned int b1 :  4;
+         unsigned int d1 : 12;
+         unsigned int b2 :  4;
+         unsigned int d2 : 12;
+      } SS_LLRDRD;
+      struct {
+         unsigned int op :  8;
+         unsigned int r1 :  4;
+         unsigned int r3 :  4;
+         unsigned int b2 :  4;
+         unsigned int d2 : 12;
+         unsigned int b4 :  4;
+         unsigned int d4 : 12;
+      } SS_RRRDRD2;
+      struct {
+         unsigned int op : 16;
+         unsigned int b1 :  4;
+         unsigned int d1 : 12;
+         unsigned int b2 :  4;
+         unsigned int d2 : 12;
+      } SSE;
+      struct {
+         unsigned int op1 :  8;
+         unsigned int r3  :  4;
+         unsigned int op2 :  4;
+         unsigned int b1  :  4;
+         unsigned int d1  : 12;
+         unsigned int b2  :  4;
+         unsigned int d2  : 12;
+      } SSF;
+      struct {
+         unsigned int op : 16;
+         unsigned int b1 :  4;
+         unsigned int d1 : 12;
+         unsigned int i2 : 16;
+      } SIL;
+   } formats;
+   union {
+      formats fmt;
+      ULong value;
+   } ovl;
+
+   vassert(sizeof(formats) == 6);
+
+   ((UChar *)(&ovl.value))[0] = bytes[0];
+   ((UChar *)(&ovl.value))[1] = bytes[1];
+   ((UChar *)(&ovl.value))[2] = bytes[2];
+   ((UChar *)(&ovl.value))[3] = bytes[3];
+   ((UChar *)(&ovl.value))[4] = bytes[4];
+   ((UChar *)(&ovl.value))[5] = bytes[5];
+   ((UChar *)(&ovl.value))[6] = 0x0;
+   ((UChar *)(&ovl.value))[7] = 0x0;
+
+   switch ((ovl.value >> 16) & 0xff00000000ffULL) {
+   case 0xe30000000002ULL: s390_format_RXY_RRRD(s390_irgen_LTG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000003ULL: /* LRAG */ goto unimplemented;
+   case 0xe30000000004ULL: s390_format_RXY_RRRD(s390_irgen_LG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000006ULL: s390_format_RXY_RRRD(s390_irgen_CVBY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000008ULL: s390_format_RXY_RRRD(s390_irgen_AG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000009ULL: s390_format_RXY_RRRD(s390_irgen_SG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000000aULL: s390_format_RXY_RRRD(s390_irgen_ALG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000000bULL: s390_format_RXY_RRRD(s390_irgen_SLG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000000cULL: s390_format_RXY_RRRD(s390_irgen_MSG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000000dULL: s390_format_RXY_RRRD(s390_irgen_DSG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000000eULL: /* CVBG */ goto unimplemented;
+   case 0xe3000000000fULL: s390_format_RXY_RRRD(s390_irgen_LRVG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000012ULL: s390_format_RXY_RRRD(s390_irgen_LT, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000013ULL: /* LRAY */ goto unimplemented;
+   case 0xe30000000014ULL: s390_format_RXY_RRRD(s390_irgen_LGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000015ULL: s390_format_RXY_RRRD(s390_irgen_LGH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000016ULL: s390_format_RXY_RRRD(s390_irgen_LLGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000017ULL: s390_format_RXY_RRRD(s390_irgen_LLGT, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000018ULL: s390_format_RXY_RRRD(s390_irgen_AGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000019ULL: s390_format_RXY_RRRD(s390_irgen_SGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000001aULL: s390_format_RXY_RRRD(s390_irgen_ALGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000001bULL: s390_format_RXY_RRRD(s390_irgen_SLGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000001cULL: s390_format_RXY_RRRD(s390_irgen_MSGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000001dULL: s390_format_RXY_RRRD(s390_irgen_DSGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000001eULL: s390_format_RXY_RRRD(s390_irgen_LRV, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000001fULL: s390_format_RXY_RRRD(s390_irgen_LRVH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000020ULL: s390_format_RXY_RRRD(s390_irgen_CG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000021ULL: s390_format_RXY_RRRD(s390_irgen_CLG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000024ULL: s390_format_RXY_RRRD(s390_irgen_STG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000025ULL: /* NTSTG */ goto unimplemented;
+   case 0xe30000000026ULL: s390_format_RXY_RRRD(s390_irgen_CVDY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000002eULL: /* CVDG */ goto unimplemented;
+   case 0xe3000000002fULL: s390_format_RXY_RRRD(s390_irgen_STRVG,
+                                                ovl.fmt.RXY.r1, ovl.fmt.RXY.x2,
+                                                ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000030ULL: s390_format_RXY_RRRD(s390_irgen_CGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000031ULL: s390_format_RXY_RRRD(s390_irgen_CLGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000032ULL: s390_format_RXY_RRRD(s390_irgen_LTGF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000034ULL: s390_format_RXY_RRRD(s390_irgen_CGH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000036ULL: s390_format_RXY_URRD(s390_irgen_PFD, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000003eULL: s390_format_RXY_RRRD(s390_irgen_STRV, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000003fULL: s390_format_RXY_RRRD(s390_irgen_STRVH,
+                                                ovl.fmt.RXY.r1, ovl.fmt.RXY.x2,
+                                                ovl.fmt.RXY.b2, ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000046ULL: s390_format_RXY_RRRD(s390_irgen_BCTG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000050ULL: s390_format_RXY_RRRD(s390_irgen_STY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000051ULL: s390_format_RXY_RRRD(s390_irgen_MSY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000054ULL: s390_format_RXY_RRRD(s390_irgen_NY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000055ULL: s390_format_RXY_RRRD(s390_irgen_CLY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000056ULL: s390_format_RXY_RRRD(s390_irgen_OY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000057ULL: s390_format_RXY_RRRD(s390_irgen_XY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000058ULL: s390_format_RXY_RRRD(s390_irgen_LY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000059ULL: s390_format_RXY_RRRD(s390_irgen_CY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000005aULL: s390_format_RXY_RRRD(s390_irgen_AY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000005bULL: s390_format_RXY_RRRD(s390_irgen_SY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000005cULL: s390_format_RXY_RRRD(s390_irgen_MFY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000005eULL: s390_format_RXY_RRRD(s390_irgen_ALY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000005fULL: s390_format_RXY_RRRD(s390_irgen_SLY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000070ULL: s390_format_RXY_RRRD(s390_irgen_STHY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000071ULL: s390_format_RXY_RRRD(s390_irgen_LAY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000072ULL: s390_format_RXY_RRRD(s390_irgen_STCY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000073ULL: s390_format_RXY_RRRD(s390_irgen_ICY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000075ULL: s390_format_RXY_RRRD(s390_irgen_LAEY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000076ULL: s390_format_RXY_RRRD(s390_irgen_LB, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000077ULL: s390_format_RXY_RRRD(s390_irgen_LGB, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000078ULL: s390_format_RXY_RRRD(s390_irgen_LHY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000079ULL: s390_format_RXY_RRRD(s390_irgen_CHY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000007aULL: s390_format_RXY_RRRD(s390_irgen_AHY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000007bULL: s390_format_RXY_RRRD(s390_irgen_SHY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000007cULL: s390_format_RXY_RRRD(s390_irgen_MHY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000080ULL: s390_format_RXY_RRRD(s390_irgen_NG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000081ULL: s390_format_RXY_RRRD(s390_irgen_OG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000082ULL: s390_format_RXY_RRRD(s390_irgen_XG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000085ULL: /* LGAT */ goto unimplemented;
+   case 0xe30000000086ULL: s390_format_RXY_RRRD(s390_irgen_MLG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000087ULL: s390_format_RXY_RRRD(s390_irgen_DLG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000088ULL: s390_format_RXY_RRRD(s390_irgen_ALCG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000089ULL: s390_format_RXY_RRRD(s390_irgen_SLBG, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000008eULL: s390_format_RXY_RRRD(s390_irgen_STPQ, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000008fULL: s390_format_RXY_RRRD(s390_irgen_LPQ, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000090ULL: s390_format_RXY_RRRD(s390_irgen_LLGC, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000091ULL: s390_format_RXY_RRRD(s390_irgen_LLGH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000094ULL: s390_format_RXY_RRRD(s390_irgen_LLC, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000095ULL: s390_format_RXY_RRRD(s390_irgen_LLH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000096ULL: s390_format_RXY_RRRD(s390_irgen_ML, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000097ULL: s390_format_RXY_RRRD(s390_irgen_DL, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000098ULL: s390_format_RXY_RRRD(s390_irgen_ALC, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe30000000099ULL: s390_format_RXY_RRRD(s390_irgen_SLB, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe3000000009cULL: /* LLGTAT */ goto unimplemented;
+   case 0xe3000000009dULL: /* LLGFAT */ goto unimplemented;
+   case 0xe3000000009fULL: /* LAT */ goto unimplemented;
+   case 0xe300000000c0ULL: s390_format_RXY_RRRD(s390_irgen_LBH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe300000000c2ULL: s390_format_RXY_RRRD(s390_irgen_LLCH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe300000000c3ULL: s390_format_RXY_RRRD(s390_irgen_STCH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe300000000c4ULL: s390_format_RXY_RRRD(s390_irgen_LHH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe300000000c6ULL: s390_format_RXY_RRRD(s390_irgen_LLHH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe300000000c7ULL: s390_format_RXY_RRRD(s390_irgen_STHH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe300000000c8ULL: /* LFHAT */ goto unimplemented;
+   case 0xe300000000caULL: s390_format_RXY_RRRD(s390_irgen_LFH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe300000000cbULL: s390_format_RXY_RRRD(s390_irgen_STFH, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe300000000cdULL: s390_format_RXY_RRRD(s390_irgen_CHF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xe300000000cfULL: s390_format_RXY_RRRD(s390_irgen_CLHF, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xeb0000000004ULL: s390_format_RSY_RRRD(s390_irgen_LMG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000000aULL: s390_format_RSY_RRRD(s390_irgen_SRAG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000000bULL: s390_format_RSY_RRRD(s390_irgen_SLAG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000000cULL: s390_format_RSY_RRRD(s390_irgen_SRLG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000000dULL: s390_format_RSY_RRRD(s390_irgen_SLLG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000000fULL: /* TRACG */ goto unimplemented;
+   case 0xeb0000000014ULL: s390_format_RSY_RRRD(s390_irgen_CSY, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000001cULL: s390_format_RSY_RRRD(s390_irgen_RLLG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000001dULL: s390_format_RSY_RRRD(s390_irgen_RLL, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000020ULL: s390_format_RSY_RURD(s390_irgen_CLMH, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000021ULL: s390_format_RSY_RURD(s390_irgen_CLMY, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000023ULL: /* CLT */ goto unimplemented;
+   case 0xeb0000000024ULL: s390_format_RSY_RRRD(s390_irgen_STMG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000025ULL: /* STCTG */ goto unimplemented;
+   case 0xeb0000000026ULL: s390_format_RSY_RRRD(s390_irgen_STMH, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000002bULL: /* CLGT */ goto unimplemented;
+   case 0xeb000000002cULL: s390_format_RSY_RURD(s390_irgen_STCMH,
+                                                ovl.fmt.RSY.r1, ovl.fmt.RSY.r3,
+                                                ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000002dULL: s390_format_RSY_RURD(s390_irgen_STCMY,
+                                                ovl.fmt.RSY.r1, ovl.fmt.RSY.r3,
+                                                ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000002fULL: /* LCTLG */ goto unimplemented;
+   case 0xeb0000000030ULL: s390_format_RSY_RRRD(s390_irgen_CSG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000031ULL: s390_format_RSY_RRRD(s390_irgen_CDSY, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000003eULL: s390_format_RSY_RRRD(s390_irgen_CDSG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000044ULL: s390_format_RSY_RRRD(s390_irgen_BXHG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000045ULL: s390_format_RSY_RRRD(s390_irgen_BXLEG,
+                                                ovl.fmt.RSY.r1, ovl.fmt.RSY.r3,
+                                                ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000004cULL: s390_format_RSY_RRRD(s390_irgen_ECAG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2, 
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000051ULL: s390_format_SIY_URD(s390_irgen_TMY, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb0000000052ULL: s390_format_SIY_URD(s390_irgen_MVIY, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb0000000054ULL: s390_format_SIY_URD(s390_irgen_NIY, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb0000000055ULL: s390_format_SIY_URD(s390_irgen_CLIY, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb0000000056ULL: s390_format_SIY_URD(s390_irgen_OIY, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb0000000057ULL: s390_format_SIY_URD(s390_irgen_XIY, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb000000006aULL: s390_format_SIY_IRD(s390_irgen_ASI, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb000000006eULL: s390_format_SIY_IRD(s390_irgen_ALSI, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb000000007aULL: s390_format_SIY_IRD(s390_irgen_AGSI, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb000000007eULL: s390_format_SIY_IRD(s390_irgen_ALGSI, ovl.fmt.SIY.i2,
+                                               ovl.fmt.SIY.b1, ovl.fmt.SIY.dl1,
+                                               ovl.fmt.SIY.dh1);  goto ok;
+   case 0xeb0000000080ULL: s390_format_RSY_RURD(s390_irgen_ICMH, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000081ULL: s390_format_RSY_RURD(s390_irgen_ICMY, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000008eULL: /* MVCLU */ goto unimplemented;
+   case 0xeb000000008fULL: /* CLCLU */ goto unimplemented;
+   case 0xeb0000000090ULL: s390_format_RSY_RRRD(s390_irgen_STMY, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000096ULL: s390_format_RSY_RRRD(s390_irgen_LMH, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb0000000098ULL: s390_format_RSY_RRRD(s390_irgen_LMY, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000009aULL: s390_format_RSY_AARD(s390_irgen_LAMY, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb000000009bULL: s390_format_RSY_AARD(s390_irgen_STAMY,
+                                                ovl.fmt.RSY.r1, ovl.fmt.RSY.r3,
+                                                ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000c0ULL: /* TP */ goto unimplemented;
+   case 0xeb00000000dcULL: s390_format_RSY_RRRD(s390_irgen_SRAK, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000ddULL: s390_format_RSY_RRRD(s390_irgen_SLAK, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000deULL: s390_format_RSY_RRRD(s390_irgen_SRLK, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000dfULL: s390_format_RSY_RRRD(s390_irgen_SLLK, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000e2ULL: s390_format_RSY_RDRM(s390_irgen_LOCG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2,
+                                                S390_XMNM_LOCG);  goto ok;
+   case 0xeb00000000e3ULL: s390_format_RSY_RDRM(s390_irgen_STOCG,
+                                                ovl.fmt.RSY.r1, ovl.fmt.RSY.r3,
+                                                ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2,
+                                                S390_XMNM_STOCG);  goto ok;
+   case 0xeb00000000e4ULL: s390_format_RSY_RRRD(s390_irgen_LANG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000e6ULL: s390_format_RSY_RRRD(s390_irgen_LAOG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000e7ULL: s390_format_RSY_RRRD(s390_irgen_LAXG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000e8ULL: s390_format_RSY_RRRD(s390_irgen_LAAG, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000eaULL: s390_format_RSY_RRRD(s390_irgen_LAALG,
+                                                ovl.fmt.RSY.r1, ovl.fmt.RSY.r3,
+                                                ovl.fmt.RSY.b2, ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000f2ULL: s390_format_RSY_RDRM(s390_irgen_LOC, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2, S390_XMNM_LOC);
+                                                goto ok;
+   case 0xeb00000000f3ULL: s390_format_RSY_RDRM(s390_irgen_STOC, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2,
+                                                S390_XMNM_STOC);  goto ok;
+   case 0xeb00000000f4ULL: s390_format_RSY_RRRD(s390_irgen_LAN, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000f6ULL: s390_format_RSY_RRRD(s390_irgen_LAO, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000f7ULL: s390_format_RSY_RRRD(s390_irgen_LAX, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000f8ULL: s390_format_RSY_RRRD(s390_irgen_LAA, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xeb00000000faULL: s390_format_RSY_RRRD(s390_irgen_LAAL, ovl.fmt.RSY.r1,
+                                                ovl.fmt.RSY.r3, ovl.fmt.RSY.b2,
+                                                ovl.fmt.RSY.dl2,
+                                                ovl.fmt.RSY.dh2);  goto ok;
+   case 0xec0000000044ULL: s390_format_RIE_RRP(s390_irgen_BRXHG, ovl.fmt.RIE.r1,
+                                               ovl.fmt.RIE.r3, ovl.fmt.RIE.i2);
+                                               goto ok;
+   case 0xec0000000045ULL: s390_format_RIE_RRP(s390_irgen_BRXLG, ovl.fmt.RIE.r1,
+                                               ovl.fmt.RIE.r3, ovl.fmt.RIE.i2);
+                                               goto ok;
+   case 0xec0000000051ULL: /* RISBLG */ goto unimplemented;
+   case 0xec0000000054ULL: s390_format_RIE_RRUUU(s390_irgen_RNSBG,
+                                                 ovl.fmt.RIE_RRUUU.r1,
+                                                 ovl.fmt.RIE_RRUUU.r2,
+                                                 ovl.fmt.RIE_RRUUU.i3,
+                                                 ovl.fmt.RIE_RRUUU.i4,
+                                                 ovl.fmt.RIE_RRUUU.i5);
+                                                 goto ok;
+   case 0xec0000000055ULL: s390_format_RIE_RRUUU(s390_irgen_RISBG,
+                                                 ovl.fmt.RIE_RRUUU.r1,
+                                                 ovl.fmt.RIE_RRUUU.r2,
+                                                 ovl.fmt.RIE_RRUUU.i3,
+                                                 ovl.fmt.RIE_RRUUU.i4,
+                                                 ovl.fmt.RIE_RRUUU.i5);
+                                                 goto ok;
+   case 0xec0000000056ULL: s390_format_RIE_RRUUU(s390_irgen_ROSBG,
+                                                 ovl.fmt.RIE_RRUUU.r1,
+                                                 ovl.fmt.RIE_RRUUU.r2,
+                                                 ovl.fmt.RIE_RRUUU.i3,
+                                                 ovl.fmt.RIE_RRUUU.i4,
+                                                 ovl.fmt.RIE_RRUUU.i5);
+                                                 goto ok;
+   case 0xec0000000057ULL: s390_format_RIE_RRUUU(s390_irgen_RXSBG,
+                                                 ovl.fmt.RIE_RRUUU.r1,
+                                                 ovl.fmt.RIE_RRUUU.r2,
+                                                 ovl.fmt.RIE_RRUUU.i3,
+                                                 ovl.fmt.RIE_RRUUU.i4,
+                                                 ovl.fmt.RIE_RRUUU.i5);
+                                                 goto ok;
+   case 0xec0000000059ULL: /* RISBGN */ goto unimplemented;
+   case 0xec000000005dULL: /* RISBHG */ goto unimplemented;
+   case 0xec0000000064ULL: s390_format_RIE_RRPU(s390_irgen_CGRJ,
+                                                ovl.fmt.RIE_RRPU.r1,
+                                                ovl.fmt.RIE_RRPU.r2,
+                                                ovl.fmt.RIE_RRPU.i4,
+                                                ovl.fmt.RIE_RRPU.m3);  goto ok;
+   case 0xec0000000065ULL: s390_format_RIE_RRPU(s390_irgen_CLGRJ,
+                                                ovl.fmt.RIE_RRPU.r1,
+                                                ovl.fmt.RIE_RRPU.r2,
+                                                ovl.fmt.RIE_RRPU.i4,
+                                                ovl.fmt.RIE_RRPU.m3);  goto ok;
+   case 0xec0000000070ULL: /* CGIT */ goto unimplemented;
+   case 0xec0000000071ULL: /* CLGIT */ goto unimplemented;
+   case 0xec0000000072ULL: /* CIT */ goto unimplemented;
+   case 0xec0000000073ULL: /* CLFIT */ goto unimplemented;
+   case 0xec0000000076ULL: s390_format_RIE_RRPU(s390_irgen_CRJ,
+                                                ovl.fmt.RIE_RRPU.r1,
+                                                ovl.fmt.RIE_RRPU.r2,
+                                                ovl.fmt.RIE_RRPU.i4,
+                                                ovl.fmt.RIE_RRPU.m3);  goto ok;
+   case 0xec0000000077ULL: s390_format_RIE_RRPU(s390_irgen_CLRJ,
+                                                ovl.fmt.RIE_RRPU.r1,
+                                                ovl.fmt.RIE_RRPU.r2,
+                                                ovl.fmt.RIE_RRPU.i4,
+                                                ovl.fmt.RIE_RRPU.m3);  goto ok;
+   case 0xec000000007cULL: s390_format_RIE_RUPI(s390_irgen_CGIJ,
+                                                ovl.fmt.RIEv3.r1,
+                                                ovl.fmt.RIEv3.m3,
+                                                ovl.fmt.RIEv3.i4,
+                                                ovl.fmt.RIEv3.i2);  goto ok;
+   case 0xec000000007dULL: s390_format_RIE_RUPU(s390_irgen_CLGIJ,
+                                                ovl.fmt.RIEv3.r1,
+                                                ovl.fmt.RIEv3.m3,
+                                                ovl.fmt.RIEv3.i4,
+                                                ovl.fmt.RIEv3.i2);  goto ok;
+   case 0xec000000007eULL: s390_format_RIE_RUPI(s390_irgen_CIJ,
+                                                ovl.fmt.RIEv3.r1,
+                                                ovl.fmt.RIEv3.m3,
+                                                ovl.fmt.RIEv3.i4,
+                                                ovl.fmt.RIEv3.i2);  goto ok;
+   case 0xec000000007fULL: s390_format_RIE_RUPU(s390_irgen_CLIJ,
+                                                ovl.fmt.RIEv3.r1,
+                                                ovl.fmt.RIEv3.m3,
+                                                ovl.fmt.RIEv3.i4,
+                                                ovl.fmt.RIEv3.i2);  goto ok;
+   case 0xec00000000d8ULL: s390_format_RIE_RRI0(s390_irgen_AHIK, ovl.fmt.RIE.r1,
+                                                ovl.fmt.RIE.r3, ovl.fmt.RIE.i2);
+                                                goto ok;
+   case 0xec00000000d9ULL: s390_format_RIE_RRI0(s390_irgen_AGHIK,
+                                                ovl.fmt.RIE.r1, ovl.fmt.RIE.r3,
+                                                ovl.fmt.RIE.i2);  goto ok;
+   case 0xec00000000daULL: s390_format_RIE_RRI0(s390_irgen_ALHSIK,
+                                                ovl.fmt.RIE.r1, ovl.fmt.RIE.r3,
+                                                ovl.fmt.RIE.i2);  goto ok;
+   case 0xec00000000dbULL: s390_format_RIE_RRI0(s390_irgen_ALGHSIK,
+                                                ovl.fmt.RIE.r1, ovl.fmt.RIE.r3,
+                                                ovl.fmt.RIE.i2);  goto ok;
+   case 0xec00000000e4ULL: s390_format_RRS(s390_irgen_CGRB, ovl.fmt.RRS.r1,
+                                           ovl.fmt.RRS.r2, ovl.fmt.RRS.b4,
+                                           ovl.fmt.RRS.d4, ovl.fmt.RRS.m3);
+                                           goto ok;
+   case 0xec00000000e5ULL: s390_format_RRS(s390_irgen_CLGRB, ovl.fmt.RRS.r1,
+                                           ovl.fmt.RRS.r2, ovl.fmt.RRS.b4,
+                                           ovl.fmt.RRS.d4, ovl.fmt.RRS.m3);
+                                           goto ok;
+   case 0xec00000000f6ULL: s390_format_RRS(s390_irgen_CRB, ovl.fmt.RRS.r1,
+                                           ovl.fmt.RRS.r2, ovl.fmt.RRS.b4,
+                                           ovl.fmt.RRS.d4, ovl.fmt.RRS.m3);
+                                           goto ok;
+   case 0xec00000000f7ULL: s390_format_RRS(s390_irgen_CLRB, ovl.fmt.RRS.r1,
+                                           ovl.fmt.RRS.r2, ovl.fmt.RRS.b4,
+                                           ovl.fmt.RRS.d4, ovl.fmt.RRS.m3);
+                                           goto ok;
+   case 0xec00000000fcULL: s390_format_RIS_RURDI(s390_irgen_CGIB,
+                                                 ovl.fmt.RIS.r1, ovl.fmt.RIS.m3,
+                                                 ovl.fmt.RIS.b4, ovl.fmt.RIS.d4,
+                                                 ovl.fmt.RIS.i2);  goto ok;
+   case 0xec00000000fdULL: s390_format_RIS_RURDU(s390_irgen_CLGIB,
+                                                 ovl.fmt.RIS.r1, ovl.fmt.RIS.m3,
+                                                 ovl.fmt.RIS.b4, ovl.fmt.RIS.d4,
+                                                 ovl.fmt.RIS.i2);  goto ok;
+   case 0xec00000000feULL: s390_format_RIS_RURDI(s390_irgen_CIB, ovl.fmt.RIS.r1,
+                                                 ovl.fmt.RIS.m3, ovl.fmt.RIS.b4,
+                                                 ovl.fmt.RIS.d4,
+                                                 ovl.fmt.RIS.i2);  goto ok;
+   case 0xec00000000ffULL: s390_format_RIS_RURDU(s390_irgen_CLIB,
+                                                 ovl.fmt.RIS.r1, ovl.fmt.RIS.m3,
+                                                 ovl.fmt.RIS.b4, ovl.fmt.RIS.d4,
+                                                 ovl.fmt.RIS.i2);  goto ok;
+   case 0xed0000000004ULL: s390_format_RXE_FRRD(s390_irgen_LDEB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000005ULL: s390_format_RXE_FRRD(s390_irgen_LXDB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000006ULL: s390_format_RXE_FRRD(s390_irgen_LXEB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000007ULL: /* MXDB */ goto unimplemented;
+   case 0xed0000000008ULL: /* KEB */ goto unimplemented;
+   case 0xed0000000009ULL: s390_format_RXE_FRRD(s390_irgen_CEB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed000000000aULL: s390_format_RXE_FRRD(s390_irgen_AEB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed000000000bULL: s390_format_RXE_FRRD(s390_irgen_SEB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed000000000cULL: /* MDEB */ goto unimplemented;
+   case 0xed000000000dULL: s390_format_RXE_FRRD(s390_irgen_DEB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed000000000eULL: s390_format_RXF_FRRDF(s390_irgen_MAEB,
+                                                 ovl.fmt.RXF.r3, ovl.fmt.RXF.x2,
+                                                 ovl.fmt.RXF.b2, ovl.fmt.RXF.d2,
+                                                 ovl.fmt.RXF.r1);  goto ok;
+   case 0xed000000000fULL: s390_format_RXF_FRRDF(s390_irgen_MSEB,
+                                                 ovl.fmt.RXF.r3, ovl.fmt.RXF.x2,
+                                                 ovl.fmt.RXF.b2, ovl.fmt.RXF.d2,
+                                                 ovl.fmt.RXF.r1);  goto ok;
+   case 0xed0000000010ULL: s390_format_RXE_FRRD(s390_irgen_TCEB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000011ULL: s390_format_RXE_FRRD(s390_irgen_TCDB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000012ULL: s390_format_RXE_FRRD(s390_irgen_TCXB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000014ULL: s390_format_RXE_FRRD(s390_irgen_SQEB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000015ULL: s390_format_RXE_FRRD(s390_irgen_SQDB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000017ULL: s390_format_RXE_FRRD(s390_irgen_MEEB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000018ULL: /* KDB */ goto unimplemented;
+   case 0xed0000000019ULL: s390_format_RXE_FRRD(s390_irgen_CDB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed000000001aULL: s390_format_RXE_FRRD(s390_irgen_ADB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed000000001bULL: s390_format_RXE_FRRD(s390_irgen_SDB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed000000001cULL: s390_format_RXE_FRRD(s390_irgen_MDB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed000000001dULL: s390_format_RXE_FRRD(s390_irgen_DDB, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed000000001eULL: s390_format_RXF_FRRDF(s390_irgen_MADB,
+                                                 ovl.fmt.RXF.r3, ovl.fmt.RXF.x2,
+                                                 ovl.fmt.RXF.b2, ovl.fmt.RXF.d2,
+                                                 ovl.fmt.RXF.r1);  goto ok;
+   case 0xed000000001fULL: s390_format_RXF_FRRDF(s390_irgen_MSDB,
+                                                 ovl.fmt.RXF.r3, ovl.fmt.RXF.x2,
+                                                 ovl.fmt.RXF.b2, ovl.fmt.RXF.d2,
+                                                 ovl.fmt.RXF.r1);  goto ok;
+   case 0xed0000000024ULL: /* LDE */ goto unimplemented;
+   case 0xed0000000025ULL: /* LXD */ goto unimplemented;
+   case 0xed0000000026ULL: /* LXE */ goto unimplemented;
+   case 0xed000000002eULL: /* MAE */ goto unimplemented;
+   case 0xed000000002fULL: /* MSE */ goto unimplemented;
+   case 0xed0000000034ULL: /* SQE */ goto unimplemented;
+   case 0xed0000000035ULL: /* SQD */ goto unimplemented;
+   case 0xed0000000037ULL: /* MEE */ goto unimplemented;
+   case 0xed0000000038ULL: /* MAYL */ goto unimplemented;
+   case 0xed0000000039ULL: /* MYL */ goto unimplemented;
+   case 0xed000000003aULL: /* MAY */ goto unimplemented;
+   case 0xed000000003bULL: /* MY */ goto unimplemented;
+   case 0xed000000003cULL: /* MAYH */ goto unimplemented;
+   case 0xed000000003dULL: /* MYH */ goto unimplemented;
+   case 0xed000000003eULL: /* MAD */ goto unimplemented;
+   case 0xed000000003fULL: /* MSD */ goto unimplemented;
+   case 0xed0000000040ULL: s390_format_RXF_FRRDF(s390_irgen_SLDT,
+                                                 ovl.fmt.RXF.r3, ovl.fmt.RXF.x2,
+                                                 ovl.fmt.RXF.b2, ovl.fmt.RXF.d2,
+                                                 ovl.fmt.RXF.r1);  goto ok;
+   case 0xed0000000041ULL: s390_format_RXF_FRRDF(s390_irgen_SRDT,
+                                                 ovl.fmt.RXF.r3, ovl.fmt.RXF.x2,
+                                                 ovl.fmt.RXF.b2, ovl.fmt.RXF.d2,
+                                                 ovl.fmt.RXF.r1);  goto ok;
+   case 0xed0000000048ULL: s390_format_RXF_FRRDF(s390_irgen_SLXT,
+                                                 ovl.fmt.RXF.r3, ovl.fmt.RXF.x2,
+                                                 ovl.fmt.RXF.b2, ovl.fmt.RXF.d2,
+                                                 ovl.fmt.RXF.r1);  goto ok;
+   case 0xed0000000049ULL: s390_format_RXF_FRRDF(s390_irgen_SRXT,
+                                                 ovl.fmt.RXF.r3, ovl.fmt.RXF.x2,
+                                                 ovl.fmt.RXF.b2, ovl.fmt.RXF.d2,
+                                                 ovl.fmt.RXF.r1);  goto ok;
+   case 0xed0000000050ULL: s390_format_RXE_FRRD(s390_irgen_TDCET, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000051ULL: s390_format_RXE_FRRD(s390_irgen_TDGET, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000054ULL: s390_format_RXE_FRRD(s390_irgen_TDCDT, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000055ULL: s390_format_RXE_FRRD(s390_irgen_TDGDT, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000058ULL: s390_format_RXE_FRRD(s390_irgen_TDCXT, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000059ULL: s390_format_RXE_FRRD(s390_irgen_TDGXT, ovl.fmt.RXE.r1,
+                                                ovl.fmt.RXE.x2, ovl.fmt.RXE.b2,
+                                                ovl.fmt.RXE.d2);  goto ok;
+   case 0xed0000000064ULL: s390_format_RXY_FRRD(s390_irgen_LEY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xed0000000065ULL: s390_format_RXY_FRRD(s390_irgen_LDY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xed0000000066ULL: s390_format_RXY_FRRD(s390_irgen_STEY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xed0000000067ULL: s390_format_RXY_FRRD(s390_irgen_STDY, ovl.fmt.RXY.r1,
+                                                ovl.fmt.RXY.x2, ovl.fmt.RXY.b2,
+                                                ovl.fmt.RXY.dl2,
+                                                ovl.fmt.RXY.dh2);  goto ok;
+   case 0xed00000000a8ULL: /* CZDT */ goto unimplemented;
+   case 0xed00000000a9ULL: /* CZXT */ goto unimplemented;
+   case 0xed00000000aaULL: /* CDZT */ goto unimplemented;
+   case 0xed00000000abULL: /* CXZT */ goto unimplemented;
+   }
+
+   switch (((ovl.value >> 16) & 0xff0f00000000ULL) >> 32) {
+   case 0xc000ULL: s390_format_RIL_RP(s390_irgen_LARL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc001ULL: s390_format_RIL_RI(s390_irgen_LGFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc004ULL: s390_format_RIL(s390_irgen_BRCL, ovl.fmt.RIL.r1,
+                                   ovl.fmt.RIL.i2);  goto ok;
+   case 0xc005ULL: s390_format_RIL_RP(s390_irgen_BRASL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc006ULL: s390_format_RIL_RU(s390_irgen_XIHF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc007ULL: s390_format_RIL_RU(s390_irgen_XILF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc008ULL: s390_format_RIL_RU(s390_irgen_IIHF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc009ULL: s390_format_RIL_RU(s390_irgen_IILF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc00aULL: s390_format_RIL_RU(s390_irgen_NIHF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc00bULL: s390_format_RIL_RU(s390_irgen_NILF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc00cULL: s390_format_RIL_RU(s390_irgen_OIHF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc00dULL: s390_format_RIL_RU(s390_irgen_OILF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc00eULL: s390_format_RIL_RU(s390_irgen_LLIHF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc00fULL: s390_format_RIL_RU(s390_irgen_LLILF, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc200ULL: s390_format_RIL_RI(s390_irgen_MSGFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc201ULL: s390_format_RIL_RI(s390_irgen_MSFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc204ULL: s390_format_RIL_RU(s390_irgen_SLGFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc205ULL: s390_format_RIL_RU(s390_irgen_SLFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc208ULL: s390_format_RIL_RI(s390_irgen_AGFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc209ULL: s390_format_RIL_RI(s390_irgen_AFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc20aULL: s390_format_RIL_RU(s390_irgen_ALGFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc20bULL: s390_format_RIL_RU(s390_irgen_ALFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc20cULL: s390_format_RIL_RI(s390_irgen_CGFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc20dULL: s390_format_RIL_RI(s390_irgen_CFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc20eULL: s390_format_RIL_RU(s390_irgen_CLGFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc20fULL: s390_format_RIL_RU(s390_irgen_CLFI, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc402ULL: s390_format_RIL_RP(s390_irgen_LLHRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc404ULL: s390_format_RIL_RP(s390_irgen_LGHRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc405ULL: s390_format_RIL_RP(s390_irgen_LHRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc406ULL: s390_format_RIL_RP(s390_irgen_LLGHRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc407ULL: s390_format_RIL_RP(s390_irgen_STHRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc408ULL: s390_format_RIL_RP(s390_irgen_LGRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc40bULL: s390_format_RIL_RP(s390_irgen_STGRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc40cULL: s390_format_RIL_RP(s390_irgen_LGFRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc40dULL: s390_format_RIL_RP(s390_irgen_LRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc40eULL: s390_format_RIL_RP(s390_irgen_LLGFRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc40fULL: s390_format_RIL_RP(s390_irgen_STRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc600ULL: s390_format_RIL_RP(s390_irgen_EXRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc602ULL: s390_format_RIL_UP(s390_irgen_PFDRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc604ULL: s390_format_RIL_RP(s390_irgen_CGHRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc605ULL: s390_format_RIL_RP(s390_irgen_CHRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc606ULL: s390_format_RIL_RP(s390_irgen_CLGHRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc607ULL: s390_format_RIL_RP(s390_irgen_CLHRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc608ULL: s390_format_RIL_RP(s390_irgen_CGRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc60aULL: s390_format_RIL_RP(s390_irgen_CLGRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc60cULL: s390_format_RIL_RP(s390_irgen_CGFRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc60dULL: s390_format_RIL_RP(s390_irgen_CRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc60eULL: s390_format_RIL_RP(s390_irgen_CLGFRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc60fULL: s390_format_RIL_RP(s390_irgen_CLRL, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xc800ULL: /* MVCOS */ goto unimplemented;
+   case 0xc801ULL: /* ECTG */ goto unimplemented;
+   case 0xc802ULL: /* CSST */ goto unimplemented;
+   case 0xc804ULL: /* LPD */ goto unimplemented;
+   case 0xc805ULL: /* LPDG */ goto unimplemented;
+   case 0xcc06ULL: /* BRCTH */ goto unimplemented;
+   case 0xcc08ULL: s390_format_RIL_RI(s390_irgen_AIH, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xcc0aULL: s390_format_RIL_RI(s390_irgen_ALSIH, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xcc0bULL: s390_format_RIL_RI(s390_irgen_ALSIHN, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xcc0dULL: s390_format_RIL_RI(s390_irgen_CIH, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   case 0xcc0fULL: s390_format_RIL_RU(s390_irgen_CLIH, ovl.fmt.RIL.r1,
+                                      ovl.fmt.RIL.i2);  goto ok;
+   }
+
+   switch (((ovl.value >> 16) & 0xff0000000000ULL) >> 40) {
+   case 0xc5ULL: /* BPRP */ goto unimplemented;
+   case 0xc7ULL: /* BPP */ goto unimplemented;
+   case 0xd0ULL: /* TRTR */ goto unimplemented;
+   case 0xd1ULL: /* MVN */ goto unimplemented;
+   case 0xd2ULL: s390_format_SS_L0RDRD(s390_irgen_MVC, ovl.fmt.SS.l,
+                                       ovl.fmt.SS.b1, ovl.fmt.SS.d1,
+                                       ovl.fmt.SS.b2, ovl.fmt.SS.d2);  goto ok;
+   case 0xd3ULL: /* MVZ */ goto unimplemented;
+   case 0xd4ULL: s390_format_SS_L0RDRD(s390_irgen_NC, ovl.fmt.SS.l,
+                                       ovl.fmt.SS.b1, ovl.fmt.SS.d1,
+                                       ovl.fmt.SS.b2, ovl.fmt.SS.d2);  goto ok;
+   case 0xd5ULL: s390_format_SS_L0RDRD(s390_irgen_CLC, ovl.fmt.SS.l,
+                                       ovl.fmt.SS.b1, ovl.fmt.SS.d1,
+                                       ovl.fmt.SS.b2, ovl.fmt.SS.d2);  goto ok;
+   case 0xd6ULL: s390_format_SS_L0RDRD(s390_irgen_OC, ovl.fmt.SS.l,
+                                       ovl.fmt.SS.b1, ovl.fmt.SS.d1,
+                                       ovl.fmt.SS.b2, ovl.fmt.SS.d2);  goto ok;
+   case 0xd7ULL:
+      if (ovl.fmt.SS.b1 == ovl.fmt.SS.b2 && ovl.fmt.SS.d1 == ovl.fmt.SS.d2)
+         s390_irgen_XC_sameloc(ovl.fmt.SS.l, ovl.fmt.SS.b1, ovl.fmt.SS.d1);
+      else
+        s390_format_SS_L0RDRD(s390_irgen_XC, ovl.fmt.SS.l,
+                              ovl.fmt.SS.b1, ovl.fmt.SS.d1,
+                              ovl.fmt.SS.b2, ovl.fmt.SS.d2);
+      goto ok;
+   case 0xd9ULL: /* MVCK */ goto unimplemented;
+   case 0xdaULL: /* MVCP */ goto unimplemented;
+   case 0xdbULL: /* MVCS */ goto unimplemented;
+   case 0xdcULL: s390_format_SS_L0RDRD(s390_irgen_TR, ovl.fmt.SS.l,
+                                       ovl.fmt.SS.b1, ovl.fmt.SS.d1,
+                                       ovl.fmt.SS.b2, ovl.fmt.SS.d2);  goto ok;
+   case 0xddULL: /* TRT */ goto unimplemented;
+   case 0xdeULL: /* ED */ goto unimplemented;
+   case 0xdfULL: /* EDMK */ goto unimplemented;
+   case 0xe1ULL: /* PKU */ goto unimplemented;
+   case 0xe2ULL: /* UNPKU */ goto unimplemented;
+   case 0xe8ULL: /* MVCIN */ goto unimplemented;
+   case 0xe9ULL: /* PKA */ goto unimplemented;
+   case 0xeaULL: /* UNPKA */ goto unimplemented;
+   case 0xeeULL: /* PLO */ goto unimplemented;
+   case 0xefULL: /* LMD */ goto unimplemented;
+   case 0xf0ULL: /* SRP */ goto unimplemented;
+   case 0xf1ULL: /* MVO */ goto unimplemented;
+   case 0xf2ULL: /* PACK */ goto unimplemented;
+   case 0xf3ULL: /* UNPK */ goto unimplemented;
+   case 0xf8ULL: /* ZAP */ goto unimplemented;
+   case 0xf9ULL: /* CP */ goto unimplemented;
+   case 0xfaULL: /* AP */ goto unimplemented;
+   case 0xfbULL: /* SP */ goto unimplemented;
+   case 0xfcULL: /* MP */ goto unimplemented;
+   case 0xfdULL: /* DP */ goto unimplemented;
+   }
+
+   switch (((ovl.value >> 16) & 0xffff00000000ULL) >> 32) {
+   case 0xe500ULL: /* LASP */ goto unimplemented;
+   case 0xe501ULL: /* TPROT */ goto unimplemented;
+   case 0xe502ULL: /* STRAG */ goto unimplemented;
+   case 0xe50eULL: /* MVCSK */ goto unimplemented;
+   case 0xe50fULL: /* MVCDK */ goto unimplemented;
+   case 0xe544ULL: s390_format_SIL_RDI(s390_irgen_MVHHI, ovl.fmt.SIL.b1,
+                                       ovl.fmt.SIL.d1, ovl.fmt.SIL.i2);
+                                       goto ok;
+   case 0xe548ULL: s390_format_SIL_RDI(s390_irgen_MVGHI, ovl.fmt.SIL.b1,
+                                       ovl.fmt.SIL.d1, ovl.fmt.SIL.i2);
+                                       goto ok;
+   case 0xe54cULL: s390_format_SIL_RDI(s390_irgen_MVHI, ovl.fmt.SIL.b1,
+                                       ovl.fmt.SIL.d1, ovl.fmt.SIL.i2);
+                                       goto ok;
+   case 0xe554ULL: s390_format_SIL_RDI(s390_irgen_CHHSI, ovl.fmt.SIL.b1,
+                                       ovl.fmt.SIL.d1, ovl.fmt.SIL.i2);
+                                       goto ok;
+   case 0xe555ULL: s390_format_SIL_RDU(s390_irgen_CLHHSI, ovl.fmt.SIL.b1,
+                                       ovl.fmt.SIL.d1, ovl.fmt.SIL.i2);
+                                       goto ok;
+   case 0xe558ULL: s390_format_SIL_RDI(s390_irgen_CGHSI, ovl.fmt.SIL.b1,
+                                       ovl.fmt.SIL.d1, ovl.fmt.SIL.i2);
+                                       goto ok;
+   case 0xe559ULL: s390_format_SIL_RDU(s390_irgen_CLGHSI, ovl.fmt.SIL.b1,
+                                       ovl.fmt.SIL.d1, ovl.fmt.SIL.i2);
+                                       goto ok;
+   case 0xe55cULL: s390_format_SIL_RDI(s390_irgen_CHSI, ovl.fmt.SIL.b1,
+                                       ovl.fmt.SIL.d1, ovl.fmt.SIL.i2);
+                                       goto ok;
+   case 0xe55dULL: s390_format_SIL_RDU(s390_irgen_CLFHSI, ovl.fmt.SIL.b1,
+                                       ovl.fmt.SIL.d1, ovl.fmt.SIL.i2);
+                                       goto ok;
+   case 0xe560ULL: /* TBEGIN */ goto unimplemented;
+   case 0xe561ULL: /* TBEGINC */ goto unimplemented;
+   }
+
+   return S390_DECODE_UNKNOWN_INSN;
+
+ok:
+   return S390_DECODE_OK;
+
+unimplemented:
+   return S390_DECODE_UNIMPLEMENTED_INSN;
+}
+
+/* Handle "special" instructions: valgrind pseudo-ops that follow the 8-byte preamble; BYTES points just past that preamble (see caller). */
+static s390_decode_t
+s390_decode_special_and_irgen(const UChar *bytes)
+{
+   s390_decode_t status = S390_DECODE_OK;
+
+   /* Got a "Special" instruction preamble.  Which one is it?  Each variant
+      is a distinct register-to-itself no-op (lr %rX, %rX). */
+   if (bytes[0] == 0x18 && bytes[1] == 0x22 /* lr %r2, %r2 */) {
+      s390_irgen_client_request();
+   } else if (bytes[0] == 0x18 && bytes[1] == 0x33 /* lr %r3, %r3 */) {
+      s390_irgen_guest_NRADDR();
+   } else if (bytes[0] == 0x18 && bytes[1] == 0x44 /* lr %r4, %r4 */) {
+      s390_irgen_call_noredir();
+   } else if (bytes[0] == 0x18 && bytes[1] == 0x55 /* lr %r5, %r5 */) {
+      vex_inject_ir(irsb, Iend_BE);
+
+      /* Invalidate the current insn. The reason is that the IRop we're
+         injecting here can change. In which case the translation has to
+         be redone. For ease of handling, we simply invalidate all the
+         time. */
+      stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_CMSTART),
+                      mkU64(guest_IA_curr_instr)));
+      stmt(IRStmt_Put(S390X_GUEST_OFFSET(guest_CMLEN),
+                      mkU64(guest_IA_next_instr - guest_IA_curr_instr)));
+      vassert(guest_IA_next_instr - guest_IA_curr_instr ==
+              S390_SPECIAL_OP_PREAMBLE_SIZE + S390_SPECIAL_OP_SIZE);
+
+      put_IA(mkaddr_expr(guest_IA_next_instr));
+      dis_res->whatNext    = Dis_StopHere;
+      dis_res->jk_StopHere = Ijk_InvalICache;
+   } else {
+      /* We don't know what it is. */
+      return S390_DECODE_UNKNOWN_SPECIAL_INSN;
+   }
+
+   dis_res->len = S390_SPECIAL_OP_PREAMBLE_SIZE + S390_SPECIAL_OP_SIZE;
+
+   return status;
+}
+
+
+/* Decode one insn (normal or "special") and generate IR; returns # bytes that were decoded or 0 in case of failure */
+static UInt
+s390_decode_and_irgen(const UChar *bytes, UInt insn_length, DisResult *dres)
+{
+   s390_decode_t status;
+
+   dis_res = dres;
+
+   /* Spot the 8-byte preamble:   18ff lr r15,r15
+                                  1811 lr r1,r1
+                                  1822 lr r2,r2
+                                  1833 lr r3,r3 */
+   if (bytes[ 0] == 0x18 && bytes[ 1] == 0xff && bytes[ 2] == 0x18 &&
+       bytes[ 3] == 0x11 && bytes[ 4] == 0x18 && bytes[ 5] == 0x22 &&
+       bytes[ 6] == 0x18 && bytes[ 7] == 0x33) {
+
+      /* Handle special instruction that follows that preamble. */
+      if (0) vex_printf("special function handling...\n");
+
+      insn_length = S390_SPECIAL_OP_PREAMBLE_SIZE + S390_SPECIAL_OP_SIZE;
+      guest_IA_next_instr = guest_IA_curr_instr + insn_length;
+
+      status =
+         s390_decode_special_and_irgen(bytes + S390_SPECIAL_OP_PREAMBLE_SIZE);
+   } else {
+      /* Handle normal instructions, dispatching on the insn length. */
+      switch (insn_length) {
+      case 2:
+         status = s390_decode_2byte_and_irgen(bytes);
+         break;
+
+      case 4:
+         status = s390_decode_4byte_and_irgen(bytes);
+         break;
+
+      case 6:
+         status = s390_decode_6byte_and_irgen(bytes);
+         break;
+
+      default:
+        status = S390_DECODE_ERROR;
+        break;
+      }
+   }
+   /* If the next instruction is EX (opcode 0x44, "execute"), stop here.
+      Note this peeks one byte past the insn just decoded. */
+   if (dis_res->whatNext == Dis_Continue && bytes[insn_length] == 0x44) {
+      put_IA(mkaddr_expr(guest_IA_next_instr));
+      dis_res->whatNext = Dis_StopHere;
+      dis_res->jk_StopHere = Ijk_Boring;
+   }
+
+   if (status == S390_DECODE_OK) return insn_length;  /* OK */
+
+   /* Decoding failed somehow; report the raw opcode bytes if requested. */
+   if (sigill_diag) {
+      vex_printf("vex s390->IR: ");
+      switch (status) {
+      case S390_DECODE_UNKNOWN_INSN:
+         vex_printf("unknown insn: ");
+         break;
+
+      case S390_DECODE_UNIMPLEMENTED_INSN:
+         vex_printf("unimplemented insn: ");
+         break;
+
+      case S390_DECODE_UNKNOWN_SPECIAL_INSN:
+         vex_printf("unimplemented special insn: ");
+         break;
+
+      case S390_DECODE_ERROR:
+         vex_printf("decoding error: ");
+         break;
+
+      default:
+         vpanic("s390_decode_and_irgen");
+      }
+
+      vex_printf("%02x%02x", bytes[0], bytes[1]);
+      if (insn_length > 2) {
+         vex_printf(" %02x%02x", bytes[2], bytes[3]);
+      }
+      if (insn_length > 4) {
+         vex_printf(" %02x%02x", bytes[4], bytes[5]);
+      }
+      vex_printf("\n");
+   }
+
+   return 0;  /* Failed */
+}
+
+
+/* Disassemble a single instruction INSN into IR, filling in a DisResult that records its length, how it ended, and where control continues. */
+static DisResult
+disInstr_S390_WRK(const UChar *insn)
+{
+   UChar byte;
+   UInt  insn_length;
+   DisResult dres;
+
+   /* ---------------------------------------------------- */
+   /* --- Compute instruction length                    -- */
+   /* ---------------------------------------------------- */
+
+   /* Get the first byte of the insn. */
+   byte = insn[0];
+
+   /* The leftmost two bits (0:1) encode the length of the insn in bytes.
+      00 -> 2 bytes, 01 -> 4 bytes, 10 -> 4 bytes, 11 -> 6 bytes. */
+   insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
+
+   guest_IA_next_instr = guest_IA_curr_instr + insn_length;
+
+   /* ---------------------------------------------------- */
+   /* --- Initialise the DisResult data                 -- */
+   /* ---------------------------------------------------- */
+   dres.whatNext   = Dis_Continue;
+   dres.len        = insn_length;
+   dres.continueAt = 0;
+   dres.jk_StopHere = Ijk_INVALID;  /* only meaningful when whatNext == Dis_StopHere */
+
+   /* fixs390: consider chasing of conditional jumps */
+
+   /* Normal and special instruction handling starts here. */
+   if (s390_decode_and_irgen(insn, insn_length, &dres) == 0) {
+      /* All decode failures end up here. The decoder has already issued an
+         error message.
+         Tell the dispatcher that this insn cannot be decoded, and so has
+         not been executed, and (is currently) the next to be executed.
+         The insn address in the guest state needs to be set to 
+         guest_IA_curr_instr, otherwise the complaint will report an
+         incorrect address. */
+      put_IA(mkaddr_expr(guest_IA_curr_instr));
+
+      dres.len         = 0;
+      dres.whatNext    = Dis_StopHere;
+      dres.jk_StopHere = Ijk_NoDecode;
+      dres.continueAt  = 0;
+   } else {
+      /* Decode success; fix up the guest IA according to how we stopped. */
+      switch (dres.whatNext) {
+      case Dis_Continue:
+         put_IA(mkaddr_expr(guest_IA_next_instr));
+         break;
+      case Dis_ResteerU:
+      case Dis_ResteerC:
+         put_IA(mkaddr_expr(dres.continueAt));
+         break;
+      case Dis_StopHere:
+         if (dres.jk_StopHere == Ijk_EmWarn ||
+             dres.jk_StopHere == Ijk_EmFail) {
+            /* We assume here, that emulation warnings are not given for
+               insns that transfer control. There is no good way to
+               do that. */
+            put_IA(mkaddr_expr(guest_IA_next_instr));
+         }
+         break;
+      default:
+         vpanic("disInstr_S390_WRK");
+      }
+   }
+
+   return dres;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Top-level fn                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction
+   is located in host memory at &guest_code[delta]. */
+
+DisResult
+disInstr_S390(IRSB        *irsb_IN,
+              Bool       (*resteerOkFn)(void *, Addr),
+              Bool         resteerCisOk,
+              void        *callback_opaque,
+              const UChar *guest_code,
+              Long         delta,
+              Addr         guest_IP,
+              VexArch      guest_arch,
+              const VexArchInfo *archinfo,
+              const VexAbiInfo  *abiinfo,
+              VexEndness   host_endness,
+              Bool         sigill_diag_IN)
+{
+   vassert(guest_arch == VexArchS390X);
+
+   /* The instruction decoder requires a big-endian machine. */
+   vassert(host_endness == VexEndnessBE);
+
+   /* Set globals (see top of this file) */
+   guest_IA_curr_instr = guest_IP;
+   irsb = irsb_IN;
+   resteer_fn = resteerOkFn;
+   resteer_data = callback_opaque;
+   sigill_diag = sigill_diag_IN;  /* controls diagnostics for undecodable insns */
+
+   return disInstr_S390_WRK(guest_code + delta);
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                   guest_s390_toIR.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_tilegx_defs.h b/VEX/priv/guest_tilegx_defs.h
new file mode 100644
index 0000000..efa78ca
--- /dev/null
+++ b/VEX/priv/guest_tilegx_defs.h
@@ -0,0 +1,110 @@
+/*---------------------------------------------------------------*/
+/*--- begin                               guest_tilegx_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 Tilera Corp.
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+ /* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+#ifndef __VEX_GUEST_TILEGX_DEFS_H
+#define __VEX_GUEST_TILEGX_DEFS_H
+
+#ifdef __tilegx__
+#include "tilegx_disasm.h"
+#endif
+
+/*---------------------------------------------------------*/
+/*--- tilegx to IR conversion                           ---*/
+/*---------------------------------------------------------*/
+
+/* Convert one TILEGX insn to IR.  See the type DisOneInstrFn in
+   bb_to_IR.h. */
+extern DisResult disInstr_TILEGX ( IRSB* irbb,
+                                   Bool (*resteerOkFn) ( void *, Addr ),
+                                   Bool resteerCisOk,
+                                   void* callback_opaque,
+                                   const UChar* guest_code,
+                                   Long delta,
+                                   Addr guest_IP,
+                                   VexArch guest_arch,
+                                   const VexArchInfo* archinfo,
+                                   const VexAbiInfo* abiinfo,
+                                   VexEndness host_endness_IN,
+                                   Bool sigill_diag_IN );
+
+/* Used by the optimiser to specialise calls to helpers. */
+extern IRExpr *guest_tilegx_spechelper ( const HChar * function_name,
+                                         IRExpr ** args,
+                                         IRStmt ** precedingStmts,
+                                         Int n_precedingStmts );
+
+/* Describes to the optimser which part of the guest state require
+   precise memory exceptions.  This is logically part of the guest
+   state description. */
+extern Bool guest_tilegx_state_requires_precise_mem_exns (
+  Int, Int, VexRegisterUpdates );
+
+extern VexGuestLayout tilegxGuest_layout;
+
+/*---------------------------------------------------------*/
+/*--- tilegx guest helpers                              ---*/
+/*---------------------------------------------------------*/
+
+extern ULong tilegx_dirtyhelper_gen ( ULong opc,
+                                      ULong rd0,
+                                      ULong rd1,
+                                      ULong rd2,
+                                      ULong rd3 );
+
+/*---------------------------------------------------------*/
+/*--- Condition code stuff                              ---*/
+/*---------------------------------------------------------*/
+
+/* Defines conditions which we can ask for TILEGX; each entry notes the flag equation (Z,C,N,V) it corresponds to. */
+
+typedef enum {
+  TILEGXCondEQ = 0,      /* equal                         : Z=1 */
+  TILEGXCondNE = 1,      /* not equal                     : Z=0 */
+  TILEGXCondHS = 2,      /* >=u (higher or same)          : C=1 */
+  TILEGXCondLO = 3,      /* <u  (lower)                   : C=0 */
+  TILEGXCondMI = 4,      /* minus (negative)              : N=1 */
+  TILEGXCondPL = 5,      /* plus (zero or +ve)            : N=0 */
+  TILEGXCondVS = 6,      /* overflow                      : V=1 */
+  TILEGXCondVC = 7,      /* no overflow                   : V=0 */
+  TILEGXCondHI = 8,      /* >u   (higher)                 : C=1 && Z=0 */
+  TILEGXCondLS = 9,      /* <=u  (lower or same)          : C=0 || Z=1 */
+  TILEGXCondGE = 10,     /* >=s (signed greater or equal) : N=V */
+  TILEGXCondLT = 11,     /* <s  (signed less than)        : N!=V */
+  TILEGXCondGT = 12,     /* >s  (signed greater)          : Z=0 && N=V */
+  TILEGXCondLE = 13,     /* <=s (signed less or equal)    : Z=1 || N!=V */
+  TILEGXCondAL = 14,     /* always (unconditional)        : 1 */
+  TILEGXCondNV = 15      /* never (unconditional)         : 0 */
+} TILEGXCondcode;
+
+#endif            /* __VEX_GUEST_TILEGX_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                 guest_tilegx_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_tilegx_helpers.c b/VEX/priv/guest_tilegx_helpers.c
new file mode 100644
index 0000000..cda5499
--- /dev/null
+++ b/VEX/priv/guest_tilegx_helpers.c
@@ -0,0 +1,1103 @@
+/*---------------------------------------------------------------*/
+/*--- begin                            guest_tilegx_helpers.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2010-2013 Tilera Corp.
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_tilegx.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_tilegx_defs.h"
+
+/* This file contains helper functions for tilegx guest code.  Calls to
+   these functions are generated by the back end.
+*/
+
+#define ALWAYSDEFD(field)                               \
+  { offsetof(VexGuestTILEGXState, field),               \
+      (sizeof ((VexGuestTILEGXState*)0)->field) }
+
+IRExpr *guest_tilegx_spechelper ( const HChar * function_name, IRExpr ** args,
+                                  IRStmt ** precedingStmts, Int n_precedingStmts)
+{
+  return NULL;  /* no helper-call specialisations are implemented for tilegx */
+}
+
+/* VISIBLE TO LIBVEX CLIENT.  Zero the whole TILEGX guest state: r0..r55, the PC, and the valgrind pseudo-registers (EMNOTE, CMSTART/CMLEN, NRADDR). */
+void LibVEX_GuestTILEGX_initialise ( VexGuestTILEGXState * vex_state )
+{
+  vex_state->guest_r0 = 0;
+  vex_state->guest_r1 = 0;
+  vex_state->guest_r2 = 0;
+  vex_state->guest_r3 = 0;
+  vex_state->guest_r4 = 0;
+  vex_state->guest_r5 = 0;
+  vex_state->guest_r6 = 0;
+  vex_state->guest_r7 = 0;
+  vex_state->guest_r8 = 0;
+  vex_state->guest_r9 = 0;
+  vex_state->guest_r10 = 0;
+  vex_state->guest_r11 = 0;
+  vex_state->guest_r12 = 0;
+  vex_state->guest_r13 = 0;
+  vex_state->guest_r14 = 0;
+  vex_state->guest_r15 = 0;
+  vex_state->guest_r16 = 0;
+  vex_state->guest_r17 = 0;
+  vex_state->guest_r18 = 0;
+  vex_state->guest_r19 = 0;
+  vex_state->guest_r20 = 0;
+  vex_state->guest_r21 = 0;
+  vex_state->guest_r22 = 0;
+  vex_state->guest_r23 = 0;
+  vex_state->guest_r24 = 0;
+  vex_state->guest_r25 = 0;
+  vex_state->guest_r26 = 0;
+  vex_state->guest_r27 = 0;
+  vex_state->guest_r28 = 0;
+  vex_state->guest_r29 = 0;
+  vex_state->guest_r30 = 0;
+  vex_state->guest_r31 = 0;
+  vex_state->guest_r32 = 0;
+  vex_state->guest_r33 = 0;
+  vex_state->guest_r34 = 0;
+  vex_state->guest_r35 = 0;
+  vex_state->guest_r36 = 0;
+  vex_state->guest_r37 = 0;
+  vex_state->guest_r38 = 0;
+  vex_state->guest_r39 = 0;
+  vex_state->guest_r40 = 0;
+  vex_state->guest_r41 = 0;
+  vex_state->guest_r42 = 0;
+  vex_state->guest_r43 = 0;
+  vex_state->guest_r44 = 0;
+  vex_state->guest_r45 = 0;
+  vex_state->guest_r46 = 0;
+  vex_state->guest_r47 = 0;
+  vex_state->guest_r48 = 0;
+  vex_state->guest_r49 = 0;
+  vex_state->guest_r50 = 0;
+  vex_state->guest_r51 = 0;
+  vex_state->guest_r52 = 0;
+  vex_state->guest_r53 = 0;
+  vex_state->guest_r54 = 0;
+  vex_state->guest_r55 = 0;
+
+  vex_state->guest_pc = 0;   /* Program counter */
+
+  vex_state->guest_EMNOTE = 0;
+  /* (guest_CMSTART was redundantly zeroed here; it is set just below) */
+
+  /* For clflush: record start and length of area to invalidate */
+  vex_state->guest_CMSTART = 0;
+  vex_state->guest_CMLEN = 0;
+
+  /* Used to record the unredirected guest address at the start of
+     a translation whose start has been redirected.  By reading
+     this pseudo-register shortly afterwards, the translation can
+     find out what the corresponding no-redirection address was.
+     Note, this is only set for wrap-style redirects, not for
+     replace-style ones. */
+  vex_state->guest_NRADDR = 0;
+}
+
+/*----------------------------------------------------------*/
+/*--- Describing the tilegx guest state, for the benefit ---*/
+/*--- of iropt and instrumenters.                        ---*/
+/*----------------------------------------------------------*/
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions.  If in doubt return
+   True (but this generates significantly slower code).
+
+   We enforce precise exns for guest SP, PC.
+*/
+Bool guest_tilegx_state_requires_precise_mem_exns (
+  Int minoff, Int maxoff,
+  VexRegisterUpdates pxControl)
+{
+  Int sp_min = offsetof(VexGuestTILEGXState, guest_r54);  /* r54 is the SP */
+  Int sp_max = sp_min + 8 - 1;
+  Int pc_min = offsetof(VexGuestTILEGXState, guest_pc);
+  Int pc_max = pc_min + 8 - 1;
+
+  if (maxoff < sp_min || minoff > sp_max) {
+    /* no overlap with sp */
+    if (pxControl == VexRegUpdSpAtMemAccess)
+      return False;  /* We only need to check stack pointer. */
+  } else {
+    return True;
+  }
+
+  if (maxoff < pc_min || minoff > pc_max) {
+    /* no overlap with pc */
+  } else {
+    return True;
+  }
+
+  /* We appear to need precise updates of R52 in order to get proper
+     stacktraces from non-optimised code. */
+  Int fp_min = offsetof(VexGuestTILEGXState, guest_r52);  /* r52 is the FP */
+  Int fp_max = fp_min + 8 - 1;
+
+  if (maxoff < fp_min || minoff > fp_max) {
+    /* no overlap with fp */
+  } else {
+    return True;
+  }
+
+  return False;
+}
+
+VexGuestLayout tilegxGuest_layout = {
+  /* Total size of the guest state, in bytes. */
+  .total_sizeB = sizeof(VexGuestTILEGXState),
+  /* Describe the stack pointer. */
+  .offset_SP = offsetof(VexGuestTILEGXState, guest_r54),
+  .sizeof_SP = 8,
+  /* Describe the frame pointer. */
+  .offset_FP = offsetof(VexGuestTILEGXState, guest_r52),
+  .sizeof_FP = 8,
+  /* Describe the instruction pointer. */
+  .offset_IP = offsetof(VexGuestTILEGXState, guest_pc),
+  .sizeof_IP = 8,
+  /* Describe any sections to be regarded by Memcheck as
+     'always-defined'. */
+  .n_alwaysDefd = 8,
+  /* Entries below must stay in sync with n_alwaysDefd above. */
+  .alwaysDefd = {
+    /* 0 */ ALWAYSDEFD(guest_r0),
+    /* 1 */ ALWAYSDEFD(guest_r1),
+    /* 2 */ ALWAYSDEFD(guest_EMNOTE),
+    /* 3 */ ALWAYSDEFD(guest_CMSTART),
+    /* 4 */ ALWAYSDEFD(guest_CMLEN),
+    /* 5 */ ALWAYSDEFD(guest_r52),
+    /* 6 */ ALWAYSDEFD(guest_r55),
+    /* 7 */ ALWAYSDEFD(guest_pc),
+  }
+};
+
+#ifdef __tilegx__
+ULong tilegx_dirtyhelper_gen ( ULong opc,
+                               ULong rd0, ULong rd1,
+                               ULong rd2, ULong rd3)
+{
+  switch (opc)
+  {
+  case 0:
+    {
+      /* break point */
+      switch (rd0) {
+      case 0x286a44ae90048fffULL:
+        asm (" bpt ");
+        break;
+      default:
+        vex_printf("unhandled \"bpt\": cins=%016llx\n", rd0);
+
+        vassert(0);
+        return 0;
+      }
+    }
+    break;
+  case 28:
+    {
+      return __insn_addxsc(rd1, rd2);
+    }
+    break;
+
+  case 150:
+    {
+      __insn_mf();
+      return 0;
+    }
+    break;
+
+  case 152: /* mm rd, ra, imm0, imm1 */
+    {
+      ULong mask;
+
+      if( rd2 <= rd3)
+        mask = (-1ULL << rd2) ^ ((-1ULL << rd3) << 1);
+      else
+        mask = (-1ULL << rd2) | (-1ULL >> (63 - rd3));
+
+      return (rd0 & mask) | (rd1 & (-1ULL ^ mask));
+    }
+    break;
+  case 154: /* mtspr imm, ra */
+    {
+      switch(rd0)
+      {
+      case 0x2785:
+        __insn_mtspr(0x2785, rd1);
+        break;
+      case 0x2780:
+        __insn_mtspr(0x2780, rd1);
+        break;
+      case 0x2708:
+        __insn_mtspr(0x2708, rd1);
+        break;
+      case 0x2580:
+        __insn_mtspr(0x2580, rd1);
+        break;
+      case 0x2581:
+        __insn_mtspr(0x2581, rd1);
+        break;
+      case 0x2709:  // PASS
+        __insn_mtspr(0x2709, rd1);
+        break;
+      case 0x2707:  // FAIL
+        __insn_mtspr(0x2707, rd1);
+        break;
+      case 0x2705:  // DONE
+        __insn_mtspr(0x2705, rd1);
+        break;
+
+      case 0x2870: //
+
+      default:
+        vex_printf("opc=%d rd0=%llx rd1=%llx\n",
+                   (int)opc, rd0, rd1);
+        vassert(0);
+      }
+    }
+    break;
+
+  case 151: /* mfspr rd, imm */
+    {
+      switch(rd1)
+      {
+      case 0x2785:   // SIM_CTRL
+        return __insn_mfspr(0x2785);
+        break;
+
+      case 0x2708:   // ICS
+        return __insn_mfspr(0x2708);
+        break;
+
+      case 0x2780:  // CMPEXCH_VALUE
+        return __insn_mfspr(0x2780);
+        break;
+
+      case 0x2781:  // CYCLE
+        return __insn_mfspr(0x2781);
+        break;
+
+      case 0x2709:  // PASS
+        return __insn_mfspr(0x2709);
+        break;
+
+      case 0x2707:  // FAIL
+        return __insn_mfspr(0x2707);
+        break;
+
+      case 0x2705:  // DONE
+        return __insn_mfspr(0x2705);
+        break;
+
+      case 0x2580:  // EX_CONTEXT_0
+        return __insn_mfspr(0x2580);
+        break;
+
+      case 0x2581:  // EX_CONTEXT_1
+        return __insn_mfspr(0x2581);
+        break;
+
+      default:
+        vex_printf("opc=%d rd0=%llx rd1=%llx\n",
+                   (int)opc, rd0, rd1);
+        vassert(0);
+      }
+    }
+    break;
+  case 183:
+    {
+      return __insn_pcnt(rd1);
+    }
+    break;
+  case 184:
+    {
+      return __insn_revbits(rd1);
+    }
+    break;
+  case 185: /* revbytes rd, ra */
+    {
+      return __insn_revbytes(rd1);
+    }
+    break;
+
+  case 102:
+    return __insn_fsingle_add1(rd1, rd2);
+    break;
+
+  case 103:
+    return __insn_fsingle_addsub2(rd0, rd1, rd2);
+    break;
+
+  case 104:
+    return __insn_fsingle_mul1(rd1, rd2);
+    break;
+
+  case 105:
+    return __insn_fsingle_mul2(rd1, rd2);
+    break;
+
+  case 106:
+    return __insn_fsingle_pack1(rd1);
+    break;
+
+  case 107:
+    return __insn_fsingle_pack2(rd1, rd2);
+    break;
+
+  case 108:
+    return __insn_fsingle_sub1(rd1, rd2);
+    break;
+
+  case 21:
+    switch (rd0) {
+    case 0x286a44ae90048fffULL:
+      asm ("{ moveli zero, 72 ; raise }");
+      break;
+    default:
+      vex_printf("unhandled \"raise\": cins=%016llx\n", rd0);
+      __insn_ill();
+      return 0;
+    }
+    break;
+
+  case 64:
+    {
+      return __insn_cmul(rd1, rd2);
+    }
+    break;
+  case 65:
+    {
+      return __insn_cmula(rd0, rd1, rd2);
+    }
+    break;
+  case 66:
+    {
+      return __insn_cmulaf(rd0, rd1, rd2);
+    }
+    break;
+  case 67:
+    {
+      return __insn_cmulf(rd1, rd2);
+    }
+    break;
+  case 68:
+    {
+      return __insn_cmulfr(rd1, rd2);
+    }
+    break;
+  case 69:
+    {
+      return __insn_cmulh(rd1, rd2);
+    }
+    break;
+  case 70:
+    {
+      return __insn_cmulhr(rd1, rd2);
+    }
+    break;
+  case 71:
+    {
+      return __insn_crc32_32(rd1, rd2);
+    }
+    break;
+  case 72:
+    {
+      return __insn_crc32_8(rd1, rd2);
+    }
+    break;
+  case 75:
+    {
+      return __insn_dblalign2(rd1, rd2);
+    }
+    break;
+  case 76:
+    {
+      return __insn_dblalign4(rd1, rd2);
+    }
+    break;
+  case 77:
+    {
+      return __insn_dblalign6(rd1, rd2);
+    }
+    break;
+  case 78:
+    {
+      __insn_drain();
+      return 0;
+    }
+    break;
+  case 79:
+    {
+      __insn_dtlbpr(rd0);
+      return 0;
+    }
+    break;
+  case 82:
+    {
+      return __insn_fdouble_add_flags(rd1, rd2);
+    }
+    break;
+  case 83:
+    {
+      return __insn_fdouble_addsub(rd0, rd1, rd2);
+    }
+    break;
+  case 84:
+    {
+      return __insn_fdouble_mul_flags(rd1, rd2);
+    }
+    break;
+  case 85:
+    {
+      return __insn_fdouble_pack1(rd1, rd2);
+    }
+    break;
+  case 86:
+    {
+      return __insn_fdouble_pack2(rd0, rd1, rd2);
+    }
+    break;
+  case 87:
+    {
+      return __insn_fdouble_sub_flags(rd1, rd2);
+    }
+    break;
+  case 88:
+    {
+      return __insn_fdouble_unpack_max(rd1, rd2);
+    }
+    break;
+  case 89:
+    {
+      return __insn_fdouble_unpack_min(rd1, rd2);
+    }
+    break;
+
+  case 98:
+    {
+      __insn_finv(rd0);
+      return 0;
+    }
+    break;
+  case 99:
+    {
+      __insn_flush(rd0);
+      return 0;
+    }
+    break;
+  case 100:
+    {
+      __insn_flushwb();
+      return 0;
+    }
+    break;
+
+  case 109:
+    {
+      __insn_icoh((ULong *)rd0);
+      return 0;
+    }
+    break;
+  case 110:
+    {
+      __insn_ill();
+    }
+    break;
+  case 111:
+    {
+      __insn_inv((ULong *)rd0);
+      return 0;
+    }
+    break;
+
+  case 169:
+    {
+      return __insn_mula_hu_hu(rd0, rd1, rd2);
+    }
+    break;
+  case 170:
+    {
+      return __insn_mula_hu_ls(rd0, rd1, rd2);
+    }
+    break;
+  case 205:
+    {
+      return __insn_shufflebytes(rd0, rd1, rd2);
+    }
+    break;
+  case 224:
+    {
+      return __insn_subxsc(rd1, rd2);
+    }
+    break;
+  case 229:
+    {
+      return __insn_tblidxb0(rd0, rd1);
+    }
+    break;
+  case 230:
+    {
+      return __insn_tblidxb1(rd0, rd1);
+    }
+    break;
+  case 231:
+    {
+      return __insn_tblidxb2(rd0, rd1);
+    }
+    break;
+  case 232:
+    {
+      return __insn_tblidxb3(rd0, rd1);
+    }
+    break;
+  case 233:
+    {
+      return __insn_v1add(rd1, rd2);
+    }
+    break;
+  case 234:
+    {
+      return __insn_v1add(rd1, rd2);
+    }
+    break;
+  case 235:
+    {
+      return __insn_v1adduc(rd1, rd2);
+    }
+    break;
+  case 236:
+    {
+      return __insn_v1adiffu(rd1, rd2);
+    }
+    break;
+  case 237:
+    {
+      return __insn_v1avgu(rd1, rd2);
+    }
+    break;
+
+  case 238:
+    {
+      return __insn_v1cmpeq(rd1, rd2);
+    }
+    break;
+  case 239:
+    {
+      return __insn_v1cmpeq(rd1, rd2);
+    }
+    break;
+  case 240:
+    {
+      return __insn_v1cmples(rd1, rd2);
+    }
+    break;
+  case 241:
+    {
+      return __insn_v1cmpleu(rd1, rd2);
+    }
+    break;
+  case 242:
+    {
+      return __insn_v1cmplts(rd1, rd2);
+    }
+    break;
+  case 243:
+    {
+      return __insn_v1cmplts(rd1, rd2);
+    }
+    break;
+  case 244:
+    {
+      return __insn_v1cmpltu(rd1, rd2);
+    }
+    break;
+  case 245:
+    {
+      return __insn_v1cmpltu(rd1, rd2);
+    }
+    break;
+  case 246:
+    {
+      return __insn_v1cmpne(rd1, rd2);
+    }
+    break;
+  case 247:
+    {
+      return __insn_v1ddotpu(rd1, rd2);
+    }
+    break;
+  case 248:
+    {
+      return __insn_v1ddotpua(rd0, rd1, rd2);
+    }
+    break;
+  case 249:
+    {
+      return __insn_v1ddotpus(rd1, rd2);
+    }
+    break;
+  case 250:
+    {
+      return __insn_v1ddotpusa(rd0, rd1, rd2);
+    }
+    break;
+  case 251:
+    {
+      return __insn_v1dotp(rd1, rd2);
+    }
+    break;
+  case 252:
+    {
+      return __insn_v1dotpa(rd0, rd1, rd2);
+    }
+    break;
+  case 253:
+    {
+      return __insn_v1dotpu(rd1, rd2);
+    }
+    break;
+  case 254:
+    {
+      return __insn_v1dotpua(rd0, rd1, rd2);
+    }
+    break;
+  case 255:
+    {
+      return __insn_v1dotpus(rd1, rd2);
+    }
+    break;
+  case 256:
+    {
+      return __insn_v1dotpusa(rd0, rd1, rd2);
+    }
+    break;
+  case 257:
+    {
+      return __insn_v1int_h(rd1, rd2);
+    }
+    break;
+  case 258:
+    {
+      return __insn_v1int_l(rd1, rd2);
+    }
+    break;
+  case 259:
+    {
+      return __insn_v1maxu(rd1, rd2);
+    }
+    break;
+  case 260:
+    {
+      return __insn_v1maxu(rd1, rd2);
+    }
+    break;
+  case 261:
+    {
+      return __insn_v1minu(rd1, rd2);
+    }
+    break;
+  case 262:
+    {
+      return __insn_v1minu(rd1, rd2);
+    }
+    break;
+  case 263:
+    {
+      return __insn_v1mnz(rd1, rd2);
+    }
+    break;
+  case 264:
+    {
+      return __insn_v1multu(rd1, rd2);
+    }
+    break;
+  case 265:
+    {
+      return __insn_v1mulu(rd1, rd2);
+    }
+    break;
+  case 266:
+    {
+      return __insn_v1mulus(rd1, rd2);
+    }
+    break;
+  case 267:
+    {
+      return __insn_v1mz(rd1, rd2);
+    }
+    break;
+  case 268:
+    {
+      return __insn_v1sadau(rd0, rd1, rd2);
+    }
+    break;
+  case 269:
+    {
+      return __insn_v1sadu(rd1, rd2);
+    }
+    break;
+  case 270:
+    {
+      return __insn_v1shl(rd1, rd2);
+    }
+    break;
+  case 271:
+    {
+      return __insn_v1shli(rd1, rd2);
+    }
+    break;
+  case 272:
+    {
+      return __insn_v1shrs(rd1, rd2);
+    }
+    break;
+  case 273:
+    {
+      return __insn_v1shrsi(rd1, rd2);
+    }
+    break;
+  case 274:
+    {
+      return __insn_v1shru(rd1, rd2);
+    }
+    break;
+  case 275:
+    {
+      return __insn_v1shrui(rd1, rd2);
+    }
+    break;
+  case 276:
+    {
+      return __insn_v1sub(rd1, rd2);
+    }
+    break;
+  case 277:
+    {
+      return __insn_v1subuc(rd1, rd2);
+    }
+    break;
+  case 278:
+    {
+      return __insn_v2add(rd1, rd2);
+    }
+    break;
+  case 279:
+    {
+      return __insn_v2add(rd1, rd2);
+    }
+    break;
+  case 280:
+    {
+      return __insn_v2addsc(rd1, rd2);
+    }
+    break;
+  case 281:
+    {
+      return __insn_v2adiffs(rd1, rd2);
+    }
+    break;
+  case 282:
+    {
+      return __insn_v2avgs(rd1, rd2);
+    }
+    break;
+  case 283:
+    {
+      return __insn_v2cmpeq(rd1, rd2);
+    }
+    break;
+  case 284:
+    {
+      return __insn_v2cmpeq(rd1, rd2);
+    }
+    break;
+  case 285:
+    {
+      return __insn_v2cmples(rd1, rd2);
+    }
+    break;
+  case 286:
+    {
+      return __insn_v2cmpleu(rd1, rd2);
+    }
+    break;
+  case 287:
+    {
+      return __insn_v2cmplts(rd1, rd2);
+    }
+    break;
+  case 288:
+    {
+      return __insn_v2cmplts(rd1, rd2);
+    }
+    break;
+  case 289:
+    {
+      return __insn_v2cmpltu(rd1, rd2);
+    }
+    break;
+  case 290:
+    {
+      return __insn_v2cmpltu(rd1, rd2);
+    }
+    break;
+  case 291:
+    {
+      return __insn_v2cmpne(rd1, rd2);
+    }
+    break;
+  case 292:
+    {
+      return __insn_v2dotp(rd1, rd2);
+    }
+    break;
+  case 293:
+    {
+      return __insn_v2dotpa(rd0, rd1, rd2);
+    }
+    break;
+  case 294:
+    {
+      return __insn_v2int_h(rd1, rd2);
+    }
+    break;
+  case 295:
+    {
+      return __insn_v2int_l(rd1, rd2);
+    }
+    break;
+  case 296:
+    {
+      return __insn_v2maxs(rd1, rd2);
+    }
+    break;
+  case 297:
+    {
+      return __insn_v2maxs(rd1, rd2);
+    }
+    break;
+  case 298:
+    {
+      return __insn_v2mins(rd1, rd2);
+    }
+    break;
+  case 299:
+    {
+      return __insn_v2mins(rd1, rd2);
+    }
+    break;
+  case 300:
+    {
+      return __insn_v2mnz(rd1, rd2);
+    }
+    break;
+  case 301:
+    {
+      return __insn_v2mulfsc(rd1, rd2);
+    }
+    break;
+  case 302:
+    {
+      return __insn_v2muls(rd1, rd2);
+    }
+    break;
+  case 303:
+    {
+      return __insn_v2mults(rd1, rd2);
+    }
+    break;
+  case 304:
+    {
+      return __insn_v2mz(rd1, rd2);
+    }
+    break;
+  case 305:
+    {
+      return __insn_v2packh(rd1, rd2);
+    }
+    break;
+  case 306:
+    {
+      return __insn_v2packl(rd1, rd2);
+    }
+    break;
+  case 307:
+    {
+      return __insn_v2packuc(rd1, rd2);
+    }
+    break;
+  case 308:
+    {
+      return __insn_v2sadas(rd0, rd1, rd2);
+    }
+    break;
+  case 309:
+    {
+      return __insn_v2sadau(rd0, rd1, rd2);
+    }
+    break;
+  case 310:
+    {
+      return __insn_v2sads(rd1, rd2);
+    }
+    break;
+  case 311:
+    {
+      return __insn_v2sadu(rd1, rd2);
+    }
+    break;
+  case 312:
+    {
+      return __insn_v2shl(rd1, rd2);
+    }
+    break;
+  case 313:
+    {
+      return __insn_v2shli(rd1, rd2);
+    }
+    break;
+  case 314:
+    {
+      return __insn_v2shlsc(rd1, rd2);
+    }
+    break;
+  case 315:
+    {
+      return __insn_v2shrs(rd1, rd2);
+    }
+    break;
+  case 316:
+    {
+      return __insn_v2shrsi(rd1, rd2);
+    }
+    break;
+  case 317:
+    {
+      return __insn_v2shru(rd1, rd2);
+    }
+    break;
+  case 318:
+    {
+      return __insn_v2shrui(rd1, rd2);
+    }
+    break;
+  case 319:
+    {
+      return __insn_v2sub(rd1, rd2);
+    }
+    break;
+  case 320:
+    {
+      return __insn_v2subsc(rd1, rd2);
+    }
+    break;
+  case 321:
+    {
+      return __insn_v4add(rd1, rd2);
+    }
+    break;
+  case 322:
+    {
+      return __insn_v4addsc(rd1, rd2);
+    }
+    break;
+  case 323:
+    {
+      return __insn_v4int_h(rd1, rd2);
+    }
+    break;
+  case 324:
+    {
+      return __insn_v4int_l(rd1, rd2);
+    }
+    break;
+  case 325:
+    {
+      return __insn_v4packsc(rd1, rd2);
+    }
+    break;
+  case 326:
+    {
+      return __insn_v4shl(rd1, rd2);
+    }
+    break;
+  case 327:
+    {
+      return __insn_v4shlsc(rd1, rd2);
+    }
+    break;
+  case 328:
+    {
+      return __insn_v4shrs(rd1, rd2);
+    }
+    break;
+  case 329:
+    {
+      return __insn_v4shru(rd1, rd2);
+    }
+    break;
+  case 330:
+    {
+      return __insn_v4sub(rd1, rd2);
+    }
+    break;
+  case 331:
+    {
+      return __insn_v4subsc(rd1, rd2);
+    }
+    break;
+
+  default:
+    vex_printf("opc=%d rd0=%llx rd1=%llx\n",
+               (int)opc, rd0, rd1);
+    vassert(0);
+  }
+}
+#else
+/* Fallback stub compiled on non-TILE-Gx hosts (the #else arm of the
+   __tilegx__ conditional): native instructions cannot be executed
+   here, so just emit a diagnostic and return 0.  All arguments
+   (opc, rd0..rd3) are ignored. */
+ULong tilegx_dirtyhelper_gen ( ULong opc,
+                               ULong rd0, ULong rd1,
+                               ULong rd2, ULong rd3 )
+{
+  vex_printf("NOT a TILEGX platform");
+  return 0;
+}
+#endif /* __tilegx__ */
+
+/*---------------------------------------------------------------*/
+/*--- end                              guest_tilegx_helpers.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_tilegx_toIR.c b/VEX/priv/guest_tilegx_toIR.c
new file mode 100644
index 0000000..4ce6f7b
--- /dev/null
+++ b/VEX/priv/guest_tilegx_toIR.c
@@ -0,0 +1,2513 @@
+
+/*--------------------------------------------------------------------*/
+/*--- begin                                    guest_tilegx_toIR.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2010-2013  Tilera Corp.
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+/* Translates TILEGX code to IR. */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_guest_tilegx.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_tilegx_defs.h"
+#include "tilegx_disasm.h"
+
+/*------------------------------------------------------------*/
+/*--- Globals                                              ---*/
+/*------------------------------------------------------------*/
+
+/* These are set at the start of the translation of a instruction, so
+   that we don't have to pass them around endlessly.  CONST means does
+   not change during translation of the instruction.
+*/
+
+/* CONST: is the host bigendian?  (This note was inherited from the
+   ARM front end, where endianness interacts with float vs double
+   register accesses on VFP; the issue is complex and not properly
+   thought out.) */
+static VexEndness host_endness;
+
+/* Pointer to the guest code area. */
+static UChar *guest_code;
+
+/* The guest address corresponding to guest_code[0]. */
+static Addr64 guest_PC_bbstart;
+
+/* CONST: The guest address for the instruction currently being
+   translated. */
+static Addr64 guest_PC_curr_instr;
+
+/* MOD: The IRSB* into which we're generating code. */
+static IRSB *irsb;
+
+/*------------------------------------------------------------*/
+/*--- Debugging output                                     ---*/
+/*------------------------------------------------------------*/
+
+#define DIP(format, args...)                    \
+  if (vex_traceflags & VEX_TRACE_FE)            \
+    vex_printf(format, ## args)
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for deconstructing the        ---*/
+/*--- tilegx insn stream.                                  ---*/
+/*------------------------------------------------------------*/
+
+/* Byte offset of guest integer register 'iregNo' inside the guest
+   state: registers are 64-bit (see getIReg), hence 8 bytes apart,
+   starting at offset 0. */
+static Int integerGuestRegOffset ( UInt iregNo )
+{
+  return iregNo * 8;
+}
+
+/*------------------------------------------------------------*/
+/*---                           Field helpers              ---*/
+/*------------------------------------------------------------*/
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for creating IR fragments.    ---*/
+/*------------------------------------------------------------*/
+
+/* Make an 8-bit IR constant from the low byte of 'i'. */
+static IRExpr *mkU8 ( UInt i )
+{
+  UChar b = (UChar) i;
+  return IRExpr_Const(IRConst_U8(b));
+}
+
+/* Make a 32-bit IR integer constant expression. */
+static IRExpr *mkU32 ( UInt i )
+{
+  IRConst *c = IRConst_U32(i);
+  return IRExpr_Const(c);
+}
+
+/* Make a 64-bit IR integer constant expression. */
+static IRExpr *mkU64 ( ULong i )
+{
+  IRConst *c = IRConst_U64(i);
+  return IRExpr_Const(c);
+}
+
+/* Read the current value of IR temporary 'tmp'. */
+static IRExpr *mkexpr ( IRTemp tmp )
+{
+  return IRExpr_RdTmp(tmp);
+}
+
+/* Build a unary-operator IR expression. */
+static IRExpr *unop ( IROp op, IRExpr * a )
+{
+  return IRExpr_Unop(op, a);
+}
+
+/* Build a binary-operator IR expression. */
+static IRExpr *binop ( IROp op, IRExpr * a1, IRExpr * a2 )
+{
+  return IRExpr_Binop(op, a1, a2);
+}
+
+/* Build a little-endian load of type 'ty' from address 'addr'.
+   All guest memory accesses in this front end are little-endian
+   (see also store()). */
+static IRExpr *load ( IRType ty, IRExpr * addr )
+{
+  /* Return the load expression directly; the original routed it
+     through a pointless NULL-initialised local. */
+  return IRExpr_Load(Iend_LE, ty, addr);
+}
+
+/* Append statement 'st' to the IRSB currently under construction
+   (the file-scope "irsb"). */
+static void stmt ( IRStmt * st )
+{
+  addStmtToIRSB(irsb, st);
+}
+
+#define OFFB_PC     offsetof(VexGuestTILEGXState, guest_pc)
+
+/* Write expression 'e' to the guest program counter slot
+   (guest_pc, at OFFB_PC). */
+static void putPC ( IRExpr * e )
+{
+  stmt(IRStmt_Put(OFFB_PC, e));
+}
+
+/* Bind expression 'e' to IR temporary 'dst'. */
+static void assign ( IRTemp dst, IRExpr * e )
+{
+  stmt(IRStmt_WrTmp(dst, e));
+}
+
+/* Emit a little-endian store of 'data' to address 'addr'. */
+static void store ( IRExpr * addr, IRExpr * data )
+{
+  stmt(IRStmt_Store(Iend_LE, addr, data));
+}
+
+/* Generate a new temporary of the given type in the current IRSB's
+   type environment.  Asserts that 'ty' is a plausible IR type. */
+static IRTemp newTemp ( IRType ty )
+{
+  vassert(isPlausibleIRType(ty));
+  return newIRTemp(irsb->tyenv, ty);
+}
+
+/* Sign-extend the low 16 bits of 'x' to 64 bits.  Uses the
+   mask/xor/subtract idiom instead of the original
+   ((Long)x << 48) >> 48, which left-shifts a signed value —
+   undefined behaviour whenever bits above bit 15 of 'x' are set —
+   and relies on implementation-defined signed right shift. */
+static ULong extend_s_16to64 ( UInt x )
+{
+  ULong v = x & 0xFFFFULL;
+  return (v ^ 0x8000ULL) - 0x8000ULL;
+}
+
+/* Sign-extend the low 8 bits of 'x' to 64 bits.  Same UB-free
+   mask/xor/subtract idiom as extend_s_16to64; the original
+   ((Long)x << 56) >> 56 left-shifts a signed value, which is
+   undefined when high bits of 'x' are set. */
+static ULong extend_s_8to64 ( UInt x )
+{
+  ULong v = x & 0xFFULL;
+  return (v ^ 0x80ULL) - 0x80ULL;
+}
+
+/* Fetch guest integer register 'iregNo' as a 64-bit expression.
+   Accepted numbers are 0..55, 63, and 70..73 (presumably guest
+   pseudo-register slots — confirm against the guest state layout);
+   anything else indicates a decoder bug, so complain and assert. */
+static IRExpr *getIReg ( UInt iregNo )
+{
+  IRType ty = Ity_I64;
+  if(!(iregNo < 56 || iregNo == 63 ||
+       (iregNo >= 70 && iregNo <= 73))) {
+    vex_printf("iregNo=%d\n", iregNo);
+    vassert(0);
+  }
+  return IRExpr_Get(integerGuestRegOffset(iregNo), ty);
+}
+
+/* Write 'e' (must have type Ity_I64) to guest integer register
+   'archreg'.  Writes to register 63 are silently discarded
+   (presumably the hardwired zero register).  NOTE(review): the
+   accepted set here (0..55, 63, 70, 72, 73) excludes 71, which
+   getIReg does accept — confirm the asymmetry is intended. */
+static void putIReg ( UInt archreg, IRExpr * e )
+{
+  IRType ty = Ity_I64;
+  if(!(archreg < 56 || archreg == 63 || archreg == 70 ||
+       archreg == 72 || archreg == 73)) {
+    vex_printf("archreg=%d\n", archreg);
+    vassert(0);
+  }
+  vassert(typeOfIRExpr(irsb->tyenv, e) == ty);
+  if (archreg != 63)
+    stmt(IRStmt_Put(integerGuestRegOffset(archreg), e));
+}
+
+/* Narrow a 32- or 64-bit integer expression to 8, 16 or 32 bits.
+   Identity narrows are passed through; only the combinations below
+   make sense — anything else dumps the types (under VEX_TRACE_FE)
+   and panics. */
+static IRExpr *narrowTo ( IRType dst_ty, IRExpr * e )
+{
+  IRType src_ty = typeOfIRExpr(irsb->tyenv, e);
+
+  if (src_ty == dst_ty)
+    return e;
+
+  if (src_ty == Ity_I32) {
+    if (dst_ty == Ity_I16)
+      return unop(Iop_32to16, e);
+    if (dst_ty == Ity_I8)
+      return unop(Iop_32to8, e);
+  }
+
+  if (src_ty == Ity_I64) {
+    if (dst_ty == Ity_I8)
+      return unop(Iop_64to8, e);
+    if (dst_ty == Ity_I16)
+      return unop(Iop_64to16, e);
+    if (dst_ty == Ity_I32)
+      return unop(Iop_64to32, e);
+  }
+
+  if (vex_traceflags & VEX_TRACE_FE) {
+    vex_printf("\nsrc, dst tys are: ");
+    ppIRType(src_ty);
+    vex_printf(", ");
+    ppIRType(dst_ty);
+    vex_printf("\n");
+  }
+  vpanic("narrowTo(tilegx)");
+  return e;
+}
+
+/* Sign-extend expression _e to 64 bits.  _n == 32 and _n == 16 use
+   the dedicated IROps; any other _n emits a shl/sar pair by
+   (63 - _n), i.e. bit _n of _e is treated as the sign bit — callers
+   must choose _n accordingly.  NOTE(review): _e and _n are each
+   evaluated more than once; pass only side-effect-free arguments. */
+#define signExtend(_e, _n)                                              \
+  ((_n == 32) ?                                                         \
+   unop(Iop_32Sto64, _e) :                                              \
+   ((_n == 16) ?                                                        \
+    unop(Iop_16Sto64, _e) :						\
+    (binop(Iop_Sar64, binop(Iop_Shl64, _e, mkU8(63 - (_n))), mkU8(63 - (_n))))))
+
+/* Build a conditional-exit statement: when 'guard' holds, control
+   leaves the superblock for absolute address 'imm' (a boring,
+   statically-known jump).  The guard is first materialised into an
+   I1 temporary. */
+static IRStmt* dis_branch ( IRExpr* guard, ULong imm )
+{
+  IRTemp cond = newTemp(Ity_I1);
+  assign(cond, guard);
+  return IRStmt_Exit(mkexpr(cond), Ijk_Boring,
+                     IRConst_U64(imm), OFFB_PC);
+}
+
+/* Queue a register write-back: record (register _rd, temp _td) in
+   the rd_wb_* arrays so the write can be committed after all reads
+   in the current bundle (TILE-Gx is VLIW — see the note in
+   disInstr_TILEGX_WRK).  At most 6 write-backs per bundle. */
+#define  MARK_REG_WB(_rd, _td)                  \
+  do {                                          \
+    vassert(rd_wb_index < 6);                   \
+    rd_wb_temp[rd_wb_index] = _td;              \
+    rd_wb_reg[rd_wb_index] = _rd;               \
+    rd_wb_index++;                              \
+  } while(0)
+
+/*------------------------------------------------------------*/
+/*--- Disassemble a single instruction                     ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction bundle into IR.  The bundle is
+   located in host memory at guest_instr, and has guest IP of
+   guest_PC_curr_instr, which will have been set before the call
+   here. */
+static DisResult disInstr_TILEGX_WRK ( Bool(*resteerOkFn) (void *, Addr),
+                                       Bool resteerCisOk,
+                                       void *callback_opaque,
+                                       Long delta64,
+                                       const VexArchInfo * archinfo,
+                                       const VexAbiInfo * abiinfo,
+                                       Bool sigill_diag )
+{
+  struct tilegx_decoded_instruction
+    decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
+  ULong  cins, opcode = -1, rd, ra, rb, imm = 0;
+  ULong  opd[4];
+  ULong  opd_src_map, opd_dst_map, opd_imm_map;
+  Int    use_dirty_helper;
+  IRTemp t0, t1, t2, t3, t4;
+  IRTemp tb[4];
+  IRTemp rd_wb_temp[6];
+  ULong  rd_wb_reg[6];
+  /* Tilegx is a VLIW processor, we have to commit register write after read.*/
+  Int    rd_wb_index;
+  Int    n = 0, nr_insn;
+  DisResult dres;
+
+  /* The running delta */
+  Long delta = delta64;
+
+  /* Holds pc at the start of the insn, so that we can print
+     consistent error messages for unimplemented insns. */
+  //Long delta_start = delta;
+
+  UChar *code = (UChar *) (guest_code + delta);
+
+  IRStmt *bstmt = NULL;  /* Branch statement. */
+  IRExpr *next = NULL; /* Next bundle expr. */
+  ULong  jumpkind =  Ijk_Boring;
+  ULong  steering_pc;
+
+  /* Set result defaults. */
+  dres.whatNext = Dis_Continue;
+  dres.len = 0;
+  dres.continueAt = 0;
+  dres.jk_StopHere = Ijk_INVALID;
+
+  /* Verify the code addr is 8-byte aligned. */
+  vassert((((Addr)code) & 7) == 0);
+
+  /* Get the instruction bundle. */
+  cins = *((ULong *)(Addr) code);
+
+  /* "Special" instructions. */
+  /* Spot the 16-byte preamble:   ****tilegx****
+     0:02b3c7ff91234fff { moveli zero, 4660 ; moveli zero, 22136 }
+     8:0091a7ff95678fff { moveli zero, 22136 ; moveli zero, 4660 }
+  */
+#define CL_W0 0x02b3c7ff91234fffULL
+#define CL_W1 0x0091a7ff95678fffULL
+
+  if (*((ULong*)(Addr)(code)) == CL_W0 &&
+      *((ULong*)(Addr)(code + 8)) == CL_W1) {
+    /* Got a "Special" instruction preamble.  Which one is it? */
+    if (*((ULong*)(Addr)(code + 16)) ==
+        0x283a69a6d1483000ULL /* or r13, r13, r13 */ ) {
+      /* r0 = client_request ( r12 ) */
+      DIP("r0 = client_request ( r12 )\n");
+
+      putPC(mkU64(guest_PC_curr_instr + 24));
+
+      dres.jk_StopHere = Ijk_ClientReq;
+      dres.whatNext = Dis_StopHere;
+      dres.len = 24;
+      goto decode_success;
+
+    } else if (*((ULong*)(Addr)(code + 16)) ==
+               0x283a71c751483000ULL /* or r14, r14, r14 */ ) {
+      /* r11 = guest_NRADDR */
+      DIP("r11 = guest_NRADDR\n");
+      dres.len = 24;
+      putIReg(11, IRExpr_Get(offsetof(VexGuestTILEGXState, guest_NRADDR),
+                             Ity_I64));
+      putPC(mkU64(guest_PC_curr_instr + 8));
+      goto decode_success;
+
+    } else if (*((ULong*)(Addr)(code + 16)) ==
+               0x283a79e7d1483000ULL  /* or r15, r15, r15 */ ) {
+      /*  branch-and-link-to-noredir r12 */
+      DIP("branch-and-link-to-noredir r12\n");
+      dres.len = 24;
+      putIReg(55, mkU64(guest_PC_curr_instr + 24));
+
+      putPC(getIReg(12));
+
+      dres.jk_StopHere = Ijk_NoRedir;
+      dres.whatNext = Dis_StopHere;
+      goto decode_success;
+
+    }  else if (*((ULong*)(Addr)(code + 16)) ==
+                0x283a5965d1483000ULL  /* or r11, r11, r11 */ ) {
+      /*  vex-inject-ir */
+      DIP("vex-inject-ir\n");
+      dres.len = 24;
+
+      vex_inject_ir(irsb, Iend_LE);
+
+      stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_CMSTART),
+                      mkU64(guest_PC_curr_instr)));
+      stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_CMLEN),
+                      mkU64(24)));
+
+      /* 2 + 1 = 3 bundles. 24 bytes. */
+      putPC(mkU64(guest_PC_curr_instr + 24));
+
+      dres.jk_StopHere = Ijk_InvalICache;
+      dres.whatNext = Dis_StopHere;
+      goto decode_success;
+    }
+
+    /* We don't expect this. */
+    vex_printf("%s: unexpect special bundles at %lx\n",
+               __func__, (Addr)guest_PC_curr_instr);
+    delta += 16;
+    goto decode_failure;
+    /*NOTREACHED*/
+  }
+
+  /* To decode the given instruction bundle. */
+  nr_insn = parse_insn_tilegx((tilegx_bundle_bits)cins,
+                              (ULong)(Addr)code,
+                              decoded);
+
+  if (vex_traceflags & VEX_TRACE_FE)
+    decode_and_display(&cins, 1, (ULong)(Addr)code);
+
+  /* Init. rb_wb_index */
+  rd_wb_index = 0;
+
+  steering_pc = -1ULL;
+
+  for (n = 0; n < nr_insn; n++) {
+    opcode = decoded[n].opcode->mnemonic;
+    Int opi;
+
+    rd = ra = rb = -1;
+    opd[0] = opd[1] = opd[2] = opd[3] = -1;
+    opd_dst_map = 0;
+    opd_src_map = 0;
+    opd_imm_map = 0;
+
+    for (opi = 0; opi < decoded[n].opcode->num_operands; opi++) {
+      const struct tilegx_operand *op = decoded[n].operands[opi];
+      opd[opi] = decoded[n].operand_values[opi];
+
+      /* Set the operands. rd, ra, rb and imm. */
+      if (opi < 3) {
+        if (op->is_dest_reg) {
+          if (rd == -1)
+            rd =  decoded[n].operand_values[opi];
+          else if (ra == -1)
+            ra =  decoded[n].operand_values[opi];
+        } else if (op->is_src_reg) {
+          if (ra == -1) {
+            ra = decoded[n].operand_values[opi];
+          } else if(rb == -1) {
+            rb = decoded[n].operand_values[opi];
+          } else {
+            vassert(0);
+          }
+        } else {
+          imm = decoded[n].operand_values[opi];
+        }
+      }
+
+      /* Build bit maps of used dest, source registers
+         and immediate. */
+      if (op->is_dest_reg) {
+        opd_dst_map |= 1ULL << opi;
+        if(op->is_src_reg)
+          opd_src_map |= 1ULL << opi;
+      } else if(op->is_src_reg) {
+        opd_src_map |= 1ULL << opi;
+      } else {
+        opd_imm_map |= 1ULL << opi;
+      }
+    }
+
+    use_dirty_helper = 0;
+
+    switch (opcode) {
+    case 0:  /* "bpt" */  /* "raise" */
+      /* "bpt" pseudo instruction is an illegal instruction */
+      opd_imm_map |= (1 << 0);
+      opd[0] = cins;
+      use_dirty_helper = 1;
+      break;
+    case 1:  /* "info" */   /* Ignore this instruction. */
+      break;
+    case 2:  /* "infol" */   /* Ignore this instruction. */
+      break;
+    case 3:  /* "ld4s_tls" */   /* Ignore this instruction. */
+      break;
+    case 4:  /* "ld_tls" */    /* Ignore this instruction. */
+      break;
+    case 5:  /* "move" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, getIReg(ra));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 6:  /* "movei" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, mkU64(extend_s_8to64(imm)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 7:  /* "moveli" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, mkU64(extend_s_16to64(imm)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 8:  /* "prefetch" */   /* Ignore. */
+      break;
+    case 9:  /* "prefetch_add_l1" */   /* Ignore. */
+      break;
+    case 10: /* "prefetch_add_l1_fault" */   /* Ignore. */
+      break;
+    case 11: /* "prefetch_add_l2" */   /* Ignore. */
+      break;
+    case 12: /* "prefetch_add_l2_fault" */   /* Ignore. */
+      break;
+    case 13: /* "prefetch_add_l3" */   /* Ignore. */
+      break;
+    case 14: /* "prefetch_add_l3_fault" */   /* Ignore. */
+      break;
+    case 15: /* "prefetch_l1" */  /* Ignore. */
+      break;
+    case 16: /* "prefetch_l1_fault" */   /* Ignore. */
+      break;
+    case 17: /* "prefetch_l2" */   /* Ignore. */
+      break;
+    case 18: /* "prefetch_l2_fault" */   /* Ignore. */
+      break;
+    case 19: /* "prefetch_l3" */   /* Ignore. */
+      break;
+    case 20: /* "prefetch_l3_fault" */   /* Ignore. */
+      break;
+    case 21: /* "raise" */
+      /* The "raise" pseudo instruction is an illegal instruction plus
+         a "moveli zero, <sig>", so we need to save the whole bundle
+         in opd[0], which will be used in the dirty helper. */
+      opd_imm_map |= (1 << 0);
+      opd[0] = cins;
+      use_dirty_helper = 1;
+      break;
+    case 22: /* "add" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Add64, getIReg(ra), getIReg(rb)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 23: /* "addi" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Add64, getIReg(ra),
+                       mkU64(extend_s_8to64(imm))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 24: /* "addli" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Add64, getIReg(ra),
+                       mkU64(extend_s_16to64(imm))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 25: /* "addx" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, signExtend(binop(Iop_Add32,
+                                  narrowTo(Ity_I32, getIReg(ra)),
+                                  narrowTo(Ity_I32, getIReg(rb))),
+                            32));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 26: /* "addxi" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, signExtend(binop(Iop_Add32,
+                                  narrowTo(Ity_I32, getIReg(ra)),
+                                  mkU32(imm)), 32));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 27: /* "addxli" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, signExtend(binop(Iop_Add32,
+                                  narrowTo(Ity_I32, getIReg(ra)),
+                                  mkU32(imm)), 32));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 28: /* "addxsc" */
+      use_dirty_helper = 1;
+      break;
+    case 29: /* "and" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_And64, getIReg(ra), getIReg(rb)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 30: /* "andi" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_And64, getIReg(ra),
+                       mkU64(extend_s_8to64(imm))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 31: /* "beqz" */
+      /* Fall-through */
+    case 32:
+      /* "beqzt" */
+      bstmt = dis_branch(binop(Iop_CmpEQ64, getIReg(ra), mkU64(0)),
+                         imm);
+      break;
+    case 33: /* "bfexts" */
+      {
+        ULong imm0 = decoded[n].operand_values[3];
+        ULong mask = ((-1ULL) ^ ((-1ULL << ((imm0 - imm) & 63)) << 1));
+        t0 = newTemp(Ity_I64);
+        t2 = newTemp(Ity_I64);
+        assign(t0, binop(Iop_Xor64,
+                         binop(Iop_Sub64,
+                               binop(Iop_And64,
+                                     binop(Iop_Shr64,
+                                           getIReg(ra),
+                                           mkU8(imm0)),
+                                     mkU64(1)),
+                               mkU64(1)),
+                         mkU64(-1ULL)));
+        assign(t2,
+               binop(Iop_Or64,
+                     binop(Iop_And64,
+                           binop(Iop_Or64,
+                                 binop(Iop_Shr64,
+                                       getIReg(ra),
+                                       mkU8(imm)),
+                                 binop(Iop_Shl64,
+                                       getIReg(ra),
+                                       mkU8(64 - imm))),
+                           mkU64(mask)),
+                     binop(Iop_And64,
+                           mkexpr(t0),
+                           mkU64(~mask))));
+
+        MARK_REG_WB(rd, t2);
+      }
+      break;
+    case 34:  /* "bfextu" */
+      {
+        ULong imm0 = decoded[n].operand_values[3];
+        ULong mask = 0;
+        t2 = newTemp(Ity_I64);
+        mask = ((-1ULL) ^ ((-1ULL << ((imm0 - imm) & 63)) << 1));
+
+        assign(t2,
+               binop(Iop_And64,
+                     binop(Iop_Or64,
+                           binop(Iop_Shr64,
+                                 getIReg(ra),
+                                 mkU8(imm)),
+                           binop(Iop_Shl64,
+                                 getIReg(ra),
+                                 mkU8(64 - imm))),
+                     mkU64(mask)));
+        MARK_REG_WB(rd, t2);
+      }
+      break;
+    case 35:  /* "bfins" */
+      {
+        ULong mask;
+        ULong imm0 = decoded[n].operand_values[3];
+        t0 = newTemp(Ity_I64);
+        t2 = newTemp(Ity_I64);
+        if (imm <= imm0)
+        {
+          mask = ((-1ULL << imm) ^ ((-1ULL << imm0) << 1));
+        }
+        else
+        {
+          mask = ((-1ULL << imm) | (-1ULL >> (63 - imm0)));
+        }
+
+        assign(t0, binop(Iop_Or64,
+                         binop(Iop_Shl64,
+                               getIReg(ra),
+                               mkU8(imm)),
+                         binop(Iop_Shr64,
+                               getIReg(ra),
+                               mkU8(64 - imm))));
+
+        assign(t2, binop(Iop_Or64,
+                         binop(Iop_And64,
+                               mkexpr(t0),
+                               mkU64(mask)),
+                         binop(Iop_And64,
+                               getIReg(rd),
+                               mkU64(~mask))));
+
+        MARK_REG_WB(rd, t2);
+      }
+      break;
+    case 36:  /* "bgez" */
+      /* Fall-through */
+    case 37:  /* "bgezt" */
+      bstmt = dis_branch(binop(Iop_CmpEQ64,
+                               binop(Iop_And64,
+                                     getIReg(ra),
+                                     mkU64(0x8000000000000000ULL)),
+                               mkU64(0x0)),
+                         imm);
+      break;
+    case 38:  /* "bgtz" */
+      /* Fall-through */
+    case 39:
+      /* "bgtzt" */
+      bstmt = dis_branch(unop(Iop_Not1,
+                              binop(Iop_CmpLE64S,
+                                    getIReg(ra),
+                                    mkU64(0))),
+                         imm);
+      break;
+    case 40:  /* "blbc" */
+      /* Fall-through */
+    case 41:  /* "blbct" */
+      bstmt = dis_branch(unop(Iop_64to1,
+                              unop(Iop_Not64, getIReg(ra))),
+                         imm);
+
+      break;
+    case 42:  /* "blbs" */
+      /* Fall-through */
+    case 43:
+      /* "blbst" */
+      bstmt = dis_branch(unop(Iop_64to1,
+                              getIReg(ra)),
+                         imm);
+      break;
+    case 44:  /* "blez" */
+      bstmt = dis_branch(binop(Iop_CmpLE64S, getIReg(ra),
+                               mkU64(0)),
+                         imm);
+      break;
+    case 45:  /* "blezt" */
+      bstmt = dis_branch(binop(Iop_CmpLE64S, getIReg(ra),
+                               mkU64(0)),
+                         imm);
+      break;
+    case 46:  /* "bltz" */
+      bstmt = dis_branch(binop(Iop_CmpLT64S, getIReg(ra),
+                               mkU64(0)),
+                         imm);
+      break;
+    case 47:  /* "bltzt" */
+      bstmt = dis_branch(binop(Iop_CmpLT64S, getIReg(ra),
+                               mkU64(0)),
+                         imm);
+      break;
+    case 48:  /* "bnez" */
+      /* Fall-through */
+    case 49:
+      /* "bnezt" */
+      bstmt = dis_branch(binop(Iop_CmpNE64, getIReg(ra),
+                               mkU64(0)),
+                         imm);
+      break;
+    case 50:  /* "clz" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_Clz64, getIReg(ra)));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 51:  /* "cmoveqz rd, ra, rb" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, IRExpr_ITE(binop(Iop_CmpEQ64, getIReg(ra), mkU64(0)),
+                            getIReg(rb), getIReg(rd)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 52:  /* "cmovnez" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, IRExpr_ITE(binop(Iop_CmpEQ64, getIReg(ra), mkU64(0)),
+                            getIReg(rd), getIReg(rb)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 53:  /* "cmpeq" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_1Uto64, binop(Iop_CmpEQ64,
+                                         getIReg(ra), getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+
+    case 54:  /* "cmpeqi" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_1Uto64, binop(Iop_CmpEQ64,
+                                        getIReg(ra),
+                                        mkU64(extend_s_8to64(imm)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 55:  /* "cmpexch" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+
+      assign(t1, getIReg(rb));
+      stmt( IRStmt_CAS(mkIRCAS(IRTemp_INVALID, t2, Iend_LE,
+                               getIReg(ra),
+                               NULL, binop(Iop_Add64,
+                                           getIReg(70),
+                                           getIReg(71)),
+                               NULL, mkexpr(t1))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 56:  /* "cmpexch4" */
+      t1 = newTemp(Ity_I32);
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I32);
+
+      assign(t1, narrowTo(Ity_I32, getIReg(rb)));
+      stmt( IRStmt_CAS(mkIRCAS(IRTemp_INVALID, t3, Iend_LE,
+                               getIReg(ra),
+                               NULL,
+                               narrowTo(Ity_I32, binop(Iop_Add64,
+                                                       getIReg(70),
+                                                       getIReg(71))),
+                               NULL,
+                               mkexpr(t1))));
+      assign(t2, unop(Iop_32Uto64, mkexpr(t3)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 57:  /* "cmples" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_1Uto64,
+                      binop(Iop_CmpLE64S, getIReg(ra), getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 58:  /* "cmpleu" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_1Uto64,
+                       binop(Iop_CmpLE64U, getIReg(ra), getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 59:  /* "cmplts" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_1Uto64,
+                      binop(Iop_CmpLT64S, getIReg(ra), getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 60:  /* "cmpltsi" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_1Uto64,
+                      binop(Iop_CmpLT64S,
+                            getIReg(ra),
+                            mkU64(extend_s_8to64(imm)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 61:
+
+      /* "cmpltu" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_1Uto64,
+                      binop(Iop_CmpLT64U, getIReg(ra), getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+
+
+      break;
+    case 62:  /* "cmpltui" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_1Uto64,
+                       binop(Iop_CmpLT64U,
+                             getIReg(ra),
+                             mkU64(imm))));
+      MARK_REG_WB(rd, t2);
+
+
+      break;
+    case 63:  /* "cmpne" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_1Uto64,
+                      binop(Iop_CmpNE64, getIReg(ra), getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+
+
+      break;
+    case 64:
+      /* Fall-through */
+    case 65:
+      /* Fall-through */
+    case 66:
+      /* Fall-through */
+    case 67:
+      /* Fall-through */
+    case 68:
+      /* Fall-through */
+    case 69:
+      /* Fall-through */
+    case 70:
+      /* Fall-through */
+    case 71:
+      /* Fall-through */
+    case 72:
+      use_dirty_helper = 1;
+      break;
+    case 73:  /* "ctz" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_Ctz64, getIReg(ra)));
+
+      MARK_REG_WB(rd, t2);
+
+
+      break;
+    case 74:  /* "dblalign" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+
+      /* t0 is the bit shift amount */
+      assign(t0, binop(Iop_Shl64,
+                       binop(Iop_And64,
+                             getIReg(rb),
+                             mkU64(7)),
+                       mkU8(3)));
+      assign(t1, binop(Iop_Sub64,
+                       mkU64(64),
+                       mkexpr(t0)));
+
+      assign(t2, binop(Iop_Or64,
+                       binop(Iop_Shl64,
+                             getIReg(ra),
+                             unop(Iop_64to8, mkexpr(t1))),
+                       binop(Iop_Shr64,
+                             getIReg(rd),
+                             unop(Iop_64to8, mkexpr(t0)))));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 75:
+      /* Fall-through */
+    case 76:
+      /* Fall-through */
+    case 77:
+      /* Fall-through */
+    case 78:
+      /* Fall-through */
+    case 79:
+      use_dirty_helper = 1;
+      break;
+    case 80:  /* "exch" */
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t2,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      mkU64(0x0),
+                      NULL,
+                      getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 81:  /* "exch4 rd, ra, rb" */
+      t0 = newTemp(Ity_I32);
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t0,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      mkU32(0x0),
+                      NULL,
+                      narrowTo(Ity_I32,
+                               getIReg(rb)))));
+      assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 82:
+      /* Fall-through */
+    case 83:
+      /* Fall-through */
+    case 84:
+      /* Fall-through */
+    case 85:
+      /* Fall-through */
+    case 86:
+      /* Fall-through */
+    case 87:
+      /* Fall-through */
+    case 88:
+      /* Fall-through */
+    case 89:
+      use_dirty_helper = 1;
+      break;
+    case 90:  /* "fetchadd" */
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t2,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      // fetchadd=3
+                      mkU64(0x3),
+                      NULL,
+                      getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 91:  /* "fetchadd4" */
+      t0 = newTemp(Ity_I32);
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t0,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      // fetchadd=3
+                      mkU32(0x3),
+                      NULL,
+                      narrowTo(Ity_I32,
+                               getIReg(rb)))));
+      assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
+      MARK_REG_WB(rd, t2);
+
+      break;
+    case 92:  /* "fetchaddgez" */
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t2,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      // fetchaddgez=5
+                      mkU64(0x5),
+                      NULL,
+                      getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 93:  /* "fetchaddgez4" */
+      t0 = newTemp(Ity_I32);
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t0,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      // fetchaddgez=5
+                      mkU32(0x5),
+                      NULL,
+                      narrowTo(Ity_I32,
+                               getIReg(rb)))));
+      assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 94:  /* "fetchand" */
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t2,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      mkU64(0x2),
+                      NULL,
+                      getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 95:
+      /* Expected-value encoding passed to mkIRCAS in this file:
+         0: exch/exch4        1: cmpexch/cmpexch4
+         2: fetchand/4        3: fetchadd/4
+         4: fetchor/4         5: fetchaddgez/4
+      */
+      /* "fetchand4" */
+      t0 = newTemp(Ity_I32);
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t0,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      mkU32(0x2),
+                      NULL,
+                      narrowTo(Ity_I32,
+                               getIReg(rb)))));
+      assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 96:  /* "fetchor" */
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t2,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      mkU64(0x4),
+                      NULL,
+                      getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 97:  /* "fetchor4" */
+      t0 = newTemp(Ity_I32);
+      t2 = newTemp(Ity_I64);
+      stmt( IRStmt_CAS(
+              mkIRCAS(IRTemp_INVALID,
+                      t0,
+                      Iend_LE,
+                      getIReg(ra),
+                      NULL,
+                      mkU32(0x4),
+                      NULL,
+                      narrowTo(Ity_I32,
+                               getIReg(rb)))));
+      assign(t2, unop(Iop_32Sto64, mkexpr(t0)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 98:
+      /* Fall-through */
+    case 99:
+      /* Fall-through */
+    case 100:
+      use_dirty_helper = 1;
+      break;
+    case 101: /* "fnop": no-op, nothing to generate */
+      break;
+    case 102:
+      /* Fall-through */
+    case 103:
+      /* Fall-through */
+    case 104:
+      /* Fall-through */
+    case 105:
+      /* Fall-through */
+    case 106:
+      /* Fall-through */
+    case 107:
+      /* Fall-through */
+    case 108:
+      use_dirty_helper = 1;
+      break;
+    case 109:
+      /* Fall-through */
+    case 110:
+      /* Fall-through */
+    case 111:
+      use_dirty_helper = 1;
+      break;
+    case 112:  /* "iret" */
+      next = mkU64(guest_PC_curr_instr + 8);
+      jumpkind = Ijk_Ret;
+      break;
+    case 113:  /* "j" */
+      next = mkU64(imm);
+      /* set steering address. */
+      steering_pc = imm;
+      jumpkind = Ijk_Boring;
+      break;
+    case 114:  /* "jal" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, mkU64(guest_PC_curr_instr + 8));
+      /* set steering address. */
+      steering_pc = imm;
+      next = mkU64(imm);
+      jumpkind = Ijk_Call;
+      MARK_REG_WB(55, t2);
+      break;
+    case 115:  /* "jalr" */
+      /* Fall-through */
+    case 116:  /* "jalrp" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t1, getIReg(ra));
+      assign(t2, mkU64(guest_PC_curr_instr + 8));
+      next = mkexpr(t1);
+      jumpkind = Ijk_Call;
+      MARK_REG_WB(55, t2);
+      break;
+    case 117:  /* "jr" */
+      /* Fall-through */
+    case 118:  /* "jrp" */
+      next = getIReg(ra);
+      jumpkind = Ijk_Boring;
+      break;
+    case 119:  /* "ld" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, load(Ity_I64, (getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 120:  /* "ld1s" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_8Sto64,
+                       load(Ity_I8, (getIReg(ra)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 121:  /* "ld1s_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      assign(t2,  unop(Iop_8Sto64,
+                       load(Ity_I8, (getIReg(ra)))));
+      MARK_REG_WB(ra, t1);
+      MARK_REG_WB(rd, t2);
+      break;
+    case 122:  /* "ld1u" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_8Uto64,
+                       load(Ity_I8, (getIReg(ra)))));
+      MARK_REG_WB(rd, t2);
+
+      break;
+    case 123:  /* "ld1u_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t1,  binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      assign(t2,  unop(Iop_8Uto64,
+                       load(Ity_I8, (getIReg(ra)))));
+      MARK_REG_WB(ra, t1);
+      MARK_REG_WB(rd, t2);
+      break;
+    case 124:  /* "ld2s" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_16Sto64,
+                       load(Ity_I16, getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 125:  /* "ld2s_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t1,  binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      assign(t2,  unop(Iop_16Sto64,
+                       load(Ity_I16, getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      MARK_REG_WB(ra, t1);
+      break;
+    case 126: /* "ld2u" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_16Uto64,
+                       load(Ity_I16, getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 127: /* "ld2u_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t1,  binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      assign(t2,  unop(Iop_16Uto64,
+                       load(Ity_I16, getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      MARK_REG_WB(ra, t1);
+      break;
+    case 128: /* "ld4s" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_32Sto64,
+                       load(Ity_I32, (getIReg(ra)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 129: /* "ld4s_add" */
+      t2 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      assign(t1,  binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      assign(t2,  unop(Iop_32Sto64,
+                       load(Ity_I32, (getIReg(ra)))));
+      MARK_REG_WB(rd, t2);
+      MARK_REG_WB(ra, t1);
+      break;
+    case 130:  /* "ld4u" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_32Uto64,
+                       load(Ity_I32, getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 131:  /* "ld4u_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      assign(t2,  unop(Iop_32Uto64,
+                       load(Ity_I32, getIReg(ra))));
+      MARK_REG_WB(ra, t1);
+      MARK_REG_WB(rd, t2);
+      break;
+    case 132:  /* "ld_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t1, load(Ity_I64, getIReg(ra)));
+      assign(t2, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      MARK_REG_WB(ra, t2);
+      MARK_REG_WB(rd, t1);
+      break;
+    case 133:  /* "ldna" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, load(Ity_I64,
+                      binop(Iop_And64,
+                            getIReg(ra),
+                            unop(Iop_Not64,
+                                 mkU64(7)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 134:  /* "ldna_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+
+      assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      assign(t2, load(Ity_I64,
+                      binop(Iop_And64,
+                            getIReg(ra),
+                            unop(Iop_Not64,
+                                 mkU64(7)))));
+      MARK_REG_WB(ra, t1);
+      MARK_REG_WB(rd, t2);
+      break;
+    case 135:  /* "ldnt" */
+      /* Valgrind IR has no non-temporal load; use a normal load. */
+      t2 = newTemp(Ity_I64);
+      assign(t2, load(Ity_I64, (getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 136:  /* "ldnt1s" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_8Sto64,
+                       load(Ity_I8, (getIReg(ra)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 137:  /* "ldnt1s_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_8Sto64,
+                       load(Ity_I8, (getIReg(ra)))));
+      assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      MARK_REG_WB(ra, t1);
+      MARK_REG_WB(rd, t2);
+      break;
+    case 138:  /* "ldnt1u" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_8Uto64,
+                       load(Ity_I8, (getIReg(ra)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 139:  /* "ldnt1u_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+
+      assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      assign(t2,  unop(Iop_8Uto64,
+                       load(Ity_I8, (getIReg(ra)))));
+
+      MARK_REG_WB(ra, t1);
+      MARK_REG_WB(rd, t2);
+      break;
+    case 140:  /* "ldnt2s" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_16Sto64,
+                       load(Ity_I16, getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 141:  /* "ldnt2s_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_16Sto64,
+                       load(Ity_I16, getIReg(ra))));
+      assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      MARK_REG_WB(ra, t1);
+      MARK_REG_WB(rd, t2);
+      break;
+    case 142:  /* "ldnt2u" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_16Uto64,
+                       load(Ity_I16, getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 143:  /* "ldnt2u_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_16Uto64,
+                       load(Ity_I16, getIReg(ra))));
+      assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      MARK_REG_WB(ra, t1);
+      MARK_REG_WB(rd, t2);
+      break;
+    case 144:  /* "ldnt4s" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_32Sto64,
+                       load(Ity_I32, (getIReg(ra)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 145:  /* "ldnt4s_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_32Sto64,
+                       load(Ity_I32, (getIReg(ra)))));
+      assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      MARK_REG_WB(rd, t2);
+      MARK_REG_WB(ra, t1);
+      break;
+    case 146:  /* "ldnt4u" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_32Uto64,
+                       load(Ity_I32, getIReg(ra))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 147:  /* "ldnt4u_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_32Uto64,
+                       load(Ity_I32, getIReg(ra))));
+      assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      MARK_REG_WB(rd, t2);
+      MARK_REG_WB(ra, t1);
+      break;
+    case 148:  /* "ldnt_add" */
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t1, load(Ity_I64, getIReg(ra)));
+      assign(t2, binop(Iop_Add64, getIReg(ra), mkU64(imm)));
+      MARK_REG_WB(rd, t1);
+      MARK_REG_WB(ra, t2);
+      break;
+    case 149:  /* "lnk" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  mkU64(guest_PC_curr_instr + 8));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 150:  /* "mf" */
+      use_dirty_helper = 1;
+      break;
+    case 151:  /* "mfspr" */
+      t2 = newTemp(Ity_I64);
+      if (imm == 0x2780) { // Get Cmpexch value
+	 assign(t2, getIReg(70));
+	 MARK_REG_WB(rd, t2);
+      } else if (imm == 0x2580) { // Get EX_CONTEXT_0_0
+         assign(t2, getIReg(576 / 8));
+         MARK_REG_WB(rd, t2);
+      } else if (imm == 0x2581) { // Get EX_CONTEXT_0_1
+         assign(t2, getIReg(584 / 8));
+         MARK_REG_WB(rd, t2);
+      } else
+        use_dirty_helper = 1;
+      break;
+    case 152:  /* "mm" */
+      use_dirty_helper = 1;
+      break;
+    case 153:  /* "mnz" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_And64,
+                       unop(Iop_1Sto64, binop(Iop_CmpNE64,
+                                              getIReg(ra),
+                                              mkU64(0))),
+                       getIReg(rb)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 154:  /* "mtspr imm, ra" */
+      if (imm == 0x2780) // Set Cmpexch value
+        putIReg(70, getIReg(ra));
+      else if (imm == 0x2580) // set EX_CONTEXT_0_0
+        putIReg(576/8, getIReg(ra));
+      else if (imm == 0x2581) // set EX_CONTEXT_0_1
+        putIReg(584/8, getIReg(ra));
+      else
+        use_dirty_helper = 1;
+      break;
+    case 155:  /* "mul_hs_hs" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_MullS32,
+                       unop(Iop_64to32,
+                            binop(Iop_Shr64,
+                                  getIReg(ra),
+                                  mkU8(32))),
+                       unop(Iop_64to32,
+                            binop(Iop_Shr64,
+                                  getIReg(rb),
+                                  mkU8(32)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 156:  /* "mul_hs_hu" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+
+      assign(t0, unop(Iop_32Sto64,
+                      unop(Iop_64to32,
+                           binop(Iop_Shr64, getIReg(ra), mkU8(32)))));
+      assign(t1, binop(Iop_MullU32,
+                       unop(Iop_64to32, mkexpr(t0)),
+                       unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32)))));
+      assign(t3, binop(Iop_MullU32,
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              mkexpr(t0),
+                                              mkU8(32))),
+                       unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32)))));
+      assign(t2, binop(Iop_Add64,
+                       mkexpr(t1),
+                       binop(Iop_Shl64,
+                             mkexpr(t3),
+                             mkU8(32))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 157:  /* "mul_hs_ls" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_MullS32,
+                       unop(Iop_64to32,
+                            binop(Iop_Shr64,
+                                  getIReg(ra),
+                                  mkU8(32))),
+                       unop(Iop_64to32,
+                            getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 158:  /* "mul_hs_lu" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+
+      assign(t0, unop(Iop_32Sto64,
+                      unop(Iop_64to32,
+                           binop(Iop_Shr64, getIReg(ra), mkU8(32)))));
+      assign(t1, binop(Iop_MullU32,
+                       unop(Iop_64to32, mkexpr(t0)),
+                       unop(Iop_64to32, getIReg(rb))));
+      assign(t3, binop(Iop_MullU32,
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              mkexpr(t0),
+                                              mkU8(32))),
+                       unop(Iop_64to32, getIReg(rb))));
+      assign(t2, binop(Iop_Add64,
+                       mkexpr(t1),
+                       binop(Iop_Shl64,
+                             mkexpr(t3),
+                             mkU8(32))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 159:  /* "mul_hu_hu" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_MullU32,
+                       unop(Iop_64to32,
+                            binop(Iop_Shr64,
+                                  getIReg(ra),
+                                  mkU8(32))),
+                       unop(Iop_64to32,
+                            binop(Iop_Shr64,
+                                  getIReg(rb),
+                                  mkU8(32)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 160:  /* "mul_hu_ls" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+
+      assign(t0, unop(Iop_32Sto64,
+                      unop(Iop_64to32,
+                           getIReg(ra))));
+
+      assign(t1, binop(Iop_MullU32,
+                       unop(Iop_64to32, mkexpr(t0)),
+                       unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32)))));
+      assign(t3, binop(Iop_MullU32,
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              mkexpr(t0),
+                                              mkU8(32))),
+                       unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32)))));
+      assign(t2, binop(Iop_Add64,
+                       mkexpr(t1),
+                       binop(Iop_Shl64,
+                             mkexpr(t3),
+                             mkU8(32))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 161:  /* "mul_hu_lu" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_MullU32,
+                       unop(Iop_64to32,
+                            binop(Iop_Shr64,
+                                  getIReg(ra),
+                                  mkU8(32))),
+                       unop(Iop_64to32,
+                            getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 162:  /* "mul_ls_ls" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_MullS32,
+                       unop(Iop_64to32, getIReg(ra)),
+                       unop(Iop_64to32, getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 163:  /* "mul_ls_lu" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+
+      assign(t0, unop(Iop_32Sto64,
+                      unop(Iop_64to32, getIReg(ra))));
+      assign(t1, binop(Iop_MullU32,
+                       unop(Iop_64to32, mkexpr(t0)),
+                       unop(Iop_64to32, getIReg(rb))));
+      assign(t3, binop(Iop_MullU32,
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              mkexpr(t0),
+                                              mkU8(32))),
+                       unop(Iop_64to32, getIReg(rb))));
+      assign(t2, binop(Iop_Add64,
+                       mkexpr(t1),
+                       binop(Iop_Shl64,
+                             mkexpr(t3),
+                             mkU8(32))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 164:   /* "mul_lu_lu" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_MullU32,
+                       unop(Iop_64to32, getIReg(ra)),
+                       unop(Iop_64to32, getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 165:   /* "mula_hs_hs" */
+      t0 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+
+      assign(t0, binop(Iop_MullS32,
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              getIReg(ra), mkU8(32))),
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              getIReg(rb), mkU8(32)))));
+      assign(t2, binop(Iop_Add64, getIReg(rd), mkexpr(t0)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 166:   /* "mula_hs_hu" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+      t4 = newTemp(Ity_I64);
+      assign(t0, unop(Iop_32Sto64,
+                      unop(Iop_64to32,
+                           binop(Iop_Shr64, getIReg(ra), mkU8(32)))));
+      assign(t1, binop(Iop_MullU32,
+                       unop(Iop_64to32, mkexpr(t0)),
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              getIReg(rb), mkU8(32)))));
+      assign(t3, binop(Iop_MullU32,
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              mkexpr(t0),
+                                              mkU8(32))),
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              getIReg(rb), mkU8(32)))));
+      assign(t2, binop(Iop_Add64,
+                       mkexpr(t1),
+                       binop(Iop_Shl64,
+                             mkexpr(t3),
+                             mkU8(32))));
+      assign(t4, binop(Iop_Add64, getIReg(rd), mkexpr(t2)));
+      MARK_REG_WB(rd, t4);
+      break;
+    case 167:   /* "mula_hs_ls" */
+      t2 = newTemp(Ity_I64);
+      t4 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_MullS32,
+                       unop(Iop_64to32,
+                            binop(Iop_Shr64,
+                                  getIReg(ra),
+                                  mkU8(32))),
+                       unop(Iop_64to32,
+                            getIReg(rb))));
+      assign(t4, binop(Iop_Add64, getIReg(rd), mkexpr(t2)));
+      MARK_REG_WB(rd, t4);
+      break;
+    case 168:   /* "mula_hs_lu" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+      t4 = newTemp(Ity_I64);
+      assign(t0, unop(Iop_32Sto64,
+                      unop(Iop_64to32,
+                           binop(Iop_Shr64, getIReg(ra), mkU8(32)))));
+      assign(t1, binop(Iop_MullU32,
+                       unop(Iop_64to32, mkexpr(t0)),
+                       unop(Iop_64to32, getIReg(rb))));
+      assign(t3, binop(Iop_MullU32,
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              mkexpr(t0),
+                                              mkU8(32))),
+                       unop(Iop_64to32, getIReg(rb))));
+      assign(t2, binop(Iop_Add64,
+                       mkexpr(t1),
+                       binop(Iop_Shl64,
+                             mkexpr(t3),
+                             mkU8(32))));
+      assign(t4, binop(Iop_Add64, getIReg(rd), mkexpr(t2)));
+      MARK_REG_WB(rd, t4);
+      break;
+    case 169:   /* "mula_hu_hu" */
+      use_dirty_helper = 1;
+      break;
+    case 170:   /* "mula_hu_ls" */
+      use_dirty_helper = 1;
+      break;
+    case 171:   /* "mula_hu_lu" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Add64,
+                       binop(Iop_MullU32,
+                             unop(Iop_64to32,
+                                  binop(Iop_Shr64,
+                                        getIReg(ra),
+                                        mkU8(32))),
+                             unop(Iop_64to32,
+                                  getIReg(rb))),
+                       getIReg(rd)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 172:  /* "mula_ls_ls" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Add64,
+                       getIReg(rd),
+                       binop(Iop_MullS32,
+                             unop(Iop_64to32, getIReg(ra)),
+                             unop(Iop_64to32, getIReg(rb)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 173:  /* "mula_ls_lu" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+
+      assign(t0, unop(Iop_32Sto64,
+                      unop(Iop_64to32, getIReg(ra))));
+      assign(t1, binop(Iop_MullU32,
+                       unop(Iop_64to32, mkexpr(t0)),
+                       unop(Iop_64to32, getIReg(rb))));
+      assign(t3, binop(Iop_MullU32,
+                       unop(Iop_64to32, binop(Iop_Shr64,
+                                              mkexpr(t0),
+                                              mkU8(32))),
+                       unop(Iop_64to32, getIReg(rb))));
+      assign(t2, binop(Iop_Add64,
+                       getIReg(rd),
+                       binop(Iop_Add64,
+                             mkexpr(t1),
+                             binop(Iop_Shl64,
+                                   mkexpr(t3),
+                                   mkU8(32)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 174:  /* "mula_lu_lu" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Add64,
+                       binop(Iop_MullU32,
+                             unop(Iop_64to32,
+                                  getIReg(ra)),
+                             unop(Iop_64to32,
+                                  getIReg(rb))),
+                       getIReg(rd)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 175:   /* "mulax" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_32Sto64,
+                      unop(Iop_64to32,
+                           binop(Iop_Add64,
+                                 getIReg(rd),
+                                 binop(Iop_MullU32,
+                                       narrowTo(Ity_I32, getIReg(ra)),
+                                       narrowTo(Ity_I32, getIReg(rb)))))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 176:   /* "mulx" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_32Sto64,
+                      unop(Iop_64to32,
+                           binop(Iop_MullU32,
+                                 narrowTo(Ity_I32, getIReg(ra)),
+                                 narrowTo(Ity_I32, getIReg(rb))))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 177:   /* "mz" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_And64,
+                       unop(Iop_1Sto64, binop(Iop_CmpEQ64,
+                                              getIReg(ra),
+                                              mkU64(0))),
+                       getIReg(rb)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 178:  /* "nap" */
+      break;
+    case 179:  /* "nop" */
+      break;
+    case 180:  /* "nor" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_Not64,
+                      binop(Iop_Or64,
+                            getIReg(ra),
+                            getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 181:  /* "or" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Or64,
+                       getIReg(ra),
+                       getIReg(rb)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 182:  /* "ori" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Or64,
+                       getIReg(ra),
+                       mkU64(imm)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 183:
+      /* Fall-through */
+    case 184:
+      /* Fall-through */
+    case 185:
+      use_dirty_helper = 1;
+      break;
+    case 186:  /* "rotl" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t0, binop(Iop_Shl64,
+                       getIReg(ra),
+                       unop(Iop_64to8, getIReg(rb))));
+      assign(t1, binop(Iop_Shr64,
+                       getIReg(ra),
+                       unop(Iop_64to8, binop(Iop_Sub64,
+                                             mkU64(0),
+                                             getIReg(rb)))));
+      assign(t2, binop(Iop_Or64, mkexpr(t0), mkexpr(t1)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 187:  /* "rotli" */
+      t0 = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I64);
+      t2 = newTemp(Ity_I64);
+      assign(t0, binop(Iop_Shl64,
+                       getIReg(ra),
+                       mkU8(imm)));
+      assign(t1, binop(Iop_Shr64,
+                       getIReg(ra),
+                       mkU8(0 - imm)));
+      assign(t2, binop(Iop_Or64, mkexpr(t0), mkexpr(t1)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 188:   /* "shl" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Shl64,
+                       getIReg(ra),
+                       unop(Iop_64to8, getIReg(rb))));
+      MARK_REG_WB(rd, t2);
+
+      break;
+    case 189:   /* "shl16insli" */
+      t2 = newTemp(Ity_I64);
+      t3 = newTemp(Ity_I64);
+      assign(t3, binop(Iop_Shl64, getIReg(ra), mkU8(16)));
+      imm &= 0xFFFFULL;
+      if (imm & 0x8000)
+      {
+        t4 = newTemp(Ity_I64);
+        assign(t4, mkU64(imm));
+        assign(t2, binop(Iop_Add64, mkexpr(t3), mkexpr(t4)));
+      }
+      else
+      {
+        assign(t2, binop(Iop_Add64, mkexpr(t3), mkU64(imm)));
+      }
+      MARK_REG_WB(rd, t2);
+
+      break;
+    case 190:   /* "shl1add" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Add64,
+                       binop(Iop_Shl64,
+                             getIReg(ra), mkU8(1)),
+                       getIReg(rb)));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 191:   /* "shl1addx" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,
+             unop(Iop_32Sto64,
+                  unop(Iop_64to32,
+                       binop(Iop_Add64,
+                             binop(Iop_Shl64,
+                                   getIReg(ra), mkU8(1)),
+                             getIReg(rb)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 192:   /* "shl2add" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Add64,
+                       binop(Iop_Shl64,
+                             getIReg(ra), mkU8(2)),
+                       getIReg(rb)));
+
+      MARK_REG_WB(rd, t2);
+
+      break;
+    case 193:   /* "shl2addx" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,
+             unop(Iop_32Sto64,
+                  unop(Iop_64to32,
+                       binop(Iop_Add64,
+                             binop(Iop_Shl64,
+                                   getIReg(ra), mkU8(2)),
+                             getIReg(rb)))));
+      MARK_REG_WB(rd, t2);
+
+      break;
+    case 194:   /* "shl3add" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Add64,
+                       binop(Iop_Shl64,
+                             getIReg(ra), mkU8(3)),
+                       getIReg(rb)));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 195:   /* "shl3addx" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,
+             unop(Iop_32Sto64,
+                  unop(Iop_64to32,
+                       binop(Iop_Add64,
+                             binop(Iop_Shl64,
+                                   getIReg(ra), mkU8(3)),
+                             getIReg(rb)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 196:   /* "shli" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Shl64, getIReg(ra),
+                       mkU8(imm)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 197:   /* "shlx" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_32Sto64,
+                      binop(Iop_Shl32,
+                            narrowTo(Ity_I32, getIReg(ra)),
+                            narrowTo(Ity_I8, getIReg(rb)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 198:   /* "shlxi" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, signExtend(binop(Iop_Shl32,
+                                  narrowTo(Ity_I32, getIReg(ra)),
+                                  mkU8(imm)),
+                            32));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 199:  /* "shrs" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Sar64, getIReg(ra),
+                       narrowTo(Ity_I8, getIReg(rb))));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 200:  /* "shrsi" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Sar64, getIReg(ra),
+                       mkU8(imm)));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 201:  /* "shru" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Shr64,
+                       getIReg(ra),
+                       narrowTo(Ity_I8, (getIReg(rb)))));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 202:  /* "shrui" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Shr64, getIReg(ra), mkU8(imm)));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 203:  /* "shrux" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_32Sto64,
+                      (binop(Iop_Shr32,
+                             narrowTo(Ity_I32, getIReg(ra)),
+                             narrowTo(Ity_I8, getIReg(rb))))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 204:  /* "shruxi" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, unop(Iop_32Sto64,
+                      (binop(Iop_Shr32,
+                             narrowTo(Ity_I32, getIReg(ra)),
+                             mkU8(imm)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 205:  /* "shufflebytes" */
+      use_dirty_helper = 1;
+      break;
+    case 206:  /* "st" */
+      store(getIReg(ra),  getIReg(rb));
+      break;
+    case 207:  /* "st1" */
+      store(getIReg(ra),  narrowTo(Ity_I8, getIReg(rb)));
+      break;
+    case 208:  /* "st1_add" */
+      t2 = newTemp(Ity_I64);
+      store(getIReg(opd[0]),  narrowTo(Ity_I8, getIReg(opd[1])));
+      assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
+      MARK_REG_WB(opd[0], t2);
+      break;
+    case 209:  /* "st2" */
+      store(getIReg(ra),  narrowTo(Ity_I16, getIReg(rb)));
+      break;
+    case 210:  /* "st2_add" */
+      t2 = newTemp(Ity_I64);
+      store(getIReg(opd[0]),  narrowTo(Ity_I16, getIReg(opd[1])));
+      assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
+      MARK_REG_WB(opd[0], t2);
+      break;
+    case 211:  /* "st4" */
+      store(getIReg(ra),  narrowTo(Ity_I32, getIReg(rb)));
+      break;
+    case 212:  /* "st4_add" */
+      t2 = newTemp(Ity_I64);
+      store(getIReg(opd[0]),  narrowTo(Ity_I32, getIReg(opd[1])));
+      assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
+      MARK_REG_WB(opd[0], t2);
+      break;
+    case 213:  /* "st_add" */
+      t2 = newTemp(Ity_I64);
+      store(getIReg(opd[0]),  getIReg(opd[1]));
+      assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
+      MARK_REG_WB(opd[0], t2);
+      break;
+    case 214:  /* "stnt" */
+      store(getIReg(ra),  getIReg(rb));
+      break;
+    case 215:  /* "stnt1" */
+      store(getIReg(ra),  narrowTo(Ity_I8, getIReg(rb)));
+      break;
+    case 216:  /* "stnt1_add" */
+      t2 = newTemp(Ity_I64);
+      store(getIReg(opd[0]),  narrowTo(Ity_I8, getIReg(opd[1])));
+      assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
+      MARK_REG_WB(opd[0], t2);
+      break;
+    case 217:  /* "stnt2" */
+      store(getIReg(ra),  narrowTo(Ity_I16, getIReg(rb)));
+      break;
+    case 218:  /* "stnt2_add" */
+      t2 = newTemp(Ity_I64);
+      store(getIReg(opd[0]),  narrowTo(Ity_I16, getIReg(opd[1])));
+      assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
+      MARK_REG_WB(opd[0], t2);
+      break;
+    case 219:  /* "stnt4" */
+      store(getIReg(ra),  narrowTo(Ity_I32, getIReg(rb)));
+      break;
+    case 220:  /* "stnt4_add" */
+      t2 = newTemp(Ity_I64);
+      store(getIReg(opd[0]),  narrowTo(Ity_I32, getIReg(opd[1])));
+      assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
+      MARK_REG_WB(opd[0], t2);
+      break;
+    case 221:  /* "stnt_add" */
+      t2 = newTemp(Ity_I64);
+      store(getIReg(opd[0]),  getIReg(opd[1]));
+      assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2])));
+      MARK_REG_WB(opd[0], t2);
+      break;
+    case 222:  /* "sub" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Sub64, getIReg(ra),
+                       getIReg(rb)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 223:  /* "subx" */
+      t2 = newTemp(Ity_I64);
+      assign(t2,  unop(Iop_32Sto64,
+                       binop(Iop_Sub32,
+                             narrowTo(Ity_I32, getIReg(ra)),
+                             narrowTo(Ity_I32, getIReg(rb)))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 224:  /* "subxsc" */
+      use_dirty_helper = 1;
+      break;
+    case 225:  /* "swint0" */
+      vex_printf( "\n *** swint0 ***\n");
+      vassert(0);
+      break;
+    case 226:  /* "swint1" */
+      next = mkU64(guest_PC_curr_instr + 8);
+      jumpkind = Ijk_Sys_syscall;
+      break;
+    case 227:  /* "swint2" */
+      vex_printf( "\n *** swint2 ***\n");
+      vassert(0);
+      break;
+    case 228:  /* "swint3" */
+      vex_printf( "\n *** swint3 ***\n");
+      vassert(0);
+      break;
+    case 229:
+      /* Fall-through */
+    case 230:
+      /* Fall-through */
+    case 231:
+      /* Fall-through */
+    case 232:
+      /* Fall-through */
+    case 233:
+      /* Fall-through */
+    case 234:
+      /* Fall-through */
+    case 235:
+      /* Fall-through */
+    case 236:
+      /* Fall-through */
+    case 237:
+      use_dirty_helper = 1;
+      break;
+    case 238:  /* "v1cmpeq" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_CmpEQ8x8, getIReg(ra),
+                       getIReg(rb)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 239:  /* "v1cmpeqi" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_CmpEQ8x8, getIReg(ra),
+                       mkU64(imm)));
+
+      MARK_REG_WB(rd, t2);
+      break;
+    case 240:
+      /* Fall-through */
+    case 241:
+      /* Fall-through */
+    case 242:
+      /* Fall-through */
+    case 243:
+      /* Fall-through */
+    case 244:
+      /* Fall-through */
+    case 245:
+      use_dirty_helper = 1;
+      break;
+    case 246:  /* "v1cmpne" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_CmpEQ8x8,
+                       binop(Iop_CmpEQ8x8, getIReg(ra),
+                             getIReg(rb)),
+                       getIReg(63)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 247:
+      /* Fall-through */
+    case 248:
+      /* Fall-through */
+    case 249:
+      /* Fall-through */
+    case 250:
+      /* Fall-through */
+    case 251:
+      /* Fall-through */
+    case 252:
+      /* Fall-through */
+    case 253:
+      /* Fall-through */
+    case 254:
+      /* Fall-through */
+    case 255:
+      /* Fall-through */
+    case 256:
+      /* Fall-through */
+    case 257:
+      /* Fall-through */
+    case 258:
+      /* Fall-through */
+    case 259:
+      /* Fall-through */
+    case 260:
+      /* Fall-through */
+    case 261:
+      /* Fall-through */
+    case 262:
+      /* Fall-through */
+    case 263:
+      /* Fall-through */
+    case 264:
+      /* Fall-through */
+    case 265:
+      /* Fall-through */
+    case 266:
+      /* Fall-through */
+    case 267:
+      /* Fall-through */
+    case 268:
+      /* Fall-through */
+    case 269:
+      /* Fall-through */
+    case 270:
+      /* Fall-through */
+    case 271:
+      /* Fall-through */
+    case 272:
+      /* Fall-through */
+    case 273:
+      /* Fall-through */
+    case 274:
+      use_dirty_helper = 1;
+      break;
+    case 275:  /* "v1shrui" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Shr8x8,
+                       getIReg(ra),
+                       mkU64(imm)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 276:
+      /* Fall-through */
+    case 277:
+      /* Fall-through */
+    case 278:
+      /* Fall-through */
+    case 279:
+      /* Fall-through */
+    case 280:
+      /* Fall-through */
+    case 281:
+      /* Fall-through */
+    case 282:
+      /* Fall-through */
+    case 283:
+      /* Fall-through */
+    case 284:
+      /* Fall-through */
+    case 285:
+      /* Fall-through */
+    case 286:
+      /* Fall-through */
+    case 287:
+      /* Fall-through */
+    case 288:
+      /* Fall-through */
+    case 289:
+      /* Fall-through */
+    case 290:
+      /* Fall-through */
+    case 291:
+      /* Fall-through */
+    case 292:
+      /* Fall-through */
+    case 293:
+      /* Fall-through */
+    case 294:
+      /* Fall-through */
+    case 295:
+      /* Fall-through */
+    case 296:
+      /* Fall-through */
+    case 297:
+      /* Fall-through */
+    case 298:
+      /* Fall-through */
+    case 299:
+      /* Fall-through */
+    case 300:
+      /* Fall-through */
+    case 301:
+      /* Fall-through */
+    case 302:
+      /* Fall-through */
+    case 303:
+      /* Fall-through */
+    case 304:
+      /* Fall-through */
+    case 305:
+      /* Fall-through */
+    case 306:
+      /* Fall-through */
+    case 307:
+      /* Fall-through */
+    case 308:
+      /* Fall-through */
+    case 309:
+      /* Fall-through */
+    case 310:
+      /* Fall-through */
+    case 311:
+      /* Fall-through */
+    case 312:
+      /* Fall-through */
+    case 313:
+      /* Fall-through */
+    case 314:
+      /* Fall-through */
+    case 315:
+      /* Fall-through */
+    case 316:
+      /* Fall-through */
+    case 317:
+      /* Fall-through */
+    case 318:
+      /* Fall-through */
+    case 319:
+      /* Fall-through */
+    case 320:
+      /* Fall-through */
+    case 321:
+      /* Fall-through */
+    case 322:
+      /* Fall-through */
+    case 323:
+      use_dirty_helper = 1;
+      break;
+    case 324:   /* "v4int_l" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Or64,
+                       binop(Iop_Shl64,
+                             getIReg(ra),
+                             mkU8(32)),
+                       binop(Iop_And64,
+                             getIReg(rb),
+                             mkU64(0xFFFFFFFF))));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 325:
+      /* Fall-through */
+    case 326:
+      /* Fall-through */
+    case 327:
+      /* Fall-through */
+    case 328:
+      /* Fall-through */
+    case 329:
+      /* Fall-through */
+    case 330:
+      /* Fall-through */
+    case 331:
+      use_dirty_helper = 1;
+      break;
+    case 332:   /* "wh64" */     /* Ignore store hint */
+      break;
+    case 333:   /* "xor" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Xor64,
+                       getIReg(ra),
+                       getIReg(rb)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 334:   /* "xori" */
+      t2 = newTemp(Ity_I64);
+      assign(t2, binop(Iop_Xor64,
+                       getIReg(ra),
+                       mkU64(imm)));
+      MARK_REG_WB(rd, t2);
+      break;
+    case 335:  /* "(null)" */   /* ignore */
+      break;
+    default:
+
+    decode_failure:
+      vex_printf("error: %d\n",  (Int)opcode);
+
+      /* All decode failures end up here. */
+      vex_printf("vex tilegx->IR: unhandled instruction: "
+                 "%s 0x%llx 0x%llx 0x%llx 0x%llx\n",
+                 decoded[n].opcode->name,
+                 opd[0], opd[1], opd[2], opd[3]);
+
+      /* Tell the dispatcher that this insn cannot be decoded, and so has
+         not been executed, and (is currently) the next to be executed. */
+      stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
+                      mkU64(guest_PC_curr_instr)));
+      dres.whatNext = Dis_StopHere;
+      dres.len = 0;
+      return dres;
+    }
+
+    /* Hook the dirty helper for rare instructions. */
+    if (use_dirty_helper)
+    {
+      Int i = 0;
+      Int wbc = 0;
+      IRExpr *opc_oprand[5];
+
+      opc_oprand[0] = mkU64(opcode);
+
+      /* Get the operand registers or immediate. */
+      for (i = 0 ; i < 4; i++)
+      {
+        opc_oprand[i + 1] = NULL;
+
+        if (opd_dst_map & (1ULL << i))
+        {
+          tb[wbc] = newTemp(Ity_I64);
+          wbc++;
+          opc_oprand[i + 1] = getIReg(opd[i]);
+        }
+        else if (opd_imm_map & (1ULL << i))
+          opc_oprand[i + 1] = mkU64(opd[i]);
+        else if (opd_src_map & (1ULL << i))
+          opc_oprand[i + 1] = getIReg(opd[i]);
+        else
+          opc_oprand[i + 1] = mkU64(0xfeee);
+      }
+
+      IRExpr **args = mkIRExprVec_5(opc_oprand[0], opc_oprand[1],
+                                    opc_oprand[2], opc_oprand[3],
+                                    opc_oprand[4]);
+      IRDirty *genIR = NULL;
+
+      switch (wbc) {
+      case 0:
+        {
+          genIR = unsafeIRDirty_0_N (0/*regparms*/,
+                                     "tilegx_dirtyhelper_gen",
+                                     &tilegx_dirtyhelper_gen,
+                                     args);
+        }
+        break;
+      case 1:
+        {
+          genIR = unsafeIRDirty_1_N (tb[0],
+                                     0/*regparms*/,
+                                     "tilegx_dirtyhelper_gen",
+                                     &tilegx_dirtyhelper_gen,
+                                     args);
+        }
+        break;
+      default:
+        vex_printf("opc = %d\n", (Int)opcode);
+        vassert(0);
+      }
+
+      stmt(IRStmt_Dirty(genIR));
+
+      wbc = 0;
+      for (i = 0 ; i < 4; i++)
+      {
+        if(opd_dst_map & (1 << i))
+        {
+          /* Queue the writeback destination registers. */
+          MARK_REG_WB(opd[i], tb[wbc]);
+          wbc++;
+        }
+      }
+    }
+  }
+
+  /* Write back registers for a bundle. Note that we have to read all source
+     registers for all instructions in a bundle before writing the
+     destinations, because this is a VLIW processor. */
+  for (n = 0; n < rd_wb_index; n++)
+    putIReg(rd_wb_reg[n], mkexpr(rd_wb_temp[n]));
+
+  /* Finally, add the branch IR if applicable; at most one branch per bundle. */
+  if (bstmt) {
+    stmt(bstmt);
+    dres.whatNext = Dis_StopHere;
+
+    dres.jk_StopHere = jumpkind;
+    stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
+                    mkU64(guest_PC_curr_instr + 8)));
+  } else if (next) {
+    if (steering_pc != -1ULL) {
+      if (resteerOkFn(callback_opaque, steering_pc)) {
+        dres.whatNext   = Dis_ResteerU;
+        dres.continueAt = steering_pc;
+        stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
+                        mkU64(steering_pc)));
+      } else {
+        dres.whatNext = Dis_StopHere;
+        dres.jk_StopHere = jumpkind;
+        stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
+                        mkU64(steering_pc)));
+      }
+    } else {
+      dres.whatNext = Dis_StopHere;
+      dres.jk_StopHere = jumpkind;
+      stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc), next));
+    }
+  } else {
+    /* As default, dres.whatNext = Dis_Continue. */
+    stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc),
+                    mkU64(guest_PC_curr_instr + 8)));
+  }
+
+  irsb->jumpkind = Ijk_Boring;
+  irsb->next = NULL;
+  dres.len = 8;
+
+ decode_success:
+
+  return dres;
+}
+
+/*------------------------------------------------------------*/
+/*--- Top-level fn                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction
+   is located in host memory at &guest_code[delta]. */
+
+DisResult
+disInstr_TILEGX ( IRSB* irsb_IN,
+                  Bool (*resteerOkFn) (void *, Addr),
+                  Bool resteerCisOk,
+                  void* callback_opaque,
+                  const UChar* guest_code_IN,
+                  Long delta,
+                  Addr guest_IP,
+                  VexArch guest_arch,
+                  const VexArchInfo* archinfo,
+                  const VexAbiInfo* abiinfo,
+                  VexEndness host_endness_IN,
+                  Bool sigill_diag_IN )
+{
+  DisResult dres;
+
+  /* Set globals (see top of this file) */
+  vassert(guest_arch == VexArchTILEGX);  /* this disassembler only handles TILEGX */
+
+  guest_code = (UChar*)(Addr)guest_code_IN;
+  irsb = irsb_IN;
+  host_endness = host_endness_IN;
+  guest_PC_curr_instr = (Addr64) guest_IP;  /* guest address of the insn being disassembled */
+  guest_PC_bbstart = (Addr64) toUInt(guest_IP - delta);  /* block start; NOTE(review): toUInt truncates to 32 bits -- confirm intended on a 64-bit guest */
+
+  dres = disInstr_TILEGX_WRK(resteerOkFn, resteerCisOk,
+                             callback_opaque,
+                             delta, archinfo, abiinfo, sigill_diag_IN);  /* delegate all real work to the _WRK routine */
+
+  return dres;
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end                                      guest_tilegx_toIR.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/priv/guest_x86_defs.h b/VEX/priv/guest_x86_defs.h
new file mode 100644
index 0000000..412ee94
--- /dev/null
+++ b/VEX/priv/guest_x86_defs.h
@@ -0,0 +1,418 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                  guest_x86_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* Only to be used within the guest-x86 directory. */
+
+#ifndef __VEX_GUEST_X86_DEFS_H
+#define __VEX_GUEST_X86_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex_guest_x86.h"           // VexGuestX86State
+#include "libvex_emnote.h"              // VexEmNote
+#include "guest_generic_bb_to_IR.h"     // DisResult
+
+/*---------------------------------------------------------*/
+/*--- x86 to IR conversion                              ---*/
+/*---------------------------------------------------------*/
+
+/* Convert one x86 insn to IR.  See the type DisOneInstrFn in
+   bb_to_IR.h. */
+extern
+DisResult disInstr_X86 ( IRSB*        irbb,
+                         Bool         (*resteerOkFn) ( void*, Addr ),
+                         Bool         resteerCisOk,
+                         void*        callback_opaque,
+                         const UChar* guest_code,
+                         Long         delta,
+                         Addr         guest_IP,
+                         VexArch      guest_arch,
+                         const VexArchInfo* archinfo,
+                         const VexAbiInfo*  abiinfo,
+                         VexEndness   host_endness,
+                         Bool         sigill_diag );
+
+/* Used by the optimiser to specialise calls to helpers. */
+extern
+IRExpr* guest_x86_spechelper ( const HChar* function_name,
+                               IRExpr** args,
+                               IRStmt** precedingStmts,
+                               Int      n_precedingStmts );
+
+/* Describes to the optimiser which parts of the guest state require
+   precise memory exceptions.  This is logically part of the guest
+   state description. */
+extern 
+Bool guest_x86_state_requires_precise_mem_exns ( Int, Int,
+                                                 VexRegisterUpdates );
+
+extern
+VexGuestLayout x86guest_layout;
+
+
+/*---------------------------------------------------------*/
+/*--- x86 guest helpers                                 ---*/
+/*---------------------------------------------------------*/
+
+/* --- CLEAN HELPERS --- */
+
+extern UInt  x86g_calculate_eflags_all ( 
+                UInt cc_op, UInt cc_dep1, UInt cc_dep2, UInt cc_ndep 
+             );
+
+VEX_REGPARM(3)
+extern UInt  x86g_calculate_eflags_c ( 
+                UInt cc_op, UInt cc_dep1, UInt cc_dep2, UInt cc_ndep 
+             );
+
+extern UInt  x86g_calculate_condition ( 
+                UInt/*X86Condcode*/ cond, 
+                UInt cc_op, 
+                UInt cc_dep1, UInt cc_dep2, UInt cc_ndep 
+             );
+
+extern UInt  x86g_calculate_FXAM ( UInt tag, ULong dbl );
+
+extern ULong x86g_calculate_RCR ( 
+                UInt arg, UInt rot_amt, UInt eflags_in, UInt sz 
+             );
+extern ULong x86g_calculate_RCL ( 
+                UInt arg, UInt rot_amt, UInt eflags_in, UInt sz 
+             );
+
+extern UInt x86g_calculate_daa_das_aaa_aas ( UInt AX_and_flags, UInt opcode );
+
+extern UInt x86g_calculate_aad_aam ( UInt AX_and_flags, UInt opcode );
+
+extern ULong x86g_check_fldcw ( UInt fpucw );
+
+extern UInt  x86g_create_fpucw ( UInt fpround );
+
+extern ULong x86g_check_ldmxcsr ( UInt mxcsr );
+
+extern UInt  x86g_create_mxcsr ( UInt sseround );
+
+
+/* Translate a guest virtual_addr into a guest linear address by
+   consulting the supplied LDT/GDT structures.  Their representation
+   must be as specified in pub/libvex_guest_x86.h.  To indicate a
+   translation failure, 1<<32 is returned.  On success, the lower 32
+   bits of the returned result indicate the linear address.  
+*/
+extern 
+ULong x86g_use_seg_selector ( HWord ldt, HWord gdt, 
+                              UInt seg_selector, UInt virtual_addr );
+
+extern ULong x86g_calculate_mmx_pmaddwd  ( ULong, ULong );
+extern ULong x86g_calculate_mmx_psadbw   ( ULong, ULong );
+
+
+/* --- DIRTY HELPERS --- */
+
+extern ULong x86g_dirtyhelper_loadF80le  ( Addr );
+
+extern void  x86g_dirtyhelper_storeF80le ( Addr, ULong );
+
+extern void  x86g_dirtyhelper_CPUID_sse0 ( VexGuestX86State* );
+extern void  x86g_dirtyhelper_CPUID_mmxext ( VexGuestX86State* );
+extern void  x86g_dirtyhelper_CPUID_sse1 ( VexGuestX86State* );
+extern void  x86g_dirtyhelper_CPUID_sse2 ( VexGuestX86State* );
+
+extern void  x86g_dirtyhelper_FINIT ( VexGuestX86State* );
+
+extern void  x86g_dirtyhelper_FXSAVE ( VexGuestX86State*, HWord );
+extern void  x86g_dirtyhelper_FSAVE  ( VexGuestX86State*, HWord );
+extern void  x86g_dirtyhelper_FSTENV ( VexGuestX86State*, HWord );
+
+extern ULong x86g_dirtyhelper_RDTSC ( void );
+
+extern UInt x86g_dirtyhelper_IN  ( UInt portno, UInt sz/*1,2 or 4*/ );
+extern void x86g_dirtyhelper_OUT ( UInt portno, UInt data, 
+                                   UInt sz/*1,2 or 4*/ );
+
+extern void x86g_dirtyhelper_SxDT ( void* address,
+                                    UInt op /* 0 or 1 */ );
+
+extern VexEmNote
+            x86g_dirtyhelper_FXRSTOR ( VexGuestX86State*, HWord );
+
+extern VexEmNote
+            x86g_dirtyhelper_FRSTOR ( VexGuestX86State*, HWord );
+
+extern VexEmNote 
+            x86g_dirtyhelper_FLDENV ( VexGuestX86State*, HWord );
+
+
+/*---------------------------------------------------------*/
+/*--- Condition code stuff                              ---*/
+/*---------------------------------------------------------*/
+
+/* eflags masks */
+#define X86G_CC_SHIFT_O   11
+#define X86G_CC_SHIFT_S   7
+#define X86G_CC_SHIFT_Z   6
+#define X86G_CC_SHIFT_A   4
+#define X86G_CC_SHIFT_C   0
+#define X86G_CC_SHIFT_P   2
+
+#define X86G_CC_MASK_O    (1 << X86G_CC_SHIFT_O)
+#define X86G_CC_MASK_S    (1 << X86G_CC_SHIFT_S)
+#define X86G_CC_MASK_Z    (1 << X86G_CC_SHIFT_Z)
+#define X86G_CC_MASK_A    (1 << X86G_CC_SHIFT_A)
+#define X86G_CC_MASK_C    (1 << X86G_CC_SHIFT_C)
+#define X86G_CC_MASK_P    (1 << X86G_CC_SHIFT_P)
+
+/* FPU flag masks */
+#define X86G_FC_SHIFT_C3   14
+#define X86G_FC_SHIFT_C2   10
+#define X86G_FC_SHIFT_C1   9
+#define X86G_FC_SHIFT_C0   8
+
+#define X86G_FC_MASK_C3    (1 << X86G_FC_SHIFT_C3)
+#define X86G_FC_MASK_C2    (1 << X86G_FC_SHIFT_C2)
+#define X86G_FC_MASK_C1    (1 << X86G_FC_SHIFT_C1)
+#define X86G_FC_MASK_C0    (1 << X86G_FC_SHIFT_C0)
+
+
+/* %EFLAGS thunk descriptors.  A four-word thunk is used to record
+   details of the most recent flag-setting operation, so the flags can
+   be computed later if needed.  It is possible to do this a little
+   more efficiently using a 3-word thunk, but that makes it impossible
+   to describe the flag data dependencies sufficiently accurately for
+   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.
+
+   The four words are:
+
+      CC_OP, which describes the operation.
+
+      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
+         We want Memcheck to believe that the resulting flags are
+         data-dependent on both CC_DEP1 and CC_DEP2, hence the 
+         name DEP.
+
+      CC_NDEP.  This is a 3rd argument to the operation which is
+         sometimes needed.  We arrange things so that Memcheck does
+         not believe the resulting flags are data-dependent on CC_NDEP
+         ("not dependent").
+
+   To make Memcheck believe that (the definedness of) the encoded
+   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
+   requires two things:
+
+   (1) In the guest state layout info (x86guest_layout), CC_OP and
+       CC_NDEP are marked as always defined.
+
+   (2) When passing the thunk components to an evaluation function
+       (calculate_condition, calculate_eflags, calculate_eflags_c) the
+       IRCallee's mcx_mask must be set so as to exclude from
+       consideration all passed args except CC_DEP1 and CC_DEP2.
+
+   Strictly speaking only (2) is necessary for correctness.  However,
+   (1) helps efficiency in that since (2) means we never ask about the
+   definedness of CC_OP or CC_NDEP, we may as well not even bother to
+   track their definedness.
+
+   When building the thunk, it is always necessary to write words into
+   CC_DEP1 and CC_DEP2, even if those args are not used given the
+   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGIC1/2/4).
+   This is important because otherwise Memcheck could give false
+   positives as it does not understand the relationship between the
+   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the 
+   definedness of the stored flags always depends on both CC_DEP1 and
+   CC_DEP2.
+
+   However, it is only necessary to set CC_NDEP when the CC_OP value
+   requires it, because Memcheck ignores CC_NDEP, and the evaluation
+   functions do understand the CC_OP fields and will only examine
+   CC_NDEP for suitable values of CC_OP.
+
+   A summary of the field usages is:
+
+   Operation          DEP1               DEP2               NDEP
+   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   add/sub/mul        first arg          second arg         unused
+
+   adc/sbb            first arg          (second arg)
+                                         XOR old_carry      old_carry
+
+   and/or/xor         result             zero               unused
+
+   inc/dec            result             zero               old_carry
+
+   shl/shr/sar        result             subshifted-        unused
+                                         result
+
+   rol/ror            result             zero               old_flags
+
+   copy               old_flags          zero               unused.
+
+
+   Therefore Memcheck will believe the following:
+
+   * add/sub/mul -- definedness of result flags depends on definedness
+     of both args.
+
+   * adc/sbb -- definedness of result flags depends on definedness of
+     both args and definedness of the old C flag.  Because only two
+     DEP fields are available, the old C flag is XOR'd into the second
+     arg so that Memcheck sees the data dependency on it.  That means
+     the NDEP field must contain a second copy of the old C flag
+     so that the evaluation functions can correctly recover the second
+     arg.
+
+   * and/or/xor are straightforward -- definedness of result flags
+     depends on definedness of result value.
+
+   * inc/dec -- definedness of result flags depends only on
+     definedness of result.  This isn't really true -- it also depends
+     on the old C flag.  However, we don't want Memcheck to see that,
+     and so the old C flag must be passed in NDEP and not in DEP2.
+     It's inconceivable that a compiler would generate code that puts
+     the C flag in an undefined state, then does an inc/dec, which
+     leaves C unchanged, and then makes a conditional jump/move based
+     on C.  So our fiction seems a good approximation.
+
+   * shl/shr/sar -- straightforward, again, definedness of result
+     flags depends on definedness of result value.  The subshifted
+     value (value shifted one less) is also needed, but its
+     definedness is the same as the definedness of the shifted value.
+
+   * rol/ror -- these only set O and C, and leave A Z S P alone.
+     However it seems prudent (as per inc/dec) to say the definedness
+     of all resulting flags depends on the definedness of the result,
+     hence the old flags must go in as NDEP and not DEP2.
+
+   * rcl/rcr are too difficult to do in-line, and so are done by a
+     helper function.  They are not part of this scheme.  The helper
+     function takes the value to be rotated, the rotate amount and the
+     old flags, and returns the new flags and the rotated value.
+     Since the helper's mcx_mask does not have any set bits, Memcheck
+     will lazily propagate undefinedness from any of the 3 args into 
+     both results (flags and actual value).
+*/
+enum {
+    X86G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
+                        /* just copy DEP1 to output */
+
+    X86G_CC_OP_ADDB,    /* 1 */
+    X86G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    X86G_CC_OP_ADDL,    /* 3 */
+
+    X86G_CC_OP_SUBB,    /* 4 */
+    X86G_CC_OP_SUBW,    /* 5 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    X86G_CC_OP_SUBL,    /* 6 */
+
+    X86G_CC_OP_ADCB,    /* 7 */
+    X86G_CC_OP_ADCW,    /* 8 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
+    X86G_CC_OP_ADCL,    /* 9 */
+
+    X86G_CC_OP_SBBB,    /* 10 */
+    X86G_CC_OP_SBBW,    /* 11 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
+    X86G_CC_OP_SBBL,    /* 12 */
+
+    X86G_CC_OP_LOGICB,  /* 13 */
+    X86G_CC_OP_LOGICW,  /* 14 DEP1 = result, DEP2 = 0, NDEP = unused */
+    X86G_CC_OP_LOGICL,  /* 15 */
+
+    X86G_CC_OP_INCB,    /* 16 */
+    X86G_CC_OP_INCW,    /* 17 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
+    X86G_CC_OP_INCL,    /* 18 */
+
+    X86G_CC_OP_DECB,    /* 19 */
+    X86G_CC_OP_DECW,    /* 20 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
+    X86G_CC_OP_DECL,    /* 21 */
+
+    X86G_CC_OP_SHLB,    /* 22 DEP1 = res, DEP2 = res', NDEP = unused */
+    X86G_CC_OP_SHLW,    /* 23 where res' is like res but shifted one bit less */
+    X86G_CC_OP_SHLL,    /* 24 */
+
+    X86G_CC_OP_SHRB,    /* 25 DEP1 = res, DEP2 = res', NDEP = unused */
+    X86G_CC_OP_SHRW,    /* 26 where res' is like res but shifted one bit less */
+    X86G_CC_OP_SHRL,    /* 27 */
+
+    X86G_CC_OP_ROLB,    /* 28 */
+    X86G_CC_OP_ROLW,    /* 29 DEP1 = res, DEP2 = 0, NDEP = old flags */
+    X86G_CC_OP_ROLL,    /* 30 */
+
+    X86G_CC_OP_RORB,    /* 31 */
+    X86G_CC_OP_RORW,    /* 32 DEP1 = res, DEP2 = 0, NDEP = old flags */
+    X86G_CC_OP_RORL,    /* 33 */
+
+    X86G_CC_OP_UMULB,   /* 34 */
+    X86G_CC_OP_UMULW,   /* 35 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    X86G_CC_OP_UMULL,   /* 36 */
+
+    X86G_CC_OP_SMULB,   /* 37 */
+    X86G_CC_OP_SMULW,   /* 38 DEP1 = argL, DEP2 = argR, NDEP = unused */
+    X86G_CC_OP_SMULL,   /* 39 */
+
+    X86G_CC_OP_NUMBER
+};
+
+typedef
+   enum {
+      X86CondO      = 0,  /* overflow           */
+      X86CondNO     = 1,  /* no overflow        */
+
+      X86CondB      = 2,  /* below              */
+      X86CondNB     = 3,  /* not below          */
+
+      X86CondZ      = 4,  /* zero               */
+      X86CondNZ     = 5,  /* not zero           */
+
+      X86CondBE     = 6,  /* below or equal     */
+      X86CondNBE    = 7,  /* not below or equal */
+
+      X86CondS      = 8,  /* negative           */
+      X86CondNS     = 9,  /* not negative       */
+
+      X86CondP      = 10, /* parity even        */
+      X86CondNP     = 11, /* not parity even    */
+
+      X86CondL      = 12, /* jump less          */
+      X86CondNL     = 13, /* not less           */
+
+      X86CondLE     = 14, /* less or equal      */
+      X86CondNLE    = 15, /* not less or equal  */
+
+      X86CondAlways = 16  /* HACK */
+   }
+   X86Condcode;
+
+#endif /* ndef __VEX_GUEST_X86_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                    guest_x86_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_x86_helpers.c b/VEX/priv/guest_x86_helpers.c
new file mode 100644
index 0000000..b4875ee
--- /dev/null
+++ b/VEX/priv/guest_x86_helpers.c
@@ -0,0 +1,2876 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                               guest_x86_helpers.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_x86.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_x86_defs.h"
+#include "guest_generic_x87.h"
+
+
+/* This file contains helper functions for x86 guest code.
+   Calls to these functions are generated by the back end.
+   These calls are of course in the host machine code and 
+   this file will be compiled to host machine code, so that
+   all makes sense.  
+
+   Only change the signatures of these helper functions very
+   carefully.  If you change the signature here, you'll have to change
+   the parameters passed to it in the IR calls constructed by
+   guest-x86/toIR.c.
+
+   The convention used is that all functions called from generated
+   code are named x86g_<something>, and any function whose name lacks
+   that prefix is not called from generated code.  Note that some
+   LibVEX_* functions can however be called by VEX's client, but that
+   is not the same as calling them from VEX-generated code.
+*/
+
+
+/* Set to 1 to get detailed profiling info about use of the flag
+   machinery. */
+#define PROFILE_EFLAGS 0
+
+
+/*---------------------------------------------------------------*/
+/*--- %eflags run-time helpers.                               ---*/
+/*---------------------------------------------------------------*/
+
+static const UChar parity_table[256] = {
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0,
+    0, X86G_CC_MASK_P, X86G_CC_MASK_P, 0, X86G_CC_MASK_P, 0, 0, X86G_CC_MASK_P,
+};
+
+/* generalised left-shifter */
+inline static Int lshift ( Int x, Int n )
+{
+   if (n >= 0)
+      return (UInt)x << n;
+   else
+      return x >> (-n);
+}
+
+/* identity on ULong */
+static inline ULong idULong ( ULong x )
+{
+   return x;
+}
+
+
+#define PREAMBLE(__data_bits)					\
+   /* const */ UInt DATA_MASK 					\
+      = __data_bits==8 ? 0xFF 					\
+                       : (__data_bits==16 ? 0xFFFF 		\
+                                          : 0xFFFFFFFF); 	\
+   /* const */ UInt SIGN_MASK = 1u << (__data_bits - 1);	\
+   /* const */ UInt CC_DEP1 = cc_dep1_formal;			\
+   /* const */ UInt CC_DEP2 = cc_dep2_formal;			\
+   /* const */ UInt CC_NDEP = cc_ndep_formal;			\
+   /* Four bogus assignments, which hopefully gcc can     */	\
+   /* optimise away, and which stop it complaining about  */	\
+   /* unused variables.                                   */	\
+   SIGN_MASK = SIGN_MASK;					\
+   DATA_MASK = DATA_MASK;					\
+   CC_DEP2 = CC_DEP2;						\
+   CC_NDEP = CC_NDEP;
+
+
+/*-------------------------------------------------------------*/
+
+#define ACTIONS_ADD(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt cf, pf, af, zf, sf, of;				\
+     UInt argL, argR, res;					\
+     argL = CC_DEP1;						\
+     argR = CC_DEP2;						\
+     res  = argL + argR;					\
+     cf = (DATA_UTYPE)res < (DATA_UTYPE)argL;			\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = lshift((argL ^ argR ^ -1) & (argL ^ res), 		\
+                 12 - DATA_BITS) & X86G_CC_MASK_O;		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+#define ACTIONS_SUB(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt cf, pf, af, zf, sf, of;				\
+     UInt argL, argR, res;					\
+     argL = CC_DEP1;						\
+     argR = CC_DEP2;						\
+     res  = argL - argR;					\
+     cf = (DATA_UTYPE)argL < (DATA_UTYPE)argR;			\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = lshift((argL ^ argR) & (argL ^ res),	 		\
+                 12 - DATA_BITS) & X86G_CC_MASK_O; 		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+#define ACTIONS_ADC(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt cf, pf, af, zf, sf, of;				\
+     UInt argL, argR, oldC, res;		       		\
+     oldC = CC_NDEP & X86G_CC_MASK_C;				\
+     argL = CC_DEP1;						\
+     argR = CC_DEP2 ^ oldC;	       				\
+     res  = (argL + argR) + oldC;				\
+     if (oldC)							\
+        cf = (DATA_UTYPE)res <= (DATA_UTYPE)argL;		\
+     else							\
+        cf = (DATA_UTYPE)res < (DATA_UTYPE)argL;		\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = lshift((argL ^ argR ^ -1) & (argL ^ res), 		\
+                  12 - DATA_BITS) & X86G_CC_MASK_O;		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+#define ACTIONS_SBB(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt cf, pf, af, zf, sf, of;				\
+     UInt argL, argR, oldC, res;		       		\
+     oldC = CC_NDEP & X86G_CC_MASK_C;				\
+     argL = CC_DEP1;						\
+     argR = CC_DEP2 ^ oldC;	       				\
+     res  = (argL - argR) - oldC;				\
+     if (oldC)							\
+        cf = (DATA_UTYPE)argL <= (DATA_UTYPE)argR;		\
+     else							\
+        cf = (DATA_UTYPE)argL < (DATA_UTYPE)argR;		\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = lshift((argL ^ argR) & (argL ^ res), 			\
+                 12 - DATA_BITS) & X86G_CC_MASK_O;		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+#define ACTIONS_LOGIC(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt cf, pf, af, zf, sf, of;				\
+     cf = 0;							\
+     pf = parity_table[(UChar)CC_DEP1];				\
+     af = 0;							\
+     zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     of = 0;							\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+#define ACTIONS_INC(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt cf, pf, af, zf, sf, of;				\
+     UInt argL, argR, res;					\
+     res  = CC_DEP1;						\
+     argL = res - 1;						\
+     argR = 1;							\
+     cf = CC_NDEP & X86G_CC_MASK_C;				\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = ((res & DATA_MASK) == SIGN_MASK) << 11;		\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+#define ACTIONS_DEC(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt cf, pf, af, zf, sf, of;				\
+     UInt argL, argR, res;					\
+     res  = CC_DEP1;						\
+     argL = res + 1;						\
+     argR = 1;							\
+     cf = CC_NDEP & X86G_CC_MASK_C;				\
+     pf = parity_table[(UChar)res];				\
+     af = (res ^ argL ^ argR) & 0x10;				\
+     zf = ((DATA_UTYPE)res == 0) << 6;				\
+     sf = lshift(res, 8 - DATA_BITS) & 0x80;			\
+     of = ((res & DATA_MASK) 					\
+          == ((UInt)SIGN_MASK - 1)) << 11;			\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+#define ACTIONS_SHL(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt cf, pf, af, zf, sf, of;				\
+     cf = (CC_DEP2 >> (DATA_BITS - 1)) & X86G_CC_MASK_C;	\
+     pf = parity_table[(UChar)CC_DEP1];				\
+     af = 0; /* undefined */					\
+     zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     /* of is defined if shift count == 1 */			\
+     of = lshift(CC_DEP2 ^ CC_DEP1, 12 - DATA_BITS) 		\
+          & X86G_CC_MASK_O;					\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+#define ACTIONS_SHR(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);  					\
+   { UInt cf, pf, af, zf, sf, of;				\
+     cf = CC_DEP2 & 1;						\
+     pf = parity_table[(UChar)CC_DEP1];				\
+     af = 0; /* undefined */					\
+     zf = ((DATA_UTYPE)CC_DEP1 == 0) << 6;			\
+     sf = lshift(CC_DEP1, 8 - DATA_BITS) & 0x80;		\
+     /* of is defined if shift count == 1 */			\
+     of = lshift(CC_DEP2 ^ CC_DEP1, 12 - DATA_BITS)		\
+          & X86G_CC_MASK_O;					\
+     return cf | pf | af | zf | sf | of;			\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ROL: cf' = lsb(result).  of' = msb(result) ^ lsb(result). */
+/* DEP1 = result, NDEP = old flags */
+#define ACTIONS_ROL(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt fl 							\
+        = (CC_NDEP & ~(X86G_CC_MASK_O | X86G_CC_MASK_C))	\
+          | (X86G_CC_MASK_C & CC_DEP1)				\
+          | (X86G_CC_MASK_O & (lshift(CC_DEP1,  		\
+                                      11-(DATA_BITS-1)) 	\
+                     ^ lshift(CC_DEP1, 11)));			\
+     return fl;							\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* ROR: cf' = msb(result).  of' = msb(result) ^ msb-1(result). */
+/* DEP1 = result, NDEP = old flags */
+#define ACTIONS_ROR(DATA_BITS,DATA_UTYPE)			\
+{								\
+   PREAMBLE(DATA_BITS);						\
+   { UInt fl 							\
+        = (CC_NDEP & ~(X86G_CC_MASK_O | X86G_CC_MASK_C))	\
+          | (X86G_CC_MASK_C & (CC_DEP1 >> (DATA_BITS-1)))	\
+          | (X86G_CC_MASK_O & (lshift(CC_DEP1, 			\
+                                      11-(DATA_BITS-1)) 	\
+                     ^ lshift(CC_DEP1, 11-(DATA_BITS-1)+1)));	\
+     return fl;							\
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* Flag computation for unsigned widening multiply (one-operand MUL).
+   'lo' is the DATA_BITS-wide low half of the product, 'rr' the full
+   2*DATA_BITS-wide product, and 'hi' its high half.  CF = OF = 1 iff
+   the high half is nonzero, i.e. the result does not fit in DATA_BITS
+   bits.  SF/ZF/PF are derived from 'lo' and shifted into their %eflags
+   positions (bits 7, 6, 0/parity); AF is architecturally undefined
+   after MUL and is left as zero. */
+#define ACTIONS_UMUL(DATA_BITS, DATA_UTYPE,  NARROWtoU,         \
+                                DATA_U2TYPE, NARROWto2U)        \
+{                                                               \
+   PREAMBLE(DATA_BITS);                                         \
+   { UInt cf, pf, af, zf, sf, of;                               \
+     DATA_UTYPE  hi;                                            \
+     DATA_UTYPE  lo                                             \
+        = NARROWtoU( ((DATA_UTYPE)CC_DEP1)                      \
+                     * ((DATA_UTYPE)CC_DEP2) );                 \
+     DATA_U2TYPE rr                                             \
+        = NARROWto2U(                                           \
+             ((DATA_U2TYPE)((DATA_UTYPE)CC_DEP1))               \
+             * ((DATA_U2TYPE)((DATA_UTYPE)CC_DEP2)) );          \
+     hi = NARROWtoU(rr >>/*u*/ DATA_BITS);                      \
+     cf = (hi != 0);                                            \
+     pf = parity_table[(UChar)lo];                              \
+     af = 0; /* undefined */                                    \
+     zf = (lo == 0) << 6;                                       \
+     sf = lshift(lo, 8 - DATA_BITS) & 0x80;                     \
+     of = cf << 11;                                             \
+     return cf | pf | af | zf | sf | of;                        \
+   }								\
+}
+
+/*-------------------------------------------------------------*/
+
+/* Flag computation for signed widening multiply (one-operand IMUL).
+   CF = OF = 1 iff the high half of the product differs from the
+   sign-extension of the low half, i.e. the product does not fit in
+   DATA_BITS signed bits.  (lo >>s (DATA_BITS-1) replicates lo's sign
+   bit across the whole word, giving exactly that sign-extension.)
+   SF/ZF/PF come from 'lo'; AF is undefined after IMUL and left zero. */
+#define ACTIONS_SMUL(DATA_BITS, DATA_STYPE,  NARROWtoS,         \
+                                DATA_S2TYPE, NARROWto2S)        \
+{                                                               \
+   PREAMBLE(DATA_BITS);                                         \
+   { UInt cf, pf, af, zf, sf, of;                               \
+     DATA_STYPE  hi;                                            \
+     DATA_STYPE  lo                                             \
+        = NARROWtoS( ((DATA_S2TYPE)(DATA_STYPE)CC_DEP1)         \
+                     * ((DATA_S2TYPE)(DATA_STYPE)CC_DEP2) );    \
+     DATA_S2TYPE rr                                             \
+        = NARROWto2S(                                           \
+             ((DATA_S2TYPE)((DATA_STYPE)CC_DEP1))               \
+             * ((DATA_S2TYPE)((DATA_STYPE)CC_DEP2)) );          \
+     hi = NARROWtoS(rr >>/*s*/ DATA_BITS);                      \
+     cf = (hi != (lo >>/*s*/ (DATA_BITS-1)));                   \
+     pf = parity_table[(UChar)lo];                              \
+     af = 0; /* undefined */                                    \
+     zf = (lo == 0) << 6;                                       \
+     sf = lshift(lo, 8 - DATA_BITS) & 0x80;                     \
+     of = cf << 11;                                             \
+     return cf | pf | af | zf | sf | of;                        \
+   }								\
+}
+
+
+#if PROFILE_EFLAGS
+
+/* Optional instrumentation: counts how often each flag-thunk op and
+   condition code is evaluated; dumped periodically by showCounts(). */
+static Bool initted     = False;
+
+/* C flag, fast route */
+static UInt tabc_fast[X86G_CC_OP_NUMBER];
+/* C flag, slow route */
+static UInt tabc_slow[X86G_CC_OP_NUMBER];
+/* table for calculate_cond */
+static UInt tab_cond[X86G_CC_OP_NUMBER][16];
+/* total entry counts for calc_all, calc_c, calc_cond. */
+static UInt n_calc_all  = 0;
+static UInt n_calc_c    = 0;
+static UInt n_calc_cond = 0;
+
+/* Fires once every 2^22 total helper calls. */
+#define SHOW_COUNTS_NOW (0 == (0x3FFFFF & (n_calc_all+n_calc_c+n_calc_cond)))
+
+
+/* Dump the profiling counters as a table: one row per CC_OP, annotated
+   with an operand-size suffix (B/W/L) inferred from the op number,
+   showing the slow- and fast-route carry-flag counts followed by the
+   per-condition-code counts (scaled to K when >= 1000). */
+static void showCounts ( void )
+{
+   Int op, co;
+   HChar ch;
+   vex_printf("\nTotal calls: calc_all=%u   calc_cond=%u   calc_c=%u\n",
+              n_calc_all, n_calc_cond, n_calc_c);
+
+   vex_printf("      cSLOW  cFAST    O   NO    B   NB    Z   NZ   BE  NBE"
+              "    S   NS    P   NP    L   NL   LE  NLE\n");
+   vex_printf("     -----------------------------------------------------"
+              "----------------------------------------\n");
+   for (op = 0; op < X86G_CC_OP_NUMBER; op++) {
+
+      /* Ops after COPY come in B/W/L triples, hence the mod-3 test. */
+      ch = ' ';
+      if (op > 0 && (op-1) % 3 == 0) 
+         ch = 'B';
+      if (op > 0 && (op-1) % 3 == 1) 
+         ch = 'W';
+      if (op > 0 && (op-1) % 3 == 2) 
+         ch = 'L';
+
+      vex_printf("%2d%c: ", op, ch);
+      vex_printf("%6u ", tabc_slow[op]);
+      vex_printf("%6u ", tabc_fast[op]);
+      for (co = 0; co < 16; co++) {
+         Int n = tab_cond[op][co];
+         if (n >= 1000) {
+            vex_printf(" %3dK", n / 1000);
+         } else 
+         if (n >= 0) {
+            vex_printf(" %3d ", n );
+         } else {
+            vex_printf("     ");
+         }
+      }
+      vex_printf("\n");
+   }
+   vex_printf("\n");
+}
+
+/* Zero all profiling counters and mark the tables initialised, so the
+   helpers below perform this setup only once. */
+static void initCounts ( void )
+{
+   Int op, co;
+   initted = True;
+   for (op = 0; op < X86G_CC_OP_NUMBER; op++) {
+      tabc_fast[op] = tabc_slow[op] = 0;
+      for (co = 0; co < 16; co++)
+         tab_cond[op][co] = 0;
+   }
+}
+
+#endif /* PROFILE_EFLAGS */
+
+
+/* Calculate all 6 %eflags condition bits (O S Z A C P) from the
+   supplied thunk parameters, by dispatching on cc_op to the ACTIONS_*
+   expanders above.  Worker function, not directly called from
+   generated code -- the wrappers below are. */
+static
+UInt x86g_calculate_eflags_all_WRK ( UInt cc_op, 
+                                     UInt cc_dep1, 
+                                     UInt cc_dep2_formal,
+                                     UInt cc_ndep_formal )
+{
+   switch (cc_op) {
+      /* COPY: the flags are stored verbatim in DEP1; just mask down to
+         the six architecturally defined condition bits. */
+      case X86G_CC_OP_COPY:
+         return cc_dep1_formal
+                & (X86G_CC_MASK_O | X86G_CC_MASK_S | X86G_CC_MASK_Z 
+                   | X86G_CC_MASK_A | X86G_CC_MASK_C | X86G_CC_MASK_P);
+
+      case X86G_CC_OP_ADDB:   ACTIONS_ADD( 8,  UChar  );
+      case X86G_CC_OP_ADDW:   ACTIONS_ADD( 16, UShort );
+      case X86G_CC_OP_ADDL:   ACTIONS_ADD( 32, UInt   );
+
+      case X86G_CC_OP_ADCB:   ACTIONS_ADC( 8,  UChar  );
+      case X86G_CC_OP_ADCW:   ACTIONS_ADC( 16, UShort );
+      case X86G_CC_OP_ADCL:   ACTIONS_ADC( 32, UInt   );
+
+      case X86G_CC_OP_SUBB:   ACTIONS_SUB(  8, UChar  );
+      case X86G_CC_OP_SUBW:   ACTIONS_SUB( 16, UShort );
+      case X86G_CC_OP_SUBL:   ACTIONS_SUB( 32, UInt   );
+
+      case X86G_CC_OP_SBBB:   ACTIONS_SBB(  8, UChar  );
+      case X86G_CC_OP_SBBW:   ACTIONS_SBB( 16, UShort );
+      case X86G_CC_OP_SBBL:   ACTIONS_SBB( 32, UInt   );
+
+      case X86G_CC_OP_LOGICB: ACTIONS_LOGIC(  8, UChar  );
+      case X86G_CC_OP_LOGICW: ACTIONS_LOGIC( 16, UShort );
+      case X86G_CC_OP_LOGICL: ACTIONS_LOGIC( 32, UInt   );
+
+      case X86G_CC_OP_INCB:   ACTIONS_INC(  8, UChar  );
+      case X86G_CC_OP_INCW:   ACTIONS_INC( 16, UShort );
+      case X86G_CC_OP_INCL:   ACTIONS_INC( 32, UInt   );
+
+      case X86G_CC_OP_DECB:   ACTIONS_DEC(  8, UChar  );
+      case X86G_CC_OP_DECW:   ACTIONS_DEC( 16, UShort );
+      case X86G_CC_OP_DECL:   ACTIONS_DEC( 32, UInt   );
+
+      case X86G_CC_OP_SHLB:   ACTIONS_SHL(  8, UChar  );
+      case X86G_CC_OP_SHLW:   ACTIONS_SHL( 16, UShort );
+      case X86G_CC_OP_SHLL:   ACTIONS_SHL( 32, UInt   );
+
+      case X86G_CC_OP_SHRB:   ACTIONS_SHR(  8, UChar  );
+      case X86G_CC_OP_SHRW:   ACTIONS_SHR( 16, UShort );
+      case X86G_CC_OP_SHRL:   ACTIONS_SHR( 32, UInt   );
+
+      case X86G_CC_OP_ROLB:   ACTIONS_ROL(  8, UChar  );
+      case X86G_CC_OP_ROLW:   ACTIONS_ROL( 16, UShort );
+      case X86G_CC_OP_ROLL:   ACTIONS_ROL( 32, UInt   );
+
+      case X86G_CC_OP_RORB:   ACTIONS_ROR(  8, UChar  );
+      case X86G_CC_OP_RORW:   ACTIONS_ROR( 16, UShort );
+      case X86G_CC_OP_RORL:   ACTIONS_ROR( 32, UInt   );
+
+      /* Widening multiplies need both the narrow and the double-width
+         type plus their narrowing casts -- see ACTIONS_UMUL/SMUL. */
+      case X86G_CC_OP_UMULB:  ACTIONS_UMUL(  8, UChar,  toUChar,
+                                                UShort, toUShort );
+      case X86G_CC_OP_UMULW:  ACTIONS_UMUL( 16, UShort, toUShort,
+                                                UInt,   toUInt );
+      case X86G_CC_OP_UMULL:  ACTIONS_UMUL( 32, UInt,   toUInt,
+                                                ULong,  idULong );
+
+      case X86G_CC_OP_SMULB:  ACTIONS_SMUL(  8, Char,   toUChar,
+                                                Short,  toUShort );
+      case X86G_CC_OP_SMULW:  ACTIONS_SMUL( 16, Short,  toUShort, 
+                                                Int,    toUInt   );
+      case X86G_CC_OP_SMULL:  ACTIONS_SMUL( 32, Int,    toUInt,
+                                                Long,   idULong );
+
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("x86g_calculate_eflags_all_WRK(X86)"
+                    "( %u, 0x%x, 0x%x, 0x%x )\n",
+                    cc_op, cc_dep1_formal, cc_dep2_formal, cc_ndep_formal );
+         vpanic("x86g_calculate_eflags_all_WRK(X86)");
+   }
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate all the 6 flags from the supplied thunk parameters.
+   Thin wrapper around the worker above; when PROFILE_EFLAGS is
+   enabled it also maintains and periodically dumps call counters. */
+UInt x86g_calculate_eflags_all ( UInt cc_op, 
+                                 UInt cc_dep1, 
+                                 UInt cc_dep2,
+                                 UInt cc_ndep )
+{
+#  if PROFILE_EFLAGS
+   if (!initted) initCounts();
+   n_calc_all++;
+   if (SHOW_COUNTS_NOW) showCounts();
+#  endif
+   return
+      x86g_calculate_eflags_all_WRK ( cc_op, cc_dep1, cc_dep2, cc_ndep );
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate just the carry flag from the supplied thunk parameters.
+   Common cc_ops (logic, sub, inc/dec) are special-cased so most calls
+   avoid the full flag computation; anything else falls through to the
+   worker and masks out C. */
+/* NOTE(review): regparm(3) with four parameters -- presumably the
+   fourth (cc_ndep) is passed on the stack; confirm this matches the
+   declaration in guest_x86_defs.h. */
+VEX_REGPARM(3)
+UInt x86g_calculate_eflags_c ( UInt cc_op, 
+                               UInt cc_dep1, 
+                               UInt cc_dep2,
+                               UInt cc_ndep )
+{
+#  if PROFILE_EFLAGS
+   if (!initted) initCounts();
+   n_calc_c++;
+   tabc_fast[cc_op]++;
+   if (SHOW_COUNTS_NOW) showCounts();
+#  endif
+
+   /* Fast-case some common ones. */
+   switch (cc_op) {
+      /* Logic ops always clear CF. */
+      case X86G_CC_OP_LOGICL: 
+      case X86G_CC_OP_LOGICW: 
+      case X86G_CC_OP_LOGICB:
+         return 0;
+      /* After sub/cmp, CF is exactly "dst <u src" at the op's width. */
+      case X86G_CC_OP_SUBL:
+         return ((UInt)cc_dep1) < ((UInt)cc_dep2)
+                   ? X86G_CC_MASK_C : 0;
+      case X86G_CC_OP_SUBW:
+         return ((UInt)(cc_dep1 & 0xFFFF)) < ((UInt)(cc_dep2 & 0xFFFF))
+                   ? X86G_CC_MASK_C : 0;
+      case X86G_CC_OP_SUBB:
+         return ((UInt)(cc_dep1 & 0xFF)) < ((UInt)(cc_dep2 & 0xFF))
+                   ? X86G_CC_MASK_C : 0;
+      /* inc/dec leave CF unchanged; the old CF is carried in NDEP. */
+      case X86G_CC_OP_INCL:
+      case X86G_CC_OP_DECL:
+         return cc_ndep & X86G_CC_MASK_C;
+      default: 
+         break;
+   }
+
+#  if PROFILE_EFLAGS
+   /* Reclassify this call from the fast to the slow counter. */
+   tabc_fast[cc_op]--;
+   tabc_slow[cc_op]++;
+#  endif
+
+   return x86g_calculate_eflags_all_WRK(cc_op,cc_dep1,cc_dep2,cc_ndep) 
+          & X86G_CC_MASK_C;
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* returns 1 or 0 */
+/* Evaluate condition code 'cond' against the flags described by the
+   thunk (cc_op/cc_dep1/cc_dep2/cc_ndep).  The condition encoding pairs
+   each predicate with its negation in adjacent even/odd values, so
+   'cond & 1' is the inversion bit and each case below handles both a
+   condition and its complement via 'inv ^ ...'. */
+UInt x86g_calculate_condition ( UInt/*X86Condcode*/ cond, 
+                                UInt cc_op, 
+                                UInt cc_dep1, 
+                                UInt cc_dep2,
+                                UInt cc_ndep )
+{
+   UInt eflags = x86g_calculate_eflags_all_WRK(cc_op, cc_dep1, 
+                                               cc_dep2, cc_ndep);
+   UInt of,sf,zf,cf,pf;
+   UInt inv = cond & 1;
+
+#  if PROFILE_EFLAGS
+   if (!initted) initCounts();
+   tab_cond[cc_op][cond]++;
+   n_calc_cond++;
+   if (SHOW_COUNTS_NOW) showCounts();
+#  endif
+
+   /* Each flag is shifted down to bit 0; the final '1 &' discards the
+      other (unshifted) flag bits still present in the value. */
+   switch (cond) {
+      case X86CondNO:
+      case X86CondO: /* OF == 1 */
+         of = eflags >> X86G_CC_SHIFT_O;
+         return 1 & (inv ^ of);
+
+      case X86CondNZ:
+      case X86CondZ: /* ZF == 1 */
+         zf = eflags >> X86G_CC_SHIFT_Z;
+         return 1 & (inv ^ zf);
+
+      case X86CondNB:
+      case X86CondB: /* CF == 1 */
+         cf = eflags >> X86G_CC_SHIFT_C;
+         return 1 & (inv ^ cf);
+         break; /* not reached */
+
+      case X86CondNBE:
+      case X86CondBE: /* (CF or ZF) == 1 */
+         cf = eflags >> X86G_CC_SHIFT_C;
+         zf = eflags >> X86G_CC_SHIFT_Z;
+         return 1 & (inv ^ (cf | zf));
+         break; /* not reached */
+
+      case X86CondNS:
+      case X86CondS: /* SF == 1 */
+         sf = eflags >> X86G_CC_SHIFT_S;
+         return 1 & (inv ^ sf);
+
+      case X86CondNP:
+      case X86CondP: /* PF == 1 */
+         pf = eflags >> X86G_CC_SHIFT_P;
+         return 1 & (inv ^ pf);
+
+      case X86CondNL:
+      case X86CondL: /* (SF xor OF) == 1 */
+         sf = eflags >> X86G_CC_SHIFT_S;
+         of = eflags >> X86G_CC_SHIFT_O;
+         return 1 & (inv ^ (sf ^ of));
+         break; /* not reached */
+
+      case X86CondNLE:
+      case X86CondLE: /* ((SF xor OF) or ZF)  == 1 */
+         sf = eflags >> X86G_CC_SHIFT_S;
+         of = eflags >> X86G_CC_SHIFT_O;
+         zf = eflags >> X86G_CC_SHIFT_Z;
+         return 1 & (inv ^ ((sf ^ of) | zf));
+         break; /* not reached */
+
+      default:
+         /* shouldn't really make these calls from generated code */
+         vex_printf("x86g_calculate_condition( %u, %u, 0x%x, 0x%x, 0x%x )\n",
+                    cond, cc_op, cc_dep1, cc_dep2, cc_ndep );
+         vpanic("x86g_calculate_condition");
+   }
+}
+
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Reconstruct a full %eflags value from the guest state: the six
+   condition bits come from the flag thunk, and the separately-tracked
+   DF, ID and AC bits are merged in at their architectural positions. */
+UInt LibVEX_GuestX86_get_eflags ( /*IN*/const VexGuestX86State* vex_state )
+{
+   UInt eflags = x86g_calculate_eflags_all_WRK(
+                    vex_state->guest_CC_OP,
+                    vex_state->guest_CC_DEP1,
+                    vex_state->guest_CC_DEP2,
+                    vex_state->guest_CC_NDEP
+                 );
+   /* DFLAG is stored as +1 (forward) or -1 (backward). */
+   UInt dflag = vex_state->guest_DFLAG;
+   vassert(dflag == 1 || dflag == 0xFFFFFFFF);
+   if (dflag == 0xFFFFFFFF)
+      eflags |= (1<<10);   /* DF */
+   if (vex_state->guest_IDFLAG == 1)
+      eflags |= (1<<21);   /* ID */
+   if (vex_state->guest_ACFLAG == 1)
+      eflags |= (1<<18);   /* AC */
+					     
+   return eflags;
+}
+
+/* VISIBLE TO LIBVEX CLIENT */
+/* Set or clear just the carry flag in the guest state.  The current
+   flags are first materialised from the thunk, the C bit is patched
+   according to bit 0 of new_carry_flag, and the thunk is rewritten in
+   COPY form (all flags held verbatim in DEP1). */
+void
+LibVEX_GuestX86_put_eflag_c ( UInt new_carry_flag,
+                              /*MOD*/VexGuestX86State* vex_state )
+{
+   UInt oszacp = x86g_calculate_eflags_all_WRK(
+                    vex_state->guest_CC_OP,
+                    vex_state->guest_CC_DEP1,
+                    vex_state->guest_CC_DEP2,
+                    vex_state->guest_CC_NDEP
+                 );
+   if (new_carry_flag & 1) {
+      oszacp |= X86G_CC_MASK_C;
+   } else {
+      oszacp &= ~X86G_CC_MASK_C;
+   }
+   vex_state->guest_CC_OP   = X86G_CC_OP_COPY;
+   vex_state->guest_CC_DEP1 = oszacp;
+   vex_state->guest_CC_DEP2 = 0;
+   vex_state->guest_CC_NDEP = 0;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- %eflags translation-time function specialisers.         ---*/
+/*--- These help iropt specialise calls the above run-time    ---*/
+/*--- %eflags functions.                                      ---*/
+/*---------------------------------------------------------------*/
+
+/* Used by the optimiser to try specialisations.  Returns an
+   equivalent expression, or NULL if none. */
+
+/* True iff 'e' is a 32-bit constant with value exactly 'n'. */
+static inline Bool isU32 ( IRExpr* e, UInt n )
+{
+   return 
+      toBool( e->tag == Iex_Const
+              && e->Iex.Const.con->tag == Ico_U32
+              && e->Iex.Const.con->Ico.U32 == n );
+}
+
+IRExpr* guest_x86_spechelper ( const HChar* function_name,
+                               IRExpr** args,
+                               IRStmt** precedingStmts,
+                               Int      n_precedingStmts )
+{
+#  define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
+#  define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
+#  define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
+#  define mkU8(_n)  IRExpr_Const(IRConst_U8(_n))
+
+   Int i, arity = 0;
+   for (i = 0; args[i]; i++)
+      arity++;
+#  if 0
+   vex_printf("spec request:\n");
+   vex_printf("   %s  ", function_name);
+   for (i = 0; i < arity; i++) {
+      vex_printf("  ");
+      ppIRExpr(args[i]);
+   }
+   vex_printf("\n");
+#  endif
+
+   /* --------- specialising "x86g_calculate_condition" --------- */
+
+   if (vex_streq(function_name, "x86g_calculate_condition")) {
+      /* specialise calls to above "calculate condition" function */
+      IRExpr *cond, *cc_op, *cc_dep1, *cc_dep2;
+      vassert(arity == 5);
+      cond    = args[0];
+      cc_op   = args[1];
+      cc_dep1 = args[2];
+      cc_dep2 = args[3];
+
+      /*---------------- ADDL ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_ADDL) && isU32(cond, X86CondZ)) {
+         /* long add, then Z --> test (dst+src == 0) */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32, 
+                           binop(Iop_Add32, cc_dep1, cc_dep2),
+                           mkU32(0)));
+      }
+
+      /*---------------- SUBL ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondZ)) {
+         /* long sub/cmp, then Z --> test dst==src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32, cc_dep1, cc_dep2));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondNZ)) {
+         /* long sub/cmp, then NZ --> test dst!=src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpNE32, cc_dep1, cc_dep2));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondL)) {
+         /* long sub/cmp, then L (signed less than) 
+            --> test dst <s src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32S, cc_dep1, cc_dep2));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondNL)) {
+         /* long sub/cmp, then NL (signed greater than or equal) 
+            --> test !(dst <s src) */
+         return binop(Iop_Xor32,
+                      unop(Iop_1Uto32,
+                           binop(Iop_CmpLT32S, cc_dep1, cc_dep2)),
+                      mkU32(1));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondLE)) {
+         /* long sub/cmp, then LE (signed less than or equal)
+            --> test dst <=s src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLE32S, cc_dep1, cc_dep2));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondNLE)) {
+         /* long sub/cmp, then NLE (signed not less than or equal)
+            --> test dst >s src 
+            --> test !(dst <=s src) */
+         return binop(Iop_Xor32,
+                      unop(Iop_1Uto32,
+                           binop(Iop_CmpLE32S, cc_dep1, cc_dep2)),
+                      mkU32(1));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondBE)) {
+         /* long sub/cmp, then BE (unsigned less than or equal)
+            --> test dst <=u src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondNBE)) {
+         /* long sub/cmp, then BE (unsigned greater than)
+            --> test !(dst <=u src) */
+         return binop(Iop_Xor32,
+                      unop(Iop_1Uto32,
+                           binop(Iop_CmpLE32U, cc_dep1, cc_dep2)),
+                      mkU32(1));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondB)) {
+         /* long sub/cmp, then B (unsigned less than)
+            --> test dst <u src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32U, cc_dep1, cc_dep2));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondNB)) {
+         /* long sub/cmp, then NB (unsigned greater than or equal)
+            --> test !(dst <u src) */
+         return binop(Iop_Xor32,
+                      unop(Iop_1Uto32,
+                           binop(Iop_CmpLT32U, cc_dep1, cc_dep2)),
+                      mkU32(1));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondS)) {
+         /* long sub/cmp, then S (negative) --> test (dst-src <s 0) */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32S, 
+                           binop(Iop_Sub32, cc_dep1, cc_dep2),
+                           mkU32(0)));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBL) && isU32(cond, X86CondNS)) {
+         /* long sub/cmp, then NS (not negative) --> test !(dst-src <s 0) */
+         return binop(Iop_Xor32,
+                      unop(Iop_1Uto32,
+                           binop(Iop_CmpLT32S, 
+                                 binop(Iop_Sub32, cc_dep1, cc_dep2),
+                                 mkU32(0))),
+                      mkU32(1));
+      }
+
+      /*---------------- SUBW ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_SUBW) && isU32(cond, X86CondZ)) {
+         /* word sub/cmp, then Z --> test dst==src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ16, 
+                           unop(Iop_32to16,cc_dep1), 
+                           unop(Iop_32to16,cc_dep2)));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBW) && isU32(cond, X86CondNZ)) {
+         /* word sub/cmp, then NZ --> test dst!=src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpNE16, 
+                           unop(Iop_32to16,cc_dep1), 
+                           unop(Iop_32to16,cc_dep2)));
+      }
+
+      /*---------------- SUBB ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_SUBB) && isU32(cond, X86CondZ)) {
+         /* byte sub/cmp, then Z --> test dst==src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ8, 
+                           unop(Iop_32to8,cc_dep1), 
+                           unop(Iop_32to8,cc_dep2)));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBB) && isU32(cond, X86CondNZ)) {
+         /* byte sub/cmp, then NZ --> test dst!=src */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpNE8, 
+                           unop(Iop_32to8,cc_dep1), 
+                           unop(Iop_32to8,cc_dep2)));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_SUBB) && isU32(cond, X86CondNBE)) {
+         /* byte sub/cmp, then NBE (unsigned greater than)
+            --> test src <u dst */
+         /* Note, args are opposite way round from the usual */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32U, 
+                           binop(Iop_And32,cc_dep2,mkU32(0xFF)),
+			   binop(Iop_And32,cc_dep1,mkU32(0xFF))));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_SUBB) && isU32(cond, X86CondS)
+                                        && isU32(cc_dep2, 0)) {
+         /* byte sub/cmp of zero, then S --> test (dst-0 <s 0) 
+                                         --> test dst <s 0
+                                         --> (UInt)dst[7] 
+            This is yet another scheme by which gcc figures out if the
+            top bit of a byte is 1 or 0.  See also LOGICB/CondS below. */
+         /* Note: isU32(cc_dep2, 0) is correct, even though this is
+            for an 8-bit comparison, since the args to the helper
+            function are always U32s. */
+         return binop(Iop_And32,
+                      binop(Iop_Shr32,cc_dep1,mkU8(7)),
+                      mkU32(1));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBB) && isU32(cond, X86CondNS)
+                                        && isU32(cc_dep2, 0)) {
+         /* byte sub/cmp of zero, then NS --> test !(dst-0 <s 0) 
+                                          --> test !(dst <s 0)
+                                          --> (UInt) !dst[7] 
+         */
+         return binop(Iop_Xor32,
+                      binop(Iop_And32,
+                            binop(Iop_Shr32,cc_dep1,mkU8(7)),
+                            mkU32(1)),
+                mkU32(1));
+      }
+
+      /*---------------- LOGICL ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_LOGICL) && isU32(cond, X86CondZ)) {
+         /* long and/or/xor, then Z --> test dst==0 */
+         return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
+      }
+      if (isU32(cc_op, X86G_CC_OP_LOGICL) && isU32(cond, X86CondNZ)) {
+         /* long and/or/xor, then NZ --> test dst!=0 */
+         return unop(Iop_1Uto32,binop(Iop_CmpNE32, cc_dep1, mkU32(0)));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_LOGICL) && isU32(cond, X86CondLE)) {
+         /* long and/or/xor, then LE
+            This is pretty subtle.  LOGIC sets SF and ZF according to the
+            result and makes OF be zero.  LE computes (SZ ^ OF) | ZF, but
+            OF is zero, so this reduces to SZ | ZF -- which will be 1 iff
+            the result is <=signed 0.  Hence ...
+         */
+         return unop(Iop_1Uto32,binop(Iop_CmpLE32S, cc_dep1, mkU32(0)));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_LOGICL) && isU32(cond, X86CondBE)) {
+         /* long and/or/xor, then BE
+            LOGIC sets ZF according to the result and makes CF be zero.
+            BE computes (CF | ZF), but CF is zero, so this reduces ZF 
+            -- which will be 1 iff the result is zero.  Hence ...
+         */
+         return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_LOGICL) && isU32(cond, X86CondS)) {
+         /* see comment below for (LOGICB, CondS) */
+         /* long and/or/xor, then S --> (UInt)result[31] */
+         return binop(Iop_And32,
+                      binop(Iop_Shr32,cc_dep1,mkU8(31)),
+                      mkU32(1));
+      }
+      if (isU32(cc_op, X86G_CC_OP_LOGICL) && isU32(cond, X86CondNS)) {
+         /* see comment below for (LOGICB, CondNS) */
+         /* long and/or/xor, then S --> (UInt) ~ result[31] */
+         return binop(Iop_Xor32,
+                binop(Iop_And32,
+                      binop(Iop_Shr32,cc_dep1,mkU8(31)),
+                      mkU32(1)),
+                mkU32(1));
+      }
+
+      /*---------------- LOGICW ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_LOGICW) && isU32(cond, X86CondZ)) {
+         /* word and/or/xor, then Z --> test dst==0 */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32, binop(Iop_And32,cc_dep1,mkU32(0xFFFF)), 
+                                        mkU32(0)));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_LOGICW) && isU32(cond, X86CondS)) {
+         /* see comment below for (LOGICB, CondS) */
+         /* word and/or/xor, then S --> (UInt)result[15] */
+         return binop(Iop_And32,
+                      binop(Iop_Shr32,cc_dep1,mkU8(15)),
+                      mkU32(1));
+      }
+
+      /*---------------- LOGICB ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_LOGICB) && isU32(cond, X86CondZ)) {
+         /* byte and/or/xor, then Z --> test dst==0 */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32, binop(Iop_And32,cc_dep1,mkU32(255)), 
+                                        mkU32(0)));
+      }
+      if (isU32(cc_op, X86G_CC_OP_LOGICB) && isU32(cond, X86CondNZ)) {
+         /* byte and/or/xor, then Z --> test dst!=0 */
+         /* b9ac9:       84 c0                   test   %al,%al
+            b9acb:       75 0d                   jne    b9ada */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpNE32, binop(Iop_And32,cc_dep1,mkU32(255)), 
+                                        mkU32(0)));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_LOGICB) && isU32(cond, X86CondS)) {
+         /* this is an idiom gcc sometimes uses to find out if the top
+            bit of a byte register is set: eg testb %al,%al; js ..
+            Since it just depends on the top bit of the byte, extract
+            that bit and explicitly get rid of all the rest.  This
+            helps memcheck avoid false positives in the case where any
+            of the other bits in the byte are undefined. */
+         /* byte and/or/xor, then S --> (UInt)result[7] */
+         return binop(Iop_And32,
+                      binop(Iop_Shr32,cc_dep1,mkU8(7)),
+                      mkU32(1));
+      }
+      if (isU32(cc_op, X86G_CC_OP_LOGICB) && isU32(cond, X86CondNS)) {
+         /* ditto, for negation-of-S. */
+         /* byte and/or/xor, then S --> (UInt) ~ result[7] */
+         return binop(Iop_Xor32,
+                binop(Iop_And32,
+                      binop(Iop_Shr32,cc_dep1,mkU8(7)),
+                      mkU32(1)),
+                mkU32(1));
+      }
+
+      /*---------------- DECL ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_DECL) && isU32(cond, X86CondZ)) {
+         /* dec L, then Z --> test dst == 0 */
+         return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_DECL) && isU32(cond, X86CondS)) {
+         /* dec L, then S --> compare DST <s 0 */
+         return unop(Iop_1Uto32,binop(Iop_CmpLT32S, cc_dep1, mkU32(0)));
+      }
+
+      /*---------------- DECW ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_DECW) && isU32(cond, X86CondZ)) {
+         /* dec W, then Z --> test dst == 0 */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32, 
+                           binop(Iop_Shl32,cc_dep1,mkU8(16)), 
+                           mkU32(0)));
+      }
+
+      /*---------------- INCW ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_INCW) && isU32(cond, X86CondZ)) {
+         /* This rewrite helps memcheck on 'incw %ax ; je ...'. */
+         /* inc W, then Z --> test dst == 0 */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpEQ32, 
+                           binop(Iop_Shl32,cc_dep1,mkU8(16)),
+                           mkU32(0)));
+      }
+
+      /*---------------- SHRL ----------------*/
+
+      if (isU32(cc_op, X86G_CC_OP_SHRL) && isU32(cond, X86CondZ)) {
+         /* SHRL, then Z --> test dep1 == 0 */
+         return unop(Iop_1Uto32,binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
+      }
+
+      /*---------------- COPY ----------------*/
+      /* This can happen, as a result of x87 FP compares: "fcom ... ;
+         fnstsw %ax ; sahf ; jbe" for example. */
+
+      if (isU32(cc_op, X86G_CC_OP_COPY) && 
+          (isU32(cond, X86CondBE) || isU32(cond, X86CondNBE))) {
+         /* COPY, then BE --> extract C and Z from dep1, and test 
+            (C or Z) == 1. */
+         /* COPY, then NBE --> extract C and Z from dep1, and test
+            (C or Z) == 0. */
+         UInt nnn = isU32(cond, X86CondBE) ? 1 : 0;
+         return
+            unop(
+               Iop_1Uto32,
+               binop(
+                  Iop_CmpEQ32,
+                  binop(
+                     Iop_And32,
+                     binop(
+                        Iop_Or32,
+                        binop(Iop_Shr32, cc_dep1, mkU8(X86G_CC_SHIFT_C)),
+                        binop(Iop_Shr32, cc_dep1, mkU8(X86G_CC_SHIFT_Z))
+                     ),
+                     mkU32(1)
+                  ),
+                  mkU32(nnn)
+               )
+            );
+      }
+      
+      if (isU32(cc_op, X86G_CC_OP_COPY) 
+          && (isU32(cond, X86CondB) || isU32(cond, X86CondNB))) {
+         /* COPY, then B --> extract C from dep1, and test (C == 1). */
+         /* COPY, then NB --> extract C from dep1, and test (C == 0). */
+         UInt nnn = isU32(cond, X86CondB) ? 1 : 0;
+         return
+            unop(
+               Iop_1Uto32,
+               binop(
+                  Iop_CmpEQ32,
+                  binop(
+                     Iop_And32,
+                     binop(Iop_Shr32, cc_dep1, mkU8(X86G_CC_SHIFT_C)),
+                     mkU32(1)
+                  ),
+                  mkU32(nnn)
+               )
+            );
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_COPY) 
+          && (isU32(cond, X86CondZ) || isU32(cond, X86CondNZ))) {
+         /* COPY, then Z --> extract Z from dep1, and test (Z == 1). */
+         /* COPY, then NZ --> extract Z from dep1, and test (Z == 0). */
+         UInt nnn = isU32(cond, X86CondZ) ? 1 : 0;
+         return
+            unop(
+               Iop_1Uto32,
+               binop(
+                  Iop_CmpEQ32,
+                  binop(
+                     Iop_And32,
+                     binop(Iop_Shr32, cc_dep1, mkU8(X86G_CC_SHIFT_Z)),
+                     mkU32(1)
+                  ),
+                  mkU32(nnn)
+               )
+            );
+      }
+
+      if (isU32(cc_op, X86G_CC_OP_COPY) 
+          && (isU32(cond, X86CondP) || isU32(cond, X86CondNP))) {
+         /* COPY, then P --> extract P from dep1, and test (P == 1). */
+         /* COPY, then NP --> extract P from dep1, and test (P == 0). */
+         UInt nnn = isU32(cond, X86CondP) ? 1 : 0;
+         return
+            unop(
+               Iop_1Uto32,
+               binop(
+                  Iop_CmpEQ32,
+                  binop(
+                     Iop_And32,
+                     binop(Iop_Shr32, cc_dep1, mkU8(X86G_CC_SHIFT_P)),
+                     mkU32(1)
+                  ),
+                  mkU32(nnn)
+               )
+            );
+      }
+
+      return NULL;
+   }
+
+   /* --------- specialising "x86g_calculate_eflags_c" --------- */
+
+   if (vex_streq(function_name, "x86g_calculate_eflags_c")) {
+      /* specialise calls to above "calculate_eflags_c" function */
+      IRExpr *cc_op, *cc_dep1, *cc_dep2, *cc_ndep;
+      vassert(arity == 4);
+      cc_op   = args[0];
+      cc_dep1 = args[1];
+      cc_dep2 = args[2];
+      cc_ndep = args[3];
+
+      if (isU32(cc_op, X86G_CC_OP_SUBL)) {
+         /* C after sub denotes unsigned less than */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32U, cc_dep1, cc_dep2));
+      }
+      if (isU32(cc_op, X86G_CC_OP_SUBB)) {
+         /* C after sub denotes unsigned less than */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32U, 
+                           binop(Iop_And32,cc_dep1,mkU32(0xFF)),
+                           binop(Iop_And32,cc_dep2,mkU32(0xFF))));
+      }
+      if (isU32(cc_op, X86G_CC_OP_LOGICL)
+          || isU32(cc_op, X86G_CC_OP_LOGICW)
+          || isU32(cc_op, X86G_CC_OP_LOGICB)) {
+         /* cflag after logic is zero */
+         return mkU32(0);
+      }
+      if (isU32(cc_op, X86G_CC_OP_DECL) || isU32(cc_op, X86G_CC_OP_INCL)) {
+         /* If the thunk is dec or inc, the cflag is supplied as CC_NDEP. */
+         return cc_ndep;
+      }
+      if (isU32(cc_op, X86G_CC_OP_COPY)) {
+         /* cflag after COPY is stored in DEP1. */
+         return
+            binop(
+               Iop_And32,
+               binop(Iop_Shr32, cc_dep1, mkU8(X86G_CC_SHIFT_C)),
+               mkU32(1)
+            );
+      }
+      if (isU32(cc_op, X86G_CC_OP_ADDL)) {
+         /* C after add denotes sum <u either arg */
+         return unop(Iop_1Uto32,
+                     binop(Iop_CmpLT32U, 
+                           binop(Iop_Add32, cc_dep1, cc_dep2), 
+                           cc_dep1));
+      }
+      // ATC, requires verification, no test case known
+      //if (isU32(cc_op, X86G_CC_OP_SMULL)) {
+      //   /* C after signed widening multiply denotes the case where
+      //      the top half of the result isn't simply the sign extension
+      //      of the bottom half (iow the result doesn't fit completely
+      //      in the bottom half).  Hence: 
+      //        C = hi-half(dep1 x dep2) != lo-half(dep1 x dep2) >>s 31 
+      //      where 'x' denotes signed widening multiply.*/
+      //   return 
+      //      unop(Iop_1Uto32,
+      //           binop(Iop_CmpNE32, 
+      //                 unop(Iop_64HIto32,
+      //                      binop(Iop_MullS32, cc_dep1, cc_dep2)),
+      //                 binop(Iop_Sar32,
+      //                       binop(Iop_Mul32, cc_dep1, cc_dep2), mkU8(31)) ));
+      //}
+#     if 0
+      if (cc_op->tag == Iex_Const) {
+         vex_printf("CFLAG "); ppIRExpr(cc_op); vex_printf("\n");
+      }
+#     endif
+
+      return NULL;
+   }
+
+   /* --------- specialising "x86g_calculate_eflags_all" --------- */
+
+   if (vex_streq(function_name, "x86g_calculate_eflags_all")) {
+      /* specialise calls to above "calculate_eflags_all" function */
+      IRExpr *cc_op, *cc_dep1; /*, *cc_dep2, *cc_ndep; */
+      vassert(arity == 4);
+      cc_op   = args[0];
+      cc_dep1 = args[1];
+      /* cc_dep2 = args[2]; */
+      /* cc_ndep = args[3]; */
+
+      if (isU32(cc_op, X86G_CC_OP_COPY)) {
+         /* eflags after COPY are stored in DEP1. */
+         return
+            binop(
+               Iop_And32,
+               cc_dep1,
+               mkU32(X86G_CC_MASK_O | X86G_CC_MASK_S | X86G_CC_MASK_Z 
+                     | X86G_CC_MASK_A | X86G_CC_MASK_C | X86G_CC_MASK_P)
+            );
+      }
+      return NULL;
+   }
+
+#  undef unop
+#  undef binop
+#  undef mkU32
+#  undef mkU8
+
+   return NULL;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Supporting functions for x87 FPU activities.            ---*/
+/*---------------------------------------------------------------*/
+
+static inline Bool host_is_little_endian ( void )
+{
+   /* Probe byte order: view a known 32-bit pattern as bytes.  On a
+      little-endian host the lowest-addressed byte is 0x10. */
+   UInt  probe = 0x76543210;
+   UChar lsb   = *(UChar*)(&probe);
+   return toBool(lsb == 0x10);
+}
+
+/* 80 and 64-bit floating point formats:
+
+   80-bit:
+
+    S  0       0-------0      zero
+    S  0       0X------X      denormals
+    S  1-7FFE  1X------X      normals (all normals have leading 1)
+    S  7FFF    10------0      infinity
+    S  7FFF    10X-----X      snan
+    S  7FFF    11X-----X      qnan
+
+   S is the sign bit.  For runs X----X, at least one of the Xs must be
+   nonzero.  Exponent is 15 bits, fractional part is 63 bits, and
+   there is an explicitly represented leading 1, and a sign bit,
+   giving 80 in total.
+
+   64-bit avoids the confusion of an explicitly represented leading 1
+   and so is simpler:
+
+    S  0      0------0   zero
+    S  0      X------X   denormals
+    S  1-7FE  any        normals
+    S  7FF    0------0   infinity
+    S  7FF    0X-----X   snan
+    S  7FF    1X-----X   qnan
+
+   Exponent is 11 bits, fractional part is 52 bits, and there is a 
+   sign bit, giving 64 in total.
+*/
+
+/* Inspect a value and its tag, as per the x87 'FXAM' instruction. */
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+UInt x86g_calculate_FXAM ( UInt tag, ULong dbl ) 
+{
+   Bool   mantissaIsZero;
+   Int    bexp;
+   UChar  sign;
+   UChar* f64;
+
+   vassert(host_is_little_endian());
+
+   /* vex_printf("calculate_FXAM ( %d, %llx ) .. ", tag, dbl ); */
+
+   /* View the F64 image byte-wise; the sign is the top bit of the
+      most significant byte.  Each return below is the C3,C2,C1,C0
+      nibble, with C1 always carrying the sign. */
+   f64  = (UChar*)(&dbl);
+   sign = toUChar( (f64[7] >> 7) & 1 );
+
+   /* First off, if the tag indicates the register was empty,
+      return 1,0,sign,1 */
+   if (tag == 0) {
+      /* vex_printf("Empty\n"); */
+      return X86G_FC_MASK_C3 | 0 | (sign << X86G_FC_SHIFT_C1) 
+                                 | X86G_FC_MASK_C0;
+   }
+
+   /* Biased exponent: bits 62..52 of the double. */
+   bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);
+   bexp &= 0x7FF;
+
+   /* Fraction is bits 51..0: the low nibble of f64[6] plus the six
+      bytes below it. */
+   mantissaIsZero
+      = toBool(
+           (f64[6] & 0x0F) == 0 
+           && (f64[5] | f64[4] | f64[3] | f64[2] | f64[1] | f64[0]) == 0
+        );
+
+   /* If both exponent and mantissa are zero, the value is zero.
+      Return 1,0,sign,0. */
+   if (bexp == 0 && mantissaIsZero) {
+      /* vex_printf("Zero\n"); */
+      return X86G_FC_MASK_C3 | 0 
+                             | (sign << X86G_FC_SHIFT_C1) | 0;
+   }
+   
+   /* If exponent is zero but mantissa isn't, it's a denormal.
+      Return 1,1,sign,0. */
+   if (bexp == 0 && !mantissaIsZero) {
+      /* vex_printf("Denormal\n"); */
+      return X86G_FC_MASK_C3 | X86G_FC_MASK_C2 
+                             | (sign << X86G_FC_SHIFT_C1) | 0;
+   }
+
+   /* If the exponent is 7FF and the mantissa is zero, this is an infinity.
+      Return 0,1,sign,1. */
+   if (bexp == 0x7FF && mantissaIsZero) {
+      /* vex_printf("Inf\n"); */
+      return 0 | X86G_FC_MASK_C2 | (sign << X86G_FC_SHIFT_C1) 
+                                 | X86G_FC_MASK_C0;
+   }
+
+   /* If the exponent is 7FF and the mantissa isn't zero, this is a NaN.
+      Return 0,0,sign,1. */
+   if (bexp == 0x7FF && !mantissaIsZero) {
+      /* vex_printf("NaN\n"); */
+      return 0 | 0 | (sign << X86G_FC_SHIFT_C1) | X86G_FC_MASK_C0;
+   }
+
+   /* Uh, ok, we give up.  It must be a normal finite number.
+      Return 0,1,sign,0.
+   */
+   /* vex_printf("normal\n"); */
+   return 0 | X86G_FC_MASK_C2 | (sign << X86G_FC_SHIFT_C1) | 0;
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest memory) */
+ULong x86g_dirtyhelper_loadF80le ( Addr addrU )
+{
+   /* Read the 10-byte little-endian x87 extended value at addrU and
+      return it converted to a 64-bit double image. */
+   ULong f64;
+   convert_f80le_to_f64le ( (UChar*)addrU, (UChar*)&f64 );
+   return f64;
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (writes guest memory) */
+void x86g_dirtyhelper_storeF80le ( Addr addrU, ULong f64 )
+{
+   /* Convert the 64-bit double image f64 to 10-byte x87 extended
+      format and write it, little-endian, at addrU. */
+   convert_f64le_to_f80le( (UChar*)&f64, (UChar*)addrU );
+}
+
+
+/*----------------------------------------------*/
+/*--- The exported fns ..                    ---*/
+/*----------------------------------------------*/
+
+/* Layout of the real x87 state. */
+/* 13 June 05: Fpu_State and auxiliary constants was moved to
+   g_generic_x87.h */
+
+
+/* CLEAN HELPER */
+/* fpucw[15:0] contains a x87 native format FPU control word.
+   Extract from it the required FPROUND value and any resulting
+   emulation warning, and return (warn << 32) | fpround value. 
+*/
+ULong x86g_check_fldcw ( UInt fpucw )
+{
+   /* fpucw[11:10] holds the rounding mode, encoded exactly as per
+      enum IRRoundingMode, so it can be extracted directly. */
+   UInt      fpround = (fpucw >> 10) & 3;
+   VexEmNote note    = EmNote_NONE;
+
+   if ((fpucw & 0x3F) != 0x3F) {
+      /* one or more of the six exception bits is unmasked */
+      note = EmWarn_X86_x87exns;
+   } else if (((fpucw >> 8) & 3) != 3) {
+      /* precision control is something other than the maximum */
+      note = EmWarn_X86_x87precision;
+   }
+
+   /* Pack as (warning << 32) | rounding mode. */
+   return (((ULong)note) << 32) | ((ULong)fpround);
+}
+
+/* CLEAN HELPER */
+/* Given fpround as an IRRoundingMode value, create a suitable x87
+   native format FPU control word. */
+UInt x86g_create_fpucw ( UInt fpround )
+{
+   /* Base control word 0x037F with the IRRoundingMode value placed
+      in bits 11:10. */
+   UInt rm = fpround & 3;
+   return 0x037F | (rm << 10);
+}
+
+
+/* CLEAN HELPER */
+/* mxcsr[15:0] contains a SSE native format MXCSR value.
+   Extract from it the required SSEROUND value and any resulting
+   emulation warning, and return (warn << 32) | sseround value.
+*/
+ULong x86g_check_ldmxcsr ( UInt mxcsr )
+{
+   /* mxcsr[14:13] holds the rounding mode, encoded exactly as per
+      enum IRRoundingMode. */
+   UInt      sseround = (mxcsr >> 13) & 3;
+   VexEmNote note     = EmNote_NONE;
+
+   if ((mxcsr & 0x1F80) != 0x1F80) {
+      /* one or more exception bits is unmasked */
+      note = EmWarn_X86_sseExns;
+   } else if ((mxcsr & (1<<15)) != 0) {
+      /* flush-to-zero (FZ) requested */
+      note = EmWarn_X86_fz;
+   } else if ((mxcsr & (1<<6)) != 0) {
+      /* denormals-are-zero (DAZ) requested */
+      note = EmWarn_X86_daz;
+   }
+
+   /* Pack as (warning << 32) | rounding mode. */
+   return (((ULong)note) << 32) | ((ULong)sseround);
+}
+
+
+/* CLEAN HELPER */
+/* Given sseround as an IRRoundingMode value, create a suitable SSE
+   native format MXCSR value. */
+UInt x86g_create_mxcsr ( UInt sseround )
+{
+   /* Base MXCSR 0x1F80 with the rounding mode placed in bits 14:13. */
+   UInt rm = sseround & 3;
+   return 0x1F80 | (rm << 13);
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (writes guest state) */
+/* Initialise the x87 FPU state as per 'finit'. */
+void x86g_dirtyhelper_FINIT ( VexGuestX86State* gst )
+{
+   Int r;
+   /* Empty every stack slot: tag 0 == empty, value = IEEE754 zero. */
+   for (r = 0; r < 8; r++) {
+      gst->guest_FPTAG[r] = 0;
+      gst->guest_FPREG[r] = 0;
+   }
+   /* Reset stack top, rounding mode and condition codes. */
+   gst->guest_FTOP    = 0;
+   gst->guest_FPROUND = (UInt)Irrm_NEAREST;
+   gst->guest_FC3210  = 0;
+}
+
+
+/* This is used to implement both 'frstor' and 'fldenv'.  The latter
+   appears to differ from the former only in that the 8 FP registers
+   themselves are not transferred into the guest state. */
+static
+VexEmNote do_put_x87 ( Bool moveRegs,
+                       /*IN*/UChar* x87_state,
+                       /*OUT*/VexGuestX86State* vex_state )
+{
+   Int        stno, preg;
+   UInt       tag;
+   ULong*     vexRegs = (ULong*)(&vex_state->guest_FPREG[0]);
+   UChar*     vexTags = (UChar*)(&vex_state->guest_FPTAG[0]);
+   Fpu_State* x87     = (Fpu_State*)x87_state;
+   UInt       ftop    = (x87->env[FP_ENV_STAT] >> 11) & 7;  /* TOP field */
+   UInt       tagw    = x87->env[FP_ENV_TAG];
+   UInt       fpucw   = x87->env[FP_ENV_CTRL];
+   UInt       c3210   = x87->env[FP_ENV_STAT] & 0x4700;  /* C3,C2,C1,C0 */
+   VexEmNote  ew;
+   UInt       fpround;
+   ULong      pair;
+
+   /* Copy registers and tags.  The image holds registers in ST
+      (stack) order whilst the guest state holds them by physical
+      register number, hence the (stno + ftop) & 7 remapping. */
+   for (stno = 0; stno < 8; stno++) {
+      preg = (stno + ftop) & 7;
+      tag = (tagw >> (2*preg)) & 3;
+      if (tag == 3) {
+         /* register is empty (2-bit tag value 3); any other tag value
+            is treated as a valid register below. */
+         /* hmm, if it's empty, does it still get written?  Probably
+            safer to say it does.  If we don't, memcheck could get out
+            of sync, in that it thinks all FP registers are defined by
+            this helper, but in reality some have not been updated. */
+         if (moveRegs)
+            vexRegs[preg] = 0; /* IEEE754 64-bit zero */
+         vexTags[preg] = 0;
+      } else {
+         /* register is non-empty */
+         if (moveRegs)
+            convert_f80le_to_f64le( &x87->reg[10*stno], 
+                                    (UChar*)&vexRegs[preg] );
+         vexTags[preg] = 1;
+      }
+   }
+
+   /* stack pointer */
+   vex_state->guest_FTOP = ftop;
+
+   /* status word */
+   vex_state->guest_FC3210 = c3210;
+
+   /* handle the control word, setting FPROUND and detecting any
+      emulation warnings. */
+   pair    = x86g_check_fldcw ( (UInt)fpucw );
+   fpround = (UInt)pair;               /* low 32 bits: rounding mode */
+   ew      = (VexEmNote)(pair >> 32);  /* high 32 bits: warning */
+   
+   vex_state->guest_FPROUND = fpround & 3;
+
+   /* emulation warnings --> caller */
+   return ew;
+}
+
+
+/* Create an x87 FPU state from the guest state, as close as
+   we can approximate it. */
+static
+void do_get_x87 ( /*IN*/VexGuestX86State* vex_state,
+                  /*OUT*/UChar* x87_state )
+{
+   Int        i, stno, preg;
+   UInt       tagw;
+   ULong*     vexRegs = (ULong*)(&vex_state->guest_FPREG[0]);
+   UChar*     vexTags = (UChar*)(&vex_state->guest_FPTAG[0]);
+   Fpu_State* x87     = (Fpu_State*)x87_state;
+   UInt       ftop    = vex_state->guest_FTOP;
+   UInt       c3210   = vex_state->guest_FC3210;
+
+   /* Zero the 14-halfword environment area, then fill in the
+      interesting fields. */
+   for (i = 0; i < 14; i++)
+      x87->env[i] = 0;
+
+   /* NOTE(review): the 0xFFFF words appear to be unused
+      selector/padding halfwords of the env image -- confirm against
+      the FSAVE memory-image layout. */
+   x87->env[1] = x87->env[3] = x87->env[5] = x87->env[13] = 0xFFFF;
+   x87->env[FP_ENV_STAT] 
+      = toUShort(((ftop & 7) << 11) | (c3210 & 0x4700));
+   x87->env[FP_ENV_CTRL] 
+      = toUShort(x86g_create_fpucw( vex_state->guest_FPROUND ));
+
+   /* Dump the register stack in ST order. */
+   tagw = 0;
+   for (stno = 0; stno < 8; stno++) {
+      preg = (stno + ftop) & 7;
+      if (vexTags[preg] == 0) {
+         /* register is empty: 2-bit tag 3.  Still dump the register
+            image, so the whole output area ends up written. */
+         tagw |= (3 << (2*preg));
+         convert_f64le_to_f80le( (UChar*)&vexRegs[preg], 
+                                 &x87->reg[10*stno] );
+      } else {
+         /* register is full: 2-bit tag 0 (the OR is a no-op, kept for
+            symmetry with the empty case). */
+         tagw |= (0 << (2*preg));
+         convert_f64le_to_f80le( (UChar*)&vexRegs[preg], 
+                                 &x87->reg[10*stno] );
+      }
+   }
+   x87->env[FP_ENV_TAG] = toUShort(tagw);
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest state, writes guest mem) */
+void x86g_dirtyhelper_FXSAVE ( VexGuestX86State* gst, HWord addr )
+{
+   /* Somewhat roundabout, but at least it's simple: build a classic
+      x87 image first, then rearrange it into the fxsave layout. */
+   Fpu_State tmp;
+   UShort*   addrS = (UShort*)addr;
+   UChar*    addrC = (UChar*)addr;
+   U128*     xmm   = (U128*)(addr + 160);  /* XMM area starts at +160 */
+   UInt      mxcsr;
+   UShort    fp_tags;
+   UInt      summary_tags;
+   Int       r, stno;
+   UShort    *srcS, *dstS;
+
+   do_get_x87( gst, (UChar*)&tmp );
+   mxcsr = x86g_create_mxcsr( gst->guest_SSEROUND );
+
+   /* Now build the proper fxsave image from the x87 image we just
+      made. */
+
+   addrS[0]  = tmp.env[FP_ENV_CTRL]; /* FCW: fpu control word */
+   addrS[1]  = tmp.env[FP_ENV_STAT]; /* FSW: fpu status word */
+
+   /* set addrS[2] in an endian-independent way */
+   /* The fxsave tag byte is abridged: one bit per register, set iff
+      the full 2-bit tag is not 3 (i.e. the register is occupied). */
+   summary_tags = 0;
+   fp_tags = tmp.env[FP_ENV_TAG];
+   for (r = 0; r < 8; r++) {
+      if ( ((fp_tags >> (2*r)) & 3) != 3 )
+         summary_tags |= (1 << r);
+   }
+   addrC[4]  = toUChar(summary_tags); /* FTW: tag summary byte */
+   addrC[5]  = 0; /* pad */
+
+   addrS[3]  = 0; /* FOP: fpu opcode (bogus) */
+   addrS[4]  = 0;
+   addrS[5]  = 0; /* FPU IP (bogus) */
+   addrS[6]  = 0; /* FPU IP's segment selector (bogus) (although we
+                     could conceivably dump %CS here) */
+
+   addrS[7]  = 0; /* Intel reserved */
+
+   addrS[8]  = 0; /* FPU DP (operand pointer) (bogus) */
+   addrS[9]  = 0; /* FPU DP (operand pointer) (bogus) */
+   addrS[10] = 0; /* segment selector for above operand pointer; %DS
+                     perhaps? */
+   addrS[11] = 0; /* Intel reserved */
+
+   addrS[12] = toUShort(mxcsr);  /* MXCSR */
+   addrS[13] = toUShort(mxcsr >> 16);
+
+   addrS[14] = 0xFFFF; /* MXCSR mask (lo16); who knows what for */
+   addrS[15] = 0xFFFF; /* MXCSR mask (hi16); who knows what for */
+
+   /* Copy in the FP registers, in ST order.  Each register occupies
+      a 16-byte slot of which only the first 10 bytes are used. */
+   for (stno = 0; stno < 8; stno++) {
+      srcS = (UShort*)(&tmp.reg[10*stno]);
+      dstS = (UShort*)(&addrS[16 + 8*stno]);
+      dstS[0] = srcS[0];
+      dstS[1] = srcS[1];
+      dstS[2] = srcS[2];
+      dstS[3] = srcS[3];
+      dstS[4] = srcS[4];
+      dstS[5] = 0;
+      dstS[6] = 0;
+      dstS[7] = 0;
+   }
+
+   /* That's the first 160 bytes of the image done.  Now only %xmm0
+      .. %xmm7 remain to be copied.  If the host is big-endian, these
+      need to be byte-swapped. */
+   vassert(host_is_little_endian());
+
+#  define COPY_U128(_dst,_src)                       \
+      do { _dst[0] = _src[0]; _dst[1] = _src[1];     \
+           _dst[2] = _src[2]; _dst[3] = _src[3]; }   \
+      while (0)
+
+   COPY_U128( xmm[0], gst->guest_XMM0 );
+   COPY_U128( xmm[1], gst->guest_XMM1 );
+   COPY_U128( xmm[2], gst->guest_XMM2 );
+   COPY_U128( xmm[3], gst->guest_XMM3 );
+   COPY_U128( xmm[4], gst->guest_XMM4 );
+   COPY_U128( xmm[5], gst->guest_XMM5 );
+   COPY_U128( xmm[6], gst->guest_XMM6 );
+   COPY_U128( xmm[7], gst->guest_XMM7 );
+
+#  undef COPY_U128
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (writes guest state, reads guest mem) */
+VexEmNote x86g_dirtyhelper_FXRSTOR ( VexGuestX86State* gst, HWord addr )
+{
+   Fpu_State tmp;
+   VexEmNote warnX87 = EmNote_NONE;
+   VexEmNote warnXMM = EmNote_NONE;
+   UShort*   addrS   = (UShort*)addr;
+   UChar*    addrC   = (UChar*)addr;
+   U128*     xmm     = (U128*)(addr + 160);  /* XMM area starts at +160 */
+   UShort    fp_tags;
+   Int       r, stno, i;
+
+   /* Restore %xmm0 .. %xmm7.  If the host is big-endian, these need
+      to be byte-swapped. */
+   vassert(host_is_little_endian());
+
+#  define COPY_U128(_dst,_src)                       \
+      do { _dst[0] = _src[0]; _dst[1] = _src[1];     \
+           _dst[2] = _src[2]; _dst[3] = _src[3]; }   \
+      while (0)
+
+   COPY_U128( gst->guest_XMM0, xmm[0] );
+   COPY_U128( gst->guest_XMM1, xmm[1] );
+   COPY_U128( gst->guest_XMM2, xmm[2] );
+   COPY_U128( gst->guest_XMM3, xmm[3] );
+   COPY_U128( gst->guest_XMM4, xmm[4] );
+   COPY_U128( gst->guest_XMM5, xmm[5] );
+   COPY_U128( gst->guest_XMM6, xmm[6] );
+   COPY_U128( gst->guest_XMM7, xmm[7] );
+
+#  undef COPY_U128
+
+   /* Copy the x87 registers out of the image, into a temporary
+      Fpu_State struct. */
+
+   /* LLVM on Darwin turns the following loop into a movaps plus a
+      handful of scalar stores.  This would work fine except for the
+      fact that VEX doesn't keep the stack correctly (16-) aligned for
+      the call, so it segfaults.  Hence, split the loop into two
+      pieces (and pray LLVM doesn't merely glue them back together) so
+      it's composed only of scalar stores and so is alignment
+      insensitive.  Of course this is a kludge of the lamest kind --
+      VEX should be fixed properly. */
+   /* Code that seems to trigger the problem:
+      for (i = 0; i < 14; i++) tmp.env[i] = 0; */
+   for (i = 0; i < 7; i++) tmp.env[i+0] = 0;
+   for (i = 0; i < 7; i++) tmp.env[i+7] = 0;
+   
+   for (i = 0; i < 80; i++) tmp.reg[i] = 0;
+   /* fill in tmp.reg[0..7]; each fxsave slot is 16 bytes, of which
+      only the first 10 hold the register image. */
+   for (stno = 0; stno < 8; stno++) {
+      UShort* dstS = (UShort*)(&tmp.reg[10*stno]);
+      UShort* srcS = (UShort*)(&addrS[16 + 8*stno]);
+      dstS[0] = srcS[0];
+      dstS[1] = srcS[1];
+      dstS[2] = srcS[2];
+      dstS[3] = srcS[3];
+      dstS[4] = srcS[4];
+   }
+   /* fill in tmp.env[0..13] */
+   tmp.env[FP_ENV_CTRL] = addrS[0]; /* FCW: fpu control word */
+   tmp.env[FP_ENV_STAT] = addrS[1]; /* FSW: fpu status word */
+
+   /* Expand the abridged fxsave tag byte back into full 2-bit tags.
+      A set bit means the register is occupied (see FXSAVE above). */
+   fp_tags = 0;
+   for (r = 0; r < 8; r++) {
+      if (addrC[4] & (1<<r))
+         fp_tags |= (0 << (2*r)); /* tag 00: VALID -- not really precise
+                                     enough, as zero/special values are
+                                     not distinguished. */
+      else 
+         fp_tags |= (3 << (2*r)); /* tag 11: EMPTY */
+   }
+   tmp.env[FP_ENV_TAG] = fp_tags;
+
+   /* Now write 'tmp' into the guest state. */
+   warnX87 = do_put_x87( True/*moveRegs*/, (UChar*)&tmp, gst );
+
+   { UInt w32 = (((UInt)addrS[12]) & 0xFFFF)
+                | ((((UInt)addrS[13]) & 0xFFFF) << 16);
+     ULong w64 = x86g_check_ldmxcsr( w32 );
+
+     warnXMM = (VexEmNote)(w64 >> 32);
+
+     gst->guest_SSEROUND = (UInt)w64;
+   }
+
+   /* Prefer an X87 emwarn over an XMM one, if both exist. */
+   if (warnX87 != EmNote_NONE)
+      return warnX87;
+   else
+      return warnXMM;
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest state, writes guest mem) */
+void x86g_dirtyhelper_FSAVE ( VexGuestX86State* gst, HWord addr )
+{
+   /* Dump the complete x87 image (environment plus registers) at
+      addr. */
+   do_get_x87( gst, (UChar*)addr );
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (writes guest state, reads guest mem) */
+VexEmNote x86g_dirtyhelper_FRSTOR ( VexGuestX86State* gst, HWord addr )
+{
+   /* Load a complete x87 image (environment plus registers) from
+      addr; returns any emulation warning from the control word. */
+   return do_put_x87( True/*regs too*/, (UChar*)addr, gst );
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (reads guest state, writes guest mem) */
+void x86g_dirtyhelper_FSTENV ( VexGuestX86State* gst, HWord addr )
+{
+   /* Somewhat roundabout, but at least it's simple: build the full
+      x87 image, then copy out only the 14 environment halfwords. */
+   Fpu_State tmp;
+   UShort*   dst = (UShort*)addr;
+   Int       i;
+   do_get_x87( gst, (UChar*)&tmp );
+   for (i = 0; i < 14; i++)
+      dst[i] = tmp.env[i];
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (writes guest state, reads guest mem) */
+VexEmNote x86g_dirtyhelper_FLDENV ( VexGuestX86State* gst, HWord addr )
+{
+   /* Load just the x87 environment from addr, leaving the register
+      contents alone; returns any emulation warning from the control
+      word. */
+   return do_put_x87( False/*don't move regs*/, (UChar*)addr, gst);
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Misc integer helpers, including rotates and CPUID.      ---*/
+/*---------------------------------------------------------------*/
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate both flags and value result for rotate right
+   through the carry bit.  Result in low 32 bits, 
+   new flags (OSZACP) in high 32 bits.
+*/
+ULong x86g_calculate_RCR ( UInt arg, UInt rot_amt, UInt eflags_in, UInt sz )
+{
+   /* Rotate right through carry: CF acts as an extra top bit of the
+      rotated quantity.  Returns the rotated value in the low 32 bits
+      and the updated OSZACP flags in the high 32 bits.  O is computed
+      from the value *before* rotation, as for a 1-bit rotate. */
+   UInt count = rot_amt & 0x1F;
+   UInt cf = 0, of = 0, lowbit;
+
+   switch (sz) {
+      case 4:
+         cf = (eflags_in >> X86G_CC_SHIFT_C) & 1;
+         of = ((arg >> 31) ^ cf) & 1;
+         for (; count > 0; count--) {
+            lowbit = arg & 1;
+            arg    = (arg >> 1) | (cf << 31);
+            cf     = lowbit;
+         }
+         break;
+      case 2:
+         count %= 17;   /* 17 bits take part: 16 data + carry */
+         cf = (eflags_in >> X86G_CC_SHIFT_C) & 1;
+         of = ((arg >> 15) ^ cf) & 1;
+         for (; count > 0; count--) {
+            lowbit = arg & 1;
+            arg    = ((arg >> 1) & 0x7FFF) | (cf << 15);
+            cf     = lowbit;
+         }
+         break;
+      case 1:
+         count %= 9;    /* 9 bits take part: 8 data + carry */
+         cf = (eflags_in >> X86G_CC_SHIFT_C) & 1;
+         of = ((arg >> 7) ^ cf) & 1;
+         for (; count > 0; count--) {
+            lowbit = arg & 1;
+            arg    = ((arg >> 1) & 0x7F) | (cf << 7);
+            cf     = lowbit;
+         }
+         break;
+      default: 
+         vpanic("calculate_RCR: invalid size");
+   }
+
+   /* Fold the new C and O back into the incoming flags. */
+   cf &= 1;
+   of &= 1;
+   eflags_in &= ~(X86G_CC_MASK_C | X86G_CC_MASK_O);
+   eflags_in |= (cf << X86G_CC_SHIFT_C) | (of << X86G_CC_SHIFT_O);
+
+   return (((ULong)eflags_in) << 32) | ((ULong)arg);
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate both flags and value result for rotate left
+   through the carry bit.  Result in low 32 bits, 
+   new flags (OSZACP) in high 32 bits.
+*/
+ULong x86g_calculate_RCL ( UInt arg, UInt rot_amt, UInt eflags_in, UInt sz )
+{
+   /* Rotate left through carry: CF acts as an extra top bit of the
+      rotated quantity.  Returns the rotated value in the low 32 bits
+      and the updated OSZACP flags in the high 32 bits.  O is computed
+      from the value *after* rotation. */
+   UInt count = rot_amt & 0x1F;
+   UInt cf = 0, of = 0, hibit;
+
+   switch (sz) {
+      case 4:
+         cf = (eflags_in >> X86G_CC_SHIFT_C) & 1;
+         for (; count > 0; count--) {
+            hibit = (arg >> 31) & 1;
+            arg   = (arg << 1) | (cf & 1);
+            cf    = hibit;
+         }
+         of = ((arg >> 31) ^ cf) & 1;
+         break;
+      case 2:
+         count %= 17;   /* 17 bits take part: 16 data + carry */
+         cf = (eflags_in >> X86G_CC_SHIFT_C) & 1;
+         for (; count > 0; count--) {
+            hibit = (arg >> 15) & 1;
+            arg   = 0xFFFF & ((arg << 1) | (cf & 1));
+            cf    = hibit;
+         }
+         of = ((arg >> 15) ^ cf) & 1;
+         break;
+      case 1:
+         count %= 9;    /* 9 bits take part: 8 data + carry */
+         cf = (eflags_in >> X86G_CC_SHIFT_C) & 1;
+         for (; count > 0; count--) {
+            hibit = (arg >> 7) & 1;
+            arg   = 0xFF & ((arg << 1) | (cf & 1));
+            cf    = hibit;
+         }
+         of = ((arg >> 7) ^ cf) & 1;
+         break;
+      default: 
+         vpanic("calculate_RCL: invalid size");
+   }
+
+   /* Fold the new C and O back into the incoming flags. */
+   cf &= 1;
+   of &= 1;
+   eflags_in &= ~(X86G_CC_MASK_C | X86G_CC_MASK_O);
+   eflags_in |= (cf << X86G_CC_SHIFT_C) | (of << X86G_CC_SHIFT_O);
+
+   return (((ULong)eflags_in) << 32) | ((ULong)arg);
+}
+
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Calculate both flags and value result for DAA/DAS/AAA/AAS.
+   AX value in low half of arg, OSZACP in upper half.
+   See guest-x86/toIR.c usage point for details.
+*/
+static UInt calc_parity_8bit ( UInt w32 ) {
+   /* x86 parity flag: 1 iff the low 8 bits of w32 contain an even
+      number of set bits. */
+   UInt bitno, nset = 0;
+   for (bitno = 0; bitno < 8; bitno++)
+      nset += (w32 >> bitno) & 1;
+   return 1 & ~nset;
+}
+UInt x86g_calculate_daa_das_aaa_aas ( UInt flags_and_AX, UInt opcode )
+{
+   /* BCD-adjust helper for DAA (0x27), DAS (0x2F), AAA (0x37) and
+      AAS (0x3F).  AL/AH sit in the low 16 bits of flags_and_AX and
+      the OSZACP flags in bits 16..31, at their X86G_CC_SHIFT_*
+      positions; the result is packed the same way. */
+   UInt r_AL = (flags_and_AX >> 0) & 0xFF;
+   UInt r_AH = (flags_and_AX >> 8) & 0xFF;
+   UInt r_O  = (flags_and_AX >> (16 + X86G_CC_SHIFT_O)) & 1;
+   UInt r_S  = (flags_and_AX >> (16 + X86G_CC_SHIFT_S)) & 1;
+   UInt r_Z  = (flags_and_AX >> (16 + X86G_CC_SHIFT_Z)) & 1;
+   UInt r_A  = (flags_and_AX >> (16 + X86G_CC_SHIFT_A)) & 1;
+   UInt r_C  = (flags_and_AX >> (16 + X86G_CC_SHIFT_C)) & 1;
+   UInt r_P  = (flags_and_AX >> (16 + X86G_CC_SHIFT_P)) & 1;
+   UInt result = 0;
+
+   switch (opcode) {
+      case 0x27: { /* DAA: decimal adjust AL after addition */
+         UInt old_AL = r_AL;
+         UInt old_C  = r_C;
+         r_C = 0;
+         if ((r_AL & 0xF) > 9 || r_A == 1) {
+            r_AL = r_AL + 6;
+            r_C  = old_C;
+            if (r_AL >= 0x100) r_C = 1;
+            r_A = 1;
+         } else {
+            r_A = 0;
+         }
+         if (old_AL > 0x99 || old_C == 1) {
+            r_AL = r_AL + 0x60;
+            r_C  = 1;
+         } else {
+            r_C = 0;
+         }
+         /* O is undefined.  S Z and P are set according to the
+            result. */
+         r_AL &= 0xFF;
+         r_O = 0; /* let's say */
+         r_S = (r_AL & 0x80) ? 1 : 0;
+         r_Z = (r_AL == 0) ? 1 : 0;
+         r_P = calc_parity_8bit( r_AL );
+         break;
+      }
+      case 0x2F: { /* DAS: decimal adjust AL after subtraction */
+         UInt old_AL = r_AL;
+         UInt old_C  = r_C;
+         r_C = 0;
+         if ((r_AL & 0xF) > 9 || r_A == 1) {
+            /* subtracting 6 would borrow */
+            Bool borrow = r_AL < 6;
+            r_AL = r_AL - 6;
+            r_C  = old_C;
+            if (borrow) r_C = 1;
+            r_A = 1;
+         } else {
+            r_A = 0;
+         }
+         if (old_AL > 0x99 || old_C == 1) {
+            r_AL = r_AL - 0x60;
+            r_C  = 1;
+         } else {
+            /* Intel docs are wrong: r_C = 0; */
+         }
+         /* O is undefined.  S Z and P are set according to the
+            result. */
+         r_AL &= 0xFF;
+         r_O = 0; /* let's say */
+         r_S = (r_AL & 0x80) ? 1 : 0;
+         r_Z = (r_AL == 0) ? 1 : 0;
+         r_P = calc_parity_8bit( r_AL );
+         break;
+      }
+      case 0x37: { /* AAA: ASCII adjust AL after addition */
+         /* nudge: adding 6 to AL will carry out of the low byte */
+         Bool nudge = r_AL > 0xF9;
+         if ((r_AL & 0xF) > 9 || r_A == 1) {
+            r_AL = r_AL + 6;
+            r_AH = r_AH + 1 + (nudge ? 1 : 0);
+            r_A  = 1;
+            r_C  = 1;
+            r_AL = r_AL & 0xF;
+         } else {
+            r_A  = 0;
+            r_C  = 0;
+            r_AL = r_AL & 0xF;
+         }
+         /* O S Z and P are undefined. */
+         r_O = r_S = r_Z = r_P = 0; /* let's say */
+         break;
+      }
+      case 0x3F: { /* AAS: ASCII adjust AL after subtraction */
+         /* nudge: subtracting 6 from AL will borrow */
+         Bool nudge = r_AL < 0x06;
+         if ((r_AL & 0xF) > 9 || r_A == 1) {
+            r_AL = r_AL - 6;
+            r_AH = r_AH - 1 - (nudge ? 1 : 0);
+            r_A  = 1;
+            r_C  = 1;
+            r_AL = r_AL & 0xF;
+         } else {
+            r_A  = 0;
+            r_C  = 0;
+            r_AL = r_AL & 0xF;
+         }
+         /* O S Z and P are undefined. */
+         r_O = r_S = r_Z = r_P = 0; /* let's say */
+         break;
+      }
+      default:
+         vassert(0);
+   }
+   /* Repack the flags and AX into the result format. */
+   result =   ( (r_O & 1) << (16 + X86G_CC_SHIFT_O) )
+            | ( (r_S & 1) << (16 + X86G_CC_SHIFT_S) )
+            | ( (r_Z & 1) << (16 + X86G_CC_SHIFT_Z) )
+            | ( (r_A & 1) << (16 + X86G_CC_SHIFT_A) )
+            | ( (r_C & 1) << (16 + X86G_CC_SHIFT_C) )
+            | ( (r_P & 1) << (16 + X86G_CC_SHIFT_P) )
+            | ( (r_AH & 0xFF) << 8 )
+            | ( (r_AL & 0xFF) << 0 );
+   return result;
+}
+
+UInt x86g_calculate_aad_aam ( UInt flags_and_AX, UInt opcode )
+{
+   /* Emulate AAM (0xD4) and AAD (0xD5) with the base fixed at 10.
+      NOTE(review): the hardware instructions carry an immediate base
+      byte; presumably only the base-10 encodings are routed here --
+      confirm at the toIR.c call site.
+      AL/AH sit in the low 16 bits of flags_and_AX, the OSZACP flags
+      in bits 16..31; the result is packed the same way. */
+   UInt r_AL = (flags_and_AX >> 0) & 0xFF;
+   UInt r_AH = (flags_and_AX >> 8) & 0xFF;
+   UInt r_O  = (flags_and_AX >> (16 + X86G_CC_SHIFT_O)) & 1;
+   UInt r_S  = (flags_and_AX >> (16 + X86G_CC_SHIFT_S)) & 1;
+   UInt r_Z  = (flags_and_AX >> (16 + X86G_CC_SHIFT_Z)) & 1;
+   UInt r_A  = (flags_and_AX >> (16 + X86G_CC_SHIFT_A)) & 1;
+   UInt r_C  = (flags_and_AX >> (16 + X86G_CC_SHIFT_C)) & 1;
+   UInt r_P  = (flags_and_AX >> (16 + X86G_CC_SHIFT_P)) & 1;
+   UInt result = 0;
+
+   switch (opcode) {
+      case 0xD4: { /* AAM: split AL into decimal digits AH:AL */
+         r_AH = r_AL / 10;
+         r_AL = r_AL % 10;
+         break;
+      }
+      case 0xD5: { /* AAD: recombine digits AH:AL into binary in AL */
+         r_AL = ((r_AH * 10) + r_AL) & 0xff;
+         r_AH = 0;
+         break;
+      }
+      default:
+         vassert(0);
+   }
+
+   /* S, Z and P follow the new AL; O, C and A are undefined and are
+      simply cleared here. */
+   r_O = 0; /* let's say (undefined) */
+   r_C = 0; /* let's say (undefined) */
+   r_A = 0; /* let's say (undefined) */
+   r_S = (r_AL & 0x80) ? 1 : 0;
+   r_Z = (r_AL == 0) ? 1 : 0;
+   r_P = calc_parity_8bit( r_AL );
+
+   /* Repack the flags and AX into the result format. */
+   result =   ( (r_O & 1) << (16 + X86G_CC_SHIFT_O) )
+            | ( (r_S & 1) << (16 + X86G_CC_SHIFT_S) )
+            | ( (r_Z & 1) << (16 + X86G_CC_SHIFT_Z) )
+            | ( (r_A & 1) << (16 + X86G_CC_SHIFT_A) )
+            | ( (r_C & 1) << (16 + X86G_CC_SHIFT_C) )
+            | ( (r_P & 1) << (16 + X86G_CC_SHIFT_P) )
+            | ( (r_AH & 0xFF) << 8 )
+            | ( (r_AL & 0xFF) << 0 );
+   return result;
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (non-referentially-transparent) */
+/* Horrible hack.  On non-x86 platforms, return 1. */
+ULong x86g_dirtyhelper_RDTSC ( void )
+{
+#  if defined(__i386__)
+   ULong res;
+   /* "=A" ties the 64-bit result to the EDX:EAX pair, which is where
+      rdtsc leaves the timestamp. */
+   __asm__ __volatile__("rdtsc" : "=A" (res));
+   return res;
+#  else
+   /* Non-x86 host: no TSC to read; return a constant. */
+   return 1ULL;
+#  endif
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (modifies guest state) */
+/* Claim to be a P55C (Intel Pentium/MMX) */
+void x86g_dirtyhelper_CPUID_sse0 ( VexGuestX86State* st )
+{
+   switch (st->guest_EAX) {
+      case 0: 
+         /* max basic leaf = 1; vendor string "GenuineIntel" */
+         st->guest_EAX = 0x1;
+         st->guest_EBX = 0x756e6547; /* "Genu" */
+         st->guest_ECX = 0x6c65746e; /* "ntel" */
+         st->guest_EDX = 0x49656e69; /* "ineI" */
+         break;
+      default:
+         /* leaf 1 (and anything else): signature 0x543 and feature
+            bits 0x8001bf -- exact decode not re-verified here; see
+            the Intel SDM CPUID reference. */
+         st->guest_EAX = 0x543;
+         st->guest_EBX = 0x0;
+         st->guest_ECX = 0x0;
+         st->guest_EDX = 0x8001bf;
+         break;
+   }
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (modifies guest state) */
+/* Claim to be a Athlon "Classic" (Model 2, K75 "Pluto/Orion") */
+/* But without 3DNow support (weird, but we really don't support it). */
+void x86g_dirtyhelper_CPUID_mmxext ( VexGuestX86State* st )
+{
+   switch (st->guest_EAX) {
+      /* vendor ID: "AuthenticAMD" */
+      case 0:
+         st->guest_EAX = 0x1;
+         st->guest_EBX = 0x68747541; /* "Auth" */
+         st->guest_ECX = 0x444d4163; /* "cAMD" */
+         st->guest_EDX = 0x69746e65; /* "enti" */
+         break;
+      /* feature bits */
+      case 1:
+         st->guest_EAX = 0x621;
+         st->guest_EBX = 0x0;
+         st->guest_ECX = 0x0;
+         st->guest_EDX = 0x183f9ff;
+         break;
+      /* Highest Extended Function Supported (0x80000004 brand string) */
+      case 0x80000000:
+         st->guest_EAX = 0x80000004;
+         st->guest_EBX = 0x68747541; /* "Auth" */
+         st->guest_ECX = 0x444d4163; /* "cAMD" */
+         st->guest_EDX = 0x69746e65; /* "enti" */
+         break;
+      /* Extended Processor Info and Feature Bits */
+      case 0x80000001:
+         st->guest_EAX = 0x721;
+         st->guest_EBX = 0x0;
+         st->guest_ECX = 0x0;
+         st->guest_EDX = 0x1c3f9ff; /* Note no 3DNow. */
+         break;
+      /* Processor Brand String "AMD Athlon(tm) Processor" */
+      case 0x80000002:
+         st->guest_EAX = 0x20444d41; /* "AMD " */
+         st->guest_EBX = 0x6c687441; /* "Athl" */
+         st->guest_ECX = 0x74286e6f; /* "on(t" */
+         st->guest_EDX = 0x5020296d; /* "m) P" */
+         break;
+      case 0x80000003: /* brand string continued: "rocessor" */
+         st->guest_EAX = 0x65636f72; /* "roce" */
+         st->guest_EBX = 0x726f7373; /* "ssor" */
+         st->guest_ECX = 0x0;
+         st->guest_EDX = 0x0;
+         break;
+      default:
+         /* unknown leaf: report all zeroes */
+         st->guest_EAX = 0x0;
+         st->guest_EBX = 0x0;
+         st->guest_ECX = 0x0;
+         st->guest_EDX = 0x0;
+         break;
+   }
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (modifies guest state) */
+/* Claim to be the following SSE1-capable CPU:
+   vendor_id       : GenuineIntel
+   cpu family      : 6
+   model           : 11
+   model name      : Intel(R) Pentium(R) III CPU family      1133MHz
+   stepping        : 1
+   cpu MHz         : 1131.013
+   cache size      : 512 KB
+*/
+void x86g_dirtyhelper_CPUID_sse1 ( VexGuestX86State* st )
+{
+   switch (st->guest_EAX) {
+      case 0: 
+         /* Max standard leaf = 2; EBX:EDX:ECX = "GenuineIntel". */
+         st->guest_EAX = 0x00000002;
+         st->guest_EBX = 0x756e6547;
+         st->guest_ECX = 0x6c65746e;
+         st->guest_EDX = 0x49656e69;
+         break;
+      case 1: 
+         /* EAX = 0x6b1: family 6, model 11, stepping 1. */
+         st->guest_EAX = 0x000006b1;
+         st->guest_EBX = 0x00000004;
+         st->guest_ECX = 0x00000000;
+         st->guest_EDX = 0x0383fbff;
+         break;
+      default:
+         /* Leaf 2 (presumably cache/TLB descriptor bytes) and any
+            other leaf get the same canned answer. */
+         st->guest_EAX = 0x03020101;
+         st->guest_EBX = 0x00000000;
+         st->guest_ECX = 0x00000000;
+         st->guest_EDX = 0x0c040883;
+         break;
+   }
+}
+
+/* Claim to be the following SSSE3-capable CPU (2 x ...):
+   vendor_id       : GenuineIntel
+   cpu family      : 6
+   model           : 15
+   model name      : Intel(R) Core(TM)2 CPU 6600 @ 2.40GHz
+   stepping        : 6
+   cpu MHz         : 2394.000
+   cache size      : 4096 KB
+   physical id     : 0
+   siblings        : 2
+   core id         : 0
+   cpu cores       : 2
+   fpu             : yes
+   fpu_exception   : yes
+   cpuid level     : 10
+   wp              : yes
+   flags           : fpu vme de pse tsc msr pae mce cx8 apic sep
+                     mtrr pge mca cmov pat pse36 clflush dts acpi
+                     mmx fxsr sse sse2 ss ht tm syscall nx lm
+                     constant_tsc pni monitor ds_cpl vmx est tm2
+                     cx16 xtpr lahf_lm
+   bogomips        : 4798.78
+   clflush size    : 64
+   cache_alignment : 64
+   address sizes   : 36 bits physical, 48 bits virtual
+   power management:
+*/
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (modifies guest state).  Leaf in guest_EAX (and, for
+   leaf 4, sub-leaf in guest_ECX); results go to guest_EAX..EDX. */
+void x86g_dirtyhelper_CPUID_sse2 ( VexGuestX86State* st )
+{
+#  define SET_ABCD(_a,_b,_c,_d)               \
+      do { st->guest_EAX = (UInt)(_a);        \
+           st->guest_EBX = (UInt)(_b);        \
+           st->guest_ECX = (UInt)(_c);        \
+           st->guest_EDX = (UInt)(_d);        \
+      } while (0)
+
+   switch (st->guest_EAX) {
+      case 0x00000000:
+         SET_ABCD(0x0000000a, 0x756e6547, 0x6c65746e, 0x49656e69);
+         break;
+      case 0x00000001:
+         SET_ABCD(0x000006f6, 0x00020800, 0x0000e3bd, 0xbfebfbff);
+         break;
+      case 0x00000002:
+         SET_ABCD(0x05b0b101, 0x005657f0, 0x00000000, 0x2cb43049);
+         break;
+      case 0x00000003:
+         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
+         break;
+      case 0x00000004: {
+         /* Leaf 4 is sub-indexed by ECX (one entry per cache level). */
+         switch (st->guest_ECX) {
+            case 0x00000000: SET_ABCD(0x04000121, 0x01c0003f,
+                                      0x0000003f, 0x00000001); break;
+            case 0x00000001: SET_ABCD(0x04000122, 0x01c0003f,
+                                      0x0000003f, 0x00000001); break;
+            case 0x00000002: SET_ABCD(0x04004143, 0x03c0003f,
+                                      0x00000fff, 0x00000001); break;
+            default:         SET_ABCD(0x00000000, 0x00000000,
+                                      0x00000000, 0x00000000); break;
+         }
+         break;
+      }
+      case 0x00000005:
+         SET_ABCD(0x00000040, 0x00000040, 0x00000003, 0x00000020);
+         break;
+      case 0x00000006:
+         SET_ABCD(0x00000001, 0x00000002, 0x00000001, 0x00000000);
+         break;
+      case 0x00000007:
+         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
+         break;
+      case 0x00000008:
+         SET_ABCD(0x00000400, 0x00000000, 0x00000000, 0x00000000);
+         break;
+      case 0x00000009:
+         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
+         break;
+      case 0x0000000a:
+      /* This label is also the landing point (via goto) for any leaf
+         not listed in this switch. */
+      unhandled_eax_value:
+         SET_ABCD(0x07280202, 0x00000000, 0x00000000, 0x00000000);
+         break;
+      case 0x80000000:
+         SET_ABCD(0x80000008, 0x00000000, 0x00000000, 0x00000000);
+         break;
+      case 0x80000001:
+         SET_ABCD(0x00000000, 0x00000000, 0x00000001, 0x20100000);
+         break;
+      case 0x80000002:
+         SET_ABCD(0x65746e49, 0x2952286c, 0x726f4320, 0x4d542865);
+         break;
+      case 0x80000003:
+         SET_ABCD(0x43203229, 0x20205550, 0x20202020, 0x20202020);
+         break;
+      case 0x80000004:
+         SET_ABCD(0x30303636, 0x20402020, 0x30342e32, 0x007a4847);
+         break;
+      case 0x80000005:
+         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
+         break;
+      case 0x80000006:
+         SET_ABCD(0x00000000, 0x00000000, 0x10008040, 0x00000000);
+         break;
+      case 0x80000007:
+         SET_ABCD(0x00000000, 0x00000000, 0x00000000, 0x00000000);
+         break;
+      case 0x80000008:
+         SET_ABCD(0x00003024, 0x00000000, 0x00000000, 0x00000000);
+         break;
+      default:
+         goto unhandled_eax_value;
+   }
+#  undef SET_ABCD
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (non-referentially-transparent) */
+/* Horrible hack.  On non-x86 platforms, return 0. */
+/* Read a value of size 'sz' (1, 2 or 4 bytes) from I/O port 'portno'
+   using the native IN instruction.  Unsupported sizes read nothing
+   and return 0.  The "Nd" constraint lets the port be either an
+   immediate 0..255 or supplied in %dx. */
+UInt x86g_dirtyhelper_IN ( UInt portno, UInt sz/*1,2 or 4*/ )
+{
+#  if defined(__i386__)
+   UInt r = 0;
+   portno &= 0xFFFF;
+   switch (sz) {
+      case 4: 
+         __asm__ __volatile__("movl $0,%%eax; inl %w1,%0" 
+                              : "=a" (r) : "Nd" (portno));
+	 break;
+      case 2: 
+         __asm__ __volatile__("movl $0,%%eax; inw %w1,%w0" 
+                              : "=a" (r) : "Nd" (portno));
+	 break;
+      case 1: 
+         __asm__ __volatile__("movl $0,%%eax; inb %w1,%b0" 
+                              : "=a" (r) : "Nd" (portno));
+	 break;
+      default:
+         break;
+   }
+   return r;
+#  else
+   return 0;
+#  endif
+}
+
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (non-referentially-transparent) */
+/* Horrible hack.  On non-x86 platforms, do nothing. */
+/* Write 'data' of size 'sz' (1, 2 or 4 bytes) to I/O port 'portno'
+   using the native OUT instruction; unsupported sizes are silently
+   ignored. */
+void x86g_dirtyhelper_OUT ( UInt portno, UInt data, UInt sz/*1,2 or 4*/ )
+{
+#  if defined(__i386__)
+   portno &= 0xFFFF;
+   switch (sz) {
+      case 4: 
+         __asm__ __volatile__("outl %0, %w1" 
+                              : : "a" (data), "Nd" (portno));
+	 break;
+      case 2: 
+         __asm__ __volatile__("outw %w0, %w1" 
+                              : : "a" (data), "Nd" (portno));
+	 break;
+      case 1: 
+         __asm__ __volatile__("outb %b0, %w1" 
+                              : : "a" (data), "Nd" (portno));
+	 break;
+      default:
+         break;
+   }
+#  else
+   /* do nothing */
+#  endif
+}
+
+/* CALLED FROM GENERATED CODE */
+/* DIRTY HELPER (non-referentially-transparent) */
+/* Horrible hack.  On non-x86 platforms, write six zero bytes. */
+/* op = 0: call the native SGDT instruction.
+   op = 1: call the native SIDT instruction.
+   Either way, a 6-byte (limit:base) pseudo-descriptor is stored at
+   'address'.
+*/
+void x86g_dirtyhelper_SxDT ( void *address, UInt op ) {
+#  if defined(__i386__)
+   switch (op) {
+      case 0:
+         __asm__ __volatile__("sgdt (%0)" : : "r" (address) : "memory");
+         break;
+      case 1:
+         __asm__ __volatile__("sidt (%0)" : : "r" (address) : "memory");
+         break;
+      default:
+         vpanic("x86g_dirtyhelper_SxDT");
+   }
+#  else
+   /* Fake it: claim an all-zeroes descriptor. */
+   UChar* p = (UChar*)address;
+   p[0] = p[1] = p[2] = p[3] = p[4] = p[5] = 0;
+#  endif
+}
+
+/*---------------------------------------------------------------*/
+/*--- Helpers for MMX/SSE/SSE2.                               ---*/
+/*---------------------------------------------------------------*/
+
+/* Absolute difference of two unsigned bytes. */
+static inline UChar abdU8 ( UChar xx, UChar yy ) {
+   if (xx > yy)
+      return toUChar(xx - yy);
+   return toUChar(yy - xx);
+}
+
+/* Concatenate two 32-bit words into a 64-bit value, w1 on top. */
+static inline ULong mk32x2 ( UInt w1, UInt w0 ) {
+   ULong hi = (ULong)w1;
+   return (hi << 32) | (ULong)w0;
+}
+
+/* Select one 16-bit lane of a 64-bit value; _3 is the most
+   significant lane, _0 the least significant. */
+static inline UShort sel16x4_3 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUShort(hi32 >> 16);
+}
+static inline UShort sel16x4_2 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUShort(hi32);
+}
+static inline UShort sel16x4_1 ( ULong w64 ) {
+   UInt lo32 = toUInt(w64);
+   return toUShort(lo32 >> 16);
+}
+static inline UShort sel16x4_0 ( ULong w64 ) {
+   UInt lo32 = toUInt(w64);
+   return toUShort(lo32);
+}
+
+/* Select one 8-bit lane of a 64-bit value; _7 is the most
+   significant lane, _0 the least significant. */
+static inline UChar sel8x8_7 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUChar(hi32 >> 24);
+}
+static inline UChar sel8x8_6 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUChar(hi32 >> 16);
+}
+static inline UChar sel8x8_5 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUChar(hi32 >> 8);
+}
+static inline UChar sel8x8_4 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUChar(hi32 >> 0);
+}
+static inline UChar sel8x8_3 ( ULong w64 ) {
+   UInt lo32 = toUInt(w64);
+   return toUChar(lo32 >> 24);
+}
+static inline UChar sel8x8_2 ( ULong w64 ) {
+   UInt lo32 = toUInt(w64);
+   return toUChar(lo32 >> 16);
+}
+static inline UChar sel8x8_1 ( ULong w64 ) {
+   UInt lo32 = toUInt(w64);
+   return toUChar(lo32 >> 8);
+}
+static inline UChar sel8x8_0 ( ULong w64 ) {
+   UInt lo32 = toUInt(w64);
+   return toUChar(lo32 >> 0);
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* PMADDWD: multiply corresponding signed 16-bit lanes of xx and yy,
+   then add the four products pairwise into two 32-bit sums. */
+ULong x86g_calculate_mmx_pmaddwd ( ULong xx, ULong yy )
+{
+   Int prod3 = ((Int)(Short)sel16x4_3(xx)) * ((Int)(Short)sel16x4_3(yy));
+   Int prod2 = ((Int)(Short)sel16x4_2(xx)) * ((Int)(Short)sel16x4_2(yy));
+   Int prod1 = ((Int)(Short)sel16x4_1(xx)) * ((Int)(Short)sel16x4_1(yy));
+   Int prod0 = ((Int)(Short)sel16x4_0(xx)) * ((Int)(Short)sel16x4_0(yy));
+   return mk32x2( prod3 + prod2, prod1 + prod0 );
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* PSADBW: sum of absolute differences of the eight 8-bit lanes of xx
+   and yy, delivered in the low 16 bits of the result. */
+ULong x86g_calculate_mmx_psadbw ( ULong xx, ULong yy )
+{
+   UInt sh;
+   UInt acc = 0;
+   UInt hx = toUInt(xx >> 32), lx = toUInt(xx);
+   UInt hy = toUInt(yy >> 32), ly = toUInt(yy);
+   /* Walk the four byte positions of each 32-bit half. */
+   for (sh = 0; sh < 32; sh += 8) {
+      acc += (UInt)abdU8( toUChar(hx >> sh), toUChar(hy >> sh) );
+      acc += (UInt)abdU8( toUChar(lx >> sh), toUChar(ly >> sh) );
+   }
+   acc &= 0xFFFF;
+   return (ULong)acc;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Helpers for dealing with segment overrides.             ---*/
+/*---------------------------------------------------------------*/
+
+/* Assemble the 32-bit segment base address from its three split
+   fields in an x86 segment descriptor. */
+static inline 
+UInt get_segdescr_base ( VexGuestX86SegDescr* ent )
+{
+   UInt base = 0xFFFF & (UInt)ent->LdtEnt.Bits.BaseLow;
+   base |= (0xFF & (UInt)ent->LdtEnt.Bits.BaseMid) << 16;
+   base |= (0xFF & (UInt)ent->LdtEnt.Bits.BaseHi)  << 24;
+   return base;
+}
+
+/* Assemble the 20-bit segment limit from its two split fields.  If
+   the granularity bit is set the limit is in units of 4K pages, so
+   scale it up to a byte limit. */
+static inline
+UInt get_segdescr_limit ( VexGuestX86SegDescr* ent )
+{
+    UInt lo    = 0xFFFF & (UInt)ent->LdtEnt.Bits.LimitLow;
+    UInt hi    =    0xF & (UInt)ent->LdtEnt.Bits.LimitHi;
+    UInt limit = (hi << 16) | lo;
+    if (ent->LdtEnt.Bits.Granularity) 
+       limit = (limit << 12) | 0xFFF;
+    return limit;
+}
+
+/* CALLED FROM GENERATED CODE: CLEAN HELPER */
+/* Translate (seg_selector, virtual_addr) into a linear address by
+   looking the selector up in the guest's GDT (TI bit == 0) or LDT
+   (TI bit == 1).  On success the high 32 bits of the result are zero
+   and the low 32 bits hold the linear address; on any failure bit 32
+   of the result is set. */
+ULong x86g_use_seg_selector ( HWord ldt, HWord gdt,
+                              UInt seg_selector, UInt virtual_addr )
+{
+   UInt tiBit, base, limit;
+   VexGuestX86SegDescr* the_descrs;
+
+   Bool verboze = False;
+
+   /* If this isn't true, we're in Big Trouble. */
+   vassert(8 == sizeof(VexGuestX86SegDescr));
+
+   if (verboze) 
+      vex_printf("x86h_use_seg_selector: "
+                 "seg_selector = 0x%x, vaddr = 0x%x\n", 
+                 seg_selector, virtual_addr);
+
+   /* Check for wildly invalid selector. */
+   if (seg_selector & ~0xFFFF)
+      goto bad;
+
+   seg_selector &= 0x0000FFFF;
+  
+   /* Sanity check the segment selector.  Ensure that RPL=11b (least
+      privilege).  This forms the bottom 2 bits of the selector. */
+   if ((seg_selector & 3) != 3)
+      goto bad;
+
+   /* Extract the TI bit (0 means GDT, 1 means LDT) */
+   tiBit = (seg_selector >> 2) & 1;
+
+   /* Convert the segment selector onto a table index */
+   seg_selector >>= 3;
+   /* seg_selector is unsigned, so only the upper bound needs
+      checking ('>= 0' would be vacuously true). */
+   vassert(seg_selector < 8192);
+
+   if (tiBit == 0) {
+
+      /* GDT access. */
+      /* Do we actually have a GDT to look at? */
+      if (gdt == 0)
+         goto bad;
+
+      /* Check for access to non-existent entry. */
+      if (seg_selector >= VEX_GUEST_X86_GDT_NENT)
+         goto bad;
+
+      the_descrs = (VexGuestX86SegDescr*)gdt;
+      base  = get_segdescr_base (&the_descrs[seg_selector]);
+      limit = get_segdescr_limit(&the_descrs[seg_selector]);
+
+   } else {
+
+      /* All the same stuff, except for the LDT. */
+      if (ldt == 0)
+         goto bad;
+
+      if (seg_selector >= VEX_GUEST_X86_LDT_NENT)
+         goto bad;
+
+      the_descrs = (VexGuestX86SegDescr*)ldt;
+      base  = get_segdescr_base (&the_descrs[seg_selector]);
+      limit = get_segdescr_limit(&the_descrs[seg_selector]);
+
+   }
+
+   /* Do the limit check.  Note, this check is just slightly too
+      slack.  Really it should be "if (virtual_addr + size - 1 >=
+      limit)," but we don't have the size info to hand.  Getting it
+      could be significantly complex.  */
+   if (virtual_addr >= limit)
+      goto bad;
+
+   if (verboze) 
+      vex_printf("x86h_use_seg_selector: "
+                 "base = 0x%x, addr = 0x%x\n", 
+                 base, base + virtual_addr);
+
+   /* High 32 bits are zero, indicating success. */
+   return (ULong)( ((UInt)virtual_addr) + base );
+
+ bad:
+   return 1ULL << 32;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Helpers for dealing with, and describing,               ---*/
+/*--- guest state as a whole.                                 ---*/
+/*---------------------------------------------------------------*/
+
+/* Initialise the entire x86 guest state to a known, zeroed-out
+   baseline: all integer/segment registers zero, flags thunk set to
+   COPY-of-zero, direction flag forwards, FPU freshly FINITed, SSE
+   registers zeroed and rounding set to nearest. */
+/* VISIBLE TO LIBVEX CLIENT */
+void LibVEX_GuestX86_initialise ( /*OUT*/VexGuestX86State* vex_state )
+{
+   vex_state->host_EvC_FAILADDR = 0;
+   vex_state->host_EvC_COUNTER = 0;
+
+   vex_state->guest_EAX = 0;
+   vex_state->guest_ECX = 0;
+   vex_state->guest_EDX = 0;
+   vex_state->guest_EBX = 0;
+   vex_state->guest_ESP = 0;
+   vex_state->guest_EBP = 0;
+   vex_state->guest_ESI = 0;
+   vex_state->guest_EDI = 0;
+
+   /* Flags thunk: CC_OP_COPY with zero DEP1 means "all flags clear". */
+   vex_state->guest_CC_OP   = X86G_CC_OP_COPY;
+   vex_state->guest_CC_DEP1 = 0;
+   vex_state->guest_CC_DEP2 = 0;
+   vex_state->guest_CC_NDEP = 0;
+   vex_state->guest_DFLAG   = 1; /* forwards */
+   vex_state->guest_IDFLAG  = 0;
+   vex_state->guest_ACFLAG  = 0;
+
+   vex_state->guest_EIP = 0;
+
+   /* Initialise the simulated FPU */
+   x86g_dirtyhelper_FINIT( vex_state );
+
+   /* Initialise the SSE state. */
+#  define SSEZERO(_xmm) _xmm[0]=_xmm[1]=_xmm[2]=_xmm[3] = 0;
+
+   vex_state->guest_SSEROUND = (UInt)Irrm_NEAREST;
+   SSEZERO(vex_state->guest_XMM0);
+   SSEZERO(vex_state->guest_XMM1);
+   SSEZERO(vex_state->guest_XMM2);
+   SSEZERO(vex_state->guest_XMM3);
+   SSEZERO(vex_state->guest_XMM4);
+   SSEZERO(vex_state->guest_XMM5);
+   SSEZERO(vex_state->guest_XMM6);
+   SSEZERO(vex_state->guest_XMM7);
+
+#  undef SSEZERO
+
+   vex_state->guest_CS  = 0;
+   vex_state->guest_DS  = 0;
+   vex_state->guest_ES  = 0;
+   vex_state->guest_FS  = 0;
+   vex_state->guest_GS  = 0;
+   vex_state->guest_SS  = 0;
+   vex_state->guest_LDT = 0;
+   vex_state->guest_GDT = 0;
+
+   vex_state->guest_EMNOTE = EmNote_NONE;
+
+   /* SSE2 has a 'clflush' cache-line-invalidator which uses these. */
+   vex_state->guest_CMSTART = 0;
+   vex_state->guest_CMLEN   = 0;
+
+   vex_state->guest_NRADDR   = 0;
+   vex_state->guest_SC_CLASS = 0;
+   vex_state->guest_IP_AT_SYSCALL = 0;
+
+   vex_state->padding1 = 0;
+}
+
+
+/* Figure out if any part of the guest state contained in minoff
+   .. maxoff requires precise memory exceptions.  If in doubt return
+   True (but this generates significantly slower code).
+
+   By default we enforce precise exns for guest %ESP, %EBP and %EIP
+   only.  These are the minimum needed to extract correct stack
+   backtraces from x86 code.
+
+   Only %ESP is needed in mode VexRegUpdSpAtMemAccess.
+*/
+Bool guest_x86_state_requires_precise_mem_exns (
+        Int minoff, Int maxoff, VexRegisterUpdates pxControl
+     )
+{
+   Int esp_min = offsetof(VexGuestX86State, guest_ESP);
+   Int esp_max = esp_min + 4 - 1;
+   Int ebp_min = offsetof(VexGuestX86State, guest_EBP);
+   Int ebp_max = ebp_min + 4 - 1;
+   Int eip_min = offsetof(VexGuestX86State, guest_EIP);
+   Int eip_max = eip_min + 4 - 1;
+
+   /* [minoff,maxoff] intersects [lo,hi] iff it is neither entirely
+      below nor entirely above it. */
+   if (!(maxoff < esp_min || minoff > esp_max))
+      return True;   /* touches %ESP */
+
+   /* No overlap with %ESP: in SP-only mode, nothing else matters. */
+   if (pxControl == VexRegUpdSpAtMemAccess)
+      return False;
+
+   if (!(maxoff < ebp_min || minoff > ebp_max))
+      return True;   /* touches %EBP */
+
+   if (!(maxoff < eip_min || minoff > eip_max))
+      return True;   /* touches %EIP */
+
+   return False;
+}
+
+
+/* Describe one guest-state field as an (offset, size) pair for the
+   alwaysDefd table below. */
+#define ALWAYSDEFD(field)                           \
+    { offsetof(VexGuestX86State, field),            \
+      (sizeof ((VexGuestX86State*)0)->field) }
+
+VexGuestLayout
+   x86guest_layout 
+      = { 
+          /* Total size of the guest state, in bytes. */
+          .total_sizeB = sizeof(VexGuestX86State),
+
+          /* Describe the stack pointer. */
+          .offset_SP = offsetof(VexGuestX86State,guest_ESP),
+          .sizeof_SP = 4,
+
+          /* Describe the frame pointer. */
+          .offset_FP = offsetof(VexGuestX86State,guest_EBP),
+          .sizeof_FP = 4,
+
+          /* Describe the instruction pointer. */
+          .offset_IP = offsetof(VexGuestX86State,guest_EIP),
+          .sizeof_IP = 4,
+
+          /* Describe any sections to be regarded by Memcheck as
+             'always-defined'.  NOTE: this count must equal the number
+             of entries in .alwaysDefd below. */
+          .n_alwaysDefd = 24,
+
+          /* flags thunk: OP and NDEP are always defd, whereas DEP1
+             and DEP2 have to be tracked.  See detailed comment in
+             gdefs.h on meaning of thunk fields. */
+          .alwaysDefd 
+             = { /*  0 */ ALWAYSDEFD(guest_CC_OP),
+                 /*  1 */ ALWAYSDEFD(guest_CC_NDEP),
+                 /*  2 */ ALWAYSDEFD(guest_DFLAG),
+                 /*  3 */ ALWAYSDEFD(guest_IDFLAG),
+                 /*  4 */ ALWAYSDEFD(guest_ACFLAG),
+                 /*  5 */ ALWAYSDEFD(guest_EIP),
+                 /*  6 */ ALWAYSDEFD(guest_FTOP),
+                 /*  7 */ ALWAYSDEFD(guest_FPTAG),
+                 /*  8 */ ALWAYSDEFD(guest_FPROUND),
+                 /*  9 */ ALWAYSDEFD(guest_FC3210),
+                 /* 10 */ ALWAYSDEFD(guest_CS),
+                 /* 11 */ ALWAYSDEFD(guest_DS),
+                 /* 12 */ ALWAYSDEFD(guest_ES),
+                 /* 13 */ ALWAYSDEFD(guest_FS),
+                 /* 14 */ ALWAYSDEFD(guest_GS),
+                 /* 15 */ ALWAYSDEFD(guest_SS),
+                 /* 16 */ ALWAYSDEFD(guest_LDT),
+                 /* 17 */ ALWAYSDEFD(guest_GDT),
+                 /* 18 */ ALWAYSDEFD(guest_EMNOTE),
+                 /* 19 */ ALWAYSDEFD(guest_SSEROUND),
+                 /* 20 */ ALWAYSDEFD(guest_CMSTART),
+                 /* 21 */ ALWAYSDEFD(guest_CMLEN),
+                 /* 22 */ ALWAYSDEFD(guest_SC_CLASS),
+                 /* 23 */ ALWAYSDEFD(guest_IP_AT_SYSCALL)
+               }
+        };
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                 guest_x86_helpers.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/guest_x86_toIR.c b/VEX/priv/guest_x86_toIR.c
new file mode 100644
index 0000000..fc402ce
--- /dev/null
+++ b/VEX/priv/guest_x86_toIR.c
@@ -0,0 +1,15486 @@
+
+/*--------------------------------------------------------------------*/
+/*--- begin                                       guest_x86_toIR.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* Translates x86 code to IR. */
+
+/* TODO:
+
+   All Puts to CC_OP/CC_DEP1/CC_DEP2/CC_NDEP should really be checked
+   to ensure a 32-bit value is being written.
+
+   FUCOMI(P): what happens to A and S flags?  Currently are forced
+      to zero.
+
+   x87 FP Limitations:
+
+   * all arithmetic done at 64 bits
+
+   * no FP exceptions, except for handling stack over/underflow
+
+   * FP rounding mode observed only for float->int conversions
+     and int->float conversions which could lose accuracy, and
+     for float-to-float rounding.  For all other operations, 
+     round-to-nearest is used, regardless.
+
+   * some of the FCOM cases could do with testing -- not convinced
+     that the args are the right way round.
+
+   * FSAVE does not re-initialise the FPU; it should do
+
+   * FINIT not only initialises the FPU environment, it also
+     zeroes all the FP registers.  It should leave the registers
+     unchanged.
+
+   SAHF should cause eflags[1] == 1, and in fact it produces 0.  As
+   per Intel docs this bit has no meaning anyway.  Since PUSHF is the
+   only way to observe eflags[1], a proper fix would be to make that
+   bit be set by PUSHF.
+
+   The state of %eflags.AC (alignment check, bit 18) is recorded by
+   the simulation (viz, if you set it with popf then a pushf produces
+   the value you set it to), but it is otherwise ignored.  In
+   particular, setting it to 1 does NOT cause alignment checking to
+   happen.  Programs that set it to 1 and then rely on the resulting
+   SIGBUSs to inform them of misaligned accesses will not work.
+
+   Implementation of sysenter is necessarily partial.  sysenter is a
+   kind of system call entry.  When doing a sysenter, the return
+   address is not known -- that is something that is beyond Vex's
+   knowledge.  So the generated IR forces a return to the scheduler,
+   which can do what it likes to simulate the sysenter, but it MUST
+   set this thread's guest_EIP field with the continuation address
+   before resuming execution.  If that doesn't happen, the thread will
+   jump to address zero, which is probably fatal.
+
+   This module uses global variables and so is not MT-safe (if that
+   should ever become relevant).
+
+   The delta values are 32-bit ints, not 64-bit ints.  That means
+   this module may not work right if run on a 64-bit host.  That should
+   be fixed properly, really -- if anyone ever wants to use Vex to
+   translate x86 code for execution on a 64-bit host.
+
+   casLE (implementation of lock-prefixed insns) and rep-prefixed
+   insns: the side-exit back to the start of the insn is done with
+   Ijk_Boring.  This is quite wrong, it should be done with
+   Ijk_NoRedir, since otherwise the side exit, which is intended to
+   restart the instruction for whatever reason, could go somewhere
+   entirely else.  Doing it right (with Ijk_NoRedir jumps) would make
+   no-redir jumps performance critical, at least for rep-prefixed
+   instructions, since all iterations thereof would involve such a
+   jump.  It's not such a big deal with casLE since the side exit is
+   only taken if the CAS fails, that is, the location is contended,
+   which is relatively unlikely.
+
+   XXXX: Nov 2009: handling of SWP on ARM suffers from the same
+   problem.
+
+   Note also, the test for CAS success vs failure is done using
+   Iop_CasCmp{EQ,NE}{8,16,32,64} rather than the ordinary
+   Iop_Cmp{EQ,NE} equivalents.  This is so as to tell Memcheck that it
+   shouldn't definedness-check these comparisons.  See
+   COMMENT_ON_CasCmpEQ in memcheck/mc_translate.c for
+   background/rationale.
+*/
+
+/* Performance holes:
+
+   - fcom ; fstsw %ax ; sahf
+     sahf does not update the O flag (sigh) and so O needs to
+     be computed.  This is done expensively; it would be better
+     to have a calculate_eflags_o helper.
+
+   - emwarns; some FP codes can generate huge numbers of these
+     if the fpucw is changed in an inner loop.  It would be
+     better for the guest state to have an emwarn-enable reg
+     which can be set zero or nonzero.  If it is zero, emwarns
+     are not flagged, and instead control just flows all the
+     way through bbs as usual.
+*/
+
+/* "Special" instructions.
+
+   This instruction decoder can decode four special instructions
+   which mean nothing natively (are no-ops as far as regs/mem are
+   concerned) but have meaning for supporting Valgrind.  A special
+   instruction is flagged by the 12-byte preamble C1C703 C1C70D C1C71D
+   C1C713 (in the standard interpretation, that means: roll $3, %edi;
+   roll $13, %edi; roll $29, %edi; roll $19, %edi).  Following that,
+   one of the following 4 are allowed (standard interpretation in
+   parentheses):
+
+      87DB (xchgl %ebx,%ebx)   %EDX = client_request ( %EAX )
+      87C9 (xchgl %ecx,%ecx)   %EAX = guest_NRADDR
+      87D2 (xchgl %edx,%edx)   call-noredir *%EAX
+      87FF (xchgl %edi,%edi)   IR injection
+
+   Any other bytes following the 12-byte preamble are illegal and
+   constitute a failure in instruction decoding.  This all assumes
+   that the preamble will never occur except in specific code
+   fragments designed for Valgrind to catch.
+
+   No prefixes may precede a "Special" instruction.
+*/
+
+/* LOCK prefixed instructions.  These are translated using IR-level
+   CAS statements (IRCAS) and are believed to preserve atomicity, even
+   from the point of view of some other process racing against a
+   simulated one (presumably they communicate via a shared memory
+   segment).
+
+   Handlers which are aware of LOCK prefixes are:
+      dis_op2_G_E      (add, or, adc, sbb, and, sub, xor)
+      dis_cmpxchg_G_E  (cmpxchg)
+      dis_Grp1         (add, or, adc, sbb, and, sub, xor)
+      dis_Grp3         (not, neg)
+      dis_Grp4         (inc, dec)
+      dis_Grp5         (inc, dec)
+      dis_Grp8_Imm     (bts, btc, btr)
+      dis_bt_G_E       (bts, btc, btr)
+      dis_xadd_G_E     (xadd)
+*/
+
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_guest_x86.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_generic_bb_to_IR.h"
+#include "guest_generic_x87.h"
+#include "guest_x86_defs.h"
+
+
+/*------------------------------------------------------------*/
+/*--- Globals                                              ---*/
+/*------------------------------------------------------------*/
+
+/* These are set at the start of the translation of an insn, right
+   down in disInstr_X86, so that we don't have to pass them around
+   endlessly.  They are all constant during the translation of any
+   given insn.  NOTE: because of these globals, this module is not
+   MT-safe (see the comment at the top of the file). */
+
+/* We need to know this to do sub-register accesses correctly. */
+static VexEndness host_endness;
+
+/* Pointer to the guest code area (points to start of BB, not to the
+   insn being processed). */
+static const UChar* guest_code;
+
+/* The guest address corresponding to guest_code[0]. */
+static Addr32 guest_EIP_bbstart;
+
+/* The guest address for the instruction currently being
+   translated. */
+static Addr32 guest_EIP_curr_instr;
+
+/* The IRSB* into which we're generating code. */
+static IRSB* irsb;
+
+
+/*------------------------------------------------------------*/
+/*--- Debugging output                                     ---*/
+/*------------------------------------------------------------*/
+
+#define DIP(format, args...)           \
+   if (vex_traceflags & VEX_TRACE_FE)  \
+      vex_printf(format, ## args)
+
+#define DIS(buf, format, args...)      \
+   if (vex_traceflags & VEX_TRACE_FE)  \
+      vex_sprintf(buf, format, ## args)
+
+
+/*------------------------------------------------------------*/
+/*--- Offsets of various parts of the x86 guest state.     ---*/
+/*------------------------------------------------------------*/
+
+/* Integer registers. */
+#define OFFB_EAX       offsetof(VexGuestX86State,guest_EAX)
+#define OFFB_EBX       offsetof(VexGuestX86State,guest_EBX)
+#define OFFB_ECX       offsetof(VexGuestX86State,guest_ECX)
+#define OFFB_EDX       offsetof(VexGuestX86State,guest_EDX)
+#define OFFB_ESP       offsetof(VexGuestX86State,guest_ESP)
+#define OFFB_EBP       offsetof(VexGuestX86State,guest_EBP)
+#define OFFB_ESI       offsetof(VexGuestX86State,guest_ESI)
+#define OFFB_EDI       offsetof(VexGuestX86State,guest_EDI)
+
+/* Program counter. */
+#define OFFB_EIP       offsetof(VexGuestX86State,guest_EIP)
+
+/* Condition-code thunk. */
+#define OFFB_CC_OP     offsetof(VexGuestX86State,guest_CC_OP)
+#define OFFB_CC_DEP1   offsetof(VexGuestX86State,guest_CC_DEP1)
+#define OFFB_CC_DEP2   offsetof(VexGuestX86State,guest_CC_DEP2)
+#define OFFB_CC_NDEP   offsetof(VexGuestX86State,guest_CC_NDEP)
+
+/* x87 FPU state and the remaining eflags bits modelled separately. */
+#define OFFB_FPREGS    offsetof(VexGuestX86State,guest_FPREG[0])
+#define OFFB_FPTAGS    offsetof(VexGuestX86State,guest_FPTAG[0])
+#define OFFB_DFLAG     offsetof(VexGuestX86State,guest_DFLAG)
+#define OFFB_IDFLAG    offsetof(VexGuestX86State,guest_IDFLAG)
+#define OFFB_ACFLAG    offsetof(VexGuestX86State,guest_ACFLAG)
+#define OFFB_FTOP      offsetof(VexGuestX86State,guest_FTOP)
+#define OFFB_FC3210    offsetof(VexGuestX86State,guest_FC3210)
+#define OFFB_FPROUND   offsetof(VexGuestX86State,guest_FPROUND)
+
+/* Segment registers and descriptor-table pointers. */
+#define OFFB_CS        offsetof(VexGuestX86State,guest_CS)
+#define OFFB_DS        offsetof(VexGuestX86State,guest_DS)
+#define OFFB_ES        offsetof(VexGuestX86State,guest_ES)
+#define OFFB_FS        offsetof(VexGuestX86State,guest_FS)
+#define OFFB_GS        offsetof(VexGuestX86State,guest_GS)
+#define OFFB_SS        offsetof(VexGuestX86State,guest_SS)
+#define OFFB_LDT       offsetof(VexGuestX86State,guest_LDT)
+#define OFFB_GDT       offsetof(VexGuestX86State,guest_GDT)
+
+/* SSE state. */
+#define OFFB_SSEROUND  offsetof(VexGuestX86State,guest_SSEROUND)
+#define OFFB_XMM0      offsetof(VexGuestX86State,guest_XMM0)
+#define OFFB_XMM1      offsetof(VexGuestX86State,guest_XMM1)
+#define OFFB_XMM2      offsetof(VexGuestX86State,guest_XMM2)
+#define OFFB_XMM3      offsetof(VexGuestX86State,guest_XMM3)
+#define OFFB_XMM4      offsetof(VexGuestX86State,guest_XMM4)
+#define OFFB_XMM5      offsetof(VexGuestX86State,guest_XMM5)
+#define OFFB_XMM6      offsetof(VexGuestX86State,guest_XMM6)
+#define OFFB_XMM7      offsetof(VexGuestX86State,guest_XMM7)
+
+/* Emulation notes and translation-cache maintenance fields. */
+#define OFFB_EMNOTE    offsetof(VexGuestX86State,guest_EMNOTE)
+
+#define OFFB_CMSTART   offsetof(VexGuestX86State,guest_CMSTART)
+#define OFFB_CMLEN     offsetof(VexGuestX86State,guest_CMLEN)
+#define OFFB_NRADDR    offsetof(VexGuestX86State,guest_NRADDR)
+
+#define OFFB_IP_AT_SYSCALL offsetof(VexGuestX86State,guest_IP_AT_SYSCALL)
+
+
+/*------------------------------------------------------------*/
+/*--- Helper bits and pieces for deconstructing the        ---*/
+/*--- x86 insn stream.                                     ---*/
+/*------------------------------------------------------------*/
+
+/* This is the Intel register encoding -- integer regs. */
+#define R_EAX 0
+#define R_ECX 1
+#define R_EDX 2
+#define R_EBX 3
+#define R_ESP 4
+#define R_EBP 5
+#define R_ESI 6
+#define R_EDI 7
+
+#define R_AL (0+R_EAX)
+#define R_AH (4+R_EAX)
+
+/* This is the Intel register encoding -- segment regs. */
+#define R_ES 0
+#define R_CS 1
+#define R_SS 2
+#define R_DS 3
+#define R_FS 4
+#define R_GS 5
+
+
/* Append a statement to the IRSB currently under construction (the
   file-global 'irsb'). */
static void stmt ( IRStmt* st )
{
   addStmtToIRSB( irsb, st );
}
+
/* Allocate a new temporary of the given type in the IRSB's type
   environment.  Panics (via vassert) on an implausible type. */
static IRTemp newTemp ( IRType ty )
{
   vassert(isPlausibleIRType(ty));
   return newIRTemp( irsb->tyenv, ty );
}
+
+/* Various simple conversions */
+
+static UInt extend_s_8to32( UInt x )
+{
+   return (UInt)((Int)(x << 24) >> 24);
+}
+
+static UInt extend_s_16to32 ( UInt x )
+{
+  return (UInt)((Int)(x << 16) >> 16);
+}
+
/* Fetch a byte from the guest insn stream, at offset 'delta' from the
   start of the current instruction block (file-global 'guest_code'). */
static UChar getIByte ( Int delta )
{
   return guest_code[delta];
}
+
+/* Extract the reg field from a modRM byte. */
+static Int gregOfRM ( UChar mod_reg_rm )
+{
+   return (Int)( (mod_reg_rm >> 3) & 7 );
+}
+
+/* Figure out whether the mod and rm parts of a modRM byte refer to a
+   register or memory.  If so, the byte will have the form 11XXXYYY,
+   where YYY is the register number. */
+static Bool epartIsReg ( UChar mod_reg_rm )
+{
+   return toBool(0xC0 == (mod_reg_rm & 0xC0));
+}
+
+/* ... and extract the register number ... */
+static Int eregOfRM ( UChar mod_reg_rm )
+{
+   return (Int)(mod_reg_rm & 0x7);
+}
+
+/* Get a 8/16/32-bit unsigned value out of the insn stream. */
+
+static UChar getUChar ( Int delta )
+{
+   UChar v = guest_code[delta+0];
+   return toUChar(v);
+}
+
+static UInt getUDisp16 ( Int delta )
+{
+   UInt v = guest_code[delta+1]; v <<= 8;
+   v |= guest_code[delta+0];
+   return v & 0xFFFF;
+}
+
+static UInt getUDisp32 ( Int delta )
+{
+   UInt v = guest_code[delta+3]; v <<= 8;
+   v |= guest_code[delta+2]; v <<= 8;
+   v |= guest_code[delta+1]; v <<= 8;
+   v |= guest_code[delta+0];
+   return v;
+}
+
+static UInt getUDisp ( Int size, Int delta )
+{
+   switch (size) {
+      case 4: return getUDisp32(delta);
+      case 2: return getUDisp16(delta);
+      case 1: return (UInt)getUChar(delta);
+      default: vpanic("getUDisp(x86)");
+   }
+   return 0; /*notreached*/
+}
+
+
+/* Get a byte value out of the insn stream and sign-extend to 32
+   bits. */
+static UInt getSDisp8 ( Int delta )
+{
+   return extend_s_8to32( (UInt) (guest_code[delta]) );
+}
+
+static UInt getSDisp16 ( Int delta0 )
+{
+   const UChar* eip = &guest_code[delta0];
+   UInt d = *eip++;
+   d |= ((*eip++) << 8);
+   return extend_s_16to32(d);
+}
+
+static UInt getSDisp ( Int size, Int delta )
+{
+   switch (size) {
+      case 4: return getUDisp32(delta);
+      case 2: return getSDisp16(delta);
+      case 1: return getSDisp8(delta);
+      default: vpanic("getSDisp(x86)");
+  }
+  return 0; /*notreached*/
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for constructing IR.                         ---*/
+/*------------------------------------------------------------*/
+
+/* Create a 1/2/4 byte read of an x86 integer registers.  For 16/8 bit
+   register references, we need to take the host endianness into
+   account.  Supplied value is 0 .. 7 and in the Intel instruction
+   encoding. */
+
+static IRType szToITy ( Int n )
+{
+   switch (n) {
+      case 1: return Ity_I8;
+      case 2: return Ity_I16;
+      case 4: return Ity_I32;
+      default: vpanic("szToITy(x86)");
+   }
+}
+
/* On a little-endian host, less significant bits of the guest
   registers are at lower addresses.  Therefore, a reference to a
   register's low half has the same guest state offset as a reference
   to the full register.
*/
/* Return the guest-state byte offset for an 'sz'-byte (1/2/4) access
   to integer register 'archreg' (Intel encoding, 0..7). */
static Int integerGuestRegOffset ( Int sz, UInt archreg )
{
   vassert(archreg < 8);

   /* Correct for little-endian host only. */
   vassert(host_endness == VexEndnessLE);

   /* Full-width, 16-bit, and low-byte (%al..%bl, archreg 0..3)
      references all start at the base offset of the 32-bit reg. */
   if (sz == 4 || sz == 2 || (sz == 1 && archreg < 4)) {
      switch (archreg) {
         case R_EAX: return OFFB_EAX;
         case R_EBX: return OFFB_EBX;
         case R_ECX: return OFFB_ECX;
         case R_EDX: return OFFB_EDX;
         case R_ESI: return OFFB_ESI;
         case R_EDI: return OFFB_EDI;
         case R_ESP: return OFFB_ESP;
         case R_EBP: return OFFB_EBP;
         default: vpanic("integerGuestRegOffset(x86,le)(4,2)");
      }
   }

   /* High-byte registers (%ah..%bh encode as archreg 4..7): one byte
      above the base of the corresponding 32-bit register. */
   vassert(archreg >= 4 && archreg < 8 && sz == 1);
   switch (archreg-4) {
      case R_EAX: return 1+ OFFB_EAX;
      case R_EBX: return 1+ OFFB_EBX;
      case R_ECX: return 1+ OFFB_ECX;
      case R_EDX: return 1+ OFFB_EDX;
      default: vpanic("integerGuestRegOffset(x86,le)(1h)");
   }

   /* NOTREACHED */
   vpanic("integerGuestRegOffset(x86,le)");
}
+
+static Int segmentGuestRegOffset ( UInt sreg )
+{
+   switch (sreg) {
+      case R_ES: return OFFB_ES;
+      case R_CS: return OFFB_CS;
+      case R_SS: return OFFB_SS;
+      case R_DS: return OFFB_DS;
+      case R_FS: return OFFB_FS;
+      case R_GS: return OFFB_GS;
+      default: vpanic("segmentGuestRegOffset(x86)");
+   }
+}
+
+static Int xmmGuestRegOffset ( UInt xmmreg )
+{
+   switch (xmmreg) {
+      case 0: return OFFB_XMM0;
+      case 1: return OFFB_XMM1;
+      case 2: return OFFB_XMM2;
+      case 3: return OFFB_XMM3;
+      case 4: return OFFB_XMM4;
+      case 5: return OFFB_XMM5;
+      case 6: return OFFB_XMM6;
+      case 7: return OFFB_XMM7;
+      default: vpanic("xmmGuestRegOffset");
+   }
+}
+
+/* Lanes of vector registers are always numbered from zero being the
+   least significant lane (rightmost in the register).  */
+
+static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   vassert(laneno >= 0 && laneno < 8);
+   return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
+}
+
+static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   vassert(laneno >= 0 && laneno < 4);
+   return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
+}
+
+static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
+{
+   /* Correct for little-endian host only. */
+   vassert(host_endness == VexEndnessLE);
+   vassert(laneno >= 0 && laneno < 2);
+   return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
+}
+
+static IRExpr* getIReg ( Int sz, UInt archreg )
+{
+   vassert(sz == 1 || sz == 2 || sz == 4);
+   vassert(archreg < 8);
+   return IRExpr_Get( integerGuestRegOffset(sz,archreg),
+                      szToITy(sz) );
+}
+
+/* Ditto, but write to a reg instead. */
+static void putIReg ( Int sz, UInt archreg, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(irsb->tyenv, e);
+   switch (sz) {
+      case 1: vassert(ty == Ity_I8); break;
+      case 2: vassert(ty == Ity_I16); break;
+      case 4: vassert(ty == Ity_I32); break;
+      default: vpanic("putIReg(x86)");
+   }
+   vassert(archreg < 8);
+   stmt( IRStmt_Put(integerGuestRegOffset(sz,archreg), e) );
+}
+
/* Build an I16 read of segment register 'sreg'. */
static IRExpr* getSReg ( UInt sreg )
{
   return IRExpr_Get( segmentGuestRegOffset(sreg), Ity_I16 );
}
+
/* Write an I16 value 'e' to segment register 'sreg'. */
static void putSReg ( UInt sreg, IRExpr* e )
{
   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I16);
   stmt( IRStmt_Put( segmentGuestRegOffset(sreg), e ) );
}
+
/* Build a V128 read of a whole XMM register. */
static IRExpr* getXMMReg ( UInt xmmreg )
{
   return IRExpr_Get( xmmGuestRegOffset(xmmreg), Ity_V128 );
}
+
/* Read a 64-bit integer lane of an XMM register. */
static IRExpr* getXMMRegLane64 ( UInt xmmreg, Int laneno )
{
   return IRExpr_Get( xmmGuestRegLane64offset(xmmreg,laneno), Ity_I64 );
}
+
/* Read a 64-bit float (F64) lane of an XMM register. */
static IRExpr* getXMMRegLane64F ( UInt xmmreg, Int laneno )
{
   return IRExpr_Get( xmmGuestRegLane64offset(xmmreg,laneno), Ity_F64 );
}
+
/* Read a 32-bit integer lane of an XMM register. */
static IRExpr* getXMMRegLane32 ( UInt xmmreg, Int laneno )
{
   return IRExpr_Get( xmmGuestRegLane32offset(xmmreg,laneno), Ity_I32 );
}
+
/* Read a 32-bit float (F32) lane of an XMM register. */
static IRExpr* getXMMRegLane32F ( UInt xmmreg, Int laneno )
{
   return IRExpr_Get( xmmGuestRegLane32offset(xmmreg,laneno), Ity_F32 );
}
+
/* Write a V128 value to a whole XMM register. */
static void putXMMReg ( UInt xmmreg, IRExpr* e )
{
   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_V128);
   stmt( IRStmt_Put( xmmGuestRegOffset(xmmreg), e ) );
}
+
/* Write an I64 value to a 64-bit lane of an XMM register. */
static void putXMMRegLane64 ( UInt xmmreg, Int laneno, IRExpr* e )
{
   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I64);
   stmt( IRStmt_Put( xmmGuestRegLane64offset(xmmreg,laneno), e ) );
}
+
/* Write an F64 value to a 64-bit lane of an XMM register. */
static void putXMMRegLane64F ( UInt xmmreg, Int laneno, IRExpr* e )
{
   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_F64);
   stmt( IRStmt_Put( xmmGuestRegLane64offset(xmmreg,laneno), e ) );
}
+
/* Write an F32 value to a 32-bit lane of an XMM register. */
static void putXMMRegLane32F ( UInt xmmreg, Int laneno, IRExpr* e )
{
   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_F32);
   stmt( IRStmt_Put( xmmGuestRegLane32offset(xmmreg,laneno), e ) );
}
+
/* Write an I32 value to a 32-bit lane of an XMM register. */
static void putXMMRegLane32 ( UInt xmmreg, Int laneno, IRExpr* e )
{
   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I32);
   stmt( IRStmt_Put( xmmGuestRegLane32offset(xmmreg,laneno), e ) );
}
+
/* Write an I16 value to a 16-bit lane of an XMM register. */
static void putXMMRegLane16 ( UInt xmmreg, Int laneno, IRExpr* e )
{
   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I16);
   stmt( IRStmt_Put( xmmGuestRegLane16offset(xmmreg,laneno), e ) );
}
+
/* Emit an assignment of expression 'e' to temporary 'dst'. */
static void assign ( IRTemp dst, IRExpr* e )
{
   stmt( IRStmt_WrTmp(dst, e) );
}
+
/* Emit a little-endian store of 'data' at 'addr'. */
static void storeLE ( IRExpr* addr, IRExpr* data )
{
   stmt( IRStmt_Store(Iend_LE, addr, data) );
}
+
/* Shorthand constructor for a unary IR expression. */
static IRExpr* unop ( IROp op, IRExpr* a )
{
   return IRExpr_Unop(op, a);
}
+
/* Shorthand constructor for a binary IR expression. */
static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
{
   return IRExpr_Binop(op, a1, a2);
}
+
/* Shorthand constructor for a ternary IR expression. */
static IRExpr* triop ( IROp op, IRExpr* a1, IRExpr* a2, IRExpr* a3 )
{
   return IRExpr_Triop(op, a1, a2, a3);
}
+
/* Shorthand: read the value of temporary 'tmp'. */
static IRExpr* mkexpr ( IRTemp tmp )
{
   return IRExpr_RdTmp(tmp);
}
+
/* Build an 8-bit constant; 'i' must fit in 8 bits. */
static IRExpr* mkU8 ( UInt i )
{
   vassert(i < 256);
   return IRExpr_Const(IRConst_U8( (UChar)i ));
}
+
/* Build a 16-bit constant; 'i' must fit in 16 bits. */
static IRExpr* mkU16 ( UInt i )
{
   vassert(i < 65536);
   return IRExpr_Const(IRConst_U16( (UShort)i ));
}
+
/* Build a 32-bit constant. */
static IRExpr* mkU32 ( UInt i )
{
   return IRExpr_Const(IRConst_U32(i));
}
+
/* Build a 64-bit constant. */
static IRExpr* mkU64 ( ULong i )
{
   return IRExpr_Const(IRConst_U64(i));
}
+
+static IRExpr* mkU ( IRType ty, UInt i )
+{
+   if (ty == Ity_I8)  return mkU8(i);
+   if (ty == Ity_I16) return mkU16(i);
+   if (ty == Ity_I32) return mkU32(i);
+   /* If this panics, it usually means you passed a size (1,2,4)
+      value as the IRType, rather than a real IRType. */
+   vpanic("mkU(x86)");
+}
+
/* Build a V128 constant from a 16-bit lane mask. */
static IRExpr* mkV128 ( UShort mask )
{
   return IRExpr_Const(IRConst_V128(mask));
}
+
/* Build a little-endian load of type 'ty' from 'addr'. */
static IRExpr* loadLE ( IRType ty, IRExpr* addr )
{
   return IRExpr_Load(Iend_LE, ty, addr);
}
+
/* Widen an 8-bit base operation 'op8' to the operation matching type
   'ty'.  Relies on the IROp enumeration laying out the 8/16/32-bit
   variants of each listed op adjacently (op8, op8+1, op8+2). */
static IROp mkSizedOp ( IRType ty, IROp op8 )
{
   Int adj;
   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
   vassert(op8 == Iop_Add8 || op8 == Iop_Sub8 
           || op8 == Iop_Mul8 
           || op8 == Iop_Or8 || op8 == Iop_And8 || op8 == Iop_Xor8
           || op8 == Iop_Shl8 || op8 == Iop_Shr8 || op8 == Iop_Sar8
           || op8 == Iop_CmpEQ8 || op8 == Iop_CmpNE8
           || op8 == Iop_CasCmpNE8
           || op8 == Iop_ExpCmpNE8
           || op8 == Iop_Not8);
   adj = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : 2);
   return adj + op8;
}
+
+static IROp mkWidenOp ( Int szSmall, Int szBig, Bool signd )
+{
+   if (szSmall == 1 && szBig == 4) {
+      return signd ? Iop_8Sto32 : Iop_8Uto32;
+   }
+   if (szSmall == 1 && szBig == 2) {
+      return signd ? Iop_8Sto16 : Iop_8Uto16;
+   }
+   if (szSmall == 2 && szBig == 4) {
+      return signd ? Iop_16Sto32 : Iop_16Uto32;
+   }
+   vpanic("mkWidenOp(x86,guest)");
+}
+
/* Logical AND of two Ity_I1 expressions, computed by widening both to
   I32, AND-ing, and narrowing back to I1. */
static IRExpr* mkAnd1 ( IRExpr* x, IRExpr* y )
{
   vassert(typeOfIRExpr(irsb->tyenv,x) == Ity_I1);
   vassert(typeOfIRExpr(irsb->tyenv,y) == Ity_I1);
   return unop(Iop_32to1, 
               binop(Iop_And32, 
                     unop(Iop_1Uto32,x), 
                     unop(Iop_1Uto32,y)));
}
+
+/* Generate a compare-and-swap operation, operating on memory at
+   'addr'.  The expected value is 'expVal' and the new value is
+   'newVal'.  If the operation fails, then transfer control (with a
+   no-redir jump (XXX no -- see comment at top of this file)) to
+   'restart_point', which is presumably the address of the guest
+   instruction again -- retrying, essentially. */
static void casLE ( IRExpr* addr, IRExpr* expVal, IRExpr* newVal,
                    Addr32 restart_point )
{
   IRCAS* cas;
   IRType tyE    = typeOfIRExpr(irsb->tyenv, expVal);
   IRType tyN    = typeOfIRExpr(irsb->tyenv, newVal);
   IRTemp oldTmp = newTemp(tyE);   /* receives the value found in memory */
   IRTemp expTmp = newTemp(tyE);   /* snapshot of the expected value */
   vassert(tyE == tyN);
   vassert(tyE == Ity_I32 || tyE == Ity_I16 || tyE == Ity_I8);
   assign(expTmp, expVal);
   /* Single (non-double) CAS: hi halves are NULL. */
   cas = mkIRCAS( IRTemp_INVALID, oldTmp, Iend_LE, addr, 
                  NULL, mkexpr(expTmp), NULL, newVal );
   stmt( IRStmt_CAS(cas) );
   /* If the value found differs from the expected value, the CAS
      failed: jump back to 'restart_point' to retry the guest insn. */
   stmt( IRStmt_Exit(
            binop( mkSizedOp(tyE,Iop_CasCmpNE8),
                   mkexpr(oldTmp), mkexpr(expTmp) ),
            Ijk_Boring, /*Ijk_NoRedir*/
            IRConst_U32( restart_point ),
            OFFB_EIP
         ));
}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for %eflags.                                 ---*/
+/*------------------------------------------------------------*/
+
+/* -------------- Evaluating the flags-thunk. -------------- */
+
+/* Build IR to calculate all the eflags from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression ::
+   Ity_I32. */
static IRExpr* mk_x86g_calculate_eflags_all ( void )
{
   /* Pass the four thunk fields to the helper as a clean call. */
   IRExpr** args
      = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I32),
                       IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
                       IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
                       IRExpr_Get(OFFB_CC_NDEP, Ity_I32) );
   IRExpr* call
      = mkIRExprCCall(
           Ity_I32,
           0/*regparm*/, 
           "x86g_calculate_eflags_all", &x86g_calculate_eflags_all,
           args
        );
   /* Exclude OP and NDEP from definedness checking.  We're only
      interested in DEP1 and DEP2. */
   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
   return call;
}
+
+/* Build IR to calculate some particular condition from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression ::
+   Ity_Bit. */
static IRExpr* mk_x86g_calculate_condition ( X86Condcode cond )
{
   /* Args: the condition code to evaluate, then the four thunk fields. */
   IRExpr** args
      = mkIRExprVec_5( mkU32(cond),
                       IRExpr_Get(OFFB_CC_OP,  Ity_I32),
                       IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
                       IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
                       IRExpr_Get(OFFB_CC_NDEP, Ity_I32) );
   IRExpr* call
      = mkIRExprCCall(
           Ity_I32,
           0/*regparm*/, 
           "x86g_calculate_condition", &x86g_calculate_condition,
           args
        );
   /* Exclude the requested condition, OP and NDEP from definedness
      checking.  We're only interested in DEP1 and DEP2. */
   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<1) | (1<<4);
   /* Helper returns 0/1 in an I32; narrow to the I1 the caller wants. */
   return unop(Iop_32to1, call);
}
+
+/* Build IR to calculate just the carry flag from stored
+   CC_OP/CC_DEP1/CC_DEP2/CC_NDEP.  Returns an expression :: Ity_I32. */
static IRExpr* mk_x86g_calculate_eflags_c ( void )
{
   IRExpr** args
      = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP,   Ity_I32),
                       IRExpr_Get(OFFB_CC_DEP1, Ity_I32),
                       IRExpr_Get(OFFB_CC_DEP2, Ity_I32),
                       IRExpr_Get(OFFB_CC_NDEP, Ity_I32) );
   /* Note: regparm 3 here, unlike the other two flag helpers, which
      use regparm 0. */
   IRExpr* call
      = mkIRExprCCall(
           Ity_I32,
           3/*regparm*/, 
           "x86g_calculate_eflags_c", &x86g_calculate_eflags_c,
           args
        );
   /* Exclude OP and NDEP from definedness checking.  We're only
      interested in DEP1 and DEP2. */
   call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3);
   return call;
}
+
+
+/* -------------- Building the flags-thunk. -------------- */
+
+/* The machinery in this section builds the flag-thunk following a
+   flag-setting operation.  Hence the various setFlags_* functions.
+*/
+
/* True iff op8 is the 8-bit add or subtract op. */
static Bool isAddSub ( IROp op8 )
{
   return toBool(op8 == Iop_Add8 || op8 == Iop_Sub8);
}
+
/* True iff op8 is one of the 8-bit bitwise-logic ops. */
static Bool isLogic ( IROp op8 )
{
   return toBool(op8 == Iop_And8 || op8 == Iop_Or8 || op8 == Iop_Xor8);
}
+
+/* U-widen 8/16/32 bit int expr to 32. */
+static IRExpr* widenUto32 ( IRExpr* e )
+{
+   switch (typeOfIRExpr(irsb->tyenv,e)) {
+      case Ity_I32: return e;
+      case Ity_I16: return unop(Iop_16Uto32,e);
+      case Ity_I8:  return unop(Iop_8Uto32,e);
+      default: vpanic("widenUto32");
+   }
+}
+
+/* S-widen 8/16/32 bit int expr to 32. */
+static IRExpr* widenSto32 ( IRExpr* e )
+{
+   switch (typeOfIRExpr(irsb->tyenv,e)) {
+      case Ity_I32: return e;
+      case Ity_I16: return unop(Iop_16Sto32,e);
+      case Ity_I8:  return unop(Iop_8Sto32,e);
+      default: vpanic("widenSto32");
+   }
+}
+
+/* Narrow 8/16/32 bit int expr to 8/16/32.  Clearly only some
+   of these combinations make sense. */
/* Narrow 8/16/32 bit int expr to 8/16/32.  Clearly only some
   of these combinations make sense (same-size is the identity;
   otherwise only 32->16 and 32->8 are supported; anything else
   prints both types and panics). */
static IRExpr* narrowTo ( IRType dst_ty, IRExpr* e )
{
   IRType src_ty = typeOfIRExpr(irsb->tyenv,e);
   if (src_ty == dst_ty)
      return e;
   if (src_ty == Ity_I32 && dst_ty == Ity_I16)
      return unop(Iop_32to16, e);
   if (src_ty == Ity_I32 && dst_ty == Ity_I8)
      return unop(Iop_32to8, e);

   vex_printf("\nsrc, dst tys are: ");
   ppIRType(src_ty);
   vex_printf(", ");
   ppIRType(dst_ty);
   vex_printf("\n");
   vpanic("narrowTo(x86)");
}
+
+
+/* Set the flags thunk OP, DEP1 and DEP2 fields.  The supplied op is
+   auto-sized up to the real op. */
+
static 
void setFlags_DEP1_DEP2 ( IROp op8, IRTemp dep1, IRTemp dep2, IRType ty )
{
   /* 0/1/2 size adjustment added to the B-sized thunk op. */
   Int ccOp = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : 2);

   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);

   switch (op8) {
      case Iop_Add8: ccOp += X86G_CC_OP_ADDB;   break;
      case Iop_Sub8: ccOp += X86G_CC_OP_SUBB;   break;
      default:       ppIROp(op8);
                     vpanic("setFlags_DEP1_DEP2(x86)");
   }
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(ccOp)) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto32(mkexpr(dep1))) );
   stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto32(mkexpr(dep2))) );
   /* Set NDEP even though it isn't used.  This makes redundant-PUT
      elimination of previous stores to this field work better. */
   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
}
+
+
+/* Set the OP and DEP1 fields only, and write zero to DEP2. */
+
static 
void setFlags_DEP1 ( IROp op8, IRTemp dep1, IRType ty )
{
   /* 0/1/2 size adjustment added to the B-sized thunk op. */
   Int ccOp = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : 2);

   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);

   switch (op8) {
      case Iop_Or8:
      case Iop_And8:
      case Iop_Xor8: ccOp += X86G_CC_OP_LOGICB; break;
      default:       ppIROp(op8);
                     vpanic("setFlags_DEP1(x86)");
   }
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(ccOp)) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto32(mkexpr(dep1))) );
   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0)) );
   /* Set NDEP even though it isn't used.  This makes redundant-PUT
      elimination of previous stores to this field work better. */
   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
}
+
+
+/* For shift operations, we put in the result and the undershifted
+   result.  Except if the shift amount is zero, the thunk is left
+   unchanged. */
+
static void setFlags_DEP1_DEP2_shift ( IROp    op32,
                                       IRTemp  res,
                                       IRTemp  resUS,
                                       IRType  ty,
                                       IRTemp  guard )
{
   /* NOTE: adjustment is 2/1/0 here (subtracted from the L-sized op
      below), the reverse of the 0/1/2 used by the other setFlags_*
      helpers, which add to a B-sized op. */
   Int ccOp = ty==Ity_I8 ? 2 : (ty==Ity_I16 ? 1 : 0);

   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
   vassert(guard);

   /* Both kinds of right shifts are handled by the same thunk
      operation. */
   switch (op32) {
      case Iop_Shr32:
      case Iop_Sar32: ccOp = X86G_CC_OP_SHRL - ccOp; break;
      case Iop_Shl32: ccOp = X86G_CC_OP_SHLL - ccOp; break;
      default:        ppIROp(op32);
                      vpanic("setFlags_DEP1_DEP2_shift(x86)");
   }

   /* guard :: Ity_I8.  We need to convert it to I1. */
   IRTemp guardB = newTemp(Ity_I1);
   assign( guardB, binop(Iop_CmpNE8, mkexpr(guard), mkU8(0)) );

   /* DEP1 contains the result, DEP2 contains the undershifted value.
      Each field is updated only when the guard (shift amount != 0)
      holds; otherwise the old thunk value is kept. */
   stmt( IRStmt_Put( OFFB_CC_OP,
                     IRExpr_ITE( mkexpr(guardB),
                                 mkU32(ccOp),
                                 IRExpr_Get(OFFB_CC_OP,Ity_I32) ) ));
   stmt( IRStmt_Put( OFFB_CC_DEP1,
                     IRExpr_ITE( mkexpr(guardB),
                                 widenUto32(mkexpr(res)),
                                 IRExpr_Get(OFFB_CC_DEP1,Ity_I32) ) ));
   stmt( IRStmt_Put( OFFB_CC_DEP2, 
                     IRExpr_ITE( mkexpr(guardB),
                                 widenUto32(mkexpr(resUS)),
                                 IRExpr_Get(OFFB_CC_DEP2,Ity_I32) ) ));
   /* Set NDEP even though it isn't used.  This makes redundant-PUT
      elimination of previous stores to this field work better. */
   stmt( IRStmt_Put( OFFB_CC_NDEP,
                     IRExpr_ITE( mkexpr(guardB),
                                 mkU32(0),
                                 IRExpr_Get(OFFB_CC_NDEP,Ity_I32) ) ));
}
+
+
+/* For the inc/dec case, we store in DEP1 the result value and in NDEP
+   the former value of the carry flag, which unfortunately we have to
+   compute. */
+
static void setFlags_INC_DEC ( Bool inc, IRTemp res, IRType ty )
{
   Int ccOp = inc ? X86G_CC_OP_INCB : X86G_CC_OP_DECB;
   
   /* Size adjustment: 0/1/2 added to the B-sized op. */
   ccOp += ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : 2);
   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);

   /* This has to come first, because calculating the C flag 
      may require reading all four thunk fields. */
   stmt( IRStmt_Put( OFFB_CC_NDEP, mk_x86g_calculate_eflags_c()) );
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(ccOp)) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto32(mkexpr(res))) );
   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0)) );
}
+
+
+/* Multiplies are pretty much like add and sub: DEP1 and DEP2 hold the
+   two arguments. */
+
+static
+void setFlags_MUL ( IRType ty, IRTemp arg1, IRTemp arg2, UInt base_op )
+{
+   switch (ty) {
+      case Ity_I8:
+         stmt( IRStmt_Put( OFFB_CC_OP, mkU32(base_op+0) ) );
+         break;
+      case Ity_I16:
+         stmt( IRStmt_Put( OFFB_CC_OP, mkU32(base_op+1) ) );
+         break;
+      case Ity_I32:
+         stmt( IRStmt_Put( OFFB_CC_OP, mkU32(base_op+2) ) );
+         break;
+      default:
+         vpanic("setFlags_MUL(x86)");
+   }
+   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto32(mkexpr(arg1)) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto32(mkexpr(arg2)) ));
+   /* Set NDEP even though it isn't used.  This makes redundant-PUT
+      elimination of previous stores to this field work better. */
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+}
+
+
+/* -------------- Condition codes. -------------- */
+
+/* Condition codes, using the Intel encoding.  */
+
/* Return the Intel mnemonic suffix for condition code 'cond', for
   disassembly printing. */
static const HChar* name_X86Condcode ( X86Condcode cond )
{
   switch (cond) {
      case X86CondO:      return "o";
      case X86CondNO:     return "no";
      case X86CondB:      return "b";
      case X86CondNB:     return "nb";
      case X86CondZ:      return "z";
      case X86CondNZ:     return "nz";
      case X86CondBE:     return "be";
      case X86CondNBE:    return "nbe";
      case X86CondS:      return "s";
      case X86CondNS:     return "ns";
      case X86CondP:      return "p";
      case X86CondNP:     return "np";
      case X86CondL:      return "l";
      case X86CondNL:     return "nl";
      case X86CondLE:     return "le";
      case X86CondNLE:    return "nle";
      case X86CondAlways: return "ALWAYS";
      default: vpanic("name_X86Condcode");
   }
}
+
+static 
+X86Condcode positiveIse_X86Condcode ( X86Condcode  cond,
+                                      Bool*        needInvert )
+{
+   vassert(cond >= X86CondO && cond <= X86CondNLE);
+   if (cond & 1) {
+      *needInvert = True;
+      return cond-1;
+   } else {
+      *needInvert = False;
+      return cond;
+   }
+}
+
+
+/* -------------- Helpers for ADD/SUB with carry. -------------- */
+
+/* Given ta1, ta2 and tres, compute tres = ADC(ta1,ta2) and set flags
+   appropriately.
+
+   Optionally, generate a store for the 'tres' value.  This can either
+   be a normal store, or it can be a cas-with-possible-failure style
+   store:
+
+   if taddr is IRTemp_INVALID, then no store is generated.
+
+   if taddr is not IRTemp_INVALID, then a store (using taddr as
+   the address) is generated:
+
+     if texpVal is IRTemp_INVALID then a normal store is
+     generated, and restart_point must be zero (it is irrelevant).
+
+     if texpVal is not IRTemp_INVALID then a cas-style store is
+     generated.  texpVal is the expected value, restart_point
+     is the restart point if the store fails, and texpVal must
+     have the same type as tres.   
+*/
static void helper_ADC ( Int sz,
                         IRTemp tres, IRTemp ta1, IRTemp ta2,
                         /* info about optional store: */
                         IRTemp taddr, IRTemp texpVal, Addr32 restart_point )
{
   UInt    thunkOp;
   IRType  ty    = szToITy(sz);
   IRTemp  oldc  = newTemp(Ity_I32);  /* old carry, widened, masked to 0/1 */
   IRTemp  oldcn = newTemp(ty);       /* old carry narrowed to operand type */
   IROp    plus  = mkSizedOp(ty, Iop_Add8);
   IROp    xor   = mkSizedOp(ty, Iop_Xor8);

   vassert(typeOfIRTemp(irsb->tyenv, tres) == ty);
   vassert(sz == 1 || sz == 2 || sz == 4);
   thunkOp = sz==4 ? X86G_CC_OP_ADCL 
                   : (sz==2 ? X86G_CC_OP_ADCW : X86G_CC_OP_ADCB);

   /* oldc = old carry flag, 0 or 1 */
   assign( oldc,  binop(Iop_And32,
                        mk_x86g_calculate_eflags_c(),
                        mkU32(1)) );

   assign( oldcn, narrowTo(ty, mkexpr(oldc)) );

   /* tres = ta1 + ta2 + old_carry */
   assign( tres, binop(plus,
                       binop(plus,mkexpr(ta1),mkexpr(ta2)),
                       mkexpr(oldcn)) );

   /* Possibly generate a store of 'tres' to 'taddr'.  See comment at
      start of this function. */
   if (taddr != IRTemp_INVALID) {
      if (texpVal == IRTemp_INVALID) {
         vassert(restart_point == 0);
         storeLE( mkexpr(taddr), mkexpr(tres) );
      } else {
         vassert(typeOfIRTemp(irsb->tyenv, texpVal) == ty);
         /* .. and hence 'texpVal' has the same type as 'tres'. */
         casLE( mkexpr(taddr),
                mkexpr(texpVal), mkexpr(tres), restart_point );
      }
   }

   /* Thunk: DEP1 = first arg, DEP2 = second arg XOR old carry,
      NDEP = old carry. */
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(thunkOp) ) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto32(mkexpr(ta1)) ));
   stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto32(binop(xor, mkexpr(ta2), 
                                                         mkexpr(oldcn)) )) );
   stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(oldc) ) );
}
+
+
+/* Given ta1, ta2 and tres, compute tres = SBB(ta1,ta2) and set flags
+   appropriately.  As with helper_ADC, possibly generate a store of
+   the result -- see comments on helper_ADC for details.
+*/
static void helper_SBB ( Int sz,
                         IRTemp tres, IRTemp ta1, IRTemp ta2,
                         /* info about optional store: */
                         IRTemp taddr, IRTemp texpVal, Addr32 restart_point )
{
   UInt    thunkOp;
   IRType  ty    = szToITy(sz);
   IRTemp  oldc  = newTemp(Ity_I32);  /* old carry, widened, masked to 0/1 */
   IRTemp  oldcn = newTemp(ty);       /* old carry narrowed to operand type */
   IROp    minus = mkSizedOp(ty, Iop_Sub8);
   IROp    xor   = mkSizedOp(ty, Iop_Xor8);

   vassert(typeOfIRTemp(irsb->tyenv, tres) == ty);
   vassert(sz == 1 || sz == 2 || sz == 4);
   thunkOp = sz==4 ? X86G_CC_OP_SBBL 
                   : (sz==2 ? X86G_CC_OP_SBBW : X86G_CC_OP_SBBB);

   /* oldc = old carry flag, 0 or 1 */
   assign( oldc, binop(Iop_And32,
                       mk_x86g_calculate_eflags_c(),
                       mkU32(1)) );

   assign( oldcn, narrowTo(ty, mkexpr(oldc)) );

   /* tres = ta1 - ta2 - old_carry */
   assign( tres, binop(minus,
                       binop(minus,mkexpr(ta1),mkexpr(ta2)),
                       mkexpr(oldcn)) );

   /* Possibly generate a store of 'tres' to 'taddr'.  See comment at
      start of this function. */
   if (taddr != IRTemp_INVALID) {
      if (texpVal == IRTemp_INVALID) {
         vassert(restart_point == 0);
         storeLE( mkexpr(taddr), mkexpr(tres) );
      } else {
         vassert(typeOfIRTemp(irsb->tyenv, texpVal) == ty);
         /* .. and hence 'texpVal' has the same type as 'tres'. */
         casLE( mkexpr(taddr),
                mkexpr(texpVal), mkexpr(tres), restart_point );
      }
   }

   /* Thunk: DEP1 = first arg, DEP2 = second arg XOR old carry,
      NDEP = old carry. */
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(thunkOp) ) );
   stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto32(mkexpr(ta1) )) );
   stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto32(binop(xor, mkexpr(ta2), 
                                                         mkexpr(oldcn)) )) );
   stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(oldc) ) );
}
+
+
+/* -------------- Helpers for disassembly printing. -------------- */
+
/* Mnemonic for a Group-1 opcode extension (modRM /0../7). */
static const HChar* nameGrp1 ( Int opc_aux )
{
   static const HChar* grp1_names[8] 
     = { "add", "or", "adc", "sbb", "and", "sub", "xor", "cmp" };
   if (opc_aux < 0 || opc_aux > 7) vpanic("nameGrp1(x86)");
   return grp1_names[opc_aux];
}
+
/* Mnemonic for a Group-2 (shift/rotate) opcode extension.
   NOTE(review): /6 is listed as "shl" — presumably the undocumented
   alias of /4 (sal); confirm before changing. */
static const HChar* nameGrp2 ( Int opc_aux )
{
   static const HChar* grp2_names[8] 
     = { "rol", "ror", "rcl", "rcr", "shl", "shr", "shl", "sar" };
   if (opc_aux < 0 || opc_aux > 7) vpanic("nameGrp2(x86)");
   return grp2_names[opc_aux];
}
+
/* Mnemonic for a Group-4 opcode extension (only /0 and /1 valid). */
static const HChar* nameGrp4 ( Int opc_aux )
{
   static const HChar* grp4_names[8] 
     = { "inc", "dec", "???", "???", "???", "???", "???", "???" };
   if (opc_aux < 0 || opc_aux > 1) vpanic("nameGrp4(x86)");
   return grp4_names[opc_aux];
}
+
/* Mnemonic for a Group-5 opcode extension (/0../6 valid). */
static const HChar* nameGrp5 ( Int opc_aux )
{
   static const HChar* grp5_names[8] 
     = { "inc", "dec", "call*", "call*", "jmp*", "jmp*", "push", "???" };
   if (opc_aux < 0 || opc_aux > 6) vpanic("nameGrp5(x86)");
   return grp5_names[opc_aux];
}
+
/* Mnemonic for a Group-8 (bit test) opcode extension (/4../7 valid). */
static const HChar* nameGrp8 ( Int opc_aux )
{
   static const HChar* grp8_names[8] 
     = { "???", "???", "???", "???", "bt", "bts", "btr", "btc" };
   if (opc_aux < 4 || opc_aux > 7) vpanic("nameGrp8(x86)");
   return grp8_names[opc_aux];
}
+
/* Name of integer register 'reg' (Intel encoding) at the given access
   size.  The 8-bit names for encodings 4..7 show both the high-byte
   register and, in braces, the register the same encoding means when
   a REX-less 32/16-bit context applies. */
static const HChar* nameIReg ( Int size, Int reg )
{
   static const HChar* ireg32_names[8] 
     = { "%eax", "%ecx", "%edx", "%ebx", 
         "%esp", "%ebp", "%esi", "%edi" };
   static const HChar* ireg16_names[8] 
     = { "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di" };
   static const HChar* ireg8_names[8] 
     = { "%al", "%cl", "%dl", "%bl", 
         "%ah{sp}", "%ch{bp}", "%dh{si}", "%bh{di}" };
   if (reg < 0 || reg > 7) goto bad;
   switch (size) {
      case 4: return ireg32_names[reg];
      case 2: return ireg16_names[reg];
      case 1: return ireg8_names[reg];
   }
  bad:
   vpanic("nameIReg(X86)");
   return NULL; /*notreached*/
}
+
+/* Printable name of segment register 'sreg'. */
+static const HChar* nameSReg ( UInt sreg )
+{
+   if (sreg == R_ES) return "%es";
+   if (sreg == R_CS) return "%cs";
+   if (sreg == R_SS) return "%ss";
+   if (sreg == R_DS) return "%ds";
+   if (sreg == R_FS) return "%fs";
+   if (sreg == R_GS) return "%gs";
+   vpanic("nameSReg(x86)");
+   return NULL; /*notreached*/
+}
+
+/* Printable name of MMX register 'mmxreg' (0 .. 7). */
+static const HChar* nameMMXReg ( Int mmxreg )
+{
+   static const HChar* mmx_names[8] 
+     = { "%mm0", "%mm1", "%mm2", "%mm3", "%mm4", "%mm5", "%mm6", "%mm7" };
+   if (mmxreg >= 0 && mmxreg <= 7)
+      return mmx_names[mmxreg];
+   vpanic("nameMMXReg(x86,guest)");
+}
+
+/* Printable name of XMM register 'xmmreg' (0 .. 7). */
+static const HChar* nameXMMReg ( Int xmmreg )
+{
+   static const HChar* xmm_names[8] 
+     = { "%xmm0", "%xmm1", "%xmm2", "%xmm3", 
+         "%xmm4", "%xmm5", "%xmm6", "%xmm7" };
+   if (xmmreg >= 0 && xmmreg <= 7)
+      return xmm_names[xmmreg];
+   vpanic("name_of_xmm_reg");
+}
+ 
+/* Printable suffix for MMX granularity 'gran': 0 -> byte, 1 -> word,
+   2 -> doubleword, 3 -> quadword. */
+static const HChar* nameMMXGran ( Int gran )
+{
+   static const HChar* gran_names[4] = { "b", "w", "d", "q" };
+   if (gran >= 0 && gran <= 3)
+      return gran_names[gran];
+   vpanic("nameMMXGran(x86,guest)");
+}
+
+/* AT&T-style operand-size suffix for an integer operation of 'size'
+   bytes: 4 -> 'l', 2 -> 'w', 1 -> 'b'. */
+static HChar nameISize ( Int size )
+{
+   if (size == 4) return 'l';
+   if (size == 2) return 'w';
+   if (size == 1) return 'b';
+   vpanic("nameISize(x86)");
+   return '?'; /*notreached*/
+}
+
+
+/*------------------------------------------------------------*/
+/*--- JMP helpers                                          ---*/
+/*------------------------------------------------------------*/
+
+/* End the current translation with an unconditional jump of kind
+   'kind' to the literal guest address 'd32'. */
+static void jmp_lit( /*MOD*/DisResult* dres,
+                     IRJumpKind kind, Addr32 d32 )
+{
+   vassert(dres->whatNext    == Dis_Continue);
+   vassert(dres->len         == 0);
+   vassert(dres->continueAt  == 0);
+   vassert(dres->jk_StopHere == Ijk_INVALID);
+   /* Write the destination to EIP, then mark the block finished. */
+   stmt( IRStmt_Put( OFFB_EIP, mkU32(d32) ) );
+   dres->jk_StopHere = kind;
+   dres->whatNext    = Dis_StopHere;
+}
+
+/* End the current translation with an unconditional jump of kind
+   'kind' to the guest address held in temporary 't'. */
+static void jmp_treg( /*MOD*/DisResult* dres,
+                      IRJumpKind kind, IRTemp t )
+{
+   vassert(dres->whatNext    == Dis_Continue);
+   vassert(dres->len         == 0);
+   vassert(dres->continueAt  == 0);
+   vassert(dres->jk_StopHere == Ijk_INVALID);
+   /* Write the destination to EIP, then mark the block finished. */
+   stmt( IRStmt_Put( OFFB_EIP, mkexpr(t) ) );
+   dres->jk_StopHere = kind;
+   dres->whatNext    = Dis_StopHere;
+}
+
+/* End the current translation with a conditional branch: if 'cond'
+   holds, control goes to 'd32_true', else to 'd32_false'.  The
+   condition is first converted to its "positive" form so that the IR
+   side-exit always tests a positively-stated condition. */
+static 
+void jcc_01( /*MOD*/DisResult* dres,
+             X86Condcode cond, Addr32 d32_false, Addr32 d32_true )
+{
+   Bool        invert;
+   X86Condcode condPos;
+   vassert(dres->whatNext    == Dis_Continue);
+   vassert(dres->len         == 0);
+   vassert(dres->continueAt  == 0);
+   vassert(dres->jk_StopHere == Ijk_INVALID);
+   dres->whatNext    = Dis_StopHere;
+   dres->jk_StopHere = Ijk_Boring;
+   condPos = positiveIse_X86Condcode ( cond, &invert );
+   {
+      /* If the positivisation inverted the sense of the condition,
+         the side-exit must go to the false target instead. */
+      Addr32 exitTo = invert ? d32_false : d32_true;
+      Addr32 fallTo = invert ? d32_true  : d32_false;
+      stmt( IRStmt_Exit( mk_x86g_calculate_condition(condPos),
+                         Ijk_Boring,
+                         IRConst_U32(exitTo),
+                         OFFB_EIP ) );
+      stmt( IRStmt_Put( OFFB_EIP, mkU32(fallTo) ) );
+   }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Disassembling addressing modes                       ---*/
+/*------------------------------------------------------------*/
+
+/* Printable (AT&T-syntax) form of a segment-override prefix byte,
+   for disassembly output.  Returns "" when there is no override.
+   Note each override carries a trailing ':' since it is glued
+   directly onto the front of the memory operand text. */
+static 
+const HChar* sorbTxt ( UChar sorb )
+{
+   switch (sorb) {
+      case 0:    return ""; /* no override */
+      case 0x3E: return "%ds:";  /* was "%ds" -- missing the ':' */
+      case 0x26: return "%es:";
+      case 0x64: return "%fs:";
+      case 0x65: return "%gs:";
+      default: vpanic("sorbTxt(x86,guest)");
+   }
+}
+
+
+/* 'virtual' is an IRExpr* holding a virtual address.  Convert it to a
+   linear address by adding any required segment override as indicated
+   by 'sorb'.  With no override (sorb == 0) the address is returned
+   unchanged; otherwise IR is emitted to call the segment-translation
+   helper and to side-exit with Ijk_MapFail if translation fails. */
+static
+IRExpr* handleSegOverride ( UChar sorb, IRExpr* virtual )
+{
+   Int    sreg;
+   IRType hWordTy;
+   IRTemp ldtp, gdtp, selector, res64;
+
+   /* Common case: no override prefix in force. */
+   if (sorb == 0)
+      return virtual;
+
+   if (sorb == 0x3E)      sreg = R_DS;
+   else if (sorb == 0x26) sreg = R_ES;
+   else if (sorb == 0x64) sreg = R_FS;
+   else if (sorb == 0x65) sreg = R_GS;
+   else vpanic("handleSegOverride(x86,guest)");
+
+   hWordTy = sizeof(HWord)==4 ? Ity_I32 : Ity_I64;
+
+   selector = newTemp(Ity_I32);
+   ldtp     = newTemp(hWordTy);
+   gdtp     = newTemp(hWordTy);
+   res64    = newTemp(Ity_I64);
+
+   assign( selector, unop(Iop_16Uto32, getSReg(sreg)) );
+   assign( ldtp, IRExpr_Get( OFFB_LDT, hWordTy ));
+   assign( gdtp, IRExpr_Get( OFFB_GDT, hWordTy ));
+
+   /*
+   Call this to do the translation and limit checks: 
+   ULong x86g_use_seg_selector ( HWord ldt, HWord gdt,
+                                 UInt seg_selector, UInt virtual_addr )
+   */
+   assign( 
+      res64, 
+      mkIRExprCCall( 
+         Ity_I64, 
+         0/*regparms*/, 
+         "x86g_use_seg_selector", 
+         &x86g_use_seg_selector, 
+         mkIRExprVec_4( mkexpr(ldtp), mkexpr(gdtp), 
+                        mkexpr(selector), virtual)
+      )
+   );
+
+   /* A non-zero upper 32 bits of the result signals a failure of
+      address translation, in which case exit the block immediately,
+      restarting at this instruction. */
+   stmt( 
+      IRStmt_Exit(
+         binop(Iop_CmpNE32, unop(Iop_64HIto32, mkexpr(res64)), mkU32(0)),
+         Ijk_MapFail,
+         IRConst_U32( guest_EIP_curr_instr ),
+         OFFB_EIP
+      )
+   );
+
+   /* The low 32 bits carry the translated (linear) address. */
+   return unop(Iop_64to32, mkexpr(res64));
+}
+
+
+/* Generate IR to calculate an address indicated by a ModRM and
+   following SIB bytes.  The expression, and the number of bytes in
+   the address mode, are returned.  Note that this fn should not be
+   called if the R/M part of the address denotes a register instead of
+   memory.  If print_codegen is true, text of the addressing mode is
+   placed in buf. 
+
+   The computed address is stored in a new tempreg, and the
+   identity of the tempreg is returned.  */
+
+/* Park the computed 32-bit address expression in a fresh temporary
+   and return that temporary. */
+static IRTemp disAMode_copy2tmp ( IRExpr* addr32 )
+{
+   IRTemp res = newTemp(Ity_I32);
+   assign( res, addr32 );
+   return res;
+}
+
+/* Decode the amode starting at guest offset 'delta', with segment
+   override 'sorb' in force.  Sets *len to the number of amode bytes
+   consumed and writes the printable form into 'buf'.  The decode key
+   below is (mod << 3) | rm, built by squeezing the reg field out of
+   the modRM byte. */
+static 
+IRTemp disAMode ( Int* len, UChar sorb, Int delta, HChar* buf )
+{
+   UChar mod_reg_rm = getIByte(delta);
+   delta++;
+
+   buf[0] = (UChar)0;
+
+   /* squeeze out the reg field from mod_reg_rm, since a 256-entry
+      jump table seems a bit excessive. 
+   */
+   mod_reg_rm &= 0xC7;                      /* is now XX000YYY */
+   mod_reg_rm  = toUChar(mod_reg_rm | (mod_reg_rm >> 3));  
+                                            /* is now XX0XXYYY */
+   mod_reg_rm &= 0x1F;                      /* is now 000XXYYY */
+   switch (mod_reg_rm) {
+
+      /* (%eax) .. (%edi), not including (%esp) or (%ebp).
+         --> GET %reg, t 
+      */
+      case 0x00: case 0x01: case 0x02: case 0x03: 
+      /* ! 04 */ /* ! 05 */ case 0x06: case 0x07:
+         { UChar rm = mod_reg_rm;
+           DIS(buf, "%s(%s)", sorbTxt(sorb), nameIReg(4,rm));
+           *len = 1;
+           return disAMode_copy2tmp(
+                  handleSegOverride(sorb, getIReg(4,rm)));
+         }
+
+      /* d8(%eax) ... d8(%edi), not including d8(%esp) 
+         --> GET %reg, t ; ADDL d8, t
+      */
+      case 0x08: case 0x09: case 0x0A: case 0x0B: 
+      /* ! 0C */ case 0x0D: case 0x0E: case 0x0F:
+         { UChar rm = toUChar(mod_reg_rm & 7);
+           /* d is the sign-extended 8-bit displacement. */
+           UInt  d  = getSDisp8(delta);
+           DIS(buf, "%s%d(%s)", sorbTxt(sorb), (Int)d, nameIReg(4,rm));
+           *len = 2;
+           return disAMode_copy2tmp(
+                  handleSegOverride(sorb,
+                     binop(Iop_Add32,getIReg(4,rm),mkU32(d))));
+         }
+
+      /* d32(%eax) ... d32(%edi), not including d32(%esp)
+         --> GET %reg, t ; ADDL d8, t
+      */
+      case 0x10: case 0x11: case 0x12: case 0x13: 
+      /* ! 14 */ case 0x15: case 0x16: case 0x17:
+         { UChar rm = toUChar(mod_reg_rm & 7);
+           UInt  d  = getUDisp32(delta);
+           DIS(buf, "%s0x%x(%s)", sorbTxt(sorb), (Int)d, nameIReg(4,rm));
+           *len = 5;
+           return disAMode_copy2tmp(
+                  handleSegOverride(sorb,
+                     binop(Iop_Add32,getIReg(4,rm),mkU32(d))));
+         }
+
+      /* a register, %eax .. %edi.  This shouldn't happen. */
+      case 0x18: case 0x19: case 0x1A: case 0x1B:
+      case 0x1C: case 0x1D: case 0x1E: case 0x1F:
+         vpanic("disAMode(x86): not an addr!");
+
+      /* a 32-bit literal address
+         --> MOV d32, tmp 
+      */
+      case 0x05: 
+         { UInt d = getUDisp32(delta);
+           *len = 5;
+           DIS(buf, "%s(0x%x)", sorbTxt(sorb), d);
+           return disAMode_copy2tmp( 
+                     handleSegOverride(sorb, mkU32(d)));
+         }
+
+      case 0x04: {
+         /* SIB, with no displacement.  Special cases:
+            -- %esp cannot act as an index value.  
+               If index_r indicates %esp, zero is used for the index.
+            -- when mod is zero and base indicates EBP, base is instead
+               a 32-bit literal.
+            It's all madness, I tell you.  Extract %index, %base and 
+            scale from the SIB byte.  The value denoted is then:
+               | %index == %ESP && %base == %EBP
+               = d32 following SIB byte
+               | %index == %ESP && %base != %EBP
+               = %base
+               | %index != %ESP && %base == %EBP
+               = d32 following SIB byte + (%index << scale)
+               | %index != %ESP && %base != %ESP
+               = %base + (%index << scale)
+
+            What happens to the souls of CPU architects who dream up such
+            horrendous schemes, do you suppose?  
+         */
+         UChar sib     = getIByte(delta);
+         UChar scale   = toUChar((sib >> 6) & 3);
+         UChar index_r = toUChar((sib >> 3) & 7);
+         UChar base_r  = toUChar(sib & 7);
+         delta++;
+
+         if (index_r != R_ESP && base_r != R_EBP) {
+            DIS(buf, "%s(%s,%s,%d)", sorbTxt(sorb), 
+                      nameIReg(4,base_r), nameIReg(4,index_r), 1<<scale);
+            *len = 2;
+            return
+               disAMode_copy2tmp( 
+               handleSegOverride(sorb,
+                  binop(Iop_Add32, 
+                        getIReg(4,base_r),
+                        binop(Iop_Shl32, getIReg(4,index_r),
+                              mkU8(scale)))));
+         }
+
+         if (index_r != R_ESP && base_r == R_EBP) {
+            UInt d = getUDisp32(delta);
+            DIS(buf, "%s0x%x(,%s,%d)", sorbTxt(sorb), d, 
+                      nameIReg(4,index_r), 1<<scale);
+            *len = 6;
+            return
+               disAMode_copy2tmp(
+               handleSegOverride(sorb, 
+                  binop(Iop_Add32,
+                        binop(Iop_Shl32, getIReg(4,index_r), mkU8(scale)),
+                        mkU32(d))));
+         }
+
+         if (index_r == R_ESP && base_r != R_EBP) {
+            DIS(buf, "%s(%s,,)", sorbTxt(sorb), nameIReg(4,base_r));
+            *len = 2;
+            return disAMode_copy2tmp(
+                   handleSegOverride(sorb, getIReg(4,base_r)));
+         }
+
+         if (index_r == R_ESP && base_r == R_EBP) {
+            UInt d = getUDisp32(delta);
+            DIS(buf, "%s0x%x(,,)", sorbTxt(sorb), d);
+            *len = 6;
+            return disAMode_copy2tmp(
+                   handleSegOverride(sorb, mkU32(d)));
+         }
+         /*NOTREACHED*/
+         vassert(0);
+      }
+
+      /* SIB, with 8-bit displacement.  Special cases:
+         -- %esp cannot act as an index value.  
+            If index_r indicates %esp, zero is used for the index.
+         Denoted value is:
+            | %index == %ESP
+            = d8 + %base
+            | %index != %ESP
+            = d8 + %base + (%index << scale)
+      */
+      case 0x0C: {
+         UChar sib     = getIByte(delta);
+         UChar scale   = toUChar((sib >> 6) & 3);
+         UChar index_r = toUChar((sib >> 3) & 7);
+         UChar base_r  = toUChar(sib & 7);
+         UInt  d       = getSDisp8(delta+1);
+
+         if (index_r == R_ESP) {
+            DIS(buf, "%s%d(%s,,)", sorbTxt(sorb), 
+                                   (Int)d, nameIReg(4,base_r));
+            *len = 3;
+            return disAMode_copy2tmp(
+                   handleSegOverride(sorb, 
+                      binop(Iop_Add32, getIReg(4,base_r), mkU32(d)) ));
+         } else {
+            DIS(buf, "%s%d(%s,%s,%d)", sorbTxt(sorb), (Int)d, 
+                     nameIReg(4,base_r), nameIReg(4,index_r), 1<<scale);
+            *len = 3;
+            return 
+                disAMode_copy2tmp(
+                handleSegOverride(sorb,
+                  binop(Iop_Add32,
+                        binop(Iop_Add32, 
+                              getIReg(4,base_r), 
+                              binop(Iop_Shl32, 
+                                    getIReg(4,index_r), mkU8(scale))),
+                        mkU32(d))));
+         }
+	 /*NOTREACHED*/
+         vassert(0);
+      }
+
+      /* SIB, with 32-bit displacement.  Special cases:
+         -- %esp cannot act as an index value.  
+            If index_r indicates %esp, zero is used for the index.
+         Denoted value is:
+            | %index == %ESP
+            = d32 + %base
+            | %index != %ESP
+            = d32 + %base + (%index << scale)
+      */
+      case 0x14: {
+         UChar sib     = getIByte(delta);
+         UChar scale   = toUChar((sib >> 6) & 3);
+         UChar index_r = toUChar((sib >> 3) & 7);
+         UChar base_r  = toUChar(sib & 7);
+         UInt d        = getUDisp32(delta+1);
+
+         if (index_r == R_ESP) {
+            DIS(buf, "%s%d(%s,,)", sorbTxt(sorb), 
+                                   (Int)d, nameIReg(4,base_r));
+            *len = 6;
+            return disAMode_copy2tmp(
+                   handleSegOverride(sorb, 
+                      binop(Iop_Add32, getIReg(4,base_r), mkU32(d)) ));
+         } else {
+            DIS(buf, "%s%d(%s,%s,%d)", sorbTxt(sorb), (Int)d, 
+                     nameIReg(4,base_r), nameIReg(4,index_r), 1<<scale);
+            *len = 6;
+            return 
+                disAMode_copy2tmp(
+                handleSegOverride(sorb,
+                  binop(Iop_Add32,
+                        binop(Iop_Add32, 
+                              getIReg(4,base_r), 
+                              binop(Iop_Shl32, 
+                                    getIReg(4,index_r), mkU8(scale))),
+                        mkU32(d))));
+         }
+	 /*NOTREACHED*/
+         vassert(0);
+      }
+
+      default:
+         vpanic("disAMode(x86)");
+         return 0; /*notreached*/
+   }
+}
+
+
+/* Figure out the number of (insn-stream) bytes constituting the amode
+   beginning at delta.  Is useful for getting hold of literals beyond
+   the end of the amode before it has been disassembled.  */
+
+static UInt lengthAMode ( Int delta )
+{
+   UChar modRM = getIByte(delta);
+   UChar mod   = toUChar((modRM >> 6) & 3);
+   UChar rm    = toUChar(modRM & 7);
+   delta++;
+
+   switch (mod) {
+
+      case 0:
+         if (rm == 4) {
+            /* SIB, no displacement -- except that base == %ebp means
+               a d32 follows the SIB byte. */
+            UChar sib    = getIByte(delta);
+            UChar base_r = toUChar(sib & 7);
+            return base_r == R_EBP ? 6 : 2;
+         }
+         /* rm == 5 is a 32-bit literal address; anything else is a
+            plain (%reg). */
+         return rm == 5 ? 5 : 1;
+
+      case 1:
+         /* d8(SIB) needs 3 bytes; d8(%reg) needs 2. */
+         return rm == 4 ? 3 : 2;
+
+      case 2:
+         /* d32(SIB) needs 6 bytes; d32(%reg) needs 5. */
+         return rm == 4 ? 6 : 5;
+
+      default:
+         /* mod == 3: a register, %eax .. %edi.  (Not an addr, but
+            still handled.) */
+         return 1;
+   }
+}
+
+/*------------------------------------------------------------*/
+/*--- Disassembling common idioms                          ---*/
+/*------------------------------------------------------------*/
+
+/* Handle binary integer instructions of the form
+      op E, G  meaning
+      op reg-or-mem, reg
+   Is passed the a ptr to the modRM byte, the actual operation, and the
+   data size.  Returns the address advanced completely over this
+   instruction.
+
+   E(src) is reg-or-mem
+   G(dst) is reg.
+
+   If E is reg, -->    GET %G,  tmp
+                       OP %E,   tmp
+                       PUT tmp, %G
+ 
+   If E is mem and OP is not reversible, 
+                -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmpa
+                       GET %G, tmp2
+                       OP tmpa, tmp2
+                       PUT tmp2, %G
+
+   If E is mem and OP is reversible
+                -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmpa
+                       OP %G, tmpa
+                       PUT tmpa, %G
+*/
+/* See the scheme described in the comment above.  Returns delta
+   advanced past this instruction; sorb is any segment override in
+   force; addSubCarry selects the ADC/SBB variants; keep==False means
+   compute flags but discard the result (CMP/TEST-style). */
+static
+UInt dis_op2_E_G ( UChar       sorb,
+                   Bool        addSubCarry,
+                   IROp        op8, 
+                   Bool        keep,
+                   Int         size, 
+                   Int         delta0,
+                   const HChar* t_x86opc )
+{
+   HChar   dis_buf[50];
+   Int     len;
+   IRType  ty   = szToITy(size);
+   IRTemp  dst1 = newTemp(ty);
+   IRTemp  src  = newTemp(ty);
+   IRTemp  dst0 = newTemp(ty);
+   UChar   rm   = getUChar(delta0);
+   IRTemp  addr = IRTemp_INVALID;
+
+   /* addSubCarry == True indicates the intended operation is
+      add-with-carry or subtract-with-borrow. */
+   if (addSubCarry) {
+      vassert(op8 == Iop_Add8 || op8 == Iop_Sub8);
+      vassert(keep);
+   }
+
+   if (epartIsReg(rm)) {
+      /* Specially handle XOR reg,reg, because that doesn't really
+         depend on reg, and doing the obvious thing potentially
+         generates a spurious value check failure due to the bogus
+         dependency.  Ditto SBB reg,reg. */
+      if ((op8 == Iop_Xor8 || (op8 == Iop_Sub8 && addSubCarry))
+          && gregOfRM(rm) == eregOfRM(rm)) {
+         /* NB: the register is zeroed BEFORE the reads below, so the
+            subsequent GETs observe zero -- do not reorder. */
+         putIReg(size, gregOfRM(rm), mkU(ty,0));
+      }
+      assign( dst0, getIReg(size,gregOfRM(rm)) );
+      assign( src,  getIReg(size,eregOfRM(rm)) );
+
+      if (addSubCarry && op8 == Iop_Add8) {
+         helper_ADC( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIReg(size, gregOfRM(rm), mkexpr(dst1));
+      } else
+      if (addSubCarry && op8 == Iop_Sub8) {
+         helper_SBB( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIReg(size, gregOfRM(rm), mkexpr(dst1));
+      } else {
+         assign( dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)) );
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+         if (keep)
+            putIReg(size, gregOfRM(rm), mkexpr(dst1));
+      }
+
+      DIP("%s%c %s,%s\n", t_x86opc, nameISize(size), 
+                          nameIReg(size,eregOfRM(rm)),
+                          nameIReg(size,gregOfRM(rm)));
+      return 1+delta0;
+   } else {
+      /* E refers to memory */
+      addr = disAMode ( &len, sorb, delta0, dis_buf);
+      assign( dst0, getIReg(size,gregOfRM(rm)) );
+      assign( src,  loadLE(szToITy(size), mkexpr(addr)) );
+
+      if (addSubCarry && op8 == Iop_Add8) {
+         helper_ADC( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIReg(size, gregOfRM(rm), mkexpr(dst1));
+      } else
+      if (addSubCarry && op8 == Iop_Sub8) {
+         helper_SBB( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIReg(size, gregOfRM(rm), mkexpr(dst1));
+      } else {
+         assign( dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)) );
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+         if (keep)
+            putIReg(size, gregOfRM(rm), mkexpr(dst1));
+      }
+
+      DIP("%s%c %s,%s\n", t_x86opc, nameISize(size), 
+                          dis_buf,nameIReg(size,gregOfRM(rm)));
+      return len+delta0;
+   }
+}
+
+
+
+/* Handle binary integer instructions of the form
+      op G, E  meaning
+      op reg, reg-or-mem
+   Is passed the a ptr to the modRM byte, the actual operation, and the
+   data size.  Returns the address advanced completely over this
+   instruction.
+
+   G(src) is reg.
+   E(dst) is reg-or-mem
+
+   If E is reg, -->    GET %E,  tmp
+                       OP %G,   tmp
+                       PUT tmp, %E
+ 
+   If E is mem, -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmpv
+                       OP %G, tmpv
+                       ST tmpv, (tmpa)
+*/
+/* See the scheme described in the comment above.  As dis_op2_E_G but
+   the destination is reg-or-mem; 'locked' selects a CAS-style store
+   for the memory-destination case (LOCK prefix). */
+static
+UInt dis_op2_G_E ( UChar       sorb,
+                   Bool        locked,
+                   Bool        addSubCarry,
+                   IROp        op8, 
+                   Bool        keep,
+                   Int         size, 
+                   Int         delta0,
+                   const HChar* t_x86opc )
+{
+   HChar   dis_buf[50];
+   Int     len;
+   IRType  ty   = szToITy(size);
+   IRTemp  dst1 = newTemp(ty);
+   IRTemp  src  = newTemp(ty);
+   IRTemp  dst0 = newTemp(ty);
+   UChar   rm   = getIByte(delta0);
+   IRTemp  addr = IRTemp_INVALID;
+
+   /* addSubCarry == True indicates the intended operation is
+      add-with-carry or subtract-with-borrow. */
+   if (addSubCarry) {
+      vassert(op8 == Iop_Add8 || op8 == Iop_Sub8);
+      vassert(keep);
+   }
+
+   if (epartIsReg(rm)) {
+      /* Specially handle XOR reg,reg, because that doesn't really
+         depend on reg, and doing the obvious thing potentially
+         generates a spurious value check failure due to the bogus
+         dependency.  Ditto SBB reg,reg.*/
+      if ((op8 == Iop_Xor8 || (op8 == Iop_Sub8 && addSubCarry))
+          && gregOfRM(rm) == eregOfRM(rm)) {
+         /* NB: zeroed BEFORE the reads below -- do not reorder. */
+         putIReg(size, eregOfRM(rm), mkU(ty,0));
+      }
+      assign(dst0, getIReg(size,eregOfRM(rm)));
+      assign(src,  getIReg(size,gregOfRM(rm)));
+
+      if (addSubCarry && op8 == Iop_Add8) {
+         helper_ADC( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIReg(size, eregOfRM(rm), mkexpr(dst1));
+      } else
+      if (addSubCarry && op8 == Iop_Sub8) {
+         helper_SBB( size, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+         putIReg(size, eregOfRM(rm), mkexpr(dst1));
+      } else {
+         assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)));
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+         if (keep)
+            putIReg(size, eregOfRM(rm), mkexpr(dst1));
+      }
+
+      DIP("%s%c %s,%s\n", t_x86opc, nameISize(size), 
+                          nameIReg(size,gregOfRM(rm)),
+                          nameIReg(size,eregOfRM(rm)));
+      return 1+delta0;
+   }
+
+   /* E refers to memory */    
+   {
+      addr = disAMode ( &len, sorb, delta0, dis_buf);
+      assign(dst0, loadLE(ty,mkexpr(addr)));
+      assign(src,  getIReg(size,gregOfRM(rm)));
+
+      if (addSubCarry && op8 == Iop_Add8) {
+         if (locked) {
+            /* cas-style store */
+            helper_ADC( size, dst1, dst0, src,
+                        /*store*/addr, dst0/*expVal*/, guest_EIP_curr_instr );
+         } else {
+            /* normal store */
+            helper_ADC( size, dst1, dst0, src,
+                        /*store*/addr, IRTemp_INVALID, 0 );
+         }
+      } else
+      if (addSubCarry && op8 == Iop_Sub8) {
+         if (locked) {
+            /* cas-style store */
+            helper_SBB( size, dst1, dst0, src,
+                        /*store*/addr, dst0/*expVal*/, guest_EIP_curr_instr );
+         } else {
+            /* normal store */
+            helper_SBB( size, dst1, dst0, src,
+                        /*store*/addr, IRTemp_INVALID, 0 );
+         }
+      } else {
+         assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)));
+         if (keep) {
+            if (locked) {
+               if (0) vex_printf("locked case\n" );
+               casLE( mkexpr(addr),
+                      mkexpr(dst0)/*expval*/, 
+                      mkexpr(dst1)/*newval*/, guest_EIP_curr_instr );
+            } else {
+               if (0) vex_printf("nonlocked case\n");
+               storeLE(mkexpr(addr), mkexpr(dst1));
+            }
+         }
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+      }
+
+      DIP("%s%c %s,%s\n", t_x86opc, nameISize(size), 
+                          nameIReg(size,gregOfRM(rm)), dis_buf);
+      return len+delta0;
+   }
+}
+
+
+/* Handle move instructions of the form
+      mov E, G  meaning
+      mov reg-or-mem, reg
+   Is passed the a ptr to the modRM byte, and the data size.  Returns
+   the address advanced completely over this instruction.
+
+   E(src) is reg-or-mem
+   G(dst) is reg.
+
+   If E is reg, -->    GET %E,  tmpv
+                       PUT tmpv, %G
+ 
+   If E is mem  -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmpb
+                       PUT tmpb, %G
+*/
+static
+UInt dis_mov_E_G ( UChar       sorb,
+                   Int         size, 
+                   Int         delta0 )
+{
+   UChar rm = getIByte(delta0);
+
+   if (epartIsReg(rm)) {
+      /* Register-to-register move. */
+      putIReg(size, gregOfRM(rm), getIReg(size, eregOfRM(rm)));
+      DIP("mov%c %s,%s\n", nameISize(size), 
+                           nameIReg(size,eregOfRM(rm)),
+                           nameIReg(size,gregOfRM(rm)));
+      return delta0+1;
+   } else {
+      /* Memory-to-register move. */
+      Int    alen;
+      HChar  dis_buf[50];
+      IRTemp addr = disAMode ( &alen, sorb, delta0, dis_buf );
+      putIReg(size, gregOfRM(rm), loadLE(szToITy(size), mkexpr(addr)));
+      DIP("mov%c %s,%s\n", nameISize(size), 
+                           dis_buf,nameIReg(size,gregOfRM(rm)));
+      return delta0+alen;
+   }
+}
+
+
+/* Handle move instructions of the form
+      mov G, E  meaning
+      mov reg, reg-or-mem
+   Is passed the a ptr to the modRM byte, and the data size.  Returns
+   the address advanced completely over this instruction.
+
+   G(src) is reg.
+   E(dst) is reg-or-mem
+
+   If E is reg, -->    GET %G,  tmp
+                       PUT tmp, %E
+ 
+   If E is mem, -->    (getAddr E) -> tmpa
+                       GET %G, tmpv
+                       ST tmpv, (tmpa) 
+*/
+static
+UInt dis_mov_G_E ( UChar       sorb,
+                   Int         size, 
+                   Int         delta0 )
+{
+   UChar rm = getIByte(delta0);
+
+   if (epartIsReg(rm)) {
+      /* Register-to-register move. */
+      putIReg(size, eregOfRM(rm), getIReg(size, gregOfRM(rm)));
+      DIP("mov%c %s,%s\n", nameISize(size), 
+                           nameIReg(size,gregOfRM(rm)),
+                           nameIReg(size,eregOfRM(rm)));
+      return delta0+1;
+   } else {
+      /* Register-to-memory move. */
+      Int    alen;
+      HChar  dis_buf[50];
+      IRTemp addr = disAMode ( &alen, sorb, delta0, dis_buf);
+      storeLE( mkexpr(addr), getIReg(size, gregOfRM(rm)) );
+      DIP("mov%c %s,%s\n", nameISize(size), 
+                           nameIReg(size,gregOfRM(rm)), dis_buf);
+      return delta0+alen;
+   }
+}
+
+
+/* op $immediate, AL/AX/EAX. */
+static
+UInt dis_op_imm_A ( Int    size,
+                    Bool   carrying,
+                    IROp   op8,
+                    Bool   keep,
+                    Int    delta,
+                    const HChar* t_x86opc )
+{
+   IRType ty   = szToITy(size);
+   IRTemp argL = newTemp(ty);   /* old accumulator value */
+   IRTemp argR = newTemp(ty);   /* the immediate */
+   IRTemp res  = newTemp(ty);
+   UInt   imm  = getUDisp(size,delta);
+   assign(argL, getIReg(size,R_EAX));
+   assign(argR, mkU(ty,imm));
+
+   if (isAddSub(op8) && !carrying) {
+      assign(res, binop(mkSizedOp(ty,op8), mkexpr(argL), mkexpr(argR)) );
+      setFlags_DEP1_DEP2(op8, argL, argR, ty);
+   }
+   else if (isLogic(op8)) {
+      vassert(!carrying);
+      assign(res, binop(mkSizedOp(ty,op8), mkexpr(argL), mkexpr(argR)) );
+      setFlags_DEP1(op8, res, ty);
+   }
+   else if (op8 == Iop_Add8 && carrying) {
+      helper_ADC( size, res, argL, argR,
+                  /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+   }
+   else if (op8 == Iop_Sub8 && carrying) {
+      helper_SBB( size, res, argL, argR,
+                  /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+   }
+   else
+      vpanic("dis_op_imm_A(x86,guest)");
+
+   if (keep)
+      putIReg(size, R_EAX, mkexpr(res));
+
+   DIP("%s%c $0x%x, %s\n", t_x86opc, nameISize(size), 
+                           imm, nameIReg(size,R_EAX));
+   return delta+size;
+}
+
+
+/* Sign- and Zero-extending moves. */
+static
+UInt dis_movx_E_G ( UChar      sorb,
+                    Int delta, Int szs, Int szd, Bool sign_extend )
+{
+   UChar rm = getIByte(delta);
+   if (epartIsReg(rm)) {
+      IRExpr* srcE = getIReg(szs,eregOfRM(rm));
+      /* szd == szs is a degenerate (mutant) encoding -- a plain move,
+         no widening.  See #250799. */
+      putIReg(szd, gregOfRM(rm),
+                   szd == szs
+                      ? srcE
+                      : unop(mkWidenOp(szs,szd,sign_extend), srcE));
+      DIP("mov%c%c%c %s,%s\n", sign_extend ? 's' : 'z',
+                               nameISize(szs), nameISize(szd),
+                               nameIReg(szs,eregOfRM(rm)),
+                               nameIReg(szd,gregOfRM(rm)));
+      return delta+1;
+   } else {
+      /* E refers to memory */
+      Int     alen;
+      HChar   dis_buf[50];
+      IRTemp  addr = disAMode ( &alen, sorb, delta, dis_buf );
+      IRExpr* srcE = loadLE(szToITy(szs),mkexpr(addr));
+      /* Same mutant-encoding special case as above. */
+      putIReg(szd, gregOfRM(rm),
+                   szd == szs
+                      ? srcE
+                      : unop(mkWidenOp(szs,szd,sign_extend), srcE));
+      DIP("mov%c%c%c %s,%s\n", sign_extend ? 's' : 'z',
+                               nameISize(szs), nameISize(szd),
+                               dis_buf, nameIReg(szd,gregOfRM(rm)));
+      return delta+alen;
+   }
+}
+
+
+/* Generate code to divide ArchRegs EDX:EAX / DX:AX / AX by the 32 /
+   16 / 8 bit quantity in the given IRTemp.  Everything is routed
+   through a single 64/32 DivMod IR op; narrower cases widen the
+   dividend and divisor first.  The DivMod result has the quotient in
+   its low half and the remainder in its high half, which land in
+   EAX/AX/AL and EDX/DX/AH respectively. */
+static
+void codegen_div ( Int sz, IRTemp t, Bool signed_divide )
+{
+   IROp   op    = signed_divide ? Iop_DivModS64to32 : Iop_DivModU64to32;
+   IRTemp src64 = newTemp(Ity_I64);
+   IRTemp dst64 = newTemp(Ity_I64);
+   switch (sz) {
+      case 4:
+         /* Dividend is EDX:EAX directly. */
+         assign( src64, binop(Iop_32HLto64, 
+                              getIReg(4,R_EDX), getIReg(4,R_EAX)) );
+         assign( dst64, binop(op, mkexpr(src64), mkexpr(t)) );
+         putIReg( 4, R_EAX, unop(Iop_64to32,mkexpr(dst64)) );
+         putIReg( 4, R_EDX, unop(Iop_64HIto32,mkexpr(dst64)) );
+         break;
+      case 2: {
+         /* Widen DX:AX to 64 bits and the divisor to 32 bits so the
+            same 64/32 DivMod op can be used. */
+         IROp widen3264 = signed_divide ? Iop_32Sto64 : Iop_32Uto64;
+         IROp widen1632 = signed_divide ? Iop_16Sto32 : Iop_16Uto32;
+         assign( src64, unop(widen3264,
+                             binop(Iop_16HLto32, 
+                                   getIReg(2,R_EDX), getIReg(2,R_EAX))) );
+         assign( dst64, binop(op, mkexpr(src64), unop(widen1632,mkexpr(t))) );
+         putIReg( 2, R_EAX, unop(Iop_32to16,unop(Iop_64to32,mkexpr(dst64))) );
+         putIReg( 2, R_EDX, unop(Iop_32to16,unop(Iop_64HIto32,mkexpr(dst64))) );
+         break;
+      }
+      case 1: {
+         /* Dividend is AX; quotient goes to AL, remainder to AH. */
+         IROp widen3264 = signed_divide ? Iop_32Sto64 : Iop_32Uto64;
+         IROp widen1632 = signed_divide ? Iop_16Sto32 : Iop_16Uto32;
+         IROp widen816  = signed_divide ? Iop_8Sto16  : Iop_8Uto16;
+         assign( src64, unop(widen3264, unop(widen1632, getIReg(2,R_EAX))) );
+         assign( dst64, 
+                 binop(op, mkexpr(src64), 
+                           unop(widen1632, unop(widen816, mkexpr(t)))) );
+         putIReg( 1, R_AL, unop(Iop_16to8, unop(Iop_32to16,
+                           unop(Iop_64to32,mkexpr(dst64)))) );
+         putIReg( 1, R_AH, unop(Iop_16to8, unop(Iop_32to16,
+                           unop(Iop_64HIto32,mkexpr(dst64)))) );
+         break;
+      }
+      default: vpanic("codegen_div(x86)");
+   }
+}
+
+
+/* Group 1 extended opcodes: <op> $imm, E, where <op> is one of
+   ADD/OR/ADC/SBB/AND/SUB/XOR/CMP, selected by the reg field of the
+   modrm byte.
+     sorb    segment-override prefix byte (0 if none)
+     locked  True iff a LOCK prefix was present; memory-destination
+             forms then store via a CAS-style sequence
+     delta   points at the modrm byte
+     am_sz   length in bytes of the addressing mode
+     d_sz    length in bytes of the immediate
+     sz      operand size: 1, 2 or 4
+     d32     the immediate value
+   Returns the updated delta. */
+static 
+UInt dis_Grp1 ( UChar sorb, Bool locked,
+                Int delta, UChar modrm, 
+                Int am_sz, Int d_sz, Int sz, UInt d32 )
+{
+   Int     len;
+   HChar   dis_buf[50];
+   IRType  ty   = szToITy(sz);
+   IRTemp  dst1 = newTemp(ty);
+   IRTemp  src  = newTemp(ty);
+   IRTemp  dst0 = newTemp(ty);
+   IRTemp  addr = IRTemp_INVALID;
+   IROp    op8  = Iop_INVALID;
+   UInt    mask = sz==1 ? 0xFF : (sz==2 ? 0xFFFF : 0xFFFFFFFF);
+
+   /* ADC (2) and SBB (3) leave op8 == Iop_INVALID; they are handled
+      by dedicated helpers below since they also read the carry.
+      CMP (7) reuses Iop_Sub8 but never writes the result back. */
+   switch (gregOfRM(modrm)) {
+      case 0: op8 = Iop_Add8; break;  case 1: op8 = Iop_Or8;  break;
+      case 2: break;  // ADC
+      case 3: break;  // SBB
+      case 4: op8 = Iop_And8; break;  case 5: op8 = Iop_Sub8; break;
+      case 6: op8 = Iop_Xor8; break;  case 7: op8 = Iop_Sub8; break;
+      /*NOTREACHED*/
+      default: vpanic("dis_Grp1: unhandled case");
+   }
+
+   if (epartIsReg(modrm)) {
+      vassert(am_sz == 1);
+
+      assign(dst0, getIReg(sz,eregOfRM(modrm)));
+      assign(src,  mkU(ty,d32 & mask));
+
+      if (gregOfRM(modrm) == 2 /* ADC */) {
+         helper_ADC( sz, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+      } else 
+      if (gregOfRM(modrm) == 3 /* SBB */) {
+         helper_SBB( sz, dst1, dst0, src,
+                     /*no store*/IRTemp_INVALID, IRTemp_INVALID, 0 );
+      } else {
+         assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)));
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+      }
+
+      /* CMP (7) sets flags only; suppress the register writeback. */
+      if (gregOfRM(modrm) < 7)
+         putIReg(sz, eregOfRM(modrm), mkexpr(dst1));
+
+      delta += (am_sz + d_sz);
+      DIP("%s%c $0x%x, %s\n", nameGrp1(gregOfRM(modrm)), nameISize(sz), d32, 
+                              nameIReg(sz,eregOfRM(modrm)));
+   } else {
+      addr = disAMode ( &len, sorb, delta, dis_buf);
+
+      assign(dst0, loadLE(ty,mkexpr(addr)));
+      assign(src, mkU(ty,d32 & mask));
+
+      if (gregOfRM(modrm) == 2 /* ADC */) {
+         if (locked) {
+            /* cas-style store */
+            helper_ADC( sz, dst1, dst0, src,
+                       /*store*/addr, dst0/*expVal*/, guest_EIP_curr_instr );
+         } else {
+            /* normal store */
+            helper_ADC( sz, dst1, dst0, src,
+                        /*store*/addr, IRTemp_INVALID, 0 );
+         }
+      } else 
+      if (gregOfRM(modrm) == 3 /* SBB */) {
+         if (locked) {
+            /* cas-style store */
+            helper_SBB( sz, dst1, dst0, src,
+                       /*store*/addr, dst0/*expVal*/, guest_EIP_curr_instr );
+         } else {
+            /* normal store */
+            helper_SBB( sz, dst1, dst0, src,
+                        /*store*/addr, IRTemp_INVALID, 0 );
+         }
+      } else {
+         assign(dst1, binop(mkSizedOp(ty,op8), mkexpr(dst0), mkexpr(src)));
+         /* CMP (7) sets flags only; suppress the memory writeback. */
+         if (gregOfRM(modrm) < 7) {
+            if (locked) {
+               casLE( mkexpr(addr), mkexpr(dst0)/*expVal*/, 
+                                    mkexpr(dst1)/*newVal*/,
+                                    guest_EIP_curr_instr );
+            } else {
+               storeLE(mkexpr(addr), mkexpr(dst1));
+            }
+         }
+         if (isAddSub(op8))
+            setFlags_DEP1_DEP2(op8, dst0, src, ty);
+         else
+            setFlags_DEP1(op8, dst1, ty);
+      }
+
+      delta += (len+d_sz);
+      DIP("%s%c $0x%x, %s\n", nameGrp1(gregOfRM(modrm)), nameISize(sz),
+                              d32, dis_buf);
+   }
+   return delta;
+}
+
+
+/* Group 2 extended opcodes: rotates and shifts
+   (ROL/ROR/RCL/RCR/SHL/SHR/SAL/SAR), selected by the reg field of
+   modrm.  shift_expr must be an 8-bit typed expression giving the
+   shift/rotate amount; shift_expr_txt, if non-NULL, is its printable
+   form for tracing.  *decode_OK is set True (no undecodable forms
+   reach here currently).  Returns the updated delta. */
+
+static
+UInt dis_Grp2 ( UChar sorb,
+                Int delta, UChar modrm,
+                Int am_sz, Int d_sz, Int sz, IRExpr* shift_expr,
+                const HChar* shift_expr_txt, Bool* decode_OK )
+{
+   /* delta on entry points at the modrm byte. */
+   HChar  dis_buf[50];
+   Int    len;
+   Bool   isShift, isRotate, isRotateC;
+   IRType ty    = szToITy(sz);
+   IRTemp dst0  = newTemp(ty);
+   IRTemp dst1  = newTemp(ty);
+   IRTemp addr  = IRTemp_INVALID;
+
+   *decode_OK = True;
+
+   vassert(sz == 1 || sz == 2 || sz == 4);
+
+   /* Put value to shift/rotate in dst0. */
+   if (epartIsReg(modrm)) {
+      assign(dst0, getIReg(sz, eregOfRM(modrm)));
+      delta += (am_sz + d_sz);
+   } else {
+      addr = disAMode ( &len, sorb, delta, dis_buf);
+      assign(dst0, loadLE(ty,mkexpr(addr)));
+      delta += len + d_sz;
+   }
+
+   /* Classify the sub-opcode.  In each switch the cases deliberately
+      fall through; they all just set the same flag. */
+   isShift = False;
+   switch (gregOfRM(modrm)) { case 4: case 5: case 6: case 7: isShift = True; }
+
+   isRotate = False;
+   switch (gregOfRM(modrm)) { case 0: case 1: isRotate = True; }
+
+   isRotateC = False;
+   switch (gregOfRM(modrm)) { case 2: case 3: isRotateC = True; }
+
+   if (!isShift && !isRotate && !isRotateC) {
+      /*NOTREACHED*/
+      vpanic("dis_Grp2(Reg): unhandled case(x86)");
+   }
+
+   if (isRotateC) {
+      /* RCL/RCR: call a helper; these insns are so ridiculous they do
+         not deserve better */
+      Bool     left = toBool(gregOfRM(modrm) == 2);
+      IRTemp   r64  = newTemp(Ity_I64);
+      IRExpr** args 
+         = mkIRExprVec_4( widenUto32(mkexpr(dst0)), /* thing to rotate */
+                          widenUto32(shift_expr),   /* rotate amount */
+                          widenUto32(mk_x86g_calculate_eflags_all()),
+                          mkU32(sz) );
+      assign( r64, mkIRExprCCall(
+                      Ity_I64, 
+                      0/*regparm*/, 
+                      left ? "x86g_calculate_RCL" : "x86g_calculate_RCR", 
+                      left ? &x86g_calculate_RCL  : &x86g_calculate_RCR,
+                      args
+                   )
+            );
+      /* new eflags in hi half r64; new value in lo half r64 */
+      assign( dst1, narrowTo(ty, unop(Iop_64to32, mkexpr(r64))) );
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, unop(Iop_64HIto32, mkexpr(r64)) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+      /* Set NDEP even though it isn't used.  This makes redundant-PUT
+         elimination of previous stores to this field work better. */
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+   }
+
+   if (isShift) {
+
+      IRTemp pre32     = newTemp(Ity_I32);
+      IRTemp res32     = newTemp(Ity_I32);
+      IRTemp res32ss   = newTemp(Ity_I32);
+      IRTemp shift_amt = newTemp(Ity_I8);
+      IROp   op32;
+
+      switch (gregOfRM(modrm)) { 
+         case 4: op32 = Iop_Shl32; break;
+         case 5: op32 = Iop_Shr32; break;
+         case 6: op32 = Iop_Shl32; break;  /* SAL is an alias of SHL */
+         case 7: op32 = Iop_Sar32; break;
+         /*NOTREACHED*/
+         default: vpanic("dis_Grp2:shift"); break;
+      }
+
+      /* Widen the value to be shifted to 32 bits, do the shift, and
+         narrow back down.  This seems surprisingly long-winded, but
+         unfortunately the Intel semantics requires that 8/16-bit
+         shifts give defined results for shift values all the way up
+         to 31, and this seems the simplest way to do it.  It has the
+         advantage that the only IR level shifts generated are of 32
+         bit values, and the shift amount is guaranteed to be in the
+         range 0 .. 31, thereby observing the IR semantics requiring
+         all shift values to be in the range 0 .. 2^word_size-1. */
+
+      /* shift_amt = shift_expr & 31, regardless of operation size */
+      assign( shift_amt, binop(Iop_And8, shift_expr, mkU8(31)) );
+
+      /* suitably widen the value to be shifted to 32 bits. */
+      assign( pre32, op32==Iop_Sar32 ? widenSto32(mkexpr(dst0))
+                                     : widenUto32(mkexpr(dst0)) );
+
+      /* res32 = pre32 `shift` shift_amt */
+      assign( res32, binop(op32, mkexpr(pre32), mkexpr(shift_amt)) );
+
+      /* res32ss = pre32 `shift` ((shift_amt - 1) & 31); the
+         "shift by one less" value, needed by the flags thunk. */
+      assign( res32ss,
+              binop(op32,
+                    mkexpr(pre32), 
+                    binop(Iop_And8,
+                          binop(Iop_Sub8,
+                                mkexpr(shift_amt), mkU8(1)),
+                          mkU8(31))) );
+
+      /* Build the flags thunk. */
+      setFlags_DEP1_DEP2_shift(op32, res32, res32ss, ty, shift_amt);
+
+      /* Narrow the result back down. */
+      assign( dst1, narrowTo(ty, mkexpr(res32)) );
+
+   } /* if (isShift) */
+
+   else 
+   if (isRotate) {
+      Int    ccOp      = ty==Ity_I8 ? 0 : (ty==Ity_I16 ? 1 : 2);
+      Bool   left      = toBool(gregOfRM(modrm) == 0);
+      IRTemp rot_amt   = newTemp(Ity_I8);
+      IRTemp rot_amt32 = newTemp(Ity_I8);
+      IRTemp oldFlags  = newTemp(Ity_I32);
+
+      /* rot_amt = shift_expr & mask */
+      /* By masking the rotate amount thusly, the IR-level Shl/Shr
+         expressions never shift beyond the word size and thus remain
+         well defined. */
+      assign(rot_amt32, binop(Iop_And8, shift_expr, mkU8(31)));
+
+      if (ty == Ity_I32)
+         assign(rot_amt, mkexpr(rot_amt32));
+      else
+         assign(rot_amt, binop(Iop_And8, mkexpr(rot_amt32), mkU8(8*sz-1)));
+
+      if (left) {
+
+         /* dst1 = (dst0 << rot_amt) | (dst0 >>u (wordsize-rot_amt)) */
+         assign(dst1, 
+            binop( mkSizedOp(ty,Iop_Or8),
+                   binop( mkSizedOp(ty,Iop_Shl8), 
+                          mkexpr(dst0),
+                          mkexpr(rot_amt)
+                   ),
+                   binop( mkSizedOp(ty,Iop_Shr8), 
+                          mkexpr(dst0), 
+                          binop(Iop_Sub8,mkU8(8*sz), mkexpr(rot_amt))
+                   )
+            )
+         );
+         ccOp += X86G_CC_OP_ROLB;
+
+      } else { /* right */
+
+         /* dst1 = (dst0 >>u rot_amt) | (dst0 << (wordsize-rot_amt)) */
+         assign(dst1, 
+            binop( mkSizedOp(ty,Iop_Or8),
+                   binop( mkSizedOp(ty,Iop_Shr8), 
+                          mkexpr(dst0),
+                          mkexpr(rot_amt)
+                   ),
+                   binop( mkSizedOp(ty,Iop_Shl8), 
+                          mkexpr(dst0), 
+                          binop(Iop_Sub8,mkU8(8*sz), mkexpr(rot_amt))
+                   )
+            )
+         );
+         ccOp += X86G_CC_OP_RORB;
+
+      }
+
+      /* dst1 now holds the rotated value.  Build flag thunk.  We
+         need the resulting value for this, and the previous flags.
+         Except don't set it if the rotate count is zero. */
+
+      assign(oldFlags, mk_x86g_calculate_eflags_all());
+
+      /* rot_amt32 :: Ity_I8.  We need to convert it to I1. */
+      IRTemp rot_amt32b = newTemp(Ity_I1);
+      assign(rot_amt32b, binop(Iop_CmpNE8, mkexpr(rot_amt32), mkU8(0)) );
+
+      /* CC_DEP1 is the rotated value.  CC_NDEP is flags before.
+         Each field is only updated if the rotate amount is nonzero
+         (hence the ITEs guarded by rot_amt32b). */
+      stmt( IRStmt_Put( OFFB_CC_OP,
+                        IRExpr_ITE( mkexpr(rot_amt32b),
+                                    mkU32(ccOp),
+                                    IRExpr_Get(OFFB_CC_OP,Ity_I32) ) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, 
+                        IRExpr_ITE( mkexpr(rot_amt32b),
+                                    widenUto32(mkexpr(dst1)),
+                                    IRExpr_Get(OFFB_CC_DEP1,Ity_I32) ) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, 
+                        IRExpr_ITE( mkexpr(rot_amt32b),
+                                    mkU32(0),
+                                    IRExpr_Get(OFFB_CC_DEP2,Ity_I32) ) ));
+      stmt( IRStmt_Put( OFFB_CC_NDEP, 
+                        IRExpr_ITE( mkexpr(rot_amt32b),
+                                    mkexpr(oldFlags),
+                                    IRExpr_Get(OFFB_CC_NDEP,Ity_I32) ) ));
+   } /* if (isRotate) */
+
+   /* Save result, and finish up. */
+   if (epartIsReg(modrm)) {
+      putIReg(sz, eregOfRM(modrm), mkexpr(dst1));
+      if (vex_traceflags & VEX_TRACE_FE) {
+         vex_printf("%s%c ",
+                    nameGrp2(gregOfRM(modrm)), nameISize(sz) );
+         if (shift_expr_txt)
+            vex_printf("%s", shift_expr_txt);
+         else
+            ppIRExpr(shift_expr);
+         vex_printf(", %s\n", nameIReg(sz,eregOfRM(modrm)));
+      }
+   } else {
+      storeLE(mkexpr(addr), mkexpr(dst1));
+      if (vex_traceflags & VEX_TRACE_FE) {
+         vex_printf("%s%c ",
+                    nameGrp2(gregOfRM(modrm)), nameISize(sz) );
+         if (shift_expr_txt)
+            vex_printf("%s", shift_expr_txt);
+         else
+            ppIRExpr(shift_expr);
+         vex_printf(", %s\n", dis_buf);
+      }
+   }
+   return delta;
+}
+
+
+/* Group 8 extended opcodes (but BT/BTS/BTC/BTR only), with an
+   immediate bit-offset operand.
+     sorb     segment-override prefix byte (0 if none)
+     locked   True iff a LOCK prefix was present; memory forms then
+              store via a CAS
+     delta    points at the modrm byte
+     am_sz    length in bytes of the addressing mode
+     sz       operand size (must be 2 or 4; 1 is rejected)
+     src_val  the immediate bit offset (a d8)
+   *decode_OK is cleared on any undecodable form.  Returns the
+   updated delta. */
+static
+UInt dis_Grp8_Imm ( UChar sorb,
+                    Bool locked,
+                    Int delta, UChar modrm,
+                    Int am_sz, Int sz, UInt src_val,
+                    Bool* decode_OK )
+{
+   /* src_val denotes a d8.
+      And delta on entry points at the modrm byte. */
+
+   IRType ty     = szToITy(sz);
+   IRTemp t2     = newTemp(Ity_I32);
+   IRTemp t2m    = newTemp(Ity_I32);
+   IRTemp t_addr = IRTemp_INVALID;
+   HChar  dis_buf[50];
+   UInt   mask;
+
+   /* we're optimists :-) */
+   *decode_OK = True;
+
+   /* Limit src_val -- the bit offset -- to something within a word.
+      The Intel docs say that literal offsets larger than a word are
+      masked in this way. */
+   switch (sz) {
+      case 2:  src_val &= 15; break;
+      case 4:  src_val &= 31; break;
+      default: *decode_OK = False; return delta;
+   }
+
+   /* Invent a mask suitable for the operation: OR it in for BTS,
+      AND it for BTR, XOR it for BTC; BT modifies nothing. */
+   switch (gregOfRM(modrm)) {
+      case 4: /* BT */  mask = 0;               break;
+      case 5: /* BTS */ mask = 1 << src_val;    break;
+      case 6: /* BTR */ mask = ~(1 << src_val); break;
+      case 7: /* BTC */ mask = 1 << src_val;    break;
+         /* If this needs to be extended, probably simplest to make a
+            new function to handle the other cases (0 .. 3).  The
+            Intel docs do however not indicate any use for 0 .. 3, so
+            we don't expect this to happen. */
+      default: *decode_OK = False; return delta;
+   }
+
+   /* Fetch the value to be tested and modified into t2, which is
+      32-bits wide regardless of sz. */
+   if (epartIsReg(modrm)) {
+      vassert(am_sz == 1);
+      assign( t2, widenUto32(getIReg(sz, eregOfRM(modrm))) );
+      delta += (am_sz + 1);  /* +1 for the immediate byte */
+      DIP("%s%c $0x%x, %s\n", nameGrp8(gregOfRM(modrm)), nameISize(sz),
+                              src_val, nameIReg(sz,eregOfRM(modrm)));
+   } else {
+      Int len;
+      t_addr = disAMode ( &len, sorb, delta, dis_buf);
+      delta  += (len+1);  /* +1 for the immediate byte */
+      assign( t2, widenUto32(loadLE(ty, mkexpr(t_addr))) );
+      DIP("%s%c $0x%x, %s\n", nameGrp8(gregOfRM(modrm)), nameISize(sz),
+                              src_val, dis_buf);
+   }
+
+   /* Compute the new value into t2m, if non-BT. */
+   switch (gregOfRM(modrm)) {
+      case 4: /* BT */
+         break;
+      case 5: /* BTS */
+         assign( t2m, binop(Iop_Or32, mkU32(mask), mkexpr(t2)) );
+         break;
+      case 6: /* BTR */
+         assign( t2m, binop(Iop_And32, mkU32(mask), mkexpr(t2)) );
+         break;
+      case 7: /* BTC */
+         assign( t2m, binop(Iop_Xor32, mkU32(mask), mkexpr(t2)) );
+         break;
+      default: 
+         /*NOTREACHED*/ /*the previous switch guards this*/
+         vassert(0);
+   }
+
+   /* Write the result back, if non-BT.  If the CAS fails then we
+      side-exit from the trace at this point, and so the flag state is
+      not affected.  This is of course as required. */
+   if (gregOfRM(modrm) != 4 /* BT */) {
+      if (epartIsReg(modrm)) {
+         putIReg(sz, eregOfRM(modrm), narrowTo(ty, mkexpr(t2m)));
+      } else {
+         if (locked) {
+            casLE( mkexpr(t_addr),
+                   narrowTo(ty, mkexpr(t2))/*expd*/,
+                   narrowTo(ty, mkexpr(t2m))/*new*/,
+                   guest_EIP_curr_instr );
+         } else {
+            storeLE(mkexpr(t_addr), narrowTo(ty, mkexpr(t2m)));
+         }
+      }
+   }
+
+   /* Copy relevant bit from t2 into the carry flag. */
+   /* Flags: C=selected bit, O,S,Z,A,P undefined, so are set to zero. */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+   stmt( IRStmt_Put( 
+            OFFB_CC_DEP1,
+            binop(Iop_And32,
+                  binop(Iop_Shr32, mkexpr(t2), mkU8(src_val)),
+                  mkU32(1))
+       ));
+   /* Set NDEP even though it isn't used.  This makes redundant-PUT
+      elimination of previous stores to this field work better. */
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+
+   return delta;
+}
+
+
+/* Signed/unsigned widening multiply.  Generate IR to multiply the
+   value in EAX/AX/AL by the given IRTemp, and park the result in
+   EDX:EAX/DX:AX/AX.
+   sz       operand size in bytes (4, 2 or 1)
+   syned    True for IMUL, False for MUL
+   tmp      the multiplier; must have the IR type matching sz
+   tmp_txt  printable form of the multiplier, for tracing
+*/
+static void codegen_mulL_A_D ( Int sz, Bool syned, 
+                               IRTemp tmp, const HChar* tmp_txt )
+{
+   IRType ty = szToITy(sz);
+   IRTemp t1 = newTemp(ty);
+
+   /* t1 = the multiplicand, held in EAX/AX/AL. */
+   assign( t1, getIReg(sz, R_EAX) );
+
+   switch (ty) {
+      case Ity_I32: {
+         /* 32x32 -> 64: high half to EDX, low half to EAX. */
+         IRTemp res64   = newTemp(Ity_I64);
+         IRTemp resHi   = newTemp(Ity_I32);
+         IRTemp resLo   = newTemp(Ity_I32);
+         IROp   mulOp   = syned ? Iop_MullS32 : Iop_MullU32;
+         /* Byte-sized base op; setFlags_MUL selects the variant
+            appropriate to the operand type. */
+         UInt   tBaseOp = syned ? X86G_CC_OP_SMULB : X86G_CC_OP_UMULB;
+         setFlags_MUL ( Ity_I32, t1, tmp, tBaseOp );
+         assign( res64, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
+         assign( resHi, unop(Iop_64HIto32,mkexpr(res64)));
+         assign( resLo, unop(Iop_64to32,mkexpr(res64)));
+         putIReg(4, R_EDX, mkexpr(resHi));
+         putIReg(4, R_EAX, mkexpr(resLo));
+         break;
+      }
+      case Ity_I16: {
+         /* 16x16 -> 32: high half to DX, low half to AX. */
+         IRTemp res32   = newTemp(Ity_I32);
+         IRTemp resHi   = newTemp(Ity_I16);
+         IRTemp resLo   = newTemp(Ity_I16);
+         IROp   mulOp   = syned ? Iop_MullS16 : Iop_MullU16;
+         UInt   tBaseOp = syned ? X86G_CC_OP_SMULB : X86G_CC_OP_UMULB;
+         setFlags_MUL ( Ity_I16, t1, tmp, tBaseOp );
+         assign( res32, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
+         assign( resHi, unop(Iop_32HIto16,mkexpr(res32)));
+         assign( resLo, unop(Iop_32to16,mkexpr(res32)));
+         putIReg(2, R_EDX, mkexpr(resHi));
+         putIReg(2, R_EAX, mkexpr(resLo));
+         break;
+      }
+      case Ity_I8: {
+         /* 8x8 -> 16: the entire 16-bit product goes to AX, so there
+            is no need to split it into high/low halves.  (Previously
+            this case also created and assigned resHi/resLo temps
+            whose values were never used; that dead code has been
+            removed.) */
+         IRTemp res16   = newTemp(Ity_I16);
+         IROp   mulOp   = syned ? Iop_MullS8 : Iop_MullU8;
+         UInt   tBaseOp = syned ? X86G_CC_OP_SMULB : X86G_CC_OP_UMULB;
+         setFlags_MUL ( Ity_I8, t1, tmp, tBaseOp );
+         assign( res16, binop(mulOp, mkexpr(t1), mkexpr(tmp)) );
+         putIReg(2, R_EAX, mkexpr(res16));
+         break;
+      }
+      default:
+         vpanic("codegen_mulL_A_D(x86)");
+   }
+   DIP("%s%c %s\n", syned ? "imul" : "mul", nameISize(sz), tmp_txt);
+}
+
+
+/* Group 3 extended opcodes: TEST/NOT/NEG/MUL/IMUL/DIV/IDIV on E,
+   selected by the reg field of modrm (sub-opcode 1 is undefined).
+   locked handles a LOCK prefix (allowed only for NOT and NEG, which
+   then store via a CAS).  delta points at the modrm byte.
+   *decode_OK is cleared for undecodable forms.  Returns the updated
+   delta. */
+static 
+UInt dis_Grp3 ( UChar sorb, Bool locked, Int sz, Int delta, Bool* decode_OK )
+{
+   UInt    d32;
+   UChar   modrm;
+   HChar   dis_buf[50];
+   Int     len;
+   IRTemp  addr;
+   IRType  ty = szToITy(sz);
+   IRTemp  t1 = newTemp(ty);
+   IRTemp dst1, src, dst0;
+
+   *decode_OK = True; /* may change this later */
+
+   modrm = getIByte(delta);
+
+   if (locked && (gregOfRM(modrm) != 2 && gregOfRM(modrm) != 3)) {
+      /* LOCK prefix only allowed with not and neg subopcodes */
+      *decode_OK = False;
+      return delta;
+   }
+
+   if (epartIsReg(modrm)) {
+      switch (gregOfRM(modrm)) {
+         case 0: { /* TEST: AND operands, set flags, discard result */
+            delta++; d32 = getUDisp(sz, delta); delta += sz;
+            dst1 = newTemp(ty);
+            assign(dst1, binop(mkSizedOp(ty,Iop_And8),
+                               getIReg(sz,eregOfRM(modrm)),
+                               mkU(ty,d32)));
+            setFlags_DEP1( Iop_And8, dst1, ty );
+            DIP("test%c $0x%x, %s\n", nameISize(sz), d32, 
+                                      nameIReg(sz, eregOfRM(modrm)));
+            break;
+         }
+         case 1: /* UNDEFINED */
+           /* The Intel docs imply this insn is undefined and binutils
+              agrees.  Unfortunately Core 2 will run it (with who
+              knows what result?)  sandpile.org reckons it's an alias
+              for case 0.  We play safe. */
+           *decode_OK = False;
+           break;
+         case 2: /* NOT */
+            delta++;
+            putIReg(sz, eregOfRM(modrm),
+                        unop(mkSizedOp(ty,Iop_Not8),
+                             getIReg(sz, eregOfRM(modrm))));
+            DIP("not%c %s\n", nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
+            break;
+         case 3: /* NEG: computed as 0 - operand */
+            delta++;
+            dst0 = newTemp(ty);
+            src  = newTemp(ty);
+            dst1 = newTemp(ty);
+            assign(dst0, mkU(ty,0));
+            assign(src,  getIReg(sz,eregOfRM(modrm)));
+            assign(dst1, binop(mkSizedOp(ty,Iop_Sub8), mkexpr(dst0), mkexpr(src)));
+            setFlags_DEP1_DEP2(Iop_Sub8, dst0, src, ty);
+            putIReg(sz, eregOfRM(modrm), mkexpr(dst1));
+            DIP("neg%c %s\n", nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
+            break;
+         case 4: /* MUL (unsigned widening) */
+            delta++;
+            src = newTemp(ty);
+            assign(src, getIReg(sz,eregOfRM(modrm)));
+            codegen_mulL_A_D ( sz, False, src, nameIReg(sz,eregOfRM(modrm)) );
+            break;
+         case 5: /* IMUL (signed widening) */
+            delta++;
+            src = newTemp(ty);
+            assign(src, getIReg(sz,eregOfRM(modrm)));
+            codegen_mulL_A_D ( sz, True, src, nameIReg(sz,eregOfRM(modrm)) );
+            break;
+         case 6: /* DIV */
+            delta++;
+            assign( t1, getIReg(sz, eregOfRM(modrm)) );
+            codegen_div ( sz, t1, False );
+            DIP("div%c %s\n", nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
+            break;
+         case 7: /* IDIV */
+            delta++;
+            assign( t1, getIReg(sz, eregOfRM(modrm)) );
+            codegen_div ( sz, t1, True );
+            DIP("idiv%c %s\n", nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
+            break;
+         default: 
+            /* This can't happen - gregOfRM should return 0 .. 7 only */
+            vpanic("Grp3(x86)");
+      }
+   } else {
+      /* E refers to memory: load the operand once into t1, then
+         dispatch on the sub-opcode as above. */
+      addr = disAMode ( &len, sorb, delta, dis_buf );
+      t1   = newTemp(ty);
+      delta += len;
+      assign(t1, loadLE(ty,mkexpr(addr)));
+      switch (gregOfRM(modrm)) {
+         case 0: { /* TEST */
+            d32 = getUDisp(sz, delta); delta += sz;
+            dst1 = newTemp(ty);
+            assign(dst1, binop(mkSizedOp(ty,Iop_And8),
+                               mkexpr(t1), mkU(ty,d32)));
+            setFlags_DEP1( Iop_And8, dst1, ty );
+            DIP("test%c $0x%x, %s\n", nameISize(sz), d32, dis_buf);
+            break;
+         }
+         case 1: /* UNDEFINED */
+           /* See comment above on R case */
+           *decode_OK = False;
+           break;
+         case 2: /* NOT */
+            dst1 = newTemp(ty);
+            assign(dst1, unop(mkSizedOp(ty,Iop_Not8), mkexpr(t1)));
+            if (locked) {
+               casLE( mkexpr(addr), mkexpr(t1)/*expd*/, mkexpr(dst1)/*new*/,
+                                    guest_EIP_curr_instr );
+            } else {
+               storeLE( mkexpr(addr), mkexpr(dst1) );
+            }
+            DIP("not%c %s\n", nameISize(sz), dis_buf);
+            break;
+         case 3: /* NEG */
+            dst0 = newTemp(ty);
+            src  = newTemp(ty);
+            dst1 = newTemp(ty);
+            assign(dst0, mkU(ty,0));
+            assign(src,  mkexpr(t1));
+            assign(dst1, binop(mkSizedOp(ty,Iop_Sub8),
+                               mkexpr(dst0), mkexpr(src)));
+            if (locked) {
+               casLE( mkexpr(addr), mkexpr(t1)/*expd*/, mkexpr(dst1)/*new*/,
+                                    guest_EIP_curr_instr );
+            } else {
+               storeLE( mkexpr(addr), mkexpr(dst1) );
+            }
+            setFlags_DEP1_DEP2(Iop_Sub8, dst0, src, ty);
+            DIP("neg%c %s\n", nameISize(sz), dis_buf);
+            break;
+         case 4: /* MUL */
+            codegen_mulL_A_D ( sz, False, t1, dis_buf );
+            break;
+         case 5: /* IMUL */
+            codegen_mulL_A_D ( sz, True, t1, dis_buf );
+            break;
+         case 6: /* DIV */
+            codegen_div ( sz, t1, False );
+            DIP("div%c %s\n", nameISize(sz), dis_buf);
+            break;
+         case 7: /* IDIV */
+            codegen_div ( sz, t1, True );
+            DIP("idiv%c %s\n", nameISize(sz), dis_buf);
+            break;
+         default: 
+            /* This can't happen - gregOfRM should return 0 .. 7 only */
+            vpanic("Grp3(x86)");
+      }
+   }
+   return delta;
+}
+
+
+/* Group 4 extended opcodes: INC/DEC on a byte-sized E, selected by
+   the reg field of modrm (only sub-opcodes 0 and 1 are valid).
+   locked handles a LOCK prefix (memory forms then store via a CAS).
+   delta points at the modrm byte.  *decode_OK is cleared for
+   undecodable forms.  Returns the updated delta. */
+static
+UInt dis_Grp4 ( UChar sorb, Bool locked, Int delta, Bool* decode_OK )
+{
+   Int   alen;
+   UChar modrm;
+   HChar dis_buf[50];
+   IRType ty = Ity_I8;
+   IRTemp t1 = newTemp(ty);
+   IRTemp t2 = newTemp(ty);
+
+   *decode_OK = True;
+
+   modrm = getIByte(delta);
+
+   if (locked && (gregOfRM(modrm) != 0 && gregOfRM(modrm) != 1)) {
+      /* LOCK prefix only allowed with inc and dec subopcodes */
+      *decode_OK = False;
+      return delta;
+   }
+
+   if (epartIsReg(modrm)) {
+      assign(t1, getIReg(1, eregOfRM(modrm)));
+      switch (gregOfRM(modrm)) {
+         case 0: /* INC */
+            assign(t2, binop(Iop_Add8, mkexpr(t1), mkU8(1)));
+            putIReg(1, eregOfRM(modrm), mkexpr(t2));
+            setFlags_INC_DEC( True, t2, ty );
+            break;
+         case 1: /* DEC */
+            assign(t2, binop(Iop_Sub8, mkexpr(t1), mkU8(1)));
+            putIReg(1, eregOfRM(modrm), mkexpr(t2));
+            setFlags_INC_DEC( False, t2, ty );
+            break;
+         default: 
+            *decode_OK = False;
+            return delta;
+      }
+      delta++;
+      DIP("%sb %s\n", nameGrp4(gregOfRM(modrm)),
+                      nameIReg(1, eregOfRM(modrm)));
+   } else {
+      /* E refers to memory. */
+      IRTemp addr = disAMode ( &alen, sorb, delta, dis_buf );
+      assign( t1, loadLE(ty, mkexpr(addr)) );
+      switch (gregOfRM(modrm)) {
+         case 0: /* INC */
+            assign(t2, binop(Iop_Add8, mkexpr(t1), mkU8(1)));
+            if (locked) {
+               casLE( mkexpr(addr), mkexpr(t1)/*expd*/, mkexpr(t2)/*new*/, 
+                      guest_EIP_curr_instr );
+            } else {
+               storeLE( mkexpr(addr), mkexpr(t2) );
+            }
+            setFlags_INC_DEC( True, t2, ty );
+            break;
+         case 1: /* DEC */
+            assign(t2, binop(Iop_Sub8, mkexpr(t1), mkU8(1)));
+            if (locked) {
+               casLE( mkexpr(addr), mkexpr(t1)/*expd*/, mkexpr(t2)/*new*/, 
+                      guest_EIP_curr_instr );
+            } else {
+               storeLE( mkexpr(addr), mkexpr(t2) );
+            }
+            setFlags_INC_DEC( False, t2, ty );
+            break;
+         default: 
+            *decode_OK = False;
+            return delta;
+      }
+      delta += alen;
+      DIP("%sb %s\n", nameGrp4(gregOfRM(modrm)), dis_buf);
+   }
+   return delta;
+}
+
+
+/* Group 5 extended opcodes. */
+static
+UInt dis_Grp5 ( UChar sorb, Bool locked, Int sz, Int delta, 
+                /*MOD*/DisResult* dres, /*OUT*/Bool* decode_OK )
+{
+   Int     len;
+   UChar   modrm;
+   HChar   dis_buf[50];
+   IRTemp  addr = IRTemp_INVALID;
+   IRType  ty = szToITy(sz);
+   IRTemp  t1 = newTemp(ty);
+   IRTemp  t2 = IRTemp_INVALID;
+
+   *decode_OK = True;
+
+   modrm = getIByte(delta);
+
+   if (locked && (gregOfRM(modrm) != 0 && gregOfRM(modrm) != 1)) {
+      /* LOCK prefix only allowed with inc and dec subopcodes */
+      *decode_OK = False;
+      return delta;
+   }
+
+   if (epartIsReg(modrm)) {
+      assign(t1, getIReg(sz,eregOfRM(modrm)));
+      switch (gregOfRM(modrm)) {
+         case 0: /* INC */ 
+            vassert(sz == 2 || sz == 4);
+            t2 = newTemp(ty);
+            assign(t2, binop(mkSizedOp(ty,Iop_Add8),
+                             mkexpr(t1), mkU(ty,1)));
+            setFlags_INC_DEC( True, t2, ty );
+            putIReg(sz,eregOfRM(modrm),mkexpr(t2));
+            break;
+         case 1: /* DEC */ 
+            vassert(sz == 2 || sz == 4);
+            t2 = newTemp(ty);
+            assign(t2, binop(mkSizedOp(ty,Iop_Sub8),
+                             mkexpr(t1), mkU(ty,1)));
+            setFlags_INC_DEC( False, t2, ty );
+            putIReg(sz,eregOfRM(modrm),mkexpr(t2));
+            break;
+         case 2: /* call Ev */
+            vassert(sz == 4);
+            t2 = newTemp(Ity_I32);
+            assign(t2, binop(Iop_Sub32, getIReg(4,R_ESP), mkU32(4)));
+            putIReg(4, R_ESP, mkexpr(t2));
+            storeLE( mkexpr(t2), mkU32(guest_EIP_bbstart+delta+1));
+            jmp_treg(dres, Ijk_Call, t1);
+            vassert(dres->whatNext == Dis_StopHere);
+            break;
+         case 4: /* jmp Ev */
+            vassert(sz == 4);
+            jmp_treg(dres, Ijk_Boring, t1);
+            vassert(dres->whatNext == Dis_StopHere);
+            break;
+         case 6: /* PUSH Ev */
+            vassert(sz == 4 || sz == 2);
+            t2 = newTemp(Ity_I32);
+            assign( t2, binop(Iop_Sub32,getIReg(4,R_ESP),mkU32(sz)) );
+            putIReg(4, R_ESP, mkexpr(t2) );
+            storeLE( mkexpr(t2), mkexpr(t1) );
+            break;
+         default: 
+            *decode_OK = False;
+            return delta;
+      }
+      delta++;
+      DIP("%s%c %s\n", nameGrp5(gregOfRM(modrm)),
+                       nameISize(sz), nameIReg(sz, eregOfRM(modrm)));
+   } else {
+      addr = disAMode ( &len, sorb, delta, dis_buf );
+      assign(t1, loadLE(ty,mkexpr(addr)));
+      switch (gregOfRM(modrm)) {
+         case 0: /* INC */ 
+            t2 = newTemp(ty);
+            assign(t2, binop(mkSizedOp(ty,Iop_Add8),
+                             mkexpr(t1), mkU(ty,1)));
+            if (locked) {
+               casLE( mkexpr(addr),
+                      mkexpr(t1), mkexpr(t2), guest_EIP_curr_instr );
+            } else {
+               storeLE(mkexpr(addr),mkexpr(t2));
+            }
+            setFlags_INC_DEC( True, t2, ty );
+            break;
+         case 1: /* DEC */ 
+            t2 = newTemp(ty);
+            assign(t2, binop(mkSizedOp(ty,Iop_Sub8),
+                             mkexpr(t1), mkU(ty,1)));
+            if (locked) {
+               casLE( mkexpr(addr),
+                      mkexpr(t1), mkexpr(t2), guest_EIP_curr_instr );
+            } else {
+               storeLE(mkexpr(addr),mkexpr(t2));
+            }
+            setFlags_INC_DEC( False, t2, ty );
+            break;
+         case 2: /* call Ev */
+            vassert(sz == 4);
+            t2 = newTemp(Ity_I32);
+            assign(t2, binop(Iop_Sub32, getIReg(4,R_ESP), mkU32(4)));
+            putIReg(4, R_ESP, mkexpr(t2));
+            storeLE( mkexpr(t2), mkU32(guest_EIP_bbstart+delta+len));
+            jmp_treg(dres, Ijk_Call, t1);
+            vassert(dres->whatNext == Dis_StopHere);
+            break;
+         case 4: /* JMP Ev */
+            vassert(sz == 4);
+            jmp_treg(dres, Ijk_Boring, t1);
+            vassert(dres->whatNext == Dis_StopHere);
+            break;
+         case 6: /* PUSH Ev */
+            vassert(sz == 4 || sz == 2);
+            t2 = newTemp(Ity_I32);
+            assign( t2, binop(Iop_Sub32,getIReg(4,R_ESP),mkU32(sz)) );
+            putIReg(4, R_ESP, mkexpr(t2) );
+            storeLE( mkexpr(t2), mkexpr(t1) );
+            break;
+         default: 
+            *decode_OK = False;
+            return delta;
+      }
+      delta += len;
+      DIP("%s%c %s\n", nameGrp5(gregOfRM(modrm)),
+                       nameISize(sz), dis_buf);
+   }
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Disassembling string ops (including REP prefixes)    ---*/
+/*------------------------------------------------------------*/
+
+/* Code shared by all the string ops */
+static
+void dis_string_op_increment(Int sz, Int t_inc)
+{
+   if (sz == 4 || sz == 2) {
+      assign( t_inc, 
+              binop(Iop_Shl32, IRExpr_Get( OFFB_DFLAG, Ity_I32 ),
+                               mkU8(sz/2) ) );
+   } else {
+      assign( t_inc, 
+              IRExpr_Get( OFFB_DFLAG, Ity_I32 ) );
+   }
+}
+
+static
+void dis_string_op( void (*dis_OP)( Int, IRTemp ), 
+                    Int sz, const HChar* name, UChar sorb )
+{
+   IRTemp t_inc = newTemp(Ity_I32);
+   vassert(sorb == 0); /* hmm.  so what was the point of passing it in? */
+   dis_string_op_increment(sz, t_inc);
+   dis_OP( sz, t_inc );
+   DIP("%s%c\n", name, nameISize(sz));
+}
+
+static 
+void dis_MOVS ( Int sz, IRTemp t_inc )
+{
+   IRType ty = szToITy(sz);
+   IRTemp td = newTemp(Ity_I32);   /* EDI */
+   IRTemp ts = newTemp(Ity_I32);   /* ESI */
+
+   assign( td, getIReg(4, R_EDI) );
+   assign( ts, getIReg(4, R_ESI) );
+
+   storeLE( mkexpr(td), loadLE(ty,mkexpr(ts)) );
+
+   putIReg( 4, R_EDI, binop(Iop_Add32, mkexpr(td), mkexpr(t_inc)) );
+   putIReg( 4, R_ESI, binop(Iop_Add32, mkexpr(ts), mkexpr(t_inc)) );
+}
+
+static 
+void dis_LODS ( Int sz, IRTemp t_inc )
+{
+   IRType ty = szToITy(sz);
+   IRTemp ts = newTemp(Ity_I32);   /* ESI */
+
+   assign( ts, getIReg(4, R_ESI) );
+
+   putIReg( sz, R_EAX, loadLE(ty, mkexpr(ts)) );
+
+   putIReg( 4, R_ESI, binop(Iop_Add32, mkexpr(ts), mkexpr(t_inc)) );
+}
+
+static 
+void dis_STOS ( Int sz, IRTemp t_inc )
+{
+   IRType ty = szToITy(sz);
+   IRTemp ta = newTemp(ty);        /* EAX */
+   IRTemp td = newTemp(Ity_I32);   /* EDI */
+
+   assign( ta, getIReg(sz, R_EAX) );
+   assign( td, getIReg(4, R_EDI) );
+
+   storeLE( mkexpr(td), mkexpr(ta) );
+
+   putIReg( 4, R_EDI, binop(Iop_Add32, mkexpr(td), mkexpr(t_inc)) );
+}
+
+static 
+void dis_CMPS ( Int sz, IRTemp t_inc )
+{
+   IRType ty  = szToITy(sz);
+   IRTemp tdv = newTemp(ty);      /* (EDI) */
+   IRTemp tsv = newTemp(ty);      /* (ESI) */
+   IRTemp td  = newTemp(Ity_I32); /*  EDI  */
+   IRTemp ts  = newTemp(Ity_I32); /*  ESI  */
+
+   assign( td, getIReg(4, R_EDI) );
+   assign( ts, getIReg(4, R_ESI) );
+
+   assign( tdv, loadLE(ty,mkexpr(td)) );
+   assign( tsv, loadLE(ty,mkexpr(ts)) );
+
+   setFlags_DEP1_DEP2 ( Iop_Sub8, tsv, tdv, ty );
+
+   putIReg(4, R_EDI, binop(Iop_Add32, mkexpr(td), mkexpr(t_inc)) );
+   putIReg(4, R_ESI, binop(Iop_Add32, mkexpr(ts), mkexpr(t_inc)) );
+}
+
+static 
+void dis_SCAS ( Int sz, IRTemp t_inc )
+{
+   IRType ty  = szToITy(sz);
+   IRTemp ta  = newTemp(ty);       /*  EAX  */
+   IRTemp td  = newTemp(Ity_I32);  /*  EDI  */
+   IRTemp tdv = newTemp(ty);       /* (EDI) */
+
+   assign( ta, getIReg(sz, R_EAX) );
+   assign( td, getIReg(4, R_EDI) );
+
+   assign( tdv, loadLE(ty,mkexpr(td)) );
+   setFlags_DEP1_DEP2 ( Iop_Sub8, ta, tdv, ty );
+
+   putIReg(4, R_EDI, binop(Iop_Add32, mkexpr(td), mkexpr(t_inc)) );
+}
+
+
/* Wrap the appropriate string op inside a REP/REPE/REPNE.
   We assume the insn is the last one in the basic block, and so emit a jump
   to the next insn, rather than just falling through.

   cond is X86CondAlways for plain REP; for REPE/REPNE it is the
   condition under which the loop continues.  eip is the address of
   this insn (loop back target); eip_next is the fall-through address. */
static 
void dis_REP_op ( /*MOD*/DisResult* dres,
                  X86Condcode cond,
                  void (*dis_OP)(Int, IRTemp),
                  Int sz, Addr32 eip, Addr32 eip_next, const HChar* name )
{
   IRTemp t_inc = newTemp(Ity_I32);
   IRTemp tc    = newTemp(Ity_I32);  /*  ECX  */

   assign( tc, getIReg(4,R_ECX) );

   /* If ECX == 0, do nothing at all: side-exit to the next insn. */
   stmt( IRStmt_Exit( binop(Iop_CmpEQ32,mkexpr(tc),mkU32(0)),
                      Ijk_Boring,
                      IRConst_U32(eip_next), OFFB_EIP ) );

   /* ECX-- */
   putIReg(4, R_ECX, binop(Iop_Sub32, mkexpr(tc), mkU32(1)) );

   /* Perform one iteration of the underlying string op. */
   dis_string_op_increment(sz, t_inc);
   dis_OP (sz, t_inc);

   if (cond == X86CondAlways) {
      /* Plain REP: unconditionally loop back to this insn. */
      jmp_lit(dres, Ijk_Boring, eip);
      vassert(dres->whatNext == Dis_StopHere);
   } else {
      /* REPE/REPNE: loop back while cond holds, else fall through
         to the next insn. */
      stmt( IRStmt_Exit( mk_x86g_calculate_condition(cond),
                         Ijk_Boring,
                         IRConst_U32(eip), OFFB_EIP ) );
      jmp_lit(dres, Ijk_Boring, eip_next);
      vassert(dres->whatNext == Dis_StopHere);
   }
   DIP("%s%c\n", name, nameISize(sz));
}
+
+
+/*------------------------------------------------------------*/
+/*--- Arithmetic, etc.                                     ---*/
+/*------------------------------------------------------------*/
+
+/* IMUL E, G.  Supplied eip points to the modR/M byte. */
+static
+UInt dis_mul_E_G ( UChar       sorb,
+                   Int         size, 
+                   Int         delta0 )
+{
+   Int    alen;
+   HChar  dis_buf[50];
+   UChar  rm = getIByte(delta0);
+   IRType ty = szToITy(size);
+   IRTemp te = newTemp(ty);
+   IRTemp tg = newTemp(ty);
+   IRTemp resLo = newTemp(ty);
+
+   assign( tg, getIReg(size, gregOfRM(rm)) );
+   if (epartIsReg(rm)) {
+      assign( te, getIReg(size, eregOfRM(rm)) );
+   } else {
+      IRTemp addr = disAMode( &alen, sorb, delta0, dis_buf );
+      assign( te, loadLE(ty,mkexpr(addr)) );
+   }
+
+   setFlags_MUL ( ty, te, tg, X86G_CC_OP_SMULB );
+
+   assign( resLo, binop( mkSizedOp(ty, Iop_Mul8), mkexpr(te), mkexpr(tg) ) );
+
+   putIReg(size, gregOfRM(rm), mkexpr(resLo) );
+
+   if (epartIsReg(rm)) {
+      DIP("imul%c %s, %s\n", nameISize(size), 
+                             nameIReg(size,eregOfRM(rm)),
+                             nameIReg(size,gregOfRM(rm)));
+      return 1+delta0;
+   } else {
+      DIP("imul%c %s, %s\n", nameISize(size), 
+                             dis_buf, nameIReg(size,gregOfRM(rm)));
+      return alen+delta0;
+   }
+}
+
+
/* IMUL I * E -> G (three-operand, immediate form).  Supplied delta
   points to the modR/M byte; a litsize-byte immediate follows the
   modRM/amode bytes.  Returns the delta just past the insn. */
static
UInt dis_imul_I_E_G ( UChar       sorb,
                      Int         size, 
                      Int         delta,
                      Int         litsize )
{
   Int    d32, alen;
   HChar  dis_buf[50];
   UChar  rm = getIByte(delta);
   IRType ty = szToITy(size);
   IRTemp te = newTemp(ty);
   IRTemp tl = newTemp(ty);
   IRTemp resLo = newTemp(ty);

   vassert(size == 1 || size == 2 || size == 4);

   /* Fetch the E operand: register or memory. */
   if (epartIsReg(rm)) {
      assign(te, getIReg(size, eregOfRM(rm)));
      delta++;
   } else {
      IRTemp addr = disAMode( &alen, sorb, delta, dis_buf );
      assign(te, loadLE(ty, mkexpr(addr)));
      delta += alen;
   }
   /* The (sign-extended) immediate follows the operand bytes. */
   d32 = getSDisp(litsize,delta);
   delta += litsize;

   /* Truncate the sign-extended literal to the operand size so it can
      be expressed as a constant of type ty. */
   if (size == 1) d32 &= 0xFF;
   if (size == 2) d32 &= 0xFFFF;

   assign(tl, mkU(ty,d32));

   assign( resLo, binop( mkSizedOp(ty, Iop_Mul8), mkexpr(te), mkexpr(tl) ));

   setFlags_MUL ( ty, te, tl, X86G_CC_OP_SMULB );

   putIReg(size, gregOfRM(rm), mkexpr(resLo));

   DIP("imul %d, %s, %s\n", d32, 
       ( epartIsReg(rm) ? nameIReg(size,eregOfRM(rm)) : dis_buf ),
       nameIReg(size,gregOfRM(rm)) );
   return delta;
}
+
+
+/* Generate an IR sequence to do a count-leading-zeroes operation on
+   the supplied IRTemp, and return a new IRTemp holding the result.
+   'ty' may be Ity_I16 or Ity_I32 only.  In the case where the
+   argument is zero, return the number of bits in the word (the
+   natural semantics). */
+static IRTemp gen_LZCNT ( IRType ty, IRTemp src )
+{
+   vassert(ty == Ity_I32 || ty == Ity_I16);
+
+   IRTemp src32 = newTemp(Ity_I32);
+   assign(src32, widenUto32( mkexpr(src) ));
+
+   IRTemp src32x = newTemp(Ity_I32);
+   assign(src32x, 
+          binop(Iop_Shl32, mkexpr(src32),
+                           mkU8(32 - 8 * sizeofIRType(ty))));
+
+   // Clz32 has undefined semantics when its input is zero, so
+   // special-case around that.
+   IRTemp res32 = newTemp(Ity_I32);
+   assign(res32,
+          IRExpr_ITE(
+             binop(Iop_CmpEQ32, mkexpr(src32x), mkU32(0)),
+             mkU32(8 * sizeofIRType(ty)),
+             unop(Iop_Clz32, mkexpr(src32x))
+   ));
+
+   IRTemp res = newTemp(ty);
+   assign(res, narrowTo(ty, mkexpr(res32)));
+   return res;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- x87 FLOATING POINT INSTRUCTIONS                      ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* --- Helper functions for dealing with the register stack. --- */
+
+/* --- Set the emulation-warning pseudo-register. --- */
+
static void put_emwarn ( IRExpr* e /* :: Ity_I32 */ )
{
   /* Record an emulation-warning code in the guest state; callers
      then side-exit with Ijk_EmWarn when it is nonzero (see the
      FLDENV/FLDCW handlers in dis_FPU). */
   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
   stmt( IRStmt_Put( OFFB_EMNOTE, e ) );
}
+
+/* --- Produce an IRExpr* denoting a 64-bit QNaN. --- */
+
static IRExpr* mkQNaN64 ( void )
{
  /* QNaN is 0 2047 1 0(51times) 
     == 0b 11111111111b 1 0(51times)
     == 0x7FF8 0000 0000 0000
     i.e. sign 0, all-ones exponent, top mantissa bit set.
   */
   return IRExpr_Const(IRConst_F64i(0x7FF8000000000000ULL));
}
+
+/* --------- Get/put the top-of-stack pointer. --------- */
+
static IRExpr* get_ftop ( void )
{
   /* Read the x87 top-of-stack pointer from the guest state. */
   return IRExpr_Get( OFFB_FTOP, Ity_I32 );
}
+
static void put_ftop ( IRExpr* e )
{
   /* Write the x87 top-of-stack pointer into the guest state. */
   vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I32);
   stmt( IRStmt_Put( OFFB_FTOP, e ) );
}
+
+/* --------- Get/put the C3210 bits. --------- */
+
static IRExpr* get_C3210 ( void )
{
   /* Read the FC3210 field (FPU condition-code bits) from the guest
      state. */
   return IRExpr_Get( OFFB_FC3210, Ity_I32 );
}
+
+static void put_C3210 ( IRExpr* e )
+{
+   stmt( IRStmt_Put( OFFB_FC3210, e ) );
+}
+
+/* --------- Get/put the FPU rounding mode. --------- */
static IRExpr* /* :: Ity_I32 */ get_fpround ( void )
{
   /* Read the FPU rounding mode (IRRoundingMode encoding) from the
      guest state. */
   return IRExpr_Get( OFFB_FPROUND, Ity_I32 );
}
+
+static void put_fpround ( IRExpr* /* :: Ity_I32 */ e )
+{
+   stmt( IRStmt_Put( OFFB_FPROUND, e ) );
+}
+
+
+/* --------- Synthesise a 2-bit FPU rounding mode. --------- */
+/* Produces a value in 0 .. 3, which is encoded as per the type
+   IRRoundingMode.  Since the guest_FPROUND value is also encoded as
+   per IRRoundingMode, we merely need to get it and mask it for
+   safety.
+*/
static IRExpr* /* :: Ity_I32 */ get_roundingmode ( void )
{
   /* guest_FPROUND is already in IRRoundingMode encoding; masking to
      2 bits is purely defensive. */
   return binop( Iop_And32, get_fpround(), mkU32(3) );
}
+
static IRExpr* /* :: Ity_I32 */ get_FAKE_roundingmode ( void )
{
   /* Used where the true rounding mode is deliberately ignored (see
      the XXXROUNDINGFIXME call sites): always round-to-nearest. */
   return mkU32(Irrm_NEAREST);
}
+
+
+/* --------- Get/set FP register tag bytes. --------- */
+
+/* Given i, and some expression e, generate 'ST_TAG(i) = e'. */
+
+static void put_ST_TAG ( Int i, IRExpr* value )
+{
+   IRRegArray* descr;
+   vassert(typeOfIRExpr(irsb->tyenv, value) == Ity_I8);
+   descr = mkIRRegArray( OFFB_FPTAGS, Ity_I8, 8 );
+   stmt( IRStmt_PutI( mkIRPutI(descr, get_ftop(), i, value) ) );
+}
+
+/* Given i, generate an expression yielding 'ST_TAG(i)'.  This will be
+   zero to indicate "Empty" and nonzero to indicate "NonEmpty".  */
+
static IRExpr* get_ST_TAG ( Int i )
{
   /* The FPTAGS array is indexed circularly relative to FTOP. */
   IRRegArray* descr = mkIRRegArray( OFFB_FPTAGS, Ity_I8, 8 );
   return IRExpr_GetI( descr, get_ftop(), i );
}
+
+
+/* --------- Get/set FP registers. --------- */
+
+/* Given i, and some expression e, emit 'ST(i) = e' and set the
+   register's tag to indicate the register is full.  The previous
+   state of the register is not checked. */
+
+static void put_ST_UNCHECKED ( Int i, IRExpr* value )
+{
+   IRRegArray* descr;
+   vassert(typeOfIRExpr(irsb->tyenv, value) == Ity_F64);
+   descr = mkIRRegArray( OFFB_FPREGS, Ity_F64, 8 );
+   stmt( IRStmt_PutI( mkIRPutI(descr, get_ftop(), i, value) ) );
+   /* Mark the register as in-use. */
+   put_ST_TAG(i, mkU8(1));
+}
+
+/* Given i, and some expression e, emit
+      ST(i) = is_full(i) ? NaN : e
+   and set the tag accordingly.
+*/
+
+static void put_ST ( Int i, IRExpr* value )
+{
+   put_ST_UNCHECKED(
+      i,
+      IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
+                  /* non-0 means full */
+                  mkQNaN64(),
+                  /* 0 means empty */
+                  value
+      )
+   );
+}
+
+
+/* Given i, generate an expression yielding 'ST(i)'. */
+
static IRExpr* get_ST_UNCHECKED ( Int i )
{
   /* Read ST(i) without consulting its tag; the FPREGS array is
      indexed circularly relative to FTOP. */
   IRRegArray* descr = mkIRRegArray( OFFB_FPREGS, Ity_F64, 8 );
   return IRExpr_GetI( descr, get_ftop(), i );
}
+
+
+/* Given i, generate an expression yielding 
+  is_full(i) ? ST(i) : NaN
+*/
+
+static IRExpr* get_ST ( Int i )
+{
+   return
+      IRExpr_ITE( binop(Iop_CmpNE8, get_ST_TAG(i), mkU8(0)),
+                  /* non-0 means full */
+                  get_ST_UNCHECKED(i),
+                  /* 0 means empty */
+                  mkQNaN64());
+}
+
+
+/* Given i, and some expression e, and a condition cond, generate IR
+   which has the same effect as put_ST(i,e) when cond is true and has
+   no effect when cond is false.  Given the lack of proper
+   if-then-else in the IR, this is pretty tricky.
+*/
+
static void maybe_put_ST ( IRTemp cond, Int i, IRExpr* value )
{
   // Conditional version of put_ST: when cond (:: Ity_I1) is true,
   // behaves like put_ST(i, value); when false, leaves both the tag
   // and the value of ST(i) untouched.
   //
   // new_tag = if cond then FULL else old_tag
   // new_val = if cond then (if old_tag==FULL then NaN else val)
   //                   else old_val

   IRTemp old_tag = newTemp(Ity_I8);
   assign(old_tag, get_ST_TAG(i));
   IRTemp new_tag = newTemp(Ity_I8);
   assign(new_tag,
          IRExpr_ITE(mkexpr(cond), mkU8(1)/*FULL*/, mkexpr(old_tag)));

   // Snapshot the old value BEFORE the unchecked write below.
   IRTemp old_val = newTemp(Ity_F64);
   assign(old_val, get_ST_UNCHECKED(i));
   IRTemp new_val = newTemp(Ity_F64);
   assign(new_val,
          IRExpr_ITE(mkexpr(cond),
                     IRExpr_ITE(binop(Iop_CmpNE8, mkexpr(old_tag), mkU8(0)),
                                /* non-0 means full */
                                mkQNaN64(),
                                /* 0 means empty */
                                value),
                     mkexpr(old_val)));

   put_ST_UNCHECKED(i, mkexpr(new_val));
   // put_ST_UNCHECKED incorrectly sets tag(i) to always be FULL.  So 
   // now set it to new_tag instead.
   put_ST_TAG(i, mkexpr(new_tag));
}
+
+/* Adjust FTOP downwards by one register. */
+
static void fp_push ( void )
{
   /* The x87 stack grows downwards: decrementing FTOP exposes a new
      ST(0). */
   put_ftop( binop(Iop_Sub32, get_ftop(), mkU32(1)) );
}
+
+/* Adjust FTOP downwards by one register when COND is 1:I1.  Else
+   don't change it. */
+
static void maybe_fp_push ( IRTemp cond )
{
   /* Subtract cond (widened 0 or 1) from FTOP, so the push happens
      only when cond is 1. */
   put_ftop( binop(Iop_Sub32, get_ftop(), unop(Iop_1Uto32,mkexpr(cond))) );
}
+
+/* Adjust FTOP upwards by one register, and mark the vacated register
+   as empty.  */
+
static void fp_pop ( void )
{
   /* Mark the outgoing ST(0) Empty, then bump FTOP upwards. */
   put_ST_TAG(0, mkU8(0));
   put_ftop( binop(Iop_Add32, get_ftop(), mkU32(1)) );
}
+
+/* Set the C2 bit of the FPU status register to e[0].  Assumes that
+   e[31:1] == 0. 
+*/
+static void set_C2 ( IRExpr* e )
+{
+   IRExpr* cleared = binop(Iop_And32, get_C3210(), mkU32(~X86G_FC_MASK_C2));
+   put_C3210( binop(Iop_Or32,
+                    cleared,
+                    binop(Iop_Shl32, e, mkU8(X86G_FC_SHIFT_C2))) );
+}
+
+/* Generate code to check that abs(d64) < 2^63 and is finite.  This is
+   used to do the range checks for FSIN, FCOS, FSINCOS and FPTAN.  The
+   test is simple, but the derivation of it is not so simple.
+
+   The exponent field for an IEEE754 double is 11 bits.  That means it
+   can take values 0 through 0x7FF.  If the exponent has value 0x7FF,
+   the number is either a NaN or an Infinity and so is not finite.
+   Furthermore, a finite value of exactly 2^63 is the smallest value
+   that has exponent value 0x43E.  Hence, what we need to do is
+   extract the exponent, ignoring the sign bit and mantissa, and check
+   it is < 0x43E, or <= 0x43D.
+
+   To make this easily applicable to 32- and 64-bit targets, a
+   roundabout approach is used.  First the number is converted to I64,
+   then the top 32 bits are taken.  Shifting them right by 20 bits
+   places the sign bit and exponent in the bottom 12 bits.  Anding
+   with 0x7FF gets rid of the sign bit, leaving just the exponent
+   available for comparison.
+*/
+static IRTemp math_IS_TRIG_ARG_FINITE_AND_IN_RANGE ( IRTemp d64 )
+{
+   IRTemp i64 = newTemp(Ity_I64);
+   assign(i64, unop(Iop_ReinterpF64asI64, mkexpr(d64)) );
+   IRTemp exponent = newTemp(Ity_I32);
+   assign(exponent,
+          binop(Iop_And32,
+                binop(Iop_Shr32, unop(Iop_64HIto32, mkexpr(i64)), mkU8(20)),
+                mkU32(0x7FF)));
+   IRTemp in_range_and_finite = newTemp(Ity_I1);
+   assign(in_range_and_finite,
+          binop(Iop_CmpLE32U, mkexpr(exponent), mkU32(0x43D)));
+   return in_range_and_finite;
+}
+
+/* Invent a plausible-looking FPU status word value:
+      ((ftop & 7) << 11) | (c3210 & 0x4700)
+ */
+static IRExpr* get_FPU_sw ( void )
+{
+   return
+      unop(Iop_32to16,
+           binop(Iop_Or32,
+                 binop(Iop_Shl32, 
+                       binop(Iop_And32, get_ftop(), mkU32(7)), 
+                             mkU8(11)),
+                       binop(Iop_And32, get_C3210(), mkU32(0x4700))
+      ));
+}
+
+
+/* ------------------------------------------------------- */
+/* Given all that stack-mangling junk, we can now go ahead
+   and describe FP instructions. 
+*/
+
+/* ST(0) = ST(0) `op` mem64/32(addr)
+   Need to check ST(0)'s tag on read, but not on write.
+*/
static
void fp_do_op_mem_ST_0 ( IRTemp addr, const HChar* op_txt, HChar* dis_buf, 
                         IROp op, Bool dbl )
{
   DIP("f%s%c %s\n", op_txt, dbl?'l':'s', dis_buf);
   if (dbl) {
      /* 64-bit memory operand: use it directly. */
      put_ST_UNCHECKED(0, 
         triop( op, 
                get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
                get_ST(0), 
                loadLE(Ity_F64,mkexpr(addr))
         ));
   } else {
      /* 32-bit memory operand: widen to F64, since the register
         stack is modelled at F64 throughout. */
      put_ST_UNCHECKED(0, 
         triop( op, 
                get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
                get_ST(0), 
                unop(Iop_F32toF64, loadLE(Ity_F32,mkexpr(addr)))
         ));
   }
}
+
+
+/* ST(0) = mem64/32(addr) `op` ST(0)
+   Need to check ST(0)'s tag on read, but not on write.
+*/
static
void fp_do_oprev_mem_ST_0 ( IRTemp addr, const HChar* op_txt, HChar* dis_buf,
                            IROp op, Bool dbl )
{
   /* Same as fp_do_op_mem_ST_0 but with the operands reversed, for
      the FSUBR/FDIVR-style encodings. */
   DIP("f%s%c %s\n", op_txt, dbl?'l':'s', dis_buf);
   if (dbl) {
      /* 64-bit memory operand: use it directly. */
      put_ST_UNCHECKED(0, 
         triop( op, 
                get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
                loadLE(Ity_F64,mkexpr(addr)),
                get_ST(0)
         ));
   } else {
      /* 32-bit memory operand: widen to F64 first. */
      put_ST_UNCHECKED(0, 
         triop( op, 
                get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
                unop(Iop_F32toF64, loadLE(Ity_F32,mkexpr(addr))),
                get_ST(0)
         ));
   }
}
+
+
+/* ST(dst) = ST(dst) `op` ST(src).
+   Check dst and src tags when reading but not on write.
+*/
static
void fp_do_op_ST_ST ( const HChar* op_txt, IROp op, UInt st_src, UInt st_dst,
                      Bool pop_after )
{
   DIP("f%s%s st(%d), st(%d)\n", op_txt, pop_after?"p":"", 
                                 (Int)st_src, (Int)st_dst );
   /* get_ST performs the tag check on the reads (empty registers read
      as QNaN); the write is deliberately unchecked. */
   put_ST_UNCHECKED( 
      st_dst, 
      triop( op, 
             get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
             get_ST(st_dst), 
             get_ST(st_src) ) 
   );
   if (pop_after)
      fp_pop();
}
+
+/* ST(dst) = ST(src) `op` ST(dst).
+   Check dst and src tags when reading but not on write.
+*/
static
void fp_do_oprev_ST_ST ( const HChar* op_txt, IROp op, UInt st_src,
                         UInt st_dst, Bool pop_after )
{
   /* Same as fp_do_op_ST_ST but with the operands reversed, for the
      FSUBR/FDIVR-style encodings. */
   DIP("f%s%s st(%d), st(%d)\n", op_txt, pop_after?"p":"",
                                 (Int)st_src, (Int)st_dst );
   put_ST_UNCHECKED( 
      st_dst, 
      triop( op, 
             get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
             get_ST(st_src), 
             get_ST(st_dst) ) 
   );
   if (pop_after)
      fp_pop();
}
+
+/* %eflags(Z,P,C) = UCOMI( st(0), st(i) ) */
static void fp_do_ucomi_ST0_STi ( UInt i, Bool pop_after )
{
   DIP("fucomi%s %%st(0),%%st(%d)\n", pop_after ? "p" : "", (Int)i );
   /* This is a bit of a hack (and isn't really right).  It sets
      Z,P,C,O correctly, but forces A and S to zero, whereas the Intel
      documentation implies A and S are unchanged. 
   */
   /* It's also fishy in that it is used both for COMIP and
      UCOMIP, and they aren't the same (although similar). */
   /* Install a COPY flag thunk whose DEP1 is the CmpF64 result masked
      to the Z/P/C bit positions (0x45). */
   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
   stmt( IRStmt_Put( OFFB_CC_DEP1,
                     binop( Iop_And32,
                            binop(Iop_CmpF64, get_ST(0), get_ST(i)),
                            mkU32(0x45)
       )));
   /* Set NDEP even though it isn't used.  This makes redundant-PUT
      elimination of previous stores to this field work better. */
   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
   if (pop_after)
      fp_pop();
}
+
+
+static
+UInt dis_FPU ( Bool* decode_ok, UChar sorb, Int delta )
+{
+   Int    len;
+   UInt   r_src, r_dst;
+   HChar  dis_buf[50];
+   IRTemp t1, t2;
+
+   /* On entry, delta points at the second byte of the insn (the modrm
+      byte).*/
+   UChar first_opcode = getIByte(delta-1);
+   UChar modrm        = getIByte(delta+0);
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xD8 opcodes +-+-+-+-+-+-+-+ */
+
+   if (first_opcode == 0xD8) {
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+         delta += len;
+
+         switch (gregOfRM(modrm)) {
+
+            case 0: /* FADD single-real */
+               fp_do_op_mem_ST_0 ( addr, "add", dis_buf, Iop_AddF64, False );
+               break;
+
+            case 1: /* FMUL single-real */
+               fp_do_op_mem_ST_0 ( addr, "mul", dis_buf, Iop_MulF64, False );
+               break;
+
+            case 2: /* FCOM single-real */
+               DIP("fcoms %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      unop(Iop_F32toF64, 
+                                           loadLE(Ity_F32,mkexpr(addr)))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               break;  
+
+            case 3: /* FCOMP single-real */
+               DIP("fcomps %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      unop(Iop_F32toF64, 
+                                           loadLE(Ity_F32,mkexpr(addr)))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               fp_pop();
+               break;  
+
+            case 4: /* FSUB single-real */
+               fp_do_op_mem_ST_0 ( addr, "sub", dis_buf, Iop_SubF64, False );
+               break;
+
+            case 5: /* FSUBR single-real */
+               fp_do_oprev_mem_ST_0 ( addr, "subr", dis_buf, Iop_SubF64, False );
+               break;
+
+            case 6: /* FDIV single-real */
+               fp_do_op_mem_ST_0 ( addr, "div", dis_buf, Iop_DivF64, False );
+               break;
+
+            case 7: /* FDIVR single-real */
+               fp_do_oprev_mem_ST_0 ( addr, "divr", dis_buf, Iop_DivF64, False );
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+               vex_printf("first_opcode == 0xD8\n");
+               goto decode_fail;
+         }
+      } else {
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FADD %st(?),%st(0) */
+               fp_do_op_ST_ST ( "add", Iop_AddF64, modrm - 0xC0, 0, False );
+               break;
+
+            case 0xC8 ... 0xCF: /* FMUL %st(?),%st(0) */
+               fp_do_op_ST_ST ( "mul", Iop_MulF64, modrm - 0xC8, 0, False );
+               break;
+
+            /* Dunno if this is right */
+            case 0xD0 ... 0xD7: /* FCOM %st(?),%st(0) */
+               r_dst = (UInt)modrm - 0xD0;
+               DIP("fcom %%st(0),%%st(%d)\n", (Int)r_dst);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(r_dst)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               break;
+
+            /* Dunno if this is right */
+            case 0xD8 ... 0xDF: /* FCOMP %st(?),%st(0) */
+               r_dst = (UInt)modrm - 0xD8;
+               DIP("fcomp %%st(0),%%st(%d)\n", (Int)r_dst);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(r_dst)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               fp_pop();
+               break;
+
+            case 0xE0 ... 0xE7: /* FSUB %st(?),%st(0) */
+               fp_do_op_ST_ST ( "sub", Iop_SubF64, modrm - 0xE0, 0, False );
+               break;
+
+            case 0xE8 ... 0xEF: /* FSUBR %st(?),%st(0) */
+               fp_do_oprev_ST_ST ( "subr", Iop_SubF64, modrm - 0xE8, 0, False );
+               break;
+
+            case 0xF0 ... 0xF7: /* FDIV %st(?),%st(0) */
+               fp_do_op_ST_ST ( "div", Iop_DivF64, modrm - 0xF0, 0, False );
+               break;
+
+            case 0xF8 ... 0xFF: /* FDIVR %st(?),%st(0) */
+               fp_do_oprev_ST_ST ( "divr", Iop_DivF64, modrm - 0xF8, 0, False );
+               break;
+
+            default:
+               goto decode_fail;
+         }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xD9 opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xD9) {
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+         delta += len;
+
+         switch (gregOfRM(modrm)) {
+
+            case 0: /* FLD single-real */
+               DIP("flds %s\n", dis_buf);
+               fp_push();
+               put_ST(0, unop(Iop_F32toF64,
+                              loadLE(Ity_F32, mkexpr(addr))));
+               break;
+
+            case 2: /* FST single-real */
+               DIP("fsts %s\n", dis_buf);
+               storeLE(mkexpr(addr),
+                       binop(Iop_F64toF32, get_roundingmode(), get_ST(0)));
+               break;
+
+            case 3: /* FSTP single-real */
+               DIP("fstps %s\n", dis_buf);
+               storeLE(mkexpr(addr), 
+                       binop(Iop_F64toF32, get_roundingmode(), get_ST(0)));
+               fp_pop();
+               break;
+
+            case 4: { /* FLDENV m28 */
+               /* Uses dirty helper: 
+                     VexEmNote x86g_do_FLDENV ( VexGuestX86State*, HWord ) */
+               IRTemp   ew = newTemp(Ity_I32);
+               IRDirty* d  = unsafeIRDirty_0_N ( 
+                                0/*regparms*/, 
+                                "x86g_dirtyhelper_FLDENV", 
+                                &x86g_dirtyhelper_FLDENV,
+                                mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+                             );
+               d->tmp   = ew;
+               /* declare we're reading memory */
+               d->mFx   = Ifx_Read;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 28;
+
+               /* declare we're writing guest state */
+               d->nFxState = 4;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Write;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Write;
+               d->fxState[1].offset = OFFB_FPTAGS;
+               d->fxState[1].size   = 8 * sizeof(UChar);
+
+               d->fxState[2].fx     = Ifx_Write;
+               d->fxState[2].offset = OFFB_FPROUND;
+               d->fxState[2].size   = sizeof(UInt);
+
+               d->fxState[3].fx     = Ifx_Write;
+               d->fxState[3].offset = OFFB_FC3210;
+               d->fxState[3].size   = sizeof(UInt);
+
+               stmt( IRStmt_Dirty(d) );
+
+               /* ew contains any emulation warning we may need to
+                  issue.  If needed, side-exit to the next insn,
+                  reporting the warning, so that Valgrind's dispatcher
+                  sees the warning. */
+               put_emwarn( mkexpr(ew) );
+               stmt( 
+                  IRStmt_Exit(
+                     binop(Iop_CmpNE32, mkexpr(ew), mkU32(0)),
+                     Ijk_EmWarn,
+                     IRConst_U32( ((Addr32)guest_EIP_bbstart)+delta),
+                     OFFB_EIP
+                  )
+               );
+
+               DIP("fldenv %s\n", dis_buf);
+               break;
+            }
+
+            case 5: {/* FLDCW */
+               /* The only thing we observe in the control word is the
+                  rounding mode.  Therefore, pass the 16-bit value
+                  (x87 native-format control word) to a clean helper,
+                  getting back a 64-bit value, the lower half of which
+                  is the FPROUND value to store, and the upper half of
+                  which is the emulation-warning token which may be
+                  generated.
+               */
+               /* ULong x86g_check_fldcw ( UInt ); */
+               IRTemp t64 = newTemp(Ity_I64);
+               IRTemp ew = newTemp(Ity_I32);
+               DIP("fldcw %s\n", dis_buf);
+               assign( t64, mkIRExprCCall(
+                               Ity_I64, 0/*regparms*/, 
+                               "x86g_check_fldcw",
+                               &x86g_check_fldcw, 
+                               mkIRExprVec_1( 
+                                  unop( Iop_16Uto32, 
+                                        loadLE(Ity_I16, mkexpr(addr)))
+                               )
+                            )
+                     );
+
+               put_fpround( unop(Iop_64to32, mkexpr(t64)) );
+               assign( ew, unop(Iop_64HIto32, mkexpr(t64) ) );
+               put_emwarn( mkexpr(ew) );
+               /* Finally, if an emulation warning was reported,
+                  side-exit to the next insn, reporting the warning,
+                  so that Valgrind's dispatcher sees the warning. */
+               stmt( 
+                  IRStmt_Exit(
+                     binop(Iop_CmpNE32, mkexpr(ew), mkU32(0)),
+                     Ijk_EmWarn,
+                     IRConst_U32( ((Addr32)guest_EIP_bbstart)+delta),
+                     OFFB_EIP
+                  )
+               );
+               break;
+            }
+
+            case 6: { /* FNSTENV m28 */
+               /* Uses dirty helper: 
+                     void x86g_dirtyhelper_FSTENV ( VexGuestX86State*, HWord ) */
+               IRDirty* d = unsafeIRDirty_0_N ( 
+                               0/*regparms*/, 
+                               "x86g_dirtyhelper_FSTENV", 
+                               &x86g_dirtyhelper_FSTENV,
+                               mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+                            );
+               /* declare we're writing memory */
+               d->mFx   = Ifx_Write;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 28;
+
+               /* declare we're reading guest state */
+               d->nFxState = 4;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Read;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Read;
+               d->fxState[1].offset = OFFB_FPTAGS;
+               d->fxState[1].size   = 8 * sizeof(UChar);
+
+               d->fxState[2].fx     = Ifx_Read;
+               d->fxState[2].offset = OFFB_FPROUND;
+               d->fxState[2].size   = sizeof(UInt);
+
+               d->fxState[3].fx     = Ifx_Read;
+               d->fxState[3].offset = OFFB_FC3210;
+               d->fxState[3].size   = sizeof(UInt);
+
+               stmt( IRStmt_Dirty(d) );
+
+               DIP("fnstenv %s\n", dis_buf);
+               break;
+            }
+
+            case 7: /* FNSTCW */
+              /* Fake up a native x87 FPU control word.  The only
+                 thing it depends on is FPROUND[1:0], so call a clean
+                 helper to cook it up. */
+               /* UInt x86g_create_fpucw ( UInt fpround ) */
+               DIP("fnstcw %s\n", dis_buf);
+               storeLE(
+                  mkexpr(addr), 
+                  unop( Iop_32to16, 
+                        mkIRExprCCall(
+                           Ity_I32, 0/*regp*/,
+                           "x86g_create_fpucw", &x86g_create_fpucw, 
+                           mkIRExprVec_1( get_fpround() ) 
+                        ) 
+                  ) 
+               );
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+               vex_printf("first_opcode == 0xD9\n");
+               goto decode_fail;
+         }
+
+      } else {
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FLD %st(?) */
+               r_src = (UInt)modrm - 0xC0;
+               DIP("fld %%st(%d)\n", (Int)r_src);
+               t1 = newTemp(Ity_F64);
+               assign(t1, get_ST(r_src));
+               fp_push();
+               put_ST(0, mkexpr(t1));
+               break;
+
+            case 0xC8 ... 0xCF: /* FXCH %st(?) */
+               r_src = (UInt)modrm - 0xC8;
+               DIP("fxch %%st(%d)\n", (Int)r_src);
+               t1 = newTemp(Ity_F64);
+               t2 = newTemp(Ity_F64);
+               assign(t1, get_ST(0));
+               assign(t2, get_ST(r_src));
+               put_ST_UNCHECKED(0, mkexpr(t2));
+               put_ST_UNCHECKED(r_src, mkexpr(t1));
+               break;
+
+            case 0xE0: /* FCHS */
+               DIP("fchs\n");
+               put_ST_UNCHECKED(0, unop(Iop_NegF64, get_ST(0)));
+               break;
+
+            case 0xE1: /* FABS */
+               DIP("fabs\n");
+               put_ST_UNCHECKED(0, unop(Iop_AbsF64, get_ST(0)));
+               break;
+
+            case 0xE4: /* FTST */
+               DIP("ftst\n");
+               /* This forces C1 to zero, which isn't right. */
+               /* Well, in fact the Intel docs say (bizarrely): "C1 is
+                  set to 0 if stack underflow occurred; otherwise, set
+                  to 0" which is pretty nonsensical.  I guess it's a
+                   typo. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      IRExpr_Const(IRConst_F64i(0x0ULL))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               break;
+
+            case 0xE5: { /* FXAM */
+               /* This is an interesting one.  It examines %st(0),
+                  regardless of whether the tag says it's empty or not.
+                  Here, just pass both the tag (in our format) and the
+                  value (as a double, actually a ULong) to a helper
+                  function. */
+               IRExpr** args
+                  = mkIRExprVec_2( unop(Iop_8Uto32, get_ST_TAG(0)),
+                                   unop(Iop_ReinterpF64asI64, 
+                                        get_ST_UNCHECKED(0)) );
+               put_C3210(mkIRExprCCall(
+                            Ity_I32, 
+                            0/*regparm*/, 
+                            "x86g_calculate_FXAM", &x86g_calculate_FXAM,
+                            args
+                        ));
+               DIP("fxam\n");
+               break;
+            }
+
+            case 0xE8: /* FLD1 */
+               DIP("fld1\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(1.0))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x3ff0000000000000ULL)));
+               break;
+
+            case 0xE9: /* FLDL2T */
+               DIP("fldl2t\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(3.32192809488736234781))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x400a934f0979a371ULL)));
+               break;
+
+            case 0xEA: /* FLDL2E */
+               DIP("fldl2e\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(1.44269504088896340739))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x3ff71547652b82feULL)));
+               break;
+
+            case 0xEB: /* FLDPI */
+               DIP("fldpi\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(3.14159265358979323851))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x400921fb54442d18ULL)));
+               break;
+
+            case 0xEC: /* FLDLG2 */
+               DIP("fldlg2\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(0.301029995663981143))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x3fd34413509f79ffULL)));
+               break;
+
+            case 0xED: /* FLDLN2 */
+               DIP("fldln2\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(0.69314718055994530942))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x3fe62e42fefa39efULL)));
+               break;
+
+            case 0xEE: /* FLDZ */
+               DIP("fldz\n");
+               fp_push();
+               /* put_ST(0, IRExpr_Const(IRConst_F64(0.0))); */
+               put_ST(0, IRExpr_Const(IRConst_F64i(0x0000000000000000ULL)));
+               break;
+
+            case 0xF0: /* F2XM1 */
+               DIP("f2xm1\n");
+               put_ST_UNCHECKED(0, 
+                  binop(Iop_2xm1F64, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0)));
+               break;
+
+            case 0xF1: /* FYL2X */
+               DIP("fyl2x\n");
+               put_ST_UNCHECKED(1, 
+                  triop(Iop_Yl2xF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(1), 
+                        get_ST(0)));
+               fp_pop();
+               break;
+
+            case 0xF2: { /* FPTAN */
+               DIP("fptan\n");
+               IRTemp argD = newTemp(Ity_F64);
+               assign(argD, get_ST(0));
+               IRTemp argOK = math_IS_TRIG_ARG_FINITE_AND_IN_RANGE(argD);
+               IRTemp resD = newTemp(Ity_F64);
+               assign(resD,
+                  IRExpr_ITE(
+                     mkexpr(argOK), 
+                     binop(Iop_TanF64,
+                           get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                           mkexpr(argD)),
+                     mkexpr(argD))
+               );
+               put_ST_UNCHECKED(0, mkexpr(resD));
+               /* Conditionally push 1.0 on the stack, if the arg is
+                  in range */
+               maybe_fp_push(argOK);
+               maybe_put_ST(argOK, 0,
+                            IRExpr_Const(IRConst_F64(1.0)));
+               set_C2( binop(Iop_Xor32,
+                             unop(Iop_1Uto32, mkexpr(argOK)), 
+                             mkU32(1)) );
+               break;
+            }
+
+            case 0xF3: /* FPATAN */
+               DIP("fpatan\n");
+               put_ST_UNCHECKED(1, 
+                  triop(Iop_AtanF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(1), 
+                        get_ST(0)));
+               fp_pop();
+               break;
+
+            case 0xF4: { /* FXTRACT */
+               IRTemp argF = newTemp(Ity_F64);
+               IRTemp sigF = newTemp(Ity_F64);
+               IRTemp expF = newTemp(Ity_F64);
+               IRTemp argI = newTemp(Ity_I64);
+               IRTemp sigI = newTemp(Ity_I64);
+               IRTemp expI = newTemp(Ity_I64);
+               DIP("fxtract\n");
+               assign( argF, get_ST(0) );
+               assign( argI, unop(Iop_ReinterpF64asI64, mkexpr(argF)));
+               assign( sigI, 
+                       mkIRExprCCall(
+                          Ity_I64, 0/*regparms*/, 
+                          "x86amd64g_calculate_FXTRACT", 
+                          &x86amd64g_calculate_FXTRACT, 
+                          mkIRExprVec_2( mkexpr(argI), 
+                                         mkIRExpr_HWord(0)/*sig*/ )) 
+               );
+               assign( expI, 
+                       mkIRExprCCall(
+                          Ity_I64, 0/*regparms*/, 
+                          "x86amd64g_calculate_FXTRACT", 
+                          &x86amd64g_calculate_FXTRACT, 
+                          mkIRExprVec_2( mkexpr(argI), 
+                                         mkIRExpr_HWord(1)/*exp*/ )) 
+               );
+               assign( sigF, unop(Iop_ReinterpI64asF64, mkexpr(sigI)) );
+               assign( expF, unop(Iop_ReinterpI64asF64, mkexpr(expI)) );
+               /* exponent */
+               put_ST_UNCHECKED(0, mkexpr(expF) );
+               fp_push();
+               /* significand */
+               put_ST(0, mkexpr(sigF) );
+               break;
+            }
+
+            case 0xF5: { /* FPREM1 -- IEEE compliant */
+               IRTemp a1 = newTemp(Ity_F64);
+               IRTemp a2 = newTemp(Ity_F64);
+               DIP("fprem1\n");
+               /* Do FPREM1 twice, once to get the remainder, and once
+                  to get the C3210 flag values. */
+               assign( a1, get_ST(0) );
+               assign( a2, get_ST(1) );
+               put_ST_UNCHECKED(0, 
+                  triop(Iop_PRem1F64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(a1), 
+                        mkexpr(a2)));
+               put_C3210(
+                  triop(Iop_PRem1C3210F64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(a1), 
+                        mkexpr(a2)) );
+               break;
+            }
+
+            case 0xF7: /* FINCSTP */
+               DIP("fprem\n");
+               put_ftop( binop(Iop_Add32, get_ftop(), mkU32(1)) );
+               break;
+
+            case 0xF8: { /* FPREM -- not IEEE compliant */
+               IRTemp a1 = newTemp(Ity_F64);
+               IRTemp a2 = newTemp(Ity_F64);
+               DIP("fprem\n");
+               /* Do FPREM twice, once to get the remainder, and once
+                  to get the C3210 flag values. */
+               assign( a1, get_ST(0) );
+               assign( a2, get_ST(1) );
+               put_ST_UNCHECKED(0, 
+                  triop(Iop_PRemF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(a1), 
+                        mkexpr(a2)));
+               put_C3210( 
+                  triop(Iop_PRemC3210F64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(a1), 
+                        mkexpr(a2)) );
+               break;
+            }
+
+            case 0xF9: /* FYL2XP1 */
+               DIP("fyl2xp1\n");
+               put_ST_UNCHECKED(1, 
+                  triop(Iop_Yl2xp1F64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(1), 
+                        get_ST(0)));
+               fp_pop();
+               break;
+
+            case 0xFA: /* FSQRT */
+               DIP("fsqrt\n");
+               put_ST_UNCHECKED(0, 
+                  binop(Iop_SqrtF64, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0)));
+               break;
+
+            case 0xFB: { /* FSINCOS */
+               DIP("fsincos\n");
+               IRTemp argD = newTemp(Ity_F64);
+               assign(argD, get_ST(0));
+               IRTemp argOK = math_IS_TRIG_ARG_FINITE_AND_IN_RANGE(argD);
+               IRTemp resD = newTemp(Ity_F64);
+               assign(resD,
+                  IRExpr_ITE(
+                     mkexpr(argOK), 
+                     binop(Iop_SinF64,
+                           get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                           mkexpr(argD)),
+                     mkexpr(argD))
+               );
+               put_ST_UNCHECKED(0, mkexpr(resD));
+               /* Conditionally push the cos value on the stack, if
+                  the arg is in range */
+               maybe_fp_push(argOK);
+               maybe_put_ST(argOK, 0,
+                  binop(Iop_CosF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        mkexpr(argD)));
+               set_C2( binop(Iop_Xor32,
+                             unop(Iop_1Uto32, mkexpr(argOK)), 
+                             mkU32(1)) );
+               break;
+            }
+
+            case 0xFC: /* FRNDINT */
+               DIP("frndint\n");
+               put_ST_UNCHECKED(0,
+                  binop(Iop_RoundF64toInt, get_roundingmode(), get_ST(0)) );
+               break;
+
+            case 0xFD: /* FSCALE */
+               DIP("fscale\n");
+               put_ST_UNCHECKED(0, 
+                  triop(Iop_ScaleF64,
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0), 
+                        get_ST(1)));
+               break;
+
+            case 0xFE:   /* FSIN */
+            case 0xFF: { /* FCOS */
+               Bool isSIN = modrm == 0xFE;
+               DIP("%s\n", isSIN ? "fsin" : "fcos");
+               IRTemp argD = newTemp(Ity_F64);
+               assign(argD, get_ST(0));
+               IRTemp argOK = math_IS_TRIG_ARG_FINITE_AND_IN_RANGE(argD);
+               IRTemp resD = newTemp(Ity_F64);
+               assign(resD,
+                  IRExpr_ITE(
+                     mkexpr(argOK), 
+                     binop(isSIN ? Iop_SinF64 : Iop_CosF64,
+                           get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                           mkexpr(argD)),
+                     mkexpr(argD))
+               );
+               put_ST_UNCHECKED(0, mkexpr(resD));
+               set_C2( binop(Iop_Xor32,
+                             unop(Iop_1Uto32, mkexpr(argOK)), 
+                             mkU32(1)) );
+               break;
+            }
+
+            default:
+               goto decode_fail;
+         }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDA opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDA) {
+
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IROp   fop;
+         IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+         delta += len;
+         switch (gregOfRM(modrm)) {
+
+            case 0: /* FIADD m32int */ /* ST(0) += m32int */
+               DIP("fiaddl %s\n", dis_buf);
+               fop = Iop_AddF64;
+               goto do_fop_m32;
+
+            case 1: /* FIMUL m32int */ /* ST(0) *= m32int */
+               DIP("fimull %s\n", dis_buf);
+               fop = Iop_MulF64;
+               goto do_fop_m32;
+
+            case 2: /* FICOM m32int */
+               DIP("ficoml %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      unop(Iop_I32StoF64, 
+                                           loadLE(Ity_I32,mkexpr(addr)))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               break;
+
+            case 3: /* FICOMP m32int */
+               DIP("ficompl %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      unop(Iop_I32StoF64, 
+                                           loadLE(Ity_I32,mkexpr(addr)))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               fp_pop();
+               break;
+
+            case 4: /* FISUB m32int */ /* ST(0) -= m32int */
+               DIP("fisubl %s\n", dis_buf);
+               fop = Iop_SubF64;
+               goto do_fop_m32;
+
+            case 5: /* FISUBR m32int */ /* ST(0) = m32int - ST(0) */
+               DIP("fisubrl %s\n", dis_buf);
+               fop = Iop_SubF64;
+               goto do_foprev_m32;
+
+            case 6: /* FIDIV m32int */ /* ST(0) /= m32int */
+               DIP("fidivl %s\n", dis_buf);
+               fop = Iop_DivF64;
+               goto do_fop_m32;
+
+            case 7: /* FIDIVR m32int */ /* ST(0) = m32int / ST(0) */
+               DIP("fidivrl %s\n", dis_buf);
+               fop = Iop_DivF64;
+               goto do_foprev_m32;
+
+            do_fop_m32:
+               put_ST_UNCHECKED(0, 
+                  triop(fop, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0),
+                        unop(Iop_I32StoF64,
+                             loadLE(Ity_I32, mkexpr(addr)))));
+               break;
+
+            do_foprev_m32:
+               put_ST_UNCHECKED(0, 
+                  triop(fop, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        unop(Iop_I32StoF64,
+                             loadLE(Ity_I32, mkexpr(addr))),
+                        get_ST(0)));
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+               vex_printf("first_opcode == 0xDA\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FCMOVB ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xC0;
+               DIP("fcmovb %%st(%d), %%st(0)\n", (Int)r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_x86g_calculate_condition(X86CondB),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xC8 ... 0xCF: /* FCMOVE(Z) ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xC8;
+               DIP("fcmovz %%st(%d), %%st(0)\n", (Int)r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_x86g_calculate_condition(X86CondZ),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xD0 ... 0xD7: /* FCMOVBE ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xD0;
+               DIP("fcmovbe %%st(%d), %%st(0)\n", (Int)r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_x86g_calculate_condition(X86CondBE),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xD8 ... 0xDF: /* FCMOVU ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xD8;
+               DIP("fcmovu %%st(%d), %%st(0)\n", (Int)r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_x86g_calculate_condition(X86CondP),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xE9: /* FUCOMPP %st(0),%st(1) */
+               DIP("fucompp %%st(0),%%st(1)\n");
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(1)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               fp_pop();
+               fp_pop();
+               break;
+
+            default:
+               goto decode_fail;
+         }
+
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDB opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDB) {
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+         delta += len;
+
+         switch (gregOfRM(modrm)) {
+
+            case 0: /* FILD m32int */
+               DIP("fildl %s\n", dis_buf);
+               fp_push();
+               put_ST(0, unop(Iop_I32StoF64,
+                              loadLE(Ity_I32, mkexpr(addr))));
+               break;
+
+            case 1: /* FISTTPL m32 (SSE3) */
+               DIP("fisttpl %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI32S, mkU32(Irrm_ZERO), get_ST(0)) );
+               fp_pop();
+               break;
+
+            case 2: /* FIST m32 */
+               DIP("fistl %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI32S, get_roundingmode(), get_ST(0)) );
+               break;
+
+            case 3: /* FISTP m32 */
+               DIP("fistpl %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI32S, get_roundingmode(), get_ST(0)) );
+               fp_pop();
+               break;
+
+            case 5: { /* FLD extended-real */
+               /* Uses dirty helper: 
+                     ULong x86g_dirtyhelper_loadF80le ( UInt )
+                  addr holds the address.  First, do a dirty call to
+                  get hold of the data. */
+               IRTemp   val  = newTemp(Ity_I64);
+               IRExpr** args = mkIRExprVec_1 ( mkexpr(addr) );
+
+               IRDirty* d = unsafeIRDirty_1_N ( 
+                               val, 
+                               0/*regparms*/, 
+                               "x86g_dirtyhelper_loadF80le", 
+                               &x86g_dirtyhelper_loadF80le, 
+                               args 
+                            );
+               /* declare that we're reading memory */
+               d->mFx   = Ifx_Read;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 10;
+
+               /* execute the dirty call, dumping the result in val. */
+               stmt( IRStmt_Dirty(d) );
+               fp_push();
+               put_ST(0, unop(Iop_ReinterpI64asF64, mkexpr(val)));
+
+               DIP("fldt %s\n", dis_buf);
+               break;
+            }
+
+            case 7: { /* FSTP extended-real */
+               /* Uses dirty helper: void x86g_dirtyhelper_storeF80le ( UInt, ULong ) */
+               IRExpr** args 
+                  = mkIRExprVec_2( mkexpr(addr), 
+                                   unop(Iop_ReinterpF64asI64, get_ST(0)) );
+
+               IRDirty* d = unsafeIRDirty_0_N ( 
+                               0/*regparms*/, 
+                               "x86g_dirtyhelper_storeF80le", 
+                               &x86g_dirtyhelper_storeF80le,
+                               args 
+                            );
+               /* declare we're writing memory */
+               d->mFx   = Ifx_Write;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 10;
+
+               /* execute the dirty call. */
+               stmt( IRStmt_Dirty(d) );
+               fp_pop();
+
+               DIP("fstpt\n %s", dis_buf);
+               break;
+            }
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+               vex_printf("first_opcode == 0xDB\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FCMOVNB ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xC0;
+               DIP("fcmovnb %%st(%d), %%st(0)\n", (Int)r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_x86g_calculate_condition(X86CondNB),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xC8 ... 0xCF: /* FCMOVNE(NZ) ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xC8;
+               DIP("fcmovnz %%st(%d), %%st(0)\n", (Int)r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_x86g_calculate_condition(X86CondNZ),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xD0 ... 0xD7: /* FCMOVNBE ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xD0;
+               DIP("fcmovnbe %%st(%d), %%st(0)\n", (Int)r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_x86g_calculate_condition(X86CondNBE),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xD8 ... 0xDF: /* FCMOVNU ST(i), ST(0) */
+               r_src = (UInt)modrm - 0xD8;
+               DIP("fcmovnu %%st(%d), %%st(0)\n", (Int)r_src);
+               put_ST_UNCHECKED(0, 
+                                IRExpr_ITE( 
+                                    mk_x86g_calculate_condition(X86CondNP),
+                                    get_ST(r_src), get_ST(0)) );
+               break;
+
+            case 0xE2:
+               DIP("fnclex\n");
+               break;
+
+            case 0xE3: {
+               /* Uses dirty helper: 
+                     void x86g_dirtyhelper_FINIT ( VexGuestX86State* ) */
+               IRDirty* d  = unsafeIRDirty_0_N ( 
+                                0/*regparms*/, 
+                                "x86g_dirtyhelper_FINIT", 
+                                &x86g_dirtyhelper_FINIT,
+                                mkIRExprVec_1(IRExpr_BBPTR())
+                             );
+
+               /* declare we're writing guest state */
+               d->nFxState = 5;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Write;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Write;
+               d->fxState[1].offset = OFFB_FPREGS;
+               d->fxState[1].size   = 8 * sizeof(ULong);
+
+               d->fxState[2].fx     = Ifx_Write;
+               d->fxState[2].offset = OFFB_FPTAGS;
+               d->fxState[2].size   = 8 * sizeof(UChar);
+
+               d->fxState[3].fx     = Ifx_Write;
+               d->fxState[3].offset = OFFB_FPROUND;
+               d->fxState[3].size   = sizeof(UInt);
+
+               d->fxState[4].fx     = Ifx_Write;
+               d->fxState[4].offset = OFFB_FC3210;
+               d->fxState[4].size   = sizeof(UInt);
+
+               stmt( IRStmt_Dirty(d) );
+
+               DIP("fninit\n");
+               break;
+            }
+
+            case 0xE8 ... 0xEF: /* FUCOMI %st(0),%st(?) */
+               fp_do_ucomi_ST0_STi( (UInt)modrm - 0xE8, False );
+               break;
+
+            case 0xF0 ... 0xF7: /* FCOMI %st(0),%st(?) */
+               fp_do_ucomi_ST0_STi( (UInt)modrm - 0xF0, False );
+               break;
+
+            default:
+               goto decode_fail;
+         }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDC opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDC) {
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+         delta += len;
+
+         switch (gregOfRM(modrm)) {
+
+            case 0: /* FADD double-real */
+               fp_do_op_mem_ST_0 ( addr, "add", dis_buf, Iop_AddF64, True );
+               break;
+
+            case 1: /* FMUL double-real */
+               fp_do_op_mem_ST_0 ( addr, "mul", dis_buf, Iop_MulF64, True );
+               break;
+
+            case 2: /* FCOM double-real */
+               DIP("fcoml %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      loadLE(Ity_F64,mkexpr(addr))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               break;  
+
+            case 3: /* FCOMP double-real */
+               DIP("fcompl %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      loadLE(Ity_F64,mkexpr(addr))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               fp_pop();
+               break;  
+
+            case 4: /* FSUB double-real */
+               fp_do_op_mem_ST_0 ( addr, "sub", dis_buf, Iop_SubF64, True );
+               break;
+
+            case 5: /* FSUBR double-real */
+               fp_do_oprev_mem_ST_0 ( addr, "subr", dis_buf, Iop_SubF64, True );
+               break;
+
+            case 6: /* FDIV double-real */
+               fp_do_op_mem_ST_0 ( addr, "div", dis_buf, Iop_DivF64, True );
+               break;
+
+            case 7: /* FDIVR double-real */
+               fp_do_oprev_mem_ST_0 ( addr, "divr", dis_buf, Iop_DivF64, True );
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+               vex_printf("first_opcode == 0xDC\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FADD %st(0),%st(?) */
+               fp_do_op_ST_ST ( "add", Iop_AddF64, 0, modrm - 0xC0, False );
+               break;
+
+            case 0xC8 ... 0xCF: /* FMUL %st(0),%st(?) */
+               fp_do_op_ST_ST ( "mul", Iop_MulF64, 0, modrm - 0xC8, False );
+               break;
+
+            case 0xE0 ... 0xE7: /* FSUBR %st(0),%st(?) */
+               fp_do_oprev_ST_ST ( "subr", Iop_SubF64, 0, modrm - 0xE0, False );
+               break;
+
+            case 0xE8 ... 0xEF: /* FSUB %st(0),%st(?) */
+               fp_do_op_ST_ST ( "sub", Iop_SubF64, 0, modrm - 0xE8, False );
+               break;
+
+            case 0xF0 ... 0xF7: /* FDIVR %st(0),%st(?) */
+               fp_do_oprev_ST_ST ( "divr", Iop_DivF64, 0, modrm - 0xF0, False );
+               break;
+
+            case 0xF8 ... 0xFF: /* FDIV %st(0),%st(?) */
+               fp_do_op_ST_ST ( "div", Iop_DivF64, 0, modrm - 0xF8, False );
+               break;
+
+            default:
+               goto decode_fail;
+         }
+
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDD opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDD) {
+
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+         delta += len;
+
+         switch (gregOfRM(modrm)) {
+
+            case 0: /* FLD double-real */
+               DIP("fldl %s\n", dis_buf);
+               fp_push();
+               put_ST(0, loadLE(Ity_F64, mkexpr(addr)));
+               break;
+
+            case 1: /* FISTTPQ m64 (SSE3) */
+               DIP("fistppll %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI64S, mkU32(Irrm_ZERO), get_ST(0)) );
+               fp_pop();
+               break;
+
+            case 2: /* FST double-real */
+               DIP("fstl %s\n", dis_buf);
+               storeLE(mkexpr(addr), get_ST(0));
+               break;
+
+            case 3: /* FSTP double-real */
+               DIP("fstpl %s\n", dis_buf);
+               storeLE(mkexpr(addr), get_ST(0));
+               fp_pop();
+               break;
+
+            case 4: { /* FRSTOR m108 */
+               /* Uses dirty helper: 
+                     VexEmNote x86g_do_FRSTOR ( VexGuestX86State*, Addr32 ) */
+               IRTemp   ew = newTemp(Ity_I32);
+               IRDirty* d  = unsafeIRDirty_0_N ( 
+                                0/*regparms*/, 
+                                "x86g_dirtyhelper_FRSTOR", 
+                                &x86g_dirtyhelper_FRSTOR,
+                                mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+                             );
+               d->tmp   = ew;
+               /* declare we're reading memory */
+               d->mFx   = Ifx_Read;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 108;
+
+               /* declare we're writing guest state */
+               d->nFxState = 5;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Write;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Write;
+               d->fxState[1].offset = OFFB_FPREGS;
+               d->fxState[1].size   = 8 * sizeof(ULong);
+
+               d->fxState[2].fx     = Ifx_Write;
+               d->fxState[2].offset = OFFB_FPTAGS;
+               d->fxState[2].size   = 8 * sizeof(UChar);
+
+               d->fxState[3].fx     = Ifx_Write;
+               d->fxState[3].offset = OFFB_FPROUND;
+               d->fxState[3].size   = sizeof(UInt);
+
+               d->fxState[4].fx     = Ifx_Write;
+               d->fxState[4].offset = OFFB_FC3210;
+               d->fxState[4].size   = sizeof(UInt);
+
+               stmt( IRStmt_Dirty(d) );
+
+               /* ew contains any emulation warning we may need to
+                  issue.  If needed, side-exit to the next insn,
+                  reporting the warning, so that Valgrind's dispatcher
+                  sees the warning. */
+               put_emwarn( mkexpr(ew) );
+               stmt( 
+                  IRStmt_Exit(
+                     binop(Iop_CmpNE32, mkexpr(ew), mkU32(0)),
+                     Ijk_EmWarn,
+                     IRConst_U32( ((Addr32)guest_EIP_bbstart)+delta),
+                     OFFB_EIP
+                  )
+               );
+
+               DIP("frstor %s\n", dis_buf);
+               break;
+            }
+
+            case 6: { /* FNSAVE m108 */
+               /* Uses dirty helper: 
+                     void x86g_do_FSAVE ( VexGuestX86State*, UInt ) */
+               IRDirty* d = unsafeIRDirty_0_N ( 
+                               0/*regparms*/, 
+                               "x86g_dirtyhelper_FSAVE", 
+                               &x86g_dirtyhelper_FSAVE,
+                               mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+                            );
+               /* declare we're writing memory */
+               d->mFx   = Ifx_Write;
+               d->mAddr = mkexpr(addr);
+               d->mSize = 108;
+
+               /* declare we're reading guest state */
+               d->nFxState = 5;
+               vex_bzero(&d->fxState, sizeof(d->fxState));
+
+               d->fxState[0].fx     = Ifx_Read;
+               d->fxState[0].offset = OFFB_FTOP;
+               d->fxState[0].size   = sizeof(UInt);
+
+               d->fxState[1].fx     = Ifx_Read;
+               d->fxState[1].offset = OFFB_FPREGS;
+               d->fxState[1].size   = 8 * sizeof(ULong);
+
+               d->fxState[2].fx     = Ifx_Read;
+               d->fxState[2].offset = OFFB_FPTAGS;
+               d->fxState[2].size   = 8 * sizeof(UChar);
+
+               d->fxState[3].fx     = Ifx_Read;
+               d->fxState[3].offset = OFFB_FPROUND;
+               d->fxState[3].size   = sizeof(UInt);
+
+               d->fxState[4].fx     = Ifx_Read;
+               d->fxState[4].offset = OFFB_FC3210;
+               d->fxState[4].size   = sizeof(UInt);
+
+               stmt( IRStmt_Dirty(d) );
+
+               DIP("fnsave %s\n", dis_buf);
+               break;
+            }
+
+            case 7: { /* FNSTSW m16 */
+               IRExpr* sw = get_FPU_sw();
+               vassert(typeOfIRExpr(irsb->tyenv, sw) == Ity_I16);
+               storeLE( mkexpr(addr), sw );
+               DIP("fnstsw %s\n", dis_buf);
+               break;
+            }
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+               vex_printf("first_opcode == 0xDD\n");
+               goto decode_fail;
+         }
+      } else {
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FFREE %st(?) */
+               r_dst = (UInt)modrm - 0xC0;
+               DIP("ffree %%st(%d)\n", (Int)r_dst);
+               put_ST_TAG ( r_dst, mkU8(0) );
+               break;
+
+            case 0xD0 ... 0xD7: /* FST %st(0),%st(?) */
+               r_dst = (UInt)modrm - 0xD0;
+               DIP("fst %%st(0),%%st(%d)\n", (Int)r_dst);
+               /* P4 manual says: "If the destination operand is a
+                  non-empty register, the invalid-operation exception
+                  is not generated.  Hence put_ST_UNCHECKED. */
+               put_ST_UNCHECKED(r_dst, get_ST(0));
+               break;
+
+            case 0xD8 ... 0xDF: /* FSTP %st(0),%st(?) */
+               r_dst = (UInt)modrm - 0xD8;
+               DIP("fstp %%st(0),%%st(%d)\n", (Int)r_dst);
+               /* P4 manual says: "If the destination operand is a
+                  non-empty register, the invalid-operation exception
+                  is not generated.  Hence put_ST_UNCHECKED. */
+               put_ST_UNCHECKED(r_dst, get_ST(0));
+               fp_pop();
+               break;
+
+            case 0xE0 ... 0xE7: /* FUCOM %st(0),%st(?) */
+               r_dst = (UInt)modrm - 0xE0;
+               DIP("fucom %%st(0),%%st(%d)\n", (Int)r_dst);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(r_dst)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               break;
+
+            case 0xE8 ... 0xEF: /* FUCOMP %st(0),%st(?) */
+               r_dst = (UInt)modrm - 0xE8;
+               DIP("fucomp %%st(0),%%st(%d)\n", (Int)r_dst);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(r_dst)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               fp_pop();
+               break;
+
+            default:
+               goto decode_fail;
+         }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDE opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDE) {
+
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IROp   fop;
+         IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+         delta += len;
+
+         switch (gregOfRM(modrm)) {
+
+            case 0: /* FIADD m16int */ /* ST(0) += m16int */
+               DIP("fiaddw %s\n", dis_buf);
+               fop = Iop_AddF64;
+               goto do_fop_m16;
+
+            case 1: /* FIMUL m16int */ /* ST(0) *= m16int */
+               DIP("fimulw %s\n", dis_buf);
+               fop = Iop_MulF64;
+               goto do_fop_m16;
+
+            case 2: /* FICOM m16int */
+               DIP("ficomw %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      unop(Iop_I32StoF64, 
+                                         unop(Iop_16Sto32,
+                                           loadLE(Ity_I16,mkexpr(addr))))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               break;
+
+            case 3: /* FICOMP m16int */
+               DIP("ficompw %s\n", dis_buf);
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, 
+                                      get_ST(0),
+                                      unop(Iop_I32StoF64, 
+                                         unop(Iop_16Sto32,
+                                              loadLE(Ity_I16,mkexpr(addr))))),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               fp_pop();
+               break;
+
+            case 4: /* FISUB m16int */ /* ST(0) -= m16int */
+               DIP("fisubw %s\n", dis_buf);
+               fop = Iop_SubF64;
+               goto do_fop_m16;
+
+            case 5: /* FISUBR m16int */ /* ST(0) = m16int - ST(0) */
+               DIP("fisubrw %s\n", dis_buf);
+               fop = Iop_SubF64;
+               goto do_foprev_m16;
+
+            case 6: /* FIDIV m16int */ /* ST(0) /= m16int */
+               DIP("fisubw %s\n", dis_buf);
+               fop = Iop_DivF64;
+               goto do_fop_m16;
+
+            case 7: /* FIDIVR m16int */ /* ST(0) = m16int / ST(0) */
+               DIP("fidivrw %s\n", dis_buf);
+               fop = Iop_DivF64;
+               goto do_foprev_m16;
+
+            do_fop_m16:
+               put_ST_UNCHECKED(0, 
+                  triop(fop, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        get_ST(0),
+                        unop(Iop_I32StoF64,
+                             unop(Iop_16Sto32, 
+                                  loadLE(Ity_I16, mkexpr(addr))))));
+               break;
+
+            do_foprev_m16:
+               put_ST_UNCHECKED(0, 
+                  triop(fop, 
+                        get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        unop(Iop_I32StoF64,
+                             unop(Iop_16Sto32, 
+                                  loadLE(Ity_I16, mkexpr(addr)))),
+                        get_ST(0)));
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+               vex_printf("first_opcode == 0xDE\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FADDP %st(0),%st(?) */
+               fp_do_op_ST_ST ( "add", Iop_AddF64, 0, modrm - 0xC0, True );
+               break;
+
+            case 0xC8 ... 0xCF: /* FMULP %st(0),%st(?) */
+               fp_do_op_ST_ST ( "mul", Iop_MulF64, 0, modrm - 0xC8, True );
+               break;
+
+            case 0xD9: /* FCOMPP %st(0),%st(1) */
+               DIP("fuompp %%st(0),%%st(1)\n");
+               /* This forces C1 to zero, which isn't right. */
+               put_C3210( 
+                   binop( Iop_And32,
+                          binop(Iop_Shl32, 
+                                binop(Iop_CmpF64, get_ST(0), get_ST(1)),
+                                mkU8(8)),
+                          mkU32(0x4500)
+                   ));
+               fp_pop();
+               fp_pop();
+               break;
+
+            case 0xE0 ... 0xE7: /* FSUBRP %st(0),%st(?) */
+               fp_do_oprev_ST_ST ( "subr", Iop_SubF64, 0,  modrm - 0xE0, True );
+               break;
+
+            case 0xE8 ... 0xEF: /* FSUBP %st(0),%st(?) */
+               fp_do_op_ST_ST ( "sub", Iop_SubF64, 0,  modrm - 0xE8, True );
+               break;
+
+            case 0xF0 ... 0xF7: /* FDIVRP %st(0),%st(?) */
+               fp_do_oprev_ST_ST ( "divr", Iop_DivF64, 0, modrm - 0xF0, True );
+               break;
+
+            case 0xF8 ... 0xFF: /* FDIVP %st(0),%st(?) */
+               fp_do_op_ST_ST ( "div", Iop_DivF64, 0, modrm - 0xF8, True );
+               break;
+
+            default: 
+               goto decode_fail;
+         }
+
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDF opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDF) {
+
+      if (modrm < 0xC0) {
+
+         /* bits 5,4,3 are an opcode extension, and the modRM also
+            specifies an address. */
+         IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+         delta += len;
+
+         switch (gregOfRM(modrm)) {
+
+            case 0: /* FILD m16int */
+               DIP("fildw %s\n", dis_buf);
+               fp_push();
+               put_ST(0, unop(Iop_I32StoF64,
+                              unop(Iop_16Sto32,
+                                   loadLE(Ity_I16, mkexpr(addr)))));
+               break;
+
+            case 1: /* FISTTPS m16 (SSE3) */
+               DIP("fisttps %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI16S, mkU32(Irrm_ZERO), get_ST(0)) );
+               fp_pop();
+               break;
+
+            case 2: /* FIST m16 */
+               DIP("fistp %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI16S, get_roundingmode(), get_ST(0)) );
+               break;
+
+            case 3: /* FISTP m16 */
+               DIP("fistps %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI16S, get_roundingmode(), get_ST(0)) );
+               fp_pop();
+               break;
+
+            case 5: /* FILD m64 */
+               DIP("fildll %s\n", dis_buf);
+               fp_push();
+               put_ST(0, binop(Iop_I64StoF64,
+                               get_roundingmode(),
+                               loadLE(Ity_I64, mkexpr(addr))));
+               break;
+
+            case 7: /* FISTP m64 */
+               DIP("fistpll %s\n", dis_buf);
+               storeLE( mkexpr(addr), 
+                        binop(Iop_F64toI64S, get_roundingmode(), get_ST(0)) );
+               fp_pop();
+               break;
+
+            default:
+               vex_printf("unhandled opc_aux = 0x%2x\n", gregOfRM(modrm));
+               vex_printf("first_opcode == 0xDF\n");
+               goto decode_fail;
+         }
+
+      } else {
+
+         delta++;
+         switch (modrm) {
+
+            case 0xC0: /* FFREEP %st(0) */
+               DIP("ffreep %%st(%d)\n", 0);
+               put_ST_TAG ( 0, mkU8(0) );
+               fp_pop();
+               break;
+
+            case 0xE0: /* FNSTSW %ax */
+               DIP("fnstsw %%ax\n");
+               /* Get the FPU status word value and dump it in %AX. */
+               if (0) {
+                  /* The obvious thing to do is simply dump the 16-bit
+                     status word value in %AX.  However, due to a
+                     limitation in Memcheck's origin tracking
+                     machinery, this causes Memcheck not to track the
+                     origin of any undefinedness into %AH (only into
+                     %AL/%AX/%EAX), which means origins are lost in
+                     the sequence "fnstsw %ax; test $M,%ah; jcond .." */
+                  putIReg(2, R_EAX, get_FPU_sw());
+               } else {
+                  /* So a somewhat lame kludge is to make it very
+                     clear to Memcheck that the value is written to
+                     both %AH and %AL.  This generates marginally
+                     worse code, but I don't think it matters much. */
+                  IRTemp t16 = newTemp(Ity_I16);
+                  assign(t16, get_FPU_sw());
+                  putIReg( 1, R_AL, unop(Iop_16to8, mkexpr(t16)) );
+                  putIReg( 1, R_AH, unop(Iop_16HIto8, mkexpr(t16)) );
+               }
+               break;
+
+            case 0xE8 ... 0xEF: /* FUCOMIP %st(0),%st(?) */
+               fp_do_ucomi_ST0_STi( (UInt)modrm - 0xE8, True );
+               break;
+
+            case 0xF0 ... 0xF7: /* FCOMIP %st(0),%st(?) */
+               /* not really right since COMIP != UCOMIP */
+               fp_do_ucomi_ST0_STi( (UInt)modrm - 0xF0, True );
+               break;
+
+            default: 
+               goto decode_fail;
+         }
+      }
+
+   }
+
+   else
+   vpanic("dis_FPU(x86): invalid primary opcode");
+
+   *decode_ok = True;
+   return delta;
+
+  decode_fail:
+   *decode_ok = False;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- MMX INSTRUCTIONS                                     ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/* Effect of MMX insns on x87 FPU state (table 11-2 of 
+   IA32 arch manual, volume 3):
+
+   Read from, or write to MMX register (viz, any insn except EMMS):
+   * All tags set to Valid (non-empty) -- FPTAGS[i] := nonzero
+   * FP stack pointer set to zero
+
+   EMMS:
+   * All tags set to Invalid (empty) -- FPTAGS[i] := zero
+   * FP stack pointer set to zero
+*/
+
+/* Emit the effect that any MMX insn other than EMMS has on the x87
+   state: the FP stack pointer is zeroed and all eight tags are set
+   to Valid (non-empty).  See table 11-2, IA32 arch manual vol 3. */
+static void do_MMX_preamble ( void )
+{
+   IRRegArray* tagArray = mkIRRegArray( OFFB_FPTAGS, Ity_I8, 8 );
+   IRExpr*     ixZero   = mkU32(0);
+   IRExpr*     tagValid = mkU8(1);
+   Int         reg;
+   put_ftop(ixZero);
+   for (reg = 0; reg < 8; reg++)
+      stmt( IRStmt_PutI( mkIRPutI(tagArray, ixZero, reg, tagValid) ) );
+}
+
+/* Emit the effect of EMMS on the x87 state: the FP stack pointer is
+   zeroed and all eight tags are set to Invalid (empty).  See table
+   11-2, IA32 arch manual vol 3. */
+static void do_EMMS_preamble ( void )
+{
+   IRRegArray* tagArray = mkIRRegArray( OFFB_FPTAGS, Ity_I8, 8 );
+   IRExpr*     ixZero   = mkU32(0);
+   IRExpr*     tagEmpty = mkU8(0);
+   Int         reg;
+   put_ftop(ixZero);
+   for (reg = 0; reg < 8; reg++)
+      stmt( IRStmt_PutI( mkIRPutI(tagArray, ixZero, reg, tagEmpty) ) );
+}
+
+
+/* Read MMX register |archreg| (0 .. 7) as an I64.  MMX registers are
+   aliased onto the low 64 bits of the x87 register file, hence the
+   Get from OFFB_FPREGS. */
+static IRExpr* getMMXReg ( UInt archreg )
+{
+   vassert(archreg < 8);
+   return IRExpr_Get( OFFB_FPREGS + 8 * archreg, Ity_I64 );
+}
+
+
+/* Write I64 expression |e| to MMX register |archreg| (0 .. 7), that
+   is, to the low 64 bits of the corresponding x87 register slot. */
+static void putMMXReg ( UInt archreg, IRExpr* e )
+{
+   vassert(archreg < 8);
+   vassert(typeOfIRExpr(irsb->tyenv,e) == Ity_I64);
+   stmt( IRStmt_Put( OFFB_FPREGS + 8 * archreg, e ) );
+}
+
+
+/* Helper for non-shift MMX insns.  Note this is incomplete in the
+   sense that it does not first call do_MMX_preamble() -- that is the
+   responsibility of its caller. */
+
+static 
+UInt dis_MMXop_regmem_to_reg ( UChar  sorb,
+                               Int    delta,
+                               UChar  opc,
+                               const HChar* name,
+                               Bool   show_granularity )
+{
+   HChar   dis_buf[50];
+   UChar   modrm = getIByte(delta);
+   Bool    isReg = epartIsReg(modrm);
+   IRExpr* argL  = NULL;
+   IRExpr* argR  = NULL;
+   IRExpr* argG  = NULL;
+   IRExpr* argE  = NULL;
+   IRTemp  res   = newTemp(Ity_I64);
+
+   Bool    invG  = False;
+   IROp    op    = Iop_INVALID;
+   void*   hAddr = NULL;
+   Bool    eLeft = False;
+   const HChar*  hName = NULL;
+
+#  define XXX(_name) do { hAddr = &_name; hName = #_name; } while (0)
+
+   /* Map the second opcode byte either to a 64-bit SIMD IROp (op),
+      or -- via XXX -- to a clean helper which computes the result
+      instead.  Exactly one of the two is set. */
+   switch (opc) {
+      /* Original MMX ones */
+      case 0xFC: op = Iop_Add8x8; break;
+      case 0xFD: op = Iop_Add16x4; break;
+      case 0xFE: op = Iop_Add32x2; break;
+
+      case 0xEC: op = Iop_QAdd8Sx8; break;
+      case 0xED: op = Iop_QAdd16Sx4; break;
+
+      case 0xDC: op = Iop_QAdd8Ux8; break;
+      case 0xDD: op = Iop_QAdd16Ux4; break;
+
+      case 0xF8: op = Iop_Sub8x8;  break;
+      case 0xF9: op = Iop_Sub16x4; break;
+      case 0xFA: op = Iop_Sub32x2; break;
+
+      case 0xE8: op = Iop_QSub8Sx8; break;
+      case 0xE9: op = Iop_QSub16Sx4; break;
+
+      case 0xD8: op = Iop_QSub8Ux8; break;
+      case 0xD9: op = Iop_QSub16Ux4; break;
+
+      case 0xE5: op = Iop_MulHi16Sx4; break;
+      case 0xD5: op = Iop_Mul16x4; break;
+      case 0xF5: XXX(x86g_calculate_mmx_pmaddwd); break;
+
+      case 0x74: op = Iop_CmpEQ8x8; break;
+      case 0x75: op = Iop_CmpEQ16x4; break;
+      case 0x76: op = Iop_CmpEQ32x2; break;
+
+      case 0x64: op = Iop_CmpGT8Sx8; break;
+      case 0x65: op = Iop_CmpGT16Sx4; break;
+      case 0x66: op = Iop_CmpGT32Sx2; break;
+
+      /* Pack/unpack ops: the E operand goes on the left (eLeft). */
+      case 0x6B: op = Iop_QNarrowBin32Sto16Sx4; eLeft = True; break;
+      case 0x63: op = Iop_QNarrowBin16Sto8Sx8;  eLeft = True; break;
+      case 0x67: op = Iop_QNarrowBin16Sto8Ux8;  eLeft = True; break;
+
+      case 0x68: op = Iop_InterleaveHI8x8;  eLeft = True; break;
+      case 0x69: op = Iop_InterleaveHI16x4; eLeft = True; break;
+      case 0x6A: op = Iop_InterleaveHI32x2; eLeft = True; break;
+
+      case 0x60: op = Iop_InterleaveLO8x8;  eLeft = True; break;
+      case 0x61: op = Iop_InterleaveLO16x4; eLeft = True; break;
+      case 0x62: op = Iop_InterleaveLO32x2; eLeft = True; break;
+
+      case 0xDB: op = Iop_And64; break;
+      /* PANDN: the G argument is inverted first (invG). */
+      case 0xDF: op = Iop_And64; invG = True; break;
+      case 0xEB: op = Iop_Or64; break;
+      case 0xEF: /* Possibly do better here if argL and argR are the
+                    same reg */
+                 op = Iop_Xor64; break;
+
+      /* Introduced in SSE1 */
+      case 0xE0: op = Iop_Avg8Ux8;    break;
+      case 0xE3: op = Iop_Avg16Ux4;   break;
+      case 0xEE: op = Iop_Max16Sx4;   break;
+      case 0xDE: op = Iop_Max8Ux8;    break;
+      case 0xEA: op = Iop_Min16Sx4;   break;
+      case 0xDA: op = Iop_Min8Ux8;    break;
+      case 0xE4: op = Iop_MulHi16Ux4; break;
+      case 0xF6: XXX(x86g_calculate_mmx_psadbw); break;
+
+      /* Introduced in SSE2 */
+      case 0xD4: op = Iop_Add64; break;
+      case 0xFB: op = Iop_Sub64; break;
+
+      default: 
+         vex_printf("\n0x%x\n", (Int)opc);
+         vpanic("dis_MMXop_regmem_to_reg");
+   }
+
+#  undef XXX
+
+   /* G is both a source and the destination register. */
+   argG = getMMXReg(gregOfRM(modrm));
+   if (invG)
+      argG = unop(Iop_Not64, argG);
+
+   /* E is the other source: either an MMX register or a 64-bit load. */
+   if (isReg) {
+      delta++;
+      argE = getMMXReg(eregOfRM(modrm));
+   } else {
+      Int    len;
+      IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+      delta += len;
+      argE = loadLE(Ity_I64, mkexpr(addr));
+   }
+
+   /* Choose operand order; see eLeft above. */
+   if (eLeft) {
+      argL = argE;
+      argR = argG;
+   } else {
+      argL = argG;
+      argR = argE;
+   }
+
+   /* Compute the result, either directly in IR or via the helper. */
+   if (op != Iop_INVALID) {
+      vassert(hName == NULL);
+      vassert(hAddr == NULL);
+      assign(res, binop(op, argL, argR));
+   } else {
+      vassert(hName != NULL);
+      vassert(hAddr != NULL);
+      assign( res, 
+              mkIRExprCCall(
+                 Ity_I64, 
+                 0/*regparms*/, hName, hAddr,
+                 mkIRExprVec_2( argL, argR )
+              ) 
+            );
+   }
+
+   putMMXReg( gregOfRM(modrm), mkexpr(res) );
+
+   DIP("%s%s %s, %s\n", 
+       name, show_granularity ? nameMMXGran(opc & 3) : "",
+       ( isReg ? nameMMXReg(eregOfRM(modrm)) : dis_buf ),
+       nameMMXReg(gregOfRM(modrm)) );
+
+   return delta;
+}
+
+
+/* Vector by scalar shift of G by the amount specified at the bottom
+   of E.  This is a straight copy of dis_SSE_shiftG_byE.
+
+   Per PSLLx/PSRLx semantics, a logical shift by an amount >= the
+   lane size yields zero; per PSRAx semantics, an arithmetic shift
+   clamps the amount at lane size - 1. */
+
+static UInt dis_MMX_shiftG_byE ( UChar sorb, Int delta, 
+                                 const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen, size;
+   IRTemp  addr;
+   Bool    shl, shr, sar;
+   UChar   rm   = getIByte(delta);
+   IRTemp  g0   = newTemp(Ity_I64);
+   IRTemp  g1   = newTemp(Ity_I64);
+   IRTemp  amt  = newTemp(Ity_I32);
+   IRTemp  amt8 = newTemp(Ity_I8);
+
+   /* Fetch the shift amount from the low 32 bits of E (register or
+      memory form). */
+   if (epartIsReg(rm)) {
+      assign( amt, unop(Iop_64to32, getMMXReg(eregOfRM(rm))) );
+      DIP("%s %s,%s\n", opname,
+                        nameMMXReg(eregOfRM(rm)),
+                        nameMMXReg(gregOfRM(rm)) );
+      delta++;
+   } else {
+      addr = disAMode ( &alen, sorb, delta, dis_buf );
+      assign( amt, loadLE(Ity_I32, mkexpr(addr)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameMMXReg(gregOfRM(rm)) );
+      delta += alen;
+   }
+   assign( g0,   getMMXReg(gregOfRM(rm)) );
+   assign( amt8, unop(Iop_32to8, mkexpr(amt)) );
+
+   /* size is the lane width in bits; the out-of-range test below
+      depends on it. */
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      /* FIXED: was size = 32, which let shift amounts 16..31 reach
+         Iop_ShlN16x4 instead of producing zero as PSLLW requires.
+         Lanes here are 16 bits wide (cf. Iop_ShrN16x4 below and
+         dis_MMX_shiftE_imm). */
+      case Iop_ShlN16x4: shl = True; size = 16; break;
+      case Iop_ShlN32x2: shl = True; size = 32; break;
+      case Iop_Shl64:    shl = True; size = 64; break;
+      case Iop_ShrN16x4: shr = True; size = 16; break;
+      case Iop_ShrN32x2: shr = True; size = 32; break;
+      case Iop_Shr64:    shr = True; size = 64; break;
+      case Iop_SarN16x4: sar = True; size = 16; break;
+      case Iop_SarN32x2: sar = True; size = 32; break;
+      default: vassert(0);
+   }
+
+   if (shl || shr) {
+     /* Logical shift: amounts >= lane size give zero. */
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           mkU64(0)
+        )
+     );
+   } else 
+   if (sar) {
+     /* Arithmetic shift: amounts >= lane size behave as size-1. */
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           binop(op, mkexpr(g0), mkU8(size-1))
+        )
+     );
+   } else {
+      /*NOTREACHED*/
+      vassert(0);
+   }
+
+   putMMXReg( gregOfRM(rm), mkexpr(g1) );
+   return delta;
+}
+
+
+/* Vector shift of E by an immediate byte (a straight copy of
+   dis_SSE_shiftE_imm).  |op| determines both the shift kind and the
+   lane size; the immediate follows the modRM byte.  Logical shifts
+   by >= the lane size give zero; arithmetic shifts clamp the amount
+   at lane size - 1. */
+
+static 
+UInt dis_MMX_shiftE_imm ( Int delta, const HChar* opname, IROp op )
+{
+   Bool    isShl, isShr, isSar;
+   UChar   rm  = getIByte(delta);
+   IRTemp  src = newTemp(Ity_I64);
+   IRTemp  res = newTemp(Ity_I64);
+   UChar   amt, size;
+   vassert(epartIsReg(rm));
+   vassert(gregOfRM(rm) == 2 
+           || gregOfRM(rm) == 4 || gregOfRM(rm) == 6);
+   amt = getIByte(delta+1);
+   delta += 2;
+   DIP("%s $%d,%s\n", opname,
+                      (Int)amt,
+                      nameMMXReg(eregOfRM(rm)) );
+
+   assign( src, getMMXReg(eregOfRM(rm)) );
+
+   isShl = isShr = isSar = False;
+   size  = 0;
+   switch (op) {
+      case Iop_ShlN16x4: isShl = True; size = 16; break;
+      case Iop_ShlN32x2: isShl = True; size = 32; break;
+      case Iop_Shl64:    isShl = True; size = 64; break;
+      case Iop_SarN16x4: isSar = True; size = 16; break;
+      case Iop_SarN32x2: isSar = True; size = 32; break;
+      case Iop_ShrN16x4: isShr = True; size = 16; break;
+      case Iop_ShrN32x2: isShr = True; size = 32; break;
+      case Iop_Shr64:    isShr = True; size = 64; break;
+      default: vassert(0);
+   }
+
+   /* The amount is known at decode time, so the out-of-range case is
+      resolved here rather than in generated IR. */
+   if (isShl || isShr) {
+      assign( res, amt >= size 
+                      ? mkU64(0)
+                      : binop(op, mkexpr(src), mkU8(amt))
+      );
+   } else 
+   if (isSar) {
+      assign( res, amt >= size 
+                      ? binop(op, mkexpr(src), mkU8(size-1))
+                      : binop(op, mkexpr(src), mkU8(amt))
+      );
+   } else {
+      /*NOTREACHED*/
+      vassert(0);
+   }
+
+   putMMXReg( eregOfRM(rm), mkexpr(res) );
+   return delta;
+}
+
+
+/* Completely handle all MMX instructions except emms. */
+
+/* Disassemble one MMX instruction whose first opcode byte is at
+   guest-code offset 'delta'.  'sorb' is the segment-override prefix
+   (passed through to disAMode/handleSegOverride), 'sz' the operand
+   size implied by prefixes.  On success *decode_ok is set True and
+   the updated delta is returned; on an undecodable insn *decode_ok
+   is set False and the returned delta is ignored by the caller. */
+static
+UInt dis_MMX ( Bool* decode_ok, UChar sorb, Int sz, Int delta )
+{
+   Int   len;
+   UChar modrm;
+   HChar dis_buf[50];
+   UChar opc = getIByte(delta);
+   delta++;
+
+   /* dis_MMX handles all insns except emms. */
+   do_MMX_preamble();
+
+   switch (opc) {
+
+      case 0x6E: 
+         /* MOVD (src)ireg-or-mem (E), (dst)mmxreg (G)*/
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         modrm = getIByte(delta);
+         if (epartIsReg(modrm)) {
+            delta++;
+            /* Zero-extend the 32-bit source into the 64-bit MMX reg. */
+            putMMXReg(
+               gregOfRM(modrm),
+               binop( Iop_32HLto64,
+                      mkU32(0),
+                      getIReg(4, eregOfRM(modrm)) ) );
+            DIP("movd %s, %s\n", 
+                nameIReg(4,eregOfRM(modrm)), nameMMXReg(gregOfRM(modrm)));
+         } else {
+            IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+            delta += len;
+            putMMXReg(
+               gregOfRM(modrm),
+               binop( Iop_32HLto64,
+                      mkU32(0),
+                      loadLE(Ity_I32, mkexpr(addr)) ) );
+            DIP("movd %s, %s\n", dis_buf, nameMMXReg(gregOfRM(modrm)));
+         }
+         break;
+
+      case 0x7E: /* MOVD (src)mmxreg (G), (dst)ireg-or-mem (E) */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         modrm = getIByte(delta);
+         if (epartIsReg(modrm)) {
+            delta++;
+            /* Only the low 32 bits of the MMX reg are transferred. */
+            putIReg( 4, eregOfRM(modrm),
+                     unop(Iop_64to32, getMMXReg(gregOfRM(modrm)) ) );
+            DIP("movd %s, %s\n", 
+                nameMMXReg(gregOfRM(modrm)), nameIReg(4,eregOfRM(modrm)));
+         } else {
+            IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+            delta += len;
+            storeLE( mkexpr(addr),
+                     unop(Iop_64to32, getMMXReg(gregOfRM(modrm)) ) );
+            DIP("movd %s, %s\n", nameMMXReg(gregOfRM(modrm)), dis_buf);
+         }
+         break;
+
+      case 0x6F:
+         /* MOVQ (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         modrm = getIByte(delta);
+         if (epartIsReg(modrm)) {
+            delta++;
+            putMMXReg( gregOfRM(modrm), getMMXReg(eregOfRM(modrm)) );
+            DIP("movq %s, %s\n", 
+                nameMMXReg(eregOfRM(modrm)), nameMMXReg(gregOfRM(modrm)));
+         } else {
+            IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+            delta += len;
+            putMMXReg( gregOfRM(modrm), loadLE(Ity_I64, mkexpr(addr)) );
+            DIP("movq %s, %s\n", 
+                dis_buf, nameMMXReg(gregOfRM(modrm)));
+         }
+         break;
+
+      case 0x7F:
+         /* MOVQ (src)mmxreg, (dst)mmxreg-or-mem */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         modrm = getIByte(delta);
+         if (epartIsReg(modrm)) {
+            delta++;
+            putMMXReg( eregOfRM(modrm), getMMXReg(gregOfRM(modrm)) );
+            DIP("movq %s, %s\n", 
+                nameMMXReg(gregOfRM(modrm)), nameMMXReg(eregOfRM(modrm)));
+         } else {
+            IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+            delta += len;
+            /* NOTE(review): MOVNTQ is apparently also routed here and
+               treated as a plain store, hence the "mov(nt)q" text. */
+            storeLE( mkexpr(addr), getMMXReg(gregOfRM(modrm)) );
+            DIP("mov(nt)q %s, %s\n", 
+                nameMMXReg(gregOfRM(modrm)), dis_buf);
+         }
+         break;
+
+      /* For the following groups, the low bits of the opcode select
+         the lane width (gg), and dis_MMXop_regmem_to_reg maps
+         (opc, name) to the right IROp; the final Bool says whether
+         the gg suffix exists for this op. */
+      case 0xFC: 
+      case 0xFD: 
+      case 0xFE: /* PADDgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "padd", True );
+         break;
+
+      case 0xEC: 
+      case 0xED: /* PADDSgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "padds", True );
+         break;
+
+      case 0xDC: 
+      case 0xDD: /* PADDUSgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "paddus", True );
+         break;
+
+      case 0xF8: 
+      case 0xF9: 
+      case 0xFA: /* PSUBgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "psub", True );
+         break;
+
+      case 0xE8: 
+      case 0xE9: /* PSUBSgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "psubs", True );
+         break;
+
+      case 0xD8: 
+      case 0xD9: /* PSUBUSgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "psubus", True );
+         break;
+
+      case 0xE5: /* PMULHW (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "pmulhw", False );
+         break;
+
+      case 0xD5: /* PMULLW (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "pmullw", False );
+         break;
+
+      case 0xF5: /* PMADDWD (src)mmxreg-or-mem, (dst)mmxreg */
+         /* NOTE(review): this case asserts sz==4 rather than jumping
+            to mmx_decode_failure like its siblings — inconsistent but
+            preserved as-is. */
+         vassert(sz == 4);
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "pmaddwd", False );
+         break;
+
+      case 0x74: 
+      case 0x75: 
+      case 0x76: /* PCMPEQgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "pcmpeq", True );
+         break;
+
+      case 0x64: 
+      case 0x65: 
+      case 0x66: /* PCMPGTgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "pcmpgt", True );
+         break;
+
+      case 0x6B: /* PACKSSDW (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "packssdw", False );
+         break;
+
+      case 0x63: /* PACKSSWB (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "packsswb", False );
+         break;
+
+      case 0x67: /* PACKUSWB (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "packuswb", False );
+         break;
+
+      case 0x68: 
+      case 0x69: 
+      case 0x6A: /* PUNPCKHgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "punpckh", True );
+         break;
+
+      case 0x60: 
+      case 0x61: 
+      case 0x62: /* PUNPCKLgg (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "punpckl", True );
+         break;
+
+      case 0xDB: /* PAND (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "pand", False );
+         break;
+
+      case 0xDF: /* PANDN (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "pandn", False );
+         break;
+
+      case 0xEB: /* POR (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "por", False );
+         break;
+
+      case 0xEF: /* PXOR (src)mmxreg-or-mem, (dst)mmxreg */
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         delta = dis_MMXop_regmem_to_reg ( sorb, delta, opc, "pxor", False );
+         break; 
+
+      /* Shifts where the amount comes from a reg-or-mem operand. */
+#     define SHIFT_BY_REG(_name,_op)                                 \
+                delta = dis_MMX_shiftG_byE(sorb, delta, _name, _op); \
+                break;
+
+      /* PSLLgg (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xF1: SHIFT_BY_REG("psllw", Iop_ShlN16x4);
+      case 0xF2: SHIFT_BY_REG("pslld", Iop_ShlN32x2);
+      case 0xF3: SHIFT_BY_REG("psllq", Iop_Shl64);
+
+      /* PSRLgg (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xD1: SHIFT_BY_REG("psrlw", Iop_ShrN16x4);
+      case 0xD2: SHIFT_BY_REG("psrld", Iop_ShrN32x2);
+      case 0xD3: SHIFT_BY_REG("psrlq", Iop_Shr64);
+
+      /* PSRAgg (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xE1: SHIFT_BY_REG("psraw", Iop_SarN16x4);
+      case 0xE2: SHIFT_BY_REG("psrad", Iop_SarN32x2);
+
+#     undef SHIFT_BY_REG
+
+      case 0x71: 
+      case 0x72: 
+      case 0x73: {
+         /* (sz==4): PSLLgg/PSRAgg/PSRLgg mmxreg by imm8 */
+         /* The reg field of the modrm byte (subopc) selects which
+            shift: 2=SRL, 4=SAR, 6=SHL; the opcode byte selects the
+            lane width. */
+         UChar byte2, subopc;
+         if (sz != 4) 
+            goto mmx_decode_failure;
+         byte2  = getIByte(delta);           /* amode / sub-opcode */
+         subopc = toUChar( (byte2 >> 3) & 7 );
+
+#        define SHIFT_BY_IMM(_name,_op)                         \
+             do { delta = dis_MMX_shiftE_imm(delta,_name,_op);  \
+             } while (0)
+
+              if (subopc == 2 /*SRL*/ && opc == 0x71) 
+                 SHIFT_BY_IMM("psrlw", Iop_ShrN16x4);
+         else if (subopc == 2 /*SRL*/ && opc == 0x72) 
+                 SHIFT_BY_IMM("psrld", Iop_ShrN32x2);
+         else if (subopc == 2 /*SRL*/ && opc == 0x73) 
+                 SHIFT_BY_IMM("psrlq", Iop_Shr64);
+
+         else if (subopc == 4 /*SAR*/ && opc == 0x71) 
+                 SHIFT_BY_IMM("psraw", Iop_SarN16x4);
+         else if (subopc == 4 /*SAR*/ && opc == 0x72) 
+                 SHIFT_BY_IMM("psrad", Iop_SarN32x2);
+
+         else if (subopc == 6 /*SHL*/ && opc == 0x71) 
+                 SHIFT_BY_IMM("psllw", Iop_ShlN16x4);
+         else if (subopc == 6 /*SHL*/ && opc == 0x72) 
+                 SHIFT_BY_IMM("pslld", Iop_ShlN32x2);
+         else if (subopc == 6 /*SHL*/ && opc == 0x73) 
+                 SHIFT_BY_IMM("psllq", Iop_Shl64);
+
+         else goto mmx_decode_failure;
+
+#        undef SHIFT_BY_IMM
+         break;
+      }
+
+      case 0xF7: {
+         /* MASKMOVQ: byte-wise conditional store of regD (G) to
+            [EDI], gated by the top bit of each byte of regM (E).
+            Register form only.  Implemented non-atomically as a
+            load/merge/store of the whole 8 bytes. */
+         IRTemp addr    = newTemp(Ity_I32);
+         IRTemp regD    = newTemp(Ity_I64);
+         IRTemp regM    = newTemp(Ity_I64);
+         IRTemp mask    = newTemp(Ity_I64);
+         IRTemp olddata = newTemp(Ity_I64);
+         IRTemp newdata = newTemp(Ity_I64);
+
+         modrm = getIByte(delta);
+         if (sz != 4 || (!epartIsReg(modrm)))
+            goto mmx_decode_failure;
+         delta++;
+
+         assign( addr, handleSegOverride( sorb, getIReg(4, R_EDI) ));
+         assign( regM, getMMXReg( eregOfRM(modrm) ));
+         assign( regD, getMMXReg( gregOfRM(modrm) ));
+         /* Replicate each byte's sign bit across the byte, giving a
+            00/FF per-byte mask. */
+         assign( mask, binop(Iop_SarN8x8, mkexpr(regM), mkU8(7)) );
+         assign( olddata, loadLE( Ity_I64, mkexpr(addr) ));
+         assign( newdata, 
+                 binop(Iop_Or64, 
+                       binop(Iop_And64, 
+                             mkexpr(regD), 
+                             mkexpr(mask) ),
+                       binop(Iop_And64, 
+                             mkexpr(olddata),
+                             unop(Iop_Not64, mkexpr(mask)))) );
+         storeLE( mkexpr(addr), mkexpr(newdata) );
+         DIP("maskmovq %s,%s\n", nameMMXReg( eregOfRM(modrm) ),
+                                 nameMMXReg( gregOfRM(modrm) ) );
+         break;
+      }
+
+      /* --- MMX decode failure --- */
+      default:
+      mmx_decode_failure:
+         *decode_ok = False;
+         return delta; /* ignored */
+
+   }
+
+   *decode_ok = True;
+   return delta;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- More misc arithmetic and other obscure insns.        ---*/
+/*------------------------------------------------------------*/
+
+/* Double length left and right shifts.  Apparently only required in
+   v-size (no b- variant). */
+/* Disassemble SHLD/SHRD Gv,Ev.  'modrm' has already been fetched by
+   the caller; 'shift_amt' (an Ity_I8 expression) is the shift count,
+   either an immediate (amt_is_literal True, in which case the extra
+   imm8 byte is consumed here) or CL.  'shift_amt_txt' is only for
+   disassembly printing.  Returns the updated delta. */
+static
+UInt dis_SHLRD_Gv_Ev ( UChar sorb,
+                       Int delta, UChar modrm,
+                       Int sz,
+                       IRExpr* shift_amt,
+                       Bool amt_is_literal,
+                       const HChar* shift_amt_txt,
+                       Bool left_shift )
+{
+   /* shift_amt :: Ity_I8 is the amount to shift.  shift_amt_txt is used
+      for printing it.   And eip on entry points at the modrm byte. */
+   Int len;
+   HChar dis_buf[50];
+
+   IRType ty       = szToITy(sz);
+   IRTemp gsrc     = newTemp(ty);
+   IRTemp esrc     = newTemp(ty);
+   IRTemp addr     = IRTemp_INVALID;
+   IRTemp tmpSH    = newTemp(Ity_I8);
+   IRTemp tmpL     = IRTemp_INVALID;
+   IRTemp tmpRes   = IRTemp_INVALID;
+   IRTemp tmpSubSh = IRTemp_INVALID;
+   IROp   mkpair;
+   IROp   getres;
+   IROp   shift;
+   IRExpr* mask = NULL;
+
+   vassert(sz == 2 || sz == 4);
+
+   /* The E-part is the destination; this is shifted.  The G-part
+      supplies bits to be shifted into the E-part, but is not
+      changed.  
+
+      If shifting left, form a double-length word with E at the top
+      and G at the bottom, and shift this left.  The result is then in
+      the high part.
+
+      If shifting right, form a double-length word with G at the top
+      and E at the bottom, and shift this right.  The result is then
+      at the bottom.  */
+
+   /* Fetch the operands. */
+
+   assign( gsrc, getIReg(sz, gregOfRM(modrm)) );
+
+   if (epartIsReg(modrm)) {
+      delta++;
+      assign( esrc, getIReg(sz, eregOfRM(modrm)) );
+      DIP("sh%cd%c %s, %s, %s\n",
+          ( left_shift ? 'l' : 'r' ), nameISize(sz), 
+          shift_amt_txt,
+          nameIReg(sz, gregOfRM(modrm)), nameIReg(sz, eregOfRM(modrm)));
+   } else {
+      addr = disAMode ( &len, sorb, delta, dis_buf );
+      delta += len;
+      assign( esrc, loadLE(ty, mkexpr(addr)) );
+      DIP("sh%cd%c %s, %s, %s\n", 
+          ( left_shift ? 'l' : 'r' ), nameISize(sz), 
+          shift_amt_txt,
+          nameIReg(sz, gregOfRM(modrm)), dis_buf);
+   }
+
+   /* Round up the relevant primops. */
+
+   if (sz == 4) {
+      tmpL     = newTemp(Ity_I64);
+      tmpRes   = newTemp(Ity_I32);
+      tmpSubSh = newTemp(Ity_I32);
+      mkpair   = Iop_32HLto64;
+      getres   = left_shift ? Iop_64HIto32 : Iop_64to32;
+      shift    = left_shift ? Iop_Shl64 : Iop_Shr64;
+      mask     = mkU8(31);
+   } else {
+      /* sz == 2 */
+      tmpL     = newTemp(Ity_I32);
+      tmpRes   = newTemp(Ity_I16);
+      tmpSubSh = newTemp(Ity_I16);
+      mkpair   = Iop_16HLto32;
+      getres   = left_shift ? Iop_32HIto16 : Iop_32to16;
+      shift    = left_shift ? Iop_Shl32 : Iop_Shr32;
+      mask     = mkU8(15);
+   }
+
+   /* Do the shift, calculate the subshift value, and set 
+      the flag thunk. */
+
+   /* Hardware masks the count to 5 (or 4) bits. */
+   assign( tmpSH, binop(Iop_And8, shift_amt, mask) );
+
+   if (left_shift)
+      assign( tmpL, binop(mkpair, mkexpr(esrc), mkexpr(gsrc)) );
+   else
+      assign( tmpL, binop(mkpair, mkexpr(gsrc), mkexpr(esrc)) );
+
+   /* tmpSubSh is the result shifted by one less than the count; the
+      flag thunk uses it to recover the bit shifted out (CF). */
+   assign( tmpRes, unop(getres, binop(shift, mkexpr(tmpL), mkexpr(tmpSH)) ) );
+   assign( tmpSubSh, 
+           unop(getres, 
+                binop(shift, 
+                      mkexpr(tmpL), 
+                      binop(Iop_And8, 
+                            binop(Iop_Sub8, mkexpr(tmpSH), mkU8(1) ),
+                            mask))) );
+
+   setFlags_DEP1_DEP2_shift ( left_shift ? Iop_Shl32 : Iop_Sar32,
+                              tmpRes, tmpSubSh, ty, tmpSH );
+
+   /* Put result back. */
+
+   if (epartIsReg(modrm)) {
+      putIReg(sz, eregOfRM(modrm), mkexpr(tmpRes));
+   } else {
+      storeLE( mkexpr(addr), mkexpr(tmpRes) );
+   }
+
+   /* Consume the imm8 shift-count byte, if there was one. */
+   if (amt_is_literal) delta++;
+   return delta;
+}
+
+
+/* Handle BT/BTS/BTR/BTC Gv, Ev.  Apparently b-size is not
+   required. */
+
+/* Which bit-test variant: plain BT, BTS (set), BTR (reset), BTC
+   (complement). */
+typedef enum { BtOpNone, BtOpSet, BtOpReset, BtOpComp } BtOp;
+
+/* Mnemonic suffix for a BtOp, for disassembly printing. */
+static const HChar* nameBtOp ( BtOp op )
+{
+   switch (op) {
+      case BtOpNone:  return "";
+      case BtOpSet:   return "s";
+      case BtOpReset: return "r";
+      case BtOpComp:  return "c";
+      default: vpanic("nameBtOp(x86)");
+   }
+}
+
+
+/* Disassemble BT/BTS/BTR/BTC Gv,Ev ('op' selects which).  To get a
+   single code path, a register E operand is spilled to the client's
+   stack (below ESP) and treated as memory; it is reloaded at the end.
+   'locked' requests an atomic (IRCAS) read-modify-write for the
+   memory form.  Returns the updated delta. */
+static
+UInt dis_bt_G_E ( const VexAbiInfo* vbi,
+                  UChar sorb, Bool locked, Int sz, Int delta, BtOp op )
+{
+   HChar  dis_buf[50];
+   UChar  modrm;
+   Int    len;
+   IRTemp t_fetched, t_bitno0, t_bitno1, t_bitno2, t_addr0, 
+          t_addr1, t_esp, t_mask, t_new;
+
+   vassert(sz == 2 || sz == 4);
+
+   t_fetched = t_bitno0 = t_bitno1 = t_bitno2 
+             = t_addr0 = t_addr1 = t_esp 
+             = t_mask = t_new = IRTemp_INVALID;
+
+   t_fetched = newTemp(Ity_I8);
+   t_new     = newTemp(Ity_I8);
+   t_bitno0  = newTemp(Ity_I32);
+   t_bitno1  = newTemp(Ity_I32);
+   t_bitno2  = newTemp(Ity_I8);
+   t_addr1   = newTemp(Ity_I32);
+   modrm     = getIByte(delta);
+
+   /* Bit index comes from G, sign-widened to 32 bits. */
+   assign( t_bitno0, widenSto32(getIReg(sz, gregOfRM(modrm))) );
+   
+   if (epartIsReg(modrm)) {
+      delta++;
+      /* Get it onto the client's stack. */
+      t_esp = newTemp(Ity_I32);
+      t_addr0 = newTemp(Ity_I32);
+
+      /* For the choice of the value 128, see comment in dis_bt_G_E in
+         guest_amd64_toIR.c.  We point out here only that 128 is
+         fast-cased in Memcheck and is > 0, so seems like a good
+         choice. */
+      vassert(vbi->guest_stack_redzone_size == 0);
+      assign( t_esp, binop(Iop_Sub32, getIReg(4, R_ESP), mkU32(128)) );
+      putIReg(4, R_ESP, mkexpr(t_esp));
+
+      storeLE( mkexpr(t_esp), getIReg(sz, eregOfRM(modrm)) );
+
+      /* Make t_addr0 point at it. */
+      assign( t_addr0, mkexpr(t_esp) );
+
+      /* Mask out upper bits of the shift amount, since we're doing a
+         reg. */
+      assign( t_bitno1, binop(Iop_And32, 
+                              mkexpr(t_bitno0), 
+                              mkU32(sz == 4 ? 31 : 15)) );
+
+   } else {
+      t_addr0 = disAMode ( &len, sorb, delta, dis_buf );
+      delta += len;
+      assign( t_bitno1, mkexpr(t_bitno0) );
+   }
+  
+   /* At this point: t_addr0 is the address being operated on.  If it
+      was a reg, we will have pushed it onto the client's stack.
+      t_bitno1 is the bit number, suitably masked in the case of a
+      reg.  */
+  
+   /* Now the main sequence. */
+   /* Address of the byte containing the selected bit; arithmetic
+      shift so a negative bit index moves backwards in memory. */
+   assign( t_addr1, 
+           binop(Iop_Add32, 
+                 mkexpr(t_addr0), 
+                 binop(Iop_Sar32, mkexpr(t_bitno1), mkU8(3))) );
+
+   /* t_addr1 now holds effective address */
+
+   assign( t_bitno2, 
+           unop(Iop_32to8, 
+                binop(Iop_And32, mkexpr(t_bitno1), mkU32(7))) );
+
+   /* t_bitno2 contains offset of bit within byte */
+
+   if (op != BtOpNone) {
+      t_mask = newTemp(Ity_I8);
+      assign( t_mask, binop(Iop_Shl8, mkU8(1), mkexpr(t_bitno2)) );
+   }
+
+   /* t_mask is now a suitable byte mask */
+
+   assign( t_fetched, loadLE(Ity_I8, mkexpr(t_addr1)) );
+
+   if (op != BtOpNone) {
+      switch (op) {
+         case BtOpSet:
+            assign( t_new,
+                    binop(Iop_Or8, mkexpr(t_fetched), mkexpr(t_mask)) );
+            break;
+         case BtOpComp:
+            assign( t_new,
+                    binop(Iop_Xor8, mkexpr(t_fetched), mkexpr(t_mask)) );
+            break;
+         case BtOpReset:
+            assign( t_new,
+                    binop(Iop_And8, mkexpr(t_fetched), 
+                                    unop(Iop_Not8, mkexpr(t_mask))) );
+            break;
+         default: 
+            vpanic("dis_bt_G_E(x86)");
+      }
+      /* LOCK prefix only matters for the genuine memory form. */
+      if (locked && !epartIsReg(modrm)) {
+         casLE( mkexpr(t_addr1), mkexpr(t_fetched)/*expd*/,
+                                 mkexpr(t_new)/*new*/,
+                                 guest_EIP_curr_instr );
+      } else {
+         storeLE( mkexpr(t_addr1), mkexpr(t_new) );
+      }
+   }
+ 
+   /* Side effect done; now get selected bit into Carry flag */
+   /* Flags: C=selected bit, O,S,Z,A,P undefined, so are set to zero. */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+   stmt( IRStmt_Put( 
+            OFFB_CC_DEP1,
+            binop(Iop_And32,
+                  binop(Iop_Shr32, 
+                        unop(Iop_8Uto32, mkexpr(t_fetched)),
+                        mkexpr(t_bitno2)),
+                  mkU32(1)))
+       );
+   /* Set NDEP even though it isn't used.  This makes redundant-PUT
+      elimination of previous stores to this field work better. */
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+
+   /* Move reg operand from stack back to reg */
+   if (epartIsReg(modrm)) {
+      /* t_esp still points at it. */
+      putIReg(sz, eregOfRM(modrm), loadLE(szToITy(sz), mkexpr(t_esp)) );
+      putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(t_esp), mkU32(128)) );
+   }
+
+   DIP("bt%s%c %s, %s\n",
+       nameBtOp(op), nameISize(sz), nameIReg(sz, gregOfRM(modrm)), 
+       ( epartIsReg(modrm) ? nameIReg(sz, eregOfRM(modrm)) : dis_buf ) );
+ 
+   return delta;
+}
+
+
+
+/* Handle BSF/BSR.  Only v-size seems necessary. */
+/* Disassemble BSF (fwds=True) / BSR (fwds=False) Ev,Gv.  Z is set
+   iff the source is zero; if so, the destination register is left
+   unchanged (matching at least VIA Nehemiah behaviour — see the
+   comment below).  Returns the updated delta. */
+static
+UInt dis_bs_E_G ( UChar sorb, Int sz, Int delta, Bool fwds )
+{
+   Bool   isReg;
+   UChar  modrm;
+   HChar  dis_buf[50];
+   
+   IRType ty  = szToITy(sz);
+   IRTemp src = newTemp(ty);
+   IRTemp dst = newTemp(ty);
+
+   IRTemp src32 = newTemp(Ity_I32);
+   IRTemp dst32 = newTemp(Ity_I32);
+   IRTemp srcB  = newTemp(Ity_I1);
+
+   vassert(sz == 4 || sz == 2);
+
+   modrm = getIByte(delta);
+
+   isReg = epartIsReg(modrm);
+   if (isReg) {
+      delta++;
+      assign( src, getIReg(sz, eregOfRM(modrm)) );
+   } else {
+      Int    len;
+      IRTemp addr = disAMode( &len, sorb, delta, dis_buf );
+      delta += len;
+      assign( src, loadLE(ty, mkexpr(addr)) );
+   }
+
+   DIP("bs%c%c %s, %s\n",
+       fwds ? 'f' : 'r', nameISize(sz), 
+       ( isReg ? nameIReg(sz, eregOfRM(modrm)) : dis_buf ), 
+       nameIReg(sz, gregOfRM(modrm)));
+
+   /* Generate a bool expression which is zero iff the original is
+      zero, and nonzero otherwise.  Ask for a CmpNE version which, if
+      instrumented by Memcheck, is instrumented expensively, since
+      this may be used on the output of a preceding movmskb insn,
+      which has been known to be partially defined, and in need of
+      careful handling. */
+   assign( srcB, binop(mkSizedOp(ty,Iop_ExpCmpNE8),
+                       mkexpr(src), mkU(ty,0)) );
+
+   /* Flags: Z is 1 iff source value is zero.  All others 
+      are undefined -- we force them to zero. */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+   stmt( IRStmt_Put( 
+            OFFB_CC_DEP1,
+            IRExpr_ITE( mkexpr(srcB),
+                        /* src!=0 */
+                        mkU32(0),
+                        /* src==0 */
+                        mkU32(X86G_CC_MASK_Z)
+                        )
+       ));
+   /* Set NDEP even though it isn't used.  This makes redundant-PUT
+      elimination of previous stores to this field work better. */
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+
+   /* Result: iff source value is zero, we can't use
+      Iop_Clz32/Iop_Ctz32 as they have no defined result in that case.
+      But anyway, Intel x86 semantics say the result is undefined in
+      such situations.  Hence handle the zero case specially. */
+
+   /* Bleh.  What we compute:
+
+          bsf32:  if src == 0 then 0 else  Ctz32(src)
+          bsr32:  if src == 0 then 0 else  31 - Clz32(src)
+
+          bsf16:  if src == 0 then 0 else  Ctz32(16Uto32(src))
+          bsr16:  if src == 0 then 0 else  31 - Clz32(16Uto32(src))
+
+      First, widen src to 32 bits if it is not already.
+
+      Postscript 15 Oct 04: it seems that at least VIA Nehemiah leaves the
+      dst register unchanged when src == 0.  Hence change accordingly.
+   */
+   if (sz == 2)
+      assign( src32, unop(Iop_16Uto32, mkexpr(src)) );
+   else
+      assign( src32, mkexpr(src) );
+
+   /* The main computation, guarding against zero. */
+   assign( dst32,   
+           IRExpr_ITE( 
+              mkexpr(srcB),
+              /* src != 0 */
+              fwds ? unop(Iop_Ctz32, mkexpr(src32))
+                   : binop(Iop_Sub32, 
+                           mkU32(31), 
+                           unop(Iop_Clz32, mkexpr(src32))),
+              /* src == 0 -- leave dst unchanged */
+              widenUto32( getIReg( sz, gregOfRM(modrm) ) )
+           )
+         );
+
+   if (sz == 2)
+      assign( dst, unop(Iop_32to16, mkexpr(dst32)) );
+   else
+      assign( dst, mkexpr(dst32) );
+
+   /* dump result back */
+   putIReg( sz, gregOfRM(modrm), mkexpr(dst) );
+
+   return delta;
+}
+
+
+/* Generate IR for XCHG eAX, reg: swap the eAX register with 'reg'
+   at operand size 'sz' (2 or 4), via two temps so the reads happen
+   before either write. */
+static 
+void codegen_xchg_eAX_Reg ( Int sz, Int reg )
+{
+   IRType ty = szToITy(sz);
+   IRTemp t1 = newTemp(ty);
+   IRTemp t2 = newTemp(ty);
+   vassert(sz == 2 || sz == 4);
+   assign( t1, getIReg(sz, R_EAX) );
+   assign( t2, getIReg(sz, reg) );
+   putIReg( sz, R_EAX, mkexpr(t2) );
+   putIReg( sz, reg, mkexpr(t1) );
+   DIP("xchg%c %s, %s\n", 
+       nameISize(sz), nameIReg(sz, R_EAX), nameIReg(sz, reg));
+}
+
+
+/* Generate IR for SAHF: load S,Z,A,P,C from %AH into the flag thunk,
+   preserving the current O flag. */
+static 
+void codegen_SAHF ( void )
+{
+   /* Set the flags to:
+      (x86g_calculate_flags_all() & X86G_CC_MASK_O)  -- retain the old O flag
+      | (%AH & (X86G_CC_MASK_S|X86G_CC_MASK_Z|X86G_CC_MASK_A
+                |X86G_CC_MASK_P|X86G_CC_MASK_C)
+   */
+   UInt   mask_SZACP = X86G_CC_MASK_S|X86G_CC_MASK_Z|X86G_CC_MASK_A
+                       |X86G_CC_MASK_C|X86G_CC_MASK_P;
+   IRTemp oldflags   = newTemp(Ity_I32);
+   /* Must compute the old flags (for O) before overwriting the thunk. */
+   assign( oldflags, mk_x86g_calculate_eflags_all() );
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+   /* AH is bits 15:8 of EAX, hence the shift by 8. */
+   stmt( IRStmt_Put( OFFB_CC_DEP1,
+         binop(Iop_Or32,
+               binop(Iop_And32, mkexpr(oldflags), mkU32(X86G_CC_MASK_O)),
+               binop(Iop_And32, 
+                     binop(Iop_Shr32, getIReg(4, R_EAX), mkU8(8)),
+                     mkU32(mask_SZACP))
+              )
+   ));
+   /* Set NDEP even though it isn't used.  This makes redundant-PUT
+      elimination of previous stores to this field work better. */
+   /* NOTE(review): NDEP is also PUT above — this second PUT is
+      redundant but harmless; kept as-is. */
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+}
+
+
+/* Generate IR for LAHF: copy S,Z,A,P,C (with the constant-1 bit at
+   position 1) into %AH, leaving the rest of EAX unchanged. */
+static 
+void codegen_LAHF ( void  )
+{
+   /* AH <- EFLAGS(SF:ZF:0:AF:0:PF:1:CF) */
+   IRExpr* eax_with_hole;
+   IRExpr* new_byte;
+   IRExpr* new_eax;
+   UInt    mask_SZACP = X86G_CC_MASK_S|X86G_CC_MASK_Z|X86G_CC_MASK_A
+                        |X86G_CC_MASK_C|X86G_CC_MASK_P;
+
+   IRTemp  flags = newTemp(Ity_I32);
+   assign( flags, mk_x86g_calculate_eflags_all() );
+
+   /* Clear bits 15:8 of EAX (the AH hole). */
+   eax_with_hole 
+      = binop(Iop_And32, getIReg(4, R_EAX), mkU32(0xFFFF00FF));
+   /* 1<<1 is the always-set EFLAGS bit 1. */
+   new_byte 
+      = binop(Iop_Or32, binop(Iop_And32, mkexpr(flags), mkU32(mask_SZACP)),
+                        mkU32(1<<1));
+   new_eax 
+      = binop(Iop_Or32, eax_with_hole,
+                        binop(Iop_Shl32, new_byte, mkU8(8)));
+   putIReg(4, R_EAX, new_eax);
+}
+
+
+/* Disassemble CMPXCHG G,E.  'locked' says whether a LOCK prefix was
+   present; only the memory form honours it (via IRCAS).  On success
+   the destination gets 'src' (G); on failure EAX gets the old
+   destination value.  Flags are set as by CMP EAX,dest.  Returns the
+   updated delta. */
+static
+UInt dis_cmpxchg_G_E ( UChar       sorb,
+                       Bool        locked,
+                       Int         size, 
+                       Int         delta0 )
+{
+   HChar dis_buf[50];
+   Int   len;
+
+   IRType ty    = szToITy(size);
+   IRTemp acc   = newTemp(ty);
+   IRTemp src   = newTemp(ty);
+   IRTemp dest  = newTemp(ty);
+   IRTemp dest2 = newTemp(ty);
+   IRTemp acc2  = newTemp(ty);
+   IRTemp cond  = newTemp(Ity_I1);
+   IRTemp addr  = IRTemp_INVALID;
+   UChar  rm    = getUChar(delta0);
+
+   /* There are 3 cases to consider:
+
+      reg-reg: ignore any lock prefix, generate sequence based
+               on ITE
+
+      reg-mem, not locked: ignore any lock prefix, generate sequence
+                           based on ITE
+
+      reg-mem, locked: use IRCAS
+   */
+   if (epartIsReg(rm)) {
+      /* case 1 */
+      assign( dest, getIReg(size, eregOfRM(rm)) );
+      delta0++;
+      assign( src, getIReg(size, gregOfRM(rm)) );
+      assign( acc, getIReg(size, R_EAX) );
+      /* Flags from acc - dest, i.e. CMP EAX,dest. */
+      setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
+      assign( cond, mk_x86g_calculate_condition(X86CondZ) );
+      assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) );
+      assign( acc2,  IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
+      putIReg(size, R_EAX, mkexpr(acc2));
+      putIReg(size, eregOfRM(rm), mkexpr(dest2));
+      DIP("cmpxchg%c %s,%s\n", nameISize(size),
+                               nameIReg(size,gregOfRM(rm)),
+                               nameIReg(size,eregOfRM(rm)) );
+   } 
+   else if (!epartIsReg(rm) && !locked) {
+      /* case 2 */
+      addr = disAMode ( &len, sorb, delta0, dis_buf );
+      assign( dest, loadLE(ty, mkexpr(addr)) );
+      delta0 += len;
+      assign( src, getIReg(size, gregOfRM(rm)) );
+      assign( acc, getIReg(size, R_EAX) );
+      setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
+      assign( cond, mk_x86g_calculate_condition(X86CondZ) );
+      assign( dest2, IRExpr_ITE(mkexpr(cond), mkexpr(src), mkexpr(dest)) );
+      assign( acc2,  IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
+      putIReg(size, R_EAX, mkexpr(acc2));
+      storeLE( mkexpr(addr), mkexpr(dest2) );
+      DIP("cmpxchg%c %s,%s\n", nameISize(size), 
+                               nameIReg(size,gregOfRM(rm)), dis_buf);
+   }
+   else if (!epartIsReg(rm) && locked) {
+      /* case 3 */
+      /* src is new value.  acc is expected value.  dest is old value.
+         Compute success from the output of the IRCAS, and steer the
+         new value for EAX accordingly: in case of success, EAX is
+         unchanged. */
+      addr = disAMode ( &len, sorb, delta0, dis_buf );
+      delta0 += len;
+      assign( src, getIReg(size, gregOfRM(rm)) );
+      assign( acc, getIReg(size, R_EAX) );
+      stmt( IRStmt_CAS( 
+         mkIRCAS( IRTemp_INVALID, dest, Iend_LE, mkexpr(addr), 
+                  NULL, mkexpr(acc), NULL, mkexpr(src) )
+      ));
+      setFlags_DEP1_DEP2(Iop_Sub8, acc, dest, ty);
+      assign( cond, mk_x86g_calculate_condition(X86CondZ) );
+      assign( acc2,  IRExpr_ITE(mkexpr(cond), mkexpr(acc), mkexpr(dest)) );
+      putIReg(size, R_EAX, mkexpr(acc2));
+      DIP("cmpxchg%c %s,%s\n", nameISize(size), 
+                               nameIReg(size,gregOfRM(rm)), dis_buf);
+   }
+   else vassert(0);
+
+   return delta0;
+}
+
+
+/* Handle conditional move instructions of the form
+      cmovcc E(reg-or-mem), G(reg)
+
+   E(src) is reg-or-mem
+   G(dst) is reg.
+
+   If E is reg, -->    GET %E, tmps
+                       GET %G, tmpd
+                       CMOVcc tmps, tmpd
+                       PUT tmpd, %G
+ 
+   If E is mem  -->    (getAddr E) -> tmpa
+                       LD (tmpa), tmps
+                       GET %G, tmpd
+                       CMOVcc tmps, tmpd
+                       PUT tmpd, %G
+*/
+/* Disassemble CMOVcc E,G (see the scheme in the comment above).
+   'cond' is the condition code; G is conditionally overwritten with
+   E via an ITE.  Note the memory form always performs the load, even
+   when the condition is false.  Returns the updated delta. */
+static
+UInt dis_cmov_E_G ( UChar       sorb,
+                    Int         sz, 
+                    X86Condcode cond,
+                    Int         delta0 )
+{
+   UChar rm  = getIByte(delta0);
+   HChar dis_buf[50];
+   Int   len;
+
+   IRType ty   = szToITy(sz);
+   IRTemp tmps = newTemp(ty);
+   IRTemp tmpd = newTemp(ty);
+
+   if (epartIsReg(rm)) {
+      assign( tmps, getIReg(sz, eregOfRM(rm)) );
+      assign( tmpd, getIReg(sz, gregOfRM(rm)) );
+
+      putIReg(sz, gregOfRM(rm),
+                  IRExpr_ITE( mk_x86g_calculate_condition(cond),
+                              mkexpr(tmps),
+                              mkexpr(tmpd) )
+             );
+      DIP("cmov%c%s %s,%s\n", nameISize(sz), 
+                              name_X86Condcode(cond),
+                              nameIReg(sz,eregOfRM(rm)),
+                              nameIReg(sz,gregOfRM(rm)));
+      /* +1 for the modrm byte. */
+      return 1+delta0;
+   }
+
+   /* E refers to memory */    
+   {
+      IRTemp addr = disAMode ( &len, sorb, delta0, dis_buf );
+      assign( tmps, loadLE(ty, mkexpr(addr)) );
+      assign( tmpd, getIReg(sz, gregOfRM(rm)) );
+
+      putIReg(sz, gregOfRM(rm),
+                  IRExpr_ITE( mk_x86g_calculate_condition(cond),
+                              mkexpr(tmps),
+                              mkexpr(tmpd) )
+             );
+
+      DIP("cmov%c%s %s,%s\n", nameISize(sz), 
+                              name_X86Condcode(cond),
+                              dis_buf,
+                              nameIReg(sz,gregOfRM(rm)));
+      return len+delta0;
+   }
+}
+
+
+/* Handle XADD G,E: E and G are exchanged and their sum is written to
+   E; flags are set from the addition.  Sets *decodeOK (True on all
+   the paths below) and returns the delta just past the instruction. */
+static
+UInt dis_xadd_G_E ( UChar sorb, Bool locked, Int sz, Int delta0,
+                    Bool* decodeOK )
+{
+   Int   len;
+   UChar rm = getIByte(delta0);
+   HChar dis_buf[50];
+
+   IRType ty    = szToITy(sz);
+   IRTemp tmpd  = newTemp(ty);   /* old E value */
+   IRTemp tmpt0 = newTemp(ty);   /* G value */
+   IRTemp tmpt1 = newTemp(ty);   /* E + G */
+
+   /* There are 3 cases to consider:
+
+      reg-reg: ignore any lock prefix,
+               generate 'naive' (non-atomic) sequence
+
+      reg-mem, not locked: ignore any lock prefix, generate 'naive'
+                           (non-atomic) sequence
+
+      reg-mem, locked: use IRCAS
+   */
+
+   if (epartIsReg(rm)) {
+      /* case 1 */
+      assign( tmpd,  getIReg(sz, eregOfRM(rm)));
+      assign( tmpt0, getIReg(sz, gregOfRM(rm)) );
+      assign( tmpt1, binop(mkSizedOp(ty,Iop_Add8),
+                           mkexpr(tmpd), mkexpr(tmpt0)) );
+      setFlags_DEP1_DEP2( Iop_Add8, tmpd, tmpt0, ty );
+      putIReg(sz, eregOfRM(rm), mkexpr(tmpt1));
+      putIReg(sz, gregOfRM(rm), mkexpr(tmpd));
+      DIP("xadd%c %s, %s\n",
+          nameISize(sz), nameIReg(sz,gregOfRM(rm)), 
+          				 nameIReg(sz,eregOfRM(rm)));
+      *decodeOK = True;
+      return 1+delta0;
+   }
+   else if (!epartIsReg(rm) && !locked) {
+      /* case 2 */
+      IRTemp addr = disAMode ( &len, sorb, delta0, dis_buf );
+      assign( tmpd,  loadLE(ty, mkexpr(addr)) );
+      assign( tmpt0, getIReg(sz, gregOfRM(rm)) );
+      assign( tmpt1, binop(mkSizedOp(ty,Iop_Add8),
+                           mkexpr(tmpd), mkexpr(tmpt0)) );
+      storeLE( mkexpr(addr), mkexpr(tmpt1) );
+      setFlags_DEP1_DEP2( Iop_Add8, tmpd, tmpt0, ty );
+      putIReg(sz, gregOfRM(rm), mkexpr(tmpd));
+      DIP("xadd%c %s, %s\n",
+          nameISize(sz), nameIReg(sz,gregOfRM(rm)), dis_buf);
+      *decodeOK = True;
+      return len+delta0;
+   }
+   else if (!epartIsReg(rm) && locked) {
+      /* case 3 */
+      IRTemp addr = disAMode ( &len, sorb, delta0, dis_buf );
+      assign( tmpd,  loadLE(ty, mkexpr(addr)) );
+      assign( tmpt0, getIReg(sz, gregOfRM(rm)) );
+      assign( tmpt1, binop(mkSizedOp(ty,Iop_Add8), 
+                           mkexpr(tmpd), mkexpr(tmpt0)) );
+      /* Atomic update: restart the insn if memory changed meanwhile. */
+      casLE( mkexpr(addr), mkexpr(tmpd)/*expVal*/,
+                           mkexpr(tmpt1)/*newVal*/, guest_EIP_curr_instr );
+      setFlags_DEP1_DEP2( Iop_Add8, tmpd, tmpt0, ty );
+      putIReg(sz, gregOfRM(rm), mkexpr(tmpd));
+      DIP("xadd%c %s, %s\n",
+          nameISize(sz), nameIReg(sz,gregOfRM(rm)), dis_buf);
+      *decodeOK = True;
+      return len+delta0;
+   }
+   /*UNREACHED*/
+   vassert(0);
+}
+
+/* Move 16 bits from Ew (ireg or mem) to G (a segment register).
+   Returns the delta just past the decoded instruction. */
+
+static
+UInt dis_mov_Ew_Sw ( UChar sorb, Int delta0 )
+{
+   Int    len;
+   IRTemp addr;
+   UChar  rm  = getIByte(delta0);
+   HChar  dis_buf[50];
+
+   if (epartIsReg(rm)) {
+      /* Register source: take the low 16 bits of the ireg. */
+      putSReg( gregOfRM(rm), getIReg(2, eregOfRM(rm)) );
+      DIP("movw %s,%s\n", nameIReg(2,eregOfRM(rm)), nameSReg(gregOfRM(rm)));
+      return 1+delta0;
+   } else {
+      /* Memory source: a 16-bit little-endian load. */
+      addr = disAMode ( &len, sorb, delta0, dis_buf );
+      putSReg( gregOfRM(rm), loadLE(Ity_I16, mkexpr(addr)) );
+      DIP("movw %s,%s\n", dis_buf, nameSReg(gregOfRM(rm)));
+      return len+delta0;
+   }
+}
+
+/* Move 16 bits from G (a segment register) to Ew (ireg or mem).  If
+   dst is ireg and sz==4, zero out top half of it.  Returns the delta
+   just past the decoded instruction. */
+
+static
+UInt dis_mov_Sw_Ew ( UChar sorb,
+                     Int   sz,
+                     Int   delta0 )
+{
+   Int    len;
+   IRTemp addr;
+   UChar  rm  = getIByte(delta0);
+   HChar  dis_buf[50];
+
+   vassert(sz == 2 || sz == 4);
+
+   if (epartIsReg(rm)) {
+      /* Register destination: 32-bit form zero-extends the value. */
+      if (sz == 4)
+         putIReg(4, eregOfRM(rm), unop(Iop_16Uto32, getSReg(gregOfRM(rm))));
+      else
+         putIReg(2, eregOfRM(rm), getSReg(gregOfRM(rm)));
+
+      DIP("mov %s,%s\n", nameSReg(gregOfRM(rm)), nameIReg(sz,eregOfRM(rm)));
+      return 1+delta0;
+   } else {
+      /* Memory destination: always a 16-bit store, regardless of sz. */
+      addr = disAMode ( &len, sorb, delta0, dis_buf );
+      storeLE( mkexpr(addr), getSReg(gregOfRM(rm)) );
+      DIP("mov %s,%s\n", nameSReg(gregOfRM(rm)), dis_buf);
+      return len+delta0;
+   }
+}
+
+
+/* Push a segment register: decrement %esp by sz (2 or 4) and store
+   the register's 16-bit value at the new %esp. */
+static 
+void dis_push_segreg ( UInt sreg, Int sz )
+{
+    IRTemp t1 = newTemp(Ity_I16);   /* segment register value */
+    IRTemp ta = newTemp(Ity_I32);   /* decremented %esp */
+    vassert(sz == 2 || sz == 4);
+
+    assign( t1, getSReg(sreg) );
+    assign( ta, binop(Iop_Sub32, getIReg(4, R_ESP), mkU32(sz)) );
+    putIReg(4, R_ESP, mkexpr(ta));
+    /* Only 16 bits are written, even when sz == 4. */
+    storeLE( mkexpr(ta), mkexpr(t1) );
+
+    DIP("push%c %s\n", sz==2 ? 'w' : 'l', nameSReg(sreg));
+}
+
+/* Pop a segment register: load 16 bits from %esp into sreg, then
+   advance %esp by sz (2 or 4) bytes. */
+static
+void dis_pop_segreg ( UInt sreg, Int sz )
+{
+    IRTemp t1 = newTemp(Ity_I16);   /* popped value */
+    IRTemp ta = newTemp(Ity_I32);   /* old %esp */
+    vassert(sz == 2 || sz == 4);
+
+    assign( ta, getIReg(4, R_ESP) );
+    assign( t1, loadLE(Ity_I16, mkexpr(ta)) );
+
+    putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(ta), mkU32(sz)) );
+    putSReg( sreg, mkexpr(t1) );
+    DIP("pop%c %s\n", sz==2 ? 'w' : 'l', nameSReg(sreg));
+}
+
+/* ret / ret $d32: pop the return address, pop a further d32 bytes of
+   stack, and jump to the popped address.  Ends the block. */
+static
+void dis_ret ( /*MOD*/DisResult* dres, UInt d32 )
+{
+   IRTemp t1 = newTemp(Ity_I32);   /* old %esp */
+   IRTemp t2 = newTemp(Ity_I32);   /* return address */
+   assign(t1, getIReg(4,R_ESP));
+   assign(t2, loadLE(Ity_I32,mkexpr(t1)));
+   putIReg(4, R_ESP,binop(Iop_Add32, mkexpr(t1), mkU32(4+d32)));
+   jmp_treg(dres, Ijk_Ret, t2);
+   vassert(dres->whatNext == Dis_StopHere);
+}
+
+/*------------------------------------------------------------*/
+/*--- SSE/SSE2/SSE3 helpers                                ---*/
+/*------------------------------------------------------------*/
+
+/* Indicates whether the op requires a rounding-mode argument.  Note
+   that this covers only vector floating point arithmetic ops, and
+   omits the scalar ones that need rounding modes.  Note also that
+   inconsistencies here will get picked up later by the IR sanity
+   checker, so this isn't correctness-critical. */
+static Bool requiresRMode ( IROp op )
+{
+   /* The four 128-bit vector FP arithmetic ops, at both lane widths,
+      are the only rounding-mode-carrying ops considered here. */
+   if (op == Iop_Add32Fx4 || op == Iop_Sub32Fx4
+       || op == Iop_Mul32Fx4 || op == Iop_Div32Fx4
+       || op == Iop_Add64Fx2 || op == Iop_Sub64Fx2
+       || op == Iop_Mul64Fx2 || op == Iop_Div64Fx2)
+      return True;
+   return False;
+}
+
+
+/* Worker function; do not call directly. 
+   Handles full width G = G `op` E   and   G = (not G) `op` E.
+   Returns the delta just past the decoded instruction.
+*/
+
+static UInt dis_SSE_E_to_G_all_wrk ( 
+               UChar sorb, Int delta, 
+               const HChar* opname, IROp op,
+               Bool   invertG
+            )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getIByte(delta);
+   /* The G operand, complemented first if invertG. */
+   IRExpr* gpart
+      = invertG ? unop(Iop_NotV128, getXMMReg(gregOfRM(rm)))
+                : getXMMReg(gregOfRM(rm));
+   if (epartIsReg(rm)) {
+      putXMMReg(
+         gregOfRM(rm),
+         requiresRMode(op)
+            ? triop(op, get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        gpart,
+                        getXMMReg(eregOfRM(rm)))
+            : binop(op, gpart,
+                        getXMMReg(eregOfRM(rm)))
+      );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRM(rm)),
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+1;
+   } else {
+      addr = disAMode ( &alen, sorb, delta, dis_buf );
+      putXMMReg(
+         gregOfRM(rm), 
+         requiresRMode(op)
+            ? triop(op, get_FAKE_roundingmode(), /* XXXROUNDINGFIXME */
+                        gpart,
+                        loadLE(Ity_V128, mkexpr(addr)))
+            : binop(op, gpart,
+                        loadLE(Ity_V128, mkexpr(addr)))
+      );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* All lanes SSE binary operation, G = G `op` E. */
+
+static
+UInt dis_SSE_E_to_G_all ( UChar sorb, Int delta, const HChar* opname, IROp op )
+{
+   /* Non-G-inverting variant of the worker. */
+   Bool invertG = False;
+   return dis_SSE_E_to_G_all_wrk( sorb, delta, opname, op, invertG );
+}
+
+/* All lanes SSE binary operation, G = (not G) `op` E. */
+
+static
+UInt dis_SSE_E_to_G_all_invG ( UChar sorb, Int delta, 
+                               const HChar* opname, IROp op )
+{
+   /* G-inverting variant of the worker. */
+   Bool invertG = True;
+   return dis_SSE_E_to_G_all_wrk( sorb, delta, opname, op, invertG );
+}
+
+
+/* Lowest 32-bit lane only SSE binary operation, G = G `op` E.
+   Returns the delta just past the decoded instruction. */
+
+static UInt dis_SSE_E_to_G_lo32 ( UChar sorb, Int delta, 
+                                  const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getIByte(delta);
+   IRExpr* gpart = getXMMReg(gregOfRM(rm));
+   if (epartIsReg(rm)) {
+      putXMMReg( gregOfRM(rm), 
+                 binop(op, gpart,
+                           getXMMReg(eregOfRM(rm))) );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRM(rm)),
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+1;
+   } else {
+      /* We can only do a 32-bit memory read, so the upper 3/4 of the
+         E operand needs to be made simply of zeroes. */
+      IRTemp epart = newTemp(Ity_V128);
+      addr = disAMode ( &alen, sorb, delta, dis_buf );
+      assign( epart, unop( Iop_32UtoV128,
+                           loadLE(Ity_I32, mkexpr(addr))) );
+      putXMMReg( gregOfRM(rm), 
+                 binop(op, gpart, mkexpr(epart)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* Lower 64-bit lane only SSE binary operation, G = G `op` E.
+   Returns the delta just past the decoded instruction. */
+
+static UInt dis_SSE_E_to_G_lo64 ( UChar sorb, Int delta, 
+                                  const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getIByte(delta);
+   IRExpr* gpart = getXMMReg(gregOfRM(rm));
+   if (epartIsReg(rm)) {
+      putXMMReg( gregOfRM(rm), 
+                 binop(op, gpart,
+                           getXMMReg(eregOfRM(rm))) );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRM(rm)),
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+1;
+   } else {
+      /* We can only do a 64-bit memory read, so the upper half of the
+         E operand needs to be made simply of zeroes. */
+      IRTemp epart = newTemp(Ity_V128);
+      addr = disAMode ( &alen, sorb, delta, dis_buf );
+      assign( epart, unop( Iop_64UtoV128,
+                           loadLE(Ity_I64, mkexpr(addr))) );
+      putXMMReg( gregOfRM(rm), 
+                 binop(op, gpart, mkexpr(epart)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* All lanes unary SSE operation, G = op(E).
+   Returns the delta just past the decoded instruction. */
+
+static UInt dis_SSE_E_to_G_unary_all ( 
+               UChar sorb, Int delta, 
+               const HChar* opname, IROp op
+            )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getIByte(delta);
+   // Sqrt32Fx4 and Sqrt64Fx2 take a rounding mode, which is faked
+   // up in the usual way.
+   Bool needsIRRM = op == Iop_Sqrt32Fx4 || op == Iop_Sqrt64Fx2;
+   if (epartIsReg(rm)) {
+      IRExpr* src = getXMMReg(eregOfRM(rm));
+      /* XXXROUNDINGFIXME */
+      IRExpr* res = needsIRRM ? binop(op, get_FAKE_roundingmode(), src)
+                              : unop(op, src);
+      putXMMReg( gregOfRM(rm), res );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRM(rm)),
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+1;
+   } else {
+      addr = disAMode ( &alen, sorb, delta, dis_buf );
+      IRExpr* src = loadLE(Ity_V128, mkexpr(addr));
+      /* XXXROUNDINGFIXME */
+      IRExpr* res = needsIRRM ? binop(op, get_FAKE_roundingmode(), src)
+                              : unop(op, src);
+      putXMMReg( gregOfRM(rm), res );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* Lowest 32-bit lane only unary SSE operation, G = op(E).
+   Returns the delta just past the decoded instruction. */
+
+static UInt dis_SSE_E_to_G_unary_lo32 ( 
+               UChar sorb, Int delta, 
+               const HChar* opname, IROp op
+            )
+{
+   /* First we need to get the old G value and patch the low 32 bits
+      of the E operand into it.  Then apply op and write back to G. */
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getIByte(delta);
+   IRTemp  oldG0 = newTemp(Ity_V128);   /* original G */
+   IRTemp  oldG1 = newTemp(Ity_V128);   /* G with E's low lane patched in */
+
+   assign( oldG0, getXMMReg(gregOfRM(rm)) );
+
+   if (epartIsReg(rm)) {
+      assign( oldG1, 
+              binop( Iop_SetV128lo32,
+                     mkexpr(oldG0),
+                     getXMMRegLane32(eregOfRM(rm), 0)) );
+      putXMMReg( gregOfRM(rm), unop(op, mkexpr(oldG1)) );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRM(rm)),
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+1;
+   } else {
+      addr = disAMode ( &alen, sorb, delta, dis_buf );
+      assign( oldG1, 
+              binop( Iop_SetV128lo32,
+                     mkexpr(oldG0),
+                     loadLE(Ity_I32, mkexpr(addr)) ));
+      putXMMReg( gregOfRM(rm), unop(op, mkexpr(oldG1)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* Lowest 64-bit lane only unary SSE operation, G = op(E).
+   Returns the delta just past the decoded instruction. */
+
+static UInt dis_SSE_E_to_G_unary_lo64 ( 
+               UChar sorb, Int delta, 
+               const HChar* opname, IROp op
+            )
+{
+   /* First we need to get the old G value and patch the low 64 bits
+      of the E operand into it.  Then apply op and write back to G. */
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getIByte(delta);
+   IRTemp  oldG0 = newTemp(Ity_V128);   /* original G */
+   IRTemp  oldG1 = newTemp(Ity_V128);   /* G with E's low lane patched in */
+
+   assign( oldG0, getXMMReg(gregOfRM(rm)) );
+
+   if (epartIsReg(rm)) {
+      assign( oldG1, 
+              binop( Iop_SetV128lo64,
+                     mkexpr(oldG0),
+                     getXMMRegLane64(eregOfRM(rm), 0)) );
+      putXMMReg( gregOfRM(rm), unop(op, mkexpr(oldG1)) );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRM(rm)),
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+1;
+   } else {
+      addr = disAMode ( &alen, sorb, delta, dis_buf );
+      assign( oldG1, 
+              binop( Iop_SetV128lo64,
+                     mkexpr(oldG0),
+                     loadLE(Ity_I64, mkexpr(addr)) ));
+      putXMMReg( gregOfRM(rm), unop(op, mkexpr(oldG1)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRM(rm)) );
+      return delta+alen;
+   }
+}
+
+
+/* SSE integer binary operation:
+      G = G `op` E   (eLeft == False)
+      G = E `op` G   (eLeft == True)
+   Returns the delta just past the decoded instruction.
+*/
+static UInt dis_SSEint_E_to_G( 
+               UChar sorb, Int delta, 
+               const HChar* opname, IROp op,
+               Bool   eLeft
+            )
+{
+   HChar   dis_buf[50];
+   Int     alen;
+   IRTemp  addr;
+   UChar   rm = getIByte(delta);
+   IRExpr* gpart = getXMMReg(gregOfRM(rm));
+   IRExpr* epart = NULL;
+   if (epartIsReg(rm)) {
+      epart = getXMMReg(eregOfRM(rm));
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRM(rm)),
+                        nameXMMReg(gregOfRM(rm)) );
+      delta += 1;
+   } else {
+      addr  = disAMode ( &alen, sorb, delta, dis_buf );
+      epart = loadLE(Ity_V128, mkexpr(addr));
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRM(rm)) );
+      delta += alen;
+   }
+   /* eLeft selects the operand order, for non-commutative ops. */
+   putXMMReg( gregOfRM(rm), 
+              eLeft ? binop(op, epart, gpart)
+	            : binop(op, gpart, epart) );
+   return delta;
+}
+
+
+/* Helper for doing SSE FP comparisons. */
+
+static void findSSECmpOp ( Bool* needNot, IROp* op, 
+                           Int imm8, Bool all_lanes, Int sz )
+{
+   /* Predicates 4..7 are the negations of 0..3, so reduce the
+      immediate to 0..3 and record whether the result must be
+      complemented afterwards. */
+   Int ix = imm8 & 7;
+   *needNot = False;
+   *op      = Iop_INVALID;
+   if (ix >= 4) {
+      *needNot = True;
+      ix -= 4;
+   }
+
+   /* ix is now 0 (EQ), 1 (LT), 2 (LE) or 3 (UNORD).  Select the
+      matching IROp for the element size and lane coverage. */
+   if (sz == 4) {
+      switch (ix) {
+         case 0: *op = all_lanes ? Iop_CmpEQ32Fx4 : Iop_CmpEQ32F0x4; return;
+         case 1: *op = all_lanes ? Iop_CmpLT32Fx4 : Iop_CmpLT32F0x4; return;
+         case 2: *op = all_lanes ? Iop_CmpLE32Fx4 : Iop_CmpLE32F0x4; return;
+         case 3: *op = all_lanes ? Iop_CmpUN32Fx4 : Iop_CmpUN32F0x4; return;
+         default: break;
+      }
+   }
+   if (sz == 8) {
+      switch (ix) {
+         case 0: *op = all_lanes ? Iop_CmpEQ64Fx2 : Iop_CmpEQ64F0x2; return;
+         case 1: *op = all_lanes ? Iop_CmpLT64Fx2 : Iop_CmpLT64F0x2; return;
+         case 2: *op = all_lanes ? Iop_CmpLE64Fx2 : Iop_CmpLE64F0x2; return;
+         case 3: *op = all_lanes ? Iop_CmpUN64Fx2 : Iop_CmpUN64F0x2; return;
+         default: break;
+      }
+   }
+   vpanic("findSSECmpOp(x86,guest)");
+}
+
+/* Handles SSE 32F/64F comparisons.  Returns the delta just past the
+   decoded instruction (which includes a trailing imm8 predicate). */
+
+static UInt dis_SSEcmp_E_to_G ( UChar sorb, Int delta, 
+				const HChar* opname, Bool all_lanes, Int sz )
+{
+   HChar   dis_buf[50];
+   Int     alen, imm8;
+   IRTemp  addr;
+   Bool    needNot = False;
+   IROp    op      = Iop_INVALID;
+   IRTemp  plain   = newTemp(Ity_V128);   /* un-negated compare result */
+   UChar   rm      = getIByte(delta);
+   UShort  mask    = 0;
+   vassert(sz == 4 || sz == 8);
+   if (epartIsReg(rm)) {
+      imm8 = getIByte(delta+1);
+      findSSECmpOp(&needNot, &op, imm8, all_lanes, sz);
+      assign( plain, binop(op, getXMMReg(gregOfRM(rm)), 
+                               getXMMReg(eregOfRM(rm))) );
+      delta += 2;
+      DIP("%s $%d,%s,%s\n", opname,
+                            (Int)imm8,
+                            nameXMMReg(eregOfRM(rm)),
+                            nameXMMReg(gregOfRM(rm)) );
+   } else {
+      addr = disAMode ( &alen, sorb, delta, dis_buf );
+      imm8 = getIByte(delta+alen);
+      findSSECmpOp(&needNot, &op, imm8, all_lanes, sz);
+      /* For the single-lane forms only the low 32/64 bits are loaded;
+         the rest of the E operand is zero. */
+      assign( plain, 
+              binop(
+                 op,
+                 getXMMReg(gregOfRM(rm)), 
+                   all_lanes  ? loadLE(Ity_V128, mkexpr(addr))
+                 : sz == 8    ? unop( Iop_64UtoV128, loadLE(Ity_I64, mkexpr(addr)))
+                 : /*sz==4*/    unop( Iop_32UtoV128, loadLE(Ity_I32, mkexpr(addr)))
+             ) 
+      );
+      delta += alen+1;
+      DIP("%s $%d,%s,%s\n", opname,
+                            (Int)imm8,
+                            dis_buf,
+                            nameXMMReg(gregOfRM(rm)) );
+   }
+
+   if (needNot && all_lanes) {
+      putXMMReg( gregOfRM(rm), 
+                 unop(Iop_NotV128, mkexpr(plain)) );
+   }
+   else
+   if (needNot && !all_lanes) {
+      /* Negate only the low lane; presumably mkV128 expands each mask
+         bit to a byte of the 128-bit constant -- see mkV128. */
+      mask = toUShort( sz==4 ? 0x000F : 0x00FF );
+      putXMMReg( gregOfRM(rm), 
+                 binop(Iop_XorV128, mkexpr(plain), mkV128(mask)) );
+   }
+   else {
+      putXMMReg( gregOfRM(rm), mkexpr(plain) );
+   }
+
+   return delta;
+}
+
+
+/* Vector by scalar shift of G by the amount specified at the bottom
+   of E.  The whole low 32 bits of E are consulted when range-checking
+   the amount; out-of-range shifts produce all-zeroes (logical shifts)
+   or sign-fill (arithmetic shifts).  Returns the delta just past the
+   decoded instruction. */
+
+static UInt dis_SSE_shiftG_byE ( UChar sorb, Int delta, 
+                                 const HChar* opname, IROp op )
+{
+   HChar   dis_buf[50];
+   Int     alen, size;
+   IRTemp  addr;
+   Bool    shl, shr, sar;
+   UChar   rm   = getIByte(delta);
+   IRTemp  g0   = newTemp(Ity_V128);   /* old G value */
+   IRTemp  g1   = newTemp(Ity_V128);   /* shifted G value */
+   IRTemp  amt  = newTemp(Ity_I32);    /* shift amount, from bottom of E */
+   IRTemp  amt8 = newTemp(Ity_I8);
+   if (epartIsReg(rm)) {
+      assign( amt, getXMMRegLane32(eregOfRM(rm), 0) );
+      DIP("%s %s,%s\n", opname,
+                        nameXMMReg(eregOfRM(rm)),
+                        nameXMMReg(gregOfRM(rm)) );
+      delta++;
+   } else {
+      addr = disAMode ( &alen, sorb, delta, dis_buf );
+      assign( amt, loadLE(Ity_I32, mkexpr(addr)) );
+      DIP("%s %s,%s\n", opname,
+                        dis_buf,
+                        nameXMMReg(gregOfRM(rm)) );
+      delta += alen;
+   }
+   assign( g0,   getXMMReg(gregOfRM(rm)) );
+   assign( amt8, unop(Iop_32to8, mkexpr(amt)) );
+
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      /* Fixed: ShlN16x8 previously used size = 32, inconsistent with
+         its 16-bit lane width (and with dis_SSE_shiftE_imm below);
+         amounts 16..31 must yield zero rather than being fed to the
+         IR shift op out of range. */
+      case Iop_ShlN16x8: shl = True; size = 16; break;
+      case Iop_ShlN32x4: shl = True; size = 32; break;
+      case Iop_ShlN64x2: shl = True; size = 64; break;
+      case Iop_SarN16x8: sar = True; size = 16; break;
+      case Iop_SarN32x4: sar = True; size = 32; break;
+      case Iop_ShrN16x8: shr = True; size = 16; break;
+      case Iop_ShrN32x4: shr = True; size = 32; break;
+      case Iop_ShrN64x2: shr = True; size = 64; break;
+      default: vassert(0);
+   }
+
+   if (shl || shr) {
+     /* Out-of-range logical shifts give all zeroes. */
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           mkV128(0x0000)
+        )
+     );
+   } else 
+   if (sar) {
+     /* Out-of-range arithmetic shifts saturate at (lane size - 1),
+        i.e. every lane fills with its sign bit. */
+     assign( 
+        g1,
+        IRExpr_ITE(
+           binop(Iop_CmpLT32U,mkexpr(amt),mkU32(size)),
+           binop(op, mkexpr(g0), mkexpr(amt8)),
+           binop(op, mkexpr(g0), mkU8(size-1))
+        )
+     );
+   } else {
+      /*NOTREACHED*/
+      vassert(0);
+   }
+
+   putXMMReg( gregOfRM(rm), mkexpr(g1) );
+   return delta;
+}
+
+
+/* Vector by scalar shift of E by an immediate byte.  E must be a
+   register (asserted).  Out-of-range immediates produce all-zeroes
+   (logical shifts) or sign-fill (arithmetic shifts); the range check
+   is folded at decode time since the amount is a constant.  Returns
+   the delta just past the decoded instruction. */
+
+static 
+UInt dis_SSE_shiftE_imm ( Int delta, const HChar* opname, IROp op )
+{
+   Bool    shl, shr, sar;
+   UChar   rm   = getIByte(delta);
+   IRTemp  e0   = newTemp(Ity_V128);   /* old E value */
+   IRTemp  e1   = newTemp(Ity_V128);   /* shifted E value */
+   UChar   amt, size;
+   vassert(epartIsReg(rm));
+   vassert(gregOfRM(rm) == 2 
+           || gregOfRM(rm) == 4 || gregOfRM(rm) == 6);
+   amt = getIByte(delta+1);
+   delta += 2;
+   DIP("%s $%d,%s\n", opname,
+                      (Int)amt,
+                      nameXMMReg(eregOfRM(rm)) );
+   assign( e0, getXMMReg(eregOfRM(rm)) );
+
+   shl = shr = sar = False;
+   size = 0;
+   switch (op) {
+      case Iop_ShlN16x8: shl = True; size = 16; break;
+      case Iop_ShlN32x4: shl = True; size = 32; break;
+      case Iop_ShlN64x2: shl = True; size = 64; break;
+      case Iop_SarN16x8: sar = True; size = 16; break;
+      case Iop_SarN32x4: sar = True; size = 32; break;
+      case Iop_ShrN16x8: shr = True; size = 16; break;
+      case Iop_ShrN32x4: shr = True; size = 32; break;
+      case Iop_ShrN64x2: shr = True; size = 64; break;
+      default: vassert(0);
+   }
+
+   if (shl || shr) {
+      assign( e1, amt >= size 
+                     ? mkV128(0x0000)
+                     : binop(op, mkexpr(e0), mkU8(amt))
+      );
+   } else 
+   if (sar) {
+      assign( e1, amt >= size 
+                     ? binop(op, mkexpr(e0), mkU8(size-1))
+                     : binop(op, mkexpr(e0), mkU8(amt))
+      );
+   } else {
+      /*NOTREACHED*/
+      vassert(0);
+   }
+
+   putXMMReg( eregOfRM(rm), mkexpr(e1) );
+   return delta;
+}
+
+
+/* Get the current SSE rounding mode. */
+
+static IRExpr* /* :: Ity_I32 */ get_sse_roundingmode ( void )
+{
+   /* Only the lowest two bits of the SSEROUND guest-state field are
+      significant; mask the rest off. */
+   IRExpr* sseround = IRExpr_Get( OFFB_SSEROUND, Ity_I32 );
+   return binop( Iop_And32, sseround, mkU32(3) );
+}
+
+/* Set the SSE rounding mode from the given I32-typed expression. */
+static void put_sse_roundingmode ( IRExpr* sseround )
+{
+   vassert(typeOfIRExpr(irsb->tyenv, sseround) == Ity_I32);
+   stmt( IRStmt_Put( OFFB_SSEROUND, sseround ) );
+}
+
+/* Break a 128-bit value up into four 32-bit ints.  *t3 receives the
+   most significant 32 bits and *t0 the least significant.  The out
+   params must be IRTemp_INVALID on entry (asserted). */
+
+static void breakup128to32s ( IRTemp t128,
+			      /*OUTs*/
+                              IRTemp* t3, IRTemp* t2,
+                              IRTemp* t1, IRTemp* t0 )
+{
+   IRTemp hi64 = newTemp(Ity_I64);
+   IRTemp lo64 = newTemp(Ity_I64);
+   assign( hi64, unop(Iop_V128HIto64, mkexpr(t128)) );
+   assign( lo64, unop(Iop_V128to64,   mkexpr(t128)) );
+
+   vassert(t0 && *t0 == IRTemp_INVALID);
+   vassert(t1 && *t1 == IRTemp_INVALID);
+   vassert(t2 && *t2 == IRTemp_INVALID);
+   vassert(t3 && *t3 == IRTemp_INVALID);
+
+   *t0 = newTemp(Ity_I32);
+   *t1 = newTemp(Ity_I32);
+   *t2 = newTemp(Ity_I32);
+   *t3 = newTemp(Ity_I32);
+   assign( *t0, unop(Iop_64to32,   mkexpr(lo64)) );
+   assign( *t1, unop(Iop_64HIto32, mkexpr(lo64)) );
+   assign( *t2, unop(Iop_64to32,   mkexpr(hi64)) );
+   assign( *t3, unop(Iop_64HIto32, mkexpr(hi64)) );
+}
+
+/* Construct a 128-bit value from four 32-bit ints.  t3 becomes the
+   most significant 32 bits and t0 the least significant. */
+
+static IRExpr* mk128from32s ( IRTemp t3, IRTemp t2,
+                              IRTemp t1, IRTemp t0 )
+{
+   /* Pair the 32-bit pieces into 64-bit halves, then glue the halves
+      into a V128. */
+   IRExpr* hi64 = binop(Iop_32HLto64, mkexpr(t3), mkexpr(t2));
+   IRExpr* lo64 = binop(Iop_32HLto64, mkexpr(t1), mkexpr(t0));
+   return binop( Iop_64HLtoV128, hi64, lo64 );
+}
+
+/* Break a 64-bit value up into four 16-bit ints.  *t3 receives the
+   most significant 16 bits and *t0 the least significant.  The out
+   params must be IRTemp_INVALID on entry (asserted). */
+
+static void breakup64to16s ( IRTemp t64,
+                             /*OUTs*/
+                             IRTemp* t3, IRTemp* t2,
+                             IRTemp* t1, IRTemp* t0 )
+{
+   IRTemp hi32 = newTemp(Ity_I32);
+   IRTemp lo32 = newTemp(Ity_I32);
+   assign( hi32, unop(Iop_64HIto32, mkexpr(t64)) );
+   assign( lo32, unop(Iop_64to32,   mkexpr(t64)) );
+
+   vassert(t0 && *t0 == IRTemp_INVALID);
+   vassert(t1 && *t1 == IRTemp_INVALID);
+   vassert(t2 && *t2 == IRTemp_INVALID);
+   vassert(t3 && *t3 == IRTemp_INVALID);
+
+   *t0 = newTemp(Ity_I16);
+   *t1 = newTemp(Ity_I16);
+   *t2 = newTemp(Ity_I16);
+   *t3 = newTemp(Ity_I16);
+   assign( *t0, unop(Iop_32to16,   mkexpr(lo32)) );
+   assign( *t1, unop(Iop_32HIto16, mkexpr(lo32)) );
+   assign( *t2, unop(Iop_32to16,   mkexpr(hi32)) );
+   assign( *t3, unop(Iop_32HIto16, mkexpr(hi32)) );
+}
+
+/* Construct a 64-bit value from four 16-bit ints.  t3 becomes the
+   most significant 16 bits and t0 the least significant. */
+
+static IRExpr* mk64from16s ( IRTemp t3, IRTemp t2,
+                             IRTemp t1, IRTemp t0 )
+{
+   /* Pair the 16-bit pieces into 32-bit halves, then glue the halves
+      into an I64. */
+   IRExpr* hi32 = binop(Iop_16HLto32, mkexpr(t3), mkexpr(t2));
+   IRExpr* lo32 = binop(Iop_16HLto32, mkexpr(t1), mkexpr(t0));
+   return binop( Iop_32HLto64, hi32, lo32 );
+}
+
+/* Generate IR to set the guest %EFLAGS from the pushfl-format image
+   in the given 32-bit temporary.  The flags that are set are: O S Z A
+   C P D ID AC.
+
+   In all cases, code to set AC is generated.  However, VEX actually
+   ignores the AC value and so can optionally emit an emulation
+   warning when it is enabled.  In this routine, an emulation warning
+   is only emitted if emit_AC_emwarn is True, in which case
+   next_insn_EIP must be correct (this allows for correct code
+   generation for popfl/popfw).  If emit_AC_emwarn is False,
+   next_insn_EIP is unimportant (this allows for easy if kludgey code
+   generation for IRET.) */
+
+static 
+void set_EFLAGS_from_value ( IRTemp t1, 
+                             Bool   emit_AC_emwarn,
+                             Addr32 next_insn_EIP )
+{
+   vassert(typeOfIRTemp(irsb->tyenv,t1) == Ity_I32);
+
+   /* t1 is the flag word.  Mask out everything except OSZACP and set
+      the flags thunk to X86G_CC_OP_COPY. */
+   stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+   stmt( IRStmt_Put( OFFB_CC_DEP1, 
+                     binop(Iop_And32,
+                           mkexpr(t1), 
+                           mkU32( X86G_CC_MASK_C | X86G_CC_MASK_P 
+                                  | X86G_CC_MASK_A | X86G_CC_MASK_Z 
+                                  | X86G_CC_MASK_S| X86G_CC_MASK_O )
+                          )
+                    )
+       );
+   /* Set NDEP even though it isn't used.  This makes redundant-PUT
+      elimination of previous stores to this field work better. */
+   stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+
+   /* Also need to set the D flag, which is held in bit 10 of t1.
+      If zero, put 1 in OFFB_DFLAG, else -1 in OFFB_DFLAG. */
+   stmt( IRStmt_Put( 
+            OFFB_DFLAG,
+            IRExpr_ITE( 
+               unop(Iop_32to1,
+                    binop(Iop_And32, 
+                          binop(Iop_Shr32, mkexpr(t1), mkU8(10)), 
+                          mkU32(1))),
+               mkU32(0xFFFFFFFF),
+               mkU32(1)))
+       );
+
+   /* Set the ID flag, held in bit 21 of t1. */
+   stmt( IRStmt_Put( 
+            OFFB_IDFLAG,
+            IRExpr_ITE( 
+               unop(Iop_32to1,
+                    binop(Iop_And32, 
+                          binop(Iop_Shr32, mkexpr(t1), mkU8(21)), 
+                          mkU32(1))),
+               mkU32(1),
+               mkU32(0)))
+       );
+
+   /* And set the AC flag, held in bit 18 of t1.  If setting it to 1,
+      possibly emit an emulation warning. */
+   stmt( IRStmt_Put( 
+            OFFB_ACFLAG,
+            IRExpr_ITE( 
+               unop(Iop_32to1,
+                    binop(Iop_And32, 
+                          binop(Iop_Shr32, mkexpr(t1), mkU8(18)), 
+                          mkU32(1))),
+               mkU32(1),
+               mkU32(0)))
+       );
+
+   if (emit_AC_emwarn) {
+      put_emwarn( mkU32(EmWarn_X86_acFlag) );
+      stmt( 
+         IRStmt_Exit(
+            binop( Iop_CmpNE32, 
+                   binop(Iop_And32, mkexpr(t1), mkU32(1<<18)), 
+                   mkU32(0) ),
+            Ijk_EmWarn,
+            IRConst_U32( next_insn_EIP ),
+            OFFB_EIP
+         )
+      );
+   }
+}
+
+
+/* Helper for the SSSE3 (not SSE3) PMULHRSW insns.  Given two 64-bit
+   values (aa,bb), computes, for each of the 4 16-bit lanes:
+
+   (((aa_lane *s32 bb_lane) >>u 14) + 1) >>u 1
+
+   The 16-bit lanes are widened to signed 32 bits by interleaving each
+   value with itself and arithmetically shifting right by 16.
+*/
+static IRExpr* dis_PMULHRSW_helper ( IRExpr* aax, IRExpr* bbx )
+{
+   IRTemp aa      = newTemp(Ity_I64);
+   IRTemp bb      = newTemp(Ity_I64);
+   IRTemp aahi32s = newTemp(Ity_I64);   /* aa lanes 3,2 sign-extended */
+   IRTemp aalo32s = newTemp(Ity_I64);   /* aa lanes 1,0 sign-extended */
+   IRTemp bbhi32s = newTemp(Ity_I64);
+   IRTemp bblo32s = newTemp(Ity_I64);
+   IRTemp rHi     = newTemp(Ity_I64);
+   IRTemp rLo     = newTemp(Ity_I64);
+   IRTemp one32x2 = newTemp(Ity_I64);   /* constant 1 in each 32-bit lane */
+   assign(aa, aax);
+   assign(bb, bbx);
+   assign( aahi32s,
+           binop(Iop_SarN32x2,
+                 binop(Iop_InterleaveHI16x4, mkexpr(aa), mkexpr(aa)),
+                 mkU8(16) ));
+   assign( aalo32s,
+           binop(Iop_SarN32x2,
+                 binop(Iop_InterleaveLO16x4, mkexpr(aa), mkexpr(aa)),
+                 mkU8(16) ));
+   assign( bbhi32s,
+           binop(Iop_SarN32x2,
+                 binop(Iop_InterleaveHI16x4, mkexpr(bb), mkexpr(bb)),
+                 mkU8(16) ));
+   assign( bblo32s,
+           binop(Iop_SarN32x2,
+                 binop(Iop_InterleaveLO16x4, mkexpr(bb), mkexpr(bb)),
+                 mkU8(16) ));
+   assign(one32x2, mkU64( (1ULL << 32) + 1 ));
+   assign(
+      rHi,
+      binop(
+         Iop_ShrN32x2,
+         binop(
+            Iop_Add32x2, 
+            binop(
+               Iop_ShrN32x2,
+               binop(Iop_Mul32x2, mkexpr(aahi32s), mkexpr(bbhi32s)),
+               mkU8(14)
+            ),
+            mkexpr(one32x2)
+         ),
+         mkU8(1)
+      )
+   );
+   assign(
+      rLo,
+      binop(
+         Iop_ShrN32x2,
+         binop(
+            Iop_Add32x2, 
+            binop(
+               Iop_ShrN32x2,
+               binop(Iop_Mul32x2, mkexpr(aalo32s), mkexpr(bblo32s)),
+               mkU8(14)
+            ),
+            mkexpr(one32x2)
+         ),
+         mkU8(1)
+      )
+   );
+   /* Narrow the two 32x2 results back to 16x4 by keeping the even
+      (low-half) 16-bit lanes. */
+   return
+      binop(Iop_CatEvenLanes16x4, mkexpr(rHi), mkexpr(rLo));
+}
+
+/* Helper for the SSSE3 (not SSE3) PSIGN{B,W,D} insns.  Given two 64-bit
+   values (aa,bb), computes, for each lane:
+
+          if aa_lane < 0 then - bb_lane
+     else if aa_lane > 0 then bb_lane
+     else 0
+
+   laneszB is the lane size in bytes (1, 2 or 4).
+*/
+static IRExpr* dis_PSIGN_helper ( IRExpr* aax, IRExpr* bbx, Int laneszB )
+{
+   IRTemp aa       = newTemp(Ity_I64);
+   IRTemp bb       = newTemp(Ity_I64);
+   IRTemp zero     = newTemp(Ity_I64);
+   IRTemp bbNeg    = newTemp(Ity_I64);   /* 0 - bb, per lane */
+   IRTemp negMask  = newTemp(Ity_I64);   /* all-ones where aa < 0 */
+   IRTemp posMask  = newTemp(Ity_I64);   /* all-ones where aa > 0 */
+   IROp   opSub    = Iop_INVALID;
+   IROp   opCmpGTS = Iop_INVALID;
+
+   switch (laneszB) {
+      case 1: opSub = Iop_Sub8x8;  opCmpGTS = Iop_CmpGT8Sx8;  break;
+      case 2: opSub = Iop_Sub16x4; opCmpGTS = Iop_CmpGT16Sx4; break;
+      case 4: opSub = Iop_Sub32x2; opCmpGTS = Iop_CmpGT32Sx2; break;
+      default: vassert(0);
+   }
+
+   assign( aa,      aax );
+   assign( bb,      bbx );
+   assign( zero,    mkU64(0) );
+   assign( bbNeg,   binop(opSub,    mkexpr(zero), mkexpr(bb)) );
+   assign( negMask, binop(opCmpGTS, mkexpr(zero), mkexpr(aa)) );
+   assign( posMask, binop(opCmpGTS, mkexpr(aa),   mkexpr(zero)) );
+
+   /* Lanes where aa == 0 match neither mask and so produce zero. */
+   return
+      binop(Iop_Or64,
+            binop(Iop_And64, mkexpr(bb),    mkexpr(posMask)),
+            binop(Iop_And64, mkexpr(bbNeg), mkexpr(negMask)) );
+
+}
+
+/* Helper for the SSSE3 (not SSE3) PABS{B,W,D} insns.  Given a 64-bit
+   value aa, computes, for each lane
+
+   if aa < 0 then -aa else aa
+
+   Note that the result is interpreted as unsigned, so that the
+   absolute value of the most negative signed input can be
+   represented.
+*/
+static IRExpr* dis_PABS_helper ( IRExpr* aax, Int laneszB )
+{
+   IRTemp src     = newTemp(Ity_I64);
+   IRTemp zeroes  = newTemp(Ity_I64);
+   IRTemp negated = newTemp(Ity_I64);
+   IRTemp ltzMask = newTemp(Ity_I64);
+   IRTemp gezMask = newTemp(Ity_I64);
+   IROp   subOp;
+   IROp   sarOp;
+
+   /* Pick the lane-granularity variants of subtract and
+      arithmetic-shift-right. */
+   if (laneszB == 1) {
+      subOp = Iop_Sub8x8;   sarOp = Iop_SarN8x8;
+   } else if (laneszB == 2) {
+      subOp = Iop_Sub16x4;  sarOp = Iop_SarN16x4;
+   } else if (laneszB == 4) {
+      subOp = Iop_Sub32x2;  sarOp = Iop_SarN32x2;
+   } else {
+      subOp = sarOp = Iop_INVALID;
+      vassert(0);
+   }
+
+   assign( src,     aax );
+   /* Replicate each lane's sign bit across the lane: all-ones iff
+      the lane is negative. */
+   assign( ltzMask, binop(sarOp, mkexpr(src), mkU8(8*laneszB-1)) );
+   assign( gezMask, unop(Iop_Not64, mkexpr(ltzMask)) );
+   assign( zeroes,  mkU64(0) );
+   /* 0 - aa, per lane */
+   assign( negated, binop(subOp, mkexpr(zeroes), mkexpr(src)) );
+   return
+      binop(Iop_Or64,
+            binop(Iop_And64, mkexpr(src),     mkexpr(gezMask)),
+            binop(Iop_And64, mkexpr(negated), mkexpr(ltzMask)) );
+}
+
+/* Combine the top (64 - 8*byteShift) bits of lo64 with the bottom
+   8*byteShift bits of hi64, i.e. one 64-bit piece of a PALIGNR
+   concatenate-and-shift.  The byteShift == 0 and == 8 cases are
+   handled by the caller, not here. */
+static IRExpr* dis_PALIGNR_XMM_helper ( IRTemp hi64,
+                                        IRTemp lo64, Int byteShift )
+{
+   Int loBits, hiBits;
+   vassert(byteShift >= 1 && byteShift <= 7);
+   loBits = 8 * byteShift;        /* bits dropped from the low half  */
+   hiBits = 64 - loBits;          /* bits contributed by the high half */
+   return binop(Iop_Or64,
+                binop(Iop_Shl64, mkexpr(hi64), mkU8(hiBits)),
+                binop(Iop_Shr64, mkexpr(lo64), mkU8(loBits)));
+}
+
+/* Generate a SIGSEGV followed by a restart of the current instruction
+   if effective_addr is not 16-aligned.  This is required behaviour
+   for some SSE3 instructions and all 128-bit SSSE3 instructions.
+   This assumes that guest_RIP_curr_instr is set correctly! */
+static void gen_SEGV_if_not_16_aligned ( IRTemp effective_addr )
+{
+   /* Low 4 bits of the address; nonzero means misaligned. */
+   IRExpr* low4
+      = binop(Iop_And32, mkexpr(effective_addr), mkU32(0xF));
+   IRExpr* misaligned
+      = binop(Iop_CmpNE32, low4, mkU32(0));
+   stmt(
+      IRStmt_Exit(
+         misaligned,
+         Ijk_SigSEGV,
+         IRConst_U32(guest_EIP_curr_instr),
+         OFFB_EIP
+      )
+   );
+}
+
+
+/* Helper for deciding whether a given insn (starting at the opcode
+   byte) may validly be used with a LOCK prefix.  The following insns
+   may be used with LOCK when their destination operand is in memory.
+   AFAICS this is exactly the same for both 32-bit and 64-bit mode.
+
+   ADD        80 /0,  81 /0,  82 /0,  83 /0,  00,  01
+   OR         80 /1,  81 /1,  82 /x,  83 /1,  08,  09
+   ADC        80 /2,  81 /2,  82 /2,  83 /2,  10,  11
+   SBB        80 /3,  81 /3,  82 /x,  83 /3,  18,  19
+   AND        80 /4,  81 /4,  82 /x,  83 /4,  20,  21
+   SUB        80 /5,  81 /5,  82 /x,  83 /5,  28,  29
+   XOR        80 /6,  81 /6,  82 /x,  83 /6,  30,  31
+
+   DEC        FE /1,  FF /1
+   INC        FE /0,  FF /0
+
+   NEG        F6 /3,  F7 /3
+   NOT        F6 /2,  F7 /2
+
+   XCHG       86, 87
+
+   BTC        0F BB,  0F BA /7
+   BTR        0F B3,  0F BA /6
+   BTS        0F AB,  0F BA /5
+
+   CMPXCHG    0F B0,  0F B1
+   CMPXCHG8B  0F C7 /1
+
+   XADD       0F C0,  0F C1
+
+   ------------------------------
+
+   80 /0  =  addb $imm8,  rm8
+   81 /0  =  addl $imm32, rm32  and  addw $imm16, rm16
+   82 /0  =  addb $imm8,  rm8
+   83 /0  =  addl $simm8, rm32  and  addw $simm8, rm16
+
+   00     =  addb r8,  rm8
+   01     =  addl r32, rm32  and  addw r16, rm16
+
+   Same for ADD OR ADC SBB AND SUB XOR
+
+   FE /1  = dec rm8
+   FF /1  = dec rm32  and  dec rm16
+
+   FE /0  = inc rm8
+   FF /0  = inc rm32  and  inc rm16
+
+   F6 /3  = neg rm8
+   F7 /3  = neg rm32  and  neg rm16
+
+   F6 /2  = not rm8
+   F7 /2  = not rm32  and  not rm16
+
+   0F BB     = btcw r16, rm16    and  btcl r32, rm32
+   0F BA /7  = btcw $imm8, rm16  and  btcl $imm8, rm32
+
+   Same for BTS, BTR
+*/
+/* Decide whether the insn whose opcode bytes start at opc[0] may
+   legitimately carry a LOCK prefix: True iff it is one of the
+   lockable instructions (see table above) AND its E operand is in
+   memory. */
+static Bool can_be_used_with_LOCK_prefix ( const UChar* opc )
+{
+   UChar b0 = opc[0];
+   UChar b1 = opc[1];
+
+   switch (b0) {
+      /* ADD OR ADC SBB AND SUB XOR, reg-to-E forms */
+      case 0x00: case 0x01: case 0x08: case 0x09:
+      case 0x10: case 0x11: case 0x18: case 0x19:
+      case 0x20: case 0x21: case 0x28: case 0x29:
+      case 0x30: case 0x31:
+      /* XCHG */
+      case 0x86: case 0x87:
+         return toBool( !epartIsReg(b1) );
+
+      /* Grp1: immediate ALU forms.  /7 would be CMP, not lockable. */
+      case 0x80: case 0x81: case 0x82: case 0x83:
+         return toBool( gregOfRM(b1) >= 0 && gregOfRM(b1) <= 6
+                        && !epartIsReg(b1) );
+
+      /* Grp4/5: only INC (/0) and DEC (/1) are lockable. */
+      case 0xFE: case 0xFF:
+         return toBool( gregOfRM(b1) >= 0 && gregOfRM(b1) <= 1
+                        && !epartIsReg(b1) );
+
+      /* Grp3: only NOT (/2) and NEG (/3) are lockable. */
+      case 0xF6: case 0xF7:
+         return toBool( gregOfRM(b1) >= 2 && gregOfRM(b1) <= 3
+                        && !epartIsReg(b1) );
+
+      /* Two-byte opcodes */
+      case 0x0F:
+         switch (b1) {
+            case 0xBB: case 0xB3: case 0xAB:  /* BTC BTR BTS, reg forms */
+            case 0xB0: case 0xB1:             /* CMPXCHG */
+            case 0xC0: case 0xC1:             /* XADD */
+               return toBool( !epartIsReg(opc[2]) );
+            /* BT{S,R,C} $imm8 forms: /5 /6 /7 only */
+            case 0xBA:
+               return toBool( gregOfRM(opc[2]) >= 5 && gregOfRM(opc[2]) <= 7
+                              && !epartIsReg(opc[2]) );
+            /* CMPXCHG8B is /1 of 0F C7 */
+            case 0xC7:
+               return toBool( gregOfRM(opc[2]) == 1 && !epartIsReg(opc[2]) );
+            default:
+               return False;
+         }
+
+      default:
+         return False;
+   }
+}
+
+/* Byte-swap the I32 or I16 value in t1, returning a new temp holding
+   the swapped value.  Any other type asserts. */
+static IRTemp math_BSWAP ( IRTemp t1, IRType ty )
+{
+   IRTemp res = newTemp(ty);
+   switch (ty) {
+      case Ity_I32:
+         /* (t1 << 24)
+            | ((t1 << 8)  & 0x00FF0000)
+            | ((t1 >> 8)  & 0x0000FF00)
+            | ((t1 >> 24) & 0x000000FF) */
+         assign( res,
+            binop(
+               Iop_Or32,
+               binop(Iop_Shl32, mkexpr(t1), mkU8(24)),
+               binop(
+                  Iop_Or32,
+                  binop(Iop_And32, binop(Iop_Shl32, mkexpr(t1), mkU8(8)),
+                                   mkU32(0x00FF0000)),
+                  binop(Iop_Or32,
+                        binop(Iop_And32, binop(Iop_Shr32, mkexpr(t1), mkU8(8)),
+                                         mkU32(0x0000FF00)),
+                        binop(Iop_And32, binop(Iop_Shr32, mkexpr(t1), mkU8(24)),
+                                         mkU32(0x000000FF) )
+               )))
+         );
+         return res;
+      case Ity_I16:
+         /* A 16-bit swap is just an 8-bit rotate. */
+         assign( res,
+                 binop(Iop_Or16,
+                       binop(Iop_Shl16, mkexpr(t1), mkU8(8)),
+                       binop(Iop_Shr16, mkexpr(t1), mkU8(8)) ));
+         return res;
+      default:
+         vassert(0);
+         /*NOTREACHED*/
+         return IRTemp_INVALID;
+   }
+}
+
+/*------------------------------------------------------------*/
+/*--- Disassemble a single instruction                     ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction is
+   located in host memory at &guest_code[delta].  *expect_CAS is set
+   to True if the resulting IR is expected to contain an IRCAS
+   statement, and False if it's not expected to.  This makes it
+   possible for the caller of disInstr_X86_WRK to check that
+   LOCK-prefixed instructions are at least plausibly translated, in
+   that it becomes possible to check that a (validly) LOCK-prefixed
+   instruction generates a translation containing an IRCAS, and
+   instructions without LOCK prefixes don't generate translations
+   containing an IRCAS.
+*/
+static
+DisResult disInstr_X86_WRK (
+             /*OUT*/Bool* expect_CAS,
+             Bool         (*resteerOkFn) ( /*opaque*/void*, Addr ),
+             Bool         resteerCisOk,
+             void*        callback_opaque,
+             Long         delta64,
+             const VexArchInfo* archinfo,
+             const VexAbiInfo*  vbi,
+             Bool         sigill_diag
+          )
+{
+   IRType    ty;
+   IRTemp    addr, t0, t1, t2, t3, t4, t5, t6;
+   Int       alen;
+   UChar     opc, modrm, abyte, pre;
+   UInt      d32;
+   HChar     dis_buf[50];
+   Int       am_sz, d_sz, n_prefixes;
+   DisResult dres;
+   const UChar* insn; /* used in SSE decoders */
+
+   /* The running delta */
+   Int delta = (Int)delta64;
+
+   /* Holds eip at the start of the insn, so that we can print
+      consistent error messages for unimplemented insns. */
+   Int delta_start = delta;
+
+   /* sz denotes the nominal data-op size of the insn; we change it to
+      2 if an 0x66 prefix is seen */
+   Int sz = 4;
+
+   /* sorb holds the segment-override-prefix byte, if any.  Zero if no
+      prefix has been seen, else one of {0x26, 0x3E, 0x64, 0x65}
+      indicating the prefix.  */
+   UChar sorb = 0;
+
+   /* Gets set to True if a LOCK prefix is seen. */
+   Bool pfx_lock = False;
+
+   /* Set result defaults. */
+   dres.whatNext    = Dis_Continue;
+   dres.len         = 0;
+   dres.continueAt  = 0;
+   dres.jk_StopHere = Ijk_INVALID;
+
+   *expect_CAS = False;
+
+   addr = t0 = t1 = t2 = t3 = t4 = t5 = t6 = IRTemp_INVALID; 
+
+   vassert(guest_EIP_bbstart + delta == guest_EIP_curr_instr);
+   DIP("\t0x%x:  ", guest_EIP_bbstart+delta);
+
+   /* Spot "Special" instructions (see comment at top of file). */
+   {
+      const UChar* code = guest_code + delta;
+      /* Spot the 12-byte preamble:
+         C1C703   roll $3,  %edi
+         C1C70D   roll $13, %edi
+         C1C71D   roll $29, %edi
+         C1C713   roll $19, %edi
+      */
+      if (code[ 0] == 0xC1 && code[ 1] == 0xC7 && code[ 2] == 0x03 &&
+          code[ 3] == 0xC1 && code[ 4] == 0xC7 && code[ 5] == 0x0D &&
+          code[ 6] == 0xC1 && code[ 7] == 0xC7 && code[ 8] == 0x1D &&
+          code[ 9] == 0xC1 && code[10] == 0xC7 && code[11] == 0x13) {
+         /* Got a "Special" instruction preamble.  Which one is it? */
+         if (code[12] == 0x87 && code[13] == 0xDB /* xchgl %ebx,%ebx */) {
+            /* %EDX = client_request ( %EAX ) */
+            DIP("%%edx = client_request ( %%eax )\n");
+            delta += 14;
+            jmp_lit(&dres, Ijk_ClientReq, guest_EIP_bbstart+delta);
+            vassert(dres.whatNext == Dis_StopHere);
+            goto decode_success;
+         }
+         else
+         if (code[12] == 0x87 && code[13] == 0xC9 /* xchgl %ecx,%ecx */) {
+            /* %EAX = guest_NRADDR */
+            DIP("%%eax = guest_NRADDR\n");
+            delta += 14;
+            putIReg(4, R_EAX, IRExpr_Get( OFFB_NRADDR, Ity_I32 ));
+            goto decode_success;
+         }
+         else
+         if (code[12] == 0x87 && code[13] == 0xD2 /* xchgl %edx,%edx */) {
+            /* call-noredir *%EAX */
+            DIP("call-noredir *%%eax\n");
+            delta += 14;
+            t1 = newTemp(Ity_I32);
+            assign(t1, getIReg(4,R_EAX));
+            t2 = newTemp(Ity_I32);
+            assign(t2, binop(Iop_Sub32, getIReg(4,R_ESP), mkU32(4)));
+            putIReg(4, R_ESP, mkexpr(t2));
+            storeLE( mkexpr(t2), mkU32(guest_EIP_bbstart+delta));
+            jmp_treg(&dres, Ijk_NoRedir, t1);
+            vassert(dres.whatNext == Dis_StopHere);
+            goto decode_success;
+         }
+         else
+         if (code[12] == 0x87 && code[13] == 0xFF /* xchgl %edi,%edi */) {
+            /* IR injection */
+            DIP("IR injection\n");
+            vex_inject_ir(irsb, Iend_LE);
+
+            // Invalidate the current insn. The reason is that the IRop we're
+            // injecting here can change. In which case the translation has to
+            // be redone. For ease of handling, we simply invalidate all the
+            // time.
+            stmt(IRStmt_Put(OFFB_CMSTART, mkU32(guest_EIP_curr_instr)));
+            stmt(IRStmt_Put(OFFB_CMLEN,   mkU32(14)));
+   
+            delta += 14;
+
+            stmt( IRStmt_Put( OFFB_EIP, mkU32(guest_EIP_bbstart + delta) ) );
+            dres.whatNext    = Dis_StopHere;
+            dres.jk_StopHere = Ijk_InvalICache;
+            goto decode_success;
+         }
+         /* We don't know what it is. */
+         goto decode_failure;
+         /*NOTREACHED*/
+      }
+   }
+
+   /* Handle a couple of weird-ass NOPs that have been observed in the
+      wild. */
+   {
+      const UChar* code = guest_code + delta;
+      /* Sun's JVM 1.5.0 uses the following as a NOP:
+         26 2E 64 65 90  %es:%cs:%fs:%gs:nop */
+      if (code[0] == 0x26 && code[1] == 0x2E && code[2] == 0x64 
+          && code[3] == 0x65 && code[4] == 0x90) {
+         DIP("%%es:%%cs:%%fs:%%gs:nop\n");
+         delta += 5;
+         goto decode_success;
+      }
+      /* Don't barf on recent binutils padding,
+         all variants of which are: nopw %cs:0x0(%eax,%eax,1)
+         66 2e 0f 1f 84 00 00 00 00 00
+         66 66 2e 0f 1f 84 00 00 00 00 00
+         66 66 66 2e 0f 1f 84 00 00 00 00 00
+         66 66 66 66 2e 0f 1f 84 00 00 00 00 00
+         66 66 66 66 66 2e 0f 1f 84 00 00 00 00 00
+         66 66 66 66 66 66 2e 0f 1f 84 00 00 00 00 00
+      */
+      if (code[0] == 0x66) {
+         Int data16_cnt;
+         for (data16_cnt = 1; data16_cnt < 6; data16_cnt++)
+            if (code[data16_cnt] != 0x66)
+               break;
+         if (code[data16_cnt] == 0x2E && code[data16_cnt + 1] == 0x0F
+             && code[data16_cnt + 2] == 0x1F && code[data16_cnt + 3] == 0x84
+             && code[data16_cnt + 4] == 0x00 && code[data16_cnt + 5] == 0x00
+             && code[data16_cnt + 6] == 0x00 && code[data16_cnt + 7] == 0x00
+             && code[data16_cnt + 8] == 0x00 ) {
+            DIP("nopw %%cs:0x0(%%eax,%%eax,1)\n");
+            delta += 9 + data16_cnt;
+            goto decode_success;
+         }
+      }
+   }       
+
+   /* Normal instruction handling starts here. */
+
+   /* Deal with some but not all prefixes: 
+         66(oso)
+         F0(lock)
+         2E(cs:) 3E(ds:) 26(es:) 64(fs:) 65(gs:) 36(ss:)
+      Not dealt with (left in place):
+         F2 F3
+   */
+   n_prefixes = 0;
+   while (True) {
+      if (n_prefixes > 7) goto decode_failure;
+      pre = getUChar(delta);
+      switch (pre) {
+         case 0x66: 
+            sz = 2;
+            break;
+         case 0xF0: 
+            pfx_lock = True; 
+            *expect_CAS = True;
+            break;
+         case 0x3E: /* %DS: */
+         case 0x26: /* %ES: */
+         case 0x64: /* %FS: */
+         case 0x65: /* %GS: */
+            if (sorb != 0) 
+               goto decode_failure; /* only one seg override allowed */
+            sorb = pre;
+            break;
+         case 0x2E: { /* %CS: */
+            /* 2E prefix on a conditional branch instruction is a
+               branch-prediction hint, which can safely be ignored.  */
+            UChar op1 = getIByte(delta+1);
+            UChar op2 = getIByte(delta+2);
+            if ((op1 >= 0x70 && op1 <= 0x7F)
+                || (op1 == 0xE3)
+                || (op1 == 0x0F && op2 >= 0x80 && op2 <= 0x8F)) {
+               if (0) vex_printf("vex x86->IR: ignoring branch hint\n");
+            } else {
+               /* All other CS override cases are not handled */
+               goto decode_failure;
+            }
+            break;
+         }
+         case 0x36: /* %SS: */
+            /* SS override cases are not handled */
+            goto decode_failure;
+         default: 
+            goto not_a_prefix;
+      }
+      n_prefixes++;
+      delta++;
+   }
+
+   not_a_prefix:
+
+   /* Now we should be looking at the primary opcode byte or the
+      leading F2 or F3.  Check that any LOCK prefix is actually
+      allowed. */
+
+   if (pfx_lock) {
+     if (can_be_used_with_LOCK_prefix( &guest_code[delta] )) {
+         DIP("lock ");
+      } else {
+         *expect_CAS = False;
+         goto decode_failure;
+      }
+   }
+
+
+   /* ---------------------------------------------------- */
+   /* --- The SSE decoder.                             --- */
+   /* ---------------------------------------------------- */
+
+   /* What did I do to deserve SSE ?  Perhaps I was really bad in a
+      previous life? */
+
+   /* Note, this doesn't handle SSE2 or SSE3.  That is handled in a
+      later section, further on. */
+
+   insn = &guest_code[delta];
+
+   /* Treat fxsave specially.  It should be doable even on an SSE0
+      (Pentium-II class) CPU.  Hence be prepared to handle it on
+      any subarchitecture variant.
+   */
+
+   /* 0F AE /0 = FXSAVE m512 -- write x87 and SSE state to memory */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xAE
+       && !epartIsReg(insn[2]) && gregOfRM(insn[2]) == 0) {
+      IRDirty* d;
+      modrm = getIByte(delta+2);
+      vassert(sz == 4);
+      vassert(!epartIsReg(modrm));
+
+      addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+      delta += 2+alen;
+      gen_SEGV_if_not_16_aligned(addr);
+
+      DIP("fxsave %s\n", dis_buf);
+
+      /* Uses dirty helper: 
+            void x86g_do_FXSAVE ( VexGuestX86State*, UInt ) */
+      d = unsafeIRDirty_0_N ( 
+             0/*regparms*/, 
+             "x86g_dirtyhelper_FXSAVE", 
+             &x86g_dirtyhelper_FXSAVE,
+             mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+          );
+
+      /* declare we're writing memory */
+      d->mFx   = Ifx_Write;
+      d->mAddr = mkexpr(addr);
+      d->mSize = 464; /* according to recent Intel docs */
+
+      /* declare we're reading guest state */
+      d->nFxState = 7;
+      vex_bzero(&d->fxState, sizeof(d->fxState));
+
+      d->fxState[0].fx     = Ifx_Read;
+      d->fxState[0].offset = OFFB_FTOP;
+      d->fxState[0].size   = sizeof(UInt);
+
+      d->fxState[1].fx     = Ifx_Read;
+      d->fxState[1].offset = OFFB_FPREGS;
+      d->fxState[1].size   = 8 * sizeof(ULong);
+
+      d->fxState[2].fx     = Ifx_Read;
+      d->fxState[2].offset = OFFB_FPTAGS;
+      d->fxState[2].size   = 8 * sizeof(UChar);
+
+      d->fxState[3].fx     = Ifx_Read;
+      d->fxState[3].offset = OFFB_FPROUND;
+      d->fxState[3].size   = sizeof(UInt);
+
+      d->fxState[4].fx     = Ifx_Read;
+      d->fxState[4].offset = OFFB_FC3210;
+      d->fxState[4].size   = sizeof(UInt);
+
+      d->fxState[5].fx     = Ifx_Read;
+      d->fxState[5].offset = OFFB_XMM0;
+      d->fxState[5].size   = 8 * sizeof(U128);
+
+      d->fxState[6].fx     = Ifx_Read;
+      d->fxState[6].offset = OFFB_SSEROUND;
+      d->fxState[6].size   = sizeof(UInt);
+
+      /* Be paranoid ... this assertion tries to ensure the 8 %xmm
+	 images are packed back-to-back.  If not, the value of
+	 d->fxState[5].size is wrong. */
+      vassert(16 == sizeof(U128));
+      vassert(OFFB_XMM7 == (OFFB_XMM0 + 7 * 16));
+
+      stmt( IRStmt_Dirty(d) );
+
+      goto decode_success;
+   }
+
+   /* 0F AE /1 = FXRSTOR m512 -- read x87 and SSE state from memory */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xAE
+       && !epartIsReg(insn[2]) && gregOfRM(insn[2]) == 1) {
+      IRDirty* d;
+      modrm = getIByte(delta+2);
+      vassert(sz == 4);
+      vassert(!epartIsReg(modrm));
+
+      addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+      delta += 2+alen;
+      gen_SEGV_if_not_16_aligned(addr);
+
+      DIP("fxrstor %s\n", dis_buf);
+
+      /* Uses dirty helper: 
+            VexEmNote x86g_do_FXRSTOR ( VexGuestX86State*, UInt )
+         NOTE:
+            the VexEmNote value is simply ignored (unlike for FRSTOR)
+      */
+      d = unsafeIRDirty_0_N ( 
+             0/*regparms*/, 
+             "x86g_dirtyhelper_FXRSTOR", 
+             &x86g_dirtyhelper_FXRSTOR,
+             mkIRExprVec_2( IRExpr_BBPTR(), mkexpr(addr) )
+          );
+
+      /* declare we're reading memory */
+      d->mFx   = Ifx_Read;
+      d->mAddr = mkexpr(addr);
+      d->mSize = 464; /* according to recent Intel docs */
+
+      /* declare we're writing guest state */
+      d->nFxState = 7;
+      vex_bzero(&d->fxState, sizeof(d->fxState));
+
+      d->fxState[0].fx     = Ifx_Write;
+      d->fxState[0].offset = OFFB_FTOP;
+      d->fxState[0].size   = sizeof(UInt);
+
+      d->fxState[1].fx     = Ifx_Write;
+      d->fxState[1].offset = OFFB_FPREGS;
+      d->fxState[1].size   = 8 * sizeof(ULong);
+
+      d->fxState[2].fx     = Ifx_Write;
+      d->fxState[2].offset = OFFB_FPTAGS;
+      d->fxState[2].size   = 8 * sizeof(UChar);
+
+      d->fxState[3].fx     = Ifx_Write;
+      d->fxState[3].offset = OFFB_FPROUND;
+      d->fxState[3].size   = sizeof(UInt);
+
+      d->fxState[4].fx     = Ifx_Write;
+      d->fxState[4].offset = OFFB_FC3210;
+      d->fxState[4].size   = sizeof(UInt);
+
+      d->fxState[5].fx     = Ifx_Write;
+      d->fxState[5].offset = OFFB_XMM0;
+      d->fxState[5].size   = 8 * sizeof(U128);
+
+      d->fxState[6].fx     = Ifx_Write;
+      d->fxState[6].offset = OFFB_SSEROUND;
+      d->fxState[6].size   = sizeof(UInt);
+
+      /* Be paranoid ... this assertion tries to ensure the 8 %xmm
+	 images are packed back-to-back.  If not, the value of
+	 d->fxState[5].size is wrong. */
+      vassert(16 == sizeof(U128));
+      vassert(OFFB_XMM7 == (OFFB_XMM0 + 7 * 16));
+
+      stmt( IRStmt_Dirty(d) );
+
+      goto decode_success;
+   }
+
+   /* ------ SSE decoder main ------ */
+
+   /* Skip parts of the decoder which don't apply given the stated
+      guest subarchitecture. */
+   if (archinfo->hwcaps == 0/*baseline, no sse at all*/)
+      goto after_sse_decoders;
+
+   /* With mmxext only some extended MMX instructions are recognized.
+      The mmxext instructions are MASKMOVQ MOVNTQ PAVGB PAVGW PMAXSW
+      PMAXUB PMINSW PMINUB PMULHUW PSADBW PSHUFW PEXTRW PINSRW PMOVMSKB
+      PREFETCHNTA PREFETCHT0 PREFETCHT1 PREFETCHT2 SFENCE
+
+      http://support.amd.com/us/Embedded_TechDocs/22466.pdf
+      https://en.wikipedia.org/wiki/3DNow!#3DNow.21_extensions */
+
+   if (archinfo->hwcaps == VEX_HWCAPS_X86_MMXEXT/*integer only sse1 subset*/)
+      goto mmxext;
+
+   /* Otherwise we must be doing sse1 or sse2, so we can at least try
+      for SSE1 here. */
+
+   /* 0F 58 = ADDPS -- add 32Fx4 from R/M to R */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x58) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "addps", Iop_Add32Fx4 );
+      goto decode_success;
+   }
+
+   /* F3 0F 58 = ADDSS -- add 32F0x4 from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x58) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo32( sorb, delta+3, "addss", Iop_Add32F0x4 );
+      goto decode_success;
+   }
+
+   /* 0F 55 = ANDNPS -- G = (not G) and E */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x55) {
+      delta = dis_SSE_E_to_G_all_invG( sorb, delta+2, "andnps", Iop_AndV128 );
+      goto decode_success;
+   }
+
+   /* 0F 54 = ANDPS -- G = G and E */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x54) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "andps", Iop_AndV128 );
+      goto decode_success;
+   }
+
+   /* 0F C2 = CMPPS -- 32Fx4 comparison from R/M to R */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xC2) {
+      delta = dis_SSEcmp_E_to_G( sorb, delta+2, "cmpps", True, 4 );
+      goto decode_success;
+   }
+
+   /* F3 0F C2 = CMPSS -- 32F0x4 comparison from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0xC2) {
+      vassert(sz == 4);
+      delta = dis_SSEcmp_E_to_G( sorb, delta+3, "cmpss", False, 4 );
+      goto decode_success;
+   }
+
+   /* 0F 2F = COMISS  -- 32F0x4 comparison G,E, and set ZCP */
+   /* 0F 2E = UCOMISS -- 32F0x4 comparison G,E, and set ZCP */
+   if (sz == 4 && insn[0] == 0x0F && (insn[1] == 0x2F || insn[1] == 0x2E)) {
+      IRTemp argL = newTemp(Ity_F32);
+      IRTemp argR = newTemp(Ity_F32);
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         assign( argR, getXMMRegLane32F( eregOfRM(modrm), 0/*lowest lane*/ ) );
+         delta += 2+1;
+         DIP("[u]comiss %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)) );
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign( argR, loadLE(Ity_F32, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("[u]comiss %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRM(modrm)) );
+      }
+      assign( argL, getXMMRegLane32F( gregOfRM(modrm), 0/*lowest lane*/ ) );
+
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+      stmt( IRStmt_Put( 
+               OFFB_CC_DEP1,
+               binop( Iop_And32,
+                      binop(Iop_CmpF64, 
+                            unop(Iop_F32toF64,mkexpr(argL)),
+                            unop(Iop_F32toF64,mkexpr(argR))),
+                      mkU32(0x45)
+          )));
+      /* Set NDEP even though it isn't used.  This makes redundant-PUT
+         elimination of previous stores to this field work better. */
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+      goto decode_success;
+   }
+
+   /* 0F 2A = CVTPI2PS -- convert 2 x I32 in mem/mmx to 2 x F32 in low
+      half xmm */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x2A) {
+      IRTemp arg64 = newTemp(Ity_I64);
+      IRTemp rmode = newTemp(Ity_I32);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+2);
+      do_MMX_preamble();
+      if (epartIsReg(modrm)) {
+         assign( arg64, getMMXReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("cvtpi2ps %s,%s\n", nameMMXReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("cvtpi2ps %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+
+      assign( rmode, get_sse_roundingmode() );
+
+      putXMMRegLane32F( 
+         gregOfRM(modrm), 0,
+         binop(Iop_F64toF32, 
+               mkexpr(rmode),
+               unop(Iop_I32StoF64, 
+                    unop(Iop_64to32, mkexpr(arg64)) )) );
+
+      putXMMRegLane32F(
+         gregOfRM(modrm), 1, 
+         binop(Iop_F64toF32, 
+               mkexpr(rmode),
+               unop(Iop_I32StoF64,
+                    unop(Iop_64HIto32, mkexpr(arg64)) )) );
+
+      goto decode_success;
+   }
+
+   /* F3 0F 2A = CVTSI2SS -- convert I32 in mem/ireg to F32 in low
+      quarter xmm */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x2A) {
+      IRTemp arg32 = newTemp(Ity_I32);
+      IRTemp rmode = newTemp(Ity_I32);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         assign( arg32, getIReg(4, eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("cvtsi2ss %s,%s\n", nameIReg(4, eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+	 assign( arg32, loadLE(Ity_I32, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("cvtsi2ss %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+
+      assign( rmode, get_sse_roundingmode() );
+
+      putXMMRegLane32F( 
+         gregOfRM(modrm), 0,
+         binop(Iop_F64toF32,
+               mkexpr(rmode),
+               unop(Iop_I32StoF64, mkexpr(arg32)) ) );
+
+      goto decode_success;
+   }
+
+   /* 0F 2D = CVTPS2PI -- convert 2 x F32 in mem/low half xmm to 2 x
+      I32 in mmx, according to prevailing SSE rounding mode */
+   /* 0F 2C = CVTTPS2PI -- convert 2 x F32 in mem/low half xmm to 2 x
+      I32 in mmx, rounding towards zero */
+   if (sz == 4 && insn[0] == 0x0F && (insn[1] == 0x2D || insn[1] == 0x2C)) {
+      IRTemp dst64  = newTemp(Ity_I64);
+      IRTemp rmode  = newTemp(Ity_I32);
+      IRTemp f32lo  = newTemp(Ity_F32);
+      IRTemp f32hi  = newTemp(Ity_F32);
+      Bool   r2zero = toBool(insn[1] == 0x2C);
+
+      do_MMX_preamble();
+      modrm = getIByte(delta+2);
+
+      if (epartIsReg(modrm)) {
+         delta += 2+1;
+	 assign(f32lo, getXMMRegLane32F(eregOfRM(modrm), 0));
+	 assign(f32hi, getXMMRegLane32F(eregOfRM(modrm), 1));
+         DIP("cvt%sps2pi %s,%s\n", r2zero ? "t" : "",
+                                   nameXMMReg(eregOfRM(modrm)),
+                                   nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign(f32lo, loadLE(Ity_F32, mkexpr(addr)));
+	 assign(f32hi, loadLE(Ity_F32, binop( Iop_Add32, 
+                                              mkexpr(addr), 
+                                              mkU32(4) )));
+         delta += 2+alen;
+         DIP("cvt%sps2pi %s,%s\n", r2zero ? "t" : "",
+                                   dis_buf,
+                                   nameMMXReg(gregOfRM(modrm)));
+      }
+
+      if (r2zero) {
+         assign(rmode, mkU32((UInt)Irrm_ZERO) );
+      } else {
+         assign( rmode, get_sse_roundingmode() );
+      }
+
+      assign( 
+         dst64,
+         binop( Iop_32HLto64,
+                binop( Iop_F64toI32S, 
+                       mkexpr(rmode), 
+                       unop( Iop_F32toF64, mkexpr(f32hi) ) ),
+                binop( Iop_F64toI32S, 
+                       mkexpr(rmode), 
+                       unop( Iop_F32toF64, mkexpr(f32lo) ) )
+              )
+      );
+
+      putMMXReg(gregOfRM(modrm), mkexpr(dst64));
+      goto decode_success;
+   }
+
+   /* F3 0F 2D = CVTSS2SI -- convert F32 in mem/low quarter xmm to
+      I32 in ireg, according to prevailing SSE rounding mode */
+   /* F3 0F 2C = CVTTSS2SI -- convert F32 in mem/low quarter xmm to
+      I32 in ireg, rounding towards zero */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F 
+       && (insn[2] == 0x2D || insn[2] == 0x2C)) {
+      IRTemp rmode = newTemp(Ity_I32);
+      IRTemp f32lo = newTemp(Ity_F32);
+      Bool   r2zero = toBool(insn[2] == 0x2C);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         delta += 3+1;
+	 assign(f32lo, getXMMRegLane32F(eregOfRM(modrm), 0));
+         DIP("cvt%sss2si %s,%s\n", r2zero ? "t" : "",
+                                   nameXMMReg(eregOfRM(modrm)),
+                                   nameIReg(4, gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+	 assign(f32lo, loadLE(Ity_F32, mkexpr(addr)));
+         delta += 3+alen;
+         DIP("cvt%sss2si %s,%s\n", r2zero ? "t" : "",
+                                   dis_buf,
+                                   nameIReg(4, gregOfRM(modrm)));
+      }
+
+      if (r2zero) {
+         assign( rmode, mkU32((UInt)Irrm_ZERO) );
+      } else {
+         assign( rmode, get_sse_roundingmode() );
+      }
+
+      putIReg(4, gregOfRM(modrm),
+                 binop( Iop_F64toI32S, 
+                        mkexpr(rmode), 
+                        unop( Iop_F32toF64, mkexpr(f32lo) ) )
+      );
+
+      goto decode_success;
+   }
+
+   /* 0F 5E = DIVPS -- div 32Fx4 from R/M to R */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x5E) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "divps", Iop_Div32Fx4 );
+      goto decode_success;
+   }
+
+   /* F3 0F 5E = DIVSS -- div 32F0x4 from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x5E) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo32( sorb, delta+3, "divss", Iop_Div32F0x4 );
+      goto decode_success;
+   }
+
+   /* 0F AE /2 = LDMXCSR m32 -- load %mxcsr */
+   if (insn[0] == 0x0F && insn[1] == 0xAE
+       && !epartIsReg(insn[2]) && gregOfRM(insn[2]) == 2) {
+
+      IRTemp t64 = newTemp(Ity_I64);
+      IRTemp ew = newTemp(Ity_I32);
+
+      modrm = getIByte(delta+2);
+      vassert(!epartIsReg(modrm));
+      vassert(sz == 4);
+
+      addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+      delta += 2+alen;
+      DIP("ldmxcsr %s\n", dis_buf);
+
+      /* The only thing we observe in %mxcsr is the rounding mode.
+         Therefore, pass the 32-bit value (SSE native-format control
+         word) to a clean helper, getting back a 64-bit value, the
+         lower half of which is the SSEROUND value to store, and the
+         upper half of which is the emulation-warning token which may
+         be generated.  
+      */
+      /* ULong x86g_check_ldmxcsr ( UInt ); */
+      assign( t64, mkIRExprCCall(
+                      Ity_I64, 0/*regparms*/, 
+                      "x86g_check_ldmxcsr",
+                      &x86g_check_ldmxcsr, 
+                      mkIRExprVec_1( loadLE(Ity_I32, mkexpr(addr)) )
+                   )
+            );
+
+      put_sse_roundingmode( unop(Iop_64to32, mkexpr(t64)) );
+      assign( ew, unop(Iop_64HIto32, mkexpr(t64) ) );
+      put_emwarn( mkexpr(ew) );
+      /* Finally, if an emulation warning was reported, side-exit to
+         the next insn, reporting the warning, so that Valgrind's
+         dispatcher sees the warning. */
+      stmt( 
+         IRStmt_Exit(
+            binop(Iop_CmpNE32, mkexpr(ew), mkU32(0)),
+            Ijk_EmWarn,
+            IRConst_U32( ((Addr32)guest_EIP_bbstart)+delta),
+            OFFB_EIP
+         )
+      );
+      goto decode_success;
+   }
+
+
+   /* mmxext sse1 subset starts here. mmxext only arches will parse
+      only this subset of the sse1 instructions. */
+  mmxext:
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F F7 = MASKMOVQ -- 8x8 masked store */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xF7) {
+      Bool ok = False;
+      delta = dis_MMX( &ok, sorb, sz, delta+1 );
+      if (!ok)
+         goto decode_failure;
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F E7 = MOVNTQ -- for us, just a plain MMX store.  Note, the
+      Intel manual does not say anything about the usual business of
+      the FP reg tags getting trashed whenever an MMX insn happens.
+      So we just leave them alone. 
+   */
+   if (insn[0] == 0x0F && insn[1] == 0xE7) {
+      modrm = getIByte(delta+2);
+      if (sz == 4 && !epartIsReg(modrm)) {
+         /* do_MMX_preamble(); Intel docs don't specify this */
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         storeLE( mkexpr(addr), getMMXReg(gregOfRM(modrm)) );
+         DIP("movntq %s,%s\n", dis_buf,
+                               nameMMXReg(gregOfRM(modrm)));
+         delta += 2+alen;
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F E0 = PAVGB -- 8x8 unsigned Packed Average, with rounding */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xE0) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                sorb, delta+2, insn[1], "pavgb", False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F E3 = PAVGW -- 16x4 unsigned Packed Average, with rounding */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xE3) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                sorb, delta+2, insn[1], "pavgw", False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F C5 = PEXTRW -- extract 16-bit field from mmx(E) and put 
+      zero-extend of it in ireg(G). */
+   if (insn[0] == 0x0F && insn[1] == 0xC5) {
+      modrm = insn[2];
+      if (sz == 4 && epartIsReg(modrm)) {
+         IRTemp sV = newTemp(Ity_I64);
+         t5 = newTemp(Ity_I16);
+         do_MMX_preamble();
+         assign(sV, getMMXReg(eregOfRM(modrm)));
+         breakup64to16s( sV, &t3, &t2, &t1, &t0 );
+         switch (insn[3] & 3) {
+            case 0:  assign(t5, mkexpr(t0)); break;
+            case 1:  assign(t5, mkexpr(t1)); break;
+            case 2:  assign(t5, mkexpr(t2)); break;
+            case 3:  assign(t5, mkexpr(t3)); break;
+            default: vassert(0); /*NOTREACHED*/
+         }
+         putIReg(4, gregOfRM(modrm), unop(Iop_16Uto32, mkexpr(t5)));
+         DIP("pextrw $%d,%s,%s\n",
+             (Int)insn[3], nameMMXReg(eregOfRM(modrm)),
+                           nameIReg(4,gregOfRM(modrm)));
+         delta += 4;
+         goto decode_success;
+      } 
+      /* else fall through */
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F C4 = PINSRW -- get 16 bits from E(mem or low half ireg) and
+      put it into the specified lane of mmx(G). */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xC4) {
+      /* Use t0 .. t3 to hold the 4 original 16-bit lanes of the
+         mmx reg.  t4 is the new lane value.  t5 is the original
+         mmx value. t6 is the new mmx value. */
+      Int lane;
+      t4 = newTemp(Ity_I16);
+      t5 = newTemp(Ity_I64);
+      t6 = newTemp(Ity_I64);
+      modrm = insn[2];
+      do_MMX_preamble();
+
+      assign(t5, getMMXReg(gregOfRM(modrm)));
+      breakup64to16s( t5, &t3, &t2, &t1, &t0 );
+
+      if (epartIsReg(modrm)) {
+         assign(t4, getIReg(2, eregOfRM(modrm)));
+         delta += 3+1;
+         lane = insn[3+1-1];
+         DIP("pinsrw $%d,%s,%s\n", (Int)lane, 
+                                   nameIReg(2,eregOfRM(modrm)),
+                                   nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         delta += 3+alen;
+         lane = insn[3+alen-1];
+         assign(t4, loadLE(Ity_I16, mkexpr(addr)));
+         DIP("pinsrw $%d,%s,%s\n", (Int)lane, 
+                                   dis_buf,
+                                   nameMMXReg(gregOfRM(modrm)));
+      }
+
+      switch (lane & 3) {
+         case 0:  assign(t6, mk64from16s(t3,t2,t1,t4)); break;
+         case 1:  assign(t6, mk64from16s(t3,t2,t4,t0)); break;
+         case 2:  assign(t6, mk64from16s(t3,t4,t1,t0)); break;
+         case 3:  assign(t6, mk64from16s(t4,t2,t1,t0)); break;
+         default: vassert(0); /*NOTREACHED*/
+      }
+      putMMXReg(gregOfRM(modrm), mkexpr(t6));
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F EE = PMAXSW -- 16x4 signed max */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xEE) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                sorb, delta+2, insn[1], "pmaxsw", False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F DE = PMAXUB -- 8x8 unsigned max */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xDE) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                sorb, delta+2, insn[1], "pmaxub", False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F EA = PMINSW -- 16x4 signed min */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xEA) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                sorb, delta+2, insn[1], "pminsw", False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F DA = PMINUB -- 8x8 unsigned min */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xDA) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                sorb, delta+2, insn[1], "pminub", False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F D7 = PMOVMSKB -- extract sign bits from each of 8 lanes in
+      mmx(E), turn them into a byte, and put zero-extend of it in
+      ireg(G). */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xD7) {
+      modrm = insn[2];
+      if (epartIsReg(modrm)) {
+         do_MMX_preamble();
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I32);
+         assign(t0, getMMXReg(eregOfRM(modrm)));
+         assign(t1, unop(Iop_8Uto32, unop(Iop_GetMSBs8x8, mkexpr(t0))));
+         putIReg(4, gregOfRM(modrm), mkexpr(t1));
+         DIP("pmovmskb %s,%s\n", nameMMXReg(eregOfRM(modrm)),
+                                 nameIReg(4,gregOfRM(modrm)));
+         delta += 3;
+         goto decode_success;
+      } 
+      /* else fall through */
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F E4 = PMULUH -- 16x4 hi-half of unsigned widening multiply */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xE4) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                sorb, delta+2, insn[1], "pmuluh", False );
+      goto decode_success;
+   }
+
+   /* 0F 18 /0 = PREFETCHNTA -- prefetch into caches, */
+   /* 0F 18 /1 = PREFETCHT0  -- with various different hints */
+   /* 0F 18 /2 = PREFETCHT1 */
+   /* 0F 18 /3 = PREFETCHT2 */
+   if (insn[0] == 0x0F && insn[1] == 0x18
+       && !epartIsReg(insn[2]) 
+       && gregOfRM(insn[2]) >= 0 && gregOfRM(insn[2]) <= 3) {
+      const HChar* hintstr = "??";
+
+      modrm = getIByte(delta+2);
+      vassert(!epartIsReg(modrm));
+
+      addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+      delta += 2+alen;
+
+      switch (gregOfRM(modrm)) {
+         case 0: hintstr = "nta"; break;
+         case 1: hintstr = "t0"; break;
+         case 2: hintstr = "t1"; break;
+         case 3: hintstr = "t2"; break;
+         default: vassert(0); /*NOTREACHED*/
+      }
+
+      DIP("prefetch%s %s\n", hintstr, dis_buf);
+      goto decode_success;
+   }
+
+   /* 0F 0D /0 = PREFETCH  m8 -- 3DNow! prefetch */
+   /* 0F 0D /1 = PREFETCHW m8 -- ditto, with some other hint */
+   if (insn[0] == 0x0F && insn[1] == 0x0D
+       && !epartIsReg(insn[2]) 
+       && gregOfRM(insn[2]) >= 0 && gregOfRM(insn[2]) <= 1) {
+      const HChar* hintstr = "??";
+
+      modrm = getIByte(delta+2);
+      vassert(!epartIsReg(modrm));
+
+      addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+      delta += 2+alen;
+
+      switch (gregOfRM(modrm)) {
+         case 0: hintstr = ""; break;
+         case 1: hintstr = "w"; break;
+         default: vassert(0); /*NOTREACHED*/
+      }
+
+      DIP("prefetch%s %s\n", hintstr, dis_buf);
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F F6 = PSADBW -- sum of 8Ux8 absolute differences */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xF6) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                 sorb, delta+2, insn[1], "psadbw", False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE1 ---*** */
+   /* 0F 70 = PSHUFW -- rearrange 4x16 from E(mmx or mem) to G(mmx) */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x70) {
+      Int order;
+      IRTemp sV, dV, s3, s2, s1, s0;
+      s3 = s2 = s1 = s0 = IRTemp_INVALID;
+      sV = newTemp(Ity_I64);
+      dV = newTemp(Ity_I64);
+      do_MMX_preamble();
+      modrm = insn[2];
+      if (epartIsReg(modrm)) {
+         assign( sV, getMMXReg(eregOfRM(modrm)) );
+         order = (Int)insn[3];
+         delta += 2+2;
+         DIP("pshufw $%d,%s,%s\n", order, 
+                                   nameMMXReg(eregOfRM(modrm)),
+                                   nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+	 order = (Int)insn[2+alen];
+         delta += 3+alen;
+         DIP("pshufw $%d,%s,%s\n", order, 
+                                   dis_buf,
+                                   nameMMXReg(gregOfRM(modrm)));
+      }
+      breakup64to16s( sV, &s3, &s2, &s1, &s0 );
+
+#     define SEL(n) \
+                ((n)==0 ? s0 : ((n)==1 ? s1 : ((n)==2 ? s2 : s3)))
+      assign(dV,
+	     mk64from16s( SEL((order>>6)&3), SEL((order>>4)&3),
+                          SEL((order>>2)&3), SEL((order>>0)&3) )
+      );
+      putMMXReg(gregOfRM(modrm), mkexpr(dV));
+#     undef SEL
+      goto decode_success;
+   }
+
+   /* 0F AE /7 = SFENCE -- flush pending operations to memory */
+   if (insn[0] == 0x0F && insn[1] == 0xAE
+       && epartIsReg(insn[2]) && gregOfRM(insn[2]) == 7) {
+      vassert(sz == 4);
+      delta += 3;
+      /* Insert a memory fence.  It's sometimes important that these
+         are carried through to the generated code. */
+      stmt( IRStmt_MBE(Imbe_Fence) );
+      DIP("sfence\n");
+      goto decode_success;
+   }
+
+   /* End of mmxext sse1 subset. No more sse parsing for mmxext only arches. */
+   if (archinfo->hwcaps == VEX_HWCAPS_X86_MMXEXT/*integer only sse1 subset*/)
+      goto after_sse_decoders;
+
+
+   /* 0F 5F = MAXPS -- max 32Fx4 from R/M to R */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x5F) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "maxps", Iop_Max32Fx4 );
+      goto decode_success;
+   }
+
+   /* F3 0F 5F = MAXSS -- max 32F0x4 from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x5F) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo32( sorb, delta+3, "maxss", Iop_Max32F0x4 );
+      goto decode_success;
+   }
+
+   /* 0F 5D = MINPS -- min 32Fx4 from R/M to R */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x5D) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "minps", Iop_Min32Fx4 );
+      goto decode_success;
+   }
+
+   /* F3 0F 5D = MINSS -- min 32F0x4 from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x5D) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo32( sorb, delta+3, "minss", Iop_Min32F0x4 );
+      goto decode_success;
+   }
+
+   /* 0F 28 = MOVAPS -- move from E (mem or xmm) to G (xmm). */
+   /* 0F 10 = MOVUPS -- move from E (mem or xmm) to G (xmm). */
+   if (sz == 4 && insn[0] == 0x0F && (insn[1] == 0x28 || insn[1] == 0x10)) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         putXMMReg( gregOfRM(modrm), 
+                    getXMMReg( eregOfRM(modrm) ));
+         DIP("mov[ua]ps %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)));
+         delta += 2+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         if (insn[1] == 0x28/*movaps*/)
+            gen_SEGV_if_not_16_aligned( addr );
+         putXMMReg( gregOfRM(modrm), 
+                    loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("mov[ua]ps %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRM(modrm)));
+         delta += 2+alen;
+      }
+      goto decode_success;
+   }
+
+   /* 0F 29 = MOVAPS -- move from G (xmm) to E (mem or xmm). */
+   /* 0F 11 = MOVUPS -- move from G (xmm) to E (mem or xmm). */
+   if (sz == 4 && insn[0] == 0x0F 
+       && (insn[1] == 0x29 || insn[1] == 0x11)) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         /* fall through; awaiting test case */
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         if (insn[1] == 0x29/*movaps*/)
+            gen_SEGV_if_not_16_aligned( addr );
+         storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
+         DIP("mov[ua]ps %s,%s\n", nameXMMReg(gregOfRM(modrm)),
+                                  dis_buf );
+         delta += 2+alen;
+         goto decode_success;
+      }
+   }
+
+   /* 0F 16 = MOVHPS -- move from mem to high half of XMM. */
+   /* 0F 16 = MOVLHPS -- move from lo half to hi half of XMM. */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x16) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         delta += 2+1;
+         putXMMRegLane64( gregOfRM(modrm), 1/*upper lane*/,
+                          getXMMRegLane64( eregOfRM(modrm), 0 ) );
+         DIP("movhps %s,%s\n", nameXMMReg(eregOfRM(modrm)), 
+                               nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         delta += 2+alen;
+         putXMMRegLane64( gregOfRM(modrm), 1/*upper lane*/,
+                          loadLE(Ity_I64, mkexpr(addr)) );
+         DIP("movhps %s,%s\n", dis_buf, 
+                               nameXMMReg( gregOfRM(modrm) ));
+      }
+      goto decode_success;
+   }
+
+   /* 0F 17 = MOVHPS -- move from high half of XMM to mem. */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x17) {
+      if (!epartIsReg(insn[2])) {
+         delta += 2;
+         addr = disAMode ( &alen, sorb, delta, dis_buf );
+         delta += alen;
+         storeLE( mkexpr(addr), 
+                  getXMMRegLane64( gregOfRM(insn[2]),
+                                   1/*upper lane*/ ) );
+         DIP("movhps %s,%s\n", nameXMMReg( gregOfRM(insn[2]) ),
+                               dis_buf);
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* 0F 12 = MOVLPS -- move from mem to low half of XMM. */
+   /* 0F 12 = MOVHLPS -- move from hi half to lo half of XMM. */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x12) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         delta += 2+1;
+         putXMMRegLane64( gregOfRM(modrm),  
+                          0/*lower lane*/,
+                          getXMMRegLane64( eregOfRM(modrm), 1 ));
+         DIP("movhlps %s, %s\n", nameXMMReg(eregOfRM(modrm)), 
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         delta += 2+alen;
+         putXMMRegLane64( gregOfRM(modrm),  0/*lower lane*/,
+                          loadLE(Ity_I64, mkexpr(addr)) );
+         DIP("movlps %s, %s\n", 
+             dis_buf, nameXMMReg( gregOfRM(modrm) ));
+      }
+      goto decode_success;
+   }
+
+   /* 0F 13 = MOVLPS -- move from low half of XMM to mem. */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x13) {
+      if (!epartIsReg(insn[2])) {
+         delta += 2;
+         addr = disAMode ( &alen, sorb, delta, dis_buf );
+         delta += alen;
+         storeLE( mkexpr(addr), 
+                  getXMMRegLane64( gregOfRM(insn[2]), 
+                                   0/*lower lane*/ ) );
+         DIP("movlps %s, %s\n", nameXMMReg( gregOfRM(insn[2]) ),
+                                dis_buf);
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* 0F 50 = MOVMSKPS - move 4 sign bits from 4 x F32 in xmm(E)
+      to 4 lowest bits of ireg(G) */
+   if (insn[0] == 0x0F && insn[1] == 0x50) {
+      modrm = getIByte(delta+2);
+      if (sz == 4 && epartIsReg(modrm)) {
+         Int src;
+         t0 = newTemp(Ity_I32);
+         t1 = newTemp(Ity_I32);
+         t2 = newTemp(Ity_I32);
+         t3 = newTemp(Ity_I32);
+         delta += 2+1;
+         src = eregOfRM(modrm);
+         assign( t0, binop( Iop_And32,
+                            binop(Iop_Shr32, getXMMRegLane32(src,0), mkU8(31)),
+                            mkU32(1) ));
+         assign( t1, binop( Iop_And32,
+                            binop(Iop_Shr32, getXMMRegLane32(src,1), mkU8(30)),
+                            mkU32(2) ));
+         assign( t2, binop( Iop_And32,
+                            binop(Iop_Shr32, getXMMRegLane32(src,2), mkU8(29)),
+                            mkU32(4) ));
+         assign( t3, binop( Iop_And32,
+                            binop(Iop_Shr32, getXMMRegLane32(src,3), mkU8(28)),
+                            mkU32(8) ));
+         putIReg(4, gregOfRM(modrm),
+                    binop(Iop_Or32,
+                          binop(Iop_Or32, mkexpr(t0), mkexpr(t1)),
+                          binop(Iop_Or32, mkexpr(t2), mkexpr(t3))
+                         )
+                 );
+         DIP("movmskps %s,%s\n", nameXMMReg(src), 
+                                 nameIReg(4, gregOfRM(modrm)));
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* 0F 2B = MOVNTPS -- for us, just a plain SSE store. */
+   /* 66 0F 2B = MOVNTPD -- for us, just a plain SSE store. */
+   if (insn[0] == 0x0F && insn[1] == 0x2B) {
+      modrm = getIByte(delta+2);
+      if (!epartIsReg(modrm)) {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
+         DIP("movntp%s %s,%s\n", sz==2 ? "d" : "s",
+                                 dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)));
+         delta += 2+alen;
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* F3 0F 10 = MOVSS -- move 32 bits from E (mem or lo 1/4 xmm) to G
+      (lo 1/4 xmm).  If E is mem, upper 3/4 of G is zeroed out. */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x10) {
+      vassert(sz == 4);
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         putXMMRegLane32( gregOfRM(modrm), 0,
+                          getXMMRegLane32( eregOfRM(modrm), 0 ));
+         DIP("movss %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                              nameXMMReg(gregOfRM(modrm)));
+         delta += 3+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         /* zero bits 127:64 */
+         putXMMRegLane64( gregOfRM(modrm), 1, mkU64(0) ); 
+         /* zero bits 63:32 */
+         putXMMRegLane32( gregOfRM(modrm), 1, mkU32(0) ); 
+         /* write bits 31:0 */
+         putXMMRegLane32( gregOfRM(modrm), 0,
+                          loadLE(Ity_I32, mkexpr(addr)) );
+         DIP("movss %s,%s\n", dis_buf,
+                              nameXMMReg(gregOfRM(modrm)));
+         delta += 3+alen;
+      }
+      goto decode_success;
+   }
+
+   /* F3 0F 11 = MOVSS -- move 32 bits from G (lo 1/4 xmm) to E (mem
+      or lo 1/4 xmm). */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x11) {
+      vassert(sz == 4);
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         /* fall through, we don't yet have a test case */
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         storeLE( mkexpr(addr),
+                  getXMMRegLane32(gregOfRM(modrm), 0) );
+         DIP("movss %s,%s\n", nameXMMReg(gregOfRM(modrm)),
+                              dis_buf);
+         delta += 3+alen;
+         goto decode_success;
+      }
+   }
+
+   /* 0F 59 = MULPS -- mul 32Fx4 from R/M to R */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x59) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "mulps", Iop_Mul32Fx4 );
+      goto decode_success;
+   }
+
+   /* F3 0F 59 = MULSS -- mul 32F0x4 from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x59) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo32( sorb, delta+3, "mulss", Iop_Mul32F0x4 );
+      goto decode_success;
+   }
+
+   /* 0F 56 = ORPS -- G = G or E */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x56) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "orps", Iop_OrV128 );
+      goto decode_success;
+   }
+
+   /* 0F 53 = RCPPS -- approx reciprocal 32Fx4 from R/M to R */
+   if (insn[0] == 0x0F && insn[1] == 0x53) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_unary_all( sorb, delta+2, 
+                                        "rcpps", Iop_RecipEst32Fx4 );
+      goto decode_success;
+   }
+
+   /* F3 0F 53 = RCPSS -- approx reciprocal 32F0x4 from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x53) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_unary_lo32( sorb, delta+3, 
+                                         "rcpss", Iop_RecipEst32F0x4 );
+      goto decode_success;
+   }
+
+   /* 0F 52 = RSQRTPS -- approx reciprocal sqrt 32Fx4 from R/M to R */
+   if (insn[0] == 0x0F && insn[1] == 0x52) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_unary_all( sorb, delta+2, 
+                                        "rsqrtps", Iop_RSqrtEst32Fx4 );
+      goto decode_success;
+   }
+
+   /* F3 0F 52 = RSQRTSS -- approx reciprocal sqrt 32F0x4 from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x52) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_unary_lo32( sorb, delta+3, 
+                                         "rsqrtss", Iop_RSqrtEst32F0x4 );
+      goto decode_success;
+   }
+
+   /* 0F C6 /r ib = SHUFPS -- shuffle packed F32s */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xC6) {
+      Int    select;
+      IRTemp sV, dV;
+      IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+      sV = newTemp(Ity_V128);
+      dV = newTemp(Ity_V128);
+      s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+      modrm = insn[2];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         select = (Int)insn[3];
+         delta += 2+2;
+         DIP("shufps $%d,%s,%s\n", select, 
+                                   nameXMMReg(eregOfRM(modrm)),
+                                   nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         select = (Int)insn[2+alen];
+         delta += 3+alen;
+         DIP("shufps $%d,%s,%s\n", select, 
+                                   dis_buf,
+                                   nameXMMReg(gregOfRM(modrm)));
+      }
+
+      breakup128to32s( dV, &d3, &d2, &d1, &d0 );
+      breakup128to32s( sV, &s3, &s2, &s1, &s0 );
+
+#     define SELD(n) ((n)==0 ? d0 : ((n)==1 ? d1 : ((n)==2 ? d2 : d3)))
+#     define SELS(n) ((n)==0 ? s0 : ((n)==1 ? s1 : ((n)==2 ? s2 : s3)))
+
+      putXMMReg(
+         gregOfRM(modrm), 
+         mk128from32s( SELS((select>>6)&3), SELS((select>>4)&3), 
+                       SELD((select>>2)&3), SELD((select>>0)&3) )
+      );
+
+#     undef SELD
+#     undef SELS
+
+      goto decode_success;
+   }
+
+   /* 0F 51 = SQRTPS -- approx sqrt 32Fx4 from R/M to R */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x51) {
+      delta = dis_SSE_E_to_G_unary_all( sorb, delta+2, 
+                                        "sqrtps", Iop_Sqrt32Fx4 );
+      goto decode_success;
+   }
+
+   /* F3 0F 51 = SQRTSS -- approx sqrt 32F0x4 from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x51) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_unary_lo32( sorb, delta+3, 
+                                         "sqrtss", Iop_Sqrt32F0x4 );
+      goto decode_success;
+   }
+
+   /* 0F AE /3 = STMXCSR m32 -- store %mxcsr */
+   if (insn[0] == 0x0F && insn[1] == 0xAE
+       && !epartIsReg(insn[2]) && gregOfRM(insn[2]) == 3) {
+      modrm = getIByte(delta+2);
+      vassert(sz == 4);
+      vassert(!epartIsReg(modrm));
+
+      addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+      delta += 2+alen;
+
+      /* Fake up a native SSE mxcsr word.  The only thing it depends
+         on is SSEROUND[1:0], so call a clean helper to cook it up. 
+      */
+      /* UInt x86g_create_mxcsr ( UInt sseround ) */
+      DIP("stmxcsr %s\n", dis_buf);
+      storeLE( mkexpr(addr), 
+               mkIRExprCCall(
+                  Ity_I32, 0/*regp*/,
+                  "x86g_create_mxcsr", &x86g_create_mxcsr, 
+                  mkIRExprVec_1( get_sse_roundingmode() ) 
+               ) 
+             );
+      goto decode_success;
+   }
+
+   /* 0F 5C = SUBPS -- sub 32Fx4 from R/M to R */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x5C) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "subps", Iop_Sub32Fx4 );
+      goto decode_success;
+   }
+
+   /* F3 0F 5C = SUBSS -- sub 32F0x4 from R/M to R */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x5C) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo32( sorb, delta+3, "subss", Iop_Sub32F0x4 );
+      goto decode_success;
+   }
+
+   /* 0F 15 = UNPCKHPS -- unpack and interleave high part F32s */
+   /* 0F 14 = UNPCKLPS -- unpack and interleave low part F32s */
+   /* These just appear to be special cases of SHUFPS */
+   if (sz == 4 && insn[0] == 0x0F && (insn[1] == 0x15 || insn[1] == 0x14)) {
+      IRTemp sV, dV;
+      IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+      Bool hi = toBool(insn[1] == 0x15);
+      sV = newTemp(Ity_V128);
+      dV = newTemp(Ity_V128);
+      s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+      modrm = insn[2];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("unpck%sps %s,%s\n", hi ? "h" : "l",
+                                  nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("unpck%sps %s,%s\n", hi ? "h" : "l",
+                                  dis_buf,
+                                  nameXMMReg(gregOfRM(modrm)));
+      }
+
+      breakup128to32s( dV, &d3, &d2, &d1, &d0 );
+      breakup128to32s( sV, &s3, &s2, &s1, &s0 );
+
+      if (hi) {
+         putXMMReg( gregOfRM(modrm), mk128from32s( s3, d3, s2, d2 ) );
+      } else {
+         putXMMReg( gregOfRM(modrm), mk128from32s( s1, d1, s0, d0 ) );
+      }
+
+      goto decode_success;
+   }
+
+   /* 0F 57 = XORPS -- G = G xor E */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x57) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "xorps", Iop_XorV128 );
+      goto decode_success;
+   }
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSE decoder.                      --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- start of the SSE2 decoder.                   --- */
+   /* ---------------------------------------------------- */
+
+   /* Skip parts of the decoder which don't apply given the stated
+      guest subarchitecture. */
+   if (0 == (archinfo->hwcaps & VEX_HWCAPS_X86_SSE2))
+      goto after_sse_decoders; /* no SSE2 capabilities */
+
+   insn = &guest_code[delta];
+
+   /* 66 0F 58 = ADDPD -- add 64Fx2 from R/M to R */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x58) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "addpd", Iop_Add64Fx2 );
+      goto decode_success;
+   }
+ 
+   /* F2 0F 58 = ADDSD -- add 64F0x2 from R/M to R */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x58) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo64( sorb, delta+3, "addsd", Iop_Add64F0x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F 55 = ANDNPD -- G = (not G) and E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x55) {
+      delta = dis_SSE_E_to_G_all_invG( sorb, delta+2, "andnpd", Iop_AndV128 );
+      goto decode_success;
+   }
+
+   /* 66 0F 54 = ANDPD -- G = G and E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x54) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "andpd", Iop_AndV128 );
+      goto decode_success;
+   }
+
+   /* 66 0F C2 = CMPPD -- 64Fx2 comparison from R/M to R */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xC2) {
+      delta = dis_SSEcmp_E_to_G( sorb, delta+2, "cmppd", True, 8 );
+      goto decode_success;
+   }
+
+   /* F2 0F C2 = CMPSD -- 64F0x2 comparison from R/M to R */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0xC2) {
+      vassert(sz == 4);
+      delta = dis_SSEcmp_E_to_G( sorb, delta+3, "cmpsd", False, 8 );
+      goto decode_success;
+   }
+
+   /* 66 0F 2F = COMISD  -- 64F0x2 comparison G,E, and set ZCP */
+   /* 66 0F 2E = UCOMISD -- 64F0x2 comparison G,E, and set ZCP */
+   if (sz == 2 && insn[0] == 0x0F && (insn[1] == 0x2F || insn[1] == 0x2E)) {
+      IRTemp argL = newTemp(Ity_F64);
+      IRTemp argR = newTemp(Ity_F64);
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         assign( argR, getXMMRegLane64F( eregOfRM(modrm), 0/*lowest lane*/ ) );
+         delta += 2+1;
+         DIP("[u]comisd %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)) );
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign( argR, loadLE(Ity_F64, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("[u]comisd %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRM(modrm)) );
+      }
+      assign( argL, getXMMRegLane64F( gregOfRM(modrm), 0/*lowest lane*/ ) );
+
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+      stmt( IRStmt_Put( 
+               OFFB_CC_DEP1,
+               binop( Iop_And32,
+                      binop(Iop_CmpF64, mkexpr(argL), mkexpr(argR)),
+                      mkU32(0x45)
+          )));
+      /* Set NDEP even though it isn't used.  This makes redundant-PUT
+         elimination of previous stores to this field work better. */
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+      goto decode_success;
+   }
+
+   /* F3 0F E6 = CVTDQ2PD -- convert 2 x I32 in mem/lo half xmm to 2 x
+      F64 in xmm(G) */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0xE6) {
+      IRTemp arg64 = newTemp(Ity_I64);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         assign( arg64, getXMMRegLane64(eregOfRM(modrm), 0) );
+         delta += 3+1;
+         DIP("cvtdq2pd %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+	 assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("cvtdq2pd %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+
+      putXMMRegLane64F( 
+         gregOfRM(modrm), 0,
+         unop(Iop_I32StoF64, unop(Iop_64to32, mkexpr(arg64)))
+      );
+
+      putXMMRegLane64F(
+         gregOfRM(modrm), 1, 
+         unop(Iop_I32StoF64, unop(Iop_64HIto32, mkexpr(arg64)))
+      );
+
+      goto decode_success;
+   }
+
+   /* 0F 5B = CVTDQ2PS -- convert 4 x I32 in mem/xmm to 4 x F32 in
+      xmm(G) */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x5B) {
+      IRTemp argV  = newTemp(Ity_V128);
+      IRTemp rmode = newTemp(Ity_I32);
+
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         assign( argV, getXMMReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("cvtdq2ps %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("cvtdq2ps %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+         
+      assign( rmode, get_sse_roundingmode() );
+      breakup128to32s( argV, &t3, &t2, &t1, &t0 );
+
+#     define CVT(_t)  binop( Iop_F64toF32,                    \
+                             mkexpr(rmode),                   \
+                             unop(Iop_I32StoF64,mkexpr(_t)))
+      
+      putXMMRegLane32F( gregOfRM(modrm), 3, CVT(t3) );
+      putXMMRegLane32F( gregOfRM(modrm), 2, CVT(t2) );
+      putXMMRegLane32F( gregOfRM(modrm), 1, CVT(t1) );
+      putXMMRegLane32F( gregOfRM(modrm), 0, CVT(t0) );
+
+#     undef CVT
+
+      goto decode_success;
+   }
+
+   /* F2 0F E6 = CVTPD2DQ -- convert 2 x F64 in mem/xmm to 2 x I32 in
+      lo half xmm(G), and zero upper half */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0xE6) {
+      IRTemp argV  = newTemp(Ity_V128);
+      IRTemp rmode = newTemp(Ity_I32);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         assign( argV, getXMMReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("cvtpd2dq %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+	 assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("cvtpd2dq %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+         
+      assign( rmode, get_sse_roundingmode() );
+      t0 = newTemp(Ity_F64);
+      t1 = newTemp(Ity_F64);
+      assign( t0, unop(Iop_ReinterpI64asF64, 
+                       unop(Iop_V128to64, mkexpr(argV))) );
+      assign( t1, unop(Iop_ReinterpI64asF64, 
+                       unop(Iop_V128HIto64, mkexpr(argV))) );
+      
+#     define CVT(_t)  binop( Iop_F64toI32S,                   \
+                             mkexpr(rmode),                   \
+                             mkexpr(_t) )
+      
+      putXMMRegLane32( gregOfRM(modrm), 3, mkU32(0) );
+      putXMMRegLane32( gregOfRM(modrm), 2, mkU32(0) );
+      putXMMRegLane32( gregOfRM(modrm), 1, CVT(t1) );
+      putXMMRegLane32( gregOfRM(modrm), 0, CVT(t0) );
+
+#     undef CVT
+
+      goto decode_success;
+   }
+
+   /* 66 0F 2D = CVTPD2PI -- convert 2 x F64 in mem/xmm to 2 x
+      I32 in mmx, according to prevailing SSE rounding mode */
+   /* 66 0F 2C = CVTTPD2PI -- convert 2 x F64 in mem/xmm to 2 x
+      I32 in mmx, rounding towards zero */
+   if (sz == 2 && insn[0] == 0x0F && (insn[1] == 0x2D || insn[1] == 0x2C)) {
+      IRTemp dst64  = newTemp(Ity_I64);
+      IRTemp rmode  = newTemp(Ity_I32);
+      IRTemp f64lo  = newTemp(Ity_F64);
+      IRTemp f64hi  = newTemp(Ity_F64);
+      Bool   r2zero = toBool(insn[1] == 0x2C);
+
+      do_MMX_preamble();
+      modrm = getIByte(delta+2);
+
+      if (epartIsReg(modrm)) {
+         delta += 2+1;
+	 assign(f64lo, getXMMRegLane64F(eregOfRM(modrm), 0));
+	 assign(f64hi, getXMMRegLane64F(eregOfRM(modrm), 1));
+         DIP("cvt%spd2pi %s,%s\n", r2zero ? "t" : "",
+                                   nameXMMReg(eregOfRM(modrm)),
+                                   nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign(f64lo, loadLE(Ity_F64, mkexpr(addr)));
+	 assign(f64hi, loadLE(Ity_F64, binop( Iop_Add32, 
+                                              mkexpr(addr), 
+                                              mkU32(8) )));
+         delta += 2+alen;
+         DIP("cvt%spf2pi %s,%s\n", r2zero ? "t" : "",
+                                   dis_buf,
+                                   nameMMXReg(gregOfRM(modrm)));
+      }
+
+      if (r2zero) {
+         assign(rmode, mkU32((UInt)Irrm_ZERO) );
+      } else {
+         assign( rmode, get_sse_roundingmode() );
+      }
+
+      assign( 
+         dst64,
+         binop( Iop_32HLto64,
+                binop( Iop_F64toI32S, mkexpr(rmode), mkexpr(f64hi) ),
+                binop( Iop_F64toI32S, mkexpr(rmode), mkexpr(f64lo) )
+              )
+      );
+
+      putMMXReg(gregOfRM(modrm), mkexpr(dst64));
+      goto decode_success;
+   }
+
+   /* 66 0F 5A = CVTPD2PS -- convert 2 x F64 in mem/xmm to 2 x F32 in
+      lo half xmm(G), and zero upper half */
+   /* Note, this is practically identical to CVTPD2DQ.  It would have
+      been nicer to merge them together, but the insn[] offsets differ
+      by one. */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x5A) {
+      IRTemp argV  = newTemp(Ity_V128);
+      IRTemp rmode = newTemp(Ity_I32);
+
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         assign( argV, getXMMReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("cvtpd2ps %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("cvtpd2ps %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+         
+      assign( rmode, get_sse_roundingmode() );
+      t0 = newTemp(Ity_F64);
+      t1 = newTemp(Ity_F64);
+      assign( t0, unop(Iop_ReinterpI64asF64, 
+                       unop(Iop_V128to64, mkexpr(argV))) );
+      assign( t1, unop(Iop_ReinterpI64asF64, 
+                       unop(Iop_V128HIto64, mkexpr(argV))) );
+      
+#     define CVT(_t)  binop( Iop_F64toF32,                    \
+                             mkexpr(rmode),                   \
+                             mkexpr(_t) )
+      
+      putXMMRegLane32(  gregOfRM(modrm), 3, mkU32(0) );
+      putXMMRegLane32(  gregOfRM(modrm), 2, mkU32(0) );
+      putXMMRegLane32F( gregOfRM(modrm), 1, CVT(t1) );
+      putXMMRegLane32F( gregOfRM(modrm), 0, CVT(t0) );
+
+#     undef CVT
+
+      goto decode_success;
+   }
+
+   /* 66 0F 2A = CVTPI2PD -- convert 2 x I32 in mem/mmx to 2 x F64 in
+      xmm(G) */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x2A) {
+      IRTemp arg64 = newTemp(Ity_I64);
+
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         /* Only switch to MMX mode if the source is a MMX register.
+            This is inconsistent with all other instructions which
+            convert between XMM and (M64 or MMX), which always switch
+            to MMX mode even if 64-bit operand is M64 and not MMX.  At
+            least, that's what the Intel docs seem to me to say.
+            Fixes #210264. */
+         do_MMX_preamble();
+         assign( arg64, getMMXReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("cvtpi2pd %s,%s\n", nameMMXReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign( arg64, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("cvtpi2pd %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+
+      putXMMRegLane64F( 
+         gregOfRM(modrm), 0,
+         unop(Iop_I32StoF64, unop(Iop_64to32, mkexpr(arg64)) )
+      );
+
+      putXMMRegLane64F( 
+         gregOfRM(modrm), 1,
+         unop(Iop_I32StoF64, unop(Iop_64HIto32, mkexpr(arg64)) )
+      );
+
+      goto decode_success;
+   }
+
+   /* 66 0F 5B = CVTPS2DQ -- convert 4 x F32 in mem/xmm to 4 x I32 in
+      xmm(G) */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x5B) {
+      IRTemp argV  = newTemp(Ity_V128);
+      IRTemp rmode = newTemp(Ity_I32);
+
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         assign( argV, getXMMReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("cvtps2dq %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("cvtps2dq %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+         
+      assign( rmode, get_sse_roundingmode() );
+      breakup128to32s( argV, &t3, &t2, &t1, &t0 );
+
+      /* This is less than ideal.  If it turns out to be a performance
+	 bottleneck it can be improved. */
+#     define CVT(_t)                            \
+        binop( Iop_F64toI32S,                   \
+               mkexpr(rmode),                   \
+               unop( Iop_F32toF64,              \
+                     unop( Iop_ReinterpI32asF32, mkexpr(_t))) )
+      
+      putXMMRegLane32( gregOfRM(modrm), 3, CVT(t3) );
+      putXMMRegLane32( gregOfRM(modrm), 2, CVT(t2) );
+      putXMMRegLane32( gregOfRM(modrm), 1, CVT(t1) );
+      putXMMRegLane32( gregOfRM(modrm), 0, CVT(t0) );
+
+#     undef CVT
+
+      goto decode_success;
+   }
+
+   /* 0F 5A = CVTPS2PD -- convert 2 x F32 in low half mem/xmm to 2 x
+      F64 in xmm(G). */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0x5A) {
+      IRTemp f32lo = newTemp(Ity_F32);
+      IRTemp f32hi = newTemp(Ity_F32);
+
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         assign( f32lo, getXMMRegLane32F(eregOfRM(modrm), 0) );
+         assign( f32hi, getXMMRegLane32F(eregOfRM(modrm), 1) );
+         delta += 2+1;
+         DIP("cvtps2pd %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign( f32lo, loadLE(Ity_F32, mkexpr(addr)) );
+	 assign( f32hi, loadLE(Ity_F32, 
+                               binop(Iop_Add32,mkexpr(addr),mkU32(4))) );
+         delta += 2+alen;
+         DIP("cvtps2pd %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+
+      putXMMRegLane64F( gregOfRM(modrm), 1,
+                        unop(Iop_F32toF64, mkexpr(f32hi)) );
+      putXMMRegLane64F( gregOfRM(modrm), 0,
+                        unop(Iop_F32toF64, mkexpr(f32lo)) );
+
+      goto decode_success;
+   }
+
+   /* F2 0F 2D = CVTSD2SI -- convert F64 in mem/low half xmm to
+      I32 in ireg, according to prevailing SSE rounding mode */
+   /* F2 0F 2C = CVTTSD2SI -- convert F64 in mem/low half xmm to
+      I32 in ireg, rounding towards zero */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F 
+       && (insn[2] == 0x2D || insn[2] == 0x2C)) {
+      IRTemp rmode = newTemp(Ity_I32);
+      IRTemp f64lo = newTemp(Ity_F64);
+      Bool   r2zero = toBool(insn[2] == 0x2C);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         delta += 3+1;
+	 assign(f64lo, getXMMRegLane64F(eregOfRM(modrm), 0));
+         DIP("cvt%ssd2si %s,%s\n", r2zero ? "t" : "",
+                                   nameXMMReg(eregOfRM(modrm)),
+                                   nameIReg(4, gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+	 assign(f64lo, loadLE(Ity_F64, mkexpr(addr)));
+         delta += 3+alen;
+         DIP("cvt%ssd2si %s,%s\n", r2zero ? "t" : "",
+                                   dis_buf,
+                                   nameIReg(4, gregOfRM(modrm)));
+      }
+
+      if (r2zero) {
+         assign( rmode, mkU32((UInt)Irrm_ZERO) );
+      } else {
+         assign( rmode, get_sse_roundingmode() );
+      }
+
+      putIReg(4, gregOfRM(modrm),
+                 binop( Iop_F64toI32S, mkexpr(rmode), mkexpr(f64lo)) );
+
+      goto decode_success;
+   }
+
+   /* F2 0F 5A = CVTSD2SS -- convert F64 in mem/low half xmm to F32 in
+      low 1/4 xmm(G), according to prevailing SSE rounding mode */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x5A) {
+      IRTemp rmode = newTemp(Ity_I32);
+      IRTemp f64lo = newTemp(Ity_F64);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         delta += 3+1;
+	 assign(f64lo, getXMMRegLane64F(eregOfRM(modrm), 0));
+         DIP("cvtsd2ss %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+	 assign(f64lo, loadLE(Ity_F64, mkexpr(addr)));
+         delta += 3+alen;
+         DIP("cvtsd2ss %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)));
+      }
+
+      assign( rmode, get_sse_roundingmode() );
+      putXMMRegLane32F( 
+         gregOfRM(modrm), 0, 
+         binop( Iop_F64toF32, mkexpr(rmode), mkexpr(f64lo) )
+      );
+
+      goto decode_success;
+   }
+
+   /* F2 0F 2A = CVTSI2SD -- convert I32 in mem/ireg to F64 in low
+      half xmm */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x2A) {
+      IRTemp arg32 = newTemp(Ity_I32);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         assign( arg32, getIReg(4, eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("cvtsi2sd %s,%s\n", nameIReg(4, eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+	 assign( arg32, loadLE(Ity_I32, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("cvtsi2sd %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)) );
+      }
+
+      putXMMRegLane64F( 
+         gregOfRM(modrm), 0,
+         unop(Iop_I32StoF64, mkexpr(arg32)) );
+
+      goto decode_success;
+   }
+
+   /* F3 0F 5A = CVTSS2SD -- convert F32 in mem/low 1/4 xmm to F64 in
+      low half xmm(G) */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x5A) {
+      IRTemp f32lo = newTemp(Ity_F32);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         delta += 3+1;
+	 assign(f32lo, getXMMRegLane32F(eregOfRM(modrm), 0));
+         DIP("cvtss2sd %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+	 assign(f32lo, loadLE(Ity_F32, mkexpr(addr)));
+         delta += 3+alen;
+         DIP("cvtss2sd %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)));
+      }
+
+      putXMMRegLane64F( gregOfRM(modrm), 0, 
+                        unop( Iop_F32toF64, mkexpr(f32lo) ) );
+
+      goto decode_success;
+   }
+
+   /* 66 0F E6 = CVTTPD2DQ -- convert 2 x F64 in mem/xmm to 2 x I32 in
+      lo half xmm(G), and zero upper half, rounding towards zero */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xE6) {
+      IRTemp argV  = newTemp(Ity_V128);
+      IRTemp rmode = newTemp(Ity_I32);
+
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         assign( argV, getXMMReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("cvttpd2dq %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+	 assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("cvttpd2dq %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRM(modrm)) );
+      }
+
+      assign( rmode, mkU32((UInt)Irrm_ZERO) );
+
+      t0 = newTemp(Ity_F64);
+      t1 = newTemp(Ity_F64);
+      assign( t0, unop(Iop_ReinterpI64asF64, 
+                       unop(Iop_V128to64, mkexpr(argV))) );
+      assign( t1, unop(Iop_ReinterpI64asF64, 
+                       unop(Iop_V128HIto64, mkexpr(argV))) );
+      
+#     define CVT(_t)  binop( Iop_F64toI32S,                   \
+                             mkexpr(rmode),                   \
+                             mkexpr(_t) )
+      
+      putXMMRegLane32( gregOfRM(modrm), 3, mkU32(0) );
+      putXMMRegLane32( gregOfRM(modrm), 2, mkU32(0) );
+      putXMMRegLane32( gregOfRM(modrm), 1, CVT(t1) );
+      putXMMRegLane32( gregOfRM(modrm), 0, CVT(t0) );
+
+#     undef CVT
+
+      goto decode_success;
+   }
+
+   /* F3 0F 5B = CVTTPS2DQ -- convert 4 x F32 in mem/xmm to 4 x I32 in
+      xmm(G), rounding towards zero */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x5B) {
+      IRTemp argV  = newTemp(Ity_V128);
+      IRTemp rmode = newTemp(Ity_I32);
+      vassert(sz == 4);
+
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         assign( argV, getXMMReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("cvttps2dq %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+	 assign( argV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("cvttps2dq %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRM(modrm)) );
+      }
+         
+      assign( rmode, mkU32((UInt)Irrm_ZERO) );
+      breakup128to32s( argV, &t3, &t2, &t1, &t0 );
+
+      /* This is less than ideal.  If it turns out to be a performance
+	 bottleneck it can be improved. */
+#     define CVT(_t)                            \
+        binop( Iop_F64toI32S,                   \
+               mkexpr(rmode),                   \
+               unop( Iop_F32toF64,              \
+                     unop( Iop_ReinterpI32asF32, mkexpr(_t))) )
+      
+      putXMMRegLane32( gregOfRM(modrm), 3, CVT(t3) );
+      putXMMRegLane32( gregOfRM(modrm), 2, CVT(t2) );
+      putXMMRegLane32( gregOfRM(modrm), 1, CVT(t1) );
+      putXMMRegLane32( gregOfRM(modrm), 0, CVT(t0) );
+
+#     undef CVT
+
+      goto decode_success;
+   }
+
+   /* 66 0F 5E = DIVPD -- div 64Fx2 from R/M to R */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x5E) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "divpd", Iop_Div64Fx2 );
+      goto decode_success;
+   }
+
+   /* F2 0F 5E = DIVSD -- div 64F0x2 from R/M to R */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x5E) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo64( sorb, delta+3, "divsd", Iop_Div64F0x2 );
+      goto decode_success;
+   }
+
+   /* 0F AE /5 = LFENCE -- flush pending operations to memory */
+   /* 0F AE /6 = MFENCE -- flush pending operations to memory */
+   if (insn[0] == 0x0F && insn[1] == 0xAE
+       && epartIsReg(insn[2]) 
+       && (gregOfRM(insn[2]) == 5 || gregOfRM(insn[2]) == 6)) {
+      vassert(sz == 4);
+      delta += 3;
+      /* Insert a memory fence.  It's sometimes important that these
+         are carried through to the generated code. */
+      stmt( IRStmt_MBE(Imbe_Fence) );
+      DIP("%sfence\n", gregOfRM(insn[2])==5 ? "l" : "m");
+      goto decode_success;
+   }
+
+   /* 66 0F 5F = MAXPD -- max 64Fx2 from R/M to R */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x5F) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "maxpd", Iop_Max64Fx2 );
+      goto decode_success;
+   }
+
+   /* F2 0F 5F = MAXSD -- max 64F0x2 from R/M to R */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x5F) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo64( sorb, delta+3, "maxsd", Iop_Max64F0x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F 5D = MINPD -- min 64Fx2 from R/M to R */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x5D) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "minpd", Iop_Min64Fx2 );
+      goto decode_success;
+   }
+
+   /* F2 0F 5D = MINSD -- min 64F0x2 from R/M to R */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x5D) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo64( sorb, delta+3, "minsd", Iop_Min64F0x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F 28 = MOVAPD -- move from E (mem or xmm) to G (xmm). */
+   /* 66 0F 10 = MOVUPD -- move from E (mem or xmm) to G (xmm). */
+   /* 66 0F 6F = MOVDQA -- move from E (mem or xmm) to G (xmm). */
+   if (sz == 2 && insn[0] == 0x0F 
+       && (insn[1] == 0x28 || insn[1] == 0x10 || insn[1] == 0x6F)) {
+      const HChar* wot = insn[1]==0x28 ? "apd" :
+                         insn[1]==0x10 ? "upd" : "dqa";
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         putXMMReg( gregOfRM(modrm), 
+                    getXMMReg( eregOfRM(modrm) ));
+         DIP("mov%s %s,%s\n", wot, nameXMMReg(eregOfRM(modrm)),
+                                   nameXMMReg(gregOfRM(modrm)));
+         delta += 2+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         if (insn[1] == 0x28/*movapd*/ || insn[1] == 0x6F/*movdqa*/)
+            gen_SEGV_if_not_16_aligned( addr );
+         putXMMReg( gregOfRM(modrm), 
+                    loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("mov%s %s,%s\n", wot, dis_buf,
+                                   nameXMMReg(gregOfRM(modrm)));
+         delta += 2+alen;
+      }
+      goto decode_success;
+   }
+
+   /* 66 0F 29 = MOVAPD -- move from G (xmm) to E (mem or xmm). */
+   /* 66 0F 11 = MOVUPD -- move from G (xmm) to E (mem or xmm). */
+   if (sz == 2 && insn[0] == 0x0F 
+       && (insn[1] == 0x29 || insn[1] == 0x11)) {
+      const HChar* wot = insn[1]==0x29 ? "apd" : "upd";
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         /* fall through; awaiting test case */
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         if (insn[1] == 0x29/*movapd*/)
+            gen_SEGV_if_not_16_aligned( addr );
+         storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
+         DIP("mov%s %s,%s\n", wot, nameXMMReg(gregOfRM(modrm)),
+                                   dis_buf );
+         delta += 2+alen;
+         goto decode_success;
+      }
+   }
+
+   /* 66 0F 6E = MOVD from r/m32 to xmm, zeroing high 3/4 of xmm. */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x6E) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         delta += 2+1;
+         putXMMReg(
+            gregOfRM(modrm),
+            unop( Iop_32UtoV128, getIReg(4, eregOfRM(modrm)) ) 
+         );
+         DIP("movd %s, %s\n", 
+             nameIReg(4,eregOfRM(modrm)), nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode( &alen, sorb, delta+2, dis_buf );
+         delta += 2+alen;
+         putXMMReg(
+            gregOfRM(modrm),
+            unop( Iop_32UtoV128,loadLE(Ity_I32, mkexpr(addr)) ) 
+         );
+         DIP("movd %s, %s\n", dis_buf, nameXMMReg(gregOfRM(modrm)));
+      }
+      goto decode_success;
+   }
+
+   /* 66 0F 7E = MOVD from xmm low 1/4 to r/m32. */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x7E) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         delta += 2+1;
+         putIReg( 4, eregOfRM(modrm),
+                  getXMMRegLane32(gregOfRM(modrm), 0) );
+         DIP("movd %s, %s\n", 
+             nameXMMReg(gregOfRM(modrm)), nameIReg(4,eregOfRM(modrm)));
+      } else {
+         addr = disAMode( &alen, sorb, delta+2, dis_buf );
+         delta += 2+alen;
+         storeLE( mkexpr(addr),
+                  getXMMRegLane32(gregOfRM(modrm), 0) );
+         DIP("movd %s, %s\n", nameXMMReg(gregOfRM(modrm)), dis_buf);
+      }
+      goto decode_success;
+   }
+
+   /* 66 0F 7F = MOVDQA -- move from G (xmm) to E (mem or xmm). */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x7F) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         delta += 2+1;
+         putXMMReg( eregOfRM(modrm),
+                    getXMMReg(gregOfRM(modrm)) );
+         DIP("movdqa %s, %s\n", nameXMMReg(gregOfRM(modrm)), 
+                                nameXMMReg(eregOfRM(modrm)));
+      } else {
+         addr = disAMode( &alen, sorb, delta+2, dis_buf );
+         delta += 2+alen;
+         gen_SEGV_if_not_16_aligned( addr );
+         storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
+         DIP("movdqa %s, %s\n", nameXMMReg(gregOfRM(modrm)), dis_buf);
+      }
+      goto decode_success;
+   }
+
+   /* F3 0F 6F = MOVDQU -- move from E (mem or xmm) to G (xmm). */
+   /* Unfortunately can't simply use the MOVDQA case since the
+      prefix lengths are different (66 vs F3) */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x6F) {
+      vassert(sz == 4);
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         putXMMReg( gregOfRM(modrm), 
+                    getXMMReg( eregOfRM(modrm) ));
+         DIP("movdqu %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                               nameXMMReg(gregOfRM(modrm)));
+         delta += 3+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         putXMMReg( gregOfRM(modrm), 
+                    loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("movdqu %s,%s\n", dis_buf,
+                               nameXMMReg(gregOfRM(modrm)));
+         delta += 3+alen;
+      }
+      goto decode_success;
+   }
+
+   /* F3 0F 7F = MOVDQU -- move from G (xmm) to E (mem or xmm). */
+   /* Unfortunately can't simply use the MOVDQA case since the
+      prefix lengths are different (66 vs F3) */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x7F) {
+      vassert(sz == 4);
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         delta += 3+1;
+         putXMMReg( eregOfRM(modrm),
+                    getXMMReg(gregOfRM(modrm)) );
+         DIP("movdqu %s, %s\n", nameXMMReg(gregOfRM(modrm)), 
+                                nameXMMReg(eregOfRM(modrm)));
+      } else {
+         addr = disAMode( &alen, sorb, delta+3, dis_buf );
+         delta += 3+alen;
+         storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
+         DIP("movdqu %s, %s\n", nameXMMReg(gregOfRM(modrm)), dis_buf);
+      }
+      goto decode_success;
+   }
+
+   /* F2 0F D6 = MOVDQ2Q -- move from E (lo half xmm, not mem) to G (mmx). */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0xD6) {
+      vassert(sz == 4);
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         do_MMX_preamble();
+         putMMXReg( gregOfRM(modrm), 
+                    getXMMRegLane64( eregOfRM(modrm), 0 ));
+         DIP("movdq2q %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                nameMMXReg(gregOfRM(modrm)));
+         delta += 3+1;
+         goto decode_success;
+      } else {
+         /* fall through, apparently no mem case for this insn */
+      }
+   }
+
+   /* 66 0F 16 = MOVHPD -- move from mem to high half of XMM. */
+   /* This seems identical to MOVHPS.  This instruction encoding is
+      completely crazy. */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x16) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         /* fall through; apparently reg-reg is not possible */
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         delta += 2+alen;
+         putXMMRegLane64( gregOfRM(modrm), 1/*upper lane*/,
+                          loadLE(Ity_I64, mkexpr(addr)) );
+         DIP("movhpd %s,%s\n", dis_buf, 
+                               nameXMMReg( gregOfRM(modrm) ));
+         goto decode_success;
+      }
+   }
+
+   /* 66 0F 17 = MOVHPD -- move from high half of XMM to mem. */
+   /* Again, this seems identical to MOVHPS. */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x17) {
+      if (!epartIsReg(insn[2])) {
+         delta += 2;
+         addr = disAMode ( &alen, sorb, delta, dis_buf );
+         delta += alen;
+         storeLE( mkexpr(addr), 
+                  getXMMRegLane64( gregOfRM(insn[2]),
+                                   1/*upper lane*/ ) );
+         DIP("movhpd %s,%s\n", nameXMMReg( gregOfRM(insn[2]) ),
+                               dis_buf);
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* 66 0F 12 = MOVLPD -- move from mem to low half of XMM. */
+   /* Identical to MOVLPS ? */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x12) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         /* fall through; apparently reg-reg is not possible */
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         delta += 2+alen;
+         putXMMRegLane64( gregOfRM(modrm),  0/*lower lane*/,
+                          loadLE(Ity_I64, mkexpr(addr)) );
+         DIP("movlpd %s, %s\n", 
+             dis_buf, nameXMMReg( gregOfRM(modrm) ));
+         goto decode_success;
+      }
+   }
+
+   /* 66 0F 13 = MOVLPD -- move from low half of XMM to mem. */
+   /* Identical to MOVLPS ? */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x13) {
+      if (!epartIsReg(insn[2])) {
+         delta += 2;
+         addr = disAMode ( &alen, sorb, delta, dis_buf );
+         delta += alen;
+         storeLE( mkexpr(addr), 
+                  getXMMRegLane64( gregOfRM(insn[2]), 
+                                   0/*lower lane*/ ) );
+         DIP("movlpd %s, %s\n", nameXMMReg( gregOfRM(insn[2]) ),
+                                dis_buf);
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* 66 0F 50 = MOVMSKPD - move 2 sign bits from 2 x F64 in xmm(E) to
+      2 lowest bits of ireg(G) */
+   if (insn[0] == 0x0F && insn[1] == 0x50) {
+      modrm = getIByte(delta+2);
+      if (sz == 2 && epartIsReg(modrm)) {
+         Int src;
+         t0 = newTemp(Ity_I32);
+         t1 = newTemp(Ity_I32);
+         delta += 2+1;
+         src = eregOfRM(modrm);
+         assign( t0, binop( Iop_And32,
+                            binop(Iop_Shr32, getXMMRegLane32(src,1), mkU8(31)),
+                            mkU32(1) ));
+         assign( t1, binop( Iop_And32,
+                            binop(Iop_Shr32, getXMMRegLane32(src,3), mkU8(30)),
+                            mkU32(2) ));
+         putIReg(4, gregOfRM(modrm),
+                    binop(Iop_Or32, mkexpr(t0), mkexpr(t1))
+                 );
+         DIP("movmskpd %s,%s\n", nameXMMReg(src), 
+                                 nameIReg(4, gregOfRM(modrm)));
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* 66 0F F7 = MASKMOVDQU -- store selected bytes of double quadword */
+   if (insn[0] == 0x0F && insn[1] == 0xF7) {
+      modrm = getIByte(delta+2);
+      if (sz == 2 && epartIsReg(modrm)) {
+         IRTemp regD    = newTemp(Ity_V128);
+         IRTemp mask    = newTemp(Ity_V128);
+         IRTemp olddata = newTemp(Ity_V128);
+         IRTemp newdata = newTemp(Ity_V128);
+                addr    = newTemp(Ity_I32);
+
+         assign( addr, handleSegOverride( sorb, getIReg(4, R_EDI) ));
+         assign( regD, getXMMReg( gregOfRM(modrm) ));
+
+         /* Unfortunately can't do the obvious thing with SarN8x16
+            here since that can't be re-emitted as SSE2 code - no such
+            insn. */
+	 assign( 
+            mask, 
+            binop(Iop_64HLtoV128,
+                  binop(Iop_SarN8x8, 
+                        getXMMRegLane64( eregOfRM(modrm), 1 ), 
+                        mkU8(7) ),
+                  binop(Iop_SarN8x8, 
+                        getXMMRegLane64( eregOfRM(modrm), 0 ), 
+                        mkU8(7) ) ));
+         assign( olddata, loadLE( Ity_V128, mkexpr(addr) ));
+         assign( newdata, 
+                 binop(Iop_OrV128, 
+                       binop(Iop_AndV128, 
+                             mkexpr(regD), 
+                             mkexpr(mask) ),
+                       binop(Iop_AndV128, 
+                             mkexpr(olddata),
+                             unop(Iop_NotV128, mkexpr(mask)))) );
+         storeLE( mkexpr(addr), mkexpr(newdata) );
+
+         delta += 2+1;
+         DIP("maskmovdqu %s,%s\n", nameXMMReg( eregOfRM(modrm) ),
+                                   nameXMMReg( gregOfRM(modrm) ) );
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* 66 0F E7 = MOVNTDQ -- for us, just a plain SSE store. */
+   if (insn[0] == 0x0F && insn[1] == 0xE7) {
+      modrm = getIByte(delta+2);
+      if (sz == 2 && !epartIsReg(modrm)) {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         storeLE( mkexpr(addr), getXMMReg(gregOfRM(modrm)) );
+         DIP("movntdq %s,%s\n", dis_buf,
+                                nameXMMReg(gregOfRM(modrm)));
+         delta += 2+alen;
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* 0F C3 = MOVNTI -- for us, just a plain ireg store. */
+   if (insn[0] == 0x0F && insn[1] == 0xC3) {
+      vassert(sz == 4);
+      modrm = getIByte(delta+2);
+      if (!epartIsReg(modrm)) {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         storeLE( mkexpr(addr), getIReg(4, gregOfRM(modrm)) );
+         DIP("movnti %s,%s\n", dis_buf,
+                               nameIReg(4, gregOfRM(modrm)));
+         delta += 2+alen;
+         goto decode_success;
+      }
+      /* else fall through */
+   }
+
+   /* 66 0F D6 = MOVQ -- move 64 bits from G (lo half xmm) to E (mem
+      or lo half xmm).  */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD6) {
+      modrm = getIByte(delta+2);
+      if (epartIsReg(modrm)) {
+         /* fall through, awaiting test case */
+         /* dst: lo half copied, hi half zeroed */
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         storeLE( mkexpr(addr), 
+                  getXMMRegLane64( gregOfRM(modrm), 0 ));
+         DIP("movq %s,%s\n", nameXMMReg(gregOfRM(modrm)), dis_buf );
+         delta += 2+alen;
+         goto decode_success;
+      }
+   }
+
+   /* F3 0F D6 = MOVQ2DQ -- move from E (mmx) to G (lo half xmm, zero
+      hi half). */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0xD6) {
+      vassert(sz == 4);
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         do_MMX_preamble();
+         putXMMReg( gregOfRM(modrm), 
+                    unop(Iop_64UtoV128, getMMXReg( eregOfRM(modrm) )) );
+         DIP("movq2dq %s,%s\n", nameMMXReg(eregOfRM(modrm)),
+                                nameXMMReg(gregOfRM(modrm)));
+         delta += 3+1;
+         goto decode_success;
+      } else {
+         /* fall through, apparently no mem case for this insn */
+      }
+   }
+
+   /* F3 0F 7E = MOVQ -- move 64 bits from E (mem or lo half xmm) to
+      G (lo half xmm).  Upper half of G is zeroed out. */
+   /* F2 0F 10 = MOVSD -- move 64 bits from E (mem or lo half xmm) to
+      G (lo half xmm).  If E is mem, upper half of G is zeroed out.
+      If E is reg, upper half of G is unchanged. */
+   if ((insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x10)
+       || (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x7E)) {
+      vassert(sz == 4);
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         putXMMRegLane64( gregOfRM(modrm), 0,
+                          getXMMRegLane64( eregOfRM(modrm), 0 ));
+         if (insn[0] == 0xF3/*MOVQ*/) {
+            /* zero bits 127:64 */
+            putXMMRegLane64( gregOfRM(modrm), 1, mkU64(0) );
+         }
+         DIP("movsd %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                              nameXMMReg(gregOfRM(modrm)));
+         delta += 3+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         /* zero bits 127:64 */
+         putXMMRegLane64( gregOfRM(modrm), 1, mkU64(0) );
+         /* write bits 63:0 */
+         putXMMRegLane64( gregOfRM(modrm), 0,
+                          loadLE(Ity_I64, mkexpr(addr)) );
+         DIP("movsd %s,%s\n", dis_buf,
+                              nameXMMReg(gregOfRM(modrm)));
+         delta += 3+alen;
+      }
+      goto decode_success;
+   }
+
+   /* F2 0F 11 = MOVSD -- move 64 bits from G (lo half xmm) to E (mem
+      or lo half xmm). */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x11) {
+      vassert(sz == 4);
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         putXMMRegLane64( eregOfRM(modrm), 0,
+                          getXMMRegLane64( gregOfRM(modrm), 0 ));
+         DIP("movsd %s,%s\n", nameXMMReg(gregOfRM(modrm)),
+                              nameXMMReg(eregOfRM(modrm)));
+         delta += 3+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         storeLE( mkexpr(addr),
+                  getXMMRegLane64(gregOfRM(modrm), 0) );
+         DIP("movsd %s,%s\n", nameXMMReg(gregOfRM(modrm)),
+                              dis_buf);
+         delta += 3+alen;
+      }
+      goto decode_success;
+   }
+
+   /* 66 0F 59 = MULPD -- mul 64Fx2 from R/M to R */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x59) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "mulpd", Iop_Mul64Fx2 );
+      goto decode_success;
+   }
+
+   /* F2 0F 59 = MULSD -- mul 64F0x2 from R/M to R */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x59) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo64( sorb, delta+3, "mulsd", Iop_Mul64F0x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F 56 = ORPD -- G = G or E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x56) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "orpd", Iop_OrV128 );
+      goto decode_success;
+   }
+
+   /* 66 0F C6 /r ib = SHUFPD -- shuffle packed F64s */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xC6) {
+      Int    select;
+      IRTemp sV = newTemp(Ity_V128);
+      IRTemp dV = newTemp(Ity_V128);
+      IRTemp s1 = newTemp(Ity_I64);
+      IRTemp s0 = newTemp(Ity_I64);
+      IRTemp d1 = newTemp(Ity_I64);
+      IRTemp d0 = newTemp(Ity_I64);
+
+      modrm = insn[2];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         select = (Int)insn[3];
+         delta += 2+2;
+         DIP("shufpd $%d,%s,%s\n", select, 
+                                   nameXMMReg(eregOfRM(modrm)),
+                                   nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         select = (Int)insn[2+alen];
+         delta += 3+alen;
+         DIP("shufpd $%d,%s,%s\n", select, 
+                                   dis_buf,
+                                   nameXMMReg(gregOfRM(modrm)));
+      }
+
+      assign( d1, unop(Iop_V128HIto64, mkexpr(dV)) );
+      assign( d0, unop(Iop_V128to64,   mkexpr(dV)) );
+      assign( s1, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( s0, unop(Iop_V128to64,   mkexpr(sV)) );
+
+#     define SELD(n) mkexpr((n)==0 ? d0 : d1)
+#     define SELS(n) mkexpr((n)==0 ? s0 : s1)
+
+      putXMMReg(
+         gregOfRM(modrm), 
+         binop(Iop_64HLtoV128, SELS((select>>1)&1), SELD((select>>0)&1) )
+      );
+
+#     undef SELD
+#     undef SELS
+
+      goto decode_success;
+   }
+
+   /* 66 0F 51 = SQRTPD -- approx sqrt 64Fx2 from R/M to R */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x51) {
+      delta = dis_SSE_E_to_G_unary_all( sorb, delta+2, 
+                                        "sqrtpd", Iop_Sqrt64Fx2 );
+      goto decode_success;
+   }
+
+   /* F2 0F 51 = SQRTSD -- approx sqrt 64F0x2 from R/M to R */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x51) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_unary_lo64( sorb, delta+3, 
+                                         "sqrtsd", Iop_Sqrt64F0x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F 5C = SUBPD -- sub 64Fx2 from R/M to R */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x5C) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "subpd", Iop_Sub64Fx2 );
+      goto decode_success;
+   }
+
+   /* F2 0F 5C = SUBSD -- sub 64F0x2 from R/M to R */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x5C) {
+      vassert(sz == 4);
+      delta = dis_SSE_E_to_G_lo64( sorb, delta+3, "subsd", Iop_Sub64F0x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F 15 = UNPCKHPD -- unpack and interleave high part F64s */
+   /* 66 0F 14 = UNPCKLPD -- unpack and interleave low part F64s */
+   /* These just appear to be special cases of SHUFPS */
+   if (sz == 2 && insn[0] == 0x0F && (insn[1] == 0x15 || insn[1] == 0x14)) {
+      IRTemp s1 = newTemp(Ity_I64);
+      IRTemp s0 = newTemp(Ity_I64);
+      IRTemp d1 = newTemp(Ity_I64);
+      IRTemp d0 = newTemp(Ity_I64);
+      IRTemp sV = newTemp(Ity_V128);
+      IRTemp dV = newTemp(Ity_V128);
+      Bool   hi = toBool(insn[1] == 0x15);
+
+      modrm = insn[2];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("unpck%sps %s,%s\n", hi ? "h" : "l",
+                                  nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("unpck%sps %s,%s\n", hi ? "h" : "l",
+                                  dis_buf,
+                                  nameXMMReg(gregOfRM(modrm)));
+      }
+
+      assign( d1, unop(Iop_V128HIto64, mkexpr(dV)) );
+      assign( d0, unop(Iop_V128to64,   mkexpr(dV)) );
+      assign( s1, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( s0, unop(Iop_V128to64,   mkexpr(sV)) );
+
+      if (hi) {
+         putXMMReg( gregOfRM(modrm), 
+                    binop(Iop_64HLtoV128, mkexpr(s1), mkexpr(d1)) );
+      } else {
+         putXMMReg( gregOfRM(modrm), 
+                    binop(Iop_64HLtoV128, mkexpr(s0), mkexpr(d0)) );
+      }
+
+      goto decode_success;
+   }
+
+   /* 66 0F 57 = XORPD -- G = G xor E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x57) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "xorpd", Iop_XorV128 );
+      goto decode_success;
+   }
+
+   /* 66 0F 6B = PACKSSDW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x6B) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "packssdw",
+                                 Iop_QNarrowBin32Sto16Sx8, True );
+      goto decode_success;
+   }
+
+   /* 66 0F 63 = PACKSSWB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x63) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "packsswb",
+                                 Iop_QNarrowBin16Sto8Sx16, True );
+      goto decode_success;
+   }
+
+   /* 66 0F 67 = PACKUSWB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x67) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "packuswb",
+                                 Iop_QNarrowBin16Sto8Ux16, True );
+      goto decode_success;
+   }
+
+   /* 66 0F FC = PADDB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xFC) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "paddb", Iop_Add8x16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F FE = PADDD */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xFE) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "paddd", Iop_Add32x4, False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE2 ---*** */
+   /* 0F D4 = PADDQ -- add 64x1 */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xD4) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                sorb, delta+2, insn[1], "paddq", False );
+      goto decode_success;
+   }
+
+   /* 66 0F D4 = PADDQ */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD4) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "paddq", Iop_Add64x2, False );
+      goto decode_success;
+   }
+
+   /* 66 0F FD = PADDW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xFD) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "paddw", Iop_Add16x8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F EC = PADDSB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xEC) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "paddsb", Iop_QAdd8Sx16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F ED = PADDSW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xED) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "paddsw", Iop_QAdd16Sx8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F DC = PADDUSB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xDC) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "paddusb", Iop_QAdd8Ux16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F DD = PADDUSW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xDD) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "paddusw", Iop_QAdd16Ux8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F DB = PAND */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xDB) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "pand", Iop_AndV128 );
+      goto decode_success;
+   }
+
+   /* 66 0F DF = PANDN */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xDF) {
+      delta = dis_SSE_E_to_G_all_invG( sorb, delta+2, "pandn", Iop_AndV128 );
+      goto decode_success;
+   }
+
+   /* 66 0F E0 = PAVGB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xE0) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pavgb", Iop_Avg8Ux16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F E3 = PAVGW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xE3) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pavgw", Iop_Avg16Ux8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F 74 = PCMPEQB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x74) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pcmpeqb", Iop_CmpEQ8x16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F 76 = PCMPEQD */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x76) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pcmpeqd", Iop_CmpEQ32x4, False );
+      goto decode_success;
+   }
+
+   /* 66 0F 75 = PCMPEQW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x75) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pcmpeqw", Iop_CmpEQ16x8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F 64 = PCMPGTB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x64) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pcmpgtb", Iop_CmpGT8Sx16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F 66 = PCMPGTD */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x66) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pcmpgtd", Iop_CmpGT32Sx4, False );
+      goto decode_success;
+   }
+
+   /* 66 0F 65 = PCMPGTW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x65) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pcmpgtw", Iop_CmpGT16Sx8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F C5 = PEXTRW -- extract 16-bit field from xmm(E) and put 
+      zero-extend of it in ireg(G). */
+   if (insn[0] == 0x0F && insn[1] == 0xC5) {
+      modrm = insn[2];
+      if (sz == 2 && epartIsReg(modrm)) {
+         t5 = newTemp(Ity_V128);
+         t4 = newTemp(Ity_I16);
+         assign(t5, getXMMReg(eregOfRM(modrm)));
+         breakup128to32s( t5, &t3, &t2, &t1, &t0 );
+         switch (insn[3] & 7) {
+            case 0:  assign(t4, unop(Iop_32to16,   mkexpr(t0))); break;
+            case 1:  assign(t4, unop(Iop_32HIto16, mkexpr(t0))); break;
+            case 2:  assign(t4, unop(Iop_32to16,   mkexpr(t1))); break;
+            case 3:  assign(t4, unop(Iop_32HIto16, mkexpr(t1))); break;
+            case 4:  assign(t4, unop(Iop_32to16,   mkexpr(t2))); break;
+            case 5:  assign(t4, unop(Iop_32HIto16, mkexpr(t2))); break;
+            case 6:  assign(t4, unop(Iop_32to16,   mkexpr(t3))); break;
+            case 7:  assign(t4, unop(Iop_32HIto16, mkexpr(t3))); break;
+            default: vassert(0); /*NOTREACHED*/
+         }
+         putIReg(4, gregOfRM(modrm), unop(Iop_16Uto32, mkexpr(t4)));
+         DIP("pextrw $%d,%s,%s\n",
+             (Int)insn[3], nameXMMReg(eregOfRM(modrm)),
+                           nameIReg(4,gregOfRM(modrm)));
+         delta += 4;
+         goto decode_success;
+      } 
+      /* else fall through */
+   }
+
+   /* 66 0F C4 = PINSRW -- get 16 bits from E(mem or low half ireg) and
+      put it into the specified lane of xmm(G). */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xC4) {
+      Int lane;
+      t4 = newTemp(Ity_I16);
+      modrm = insn[2];
+
+      if (epartIsReg(modrm)) {
+         assign(t4, getIReg(2, eregOfRM(modrm)));
+         delta += 3+1;
+         lane = insn[3+1-1];
+         DIP("pinsrw $%d,%s,%s\n", (Int)lane, 
+                                   nameIReg(2,eregOfRM(modrm)),
+                                   nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         delta += 3+alen;
+         lane = insn[3+alen-1];
+         assign(t4, loadLE(Ity_I16, mkexpr(addr)));
+         DIP("pinsrw $%d,%s,%s\n", (Int)lane, 
+                                   dis_buf,
+                                   nameXMMReg(gregOfRM(modrm)));
+      }
+
+      putXMMRegLane16( gregOfRM(modrm), lane & 7, mkexpr(t4) );
+      goto decode_success;
+   }
+
+   /* 66 0F F5 = PMADDWD -- Multiply and add packed integers from
+      E(xmm or mem) to G(xmm) */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xF5) {
+      IRTemp s1V  = newTemp(Ity_V128);
+      IRTemp s2V  = newTemp(Ity_V128);
+      IRTemp dV   = newTemp(Ity_V128);
+      IRTemp s1Hi = newTemp(Ity_I64);
+      IRTemp s1Lo = newTemp(Ity_I64);
+      IRTemp s2Hi = newTemp(Ity_I64);
+      IRTemp s2Lo = newTemp(Ity_I64);
+      IRTemp dHi  = newTemp(Ity_I64);
+      IRTemp dLo  = newTemp(Ity_I64);
+      modrm = insn[2];
+      if (epartIsReg(modrm)) {
+         assign( s1V, getXMMReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("pmaddwd %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( s1V, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("pmaddwd %s,%s\n", dis_buf,
+                                nameXMMReg(gregOfRM(modrm)));
+      }
+      assign( s2V, getXMMReg(gregOfRM(modrm)) );
+      assign( s1Hi, unop(Iop_V128HIto64, mkexpr(s1V)) );
+      assign( s1Lo, unop(Iop_V128to64,   mkexpr(s1V)) );
+      assign( s2Hi, unop(Iop_V128HIto64, mkexpr(s2V)) );
+      assign( s2Lo, unop(Iop_V128to64,   mkexpr(s2V)) );
+      assign( dHi, mkIRExprCCall(
+                      Ity_I64, 0/*regparms*/,
+                      "x86g_calculate_mmx_pmaddwd", 
+                      &x86g_calculate_mmx_pmaddwd,
+                      mkIRExprVec_2( mkexpr(s1Hi), mkexpr(s2Hi))
+                   ));
+      assign( dLo, mkIRExprCCall(
+                      Ity_I64, 0/*regparms*/,
+                      "x86g_calculate_mmx_pmaddwd", 
+                      &x86g_calculate_mmx_pmaddwd,
+                      mkIRExprVec_2( mkexpr(s1Lo), mkexpr(s2Lo))
+                   ));
+      assign( dV, binop(Iop_64HLtoV128, mkexpr(dHi), mkexpr(dLo))) ;
+      putXMMReg(gregOfRM(modrm), mkexpr(dV));
+      goto decode_success;
+   }
+
+   /* 66 0F EE = PMAXSW -- 16x8 signed max */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xEE) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pmaxsw", Iop_Max16Sx8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F DE = PMAXUB -- 8x16 unsigned max */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xDE) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pmaxub", Iop_Max8Ux16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F EA = PMINSW -- 16x8 signed min */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xEA) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pminsw", Iop_Min16Sx8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F DA = PMINUB -- 8x16 unsigned min */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xDA) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pminub", Iop_Min8Ux16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F D7 = PMOVMSKB -- extract sign bits from each of 16 lanes
+      in xmm(E), turn them into a byte, and put zero-extend of it in
+      ireg(G). */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD7) {
+      modrm = insn[2];
+      if (epartIsReg(modrm)) {
+         t0 = newTemp(Ity_I64);
+         t1 = newTemp(Ity_I64);
+         assign(t0, getXMMRegLane64(eregOfRM(modrm), 0));
+         assign(t1, getXMMRegLane64(eregOfRM(modrm), 1));
+         t5 = newTemp(Ity_I32);
+         assign(t5,
+                unop(Iop_16Uto32,
+                     binop(Iop_8HLto16,
+                           unop(Iop_GetMSBs8x8, mkexpr(t1)),
+                           unop(Iop_GetMSBs8x8, mkexpr(t0)))));
+         putIReg(4, gregOfRM(modrm), mkexpr(t5));
+         DIP("pmovmskb %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameIReg(4,gregOfRM(modrm)));
+         delta += 3;
+         goto decode_success;
+      } 
+      /* else fall through */
+   }
+
+   /* 66 0F E4 = PMULHUW -- 16x8 hi-half of unsigned widening multiply */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xE4) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pmulhuw", Iop_MulHi16Ux8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F E5 = PMULHW -- 16x8 hi-half of signed widening multiply */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xE5) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pmulhw", Iop_MulHi16Sx8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F D5 = PMULLW -- 16x8 multiply */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD5) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "pmullw", Iop_Mul16x8, False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE2 ---*** */
+   /* 0F F4 = PMULUDQ -- unsigned widening multiply of 32-lanes 0 x
+      0 to form 64-bit result */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xF4) {
+      IRTemp sV = newTemp(Ity_I64);
+      IRTemp dV = newTemp(Ity_I64);
+      t1 = newTemp(Ity_I32);
+      t0 = newTemp(Ity_I32);
+      modrm = insn[2];
+
+      do_MMX_preamble();
+      assign( dV, getMMXReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getMMXReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("pmuludq %s,%s\n", nameMMXReg(eregOfRM(modrm)),
+                                nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("pmuludq %s,%s\n", dis_buf,
+                                nameMMXReg(gregOfRM(modrm)));
+      }
+
+      assign( t0, unop(Iop_64to32, mkexpr(dV)) );
+      assign( t1, unop(Iop_64to32, mkexpr(sV)) );
+      putMMXReg( gregOfRM(modrm),
+                 binop( Iop_MullU32, mkexpr(t0), mkexpr(t1) ) );
+      goto decode_success;
+   }
+
+   /* 66 0F F4 = PMULUDQ -- unsigned widening multiply of 32-lanes 0 x
+      0 to form lower 64-bit half and lanes 2 x 2 to form upper 64-bit
+      half */
+   /* This is a really poor translation -- could be improved if
+      performance critical */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xF4) {
+      IRTemp sV, dV;
+      IRTemp s3, s2, s1, s0, d3, d2, d1, d0;
+      sV = newTemp(Ity_V128);
+      dV = newTemp(Ity_V128);
+      s3 = s2 = s1 = s0 = d3 = d2 = d1 = d0 = IRTemp_INVALID;
+      t1 = newTemp(Ity_I64);
+      t0 = newTemp(Ity_I64);
+      modrm = insn[2];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("pmuludq %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("pmuludq %s,%s\n", dis_buf,
+                                nameXMMReg(gregOfRM(modrm)));
+      }
+
+      breakup128to32s( dV, &d3, &d2, &d1, &d0 );
+      breakup128to32s( sV, &s3, &s2, &s1, &s0 );
+
+      assign( t0, binop( Iop_MullU32, mkexpr(d0), mkexpr(s0)) );
+      putXMMRegLane64( gregOfRM(modrm), 0, mkexpr(t0) );
+      assign( t1, binop( Iop_MullU32, mkexpr(d2), mkexpr(s2)) );
+      putXMMRegLane64( gregOfRM(modrm), 1, mkexpr(t1) );
+      goto decode_success;
+   }
+
+   /* 66 0F EB = POR */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xEB) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "por", Iop_OrV128 );
+      goto decode_success;
+   }
+
+   /* 66 0F F6 = PSADBW -- 2 x (8x8 -> 48 zeroes ++ u16) Sum Abs Diffs
+      from E(xmm or mem) to G(xmm) */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xF6) {
+      IRTemp s1V  = newTemp(Ity_V128);
+      IRTemp s2V  = newTemp(Ity_V128);
+      IRTemp dV   = newTemp(Ity_V128);
+      IRTemp s1Hi = newTemp(Ity_I64);
+      IRTemp s1Lo = newTemp(Ity_I64);
+      IRTemp s2Hi = newTemp(Ity_I64);
+      IRTemp s2Lo = newTemp(Ity_I64);
+      IRTemp dHi  = newTemp(Ity_I64);
+      IRTemp dLo  = newTemp(Ity_I64);
+      modrm = insn[2];
+      if (epartIsReg(modrm)) {
+         assign( s1V, getXMMReg(eregOfRM(modrm)) );
+         delta += 2+1;
+         DIP("psadbw %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                               nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( s1V, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 2+alen;
+         DIP("psadbw %s,%s\n", dis_buf,
+                               nameXMMReg(gregOfRM(modrm)));
+      }
+      assign( s2V, getXMMReg(gregOfRM(modrm)) );
+      assign( s1Hi, unop(Iop_V128HIto64, mkexpr(s1V)) );
+      assign( s1Lo, unop(Iop_V128to64,   mkexpr(s1V)) );
+      assign( s2Hi, unop(Iop_V128HIto64, mkexpr(s2V)) );
+      assign( s2Lo, unop(Iop_V128to64,   mkexpr(s2V)) );
+      assign( dHi, mkIRExprCCall(
+                      Ity_I64, 0/*regparms*/,
+                      "x86g_calculate_mmx_psadbw", 
+                      &x86g_calculate_mmx_psadbw,
+                      mkIRExprVec_2( mkexpr(s1Hi), mkexpr(s2Hi))
+                   ));
+      assign( dLo, mkIRExprCCall(
+                      Ity_I64, 0/*regparms*/,
+                      "x86g_calculate_mmx_psadbw", 
+                      &x86g_calculate_mmx_psadbw,
+                      mkIRExprVec_2( mkexpr(s1Lo), mkexpr(s2Lo))
+                   ));
+      assign( dV, binop(Iop_64HLtoV128, mkexpr(dHi), mkexpr(dLo))) ;
+      putXMMReg(gregOfRM(modrm), mkexpr(dV));
+      goto decode_success;
+   }
+
+   /* 66 0F 70 = PSHUFD -- rearrange 4x32 from E(xmm or mem) to G(xmm) */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x70) {
+      Int order;
+      IRTemp sV, dV, s3, s2, s1, s0;
+      s3 = s2 = s1 = s0 = IRTemp_INVALID;
+      sV = newTemp(Ity_V128);
+      dV = newTemp(Ity_V128);
+      modrm = insn[2];
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         order = (Int)insn[3];
+         delta += 2+2;
+         DIP("pshufd $%d,%s,%s\n", order, 
+                                   nameXMMReg(eregOfRM(modrm)),
+                                   nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+	 order = (Int)insn[2+alen];
+         delta += 3+alen;
+         DIP("pshufd $%d,%s,%s\n", order, 
+                                   dis_buf,
+                                   nameXMMReg(gregOfRM(modrm)));
+      }
+      breakup128to32s( sV, &s3, &s2, &s1, &s0 );
+
+#     define SEL(n) \
+                ((n)==0 ? s0 : ((n)==1 ? s1 : ((n)==2 ? s2 : s3)))
+      assign(dV,
+	     mk128from32s( SEL((order>>6)&3), SEL((order>>4)&3),
+                           SEL((order>>2)&3), SEL((order>>0)&3) )
+      );
+      putXMMReg(gregOfRM(modrm), mkexpr(dV));
+#     undef SEL
+      goto decode_success;
+   }
+
+   /* F3 0F 70 = PSHUFHW -- rearrange upper half 4x16 from E(xmm or
+      mem) to G(xmm), and copy lower half */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0x70) {
+      Int order;
+      IRTemp sVhi, dVhi, sV, dV, s3, s2, s1, s0;
+      s3 = s2 = s1 = s0 = IRTemp_INVALID;
+      sV   = newTemp(Ity_V128);
+      dV   = newTemp(Ity_V128);
+      sVhi = newTemp(Ity_I64);
+      dVhi = newTemp(Ity_I64);
+      modrm = insn[3];
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         order = (Int)insn[4];
+         delta += 4+1;
+         DIP("pshufhw $%d,%s,%s\n", order, 
+                                    nameXMMReg(eregOfRM(modrm)),
+                                    nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+	 order = (Int)insn[3+alen];
+         delta += 4+alen;
+         DIP("pshufhw $%d,%s,%s\n", order, 
+                                    dis_buf,
+                                    nameXMMReg(gregOfRM(modrm)));
+      }
+      assign( sVhi, unop(Iop_V128HIto64, mkexpr(sV)) );
+      breakup64to16s( sVhi, &s3, &s2, &s1, &s0 );
+
+#     define SEL(n) \
+                ((n)==0 ? s0 : ((n)==1 ? s1 : ((n)==2 ? s2 : s3)))
+      assign(dVhi,
+	     mk64from16s( SEL((order>>6)&3), SEL((order>>4)&3),
+                          SEL((order>>2)&3), SEL((order>>0)&3) )
+      );
+      assign(dV, binop( Iop_64HLtoV128, 
+                        mkexpr(dVhi),
+                        unop(Iop_V128to64, mkexpr(sV))) );
+      putXMMReg(gregOfRM(modrm), mkexpr(dV));
+#     undef SEL
+      goto decode_success;
+   }
+
+   /* F2 0F 70 = PSHUFLW -- rearrange lower half 4x16 from E(xmm or
+      mem) to G(xmm), and copy upper half */
+   if (insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x70) {
+      Int order;
+      IRTemp sVlo, dVlo, sV, dV, s3, s2, s1, s0;
+      s3 = s2 = s1 = s0 = IRTemp_INVALID;
+      sV   = newTemp(Ity_V128);
+      dV   = newTemp(Ity_V128);
+      sVlo = newTemp(Ity_I64);
+      dVlo = newTemp(Ity_I64);
+      modrm = insn[3];
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         order = (Int)insn[4];
+         delta += 4+1;
+         DIP("pshuflw $%d,%s,%s\n", order, 
+                                    nameXMMReg(eregOfRM(modrm)),
+                                    nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+	 order = (Int)insn[3+alen];
+         delta += 4+alen;
+         DIP("pshuflw $%d,%s,%s\n", order, 
+                                    dis_buf,
+                                    nameXMMReg(gregOfRM(modrm)));
+      }
+      assign( sVlo, unop(Iop_V128to64, mkexpr(sV)) );
+      breakup64to16s( sVlo, &s3, &s2, &s1, &s0 );
+
+#     define SEL(n) \
+                ((n)==0 ? s0 : ((n)==1 ? s1 : ((n)==2 ? s2 : s3)))
+      assign(dVlo,
+	     mk64from16s( SEL((order>>6)&3), SEL((order>>4)&3),
+                          SEL((order>>2)&3), SEL((order>>0)&3) )
+      );
+      assign(dV, binop( Iop_64HLtoV128,
+                        unop(Iop_V128HIto64, mkexpr(sV)),
+                        mkexpr(dVlo) ) );
+      putXMMReg(gregOfRM(modrm), mkexpr(dV));
+#     undef SEL
+      goto decode_success;
+   }
+
+   /* 66 0F 72 /6 ib = PSLLD by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x72
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 6) {
+      delta = dis_SSE_shiftE_imm( delta+2, "pslld", Iop_ShlN32x4 );
+      goto decode_success;
+   }
+
+   /* 66 0F F2 = PSLLD by E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xF2) {
+      delta = dis_SSE_shiftG_byE( sorb, delta+2, "pslld", Iop_ShlN32x4 );
+      goto decode_success;
+   }
+
+   /* 66 0F 73 /7 ib = PSLLDQ by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x73
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 7) {
+      IRTemp sV, dV, hi64, lo64, hi64r, lo64r;
+      Int    imm = (Int)insn[3];
+      Int    reg = eregOfRM(insn[2]);
+      DIP("pslldq $%d,%s\n", imm, nameXMMReg(reg));
+      vassert(imm >= 0 && imm <= 255);
+      delta += 4;
+
+      sV    = newTemp(Ity_V128);
+      dV    = newTemp(Ity_V128);
+      hi64  = newTemp(Ity_I64);
+      lo64  = newTemp(Ity_I64);
+      hi64r = newTemp(Ity_I64);
+      lo64r = newTemp(Ity_I64);
+
+      if (imm >= 16) {
+         putXMMReg(reg, mkV128(0x0000));
+         goto decode_success;
+      }
+
+      assign( sV, getXMMReg(reg) );
+      assign( hi64, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( lo64, unop(Iop_V128to64, mkexpr(sV)) );
+
+      if (imm == 0) {
+         assign( lo64r, mkexpr(lo64) );
+         assign( hi64r, mkexpr(hi64) );
+      }
+      else
+      if (imm == 8) {
+         assign( lo64r, mkU64(0) );
+         assign( hi64r, mkexpr(lo64) );
+      }
+      else
+      if (imm > 8) {
+         assign( lo64r, mkU64(0) );
+         assign( hi64r, binop( Iop_Shl64, 
+                               mkexpr(lo64),
+                               mkU8( 8*(imm-8) ) ));
+      } else {
+         assign( lo64r, binop( Iop_Shl64, 
+                               mkexpr(lo64),
+                               mkU8(8 * imm) ));
+         assign( hi64r, 
+                 binop( Iop_Or64,
+                        binop(Iop_Shl64, mkexpr(hi64), 
+                                         mkU8(8 * imm)),
+                        binop(Iop_Shr64, mkexpr(lo64),
+                                         mkU8(8 * (8 - imm)) )
+                      )
+               );
+      }
+      assign( dV, binop(Iop_64HLtoV128, mkexpr(hi64r), mkexpr(lo64r)) );
+      putXMMReg(reg, mkexpr(dV));
+      goto decode_success;
+   }
+
+   /* 66 0F 73 /6 ib = PSLLQ by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x73
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 6) {
+      delta = dis_SSE_shiftE_imm( delta+2, "psllq", Iop_ShlN64x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F F3 = PSLLQ by E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xF3) {
+      delta = dis_SSE_shiftG_byE( sorb, delta+2, "psllq", Iop_ShlN64x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F 71 /6 ib = PSLLW by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x71
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 6) {
+      delta = dis_SSE_shiftE_imm( delta+2, "psllw", Iop_ShlN16x8 );
+      goto decode_success;
+   }
+
+   /* 66 0F F1 = PSLLW by E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xF1) {
+      delta = dis_SSE_shiftG_byE( sorb, delta+2, "psllw", Iop_ShlN16x8 );
+      goto decode_success;
+   }
+
+   /* 66 0F 72 /4 ib = PSRAD by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x72
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 4) {
+      delta = dis_SSE_shiftE_imm( delta+2, "psrad", Iop_SarN32x4 );
+      goto decode_success;
+   }
+
+   /* 66 0F E2 = PSRAD by E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xE2) {
+      delta = dis_SSE_shiftG_byE( sorb, delta+2, "psrad", Iop_SarN32x4 );
+      goto decode_success;
+   }
+
+   /* 66 0F 71 /4 ib = PSRAW by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x71
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 4) {
+      delta = dis_SSE_shiftE_imm( delta+2, "psraw", Iop_SarN16x8 );
+      goto decode_success;
+   }
+
+   /* 66 0F E1 = PSRAW by E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xE1) {
+      delta = dis_SSE_shiftG_byE( sorb, delta+2, "psraw", Iop_SarN16x8 );
+      goto decode_success;
+   }
+
+   /* 66 0F 72 /2 ib = PSRLD by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x72
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 2) {
+      delta = dis_SSE_shiftE_imm( delta+2, "psrld", Iop_ShrN32x4 );
+      goto decode_success;
+   }
+
+   /* 66 0F D2 = PSRLD by E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD2) {
+      delta = dis_SSE_shiftG_byE( sorb, delta+2, "psrld", Iop_ShrN32x4 );
+      goto decode_success;
+   }
+
+   /* 66 0F 73 /3 ib = PSRLDQ by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x73
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 3) {
+      IRTemp sV, dV, hi64, lo64, hi64r, lo64r;
+      Int    imm = (Int)insn[3];
+      Int    reg = eregOfRM(insn[2]);
+      DIP("psrldq $%d,%s\n", imm, nameXMMReg(reg));
+      vassert(imm >= 0 && imm <= 255);
+      delta += 4;
+
+      sV    = newTemp(Ity_V128);
+      dV    = newTemp(Ity_V128);
+      hi64  = newTemp(Ity_I64);
+      lo64  = newTemp(Ity_I64);
+      hi64r = newTemp(Ity_I64);
+      lo64r = newTemp(Ity_I64);
+
+      if (imm >= 16) {
+         putXMMReg(reg, mkV128(0x0000));
+         goto decode_success;
+      }
+
+      assign( sV, getXMMReg(reg) );
+      assign( hi64, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( lo64, unop(Iop_V128to64, mkexpr(sV)) );
+
+      if (imm == 0) {
+         assign( lo64r, mkexpr(lo64) );
+         assign( hi64r, mkexpr(hi64) );
+      }
+      else
+      if (imm == 8) {
+         assign( hi64r, mkU64(0) );
+         assign( lo64r, mkexpr(hi64) );
+      }
+      else 
+      if (imm > 8) {
+         assign( hi64r, mkU64(0) );
+         assign( lo64r, binop( Iop_Shr64, 
+                               mkexpr(hi64),
+                               mkU8( 8*(imm-8) ) ));
+      } else {
+         assign( hi64r, binop( Iop_Shr64, 
+                               mkexpr(hi64),
+                               mkU8(8 * imm) ));
+         assign( lo64r, 
+                 binop( Iop_Or64,
+                        binop(Iop_Shr64, mkexpr(lo64), 
+                                         mkU8(8 * imm)),
+                        binop(Iop_Shl64, mkexpr(hi64),
+                                         mkU8(8 * (8 - imm)) )
+                      )
+               );
+      }
+
+      assign( dV, binop(Iop_64HLtoV128, mkexpr(hi64r), mkexpr(lo64r)) );
+      putXMMReg(reg, mkexpr(dV));
+      goto decode_success;
+   }
+
+   /* 66 0F 73 /2 ib = PSRLQ by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x73
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 2) {
+      delta = dis_SSE_shiftE_imm( delta+2, "psrlq", Iop_ShrN64x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F D3 = PSRLQ by E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD3) {
+      delta = dis_SSE_shiftG_byE( sorb, delta+2, "psrlq", Iop_ShrN64x2 );
+      goto decode_success;
+   }
+
+   /* 66 0F 71 /2 ib = PSRLW by immediate */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x71
+       && epartIsReg(insn[2])
+       && gregOfRM(insn[2]) == 2) {
+      delta = dis_SSE_shiftE_imm( delta+2, "psrlw", Iop_ShrN16x8 );
+      goto decode_success;
+   }
+
+   /* 66 0F D1 = PSRLW by E */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD1) {
+      delta = dis_SSE_shiftG_byE( sorb, delta+2, "psrlw", Iop_ShrN16x8 );
+      goto decode_success;
+   }
+
+   /* 66 0F F8 = PSUBB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xF8) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "psubb", Iop_Sub8x16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F FA = PSUBD */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xFA) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "psubd", Iop_Sub32x4, False );
+      goto decode_success;
+   }
+
+   /* ***--- this is an MMX class insn introduced in SSE2 ---*** */
+   /* 0F FB = PSUBQ -- sub 64x1 */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xFB) {
+      do_MMX_preamble();
+      delta = dis_MMXop_regmem_to_reg ( 
+                sorb, delta+2, insn[1], "psubq", False );
+      goto decode_success;
+   }
+
+   /* 66 0F FB = PSUBQ */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xFB) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "psubq", Iop_Sub64x2, False );
+      goto decode_success;
+   }
+
+   /* 66 0F F9 = PSUBW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xF9) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "psubw", Iop_Sub16x8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F E8 = PSUBSB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xE8) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "psubsb", Iop_QSub8Sx16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F E9 = PSUBSW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xE9) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "psubsw", Iop_QSub16Sx8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F D8 = PSUBUSB */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD8) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "psubusb", Iop_QSub8Ux16, False );
+      goto decode_success;
+   }
+
+   /* 66 0F D9 = PSUBUSW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD9) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "psubusw", Iop_QSub16Ux8, False );
+      goto decode_success;
+   }
+
+   /* 66 0F 68 = PUNPCKHBW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x68) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "punpckhbw",
+                                 Iop_InterleaveHI8x16, True );
+      goto decode_success;
+   }
+
+   /* 66 0F 6A = PUNPCKHDQ */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x6A) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "punpckhdq",
+                                 Iop_InterleaveHI32x4, True );
+      goto decode_success;
+   }
+
+   /* 66 0F 6D = PUNPCKHQDQ */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x6D) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "punpckhqdq",
+                                 Iop_InterleaveHI64x2, True );
+      goto decode_success;
+   }
+
+   /* 66 0F 69 = PUNPCKHWD */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x69) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "punpckhwd",
+                                 Iop_InterleaveHI16x8, True );
+      goto decode_success;
+   }
+
+   /* 66 0F 60 = PUNPCKLBW */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x60) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "punpcklbw",
+                                 Iop_InterleaveLO8x16, True );
+      goto decode_success;
+   }
+
+   /* 66 0F 62 = PUNPCKLDQ */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x62) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "punpckldq",
+                                 Iop_InterleaveLO32x4, True );
+      goto decode_success;
+   }
+
+   /* 66 0F 6C = PUNPCKLQDQ */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x6C) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "punpcklqdq",
+                                 Iop_InterleaveLO64x2, True );
+      goto decode_success;
+   }
+
+   /* 66 0F 61 = PUNPCKLWD */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0x61) {
+      delta = dis_SSEint_E_to_G( sorb, delta+2, 
+                                 "punpcklwd",
+                                 Iop_InterleaveLO16x8, True );
+      goto decode_success;
+   }
+
+   /* 66 0F EF = PXOR */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xEF) {
+      delta = dis_SSE_E_to_G_all( sorb, delta+2, "pxor", Iop_XorV128 );
+      goto decode_success;
+   }
+
+//--    /* FXSAVE/FXRSTOR m32 -- load/store the FPU/MMX/SSE state. */
+//--    if (insn[0] == 0x0F && insn[1] == 0xAE 
+//--        && (!epartIsReg(insn[2]))
+//--        && (gregOfRM(insn[2]) == 1 || gregOfRM(insn[2]) == 0) ) {
+//--       Bool store = gregOfRM(insn[2]) == 0;
+//--       vg_assert(sz == 4);
+//--       pair = disAMode ( cb, sorb, eip+2, dis_buf );
+//--       t1   = LOW24(pair);
+//--       eip += 2+HI8(pair);
+//--       uInstr3(cb, store ? SSE2a_MemWr : SSE2a_MemRd, 512,
+//--                   Lit16, (((UShort)insn[0]) << 8) | (UShort)insn[1],
+//--                   Lit16, (UShort)insn[2],
+//--                   TempReg, t1 );
+//--       DIP("fx%s %s\n", store ? "save" : "rstor", dis_buf );
+//--       goto decode_success;
+//--    }
+
+   /* 0F AE /7 = CLFLUSH -- flush cache line */
+   if (sz == 4 && insn[0] == 0x0F && insn[1] == 0xAE
+       && !epartIsReg(insn[2]) && gregOfRM(insn[2]) == 7) {
+
+      /* This is something of a hack.  We need to know the size of the
+         cache line containing addr.  Since we don't (easily), assume
+         256 on the basis that no real cache would have a line that
+         big.  It's safe to invalidate more stuff than we need, just
+         inefficient. */
+      UInt lineszB = 256;
+
+      addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+      delta += 2+alen;
+
+      /* Round addr down to the start of the containing block. */
+      stmt( IRStmt_Put(
+               OFFB_CMSTART,
+               binop( Iop_And32, 
+                      mkexpr(addr), 
+                      mkU32( ~(lineszB-1) ))) );
+
+      stmt( IRStmt_Put(OFFB_CMLEN, mkU32(lineszB) ) );
+
+      jmp_lit(&dres, Ijk_InvalICache, (Addr32)(guest_EIP_bbstart+delta));
+
+      DIP("clflush %s\n", dis_buf);
+      goto decode_success;
+   }
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSE2 decoder.                     --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- start of the SSE3 decoder.                   --- */
+   /* ---------------------------------------------------- */
+
+   /* Skip parts of the decoder which don't apply given the stated
+      guest subarchitecture. */
+   if (0 == (archinfo->hwcaps & VEX_HWCAPS_X86_SSE3))
+      goto after_sse_decoders; /* no SSE3 capabilities */
+
+   insn = &guest_code[delta];
+
+   /* F3 0F 12 = MOVSLDUP -- move from E (mem or xmm) to G (xmm),
+      duplicating some lanes (2:2:0:0). */
+   /* F3 0F 16 = MOVSHDUP -- move from E (mem or xmm) to G (xmm),
+      duplicating some lanes (3:3:1:1). */
+   if (sz == 4 && insn[0] == 0xF3 && insn[1] == 0x0F 
+       && (insn[2] == 0x12 || insn[2] == 0x16)) {
+      IRTemp s3, s2, s1, s0;
+      IRTemp sV  = newTemp(Ity_V128);
+      Bool   isH = insn[2] == 0x16;
+      s3 = s2 = s1 = s0 = IRTemp_INVALID;
+
+      modrm = insn[3];
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg( eregOfRM(modrm)) );
+         DIP("movs%cdup %s,%s\n", isH ? 'h' : 'l',
+                                  nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)));
+         delta += 3+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("movs%cdup %s,%s\n", isH ? 'h' : 'l',
+	     dis_buf,
+             nameXMMReg(gregOfRM(modrm)));
+         delta += 3+alen;
+      }
+
+      breakup128to32s( sV, &s3, &s2, &s1, &s0 );
+      putXMMReg( gregOfRM(modrm), 
+                 isH ? mk128from32s( s3, s3, s1, s1 )
+                     : mk128from32s( s2, s2, s0, s0 ) );
+      goto decode_success;
+   }
+
+   /* F2 0F 12 = MOVDDUP -- move from E (mem or xmm) to G (xmm),
+      duplicating some lanes (0:1:0:1). */
+   if (sz == 4 && insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0x12) {
+      IRTemp sV = newTemp(Ity_V128);
+      IRTemp d0 = newTemp(Ity_I64);
+
+      modrm = insn[3];
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg( eregOfRM(modrm)) );
+         DIP("movddup %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                nameXMMReg(gregOfRM(modrm)));
+         delta += 3+1;
+         assign ( d0, unop(Iop_V128to64, mkexpr(sV)) );
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( d0, loadLE(Ity_I64, mkexpr(addr)) );
+         DIP("movddup %s,%s\n", dis_buf,
+                                nameXMMReg(gregOfRM(modrm)));
+         delta += 3+alen;
+      }
+
+      putXMMReg( gregOfRM(modrm), binop(Iop_64HLtoV128,mkexpr(d0),mkexpr(d0)) );
+      goto decode_success;
+   }
+
+   /* F2 0F D0 = ADDSUBPS -- 32x4 +/-/+/- from E (mem or xmm) to G (xmm). */
+   if (sz == 4 && insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0xD0) {
+      IRTemp a3, a2, a1, a0, s3, s2, s1, s0;
+      IRTemp eV   = newTemp(Ity_V128);
+      IRTemp gV   = newTemp(Ity_V128);
+      IRTemp addV = newTemp(Ity_V128);
+      IRTemp subV = newTemp(Ity_V128);
+      IRTemp rm     = newTemp(Ity_I32);
+      a3 = a2 = a1 = a0 = s3 = s2 = s1 = s0 = IRTemp_INVALID;
+
+      modrm = insn[3];
+      if (epartIsReg(modrm)) {
+         assign( eV, getXMMReg( eregOfRM(modrm)) );
+         DIP("addsubps %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+         delta += 3+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("addsubps %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)));
+         delta += 3+alen;
+      }
+
+      assign( gV, getXMMReg(gregOfRM(modrm)) );
+
+      assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+      assign( addV, triop(Iop_Add32Fx4, mkexpr(rm), mkexpr(gV), mkexpr(eV)) );
+      assign( subV, triop(Iop_Sub32Fx4, mkexpr(rm), mkexpr(gV), mkexpr(eV)) );
+
+      breakup128to32s( addV, &a3, &a2, &a1, &a0 );
+      breakup128to32s( subV, &s3, &s2, &s1, &s0 );
+
+      putXMMReg( gregOfRM(modrm), mk128from32s( a3, s2, a1, s0 ));
+      goto decode_success;
+   }
+
+   /* 66 0F D0 = ADDSUBPD -- 64x2 +/- from E (mem or xmm) to G (xmm). */
+   if (sz == 2 && insn[0] == 0x0F && insn[1] == 0xD0) {
+      IRTemp eV   = newTemp(Ity_V128);
+      IRTemp gV   = newTemp(Ity_V128);
+      IRTemp addV = newTemp(Ity_V128);
+      IRTemp subV = newTemp(Ity_V128);
+      IRTemp a1     = newTemp(Ity_I64);
+      IRTemp s0     = newTemp(Ity_I64);
+      IRTemp rm     = newTemp(Ity_I32);
+
+      modrm = insn[2];
+      if (epartIsReg(modrm)) {
+         assign( eV, getXMMReg( eregOfRM(modrm)) );
+         DIP("addsubpd %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+         delta += 2+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("addsubpd %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)));
+         delta += 2+alen;
+      }
+
+      assign( gV, getXMMReg(gregOfRM(modrm)) );
+
+      assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+      assign( addV, triop(Iop_Add64Fx2, mkexpr(rm), mkexpr(gV), mkexpr(eV)) );
+      assign( subV, triop(Iop_Sub64Fx2, mkexpr(rm), mkexpr(gV), mkexpr(eV)) );
+
+      assign( a1, unop(Iop_V128HIto64, mkexpr(addV) ));
+      assign( s0, unop(Iop_V128to64,   mkexpr(subV) ));
+
+      putXMMReg( gregOfRM(modrm), 
+                 binop(Iop_64HLtoV128, mkexpr(a1), mkexpr(s0)) );
+      goto decode_success;
+   }
+
+   /* F2 0F 7D = HSUBPS -- 32x4 sub across from E (mem or xmm) to G (xmm). */
+   /* F2 0F 7C = HADDPS -- 32x4 add across from E (mem or xmm) to G (xmm). */
+   if (sz == 4 && insn[0] == 0xF2 && insn[1] == 0x0F 
+       && (insn[2] == 0x7C || insn[2] == 0x7D)) {
+      IRTemp e3, e2, e1, e0, g3, g2, g1, g0;
+      IRTemp eV     = newTemp(Ity_V128);
+      IRTemp gV     = newTemp(Ity_V128);
+      IRTemp leftV  = newTemp(Ity_V128);
+      IRTemp rightV = newTemp(Ity_V128);
+      IRTemp rm     = newTemp(Ity_I32);
+      Bool   isAdd  = insn[2] == 0x7C;
+      const HChar* str = isAdd ? "add" : "sub";
+      e3 = e2 = e1 = e0 = g3 = g2 = g1 = g0 = IRTemp_INVALID;
+
+      modrm = insn[3];
+      if (epartIsReg(modrm)) {
+         assign( eV, getXMMReg( eregOfRM(modrm)) );
+         DIP("h%sps %s,%s\n", str, nameXMMReg(eregOfRM(modrm)),
+                                   nameXMMReg(gregOfRM(modrm)));
+         delta += 3+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("h%sps %s,%s\n", str, dis_buf,
+                                   nameXMMReg(gregOfRM(modrm)));
+         delta += 3+alen;
+      }
+
+      assign( gV, getXMMReg(gregOfRM(modrm)) );
+
+      breakup128to32s( eV, &e3, &e2, &e1, &e0 );
+      breakup128to32s( gV, &g3, &g2, &g1, &g0 );
+
+      assign( leftV,  mk128from32s( e2, e0, g2, g0 ) );
+      assign( rightV, mk128from32s( e3, e1, g3, g1 ) );
+
+      assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+      putXMMReg( gregOfRM(modrm), 
+                 triop(isAdd ? Iop_Add32Fx4 : Iop_Sub32Fx4, 
+                       mkexpr(rm), mkexpr(leftV), mkexpr(rightV) ) );
+      goto decode_success;
+   }
+
+   /* 66 0F 7D = HSUBPD -- 64x2 sub across from E (mem or xmm) to G (xmm). */
+   /* 66 0F 7C = HADDPD -- 64x2 add across from E (mem or xmm) to G (xmm). */
+   if (sz == 2 && insn[0] == 0x0F && (insn[1] == 0x7C || insn[1] == 0x7D)) {
+      IRTemp e1     = newTemp(Ity_I64);
+      IRTemp e0     = newTemp(Ity_I64);
+      IRTemp g1     = newTemp(Ity_I64);
+      IRTemp g0     = newTemp(Ity_I64);
+      IRTemp eV     = newTemp(Ity_V128);
+      IRTemp gV     = newTemp(Ity_V128);
+      IRTemp leftV  = newTemp(Ity_V128);
+      IRTemp rightV = newTemp(Ity_V128);
+      IRTemp rm     = newTemp(Ity_I32);
+      Bool   isAdd  = insn[1] == 0x7C;
+      const HChar* str = isAdd ? "add" : "sub";
+
+      modrm = insn[2];
+      if (epartIsReg(modrm)) {
+         assign( eV, getXMMReg( eregOfRM(modrm)) );
+         DIP("h%spd %s,%s\n", str, nameXMMReg(eregOfRM(modrm)),
+                                   nameXMMReg(gregOfRM(modrm)));
+         delta += 2+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+2, dis_buf );
+         assign( eV, loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("h%spd %s,%s\n", str, dis_buf,
+                              nameXMMReg(gregOfRM(modrm)));
+         delta += 2+alen;
+      }
+
+      assign( gV, getXMMReg(gregOfRM(modrm)) );
+
+      assign( e1, unop(Iop_V128HIto64, mkexpr(eV) ));
+      assign( e0, unop(Iop_V128to64, mkexpr(eV) ));
+      assign( g1, unop(Iop_V128HIto64, mkexpr(gV) ));
+      assign( g0, unop(Iop_V128to64, mkexpr(gV) ));
+
+      assign( leftV,  binop(Iop_64HLtoV128, mkexpr(e0),mkexpr(g0)) );
+      assign( rightV, binop(Iop_64HLtoV128, mkexpr(e1),mkexpr(g1)) );
+
+      assign( rm, get_FAKE_roundingmode() ); /* XXXROUNDINGFIXME */
+      putXMMReg( gregOfRM(modrm), 
+                 triop(isAdd ? Iop_Add64Fx2 : Iop_Sub64Fx2, 
+                       mkexpr(rm), mkexpr(leftV), mkexpr(rightV) ) );
+      goto decode_success;
+   }
+
+   /* F2 0F F0 = LDDQU -- move from E (mem or xmm) to G (xmm). */
+   if (sz == 4 && insn[0] == 0xF2 && insn[1] == 0x0F && insn[2] == 0xF0) {
+      modrm = getIByte(delta+3);
+      if (epartIsReg(modrm)) {
+         goto decode_failure;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         putXMMReg( gregOfRM(modrm), 
+                    loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("lddqu %s,%s\n", dis_buf,
+                              nameXMMReg(gregOfRM(modrm)));
+         delta += 3+alen;
+      }
+      goto decode_success;
+   }
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSE3 decoder.                     --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- start of the SSSE3 decoder.                  --- */
+   /* ---------------------------------------------------- */
+
+   /* 0F 38 04 = PMADDUBSW -- Multiply and Add Packed Signed and
+      Unsigned Bytes (MMX) */
+   if (sz == 4
+       && insn[0] == 0x0F && insn[1] == 0x38 && insn[2] == 0x04) {
+      IRTemp sV        = newTemp(Ity_I64);
+      IRTemp dV        = newTemp(Ity_I64);
+      IRTemp sVoddsSX  = newTemp(Ity_I64);
+      IRTemp sVevensSX = newTemp(Ity_I64);
+      IRTemp dVoddsZX  = newTemp(Ity_I64);
+      IRTemp dVevensZX = newTemp(Ity_I64);
+
+      modrm = insn[3];
+      do_MMX_preamble();
+      assign( dV, getMMXReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getMMXReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("pmaddubsw %s,%s\n", nameMMXReg(eregOfRM(modrm)),
+                                  nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("pmaddubsw %s,%s\n", dis_buf,
+                                  nameMMXReg(gregOfRM(modrm)));
+      }
+
+      /* compute dV unsigned x sV signed */
+      assign( sVoddsSX,
+              binop(Iop_SarN16x4, mkexpr(sV), mkU8(8)) );
+      assign( sVevensSX,
+              binop(Iop_SarN16x4, 
+                    binop(Iop_ShlN16x4, mkexpr(sV), mkU8(8)), 
+                    mkU8(8)) );
+      assign( dVoddsZX,
+              binop(Iop_ShrN16x4, mkexpr(dV), mkU8(8)) );
+      assign( dVevensZX,
+              binop(Iop_ShrN16x4,
+                    binop(Iop_ShlN16x4, mkexpr(dV), mkU8(8)),
+                    mkU8(8)) );
+
+      putMMXReg(
+         gregOfRM(modrm),
+         binop(Iop_QAdd16Sx4,
+               binop(Iop_Mul16x4, mkexpr(sVoddsSX), mkexpr(dVoddsZX)),
+               binop(Iop_Mul16x4, mkexpr(sVevensSX), mkexpr(dVevensZX))
+         )
+      );
+      goto decode_success;
+   }
+
+   /* 66 0F 38 04 = PMADDUBSW -- Multiply and Add Packed Signed and
+      Unsigned Bytes (XMM) */
+   if (sz == 2
+       && insn[0] == 0x0F && insn[1] == 0x38 && insn[2] == 0x04) {
+      IRTemp sV        = newTemp(Ity_V128);
+      IRTemp dV        = newTemp(Ity_V128);
+      IRTemp sVoddsSX  = newTemp(Ity_V128);
+      IRTemp sVevensSX = newTemp(Ity_V128);
+      IRTemp dVoddsZX  = newTemp(Ity_V128);
+      IRTemp dVevensZX = newTemp(Ity_V128);
+
+      modrm = insn[3];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("pmaddubsw %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("pmaddubsw %s,%s\n", dis_buf,
+                                  nameXMMReg(gregOfRM(modrm)));
+      }
+
+      /* compute dV unsigned x sV signed */
+      assign( sVoddsSX,
+              binop(Iop_SarN16x8, mkexpr(sV), mkU8(8)) );
+      assign( sVevensSX,
+              binop(Iop_SarN16x8, 
+                    binop(Iop_ShlN16x8, mkexpr(sV), mkU8(8)), 
+                    mkU8(8)) );
+      assign( dVoddsZX,
+              binop(Iop_ShrN16x8, mkexpr(dV), mkU8(8)) );
+      assign( dVevensZX,
+              binop(Iop_ShrN16x8,
+                    binop(Iop_ShlN16x8, mkexpr(dV), mkU8(8)),
+                    mkU8(8)) );
+
+      putXMMReg(
+         gregOfRM(modrm),
+         binop(Iop_QAdd16Sx8,
+               binop(Iop_Mul16x8, mkexpr(sVoddsSX), mkexpr(dVoddsZX)),
+               binop(Iop_Mul16x8, mkexpr(sVevensSX), mkexpr(dVevensZX))
+         )
+      );
+      goto decode_success;
+   }
+
+   /* ***--- these are MMX class insns introduced in SSSE3 ---*** */
+   /* 0F 38 03 = PHADDSW -- 16x4 signed qadd across from E (mem or
+      mmx) and G to G (mmx). */
+   /* 0F 38 07 = PHSUBSW -- 16x4 signed qsub across from E (mem or
+      mmx) and G to G (mmx). */
+   /* 0F 38 01 = PHADDW -- 16x4 add across from E (mem or mmx) and G
+      to G (mmx). */
+   /* 0F 38 05 = PHSUBW -- 16x4 sub across from E (mem or mmx) and G
+      to G (mmx). */
+   /* 0F 38 02 = PHADDD -- 32x2 add across from E (mem or mmx) and G
+      to G (mmx). */
+   /* 0F 38 06 = PHSUBD -- 32x2 sub across from E (mem or mmx) and G
+      to G (mmx). */
+
+   if (sz == 4 
+       && insn[0] == 0x0F && insn[1] == 0x38 
+       && (insn[2] == 0x03 || insn[2] == 0x07 || insn[2] == 0x01
+           || insn[2] == 0x05 || insn[2] == 0x02 || insn[2] == 0x06)) {
+      const HChar* str = "???";
+      IROp   opV64  = Iop_INVALID;
+      IROp   opCatO = Iop_CatOddLanes16x4;
+      IROp   opCatE = Iop_CatEvenLanes16x4;
+      IRTemp sV     = newTemp(Ity_I64);
+      IRTemp dV     = newTemp(Ity_I64);
+
+      modrm = insn[3];
+
+      switch (insn[2]) {
+         case 0x03: opV64 = Iop_QAdd16Sx4; str = "addsw"; break;
+         case 0x07: opV64 = Iop_QSub16Sx4; str = "subsw"; break;
+         case 0x01: opV64 = Iop_Add16x4;   str = "addw";  break;
+         case 0x05: opV64 = Iop_Sub16x4;   str = "subw";  break;
+         case 0x02: opV64 = Iop_Add32x2;   str = "addd";  break;
+         case 0x06: opV64 = Iop_Sub32x2;   str = "subd";  break;
+         default: vassert(0);
+      }
+      if (insn[2] == 0x02 || insn[2] == 0x06) {
+         opCatO = Iop_InterleaveHI32x2;
+         opCatE = Iop_InterleaveLO32x2;
+      }
+
+      do_MMX_preamble();
+      assign( dV, getMMXReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getMMXReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("ph%s %s,%s\n", str, nameMMXReg(eregOfRM(modrm)),
+                                  nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("ph%s %s,%s\n", str, dis_buf,
+                                  nameMMXReg(gregOfRM(modrm)));
+      }
+
+      putMMXReg(
+         gregOfRM(modrm),
+         binop(opV64,
+               binop(opCatE,mkexpr(sV),mkexpr(dV)),
+               binop(opCatO,mkexpr(sV),mkexpr(dV))
+         )
+      );
+      goto decode_success;
+   }
+
+   /* 66 0F 38 03 = PHADDSW -- 16x8 signed qadd across from E (mem or
+      xmm) and G to G (xmm). */
+   /* 66 0F 38 07 = PHSUBSW -- 16x8 signed qsub across from E (mem or
+      xmm) and G to G (xmm). */
+   /* 66 0F 38 01 = PHADDW -- 16x8 add across from E (mem or xmm) and
+      G to G (xmm). */
+   /* 66 0F 38 05 = PHSUBW -- 16x8 sub across from E (mem or xmm) and
+      G to G (xmm). */
+   /* 66 0F 38 02 = PHADDD -- 32x4 add across from E (mem or xmm) and
+      G to G (xmm). */
+   /* 66 0F 38 06 = PHSUBD -- 32x4 sub across from E (mem or xmm) and
+      G to G (xmm). */
+
+   if (sz == 2
+       && insn[0] == 0x0F && insn[1] == 0x38 
+       && (insn[2] == 0x03 || insn[2] == 0x07 || insn[2] == 0x01
+           || insn[2] == 0x05 || insn[2] == 0x02 || insn[2] == 0x06)) {
+      const HChar* str = "???";
+      IROp   opV64  = Iop_INVALID;
+      IROp   opCatO = Iop_CatOddLanes16x4;
+      IROp   opCatE = Iop_CatEvenLanes16x4;
+      IRTemp sV     = newTemp(Ity_V128);
+      IRTemp dV     = newTemp(Ity_V128);
+      IRTemp sHi    = newTemp(Ity_I64);
+      IRTemp sLo    = newTemp(Ity_I64);
+      IRTemp dHi    = newTemp(Ity_I64);
+      IRTemp dLo    = newTemp(Ity_I64);
+
+      modrm = insn[3];
+
+      switch (insn[2]) {
+         case 0x03: opV64 = Iop_QAdd16Sx4; str = "addsw"; break;
+         case 0x07: opV64 = Iop_QSub16Sx4; str = "subsw"; break;
+         case 0x01: opV64 = Iop_Add16x4;   str = "addw";  break;
+         case 0x05: opV64 = Iop_Sub16x4;   str = "subw";  break;
+         case 0x02: opV64 = Iop_Add32x2;   str = "addd";  break;
+         case 0x06: opV64 = Iop_Sub32x2;   str = "subd";  break;
+         default: vassert(0);
+      }
+      if (insn[2] == 0x02 || insn[2] == 0x06) {
+         opCatO = Iop_InterleaveHI32x2;
+         opCatE = Iop_InterleaveLO32x2;
+      }
+
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg( eregOfRM(modrm)) );
+         DIP("ph%s %s,%s\n", str, nameXMMReg(eregOfRM(modrm)),
+                                  nameXMMReg(gregOfRM(modrm)));
+         delta += 3+1;
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         DIP("ph%s %s,%s\n", str, dis_buf,
+                             nameXMMReg(gregOfRM(modrm)));
+         delta += 3+alen;
+      }
+
+      assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
+      assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
+      assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+      /* This isn't a particularly efficient way to compute the
+         result, but at least it avoids a proliferation of IROps,
+         hence avoids complicating all the backends. */
+      putXMMReg(
+         gregOfRM(modrm), 
+         binop(Iop_64HLtoV128,
+               binop(opV64,
+                     binop(opCatE,mkexpr(sHi),mkexpr(sLo)),
+                     binop(opCatO,mkexpr(sHi),mkexpr(sLo))
+               ),
+               binop(opV64,
+                     binop(opCatE,mkexpr(dHi),mkexpr(dLo)),
+                     binop(opCatO,mkexpr(dHi),mkexpr(dLo))
+               )
+         )
+      );
+      goto decode_success;
+   }
+
+   /* 0F 38 0B = PMULHRSW -- Packed Multiply High with Round and Scale
+      (MMX) */
+   if (sz == 4 
+       && insn[0] == 0x0F && insn[1] == 0x38 && insn[2] == 0x0B) {
+      IRTemp sV = newTemp(Ity_I64);
+      IRTemp dV = newTemp(Ity_I64);
+
+      modrm = insn[3];
+      do_MMX_preamble();
+      assign( dV, getMMXReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getMMXReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("pmulhrsw %s,%s\n", nameMMXReg(eregOfRM(modrm)),
+                                 nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("pmulhrsw %s,%s\n", dis_buf,
+                                 nameMMXReg(gregOfRM(modrm)));
+      }
+
+      putMMXReg(
+         gregOfRM(modrm),
+         dis_PMULHRSW_helper( mkexpr(sV), mkexpr(dV) )
+      );
+      goto decode_success;
+   }
+
+   /* 66 0F 38 0B = PMULHRSW -- Packed Multiply High with Round and
+      Scale (XMM) */
+   if (sz == 2
+       && insn[0] == 0x0F && insn[1] == 0x38 && insn[2] == 0x0B) {
+      IRTemp sV  = newTemp(Ity_V128);
+      IRTemp dV  = newTemp(Ity_V128);
+      IRTemp sHi = newTemp(Ity_I64);
+      IRTemp sLo = newTemp(Ity_I64);
+      IRTemp dHi = newTemp(Ity_I64);
+      IRTemp dLo = newTemp(Ity_I64);
+
+      modrm = insn[3];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("pmulhrsw %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                                 nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("pmulhrsw %s,%s\n", dis_buf,
+                                 nameXMMReg(gregOfRM(modrm)));
+      }
+
+      assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
+      assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
+      assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+      putXMMReg(
+         gregOfRM(modrm),
+         binop(Iop_64HLtoV128,
+               dis_PMULHRSW_helper( mkexpr(sHi), mkexpr(dHi) ),
+               dis_PMULHRSW_helper( mkexpr(sLo), mkexpr(dLo) )
+         )
+      );
+      goto decode_success;
+   }
+
+   /* 0F 38 08 = PSIGNB -- Packed Sign 8x8  (MMX) */
+   /* 0F 38 09 = PSIGNW -- Packed Sign 16x4 (MMX) */
+   /* 0F 38 0A = PSIGND -- Packed Sign 32x2 (MMX) */
+   if (sz == 4 
+       && insn[0] == 0x0F && insn[1] == 0x38 
+       && (insn[2] == 0x08 || insn[2] == 0x09 || insn[2] == 0x0A)) {
+      IRTemp sV      = newTemp(Ity_I64);
+      IRTemp dV      = newTemp(Ity_I64);
+      const HChar* str = "???";
+      Int    laneszB = 0;
+
+      switch (insn[2]) {
+         case 0x08: laneszB = 1; str = "b"; break;
+         case 0x09: laneszB = 2; str = "w"; break;
+         case 0x0A: laneszB = 4; str = "d"; break;
+         default: vassert(0);
+      }
+
+      modrm = insn[3];
+      do_MMX_preamble();
+      assign( dV, getMMXReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getMMXReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("psign%s %s,%s\n", str, nameMMXReg(eregOfRM(modrm)),
+                                     nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("psign%s %s,%s\n", str, dis_buf,
+                                     nameMMXReg(gregOfRM(modrm)));
+      }
+
+      putMMXReg(
+         gregOfRM(modrm),
+         dis_PSIGN_helper( mkexpr(sV), mkexpr(dV), laneszB )
+      );
+      goto decode_success;
+   }
+
+   /* 66 0F 38 08 = PSIGNB -- Packed Sign 8x16 (XMM) */
+   /* 66 0F 38 09 = PSIGNW -- Packed Sign 16x8 (XMM) */
+   /* 66 0F 38 0A = PSIGND -- Packed Sign 32x4 (XMM) */
+   if (sz == 2
+       && insn[0] == 0x0F && insn[1] == 0x38 
+       && (insn[2] == 0x08 || insn[2] == 0x09 || insn[2] == 0x0A)) {
+      IRTemp sV      = newTemp(Ity_V128);
+      IRTemp dV      = newTemp(Ity_V128);
+      IRTemp sHi     = newTemp(Ity_I64);
+      IRTemp sLo     = newTemp(Ity_I64);
+      IRTemp dHi     = newTemp(Ity_I64);
+      IRTemp dLo     = newTemp(Ity_I64);
+      const HChar* str = "???";
+      Int    laneszB = 0;
+
+      switch (insn[2]) {
+         case 0x08: laneszB = 1; str = "b"; break;
+         case 0x09: laneszB = 2; str = "w"; break;
+         case 0x0A: laneszB = 4; str = "d"; break;
+         default: vassert(0);
+      }
+
+      modrm = insn[3];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("psign%s %s,%s\n", str, nameXMMReg(eregOfRM(modrm)),
+                                     nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("psign%s %s,%s\n", str, dis_buf,
+                                     nameXMMReg(gregOfRM(modrm)));
+      }
+
+      assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
+      assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
+      assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+      putXMMReg(
+         gregOfRM(modrm),
+         binop(Iop_64HLtoV128,
+               dis_PSIGN_helper( mkexpr(sHi), mkexpr(dHi), laneszB ),
+               dis_PSIGN_helper( mkexpr(sLo), mkexpr(dLo), laneszB )
+         )
+      );
+      goto decode_success;
+   }
+
+   /* 0F 38 1C = PABSB -- Packed Absolute Value 8x8  (MMX) */
+   /* 0F 38 1D = PABSW -- Packed Absolute Value 16x4 (MMX) */
+   /* 0F 38 1E = PABSD -- Packed Absolute Value 32x2 (MMX) */
+   if (sz == 4 
+       && insn[0] == 0x0F && insn[1] == 0x38 
+       && (insn[2] == 0x1C || insn[2] == 0x1D || insn[2] == 0x1E)) {
+      IRTemp sV      = newTemp(Ity_I64);
+      const HChar* str = "???";
+      Int    laneszB = 0;
+
+      switch (insn[2]) {
+         case 0x1C: laneszB = 1; str = "b"; break;
+         case 0x1D: laneszB = 2; str = "w"; break;
+         case 0x1E: laneszB = 4; str = "d"; break;
+         default: vassert(0);
+      }
+
+      modrm = insn[3];
+      do_MMX_preamble();
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getMMXReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("pabs%s %s,%s\n", str, nameMMXReg(eregOfRM(modrm)),
+                                    nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("pabs%s %s,%s\n", str, dis_buf,
+                                    nameMMXReg(gregOfRM(modrm)));
+      }
+
+      putMMXReg(
+         gregOfRM(modrm),
+         dis_PABS_helper( mkexpr(sV), laneszB )
+      );
+      goto decode_success;
+   }
+
+   /* 66 0F 38 1C = PABSB -- Packed Absolute Value 8x16 (XMM) */
+   /* 66 0F 38 1D = PABSW -- Packed Absolute Value 16x8 (XMM) */
+   /* 66 0F 38 1E = PABSD -- Packed Absolute Value 32x4 (XMM) */
+   if (sz == 2
+       && insn[0] == 0x0F && insn[1] == 0x38 
+       && (insn[2] == 0x1C || insn[2] == 0x1D || insn[2] == 0x1E)) {
+      IRTemp sV      = newTemp(Ity_V128);
+      IRTemp sHi     = newTemp(Ity_I64);
+      IRTemp sLo     = newTemp(Ity_I64);
+      const HChar* str = "???";
+      Int    laneszB = 0;
+
+      switch (insn[2]) {
+         case 0x1C: laneszB = 1; str = "b"; break;
+         case 0x1D: laneszB = 2; str = "w"; break;
+         case 0x1E: laneszB = 4; str = "d"; break;
+         default: vassert(0);
+      }
+
+      modrm = insn[3];
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("pabs%s %s,%s\n", str, nameXMMReg(eregOfRM(modrm)),
+                                    nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("pabs%s %s,%s\n", str, dis_buf,
+                                    nameXMMReg(gregOfRM(modrm)));
+      }
+
+      assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+      putXMMReg(
+         gregOfRM(modrm),
+         binop(Iop_64HLtoV128,
+               dis_PABS_helper( mkexpr(sHi), laneszB ),
+               dis_PABS_helper( mkexpr(sLo), laneszB )
+         )
+      );
+      goto decode_success;
+   }
+
+   /* 0F 3A 0F = PALIGNR -- Packed Align Right (MMX) */
+   if (sz == 4 
+       && insn[0] == 0x0F && insn[1] == 0x3A && insn[2] == 0x0F) {
+      IRTemp sV  = newTemp(Ity_I64);
+      IRTemp dV  = newTemp(Ity_I64);
+      IRTemp res = newTemp(Ity_I64);
+
+      modrm = insn[3];
+      do_MMX_preamble();
+      assign( dV, getMMXReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getMMXReg(eregOfRM(modrm)) );
+         d32 = (UInt)insn[3+1];
+         delta += 3+1+1;
+         DIP("palignr $%d,%s,%s\n",  (Int)d32, 
+                                     nameMMXReg(eregOfRM(modrm)),
+                                     nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+         d32 = (UInt)insn[3+alen];
+         delta += 3+alen+1;
+         DIP("palignr $%d%s,%s\n", (Int)d32,
+                                   dis_buf,
+                                   nameMMXReg(gregOfRM(modrm)));
+      }
+
+      if (d32 == 0) {
+         assign( res, mkexpr(sV) );
+      }
+      else if (d32 >= 1 && d32 <= 7) {
+         assign(res, 
+                binop(Iop_Or64,
+                      binop(Iop_Shr64, mkexpr(sV), mkU8(8*d32)),
+                      binop(Iop_Shl64, mkexpr(dV), mkU8(8*(8-d32))
+                     )));
+      }
+      else if (d32 == 8) {
+        assign( res, mkexpr(dV) );
+      }
+      else if (d32 >= 9 && d32 <= 15) {
+         assign( res, binop(Iop_Shr64, mkexpr(dV), mkU8(8*(d32-8))) );
+      }
+      else if (d32 >= 16 && d32 <= 255) {
+         assign( res, mkU64(0) );
+      }
+      else
+         vassert(0);
+
+      putMMXReg( gregOfRM(modrm), mkexpr(res) );
+      goto decode_success;
+   }
+
+   /* 66 0F 3A 0F = PALIGNR -- Packed Align Right (XMM) */
+   if (sz == 2
+       && insn[0] == 0x0F && insn[1] == 0x3A && insn[2] == 0x0F) {
+      IRTemp sV  = newTemp(Ity_V128);
+      IRTemp dV  = newTemp(Ity_V128);
+      IRTemp sHi = newTemp(Ity_I64);
+      IRTemp sLo = newTemp(Ity_I64);
+      IRTemp dHi = newTemp(Ity_I64);
+      IRTemp dLo = newTemp(Ity_I64);
+      IRTemp rHi = newTemp(Ity_I64);
+      IRTemp rLo = newTemp(Ity_I64);
+
+      modrm = insn[3];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         d32 = (UInt)insn[3+1];
+         delta += 3+1+1;
+         DIP("palignr $%d,%s,%s\n", (Int)d32,
+                                    nameXMMReg(eregOfRM(modrm)),
+                                    nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         d32 = (UInt)insn[3+alen];
+         delta += 3+alen+1;
+         DIP("palignr $%d,%s,%s\n", (Int)d32,
+                                    dis_buf,
+                                    nameXMMReg(gregOfRM(modrm)));
+      }
+
+      assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
+      assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
+      assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+      if (d32 == 0) {
+         assign( rHi, mkexpr(sHi) );
+         assign( rLo, mkexpr(sLo) );
+      }
+      else if (d32 >= 1 && d32 <= 7) {
+         assign( rHi, dis_PALIGNR_XMM_helper(dLo, sHi, d32) );
+         assign( rLo, dis_PALIGNR_XMM_helper(sHi, sLo, d32) );
+      }
+      else if (d32 == 8) {
+         assign( rHi, mkexpr(dLo) );
+         assign( rLo, mkexpr(sHi) );
+      }
+      else if (d32 >= 9 && d32 <= 15) {
+         assign( rHi, dis_PALIGNR_XMM_helper(dHi, dLo, d32-8) );
+         assign( rLo, dis_PALIGNR_XMM_helper(dLo, sHi, d32-8) );
+      }
+      else if (d32 == 16) {
+         assign( rHi, mkexpr(dHi) );
+         assign( rLo, mkexpr(dLo) );
+      }
+      else if (d32 >= 17 && d32 <= 23) {
+         assign( rHi, binop(Iop_Shr64, mkexpr(dHi), mkU8(8*(d32-16))) );
+         assign( rLo, dis_PALIGNR_XMM_helper(dHi, dLo, d32-16) );
+      }
+      else if (d32 == 24) {
+         assign( rHi, mkU64(0) );
+         assign( rLo, mkexpr(dHi) );
+      }
+      else if (d32 >= 25 && d32 <= 31) {
+         assign( rHi, mkU64(0) );
+         assign( rLo, binop(Iop_Shr64, mkexpr(dHi), mkU8(8*(d32-24))) );
+      }
+      else if (d32 >= 32 && d32 <= 255) {
+         assign( rHi, mkU64(0) );
+         assign( rLo, mkU64(0) );
+      }
+      else
+         vassert(0);
+
+      putXMMReg(
+         gregOfRM(modrm),
+         binop(Iop_64HLtoV128, mkexpr(rHi), mkexpr(rLo))
+      );
+      goto decode_success;
+   }
+
+   /* 0F 38 00 = PSHUFB -- Packed Shuffle Bytes 8x8 (MMX) */
+   if (sz == 4 
+       && insn[0] == 0x0F && insn[1] == 0x38 && insn[2] == 0x00) {
+      IRTemp sV      = newTemp(Ity_I64);
+      IRTemp dV      = newTemp(Ity_I64);
+
+      modrm = insn[3];
+      do_MMX_preamble();
+      assign( dV, getMMXReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getMMXReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("pshufb %s,%s\n", nameMMXReg(eregOfRM(modrm)),
+                               nameMMXReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         assign( sV, loadLE(Ity_I64, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("pshufb %s,%s\n", dis_buf,
+                               nameMMXReg(gregOfRM(modrm)));
+      }
+
+      putMMXReg(
+         gregOfRM(modrm),
+         binop(
+            Iop_And64,
+            /* permute the lanes */
+            binop(
+               Iop_Perm8x8,
+               mkexpr(dV),
+               binop(Iop_And64, mkexpr(sV), mkU64(0x0707070707070707ULL))
+            ),
+            /* mask off lanes which have (index & 0x80) == 0x80 */
+            unop(Iop_Not64, binop(Iop_SarN8x8, mkexpr(sV), mkU8(7)))
+         )
+      );
+      goto decode_success;
+   }
+
+   /* 66 0F 38 00 = PSHUFB -- Packed Shuffle Bytes 8x16 (XMM) */
+   if (sz == 2
+       && insn[0] == 0x0F && insn[1] == 0x38 && insn[2] == 0x00) {
+      IRTemp sV         = newTemp(Ity_V128);
+      IRTemp dV         = newTemp(Ity_V128);
+      IRTemp sHi        = newTemp(Ity_I64);
+      IRTemp sLo        = newTemp(Ity_I64);
+      IRTemp dHi        = newTemp(Ity_I64);
+      IRTemp dLo        = newTemp(Ity_I64);
+      IRTemp rHi        = newTemp(Ity_I64);
+      IRTemp rLo        = newTemp(Ity_I64);
+      IRTemp sevens     = newTemp(Ity_I64);
+      IRTemp mask0x80hi = newTemp(Ity_I64);
+      IRTemp mask0x80lo = newTemp(Ity_I64);
+      IRTemp maskBit3hi = newTemp(Ity_I64);
+      IRTemp maskBit3lo = newTemp(Ity_I64);
+      IRTemp sAnd7hi    = newTemp(Ity_I64);
+      IRTemp sAnd7lo    = newTemp(Ity_I64);
+      IRTemp permdHi    = newTemp(Ity_I64);
+      IRTemp permdLo    = newTemp(Ity_I64);
+
+      modrm = insn[3];
+      assign( dV, getXMMReg(gregOfRM(modrm)) );
+
+      if (epartIsReg(modrm)) {
+         assign( sV, getXMMReg(eregOfRM(modrm)) );
+         delta += 3+1;
+         DIP("pshufb %s,%s\n", nameXMMReg(eregOfRM(modrm)),
+                               nameXMMReg(gregOfRM(modrm)));
+      } else {
+         addr = disAMode ( &alen, sorb, delta+3, dis_buf );
+         gen_SEGV_if_not_16_aligned( addr );
+         assign( sV, loadLE(Ity_V128, mkexpr(addr)) );
+         delta += 3+alen;
+         DIP("pshufb %s,%s\n", dis_buf,
+                               nameXMMReg(gregOfRM(modrm)));
+      }
+
+      assign( dHi, unop(Iop_V128HIto64, mkexpr(dV)) );
+      assign( dLo, unop(Iop_V128to64,   mkexpr(dV)) );
+      assign( sHi, unop(Iop_V128HIto64, mkexpr(sV)) );
+      assign( sLo, unop(Iop_V128to64,   mkexpr(sV)) );
+
+      assign( sevens, mkU64(0x0707070707070707ULL) );
+
+      /*
+      mask0x80hi = Not(SarN8x8(sHi,7))
+      maskBit3hi = SarN8x8(ShlN8x8(sHi,4),7)
+      sAnd7hi    = And(sHi,sevens)
+      permdHi    = Or( And(Perm8x8(dHi,sAnd7hi),maskBit3hi),
+                       And(Perm8x8(dLo,sAnd7hi),Not(maskBit3hi)) )
+      rHi        = And(permdHi,mask0x80hi)
+      */
+      assign(
+         mask0x80hi,
+         unop(Iop_Not64, binop(Iop_SarN8x8,mkexpr(sHi),mkU8(7))));
+
+      assign(
+         maskBit3hi,
+         binop(Iop_SarN8x8,
+               binop(Iop_ShlN8x8,mkexpr(sHi),mkU8(4)),
+               mkU8(7)));
+
+      assign(sAnd7hi, binop(Iop_And64,mkexpr(sHi),mkexpr(sevens)));
+
+      assign(
+         permdHi,
+         binop(
+            Iop_Or64,
+            binop(Iop_And64,
+                  binop(Iop_Perm8x8,mkexpr(dHi),mkexpr(sAnd7hi)),
+                  mkexpr(maskBit3hi)),
+            binop(Iop_And64,
+                  binop(Iop_Perm8x8,mkexpr(dLo),mkexpr(sAnd7hi)),
+                  unop(Iop_Not64,mkexpr(maskBit3hi))) ));
+
+      assign(rHi, binop(Iop_And64,mkexpr(permdHi),mkexpr(mask0x80hi)) );
+
+      /* And the same for the lower half of the result.  What fun. */
+
+      assign(
+         mask0x80lo,
+         unop(Iop_Not64, binop(Iop_SarN8x8,mkexpr(sLo),mkU8(7))));
+
+      assign(
+         maskBit3lo,
+         binop(Iop_SarN8x8,
+               binop(Iop_ShlN8x8,mkexpr(sLo),mkU8(4)),
+               mkU8(7)));
+
+      assign(sAnd7lo, binop(Iop_And64,mkexpr(sLo),mkexpr(sevens)));
+
+      assign(
+         permdLo,
+         binop(
+            Iop_Or64,
+            binop(Iop_And64,
+                  binop(Iop_Perm8x8,mkexpr(dHi),mkexpr(sAnd7lo)),
+                  mkexpr(maskBit3lo)),
+            binop(Iop_And64,
+                  binop(Iop_Perm8x8,mkexpr(dLo),mkexpr(sAnd7lo)),
+                  unop(Iop_Not64,mkexpr(maskBit3lo))) ));
+
+      assign(rLo, binop(Iop_And64,mkexpr(permdLo),mkexpr(mask0x80lo)) );
+
+      putXMMReg(
+         gregOfRM(modrm),
+         binop(Iop_64HLtoV128, mkexpr(rHi), mkexpr(rLo))
+      );
+      goto decode_success;
+   }
+   
+   /* 0F 38 F0 = MOVBE m16/32(E), r16/32(G) */
+   /* 0F 38 F1 = MOVBE r16/32(G), m16/32(E) */
+   if ((sz == 2 || sz == 4)
+       && insn[0] == 0x0F && insn[1] == 0x38
+       && (insn[2] == 0xF0 || insn[2] == 0xF1)
+       && !epartIsReg(insn[3])) {
+
+      modrm = insn[3];
+      addr = disAMode(&alen, sorb, delta + 3, dis_buf);
+      delta += 3 + alen;
+      ty = szToITy(sz);
+      IRTemp src = newTemp(ty);
+
+      if (insn[2] == 0xF0) { /* LOAD */
+         assign(src, loadLE(ty, mkexpr(addr)));
+         IRTemp dst = math_BSWAP(src, ty);
+         putIReg(sz, gregOfRM(modrm), mkexpr(dst));
+         DIP("movbe %s,%s\n", dis_buf, nameIReg(sz, gregOfRM(modrm)));
+      } else { /* STORE */
+         assign(src, getIReg(sz, gregOfRM(modrm)));
+         IRTemp dst = math_BSWAP(src, ty);
+         storeLE(mkexpr(addr), mkexpr(dst));
+         DIP("movbe %s,%s\n", nameIReg(sz, gregOfRM(modrm)), dis_buf);
+      }
+      goto decode_success;
+   }
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSSE3 decoder.                    --- */
+   /* ---------------------------------------------------- */
+
+   /* ---------------------------------------------------- */
+   /* --- start of the SSE4 decoder                    --- */
+   /* ---------------------------------------------------- */
+
+   /* 66 0F 3A 0B /r ib = ROUNDSD imm8, xmm2/m64, xmm1
+      (Partial implementation only -- only deal with cases where
+      the rounding mode is specified directly by the immediate byte.)
+      66 0F 3A 0A /r ib = ROUNDSS imm8, xmm2/m32, xmm1
+      (Limitations ditto)
+   */
+   if (sz == 2 
+       && insn[0] == 0x0F && insn[1] == 0x3A
+       && (/*insn[2] == 0x0B || */insn[2] == 0x0A)) {
+
+      Bool   isD = insn[2] == 0x0B;
+      IRTemp src = newTemp(isD ? Ity_F64 : Ity_F32);
+      IRTemp res = newTemp(isD ? Ity_F64 : Ity_F32);
+      Int    imm = 0;
+
+      modrm = insn[3];
+
+      if (epartIsReg(modrm)) {
+         assign( src, 
+                 isD ? getXMMRegLane64F( eregOfRM(modrm), 0 )
+                     : getXMMRegLane32F( eregOfRM(modrm), 0 ) );
+         imm = insn[3+1];
+         if (imm & ~3) goto decode_failure;
+         delta += 3+1+1;
+         DIP( "rounds%c $%d,%s,%s\n",
+              isD ? 'd' : 's',
+              imm, nameXMMReg( eregOfRM(modrm) ),
+                   nameXMMReg( gregOfRM(modrm) ) );
+      } else {
+         addr = disAMode( &alen, sorb, delta+3, dis_buf );
+         assign( src, loadLE( isD ? Ity_F64 : Ity_F32, mkexpr(addr) ));
+         imm = insn[3+alen];
+         if (imm & ~3) goto decode_failure;
+         delta += 3+alen+1;
+         DIP( "roundsd $%d,%s,%s\n",
+              imm, dis_buf, nameXMMReg( gregOfRM(modrm) ) );
+      }
+
+      /* (imm & 3) contains an Intel-encoded rounding mode.  Because
+         that encoding is the same as the encoding for IRRoundingMode,
+         we can use that value directly in the IR as a rounding
+         mode. */
+      assign(res, binop(isD ? Iop_RoundF64toInt : Iop_RoundF32toInt,
+                  mkU32(imm & 3), mkexpr(src)) );
+
+      if (isD)
+         putXMMRegLane64F( gregOfRM(modrm), 0, mkexpr(res) );
+      else
+         putXMMRegLane32F( gregOfRM(modrm), 0, mkexpr(res) );
+
+      goto decode_success;
+   }
+
+   /* F3 0F BD -- LZCNT (count leading zeroes.  An AMD extension,
+      which we can only decode if we're sure this is an AMD cpu that
+      supports LZCNT, since otherwise it's BSR, which behaves
+      differently. */
+   if (insn[0] == 0xF3 && insn[1] == 0x0F && insn[2] == 0xBD
+       && 0 != (archinfo->hwcaps & VEX_HWCAPS_X86_LZCNT)) {
+      vassert(sz == 2 || sz == 4);
+      /*IRType*/ ty  = szToITy(sz);
+      IRTemp     src = newTemp(ty);
+      modrm = insn[3];
+      if (epartIsReg(modrm)) {
+         assign(src, getIReg(sz, eregOfRM(modrm)));
+         delta += 3+1;
+         DIP("lzcnt%c %s, %s\n", nameISize(sz),
+             nameIReg(sz, eregOfRM(modrm)),
+             nameIReg(sz, gregOfRM(modrm)));
+      } else {
+         addr = disAMode( &alen, sorb, delta+3, dis_buf );
+         assign(src, loadLE(ty, mkexpr(addr)));
+         delta += 3+alen;
+         DIP("lzcnt%c %s, %s\n", nameISize(sz), dis_buf,
+             nameIReg(sz, gregOfRM(modrm)));
+      }
+
+      IRTemp res = gen_LZCNT(ty, src);
+      putIReg(sz, gregOfRM(modrm), mkexpr(res));
+
+      // Update flags.  This is pretty lame .. perhaps can do better
+      // if this turns out to be performance critical.
+      // O S A P are cleared.  Z is set if RESULT == 0.
+      // C is set if SRC is zero.
+      IRTemp src32 = newTemp(Ity_I32);
+      IRTemp res32 = newTemp(Ity_I32);
+      assign(src32, widenUto32(mkexpr(src)));
+      assign(res32, widenUto32(mkexpr(res)));
+
+      IRTemp oszacp = newTemp(Ity_I32);
+      assign(
+         oszacp,
+         binop(Iop_Or32,
+               binop(Iop_Shl32,
+                     unop(Iop_1Uto32,
+                          binop(Iop_CmpEQ32, mkexpr(res32), mkU32(0))),
+                     mkU8(X86G_CC_SHIFT_Z)),
+               binop(Iop_Shl32,
+                     unop(Iop_1Uto32,
+                          binop(Iop_CmpEQ32, mkexpr(src32), mkU32(0))),
+                     mkU8(X86G_CC_SHIFT_C))
+         )
+      );
+
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(oszacp) ));
+
+      goto decode_success;
+   }
+
+   /* ---------------------------------------------------- */
+   /* --- end of the SSE4 decoder                      --- */
+   /* ---------------------------------------------------- */
+
+   after_sse_decoders:
+
+   /* ---------------------------------------------------- */
+   /* --- deal with misc 0x67 pfxs (addr size override) -- */
+   /* ---------------------------------------------------- */
+
+   /* 67 E3 = JCXZ (for JECXZ see below) */
+   if (insn[0] == 0x67 && insn[1] == 0xE3 && sz == 4) {
+      delta += 2;
+      d32 = (((Addr32)guest_EIP_bbstart)+delta+1) + getSDisp8(delta);
+      delta ++;
+      stmt( IRStmt_Exit(
+               binop(Iop_CmpEQ16, getIReg(2,R_ECX), mkU16(0)),
+               Ijk_Boring,
+               IRConst_U32(d32),
+               OFFB_EIP
+            ));
+       DIP("jcxz 0x%x\n", d32);
+       goto decode_success;
+   }
+
+   /* ---------------------------------------------------- */
+   /* --- start of the baseline insn decoder            -- */
+   /* ---------------------------------------------------- */
+
+   /* Get the primary opcode. */
+   opc = getIByte(delta); delta++;
+
+   /* We get here if the current insn isn't SSE, or this CPU doesn't
+      support SSE. */
+
+   switch (opc) {
+
+   /* ------------------------ Control flow --------------- */
+
+   case 0xC2: /* RET imm16 */
+      d32 = getUDisp16(delta); 
+      delta += 2;
+      dis_ret(&dres, d32);
+      DIP("ret %d\n", (Int)d32);
+      break;
+   case 0xC3: /* RET */
+      dis_ret(&dres, 0);
+      DIP("ret\n");
+      break;
+
+   case 0xCF: /* IRET */
+      /* Note, this is an extremely kludgey and limited implementation
+         of iret.  All it really does is: 
+            popl %EIP; popl %CS; popl %EFLAGS.
+         %CS is set but ignored (as it is in (eg) popw %cs)". */
+      t1 = newTemp(Ity_I32); /* ESP */
+      t2 = newTemp(Ity_I32); /* new EIP */
+      t3 = newTemp(Ity_I32); /* new CS */
+      t4 = newTemp(Ity_I32); /* new EFLAGS */
+      assign(t1, getIReg(4,R_ESP));
+      assign(t2, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t1),mkU32(0) )));
+      assign(t3, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t1),mkU32(4) )));
+      assign(t4, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t1),mkU32(8) )));
+      /* Get stuff off stack */
+      putIReg(4, R_ESP,binop(Iop_Add32, mkexpr(t1), mkU32(12)));
+      /* set %CS (which is ignored anyway) */
+      putSReg( R_CS, unop(Iop_32to16, mkexpr(t3)) );
+      /* set %EFLAGS */
+      set_EFLAGS_from_value( t4, False/*!emit_AC_emwarn*/, 0/*unused*/ );
+      /* goto new EIP value */
+      jmp_treg(&dres, Ijk_Ret, t2);
+      vassert(dres.whatNext == Dis_StopHere);
+      DIP("iret (very kludgey)\n");
+      break;
+
+   case 0xE8: /* CALL J4 */
+      d32 = getUDisp32(delta); delta += 4;
+      d32 += (guest_EIP_bbstart+delta); 
+      /* (guest_eip_bbstart+delta) == return-to addr, d32 == call-to addr */
+      if (d32 == guest_EIP_bbstart+delta && getIByte(delta) >= 0x58 
+                                         && getIByte(delta) <= 0x5F) {
+         /* Specially treat the position-independent-code idiom 
+                 call X
+              X: popl %reg
+            as 
+                 movl %eip, %reg.
+            since this generates better code, but for no other reason. */
+         Int archReg = getIByte(delta) - 0x58;
+         /* vex_printf("-- fPIC thingy\n"); */
+         putIReg(4, archReg, mkU32(guest_EIP_bbstart+delta));
+         delta++; /* Step over the POP */
+         DIP("call 0x%x ; popl %s\n",d32,nameIReg(4,archReg));
+      } else {
+         /* The normal sequence for a call. */
+         t1 = newTemp(Ity_I32); 
+         assign(t1, binop(Iop_Sub32, getIReg(4,R_ESP), mkU32(4)));
+         putIReg(4, R_ESP, mkexpr(t1));
+         storeLE( mkexpr(t1), mkU32(guest_EIP_bbstart+delta));
+         if (resteerOkFn( callback_opaque, (Addr32)d32 )) {
+            /* follow into the call target. */
+            dres.whatNext   = Dis_ResteerU;
+            dres.continueAt = (Addr32)d32;
+         } else {
+            jmp_lit(&dres, Ijk_Call, d32);
+            vassert(dres.whatNext == Dis_StopHere);
+         }
+         DIP("call 0x%x\n",d32);
+      }
+      break;
+
+//--    case 0xC8: /* ENTER */ 
+//--       d32 = getUDisp16(eip); eip += 2;
+//--       abyte = getIByte(delta); delta++;
+//-- 
+//--       vg_assert(sz == 4);           
+//--       vg_assert(abyte == 0);
+//-- 
+//--       t1 = newTemp(cb); t2 = newTemp(cb);
+//--       uInstr2(cb, GET,   sz, ArchReg, R_EBP, TempReg, t1);
+//--       uInstr2(cb, GET,    4, ArchReg, R_ESP, TempReg, t2);
+//--       uInstr2(cb, SUB,    4, Literal, 0,     TempReg, t2);
+//--       uLiteral(cb, sz);
+//--       uInstr2(cb, PUT,    4, TempReg, t2,    ArchReg, R_ESP);
+//--       uInstr2(cb, STORE,  4, TempReg, t1,    TempReg, t2);
+//--       uInstr2(cb, PUT,    4, TempReg, t2,    ArchReg, R_EBP);
+//--       if (d32) {
+//--          uInstr2(cb, SUB,    4, Literal, 0,     TempReg, t2);
+//--          uLiteral(cb, d32);
+//--          uInstr2(cb, PUT,    4, TempReg, t2,    ArchReg, R_ESP);
+//--       }
+//--       DIP("enter 0x%x, 0x%x", d32, abyte);
+//--       break;
+
+   case 0xC9: /* LEAVE */
+      vassert(sz == 4);
+      t1 = newTemp(Ity_I32); t2 = newTemp(Ity_I32);
+      assign(t1, getIReg(4,R_EBP));
+      /* First PUT ESP looks redundant, but need it because ESP must
+         always be up-to-date for Memcheck to work... */
+      putIReg(4, R_ESP, mkexpr(t1));
+      assign(t2, loadLE(Ity_I32,mkexpr(t1)));
+      putIReg(4, R_EBP, mkexpr(t2));
+      putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(t1), mkU32(4)) );
+      DIP("leave\n");
+      break;
+
+   /* ---------------- Misc weird-ass insns --------------- */
+
+   case 0x27: /* DAA */
+   case 0x2F: /* DAS */
+   case 0x37: /* AAA */
+   case 0x3F: /* AAS */
+      /* An ugly implementation for some ugly instructions.  Oh
+	 well. */
+      if (sz != 4) goto decode_failure;
+      t1 = newTemp(Ity_I32);
+      t2 = newTemp(Ity_I32);
+      /* Make up a 32-bit value (t1), with the old value of AX in the
+         bottom 16 bits, and the old OSZACP bitmask in the upper 16
+         bits. */
+      assign(t1, 
+             binop(Iop_16HLto32, 
+                   unop(Iop_32to16,
+                        mk_x86g_calculate_eflags_all()),
+                   getIReg(2, R_EAX)
+            ));
+      /* Call the helper fn, to get a new AX and OSZACP value, and
+         poke both back into the guest state.  Also pass the helper
+         the actual opcode so it knows which of the 4 instructions it
+         is doing the computation for. */
+      vassert(opc == 0x27 || opc == 0x2F || opc == 0x37 || opc == 0x3F);
+      assign(t2,
+              mkIRExprCCall(
+                 Ity_I32, 0/*regparm*/, "x86g_calculate_daa_das_aaa_aas",
+                 &x86g_calculate_daa_das_aaa_aas,
+                 mkIRExprVec_2( mkexpr(t1), mkU32( opc & 0xFF) )
+            ));
+     putIReg(2, R_EAX, unop(Iop_32to16, mkexpr(t2) ));
+
+     stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+     stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+     stmt( IRStmt_Put( OFFB_CC_DEP1, 
+                       binop(Iop_And32,
+                             binop(Iop_Shr32, mkexpr(t2), mkU8(16)),
+                             mkU32( X86G_CC_MASK_C | X86G_CC_MASK_P 
+                                    | X86G_CC_MASK_A | X86G_CC_MASK_Z 
+                                    | X86G_CC_MASK_S| X86G_CC_MASK_O )
+                            )
+                      )
+         );
+     /* Set NDEP even though it isn't used.  This makes redundant-PUT
+        elimination of previous stores to this field work better. */
+     stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+     switch (opc) {
+        case 0x27: DIP("daa\n"); break;
+        case 0x2F: DIP("das\n"); break;
+        case 0x37: DIP("aaa\n"); break;
+        case 0x3F: DIP("aas\n"); break;
+        default: vassert(0);
+     }
+     break;
+
+   case 0xD4: /* AAM */
+   case 0xD5: /* AAD */
+      d32 = getIByte(delta); delta++;
+      if (sz != 4 || d32 != 10) goto decode_failure;
+      t1 = newTemp(Ity_I32);
+      t2 = newTemp(Ity_I32);
+      /* Make up a 32-bit value (t1), with the old value of AX in the
+         bottom 16 bits, and the old OSZACP bitmask in the upper 16
+         bits. */
+      assign(t1, 
+             binop(Iop_16HLto32, 
+                   unop(Iop_32to16,
+                        mk_x86g_calculate_eflags_all()),
+                   getIReg(2, R_EAX)
+            ));
+      /* Call the helper fn, to get a new AX and OSZACP value, and
+         poke both back into the guest state.  Also pass the helper
+         the actual opcode so it knows which of the 2 instructions it
+         is doing the computation for. */
+      assign(t2,
+              mkIRExprCCall(
+                 Ity_I32, 0/*regparm*/, "x86g_calculate_aad_aam",
+                 &x86g_calculate_aad_aam,
+                 mkIRExprVec_2( mkexpr(t1), mkU32( opc & 0xFF) )
+            ));
+      putIReg(2, R_EAX, unop(Iop_32to16, mkexpr(t2) ));
+
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, 
+                        binop(Iop_And32,
+                              binop(Iop_Shr32, mkexpr(t2), mkU8(16)),
+                              mkU32( X86G_CC_MASK_C | X86G_CC_MASK_P 
+                                     | X86G_CC_MASK_A | X86G_CC_MASK_Z 
+                                     | X86G_CC_MASK_S| X86G_CC_MASK_O )
+                             )
+                       )
+          );
+      /* Set NDEP even though it isn't used.  This makes
+         redundant-PUT elimination of previous stores to this field
+         work better. */
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+
+      DIP(opc == 0xD4 ? "aam\n" : "aad\n");
+      break;
+
+   /* ------------------------ CWD/CDQ -------------------- */
+
+   case 0x98: /* CBW */
+      if (sz == 4) {
+         putIReg(4, R_EAX, unop(Iop_16Sto32, getIReg(2, R_EAX)));
+         DIP("cwde\n");
+      } else {
+         vassert(sz == 2);
+         putIReg(2, R_EAX, unop(Iop_8Sto16, getIReg(1, R_EAX)));
+         DIP("cbw\n");
+      }
+      break;
+
+   case 0x99: /* CWD/CDQ */
+      ty = szToITy(sz);
+      putIReg(sz, R_EDX,
+                  binop(mkSizedOp(ty,Iop_Sar8), 
+                        getIReg(sz, R_EAX),
+                        mkU8(sz == 2 ? 15 : 31)) );
+      DIP(sz == 2 ? "cwdq\n" : "cdqq\n");
+      break;
+
+   /* ------------------------ FPU ops -------------------- */
+
+   case 0x9E: /* SAHF */
+      codegen_SAHF();
+      DIP("sahf\n");
+      break;
+
+   case 0x9F: /* LAHF */
+      codegen_LAHF();
+      DIP("lahf\n");
+      break;
+
+   case 0x9B: /* FWAIT */
+      /* ignore? */
+      DIP("fwait\n");
+      break;
+
+   case 0xD8:
+   case 0xD9:
+   case 0xDA:
+   case 0xDB:
+   case 0xDC:
+   case 0xDD:
+   case 0xDE:
+   case 0xDF: {
+      Int  delta0    = delta;
+      Bool decode_OK = False;
+      delta = dis_FPU ( &decode_OK, sorb, delta );
+      if (!decode_OK) {
+         delta = delta0;
+         goto decode_failure;
+      }
+      break;
+   }
+
+   /* ------------------------ INC & DEC ------------------ */
+
+   case 0x40: /* INC eAX */
+   case 0x41: /* INC eCX */
+   case 0x42: /* INC eDX */
+   case 0x43: /* INC eBX */
+   case 0x44: /* INC eSP */
+   case 0x45: /* INC eBP */
+   case 0x46: /* INC eSI */
+   case 0x47: /* INC eDI */
+      vassert(sz == 2 || sz == 4);
+      ty = szToITy(sz);
+      t1 = newTemp(ty);
+      assign( t1, binop(mkSizedOp(ty,Iop_Add8),
+                        getIReg(sz, (UInt)(opc - 0x40)),
+                        mkU(ty,1)) );
+      setFlags_INC_DEC( True, t1, ty );
+      putIReg(sz, (UInt)(opc - 0x40), mkexpr(t1));
+      DIP("inc%c %s\n", nameISize(sz), nameIReg(sz,opc-0x40));
+      break;
+
+   case 0x48: /* DEC eAX */
+   case 0x49: /* DEC eCX */
+   case 0x4A: /* DEC eDX */
+   case 0x4B: /* DEC eBX */
+   case 0x4C: /* DEC eSP */
+   case 0x4D: /* DEC eBP */
+   case 0x4E: /* DEC eSI */
+   case 0x4F: /* DEC eDI */
+      vassert(sz == 2 || sz == 4);
+      ty = szToITy(sz);
+      t1 = newTemp(ty);
+      assign( t1, binop(mkSizedOp(ty,Iop_Sub8),
+                        getIReg(sz, (UInt)(opc - 0x48)),
+                        mkU(ty,1)) );
+      setFlags_INC_DEC( False, t1, ty );
+      putIReg(sz, (UInt)(opc - 0x48), mkexpr(t1));
+      DIP("dec%c %s\n", nameISize(sz), nameIReg(sz,opc-0x48));
+      break;
+
+   /* ------------------------ INT ------------------------ */
+
+   case 0xCC: /* INT 3 */
+      jmp_lit(&dres, Ijk_SigTRAP, ((Addr32)guest_EIP_bbstart)+delta);
+      vassert(dres.whatNext == Dis_StopHere);
+      DIP("int $0x3\n");
+      break;
+
+   case 0xCD: /* INT imm8 */
+      d32 = getIByte(delta); delta++;
+
+      /* For any of the cases where we emit a jump (that is, for all
+         currently handled cases), it's important that all ArchRegs
+         carry their up-to-date value at this point.  So we declare an
+         end-of-block here, which forces any TempRegs caching ArchRegs
+         to be flushed. */
+
+      /* Handle int $0x3F .. $0x4F by synthesising a segfault and a
+         restart of this instruction (hence the "-2" two lines below,
+         to get the restart EIP to be this instruction.  This is
+         probably Linux-specific and it would be more correct to only
+         do this if the VexAbiInfo says that is what we should do.
+         This used to handle just 0x40-0x43; Jikes RVM uses a larger
+         range (0x3F-0x49), and this allows some slack as well. */
+      if (d32 >= 0x3F && d32 <= 0x4F) {
+         jmp_lit(&dres, Ijk_SigSEGV, ((Addr32)guest_EIP_bbstart)+delta-2);
+         vassert(dres.whatNext == Dis_StopHere);
+         DIP("int $0x%x\n", (Int)d32);
+         break;
+      }
+
+      /* Handle int $0x80 (linux syscalls), int $0x81 and $0x82
+         (darwin syscalls).  As part of this, note where we are, so we
+         can back up the guest to this point if the syscall needs to
+         be restarted. */
+      if (d32 == 0x80) {
+         stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
+                           mkU32(guest_EIP_curr_instr) ) );
+         jmp_lit(&dres, Ijk_Sys_int128, ((Addr32)guest_EIP_bbstart)+delta);
+         vassert(dres.whatNext == Dis_StopHere);
+         DIP("int $0x80\n");
+         break;
+      }
+      if (d32 == 0x81) {
+         stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
+                           mkU32(guest_EIP_curr_instr) ) );
+         jmp_lit(&dres, Ijk_Sys_int129, ((Addr32)guest_EIP_bbstart)+delta);
+         vassert(dres.whatNext == Dis_StopHere);
+         DIP("int $0x81\n");
+         break;
+      }
+      if (d32 == 0x82) {
+         stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
+                           mkU32(guest_EIP_curr_instr) ) );
+         jmp_lit(&dres, Ijk_Sys_int130, ((Addr32)guest_EIP_bbstart)+delta);
+         vassert(dres.whatNext == Dis_StopHere);
+         DIP("int $0x82\n");
+         break;
+      }
+
+      /* none of the above */
+      goto decode_failure;
+
+   /* ------------------------ Jcond, byte offset --------- */
+
+   case 0xEB: /* Jb (jump, byte offset) */
+      d32 = (((Addr32)guest_EIP_bbstart)+delta+1) + getSDisp8(delta); 
+      delta++;
+      if (resteerOkFn( callback_opaque, (Addr32)d32) ) {
+         dres.whatNext   = Dis_ResteerU;
+         dres.continueAt = (Addr32)d32;
+      } else {
+         jmp_lit(&dres, Ijk_Boring, d32);
+         vassert(dres.whatNext == Dis_StopHere);
+      }
+      DIP("jmp-8 0x%x\n", d32);
+      break;
+
+   case 0xE9: /* Jv (jump, 16/32 offset) */
+      vassert(sz == 4); /* JRS added 2004 July 11 */
+      d32 = (((Addr32)guest_EIP_bbstart)+delta+sz) + getSDisp(sz,delta); 
+      delta += sz;
+      if (resteerOkFn( callback_opaque, (Addr32)d32) ) {
+         dres.whatNext   = Dis_ResteerU;
+         dres.continueAt = (Addr32)d32;
+      } else {
+         jmp_lit(&dres, Ijk_Boring, d32);
+         vassert(dres.whatNext == Dis_StopHere);
+      }
+      DIP("jmp 0x%x\n", d32);
+      break;
+
+   case 0x70:
+   case 0x71:
+   case 0x72: /* JBb/JNAEb (jump below) */
+   case 0x73: /* JNBb/JAEb (jump not below) */
+   case 0x74: /* JZb/JEb (jump zero) */
+   case 0x75: /* JNZb/JNEb (jump not zero) */
+   case 0x76: /* JBEb/JNAb (jump below or equal) */
+   case 0x77: /* JNBEb/JAb (jump not below or equal) */
+   case 0x78: /* JSb (jump negative) */
+   case 0x79: /* JSb (jump not negative) */
+   case 0x7A: /* JP (jump parity even) */
+   case 0x7B: /* JNP/JPO (jump parity odd) */
+   case 0x7C: /* JLb/JNGEb (jump less) */
+   case 0x7D: /* JGEb/JNLb (jump greater or equal) */
+   case 0x7E: /* JLEb/JNGb (jump less or equal) */
+   case 0x7F: /* JGb/JNLEb (jump greater) */
+    { Int    jmpDelta;
+      const HChar* comment  = "";
+      jmpDelta = (Int)getSDisp8(delta);
+      vassert(-128 <= jmpDelta && jmpDelta < 128);
+      d32 = (((Addr32)guest_EIP_bbstart)+delta+1) + jmpDelta; 
+      delta++;
+      if (resteerCisOk
+          && vex_control.guest_chase_cond
+          && (Addr32)d32 != (Addr32)guest_EIP_bbstart
+          && jmpDelta < 0
+          && resteerOkFn( callback_opaque, (Addr32)d32) ) {
+         /* Speculation: assume this backward branch is taken.  So we
+            need to emit a side-exit to the insn following this one,
+            on the negation of the condition, and continue at the
+            branch target address (d32).  If we wind up back at the
+            first instruction of the trace, just stop; it's better to
+            let the IR loop unroller handle that case. */
+         stmt( IRStmt_Exit( 
+                  mk_x86g_calculate_condition((X86Condcode)(1 ^ (opc - 0x70))),
+                  Ijk_Boring,
+                  IRConst_U32(guest_EIP_bbstart+delta),
+                  OFFB_EIP ) );
+         dres.whatNext   = Dis_ResteerC;
+         dres.continueAt = (Addr32)d32;
+         comment = "(assumed taken)";
+      }
+      else
+      if (resteerCisOk
+          && vex_control.guest_chase_cond
+          && (Addr32)d32 != (Addr32)guest_EIP_bbstart
+          && jmpDelta >= 0
+          && resteerOkFn( callback_opaque, 
+                          (Addr32)(guest_EIP_bbstart+delta)) ) {
+         /* Speculation: assume this forward branch is not taken.  So
+            we need to emit a side-exit to d32 (the dest) and continue
+            disassembling at the insn immediately following this
+            one. */
+         stmt( IRStmt_Exit( 
+                  mk_x86g_calculate_condition((X86Condcode)(opc - 0x70)),
+                  Ijk_Boring,
+                  IRConst_U32(d32),
+                  OFFB_EIP ) );
+         dres.whatNext   = Dis_ResteerC;
+         dres.continueAt = guest_EIP_bbstart + delta;
+         comment = "(assumed not taken)";
+      }
+      else {
+         /* Conservative default translation - end the block at this
+            point. */
+         jcc_01( &dres, (X86Condcode)(opc - 0x70), 
+                 (Addr32)(guest_EIP_bbstart+delta), d32);
+         vassert(dres.whatNext == Dis_StopHere);
+      }
+      DIP("j%s-8 0x%x %s\n", name_X86Condcode(opc - 0x70), d32, comment);
+      break;
+    }
+
+   case 0xE3: /* JECXZ (for JCXZ see above) */
+      if (sz != 4) goto decode_failure;
+      d32 = (((Addr32)guest_EIP_bbstart)+delta+1) + getSDisp8(delta);
+      delta ++;
+      stmt( IRStmt_Exit(
+               binop(Iop_CmpEQ32, getIReg(4,R_ECX), mkU32(0)),
+            Ijk_Boring,
+            IRConst_U32(d32),
+            OFFB_EIP
+          ));
+      DIP("jecxz 0x%x\n", d32);
+      break;
+
+   case 0xE0: /* LOOPNE disp8: decrement count, jump if count != 0 && ZF==0 */
+   case 0xE1: /* LOOPE  disp8: decrement count, jump if count != 0 && ZF==1 */
+   case 0xE2: /* LOOP   disp8: decrement count, jump if count != 0 */
+    { /* Again, the docs say this uses ECX/CX as a count depending on
+         the address size override, not the operand one.  Since we
+         don't handle address size overrides, I guess that means
+         ECX. */
+      IRExpr* zbit  = NULL;
+      IRExpr* count = NULL;
+      IRExpr* cond  = NULL;
+      const HChar* xtra = NULL;
+
+      if (sz != 4) goto decode_failure;
+      d32 = (((Addr32)guest_EIP_bbstart)+delta+1) + getSDisp8(delta);
+      delta++;
+      putIReg(4, R_ECX, binop(Iop_Sub32, getIReg(4,R_ECX), mkU32(1)));
+
+      count = getIReg(4,R_ECX);
+      cond = binop(Iop_CmpNE32, count, mkU32(0));
+      switch (opc) {
+         case 0xE2: 
+            xtra = ""; 
+            break;
+         case 0xE1: 
+            xtra = "e"; 
+            zbit = mk_x86g_calculate_condition( X86CondZ );
+	    cond = mkAnd1(cond, zbit);
+            break;
+         case 0xE0: 
+            xtra = "ne";
+            zbit = mk_x86g_calculate_condition( X86CondNZ );
+	    cond = mkAnd1(cond, zbit);
+            break;
+         default:
+	    vassert(0);
+      }
+      stmt( IRStmt_Exit(cond, Ijk_Boring, IRConst_U32(d32), OFFB_EIP) );
+
+      DIP("loop%s 0x%x\n", xtra, d32);
+      break;
+    }
+
+   /* ------------------------ IMUL ----------------------- */
+
+   case 0x69: /* IMUL Iv, Ev, Gv */
+      delta = dis_imul_I_E_G ( sorb, sz, delta, sz );
+      break;
+   case 0x6B: /* IMUL Ib, Ev, Gv */
+      delta = dis_imul_I_E_G ( sorb, sz, delta, 1 );
+      break;
+
+   /* ------------------------ MOV ------------------------ */
+
+   case 0x88: /* MOV Gb,Eb */
+      delta = dis_mov_G_E(sorb, 1, delta);
+      break;
+
+   case 0x89: /* MOV Gv,Ev */
+      delta = dis_mov_G_E(sorb, sz, delta);
+      break;
+
+   case 0x8A: /* MOV Eb,Gb */
+      delta = dis_mov_E_G(sorb, 1, delta);
+      break;
+ 
+   case 0x8B: /* MOV Ev,Gv */
+      delta = dis_mov_E_G(sorb, sz, delta);
+      break;
+ 
+   case 0x8D: /* LEA M,Gv */
+      if (sz != 4)
+         goto decode_failure;
+      modrm = getIByte(delta);
+      if (epartIsReg(modrm)) 
+         goto decode_failure;
+      /* NOTE!  this is the one place where a segment override prefix
+         has no effect on the address calculation.  Therefore we pass
+         zero instead of sorb here. */
+      addr = disAMode ( &alen, /*sorb*/ 0, delta, dis_buf );
+      delta += alen;
+      putIReg(sz, gregOfRM(modrm), mkexpr(addr));
+      DIP("lea%c %s, %s\n", nameISize(sz), dis_buf, 
+                            nameIReg(sz,gregOfRM(modrm)));
+      break;
+
+   case 0x8C: /* MOV Sw,Ew -- MOV from a SEGMENT REGISTER */
+      delta = dis_mov_Sw_Ew(sorb, sz, delta);
+      break;
+
+   case 0x8E: /* MOV Ew,Sw -- MOV to a SEGMENT REGISTER */
+      delta = dis_mov_Ew_Sw(sorb, delta);
+      break;
+ 
+   case 0xA0: /* MOV Ob,AL */
+      sz = 1;
+      /* Fall through ... */
+   case 0xA1: /* MOV Ov,eAX */
+      d32 = getUDisp32(delta); delta += 4;
+      ty = szToITy(sz);
+      addr = newTemp(Ity_I32);
+      assign( addr, handleSegOverride(sorb, mkU32(d32)) );
+      putIReg(sz, R_EAX, loadLE(ty, mkexpr(addr)));
+      DIP("mov%c %s0x%x, %s\n", nameISize(sz), sorbTxt(sorb),
+                                d32, nameIReg(sz,R_EAX));
+      break;
+
+   case 0xA2: /* MOV Ob,AL */
+      sz = 1;
+      /* Fall through ... */
+   case 0xA3: /* MOV eAX,Ov */
+      d32 = getUDisp32(delta); delta += 4;
+      ty = szToITy(sz);
+      addr = newTemp(Ity_I32);
+      assign( addr, handleSegOverride(sorb, mkU32(d32)) );
+      storeLE( mkexpr(addr), getIReg(sz,R_EAX) );
+      DIP("mov%c %s, %s0x%x\n", nameISize(sz), nameIReg(sz,R_EAX),
+                                sorbTxt(sorb), d32);
+      break;
+
+   case 0xB0: /* MOV imm,AL */
+   case 0xB1: /* MOV imm,CL */
+   case 0xB2: /* MOV imm,DL */
+   case 0xB3: /* MOV imm,BL */
+   case 0xB4: /* MOV imm,AH */
+   case 0xB5: /* MOV imm,CH */
+   case 0xB6: /* MOV imm,DH */
+   case 0xB7: /* MOV imm,BH */
+      d32 = getIByte(delta); delta += 1;
+      putIReg(1, opc-0xB0, mkU8(d32));
+      DIP("movb $0x%x,%s\n", d32, nameIReg(1,opc-0xB0));
+      break;
+
+   case 0xB8: /* MOV imm,eAX */
+   case 0xB9: /* MOV imm,eCX */
+   case 0xBA: /* MOV imm,eDX */
+   case 0xBB: /* MOV imm,eBX */
+   case 0xBC: /* MOV imm,eSP */
+   case 0xBD: /* MOV imm,eBP */
+   case 0xBE: /* MOV imm,eSI */
+   case 0xBF: /* MOV imm,eDI */
+      d32 = getUDisp(sz,delta); delta += sz;
+      putIReg(sz, opc-0xB8, mkU(szToITy(sz), d32));
+      DIP("mov%c $0x%x,%s\n", nameISize(sz), d32, nameIReg(sz,opc-0xB8));
+      break;
+
+   case 0xC6: /* C6 /0 = MOV Ib,Eb */
+      sz = 1;
+      goto maybe_do_Mov_I_E;
+   case 0xC7: /* C7 /0 = MOV Iv,Ev */
+      goto maybe_do_Mov_I_E;
+
+   maybe_do_Mov_I_E:
+      modrm = getIByte(delta);
+      if (gregOfRM(modrm) == 0) {
+         if (epartIsReg(modrm)) {
+            delta++; /* mod/rm byte */
+            d32 = getUDisp(sz,delta); delta += sz;
+            putIReg(sz, eregOfRM(modrm), mkU(szToITy(sz), d32));
+            DIP("mov%c $0x%x, %s\n", nameISize(sz), d32, 
+                                     nameIReg(sz,eregOfRM(modrm)));
+         } else {
+            addr = disAMode ( &alen, sorb, delta, dis_buf );
+            delta += alen;
+            d32 = getUDisp(sz,delta); delta += sz;
+            storeLE(mkexpr(addr), mkU(szToITy(sz), d32));
+            DIP("mov%c $0x%x, %s\n", nameISize(sz), d32, dis_buf);
+         }
+         break;
+      }
+      goto decode_failure;
+
+   /* ------------------------ opl imm, A ----------------- */
+
+   case 0x04: /* ADD Ib, AL */
+      delta = dis_op_imm_A(  1, False, Iop_Add8, True, delta, "add" );
+      break;
+   case 0x05: /* ADD Iv, eAX */
+      delta = dis_op_imm_A( sz, False, Iop_Add8, True, delta, "add" );
+      break;
+
+   case 0x0C: /* OR Ib, AL */
+      delta = dis_op_imm_A(  1, False, Iop_Or8, True, delta, "or" );
+      break;
+   case 0x0D: /* OR Iv, eAX */
+      delta = dis_op_imm_A( sz, False, Iop_Or8, True, delta, "or" );
+      break;
+
+   case 0x14: /* ADC Ib, AL */
+      delta = dis_op_imm_A(  1, True, Iop_Add8, True, delta, "adc" );
+      break;
+   case 0x15: /* ADC Iv, eAX */
+      delta = dis_op_imm_A( sz, True, Iop_Add8, True, delta, "adc" );
+      break;
+
+   case 0x1C: /* SBB Ib, AL */
+      delta = dis_op_imm_A( 1, True, Iop_Sub8, True, delta, "sbb" );
+      break;
+   case 0x1D: /* SBB Iv, eAX */
+      delta = dis_op_imm_A( sz, True, Iop_Sub8, True, delta, "sbb" );
+      break;
+
+   case 0x24: /* AND Ib, AL */
+      delta = dis_op_imm_A(  1, False, Iop_And8, True, delta, "and" );
+      break;
+   case 0x25: /* AND Iv, eAX */
+      delta = dis_op_imm_A( sz, False, Iop_And8, True, delta, "and" );
+      break;
+
+   case 0x2C: /* SUB Ib, AL */
+      delta = dis_op_imm_A(  1, False, Iop_Sub8, True, delta, "sub" );
+      break;
+   case 0x2D: /* SUB Iv, eAX */
+      delta = dis_op_imm_A( sz, False, Iop_Sub8, True, delta, "sub" );
+      break;
+
+   case 0x34: /* XOR Ib, AL */
+      delta = dis_op_imm_A(  1, False, Iop_Xor8, True, delta, "xor" );
+      break;
+   case 0x35: /* XOR Iv, eAX */
+      delta = dis_op_imm_A( sz, False, Iop_Xor8, True, delta, "xor" );
+      break;
+
+   case 0x3C: /* CMP Ib, AL */
+      delta = dis_op_imm_A(  1, False, Iop_Sub8, False, delta, "cmp" );
+      break;
+   case 0x3D: /* CMP Iv, eAX */
+      delta = dis_op_imm_A( sz, False, Iop_Sub8, False, delta, "cmp" );
+      break;
+
+   case 0xA8: /* TEST Ib, AL */
+      delta = dis_op_imm_A(  1, False, Iop_And8, False, delta, "test" );
+      break;
+   case 0xA9: /* TEST Iv, eAX */
+      delta = dis_op_imm_A( sz, False, Iop_And8, False, delta, "test" );
+      break;
+
+   /* ------------------------ opl Ev, Gv ----------------- */
+ 
+   case 0x02: /* ADD Eb,Gb */
+      delta = dis_op2_E_G ( sorb, False, Iop_Add8, True, 1, delta, "add" );
+      break;
+   case 0x03: /* ADD Ev,Gv */
+      delta = dis_op2_E_G ( sorb, False, Iop_Add8, True, sz, delta, "add" );
+      break;
+
+   case 0x0A: /* OR Eb,Gb */
+      delta = dis_op2_E_G ( sorb, False, Iop_Or8, True, 1, delta, "or" );
+      break;
+   case 0x0B: /* OR Ev,Gv */
+      delta = dis_op2_E_G ( sorb, False, Iop_Or8, True, sz, delta, "or" );
+      break;
+
+   case 0x12: /* ADC Eb,Gb */
+      delta = dis_op2_E_G ( sorb, True, Iop_Add8, True, 1, delta, "adc" );
+      break;
+   case 0x13: /* ADC Ev,Gv */
+      delta = dis_op2_E_G ( sorb, True, Iop_Add8, True, sz, delta, "adc" );
+      break;
+
+   case 0x1A: /* SBB Eb,Gb */
+      delta = dis_op2_E_G ( sorb, True, Iop_Sub8, True, 1, delta, "sbb" );
+      break;
+   case 0x1B: /* SBB Ev,Gv */
+      delta = dis_op2_E_G ( sorb, True, Iop_Sub8, True, sz, delta, "sbb" );
+      break;
+
+   case 0x22: /* AND Eb,Gb */
+      delta = dis_op2_E_G ( sorb, False, Iop_And8, True, 1, delta, "and" );
+      break;
+   case 0x23: /* AND Ev,Gv */
+      delta = dis_op2_E_G ( sorb, False, Iop_And8, True, sz, delta, "and" );
+      break;
+
+   case 0x2A: /* SUB Eb,Gb */
+      delta = dis_op2_E_G ( sorb, False, Iop_Sub8, True, 1, delta, "sub" );
+      break;
+   case 0x2B: /* SUB Ev,Gv */
+      delta = dis_op2_E_G ( sorb, False, Iop_Sub8, True, sz, delta, "sub" );
+      break;
+
+   case 0x32: /* XOR Eb,Gb */
+      delta = dis_op2_E_G ( sorb, False, Iop_Xor8, True, 1, delta, "xor" );
+      break;
+   case 0x33: /* XOR Ev,Gv */
+      delta = dis_op2_E_G ( sorb, False, Iop_Xor8, True, sz, delta, "xor" );
+      break;
+
+   case 0x3A: /* CMP Eb,Gb */
+      delta = dis_op2_E_G ( sorb, False, Iop_Sub8, False, 1, delta, "cmp" );
+      break;
+   case 0x3B: /* CMP Ev,Gv */
+      delta = dis_op2_E_G ( sorb, False, Iop_Sub8, False, sz, delta, "cmp" );
+      break;
+
+   case 0x84: /* TEST Eb,Gb */
+      delta = dis_op2_E_G ( sorb, False, Iop_And8, False, 1, delta, "test" );
+      break;
+   case 0x85: /* TEST Ev,Gv */
+      delta = dis_op2_E_G ( sorb, False, Iop_And8, False, sz, delta, "test" );
+      break;
+
+   /* ------------------------ opl Gv, Ev ----------------- */
+
+   case 0x00: /* ADD Gb,Eb */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Add8, True, 1, delta, "add" );
+      break;
+   /* ---- Binary ALU ops, G(reg) -> E(reg-or-mem) direction ----
+      dis_op2_G_E argument pattern, as visible from these call sites:
+      (seg-override byte, LOCK-prefix flag, use-carry-in flag,
+       8-bit base IROp, keep-result flag, operand size, delta,
+       mnemonic).  The carry-in flag is True only for ADC/SBB; the
+      keep-result flag is False only for CMP, which computes flags
+      from the subtraction but discards the result.  The 8-bit IROp
+      is passed even for the Gv,Ev forms -- presumably dis_op2_G_E
+      scales it up to the real operand size internally (TODO
+      confirm against the helper's definition). */
+   case 0x01: /* ADD Gv,Ev */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Add8, True, sz, delta, "add" );
+      break;
+
+   case 0x08: /* OR Gb,Eb */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Or8, True, 1, delta, "or" );
+      break;
+   case 0x09: /* OR Gv,Ev */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Or8, True, sz, delta, "or" );
+      break;
+
+   case 0x10: /* ADC Gb,Eb */
+      delta = dis_op2_G_E ( sorb, pfx_lock, True,
+                            Iop_Add8, True, 1, delta, "adc" );
+      break;
+   case 0x11: /* ADC Gv,Ev */
+      delta = dis_op2_G_E ( sorb, pfx_lock, True,
+                            Iop_Add8, True, sz, delta, "adc" );
+      break;
+
+   case 0x18: /* SBB Gb,Eb */
+      delta = dis_op2_G_E ( sorb, pfx_lock, True,
+                            Iop_Sub8, True, 1, delta, "sbb" );
+      break;
+   case 0x19: /* SBB Gv,Ev */
+      delta = dis_op2_G_E ( sorb, pfx_lock, True,
+                            Iop_Sub8, True, sz, delta, "sbb" );
+      break;
+
+   case 0x20: /* AND Gb,Eb */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_And8, True, 1, delta, "and" );
+      break;
+   case 0x21: /* AND Gv,Ev */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_And8, True, sz, delta, "and" );
+      break;
+
+   case 0x28: /* SUB Gb,Eb */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Sub8, True, 1, delta, "sub" );
+      break;
+   case 0x29: /* SUB Gv,Ev */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Sub8, True, sz, delta, "sub" );
+      break;
+
+   case 0x30: /* XOR Gb,Eb */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Xor8, True, 1, delta, "xor" );
+      break;
+   case 0x31: /* XOR Gv,Ev */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Xor8, True, sz, delta, "xor" );
+      break;
+
+   case 0x38: /* CMP Gb,Eb */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Sub8, False, 1, delta, "cmp" );
+      break;
+   case 0x39: /* CMP Gv,Ev */
+      delta = dis_op2_G_E ( sorb, pfx_lock, False,
+                            Iop_Sub8, False, sz, delta, "cmp" );
+      break;
+
+   /* ------------------------ POP ------------------------ */
+
+   case 0x58: /* POP eAX */
+   case 0x59: /* POP eCX */
+   case 0x5A: /* POP eDX */
+   case 0x5B: /* POP eBX */
+   case 0x5D: /* POP eBP */
+   case 0x5E: /* POP eSI */
+   case 0x5F: /* POP eDI */
+   case 0x5C: /* POP eSP */
+      /* Order matters here: load from [ESP] into t1 first, then
+         bump ESP, then write the destination register.  For POP
+         %ESP (0x5C) the final putIReg overwrites the just-bumped
+         ESP with the loaded value. */
+      vassert(sz == 2 || sz == 4);
+      t1 = newTemp(szToITy(sz)); t2 = newTemp(Ity_I32);
+      assign(t2, getIReg(4, R_ESP));
+      assign(t1, loadLE(szToITy(sz),mkexpr(t2)));
+      putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(t2), mkU32(sz)));
+      putIReg(sz, opc-0x58, mkexpr(t1));
+      DIP("pop%c %s\n", nameISize(sz), nameIReg(sz,opc-0x58));
+      break;
+
+   case 0x9D: /* POPF */
+      /* Pop sz bytes, widen to 32 bits, then hand the value to
+         set_EFLAGS_from_value to scatter it into the guest flag
+         state (with an emulation warning for the AC bit). */
+      vassert(sz == 2 || sz == 4);
+      t1 = newTemp(Ity_I32); t2 = newTemp(Ity_I32);
+      assign(t2, getIReg(4, R_ESP));
+      assign(t1, widenUto32(loadLE(szToITy(sz),mkexpr(t2))));
+      putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(t2), mkU32(sz)));
+
+      /* Generate IR to set %EFLAGS{O,S,Z,A,C,P,D,ID,AC} from the
+	 value in t1. */
+      set_EFLAGS_from_value( t1, True/*emit_AC_emwarn*/,
+                                 ((Addr32)guest_EIP_bbstart)+delta );
+
+      DIP("popf%c\n", nameISize(sz));
+      break;
+
+   case 0x61: /* POPA */
+      /* This is almost certainly wrong for sz==2.  So ... */
+      if (sz != 4) goto decode_failure;
+
+      /* t5 is the old %ESP value. */
+      t5 = newTemp(Ity_I32);
+      assign( t5, getIReg(4, R_ESP) );
+
+      /* Reload all the registers, except %esp.  Offsets mirror the
+         PUSHA (0x60) store order: EDI lowest, EAX highest. */
+      putIReg(4,R_EAX, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t5),mkU32(28)) ));
+      putIReg(4,R_ECX, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t5),mkU32(24)) ));
+      putIReg(4,R_EDX, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t5),mkU32(20)) ));
+      putIReg(4,R_EBX, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t5),mkU32(16)) ));
+      /* ignore saved %ESP */
+      putIReg(4,R_EBP, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t5),mkU32( 8)) ));
+      putIReg(4,R_ESI, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t5),mkU32( 4)) ));
+      putIReg(4,R_EDI, loadLE(Ity_I32, binop(Iop_Add32,mkexpr(t5),mkU32( 0)) ));
+
+      /* and move %ESP back up */
+      putIReg( 4, R_ESP, binop(Iop_Add32, mkexpr(t5), mkU32(8*4)) );
+
+      DIP("popa%c\n", nameISize(sz));
+      break;
+
+   case 0x8F: /* POPL/POPW m32 */
+     { Int    len;
+       UChar  rm = getIByte(delta);
+
+       /* make sure this instruction is correct POP */
+       if (epartIsReg(rm) || gregOfRM(rm) != 0)
+          goto decode_failure;
+       /* and has correct size */
+       if (sz != 4 && sz != 2)
+          goto decode_failure;
+       ty = szToITy(sz);
+
+       t1 = newTemp(Ity_I32); /* stack address */
+       t3 = newTemp(ty); /* data */
+       /* set t1 to ESP: t1 = ESP */
+       assign( t1, getIReg(4, R_ESP) );
+       /* load M[ESP] to virtual register t3: t3 = M[t1] */
+       assign( t3, loadLE(ty, mkexpr(t1)) );
+       
+       /* increase ESP; must be done before the STORE.  Intel manual says:
+            If the ESP register is used as a base register for addressing
+            a destination operand in memory, the POP instruction computes
+            the effective address of the operand after it increments the
+            ESP register.
+       */
+       putIReg(4, R_ESP, binop(Iop_Add32, mkexpr(t1), mkU32(sz)) );
+
+       /* resolve MODR/M */
+       addr = disAMode ( &len, sorb, delta, dis_buf);
+       storeLE( mkexpr(addr), mkexpr(t3) );
+
+       DIP("pop%c %s\n", sz==2 ? 'w' : 'l', dis_buf);
+
+       delta += len;
+       break;
+     }
+
+   case 0x1F: /* POP %DS */
+      dis_pop_segreg( R_DS, sz ); break;
+   case 0x07: /* POP %ES */
+      dis_pop_segreg( R_ES, sz ); break;
+   case 0x17: /* POP %SS */
+      dis_pop_segreg( R_SS, sz ); break;
+
+   /* ------------------------ PUSH ----------------------- */
+
+   case 0x50: /* PUSH eAX */
+   case 0x51: /* PUSH eCX */
+   case 0x52: /* PUSH eDX */
+   case 0x53: /* PUSH eBX */
+   case 0x55: /* PUSH eBP */
+   case 0x56: /* PUSH eSI */
+   case 0x57: /* PUSH eDI */
+   case 0x54: /* PUSH eSP */
+      /* This is the Right Way, in that the value to be pushed is
+         established before %esp is changed, so that pushl %esp
+         correctly pushes the old value. */
+      vassert(sz == 2 || sz == 4);
+      ty = sz==2 ? Ity_I16 : Ity_I32;
+      t1 = newTemp(ty); t2 = newTemp(Ity_I32);
+      assign(t1, getIReg(sz, opc-0x50));
+      assign(t2, binop(Iop_Sub32, getIReg(4, R_ESP), mkU32(sz)));
+      putIReg(4, R_ESP, mkexpr(t2) );
+      storeLE(mkexpr(t2),mkexpr(t1));
+      DIP("push%c %s\n", nameISize(sz), nameIReg(sz,opc-0x50));
+      break;
+
+
+   case 0x68: /* PUSH Iv */
+      d32 = getUDisp(sz,delta); delta += sz;
+      goto do_push_I;
+   case 0x6A: /* PUSH Ib, sign-extended to sz */
+      d32 = getSDisp8(delta); delta += 1;
+      goto do_push_I;
+   do_push_I:
+      /* Shared tail for both immediate PUSH forms: d32 holds the
+         (possibly sign-extended) immediate, sz the push width. */
+      ty = szToITy(sz);
+      t1 = newTemp(Ity_I32); t2 = newTemp(ty);
+      assign( t1, binop(Iop_Sub32,getIReg(4,R_ESP),mkU32(sz)) );
+      putIReg(4, R_ESP, mkexpr(t1) );
+      /* stop mkU16 asserting if d32 is a negative 16-bit number
+         (bug #132813) */
+      if (ty == Ity_I16)
+         d32 &= 0xFFFF;
+      storeLE( mkexpr(t1), mkU(ty,d32) );
+      DIP("push%c $0x%x\n", nameISize(sz), d32);
+      break;
+
+   case 0x9C: /* PUSHF */ {
+      vassert(sz == 2 || sz == 4);
+
+      t1 = newTemp(Ity_I32);
+      assign( t1, binop(Iop_Sub32,getIReg(4,R_ESP),mkU32(sz)) );
+      putIReg(4, R_ESP, mkexpr(t1) );
+
+      /* Calculate OSZACP, and patch in fixed fields as per
+         Intel docs. 
+         - bit 1 is always 1
+         - bit 9 is Interrupt Enable (should always be 1 in user mode?)
+      */
+      t2 = newTemp(Ity_I32);
+      assign( t2, binop(Iop_Or32, 
+                        mk_x86g_calculate_eflags_all(), 
+                        mkU32( (1<<1)|(1<<9) ) ));
+
+      /* Patch in the D flag.  This can simply be a copy of bit 10 of
+         baseBlock[OFFB_DFLAG]. */
+      t3 = newTemp(Ity_I32);
+      assign( t3, binop(Iop_Or32,
+                        mkexpr(t2),
+                        binop(Iop_And32,
+                              IRExpr_Get(OFFB_DFLAG,Ity_I32),
+                              mkU32(1<<10))) 
+            );
+
+      /* And patch in the ID flag (EFLAGS bit 21). */
+      t4 = newTemp(Ity_I32);
+      assign( t4, binop(Iop_Or32,
+                        mkexpr(t3),
+                        binop(Iop_And32,
+                              binop(Iop_Shl32, IRExpr_Get(OFFB_IDFLAG,Ity_I32), 
+                                               mkU8(21)),
+                              mkU32(1<<21)))
+            );
+
+      /* And patch in the AC flag (EFLAGS bit 18). */
+      t5 = newTemp(Ity_I32);
+      assign( t5, binop(Iop_Or32,
+                        mkexpr(t4),
+                        binop(Iop_And32,
+                              binop(Iop_Shl32, IRExpr_Get(OFFB_ACFLAG,Ity_I32), 
+                                               mkU8(18)),
+                              mkU32(1<<18)))
+            );
+
+      /* if sz==2, the stored value needs to be narrowed. */
+      if (sz == 2)
+        storeLE( mkexpr(t1), unop(Iop_32to16,mkexpr(t5)) );
+      else 
+        storeLE( mkexpr(t1), mkexpr(t5) );
+
+      DIP("pushf%c\n", nameISize(sz));
+      break;
+   }
+
+   case 0x60: /* PUSHA */
+      /* This is almost certainly wrong for sz==2.  So ... */
+      if (sz != 4) goto decode_failure;
+
+      /* This is the Right Way, in that the value to be pushed is
+         established before %esp is changed, so that pusha
+         correctly pushes the old %esp value.  New value of %esp is
+         pushed at start. */
+      /* t0 is the %ESP value we're going to push. */
+      t0 = newTemp(Ity_I32);
+      assign( t0, getIReg(4, R_ESP) );
+
+      /* t5 will be the new %ESP value. */
+      t5 = newTemp(Ity_I32);
+      assign( t5, binop(Iop_Sub32, mkexpr(t0), mkU32(8*4)) );
+
+      /* Update guest state before prodding memory. */
+      putIReg(4, R_ESP, mkexpr(t5));
+
+      /* Dump all the registers. */
+      storeLE( binop(Iop_Add32,mkexpr(t5),mkU32(28)), getIReg(4,R_EAX) );
+      storeLE( binop(Iop_Add32,mkexpr(t5),mkU32(24)), getIReg(4,R_ECX) );
+      storeLE( binop(Iop_Add32,mkexpr(t5),mkU32(20)), getIReg(4,R_EDX) );
+      storeLE( binop(Iop_Add32,mkexpr(t5),mkU32(16)), getIReg(4,R_EBX) );
+      storeLE( binop(Iop_Add32,mkexpr(t5),mkU32(12)), mkexpr(t0) /*esp*/);
+      storeLE( binop(Iop_Add32,mkexpr(t5),mkU32( 8)), getIReg(4,R_EBP) );
+      storeLE( binop(Iop_Add32,mkexpr(t5),mkU32( 4)), getIReg(4,R_ESI) );
+      storeLE( binop(Iop_Add32,mkexpr(t5),mkU32( 0)), getIReg(4,R_EDI) );
+
+      DIP("pusha%c\n", nameISize(sz));
+      break;
+
+   case 0x0E: /* PUSH %CS */
+      dis_push_segreg( R_CS, sz ); break;
+   case 0x1E: /* PUSH %DS */
+      dis_push_segreg( R_DS, sz ); break;
+   case 0x06: /* PUSH %ES */
+      dis_push_segreg( R_ES, sz ); break;
+   case 0x16: /* PUSH %SS */
+      dis_push_segreg( R_SS, sz ); break;
+
+   /* ------------------------ SCAS et al ----------------- */
+
+   case 0xA4: /* MOVS, no REP prefix */
+   case 0xA5: 
+      /* 0xA4 is the byte form; 0xA5 moves at the current operand
+         size.  Segment overrides are rejected up front. */
+      if (sorb != 0)
+         goto decode_failure; /* else dis_string_op asserts */
+      dis_string_op( dis_MOVS, ( opc == 0xA4 ? 1 : sz ), "movs", sorb );
+      break;
+
+   case 0xA6: /* CMPSb, no REP prefix */
+   case 0xA7:
+      /* 0xA6 is the byte form; 0xA7 compares at the current operand
+         size.  Segment overrides are rejected up front. */
+      if (sorb != 0)
+         goto decode_failure; /* else dis_string_op asserts */
+      dis_string_op( dis_CMPS, ( opc == 0xA6 ? 1 : sz ), "cmps", sorb );
+      break;
+
+   case 0xAA: /* STOS, no REP prefix */
+   case 0xAB:
+      if (sorb != 0)
+         goto decode_failure; /* else dis_string_op asserts */
+      dis_string_op( dis_STOS, ( opc == 0xAA ? 1 : sz ), "stos", sorb );
+      break;
+
+   case 0xAC: /* LODS, no REP prefix */
+   case 0xAD:
+      if (sorb != 0)
+         goto decode_failure; /* else dis_string_op asserts */
+      dis_string_op( dis_LODS, ( opc == 0xAC ? 1 : sz ), "lods", sorb );
+      break;
+
+   case 0xAE: /* SCAS, no REP prefix */
+   case 0xAF:
+      if (sorb != 0) 
+         goto decode_failure; /* else dis_string_op asserts */
+      dis_string_op( dis_SCAS, ( opc == 0xAE ? 1 : sz ), "scas", sorb );
+      break;
+
+
+   /* Direction flag: stored in the guest state as +1 (forward,
+      after CLD) or -1 i.e. 0xFFFFFFFF (backward, after STD),
+      rather than as the architectural 0/1 bit. */
+   case 0xFC: /* CLD */
+      stmt( IRStmt_Put( OFFB_DFLAG, mkU32(1)) );
+      DIP("cld\n");
+      break;
+
+   case 0xFD: /* STD */
+      stmt( IRStmt_Put( OFFB_DFLAG, mkU32(0xFFFFFFFF)) );
+      DIP("std\n");
+      break;
+
+   case 0xF8: /* CLC */
+   case 0xF9: /* STC */
+   case 0xF5: /* CMC */
+      /* All three: materialise the full eflags value, twiddle just
+         the C bit, then store the result back via the OP_COPY
+         thunk encoding. */
+      t0 = newTemp(Ity_I32);
+      t1 = newTemp(Ity_I32);
+      assign( t0, mk_x86g_calculate_eflags_all() );
+      switch (opc) {
+         case 0xF8: 
+            assign( t1, binop(Iop_And32, mkexpr(t0), 
+                                         mkU32(~X86G_CC_MASK_C)));
+            DIP("clc\n");
+            break;
+         case 0xF9: 
+            assign( t1, binop(Iop_Or32, mkexpr(t0), 
+                                        mkU32(X86G_CC_MASK_C)));
+            DIP("stc\n");
+            break;
+         case 0xF5: 
+            assign( t1, binop(Iop_Xor32, mkexpr(t0), 
+                                         mkU32(X86G_CC_MASK_C)));
+            DIP("cmc\n");
+            break;
+         default: 
+            vpanic("disInstr(x86)(clc/stc/cmc)");
+      }
+      stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+      stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(t1) ));
+      /* Set NDEP even though it isn't used.  This makes redundant-PUT
+         elimination of previous stores to this field work better. */
+      stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+      break;
+
+   case 0xD6: /* SALC */
+      /* AL = C ? 0xFF : 0x00; the shl-31/sar-31 pair sign-extends
+         the single carry bit across all 32 bits. */
+      t0 = newTemp(Ity_I32);
+      t1 = newTemp(Ity_I32);
+      assign( t0,  binop(Iop_And32,
+                         mk_x86g_calculate_eflags_c(),
+                         mkU32(1)) );
+      assign( t1, binop(Iop_Sar32, 
+                        binop(Iop_Shl32, mkexpr(t0), mkU8(31)), 
+                        mkU8(31)) );
+      putIReg(1, R_EAX, unop(Iop_32to8, mkexpr(t1)) );
+      DIP("salc\n");
+      break;
+
+   /* REPNE prefix insn */
+   case 0xF2: { 
+      Addr32 eip_orig = guest_EIP_bbstart + delta_start;
+      if (sorb != 0) goto decode_failure;
+      abyte = getIByte(delta); delta++;
+
+      if (abyte == 0x66) { sz = 2; abyte = getIByte(delta); delta++; }
+
+      switch (abyte) {
+      /* According to the Intel manual, "repne movs" should never occur, but
+       * in practice it has happened, so allow for it here... */
+      case 0xA4: sz = 1;   /* REPNE MOVS<sz> */
+         /* fall through -- byte form shares the tail below */
+      case 0xA5: 
+         dis_REP_op ( &dres, X86CondNZ, dis_MOVS, sz, eip_orig,
+                             guest_EIP_bbstart+delta, "repne movs" );
+         break;
+
+      case 0xA6: sz = 1;   /* REPNE CMP<sz> */
+         /* fall through */
+      case 0xA7:
+         dis_REP_op ( &dres, X86CondNZ, dis_CMPS, sz, eip_orig, 
+                             guest_EIP_bbstart+delta, "repne cmps" );
+         break;
+
+      case 0xAA: sz = 1;   /* REPNE STOS<sz> */
+         /* fall through */
+      case 0xAB:
+         dis_REP_op ( &dres, X86CondNZ, dis_STOS, sz, eip_orig, 
+                             guest_EIP_bbstart+delta, "repne stos" );
+         break;
+
+      case 0xAE: sz = 1;   /* REPNE SCAS<sz> */
+         /* fall through */
+      case 0xAF:
+         dis_REP_op ( &dres, X86CondNZ, dis_SCAS, sz, eip_orig,
+                             guest_EIP_bbstart+delta, "repne scas" );
+         break;
+
+      default:
+         goto decode_failure;
+      }
+      break;
+   }
+
+   /* REP/REPE prefix insn (for SCAS and CMPS, 0xF3 means REPE,
+      for the rest, it means REP) */
+   case 0xF3: { 
+      Addr32 eip_orig = guest_EIP_bbstart + delta_start;
+      abyte = getIByte(delta); delta++;
+
+      if (abyte == 0x66) { sz = 2; abyte = getIByte(delta); delta++; }
+
+      if (sorb != 0 && abyte != 0x0F) goto decode_failure;
+
+      switch (abyte) {
+      case 0x0F:
+         switch (getIByte(delta)) {
+         /* On older CPUs, TZCNT behaves the same as BSF.  */
+         case 0xBC: /* REP BSF Gv,Ev */
+            delta = dis_bs_E_G ( sorb, sz, delta + 1, True );
+            break;
+         /* On older CPUs, LZCNT behaves the same as BSR.  */
+         case 0xBD: /* REP BSR Gv,Ev */
+            delta = dis_bs_E_G ( sorb, sz, delta + 1, False );
+            break;
+         default:
+            goto decode_failure;
+         }
+         break;
+
+      case 0xA4: sz = 1;   /* REP MOVS<sz> */
+         /* fall through */
+      case 0xA5:
+         dis_REP_op ( &dres, X86CondAlways, dis_MOVS, sz, eip_orig, 
+                             guest_EIP_bbstart+delta, "rep movs" );
+         break;
+
+      case 0xA6: sz = 1;   /* REPE CMP<sz> */
+         /* fall through */
+      case 0xA7:
+         dis_REP_op ( &dres, X86CondZ, dis_CMPS, sz, eip_orig, 
+                             guest_EIP_bbstart+delta, "repe cmps" );
+         break;
+
+      case 0xAA: sz = 1;   /* REP STOS<sz> */
+         /* fall through */
+      case 0xAB:
+         dis_REP_op ( &dres, X86CondAlways, dis_STOS, sz, eip_orig, 
+                             guest_EIP_bbstart+delta, "rep stos" );
+         break;
+
+      case 0xAC: sz = 1;   /* REP LODS<sz> */
+         /* fall through */
+      case 0xAD:
+         dis_REP_op ( &dres, X86CondAlways, dis_LODS, sz, eip_orig, 
+                             guest_EIP_bbstart+delta, "rep lods" );
+         break;
+
+      case 0xAE: sz = 1;   /* REPE SCAS<sz> */
+         /* fall through */
+      case 0xAF: 
+         dis_REP_op ( &dres, X86CondZ, dis_SCAS, sz, eip_orig, 
+                             guest_EIP_bbstart+delta, "repe scas" );
+         break;
+      
+      case 0x90:           /* REP NOP (PAUSE) */
+         /* a hint to the P4 re spin-wait loop */
+         DIP("rep nop (P4 pause)\n");
+         /* "observe" the hint.  The Vex client needs to be careful not
+            to cause very long delays as a result, though. */
+         jmp_lit(&dres, Ijk_Yield, ((Addr32)guest_EIP_bbstart)+delta);
+         vassert(dres.whatNext == Dis_StopHere);
+         break;
+
+      case 0xC3:           /* REP RET -- same as normal ret? */
+         dis_ret(&dres, 0);
+         DIP("rep ret\n");
+         break;
+
+      default:
+         goto decode_failure;
+      }
+      break;
+   }
+
+   /* ------------------------ XCHG ----------------------- */
+
+   /* XCHG reg,mem automatically asserts LOCK# even without a LOCK
+      prefix; hence it must be translated with an IRCAS (at least, the
+      memory variant). */
+   case 0x86: /* XCHG Gb,Eb */
+      sz = 1;
+      /* Fall through ... */
+   case 0x87: /* XCHG Gv,Ev */
+      modrm = getIByte(delta);
+      ty = szToITy(sz);
+      t1 = newTemp(ty); t2 = newTemp(ty);
+      if (epartIsReg(modrm)) {
+         /* Register-register form: plain swap, no CAS needed. */
+         assign(t1, getIReg(sz, eregOfRM(modrm)));
+         assign(t2, getIReg(sz, gregOfRM(modrm)));
+         putIReg(sz, gregOfRM(modrm), mkexpr(t1));
+         putIReg(sz, eregOfRM(modrm), mkexpr(t2));
+         delta++;
+         DIP("xchg%c %s, %s\n", 
+             nameISize(sz), nameIReg(sz,gregOfRM(modrm)), 
+                            nameIReg(sz,eregOfRM(modrm)));
+      } else {
+         /* Memory form: tell the caller a CAS is expected in the
+            generated IR, then do the swap with casLE. */
+         *expect_CAS = True;
+         addr = disAMode ( &alen, sorb, delta, dis_buf );
+         assign( t1, loadLE(ty,mkexpr(addr)) );
+         assign( t2, getIReg(sz,gregOfRM(modrm)) );
+         casLE( mkexpr(addr),
+                mkexpr(t1), mkexpr(t2), guest_EIP_curr_instr );
+         putIReg( sz, gregOfRM(modrm), mkexpr(t1) );
+         delta += alen;
+         DIP("xchg%c %s, %s\n", nameISize(sz), 
+                                nameIReg(sz,gregOfRM(modrm)), dis_buf);
+      }
+      break;
+
+   case 0x90: /* XCHG eAX,eAX -- the canonical NOP encoding */
+      DIP("nop\n");
+      break;
+   case 0x91: /* XCHG eAX,eCX */
+   case 0x92: /* XCHG eAX,eDX */
+   case 0x93: /* XCHG eAX,eBX */
+   case 0x94: /* XCHG eAX,eSP */
+   case 0x95: /* XCHG eAX,eBP */
+   case 0x96: /* XCHG eAX,eSI */
+   case 0x97: /* XCHG eAX,eDI */
+      codegen_xchg_eAX_Reg ( sz, opc - 0x90 );
+      break;
+
+   /* ------------------------ XLAT ----------------------- */
+
+   case 0xD7: /* XLAT */
+      if (sz != 4) goto decode_failure; /* sz == 2 is also allowed (0x66) */
+      /* AL = *(seg:EBX + zero-extended AL) */
+      putIReg( 
+         1, 
+         R_EAX/*AL*/,
+         loadLE(Ity_I8, 
+                handleSegOverride( 
+                   sorb, 
+                   binop(Iop_Add32, 
+                         getIReg(4, R_EBX), 
+                         unop(Iop_8Uto32, getIReg(1, R_EAX/*AL*/))))));
+
+      DIP("xlat%c [ebx]\n", nameISize(sz));
+      break;
+
+   /* ------------------------ IN / OUT ----------------------- */
+
+   /* The four IN forms all funnel into do_IN with sz set to the
+      access width and t1 holding the port number as an I32; the
+      actual port read happens in the x86g_dirtyhelper_IN dirty
+      helper at run time. */
+   case 0xE4: /* IN imm8, AL */
+      sz = 1; 
+      t1 = newTemp(Ity_I32);
+      abyte = getIByte(delta); delta++;
+      assign(t1, mkU32( abyte & 0xFF ));
+      DIP("in%c $%d,%s\n", nameISize(sz), (Int)abyte, nameIReg(sz,R_EAX));
+      goto do_IN;
+   case 0xE5: /* IN imm8, eAX */
+      vassert(sz == 2 || sz == 4);
+      t1 = newTemp(Ity_I32);
+      abyte = getIByte(delta); delta++;
+      assign(t1, mkU32( abyte & 0xFF ));
+      DIP("in%c $%d,%s\n", nameISize(sz), (Int)abyte, nameIReg(sz,R_EAX));
+      goto do_IN;
+   case 0xEC: /* IN %DX, AL */
+      sz = 1; 
+      t1 = newTemp(Ity_I32);
+      assign(t1, unop(Iop_16Uto32, getIReg(2, R_EDX)));
+      DIP("in%c %s,%s\n", nameISize(sz), nameIReg(2,R_EDX), 
+                                         nameIReg(sz,R_EAX));
+      goto do_IN;
+   case 0xED: /* IN %DX, eAX */
+      vassert(sz == 2 || sz == 4);
+      t1 = newTemp(Ity_I32);
+      assign(t1, unop(Iop_16Uto32, getIReg(2, R_EDX)));
+      DIP("in%c %s,%s\n", nameISize(sz), nameIReg(2,R_EDX), 
+                                         nameIReg(sz,R_EAX));
+      goto do_IN;
+   do_IN: {
+      /* At this point, sz indicates the width, and t1 is a 32-bit
+         value giving port number. */
+      IRDirty* d;
+      vassert(sz == 1 || sz == 2 || sz == 4);
+      ty = szToITy(sz);
+      t2 = newTemp(Ity_I32);
+      d = unsafeIRDirty_1_N( 
+             t2,
+             0/*regparms*/, 
+             "x86g_dirtyhelper_IN", 
+             &x86g_dirtyhelper_IN,
+             mkIRExprVec_2( mkexpr(t1), mkU32(sz) )
+          );
+      /* do the call, dumping the result in t2. */
+      stmt( IRStmt_Dirty(d) );
+      putIReg(sz, R_EAX, narrowTo( ty, mkexpr(t2) ) );
+      break;
+   }
+
+   /* The four OUT forms mirror IN: sz = width, t1 = port number,
+      and x86g_dirtyhelper_OUT performs the write at run time. */
+   case 0xE6: /* OUT AL, imm8 */
+      sz = 1;
+      t1 = newTemp(Ity_I32);
+      abyte = getIByte(delta); delta++;
+      assign( t1, mkU32( abyte & 0xFF ) );
+      DIP("out%c %s,$%d\n", nameISize(sz), nameIReg(sz,R_EAX), (Int)abyte);
+      goto do_OUT;
+   case 0xE7: /* OUT eAX, imm8 */
+      vassert(sz == 2 || sz == 4);
+      t1 = newTemp(Ity_I32);
+      abyte = getIByte(delta); delta++;
+      assign( t1, mkU32( abyte & 0xFF ) );
+      DIP("out%c %s,$%d\n", nameISize(sz), nameIReg(sz,R_EAX), (Int)abyte);
+      goto do_OUT;
+   case 0xEE: /* OUT AL, %DX */
+      sz = 1;
+      t1 = newTemp(Ity_I32);
+      assign( t1, unop(Iop_16Uto32, getIReg(2, R_EDX)) );
+      DIP("out%c %s,%s\n", nameISize(sz), nameIReg(sz,R_EAX),
+                                          nameIReg(2,R_EDX));
+      goto do_OUT;
+   case 0xEF: /* OUT eAX, %DX */
+      vassert(sz == 2 || sz == 4);
+      t1 = newTemp(Ity_I32);
+      assign( t1, unop(Iop_16Uto32, getIReg(2, R_EDX)) );
+      DIP("out%c %s,%s\n", nameISize(sz), nameIReg(sz,R_EAX),
+                                          nameIReg(2,R_EDX));
+      goto do_OUT;
+   do_OUT: {
+      /* At this point, sz indicates the width, and t1 is a 32-bit
+         value giving port number. */
+      IRDirty* d;
+      vassert(sz == 1 || sz == 2 || sz == 4);
+      ty = szToITy(sz);
+      d = unsafeIRDirty_0_N( 
+             0/*regparms*/, 
+             "x86g_dirtyhelper_OUT", 
+             &x86g_dirtyhelper_OUT,
+             mkIRExprVec_3( mkexpr(t1),
+                            widenUto32( getIReg(sz, R_EAX) ), 
+                            mkU32(sz) )
+          );
+      stmt( IRStmt_Dirty(d) );
+      break;
+   }
+
+   /* ------------------------ (Grp1 extensions) ---------- */
+
+   case 0x82: /* Grp1 Ib,Eb too.  Apparently this is the same as 
+                 case 0x80, but only in 32-bit mode. */
+      /* fallthru */
+   case 0x80: /* Grp1 Ib,Eb */
+      modrm = getIByte(delta);
+      am_sz = lengthAMode(delta);
+      sz    = 1;
+      d_sz  = 1;
+      d32   = getUChar(delta + am_sz);
+      delta = dis_Grp1 ( sorb, pfx_lock, delta, modrm, am_sz, d_sz, sz, d32 );
+      break;
+
+   case 0x81: /* Grp1 Iv,Ev */
+      modrm = getIByte(delta);
+      am_sz = lengthAMode(delta);
+      d_sz  = sz;
+      d32   = getUDisp(d_sz, delta + am_sz);
+      delta = dis_Grp1 ( sorb, pfx_lock, delta, modrm, am_sz, d_sz, sz, d32 );
+      break;
+
+   case 0x83: /* Grp1 Ib,Ev */
+      /* 1-byte immediate, sign-extended to the operand size. */
+      modrm = getIByte(delta);
+      am_sz = lengthAMode(delta);
+      d_sz  = 1;
+      d32   = getSDisp8(delta + am_sz);
+      delta = dis_Grp1 ( sorb, pfx_lock, delta, modrm, am_sz, d_sz, sz, d32 );
+      break;
+
+   /* ------------------------ (Grp2 extensions) ---------- */
+
+   case 0xC0: { /* Grp2 Ib,Eb */
+      Bool decode_OK = True;
+      modrm = getIByte(delta);
+      am_sz = lengthAMode(delta);
+      d_sz  = 1;
+      d32   = getUChar(delta + am_sz);
+      sz    = 1;
+      delta = dis_Grp2 ( sorb, delta, modrm, am_sz, d_sz, sz, 
+                         mkU8(d32 & 0xFF), NULL, &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+   case 0xC1: { /* Grp2 Ib,Ev */
+      Bool decode_OK = True;
+      modrm = getIByte(delta);
+      am_sz = lengthAMode(delta);
+      d_sz  = 1;
+      d32   = getUChar(delta + am_sz);
+      delta = dis_Grp2 ( sorb, delta, modrm, am_sz, d_sz, sz, 
+                         mkU8(d32 & 0xFF), NULL, &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+   case 0xD0: { /* Grp2 1,Eb */
+      Bool decode_OK = True;
+      modrm = getIByte(delta);
+      am_sz = lengthAMode(delta);
+      d_sz  = 0;
+      d32   = 1;
+      sz    = 1;
+      delta = dis_Grp2 ( sorb, delta, modrm, am_sz, d_sz, sz, 
+                         mkU8(d32), NULL, &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+   case 0xD1: { /* Grp2 1,Ev */
+      Bool decode_OK = True;
+      /* NOTE(review): getUChar here vs getIByte in the sibling Grp2
+         cases -- presumably equivalent byte fetchers; confirm. */
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(delta);
+      d_sz  = 0;
+      d32   = 1;
+      delta = dis_Grp2 ( sorb, delta, modrm, am_sz, d_sz, sz, 
+                         mkU8(d32), NULL, &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+   case 0xD2: { /* Grp2 CL,Eb */
+      Bool decode_OK = True;
+      modrm = getUChar(delta);
+      am_sz = lengthAMode(delta);
+      d_sz  = 0;
+      sz    = 1;
+      delta = dis_Grp2 ( sorb, delta, modrm, am_sz, d_sz, sz, 
+                         getIReg(1,R_ECX), "%cl", &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+   case 0xD3: { /* Grp2 CL,Ev */
+      Bool decode_OK = True;
+      modrm = getIByte(delta);
+      am_sz = lengthAMode(delta);
+      d_sz  = 0;
+      delta = dis_Grp2 ( sorb, delta, modrm, am_sz, d_sz, sz, 
+                         getIReg(1,R_ECX), "%cl", &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+
+   /* ------------------------ (Grp3 extensions) ---------- */
+
+   case 0xF6: { /* Grp3 Eb */
+      Bool decode_OK = True;
+      delta = dis_Grp3 ( sorb, pfx_lock, 1, delta, &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+   case 0xF7: { /* Grp3 Ev */
+      Bool decode_OK = True;
+      delta = dis_Grp3 ( sorb, pfx_lock, sz, delta, &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+
+   /* ------------------------ (Grp4 extensions) ---------- */
+
+   case 0xFE: { /* Grp4 Eb */
+      Bool decode_OK = True;
+      delta = dis_Grp4 ( sorb, pfx_lock, delta, &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+
+   /* ------------------------ (Grp5 extensions) ---------- */
+
+   case 0xFF: { /* Grp5 Ev */
+      Bool decode_OK = True;
+      delta = dis_Grp5 ( sorb, pfx_lock, sz, delta, &dres, &decode_OK );
+      if (!decode_OK)
+         goto decode_failure;
+      break;
+   }
+
+   /* ------------------------ Escapes to 2-byte opcodes -- */
+
+   case 0x0F: {
+      opc = getIByte(delta); delta++;
+      switch (opc) {
+
+      /* =-=-=-=-=-=-=-=-=- Grp8 =-=-=-=-=-=-=-=-=-=-=-= */
+
+      case 0xBA: { /* Grp8 Ib,Ev */
+         Bool decode_OK = False;
+         modrm = getUChar(delta);
+         am_sz = lengthAMode(delta);
+         d32   = getSDisp8(delta + am_sz);
+         delta = dis_Grp8_Imm ( sorb, pfx_lock, delta, modrm, 
+                                am_sz, sz, d32, &decode_OK );
+         if (!decode_OK)
+            goto decode_failure;
+         break;
+      }
+
+      /* =-=-=-=-=-=-=-=-=- BSF/BSR -=-=-=-=-=-=-=-=-=-= */
+
+      case 0xBC: /* BSF Gv,Ev */
+         delta = dis_bs_E_G ( sorb, sz, delta, True );
+         break;
+      case 0xBD: /* BSR Gv,Ev */
+         delta = dis_bs_E_G ( sorb, sz, delta, False );
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- BSWAP -=-=-=-=-=-=-=-=-=-=-= */
+
+      case 0xC8: /* BSWAP %eax */
+      case 0xC9:
+      case 0xCA:
+      case 0xCB:
+      case 0xCC:
+      case 0xCD:
+      case 0xCE:
+      case 0xCF: /* BSWAP %edi */
+         /* AFAICS from the Intel docs, this only exists at size 4. */
+         if (sz != 4) goto decode_failure;
+         
+         /* Register number is encoded in the low 3 bits of the
+            opcode (opc - 0xC8). */
+         t1 = newTemp(Ity_I32);
+         assign( t1, getIReg(4, opc-0xC8) );
+         t2 = math_BSWAP(t1, Ity_I32);
+
+         putIReg(4, opc-0xC8, mkexpr(t2));
+         DIP("bswapl %s\n", nameIReg(4, opc-0xC8));
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- BT/BTS/BTR/BTC =-=-=-=-=-=-= */
+
+      case 0xA3: /* BT Gv,Ev */
+         delta = dis_bt_G_E ( vbi, sorb, pfx_lock, sz, delta, BtOpNone );
+         break;
+      case 0xB3: /* BTR Gv,Ev */
+         delta = dis_bt_G_E ( vbi, sorb, pfx_lock, sz, delta, BtOpReset );
+         break;
+      case 0xAB: /* BTS Gv,Ev */
+         delta = dis_bt_G_E ( vbi, sorb, pfx_lock, sz, delta, BtOpSet );
+         break;
+      case 0xBB: /* BTC Gv,Ev */
+         delta = dis_bt_G_E ( vbi, sorb, pfx_lock, sz, delta, BtOpComp );
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- CMOV =-=-=-=-=-=-=-=-=-=-=-= */
+ 
+      /* The condition code is the low nibble of the opcode
+         (opc - 0x40), passed straight through as an X86Condcode. */
+      case 0x40:
+      case 0x41:
+      case 0x42: /* CMOVBb/CMOVNAEb (cmov below) */
+      case 0x43: /* CMOVNBb/CMOVAEb (cmov not below) */
+      case 0x44: /* CMOVZb/CMOVEb (cmov zero) */
+      case 0x45: /* CMOVNZb/CMOVNEb (cmov not zero) */
+      case 0x46: /* CMOVBEb/CMOVNAb (cmov below or equal) */
+      case 0x47: /* CMOVNBEb/CMOVAb (cmov not below or equal) */
+      case 0x48: /* CMOVSb (cmov negative) */
+      case 0x49: /* CMOVSb (cmov not negative) */
+      case 0x4A: /* CMOVP (cmov parity even) */
+      case 0x4B: /* CMOVNP (cmov parity odd) */
+      case 0x4C: /* CMOVLb/CMOVNGEb (cmov less) */
+      case 0x4D: /* CMOVGEb/CMOVNLb (cmov greater or equal) */
+      case 0x4E: /* CMOVLEb/CMOVNGb (cmov less or equal) */
+      case 0x4F: /* CMOVGb/CMOVNLEb (cmov greater) */
+         delta = dis_cmov_E_G(sorb, sz, (X86Condcode)(opc - 0x40), delta);
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- CMPXCHG -=-=-=-=-=-=-=-=-=-= */
+
+      /* CMPXCHG: byte form (0xB0) and operand-size form (0xB1). */
+      case 0xB0: /* CMPXCHG Gb,Eb */
+         delta = dis_cmpxchg_G_E ( sorb, pfx_lock, 1, delta );
+         break;
+      case 0xB1: /* CMPXCHG Gv,Ev */
+         delta = dis_cmpxchg_G_E ( sorb, pfx_lock, sz, delta );
+         break;
+
+      case 0xC7: { /* CMPXCHG8B Gv (0F C7 /1) */
+         IRTemp expdHi    = newTemp(Ity_I32);
+         IRTemp expdLo    = newTemp(Ity_I32);
+         IRTemp dataHi    = newTemp(Ity_I32);
+         IRTemp dataLo    = newTemp(Ity_I32);
+         IRTemp oldHi     = newTemp(Ity_I32);
+         IRTemp oldLo     = newTemp(Ity_I32);
+         IRTemp flags_old = newTemp(Ity_I32);
+         IRTemp flags_new = newTemp(Ity_I32);
+         IRTemp success   = newTemp(Ity_I1);
+
+         /* Translate this using a DCAS, even if there is no LOCK
+            prefix.  Life is too short to bother with generating two
+            different translations for the with/without-LOCK-prefix
+            cases. */
+         *expect_CAS = True;
+
+	 /* Decode, and generate address. */
+         if (sz != 4) goto decode_failure;
+         modrm = getIByte(delta);
+         if (epartIsReg(modrm)) goto decode_failure;
+         if (gregOfRM(modrm) != 1) goto decode_failure;
+         addr = disAMode ( &alen, sorb, delta, dis_buf );
+         delta += alen;
+
+         /* Get the expected and new values. */
+         assign( expdHi, getIReg(4,R_EDX) );
+         assign( expdLo, getIReg(4,R_EAX) );
+         assign( dataHi, getIReg(4,R_ECX) );
+         assign( dataLo, getIReg(4,R_EBX) );
+
+         /* Do the DCAS */
+         stmt( IRStmt_CAS(
+                  mkIRCAS( oldHi, oldLo, 
+                           Iend_LE, mkexpr(addr), 
+                           mkexpr(expdHi), mkexpr(expdLo),
+                           mkexpr(dataHi), mkexpr(dataLo)
+               )));
+
+         /* success when oldHi:oldLo == expdHi:expdLo; the OR of the
+            two XORs is zero exactly when both halves match. */
+         assign( success,
+                 binop(Iop_CasCmpEQ32,
+                       binop(Iop_Or32,
+                             binop(Iop_Xor32, mkexpr(oldHi), mkexpr(expdHi)),
+                             binop(Iop_Xor32, mkexpr(oldLo), mkexpr(expdLo))
+                       ),
+                       mkU32(0)
+                 ));
+
+         /* If the DCAS is successful, that is to say oldHi:oldLo ==
+            expdHi:expdLo, then put expdHi:expdLo back in EDX:EAX,
+            which is where they came from originally.  Both the actual
+            contents of these two regs, and any shadow values, are
+            unchanged.  If the DCAS fails then we're putting into
+            EDX:EAX the value seen in memory. */
+         putIReg(4, R_EDX,
+                    IRExpr_ITE( mkexpr(success),
+                                mkexpr(expdHi), mkexpr(oldHi)
+                ));
+         putIReg(4, R_EAX,
+                    IRExpr_ITE( mkexpr(success),
+                                mkexpr(expdLo), mkexpr(oldLo)
+                ));
+
+         /* Copy the success bit into the Z flag and leave the others
+            unchanged */
+         assign( flags_old, widenUto32(mk_x86g_calculate_eflags_all()));
+         assign( 
+            flags_new,
+            binop(Iop_Or32,
+                  binop(Iop_And32, mkexpr(flags_old), 
+                                   mkU32(~X86G_CC_MASK_Z)),
+                  binop(Iop_Shl32, 
+                        binop(Iop_And32, 
+                              unop(Iop_1Uto32, mkexpr(success)), mkU32(1)), 
+                        mkU8(X86G_CC_SHIFT_Z)) ));
+
+         stmt( IRStmt_Put( OFFB_CC_OP,   mkU32(X86G_CC_OP_COPY) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(flags_new) ));
+         stmt( IRStmt_Put( OFFB_CC_DEP2, mkU32(0) ));
+         /* Set NDEP even though it isn't used.  This makes
+            redundant-PUT elimination of previous stores to this field
+            work better. */
+         stmt( IRStmt_Put( OFFB_CC_NDEP, mkU32(0) ));
+
+         /* Sheesh.  Aren't you glad it was me and not you that had to
+	    write and validate all this grunge? */
+
+	 DIP("cmpxchg8b %s\n", dis_buf);
+	 break;
+      }
+
+      /* =-=-=-=-=-=-=-=-=- CPUID -=-=-=-=-=-=-=-=-=-=-= */
+
+      case 0xA2: { /* CPUID */
+         /* Uses dirty helper: 
+               void dirtyhelper_CPUID_sse[012] ( VexGuestX86State* )
+            declared to mod eax, wr ebx, ecx, edx
+         */
+         IRDirty* d     = NULL;
+         void*    fAddr = NULL;
+         const HChar* fName = NULL;
+         if (archinfo->hwcaps & VEX_HWCAPS_X86_SSE2) {
+            fName = "x86g_dirtyhelper_CPUID_sse2";
+            fAddr = &x86g_dirtyhelper_CPUID_sse2; 
+         } 
+         else
+         if (archinfo->hwcaps & VEX_HWCAPS_X86_SSE1) {
+            fName = "x86g_dirtyhelper_CPUID_sse1";
+            fAddr = &x86g_dirtyhelper_CPUID_sse1; 
+         } 
+         else
+         if (archinfo->hwcaps & VEX_HWCAPS_X86_MMXEXT) {
+            fName = "x86g_dirtyhelper_CPUID_mmxext";
+            fAddr = &x86g_dirtyhelper_CPUID_mmxext;
+         }
+         else
+         if (archinfo->hwcaps == 0/*no SSE*/) {
+            fName = "x86g_dirtyhelper_CPUID_sse0";
+            fAddr = &x86g_dirtyhelper_CPUID_sse0; 
+         } else
+            vpanic("disInstr(x86)(cpuid)");
+
+         vassert(fName); vassert(fAddr);
+         d = unsafeIRDirty_0_N ( 0/*regparms*/, 
+                                 fName, fAddr, mkIRExprVec_1(IRExpr_BBPTR()) );
+         /* declare guest state effects */
+         d->nFxState = 4;
+         vex_bzero(&d->fxState, sizeof(d->fxState));
+         d->fxState[0].fx     = Ifx_Modify;
+         d->fxState[0].offset = OFFB_EAX;
+         d->fxState[0].size   = 4;
+         d->fxState[1].fx     = Ifx_Write;
+         d->fxState[1].offset = OFFB_EBX;
+         d->fxState[1].size   = 4;
+         d->fxState[2].fx     = Ifx_Modify;
+         d->fxState[2].offset = OFFB_ECX;
+         d->fxState[2].size   = 4;
+         d->fxState[3].fx     = Ifx_Write;
+         d->fxState[3].offset = OFFB_EDX;
+         d->fxState[3].size   = 4;
+         /* execute the dirty call, side-effecting guest state */
+         stmt( IRStmt_Dirty(d) );
+         /* CPUID is a serialising insn.  So, just in case someone is
+            using it as a memory fence ... */
+         stmt( IRStmt_MBE(Imbe_Fence) );
+         DIP("cpuid\n");
+         break;
+      }
+
+//--          if (!VG_(cpu_has_feature)(VG_X86_FEAT_CPUID))
+//--             goto decode_failure;
+//-- 
+//--          t1 = newTemp(cb);
+//--          t2 = newTemp(cb);
+//--          t3 = newTemp(cb);
+//--          t4 = newTemp(cb);
+//--          uInstr0(cb, CALLM_S, 0);
+//-- 
+//--          uInstr2(cb, GET,   4, ArchReg, R_EAX, TempReg, t1);
+//--          uInstr1(cb, PUSH,  4, TempReg, t1);
+//-- 
+//--          uInstr2(cb, MOV,   4, Literal, 0, TempReg, t2);
+//--          uLiteral(cb, 0);
+//--          uInstr1(cb, PUSH,  4, TempReg, t2);
+//-- 
+//--          uInstr2(cb, MOV,   4, Literal, 0, TempReg, t3);
+//--          uLiteral(cb, 0);
+//--          uInstr1(cb, PUSH,  4, TempReg, t3);
+//-- 
+//--          uInstr2(cb, MOV,   4, Literal, 0, TempReg, t4);
+//--          uLiteral(cb, 0);
+//--          uInstr1(cb, PUSH,  4, TempReg, t4);
+//-- 
+//--          uInstr1(cb, CALLM, 0, Lit16,   VGOFF_(helper_CPUID));
+//--          uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty);
+//-- 
+//--          uInstr1(cb, POP,   4, TempReg, t4);
+//--          uInstr2(cb, PUT,   4, TempReg, t4, ArchReg, R_EDX);
+//-- 
+//--          uInstr1(cb, POP,   4, TempReg, t3);
+//--          uInstr2(cb, PUT,   4, TempReg, t3, ArchReg, R_ECX);
+//-- 
+//--          uInstr1(cb, POP,   4, TempReg, t2);
+//--          uInstr2(cb, PUT,   4, TempReg, t2, ArchReg, R_EBX);
+//-- 
+//--          uInstr1(cb, POP,   4, TempReg, t1);
+//--          uInstr2(cb, PUT,   4, TempReg, t1, ArchReg, R_EAX);
+//-- 
+//--          uInstr0(cb, CALLM_E, 0);
+//--          DIP("cpuid\n");
+//--          break;
+//-- 
+      /* =-=-=-=-=-=-=-=-=- MOVZX, MOVSX =-=-=-=-=-=-=-= */
+
+      case 0xB6: /* MOVZXb Eb,Gv */
+         if (sz != 2 && sz != 4)
+            goto decode_failure;
+         delta = dis_movx_E_G ( sorb, delta, 1, sz, False );
+         break;
+
+      case 0xB7: /* MOVZXw Ew,Gv */
+         if (sz != 4)
+            goto decode_failure;
+         delta = dis_movx_E_G ( sorb, delta, 2, 4, False );
+         break;
+
+      case 0xBE: /* MOVSXb Eb,Gv */
+         if (sz != 2 && sz != 4)
+            goto decode_failure;
+         delta = dis_movx_E_G ( sorb, delta, 1, sz, True );
+         break;
+
+      case 0xBF: /* MOVSXw Ew,Gv */
+         if (sz != 4 && /* accept movsww, sigh, see #250799 */sz != 2)
+            goto decode_failure;
+         delta = dis_movx_E_G ( sorb, delta, 2, sz, True );
+         break;
+
+//--       /* =-=-=-=-=-=-=-=-=-=-= MOVNTI -=-=-=-=-=-=-=-=-= */
+//-- 
+//--       case 0xC3: /* MOVNTI Gv,Ev */
+//--          vg_assert(sz == 4);
+//--          modrm = getUChar(eip);
+//--          vg_assert(!epartIsReg(modrm));
+//--          t1 = newTemp(cb);
+//--          uInstr2(cb, GET, 4, ArchReg, gregOfRM(modrm), TempReg, t1);
+//--          pair = disAMode ( cb, sorb, eip, dis_buf );
+//--          t2 = LOW24(pair);
+//--          eip += HI8(pair);
+//--          uInstr2(cb, STORE, 4, TempReg, t1, TempReg, t2);
+//--          DIP("movnti %s,%s\n", nameIReg(4,gregOfRM(modrm)), dis_buf);
+//--          break;
+
+      /* =-=-=-=-=-=-=-=-=- MUL/IMUL =-=-=-=-=-=-=-=-=-= */
+
+      case 0xAF: /* IMUL Ev, Gv */
+         delta = dis_mul_E_G ( sorb, sz, delta );
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- NOPs =-=-=-=-=-=-=-=-=-=-=-= */
+
+      case 0x1F:
+         modrm = getUChar(delta);
+         if (epartIsReg(modrm)) goto decode_failure;
+         addr = disAMode ( &alen, sorb, delta, dis_buf );
+         delta += alen;
+         DIP("nop%c %s\n", nameISize(sz), dis_buf);
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- Jcond d32 -=-=-=-=-=-=-=-=-= */
+      case 0x80:
+      case 0x81:
+      case 0x82: /* JBb/JNAEb (jump below) */
+      case 0x83: /* JNBb/JAEb (jump not below) */
+      case 0x84: /* JZb/JEb (jump zero) */
+      case 0x85: /* JNZb/JNEb (jump not zero) */
+      case 0x86: /* JBEb/JNAb (jump below or equal) */
+      case 0x87: /* JNBEb/JAb (jump not below or equal) */
+      case 0x88: /* JSb (jump negative) */
+      case 0x89: /* JNSb (jump not negative) */
+      case 0x8A: /* JP (jump parity even) */
+      case 0x8B: /* JNP/JPO (jump parity odd) */
+      case 0x8C: /* JLb/JNGEb (jump less) */
+      case 0x8D: /* JGEb/JNLb (jump greater or equal) */
+      case 0x8E: /* JLEb/JNGb (jump less or equal) */
+      case 0x8F: /* JGb/JNLEb (jump greater) */
+       { Int    jmpDelta;
+         const HChar* comment  = "";
+         jmpDelta = (Int)getUDisp32(delta);
+         d32 = (((Addr32)guest_EIP_bbstart)+delta+4) + jmpDelta;
+         delta += 4;
+         if (resteerCisOk
+             && vex_control.guest_chase_cond
+             && (Addr32)d32 != (Addr32)guest_EIP_bbstart
+             && jmpDelta < 0
+             && resteerOkFn( callback_opaque, (Addr32)d32) ) {
+            /* Speculation: assume this backward branch is taken.  So
+               we need to emit a side-exit to the insn following this
+               one, on the negation of the condition, and continue at
+               the branch target address (d32).  If we wind up back at
+               the first instruction of the trace, just stop; it's
+               better to let the IR loop unroller handle that case.*/
+            stmt( IRStmt_Exit( 
+                     mk_x86g_calculate_condition((X86Condcode)
+                                                 (1 ^ (opc - 0x80))),
+                     Ijk_Boring,
+                     IRConst_U32(guest_EIP_bbstart+delta),
+                     OFFB_EIP ) );
+            dres.whatNext   = Dis_ResteerC;
+            dres.continueAt = (Addr32)d32;
+            comment = "(assumed taken)";
+         }
+         else
+         if (resteerCisOk
+             && vex_control.guest_chase_cond
+             && (Addr32)d32 != (Addr32)guest_EIP_bbstart
+             && jmpDelta >= 0
+             && resteerOkFn( callback_opaque, 
+                             (Addr32)(guest_EIP_bbstart+delta)) ) {
+            /* Speculation: assume this forward branch is not taken.
+               So we need to emit a side-exit to d32 (the dest) and
+               continue disassembling at the insn immediately
+               following this one. */
+            stmt( IRStmt_Exit( 
+                     mk_x86g_calculate_condition((X86Condcode)(opc - 0x80)),
+                     Ijk_Boring,
+                     IRConst_U32(d32),
+                     OFFB_EIP ) );
+            dres.whatNext   = Dis_ResteerC;
+            dres.continueAt = guest_EIP_bbstart + delta;
+            comment = "(assumed not taken)";
+         }
+         else {
+            /* Conservative default translation - end the block at
+               this point. */
+            jcc_01( &dres, (X86Condcode)(opc - 0x80), 
+                    (Addr32)(guest_EIP_bbstart+delta), d32);
+            vassert(dres.whatNext == Dis_StopHere);
+         }
+         DIP("j%s-32 0x%x %s\n", name_X86Condcode(opc - 0x80), d32, comment);
+         break;
+       }
+
+      /* =-=-=-=-=-=-=-=-=- RDTSC -=-=-=-=-=-=-=-=-=-=-= */
+      case 0x31: { /* RDTSC */
+         IRTemp   val  = newTemp(Ity_I64);
+         IRExpr** args = mkIRExprVec_0();
+         IRDirty* d    = unsafeIRDirty_1_N ( 
+                            val, 
+                            0/*regparms*/, 
+                            "x86g_dirtyhelper_RDTSC", 
+                            &x86g_dirtyhelper_RDTSC, 
+                            args 
+                         );
+         /* execute the dirty call, dumping the result in val. */
+         stmt( IRStmt_Dirty(d) );
+         putIReg(4, R_EDX, unop(Iop_64HIto32, mkexpr(val)));
+         putIReg(4, R_EAX, unop(Iop_64to32, mkexpr(val)));
+         DIP("rdtsc\n");
+         break;
+      }
+
+      /* =-=-=-=-=-=-=-=-=- PUSH/POP Sreg =-=-=-=-=-=-=-=-=-= */
+
+      case 0xA1: /* POP %FS */
+         dis_pop_segreg( R_FS, sz ); break;
+      case 0xA9: /* POP %GS */
+         dis_pop_segreg( R_GS, sz ); break;
+
+      case 0xA0: /* PUSH %FS */
+         dis_push_segreg( R_FS, sz ); break;
+      case 0xA8: /* PUSH %GS */
+         dis_push_segreg( R_GS, sz ); break;
+
+      /* =-=-=-=-=-=-=-=-=- SETcc Eb =-=-=-=-=-=-=-=-=-= */
+      case 0x90:
+      case 0x91:
+      case 0x92: /* set-Bb/set-NAEb (jump below) */
+      case 0x93: /* set-NBb/set-AEb (jump not below) */
+      case 0x94: /* set-Zb/set-Eb (jump zero) */
+      case 0x95: /* set-NZb/set-NEb (jump not zero) */
+      case 0x96: /* set-BEb/set-NAb (jump below or equal) */
+      case 0x97: /* set-NBEb/set-Ab (jump not below or equal) */
+      case 0x98: /* set-Sb (jump negative) */
+      case 0x99: /* set-NSb (jump not negative) */
+      case 0x9A: /* set-P (jump parity even) */
+      case 0x9B: /* set-NP (jump parity odd) */
+      case 0x9C: /* set-Lb/set-NGEb (jump less) */
+      case 0x9D: /* set-GEb/set-NLb (jump greater or equal) */
+      case 0x9E: /* set-LEb/set-NGb (jump less or equal) */
+      case 0x9F: /* set-Gb/set-NLEb (jump greater) */
+         t1 = newTemp(Ity_I8);
+         assign( t1, unop(Iop_1Uto8,mk_x86g_calculate_condition(opc-0x90)) );
+         modrm = getIByte(delta);
+         if (epartIsReg(modrm)) {
+            delta++;
+            putIReg(1, eregOfRM(modrm), mkexpr(t1));
+            DIP("set%s %s\n", name_X86Condcode(opc-0x90), 
+                              nameIReg(1,eregOfRM(modrm)));
+         } else {
+           addr = disAMode ( &alen, sorb, delta, dis_buf );
+           delta += alen;
+           storeLE( mkexpr(addr), mkexpr(t1) );
+           DIP("set%s %s\n", name_X86Condcode(opc-0x90), dis_buf);
+         }
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- SHLD/SHRD -=-=-=-=-=-=-=-=-= */
+
+      case 0xA4: /* SHLDv imm8,Gv,Ev */
+         modrm = getIByte(delta);
+         d32   = delta + lengthAMode(delta);
+         vex_sprintf(dis_buf, "$%d", getIByte(d32));
+         delta = dis_SHLRD_Gv_Ev ( 
+                  sorb, delta, modrm, sz, 
+                  mkU8(getIByte(d32)), True, /* literal */
+                  dis_buf, True );
+         break;
+      case 0xA5: /* SHLDv %cl,Gv,Ev */
+         modrm = getIByte(delta);
+         delta = dis_SHLRD_Gv_Ev ( 
+                    sorb, delta, modrm, sz,
+                    getIReg(1,R_ECX), False, /* not literal */
+                    "%cl", True );
+         break;
+
+      case 0xAC: /* SHRDv imm8,Gv,Ev */
+         modrm = getIByte(delta);
+         d32   = delta + lengthAMode(delta);
+         vex_sprintf(dis_buf, "$%d", getIByte(d32));
+         delta = dis_SHLRD_Gv_Ev ( 
+                    sorb, delta, modrm, sz, 
+                    mkU8(getIByte(d32)), True, /* literal */
+                    dis_buf, False );
+         break;
+      case 0xAD: /* SHRDv %cl,Gv,Ev */
+         modrm = getIByte(delta);
+         delta = dis_SHLRD_Gv_Ev ( 
+                    sorb, delta, modrm, sz, 
+                    getIReg(1,R_ECX), False, /* not literal */
+                    "%cl", False );
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- SYSENTER -=-=-=-=-=-=-=-=-=-= */
+
+      case 0x34:
+         /* Simple implementation needing a long explanation.
+
+            sysenter is a kind of syscall entry.  The key thing here
+            is that the return address is not known -- that is
+            something that is beyond Vex's knowledge.  So this IR
+            forces a return to the scheduler, which can do what it
+            likes to simulate the sysenter, but it MUST set this
+            thread's guest_EIP field with the continuation address
+            before resuming execution.  If that doesn't happen, the
+            thread will jump to address zero, which is probably
+            fatal. 
+         */
+
+         /* Note where we are, so we can back up the guest to this
+            point if the syscall needs to be restarted. */
+         stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
+                           mkU32(guest_EIP_curr_instr) ) );
+         jmp_lit(&dres, Ijk_Sys_sysenter, 0/*bogus next EIP value*/);
+         vassert(dres.whatNext == Dis_StopHere);
+         DIP("sysenter");
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- XADD -=-=-=-=-=-=-=-=-=-= */
+
+      case 0xC0: { /* XADD Gb,Eb */
+         Bool decodeOK;
+         delta = dis_xadd_G_E ( sorb, pfx_lock, 1, delta, &decodeOK );
+         if (!decodeOK) goto decode_failure;
+         break;
+      }
+      case 0xC1: { /* XADD Gv,Ev */
+         Bool decodeOK;
+         delta = dis_xadd_G_E ( sorb, pfx_lock, sz, delta, &decodeOK );
+         if (!decodeOK) goto decode_failure;
+         break;
+      }
+
+      /* =-=-=-=-=-=-=-=-=- MMXery =-=-=-=-=-=-=-=-=-=-= */
+
+      case 0x71: 
+      case 0x72: 
+      case 0x73: /* PSLLgg/PSRAgg/PSRLgg mmxreg by imm8 */
+
+      case 0x6E: /* MOVD (src)ireg-or-mem, (dst)mmxreg */
+      case 0x7E: /* MOVD (src)mmxreg, (dst)ireg-or-mem */
+      case 0x7F: /* MOVQ (src)mmxreg, (dst)mmxreg-or-mem */
+      case 0x6F: /* MOVQ (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xFC: 
+      case 0xFD: 
+      case 0xFE: /* PADDgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xEC: 
+      case 0xED: /* PADDSgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xDC:
+      case 0xDD: /* PADDUSgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xF8: 
+      case 0xF9: 
+      case 0xFA: /* PSUBgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xE8: 
+      case 0xE9: /* PSUBSgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xD8: 
+      case 0xD9: /* PSUBUSgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xE5: /* PMULHW (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xD5: /* PMULLW (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xF5: /* PMADDWD (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x74: 
+      case 0x75: 
+      case 0x76: /* PCMPEQgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x64: 
+      case 0x65: 
+      case 0x66: /* PCMPGTgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x6B: /* PACKSSDW (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0x63: /* PACKSSWB (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0x67: /* PACKUSWB (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x68: 
+      case 0x69: 
+      case 0x6A: /* PUNPCKHgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0x60: 
+      case 0x61: 
+      case 0x62: /* PUNPCKLgg (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xDB: /* PAND (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xDF: /* PANDN (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xEB: /* POR (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xEF: /* PXOR (src)mmxreg-or-mem, (dst)mmxreg */
+
+      case 0xF1: /* PSLLgg (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xF2: 
+      case 0xF3: 
+
+      case 0xD1: /* PSRLgg (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xD2: 
+      case 0xD3: 
+
+      case 0xE1: /* PSRAgg (src)mmxreg-or-mem, (dst)mmxreg */
+      case 0xE2: 
+      {
+         Int  delta0    = delta-1;
+         Bool decode_OK = False;
+
+         /* If sz==2 this is SSE, and we assume sse idec has
+            already spotted those cases by now. */
+         if (sz != 4)
+            goto decode_failure;
+
+         delta = dis_MMX ( &decode_OK, sorb, sz, delta-1 );
+         if (!decode_OK) {
+            delta = delta0;
+            goto decode_failure;
+         }
+         break;
+      }
+
+      case 0x0E: /* FEMMS */
+      case 0x77: /* EMMS */
+         if (sz != 4)
+            goto decode_failure;
+         do_EMMS_preamble();
+         DIP("{f}emms\n");
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- SGDT and SIDT =-=-=-=-=-=-=-=-=-=-= */
+      case 0x01: /* 0F 01 /0 -- SGDT */
+                 /* 0F 01 /1 -- SIDT */
+      {
+          /* This is really revolting, but ... since each processor
+             (core) only has one IDT and one GDT, just let the guest
+             see it (pass-through semantics).  I can't see any way to
+             construct a faked-up value, so don't bother to try. */
+         modrm = getUChar(delta);
+         addr = disAMode ( &alen, sorb, delta, dis_buf );
+         delta += alen;
+         if (epartIsReg(modrm)) goto decode_failure;
+         if (gregOfRM(modrm) != 0 && gregOfRM(modrm) != 1)
+            goto decode_failure;
+         switch (gregOfRM(modrm)) {
+            case 0: DIP("sgdt %s\n", dis_buf); break;
+            case 1: DIP("sidt %s\n", dis_buf); break;
+            default: vassert(0); /*NOTREACHED*/
+         }
+
+         IRDirty* d = unsafeIRDirty_0_N (
+                          0/*regparms*/,
+                          "x86g_dirtyhelper_SxDT",
+                          &x86g_dirtyhelper_SxDT,
+                          mkIRExprVec_2( mkexpr(addr),
+                                         mkU32(gregOfRM(modrm)) )
+                      );
+         /* declare we're writing memory */
+         d->mFx   = Ifx_Write;
+         d->mAddr = mkexpr(addr);
+         d->mSize = 6;
+         stmt( IRStmt_Dirty(d) );
+         break;
+      }
+
+      case 0x05: /* AMD's syscall */
+         stmt( IRStmt_Put( OFFB_IP_AT_SYSCALL,
+              mkU32(guest_EIP_curr_instr) ) );
+         jmp_lit(&dres, Ijk_Sys_syscall, ((Addr32)guest_EIP_bbstart)+delta);
+         vassert(dres.whatNext == Dis_StopHere);
+         DIP("syscall\n");
+         break;
+
+      /* =-=-=-=-=-=-=-=-=- unimp2 =-=-=-=-=-=-=-=-=-=-= */
+
+      default:
+         goto decode_failure;
+   } /* switch (opc) for the 2-byte opcodes */
+   goto decode_success;
+   } /* case 0x0F: of primary opcode */
+
+   /* ------------------------ ??? ------------------------ */
+  
+  default:
+  decode_failure:
+   /* All decode failures end up here. */
+   if (sigill_diag) {
+      vex_printf("vex x86->IR: unhandled instruction bytes: "
+                 "0x%x 0x%x 0x%x 0x%x\n",
+                 (Int)getIByte(delta_start+0),
+                 (Int)getIByte(delta_start+1),
+                 (Int)getIByte(delta_start+2),
+                 (Int)getIByte(delta_start+3) );
+   }
+
+   /* Tell the dispatcher that this insn cannot be decoded, and so has
+      not been executed, and (is currently) the next to be executed.
+      EIP should be up-to-date since it made so at the start of each
+      insn, but nevertheless be paranoid and update it again right
+      now. */
+   stmt( IRStmt_Put( OFFB_EIP, mkU32(guest_EIP_curr_instr) ) );
+   jmp_lit(&dres, Ijk_NoDecode, guest_EIP_curr_instr);
+   vassert(dres.whatNext == Dis_StopHere);
+   dres.len = 0;
+   /* We also need to say that a CAS is not expected now, regardless
+      of what it might have been set to at the start of the function,
+      since the IR that we've emitted just above (to synthesis a
+      SIGILL) does not involve any CAS, and presumably no other IR has
+      been emitted for this (non-decoded) insn. */
+   *expect_CAS = False;
+   return dres;
+
+   } /* switch (opc) for the main (primary) opcode switch. */
+
+  decode_success:
+   /* All decode successes end up here. */
+   switch (dres.whatNext) {
+      case Dis_Continue:
+         stmt( IRStmt_Put( OFFB_EIP, mkU32(guest_EIP_bbstart + delta) ) );
+         break;
+      case Dis_ResteerU:
+      case Dis_ResteerC:
+         stmt( IRStmt_Put( OFFB_EIP, mkU32(dres.continueAt) ) );
+         break;
+      case Dis_StopHere:
+         break;
+      default:
+         vassert(0);
+   }
+
+   DIP("\n");
+   dres.len = delta - delta_start;
+   return dres;
+}
+
+#undef DIP
+#undef DIS
+
+
+/*------------------------------------------------------------*/
+/*--- Top-level fn                                         ---*/
+/*------------------------------------------------------------*/
+
+/* Disassemble a single instruction into IR.  The instruction
+   is located in host memory at &guest_code[delta]. */
+
+DisResult disInstr_X86 ( IRSB*        irsb_IN,
+                         Bool         (*resteerOkFn) ( void*, Addr ),
+                         Bool         resteerCisOk,
+                         void*        callback_opaque,
+                         const UChar* guest_code_IN,
+                         Long         delta,
+                         Addr         guest_IP,
+                         VexArch      guest_arch,
+                         const VexArchInfo* archinfo,
+                         const VexAbiInfo*  abiinfo,
+                         VexEndness   host_endness_IN,
+                         Bool         sigill_diag_IN )
+{
+   Int       i, x1, x2;
+   Bool      expect_CAS, has_CAS;
+   DisResult dres;
+
+   /* Set globals (see top of this file) */
+   vassert(guest_arch == VexArchX86);
+   guest_code           = guest_code_IN;
+   irsb                 = irsb_IN;
+   host_endness         = host_endness_IN;
+   guest_EIP_curr_instr = (Addr32)guest_IP;
+   guest_EIP_bbstart    = (Addr32)toUInt(guest_IP - delta);
+
+   x1 = irsb_IN->stmts_used;
+   expect_CAS = False;
+   dres = disInstr_X86_WRK ( &expect_CAS, resteerOkFn,
+                             resteerCisOk,
+                             callback_opaque,
+                             delta, archinfo, abiinfo, sigill_diag_IN );
+   x2 = irsb_IN->stmts_used;
+   vassert(x2 >= x1);
+
+   /* See comment at the top of disInstr_X86_WRK for meaning of
+      expect_CAS.  Here, we (sanity-)check for the presence/absence of
+      IRCAS as directed by the returned expect_CAS value. */
+   has_CAS = False;
+   for (i = x1; i < x2; i++) {
+      if (irsb_IN->stmts[i]->tag == Ist_CAS)
+         has_CAS = True;
+   }
+
+   if (expect_CAS != has_CAS) {
+      /* inconsistency detected.  re-disassemble the instruction so as
+         to generate a useful error message; then assert. */
+      vex_traceflags |= VEX_TRACE_FE;
+      dres = disInstr_X86_WRK ( &expect_CAS, resteerOkFn,
+                                resteerCisOk,
+                                callback_opaque,
+                                delta, archinfo, abiinfo, sigill_diag_IN );
+      for (i = x1; i < x2; i++) {
+         vex_printf("\t\t");
+         ppIRStmt(irsb_IN->stmts[i]);
+         vex_printf("\n");
+      }
+      /* Failure of this assertion is serious and denotes a bug in
+         disInstr. */
+      vpanic("disInstr_X86: inconsistency in LOCK prefix handling");
+   }
+
+   return dres;
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end                                         guest_x86_toIR.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/priv/host_amd64_defs.c b/VEX/priv/host_amd64_defs.c
new file mode 100644
index 0000000..7534e43
--- /dev/null
+++ b/VEX/priv/host_amd64_defs.c
@@ -0,0 +1,3936 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 host_amd64_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "host_amd64_defs.h"
+
+
+/* --------- Registers. --------- */
+
+/* Return the universe of real (host) registers for AMD64.  The result
+   is a lazily-initialised static; the first |allocable| entries are
+   available to the register allocator, the remainder are only
+   mentioned by generated code (RAX/RCX/RDX/RSP/RBP/R11, XMM0/XMM1). */
+const RRegUniverse* getRRegUniverse_AMD64 ( void )
+{
+   /* The real-register universe is a big constant, so we just want to
+      initialise it once. */
+   static RRegUniverse rRegUniverse_AMD64;
+   static Bool         rRegUniverse_AMD64_initted = False;
+
+   /* Handy shorthand, nothing more */
+   RRegUniverse* ru = &rRegUniverse_AMD64;
+
+   /* This isn't thread-safe.  Sigh. */
+   if (LIKELY(rRegUniverse_AMD64_initted))
+      return ru;
+
+   RRegUniverse__init(ru);
+
+   /* Add the registers.  The initial segment of this array must be
+      those available for allocation by reg-alloc, and those that
+      follow are not available for allocation. */
+   ru->regs[ru->size++] = hregAMD64_RSI();
+   ru->regs[ru->size++] = hregAMD64_RDI();
+   ru->regs[ru->size++] = hregAMD64_R8();
+   ru->regs[ru->size++] = hregAMD64_R9();
+   ru->regs[ru->size++] = hregAMD64_R12();
+   ru->regs[ru->size++] = hregAMD64_R13();
+   ru->regs[ru->size++] = hregAMD64_R14();
+   ru->regs[ru->size++] = hregAMD64_R15();
+   ru->regs[ru->size++] = hregAMD64_RBX();
+   ru->regs[ru->size++] = hregAMD64_XMM3();
+   ru->regs[ru->size++] = hregAMD64_XMM4();
+   ru->regs[ru->size++] = hregAMD64_XMM5();
+   ru->regs[ru->size++] = hregAMD64_XMM6();
+   ru->regs[ru->size++] = hregAMD64_XMM7();
+   ru->regs[ru->size++] = hregAMD64_XMM8();
+   ru->regs[ru->size++] = hregAMD64_XMM9();
+   ru->regs[ru->size++] = hregAMD64_XMM10();
+   ru->regs[ru->size++] = hregAMD64_XMM11();
+   ru->regs[ru->size++] = hregAMD64_XMM12();
+   ru->regs[ru->size++] = hregAMD64_R10();
+   /* Everything added so far is allocatable; record the boundary. */
+   ru->allocable = ru->size;
+   /* And other regs, not available to the allocator. */
+   ru->regs[ru->size++] = hregAMD64_RAX();
+   ru->regs[ru->size++] = hregAMD64_RCX();
+   ru->regs[ru->size++] = hregAMD64_RDX();
+   ru->regs[ru->size++] = hregAMD64_RSP();
+   ru->regs[ru->size++] = hregAMD64_RBP();
+   ru->regs[ru->size++] = hregAMD64_R11();
+   ru->regs[ru->size++] = hregAMD64_XMM0();
+   ru->regs[ru->size++] = hregAMD64_XMM1();
+
+   rRegUniverse_AMD64_initted = True;
+
+   RRegUniverse__check_is_sane(ru);
+   return ru;
+}
+
+
+/* Pretty-print |reg|: virtual registers go through the generic
+   ppHReg; real registers are printed with their AT&T-style names
+   (%rax..%r15 for 64-bit ints, %xmmN for vectors). */
+void ppHRegAMD64 ( HReg reg ) 
+{
+   Int r;
+   static const HChar* ireg64_names[16] 
+     = { "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
+         "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15" };
+   /* Be generic for all virtual regs. */
+   if (hregIsVirtual(reg)) {
+      ppHReg(reg);
+      return;
+   }
+   /* But specific for real regs. */
+   switch (hregClass(reg)) {
+      case HRcInt64:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 16);
+         vex_printf("%s", ireg64_names[r]);
+         return;
+      case HRcVec128:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 16);
+         vex_printf("%%xmm%d", r);
+         return;
+      default:
+         vpanic("ppHRegAMD64");
+   }
+}
+
+/* Pretty-print the low 32 bits of |reg| (%eax..%r15d).  For virtual
+   registers, the generic name is printed with a "d" suffix.  Only
+   HRcInt64 real registers are valid here. */
+static void ppHRegAMD64_lo32 ( HReg reg ) 
+{
+   Int r;
+   static const HChar* ireg32_names[16] 
+     = { "%eax", "%ecx", "%edx",  "%ebx",  "%esp",  "%ebp",  "%esi",  "%edi",
+         "%r8d", "%r9d", "%r10d", "%r11d", "%r12d", "%r13d", "%r14d", "%r15d" };
+   /* Be generic for all virtual regs. */
+   if (hregIsVirtual(reg)) {
+      ppHReg(reg);
+      vex_printf("d");
+      return;
+   }
+   /* But specific for real regs. */
+   switch (hregClass(reg)) {
+      case HRcInt64:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 16);
+         vex_printf("%s", ireg32_names[r]);
+         return;
+      default:
+         vpanic("ppHRegAMD64_lo32: invalid regclass");
+   }
+}
+
+
+/* --------- Condition codes, Intel encoding. --------- */
+
+/* Return the Intel mnemonic suffix for condition code |cond|
+   (e.g. Acc_Z -> "z"), for use when printing instructions. */
+const HChar* showAMD64CondCode ( AMD64CondCode cond )
+{
+   switch (cond) {
+      case Acc_O:      return "o";
+      case Acc_NO:     return "no";
+      case Acc_B:      return "b";
+      case Acc_NB:     return "nb";
+      case Acc_Z:      return "z";
+      case Acc_NZ:     return "nz";
+      case Acc_BE:     return "be";
+      case Acc_NBE:    return "nbe";
+      case Acc_S:      return "s";
+      case Acc_NS:     return "ns";
+      case Acc_P:      return "p";
+      case Acc_NP:     return "np";
+      case Acc_L:      return "l";
+      case Acc_NL:     return "nl";
+      case Acc_LE:     return "le";
+      case Acc_NLE:    return "nle";
+      case Acc_ALWAYS: return "ALWAYS";
+      /* Panic message fixed: it previously said "ppAMD64CondCode",
+         which is not this function's name. */
+      default: vpanic("showAMD64CondCode");
+   }
+}
+
+
+/* --------- AMD64AMode: memory address expressions. --------- */
+
+/* Constructor: amode of the form imm32(reg). */
+AMD64AMode* AMD64AMode_IR ( UInt imm32, HReg reg ) {
+   AMD64AMode* am = LibVEX_Alloc_inline(sizeof(AMD64AMode));
+   am->tag        = Aam_IR;
+   am->Aam.IR.imm = imm32;
+   am->Aam.IR.reg = reg;
+   return am;
+}
+/* Constructor: amode of the form imm32(base,index,1<<shift).
+   |shift| must be in 0..3 (scale 1/2/4/8). */
+AMD64AMode* AMD64AMode_IRRS ( UInt imm32, HReg base, HReg indEx, Int shift ) {
+   AMD64AMode* am = LibVEX_Alloc_inline(sizeof(AMD64AMode));
+   am->tag = Aam_IRRS;
+   am->Aam.IRRS.imm   = imm32;
+   am->Aam.IRRS.base  = base;
+   am->Aam.IRRS.index = indEx;
+   am->Aam.IRRS.shift = shift;
+   vassert(shift >= 0 && shift <= 3);
+   return am;
+}
+
+/* Pretty-print |am| in AT&T syntax: "0xIMM(reg)" or
+   "0xIMM(base,index,scale)".  A zero IR displacement is omitted. */
+void ppAMD64AMode ( AMD64AMode* am ) {
+   switch (am->tag) {
+      case Aam_IR: 
+         if (am->Aam.IR.imm == 0)
+            vex_printf("(");
+         else
+            vex_printf("0x%x(", am->Aam.IR.imm);
+         ppHRegAMD64(am->Aam.IR.reg);
+         vex_printf(")");
+         return;
+      case Aam_IRRS:
+         vex_printf("0x%x(", am->Aam.IRRS.imm);
+         ppHRegAMD64(am->Aam.IRRS.base);
+         vex_printf(",");
+         ppHRegAMD64(am->Aam.IRRS.index);
+         vex_printf(",%d)", 1 << am->Aam.IRRS.shift);
+         return;
+      default:
+         vpanic("ppAMD64AMode");
+   }
+}
+
+/* Record (as reads) the registers mentioned by amode |am| in |u|.
+   Amode registers are always read, never written. */
+static void addRegUsage_AMD64AMode ( HRegUsage* u, AMD64AMode* am ) {
+   switch (am->tag) {
+      case Aam_IR: 
+         addHRegUse(u, HRmRead, am->Aam.IR.reg);
+         return;
+      case Aam_IRRS:
+         addHRegUse(u, HRmRead, am->Aam.IRRS.base);
+         addHRegUse(u, HRmRead, am->Aam.IRRS.index);
+         return;
+      default:
+         vpanic("addRegUsage_AMD64AMode");
+   }
+}
+
+/* In-place rewrite of the registers in |am| per the remap |m|
+   (virtual -> real substitution after register allocation). */
+static void mapRegs_AMD64AMode ( HRegRemap* m, AMD64AMode* am ) {
+   switch (am->tag) {
+      case Aam_IR: 
+         am->Aam.IR.reg = lookupHRegRemap(m, am->Aam.IR.reg);
+         return;
+      case Aam_IRRS:
+         am->Aam.IRRS.base = lookupHRegRemap(m, am->Aam.IRRS.base);
+         am->Aam.IRRS.index = lookupHRegRemap(m, am->Aam.IRRS.index);
+         return;
+      default:
+         vpanic("mapRegs_AMD64AMode");
+   }
+}
+
+/* --------- Operand, which can be reg, immediate or memory. --------- */
+
+/* Constructors for AMD64RMI: an operand that is a 32-bit immediate,
+   a register, or a memory reference. */
+AMD64RMI* AMD64RMI_Imm ( UInt imm32 ) {
+   AMD64RMI* op       = LibVEX_Alloc_inline(sizeof(AMD64RMI));
+   op->tag            = Armi_Imm;
+   op->Armi.Imm.imm32 = imm32;
+   return op;
+}
+AMD64RMI* AMD64RMI_Reg ( HReg reg ) {
+   AMD64RMI* op     = LibVEX_Alloc_inline(sizeof(AMD64RMI));
+   op->tag          = Armi_Reg;
+   op->Armi.Reg.reg = reg;
+   return op;
+}
+AMD64RMI* AMD64RMI_Mem ( AMD64AMode* am ) {
+   AMD64RMI* op    = LibVEX_Alloc_inline(sizeof(AMD64RMI));
+   op->tag         = Armi_Mem;
+   op->Armi.Mem.am = am;
+   return op;
+}
+
+/* Pretty-print an RMI operand.  |lo32| selects 32-bit register names
+   (%eax etc) instead of the full 64-bit names. */
+static void ppAMD64RMI_wrk ( AMD64RMI* op, Bool lo32 ) {
+   switch (op->tag) {
+      case Armi_Imm: 
+         vex_printf("$0x%x", op->Armi.Imm.imm32);
+         return;
+      case Armi_Reg:
+         if (lo32)
+            ppHRegAMD64_lo32(op->Armi.Reg.reg);
+         else
+            ppHRegAMD64(op->Armi.Reg.reg);
+         return;
+      case Armi_Mem: 
+         ppAMD64AMode(op->Armi.Mem.am);
+         return;
+     default: 
+         vpanic("ppAMD64RMI");
+   }
+}
+/* Public wrappers: full-width and low-32-bit printing. */
+void ppAMD64RMI ( AMD64RMI* op ) {
+   ppAMD64RMI_wrk(op, False/*!lo32*/);
+}
+void ppAMD64RMI_lo32 ( AMD64RMI* op ) {
+   ppAMD64RMI_wrk(op, True/*lo32*/);
+}
+
+/* An AMD64RMI can only be used in a "read" context (what would it mean
+   to write or modify a literal?) and so we enumerate its registers
+   accordingly. */
+/* Record the registers read by RMI operand |op| in |u|.  An RMI is
+   read-only, so all uses are HRmRead. */
+static void addRegUsage_AMD64RMI ( HRegUsage* u, AMD64RMI* op ) {
+   switch (op->tag) {
+      case Armi_Imm: 
+         return;
+      case Armi_Reg: 
+         addHRegUse(u, HRmRead, op->Armi.Reg.reg);
+         return;
+      case Armi_Mem: 
+         addRegUsage_AMD64AMode(u, op->Armi.Mem.am);
+         return;
+      default: 
+         vpanic("addRegUsage_AMD64RMI");
+   }
+}
+
+/* In-place register remap (virtual -> real) for an RMI operand. */
+static void mapRegs_AMD64RMI ( HRegRemap* m, AMD64RMI* op ) {
+   switch (op->tag) {
+      case Armi_Imm: 
+         return;
+      case Armi_Reg: 
+         op->Armi.Reg.reg = lookupHRegRemap(m, op->Armi.Reg.reg);
+         return;
+      case Armi_Mem: 
+         mapRegs_AMD64AMode(m, op->Armi.Mem.am);
+         return;
+      default: 
+         vpanic("mapRegs_AMD64RMI");
+   }
+}
+
+
+/* --------- Operand, which can be reg or immediate only. --------- */
+
+/* Constructors for AMD64RI: an operand that is either a 32-bit
+   immediate or a register. */
+AMD64RI* AMD64RI_Imm ( UInt imm32 ) {
+   AMD64RI* op       = LibVEX_Alloc_inline(sizeof(AMD64RI));
+   op->tag           = Ari_Imm;
+   op->Ari.Imm.imm32 = imm32;
+   return op;
+}
+AMD64RI* AMD64RI_Reg ( HReg reg ) {
+   AMD64RI* op     = LibVEX_Alloc_inline(sizeof(AMD64RI));
+   op->tag         = Ari_Reg;
+   op->Ari.Reg.reg = reg;
+   return op;
+}
+
+/* Pretty-print an RI operand: "$0xIMM" or a register name. */
+void ppAMD64RI ( AMD64RI* op ) {
+   switch (op->tag) {
+      case Ari_Imm: 
+         vex_printf("$0x%x", op->Ari.Imm.imm32);
+         return;
+      case Ari_Reg: 
+         ppHRegAMD64(op->Ari.Reg.reg);
+         return;
+     default: 
+         vpanic("ppAMD64RI");
+   }
+}
+
+/* An AMD64RI can only be used in a "read" context (what would it mean
+   to write or modify a literal?) and so we enumerate its registers
+   accordingly. */
+/* Record the registers read by RI operand |op| in |u|.  An RI is
+   read-only, so all uses are HRmRead. */
+static void addRegUsage_AMD64RI ( HRegUsage* u, AMD64RI* op ) {
+   switch (op->tag) {
+      case Ari_Imm: 
+         return;
+      case Ari_Reg: 
+         addHRegUse(u, HRmRead, op->Ari.Reg.reg);
+         return;
+      default: 
+         vpanic("addRegUsage_AMD64RI");
+   }
+}
+
+/* In-place register remap (virtual -> real) for an RI operand. */
+static void mapRegs_AMD64RI ( HRegRemap* m, AMD64RI* op ) {
+   switch (op->tag) {
+      case Ari_Imm: 
+         return;
+      case Ari_Reg: 
+         op->Ari.Reg.reg = lookupHRegRemap(m, op->Ari.Reg.reg);
+         return;
+      default: 
+         vpanic("mapRegs_AMD64RI");
+   }
+}
+
+
+/* --------- Operand, which can be reg or memory only. --------- */
+
+/* Constructors for AMD64RM: an operand that is either a register or
+   a memory reference. */
+AMD64RM* AMD64RM_Reg ( HReg reg ) {
+   AMD64RM* op       = LibVEX_Alloc_inline(sizeof(AMD64RM));
+   op->tag         = Arm_Reg;
+   op->Arm.Reg.reg = reg;
+   return op;
+}
+AMD64RM* AMD64RM_Mem ( AMD64AMode* am ) {
+   AMD64RM* op    = LibVEX_Alloc_inline(sizeof(AMD64RM));
+   op->tag        = Arm_Mem;
+   op->Arm.Mem.am = am;
+   return op;
+}
+
+/* Pretty-print an RM operand: an amode or a register name. */
+void ppAMD64RM ( AMD64RM* op ) {
+   switch (op->tag) {
+      case Arm_Mem: 
+         ppAMD64AMode(op->Arm.Mem.am);
+         return;
+      case Arm_Reg: 
+         ppHRegAMD64(op->Arm.Reg.reg);
+         return;
+     default: 
+         vpanic("ppAMD64RM");
+   }
+}
+
+/* Because an AMD64RM can be both a source or destination operand, we
+   have to supply a mode -- pertaining to the operand as a whole --
+   indicating how it's being used. */
+/* Because an AMD64RM can be both a source or destination operand, we
+   have to supply a mode -- pertaining to the operand as a whole --
+   indicating how it's being used. */
+static void addRegUsage_AMD64RM ( HRegUsage* u, AMD64RM* op, HRegMode mode ) {
+   switch (op->tag) {
+      case Arm_Mem: 
+         /* Memory is read, written or modified.  So we just want to
+            know the regs read by the amode. */
+         addRegUsage_AMD64AMode(u, op->Arm.Mem.am);
+         return;
+      case Arm_Reg: 
+         /* reg is read, written or modified.  Add it in the
+            appropriate way. */
+         addHRegUse(u, mode, op->Arm.Reg.reg);
+         return;
+     default: 
+         vpanic("addRegUsage_AMD64RM");
+   }
+}
+
+/* In-place register remap (virtual -> real) for an RM operand. */
+static void mapRegs_AMD64RM ( HRegRemap* m, AMD64RM* op )
+{
+   switch (op->tag) {
+      case Arm_Mem: 
+         mapRegs_AMD64AMode(m, op->Arm.Mem.am);
+         return;
+      case Arm_Reg: 
+         op->Arm.Reg.reg = lookupHRegRemap(m, op->Arm.Reg.reg);
+         return;
+     default: 
+         vpanic("mapRegs_AMD64RM");
+   }
+}
+
+
+/* --------- Instructions. --------- */
+
+/* Map an operand size in bytes (2/4/8) to its AT&T mnemonic suffix. */
+static const HChar* showAMD64ScalarSz ( Int sz ) {
+   switch (sz) {
+      case 2: return "w";
+      case 4: return "l";
+      case 8: return "q";
+      default: vpanic("showAMD64ScalarSz");
+   }
+}
+ 
+/* Mnemonic for a unary integer op. */
+const HChar* showAMD64UnaryOp ( AMD64UnaryOp op ) {
+   switch (op) {
+      case Aun_NOT: return "not";
+      case Aun_NEG: return "neg";
+      default: vpanic("showAMD64UnaryOp");
+   }
+}
+
+/* Mnemonic for a two-operand ALU op (note: MUL prints as "imul"). */
+const HChar* showAMD64AluOp ( AMD64AluOp op ) {
+   switch (op) {
+      case Aalu_MOV:  return "mov";
+      case Aalu_CMP:  return "cmp";
+      case Aalu_ADD:  return "add";
+      case Aalu_SUB:  return "sub";
+      case Aalu_ADC:  return "adc";
+      case Aalu_SBB:  return "sbb";
+      case Aalu_AND:  return "and";
+      case Aalu_OR:   return "or";
+      case Aalu_XOR:  return "xor";
+      case Aalu_MUL:  return "imul";
+      default: vpanic("showAMD64AluOp");
+   }
+}
+
+/* Mnemonic for a shift op. */
+const HChar* showAMD64ShiftOp ( AMD64ShiftOp op ) {
+   switch (op) {
+      case Ash_SHL: return "shl";
+      case Ash_SHR: return "shr";
+      case Ash_SAR: return "sar";
+      default: vpanic("showAMD64ShiftOp");
+   }
+}
+
+/* Mnemonic (minus the leading "f") for an x87 FP op. */
+const HChar* showA87FpOp ( A87FpOp op ) {
+   switch (op) {
+      case Afp_SCALE:  return "scale";
+      case Afp_ATAN:   return "atan";
+      case Afp_YL2X:   return "yl2x";
+      case Afp_YL2XP1: return "yl2xp1";
+      case Afp_PREM:   return "prem";
+      case Afp_PREM1:  return "prem1";
+      case Afp_SQRT:   return "sqrt";
+      case Afp_SIN:    return "sin";
+      case Afp_COS:    return "cos";
+      case Afp_TAN:    return "tan";
+      case Afp_ROUND:  return "round";
+      case Afp_2XM1:   return "2xm1";
+      default: vpanic("showA87FpOp");
+   }
+}
+
+/* Mnemonic (or mnemonic stem, for the F ops which get a size/lane
+   suffix elsewhere) for an SSE op. */
+const HChar* showAMD64SseOp ( AMD64SseOp op ) {
+   switch (op) {
+      case Asse_MOV:      return "movups";
+      case Asse_ADDF:     return "add";
+      case Asse_SUBF:     return "sub";
+      case Asse_MULF:     return "mul";
+      case Asse_DIVF:     return "div";
+      case Asse_MAXF:     return "max";
+      case Asse_MINF:     return "min";
+      case Asse_CMPEQF:   return "cmpFeq";
+      case Asse_CMPLTF:   return "cmpFlt";
+      case Asse_CMPLEF:   return "cmpFle";
+      case Asse_CMPUNF:   return "cmpFun";
+      case Asse_RCPF:     return "rcp";
+      case Asse_RSQRTF:   return "rsqrt";
+      case Asse_SQRTF:    return "sqrt";
+      case Asse_AND:      return "and";
+      case Asse_OR:       return "or";
+      case Asse_XOR:      return "xor";
+      case Asse_ANDN:     return "andn";
+      case Asse_ADD8:     return "paddb";
+      case Asse_ADD16:    return "paddw";
+      case Asse_ADD32:    return "paddd";
+      case Asse_ADD64:    return "paddq";
+      case Asse_QADD8U:   return "paddusb";
+      case Asse_QADD16U:  return "paddusw";
+      case Asse_QADD8S:   return "paddsb";
+      case Asse_QADD16S:  return "paddsw";
+      case Asse_SUB8:     return "psubb";
+      case Asse_SUB16:    return "psubw";
+      case Asse_SUB32:    return "psubd";
+      case Asse_SUB64:    return "psubq";
+      case Asse_QSUB8U:   return "psubusb";
+      case Asse_QSUB16U:  return "psubusw";
+      case Asse_QSUB8S:   return "psubsb";
+      case Asse_QSUB16S:  return "psubsw";
+      case Asse_MUL16:    return "pmullw";
+      case Asse_MULHI16U: return "pmulhuw";
+      case Asse_MULHI16S: return "pmulhw";
+      case Asse_AVG8U:    return "pavgb";
+      case Asse_AVG16U:   return "pavgw";
+      case Asse_MAX16S:   return "pmaxw";
+      case Asse_MAX8U:    return "pmaxub";
+      case Asse_MIN16S:   return "pminw";
+      case Asse_MIN8U:    return "pminub";
+      case Asse_CMPEQ8:   return "pcmpeqb";
+      case Asse_CMPEQ16:  return "pcmpeqw";
+      case Asse_CMPEQ32:  return "pcmpeqd";
+      case Asse_CMPGT8S:  return "pcmpgtb";
+      case Asse_CMPGT16S: return "pcmpgtw";
+      case Asse_CMPGT32S: return "pcmpgtd";
+      case Asse_SHL16:    return "psllw";
+      case Asse_SHL32:    return "pslld";
+      case Asse_SHL64:    return "psllq";
+      case Asse_SHR16:    return "psrlw";
+      case Asse_SHR32:    return "psrld";
+      case Asse_SHR64:    return "psrlq";
+      case Asse_SAR16:    return "psraw";
+      case Asse_SAR32:    return "psrad";
+      case Asse_PACKSSD:  return "packssdw";
+      case Asse_PACKSSW:  return "packsswb";
+      case Asse_PACKUSW:  return "packuswb";
+      case Asse_UNPCKHB:  return "punpckhb";
+      case Asse_UNPCKHW:  return "punpckhw";
+      case Asse_UNPCKHD:  return "punpckhd";
+      case Asse_UNPCKHQ:  return "punpckhq";
+      case Asse_UNPCKLB:  return "punpcklb";
+      case Asse_UNPCKLW:  return "punpcklw";
+      case Asse_UNPCKLD:  return "punpckld";
+      case Asse_UNPCKLQ:  return "punpcklq";
+      default: vpanic("showAMD64SseOp");
+   }
+}
+
+/* --- AMD64Instr constructors: integer ALU, shifts, mul/div, calls.
+   Each allocates a fresh AMD64Instr, sets its tag and fields, and
+   sanity-checks arguments with vassert where applicable. --- */
+
+/* movabsq $imm64, dst */
+AMD64Instr* AMD64Instr_Imm64 ( ULong imm64, HReg dst ) {
+   AMD64Instr* i      = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag             = Ain_Imm64;
+   i->Ain.Imm64.imm64 = imm64;
+   i->Ain.Imm64.dst   = dst;
+   return i;
+}
+/* 64-bit ALU op, reg destination. */
+AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
+   AMD64Instr* i     = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag            = Ain_Alu64R;
+   i->Ain.Alu64R.op  = op;
+   i->Ain.Alu64R.src = src;
+   i->Ain.Alu64R.dst = dst;
+   return i;
+}
+/* 64-bit ALU op, memory destination.  MUL has no mem-dst form. */
+AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp op, AMD64RI* src, AMD64AMode* dst ) {
+   AMD64Instr* i     = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag            = Ain_Alu64M;
+   i->Ain.Alu64M.op  = op;
+   i->Ain.Alu64M.src = src;
+   i->Ain.Alu64M.dst = dst;
+   vassert(op != Aalu_MUL);
+   return i;
+}
+/* 64-bit shift; src is an immediate count, 0 meaning "by %cl". */
+AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
+   AMD64Instr* i   = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag          = Ain_Sh64;
+   i->Ain.Sh64.op  = op;
+   i->Ain.Sh64.src = src;
+   i->Ain.Sh64.dst = dst;
+   return i;
+}
+/* testq $imm32, dst */
+AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_Test64;
+   i->Ain.Test64.imm32 = imm32;
+   i->Ain.Test64.dst   = dst;
+   return i;
+}
+/* not/neg on a 64-bit register. */
+AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst ) {
+   AMD64Instr* i      = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag             = Ain_Unary64;
+   i->Ain.Unary64.op  = op;
+   i->Ain.Unary64.dst = dst;
+   return i;
+}
+/* leaq am, dst */
+AMD64Instr* AMD64Instr_Lea64 ( AMD64AMode* am, HReg dst ) {
+   AMD64Instr* i      = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag             = Ain_Lea64;
+   i->Ain.Lea64.am    = am;
+   i->Ain.Lea64.dst   = dst;
+   return i;
+}
+/* 32-bit ALU op; only the commutative/compare subset is allowed. */
+AMD64Instr* AMD64Instr_Alu32R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
+   AMD64Instr* i     = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag            = Ain_Alu32R;
+   i->Ain.Alu32R.op  = op;
+   i->Ain.Alu32R.src = src;
+   i->Ain.Alu32R.dst = dst;
+   switch (op) {
+      case Aalu_ADD: case Aalu_SUB: case Aalu_CMP:
+      case Aalu_AND: case Aalu_OR:  case Aalu_XOR: break;
+      default: vassert(0);
+   }
+   return i;
+}
+/* Widening 64x64->128 multiply (signed or unsigned). */
+AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* src ) {
+   AMD64Instr* i     = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag            = Ain_MulL;
+   i->Ain.MulL.syned = syned;
+   i->Ain.MulL.src   = src;
+   return i;
+}
+/* div/idiv of size 4 or 8 bytes. */
+AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* src ) {
+   AMD64Instr* i     = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag            = Ain_Div;
+   i->Ain.Div.syned  = syned;
+   i->Ain.Div.sz     = sz;
+   i->Ain.Div.src    = src;
+   vassert(sz == 4 || sz == 8);
+   return i;
+}
+/* pushq src */
+AMD64Instr* AMD64Instr_Push( AMD64RMI* src ) {
+   AMD64Instr* i   = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag          = Ain_Push;
+   i->Ain.Push.src = src;
+   return i;
+}
+/* Conditional call to a fixed target; 0..6 register parameters. */
+AMD64Instr* AMD64Instr_Call ( AMD64CondCode cond, Addr64 target, Int regparms,
+                              RetLoc rloc ) {
+   AMD64Instr* i        = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag               = Ain_Call;
+   i->Ain.Call.cond     = cond;
+   i->Ain.Call.target   = target;
+   i->Ain.Call.regparms = regparms;
+   i->Ain.Call.rloc     = rloc;
+   vassert(regparms >= 0 && regparms <= 6);
+   vassert(is_sane_RetLoc(rloc));
+   return i;
+}
+}
+
+/* --- Constructors for the three block-exit forms: direct (known
+   guest target), indirect (target in a register), and assisted
+   (hand control to the run-time with jump-kind |jk|). --- */
+AMD64Instr* AMD64Instr_XDirect ( Addr64 dstGA, AMD64AMode* amRIP,
+                                 AMD64CondCode cond, Bool toFastEP ) {
+   AMD64Instr* i           = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                  = Ain_XDirect;
+   i->Ain.XDirect.dstGA    = dstGA;
+   i->Ain.XDirect.amRIP    = amRIP;
+   i->Ain.XDirect.cond     = cond;
+   i->Ain.XDirect.toFastEP = toFastEP;
+   return i;
+}
+AMD64Instr* AMD64Instr_XIndir ( HReg dstGA, AMD64AMode* amRIP,
+                                AMD64CondCode cond ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_XIndir;
+   i->Ain.XIndir.dstGA = dstGA;
+   i->Ain.XIndir.amRIP = amRIP;
+   i->Ain.XIndir.cond  = cond;
+   return i;
+}
+AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
+                                   AMD64CondCode cond, IRJumpKind jk ) {
+   AMD64Instr* i          = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                 = Ain_XAssisted;
+   i->Ain.XAssisted.dstGA = dstGA;
+   i->Ain.XAssisted.amRIP = amRIP;
+   i->Ain.XAssisted.cond  = cond;
+   i->Ain.XAssisted.jk    = jk;
+   return i;
+}
+
+/* --- Constructors: conditional moves/loads/stores, widening moves,
+   loads/stores, bit scans, fences and atomic compare-and-swap. --- */
+
+/* cmovCC src, dst.  An unconditional cmov makes no sense. */
+AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, HReg src, HReg dst ) {
+   AMD64Instr* i      = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag             = Ain_CMov64;
+   i->Ain.CMov64.cond = cond;
+   i->Ain.CMov64.src  = src;
+   i->Ain.CMov64.dst  = dst;
+   vassert(cond != Acc_ALWAYS);
+   return i;
+}
+/* Conditional 4- or 8-byte load. */
+AMD64Instr* AMD64Instr_CLoad ( AMD64CondCode cond, UChar szB,
+                               AMD64AMode* addr, HReg dst ) {
+   AMD64Instr* i     = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag            = Ain_CLoad;
+   i->Ain.CLoad.cond = cond;
+   i->Ain.CLoad.szB  = szB;
+   i->Ain.CLoad.addr = addr;
+   i->Ain.CLoad.dst  = dst;
+   vassert(cond != Acc_ALWAYS && (szB == 4 || szB == 8));
+   return i;
+}
+/* Conditional 4- or 8-byte store. */
+AMD64Instr* AMD64Instr_CStore ( AMD64CondCode cond, UChar szB,
+                                HReg src, AMD64AMode* addr ) {
+   AMD64Instr* i      = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag             = Ain_CStore;
+   i->Ain.CStore.cond = cond;
+   i->Ain.CStore.szB  = szB;
+   i->Ain.CStore.src  = src;
+   i->Ain.CStore.addr = addr;
+   vassert(cond != Acc_ALWAYS && (szB == 4 || szB == 8));
+   return i;
+}
+/* movslq / movzlq: widen 32 -> 64 bits, signed or zero extend. */
+AMD64Instr* AMD64Instr_MovxLQ ( Bool syned, HReg src, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_MovxLQ;
+   i->Ain.MovxLQ.syned = syned;
+   i->Ain.MovxLQ.src   = src;
+   i->Ain.MovxLQ.dst   = dst;
+   return i;
+}
+/* Widening load of 1, 2 or 4 bytes into a 64-bit register. */
+AMD64Instr* AMD64Instr_LoadEX ( UChar szSmall, Bool syned,
+                                AMD64AMode* src, HReg dst ) {
+   AMD64Instr* i         = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                = Ain_LoadEX;
+   i->Ain.LoadEX.szSmall = szSmall;
+   i->Ain.LoadEX.syned   = syned;
+   i->Ain.LoadEX.src     = src;
+   i->Ain.LoadEX.dst     = dst;
+   vassert(szSmall == 1 || szSmall == 2 || szSmall == 4);
+   return i;
+}
+/* Narrow store of 1, 2 or 4 bytes (8-byte stores use Alu64M MOV). */
+AMD64Instr* AMD64Instr_Store ( UChar sz, HReg src, AMD64AMode* dst ) {
+   AMD64Instr* i    = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag           = Ain_Store;
+   i->Ain.Store.sz  = sz;
+   i->Ain.Store.src = src;
+   i->Ain.Store.dst = dst;
+   vassert(sz == 1 || sz == 2 || sz == 4);
+   return i;
+}
+/* setCC on the low byte of dst. */
+AMD64Instr* AMD64Instr_Set64 ( AMD64CondCode cond, HReg dst ) {
+   AMD64Instr* i     = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag            = Ain_Set64;
+   i->Ain.Set64.cond = cond;
+   i->Ain.Set64.dst  = dst;
+   return i;
+}
+/* bsf (isFwds) or bsr bit scan. */
+AMD64Instr* AMD64Instr_Bsfr64 ( Bool isFwds, HReg src, HReg dst ) {
+   AMD64Instr* i        = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag               = Ain_Bsfr64;
+   i->Ain.Bsfr64.isFwds = isFwds;
+   i->Ain.Bsfr64.src    = src;
+   i->Ain.Bsfr64.dst    = dst;
+   return i;
+}
+/* Memory fence. */
+AMD64Instr* AMD64Instr_MFence ( void ) {
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag        = Ain_MFence;
+   return i;
+}
+/* Atomic compare-and-swap of 1, 2, 4 or 8 bytes at |addr|. */
+AMD64Instr* AMD64Instr_ACAS ( AMD64AMode* addr, UChar sz ) {
+   AMD64Instr* i    = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag           = Ain_ACAS;
+   i->Ain.ACAS.addr = addr;
+   i->Ain.ACAS.sz   = sz;
+   vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
+   return i;
+}
+/* Double-width CAS: two 4- or two 8-byte values at |addr|. */
+AMD64Instr* AMD64Instr_DACAS ( AMD64AMode* addr, UChar sz ) {
+   AMD64Instr* i     = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag            = Ain_DACAS;
+   i->Ain.DACAS.addr = addr;
+   i->Ain.DACAS.sz   = sz;
+   vassert(sz == 8 || sz == 4);
+   return i;
+}
+
+/* --- Constructors for the x87 FPU instructions and MXCSR load. --- */
+
+/* Free 1..7 x87 stack registers. */
+AMD64Instr* AMD64Instr_A87Free ( Int nregs )
+{
+   AMD64Instr* i        = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag               = Ain_A87Free;
+   i->Ain.A87Free.nregs = nregs;
+   vassert(nregs >= 1 && nregs <= 7);
+   return i;
+}
+/* Push-from-memory or pop-to-memory of a 4- or 8-byte FP value. */
+AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB )
+{
+   AMD64Instr* i            = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                   = Ain_A87PushPop;
+   i->Ain.A87PushPop.addr   = addr;
+   i->Ain.A87PushPop.isPush = isPush;
+   i->Ain.A87PushPop.szB    = szB;
+   vassert(szB == 8 || szB == 4);
+   return i;
+}
+/* x87 arithmetic op on the FP stack. */
+AMD64Instr* AMD64Instr_A87FpOp ( A87FpOp op )
+{
+   AMD64Instr* i     = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag            = Ain_A87FpOp;
+   i->Ain.A87FpOp.op = op;
+   return i;
+}
+/* fldcw: load the x87 control word from memory. */
+AMD64Instr* AMD64Instr_A87LdCW ( AMD64AMode* addr )
+{
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_A87LdCW;
+   i->Ain.A87LdCW.addr = addr;
+   return i;
+}
+/* fstsw: store the x87 status word to memory. */
+AMD64Instr* AMD64Instr_A87StSW ( AMD64AMode* addr )
+{
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_A87StSW;
+   i->Ain.A87StSW.addr = addr;
+   return i;
+}
+/* ldmxcsr: load the SSE control/status register from memory. */
+AMD64Instr* AMD64Instr_LdMXCSR ( AMD64AMode* addr ) {
+   AMD64Instr* i         = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                = Ain_LdMXCSR;
+   i->Ain.LdMXCSR.addr   = addr;
+   return i;
+}
+}
+/* --- Constructors: scalar SSE compare/convert and SSE loads/stores.
+   Size arguments are in bytes; 4 = single precision, 8 = double. --- */
+
+/* ucomiss/ucomisd then read %rflags into an integer dst. */
+AMD64Instr* AMD64Instr_SseUComIS ( Int sz, HReg srcL, HReg srcR, HReg dst ) {
+   AMD64Instr* i         = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                = Ain_SseUComIS;
+   i->Ain.SseUComIS.sz   = toUChar(sz);
+   i->Ain.SseUComIS.srcL = srcL;
+   i->Ain.SseUComIS.srcR = srcR;
+   i->Ain.SseUComIS.dst  = dst;
+   vassert(sz == 4 || sz == 8);
+   return i;
+}
+/* cvtsi2ss/cvtsi2sd: signed int (szS bytes) -> float (szD bytes). */
+AMD64Instr* AMD64Instr_SseSI2SF ( Int szS, Int szD, HReg src, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_SseSI2SF;
+   i->Ain.SseSI2SF.szS = toUChar(szS);
+   i->Ain.SseSI2SF.szD = toUChar(szD);
+   i->Ain.SseSI2SF.src = src;
+   i->Ain.SseSI2SF.dst = dst;
+   vassert(szS == 4 || szS == 8);
+   vassert(szD == 4 || szD == 8);
+   return i;
+}
+/* cvtss2si/cvtsd2si: float (szS bytes) -> signed int (szD bytes). */
+AMD64Instr* AMD64Instr_SseSF2SI ( Int szS, Int szD, HReg src, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_SseSF2SI;
+   i->Ain.SseSF2SI.szS = toUChar(szS);
+   i->Ain.SseSF2SI.szD = toUChar(szD);
+   i->Ain.SseSF2SI.src = src;
+   i->Ain.SseSF2SI.dst = dst;
+   vassert(szS == 4 || szS == 8);
+   vassert(szD == 4 || szD == 8);
+   return i;
+}
+/* cvtsd2ss (from64) or cvtss2sd: float width conversion. */
+AMD64Instr* AMD64Instr_SseSDSS   ( Bool from64, HReg src, HReg dst )
+{
+   AMD64Instr* i         = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                = Ain_SseSDSS;
+   i->Ain.SseSDSS.from64 = from64;
+   i->Ain.SseSDSS.src    = src;
+   i->Ain.SseSDSS.dst    = dst;
+   return i;
+}
+/* SSE load or store of 4, 8 or 16 bytes. */
+AMD64Instr* AMD64Instr_SseLdSt ( Bool isLoad, Int sz, 
+                                 HReg reg, AMD64AMode* addr ) {
+   AMD64Instr* i         = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                = Ain_SseLdSt;
+   i->Ain.SseLdSt.isLoad = isLoad;
+   i->Ain.SseLdSt.sz     = toUChar(sz);
+   i->Ain.SseLdSt.reg    = reg;
+   i->Ain.SseLdSt.addr   = addr;
+   vassert(sz == 4 || sz == 8 || sz == 16);
+   return i;
+}
+/* Load 4 or 8 bytes into the low lanes, zeroing the upper lanes. */
+AMD64Instr* AMD64Instr_SseLdzLO  ( Int sz, HReg reg, AMD64AMode* addr )
+{
+   AMD64Instr* i         = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                = Ain_SseLdzLO;
+   i->Ain.SseLdzLO.sz    = sz;
+   i->Ain.SseLdzLO.reg   = reg;
+   i->Ain.SseLdzLO.addr  = addr;
+   vassert(sz == 4 || sz == 8);
+   return i;
+}
+AMD64Instr* AMD64Instr_Sse32Fx4 ( AMD64SseOp op, HReg src, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_Sse32Fx4;
+   i->Ain.Sse32Fx4.op  = op;
+   i->Ain.Sse32Fx4.src = src;
+   i->Ain.Sse32Fx4.dst = dst;
+   vassert(op != Asse_MOV);
+   return i;
+}
+AMD64Instr* AMD64Instr_Sse32FLo ( AMD64SseOp op, HReg src, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_Sse32FLo;
+   i->Ain.Sse32FLo.op  = op;
+   i->Ain.Sse32FLo.src = src;
+   i->Ain.Sse32FLo.dst = dst;
+   vassert(op != Asse_MOV);
+   return i;
+}
+AMD64Instr* AMD64Instr_Sse64Fx2 ( AMD64SseOp op, HReg src, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_Sse64Fx2;
+   i->Ain.Sse64Fx2.op  = op;
+   i->Ain.Sse64Fx2.src = src;
+   i->Ain.Sse64Fx2.dst = dst;
+   vassert(op != Asse_MOV);
+   return i;
+}
+AMD64Instr* AMD64Instr_Sse64FLo ( AMD64SseOp op, HReg src, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_Sse64FLo;
+   i->Ain.Sse64FLo.op  = op;
+   i->Ain.Sse64FLo.src = src;
+   i->Ain.Sse64FLo.dst = dst;
+   vassert(op != Asse_MOV);
+   return i;
+}
+AMD64Instr* AMD64Instr_SseReRg ( AMD64SseOp op, HReg re, HReg rg ) {
+   AMD64Instr* i      = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag             = Ain_SseReRg;
+   i->Ain.SseReRg.op  = op;
+   i->Ain.SseReRg.src = re;
+   i->Ain.SseReRg.dst = rg;
+   return i;
+}
+/* Construct an SseCMov: conditional XMM register move, src -> dst if
+   'cond' holds.  cond must not be Acc_ALWAYS -- an unconditional move
+   should be an SseReRg(Asse_MOV) instead. */
+AMD64Instr* AMD64Instr_SseCMov ( AMD64CondCode cond, HReg src, HReg dst ) {
+   AMD64Instr* i       = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag              = Ain_SseCMov;
+   i->Ain.SseCMov.cond = cond;
+   i->Ain.SseCMov.src  = src;
+   i->Ain.SseCMov.dst  = dst;
+   vassert(cond != Acc_ALWAYS);
+   return i;
+}
+/* Construct an SseShuf: pshufd $order,src,dst.  'order' is the 8-bit
+   immediate lane-selection mask, hence the 0..0xFF range check. */
+AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst ) {
+   AMD64Instr* i        = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag               = Ain_SseShuf;
+   i->Ain.SseShuf.order = order;
+   i->Ain.SseShuf.src   = src;
+   i->Ain.SseShuf.dst   = dst;
+   vassert(order >= 0 && order <= 0xFF);
+   return i;
+}
+//uu AMD64Instr* AMD64Instr_AvxLdSt ( Bool isLoad,
+//uu                                  HReg reg, AMD64AMode* addr ) {
+//uu    AMD64Instr* i         = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+//uu    i->tag                = Ain_AvxLdSt;
+//uu    i->Ain.AvxLdSt.isLoad = isLoad;
+//uu    i->Ain.AvxLdSt.reg    = reg;
+//uu    i->Ain.AvxLdSt.addr   = addr;
+//uu    return i;
+//uu }
+//uu AMD64Instr* AMD64Instr_AvxReRg ( AMD64SseOp op, HReg re, HReg rg ) {
+//uu    AMD64Instr* i      = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+//uu    i->tag             = Ain_AvxReRg;
+//uu    i->Ain.AvxReRg.op  = op;
+//uu    i->Ain.AvxReRg.src = re;
+//uu    i->Ain.AvxReRg.dst = rg;
+//uu    return i;
+//uu }
+/* Construct an EvCheck (event check): decrements the counter at
+   *amCounter and, if it goes negative, jumps to the address stored at
+   *amFailAddr (see the Ain_EvCheck case in ppAMD64Instr). */
+AMD64Instr* AMD64Instr_EvCheck ( AMD64AMode* amCounter,
+                                 AMD64AMode* amFailAddr ) {
+   AMD64Instr* i             = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag                    = Ain_EvCheck;
+   i->Ain.EvCheck.amCounter  = amCounter;
+   i->Ain.EvCheck.amFailAddr = amFailAddr;
+   return i;
+}
+/* Construct a ProfInc: profiling counter increment.  Carries no
+   operands; the counter address is unknown at construction time and is
+   patched in later (see the "$NotKnownYet" in ppAMD64Instr). */
+AMD64Instr* AMD64Instr_ProfInc ( void ) {
+   AMD64Instr* i = LibVEX_Alloc_inline(sizeof(AMD64Instr));
+   i->tag        = Ain_ProfInc;
+   return i;
+}
+
+/* Pretty-print instruction 'i' to the VEX log, in AT&T syntax.
+   Purely for debug output -- produces no side effects beyond printing.
+   mode64 must be True (the amd64 backend has no 32-bit mode).  Cases
+   ending with 'break' rather than 'return' simply fall out of the
+   switch to the end of the function; the effect is identical. */
+void ppAMD64Instr ( const AMD64Instr* i, Bool mode64 ) 
+{
+   vassert(mode64 == True);
+   switch (i->tag) {
+      case Ain_Imm64: 
+         vex_printf("movabsq $0x%llx,", i->Ain.Imm64.imm64);
+         ppHRegAMD64(i->Ain.Imm64.dst);
+         return;
+      case Ain_Alu64R:
+         vex_printf("%sq ", showAMD64AluOp(i->Ain.Alu64R.op));
+         ppAMD64RMI(i->Ain.Alu64R.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.Alu64R.dst);
+         return;
+      case Ain_Alu64M:
+         vex_printf("%sq ", showAMD64AluOp(i->Ain.Alu64M.op));
+         ppAMD64RI(i->Ain.Alu64M.src);
+         vex_printf(",");
+         ppAMD64AMode(i->Ain.Alu64M.dst);
+         return;
+      case Ain_Sh64:
+         vex_printf("%sq ", showAMD64ShiftOp(i->Ain.Sh64.op));
+         /* src == 0 encodes "shift amount is in %cl". */
+         if (i->Ain.Sh64.src == 0)
+            vex_printf("%%cl,"); 
+         else 
+            vex_printf("$%d,", (Int)i->Ain.Sh64.src);
+         ppHRegAMD64(i->Ain.Sh64.dst);
+         return;
+      case Ain_Test64:
+         vex_printf("testq $%d,", (Int)i->Ain.Test64.imm32);
+         ppHRegAMD64(i->Ain.Test64.dst);
+         return;
+      case Ain_Unary64:
+         vex_printf("%sq ", showAMD64UnaryOp(i->Ain.Unary64.op));
+         ppHRegAMD64(i->Ain.Unary64.dst);
+         return;
+      case Ain_Lea64:
+         vex_printf("leaq ");
+         ppAMD64AMode(i->Ain.Lea64.am);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.Lea64.dst);
+         return;
+      case Ain_Alu32R:
+         vex_printf("%sl ", showAMD64AluOp(i->Ain.Alu32R.op));
+         ppAMD64RMI_lo32(i->Ain.Alu32R.src);
+         vex_printf(",");
+         ppHRegAMD64_lo32(i->Ain.Alu32R.dst);
+         return;
+      case Ain_MulL:
+         vex_printf("%cmulq ", i->Ain.MulL.syned ? 's' : 'u');
+         ppAMD64RM(i->Ain.MulL.src);
+         return;
+      case Ain_Div:
+         vex_printf("%cdiv%s ",
+                    i->Ain.Div.syned ? 's' : 'u',
+                    showAMD64ScalarSz(i->Ain.Div.sz));
+         ppAMD64RM(i->Ain.Div.src);
+         return;
+      case Ain_Push:
+         vex_printf("pushq ");
+         ppAMD64RMI(i->Ain.Push.src);
+         return;
+      case Ain_Call:
+         vex_printf("call%s[%d,", 
+                    i->Ain.Call.cond==Acc_ALWAYS 
+                       ? "" : showAMD64CondCode(i->Ain.Call.cond),
+                    i->Ain.Call.regparms );
+         ppRetLoc(i->Ain.Call.rloc);
+         vex_printf("] 0x%llx", i->Ain.Call.target);
+         break;
+
+      case Ain_XDirect:
+         vex_printf("(xDirect) ");
+         vex_printf("if (%%rflags.%s) { ",
+                    showAMD64CondCode(i->Ain.XDirect.cond));
+         vex_printf("movabsq $0x%llx,%%r11; ", i->Ain.XDirect.dstGA);
+         vex_printf("movq %%r11,");
+         ppAMD64AMode(i->Ain.XDirect.amRIP);
+         vex_printf("; ");
+         vex_printf("movabsq $disp_cp_chain_me_to_%sEP,%%r11; call *%%r11 }",
+                    i->Ain.XDirect.toFastEP ? "fast" : "slow");
+         return;
+      case Ain_XIndir:
+         vex_printf("(xIndir) ");
+         vex_printf("if (%%rflags.%s) { ",
+                    showAMD64CondCode(i->Ain.XIndir.cond));
+         vex_printf("movq ");
+         ppHRegAMD64(i->Ain.XIndir.dstGA);
+         vex_printf(",");
+         ppAMD64AMode(i->Ain.XIndir.amRIP);
+         vex_printf("; movabsq $disp_indir,%%r11; jmp *%%r11 }");
+         return;
+      case Ain_XAssisted:
+         vex_printf("(xAssisted) ");
+         vex_printf("if (%%rflags.%s) { ",
+                    showAMD64CondCode(i->Ain.XAssisted.cond));
+         vex_printf("movq ");
+         ppHRegAMD64(i->Ain.XAssisted.dstGA);
+         vex_printf(",");
+         ppAMD64AMode(i->Ain.XAssisted.amRIP);
+         vex_printf("; movl $IRJumpKind_to_TRCVAL(%d),%%rbp",
+                    (Int)i->Ain.XAssisted.jk);
+         vex_printf("; movabsq $disp_assisted,%%r11; jmp *%%r11 }");
+         return;
+
+      case Ain_CMov64:
+         vex_printf("cmov%s ", showAMD64CondCode(i->Ain.CMov64.cond));
+         ppHRegAMD64(i->Ain.CMov64.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.CMov64.dst);
+         return;
+      case Ain_CLoad:
+         vex_printf("if (%%rflags.%s) { ",
+                    showAMD64CondCode(i->Ain.CLoad.cond));
+         vex_printf("mov%c ", i->Ain.CLoad.szB == 4 ? 'l' : 'q');
+         ppAMD64AMode(i->Ain.CLoad.addr);
+         vex_printf(", ");
+         /* 4-byte loads print the 32-bit register name. */
+         (i->Ain.CLoad.szB == 4 ? ppHRegAMD64_lo32 : ppHRegAMD64)
+            (i->Ain.CLoad.dst);
+         vex_printf(" }");
+         return;
+      case Ain_CStore:
+         vex_printf("if (%%rflags.%s) { ",
+                    showAMD64CondCode(i->Ain.CStore.cond));
+         vex_printf("mov%c ", i->Ain.CStore.szB == 4 ? 'l' : 'q');
+         (i->Ain.CStore.szB == 4 ? ppHRegAMD64_lo32 : ppHRegAMD64)
+            (i->Ain.CStore.src);
+         vex_printf(", ");
+         ppAMD64AMode(i->Ain.CStore.addr);
+         vex_printf(" }");
+         return;
+
+      case Ain_MovxLQ:
+         vex_printf("mov%clq ", i->Ain.MovxLQ.syned ? 's' : 'z');
+         ppHRegAMD64_lo32(i->Ain.MovxLQ.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.MovxLQ.dst);
+         return;
+      case Ain_LoadEX:
+         if (i->Ain.LoadEX.szSmall==4 && !i->Ain.LoadEX.syned) {
+            /* 32-bit unsigned load is a plain movl (implicit zero
+               extension on amd64). */
+            vex_printf("movl ");
+            ppAMD64AMode(i->Ain.LoadEX.src);
+            vex_printf(",");
+            ppHRegAMD64_lo32(i->Ain.LoadEX.dst);
+         } else {
+            vex_printf("mov%c%cq ",
+                       i->Ain.LoadEX.syned ? 's' : 'z',
+                       i->Ain.LoadEX.szSmall==1 
+                          ? 'b' 
+                          : (i->Ain.LoadEX.szSmall==2 ? 'w' : 'l'));
+            ppAMD64AMode(i->Ain.LoadEX.src);
+            vex_printf(",");
+            ppHRegAMD64(i->Ain.LoadEX.dst);
+         }
+         return;
+      case Ain_Store:
+         vex_printf("mov%c ", i->Ain.Store.sz==1 ? 'b' 
+                              : (i->Ain.Store.sz==2 ? 'w' : 'l'));
+         ppHRegAMD64(i->Ain.Store.src);
+         vex_printf(",");
+         ppAMD64AMode(i->Ain.Store.dst);
+         return;
+      case Ain_Set64:
+         vex_printf("setq%s ", showAMD64CondCode(i->Ain.Set64.cond));
+         ppHRegAMD64(i->Ain.Set64.dst);
+         return;
+      case Ain_Bsfr64:
+         vex_printf("bs%cq ", i->Ain.Bsfr64.isFwds ? 'f' : 'r');
+         ppHRegAMD64(i->Ain.Bsfr64.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.Bsfr64.dst);
+         return;
+      case Ain_MFence:
+         vex_printf("mfence" );
+         return;
+      case Ain_ACAS:
+         vex_printf("lock cmpxchg%c ",
+                     i->Ain.ACAS.sz==1 ? 'b' : i->Ain.ACAS.sz==2 ? 'w' 
+                     : i->Ain.ACAS.sz==4 ? 'l' : 'q' );
+         vex_printf("{%%rax->%%rbx},");
+         ppAMD64AMode(i->Ain.ACAS.addr);
+         return;
+      case Ain_DACAS:
+         vex_printf("lock cmpxchg%db {%%rdx:%%rax->%%rcx:%%rbx},",
+                    (Int)(2 * i->Ain.DACAS.sz));
+         ppAMD64AMode(i->Ain.DACAS.addr);
+         return;
+      case Ain_A87Free:
+         vex_printf("ffree %%st(7..%d)", 8 - i->Ain.A87Free.nregs );
+         break;
+      case Ain_A87PushPop:
+         vex_printf(i->Ain.A87PushPop.isPush ? "fld%c " : "fstp%c ",
+                    i->Ain.A87PushPop.szB == 4 ? 's' : 'l');
+         ppAMD64AMode(i->Ain.A87PushPop.addr);
+         break;
+      case Ain_A87FpOp:
+         vex_printf("f%s", showA87FpOp(i->Ain.A87FpOp.op));
+         break;
+      case Ain_A87LdCW:
+         vex_printf("fldcw ");
+         ppAMD64AMode(i->Ain.A87LdCW.addr);
+         break;
+      case Ain_A87StSW:
+         vex_printf("fstsw ");
+         ppAMD64AMode(i->Ain.A87StSW.addr);
+         break;
+      case Ain_LdMXCSR:
+         vex_printf("ldmxcsr ");
+         ppAMD64AMode(i->Ain.LdMXCSR.addr);
+         break;
+      case Ain_SseUComIS:
+         vex_printf("ucomis%s ", i->Ain.SseUComIS.sz==4 ? "s" : "d");
+         ppHRegAMD64(i->Ain.SseUComIS.srcL);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.SseUComIS.srcR);
+         vex_printf(" ; pushfq ; popq ");
+         ppHRegAMD64(i->Ain.SseUComIS.dst);
+         break;
+      case Ain_SseSI2SF:
+         vex_printf("cvtsi2s%s ", i->Ain.SseSI2SF.szD==4 ? "s" : "d");
+         (i->Ain.SseSI2SF.szS==4 ? ppHRegAMD64_lo32 : ppHRegAMD64)
+            (i->Ain.SseSI2SF.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.SseSI2SF.dst);
+         break;
+      case Ain_SseSF2SI:
+         vex_printf("cvts%s2si ", i->Ain.SseSF2SI.szS==4 ? "s" : "d");
+         ppHRegAMD64(i->Ain.SseSF2SI.src);
+         vex_printf(",");
+         (i->Ain.SseSF2SI.szD==4 ? ppHRegAMD64_lo32 : ppHRegAMD64)
+            (i->Ain.SseSF2SI.dst);
+         break;
+      case Ain_SseSDSS:
+         vex_printf(i->Ain.SseSDSS.from64 ? "cvtsd2ss " : "cvtss2sd ");
+         ppHRegAMD64(i->Ain.SseSDSS.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.SseSDSS.dst);
+         break;
+      case Ain_SseLdSt:
+         switch (i->Ain.SseLdSt.sz) {
+            case 4:  vex_printf("movss "); break;
+            case 8:  vex_printf("movsd "); break;
+            case 16: vex_printf("movups "); break;
+            default: vassert(0);
+         }
+         if (i->Ain.SseLdSt.isLoad) {
+            ppAMD64AMode(i->Ain.SseLdSt.addr);
+            vex_printf(",");
+            ppHRegAMD64(i->Ain.SseLdSt.reg);
+         } else {
+            ppHRegAMD64(i->Ain.SseLdSt.reg);
+            vex_printf(",");
+            ppAMD64AMode(i->Ain.SseLdSt.addr);
+         }
+         return;
+      case Ain_SseLdzLO:
+         vex_printf("movs%s ", i->Ain.SseLdzLO.sz==4 ? "s" : "d");
+         ppAMD64AMode(i->Ain.SseLdzLO.addr);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.SseLdzLO.reg);
+         return;
+      case Ain_Sse32Fx4:
+         vex_printf("%sps ", showAMD64SseOp(i->Ain.Sse32Fx4.op));
+         ppHRegAMD64(i->Ain.Sse32Fx4.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.Sse32Fx4.dst);
+         return;
+      case Ain_Sse32FLo:
+         vex_printf("%sss ", showAMD64SseOp(i->Ain.Sse32FLo.op));
+         ppHRegAMD64(i->Ain.Sse32FLo.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.Sse32FLo.dst);
+         return;
+      case Ain_Sse64Fx2:
+         vex_printf("%spd ", showAMD64SseOp(i->Ain.Sse64Fx2.op));
+         ppHRegAMD64(i->Ain.Sse64Fx2.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.Sse64Fx2.dst);
+         return;
+      case Ain_Sse64FLo:
+         vex_printf("%ssd ", showAMD64SseOp(i->Ain.Sse64FLo.op));
+         ppHRegAMD64(i->Ain.Sse64FLo.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.Sse64FLo.dst);
+         return;
+      case Ain_SseReRg:
+         vex_printf("%s ", showAMD64SseOp(i->Ain.SseReRg.op));
+         ppHRegAMD64(i->Ain.SseReRg.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.SseReRg.dst);
+         return;
+      case Ain_SseCMov:
+         vex_printf("cmov%s ", showAMD64CondCode(i->Ain.SseCMov.cond));
+         ppHRegAMD64(i->Ain.SseCMov.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.SseCMov.dst);
+         return;
+      case Ain_SseShuf:
+         vex_printf("pshufd $0x%x,", i->Ain.SseShuf.order);
+         ppHRegAMD64(i->Ain.SseShuf.src);
+         vex_printf(",");
+         ppHRegAMD64(i->Ain.SseShuf.dst);
+         return;
+      //uu case Ain_AvxLdSt:
+      //uu    vex_printf("vmovups ");
+      //uu    if (i->Ain.AvxLdSt.isLoad) {
+      //uu       ppAMD64AMode(i->Ain.AvxLdSt.addr);
+      //uu       vex_printf(",");
+      //uu       ppHRegAMD64(i->Ain.AvxLdSt.reg);
+      //uu    } else {
+      //uu       ppHRegAMD64(i->Ain.AvxLdSt.reg);
+      //uu       vex_printf(",");
+      //uu       ppAMD64AMode(i->Ain.AvxLdSt.addr);
+      //uu    }
+      //uu    return;
+      //uu case Ain_AvxReRg:
+      //uu    vex_printf("v%s ", showAMD64SseOp(i->Ain.SseReRg.op));
+      //uu    ppHRegAMD64(i->Ain.AvxReRg.src);
+      //uu    vex_printf(",");
+      //uu    ppHRegAMD64(i->Ain.AvxReRg.dst);
+      //uu    return;
+      case Ain_EvCheck:
+         vex_printf("(evCheck) decl ");
+         ppAMD64AMode(i->Ain.EvCheck.amCounter);
+         vex_printf("; jns nofail; jmp *");
+         ppAMD64AMode(i->Ain.EvCheck.amFailAddr);
+         vex_printf("; nofail:");
+         return;
+      case Ain_ProfInc:
+         vex_printf("(profInc) movabsq $NotKnownYet, %%r11; incq (%%r11)");
+         return;
+      default:
+         vpanic("ppAMD64Instr");
+   }
+}
+
+/* --------- Helpers for register allocation. --------- */
+
+/* Record in 'u' the registers read, written, or modified by
+   instruction 'i', for the benefit of the register allocator.
+   Fixed (real) registers such as %rax for MulL/Div, or the full
+   caller-saved set for Call, are stated explicitly.  mode64 must be
+   True. */
+void getRegUsage_AMD64Instr ( HRegUsage* u, const AMD64Instr* i, Bool mode64 )
+{
+   Bool unary;
+   vassert(mode64 == True);
+   initHRegUsage(u);
+   switch (i->tag) {
+      case Ain_Imm64:
+         addHRegUse(u, HRmWrite, i->Ain.Imm64.dst);
+         return;
+      case Ain_Alu64R:
+         addRegUsage_AMD64RMI(u, i->Ain.Alu64R.src);
+         if (i->Ain.Alu64R.op == Aalu_MOV) {
+            /* MOV only writes dst; it does not read the old value. */
+            addHRegUse(u, HRmWrite, i->Ain.Alu64R.dst);
+            return;
+         }
+         if (i->Ain.Alu64R.op == Aalu_CMP) { 
+            /* CMP only reads dst; the result goes to rflags. */
+            addHRegUse(u, HRmRead, i->Ain.Alu64R.dst);
+            return;
+         }
+         addHRegUse(u, HRmModify, i->Ain.Alu64R.dst);
+         return;
+      case Ain_Alu64M:
+         addRegUsage_AMD64RI(u, i->Ain.Alu64M.src);
+         addRegUsage_AMD64AMode(u, i->Ain.Alu64M.dst);
+         return;
+      case Ain_Sh64:
+         addHRegUse(u, HRmModify, i->Ain.Sh64.dst);
+         /* src == 0 means the shift amount comes from %cl. */
+         if (i->Ain.Sh64.src == 0)
+            addHRegUse(u, HRmRead, hregAMD64_RCX());
+         return;
+      case Ain_Test64:
+         addHRegUse(u, HRmRead, i->Ain.Test64.dst);
+         return;
+      case Ain_Unary64:
+         addHRegUse(u, HRmModify, i->Ain.Unary64.dst);
+         return;
+      case Ain_Lea64:
+         addRegUsage_AMD64AMode(u, i->Ain.Lea64.am);
+         addHRegUse(u, HRmWrite, i->Ain.Lea64.dst);
+         return;
+      case Ain_Alu32R:
+         vassert(i->Ain.Alu32R.op != Aalu_MOV);
+         addRegUsage_AMD64RMI(u, i->Ain.Alu32R.src);
+         if (i->Ain.Alu32R.op == Aalu_CMP) { 
+            addHRegUse(u, HRmRead, i->Ain.Alu32R.dst);
+            return;
+         }
+         addHRegUse(u, HRmModify, i->Ain.Alu32R.dst);
+         return;
+      case Ain_MulL:
+         /* Widening multiply: rdx:rax := rax * src. */
+         addRegUsage_AMD64RM(u, i->Ain.MulL.src, HRmRead);
+         addHRegUse(u, HRmModify, hregAMD64_RAX());
+         addHRegUse(u, HRmWrite, hregAMD64_RDX());
+         return;
+      case Ain_Div:
+         /* Divide rdx:rax by src; quotient in rax, remainder in rdx. */
+         addRegUsage_AMD64RM(u, i->Ain.Div.src, HRmRead);
+         addHRegUse(u, HRmModify, hregAMD64_RAX());
+         addHRegUse(u, HRmModify, hregAMD64_RDX());
+         return;
+      case Ain_Push:
+         addRegUsage_AMD64RMI(u, i->Ain.Push.src);
+         addHRegUse(u, HRmModify, hregAMD64_RSP());
+         return;
+      case Ain_Call:
+         /* This is a bit subtle. */
+         /* First off, claim it trashes all the caller-saved regs
+            which fall within the register allocator's jurisdiction.
+            These I believe to be: rax rcx rdx rsi rdi r8 r9 r10 r11 
+            and all the xmm registers.
+         */
+         addHRegUse(u, HRmWrite, hregAMD64_RAX());
+         addHRegUse(u, HRmWrite, hregAMD64_RCX());
+         addHRegUse(u, HRmWrite, hregAMD64_RDX());
+         addHRegUse(u, HRmWrite, hregAMD64_RSI());
+         addHRegUse(u, HRmWrite, hregAMD64_RDI());
+         addHRegUse(u, HRmWrite, hregAMD64_R8());
+         addHRegUse(u, HRmWrite, hregAMD64_R9());
+         addHRegUse(u, HRmWrite, hregAMD64_R10());
+         addHRegUse(u, HRmWrite, hregAMD64_R11());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM0());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM1());
+         /* NOTE(review): XMM2 is skipped here -- presumably because it
+            is not in the allocator's register universe; confirm against
+            the universe/allocatable-register setup for this backend. */
+         addHRegUse(u, HRmWrite, hregAMD64_XMM3());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM4());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM5());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM6());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM7());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM8());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM9());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM10());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM11());
+         addHRegUse(u, HRmWrite, hregAMD64_XMM12());
+
+         /* Now we have to state any parameter-carrying registers
+            which might be read.  This depends on the regparmness. */
+         switch (i->Ain.Call.regparms) {
+            case 6: addHRegUse(u, HRmRead, hregAMD64_R9());  /*fallthru*/
+            case 5: addHRegUse(u, HRmRead, hregAMD64_R8());  /*fallthru*/
+            case 4: addHRegUse(u, HRmRead, hregAMD64_RCX()); /*fallthru*/
+            case 3: addHRegUse(u, HRmRead, hregAMD64_RDX()); /*fallthru*/
+            case 2: addHRegUse(u, HRmRead, hregAMD64_RSI()); /*fallthru*/
+            case 1: addHRegUse(u, HRmRead, hregAMD64_RDI()); break;
+            case 0: break;
+            default: vpanic("getRegUsage_AMD64Instr:Call:regparms");
+         }
+         /* Finally, there is the issue that the insn trashes a
+            register because the literal target address has to be
+            loaded into a register.  Fortunately, r11 is stated in the
+            ABI as a scratch register, and so seems a suitable victim.  */
+         addHRegUse(u, HRmWrite, hregAMD64_R11());
+         /* Upshot of this is that the assembler really must use r11,
+            and no other, as a destination temporary. */
+         return;
+      /* XDirect/XIndir/XAssisted are also a bit subtle.  They
+         conditionally exit the block.  Hence we only need to list (1)
+         the registers that they read, and (2) the registers that they
+         write in the case where the block is not exited.  (2) is
+         empty, hence only (1) is relevant here. */
+      case Ain_XDirect:
+         /* Don't bother to mention the write to %r11, since it is not
+            available to the allocator. */
+         addRegUsage_AMD64AMode(u, i->Ain.XDirect.amRIP);
+         return;
+      case Ain_XIndir:
+         /* Ditto re %r11 */
+         addHRegUse(u, HRmRead, i->Ain.XIndir.dstGA);
+         addRegUsage_AMD64AMode(u, i->Ain.XIndir.amRIP);
+         return;
+      case Ain_XAssisted:
+         /* Ditto re %r11 and %rbp (the baseblock ptr) */
+         addHRegUse(u, HRmRead, i->Ain.XAssisted.dstGA);
+         addRegUsage_AMD64AMode(u, i->Ain.XAssisted.amRIP);
+         return;
+      case Ain_CMov64:
+         addHRegUse(u, HRmRead,   i->Ain.CMov64.src);
+         addHRegUse(u, HRmModify, i->Ain.CMov64.dst);
+         return;
+      case Ain_CLoad:
+         addRegUsage_AMD64AMode(u, i->Ain.CLoad.addr);
+         /* Modify, not Write: if the condition fails, dst keeps its
+            old value. */
+         addHRegUse(u, HRmModify, i->Ain.CLoad.dst);
+         return;
+      case Ain_CStore:
+         addRegUsage_AMD64AMode(u, i->Ain.CStore.addr);
+         addHRegUse(u, HRmRead, i->Ain.CStore.src);
+         return;
+      case Ain_MovxLQ:
+         addHRegUse(u, HRmRead,  i->Ain.MovxLQ.src);
+         addHRegUse(u, HRmWrite, i->Ain.MovxLQ.dst);
+         return;
+      case Ain_LoadEX:
+         addRegUsage_AMD64AMode(u, i->Ain.LoadEX.src);
+         addHRegUse(u, HRmWrite, i->Ain.LoadEX.dst);
+         return;
+      case Ain_Store:
+         addHRegUse(u, HRmRead, i->Ain.Store.src);
+         addRegUsage_AMD64AMode(u, i->Ain.Store.dst);
+         return;
+      case Ain_Set64:
+         addHRegUse(u, HRmWrite, i->Ain.Set64.dst);
+         return;
+      case Ain_Bsfr64:
+         addHRegUse(u, HRmRead, i->Ain.Bsfr64.src);
+         addHRegUse(u, HRmWrite, i->Ain.Bsfr64.dst);
+         return;
+      case Ain_MFence:
+         return;
+      case Ain_ACAS:
+         /* cmpxchg implicitly uses rax (old value) and rbx (new). */
+         addRegUsage_AMD64AMode(u, i->Ain.ACAS.addr);
+         addHRegUse(u, HRmRead, hregAMD64_RBX());
+         addHRegUse(u, HRmModify, hregAMD64_RAX());
+         return;
+      case Ain_DACAS:
+         /* cmpxchg{8,16}b uses rdx:rax (old) and rcx:rbx (new). */
+         addRegUsage_AMD64AMode(u, i->Ain.DACAS.addr);
+         addHRegUse(u, HRmRead, hregAMD64_RCX());
+         addHRegUse(u, HRmRead, hregAMD64_RBX());
+         addHRegUse(u, HRmModify, hregAMD64_RDX());
+         addHRegUse(u, HRmModify, hregAMD64_RAX());
+         return;
+      case Ain_A87Free:
+         return;
+      case Ain_A87PushPop:
+         addRegUsage_AMD64AMode(u, i->Ain.A87PushPop.addr);
+         return;
+      case Ain_A87FpOp:
+         return;
+      case Ain_A87LdCW:
+         addRegUsage_AMD64AMode(u, i->Ain.A87LdCW.addr);
+         return;
+      case Ain_A87StSW:
+         addRegUsage_AMD64AMode(u, i->Ain.A87StSW.addr);
+         return;
+      case Ain_LdMXCSR:
+         addRegUsage_AMD64AMode(u, i->Ain.LdMXCSR.addr);
+         return;
+      case Ain_SseUComIS:
+         addHRegUse(u, HRmRead,  i->Ain.SseUComIS.srcL);
+         addHRegUse(u, HRmRead,  i->Ain.SseUComIS.srcR);
+         addHRegUse(u, HRmWrite, i->Ain.SseUComIS.dst);
+         return;
+      case Ain_SseSI2SF:
+         addHRegUse(u, HRmRead,  i->Ain.SseSI2SF.src);
+         addHRegUse(u, HRmWrite, i->Ain.SseSI2SF.dst);
+         return;
+      case Ain_SseSF2SI:
+         addHRegUse(u, HRmRead,  i->Ain.SseSF2SI.src);
+         addHRegUse(u, HRmWrite, i->Ain.SseSF2SI.dst);
+         return;
+      case Ain_SseSDSS:
+         addHRegUse(u, HRmRead,  i->Ain.SseSDSS.src);
+         addHRegUse(u, HRmWrite, i->Ain.SseSDSS.dst);
+         return;
+      case Ain_SseLdSt:
+         addRegUsage_AMD64AMode(u, i->Ain.SseLdSt.addr);
+         addHRegUse(u, i->Ain.SseLdSt.isLoad ? HRmWrite : HRmRead,
+                       i->Ain.SseLdSt.reg);
+         return;
+      case Ain_SseLdzLO:
+         addRegUsage_AMD64AMode(u, i->Ain.SseLdzLO.addr);
+         addHRegUse(u, HRmWrite, i->Ain.SseLdzLO.reg);
+         return;
+      case Ain_Sse32Fx4:
+         vassert(i->Ain.Sse32Fx4.op != Asse_MOV);
+         /* Unary ops (rcp/rsqrt/sqrt) fully overwrite dst; binary ops
+            both read and write it. */
+         unary = toBool( i->Ain.Sse32Fx4.op == Asse_RCPF
+                         || i->Ain.Sse32Fx4.op == Asse_RSQRTF
+                         || i->Ain.Sse32Fx4.op == Asse_SQRTF );
+         addHRegUse(u, HRmRead, i->Ain.Sse32Fx4.src);
+         addHRegUse(u, unary ? HRmWrite : HRmModify, 
+                       i->Ain.Sse32Fx4.dst);
+         return;
+      case Ain_Sse32FLo:
+         vassert(i->Ain.Sse32FLo.op != Asse_MOV);
+         unary = toBool( i->Ain.Sse32FLo.op == Asse_RCPF
+                         || i->Ain.Sse32FLo.op == Asse_RSQRTF
+                         || i->Ain.Sse32FLo.op == Asse_SQRTF );
+         addHRegUse(u, HRmRead, i->Ain.Sse32FLo.src);
+         addHRegUse(u, unary ? HRmWrite : HRmModify, 
+                       i->Ain.Sse32FLo.dst);
+         return;
+      case Ain_Sse64Fx2:
+         vassert(i->Ain.Sse64Fx2.op != Asse_MOV);
+         unary = toBool( i->Ain.Sse64Fx2.op == Asse_RCPF
+                         || i->Ain.Sse64Fx2.op == Asse_RSQRTF
+                         || i->Ain.Sse64Fx2.op == Asse_SQRTF );
+         addHRegUse(u, HRmRead, i->Ain.Sse64Fx2.src);
+         addHRegUse(u, unary ? HRmWrite : HRmModify, 
+                       i->Ain.Sse64Fx2.dst);
+         return;
+      case Ain_Sse64FLo:
+         vassert(i->Ain.Sse64FLo.op != Asse_MOV);
+         unary = toBool( i->Ain.Sse64FLo.op == Asse_RCPF
+                         || i->Ain.Sse64FLo.op == Asse_RSQRTF
+                         || i->Ain.Sse64FLo.op == Asse_SQRTF );
+         addHRegUse(u, HRmRead, i->Ain.Sse64FLo.src);
+         addHRegUse(u, unary ? HRmWrite : HRmModify, 
+                       i->Ain.Sse64FLo.dst);
+         return;
+      case Ain_SseReRg:
+         if ( (i->Ain.SseReRg.op == Asse_XOR
+               || i->Ain.SseReRg.op == Asse_CMPEQ32)
+              && sameHReg(i->Ain.SseReRg.src, i->Ain.SseReRg.dst)) {
+            /* reg-alloc needs to understand 'xor r,r' and 'cmpeqd
+               r,r' as a write of a value to r, and independent of any
+               previous value in r */
+            /* (as opposed to a rite of passage :-) */
+            addHRegUse(u, HRmWrite, i->Ain.SseReRg.dst);
+         } else {
+            addHRegUse(u, HRmRead, i->Ain.SseReRg.src);
+            addHRegUse(u, i->Ain.SseReRg.op == Asse_MOV 
+                             ? HRmWrite : HRmModify, 
+                          i->Ain.SseReRg.dst);
+         }
+         return;
+      case Ain_SseCMov:
+         addHRegUse(u, HRmRead,   i->Ain.SseCMov.src);
+         addHRegUse(u, HRmModify, i->Ain.SseCMov.dst);
+         return;
+      case Ain_SseShuf:
+         addHRegUse(u, HRmRead,  i->Ain.SseShuf.src);
+         addHRegUse(u, HRmWrite, i->Ain.SseShuf.dst);
+         return;
+      //uu case Ain_AvxLdSt:
+      //uu addRegUsage_AMD64AMode(u, i->Ain.AvxLdSt.addr);
+      //uu addHRegUse(u, i->Ain.AvxLdSt.isLoad ? HRmWrite : HRmRead,
+      //uu               i->Ain.AvxLdSt.reg);
+      //uu return;
+      //uu case Ain_AvxReRg:
+      //uu    if ( (i->Ain.AvxReRg.op == Asse_XOR
+      //uu          || i->Ain.AvxReRg.op == Asse_CMPEQ32)
+      //uu         && i->Ain.AvxReRg.src == i->Ain.AvxReRg.dst) {
+      //uu       /* See comments on the case for Ain_SseReRg. */
+      //uu       addHRegUse(u, HRmWrite, i->Ain.AvxReRg.dst);
+      //uu    } else {
+      //uu       addHRegUse(u, HRmRead, i->Ain.AvxReRg.src);
+      //uu       addHRegUse(u, i->Ain.AvxReRg.op == Asse_MOV 
+      //uu                        ? HRmWrite : HRmModify, 
+      //uu                     i->Ain.AvxReRg.dst);
+      //uu    }
+      //uu    return;
+      case Ain_EvCheck:
+         /* We expect both amodes only to mention %rbp, so this is in
+            fact pointless, since %rbp isn't allocatable, but anyway.. */
+         addRegUsage_AMD64AMode(u, i->Ain.EvCheck.amCounter);
+         addRegUsage_AMD64AMode(u, i->Ain.EvCheck.amFailAddr);
+         return;
+      case Ain_ProfInc:
+         addHRegUse(u, HRmWrite, hregAMD64_R11());
+         return;
+      default:
+         ppAMD64Instr(i, mode64);
+         vpanic("getRegUsage_AMD64Instr");
+   }
+}
+
+/* local helper: replace *r, in place, with its post-allocation
+   counterpart as given by the remap table m. */
+static inline void mapReg(HRegRemap* m, HReg* r)
+{
+   *r = lookupHRegRemap(m, *r);
+}
+
+/* Apply the register allocator's vreg->rreg mapping 'm' to every
+   register field of instruction 'i', in place.  Instructions that use
+   only fixed (hardwired) registers, e.g. Call and ProfInc, need no
+   remapping of their implicit operands.  mode64 must be True.  Cases
+   ending with 'break' rather than 'return' simply fall out of the
+   switch; the effect is identical. */
+void mapRegs_AMD64Instr ( HRegRemap* m, AMD64Instr* i, Bool mode64 )
+{
+   vassert(mode64 == True);
+   switch (i->tag) {
+      case Ain_Imm64:
+         mapReg(m, &i->Ain.Imm64.dst);
+         return;
+      case Ain_Alu64R:
+         mapRegs_AMD64RMI(m, i->Ain.Alu64R.src);
+         mapReg(m, &i->Ain.Alu64R.dst);
+         return;
+      case Ain_Alu64M:
+         mapRegs_AMD64RI(m, i->Ain.Alu64M.src);
+         mapRegs_AMD64AMode(m, i->Ain.Alu64M.dst);
+         return;
+      case Ain_Sh64:
+         mapReg(m, &i->Ain.Sh64.dst);
+         return;
+      case Ain_Test64:
+         mapReg(m, &i->Ain.Test64.dst);
+         return;
+      case Ain_Unary64:
+         mapReg(m, &i->Ain.Unary64.dst);
+         return;
+      case Ain_Lea64:
+         mapRegs_AMD64AMode(m, i->Ain.Lea64.am);
+         mapReg(m, &i->Ain.Lea64.dst);
+         return;
+      case Ain_Alu32R:
+         mapRegs_AMD64RMI(m, i->Ain.Alu32R.src);
+         mapReg(m, &i->Ain.Alu32R.dst);
+         return;
+      case Ain_MulL:
+         mapRegs_AMD64RM(m, i->Ain.MulL.src);
+         return;
+      case Ain_Div:
+         mapRegs_AMD64RM(m, i->Ain.Div.src);
+         return;
+      case Ain_Push:
+         mapRegs_AMD64RMI(m, i->Ain.Push.src);
+         return;
+      case Ain_Call:
+         /* Only fixed registers involved; nothing to remap. */
+         return;
+      case Ain_XDirect:
+         mapRegs_AMD64AMode(m, i->Ain.XDirect.amRIP);
+         return;
+      case Ain_XIndir:
+         mapReg(m, &i->Ain.XIndir.dstGA);
+         mapRegs_AMD64AMode(m, i->Ain.XIndir.amRIP);
+         return;
+      case Ain_XAssisted:
+         mapReg(m, &i->Ain.XAssisted.dstGA);
+         mapRegs_AMD64AMode(m, i->Ain.XAssisted.amRIP);
+         return;
+      case Ain_CMov64:
+         mapReg(m, &i->Ain.CMov64.src);
+         mapReg(m, &i->Ain.CMov64.dst);
+         return;
+      case Ain_CLoad:
+         mapRegs_AMD64AMode(m, i->Ain.CLoad.addr);
+         mapReg(m, &i->Ain.CLoad.dst);
+         return;
+      case Ain_CStore:
+         mapRegs_AMD64AMode(m, i->Ain.CStore.addr);
+         mapReg(m, &i->Ain.CStore.src);
+         return;
+      case Ain_MovxLQ:
+         mapReg(m, &i->Ain.MovxLQ.src);
+         mapReg(m, &i->Ain.MovxLQ.dst);
+         return;
+      case Ain_LoadEX:
+         mapRegs_AMD64AMode(m, i->Ain.LoadEX.src);
+         mapReg(m, &i->Ain.LoadEX.dst);
+         return;
+      case Ain_Store:
+         mapReg(m, &i->Ain.Store.src);
+         mapRegs_AMD64AMode(m, i->Ain.Store.dst);
+         return;
+      case Ain_Set64:
+         mapReg(m, &i->Ain.Set64.dst);
+         return;
+      case Ain_Bsfr64:
+         mapReg(m, &i->Ain.Bsfr64.src);
+         mapReg(m, &i->Ain.Bsfr64.dst);
+         return;
+      case Ain_MFence:
+         return;
+      case Ain_ACAS:
+         mapRegs_AMD64AMode(m, i->Ain.ACAS.addr);
+         return;
+      case Ain_DACAS:
+         mapRegs_AMD64AMode(m, i->Ain.DACAS.addr);
+         return;
+      case Ain_A87Free:
+         return;
+      case Ain_A87PushPop:
+         mapRegs_AMD64AMode(m, i->Ain.A87PushPop.addr);
+         return;
+      case Ain_A87FpOp:
+         return;
+      case Ain_A87LdCW:
+         mapRegs_AMD64AMode(m, i->Ain.A87LdCW.addr);
+         return;
+      case Ain_A87StSW:
+         mapRegs_AMD64AMode(m, i->Ain.A87StSW.addr);
+         return;
+      case Ain_LdMXCSR:
+         mapRegs_AMD64AMode(m, i->Ain.LdMXCSR.addr);
+         return;
+      case Ain_SseUComIS:
+         mapReg(m, &i->Ain.SseUComIS.srcL);
+         mapReg(m, &i->Ain.SseUComIS.srcR);
+         mapReg(m, &i->Ain.SseUComIS.dst);
+         return;
+      case Ain_SseSI2SF:
+         mapReg(m, &i->Ain.SseSI2SF.src);
+         mapReg(m, &i->Ain.SseSI2SF.dst);
+         return;
+      case Ain_SseSF2SI:
+         mapReg(m, &i->Ain.SseSF2SI.src);
+         mapReg(m, &i->Ain.SseSF2SI.dst);
+         return;
+      case Ain_SseSDSS:
+         mapReg(m, &i->Ain.SseSDSS.src);
+         mapReg(m, &i->Ain.SseSDSS.dst);
+         return;
+      case Ain_SseLdSt:
+         mapReg(m, &i->Ain.SseLdSt.reg);
+         mapRegs_AMD64AMode(m, i->Ain.SseLdSt.addr);
+         break;
+      case Ain_SseLdzLO:
+         mapReg(m, &i->Ain.SseLdzLO.reg);
+         mapRegs_AMD64AMode(m, i->Ain.SseLdzLO.addr);
+         break;
+      case Ain_Sse32Fx4:
+         mapReg(m, &i->Ain.Sse32Fx4.src);
+         mapReg(m, &i->Ain.Sse32Fx4.dst);
+         return;
+      case Ain_Sse32FLo:
+         mapReg(m, &i->Ain.Sse32FLo.src);
+         mapReg(m, &i->Ain.Sse32FLo.dst);
+         return;
+      case Ain_Sse64Fx2:
+         mapReg(m, &i->Ain.Sse64Fx2.src);
+         mapReg(m, &i->Ain.Sse64Fx2.dst);
+         return;
+      case Ain_Sse64FLo:
+         mapReg(m, &i->Ain.Sse64FLo.src);
+         mapReg(m, &i->Ain.Sse64FLo.dst);
+         return;
+      case Ain_SseReRg:
+         mapReg(m, &i->Ain.SseReRg.src);
+         mapReg(m, &i->Ain.SseReRg.dst);
+         return;
+      case Ain_SseCMov:
+         mapReg(m, &i->Ain.SseCMov.src);
+         mapReg(m, &i->Ain.SseCMov.dst);
+         return;
+      case Ain_SseShuf:
+         mapReg(m, &i->Ain.SseShuf.src);
+         mapReg(m, &i->Ain.SseShuf.dst);
+         return;
+      //uu case Ain_AvxLdSt:
+      //uu    mapReg(m, &i->Ain.AvxLdSt.reg);
+      //uu    mapRegs_AMD64AMode(m, i->Ain.AvxLdSt.addr);
+      //uu    break;
+      //uu case Ain_AvxReRg:
+      //uu    mapReg(m, &i->Ain.AvxReRg.src);
+      //uu    mapReg(m, &i->Ain.AvxReRg.dst);
+      //uu    return;
+      case Ain_EvCheck:
+         /* We expect both amodes only to mention %rbp, so this is in
+            fact pointless, since %rbp isn't allocatable, but anyway.. */
+         mapRegs_AMD64AMode(m, i->Ain.EvCheck.amCounter);
+         mapRegs_AMD64AMode(m, i->Ain.EvCheck.amFailAddr);
+         return;
+      case Ain_ProfInc:
+         /* hardwires r11 -- nothing to modify. */
+         return;
+      default:
+         ppAMD64Instr(i, mode64);
+         vpanic("mapRegs_AMD64Instr");
+   }
+}
+
+/* Figure out if i represents a reg-reg move, and if so assign the
+   source and destination to *src and *dst.  If in doubt say No.  Used
+   by the register allocator to do move coalescing. 
+*/
+Bool isMove_AMD64Instr ( const AMD64Instr* i, HReg* src, HReg* dst )
+{
+   /* A coalescable move is either an integer MOV whose source
+      operand is a register, or an SSE MOV between two vector
+      registers.  Everything else is reported as not-a-move. */
+   if (i->tag == Ain_Alu64R
+       && i->Ain.Alu64R.op == Aalu_MOV
+       && i->Ain.Alu64R.src->tag == Armi_Reg) {
+      *src = i->Ain.Alu64R.src->Armi.Reg.reg;
+      *dst = i->Ain.Alu64R.dst;
+      return True;
+   }
+   if (i->tag == Ain_SseReRg
+       && i->Ain.SseReRg.op == Asse_MOV) {
+      *src = i->Ain.SseReRg.src;
+      *dst = i->Ain.SseReRg.dst;
+      return True;
+   }
+   //uu NOTE: Ain_AvxReRg with Asse_MOV would also qualify, once the
+   //uu AVX support elsewhere in this file is re-enabled.
+   return False;
+}
+
+
+/* Generate amd64 spill/reload instructions under the direction of the
+   register allocator.  Note it's critical these don't write the
+   condition codes. */
+
+void genSpill_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                      HReg rreg, Int offsetB, Bool mode64 )
+{
+   /* Emit an instruction spilling real register rreg to the slot at
+      offsetB(%rbp).  A single instruction always suffices, so *i2 is
+      left NULL.  Must not trash the condition codes. */
+   AMD64AMode* spillAM;
+   HRegClass   rc;
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+   vassert(mode64 == True);
+   *i1 = *i2 = NULL;
+   spillAM = AMD64AMode_IR(offsetB, hregAMD64_RBP());
+   rc = hregClass(rreg);
+   if (rc == HRcInt64) {
+      *i1 = AMD64Instr_Alu64M ( Aalu_MOV, AMD64RI_Reg(rreg), spillAM );
+   }
+   else if (rc == HRcVec128) {
+      *i1 = AMD64Instr_SseLdSt ( False/*store*/, 16, rreg, spillAM );
+   }
+   else {
+      ppHRegClass(rc);
+      vpanic("genSpill_AMD64: unimplemented regclass");
+   }
+}
+
+void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                       HReg rreg, Int offsetB, Bool mode64 )
+{
+   /* Emit an instruction reloading real register rreg from the slot
+      at offsetB(%rbp).  A single instruction always suffices, so *i2
+      is left NULL.  Must not trash the condition codes. */
+   AMD64AMode* spillAM;
+   HRegClass   rc;
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+   vassert(mode64 == True);
+   *i1 = *i2 = NULL;
+   spillAM = AMD64AMode_IR(offsetB, hregAMD64_RBP());
+   rc = hregClass(rreg);
+   if (rc == HRcInt64) {
+      *i1 = AMD64Instr_Alu64R ( Aalu_MOV, AMD64RMI_Mem(spillAM), rreg );
+   }
+   else if (rc == HRcVec128) {
+      *i1 = AMD64Instr_SseLdSt ( True/*load*/, 16, rreg, spillAM );
+   }
+   else {
+      ppHRegClass(rc);
+      vpanic("genReload_AMD64: unimplemented regclass");
+   }
+}
+
+
+/* --------- The amd64 assembler (bleh.) --------- */
+
+/* Produce the low three bits of an integer register number. */
+inline static UInt iregEnc210 ( HReg r )
+{
+   /* Low three bits of the hardware encoding of integer register r
+      (the part that goes into a ModRM/SIB field). */
+   UInt enc;
+   vassert(hregClass(r) == HRcInt64);
+   vassert(!hregIsVirtual(r));
+   enc = hregEncoding(r);
+   vassert(enc <= 15);
+   return enc & 7;
+}
+
+/* Produce bit 3 of an integer register number. */
+inline static UInt iregEnc3 ( HReg r )
+{
+   /* Bit 3 of the hardware encoding of integer register r (the bit
+      that goes into a REX prefix). */
+   UInt enc;
+   vassert(hregClass(r) == HRcInt64);
+   vassert(!hregIsVirtual(r));
+   enc = hregEncoding(r);
+   vassert(enc <= 15);
+   return (enc >> 3) & 1;
+}
+
+/* Produce a complete 4-bit integer register number. */
+inline static UInt iregEnc3210 ( HReg r )
+{
+   /* Full 4-bit hardware encoding of integer register r. */
+   UInt enc;
+   vassert(hregClass(r) == HRcInt64);
+   vassert(!hregIsVirtual(r));
+   enc = hregEncoding(r);
+   vassert(enc <= 15);
+   return enc;
+}
+
+/* Produce a complete 4-bit integer register number. */
+inline static UInt vregEnc3210 ( HReg r )
+{
+   /* Full 4-bit hardware encoding of vector (XMM) register r. */
+   UInt enc;
+   vassert(hregClass(r) == HRcVec128);
+   vassert(!hregIsVirtual(r));
+   enc = hregEncoding(r);
+   vassert(enc <= 15);
+   return enc;
+}
+
+inline static UChar mkModRegRM ( UInt mod, UInt reg, UInt regmem )
+{
+   /* Pack a ModRM byte: mod in bits [7:6], reg in [5:3], r/m in [2:0]. */
+   UInt w;
+   vassert(mod < 4);
+   vassert((reg|regmem) < 8);
+   w = ((mod & 3) << 6) | ((reg & 7) << 3) | (regmem & 7);
+   return toUChar(w);
+}
+
+inline static UChar mkSIB ( UInt shift, UInt regindex, UInt regbase )
+{
+   /* Pack a SIB byte: scale in bits [7:6], index in [5:3], base in [2:0]. */
+   UInt w;
+   vassert(shift < 4);
+   vassert((regindex|regbase) < 8);
+   w = ((shift & 3) << 6) | ((regindex & 7) << 3) | (regbase & 7);
+   return toUChar(w);
+}
+
+static UChar* emit32 ( UChar* p, UInt w32 )
+{
+   /* Emit a 32-bit word little-endian; return the advanced pointer. */
+   Int k;
+   for (k = 0; k < 4; k++) {
+      *p++ = toUChar(w32 & 0xFF);
+      w32 >>= 8;
+   }
+   return p;
+}
+
+static UChar* emit64 ( UChar* p, ULong w64 )
+{
+   /* Emit a 64-bit word little-endian: low half first, then high. */
+   p = emit32(p, toUInt(w64 & 0xFFFFFFFFULL));
+   p = emit32(p, toUInt(w64 >> 32));
+   return p;
+}
+
+/* Does a sign-extend of the lowest 8 bits give 
+   the original number? */
+static Bool fits8bits ( UInt w32 )
+{
+   /* Equivalent to sign-extending the low byte and comparing with the
+      original: true iff w32, viewed as signed, lies in [-128, 127]. */
+   Int i32 = (Int)w32;
+   return toBool(i32 >= -128 && i32 <= 127);
+}
+/* Can the lower 32 bits be signedly widened to produce the whole
+   64-bit value?  In other words, are the top 33 bits either all 0 or
+   all 1 ? */
+static Bool fitsIn32Bits ( ULong x )
+{
+   /* Truncate x to 32 bits, sign-extend back to 64, and check the
+      round trip preserves the value. */
+   Long sext;
+   sext = (Long)(Int)(UInt)(x & 0xFFFFFFFFULL);
+   return toBool(x == (ULong)sext);
+}
+
+
+/* Forming mod-reg-rm bytes and scale-index-base bytes.
+
+     greg,  0(ereg)    |  ereg is not any of: RSP RBP R12 R13
+                       =  00 greg ereg
+
+     greg,  d8(ereg)   |  ereg is neither of: RSP R12
+                       =  01 greg ereg, d8
+
+     greg,  d32(ereg)  |  ereg is neither of: RSP R12
+                       =  10 greg ereg, d32
+
+     greg,  d8(ereg)   |  ereg is either: RSP R12
+                       =  01 greg 100, 0x24, d8
+                       (lowest bit of rex distinguishes R12/RSP)
+
+     greg,  d32(ereg)  |  ereg is either: RSP R12
+                       =  10 greg 100, 0x24, d32
+                       (lowest bit of rex distinguishes R12/RSP)
+
+     -----------------------------------------------
+
+     greg,  d8(base,index,scale)  
+               |  index != RSP
+               =  01 greg 100, scale index base, d8
+
+     greg,  d32(base,index,scale)
+               |  index != RSP
+               =  10 greg 100, scale index base, d32
+*/
+static UChar* doAMode_M__wrk ( UChar* p, UInt gregEnc3210, AMD64AMode* am )
+{
+   /* Emit the ModRM (and, where needed, SIB) bytes plus displacement
+      for (greg, am).  See the scheme in the comment above.  The guard
+      order below matters: earlier, shorter encodings are preferred. */
+   UInt gregEnc210 = gregEnc3210 & 7;
+   if (am->tag == Aam_IR) {
+      HReg breg  = am->Aam.IR.reg;
+      UInt imm   = am->Aam.IR.imm;
+      Bool isRSP = sameHReg(breg, hregAMD64_RSP());
+      Bool isRBP = sameHReg(breg, hregAMD64_RBP());
+      Bool isR12 = sameHReg(breg, hregAMD64_R12());
+      Bool isR13 = sameHReg(breg, hregAMD64_R13());
+      /* mod=00, no displacement.  Not available for RSP/R12 (those
+         encodings mean "SIB byte follows") nor RBP/R13 (they mean
+         rip-relative). */
+      if (imm == 0 && !isRSP && !isRBP && !isR12 && !isR13) {
+         *p++ = mkModRegRM(0, gregEnc210, iregEnc210(breg));
+         return p;
+      }
+      /* mod=01, 8-bit displacement. */
+      if (fits8bits(imm) && !isRSP && !isR12) {
+         *p++ = mkModRegRM(1, gregEnc210, iregEnc210(breg));
+         *p++ = toUChar(imm & 0xFF);
+         return p;
+      }
+      /* mod=10, 32-bit displacement. */
+      if (!isRSP && !isR12) {
+         *p++ = mkModRegRM(2, gregEnc210, iregEnc210(breg));
+         p = emit32(p, imm);
+         return p;
+      }
+      /* RSP/R12 base: a SIB byte is required; 0x24 means "no index,
+         base = RSP/R12", with rex.B telling the two apart. */
+      if ((isRSP || isR12) && fits8bits(imm)) {
+         *p++ = mkModRegRM(1, gregEnc210, 4);
+         *p++ = 0x24;
+         *p++ = toUChar(imm & 0xFF);
+         return p;
+      }
+      if (/* isRSP || -- awaiting a test case for the RSP variant */
+          isR12) {
+         *p++ = mkModRegRM(2, gregEnc210, 4);
+         *p++ = 0x24;
+         p = emit32(p, imm);
+         return p;
+      }
+      ppAMD64AMode(am);
+      vpanic("doAMode_M: can't emit amode IR");
+      /*NOTREACHED*/
+   }
+   if (am->tag == Aam_IRRS) {
+      UInt imm = am->Aam.IRRS.imm;
+      /* SIB form; the index register must not be RSP (that encoding
+         means "no index"). */
+      if (fits8bits(imm)
+          && ! sameHReg(am->Aam.IRRS.index, hregAMD64_RSP())) {
+         *p++ = mkModRegRM(1, gregEnc210, 4);
+         *p++ = mkSIB(am->Aam.IRRS.shift, iregEnc210(am->Aam.IRRS.index),
+                                          iregEnc210(am->Aam.IRRS.base));
+         *p++ = toUChar(imm & 0xFF);
+         return p;
+      }
+      if (! sameHReg(am->Aam.IRRS.index, hregAMD64_RSP())) {
+         *p++ = mkModRegRM(2, gregEnc210, 4);
+         *p++ = mkSIB(am->Aam.IRRS.shift, iregEnc210(am->Aam.IRRS.index),
+                                          iregEnc210(am->Aam.IRRS.base));
+         p = emit32(p, imm);
+         return p;
+      }
+      ppAMD64AMode(am);
+      vpanic("doAMode_M: can't emit amode IRRS");
+      /*NOTREACHED*/
+   }
+   vpanic("doAMode_M: unknown amode");
+   /*NOTREACHED*/
+}
+
+static UChar* doAMode_M ( UChar* p, HReg greg, AMD64AMode* am )
+{
+   /* As doAMode_M__wrk, taking the greg as an HReg. */
+   UInt encG = iregEnc3210(greg);
+   return doAMode_M__wrk(p, encG, am);
+}
+
+static UChar* doAMode_M_enc ( UChar* p, UInt encG, AMD64AMode* am )
+{
+   /* As doAMode_M, but the greg is given directly as its 4-bit
+      hardware encoding (range-checked). */
+   vassert(encG < 16);
+   return doAMode_M__wrk(p, encG, am);
+}
+
+
+/* Emit a mod-reg-rm byte when the rm bit denotes a reg. */
+inline
+static UChar* doAMode_R__wrk ( UChar* p, UInt encG, UInt encE ) 
+{
+   /* Reg-reg form: mod = 3, low 3 bits of each encoding. */
+   *p++ = mkModRegRM(3, encG & 7, encE & 7);
+   return p;
+}
+
+static UChar* doAMode_R ( UChar* p, HReg greg, HReg ereg )
+{
+   /* Both operands given as HRegs. */
+   UInt encG = iregEnc3210(greg);
+   UInt encE = iregEnc3210(ereg);
+   return doAMode_R__wrk(p, encG, encE);
+}
+
+static UChar* doAMode_R_enc_reg ( UChar* p, UInt encG, HReg ereg )
+{
+   /* greg given as a raw 4-bit encoding, ereg as an HReg. */
+   vassert(encG < 16);
+   return doAMode_R__wrk(p, encG, iregEnc3210(ereg));
+}
+
+static UChar* doAMode_R_reg_enc ( UChar* p, HReg greg, UInt encE )
+{
+   /* greg given as an HReg, ereg as a raw 4-bit encoding. */
+   vassert(encE < 16);
+   return doAMode_R__wrk(p, iregEnc3210(greg), encE);
+}
+
+static UChar* doAMode_R_enc_enc ( UChar* p, UInt encG, UInt encE )
+{
+   /* Both operands given as raw 4-bit encodings. */
+   vassert( (encG|encE) < 16);
+   return doAMode_R__wrk(p, encG, encE);
+}
+
+
+/* Clear the W bit on a REX byte, thereby changing the operand size
+   back to whatever that instruction's default operand size is. */
+static inline UChar clearWBit ( UChar rex )
+{
+   /* Clear REX.W (bit 3), reverting the instruction to its default
+      operand size. */
+   return toUChar(rex & 0xF7);
+}
+
+
+/* Make up a REX byte, with W=1 (size=64), for a (greg,amode) pair. */
+inline static UChar rexAMode_M__wrk ( UInt gregEnc3210, AMD64AMode* am )
+{
+   /* Build a REX byte with W=1 (64-bit operand size).  R comes from
+      bit 3 of the greg encoding; X and B come from the amode's index
+      and base registers as appropriate. */
+   UChar W = 1;
+   UChar R = toUChar((gregEnc3210 >> 3) & 1);
+   UChar X, B;
+   switch (am->tag) {
+      case Aam_IR:
+         X = 0; /* no index register in this form */
+         B = toUChar(iregEnc3(am->Aam.IR.reg));
+         break;
+      case Aam_IRRS:
+         X = toUChar(iregEnc3(am->Aam.IRRS.index));
+         B = toUChar(iregEnc3(am->Aam.IRRS.base));
+         break;
+      default:
+         vassert(0);
+         return 0; /*NOTREACHED*/
+   }
+   return toUChar(0x40 + ((W << 3) | (R << 2) | (X << 1) | (B << 0)));
+}
+
+static UChar rexAMode_M ( HReg greg, AMD64AMode* am )
+{
+   /* As rexAMode_M__wrk, taking the greg as an HReg. */
+   UInt encG = iregEnc3210(greg);
+   return rexAMode_M__wrk(encG, am);
+}
+
+static UChar rexAMode_M_enc ( UInt encG, AMD64AMode* am )
+{
+   /* As rexAMode_M, but the greg is given directly as its 4-bit
+      hardware encoding (range-checked). */
+   vassert(encG < 16);
+   return rexAMode_M__wrk(encG, am);
+}
+
+
+/* Make up a REX byte, with W=1 (size=64), for a (greg,ereg) pair. */
+inline static UChar rexAMode_R__wrk ( UInt encG, UInt encE )
+{
+   /* REX for a reg-reg pair: W=1 (64-bit operand size), R from bit 3
+      of greg, B from bit 3 of ereg; X is unused in this form. */
+   UInt rex = 0x40
+              | (1 << 3)                       /* W */
+              | (((encG >> 3) & 1) << 2)       /* R */
+              | ((encE >> 3) & 1);             /* B */
+   return toUChar(rex);
+}
+
+static UChar rexAMode_R ( HReg greg, HReg ereg )
+{
+   /* Both operands given as HRegs. */
+   UInt encG = iregEnc3210(greg);
+   UInt encE = iregEnc3210(ereg);
+   return rexAMode_R__wrk(encG, encE);
+}
+
+static UChar rexAMode_R_enc_reg ( UInt encG, HReg ereg )
+{
+   /* greg given as a raw 4-bit encoding, ereg as an HReg. */
+   vassert(encG < 16);
+   return rexAMode_R__wrk(encG, iregEnc3210(ereg));
+}
+
+static UChar rexAMode_R_reg_enc ( HReg greg, UInt encE )
+{
+   /* greg given as an HReg, ereg as a raw 4-bit encoding. */
+   vassert(encE < 16);
+   return rexAMode_R__wrk(iregEnc3210(greg), encE);
+}
+
+static UChar rexAMode_R_enc_enc ( UInt encG, UInt encE )
+{
+   /* Both operands given as raw 4-bit encodings. */
+   vassert((encG|encE) < 16);
+   return rexAMode_R__wrk(encG, encE);
+}
+
+
+//uu /* May 2012: this VEX prefix stuff is currently unused, but has
+//uu    verified correct (I reckon).  Certainly it has been known to
+//uu    produce correct VEX prefixes during testing. */
+//uu 
+//uu /* Assemble a 2 or 3 byte VEX prefix from parts.  rexR, rexX, rexB and
+//uu    notVvvvv need to be not-ed before packing.  mmmmm, rexW, L and pp go
+//uu    in verbatim.  There's no range checking on the bits. */
+//uu static UInt packVexPrefix ( UInt rexR, UInt rexX, UInt rexB,
+//uu                             UInt mmmmm, UInt rexW, UInt notVvvv,
+//uu                             UInt L, UInt pp )
+//uu {
+//uu    UChar byte0 = 0;
+//uu    UChar byte1 = 0;
+//uu    UChar byte2 = 0;
+//uu    if (rexX == 0 && rexB == 0 && mmmmm == 1 && rexW == 0) {
+//uu       /* 2 byte encoding is possible. */
+//uu       byte0 = 0xC5;
+//uu       byte1 = ((rexR ^ 1) << 7) | ((notVvvv ^ 0xF) << 3) 
+//uu               | (L << 2) | pp;
+//uu    } else {
+//uu       /* 3 byte encoding is needed. */
+//uu       byte0 = 0xC4;
+//uu       byte1 = ((rexR ^ 1) << 7) | ((rexX ^ 1) << 6)
+//uu               | ((rexB ^ 1) << 5) | mmmmm;
+//uu       byte2 = (rexW << 7) | ((notVvvv ^ 0xF) << 3) | (L << 2) | pp;
+//uu    }
+//uu    return (((UInt)byte2) << 16) | (((UInt)byte1) << 8) | ((UInt)byte0);
+//uu }
+//uu 
+//uu /* Make up a VEX prefix for a (greg,amode) pair.  First byte in bits
+//uu    7:0 of result, second in 15:8, third (for a 3 byte prefix) in
+//uu    23:16.  Has m-mmmm set to indicate a prefix of 0F, pp set to
+//uu    indicate no SIMD prefix, W=0 (ignore), L=1 (size=256), and
+//uu    vvvv=1111 (unused 3rd reg). */
+//uu static UInt vexAMode_M ( HReg greg, AMD64AMode* am )
+//uu {
+//uu    UChar L       = 1; /* size = 256 */
+//uu    UChar pp      = 0; /* no SIMD prefix */
+//uu    UChar mmmmm   = 1; /* 0F */
+//uu    UChar notVvvv = 0; /* unused */
+//uu    UChar rexW    = 0;
+//uu    UChar rexR    = 0;
+//uu    UChar rexX    = 0;
+//uu    UChar rexB    = 0;
+//uu    /* Same logic as in rexAMode_M. */
+//uu    if (am->tag == Aam_IR) {
+//uu       rexR = iregEnc3(greg);
+//uu       rexX = 0; /* not relevant */
+//uu       rexB = iregEnc3(am->Aam.IR.reg);
+//uu    }
+//uu    else if (am->tag == Aam_IRRS) {
+//uu       rexR = iregEnc3(greg);
+//uu       rexX = iregEnc3(am->Aam.IRRS.index);
+//uu       rexB = iregEnc3(am->Aam.IRRS.base);
+//uu    } else {
+//uu       vassert(0);
+//uu    }
+//uu    return packVexPrefix( rexR, rexX, rexB, mmmmm, rexW, notVvvv, L, pp );
+//uu }
+//uu 
+//uu static UChar* emitVexPrefix ( UChar* p, UInt vex )
+//uu {
+//uu    switch (vex & 0xFF) {
+//uu       case 0xC5:
+//uu          *p++ = 0xC5;
+//uu          *p++ = (vex >> 8) & 0xFF;
+//uu          vassert(0 == (vex >> 16));
+//uu          break;
+//uu       case 0xC4:
+//uu          *p++ = 0xC4;
+//uu          *p++ = (vex >> 8) & 0xFF;
+//uu          *p++ = (vex >> 16) & 0xFF;
+//uu          vassert(0 == (vex >> 24));
+//uu          break;
+//uu       default:
+//uu          vassert(0);
+//uu    }
+//uu    return p;
+//uu }
+
+
+/* Emit ffree %st(N) */
+static UChar* do_ffree_st ( UChar* p, Int n )
+{
+   /* "ffree %st(n)" encodes as DD C0+n. */
+   vassert(n >= 0 && n <= 7);
+   p[0] = 0xDD;
+   p[1] = toUChar(0xC0 + n);
+   return p + 2;
+}
+
+/* Emit an instruction into buf and return the number of bytes used.
+   Note that buf is not the insn's final place, and therefore it is
+   imperative to emit position-independent code.  If the emitted
+   instruction was a profiler inc, set *is_profInc to True, else
+   leave it unchanged. */
+
+Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
+                      UChar* buf, Int nbuf, const AMD64Instr* i, 
+                      Bool mode64, VexEndness endness_host,
+                      const void* disp_cp_chain_me_to_slowEP,
+                      const void* disp_cp_chain_me_to_fastEP,
+                      const void* disp_cp_xindir,
+                      const void* disp_cp_xassisted )
+{
+   UInt /*irno,*/ opc, opc_rr, subopc_imm, opc_imma, opc_cl, opc_imm, subopc;
+   UInt   xtra;
+   UInt   reg;
+   UChar  rex;
+   UChar* p = &buf[0];
+   UChar* ptmp;
+   Int    j;
+   vassert(nbuf >= 32);
+   vassert(mode64 == True);
+
+   /* vex_printf("asm  "); ppAMD64Instr(i, mode64); vex_printf("\n"); */
+
+   switch (i->tag) {
+
+   case Ain_Imm64:
+      if (i->Ain.Imm64.imm64 <= 0xFFFFFULL) {
+         /* Use the short form (load into 32 bit reg, + default
+            widening rule) for constants under 1 million.  We could
+            use this form for the range 0 to 0x7FFFFFFF inclusive, but
+            limit it to a smaller range for verifiability purposes. */
+         if (1 & iregEnc3(i->Ain.Imm64.dst))
+            *p++ = 0x41;
+         *p++ = 0xB8 + iregEnc210(i->Ain.Imm64.dst);
+         p = emit32(p, (UInt)i->Ain.Imm64.imm64);
+      } else {
+         *p++ = toUChar(0x48 + (1 & iregEnc3(i->Ain.Imm64.dst)));
+         *p++ = toUChar(0xB8 + iregEnc210(i->Ain.Imm64.dst));
+         p = emit64(p, i->Ain.Imm64.imm64);
+      }
+      goto done;
+
+   case Ain_Alu64R:
+      /* Deal specially with MOV */
+      if (i->Ain.Alu64R.op == Aalu_MOV) {
+         switch (i->Ain.Alu64R.src->tag) {
+            case Armi_Imm:
+               if (0 == (i->Ain.Alu64R.src->Armi.Imm.imm32 & ~0xFFFFF)) {
+                  /* Actually we could use this form for constants in
+                     the range 0 through 0x7FFFFFFF inclusive, but
+                     limit it to a small range for verifiability
+                     purposes. */
+                  /* Generate "movl $imm32, 32-bit-register" and let
+                     the default zero-extend rule cause the upper half
+                     of the dst to be zeroed out too.  This saves 1
+                     and sometimes 2 bytes compared to the more
+                     obvious encoding in the 'else' branch. */
+                  if (1 & iregEnc3(i->Ain.Alu64R.dst))
+                     *p++ = 0x41;
+                  *p++ = 0xB8 + iregEnc210(i->Ain.Alu64R.dst);
+                  p = emit32(p, i->Ain.Alu64R.src->Armi.Imm.imm32);
+               } else {
+                  *p++ = toUChar(0x48 + (1 & iregEnc3(i->Ain.Alu64R.dst)));
+                  *p++ = 0xC7;
+                  *p++ = toUChar(0xC0 + iregEnc210(i->Ain.Alu64R.dst));
+                  p = emit32(p, i->Ain.Alu64R.src->Armi.Imm.imm32);
+               }
+               goto done;
+            case Armi_Reg:
+               *p++ = rexAMode_R( i->Ain.Alu64R.src->Armi.Reg.reg,
+                                  i->Ain.Alu64R.dst );
+               *p++ = 0x89;
+               p = doAMode_R(p, i->Ain.Alu64R.src->Armi.Reg.reg,
+                                i->Ain.Alu64R.dst);
+               goto done;
+            case Armi_Mem:
+               *p++ = rexAMode_M(i->Ain.Alu64R.dst,
+                                 i->Ain.Alu64R.src->Armi.Mem.am);
+               *p++ = 0x8B;
+               p = doAMode_M(p, i->Ain.Alu64R.dst, 
+                                i->Ain.Alu64R.src->Armi.Mem.am);
+               goto done;
+            default:
+               goto bad;
+         }
+      }
+      /* MUL */
+      if (i->Ain.Alu64R.op == Aalu_MUL) {
+         switch (i->Ain.Alu64R.src->tag) {
+            case Armi_Reg:
+               *p++ = rexAMode_R( i->Ain.Alu64R.dst,
+                                  i->Ain.Alu64R.src->Armi.Reg.reg);
+               *p++ = 0x0F;
+               *p++ = 0xAF;
+               p = doAMode_R(p, i->Ain.Alu64R.dst,
+                                i->Ain.Alu64R.src->Armi.Reg.reg);
+               goto done;
+            case Armi_Mem:
+               *p++ = rexAMode_M(i->Ain.Alu64R.dst,
+                                 i->Ain.Alu64R.src->Armi.Mem.am);
+               *p++ = 0x0F;
+               *p++ = 0xAF;
+               p = doAMode_M(p, i->Ain.Alu64R.dst,
+                                i->Ain.Alu64R.src->Armi.Mem.am);
+               goto done;
+            case Armi_Imm:
+               if (fits8bits(i->Ain.Alu64R.src->Armi.Imm.imm32)) {
+                  *p++ = rexAMode_R(i->Ain.Alu64R.dst, i->Ain.Alu64R.dst);
+                  *p++ = 0x6B;
+                  p = doAMode_R(p, i->Ain.Alu64R.dst, i->Ain.Alu64R.dst);
+                  *p++ = toUChar(0xFF & i->Ain.Alu64R.src->Armi.Imm.imm32);
+               } else {
+                  *p++ = rexAMode_R(i->Ain.Alu64R.dst, i->Ain.Alu64R.dst);
+                  *p++ = 0x69;
+                  p = doAMode_R(p, i->Ain.Alu64R.dst, i->Ain.Alu64R.dst);
+                  p = emit32(p, i->Ain.Alu64R.src->Armi.Imm.imm32);
+               }
+               goto done;
+            default:
+               goto bad;
+         }
+      }
+      /* ADD/SUB/ADC/SBB/AND/OR/XOR/CMP */
+      opc = opc_rr = subopc_imm = opc_imma = 0;
+      switch (i->Ain.Alu64R.op) {
+         case Aalu_ADC: opc = 0x13; opc_rr = 0x11; 
+                        subopc_imm = 2; opc_imma = 0x15; break;
+         case Aalu_ADD: opc = 0x03; opc_rr = 0x01; 
+                        subopc_imm = 0; opc_imma = 0x05; break;
+         case Aalu_SUB: opc = 0x2B; opc_rr = 0x29; 
+                        subopc_imm = 5; opc_imma = 0x2D; break;
+         case Aalu_SBB: opc = 0x1B; opc_rr = 0x19; 
+                        subopc_imm = 3; opc_imma = 0x1D; break;
+         case Aalu_AND: opc = 0x23; opc_rr = 0x21; 
+                        subopc_imm = 4; opc_imma = 0x25; break;
+         case Aalu_XOR: opc = 0x33; opc_rr = 0x31; 
+                        subopc_imm = 6; opc_imma = 0x35; break;
+         case Aalu_OR:  opc = 0x0B; opc_rr = 0x09; 
+                        subopc_imm = 1; opc_imma = 0x0D; break;
+         case Aalu_CMP: opc = 0x3B; opc_rr = 0x39; 
+                        subopc_imm = 7; opc_imma = 0x3D; break;
+         default: goto bad;
+      }
+      switch (i->Ain.Alu64R.src->tag) {
+         case Armi_Imm:
+            if (sameHReg(i->Ain.Alu64R.dst, hregAMD64_RAX())
+                && !fits8bits(i->Ain.Alu64R.src->Armi.Imm.imm32)) {
+               goto bad; /* FIXME: awaiting test case */
+               *p++ = toUChar(opc_imma);
+               p = emit32(p, i->Ain.Alu64R.src->Armi.Imm.imm32);
+            } else
+            if (fits8bits(i->Ain.Alu64R.src->Armi.Imm.imm32)) {
+               *p++ = rexAMode_R_enc_reg( 0, i->Ain.Alu64R.dst );
+               *p++ = 0x83; 
+               p    = doAMode_R_enc_reg(p, subopc_imm, i->Ain.Alu64R.dst);
+               *p++ = toUChar(0xFF & i->Ain.Alu64R.src->Armi.Imm.imm32);
+            } else {
+               *p++ = rexAMode_R_enc_reg( 0, i->Ain.Alu64R.dst);
+               *p++ = 0x81; 
+               p    = doAMode_R_enc_reg(p, subopc_imm, i->Ain.Alu64R.dst);
+               p    = emit32(p, i->Ain.Alu64R.src->Armi.Imm.imm32);
+            }
+            goto done;
+         case Armi_Reg:
+            *p++ = rexAMode_R( i->Ain.Alu64R.src->Armi.Reg.reg,
+                               i->Ain.Alu64R.dst);
+            *p++ = toUChar(opc_rr);
+            p = doAMode_R(p, i->Ain.Alu64R.src->Armi.Reg.reg,
+                             i->Ain.Alu64R.dst);
+            goto done;
+         case Armi_Mem:
+            *p++ = rexAMode_M( i->Ain.Alu64R.dst,
+                               i->Ain.Alu64R.src->Armi.Mem.am);
+            *p++ = toUChar(opc);
+            p = doAMode_M(p, i->Ain.Alu64R.dst,
+                             i->Ain.Alu64R.src->Armi.Mem.am);
+            goto done;
+         default: 
+            goto bad;
+      }
+      break;
+
+   case Ain_Alu64M:
+      /* Deal specially with MOV */
+      if (i->Ain.Alu64M.op == Aalu_MOV) {
+         switch (i->Ain.Alu64M.src->tag) {
+            case Ari_Reg:
+               *p++ = rexAMode_M(i->Ain.Alu64M.src->Ari.Reg.reg,
+                                 i->Ain.Alu64M.dst);
+               *p++ = 0x89;
+               p = doAMode_M(p, i->Ain.Alu64M.src->Ari.Reg.reg,
+                                i->Ain.Alu64M.dst);
+               goto done;
+            case Ari_Imm:
+               *p++ = rexAMode_M_enc(0, i->Ain.Alu64M.dst);
+               *p++ = 0xC7;
+               p = doAMode_M_enc(p, 0, i->Ain.Alu64M.dst);
+               p = emit32(p, i->Ain.Alu64M.src->Ari.Imm.imm32);
+               goto done;
+            default: 
+               goto bad;
+         }
+      }
+      break;
+
+   case Ain_Sh64:
+      opc_cl = opc_imm = subopc = 0;
+      switch (i->Ain.Sh64.op) {
+         case Ash_SHR: opc_cl = 0xD3; opc_imm = 0xC1; subopc = 5; break;
+         case Ash_SAR: opc_cl = 0xD3; opc_imm = 0xC1; subopc = 7; break;
+         case Ash_SHL: opc_cl = 0xD3; opc_imm = 0xC1; subopc = 4; break;
+         default: goto bad;
+      }
+      if (i->Ain.Sh64.src == 0) {
+         *p++ = rexAMode_R_enc_reg(0, i->Ain.Sh64.dst);
+         *p++ = toUChar(opc_cl);
+         p = doAMode_R_enc_reg(p, subopc, i->Ain.Sh64.dst);
+         goto done;
+      } else {
+         *p++ = rexAMode_R_enc_reg(0, i->Ain.Sh64.dst);
+         *p++ = toUChar(opc_imm);
+         p = doAMode_R_enc_reg(p, subopc, i->Ain.Sh64.dst);
+         *p++ = (UChar)(i->Ain.Sh64.src);
+         goto done;
+      }
+      break;
+
+   case Ain_Test64:
+      /* testq sign-extend($imm32), %reg */
+      *p++ = rexAMode_R_enc_reg(0, i->Ain.Test64.dst);
+      *p++ = 0xF7;
+      p = doAMode_R_enc_reg(p, 0, i->Ain.Test64.dst);
+      p = emit32(p, i->Ain.Test64.imm32);
+      goto done;
+
+   case Ain_Unary64:
+      if (i->Ain.Unary64.op == Aun_NOT) {
+         *p++ = rexAMode_R_enc_reg(0, i->Ain.Unary64.dst);
+         *p++ = 0xF7;
+         p = doAMode_R_enc_reg(p, 2, i->Ain.Unary64.dst);
+         goto done;
+      }
+      if (i->Ain.Unary64.op == Aun_NEG) {
+         *p++ = rexAMode_R_enc_reg(0, i->Ain.Unary64.dst);
+         *p++ = 0xF7;
+         p = doAMode_R_enc_reg(p, 3, i->Ain.Unary64.dst);
+         goto done;
+      }
+      break;
+
+   case Ain_Lea64:
+      *p++ = rexAMode_M(i->Ain.Lea64.dst, i->Ain.Lea64.am);
+      *p++ = 0x8D;
+      p = doAMode_M(p, i->Ain.Lea64.dst, i->Ain.Lea64.am);
+      goto done;
+
+   case Ain_Alu32R:
+      /* ADD/SUB/AND/OR/XOR/CMP */
+      opc = opc_rr = subopc_imm = opc_imma = 0;
+      switch (i->Ain.Alu32R.op) {
+         case Aalu_ADD: opc = 0x03; opc_rr = 0x01; 
+                        subopc_imm = 0; opc_imma = 0x05; break;
+         case Aalu_SUB: opc = 0x2B; opc_rr = 0x29; 
+                        subopc_imm = 5; opc_imma = 0x2D; break;
+         case Aalu_AND: opc = 0x23; opc_rr = 0x21; 
+                        subopc_imm = 4; opc_imma = 0x25; break;
+         case Aalu_XOR: opc = 0x33; opc_rr = 0x31; 
+                        subopc_imm = 6; opc_imma = 0x35; break;
+         case Aalu_OR:  opc = 0x0B; opc_rr = 0x09; 
+                        subopc_imm = 1; opc_imma = 0x0D; break;
+         case Aalu_CMP: opc = 0x3B; opc_rr = 0x39; 
+                        subopc_imm = 7; opc_imma = 0x3D; break;
+         default: goto bad;
+      }
+      switch (i->Ain.Alu32R.src->tag) {
+         case Armi_Imm:
+            if (sameHReg(i->Ain.Alu32R.dst, hregAMD64_RAX())
+                && !fits8bits(i->Ain.Alu32R.src->Armi.Imm.imm32)) {
+               goto bad; /* FIXME: awaiting test case */
+               *p++ = toUChar(opc_imma);
+               p = emit32(p, i->Ain.Alu32R.src->Armi.Imm.imm32);
+            } else
+            if (fits8bits(i->Ain.Alu32R.src->Armi.Imm.imm32)) {
+               rex  = clearWBit( rexAMode_R_enc_reg( 0, i->Ain.Alu32R.dst ) );
+               if (rex != 0x40) *p++ = rex;
+               *p++ = 0x83; 
+               p    = doAMode_R_enc_reg(p, subopc_imm, i->Ain.Alu32R.dst);
+               *p++ = toUChar(0xFF & i->Ain.Alu32R.src->Armi.Imm.imm32);
+            } else {
+               rex  = clearWBit( rexAMode_R_enc_reg( 0, i->Ain.Alu32R.dst) );
+               if (rex != 0x40) *p++ = rex;
+               *p++ = 0x81; 
+               p    = doAMode_R_enc_reg(p, subopc_imm, i->Ain.Alu32R.dst);
+               p    = emit32(p, i->Ain.Alu32R.src->Armi.Imm.imm32);
+            }
+            goto done;
+         case Armi_Reg:
+            rex  = clearWBit( 
+                   rexAMode_R( i->Ain.Alu32R.src->Armi.Reg.reg,
+                               i->Ain.Alu32R.dst) );
+            if (rex != 0x40) *p++ = rex;
+            *p++ = toUChar(opc_rr);
+            p = doAMode_R(p, i->Ain.Alu32R.src->Armi.Reg.reg,
+                             i->Ain.Alu32R.dst);
+            goto done;
+         case Armi_Mem:
+            rex  = clearWBit(
+                   rexAMode_M( i->Ain.Alu32R.dst,
+                               i->Ain.Alu32R.src->Armi.Mem.am) );
+            if (rex != 0x40) *p++ = rex;
+            *p++ = toUChar(opc);
+            p = doAMode_M(p, i->Ain.Alu32R.dst,
+                             i->Ain.Alu32R.src->Armi.Mem.am);
+            goto done;
+         default: 
+            goto bad;
+      }
+      break;
+
+   case Ain_MulL:
+      subopc = i->Ain.MulL.syned ? 5 : 4;
+      switch (i->Ain.MulL.src->tag)  {
+         case Arm_Mem:
+            *p++ = rexAMode_M_enc(0, i->Ain.MulL.src->Arm.Mem.am);
+            *p++ = 0xF7;
+            p = doAMode_M_enc(p, subopc, i->Ain.MulL.src->Arm.Mem.am);
+            goto done;
+         case Arm_Reg:
+            *p++ = rexAMode_R_enc_reg(0, i->Ain.MulL.src->Arm.Reg.reg);
+            *p++ = 0xF7;
+            p = doAMode_R_enc_reg(p, subopc, i->Ain.MulL.src->Arm.Reg.reg);
+            goto done;
+         default:
+            goto bad;
+      }
+      break;
+
+   case Ain_Div:
+      subopc = i->Ain.Div.syned ? 7 : 6;
+      if (i->Ain.Div.sz == 4) {
+         switch (i->Ain.Div.src->tag)  {
+            case Arm_Mem:
+               goto bad;
+               /*FIXME*/
+               *p++ = 0xF7;
+               p = doAMode_M_enc(p, subopc, i->Ain.Div.src->Arm.Mem.am);
+               goto done;
+            case Arm_Reg:
+               *p++ = clearWBit(
+                      rexAMode_R_enc_reg(0, i->Ain.Div.src->Arm.Reg.reg));
+               *p++ = 0xF7;
+               p = doAMode_R_enc_reg(p, subopc, i->Ain.Div.src->Arm.Reg.reg);
+               goto done;
+            default:
+               goto bad;
+         }
+      }
+      if (i->Ain.Div.sz == 8) {
+         switch (i->Ain.Div.src->tag)  {
+            case Arm_Mem:
+               *p++ = rexAMode_M_enc(0, i->Ain.Div.src->Arm.Mem.am);
+               *p++ = 0xF7;
+               p = doAMode_M_enc(p, subopc, i->Ain.Div.src->Arm.Mem.am);
+               goto done;
+            case Arm_Reg:
+               *p++ = rexAMode_R_enc_reg(0, i->Ain.Div.src->Arm.Reg.reg);
+               *p++ = 0xF7;
+               p = doAMode_R_enc_reg(p, subopc, i->Ain.Div.src->Arm.Reg.reg);
+               goto done;
+            default:
+               goto bad;
+         }
+      }
+      break;
+
+   case Ain_Push:
+      switch (i->Ain.Push.src->tag) {
+         case Armi_Mem: 
+            *p++ = clearWBit(
+                   rexAMode_M_enc(0, i->Ain.Push.src->Armi.Mem.am));
+            *p++ = 0xFF;
+            p = doAMode_M_enc(p, 6, i->Ain.Push.src->Armi.Mem.am);
+            goto done;
+         case Armi_Imm:
+            *p++ = 0x68;
+            p = emit32(p, i->Ain.Push.src->Armi.Imm.imm32);
+            goto done;
+         case Armi_Reg:
+            *p++ = toUChar(0x40 + (1 & iregEnc3(i->Ain.Push.src->Armi.Reg.reg)));
+            *p++ = toUChar(0x50 + iregEnc210(i->Ain.Push.src->Armi.Reg.reg));
+            goto done;
+        default: 
+            goto bad;
+      }
+
+   case Ain_Call: {
+      /* As per detailed comment for Ain_Call in getRegUsage_AMD64Instr
+         above, %r11 is used as an address temporary. */
+      /* If we don't need to do any fixup actions in the case that the
+         call doesn't happen, just do the simple thing and emit
+         straight-line code.  This is usually the case. */
+      if (i->Ain.Call.cond == Acc_ALWAYS/*call always happens*/
+          || i->Ain.Call.rloc.pri == RLPri_None/*no fixup action*/) {
+         /* jump over the following two insns if the condition does
+            not hold */
+         Bool shortImm = fitsIn32Bits(i->Ain.Call.target);
+         if (i->Ain.Call.cond != Acc_ALWAYS) {
+            *p++ = toUChar(0x70 + (0xF & (i->Ain.Call.cond ^ 1)));
+            *p++ = shortImm ? 10 : 13;
+            /* 10 or 13 bytes in the next two insns */
+         }
+         if (shortImm) {
+            /* 7 bytes: movl sign-extend(imm32), %r11 */
+            *p++ = 0x49;
+            *p++ = 0xC7;
+            *p++ = 0xC3;
+            p = emit32(p, (UInt)i->Ain.Call.target);
+         } else {
+            /* 10 bytes: movabsq $target, %r11 */
+            *p++ = 0x49;
+            *p++ = 0xBB;
+            p = emit64(p, i->Ain.Call.target);
+         }
+         /* 3 bytes: call *%r11 */
+         *p++ = 0x41;
+         *p++ = 0xFF;
+         *p++ = 0xD3;
+      } else {
+         Int delta;
+         /* Complex case.  We have to generate an if-then-else diamond. */
+         // before:
+         //   j{!cond} else:
+         //   movabsq $target, %r11
+         //   call* %r11
+         // preElse:
+         //   jmp after:
+         // else:
+         //   movabsq $0x5555555555555555, %rax  // possibly
+         //   movq %rax, %rdx                    // possibly
+         // after:
+
+         // before:
+         UChar* pBefore = p;
+
+         //   j{!cond} else:
+         *p++ = toUChar(0x70 + (0xF & (i->Ain.Call.cond ^ 1)));
+         *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+
+         //   movabsq $target, %r11
+         *p++ = 0x49;
+         *p++ = 0xBB;
+         p = emit64(p, i->Ain.Call.target);
+
+         //   call* %r11
+         *p++ = 0x41;
+         *p++ = 0xFF;
+         *p++ = 0xD3;
+
+         // preElse:
+         UChar* pPreElse = p;
+
+         //   jmp after:
+         *p++ = 0xEB;
+         *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+
+         // else:
+         UChar* pElse = p;
+
+         /* Do the 'else' actions */
+         switch (i->Ain.Call.rloc.pri) {
+            case RLPri_Int:
+               // movabsq $0x5555555555555555, %rax
+               *p++ = 0x48; *p++ = 0xB8; p = emit64(p, 0x5555555555555555ULL);
+               break;
+            case RLPri_2Int:
+               vassert(0); //ATC
+               // movabsq $0x5555555555555555, %rax
+               *p++ = 0x48; *p++ = 0xB8; p = emit64(p, 0x5555555555555555ULL);
+               // movq %rax, %rdx
+               *p++ = 0x48; *p++ = 0x89; *p++ = 0xC2;
+            case RLPri_None: case RLPri_INVALID: default:
+               vassert(0);
+         }
+
+         // after:
+         UChar* pAfter = p;
+
+         // Fix up the branch offsets.  The +2s in the offset
+         // calculations are there because x86 requires conditional
+         // branches to have their offset stated relative to the
+         // instruction immediately following the branch insn.  And in
+         // both cases the branch insns are 2 bytes long.
+
+         // First, the "j{!cond} else:" at pBefore.
+         delta = (Int)(Long)(pElse - (pBefore + 2));
+         vassert(delta >= 0 && delta < 100/*arbitrary*/);
+         *(pBefore+1) = (UChar)delta;
+
+         // And secondly, the "jmp after:" at pPreElse.
+         delta = (Int)(Long)(pAfter - (pPreElse + 2));
+         vassert(delta >= 0 && delta < 100/*arbitrary*/);
+         *(pPreElse+1) = (UChar)delta;
+      }
+      goto done;
+   }
+
+   case Ain_XDirect: {
+      /* NB: what goes on here has to be very closely coordinated with the
+         chainXDirect_AMD64 and unchainXDirect_AMD64 below. */
+      /* We're generating chain-me requests here, so we need to be
+         sure this is actually allowed -- no-redir translations can't
+         use chain-me's.  Hence: */
+      vassert(disp_cp_chain_me_to_slowEP != NULL);
+      vassert(disp_cp_chain_me_to_fastEP != NULL);
+
+      HReg r11 = hregAMD64_R11();
+
+      /* Use ptmp for backpatching conditional jumps. */
+      ptmp = NULL;
+
+      /* First off, if this is conditional, create a conditional
+         jump over the rest of it. */
+      if (i->Ain.XDirect.cond != Acc_ALWAYS) {
+         /* jmp fwds if !condition */
+         *p++ = toUChar(0x70 + (0xF & (i->Ain.XDirect.cond ^ 1)));
+         ptmp = p; /* fill in this bit later */
+         *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+      }
+
+      /* Update the guest RIP. */
+      if (fitsIn32Bits(i->Ain.XDirect.dstGA)) {
+         /* use a shorter encoding */
+         /* movl sign-extend(dstGA), %r11 */
+         *p++ = 0x49;
+         *p++ = 0xC7;
+         *p++ = 0xC3;
+         p = emit32(p, (UInt)i->Ain.XDirect.dstGA);
+      } else {
+         /* movabsq $dstGA, %r11 */
+         *p++ = 0x49;
+         *p++ = 0xBB;
+         p = emit64(p, i->Ain.XDirect.dstGA);
+      }
+
+      /* movq %r11, amRIP */
+      *p++ = rexAMode_M(r11, i->Ain.XDirect.amRIP);
+      *p++ = 0x89;
+      p = doAMode_M(p, r11, i->Ain.XDirect.amRIP);
+
+      /* --- FIRST PATCHABLE BYTE follows --- */
+      /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're calling
+         to) backs up the return address, so as to find the address of
+         the first patchable byte.  So: don't change the length of the
+         two instructions below. */
+      /* movabsq $disp_cp_chain_me_to_{slow,fast}EP,%r11; */
+      *p++ = 0x49;
+      *p++ = 0xBB;
+      const void* disp_cp_chain_me
+               = i->Ain.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP 
+                                         : disp_cp_chain_me_to_slowEP;
+      p = emit64(p, (Addr)disp_cp_chain_me);
+      /* call *%r11 */
+      *p++ = 0x41;
+      *p++ = 0xFF;
+      *p++ = 0xD3;
+      /* --- END of PATCHABLE BYTES --- */
+
+      /* Fix up the conditional jump, if there was one. */
+      if (i->Ain.XDirect.cond != Acc_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta > 0 && delta < 40);
+         *ptmp = toUChar(delta-1);
+      }
+      goto done;
+   }
+
+   case Ain_XIndir: {
+      /* We're generating transfers that could lead indirectly to a
+         chain-me, so we need to be sure this is actually allowed --
+         no-redir translations are not allowed to reach normal
+         translations without going through the scheduler.  That means
+         no XDirects or XIndirs out from no-redir translations.
+         Hence: */
+      vassert(disp_cp_xindir != NULL);
+
+      /* Use ptmp for backpatching conditional jumps. */
+      ptmp = NULL;
+
+      /* First off, if this is conditional, create a conditional
+         jump over the rest of it. */
+      if (i->Ain.XIndir.cond != Acc_ALWAYS) {
+         /* jmp fwds if !condition */
+         *p++ = toUChar(0x70 + (0xF & (i->Ain.XIndir.cond ^ 1)));
+         ptmp = p; /* fill in this bit later */
+         *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+      }
+
+      /* movq dstGA(a reg), amRIP -- copied from Alu64M MOV case */
+      *p++ = rexAMode_M(i->Ain.XIndir.dstGA, i->Ain.XIndir.amRIP);
+      *p++ = 0x89;
+      p = doAMode_M(p, i->Ain.XIndir.dstGA, i->Ain.XIndir.amRIP);
+
+      /* get $disp_cp_xindir into %r11 */
+      if (fitsIn32Bits((Addr)disp_cp_xindir)) {
+         /* use a shorter encoding */
+         /* movl sign-extend(disp_cp_xindir), %r11 */
+         *p++ = 0x49;
+         *p++ = 0xC7;
+         *p++ = 0xC3;
+         p = emit32(p, (UInt)(Addr)disp_cp_xindir);
+      } else {
+         /* movabsq $disp_cp_xindir, %r11 */
+         *p++ = 0x49;
+         *p++ = 0xBB;
+         p = emit64(p, (Addr)disp_cp_xindir);
+      }
+
+      /* jmp *%r11 */
+      *p++ = 0x41;
+      *p++ = 0xFF;
+      *p++ = 0xE3;
+
+      /* Fix up the conditional jump, if there was one. */
+      if (i->Ain.XIndir.cond != Acc_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta > 0 && delta < 40);
+         *ptmp = toUChar(delta-1);
+      }
+      goto done;
+   }
+
+   case Ain_XAssisted: {
+      /* Use ptmp for backpatching conditional jumps. */
+      ptmp = NULL;
+
+      /* First off, if this is conditional, create a conditional
+         jump over the rest of it. */
+      if (i->Ain.XAssisted.cond != Acc_ALWAYS) {
+         /* jmp fwds if !condition */
+         *p++ = toUChar(0x70 + (0xF & (i->Ain.XAssisted.cond ^ 1)));
+         ptmp = p; /* fill in this bit later */
+         *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+      }
+
+      /* movq dstGA(a reg), amRIP -- copied from Alu64M MOV case */
+      *p++ = rexAMode_M(i->Ain.XAssisted.dstGA, i->Ain.XAssisted.amRIP);
+      *p++ = 0x89;
+      p = doAMode_M(p, i->Ain.XAssisted.dstGA, i->Ain.XAssisted.amRIP);
+      /* movl $magic_number, %ebp.  Since these numbers are all small positive
+         integers, we can get away with "movl $N, %ebp" rather than
+         the longer "movq $N, %rbp". */
+      UInt trcval = 0;
+      switch (i->Ain.XAssisted.jk) {
+         case Ijk_ClientReq:   trcval = VEX_TRC_JMP_CLIENTREQ;   break;
+         case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
+         case Ijk_Sys_int32:   trcval = VEX_TRC_JMP_SYS_INT32;   break;
+         case Ijk_Yield:       trcval = VEX_TRC_JMP_YIELD;       break;
+         case Ijk_EmWarn:      trcval = VEX_TRC_JMP_EMWARN;      break;
+         case Ijk_MapFail:     trcval = VEX_TRC_JMP_MAPFAIL;     break;
+         case Ijk_NoDecode:    trcval = VEX_TRC_JMP_NODECODE;    break;
+         case Ijk_InvalICache: trcval = VEX_TRC_JMP_INVALICACHE; break;
+         case Ijk_NoRedir:     trcval = VEX_TRC_JMP_NOREDIR;     break;
+         case Ijk_SigTRAP:     trcval = VEX_TRC_JMP_SIGTRAP;     break;
+         case Ijk_SigSEGV:     trcval = VEX_TRC_JMP_SIGSEGV;     break;
+         case Ijk_Boring:      trcval = VEX_TRC_JMP_BORING;      break;
+         /* We don't expect to see the following being assisted. */
+         case Ijk_Ret:
+         case Ijk_Call:
+         /* fallthrough */
+         default: 
+            ppIRJumpKind(i->Ain.XAssisted.jk);
+            vpanic("emit_AMD64Instr.Ain_XAssisted: unexpected jump kind");
+      }
+      vassert(trcval != 0);
+      *p++ = 0xBD;
+      p = emit32(p, trcval);
+      /* movabsq $disp_assisted, %r11 */
+      *p++ = 0x49;
+      *p++ = 0xBB;
+      p = emit64(p, (Addr)disp_cp_xassisted);
+      /* jmp *%r11 */
+      *p++ = 0x41;
+      *p++ = 0xFF;
+      *p++ = 0xE3;
+
+      /* Fix up the conditional jump, if there was one. */
+      if (i->Ain.XAssisted.cond != Acc_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta > 0 && delta < 40);
+         *ptmp = toUChar(delta-1);
+      }
+      goto done;
+   }
+
+   case Ain_CMov64:
+      vassert(i->Ain.CMov64.cond != Acc_ALWAYS);
+      *p++ = rexAMode_R(i->Ain.CMov64.dst, i->Ain.CMov64.src);
+      *p++ = 0x0F;
+      *p++ = toUChar(0x40 + (0xF & i->Ain.CMov64.cond));
+      p = doAMode_R(p, i->Ain.CMov64.dst, i->Ain.CMov64.src);
+      goto done;
+
+   case Ain_CLoad: {
+      vassert(i->Ain.CLoad.cond != Acc_ALWAYS);
+
+      /* Only 32- or 64-bit variants are allowed. */
+      vassert(i->Ain.CLoad.szB == 4 || i->Ain.CLoad.szB == 8);
+
+      /* Use ptmp for backpatching conditional jumps. */
+      ptmp = NULL;
+
+      /* jmp fwds if !condition */
+      *p++ = toUChar(0x70 + (0xF & (i->Ain.CLoad.cond ^ 1)));
+      ptmp = p; /* fill in this bit later */
+      *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+
+      /* Now the load.  Either a normal 64 bit load or a normal 32 bit
+         load, which, by the default zero-extension rule, zeroes out
+         the upper half of the destination, as required. */
+      rex = rexAMode_M(i->Ain.CLoad.dst, i->Ain.CLoad.addr);
+      *p++ = i->Ain.CLoad.szB == 4 ? clearWBit(rex) : rex;
+      *p++ = 0x8B;
+      p = doAMode_M(p, i->Ain.CLoad.dst, i->Ain.CLoad.addr);
+
+      /* Fix up the conditional branch */
+      Int delta = p - ptmp;
+      vassert(delta > 0 && delta < 40);
+      *ptmp = toUChar(delta-1);
+      goto done;
+   }
+
+   case Ain_CStore: {
+      /* AFAICS this is identical to Ain_CStore except that the opcode
+         is 0x89 instead of 0x8B. */
+      vassert(i->Ain.CStore.cond != Acc_ALWAYS);
+
+      /* Only 32- or 64-bit variants are allowed. */
+      vassert(i->Ain.CStore.szB == 4 || i->Ain.CStore.szB == 8);
+
+      /* Use ptmp for backpatching conditional jumps. */
+      ptmp = NULL;
+
+      /* jmp fwds if !condition */
+      *p++ = toUChar(0x70 + (0xF & (i->Ain.CStore.cond ^ 1)));
+      ptmp = p; /* fill in this bit later */
+      *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+
+      /* Now the store. */
+      rex = rexAMode_M(i->Ain.CStore.src, i->Ain.CStore.addr);
+      *p++ = i->Ain.CStore.szB == 4 ? clearWBit(rex) : rex;
+      *p++ = 0x89;
+      p = doAMode_M(p, i->Ain.CStore.src, i->Ain.CStore.addr);
+
+      /* Fix up the conditional branch */
+      Int delta = p - ptmp;
+      vassert(delta > 0 && delta < 40);
+      *ptmp = toUChar(delta-1);
+      goto done;
+   }
+
+   case Ain_MovxLQ:
+      /* No, _don't_ ask me why the sense of the args has to be
+         different in the S vs Z case.  I don't know. */
+      if (i->Ain.MovxLQ.syned) {
+         /* Need REX.W = 1 here, but rexAMode_R does that for us. */
+         *p++ = rexAMode_R(i->Ain.MovxLQ.dst, i->Ain.MovxLQ.src);
+         *p++ = 0x63;
+         p = doAMode_R(p, i->Ain.MovxLQ.dst, i->Ain.MovxLQ.src);
+      } else {
+         /* Produce a 32-bit reg-reg move, since the implicit
+            zero-extend does what we want. */
+         *p++ = clearWBit (
+                   rexAMode_R(i->Ain.MovxLQ.src, i->Ain.MovxLQ.dst));
+         *p++ = 0x89;
+         p = doAMode_R(p, i->Ain.MovxLQ.src, i->Ain.MovxLQ.dst);
+      }
+      goto done;
+
+   case Ain_LoadEX:
+      if (i->Ain.LoadEX.szSmall == 1 && !i->Ain.LoadEX.syned) {
+         /* movzbq */
+         *p++ = rexAMode_M(i->Ain.LoadEX.dst, i->Ain.LoadEX.src); 
+         *p++ = 0x0F;
+         *p++ = 0xB6;
+         p = doAMode_M(p, i->Ain.LoadEX.dst, i->Ain.LoadEX.src); 
+         goto done;
+      }
+      if (i->Ain.LoadEX.szSmall == 2 && !i->Ain.LoadEX.syned) {
+         /* movzwq */
+         *p++ = rexAMode_M(i->Ain.LoadEX.dst, i->Ain.LoadEX.src); 
+         *p++ = 0x0F;
+         *p++ = 0xB7;
+         p = doAMode_M(p, i->Ain.LoadEX.dst, i->Ain.LoadEX.src); 
+         goto done;
+      }
+      if (i->Ain.LoadEX.szSmall == 4 && !i->Ain.LoadEX.syned) {
+         /* movzlq */
+         /* This isn't really an existing AMD64 instruction per se.
+            Rather, we have to do a 32-bit load.  Because a 32-bit
+            write implicitly clears the upper 32 bits of the target
+            register, we get what we want. */
+         *p++ = clearWBit(
+                rexAMode_M(i->Ain.LoadEX.dst, i->Ain.LoadEX.src));
+         *p++ = 0x8B;
+         p = doAMode_M(p, i->Ain.LoadEX.dst, i->Ain.LoadEX.src);
+         goto done;
+      }
+      break;
+
+   case Ain_Set64:
+      /* Make the destination register be 1 or 0, depending on whether
+         the relevant condition holds.  Complication: the top 56 bits
+         of the destination should be forced to zero, but doing 'xorq
+         %r,%r' kills the flag(s) we are about to read.  Sigh.  So
+         start off my moving $0 into the dest. */
+      reg = iregEnc3210(i->Ain.Set64.dst);
+      vassert(reg < 16);
+
+      /* movq $0, %dst */
+      *p++ = toUChar(reg >= 8 ? 0x49 : 0x48);
+      *p++ = 0xC7;
+      *p++ = toUChar(0xC0 + (reg & 7));
+      p = emit32(p, 0);
+
+      /* setb lo8(%dst) */
+      /* note, 8-bit register rex trickyness.  Be careful here. */
+      *p++ = toUChar(reg >= 8 ? 0x41 : 0x40);
+      *p++ = 0x0F; 
+      *p++ = toUChar(0x90 + (0x0F & i->Ain.Set64.cond));
+      *p++ = toUChar(0xC0 + (reg & 7));
+      goto done;
+
+   case Ain_Bsfr64:
+      *p++ = rexAMode_R(i->Ain.Bsfr64.dst, i->Ain.Bsfr64.src);
+      *p++ = 0x0F;
+      if (i->Ain.Bsfr64.isFwds) {
+         *p++ = 0xBC;
+      } else {
+         *p++ = 0xBD;
+      }
+      p = doAMode_R(p, i->Ain.Bsfr64.dst, i->Ain.Bsfr64.src);
+      goto done;
+
+   case Ain_MFence:
+      /* mfence */
+      *p++ = 0x0F; *p++ = 0xAE; *p++ = 0xF0;
+      goto done;
+
+   case Ain_ACAS:
+      /* lock */
+      *p++ = 0xF0;
+      if (i->Ain.ACAS.sz == 2) *p++ = 0x66; 
+      /* cmpxchg{b,w,l,q} %rbx,mem.  Expected-value in %rax, new value
+         in %rbx.  The new-value register is hardwired to be %rbx
+         since dealing with byte integer registers is too much hassle,
+         so we force the register operand to %rbx (could equally be
+         %rcx or %rdx). */
+      rex = rexAMode_M( hregAMD64_RBX(), i->Ain.ACAS.addr );
+      if (i->Ain.ACAS.sz != 8)
+         rex = clearWBit(rex);
+
+      *p++ = rex; /* this can emit 0x40, which is pointless. oh well. */
+      *p++ = 0x0F;
+      if (i->Ain.ACAS.sz == 1) *p++ = 0xB0; else *p++ = 0xB1;
+      p = doAMode_M(p, hregAMD64_RBX(), i->Ain.ACAS.addr);
+      goto done;
+
+   case Ain_DACAS:
+      /* lock */
+      *p++ = 0xF0;
+      /* cmpxchg{8,16}b m{64,128}.  Expected-value in %rdx:%rax, new
+         value in %rcx:%rbx.  All 4 regs are hardwired in the ISA, so
+         aren't encoded in the insn. */
+      rex = rexAMode_M_enc(1, i->Ain.ACAS.addr );
+      if (i->Ain.ACAS.sz != 8)
+         rex = clearWBit(rex);
+      *p++ = rex;
+      *p++ = 0x0F;
+      *p++ = 0xC7;
+      p = doAMode_M_enc(p, 1, i->Ain.DACAS.addr);
+      goto done;
+
+   case Ain_A87Free:
+      vassert(i->Ain.A87Free.nregs > 0 && i->Ain.A87Free.nregs <= 7);
+      for (j = 0; j < i->Ain.A87Free.nregs; j++) {
+         p = do_ffree_st(p, 7-j);
+      }
+      goto done;
+
+   case Ain_A87PushPop:
+      vassert(i->Ain.A87PushPop.szB == 8 || i->Ain.A87PushPop.szB == 4);
+      if (i->Ain.A87PushPop.isPush) {
+         /* Load from memory into %st(0): flds/fldl amode */
+         *p++ = clearWBit(
+                   rexAMode_M_enc(0, i->Ain.A87PushPop.addr) );
+         *p++ = i->Ain.A87PushPop.szB == 4 ? 0xD9 : 0xDD;
+	 p = doAMode_M_enc(p, 0/*subopcode*/, i->Ain.A87PushPop.addr);
+      } else {
+         /* Dump %st(0) to memory: fstps/fstpl amode */
+         *p++ = clearWBit(
+                   rexAMode_M_enc(3, i->Ain.A87PushPop.addr) );
+         *p++ = i->Ain.A87PushPop.szB == 4 ? 0xD9 : 0xDD;
+         p = doAMode_M_enc(p, 3/*subopcode*/, i->Ain.A87PushPop.addr);
+         goto done;
+      }
+      goto done;
+
+   case Ain_A87FpOp:
+      switch (i->Ain.A87FpOp.op) {
+         case Afp_SQRT:   *p++ = 0xD9; *p++ = 0xFA; break;
+         case Afp_SIN:    *p++ = 0xD9; *p++ = 0xFE; break;
+         case Afp_COS:    *p++ = 0xD9; *p++ = 0xFF; break;
+         case Afp_ROUND:  *p++ = 0xD9; *p++ = 0xFC; break;
+         case Afp_2XM1:   *p++ = 0xD9; *p++ = 0xF0; break;
+         case Afp_SCALE:  *p++ = 0xD9; *p++ = 0xFD; break;
+         case Afp_ATAN:   *p++ = 0xD9; *p++ = 0xF3; break;
+         case Afp_YL2X:   *p++ = 0xD9; *p++ = 0xF1; break;
+         case Afp_YL2XP1: *p++ = 0xD9; *p++ = 0xF9; break;
+         case Afp_PREM:   *p++ = 0xD9; *p++ = 0xF8; break;
+         case Afp_PREM1:  *p++ = 0xD9; *p++ = 0xF5; break;
+         case Afp_TAN:
+            /* fptan pushes 1.0 on the FP stack, except when the
+               argument is out of range.  Hence we have to do the
+               instruction, then inspect C2 to see if there is an out
+               of range condition.  If there is, we skip the fincstp
+               that is used by the in-range case to get rid of this
+               extra 1.0 value. */
+            *p++ = 0xD9; *p++ = 0xF2; // fptan
+            *p++ = 0x50;              // pushq %rax
+            *p++ = 0xDF; *p++ = 0xE0; // fnstsw %ax
+            *p++ = 0x66; *p++ = 0xA9; 
+            *p++ = 0x00; *p++ = 0x04; // testw $0x400,%ax
+            *p++ = 0x75; *p++ = 0x02; // jnz after_fincstp
+            *p++ = 0xD9; *p++ = 0xF7; // fincstp
+            *p++ = 0x58;              // after_fincstp: popq %rax
+            break;
+         default:
+            goto bad;
+      }
+      goto done;
+
+   case Ain_A87LdCW:
+      *p++ = clearWBit(
+                rexAMode_M_enc(5, i->Ain.A87LdCW.addr) );
+      *p++ = 0xD9;
+      p = doAMode_M_enc(p, 5/*subopcode*/, i->Ain.A87LdCW.addr);
+      goto done;
+
+   case Ain_A87StSW:
+      *p++ = clearWBit(
+                rexAMode_M_enc(7, i->Ain.A87StSW.addr) );
+      *p++ = 0xDD;
+      p = doAMode_M_enc(p, 7/*subopcode*/, i->Ain.A87StSW.addr);
+      goto done;
+
+   case Ain_Store:
+      if (i->Ain.Store.sz == 2) {
+         /* This just goes to show the crazyness of the instruction
+            set encoding.  We have to insert two prefix bytes, but be
+            careful to avoid a conflict in what the size should be, by
+            ensuring that REX.W = 0. */
+         *p++ = 0x66; /* override to 16-bits */
+	 *p++ = clearWBit( rexAMode_M( i->Ain.Store.src, i->Ain.Store.dst) );
+         *p++ = 0x89;
+         p = doAMode_M(p, i->Ain.Store.src, i->Ain.Store.dst);
+         goto done;
+      }
+      if (i->Ain.Store.sz == 4) {
+	 *p++ = clearWBit( rexAMode_M( i->Ain.Store.src, i->Ain.Store.dst) );
+         *p++ = 0x89;
+         p = doAMode_M(p, i->Ain.Store.src, i->Ain.Store.dst);
+         goto done;
+      }
+      if (i->Ain.Store.sz == 1) {
+         /* This is one place where it would be wrong to skip emitting
+            a rex byte of 0x40, since the mere presence of rex changes
+            the meaning of the byte register access.  Be careful. */
+	 *p++ = clearWBit( rexAMode_M( i->Ain.Store.src, i->Ain.Store.dst) );
+         *p++ = 0x88;
+         p = doAMode_M(p, i->Ain.Store.src, i->Ain.Store.dst);
+         goto done;
+      }
+      break;
+
+   case Ain_LdMXCSR:
+      *p++ = clearWBit(rexAMode_M_enc(0, i->Ain.LdMXCSR.addr));
+      *p++ = 0x0F;
+      *p++ = 0xAE;
+      p = doAMode_M_enc(p, 2/*subopcode*/, i->Ain.LdMXCSR.addr);
+      goto done;
+
+   case Ain_SseUComIS:
+      /* ucomi[sd] %srcL, %srcR ;  pushfq ; popq %dst */
+      /* ucomi[sd] %srcL, %srcR */
+      if (i->Ain.SseUComIS.sz == 8) {
+         *p++ = 0x66;
+      } else {
+         goto bad;
+         vassert(i->Ain.SseUComIS.sz == 4);
+      }
+      *p++ = clearWBit (
+             rexAMode_R_enc_enc( vregEnc3210(i->Ain.SseUComIS.srcL),
+                                 vregEnc3210(i->Ain.SseUComIS.srcR) ));
+      *p++ = 0x0F;
+      *p++ = 0x2E;
+      p = doAMode_R_enc_enc(p, vregEnc3210(i->Ain.SseUComIS.srcL),
+                               vregEnc3210(i->Ain.SseUComIS.srcR) );
+      /* pushfq */
+      *p++ = 0x9C;
+      /* popq %dst */
+      *p++ = toUChar(0x40 + (1 & iregEnc3(i->Ain.SseUComIS.dst)));
+      *p++ = toUChar(0x58 + iregEnc210(i->Ain.SseUComIS.dst));
+      goto done;
+
+   case Ain_SseSI2SF:
+      /* cvssi2s[sd] %src, %dst */
+      rex = rexAMode_R_enc_reg( vregEnc3210(i->Ain.SseSI2SF.dst),
+                                i->Ain.SseSI2SF.src );
+      *p++ = toUChar(i->Ain.SseSI2SF.szD==4 ? 0xF3 : 0xF2);
+      *p++ = toUChar(i->Ain.SseSI2SF.szS==4 ? clearWBit(rex) : rex);
+      *p++ = 0x0F;
+      *p++ = 0x2A;
+      p = doAMode_R_enc_reg( p, vregEnc3210(i->Ain.SseSI2SF.dst),
+                                i->Ain.SseSI2SF.src );
+      goto done;
+
+   case Ain_SseSF2SI:
+      /* cvss[sd]2si %src, %dst */
+      rex = rexAMode_R_reg_enc( i->Ain.SseSF2SI.dst,
+                                vregEnc3210(i->Ain.SseSF2SI.src) );
+      *p++ = toUChar(i->Ain.SseSF2SI.szS==4 ? 0xF3 : 0xF2);
+      *p++ = toUChar(i->Ain.SseSF2SI.szD==4 ? clearWBit(rex) : rex);
+      *p++ = 0x0F;
+      *p++ = 0x2D;
+      p = doAMode_R_reg_enc( p, i->Ain.SseSF2SI.dst,
+                                vregEnc3210(i->Ain.SseSF2SI.src) );
+      goto done;
+
+   case Ain_SseSDSS:
+      /* cvtsd2ss/cvtss2sd %src, %dst */
+      *p++ = toUChar(i->Ain.SseSDSS.from64 ? 0xF2 : 0xF3);
+      *p++ = clearWBit(
+              rexAMode_R_enc_enc( vregEnc3210(i->Ain.SseSDSS.dst),
+                                  vregEnc3210(i->Ain.SseSDSS.src) ));
+      *p++ = 0x0F;
+      *p++ = 0x5A;
+      p = doAMode_R_enc_enc( p, vregEnc3210(i->Ain.SseSDSS.dst),
+                                vregEnc3210(i->Ain.SseSDSS.src) );
+      goto done;
+
+   case Ain_SseLdSt:
+      if (i->Ain.SseLdSt.sz == 8) {
+         *p++ = 0xF2;
+      } else
+      if (i->Ain.SseLdSt.sz == 4) {
+         *p++ = 0xF3;
+      } else
+      if (i->Ain.SseLdSt.sz != 16) {
+         vassert(0);
+      }
+      *p++ = clearWBit(
+             rexAMode_M_enc(vregEnc3210(i->Ain.SseLdSt.reg),
+                            i->Ain.SseLdSt.addr));
+      *p++ = 0x0F; 
+      *p++ = toUChar(i->Ain.SseLdSt.isLoad ? 0x10 : 0x11);
+      p = doAMode_M_enc(p, vregEnc3210(i->Ain.SseLdSt.reg),
+                           i->Ain.SseLdSt.addr);
+      goto done;
+
+   case Ain_SseLdzLO:
+      vassert(i->Ain.SseLdzLO.sz == 4 || i->Ain.SseLdzLO.sz == 8);
+      /* movs[sd] amode, %xmm-dst */
+      *p++ = toUChar(i->Ain.SseLdzLO.sz==4 ? 0xF3 : 0xF2);
+      *p++ = clearWBit(
+             rexAMode_M_enc(vregEnc3210(i->Ain.SseLdzLO.reg), 
+                            i->Ain.SseLdzLO.addr));
+      *p++ = 0x0F; 
+      *p++ = 0x10; 
+      p = doAMode_M_enc(p, vregEnc3210(i->Ain.SseLdzLO.reg), 
+                           i->Ain.SseLdzLO.addr);
+      goto done;
+
+   case Ain_Sse32Fx4:
+      xtra = 0;
+      *p++ = clearWBit(
+             rexAMode_R_enc_enc( vregEnc3210(i->Ain.Sse32Fx4.dst),
+                                 vregEnc3210(i->Ain.Sse32Fx4.src) ));
+      *p++ = 0x0F;
+      switch (i->Ain.Sse32Fx4.op) {
+         case Asse_ADDF:   *p++ = 0x58; break;
+         case Asse_DIVF:   *p++ = 0x5E; break;
+         case Asse_MAXF:   *p++ = 0x5F; break;
+         case Asse_MINF:   *p++ = 0x5D; break;
+         case Asse_MULF:   *p++ = 0x59; break;
+         case Asse_RCPF:   *p++ = 0x53; break;
+         case Asse_RSQRTF: *p++ = 0x52; break;
+         case Asse_SQRTF:  *p++ = 0x51; break;
+         case Asse_SUBF:   *p++ = 0x5C; break;
+         case Asse_CMPEQF: *p++ = 0xC2; xtra = 0x100; break;
+         case Asse_CMPLTF: *p++ = 0xC2; xtra = 0x101; break;
+         case Asse_CMPLEF: *p++ = 0xC2; xtra = 0x102; break;
+         case Asse_CMPUNF: *p++ = 0xC2; xtra = 0x103; break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc3210(i->Ain.Sse32Fx4.dst),
+                               vregEnc3210(i->Ain.Sse32Fx4.src) );
+      if (xtra & 0x100)
+         *p++ = toUChar(xtra & 0xFF);
+      goto done;
+
+   case Ain_Sse64Fx2:
+      xtra = 0;
+      *p++ = 0x66;
+      *p++ = clearWBit(
+             rexAMode_R_enc_enc( vregEnc3210(i->Ain.Sse64Fx2.dst),
+                                 vregEnc3210(i->Ain.Sse64Fx2.src) ));
+      *p++ = 0x0F;
+      switch (i->Ain.Sse64Fx2.op) {
+         case Asse_ADDF:   *p++ = 0x58; break;
+         case Asse_DIVF:   *p++ = 0x5E; break;
+         case Asse_MAXF:   *p++ = 0x5F; break;
+         case Asse_MINF:   *p++ = 0x5D; break;
+         case Asse_MULF:   *p++ = 0x59; break;
+         case Asse_SQRTF:  *p++ = 0x51; break;
+         case Asse_SUBF:   *p++ = 0x5C; break;
+         case Asse_CMPEQF: *p++ = 0xC2; xtra = 0x100; break;
+         case Asse_CMPLTF: *p++ = 0xC2; xtra = 0x101; break;
+         case Asse_CMPLEF: *p++ = 0xC2; xtra = 0x102; break;
+         case Asse_CMPUNF: *p++ = 0xC2; xtra = 0x103; break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc3210(i->Ain.Sse64Fx2.dst),
+                               vregEnc3210(i->Ain.Sse64Fx2.src) );
+      if (xtra & 0x100)
+         *p++ = toUChar(xtra & 0xFF);
+      goto done;
+
+   case Ain_Sse32FLo:
+      xtra = 0;
+      *p++ = 0xF3;
+      *p++ = clearWBit(
+             rexAMode_R_enc_enc( vregEnc3210(i->Ain.Sse32FLo.dst),
+                                 vregEnc3210(i->Ain.Sse32FLo.src) ));
+      *p++ = 0x0F;
+      switch (i->Ain.Sse32FLo.op) {
+         case Asse_ADDF:   *p++ = 0x58; break;
+         case Asse_DIVF:   *p++ = 0x5E; break;
+         case Asse_MAXF:   *p++ = 0x5F; break;
+         case Asse_MINF:   *p++ = 0x5D; break;
+         case Asse_MULF:   *p++ = 0x59; break;
+         case Asse_RCPF:   *p++ = 0x53; break;
+         case Asse_RSQRTF: *p++ = 0x52; break;
+         case Asse_SQRTF:  *p++ = 0x51; break;
+         case Asse_SUBF:   *p++ = 0x5C; break;
+         case Asse_CMPEQF: *p++ = 0xC2; xtra = 0x100; break;
+         case Asse_CMPLTF: *p++ = 0xC2; xtra = 0x101; break;
+         case Asse_CMPLEF: *p++ = 0xC2; xtra = 0x102; break;
+         case Asse_CMPUNF: *p++ = 0xC2; xtra = 0x103; break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc3210(i->Ain.Sse32FLo.dst),
+                               vregEnc3210(i->Ain.Sse32FLo.src) );
+      if (xtra & 0x100)
+         *p++ = toUChar(xtra & 0xFF);
+      goto done;
+
+   case Ain_Sse64FLo:
+      xtra = 0;
+      *p++ = 0xF2;
+      *p++ = clearWBit(
+             rexAMode_R_enc_enc( vregEnc3210(i->Ain.Sse64FLo.dst),
+                                 vregEnc3210(i->Ain.Sse64FLo.src) ));
+      *p++ = 0x0F;
+      switch (i->Ain.Sse64FLo.op) {
+         case Asse_ADDF:   *p++ = 0x58; break;
+         case Asse_DIVF:   *p++ = 0x5E; break;
+         case Asse_MAXF:   *p++ = 0x5F; break;
+         case Asse_MINF:   *p++ = 0x5D; break;
+         case Asse_MULF:   *p++ = 0x59; break;
+         case Asse_SQRTF:  *p++ = 0x51; break;
+         case Asse_SUBF:   *p++ = 0x5C; break;
+         case Asse_CMPEQF: *p++ = 0xC2; xtra = 0x100; break;
+         case Asse_CMPLTF: *p++ = 0xC2; xtra = 0x101; break;
+         case Asse_CMPLEF: *p++ = 0xC2; xtra = 0x102; break;
+         case Asse_CMPUNF: *p++ = 0xC2; xtra = 0x103; break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc3210(i->Ain.Sse64FLo.dst),
+                               vregEnc3210(i->Ain.Sse64FLo.src) );
+      if (xtra & 0x100)
+         *p++ = toUChar(xtra & 0xFF);
+      goto done;
+
+   case Ain_SseReRg:
+#     define XX(_n) *p++ = (_n)
+
+      rex = clearWBit(
+            rexAMode_R_enc_enc( vregEnc3210(i->Ain.SseReRg.dst),
+                                vregEnc3210(i->Ain.SseReRg.src) ));
+
+      switch (i->Ain.SseReRg.op) {
+         case Asse_MOV:     /*movups*/ XX(rex); XX(0x0F); XX(0x10); break;
+         case Asse_OR:                 XX(rex); XX(0x0F); XX(0x56); break;
+         case Asse_XOR:                XX(rex); XX(0x0F); XX(0x57); break;
+         case Asse_AND:                XX(rex); XX(0x0F); XX(0x54); break;
+         case Asse_ANDN:               XX(rex); XX(0x0F); XX(0x55); break;
+         case Asse_PACKSSD:  XX(0x66); XX(rex); XX(0x0F); XX(0x6B); break;
+         case Asse_PACKSSW:  XX(0x66); XX(rex); XX(0x0F); XX(0x63); break;
+         case Asse_PACKUSW:  XX(0x66); XX(rex); XX(0x0F); XX(0x67); break;
+         case Asse_ADD8:     XX(0x66); XX(rex); XX(0x0F); XX(0xFC); break;
+         case Asse_ADD16:    XX(0x66); XX(rex); XX(0x0F); XX(0xFD); break;
+         case Asse_ADD32:    XX(0x66); XX(rex); XX(0x0F); XX(0xFE); break;
+         case Asse_ADD64:    XX(0x66); XX(rex); XX(0x0F); XX(0xD4); break;
+         case Asse_QADD8S:   XX(0x66); XX(rex); XX(0x0F); XX(0xEC); break;
+         case Asse_QADD16S:  XX(0x66); XX(rex); XX(0x0F); XX(0xED); break;
+         case Asse_QADD8U:   XX(0x66); XX(rex); XX(0x0F); XX(0xDC); break;
+         case Asse_QADD16U:  XX(0x66); XX(rex); XX(0x0F); XX(0xDD); break;
+         case Asse_AVG8U:    XX(0x66); XX(rex); XX(0x0F); XX(0xE0); break;
+         case Asse_AVG16U:   XX(0x66); XX(rex); XX(0x0F); XX(0xE3); break;
+         case Asse_CMPEQ8:   XX(0x66); XX(rex); XX(0x0F); XX(0x74); break;
+         case Asse_CMPEQ16:  XX(0x66); XX(rex); XX(0x0F); XX(0x75); break;
+         case Asse_CMPEQ32:  XX(0x66); XX(rex); XX(0x0F); XX(0x76); break;
+         case Asse_CMPGT8S:  XX(0x66); XX(rex); XX(0x0F); XX(0x64); break;
+         case Asse_CMPGT16S: XX(0x66); XX(rex); XX(0x0F); XX(0x65); break;
+         case Asse_CMPGT32S: XX(0x66); XX(rex); XX(0x0F); XX(0x66); break;
+         case Asse_MAX16S:   XX(0x66); XX(rex); XX(0x0F); XX(0xEE); break;
+         case Asse_MAX8U:    XX(0x66); XX(rex); XX(0x0F); XX(0xDE); break;
+         case Asse_MIN16S:   XX(0x66); XX(rex); XX(0x0F); XX(0xEA); break;
+         case Asse_MIN8U:    XX(0x66); XX(rex); XX(0x0F); XX(0xDA); break;
+         case Asse_MULHI16U: XX(0x66); XX(rex); XX(0x0F); XX(0xE4); break;
+         case Asse_MULHI16S: XX(0x66); XX(rex); XX(0x0F); XX(0xE5); break;
+         case Asse_MUL16:    XX(0x66); XX(rex); XX(0x0F); XX(0xD5); break;
+         case Asse_SHL16:    XX(0x66); XX(rex); XX(0x0F); XX(0xF1); break;
+         case Asse_SHL32:    XX(0x66); XX(rex); XX(0x0F); XX(0xF2); break;
+         case Asse_SHL64:    XX(0x66); XX(rex); XX(0x0F); XX(0xF3); break;
+         case Asse_SAR16:    XX(0x66); XX(rex); XX(0x0F); XX(0xE1); break;
+         case Asse_SAR32:    XX(0x66); XX(rex); XX(0x0F); XX(0xE2); break;
+         case Asse_SHR16:    XX(0x66); XX(rex); XX(0x0F); XX(0xD1); break;
+         case Asse_SHR32:    XX(0x66); XX(rex); XX(0x0F); XX(0xD2); break;
+         case Asse_SHR64:    XX(0x66); XX(rex); XX(0x0F); XX(0xD3); break;
+         case Asse_SUB8:     XX(0x66); XX(rex); XX(0x0F); XX(0xF8); break;
+         case Asse_SUB16:    XX(0x66); XX(rex); XX(0x0F); XX(0xF9); break;
+         case Asse_SUB32:    XX(0x66); XX(rex); XX(0x0F); XX(0xFA); break;
+         case Asse_SUB64:    XX(0x66); XX(rex); XX(0x0F); XX(0xFB); break;
+         case Asse_QSUB8S:   XX(0x66); XX(rex); XX(0x0F); XX(0xE8); break;
+         case Asse_QSUB16S:  XX(0x66); XX(rex); XX(0x0F); XX(0xE9); break;
+         case Asse_QSUB8U:   XX(0x66); XX(rex); XX(0x0F); XX(0xD8); break;
+         case Asse_QSUB16U:  XX(0x66); XX(rex); XX(0x0F); XX(0xD9); break;
+         case Asse_UNPCKHB:  XX(0x66); XX(rex); XX(0x0F); XX(0x68); break;
+         case Asse_UNPCKHW:  XX(0x66); XX(rex); XX(0x0F); XX(0x69); break;
+         case Asse_UNPCKHD:  XX(0x66); XX(rex); XX(0x0F); XX(0x6A); break;
+         case Asse_UNPCKHQ:  XX(0x66); XX(rex); XX(0x0F); XX(0x6D); break;
+         case Asse_UNPCKLB:  XX(0x66); XX(rex); XX(0x0F); XX(0x60); break;
+         case Asse_UNPCKLW:  XX(0x66); XX(rex); XX(0x0F); XX(0x61); break;
+         case Asse_UNPCKLD:  XX(0x66); XX(rex); XX(0x0F); XX(0x62); break;
+         case Asse_UNPCKLQ:  XX(0x66); XX(rex); XX(0x0F); XX(0x6C); break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc3210(i->Ain.SseReRg.dst),
+                               vregEnc3210(i->Ain.SseReRg.src) );
+#     undef XX
+      goto done;
+
+   case Ain_SseCMov:
+      /* jmp fwds if !condition */
+      *p++ = toUChar(0x70 + (i->Ain.SseCMov.cond ^ 1));
+      *p++ = 0; /* # of bytes in the next bit, which we don't know yet */
+      ptmp = p;
+
+      /* movaps %src, %dst */
+      *p++ = clearWBit(
+             rexAMode_R_enc_enc( vregEnc3210(i->Ain.SseCMov.dst),
+                                 vregEnc3210(i->Ain.SseCMov.src) ));
+      *p++ = 0x0F; 
+      *p++ = 0x28; 
+      p = doAMode_R_enc_enc(p, vregEnc3210(i->Ain.SseCMov.dst),
+                               vregEnc3210(i->Ain.SseCMov.src) );
+
+      /* Fill in the jump offset. */
+      *(ptmp-1) = toUChar(p - ptmp);
+      goto done;
+
+   case Ain_SseShuf:
+      *p++ = 0x66; 
+      *p++ = clearWBit(
+             rexAMode_R_enc_enc( vregEnc3210(i->Ain.SseShuf.dst),
+                                 vregEnc3210(i->Ain.SseShuf.src) ));
+      *p++ = 0x0F; 
+      *p++ = 0x70; 
+      p = doAMode_R_enc_enc(p, vregEnc3210(i->Ain.SseShuf.dst),
+                               vregEnc3210(i->Ain.SseShuf.src) );
+      *p++ = (UChar)(i->Ain.SseShuf.order);
+      goto done;
+
+   //uu case Ain_AvxLdSt: {
+   //uu    UInt vex = vexAMode_M( dvreg2ireg(i->Ain.AvxLdSt.reg),
+   //uu                           i->Ain.AvxLdSt.addr );
+   //uu    p = emitVexPrefix(p, vex);
+   //uu    *p++ = toUChar(i->Ain.AvxLdSt.isLoad ? 0x10 : 0x11);
+   //uu    p = doAMode_M(p, dvreg2ireg(i->Ain.AvxLdSt.reg), i->Ain.AvxLdSt.addr);
+   //uu      goto done;
+   //uu }
+
+   case Ain_EvCheck: {
+      /* We generate:
+            (3 bytes)  decl 8(%rbp)    8 == offsetof(host_EvC_COUNTER)
+            (2 bytes)  jns  nofail     expected taken
+            (3 bytes)  jmp* 0(%rbp)    0 == offsetof(host_EvC_FAILADDR)
+            nofail:
+      */
+      /* This is heavily asserted re instruction lengths.  It needs to
+         be.  If we get given unexpected forms of .amCounter or
+         .amFailAddr -- basically, anything that's not of the form
+         uimm7(%rbp) -- they are likely to fail. */
+      /* Note also that after the decl we must be very careful not to
+         read the carry flag, else we get a partial flags stall.
+         js/jns avoids that, though. */
+      UChar* p0 = p;
+      /* ---  decl 8(%rbp) --- */
+      /* Need to compute the REX byte for the decl in order to prove
+         that we don't need it, since this is a 32-bit decl and all
+         registers involved in the amode are < r8.  "1" because
+         there's no register in this encoding; instead the register
+         field is used as a sub opcode.  The encoding for "decl r/m32"
+         is FF /1, hence the "1". */
+      rex = clearWBit(rexAMode_M_enc(1, i->Ain.EvCheck.amCounter));
+      if (rex != 0x40) goto bad; /* We don't expect to need the REX byte. */
+      *p++ = 0xFF;
+      p = doAMode_M_enc(p, 1, i->Ain.EvCheck.amCounter);
+      vassert(p - p0 == 3);
+      /* --- jns nofail --- */
+      *p++ = 0x79;
+      *p++ = 0x03; /* need to check this 0x03 after the next insn */
+      vassert(p - p0 == 5);
+      /* --- jmp* 0(%rbp) --- */
+      /* Once again, verify we don't need REX.  The encoding is FF /4.
+         We don't need REX.W since by default FF /4 in 64-bit mode
+         implies a 64 bit load. */
+      rex = clearWBit(rexAMode_M_enc(4, i->Ain.EvCheck.amFailAddr));
+      if (rex != 0x40) goto bad;
+      *p++ = 0xFF;
+      p = doAMode_M_enc(p, 4, i->Ain.EvCheck.amFailAddr);
+      vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
+      /* And crosscheck .. */
+      vassert(evCheckSzB_AMD64() == 8);
+      goto done;
+   }
+
+   case Ain_ProfInc: {
+      /* We generate   movabsq $0, %r11
+                       incq (%r11)
+         in the expectation that a later call to LibVEX_patchProfCtr
+         will be used to fill in the immediate field once the right
+         value is known.
+         49 BB 00 00 00 00 00 00 00 00
+         49 FF 03
+      */
+      *p++ = 0x49; *p++ = 0xBB;
+      *p++ = 0x00; *p++ = 0x00; *p++ = 0x00; *p++ = 0x00;
+      *p++ = 0x00; *p++ = 0x00; *p++ = 0x00; *p++ = 0x00;
+      *p++ = 0x49; *p++ = 0xFF; *p++ = 0x03;
+      /* Tell the caller .. */
+      vassert(!(*is_profInc));
+      *is_profInc = True;
+      goto done;
+   }
+
+   default: 
+      goto bad;
+   }
+
+  bad:
+   ppAMD64Instr(i, mode64);
+   vpanic("emit_AMD64Instr");
+   /*NOTREACHED*/
+   
+  done:
+   vassert(p - &buf[0] <= 32);
+   return p - &buf[0];
+}
+
+
+/* How big is an event check?  See case for Ain_EvCheck in
+   emit_AMD64Instr just above.  That crosschecks what this returns, so
+   we can tell if we're inconsistent. */
+Int evCheckSzB_AMD64 (void)
+{
+   return 8;   /* == 3 (decl) + 2 (jns) + 3 (jmp*); must equal the byte count asserted in Ain_EvCheck emission */
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+                                   void* place_to_chain,
+                                   const void* disp_cp_chain_me_EXPECTED,
+                                   const void* place_to_jump_to )
+{
+   vassert(endness_host == VexEndnessLE);
+
+   /* What we're expecting to see is:
+        movabsq $disp_cp_chain_me_EXPECTED, %r11
+        call *%r11
+      viz
+        49 BB <8 bytes value == disp_cp_chain_me_EXPECTED>
+        41 FF D3
+   */
+   UChar* p = (UChar*)place_to_chain;
+   vassert(p[0] == 0x49);
+   vassert(p[1] == 0xBB);
+   vassert(*(Addr*)(&p[2]) == (Addr)disp_cp_chain_me_EXPECTED);
+   vassert(p[10] == 0x41);
+   vassert(p[11] == 0xFF);
+   vassert(p[12] == 0xD3);
+   /* And what we want to change it to is either:
+        (general case):
+          movabsq $place_to_jump_to, %r11
+          jmpq *%r11
+        viz
+          49 BB <8 bytes value == place_to_jump_to>
+          41 FF E3
+        So it's the same length (convenient, huh) and we don't
+        need to change all the bits.
+      ---OR---
+        in the case where the displacement falls within 32 bits
+          jmpq disp32   where disp32 is relative to the next insn
+          ud2; ud2; ud2; ud2
+        viz
+          E9 <4 bytes == disp32>
+          0F 0B 0F 0B 0F 0B 0F 0B 
+
+      In both cases the replacement has the same length as the original.
+      To remain sane & verifiable,
+      (1) limit the displacement for the short form to 
+          (say) +/- one billion, so as to avoid wraparound
+          off-by-ones
+      (2) even if the short form is applicable, once every (say)
+          1024 times use the long form anyway, so as to maintain
+          verifiability
+   */
+   /* This is the delta we need to put into a JMP d32 insn.  It's
+      relative to the start of the next insn, hence the -5.  */
+   Long delta   = (Long)((const UChar *)place_to_jump_to - (const UChar*)p) - 5;
+   Bool shortOK = delta >= -1000*1000*1000 && delta < 1000*1000*1000; /* rule (1) above */
+
+   static UInt shortCTR = 0; /* DO NOT MAKE NON-STATIC */
+   if (shortOK) {
+      shortCTR++; // thread safety bleh
+      if (0 == (shortCTR & 0x3FF)) {  /* every 1024th chain: rule (2) above */
+         shortOK = False;
+         if (0)
+            vex_printf("QQQ chainXDirect_AMD64: shortCTR = %u, "
+                       "using long jmp\n", shortCTR);
+      }
+   }
+
+   /* And make the modifications. */
+   if (shortOK) {
+      p[0]  = 0xE9;
+      p[1]  = (delta >> 0) & 0xFF;
+      p[2]  = (delta >> 8) & 0xFF;
+      p[3]  = (delta >> 16) & 0xFF;
+      p[4]  = (delta >> 24) & 0xFF;
+      p[5]  = 0x0F; p[6]  = 0x0B;
+      p[7]  = 0x0F; p[8]  = 0x0B;
+      p[9]  = 0x0F; p[10] = 0x0B;
+      p[11] = 0x0F; p[12] = 0x0B;
+      /* sanity check on the delta -- top 32 are all 0 or all 1 (NOTE(review): runs after the bytes are written; the +/-1e9 bound above should make failure impossible) */
+      delta >>= 32;
+      vassert(delta == 0LL || delta == -1LL);
+   } else {
+      /* Minimal modifications from the starting sequence. */   
+     *(Addr*)(&p[2]) = (Addr)place_to_jump_to;
+      p[12] = 0xE3;  /* call *%r11 (D3) -> jmpq *%r11 (E3) */
+   }
+   VexInvalRange vir = { (HWord)place_to_chain, 13 }; /* both forms are 13 bytes */
+   return vir;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+                                     void* place_to_unchain,
+                                     const void* place_to_jump_to_EXPECTED,
+                                     const void* disp_cp_chain_me )
+{
+   vassert(endness_host == VexEndnessLE);
+
+   /* What we're expecting to see is either:
+        (general case)
+          movabsq $place_to_jump_to_EXPECTED, %r11
+          jmpq *%r11
+        viz
+          49 BB <8 bytes value == place_to_jump_to_EXPECTED>
+          41 FF E3
+      ---OR---
+        in the case where the displacement falls within 32 bits
+          jmpq d32
+          ud2; ud2; ud2; ud2
+        viz
+          E9 <4 bytes == disp32>
+          0F 0B 0F 0B 0F 0B 0F 0B
+   */
+   UChar* p     = (UChar*)place_to_unchain;
+   Bool   valid = False;
+   if (p[0] == 0x49 && p[1] == 0xBB
+       && *(Addr*)(&p[2]) == (Addr)place_to_jump_to_EXPECTED
+       && p[10] == 0x41 && p[11] == 0xFF && p[12] == 0xE3) {
+      /* it's the long form */
+      valid = True;
+   }
+   else
+   if (p[0] == 0xE9 
+       && p[5]  == 0x0F && p[6]  == 0x0B
+       && p[7]  == 0x0F && p[8]  == 0x0B
+       && p[9]  == 0x0F && p[10] == 0x0B
+       && p[11] == 0x0F && p[12] == 0x0B) {
+      /* It's the short form.  Check the offset is right. */
+      Int  s32 = *(Int*)(&p[1]);  /* little-endian 32-bit displacement */
+      Long s64 = (Long)s32;       /* sign-extend to 64 bits */
+      if ((UChar*)p + 5 + s64 == place_to_jump_to_EXPECTED) {
+         valid = True;
+         if (0)
+            vex_printf("QQQ unchainXDirect_AMD64: found short form\n");
+      }
+   }
+   vassert(valid);  /* must have matched one of the two chained forms */
+   /* And what we want to change it to is:
+        movabsq $disp_cp_chain_me, %r11
+        call *%r11
+      viz
+        49 BB <8 bytes value == disp_cp_chain_me>
+        41 FF D3
+      So it's the same length (convenient, huh).
+   */
+   p[0] = 0x49;
+   p[1] = 0xBB;
+   *(Addr*)(&p[2]) = (Addr)disp_cp_chain_me;
+   p[10] = 0x41;
+   p[11] = 0xFF;
+   p[12] = 0xD3;
+   VexInvalRange vir = { (HWord)place_to_unchain, 13 }; /* 13 == sequence length */
+   return vir;
+}
+
+
+/* Patch the counter address into a profile inc point, as previously
+   created by the Ain_ProfInc case for emit_AMD64Instr. */
+VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+                                   void*  place_to_patch,
+                                   const ULong* location_of_counter )
+{
+   vassert(endness_host == VexEndnessLE);
+   vassert(sizeof(ULong*) == 8);
+   UChar* p = (UChar*)place_to_patch;
+   vassert(p[0] == 0x49);   /* expect: movabsq $0, %r11 ... */
+   vassert(p[1] == 0xBB);
+   vassert(p[2] == 0x00);   /* ... with an all-zero placeholder imm64 */
+   vassert(p[3] == 0x00);
+   vassert(p[4] == 0x00);
+   vassert(p[5] == 0x00);
+   vassert(p[6] == 0x00);
+   vassert(p[7] == 0x00);
+   vassert(p[8] == 0x00);
+   vassert(p[9] == 0x00);
+   vassert(p[10] == 0x49);  /* ... followed by incq (%r11) */
+   vassert(p[11] == 0xFF);
+   vassert(p[12] == 0x03);
+   ULong imm64 = (ULong)(Addr)location_of_counter;  /* counter address to splice in */
+   p[2] = imm64 & 0xFF; imm64 >>= 8;   /* write imm64 little-endian, low byte first */
+   p[3] = imm64 & 0xFF; imm64 >>= 8;
+   p[4] = imm64 & 0xFF; imm64 >>= 8;
+   p[5] = imm64 & 0xFF; imm64 >>= 8;
+   p[6] = imm64 & 0xFF; imm64 >>= 8;
+   p[7] = imm64 & 0xFF; imm64 >>= 8;
+   p[8] = imm64 & 0xFF; imm64 >>= 8;
+   p[9] = imm64 & 0xFF; imm64 >>= 8;
+   VexInvalRange vir = { (HWord)place_to_patch, 13 }; /* 13 == sequence length */
+   return vir;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                   host_amd64_defs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_amd64_defs.h b/VEX/priv/host_amd64_defs.h
new file mode 100644
index 0000000..f76cd83
--- /dev/null
+++ b/VEX/priv/host_amd64_defs.h
@@ -0,0 +1,831 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 host_amd64_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __VEX_HOST_AMD64_DEFS_H
+#define __VEX_HOST_AMD64_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex.h"                      // VexArch
+#include "host_generic_regs.h"           // HReg
+
+/* --------- Registers. --------- */
+
+/* The usual HReg abstraction.  There are 16 real int regs, 6 real
+   float regs, and 16 real vector regs.
+*/
+
+#define ST_IN static inline   /* shorthand for the always-inline reg ctors below */
+ST_IN HReg hregAMD64_RSI   ( void ) { return mkHReg(False, HRcInt64,   6,  0); } /* 3rd arg = hw encoding, 4th = dense index (0..27, declaration order) */
+ST_IN HReg hregAMD64_RDI   ( void ) { return mkHReg(False, HRcInt64,   7,  1); }
+ST_IN HReg hregAMD64_R8    ( void ) { return mkHReg(False, HRcInt64,   8,  2); }
+ST_IN HReg hregAMD64_R9    ( void ) { return mkHReg(False, HRcInt64,   9,  3); }
+ST_IN HReg hregAMD64_R12   ( void ) { return mkHReg(False, HRcInt64,  12,  4); }
+ST_IN HReg hregAMD64_R13   ( void ) { return mkHReg(False, HRcInt64,  13,  5); }
+ST_IN HReg hregAMD64_R14   ( void ) { return mkHReg(False, HRcInt64,  14,  6); }
+ST_IN HReg hregAMD64_R15   ( void ) { return mkHReg(False, HRcInt64,  15,  7); }
+ST_IN HReg hregAMD64_RBX   ( void ) { return mkHReg(False, HRcInt64,   3,  8); }
+
+ST_IN HReg hregAMD64_XMM3  ( void ) { return mkHReg(False, HRcVec128,  3,  9); }
+ST_IN HReg hregAMD64_XMM4  ( void ) { return mkHReg(False, HRcVec128,  4, 10); }
+ST_IN HReg hregAMD64_XMM5  ( void ) { return mkHReg(False, HRcVec128,  5, 11); }
+ST_IN HReg hregAMD64_XMM6  ( void ) { return mkHReg(False, HRcVec128,  6, 12); }
+ST_IN HReg hregAMD64_XMM7  ( void ) { return mkHReg(False, HRcVec128,  7, 13); }
+ST_IN HReg hregAMD64_XMM8  ( void ) { return mkHReg(False, HRcVec128,  8, 14); }
+ST_IN HReg hregAMD64_XMM9  ( void ) { return mkHReg(False, HRcVec128,  9, 15); }
+ST_IN HReg hregAMD64_XMM10 ( void ) { return mkHReg(False, HRcVec128, 10, 16); }
+ST_IN HReg hregAMD64_XMM11 ( void ) { return mkHReg(False, HRcVec128, 11, 17); }
+ST_IN HReg hregAMD64_XMM12 ( void ) { return mkHReg(False, HRcVec128, 12, 18); }
+
+ST_IN HReg hregAMD64_R10   ( void ) { return mkHReg(False, HRcInt64,  10, 19); }
+/* NOTE(review): regs from here down (RSP/RBP certainly) look like fixed/non-allocatable regs -- confirm against the RRegUniverse setup. */
+ST_IN HReg hregAMD64_RAX   ( void ) { return mkHReg(False, HRcInt64,   0, 20); }
+ST_IN HReg hregAMD64_RCX   ( void ) { return mkHReg(False, HRcInt64,   1, 21); }
+ST_IN HReg hregAMD64_RDX   ( void ) { return mkHReg(False, HRcInt64,   2, 22); }
+ST_IN HReg hregAMD64_RSP   ( void ) { return mkHReg(False, HRcInt64,   4, 23); }
+ST_IN HReg hregAMD64_RBP   ( void ) { return mkHReg(False, HRcInt64,   5, 24); }
+ST_IN HReg hregAMD64_R11   ( void ) { return mkHReg(False, HRcInt64,  11, 25); }
+
+ST_IN HReg hregAMD64_XMM0  ( void ) { return mkHReg(False, HRcVec128,  0, 26); }
+ST_IN HReg hregAMD64_XMM1  ( void ) { return mkHReg(False, HRcVec128,  1, 27); }
+#undef ST_IN
+
+extern void ppHRegAMD64 ( HReg );
+
+
+/* --------- Condition codes, AMD encoding. --------- */
+
+typedef
+   enum {   /* values 0..15 match the hardware condition-code encoding */
+      Acc_O      = 0,  /* overflow           */
+      Acc_NO     = 1,  /* no overflow        */
+
+      Acc_B      = 2,  /* below              */
+      Acc_NB     = 3,  /* not below          */
+
+      Acc_Z      = 4,  /* zero               */
+      Acc_NZ     = 5,  /* not zero           */
+
+      Acc_BE     = 6,  /* below or equal     */
+      Acc_NBE    = 7,  /* not below or equal */
+
+      Acc_S      = 8,  /* negative           */
+      Acc_NS     = 9,  /* not negative       */
+
+      Acc_P      = 10, /* parity even        */
+      Acc_NP     = 11, /* not parity even    */
+
+      Acc_L      = 12, /* jump less          */
+      Acc_NL     = 13, /* not less           */
+
+      Acc_LE     = 14, /* less or equal      */
+      Acc_NLE    = 15, /* not less or equal  */
+
+      Acc_ALWAYS = 16  /* pseudo-cc: unconditionally true (not a real encoding) */
+   }
+   AMD64CondCode;
+
+extern const HChar* showAMD64CondCode ( AMD64CondCode );
+
+
+/* --------- Memory address expressions (amodes). --------- */
+
+typedef
+   enum {
+     Aam_IR,        /* Immediate + Reg */
+     Aam_IRRS       /* Immediate + Reg1 + (Reg2 << Shift) */
+   }
+   AMD64AModeTag;
+
+typedef
+   struct {
+      AMD64AModeTag tag;
+      union {
+         struct {
+            UInt imm;   /* displacement */
+            HReg reg;   /* base */
+         } IR;
+         struct {
+            UInt imm;   /* displacement */
+            HReg base;
+            HReg index;
+            Int  shift; /* 0, 1, 2 or 3 only */
+         } IRRS;
+      } Aam;
+   }
+   AMD64AMode;
+/* Constructors, one per tag. */
+extern AMD64AMode* AMD64AMode_IR   ( UInt, HReg );
+extern AMD64AMode* AMD64AMode_IRRS ( UInt, HReg, HReg, Int );
+
+extern AMD64AMode* dopyAMD64AMode ( AMD64AMode* );   /* duplicate an amode */
+
+extern void ppAMD64AMode ( AMD64AMode* );   /* pretty-print */
+
+
+/* --------- Operand, which can be reg, immediate or memory. --------- */
+
+typedef 
+   enum {
+      Armi_Imm,
+      Armi_Reg,
+      Armi_Mem
+   }
+   AMD64RMITag;
+
+typedef
+   struct {
+      AMD64RMITag tag;
+      union {
+         struct {
+            UInt imm32;
+         } Imm;
+         struct {
+            HReg reg;
+         } Reg;
+         struct {
+            AMD64AMode* am;
+         } Mem;
+      }
+      Armi;
+   }
+   AMD64RMI;
+/* Constructors, one per tag. */
+extern AMD64RMI* AMD64RMI_Imm ( UInt );
+extern AMD64RMI* AMD64RMI_Reg ( HReg );
+extern AMD64RMI* AMD64RMI_Mem ( AMD64AMode* );
+
+extern void ppAMD64RMI      ( AMD64RMI* );
+extern void ppAMD64RMI_lo32 ( AMD64RMI* );   /* presumably prints the 32-bit view -- confirm in host_amd64_defs.c */
+
+
+/* --------- Operand, which can be reg or immediate only. --------- */
+
+typedef 
+   enum {
+      Ari_Imm,
+      Ari_Reg
+   }
+   AMD64RITag;
+
+typedef
+   struct {
+      AMD64RITag tag;
+      union {
+         struct {
+            UInt imm32;
+         } Imm;
+         struct {
+            HReg reg;
+         } Reg;
+      }
+      Ari;
+   }
+   AMD64RI;
+/* Constructors, one per tag. */
+extern AMD64RI* AMD64RI_Imm ( UInt );
+extern AMD64RI* AMD64RI_Reg ( HReg );
+
+extern void ppAMD64RI ( AMD64RI* );   /* pretty-print */
+
+
+/* --------- Operand, which can be reg or memory only. --------- */
+
+typedef 
+   enum {
+      Arm_Reg,
+      Arm_Mem
+   }
+   AMD64RMTag;
+
+typedef
+   struct {
+      AMD64RMTag tag;
+      union {
+         struct {
+            HReg reg;
+         } Reg;
+         struct {
+            AMD64AMode* am;
+         } Mem;
+      }
+      Arm;
+   }
+   AMD64RM;
+/* Constructors, one per tag. */
+extern AMD64RM* AMD64RM_Reg ( HReg );
+extern AMD64RM* AMD64RM_Mem ( AMD64AMode* );
+
+extern void ppAMD64RM ( AMD64RM* );   /* pretty-print */
+
+
+/* --------- Instructions. --------- */
+
+/* --------- */
+typedef
+   enum {
+      Aun_NEG,   /* negq: two's-complement negate */
+      Aun_NOT    /* notq: bitwise complement */
+   }
+   AMD64UnaryOp;
+
+extern const HChar* showAMD64UnaryOp ( AMD64UnaryOp );
+
+
+/* --------- */
+typedef 
+   enum {
+      Aalu_INVALID,   /* sentinel */
+      Aalu_MOV,
+      Aalu_CMP,
+      Aalu_ADD, Aalu_SUB, Aalu_ADC, Aalu_SBB, 
+      Aalu_AND, Aalu_OR, Aalu_XOR,
+      Aalu_MUL
+   }
+   AMD64AluOp;
+
+extern const HChar* showAMD64AluOp ( AMD64AluOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Ash_INVALID,   /* sentinel */
+      Ash_SHL, Ash_SHR, Ash_SAR
+   }
+   AMD64ShiftOp;
+
+extern const HChar* showAMD64ShiftOp ( AMD64ShiftOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Afp_INVALID,   /* sentinel */
+      /* Binary */
+      Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, Afp_PREM, Afp_PREM1,
+      /* Unary */
+      Afp_SQRT,
+      Afp_SIN, Afp_COS, Afp_TAN,
+      Afp_ROUND, Afp_2XM1
+   }
+   A87FpOp;
+
+extern const HChar* showA87FpOp ( A87FpOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Asse_INVALID,   /* sentinel */
+      /* mov */
+      Asse_MOV,
+      /* Floating point binary */
+      Asse_ADDF, Asse_SUBF, Asse_MULF, Asse_DIVF,
+      Asse_MAXF, Asse_MINF,
+      Asse_CMPEQF, Asse_CMPLTF, Asse_CMPLEF, Asse_CMPUNF,
+      /* Floating point unary */
+      Asse_RCPF, Asse_RSQRTF, Asse_SQRTF, 
+      /* Bitwise */
+      Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
+      Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
+      Asse_QADD8U, Asse_QADD16U,
+      Asse_QADD8S, Asse_QADD16S,
+      Asse_SUB8, Asse_SUB16, Asse_SUB32, Asse_SUB64,
+      Asse_QSUB8U, Asse_QSUB16U,
+      Asse_QSUB8S, Asse_QSUB16S,
+      Asse_MUL16,
+      Asse_MULHI16U,
+      Asse_MULHI16S,
+      Asse_AVG8U, Asse_AVG16U,
+      Asse_MAX16S,
+      Asse_MAX8U,
+      Asse_MIN16S,
+      Asse_MIN8U,
+      Asse_CMPEQ8, Asse_CMPEQ16, Asse_CMPEQ32,
+      Asse_CMPGT8S, Asse_CMPGT16S, Asse_CMPGT32S,
+      Asse_SHL16, Asse_SHL32, Asse_SHL64,
+      Asse_SHR16, Asse_SHR32, Asse_SHR64,
+      Asse_SAR16, Asse_SAR32, /* NB: no SAR64 -- SSE2 has no 64-bit arithmetic right shift */
+      Asse_PACKSSD, Asse_PACKSSW, Asse_PACKUSW,
+      Asse_UNPCKHB, Asse_UNPCKHW, Asse_UNPCKHD, Asse_UNPCKHQ,
+      Asse_UNPCKLB, Asse_UNPCKLW, Asse_UNPCKLD, Asse_UNPCKLQ
+   }
+   AMD64SseOp;
+
+extern const HChar* showAMD64SseOp ( AMD64SseOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Ain_Imm64,       /* Generate 64-bit literal to register */
+      Ain_Alu64R,      /* 64-bit mov/arith/logical, dst=REG */
+      Ain_Alu64M,      /* 64-bit mov/arith/logical, dst=MEM */
+      Ain_Sh64,        /* 64-bit shift/rotate, dst=REG or MEM */
+      Ain_Test64,      /* 64-bit test (AND, set flags, discard result) */
+      Ain_Unary64,     /* 64-bit not and neg */
+      Ain_Lea64,       /* 64-bit compute EA into a reg */
+      Ain_Alu32R,      /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
+      Ain_MulL,        /* widening multiply */
+      Ain_Div,         /* div and mod */
+      Ain_Push,        /* push 64-bit value on stack */
+      Ain_Call,        /* call to address in register */
+      Ain_XDirect,     /* direct transfer to GA (guest address) */
+      Ain_XIndir,      /* indirect transfer to GA */
+      Ain_XAssisted,   /* assisted transfer to GA */
+      Ain_CMov64,      /* conditional move, 64-bit reg-reg only */
+      Ain_CLoad,       /* cond. load to int reg, 32 bit ZX or 64 bit only */
+      Ain_CStore,      /* cond. store from int reg, 32 or 64 bit only */
+      Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
+      Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
+      Ain_Store,       /* store 32/16/8 bit value in memory */
+      Ain_Set64,       /* convert condition code to 64-bit value */
+      Ain_Bsfr64,      /* 64-bit bsf/bsr */
+      Ain_MFence,      /* mem fence */
+      Ain_ACAS,        /* 8/16/32/64-bit lock;cmpxchg */
+      Ain_DACAS,       /* lock;cmpxchg8b/16b (doubleword ACAS, 2 x
+                          32-bit or 2 x 64-bit only) */
+      Ain_A87Free,     /* free up x87 registers */
+      Ain_A87PushPop,  /* x87 loads/stores */
+      Ain_A87FpOp,     /* x87 operations */
+      Ain_A87LdCW,     /* load x87 control word */
+      Ain_A87StSW,     /* store x87 status word */
+      Ain_LdMXCSR,     /* load %mxcsr */
+      Ain_SseUComIS,   /* ucomisd/ucomiss, then get %rflags into int
+                          register */
+      Ain_SseSI2SF,    /* scalar 32/64 int to 32/64 float conversion */
+      Ain_SseSF2SI,    /* scalar 32/64 float to 32/64 int conversion */
+      Ain_SseSDSS,     /* scalar float32 to/from float64 */
+      Ain_SseLdSt,     /* SSE load/store 32/64/128 bits, no alignment
+                          constraints, upper 96/64/0 bits arbitrary */
+      Ain_SseLdzLO,    /* SSE load low 32/64 bits, zero remainder of reg */
+      Ain_Sse32Fx4,    /* SSE binary, 32Fx4 */
+      Ain_Sse32FLo,    /* SSE binary, 32F in lowest lane only */
+      Ain_Sse64Fx2,    /* SSE binary, 64Fx2 */
+      Ain_Sse64FLo,    /* SSE binary, 64F in lowest lane only */
+      Ain_SseReRg,     /* SSE binary general reg-reg, Re, Rg */
+      Ain_SseCMov,     /* SSE conditional move */
+      Ain_SseShuf,     /* SSE2 shuffle (pshufd) */
+      //uu Ain_AvxLdSt,     /* AVX load/store 256 bits,
+      //uu                     no alignment constraints */
+      //uu Ain_AvxReRg,     /* AVX binary general reg-reg, Re, Rg */
+      Ain_EvCheck,     /* Event check (see evCheckSzB_AMD64 for its size) */
+      Ain_ProfInc      /* 64-bit profile counter increment; imm patched later by patchProfInc_AMD64 */
+   }
+   AMD64InstrTag;
+
+/* Destinations are on the RIGHT (second operand) */
+
+typedef
+   struct {
+      AMD64InstrTag tag;
+      union {
+         struct {
+            ULong imm64;
+            HReg  dst;
+         } Imm64;
+         struct {
+            AMD64AluOp op;
+            AMD64RMI*  src;
+            HReg       dst;
+         } Alu64R;
+         struct {
+            AMD64AluOp  op;
+            AMD64RI*    src;
+            AMD64AMode* dst;
+         } Alu64M;
+         struct {
+            AMD64ShiftOp op;
+            UInt         src;  /* shift amount, or 0 means %cl */
+            HReg         dst;
+         } Sh64;
+         struct {
+            UInt   imm32;
+            HReg   dst;
+         } Test64;
+         /* Not and Neg */
+         struct {
+            AMD64UnaryOp op;
+            HReg         dst;
+         } Unary64;
+         /* 64-bit compute EA into a reg */
+         struct {
+            AMD64AMode* am;
+            HReg        dst;
+         } Lea64;
+         /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
+         struct {
+            AMD64AluOp op;
+            AMD64RMI*  src;
+            HReg       dst;
+         } Alu32R;
+         /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
+            r/m64 */
+         struct {
+            Bool     syned;
+            AMD64RM* src;
+         } MulL;
+          /* amd64 div/idiv instruction.  Modifies RDX and RAX and
+	     reads src. */
+         struct {
+            Bool     syned;
+            Int      sz; /* 4 or 8 only */
+            AMD64RM* src;
+         } Div;
+         struct {
+            AMD64RMI* src;
+         } Push;
+         /* Pseudo-insn.  Call target (an absolute address), on given
+            condition (which could be Xcc_ALWAYS). */
+         struct {
+            AMD64CondCode cond;
+            Addr64        target;
+            Int           regparms; /* 0 .. 6 */
+            RetLoc        rloc;     /* where the return value will be */
+         } Call;
+         /* Update the guest RIP value, then exit requesting to chain
+            to it.  May be conditional. */
+         struct {
+            Addr64        dstGA;    /* next guest address */
+            AMD64AMode*   amRIP;    /* amode in guest state for RIP */
+            AMD64CondCode cond;     /* can be Acc_ALWAYS */
+            Bool          toFastEP; /* chain to the slow or fast point? */
+         } XDirect;
+         /* Boring transfer to a guest address not known at JIT time.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg          dstGA;
+            AMD64AMode*   amRIP;
+            AMD64CondCode cond; /* can be Acc_ALWAYS */
+         } XIndir;
+         /* Assisted transfer to a guest address, most general case.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg          dstGA;
+            AMD64AMode*   amRIP;
+            AMD64CondCode cond; /* can be Acc_ALWAYS */
+            IRJumpKind    jk;
+         } XAssisted;
+         /* Mov src to dst on the given condition, which may not
+            be the bogus Acc_ALWAYS. */
+         struct {
+            AMD64CondCode cond;
+            HReg          src;
+            HReg          dst;
+         } CMov64;
+         /* conditional load to int reg, 32 bit ZX or 64 bit only.
+            cond may not be Acc_ALWAYS. */
+         struct {
+            AMD64CondCode cond;
+            UChar         szB; /* 4 or 8 only */
+            AMD64AMode*   addr;
+            HReg          dst;
+         } CLoad;
+         /* cond. store from int reg, 32 or 64 bit only.
+            cond may not be Acc_ALWAYS. */
+         struct {
+            AMD64CondCode cond;
+            UChar         szB; /* 4 or 8 only */
+            HReg          src;
+            AMD64AMode*   addr;
+         } CStore;
+         /* reg-reg move, sx-ing/zx-ing top half */
+         struct {
+            Bool syned;
+            HReg src;
+            HReg dst;
+         } MovxLQ;
+         /* Sign/Zero extending loads.  Dst size is always 64 bits. */
+         struct {
+            UChar       szSmall; /* only 1, 2 or 4 */
+            Bool        syned;
+            AMD64AMode* src;
+            HReg        dst;
+         } LoadEX;
+         /* 32/16/8 bit stores. */
+         struct {
+            UChar       sz; /* only 1, 2 or 4 */
+            HReg        src;
+            AMD64AMode* dst;
+         } Store;
+         /* Convert an amd64 condition code to a 64-bit value (0 or 1). */
+         struct {
+            AMD64CondCode cond;
+            HReg          dst;
+         } Set64;
+         /* 64-bit bsf or bsr. */
+         struct {
+            Bool isFwds;
+            HReg src;
+            HReg dst;
+         } Bsfr64;
+         /* Mem fence.  In short, an insn which flushes all preceding
+            loads and stores as much as possible before continuing.
+            On AMD64 we emit a real "mfence". */
+         struct {
+         } MFence;
+         struct {
+            AMD64AMode* addr;
+            UChar       sz; /* 1, 2, 4 or 8 */
+         } ACAS;
+         struct {
+            AMD64AMode* addr;
+            UChar       sz; /* 4 or 8 only */
+         } DACAS;
+
+         /* --- X87 --- */
+
+         /* A very minimal set of x87 insns, that operate exactly in a
+            stack-like way so no need to think about x87 registers. */
+
+         /* Do 'ffree' on %st(7) .. %st(7-nregs) */
+         struct {
+            Int nregs; /* 1 <= nregs <= 7 */
+         } A87Free;
+
+         /* Push a 32- or 64-bit FP value from memory onto the stack,
+            or move a value from the stack to memory and remove it
+            from the stack. */
+         struct {
+            AMD64AMode* addr;
+            Bool        isPush;
+            UChar       szB; /* 4 or 8 */
+         } A87PushPop;
+
+         /* Do an operation on the top-of-stack.  This can be unary, in
+            which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
+            %st0, %st1 ). */
+         struct {
+            A87FpOp op;
+         } A87FpOp;
+
+         /* Load the FPU control word. */
+         struct {
+            AMD64AMode* addr;
+         } A87LdCW;
+
+         /* Store the FPU status word (fstsw m16) */
+         struct {
+            AMD64AMode* addr;
+         } A87StSW;
+
+         /* --- SSE --- */
+
+         /* Load 32 bits into %mxcsr. */
+         struct {
+            AMD64AMode* addr;
+         }
+         LdMXCSR;
+         /* ucomisd/ucomiss, then get %rflags into int register */
+         struct {
+            UChar   sz;   /* 4 or 8 only */
+            HReg    srcL; /* xmm */
+            HReg    srcR; /* xmm */
+            HReg    dst;  /* int */
+         } SseUComIS;
+         /* scalar 32/64 int to 32/64 float conversion */
+         struct {
+            UChar szS; /* 4 or 8 */
+            UChar szD; /* 4 or 8 */
+            HReg  src; /* i class */
+            HReg  dst; /* v class */
+         } SseSI2SF;
+         /* scalar 32/64 float to 32/64 int conversion */
+         struct {
+            UChar szS; /* 4 or 8 */
+            UChar szD; /* 4 or 8 */
+            HReg  src; /* v class */
+            HReg  dst; /* i class */
+         } SseSF2SI;
+         /* scalar float32 to/from float64 */
+         struct {
+            Bool from64; /* True: 64->32; False: 32->64 */
+            HReg src;
+            HReg dst;
+         } SseSDSS;
+         struct {
+            Bool        isLoad;
+            UChar       sz; /* 4, 8 or 16 only */
+            HReg        reg;
+            AMD64AMode* addr;
+         } SseLdSt;
+         struct {
+            Int         sz; /* 4 or 8 only */
+            HReg        reg;
+            AMD64AMode* addr;
+         } SseLdzLO;
+         struct {
+            AMD64SseOp op;
+            HReg       src;
+            HReg       dst;
+         } Sse32Fx4;
+         struct {
+            AMD64SseOp op;
+            HReg       src;
+            HReg       dst;
+         } Sse32FLo;
+         struct {
+            AMD64SseOp op;
+            HReg       src;
+            HReg       dst;
+         } Sse64Fx2;
+         struct {
+            AMD64SseOp op;
+            HReg       src;
+            HReg       dst;
+         } Sse64FLo;
+         struct {
+            AMD64SseOp op;
+            HReg       src;
+            HReg       dst;
+         } SseReRg;
+         /* Mov src to dst on the given condition, which may not
+            be the bogus Xcc_ALWAYS. */
+         struct {
+            AMD64CondCode cond;
+            HReg          src;
+            HReg          dst;
+         } SseCMov;
+         struct {
+            Int    order; /* 0 <= order <= 0xFF */
+            HReg   src;
+            HReg   dst;
+         } SseShuf;
+         //uu struct {
+         //uu    Bool        isLoad;
+         //uu    HReg        reg;
+         //uu    AMD64AMode* addr;
+         //uu } AvxLdSt;
+         //uu struct {
+         //uu    AMD64SseOp op;
+         //uu    HReg       src;
+         //uu    HReg       dst;
+         //uu } AvxReRg;
+         struct {
+            AMD64AMode* amCounter;
+            AMD64AMode* amFailAddr;
+         } EvCheck;
+         struct {
+            /* No fields.  The address of the counter to inc is
+               installed later, post-translation, by patching it in,
+               as it is not known at translation time. */
+         } ProfInc;
+
+      } Ain;
+   }
+   AMD64Instr;
+
+extern AMD64Instr* AMD64Instr_Imm64      ( ULong imm64, HReg dst );
+extern AMD64Instr* AMD64Instr_Alu64R     ( AMD64AluOp, AMD64RMI*, HReg );
+extern AMD64Instr* AMD64Instr_Alu64M     ( AMD64AluOp, AMD64RI*,  AMD64AMode* );
+extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, HReg dst );
+extern AMD64Instr* AMD64Instr_Lea64      ( AMD64AMode* am, HReg dst );
+extern AMD64Instr* AMD64Instr_Alu32R     ( AMD64AluOp, AMD64RMI*, HReg );
+extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, HReg );
+extern AMD64Instr* AMD64Instr_Test64     ( UInt imm32, HReg dst );
+extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, AMD64RM* );
+extern AMD64Instr* AMD64Instr_Div        ( Bool syned, Int sz, AMD64RM* );
+extern AMD64Instr* AMD64Instr_Push       ( AMD64RMI* );
+extern AMD64Instr* AMD64Instr_Call       ( AMD64CondCode, Addr64, Int, RetLoc );
+extern AMD64Instr* AMD64Instr_XDirect    ( Addr64 dstGA, AMD64AMode* amRIP,
+                                           AMD64CondCode cond, Bool toFastEP );
+extern AMD64Instr* AMD64Instr_XIndir     ( HReg dstGA, AMD64AMode* amRIP,
+                                           AMD64CondCode cond );
+extern AMD64Instr* AMD64Instr_XAssisted  ( HReg dstGA, AMD64AMode* amRIP,
+                                           AMD64CondCode cond, IRJumpKind jk );
+extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, HReg src, HReg dst );
+extern AMD64Instr* AMD64Instr_CLoad      ( AMD64CondCode cond, UChar szB,
+                                           AMD64AMode* addr, HReg dst );
+extern AMD64Instr* AMD64Instr_CStore     ( AMD64CondCode cond, UChar szB,
+                                           HReg src, AMD64AMode* addr );
+extern AMD64Instr* AMD64Instr_MovxLQ     ( Bool syned, HReg src, HReg dst );
+extern AMD64Instr* AMD64Instr_LoadEX     ( UChar szSmall, Bool syned,
+                                           AMD64AMode* src, HReg dst );
+extern AMD64Instr* AMD64Instr_Store      ( UChar sz, HReg src, AMD64AMode* dst );
+extern AMD64Instr* AMD64Instr_Set64      ( AMD64CondCode cond, HReg dst );
+extern AMD64Instr* AMD64Instr_Bsfr64     ( Bool isFwds, HReg src, HReg dst );
+extern AMD64Instr* AMD64Instr_MFence     ( void );
+extern AMD64Instr* AMD64Instr_ACAS       ( AMD64AMode* addr, UChar sz );
+extern AMD64Instr* AMD64Instr_DACAS      ( AMD64AMode* addr, UChar sz );
+
+extern AMD64Instr* AMD64Instr_A87Free    ( Int nregs );
+extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB );
+extern AMD64Instr* AMD64Instr_A87FpOp    ( A87FpOp op );
+extern AMD64Instr* AMD64Instr_A87LdCW    ( AMD64AMode* addr );
+extern AMD64Instr* AMD64Instr_A87StSW    ( AMD64AMode* addr );
+extern AMD64Instr* AMD64Instr_LdMXCSR    ( AMD64AMode* );
+extern AMD64Instr* AMD64Instr_SseUComIS  ( Int sz, HReg srcL, HReg srcR, HReg dst );
+extern AMD64Instr* AMD64Instr_SseSI2SF   ( Int szS, Int szD, HReg src, HReg dst );
+extern AMD64Instr* AMD64Instr_SseSF2SI   ( Int szS, Int szD, HReg src, HReg dst );
+extern AMD64Instr* AMD64Instr_SseSDSS    ( Bool from64, HReg src, HReg dst );
+extern AMD64Instr* AMD64Instr_SseLdSt    ( Bool isLoad, Int sz, HReg, AMD64AMode* );
+extern AMD64Instr* AMD64Instr_SseLdzLO   ( Int sz, HReg, AMD64AMode* );
+extern AMD64Instr* AMD64Instr_Sse32Fx4   ( AMD64SseOp, HReg, HReg );
+extern AMD64Instr* AMD64Instr_Sse32FLo   ( AMD64SseOp, HReg, HReg );
+extern AMD64Instr* AMD64Instr_Sse64Fx2   ( AMD64SseOp, HReg, HReg );
+extern AMD64Instr* AMD64Instr_Sse64FLo   ( AMD64SseOp, HReg, HReg );
+extern AMD64Instr* AMD64Instr_SseReRg    ( AMD64SseOp, HReg, HReg );
+extern AMD64Instr* AMD64Instr_SseCMov    ( AMD64CondCode, HReg src, HReg dst );
+extern AMD64Instr* AMD64Instr_SseShuf    ( Int order, HReg src, HReg dst );
+//uu extern AMD64Instr* AMD64Instr_AvxLdSt    ( Bool isLoad, HReg, AMD64AMode* );
+//uu extern AMD64Instr* AMD64Instr_AvxReRg    ( AMD64SseOp, HReg, HReg );
+extern AMD64Instr* AMD64Instr_EvCheck    ( AMD64AMode* amCounter,
+                                           AMD64AMode* amFailAddr );
+extern AMD64Instr* AMD64Instr_ProfInc    ( void );
+
+
+extern void ppAMD64Instr ( const AMD64Instr*, Bool );
+
+/* Some functions that insulate the register allocator from details
+   of the underlying instruction set. */
+extern void getRegUsage_AMD64Instr ( HRegUsage*, const AMD64Instr*, Bool );
+extern void mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
+extern Bool isMove_AMD64Instr      ( const AMD64Instr*, HReg*, HReg* );
+extern Int          emit_AMD64Instr   ( /*MB_MOD*/Bool* is_profInc,
+                                        UChar* buf, Int nbuf,
+                                        const AMD64Instr* i, 
+                                        Bool mode64,
+                                        VexEndness endness_host,
+                                        const void* disp_cp_chain_me_to_slowEP,
+                                        const void* disp_cp_chain_me_to_fastEP,
+                                        const void* disp_cp_xindir,
+                                        const void* disp_cp_xassisted );
+
+extern void genSpill_AMD64  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                              HReg rreg, Int offset, Bool );
+extern void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                              HReg rreg, Int offset, Bool );
+
+extern const RRegUniverse* getRRegUniverse_AMD64 ( void );
+
+extern HInstrArray* iselSB_AMD64           ( const IRSB*, 
+                                             VexArch,
+                                             const VexArchInfo*,
+                                             const VexAbiInfo*,
+                                             Int offs_Host_EvC_Counter,
+                                             Int offs_Host_EvC_FailAddr,
+                                             Bool chainingAllowed,
+                                             Bool addProfInc,
+                                             Addr max_ga );
+
+/* How big is an event check?  This is kind of a kludge because it
+   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
+   and so assumes that they are both <= 128, and so can use the short
+   offset encoding.  This is all checked with assertions, so in the
+   worst case we will merely assert at startup. */
+extern Int evCheckSzB_AMD64 (void);
+
+/* Perform a chaining and unchaining of an XDirect jump. */
+extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+                                          void* place_to_chain,
+                                          const void* disp_cp_chain_me_EXPECTED,
+                                          const void* place_to_jump_to );
+
+extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+                                            void* place_to_unchain,
+                                            const void* place_to_jump_to_EXPECTED,
+                                            const void* disp_cp_chain_me );
+
+/* Patch the counter location into an existing ProfInc point. */
+extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+                                          void*  place_to_patch,
+                                          const ULong* location_of_counter );
+
+
+#endif /* ndef __VEX_HOST_AMD64_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                   host_amd64_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_amd64_isel.c b/VEX/priv/host_amd64_isel.c
new file mode 100644
index 0000000..3403d54
--- /dev/null
+++ b/VEX/priv/host_amd64_isel.c
@@ -0,0 +1,5009 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 host_amd64_isel.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "ir_match.h"
+#include "main_util.h"
+#include "main_globals.h"
+#include "host_generic_regs.h"
+#include "host_generic_simd64.h"
+#include "host_generic_simd128.h"
+#include "host_generic_simd256.h"
+#include "host_generic_maddf.h"
+#include "host_amd64_defs.h"
+
+
+/*---------------------------------------------------------*/
+/*--- x87/SSE control word stuff                        ---*/
+/*---------------------------------------------------------*/
+
+/* Vex-generated code expects to run with the FPU set as follows: all
+   exceptions masked, round-to-nearest, precision = 53 bits.  This
+   corresponds to a FPU control word value of 0x027F.
+
+   Similarly the SSE control word (%mxcsr) should be 0x1F80.
+
+   %fpucw and %mxcsr should have these values on entry to
+   Vex-generated code, and those values should be
+   unchanged at exit.
+*/
+
+#define DEFAULT_FPUCW 0x027F
+
+#define DEFAULT_MXCSR 0x1F80
+
+/* debugging only, do not use */
+/* define DEFAULT_FPUCW 0x037F */
+
+
+/*---------------------------------------------------------*/
+/*--- misc helpers                                      ---*/
+/*---------------------------------------------------------*/
+
+/* These are duplicated in guest-amd64/toIR.c */
+/* Shorthand constructor: build a unary-op IR expression node. */
+static IRExpr* unop ( IROp op, IRExpr* a )
+{
+   return IRExpr_Unop(op, a);
+}
+
+/* Shorthand constructor: build a binary-op IR expression node. */
+static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
+{
+   return IRExpr_Binop(op, a1, a2);
+}
+
+/* Shorthand constructor: build a Binder node, used as a wildcard
+   in IR pattern matching (see ir_match.h). */
+static IRExpr* bind ( Int binder )
+{
+   return IRExpr_Binder(binder);
+}
+
+/* Is |e| the constant U8 zero? */
+static Bool isZeroU8 ( IRExpr* e )
+{
+   return e->tag == Iex_Const
+          && e->Iex.Const.con->tag == Ico_U8
+          && e->Iex.Const.con->Ico.U8 == 0;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISelEnv                                           ---*/
+/*---------------------------------------------------------*/
+
+/* This carries around:
+
+   - A mapping from IRTemp to IRType, giving the type of any IRTemp we
+     might encounter.  This is computed before insn selection starts,
+     and does not change.
+
+   - A mapping from IRTemp to HReg.  This tells the insn selector
+     which virtual register is associated with each IRTemp
+     temporary.  This is computed before insn selection starts, and
+     does not change.  We expect this mapping to map precisely the
+     same set of IRTemps as the type mapping does.
+
+        - vregmap   holds the primary register for the IRTemp.
+        - vregmapHI is only used for 128-bit integer-typed
+             IRTemps.  It holds the identity of a second
+             64-bit virtual HReg, which holds the high half
+             of the value.
+
+   - The host subarchitecture we are selecting insns for.  
+     This is set at the start and does not change.
+
+   - The code array, that is, the insns selected so far.
+
+   - A counter, for generating new virtual registers.
+
+   - A Bool for indicating whether we may generate chain-me
+     instructions for control flow transfers, or whether we must use
+     XAssisted.
+
+   - The maximum guest address of any guest insn in this block.
+     Actually, the address of the highest-addressed byte from any insn
+     in this block.  Is set at the start and does not change.  This is
+     used for detecting jumps which are definitely forward-edges from
+     this block, and therefore can be made (chained) to the fast entry
+     point of the destination, thereby avoiding the destination's
+     event check.
+
+   Note, this is all host-independent.  (JRS 20050201: well, kinda
+   ... not completely.  Compare with ISelEnv for X86.)
+*/
+
+typedef
+   struct {
+      /* Constant -- are set at the start and do not change. */
+      IRTypeEnv*   type_env;        /* IRTemp -> IRType for this block */
+
+      HReg*        vregmap;         /* IRTemp -> primary virtual reg */
+      HReg*        vregmapHI;       /* high half, 128-bit ints only */
+      Int          n_vregmap;       /* number of entries in both maps */
+
+      UInt         hwcaps;          /* host capability bits */
+
+      Bool         chainingAllowed; /* may emit chain-me transfers? */
+      Addr64       max_ga;          /* highest guest addr in this block */
+
+      /* These are modified as we go along. */
+      HInstrArray* code;            /* the insns selected so far */
+      Int          vreg_ctr;        /* counter for new virtual regs */
+   }
+   ISelEnv;
+
+
+/* Return the (single) virtual register associated with IRTemp |tmp|. */
+static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp )
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   return env->vregmap[tmp];
+}
+
+/* Return the pair of virtual registers (hi half, lo half) associated
+   with |tmp|; only valid for temps that have a vregmapHI entry. */
+static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO, 
+                               ISelEnv* env, IRTemp tmp )
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
+   *vrLO = env->vregmap[tmp];
+   *vrHI = env->vregmapHI[tmp];
+}
+
+/* Append |instr| to the code being generated, printing it first if
+   vcode tracing is enabled. */
+static void addInstr ( ISelEnv* env, AMD64Instr* instr )
+{
+   addHInstr(env->code, instr);
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      ppAMD64Instr(instr, True);
+      vex_printf("\n");
+   }
+}
+
+/* Allocate a new 64-bit integer-class virtual register. */
+static HReg newVRegI ( ISelEnv* env )
+{
+   HReg reg = mkHReg(True/*virtual reg*/, HRcInt64, 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+/* Allocate a new 128-bit vector-class virtual register. */
+static HReg newVRegV ( ISelEnv* env )
+{
+   HReg reg = mkHReg(True/*virtual reg*/, HRcVec128, 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Forward declarations                        ---*/
+/*---------------------------------------------------------*/
+
+/* These are organised as iselXXX and iselXXX_wrk pairs.  The
+   iselXXX_wrk do the real work, but are not to be called directly.
+   For each XXX, iselXXX calls its iselXXX_wrk counterpart, then
+   checks that all returned registers are virtual.  You should not
+   call the _wrk version directly.
+*/
+static AMD64RMI*     iselIntExpr_RMI_wrk ( ISelEnv* env, IRExpr* e );
+static AMD64RMI*     iselIntExpr_RMI     ( ISelEnv* env, IRExpr* e );
+
+static AMD64RI*      iselIntExpr_RI_wrk  ( ISelEnv* env, IRExpr* e );
+static AMD64RI*      iselIntExpr_RI      ( ISelEnv* env, IRExpr* e );
+
+static AMD64RM*      iselIntExpr_RM_wrk  ( ISelEnv* env, IRExpr* e );
+static AMD64RM*      iselIntExpr_RM      ( ISelEnv* env, IRExpr* e );
+
+static HReg          iselIntExpr_R_wrk   ( ISelEnv* env, IRExpr* e );
+static HReg          iselIntExpr_R       ( ISelEnv* env, IRExpr* e );
+
+static AMD64AMode*   iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e );
+static AMD64AMode*   iselIntExpr_AMode     ( ISelEnv* env, IRExpr* e );
+
+static void          iselInt128Expr_wrk ( /*OUT*/HReg* rHi, HReg* rLo, 
+                                          ISelEnv* env, IRExpr* e );
+static void          iselInt128Expr     ( /*OUT*/HReg* rHi, HReg* rLo, 
+                                          ISelEnv* env, IRExpr* e );
+
+static AMD64CondCode iselCondCode_wrk    ( ISelEnv* env, IRExpr* e );
+static AMD64CondCode iselCondCode        ( ISelEnv* env, IRExpr* e );
+
+static HReg          iselDblExpr_wrk     ( ISelEnv* env, IRExpr* e );
+static HReg          iselDblExpr         ( ISelEnv* env, IRExpr* e );
+
+static HReg          iselFltExpr_wrk     ( ISelEnv* env, IRExpr* e );
+static HReg          iselFltExpr         ( ISelEnv* env, IRExpr* e );
+
+static HReg          iselVecExpr_wrk     ( ISelEnv* env, IRExpr* e );
+static HReg          iselVecExpr         ( ISelEnv* env, IRExpr* e );
+
+static void          iselDVecExpr_wrk ( /*OUT*/HReg* rHi, HReg* rLo, 
+                                        ISelEnv* env, IRExpr* e );
+static void          iselDVecExpr     ( /*OUT*/HReg* rHi, HReg* rLo, 
+                                        ISelEnv* env, IRExpr* e );
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Misc helpers                                ---*/
+/*---------------------------------------------------------*/
+
+/* Sanity-check an amode: all register fields must be 64-bit
+   integer-class, and virtual -- except that an Aam_IR base may also
+   be the real register %rbp (the guest state pointer). */
+static Bool sane_AMode ( AMD64AMode* am )
+{
+   switch (am->tag) {
+      case Aam_IR:
+         /* base + immediate offset */
+         return 
+            toBool( hregClass(am->Aam.IR.reg) == HRcInt64
+                    && (hregIsVirtual(am->Aam.IR.reg)
+                        || sameHReg(am->Aam.IR.reg, hregAMD64_RBP())) );
+      case Aam_IRRS:
+         /* base + index*scale + offset */
+         return 
+            toBool( hregClass(am->Aam.IRRS.base) == HRcInt64
+                    && hregIsVirtual(am->Aam.IRRS.base)
+                    && hregClass(am->Aam.IRRS.index) == HRcInt64
+                    && hregIsVirtual(am->Aam.IRRS.index) );
+      default:
+        vpanic("sane_AMode: unknown amd64 amode tag");
+   }
+}
+
+
+/* Can the lower 32 bits be signedly widened to produce the whole
+   64-bit value?  In other words, are the top 33 bits either all 0 or
+   all 1 ? */
+/* True iff sign-extending the low 32 bits of |x| reproduces all 64
+   bits, i.e. the top 33 bits are all 0 or all 1.  NOTE(review):
+   relies on >> of a negative Long being an arithmetic shift, which
+   is implementation-defined in C -- true for the compilers VEX
+   targets, but worth knowing. */
+static Bool fitsIn32Bits ( ULong x )
+{
+   Long y1;
+   y1 = x << 32;
+   y1 >>=/*s*/ 32;
+   return toBool(x == y1);
+}
+
+/* Is this a 64-bit zero expression? */
+
+/* Is |e| the constant U64 zero? */
+static Bool isZeroU64 ( IRExpr* e )
+{
+   return e->tag == Iex_Const
+          && e->Iex.Const.con->tag == Ico_U64
+          && e->Iex.Const.con->Ico.U64 == 0ULL;
+}
+
+/* Is |e| the constant U32 zero? */
+static Bool isZeroU32 ( IRExpr* e )
+{
+   return e->tag == Iex_Const
+          && e->Iex.Const.con->tag == Ico_U32
+          && e->Iex.Const.con->Ico.U32 == 0;
+}
+
+/* Make a int reg-reg move. */
+
+/* Build an integer (64-bit) reg-reg move instruction. */
+static AMD64Instr* mk_iMOVsd_RR ( HReg src, HReg dst )
+{
+   vassert(hregClass(src) == HRcInt64);
+   vassert(hregClass(dst) == HRcInt64);
+   return AMD64Instr_Alu64R(Aalu_MOV, AMD64RMI_Reg(src), dst);
+}
+
+/* Make a vector (128 bit) reg-reg move. */
+
+/* Build a vector (128-bit) reg-reg move instruction. */
+static AMD64Instr* mk_vMOVsd_RR ( HReg src, HReg dst )
+{
+   vassert(hregClass(src) == HRcVec128);
+   vassert(hregClass(dst) == HRcVec128);
+   return AMD64Instr_SseReRg(Asse_MOV, src, dst);
+}
+
+/* Advance/retreat %rsp by n. */
+
+/* Pop |n| bytes: emit "add $n, %rsp".  |n| must be a small positive
+   multiple of 8 so the stack stays 8-aligned. */
+static void add_to_rsp ( ISelEnv* env, Int n )
+{
+   vassert(n > 0 && n < 256 && (n%8) == 0);
+   addInstr(env, 
+            AMD64Instr_Alu64R(Aalu_ADD, AMD64RMI_Imm(n), 
+                                        hregAMD64_RSP()));
+}
+
+/* Reserve |n| bytes: emit "sub $n, %rsp".  Same constraints on |n|
+   as add_to_rsp. */
+static void sub_from_rsp ( ISelEnv* env, Int n )
+{
+   vassert(n > 0 && n < 256 && (n%8) == 0);
+   addInstr(env, 
+            AMD64Instr_Alu64R(Aalu_SUB, AMD64RMI_Imm(n), 
+                                        hregAMD64_RSP()));
+}
+
+/* Push 64-bit constants on the stack. */
+/* Emit code to push the 64-bit constant |uimm64| on the stack. */
+static void push_uimm64( ISelEnv* env, ULong uimm64 )
+{
+   /* If uimm64 can be expressed as the sign extension of its
+      lower 32 bits, we can do it the easy way. */
+   Long simm64 = (Long)uimm64;
+   if ( simm64 == ((Long)(uimm64 << 32) >> 32) ) {
+      /* pushq sign-extends a 32-bit immediate to 64 bits. */
+      addInstr( env, AMD64Instr_Push(AMD64RMI_Imm( (UInt)uimm64 )) );
+   } else {
+      /* Otherwise materialise the constant in a temp reg and push
+         that. */
+      HReg tmp = newVRegI(env);
+      addInstr( env, AMD64Instr_Imm64(uimm64, tmp) );
+      addInstr( env, AMD64Instr_Push(AMD64RMI_Reg(tmp)) );
+   }
+}
+
+
+/* Used only in doHelperCall.  If possible, produce a single
+   instruction which computes 'e' into 'dst'.  If not possible, return
+   NULL. */
+
+/* If |e| can be computed into |dst| with exactly one instruction,
+   return that instruction; otherwise return NULL.  Used by
+   doHelperCall to decide whether the fast argument-marshalling
+   scheme is safe for this argument. */
+static AMD64Instr* iselIntExpr_single_instruction ( ISelEnv* env,
+                                                    HReg     dst,
+                                                    IRExpr*  e )
+{
+   /* Per comments in doHelperCall below, appearance of
+      Iex_VECRET implies ill-formed IR. */
+   vassert(e->tag != Iex_VECRET);
+
+   /* In this case we give out a copy of the BaseBlock pointer. */
+   if (UNLIKELY(e->tag == Iex_BBPTR)) {
+      return mk_iMOVsd_RR( hregAMD64_RBP(), dst );
+   }
+
+   vassert(typeOfIRExpr(env->type_env, e) == Ity_I64);
+
+   /* 64-bit constant: movq imm,dst -- either a sign-extended 32-bit
+      immediate or a full 64-bit immediate load. */
+   if (e->tag == Iex_Const) {
+      vassert(e->Iex.Const.con->tag == Ico_U64);
+      if (fitsIn32Bits(e->Iex.Const.con->Ico.U64)) {
+         return AMD64Instr_Alu64R(
+                   Aalu_MOV,
+                   AMD64RMI_Imm(toUInt(e->Iex.Const.con->Ico.U64)),
+                   dst
+                );
+      } else {
+         return AMD64Instr_Imm64(e->Iex.Const.con->Ico.U64, dst);
+      }
+   }
+
+   /* IRTemp: a plain reg-reg move from its vreg. */
+   if (e->tag == Iex_RdTmp) {
+      HReg src = lookupIRTemp(env, e->Iex.RdTmp.tmp);
+      return mk_iMOVsd_RR(src, dst);
+   }
+
+   /* Guest-state read: a single load relative to %rbp. */
+   if (e->tag == Iex_Get) {
+      vassert(e->Iex.Get.ty == Ity_I64);
+      return AMD64Instr_Alu64R(
+                Aalu_MOV,
+                AMD64RMI_Mem(
+                   AMD64AMode_IR(e->Iex.Get.offset,
+                                 hregAMD64_RBP())),
+                dst);
+   }
+
+   /* 32Uto64 of a temp: a single movl (zero-extending) move. */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_32Uto64 
+       && e->Iex.Unop.arg->tag == Iex_RdTmp) {
+      HReg src = lookupIRTemp(env, e->Iex.Unop.arg->Iex.RdTmp.tmp);
+      return AMD64Instr_MovxLQ(False, src, dst);
+   }
+
+   if (0) { ppIRExpr(e); vex_printf("\n"); }
+
+   /* Anything else needs more than one instruction. */
+   return NULL;
+}
+
+
+/* Do a complete function call.  |guard| is a Ity_Bit expression
+   indicating whether or not the call happens.  If guard==NULL, the
+   call is unconditional.  |retloc| is set to indicate where the
+   return value is after the call.  The caller (of this fn) must
+   generate code to add |stackAdjustAfterCall| to the stack pointer
+   after the call is done. */
+
+static
+void doHelperCall ( /*OUT*/UInt*   stackAdjustAfterCall,
+                    /*OUT*/RetLoc* retloc,
+                    ISelEnv* env,
+                    IRExpr* guard,
+                    IRCallee* cee, IRType retTy, IRExpr** args )
+{
+   AMD64CondCode cc;
+   HReg          argregs[6];
+   HReg          tmpregs[6];
+   AMD64Instr*   fastinstrs[6];
+   UInt          n_args, i;
+
+   /* Set default returns.  We'll update them later if needed. */
+   *stackAdjustAfterCall = 0;
+   *retloc               = mk_RetLoc_INVALID();
+
+   /* These are used for cross-checking that IR-level constraints on
+      the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+   UInt nVECRETs = 0;
+   UInt nBBPTRs  = 0;
+
+   /* Marshal args for a call and do the call.
+
+      This function only deals with a tiny set of possibilities, which
+      cover all helpers in practice.  The restrictions are that only
+      arguments in registers are supported, hence only 6x64 integer
+      bits in total can be passed.  In fact the only supported arg
+      type is I64.
+
+      The return type can be I{64,32,16,8} or V{128,256}.  In the
+      latter two cases, it is expected that |args| will contain the
+      special node IRExpr_VECRET(), in which case this routine
+      generates code to allocate space on the stack for the vector
+      return value.  Since we are not passing any scalars on the
+      stack, it is enough to preallocate the return space before
+      marshalling any arguments, in this case.
+
+      |args| may also contain IRExpr_BBPTR(), in which case the
+      value in %rbp is passed as the corresponding argument.
+
+      Generating code which is both efficient and correct when
+      parameters are to be passed in registers is difficult, for the
+      reasons elaborated in detail in comments attached to
+      doHelperCall() in priv/host-x86/isel.c.  Here, we use a variant
+      of the method described in those comments.
+
+      The problem is split into two cases: the fast scheme and the
+      slow scheme.  In the fast scheme, arguments are computed
+      directly into the target (real) registers.  This is only safe
+      when we can be sure that computation of each argument will not
+      trash any real registers set by computation of any other
+      argument.
+
+      In the slow scheme, all args are first computed into vregs, and
+      once they are all done, they are moved to the relevant real
+      regs.  This always gives correct code, but it also gives a bunch
+      of vreg-to-rreg moves which are usually redundant but are hard
+      for the register allocator to get rid of.
+
+      To decide which scheme to use, all argument expressions are
+      first examined.  If they are all so simple that it is clear they
+      will be evaluated without use of any fixed registers, use the
+      fast scheme, else use the slow scheme.  Note also that only
+      unconditional calls may use the fast scheme, since having to
+      compute a condition expression could itself trash real
+      registers.  Note that for simplicity, in the case where
+      IRExpr_VECRET() is present, we use the slow scheme.  This is
+      motivated by the desire to avoid any possible complexity
+      w.r.t. nested calls.
+
+      Note this requires being able to examine an expression and
+      determine whether or not evaluation of it might use a fixed
+      register.  That requires knowledge of how the rest of this insn
+      selector works.  Currently just the following 3 are regarded as
+      safe -- hopefully they cover the majority of arguments in
+      practice: IRExpr_Tmp IRExpr_Const IRExpr_Get.
+   */
+
+   /* Note that the cee->regparms field is meaningless on AMD64 host
+      (since there is only one calling convention) and so we always
+      ignore it. */
+   n_args = 0;
+   for (i = 0; args[i]; i++)
+      n_args++;
+
+   if (n_args > 6)
+      vpanic("doHelperCall(AMD64): cannot currently handle > 6 args");
+
+   argregs[0] = hregAMD64_RDI();
+   argregs[1] = hregAMD64_RSI();
+   argregs[2] = hregAMD64_RDX();
+   argregs[3] = hregAMD64_RCX();
+   argregs[4] = hregAMD64_R8();
+   argregs[5] = hregAMD64_R9();
+
+   tmpregs[0] = tmpregs[1] = tmpregs[2] =
+   tmpregs[3] = tmpregs[4] = tmpregs[5] = INVALID_HREG;
+
+   fastinstrs[0] = fastinstrs[1] = fastinstrs[2] =
+   fastinstrs[3] = fastinstrs[4] = fastinstrs[5] = NULL;
+
+   /* First decide which scheme (slow or fast) is to be used.  First
+      assume the fast scheme, and select slow if any contraindications
+      (wow) appear. */
+
+   /* We'll need space on the stack for the return value.  Avoid
+      possible complications with nested calls by using the slow
+      scheme. */
+   if (retTy == Ity_V128 || retTy == Ity_V256)
+      goto slowscheme;
+
+   if (guard) {
+      if (guard->tag == Iex_Const 
+          && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+         /* unconditional */
+      } else {
+         /* Not manifestly unconditional -- be conservative. */
+         goto slowscheme;
+      }
+   }
+
+   /* Ok, let's try for the fast scheme.  If it doesn't pan out, we'll
+      use the slow scheme.  Because this is tentative, we can't call
+      addInstr (that is, commit to) any instructions until we're
+      handled all the arguments.  So park the resulting instructions
+      in a buffer and emit that if we're successful. */
+
+   /* FAST SCHEME */
+   /* In this loop, we process args that can be computed into the
+      destination (real) register with a single instruction, without
+      using any fixed regs.  That also includes IRExpr_BBPTR(), but
+      not IRExpr_VECRET().  Indeed, if the IR is well-formed, we can
+      never see IRExpr_VECRET() at this point, since the return-type
+      check above should ensure all those cases use the slow scheme
+      instead. */
+   vassert(n_args >= 0 && n_args <= 6);
+   for (i = 0; i < n_args; i++) {
+      IRExpr* arg = args[i];
+      if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg))) {
+         vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
+      }
+      fastinstrs[i] 
+         = iselIntExpr_single_instruction( env, argregs[i], args[i] );
+      if (fastinstrs[i] == NULL)
+         goto slowscheme;
+   }
+
+   /* Looks like we're in luck.  Emit the accumulated instructions and
+      move on to doing the call itself. */
+   for (i = 0; i < n_args; i++)
+      addInstr(env, fastinstrs[i]);
+
+   /* Fast scheme only applies for unconditional calls.  Hence: */
+   cc = Acc_ALWAYS;
+
+   goto handle_call;
+
+
+   /* SLOW SCHEME; move via temporaries */
+  slowscheme:
+   {}
+#  if 0 /* debug only */
+   if (n_args > 0) {for (i = 0; args[i]; i++) {
+   ppIRExpr(args[i]); vex_printf(" "); }
+   vex_printf("\n");}
+#  endif
+
+   /* If we have a vector return type, allocate a place for it on the
+      stack and record its address. */
+   HReg r_vecRetAddr = INVALID_HREG;
+   if (retTy == Ity_V128) {
+      r_vecRetAddr = newVRegI(env);
+      sub_from_rsp(env, 16);
+      addInstr(env, mk_iMOVsd_RR( hregAMD64_RSP(), r_vecRetAddr ));
+   }
+   else if (retTy == Ity_V256) {
+      r_vecRetAddr = newVRegI(env);
+      sub_from_rsp(env, 32);
+      addInstr(env, mk_iMOVsd_RR( hregAMD64_RSP(), r_vecRetAddr ));
+   }
+
+   vassert(n_args >= 0 && n_args <= 6);
+   for (i = 0; i < n_args; i++) {
+      IRExpr* arg = args[i];
+      if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+         tmpregs[i] = newVRegI(env);
+         addInstr(env, mk_iMOVsd_RR( hregAMD64_RBP(), tmpregs[i]));
+         nBBPTRs++;
+      }
+      else if (UNLIKELY(arg->tag == Iex_VECRET)) {
+         /* We stashed the address of the return slot earlier, so just
+            retrieve it now. */
+         vassert(!hregIsInvalid(r_vecRetAddr));
+         tmpregs[i] = r_vecRetAddr;
+         nVECRETs++;
+      }
+      else {
+         vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
+         tmpregs[i] = iselIntExpr_R(env, args[i]);
+      }
+   }
+
+   /* Now we can compute the condition.  We can't do it earlier
+      because the argument computations could trash the condition
+      codes.  Be a bit clever to handle the common case where the
+      guard is 1:Bit. */
+   cc = Acc_ALWAYS;
+   if (guard) {
+      if (guard->tag == Iex_Const 
+          && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+         /* unconditional -- do nothing */
+      } else {
+         cc = iselCondCode( env, guard );
+      }
+   }
+
+   /* Move the args to their final destinations. */
+   for (i = 0; i < n_args; i++) {
+      /* None of these insns, including any spill code that might
+         be generated, may alter the condition codes. */
+      addInstr( env, mk_iMOVsd_RR( tmpregs[i], argregs[i] ) );
+   }
+
+
+   /* Do final checks, set the return values, and generate the call
+      instruction proper. */
+  handle_call:
+
+   if (retTy == Ity_V128 || retTy == Ity_V256) {
+      vassert(nVECRETs == 1);
+   } else {
+      vassert(nVECRETs == 0);
+   }
+
+   vassert(nBBPTRs == 0 || nBBPTRs == 1);
+
+   vassert(*stackAdjustAfterCall == 0);
+   vassert(is_RetLoc_INVALID(*retloc));
+   switch (retTy) {
+         case Ity_INVALID:
+            /* Function doesn't return a value. */
+            *retloc = mk_RetLoc_simple(RLPri_None);
+            break;
+         case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8:
+            *retloc = mk_RetLoc_simple(RLPri_Int);
+            break;
+         case Ity_V128:
+            *retloc = mk_RetLoc_spRel(RLPri_V128SpRel, 0);
+            *stackAdjustAfterCall = 16;
+            break;
+         case Ity_V256:
+            *retloc = mk_RetLoc_spRel(RLPri_V256SpRel, 0);
+            *stackAdjustAfterCall = 32;
+            break;
+         default:
+            /* IR can denote other possible return types, but we don't
+               handle those here. */
+           vassert(0);
+   }
+
+   /* Finally, generate the call itself.  This needs the *retloc value
+      set in the switch above, which is why it's at the end. */
+   addInstr(env,
+            AMD64Instr_Call(cc, (Addr)cee->addr, n_args, *retloc));
+}
+
+
+/* Given a guest-state array descriptor, an index expression and a
+   bias, generate an AMD64AMode holding the relevant guest state
+   offset. */
+
+static
+AMD64AMode* genGuestArrayOffset ( ISelEnv* env, IRRegArray* descr, 
+                                  IRExpr* off, Int bias )
+{
+   HReg tmp, roff;
+   Int  elemSz = sizeofIRType(descr->elemTy);
+   Int  nElems = descr->nElems;
+
+   /* Throw out any cases not generated by an amd64 front end.  In
+      theory there might be a day where we need to handle them -- if
+      we ever run non-amd64-guest on amd64 host. */
+
+   if (nElems != 8 || (elemSz != 1 && elemSz != 8))
+      vpanic("genGuestArrayOffset(amd64 host)");
+
+   /* Compute off into a reg, %off.  Then return:
+
+         movq %off, %tmp
+         addq $bias, %tmp  (if bias != 0)
+         andq %tmp, 7
+         ... base(%rbp, %tmp, shift) ...
+   */
+   tmp  = newVRegI(env);
+   roff = iselIntExpr_R(env, off);
+   /* Copy into a fresh vreg so the ADD/AND below don't trash the
+      register holding |off|'s value. */
+   addInstr(env, mk_iMOVsd_RR(roff, tmp));
+   if (bias != 0) {
+      /* Make sure the bias is sane, in the sense that there are
+         no significant bits above bit 30 in it. */
+      vassert(-10000 < bias && bias < 10000);
+      addInstr(env, 
+               AMD64Instr_Alu64R(Aalu_ADD, AMD64RMI_Imm(bias), tmp));
+   }
+   /* Mask with 7 (== nElems-1) so the biased index wraps around the
+      8-element array instead of indexing outside it. */
+   addInstr(env, 
+            AMD64Instr_Alu64R(Aalu_AND, AMD64RMI_Imm(7), tmp));
+   vassert(elemSz == 1 || elemSz == 8);
+   /* Scale the index by the element size: shift of 3 for 8-byte
+      elements, 0 for 1-byte elements. */
+   return
+      AMD64AMode_IRRS( descr->base, hregAMD64_RBP(), tmp,
+                                    elemSz==8 ? 3 : 0);
+}
+
+
+/* Set the SSE unit's rounding mode to default (%mxcsr = 0x1F80) */
+static
+void set_SSE_rounding_default ( ISelEnv* env )
+{
+   /* ldmxcsr only takes a memory operand, so push the constant on
+      the stack and load from there:
+
+      pushq $DEFAULT_MXCSR 
+      ldmxcsr 0(%rsp)
+      addq $8, %rsp
+   */
+   AMD64AMode* zero_rsp = AMD64AMode_IR(0, hregAMD64_RSP());
+   addInstr(env, AMD64Instr_Push(AMD64RMI_Imm(DEFAULT_MXCSR)));
+   addInstr(env, AMD64Instr_LdMXCSR(zero_rsp));
+   add_to_rsp(env, 8);
+}
+
+/* Mess with the FPU's rounding mode: set to the default rounding mode
+   (DEFAULT_FPUCW). */
+static 
+void set_FPU_rounding_default ( ISelEnv* env )
+{
+   /* fldcw only takes a memory operand, so park the new control word
+      in the word just below %rsp (presumably relying on the AMD64
+      red zone -- confirm):
+
+      movq $DEFAULT_FPUCW, -8(%rsp)
+      fldcw -8(%rsp)
+   */
+   AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+   addInstr(env, AMD64Instr_Alu64M(
+                    Aalu_MOV, AMD64RI_Imm(DEFAULT_FPUCW), m8_rsp));
+   addInstr(env, AMD64Instr_A87LdCW(m8_rsp));
+}
+
+
+/* Mess with the SSE unit's rounding mode: 'mode' is an I32-typed
+   expression denoting a value in the range 0 .. 3, indicating a round
+   mode encoded as per type IRRoundingMode.  Set the SSE machinery to
+   have the same rounding.
+*/
+static
+void set_SSE_rounding_mode ( ISelEnv* env, IRExpr* mode )
+{
+   /* Note: this sequence only makes sense because DEFAULT_MXCSR has
+      both rounding bits == 0.  If that wasn't the case, we couldn't
+      create a new rounding field simply by ORing the new value into
+      place. */
+
+   /* The shift by 13 places the 2-bit rounding mode in MXCSR's
+      rounding-control field (bits 14:13).
+
+      movq $3, %reg
+      andq [[mode]], %reg  -- shouldn't be needed; paranoia
+      shlq $13, %reg
+      orq $DEFAULT_MXCSR, %reg
+      pushq %reg
+      ldmxcsr 0(%rsp)
+      addq $8, %rsp
+   */      
+   HReg        reg      = newVRegI(env);
+   AMD64AMode* zero_rsp = AMD64AMode_IR(0, hregAMD64_RSP());
+   addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, AMD64RMI_Imm(3), reg));
+   addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
+                                   iselIntExpr_RMI(env, mode), reg));
+   addInstr(env, AMD64Instr_Sh64(Ash_SHL, 13, reg));
+   addInstr(env, AMD64Instr_Alu64R(
+                    Aalu_OR, AMD64RMI_Imm(DEFAULT_MXCSR), reg));
+   addInstr(env, AMD64Instr_Push(AMD64RMI_Reg(reg)));
+   addInstr(env, AMD64Instr_LdMXCSR(zero_rsp));
+   add_to_rsp(env, 8);
+}
+
+
+/* Mess with the FPU's rounding mode: 'mode' is an I32-typed
+   expression denoting a value in the range 0 .. 3, indicating a round
+   mode encoded as per type IRRoundingMode.  Set the x87 FPU to have
+   the same rounding.
+*/
+static
+void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode )
+{
+   HReg rrm  = iselIntExpr_R(env, mode);
+   HReg rrm2 = newVRegI(env);
+   AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+
+   /* The shift by 10 places the 2-bit rounding mode in the x87
+      control word's rounding-control field (bits 11:10).
+
+      movq  %rrm, %rrm2
+      andq  $3, %rrm2   -- shouldn't be needed; paranoia
+      shlq  $10, %rrm2
+      orq   $DEFAULT_FPUCW, %rrm2
+      movq  %rrm2, -8(%rsp)
+      fldcw -8(%rsp)
+   */
+   addInstr(env, mk_iMOVsd_RR(rrm, rrm2));
+   addInstr(env, AMD64Instr_Alu64R(Aalu_AND, AMD64RMI_Imm(3), rrm2));
+   addInstr(env, AMD64Instr_Sh64(Ash_SHL, 10, rrm2));
+   addInstr(env, AMD64Instr_Alu64R(Aalu_OR, 
+                                   AMD64RMI_Imm(DEFAULT_FPUCW), rrm2));
+   addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, 
+                                   AMD64RI_Reg(rrm2), m8_rsp));
+   addInstr(env, AMD64Instr_A87LdCW(m8_rsp));
+}
+
+
+/* Generate all-zeroes into a new vector register.
+*/
+static HReg generate_zeroes_V128 ( ISelEnv* env )
+{
+   HReg dst = newVRegV(env);
+   /* XORing a register with itself yields zero regardless of its
+      previous (undefined) contents. */
+   addInstr(env, AMD64Instr_SseReRg(Asse_XOR, dst, dst));
+   return dst;
+}
+
+/* Generate all-ones into a new vector register.
+*/
+static HReg generate_ones_V128 ( ISelEnv* env )
+{
+   HReg dst = newVRegV(env);
+   /* Comparing a register for equality with itself sets every lane
+      to all-ones, whatever the register's starting value. */
+   addInstr(env, AMD64Instr_SseReRg(Asse_CMPEQ32, dst, dst));
+   return dst;
+}
+
+
+/* Generate !src into a new vector register.  Amazing that there isn't
+   a less crappy way to do this.
+*/
+static HReg do_sse_NotV128 ( ISelEnv* env, HReg src )
+{
+   /* Bitwise complement == XOR with all-ones.  |src| itself is left
+      unmodified; the result goes into a fresh vreg. */
+   HReg dst = generate_ones_V128(env);
+   addInstr(env, AMD64Instr_SseReRg(Asse_XOR, src, dst));
+   return dst;
+}
+
+
+/* Expand the given byte into a 64-bit word, by cloning each bit
+   8 times. */
+static ULong bitmask8_to_bytemask64 ( UShort w8 )
+{
+   /* Only the low 8 bits of |w8| may be set. */
+   vassert(w8 == (w8 & 0xFF));
+   ULong w64 = 0;
+   Int i;
+   /* Bit i of |w8| becomes byte i (0xFF or 0x00) of the result. */
+   for (i = 0; i < 8; i++) {
+      if (w8 & (1<<i))
+         w64 |= (0xFFULL << (8 * i));
+   }
+   return w64;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64/32/16/8 bit)        ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for an integer-typed expression, and add them to the
+   code list.  Return a reg holding the result.  This reg will be a
+   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
+   want to modify it, ask for a new vreg, copy it in there, and modify
+   the copy.  The register allocator will do its best to map both
+   vregs to the same real register, so the copies will often disappear
+   later in the game.
+
+   This should handle expressions of 64, 32, 16 and 8-bit type.  All
+   results are returned in a 64-bit register.  For 32-, 16- and 8-bit
+   expressions, the upper 32/48/56 bits are arbitrary, so you should
+   mask or sign extend partial values if necessary.
+*/
+
+static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
+{
+   HReg r = iselIntExpr_R_wrk(env, e);
+   /* sanity checks ... */
+#  if 0
+   vex_printf("\niselIntExpr_R: "); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* The worker must hand back a 64-bit integer-class *virtual*
+      register; callers are never given a real register. */
+   vassert(hregClass(r) == HRcInt64);
+   vassert(hregIsVirtual(r));
+   return r;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
+{
+   /* Used for unary/binary SIMD64 ops. */
+   HWord fn = 0;
+   Bool second_is_UInt;
+
+   MatchInfo mi;
+   DECLARE_PATTERN(p_1Uto8_64to1);
+   DECLARE_PATTERN(p_LDle8_then_8Uto64);
+   DECLARE_PATTERN(p_LDle16_then_16Uto64);
+
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   switch (ty) {
+      case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8: break;
+      default: vassert(0);
+   }
+
+   switch (e->tag) {
+
+   /* --------- TEMP --------- */
+   case Iex_RdTmp: {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg dst = newVRegI(env);
+      AMD64AMode* amode = iselIntExpr_AMode ( env, e->Iex.Load.addr );
+
+      /* We can't handle big-endian loads, nor load-linked. */
+      if (e->Iex.Load.end != Iend_LE)
+         goto irreducible;
+
+      if (ty == Ity_I64) {
+         addInstr(env, AMD64Instr_Alu64R(Aalu_MOV,
+                                         AMD64RMI_Mem(amode), dst) );
+         return dst;
+      }
+      if (ty == Ity_I32) {
+         addInstr(env, AMD64Instr_LoadEX(4,False,amode,dst));
+         return dst;
+      }
+      if (ty == Ity_I16) {
+         addInstr(env, AMD64Instr_LoadEX(2,False,amode,dst));
+         return dst;
+      }
+      if (ty == Ity_I8) {
+         addInstr(env, AMD64Instr_LoadEX(1,False,amode,dst));
+         return dst;
+      }
+      break;
+   }
+
+   /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      AMD64AluOp   aluOp;
+      AMD64ShiftOp shOp;
+
+      /* Pattern: Sub64(0,x) */
+      /*     and: Sub32(0,x) */
+      if ((e->Iex.Binop.op == Iop_Sub64 && isZeroU64(e->Iex.Binop.arg1))
+          || (e->Iex.Binop.op == Iop_Sub32 && isZeroU32(e->Iex.Binop.arg1))) {
+         HReg dst = newVRegI(env);
+         HReg reg = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(reg,dst));
+         addInstr(env, AMD64Instr_Unary64(Aun_NEG,dst));
+         return dst;
+      }
+
+      /* Is it an addition or logical style op? */
+      switch (e->Iex.Binop.op) {
+         case Iop_Add8: case Iop_Add16: case Iop_Add32: case Iop_Add64: 
+            aluOp = Aalu_ADD; break;
+         case Iop_Sub8: case Iop_Sub16: case Iop_Sub32: case Iop_Sub64:
+            aluOp = Aalu_SUB; break;
+         case Iop_And8: case Iop_And16: case Iop_And32: case Iop_And64: 
+            aluOp = Aalu_AND; break;
+         case Iop_Or8:  case Iop_Or16:  case Iop_Or32:  case Iop_Or64: 
+            aluOp = Aalu_OR; break;
+         case Iop_Xor8: case Iop_Xor16: case Iop_Xor32: case Iop_Xor64: 
+            aluOp = Aalu_XOR; break;
+         case Iop_Mul16: case Iop_Mul32: case Iop_Mul64:
+            aluOp = Aalu_MUL; break;
+         default:
+            aluOp = Aalu_INVALID; break;
+      }
+      /* For commutative ops we assume any literal
+         values are on the second operand. */
+      if (aluOp != Aalu_INVALID) {
+         HReg dst      = newVRegI(env);
+         HReg reg      = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         AMD64RMI* rmi = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(reg,dst));
+         addInstr(env, AMD64Instr_Alu64R(aluOp, rmi, dst));
+         return dst;
+      }
+
+      /* Perhaps a shift op? */
+      switch (e->Iex.Binop.op) {
+         case Iop_Shl64: case Iop_Shl32: case Iop_Shl16: case Iop_Shl8:
+            shOp = Ash_SHL; break;
+         case Iop_Shr64: case Iop_Shr32: case Iop_Shr16: case Iop_Shr8: 
+            shOp = Ash_SHR; break;
+         case Iop_Sar64: case Iop_Sar32: case Iop_Sar16: case Iop_Sar8: 
+            shOp = Ash_SAR; break;
+         default:
+            shOp = Ash_INVALID; break;
+      }
+      if (shOp != Ash_INVALID) {
+         HReg dst = newVRegI(env);
+
+         /* regL = the value to be shifted */
+         HReg regL   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         addInstr(env, mk_iMOVsd_RR(regL,dst));
+
+         /* Do any necessary widening for 32/16/8 bit operands */
+         switch (e->Iex.Binop.op) {
+            case Iop_Shr64: case Iop_Shl64: case Iop_Sar64: 
+               break;
+            case Iop_Shl32: case Iop_Shl16: case Iop_Shl8:
+               break;
+            case Iop_Shr8:
+               addInstr(env, AMD64Instr_Alu64R(
+                                Aalu_AND, AMD64RMI_Imm(0xFF), dst));
+               break;
+            case Iop_Shr16:
+               addInstr(env, AMD64Instr_Alu64R(
+                                Aalu_AND, AMD64RMI_Imm(0xFFFF), dst));
+               break;
+            case Iop_Shr32:
+               addInstr(env, AMD64Instr_MovxLQ(False, dst, dst));
+               break;
+            case Iop_Sar8:
+               addInstr(env, AMD64Instr_Sh64(Ash_SHL, 56, dst));
+               addInstr(env, AMD64Instr_Sh64(Ash_SAR, 56, dst));
+               break;
+            case Iop_Sar16:
+               addInstr(env, AMD64Instr_Sh64(Ash_SHL, 48, dst));
+               addInstr(env, AMD64Instr_Sh64(Ash_SAR, 48, dst));
+               break;
+            case Iop_Sar32:
+               addInstr(env, AMD64Instr_MovxLQ(True, dst, dst));
+               break;
+            default: 
+               ppIROp(e->Iex.Binop.op);
+               vassert(0);
+         }
+
+         /* Now consider the shift amount.  If it's a literal, we
+            can do a much better job than the general case. */
+         if (e->Iex.Binop.arg2->tag == Iex_Const) {
+            /* assert that the IR is well-typed */
+            Int nshift;
+            vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
+            nshift = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            vassert(nshift >= 0);
+            if (nshift > 0)
+               /* Can't allow nshift==0 since that means %cl */
+               addInstr(env, AMD64Instr_Sh64(shOp, nshift, dst));
+         } else {
+            /* General case; we have to force the amount into %cl. */
+            HReg regR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            addInstr(env, mk_iMOVsd_RR(regR,hregAMD64_RCX()));
+            addInstr(env, AMD64Instr_Sh64(shOp, 0/* %cl */, dst));
+         }
+         return dst;
+      }
+
+      /* Deal with 64-bit SIMD binary ops */
+      second_is_UInt = False;
+      switch (e->Iex.Binop.op) {
+         case Iop_Add8x8:
+            fn = (HWord)h_generic_calc_Add8x8; break;
+         case Iop_Add16x4:
+            fn = (HWord)h_generic_calc_Add16x4; break;
+         case Iop_Add32x2:
+            fn = (HWord)h_generic_calc_Add32x2; break;
+
+         case Iop_Avg8Ux8:
+            fn = (HWord)h_generic_calc_Avg8Ux8; break;
+         case Iop_Avg16Ux4:
+            fn = (HWord)h_generic_calc_Avg16Ux4; break;
+
+         case Iop_CmpEQ8x8:
+            fn = (HWord)h_generic_calc_CmpEQ8x8; break;
+         case Iop_CmpEQ16x4:
+            fn = (HWord)h_generic_calc_CmpEQ16x4; break;
+         case Iop_CmpEQ32x2:
+            fn = (HWord)h_generic_calc_CmpEQ32x2; break;
+
+         case Iop_CmpGT8Sx8:
+            fn = (HWord)h_generic_calc_CmpGT8Sx8; break;
+         case Iop_CmpGT16Sx4:
+            fn = (HWord)h_generic_calc_CmpGT16Sx4; break;
+         case Iop_CmpGT32Sx2:
+            fn = (HWord)h_generic_calc_CmpGT32Sx2; break;
+
+         case Iop_InterleaveHI8x8:
+            fn = (HWord)h_generic_calc_InterleaveHI8x8; break;
+         case Iop_InterleaveLO8x8:
+            fn = (HWord)h_generic_calc_InterleaveLO8x8; break;
+         case Iop_InterleaveHI16x4:
+            fn = (HWord)h_generic_calc_InterleaveHI16x4; break;
+         case Iop_InterleaveLO16x4:
+            fn = (HWord)h_generic_calc_InterleaveLO16x4; break;
+         case Iop_InterleaveHI32x2:
+            fn = (HWord)h_generic_calc_InterleaveHI32x2; break;
+         case Iop_InterleaveLO32x2:
+            fn = (HWord)h_generic_calc_InterleaveLO32x2; break;
+         case Iop_CatOddLanes16x4:
+            fn = (HWord)h_generic_calc_CatOddLanes16x4; break;
+         case Iop_CatEvenLanes16x4:
+            fn = (HWord)h_generic_calc_CatEvenLanes16x4; break;
+         case Iop_Perm8x8:
+            fn = (HWord)h_generic_calc_Perm8x8; break;
+
+         case Iop_Max8Ux8:
+            fn = (HWord)h_generic_calc_Max8Ux8; break;
+         case Iop_Max16Sx4:
+            fn = (HWord)h_generic_calc_Max16Sx4; break;
+         case Iop_Min8Ux8:
+            fn = (HWord)h_generic_calc_Min8Ux8; break;
+         case Iop_Min16Sx4:
+            fn = (HWord)h_generic_calc_Min16Sx4; break;
+
+         case Iop_Mul16x4:
+            fn = (HWord)h_generic_calc_Mul16x4; break;
+         case Iop_Mul32x2:
+            fn = (HWord)h_generic_calc_Mul32x2; break;
+         case Iop_MulHi16Sx4:
+            fn = (HWord)h_generic_calc_MulHi16Sx4; break;
+         case Iop_MulHi16Ux4:
+            fn = (HWord)h_generic_calc_MulHi16Ux4; break;
+
+         case Iop_QAdd8Sx8:
+            fn = (HWord)h_generic_calc_QAdd8Sx8; break;
+         case Iop_QAdd16Sx4:
+            fn = (HWord)h_generic_calc_QAdd16Sx4; break;
+         case Iop_QAdd8Ux8:
+            fn = (HWord)h_generic_calc_QAdd8Ux8; break;
+         case Iop_QAdd16Ux4:
+            fn = (HWord)h_generic_calc_QAdd16Ux4; break;
+
+         case Iop_QNarrowBin32Sto16Sx4:
+            fn = (HWord)h_generic_calc_QNarrowBin32Sto16Sx4; break;
+         case Iop_QNarrowBin16Sto8Sx8:
+            fn = (HWord)h_generic_calc_QNarrowBin16Sto8Sx8; break;
+         case Iop_QNarrowBin16Sto8Ux8:
+            fn = (HWord)h_generic_calc_QNarrowBin16Sto8Ux8; break;
+         case Iop_NarrowBin16to8x8:
+            fn = (HWord)h_generic_calc_NarrowBin16to8x8; break;
+         case Iop_NarrowBin32to16x4:
+            fn = (HWord)h_generic_calc_NarrowBin32to16x4; break;
+
+         case Iop_QSub8Sx8:
+            fn = (HWord)h_generic_calc_QSub8Sx8; break;
+         case Iop_QSub16Sx4:
+            fn = (HWord)h_generic_calc_QSub16Sx4; break;
+         case Iop_QSub8Ux8:
+            fn = (HWord)h_generic_calc_QSub8Ux8; break;
+         case Iop_QSub16Ux4:
+            fn = (HWord)h_generic_calc_QSub16Ux4; break;
+
+         case Iop_Sub8x8:
+            fn = (HWord)h_generic_calc_Sub8x8; break;
+         case Iop_Sub16x4:
+            fn = (HWord)h_generic_calc_Sub16x4; break;
+         case Iop_Sub32x2:
+            fn = (HWord)h_generic_calc_Sub32x2; break;
+
+         case Iop_ShlN32x2:
+            fn = (HWord)h_generic_calc_ShlN32x2; 
+            second_is_UInt = True;
+            break;
+         case Iop_ShlN16x4:
+            fn = (HWord)h_generic_calc_ShlN16x4;
+            second_is_UInt = True;
+            break;
+         case Iop_ShlN8x8:
+            fn = (HWord)h_generic_calc_ShlN8x8;
+            second_is_UInt = True;
+            break;
+         case Iop_ShrN32x2:
+            fn = (HWord)h_generic_calc_ShrN32x2; 
+            second_is_UInt = True; 
+            break;
+         case Iop_ShrN16x4:
+            fn = (HWord)h_generic_calc_ShrN16x4;
+            second_is_UInt = True; 
+            break;
+         case Iop_SarN32x2:
+            fn = (HWord)h_generic_calc_SarN32x2;
+            second_is_UInt = True; 
+            break;
+         case Iop_SarN16x4:
+            fn = (HWord)h_generic_calc_SarN16x4;
+            second_is_UInt = True; 
+            break;
+         case Iop_SarN8x8:
+            fn = (HWord)h_generic_calc_SarN8x8;
+            second_is_UInt = True; 
+            break;
+
+         default:
+            fn = (HWord)0; break;
+      }
+      if (fn != (HWord)0) {
+         /* Note: the following assumes all helpers are of signature 
+               ULong fn ( ULong, ULong ), and they are
+            not marked as regparm functions. 
+         */
+         HReg dst  = newVRegI(env);
+         HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         if (second_is_UInt)
+            addInstr(env, AMD64Instr_MovxLQ(False, argR, argR));
+         addInstr(env, mk_iMOVsd_RR(argL, hregAMD64_RDI()) );
+         addInstr(env, mk_iMOVsd_RR(argR, hregAMD64_RSI()) );
+         addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 2,
+                                        mk_RetLoc_simple(RLPri_Int) ));
+         addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), dst));
+         return dst;
+      }
+
+      /* Handle misc other ops. */
+
+      if (e->Iex.Binop.op == Iop_Max32U) {
+         HReg src1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg dst  = newVRegI(env);
+         HReg src2 = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(src1, dst));
+         addInstr(env, AMD64Instr_Alu32R(Aalu_CMP, AMD64RMI_Reg(src2), dst));
+         addInstr(env, AMD64Instr_CMov64(Acc_B, src2, dst));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_DivModS64to32
+          || e->Iex.Binop.op == Iop_DivModU64to32) {
+         /* 64 x 32 -> (32(rem),32(div)) division */
+         /* Get the 64-bit operand into edx:eax, and the other into
+            any old R/M. */
+         HReg      rax     = hregAMD64_RAX();
+         HReg      rdx     = hregAMD64_RDX();
+         HReg      dst     = newVRegI(env);
+         Bool      syned   = toBool(e->Iex.Binop.op == Iop_DivModS64to32);
+         AMD64RM*  rmRight = iselIntExpr_RM(env, e->Iex.Binop.arg2);
+         /* Compute the left operand into a reg, and then 
+            put the top half in edx and the bottom in eax. */
+         HReg left64 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         addInstr(env, mk_iMOVsd_RR(left64, rdx));
+         addInstr(env, mk_iMOVsd_RR(left64, rax));
+         addInstr(env, AMD64Instr_Sh64(Ash_SHR, 32, rdx));
+         addInstr(env, AMD64Instr_Div(syned, 4, rmRight));
+	 addInstr(env, AMD64Instr_MovxLQ(False, rdx, rdx));
+	 addInstr(env, AMD64Instr_MovxLQ(False, rax, rax));
+         addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, rdx));
+         addInstr(env, mk_iMOVsd_RR(rax, dst));
+         addInstr(env, AMD64Instr_Alu64R(Aalu_OR, AMD64RMI_Reg(rdx), dst));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_32HLto64) {
+         HReg hi32  = newVRegI(env);
+         HReg lo32  = newVRegI(env);
+         HReg hi32s = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg lo32s = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(hi32s, hi32));
+         addInstr(env, mk_iMOVsd_RR(lo32s, lo32));
+         addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, hi32));
+	 addInstr(env, AMD64Instr_MovxLQ(False, lo32, lo32));
+         addInstr(env, AMD64Instr_Alu64R(
+                          Aalu_OR, AMD64RMI_Reg(lo32), hi32));
+         return hi32;
+      }
+
+      if (e->Iex.Binop.op == Iop_16HLto32) {
+         HReg hi16  = newVRegI(env);
+         HReg lo16  = newVRegI(env);
+         HReg hi16s = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg lo16s = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(hi16s, hi16));
+         addInstr(env, mk_iMOVsd_RR(lo16s, lo16));
+         addInstr(env, AMD64Instr_Sh64(Ash_SHL, 16, hi16));
+         addInstr(env, AMD64Instr_Alu64R(
+                          Aalu_AND, AMD64RMI_Imm(0xFFFF), lo16));
+         addInstr(env, AMD64Instr_Alu64R(
+                          Aalu_OR, AMD64RMI_Reg(lo16), hi16));
+         return hi16;
+      }
+
+      if (e->Iex.Binop.op == Iop_8HLto16) {
+         HReg hi8  = newVRegI(env);
+         HReg lo8  = newVRegI(env);
+         HReg hi8s = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg lo8s = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(hi8s, hi8));
+         addInstr(env, mk_iMOVsd_RR(lo8s, lo8));
+         addInstr(env, AMD64Instr_Sh64(Ash_SHL, 8, hi8));
+         addInstr(env, AMD64Instr_Alu64R(
+                          Aalu_AND, AMD64RMI_Imm(0xFF), lo8));
+         addInstr(env, AMD64Instr_Alu64R(
+                          Aalu_OR, AMD64RMI_Reg(lo8), hi8));
+         return hi8;
+      }
+
+      if (e->Iex.Binop.op == Iop_MullS32
+          || e->Iex.Binop.op == Iop_MullS16
+          || e->Iex.Binop.op == Iop_MullS8
+          || e->Iex.Binop.op == Iop_MullU32 
+          || e->Iex.Binop.op == Iop_MullU16 
+          || e->Iex.Binop.op == Iop_MullU8) {
+         HReg a32   = newVRegI(env);
+         HReg b32   = newVRegI(env);
+         HReg a32s  = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg b32s  = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         Int          shift  = 0;
+         AMD64ShiftOp shr_op = Ash_SHR;
+         switch (e->Iex.Binop.op) {
+            case Iop_MullS32: shr_op = Ash_SAR; shift = 32; break;
+            case Iop_MullS16: shr_op = Ash_SAR; shift = 48; break;
+            case Iop_MullS8:  shr_op = Ash_SAR; shift = 56; break;
+            case Iop_MullU32: shr_op = Ash_SHR; shift = 32; break;
+            case Iop_MullU16: shr_op = Ash_SHR; shift = 48; break;
+            case Iop_MullU8:  shr_op = Ash_SHR; shift = 56; break;
+            default: vassert(0);
+         }
+
+         addInstr(env, mk_iMOVsd_RR(a32s, a32));
+         addInstr(env, mk_iMOVsd_RR(b32s, b32));
+         addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, a32));
+         addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, b32));
+         addInstr(env, AMD64Instr_Sh64(shr_op,  shift, a32));
+         addInstr(env, AMD64Instr_Sh64(shr_op,  shift, b32));
+         addInstr(env, AMD64Instr_Alu64R(Aalu_MUL, AMD64RMI_Reg(a32), b32));
+         return b32;
+      }
+
+      if (e->Iex.Binop.op == Iop_CmpF64) {
+         HReg fL = iselDblExpr(env, e->Iex.Binop.arg1);
+         HReg fR = iselDblExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegI(env);
+         addInstr(env, AMD64Instr_SseUComIS(8,fL,fR,dst));
+         /* Mask out irrelevant parts of the result so as to conform
+            to the CmpF64 definition. */
+         addInstr(env, AMD64Instr_Alu64R(Aalu_AND, AMD64RMI_Imm(0x45), dst));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_F64toI32S
+          || e->Iex.Binop.op == Iop_F64toI64S) {
+         Int  szD = e->Iex.Binop.op==Iop_F64toI32S ? 4 : 8;
+         HReg rf  = iselDblExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegI(env);
+         set_SSE_rounding_mode( env, e->Iex.Binop.arg1 );
+         addInstr(env, AMD64Instr_SseSF2SI( 8, szD, rf, dst ));
+         set_SSE_rounding_default(env);
+         return dst;
+      }
+
+      break;
+   }
+
+   /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+
+      /* 1Uto8(64to1(expr64)) */
+      {
+         DEFINE_PATTERN( p_1Uto8_64to1,
+                         unop(Iop_1Uto8, unop(Iop_64to1, bind(0))) );
+         if (matchIRExpr(&mi,p_1Uto8_64to1,e)) {
+            IRExpr* expr64 = mi.bindee[0];
+            HReg    dst    = newVRegI(env);
+            HReg    src    = iselIntExpr_R(env, expr64);
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
+                                            AMD64RMI_Imm(1), dst));
+            return dst;
+         }
+      }
+
+      /* 8Uto64(LDle(expr64)) */
+      {
+         DEFINE_PATTERN(p_LDle8_then_8Uto64,
+                        unop(Iop_8Uto64,
+                             IRExpr_Load(Iend_LE,Ity_I8,bind(0))) );
+         if (matchIRExpr(&mi,p_LDle8_then_8Uto64,e)) {
+            HReg dst = newVRegI(env);
+            AMD64AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
+            addInstr(env, AMD64Instr_LoadEX(1,False,amode,dst));
+            return dst;
+         }
+      }
+
+      /* 16Uto64(LDle(expr64)) */
+      {
+         DEFINE_PATTERN(p_LDle16_then_16Uto64,
+                        unop(Iop_16Uto64,
+                             IRExpr_Load(Iend_LE,Ity_I16,bind(0))) );
+         if (matchIRExpr(&mi,p_LDle16_then_16Uto64,e)) {
+            HReg dst = newVRegI(env);
+            AMD64AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
+            addInstr(env, AMD64Instr_LoadEX(2,False,amode,dst));
+            return dst;
+         }
+      }
+
+      /* 32Uto64( Add32/Sub32/And32/Or32/Xor32(expr32, expr32) )
+         Use 32 bit arithmetic and let the default zero-extend rule
+         do the 32Uto64 for free. */
+      if (e->Iex.Unop.op == Iop_32Uto64 && e->Iex.Unop.arg->tag == Iex_Binop) {
+         IROp    opi  = e->Iex.Unop.arg->Iex.Binop.op; /* inner op */
+         IRExpr* argL = e->Iex.Unop.arg->Iex.Binop.arg1;
+         IRExpr* argR = e->Iex.Unop.arg->Iex.Binop.arg2;
+         AMD64AluOp aluOp = Aalu_INVALID;
+         switch (opi) {
+            case Iop_Add32: aluOp = Aalu_ADD; break;
+            case Iop_Sub32: aluOp = Aalu_SUB; break;
+            case Iop_And32: aluOp = Aalu_AND; break;
+            case Iop_Or32:  aluOp = Aalu_OR;  break;
+            case Iop_Xor32: aluOp = Aalu_XOR; break;
+            default: break;
+         }
+         if (aluOp != Aalu_INVALID) {
+            /* For commutative ops we assume any literal values are on
+               the second operand. */
+            HReg dst      = newVRegI(env);
+            HReg reg      = iselIntExpr_R(env, argL);
+            AMD64RMI* rmi = iselIntExpr_RMI(env, argR);
+            addInstr(env, mk_iMOVsd_RR(reg,dst));
+            addInstr(env, AMD64Instr_Alu32R(aluOp, rmi, dst));
+            return dst;
+         }
+         /* just fall through to normal handling for Iop_32Uto64 */
+      }
+
+      /* Fallback cases */
+      switch (e->Iex.Unop.op) {
+         case Iop_32Uto64:
+         case Iop_32Sto64: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, AMD64Instr_MovxLQ(e->Iex.Unop.op == Iop_32Sto64,
+                                            src, dst) );
+            return dst;
+         }
+         case Iop_128HIto64: {
+            HReg rHi, rLo;
+            iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+            return rHi; /* and abandon rLo */
+         }
+         case Iop_128to64: {
+            HReg rHi, rLo;
+            iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+            return rLo; /* and abandon rHi */
+         }
+         case Iop_8Uto16:
+         case Iop_8Uto32:
+         case Iop_8Uto64:
+         case Iop_16Uto64:
+         case Iop_16Uto32: {
+            HReg dst     = newVRegI(env);
+            HReg src     = iselIntExpr_R(env, e->Iex.Unop.arg);
+            Bool srcIs16 = toBool( e->Iex.Unop.op==Iop_16Uto32
+                                   || e->Iex.Unop.op==Iop_16Uto64 );
+            UInt mask    = srcIs16 ? 0xFFFF : 0xFF;
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
+                                            AMD64RMI_Imm(mask), dst));
+            return dst;
+         }
+         case Iop_8Sto16:
+         case Iop_8Sto64:
+         case Iop_8Sto32:
+         case Iop_16Sto32:
+         case Iop_16Sto64: {
+            HReg dst     = newVRegI(env);
+            HReg src     = iselIntExpr_R(env, e->Iex.Unop.arg);
+            Bool srcIs16 = toBool( e->Iex.Unop.op==Iop_16Sto32
+                                   || e->Iex.Unop.op==Iop_16Sto64 );
+            UInt amt     = srcIs16 ? 48 : 56;
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, dst));
+            addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, dst));
+            return dst;
+         }
+ 	 case Iop_Not8:
+ 	 case Iop_Not16:
+         case Iop_Not32:
+         case Iop_Not64: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, AMD64Instr_Unary64(Aun_NOT,dst));
+            return dst;
+         }
+         case Iop_16HIto8:
+         case Iop_32HIto16:
+         case Iop_64HIto32: {
+            HReg dst  = newVRegI(env);
+            HReg src  = iselIntExpr_R(env, e->Iex.Unop.arg);
+            Int shift = 0;
+            switch (e->Iex.Unop.op) {
+               case Iop_16HIto8:  shift = 8;  break;
+               case Iop_32HIto16: shift = 16; break;
+               case Iop_64HIto32: shift = 32; break;
+               default: vassert(0);
+            }
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, AMD64Instr_Sh64(Ash_SHR, shift, dst));
+            return dst;
+         }
+         case Iop_1Uto64:
+         case Iop_1Uto32:
+         case Iop_1Uto8: {
+            HReg dst           = newVRegI(env);
+            AMD64CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+            addInstr(env, AMD64Instr_Set64(cond,dst));
+            return dst;
+         }
+         case Iop_1Sto8:
+         case Iop_1Sto16:
+         case Iop_1Sto32:
+         case Iop_1Sto64: {
+            /* could do better than this, but for now ... */
+            HReg dst           = newVRegI(env);
+            AMD64CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+            addInstr(env, AMD64Instr_Set64(cond,dst));
+            addInstr(env, AMD64Instr_Sh64(Ash_SHL, 63, dst));
+            addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, dst));
+            return dst;
+         }
+         case Iop_Ctz64: {
+            /* Count trailing zeroes, implemented by amd64 'bsfq' */
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, AMD64Instr_Bsfr64(True,src,dst));
+            return dst;
+         }
+         case Iop_Clz64: {
+            /* Count leading zeroes.  Do 'bsrq' to establish the index
+               of the highest set bit, and subtract that value from
+               63. */
+            HReg tmp = newVRegI(env);
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, AMD64Instr_Bsfr64(False,src,tmp));
+            addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, 
+                                            AMD64RMI_Imm(63), dst));
+            addInstr(env, AMD64Instr_Alu64R(Aalu_SUB,
+                                            AMD64RMI_Reg(tmp), dst));
+            return dst;
+         }
+
+         case Iop_CmpwNEZ64: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(src,dst));
+            addInstr(env, AMD64Instr_Unary64(Aun_NEG,dst));
+            addInstr(env, AMD64Instr_Alu64R(Aalu_OR,
+                                            AMD64RMI_Reg(src), dst));
+            addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, dst));
+            return dst;
+         }
+
+         case Iop_CmpwNEZ32: {
+            HReg src = newVRegI(env);
+            HReg dst = newVRegI(env);
+            HReg pre = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(pre,src));
+            addInstr(env, AMD64Instr_MovxLQ(False, src, src));
+            addInstr(env, mk_iMOVsd_RR(src,dst));
+            addInstr(env, AMD64Instr_Unary64(Aun_NEG,dst));
+            addInstr(env, AMD64Instr_Alu64R(Aalu_OR,
+                                            AMD64RMI_Reg(src), dst));
+            addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, dst));
+            return dst;
+         }
+
+         case Iop_Left8:
+         case Iop_Left16:
+         case Iop_Left32:
+         case Iop_Left64: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(src, dst));
+            addInstr(env, AMD64Instr_Unary64(Aun_NEG, dst));
+            addInstr(env, AMD64Instr_Alu64R(Aalu_OR, AMD64RMI_Reg(src), dst));
+            return dst;
+         }
+
+         case Iop_V128to32: {
+            HReg        dst     = newVRegI(env);
+            HReg        vec     = iselVecExpr(env, e->Iex.Unop.arg);
+            AMD64AMode* rsp_m16 = AMD64AMode_IR(-16, hregAMD64_RSP());
+            addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, vec, rsp_m16));
+            addInstr(env, AMD64Instr_LoadEX(4, False/*z-widen*/, rsp_m16, dst));
+            return dst;
+         }
+
+         /* V128{HI}to64 */
+         case Iop_V128HIto64:
+         case Iop_V128to64: {
+            HReg dst = newVRegI(env);
+            Int  off = e->Iex.Unop.op==Iop_V128HIto64 ? -8 : -16;
+            HReg rsp = hregAMD64_RSP();
+            HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
+            AMD64AMode* m16_rsp = AMD64AMode_IR(-16, rsp);
+            AMD64AMode* off_rsp = AMD64AMode_IR(off, rsp);
+            addInstr(env, AMD64Instr_SseLdSt(False/*store*/,
+                                             16, vec, m16_rsp));
+            addInstr(env, AMD64Instr_Alu64R( Aalu_MOV, 
+                                             AMD64RMI_Mem(off_rsp), dst ));
+            return dst;
+         }
+
+         case Iop_V256to64_0: case Iop_V256to64_1:
+         case Iop_V256to64_2: case Iop_V256to64_3: {
+            HReg vHi, vLo, vec;
+            iselDVecExpr(&vHi, &vLo, env, e->Iex.Unop.arg);
+            /* Do the first part of the selection by deciding which of
+               the 128 bit registers do look at, and second part using
+               the same scheme as for V128{HI}to64 above. */
+            Int off = 0;
+            switch (e->Iex.Unop.op) {
+               case Iop_V256to64_0: vec = vLo; off = -16; break;
+               case Iop_V256to64_1: vec = vLo; off =  -8; break;
+               case Iop_V256to64_2: vec = vHi; off = -16; break;
+               case Iop_V256to64_3: vec = vHi; off =  -8; break;
+               default: vassert(0);
+            }
+            HReg        dst     = newVRegI(env);
+            HReg        rsp     = hregAMD64_RSP();
+            AMD64AMode* m16_rsp = AMD64AMode_IR(-16, rsp);
+            AMD64AMode* off_rsp = AMD64AMode_IR(off, rsp);
+            addInstr(env, AMD64Instr_SseLdSt(False/*store*/,
+                                             16, vec, m16_rsp));
+            addInstr(env, AMD64Instr_Alu64R( Aalu_MOV, 
+                                             AMD64RMI_Mem(off_rsp), dst ));
+            return dst;
+         }
+
+         /* ReinterpF64asI64(e) */
+         /* Given an IEEE754 double, produce an I64 with the same bit
+            pattern. */
+         case Iop_ReinterpF64asI64: {
+            AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+            HReg        dst    = newVRegI(env);
+            HReg        src    = iselDblExpr(env, e->Iex.Unop.arg);
+            /* paranoia */
+            set_SSE_rounding_default(env);
+            addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, src, m8_rsp));
+            addInstr(env, AMD64Instr_Alu64R(
+                             Aalu_MOV, AMD64RMI_Mem(m8_rsp), dst));
+            return dst;
+         }
+
+         /* ReinterpF32asI32(e) */
+         /* Given an IEEE754 single, produce an I64 with the same bit
+            pattern in the lower half. */
+         case Iop_ReinterpF32asI32: {
+            AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+            HReg        dst    = newVRegI(env);
+            HReg        src    = iselFltExpr(env, e->Iex.Unop.arg);
+            /* paranoia */
+            set_SSE_rounding_default(env);
+            addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 4, src, m8_rsp));
+            addInstr(env, AMD64Instr_LoadEX(4, False/*unsigned*/, m8_rsp, dst ));
+            return dst;
+         }
+
+         case Iop_16to8:
+         case Iop_32to8:
+         case Iop_64to8:
+         case Iop_32to16:
+         case Iop_64to16:
+         case Iop_64to32:
+            /* These are no-ops. */
+            return iselIntExpr_R(env, e->Iex.Unop.arg);
+
+         case Iop_GetMSBs8x8: {
+            /* Note: the following assumes the helper is of
+               signature
+                  UInt fn ( ULong ), and is not a regparm fn.
+            */
+            HReg dst = newVRegI(env);
+            HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
+            fn = (HWord)h_generic_calc_GetMSBs8x8;
+            addInstr(env, mk_iMOVsd_RR(arg, hregAMD64_RDI()) );
+            addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn,
+                                           1, mk_RetLoc_simple(RLPri_Int) ));
+            /* MovxLQ is not exactly the right thing here.  We just
+               need to get the bottom 8 bits of RAX into dst, and zero
+               out everything else.  Assuming that the helper returns
+               a UInt with the top 24 bits zeroed out, it'll do,
+               though. */
+            addInstr(env, AMD64Instr_MovxLQ(False, hregAMD64_RAX(), dst));
+            return dst;
+         }
+
+         case Iop_GetMSBs8x16: {
+            /* Note: the following assumes the helper is of signature
+                  UInt fn ( ULong w64hi, ULong w64Lo ),
+               and is not a regparm fn. */
+            HReg dst = newVRegI(env);
+            HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
+            HReg rsp = hregAMD64_RSP();
+            fn = (HWord)h_generic_calc_GetMSBs8x16;
+            AMD64AMode* m8_rsp  = AMD64AMode_IR( -8, rsp);
+            AMD64AMode* m16_rsp = AMD64AMode_IR(-16, rsp);
+            addInstr(env, AMD64Instr_SseLdSt(False/*store*/,
+                                             16, vec, m16_rsp));
+            /* hi 64 bits into RDI -- the first arg */
+            addInstr(env, AMD64Instr_Alu64R( Aalu_MOV, 
+                                             AMD64RMI_Mem(m8_rsp),
+                                             hregAMD64_RDI() )); /* 1st arg */
+            /* lo 64 bits into RSI -- the 2nd arg */
+            addInstr(env, AMD64Instr_Alu64R( Aalu_MOV, 
+                                             AMD64RMI_Mem(m16_rsp),
+                                             hregAMD64_RSI() )); /* 2nd arg */
+            addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn,
+                                           2, mk_RetLoc_simple(RLPri_Int) ));
+            /* MovxLQ is not exactly the right thing here.  We just
+               need to get the bottom 16 bits of RAX into dst, and zero
+               out everything else.  Assuming that the helper returns
+               a UInt with the top 16 bits zeroed out, it'll do,
+               though. */
+            addInstr(env, AMD64Instr_MovxLQ(False, hregAMD64_RAX(), dst));
+            return dst;
+         }
+
+         default: 
+            break;
+      }
+
+      /* Deal with unary 64-bit SIMD ops. */
+      switch (e->Iex.Unop.op) {
+         case Iop_CmpNEZ32x2:
+            fn = (HWord)h_generic_calc_CmpNEZ32x2; break;
+         case Iop_CmpNEZ16x4:
+            fn = (HWord)h_generic_calc_CmpNEZ16x4; break;
+         case Iop_CmpNEZ8x8:
+            fn = (HWord)h_generic_calc_CmpNEZ8x8; break;
+         default:
+            fn = (HWord)0; break;
+      }
+      if (fn != (HWord)0) {
+         /* Note: the following assumes all helpers are of
+            signature 
+               ULong fn ( ULong ), and they are
+            not marked as regparm functions. 
+         */
+         HReg dst = newVRegI(env);
+         HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
+         addInstr(env, mk_iMOVsd_RR(arg, hregAMD64_RDI()) );
+         addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 1,
+                                        mk_RetLoc_simple(RLPri_Int) ));
+         addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), dst));
+         return dst;
+      }
+
+      break;
+   }
+
+   /* --------- GET --------- */
+   case Iex_Get: {
+      if (ty == Ity_I64) {
+         HReg dst = newVRegI(env);
+         addInstr(env, AMD64Instr_Alu64R(
+                          Aalu_MOV, 
+                          AMD64RMI_Mem(
+                             AMD64AMode_IR(e->Iex.Get.offset,
+                                           hregAMD64_RBP())),
+                          dst));
+         return dst;
+      }
+      if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32) {
+         HReg dst = newVRegI(env);
+         addInstr(env, AMD64Instr_LoadEX(
+                          toUChar(ty==Ity_I8 ? 1 : (ty==Ity_I16 ? 2 : 4)),
+                          False,
+                          AMD64AMode_IR(e->Iex.Get.offset,hregAMD64_RBP()),
+                          dst));
+         return dst;
+      }
+      break;
+   }
+
+   case Iex_GetI: {
+      AMD64AMode* am 
+         = genGuestArrayOffset(
+              env, e->Iex.GetI.descr, 
+                   e->Iex.GetI.ix, e->Iex.GetI.bias );
+      HReg dst = newVRegI(env);
+      if (ty == Ity_I8) {
+         addInstr(env, AMD64Instr_LoadEX( 1, False, am, dst ));
+         return dst;
+      }
+      if (ty == Ity_I64) {
+         addInstr(env, AMD64Instr_Alu64R( Aalu_MOV, AMD64RMI_Mem(am), dst ));
+         return dst;
+      }
+      break;
+   }
+
+   /* --------- CCALL --------- */
+   case Iex_CCall: {
+      HReg    dst = newVRegI(env);
+      vassert(ty == e->Iex.CCall.retty);
+
+      /* be very restrictive for now.  Only 64-bit ints allowed for
+         args, and 64 or 32 bits for return type. */
+      if (e->Iex.CCall.retty != Ity_I64 && e->Iex.CCall.retty != Ity_I32)
+         goto irreducible;
+
+      /* Marshal args, do the call. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, NULL/*guard*/,
+                    e->Iex.CCall.cee, e->Iex.CCall.retty, e->Iex.CCall.args );
+      vassert(is_sane_RetLoc(rloc));
+      vassert(rloc.pri == RLPri_Int);
+      vassert(addToSp == 0);
+
+      /* Move to dst, and zero out the top 32 bits if the result type is
+         Ity_I32.  Probably overkill, but still .. */
+      if (e->Iex.CCall.retty == Ity_I64)
+         addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), dst));
+      else
+         addInstr(env, AMD64Instr_MovxLQ(False, hregAMD64_RAX(), dst));
+
+      return dst;
+   }
+
+   /* --------- LITERAL --------- */
+   /* 64/32/16/8-bit literals */
+   case Iex_Const:
+      if (ty == Ity_I64) {
+         HReg r = newVRegI(env);
+         addInstr(env, AMD64Instr_Imm64(e->Iex.Const.con->Ico.U64, r));
+         return r;
+      } else {
+         AMD64RMI* rmi = iselIntExpr_RMI ( env, e );
+         HReg      r   = newVRegI(env);
+         addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, rmi, r));
+         return r;
+      }
+
+   /* --------- MULTIPLEX --------- */
+   case Iex_ITE: { // VFD
+      if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
+          && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+         HReg     r1  = iselIntExpr_R(env, e->Iex.ITE.iftrue);
+         HReg     r0  = iselIntExpr_R(env, e->Iex.ITE.iffalse);
+         HReg     dst = newVRegI(env);
+         addInstr(env, mk_iMOVsd_RR(r1,dst));
+         AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+         addInstr(env, AMD64Instr_CMov64(cc ^ 1, r0, dst));
+         return dst;
+      }
+      break;
+   }
+
+   /* --------- TERNARY OP --------- */
+   case Iex_Triop: {
+      IRTriop *triop = e->Iex.Triop.details;
+      /* C3210 flags following FPU partial remainder (fprem), both
+         IEEE compliant (PREM1) and non-IEEE compliant (PREM). */
+      if (triop->op == Iop_PRemC3210F64
+          || triop->op == Iop_PRem1C3210F64) {
+         AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+         HReg        arg1   = iselDblExpr(env, triop->arg2);
+         HReg        arg2   = iselDblExpr(env, triop->arg3);
+         HReg        dst    = newVRegI(env);
+         addInstr(env, AMD64Instr_A87Free(2));
+
+         /* one arg -> top of x87 stack */
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, arg2, m8_rsp));
+         addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/, 8));
+
+         /* other arg -> top of x87 stack */
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, arg1, m8_rsp));
+         addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/, 8));
+
+         switch (triop->op) {
+            case Iop_PRemC3210F64:
+               addInstr(env, AMD64Instr_A87FpOp(Afp_PREM));
+               break;
+            case Iop_PRem1C3210F64:
+               addInstr(env, AMD64Instr_A87FpOp(Afp_PREM1));
+               break;
+            default: 
+               vassert(0);
+         }
+         /* Ignore the result, and instead make off with the FPU's
+	    C3210 flags (in the status word). */
+         addInstr(env, AMD64Instr_A87StSW(m8_rsp));
+         addInstr(env, AMD64Instr_Alu64R(Aalu_MOV,AMD64RMI_Mem(m8_rsp),dst));
+         addInstr(env, AMD64Instr_Alu64R(Aalu_AND,AMD64RMI_Imm(0x4700),dst));
+         return dst;
+      }
+      break;
+   }
+
+   default: 
+   break;
+   } /* switch (e->tag) */
+
+   /* We get here if no pattern matched. */
+  irreducible:
+   ppIRExpr(e);
+   vpanic("iselIntExpr_R(amd64): cannot reduce tree");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expression auxiliaries              ---*/
+/*---------------------------------------------------------*/
+
+/* --------------------- AMODEs --------------------- */
+
+/* Return an AMode which computes the value of the specified
+   expression, possibly also adding insns to the code list as a
+   result.  The expression may only be a 64-bit one.
+*/
+
+static AMD64AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check whatever amode it
+      produced before handing it to the caller. */
+   AMD64AMode* result = iselIntExpr_AMode_wrk(env, e);
+   vassert(sane_AMode(result));
+   return result;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static AMD64AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e )
+{
+   MatchInfo mi;
+   DECLARE_PATTERN(p_complex);
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   /* Addresses are 64-bit on amd64; anything else is a caller bug. */
+   vassert(ty == Ity_I64);
+
+   /* Patterns are tried most-specific first, so the richest
+      representable addressing mode gets used. */
+
+   /* Add64( Add64(expr1, Shl64(expr2, imm8)), simm32 ) */
+   /*              bind0        bind1  bind2   bind3   */
+   DEFINE_PATTERN(p_complex,
+      binop( Iop_Add64,
+             binop( Iop_Add64, 
+                    bind(0), 
+                    binop(Iop_Shl64, bind(1), bind(2))
+                  ),
+             bind(3)
+           )
+   );
+   if (matchIRExpr(&mi, p_complex, e)) {
+      IRExpr* expr1  = mi.bindee[0];
+      IRExpr* expr2  = mi.bindee[1];
+      IRExpr* imm8   = mi.bindee[2];
+      IRExpr* simm32 = mi.bindee[3];
+      /* Only shifts of 0..3 are accepted (scale factors 1/2/4/8),
+         and the displacement must fit the 32-bit field. */
+      if (imm8->tag == Iex_Const 
+          && imm8->Iex.Const.con->tag == Ico_U8
+          && imm8->Iex.Const.con->Ico.U8 < 4
+          /* imm8 is OK, now check simm32 */
+          && simm32->tag == Iex_Const
+          && simm32->Iex.Const.con->tag == Ico_U64
+          && fitsIn32Bits(simm32->Iex.Const.con->Ico.U64)) {
+         UInt shift = imm8->Iex.Const.con->Ico.U8;
+         UInt offset = toUInt(simm32->Iex.Const.con->Ico.U64);
+         HReg r1 = iselIntExpr_R(env, expr1);
+         HReg r2 = iselIntExpr_R(env, expr2);
+         vassert(shift == 0 || shift == 1 || shift == 2 || shift == 3);
+         /* amode is: offset + r1 + (r2 << shift) */
+         return AMD64AMode_IRRS(offset, r1, r2, shift);
+      }
+   }
+
+   /* Add64(expr1, Shl64(expr2, imm)) */
+   if (e->tag == Iex_Binop
+       && e->Iex.Binop.op == Iop_Add64
+       && e->Iex.Binop.arg2->tag == Iex_Binop
+       && e->Iex.Binop.arg2->Iex.Binop.op == Iop_Shl64
+       && e->Iex.Binop.arg2->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8) {
+      UInt shift = e->Iex.Binop.arg2->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+      /* Note: shift == 0 is not handled here; such trees fall
+         through to the generic register fallback below. */
+      if (shift == 1 || shift == 2 || shift == 3) {
+         HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg r2 = iselIntExpr_R(env, e->Iex.Binop.arg2->Iex.Binop.arg1 );
+         return AMD64AMode_IRRS(0, r1, r2, shift);
+      }
+   }
+
+   /* Add64(expr,i) -- base register plus 32-bit displacement. */
+   if (e->tag == Iex_Binop 
+       && e->Iex.Binop.op == Iop_Add64
+       && e->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U64
+       && fitsIn32Bits(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)) {
+      HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      return AMD64AMode_IR(
+                toUInt(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64), 
+                r1
+             );
+   }
+
+   /* Doesn't match anything in particular.  Generate it into
+      a register and use that. */
+   {
+      HReg r1 = iselIntExpr_R(env, e);
+      return AMD64AMode_IR(0, r1);
+   }
+}
+
+
+/* --------------------- RMIs --------------------- */
+
+/* Similarly, calculate an expression into an AMD64RMI operand.  As
+   with iselIntExpr_R, the expression can have type 64, 32, 16 or 8
+   bits.  */
+
+static AMD64RMI* iselIntExpr_RMI ( ISelEnv* env, IRExpr* e )
+{
+   /* Compute the operand, then sanity-check whichever of the three
+      forms (imm/reg/mem) the worker chose. */
+   AMD64RMI* rmi = iselIntExpr_RMI_wrk(env, e);
+   if (rmi->tag == Armi_Imm)
+      return rmi;
+   if (rmi->tag == Armi_Reg) {
+      vassert(hregClass(rmi->Armi.Reg.reg) == HRcInt64);
+      vassert(hregIsVirtual(rmi->Armi.Reg.reg));
+      return rmi;
+   }
+   if (rmi->tag == Armi_Mem) {
+      vassert(sane_AMode(rmi->Armi.Mem.am));
+      return rmi;
+   }
+   vpanic("iselIntExpr_RMI: unknown amd64 RMI tag");
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static AMD64RMI* iselIntExpr_RMI_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I64 || ty == Ity_I32
+           || ty == Ity_I16 || ty == Ity_I8);
+
+   /* Immediates of any accepted width go straight into the operand;
+      a 64-bit constant qualifies only if it fits in 32 bits. */
+   if (e->tag == Iex_Const) {
+      IRConst* con = e->Iex.Const.con;
+      switch (con->tag) {
+         case Ico_U64:
+            if (fitsIn32Bits(con->Ico.U64))
+               return AMD64RMI_Imm(toUInt(con->Ico.U64));
+            break;
+         case Ico_U32:
+            return AMD64RMI_Imm(con->Ico.U32);
+         case Ico_U16:
+            return AMD64RMI_Imm(0xFFFF & con->Ico.U16);
+         case Ico_U8:
+            return AMD64RMI_Imm(0xFF & con->Ico.U8);
+         default:
+            vpanic("iselIntExpr_RMI.Iex_Const(amd64)");
+      }
+   }
+
+   /* A 64-bit guest-state read maps onto a memory operand based off
+      the baseblock pointer (RBP). */
+   if (e->tag == Iex_Get && ty == Ity_I64)
+      return AMD64RMI_Mem(AMD64AMode_IR(e->Iex.Get.offset,
+                                        hregAMD64_RBP()));
+
+   /* A 64-bit little-endian load likewise becomes a memory operand. */
+   if (e->tag == Iex_Load && ty == Ity_I64
+       && e->Iex.Load.end == Iend_LE)
+      return AMD64RMI_Mem(iselIntExpr_AMode(env, e->Iex.Load.addr));
+
+   /* Otherwise just evaluate the expression into a register. */
+   return AMD64RMI_Reg(iselIntExpr_R(env, e));
+}
+
+
+/* --------------------- RIs --------------------- */
+
+/* Calculate an expression into an AMD64RI operand.  As with
+   iselIntExpr_R, the expression can have type 64, 32, 16 or 8
+   bits. */
+
+static AMD64RI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e )
+{
+   /* Compute the operand, then sanity-check whichever of the two
+      forms (imm/reg) the worker chose. */
+   AMD64RI* ri = iselIntExpr_RI_wrk(env, e);
+   if (ri->tag == Ari_Imm)
+      return ri;
+   if (ri->tag == Ari_Reg) {
+      vassert(hregClass(ri->Ari.Reg.reg) == HRcInt64);
+      vassert(hregIsVirtual(ri->Ari.Reg.reg));
+      return ri;
+   }
+   vpanic("iselIntExpr_RI: unknown amd64 RI tag");
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static AMD64RI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I64 || ty == Ity_I32 
+           || ty == Ity_I16 || ty == Ity_I8);
+
+   /* special case: immediate */
+   if (e->tag == Iex_Const) {
+      switch (e->Iex.Const.con->tag) {
+         case Ico_U64:
+            /* Only a 32-bit immediate field is available, so a 64-bit
+               constant qualifies only if it fits in 32 bits. */
+            if (fitsIn32Bits(e->Iex.Const.con->Ico.U64)) {
+               return AMD64RI_Imm(toUInt(e->Iex.Const.con->Ico.U64));
+            }
+            break;
+         case Ico_U32:
+            return AMD64RI_Imm(e->Iex.Const.con->Ico.U32);
+         case Ico_U16: 
+            return AMD64RI_Imm(0xFFFF & e->Iex.Const.con->Ico.U16);
+         case Ico_U8:
+            return AMD64RI_Imm(0xFF & e->Iex.Const.con->Ico.U8);
+         default:
+            /* Panic message previously said "iselIntExpr_RMI..." -- a
+               copy-paste from the RMI variant; corrected to name this
+               function so a failure points at the right place. */
+            vpanic("iselIntExpr_RI.Iex_Const(amd64)");
+      }
+   }
+
+   /* default case: calculate into a register and return that */
+   {
+      HReg r = iselIntExpr_R ( env, e );
+      return AMD64RI_Reg(r);
+   }
+}
+
+
+/* --------------------- RMs --------------------- */
+
+/* Similarly, calculate an expression into an AMD64RM operand.  As
+   with iselIntExpr_R, the expression can have type 64, 32, 16 or 8
+   bits.  */
+
+static AMD64RM* iselIntExpr_RM ( ISelEnv* env, IRExpr* e )
+{
+   /* Compute the operand, then sanity-check whichever of the two
+      forms (reg/mem) the worker chose. */
+   AMD64RM* rm = iselIntExpr_RM_wrk(env, e);
+   if (rm->tag == Arm_Reg) {
+      vassert(hregClass(rm->Arm.Reg.reg) == HRcInt64);
+      vassert(hregIsVirtual(rm->Arm.Reg.reg));
+      return rm;
+   }
+   if (rm->tag == Arm_Mem) {
+      vassert(sane_AMode(rm->Arm.Mem.am));
+      return rm;
+   }
+   vpanic("iselIntExpr_RM: unknown amd64 RM tag");
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static AMD64RM* iselIntExpr_RM_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
+
+   /* A 64-bit guest-state read becomes a memory operand based off
+      the baseblock pointer (RBP). */
+   if (e->tag == Iex_Get && ty == Ity_I64)
+      return AMD64RM_Mem(AMD64AMode_IR(e->Iex.Get.offset,
+                                       hregAMD64_RBP()));
+
+   /* NOTE: a load-from-memory special case could live here, but is
+      deliberately not implemented (matches original behaviour). */
+
+   /* Fallback: evaluate the expression into a register. */
+   return AMD64RM_Reg(iselIntExpr_R(env, e));
+}
+
+
+/* --------------------- CONDCODE --------------------- */
+
+/* Generate code to evaluate a bit-typed (Ity_I1) expression, returning
+   the condition code that corresponds to the expression notionally
+   having returned 1. */
+
+static AMD64CondCode iselCondCode ( ISelEnv* env, IRExpr* e )
+{
+   /* Thin wrapper over the workhorse; unlike the _R/_RM wrappers, an
+      AMD64CondCode is a plain enum, so there is nothing to check. */
+   /* Uh, there's nothing we can sanity check here, unfortunately. */
+   return iselCondCode_wrk(env,e);
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+/* Workhorse for iselCondCode.  Emits code which leaves the host
+   condition codes set such that the returned AMD64CondCode holds
+   exactly when |e| (an Ity_I1 expression) evaluates to 1.  Tries a
+   sequence of special-case patterns and panics if none matches. */
+static AMD64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
+{
+   MatchInfo mi;
+
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I1);
+
+   /* var: the I1 value lives in bit 0 of a 64-bit temp.  Copy the
+      temp (so it is not clobbered), mask bit 0, and test for
+      not-zero. */
+   if (e->tag == Iex_RdTmp) {
+      HReg r64 = lookupIRTemp(env, e->Iex.RdTmp.tmp);
+      HReg dst = newVRegI(env);
+      addInstr(env, mk_iMOVsd_RR(r64,dst));
+      addInstr(env, AMD64Instr_Alu64R(Aalu_AND,AMD64RMI_Imm(1),dst));
+      return Acc_NZ;
+   }
+
+   /* Constant 1:Bit.  Force the Z flag to be set (xor of a zeroed
+      register with itself yields zero), then pick Z or NZ so the
+      condition is constantly true or constantly false
+      respectively. */
+   if (e->tag == Iex_Const) {
+      HReg r;
+      vassert(e->Iex.Const.con->tag == Ico_U1);
+      vassert(e->Iex.Const.con->Ico.U1 == True 
+              || e->Iex.Const.con->Ico.U1 == False);
+      r = newVRegI(env);
+      addInstr(env, AMD64Instr_Alu64R(Aalu_MOV,AMD64RMI_Imm(0),r));
+      addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,AMD64RMI_Reg(r),r));
+      return e->Iex.Const.con->Ico.U1 ? Acc_Z : Acc_NZ;
+   }
+
+   /* Not1(...) */
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) {
+      /* Generate code for the arg, and negate the test condition.
+         Flipping bit 0 of an AMD64CondCode inverts its sense. */
+      return 1 ^ iselCondCode(env, e->Iex.Unop.arg);
+   }
+
+   /* --- patterns rooted at: 64to1 --- */
+
+   /* 64to1: only bit 0 of the argument is significant. */
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_64to1) {
+      HReg reg = iselIntExpr_R(env, e->Iex.Unop.arg);
+      addInstr(env, AMD64Instr_Test64(1,reg));
+      return Acc_NZ;
+   }
+
+   /* --- patterns rooted at: 32to1 --- */
+
+   /* 32to1: as for 64to1, only bit 0 matters. */
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_32to1) {
+      HReg reg = iselIntExpr_R(env, e->Iex.Unop.arg);
+      addInstr(env, AMD64Instr_Test64(1,reg));
+      return Acc_NZ;
+   }
+
+   /* --- patterns rooted at: CmpNEZ8 --- */
+
+   /* CmpNEZ8(x): test only the low 8 bits. */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ8) {
+      HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
+      addInstr(env, AMD64Instr_Test64(0xFF,r));
+      return Acc_NZ;
+   }
+
+   /* --- patterns rooted at: CmpNEZ16 --- */
+
+   /* CmpNEZ16(x): test only the low 16 bits. */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ16) {
+      HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
+      addInstr(env, AMD64Instr_Test64(0xFFFF,r));
+      return Acc_NZ;
+   }
+
+   /* --- patterns rooted at: CmpNEZ32 --- */
+
+   /* CmpNEZ32(x): 32-bit compare against zero. */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ32) {
+      HReg      r1   = iselIntExpr_R(env, e->Iex.Unop.arg);
+      AMD64RMI* rmi2 = AMD64RMI_Imm(0);
+      addInstr(env, AMD64Instr_Alu32R(Aalu_CMP,rmi2,r1));
+      return Acc_NZ;
+   }
+
+   /* --- patterns rooted at: CmpNEZ64 --- */
+
+   /* CmpNEZ64(Or64(x,y)): fold the Or into a flag-setting OR
+      instruction, avoiding a separate compare against zero. */
+   {
+      DECLARE_PATTERN(p_CmpNEZ64_Or64);
+      DEFINE_PATTERN(p_CmpNEZ64_Or64,
+                     unop(Iop_CmpNEZ64, binop(Iop_Or64, bind(0), bind(1))));
+      if (matchIRExpr(&mi, p_CmpNEZ64_Or64, e)) {
+         HReg      r0   = iselIntExpr_R(env, mi.bindee[0]);
+         AMD64RMI* rmi1 = iselIntExpr_RMI(env, mi.bindee[1]);
+         HReg      tmp  = newVRegI(env);
+         addInstr(env, mk_iMOVsd_RR(r0, tmp));
+         addInstr(env, AMD64Instr_Alu64R(Aalu_OR,rmi1,tmp));
+         return Acc_NZ;
+      }
+   }
+
+   /* CmpNEZ64(x): 64-bit compare against zero. */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ64) {
+      HReg      r1   = iselIntExpr_R(env, e->Iex.Unop.arg);
+      AMD64RMI* rmi2 = AMD64RMI_Imm(0);
+      addInstr(env, AMD64Instr_Alu64R(Aalu_CMP,rmi2,r1));
+      return Acc_NZ;
+   }
+
+   /* --- patterns rooted at: Cmp{EQ,NE}{8,16,32} --- */
+
+   /* CmpEQ8 / CmpNE8: equality is computed by XOR-ing the operands
+      and masking down to the low 8 bits; a zero result means
+      equal. */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpEQ8
+           || e->Iex.Binop.op == Iop_CmpNE8
+           || e->Iex.Binop.op == Iop_CasCmpEQ8
+           || e->Iex.Binop.op == Iop_CasCmpNE8)) {
+      if (isZeroU8(e->Iex.Binop.arg2)) {
+         /* Comparison against zero: a single TEST suffices. */
+         HReg      r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         addInstr(env, AMD64Instr_Test64(0xFF,r1));
+         switch (e->Iex.Binop.op) {
+            case Iop_CmpEQ8: case Iop_CasCmpEQ8: return Acc_Z;
+            case Iop_CmpNE8: case Iop_CasCmpNE8: return Acc_NZ;
+            default: vpanic("iselCondCode(amd64): CmpXX8(expr,0:I8)");
+         }
+      } else {
+         HReg      r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         AMD64RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+         HReg      r    = newVRegI(env);
+         addInstr(env, mk_iMOVsd_RR(r1,r));
+         addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,rmi2,r));
+         addInstr(env, AMD64Instr_Alu64R(Aalu_AND,AMD64RMI_Imm(0xFF),r));
+         switch (e->Iex.Binop.op) {
+            case Iop_CmpEQ8: case Iop_CasCmpEQ8: return Acc_Z;
+            case Iop_CmpNE8: case Iop_CasCmpNE8: return Acc_NZ;
+            default: vpanic("iselCondCode(amd64): CmpXX8(expr,expr)");
+         }
+      }
+   }
+
+   /* CmpEQ16 / CmpNE16: same XOR-then-mask scheme, on 16 bits. */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpEQ16
+           || e->Iex.Binop.op == Iop_CmpNE16
+           || e->Iex.Binop.op == Iop_CasCmpEQ16
+           || e->Iex.Binop.op == Iop_CasCmpNE16)) {
+      HReg      r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      AMD64RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+      HReg      r    = newVRegI(env);
+      addInstr(env, mk_iMOVsd_RR(r1,r));
+      addInstr(env, AMD64Instr_Alu64R(Aalu_XOR,rmi2,r));
+      addInstr(env, AMD64Instr_Alu64R(Aalu_AND,AMD64RMI_Imm(0xFFFF),r));
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpEQ16: case Iop_CasCmpEQ16: return Acc_Z;
+         case Iop_CmpNE16: case Iop_CasCmpNE16: return Acc_NZ;
+         default: vpanic("iselCondCode(amd64): CmpXX16");
+      }
+   }
+
+   /* CmpNE64(ccall, 64-bit constant) (--smc-check=all optimisation).
+      Saves a "movq %rax, %tmp" compared to the default route.
+      NOTE(review): depends on the helper's integer result still
+      being live in %rax when the compare executes. */
+   if (e->tag == Iex_Binop 
+       && e->Iex.Binop.op == Iop_CmpNE64
+       && e->Iex.Binop.arg1->tag == Iex_CCall
+       && e->Iex.Binop.arg2->tag == Iex_Const) {
+      IRExpr* cal = e->Iex.Binop.arg1;
+      IRExpr* con = e->Iex.Binop.arg2;
+      HReg    tmp = newVRegI(env);
+      /* clone & partial-eval of generic Iex_CCall and Iex_Const cases */
+      vassert(cal->Iex.CCall.retty == Ity_I64); /* else ill-typed IR */
+      vassert(con->Iex.Const.con->tag == Ico_U64);
+      /* Marshal args, do the call. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, NULL/*guard*/,
+                    cal->Iex.CCall.cee,
+                    cal->Iex.CCall.retty, cal->Iex.CCall.args );
+      vassert(is_sane_RetLoc(rloc));
+      vassert(rloc.pri == RLPri_Int);
+      vassert(addToSp == 0);
+      /* Compare the constant directly against the %rax result. */
+      addInstr(env, AMD64Instr_Imm64(con->Iex.Const.con->Ico.U64, tmp));
+      addInstr(env, AMD64Instr_Alu64R(Aalu_CMP,
+                                      AMD64RMI_Reg(hregAMD64_RAX()), tmp));
+      return Acc_NZ;
+   }
+
+   /* Cmp*64*(x,y): full 64-bit compare; map each IROp to the
+      corresponding signed/unsigned AMD64 condition. */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpEQ64
+           || e->Iex.Binop.op == Iop_CmpNE64
+           || e->Iex.Binop.op == Iop_CmpLT64S
+           || e->Iex.Binop.op == Iop_CmpLT64U
+           || e->Iex.Binop.op == Iop_CmpLE64S
+           || e->Iex.Binop.op == Iop_CmpLE64U
+           || e->Iex.Binop.op == Iop_CasCmpEQ64
+           || e->Iex.Binop.op == Iop_CasCmpNE64
+           || e->Iex.Binop.op == Iop_ExpCmpNE64)) {
+      HReg      r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      AMD64RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+      addInstr(env, AMD64Instr_Alu64R(Aalu_CMP,rmi2,r1));
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpEQ64: case Iop_CasCmpEQ64: return Acc_Z;
+         case Iop_CmpNE64:
+         case Iop_CasCmpNE64: case Iop_ExpCmpNE64: return Acc_NZ;
+         case Iop_CmpLT64S: return Acc_L;
+         case Iop_CmpLT64U: return Acc_B;
+         case Iop_CmpLE64S: return Acc_LE;
+         case Iop_CmpLE64U: return Acc_BE;
+         default: vpanic("iselCondCode(amd64): CmpXX64");
+      }
+   }
+
+   /* Cmp*32*(x,y): as above, but with a 32-bit compare. */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpEQ32
+           || e->Iex.Binop.op == Iop_CmpNE32
+           || e->Iex.Binop.op == Iop_CmpLT32S
+           || e->Iex.Binop.op == Iop_CmpLT32U
+           || e->Iex.Binop.op == Iop_CmpLE32S
+           || e->Iex.Binop.op == Iop_CmpLE32U
+           || e->Iex.Binop.op == Iop_CasCmpEQ32
+           || e->Iex.Binop.op == Iop_CasCmpNE32
+           || e->Iex.Binop.op == Iop_ExpCmpNE32)) {
+      HReg      r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      AMD64RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+      addInstr(env, AMD64Instr_Alu32R(Aalu_CMP,rmi2,r1));
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpEQ32: case Iop_CasCmpEQ32: return Acc_Z;
+         case Iop_CmpNE32:
+         case Iop_CasCmpNE32: case Iop_ExpCmpNE32: return Acc_NZ;
+         case Iop_CmpLT32S: return Acc_L;
+         case Iop_CmpLT32U: return Acc_B;
+         case Iop_CmpLE32S: return Acc_LE;
+         case Iop_CmpLE32U: return Acc_BE;
+         default: vpanic("iselCondCode(amd64): CmpXX32");
+      }
+   }
+
+   ppIRExpr(e);
+   vpanic("iselCondCode(amd64)");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (128 bit)               ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 128-bit value into a register pair, which is returned as
+   the first two parameters.  As with iselIntExpr_R, these may be
+   either real or virtual regs; in any case they must not be changed
+   by subsequent code emitted by the caller.  */
+
+static void iselInt128Expr ( HReg* rHi, HReg* rLo, 
+                             ISelEnv* env, IRExpr* e )
+{
+   iselInt128Expr_wrk(rHi, rLo, env, e);
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* sanity: both halves must come back as virtual 64-bit int
+      registers. */
+   vassert(hregClass(*rHi) == HRcInt64);
+   vassert(hregIsVirtual(*rHi));
+   vassert(hregClass(*rLo) == HRcInt64);
+   vassert(hregIsVirtual(*rLo));
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+/* Workhorse for iselInt128Expr: handles 128-bit temporaries and the
+   few binary ops that produce Ity_I128 results (widening multiply,
+   128/64 divmod, and 64HLto128). */
+static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, 
+                                 ISelEnv* env, IRExpr* e )
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I128);
+
+   /* read 128-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp);
+      return;
+   }
+ 
+   /* --------- BINARY ops --------- */
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         /* 64 x 64 -> 128 multiply */
+         case Iop_MullU64:
+         case Iop_MullS64: {
+            /* get one operand into %rax, and the other into a R/M.
+               Need to make an educated guess about which is better in
+               which. */
+            /* Here, arg2 goes to %rax and arg1 becomes the R/M
+               operand of the MUL/IMUL. */
+            HReg     tLo    = newVRegI(env);
+            HReg     tHi    = newVRegI(env);
+            Bool     syned  = toBool(e->Iex.Binop.op == Iop_MullS64);
+            AMD64RM* rmLeft = iselIntExpr_RM(env, e->Iex.Binop.arg1);
+            HReg     rRight = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            addInstr(env, mk_iMOVsd_RR(rRight, hregAMD64_RAX()));
+            addInstr(env, AMD64Instr_MulL(syned, rmLeft));
+            /* Result is now in RDX:RAX.  Tell the caller. */
+            addInstr(env, mk_iMOVsd_RR(hregAMD64_RDX(), tHi));
+            addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 128 x 64 -> (64(rem),64(div)) division */
+         case Iop_DivModU128to64:
+         case Iop_DivModS128to64: {
+            /* Get the 128-bit operand into rdx:rax, and the other into
+               any old R/M. */
+            HReg sHi, sLo;
+            HReg     tLo     = newVRegI(env);
+            HReg     tHi     = newVRegI(env);
+            Bool     syned   = toBool(e->Iex.Binop.op == Iop_DivModS128to64);
+            AMD64RM* rmRight = iselIntExpr_RM(env, e->Iex.Binop.arg2);
+            iselInt128Expr(&sHi,&sLo, env, e->Iex.Binop.arg1);
+            addInstr(env, mk_iMOVsd_RR(sHi, hregAMD64_RDX()));
+            addInstr(env, mk_iMOVsd_RR(sLo, hregAMD64_RAX()));
+            addInstr(env, AMD64Instr_Div(syned, 8, rmRight));
+            /* DIV leaves quotient in RAX, remainder in RDX; hand
+               back remainder as the high half. */
+            addInstr(env, mk_iMOVsd_RR(hregAMD64_RDX(), tHi));
+            addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 64HLto128(e1,e2): just pair up the two halves. */
+         case Iop_64HLto128:
+            *rHi = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            *rLo = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            return;
+
+         default: 
+            break;
+      }
+   } /* if (e->tag == Iex_Binop) */
+
+   ppIRExpr(e);
+   vpanic("iselInt128Expr");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (32 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Nothing interesting here; really just wrappers for
+   64-bit stuff. */
+
+/* Compute a 32-bit float into the low lane of an xmm register (hence
+   HRcVec128), returned as a virtual register. */
+static HReg iselFltExpr ( ISelEnv* env, IRExpr* e )
+{
+   HReg r = iselFltExpr_wrk( env, e );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(r) == HRcVec128);
+   vassert(hregIsVirtual(r));
+   return r;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+/* Workhorse for iselFltExpr: select instructions for an Ity_F32
+   expression, producing the value in the low 4 bytes of an xmm
+   vreg.  Panics if no case matches. */
+static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_F32);
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* 4-byte little-endian load straight into an xmm reg. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      AMD64AMode* am;
+      HReg res = newVRegV(env);
+      vassert(e->Iex.Load.ty == Ity_F32);
+      am = iselIntExpr_AMode(env, e->Iex.Load.addr);
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 4, res, am));
+      return res;
+   }
+
+   if (e->tag == Iex_Binop
+       && e->Iex.Binop.op == Iop_F64toF32) {
+      /* Although the result is still held in a standard SSE register,
+         we need to round it to reflect the loss of accuracy/range
+         entailed in casting it to a 32-bit float.  arg1 supplies the
+         IR rounding mode. */
+      HReg dst = newVRegV(env);
+      HReg src = iselDblExpr(env, e->Iex.Binop.arg2);
+      set_SSE_rounding_mode( env, e->Iex.Binop.arg1 );
+      addInstr(env, AMD64Instr_SseSDSS(True/*D->S*/,src,dst));
+      set_SSE_rounding_default( env );
+      return dst;
+   }
+
+   /* 4-byte read from the guest state, addressed off %rbp. */
+   if (e->tag == Iex_Get) {
+      AMD64AMode* am = AMD64AMode_IR( e->Iex.Get.offset,
+                                       hregAMD64_RBP() );
+      HReg res = newVRegV(env);
+      addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 4, res, am ));
+      return res;
+   }
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_ReinterpI32asF32) {
+       /* Given an I32, produce an IEEE754 float with the same bit
+          pattern.  Bounce it through the redzone at -4(%rsp). */
+       HReg        dst    = newVRegV(env);
+       HReg        src    = iselIntExpr_R(env, e->Iex.Unop.arg);
+       AMD64AMode* m4_rsp = AMD64AMode_IR(-4, hregAMD64_RSP());
+       addInstr(env, AMD64Instr_Store(4, src, m4_rsp));
+       addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 4, dst, m4_rsp ));
+       return dst;
+   }
+
+   if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_RoundF32toInt) {
+      AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+      HReg        arg    = iselFltExpr(env, e->Iex.Binop.arg2);
+      HReg        dst    = newVRegV(env);
+
+      /* rf now holds the value to be rounded.  The first thing to do
+         is set the FPU's rounding mode accordingly. */
+
+      /* Set host x87 rounding mode */
+      set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
+
+      /* Round via the x87: store to memory, push onto the x87 stack,
+         FRNDINT, pop back to memory, reload into the xmm reg. */
+      addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 4, arg, m8_rsp));
+      addInstr(env, AMD64Instr_A87Free(1));
+      addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/, 4));
+      addInstr(env, AMD64Instr_A87FpOp(Afp_ROUND));
+      addInstr(env, AMD64Instr_A87PushPop(m8_rsp, False/*pop*/, 4));
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 4, dst, m8_rsp));
+
+      /* Restore default x87 rounding. */
+      set_FPU_rounding_default( env );
+
+      return dst;
+   }
+
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_NegF32) {
+      /* Sigh ... very rough code.  Could do much better. */
+      /* Get the 128-bit literal 00---0 10---0 into a register
+         and xor it with the value to be negated.  Pushing Imm(0)
+         then 1<<31 builds the sign-bit mask for lane 0 at (%rsp). */
+      HReg r1  = newVRegI(env);
+      HReg dst = newVRegV(env);
+      HReg tmp = newVRegV(env);
+      HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+      AMD64AMode* rsp0 = AMD64AMode_IR(0, hregAMD64_RSP());
+      addInstr(env, mk_vMOVsd_RR(src,tmp));
+      addInstr(env, AMD64Instr_Push(AMD64RMI_Imm(0)));
+      addInstr(env, AMD64Instr_Imm64( 1ULL<<31, r1 ));
+      addInstr(env, AMD64Instr_Push(AMD64RMI_Reg(r1)));
+      addInstr(env, AMD64Instr_SseLdSt(True, 16, dst, rsp0));
+      addInstr(env, AMD64Instr_SseReRg(Asse_XOR, tmp, dst));
+      add_to_rsp(env, 16);
+      return dst;
+   }
+
+   /* Fused multiply-add: done by calling a C helper with pointers to
+      stack-resident copies of the three operands and the result. */
+   if (e->tag == Iex_Qop && e->Iex.Qop.details->op == Iop_MAddF32) {
+      IRQop *qop = e->Iex.Qop.details;
+      HReg dst  = newVRegV(env);
+      HReg argX = iselFltExpr(env, qop->arg2);
+      HReg argY = iselFltExpr(env, qop->arg3);
+      HReg argZ = iselFltExpr(env, qop->arg4);
+      /* XXXROUNDINGFIXME */
+      /* set roundingmode here */
+      /* subq $16, %rsp         -- make a space*/
+      sub_from_rsp(env, 16);
+      /* Prepare 4 arg regs:
+         leaq 0(%rsp), %rdi
+         leaq 4(%rsp), %rsi
+         leaq 8(%rsp), %rdx
+         leaq 12(%rsp), %rcx
+      */
+      addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(0, hregAMD64_RSP()),
+                                     hregAMD64_RDI()));
+      addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(4, hregAMD64_RSP()),
+                                     hregAMD64_RSI()));
+      addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(8, hregAMD64_RSP()),
+                                     hregAMD64_RDX()));
+      addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(12, hregAMD64_RSP()),
+                                     hregAMD64_RCX()));
+      /* Store the three args, at (%rsi), (%rdx) and (%rcx):
+         movss  %argX, 0(%rsi)
+         movss  %argY, 0(%rdx)
+         movss  %argZ, 0(%rcx)
+         */
+      addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 4, argX,
+                                       AMD64AMode_IR(0, hregAMD64_RSI())));
+      addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 4, argY,
+                                       AMD64AMode_IR(0, hregAMD64_RDX())));
+      addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 4, argZ,
+                                       AMD64AMode_IR(0, hregAMD64_RCX())));
+      /* call the helper */
+      addInstr(env, AMD64Instr_Call( Acc_ALWAYS,
+                                     (ULong)(HWord)h_generic_calc_MAddF32,
+                                     4, mk_RetLoc_simple(RLPri_None) ));
+      /* fetch the result from memory, using %r_argp, which the
+         register allocator will keep alive across the call.
+         NOTE(review): the mention of %r_argp looks stale -- the code
+         actually reloads from 0(%rsp), where %rdi pointed. */
+      addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 4, dst,
+                                       AMD64AMode_IR(0, hregAMD64_RSP())));
+      /* and finally, clear the space */
+      add_to_rsp(env, 16);
+      return dst;
+   }
+
+   ppIRExpr(e);
+   vpanic("iselFltExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (64 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 64-bit floating point value into the lower half of an xmm
+   register, the identity of which is returned.  As with
+   iselIntExpr_R, the returned reg will be virtual, and it must not be
+   changed by subsequent code emitted by the caller.
+*/
+
+/* IEEE 754 formats.  From http://www.freesoft.org/CIE/RFC/1832/32.htm:
+
+    Type                  S (1 bit)   E (11 bits)   F (52 bits)
+    ----                  ---------   -----------   -----------
+    signalling NaN        u           2047 (max)    .0uuuuu---u
+                                                    (with at least
+                                                     one 1 bit)
+    quiet NaN             u           2047 (max)    .1uuuuu---u
+
+    negative infinity     1           2047 (max)    .000000---0
+
+    positive infinity     0           2047 (max)    .000000---0
+
+    negative zero         1           0             .000000---0
+
+    positive zero         0           0             .000000---0
+*/
+
+static HReg iselDblExpr ( ISelEnv* env, IRExpr* e )
+{
+   HReg r = iselDblExpr_wrk( env, e );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* Result lives in the low 8 bytes of an xmm vreg. */
+   vassert(hregClass(r) == HRcVec128);
+   vassert(hregIsVirtual(r));
+   return r;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_F64);
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Const) {
+      union { ULong u64; Double f64; } u;
+      HReg res = newVRegV(env);
+      HReg tmp = newVRegI(env);
+      vassert(sizeof(u) == 8);
+      vassert(sizeof(u.u64) == 8);
+      vassert(sizeof(u.f64) == 8);
+
+      if (e->Iex.Const.con->tag == Ico_F64) {
+         u.f64 = e->Iex.Const.con->Ico.F64;
+      }
+      else if (e->Iex.Const.con->tag == Ico_F64i) {
+         u.u64 = e->Iex.Const.con->Ico.F64i;
+      }
+      else
+         vpanic("iselDblExpr(amd64): const");
+
+      addInstr(env, AMD64Instr_Imm64(u.u64, tmp));
+      addInstr(env, AMD64Instr_Push(AMD64RMI_Reg(tmp)));
+      addInstr(env, AMD64Instr_SseLdSt(
+                       True/*load*/, 8, res, 
+                       AMD64AMode_IR(0, hregAMD64_RSP())
+              ));
+      add_to_rsp(env, 8);
+      return res;
+   }
+
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      AMD64AMode* am;
+      HReg res = newVRegV(env);
+      vassert(e->Iex.Load.ty == Ity_F64);
+      am = iselIntExpr_AMode(env, e->Iex.Load.addr);
+      addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 8, res, am ));
+      return res;
+   }
+
+   if (e->tag == Iex_Get) {
+      AMD64AMode* am = AMD64AMode_IR( e->Iex.Get.offset,
+                                      hregAMD64_RBP() );
+      HReg res = newVRegV(env);
+      addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 8, res, am ));
+      return res;
+   }
+
+   if (e->tag == Iex_GetI) {
+      AMD64AMode* am 
+         = genGuestArrayOffset(
+              env, e->Iex.GetI.descr, 
+                   e->Iex.GetI.ix, e->Iex.GetI.bias );
+      HReg res = newVRegV(env);
+      addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 8, res, am ));
+      return res;
+   }
+
+   if (e->tag == Iex_Triop) {
+      IRTriop *triop = e->Iex.Triop.details;
+      AMD64SseOp op = Asse_INVALID;
+      switch (triop->op) {
+         case Iop_AddF64: op = Asse_ADDF; break;
+         case Iop_SubF64: op = Asse_SUBF; break;
+         case Iop_MulF64: op = Asse_MULF; break;
+         case Iop_DivF64: op = Asse_DIVF; break;
+         default: break;
+      }
+      if (op != Asse_INVALID) {
+         HReg dst  = newVRegV(env);
+         HReg argL = iselDblExpr(env, triop->arg2);
+         HReg argR = iselDblExpr(env, triop->arg3);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, AMD64Instr_Sse64FLo(op, argR, dst));
+         return dst;
+      }
+   }
+
+   if (e->tag == Iex_Qop && e->Iex.Qop.details->op == Iop_MAddF64) {
+      IRQop *qop = e->Iex.Qop.details;
+      HReg dst  = newVRegV(env);
+      HReg argX = iselDblExpr(env, qop->arg2);
+      HReg argY = iselDblExpr(env, qop->arg3);
+      HReg argZ = iselDblExpr(env, qop->arg4);
+      /* XXXROUNDINGFIXME */
+      /* set roundingmode here */
+      /* subq $32, %rsp         -- make a space*/
+      sub_from_rsp(env, 32);
+      /* Prepare 4 arg regs:
+         leaq 0(%rsp), %rdi
+         leaq 8(%rsp), %rsi
+         leaq 16(%rsp), %rdx
+         leaq 24(%rsp), %rcx
+      */
+      addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(0, hregAMD64_RSP()),
+                                     hregAMD64_RDI()));
+      addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(8, hregAMD64_RSP()),
+                                     hregAMD64_RSI()));
+      addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(16, hregAMD64_RSP()),
+                                     hregAMD64_RDX()));
+      addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(24, hregAMD64_RSP()),
+                                     hregAMD64_RCX()));
+      /* Store the three args, at (%rsi), (%rdx) and (%rcx):
+         movsd  %argX, 0(%rsi)
+         movsd  %argY, 0(%rdx)
+         movsd  %argZ, 0(%rcx)
+         */
+      addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 8, argX,
+                                       AMD64AMode_IR(0, hregAMD64_RSI())));
+      addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 8, argY,
+                                       AMD64AMode_IR(0, hregAMD64_RDX())));
+      addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 8, argZ,
+                                       AMD64AMode_IR(0, hregAMD64_RCX())));
+      /* call the helper */
+      addInstr(env, AMD64Instr_Call( Acc_ALWAYS,
+                                     (ULong)(HWord)h_generic_calc_MAddF64,
+                                     4, mk_RetLoc_simple(RLPri_None) ));
+      /* fetch the result from memory, using %r_argp, which the
+         register allocator will keep alive across the call. */
+      addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 8, dst,
+                                       AMD64AMode_IR(0, hregAMD64_RSP())));
+      /* and finally, clear the space */
+      add_to_rsp(env, 32);
+      return dst;
+   }
+
+   if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_RoundF64toInt) {
+      AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+      HReg        arg    = iselDblExpr(env, e->Iex.Binop.arg2);
+      HReg        dst    = newVRegV(env);
+
+      /* rf now holds the value to be rounded.  The first thing to do
+         is set the FPU's rounding mode accordingly. */
+
+      /* Set host x87 rounding mode */
+      set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
+
+      addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, arg, m8_rsp));
+      addInstr(env, AMD64Instr_A87Free(1));
+      addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/, 8));
+      addInstr(env, AMD64Instr_A87FpOp(Afp_ROUND));
+      addInstr(env, AMD64Instr_A87PushPop(m8_rsp, False/*pop*/, 8));
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 8, dst, m8_rsp));
+
+      /* Restore default x87 rounding. */
+      set_FPU_rounding_default( env );
+
+      return dst;
+   }
+
+   IRTriop *triop = e->Iex.Triop.details;
+   if (e->tag == Iex_Triop 
+       && (triop->op == Iop_ScaleF64
+           || triop->op == Iop_AtanF64
+           || triop->op == Iop_Yl2xF64
+           || triop->op == Iop_Yl2xp1F64
+           || triop->op == Iop_PRemF64
+           || triop->op == Iop_PRem1F64)
+      ) {
+      AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+      HReg        arg1   = iselDblExpr(env, triop->arg2);
+      HReg        arg2   = iselDblExpr(env, triop->arg3);
+      HReg        dst    = newVRegV(env);
+      Bool     arg2first = toBool(triop->op == Iop_ScaleF64 
+                                  || triop->op == Iop_PRemF64
+                                  || triop->op == Iop_PRem1F64);
+      addInstr(env, AMD64Instr_A87Free(2));
+
+      /* one arg -> top of x87 stack */
+      addInstr(env, AMD64Instr_SseLdSt(
+                       False/*store*/, 8, arg2first ? arg2 : arg1, m8_rsp));
+      addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/, 8));
+
+      /* other arg -> top of x87 stack */
+      addInstr(env, AMD64Instr_SseLdSt(
+                       False/*store*/, 8, arg2first ? arg1 : arg2, m8_rsp));
+      addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/, 8));
+
+      /* do it */
+      /* XXXROUNDINGFIXME */
+      /* set roundingmode here */
+      switch (triop->op) {
+         case Iop_ScaleF64: 
+            addInstr(env, AMD64Instr_A87FpOp(Afp_SCALE));
+            break;
+         case Iop_AtanF64: 
+            addInstr(env, AMD64Instr_A87FpOp(Afp_ATAN));
+            break;
+         case Iop_Yl2xF64: 
+            addInstr(env, AMD64Instr_A87FpOp(Afp_YL2X));
+            break;
+         case Iop_Yl2xp1F64: 
+            addInstr(env, AMD64Instr_A87FpOp(Afp_YL2XP1));
+            break;
+         case Iop_PRemF64:
+            addInstr(env, AMD64Instr_A87FpOp(Afp_PREM));
+            break;
+         case Iop_PRem1F64:
+            addInstr(env, AMD64Instr_A87FpOp(Afp_PREM1));
+            break;
+         default: 
+            vassert(0);
+      }
+
+      /* save result */
+      addInstr(env, AMD64Instr_A87PushPop(m8_rsp, False/*pop*/, 8));
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 8, dst, m8_rsp));
+      return dst;
+   }
+
+   if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_I64StoF64) {
+      HReg dst = newVRegV(env);
+      HReg src = iselIntExpr_R(env, e->Iex.Binop.arg2);
+      set_SSE_rounding_mode( env, e->Iex.Binop.arg1 );
+      addInstr(env, AMD64Instr_SseSI2SF( 8, 8, src, dst ));
+      set_SSE_rounding_default( env );
+      return dst;
+   }
+
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_I32StoF64) {
+      HReg dst = newVRegV(env);
+      HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+      set_SSE_rounding_default( env );
+      addInstr(env, AMD64Instr_SseSI2SF( 4, 8, src, dst ));
+      return dst;
+   }
+
+   if (e->tag == Iex_Unop 
+       && (e->Iex.Unop.op == Iop_NegF64
+           || e->Iex.Unop.op == Iop_AbsF64)) {
+      /* Sigh ... very rough code.  Could do much better. */
+      /* Get the 128-bit literal 00---0 10---0 into a register
+         and xor/nand it with the value to be negated. */
+      HReg r1  = newVRegI(env);
+      HReg dst = newVRegV(env);
+      HReg tmp = newVRegV(env);
+      HReg src = iselDblExpr(env, e->Iex.Unop.arg);
+      AMD64AMode* rsp0 = AMD64AMode_IR(0, hregAMD64_RSP());
+      addInstr(env, mk_vMOVsd_RR(src,tmp));
+      addInstr(env, AMD64Instr_Push(AMD64RMI_Imm(0)));
+      addInstr(env, AMD64Instr_Imm64( 1ULL<<63, r1 ));
+      addInstr(env, AMD64Instr_Push(AMD64RMI_Reg(r1)));
+      addInstr(env, AMD64Instr_SseLdSt(True, 16, dst, rsp0));
+
+      if (e->Iex.Unop.op == Iop_NegF64)
+         addInstr(env, AMD64Instr_SseReRg(Asse_XOR, tmp, dst));
+      else
+         addInstr(env, AMD64Instr_SseReRg(Asse_ANDN, tmp, dst));
+
+      add_to_rsp(env, 16);
+      return dst;
+   }
+
+   if (e->tag == Iex_Binop) {
+      A87FpOp fpop = Afp_INVALID;
+      switch (e->Iex.Binop.op) {
+         case Iop_SqrtF64: fpop = Afp_SQRT; break;
+         case Iop_SinF64:  fpop = Afp_SIN;  break;
+         case Iop_CosF64:  fpop = Afp_COS;  break;
+         case Iop_TanF64:  fpop = Afp_TAN;  break;
+         case Iop_2xm1F64: fpop = Afp_2XM1; break;
+         default: break;
+      }
+      if (fpop != Afp_INVALID) {
+         AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+         HReg        arg    = iselDblExpr(env, e->Iex.Binop.arg2);
+         HReg        dst    = newVRegV(env);
+         Int     nNeeded    = e->Iex.Binop.op==Iop_TanF64 ? 2 : 1;
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, arg, m8_rsp));
+         addInstr(env, AMD64Instr_A87Free(nNeeded));
+         addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/, 8));
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         /* Note that AMD64Instr_A87FpOp(Afp_TAN) sets the condition
+            codes.  I don't think that matters, since this insn
+            selector never generates such an instruction intervening
+            between an flag-setting instruction and a flag-using
+            instruction. */
+         addInstr(env, AMD64Instr_A87FpOp(fpop));
+         addInstr(env, AMD64Instr_A87PushPop(m8_rsp, False/*pop*/, 8));
+         addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 8, dst, m8_rsp));
+         return dst;
+      }
+   }
+
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+//..          case Iop_I32toF64: {
+//..             HReg dst = newVRegF(env);
+//..             HReg ri  = iselIntExpr_R(env, e->Iex.Unop.arg);
+//..             addInstr(env, X86Instr_Push(X86RMI_Reg(ri)));
+//..             set_FPU_rounding_default(env);
+//..             addInstr(env, X86Instr_FpLdStI(
+//..                              True/*load*/, 4, dst, 
+//..                              X86AMode_IR(0, hregX86_ESP())));
+//..             add_to_esp(env, 4);
+//..             return dst;
+//..          }
+         case Iop_ReinterpI64asF64: {
+            /* Given an I64, produce an IEEE754 double with the same
+               bit pattern. */
+            AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
+            HReg        dst    = newVRegV(env);
+            AMD64RI*    src    = iselIntExpr_RI(env, e->Iex.Unop.arg);
+            /* paranoia */
+            set_SSE_rounding_default(env);
+            addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, src, m8_rsp));
+            addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 8, dst, m8_rsp));
+            return dst;
+         }
+         case Iop_F32toF64: {
+            HReg f32;
+            HReg f64 = newVRegV(env);
+            /* this shouldn't be necessary, but be paranoid ... */
+            set_SSE_rounding_default(env);
+            f32 = iselFltExpr(env, e->Iex.Unop.arg);
+            addInstr(env, AMD64Instr_SseSDSS(False/*S->D*/, f32, f64));
+            return f64;
+         }
+         default: 
+            break;
+      }
+   }
+
+   /* --------- MULTIPLEX --------- */
+   if (e->tag == Iex_ITE) { // VFD
+      HReg r1, r0, dst;
+      vassert(ty == Ity_F64);
+      vassert(typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1);
+      r1  = iselDblExpr(env, e->Iex.ITE.iftrue);
+      r0  = iselDblExpr(env, e->Iex.ITE.iffalse);
+      dst = newVRegV(env);
+      addInstr(env, mk_vMOVsd_RR(r1,dst));
+      AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+      addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0, dst));
+      return dst;
+   }
+
+   ppIRExpr(e);
+   vpanic("iselDblExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: SIMD (Vector) expressions, 128 bit.         ---*/
+/*---------------------------------------------------------*/
+
+/* Select instructions for a 128-bit (V128) vector expression and
+   return the virtual vector register holding the result.  This is the
+   public entry point; it merely wraps iselVecExpr_wrk and sanity-checks
+   the result: the returned register must be a virtual HRcVec128. */
+static HReg iselVecExpr ( ISelEnv* env, IRExpr* e )
+{
+   HReg r = iselVecExpr_wrk( env, e );
+#  if 0
+   /* Debug aid: dump the expression being selected. */
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(r) == HRcVec128);
+   vassert(hregIsVirtual(r));
+   return r;
+}
+
+
+/* DO NOT CALL THIS DIRECTLY */
+/* Worker for iselVecExpr: pattern-match the V128-typed IRExpr |e| and
+   emit AMD64/SSE instructions computing it into a fresh virtual vector
+   register, which is returned.  Tries each expression tag in turn
+   (RdTmp, Get, Load, Const, Unop, Binop, Triop, ITE) and panics if no
+   case matches. */
+static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   HWord      fn = 0; /* address of helper fn, if required */
+   Bool       arg1isEReg = False;
+   AMD64SseOp op = Asse_INVALID;
+   IRType     ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_V128);
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* Read of the guest state: 16-byte load from offset(%rbp). */
+   if (e->tag == Iex_Get) {
+      HReg dst = newVRegV(env);
+      addInstr(env, AMD64Instr_SseLdSt(
+                       True/*load*/, 
+                       16,
+                       dst,
+                       AMD64AMode_IR(e->Iex.Get.offset, hregAMD64_RBP())
+                    )
+              );
+      return dst;
+   }
+
+   /* Little-endian 128-bit memory load. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      HReg        dst = newVRegV(env);
+      AMD64AMode* am  = iselIntExpr_AMode(env, e->Iex.Load.addr);
+      addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 16, dst, am ));
+      return dst;
+   }
+
+   /* V128 literal.  The Ico_V128 constant is a 16-bit mask, one bit
+      per byte of the 128-bit value (bit set => byte is 0xFF). */
+   if (e->tag == Iex_Const) {
+      HReg dst = newVRegV(env);
+      vassert(e->Iex.Const.con->tag == Ico_V128);
+      switch (e->Iex.Const.con->Ico.V128) {
+         case 0x0000:
+            /* all-zeroes: xor the register with itself */
+            dst = generate_zeroes_V128(env);
+            break;
+         case 0xFFFF:
+            /* all-ones */
+            dst = generate_ones_V128(env);
+            break;
+         default: {
+            /* General case: materialise the two 64-bit halves on the
+               stack and load the 16 bytes back. */
+            AMD64AMode* rsp0 = AMD64AMode_IR(0, hregAMD64_RSP());
+            /* do push_uimm64 twice, first time for the high-order half. */
+            push_uimm64(env, bitmask8_to_bytemask64(
+                                (e->Iex.Const.con->Ico.V128 >> 8) & 0xFF
+                       ));
+            push_uimm64(env, bitmask8_to_bytemask64(
+                                (e->Iex.Const.con->Ico.V128 >> 0) & 0xFF
+                       ));
+            addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 16, dst, rsp0 ));
+            add_to_rsp(env, 16);
+            break;
+         }
+      }
+      return dst;
+   }
+
+   if (e->tag == Iex_Unop) {
+   switch (e->Iex.Unop.op) {
+
+      case Iop_NotV128: {
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
+         return do_sse_NotV128(env, arg);
+      }
+
+      case Iop_CmpNEZ64x2: {
+         /* We can use SSE2 instructions for this. */
+         /* Ideally, we want to do a 64Ix2 comparison against zero of
+            the operand.  Problem is no such insn exists.  Solution
+            therefore is to do a 32Ix4 comparison instead, and bitwise-
+            negate (NOT) the result.  Let a,b,c,d be 32-bit lanes, and 
+            let the not'd result of this initial comparison be a:b:c:d.
+            What we need to compute is (a|b):(a|b):(c|d):(c|d).  So, use
+            pshufd to create a value b:a:d:c, and OR that with a:b:c:d,
+            giving the required result.
+
+            The required selection sequence is 2,3,0,1, which
+            according to Intel's documentation means the pshufd
+            literal value is 0xB1, that is, 
+            (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0) 
+         */
+         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg tmp  = generate_zeroes_V128(env);
+         HReg dst  = newVRegV(env);
+         addInstr(env, AMD64Instr_SseReRg(Asse_CMPEQ32, arg, tmp));
+         tmp = do_sse_NotV128(env, tmp);
+         addInstr(env, AMD64Instr_SseShuf(0xB1, tmp, dst));
+         addInstr(env, AMD64Instr_SseReRg(Asse_OR, tmp, dst));
+         return dst;
+      }
+
+      /* CmpNEZ on narrower lanes: compare-equal against zero, then
+         bitwise NOT the result. */
+      case Iop_CmpNEZ32x4: op = Asse_CMPEQ32; goto do_CmpNEZ_vector;
+      case Iop_CmpNEZ16x8: op = Asse_CMPEQ16; goto do_CmpNEZ_vector;
+      case Iop_CmpNEZ8x16: op = Asse_CMPEQ8;  goto do_CmpNEZ_vector;
+      do_CmpNEZ_vector:
+      {
+         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg tmp  = newVRegV(env);
+         HReg zero = generate_zeroes_V128(env);
+         HReg dst;
+         addInstr(env, mk_vMOVsd_RR(arg, tmp));
+         addInstr(env, AMD64Instr_SseReRg(op, zero, tmp));
+         dst = do_sse_NotV128(env, tmp);
+         return dst;
+      }
+
+      case Iop_RecipEst32Fx4: op = Asse_RCPF;   goto do_32Fx4_unary;
+      case Iop_RSqrtEst32Fx4: op = Asse_RSQRTF; goto do_32Fx4_unary;
+      do_32Fx4_unary:
+      {
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegV(env);
+         addInstr(env, AMD64Instr_Sse32Fx4(op, arg, dst));
+         return dst;
+      }
+
+      case Iop_RecipEst32F0x4: op = Asse_RCPF;   goto do_32F0x4_unary;
+      case Iop_RSqrtEst32F0x4: op = Asse_RSQRTF; goto do_32F0x4_unary;
+      case Iop_Sqrt32F0x4:     op = Asse_SQRTF;  goto do_32F0x4_unary;
+      do_32F0x4_unary:
+      {
+         /* A bit subtle.  We have to copy the arg to the result
+            register first, because actually doing the SSE scalar insn
+            leaves the upper 3/4 of the destination register
+            unchanged.  Whereas the required semantics of these
+            primops is that the upper 3/4 is simply copied in from the
+            argument. */
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(arg, dst));
+         addInstr(env, AMD64Instr_Sse32FLo(op, arg, dst));
+         return dst;
+      }
+
+      case Iop_Sqrt64F0x2:  op = Asse_SQRTF;  goto do_64F0x2_unary;
+      do_64F0x2_unary:
+      {
+         /* A bit subtle.  We have to copy the arg to the result
+            register first, because actually doing the SSE scalar insn
+            leaves the upper half of the destination register
+            unchanged.  Whereas the required semantics of these
+            primops is that the upper half is simply copied in from the
+            argument. */
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(arg, dst));
+         addInstr(env, AMD64Instr_Sse64FLo(op, arg, dst));
+         return dst;
+      }
+
+      case Iop_32UtoV128: {
+         /* Store the 32-bit value below %rsp (no stack-pointer
+            adjustment), then do a zero-extending low load. */
+         HReg        dst     = newVRegV(env);
+         AMD64AMode* rsp_m32 = AMD64AMode_IR(-32, hregAMD64_RSP());
+         AMD64RI*    ri      = iselIntExpr_RI(env, e->Iex.Unop.arg);
+         addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, ri, rsp_m32));
+         addInstr(env, AMD64Instr_SseLdzLO(4, dst, rsp_m32));
+         return dst;
+      }
+
+      case Iop_64UtoV128: {
+         HReg        dst  = newVRegV(env);
+         AMD64AMode* rsp0 = AMD64AMode_IR(0, hregAMD64_RSP());
+         AMD64RMI*   rmi  = iselIntExpr_RMI(env, e->Iex.Unop.arg);
+         addInstr(env, AMD64Instr_Push(rmi));
+         addInstr(env, AMD64Instr_SseLdzLO(8, dst, rsp0));
+         add_to_rsp(env, 8);
+         return dst;
+      }
+
+      case Iop_V256toV128_0:
+      case Iop_V256toV128_1: {
+         /* Select one half of a V256 (held as a pair of V128 regs). */
+         HReg vHi, vLo;
+         iselDVecExpr(&vHi, &vLo, env, e->Iex.Unop.arg);
+         return (e->Iex.Unop.op == Iop_V256toV128_1) ? vHi : vLo;
+      }
+
+      default:
+         break;
+   } /* switch (e->Iex.Unop.op) */
+   } /* if (e->tag == Iex_Unop) */
+
+   if (e->tag == Iex_Binop) {
+   switch (e->Iex.Binop.op) {
+
+      case Iop_Sqrt64Fx2:
+      case Iop_Sqrt32Fx4: {
+         /* :: (rmode, vec) -> vec */
+         HReg arg = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, (e->Iex.Binop.op == Iop_Sqrt64Fx2 
+                           ? AMD64Instr_Sse64Fx2 : AMD64Instr_Sse32Fx4)
+                       (Asse_SQRTF, arg, dst));
+         return dst;
+      }
+
+      /* FIXME: could we generate MOVQ here? */
+      case Iop_SetV128lo64: {
+         /* Spill the vector below %rsp, overwrite its low 8 bytes
+            with the integer, and reload the 16 bytes. */
+         HReg dst  = newVRegV(env);
+         HReg srcV = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg srcI = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         AMD64AMode* rsp_m16 = AMD64AMode_IR(-16, hregAMD64_RSP());
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, srcV, rsp_m16));
+         addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, AMD64RI_Reg(srcI), rsp_m16));
+         addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, dst, rsp_m16));
+         return dst;
+      }
+
+      /* FIXME: could we generate MOVD here? */
+      case Iop_SetV128lo32: {
+         /* Same trick as SetV128lo64, but only the low 4 bytes are
+            overwritten. */
+         HReg dst  = newVRegV(env);
+         HReg srcV = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg srcI = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         AMD64AMode* rsp_m16 = AMD64AMode_IR(-16, hregAMD64_RSP());
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, srcV, rsp_m16));
+         addInstr(env, AMD64Instr_Store(4, srcI, rsp_m16));
+         addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, dst, rsp_m16));
+         return dst;
+      }
+
+      case Iop_64HLtoV128: {
+         /* Lay the two 64-bit halves out contiguously below %rsp
+            (hi at -8, lo at -16) and load the result as one V128. */
+         HReg        rsp     = hregAMD64_RSP();
+         AMD64AMode* m8_rsp  = AMD64AMode_IR(-8, rsp);
+         AMD64AMode* m16_rsp = AMD64AMode_IR(-16, rsp);
+         AMD64RI*    qHi = iselIntExpr_RI(env, e->Iex.Binop.arg1);
+         AMD64RI*    qLo = iselIntExpr_RI(env, e->Iex.Binop.arg2);
+         addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, qHi, m8_rsp));
+         addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, qLo, m16_rsp));
+         HReg        dst = newVRegV(env);
+         /* One store-forwarding stall coming up, oh well :-( */
+         addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, dst, m16_rsp));
+         return dst;
+      }
+
+      case Iop_CmpEQ32Fx4: op = Asse_CMPEQF; goto do_32Fx4;
+      case Iop_CmpLT32Fx4: op = Asse_CMPLTF; goto do_32Fx4;
+      case Iop_CmpLE32Fx4: op = Asse_CMPLEF; goto do_32Fx4;
+      case Iop_CmpUN32Fx4: op = Asse_CMPUNF; goto do_32Fx4;
+      case Iop_Max32Fx4:   op = Asse_MAXF;   goto do_32Fx4;
+      case Iop_Min32Fx4:   op = Asse_MINF;   goto do_32Fx4;
+      do_32Fx4:
+      {
+         /* Two-operand SSE form: copy argL into dst, then
+            dst := dst `op` argR. */
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         addInstr(env, AMD64Instr_Sse32Fx4(op, argR, dst));
+         return dst;
+      }
+
+      case Iop_CmpEQ64Fx2: op = Asse_CMPEQF; goto do_64Fx2;
+      case Iop_CmpLT64Fx2: op = Asse_CMPLTF; goto do_64Fx2;
+      case Iop_CmpLE64Fx2: op = Asse_CMPLEF; goto do_64Fx2;
+      case Iop_CmpUN64Fx2: op = Asse_CMPUNF; goto do_64Fx2;
+      case Iop_Max64Fx2:   op = Asse_MAXF;   goto do_64Fx2;
+      case Iop_Min64Fx2:   op = Asse_MINF;   goto do_64Fx2;
+      do_64Fx2:
+      {
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         addInstr(env, AMD64Instr_Sse64Fx2(op, argR, dst));
+         return dst;
+      }
+
+      case Iop_CmpEQ32F0x4: op = Asse_CMPEQF; goto do_32F0x4;
+      case Iop_CmpLT32F0x4: op = Asse_CMPLTF; goto do_32F0x4;
+      case Iop_CmpLE32F0x4: op = Asse_CMPLEF; goto do_32F0x4;
+      case Iop_CmpUN32F0x4: op = Asse_CMPUNF; goto do_32F0x4;
+      case Iop_Add32F0x4:   op = Asse_ADDF;   goto do_32F0x4;
+      case Iop_Div32F0x4:   op = Asse_DIVF;   goto do_32F0x4;
+      case Iop_Max32F0x4:   op = Asse_MAXF;   goto do_32F0x4;
+      case Iop_Min32F0x4:   op = Asse_MINF;   goto do_32F0x4;
+      case Iop_Mul32F0x4:   op = Asse_MULF;   goto do_32F0x4;
+      case Iop_Sub32F0x4:   op = Asse_SUBF;   goto do_32F0x4;
+      do_32F0x4: {
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         addInstr(env, AMD64Instr_Sse32FLo(op, argR, dst));
+         return dst;
+      }
+
+      case Iop_CmpEQ64F0x2: op = Asse_CMPEQF; goto do_64F0x2;
+      case Iop_CmpLT64F0x2: op = Asse_CMPLTF; goto do_64F0x2;
+      case Iop_CmpLE64F0x2: op = Asse_CMPLEF; goto do_64F0x2;
+      case Iop_CmpUN64F0x2: op = Asse_CMPUNF; goto do_64F0x2;
+      case Iop_Add64F0x2:   op = Asse_ADDF;   goto do_64F0x2;
+      case Iop_Div64F0x2:   op = Asse_DIVF;   goto do_64F0x2;
+      case Iop_Max64F0x2:   op = Asse_MAXF;   goto do_64F0x2;
+      case Iop_Min64F0x2:   op = Asse_MINF;   goto do_64F0x2;
+      case Iop_Mul64F0x2:   op = Asse_MULF;   goto do_64F0x2;
+      case Iop_Sub64F0x2:   op = Asse_SUBF;   goto do_64F0x2;
+      do_64F0x2: {
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         addInstr(env, AMD64Instr_Sse64FLo(op, argR, dst));
+         return dst;
+      }
+
+      /* For these non-commutative ops, arg1 must land in the E
+         (source) operand position; arg1isEReg requests that in
+         do_SseReRg below. */
+      case Iop_QNarrowBin32Sto16Sx8: 
+         op = Asse_PACKSSD; arg1isEReg = True; goto do_SseReRg;
+      case Iop_QNarrowBin16Sto8Sx16: 
+         op = Asse_PACKSSW; arg1isEReg = True; goto do_SseReRg;
+      case Iop_QNarrowBin16Sto8Ux16: 
+         op = Asse_PACKUSW; arg1isEReg = True; goto do_SseReRg;
+
+      case Iop_InterleaveHI8x16: 
+         op = Asse_UNPCKHB; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveHI16x8: 
+         op = Asse_UNPCKHW; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveHI32x4: 
+         op = Asse_UNPCKHD; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveHI64x2: 
+         op = Asse_UNPCKHQ; arg1isEReg = True; goto do_SseReRg;
+
+      case Iop_InterleaveLO8x16: 
+         op = Asse_UNPCKLB; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveLO16x8: 
+         op = Asse_UNPCKLW; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveLO32x4: 
+         op = Asse_UNPCKLD; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveLO64x2: 
+         op = Asse_UNPCKLQ; arg1isEReg = True; goto do_SseReRg;
+
+      case Iop_AndV128:    op = Asse_AND;      goto do_SseReRg;
+      case Iop_OrV128:     op = Asse_OR;       goto do_SseReRg;
+      case Iop_XorV128:    op = Asse_XOR;      goto do_SseReRg;
+      case Iop_Add8x16:    op = Asse_ADD8;     goto do_SseReRg;
+      case Iop_Add16x8:    op = Asse_ADD16;    goto do_SseReRg;
+      case Iop_Add32x4:    op = Asse_ADD32;    goto do_SseReRg;
+      case Iop_Add64x2:    op = Asse_ADD64;    goto do_SseReRg;
+      case Iop_QAdd8Sx16:  op = Asse_QADD8S;   goto do_SseReRg;
+      case Iop_QAdd16Sx8:  op = Asse_QADD16S;  goto do_SseReRg;
+      case Iop_QAdd8Ux16:  op = Asse_QADD8U;   goto do_SseReRg;
+      case Iop_QAdd16Ux8:  op = Asse_QADD16U;  goto do_SseReRg;
+      case Iop_Avg8Ux16:   op = Asse_AVG8U;    goto do_SseReRg;
+      case Iop_Avg16Ux8:   op = Asse_AVG16U;   goto do_SseReRg;
+      case Iop_CmpEQ8x16:  op = Asse_CMPEQ8;   goto do_SseReRg;
+      case Iop_CmpEQ16x8:  op = Asse_CMPEQ16;  goto do_SseReRg;
+      case Iop_CmpEQ32x4:  op = Asse_CMPEQ32;  goto do_SseReRg;
+      case Iop_CmpGT8Sx16: op = Asse_CMPGT8S;  goto do_SseReRg;
+      case Iop_CmpGT16Sx8: op = Asse_CMPGT16S; goto do_SseReRg;
+      case Iop_CmpGT32Sx4: op = Asse_CMPGT32S; goto do_SseReRg;
+      case Iop_Max16Sx8:   op = Asse_MAX16S;   goto do_SseReRg;
+      case Iop_Max8Ux16:   op = Asse_MAX8U;    goto do_SseReRg;
+      case Iop_Min16Sx8:   op = Asse_MIN16S;   goto do_SseReRg;
+      case Iop_Min8Ux16:   op = Asse_MIN8U;    goto do_SseReRg;
+      case Iop_MulHi16Ux8: op = Asse_MULHI16U; goto do_SseReRg;
+      case Iop_MulHi16Sx8: op = Asse_MULHI16S; goto do_SseReRg;
+      case Iop_Mul16x8:    op = Asse_MUL16;    goto do_SseReRg;
+      case Iop_Sub8x16:    op = Asse_SUB8;     goto do_SseReRg;
+      case Iop_Sub16x8:    op = Asse_SUB16;    goto do_SseReRg;
+      case Iop_Sub32x4:    op = Asse_SUB32;    goto do_SseReRg;
+      case Iop_Sub64x2:    op = Asse_SUB64;    goto do_SseReRg;
+      case Iop_QSub8Sx16:  op = Asse_QSUB8S;   goto do_SseReRg;
+      case Iop_QSub16Sx8:  op = Asse_QSUB16S;  goto do_SseReRg;
+      case Iop_QSub8Ux16:  op = Asse_QSUB8U;   goto do_SseReRg;
+      case Iop_QSub16Ux8:  op = Asse_QSUB16U;  goto do_SseReRg;
+      do_SseReRg: {
+         /* Generic reg-reg SSE binop.  The copied-in operand is the
+            one that ends up in dst (the G register of the insn); the
+            other supplies the E operand.  arg1isEReg selects which. */
+         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         if (arg1isEReg) {
+            addInstr(env, mk_vMOVsd_RR(arg2, dst));
+            addInstr(env, AMD64Instr_SseReRg(op, arg1, dst));
+         } else {
+            addInstr(env, mk_vMOVsd_RR(arg1, dst));
+            addInstr(env, AMD64Instr_SseReRg(op, arg2, dst));
+         }
+         return dst;
+      }
+
+      case Iop_ShlN16x8: op = Asse_SHL16; goto do_SseShift;
+      case Iop_ShlN32x4: op = Asse_SHL32; goto do_SseShift;
+      case Iop_ShlN64x2: op = Asse_SHL64; goto do_SseShift;
+      case Iop_SarN16x8: op = Asse_SAR16; goto do_SseShift;
+      case Iop_SarN32x4: op = Asse_SAR32; goto do_SseShift;
+      case Iop_ShrN16x8: op = Asse_SHR16; goto do_SseShift;
+      case Iop_ShrN32x4: op = Asse_SHR32; goto do_SseShift;
+      case Iop_ShrN64x2: op = Asse_SHR64; goto do_SseShift;
+      do_SseShift: {
+         /* Build the shift amount as a 128-bit value on the stack
+            (zero high qword, amount in low qword), load it into a
+            vreg, and use the reg-reg form of the shift. */
+         HReg        greg = iselVecExpr(env, e->Iex.Binop.arg1);
+         AMD64RMI*   rmi  = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+         AMD64AMode* rsp0 = AMD64AMode_IR(0, hregAMD64_RSP());
+         HReg        ereg = newVRegV(env);
+         HReg        dst  = newVRegV(env);
+         addInstr(env, AMD64Instr_Push(AMD64RMI_Imm(0)));
+         addInstr(env, AMD64Instr_Push(rmi));
+         addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, ereg, rsp0));
+         addInstr(env, mk_vMOVsd_RR(greg, dst));
+         addInstr(env, AMD64Instr_SseReRg(op, ereg, dst));
+         add_to_rsp(env, 16);
+         return dst;
+      }
+
+      /* Ops with no convenient SSE2 encoding: call a C helper that
+         computes the result through memory. */
+      case Iop_Mul32x4:    fn = (HWord)h_generic_calc_Mul32x4;
+                           goto do_SseAssistedBinary;
+      case Iop_Max32Sx4:   fn = (HWord)h_generic_calc_Max32Sx4;
+                           goto do_SseAssistedBinary;
+      case Iop_Min32Sx4:   fn = (HWord)h_generic_calc_Min32Sx4;
+                           goto do_SseAssistedBinary;
+      case Iop_Max32Ux4:   fn = (HWord)h_generic_calc_Max32Ux4;
+                           goto do_SseAssistedBinary;
+      case Iop_Min32Ux4:   fn = (HWord)h_generic_calc_Min32Ux4;
+                           goto do_SseAssistedBinary;
+      case Iop_Max16Ux8:   fn = (HWord)h_generic_calc_Max16Ux8;
+                           goto do_SseAssistedBinary;
+      case Iop_Min16Ux8:   fn = (HWord)h_generic_calc_Min16Ux8;
+                           goto do_SseAssistedBinary;
+      case Iop_Max8Sx16:   fn = (HWord)h_generic_calc_Max8Sx16;
+                           goto do_SseAssistedBinary;
+      case Iop_Min8Sx16:   fn = (HWord)h_generic_calc_Min8Sx16;
+                           goto do_SseAssistedBinary;
+      case Iop_CmpEQ64x2:  fn = (HWord)h_generic_calc_CmpEQ64x2;
+                           goto do_SseAssistedBinary;
+      case Iop_CmpGT64Sx2: fn = (HWord)h_generic_calc_CmpGT64Sx2;
+                           goto do_SseAssistedBinary;
+      case Iop_Perm32x4:   fn = (HWord)h_generic_calc_Perm32x4;
+                           goto do_SseAssistedBinary;
+      case Iop_QNarrowBin32Sto16Ux8:
+                           fn = (HWord)h_generic_calc_QNarrowBin32Sto16Ux8;
+                           goto do_SseAssistedBinary;
+      case Iop_NarrowBin16to8x16:
+                           fn = (HWord)h_generic_calc_NarrowBin16to8x16;
+                           goto do_SseAssistedBinary;
+      case Iop_NarrowBin32to16x8:
+                           fn = (HWord)h_generic_calc_NarrowBin32to16x8;
+                           goto do_SseAssistedBinary;
+      do_SseAssistedBinary: {
+         /* RRRufff!  RRRufff code is what we're generating here.  Oh
+            well. */
+         /* Layout: carve a 112-byte scratch area off the stack, take
+            a 16-aligned pointer (argp) 48 bytes in, and use
+            argp+0 / argp+16 / argp+32 as result / argL / argR slots.
+            The helper takes those three addresses in rdi/rsi/rdx and
+            writes the result to (rdi); no value comes back in a
+            register (RLPri_None). */
+         vassert(fn != 0);
+         HReg dst = newVRegV(env);
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg argp = newVRegI(env);
+         /* subq $112, %rsp         -- make a space*/
+         sub_from_rsp(env, 112);
+         /* leaq 48(%rsp), %r_argp  -- point into it */
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(48, hregAMD64_RSP()),
+                                        argp));
+         /* andq $-16, %r_argp      -- 16-align the pointer */
+         addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
+                                         AMD64RMI_Imm( ~(UInt)15 ), 
+                                         argp));
+         /* Prepare 3 arg regs:
+            leaq 0(%r_argp), %rdi
+            leaq 16(%r_argp), %rsi
+            leaq 32(%r_argp), %rdx
+         */
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(0, argp),
+                                        hregAMD64_RDI()));
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(16, argp),
+                                        hregAMD64_RSI()));
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(32, argp),
+                                        hregAMD64_RDX()));
+         /* Store the two args, at (%rsi) and (%rdx):
+            movupd  %argL, 0(%rsi)
+            movupd  %argR, 0(%rdx)
+         */
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argL,
+                                          AMD64AMode_IR(0, hregAMD64_RSI())));
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argR,
+                                          AMD64AMode_IR(0, hregAMD64_RDX())));
+         /* call the helper */
+         addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn,
+                                        3, mk_RetLoc_simple(RLPri_None) ));
+         /* fetch the result from memory, using %r_argp, which the
+            register allocator will keep alive across the call. */
+         addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 16, dst,
+                                          AMD64AMode_IR(0, argp)));
+         /* and finally, clear the space */
+         add_to_rsp(env, 112);
+         return dst;
+      }
+
+      case Iop_SarN64x2: fn = (HWord)h_generic_calc_SarN64x2;
+                         goto do_SseAssistedVectorAndScalar;
+      case Iop_SarN8x16: fn = (HWord)h_generic_calc_SarN8x16;
+                         goto do_SseAssistedVectorAndScalar;
+      do_SseAssistedVectorAndScalar: {
+         /* RRRufff!  RRRufff code is what we're generating here.  Oh
+            well. */
+         /* Same scratch-area scheme as do_SseAssistedBinary, except
+            the second argument is a scalar passed by value in rdx. */
+         vassert(fn != 0);
+         HReg dst = newVRegV(env);
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         HReg argp = newVRegI(env);
+         /* subq $112, %rsp         -- make a space*/
+         sub_from_rsp(env, 112);
+         /* leaq 48(%rsp), %r_argp  -- point into it */
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(48, hregAMD64_RSP()),
+                                        argp));
+         /* andq $-16, %r_argp      -- 16-align the pointer */
+         addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
+                                         AMD64RMI_Imm( ~(UInt)15 ), 
+                                         argp));
+         /* Prepare 2 vector arg regs:
+            leaq 0(%r_argp), %rdi
+            leaq 16(%r_argp), %rsi
+         */
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(0, argp),
+                                        hregAMD64_RDI()));
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(16, argp),
+                                        hregAMD64_RSI()));
+         /* Store the vector arg, at (%rsi):
+            movupd  %argL, 0(%rsi)
+         */
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argL,
+                                          AMD64AMode_IR(0, hregAMD64_RSI())));
+         /* And get the scalar value into rdx */
+         addInstr(env, mk_iMOVsd_RR(argR, hregAMD64_RDX()));
+
+         /* call the helper */
+         addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn,
+                                        3, mk_RetLoc_simple(RLPri_None) ));
+         /* fetch the result from memory, using %r_argp, which the
+            register allocator will keep alive across the call. */
+         addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 16, dst,
+                                          AMD64AMode_IR(0, argp)));
+         /* and finally, clear the space */
+         add_to_rsp(env, 112);
+         return dst;
+      }
+
+      default:
+         break;
+   } /* switch (e->Iex.Binop.op) */
+   } /* if (e->tag == Iex_Binop) */
+
+   /* Triops: FP vector arithmetic carrying an (unused, see
+      XXXROUNDINGFIXME) rounding mode in arg1. */
+   if (e->tag == Iex_Triop) {
+   IRTriop *triop = e->Iex.Triop.details;
+   switch (triop->op) {
+
+      case Iop_Add64Fx2: op = Asse_ADDF; goto do_64Fx2_w_rm;
+      case Iop_Sub64Fx2: op = Asse_SUBF; goto do_64Fx2_w_rm;
+      case Iop_Mul64Fx2: op = Asse_MULF; goto do_64Fx2_w_rm;
+      case Iop_Div64Fx2: op = Asse_DIVF; goto do_64Fx2_w_rm;
+      do_64Fx2_w_rm:
+      {
+         HReg argL = iselVecExpr(env, triop->arg2);
+         HReg argR = iselVecExpr(env, triop->arg3);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, AMD64Instr_Sse64Fx2(op, argR, dst));
+         return dst;
+      }
+
+      case Iop_Add32Fx4: op = Asse_ADDF; goto do_32Fx4_w_rm;
+      case Iop_Sub32Fx4: op = Asse_SUBF; goto do_32Fx4_w_rm;
+      case Iop_Mul32Fx4: op = Asse_MULF; goto do_32Fx4_w_rm;
+      case Iop_Div32Fx4: op = Asse_DIVF; goto do_32Fx4_w_rm;
+      do_32Fx4_w_rm:
+      {
+         HReg argL = iselVecExpr(env, triop->arg2);
+         HReg argR = iselVecExpr(env, triop->arg3);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, AMD64Instr_Sse32Fx4(op, argR, dst));
+         return dst;
+      }
+
+      default:
+         break;
+   } /* switch (triop->op) */
+   } /* if (e->tag == Iex_Triop) */
+
+   /* Conditional select: start with the iftrue value, then
+      conditionally overwrite with iffalse if the condition fails
+      (hence the inverted condition code, cc ^ 1). */
+   if (e->tag == Iex_ITE) { // VFD
+      HReg r1  = iselVecExpr(env, e->Iex.ITE.iftrue);
+      HReg r0  = iselVecExpr(env, e->Iex.ITE.iffalse);
+      HReg dst = newVRegV(env);
+      addInstr(env, mk_vMOVsd_RR(r1,dst));
+      AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+      addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0, dst));
+      return dst;
+   }
+
+   //vec_fail:
+   vex_printf("iselVecExpr (amd64, subarch = %s): can't reduce\n",
+              LibVEX_ppVexHwCaps(VexArchAMD64, env->hwcaps));
+   ppIRExpr(e);
+   vpanic("iselVecExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: SIMD (V256) expressions, into 2 XMM regs.    --*/
+/*---------------------------------------------------------*/
+
+/* Select instructions for a 256-bit (V256) vector expression.  The
+   result is returned as a pair of 128-bit virtual registers: *rHi
+   holds the upper 128 bits, *rLo the lower 128 bits.  Wrapper around
+   iselDVecExpr_wrk that sanity-checks both outputs are virtual
+   HRcVec128 registers. */
+static void iselDVecExpr ( /*OUT*/HReg* rHi, /*OUT*/HReg* rLo, 
+                           ISelEnv* env, IRExpr* e )
+{
+   iselDVecExpr_wrk( rHi, rLo, env, e );
+#  if 0
+   /* Debug aid: dump the expression being selected. */
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(*rHi) == HRcVec128);
+   vassert(hregClass(*rLo) == HRcVec128);
+   vassert(hregIsVirtual(*rHi));
+   vassert(hregIsVirtual(*rLo));
+}
+
+
+/* DO NOT CALL THIS DIRECTLY */
+/* Worker for iselDVecExpr: select AMD64 instructions for a 256-bit
+   (Ity_V256) expression, delivering the result as two V128 halves --
+   *rHi is the more significant 128 bits, *rLo the less significant.
+   Because the host code here uses only 128-bit SSE registers, every
+   256-bit operation is implemented by doing the corresponding 128-bit
+   operation twice, once per half.  Panics (vpanic) on any expression
+   it cannot reduce. */
+static void iselDVecExpr_wrk ( /*OUT*/HReg* rHi, /*OUT*/HReg* rLo, 
+                               ISelEnv* env, IRExpr* e )
+{
+   HWord fn = 0; /* address of helper fn, if required */
+   vassert(e);
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_V256);
+
+   AMD64SseOp op = Asse_INVALID;
+
+   /* read 256-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp);
+      return;
+   }
+ 
+   /* Read from the guest state: a V256 occupies two consecutive
+      16-byte slots, with the low half at the lower offset. */
+   if (e->tag == Iex_Get) {
+      HReg        vHi  = newVRegV(env);
+      HReg        vLo  = newVRegV(env);
+      HReg        rbp  = hregAMD64_RBP();
+      AMD64AMode* am0  = AMD64AMode_IR(e->Iex.Get.offset + 0,  rbp);
+      AMD64AMode* am16 = AMD64AMode_IR(e->Iex.Get.offset + 16, rbp);
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, vLo, am0));
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, vHi, am16));
+      *rHi = vHi;
+      *rLo = vLo;
+      return;
+   }
+
+   /* 256-bit load from memory: two 16-byte loads, low half at the
+      lower address (little-endian layout). */
+   if (e->tag == Iex_Load) {
+      HReg        vHi  = newVRegV(env);
+      HReg        vLo  = newVRegV(env);
+      HReg        rA   = iselIntExpr_R(env, e->Iex.Load.addr);
+      AMD64AMode* am0  = AMD64AMode_IR(0,  rA);
+      AMD64AMode* am16 = AMD64AMode_IR(16, rA);
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, vLo, am0));
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, vHi, am16));
+      *rHi = vHi;
+      *rLo = vLo;
+      return;
+   }
+
+   /* V256 constants: only the all-zeroes constant is handled. */
+   if (e->tag == Iex_Const) {
+      vassert(e->Iex.Const.con->tag == Ico_V256);
+      switch (e->Iex.Const.con->Ico.V256) {
+         case 0x00000000: {
+            /* Generate zeroes once, then copy to the other half. */
+            HReg vHi = generate_zeroes_V128(env);
+            HReg vLo = newVRegV(env);
+            addInstr(env, mk_vMOVsd_RR(vHi, vLo));
+            *rHi = vHi;
+            *rLo = vLo;
+            return;
+         }
+         default:
+            break; /* give up.   Until such time as is necessary. */
+      }
+   }
+
+   /* --- Unary operations --- */
+   if (e->tag == Iex_Unop) {
+   switch (e->Iex.Unop.op) {
+
+      case Iop_NotV256: {
+         HReg argHi, argLo;
+         iselDVecExpr(&argHi, &argLo, env, e->Iex.Unop.arg);
+         *rHi = do_sse_NotV128(env, argHi);
+         *rLo = do_sse_NotV128(env, argLo);
+         return;
+      }
+
+      /* 8-lane F32 unary ops: apply the 4-lane SSE op to each half. */
+      case Iop_RecipEst32Fx8: op = Asse_RCPF;   goto do_32Fx8_unary;
+      case Iop_Sqrt32Fx8:     op = Asse_SQRTF;  goto do_32Fx8_unary;
+      case Iop_RSqrtEst32Fx8: op = Asse_RSQRTF; goto do_32Fx8_unary;
+      do_32Fx8_unary:
+      {
+         HReg argHi, argLo;
+         iselDVecExpr(&argHi, &argLo, env, e->Iex.Unop.arg);
+         HReg dstHi = newVRegV(env);
+         HReg dstLo = newVRegV(env);
+         addInstr(env, AMD64Instr_Sse32Fx4(op, argHi, dstHi));
+         addInstr(env, AMD64Instr_Sse32Fx4(op, argLo, dstLo));
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      /* 4-lane F64 unary ops: apply the 2-lane SSE op to each half. */
+      case Iop_Sqrt64Fx4:  op = Asse_SQRTF;  goto do_64Fx4_unary;
+      do_64Fx4_unary:
+      {
+         HReg argHi, argLo;
+         iselDVecExpr(&argHi, &argLo, env, e->Iex.Unop.arg);
+         HReg dstHi = newVRegV(env);
+         HReg dstLo = newVRegV(env);
+         addInstr(env, AMD64Instr_Sse64Fx2(op, argHi, dstHi));
+         addInstr(env, AMD64Instr_Sse64Fx2(op, argLo, dstLo));
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      case Iop_CmpNEZ64x4: {
+         /* We can use SSE2 instructions for this. */
+         /* Same scheme as Iop_CmpNEZ64x2, except twice as wide
+            (obviously).  See comment on Iop_CmpNEZ64x2 for
+            explanation of what's going on here. */
+         HReg argHi, argLo;
+         iselDVecExpr(&argHi, &argLo, env, e->Iex.Unop.arg);
+         HReg tmpHi  = generate_zeroes_V128(env);
+         HReg tmpLo  = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(tmpHi, tmpLo));
+         HReg dstHi  = newVRegV(env);
+         HReg dstLo  = newVRegV(env);
+         /* tmp = (arg == 0) per 32-bit lane, then invert ... */
+         addInstr(env, AMD64Instr_SseReRg(Asse_CMPEQ32, argHi, tmpHi));
+         addInstr(env, AMD64Instr_SseReRg(Asse_CMPEQ32, argLo, tmpLo));
+         tmpHi = do_sse_NotV128(env, tmpHi);
+         tmpLo = do_sse_NotV128(env, tmpLo);
+         /* ... then OR each 32-bit lane with its 64-bit partner
+            (0xB1 swaps adjacent 32-bit lanes) to widen to 64-bit. */
+         addInstr(env, AMD64Instr_SseShuf(0xB1, tmpHi, dstHi));
+         addInstr(env, AMD64Instr_SseShuf(0xB1, tmpLo, dstLo));
+         addInstr(env, AMD64Instr_SseReRg(Asse_OR, tmpHi, dstHi));
+         addInstr(env, AMD64Instr_SseReRg(Asse_OR, tmpLo, dstLo));
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      /* CmpNEZ on narrower lanes: compare-equal against zero, then
+         invert the result -- per half. */
+      case Iop_CmpNEZ32x8: op = Asse_CMPEQ32; goto do_CmpNEZ_vector;
+      case Iop_CmpNEZ16x16: op = Asse_CMPEQ16; goto do_CmpNEZ_vector;
+      case Iop_CmpNEZ8x32: op = Asse_CMPEQ8;  goto do_CmpNEZ_vector;
+      do_CmpNEZ_vector:
+      {
+         HReg argHi, argLo;
+         iselDVecExpr(&argHi, &argLo, env, e->Iex.Unop.arg);
+         HReg tmpHi = newVRegV(env);
+         HReg tmpLo = newVRegV(env);
+         HReg zero  = generate_zeroes_V128(env);
+         HReg dstHi, dstLo;
+         addInstr(env, mk_vMOVsd_RR(argHi, tmpHi));
+         addInstr(env, mk_vMOVsd_RR(argLo, tmpLo));
+         addInstr(env, AMD64Instr_SseReRg(op, zero, tmpHi));
+         addInstr(env, AMD64Instr_SseReRg(op, zero, tmpLo));
+         dstHi = do_sse_NotV128(env, tmpHi);
+         dstLo = do_sse_NotV128(env, tmpLo);
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      default:
+         break;
+   } /* switch (e->Iex.Unop.op) */
+   } /* if (e->tag == Iex_Unop) */
+
+   /* --- Binary operations --- */
+   if (e->tag == Iex_Binop) {
+   switch (e->Iex.Binop.op) {
+
+      /* 4-lane F64 min/max: two Sse64Fx2 ops on copies of the LHS. */
+      case Iop_Max64Fx4:   op = Asse_MAXF;   goto do_64Fx4;
+      case Iop_Min64Fx4:   op = Asse_MINF;   goto do_64Fx4;
+      do_64Fx4:
+      {
+         HReg argLhi, argLlo, argRhi, argRlo;
+         iselDVecExpr(&argLhi, &argLlo, env, e->Iex.Binop.arg1);
+         iselDVecExpr(&argRhi, &argRlo, env, e->Iex.Binop.arg2);
+         HReg dstHi = newVRegV(env);
+         HReg dstLo = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argLhi, dstHi));
+         addInstr(env, mk_vMOVsd_RR(argLlo, dstLo));
+         addInstr(env, AMD64Instr_Sse64Fx2(op, argRhi, dstHi));
+         addInstr(env, AMD64Instr_Sse64Fx2(op, argRlo, dstLo));
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      /* 8-lane F32 min/max: two Sse32Fx4 ops on copies of the LHS. */
+      case Iop_Max32Fx8:   op = Asse_MAXF;   goto do_32Fx8;
+      case Iop_Min32Fx8:   op = Asse_MINF;   goto do_32Fx8;
+      do_32Fx8:
+      {
+         HReg argLhi, argLlo, argRhi, argRlo;
+         iselDVecExpr(&argLhi, &argLlo, env, e->Iex.Binop.arg1);
+         iselDVecExpr(&argRhi, &argRlo, env, e->Iex.Binop.arg2);
+         HReg dstHi = newVRegV(env);
+         HReg dstLo = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argLhi, dstHi));
+         addInstr(env, mk_vMOVsd_RR(argLlo, dstLo));
+         addInstr(env, AMD64Instr_Sse32Fx4(op, argRhi, dstHi));
+         addInstr(env, AMD64Instr_Sse32Fx4(op, argRlo, dstLo));
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      /* Integer/bitwise binops with a direct SSE reg-reg form:
+         map the 256-bit IROp to its 128-bit SSE opcode and apply it
+         to each half below at do_SseReRg. */
+      case Iop_AndV256:    op = Asse_AND;      goto do_SseReRg;
+      case Iop_OrV256:     op = Asse_OR;       goto do_SseReRg;
+      case Iop_XorV256:    op = Asse_XOR;      goto do_SseReRg;
+      case Iop_Add8x32:    op = Asse_ADD8;     goto do_SseReRg;
+      case Iop_Add16x16:   op = Asse_ADD16;    goto do_SseReRg;
+      case Iop_Add32x8:    op = Asse_ADD32;    goto do_SseReRg;
+      case Iop_Add64x4:    op = Asse_ADD64;    goto do_SseReRg;
+      case Iop_QAdd8Sx32:  op = Asse_QADD8S;   goto do_SseReRg;
+      case Iop_QAdd16Sx16: op = Asse_QADD16S;  goto do_SseReRg;
+      case Iop_QAdd8Ux32:  op = Asse_QADD8U;   goto do_SseReRg;
+      case Iop_QAdd16Ux16: op = Asse_QADD16U;  goto do_SseReRg;
+      case Iop_Avg8Ux32:   op = Asse_AVG8U;    goto do_SseReRg;
+      case Iop_Avg16Ux16:  op = Asse_AVG16U;   goto do_SseReRg;
+      case Iop_CmpEQ8x32:  op = Asse_CMPEQ8;   goto do_SseReRg;
+      case Iop_CmpEQ16x16: op = Asse_CMPEQ16;  goto do_SseReRg;
+      case Iop_CmpEQ32x8:  op = Asse_CMPEQ32;  goto do_SseReRg;
+      case Iop_CmpGT8Sx32: op = Asse_CMPGT8S;  goto do_SseReRg;
+      case Iop_CmpGT16Sx16: op = Asse_CMPGT16S; goto do_SseReRg;
+      case Iop_CmpGT32Sx8: op = Asse_CMPGT32S; goto do_SseReRg;
+      case Iop_Max16Sx16:  op = Asse_MAX16S;   goto do_SseReRg;
+      case Iop_Max8Ux32:   op = Asse_MAX8U;    goto do_SseReRg;
+      case Iop_Min16Sx16:  op = Asse_MIN16S;   goto do_SseReRg;
+      case Iop_Min8Ux32:   op = Asse_MIN8U;    goto do_SseReRg;
+      case Iop_MulHi16Ux16: op = Asse_MULHI16U; goto do_SseReRg;
+      case Iop_MulHi16Sx16: op = Asse_MULHI16S; goto do_SseReRg;
+      case Iop_Mul16x16:   op = Asse_MUL16;    goto do_SseReRg;
+      case Iop_Sub8x32:    op = Asse_SUB8;     goto do_SseReRg;
+      case Iop_Sub16x16:   op = Asse_SUB16;    goto do_SseReRg;
+      case Iop_Sub32x8:    op = Asse_SUB32;    goto do_SseReRg;
+      case Iop_Sub64x4:    op = Asse_SUB64;    goto do_SseReRg;
+      case Iop_QSub8Sx32:  op = Asse_QSUB8S;   goto do_SseReRg;
+      case Iop_QSub16Sx16: op = Asse_QSUB16S;  goto do_SseReRg;
+      case Iop_QSub8Ux32:  op = Asse_QSUB8U;   goto do_SseReRg;
+      case Iop_QSub16Ux16: op = Asse_QSUB16U;  goto do_SseReRg;
+      do_SseReRg:
+      {
+         HReg argLhi, argLlo, argRhi, argRlo;
+         iselDVecExpr(&argLhi, &argLlo, env, e->Iex.Binop.arg1);
+         iselDVecExpr(&argRhi, &argRlo, env, e->Iex.Binop.arg2);
+         HReg dstHi = newVRegV(env);
+         HReg dstLo = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argLhi, dstHi));
+         addInstr(env, mk_vMOVsd_RR(argLlo, dstLo));
+         addInstr(env, AMD64Instr_SseReRg(op, argRhi, dstHi));
+         addInstr(env, AMD64Instr_SseReRg(op, argRlo, dstLo));
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      /* Shifts by a scalar amount: both halves shift by the same
+         amount, which is pushed onto the stack and loaded into one
+         xmm register (ereg) used for both halves. */
+      case Iop_ShlN16x16: op = Asse_SHL16; goto do_SseShift;
+      case Iop_ShlN32x8:  op = Asse_SHL32; goto do_SseShift;
+      case Iop_ShlN64x4:  op = Asse_SHL64; goto do_SseShift;
+      case Iop_SarN16x16: op = Asse_SAR16; goto do_SseShift;
+      case Iop_SarN32x8:  op = Asse_SAR32; goto do_SseShift;
+      case Iop_ShrN16x16: op = Asse_SHR16; goto do_SseShift;
+      case Iop_ShrN32x8:  op = Asse_SHR32; goto do_SseShift;
+      case Iop_ShrN64x4:  op = Asse_SHR64; goto do_SseShift;
+      do_SseShift: {
+         HReg gregHi, gregLo;
+         iselDVecExpr(&gregHi, &gregLo, env, e->Iex.Binop.arg1);
+         AMD64RMI*   rmi   = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+         AMD64AMode* rsp0  = AMD64AMode_IR(0, hregAMD64_RSP());
+         HReg        ereg  = newVRegV(env);
+         HReg        dstHi = newVRegV(env);
+         HReg        dstLo = newVRegV(env);
+         /* Build a 16-byte value {0, amount} on the stack and load it
+            into ereg; the two pushes total 16 bytes, popped below. */
+         addInstr(env, AMD64Instr_Push(AMD64RMI_Imm(0)));
+         addInstr(env, AMD64Instr_Push(rmi));
+         addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, ereg, rsp0));
+         addInstr(env, mk_vMOVsd_RR(gregHi, dstHi));
+         addInstr(env, AMD64Instr_SseReRg(op, ereg, dstHi));
+         addInstr(env, mk_vMOVsd_RR(gregLo, dstLo));
+         addInstr(env, AMD64Instr_SseReRg(op, ereg, dstLo));
+         add_to_rsp(env, 16);
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      /* Concatenate two V128s: arg1 is the high half. */
+      case Iop_V128HLtoV256: {
+         *rHi = iselVecExpr(env, e->Iex.Binop.arg1);
+         *rLo = iselVecExpr(env, e->Iex.Binop.arg2);
+         return;
+      }
+
+      /* Binops with no SSE2 encoding: route through a 128-bit C
+         helper, called once per half (see do_SseAssistedBinary). */
+      case Iop_Mul32x8:    fn = (HWord)h_generic_calc_Mul32x4;
+                           goto do_SseAssistedBinary;
+      case Iop_Max32Sx8:   fn = (HWord)h_generic_calc_Max32Sx4;
+                           goto do_SseAssistedBinary;
+      case Iop_Min32Sx8:   fn = (HWord)h_generic_calc_Min32Sx4;
+                           goto do_SseAssistedBinary;
+      case Iop_Max32Ux8:   fn = (HWord)h_generic_calc_Max32Ux4;
+                           goto do_SseAssistedBinary;
+      case Iop_Min32Ux8:   fn = (HWord)h_generic_calc_Min32Ux4;
+                           goto do_SseAssistedBinary;
+      case Iop_Max16Ux16:  fn = (HWord)h_generic_calc_Max16Ux8;
+                           goto do_SseAssistedBinary;
+      case Iop_Min16Ux16:  fn = (HWord)h_generic_calc_Min16Ux8;
+                           goto do_SseAssistedBinary;
+      case Iop_Max8Sx32:   fn = (HWord)h_generic_calc_Max8Sx16;
+                           goto do_SseAssistedBinary;
+      case Iop_Min8Sx32:   fn = (HWord)h_generic_calc_Min8Sx16;
+                           goto do_SseAssistedBinary;
+      case Iop_CmpEQ64x4:  fn = (HWord)h_generic_calc_CmpEQ64x2;
+                           goto do_SseAssistedBinary;
+      case Iop_CmpGT64Sx4: fn = (HWord)h_generic_calc_CmpGT64Sx2;
+                           goto do_SseAssistedBinary;
+      do_SseAssistedBinary: {
+         /* RRRufff!  RRRufff code is what we're generating here.  Oh
+            well. */
+         vassert(fn != 0);
+         HReg dstHi = newVRegV(env);
+         HReg dstLo = newVRegV(env);
+         HReg argLhi, argLlo, argRhi, argRlo;
+         iselDVecExpr(&argLhi, &argLlo, env, e->Iex.Binop.arg1);
+         iselDVecExpr(&argRhi, &argRlo, env, e->Iex.Binop.arg2);
+         HReg argp = newVRegI(env);
+         /* subq $160, %rsp         -- make a space*/
+         sub_from_rsp(env, 160);
+         /* leaq 48(%rsp), %r_argp  -- point into it */
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(48, hregAMD64_RSP()),
+                                        argp));
+         /* andq $-16, %r_argp      -- 16-align the pointer */
+         addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
+                                         AMD64RMI_Imm( ~(UInt)15 ),
+                                         argp));
+         /* Prepare 3 arg regs:
+            leaq 0(%r_argp), %rdi
+            leaq 16(%r_argp), %rsi
+            leaq 32(%r_argp), %rdx
+         */
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(0, argp),
+                                        hregAMD64_RDI()));
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(16, argp),
+                                        hregAMD64_RSI()));
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(32, argp),
+                                        hregAMD64_RDX()));
+         /* Store the two high args, at (%rsi) and (%rdx):
+            movupd  %argLhi, 0(%rsi)
+            movupd  %argRhi, 0(%rdx)
+         */
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argLhi,
+                                          AMD64AMode_IR(0, hregAMD64_RSI())));
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argRhi,
+                                          AMD64AMode_IR(0, hregAMD64_RDX())));
+         /* Store the two low args, at 48(%rsi) and 48(%rdx):
+            movupd  %argLlo, 48(%rsi)
+            movupd  %argRlo, 48(%rdx)
+         */
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argLlo,
+                                          AMD64AMode_IR(48, hregAMD64_RSI())));
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argRlo,
+                                          AMD64AMode_IR(48, hregAMD64_RDX())));
+         /* call the helper */
+         /* First call: computes the high half into 0(%r_argp). */
+         addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 3,
+                                        mk_RetLoc_simple(RLPri_None) ));
+         /* Prepare 3 arg regs:
+            leaq 48(%r_argp), %rdi
+            leaq 64(%r_argp), %rsi
+            leaq 80(%r_argp), %rdx
+         */
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(48, argp),
+                                        hregAMD64_RDI()));
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(64, argp),
+                                        hregAMD64_RSI()));
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(80, argp),
+                                        hregAMD64_RDX()));
+         /* call the helper */
+         /* Second call: computes the low half into 48(%r_argp),
+            reading the low args stored at +64/+80 above. */
+         addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 3,
+                                        mk_RetLoc_simple(RLPri_None) ));
+         /* fetch the result from memory, using %r_argp, which the
+            register allocator will keep alive across the call. */
+         addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 16, dstHi,
+                                          AMD64AMode_IR(0, argp)));
+         addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 16, dstLo,
+                                          AMD64AMode_IR(48, argp)));
+         /* and finally, clear the space */
+         add_to_rsp(env, 160);
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      /* Binops whose helper operates on a full 256-bit value: one
+         call, with both halves of each operand stored contiguously. */
+      case Iop_Perm32x8:   fn = (HWord)h_generic_calc_Perm32x8;
+                           goto do_SseAssistedBinary256;
+      do_SseAssistedBinary256: {
+         /* RRRufff!  RRRufff code is what we're generating here.  Oh
+            well. */
+         vassert(fn != 0);
+         HReg dstHi = newVRegV(env);
+         HReg dstLo = newVRegV(env);
+         HReg argLhi, argLlo, argRhi, argRlo;
+         iselDVecExpr(&argLhi, &argLlo, env, e->Iex.Binop.arg1);
+         iselDVecExpr(&argRhi, &argRlo, env, e->Iex.Binop.arg2);
+         HReg argp = newVRegI(env);
+         /* subq $160, %rsp         -- make a space*/
+         sub_from_rsp(env, 160);
+         /* leaq 48(%rsp), %r_argp  -- point into it */
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(48, hregAMD64_RSP()),
+                                        argp));
+         /* andq $-16, %r_argp      -- 16-align the pointer */
+         addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
+                                         AMD64RMI_Imm( ~(UInt)15 ),
+                                         argp));
+         /* Prepare 3 arg regs:
+            leaq 0(%r_argp), %rdi
+            leaq 32(%r_argp), %rsi
+            leaq 64(%r_argp), %rdx
+         */
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(0, argp),
+                                        hregAMD64_RDI()));
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(32, argp),
+                                        hregAMD64_RSI()));
+         addInstr(env, AMD64Instr_Lea64(AMD64AMode_IR(64, argp),
+                                        hregAMD64_RDX()));
+         /* Store the two args, at (%rsi) and (%rdx):
+            movupd  %argLlo, 0(%rsi)
+            movupd  %argLhi, 16(%rsi)
+            movupd  %argRlo, 0(%rdx)
+            movupd  %argRhi, 16(%rdx)
+         */
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argLlo,
+                                          AMD64AMode_IR(0, hregAMD64_RSI())));
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argLhi,
+                                          AMD64AMode_IR(16, hregAMD64_RSI())));
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argRlo,
+                                          AMD64AMode_IR(0, hregAMD64_RDX())));
+         addInstr(env, AMD64Instr_SseLdSt(False/*!isLoad*/, 16, argRhi,
+                                          AMD64AMode_IR(16, hregAMD64_RDX())));
+         /* call the helper */
+         addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 3,
+                                        mk_RetLoc_simple(RLPri_None) ));
+         /* fetch the result from memory, using %r_argp, which the
+            register allocator will keep alive across the call. */
+         addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 16, dstLo,
+                                          AMD64AMode_IR(0, argp)));
+         addInstr(env, AMD64Instr_SseLdSt(True/*isLoad*/, 16, dstHi,
+                                          AMD64AMode_IR(16, argp)));
+         /* and finally, clear the space */
+         add_to_rsp(env, 160);
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      default:
+         break;
+   } /* switch (e->Iex.Binop.op) */
+   } /* if (e->tag == Iex_Binop) */
+
+   /* --- Ternary operations (FP arith carrying a rounding mode in
+      arg1, currently ignored -- see XXXROUNDINGFIXME below) --- */
+   if (e->tag == Iex_Triop) {
+   IRTriop *triop = e->Iex.Triop.details;
+   switch (triop->op) {
+
+      case Iop_Add64Fx4: op = Asse_ADDF; goto do_64Fx4_w_rm;
+      case Iop_Sub64Fx4: op = Asse_SUBF; goto do_64Fx4_w_rm;
+      case Iop_Mul64Fx4: op = Asse_MULF; goto do_64Fx4_w_rm;
+      case Iop_Div64Fx4: op = Asse_DIVF; goto do_64Fx4_w_rm;
+      do_64Fx4_w_rm:
+      {
+         HReg argLhi, argLlo, argRhi, argRlo;
+         iselDVecExpr(&argLhi, &argLlo, env, triop->arg2);
+         iselDVecExpr(&argRhi, &argRlo, env, triop->arg3);
+         HReg dstHi = newVRegV(env);
+         HReg dstLo = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argLhi, dstHi));
+         addInstr(env, mk_vMOVsd_RR(argLlo, dstLo));
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, AMD64Instr_Sse64Fx2(op, argRhi, dstHi));
+         addInstr(env, AMD64Instr_Sse64Fx2(op, argRlo, dstLo));
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      case Iop_Add32Fx8: op = Asse_ADDF; goto do_32Fx8_w_rm;
+      case Iop_Sub32Fx8: op = Asse_SUBF; goto do_32Fx8_w_rm;
+      case Iop_Mul32Fx8: op = Asse_MULF; goto do_32Fx8_w_rm;
+      case Iop_Div32Fx8: op = Asse_DIVF; goto do_32Fx8_w_rm;
+      do_32Fx8_w_rm:
+      {
+         HReg argLhi, argLlo, argRhi, argRlo;
+         iselDVecExpr(&argLhi, &argLlo, env, triop->arg2);
+         iselDVecExpr(&argRhi, &argRlo, env, triop->arg3);
+         HReg dstHi = newVRegV(env);
+         HReg dstLo = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argLhi, dstHi));
+         addInstr(env, mk_vMOVsd_RR(argLlo, dstLo));
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, AMD64Instr_Sse32Fx4(op, argRhi, dstHi));
+         addInstr(env, AMD64Instr_Sse32Fx4(op, argRlo, dstLo));
+         *rHi = dstHi;
+         *rLo = dstLo;
+         return;
+      }
+
+      default:
+         break;
+   } /* switch (triop->op) */
+   } /* if (e->tag == Iex_Triop) */
+
+
+   /* --- Build a V256 from four I64s, via two 16-byte stack stores
+      and reloads.  -8(rsp)/-16(rsp) are scratch (below the red-zone
+      convention used elsewhere in this backend -- NOTE(review):
+      assumes nothing else owns that area; pattern matches the rest
+      of the file). --- */
+   if (e->tag == Iex_Qop && e->Iex.Qop.details->op == Iop_64x4toV256) {
+      HReg        rsp     = hregAMD64_RSP();
+      HReg        vHi     = newVRegV(env);
+      HReg        vLo     = newVRegV(env);
+      AMD64AMode* m8_rsp  = AMD64AMode_IR(-8, rsp);
+      AMD64AMode* m16_rsp = AMD64AMode_IR(-16, rsp);
+      /* arg1 is the most significant (Q3), arg4 the least (Q0) */
+      /* Get all the args into regs, before messing with the stack. */
+      AMD64RI* q3  = iselIntExpr_RI(env, e->Iex.Qop.details->arg1);
+      AMD64RI* q2  = iselIntExpr_RI(env, e->Iex.Qop.details->arg2);
+      AMD64RI* q1  = iselIntExpr_RI(env, e->Iex.Qop.details->arg3);
+      AMD64RI* q0  = iselIntExpr_RI(env, e->Iex.Qop.details->arg4);
+      /* less significant lane (Q2) at the lower address (-16(rsp)) */
+      addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, q3, m8_rsp));
+      addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, q2, m16_rsp));
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, vHi, m16_rsp));
+      /* and then the lower half .. */
+      addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, q1, m8_rsp));
+      addInstr(env, AMD64Instr_Alu64M(Aalu_MOV, q0, m16_rsp));
+      addInstr(env, AMD64Instr_SseLdSt(True/*load*/, 16, vLo, m16_rsp));
+      *rHi = vHi;
+      *rLo = vLo;
+      return;
+   }
+
+   /* --- ITE: copy the iftrue value, then conditionally overwrite
+      with iffalse using the inverted condition (cc ^ 1). --- */
+   if (e->tag == Iex_ITE) {
+      HReg r1Hi, r1Lo, r0Hi, r0Lo;
+      iselDVecExpr(&r1Hi, &r1Lo, env, e->Iex.ITE.iftrue);
+      iselDVecExpr(&r0Hi, &r0Lo, env, e->Iex.ITE.iffalse);
+      HReg dstHi = newVRegV(env);
+      HReg dstLo = newVRegV(env);
+      addInstr(env, mk_vMOVsd_RR(r1Hi,dstHi));
+      addInstr(env, mk_vMOVsd_RR(r1Lo,dstLo));
+      AMD64CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+      addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0Hi, dstHi));
+      addInstr(env, AMD64Instr_SseCMov(cc ^ 1, r0Lo, dstLo));
+      *rHi = dstHi;
+      *rLo = dstLo;
+      return;
+   }
+
+   //avx_fail:
+   /* No rule matched: report the unhandled expression and abort. */
+   vex_printf("iselDVecExpr (amd64, subarch = %s): can't reduce\n",
+              LibVEX_ppVexHwCaps(VexArchAMD64, env->hwcaps));
+   ppIRExpr(e);
+   vpanic("iselDVecExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Statements                                  ---*/
+/*---------------------------------------------------------*/
+
+static void iselStmt ( ISelEnv* env, IRStmt* stmt )
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n-- ");
+      ppIRStmt(stmt);
+      vex_printf("\n");
+   }
+
+   switch (stmt->tag) {
+
+   /* --------- LOADG (guarded load) --------- */
+   case Ist_LoadG: {
+      IRLoadG* lg = stmt->Ist.LoadG.details;
+      if (lg->end != Iend_LE)
+         goto stmt_fail;
+
+      UChar szB = 0; /* invalid */
+      switch (lg->cvt) {
+         case ILGop_Ident32: szB = 4; break;
+         case ILGop_Ident64: szB = 8; break;
+         default: break;
+      }
+      if (szB == 0)
+         goto stmt_fail;
+
+      AMD64AMode* amAddr = iselIntExpr_AMode(env, lg->addr);
+      HReg rAlt  = iselIntExpr_R(env, lg->alt);
+      HReg rDst  = lookupIRTemp(env, lg->dst);
+      /* Get the alt value into the dst.  We'll do a conditional load
+         which overwrites it -- or not -- with loaded data. */
+      addInstr(env, mk_iMOVsd_RR(rAlt, rDst));
+      AMD64CondCode cc = iselCondCode(env, lg->guard);
+      addInstr(env, AMD64Instr_CLoad(cc, szB, amAddr, rDst));
+      return;
+   }
+
+   /* --------- STOREG (guarded store) --------- */
+   case Ist_StoreG: {
+      IRStoreG* sg = stmt->Ist.StoreG.details;
+      if (sg->end != Iend_LE)
+         goto stmt_fail;
+
+      UChar szB = 0; /* invalid */
+      switch (typeOfIRExpr(env->type_env, sg->data)) {
+         case Ity_I32: szB = 4; break;
+         case Ity_I64: szB = 8; break;
+         default: break;
+      }
+      if (szB == 0)
+         goto stmt_fail;
+
+      AMD64AMode*   amAddr = iselIntExpr_AMode(env, sg->addr);
+      HReg          rSrc   = iselIntExpr_R(env, sg->data);
+      AMD64CondCode cc     = iselCondCode(env, sg->guard);
+      addInstr(env, AMD64Instr_CStore(cc, szB, rSrc, amAddr));
+      return;
+   }
+
+   /* --------- STORE --------- */
+   case Ist_Store: {
+      IRType    tya   = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
+      IRType    tyd   = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
+      IREndness end   = stmt->Ist.Store.end;
+
+      if (tya != Ity_I64 || end != Iend_LE)
+         goto stmt_fail;
+
+      if (tyd == Ity_I64) {
+         AMD64AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         AMD64RI* ri = iselIntExpr_RI(env, stmt->Ist.Store.data);
+         addInstr(env, AMD64Instr_Alu64M(Aalu_MOV,ri,am));
+         return;
+      }
+      if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32) {
+         AMD64AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         HReg r = iselIntExpr_R(env, stmt->Ist.Store.data);
+         addInstr(env, AMD64Instr_Store(
+                          toUChar(tyd==Ity_I8 ? 1 : (tyd==Ity_I16 ? 2 : 4)),
+                          r,am));
+         return;
+      }
+      if (tyd == Ity_F64) {
+         AMD64AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         HReg r = iselDblExpr(env, stmt->Ist.Store.data);
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, r, am));
+         return;
+      }
+      if (tyd == Ity_F32) {
+         AMD64AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         HReg r = iselFltExpr(env, stmt->Ist.Store.data);
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 4, r, am));
+         return;
+      }
+      if (tyd == Ity_V128) {
+         AMD64AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         HReg r = iselVecExpr(env, stmt->Ist.Store.data);
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, r, am));
+         return;
+      }
+      if (tyd == Ity_V256) {
+         HReg        rA   = iselIntExpr_R(env, stmt->Ist.Store.addr);
+         AMD64AMode* am0  = AMD64AMode_IR(0,  rA);
+         AMD64AMode* am16 = AMD64AMode_IR(16, rA);
+         HReg vHi, vLo;
+         iselDVecExpr(&vHi, &vLo, env, stmt->Ist.Store.data);
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, vLo, am0));
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, vHi, am16));
+         return;
+      }
+      break;
+   }
+
+   /* --------- PUT --------- */
+   case Ist_Put: {
+      IRType ty = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
+      if (ty == Ity_I64) {
+         /* We're going to write to memory, so compute the RHS into an
+            AMD64RI. */
+         AMD64RI* ri = iselIntExpr_RI(env, stmt->Ist.Put.data);
+         addInstr(env,
+                  AMD64Instr_Alu64M(
+                     Aalu_MOV,
+                     ri,
+                     AMD64AMode_IR(stmt->Ist.Put.offset,
+                                   hregAMD64_RBP())
+                 ));
+         return;
+      }
+      if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32) {
+         HReg r = iselIntExpr_R(env, stmt->Ist.Put.data);
+         addInstr(env, AMD64Instr_Store(
+                          toUChar(ty==Ity_I8 ? 1 : (ty==Ity_I16 ? 2 : 4)),
+                          r,
+                          AMD64AMode_IR(stmt->Ist.Put.offset,
+                                        hregAMD64_RBP())));
+         return;
+      }
+      if (ty == Ity_F32) {
+         HReg f32 = iselFltExpr(env, stmt->Ist.Put.data);
+         AMD64AMode* am = AMD64AMode_IR(stmt->Ist.Put.offset, hregAMD64_RBP());
+         set_SSE_rounding_default(env); /* paranoia */
+         addInstr(env, AMD64Instr_SseLdSt( False/*store*/, 4, f32, am ));
+         return;
+      }
+      if (ty == Ity_F64) {
+         HReg f64 = iselDblExpr(env, stmt->Ist.Put.data);
+         AMD64AMode* am = AMD64AMode_IR( stmt->Ist.Put.offset, 
+                                         hregAMD64_RBP() );
+         addInstr(env, AMD64Instr_SseLdSt( False/*store*/, 8, f64, am ));
+         return;
+      }
+      if (ty == Ity_V128) {
+         HReg        vec = iselVecExpr(env, stmt->Ist.Put.data);
+         AMD64AMode* am  = AMD64AMode_IR(stmt->Ist.Put.offset, 
+                                         hregAMD64_RBP());
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, vec, am));
+         return;
+      }
+      if (ty == Ity_V256) {
+         HReg vHi, vLo;
+         iselDVecExpr(&vHi, &vLo, env, stmt->Ist.Put.data);
+         HReg        rbp  = hregAMD64_RBP();
+         AMD64AMode* am0  = AMD64AMode_IR(stmt->Ist.Put.offset + 0,  rbp);
+         AMD64AMode* am16 = AMD64AMode_IR(stmt->Ist.Put.offset + 16, rbp);
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, vLo, am0));
+         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, vHi, am16));
+         return;
+      }
+      break;
+   }
+
+   /* --------- Indexed PUT --------- */
+   case Ist_PutI: {
+      IRPutI *puti = stmt->Ist.PutI.details;
+
+      AMD64AMode* am 
+         = genGuestArrayOffset(
+              env, puti->descr, 
+                   puti->ix, puti->bias );
+
+      IRType ty = typeOfIRExpr(env->type_env, puti->data);
+      if (ty == Ity_F64) {
+         HReg val = iselDblExpr(env, puti->data);
+         addInstr(env, AMD64Instr_SseLdSt( False/*store*/, 8, val, am ));
+         return;
+      }
+      if (ty == Ity_I8) {
+         HReg r = iselIntExpr_R(env, puti->data);
+         addInstr(env, AMD64Instr_Store( 1, r, am ));
+         return;
+      }
+      if (ty == Ity_I64) {
+         AMD64RI* ri = iselIntExpr_RI(env, puti->data);
+         addInstr(env, AMD64Instr_Alu64M( Aalu_MOV, ri, am ));
+         return;
+      }
+      break;
+   }
+
+   /* --------- TMP --------- */
+   case Ist_WrTmp: {
+      IRTemp tmp = stmt->Ist.WrTmp.tmp;
+      IRType ty = typeOfIRTemp(env->type_env, tmp);
+
+      /* optimisation: if stmt->Ist.WrTmp.data is Add64(..,..),
+         compute it into an AMode and then use LEA.  This usually
+         produces fewer instructions, often because (for memcheck
+         created IR) we get t = address-expression, (t is later used
+         twice) and so doing this naturally turns address-expression
+         back into an AMD64 amode. */
+      if (ty == Ity_I64 
+          && stmt->Ist.WrTmp.data->tag == Iex_Binop
+          && stmt->Ist.WrTmp.data->Iex.Binop.op == Iop_Add64) {
+         AMD64AMode* am = iselIntExpr_AMode(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         if (am->tag == Aam_IR && am->Aam.IR.imm == 0) {
+            /* Hmm, iselIntExpr_AMode wimped out and just computed the
+               value into a register.  Just emit a normal reg-reg move
+               so reg-alloc can coalesce it away in the usual way. */
+            HReg src = am->Aam.IR.reg;
+            addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, AMD64RMI_Reg(src), dst));
+         } else {
+            addInstr(env, AMD64Instr_Lea64(am,dst));
+         }
+         return;
+      }
+
+      if (ty == Ity_I64 || ty == Ity_I32 
+          || ty == Ity_I16 || ty == Ity_I8) {
+         AMD64RMI* rmi = iselIntExpr_RMI(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, AMD64Instr_Alu64R(Aalu_MOV,rmi,dst));
+         return;
+      }
+      if (ty == Ity_I128) {
+         HReg rHi, rLo, dstHi, dstLo;
+         iselInt128Expr(&rHi,&rLo, env, stmt->Ist.WrTmp.data);
+         lookupIRTempPair( &dstHi, &dstLo, env, tmp);
+         addInstr(env, mk_iMOVsd_RR(rHi,dstHi) );
+         addInstr(env, mk_iMOVsd_RR(rLo,dstLo) );
+         return;
+      }
+      if (ty == Ity_I1) {
+         AMD64CondCode cond = iselCondCode(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, AMD64Instr_Set64(cond, dst));
+         return;
+      }
+      if (ty == Ity_F64) {
+         HReg dst = lookupIRTemp(env, tmp);
+         HReg src = iselDblExpr(env, stmt->Ist.WrTmp.data);
+         addInstr(env, mk_vMOVsd_RR(src, dst));
+         return;
+      }
+      if (ty == Ity_F32) {
+         HReg dst = lookupIRTemp(env, tmp);
+         HReg src = iselFltExpr(env, stmt->Ist.WrTmp.data);
+         addInstr(env, mk_vMOVsd_RR(src, dst));
+         return;
+      }
+      if (ty == Ity_V128) {
+         HReg dst = lookupIRTemp(env, tmp);
+         HReg src = iselVecExpr(env, stmt->Ist.WrTmp.data);
+         addInstr(env, mk_vMOVsd_RR(src, dst));
+         return;
+      }
+      if (ty == Ity_V256) {
+         HReg rHi, rLo, dstHi, dstLo;
+         iselDVecExpr(&rHi,&rLo, env, stmt->Ist.WrTmp.data);
+         lookupIRTempPair( &dstHi, &dstLo, env, tmp);
+         addInstr(env, mk_vMOVsd_RR(rHi,dstHi) );
+         addInstr(env, mk_vMOVsd_RR(rLo,dstLo) );
+         return;
+      }
+      break;
+   }
+
+   /* --------- Call to DIRTY helper --------- */
+   case Ist_Dirty: {
+      IRDirty* d = stmt->Ist.Dirty.details;
+
+      /* Figure out the return type, if any. */
+      IRType retty = Ity_INVALID;
+      if (d->tmp != IRTemp_INVALID)
+         retty = typeOfIRTemp(env->type_env, d->tmp);
+
+      /* Throw out any return types we don't know about. */
+      Bool retty_ok = False;
+      switch (retty) {
+         case Ity_INVALID: /* function doesn't return anything */
+         case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8:
+         case Ity_V128: case Ity_V256:
+            retty_ok = True; break;
+         default:
+            break;
+      }
+      if (!retty_ok)
+         break; /* will go to stmt_fail: */
+
+      /* Marshal args, do the call, and set the return value to
+         0x555..555 if this is a conditional call that returns a value
+         and the call is skipped. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, d->guard, d->cee, retty, d->args );
+      vassert(is_sane_RetLoc(rloc));
+
+      /* Now figure out what to do with the returned value, if any. */
+      switch (retty) {
+         case Ity_INVALID: {
+            /* No return value.  Nothing to do. */
+            vassert(d->tmp == IRTemp_INVALID);
+            vassert(rloc.pri == RLPri_None);
+            vassert(addToSp == 0);
+            return;
+         }
+         case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8: {
+            /* The returned value is in %rax.  Park it in the register
+               associated with tmp. */
+            vassert(rloc.pri == RLPri_Int);
+            vassert(addToSp == 0);
+            HReg dst = lookupIRTemp(env, d->tmp);
+            addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(),dst) );
+            return;
+         }
+         case Ity_V128: {
+            /* The returned value is on the stack, and rloc.spOff
+               tells us where.  Fish it off the stack and then move
+               the stack pointer upwards to clear it, as directed by
+               doHelperCall. */
+            vassert(rloc.pri == RLPri_V128SpRel);
+            vassert(addToSp >= 16);
+            HReg        dst = lookupIRTemp(env, d->tmp);
+            AMD64AMode* am  = AMD64AMode_IR(rloc.spOff, hregAMD64_RSP());
+            addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 16, dst, am ));
+            add_to_rsp(env, addToSp);
+            return;
+         }
+         case Ity_V256: {
+            /* See comments for Ity_V128. */
+            vassert(rloc.pri == RLPri_V256SpRel);
+            vassert(addToSp >= 32);
+            HReg        dstLo, dstHi;
+            lookupIRTempPair(&dstHi, &dstLo, env, d->tmp);
+            AMD64AMode* amLo  = AMD64AMode_IR(rloc.spOff, hregAMD64_RSP());
+            addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 16, dstLo, amLo ));
+            AMD64AMode* amHi  = AMD64AMode_IR(rloc.spOff+16, hregAMD64_RSP());
+            addInstr(env, AMD64Instr_SseLdSt( True/*load*/, 16, dstHi, amHi ));
+            add_to_rsp(env, addToSp);
+            return;
+         }
+         default:
+            /*NOTREACHED*/
+            vassert(0);
+      }
+      break;
+   }
+
+   /* --------- MEM FENCE --------- */
+   case Ist_MBE:
+      switch (stmt->Ist.MBE.event) {
+         case Imbe_Fence:
+            addInstr(env, AMD64Instr_MFence());
+            return;
+         default:
+            break;
+      }
+      break;
+
+   /* --------- ACAS --------- */
+   case Ist_CAS:
+      if (stmt->Ist.CAS.details->oldHi == IRTemp_INVALID) {
+         /* "normal" singleton CAS */
+         UChar  sz;
+         IRCAS* cas = stmt->Ist.CAS.details;
+         IRType ty  = typeOfIRExpr(env->type_env, cas->dataLo);
+         /* get: cas->expd into %rax, and cas->data into %rbx */
+         AMD64AMode* am = iselIntExpr_AMode(env, cas->addr);
+         HReg rData = iselIntExpr_R(env, cas->dataLo);
+         HReg rExpd = iselIntExpr_R(env, cas->expdLo);
+         HReg rOld  = lookupIRTemp(env, cas->oldLo);
+         vassert(cas->expdHi == NULL);
+         vassert(cas->dataHi == NULL);
+         addInstr(env, mk_iMOVsd_RR(rExpd, rOld));
+         addInstr(env, mk_iMOVsd_RR(rExpd, hregAMD64_RAX()));
+         addInstr(env, mk_iMOVsd_RR(rData, hregAMD64_RBX()));
+         switch (ty) { 
+            case Ity_I64: sz = 8; break;
+            case Ity_I32: sz = 4; break;
+            case Ity_I16: sz = 2; break;
+            case Ity_I8:  sz = 1; break; 
+            default: goto unhandled_cas;
+         }
+         addInstr(env, AMD64Instr_ACAS(am, sz));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOld));
+         return;
+      } else {
+         /* double CAS */
+         UChar  sz;
+         IRCAS* cas = stmt->Ist.CAS.details;
+         IRType ty  = typeOfIRExpr(env->type_env, cas->dataLo);
+         /* only 32-bit and 64-bit allowed in this case */
+         /* get: cas->expdLo into %rax, and cas->dataLo into %rbx */
+         /* get: cas->expdHi into %rdx, and cas->dataHi into %rcx */
+         AMD64AMode* am = iselIntExpr_AMode(env, cas->addr);
+         HReg rDataHi = iselIntExpr_R(env, cas->dataHi);
+         HReg rDataLo = iselIntExpr_R(env, cas->dataLo);
+         HReg rExpdHi = iselIntExpr_R(env, cas->expdHi);
+         HReg rExpdLo = iselIntExpr_R(env, cas->expdLo);
+         HReg rOldHi  = lookupIRTemp(env, cas->oldHi);
+         HReg rOldLo  = lookupIRTemp(env, cas->oldLo);
+         switch (ty) { 
+            case Ity_I64:
+               if (!(env->hwcaps & VEX_HWCAPS_AMD64_CX16))
+                  goto unhandled_cas; /* we'd have to generate
+                                         cmpxchg16b, but the host
+                                         doesn't support that */
+               sz = 8;
+               break;
+            case Ity_I32:
+               sz = 4;
+               break;
+            default:
+               goto unhandled_cas;
+         }
+         addInstr(env, mk_iMOVsd_RR(rExpdHi, rOldHi));
+         addInstr(env, mk_iMOVsd_RR(rExpdLo, rOldLo));
+         addInstr(env, mk_iMOVsd_RR(rExpdHi, hregAMD64_RDX()));
+         addInstr(env, mk_iMOVsd_RR(rExpdLo, hregAMD64_RAX()));
+         addInstr(env, mk_iMOVsd_RR(rDataHi, hregAMD64_RCX()));
+         addInstr(env, mk_iMOVsd_RR(rDataLo, hregAMD64_RBX()));
+         addInstr(env, AMD64Instr_DACAS(am, sz));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RDX(), rOldHi));
+         addInstr(env, AMD64Instr_CMov64(Acc_NZ, hregAMD64_RAX(), rOldLo));
+         return;
+      }
+      unhandled_cas:
+      break;
+
+   /* --------- INSTR MARK --------- */
+   /* Doesn't generate any executable code ... */
+   case Ist_IMark:
+       return;
+
+   /* --------- ABI HINT --------- */
+   /* These have no meaning (denotation in the IR) and so we ignore
+      them ... if any actually made it this far. */
+   case Ist_AbiHint:
+       return;
+
+   /* --------- NO-OP --------- */
+   case Ist_NoOp:
+       return;
+
+   /* --------- EXIT --------- */
+   case Ist_Exit: {
+      if (stmt->Ist.Exit.dst->tag != Ico_U64)
+         vpanic("iselStmt(amd64): Ist_Exit: dst is not a 64-bit value");
+
+      AMD64CondCode cc    = iselCondCode(env, stmt->Ist.Exit.guard);
+      AMD64AMode*   amRIP = AMD64AMode_IR(stmt->Ist.Exit.offsIP,
+                                          hregAMD64_RBP());
+
+      /* Case: boring transfer to known address */
+      if (stmt->Ist.Exit.jk == Ijk_Boring) {
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = ((Addr64)stmt->Ist.Exit.dst->Ico.U64) > env->max_ga;
+            if (0) vex_printf("%s", toFastEP ? "Y" : ",");
+            addInstr(env, AMD64Instr_XDirect(stmt->Ist.Exit.dst->Ico.U64,
+                                             amRIP, cc, toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselIntExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, AMD64Instr_XAssisted(r, amRIP, cc, Ijk_Boring));
+         }
+         return;
+      }
+
+      /* Case: assisted transfer to arbitrary address */
+      switch (stmt->Ist.Exit.jk) {
+         /* Keep this list in sync with that in iselNext below */
+         case Ijk_ClientReq:
+         case Ijk_EmWarn:
+         case Ijk_NoDecode:
+         case Ijk_NoRedir:
+         case Ijk_SigSEGV:
+         case Ijk_SigTRAP:
+         case Ijk_Sys_syscall:
+         case Ijk_InvalICache:
+         case Ijk_Yield:
+         {
+            HReg r = iselIntExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, AMD64Instr_XAssisted(r, amRIP, cc, stmt->Ist.Exit.jk));
+            return;
+         }
+         default:
+            break;
+      }
+
+      /* Do we ever expect to see any other kind? */
+      goto stmt_fail;
+   }
+
+   default: break;
+   }
+  stmt_fail:
+   ppIRStmt(stmt);
+   vpanic("iselStmt(amd64)");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Basic block terminators (Nexts)             ---*/
+/*---------------------------------------------------------*/
+
+/* Generate code for the transfer at the end of a superblock.  |next|
+   is the destination guest-address expression, |jk| the jump kind,
+   and |offsIP| the guest-state offset of the IP, used to build a
+   %rbp-relative amode through which the destination is written back.
+   Three schemes are tried in order: chainable transfer to a known
+   constant address, indirect transfer for call/return-style jumps to
+   a computed address, and assisted transfer for the special jump
+   kinds.  Any other jump kind is printed and then asserted on. */
+static void iselNext ( ISelEnv* env,
+                       IRExpr* next, IRJumpKind jk, Int offsIP )
+{
+   /* Optional tracing of the block-end, mirroring the IR form. */
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf( "\n-- PUT(%d) = ", offsIP);
+      ppIRExpr( next );
+      vex_printf( "; exit-");
+      ppIRJumpKind(jk);
+      vex_printf( "\n");
+   }
+
+   /* Case: boring transfer to known address */
+   if (next->tag == Iex_Const) {
+      IRConst* cdst = next->Iex.Const.con;
+      vassert(cdst->tag == Ico_U64);
+      if (jk == Ijk_Boring || jk == Ijk_Call) {
+         /* Boring transfer to known address */
+         AMD64AMode* amRIP = AMD64AMode_IR(offsIP, hregAMD64_RBP());
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = ((Addr64)cdst->Ico.U64) > env->max_ga;
+            if (0) vex_printf("%s", toFastEP ? "X" : ".");
+            addInstr(env, AMD64Instr_XDirect(cdst->Ico.U64, 
+                                             amRIP, Acc_ALWAYS, 
+                                             toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an indirect transfer,
+               as that's the cheapest alternative that is
+               allowable. */
+            HReg r = iselIntExpr_R(env, next);
+            addInstr(env, AMD64Instr_XAssisted(r, amRIP, Acc_ALWAYS,
+                                               Ijk_Boring));
+         }
+         return;
+      }
+   }
+
+   /* Case: call/return (==boring) transfer to any address */
+   switch (jk) {
+      case Ijk_Boring: case Ijk_Ret: case Ijk_Call: {
+         HReg        r     = iselIntExpr_R(env, next);
+         AMD64AMode* amRIP = AMD64AMode_IR(offsIP, hregAMD64_RBP());
+         if (env->chainingAllowed) {
+            addInstr(env, AMD64Instr_XIndir(r, amRIP, Acc_ALWAYS));
+         } else {
+            addInstr(env, AMD64Instr_XAssisted(r, amRIP, Acc_ALWAYS,
+                                               Ijk_Boring));
+         }
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* Case: assisted transfer to arbitrary address */
+   switch (jk) {
+      /* Keep this list in sync with that for Ist_Exit above */
+      case Ijk_ClientReq:
+      case Ijk_EmWarn:
+      case Ijk_NoDecode:
+      case Ijk_NoRedir:
+      case Ijk_SigSEGV:
+      case Ijk_SigTRAP:
+      case Ijk_Sys_syscall:
+      case Ijk_InvalICache:
+      case Ijk_Yield: {
+         HReg        r     = iselIntExpr_R(env, next);
+         AMD64AMode* amRIP = AMD64AMode_IR(offsIP, hregAMD64_RBP());
+         addInstr(env, AMD64Instr_XAssisted(r, amRIP, Acc_ALWAYS, jk));
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* No translation scheme applies: dump the offending exit and die. */
+   vex_printf( "\n-- PUT(%d) = ", offsIP);
+   ppIRExpr( next );
+   vex_printf( "; exit-");
+   ppIRJumpKind(jk);
+   vex_printf( "\n");
+   vassert(0); // are we expecting any other kind?
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Insn selector top-level                           ---*/
+/*---------------------------------------------------------*/
+
+/* Translate an entire SB to amd64 code. */
+
+/* Translate the IR superblock |bb| into AMD64 host code.  Returns an
+   HInstrArray of virtual-registerised instructions; register
+   allocation happens later, elsewhere.  |offs_Host_EvC_Counter| and
+   |offs_Host_EvC_FailAddr| are guest-state offsets from which the
+   event-check amodes are built; |chainingAllowed|, |addProfInc| and
+   |max_ga| are stashed in the ISelEnv and consulted when selecting
+   block exits.  |vbi| is accepted but unused on this target. */
+HInstrArray* iselSB_AMD64 ( const IRSB* bb,
+                            VexArch      arch_host,
+                            const VexArchInfo* archinfo_host,
+                            const VexAbiInfo*  vbi/*UNUSED*/,
+                            Int offs_Host_EvC_Counter,
+                            Int offs_Host_EvC_FailAddr,
+                            Bool chainingAllowed,
+                            Bool addProfInc,
+                            Addr max_ga )
+{
+   Int        i, j;
+   HReg       hreg, hregHI;
+   ISelEnv*   env;
+   UInt       hwcaps_host = archinfo_host->hwcaps;
+   AMD64AMode *amCounter, *amFailAddr;
+
+   /* sanity: right arch, and only hwcaps bits this isel knows about */
+   vassert(arch_host == VexArchAMD64);
+   vassert(0 == (hwcaps_host
+                 & ~(VEX_HWCAPS_AMD64_SSE3
+                     | VEX_HWCAPS_AMD64_CX16
+                     | VEX_HWCAPS_AMD64_LZCNT
+                     | VEX_HWCAPS_AMD64_AVX
+                     | VEX_HWCAPS_AMD64_RDTSCP
+                     | VEX_HWCAPS_AMD64_BMI
+                     | VEX_HWCAPS_AMD64_AVX2)));
+
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
+   /* Make up an initial environment to use. */
+   env = LibVEX_Alloc_inline(sizeof(ISelEnv));
+   env->vreg_ctr = 0;
+
+   /* Set up output code array. */
+   env->code = newHInstrArray();
+
+   /* Copy BB's type env. */
+   env->type_env = bb->tyenv;
+
+   /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
+      change as we go along.  Wide (128/256-bit) temps use a pair of
+      host registers, the high half living in vregmapHI. */
+   env->n_vregmap = bb->tyenv->types_used;
+   env->vregmap   = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+
+   /* and finally ... */
+   env->chainingAllowed = chainingAllowed;
+   env->hwcaps          = hwcaps_host;
+   env->max_ga          = max_ga;
+
+   /* For each IR temporary, allocate a suitably-kinded virtual
+      register. */
+   j = 0;
+   for (i = 0; i < env->n_vregmap; i++) {
+      hregHI = hreg = INVALID_HREG;
+      switch (bb->tyenv->types[i]) {
+         case Ity_I1:
+         case Ity_I8: case Ity_I16: case Ity_I32: case Ity_I64:
+            hreg = mkHReg(True, HRcInt64, 0, j++);
+            break;
+         case Ity_I128:
+            hreg   = mkHReg(True, HRcInt64, 0, j++);
+            hregHI = mkHReg(True, HRcInt64, 0, j++);
+            break;
+         case Ity_F32:
+         case Ity_F64:
+         case Ity_V128:
+            hreg = mkHReg(True, HRcVec128, 0, j++);
+            break;
+         case Ity_V256:
+            hreg   = mkHReg(True, HRcVec128, 0, j++);
+            hregHI = mkHReg(True, HRcVec128, 0, j++);
+            break;
+         default:
+            ppIRType(bb->tyenv->types[i]);
+            vpanic("iselBB(amd64): IRTemp type");
+      }
+      env->vregmap[i]   = hreg;
+      env->vregmapHI[i] = hregHI;
+   }
+   env->vreg_ctr = j;
+
+   /* The very first instruction must be an event check. */
+   amCounter  = AMD64AMode_IR(offs_Host_EvC_Counter,  hregAMD64_RBP());
+   amFailAddr = AMD64AMode_IR(offs_Host_EvC_FailAddr, hregAMD64_RBP());
+   addInstr(env, AMD64Instr_EvCheck(amCounter, amFailAddr));
+
+   /* Possibly a block counter increment (for profiling).  At this
+      point we don't know the address of the counter, so just pretend
+      it is zero.  It will have to be patched later, but before this
+      translation is used, by a call to LibVEX_patchProfCtr. */
+   if (addProfInc) {
+      addInstr(env, AMD64Instr_ProfInc());
+   }
+
+   /* Ok, finally we can iterate over the statements. */
+   for (i = 0; i < bb->stmts_used; i++)
+      if (bb->stmts[i])
+         iselStmt(env, bb->stmts[i]);
+
+   iselNext(env, bb->next, bb->jumpkind, bb->offsIP);
+
+   /* record the number of vregs we used. */
+   env->code->n_vregs = env->vreg_ctr;
+   return env->code;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                   host_amd64_isel.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_arm64_defs.c b/VEX/priv/host_arm64_defs.c
new file mode 100644
index 0000000..b886e81
--- /dev/null
+++ b/VEX/priv/host_arm64_defs.c
@@ -0,0 +1,5520 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 host_arm64_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2013-2013 OpenWorks
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "host_arm64_defs.h"
+
+
+/* --------- Registers. --------- */
+
+/* The usual HReg abstraction.  We use the following classes only:
+     X regs (64 bit int)
+     D regs (64 bit float, also used for 32 bit float)
+     Q regs (128 bit vector)
+*/
+
+/* Return the universe of ARM64 real registers: first the ones
+   available to the register allocator (integer, vector, then F64
+   regs), then, after ru->allocable is fixed, the few non-allocatable
+   registers (x8, x9, x21) that instructions may still mention.  The
+   result is computed once and cached in a static. */
+const RRegUniverse* getRRegUniverse_ARM64 ( void )
+{
+   /* The real-register universe is a big constant, so we just want to
+      initialise it once. */
+   static RRegUniverse rRegUniverse_ARM64;
+   static Bool         rRegUniverse_ARM64_initted = False;
+
+   /* Handy shorthand, nothing more */
+   RRegUniverse* ru = &rRegUniverse_ARM64;
+
+   /* This isn't thread-safe.  Sigh. */
+   if (LIKELY(rRegUniverse_ARM64_initted))
+      return ru;
+
+   RRegUniverse__init(ru);
+
+   /* Add the registers.  The initial segment of this array must be
+      those available for allocation by reg-alloc, and those that
+      follow are not available for allocation. */
+
+   ru->regs[ru->size++] = hregARM64_X22();
+   ru->regs[ru->size++] = hregARM64_X23();
+   ru->regs[ru->size++] = hregARM64_X24();
+   ru->regs[ru->size++] = hregARM64_X25();
+   ru->regs[ru->size++] = hregARM64_X26();
+   ru->regs[ru->size++] = hregARM64_X27();
+   ru->regs[ru->size++] = hregARM64_X28();
+
+   ru->regs[ru->size++] = hregARM64_X0();
+   ru->regs[ru->size++] = hregARM64_X1();
+   ru->regs[ru->size++] = hregARM64_X2();
+   ru->regs[ru->size++] = hregARM64_X3();
+   ru->regs[ru->size++] = hregARM64_X4();
+   ru->regs[ru->size++] = hregARM64_X5();
+   ru->regs[ru->size++] = hregARM64_X6();
+   ru->regs[ru->size++] = hregARM64_X7();
+   // X8 is used as a ProfInc temporary, not available to regalloc.
+   // X9 is a chaining/spill temporary, not available to regalloc.
+
+   // Do we really need all these?
+   //ru->regs[ru->size++] = hregARM64_X10();
+   //ru->regs[ru->size++] = hregARM64_X11();
+   //ru->regs[ru->size++] = hregARM64_X12();
+   //ru->regs[ru->size++] = hregARM64_X13();
+   //ru->regs[ru->size++] = hregARM64_X14();
+   //ru->regs[ru->size++] = hregARM64_X15();
+   // X21 is the guest state pointer, not available to regalloc.
+
+   // vector regs.  Unfortunately not callee-saved.
+   ru->regs[ru->size++] = hregARM64_Q16();
+   ru->regs[ru->size++] = hregARM64_Q17();
+   ru->regs[ru->size++] = hregARM64_Q18();
+   ru->regs[ru->size++] = hregARM64_Q19();
+   ru->regs[ru->size++] = hregARM64_Q20();
+
+   // F64 regs, all of which are callee-saved
+   ru->regs[ru->size++] = hregARM64_D8();
+   ru->regs[ru->size++] = hregARM64_D9();
+   ru->regs[ru->size++] = hregARM64_D10();
+   ru->regs[ru->size++] = hregARM64_D11();
+   ru->regs[ru->size++] = hregARM64_D12();
+   ru->regs[ru->size++] = hregARM64_D13();
+
+   ru->allocable = ru->size;
+   /* And other regs, not available to the allocator. */
+
+   // unavail: x21 as GSP
+   // x8 is used as a ProfInc temporary
+   // x9 is used as a spill/reload/chaining/call temporary
+   // x30 as LR
+   // x31 because dealing with the SP-vs-ZR overloading is too
+   // confusing, and we don't need to do so, so let's just avoid
+   // the problem
+   //
+   // Currently, we have 15 allocatable integer registers:
+   // 0 1 2 3 4 5 6 7 22 23 24 25 26 27 28
+   //
+   // Hence for the allocatable integer registers we have:
+   //
+   // callee-saved: 22 23 24 25 26 27 28
+   // caller-saved: 0 1 2 3 4 5 6 7
+   //
+   // If the set of available registers changes or if the e/r status
+   // changes, be sure to re-check/sync the definition of
+   // getRegUsage for ARM64Instr_Call too.
+
+   ru->regs[ru->size++] = hregARM64_X8();
+   ru->regs[ru->size++] = hregARM64_X9();
+   ru->regs[ru->size++] = hregARM64_X21();
+
+   rRegUniverse_ARM64_initted = True;
+
+   RRegUniverse__check_is_sane(ru);
+   return ru;
+}
+
+
+/* Print |reg| in ARM64 syntax: xN for 64-bit ints, dN for F64,
+   qN for 128-bit vectors.  Virtual registers get the generic
+   vreg printing. */
+void ppHRegARM64 ( HReg reg )  {
+   Int r;
+   /* Be generic for all virtual regs. */
+   if (hregIsVirtual(reg)) {
+      ppHReg(reg);
+      return;
+   }
+   /* But specific for real regs. */
+   switch (hregClass(reg)) {
+      case HRcInt64:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 31);
+         vex_printf("x%d", r);
+         return;
+      case HRcFlt64:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 32);
+         vex_printf("d%d", r);
+         return;
+      case HRcVec128:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 32);
+         vex_printf("q%d", r);
+         return;
+      default:
+         vpanic("ppHRegARM64");
+   }
+}
+
+/* As ppHRegARM64, but tagged "(S-reg)" -- presumably to flag that
+   only the 32-bit S view of the register is meant; confirm against
+   callers. */
+static void ppHRegARM64asSreg ( HReg reg ) {
+   ppHRegARM64(reg);
+   vex_printf("(S-reg)");
+}
+
+/* As ppHRegARM64, but tagged "(H-reg)" -- presumably the 16-bit H
+   view; confirm against callers. */
+static void ppHRegARM64asHreg ( HReg reg ) {
+   ppHRegARM64(reg);
+   vex_printf("(H-reg)");
+}
+
+
+/* --------- Condition codes, ARM64 encoding. --------- */
+
+/* Return the standard two-letter assembly suffix for |cond|. */
+static const HChar* showARM64CondCode ( ARM64CondCode cond ) {
+   switch (cond) {
+       case ARM64cc_EQ:  return "eq";
+       case ARM64cc_NE:  return "ne";
+       case ARM64cc_CS:  return "cs";
+       case ARM64cc_CC:  return "cc";
+       case ARM64cc_MI:  return "mi";
+       case ARM64cc_PL:  return "pl";
+       case ARM64cc_VS:  return "vs";
+       case ARM64cc_VC:  return "vc";
+       case ARM64cc_HI:  return "hi";
+       case ARM64cc_LS:  return "ls";
+       case ARM64cc_GE:  return "ge";
+       case ARM64cc_LT:  return "lt";
+       case ARM64cc_GT:  return "gt";
+       case ARM64cc_LE:  return "le";
+       case ARM64cc_AL:  return "al"; // default
+       case ARM64cc_NV:  return "nv";
+       default: vpanic("showARM64CondCode");
+   }
+}
+
+
+/* --------- Memory address expressions (amodes). --------- */
+
+/* Build a reg + signed-9-bit-offset amode; simm9 must be in
+   -256 .. 255 (checked). */
+ARM64AMode* ARM64AMode_RI9  ( HReg reg, Int simm9 ) {
+   ARM64AMode* am        = LibVEX_Alloc_inline(sizeof(ARM64AMode));
+   am->tag               = ARM64am_RI9;
+   am->ARM64am.RI9.reg   = reg;
+   am->ARM64am.RI9.simm9 = simm9;
+   vassert(-256 <= simm9 && simm9 <= 255);
+   return am;
+}
+
+/* Build a reg + scaled-unsigned-12-bit-offset amode.  |szB| is the
+   access size (1/2/4/8); the effective offset is uimm12 * szB.
+   uimm12 must be in 0 .. 4095 (checked). */
+ARM64AMode* ARM64AMode_RI12 ( HReg reg, Int uimm12, UChar szB ) {
+   ARM64AMode* am          = LibVEX_Alloc_inline(sizeof(ARM64AMode));
+   am->tag                 = ARM64am_RI12;
+   am->ARM64am.RI12.reg    = reg;
+   am->ARM64am.RI12.uimm12 = uimm12;
+   am->ARM64am.RI12.szB    = szB;
+   vassert(uimm12 >= 0 && uimm12 <= 4095);
+   switch (szB) {
+      case 1: case 2: case 4: case 8: break;
+      default: vassert(0);
+   }
+   return am;
+}
+
+/* Build a base + index (reg + reg) amode. */
+ARM64AMode* ARM64AMode_RR ( HReg base, HReg index ) {
+   ARM64AMode* am       = LibVEX_Alloc_inline(sizeof(ARM64AMode));
+   am->tag              = ARM64am_RR;
+   am->ARM64am.RR.base  = base;
+   am->ARM64am.RR.index = index;
+   return am;
+}
+
+/* Print an amode: "off(reg)" for the immediate forms (RI12 shows the
+   byte offset, i.e. uimm12 scaled by szB) and "(base,index)" for RR. */
+static void ppARM64AMode ( ARM64AMode* am ) {
+   switch (am->tag) {
+      case ARM64am_RI9:
+         vex_printf("%d(", am->ARM64am.RI9.simm9);
+         ppHRegARM64(am->ARM64am.RI9.reg);
+         vex_printf(")");
+         break;
+      case ARM64am_RI12:
+         vex_printf("%u(", (UInt)am->ARM64am.RI12.szB
+                           * (UInt)am->ARM64am.RI12.uimm12);
+         ppHRegARM64(am->ARM64am.RI12.reg);
+         vex_printf(")");
+         break;
+      case ARM64am_RR:
+         vex_printf("(");
+         ppHRegARM64(am->ARM64am.RR.base);
+         vex_printf(",");
+         ppHRegARM64(am->ARM64am.RR.index);
+         vex_printf(")");
+         break;
+      default:
+         vassert(0);
+   }
+}
+
+/* Record (for the register allocator) that every register mentioned
+   by |am| is read. */
+static void addRegUsage_ARM64AMode ( HRegUsage* u, ARM64AMode* am ) {
+   switch (am->tag) {
+      case ARM64am_RI9:
+         addHRegUse(u, HRmRead, am->ARM64am.RI9.reg);
+         return;
+      case ARM64am_RI12:
+         addHRegUse(u, HRmRead, am->ARM64am.RI12.reg);
+         return;
+      case ARM64am_RR:
+         addHRegUse(u, HRmRead, am->ARM64am.RR.base);
+         addHRegUse(u, HRmRead, am->ARM64am.RR.index);
+         return;
+      default:
+         vpanic("addRegUsage_ARM64Amode");
+   }
+}
+
+/* Rewrite, in place, the registers in |am| through the vreg->rreg
+   map |m|. */
+static void mapRegs_ARM64AMode ( HRegRemap* m, ARM64AMode* am ) {
+   switch (am->tag) {
+      case ARM64am_RI9:
+         am->ARM64am.RI9.reg = lookupHRegRemap(m, am->ARM64am.RI9.reg);
+         return;
+      case ARM64am_RI12:
+         am->ARM64am.RI12.reg = lookupHRegRemap(m, am->ARM64am.RI12.reg);
+         return;
+      case ARM64am_RR:
+         am->ARM64am.RR.base  = lookupHRegRemap(m, am->ARM64am.RR.base);
+         am->ARM64am.RR.index = lookupHRegRemap(m, am->ARM64am.RR.index);
+         return;
+      default:
+         vpanic("mapRegs_ARM64Amode");
+   }
+}
+
+
+/* --------- Reg or uimm12<<{0,12} operands --------- */
+
+/* Build the immediate form of an RIA operand: a 12-bit unsigned
+   value optionally shifted left by 12 (shift must be 0 or 12;
+   imm12 < 4096 -- both checked). */
+ARM64RIA* ARM64RIA_I12 ( UShort imm12, UChar shift ) {
+   ARM64RIA* riA           = LibVEX_Alloc_inline(sizeof(ARM64RIA));
+   riA->tag                = ARM64riA_I12;
+   riA->ARM64riA.I12.imm12 = imm12;
+   riA->ARM64riA.I12.shift = shift;
+   vassert(imm12 < 4096);
+   vassert(shift == 0 || shift == 12);
+   return riA;
+}
+/* Build the register form of an RIA operand. */
+ARM64RIA* ARM64RIA_R ( HReg reg ) {
+   ARM64RIA* riA       = LibVEX_Alloc_inline(sizeof(ARM64RIA));
+   riA->tag            = ARM64riA_R;
+   riA->ARM64riA.R.reg = reg;
+   return riA;
+}
+
+/* Print an RIA operand; the immediate form is shown already shifted. */
+static void ppARM64RIA ( ARM64RIA* riA ) {
+   switch (riA->tag) {
+      case ARM64riA_I12:
+         vex_printf("#%u",(UInt)(riA->ARM64riA.I12.imm12
+                                 << riA->ARM64riA.I12.shift));
+         break;
+      case ARM64riA_R:
+         ppHRegARM64(riA->ARM64riA.R.reg);
+         break;
+      default:
+         vassert(0);
+   }
+}
+
+/* Record that the register form's register is read; the immediate
+   form uses no registers. */
+static void addRegUsage_ARM64RIA ( HRegUsage* u, ARM64RIA* riA ) {
+   switch (riA->tag) {
+      case ARM64riA_I12:
+         return;
+      case ARM64riA_R:
+         addHRegUse(u, HRmRead, riA->ARM64riA.R.reg);
+         return;
+      default:
+         vpanic("addRegUsage_ARM64RIA");
+   }
+}
+
+/* Apply the register remapping |m| to the register form, in place. */
+static void mapRegs_ARM64RIA ( HRegRemap* m, ARM64RIA* riA ) {
+   switch (riA->tag) {
+      case ARM64riA_I12:
+         return;
+      case ARM64riA_R:
+         riA->ARM64riA.R.reg = lookupHRegRemap(m, riA->ARM64riA.R.reg);
+         return;
+      default:
+         vpanic("mapRegs_ARM64RIA");
+   }
+}
+
+
+/* --------- Reg or "bitfield" (logic immediate) operands --------- */
+
+/* Build the immediate form of an RIL (logic-immediate) operand,
+   carrying the raw 13-bit N:immR:immS encoding (bitN < 2,
+   immR/immS < 64 -- checked).  The fields are stored as-is; decoding
+   to an actual bitmask happens elsewhere. */
+ARM64RIL* ARM64RIL_I13 ( UChar bitN, UChar immR, UChar immS ) {
+   ARM64RIL* riL          = LibVEX_Alloc_inline(sizeof(ARM64RIL));
+   riL->tag               = ARM64riL_I13;
+   riL->ARM64riL.I13.bitN = bitN;
+   riL->ARM64riL.I13.immR = immR;
+   riL->ARM64riL.I13.immS = immS;
+   vassert(bitN < 2);
+   vassert(immR < 64);
+   vassert(immS < 64);
+   return riL;
+}
+/* Build the register form of an RIL operand. */
+ARM64RIL* ARM64RIL_R ( HReg reg ) {
+   ARM64RIL* riL       = LibVEX_Alloc_inline(sizeof(ARM64RIL));
+   riL->tag            = ARM64riL_R;
+   riL->ARM64riL.R.reg = reg;
+   return riL;
+}
+
+/* Print an RIL operand; the immediate form shows the raw N/R/S
+   fields, not the decoded mask. */
+static void ppARM64RIL ( ARM64RIL* riL ) {
+   switch (riL->tag) {
+      case ARM64riL_I13:
+         vex_printf("#nrs(%u,%u,%u)",
+                     (UInt)riL->ARM64riL.I13.bitN,
+                     (UInt)riL->ARM64riL.I13.immR,
+                     (UInt)riL->ARM64riL.I13.immS);
+         break;
+      case ARM64riL_R:
+         ppHRegARM64(riL->ARM64riL.R.reg);
+         break;
+      default:
+         vassert(0);
+   }
+}
+
+/* Record that the register form's register is read; the immediate
+   form uses no registers. */
+static void addRegUsage_ARM64RIL ( HRegUsage* u, ARM64RIL* riL ) {
+   switch (riL->tag) {
+      case ARM64riL_I13:
+         return;
+      case ARM64riL_R:
+         addHRegUse(u, HRmRead, riL->ARM64riL.R.reg);
+         return;
+      default:
+         vpanic("addRegUsage_ARM64RIL");
+   }
+}
+
+/* Apply the register remapping |m| to the register form, in place. */
+static void mapRegs_ARM64RIL ( HRegRemap* m, ARM64RIL* riL ) {
+   switch (riL->tag) {
+      case ARM64riL_I13:
+         return;
+      case ARM64riL_R:
+         riL->ARM64riL.R.reg = lookupHRegRemap(m, riL->ARM64riL.R.reg);
+         return;
+      default:
+         vpanic("mapRegs_ARM64RIL");
+   }
+}
+
+
+/* --------------- Reg or uimm6 operands --------------- */
+
+/* Build the immediate form of an RI6 (shift-amount) operand; the
+   amount must be in 1 .. 63 (checked -- zero shifts are excluded). */
+ARM64RI6* ARM64RI6_I6 ( UInt imm6 ) {
+   ARM64RI6* ri6         = LibVEX_Alloc_inline(sizeof(ARM64RI6));
+   ri6->tag              = ARM64ri6_I6;
+   ri6->ARM64ri6.I6.imm6 = imm6;
+   vassert(imm6 > 0 && imm6 < 64);
+   return ri6;
+}
+/* Build the register form of an RI6 operand. */
+ARM64RI6* ARM64RI6_R ( HReg reg ) {
+   ARM64RI6* ri6       = LibVEX_Alloc_inline(sizeof(ARM64RI6));
+   ri6->tag            = ARM64ri6_R;
+   ri6->ARM64ri6.R.reg = reg;
+   return ri6;
+}
+
+/* Print an RI6 operand. */
+static void ppARM64RI6 ( ARM64RI6* ri6 ) {
+   switch (ri6->tag) {
+      case ARM64ri6_I6:
+         vex_printf("#%u", ri6->ARM64ri6.I6.imm6);
+         break;
+      case ARM64ri6_R:
+         ppHRegARM64(ri6->ARM64ri6.R.reg);
+         break;
+      default:
+         vassert(0);
+   }
+}
+
+/* Record that the register form's register is read; the immediate
+   form uses no registers. */
+static void addRegUsage_ARM64RI6 ( HRegUsage* u, ARM64RI6* ri6 ) {
+   switch (ri6->tag) {
+      case ARM64ri6_I6:
+         return;
+      case ARM64ri6_R:
+         addHRegUse(u, HRmRead, ri6->ARM64ri6.R.reg);
+         return;
+      default:
+         vpanic("addRegUsage_ARM64RI6");
+   }
+}
+
+/* Apply the register remapping |m| to the register form, in place. */
+static void mapRegs_ARM64RI6 ( HRegRemap* m, ARM64RI6* ri6 ) {
+   switch (ri6->tag) {
+      case ARM64ri6_I6:
+         return;
+      case ARM64ri6_R:
+         ri6->ARM64ri6.R.reg = lookupHRegRemap(m, ri6->ARM64ri6.R.reg);
+         return;
+      default:
+         vpanic("mapRegs_ARM64RI6");
+   }
+}
+
+
+/* --------- Instructions. --------- */
+
+static const HChar* showARM64LogicOp ( ARM64LogicOp op ) {
+   switch (op) {
+      case ARM64lo_AND: return "and";
+      case ARM64lo_OR:  return "orr";
+      case ARM64lo_XOR: return "eor";
+      default: vpanic("showARM64LogicOp");
+   }
+}
+
+static const HChar* showARM64ShiftOp ( ARM64ShiftOp op ) {
+   switch (op) {
+      case ARM64sh_SHL: return "lsl";
+      case ARM64sh_SHR: return "lsr";
+      case ARM64sh_SAR: return "asr";
+      default: vpanic("showARM64ShiftOp");
+   }
+}
+
+static const HChar* showARM64UnaryOp ( ARM64UnaryOp op ) {
+   switch (op) {
+      case ARM64un_NEG: return "neg";
+      case ARM64un_NOT: return "not";
+      case ARM64un_CLZ: return "clz";
+      default: vpanic("showARM64UnaryOp");
+   }
+}
+
+static const HChar* showARM64MulOp ( ARM64MulOp op ) {
+   switch (op) {
+      case ARM64mul_PLAIN: return "mul  ";
+      case ARM64mul_ZX:    return "umulh";
+      case ARM64mul_SX:    return "smulh";
+      default: vpanic("showARM64MulOp");
+   }
+}
+
+static void characteriseARM64CvtOp ( /*OUT*/HChar* syn,
+                                     /*OUT*/UInt* fszB, /*OUT*/UInt* iszB, 
+                                     ARM64CvtOp op ) {
+   switch (op) {
+      case ARM64cvt_F32_I32S:
+         *syn = 's'; *fszB = 4; *iszB = 4; break;
+      case ARM64cvt_F64_I32S:
+         *syn = 's'; *fszB = 8; *iszB = 4; break;
+      case ARM64cvt_F32_I64S:
+         *syn = 's'; *fszB = 4; *iszB = 8; break;
+      case ARM64cvt_F64_I64S:
+         *syn = 's'; *fszB = 8; *iszB = 8; break;
+      case ARM64cvt_F32_I32U:
+         *syn = 'u'; *fszB = 4; *iszB = 4; break;
+      case ARM64cvt_F64_I32U:
+         *syn = 'u'; *fszB = 8; *iszB = 4; break;
+      case ARM64cvt_F32_I64U:
+         *syn = 'u'; *fszB = 4; *iszB = 8; break;
+      case ARM64cvt_F64_I64U:
+         *syn = 'u'; *fszB = 8; *iszB = 8; break;
+      default:
+         vpanic("characteriseARM64CvtOp");
+  }
+}
+
+static const HChar* showARM64FpBinOp ( ARM64FpBinOp op ) {
+   switch (op) {
+      case ARM64fpb_ADD: return "add";
+      case ARM64fpb_SUB: return "sub";
+      case ARM64fpb_MUL: return "mul";
+      case ARM64fpb_DIV: return "div";
+      default: vpanic("showARM64FpBinOp");
+   }
+}
+
+static const HChar* showARM64FpUnaryOp ( ARM64FpUnaryOp op ) {
+   switch (op) {
+      case ARM64fpu_NEG:   return "neg  ";
+      case ARM64fpu_ABS:   return "abs  ";
+      case ARM64fpu_SQRT:  return "sqrt ";
+      case ARM64fpu_RINT:  return "rinti";
+      case ARM64fpu_RECPX: return "recpx";
+      default: vpanic("showARM64FpUnaryOp");
+   }
+}
+
/* Return the assembly mnemonic (*nm) and vector arrangement string
   (*ar) for a vector binary op.  Arrangements are as printed in
   AArch64 asm, e.g. "4s" = four 32-bit lanes, "16b" = sixteen 8-bit
   lanes.  NOTE(review): the 4-char forms such as "2dss"/"8hbb" appear
   to combine destination and source arrangements for widening ops —
   confirm against the instruction printer that consumes these. */
static void showARM64VecBinOp(/*OUT*/const HChar** nm,
                              /*OUT*/const HChar** ar, ARM64VecBinOp op ) {
   switch (op) {
      case ARM64vecb_ADD64x2:      *nm = "add   ";    *ar = "2d";   return;
      case ARM64vecb_ADD32x4:      *nm = "add   ";    *ar = "4s";   return;
      case ARM64vecb_ADD16x8:      *nm = "add   ";    *ar = "8h";   return;
      case ARM64vecb_ADD8x16:      *nm = "add   ";    *ar = "16b";  return;
      case ARM64vecb_SUB64x2:      *nm = "sub   ";    *ar = "2d";   return;
      case ARM64vecb_SUB32x4:      *nm = "sub   ";    *ar = "4s";   return;
      case ARM64vecb_SUB16x8:      *nm = "sub   ";    *ar = "8h";   return;
      case ARM64vecb_SUB8x16:      *nm = "sub   ";    *ar = "16b";  return;
      case ARM64vecb_MUL32x4:      *nm = "mul   ";    *ar = "4s";   return;
      case ARM64vecb_MUL16x8:      *nm = "mul   ";    *ar = "8h";   return;
      case ARM64vecb_MUL8x16:      *nm = "mul   ";    *ar = "16b";  return;
      case ARM64vecb_FADD64x2:     *nm = "fadd  ";    *ar = "2d";   return;
      case ARM64vecb_FSUB64x2:     *nm = "fsub  ";    *ar = "2d";   return;
      case ARM64vecb_FMUL64x2:     *nm = "fmul  ";    *ar = "2d";   return;
      case ARM64vecb_FDIV64x2:     *nm = "fdiv  ";    *ar = "2d";   return;
      case ARM64vecb_FADD32x4:     *nm = "fadd  ";    *ar = "4s";   return;
      case ARM64vecb_FSUB32x4:     *nm = "fsub  ";    *ar = "4s";   return;
      case ARM64vecb_FMUL32x4:     *nm = "fmul  ";    *ar = "4s";   return;
      case ARM64vecb_FDIV32x4:     *nm = "fdiv  ";    *ar = "4s";   return;
      case ARM64vecb_FMAX64x2:     *nm = "fmax  ";    *ar = "2d";   return;
      case ARM64vecb_FMAX32x4:     *nm = "fmax  ";    *ar = "4s";   return;
      case ARM64vecb_FMIN64x2:     *nm = "fmin  ";    *ar = "2d";   return;
      case ARM64vecb_FMIN32x4:     *nm = "fmin  ";    *ar = "4s";   return;
      case ARM64vecb_UMAX32x4:     *nm = "umax  ";    *ar = "4s";   return;
      case ARM64vecb_UMAX16x8:     *nm = "umax  ";    *ar = "8h";   return;
      case ARM64vecb_UMAX8x16:     *nm = "umax  ";    *ar = "16b";  return;
      case ARM64vecb_UMIN32x4:     *nm = "umin  ";    *ar = "4s";   return;
      case ARM64vecb_UMIN16x8:     *nm = "umin  ";    *ar = "8h";   return;
      case ARM64vecb_UMIN8x16:     *nm = "umin  ";    *ar = "16b";  return;
      case ARM64vecb_SMAX32x4:     *nm = "smax  ";    *ar = "4s";   return;
      case ARM64vecb_SMAX16x8:     *nm = "smax  ";    *ar = "8h";   return;
      case ARM64vecb_SMAX8x16:     *nm = "smax  ";    *ar = "16b";  return;
      case ARM64vecb_SMIN32x4:     *nm = "smin  ";    *ar = "4s";   return;
      case ARM64vecb_SMIN16x8:     *nm = "smin  ";    *ar = "8h";   return;
      case ARM64vecb_SMIN8x16:     *nm = "smin  ";    *ar = "16b";  return;
      case ARM64vecb_AND:          *nm = "and   ";    *ar = "16b";  return;
      case ARM64vecb_ORR:          *nm = "orr   ";    *ar = "16b";  return;
      case ARM64vecb_XOR:          *nm = "eor   ";    *ar = "16b";  return;
      case ARM64vecb_CMEQ64x2:     *nm = "cmeq  ";    *ar = "2d";   return;
      case ARM64vecb_CMEQ32x4:     *nm = "cmeq  ";    *ar = "4s";   return;
      case ARM64vecb_CMEQ16x8:     *nm = "cmeq  ";    *ar = "8h";   return;
      case ARM64vecb_CMEQ8x16:     *nm = "cmeq  ";    *ar = "16b";  return;
      case ARM64vecb_CMHI64x2:     *nm = "cmhi  ";    *ar = "2d";   return;
      case ARM64vecb_CMHI32x4:     *nm = "cmhi  ";    *ar = "4s";   return;
      case ARM64vecb_CMHI16x8:     *nm = "cmhi  ";    *ar = "8h";   return;
      case ARM64vecb_CMHI8x16:     *nm = "cmhi  ";    *ar = "16b";  return;
      case ARM64vecb_CMGT64x2:     *nm = "cmgt  ";    *ar = "2d";   return;
      case ARM64vecb_CMGT32x4:     *nm = "cmgt  ";    *ar = "4s";   return;
      case ARM64vecb_CMGT16x8:     *nm = "cmgt  ";    *ar = "8h";   return;
      case ARM64vecb_CMGT8x16:     *nm = "cmgt  ";    *ar = "16b";  return;
      case ARM64vecb_FCMEQ64x2:    *nm = "fcmeq ";    *ar = "2d";   return;
      case ARM64vecb_FCMEQ32x4:    *nm = "fcmeq ";    *ar = "4s";   return;
      case ARM64vecb_FCMGE64x2:    *nm = "fcmge ";    *ar = "2d";   return;
      case ARM64vecb_FCMGE32x4:    *nm = "fcmge ";    *ar = "4s";   return;
      case ARM64vecb_FCMGT64x2:    *nm = "fcmgt ";    *ar = "2d";   return;
      case ARM64vecb_FCMGT32x4:    *nm = "fcmgt ";    *ar = "4s";   return;
      case ARM64vecb_TBL1:         *nm = "tbl   ";    *ar = "16b";  return;
      case ARM64vecb_UZP164x2:     *nm = "uzp1  ";    *ar = "2d";   return;
      case ARM64vecb_UZP132x4:     *nm = "uzp1  ";    *ar = "4s";   return;
      case ARM64vecb_UZP116x8:     *nm = "uzp1  ";    *ar = "8h";   return;
      case ARM64vecb_UZP18x16:     *nm = "uzp1  ";    *ar = "16b";  return;
      case ARM64vecb_UZP264x2:     *nm = "uzp2  ";    *ar = "2d";   return;
      case ARM64vecb_UZP232x4:     *nm = "uzp2  ";    *ar = "4s";   return;
      case ARM64vecb_UZP216x8:     *nm = "uzp2  ";    *ar = "8h";   return;
      case ARM64vecb_UZP28x16:     *nm = "uzp2  ";    *ar = "16b";  return;
      case ARM64vecb_ZIP132x4:     *nm = "zip1  ";    *ar = "4s";   return;
      case ARM64vecb_ZIP116x8:     *nm = "zip1  ";    *ar = "8h";   return;
      case ARM64vecb_ZIP18x16:     *nm = "zip1  ";    *ar = "16b";  return;
      case ARM64vecb_ZIP232x4:     *nm = "zip2  ";    *ar = "4s";   return;
      case ARM64vecb_ZIP216x8:     *nm = "zip2  ";    *ar = "8h";   return;
      case ARM64vecb_ZIP28x16:     *nm = "zip2  ";    *ar = "16b";  return;
      case ARM64vecb_PMUL8x16:     *nm = "pmul  ";    *ar = "16b";  return;
      case ARM64vecb_PMULL8x8:     *nm = "pmull ";    *ar = "8hbb"; return;
      case ARM64vecb_UMULL2DSS:    *nm = "umull ";    *ar = "2dss"; return;
      case ARM64vecb_UMULL4SHH:    *nm = "umull ";    *ar = "4shh"; return;
      case ARM64vecb_UMULL8HBB:    *nm = "umull ";    *ar = "8hbb"; return;
      case ARM64vecb_SMULL2DSS:    *nm = "smull ";    *ar = "2dss"; return;
      case ARM64vecb_SMULL4SHH:    *nm = "smull ";    *ar = "4shh"; return;
      case ARM64vecb_SMULL8HBB:    *nm = "smull ";    *ar = "8hbb"; return;
      case ARM64vecb_SQADD64x2:    *nm = "sqadd ";    *ar = "2d";   return;
      case ARM64vecb_SQADD32x4:    *nm = "sqadd ";    *ar = "4s";   return;
      case ARM64vecb_SQADD16x8:    *nm = "sqadd ";    *ar = "8h";   return;
      case ARM64vecb_SQADD8x16:    *nm = "sqadd ";    *ar = "16b";  return;
      case ARM64vecb_UQADD64x2:    *nm = "uqadd ";    *ar = "2d";   return;
      case ARM64vecb_UQADD32x4:    *nm = "uqadd ";    *ar = "4s";   return;
      case ARM64vecb_UQADD16x8:    *nm = "uqadd ";    *ar = "8h";   return;
      case ARM64vecb_UQADD8x16:    *nm = "uqadd ";    *ar = "16b";  return;
      case ARM64vecb_SQSUB64x2:    *nm = "sqsub ";    *ar = "2d";   return;
      case ARM64vecb_SQSUB32x4:    *nm = "sqsub ";    *ar = "4s";   return;
      case ARM64vecb_SQSUB16x8:    *nm = "sqsub ";    *ar = "8h";   return;
      case ARM64vecb_SQSUB8x16:    *nm = "sqsub ";    *ar = "16b";  return;
      case ARM64vecb_UQSUB64x2:    *nm = "uqsub ";    *ar = "2d";   return;
      case ARM64vecb_UQSUB32x4:    *nm = "uqsub ";    *ar = "4s";   return;
      case ARM64vecb_UQSUB16x8:    *nm = "uqsub ";    *ar = "8h";   return;
      case ARM64vecb_UQSUB8x16:    *nm = "uqsub ";    *ar = "16b";  return;
      case ARM64vecb_SQDMULL2DSS:  *nm = "sqdmull";   *ar = "2dss"; return;
      case ARM64vecb_SQDMULL4SHH:  *nm = "sqdmull";   *ar = "4shh"; return;
      case ARM64vecb_SQDMULH32x4:  *nm = "sqdmulh";   *ar = "4s";   return;
      case ARM64vecb_SQDMULH16x8:  *nm = "sqdmulh";   *ar = "8h";   return;
      case ARM64vecb_SQRDMULH32x4: *nm = "sqrdmulh";  *ar = "4s";   return;
      case ARM64vecb_SQRDMULH16x8: *nm = "sqrdmulh";  *ar = "8h";   return;
      case ARM64vecb_SQSHL64x2:    *nm = "sqshl ";    *ar = "2d";   return;
      case ARM64vecb_SQSHL32x4:    *nm = "sqshl ";    *ar = "4s";   return;
      case ARM64vecb_SQSHL16x8:    *nm = "sqshl ";    *ar = "8h";   return;
      case ARM64vecb_SQSHL8x16:    *nm = "sqshl ";    *ar = "16b";  return;
      case ARM64vecb_UQSHL64x2:    *nm = "uqshl ";    *ar = "2d";   return;
      case ARM64vecb_UQSHL32x4:    *nm = "uqshl ";    *ar = "4s";   return;
      case ARM64vecb_UQSHL16x8:    *nm = "uqshl ";    *ar = "8h";   return;
      case ARM64vecb_UQSHL8x16:    *nm = "uqshl ";    *ar = "16b";  return;
      case ARM64vecb_SQRSHL64x2:   *nm = "sqrshl";    *ar = "2d";   return;
      case ARM64vecb_SQRSHL32x4:   *nm = "sqrshl";    *ar = "4s";   return;
      case ARM64vecb_SQRSHL16x8:   *nm = "sqrshl";    *ar = "8h";   return;
      case ARM64vecb_SQRSHL8x16:   *nm = "sqrshl";    *ar = "16b";  return;
      case ARM64vecb_UQRSHL64x2:   *nm = "uqrshl";    *ar = "2d";   return;
      case ARM64vecb_UQRSHL32x4:   *nm = "uqrshl";    *ar = "4s";   return;
      case ARM64vecb_UQRSHL16x8:   *nm = "uqrshl";    *ar = "8h";   return;
      case ARM64vecb_UQRSHL8x16:   *nm = "uqrshl";    *ar = "16b";  return;
      case ARM64vecb_SSHL64x2:     *nm = "sshl  ";    *ar = "2d";   return;
      case ARM64vecb_SSHL32x4:     *nm = "sshl  ";    *ar = "4s";   return;
      case ARM64vecb_SSHL16x8:     *nm = "sshl  ";    *ar = "8h";   return;
      case ARM64vecb_SSHL8x16:     *nm = "sshl  ";    *ar = "16b";  return;
      case ARM64vecb_USHL64x2:     *nm = "ushl  ";    *ar = "2d";   return;
      case ARM64vecb_USHL32x4:     *nm = "ushl  ";    *ar = "4s";   return;
      case ARM64vecb_USHL16x8:     *nm = "ushl  ";    *ar = "8h";   return;
      case ARM64vecb_USHL8x16:     *nm = "ushl  ";    *ar = "16b";  return;
      case ARM64vecb_SRSHL64x2:    *nm = "srshl ";    *ar = "2d";   return;
      case ARM64vecb_SRSHL32x4:    *nm = "srshl ";    *ar = "4s";   return;
      case ARM64vecb_SRSHL16x8:    *nm = "srshl ";    *ar = "8h";   return;
      case ARM64vecb_SRSHL8x16:    *nm = "srshl ";    *ar = "16b";  return;
      case ARM64vecb_URSHL64x2:    *nm = "urshl ";    *ar = "2d";   return;
      case ARM64vecb_URSHL32x4:    *nm = "urshl ";    *ar = "4s";   return;
      case ARM64vecb_URSHL16x8:    *nm = "urshl ";    *ar = "8h";   return;
      case ARM64vecb_URSHL8x16:    *nm = "urshl ";    *ar = "16b";  return;
      case ARM64vecb_FRECPS64x2:   *nm = "frecps";    *ar = "2d";   return;
      case ARM64vecb_FRECPS32x4:   *nm = "frecps";    *ar = "4s";   return;
      case ARM64vecb_FRSQRTS64x2:  *nm = "frsqrts";   *ar = "2d";   return;
      case ARM64vecb_FRSQRTS32x4:  *nm = "frsqrts";   *ar = "4s";   return;
      default: vpanic("showARM64VecBinOp");
   }
}
+
+static void showARM64VecModifyOp(/*OUT*/const HChar** nm,
+                                 /*OUT*/const HChar** ar,
+                                 ARM64VecModifyOp op ) {
+   switch (op) {
+      case ARM64vecmo_SUQADD64x2:   *nm = "suqadd";    *ar = "2d";   return;
+      case ARM64vecmo_SUQADD32x4:   *nm = "suqadd";    *ar = "4s";   return;
+      case ARM64vecmo_SUQADD16x8:   *nm = "suqadd";    *ar = "8h";   return;
+      case ARM64vecmo_SUQADD8x16:   *nm = "suqadd";    *ar = "16b";  return;
+      case ARM64vecmo_USQADD64x2:   *nm = "usqadd";    *ar = "2d";   return;
+      case ARM64vecmo_USQADD32x4:   *nm = "usqadd";    *ar = "4s";   return;
+      case ARM64vecmo_USQADD16x8:   *nm = "usqadd";    *ar = "8h";   return;
+      case ARM64vecmo_USQADD8x16:   *nm = "usqadd";    *ar = "16b";  return;
+      default: vpanic("showARM64VecModifyOp");
+   }
+}
+
/* Return the assembly mnemonic (*nm) and vector arrangement string
   (*ar) for a vector unary op.  "all" marks the NOT case, whose
   arrangement is not lane-specific here. */
static void showARM64VecUnaryOp(/*OUT*/const HChar** nm,
                                /*OUT*/const HChar** ar, ARM64VecUnaryOp op )
{
   switch (op) {
      case ARM64vecu_FNEG64x2:    *nm = "fneg ";   *ar = "2d";  return;
      case ARM64vecu_FNEG32x4:    *nm = "fneg ";   *ar = "4s";  return;
      case ARM64vecu_FABS64x2:    *nm = "fabs ";   *ar = "2d";  return;
      case ARM64vecu_FABS32x4:    *nm = "fabs ";   *ar = "4s";  return;
      case ARM64vecu_NOT:         *nm = "not  ";   *ar = "all"; return;
      case ARM64vecu_ABS64x2:     *nm = "abs  ";   *ar = "2d";  return;
      case ARM64vecu_ABS32x4:     *nm = "abs  ";   *ar = "4s";  return;
      case ARM64vecu_ABS16x8:     *nm = "abs  ";   *ar = "8h";  return;
      case ARM64vecu_ABS8x16:     *nm = "abs  ";   *ar = "16b"; return;
      case ARM64vecu_CLS32x4:     *nm = "cls  ";   *ar = "4s";  return;
      case ARM64vecu_CLS16x8:     *nm = "cls  ";   *ar = "8h";  return;
      case ARM64vecu_CLS8x16:     *nm = "cls  ";   *ar = "16b"; return;
      case ARM64vecu_CLZ32x4:     *nm = "clz  ";   *ar = "4s";  return;
      case ARM64vecu_CLZ16x8:     *nm = "clz  ";   *ar = "8h";  return;
      case ARM64vecu_CLZ8x16:     *nm = "clz  ";   *ar = "16b"; return;
      case ARM64vecu_CNT8x16:     *nm = "cnt  ";   *ar = "16b"; return;
      case ARM64vecu_RBIT:        *nm = "rbit ";   *ar = "16b"; return;
      case ARM64vecu_REV1616B:    *nm = "rev16";   *ar = "16b"; return;
      case ARM64vecu_REV3216B:    *nm = "rev32";   *ar = "16b"; return;
      case ARM64vecu_REV328H:     *nm = "rev32";   *ar = "8h";  return;
      case ARM64vecu_REV6416B:    *nm = "rev64";   *ar = "16b"; return;
      case ARM64vecu_REV648H:     *nm = "rev64";   *ar = "8h";  return;
      case ARM64vecu_REV644S:     *nm = "rev64";   *ar = "4s";  return;
      case ARM64vecu_URECPE32x4:  *nm = "urecpe";  *ar = "4s";  return;
      case ARM64vecu_URSQRTE32x4: *nm = "ursqrte"; *ar = "4s";  return;
      case ARM64vecu_FRECPE64x2:  *nm = "frecpe";  *ar = "2d";  return;
      case ARM64vecu_FRECPE32x4:  *nm = "frecpe";  *ar = "4s";  return;
      case ARM64vecu_FRSQRTE64x2: *nm = "frsqrte"; *ar = "2d";  return;
      case ARM64vecu_FRSQRTE32x4: *nm = "frsqrte"; *ar = "4s";  return;
      case ARM64vecu_FSQRT64x2:   *nm = "fsqrt";   *ar = "2d";  return;
      case ARM64vecu_FSQRT32x4:   *nm = "fsqrt";   *ar = "4s";  return;
      default: vpanic("showARM64VecUnaryOp");
   }
}
+
/* Return the assembly mnemonic (*nm) and vector arrangement string
   (*ar) for a shift-by-immediate vector op.  NOTE(review): 3-char
   forms such as "2sd"/"8bh" appear on the narrowing shifts and look
   like combined dst/src arrangements — confirm against the printer. */
static void showARM64VecShiftImmOp(/*OUT*/const HChar** nm,
                                   /*OUT*/const HChar** ar,
                                   ARM64VecShiftImmOp op )
{
   switch (op) {
      case ARM64vecshi_USHR64x2:    *nm = "ushr  ";   *ar = "2d";  return;
      case ARM64vecshi_USHR32x4:    *nm = "ushr  ";   *ar = "4s";  return;
      case ARM64vecshi_USHR16x8:    *nm = "ushr  ";   *ar = "8h";  return;
      case ARM64vecshi_USHR8x16:    *nm = "ushr  ";   *ar = "16b"; return;
      case ARM64vecshi_SSHR64x2:    *nm = "sshr  ";   *ar = "2d";  return;
      case ARM64vecshi_SSHR32x4:    *nm = "sshr  ";   *ar = "4s";  return;
      case ARM64vecshi_SSHR16x8:    *nm = "sshr  ";   *ar = "8h";  return;
      case ARM64vecshi_SSHR8x16:    *nm = "sshr  ";   *ar = "16b"; return;
      case ARM64vecshi_SHL64x2:     *nm = "shl   ";   *ar = "2d";  return;
      case ARM64vecshi_SHL32x4:     *nm = "shl   ";   *ar = "4s";  return;
      case ARM64vecshi_SHL16x8:     *nm = "shl   ";   *ar = "8h";  return;
      case ARM64vecshi_SHL8x16:     *nm = "shl   ";   *ar = "16b"; return;
      case ARM64vecshi_SQSHRN2SD:   *nm = "sqshrn";   *ar = "2sd"; return;
      case ARM64vecshi_SQSHRN4HS:   *nm = "sqshrn";   *ar = "4hs"; return;
      case ARM64vecshi_SQSHRN8BH:   *nm = "sqshrn";   *ar = "8bh"; return;
      case ARM64vecshi_UQSHRN2SD:   *nm = "uqshrn";   *ar = "2sd"; return;
      case ARM64vecshi_UQSHRN4HS:   *nm = "uqshrn";   *ar = "4hs"; return;
      case ARM64vecshi_UQSHRN8BH:   *nm = "uqshrn";   *ar = "8bh"; return;
      case ARM64vecshi_SQSHRUN2SD:  *nm = "sqshrun";  *ar = "2sd"; return;
      case ARM64vecshi_SQSHRUN4HS:  *nm = "sqshrun";  *ar = "4hs"; return;
      case ARM64vecshi_SQSHRUN8BH:  *nm = "sqshrun";  *ar = "8bh"; return;
      case ARM64vecshi_SQRSHRN2SD:  *nm = "sqrshrn";  *ar = "2sd"; return;
      case ARM64vecshi_SQRSHRN4HS:  *nm = "sqrshrn";  *ar = "4hs"; return;
      case ARM64vecshi_SQRSHRN8BH:  *nm = "sqrshrn";  *ar = "8bh"; return;
      case ARM64vecshi_UQRSHRN2SD:  *nm = "uqrshrn";  *ar = "2sd"; return;
      case ARM64vecshi_UQRSHRN4HS:  *nm = "uqrshrn";  *ar = "4hs"; return;
      case ARM64vecshi_UQRSHRN8BH:  *nm = "uqrshrn";  *ar = "8bh"; return;
      case ARM64vecshi_SQRSHRUN2SD: *nm = "sqrshrun"; *ar = "2sd"; return;
      case ARM64vecshi_SQRSHRUN4HS: *nm = "sqrshrun"; *ar = "4hs"; return;
      case ARM64vecshi_SQRSHRUN8BH: *nm = "sqrshrun"; *ar = "8bh"; return;
      case ARM64vecshi_UQSHL64x2:   *nm = "uqshl ";   *ar = "2d";  return;
      case ARM64vecshi_UQSHL32x4:   *nm = "uqshl ";   *ar = "4s";  return;
      case ARM64vecshi_UQSHL16x8:   *nm = "uqshl ";   *ar = "8h";  return;
      case ARM64vecshi_UQSHL8x16:   *nm = "uqshl ";   *ar = "16b"; return;
      case ARM64vecshi_SQSHL64x2:   *nm = "sqshl ";   *ar = "2d";  return;
      case ARM64vecshi_SQSHL32x4:   *nm = "sqshl ";   *ar = "4s";  return;
      case ARM64vecshi_SQSHL16x8:   *nm = "sqshl ";   *ar = "8h";  return;
      case ARM64vecshi_SQSHL8x16:   *nm = "sqshl ";   *ar = "16b"; return;
      case ARM64vecshi_SQSHLU64x2:  *nm = "sqshlu";   *ar = "2d";  return;
      case ARM64vecshi_SQSHLU32x4:  *nm = "sqshlu";   *ar = "4s";  return;
      case ARM64vecshi_SQSHLU16x8:  *nm = "sqshlu";   *ar = "8h";  return;
      case ARM64vecshi_SQSHLU8x16:  *nm = "sqshlu";   *ar = "16b"; return;
      default: vpanic("showARM64VecShiftImmOp");
   }
}
+
+static const HChar* showARM64VecNarrowOp(ARM64VecNarrowOp op) {
+   switch (op) {
+      case ARM64vecna_XTN:    return "xtn   ";
+      case ARM64vecna_SQXTN:  return "sqxtn ";
+      case ARM64vecna_UQXTN:  return "uqxtn ";
+      case ARM64vecna_SQXTUN: return "sqxtun";
+      default: vpanic("showARM64VecNarrowOp");
+   }
+}
+
+ARM64Instr* ARM64Instr_Arith ( HReg dst,
+                               HReg argL, ARM64RIA* argR, Bool isAdd ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                 = ARM64in_Arith;
+   i->ARM64in.Arith.dst   = dst;
+   i->ARM64in.Arith.argL  = argL;
+   i->ARM64in.Arith.argR  = argR;
+   i->ARM64in.Arith.isAdd = isAdd;
+   return i;
+}
+ARM64Instr* ARM64Instr_Cmp ( HReg argL, ARM64RIA* argR, Bool is64 ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag              = ARM64in_Cmp;
+   i->ARM64in.Cmp.argL = argL;
+   i->ARM64in.Cmp.argR = argR;
+   i->ARM64in.Cmp.is64 = is64;
+   return i;
+}
+ARM64Instr* ARM64Instr_Logic ( HReg dst,
+                               HReg argL, ARM64RIL* argR, ARM64LogicOp op ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                 = ARM64in_Logic;
+   i->ARM64in.Logic.dst   = dst;
+   i->ARM64in.Logic.argL  = argL;
+   i->ARM64in.Logic.argR  = argR;
+   i->ARM64in.Logic.op    = op;
+   return i;
+}
+ARM64Instr* ARM64Instr_Test ( HReg argL, ARM64RIL* argR ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag               = ARM64in_Test;
+   i->ARM64in.Test.argL = argL;
+   i->ARM64in.Test.argR = argR;
+   return i;
+}
+ARM64Instr* ARM64Instr_Shift ( HReg dst,
+                               HReg argL, ARM64RI6* argR, ARM64ShiftOp op ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                = ARM64in_Shift;
+   i->ARM64in.Shift.dst  = dst;
+   i->ARM64in.Shift.argL = argL;
+   i->ARM64in.Shift.argR = argR;
+   i->ARM64in.Shift.op   = op;
+   return i;
+}
+ARM64Instr* ARM64Instr_Unary ( HReg dst, HReg src, ARM64UnaryOp op ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag               = ARM64in_Unary;
+   i->ARM64in.Unary.dst = dst;
+   i->ARM64in.Unary.src = src;
+   i->ARM64in.Unary.op  = op;
+   return i;
+}
+ARM64Instr* ARM64Instr_MovI ( HReg dst, HReg src ) {
+   ARM64Instr* i      = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag             = ARM64in_MovI;
+   i->ARM64in.MovI.dst = dst;
+   i->ARM64in.MovI.src = src;
+   vassert(hregClass(src) == HRcInt64);
+   vassert(hregClass(dst) == HRcInt64);
+   return i;
+}
+ARM64Instr* ARM64Instr_Imm64 ( HReg dst, ULong imm64 ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                 = ARM64in_Imm64;
+   i->ARM64in.Imm64.dst   = dst;
+   i->ARM64in.Imm64.imm64 = imm64;
+   return i;
+}
+ARM64Instr* ARM64Instr_LdSt64 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                   = ARM64in_LdSt64;
+   i->ARM64in.LdSt64.isLoad = isLoad;
+   i->ARM64in.LdSt64.rD     = rD;
+   i->ARM64in.LdSt64.amode  = amode;
+   return i;
+}
+ARM64Instr* ARM64Instr_LdSt32 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                   = ARM64in_LdSt32;
+   i->ARM64in.LdSt32.isLoad = isLoad;
+   i->ARM64in.LdSt32.rD     = rD;
+   i->ARM64in.LdSt32.amode  = amode;
+   return i;
+}
+ARM64Instr* ARM64Instr_LdSt16 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                   = ARM64in_LdSt16;
+   i->ARM64in.LdSt16.isLoad = isLoad;
+   i->ARM64in.LdSt16.rD     = rD;
+   i->ARM64in.LdSt16.amode  = amode;
+   return i;
+}
+ARM64Instr* ARM64Instr_LdSt8 ( Bool isLoad, HReg rD, ARM64AMode* amode ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                  = ARM64in_LdSt8;
+   i->ARM64in.LdSt8.isLoad = isLoad;
+   i->ARM64in.LdSt8.rD     = rD;
+   i->ARM64in.LdSt8.amode  = amode;
+   return i;
+}
+ARM64Instr* ARM64Instr_XDirect ( Addr64 dstGA, ARM64AMode* amPC,
+                                 ARM64CondCode cond, Bool toFastEP ) {
+   ARM64Instr* i               = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                      = ARM64in_XDirect;
+   i->ARM64in.XDirect.dstGA    = dstGA;
+   i->ARM64in.XDirect.amPC     = amPC;
+   i->ARM64in.XDirect.cond     = cond;
+   i->ARM64in.XDirect.toFastEP = toFastEP;
+   return i;
+}
+ARM64Instr* ARM64Instr_XIndir ( HReg dstGA, ARM64AMode* amPC,
+                                ARM64CondCode cond ) {
+   ARM64Instr* i           = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                  = ARM64in_XIndir;
+   i->ARM64in.XIndir.dstGA = dstGA;
+   i->ARM64in.XIndir.amPC  = amPC;
+   i->ARM64in.XIndir.cond  = cond;
+   return i;
+}
+ARM64Instr* ARM64Instr_XAssisted ( HReg dstGA, ARM64AMode* amPC,
+                                   ARM64CondCode cond, IRJumpKind jk ) {
+   ARM64Instr* i              = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                     = ARM64in_XAssisted;
+   i->ARM64in.XAssisted.dstGA = dstGA;
+   i->ARM64in.XAssisted.amPC  = amPC;
+   i->ARM64in.XAssisted.cond  = cond;
+   i->ARM64in.XAssisted.jk    = jk;
+   return i;
+}
+ARM64Instr* ARM64Instr_CSel ( HReg dst, HReg argL, HReg argR,
+                              ARM64CondCode cond ) {
+   ARM64Instr* i        = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag               = ARM64in_CSel;
+   i->ARM64in.CSel.dst  = dst;
+   i->ARM64in.CSel.argL = argL;
+   i->ARM64in.CSel.argR = argR;
+   i->ARM64in.CSel.cond = cond;
+   return i;
+}
+ARM64Instr* ARM64Instr_Call ( ARM64CondCode cond, Addr64 target, Int nArgRegs,
+                              RetLoc rloc ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                   = ARM64in_Call;
+   i->ARM64in.Call.cond     = cond;
+   i->ARM64in.Call.target   = target;
+   i->ARM64in.Call.nArgRegs = nArgRegs;
+   i->ARM64in.Call.rloc     = rloc;
+   vassert(is_sane_RetLoc(rloc));
+   return i;
+}
+extern ARM64Instr* ARM64Instr_AddToSP ( Int simm ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                  = ARM64in_AddToSP;
+   i->ARM64in.AddToSP.simm = simm;
+   vassert(-4096 < simm && simm < 4096);
+   vassert(0 == (simm & 0xF));
+   return i;
+}
+extern ARM64Instr* ARM64Instr_FromSP  ( HReg dst ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                = ARM64in_FromSP;
+   i->ARM64in.FromSP.dst = dst;
+   return i;
+}
+ARM64Instr* ARM64Instr_Mul ( HReg dst, HReg argL, HReg argR,
+                             ARM64MulOp op ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag              = ARM64in_Mul;
+   i->ARM64in.Mul.dst  = dst;
+   i->ARM64in.Mul.argL = argL;
+   i->ARM64in.Mul.argR = argR;
+   i->ARM64in.Mul.op   = op;
+   return i;
+}
+ARM64Instr* ARM64Instr_LdrEX ( Int szB ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag               = ARM64in_LdrEX;
+   i->ARM64in.LdrEX.szB = szB;
+   vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
+   return i;
+}
+ARM64Instr* ARM64Instr_StrEX ( Int szB ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag               = ARM64in_StrEX;
+   i->ARM64in.StrEX.szB = szB;
+   vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
+   return i;
+}
+ARM64Instr* ARM64Instr_MFence ( void ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag        = ARM64in_MFence;
+   return i;
+}
+ARM64Instr* ARM64Instr_VLdStH ( Bool isLoad, HReg sD, HReg rN, UInt uimm12 ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                   = ARM64in_VLdStH;
+   i->ARM64in.VLdStH.isLoad = isLoad;
+   i->ARM64in.VLdStH.hD     = sD;
+   i->ARM64in.VLdStH.rN     = rN;
+   i->ARM64in.VLdStH.uimm12 = uimm12;
+   vassert(uimm12 < 8192 && 0 == (uimm12 & 1));
+   return i;
+}
+ARM64Instr* ARM64Instr_VLdStS ( Bool isLoad, HReg sD, HReg rN, UInt uimm12 ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                   = ARM64in_VLdStS;
+   i->ARM64in.VLdStS.isLoad = isLoad;
+   i->ARM64in.VLdStS.sD     = sD;
+   i->ARM64in.VLdStS.rN     = rN;
+   i->ARM64in.VLdStS.uimm12 = uimm12;
+   vassert(uimm12 < 16384 && 0 == (uimm12 & 3));
+   return i;
+}
+ARM64Instr* ARM64Instr_VLdStD ( Bool isLoad, HReg dD, HReg rN, UInt uimm12 ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                   = ARM64in_VLdStD;
+   i->ARM64in.VLdStD.isLoad = isLoad;
+   i->ARM64in.VLdStD.dD     = dD;
+   i->ARM64in.VLdStD.rN     = rN;
+   i->ARM64in.VLdStD.uimm12 = uimm12;
+   vassert(uimm12 < 32768 && 0 == (uimm12 & 7));
+   return i;
+}
+ARM64Instr* ARM64Instr_VLdStQ ( Bool isLoad, HReg rQ, HReg rN ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                   = ARM64in_VLdStQ;
+   i->ARM64in.VLdStQ.isLoad = isLoad;
+   i->ARM64in.VLdStQ.rQ     = rQ;
+   i->ARM64in.VLdStQ.rN     = rN;
+   return i;
+}
+ARM64Instr* ARM64Instr_VCvtI2F ( ARM64CvtOp how, HReg rD, HReg rS ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                 = ARM64in_VCvtI2F;
+   i->ARM64in.VCvtI2F.how = how;
+   i->ARM64in.VCvtI2F.rD  = rD;
+   i->ARM64in.VCvtI2F.rS  = rS;
+   return i;
+}
+ARM64Instr* ARM64Instr_VCvtF2I ( ARM64CvtOp how, HReg rD, HReg rS,
+                                 UChar armRM ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                   = ARM64in_VCvtF2I;
+   i->ARM64in.VCvtF2I.how   = how;
+   i->ARM64in.VCvtF2I.rD    = rD;
+   i->ARM64in.VCvtF2I.rS    = rS;
+   i->ARM64in.VCvtF2I.armRM = armRM;
+   vassert(armRM <= 3);
+   return i;
+}
+ARM64Instr* ARM64Instr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                 = ARM64in_VCvtSD;
+   i->ARM64in.VCvtSD.sToD = sToD;
+   i->ARM64in.VCvtSD.dst  = dst;
+   i->ARM64in.VCvtSD.src  = src;
+   return i;
+}
+ARM64Instr* ARM64Instr_VCvtHS ( Bool hToS, HReg dst, HReg src ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                 = ARM64in_VCvtHS;
+   i->ARM64in.VCvtHS.hToS = hToS;
+   i->ARM64in.VCvtHS.dst  = dst;
+   i->ARM64in.VCvtHS.src  = src;
+   return i;
+}
+ARM64Instr* ARM64Instr_VCvtHD ( Bool hToD, HReg dst, HReg src ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                 = ARM64in_VCvtHD;
+   i->ARM64in.VCvtHD.hToD = hToD;
+   i->ARM64in.VCvtHD.dst  = dst;
+   i->ARM64in.VCvtHD.src  = src;
+   return i;
+}
+ARM64Instr* ARM64Instr_VUnaryD ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                 = ARM64in_VUnaryD;
+   i->ARM64in.VUnaryD.op  = op;
+   i->ARM64in.VUnaryD.dst = dst;
+   i->ARM64in.VUnaryD.src = src;
+   return i;
+}
+ARM64Instr* ARM64Instr_VUnaryS ( ARM64FpUnaryOp op, HReg dst, HReg src ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                 = ARM64in_VUnaryS;
+   i->ARM64in.VUnaryS.op  = op;
+   i->ARM64in.VUnaryS.dst = dst;
+   i->ARM64in.VUnaryS.src = src;
+   return i;
+}
+/* Construct a binary double-precision FP operation. */
+ARM64Instr* ARM64Instr_VBinD ( ARM64FpBinOp op,
+                               HReg dst, HReg argL, HReg argR ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                = ARM64in_VBinD;
+   ins->ARM64in.VBinD.argR = argR;
+   ins->ARM64in.VBinD.argL = argL;
+   ins->ARM64in.VBinD.dst  = dst;
+   ins->ARM64in.VBinD.op   = op;
+   return ins;
+}
+/* Construct a binary single-precision FP operation. */
+ARM64Instr* ARM64Instr_VBinS ( ARM64FpBinOp op,
+                               HReg dst, HReg argL, HReg argR ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                = ARM64in_VBinS;
+   ins->ARM64in.VBinS.argR = argR;
+   ins->ARM64in.VBinS.argL = argL;
+   ins->ARM64in.VBinS.dst  = dst;
+   ins->ARM64in.VBinS.op   = op;
+   return ins;
+}
+/* Construct a double-precision FP compare of two registers. */
+ARM64Instr* ARM64Instr_VCmpD ( HReg argL, HReg argR ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                = ARM64in_VCmpD;
+   ins->ARM64in.VCmpD.argR = argR;
+   ins->ARM64in.VCmpD.argL = argL;
+   return ins;
+}
+/* Construct a single-precision FP compare of two registers. */
+ARM64Instr* ARM64Instr_VCmpS ( HReg argL, HReg argR ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                = ARM64in_VCmpS;
+   ins->ARM64in.VCmpS.argR = argR;
+   ins->ARM64in.VCmpS.argL = argL;
+   return ins;
+}
+/* Construct an FP conditional select; isD selects double (True) or
+   single (False) precision operands. */
+ARM64Instr* ARM64Instr_VFCSel ( HReg dst, HReg argL, HReg argR,
+                                ARM64CondCode cond, Bool isD ) {
+   ARM64Instr* ins        = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag               = ARM64in_VFCSel;
+   ins->ARM64in.VFCSel.isD  = isD;
+   ins->ARM64in.VFCSel.cond = cond;
+   ins->ARM64in.VFCSel.argR = argR;
+   ins->ARM64in.VFCSel.argL = argL;
+   ins->ARM64in.VFCSel.dst  = dst;
+   return ins;
+}
+/* Construct a move between an integer register and FPCR; toFPCR
+   gives the direction (True = write FPCR from iReg). */
+ARM64Instr* ARM64Instr_FPCR ( Bool toFPCR, HReg iReg ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                 = ARM64in_FPCR;
+   ins->ARM64in.FPCR.iReg   = iReg;
+   ins->ARM64in.FPCR.toFPCR = toFPCR;
+   return ins;
+}
+/* Construct a move between an integer register and FPSR; toFPSR
+   gives the direction (True = write FPSR from iReg). */
+ARM64Instr* ARM64Instr_FPSR ( Bool toFPSR, HReg iReg ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                 = ARM64in_FPSR;
+   ins->ARM64in.FPSR.iReg   = iReg;
+   ins->ARM64in.FPSR.toFPSR = toFPSR;
+   return ins;
+}
+/* Construct a binary vector operation on 128-bit registers. */
+ARM64Instr* ARM64Instr_VBinV ( ARM64VecBinOp op,
+                               HReg dst, HReg argL, HReg argR ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                = ARM64in_VBinV;
+   ins->ARM64in.VBinV.argR = argR;
+   ins->ARM64in.VBinV.argL = argL;
+   ins->ARM64in.VBinV.dst  = dst;
+   ins->ARM64in.VBinV.op   = op;
+   return ins;
+}
+/* Construct a vector op that modifies 'mod' in place using 'arg'. */
+ARM64Instr* ARM64Instr_VModifyV ( ARM64VecModifyOp op, HReg mod, HReg arg ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                 = ARM64in_VModifyV;
+   ins->ARM64in.VModifyV.arg = arg;
+   ins->ARM64in.VModifyV.mod = mod;
+   ins->ARM64in.VModifyV.op  = op;
+   return ins;
+}
+/* Construct a unary vector operation on 128-bit registers. */
+ARM64Instr* ARM64Instr_VUnaryV ( ARM64VecUnaryOp op, HReg dst, HReg arg ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                 = ARM64in_VUnaryV;
+   ins->ARM64in.VUnaryV.arg = arg;
+   ins->ARM64in.VUnaryV.dst = dst;
+   ins->ARM64in.VUnaryV.op  = op;
+   return ins;
+}
+/* Construct a vector narrowing operation.  dszBlg2 is log2 of the
+   destination lane size in bytes and must be 0, 1 or 2. */
+ARM64Instr* ARM64Instr_VNarrowV ( ARM64VecNarrowOp op,
+                                  UInt dszBlg2, HReg dst, HReg src ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                      = ARM64in_VNarrowV;
+   ins->ARM64in.VNarrowV.src     = src;
+   ins->ARM64in.VNarrowV.dst     = dst;
+   ins->ARM64in.VNarrowV.dszBlg2 = dszBlg2;
+   ins->ARM64in.VNarrowV.op      = op;
+   vassert(dszBlg2 == 0 || dszBlg2 == 1 || dszBlg2 == 2);
+   return ins;
+}
+/* Construct a vector shift-by-immediate.  The legal range of 'amt'
+   depends on the op's lane size and direction (see table below);
+   out-of-range amounts hit a vassert.  Fields are filled in before
+   the range check runs. */
+ARM64Instr* ARM64Instr_VShiftImmV ( ARM64VecShiftImmOp op,
+                                    HReg dst, HReg src, UInt amt ) {
+   ARM64Instr* i = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   i->tag                    = ARM64in_VShiftImmV;
+   i->ARM64in.VShiftImmV.op  = op;
+   i->ARM64in.VShiftImmV.dst = dst;
+   i->ARM64in.VShiftImmV.src = src;
+   i->ARM64in.VShiftImmV.amt = amt;
+   UInt minSh = 0;
+   UInt maxSh = 0;
+   switch (op) {
+      /* For right shifts, the allowed shift amounts are 1 .. lane_size.
+         For left shifts,  the allowed shift amounts are 0 .. lane_size-1. 
+      */
+      /* 64-bit lanes, right shifts (plain and narrowing). */
+      case ARM64vecshi_USHR64x2: case ARM64vecshi_SSHR64x2:
+      case ARM64vecshi_UQSHRN2SD: case ARM64vecshi_SQSHRN2SD:
+      case ARM64vecshi_SQSHRUN2SD:
+      case ARM64vecshi_UQRSHRN2SD: case ARM64vecshi_SQRSHRN2SD:
+      case ARM64vecshi_SQRSHRUN2SD:
+         minSh = 1; maxSh = 64; break;
+      /* 64-bit lanes, left shifts. */
+      case ARM64vecshi_SHL64x2:
+      case ARM64vecshi_UQSHL64x2: case ARM64vecshi_SQSHL64x2:
+      case ARM64vecshi_SQSHLU64x2:
+         minSh = 0; maxSh = 63; break;
+      /* 32-bit lanes, right shifts (plain and narrowing). */
+      case ARM64vecshi_USHR32x4: case ARM64vecshi_SSHR32x4:
+      case ARM64vecshi_UQSHRN4HS: case ARM64vecshi_SQSHRN4HS:
+      case ARM64vecshi_SQSHRUN4HS:
+      case ARM64vecshi_UQRSHRN4HS: case ARM64vecshi_SQRSHRN4HS:
+      case ARM64vecshi_SQRSHRUN4HS:
+         minSh = 1; maxSh = 32; break;
+      /* 32-bit lanes, left shifts. */
+      case ARM64vecshi_SHL32x4:
+      case ARM64vecshi_UQSHL32x4: case ARM64vecshi_SQSHL32x4:
+      case ARM64vecshi_SQSHLU32x4:
+         minSh = 0; maxSh = 31; break;
+      /* 16-bit lanes, right shifts (plain and narrowing). */
+      case ARM64vecshi_USHR16x8: case ARM64vecshi_SSHR16x8:
+      case ARM64vecshi_UQSHRN8BH: case ARM64vecshi_SQSHRN8BH:
+      case ARM64vecshi_SQSHRUN8BH:
+      case ARM64vecshi_UQRSHRN8BH: case ARM64vecshi_SQRSHRN8BH:
+      case ARM64vecshi_SQRSHRUN8BH:
+         minSh = 1; maxSh = 16; break;
+      /* 16-bit lanes, left shifts. */
+      case ARM64vecshi_SHL16x8:
+      case ARM64vecshi_UQSHL16x8: case ARM64vecshi_SQSHL16x8:
+      case ARM64vecshi_SQSHLU16x8:
+         minSh = 0; maxSh = 15; break;
+      /* 8-bit lanes, right shifts. */
+      case ARM64vecshi_USHR8x16: case ARM64vecshi_SSHR8x16:
+         minSh = 1; maxSh = 8; break;
+      /* 8-bit lanes, left shifts. */
+      case ARM64vecshi_SHL8x16:
+      case ARM64vecshi_UQSHL8x16: case ARM64vecshi_SQSHL8x16:
+      case ARM64vecshi_SQSHLU8x16:
+         minSh = 0; maxSh = 7; break;
+      default:
+         vassert(0);
+   }
+   vassert(maxSh > 0);
+   vassert(amt >= minSh && amt <= maxSh);
+   return i;
+}
+/* Construct a byte-wise vector extract; amtB is the byte offset and
+   must lie in 1..15. */
+ARM64Instr* ARM64Instr_VExtV ( HReg dst, HReg srcLo, HReg srcHi, UInt amtB ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                 = ARM64in_VExtV;
+   ins->ARM64in.VExtV.amtB  = amtB;
+   ins->ARM64in.VExtV.srcHi = srcHi;
+   ins->ARM64in.VExtV.srcLo = srcLo;
+   ins->ARM64in.VExtV.dst   = dst;
+   vassert(amtB >= 1 && amtB <= 15);
+   return ins;
+}
+/* Construct a 128-bit immediate load.  Only the listed 16-bit
+   patterns are representable; anything else aborts. */
+ARM64Instr* ARM64Instr_VImmQ (HReg rQ, UShort imm) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag               = ARM64in_VImmQ;
+   ins->ARM64in.VImmQ.imm = imm;
+   ins->ARM64in.VImmQ.rQ  = rQ;
+   /* Check that this is something that can actually be emitted. */
+   switch (imm) {
+      case 0x0000: case 0x0001: case 0x0003:
+      case 0x000F: case 0x003F: case 0x00FF: case 0xFFFF:
+         break;
+      default:
+         vassert(0);
+   }
+   return ins;
+}
+/* Construct a move from an X (integer) register to a D register. */
+ARM64Instr* ARM64Instr_VDfromX ( HReg rD, HReg rX ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag               = ARM64in_VDfromX;
+   ins->ARM64in.VDfromX.rX = rX;
+   ins->ARM64in.VDfromX.rD = rD;
+   return ins;
+}
+/* Construct a move from an X register into the low half of a Q
+   register. */
+ARM64Instr* ARM64Instr_VQfromX ( HReg rQ, HReg rXlo ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                 = ARM64in_VQfromX;
+   ins->ARM64in.VQfromX.rXlo = rXlo;
+   ins->ARM64in.VQfromX.rQ   = rQ;
+   return ins;
+}
+/* Construct a move from a pair of X registers into a Q register. */
+ARM64Instr* ARM64Instr_VQfromXX ( HReg rQ, HReg rXhi, HReg rXlo ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                  = ARM64in_VQfromXX;
+   ins->ARM64in.VQfromXX.rXlo = rXlo;
+   ins->ARM64in.VQfromXX.rXhi = rXhi;
+   ins->ARM64in.VQfromXX.rQ   = rQ;
+   return ins;
+}
+/* Construct a move of one 64-bit lane of a Q register to an X
+   register; laneNo must be 0 or 1. */
+ARM64Instr* ARM64Instr_VXfromQ ( HReg rX, HReg rQ, UInt laneNo ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                   = ARM64in_VXfromQ;
+   ins->ARM64in.VXfromQ.laneNo = laneNo;
+   ins->ARM64in.VXfromQ.rQ     = rQ;
+   ins->ARM64in.VXfromQ.rX     = rX;
+   vassert(laneNo <= 1);
+   return ins;
+}
+/* Construct a move from a D or S register to an X register; fromD
+   selects the source width (True = D). */
+ARM64Instr* ARM64Instr_VXfromDorS ( HReg rX, HReg rDorS, Bool fromD ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                     = ARM64in_VXfromDorS;
+   ins->ARM64in.VXfromDorS.fromD = fromD;
+   ins->ARM64in.VXfromDorS.rDorS = rDorS;
+   ins->ARM64in.VXfromDorS.rX    = rX;
+   return ins;
+}
+/* Construct an FP/vector register-to-register move.  szB selects the
+   register class: 16 requires 128-bit vector registers, 8 requires
+   64-bit FP registers; any other size panics. */
+ARM64Instr* ARM64Instr_VMov ( UInt szB, HReg dst, HReg src ) {
+   ARM64Instr* ins     = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag            = ARM64in_VMov;
+   ins->ARM64in.VMov.src = src;
+   ins->ARM64in.VMov.dst = dst;
+   ins->ARM64in.VMov.szB = szB;
+   if (szB == 16) {
+      vassert(hregClass(src) == HRcVec128);
+      vassert(hregClass(dst) == HRcVec128);
+   } else if (szB == 8) {
+      vassert(hregClass(src) == HRcFlt64);
+      vassert(hregClass(dst) == HRcFlt64);
+   } else {
+      vpanic("ARM64Instr_VMov");
+   }
+   return ins;
+}
+/* Construct an event check: decrements the counter at amCounter and,
+   on underflow, jumps to the address loaded from amFailAddr (see the
+   pretty-printer for the exact expansion). */
+ARM64Instr* ARM64Instr_EvCheck ( ARM64AMode* amCounter,
+                                 ARM64AMode* amFailAddr ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag                        = ARM64in_EvCheck;
+   ins->ARM64in.EvCheck.amFailAddr = amFailAddr;
+   ins->ARM64in.EvCheck.amCounter  = amCounter;
+   return ins;
+}
+/* Construct a profile-counter increment; it carries no operands. */
+ARM64Instr* ARM64Instr_ProfInc ( void ) {
+   ARM64Instr* ins = LibVEX_Alloc_inline(sizeof(ARM64Instr));
+   ins->tag        = ARM64in_ProfInc;
+   return ins;
+}
+
+/* ... */
+
+/* Pretty-print an instruction to the VEX log.  Debugging aid only:
+   the output resembles AArch64 assembly, but synthetic operations
+   (xDirect, xIndir, evCheck, profInc, ...) are printed in VEX's own
+   pseudo-syntax and register names are printed via ppHRegARM64 and
+   friends. */
+void ppARM64Instr ( const ARM64Instr* i ) {
+   switch (i->tag) {
+      case ARM64in_Arith:
+         vex_printf("%s    ", i->ARM64in.Arith.isAdd ? "add" : "sub");
+         ppHRegARM64(i->ARM64in.Arith.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.Arith.argL);
+         vex_printf(", ");
+         ppARM64RIA(i->ARM64in.Arith.argR);
+         return;
+      case ARM64in_Cmp:
+         vex_printf("cmp%s ", i->ARM64in.Cmp.is64 ? "   " : "(w)" );
+         ppHRegARM64(i->ARM64in.Cmp.argL);
+         vex_printf(", ");
+         ppARM64RIA(i->ARM64in.Cmp.argR);
+         return;
+      case ARM64in_Logic:
+         vex_printf("%s    ", showARM64LogicOp(i->ARM64in.Logic.op));
+         ppHRegARM64(i->ARM64in.Logic.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.Logic.argL);
+         vex_printf(", ");
+         ppARM64RIL(i->ARM64in.Logic.argR);
+         return;
+      case ARM64in_Test:
+         vex_printf("tst    ");
+         ppHRegARM64(i->ARM64in.Test.argL);
+         vex_printf(", ");
+         ppARM64RIL(i->ARM64in.Test.argR);
+         return;
+      case ARM64in_Shift:
+         vex_printf("%s    ", showARM64ShiftOp(i->ARM64in.Shift.op));
+         ppHRegARM64(i->ARM64in.Shift.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.Shift.argL);
+         vex_printf(", ");
+         ppARM64RI6(i->ARM64in.Shift.argR);
+         return;
+      case ARM64in_Unary:
+         vex_printf("%s    ", showARM64UnaryOp(i->ARM64in.Unary.op));
+         ppHRegARM64(i->ARM64in.Unary.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.Unary.src);
+         return;
+      case ARM64in_MovI:
+         vex_printf("mov    ");
+         ppHRegARM64(i->ARM64in.MovI.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.MovI.src);
+         return;
+      case ARM64in_Imm64:
+         vex_printf("imm64  ");
+         ppHRegARM64(i->ARM64in.Imm64.dst);
+         vex_printf(", 0x%llx", i->ARM64in.Imm64.imm64);
+         return;
+      /* Integer loads/stores: loads print "dst, amode", stores print
+         the amode first. */
+      case ARM64in_LdSt64:
+         if (i->ARM64in.LdSt64.isLoad) {
+            vex_printf("ldr    ");
+            ppHRegARM64(i->ARM64in.LdSt64.rD);
+            vex_printf(", ");
+            ppARM64AMode(i->ARM64in.LdSt64.amode);
+         } else {
+            vex_printf("str    ");
+            ppARM64AMode(i->ARM64in.LdSt64.amode);
+            vex_printf(", ");
+            ppHRegARM64(i->ARM64in.LdSt64.rD);
+         }
+         return;
+      case ARM64in_LdSt32:
+         if (i->ARM64in.LdSt32.isLoad) {
+            vex_printf("ldruw  ");
+            ppHRegARM64(i->ARM64in.LdSt32.rD);
+            vex_printf(", ");
+            ppARM64AMode(i->ARM64in.LdSt32.amode);
+         } else {
+            vex_printf("strw   ");
+            ppARM64AMode(i->ARM64in.LdSt32.amode);
+            vex_printf(", ");
+            ppHRegARM64(i->ARM64in.LdSt32.rD);
+         }
+         return;
+      case ARM64in_LdSt16:
+         if (i->ARM64in.LdSt16.isLoad) {
+            vex_printf("ldruh  ");
+            ppHRegARM64(i->ARM64in.LdSt16.rD);
+            vex_printf(", ");
+            ppARM64AMode(i->ARM64in.LdSt16.amode);
+         } else {
+            vex_printf("strh   ");
+            ppARM64AMode(i->ARM64in.LdSt16.amode);
+            vex_printf(", ");
+            ppHRegARM64(i->ARM64in.LdSt16.rD);
+         }
+         return;
+      case ARM64in_LdSt8:
+         if (i->ARM64in.LdSt8.isLoad) {
+            vex_printf("ldrub  ");
+            ppHRegARM64(i->ARM64in.LdSt8.rD);
+            vex_printf(", ");
+            ppARM64AMode(i->ARM64in.LdSt8.amode);
+         } else {
+            vex_printf("strb   ");
+            ppARM64AMode(i->ARM64in.LdSt8.amode);
+            vex_printf(", ");
+            ppHRegARM64(i->ARM64in.LdSt8.rD);
+         }
+         return;
+      /* Synthetic block-exit instructions, shown as pseudo-code. */
+      case ARM64in_XDirect:
+         vex_printf("(xDirect) ");
+         vex_printf("if (%%pstate.%s) { ",
+                    showARM64CondCode(i->ARM64in.XDirect.cond));
+         vex_printf("imm64 x9,0x%llx; ", i->ARM64in.XDirect.dstGA);
+         vex_printf("str x9,");
+         ppARM64AMode(i->ARM64in.XDirect.amPC);
+         vex_printf("; imm64-exactly4 x9,$disp_cp_chain_me_to_%sEP; ",
+                    i->ARM64in.XDirect.toFastEP ? "fast" : "slow");
+         vex_printf("blr x9 }");
+         return;
+      case ARM64in_XIndir:
+         vex_printf("(xIndir) ");
+         vex_printf("if (%%pstate.%s) { ",
+                    showARM64CondCode(i->ARM64in.XIndir.cond));
+         vex_printf("str ");
+         ppHRegARM64(i->ARM64in.XIndir.dstGA);
+         vex_printf(",");
+         ppARM64AMode(i->ARM64in.XIndir.amPC);
+         vex_printf("; imm64 x9,$disp_cp_xindir; ");
+         vex_printf("br x9 }");
+         return;
+      case ARM64in_XAssisted:
+         vex_printf("(xAssisted) ");
+         vex_printf("if (%%pstate.%s) { ",
+                    showARM64CondCode(i->ARM64in.XAssisted.cond));
+         vex_printf("str ");
+         ppHRegARM64(i->ARM64in.XAssisted.dstGA);
+         vex_printf(",");
+         ppARM64AMode(i->ARM64in.XAssisted.amPC);
+         vex_printf("; movw x21,$IRJumpKind_to_TRCVAL(%d); ",
+                    (Int)i->ARM64in.XAssisted.jk);
+         vex_printf("imm64 x9,$disp_cp_xassisted; ");
+         vex_printf("br x9 }");
+         return;
+      case ARM64in_CSel:
+         vex_printf("csel   ");
+         ppHRegARM64(i->ARM64in.CSel.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.CSel.argL);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.CSel.argR);
+         vex_printf(", %s", showARM64CondCode(i->ARM64in.CSel.cond));
+         return;
+      case ARM64in_Call:
+         vex_printf("call%s ",
+                    i->ARM64in.Call.cond==ARM64cc_AL
+                       ? "  " : showARM64CondCode(i->ARM64in.Call.cond));
+         vex_printf("0x%llx [nArgRegs=%d, ",
+                    i->ARM64in.Call.target, i->ARM64in.Call.nArgRegs);
+         ppRetLoc(i->ARM64in.Call.rloc);
+         vex_printf("]");
+         return;
+      case ARM64in_AddToSP: {
+         Int simm = i->ARM64in.AddToSP.simm;
+         vex_printf("%s    xsp, xsp, #%d", simm < 0 ? "sub" : "add", 
+                                           simm < 0 ? -simm : simm);
+         return;
+      }
+      case ARM64in_FromSP:
+         vex_printf("mov    ");
+         ppHRegARM64(i->ARM64in.FromSP.dst);
+         vex_printf(", xsp");
+         return;
+      case ARM64in_Mul:
+         vex_printf("%s  ", showARM64MulOp(i->ARM64in.Mul.op));
+         ppHRegARM64(i->ARM64in.Mul.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.Mul.argL);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.Mul.argR);
+         return;
+
+      /* Load/store exclusive use fixed registers (x2, x4, w0) in the
+         printed form. */
+      case ARM64in_LdrEX: {
+         const HChar* sz = " ";
+         switch (i->ARM64in.LdrEX.szB) {
+            case 1: sz = "b"; break;
+            case 2: sz = "h"; break;
+            case 4: case 8: break;
+            default: vassert(0);
+         }
+         vex_printf("ldxr%s  %c2, [x4]",
+                    sz, i->ARM64in.LdrEX.szB == 8 ? 'x' : 'w');
+         return;
+      }
+      case ARM64in_StrEX: {
+         const HChar* sz = " ";
+         switch (i->ARM64in.StrEX.szB) {
+            case 1: sz = "b"; break;
+            case 2: sz = "h"; break;
+            case 4: case 8: break;
+            default: vassert(0);
+         }
+         vex_printf("stxr%s  w0, %c2, [x4]",
+                    sz, i->ARM64in.StrEX.szB == 8 ? 'x' : 'w');
+         return;
+      }
+      case ARM64in_MFence:
+         vex_printf("(mfence) dsb sy; dmb sy; isb");
+         return;
+      /* FP loads/stores of various widths; the offset form is printed
+         as "uimm12(rN)". */
+      case ARM64in_VLdStH:
+         if (i->ARM64in.VLdStH.isLoad) {
+            vex_printf("ldr    ");
+            ppHRegARM64asHreg(i->ARM64in.VLdStH.hD);
+            vex_printf(", %u(", i->ARM64in.VLdStH.uimm12);
+            ppHRegARM64(i->ARM64in.VLdStH.rN);
+            vex_printf(")");
+         } else {
+            vex_printf("str    ");
+            vex_printf("%u(", i->ARM64in.VLdStH.uimm12);
+            ppHRegARM64(i->ARM64in.VLdStH.rN);
+            vex_printf("), ");
+            ppHRegARM64asHreg(i->ARM64in.VLdStH.hD);
+         }
+         return;
+      case ARM64in_VLdStS:
+         if (i->ARM64in.VLdStS.isLoad) {
+            vex_printf("ldr    ");
+            ppHRegARM64asSreg(i->ARM64in.VLdStS.sD);
+            vex_printf(", %u(", i->ARM64in.VLdStS.uimm12);
+            ppHRegARM64(i->ARM64in.VLdStS.rN);
+            vex_printf(")");
+         } else {
+            vex_printf("str    ");
+            vex_printf("%u(", i->ARM64in.VLdStS.uimm12);
+            ppHRegARM64(i->ARM64in.VLdStS.rN);
+            vex_printf("), ");
+            ppHRegARM64asSreg(i->ARM64in.VLdStS.sD);
+         }
+         return;
+      case ARM64in_VLdStD:
+         if (i->ARM64in.VLdStD.isLoad) {
+            vex_printf("ldr    ");
+            ppHRegARM64(i->ARM64in.VLdStD.dD);
+            vex_printf(", %u(", i->ARM64in.VLdStD.uimm12);
+            ppHRegARM64(i->ARM64in.VLdStD.rN);
+            vex_printf(")");
+         } else {
+            vex_printf("str    ");
+            vex_printf("%u(", i->ARM64in.VLdStD.uimm12);
+            ppHRegARM64(i->ARM64in.VLdStD.rN);
+            vex_printf("), ");
+            ppHRegARM64(i->ARM64in.VLdStD.dD);
+         }
+         return;
+      case ARM64in_VLdStQ:
+         if (i->ARM64in.VLdStQ.isLoad)
+            vex_printf("ld1.2d {");
+         else
+            vex_printf("st1.2d {");
+         ppHRegARM64(i->ARM64in.VLdStQ.rQ);
+         vex_printf("}, [");
+         ppHRegARM64(i->ARM64in.VLdStQ.rN);
+         vex_printf("]");
+         return;
+      case ARM64in_VCvtI2F: {
+         HChar syn  = '?';
+         UInt  fszB = 0;
+         UInt  iszB = 0;
+         characteriseARM64CvtOp(&syn, &fszB, &iszB, i->ARM64in.VCvtI2F.how);
+         vex_printf("%ccvtf  ", syn);
+         ppHRegARM64(i->ARM64in.VCvtI2F.rD);
+         vex_printf("(%c-reg), ", fszB == 4 ? 'S' : 'D');
+         ppHRegARM64(i->ARM64in.VCvtI2F.rS);
+         vex_printf("(%c-reg)", iszB == 4 ? 'W' : 'X');
+         return;
+      }
+      case ARM64in_VCvtF2I: {
+         HChar syn  = '?';
+         UInt  fszB = 0;
+         UInt  iszB = 0;
+         HChar rmo  = '?';
+         characteriseARM64CvtOp(&syn, &fszB, &iszB, i->ARM64in.VCvtF2I.how);
+         UChar armRM = i->ARM64in.VCvtF2I.armRM;
+         /* armRM (0..3) selects the rounding-mode letter in the
+            mnemonic: n/p/m/z. */
+         if (armRM < 4) rmo = "npmz"[armRM];
+         vex_printf("fcvt%c%c ", rmo, syn);
+         ppHRegARM64(i->ARM64in.VCvtF2I.rD);
+         vex_printf("(%c-reg), ", iszB == 4 ? 'W' : 'X');
+         ppHRegARM64(i->ARM64in.VCvtF2I.rS);
+         vex_printf("(%c-reg)", fszB == 4 ? 'S' : 'D');
+         return;
+      }
+      case ARM64in_VCvtSD:
+         vex_printf("fcvt%s ", i->ARM64in.VCvtSD.sToD ? "s2d" : "d2s");
+         if (i->ARM64in.VCvtSD.sToD) {
+            ppHRegARM64(i->ARM64in.VCvtSD.dst);
+            vex_printf(", ");
+            ppHRegARM64asSreg(i->ARM64in.VCvtSD.src);
+         } else {
+            ppHRegARM64asSreg(i->ARM64in.VCvtSD.dst);
+            vex_printf(", ");
+            ppHRegARM64(i->ARM64in.VCvtSD.src);
+         }
+         return;
+      case ARM64in_VCvtHS:
+         vex_printf("fcvt%s ", i->ARM64in.VCvtHS.hToS ? "h2s" : "s2h");
+         if (i->ARM64in.VCvtHS.hToS) {
+            ppHRegARM64asSreg(i->ARM64in.VCvtHS.dst);
+            vex_printf(", ");
+            ppHRegARM64asHreg(i->ARM64in.VCvtHS.src);
+         } else {
+            ppHRegARM64asHreg(i->ARM64in.VCvtHS.dst);
+            vex_printf(", ");
+            ppHRegARM64asSreg(i->ARM64in.VCvtHS.src);
+         }
+         return;
+      case ARM64in_VCvtHD:
+         vex_printf("fcvt%s ", i->ARM64in.VCvtHD.hToD ? "h2d" : "d2h");
+         if (i->ARM64in.VCvtHD.hToD) {
+            ppHRegARM64(i->ARM64in.VCvtHD.dst);
+            vex_printf(", ");
+            ppHRegARM64asHreg(i->ARM64in.VCvtHD.src);
+         } else {
+            ppHRegARM64asHreg(i->ARM64in.VCvtHD.dst);
+            vex_printf(", ");
+            ppHRegARM64(i->ARM64in.VCvtHD.src);
+         }
+         return;
+      case ARM64in_VUnaryD:
+         vex_printf("f%s ", showARM64FpUnaryOp(i->ARM64in.VUnaryD.op));
+         ppHRegARM64(i->ARM64in.VUnaryD.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.VUnaryD.src);
+         return;
+      case ARM64in_VUnaryS:
+         vex_printf("f%s ", showARM64FpUnaryOp(i->ARM64in.VUnaryS.op));
+         ppHRegARM64asSreg(i->ARM64in.VUnaryS.dst);
+         vex_printf(", ");
+         ppHRegARM64asSreg(i->ARM64in.VUnaryS.src);
+         return;
+      case ARM64in_VBinD:
+         vex_printf("f%s   ", showARM64FpBinOp(i->ARM64in.VBinD.op));
+         ppHRegARM64(i->ARM64in.VBinD.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.VBinD.argL);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.VBinD.argR);
+         return;
+      case ARM64in_VBinS:
+         vex_printf("f%s   ", showARM64FpBinOp(i->ARM64in.VBinS.op));
+         ppHRegARM64asSreg(i->ARM64in.VBinS.dst);
+         vex_printf(", ");
+         ppHRegARM64asSreg(i->ARM64in.VBinS.argL);
+         vex_printf(", ");
+         ppHRegARM64asSreg(i->ARM64in.VBinS.argR);
+         return;
+      case ARM64in_VCmpD:
+         vex_printf("fcmp   ");
+         ppHRegARM64(i->ARM64in.VCmpD.argL);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.VCmpD.argR);
+         return;
+      case ARM64in_VCmpS:
+         vex_printf("fcmp   ");
+         ppHRegARM64asSreg(i->ARM64in.VCmpS.argL);
+         vex_printf(", ");
+         ppHRegARM64asSreg(i->ARM64in.VCmpS.argR);
+         return;
+      case ARM64in_VFCSel: {
+         /* Choose the D-reg or S-reg printer once, per isD. */
+         void (*ppHRegARM64fp)(HReg)
+            = (i->ARM64in.VFCSel.isD ? ppHRegARM64 : ppHRegARM64asSreg);
+         vex_printf("fcsel  ");
+         ppHRegARM64fp(i->ARM64in.VFCSel.dst);
+         vex_printf(", ");
+         ppHRegARM64fp(i->ARM64in.VFCSel.argL);
+         vex_printf(", ");
+         ppHRegARM64fp(i->ARM64in.VFCSel.argR);
+         vex_printf(", %s", showARM64CondCode(i->ARM64in.VFCSel.cond));
+         return;
+      }
+      case ARM64in_FPCR:
+         if (i->ARM64in.FPCR.toFPCR) {
+            vex_printf("msr    fpcr, ");
+            ppHRegARM64(i->ARM64in.FPCR.iReg);
+         } else {
+            vex_printf("mrs    ");
+            ppHRegARM64(i->ARM64in.FPCR.iReg);
+            vex_printf(", fpcr");
+         }
+         return;
+      case ARM64in_FPSR:
+         if (i->ARM64in.FPSR.toFPSR) {
+            vex_printf("msr    fpsr, ");
+            ppHRegARM64(i->ARM64in.FPSR.iReg);
+         } else {
+            vex_printf("mrs    ");
+            ppHRegARM64(i->ARM64in.FPSR.iReg);
+            vex_printf(", fpsr");
+         }
+         return;
+      case ARM64in_VBinV: {
+         const HChar* nm = "??";
+         const HChar* ar = "??";
+         showARM64VecBinOp(&nm, &ar, i->ARM64in.VBinV.op);
+         vex_printf("%s ", nm);
+         ppHRegARM64(i->ARM64in.VBinV.dst);
+         vex_printf(".%s, ", ar);
+         ppHRegARM64(i->ARM64in.VBinV.argL);
+         vex_printf(".%s, ", ar);
+         ppHRegARM64(i->ARM64in.VBinV.argR);
+         vex_printf(".%s", ar);
+         return;
+      }
+      case ARM64in_VModifyV: {
+         const HChar* nm = "??";
+         const HChar* ar = "??";
+         showARM64VecModifyOp(&nm, &ar, i->ARM64in.VModifyV.op);
+         vex_printf("%s ", nm);
+         ppHRegARM64(i->ARM64in.VModifyV.mod);
+         vex_printf(".%s, ", ar);
+         ppHRegARM64(i->ARM64in.VModifyV.arg);
+         vex_printf(".%s", ar);
+         return;
+      }
+      case ARM64in_VUnaryV: {
+         const HChar* nm = "??";
+         const HChar* ar = "??";
+         showARM64VecUnaryOp(&nm, &ar, i->ARM64in.VUnaryV.op);
+         vex_printf("%s  ", nm);
+         ppHRegARM64(i->ARM64in.VUnaryV.dst);
+         vex_printf(".%s, ", ar);
+         ppHRegARM64(i->ARM64in.VUnaryV.arg);
+         vex_printf(".%s", ar);
+         return;
+      }
+      case ARM64in_VNarrowV: {
+         /* dszBlg2 (0..2) indexes the dest/src arrangement tables. */
+         UInt dszBlg2 = i->ARM64in.VNarrowV.dszBlg2;
+         const HChar* darr[3] = { "8b", "4h", "2s" };
+         const HChar* sarr[3] = { "8h", "4s", "2d" };
+         const HChar* nm = showARM64VecNarrowOp(i->ARM64in.VNarrowV.op);
+         vex_printf("%s ", nm);
+         ppHRegARM64(i->ARM64in.VNarrowV.dst);
+         vex_printf(".%s, ", dszBlg2 < 3 ? darr[dszBlg2] : "??");
+         ppHRegARM64(i->ARM64in.VNarrowV.src);
+         vex_printf(".%s", dszBlg2 < 3 ? sarr[dszBlg2] : "??");
+         return;
+      }
+      case ARM64in_VShiftImmV: {
+         const HChar* nm = "??";
+         const HChar* ar = "??";
+         showARM64VecShiftImmOp(&nm, &ar, i->ARM64in.VShiftImmV.op);
+         vex_printf("%s ", nm);
+         ppHRegARM64(i->ARM64in.VShiftImmV.dst);
+         vex_printf(".%s, ", ar);
+         ppHRegARM64(i->ARM64in.VShiftImmV.src);
+         vex_printf(".%s, #%u", ar, i->ARM64in.VShiftImmV.amt);
+         return;
+      }
+      case ARM64in_VExtV: {
+         vex_printf("ext    ");
+         ppHRegARM64(i->ARM64in.VExtV.dst);
+         vex_printf(".16b, ");
+         ppHRegARM64(i->ARM64in.VExtV.srcLo);
+         vex_printf(".16b, ");
+         ppHRegARM64(i->ARM64in.VExtV.srcHi);
+         vex_printf(".16b, #%u", i->ARM64in.VExtV.amtB);
+         return;
+      }
+      case ARM64in_VImmQ:
+         vex_printf("qimm   ");
+         ppHRegARM64(i->ARM64in.VImmQ.rQ);
+         vex_printf(", Bits16toBytes16(0x%x)", (UInt)i->ARM64in.VImmQ.imm);
+         return;
+      case ARM64in_VDfromX:
+         vex_printf("fmov   ");
+         ppHRegARM64(i->ARM64in.VDfromX.rD);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.VDfromX.rX);
+         return;
+      case ARM64in_VQfromX:
+         vex_printf("fmov   ");
+         ppHRegARM64(i->ARM64in.VQfromX.rQ);
+         vex_printf(".d[0], ");
+         ppHRegARM64(i->ARM64in.VQfromX.rXlo);
+         return;
+      case ARM64in_VQfromXX:
+         vex_printf("qFromXX ");
+         ppHRegARM64(i->ARM64in.VQfromXX.rQ);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.VQfromXX.rXhi);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.VQfromXX.rXlo);
+         return;
+      case ARM64in_VXfromQ:
+         vex_printf("fmov   ");
+         ppHRegARM64(i->ARM64in.VXfromQ.rX);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.VXfromQ.rQ);
+         vex_printf(".d[%u]", i->ARM64in.VXfromQ.laneNo);
+         return;
+      case ARM64in_VXfromDorS:
+         vex_printf("fmov   ");
+         ppHRegARM64(i->ARM64in.VXfromDorS.rX);
+         vex_printf("(%c-reg), ", i->ARM64in.VXfromDorS.fromD ? 'X':'W');
+         ppHRegARM64(i->ARM64in.VXfromDorS.rDorS);
+         vex_printf("(%c-reg)", i->ARM64in.VXfromDorS.fromD ? 'D' : 'S');
+         return;
+      case ARM64in_VMov: {
+         UChar aux = '?';
+         switch (i->ARM64in.VMov.szB) {
+            case 16: aux = 'q'; break;
+            case 8:  aux = 'd'; break;
+            case 4:  aux = 's'; break;
+            default: break;
+         }
+         vex_printf("mov(%c) ", aux);
+         ppHRegARM64(i->ARM64in.VMov.dst);
+         vex_printf(", ");
+         ppHRegARM64(i->ARM64in.VMov.src);
+         return;
+      }
+      case ARM64in_EvCheck:
+         vex_printf("(evCheck) ldr w9,");
+         ppARM64AMode(i->ARM64in.EvCheck.amCounter);
+         vex_printf("; subs w9,w9,$1; str w9,");
+         ppARM64AMode(i->ARM64in.EvCheck.amCounter);
+         vex_printf("; bpl nofail; ldr x9,");
+         ppARM64AMode(i->ARM64in.EvCheck.amFailAddr);
+         vex_printf("; br x9; nofail:");
+         return;
+      case ARM64in_ProfInc:
+         vex_printf("(profInc) imm64-fixed4 x9,$NotKnownYet; "
+                    "ldr x8,[x9]; add x8,x8,#1, str x8,[x9]");
+         return;
+      default:
+         vex_printf("ppARM64Instr: unhandled case (tag %d)", (Int)i->tag);
+         vpanic("ppARM64Instr(1)");
+         return;
+   }
+}
+
+
+/* --------- Helpers for register allocation. --------- */
+
+void getRegUsage_ARM64Instr ( HRegUsage* u, const ARM64Instr* i, Bool mode64 )
+{
+   vassert(mode64 == True);
+   initHRegUsage(u);
+   switch (i->tag) {
+      case ARM64in_Arith:
+         addHRegUse(u, HRmWrite, i->ARM64in.Arith.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.Arith.argL);
+         addRegUsage_ARM64RIA(u, i->ARM64in.Arith.argR);
+         return;
+      case ARM64in_Cmp:
+         addHRegUse(u, HRmRead, i->ARM64in.Cmp.argL);
+         addRegUsage_ARM64RIA(u, i->ARM64in.Cmp.argR);
+         return;
+      case ARM64in_Logic:
+         addHRegUse(u, HRmWrite, i->ARM64in.Logic.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.Logic.argL);
+         addRegUsage_ARM64RIL(u, i->ARM64in.Logic.argR);
+         return;
+      case ARM64in_Test:
+         addHRegUse(u, HRmRead, i->ARM64in.Test.argL);
+         addRegUsage_ARM64RIL(u, i->ARM64in.Test.argR);
+         return;
+      case ARM64in_Shift:
+         addHRegUse(u, HRmWrite, i->ARM64in.Shift.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.Shift.argL);
+         addRegUsage_ARM64RI6(u, i->ARM64in.Shift.argR);
+         return;
+      case ARM64in_Unary:
+         addHRegUse(u, HRmWrite, i->ARM64in.Unary.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.Unary.src);
+         return;
+      case ARM64in_MovI:
+         addHRegUse(u, HRmWrite, i->ARM64in.MovI.dst);
+         addHRegUse(u, HRmRead,  i->ARM64in.MovI.src);
+         return;
+      case ARM64in_Imm64:
+         addHRegUse(u, HRmWrite, i->ARM64in.Imm64.dst);
+         return;
+      case ARM64in_LdSt64:
+         addRegUsage_ARM64AMode(u, i->ARM64in.LdSt64.amode);
+         if (i->ARM64in.LdSt64.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARM64in.LdSt64.rD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARM64in.LdSt64.rD);
+         }
+         return;
+      case ARM64in_LdSt32:
+         addRegUsage_ARM64AMode(u, i->ARM64in.LdSt32.amode);
+         if (i->ARM64in.LdSt32.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARM64in.LdSt32.rD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARM64in.LdSt32.rD);
+         }
+         return;
+      case ARM64in_LdSt16:
+         addRegUsage_ARM64AMode(u, i->ARM64in.LdSt16.amode);
+         if (i->ARM64in.LdSt16.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARM64in.LdSt16.rD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARM64in.LdSt16.rD);
+         }
+         return;
+      case ARM64in_LdSt8:
+         addRegUsage_ARM64AMode(u, i->ARM64in.LdSt8.amode);
+         if (i->ARM64in.LdSt8.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARM64in.LdSt8.rD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARM64in.LdSt8.rD);
+         }
+         return;
+      /* XDirect/XIndir/XAssisted are also a bit subtle.  They
+         conditionally exit the block.  Hence we only need to list (1)
+         the registers that they read, and (2) the registers that they
+         write in the case where the block is not exited.  (2) is
+         empty, hence only (1) is relevant here. */
+      case ARM64in_XDirect:
+         addRegUsage_ARM64AMode(u, i->ARM64in.XDirect.amPC);
+         return;
+      case ARM64in_XIndir:
+         addHRegUse(u, HRmRead, i->ARM64in.XIndir.dstGA);
+         addRegUsage_ARM64AMode(u, i->ARM64in.XIndir.amPC);
+         return;
+      case ARM64in_XAssisted:
+         addHRegUse(u, HRmRead, i->ARM64in.XAssisted.dstGA);
+         addRegUsage_ARM64AMode(u, i->ARM64in.XAssisted.amPC);
+         return;
+      case ARM64in_CSel:
+         addHRegUse(u, HRmWrite, i->ARM64in.CSel.dst);
+         addHRegUse(u, HRmRead,  i->ARM64in.CSel.argL);
+         addHRegUse(u, HRmRead,  i->ARM64in.CSel.argR);
+         return;
+      case ARM64in_Call:
+         /* logic and comments copied/modified from x86 back end */
+         /* This is a bit subtle. */
+         /* First off, claim it trashes all the caller-saved regs
+            which fall within the register allocator's jurisdiction.
+            These I believe to be x0 to x7 and the 128-bit vector
+            registers in use, q16 .. q20. */
+         addHRegUse(u, HRmWrite, hregARM64_X0());
+         addHRegUse(u, HRmWrite, hregARM64_X1());
+         addHRegUse(u, HRmWrite, hregARM64_X2());
+         addHRegUse(u, HRmWrite, hregARM64_X3());
+         addHRegUse(u, HRmWrite, hregARM64_X4());
+         addHRegUse(u, HRmWrite, hregARM64_X5());
+         addHRegUse(u, HRmWrite, hregARM64_X6());
+         addHRegUse(u, HRmWrite, hregARM64_X7());
+         addHRegUse(u, HRmWrite, hregARM64_Q16());
+         addHRegUse(u, HRmWrite, hregARM64_Q17());
+         addHRegUse(u, HRmWrite, hregARM64_Q18());
+         addHRegUse(u, HRmWrite, hregARM64_Q19());
+         addHRegUse(u, HRmWrite, hregARM64_Q20());
+         /* Now we have to state any parameter-carrying registers
+            which might be read.  This depends on nArgRegs. */
+            switch (i->ARM64in.Call.nArgRegs) {
+            case 8: addHRegUse(u, HRmRead, hregARM64_X7()); /*fallthru*/
+            case 7: addHRegUse(u, HRmRead, hregARM64_X6()); /*fallthru*/
+            case 6: addHRegUse(u, HRmRead, hregARM64_X5()); /*fallthru*/
+            case 5: addHRegUse(u, HRmRead, hregARM64_X4()); /*fallthru*/
+            case 4: addHRegUse(u, HRmRead, hregARM64_X3()); /*fallthru*/
+            case 3: addHRegUse(u, HRmRead, hregARM64_X2()); /*fallthru*/
+            case 2: addHRegUse(u, HRmRead, hregARM64_X1()); /*fallthru*/
+            case 1: addHRegUse(u, HRmRead, hregARM64_X0()); break;
+            case 0: break;
+            default: vpanic("getRegUsage_ARM64:Call:regparms");
+         }
+         /* Finally, there is the issue that the insn trashes a
+            register because the literal target address has to be
+            loaded into a register.  However, we reserve x9 for that
+            purpose so there's no further complexity here.  Stating x9
+            as trashed is pointless since it's not under the control
+            of the allocator, but what the hell. */
+         addHRegUse(u, HRmWrite, hregARM64_X9());
+         return;
+      case ARM64in_AddToSP:
+         /* Only changes SP, but regalloc doesn't control that, hence
+            we don't care. */
+         return;
+      case ARM64in_FromSP:
+         addHRegUse(u, HRmWrite, i->ARM64in.FromSP.dst);
+         return;
+      case ARM64in_Mul:
+         addHRegUse(u, HRmWrite, i->ARM64in.Mul.dst);
+         addHRegUse(u, HRmRead,  i->ARM64in.Mul.argL);
+         addHRegUse(u, HRmRead,  i->ARM64in.Mul.argR);
+         return;
+      case ARM64in_LdrEX:
+         addHRegUse(u, HRmRead, hregARM64_X4());
+         addHRegUse(u, HRmWrite, hregARM64_X2());
+         return;
+      case ARM64in_StrEX:
+         addHRegUse(u, HRmRead, hregARM64_X4());
+         addHRegUse(u, HRmWrite, hregARM64_X0());
+         addHRegUse(u, HRmRead, hregARM64_X2());
+         return;
+      case ARM64in_MFence:
+         return;
+      case ARM64in_VLdStH:
+         addHRegUse(u, HRmRead, i->ARM64in.VLdStH.rN);
+         if (i->ARM64in.VLdStH.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARM64in.VLdStH.hD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARM64in.VLdStH.hD);
+         }
+         return;
+      case ARM64in_VLdStS:
+         addHRegUse(u, HRmRead, i->ARM64in.VLdStS.rN);
+         if (i->ARM64in.VLdStS.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARM64in.VLdStS.sD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARM64in.VLdStS.sD);
+         }
+         return;
+      case ARM64in_VLdStD:
+         addHRegUse(u, HRmRead, i->ARM64in.VLdStD.rN);
+         if (i->ARM64in.VLdStD.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARM64in.VLdStD.dD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARM64in.VLdStD.dD);
+         }
+         return;
+      case ARM64in_VLdStQ:
+         addHRegUse(u, HRmRead, i->ARM64in.VLdStQ.rN);
+         if (i->ARM64in.VLdStQ.isLoad)
+            addHRegUse(u, HRmWrite, i->ARM64in.VLdStQ.rQ);
+         else
+            addHRegUse(u, HRmRead, i->ARM64in.VLdStQ.rQ);
+         return;
+      case ARM64in_VCvtI2F:
+         addHRegUse(u, HRmRead, i->ARM64in.VCvtI2F.rS);
+         addHRegUse(u, HRmWrite, i->ARM64in.VCvtI2F.rD);
+         return;
+      case ARM64in_VCvtF2I:
+         addHRegUse(u, HRmRead, i->ARM64in.VCvtF2I.rS);
+         addHRegUse(u, HRmWrite, i->ARM64in.VCvtF2I.rD);
+         return;
+      case ARM64in_VCvtSD:
+         addHRegUse(u, HRmWrite, i->ARM64in.VCvtSD.dst);
+         addHRegUse(u, HRmRead,  i->ARM64in.VCvtSD.src);
+         return;
+      case ARM64in_VCvtHS:
+         addHRegUse(u, HRmWrite, i->ARM64in.VCvtHS.dst);
+         addHRegUse(u, HRmRead,  i->ARM64in.VCvtHS.src);
+         return;
+      case ARM64in_VCvtHD:
+         addHRegUse(u, HRmWrite, i->ARM64in.VCvtHD.dst);
+         addHRegUse(u, HRmRead,  i->ARM64in.VCvtHD.src);
+         return;
+      case ARM64in_VUnaryD:
+         addHRegUse(u, HRmWrite, i->ARM64in.VUnaryD.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.VUnaryD.src);
+         return;
+      case ARM64in_VUnaryS:
+         addHRegUse(u, HRmWrite, i->ARM64in.VUnaryS.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.VUnaryS.src);
+         return;
+      case ARM64in_VBinD:
+         addHRegUse(u, HRmWrite, i->ARM64in.VBinD.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.VBinD.argL);
+         addHRegUse(u, HRmRead, i->ARM64in.VBinD.argR);
+         return;
+      case ARM64in_VBinS:
+         addHRegUse(u, HRmWrite, i->ARM64in.VBinS.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.VBinS.argL);
+         addHRegUse(u, HRmRead, i->ARM64in.VBinS.argR);
+         return;
+      case ARM64in_VCmpD:
+         addHRegUse(u, HRmRead, i->ARM64in.VCmpD.argL);
+         addHRegUse(u, HRmRead, i->ARM64in.VCmpD.argR);
+         return;
+      case ARM64in_VCmpS:
+         addHRegUse(u, HRmRead, i->ARM64in.VCmpS.argL);
+         addHRegUse(u, HRmRead, i->ARM64in.VCmpS.argR);
+         return;
+      case ARM64in_VFCSel:
+         addHRegUse(u, HRmRead, i->ARM64in.VFCSel.argL);
+         addHRegUse(u, HRmRead, i->ARM64in.VFCSel.argR);
+         addHRegUse(u, HRmWrite, i->ARM64in.VFCSel.dst);
+         return;
+      case ARM64in_FPCR:
+         if (i->ARM64in.FPCR.toFPCR)
+            addHRegUse(u, HRmRead, i->ARM64in.FPCR.iReg);
+         else
+            addHRegUse(u, HRmWrite, i->ARM64in.FPCR.iReg);
+         return;
+      case ARM64in_FPSR:
+         if (i->ARM64in.FPSR.toFPSR)
+            addHRegUse(u, HRmRead, i->ARM64in.FPSR.iReg);
+         else
+            addHRegUse(u, HRmWrite, i->ARM64in.FPSR.iReg);
+         return;
+      case ARM64in_VBinV:
+         addHRegUse(u, HRmWrite, i->ARM64in.VBinV.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.VBinV.argL);
+         addHRegUse(u, HRmRead, i->ARM64in.VBinV.argR);
+         return;
+      case ARM64in_VModifyV:
+         addHRegUse(u, HRmWrite, i->ARM64in.VModifyV.mod);
+         addHRegUse(u, HRmRead, i->ARM64in.VModifyV.mod);
+         addHRegUse(u, HRmRead, i->ARM64in.VModifyV.arg);
+         return;
+      case ARM64in_VUnaryV:
+         addHRegUse(u, HRmWrite, i->ARM64in.VUnaryV.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.VUnaryV.arg);
+         return;
+      case ARM64in_VNarrowV:
+         addHRegUse(u, HRmWrite, i->ARM64in.VNarrowV.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.VNarrowV.src);
+         return;
+      case ARM64in_VShiftImmV:
+         addHRegUse(u, HRmWrite, i->ARM64in.VShiftImmV.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.VShiftImmV.src);
+         return;
+      case ARM64in_VExtV:
+         addHRegUse(u, HRmWrite, i->ARM64in.VExtV.dst);
+         addHRegUse(u, HRmRead, i->ARM64in.VExtV.srcLo);
+         addHRegUse(u, HRmRead, i->ARM64in.VExtV.srcHi);
+         return;
+      case ARM64in_VImmQ:
+         addHRegUse(u, HRmWrite, i->ARM64in.VImmQ.rQ);
+         return;
+      case ARM64in_VDfromX:
+         addHRegUse(u, HRmWrite, i->ARM64in.VDfromX.rD);
+         addHRegUse(u, HRmRead,  i->ARM64in.VDfromX.rX);
+         return;
+      case ARM64in_VQfromX:
+         addHRegUse(u, HRmWrite, i->ARM64in.VQfromX.rQ);
+         addHRegUse(u, HRmRead,  i->ARM64in.VQfromX.rXlo);
+         return;
+      case ARM64in_VQfromXX:
+         addHRegUse(u, HRmWrite, i->ARM64in.VQfromXX.rQ);
+         addHRegUse(u, HRmRead,  i->ARM64in.VQfromXX.rXhi);
+         addHRegUse(u, HRmRead,  i->ARM64in.VQfromXX.rXlo);
+         return;
+      case ARM64in_VXfromQ:
+         addHRegUse(u, HRmWrite, i->ARM64in.VXfromQ.rX);
+         addHRegUse(u, HRmRead,  i->ARM64in.VXfromQ.rQ);
+         return;
+      case ARM64in_VXfromDorS:
+         addHRegUse(u, HRmWrite, i->ARM64in.VXfromDorS.rX);
+         addHRegUse(u, HRmRead,  i->ARM64in.VXfromDorS.rDorS);
+         return;
+      case ARM64in_VMov:
+         addHRegUse(u, HRmWrite, i->ARM64in.VMov.dst);
+         addHRegUse(u, HRmRead,  i->ARM64in.VMov.src);
+         return;
+      case ARM64in_EvCheck:
+         /* We expect both amodes only to mention x21, so this is in
+            fact pointless, since x21 isn't allocatable, but
+            anyway.. */
+         addRegUsage_ARM64AMode(u, i->ARM64in.EvCheck.amCounter);
+         addRegUsage_ARM64AMode(u, i->ARM64in.EvCheck.amFailAddr);
+         addHRegUse(u, HRmWrite, hregARM64_X9()); /* also unavail to RA */
+         return;
+      case ARM64in_ProfInc:
+         /* Again, pointless to actually state these since neither
+            is available to RA. */
+         addHRegUse(u, HRmWrite, hregARM64_X9()); /* unavail to RA */
+         addHRegUse(u, HRmWrite, hregARM64_X8()); /* unavail to RA */
+         return;
+      default:
+         ppARM64Instr(i);
+         vpanic("getRegUsage_ARM64Instr");
+   }
+}
+
+
+/* Apply the register-allocator's vreg->rreg mapping |m| to every
+   register field of |i|, in place.  Must mention exactly the same
+   registers as getRegUsage_ARM64Instr does for the same insn. */
+void mapRegs_ARM64Instr ( HRegRemap* m, ARM64Instr* i, Bool mode64 )
+{
+   vassert(mode64 == True);
+   switch (i->tag) {
+      case ARM64in_Arith:
+         i->ARM64in.Arith.dst = lookupHRegRemap(m, i->ARM64in.Arith.dst);
+         i->ARM64in.Arith.argL = lookupHRegRemap(m, i->ARM64in.Arith.argL);
+         mapRegs_ARM64RIA(m, i->ARM64in.Arith.argR);
+         return;
+      case ARM64in_Cmp:
+         i->ARM64in.Cmp.argL = lookupHRegRemap(m, i->ARM64in.Cmp.argL);
+         mapRegs_ARM64RIA(m, i->ARM64in.Cmp.argR);
+         return;
+      case ARM64in_Logic:
+         i->ARM64in.Logic.dst = lookupHRegRemap(m, i->ARM64in.Logic.dst);
+         i->ARM64in.Logic.argL = lookupHRegRemap(m, i->ARM64in.Logic.argL);
+         mapRegs_ARM64RIL(m, i->ARM64in.Logic.argR);
+         return;
+      case ARM64in_Test:
+         i->ARM64in.Test.argL = lookupHRegRemap(m, i->ARM64in.Test.argL);
+         /* Was Logic.argR -- wrong union member (Logic has a leading
+            dst field, so the offsets differ).  Must be Test.argR, in
+            agreement with getRegUsage_ARM64Instr. */
+         mapRegs_ARM64RIL(m, i->ARM64in.Test.argR);
+         return;
+      case ARM64in_Shift:
+         i->ARM64in.Shift.dst = lookupHRegRemap(m, i->ARM64in.Shift.dst);
+         i->ARM64in.Shift.argL = lookupHRegRemap(m, i->ARM64in.Shift.argL);
+         mapRegs_ARM64RI6(m, i->ARM64in.Shift.argR);
+         return;
+      case ARM64in_Unary:
+         i->ARM64in.Unary.dst = lookupHRegRemap(m, i->ARM64in.Unary.dst);
+         i->ARM64in.Unary.src = lookupHRegRemap(m, i->ARM64in.Unary.src);
+         return;
+      case ARM64in_MovI:
+         i->ARM64in.MovI.dst = lookupHRegRemap(m, i->ARM64in.MovI.dst);
+         i->ARM64in.MovI.src = lookupHRegRemap(m, i->ARM64in.MovI.src);
+         return;
+      case ARM64in_Imm64:
+         i->ARM64in.Imm64.dst = lookupHRegRemap(m, i->ARM64in.Imm64.dst);
+         return;
+      case ARM64in_LdSt64:
+         i->ARM64in.LdSt64.rD = lookupHRegRemap(m, i->ARM64in.LdSt64.rD);
+         mapRegs_ARM64AMode(m, i->ARM64in.LdSt64.amode);
+         return;
+      case ARM64in_LdSt32:
+         i->ARM64in.LdSt32.rD = lookupHRegRemap(m, i->ARM64in.LdSt32.rD);
+         mapRegs_ARM64AMode(m, i->ARM64in.LdSt32.amode);
+         return;
+      case ARM64in_LdSt16:
+         i->ARM64in.LdSt16.rD = lookupHRegRemap(m, i->ARM64in.LdSt16.rD);
+         mapRegs_ARM64AMode(m, i->ARM64in.LdSt16.amode);
+         return;
+      case ARM64in_LdSt8:
+         i->ARM64in.LdSt8.rD = lookupHRegRemap(m, i->ARM64in.LdSt8.rD);
+         mapRegs_ARM64AMode(m, i->ARM64in.LdSt8.amode);
+         return;
+      case ARM64in_XDirect:
+         mapRegs_ARM64AMode(m, i->ARM64in.XDirect.amPC);
+         return;
+      case ARM64in_XIndir:
+         i->ARM64in.XIndir.dstGA
+            = lookupHRegRemap(m, i->ARM64in.XIndir.dstGA);
+         mapRegs_ARM64AMode(m, i->ARM64in.XIndir.amPC);
+         return;
+      case ARM64in_XAssisted:
+         i->ARM64in.XAssisted.dstGA
+            = lookupHRegRemap(m, i->ARM64in.XAssisted.dstGA);
+         mapRegs_ARM64AMode(m, i->ARM64in.XAssisted.amPC);
+         return;
+      case ARM64in_CSel:
+         i->ARM64in.CSel.dst  = lookupHRegRemap(m, i->ARM64in.CSel.dst);
+         i->ARM64in.CSel.argL = lookupHRegRemap(m, i->ARM64in.CSel.argL);
+         i->ARM64in.CSel.argR = lookupHRegRemap(m, i->ARM64in.CSel.argR);
+         return;
+      case ARM64in_Call:
+         /* Mentions only fixed (real) registers; nothing to remap. */
+         return;
+      case ARM64in_AddToSP:
+         return;
+      case ARM64in_FromSP:
+         i->ARM64in.FromSP.dst = lookupHRegRemap(m, i->ARM64in.FromSP.dst);
+         return;
+      case ARM64in_Mul:
+         i->ARM64in.Mul.dst  = lookupHRegRemap(m, i->ARM64in.Mul.dst);
+         i->ARM64in.Mul.argL = lookupHRegRemap(m, i->ARM64in.Mul.argL);
+         i->ARM64in.Mul.argR = lookupHRegRemap(m, i->ARM64in.Mul.argR);
+         return;
+      case ARM64in_LdrEX:
+         return;
+      case ARM64in_StrEX:
+         return;
+      case ARM64in_MFence:
+         return;
+      case ARM64in_VLdStH:
+         i->ARM64in.VLdStH.hD = lookupHRegRemap(m, i->ARM64in.VLdStH.hD);
+         i->ARM64in.VLdStH.rN = lookupHRegRemap(m, i->ARM64in.VLdStH.rN);
+         return;
+      case ARM64in_VLdStS:
+         i->ARM64in.VLdStS.sD = lookupHRegRemap(m, i->ARM64in.VLdStS.sD);
+         i->ARM64in.VLdStS.rN = lookupHRegRemap(m, i->ARM64in.VLdStS.rN);
+         return;
+      case ARM64in_VLdStD:
+         i->ARM64in.VLdStD.dD = lookupHRegRemap(m, i->ARM64in.VLdStD.dD);
+         i->ARM64in.VLdStD.rN = lookupHRegRemap(m, i->ARM64in.VLdStD.rN);
+         return;
+      case ARM64in_VLdStQ:
+         i->ARM64in.VLdStQ.rQ = lookupHRegRemap(m, i->ARM64in.VLdStQ.rQ);
+         i->ARM64in.VLdStQ.rN = lookupHRegRemap(m, i->ARM64in.VLdStQ.rN);
+         return;
+      case ARM64in_VCvtI2F:
+         i->ARM64in.VCvtI2F.rS = lookupHRegRemap(m, i->ARM64in.VCvtI2F.rS);
+         i->ARM64in.VCvtI2F.rD = lookupHRegRemap(m, i->ARM64in.VCvtI2F.rD);
+         return;
+      case ARM64in_VCvtF2I:
+         i->ARM64in.VCvtF2I.rS = lookupHRegRemap(m, i->ARM64in.VCvtF2I.rS);
+         i->ARM64in.VCvtF2I.rD = lookupHRegRemap(m, i->ARM64in.VCvtF2I.rD);
+         return;
+      case ARM64in_VCvtSD:
+         i->ARM64in.VCvtSD.dst = lookupHRegRemap(m, i->ARM64in.VCvtSD.dst);
+         i->ARM64in.VCvtSD.src = lookupHRegRemap(m, i->ARM64in.VCvtSD.src);
+         return;
+      case ARM64in_VCvtHS:
+         i->ARM64in.VCvtHS.dst = lookupHRegRemap(m, i->ARM64in.VCvtHS.dst);
+         i->ARM64in.VCvtHS.src = lookupHRegRemap(m, i->ARM64in.VCvtHS.src);
+         return;
+      case ARM64in_VCvtHD:
+         i->ARM64in.VCvtHD.dst = lookupHRegRemap(m, i->ARM64in.VCvtHD.dst);
+         i->ARM64in.VCvtHD.src = lookupHRegRemap(m, i->ARM64in.VCvtHD.src);
+         return;
+      case ARM64in_VUnaryD:
+         i->ARM64in.VUnaryD.dst = lookupHRegRemap(m, i->ARM64in.VUnaryD.dst);
+         i->ARM64in.VUnaryD.src = lookupHRegRemap(m, i->ARM64in.VUnaryD.src);
+         return;
+      case ARM64in_VUnaryS:
+         i->ARM64in.VUnaryS.dst = lookupHRegRemap(m, i->ARM64in.VUnaryS.dst);
+         i->ARM64in.VUnaryS.src = lookupHRegRemap(m, i->ARM64in.VUnaryS.src);
+         return;
+      case ARM64in_VBinD:
+         i->ARM64in.VBinD.dst  = lookupHRegRemap(m, i->ARM64in.VBinD.dst);
+         i->ARM64in.VBinD.argL = lookupHRegRemap(m, i->ARM64in.VBinD.argL);
+         i->ARM64in.VBinD.argR = lookupHRegRemap(m, i->ARM64in.VBinD.argR);
+         return;
+      case ARM64in_VBinS:
+         i->ARM64in.VBinS.dst  = lookupHRegRemap(m, i->ARM64in.VBinS.dst);
+         i->ARM64in.VBinS.argL = lookupHRegRemap(m, i->ARM64in.VBinS.argL);
+         i->ARM64in.VBinS.argR = lookupHRegRemap(m, i->ARM64in.VBinS.argR);
+         return;
+      case ARM64in_VCmpD:
+         i->ARM64in.VCmpD.argL = lookupHRegRemap(m, i->ARM64in.VCmpD.argL);
+         i->ARM64in.VCmpD.argR = lookupHRegRemap(m, i->ARM64in.VCmpD.argR);
+         return;
+      case ARM64in_VCmpS:
+         i->ARM64in.VCmpS.argL = lookupHRegRemap(m, i->ARM64in.VCmpS.argL);
+         i->ARM64in.VCmpS.argR = lookupHRegRemap(m, i->ARM64in.VCmpS.argR);
+         return;
+      case ARM64in_VFCSel:
+         i->ARM64in.VFCSel.argL = lookupHRegRemap(m, i->ARM64in.VFCSel.argL);
+         i->ARM64in.VFCSel.argR = lookupHRegRemap(m, i->ARM64in.VFCSel.argR);
+         i->ARM64in.VFCSel.dst  = lookupHRegRemap(m, i->ARM64in.VFCSel.dst);
+         return;
+      case ARM64in_FPCR:
+         i->ARM64in.FPCR.iReg = lookupHRegRemap(m, i->ARM64in.FPCR.iReg);
+         return;
+      case ARM64in_FPSR:
+         i->ARM64in.FPSR.iReg = lookupHRegRemap(m, i->ARM64in.FPSR.iReg);
+         return;
+      case ARM64in_VBinV:
+         i->ARM64in.VBinV.dst  = lookupHRegRemap(m, i->ARM64in.VBinV.dst);
+         i->ARM64in.VBinV.argL = lookupHRegRemap(m, i->ARM64in.VBinV.argL);
+         i->ARM64in.VBinV.argR = lookupHRegRemap(m, i->ARM64in.VBinV.argR);
+         return;
+      case ARM64in_VModifyV:
+         i->ARM64in.VModifyV.mod = lookupHRegRemap(m, i->ARM64in.VModifyV.mod);
+         i->ARM64in.VModifyV.arg = lookupHRegRemap(m, i->ARM64in.VModifyV.arg);
+         return;
+      case ARM64in_VUnaryV:
+         i->ARM64in.VUnaryV.dst = lookupHRegRemap(m, i->ARM64in.VUnaryV.dst);
+         i->ARM64in.VUnaryV.arg = lookupHRegRemap(m, i->ARM64in.VUnaryV.arg);
+         return;
+      case ARM64in_VNarrowV:
+         i->ARM64in.VNarrowV.dst = lookupHRegRemap(m, i->ARM64in.VNarrowV.dst);
+         i->ARM64in.VNarrowV.src = lookupHRegRemap(m, i->ARM64in.VNarrowV.src);
+         return;
+      case ARM64in_VShiftImmV:
+         i->ARM64in.VShiftImmV.dst
+            = lookupHRegRemap(m, i->ARM64in.VShiftImmV.dst);
+         i->ARM64in.VShiftImmV.src
+            = lookupHRegRemap(m, i->ARM64in.VShiftImmV.src);
+         return;
+      case ARM64in_VExtV:
+         i->ARM64in.VExtV.dst = lookupHRegRemap(m, i->ARM64in.VExtV.dst);
+         i->ARM64in.VExtV.srcLo = lookupHRegRemap(m, i->ARM64in.VExtV.srcLo);
+         i->ARM64in.VExtV.srcHi = lookupHRegRemap(m, i->ARM64in.VExtV.srcHi);
+         return;
+      case ARM64in_VImmQ:
+         i->ARM64in.VImmQ.rQ = lookupHRegRemap(m, i->ARM64in.VImmQ.rQ);
+         return;
+      case ARM64in_VDfromX:
+         i->ARM64in.VDfromX.rD
+            = lookupHRegRemap(m, i->ARM64in.VDfromX.rD);
+         i->ARM64in.VDfromX.rX
+            = lookupHRegRemap(m, i->ARM64in.VDfromX.rX);
+         return;
+      case ARM64in_VQfromX:
+         i->ARM64in.VQfromX.rQ
+            = lookupHRegRemap(m, i->ARM64in.VQfromX.rQ);
+         i->ARM64in.VQfromX.rXlo
+            = lookupHRegRemap(m, i->ARM64in.VQfromX.rXlo);
+         return;
+      case ARM64in_VQfromXX:
+         i->ARM64in.VQfromXX.rQ
+            = lookupHRegRemap(m, i->ARM64in.VQfromXX.rQ);
+         i->ARM64in.VQfromXX.rXhi
+            = lookupHRegRemap(m, i->ARM64in.VQfromXX.rXhi);
+         i->ARM64in.VQfromXX.rXlo
+            = lookupHRegRemap(m, i->ARM64in.VQfromXX.rXlo);
+         return;
+      case ARM64in_VXfromQ:
+         i->ARM64in.VXfromQ.rX
+            = lookupHRegRemap(m, i->ARM64in.VXfromQ.rX);
+         i->ARM64in.VXfromQ.rQ
+            = lookupHRegRemap(m, i->ARM64in.VXfromQ.rQ);
+         return;
+      case ARM64in_VXfromDorS:
+         i->ARM64in.VXfromDorS.rX
+            = lookupHRegRemap(m, i->ARM64in.VXfromDorS.rX);
+         i->ARM64in.VXfromDorS.rDorS
+            = lookupHRegRemap(m, i->ARM64in.VXfromDorS.rDorS);
+         return;
+      case ARM64in_VMov:
+         i->ARM64in.VMov.dst = lookupHRegRemap(m, i->ARM64in.VMov.dst);
+         i->ARM64in.VMov.src = lookupHRegRemap(m, i->ARM64in.VMov.src);
+         return;
+      case ARM64in_EvCheck:
+         /* We expect both amodes only to mention x21, so this is in
+            fact pointless, since x21 isn't allocatable, but
+            anyway.. */
+         mapRegs_ARM64AMode(m, i->ARM64in.EvCheck.amCounter);
+         mapRegs_ARM64AMode(m, i->ARM64in.EvCheck.amFailAddr);
+         return;
+      case ARM64in_ProfInc:
+         /* hardwires x8 and x9 -- nothing to modify. */
+         return;
+      default:
+         ppARM64Instr(i);
+         vpanic("mapRegs_ARM64Instr");
+   }
+}
+
+/* Figure out if i represents a reg-reg move, and if so assign the
+   source and destination to *src and *dst.  If in doubt say No.  Used
+   by the register allocator to do move coalescing. 
+*/
+Bool isMove_ARM64Instr ( const ARM64Instr* i, HReg* src, HReg* dst )
+{
+   /* Recognise plain reg-reg copies -- integer MovI and FP/vector
+      VMov -- and report their operands via *src/*dst, so the register
+      allocator can coalesce them.  Anything else: say No. */
+   if (i->tag == ARM64in_MovI) {
+      *src = i->ARM64in.MovI.src;
+      *dst = i->ARM64in.MovI.dst;
+      return True;
+   }
+   if (i->tag == ARM64in_VMov) {
+      *src = i->ARM64in.VMov.src;
+      *dst = i->ARM64in.VMov.dst;
+      return True;
+   }
+   return False;
+}
+
+
+/* Generate arm spill/reload instructions under the direction of the
+   register allocator.  Note it's critical these don't write the
+   condition codes. */
+
+/* Generate instruction(s) spilling real register |rreg| to the slot at
+   byte offset |offsetB| from the baseblock pointer (x21).  Emits one or
+   two insns via *i1/*i2; must not write the condition codes. */
+void genSpill_ARM64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                      HReg rreg, Int offsetB, Bool mode64 )
+{
+   HRegClass rclass;
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+   vassert(mode64 == True);
+   *i1 = *i2 = NULL;
+   rclass = hregClass(rreg);
+   switch (rclass) {
+      case HRcInt64:
+         vassert(0 == (offsetB & 7));
+         offsetB >>= 3;   /* RI12 amode encodes the offset scaled by 8 */
+         vassert(offsetB < 4096);
+         *i1 = ARM64Instr_LdSt64(
+                  False/*!isLoad*/, 
+                  rreg, 
+                  ARM64AMode_RI12(hregARM64_X21(), offsetB, 8)
+               );
+         return;
+      case HRcFlt64:
+         vassert(0 == (offsetB & 7));
+         vassert(offsetB >= 0 && offsetB < 32768);
+         *i1 = ARM64Instr_VLdStD(False/*!isLoad*/,
+                                 rreg, hregARM64_X21(), offsetB);
+         return;
+      case HRcVec128: {
+         /* No reg+imm amode for 128-bit stores: compute the address
+            into the reserved spill temporary x9 first. */
+         HReg x21  = hregARM64_X21();  // baseblock
+         HReg x9   = hregARM64_X9();   // spill temporary
+         vassert(0 == (offsetB & 15)); // check sane alignment
+         vassert(offsetB < 4096);
+         *i1 = ARM64Instr_Arith(x9, x21, ARM64RIA_I12(offsetB, 0), True);
+         *i2 = ARM64Instr_VLdStQ(False/*!isLoad*/, rreg, x9);
+         return;
+      }
+      default:
+         ppHRegClass(rclass);
+         /* was "genSpill_ARM: ..." -- stale copy-paste from the 32-bit
+            ARM backend; name the actual function in the diagnostic. */
+         vpanic("genSpill_ARM64: unimplemented regclass");
+   }
+}
+
+/* Generate instruction(s) reloading real register |rreg| from the slot
+   at byte offset |offsetB| from the baseblock pointer (x21).  Mirrors
+   genSpill_ARM64; must not write the condition codes. */
+void genReload_ARM64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                       HReg rreg, Int offsetB, Bool mode64 )
+{
+   HRegClass rclass;
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+   vassert(mode64 == True);
+   *i1 = *i2 = NULL;
+   rclass = hregClass(rreg);
+   switch (rclass) {
+      case HRcInt64:
+         vassert(0 == (offsetB & 7));
+         offsetB >>= 3;   /* RI12 amode encodes the offset scaled by 8 */
+         vassert(offsetB < 4096);
+         *i1 = ARM64Instr_LdSt64(
+                  True/*isLoad*/, 
+                  rreg, 
+                  ARM64AMode_RI12(hregARM64_X21(), offsetB, 8)
+               );
+         return;
+      case HRcFlt64:
+         vassert(0 == (offsetB & 7));
+         vassert(offsetB >= 0 && offsetB < 32768);
+         *i1 = ARM64Instr_VLdStD(True/*isLoad*/,
+                                 rreg, hregARM64_X21(), offsetB);
+         return;
+      case HRcVec128: {
+         /* No reg+imm amode for 128-bit loads: compute the address
+            into the reserved spill temporary x9 first. */
+         HReg x21  = hregARM64_X21();  // baseblock
+         HReg x9   = hregARM64_X9();   // spill temporary
+         vassert(0 == (offsetB & 15)); // check sane alignment
+         vassert(offsetB < 4096);
+         *i1 = ARM64Instr_Arith(x9, x21, ARM64RIA_I12(offsetB, 0), True);
+         *i2 = ARM64Instr_VLdStQ(True/*isLoad*/, rreg, x9);
+         return;
+      }
+      default:
+         ppHRegClass(rclass);
+         /* was "genReload_ARM: ..." -- stale copy-paste from the 32-bit
+            ARM backend; name the actual function in the diagnostic. */
+         vpanic("genReload_ARM64: unimplemented regclass");
+   }
+}
+
+
+/* Emit an instruction into buf and return the number of bytes used.
+   Note that buf is not the insn's final place, and therefore it is
+   imperative to emit position-independent code. */
+
+/* Hardware encoding of a real 64-bit integer register.  Encodings run
+   0..30; 31 (sp/xzr) is never handed out by the allocator. */
+static inline UInt iregEnc ( HReg r )
+{
+   vassert(hregClass(r) == HRcInt64);
+   vassert(!hregIsVirtual(r));
+   UInt enc = hregEncoding(r);
+   vassert(enc <= 30);
+   return enc;
+}
+
+/* Hardware encoding of a real 64-bit FP (D) register, 0..31. */
+static inline UInt dregEnc ( HReg r )
+{
+   vassert(hregClass(r) == HRcFlt64);
+   vassert(!hregIsVirtual(r));
+   UInt enc = hregEncoding(r);
+   vassert(enc <= 31);
+   return enc;
+}
+
+/* Hardware encoding of a real 128-bit vector (Q) register, 0..31. */
+static inline UInt qregEnc ( HReg r )
+{
+   vassert(hregClass(r) == HRcVec128);
+   vassert(!hregIsVirtual(r));
+   UInt enc = hregEncoding(r);
+   vassert(enc <= 31);
+   return enc;
+}
+
+#define BITS4(zzb3,zzb2,zzb1,zzb0) \
+   (((zzb3) << 3) | ((zzb2) << 2) | ((zzb1) << 1) | (zzb0))
+
+#define X00  BITS4(0,0, 0,0)
+#define X01  BITS4(0,0, 0,1)
+#define X10  BITS4(0,0, 1,0)
+#define X11  BITS4(0,0, 1,1)
+
+#define X000 BITS4(0, 0,0,0)
+#define X001 BITS4(0, 0,0,1)
+#define X010 BITS4(0, 0,1,0)
+#define X011 BITS4(0, 0,1,1)
+#define X100 BITS4(0, 1,0,0)
+#define X101 BITS4(0, 1,0,1)
+#define X110 BITS4(0, 1,1,0)
+#define X111 BITS4(0, 1,1,1)
+
+#define X0000 BITS4(0,0,0,0)
+#define X0001 BITS4(0,0,0,1)
+#define X0010 BITS4(0,0,1,0)
+#define X0011 BITS4(0,0,1,1)
+
+#define BITS8(zzb7,zzb6,zzb5,zzb4,zzb3,zzb2,zzb1,zzb0) \
+  ((BITS4(zzb7,zzb6,zzb5,zzb4) << 4) | BITS4(zzb3,zzb2,zzb1,zzb0))
+
+#define X00000   BITS8(0,0,0, 0,0,0,0,0)
+#define X00001   BITS8(0,0,0, 0,0,0,0,1)
+#define X00110   BITS8(0,0,0, 0,0,1,1,0)
+#define X00111   BITS8(0,0,0, 0,0,1,1,1)
+#define X01000   BITS8(0,0,0, 0,1,0,0,0)
+#define X10000   BITS8(0,0,0, 1,0,0,0,0)
+#define X11000   BITS8(0,0,0, 1,1,0,0,0)
+#define X11110   BITS8(0,0,0, 1,1,1,1,0)
+#define X11111   BITS8(0,0,0, 1,1,1,1,1)
+
+#define X000000  BITS8(0,0, 0,0,0,0,0,0)
+#define X000001  BITS8(0,0, 0,0,0,0,0,1)
+#define X000010  BITS8(0,0, 0,0,0,0,1,0)
+#define X000011  BITS8(0,0, 0,0,0,0,1,1)
+#define X000100  BITS8(0,0, 0,0,0,1,0,0)
+#define X000110  BITS8(0,0, 0,0,0,1,1,0)
+#define X000111  BITS8(0,0, 0,0,0,1,1,1)
+#define X001000  BITS8(0,0, 0,0,1,0,0,0)
+#define X001001  BITS8(0,0, 0,0,1,0,0,1)
+#define X001010  BITS8(0,0, 0,0,1,0,1,0)
+#define X001011  BITS8(0,0, 0,0,1,0,1,1)
+#define X001101  BITS8(0,0, 0,0,1,1,0,1)
+#define X001110  BITS8(0,0, 0,0,1,1,1,0)
+#define X001111  BITS8(0,0, 0,0,1,1,1,1)
+#define X010000  BITS8(0,0, 0,1,0,0,0,0)
+#define X010001  BITS8(0,0, 0,1,0,0,0,1)
+#define X010010  BITS8(0,0, 0,1,0,0,1,0)
+#define X010011  BITS8(0,0, 0,1,0,0,1,1)
+#define X010101  BITS8(0,0, 0,1,0,1,0,1)
+#define X010110  BITS8(0,0, 0,1,0,1,1,0)
+#define X010111  BITS8(0,0, 0,1,0,1,1,1)
+#define X011001  BITS8(0,0, 0,1,1,0,0,1)
+#define X011010  BITS8(0,0, 0,1,1,0,1,0)
+#define X011011  BITS8(0,0, 0,1,1,0,1,1)
+#define X011101  BITS8(0,0, 0,1,1,1,0,1)
+#define X011110  BITS8(0,0, 0,1,1,1,1,0)
+#define X011111  BITS8(0,0, 0,1,1,1,1,1)
+#define X100001  BITS8(0,0, 1,0,0,0,0,1)
+#define X100011  BITS8(0,0, 1,0,0,0,1,1)
+#define X100100  BITS8(0,0, 1,0,0,1,0,0)
+#define X100101  BITS8(0,0, 1,0,0,1,0,1)
+#define X100110  BITS8(0,0, 1,0,0,1,1,0)
+#define X100111  BITS8(0,0, 1,0,0,1,1,1)
+#define X101101  BITS8(0,0, 1,0,1,1,0,1)
+#define X101110  BITS8(0,0, 1,0,1,1,1,0)
+#define X110000  BITS8(0,0, 1,1,0,0,0,0)
+#define X110001  BITS8(0,0, 1,1,0,0,0,1)
+#define X110010  BITS8(0,0, 1,1,0,0,1,0)
+#define X110100  BITS8(0,0, 1,1,0,1,0,0)
+#define X110101  BITS8(0,0, 1,1,0,1,0,1)
+#define X110110  BITS8(0,0, 1,1,0,1,1,0)
+#define X110111  BITS8(0,0, 1,1,0,1,1,1)
+#define X111000  BITS8(0,0, 1,1,1,0,0,0)
+#define X111001  BITS8(0,0, 1,1,1,0,0,1)
+#define X111101  BITS8(0,0, 1,1,1,1,0,1)
+#define X111110  BITS8(0,0, 1,1,1,1,1,0)
+#define X111111  BITS8(0,0, 1,1,1,1,1,1)
+
+#define X0001000  BITS8(0, 0,0,0,1,0,0,0)
+#define X0010000  BITS8(0, 0,0,1,0,0,0,0)
+#define X0100000  BITS8(0, 0,1,0,0,0,0,0)
+#define X1000000  BITS8(0, 1,0,0,0,0,0,0)
+
+#define X00100000  BITS8(0,0,1,0,0,0,0,0)
+#define X00100001  BITS8(0,0,1,0,0,0,0,1)
+#define X00100010  BITS8(0,0,1,0,0,0,1,0)
+#define X00100011  BITS8(0,0,1,0,0,0,1,1)
+#define X01010000  BITS8(0,1,0,1,0,0,0,0)
+#define X01010001  BITS8(0,1,0,1,0,0,0,1)
+#define X01010100  BITS8(0,1,0,1,0,1,0,0)
+#define X01011000  BITS8(0,1,0,1,1,0,0,0)
+#define X01100000  BITS8(0,1,1,0,0,0,0,0)
+#define X01100001  BITS8(0,1,1,0,0,0,0,1)
+#define X01100010  BITS8(0,1,1,0,0,0,1,0)
+#define X01100011  BITS8(0,1,1,0,0,0,1,1)
+#define X01110000  BITS8(0,1,1,1,0,0,0,0)
+#define X01110001  BITS8(0,1,1,1,0,0,0,1)
+#define X01110010  BITS8(0,1,1,1,0,0,1,0)
+#define X01110011  BITS8(0,1,1,1,0,0,1,1)
+#define X01110100  BITS8(0,1,1,1,0,1,0,0)
+#define X01110101  BITS8(0,1,1,1,0,1,0,1)
+#define X01110110  BITS8(0,1,1,1,0,1,1,0)
+#define X01110111  BITS8(0,1,1,1,0,1,1,1)
+#define X11000001  BITS8(1,1,0,0,0,0,0,1)
+#define X11000011  BITS8(1,1,0,0,0,0,1,1)
+#define X11010100  BITS8(1,1,0,1,0,1,0,0)
+#define X11010110  BITS8(1,1,0,1,0,1,1,0)
+#define X11011000  BITS8(1,1,0,1,1,0,0,0)
+#define X11011010  BITS8(1,1,0,1,1,0,1,0)
+#define X11011110  BITS8(1,1,0,1,1,1,1,0)
+#define X11100010  BITS8(1,1,1,0,0,0,1,0)
+#define X11110001  BITS8(1,1,1,1,0,0,0,1)
+#define X11110011  BITS8(1,1,1,1,0,0,1,1)
+#define X11110101  BITS8(1,1,1,1,0,1,0,1)
+#define X11110111  BITS8(1,1,1,1,0,1,1,1)
+
+
+/* --- 4 fields --- */
+
+/* Pack fields of widths 8,19,1,4 (msb first) into a 32-bit word. */
+static inline UInt X_8_19_1_4 ( UInt f1, UInt f2, UInt f3, UInt f4 ) {
+   vassert(8+19+1+4 == 32);
+   vassert(f1 < (1<<8));
+   vassert(f2 < (1<<19));
+   vassert(f3 < (1<<1));
+   vassert(f4 < (1<<4));
+   /* f1 at bits 31:24, f2 at 23:5, f3 at bit 4, f4 at 3:0. */
+   return (f1 << 24) | (f2 << 5) | (f3 << 4) | f4;
+}
+
+/* --- 5 fields --- */
+
+/* Pack fields of widths 3,6,2,16,5 (msb first) into a 32-bit word. */
+static inline UInt X_3_6_2_16_5 ( UInt f1, UInt f2,
+                                  UInt f3, UInt f4, UInt f5 ) {
+   vassert(3+6+2+16+5 == 32);
+   vassert(f1 < (1<<3));
+   vassert(f2 < (1<<6));
+   vassert(f3 < (1<<2));
+   vassert(f4 < (1<<16));
+   vassert(f5 < (1<<5));
+   /* f1 at bits 31:29, f2 at 28:23, f3 at 22:21, f4 at 20:5, f5 at 4:0. */
+   return (f1 << 29) | (f2 << 23) | (f3 << 21) | (f4 << 5) | f5;
+}
+
+/* --- 6 fields --- */
+
+/* Pack six fields of widths 2, 6, 2, 12, 5 and 5 bits (given most
+   significant first) into a 32-bit instruction word. */
+static inline UInt X_2_6_2_12_5_5 ( UInt f1, UInt f2, UInt f3,
+                                    UInt f4, UInt f5, UInt f6 ) {
+   vassert(2+6+2+12+5+5 == 32);
+   vassert(f1 < (1<<2));
+   vassert(f2 < (1<<6));
+   vassert(f3 < (1<<2));
+   vassert(f4 < (1<<12));
+   vassert(f5 < (1<<5));
+   vassert(f6 < (1<<5));
+   /* OR each field directly into its final bit position. */
+   return (f1 << 30) | (f2 << 24) | (f3 << 22)
+          | (f4 << 10) | (f5 << 5) | (f6 << 0);
+}
+
+/* Pack six fields of widths 3, 8, 5, 6, 5 and 5 bits (given most
+   significant first) into a 32-bit instruction word. */
+static inline UInt X_3_8_5_6_5_5 ( UInt f1, UInt f2, UInt f3,
+                                   UInt f4, UInt f5, UInt f6 ) {
+   vassert(3+8+5+6+5+5 == 32);
+   vassert(f1 < (1<<3));
+   vassert(f2 < (1<<8));
+   vassert(f3 < (1<<5));
+   vassert(f4 < (1<<6));
+   vassert(f5 < (1<<5));
+   vassert(f6 < (1<<5));
+   /* OR each field directly into its final bit position. */
+   return (f1 << 29) | (f2 << 21) | (f3 << 16)
+          | (f4 << 10) | (f5 << 5) | (f6 << 0);
+}
+
+/* Pack six fields of widths 3, 5, 8, 6, 5 and 5 bits (given most
+   significant first) into a 32-bit instruction word. */
+static inline UInt X_3_5_8_6_5_5 ( UInt f1, UInt f2, UInt f3,
+                                   UInt f4, UInt f5, UInt f6 ) {
+   /* Fixed: the self-check previously read 3+8+5+6+5+5 (copy-pasted
+      from X_3_8_5_6_5_5).  Same total, but the terms now match this
+      variant's actual field widths, so a future width edit cannot be
+      silently masked. */
+   vassert(3+5+8+6+5+5 == 32);
+   vassert(f1 < (1<<3));
+   vassert(f2 < (1<<5));
+   vassert(f3 < (1<<8));
+   vassert(f4 < (1<<6));
+   vassert(f5 < (1<<5));
+   vassert(f6 < (1<<5));
+   UInt w = 0;
+   w = (w <<  3) | f1;
+   w = (w <<  5) | f2;
+   w = (w <<  8) | f3;
+   w = (w <<  6) | f4;
+   w = (w <<  5) | f5;
+   w = (w <<  5) | f6;
+   return w;
+}
+
+/* Pack six fields of widths 3, 6, 7, 6, 5 and 5 bits (given most
+   significant first) into a 32-bit instruction word. */
+static inline UInt X_3_6_7_6_5_5 ( UInt f1, UInt f2, UInt f3,
+                                   UInt f4, UInt f5, UInt f6 ) {
+   vassert(3+6+7+6+5+5 == 32);
+   vassert(f1 < (1<<3));
+   vassert(f2 < (1<<6));
+   vassert(f3 < (1<<7));
+   vassert(f4 < (1<<6));
+   vassert(f5 < (1<<5));
+   vassert(f6 < (1<<5));
+   /* OR each field directly into its final bit position. */
+   return (f1 << 29) | (f2 << 23) | (f3 << 16)
+          | (f4 << 10) | (f5 << 5) | (f6 << 0);
+}
+
+/* --- 7 fields --- */
+
+/* Pack seven fields of widths 2, 6, 3, 9, 2, 5 and 5 bits (given
+   most significant first) into a 32-bit instruction word. */
+static inline UInt X_2_6_3_9_2_5_5 ( UInt f1, UInt f2, UInt f3,
+                                     UInt f4, UInt f5, UInt f6, UInt f7 ) {
+   vassert(2+6+3+9+2+5+5 == 32);
+   vassert(f1 < (1<<2));
+   vassert(f2 < (1<<6));
+   vassert(f3 < (1<<3));
+   vassert(f4 < (1<<9));
+   vassert(f5 < (1<<2));
+   vassert(f6 < (1<<5));
+   vassert(f7 < (1<<5));
+   /* OR each field directly into its final bit position. */
+   return (f1 << 30) | (f2 << 24) | (f3 << 21) | (f4 << 12)
+          | (f5 << 10) | (f6 << 5) | (f7 << 0);
+}
+
+/* Pack seven fields of widths 3, 6, 1, 6, 6, 5 and 5 bits (given
+   most significant first) into a 32-bit instruction word. */
+static inline UInt X_3_6_1_6_6_5_5 ( UInt f1, UInt f2, UInt f3,
+                                     UInt f4, UInt f5, UInt f6, UInt f7 ) {
+   vassert(3+6+1+6+6+5+5 == 32);
+   vassert(f1 < (1<<3));
+   vassert(f2 < (1<<6));
+   vassert(f3 < (1<<1));
+   vassert(f4 < (1<<6));
+   vassert(f5 < (1<<6));
+   vassert(f6 < (1<<5));
+   vassert(f7 < (1<<5));
+   /* OR each field directly into its final bit position. */
+   return (f1 << 29) | (f2 << 23) | (f3 << 22) | (f4 << 16)
+          | (f5 << 10) | (f6 << 5) | (f7 << 0);
+}
+
+
+//ZZ #define X0000  BITS4(0,0,0,0)
+//ZZ #define X0001  BITS4(0,0,0,1)
+//ZZ #define X0010  BITS4(0,0,1,0)
+//ZZ #define X0011  BITS4(0,0,1,1)
+//ZZ #define X0100  BITS4(0,1,0,0)
+//ZZ #define X0101  BITS4(0,1,0,1)
+//ZZ #define X0110  BITS4(0,1,1,0)
+//ZZ #define X0111  BITS4(0,1,1,1)
+//ZZ #define X1000  BITS4(1,0,0,0)
+//ZZ #define X1001  BITS4(1,0,0,1)
+//ZZ #define X1010  BITS4(1,0,1,0)
+//ZZ #define X1011  BITS4(1,0,1,1)
+//ZZ #define X1100  BITS4(1,1,0,0)
+//ZZ #define X1101  BITS4(1,1,0,1)
+//ZZ #define X1110  BITS4(1,1,1,0)
+//ZZ #define X1111  BITS4(1,1,1,1)
+/*
+#define XXXXX___(zzx7,zzx6,zzx5,zzx4,zzx3) \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) |  \
+    (((zzx5) & 0xF) << 20) | (((zzx4) & 0xF) << 16) |  \
+    (((zzx3) & 0xF) << 12))
+
+#define XXXXXX__(zzx7,zzx6,zzx5,zzx4,zzx3,zzx2)        \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) |  \
+    (((zzx5) & 0xF) << 20) | (((zzx4) & 0xF) << 16) |  \
+    (((zzx3) & 0xF) << 12) | (((zzx2) & 0xF) <<  8))
+
+#define XXXXX__X(zzx7,zzx6,zzx5,zzx4,zzx3,zzx0)        \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) |  \
+    (((zzx5) & 0xF) << 20) | (((zzx4) & 0xF) << 16) |  \
+    (((zzx3) & 0xF) << 12) | (((zzx0) & 0xF) <<  0))
+
+#define XXX___XX(zzx7,zzx6,zzx5,zzx1,zzx0) \
+  ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) | \
+   (((zzx5) & 0xF) << 20) | (((zzx1) & 0xF) << 4) | \
+   (((zzx0) & 0xF) << 0))
+
+#define XXXXXXXX(zzx7,zzx6,zzx5,zzx4,zzx3,zzx2,zzx1,zzx0)  \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) |  \
+    (((zzx5) & 0xF) << 20) | (((zzx4) & 0xF) << 16) |  \
+    (((zzx3) & 0xF) << 12) | (((zzx2) & 0xF) <<  8) |  \
+    (((zzx1) & 0xF) <<  4) | (((zzx0) & 0xF) <<  0))
+
+#define XX______(zzx7,zzx6) \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24))
+*/
+
+
+/* Get an immediate into a register, using only that register. */
+static UInt* imm64_to_ireg ( UInt* p, Int xD, ULong imm64 )
+{
+   // Emits a variable-length (1..4 insn) MOVZ/MOVK sequence; use
+   // imm64_to_ireg_EXACTLY4 instead when a fixed size is required.
+   if (imm64 == 0) {
+      // This has to be special-cased, since the logic below
+      // will leave the register unchanged in this case.
+      // MOVZ xD, #0, LSL #0
+      *p++ = X_3_6_2_16_5(X110, X100101, X00, 0/*imm16*/, xD);
+      return p;
+   }
+
+   // There must be at least one non-zero halfword.  Find the
+   // lowest nonzero such, and use MOVZ to install it and zero
+   // out the rest of the register.
+   UShort h[4];
+   h[3] = (UShort)((imm64 >> 48) & 0xFFFF);
+   h[2] = (UShort)((imm64 >> 32) & 0xFFFF);
+   h[1] = (UShort)((imm64 >> 16) & 0xFFFF);
+   h[0] = (UShort)((imm64 >>  0) & 0xFFFF);
+
+   UInt i;
+   for (i = 0; i < 4; i++) {
+      if (h[i] != 0)
+         break;
+   }
+   vassert(i < 4);  // guaranteed by the imm64 != 0 check above
+
+   // MOVZ xD, h[i], LSL (16*i)
+   *p++ = X_3_6_2_16_5(X110, X100101, i, h[i], xD);
+
+   // Work on upwards through h[i], using MOVK to stuff in any
+   // remaining nonzero elements.
+   i++;
+   for (; i < 4; i++) {
+      if (h[i] == 0)
+         continue;
+      // MOVK xD, h[i], LSL (16*i)
+      *p++ = X_3_6_2_16_5(X111, X100101, i, h[i], xD);
+   }
+
+   return p;
+}
+
+/* Get an immediate into a register, using only that register, and
+   generating exactly 4 instructions, regardless of the value of the
+   immediate. This is used when generating sections of code that need
+   to be patched later, so as to guarantee a specific size. */
+static UInt* imm64_to_ireg_EXACTLY4 ( UInt* p, Int xD, ULong imm64 )
+{
+   /* Split the immediate into its four 16-bit halfwords,
+      lowest first. */
+   UShort hw[4];
+   UInt   j;
+   for (j = 0; j < 4; j++)
+      hw[j] = (UShort)((imm64 >> (16 * j)) & 0xFFFF);
+   /* MOVZ xD, hw[0], LSL 0 -- installs the low halfword and zeroes
+      the rest of the register. */
+   *p++ = X_3_6_2_16_5(X110, X100101, 0, hw[0], xD);
+   /* MOVK xD, hw[j], LSL (16*j) for the three remaining halfwords,
+      emitted unconditionally (even when zero) so that the sequence
+      is always exactly 4 instructions. */
+   for (j = 1; j < 4; j++)
+      *p++ = X_3_6_2_16_5(X111, X100101, j, hw[j], xD);
+   return p;
+}
+
+/* Check whether p points at a 4-insn sequence cooked up by
+   imm64_to_ireg_EXACTLY4(). */
+static Bool is_imm64_to_ireg_EXACTLY4 ( UInt* p, Int xD, ULong imm64 )
+{
+   /* Split the immediate into its four 16-bit halfwords,
+      lowest first. */
+   UShort hw[4];
+   UInt   j;
+   for (j = 0; j < 4; j++)
+      hw[j] = (UShort)((imm64 >> (16 * j)) & 0xFFFF);
+   /* p[0] must be MOVZ xD, hw[0], LSL 0 ... */
+   if (p[0] != X_3_6_2_16_5(X110, X100101, 0, hw[0], xD))
+      return False;
+   /* ... and p[1..3] must be the matching MOVKs. */
+   for (j = 1; j < 4; j++) {
+      if (p[j] != X_3_6_2_16_5(X111, X100101, j, hw[j], xD))
+         return False;
+   }
+   return True;
+}
+
+
+/* Generate a 8 bit store or 8-to-64 unsigned widening load from/to
+   rD, using the given amode for the address. */
+static UInt* do_load_or_store8 ( UInt* p,
+                                 Bool isLoad, UInt wD, ARM64AMode* am )
+{
+   /* As in do_load_or_store64: register number 31 would encode
+      ZR/SP, so it is not allowed for the data register here. */
+   vassert(wD <= 30);
+   if (am->tag == ARM64am_RI9) {
+      /* STURB Wd, [Xn|SP + simm9]:  00 111000 000 simm9 00 n d
+         LDURB Wd, [Xn|SP + simm9]:  00 111000 010 simm9 00 n d
+      */
+      Int simm9 = am->ARM64am.RI9.simm9;
+      vassert(-256 <= simm9 && simm9 <= 255);
+      UInt instr = X_2_6_3_9_2_5_5(X00, X111000, isLoad ? X010 : X000,
+                                   simm9 & 0x1FF, X00,
+                                   iregEnc(am->ARM64am.RI9.reg), wD);
+      *p++ = instr;
+      return p;
+   }
+   if (am->tag == ARM64am_RI12) {
+      /* STRB Wd, [Xn|SP + uimm12 * 1]:  00 111 001 00 imm12 n d
+         LDRB Wd, [Xn|SP + uimm12 * 1]:  00 111 001 01 imm12 n d
+      */
+      UInt uimm12 = am->ARM64am.RI12.uimm12;
+      UInt scale  = am->ARM64am.RI12.szB;
+      vassert(scale == 1); /* failure of this is serious.  Do not ignore. */
+      UInt xN    = iregEnc(am->ARM64am.RI12.reg);
+      vassert(xN <= 30);
+      UInt instr = X_2_6_2_12_5_5(X00, X111001, isLoad ? X01 : X00,
+                                  uimm12, xN, wD);
+      *p++ = instr;
+      return p;
+   }
+   if (am->tag == ARM64am_RR) {
+      /* STRB Xd, [Xn|SP, Xm]: 00 111 000 001 m 011 0 10 n d
+         LDRB Xd, [Xn|SP, Xm]: 00 111 000 011 m 011 0 10 n d
+      */
+      UInt xN = iregEnc(am->ARM64am.RR.base);
+      UInt xM = iregEnc(am->ARM64am.RR.index);
+      vassert(xN <= 30);
+      UInt instr = X_3_8_5_6_5_5(X001, isLoad ? X11000011 : X11000001, 
+                                 xM, X011010, xN, wD);
+      *p++ = instr;
+      return p;
+   }
+   /* No other amode kinds are handled. */
+   vpanic("do_load_or_store8");
+   vassert(0);  /* NOTREACHED */
+}
+
+
+/* Generate a 16 bit store or 16-to-64 unsigned widening load from/to
+   rD, using the given amode for the address. */
+static UInt* do_load_or_store16 ( UInt* p,
+                                  Bool isLoad, UInt wD, ARM64AMode* am )
+{
+   /* As in do_load_or_store64: register number 31 would encode
+      ZR/SP, so it is not allowed for the data register here. */
+   vassert(wD <= 30);
+   if (am->tag == ARM64am_RI9) {
+      /* STURH Wd, [Xn|SP + simm9]:  01 111000 000 simm9 00 n d
+         LDURH Wd, [Xn|SP + simm9]:  01 111000 010 simm9 00 n d
+      */
+      Int simm9 = am->ARM64am.RI9.simm9;
+      vassert(-256 <= simm9 && simm9 <= 255);
+      UInt instr = X_2_6_3_9_2_5_5(X01, X111000, isLoad ? X010 : X000,
+                                   simm9 & 0x1FF, X00,
+                                   iregEnc(am->ARM64am.RI9.reg), wD);
+      *p++ = instr;
+      return p;
+   }
+   if (am->tag == ARM64am_RI12) {
+      /* STRH Wd, [Xn|SP + uimm12 * 2]:  01 111 001 00 imm12 n d
+         LDRH Wd, [Xn|SP + uimm12 * 2]:  01 111 001 01 imm12 n d
+      */
+      UInt uimm12 = am->ARM64am.RI12.uimm12;
+      UInt scale  = am->ARM64am.RI12.szB;
+      vassert(scale == 2); /* failure of this is serious.  Do not ignore. */
+      UInt xN    = iregEnc(am->ARM64am.RI12.reg);
+      vassert(xN <= 30);
+      UInt instr = X_2_6_2_12_5_5(X01, X111001, isLoad ? X01 : X00,
+                                  uimm12, xN, wD);
+      *p++ = instr;
+      return p;
+   }
+   if (am->tag == ARM64am_RR) {
+      /* STRH Xd, [Xn|SP, Xm]: 01 111 000 001 m 011 0 10 n d
+         LDRH Xd, [Xn|SP, Xm]: 01 111 000 011 m 011 0 10 n d
+      */
+      UInt xN = iregEnc(am->ARM64am.RR.base);
+      UInt xM = iregEnc(am->ARM64am.RR.index);
+      vassert(xN <= 30);
+      UInt instr = X_3_8_5_6_5_5(X011, isLoad ? X11000011 : X11000001, 
+                                 xM, X011010, xN, wD);
+      *p++ = instr;
+      return p;
+   }
+   /* No other amode kinds are handled. */
+   vpanic("do_load_or_store16");
+   vassert(0);  /* NOTREACHED */
+}
+
+
+/* Generate a 32 bit store or 32-to-64 unsigned widening load from/to
+   rD, using the given amode for the address. */
+static UInt* do_load_or_store32 ( UInt* p,
+                                  Bool isLoad, UInt wD, ARM64AMode* am )
+{
+   /* As in do_load_or_store64: register number 31 would encode
+      ZR/SP, so it is not allowed for the data register here. */
+   vassert(wD <= 30);
+   if (am->tag == ARM64am_RI9) {
+      /* STUR Wd, [Xn|SP + simm9]:  10 111000 000 simm9 00 n d
+         LDUR Wd, [Xn|SP + simm9]:  10 111000 010 simm9 00 n d
+      */
+      Int simm9 = am->ARM64am.RI9.simm9;
+      vassert(-256 <= simm9 && simm9 <= 255);
+      UInt instr = X_2_6_3_9_2_5_5(X10, X111000, isLoad ? X010 : X000,
+                                   simm9 & 0x1FF, X00,
+                                   iregEnc(am->ARM64am.RI9.reg), wD);
+      *p++ = instr;
+      return p;
+   }
+   if (am->tag == ARM64am_RI12) {
+      /* STR Wd, [Xn|SP + uimm12 * 4]:  10 111 001 00 imm12 n d
+         LDR Wd, [Xn|SP + uimm12 * 4]:  10 111 001 01 imm12 n d
+      */
+      UInt uimm12 = am->ARM64am.RI12.uimm12;
+      UInt scale  = am->ARM64am.RI12.szB;
+      vassert(scale == 4); /* failure of this is serious.  Do not ignore. */
+      UInt xN    = iregEnc(am->ARM64am.RI12.reg);
+      vassert(xN <= 30);
+      UInt instr = X_2_6_2_12_5_5(X10, X111001, isLoad ? X01 : X00,
+                                  uimm12, xN, wD);
+      *p++ = instr;
+      return p;
+   }
+   if (am->tag == ARM64am_RR) {
+      /* STR Wd, [Xn|SP, Xm]: 10 111 000 001 m 011 0 10 n d
+         LDR Wd, [Xn|SP, Xm]: 10 111 000 011 m 011 0 10 n d
+      */
+      UInt xN = iregEnc(am->ARM64am.RR.base);
+      UInt xM = iregEnc(am->ARM64am.RR.index);
+      vassert(xN <= 30);
+      UInt instr = X_3_8_5_6_5_5(X101, isLoad ? X11000011 : X11000001, 
+                                 xM, X011010, xN, wD);
+      *p++ = instr;
+      return p;
+   }
+   /* No other amode kinds are handled. */
+   vpanic("do_load_or_store32");
+   vassert(0);  /* NOTREACHED */
+}
+
+
+/* Generate a 64 bit load or store to/from xD, using the given amode
+   for the address. */
+static UInt* do_load_or_store64 ( UInt* p,
+                                  Bool isLoad, UInt xD, ARM64AMode* am )
+{
+   /* In all these cases, Rn can't be 31 since that means SP. */
+   vassert(xD <= 30);
+   if (am->tag == ARM64am_RI9) {
+      /* STUR Xd, [Xn|SP + simm9]:  11 111000 000 simm9 00 n d
+         LDUR Xd, [Xn|SP + simm9]:  11 111000 010 simm9 00 n d
+      */
+      Int simm9 = am->ARM64am.RI9.simm9;
+      vassert(-256 <= simm9 && simm9 <= 255);
+      UInt xN = iregEnc(am->ARM64am.RI9.reg);
+      vassert(xN <= 30);
+      UInt instr = X_2_6_3_9_2_5_5(X11, X111000, isLoad ? X010 : X000,
+                                   simm9 & 0x1FF, X00, xN, xD);
+      *p++ = instr;
+      return p;
+   }
+   if (am->tag == ARM64am_RI12) {
+      /* STR Xd, [Xn|SP + uimm12 * 8]:  11 111 001 00 imm12 n d
+         LDR Xd, [Xn|SP + uimm12 * 8]:  11 111 001 01 imm12 n d
+      */
+      UInt uimm12 = am->ARM64am.RI12.uimm12;
+      UInt scale  = am->ARM64am.RI12.szB;
+      vassert(scale == 8); /* failure of this is serious.  Do not ignore. */
+      UInt xN    = iregEnc(am->ARM64am.RI12.reg);
+      vassert(xN <= 30);
+      UInt instr = X_2_6_2_12_5_5(X11, X111001, isLoad ? X01 : X00,
+                                  uimm12, xN, xD);
+      *p++ = instr;
+      return p;
+   }
+   if (am->tag == ARM64am_RR) {
+      /* STR Xd, [Xn|SP, Xm]: 11 111 000 001 m 011 0 10 n d
+         LDR Xd, [Xn|SP, Xm]: 11 111 000 011 m 011 0 10 n d
+      */
+      UInt xN = iregEnc(am->ARM64am.RR.base);
+      UInt xM = iregEnc(am->ARM64am.RR.index);
+      vassert(xN <= 30);
+      UInt instr = X_3_8_5_6_5_5(X111, isLoad ? X11000011 : X11000001, 
+                                 xM, X011010, xN, xD);
+      *p++ = instr;
+      return p;
+   }
+   /* No other amode kinds are handled. */
+   vpanic("do_load_or_store64");
+   vassert(0);  /* NOTREACHED */
+}
+
+
+/* Emit an instruction into buf and return the number of bytes used.
+   Note that buf is not the insn's final place, and therefore it is
+   imperative to emit position-independent code.  If the emitted
+   instruction was a profiler inc, set *is_profInc to True, else
+   leave it unchanged. */
+
+Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
+                      UChar* buf, Int nbuf, const ARM64Instr* i,
+                      Bool mode64, VexEndness endness_host,
+                      const void* disp_cp_chain_me_to_slowEP,
+                      const void* disp_cp_chain_me_to_fastEP,
+                      const void* disp_cp_xindir,
+                      const void* disp_cp_xassisted )
+{
+   UInt* p = (UInt*)buf;
+   vassert(nbuf >= 32);
+   vassert(mode64 == True);
+   vassert(0 == (((HWord)buf) & 3));
+
+   switch (i->tag) {
+      case ARM64in_Arith: {
+         UInt      rD   = iregEnc(i->ARM64in.Arith.dst);
+         UInt      rN   = iregEnc(i->ARM64in.Arith.argL);
+         ARM64RIA* argR = i->ARM64in.Arith.argR;
+         switch (argR->tag) {
+            case ARM64riA_I12:
+               *p++ = X_2_6_2_12_5_5(
+                         i->ARM64in.Arith.isAdd ? X10 : X11,
+                         X010001,
+                         argR->ARM64riA.I12.shift == 12 ? X01 : X00,
+                         argR->ARM64riA.I12.imm12, rN, rD
+                      );
+               break;
+            case ARM64riA_R: {
+               UInt rM = iregEnc(i->ARM64in.Arith.argR->ARM64riA.R.reg);
+               *p++ = X_3_8_5_6_5_5(
+                         i->ARM64in.Arith.isAdd ? X100 : X110,
+                         X01011000, rM, X000000, rN, rD
+                      );
+               break;
+            }
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+      case ARM64in_Cmp: {
+         UInt      rD   = 31; /* XZR, we are going to dump the result */
+         UInt      rN   = iregEnc(i->ARM64in.Cmp.argL);
+         ARM64RIA* argR = i->ARM64in.Cmp.argR;
+         Bool      is64 = i->ARM64in.Cmp.is64;
+         switch (argR->tag) {
+            case ARM64riA_I12:
+               /* 1 11 10001 sh imm12 Rn Rd = SUBS Xd, Xn, #imm */
+               /* 0 11 10001 sh imm12 Rn Rd = SUBS Wd, Wn, #imm */
+               *p++ = X_2_6_2_12_5_5(
+                         is64 ? X11 : X01, X110001,
+                         argR->ARM64riA.I12.shift == 12 ? X01 : X00,
+                         argR->ARM64riA.I12.imm12, rN, rD);
+               break;
+            case ARM64riA_R: {
+               /* 1 11 01011 00 0 Rm 000000 Rn Rd = SUBS Xd, Xn, Xm */
+               /* 0 11 01011 00 0 Rm 000000 Rn Rd = SUBS Wd, Wn, Wm */
+               UInt rM = iregEnc(i->ARM64in.Cmp.argR->ARM64riA.R.reg);
+               *p++ = X_3_8_5_6_5_5(is64 ? X111 : X011,
+                                    X01011000, rM, X000000, rN, rD);
+               break;
+            }
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+      case ARM64in_Logic: {
+         UInt      rD   = iregEnc(i->ARM64in.Logic.dst);
+         UInt      rN   = iregEnc(i->ARM64in.Logic.argL);
+         ARM64RIL* argR = i->ARM64in.Logic.argR;
+         UInt      opc  = 0; /* invalid */
+         vassert(rD < 31);
+         vassert(rN < 31);
+         switch (i->ARM64in.Logic.op) {
+            case ARM64lo_OR:  opc = X101; break;
+            case ARM64lo_AND: opc = X100; break;
+            case ARM64lo_XOR: opc = X110; break;
+            default: break;
+         }
+         vassert(opc != 0);
+         switch (argR->tag) {
+            case ARM64riL_I13: {
+               /* 1 01 100100 N immR immS Rn Rd = ORR <Xd|Sp>, Xn, #imm */
+               /* 1 00 100100 N immR immS Rn Rd = AND <Xd|Sp>, Xn, #imm */
+               /* 1 10 100100 N immR immS Rn Rd = EOR <Xd|Sp>, Xn, #imm */
+               *p++ = X_3_6_1_6_6_5_5(
+                         opc, X100100, argR->ARM64riL.I13.bitN,
+                         argR->ARM64riL.I13.immR, argR->ARM64riL.I13.immS,
+                         rN, rD
+                      );
+               break;
+            }
+            case ARM64riL_R: {
+               /* 1 01 01010 00 0 m 000000 n d = ORR Xd, Xn, Xm */
+               /* 1 00 01010 00 0 m 000000 n d = AND Xd, Xn, Xm */
+               /* 1 10 01010 00 0 m 000000 n d = EOR Xd, Xn, Xm */
+               UInt rM = iregEnc(argR->ARM64riL.R.reg);
+               vassert(rM < 31);
+               *p++ = X_3_8_5_6_5_5(opc, X01010000, rM, X000000, rN, rD);
+               break;
+            }
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+      case ARM64in_Test: {
+         UInt      rD   = 31; /* XZR, we are going to dump the result */
+         UInt      rN   = iregEnc(i->ARM64in.Test.argL);
+         ARM64RIL* argR = i->ARM64in.Test.argR;
+         switch (argR->tag) {
+            case ARM64riL_I13: {
+               /* 1 11 100100 N immR immS Rn Rd = ANDS Xd, Xn, #imm */
+               *p++ = X_3_6_1_6_6_5_5(
+                         X111, X100100, argR->ARM64riL.I13.bitN,
+                         argR->ARM64riL.I13.immR, argR->ARM64riL.I13.immS,
+                         rN, rD
+                      );
+               break;
+            }
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+      case ARM64in_Shift: {
+         UInt      rD   = iregEnc(i->ARM64in.Shift.dst);
+         UInt      rN   = iregEnc(i->ARM64in.Shift.argL);
+         ARM64RI6* argR = i->ARM64in.Shift.argR;
+         vassert(rD < 31);
+         vassert(rN < 31);
+         switch (argR->tag) {
+            case ARM64ri6_I6: {
+               /* 110 1001101 (63-sh) (64-sh) nn dd   LSL Xd, Xn, sh */
+               /* 110 1001101 sh      63      nn dd   LSR Xd, Xn, sh */
+               /* 100 1001101 sh      63      nn dd   ASR Xd, Xn, sh */
+               UInt sh = argR->ARM64ri6.I6.imm6;
+               vassert(sh > 0 && sh < 64);
+               switch (i->ARM64in.Shift.op) {
+                  case ARM64sh_SHL:
+                     *p++ = X_3_6_1_6_6_5_5(X110, X100110,
+                                            1, 64-sh, 63-sh, rN, rD);
+                     break;
+                  case ARM64sh_SHR:
+                     *p++ = X_3_6_1_6_6_5_5(X110, X100110, 1, sh, 63, rN, rD);
+                     break;
+                  case ARM64sh_SAR:
+                     *p++ = X_3_6_1_6_6_5_5(X100, X100110, 1, sh, 63, rN, rD);
+                     break;
+                  default:
+                     vassert(0);
+               }
+               break;
+            }
+            case ARM64ri6_R: {
+               /* 100 1101 0110 mm 001000 nn dd   LSL Xd, Xn, Xm */
+               /* 100 1101 0110 mm 001001 nn dd   LSR Xd, Xn, Xm */
+               /* 100 1101 0110 mm 001010 nn dd   ASR Xd, Xn, Xm */
+               UInt rM = iregEnc(argR->ARM64ri6.R.reg);
+               vassert(rM < 31);
+               UInt subOpc = 0;
+               switch (i->ARM64in.Shift.op) {
+                  case ARM64sh_SHL: subOpc = X001000; break;
+                  case ARM64sh_SHR: subOpc = X001001; break;
+                  case ARM64sh_SAR: subOpc = X001010; break;
+                  default: vassert(0);
+               }
+               *p++ = X_3_8_5_6_5_5(X100, X11010110, rM, subOpc, rN, rD);
+               break;
+            }
+            default:
+               vassert(0);
+         }
+         goto done;
+      }
+      case ARM64in_Unary: {
+         UInt rDst = iregEnc(i->ARM64in.Unary.dst);
+         UInt rSrc = iregEnc(i->ARM64in.Unary.src);
+         switch (i->ARM64in.Unary.op) {
+            case ARM64un_CLZ:
+               /* 1 10 1101 0110 00000 00010 0 nn dd   CLZ Xd, Xn */
+               /* 1 10 1101 0110 00000 00010 1 nn dd   CLS Xd, Xn (unimp) */
+               *p++ = X_3_8_5_6_5_5(X110,
+                                    X11010110, X00000, X000100, rSrc, rDst);
+               goto done;
+            case ARM64un_NEG:
+               /* 1 10 01011 000 m 000000 11111 d  NEG Xd,Xm */
+               /* 0 10 01011 000 m 000000 11111 d  NEG Wd,Wm (unimp) */
+               *p++ = X_3_8_5_6_5_5(X110,
+                                    X01011000, rSrc, X000000, X11111, rDst);
+               goto done;
+            case ARM64un_NOT: {
+               /* 1 01 01010 00 1 m 000000 11111 d   MVN Xd,Xm */
+               *p++ = X_3_8_5_6_5_5(X101,
+                                    X01010001, rSrc, X000000, X11111, rDst);
+               goto done;
+            }
+            default:
+               break;
+         }
+         goto bad;
+      }
+      case ARM64in_MovI: {
+         /* We generate the "preferred form", ORR Xd, XZR, Xm
+            101 01010 00 0 m 000000 11111 d
+         */
+         UInt instr = 0xAA0003E0;
+         UInt d     = iregEnc(i->ARM64in.MovI.dst);
+         UInt m     = iregEnc(i->ARM64in.MovI.src);
+         *p++ = instr | ((m & 31) << 16) | ((d & 31) << 0);
+         goto done;
+      }
+      case ARM64in_Imm64: {
+         p = imm64_to_ireg( p, iregEnc(i->ARM64in.Imm64.dst),
+                               i->ARM64in.Imm64.imm64 );
+         goto done;
+      }
+      case ARM64in_LdSt64: {
+         p = do_load_or_store64( p, i->ARM64in.LdSt64.isLoad,
+                                 iregEnc(i->ARM64in.LdSt64.rD),
+                                 i->ARM64in.LdSt64.amode );
+         goto done;
+      }
+      case ARM64in_LdSt32: {
+         p = do_load_or_store32( p, i->ARM64in.LdSt32.isLoad,
+                                 iregEnc(i->ARM64in.LdSt32.rD),
+                                 i->ARM64in.LdSt32.amode );
+         goto done;
+      }
+      case ARM64in_LdSt16: {
+         p = do_load_or_store16( p, i->ARM64in.LdSt16.isLoad,
+                                 iregEnc(i->ARM64in.LdSt16.rD),
+                                 i->ARM64in.LdSt16.amode );
+         goto done;
+      }
+      case ARM64in_LdSt8: {
+         p = do_load_or_store8( p, i->ARM64in.LdSt8.isLoad,
+                                iregEnc(i->ARM64in.LdSt8.rD),
+                                i->ARM64in.LdSt8.amode );
+         goto done;
+      }
+
+      case ARM64in_XDirect: {
+         /* NB: what goes on here has to be very closely coordinated
+            with chainXDirect_ARM64 and unchainXDirect_ARM64 below. */
+         /* We're generating chain-me requests here, so we need to be
+            sure this is actually allowed -- no-redir translations
+            can't use chain-me's.  Hence: */
+         vassert(disp_cp_chain_me_to_slowEP != NULL);
+         vassert(disp_cp_chain_me_to_fastEP != NULL);
+
+         /* Use ptmp for backpatching conditional jumps. */
+         UInt* ptmp = NULL;
+
+         /* First off, if this is conditional, create a conditional
+            jump over the rest of it.  Or at least, leave a space for
+            it that we will shortly fill in. */
+         if (i->ARM64in.XDirect.cond != ARM64cc_AL) {
+            vassert(i->ARM64in.XDirect.cond != ARM64cc_NV);
+            ptmp = p;
+            *p++ = 0;
+         }
+
+         /* Update the guest PC. */
+         /* imm64 x9, dstGA */
+         /* str   x9, amPC */
+         p = imm64_to_ireg(p, /*x*/9, i->ARM64in.XDirect.dstGA);
+         p = do_load_or_store64(p, False/*!isLoad*/,
+                                /*x*/9, i->ARM64in.XDirect.amPC);
+
+         /* --- FIRST PATCHABLE BYTE follows --- */
+         /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're
+            calling to) backs up the return address, so as to find the
+            address of the first patchable byte.  So: don't change the
+            number of instructions (5) below. */
+         /* movw x9, VG_(disp_cp_chain_me_to_{slowEP,fastEP})[15:0] */
+         /* movk x9, VG_(disp_cp_chain_me_to_{slowEP,fastEP})[31:15], lsl 16 */
+         /* movk x9, VG_(disp_cp_chain_me_to_{slowEP,fastEP})[47:32], lsl 32 */
+         /* movk x9, VG_(disp_cp_chain_me_to_{slowEP,fastEP})[63:48], lsl 48 */
+         /* blr  x9 */
+         const void* disp_cp_chain_me
+                  = i->ARM64in.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP 
+                                                : disp_cp_chain_me_to_slowEP;
+         p = imm64_to_ireg_EXACTLY4(p, /*x*/9, (Addr)disp_cp_chain_me);
+         *p++ = 0xD63F0120;
+         /* --- END of PATCHABLE BYTES --- */
+
+         /* Fix up the conditional jump, if there was one. */
+         if (i->ARM64in.XDirect.cond != ARM64cc_AL) {
+            Int delta = (UChar*)p - (UChar*)ptmp; /* must be signed */
+            vassert(delta > 0 && delta < 40);
+            vassert((delta & 3) == 0);
+            UInt notCond = 1 ^ (UInt)i->ARM64in.XDirect.cond;
+            vassert(notCond <= 13); /* Neither AL nor NV */
+            vassert(ptmp != NULL);
+            delta = delta >> 2;
+            *ptmp = X_8_19_1_4(X01010100, delta & ((1<<19)-1), 0, notCond);
+         }
+         goto done;
+      }
+
+      case ARM64in_XIndir: {
+         // XIndir is more or less the same as XAssisted, except
+         // we don't have a trc value to hand back, so there's no
+         // write to r21
+         /* Use ptmp for backpatching conditional jumps. */
+         //UInt* ptmp = NULL;
+
+         /* First off, if this is conditional, create a conditional
+            jump over the rest of it.  Or at least, leave a space for
+            it that we will shortly fill in. */
+         if (i->ARM64in.XIndir.cond != ARM64cc_AL) {
+            vassert(0); //ATC
+//ZZ             vassert(i->ARMin.XIndir.cond != ARMcc_NV);
+//ZZ             ptmp = p;
+//ZZ             *p++ = 0;
+         }
+
+         /* Update the guest PC. */
+         /* str r-dstGA, amPC */
+         p = do_load_or_store64(p, False/*!isLoad*/,
+                                iregEnc(i->ARM64in.XIndir.dstGA),
+                                i->ARM64in.XIndir.amPC);
+
+         /* imm64 x9, VG_(disp_cp_xindir) */
+         /* br    x9 */
+         p = imm64_to_ireg(p, /*x*/9, (Addr)disp_cp_xindir);
+         *p++ = 0xD61F0120; /* br x9 */
+
+         /* Fix up the conditional jump, if there was one. */
+         if (i->ARM64in.XIndir.cond != ARM64cc_AL) {
+            vassert(0); //ATC
+//ZZ             Int delta = (UChar*)p - (UChar*)ptmp; /* must be signed */
+//ZZ             vassert(delta > 0 && delta < 40);
+//ZZ             vassert((delta & 3) == 0);
+//ZZ             UInt notCond = 1 ^ (UInt)i->ARMin.XIndir.cond;
+//ZZ             vassert(notCond <= 13); /* Neither AL nor NV */
+//ZZ             delta = (delta >> 2) - 2;
+//ZZ             *ptmp = XX______(notCond, X1010) | (delta & 0xFFFFFF);
+         }
+         goto done;
+      }
+
+      case ARM64in_XAssisted: {
+         /* Assisted transfer to the guest address in dstGA: store
+            dstGA into the guest PC slot, load a VEX_TRC_JMP_ magic
+            value into x21, and jump to the xassisted dispatcher stub.
+            Use ptmp for backpatching conditional jumps. */
+         UInt* ptmp = NULL;
+
+         /* First off, if this is conditional, create a conditional
+            jump over the rest of it.  Or at least, leave a space for
+            it that we will shortly fill in.  I think this can only
+            ever happen when VEX is driven by the switchbacker. */
+         if (i->ARM64in.XAssisted.cond != ARM64cc_AL) {
+            /* Was i->ARM64in.XDirect.cond -- worked only because
+               .cond sits at the same offset in every union arm.  Use
+               the field belonging to this instruction kind. */
+            vassert(i->ARM64in.XAssisted.cond != ARM64cc_NV);
+            ptmp = p;
+            *p++ = 0;
+         }
+
+         /* Update the guest PC. */
+         /* str r-dstGA, amPC */
+         p = do_load_or_store64(p, False/*!isLoad*/,
+                                iregEnc(i->ARM64in.XAssisted.dstGA),
+                                i->ARM64in.XAssisted.amPC);
+
+         /* imm64 x21, $magic_number */
+         UInt trcval = 0;
+         switch (i->ARM64in.XAssisted.jk) {
+            case Ijk_ClientReq:   trcval = VEX_TRC_JMP_CLIENTREQ;   break;
+            case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
+            //case Ijk_Sys_int128:  trcval = VEX_TRC_JMP_SYS_INT128;  break;
+            //case Ijk_Yield:       trcval = VEX_TRC_JMP_YIELD;       break;
+            //case Ijk_EmWarn:      trcval = VEX_TRC_JMP_EMWARN;      break;
+            //case Ijk_MapFail:     trcval = VEX_TRC_JMP_MAPFAIL;     break;
+            case Ijk_NoDecode:    trcval = VEX_TRC_JMP_NODECODE;    break;
+            case Ijk_InvalICache: trcval = VEX_TRC_JMP_INVALICACHE; break;
+            case Ijk_FlushDCache: trcval = VEX_TRC_JMP_FLUSHDCACHE; break;
+            case Ijk_NoRedir:     trcval = VEX_TRC_JMP_NOREDIR;     break;
+            case Ijk_SigTRAP:     trcval = VEX_TRC_JMP_SIGTRAP;     break;
+            //case Ijk_SigSEGV:     trcval = VEX_TRC_JMP_SIGSEGV;     break;
+            case Ijk_Boring:      trcval = VEX_TRC_JMP_BORING;      break;
+            /* We don't expect to see the following being assisted. */
+            //case Ijk_Ret:
+            //case Ijk_Call:
+            /* fallthrough */
+            default: 
+               ppIRJumpKind(i->ARM64in.XAssisted.jk);
+               vpanic("emit_ARM64Instr.ARM64in_XAssisted: "
+                      "unexpected jump kind");
+         }
+         vassert(trcval != 0);
+         p = imm64_to_ireg(p, /*x*/21, (ULong)trcval);
+
+         /* imm64 x9, VG_(disp_cp_xassisted) */
+         /* br    x9 */
+         p = imm64_to_ireg(p, /*x*/9, (Addr)disp_cp_xassisted);
+         *p++ = 0xD61F0120; /* br x9 */
+
+         /* Fix up the conditional jump, if there was one. */
+         if (i->ARM64in.XAssisted.cond != ARM64cc_AL) {
+            vassert(ptmp != NULL); /* check before doing arithmetic on it */
+            Int delta = (UChar*)p - (UChar*)ptmp; /* must be signed */
+            vassert(delta > 0 && delta < 40);
+            vassert((delta & 3) == 0);
+            UInt notCond = 1 ^ (UInt)i->ARM64in.XAssisted.cond;
+            vassert(notCond <= 13); /* Neither AL nor NV */
+            delta = delta >> 2;
+            *ptmp = X_8_19_1_4(X01010100, delta & ((1<<19)-1), 0, notCond);
+         }
+         goto done;
+      }
+
+      case ARM64in_CSel: {
+         /* Conditional select: dst = cond ? argL : argR. */
+         /* 100 1101 0100 mm cond 00 nn dd = CSEL Xd, Xn, Xm, cond */
+         UInt dd   = iregEnc(i->ARM64in.CSel.dst);
+         UInt nn   = iregEnc(i->ARM64in.CSel.argL);
+         UInt mm   = iregEnc(i->ARM64in.CSel.argR);
+         UInt cond = (UInt)i->ARM64in.CSel.cond;
+         vassert(dd < 31 && nn < 31 && mm < 31 && cond < 16);
+         *p++ = X_3_8_5_6_5_5(X100, X11010100, mm, cond << 2, nn, dd);
+         goto done;
+      }
+
+      case ARM64in_Call: {
+         /* Call a host helper, possibly conditionally. */
+         /* We'll use x9 as a scratch register to put the target
+            address in. */
+         if (i->ARM64in.Call.cond != ARM64cc_AL
+             && i->ARM64in.Call.rloc.pri != RLPri_None) {
+            /* The call might not happen (it isn't unconditional) and
+               it returns a result.  In this case we will need to
+               generate a control flow diamond to put 0x555..555 in
+               the return register(s) in the case where the call
+               doesn't happen.  If this ever becomes necessary, maybe
+               copy code from the 32-bit ARM equivalent.  Until that
+               day, just give up. */
+            goto bad;
+         }
+
+         UInt* ptmp = NULL;
+         if (i->ARM64in.Call.cond != ARM64cc_AL) {
+            /* Create a hole to put a conditional branch in.  We'll
+               patch it once we know the branch length. */
+            ptmp = p;
+            *p++ = 0;
+         }
+
+         // x9 = &target
+         p = imm64_to_ireg( (UInt*)p, /*x*/9, (ULong)i->ARM64in.Call.target );
+         // blr x9
+         *p++ = 0xD63F0120;
+
+         // Patch the hole if necessary
+         if (i->ARM64in.Call.cond != ARM64cc_AL) {
+            ULong dist = (ULong)(p - ptmp); /* distance in 32-bit insns */
+            /* imm64_to_ireg produces between 1 and 4 insns, and
+               then there's the BLR itself.  Hence: */
+            vassert(dist >= 2 && dist <= 5);
+            vassert(ptmp != NULL);
+            // 01010100 simm19 0 cond = B.cond (here + simm19 << 2)
+            *ptmp = X_8_19_1_4(X01010100, dist, 0,
+                               1 ^ (UInt)i->ARM64in.Call.cond);
+         } else {
+            vassert(ptmp == NULL);
+         }
+
+         goto done;
+      }
+
+      case ARM64in_AddToSP: {
+         /* Adjust the stack pointer by a signed, 16-aligned amount. */
+         /* 10,0 10001 00 imm12 11111 11111  ADD xsp, xsp, #imm12
+            11,0 10001 00 imm12 11111 11111  SUB xsp, xsp, #imm12
+         */
+         Int simm12 = i->ARM64in.AddToSP.simm;
+         vassert(-4096 < simm12 && simm12 < 4096);
+         vassert(0 == (simm12 & 0xF)); /* keep SP 16-byte aligned */
+         if (simm12 >= 0) {
+            *p++ = X_2_6_2_12_5_5(X10, X010001, X00, simm12, X11111, X11111);
+         } else {
+            *p++ = X_2_6_2_12_5_5(X11, X010001, X00, -simm12, X11111, X11111);
+         }
+         goto done;
+      }
+
+      case ARM64in_FromSP: {
+         /* Copy the stack pointer into a general register. */
+         /* 10,0 10001 00 0..(12)..0 11111 dd  MOV Xd, xsp */
+         UInt dd = iregEnc(i->ARM64in.FromSP.dst);
+         vassert(dd < 31);
+         *p++ = X_2_6_2_12_5_5(X10, X010001, X00, 0, X11111, dd);
+         goto done;
+      }
+
+      case ARM64in_Mul: {
+         /* 64-bit multiply: low 64 bits (PLAIN) or the high 64 bits
+            of an unsigned (ZX) / signed (SX) 64x64->128 multiply. */
+         /* 100 11011 110 mm 011111 nn dd   UMULH Xd, Xn,Xm
+            100 11011 010 mm 011111 nn dd   SMULH Xd, Xn,Xm
+            100 11011 000 mm 011111 nn dd   MUL   Xd, Xn,Xm
+         */
+         UInt dd = iregEnc(i->ARM64in.Mul.dst);
+         UInt nn = iregEnc(i->ARM64in.Mul.argL);
+         UInt mm = iregEnc(i->ARM64in.Mul.argR);
+         vassert(dd < 31 && nn < 31 && mm < 31);
+         switch (i->ARM64in.Mul.op) {
+            case ARM64mul_ZX:
+               *p++ = X_3_8_5_6_5_5(X100, X11011110, mm, X011111, nn, dd);
+               goto done;
+            case ARM64mul_SX:
+               *p++ = X_3_8_5_6_5_5(X100, X11011010, mm, X011111, nn, dd);
+               goto done;
+            case ARM64mul_PLAIN:
+               *p++ = X_3_8_5_6_5_5(X100, X11011000, mm, X011111, nn, dd);
+               goto done;
+            default:
+               vassert(0);
+         }
+         goto bad; /* not reached: all cases above goto done or assert */
+      }
+      case ARM64in_LdrEX: {
+         /* Load-exclusive; operands are fixed: w2/x2 <- [x4]. */
+         /* 085F7C82   ldxrb w2, [x4]
+            485F7C82   ldxrh w2, [x4]
+            885F7C82   ldxr  w2, [x4]
+            C85F7C82   ldxr  x2, [x4]
+         */
+         switch (i->ARM64in.LdrEX.szB) {
+            case 1: *p++ = 0x085F7C82; goto done;
+            case 2: *p++ = 0x485F7C82; goto done;
+            case 4: *p++ = 0x885F7C82; goto done;
+            case 8: *p++ = 0xC85F7C82; goto done;
+            default: break; /* unsupported size */
+         }
+         goto bad;
+      }
+      case ARM64in_StrEX: {
+         /* Store-exclusive; operands are fixed: [x4] <- w2/x2,
+            status in w0. */
+         /* 08007C82   stxrb w0, w2, [x4]
+            48007C82   stxrh w0, w2, [x4]
+            88007C82   stxr  w0, w2, [x4]
+            C8007C82   stxr  w0, x2, [x4]
+         */
+         switch (i->ARM64in.StrEX.szB) {
+            case 1: *p++ = 0x08007C82; goto done;
+            case 2: *p++ = 0x48007C82; goto done;
+            case 4: *p++ = 0x88007C82; goto done;
+            case 8: *p++ = 0xC8007C82; goto done;
+            default: break; /* unsupported size */
+         }
+         goto bad;
+      }
+      case ARM64in_MFence: {
+         /* Full memory fence: data sync + data barrier + insn sync. */
+         *p++ = 0xD5033F9F; /* DSB sy */
+         *p++ = 0xD5033FBF; /* DMB sy */
+         *p++ = 0xD5033FDF; /* ISB */
+         goto done;
+      }
+      //case ARM64in_CLREX: {
+      //   //ATC, but believed to be correct
+      //   goto bad;
+      //   *p++ = 0xD5033F5F; /* clrex */
+      //   goto done;
+      //}
+      case ARM64in_VLdStH: {
+         /* 16-bit FP load/store; uimm12 is the byte offset, scaled
+            down by 2 into the insn's 12-bit field. */
+         /* 01 111101 01 imm12 n t   LDR Ht, [Xn|SP, #imm12 * 2]
+            01 111101 00 imm12 n t   STR Ht, [Xn|SP, #imm12 * 2]
+         */
+         UInt hD     = dregEnc(i->ARM64in.VLdStH.hD);
+         UInt rN     = iregEnc(i->ARM64in.VLdStH.rN);
+         UInt uimm12 = i->ARM64in.VLdStH.uimm12;
+         Bool isLD   = i->ARM64in.VLdStH.isLoad;
+         vassert(uimm12 < 8192 && 0 == (uimm12 & 1));
+         uimm12 >>= 1;
+         vassert(uimm12 < (1<<12));
+         vassert(hD < 32);
+         vassert(rN < 31);
+         *p++ = X_2_6_2_12_5_5(X01, X111101, isLD ? X01 : X00,
+                               uimm12, rN, hD);
+         goto done;
+      }
+      case ARM64in_VLdStS: {
+         /* 32-bit FP load/store; uimm12 is the byte offset, scaled
+            down by 4 into the insn's 12-bit field. */
+         /* 10 111101 01 imm12 n t   LDR St, [Xn|SP, #imm12 * 4]
+            10 111101 00 imm12 n t   STR St, [Xn|SP, #imm12 * 4]
+         */
+         UInt sD     = dregEnc(i->ARM64in.VLdStS.sD);
+         UInt rN     = iregEnc(i->ARM64in.VLdStS.rN);
+         UInt uimm12 = i->ARM64in.VLdStS.uimm12;
+         Bool isLD   = i->ARM64in.VLdStS.isLoad;
+         vassert(uimm12 < 16384 && 0 == (uimm12 & 3));
+         uimm12 >>= 2;
+         vassert(uimm12 < (1<<12));
+         vassert(sD < 32);
+         vassert(rN < 31);
+         *p++ = X_2_6_2_12_5_5(X10, X111101, isLD ? X01 : X00,
+                               uimm12, rN, sD);
+         goto done;
+      }
+      case ARM64in_VLdStD: {
+         /* 64-bit FP load/store; uimm12 is the byte offset, scaled
+            down by 8 into the insn's 12-bit field. */
+         /* 11 111101 01 imm12 n t   LDR Dt, [Xn|SP, #imm12 * 8]
+            11 111101 00 imm12 n t   STR Dt, [Xn|SP, #imm12 * 8]
+         */
+         UInt dD     = dregEnc(i->ARM64in.VLdStD.dD);
+         UInt rN     = iregEnc(i->ARM64in.VLdStD.rN);
+         UInt uimm12 = i->ARM64in.VLdStD.uimm12;
+         Bool isLD   = i->ARM64in.VLdStD.isLoad;
+         vassert(uimm12 < 32768 && 0 == (uimm12 & 7));
+         uimm12 >>= 3;
+         vassert(uimm12 < (1<<12));
+         vassert(dD < 32);
+         vassert(rN < 31);
+         *p++ = X_2_6_2_12_5_5(X11, X111101, isLD ? X01 : X00,
+                               uimm12, rN, dD);
+         goto done;
+      }
+      case ARM64in_VLdStQ: {
+         /* 128-bit vector load/store, register-indirect, no offset. */
+         /* 0100 1100 0000 0000 0111 11 rN rQ   st1 {vQ.2d}, [<rN|SP>]
+            0100 1100 0100 0000 0111 11 rN rQ   ld1 {vQ.2d}, [<rN|SP>]
+         */
+         UInt rQ = qregEnc(i->ARM64in.VLdStQ.rQ);
+         UInt rN = iregEnc(i->ARM64in.VLdStQ.rN);
+         vassert(rQ < 32);
+         vassert(rN < 31);
+         if (i->ARM64in.VLdStQ.isLoad) {
+            *p++ = 0x4C407C00 | (rN << 5) | rQ;
+         } else {
+            *p++ = 0x4C007C00 | (rN << 5) | rQ;
+         }
+         goto done;
+      }
+      case ARM64in_VCvtI2F: {
+         /* Integer -> FP conversion (SCVTF/UCVTF), all four
+            int-size x FP-size combinations, signed and unsigned. */
+         /* 31  28    23 21 20 18  15     9 4
+            000 11110 00 1  00 010 000000 n d  SCVTF Sd, Wn
+            000 11110 01 1  00 010 000000 n d  SCVTF Dd, Wn
+            100 11110 00 1  00 010 000000 n d  SCVTF Sd, Xn
+            100 11110 01 1  00 010 000000 n d  SCVTF Dd, Xn
+            000 11110 00 1  00 011 000000 n d  UCVTF Sd, Wn
+            000 11110 01 1  00 011 000000 n d  UCVTF Dd, Wn
+            100 11110 00 1  00 011 000000 n d  UCVTF Sd, Xn
+            100 11110 01 1  00 011 000000 n d  UCVTF Dd, Xn
+         */
+         UInt       rN = iregEnc(i->ARM64in.VCvtI2F.rS);
+         UInt       rD = dregEnc(i->ARM64in.VCvtI2F.rD);
+         ARM64CvtOp how = i->ARM64in.VCvtI2F.how;
+         /* Just handle cases as they show up. */
+         switch (how) {
+            case ARM64cvt_F32_I32S: /* SCVTF Sd, Wn */
+               *p++ = X_3_5_8_6_5_5(X000, X11110, X00100010, X000000, rN, rD);
+               break;
+            case ARM64cvt_F64_I32S: /* SCVTF Dd, Wn */
+               *p++ = X_3_5_8_6_5_5(X000, X11110, X01100010, X000000, rN, rD);
+               break;
+            case ARM64cvt_F32_I64S: /* SCVTF Sd, Xn */
+               *p++ = X_3_5_8_6_5_5(X100, X11110, X00100010, X000000, rN, rD);
+               break;
+            case ARM64cvt_F64_I64S: /* SCVTF Dd, Xn */
+               *p++ = X_3_5_8_6_5_5(X100, X11110, X01100010, X000000, rN, rD);
+               break;
+            case ARM64cvt_F32_I32U: /* UCVTF Sd, Wn */
+               *p++ = X_3_5_8_6_5_5(X000, X11110, X00100011, X000000, rN, rD);
+               break;
+            case ARM64cvt_F64_I32U: /* UCVTF Dd, Wn */
+               *p++ = X_3_5_8_6_5_5(X000, X11110, X01100011, X000000, rN, rD);
+               break;
+            case ARM64cvt_F32_I64U: /* UCVTF Sd, Xn */
+               *p++ = X_3_5_8_6_5_5(X100, X11110, X00100011, X000000, rN, rD);
+               break;
+            case ARM64cvt_F64_I64U: /* UCVTF Dd, Xn  */
+               *p++ = X_3_5_8_6_5_5(X100, X11110, X01100011, X000000, rN, rD);
+               break;
+            default:
+               goto bad; //ATC
+         }
+         goto done;
+      }
+      case ARM64in_VCvtF2I: {
+         /* FP -> integer conversion (FCVT{N,P,M,Z}{S,U}); the
+            rounding mode is baked into bits 20:19 via armRM. */
+         /*    30       23   20 18  15     9 4
+            sf 00,11110,0x 1 00 000,000000 n d  FCVTNS Rd, Fn (round to
+            sf 00,11110,0x 1 00 001,000000 n d  FCVTNU Rd, Fn  nearest)
+            ---------------- 01 --------------  FCVTP-------- (round to +inf)
+            ---------------- 10 --------------  FCVTM-------- (round to -inf)
+            ---------------- 11 --------------  FCVTZ-------- (round to zero)
+
+            Rd is Xd when sf==1, Wd when sf==0
+            Fn is Dn when x==1, Sn when x==0
+            20:19 carry the rounding mode, using the same encoding as FPCR
+         */
+         UInt       rD    = iregEnc(i->ARM64in.VCvtF2I.rD);
+         UInt       rN    = dregEnc(i->ARM64in.VCvtF2I.rS);
+         ARM64CvtOp how   = i->ARM64in.VCvtF2I.how;
+         UChar      armRM = i->ARM64in.VCvtF2I.armRM;
+         /* Just handle cases as they show up. */
+         switch (how) {
+            case ARM64cvt_F64_I32S: /* FCVTxS Wd, Dn */
+               *p++ = X_3_5_8_6_5_5(X000, X11110, X01100000 | (armRM << 3),
+                                    X000000, rN, rD);
+               break;
+            case ARM64cvt_F64_I32U: /* FCVTxU Wd, Dn */
+               *p++ = X_3_5_8_6_5_5(X000, X11110, X01100001 | (armRM << 3),
+                                    X000000, rN, rD);
+               break;
+            case ARM64cvt_F64_I64S: /* FCVTxS Xd, Dn */
+               *p++ = X_3_5_8_6_5_5(X100, X11110, X01100000 | (armRM << 3),
+                                    X000000, rN, rD);
+               break;
+            case ARM64cvt_F64_I64U: /* FCVTxU Xd, Dn */
+               *p++ = X_3_5_8_6_5_5(X100, X11110, X01100001 | (armRM << 3),
+                                    X000000, rN, rD);
+               break;
+            case ARM64cvt_F32_I32S: /* FCVTxS Wd, Sn */
+               *p++ = X_3_5_8_6_5_5(X000, X11110, X00100000 | (armRM << 3),
+                                    X000000, rN, rD);
+               break;
+            case ARM64cvt_F32_I32U: /* FCVTxU Wd, Sn */
+               *p++ = X_3_5_8_6_5_5(X000, X11110, X00100001 | (armRM << 3),
+                                    X000000, rN, rD);
+               break;
+            case ARM64cvt_F32_I64S: /* FCVTxS Xd, Sn */
+               *p++ = X_3_5_8_6_5_5(X100, X11110, X00100000 | (armRM << 3),
+                                    X000000, rN, rD);
+               break;
+            case ARM64cvt_F32_I64U: /* FCVTxU Xd, Sn */
+               *p++ = X_3_5_8_6_5_5(X100, X11110, X00100001 | (armRM << 3),
+                                    X000000, rN, rD);
+               break;
+            default:
+               goto bad; //ATC
+         }
+         goto done;
+      }
+      case ARM64in_VCvtSD: {
+         /* FCVT between single and double precision. */
+         /* 31         23 21    16  14    9 4
+            000,11110, 00 10001 0,1 10000 n d   FCVT Dd, Sn (S->D)
+            ---------- 01 ----- 0,0 ---------   FCVT Sd, Dn (D->S)
+            Rounding, when dst is smaller than src, is per the FPCR.
+         */
+         UInt dd = dregEnc(i->ARM64in.VCvtSD.dst);
+         UInt nn = dregEnc(i->ARM64in.VCvtSD.src);
+         if (i->ARM64in.VCvtSD.sToD) {
+            *p++ = X_3_5_8_6_5_5(X000, X11110, X00100010, X110000, nn, dd);
+         } else {
+            *p++ = X_3_5_8_6_5_5(X000, X11110, X01100010, X010000, nn, dd);
+         }
+         goto done;
+      }
+      case ARM64in_VCvtHS: {
+         /* FCVT between half and single precision. */
+         /* 31         23 21    16  14    9 4
+            000,11110, 11 10001 0,0 10000 n d   FCVT Sd, Hn (H->S)
+            ---------- 00 ----- 1,1 ---------   FCVT Hd, Sn (S->H)
+            Rounding, when dst is smaller than src, is per the FPCR.
+         */
+         UInt dd = dregEnc(i->ARM64in.VCvtHS.dst);
+         UInt nn = dregEnc(i->ARM64in.VCvtHS.src);
+         if (i->ARM64in.VCvtHS.hToS) {
+            *p++ = X_3_5_8_6_5_5(X000, X11110, X11100010, X010000, nn, dd);
+         } else {
+            *p++ = X_3_5_8_6_5_5(X000, X11110, X00100011, X110000, nn, dd);
+         }
+         goto done;
+      }
+      case ARM64in_VCvtHD: {
+         /* FCVT between half and double precision. */
+         /* 31         23 21    16  14    9 4
+            000,11110, 11 10001 0,1 10000 n d   FCVT Dd, Hn (H->D)
+            ---------- 01 ----- 1,1 ---------   FCVT Hd, Dn (D->H)
+            Rounding, when dst is smaller than src, is per the FPCR.
+         */
+         UInt dd = dregEnc(i->ARM64in.VCvtHD.dst);
+         UInt nn = dregEnc(i->ARM64in.VCvtHD.src);
+         if (i->ARM64in.VCvtHD.hToD) {
+            *p++ = X_3_5_8_6_5_5(X000, X11110, X11100010, X110000, nn, dd);
+         } else {
+            *p++ = X_3_5_8_6_5_5(X000, X11110, X01100011, X110000, nn, dd);
+         }
+         goto done;
+      }
+      case ARM64in_VUnaryD: {
+         /* Unary double-precision op: NEG/ABS/SQRT encoded via bits
+            16:15, plus special encodings for RINT and RECPX. */
+         /* 31        23 21     16 14    9 4
+            000,11110 01 1,0000 0,0 10000 n d  FMOV Dd, Dn (not handled)
+            ------------------- 0,1 ---------  FABS ------
+            ------------------- 1,0 ---------  FNEG ------
+            ------------------- 1,1 ---------  FSQRT -----
+         */
+         UInt dD  = dregEnc(i->ARM64in.VUnaryD.dst);
+         UInt dN  = dregEnc(i->ARM64in.VUnaryD.src);
+         UInt b16 = 2; /* impossible */
+         UInt b15 = 2; /* impossible */
+         switch (i->ARM64in.VUnaryD.op) {
+            case ARM64fpu_NEG:  b16 = 1; b15 = 0; break;
+            case ARM64fpu_SQRT: b16 = 1; b15 = 1; break;
+            case ARM64fpu_ABS:  b16 = 0; b15 = 1; break;
+            default: break; /* handled below or rejected */
+         }
+         if (b16 < 2 && b15 < 2) {
+            *p++ = X_3_8_5_6_5_5(X000, X11110011, (X0000 << 1) | b16,
+                                 (b15 << 5) | X10000, dN, dD);
+            goto done;
+         }
+         /* 
+            000, 11110 01 1,001 11,1 10000 n d  FRINTI Dd, Dm (round per FPCR)
+         */
+         if (i->ARM64in.VUnaryD.op == ARM64fpu_RINT) {
+           *p++ = X_3_8_5_6_5_5(X000, X11110011, X00111, X110000, dN, dD);
+           goto done;
+         }
+         /* 
+            010, 11110 11 1,0000 1,1111 10 n d  FRECPX Dd, Dm
+         */
+         if (i->ARM64in.VUnaryD.op == ARM64fpu_RECPX) {
+           *p++ = X_3_8_5_6_5_5(X010, X11110111, X00001, X111110, dN, dD);
+           goto done;
+         }
+         goto bad;
+      }
+      case ARM64in_VUnaryS: {
+         /* Unary single-precision op; same scheme as VUnaryD. */
+         /* 31        23 21     16 14    9 4
+            000,11110 00 1,0000 0,0 10000 n d  FMOV Sd, Sn (not handled)
+            ------------------- 0,1 ---------  FABS ------
+            ------------------- 1,0 ---------  FNEG ------
+            ------------------- 1,1 ---------  FSQRT -----
+         */
+         UInt sD  = dregEnc(i->ARM64in.VUnaryS.dst);
+         UInt sN  = dregEnc(i->ARM64in.VUnaryS.src);
+         UInt b16 = 2; /* impossible */
+         UInt b15 = 2; /* impossible */
+         switch (i->ARM64in.VUnaryS.op) {
+            case ARM64fpu_NEG:  b16 = 1; b15 = 0; break;
+            case ARM64fpu_SQRT: b16 = 1; b15 = 1; break;
+            case ARM64fpu_ABS:  b16 = 0; b15 = 1; break;
+            default: break; /* handled below or rejected */
+         }
+         if (b16 < 2 && b15 < 2) {
+            *p++ = X_3_8_5_6_5_5(X000, X11110001, (X0000 << 1) | b16,
+                                 (b15 << 5) | X10000, sN, sD);
+            goto done;
+         }
+         /* 
+            000, 11110 00 1,001 11,1 10000 n d  FRINTI Sd, Sm (round per FPCR)
+         */
+         if (i->ARM64in.VUnaryS.op == ARM64fpu_RINT) {
+           *p++ = X_3_8_5_6_5_5(X000, X11110001, X00111, X110000, sN, sD);
+           goto done;
+         }
+         /* 
+            010, 11110 10 1,0000 1,1111 10 n d  FRECPX Sd, Sm
+         */
+         if (i->ARM64in.VUnaryS.op == ARM64fpu_RECPX) {
+           *p++ = X_3_8_5_6_5_5(X010, X11110101, X00001, X111110, sN, sD);
+           goto done;
+         }
+         goto bad;
+      }
+      case ARM64in_VBinD: {
+         /* Binary double-precision op (MUL/DIV/ADD/SUB), selected by
+            bits 15:12. */
+         /* 31        23  20 15   11 9 4
+            ---------------- 0000 ------   FMUL  --------
+            000 11110 011 m  0001 10 n d   FDIV  Dd,Dn,Dm
+            ---------------- 0010 ------   FADD  --------
+            ---------------- 0011 ------   FSUB  --------
+         */
+         UInt dD = dregEnc(i->ARM64in.VBinD.dst);
+         UInt dN = dregEnc(i->ARM64in.VBinD.argL);
+         UInt dM = dregEnc(i->ARM64in.VBinD.argR);
+         UInt b1512 = 16; /* impossible */
+         switch (i->ARM64in.VBinD.op) {
+            case ARM64fpb_DIV: b1512 = X0001; break;
+            case ARM64fpb_MUL: b1512 = X0000; break;
+            case ARM64fpb_SUB: b1512 = X0011; break;
+            case ARM64fpb_ADD: b1512 = X0010; break;
+            default: goto bad;
+         }
+         vassert(b1512 < 16);
+         *p++
+            = X_3_8_5_6_5_5(X000, X11110011, dM, (b1512 << 2) | X10, dN, dD);
+         goto done;
+      }
+      case ARM64in_VBinS: {
+         /* Binary single-precision op; same scheme as VBinD. */
+         /* 31        23  20 15   11 9 4
+            ---------------- 0000 ------   FMUL  --------
+            000 11110 001 m  0001 10 n d   FDIV  Dd,Dn,Dm
+            ---------------- 0010 ------   FADD  --------
+            ---------------- 0011 ------   FSUB  --------
+         */
+         UInt sD = dregEnc(i->ARM64in.VBinS.dst);
+         UInt sN = dregEnc(i->ARM64in.VBinS.argL);
+         UInt sM = dregEnc(i->ARM64in.VBinS.argR);
+         UInt b1512 = 16; /* impossible */
+         switch (i->ARM64in.VBinS.op) {
+            case ARM64fpb_DIV: b1512 = X0001; break;
+            case ARM64fpb_MUL: b1512 = X0000; break;
+            case ARM64fpb_SUB: b1512 = X0011; break;
+            case ARM64fpb_ADD: b1512 = X0010; break;
+            default: goto bad;
+         }
+         vassert(b1512 < 16);
+         *p++
+            = X_3_8_5_6_5_5(X000, X11110001, sM, (b1512 << 2) | X10, sN, sD);
+         goto done;
+      }
+      case ARM64in_VCmpD: {
+         /* Double-precision compare, result to NZCV. */
+         /* 000 11110 01 1 m 00 1000 n 00 000  FCMP Dn, Dm */
+         UInt dN = dregEnc(i->ARM64in.VCmpD.argL);
+         UInt dM = dregEnc(i->ARM64in.VCmpD.argR);
+         *p++ = X_3_8_5_6_5_5(X000, X11110011, dM, X001000, dN, X00000);
+         goto done;
+      }
+      case ARM64in_VCmpS: {
+         /* Single-precision compare, result to NZCV. */
+         /* 000 11110 00 1 m 00 1000 n 00 000  FCMP Sn, Sm */
+         UInt sN = dregEnc(i->ARM64in.VCmpS.argL);
+         UInt sM = dregEnc(i->ARM64in.VCmpS.argR);
+         *p++ = X_3_8_5_6_5_5(X000, X11110001, sM, X001000, sN, X00000);
+         goto done;
+      }
+      case ARM64in_VFCSel: {
+         /* FP conditional select, single or double per isD. */
+         /* 31        23 21 20 15   11 9 5
+            000 11110 00 1  m  cond 11 n d  FCSEL Sd,Sn,Sm,cond
+            000 11110 01 1  m  cond 11 n d  FCSEL Dd,Dn,Dm,cond
+         */
+         Bool isD  = i->ARM64in.VFCSel.isD;
+         UInt dd   = dregEnc(i->ARM64in.VFCSel.dst);
+         UInt nn   = dregEnc(i->ARM64in.VFCSel.argL);
+         UInt mm   = dregEnc(i->ARM64in.VFCSel.argR);
+         UInt cond = (UInt)i->ARM64in.VFCSel.cond;
+         vassert(cond < 16);
+         *p++ = X_3_8_5_6_5_5(X000, isD ? X11110011 : X11110001,
+                              mm, (cond << 2) | X000011, nn, dd);
+         goto done; 
+      }
+      case ARM64in_FPCR: {
+         /* Move an integer register to the FPCR; the read direction
+            is not implemented (falls through to bad). */
+         Bool toFPCR = i->ARM64in.FPCR.toFPCR;
+         UInt iReg   = iregEnc(i->ARM64in.FPCR.iReg);
+         if (toFPCR) {
+            /* 0xD51B44 000 Rt  MSR fpcr, rT */
+            *p++ = 0xD51B4400 | (iReg & 0x1F);
+            goto done;
+         }
+         goto bad; // FPCR -> iReg case currently ATC
+      }
+      case ARM64in_FPSR: {
+         /* Move an integer register to/from the FPSR. */
+         Bool toFPSR = i->ARM64in.FPSR.toFPSR;
+         UInt iReg   = iregEnc(i->ARM64in.FPSR.iReg);
+         if (toFPSR) {
+            /* 0xD51B44 001 Rt  MSR fpsr, rT */
+            *p++ = 0xD51B4420 | (iReg & 0x1F);
+         } else {
+            /* 0xD53B44 001 Rt  MRS rT, fpsr */
+            *p++ = 0xD53B4420 | (iReg & 0x1F);
+         }
+         goto done;
+      }
+      case ARM64in_VBinV: {
+         /* 31        23   20 15     9 4
+            010 01110 11 1 m  100001 n d   ADD Vd.2d,  Vn.2d,  Vm.2d
+            010 01110 10 1 m  100001 n d   ADD Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 1 m  100001 n d   ADD Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 00 1 m  100001 n d   ADD Vd.16b, Vn.16b, Vm.16b
+
+            011 01110 11 1 m  100001 n d   SUB Vd.2d,  Vn.2d,  Vm.2d
+            011 01110 10 1 m  100001 n d   SUB Vd.4s,  Vn.4s,  Vm.4s
+            011 01110 01 1 m  100001 n d   SUB Vd.8h,  Vn.8h,  Vm.8h
+            011 01110 00 1 m  100001 n d   SUB Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 10 1 m  100111 n d   MUL Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 1 m  100111 n d   MUL Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 00 1 m  100111 n d   MUL Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 01 1 m  110101 n d   FADD Vd.2d, Vn.2d, Vm.2d
+            010 01110 00 1 m  110101 n d   FADD Vd.4s, Vn.4s, Vm.4s
+            010 01110 11 1 m  110101 n d   FSUB Vd.2d, Vn.2d, Vm.2d
+            010 01110 10 1 m  110101 n d   FSUB Vd.4s, Vn.4s, Vm.4s
+
+            011 01110 01 1 m  110111 n d   FMUL Vd.2d, Vn.2d, Vm.2d
+            011 01110 00 1 m  110111 n d   FMUL Vd.4s, Vn.4s, Vm.4s
+            011 01110 01 1 m  111111 n d   FDIV Vd.2d, Vn.2d, Vm.2d
+            011 01110 00 1 m  111111 n d   FDIV Vd.4s, Vn.4s, Vm.4s
+
+            010 01110 01 1 m  111101 n d   FMAX Vd.2d, Vn.2d, Vm.2d
+            010 01110 00 1 m  111101 n d   FMAX Vd.4s, Vn.4s, Vm.4s
+            010 01110 11 1 m  111101 n d   FMIN Vd.2d, Vn.2d, Vm.2d
+            010 01110 10 1 m  111101 n d   FMIN Vd.4s, Vn.4s, Vm.4s
+
+            011 01110 10 1 m  011001 n d   UMAX Vd.4s,  Vn.4s,  Vm.4s
+            011 01110 01 1 m  011001 n d   UMAX Vd.8h,  Vn.8h,  Vm.8h
+            011 01110 00 1 m  011001 n d   UMAX Vd.16b, Vn.16b, Vm.16b
+
+            011 01110 10 1 m  011011 n d   UMIN Vd.4s,  Vn.4s,  Vm.4s
+            011 01110 01 1 m  011011 n d   UMIN Vd.8h,  Vn.8h,  Vm.8h
+            011 01110 00 1 m  011011 n d   UMIN Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 10 1 m  011001 n d   SMAX Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 1 m  011001 n d   SMAX Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 00 1 m  011001 n d   SMAX Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 10 1 m  011011 n d   SMIN Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 1 m  011011 n d   SMIN Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 00 1 m  011011 n d   SMIN Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 00 1 m  000111 n d   AND Vd, Vn, Vm
+            010 01110 10 1 m  000111 n d   ORR Vd, Vn, Vm
+            011 01110 00 1 m  000111 n d   EOR Vd, Vn, Vm
+
+            011 01110 11 1 m  100011 n d   CMEQ Vd.2d,  Vn.2d,  Vm.2d
+            011 01110 10 1 m  100011 n d   CMEQ Vd.4s,  Vn.4s,  Vm.4s
+            011 01110 01 1 m  100011 n d   CMEQ Vd.8h,  Vn.8h,  Vm.8h
+            011 01110 00 1 m  100011 n d   CMEQ Vd.16b, Vn.16b, Vm.16b
+
+            011 01110 11 1 m  001101 n d   CMHI Vd.2d,  Vn.2d,  Vm.2d
+            011 01110 10 1 m  001101 n d   CMHI Vd.4s,  Vn.4s,  Vm.4s
+            011 01110 01 1 m  001101 n d   CMHI Vd.8h,  Vn.8h,  Vm.8h
+            011 01110 00 1 m  001101 n d   CMHI Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 11 1 m  001101 n d   CMGT Vd.2d,  Vn.2d,  Vm.2d
+            010 01110 10 1 m  001101 n d   CMGT Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 1 m  001101 n d   CMGT Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 00 1 m  001101 n d   CMGT Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 01 1 m  111001 n d   FCMEQ Vd.2d, Vn.2d, Vm.2d
+            010 01110 00 1 m  111001 n d   FCMEQ Vd.4s, Vn.4s, Vm.4s
+
+            011 01110 01 1 m  111001 n d   FCMGE Vd.2d, Vn.2d, Vm.2d
+            011 01110 00 1 m  111001 n d   FCMGE Vd.4s, Vn.4s, Vm.4s
+
+            011 01110 11 1 m  111001 n d   FCMGT Vd.2d, Vn.2d, Vm.2d
+            011 01110 10 1 m  111001 n d   FCMGT Vd.4s, Vn.4s, Vm.4s
+
+            010 01110 00 0 m  000000 n d   TBL Vd.16b, {Vn.16b}, Vm.16b
+
+            010 01110 11 0 m  000110 n d   UZP1 Vd.2d,  Vn.2d,  Vm.2d
+            010 01110 10 0 m  000110 n d   UZP1 Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 0 m  000110 n d   UZP1 Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 00 0 m  000110 n d   UZP1 Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 11 0 m  010110 n d   UZP2 Vd.2d,  Vn.2d,  Vm.2d
+            010 01110 10 0 m  010110 n d   UZP2 Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 0 m  010110 n d   UZP2 Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 00 0 m  010110 n d   UZP2 Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 10 0 m  001110 n d   ZIP1 Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 0 m  001110 n d   ZIP1 Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 10 0 m  001110 n d   ZIP1 Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 10 0 m  011110 n d   ZIP2 Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 0 m  011110 n d   ZIP2 Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 10 0 m  011110 n d   ZIP2 Vd.16b, Vn.16b, Vm.16b
+
+            011 01110 00 1 m  100111 n d   PMUL Vd.16b, Vn.16b, Vm.16b
+
+            000 01110 00 1 m  111000 n d   PMULL Vd.8h, Vn.8b, Vm.8b
+
+            001 01110 10 1 m  110000 n d   UMULL Vd.2d, Vn.2s, Vm.2s
+            001 01110 01 1 m  110000 n d   UMULL Vd.4s, Vn.4h, Vm.4h
+            001 01110 00 1 m  110000 n d   UMULL Vd.8h, Vn.8b, Vm.8b
+
+            000 01110 10 1 m  110000 n d   SMULL Vd.2d, Vn.2s, Vm.2s
+            000 01110 01 1 m  110000 n d   SMULL Vd.4s, Vn.4h, Vm.4h
+            000 01110 00 1 m  110000 n d   SMULL Vd.8h, Vn.8b, Vm.8b
+
+            010 01110 11 1 m  000011 n d   SQADD Vd.2d,  Vn.2d,  Vm.2d
+            010 01110 10 1 m  000011 n d   SQADD Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 1 m  000011 n d   SQADD Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 00 1 m  000011 n d   SQADD Vd.16b, Vn.16b, Vm.16b
+
+            011 01110 11 1 m  000011 n d   UQADD Vd.2d,  Vn.2d,  Vm.2d
+            011 01110 10 1 m  000011 n d   UQADD Vd.4s,  Vn.4s,  Vm.4s
+            011 01110 01 1 m  000011 n d   UQADD Vd.8h,  Vn.8h,  Vm.8h
+            011 01110 00 1 m  000011 n d   UQADD Vd.16b, Vn.16b, Vm.16b
+
+            010 01110 11 1 m  001011 n d   SQSUB Vd.2d,  Vn.2d,  Vm.2d
+            010 01110 10 1 m  001011 n d   SQSUB Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 1 m  001011 n d   SQSUB Vd.8h,  Vn.8h,  Vm.8h
+            010 01110 00 1 m  001011 n d   SQSUB Vd.16b, Vn.16b, Vm.16b
+
+            011 01110 11 1 m  001011 n d   UQSUB Vd.2d,  Vn.2d,  Vm.2d
+            011 01110 10 1 m  001011 n d   UQSUB Vd.4s,  Vn.4s,  Vm.4s
+            011 01110 01 1 m  001011 n d   UQSUB Vd.8h,  Vn.8h,  Vm.8h
+            011 01110 00 1 m  001011 n d   UQSUB Vd.16b, Vn.16b, Vm.16b
+
+            000 01110 10 1 m  110100 n d   SQDMULL Vd.2d, Vn.2s, Vm.2s
+            000 01110 01 1 m  110100 n d   SQDMULL Vd.4s, Vn.4h, Vm.4h
+
+            010 01110 10 1 m  101101 n d   SQDMULH   Vd.4s,  Vn.4s,  Vm.4s
+            010 01110 01 1 m  101101 n d   SQDMULH   Vd.8h,  Vn.8h,  Vm.8h
+            011 01110 10 1 m  101101 n d   SQRDMULH  Vd.4s,  Vn.4s,  Vm.4s
+            011 01110 10 1 m  101101 n d   SQRDMULH  Vd.8h,  Vn.8h,  Vm.8h
+
+            010 01110 sz 1 m  010011 n d   SQSHL@sz   Vd, Vn, Vm
+            010 01110 sz 1 m  010111 n d   SQRSHL@sz  Vd, Vn, Vm
+            011 01110 sz 1 m  010011 n d   UQSHL@sz   Vd, Vn, Vm
+            011 01110 sz 1 m  010111 n d   URQSHL@sz  Vd, Vn, Vm
+
+            010 01110 sz 1 m  010001 n d   SSHL@sz   Vd, Vn, Vm
+            010 01110 sz 1 m  010101 n d   SRSHL@sz  Vd, Vn, Vm
+            011 01110 sz 1 m  010001 n d   USHL@sz   Vd, Vn, Vm
+            011 01110 sz 1 m  010101 n d   URSHL@sz  Vd, Vn, Vm
+
+            010 01110 01 1 m  111111 n d   FRECPS  Vd.2d, Vn.2d, Vm.2d
+            010 01110 00 1 m  111111 n d   FRECPS  Vd.4s, Vn.4s, Vm.4s
+            010 01110 11 1 m  111111 n d   FRSQRTS Vd.2d, Vn.2d, Vm.2d
+            010 01110 10 1 m  111111 n d   FRSQRTS Vd.4s, Vn.4s, Vm.4s
+         */
+         UInt vD = qregEnc(i->ARM64in.VBinV.dst);
+         UInt vN = qregEnc(i->ARM64in.VBinV.argL);
+         UInt vM = qregEnc(i->ARM64in.VBinV.argR);
+         switch (i->ARM64in.VBinV.op) {
+            case ARM64vecb_ADD64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X100001, vN, vD);
+               break;
+            case ARM64vecb_ADD32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X100001, vN, vD);
+               break;
+            case ARM64vecb_ADD16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X100001, vN, vD);
+               break;
+            case ARM64vecb_ADD8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X100001, vN, vD);
+               break;
+            case ARM64vecb_SUB64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X100001, vN, vD);
+               break;
+            case ARM64vecb_SUB32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X100001, vN, vD);
+               break;
+            case ARM64vecb_SUB16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X100001, vN, vD);
+               break;
+            case ARM64vecb_SUB8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X100001, vN, vD);
+               break;
+            case ARM64vecb_MUL32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X100111, vN, vD);
+               break;
+            case ARM64vecb_MUL16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X100111, vN, vD);
+               break;
+            case ARM64vecb_MUL8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X100111, vN, vD);
+               break;
+            case ARM64vecb_FADD64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X110101, vN, vD);
+               break;
+            case ARM64vecb_FADD32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X110101, vN, vD);
+               break;
+            case ARM64vecb_FSUB64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X110101, vN, vD);
+               break;
+            case ARM64vecb_FSUB32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X110101, vN, vD);
+               break;
+            case ARM64vecb_FMUL64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X110111, vN, vD);
+               break;
+            case ARM64vecb_FMUL32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X110111, vN, vD);
+               break;
+            case ARM64vecb_FDIV64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X111111, vN, vD);
+               break;
+            case ARM64vecb_FDIV32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X111111, vN, vD);
+               break;
+
+            case ARM64vecb_FMAX64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X111101, vN, vD);
+               break;
+            case ARM64vecb_FMAX32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X111101, vN, vD);
+               break;
+            case ARM64vecb_FMIN64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X111101, vN, vD);
+               break;
+            case ARM64vecb_FMIN32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X111101, vN, vD);
+               break;
+
+            case ARM64vecb_UMAX32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X011001, vN, vD);
+               break;
+            case ARM64vecb_UMAX16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X011001, vN, vD);
+               break;
+            case ARM64vecb_UMAX8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X011001, vN, vD);
+               break;
+
+            case ARM64vecb_UMIN32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X011011, vN, vD);
+               break;
+            case ARM64vecb_UMIN16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X011011, vN, vD);
+               break;
+            case ARM64vecb_UMIN8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X011011, vN, vD);
+               break;
+
+            case ARM64vecb_SMAX32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X011001, vN, vD);
+               break;
+            case ARM64vecb_SMAX16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X011001, vN, vD);
+               break;
+            case ARM64vecb_SMAX8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X011001, vN, vD);
+               break;
+
+            case ARM64vecb_SMIN32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X011011, vN, vD);
+               break;
+            case ARM64vecb_SMIN16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X011011, vN, vD);
+               break;
+            case ARM64vecb_SMIN8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X011011, vN, vD);
+               break;
+
+            case ARM64vecb_AND:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X000111, vN, vD);
+               break;
+            case ARM64vecb_ORR:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X000111, vN, vD);
+               break;
+            case ARM64vecb_XOR:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X000111, vN, vD);
+               break;
+
+            case ARM64vecb_CMEQ64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X100011, vN, vD);
+               break;
+            case ARM64vecb_CMEQ32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X100011, vN, vD);
+               break;
+            case ARM64vecb_CMEQ16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X100011, vN, vD);
+               break;
+            case ARM64vecb_CMEQ8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X100011, vN, vD);
+               break;
+
+            case ARM64vecb_CMHI64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM,  X001101, vN, vD);
+               break;
+            case ARM64vecb_CMHI32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM,  X001101, vN, vD);
+               break;
+            case ARM64vecb_CMHI16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM,  X001101, vN, vD);
+               break;
+            case ARM64vecb_CMHI8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM,  X001101, vN, vD);
+               break;
+
+            case ARM64vecb_CMGT64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM,  X001101, vN, vD);
+               break;
+            case ARM64vecb_CMGT32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM,  X001101, vN, vD);
+               break;
+            case ARM64vecb_CMGT16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM,  X001101, vN, vD);
+               break;
+            case ARM64vecb_CMGT8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM,  X001101, vN, vD);
+               break;
+
+            case ARM64vecb_FCMEQ64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X111001, vN, vD);
+               break;
+            case ARM64vecb_FCMEQ32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X111001, vN, vD);
+               break;
+
+            case ARM64vecb_FCMGE64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X111001, vN, vD);
+               break;
+            case ARM64vecb_FCMGE32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X111001, vN, vD);
+               break;
+
+            case ARM64vecb_FCMGT64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X111001, vN, vD);
+               break;
+            case ARM64vecb_FCMGT32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X111001, vN, vD);
+               break;
+
+            case ARM64vecb_TBL1:
+               *p++ = X_3_8_5_6_5_5(X010, X01110000, vM, X000000, vN, vD);
+               break;
+
+            case ARM64vecb_UZP164x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110110, vM, X000110, vN, vD);
+               break;
+            case ARM64vecb_UZP132x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110100, vM, X000110, vN, vD);
+               break;
+            case ARM64vecb_UZP116x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110010, vM, X000110, vN, vD);
+               break;
+            case ARM64vecb_UZP18x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110000, vM, X000110, vN, vD);
+               break;
+
+            case ARM64vecb_UZP264x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110110, vM, X010110, vN, vD);
+               break;
+            case ARM64vecb_UZP232x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110100, vM, X010110, vN, vD);
+               break;
+            case ARM64vecb_UZP216x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110010, vM, X010110, vN, vD);
+               break;
+            case ARM64vecb_UZP28x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110000, vM, X010110, vN, vD);
+               break;
+
+            case ARM64vecb_ZIP132x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110100, vM, X001110, vN, vD);
+               break;
+            case ARM64vecb_ZIP116x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110010, vM, X001110, vN, vD);
+               break;
+            case ARM64vecb_ZIP18x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110000, vM, X001110, vN, vD);
+               break;
+
+            case ARM64vecb_ZIP232x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110100, vM, X011110, vN, vD);
+               break;
+            case ARM64vecb_ZIP216x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110010, vM, X011110, vN, vD);
+               break;
+            case ARM64vecb_ZIP28x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110000, vM, X011110, vN, vD);
+               break;
+
+            case ARM64vecb_PMUL8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X100111, vN, vD);
+               break;
+
+            case ARM64vecb_PMULL8x8:
+               *p++ = X_3_8_5_6_5_5(X000, X01110001, vM, X111000, vN, vD);
+               break;
+
+            case ARM64vecb_UMULL2DSS:
+               *p++ = X_3_8_5_6_5_5(X001, X01110101, vM, X110000, vN, vD);
+               break;
+            case ARM64vecb_UMULL4SHH:
+               *p++ = X_3_8_5_6_5_5(X001, X01110011, vM, X110000, vN, vD);
+               break;
+            case ARM64vecb_UMULL8HBB:
+               *p++ = X_3_8_5_6_5_5(X001, X01110001, vM, X110000, vN, vD);
+               break;
+
+            case ARM64vecb_SMULL2DSS:
+               *p++ = X_3_8_5_6_5_5(X000, X01110101, vM, X110000, vN, vD);
+               break;
+            case ARM64vecb_SMULL4SHH:
+               *p++ = X_3_8_5_6_5_5(X000, X01110011, vM, X110000, vN, vD);
+               break;
+            case ARM64vecb_SMULL8HBB:
+               *p++ = X_3_8_5_6_5_5(X000, X01110001, vM, X110000, vN, vD);
+               break;
+
+            case ARM64vecb_SQADD64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X000011, vN, vD);
+               break;
+            case ARM64vecb_SQADD32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X000011, vN, vD);
+               break;
+            case ARM64vecb_SQADD16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X000011, vN, vD);
+               break;
+            case ARM64vecb_SQADD8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X000011, vN, vD);
+               break;
+
+            case ARM64vecb_UQADD64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X000011, vN, vD);
+               break;
+            case ARM64vecb_UQADD32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X000011, vN, vD);
+               break;
+            case ARM64vecb_UQADD16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X000011, vN, vD);
+               break;
+            case ARM64vecb_UQADD8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X000011, vN, vD);
+               break;
+
+            case ARM64vecb_SQSUB64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X001011, vN, vD);
+               break;
+            case ARM64vecb_SQSUB32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X001011, vN, vD);
+               break;
+            case ARM64vecb_SQSUB16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X001011, vN, vD);
+               break;
+            case ARM64vecb_SQSUB8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X001011, vN, vD);
+               break;
+
+            case ARM64vecb_UQSUB64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X001011, vN, vD);
+               break;
+            case ARM64vecb_UQSUB32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X001011, vN, vD);
+               break;
+            case ARM64vecb_UQSUB16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X001011, vN, vD);
+               break;
+            case ARM64vecb_UQSUB8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X001011, vN, vD);
+               break;
+
+            case ARM64vecb_SQDMULL2DSS:
+               *p++ = X_3_8_5_6_5_5(X000, X01110101, vM, X110100, vN, vD);
+               break;
+            case ARM64vecb_SQDMULL4SHH:
+               *p++ = X_3_8_5_6_5_5(X000, X01110011, vM, X110100, vN, vD);
+               break;
+
+            case ARM64vecb_SQDMULH32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X101101, vN, vD);
+               break;
+            case ARM64vecb_SQDMULH16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X101101, vN, vD);
+               break;
+            case ARM64vecb_SQRDMULH32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X101101, vN, vD);
+               break;
+            case ARM64vecb_SQRDMULH16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X101101, vN, vD);
+               break;
+
+            case ARM64vecb_SQSHL64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X010011, vN, vD);
+               break;
+            case ARM64vecb_SQSHL32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X010011, vN, vD);
+               break;
+            case ARM64vecb_SQSHL16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X010011, vN, vD);
+               break;
+            case ARM64vecb_SQSHL8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X010011, vN, vD);
+               break;
+
+            case ARM64vecb_SQRSHL64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X010111, vN, vD);
+               break;
+            case ARM64vecb_SQRSHL32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X010111, vN, vD);
+               break;
+            case ARM64vecb_SQRSHL16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X010111, vN, vD);
+               break;
+            case ARM64vecb_SQRSHL8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X010111, vN, vD);
+               break;
+
+            case ARM64vecb_UQSHL64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X010011, vN, vD);
+               break;
+            case ARM64vecb_UQSHL32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X010011, vN, vD);
+               break;
+            case ARM64vecb_UQSHL16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X010011, vN, vD);
+               break;
+            case ARM64vecb_UQSHL8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X010011, vN, vD);
+               break;
+
+            case ARM64vecb_UQRSHL64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X010111, vN, vD);
+               break;
+            case ARM64vecb_UQRSHL32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X010111, vN, vD);
+               break;
+            case ARM64vecb_UQRSHL16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X010111, vN, vD);
+               break;
+            case ARM64vecb_UQRSHL8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X010111, vN, vD);
+               break;
+
+            case ARM64vecb_SSHL64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X010001, vN, vD);
+               break;
+            case ARM64vecb_SSHL32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X010001, vN, vD);
+               break;
+            case ARM64vecb_SSHL16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X010001, vN, vD);
+               break;
+            case ARM64vecb_SSHL8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X010001, vN, vD);
+               break;
+
+            case ARM64vecb_SRSHL64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X010101, vN, vD);
+               break;
+            case ARM64vecb_SRSHL32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X010101, vN, vD);
+               break;
+            case ARM64vecb_SRSHL16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X010101, vN, vD);
+               break;
+            case ARM64vecb_SRSHL8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X010101, vN, vD);
+               break;
+
+            case ARM64vecb_USHL64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X010001, vN, vD);
+               break;
+            case ARM64vecb_USHL32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X010001, vN, vD);
+               break;
+            case ARM64vecb_USHL16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X010001, vN, vD);
+               break;
+            case ARM64vecb_USHL8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X010001, vN, vD);
+               break;
+
+            case ARM64vecb_URSHL64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, vM, X010101, vN, vD);
+               break;
+            case ARM64vecb_URSHL32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, vM, X010101, vN, vD);
+               break;
+            case ARM64vecb_URSHL16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, vM, X010101, vN, vD);
+               break;
+            case ARM64vecb_URSHL8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, vM, X010101, vN, vD);
+               break;
+
+            case ARM64vecb_FRECPS64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, vM, X111111, vN, vD);
+               break;
+            case ARM64vecb_FRECPS32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, vM, X111111, vN, vD);
+               break;
+            case ARM64vecb_FRSQRTS64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, vM, X111111, vN, vD);
+               break;
+            case ARM64vecb_FRSQRTS32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, vM, X111111, vN, vD);
+               break;
+
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+      case ARM64in_VModifyV: {
+         /* 31        23   20    15     9 4
+            010 01110 sz 1 00000 001110 n d   SUQADD@sz  Vd, Vn
+            011 01110 sz 1 00000 001110 n d   USQADD@sz  Vd, Vn
+         */
+         UInt vD = qregEnc(i->ARM64in.VModifyV.mod);
+         UInt vN = qregEnc(i->ARM64in.VModifyV.arg);
+         switch (i->ARM64in.VModifyV.op) {
+            case ARM64vecmo_SUQADD64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, X00000, X001110, vN, vD);
+               break;
+            case ARM64vecmo_SUQADD32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, X00000, X001110, vN, vD);
+               break;
+            case ARM64vecmo_SUQADD16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, X00000, X001110, vN, vD);
+               break;
+            case ARM64vecmo_SUQADD8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, X00000, X001110, vN, vD);
+               break;
+            case ARM64vecmo_USQADD64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, X00000, X001110, vN, vD);
+               break;
+            case ARM64vecmo_USQADD32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, X00000, X001110, vN, vD);
+               break;
+            case ARM64vecmo_USQADD16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, X00000, X001110, vN, vD);
+               break;
+            case ARM64vecmo_USQADD8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, X00000, X001110, vN, vD);
+               break;
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+      case ARM64in_VUnaryV: {
+         /* 31        23   20    15     9 4
+            010 01110 11 1 00000 111110 n d  FABS Vd.2d,  Vn.2d
+            010 01110 10 1 00000 111110 n d  FABS Vd.4s,  Vn.4s
+            011 01110 11 1 00000 111110 n d  FNEG Vd.2d,  Vn.2d
+            011 01110 10 1 00000 111110 n d  FNEG Vd.4s,  Vn.4s
+            011 01110 00 1 00000 010110 n d  NOT  Vd.16b, Vn.16b
+
+            010 01110 11 1 00000 101110 n d  ABS  Vd.2d,  Vn.2d
+            010 01110 10 1 00000 101110 n d  ABS  Vd.4s,  Vn.4s
+            010 01110 01 1 00000 101110 n d  ABS  Vd.8h,  Vn.8h
+            010 01110 00 1 00000 101110 n d  ABS  Vd.16b, Vn.16b
+
+            010 01110 10 1 00000 010010 n d  CLS  Vd.4s,  Vn.4s
+            010 01110 01 1 00000 010010 n d  CLS  Vd.8h,  Vn.8h
+            010 01110 00 1 00000 010010 n d  CLS  Vd.16b, Vn.16b
+
+            011 01110 10 1 00000 010010 n d  CLZ  Vd.4s,  Vn.4s
+            011 01110 01 1 00000 010010 n d  CLZ  Vd.8h,  Vn.8h
+            011 01110 00 1 00000 010010 n d  CLZ  Vd.16b, Vn.16b
+
+            010 01110 00 1 00000 010110 n d  CNT  Vd.16b, Vn.16b
+
+            011 01110 01 1 00000 010110 n d  RBIT  Vd.16b, Vn.16b
+            010 01110 00 1 00000 000110 n d  REV16 Vd.16b, Vn.16b
+            011 01110 00 1 00000 000010 n d  REV32 Vd.16b, Vn.16b
+            011 01110 01 1 00000 000010 n d  REV32 Vd.8h, Vn.8h
+
+            010 01110 00 1 00000 000010 n d  REV64 Vd.16b, Vn.16b
+            010 01110 01 1 00000 000010 n d  REV64 Vd.8h, Vn.8h
+            010 01110 10 1 00000 000010 n d  REV64 Vd.4s, Vn.4s
+
+            010 01110 10 1 00001 110010 n d  URECPE Vd.4s, Vn.4s
+            011 01110 10 1 00001 110010 n d  URSQRTE Vd.4s, Vn.4s
+
+            010 01110 11 1 00001 110110 n d  FRECPE Vd.2d, Vn.2d
+            010 01110 10 1 00001 110110 n d  FRECPE Vd.4s, Vn.4s
+
+            011 01110 11 1 00001 110110 n d  FRSQRTE Vd.2d, Vn.2d
+            011 01110 10 1 00001 110110 n d  FRSQRTE Vd.4s, Vn.4s
+
+            011 01110 11 1 00001 111110 n d  FSQRT Vd.2d, Vn.2d
+            011 01110 10 1 00001 111110 n d  FSQRT Vd.4s, Vn.4s
+         */
+         UInt vD = qregEnc(i->ARM64in.VUnaryV.dst);
+         UInt vN = qregEnc(i->ARM64in.VUnaryV.arg);
+         switch (i->ARM64in.VUnaryV.op) {
+            case ARM64vecu_FABS64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, X00000, X111110, vN, vD);
+               break;
+            case ARM64vecu_FABS32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, X00000, X111110, vN, vD);
+               break;
+            case ARM64vecu_FNEG64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, X00000, X111110, vN, vD);
+               break;
+            case ARM64vecu_FNEG32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, X00000, X111110, vN, vD);
+               break;
+            case ARM64vecu_NOT:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, X00000, X010110, vN, vD);
+               break;
+            case ARM64vecu_ABS64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, X00000, X101110, vN, vD);
+               break;
+            case ARM64vecu_ABS32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, X00000, X101110, vN, vD);
+               break;
+            case ARM64vecu_ABS16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, X00000, X101110, vN, vD);
+               break;
+            case ARM64vecu_ABS8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, X00000, X101110, vN, vD);
+               break;
+            case ARM64vecu_CLS32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, X00000, X010010, vN, vD);
+               break;
+            case ARM64vecu_CLS16x8:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, X00000, X010010, vN, vD);
+               break;
+            case ARM64vecu_CLS8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, X00000, X010010, vN, vD);
+               break;
+            case ARM64vecu_CLZ32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, X00000, X010010, vN, vD);
+               break;
+            case ARM64vecu_CLZ16x8:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, X00000, X010010, vN, vD);
+               break;
+            case ARM64vecu_CLZ8x16:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, X00000, X010010, vN, vD);
+               break;
+            case ARM64vecu_CNT8x16:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, X00000, X010110, vN, vD);
+               break;
+            case ARM64vecu_RBIT:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, X00000, X010110, vN, vD);
+               break;
+            case ARM64vecu_REV1616B:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, X00000, X000110, vN, vD);
+               break;
+            case ARM64vecu_REV3216B:
+               *p++ = X_3_8_5_6_5_5(X011, X01110001, X00000, X000010, vN, vD);
+               break;
+            case ARM64vecu_REV328H:
+               *p++ = X_3_8_5_6_5_5(X011, X01110011, X00000, X000010, vN, vD);
+               break;
+            case ARM64vecu_REV6416B:
+               *p++ = X_3_8_5_6_5_5(X010, X01110001, X00000, X000010, vN, vD);
+               break;
+            case ARM64vecu_REV648H:
+               *p++ = X_3_8_5_6_5_5(X010, X01110011, X00000, X000010, vN, vD);
+               break;
+            case ARM64vecu_REV644S:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, X00000, X000010, vN, vD);
+               break;
+            case ARM64vecu_URECPE32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, X00001, X110010, vN, vD);
+               break;
+            case ARM64vecu_URSQRTE32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, X00001, X110010, vN, vD);
+               break;
+            case ARM64vecu_FRECPE64x2:
+               *p++ = X_3_8_5_6_5_5(X010, X01110111, X00001, X110110, vN, vD);
+               break;
+            case ARM64vecu_FRECPE32x4:
+               *p++ = X_3_8_5_6_5_5(X010, X01110101, X00001, X110110, vN, vD);
+               break;
+            case ARM64vecu_FRSQRTE64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, X00001, X110110, vN, vD);
+               break;
+            case ARM64vecu_FRSQRTE32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, X00001, X110110, vN, vD);
+               break;
+            case ARM64vecu_FSQRT64x2:
+               *p++ = X_3_8_5_6_5_5(X011, X01110111, X00001, X111110, vN, vD);
+               break;
+            case ARM64vecu_FSQRT32x4:
+               *p++ = X_3_8_5_6_5_5(X011, X01110101, X00001, X111110, vN, vD);
+               break;
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+      case ARM64in_VNarrowV: {
+         /* 31        23 21      15     9 4
+            000 01110 00 1,00001 001010 n d  XTN Vd.8b, Vn.8h
+            000 01110 01 1,00001 001010 n d  XTN Vd.4h, Vn.4s
+            000 01110 10 1,00001 001010 n d  XTN Vd.2s, Vn.2d
+
+            001 01110 00 1,00001 001010 n d  SQXTUN Vd.8b, Vn.8h
+            001 01110 01 1,00001 001010 n d  SQXTUN Vd.4h, Vn.4s
+            001 01110 10 1,00001 001010 n d  SQXTUN Vd.2s, Vn.2d
+
+            000 01110 00 1,00001 010010 n d  SQXTN Vd.8b, Vn.8h
+            000 01110 01 1,00001 010010 n d  SQXTN Vd.4h, Vn.4s
+            000 01110 10 1,00001 010010 n d  SQXTN Vd.2s, Vn.2d
+
+            001 01110 00 1,00001 010010 n d  UQXTN Vd.8b, Vn.8h
+            001 01110 01 1,00001 010010 n d  UQXTN Vd.4h, Vn.4s
+            001 01110 10 1,00001 010010 n d  UQXTN Vd.2s, Vn.2d
+         */
+         UInt vD = qregEnc(i->ARM64in.VNarrowV.dst);
+         UInt vN = qregEnc(i->ARM64in.VNarrowV.src);
+         UInt dszBlg2 = i->ARM64in.VNarrowV.dszBlg2;
+         vassert(dszBlg2 >= 0 && dszBlg2 <= 2);
+         switch (i->ARM64in.VNarrowV.op) {
+            case ARM64vecna_XTN:
+               *p++ = X_3_8_5_6_5_5(X000, X01110001 | (dszBlg2 << 1),
+                                    X00001, X001010, vN, vD);
+               goto done;
+            case ARM64vecna_SQXTUN:
+               *p++ = X_3_8_5_6_5_5(X001, X01110001 | (dszBlg2 << 1),
+                                    X00001, X001010, vN, vD);
+               goto done;
+            case ARM64vecna_SQXTN:
+               *p++ = X_3_8_5_6_5_5(X000, X01110001 | (dszBlg2 << 1),
+                                    X00001, X010010, vN, vD);
+               goto done;
+            case ARM64vecna_UQXTN:
+               *p++ = X_3_8_5_6_5_5(X001, X01110001 | (dszBlg2 << 1),
+                                    X00001, X010010, vN, vD);
+               goto done;
+            default:
+               break;
+         }
+         goto bad;
+      }
+      case ARM64in_VShiftImmV: {
+         /*
+            011 011110 immh immb 000001 n d  USHR     Vd.T, Vn.T, #sh
+            010 011110 immh immb 000001 n d  SSHR     Vd.T, Vn.T, #sh
+
+            001 011110 immh immb 100101 n d  UQSHRN   ,,#sh
+            000 011110 immh immb 100101 n d  SQSHRN   ,,#sh
+            001 011110 immh immb 100001 n d  SQSHRUN  ,,#sh
+
+            001 011110 immh immb 100111 n d  UQRSHRN  ,,#sh
+            000 011110 immh immb 100111 n d  SQRSHRN  ,,#sh
+            001 011110 immh immb 100011 n d  SQRSHRUN ,,#sh
+
+            where immh:immb
+               = case T of 
+                    2d  | sh in 1..64 -> let xxxxxx = 64-sh in 1xxx:xxx
+                    4s  | sh in 1..32 -> let  xxxxx = 32-sh in 01xx:xxx
+                    8h  | sh in 1..16 -> let   xxxx = 16-sh in 001x:xxx
+                    16b | sh in 1..8  -> let    xxx =  8-sh in 0001:xxx
+
+            010 011110 immh immb 010101 n d  SHL    Vd.T, Vn.T, #sh
+
+            011 011110 immh immb 011101 n d  UQSHL  Vd.T, Vn.T, #sh
+            010 011110 immh immb 011101 n d  SQSHL  Vd.T, Vn.T, #sh
+            011 011110 immh immb 011001 n d  SQSHLU Vd.T, Vn.T, #sh
+
+            where immh:immb
+               = case T of 
+                    2d  | sh in 0..63 -> let xxxxxx = sh in 1xxx:xxx
+                    4s  | sh in 0..31 -> let  xxxxx = sh in 01xx:xxx
+                    8h  | sh in 0..15 -> let   xxxx = sh in 001x:xxx
+                    16b | sh in 0..7  -> let    xxx = sh in 0001:xxx
+         */
+         UInt vD   = qregEnc(i->ARM64in.VShiftImmV.dst);
+         UInt vN   = qregEnc(i->ARM64in.VShiftImmV.src);
+         UInt sh   = i->ARM64in.VShiftImmV.amt;
+         UInt tmpl = 0; /* invalid */
+
+         const UInt tmpl_USHR
+            = X_3_6_7_6_5_5(X011, X011110, 0, X000001, vN, vD);
+         const UInt tmpl_SSHR
+            = X_3_6_7_6_5_5(X010, X011110, 0, X000001, vN, vD);
+
+         const UInt tmpl_UQSHRN
+            = X_3_6_7_6_5_5(X001, X011110, 0, X100101, vN, vD);
+         const UInt tmpl_SQSHRN
+            = X_3_6_7_6_5_5(X000, X011110, 0, X100101, vN, vD);
+         const UInt tmpl_SQSHRUN
+            = X_3_6_7_6_5_5(X001, X011110, 0, X100001, vN, vD);
+
+         const UInt tmpl_UQRSHRN
+            = X_3_6_7_6_5_5(X001, X011110, 0, X100111, vN, vD);
+         const UInt tmpl_SQRSHRN
+            = X_3_6_7_6_5_5(X000, X011110, 0, X100111, vN, vD);
+         const UInt tmpl_SQRSHRUN
+            = X_3_6_7_6_5_5(X001, X011110, 0, X100011, vN, vD);
+
+         const UInt tmpl_SHL
+            = X_3_6_7_6_5_5(X010, X011110, 0, X010101, vN, vD);
+
+         const UInt tmpl_UQSHL
+            = X_3_6_7_6_5_5(X011, X011110, 0, X011101, vN, vD);
+         const UInt tmpl_SQSHL
+            = X_3_6_7_6_5_5(X010, X011110, 0, X011101, vN, vD);
+         const UInt tmpl_SQSHLU
+            = X_3_6_7_6_5_5(X011, X011110, 0, X011001, vN, vD);
+
+         switch (i->ARM64in.VShiftImmV.op) {
+            case ARM64vecshi_SSHR64x2:    tmpl = tmpl_SSHR;     goto right64x2;
+            case ARM64vecshi_USHR64x2:    tmpl = tmpl_USHR;     goto right64x2;
+            case ARM64vecshi_SHL64x2:     tmpl = tmpl_SHL;      goto left64x2;
+            case ARM64vecshi_UQSHL64x2:   tmpl = tmpl_UQSHL;    goto left64x2;
+            case ARM64vecshi_SQSHL64x2:   tmpl = tmpl_SQSHL;    goto left64x2;
+            case ARM64vecshi_SQSHLU64x2:  tmpl = tmpl_SQSHLU;   goto left64x2;
+            case ARM64vecshi_SSHR32x4:    tmpl = tmpl_SSHR;     goto right32x4;
+            case ARM64vecshi_USHR32x4:    tmpl = tmpl_USHR;     goto right32x4;
+            case ARM64vecshi_UQSHRN2SD:   tmpl = tmpl_UQSHRN;   goto right32x4;
+            case ARM64vecshi_SQSHRN2SD:   tmpl = tmpl_SQSHRN;   goto right32x4;
+            case ARM64vecshi_SQSHRUN2SD:  tmpl = tmpl_SQSHRUN;  goto right32x4;
+            case ARM64vecshi_UQRSHRN2SD:  tmpl = tmpl_UQRSHRN;  goto right32x4;
+            case ARM64vecshi_SQRSHRN2SD:  tmpl = tmpl_SQRSHRN;  goto right32x4;
+            case ARM64vecshi_SQRSHRUN2SD: tmpl = tmpl_SQRSHRUN; goto right32x4;
+            case ARM64vecshi_SHL32x4:     tmpl = tmpl_SHL;      goto left32x4;
+            case ARM64vecshi_UQSHL32x4:   tmpl = tmpl_UQSHL;    goto left32x4;
+            case ARM64vecshi_SQSHL32x4:   tmpl = tmpl_SQSHL;    goto left32x4;
+            case ARM64vecshi_SQSHLU32x4:  tmpl = tmpl_SQSHLU;   goto left32x4;
+            case ARM64vecshi_SSHR16x8:    tmpl = tmpl_SSHR;     goto right16x8;
+            case ARM64vecshi_USHR16x8:    tmpl = tmpl_USHR;     goto right16x8;
+            case ARM64vecshi_UQSHRN4HS:   tmpl = tmpl_UQSHRN;   goto right16x8;
+            case ARM64vecshi_SQSHRN4HS:   tmpl = tmpl_SQSHRN;   goto right16x8;
+            case ARM64vecshi_SQSHRUN4HS:  tmpl = tmpl_SQSHRUN;  goto right16x8;
+            case ARM64vecshi_UQRSHRN4HS:  tmpl = tmpl_UQRSHRN;  goto right16x8;
+            case ARM64vecshi_SQRSHRN4HS:  tmpl = tmpl_SQRSHRN;  goto right16x8;
+            case ARM64vecshi_SQRSHRUN4HS: tmpl = tmpl_SQRSHRUN; goto right16x8;
+            case ARM64vecshi_SHL16x8:     tmpl = tmpl_SHL;      goto left16x8;
+            case ARM64vecshi_UQSHL16x8:   tmpl = tmpl_UQSHL;    goto left16x8;
+            case ARM64vecshi_SQSHL16x8:   tmpl = tmpl_SQSHL;    goto left16x8;
+            case ARM64vecshi_SQSHLU16x8:  tmpl = tmpl_SQSHLU;   goto left16x8;
+            case ARM64vecshi_SSHR8x16:    tmpl = tmpl_SSHR;     goto right8x16;
+            case ARM64vecshi_USHR8x16:    tmpl = tmpl_USHR;     goto right8x16;
+            case ARM64vecshi_UQSHRN8BH:   tmpl = tmpl_UQSHRN;   goto right8x16;
+            case ARM64vecshi_SQSHRN8BH:   tmpl = tmpl_SQSHRN;   goto right8x16;
+            case ARM64vecshi_SQSHRUN8BH:  tmpl = tmpl_SQSHRUN;  goto right8x16;
+            case ARM64vecshi_UQRSHRN8BH:  tmpl = tmpl_UQRSHRN;  goto right8x16;
+            case ARM64vecshi_SQRSHRN8BH:  tmpl = tmpl_SQRSHRN;  goto right8x16;
+            case ARM64vecshi_SQRSHRUN8BH: tmpl = tmpl_SQRSHRUN; goto right8x16;
+            case ARM64vecshi_SHL8x16:     tmpl = tmpl_SHL;      goto left8x16;
+            case ARM64vecshi_UQSHL8x16:   tmpl = tmpl_UQSHL;    goto left8x16;
+            case ARM64vecshi_SQSHL8x16:   tmpl = tmpl_SQSHL;    goto left8x16;
+            case ARM64vecshi_SQSHLU8x16:  tmpl = tmpl_SQSHLU;   goto left8x16;
+
+            default: break;
+
+            right64x2:
+               if (sh >= 1 && sh <= 63) {
+                  *p++ = tmpl | X_3_6_7_6_5_5(0,0, X1000000 | (64-sh), 0,0,0);
+                  goto done;
+               }
+               break;
+            right32x4:
+               if (sh >= 1 && sh <= 32) {
+                  *p++ = tmpl | X_3_6_7_6_5_5(0,0, X0100000 | (32-sh), 0,0,0);
+                  goto done;
+               }
+               break;
+            right16x8:
+               if (sh >= 1 && sh <= 16) {
+                  *p++ = tmpl | X_3_6_7_6_5_5(0,0, X0010000 | (16-sh), 0,0,0);
+                  goto done;
+               }
+               break;
+            right8x16:
+               if (sh >= 1 && sh <= 8) {
+                  *p++ = tmpl | X_3_6_7_6_5_5(0,0, X0001000 | (8-sh), 0,0,0);
+                  goto done;
+               }
+               break;
+
+            left64x2:
+               if (sh >= 0 && sh <= 63) {
+                  *p++ = tmpl | X_3_6_7_6_5_5(0,0, X1000000 | sh, 0,0,0);
+                  goto done;
+               }
+               break;
+            left32x4:
+               if (sh >= 0 && sh <= 31) {
+                  *p++ = tmpl | X_3_6_7_6_5_5(0,0, X0100000 | sh, 0,0,0);
+                  goto done;
+               }
+               break;
+            left16x8:
+               if (sh >= 0 && sh <= 15) {
+                  *p++ = tmpl | X_3_6_7_6_5_5(0,0, X0010000 | sh, 0,0,0);
+                  goto done;
+               }
+               break;
+            left8x16:
+               if (sh >= 0 && sh <= 7) {
+                  *p++ = tmpl | X_3_6_7_6_5_5(0,0, X0001000 | sh, 0,0,0);
+                  goto done;
+               }
+               break;
+         }
+         goto bad;
+      }
+      case ARM64in_VExtV: {
+         /*
+            011 01110 000 m 0 imm4 0 n d  EXT Vd.16b, Vn.16b, Vm.16b, #imm4
+            where imm4 = the shift amount, in bytes,
+                  Vn is low operand, Vm is high operand
+         */
+         UInt vD   = qregEnc(i->ARM64in.VExtV.dst);
+         UInt vN   = qregEnc(i->ARM64in.VExtV.srcLo);
+         UInt vM   = qregEnc(i->ARM64in.VExtV.srcHi);
+         UInt imm4 = i->ARM64in.VExtV.amtB;
+         vassert(imm4 >= 1 && imm4 <= 15);
+         *p++ = X_3_8_5_6_5_5(X011, X01110000, vM,
+                              X000000 | (imm4 << 1), vN, vD);
+         goto done;
+      }
+      case ARM64in_VImmQ: {
+         UInt   rQ  = qregEnc(i->ARM64in.VImmQ.rQ);
+         UShort imm = i->ARM64in.VImmQ.imm;
+         vassert(rQ < 32);
+         switch (imm) {
+            case 0x0000:
+               // movi rQ.4s, #0x0 == 0x4F 0x00 0x04 000 rQ
+               *p++ = 0x4F000400 | rQ;
+               goto done;
+            case 0x0001:
+               // movi rQ, #0xFF == 0x2F 0x00 0xE4 001 rQ
+               *p++ = 0x2F00E420 | rQ;
+               goto done;
+            case 0x0003:
+               // movi rQ, #0xFFFF == 0x2F 0x00 0xE4 011 rQ
+               *p++ = 0x2F00E460 | rQ;
+               goto done;
+            case 0x000F:
+               // movi rQ, #0xFFFFFFFF == 0x2F 0x00 0xE5 111 rQ
+               *p++ = 0x2F00E5E0 | rQ;
+               goto done;
+            case 0x003F:
+               // movi rQ, #0xFFFFFFFFFFFF == 0x2F 0x01 0xE7 111 rQ
+               *p++ = 0x2F01E7E0 | rQ;
+               goto done;
+            case 0x00FF:
+               // movi rQ, #0xFFFFFFFFFFFFFFFF == 0x2F 0x07 0xE7 111 rQ
+               *p++ = 0x2F07E7E0 | rQ;
+               goto done;
+            case 0xFFFF:
+               // mvni rQ.4s, #0x0 == 0x6F 0x00 0x04 000 rQ
+               *p++ = 0x6F000400 | rQ;
+               goto done;
+            default:
+               break;
+         }
+         goto bad; /* no other handled cases right now */
+      }
+
+      case ARM64in_VDfromX: {
+         /* INS Vd.D[0], rX
+            0100 1110 0000 1000 0001 11 nn dd   INS Vd.D[0], Xn
+            This isn't wonderful, in the sense that the upper half of
+            the vector register stays unchanged and thus the insn is
+            data dependent on its output register. */
+         UInt dd = dregEnc(i->ARM64in.VDfromX.rD);
+         UInt xx = iregEnc(i->ARM64in.VDfromX.rX);
+         vassert(xx < 31);
+         *p++ = 0x4E081C00 | X_2_6_2_12_5_5(0,0,0,0,xx,dd);
+         goto done;
+      }
+
+      case ARM64in_VQfromX: {
+         /* FMOV D, X
+            1001 1110 0110 0111 0000 00 nn dd   FMOV Vd.D[0], Xn
+            I think this zeroes out the top half of the destination, which
+            is what we need.  TODO: can we do VDfromX and VQfromXX better? */
+         UInt dd = qregEnc(i->ARM64in.VQfromX.rQ);
+         UInt xx = iregEnc(i->ARM64in.VQfromX.rXlo);
+         vassert(xx < 31);
+         *p++ = 0x9E670000 | X_2_6_2_12_5_5(0,0,0,0,xx,dd);
+         goto done;
+      }
+
+      case ARM64in_VQfromXX: {
+         /* What we really generate is a two insn sequence:
+               INS Vd.D[0], Xlo; INS Vd.D[1], Xhi
+            0100 1110 0000 1000 0001 11 nn dd   INS Vd.D[0], Xn
+            0100 1110 0001 1000 0001 11 nn dd   INS Vd.D[1], Xn
+         */
+         UInt qq  = qregEnc(i->ARM64in.VQfromXX.rQ);
+         UInt xhi = iregEnc(i->ARM64in.VQfromXX.rXhi);
+         UInt xlo = iregEnc(i->ARM64in.VQfromXX.rXlo);
+         vassert(xhi < 31 && xlo < 31);
+         *p++ = 0x4E081C00 | X_2_6_2_12_5_5(0,0,0,0,xlo,qq);
+         *p++ = 0x4E181C00 | X_2_6_2_12_5_5(0,0,0,0,xhi,qq);
+         goto done;
+      }
+
+      case ARM64in_VXfromQ: {
+         /* 010 0111 0000 01000 001111 nn dd  UMOV Xd, Vn.D[0]
+            010 0111 0000 11000 001111 nn dd  UMOV Xd, Vn.D[1]
+         */
+         UInt dd     = iregEnc(i->ARM64in.VXfromQ.rX);
+         UInt nn     = qregEnc(i->ARM64in.VXfromQ.rQ);
+         UInt laneNo = i->ARM64in.VXfromQ.laneNo;
+         vassert(dd < 31);
+         vassert(laneNo < 2);
+         *p++ = X_3_8_5_6_5_5(X010, X01110000,
+                              laneNo == 1 ? X11000 : X01000, X001111, nn, dd);
+         goto done;
+      }
+
+      case ARM64in_VXfromDorS: {
+         /* 000 11110001 00110 000000 n d     FMOV Wd, Sn
+            100 11110011 00110 000000 n d     FMOV Xd, Dn
+         */
+         UInt dd    = iregEnc(i->ARM64in.VXfromDorS.rX);
+         UInt nn    = dregEnc(i->ARM64in.VXfromDorS.rDorS);
+         Bool fromD = i->ARM64in.VXfromDorS.fromD;
+         vassert(dd < 31);
+         *p++ = X_3_8_5_6_5_5(fromD ? X100 : X000,
+                              fromD ? X11110011 : X11110001,
+                              X00110, X000000, nn, dd);
+         goto done;
+      }
+
+      case ARM64in_VMov: {
+         /* 000 11110 00 10000 00 10000 n d   FMOV Sd, Sn
+            000 11110 01 10000 00 10000 n d   FMOV Dd, Dn
+            010 01110 10 1 n    0 00111 n d   MOV Vd.16b, Vn.16b
+         */
+        HReg rD = i->ARM64in.VMov.dst;
+        HReg rN = i->ARM64in.VMov.src;
+        switch (i->ARM64in.VMov.szB) {
+           case 16: {
+              UInt dd = qregEnc(rD);
+              UInt nn = qregEnc(rN);
+              *p++ = X_3_8_5_6_5_5(X010, X01110101, nn, X000111, nn, dd);
+              goto done;
+           }
+           case 8: {
+              UInt dd = dregEnc(rD);
+              UInt nn = dregEnc(rN);
+              *p++ = X_3_8_5_6_5_5(X000, X11110011, X00000, X010000, nn, dd);
+              goto done;
+           }
+           default: 
+              break;
+        }
+        goto bad;
+      }
+
+      case ARM64in_EvCheck: {
+         /* The sequence is fixed (canned) except for the two amodes
+            supplied by the insn.  These don't change the length, though.
+            We generate:
+               ldr  w9, [x21 + #8]   8 == offsetof(host_EvC_COUNTER)
+               subs w9, w9, #1
+               str  w9, [x21 + #8]   8 == offsetof(host_EvC_COUNTER)
+               bpl  nofail
+               ldr  x9, [x21 + #0]   0 == offsetof(host_EvC_FAILADDR)
+               br   x9
+              nofail:
+         */
+         UInt* p0 = p;
+         p = do_load_or_store32(p, True/*isLoad*/, /*w*/9,
+                                i->ARM64in.EvCheck.amCounter);
+         *p++ = 0x71000529; /* subs w9, w9, #1 */
+         p = do_load_or_store32(p, False/*!isLoad*/, /*w*/9,
+                                i->ARM64in.EvCheck.amCounter);
+         *p++ = 0x54000065; /* bpl nofail */
+         p = do_load_or_store64(p, True/*isLoad*/, /*x*/9,
+                                i->ARM64in.EvCheck.amFailAddr);
+         *p++ = 0xD61F0120; /* br x9 */
+         /* nofail: */
+
+         /* Crosscheck */
+         vassert(evCheckSzB_ARM64() == (UChar*)p - (UChar*)p0);
+         goto done;
+      }
+
+      case ARM64in_ProfInc: {
+         /* We generate:
+              (ctrP is unknown now, so use 0x6555'7555'8555'9566 in the
+              expectation that a later call to LibVEX_patchProfCtr
+              will be used to fill in the immediate fields once the
+              right value is known.)
+            imm64-exactly4 x9, 0x6555'7555'8555'9566
+            ldr  x8, [x9]
+            add  x8, x8, #1
+            str  x8, [x9]
+         */
+         p = imm64_to_ireg_EXACTLY4(p, /*x*/9, 0x6555755585559566ULL);
+         *p++ = 0xF9400128;
+         *p++ = 0x91000508;
+         *p++ = 0xF9000128;
+         /* Tell the caller .. */
+         vassert(!(*is_profInc));
+         *is_profInc = True;
+         goto done;
+      }
+
+      /* ... */
+      default: 
+         goto bad;
+    }
+
+  bad:
+   ppARM64Instr(i);
+   vpanic("emit_ARM64Instr");
+   /*NOTREACHED*/
+
+  done:
+   vassert(((UChar*)p) - &buf[0] <= 36);
+   return ((UChar*)p) - &buf[0];
+}
+
+
+/* How big is an event check?  See case for ARM64in_EvCheck in
+   emit_ARM64Instr just above.  That crosschecks what this returns, so
+   we can tell if we're inconsistent.  24 == 6 insns * 4 bytes, for
+   the fixed-length sequence ldr/subs/str/bpl/ldr/br emitted there. */
+Int evCheckSzB_ARM64 (void)
+{
+   return 24;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above.  This rewrites, in place, a
+   chain-me call into a direct jump to |place_to_jump_to|.  The
+   returned VexInvalRange tells the caller which bytes to
+   invalidate in the icache. */
+VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+                                   void* place_to_chain,
+                                   const void* disp_cp_chain_me_EXPECTED,
+                                   const void* place_to_jump_to )
+{
+   vassert(endness_host == VexEndnessLE);
+
+   /* What we're expecting to see is:
+        movw x9, disp_cp_chain_me_to_EXPECTED[15:0]
+        movk x9, disp_cp_chain_me_to_EXPECTED[31:16], lsl 16
+        movk x9, disp_cp_chain_me_to_EXPECTED[47:32], lsl 32
+        movk x9, disp_cp_chain_me_to_EXPECTED[63:48], lsl 48
+        blr  x9
+      viz
+        <16 bytes generated by imm64_to_ireg_EXACTLY4>
+        D6 3F 01 20
+   */
+   UInt* p = (UInt*)place_to_chain;
+   vassert(0 == (3 & (HWord)p));
+   vassert(is_imm64_to_ireg_EXACTLY4(
+              p, /*x*/9, (Addr)disp_cp_chain_me_EXPECTED));
+   vassert(p[4] == 0xD63F0120);
+
+   /* And what we want to change it to is:
+        movw x9, place_to_jump_to[15:0]
+        movk x9, place_to_jump_to[31:16], lsl 16
+        movk x9, place_to_jump_to[47:32], lsl 32
+        movk x9, place_to_jump_to[63:48], lsl 48
+        br   x9
+      viz
+        <16 bytes generated by imm64_to_ireg_EXACTLY4>
+        D6 1F 01 20
+
+      The replacement has the same length as the original.
+   */
+   (void)imm64_to_ireg_EXACTLY4(p, /*x*/9, (Addr)place_to_jump_to);
+   p[4] = 0xD61F0120;
+
+   /* 20 == 5 insns * 4 bytes: the whole rewritten sequence. */
+   VexInvalRange vir = {(HWord)p, 20};
+   return vir;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above.  This is the exact inverse of
+   chainXDirect_ARM64: it rewrites the direct jump back into a
+   chain-me call to |disp_cp_chain_me|. */
+VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+                                     void* place_to_unchain,
+                                     const void* place_to_jump_to_EXPECTED,
+                                     const void* disp_cp_chain_me )
+{
+   vassert(endness_host == VexEndnessLE);
+
+   /* What we're expecting to see is:
+        movw x9, place_to_jump_to_EXPECTED[15:0]
+        movk x9, place_to_jump_to_EXPECTED[31:16], lsl 16
+        movk x9, place_to_jump_to_EXPECTED[47:32], lsl 32
+        movk x9, place_to_jump_to_EXPECTED[63:48], lsl 48
+        br   x9
+      viz
+        <16 bytes generated by imm64_to_ireg_EXACTLY4>
+        D6 1F 01 20
+   */
+   UInt* p = (UInt*)place_to_unchain;
+   vassert(0 == (3 & (HWord)p));
+   vassert(is_imm64_to_ireg_EXACTLY4(
+              p, /*x*/9, (Addr)place_to_jump_to_EXPECTED));
+   vassert(p[4] == 0xD61F0120);
+
+   /* And what we want to change it to is:
+        movw x9, disp_cp_chain_me_to[15:0]
+        movk x9, disp_cp_chain_me_to[31:16], lsl 16
+        movk x9, disp_cp_chain_me_to[47:32], lsl 32
+        movk x9, disp_cp_chain_me_to[63:48], lsl 48
+        blr  x9
+      viz
+        <16 bytes generated by imm64_to_ireg_EXACTLY4>
+        D6 3F 01 20
+   */
+   (void)imm64_to_ireg_EXACTLY4(p, /*x*/9, (Addr)disp_cp_chain_me);
+   p[4] = 0xD63F0120;
+
+   /* 20 == 5 insns * 4 bytes: the whole rewritten sequence. */
+   VexInvalRange vir = {(HWord)p, 20};
+   return vir;
+}
+
+
<br>
+/* Patch the counter address into a profile inc point, as previously
+   created by the ARM64in_ProfInc case for emit_ARM64Instr.  Only the
+   4-insn movw/movk immediate sequence is rewritten; the trailing
+   ldr/add/str insns are left as-is, hence the 16 (4*4) byte
+   invalidation range. */
+VexInvalRange patchProfInc_ARM64 ( VexEndness endness_host,
+                                   void*  place_to_patch,
+                                   const ULong* location_of_counter )
+{
+   vassert(sizeof(ULong*) == 8);
+   vassert(endness_host == VexEndnessLE);
+   UInt* p = (UInt*)place_to_patch;
+   vassert(0 == (3 & (HWord)p));
+   /* Expect the placeholder constant from ARM64in_ProfInc, followed by
+      the fixed ldr x8 / add x8,x8,#1 / str x8 sequence. */
+   vassert(is_imm64_to_ireg_EXACTLY4(p, /*x*/9, 0x6555755585559566ULL));
+   vassert(p[4] == 0xF9400128);
+   vassert(p[5] == 0x91000508);
+   vassert(p[6] == 0xF9000128);
+   imm64_to_ireg_EXACTLY4(p, /*x*/9, (Addr)location_of_counter);
+   VexInvalRange vir = {(HWord)p, 4*4};
+   return vir;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                   host_arm64_defs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_arm64_defs.h b/VEX/priv/host_arm64_defs.h
new file mode 100644
index 0000000..039fce1
--- /dev/null
+++ b/VEX/priv/host_arm64_defs.h
@@ -0,0 +1,1016 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 host_arm64_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2013-2013 OpenWorks
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_HOST_ARM64_DEFS_H
+#define __VEX_HOST_ARM64_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex.h"                      // VexArch
+#include "host_generic_regs.h"           // HReg
+
+
+/* --------- Registers. --------- */
+
+/* Real-register definitions.  NOTE(review): the mkHReg arguments
+   appear to be (isVirtual, register class, hardware encoding, index
+   in the register universe) -- confirm against mkHReg in
+   host_generic_regs.h.  The universe indices (last argument) run
+   0..28 with no gaps; presumably the ordering reflects register
+   allocator preference -- verify in getRRegUniverse_ARM64. */
+#define ST_IN static inline
+ST_IN HReg hregARM64_X22 ( void ) { return mkHReg(False, HRcInt64,  22,  0); }
+ST_IN HReg hregARM64_X23 ( void ) { return mkHReg(False, HRcInt64,  23,  1); }
+ST_IN HReg hregARM64_X24 ( void ) { return mkHReg(False, HRcInt64,  24,  2); }
+ST_IN HReg hregARM64_X25 ( void ) { return mkHReg(False, HRcInt64,  25,  3); }
+ST_IN HReg hregARM64_X26 ( void ) { return mkHReg(False, HRcInt64,  26,  4); }
+ST_IN HReg hregARM64_X27 ( void ) { return mkHReg(False, HRcInt64,  27,  5); }
+ST_IN HReg hregARM64_X28 ( void ) { return mkHReg(False, HRcInt64,  28,  6); }
+
+ST_IN HReg hregARM64_X0  ( void ) { return mkHReg(False, HRcInt64,  0,   7); }
+ST_IN HReg hregARM64_X1  ( void ) { return mkHReg(False, HRcInt64,  1,   8); }
+ST_IN HReg hregARM64_X2  ( void ) { return mkHReg(False, HRcInt64,  2,   9); }
+ST_IN HReg hregARM64_X3  ( void ) { return mkHReg(False, HRcInt64,  3,  10); }
+ST_IN HReg hregARM64_X4  ( void ) { return mkHReg(False, HRcInt64,  4,  11); }
+ST_IN HReg hregARM64_X5  ( void ) { return mkHReg(False, HRcInt64,  5,  12); }
+ST_IN HReg hregARM64_X6  ( void ) { return mkHReg(False, HRcInt64,  6,  13); }
+ST_IN HReg hregARM64_X7  ( void ) { return mkHReg(False, HRcInt64,  7,  14); }
+
+ST_IN HReg hregARM64_Q16 ( void ) { return mkHReg(False, HRcVec128, 16, 15); }
+ST_IN HReg hregARM64_Q17 ( void ) { return mkHReg(False, HRcVec128, 17, 16); }
+ST_IN HReg hregARM64_Q18 ( void ) { return mkHReg(False, HRcVec128, 18, 17); }
+ST_IN HReg hregARM64_Q19 ( void ) { return mkHReg(False, HRcVec128, 19, 18); }
+ST_IN HReg hregARM64_Q20 ( void ) { return mkHReg(False, HRcVec128, 20, 19); }
+
+ST_IN HReg hregARM64_D8  ( void ) { return mkHReg(False, HRcFlt64,  8,  20); }
+ST_IN HReg hregARM64_D9  ( void ) { return mkHReg(False, HRcFlt64,  9,  21); }
+ST_IN HReg hregARM64_D10 ( void ) { return mkHReg(False, HRcFlt64,  10, 22); }
+ST_IN HReg hregARM64_D11 ( void ) { return mkHReg(False, HRcFlt64,  11, 23); }
+ST_IN HReg hregARM64_D12 ( void ) { return mkHReg(False, HRcFlt64,  12, 24); }
+ST_IN HReg hregARM64_D13 ( void ) { return mkHReg(False, HRcFlt64,  13, 25); }
+
+ST_IN HReg hregARM64_X8  ( void ) { return mkHReg(False, HRcInt64,  8,  26); }
+ST_IN HReg hregARM64_X9  ( void ) { return mkHReg(False, HRcInt64,  9,  27); }
+ST_IN HReg hregARM64_X21 ( void ) { return mkHReg(False, HRcInt64, 21,  28); }
+#undef ST_IN
+
+extern void ppHRegARM64 ( HReg );
+
+/* Number of registers used arg passing in function calls */
+#define ARM64_N_ARGREGS 8   /* x0 .. x7 */
+
+
+/* --------- Condition codes. --------- */
+
+/* Values are exactly the AArch64 4-bit 'cond' field encodings, so
+   they can be placed directly into emitted instructions. */
+typedef
+   enum {
+      ARM64cc_EQ  = 0,  /* equal                         : Z=1 */
+      ARM64cc_NE  = 1,  /* not equal                     : Z=0 */
+
+      ARM64cc_CS  = 2,  /* >=u (higher or same)          : C=1 */
+      ARM64cc_CC  = 3,  /* <u  (lower)                   : C=0 */
+
+      ARM64cc_MI  = 4,  /* minus (negative)              : N=1 */
+      ARM64cc_PL  = 5,  /* plus (zero or +ve)            : N=0 */
+
+      ARM64cc_VS  = 6,  /* overflow                      : V=1 */
+      ARM64cc_VC  = 7,  /* no overflow                   : V=0 */
+
+      ARM64cc_HI  = 8,  /* >u   (higher)                 :   C=1 && Z=0 */
+      ARM64cc_LS  = 9,  /* <=u  (lower or same)          : !(C=1 && Z=0) */
+
+      ARM64cc_GE  = 10, /* >=s (signed greater or equal) :   N=V */
+      ARM64cc_LT  = 11, /* <s  (signed less than)        : !(N=V) */
+
+      ARM64cc_GT  = 12, /* >s  (signed greater)          :   Z=0 && N=V */
+      ARM64cc_LE  = 13, /* <=s (signed less or equal)    : !(Z=0 && N=V) */
+
+      ARM64cc_AL  = 14, /* always (unconditional) */
+      ARM64cc_NV  = 15  /* in 64-bit mode also means "always" */
+   }
+   ARM64CondCode;
+
+
+/* --------- Memory address expressions (amodes). --------- */
+
+/* Tagged-union address modes.  Distinct numeric bases across the
+   enums in this file (10, 20, 30, ...) presumably make a stray tag
+   value identify which enum it came from. */
+typedef
+   enum {
+      ARM64am_RI9=10, /* reg + simm9 */
+      ARM64am_RI12,   /* reg + uimm12 * szB (iow, scaled by access size) */
+      ARM64am_RR      /* reg1 + reg2 */
+   }
+   ARM64AModeTag;
+
+typedef
+   struct {
+      ARM64AModeTag tag;
+      union {
+         struct {
+            HReg reg;
+            Int  simm9; /* -256 .. +255 */
+         } RI9;
+         struct {
+            HReg  reg;
+            UInt  uimm12; /* 0 .. 4095 */
+            UChar szB;    /* 1, 2, 4, 8 (16 ?) */
+         } RI12;
+         struct {
+            HReg base;
+            HReg index;
+         } RR;
+      } ARM64am;
+   }
+   ARM64AMode;
+
+/* Constructors for the three amode forms. */
+extern ARM64AMode* ARM64AMode_RI9  ( HReg reg, Int simm9 );
+extern ARM64AMode* ARM64AMode_RI12 ( HReg reg, Int uimm12, UChar szB );
+extern ARM64AMode* ARM64AMode_RR   ( HReg base, HReg index );
+
+
+/* --------- Reg or uimm12 or (uimm12 << 12) operands --------- */
+
+/* NOTE(review): the 'A' presumably denotes arithmetic (ADD/SUB-style)
+   immediate operands -- confirm at the use sites. */
+typedef
+   enum {
+      ARM64riA_I12=20, /* uimm12 << 0 or 12 only */
+      ARM64riA_R       /* reg */
+   }
+   ARM64RIATag;
+
+typedef
+   struct {
+      ARM64RIATag tag;
+      union {
+         struct {
+            UShort imm12;  /* 0 .. 4095 */
+            UChar  shift;  /* 0 or 12 only */
+         } I12;
+         struct {
+            HReg reg;
+         } R;
+      } ARM64riA;
+   }
+   ARM64RIA;
+
+/* Constructors for the two operand forms. */
+extern ARM64RIA* ARM64RIA_I12 ( UShort imm12, UChar shift );
+extern ARM64RIA* ARM64RIA_R   ( HReg );
+
+
+/* --------- Reg or "bitfield" (logic immediate) operands --------- */
+
+/* bitN:immR:immS is the AArch64 'N:immr:imms' 13-bit logical-immediate
+   encoding, as used by AND/ORR/EOR (immediate). */
+typedef
+   enum {
+      ARM64riL_I13=6, /* weird-o bitfield immediate, 13 bits in total */
+      ARM64riL_R      /* reg */
+   }
+   ARM64RILTag;
+
+typedef
+   struct {
+      ARM64RILTag tag;
+      union {
+         struct {
+            UChar bitN; /* 0 .. 1 */
+            UChar immR; /* 0 .. 63 */
+            UChar immS; /* 0 .. 63 */
+         } I13;
+         struct {
+            HReg reg;
+         } R;
+      } ARM64riL;
+   }
+   ARM64RIL;
+
+/* Constructors for the two operand forms. */
+extern ARM64RIL* ARM64RIL_I13 ( UChar bitN, UChar immR, UChar immS );
+extern ARM64RIL* ARM64RIL_R   ( HReg );
+
+
+/* --------------- Reg or uimm6 operands --------------- */
+
+/* NOTE(review): looks like this is for shift amounts; zero is
+   excluded (1 .. 63 only), so a zero shift must be handled by the
+   caller -- verify at use sites. */
+typedef
+   enum {
+      ARM64ri6_I6=30, /* uimm6, 1 .. 63 only */
+      ARM64ri6_R      /* reg */
+   }
+   ARM64RI6Tag;
+
+typedef
+   struct {
+      ARM64RI6Tag tag;
+      union {
+         struct {
+            UInt imm6;   /* 1 .. 63 */
+         } I6;
+         struct {
+            HReg reg;
+         } R;
+      } ARM64ri6;
+   }
+   ARM64RI6;
+
+/* Constructors for the two operand forms. */
+extern ARM64RI6* ARM64RI6_I6 ( UInt imm6 );
+extern ARM64RI6* ARM64RI6_R  ( HReg );
+
+
+/* --------------------- Instructions --------------------- */
+
+/* Sub-operation tags for the scalar (integer and FP) instruction
+   kinds.  Each enum starts at a distinct base value, presumably so a
+   stray tag identifies its enum. */
+typedef
+   enum {
+      ARM64lo_AND=40,
+      ARM64lo_OR,
+      ARM64lo_XOR
+   }
+   ARM64LogicOp;
+
+typedef
+   enum {
+      ARM64sh_SHL=50,
+      ARM64sh_SHR,
+      ARM64sh_SAR
+   }
+   ARM64ShiftOp;
+
+typedef
+   enum {
+      ARM64un_NEG=60,
+      ARM64un_NOT,
+      ARM64un_CLZ,
+   }
+   ARM64UnaryOp;
+
+typedef
+   enum {
+      ARM64mul_PLAIN=70, /* lo64(64 * 64)  */
+      ARM64mul_ZX,       /* hi64(64 *u 64) */
+      ARM64mul_SX        /* hi64(64 *s 64) */
+   }
+   ARM64MulOp;
+
+typedef
+   /* These characterise an integer-FP conversion, but don't imply any
+      particular direction. */
+   enum {
+      ARM64cvt_F32_I32S=80,
+      ARM64cvt_F64_I32S,
+      ARM64cvt_F32_I64S,
+      ARM64cvt_F64_I64S,
+      ARM64cvt_F32_I32U,
+      ARM64cvt_F64_I32U,
+      ARM64cvt_F32_I64U,
+      ARM64cvt_F64_I64U,
+      ARM64cvt_INVALID
+   }
+   ARM64CvtOp;
+
+typedef
+   enum {
+      ARM64fpb_ADD=100,
+      ARM64fpb_SUB,
+      ARM64fpb_MUL,
+      ARM64fpb_DIV,
+      ARM64fpb_INVALID
+   }
+   ARM64FpBinOp;
+
+typedef
+   enum {
+      ARM64fpu_NEG=110,
+      ARM64fpu_ABS,
+      ARM64fpu_SQRT,
+      ARM64fpu_RINT,
+      ARM64fpu_RECPX,
+      ARM64fpu_INVALID
+   }
+   ARM64FpUnaryOp;
+
+/* Binary vector ops.  Name suffixes give the lane layout:
+   64x2 == two 64-bit lanes, ..., 8x16 == sixteen 8-bit lanes;
+   forms like 2DSS/4SHH/8HBB name widening/narrowing lane pairs. */
+typedef
+   enum {
+      ARM64vecb_ADD64x2=120, ARM64vecb_ADD32x4,
+      ARM64vecb_ADD16x8,     ARM64vecb_ADD8x16,
+      ARM64vecb_SUB64x2,     ARM64vecb_SUB32x4,
+      ARM64vecb_SUB16x8,     ARM64vecb_SUB8x16,
+                             ARM64vecb_MUL32x4,
+      ARM64vecb_MUL16x8,     ARM64vecb_MUL8x16,
+      ARM64vecb_FADD64x2,    ARM64vecb_FADD32x4,
+      ARM64vecb_FSUB64x2,    ARM64vecb_FSUB32x4,
+      ARM64vecb_FMUL64x2,    ARM64vecb_FMUL32x4,
+      ARM64vecb_FDIV64x2,    ARM64vecb_FDIV32x4,
+      ARM64vecb_FMAX64x2,    ARM64vecb_FMAX32x4,
+      ARM64vecb_FMIN64x2,    ARM64vecb_FMIN32x4,
+                             ARM64vecb_UMAX32x4,
+      ARM64vecb_UMAX16x8,    ARM64vecb_UMAX8x16,
+                             ARM64vecb_UMIN32x4,
+      ARM64vecb_UMIN16x8,    ARM64vecb_UMIN8x16,
+                             ARM64vecb_SMAX32x4,
+      ARM64vecb_SMAX16x8,    ARM64vecb_SMAX8x16,
+                             ARM64vecb_SMIN32x4,
+      ARM64vecb_SMIN16x8,    ARM64vecb_SMIN8x16,
+      ARM64vecb_AND,
+      ARM64vecb_ORR,
+      ARM64vecb_XOR,
+      ARM64vecb_CMEQ64x2,    ARM64vecb_CMEQ32x4,
+      ARM64vecb_CMEQ16x8,    ARM64vecb_CMEQ8x16,
+      ARM64vecb_CMHI64x2,    ARM64vecb_CMHI32x4, /* >u */
+      ARM64vecb_CMHI16x8,    ARM64vecb_CMHI8x16,
+      ARM64vecb_CMGT64x2,    ARM64vecb_CMGT32x4, /* >s */
+      ARM64vecb_CMGT16x8,    ARM64vecb_CMGT8x16,
+      ARM64vecb_FCMEQ64x2,   ARM64vecb_FCMEQ32x4,
+      ARM64vecb_FCMGE64x2,   ARM64vecb_FCMGE32x4,
+      ARM64vecb_FCMGT64x2,   ARM64vecb_FCMGT32x4,
+      ARM64vecb_TBL1,
+      ARM64vecb_UZP164x2,    ARM64vecb_UZP132x4,
+      ARM64vecb_UZP116x8,    ARM64vecb_UZP18x16,
+      ARM64vecb_UZP264x2,    ARM64vecb_UZP232x4,
+      ARM64vecb_UZP216x8,    ARM64vecb_UZP28x16,
+      ARM64vecb_ZIP132x4,    ARM64vecb_ZIP116x8,
+      ARM64vecb_ZIP18x16,    ARM64vecb_ZIP232x4,
+      ARM64vecb_ZIP216x8,    ARM64vecb_ZIP28x16,
+                             ARM64vecb_PMUL8x16,
+                             ARM64vecb_PMULL8x8,
+                             ARM64vecb_UMULL2DSS,
+      ARM64vecb_UMULL4SHH,   ARM64vecb_UMULL8HBB,
+                             ARM64vecb_SMULL2DSS,
+      ARM64vecb_SMULL4SHH,   ARM64vecb_SMULL8HBB,
+      ARM64vecb_SQADD64x2,   ARM64vecb_SQADD32x4,
+      ARM64vecb_SQADD16x8,   ARM64vecb_SQADD8x16,
+      ARM64vecb_UQADD64x2,   ARM64vecb_UQADD32x4,
+      ARM64vecb_UQADD16x8,   ARM64vecb_UQADD8x16,
+      ARM64vecb_SQSUB64x2,   ARM64vecb_SQSUB32x4,
+      ARM64vecb_SQSUB16x8,   ARM64vecb_SQSUB8x16,
+      ARM64vecb_UQSUB64x2,   ARM64vecb_UQSUB32x4,
+      ARM64vecb_UQSUB16x8,   ARM64vecb_UQSUB8x16,
+                             ARM64vecb_SQDMULL2DSS,
+      ARM64vecb_SQDMULL4SHH,
+                             ARM64vecb_SQDMULH32x4,
+      ARM64vecb_SQDMULH16x8,
+                             ARM64vecb_SQRDMULH32x4,
+      ARM64vecb_SQRDMULH16x8,
+      ARM64vecb_SQSHL64x2,   ARM64vecb_SQSHL32x4,
+      ARM64vecb_SQSHL16x8,   ARM64vecb_SQSHL8x16,
+      ARM64vecb_UQSHL64x2,   ARM64vecb_UQSHL32x4,
+      ARM64vecb_UQSHL16x8,   ARM64vecb_UQSHL8x16,
+      ARM64vecb_SQRSHL64x2,  ARM64vecb_SQRSHL32x4,
+      ARM64vecb_SQRSHL16x8,  ARM64vecb_SQRSHL8x16,
+      ARM64vecb_UQRSHL64x2,  ARM64vecb_UQRSHL32x4,
+      ARM64vecb_UQRSHL16x8,  ARM64vecb_UQRSHL8x16,
+      ARM64vecb_SSHL64x2,    ARM64vecb_SSHL32x4,
+      ARM64vecb_SSHL16x8,    ARM64vecb_SSHL8x16, 
+      ARM64vecb_USHL64x2,    ARM64vecb_USHL32x4,
+      ARM64vecb_USHL16x8,    ARM64vecb_USHL8x16, 
+      ARM64vecb_SRSHL64x2,   ARM64vecb_SRSHL32x4,
+      ARM64vecb_SRSHL16x8,   ARM64vecb_SRSHL8x16, 
+      ARM64vecb_URSHL64x2,   ARM64vecb_URSHL32x4,
+      ARM64vecb_URSHL16x8,   ARM64vecb_URSHL8x16, 
+      ARM64vecb_FRECPS64x2,  ARM64vecb_FRECPS32x4,
+      ARM64vecb_FRSQRTS64x2, ARM64vecb_FRSQRTS32x4,
+      ARM64vecb_INVALID
+   }
+   ARM64VecBinOp;
+
+/* Vector ops that modify their destination in place (read-modify-write
+   of the dst register): signed/unsigned saturating accumulate. */
+typedef
+   enum {
+      ARM64vecmo_SUQADD64x2=300, ARM64vecmo_SUQADD32x4,
+      ARM64vecmo_SUQADD16x8,     ARM64vecmo_SUQADD8x16,
+      ARM64vecmo_USQADD64x2,     ARM64vecmo_USQADD32x4,
+      ARM64vecmo_USQADD16x8,     ARM64vecmo_USQADD8x16,
+      ARM64vecmo_INVALID
+   }
+   ARM64VecModifyOp;
+
+/* Unary vector ops, with lane layout suffixes as for ARM64VecBinOp. */
+typedef
+   enum {
+      ARM64vecu_FNEG64x2=350, ARM64vecu_FNEG32x4,
+      ARM64vecu_FABS64x2,     ARM64vecu_FABS32x4,
+      ARM64vecu_NOT,
+      ARM64vecu_ABS64x2,      ARM64vecu_ABS32x4,
+      ARM64vecu_ABS16x8,      ARM64vecu_ABS8x16,
+      ARM64vecu_CLS32x4,      ARM64vecu_CLS16x8,      ARM64vecu_CLS8x16, 
+      ARM64vecu_CLZ32x4,      ARM64vecu_CLZ16x8,      ARM64vecu_CLZ8x16, 
+      ARM64vecu_CNT8x16,
+      ARM64vecu_RBIT,
+      ARM64vecu_REV1616B,
+      ARM64vecu_REV3216B,     ARM64vecu_REV328H,
+      ARM64vecu_REV6416B,     ARM64vecu_REV648H,      ARM64vecu_REV644S,
+      ARM64vecu_URECPE32x4,
+      ARM64vecu_URSQRTE32x4,
+      ARM64vecu_FRECPE64x2,   ARM64vecu_FRECPE32x4,
+      ARM64vecu_FRSQRTE64x2,  ARM64vecu_FRSQRTE32x4,
+      ARM64vecu_FSQRT64x2,    ARM64vecu_FSQRT32x4,
+      ARM64vecu_INVALID
+   }
+   ARM64VecUnaryOp;
+
+/* Vector shift-by-immediate ops.  See the big comment in the
+   ARM64in_VShiftImmV case of emit_ARM64Instr for the immh:immb
+   encoding and the valid shift-amount ranges per lane size. */
+typedef
+   enum {
+      ARM64vecshi_USHR64x2=400, ARM64vecshi_USHR32x4,
+      ARM64vecshi_USHR16x8,     ARM64vecshi_USHR8x16,
+      ARM64vecshi_SSHR64x2,     ARM64vecshi_SSHR32x4,
+      ARM64vecshi_SSHR16x8,     ARM64vecshi_SSHR8x16,
+      ARM64vecshi_SHL64x2,      ARM64vecshi_SHL32x4,
+      ARM64vecshi_SHL16x8,      ARM64vecshi_SHL8x16,
+      /* These narrowing shifts zero out the top half of the destination
+         register. */
+      ARM64vecshi_SQSHRN2SD,    ARM64vecshi_SQSHRN4HS,   ARM64vecshi_SQSHRN8BH,
+      ARM64vecshi_UQSHRN2SD,    ARM64vecshi_UQSHRN4HS,   ARM64vecshi_UQSHRN8BH,
+      ARM64vecshi_SQSHRUN2SD,   ARM64vecshi_SQSHRUN4HS,  ARM64vecshi_SQSHRUN8BH,
+      ARM64vecshi_SQRSHRN2SD,   ARM64vecshi_SQRSHRN4HS,  ARM64vecshi_SQRSHRN8BH,
+      ARM64vecshi_UQRSHRN2SD,   ARM64vecshi_UQRSHRN4HS,  ARM64vecshi_UQRSHRN8BH,
+      ARM64vecshi_SQRSHRUN2SD,  ARM64vecshi_SQRSHRUN4HS, ARM64vecshi_SQRSHRUN8BH,
+      /* Saturating left shifts, of various flavours. */
+      ARM64vecshi_UQSHL64x2,    ARM64vecshi_UQSHL32x4,
+      ARM64vecshi_UQSHL16x8,    ARM64vecshi_UQSHL8x16, 
+      ARM64vecshi_SQSHL64x2,    ARM64vecshi_SQSHL32x4,
+      ARM64vecshi_SQSHL16x8,    ARM64vecshi_SQSHL8x16, 
+      ARM64vecshi_SQSHLU64x2,   ARM64vecshi_SQSHLU32x4,
+      ARM64vecshi_SQSHLU16x8,   ARM64vecshi_SQSHLU8x16, 
+      ARM64vecshi_INVALID
+   }
+   ARM64VecShiftImmOp;
+
+/* Vector narrowing ops (the XTN family); used by the VNarrowV variant
+   below, which narrows Q -> bottom half of Q and zeroes the top half.
+   Values start at 450 so the range is disjoint from the other
+   vector-op enums. */
+typedef
+   enum {
+      ARM64vecna_XTN=450,
+      ARM64vecna_SQXTN,
+      ARM64vecna_UQXTN,
+      ARM64vecna_SQXTUN,
+      ARM64vecna_INVALID
+   }
+   ARM64VecNarrowOp;
+
+/* Discriminant tags for ARM64Instr.  Each tag corresponds to exactly
+   one member of the ARM64Instr union.  Values start at 1220 so they
+   cannot be confused with members of the operand enums above. */
+typedef
+   enum {
+      /* baseline */
+      ARM64in_Arith=1220,
+      ARM64in_Cmp,
+      ARM64in_Logic,
+      ARM64in_Test,
+      ARM64in_Shift,
+      ARM64in_Unary,
+      ARM64in_MovI,        /* int reg-reg move */
+      ARM64in_Imm64,
+      ARM64in_LdSt64,
+      ARM64in_LdSt32,      /* w/ ZX loads */
+      ARM64in_LdSt16,      /* w/ ZX loads */
+      ARM64in_LdSt8,       /* w/ ZX loads */
+      ARM64in_XDirect,     /* direct transfer to GA */
+      ARM64in_XIndir,      /* indirect transfer to GA */
+      ARM64in_XAssisted,   /* assisted transfer to GA */
+      ARM64in_CSel,
+      ARM64in_Call,
+      ARM64in_AddToSP,     /* move SP by small, signed constant */
+      ARM64in_FromSP,      /* move SP to integer register */
+      ARM64in_Mul,
+      ARM64in_LdrEX,
+      ARM64in_StrEX,
+      ARM64in_MFence,
+      /* ARM64in_V*: scalar ops involving vector registers */
+      ARM64in_VLdStH,   /* ld/st to/from low 16 bits of vec reg, imm offset */
+      ARM64in_VLdStS,   /* ld/st to/from low 32 bits of vec reg, imm offset */
+      ARM64in_VLdStD,   /* ld/st to/from low 64 bits of vec reg, imm offset */
+      ARM64in_VLdStQ,   /* ld/st to/from all 128 bits of vec reg, no offset */
+      ARM64in_VCvtI2F,
+      ARM64in_VCvtF2I,
+      ARM64in_VCvtSD,   /* scalar 32 bit FP <--> 64 bit FP */
+      ARM64in_VCvtHS,   /* scalar 16 bit FP <--> 32 bit FP */
+      ARM64in_VCvtHD,   /* scalar 16 bit FP <--> 64 bit FP */
+      ARM64in_VUnaryD,
+      ARM64in_VUnaryS,
+      ARM64in_VBinD,
+      ARM64in_VBinS,
+      ARM64in_VCmpD,
+      ARM64in_VCmpS,
+      ARM64in_VFCSel,
+      ARM64in_FPCR,
+      ARM64in_FPSR,
+      /* ARM64in_V*V: vector ops on vector registers */
+      ARM64in_VBinV,
+      ARM64in_VModifyV,
+      ARM64in_VUnaryV,
+      ARM64in_VNarrowV,
+      ARM64in_VShiftImmV,
+      ARM64in_VExtV,
+      ARM64in_VImmQ,
+      ARM64in_VDfromX,    /* Move an Xreg to a Dreg */
+      ARM64in_VQfromX,    /* Move an Xreg to a Qreg lo64, and zero hi64 */
+      ARM64in_VQfromXX,   /* Move 2 Xregs to a Qreg */
+      ARM64in_VXfromQ,    /* Move half a Qreg to an Xreg */
+      ARM64in_VXfromDorS, /* Move Dreg or Sreg(ZX) to an Xreg */
+      ARM64in_VMov,       /* vector reg-reg move, 16, 8 or 4 bytes */
+      /* infrastructure */
+      ARM64in_EvCheck,    /* Event check */
+      ARM64in_ProfInc     /* 64-bit profile counter increment */
+   }
+   ARM64InstrTag;
+
+/* Destinations are on the LEFT (first operand) */
+
+/* An ARM64 host instruction: a tagged union.  |tag| selects which
+   member of |ARM64in| is meaningful. */
+typedef
+   struct {
+      /* Discriminant: must match the union member accessed below. */
+      ARM64InstrTag tag;
+      union {
+         /* --- INTEGER INSTRUCTIONS --- */
+         /* 64 bit ADD/SUB reg, reg or uimm12<<{0,12} */
+         struct {
+            HReg      dst;
+            HReg      argL;
+            ARM64RIA* argR;
+            Bool      isAdd;
+         } Arith;
+         /* 64 or 32 bit CMP reg, reg or aimm (SUB and set flags) */
+         struct {
+            HReg      argL;
+            ARM64RIA* argR;
+            Bool      is64;
+         } Cmp;
+         /* 64 bit AND/OR/XOR reg, reg or bitfield-immediate */
+         struct {
+            HReg         dst;
+            HReg         argL;
+            ARM64RIL*    argR;
+            ARM64LogicOp op;
+         } Logic;
+         /* 64 bit TST reg, reg or bimm (AND and set flags) */
+         struct {
+            HReg      argL;
+            ARM64RIL* argR;
+         } Test;
+         /* 64 bit SHL/SHR/SAR, 2nd arg is reg or imm */
+         struct {
+            HReg         dst;
+            HReg         argL;
+            ARM64RI6*    argR;
+            ARM64ShiftOp op;
+         } Shift;
+         /* NOT/NEG/CLZ, 64 bit only */
+         struct {
+            HReg         dst;
+            HReg         src;
+            ARM64UnaryOp op;
+         } Unary;
+         /* MOV dst, src -- reg-reg move for integer registers */
+         struct {
+            HReg dst;
+            HReg src;
+         } MovI;
+         /* Pseudo-insn; make a 64-bit immediate */
+         struct {
+            HReg  dst;
+            ULong imm64;
+         } Imm64;
+         /* 64-bit load or store */
+         struct {
+            Bool        isLoad;
+            HReg        rD;
+            ARM64AMode* amode;
+         } LdSt64;
+         /* zx-32-to-64-bit load, or 32-bit store */
+         struct {
+            Bool        isLoad;
+            HReg        rD;
+            ARM64AMode* amode;
+         } LdSt32;
+         /* zx-16-to-64-bit load, or 16-bit store */
+         struct {
+            Bool        isLoad;
+            HReg        rD;
+            ARM64AMode* amode;
+         } LdSt16;
+         /* zx-8-to-64-bit load, or 8-bit store */
+         struct {
+            Bool        isLoad;
+            HReg        rD;
+            ARM64AMode* amode;
+         } LdSt8;
+         /* Update the guest PC value, then exit requesting to chain
+            to it.  May be conditional.  Urr, use of Addr64 implicitly
+            assumes that wordsize(guest) == wordsize(host). */
+         struct {
+            Addr64        dstGA;    /* next guest address */
+            ARM64AMode*   amPC;     /* amode in guest state for PC */
+            ARM64CondCode cond;     /* can be ARM64cc_AL */
+            Bool          toFastEP; /* chain to the slow or fast point? */
+         } XDirect;
+         /* Boring transfer to a guest address not known at JIT time.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg          dstGA;
+            ARM64AMode*   amPC;
+            ARM64CondCode cond; /* can be ARM64cc_AL */
+         } XIndir;
+         /* Assisted transfer to a guest address, most general case.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg          dstGA;
+            ARM64AMode*   amPC;
+            ARM64CondCode cond; /* can be ARM64cc_AL */
+            IRJumpKind    jk;
+         } XAssisted;
+         /* CSEL: dst = if cond then argL else argR.  cond may be anything. */
+          struct {
+            HReg          dst;
+            HReg          argL;
+            HReg          argR;
+            ARM64CondCode cond;
+         } CSel;
+         /* Pseudo-insn.  Call target (an absolute address), on given
+            condition (which could be ARM64cc_AL). */
+         struct {
+            RetLoc        rloc;     /* where the return value will be */
+            Addr64        target;
+            ARM64CondCode cond;
+            Int           nArgRegs; /* # regs carrying args: 0 .. 8 */
+         } Call;
+         /* move SP by small, signed constant */
+         struct {
+            Int simm; /* needs to be 0 % 16 and in the range -4095
+                         .. 4095 inclusive */
+         } AddToSP;
+         /* move SP to integer register */
+         struct {
+            HReg dst;
+         } FromSP;
+         /* Integer multiply, with 3 variants:
+              (PLAIN) lo64(64 *  64)
+              (ZX)    hi64(64 *u 64)
+              (SX)    hi64(64 *s 64)
+         */
+         struct {
+            HReg       dst;
+            HReg       argL;
+            HReg       argR;
+            ARM64MulOp op;
+         } Mul;
+         /* LDXR{,H,B} x2, [x4] */
+         struct {
+            Int  szB; /* 1, 2, 4 or 8 */
+         } LdrEX;
+         /* STXR{,H,B} w0, x2, [x4] */
+         struct {
+            Int  szB; /* 1, 2, 4 or 8 */
+         } StrEX;
+         /* Mem fence.  An insn which fences all loads and stores as
+            much as possible before continuing.  On ARM64 we emit the
+            sequence "dsb sy ; dmb sy ; isb sy", which is probably
+            total nuclear overkill, but better safe than sorry. */
+         struct {
+         } MFence;
+         /* --- INSTRUCTIONS INVOLVING VECTOR REGISTERS --- */
+         /* ld/st to/from low 16 bits of vec reg, imm offset */
+         struct {
+            Bool isLoad;
+            HReg hD;
+            HReg rN;
+            UInt uimm12;  /* 0 .. 8190 inclusive, 0 % 2 */
+         } VLdStH;
+         /* ld/st to/from low 32 bits of vec reg, imm offset */
+         struct {
+            Bool isLoad;
+            HReg sD;
+            HReg rN;
+            UInt uimm12;  /* 0 .. 16380 inclusive, 0 % 4 */
+         } VLdStS;
+         /* ld/st to/from low 64 bits of vec reg, imm offset */
+         struct {
+            Bool isLoad;
+            HReg dD;
+            HReg rN;
+            UInt uimm12;  /* 0 .. 32760 inclusive, 0 % 8 */
+         } VLdStD;
+         /* ld/st to/from all 128 bits of vec reg, no offset */
+         struct {
+            Bool isLoad;
+            HReg rQ; // data
+            HReg rN; // address
+         } VLdStQ;
+         /* Scalar conversion of int to float. */
+         struct {
+            ARM64CvtOp how;
+            HReg       rD; // dst, a D or S register
+            HReg       rS; // src, a W or X register
+         } VCvtI2F;
+         /* Scalar conversion of float to int, w/ specified RM. */
+         struct {
+            ARM64CvtOp how;
+            HReg       rD; // dst, a W or X register
+            HReg       rS; // src, a D or S register
+            UChar      armRM; // ARM encoded RM:
+                              // 00=nearest, 01=+inf, 10=-inf, 11=zero
+         } VCvtF2I;
+         /* Convert between 32-bit and 64-bit FP values (both ways). (FCVT) */
+         struct {
+            Bool sToD; /* True: F32->F64.  False: F64->F32 */
+            HReg dst;
+            HReg src;
+         } VCvtSD;
+         /* Convert between 16-bit and 32-bit FP values (both ways). (FCVT) */
+         struct {
+            Bool hToS; /* True: F16->F32.  False: F32->F16 */
+            HReg dst;
+            HReg src;
+         } VCvtHS;
+         /* Convert between 16-bit and 64-bit FP values (both ways). (FCVT) */
+         struct {
+            Bool hToD; /* True: F16->F64.  False: F64->F16 */
+            HReg dst;
+            HReg src;
+         } VCvtHD;
+         /* 64-bit FP unary */
+         struct {
+            ARM64FpUnaryOp op;
+            HReg           dst;
+            HReg           src;
+         } VUnaryD;
+         /* 32-bit FP unary */
+         struct {
+            ARM64FpUnaryOp op;
+            HReg           dst;
+            HReg           src;
+         } VUnaryS;
+         /* 64-bit FP binary arithmetic */
+         struct {
+            ARM64FpBinOp op;
+            HReg         dst;
+            HReg         argL;
+            HReg         argR;
+         } VBinD;
+         /* 32-bit FP binary arithmetic */
+         struct {
+            ARM64FpBinOp op;
+            HReg         dst;
+            HReg         argL;
+            HReg         argR;
+         } VBinS;
+         /* 64-bit FP compare */
+         struct {
+            HReg argL;
+            HReg argR;
+         } VCmpD;
+         /* 32-bit FP compare */
+         struct {
+            HReg argL;
+            HReg argR;
+         } VCmpS;
+         /* 32- or 64-bit FP conditional select */
+         struct {
+            HReg          dst;
+            HReg          argL;
+            HReg          argR;
+            ARM64CondCode cond;
+            Bool          isD;
+         }
+         VFCSel;
+         /* Move a 32-bit value to/from the FPCR */
+         struct {
+            Bool toFPCR;
+            HReg iReg;
+         } FPCR;
+         /* Move a 32-bit value to/from the FPSR */
+         struct {
+            Bool toFPSR;
+            HReg iReg;
+         } FPSR;
+         /* binary vector operation on vector registers */
+         struct {
+            ARM64VecBinOp op;
+            HReg          dst;
+            HReg          argL;
+            HReg          argR;
+         } VBinV;
+         /* binary vector operation on vector registers.
+            Dst reg is also a src. */
+         struct {
+            ARM64VecModifyOp op;
+            HReg             mod;
+            HReg             arg;
+         } VModifyV;
+         /* unary vector operation on vector registers */
+         struct {
+            ARM64VecUnaryOp op;
+            HReg            dst;
+            HReg            arg;
+         } VUnaryV;
+         /* vector narrowing, Q -> Q.  Result goes in the bottom half
+            of dst and the top half is zeroed out.  Iow one of the
+            XTN family. */
+        struct {
+           ARM64VecNarrowOp op;
+           UInt             dszBlg2; // 0: 16to8_x8  1: 32to16_x4  2: 64to32_x2
+           HReg             dst;     // Q reg
+           HReg             src;     // Q reg
+        } VNarrowV;
+        /* Vector shift by immediate.  For left shifts, |amt| must be
+           >= 0 and < implied lane size of |op|.  For right shifts,
+           |amt| must be > 0 and <= implied lane size of |op|.  Shifts
+           beyond these ranges are not allowed. */
+        struct {
+           ARM64VecShiftImmOp op;
+           HReg               dst;
+           HReg               src;
+           UInt               amt;
+        } VShiftImmV;
+        /* Vector extract (EXT).  NOTE(review): presumably |amtB| is the
+           byte offset into the (srcHi:srcLo) pair at which extraction
+           starts -- confirm against the emitter. */
+        struct {
+           HReg dst;
+           HReg srcLo;
+           HReg srcHi;
+           UInt amtB;
+        } VExtV;
+         /* Load a 128-bit immediate into a Q register */
+         struct {
+            HReg   rQ;
+            UShort imm; /* Same 1-bit-per-byte encoding as IR */
+         } VImmQ;
+         /* Move an Xreg to a Dreg */
+         struct {
+            HReg rD;
+            HReg rX;
+         } VDfromX;
+         /* Move an Xreg to a Qreg lo64, and zero hi64 */
+         struct {
+            HReg rQ;
+            HReg rXlo;
+         } VQfromX;
+         /* Move 2 Xregs to a Qreg */
+         struct {
+            HReg rQ;
+            HReg rXhi;
+            HReg rXlo;
+         } VQfromXX;
+         /* Move half a Qreg to an Xreg */
+         struct {
+            HReg rX;
+            HReg rQ;
+            UInt laneNo; /* either 0 or 1 */
+         } VXfromQ;
+         /* Move Dreg or Sreg(ZX) to an Xreg */
+         struct {
+            HReg rX;
+            HReg rDorS;
+            Bool fromD;
+         } VXfromDorS;
+         /* MOV dst, src -- reg-reg move for vector registers */
+         struct {
+            UInt szB; // 16=mov qD,qS;  8=mov dD,dS;  4=mov sD,sS
+            HReg dst;
+            HReg src;
+         } VMov;
+         /* Event check: decrement counter at amCounter; branch to
+            amFailAddr when it goes negative (see evCheckSzB_ARM64
+            below).  NOTE(review): exact emitted sequence is in the
+            .c file -- confirm there. */
+         struct {
+            ARM64AMode* amCounter;
+            ARM64AMode* amFailAddr;
+         } EvCheck;
+         struct {
+            /* No fields.  The address of the counter to inc is
+               installed later, post-translation, by patching it in,
+               as it is not known at translation time. */
+         } ProfInc;
+      } ARM64in;
+   }
+   ARM64Instr;
+
+
+extern ARM64Instr* ARM64Instr_Arith   ( HReg, HReg, ARM64RIA*, Bool isAdd );
+extern ARM64Instr* ARM64Instr_Cmp     ( HReg, ARM64RIA*, Bool is64 );
+extern ARM64Instr* ARM64Instr_Logic   ( HReg, HReg, ARM64RIL*, ARM64LogicOp );
+extern ARM64Instr* ARM64Instr_Test    ( HReg, ARM64RIL* );
+extern ARM64Instr* ARM64Instr_Shift   ( HReg, HReg, ARM64RI6*, ARM64ShiftOp );
+extern ARM64Instr* ARM64Instr_Unary   ( HReg, HReg, ARM64UnaryOp );
+extern ARM64Instr* ARM64Instr_MovI    ( HReg, HReg );
+extern ARM64Instr* ARM64Instr_Imm64   ( HReg, ULong );
+extern ARM64Instr* ARM64Instr_LdSt64  ( Bool isLoad, HReg, ARM64AMode* );
+extern ARM64Instr* ARM64Instr_LdSt32  ( Bool isLoad, HReg, ARM64AMode* );
+extern ARM64Instr* ARM64Instr_LdSt16  ( Bool isLoad, HReg, ARM64AMode* );
+extern ARM64Instr* ARM64Instr_LdSt8   ( Bool isLoad, HReg, ARM64AMode* );
+extern ARM64Instr* ARM64Instr_XDirect ( Addr64 dstGA, ARM64AMode* amPC,
+                                        ARM64CondCode cond, Bool toFastEP );
+extern ARM64Instr* ARM64Instr_XIndir  ( HReg dstGA, ARM64AMode* amPC,
+                                        ARM64CondCode cond );
+extern ARM64Instr* ARM64Instr_XAssisted ( HReg dstGA, ARM64AMode* amPC,
+                                          ARM64CondCode cond, IRJumpKind jk );
+extern ARM64Instr* ARM64Instr_CSel    ( HReg dst, HReg argL, HReg argR,
+                                        ARM64CondCode cond );
+extern ARM64Instr* ARM64Instr_Call    ( ARM64CondCode, Addr64, Int nArgRegs,
+                                        RetLoc rloc );
+extern ARM64Instr* ARM64Instr_AddToSP ( Int simm );
+extern ARM64Instr* ARM64Instr_FromSP  ( HReg dst );
+extern ARM64Instr* ARM64Instr_Mul     ( HReg dst, HReg argL, HReg argR,
+                                        ARM64MulOp op );
+extern ARM64Instr* ARM64Instr_LdrEX   ( Int szB );
+extern ARM64Instr* ARM64Instr_StrEX   ( Int szB );
+extern ARM64Instr* ARM64Instr_MFence  ( void );
+extern ARM64Instr* ARM64Instr_VLdStH  ( Bool isLoad, HReg sD, HReg rN,
+                                        UInt uimm12 /* 0 .. 8190, 0 % 2 */ );
+extern ARM64Instr* ARM64Instr_VLdStS  ( Bool isLoad, HReg sD, HReg rN,
+                                        UInt uimm12 /* 0 .. 16380, 0 % 4 */ );
+extern ARM64Instr* ARM64Instr_VLdStD  ( Bool isLoad, HReg dD, HReg rN,
+                                        UInt uimm12 /* 0 .. 32760, 0 % 8 */ );
+extern ARM64Instr* ARM64Instr_VLdStQ  ( Bool isLoad, HReg rQ, HReg rN );
+extern ARM64Instr* ARM64Instr_VCvtI2F ( ARM64CvtOp how, HReg rD, HReg rS );
+extern ARM64Instr* ARM64Instr_VCvtF2I ( ARM64CvtOp how, HReg rD, HReg rS,
+                                        UChar armRM );
+extern ARM64Instr* ARM64Instr_VCvtSD  ( Bool sToD, HReg dst, HReg src );
+extern ARM64Instr* ARM64Instr_VCvtHS  ( Bool hToS, HReg dst, HReg src );
+extern ARM64Instr* ARM64Instr_VCvtHD  ( Bool hToD, HReg dst, HReg src );
+extern ARM64Instr* ARM64Instr_VUnaryD ( ARM64FpUnaryOp op, HReg dst, HReg src );
+extern ARM64Instr* ARM64Instr_VUnaryS ( ARM64FpUnaryOp op, HReg dst, HReg src );
+extern ARM64Instr* ARM64Instr_VBinD   ( ARM64FpBinOp op, HReg, HReg, HReg );
+extern ARM64Instr* ARM64Instr_VBinS   ( ARM64FpBinOp op, HReg, HReg, HReg );
+extern ARM64Instr* ARM64Instr_VCmpD   ( HReg argL, HReg argR );
+extern ARM64Instr* ARM64Instr_VCmpS   ( HReg argL, HReg argR );
+extern ARM64Instr* ARM64Instr_VFCSel  ( HReg dst, HReg argL, HReg argR,
+                                        ARM64CondCode cond, Bool isD );
+extern ARM64Instr* ARM64Instr_FPCR    ( Bool toFPCR, HReg iReg );
+extern ARM64Instr* ARM64Instr_FPSR    ( Bool toFPSR, HReg iReg );
+extern ARM64Instr* ARM64Instr_VBinV   ( ARM64VecBinOp op, HReg, HReg, HReg );
+extern ARM64Instr* ARM64Instr_VModifyV ( ARM64VecModifyOp, HReg, HReg );
+extern ARM64Instr* ARM64Instr_VUnaryV ( ARM64VecUnaryOp op, HReg, HReg );
+extern ARM64Instr* ARM64Instr_VNarrowV ( ARM64VecNarrowOp op, UInt dszBlg2,
+                                         HReg dst, HReg src );
+extern ARM64Instr* ARM64Instr_VShiftImmV ( ARM64VecShiftImmOp op,
+                                           HReg dst, HReg src, UInt amt );
+extern ARM64Instr* ARM64Instr_VExtV   ( HReg dst,
+                                        HReg srcLo, HReg srcHi, UInt amtB );
+extern ARM64Instr* ARM64Instr_VImmQ   ( HReg, UShort );
+extern ARM64Instr* ARM64Instr_VDfromX ( HReg rD, HReg rX );
+extern ARM64Instr* ARM64Instr_VQfromX ( HReg rQ, HReg rXlo );
+extern ARM64Instr* ARM64Instr_VQfromXX( HReg rQ, HReg rXhi, HReg rXlo );
+extern ARM64Instr* ARM64Instr_VXfromQ ( HReg rX, HReg rQ, UInt laneNo );
+extern ARM64Instr* ARM64Instr_VXfromDorS ( HReg rX, HReg rDorS, Bool fromD );
+extern ARM64Instr* ARM64Instr_VMov    ( UInt szB, HReg dst, HReg src );
+
+extern ARM64Instr* ARM64Instr_EvCheck ( ARM64AMode* amCounter,
+                                        ARM64AMode* amFailAddr );
+extern ARM64Instr* ARM64Instr_ProfInc ( void );
+
+/* Pretty-print one instruction (for tracing/debugging). */
+extern void ppARM64Instr ( const ARM64Instr* );
+
+
+/* Some functions that insulate the register allocator from details
+   of the underlying instruction set. */
+extern void getRegUsage_ARM64Instr ( HRegUsage*, const ARM64Instr*, Bool );
+extern void mapRegs_ARM64Instr     ( HRegRemap*, ARM64Instr*, Bool );
+extern Bool isMove_ARM64Instr      ( const ARM64Instr*, HReg*, HReg* );
+/* Encode |i| into |buf| (up to |nbuf| bytes); returns the number of
+   bytes emitted.  *is_profInc is set when a ProfInc was emitted, so
+   the caller can record the patch site. */
+extern Int  emit_ARM64Instr        ( /*MB_MOD*/Bool* is_profInc,
+                                     UChar* buf, Int nbuf, const ARM64Instr* i,
+                                     Bool mode64,
+                                     VexEndness endness_host,
+                                     const void* disp_cp_chain_me_to_slowEP,
+                                     const void* disp_cp_chain_me_to_fastEP,
+                                     const void* disp_cp_xindir,
+                                     const void* disp_cp_xassisted );
+
+/* Generate instruction(s) to spill/reload |rreg| at |offset| in the
+   guest state area; at most two instructions (*i2 may be left unset). */
+extern void genSpill_ARM64  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                              HReg rreg, Int offset, Bool );
+extern void genReload_ARM64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                              HReg rreg, Int offset, Bool );
+
+/* The set of real registers available to the allocator on arm64. */
+extern const RRegUniverse* getRRegUniverse_ARM64 ( void );
+
+/* Instruction selection: convert an IR superblock to arm64 code. */
+extern HInstrArray* iselSB_ARM64 ( const IRSB*, 
+                                   VexArch,
+                                   const VexArchInfo*,
+                                   const VexAbiInfo*,
+                                   Int offs_Host_EvC_Counter,
+                                   Int offs_Host_EvC_FailAddr,
+                                   Bool chainingAllowed,
+                                   Bool addProfInc,
+                                   Addr max_ga );
+
+/* How big is an event check?  This is kind of a kludge because it
+   depends on the offsets of host_EvC_FAILADDR and
+   host_EvC_COUNTER. */
+extern Int evCheckSzB_ARM64 (void);
+
+/* Perform a chaining and unchaining of an XDirect jump. */
+extern VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+                                          void* place_to_chain,
+                                          const void* disp_cp_chain_me_EXPECTED,
+                                          const void* place_to_jump_to );
+
+extern VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+                                            void* place_to_unchain,
+                                            const void* place_to_jump_to_EXPECTED,
+                                            const void* disp_cp_chain_me );
+
+/* Patch the counter location into an existing ProfInc point. */
+extern VexInvalRange patchProfInc_ARM64 ( VexEndness endness_host,
+                                          void*  place_to_patch,
+                                          const ULong* location_of_counter );
+#endif /* ndef __VEX_HOST_ARM64_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                   host_arm64_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_arm64_isel.c b/VEX/priv/host_arm64_isel.c
new file mode 100644
index 0000000..0568fde
--- /dev/null
+++ b/VEX/priv/host_arm64_isel.c
@@ -0,0 +1,4112 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 host_arm64_isel.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2013-2013 OpenWorks
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "ir_match.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "host_generic_regs.h"
+#include "host_generic_simd64.h"  // for 32-bit SIMD helpers
+#include "host_arm64_defs.h"
+
+
+/*---------------------------------------------------------*/
+/*--- ISelEnv                                           ---*/
+/*---------------------------------------------------------*/
+
+/* This carries around:
+
+   - A mapping from IRTemp to IRType, giving the type of any IRTemp we
+     might encounter.  This is computed before insn selection starts,
+     and does not change.
+
+   - A mapping from IRTemp to HReg.  This tells the insn selector
+     which virtual register is associated with each IRTemp temporary.
+     This is computed before insn selection starts, and does not
+     change.  We expect this mapping to map precisely the same set of
+     IRTemps as the type mapping does.
+
+     |vregmap|   holds the primary register for the IRTemp.
+     |vregmapHI| is only used for 128-bit integer-typed
+                 IRTemps.  It holds the identity of a second
+                 64-bit virtual HReg, which holds the high half
+                 of the value.
+
+   - The code array, that is, the insns selected so far.
+
+   - A counter, for generating new virtual registers.
+
+   - The host hardware capabilities word.  This is set at the start
+     and does not change.
+
+   - A Bool for indicating whether we may generate chain-me
+     instructions for control flow transfers, or whether we must use
+     XAssisted.
+
+   - The maximum guest address of any guest insn in this block.
+     Actually, the address of the highest-addressed byte from any insn
+     in this block.  Is set at the start and does not change.  This is
+     used for detecting jumps which are definitely forward-edges from
+     this block, and therefore can be made (chained) to the fast entry
+     point of the destination, thereby avoiding the destination's
+     event check.
+
+    - An IRExpr*, which may be NULL, holding the IR expression (an
+      IRRoundingMode-encoded value) to which the FPU's rounding mode
+      was most recently set.  Setting to NULL is always safe.  Used to
+      avoid redundant settings of the FPU's rounding mode, as
+      described in set_FPCR_rounding_mode below.
+
+   Note, this is all (well, mostly) host-independent.
+*/
+
+typedef
+   struct {
+      /* Constants -- set at the start and do not change. */
+      IRTypeEnv*   type_env;        /* gives the type of each IRTemp */
+
+      HReg*        vregmap;         /* IRTemp -> primary vreg */
+      HReg*        vregmapHI;       /* IRTemp -> hi-half vreg; only valid
+                                       for 128-bit integer IRTemps */
+      Int          n_vregmap;       /* number of entries in both maps */
+
+      UInt         hwcaps;          /* host hardware capabilities word */
+
+      Bool         chainingAllowed; /* may we emit chain-me transfers? */
+      Addr64       max_ga;          /* highest guest insn address in block */
+
+      /* These are modified as we go along. */
+      HInstrArray* code;            /* the insns selected so far */
+      Int          vreg_ctr;        /* counter for new virtual registers */
+
+      IRExpr*      previous_rm;     /* last IRRoundingMode set in FPCR,
+                                       or NULL; see comment above */
+   }
+   ISelEnv;
+
+/* Return the (primary) virtual register assigned to |tmp|. */
+static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp )
+{
+   /* NOTE(review): if IRTemp is an unsigned type, the first assertion
+      is vacuously true -- confirm against libvex_ir.h. */
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   return env->vregmap[tmp];
+}
+
+/* Return both virtual registers for a 128-bit integer IRTemp:
+   low half in *vrLO, high half in *vrHI.  Asserts that |tmp| actually
+   has a high-half register (i.e. is 128-bit typed). */
+static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO, 
+                               ISelEnv* env, IRTemp tmp )
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
+   *vrLO = env->vregmap[tmp];
+   *vrHI = env->vregmapHI[tmp];
+}
+
+/* Append |instr| to the code array; print it too when vcode tracing
+   is enabled. */
+static void addInstr ( ISelEnv* env, ARM64Instr* instr )
+{
+   addHInstr(env->code, instr);
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      ppARM64Instr(instr);
+      vex_printf("\n");
+   }
+}
+
+/* Mint a fresh 64-bit integer-class virtual register. */
+static HReg newVRegI ( ISelEnv* env )
+{
+   HReg r = mkHReg(True/*virtual reg*/, HRcInt64, 0, env->vreg_ctr);
+   env->vreg_ctr = env->vreg_ctr + 1;
+   return r;
+}
+
+/* Mint a fresh 64-bit float-class virtual register. */
+static HReg newVRegD ( ISelEnv* env )
+{
+   HReg r = mkHReg(True/*virtual reg*/, HRcFlt64, 0, env->vreg_ctr);
+   env->vreg_ctr = env->vreg_ctr + 1;
+   return r;
+}
+
+/* Mint a fresh 128-bit vector-class virtual register. */
+static HReg newVRegV ( ISelEnv* env )
+{
+   HReg r = mkHReg(True/*virtual reg*/, HRcVec128, 0, env->vreg_ctr);
+   env->vreg_ctr = env->vreg_ctr + 1;
+   return r;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Forward declarations                        ---*/
+/*---------------------------------------------------------*/
+
+/* These are organised as iselXXX and iselXXX_wrk pairs.  The
+   iselXXX_wrk do the real work, but are not to be called directly.
+   For each XXX, iselXXX calls its iselXXX_wrk counterpart, then
+   checks that all returned registers are virtual.  You should not
+   call the _wrk version directly.
+
+   Because some forms of ARM64 memory amodes are implicitly scaled by
+   the access size, iselIntExpr_AMode takes an IRType which tells it
+   the type of the access for which the amode is to be used.  This
+   type needs to be correct, else you'll get incorrect code.
+*/
+/* Addressing modes; |dty| is the type of the access the amode serves
+   (needed because some amode forms are scaled by access size). */
+static ARM64AMode* iselIntExpr_AMode_wrk ( ISelEnv* env,
+                                           IRExpr* e, IRType dty );
+static ARM64AMode* iselIntExpr_AMode     ( ISelEnv* env,
+                                           IRExpr* e, IRType dty );
+
+/* Register-or-arith-immediate operands. */
+static ARM64RIA*   iselIntExpr_RIA_wrk   ( ISelEnv* env, IRExpr* e );
+static ARM64RIA*   iselIntExpr_RIA       ( ISelEnv* env, IRExpr* e );
+
+/* Register-or-logic-immediate operands. */
+static ARM64RIL*   iselIntExpr_RIL_wrk   ( ISelEnv* env, IRExpr* e );
+static ARM64RIL*   iselIntExpr_RIL       ( ISelEnv* env, IRExpr* e );
+
+/* Register-or-6-bit-immediate operands (shift amounts). */
+static ARM64RI6*   iselIntExpr_RI6_wrk   ( ISelEnv* env, IRExpr* e );
+static ARM64RI6*   iselIntExpr_RI6       ( ISelEnv* env, IRExpr* e );
+
+/* Condition-code-valued expressions. */
+static ARM64CondCode iselCondCode_wrk    ( ISelEnv* env, IRExpr* e );
+static ARM64CondCode iselCondCode        ( ISelEnv* env, IRExpr* e );
+
+/* Integer-valued expressions, result in one register. */
+static HReg        iselIntExpr_R_wrk     ( ISelEnv* env, IRExpr* e );
+static HReg        iselIntExpr_R         ( ISelEnv* env, IRExpr* e );
+
+/* 128-bit integer expressions, result in a register pair. */
+static void        iselInt128Expr_wrk    ( /*OUT*/HReg* rHi, HReg* rLo, 
+                                           ISelEnv* env, IRExpr* e );
+static void        iselInt128Expr        ( /*OUT*/HReg* rHi, HReg* rLo, 
+                                           ISelEnv* env, IRExpr* e );
+
+/* F64 / F32 / F16 / V128 expressions. */
+static HReg        iselDblExpr_wrk        ( ISelEnv* env, IRExpr* e );
+static HReg        iselDblExpr            ( ISelEnv* env, IRExpr* e );
+
+static HReg        iselFltExpr_wrk        ( ISelEnv* env, IRExpr* e );
+static HReg        iselFltExpr            ( ISelEnv* env, IRExpr* e );
+
+static HReg        iselF16Expr_wrk        ( ISelEnv* env, IRExpr* e );
+static HReg        iselF16Expr            ( ISelEnv* env, IRExpr* e );
+
+static HReg        iselV128Expr_wrk       ( ISelEnv* env, IRExpr* e );
+static HReg        iselV128Expr           ( ISelEnv* env, IRExpr* e );
+
+/* V256 expressions, result in a register pair. */
+static void        iselV256Expr_wrk       ( /*OUT*/HReg* rHi, HReg* rLo, 
+                                            ISelEnv* env, IRExpr* e );
+static void        iselV256Expr           ( /*OUT*/HReg* rHi, HReg* rLo, 
+                                            ISelEnv* env, IRExpr* e );
+
+/* Return an RIL encoding of |imm64|, or NULL if not encodable. */
+static ARM64RIL* mb_mkARM64RIL_I ( ULong imm64 );
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Misc helpers                                ---*/
+/*---------------------------------------------------------*/
+
+/* Generate an amode suitable for a 64-bit sized access relative to
+   the baseblock register (X21).  This generates an RI12 amode, which
   means it's scaled by the access size, which is why the access size
+   -- 64 bit -- is stated explicitly here.  Consequently |off| needs
+   to be divisible by 8. */
+static ARM64AMode* mk_baseblock_64bit_access_amode ( UInt off )
+{
+   vassert(off < (8 << 12)); /* otherwise it's unrepresentable */
+   vassert((off & 7) == 0);  /* ditto */
+   return ARM64AMode_RI12(hregARM64_X21(), off >> 3, 8/*scale*/);
+}
+
+/* Ditto, for 32 bit accesses. */
+static ARM64AMode* mk_baseblock_32bit_access_amode ( UInt off )
+{
+   vassert(off < (4 << 12)); /* otherwise it's unrepresentable */
+   vassert((off & 3) == 0);  /* ditto */
+   return ARM64AMode_RI12(hregARM64_X21(), off >> 2, 4/*scale*/);
+}
+
+/* Ditto, for 16 bit accesses. */
+static ARM64AMode* mk_baseblock_16bit_access_amode ( UInt off )
+{
+   vassert(off < (2 << 12)); /* otherwise it's unrepresentable */
+   vassert((off & 1) == 0);  /* ditto */
+   return ARM64AMode_RI12(hregARM64_X21(), off >> 1, 2/*scale*/);
+}
+
+/* Ditto, for 8 bit accesses. */
+static ARM64AMode* mk_baseblock_8bit_access_amode ( UInt off )
+{
+   vassert(off < (1 << 12)); /* otherwise it's unrepresentable */
+   return ARM64AMode_RI12(hregARM64_X21(), off >> 0, 1/*scale*/);
+}
+
+static HReg mk_baseblock_128bit_access_addr ( ISelEnv* env, UInt off )
+{
+   vassert(off < (1<<12));
+   HReg r = newVRegI(env);
+   addInstr(env, ARM64Instr_Arith(r, hregARM64_X21(),
+                                     ARM64RIA_I12(off,0), True/*isAdd*/));
+   return r;
+}
+
+static HReg get_baseblock_register ( void )
+{
+   return hregARM64_X21();
+}
+
+/* Generate code to zero extend a 32 bit value in 'src' to 64 bits, in
+   a new register, and return the new register. */
+static HReg widen_z_32_to_64 ( ISelEnv* env, HReg src )
+{
+   HReg      dst  = newVRegI(env);
+   ARM64RIL* mask = ARM64RIL_I13(1, 0, 31); /* encodes 0xFFFFFFFF */
+   addInstr(env, ARM64Instr_Logic(dst, src, mask, ARM64lo_AND));
+   return dst;
+}
+
+/* Generate code to sign extend a 16 bit value in 'src' to 64 bits, in
+   a new register, and return the new register. */
+static HReg widen_s_16_to_64 ( ISelEnv* env, HReg src )
+{
+   HReg      dst = newVRegI(env);
+   ARM64RI6* n48 = ARM64RI6_I6(48);
+   addInstr(env, ARM64Instr_Shift(dst, src, n48, ARM64sh_SHL));
+   addInstr(env, ARM64Instr_Shift(dst, dst, n48, ARM64sh_SAR));
+   return dst;
+}
+
+/* Generate code to zero extend a 16 bit value in 'src' to 64 bits, in
+   a new register, and return the new register. */
+static HReg widen_z_16_to_64 ( ISelEnv* env, HReg src )
+{
+   HReg      dst = newVRegI(env);
+   ARM64RI6* n48 = ARM64RI6_I6(48);
+   addInstr(env, ARM64Instr_Shift(dst, src, n48, ARM64sh_SHL));
+   addInstr(env, ARM64Instr_Shift(dst, dst, n48, ARM64sh_SHR));
+   return dst;
+}
+
+/* Generate code to sign extend a 32 bit value in 'src' to 64 bits, in
+   a new register, and return the new register. */
+static HReg widen_s_32_to_64 ( ISelEnv* env, HReg src )
+{
+   HReg      dst = newVRegI(env);
+   ARM64RI6* n32 = ARM64RI6_I6(32);
+   addInstr(env, ARM64Instr_Shift(dst, src, n32, ARM64sh_SHL));
+   addInstr(env, ARM64Instr_Shift(dst, dst, n32, ARM64sh_SAR));
+   return dst;
+}
+
+/* Generate code to sign extend a 8 bit value in 'src' to 64 bits, in
+   a new register, and return the new register. */
+static HReg widen_s_8_to_64 ( ISelEnv* env, HReg src )
+{
+   HReg      dst = newVRegI(env);
+   ARM64RI6* n56 = ARM64RI6_I6(56);
+   addInstr(env, ARM64Instr_Shift(dst, src, n56, ARM64sh_SHL));
+   addInstr(env, ARM64Instr_Shift(dst, dst, n56, ARM64sh_SAR));
+   return dst;
+}
+
+static HReg widen_z_8_to_64 ( ISelEnv* env, HReg src )
+{
+   HReg      dst = newVRegI(env);
+   ARM64RI6* n56 = ARM64RI6_I6(56);
+   addInstr(env, ARM64Instr_Shift(dst, src, n56, ARM64sh_SHL));
+   addInstr(env, ARM64Instr_Shift(dst, dst, n56, ARM64sh_SHR));
+   return dst;
+}
+
+/* Is this IRExpr_Const(IRConst_U64(0)) ? */
+static Bool isZeroU64 ( IRExpr* e ) {
+   if (e->tag != Iex_Const) return False;
+   IRConst* con = e->Iex.Const.con;
+   vassert(con->tag == Ico_U64);
+   return con->Ico.U64 == 0;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: FP rounding mode helpers                    ---*/
+/*---------------------------------------------------------*/
+
+/* Set the FP rounding mode: 'mode' is an I32-typed expression
+   denoting a value in the range 0 .. 3, indicating a round mode
+   encoded as per type IRRoundingMode -- the first four values only
+   (Irrm_NEAREST, Irrm_NegINF, Irrm_PosINF, Irrm_ZERO).  Set the ARM64
   FPCR to have the same rounding.
+
+   For speed & simplicity, we're setting the *entire* FPCR here.
+
+   Setting the rounding mode is expensive.  So this function tries to
+   avoid repeatedly setting the rounding mode to the same thing by
+   first comparing 'mode' to the 'mode' tree supplied in the previous
+   call to this function, if any.  (The previous value is stored in
+   env->previous_rm.)  If 'mode' is a single IR temporary 't' and
+   env->previous_rm is also just 't', then the setting is skipped.
+
+   This is safe because of the SSA property of IR: an IR temporary can
+   only be defined once and so will have the same value regardless of
+   where it appears in the block.  Cool stuff, SSA.
+
+   A safety condition: all attempts to set the RM must be aware of
+   this mechanism - by being routed through the functions here.
+
   Of course this only helps in blocks where the RM is set more than
+   once and it is set to the same value each time, *and* that value is
+   held in the same IR temporary each time.  In order to assure the
+   latter as much as possible, the IR optimiser takes care to do CSE
+   on any block with any sign of floating point activity.
+*/
+static
+void set_FPCR_rounding_mode ( ISelEnv* env, IRExpr* mode )
+{
+   vassert(typeOfIRExpr(env->type_env,mode) == Ity_I32);
+   
+   /* Do we need to do anything? */
+   if (env->previous_rm
+       && env->previous_rm->tag == Iex_RdTmp
+       && mode->tag == Iex_RdTmp
+       && env->previous_rm->Iex.RdTmp.tmp == mode->Iex.RdTmp.tmp) {
+      /* no - setting it to what it was before.  */
+      vassert(typeOfIRExpr(env->type_env, env->previous_rm) == Ity_I32);
+      return;
+   }
+
+   /* No luck - we better set it, and remember what we set it to. */
+   env->previous_rm = mode;
+
+   /* Only supporting the rounding-mode bits - the rest of FPCR is set
+      to zero - so we can set the whole register at once (faster). */
+
+   /* This isn't simple, because 'mode' carries an IR rounding
+      encoding, and we need to translate that to an ARM64 FP one:
+      The IR encoding:
+         00  to nearest (the default)
+         10  to +infinity
+         01  to -infinity
+         11  to zero
+      The ARM64 FP encoding:
+         00  to nearest
+         01  to +infinity
+         10  to -infinity
+         11  to zero
+      Easy enough to do; just swap the two bits.
+   */
+   HReg irrm = iselIntExpr_R(env, mode);
+   HReg tL   = newVRegI(env);
+   HReg tR   = newVRegI(env);
+   HReg t3   = newVRegI(env);
+   /* tL = irrm << 1;
+      tR = irrm >> 1;  if we're lucky, these will issue together
+      tL &= 2;
+      tR &= 1;         ditto
+      t3 = tL | tR;
+      t3 <<= 22;
+      fmxr fpscr, t3
+   */
+   ARM64RIL* ril_one = mb_mkARM64RIL_I(1);
+   ARM64RIL* ril_two = mb_mkARM64RIL_I(2);
+   vassert(ril_one && ril_two);
+   addInstr(env, ARM64Instr_Shift(tL, irrm, ARM64RI6_I6(1), ARM64sh_SHL));
+   addInstr(env, ARM64Instr_Shift(tR, irrm, ARM64RI6_I6(1), ARM64sh_SHR));
+   addInstr(env, ARM64Instr_Logic(tL, tL, ril_two, ARM64lo_AND));
+   addInstr(env, ARM64Instr_Logic(tR, tR, ril_one, ARM64lo_AND));
+   addInstr(env, ARM64Instr_Logic(t3, tL, ARM64RIL_R(tR), ARM64lo_OR));
+   addInstr(env, ARM64Instr_Shift(t3, t3, ARM64RI6_I6(22), ARM64sh_SHL));
+   addInstr(env, ARM64Instr_FPCR(True/*toFPCR*/, t3));
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Function call helpers                       ---*/
+/*---------------------------------------------------------*/
+
+/* Used only in doHelperCall.  See big comment in doHelperCall re
+   handling of register-parameter args.  This function figures out
+   whether evaluation of an expression might require use of a fixed
+   register.  If in doubt return True (safe but suboptimal).
+*/
+static
+Bool mightRequireFixedRegs ( IRExpr* e )
+{
+   if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e))) {
+      // These are always "safe" -- either a copy of SP in some
+      // arbitrary vreg, or a copy of x21, respectively.
+      return False;
+   }
+   /* Else it's a "normal" expression. */
+   switch (e->tag) {
+      case Iex_RdTmp: case Iex_Const: case Iex_Get:
+         return False;
+      default:
+         return True;
+   }
+}
+ 
+
+/* Do a complete function call.  |guard| is a Ity_Bit expression
+   indicating whether or not the call happens.  If guard==NULL, the
+   call is unconditional.  |retloc| is set to indicate where the
+   return value is after the call.  The caller (of this fn) must
+   generate code to add |stackAdjustAfterCall| to the stack pointer
+   after the call is done.  Returns True iff it managed to handle this
+   combination of arg/return types, else returns False. */
+
+static
+Bool doHelperCall ( /*OUT*/UInt*   stackAdjustAfterCall,
+                    /*OUT*/RetLoc* retloc,
+                    ISelEnv* env,
+                    IRExpr* guard,
+                    IRCallee* cee, IRType retTy, IRExpr** args )
+{
+   ARM64CondCode cc;
+   HReg          argregs[ARM64_N_ARGREGS];
+   HReg          tmpregs[ARM64_N_ARGREGS];
+   Bool          go_fast;
+   Int           n_args, i, nextArgReg;
+   Addr64        target;
+
+   vassert(ARM64_N_ARGREGS == 8);
+
+   /* Set default returns.  We'll update them later if needed. */
+   *stackAdjustAfterCall = 0;
+   *retloc               = mk_RetLoc_INVALID();
+
+   /* These are used for cross-checking that IR-level constraints on
+      the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+   UInt nVECRETs = 0;
+   UInt nBBPTRs  = 0;
+
+   /* Marshal args for a call and do the call.
+
+      This function only deals with a tiny set of possibilities, which
+      cover all helpers in practice.  The restrictions are that only
+      arguments in registers are supported, hence only
+      ARM64_N_REGPARMS x 64 integer bits in total can be passed.  In
+      fact the only supported arg type is I64.
+
+      The return type can be I{64,32} or V128.  In the V128 case, it
+      is expected that |args| will contain the special node
+      IRExpr_VECRET(), in which case this routine generates code to
+      allocate space on the stack for the vector return value.  Since
+      we are not passing any scalars on the stack, it is enough to
+      preallocate the return space before marshalling any arguments,
+      in this case.
+
+      |args| may also contain IRExpr_BBPTR(), in which case the
+      value in x21 is passed as the corresponding argument.
+
+      Generating code which is both efficient and correct when
+      parameters are to be passed in registers is difficult, for the
+      reasons elaborated in detail in comments attached to
+      doHelperCall() in priv/host-x86/isel.c.  Here, we use a variant
+      of the method described in those comments.
+
+      The problem is split into two cases: the fast scheme and the
+      slow scheme.  In the fast scheme, arguments are computed
+      directly into the target (real) registers.  This is only safe
+      when we can be sure that computation of each argument will not
+      trash any real registers set by computation of any other
+      argument.
+
+      In the slow scheme, all args are first computed into vregs, and
+      once they are all done, they are moved to the relevant real
+      regs.  This always gives correct code, but it also gives a bunch
+      of vreg-to-rreg moves which are usually redundant but are hard
+      for the register allocator to get rid of.
+
+      To decide which scheme to use, all argument expressions are
+      first examined.  If they are all so simple that it is clear they
+      will be evaluated without use of any fixed registers, use the
+      fast scheme, else use the slow scheme.  Note also that only
+      unconditional calls may use the fast scheme, since having to
+      compute a condition expression could itself trash real
+      registers.
+
+      Note this requires being able to examine an expression and
+      determine whether or not evaluation of it might use a fixed
+      register.  That requires knowledge of how the rest of this insn
+      selector works.  Currently just the following 3 are regarded as
+      safe -- hopefully they cover the majority of arguments in
+      practice: IRExpr_Tmp IRExpr_Const IRExpr_Get.
+   */
+
+   /* Note that the cee->regparms field is meaningless on ARM64 hosts
+      (since there is only one calling convention) and so we always
+      ignore it. */
+
+   n_args = 0;
+   for (i = 0; args[i]; i++) {
+      IRExpr* arg = args[i];
+      if (UNLIKELY(arg->tag == Iex_VECRET)) {
+         nVECRETs++;
+      } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+         nBBPTRs++;
+      }
+      n_args++;
+   }
+
+   /* If this fails, the IR is ill-formed */
+   vassert(nBBPTRs == 0 || nBBPTRs == 1);
+
+   /* If we have a VECRET, allocate space on the stack for the return
+      value, and record the stack pointer after that. */
+   HReg r_vecRetAddr = INVALID_HREG;
+   if (nVECRETs == 1) {
+      vassert(retTy == Ity_V128 || retTy == Ity_V256);
+      vassert(retTy != Ity_V256); // we don't handle that yet (if ever)
+      r_vecRetAddr = newVRegI(env);
+      addInstr(env, ARM64Instr_AddToSP(-16));
+      addInstr(env, ARM64Instr_FromSP(r_vecRetAddr));
+   } else {
+      // If either of these fail, the IR is ill-formed
+      vassert(retTy != Ity_V128 && retTy != Ity_V256);
+      vassert(nVECRETs == 0);
+   }
+
+   argregs[0] = hregARM64_X0();
+   argregs[1] = hregARM64_X1();
+   argregs[2] = hregARM64_X2();
+   argregs[3] = hregARM64_X3();
+   argregs[4] = hregARM64_X4();
+   argregs[5] = hregARM64_X5();
+   argregs[6] = hregARM64_X6();
+   argregs[7] = hregARM64_X7();
+
+   tmpregs[0] = tmpregs[1] = tmpregs[2] = tmpregs[3] = INVALID_HREG;
+   tmpregs[4] = tmpregs[5] = tmpregs[6] = tmpregs[7] = INVALID_HREG;
+
+   /* First decide which scheme (slow or fast) is to be used.  First
+      assume the fast scheme, and select slow if any contraindications
+      (wow) appear. */
+
+   go_fast = True;
+
+   if (guard) {
+      if (guard->tag == Iex_Const
+          && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+         /* unconditional */
+      } else {
+         /* Not manifestly unconditional -- be conservative. */
+         go_fast = False;
+      }
+   }
+
+   if (go_fast) {
+      for (i = 0; i < n_args; i++) {
+         if (mightRequireFixedRegs(args[i])) {
+            go_fast = False;
+            break;
+         }
+      }
+   }
+
+   if (go_fast) {
+      if (retTy == Ity_V128 || retTy == Ity_V256)
+         go_fast = False;
+   }
+
+   /* At this point the scheme to use has been established.  Generate
+      code to get the arg values into the argument rregs.  If we run
+      out of arg regs, give up. */
+
+   if (go_fast) {
+
+      /* FAST SCHEME */
+      nextArgReg = 0;
+
+      for (i = 0; i < n_args; i++) {
+         IRExpr* arg = args[i];
+
+         IRType  aTy = Ity_INVALID;
+         if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+            aTy = typeOfIRExpr(env->type_env, args[i]);
+
+         if (nextArgReg >= ARM64_N_ARGREGS)
+            return False; /* out of argregs */
+
+         if (aTy == Ity_I64) {
+            addInstr(env, ARM64Instr_MovI( argregs[nextArgReg],
+                                           iselIntExpr_R(env, args[i]) ));
+            nextArgReg++;
+         }
+         else if (arg->tag == Iex_BBPTR) {
+            vassert(0); //ATC
+            addInstr(env, ARM64Instr_MovI( argregs[nextArgReg],
+                                           hregARM64_X21() ));
+            nextArgReg++;
+         }
+         else if (arg->tag == Iex_VECRET) {
+            // because of the go_fast logic above, we can't get here,
+            // since vector return values makes us use the slow path
+            // instead.
+            vassert(0);
+         }
+         else
+            return False; /* unhandled arg type */
+      }
+
+      /* Fast scheme only applies for unconditional calls.  Hence: */
+      cc = ARM64cc_AL;
+
+   } else {
+
+      /* SLOW SCHEME; move via temporaries */
+      nextArgReg = 0;
+
+      for (i = 0; i < n_args; i++) {
+         IRExpr* arg = args[i];
+
+         IRType  aTy = Ity_INVALID;
+         if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+            aTy = typeOfIRExpr(env->type_env, args[i]);
+
+         if (nextArgReg >= ARM64_N_ARGREGS)
+            return False; /* out of argregs */
+
+         if (aTy == Ity_I64) {
+            tmpregs[nextArgReg] = iselIntExpr_R(env, args[i]);
+            nextArgReg++;
+         }
+         else if (arg->tag == Iex_BBPTR) {
+            vassert(0); //ATC
+            tmpregs[nextArgReg] = hregARM64_X21();
+            nextArgReg++;
+         }
+         else if (arg->tag == Iex_VECRET) {
+            vassert(!hregIsInvalid(r_vecRetAddr));
+            tmpregs[nextArgReg] = r_vecRetAddr;
+            nextArgReg++;
+         }
+         else
+            return False; /* unhandled arg type */
+      }
+
+      /* Now we can compute the condition.  We can't do it earlier
+         because the argument computations could trash the condition
+         codes.  Be a bit clever to handle the common case where the
+         guard is 1:Bit. */
+      cc = ARM64cc_AL;
+      if (guard) {
+         if (guard->tag == Iex_Const
+             && guard->Iex.Const.con->tag == Ico_U1
+             && guard->Iex.Const.con->Ico.U1 == True) {
+            /* unconditional -- do nothing */
+         } else {
+            cc = iselCondCode( env, guard );
+         }
+      }
+
+      /* Move the args to their final destinations. */
+      for (i = 0; i < nextArgReg; i++) {
+         vassert(!(hregIsInvalid(tmpregs[i])));
+         /* None of these insns, including any spill code that might
+            be generated, may alter the condition codes. */
+         addInstr( env, ARM64Instr_MovI( argregs[i], tmpregs[i] ) );
+      }
+
+   }
+
+   /* Should be assured by checks above */
+   vassert(nextArgReg <= ARM64_N_ARGREGS);
+
+   /* Do final checks, set the return values, and generate the call
+      instruction proper. */
+   vassert(nBBPTRs == 0 || nBBPTRs == 1);
+   vassert(nVECRETs == (retTy == Ity_V128 || retTy == Ity_V256) ? 1 : 0);
+   vassert(*stackAdjustAfterCall == 0);
+   vassert(is_RetLoc_INVALID(*retloc));
+   switch (retTy) {
+      case Ity_INVALID:
+         /* Function doesn't return a value. */
+         *retloc = mk_RetLoc_simple(RLPri_None);
+         break;
+      case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8:
+         *retloc = mk_RetLoc_simple(RLPri_Int);
+         break;
+      case Ity_V128:
+         *retloc = mk_RetLoc_spRel(RLPri_V128SpRel, 0);
+         *stackAdjustAfterCall = 16;
+         break;
+      case Ity_V256:
+         vassert(0); // ATC
+         *retloc = mk_RetLoc_spRel(RLPri_V256SpRel, 0);
+         *stackAdjustAfterCall = 32;
+         break;
+      default:
+         /* IR can denote other possible return types, but we don't
+            handle those here. */
+         vassert(0);
+   }
+
+   /* Finally, generate the call itself.  This needs the *retloc value
+      set in the switch above, which is why it's at the end. */
+
+   /* nextArgReg doles out argument registers.  Since these are
+      assigned in the order x0 .. x7, its numeric value at this point,
+      which must be between 0 and 8 inclusive, is going to be equal to
+      the number of arg regs in use for the call.  Hence bake that
+      number into the call (we'll need to know it when doing register
+      allocation, to know what regs the call reads.) */
+
+   target = (Addr)cee->addr;
+   addInstr(env, ARM64Instr_Call( cc, target, nextArgReg, *retloc ));
+
+   return True; /* success */
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64/32 bit)             ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for an integer-typed expression, and add them to the
+   code list.  Return a reg holding the result.  This reg will be a
+   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
+   want to modify it, ask for a new vreg, copy it in there, and modify
+   the copy.  The register allocator will do its best to map both
+   vregs to the same real register, so the copies will often disappear
+   later in the game.
+
+   This should handle expressions of 64- and 32-bit type.  All results
+   are returned in a 64-bit register.  For 32-bit expressions, the
+   upper 32 bits are arbitrary, so you should mask or sign extend
+   partial values if necessary.
+*/
+
+/* --------------------- AMode --------------------- */
+
+/* Return an AMode which computes the value of the specified
+   expression, possibly also adding insns to the code list as a
+   result.  The expression may only be a 64-bit one.
+*/
+
+static Bool isValidScale ( UChar scale )
+{
+   switch (scale) {
+      case 1: case 2: case 4: case 8: /* case 16: ??*/ return True;
+      default: return False;
+   }
+}
+
+static Bool sane_AMode ( ARM64AMode* am )
+{
+   switch (am->tag) {
+      case ARM64am_RI9:
+         return
+            toBool( hregClass(am->ARM64am.RI9.reg) == HRcInt64
+                    && (hregIsVirtual(am->ARM64am.RI9.reg)
+                        /* || sameHReg(am->ARM64am.RI9.reg, 
+                                       hregARM64_X21()) */ )
+                    && am->ARM64am.RI9.simm9 >= -256
+                    && am->ARM64am.RI9.simm9 <= 255 );
+      case ARM64am_RI12:
+         return
+            toBool( hregClass(am->ARM64am.RI12.reg) == HRcInt64
+                    && (hregIsVirtual(am->ARM64am.RI12.reg)
+                        /* || sameHReg(am->ARM64am.RI12.reg, 
+                                       hregARM64_X21()) */ )
+                    && am->ARM64am.RI12.uimm12 < 4096
+                    && isValidScale(am->ARM64am.RI12.szB) );
+      case ARM64am_RR:
+         return
+            toBool( hregClass(am->ARM64am.RR.base) == HRcInt64
+                    && hregIsVirtual(am->ARM64am.RR.base)
+                    && hregClass(am->ARM64am.RR.index) == HRcInt64
+                    && hregIsVirtual(am->ARM64am.RR.index) );
+      default:
+         vpanic("sane_AMode: unknown ARM64 AMode1 tag");
+   }
+}
+
+static
+ARM64AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e, IRType dty )
+{
+   ARM64AMode* am = iselIntExpr_AMode_wrk(env, e, dty);
+   vassert(sane_AMode(am));
+   return am;
+}
+
+static
+ARM64AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e, IRType dty )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I64);
+
+   ULong szBbits = 0;
+   switch (dty) {
+      case Ity_I64: szBbits = 3; break;
+      case Ity_I32: szBbits = 2; break;
+      case Ity_I16: szBbits = 1; break;
+      case Ity_I8:  szBbits = 0; break;
+      default: vassert(0);
+   }
+
+   /* {Add64,Sub64}(expr,simm9).  We don't care about |dty| here since
+      we're going to create an amode suitable for LDU* or STU*
+      instructions, which use unscaled immediate offsets.  */
+   if (e->tag == Iex_Binop
+       && (e->Iex.Binop.op == Iop_Add64 || e->Iex.Binop.op == Iop_Sub64)
+       && e->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U64) {
+      Long simm = (Long)e->Iex.Binop.arg2->Iex.Const.con->Ico.U64;
+      if (simm >= -255 && simm <= 255) {
+         /* Although the gating condition might seem to be 
+               simm >= -256 && simm <= 255
+            we will need to negate simm in the case where the op is Sub64.
+            Hence limit the lower value to -255 in order that its negation
+            is representable. */
+         HReg reg = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         if (e->Iex.Binop.op == Iop_Sub64) simm = -simm;
+         return ARM64AMode_RI9(reg, (Int)simm);
+      }
+   }
+
+   /* Add64(expr, uimm12 * transfer-size) */
+   if (e->tag == Iex_Binop
+       && e->Iex.Binop.op == Iop_Add64
+       && e->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U64) {
+      ULong uimm = e->Iex.Binop.arg2->Iex.Const.con->Ico.U64;
+      ULong szB  = 1 << szBbits;
+      if (0 == (uimm & (szB-1)) /* "uimm is szB-aligned" */
+          && (uimm >> szBbits) < 4096) {
+         HReg reg = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         return ARM64AMode_RI12(reg, (UInt)(uimm >> szBbits), (UChar)szB);
+      }
+   }
+
+   /* Add64(expr1, expr2) */
+   if (e->tag == Iex_Binop
+       && e->Iex.Binop.op == Iop_Add64) {
+      HReg reg1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      HReg reg2 = iselIntExpr_R(env, e->Iex.Binop.arg2);
+      return ARM64AMode_RR(reg1, reg2);
+   }
+
+   /* Doesn't match anything in particular.  Generate it into
+      a register and use that. */
+   HReg reg = iselIntExpr_R(env, e);
+   return ARM64AMode_RI9(reg, 0);
+}
+
+
+/* --------------------- RIA --------------------- */
+
+/* Select instructions to generate 'e' into a RIA. */
+
+static ARM64RIA* iselIntExpr_RIA ( ISelEnv* env, IRExpr* e )
+{
+   ARM64RIA* ri = iselIntExpr_RIA_wrk(env, e);
+   /* sanity checks ... */
+   switch (ri->tag) {
+      case ARM64riA_I12:
+         vassert(ri->ARM64riA.I12.imm12 < 4096);
+         vassert(ri->ARM64riA.I12.shift == 0 || ri->ARM64riA.I12.shift == 12);
+         return ri;
+      case ARM64riA_R:
+         vassert(hregClass(ri->ARM64riA.R.reg) == HRcInt64);
+         vassert(hregIsVirtual(ri->ARM64riA.R.reg));
+         return ri;
+      default:
+         vpanic("iselIntExpr_RIA: unknown arm RIA tag");
+   }
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static ARM64RIA* iselIntExpr_RIA_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I64 || ty == Ity_I32);
+
+   /* special case: immediate */
+   if (e->tag == Iex_Const) {
+      ULong u = 0xF000000ULL; /* invalid */
+      switch (e->Iex.Const.con->tag) {
+         case Ico_U64: u = e->Iex.Const.con->Ico.U64; break;
+         case Ico_U32: u = e->Iex.Const.con->Ico.U32; break;
+         default: vpanic("iselIntExpr_RIA.Iex_Const(arm64)");
+      }
+      if (0 == (u & ~(0xFFFULL << 0)))
+         return ARM64RIA_I12((UShort)((u >> 0) & 0xFFFULL), 0);
+      if (0 == (u & ~(0xFFFULL << 12)))
+         return ARM64RIA_I12((UShort)((u >> 12) & 0xFFFULL), 12);
+      /* else fail, fall through to default case */
+   }
+
+   /* default case: calculate into a register and return that */
+   {
+      HReg r = iselIntExpr_R ( env, e );
+      return ARM64RIA_R(r);
+   }
+}
+
+
+/* --------------------- RIL --------------------- */
+
+/* Select instructions to generate 'e' into a RIL.  At this point we
+   have to deal with the strange bitfield-immediate encoding for logic
+   instructions. */
+
+
+// The following four functions
+//    CountLeadingZeros CountTrailingZeros CountSetBits isImmLogical
+// are copied, with modifications, from
+// https://github.com/armvixl/vixl/blob/master/src/a64/assembler-a64.cc
+// which has the following copyright notice:
+/*
+   Copyright 2013, ARM Limited
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+   
+   * Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright notice,
+     this list of conditions and the following disclaimer in the documentation
+     and/or other materials provided with the distribution.
+   * Neither the name of ARM Limited nor the names of its contributors may be
+     used to endorse or promote products derived from this software without
+     specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+   ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+   DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+   FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+   SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+   CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+   OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+static Int CountLeadingZeros(ULong value, Int width)
+{
+   vassert(width == 32 || width == 64);
+   Int count = 0;
+   ULong bit_test = 1ULL << (width - 1);
+   while ((count < width) && ((bit_test & value) == 0)) {
+      count++;
+      bit_test >>= 1;
+   }
+   return count;
+}
+
+static Int CountTrailingZeros(ULong value, Int width)
+{
+   vassert(width == 32 || width == 64);
+   Int count = 0;
+   while ((count < width) && (((value >> count) & 1) == 0)) {
+      count++;
+   }
+   return count;
+}
+
+static Int CountSetBits(ULong value, Int width)
+{
+   // TODO: Other widths could be added here, as the implementation already
+   // supports them.
+   vassert(width == 32 || width == 64);
+
+   // Mask out unused bits to ensure that they are not counted.
+   value &= (0xffffffffffffffffULL >> (64-width));
+
+   // Add up the set bits.
+   // The algorithm works by adding pairs of bit fields together iteratively,
+   // where the size of each bit field doubles each time.
+   // An example for an 8-bit value:
+   // Bits: h g f e d c b a
+   // \ | \ | \ | \ |
+   // value = h+g f+e d+c b+a
+   // \ | \ |
+   // value = h+g+f+e d+c+b+a
+   // \ |
+   // value = h+g+f+e+d+c+b+a
+   value = ((value >>  1) & 0x5555555555555555ULL)
+                 + (value & 0x5555555555555555ULL);
+   value = ((value >>  2) & 0x3333333333333333ULL)
+                 + (value & 0x3333333333333333ULL);
+   value = ((value >>  4) & 0x0f0f0f0f0f0f0f0fULL)
+                 + (value & 0x0f0f0f0f0f0f0f0fULL);
+   value = ((value >>  8) & 0x00ff00ff00ff00ffULL)
+                 + (value & 0x00ff00ff00ff00ffULL);
+   value = ((value >> 16) & 0x0000ffff0000ffffULL)
+                 + (value & 0x0000ffff0000ffffULL);
+   value = ((value >> 32) & 0x00000000ffffffffULL)
+                 + (value & 0x00000000ffffffffULL);
+
+   return value;
+}
+
+static Bool isImmLogical ( /*OUT*/UInt* n,
+                           /*OUT*/UInt* imm_s, /*OUT*/UInt* imm_r,
+                           ULong value, UInt width )
+{
+  // Test if a given value can be encoded in the immediate field of a
+  // logical instruction.
+
+  // If it can be encoded, the function returns true, and values
+  // pointed to by n, imm_s and imm_r are updated with immediates
+  // encoded in the format required by the corresponding fields in the
+  // logical instruction.  If it can not be encoded, the function
+  // returns false, and the values pointed to by n, imm_s and imm_r
+  // are undefined.
+  vassert(n != NULL && imm_s != NULL && imm_r != NULL);
+  vassert(width == 32 || width == 64);
+
+  // Logical immediates are encoded using parameters n, imm_s and imm_r using
+  // the following table:
+  //
+  //   N   imms     immr     size   S              R
+  //   1   ssssss   rrrrrr   64     UInt(ssssss)   UInt(rrrrrr)
+  //   0   0sssss   xrrrrr   32     UInt(sssss)    UInt(rrrrr)
+  //   0   10ssss   xxrrrr   16     UInt(ssss)     UInt(rrrr)
+  //   0   110sss   xxxrrr   8      UInt(sss)      UInt(rrr)
+  //   0   1110ss   xxxxrr   4      UInt(ss)       UInt(rr)
+  //   0   11110s   xxxxxr   2      UInt(s)        UInt(r)
+  // (s bits must not be all set)
+  //
+  // A pattern is constructed of size bits, where the least significant S+1
+  // bits are set. The pattern is rotated right by R, and repeated across a
+  // 32 or 64-bit value, depending on destination register width.
+  //
+  // To test if an arbitrary immediate can be encoded using this scheme, an
+  // iterative algorithm is used.  Each iteration either proves the value
+  // encodable / not encodable at the current pattern size, or halves the
+  // pattern size and tries again (step 5 below).
+  //
+  // TODO: This code does not consider using X/W register overlap to support
+  // 64-bit immediates where the top 32-bits are zero, and the bottom 32-bits
+  // are an encodable logical immediate.
+
+  // 1. If the value has all set or all clear bits, it can't be encoded.
+  if ((value == 0) || (value == 0xffffffffffffffffULL) ||
+      ((width == 32) && (value == 0xffffffff))) {
+    return False;
+  }
+
+  // Bit statistics of the candidate value, computed once up front and
+  // then halved in step with |width| as the pattern size shrinks.
+  UInt lead_zero = CountLeadingZeros(value, width);
+  UInt lead_one = CountLeadingZeros(~value, width);
+  UInt trail_zero = CountTrailingZeros(value, width);
+  UInt trail_one = CountTrailingZeros(~value, width);
+  UInt set_bits = CountSetBits(value, width);
+
+  // The fixed bits in the immediate s field.
+  // If width == 64 (X reg), start at 0xFFFFFF80.
+  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
+  // widths won't be executed.
+  Int imm_s_fixed = (width == 64) ? -128 : -64;
+  Int imm_s_mask = 0x3F;
+
+  for (;;) {
+    // 2. If the value is two bits wide, it can be encoded.
+    if (width == 2) {
+      *n = 0;
+      *imm_s = 0x3C;
+      *imm_r = (value & 3) - 1;
+      return True;
+    }
+
+    // Compute the candidate N/imms/immr for the current pattern size;
+    // these are only meaningful if one of the checks in steps 3 or 4
+    // below succeeds.  immr is the rotation needed to bring the run of
+    // set bits to the bottom of the pattern.
+    *n = (width == 64) ? 1 : 0;
+    *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
+    if ((lead_zero + set_bits) == width) {
+      *imm_r = 0;
+    } else {
+      *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
+    }
+
+    // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
+    // the bit width of the value, it can be encoded.
+    if (lead_zero + trail_zero + set_bits == width) {
+      return True;
+    }
+
+    // 4. If the sum of leading ones, trailing ones and unset bits in the
+    // value is equal to the bit width of the value, it can be encoded.
+    if (lead_one + trail_one + (width - set_bits) == width) {
+      return True;
+    }
+
+    // 5. If the most-significant half of the bitwise value is equal to the
+    // least-significant half, return to step 2 using the least-significant
+    // half of the value.
+    ULong mask = (1ULL << (width >> 1)) - 1;
+    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
+      width >>= 1;
+      set_bits >>= 1;
+      imm_s_fixed >>= 1;
+      continue;
+    }
+
+    // 6. Otherwise, the value can't be encoded.
+    return False;
+  }
+}
+
+
+/* Create a RIL for the given immediate, if it is representable, or
+   return NULL if not. */
+
+static ARM64RIL* mb_mkARM64RIL_I ( ULong imm64 )
+{
+   /* Try to encode |imm64| as a 64-bit logical immediate; give back
+      NULL if the bit pattern is not representable. */
+   UInt bitN = 0, immS = 0, immR = 0;
+   if (!isImmLogical(&bitN, &immS, &immR, imm64, 64))
+      return NULL;
+   vassert(bitN < 2 && immS < 64 && immR < 64);
+   return ARM64RIL_I13(bitN, immR, immS);
+}
+
+/* So, finally .. */
+
+static ARM64RIL* iselIntExpr_RIL ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check the result before
+      handing it back. */
+   ARM64RIL* ril = iselIntExpr_RIL_wrk(env, e);
+   if (ril->tag == ARM64riL_I13) {
+      vassert(ril->ARM64riL.I13.bitN < 2);
+      vassert(ril->ARM64riL.I13.immR < 64);
+      vassert(ril->ARM64riL.I13.immS < 64);
+      return ril;
+   }
+   if (ril->tag == ARM64riL_R) {
+      vassert(hregClass(ril->ARM64riL.R.reg) == HRcInt64);
+      vassert(hregIsVirtual(ril->ARM64riL.R.reg));
+      return ril;
+   }
+   vpanic("iselIntExpr_RIL: unknown arm RIL tag");
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static ARM64RIL* iselIntExpr_RIL_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I64 || ty == Ity_I32);
+
+   /* Special case: constants which are encodable as a logical
+      immediate. */
+   if (e->tag == Iex_Const) {
+      ARM64RIL* enc = NULL;
+      switch (ty) {
+         case Ity_I64:
+            vassert(e->Iex.Const.con->tag == Ico_U64);
+            enc = mb_mkARM64RIL_I(e->Iex.Const.con->Ico.U64);
+            break;
+         case Ity_I32: {
+            vassert(e->Iex.Const.con->tag == Ico_U32);
+            ULong u64 = (ULong)e->Iex.Const.con->Ico.U32;
+            /* First attempt: the value with 32 leading zeroes. */
+            enc = mb_mkARM64RIL_I(u64);
+            /* Second attempt: the value replicated into both halves,
+               since whatever lands in the upper 32 bits of the result
+               is irrelevant for a 32-bit operation. */
+            if (enc == NULL)
+               enc = mb_mkARM64RIL_I((u64 << 32) | u64);
+            break;
+         }
+         default:
+            break;
+      }
+      if (enc != NULL)
+         return enc;
+      /* Not encodable: fall through to the register case. */
+   }
+
+   /* Default case: compute the value into a register and use that. */
+   return ARM64RIL_R(iselIntExpr_R(env, e));
+}
+
+
+/* --------------------- RI6 --------------------- */
+
+/* Select instructions to generate 'e' into a RI6. */
+
+static ARM64RI6* iselIntExpr_RI6 ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check the result before
+      handing it back. */
+   ARM64RI6* ri6 = iselIntExpr_RI6_wrk(env, e);
+   if (ri6->tag == ARM64ri6_I6) {
+      /* Immediate shift amounts must lie in 1 .. 63. */
+      vassert(ri6->ARM64ri6.I6.imm6 < 64);
+      vassert(ri6->ARM64ri6.I6.imm6 > 0);
+      return ri6;
+   }
+   if (ri6->tag == ARM64ri6_R) {
+      vassert(hregClass(ri6->ARM64ri6.R.reg) == HRcInt64);
+      vassert(hregIsVirtual(ri6->ARM64ri6.R.reg));
+      return ri6;
+   }
+   vpanic("iselIntExpr_RI6: unknown arm RI6 tag");
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static ARM64RI6* iselIntExpr_RI6_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I64 || ty == Ity_I8);
+
+   /* Special case: an immediate shift amount.  Note the original code
+      had the switch's |default| label nested inside the Ico_U8 case's
+      braces; the structure is fixed here (behavior unchanged). */
+   if (e->tag == Iex_Const) {
+      switch (e->Iex.Const.con->tag) {
+         case Ico_U8: {
+            /* Only shift amounts 1 .. 63 are representable in the imm6
+               field; 0 and anything >= 64 go via a register instead. */
+            UInt u = e->Iex.Const.con->Ico.U8;
+            if (u > 0 && u < 64)
+               return ARM64RI6_I6(u);
+            break;
+         }
+         default:
+            break;
+      }
+      /* else fail, fall through to default case */
+   }
+
+   /* default case: calculate into a register and return that */
+   {
+      HReg r = iselIntExpr_R ( env, e );
+      return ARM64RI6_R(r);
+   }
+}
+
+
+/* ------------------- CondCode ------------------- */
+
+/* Generate code to evaluate a bit-typed expression, returning the
+   condition code that corresponds to the expression notionally
+   having returned 1. */
+
+static ARM64CondCode iselCondCode ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker and check it produced a usable condition
+      code (NV is never a valid result). */
+   ARM64CondCode result = iselCondCode_wrk(env, e);
+   vassert(result != ARM64cc_NV);
+   return result;
+}
+
+/* Worker for iselCondCode: emits instructions that set NZCV in
+   PSTATE, and returns the condition code under which the I1-typed
+   expression |e| is notionally 1.  Dispatches over a fixed set of
+   recognised expression shapes; anything else panics. */
+static ARM64CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I1);
+
+   /* var */
+   if (e->tag == Iex_RdTmp) {
+      HReg rTmp = lookupIRTemp(env, e->Iex.RdTmp.tmp);
+      /* Cmp doesn't modify rTmp; so this is OK. */
+      /* Test bit 0 of the temp: NE <=> the bit is set. */
+      ARM64RIL* one = mb_mkARM64RIL_I(1);
+      vassert(one);
+      addInstr(env, ARM64Instr_Test(rTmp, one));
+      return ARM64cc_NE;
+   }
+
+   /* Not1(e) */
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) {
+      /* Generate code for the arg, and negate the test condition */
+      ARM64CondCode cc = iselCondCode(env, e->Iex.Unop.arg);
+      if (cc == ARM64cc_AL || cc == ARM64cc_NV) {
+        return ARM64cc_AL;
+      } else {
+        /* Flipping bit 0 of an ARM64 condition code yields its
+           logical inverse (EQ<->NE, LT<->GE, etc). */
+        return 1 ^ cc;
+      }
+   }
+
+   /* --- patterns rooted at: 64to1 --- */
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_64to1) {
+      HReg      rTmp = iselIntExpr_R(env, e->Iex.Unop.arg);
+      ARM64RIL* one  = mb_mkARM64RIL_I(1);
+      vassert(one); /* '1' must be representable */
+      addInstr(env, ARM64Instr_Test(rTmp, one));
+      return ARM64cc_NE;
+   }
+
+   /* --- patterns rooted at: CmpNEZ8 --- */
+
+   /* Test only the low 8 bits against zero. */
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_CmpNEZ8) {
+      HReg      r1  = iselIntExpr_R(env, e->Iex.Unop.arg);
+      ARM64RIL* xFF = mb_mkARM64RIL_I(0xFF);
+      addInstr(env, ARM64Instr_Test(r1, xFF));
+      return ARM64cc_NE;
+   }
+
+   /* --- patterns rooted at: CmpNEZ16 --- */
+
+   /* Test only the low 16 bits against zero. */
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_CmpNEZ16) {
+      HReg      r1    = iselIntExpr_R(env, e->Iex.Unop.arg);
+      ARM64RIL* xFFFF = mb_mkARM64RIL_I(0xFFFF);
+      addInstr(env, ARM64Instr_Test(r1, xFFFF));
+      return ARM64cc_NE;
+   }
+
+   /* --- patterns rooted at: CmpNEZ64 --- */
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_CmpNEZ64) {
+      HReg      r1   = iselIntExpr_R(env, e->Iex.Unop.arg);
+      ARM64RIA* zero = ARM64RIA_I12(0,0);
+      addInstr(env, ARM64Instr_Cmp(r1, zero, True/*is64*/));
+      return ARM64cc_NE;
+   }
+
+   /* --- patterns rooted at: CmpNEZ32 --- */
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_CmpNEZ32) {
+      HReg      r1   = iselIntExpr_R(env, e->Iex.Unop.arg);
+      ARM64RIA* zero = ARM64RIA_I12(0,0);
+      addInstr(env, ARM64Instr_Cmp(r1, zero, False/*!is64*/));
+      return ARM64cc_NE;
+   }
+
+   /* --- Cmp*64*(x,y) --- */
+   /* One 64-bit compare; the returned condition code encodes the
+      particular relation (EQ/NE, signed LT/LE, unsigned CC/LS). */
+   if (e->tag == Iex_Binop
+       && (e->Iex.Binop.op == Iop_CmpEQ64
+           || e->Iex.Binop.op == Iop_CmpNE64
+           || e->Iex.Binop.op == Iop_CmpLT64S
+           || e->Iex.Binop.op == Iop_CmpLT64U
+           || e->Iex.Binop.op == Iop_CmpLE64S
+           || e->Iex.Binop.op == Iop_CmpLE64U)) {
+      HReg      argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      ARM64RIA* argR = iselIntExpr_RIA(env, e->Iex.Binop.arg2);
+      addInstr(env, ARM64Instr_Cmp(argL, argR, True/*is64*/));
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpEQ64:  return ARM64cc_EQ;
+         case Iop_CmpNE64:  return ARM64cc_NE;
+         case Iop_CmpLT64S: return ARM64cc_LT;
+         case Iop_CmpLT64U: return ARM64cc_CC;
+         case Iop_CmpLE64S: return ARM64cc_LE;
+         case Iop_CmpLE64U: return ARM64cc_LS;
+         default: vpanic("iselCondCode(arm64): CmpXX64");
+      }
+   }
+
+   /* --- Cmp*32*(x,y) --- */
+   /* Same scheme as the 64-bit case, but a 32-bit compare. */
+   if (e->tag == Iex_Binop
+       && (e->Iex.Binop.op == Iop_CmpEQ32
+           || e->Iex.Binop.op == Iop_CmpNE32
+           || e->Iex.Binop.op == Iop_CmpLT32S
+           || e->Iex.Binop.op == Iop_CmpLT32U
+           || e->Iex.Binop.op == Iop_CmpLE32S
+           || e->Iex.Binop.op == Iop_CmpLE32U)) {
+      HReg      argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      ARM64RIA* argR = iselIntExpr_RIA(env, e->Iex.Binop.arg2);
+      addInstr(env, ARM64Instr_Cmp(argL, argR, False/*!is64*/));
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpEQ32:  return ARM64cc_EQ;
+         case Iop_CmpNE32:  return ARM64cc_NE;
+         case Iop_CmpLT32S: return ARM64cc_LT;
+         case Iop_CmpLT32U: return ARM64cc_CC;
+         case Iop_CmpLE32S: return ARM64cc_LE;
+         case Iop_CmpLE32U: return ARM64cc_LS;
+         default: vpanic("iselCondCode(arm64): CmpXX32");
+      }
+   }
+
+   /* No pattern matched: dump the expression and give up. */
+   ppIRExpr(e);
+   vpanic("iselCondCode");
+}
+
+
+/* --------------------- Reg --------------------- */
+
+static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check: the result must be a
+      virtual 64-bit integer register. */
+   HReg res = iselIntExpr_R_wrk(env, e);
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(res) == HRcInt64);
+   vassert(hregIsVirtual(res));
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
+
+   switch (e->tag) {
+
+   /* --------- TEMP --------- */
+   case Iex_RdTmp: {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg dst  = newVRegI(env);
+
+      if (e->Iex.Load.end != Iend_LE)
+         goto irreducible;
+
+      if (ty == Ity_I64) {
+         ARM64AMode* amode = iselIntExpr_AMode ( env, e->Iex.Load.addr, ty );
+         addInstr(env, ARM64Instr_LdSt64(True/*isLoad*/, dst, amode));
+         return dst;
+      }
+      if (ty == Ity_I32) {
+         ARM64AMode* amode = iselIntExpr_AMode ( env, e->Iex.Load.addr, ty );
+         addInstr(env, ARM64Instr_LdSt32(True/*isLoad*/, dst, amode));
+         return dst;
+      }
+      if (ty == Ity_I16) {
+         ARM64AMode* amode = iselIntExpr_AMode ( env, e->Iex.Load.addr, ty );
+         addInstr(env, ARM64Instr_LdSt16(True/*isLoad*/, dst, amode));
+         return dst;
+      }
+      if (ty == Ity_I8) {
+         ARM64AMode* amode = iselIntExpr_AMode ( env, e->Iex.Load.addr, ty );
+         addInstr(env, ARM64Instr_LdSt8(True/*isLoad*/, dst, amode));
+         return dst;
+      }
+      break;
+   }
+
+   /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+
+      ARM64LogicOp lop = 0; /* invalid */
+      ARM64ShiftOp sop = 0; /* invalid */
+
+      /* Special-case 0-x into a Neg instruction.  Not because it's
+         particularly useful but more so as to give value flow using
+         this instruction, so as to check its assembly correctness for
+         implementation of Left32/Left64. */
+      switch (e->Iex.Binop.op) {
+         case Iop_Sub64:
+            if (isZeroU64(e->Iex.Binop.arg1)) {
+               HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+               HReg dst  = newVRegI(env);
+               addInstr(env, ARM64Instr_Unary(dst, argR, ARM64un_NEG));
+               return dst;
+            }
+            break;
+         default:
+            break;
+      }
+
+      /* ADD/SUB */
+      switch (e->Iex.Binop.op) {
+         case Iop_Add64: case Iop_Add32:
+         case Iop_Sub64: case Iop_Sub32: {
+            Bool      isAdd = e->Iex.Binop.op == Iop_Add64
+                              || e->Iex.Binop.op == Iop_Add32;
+            HReg      dst   = newVRegI(env);
+            HReg      argL  = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            ARM64RIA* argR  = iselIntExpr_RIA(env, e->Iex.Binop.arg2);
+            addInstr(env, ARM64Instr_Arith(dst, argL, argR, isAdd));
+            return dst;
+         }
+         default:
+            break;
+      }
+
+      /* AND/OR/XOR */
+      switch (e->Iex.Binop.op) {
+         case Iop_And64: case Iop_And32: lop = ARM64lo_AND; goto log_binop;
+         case Iop_Or64:  case Iop_Or32:  lop = ARM64lo_OR;  goto log_binop;
+         case Iop_Xor64: case Iop_Xor32: lop = ARM64lo_XOR; goto log_binop;
+         log_binop: {
+            HReg      dst  = newVRegI(env);
+            HReg      argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            ARM64RIL* argR = iselIntExpr_RIL(env, e->Iex.Binop.arg2);
+            addInstr(env, ARM64Instr_Logic(dst, argL, argR, lop));
+            return dst;
+         }
+         default:
+            break;
+      }
+
+      /* SHL/SHR/SAR */
+      switch (e->Iex.Binop.op) {
+         case Iop_Shr64:                 sop = ARM64sh_SHR; goto sh_binop;
+         case Iop_Sar64:                 sop = ARM64sh_SAR; goto sh_binop;
+         case Iop_Shl64: case Iop_Shl32: sop = ARM64sh_SHL; goto sh_binop;
+         sh_binop: {
+            HReg      dst  = newVRegI(env);
+            HReg      argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            ARM64RI6* argR = iselIntExpr_RI6(env, e->Iex.Binop.arg2);
+            addInstr(env, ARM64Instr_Shift(dst, argL, argR, sop));
+            return dst;
+         }
+         case Iop_Shr32:
+         case Iop_Sar32: {
+            Bool      zx   = e->Iex.Binop.op == Iop_Shr32;
+            HReg      argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            ARM64RI6* argR = iselIntExpr_RI6(env, e->Iex.Binop.arg2);
+            HReg      dst  = zx ? widen_z_32_to_64(env, argL)
+                                : widen_s_32_to_64(env, argL);
+            addInstr(env, ARM64Instr_Shift(dst, dst, argR, ARM64sh_SHR));
+            return dst;
+         }
+         default: break;
+      }
+
+      /* MUL */
+      if (e->Iex.Binop.op == Iop_Mul64 || e->Iex.Binop.op == Iop_Mul32) {
+         HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         HReg dst  = newVRegI(env);
+         addInstr(env, ARM64Instr_Mul(dst, argL, argR, ARM64mul_PLAIN));
+         return dst;
+      }
+
+      /* MULL */
+      if (e->Iex.Binop.op == Iop_MullU32 || e->Iex.Binop.op == Iop_MullS32) {
+         Bool isS  = e->Iex.Binop.op == Iop_MullS32;
+         HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg extL = (isS ? widen_s_32_to_64 : widen_z_32_to_64)(env, argL);
+         HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         HReg extR = (isS ? widen_s_32_to_64 : widen_z_32_to_64)(env, argR);
+         HReg dst  = newVRegI(env);
+         addInstr(env, ARM64Instr_Mul(dst, extL, extR, ARM64mul_PLAIN));
+         return dst;
+      }
+
+      /* Handle misc other ops. */
+
+      if (e->Iex.Binop.op == Iop_Max32U) {
+         HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         HReg dst  = newVRegI(env);
+         addInstr(env, ARM64Instr_Cmp(argL, ARM64RIA_R(argR), False/*!is64*/));
+         addInstr(env, ARM64Instr_CSel(dst, argL, argR, ARM64cc_CS));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_32HLto64) {
+         HReg hi32s = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg lo32s = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         HReg lo32  = widen_z_32_to_64(env, lo32s);
+         HReg hi32  = newVRegI(env);
+         addInstr(env, ARM64Instr_Shift(hi32, hi32s, ARM64RI6_I6(32),
+                                        ARM64sh_SHL));
+         addInstr(env, ARM64Instr_Logic(hi32, hi32, ARM64RIL_R(lo32),
+                                        ARM64lo_OR));
+         return hi32;
+      }
+
+      if (e->Iex.Binop.op == Iop_CmpF64 || e->Iex.Binop.op == Iop_CmpF32) {
+         Bool isD = e->Iex.Binop.op == Iop_CmpF64;
+         HReg dL  = (isD ? iselDblExpr : iselFltExpr)(env, e->Iex.Binop.arg1);
+         HReg dR  = (isD ? iselDblExpr : iselFltExpr)(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegI(env);
+         HReg imm = newVRegI(env);
+         /* Do the compare (FCMP), which sets NZCV in PSTATE.  Then
+            create in dst, the IRCmpF64Result encoded result. */
+         addInstr(env, (isD ? ARM64Instr_VCmpD : ARM64Instr_VCmpS)(dL, dR));
+         addInstr(env, ARM64Instr_Imm64(dst, 0));
+         addInstr(env, ARM64Instr_Imm64(imm, 0x40)); // 0x40 = Ircr_EQ
+         addInstr(env, ARM64Instr_CSel(dst, imm, dst, ARM64cc_EQ));
+         addInstr(env, ARM64Instr_Imm64(imm, 0x01)); // 0x01 = Ircr_LT
+         addInstr(env, ARM64Instr_CSel(dst, imm, dst, ARM64cc_MI));
+         addInstr(env, ARM64Instr_Imm64(imm, 0x00)); // 0x00 = Ircr_GT
+         addInstr(env, ARM64Instr_CSel(dst, imm, dst, ARM64cc_GT));
+         addInstr(env, ARM64Instr_Imm64(imm, 0x45)); // 0x45 = Ircr_UN
+         addInstr(env, ARM64Instr_CSel(dst, imm, dst, ARM64cc_VS));
+         return dst;
+      }
+
+      { /* local scope */
+        ARM64CvtOp cvt_op = ARM64cvt_INVALID;
+        Bool       srcIsD = False;
+        switch (e->Iex.Binop.op) {
+           case Iop_F64toI64S:
+              cvt_op = ARM64cvt_F64_I64S; srcIsD = True; break;
+           case Iop_F64toI64U:
+              cvt_op = ARM64cvt_F64_I64U; srcIsD = True; break;
+           case Iop_F64toI32S:
+              cvt_op = ARM64cvt_F64_I32S; srcIsD = True; break;
+           case Iop_F64toI32U:
+              cvt_op = ARM64cvt_F64_I32U; srcIsD = True; break;
+           case Iop_F32toI32S:
+              cvt_op = ARM64cvt_F32_I32S; srcIsD = False; break;
+           case Iop_F32toI32U:
+              cvt_op = ARM64cvt_F32_I32U; srcIsD = False; break;
+           case Iop_F32toI64S:
+              cvt_op = ARM64cvt_F32_I64S; srcIsD = False; break;
+           case Iop_F32toI64U:
+              cvt_op = ARM64cvt_F32_I64U; srcIsD = False; break;
+           default:
+              break;
+        }
+        if (cvt_op != ARM64cvt_INVALID) {
+           /* This is all a bit dodgy, because we can't handle a
+              non-constant (not-known-at-JIT-time) rounding mode
+              indication.  That's because there's no instruction
+              AFAICS that does this conversion but rounds according to
+              FPCR.RM, so we have to bake the rounding mode into the
+              instruction right now.  But that should be OK because
+              (1) the front end attaches a literal Irrm_ value to the
+              conversion binop, and (2) iropt will never float that
+              off via CSE, into a literal.  Hence we should always
+              have an Irrm_ value as the first arg. */
+           IRExpr* arg1 = e->Iex.Binop.arg1;
+           if (arg1->tag != Iex_Const) goto irreducible;
+           IRConst* arg1con = arg1->Iex.Const.con;
+           vassert(arg1con->tag == Ico_U32); // else ill-typed IR
+           UInt irrm = arg1con->Ico.U32;
+           /* Find the ARM-encoded equivalent for |irrm|. */
+           UInt armrm = 4; /* impossible */
+           switch (irrm) {
+              case Irrm_NEAREST: armrm = 0; break;
+              case Irrm_NegINF:  armrm = 2; break;
+              case Irrm_PosINF:  armrm = 1; break;
+              case Irrm_ZERO:    armrm = 3; break;
+              default: goto irreducible;
+           }
+           HReg src = (srcIsD ? iselDblExpr : iselFltExpr)
+                         (env, e->Iex.Binop.arg2);
+           HReg dst = newVRegI(env);
+           addInstr(env, ARM64Instr_VCvtF2I(cvt_op, dst, src, armrm));
+           return dst;
+        }
+      } /* local scope */
+
+      /* All cases involving host-side helper calls. */
+      void* fn = NULL;
+      switch (e->Iex.Binop.op) {
+         case Iop_DivU32:
+            fn = &h_calc_udiv32_w_arm_semantics; break;
+         case Iop_DivS32:
+            fn = &h_calc_sdiv32_w_arm_semantics; break;
+         case Iop_DivU64:
+            fn = &h_calc_udiv64_w_arm_semantics; break;
+         case Iop_DivS64:
+            fn = &h_calc_sdiv64_w_arm_semantics; break;
+         default:
+            break;
+      }
+
+      if (fn) {
+         HReg regL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg regR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         HReg res  = newVRegI(env);
+         addInstr(env, ARM64Instr_MovI(hregARM64_X0(), regL));
+         addInstr(env, ARM64Instr_MovI(hregARM64_X1(), regR));
+         addInstr(env, ARM64Instr_Call( ARM64cc_AL, (Addr)fn,
+                                        2, mk_RetLoc_simple(RLPri_Int) ));
+         addInstr(env, ARM64Instr_MovI(res, hregARM64_X0()));
+         return res;
+      }
+
+      break;
+   }
+
+   /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+
+      switch (e->Iex.Unop.op) {
+         case Iop_16Uto64: {
+            /* This probably doesn't occur often enough to be worth
+               rolling the extension into the load. */
+            IRExpr* arg = e->Iex.Unop.arg;
+            HReg    src = iselIntExpr_R(env, arg);
+            HReg    dst = widen_z_16_to_64(env, src);
+            return dst;
+         }
+         case Iop_32Uto64: {
+            IRExpr* arg = e->Iex.Unop.arg;
+            if (arg->tag == Iex_Load) {
+               /* This correctly zero extends because _LdSt32 is
+                  defined to do a zero extending load. */
+               HReg dst = newVRegI(env);
+               ARM64AMode* am
+                  = iselIntExpr_AMode(env, arg->Iex.Load.addr, Ity_I32);
+               addInstr(env, ARM64Instr_LdSt32(True/*isLoad*/, dst, am));
+               return dst;
+            }
+            /* else be lame and mask it  */
+            HReg src  = iselIntExpr_R(env, arg);
+            HReg dst  = widen_z_32_to_64(env, src);
+            return dst;
+         }
+         case Iop_8Uto32: /* Just freeload on the 8Uto64 case */
+         case Iop_8Uto64: {
+            IRExpr* arg = e->Iex.Unop.arg;
+            if (arg->tag == Iex_Load) {
+               /* This correctly zero extends because _LdSt8 is
+                  defined to do a zero extending load. */
+               HReg dst = newVRegI(env);
+               ARM64AMode* am
+                  = iselIntExpr_AMode(env, arg->Iex.Load.addr, Ity_I8);
+               addInstr(env, ARM64Instr_LdSt8(True/*isLoad*/, dst, am));
+               return dst;
+            }
+            /* else be lame and mask it  */
+            HReg src = iselIntExpr_R(env, arg);
+            HReg dst = widen_z_8_to_64(env, src);
+            return dst;
+         }
+         case Iop_128HIto64: {
+            HReg rHi, rLo;
+            iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+            return rHi; /* and abandon rLo */
+         }
+         case Iop_8Sto32: case Iop_8Sto64: {
+            IRExpr* arg = e->Iex.Unop.arg;
+            HReg    src = iselIntExpr_R(env, arg);
+            HReg    dst = widen_s_8_to_64(env, src);
+            return dst;
+         }
+         case Iop_16Sto32: case Iop_16Sto64: {
+            IRExpr* arg = e->Iex.Unop.arg;
+            HReg    src = iselIntExpr_R(env, arg);
+            HReg    dst = widen_s_16_to_64(env, src);
+            return dst;
+         }
+         case Iop_32Sto64: {
+            IRExpr* arg = e->Iex.Unop.arg;
+            HReg    src = iselIntExpr_R(env, arg);
+            HReg    dst = widen_s_32_to_64(env, src);
+            return dst;
+         }
+         case Iop_Not32:
+         case Iop_Not64: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_Unary(dst, src, ARM64un_NOT));
+            return dst;
+         }
+         case Iop_Clz64: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_Unary(dst, src, ARM64un_CLZ));
+            return dst;
+         }
+         case Iop_Left32:
+         case Iop_Left64: {
+            /* Left64(src) = src | -src.  Left32 can use the same
+               implementation since in that case we don't care what
+               the upper 32 bits become. */
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_Unary(dst, src, ARM64un_NEG));
+            addInstr(env, ARM64Instr_Logic(dst, dst, ARM64RIL_R(src),
+                                           ARM64lo_OR));
+            return dst;
+         }
+         case Iop_CmpwNEZ64: {
+           /* CmpwNEZ64(src) = (src == 0) ? 0...0 : 1...1
+                             = Left64(src) >>s 63 */
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_Unary(dst, src, ARM64un_NEG));
+            addInstr(env, ARM64Instr_Logic(dst, dst, ARM64RIL_R(src),
+                                           ARM64lo_OR));
+            addInstr(env, ARM64Instr_Shift(dst, dst, ARM64RI6_I6(63),
+                                           ARM64sh_SAR));
+            return dst;
+         }
+         case Iop_CmpwNEZ32: {
+            /* CmpwNEZ32(src) = CmpwNEZ64(src & 0xFFFFFFFF)
+                              = Left64(src & 0xFFFFFFFF) >>s 63 */
+            HReg dst = newVRegI(env);
+            HReg pre = iselIntExpr_R(env, e->Iex.Unop.arg);
+            HReg src = widen_z_32_to_64(env, pre);
+            addInstr(env, ARM64Instr_Unary(dst, src, ARM64un_NEG));
+            addInstr(env, ARM64Instr_Logic(dst, dst, ARM64RIL_R(src),
+                                           ARM64lo_OR));
+            addInstr(env, ARM64Instr_Shift(dst, dst, ARM64RI6_I6(63),
+                                           ARM64sh_SAR));
+            return dst;
+         }
+         case Iop_V128to64: case Iop_V128HIto64: {
+            HReg dst    = newVRegI(env);
+            HReg src    = iselV128Expr(env, e->Iex.Unop.arg);
+            UInt laneNo = (e->Iex.Unop.op == Iop_V128HIto64) ? 1 : 0;
+            addInstr(env, ARM64Instr_VXfromQ(dst, src, laneNo));
+            return dst;
+         }
+         case Iop_ReinterpF64asI64: {
+            HReg dst = newVRegI(env);
+            HReg src = iselDblExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_VXfromDorS(dst, src, True/*fromD*/));
+            return dst;
+         }
+         case Iop_ReinterpF32asI32: {
+            HReg dst = newVRegI(env);
+            HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_VXfromDorS(dst, src, False/*!fromD*/));
+            return dst;
+         }
+         case Iop_1Sto16:
+         case Iop_1Sto32:
+         case Iop_1Sto64: {
+            /* As with the iselStmt case for 'tmp:I1 = expr', we could
+               do a lot better here if it ever became necessary. */
+            HReg zero = newVRegI(env);
+            HReg one  = newVRegI(env);
+            HReg dst  = newVRegI(env);
+            addInstr(env, ARM64Instr_Imm64(zero, 0));
+            addInstr(env, ARM64Instr_Imm64(one,  1));
+            ARM64CondCode cc = iselCondCode(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_CSel(dst, one, zero, cc));
+            addInstr(env, ARM64Instr_Shift(dst, dst, ARM64RI6_I6(63),
+                                           ARM64sh_SHL));
+            addInstr(env, ARM64Instr_Shift(dst, dst, ARM64RI6_I6(63),
+                                           ARM64sh_SAR));
+            return dst;
+         }
+         case Iop_NarrowUn16to8x8:
+         case Iop_NarrowUn32to16x4:
+         case Iop_NarrowUn64to32x2:
+         case Iop_QNarrowUn16Sto8Sx8:
+         case Iop_QNarrowUn32Sto16Sx4:
+         case Iop_QNarrowUn64Sto32Sx2:
+         case Iop_QNarrowUn16Uto8Ux8:
+         case Iop_QNarrowUn32Uto16Ux4:
+         case Iop_QNarrowUn64Uto32Ux2:
+         case Iop_QNarrowUn16Sto8Ux8:
+         case Iop_QNarrowUn32Sto16Ux4:
+         case Iop_QNarrowUn64Sto32Ux2:
+         {
+            HReg src = iselV128Expr(env, e->Iex.Unop.arg);
+            HReg tmp = newVRegV(env);
+            HReg dst = newVRegI(env);
+            UInt dszBlg2 = 3; /* illegal */
+            ARM64VecNarrowOp op = ARM64vecna_INVALID;
+            switch (e->Iex.Unop.op) {
+               case Iop_NarrowUn16to8x8:
+                  dszBlg2 = 0; op = ARM64vecna_XTN; break;
+               case Iop_NarrowUn32to16x4:
+                  dszBlg2 = 1; op = ARM64vecna_XTN; break;
+               case Iop_NarrowUn64to32x2:
+                  dszBlg2 = 2; op = ARM64vecna_XTN; break;
+               case Iop_QNarrowUn16Sto8Sx8:
+                  dszBlg2 = 0; op = ARM64vecna_SQXTN; break;
+               case Iop_QNarrowUn32Sto16Sx4:
+                  dszBlg2 = 1; op = ARM64vecna_SQXTN; break;
+               case Iop_QNarrowUn64Sto32Sx2:
+                  dszBlg2 = 2; op = ARM64vecna_SQXTN; break;
+               case Iop_QNarrowUn16Uto8Ux8:
+                  dszBlg2 = 0; op = ARM64vecna_UQXTN; break;
+               case Iop_QNarrowUn32Uto16Ux4:
+                  dszBlg2 = 1; op = ARM64vecna_UQXTN; break;
+               case Iop_QNarrowUn64Uto32Ux2:
+                  dszBlg2 = 2; op = ARM64vecna_UQXTN; break;
+               case Iop_QNarrowUn16Sto8Ux8:
+                  dszBlg2 = 0; op = ARM64vecna_SQXTUN; break;
+               case Iop_QNarrowUn32Sto16Ux4:
+                  dszBlg2 = 1; op = ARM64vecna_SQXTUN; break;
+               case Iop_QNarrowUn64Sto32Ux2:
+                  dszBlg2 = 2; op = ARM64vecna_SQXTUN; break;
+               default:
+                  vassert(0);
+            }
+            addInstr(env, ARM64Instr_VNarrowV(op, dszBlg2, tmp, src));
+            addInstr(env, ARM64Instr_VXfromQ(dst, tmp, 0/*laneNo*/));
+            return dst;
+         }
+         case Iop_1Uto64: {
+            /* 1Uto64(tmp). */
+            HReg dst = newVRegI(env);
+            if (e->Iex.Unop.arg->tag == Iex_RdTmp) {
+               ARM64RIL* one = mb_mkARM64RIL_I(1);
+               HReg src = lookupIRTemp(env, e->Iex.Unop.arg->Iex.RdTmp.tmp);
+               vassert(one);
+               addInstr(env, ARM64Instr_Logic(dst, src, one, ARM64lo_AND));
+            } else {
+               /* CLONE-01 */
+               HReg zero = newVRegI(env);
+               HReg one  = newVRegI(env);
+               addInstr(env, ARM64Instr_Imm64(zero, 0));
+               addInstr(env, ARM64Instr_Imm64(one,  1));
+               ARM64CondCode cc = iselCondCode(env, e->Iex.Unop.arg);
+               addInstr(env, ARM64Instr_CSel(dst, one, zero, cc));
+            }
+            return dst;
+         }
+         case Iop_64to32:
+         case Iop_64to16:
+         case Iop_64to8:
+            /* These are no-ops. */
+            return iselIntExpr_R(env, e->Iex.Unop.arg);
+
+         default:
+            break;
+      }
+
+      break;
+   }
+
+   /* --------- GET --------- */
+   case Iex_Get: {
+      if (ty == Ity_I64
+          && 0 == (e->Iex.Get.offset & 7) && e->Iex.Get.offset < (8<<12)-8) {
+         HReg        dst = newVRegI(env);
+         ARM64AMode* am
+            = mk_baseblock_64bit_access_amode(e->Iex.Get.offset);
+         addInstr(env, ARM64Instr_LdSt64(True/*isLoad*/, dst, am));
+         return dst;
+      }
+      if (ty == Ity_I32
+          && 0 == (e->Iex.Get.offset & 3) && e->Iex.Get.offset < (4<<12)-4) {
+         HReg        dst = newVRegI(env);
+         ARM64AMode* am
+            = mk_baseblock_32bit_access_amode(e->Iex.Get.offset);
+         addInstr(env, ARM64Instr_LdSt32(True/*isLoad*/, dst, am));
+         return dst;
+      }
+      if (ty == Ity_I16
+          && 0 == (e->Iex.Get.offset & 1) && e->Iex.Get.offset < (2<<12)-2) {
+         HReg        dst = newVRegI(env);
+         ARM64AMode* am
+            = mk_baseblock_16bit_access_amode(e->Iex.Get.offset);
+         addInstr(env, ARM64Instr_LdSt16(True/*isLoad*/, dst, am));
+         return dst;
+      }
+      if (ty == Ity_I8
+          /* && no alignment check */ && e->Iex.Get.offset < (1<<12)-1) {
+         HReg        dst = newVRegI(env);
+         ARM64AMode* am
+            = mk_baseblock_8bit_access_amode(e->Iex.Get.offset);
+         addInstr(env, ARM64Instr_LdSt8(True/*isLoad*/, dst, am));
+         return dst;
+      }
+      break;
+   }
+
+   /* --------- CCALL --------- */
+   case Iex_CCall: {
+      HReg    dst = newVRegI(env);
+      vassert(ty == e->Iex.CCall.retty);
+
+      /* be very restrictive for now.  Only 64-bit ints allowed for
+         args, and 64 bits for return type.  Don't forget to change
+         the RetLoc if more types are allowed in future. */
+      if (e->Iex.CCall.retty != Ity_I64)
+         goto irreducible;
+
+      /* Marshal args, do the call, clear stack. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      Bool   ok      = doHelperCall( &addToSp, &rloc, env, NULL/*guard*/,
+                                     e->Iex.CCall.cee, e->Iex.CCall.retty,
+                                     e->Iex.CCall.args );
+      /* */
+      if (ok) {
+         vassert(is_sane_RetLoc(rloc));
+         vassert(rloc.pri == RLPri_Int);
+         vassert(addToSp == 0);
+         addInstr(env, ARM64Instr_MovI(dst, hregARM64_X0()));
+         return dst;
+      }
+      /* else fall through; will hit the irreducible: label */
+   }
+
+   /* --------- LITERAL --------- */
+   /* 64-bit literals */
+   case Iex_Const: {
+      ULong u   = 0;
+      HReg  dst = newVRegI(env);
+      switch (e->Iex.Const.con->tag) {
+         case Ico_U64: u = e->Iex.Const.con->Ico.U64; break;
+         case Ico_U32: u = e->Iex.Const.con->Ico.U32; break;
+         case Ico_U16: u = e->Iex.Const.con->Ico.U16; break;
+         case Ico_U8:  u = e->Iex.Const.con->Ico.U8;  break;
+         default: ppIRExpr(e); vpanic("iselIntExpr_R.Iex_Const(arm64)");
+      }
+      addInstr(env, ARM64Instr_Imm64(dst, u));
+      return dst;
+   }
+
+   /* --------- MULTIPLEX --------- */
+   case Iex_ITE: {
+      /* ITE(ccexpr, iftrue, iffalse) */
+      if (ty == Ity_I64 || ty == Ity_I32) {
+         ARM64CondCode cc;
+         HReg r1  = iselIntExpr_R(env, e->Iex.ITE.iftrue);
+         HReg r0  = iselIntExpr_R(env, e->Iex.ITE.iffalse);
+         HReg dst = newVRegI(env);
+         cc = iselCondCode(env, e->Iex.ITE.cond);
+         addInstr(env, ARM64Instr_CSel(dst, r1, r0, cc));
+         return dst;
+      }
+      break;
+   }
+
+   default: 
+   break;
+   } /* switch (e->tag) */
+
+   /* We get here if no pattern matched. */
+  irreducible:
+   ppIRExpr(e);
+   vpanic("iselIntExpr_R: cannot reduce tree");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (128 bit)               ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 128-bit value into a register pair, which is returned as
+   the first two parameters.  As with iselIntExpr_R, these may be
+   either real or virtual regs; in any case they must not be changed
+   by subsequent code emitted by the caller.  */
+
+/* Public wrapper: compute a 128-bit value into the register pair
+   (*rHi, *rLo), then sanity-check what the worker produced. */
+static void iselInt128Expr ( HReg* rHi, HReg* rLo, 
+                             ISelEnv* env, IRExpr* e )
+{
+   iselInt128Expr_wrk(rHi, rLo, env, e);
+#  if 0
+   /* Debug aid: dump the expression just selected.  Disabled. */
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* Both halves must be 64-bit integer registers, and must be
+      virtual, since register allocation has not yet happened. */
+   vassert(hregClass(*rHi) == HRcInt64);
+   vassert(hregIsVirtual(*rHi));
+   vassert(hregClass(*rLo) == HRcInt64);
+   vassert(hregIsVirtual(*rLo));
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+/* Worker for iselInt128Expr: handles the (small) set of I128-typed
+   expressions the ARM64 backend can produce.  Panics on anything
+   else. */
+static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, 
+                                 ISelEnv* env, IRExpr* e )
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I128);
+
+   /* --------- BINARY ops --------- */
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         /* 64 x 64 -> 128 multiply */
+         case Iop_MullU64:
+         case Iop_MullS64: {
+            Bool syned = toBool(e->Iex.Binop.op == Iop_MullS64);
+            HReg argL  = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            HReg argR  = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            HReg dstLo = newVRegI(env);
+            HReg dstHi = newVRegI(env);
+            /* Low 64 bits: an ordinary MUL, which is the same for
+               signed and unsigned operands. */
+            addInstr(env, ARM64Instr_Mul(dstLo, argL, argR,
+                                         ARM64mul_PLAIN));
+            /* High 64 bits: a widening-high multiply, chosen by
+               signedness (SMULH vs UMULH). */
+            addInstr(env, ARM64Instr_Mul(dstHi, argL, argR,
+                                         syned ? ARM64mul_SX : ARM64mul_ZX));
+            *rHi = dstHi;
+            *rLo = dstLo;
+            return;
+         }
+         /* 64HLto128(e1,e2) */
+         /* Just select each half independently; no instructions are
+            emitted for the pairing itself. */
+         case Iop_64HLto128:
+            *rHi = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            *rLo = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            return;
+         default: 
+            break;
+      }
+   } /* if (e->tag == Iex_Binop) */
+
+   /* No pattern matched: this I128 expression is unhandled. */
+   ppIRExpr(e);
+   vpanic("iselInt128Expr(arm64)");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Vector expressions (128 bit)                ---*/
+/*---------------------------------------------------------*/
+
+/* Public wrapper: compute a 128-bit vector (V128) value into a
+   register, which is returned.  The register is guaranteed to be a
+   virtual vector register; callers must not modify it afterwards. */
+static HReg iselV128Expr ( ISelEnv* env, IRExpr* e )
+{
+   HReg r = iselV128Expr_wrk( env, e );
+   /* Sanity-check the worker's result: must be a 128-bit vector
+      register, and virtual (register allocation happens later). */
+   vassert(hregClass(r) == HRcVec128);
+   vassert(hregIsVirtual(r));
+   return r;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselV128Expr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env, e);
+   vassert(e);
+   vassert(ty == Ity_V128);
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Const) {
+      /* Only a very limited range of constants is handled. */
+      vassert(e->Iex.Const.con->tag == Ico_V128);
+      UShort con = e->Iex.Const.con->Ico.V128;
+      HReg   res = newVRegV(env);
+      switch (con) {
+         case 0x0000: case 0x000F: case 0x003F: case 0x00FF: case 0xFFFF:
+            addInstr(env, ARM64Instr_VImmQ(res, con));
+            return res;
+         case 0x00F0:
+            addInstr(env, ARM64Instr_VImmQ(res, 0x000F));
+            addInstr(env, ARM64Instr_VExtV(res, res, res, 12));
+            return res;
+         case 0x0F00:
+            addInstr(env, ARM64Instr_VImmQ(res, 0x000F));
+            addInstr(env, ARM64Instr_VExtV(res, res, res, 8));
+            return res;
+         case 0x0FF0:
+            addInstr(env, ARM64Instr_VImmQ(res, 0x00FF));
+            addInstr(env, ARM64Instr_VExtV(res, res, res, 12));
+            return res;
+         case 0x0FFF:
+            addInstr(env, ARM64Instr_VImmQ(res, 0x000F));
+            addInstr(env, ARM64Instr_VExtV(res, res, res, 4));
+            addInstr(env, ARM64Instr_VUnaryV(ARM64vecu_NOT, res, res));
+            return res;
+         case 0xF000:
+            addInstr(env, ARM64Instr_VImmQ(res, 0x000F));
+            addInstr(env, ARM64Instr_VExtV(res, res, res, 4));
+            return res;
+         case 0xFF00:
+            addInstr(env, ARM64Instr_VImmQ(res, 0x00FF));
+            addInstr(env, ARM64Instr_VExtV(res, res, res, 8));
+            return res;
+         default: 
+            break;
+      }
+      /* Unhandled */
+      goto v128_expr_bad;
+   }
+
+   if (e->tag == Iex_Load) {
+      HReg res = newVRegV(env);
+      HReg rN  = iselIntExpr_R(env, e->Iex.Load.addr);
+      vassert(ty == Ity_V128);
+      addInstr(env, ARM64Instr_VLdStQ(True/*isLoad*/, res, rN));
+      return res;
+   }
+
+   if (e->tag == Iex_Get) {
+      UInt offs = (UInt)e->Iex.Get.offset;
+      if (offs < (1<<12)) {
+         HReg addr = mk_baseblock_128bit_access_addr(env, offs);
+         HReg res  = newVRegV(env);
+         vassert(ty == Ity_V128);
+         addInstr(env, ARM64Instr_VLdStQ(True/*isLoad*/, res, addr));
+         return res;
+      }
+      goto v128_expr_bad;
+   }
+
+   if (e->tag == Iex_Unop) {
+
+      /* Iop_ZeroHIXXofV128 cases */
+      UShort imm16 = 0;
+      switch (e->Iex.Unop.op) {
+         case Iop_ZeroHI64ofV128:  imm16 = 0x00FF; break;
+         case Iop_ZeroHI96ofV128:  imm16 = 0x000F; break;
+         case Iop_ZeroHI112ofV128: imm16 = 0x0003; break;
+         case Iop_ZeroHI120ofV128: imm16 = 0x0001; break;
+         default: break;
+      }
+      if (imm16 != 0) {
+         HReg src = iselV128Expr(env, e->Iex.Unop.arg);
+         HReg imm = newVRegV(env);
+         HReg res = newVRegV(env);
+         addInstr(env, ARM64Instr_VImmQ(imm, imm16));
+         addInstr(env, ARM64Instr_VBinV(ARM64vecb_AND, res, src, imm));
+         return res;
+      }
+
+      /* Other cases */
+      switch (e->Iex.Unop.op) {
+         case Iop_NotV128:
+         case Iop_Abs64Fx2: case Iop_Abs32Fx4:
+         case Iop_Neg64Fx2: case Iop_Neg32Fx4:
+         case Iop_Abs64x2:  case Iop_Abs32x4:
+         case Iop_Abs16x8:  case Iop_Abs8x16:
+         case Iop_Cls32x4:  case Iop_Cls16x8:  case Iop_Cls8x16:
+         case Iop_Clz32x4:  case Iop_Clz16x8:  case Iop_Clz8x16:
+         case Iop_Cnt8x16:
+         case Iop_Reverse1sIn8_x16:
+         case Iop_Reverse8sIn16_x8:
+         case Iop_Reverse8sIn32_x4: case Iop_Reverse16sIn32_x4:
+         case Iop_Reverse8sIn64_x2: case Iop_Reverse16sIn64_x2:
+         case Iop_Reverse32sIn64_x2:
+         case Iop_RecipEst32Ux4:
+         case Iop_RSqrtEst32Ux4:
+         case Iop_RecipEst64Fx2: case Iop_RecipEst32Fx4:
+         case Iop_RSqrtEst64Fx2: case Iop_RSqrtEst32Fx4:
+         {
+            HReg res   = newVRegV(env);
+            HReg arg   = iselV128Expr(env, e->Iex.Unop.arg);
+            Bool setRM = False;
+            ARM64VecUnaryOp op = ARM64vecu_INVALID;
+            switch (e->Iex.Unop.op) {
+               case Iop_NotV128:           op = ARM64vecu_NOT;         break;
+               case Iop_Abs64Fx2:          op = ARM64vecu_FABS64x2;    break;
+               case Iop_Abs32Fx4:          op = ARM64vecu_FABS32x4;    break;
+               case Iop_Neg64Fx2:          op = ARM64vecu_FNEG64x2;    break;
+               case Iop_Neg32Fx4:          op = ARM64vecu_FNEG32x4;    break;
+               case Iop_Abs64x2:           op = ARM64vecu_ABS64x2;     break;
+               case Iop_Abs32x4:           op = ARM64vecu_ABS32x4;     break;
+               case Iop_Abs16x8:           op = ARM64vecu_ABS16x8;     break;
+               case Iop_Abs8x16:           op = ARM64vecu_ABS8x16;     break;
+               case Iop_Cls32x4:           op = ARM64vecu_CLS32x4;     break;
+               case Iop_Cls16x8:           op = ARM64vecu_CLS16x8;     break;
+               case Iop_Cls8x16:           op = ARM64vecu_CLS8x16;     break;
+               case Iop_Clz32x4:           op = ARM64vecu_CLZ32x4;     break;
+               case Iop_Clz16x8:           op = ARM64vecu_CLZ16x8;     break;
+               case Iop_Clz8x16:           op = ARM64vecu_CLZ8x16;     break;
+               case Iop_Cnt8x16:           op = ARM64vecu_CNT8x16;     break;
+               case Iop_Reverse1sIn8_x16:  op = ARM64vecu_RBIT;        break;
+               case Iop_Reverse8sIn16_x8:  op = ARM64vecu_REV1616B;    break;
+               case Iop_Reverse8sIn32_x4:  op = ARM64vecu_REV3216B;    break;
+               case Iop_Reverse16sIn32_x4: op = ARM64vecu_REV328H;     break;
+               case Iop_Reverse8sIn64_x2:  op = ARM64vecu_REV6416B;    break;
+               case Iop_Reverse16sIn64_x2: op = ARM64vecu_REV648H;     break;
+               case Iop_Reverse32sIn64_x2: op = ARM64vecu_REV644S;     break;
+               case Iop_RecipEst32Ux4:     op = ARM64vecu_URECPE32x4;  break;
+               case Iop_RSqrtEst32Ux4:     op = ARM64vecu_URSQRTE32x4; break;
+               case Iop_RecipEst64Fx2:     setRM = True;
+                                           op = ARM64vecu_FRECPE64x2;  break;
+               case Iop_RecipEst32Fx4:     setRM = True;
+                                           op = ARM64vecu_FRECPE32x4;  break;
+               case Iop_RSqrtEst64Fx2:     setRM = True;
+                                           op = ARM64vecu_FRSQRTE64x2; break;
+               case Iop_RSqrtEst32Fx4:     setRM = True;
+                                           op = ARM64vecu_FRSQRTE32x4; break;
+               default: vassert(0);
+            }
+            if (setRM) {
+               // This is a bit of a kludge.  We should do rm properly for
+               // these recip-est insns, but that would require changing the
+               // primop's type to take an rmode.
+               set_FPCR_rounding_mode(env, IRExpr_Const(
+                                              IRConst_U32(Irrm_NEAREST)));
+            }
+            addInstr(env, ARM64Instr_VUnaryV(op, res, arg));
+            return res;
+         }
+         case Iop_CmpNEZ8x16:
+         case Iop_CmpNEZ16x8:
+         case Iop_CmpNEZ32x4:
+         case Iop_CmpNEZ64x2: {
+            HReg arg  = iselV128Expr(env, e->Iex.Unop.arg);
+            HReg zero = newVRegV(env);
+            HReg res  = newVRegV(env);
+            ARM64VecBinOp cmp = ARM64vecb_INVALID;
+            switch (e->Iex.Unop.op) {
+               case Iop_CmpNEZ64x2: cmp = ARM64vecb_CMEQ64x2; break;
+               case Iop_CmpNEZ32x4: cmp = ARM64vecb_CMEQ32x4; break;
+               case Iop_CmpNEZ16x8: cmp = ARM64vecb_CMEQ16x8; break;
+               case Iop_CmpNEZ8x16: cmp = ARM64vecb_CMEQ8x16; break;
+               default: vassert(0);
+            }
+            // This is pretty feeble.  Better: use CMP against zero
+            // and avoid the extra instruction and extra register.
+            addInstr(env, ARM64Instr_VImmQ(zero, 0x0000));
+            addInstr(env, ARM64Instr_VBinV(cmp, res, arg, zero));
+            addInstr(env, ARM64Instr_VUnaryV(ARM64vecu_NOT, res, res));
+            return res;
+         }
+         case Iop_V256toV128_0:
+         case Iop_V256toV128_1: {
+            HReg vHi, vLo;
+            iselV256Expr(&vHi, &vLo, env, e->Iex.Unop.arg);
+            return (e->Iex.Unop.op == Iop_V256toV128_1) ? vHi : vLo;
+         }
+         case Iop_64UtoV128: {
+            HReg res = newVRegV(env);
+            HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_VQfromX(res, arg));
+            return res;
+         }
+         case Iop_Widen8Sto16x8: {
+            HReg res = newVRegV(env);
+            HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_VQfromX(res, arg));
+            addInstr(env, ARM64Instr_VBinV(ARM64vecb_ZIP18x16, res, res, res));
+            addInstr(env, ARM64Instr_VShiftImmV(ARM64vecshi_SSHR16x8,
+                                                res, res, 8));
+            return res;
+         }
+         case Iop_Widen16Sto32x4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_VQfromX(res, arg));
+            addInstr(env, ARM64Instr_VBinV(ARM64vecb_ZIP116x8, res, res, res));
+            addInstr(env, ARM64Instr_VShiftImmV(ARM64vecshi_SSHR32x4,
+                                                res, res, 16));
+            return res;
+         }
+         case Iop_Widen32Sto64x2: {
+            HReg res = newVRegV(env);
+            HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARM64Instr_VQfromX(res, arg));
+            addInstr(env, ARM64Instr_VBinV(ARM64vecb_ZIP132x4, res, res, res));
+            addInstr(env, ARM64Instr_VShiftImmV(ARM64vecshi_SSHR64x2,
+                                                res, res, 32));
+            return res;
+         }
+         /* ... */
+         default:
+            break;
+      } /* switch on the unop */
+   } /* if (e->tag == Iex_Unop) */
+
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         case Iop_Sqrt32Fx4:
+         case Iop_Sqrt64Fx2: {
+            HReg arg = iselV128Expr(env, e->Iex.Binop.arg2);
+            HReg res = newVRegV(env);
+            set_FPCR_rounding_mode(env, e->Iex.Binop.arg1);
+            ARM64VecUnaryOp op 
+               = e->Iex.Binop.op == Iop_Sqrt32Fx4
+                    ? ARM64vecu_FSQRT32x4 : ARM64vecu_FSQRT64x2;
+            addInstr(env, ARM64Instr_VUnaryV(op, res, arg));
+            return res;
+         }
+         case Iop_64HLtoV128: {
+            HReg res  = newVRegV(env);
+            HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            addInstr(env, ARM64Instr_VQfromXX(res, argL, argR));
+            return res;
+         }
+         /* -- Cases where we can generate a simple three-reg instruction. -- */
+         case Iop_AndV128:
+         case Iop_OrV128:
+         case Iop_XorV128:
+         case Iop_Max32Ux4: case Iop_Max16Ux8: case Iop_Max8Ux16:
+         case Iop_Min32Ux4: case Iop_Min16Ux8: case Iop_Min8Ux16:
+         case Iop_Max32Sx4: case Iop_Max16Sx8: case Iop_Max8Sx16:
+         case Iop_Min32Sx4: case Iop_Min16Sx8: case Iop_Min8Sx16:
+         case Iop_Add64x2: case Iop_Add32x4:
+         case Iop_Add16x8: case Iop_Add8x16:
+         case Iop_Sub64x2: case Iop_Sub32x4:
+         case Iop_Sub16x8: case Iop_Sub8x16:
+         case Iop_Mul32x4: case Iop_Mul16x8: case Iop_Mul8x16:
+         case Iop_CmpEQ64x2: case Iop_CmpEQ32x4:
+         case Iop_CmpEQ16x8:  case Iop_CmpEQ8x16:
+         case Iop_CmpGT64Ux2: case Iop_CmpGT32Ux4:
+         case Iop_CmpGT16Ux8: case Iop_CmpGT8Ux16:
+         case Iop_CmpGT64Sx2: case Iop_CmpGT32Sx4:
+         case Iop_CmpGT16Sx8: case Iop_CmpGT8Sx16:
+         case Iop_CmpEQ64Fx2: case Iop_CmpEQ32Fx4:
+         case Iop_CmpLE64Fx2: case Iop_CmpLE32Fx4:
+         case Iop_CmpLT64Fx2: case Iop_CmpLT32Fx4:
+         case Iop_Perm8x16:
+         case Iop_InterleaveLO64x2: case Iop_CatEvenLanes32x4:
+         case Iop_CatEvenLanes16x8: case Iop_CatEvenLanes8x16:
+         case Iop_InterleaveHI64x2: case Iop_CatOddLanes32x4:
+         case Iop_CatOddLanes16x8:  case Iop_CatOddLanes8x16:
+         case Iop_InterleaveHI32x4:
+         case Iop_InterleaveHI16x8: case Iop_InterleaveHI8x16:
+         case Iop_InterleaveLO32x4:
+         case Iop_InterleaveLO16x8: case Iop_InterleaveLO8x16:
+         case Iop_PolynomialMul8x16:
+         case Iop_QAdd64Sx2: case Iop_QAdd32Sx4:
+         case Iop_QAdd16Sx8: case Iop_QAdd8Sx16:
+         case Iop_QAdd64Ux2: case Iop_QAdd32Ux4:
+         case Iop_QAdd16Ux8: case Iop_QAdd8Ux16:
+         case Iop_QSub64Sx2: case Iop_QSub32Sx4:
+         case Iop_QSub16Sx8: case Iop_QSub8Sx16:
+         case Iop_QSub64Ux2: case Iop_QSub32Ux4:
+         case Iop_QSub16Ux8: case Iop_QSub8Ux16:
+         case Iop_QDMulHi32Sx4:  case Iop_QDMulHi16Sx8:
+         case Iop_QRDMulHi32Sx4: case Iop_QRDMulHi16Sx8:
+         case Iop_Sh8Sx16:  case Iop_Sh16Sx8:
+         case Iop_Sh32Sx4:  case Iop_Sh64Sx2:
+         case Iop_Sh8Ux16:  case Iop_Sh16Ux8:
+         case Iop_Sh32Ux4:  case Iop_Sh64Ux2:
+         case Iop_Rsh8Sx16: case Iop_Rsh16Sx8:
+         case Iop_Rsh32Sx4: case Iop_Rsh64Sx2:
+         case Iop_Rsh8Ux16: case Iop_Rsh16Ux8:
+         case Iop_Rsh32Ux4: case Iop_Rsh64Ux2:
+         case Iop_Max64Fx2: case Iop_Max32Fx4:
+         case Iop_Min64Fx2: case Iop_Min32Fx4:
+         case Iop_RecipStep64Fx2: case Iop_RecipStep32Fx4:
+         case Iop_RSqrtStep64Fx2: case Iop_RSqrtStep32Fx4:
+         {
+            HReg res   = newVRegV(env);
+            HReg argL  = iselV128Expr(env, e->Iex.Binop.arg1);
+            HReg argR  = iselV128Expr(env, e->Iex.Binop.arg2);
+            Bool sw    = False;
+            Bool setRM = False;
+            ARM64VecBinOp op = ARM64vecb_INVALID;
+            switch (e->Iex.Binop.op) {
+               case Iop_AndV128:    op = ARM64vecb_AND; break;
+               case Iop_OrV128:     op = ARM64vecb_ORR; break;
+               case Iop_XorV128:    op = ARM64vecb_XOR; break;
+               case Iop_Max32Ux4:   op = ARM64vecb_UMAX32x4; break;
+               case Iop_Max16Ux8:   op = ARM64vecb_UMAX16x8; break;
+               case Iop_Max8Ux16:   op = ARM64vecb_UMAX8x16; break;
+               case Iop_Min32Ux4:   op = ARM64vecb_UMIN32x4; break;
+               case Iop_Min16Ux8:   op = ARM64vecb_UMIN16x8; break;
+               case Iop_Min8Ux16:   op = ARM64vecb_UMIN8x16; break;
+               case Iop_Max32Sx4:   op = ARM64vecb_SMAX32x4; break;
+               case Iop_Max16Sx8:   op = ARM64vecb_SMAX16x8; break;
+               case Iop_Max8Sx16:   op = ARM64vecb_SMAX8x16; break;
+               case Iop_Min32Sx4:   op = ARM64vecb_SMIN32x4; break;
+               case Iop_Min16Sx8:   op = ARM64vecb_SMIN16x8; break;
+               case Iop_Min8Sx16:   op = ARM64vecb_SMIN8x16; break;
+               case Iop_Add64x2:    op = ARM64vecb_ADD64x2; break;
+               case Iop_Add32x4:    op = ARM64vecb_ADD32x4; break;
+               case Iop_Add16x8:    op = ARM64vecb_ADD16x8; break;
+               case Iop_Add8x16:    op = ARM64vecb_ADD8x16; break;
+               case Iop_Sub64x2:    op = ARM64vecb_SUB64x2; break;
+               case Iop_Sub32x4:    op = ARM64vecb_SUB32x4; break;
+               case Iop_Sub16x8:    op = ARM64vecb_SUB16x8; break;
+               case Iop_Sub8x16:    op = ARM64vecb_SUB8x16; break;
+               case Iop_Mul32x4:    op = ARM64vecb_MUL32x4; break;
+               case Iop_Mul16x8:    op = ARM64vecb_MUL16x8; break;
+               case Iop_Mul8x16:    op = ARM64vecb_MUL8x16; break;
+               case Iop_CmpEQ64x2:  op = ARM64vecb_CMEQ64x2; break;
+               case Iop_CmpEQ32x4:  op = ARM64vecb_CMEQ32x4; break;
+               case Iop_CmpEQ16x8:  op = ARM64vecb_CMEQ16x8; break;
+               case Iop_CmpEQ8x16:  op = ARM64vecb_CMEQ8x16; break;
+               case Iop_CmpGT64Ux2: op = ARM64vecb_CMHI64x2; break;
+               case Iop_CmpGT32Ux4: op = ARM64vecb_CMHI32x4; break;
+               case Iop_CmpGT16Ux8: op = ARM64vecb_CMHI16x8; break;
+               case Iop_CmpGT8Ux16: op = ARM64vecb_CMHI8x16; break;
+               case Iop_CmpGT64Sx2: op = ARM64vecb_CMGT64x2; break;
+               case Iop_CmpGT32Sx4: op = ARM64vecb_CMGT32x4; break;
+               case Iop_CmpGT16Sx8: op = ARM64vecb_CMGT16x8; break;
+               case Iop_CmpGT8Sx16: op = ARM64vecb_CMGT8x16; break;
+               case Iop_CmpEQ64Fx2: op = ARM64vecb_FCMEQ64x2; break;
+               case Iop_CmpEQ32Fx4: op = ARM64vecb_FCMEQ32x4; break;
+               case Iop_CmpLE64Fx2: op = ARM64vecb_FCMGE64x2; sw = True; break;
+               case Iop_CmpLE32Fx4: op = ARM64vecb_FCMGE32x4; sw = True; break;
+               case Iop_CmpLT64Fx2: op = ARM64vecb_FCMGT64x2; sw = True; break;
+               case Iop_CmpLT32Fx4: op = ARM64vecb_FCMGT32x4; sw = True; break;
+               case Iop_Perm8x16:   op = ARM64vecb_TBL1; break;
+               case Iop_InterleaveLO64x2: op = ARM64vecb_UZP164x2; sw = True;
+                                          break;
+               case Iop_CatEvenLanes32x4: op = ARM64vecb_UZP132x4; sw = True;
+                                          break;
+               case Iop_CatEvenLanes16x8: op = ARM64vecb_UZP116x8; sw = True;
+                                          break;
+               case Iop_CatEvenLanes8x16: op = ARM64vecb_UZP18x16; sw = True;
+                                          break;
+               case Iop_InterleaveHI64x2: op = ARM64vecb_UZP264x2; sw = True;
+                                          break;
+               case Iop_CatOddLanes32x4:  op = ARM64vecb_UZP232x4; sw = True;
+                                          break;
+               case Iop_CatOddLanes16x8:  op = ARM64vecb_UZP216x8; sw = True;
+                                          break;
+               case Iop_CatOddLanes8x16:  op = ARM64vecb_UZP28x16; sw = True;
+                                          break;
+               case Iop_InterleaveHI32x4: op = ARM64vecb_ZIP232x4; sw = True;
+                                          break;
+               case Iop_InterleaveHI16x8: op = ARM64vecb_ZIP216x8; sw = True;
+                                          break;
+               case Iop_InterleaveHI8x16: op = ARM64vecb_ZIP28x16; sw = True;
+                                          break;
+               case Iop_InterleaveLO32x4: op = ARM64vecb_ZIP132x4; sw = True;
+                                          break;
+               case Iop_InterleaveLO16x8: op = ARM64vecb_ZIP116x8; sw = True;
+                                          break;
+               case Iop_InterleaveLO8x16: op = ARM64vecb_ZIP18x16; sw = True;
+                                          break;
+               case Iop_PolynomialMul8x16: op = ARM64vecb_PMUL8x16; break;
+               case Iop_QAdd64Sx2:      op = ARM64vecb_SQADD64x2; break;
+               case Iop_QAdd32Sx4:      op = ARM64vecb_SQADD32x4; break;
+               case Iop_QAdd16Sx8:      op = ARM64vecb_SQADD16x8; break;
+               case Iop_QAdd8Sx16:      op = ARM64vecb_SQADD8x16; break;
+               case Iop_QAdd64Ux2:      op = ARM64vecb_UQADD64x2; break;
+               case Iop_QAdd32Ux4:      op = ARM64vecb_UQADD32x4; break;
+               case Iop_QAdd16Ux8:      op = ARM64vecb_UQADD16x8; break;
+               case Iop_QAdd8Ux16:      op = ARM64vecb_UQADD8x16; break;
+               case Iop_QSub64Sx2:      op = ARM64vecb_SQSUB64x2; break;
+               case Iop_QSub32Sx4:      op = ARM64vecb_SQSUB32x4; break;
+               case Iop_QSub16Sx8:      op = ARM64vecb_SQSUB16x8; break;
+               case Iop_QSub8Sx16:      op = ARM64vecb_SQSUB8x16; break;
+               case Iop_QSub64Ux2:      op = ARM64vecb_UQSUB64x2; break;
+               case Iop_QSub32Ux4:      op = ARM64vecb_UQSUB32x4; break;
+               case Iop_QSub16Ux8:      op = ARM64vecb_UQSUB16x8; break;
+               case Iop_QSub8Ux16:      op = ARM64vecb_UQSUB8x16; break;
+               case Iop_QDMulHi32Sx4:   op = ARM64vecb_SQDMULH32x4; break;
+               case Iop_QDMulHi16Sx8:   op = ARM64vecb_SQDMULH16x8; break;
+               case Iop_QRDMulHi32Sx4:  op = ARM64vecb_SQRDMULH32x4; break;
+               case Iop_QRDMulHi16Sx8:  op = ARM64vecb_SQRDMULH16x8; break;
+               case Iop_Sh8Sx16:        op = ARM64vecb_SSHL8x16; break;
+               case Iop_Sh16Sx8:        op = ARM64vecb_SSHL16x8; break;
+               case Iop_Sh32Sx4:        op = ARM64vecb_SSHL32x4; break;
+               case Iop_Sh64Sx2:        op = ARM64vecb_SSHL64x2; break;
+               case Iop_Sh8Ux16:        op = ARM64vecb_USHL8x16; break;
+               case Iop_Sh16Ux8:        op = ARM64vecb_USHL16x8; break;
+               case Iop_Sh32Ux4:        op = ARM64vecb_USHL32x4; break;
+               case Iop_Sh64Ux2:        op = ARM64vecb_USHL64x2; break;
+               case Iop_Rsh8Sx16:       op = ARM64vecb_SRSHL8x16; break;
+               case Iop_Rsh16Sx8:       op = ARM64vecb_SRSHL16x8; break;
+               case Iop_Rsh32Sx4:       op = ARM64vecb_SRSHL32x4; break;
+               case Iop_Rsh64Sx2:       op = ARM64vecb_SRSHL64x2; break;
+               case Iop_Rsh8Ux16:       op = ARM64vecb_URSHL8x16; break;
+               case Iop_Rsh16Ux8:       op = ARM64vecb_URSHL16x8; break;
+               case Iop_Rsh32Ux4:       op = ARM64vecb_URSHL32x4; break;
+               case Iop_Rsh64Ux2:       op = ARM64vecb_URSHL64x2; break;
+               case Iop_Max64Fx2:       op = ARM64vecb_FMAX64x2; break;
+               case Iop_Max32Fx4:       op = ARM64vecb_FMAX32x4; break;
+               case Iop_Min64Fx2:       op = ARM64vecb_FMIN64x2; break;
+               case Iop_Min32Fx4:       op = ARM64vecb_FMIN32x4; break;
+               case Iop_RecipStep64Fx2: setRM = True;
+                                        op = ARM64vecb_FRECPS64x2; break;
+               case Iop_RecipStep32Fx4: setRM = True;
+                                        op = ARM64vecb_FRECPS32x4; break;
+               case Iop_RSqrtStep64Fx2: setRM = True;
+                                        op = ARM64vecb_FRSQRTS64x2; break;
+               case Iop_RSqrtStep32Fx4: setRM = True;
+                                        op = ARM64vecb_FRSQRTS32x4; break;
+               default: vassert(0);
+            }
+            if (setRM) {
+               // This is a bit of a kludge.  We should do rm properly for
+               // these recip-step insns, but that would require changing the
+               // primop's type to take an rmode.
+               set_FPCR_rounding_mode(env, IRExpr_Const(
+                                              IRConst_U32(Irrm_NEAREST)));
+            }
+            if (sw) {
+               addInstr(env, ARM64Instr_VBinV(op, res, argR, argL));
+            } else {
+               addInstr(env, ARM64Instr_VBinV(op, res, argL, argR));
+            }
+            return res;
+         }
+         /* -- These only have 2 operand instructions, so we have to first move
+            the first argument into a new register, for modification. -- */
+         case Iop_QAddExtUSsatSS8x16: case Iop_QAddExtUSsatSS16x8:
+         case Iop_QAddExtUSsatSS32x4: case Iop_QAddExtUSsatSS64x2:
+         case Iop_QAddExtSUsatUU8x16: case Iop_QAddExtSUsatUU16x8:
+         case Iop_QAddExtSUsatUU32x4: case Iop_QAddExtSUsatUU64x2:
+         {
+            HReg res  = newVRegV(env);
+            HReg argL = iselV128Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselV128Expr(env, e->Iex.Binop.arg2);
+            ARM64VecModifyOp op = ARM64vecmo_INVALID;
+            switch (e->Iex.Binop.op) {
+               /* In the following 8 cases, the US - SU switching is intended.
+                  See comments on the libvex_ir.h for details.  Also in the 
+                  ARM64 front end, where used these primops are generated. */
+               case Iop_QAddExtUSsatSS8x16: op = ARM64vecmo_SUQADD8x16; break;
+               case Iop_QAddExtUSsatSS16x8: op = ARM64vecmo_SUQADD16x8; break;
+               case Iop_QAddExtUSsatSS32x4: op = ARM64vecmo_SUQADD32x4; break;
+               case Iop_QAddExtUSsatSS64x2: op = ARM64vecmo_SUQADD64x2; break;
+               case Iop_QAddExtSUsatUU8x16: op = ARM64vecmo_USQADD8x16; break;
+               case Iop_QAddExtSUsatUU16x8: op = ARM64vecmo_USQADD16x8; break;
+               case Iop_QAddExtSUsatUU32x4: op = ARM64vecmo_USQADD32x4; break;
+               case Iop_QAddExtSUsatUU64x2: op = ARM64vecmo_USQADD64x2; break;
+               default: vassert(0);
+            }
+            /* The order of the operands is important.  Although this is
+               basically addition, the two operands are extended differently,
+               making it important to get them into the correct registers in
+               the instruction. */
+            addInstr(env, ARM64Instr_VMov(16, res, argR));
+            addInstr(env, ARM64Instr_VModifyV(op, res, argL));
+            return res;
+         }
+         /* -- Shifts by an immediate. -- */
+         case Iop_ShrN64x2: case Iop_ShrN32x4:
+         case Iop_ShrN16x8: case Iop_ShrN8x16:
+         case Iop_SarN64x2: case Iop_SarN32x4:
+         case Iop_SarN16x8: case Iop_SarN8x16:
+         case Iop_ShlN64x2: case Iop_ShlN32x4:
+         case Iop_ShlN16x8: case Iop_ShlN8x16:
+         case Iop_QShlNsatUU64x2: case Iop_QShlNsatUU32x4:
+         case Iop_QShlNsatUU16x8: case Iop_QShlNsatUU8x16:
+         case Iop_QShlNsatSS64x2: case Iop_QShlNsatSS32x4:
+         case Iop_QShlNsatSS16x8: case Iop_QShlNsatSS8x16:
+         case Iop_QShlNsatSU64x2: case Iop_QShlNsatSU32x4:
+         case Iop_QShlNsatSU16x8: case Iop_QShlNsatSU8x16:
+         {
+            IRExpr* argL = e->Iex.Binop.arg1;
+            IRExpr* argR = e->Iex.Binop.arg2;
+            if (argR->tag == Iex_Const && argR->Iex.Const.con->tag == Ico_U8) {
+               UInt amt   = argR->Iex.Const.con->Ico.U8;
+               UInt limLo = 0;
+               UInt limHi = 0;
+               ARM64VecShiftImmOp op = ARM64vecshi_INVALID;
+               /* Establish the instruction to use. */
+               switch (e->Iex.Binop.op) {
+                  case Iop_ShrN64x2:       op = ARM64vecshi_USHR64x2;   break;
+                  case Iop_ShrN32x4:       op = ARM64vecshi_USHR32x4;   break;
+                  case Iop_ShrN16x8:       op = ARM64vecshi_USHR16x8;   break;
+                  case Iop_ShrN8x16:       op = ARM64vecshi_USHR8x16;   break;
+                  case Iop_SarN64x2:       op = ARM64vecshi_SSHR64x2;   break;
+                  case Iop_SarN32x4:       op = ARM64vecshi_SSHR32x4;   break;
+                  case Iop_SarN16x8:       op = ARM64vecshi_SSHR16x8;   break;
+                  case Iop_SarN8x16:       op = ARM64vecshi_SSHR8x16;   break;
+                  case Iop_ShlN64x2:       op = ARM64vecshi_SHL64x2;    break;
+                  case Iop_ShlN32x4:       op = ARM64vecshi_SHL32x4;    break;
+                  case Iop_ShlN16x8:       op = ARM64vecshi_SHL16x8;    break;
+                  case Iop_ShlN8x16:       op = ARM64vecshi_SHL8x16;    break;
+                  case Iop_QShlNsatUU64x2: op = ARM64vecshi_UQSHL64x2;  break;
+                  case Iop_QShlNsatUU32x4: op = ARM64vecshi_UQSHL32x4;  break;
+                  case Iop_QShlNsatUU16x8: op = ARM64vecshi_UQSHL16x8;  break;
+                  case Iop_QShlNsatUU8x16: op = ARM64vecshi_UQSHL8x16;  break;
+                  case Iop_QShlNsatSS64x2: op = ARM64vecshi_SQSHL64x2;  break;
+                  case Iop_QShlNsatSS32x4: op = ARM64vecshi_SQSHL32x4;  break;
+                  case Iop_QShlNsatSS16x8: op = ARM64vecshi_SQSHL16x8;  break;
+                  case Iop_QShlNsatSS8x16: op = ARM64vecshi_SQSHL8x16;  break;
+                  case Iop_QShlNsatSU64x2: op = ARM64vecshi_SQSHLU64x2; break;
+                  case Iop_QShlNsatSU32x4: op = ARM64vecshi_SQSHLU32x4; break;
+                  case Iop_QShlNsatSU16x8: op = ARM64vecshi_SQSHLU16x8; break;
+                  case Iop_QShlNsatSU8x16: op = ARM64vecshi_SQSHLU8x16; break;
+                  default: vassert(0);
+               }
+               /* Establish the shift limits, for sanity check purposes only. */
+               switch (e->Iex.Binop.op) {
+                  case Iop_ShrN64x2:       limLo = 1; limHi = 64; break;
+                  case Iop_ShrN32x4:       limLo = 1; limHi = 32; break;
+                  case Iop_ShrN16x8:       limLo = 1; limHi = 16; break;
+                  case Iop_ShrN8x16:       limLo = 1; limHi = 8;  break;
+                  case Iop_SarN64x2:       limLo = 1; limHi = 64; break;
+                  case Iop_SarN32x4:       limLo = 1; limHi = 32; break;
+                  case Iop_SarN16x8:       limLo = 1; limHi = 16; break;
+                  case Iop_SarN8x16:       limLo = 1; limHi = 8;  break;
+                  case Iop_ShlN64x2:       limLo = 0; limHi = 63; break;
+                  case Iop_ShlN32x4:       limLo = 0; limHi = 31; break;
+                  case Iop_ShlN16x8:       limLo = 0; limHi = 15; break;
+                  case Iop_ShlN8x16:       limLo = 0; limHi = 7;  break;
+                  case Iop_QShlNsatUU64x2: limLo = 0; limHi = 63; break;
+                  case Iop_QShlNsatUU32x4: limLo = 0; limHi = 31; break;
+                  case Iop_QShlNsatUU16x8: limLo = 0; limHi = 15; break;
+                  case Iop_QShlNsatUU8x16: limLo = 0; limHi = 7;  break;
+                  case Iop_QShlNsatSS64x2: limLo = 0; limHi = 63; break;
+                  case Iop_QShlNsatSS32x4: limLo = 0; limHi = 31; break;
+                  case Iop_QShlNsatSS16x8: limLo = 0; limHi = 15; break;
+                  case Iop_QShlNsatSS8x16: limLo = 0; limHi = 7;  break;
+                  case Iop_QShlNsatSU64x2: limLo = 0; limHi = 63; break;
+                  case Iop_QShlNsatSU32x4: limLo = 0; limHi = 31; break;
+                  case Iop_QShlNsatSU16x8: limLo = 0; limHi = 15; break;
+                  case Iop_QShlNsatSU8x16: limLo = 0; limHi = 7;  break;
+                  default: vassert(0);
+               }
+               /* For left shifts, the allowable amt values are
+                  0 .. lane_bits-1.  For right shifts the allowable
+                  values are 1 .. lane_bits. */
+               if (op != ARM64vecshi_INVALID && amt >= limLo && amt <= limHi) {
+                  HReg src = iselV128Expr(env, argL);
+                  HReg dst = newVRegV(env);
+                  addInstr(env, ARM64Instr_VShiftImmV(op, dst, src, amt));
+                  return dst;
+               }
+               /* Special case some no-op shifts that the arm64 front end
+                  throws at us.  We can't generate any instructions for these,
+                  but we don't need to either. */
+               switch (e->Iex.Binop.op) {
+                  case Iop_ShrN64x2: case Iop_ShrN32x4:
+                  case Iop_ShrN16x8: case Iop_ShrN8x16:
+                     if (amt == 0) {
+                        return iselV128Expr(env, argL);
+                     }
+                     break;
+                  default:
+                     break;
+               }
+               /* otherwise unhandled */
+            }
+            /* else fall out; this is unhandled */
+            break;
+         }
+         /* -- Saturating narrowing by an immediate -- */
+         /* uu */
+         case Iop_QandQShrNnarrow16Uto8Ux8:
+         case Iop_QandQShrNnarrow32Uto16Ux4:
+         case Iop_QandQShrNnarrow64Uto32Ux2:
+         /* ss */
+         case Iop_QandQSarNnarrow16Sto8Sx8:
+         case Iop_QandQSarNnarrow32Sto16Sx4:
+         case Iop_QandQSarNnarrow64Sto32Sx2:
+         /* su */
+         case Iop_QandQSarNnarrow16Sto8Ux8:
+         case Iop_QandQSarNnarrow32Sto16Ux4:
+         case Iop_QandQSarNnarrow64Sto32Ux2:
+         /* ruu */
+         case Iop_QandQRShrNnarrow16Uto8Ux8:
+         case Iop_QandQRShrNnarrow32Uto16Ux4:
+         case Iop_QandQRShrNnarrow64Uto32Ux2:
+         /* rss */
+         case Iop_QandQRSarNnarrow16Sto8Sx8:
+         case Iop_QandQRSarNnarrow32Sto16Sx4:
+         case Iop_QandQRSarNnarrow64Sto32Sx2:
+         /* rsu */
+         case Iop_QandQRSarNnarrow16Sto8Ux8:
+         case Iop_QandQRSarNnarrow32Sto16Ux4:
+         case Iop_QandQRSarNnarrow64Sto32Ux2:
+         {
+            IRExpr* argL = e->Iex.Binop.arg1;
+            IRExpr* argR = e->Iex.Binop.arg2;
+            if (argR->tag == Iex_Const && argR->Iex.Const.con->tag == Ico_U8) {
+               UInt amt   = argR->Iex.Const.con->Ico.U8;
+               UInt limit = 0;
+               ARM64VecShiftImmOp op = ARM64vecshi_INVALID;
+               switch (e->Iex.Binop.op) {
+                  /* uu */
+                  case Iop_QandQShrNnarrow64Uto32Ux2:
+                     op = ARM64vecshi_UQSHRN2SD; limit = 64; break;
+                  case Iop_QandQShrNnarrow32Uto16Ux4:
+                     op = ARM64vecshi_UQSHRN4HS; limit = 32; break;
+                  case Iop_QandQShrNnarrow16Uto8Ux8:
+                     op = ARM64vecshi_UQSHRN8BH; limit = 16; break;
+                  /* ss */
+                  case Iop_QandQSarNnarrow64Sto32Sx2:
+                     op = ARM64vecshi_SQSHRN2SD; limit = 64; break;
+                  case Iop_QandQSarNnarrow32Sto16Sx4:
+                     op = ARM64vecshi_SQSHRN4HS; limit = 32; break;
+                  case Iop_QandQSarNnarrow16Sto8Sx8:
+                     op = ARM64vecshi_SQSHRN8BH; limit = 16; break;
+                  /* su */
+                  case Iop_QandQSarNnarrow64Sto32Ux2:
+                     op = ARM64vecshi_SQSHRUN2SD; limit = 64; break;
+                  case Iop_QandQSarNnarrow32Sto16Ux4:
+                     op = ARM64vecshi_SQSHRUN4HS; limit = 32; break;
+                  case Iop_QandQSarNnarrow16Sto8Ux8:
+                     op = ARM64vecshi_SQSHRUN8BH; limit = 16; break;
+                  /* ruu */
+                  case Iop_QandQRShrNnarrow64Uto32Ux2:
+                     op = ARM64vecshi_UQRSHRN2SD; limit = 64; break;
+                  case Iop_QandQRShrNnarrow32Uto16Ux4:
+                     op = ARM64vecshi_UQRSHRN4HS; limit = 32; break;
+                  case Iop_QandQRShrNnarrow16Uto8Ux8:
+                     op = ARM64vecshi_UQRSHRN8BH; limit = 16; break;
+                  /* rss */
+                  case Iop_QandQRSarNnarrow64Sto32Sx2:
+                     op = ARM64vecshi_SQRSHRN2SD; limit = 64; break;
+                  case Iop_QandQRSarNnarrow32Sto16Sx4:
+                     op = ARM64vecshi_SQRSHRN4HS; limit = 32; break;
+                  case Iop_QandQRSarNnarrow16Sto8Sx8:
+                     op = ARM64vecshi_SQRSHRN8BH; limit = 16; break;
+                  /* rsu */
+                  case Iop_QandQRSarNnarrow64Sto32Ux2:
+                     op = ARM64vecshi_SQRSHRUN2SD; limit = 64; break;
+                  case Iop_QandQRSarNnarrow32Sto16Ux4:
+                     op = ARM64vecshi_SQRSHRUN4HS; limit = 32; break;
+                  case Iop_QandQRSarNnarrow16Sto8Ux8:
+                     op = ARM64vecshi_SQRSHRUN8BH; limit = 16; break;
+                  /**/
+                  default:
+                     vassert(0);
+               }
+               if (op != ARM64vecshi_INVALID && amt >= 1 && amt <= limit) {
+                  HReg src  = iselV128Expr(env, argL);
+                  HReg dst  = newVRegV(env);
+                  HReg fpsr = newVRegI(env);
+                  /* Clear FPSR.Q, do the operation, and return both its
+                     result and the new value of FPSR.Q.  We can simply
+                     zero out FPSR since all the other bits have no relevance
+                     in VEX generated code. */
+                  addInstr(env, ARM64Instr_Imm64(fpsr, 0));
+                  addInstr(env, ARM64Instr_FPSR(True/*toFPSR*/, fpsr));
+                  addInstr(env, ARM64Instr_VShiftImmV(op, dst, src, amt));
+                  addInstr(env, ARM64Instr_FPSR(False/*!toFPSR*/, fpsr));
+                  addInstr(env, ARM64Instr_Shift(fpsr, fpsr, ARM64RI6_I6(27),
+                                                             ARM64sh_SHR));
+                  ARM64RIL* ril_one = mb_mkARM64RIL_I(1);
+                  vassert(ril_one);
+                  addInstr(env, ARM64Instr_Logic(fpsr,
+                                                 fpsr, ril_one, ARM64lo_AND));
+                  /* Now we have: the main (shift) result in the bottom half
+                     of |dst|, and the Q bit at the bottom of |fpsr|.  
+                     Combining them with a "InterleaveLO64x2" style operation 
+                     produces a 128 bit value, dst[63:0]:fpsr[63:0], 
+                     which is what we want. */
+                  HReg scratch = newVRegV(env);
+                  addInstr(env, ARM64Instr_VQfromX(scratch, fpsr));
+                  addInstr(env, ARM64Instr_VBinV(ARM64vecb_UZP164x2,
+                                                 dst, dst, scratch));
+                  return dst;
+               }
+            }
+            /* else fall out; this is unhandled */
+            break;
+         }
+
+         // Use Iop_SliceV128 in preference to Iop_ShlV128 and Iop_ShrV128,
+         // as it is in some ways more general and often leads to better
+         // code overall. 
+         case Iop_ShlV128:
+         case Iop_ShrV128: {
+            Bool isSHR = e->Iex.Binop.op == Iop_ShrV128;
+            /* This is tricky.  Generate an EXT instruction with zeroes in
+               the high operand (shift right) or low operand (shift left).
+               Note that we can only slice in the EXT instruction at a byte
+               level of granularity, so the shift amount needs careful
+               checking. */
+            IRExpr* argL = e->Iex.Binop.arg1;
+            IRExpr* argR = e->Iex.Binop.arg2;
+            if (argR->tag == Iex_Const && argR->Iex.Const.con->tag == Ico_U8) {
+               UInt amt   = argR->Iex.Const.con->Ico.U8;
+               Bool amtOK = False;
+               switch (amt) {
+                  case 0x08: case 0x10: case 0x18: case 0x20: case 0x28:
+                  case 0x30: case 0x38: case 0x40: case 0x48: case 0x50:
+                  case 0x58: case 0x60: case 0x68: case 0x70: case 0x78:
+                     amtOK = True; break;
+               }
+               /* We could also deal with amt==0 by copying the source to
+                  the destination, but there's no need for that so far. */
+               if (amtOK) {
+                  HReg src  = iselV128Expr(env, argL);
+                  HReg srcZ = newVRegV(env);
+                  addInstr(env, ARM64Instr_VImmQ(srcZ, 0x0000));
+                  UInt immB = amt / 8;
+                  vassert(immB >= 1 && immB <= 15);
+                  HReg dst = newVRegV(env);
+                  if (isSHR) {
+                    addInstr(env, ARM64Instr_VExtV(dst, src/*lo*/, srcZ/*hi*/,
+                                                         immB));
+                  } else {
+                    addInstr(env, ARM64Instr_VExtV(dst, srcZ/*lo*/, src/*hi*/,
+                                                         16 - immB));
+                  }
+                  return dst;
+               }
+            }
+            /* else fall out; this is unhandled */
+            break;
+         }
+
+         case Iop_PolynomialMull8x8:
+         case Iop_Mull32Ux2:
+         case Iop_Mull16Ux4:
+         case Iop_Mull8Ux8:
+         case Iop_Mull32Sx2:
+         case Iop_Mull16Sx4:
+         case Iop_Mull8Sx8:
+         case Iop_QDMull32Sx2:
+         case Iop_QDMull16Sx4:
+         {
+            HReg iSrcL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            HReg iSrcR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            HReg vSrcL = newVRegV(env);
+            HReg vSrcR = newVRegV(env);
+            HReg dst   = newVRegV(env);
+            ARM64VecBinOp op = ARM64vecb_INVALID;
+            switch (e->Iex.Binop.op) {
+               case Iop_PolynomialMull8x8: op = ARM64vecb_PMULL8x8;    break;
+               case Iop_Mull32Ux2:         op = ARM64vecb_UMULL2DSS;   break;
+               case Iop_Mull16Ux4:         op = ARM64vecb_UMULL4SHH;   break;
+               case Iop_Mull8Ux8:          op = ARM64vecb_UMULL8HBB;   break;
+               case Iop_Mull32Sx2:         op = ARM64vecb_SMULL2DSS;   break;
+               case Iop_Mull16Sx4:         op = ARM64vecb_SMULL4SHH;   break;
+               case Iop_Mull8Sx8:          op = ARM64vecb_SMULL8HBB;   break;
+               case Iop_QDMull32Sx2:       op = ARM64vecb_SQDMULL2DSS; break;
+               case Iop_QDMull16Sx4:       op = ARM64vecb_SQDMULL4SHH; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARM64Instr_VQfromXX(vSrcL, iSrcL, iSrcL));
+            addInstr(env, ARM64Instr_VQfromXX(vSrcR, iSrcR, iSrcR));
+            addInstr(env, ARM64Instr_VBinV(op, dst, vSrcL, vSrcR));
+            return dst;
+         }
+
+         /* ... */
+         default:
+            break;
+      } /* switch on the binop */
+   } /* if (e->tag == Iex_Binop) */
+
+   if (e->tag == Iex_Triop) {
+      IRTriop*      triop  = e->Iex.Triop.details;
+      ARM64VecBinOp vecbop = ARM64vecb_INVALID;
+      switch (triop->op) {
+         case Iop_Add64Fx2: vecbop = ARM64vecb_FADD64x2; break;
+         case Iop_Sub64Fx2: vecbop = ARM64vecb_FSUB64x2; break;
+         case Iop_Mul64Fx2: vecbop = ARM64vecb_FMUL64x2; break;
+         case Iop_Div64Fx2: vecbop = ARM64vecb_FDIV64x2; break;
+         case Iop_Add32Fx4: vecbop = ARM64vecb_FADD32x4; break;
+         case Iop_Sub32Fx4: vecbop = ARM64vecb_FSUB32x4; break;
+         case Iop_Mul32Fx4: vecbop = ARM64vecb_FMUL32x4; break;
+         case Iop_Div32Fx4: vecbop = ARM64vecb_FDIV32x4; break;
+         default: break;
+      }
+      if (vecbop != ARM64vecb_INVALID) {
+         HReg argL = iselV128Expr(env, triop->arg2);
+         HReg argR = iselV128Expr(env, triop->arg3);
+         HReg dst  = newVRegV(env);
+         set_FPCR_rounding_mode(env, triop->arg1);
+         addInstr(env, ARM64Instr_VBinV(vecbop, dst, argL, argR));
+         return dst;
+      }
+
+      if (triop->op == Iop_SliceV128) {
+         /* Note that, compared to ShlV128/ShrV128 just above, the shift
+            amount here is in bytes, not bits. */
+         IRExpr* argHi  = triop->arg1;
+         IRExpr* argLo  = triop->arg2;
+         IRExpr* argAmt = triop->arg3;
+         if (argAmt->tag == Iex_Const && argAmt->Iex.Const.con->tag == Ico_U8) {
+            UInt amt   = argAmt->Iex.Const.con->Ico.U8;
+            Bool amtOK = amt >= 1 && amt <= 15;
+            /* We could also deal with amt==0 by copying argLO to
+               the destination, but there's no need for that so far. */
+            if (amtOK) {
+               HReg srcHi = iselV128Expr(env, argHi);
+               HReg srcLo = iselV128Expr(env, argLo);
+               HReg dst = newVRegV(env);
+              addInstr(env, ARM64Instr_VExtV(dst, srcLo, srcHi, amt));
+               return dst;
+            }
+         }
+         /* else fall out; this is unhandled */
+      }
+
+   } /* if (e->tag == Iex_Triop) */
+
+  v128_expr_bad:
+   ppIRExpr(e);
+   vpanic("iselV128Expr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (64 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 64-bit floating point value into a register, the identity
+   of which is returned.  As with iselIntExpr_R, the reg may be either
+   real or virtual; in any case it must not be changed by subsequent
+   code emitted by the caller.  */
+
+static HReg iselDblExpr ( ISelEnv* env, IRExpr* e )
+{
+   /* Let the worker choose the instructions. */
+   HReg res = iselDblExpr_wrk( env, e );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* Whatever comes back must be a virtual register of FP64 class;
+      callers rely on it not being a real register. */
+   vassert(hregClass(res) == HRcFlt64);
+   vassert(hregIsVirtual(res));
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_F64);
+
+   /* IR temporary: return the vreg already bound to it. */
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Const) {
+      IRConst* con = e->Iex.Const.con;
+      /* F64i gives the bit pattern directly as a ULong: materialise
+         it in an integer register and transfer to an FP register. */
+      if (con->tag == Ico_F64i) {
+         HReg src = newVRegI(env);
+         HReg dst = newVRegD(env);
+         addInstr(env, ARM64Instr_Imm64(src, con->Ico.F64i));
+         addInstr(env, ARM64Instr_VDfromX(dst, src));
+         return dst;
+      }
+      /* F64 gives a Double: reinterpret its bits as a ULong via a
+         union, then do the same int-to-FP register transfer. */
+      if (con->tag == Ico_F64) {
+         HReg src = newVRegI(env);
+         HReg dst = newVRegD(env);
+         union { Double d64; ULong u64; } u;
+         vassert(sizeof(u) == 8);
+         u.d64 = con->Ico.F64;
+         addInstr(env, ARM64Instr_Imm64(src, u.u64));
+         addInstr(env, ARM64Instr_VDfromX(dst, src));
+         return dst;
+      }
+   }
+
+   /* Little-endian 64-bit FP load from a computed address. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      vassert(e->Iex.Load.ty == Ity_F64);
+      HReg addr = iselIntExpr_R(env, e->Iex.Load.addr);
+      HReg res  = newVRegD(env);
+      addInstr(env, ARM64Instr_VLdStD(True/*isLoad*/, res, addr, 0));
+      return res;
+   }
+
+   /* Guest-state read: only handled for 8-aligned offsets small
+      enough for the load instruction; anything else falls through
+      to the panic at the bottom. */
+   if (e->tag == Iex_Get) {
+      Int offs = e->Iex.Get.offset;
+      if (offs >= 0 && offs < 32768 && 0 == (offs & 7)) {
+         HReg rD = newVRegD(env);
+         HReg rN = get_baseblock_register();
+         addInstr(env, ARM64Instr_VLdStD(True/*isLoad*/, rD, rN, offs));
+         return rD;
+      }
+   }
+
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+         case Iop_NegF64: {
+            HReg src = iselDblExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARM64Instr_VUnaryD(ARM64fpu_NEG, dst, src));
+            return dst;
+         }
+         case Iop_AbsF64: {
+            HReg src = iselDblExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARM64Instr_VUnaryD(ARM64fpu_ABS, dst, src));
+            return dst;
+         }
+         /* Widening conversions (F32->F64, F16->F64) need no rounding
+            mode since they are exact. */
+         case Iop_F32toF64: {
+            HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARM64Instr_VCvtSD(True/*sToD*/, dst, src));
+            return dst;
+         }
+         case Iop_F16toF64: {
+            HReg src = iselF16Expr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARM64Instr_VCvtHD(True/*hToD*/, dst, src));
+            return dst;
+         }
+         case Iop_I32UtoF64:
+         case Iop_I32StoF64: {
+            /* Rounding mode is not involved here, since the
+               conversion can always be done without loss of
+               precision. */
+            HReg src   = iselIntExpr_R(env, e->Iex.Unop.arg);
+            HReg dst   = newVRegD(env);
+            Bool syned = e->Iex.Unop.op == Iop_I32StoF64;
+            ARM64CvtOp cvt_op = syned ? ARM64cvt_F64_I32S : ARM64cvt_F64_I32U;
+            addInstr(env, ARM64Instr_VCvtI2F(cvt_op, dst, src));
+            return dst;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         /* Unary FP operations that take a rounding mode: arg1 is
+            the mode (installed into FPCR first), arg2 the operand. */
+         case Iop_RoundF64toInt:
+         case Iop_SqrtF64:
+         case Iop_RecpExpF64: {
+            HReg src = iselDblExpr(env, e->Iex.Binop.arg2);
+            HReg dst = newVRegD(env);
+            set_FPCR_rounding_mode(env, e->Iex.Binop.arg1);
+            ARM64FpUnaryOp op = ARM64fpu_INVALID;
+            switch (e->Iex.Binop.op) {
+               case Iop_RoundF64toInt: op = ARM64fpu_RINT;  break;
+               case Iop_SqrtF64:       op = ARM64fpu_SQRT;  break;
+               case Iop_RecpExpF64:    op = ARM64fpu_RECPX; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARM64Instr_VUnaryD(op, dst, src));
+            return dst;
+         }
+         /* 64-bit int to F64 can be inexact, so arg1 carries the
+            rounding mode to use. */
+         case Iop_I64StoF64:
+         case Iop_I64UtoF64: {
+            ARM64CvtOp cvt_op = e->Iex.Binop.op == Iop_I64StoF64
+                                   ? ARM64cvt_F64_I64S : ARM64cvt_F64_I64U;
+            HReg srcI = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            set_FPCR_rounding_mode(env, e->Iex.Binop.arg1);
+            HReg dstS = newVRegD(env);
+            addInstr(env, ARM64Instr_VCvtI2F(cvt_op, dstS, srcI));
+            return dstS;
+         }
+         default:
+            break;
+      }
+   }
+
+   /* Triops: the four basic FP arithmetic ops, with arg1 as the
+      rounding mode and arg2/arg3 as the operands. */
+   if (e->tag == Iex_Triop) {
+      IRTriop*     triop = e->Iex.Triop.details;
+      ARM64FpBinOp dblop = ARM64fpb_INVALID;
+      switch (triop->op) {
+         case Iop_DivF64: dblop = ARM64fpb_DIV; break;
+         case Iop_MulF64: dblop = ARM64fpb_MUL; break;
+         case Iop_SubF64: dblop = ARM64fpb_SUB; break;
+         case Iop_AddF64: dblop = ARM64fpb_ADD; break;
+         default: break;
+      }
+      if (dblop != ARM64fpb_INVALID) {
+         HReg argL = iselDblExpr(env, triop->arg2);
+         HReg argR = iselDblExpr(env, triop->arg3);
+         HReg dst  = newVRegD(env);
+         set_FPCR_rounding_mode(env, triop->arg1);
+         addInstr(env, ARM64Instr_VBinD(dblop, dst, argL, argR));
+         return dst;
+      }
+   }
+
+   /* Conditional select on a 64-bit FP register. */
+   if (e->tag == Iex_ITE) {
+      /* ITE(ccexpr, iftrue, iffalse) */
+      ARM64CondCode cc;
+      HReg r1  = iselDblExpr(env, e->Iex.ITE.iftrue);
+      HReg r0  = iselDblExpr(env, e->Iex.ITE.iffalse);
+      HReg dst = newVRegD(env);
+      cc = iselCondCode(env, e->Iex.ITE.cond);
+      addInstr(env, ARM64Instr_VFCSel(dst, r1, r0, cc, True/*64-bit*/));
+      return dst;
+   }
+
+   /* No case matched: print the offending expression and give up. */
+   ppIRExpr(e);
+   vpanic("iselDblExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (32 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 32-bit floating point value into a register, the identity
+   of which is returned.  As with iselIntExpr_R, the reg may be either
+   real or virtual; in any case it must not be changed by subsequent
+   code emitted by the caller.  Values are generated into HRcFlt64
+   registers despite the values themselves being Ity_F32s. */
+
+static HReg iselFltExpr ( ISelEnv* env, IRExpr* e )
+{
+   /* Let the worker choose the instructions. */
+   HReg res = iselFltExpr_wrk( env, e );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* F32 values live in FP64-class registers; whatever comes back
+      must be a virtual register of that class. */
+   vassert(hregClass(res) == HRcFlt64);
+   vassert(hregIsVirtual(res));
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_F32);
+
+   /* IR temporary: return the vreg already bound to it. */
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Const) {
+      /* This is something of a kludge.  Since a 32 bit floating point
+         zero is just .. all zeroes, just create a 64 bit zero word
+         and transfer it.  This avoids having to create a SfromW
+         instruction for this specific case. */
+      IRConst* con = e->Iex.Const.con;
+      if (con->tag == Ico_F32i && con->Ico.F32i == 0) {
+         HReg src = newVRegI(env);
+         HReg dst = newVRegD(env);
+         addInstr(env, ARM64Instr_Imm64(src, 0));
+         addInstr(env, ARM64Instr_VDfromX(dst, src));
+         return dst;
+      }
+      /* F32 gives a Float: reinterpret its bits as a UInt via a
+         union, widen to ULong, and transfer int-to-FP register. */
+      if (con->tag == Ico_F32) {
+         HReg src = newVRegI(env);
+         HReg dst = newVRegD(env);
+         union { Float f32; UInt u32; } u;
+         vassert(sizeof(u) == 4);
+         u.f32 = con->Ico.F32;
+         addInstr(env, ARM64Instr_Imm64(src, (ULong)u.u32));
+         addInstr(env, ARM64Instr_VDfromX(dst, src));
+         return dst;
+      }
+   }
+
+   /* Little-endian 32-bit FP load from a computed address. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      vassert(e->Iex.Load.ty == Ity_F32);
+      HReg addr = iselIntExpr_R(env, e->Iex.Load.addr);
+      HReg res  = newVRegD(env);
+      addInstr(env, ARM64Instr_VLdStS(True/*isLoad*/, res, addr, 0));
+      return res;
+   }
+
+   /* Guest-state read: only handled for 4-aligned offsets small
+      enough for the load instruction; anything else falls through
+      to the panic at the bottom. */
+   if (e->tag == Iex_Get) {
+      Int offs = e->Iex.Get.offset;
+      if (offs >= 0 && offs < 16384 && 0 == (offs & 3)) {
+         HReg rD = newVRegD(env);
+         HReg rN = get_baseblock_register();
+         addInstr(env, ARM64Instr_VLdStS(True/*isLoad*/, rD, rN, offs));
+         return rD;
+      }
+   }
+
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+         case Iop_NegF32: {
+            HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARM64Instr_VUnaryS(ARM64fpu_NEG, dst, src));
+            return dst;
+         }
+         case Iop_AbsF32: {
+            HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARM64Instr_VUnaryS(ARM64fpu_ABS, dst, src));
+            return dst;
+         }
+         /* Widening F16->F32 is exact, so no rounding mode needed. */
+         case Iop_F16toF32: {
+            HReg src = iselF16Expr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARM64Instr_VCvtHS(True/*hToS*/, dst, src));
+            return dst;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         /* Unary FP operations that take a rounding mode: arg1 is
+            the mode (installed into FPCR first), arg2 the operand. */
+         case Iop_RoundF32toInt:
+         case Iop_SqrtF32:
+         case Iop_RecpExpF32: {
+            HReg src = iselFltExpr(env, e->Iex.Binop.arg2);
+            HReg dst = newVRegD(env);
+            set_FPCR_rounding_mode(env, e->Iex.Binop.arg1);
+            ARM64FpUnaryOp op = ARM64fpu_INVALID;
+            switch (e->Iex.Binop.op) {
+               case Iop_RoundF32toInt: op = ARM64fpu_RINT;  break;
+               case Iop_SqrtF32:       op = ARM64fpu_SQRT;  break;
+               case Iop_RecpExpF32:    op = ARM64fpu_RECPX; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARM64Instr_VUnaryS(op, dst, src));
+            return dst;
+         }
+         /* Narrowing F64->F32 can be inexact, so arg1 carries the
+            rounding mode to use. */
+         case Iop_F64toF32: {
+            HReg srcD = iselDblExpr(env, e->Iex.Binop.arg2);
+            set_FPCR_rounding_mode(env, e->Iex.Binop.arg1);
+            HReg dstS = newVRegD(env);
+            addInstr(env, ARM64Instr_VCvtSD(False/*!sToD*/, dstS, srcD));
+            return dstS;
+         }
+         /* Int to F32 conversions can be inexact too; same scheme. */
+         case Iop_I32UtoF32:
+         case Iop_I32StoF32:
+         case Iop_I64UtoF32:
+         case Iop_I64StoF32: {
+            ARM64CvtOp cvt_op = ARM64cvt_INVALID;
+            switch (e->Iex.Binop.op) {
+               case Iop_I32UtoF32: cvt_op = ARM64cvt_F32_I32U; break;
+               case Iop_I32StoF32: cvt_op = ARM64cvt_F32_I32S; break;
+               case Iop_I64UtoF32: cvt_op = ARM64cvt_F32_I64U; break;
+               case Iop_I64StoF32: cvt_op = ARM64cvt_F32_I64S; break;
+               default: vassert(0);
+            }
+            HReg srcI = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            set_FPCR_rounding_mode(env, e->Iex.Binop.arg1);
+            HReg dstS = newVRegD(env);
+            addInstr(env, ARM64Instr_VCvtI2F(cvt_op, dstS, srcI));
+            return dstS;
+         }
+         default:
+            break;
+      }
+   }
+
+   /* Triops: the four basic FP arithmetic ops, with arg1 as the
+      rounding mode and arg2/arg3 as the operands. */
+   if (e->tag == Iex_Triop) {
+      IRTriop*     triop = e->Iex.Triop.details;
+      ARM64FpBinOp sglop = ARM64fpb_INVALID;
+      switch (triop->op) {
+         case Iop_DivF32: sglop = ARM64fpb_DIV; break;
+         case Iop_MulF32: sglop = ARM64fpb_MUL; break;
+         case Iop_SubF32: sglop = ARM64fpb_SUB; break;
+         case Iop_AddF32: sglop = ARM64fpb_ADD; break;
+         default: break;
+      }
+      if (sglop != ARM64fpb_INVALID) {
+         HReg argL = iselFltExpr(env, triop->arg2);
+         HReg argR = iselFltExpr(env, triop->arg3);
+         HReg dst  = newVRegD(env);
+         set_FPCR_rounding_mode(env, triop->arg1);
+         addInstr(env, ARM64Instr_VBinS(sglop, dst, argL, argR));
+         return dst;
+      }
+   }
+
+   /* Conditional select on a 32-bit FP register. */
+   if (e->tag == Iex_ITE) {
+      /* ITE(ccexpr, iftrue, iffalse) */
+      ARM64CondCode cc;
+      HReg r1  = iselFltExpr(env, e->Iex.ITE.iftrue);
+      HReg r0  = iselFltExpr(env, e->Iex.ITE.iffalse);
+      HReg dst = newVRegD(env);
+      cc = iselCondCode(env, e->Iex.ITE.cond);
+      addInstr(env, ARM64Instr_VFCSel(dst, r1, r0, cc, False/*!64-bit*/));
+      return dst;
+   }
+
+   /* No case matched: print the offending expression and give up. */
+   ppIRExpr(e);
+   vpanic("iselFltExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (16 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Generate code to compute a 16-bit floating point value into a
+   register, whose identity is returned.  As with iselIntExpr_R, the
+   register may be real or virtual; either way it must not be
+   modified by code the caller subsequently emits.  Although the
+   values themselves are Ity_F16, they are carried in HRcFlt64-class
+   registers. */
+
+static HReg iselF16Expr ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check what came back. */
+   HReg res = iselF16Expr_wrk( env, e );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(res) == HRcFlt64);
+   vassert(hregIsVirtual(res));
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY -- use iselF16Expr, which checks the
+   class of the returned register.  Worker: select instructions for a
+   16-bit floating point expression. */
+static HReg iselF16Expr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   /* Check |e| is non-NULL *before* handing it to typeOfIRExpr,
+      which dereferences it.  (Same ordering as iselV256Expr_wrk;
+      previously the assertion came too late to be useful.) */
+   vassert(e);
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_F16);
+
+   /* Read an F16 directly from the guest state, provided the offset
+      is non-negative, 2-aligned and within range of VLdStH. */
+   if (e->tag == Iex_Get) {
+      Int offs = e->Iex.Get.offset;
+      if (offs >= 0 && offs < 8192 && 0 == (offs & 1)) {
+         HReg rD = newVRegD(env);
+         HReg rN = get_baseblock_register();
+         addInstr(env, ARM64Instr_VLdStH(True/*isLoad*/, rD, rN, offs));
+         return rD;
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         /* Narrowing conversions to F16.  arg1 is the rounding mode,
+            arg2 the value to convert. */
+         case Iop_F32toF16: {
+            HReg srcS = iselFltExpr(env, e->Iex.Binop.arg2);
+            set_FPCR_rounding_mode(env, e->Iex.Binop.arg1);
+            HReg dstH = newVRegD(env);
+            addInstr(env, ARM64Instr_VCvtHS(False/*!hToS*/, dstH, srcS));
+            return dstH;
+         }
+         case Iop_F64toF16: {
+            HReg srcD = iselDblExpr(env, e->Iex.Binop.arg2);
+            set_FPCR_rounding_mode(env, e->Iex.Binop.arg1);
+            HReg dstH = newVRegD(env);
+            addInstr(env, ARM64Instr_VCvtHD(False/*!hToD*/, dstH, srcD));
+            return dstH;
+         }
+         default:
+            break;
+      }
+   }
+
+   ppIRExpr(e);
+   vpanic("iselF16Expr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Vector expressions (256 bit)                ---*/
+/*---------------------------------------------------------*/
+
+static void iselV256Expr ( /*OUT*/HReg* rHi, HReg* rLo, 
+                           ISelEnv* env, IRExpr* e )
+{
+   /* Run the worker, then verify that each half of the result is a
+      virtual 128-bit vector register. */
+   iselV256Expr_wrk( rHi, rLo, env, e );
+   vassert(hregClass(*rHi) == HRcVec128);
+   vassert(hregIsVirtual(*rHi));
+   vassert(hregClass(*rLo) == HRcVec128);
+   vassert(hregIsVirtual(*rLo));
+}
+
+/* DO NOT CALL THIS DIRECTLY -- use iselV256Expr, which checks that
+   both returned registers are virtual HRcVec128 regs.  Worker:
+   compute a 256-bit value into the register pair (*rHi, *rLo). */
+static void iselV256Expr_wrk ( /*OUT*/HReg* rHi, /*OUT*/HReg* rLo, 
+                               ISelEnv* env, IRExpr* e )
+{
+   vassert(e);
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_V256);
+
+   /* read 256-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp);
+      return;
+   }
+ 
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         /* Simple concatenation: each 128-bit half is selected
+            independently. */
+         case Iop_V128HLtoV256: {
+            *rHi = iselV128Expr(env, e->Iex.Binop.arg1);
+            *rLo = iselV128Expr(env, e->Iex.Binop.arg2);
+            return;
+         }
+         /* Saturating shifts that also report saturation: the shift
+            result goes in the low half, and the high half carries the
+            post-operation FPSR.Q(C) flag. */
+         case Iop_QandSQsh64x2:
+         case Iop_QandSQsh32x4:
+         case Iop_QandSQsh16x8:
+         case Iop_QandSQsh8x16:
+         case Iop_QandUQsh64x2:
+         case Iop_QandUQsh32x4:
+         case Iop_QandUQsh16x8:
+         case Iop_QandUQsh8x16:
+         case Iop_QandSQRsh64x2:
+         case Iop_QandSQRsh32x4:
+         case Iop_QandSQRsh16x8:
+         case Iop_QandSQRsh8x16:
+         case Iop_QandUQRsh64x2:
+         case Iop_QandUQRsh32x4:
+         case Iop_QandUQRsh16x8:
+         case Iop_QandUQRsh8x16:
+         {
+            HReg argL  = iselV128Expr(env, e->Iex.Binop.arg1);
+            HReg argR  = iselV128Expr(env, e->Iex.Binop.arg2);
+            HReg fpsr  = newVRegI(env);
+            HReg resHi = newVRegV(env);
+            HReg resLo = newVRegV(env);
+            ARM64VecBinOp op = ARM64vecb_INVALID;
+            switch (e->Iex.Binop.op) {
+               case Iop_QandSQsh64x2:  op = ARM64vecb_SQSHL64x2;  break;
+               case Iop_QandSQsh32x4:  op = ARM64vecb_SQSHL32x4;  break;
+               case Iop_QandSQsh16x8:  op = ARM64vecb_SQSHL16x8;  break;
+               case Iop_QandSQsh8x16:  op = ARM64vecb_SQSHL8x16;  break;
+               case Iop_QandUQsh64x2:  op = ARM64vecb_UQSHL64x2;  break;
+               case Iop_QandUQsh32x4:  op = ARM64vecb_UQSHL32x4;  break;
+               case Iop_QandUQsh16x8:  op = ARM64vecb_UQSHL16x8;  break;
+               case Iop_QandUQsh8x16:  op = ARM64vecb_UQSHL8x16;  break;
+               case Iop_QandSQRsh64x2: op = ARM64vecb_SQRSHL64x2; break;
+               case Iop_QandSQRsh32x4: op = ARM64vecb_SQRSHL32x4; break;
+               case Iop_QandSQRsh16x8: op = ARM64vecb_SQRSHL16x8; break;
+               case Iop_QandSQRsh8x16: op = ARM64vecb_SQRSHL8x16; break;
+               case Iop_QandUQRsh64x2: op = ARM64vecb_UQRSHL64x2; break;
+               case Iop_QandUQRsh32x4: op = ARM64vecb_UQRSHL32x4; break;
+               case Iop_QandUQRsh16x8: op = ARM64vecb_UQRSHL16x8; break;
+               case Iop_QandUQRsh8x16: op = ARM64vecb_UQRSHL8x16; break;
+               default: vassert(0);
+            }
+            /* Clear FPSR.Q, do the operation, and return both its result
+               and the new value of FPSR.Q.  We can simply zero out FPSR
+               since all the other bits have no relevance in VEX generated
+               code. */
+            addInstr(env, ARM64Instr_Imm64(fpsr, 0));
+            addInstr(env, ARM64Instr_FPSR(True/*toFPSR*/, fpsr));
+            addInstr(env, ARM64Instr_VBinV(op, resLo, argL, argR));
+            addInstr(env, ARM64Instr_FPSR(False/*!toFPSR*/, fpsr));
+            /* The Q bit sits at bit 27 of FPSR: shift it down to bit
+               0 and mask off everything else. */
+            addInstr(env, ARM64Instr_Shift(fpsr, fpsr, ARM64RI6_I6(27),
+                                                       ARM64sh_SHR));
+            ARM64RIL* ril_one = mb_mkARM64RIL_I(1);
+            vassert(ril_one);
+            addInstr(env, ARM64Instr_Logic(fpsr, fpsr, ril_one, ARM64lo_AND));
+            /* Now we have: the main (shift) result in |resLo|, and the
+               Q bit at the bottom of |fpsr|. */
+            addInstr(env, ARM64Instr_VQfromX(resHi, fpsr));
+            *rHi = resHi;
+            *rLo = resLo;
+            return;
+         }
+
+         /* ... */
+         default:
+            break;
+      } /* switch on the binop */
+   } /* if (e->tag == Iex_Binop) */
+
+   ppIRExpr(e);
+   vpanic("iselV256Expr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Statements                                  ---*/
+/*---------------------------------------------------------*/
+
+/* Select instructions for a single IR statement, appending the
+   generated code to |env|.  Statements that cannot be handled cause a
+   panic via the stmt_fail path at the bottom. */
+static void iselStmt ( ISelEnv* env, IRStmt* stmt )
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n-- ");
+      ppIRStmt(stmt);
+      vex_printf("\n");
+   }
+   switch (stmt->tag) {
+
+   /* --------- STORE --------- */
+   /* little-endian write to memory */
+   case Ist_Store: {
+      IRType    tya  = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
+      IRType    tyd  = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
+      IREndness end  = stmt->Ist.Store.end;
+
+      /* Only 64-bit addresses and little-endian stores are handled. */
+      if (tya != Ity_I64 || end != Iend_LE) 
+         goto stmt_fail;
+
+      if (tyd == Ity_I64) {
+         HReg        rD = iselIntExpr_R(env, stmt->Ist.Store.data);
+         ARM64AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr, tyd);
+         addInstr(env, ARM64Instr_LdSt64(False/*!isLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_I32) {
+         HReg        rD = iselIntExpr_R(env, stmt->Ist.Store.data);
+         ARM64AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr, tyd);
+         addInstr(env, ARM64Instr_LdSt32(False/*!isLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_I16) {
+         HReg        rD = iselIntExpr_R(env, stmt->Ist.Store.data);
+         ARM64AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr, tyd);
+         addInstr(env, ARM64Instr_LdSt16(False/*!isLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_I8) {
+         HReg        rD = iselIntExpr_R(env, stmt->Ist.Store.data);
+         ARM64AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr, tyd);
+         addInstr(env, ARM64Instr_LdSt8(False/*!isLoad*/, rD, am));
+         return;
+      }
+      /* Vector and FP stores take a plain register address rather
+         than an AMode. */
+      if (tyd == Ity_V128) {
+         HReg qD   = iselV128Expr(env, stmt->Ist.Store.data);
+         HReg addr = iselIntExpr_R(env, stmt->Ist.Store.addr);
+         addInstr(env, ARM64Instr_VLdStQ(False/*!isLoad*/, qD, addr));
+         return;
+      }
+      if (tyd == Ity_F64) {
+         HReg dD   = iselDblExpr(env, stmt->Ist.Store.data);
+         HReg addr = iselIntExpr_R(env, stmt->Ist.Store.addr);
+         addInstr(env, ARM64Instr_VLdStD(False/*!isLoad*/, dD, addr, 0));
+         return;
+      }
+      if (tyd == Ity_F32) {
+         HReg sD   = iselFltExpr(env, stmt->Ist.Store.data);
+         HReg addr = iselIntExpr_R(env, stmt->Ist.Store.addr);
+         addInstr(env, ARM64Instr_VLdStS(False/*!isLoad*/, sD, addr, 0));
+         return;
+      }
+      break;
+   }
+
+   /* --------- PUT --------- */
+   /* write guest state, fixed offset */
+   case Ist_Put: {
+      IRType tyd  = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
+      UInt   offs = (UInt)stmt->Ist.Put.offset;
+      /* Each case requires the offset to be naturally aligned and
+         within range of the relevant addressing mode. */
+      if (tyd == Ity_I64 && 0 == (offs & 7) && offs < (8<<12)) {
+         HReg        rD = iselIntExpr_R(env, stmt->Ist.Put.data);
+         ARM64AMode* am = mk_baseblock_64bit_access_amode(offs);
+         addInstr(env, ARM64Instr_LdSt64(False/*!isLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_I32 && 0 == (offs & 3) && offs < (4<<12)) {
+         HReg        rD = iselIntExpr_R(env, stmt->Ist.Put.data);
+         ARM64AMode* am = mk_baseblock_32bit_access_amode(offs);
+         addInstr(env, ARM64Instr_LdSt32(False/*!isLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_I16 && 0 == (offs & 1) && offs < (2<<12)) {
+         HReg        rD = iselIntExpr_R(env, stmt->Ist.Put.data);
+         ARM64AMode* am = mk_baseblock_16bit_access_amode(offs);
+         addInstr(env, ARM64Instr_LdSt16(False/*!isLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_I8 && offs < (1<<12)) {
+         HReg        rD = iselIntExpr_R(env, stmt->Ist.Put.data);
+         ARM64AMode* am = mk_baseblock_8bit_access_amode(offs);
+         addInstr(env, ARM64Instr_LdSt8(False/*!isLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_V128 && offs < (1<<12)) {
+         HReg qD   = iselV128Expr(env, stmt->Ist.Put.data);
+         HReg addr = mk_baseblock_128bit_access_addr(env, offs);
+         addInstr(env, ARM64Instr_VLdStQ(False/*!isLoad*/, qD, addr));
+         return;
+      }
+      if (tyd == Ity_F64 && 0 == (offs & 7) && offs < (8<<12)) {
+         HReg dD   = iselDblExpr(env, stmt->Ist.Put.data);
+         HReg bbp  = get_baseblock_register();
+         addInstr(env, ARM64Instr_VLdStD(False/*!isLoad*/, dD, bbp, offs));
+         return;
+      }
+      if (tyd == Ity_F32 && 0 == (offs & 3) && offs < (4<<12)) {
+         HReg sD   = iselFltExpr(env, stmt->Ist.Put.data);
+         HReg bbp  = get_baseblock_register();
+         addInstr(env, ARM64Instr_VLdStS(False/*!isLoad*/, sD, bbp, offs));
+         return;
+      }
+      if (tyd == Ity_F16 && 0 == (offs & 1) && offs < (2<<12)) {
+         HReg hD   = iselF16Expr(env, stmt->Ist.Put.data);
+         HReg bbp  = get_baseblock_register();
+         addInstr(env, ARM64Instr_VLdStH(False/*!isLoad*/, hD, bbp, offs));
+         return;
+      }
+
+      break;
+   }
+
+   /* --------- TMP --------- */
+   /* assign value to temporary */
+   case Ist_WrTmp: {
+      IRTemp tmp = stmt->Ist.WrTmp.tmp;
+      IRType ty  = typeOfIRTemp(env->type_env, tmp);
+
+      if (ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) {
+         /* We could do a lot better here.  But for the time being: */
+         HReg dst = lookupIRTemp(env, tmp);
+         HReg rD  = iselIntExpr_R(env, stmt->Ist.WrTmp.data);
+         addInstr(env, ARM64Instr_MovI(dst, rD));
+         return;
+      }
+      if (ty == Ity_I1) {
+         /* Here, we are generating a I1 value into a 64 bit register.
+            Make sure the value in the register is only zero or one,
+            but no other.  This allows optimisation of the
+            1Uto64(tmp:I1) case, by making it simply a copy of the
+            register holding 'tmp'.  The point being that the value in
+            the register holding 'tmp' can only have been created
+            here.  LATER: that seems dangerous; safer to do 'tmp & 1'
+            in that case.  Also, could do this just with a single CINC
+            insn. */
+         /* CLONE-01 */
+         HReg zero = newVRegI(env);
+         HReg one  = newVRegI(env);
+         HReg dst  = lookupIRTemp(env, tmp);
+         addInstr(env, ARM64Instr_Imm64(zero, 0));
+         addInstr(env, ARM64Instr_Imm64(one,  1));
+         ARM64CondCode cc = iselCondCode(env, stmt->Ist.WrTmp.data);
+         addInstr(env, ARM64Instr_CSel(dst, one, zero, cc));
+         return;
+      }
+      if (ty == Ity_F64) {
+         HReg src = iselDblExpr(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, ARM64Instr_VMov(8, dst, src));
+         return;
+      }
+      if (ty == Ity_F32) {
+         HReg src = iselFltExpr(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, ARM64Instr_VMov(8/*yes, really*/, dst, src));
+         return;
+      }
+      if (ty == Ity_V128) {
+         HReg src = iselV128Expr(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, ARM64Instr_VMov(16, dst, src));
+         return;
+      }
+      if (ty == Ity_V256) {
+         /* 256-bit values occupy a pair of V128 registers. */
+         HReg srcHi, srcLo, dstHi, dstLo;
+         iselV256Expr(&srcHi,&srcLo, env, stmt->Ist.WrTmp.data);
+         lookupIRTempPair( &dstHi, &dstLo, env, tmp);
+         addInstr(env, ARM64Instr_VMov(16, dstHi, srcHi));
+         addInstr(env, ARM64Instr_VMov(16, dstLo, srcLo));
+         return;
+      }
+      break;
+   }
+
+   /* --------- Call to DIRTY helper --------- */
+   /* call complex ("dirty") helper function */
+   case Ist_Dirty: {
+      IRDirty* d = stmt->Ist.Dirty.details;
+
+      /* Figure out the return type, if any. */
+      IRType retty = Ity_INVALID;
+      if (d->tmp != IRTemp_INVALID)
+         retty = typeOfIRTemp(env->type_env, d->tmp);
+
+      Bool retty_ok = False;
+      switch (retty) {
+         case Ity_INVALID: /* function doesn't return anything */
+         case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8:
+         case Ity_V128:
+            retty_ok = True; break;
+         default:
+            break;
+      }
+      if (!retty_ok)
+         break; /* will go to stmt_fail: */
+
+      /* Marshal args, do the call, and set the return value to 0x555..555
+         if this is a conditional call that returns a value and the
+         call is skipped. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, d->guard, d->cee, retty, d->args );
+      vassert(is_sane_RetLoc(rloc));
+
+      /* Now figure out what to do with the returned value, if any. */
+      switch (retty) {
+         case Ity_INVALID: {
+            /* No return value.  Nothing to do. */
+            vassert(d->tmp == IRTemp_INVALID);
+            vassert(rloc.pri == RLPri_None);
+            vassert(addToSp == 0);
+            return;
+         }
+         case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8: {
+            vassert(rloc.pri == RLPri_Int);
+            vassert(addToSp == 0);
+            /* The returned value is in x0.  Park it in the register
+               associated with tmp. */
+            HReg dst = lookupIRTemp(env, d->tmp);
+            addInstr(env, ARM64Instr_MovI(dst, hregARM64_X0()) );
+            return;
+         }
+         case Ity_V128: {
+            /* The returned value is on the stack, and *retloc tells
+               us where.  Fish it off the stack and then move the
+               stack pointer upwards to clear it, as directed by
+               doHelperCall. */
+            vassert(rloc.pri == RLPri_V128SpRel);
+            vassert(rloc.spOff < 256); // stay sane
+            vassert(addToSp >= 16); // ditto
+            vassert(addToSp < 256); // ditto
+            HReg dst = lookupIRTemp(env, d->tmp);
+            HReg tmp = newVRegI(env); // the address of the returned value
+            addInstr(env, ARM64Instr_FromSP(tmp)); // tmp = SP
+            addInstr(env, ARM64Instr_Arith(tmp, tmp,
+                                           ARM64RIA_I12((UShort)rloc.spOff, 0),
+                                           True/*isAdd*/ ));
+            addInstr(env, ARM64Instr_VLdStQ(True/*isLoad*/, dst, tmp));
+            addInstr(env, ARM64Instr_AddToSP(addToSp));
+            return;
+         }
+         default:
+            /*NOTREACHED*/
+            vassert(0);
+      }
+      break;
+   }
+
+   /* --------- Load Linked and Store Conditional --------- */
+   case Ist_LLSC: {
+      if (stmt->Ist.LLSC.storedata == NULL) {
+         /* LL */
+         IRTemp res = stmt->Ist.LLSC.result;
+         IRType ty  = typeOfIRTemp(env->type_env, res);
+         if (ty == Ity_I64 || ty == Ity_I32 
+             || ty == Ity_I16 || ty == Ity_I8) {
+            Int  szB   = 0;
+            HReg r_dst = lookupIRTemp(env, res);
+            HReg raddr = iselIntExpr_R(env, stmt->Ist.LLSC.addr);
+            switch (ty) {
+               case Ity_I8:  szB = 1; break;
+               case Ity_I16: szB = 2; break;
+               case Ity_I32: szB = 4; break;
+               case Ity_I64: szB = 8; break;
+               default:      vassert(0);
+            }
+            /* ARM64Instr_LdrEX uses fixed registers: address in x4,
+               loaded data appears in x2. */
+            addInstr(env, ARM64Instr_MovI(hregARM64_X4(), raddr));
+            addInstr(env, ARM64Instr_LdrEX(szB));
+            addInstr(env, ARM64Instr_MovI(r_dst, hregARM64_X2()));
+            return;
+         }
+         goto stmt_fail;
+      } else {
+         /* SC */
+         IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.storedata);
+         if (tyd == Ity_I64 || tyd == Ity_I32
+             || tyd == Ity_I16 || tyd == Ity_I8) {
+            Int  szB = 0;
+            HReg rD  = iselIntExpr_R(env, stmt->Ist.LLSC.storedata);
+            HReg rA  = iselIntExpr_R(env, stmt->Ist.LLSC.addr);
+            switch (tyd) {
+               case Ity_I8:  szB = 1; break;
+               case Ity_I16: szB = 2; break;
+               case Ity_I32: szB = 4; break;
+               case Ity_I64: szB = 8; break;
+               default:      vassert(0);
+            }
+            /* ARM64Instr_StrEX also uses fixed registers: data in x2,
+               address in x4; the success flag is left in x0. */
+            addInstr(env, ARM64Instr_MovI(hregARM64_X2(), rD));
+            addInstr(env, ARM64Instr_MovI(hregARM64_X4(), rA));
+            addInstr(env, ARM64Instr_StrEX(szB));
+         } else {
+            goto stmt_fail;
+         }
+         /* now r0 is 1 if failed, 0 if success.  Change to IR
+            conventions (0 is fail, 1 is success).  Also transfer
+            result to r_res. */
+         IRTemp    res   = stmt->Ist.LLSC.result;
+         IRType    ty    = typeOfIRTemp(env->type_env, res);
+         HReg      r_res = lookupIRTemp(env, res);
+         ARM64RIL* one   = mb_mkARM64RIL_I(1);
+         vassert(ty == Ity_I1);
+         vassert(one);
+         /* XOR with 1 inverts the sense of the flag ... */
+         addInstr(env, ARM64Instr_Logic(r_res, hregARM64_X0(), one,
+                                        ARM64lo_XOR));
+         /* And be conservative -- mask off all but the lowest bit. */
+         addInstr(env, ARM64Instr_Logic(r_res, r_res, one,
+                                        ARM64lo_AND));
+         return;
+      }
+      break;
+   }
+
+   /* --------- MEM FENCE --------- */
+   case Ist_MBE:
+      switch (stmt->Ist.MBE.event) {
+         case Imbe_Fence:
+            addInstr(env, ARM64Instr_MFence());
+            return;
+         default:
+            break;
+      }
+      break;
+
+   /* --------- INSTR MARK --------- */
+   /* Doesn't generate any executable code ... */
+   case Ist_IMark:
+       return;
+
+   /* --------- ABI HINT --------- */
+   /* These have no meaning (denotation in the IR) and so we ignore
+      them ... if any actually made it this far. */
+   case Ist_AbiHint:
+       return;
+
+   /* --------- NO-OP --------- */
+   case Ist_NoOp:
+       return;
+
+   /* --------- EXIT --------- */
+   case Ist_Exit: {
+      if (stmt->Ist.Exit.dst->tag != Ico_U64)
+         vpanic("isel_arm: Ist_Exit: dst is not a 64-bit value");
+
+      ARM64CondCode cc 
+         = iselCondCode(env, stmt->Ist.Exit.guard);
+      ARM64AMode* amPC
+         = mk_baseblock_64bit_access_amode(stmt->Ist.Exit.offsIP);
+
+      /* Case: boring transfer to known address */
+      if (stmt->Ist.Exit.jk == Ijk_Boring
+          /*ATC || stmt->Ist.Exit.jk == Ijk_Call */
+          /*ATC || stmt->Ist.Exit.jk == Ijk_Ret */ ) {
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = ((Addr64)stmt->Ist.Exit.dst->Ico.U64) > env->max_ga;
+            if (0) vex_printf("%s", toFastEP ? "Y" : ",");
+            addInstr(env, ARM64Instr_XDirect(stmt->Ist.Exit.dst->Ico.U64,
+                                             amPC, cc, toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselIntExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, ARM64Instr_XAssisted(r, amPC, cc, Ijk_Boring));
+         }
+         return;
+      }
+
+      /* Do we ever expect to see any other kind? */
+      goto stmt_fail;
+   }
+
+   default: break;
+   }
+  stmt_fail:
+   ppIRStmt(stmt);
+   vpanic("iselStmt");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Basic block terminators (Nexts)             ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for the block terminator: a transfer of control to
+   |next| with jump kind |jk|.  |offsIP| is the guest-state offset at
+   which the (guest) IP is stored, as seen in the PUT trace below. */
+static void iselNext ( ISelEnv* env,
+                       IRExpr* next, IRJumpKind jk, Int offsIP )
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf( "\n-- PUT(%d) = ", offsIP);
+      ppIRExpr( next );
+      vex_printf( "; exit-");
+      ppIRJumpKind(jk);
+      vex_printf( "\n");
+   }
+
+   /* Case: boring transfer to known address */
+   if (next->tag == Iex_Const) {
+      IRConst* cdst = next->Iex.Const.con;
+      vassert(cdst->tag == Ico_U64);
+      if (jk == Ijk_Boring || jk == Ijk_Call) {
+         /* Boring transfer to known address */
+         ARM64AMode* amPC = mk_baseblock_64bit_access_amode(offsIP);
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = ((Addr64)cdst->Ico.U64) > env->max_ga;
+            if (0) vex_printf("%s", toFastEP ? "X" : ".");
+            addInstr(env, ARM64Instr_XDirect(cdst->Ico.U64,
+                                             amPC, ARM64cc_AL, 
+                                             toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselIntExpr_R(env, next);
+            addInstr(env, ARM64Instr_XAssisted(r, amPC, ARM64cc_AL,
+                                               Ijk_Boring));
+         }
+         return;
+      }
+   }
+
+   /* Case: call/return (==boring) transfer to any address */
+   switch (jk) {
+      case Ijk_Boring: case Ijk_Ret: case Ijk_Call: {
+         HReg        r    = iselIntExpr_R(env, next);
+         ARM64AMode* amPC = mk_baseblock_64bit_access_amode(offsIP);
+         if (env->chainingAllowed) {
+            addInstr(env, ARM64Instr_XIndir(r, amPC, ARM64cc_AL));
+         } else {
+            addInstr(env, ARM64Instr_XAssisted(r, amPC, ARM64cc_AL,
+                                               Ijk_Boring));
+         }
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* Case: assisted transfer to arbitrary address */
+   switch (jk) {
+      /* Keep this list in sync with that for Ist_Exit above */
+      case Ijk_ClientReq:
+      case Ijk_NoDecode:
+      case Ijk_NoRedir:
+      case Ijk_Sys_syscall:
+      case Ijk_InvalICache:
+      case Ijk_FlushDCache:
+      case Ijk_SigTRAP:
+      {
+         HReg        r    = iselIntExpr_R(env, next);
+         ARM64AMode* amPC = mk_baseblock_64bit_access_amode(offsIP);
+         addInstr(env, ARM64Instr_XAssisted(r, amPC, ARM64cc_AL, jk));
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* No case matched: dump the terminator and give up. */
+   vex_printf( "\n-- PUT(%d) = ", offsIP);
+   ppIRExpr( next );
+   vex_printf( "; exit-");
+   ppIRJumpKind(jk);
+   vex_printf( "\n");
+   vassert(0); // are we expecting any other kind?
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Insn selector top-level                           ---*/
+/*---------------------------------------------------------*/
+
+/* Translate an entire SB to arm64 code. */
+
+/* Top-level driver: translate the whole superblock |bb| into a fresh
+   HInstrArray of arm64 instructions, using only virtual registers.
+   The number of vregs used is recorded in the returned array. */
+HInstrArray* iselSB_ARM64 ( const IRSB* bb,
+                            VexArch      arch_host,
+                            const VexArchInfo* archinfo_host,
+                            const VexAbiInfo*  vbi/*UNUSED*/,
+                            Int offs_Host_EvC_Counter,
+                            Int offs_Host_EvC_FailAddr,
+                            Bool chainingAllowed,
+                            Bool addProfInc,
+                            Addr max_ga )
+{
+   Int        i, j;
+   HReg       hreg, hregHI;
+   ISelEnv*   env;
+   UInt       hwcaps_host = archinfo_host->hwcaps;
+   ARM64AMode *amCounter, *amFailAddr;
+
+   /* sanity ... */
+   vassert(arch_host == VexArchARM64);
+
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
+   /* guard against unexpected space regressions */
+   vassert(sizeof(ARM64Instr) <= 32);
+
+   /* Make up an initial environment to use. */
+   env = LibVEX_Alloc_inline(sizeof(ISelEnv));
+   env->vreg_ctr = 0;
+
+   /* Set up output code array. */
+   env->code = newHInstrArray();
+    
+   /* Copy BB's type env. */
+   env->type_env = bb->tyenv;
+
+   /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
+      change as we go along. */
+   env->n_vregmap = bb->tyenv->types_used;
+   env->vregmap   = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+
+   /* and finally ... */
+   env->chainingAllowed = chainingAllowed;
+   env->hwcaps          = hwcaps_host;
+   env->previous_rm     = NULL;
+   env->max_ga          = max_ga;
+
+   /* For each IR temporary, allocate a suitably-kinded virtual
+      register.  |j| counts the virtual registers handed out;
+      128/256-bit types get a register pair (hreg, hregHI). */
+   j = 0;
+   for (i = 0; i < env->n_vregmap; i++) {
+      hregHI = hreg = INVALID_HREG;
+      switch (bb->tyenv->types[i]) {
+         case Ity_I1:
+         case Ity_I8: case Ity_I16: case Ity_I32: case Ity_I64:
+            hreg = mkHReg(True, HRcInt64, 0, j++);
+            break;
+         case Ity_I128:
+            hreg   = mkHReg(True, HRcInt64, 0, j++);
+            hregHI = mkHReg(True, HRcInt64, 0, j++);
+            break;
+         case Ity_F16: // we'll use HRcFlt64 regs for F16 too
+         case Ity_F32: // we'll use HRcFlt64 regs for F32 too
+         case Ity_F64:
+            hreg = mkHReg(True, HRcFlt64, 0, j++);
+            break;
+         case Ity_V128:
+            hreg = mkHReg(True, HRcVec128, 0, j++);
+            break;
+         case Ity_V256:
+            hreg   = mkHReg(True, HRcVec128, 0, j++);
+            hregHI = mkHReg(True, HRcVec128, 0, j++);
+            break;
+         default:
+            ppIRType(bb->tyenv->types[i]);
+            vpanic("iselBB(arm64): IRTemp type");
+      }
+      env->vregmap[i]   = hreg;
+      env->vregmapHI[i] = hregHI;
+   }
+   env->vreg_ctr = j;
+
+   /* The very first instruction must be an event check.  The amodes
+      are relative to x21 -- presumably the baseblock pointer; TODO
+      confirm against get_baseblock_register. */
+   amCounter  = ARM64AMode_RI9(hregARM64_X21(), offs_Host_EvC_Counter);
+   amFailAddr = ARM64AMode_RI9(hregARM64_X21(), offs_Host_EvC_FailAddr);
+   addInstr(env, ARM64Instr_EvCheck(amCounter, amFailAddr));
+
+   /* Possibly a block counter increment (for profiling).  At this
+      point we don't know the address of the counter, so just pretend
+      it is zero.  It will have to be patched later, but before this
+      translation is used, by a call to LibVEX_patchProfCtr. */
+   if (addProfInc) {
+      addInstr(env, ARM64Instr_ProfInc());
+   }
+
+   /* Ok, finally we can iterate over the statements. */
+   for (i = 0; i < bb->stmts_used; i++)
+      iselStmt(env, bb->stmts[i]);
+
+   iselNext(env, bb->next, bb->jumpkind, bb->offsIP);
+
+   /* record the number of vregs we used. */
+   env->code->n_vregs = env->vreg_ctr;
+   return env->code;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                   host_arm64_isel.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_arm_defs.c b/VEX/priv/host_arm_defs.c
new file mode 100644
index 0000000..310271d
--- /dev/null
+++ b/VEX/priv/host_arm_defs.c
@@ -0,0 +1,4882 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                   host_arm_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   NEON support is
+   Copyright (C) 2010-2013 Samsung Electronics
+   contributed by Dmitry Zhurikhin <zhur@ispras.ru>
+              and Kirill Batuzov <batuzovk@ispras.ru>
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "host_arm_defs.h"
+
/* Host hardware capabilities.  NOTE(review): presumably a
   VEX_HWCAPS_ARM_* bitmask set up by the library's caller — confirm
   against libvex.h. */
UInt arm_hwcaps = 0;
+
+
+/* --------- Registers. --------- */
+
+const RRegUniverse* getRRegUniverse_ARM ( void )
+{
+   /* The real-register universe is a big constant, so we just want to
+      initialise it once. */
+   static RRegUniverse rRegUniverse_ARM;
+   static Bool         rRegUniverse_ARM_initted = False;
+
+   /* Handy shorthand, nothing more */
+   RRegUniverse* ru = &rRegUniverse_ARM;
+
+   /* This isn't thread-safe.  Sigh. */
+   if (LIKELY(rRegUniverse_ARM_initted))
+      return ru;
+
+   RRegUniverse__init(ru);
+
+   /* Add the registers.  The initial segment of this array must be
+      those available for allocation by reg-alloc, and those that
+      follow are not available for allocation. */
+
+   /* Callee saves ones are listed first, since we prefer them
+      if they're available. */
+   ru->regs[ru->size++] = hregARM_R4();
+   ru->regs[ru->size++] = hregARM_R5();
+   ru->regs[ru->size++] = hregARM_R6();
+   ru->regs[ru->size++] = hregARM_R7();
+   ru->regs[ru->size++] = hregARM_R10();
+   ru->regs[ru->size++] = hregARM_R11();
+   /* Otherwise we'll have to slum it out with caller-saves ones. */
+   ru->regs[ru->size++] = hregARM_R0();
+   ru->regs[ru->size++] = hregARM_R1();
+   ru->regs[ru->size++] = hregARM_R2();
+   ru->regs[ru->size++] = hregARM_R3();
+   ru->regs[ru->size++] = hregARM_R9();
+   /* FP registers.  Note: these are all callee-save.  Yay!  Hence we
+      don't need to mention them as trashed in getHRegUsage for
+      ARMInstr_Call. */
+   ru->regs[ru->size++] = hregARM_D8();
+   ru->regs[ru->size++] = hregARM_D9();
+   ru->regs[ru->size++] = hregARM_D10();
+   ru->regs[ru->size++] = hregARM_D11();
+   ru->regs[ru->size++] = hregARM_D12();
+   ru->regs[ru->size++] = hregARM_S26();
+   ru->regs[ru->size++] = hregARM_S27();
+   ru->regs[ru->size++] = hregARM_S28();
+   ru->regs[ru->size++] = hregARM_S29();
+   ru->regs[ru->size++] = hregARM_S30();
+   ru->regs[ru->size++] = hregARM_Q8();
+   ru->regs[ru->size++] = hregARM_Q9();
+   ru->regs[ru->size++] = hregARM_Q10();
+   ru->regs[ru->size++] = hregARM_Q11();
+   ru->regs[ru->size++] = hregARM_Q12();
+   ru->allocable = ru->size;
+
+   /* And other regs, not available to the allocator. */
+
+   // unavail: r8 as GSP
+   // r12 is used as a spill/reload temporary
+   // r13 as SP
+   // r14 as LR
+   // r15 as PC
+   //
+   // All in all, we have 11 allocatable integer registers:
+   // 0 1 2 3 4 5 6 7 9 10 11, with r8 dedicated as GSP
+   // and r12 dedicated as a spill temporary.
+   // 13 14 and 15 are not under the allocator's control.
+   //
+   // Hence for the allocatable registers we have:
+   //
+   // callee-saved: 4 5 6 7 (8) 9 10 11
+   // caller-saved: 0 1 2 3
+   // Note 9 is ambiguous: the base EABI does not give an e/r-saved
+   // designation for it, but the Linux instantiation of the ABI
+   // specifies it as callee-saved.
+   //
+   // If the set of available registers changes or if the e/r status
+   // changes, be sure to re-check/sync the definition of
+   // getHRegUsage for ARMInstr_Call too.
+   ru->regs[ru->size++] = hregARM_R8();
+   ru->regs[ru->size++] = hregARM_R12();
+   ru->regs[ru->size++] = hregARM_R13();
+   ru->regs[ru->size++] = hregARM_R14();
+   ru->regs[ru->size++] = hregARM_R15();
+   ru->regs[ru->size++] = hregARM_Q13();
+   ru->regs[ru->size++] = hregARM_Q14();
+   ru->regs[ru->size++] = hregARM_Q15();
+
+   rRegUniverse_ARM_initted = True;
+
+   RRegUniverse__check_is_sane(ru);
+   return ru;
+}
+
+
+void ppHRegARM ( HReg reg )  {
+   Int r;
+   /* Be generic for all virtual regs. */
+   if (hregIsVirtual(reg)) {
+      ppHReg(reg);
+      return;
+   }
+   /* But specific for real regs. */
+   switch (hregClass(reg)) {
+      case HRcInt32:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 16);
+         vex_printf("r%d", r);
+         return;
+      case HRcFlt64:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 32);
+         vex_printf("d%d", r);
+         return;
+      case HRcFlt32:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 32);
+         vex_printf("s%d", r);
+         return;
+      case HRcVec128:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 16);
+         vex_printf("q%d", r);
+         return;
+      default:
+         vpanic("ppHRegARM");
+   }
+}
+
+
+/* --------- Condition codes, ARM encoding. --------- */
+
+const HChar* showARMCondCode ( ARMCondCode cond ) {
+   switch (cond) {
+       case ARMcc_EQ:  return "eq";
+       case ARMcc_NE:  return "ne";
+       case ARMcc_HS:  return "hs";
+       case ARMcc_LO:  return "lo";
+       case ARMcc_MI:  return "mi";
+       case ARMcc_PL:  return "pl";
+       case ARMcc_VS:  return "vs";
+       case ARMcc_VC:  return "vc";
+       case ARMcc_HI:  return "hi";
+       case ARMcc_LS:  return "ls";
+       case ARMcc_GE:  return "ge";
+       case ARMcc_LT:  return "lt";
+       case ARMcc_GT:  return "gt";
+       case ARMcc_LE:  return "le";
+       case ARMcc_AL:  return "al"; // default
+       case ARMcc_NV:  return "nv";
+       default: vpanic("showARMCondCode");
+   }
+}
+
+
+/* --------- Mem AModes: Addressing Mode 1 --------- */
+
+ARMAMode1* ARMAMode1_RI  ( HReg reg, Int simm13 ) {
+   ARMAMode1* am        = LibVEX_Alloc_inline(sizeof(ARMAMode1));
+   am->tag              = ARMam1_RI;
+   am->ARMam1.RI.reg    = reg;
+   am->ARMam1.RI.simm13 = simm13;
+   vassert(-4095 <= simm13 && simm13 <= 4095);
+   return am;
+}
+ARMAMode1* ARMAMode1_RRS ( HReg base, HReg index, UInt shift ) {
+   ARMAMode1* am        = LibVEX_Alloc_inline(sizeof(ARMAMode1));
+   am->tag              = ARMam1_RRS;
+   am->ARMam1.RRS.base  = base;
+   am->ARMam1.RRS.index = index;
+   am->ARMam1.RRS.shift = shift;
+   vassert(0 <= shift && shift <= 3);
+   return am;
+}
+
+void ppARMAMode1 ( ARMAMode1* am ) {
+   switch (am->tag) {
+      case ARMam1_RI:
+         vex_printf("%d(", am->ARMam1.RI.simm13);
+         ppHRegARM(am->ARMam1.RI.reg);
+         vex_printf(")");
+         break;
+      case ARMam1_RRS:
+         vex_printf("(");
+         ppHRegARM(am->ARMam1.RRS.base);
+         vex_printf(",");
+         ppHRegARM(am->ARMam1.RRS.index);
+         vex_printf(",%u)", am->ARMam1.RRS.shift);
+         break;
+      default:
+         vassert(0);
+   }
+}
+
+static void addRegUsage_ARMAMode1 ( HRegUsage* u, ARMAMode1* am ) {
+   switch (am->tag) {
+      case ARMam1_RI:
+         addHRegUse(u, HRmRead, am->ARMam1.RI.reg);
+         return;
+      case ARMam1_RRS:
+         //    addHRegUse(u, HRmRead, am->ARMam1.RRS.base);
+         //    addHRegUse(u, HRmRead, am->ARMam1.RRS.index);
+         //   return;
+      default:
+         vpanic("addRegUsage_ARMAmode1");
+   }
+}
+
+static void mapRegs_ARMAMode1 ( HRegRemap* m, ARMAMode1* am ) {
+   switch (am->tag) {
+      case ARMam1_RI:
+         am->ARMam1.RI.reg = lookupHRegRemap(m, am->ARMam1.RI.reg);
+         return;
+      case ARMam1_RRS:
+         //am->ARMam1.RR.base =lookupHRegRemap(m, am->ARMam1.RR.base);
+         //am->ARMam1.RR.index = lookupHRegRemap(m, am->ARMam1.RR.index);
+         //return;
+      default:
+         vpanic("mapRegs_ARMAmode1");
+   }
+}
+
+
+/* --------- Mem AModes: Addressing Mode 2 --------- */
+
+ARMAMode2* ARMAMode2_RI ( HReg reg, Int simm9 ) {
+   ARMAMode2* am       = LibVEX_Alloc_inline(sizeof(ARMAMode2));
+   am->tag             = ARMam2_RI;
+   am->ARMam2.RI.reg   = reg;
+   am->ARMam2.RI.simm9 = simm9;
+   vassert(-255 <= simm9 && simm9 <= 255);
+   return am;
+}
+ARMAMode2* ARMAMode2_RR ( HReg base, HReg index ) {
+   ARMAMode2* am       = LibVEX_Alloc_inline(sizeof(ARMAMode2));
+   am->tag             = ARMam2_RR;
+   am->ARMam2.RR.base  = base;
+   am->ARMam2.RR.index = index;
+   return am;
+}
+
+void ppARMAMode2 ( ARMAMode2* am ) {
+   switch (am->tag) {
+      case ARMam2_RI:
+         vex_printf("%d(", am->ARMam2.RI.simm9);
+         ppHRegARM(am->ARMam2.RI.reg);
+         vex_printf(")");
+         break;
+      case ARMam2_RR:
+         vex_printf("(");
+         ppHRegARM(am->ARMam2.RR.base);
+         vex_printf(",");
+         ppHRegARM(am->ARMam2.RR.index);
+         vex_printf(")");
+         break;
+      default:
+         vassert(0);
+   }
+}
+
+static void addRegUsage_ARMAMode2 ( HRegUsage* u, ARMAMode2* am ) {
+   switch (am->tag) {
+      case ARMam2_RI:
+         addHRegUse(u, HRmRead, am->ARMam2.RI.reg);
+         return;
+      case ARMam2_RR:
+         //    addHRegUse(u, HRmRead, am->ARMam2.RR.base);
+         //    addHRegUse(u, HRmRead, am->ARMam2.RR.index);
+         //   return;
+      default:
+         vpanic("addRegUsage_ARMAmode2");
+   }
+}
+
+static void mapRegs_ARMAMode2 ( HRegRemap* m, ARMAMode2* am ) {
+   switch (am->tag) {
+      case ARMam2_RI:
+         am->ARMam2.RI.reg = lookupHRegRemap(m, am->ARMam2.RI.reg);
+         return;
+      case ARMam2_RR:
+         //am->ARMam2.RR.base =lookupHRegRemap(m, am->ARMam2.RR.base);
+         //am->ARMam2.RR.index = lookupHRegRemap(m, am->ARMam2.RR.index);
+         //return;
+      default:
+         vpanic("mapRegs_ARMAmode2");
+   }
+}
+
+
+/* --------- Mem AModes: Addressing Mode VFP --------- */
+
+ARMAModeV* mkARMAModeV ( HReg reg, Int simm11 ) {
+   ARMAModeV* am = LibVEX_Alloc_inline(sizeof(ARMAModeV));
+   vassert(simm11 >= -1020 && simm11 <= 1020);
+   vassert(0 == (simm11 & 3));
+   am->reg    = reg;
+   am->simm11 = simm11;
+   return am;
+}
+
+void ppARMAModeV ( ARMAModeV* am ) {
+   vex_printf("%d(", am->simm11);
+   ppHRegARM(am->reg);
+   vex_printf(")");
+}
+
+static void addRegUsage_ARMAModeV ( HRegUsage* u, ARMAModeV* am ) {
+   addHRegUse(u, HRmRead, am->reg);
+}
+
+static void mapRegs_ARMAModeV ( HRegRemap* m, ARMAModeV* am ) {
+   am->reg = lookupHRegRemap(m, am->reg);
+}
+
+
+/* --------- Mem AModes: Addressing Mode Neon ------- */
+
+ARMAModeN *mkARMAModeN_RR ( HReg rN, HReg rM ) {
+   ARMAModeN* am = LibVEX_Alloc_inline(sizeof(ARMAModeN));
+   am->tag = ARMamN_RR;
+   am->ARMamN.RR.rN = rN;
+   am->ARMamN.RR.rM = rM;
+   return am;
+}
+
+ARMAModeN *mkARMAModeN_R ( HReg rN ) {
+   ARMAModeN* am = LibVEX_Alloc_inline(sizeof(ARMAModeN));
+   am->tag = ARMamN_R;
+   am->ARMamN.R.rN = rN;
+   return am;
+}
+
+static void addRegUsage_ARMAModeN ( HRegUsage* u, ARMAModeN* am ) {
+   if (am->tag == ARMamN_R) {
+      addHRegUse(u, HRmRead, am->ARMamN.R.rN);
+   } else {
+      addHRegUse(u, HRmRead, am->ARMamN.RR.rN);
+      addHRegUse(u, HRmRead, am->ARMamN.RR.rM);
+   }
+}
+
+static void mapRegs_ARMAModeN ( HRegRemap* m, ARMAModeN* am ) {
+   if (am->tag == ARMamN_R) {
+      am->ARMamN.R.rN = lookupHRegRemap(m, am->ARMamN.R.rN);
+   } else {
+      am->ARMamN.RR.rN = lookupHRegRemap(m, am->ARMamN.RR.rN);
+      am->ARMamN.RR.rM = lookupHRegRemap(m, am->ARMamN.RR.rM);
+   }
+}
+
+void ppARMAModeN ( ARMAModeN* am ) {
+   vex_printf("[");
+   if (am->tag == ARMamN_R) {
+      ppHRegARM(am->ARMamN.R.rN);
+   } else {
+      ppHRegARM(am->ARMamN.RR.rN);
+   }
+   vex_printf("]");
+   if (am->tag == ARMamN_RR) {
+      vex_printf(", ");
+      ppHRegARM(am->ARMamN.RR.rM);
+   }
+}
+
+
+/* --------- Reg or imm-8x4 operands --------- */
+
+static UInt ROR32 ( UInt x, UInt sh ) {
+   vassert(sh >= 0 && sh < 32);
+   if (sh == 0)
+      return x;
+   else
+      return (x << (32-sh)) | (x >> sh);
+}
+
+ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 ) {
+   ARMRI84* ri84          = LibVEX_Alloc_inline(sizeof(ARMRI84));
+   ri84->tag              = ARMri84_I84;
+   ri84->ARMri84.I84.imm8 = imm8;
+   ri84->ARMri84.I84.imm4 = imm4;
+   vassert(imm8 >= 0 && imm8 <= 255);
+   vassert(imm4 >= 0 && imm4 <= 15);
+   return ri84;
+}
+ARMRI84* ARMRI84_R ( HReg reg ) {
+   ARMRI84* ri84       = LibVEX_Alloc_inline(sizeof(ARMRI84));
+   ri84->tag           = ARMri84_R;
+   ri84->ARMri84.R.reg = reg;
+   return ri84;
+}
+
+void ppARMRI84 ( ARMRI84* ri84 ) {
+   switch (ri84->tag) {
+      case ARMri84_I84:
+         vex_printf("0x%x", ROR32(ri84->ARMri84.I84.imm8,
+                                  2 * ri84->ARMri84.I84.imm4));
+         break;
+      case ARMri84_R:
+         ppHRegARM(ri84->ARMri84.R.reg);
+         break;
+      default:
+         vassert(0);
+   }
+}
+
+static void addRegUsage_ARMRI84 ( HRegUsage* u, ARMRI84* ri84 ) {
+   switch (ri84->tag) {
+      case ARMri84_I84:
+         return;
+      case ARMri84_R:
+         addHRegUse(u, HRmRead, ri84->ARMri84.R.reg);
+         return;
+      default:
+         vpanic("addRegUsage_ARMRI84");
+   }
+}
+
+static void mapRegs_ARMRI84 ( HRegRemap* m, ARMRI84* ri84 ) {
+   switch (ri84->tag) {
+      case ARMri84_I84:
+         return;
+      case ARMri84_R:
+         ri84->ARMri84.R.reg = lookupHRegRemap(m, ri84->ARMri84.R.reg);
+         return;
+      default:
+         vpanic("mapRegs_ARMRI84");
+   }
+}
+
+
+/* --------- Reg or imm5 operands --------- */
+
+ARMRI5* ARMRI5_I5 ( UInt imm5 ) {
+   ARMRI5* ri5         = LibVEX_Alloc_inline(sizeof(ARMRI5));
+   ri5->tag            = ARMri5_I5;
+   ri5->ARMri5.I5.imm5 = imm5;
+   vassert(imm5 > 0 && imm5 <= 31); // zero is not allowed
+   return ri5;
+}
+ARMRI5* ARMRI5_R ( HReg reg ) {
+   ARMRI5* ri5       = LibVEX_Alloc_inline(sizeof(ARMRI5));
+   ri5->tag          = ARMri5_R;
+   ri5->ARMri5.R.reg = reg;
+   return ri5;
+}
+
+void ppARMRI5 ( ARMRI5* ri5 ) {
+   switch (ri5->tag) {
+      case ARMri5_I5:
+         vex_printf("%u", ri5->ARMri5.I5.imm5);
+         break;
+      case ARMri5_R:
+         ppHRegARM(ri5->ARMri5.R.reg);
+         break;
+      default:
+         vassert(0);
+   }
+}
+
+static void addRegUsage_ARMRI5 ( HRegUsage* u, ARMRI5* ri5 ) {
+   switch (ri5->tag) {
+      case ARMri5_I5:
+         return;
+      case ARMri5_R:
+         addHRegUse(u, HRmRead, ri5->ARMri5.R.reg);
+         return;
+      default:
+         vpanic("addRegUsage_ARMRI5");
+   }
+}
+
+static void mapRegs_ARMRI5 ( HRegRemap* m, ARMRI5* ri5 ) {
+   switch (ri5->tag) {
+      case ARMri5_I5:
+         return;
+      case ARMri5_R:
+         ri5->ARMri5.R.reg = lookupHRegRemap(m, ri5->ARMri5.R.reg);
+         return;
+      default:
+         vpanic("mapRegs_ARMRI5");
+   }
+}
+
+/* -------- Neon Immediate operatnd --------- */
+
+ARMNImm* ARMNImm_TI ( UInt type, UInt imm8 ) {
+   ARMNImm* i = LibVEX_Alloc_inline(sizeof(ARMNImm));
+   i->type = type;
+   i->imm8 = imm8;
+   return i;
+}
+
/* Expand the (type, imm8) pair of |imm| into the 64-bit value it
   denotes.  Types 0..3 place imm8 in byte 0..3 of each 32-bit half;
   types 4..6 place it in one or both bytes of each 16-bit lane;
   types 7..8 are imm8 followed by one or two 0xFF bytes, replicated
   into both halves; type 9 expands each of the 8 bits of imm8 into a
   whole byte; type 10 builds an FP32-style constant, replicated into
   both halves.  Panics on any other type. */
ULong ARMNImm_to_Imm64 ( ARMNImm* imm ) {
   int i, j;
   ULong y, x = imm->imm8;
   switch (imm->type) {
      /* Types 3..0: each extra fallthrough shifts imm8 one more byte
         to the left; the result occupies both 32-bit halves. */
      case 3:
         x = x << 8; /* fallthrough */
      case 2:
         x = x << 8; /* fallthrough */
      case 1:
         x = x << 8; /* fallthrough */
      case 0:
         return (x << 32) | x;
      /* Types 4..6: build a 16-bit pattern (imm8 in the low byte,
         high byte, or both bytes), then replicate it to 64 bits. */
      case 5:
      case 6:
         if (imm->type == 5)
            x = x << 8;
         else
            x = (x << 8) | x;
         /* fallthrough */
      case 4:
         x = (x << 16) | x;
         return (x << 32) | x;
      /* Types 7..8: imm8 followed by one (type 7) or two (type 8)
         0xFF bytes, replicated into both halves. */
      case 8:
         x = (x << 8) | 0xFF;
         /* fallthrough */
      case 7:
         x = (x << 8) | 0xFF;
         return (x << 32) | x;
      /* Type 9: bit i of imm8 becomes byte i of the result. */
      case 9:
         x = 0;
         for (i = 7; i >= 0; i--) {
            y = ((ULong)imm->imm8 >> i) & 1;
            for (j = 0; j < 8; j++) {
               x = (x << 1) | y;
            }
         }
         return x;
      /* Type 10: FP32-style constant: sign, inverted-then-replicated
         exponent bit, and 4 mantissa bits, shifted into the top of a
         32-bit word and replicated into both halves. */
      case 10:
         x |= (x & 0x80) << 5;
         x |= (~x & 0x40) << 5;
         x &= 0x187F; /* 0001 1000 0111 1111 */
         x |= (x & 0x40) << 4;
         x |= (x & 0x40) << 3;
         x |= (x & 0x40) << 2;
         x |= (x & 0x40) << 1;
         x = x << 19;
         x = (x << 32) | x;
         return x;
      default:
         vpanic("ARMNImm_to_Imm64");
   }
}
+
/* Inverse of ARMNImm_to_Imm64: try to encode the 64-bit value |x| as
   a Neon immediate.  Returns NULL if |x| is not representable in any
   of the encoding types 0..10. */
ARMNImm* Imm64_to_ARMNImm ( ULong x ) {
   ARMNImm tmp;
   if ((x & 0xFFFFFFFF) == (x >> 32)) {
      /* Both 32-bit halves are equal, so types 0..8 and 10 are the
         candidates.  Try each pattern in turn. */
      if ((x & 0xFFFFFF00) == 0)
         return ARMNImm_TI(0, x & 0xFF);
      if ((x & 0xFFFF00FF) == 0)
         return ARMNImm_TI(1, (x >> 8) & 0xFF);
      if ((x & 0xFF00FFFF) == 0)
         return ARMNImm_TI(2, (x >> 16) & 0xFF);
      if ((x & 0x00FFFFFF) == 0)
         return ARMNImm_TI(3, (x >> 24) & 0xFF);
      if ((x & 0xFFFF00FF) == 0xFF)
         return ARMNImm_TI(7, (x >> 8) & 0xFF);
      if ((x & 0xFF00FFFF) == 0xFFFF)
         return ARMNImm_TI(8, (x >> 16) & 0xFF);
      if ((x & 0xFFFF) == ((x >> 16) & 0xFFFF)) {
         /* Both 16-bit lanes are also equal: types 4..6. */
         if ((x & 0xFF00) == 0)
            return ARMNImm_TI(4, x & 0xFF);
         if ((x & 0x00FF) == 0)
            return ARMNImm_TI(5, (x >> 8) & 0xFF);
         if ((x & 0xFF) == ((x >> 8) & 0xFF))
            return ARMNImm_TI(6, x & 0xFF);
      }
      if ((x & 0x7FFFF) == 0) {
         /* Possibly a type-10 (FP32-style) constant; build the
            candidate and verify it round-trips exactly. */
         tmp.type = 10;
         tmp.imm8 = ((x >> 19) & 0x7F) | ((x >> 24) & 0x80);
         if (ARMNImm_to_Imm64(&tmp) == x)
            return ARMNImm_TI(tmp.type, tmp.imm8);
      }
   } else {
      /* Halves differ, so this can only be type 9 (one bit per
         byte).  Collect bit 0 of each byte and verify the
         round-trip. */
      tmp.imm8 = (((x >> 56) & 1) << 7)
               | (((x >> 48) & 1) << 6)
               | (((x >> 40) & 1) << 5)
               | (((x >> 32) & 1) << 4)
               | (((x >> 24) & 1) << 3)
               | (((x >> 16) & 1) << 2)
               | (((x >>  8) & 1) << 1)
               | (((x >>  0) & 1) << 0);
      tmp.type = 9;
      if (ARMNImm_to_Imm64 (&tmp) == x)
         return ARMNImm_TI(tmp.type, tmp.imm8);
   }
   return NULL;
}
+
+void ppARMNImm (ARMNImm* i) {
+   ULong x = ARMNImm_to_Imm64(i);
+   vex_printf("0x%llX%llX", x, x);
+}
+
+/* -- Register or scalar operand --- */
+
+ARMNRS* mkARMNRS(ARMNRS_tag tag, HReg reg, UInt index)
+{
+   ARMNRS *p = LibVEX_Alloc_inline(sizeof(ARMNRS));
+   p->tag = tag;
+   p->reg = reg;
+   p->index = index;
+   return p;
+}
+
+void ppARMNRS(ARMNRS *p)
+{
+   ppHRegARM(p->reg);
+   if (p->tag == ARMNRS_Scalar) {
+      vex_printf("[%d]", p->index);
+   }
+}
+
+/* --------- Instructions. --------- */
+
+const HChar* showARMAluOp ( ARMAluOp op ) {
+   switch (op) {
+      case ARMalu_ADD:  return "add";
+      case ARMalu_ADDS: return "adds";
+      case ARMalu_ADC:  return "adc";
+      case ARMalu_SUB:  return "sub";
+      case ARMalu_SUBS: return "subs";
+      case ARMalu_SBC:  return "sbc";
+      case ARMalu_AND:  return "and";
+      case ARMalu_BIC:  return "bic";
+      case ARMalu_OR:   return "orr";
+      case ARMalu_XOR:  return "xor";
+      default: vpanic("showARMAluOp");
+   }
+}
+
+const HChar* showARMShiftOp ( ARMShiftOp op ) {
+   switch (op) {
+      case ARMsh_SHL: return "shl";
+      case ARMsh_SHR: return "shr";
+      case ARMsh_SAR: return "sar";
+      default: vpanic("showARMShiftOp");
+   }
+}
+
+const HChar* showARMUnaryOp ( ARMUnaryOp op ) {
+   switch (op) {
+      case ARMun_NEG: return "neg";
+      case ARMun_NOT: return "not";
+      case ARMun_CLZ: return "clz";
+      default: vpanic("showARMUnaryOp");
+   }
+}
+
+const HChar* showARMMulOp ( ARMMulOp op ) {
+   switch (op) {
+      case ARMmul_PLAIN: return "mul";
+      case ARMmul_ZX:    return "umull";
+      case ARMmul_SX:    return "smull";
+      default: vpanic("showARMMulOp");
+   }
+}
+
+const HChar* showARMVfpOp ( ARMVfpOp op ) {
+   switch (op) {
+      case ARMvfp_ADD: return "add";
+      case ARMvfp_SUB: return "sub";
+      case ARMvfp_MUL: return "mul";
+      case ARMvfp_DIV: return "div";
+      default: vpanic("showARMVfpOp");
+   }
+}
+
+const HChar* showARMVfpUnaryOp ( ARMVfpUnaryOp op ) {
+   switch (op) {
+      case ARMvfpu_COPY: return "cpy";
+      case ARMvfpu_NEG:  return "neg";
+      case ARMvfpu_ABS:  return "abs";
+      case ARMvfpu_SQRT: return "sqrt";
+      default: vpanic("showARMVfpUnaryOp");
+   }
+}
+
+const HChar* showARMNeonBinOp ( ARMNeonBinOp op ) {
+   switch (op) {
+      case ARMneon_VAND: return "vand";
+      case ARMneon_VORR: return "vorr";
+      case ARMneon_VXOR: return "veor";
+      case ARMneon_VADD: return "vadd";
+      case ARMneon_VRHADDS: return "vrhadd";
+      case ARMneon_VRHADDU: return "vrhadd";
+      case ARMneon_VADDFP: return "vadd";
+      case ARMneon_VPADDFP: return "vpadd";
+      case ARMneon_VABDFP: return "vabd";
+      case ARMneon_VSUB: return "vsub";
+      case ARMneon_VSUBFP: return "vsub";
+      case ARMneon_VMINU: return "vmin";
+      case ARMneon_VMINS: return "vmin";
+      case ARMneon_VMINF: return "vmin";
+      case ARMneon_VMAXU: return "vmax";
+      case ARMneon_VMAXS: return "vmax";
+      case ARMneon_VMAXF: return "vmax";
+      case ARMneon_VQADDU: return "vqadd";
+      case ARMneon_VQADDS: return "vqadd";
+      case ARMneon_VQSUBU: return "vqsub";
+      case ARMneon_VQSUBS: return "vqsub";
+      case ARMneon_VCGTU:  return "vcgt";
+      case ARMneon_VCGTS:  return "vcgt";
+      case ARMneon_VCGTF:  return "vcgt";
+      case ARMneon_VCGEF:  return "vcgt";
+      case ARMneon_VCGEU:  return "vcge";
+      case ARMneon_VCGES:  return "vcge";
+      case ARMneon_VCEQ:  return "vceq";
+      case ARMneon_VCEQF:  return "vceq";
+      case ARMneon_VPADD:   return "vpadd";
+      case ARMneon_VPMINU:   return "vpmin";
+      case ARMneon_VPMINS:   return "vpmin";
+      case ARMneon_VPMINF:   return "vpmin";
+      case ARMneon_VPMAXU:   return "vpmax";
+      case ARMneon_VPMAXS:   return "vpmax";
+      case ARMneon_VPMAXF:   return "vpmax";
+      case ARMneon_VEXT:   return "vext";
+      case ARMneon_VMUL:   return "vmuli";
+      case ARMneon_VMULLU:   return "vmull";
+      case ARMneon_VMULLS:   return "vmull";
+      case ARMneon_VMULP:  return "vmul";
+      case ARMneon_VMULFP:  return "vmul";
+      case ARMneon_VMULLP:  return "vmul";
+      case ARMneon_VQDMULH: return "vqdmulh";
+      case ARMneon_VQRDMULH: return "vqrdmulh";
+      case ARMneon_VQDMULL: return "vqdmull";
+      case ARMneon_VTBL: return "vtbl";
+      case ARMneon_VRECPS: return "vrecps";
+      case ARMneon_VRSQRTS: return "vrecps";
+      case ARMneon_INVALID: return "??invalid??";
+      /* ... */
+      default: vpanic("showARMNeonBinOp");
+   }
+}
+
/* Data-type suffix stem for a binary Neon operation: "" for the
   size-less bitwise ops, ".i" for plain integer, ".u"/".s" for
   unsigned/signed variants, ".p" for polynomial and ".f" for
   floating-point.  The lane width digits are appended separately by
   showARMNeonDataSize. */
const HChar* showARMNeonBinOpDataType ( ARMNeonBinOp op ) {
   switch (op) {
      case ARMneon_VAND:
      case ARMneon_VORR:
      case ARMneon_VXOR:
         return "";
      case ARMneon_VADD:
      case ARMneon_VSUB:
      case ARMneon_VEXT:
      case ARMneon_VMUL:
      case ARMneon_VPADD:
      case ARMneon_VTBL:
      case ARMneon_VCEQ:
         return ".i";
      case ARMneon_VRHADDU:
      case ARMneon_VMINU:
      case ARMneon_VMAXU:
      case ARMneon_VQADDU:
      case ARMneon_VQSUBU:
      case ARMneon_VCGTU:
      case ARMneon_VCGEU:
      case ARMneon_VMULLU:
      case ARMneon_VPMINU:
      case ARMneon_VPMAXU:
         return ".u";
      case ARMneon_VRHADDS:
      case ARMneon_VMINS:
      case ARMneon_VMAXS:
      case ARMneon_VQADDS:
      case ARMneon_VQSUBS:
      case ARMneon_VCGTS:
      case ARMneon_VCGES:
      case ARMneon_VQDMULL:
      case ARMneon_VMULLS:
      case ARMneon_VPMINS:
      case ARMneon_VPMAXS:
      case ARMneon_VQDMULH:
      case ARMneon_VQRDMULH:
         return ".s";
      case ARMneon_VMULP:
      case ARMneon_VMULLP:
         return ".p";
      case ARMneon_VADDFP:
      case ARMneon_VABDFP:
      case ARMneon_VPADDFP:
      case ARMneon_VSUBFP:
      case ARMneon_VMULFP:
      case ARMneon_VMINF:
      case ARMneon_VMAXF:
      case ARMneon_VPMINF:
      case ARMneon_VPMAXF:
      case ARMneon_VCGTF:
      case ARMneon_VCGEF:
      case ARMneon_VCEQF:
      case ARMneon_VRECPS:
      case ARMneon_VRSQRTS:
         return ".f";
      /* ... */
      default: vpanic("showARMNeonBinOpDataType");
   }
}
+
/* Mnemonic for a unary Neon operation.  Several distinct ops share
   one printed mnemonic (e.g. all the COPY* variants print "vmov" and
   all conversions print "vcvt"); the distinguishing data-type suffix
   comes from showARMNeonUnOpDataType. */
const HChar* showARMNeonUnOp ( ARMNeonUnOp op ) {
   switch (op) {
      case ARMneon_COPY: return "vmov";
      case ARMneon_COPYLS: return "vmov";
      case ARMneon_COPYLU: return "vmov";
      case ARMneon_COPYN: return "vmov";
      case ARMneon_COPYQNSS: return "vqmovn";
      case ARMneon_COPYQNUS: return "vqmovun";
      case ARMneon_COPYQNUU: return "vqmovn";
      case ARMneon_NOT: return "vmvn";
      case ARMneon_EQZ: return "vceq";
      case ARMneon_CNT: return "vcnt";
      case ARMneon_CLS: return "vcls";
      case ARMneon_CLZ: return "vclz";
      case ARMneon_DUP: return "vdup";
      case ARMneon_PADDLS: return "vpaddl";
      case ARMneon_PADDLU: return "vpaddl";
      case ARMneon_VQSHLNSS: return "vqshl";
      case ARMneon_VQSHLNUU: return "vqshl";
      case ARMneon_VQSHLNUS: return "vqshlu";
      case ARMneon_REV16: return "vrev16";
      case ARMneon_REV32: return "vrev32";
      case ARMneon_REV64: return "vrev64";
      case ARMneon_VCVTFtoU: return "vcvt";
      case ARMneon_VCVTFtoS: return "vcvt";
      case ARMneon_VCVTUtoF: return "vcvt";
      case ARMneon_VCVTStoF: return "vcvt";
      case ARMneon_VCVTFtoFixedU: return "vcvt";
      case ARMneon_VCVTFtoFixedS: return "vcvt";
      case ARMneon_VCVTFixedUtoF: return "vcvt";
      case ARMneon_VCVTFixedStoF: return "vcvt";
      case ARMneon_VCVTF32toF16: return "vcvt";
      case ARMneon_VCVTF16toF32: return "vcvt";
      case ARMneon_VRECIP: return "vrecip";
      case ARMneon_VRECIPF: return "vrecipf";
      case ARMneon_VNEGF: return "vneg";
      case ARMneon_ABS: return "vabs";
      case ARMneon_VABSFP: return "vabsfp";
      case ARMneon_VRSQRTEFP: return "vrsqrtefp";
      case ARMneon_VRSQRTE: return "vrsqrte";
      /* ... */
      default: vpanic("showARMNeonUnOp");
   }
}
+
/* Data-type suffix for a unary Neon operation; the conversions carry
   a full "<dst>.<src>" suffix, everything else gets the usual
   ""/".i"/".u"/".s"/".f" stem (lane width appended by
   showARMNeonDataSize). */
const HChar* showARMNeonUnOpDataType ( ARMNeonUnOp op ) {
   switch (op) {
      case ARMneon_COPY:
      case ARMneon_NOT:
         return "";
      case ARMneon_COPYN:
      case ARMneon_EQZ:
      case ARMneon_CNT:
      case ARMneon_DUP:
      case ARMneon_REV16:
      case ARMneon_REV32:
      case ARMneon_REV64:
         return ".i";
      case ARMneon_COPYLU:
      case ARMneon_PADDLU:
      case ARMneon_COPYQNUU:
      case ARMneon_VQSHLNUU:
      case ARMneon_VRECIP:
      case ARMneon_VRSQRTE:
         return ".u";
      case ARMneon_CLS:
      case ARMneon_CLZ:
      case ARMneon_COPYLS:
      case ARMneon_PADDLS:
      case ARMneon_COPYQNSS:
      case ARMneon_COPYQNUS:
      case ARMneon_VQSHLNSS:
      case ARMneon_VQSHLNUS:
      case ARMneon_ABS:
         return ".s";
      case ARMneon_VRECIPF:
      case ARMneon_VNEGF:
      case ARMneon_VABSFP:
      case ARMneon_VRSQRTEFP:
         return ".f";
      case ARMneon_VCVTFtoU: return ".u32.f32";
      case ARMneon_VCVTFtoS: return ".s32.f32";
      case ARMneon_VCVTUtoF: return ".f32.u32";
      case ARMneon_VCVTStoF: return ".f32.s32";
      case ARMneon_VCVTF16toF32: return ".f32.f16";
      case ARMneon_VCVTF32toF16: return ".f16.f32";
      case ARMneon_VCVTFtoFixedU: return ".u32.f32";
      case ARMneon_VCVTFtoFixedS: return ".s32.f32";
      case ARMneon_VCVTFixedUtoF: return ".f32.u32";
      case ARMneon_VCVTFixedStoF: return ".f32.s32";
      /* ... */
      default: vpanic("showARMNeonUnOpDataType");
   }
}
+
+const HChar* showARMNeonUnOpS ( ARMNeonUnOpS op ) {
+   switch (op) {
+      case ARMneon_SETELEM: return "vmov";
+      case ARMneon_GETELEMU: return "vmov";
+      case ARMneon_GETELEMS: return "vmov";
+      case ARMneon_VDUP: return "vdup";
+      /* ... */
+      default: vpanic("showARMNeonUnarySOp");
+   }
+}
+
+const HChar* showARMNeonUnOpSDataType ( ARMNeonUnOpS op ) {
+   switch (op) {
+      case ARMneon_SETELEM:
+      case ARMneon_VDUP:
+         return ".i";
+      case ARMneon_GETELEMS:
+         return ".s";
+      case ARMneon_GETELEMU:
+         return ".u";
+      /* ... */
+      default: vpanic("showARMNeonUnarySOp");
+   }
+}
+
+const HChar* showARMNeonShiftOp ( ARMNeonShiftOp op ) {
+   switch (op) {
+      case ARMneon_VSHL: return "vshl";
+      case ARMneon_VSAL: return "vshl";
+      case ARMneon_VQSHL: return "vqshl";
+      case ARMneon_VQSAL: return "vqshl";
+      /* ... */
+      default: vpanic("showARMNeonShiftOp");
+   }
+}
+
+const HChar* showARMNeonShiftOpDataType ( ARMNeonShiftOp op ) {
+   switch (op) {
+      case ARMneon_VSHL:
+      case ARMneon_VQSHL:
+         return ".u";
+      case ARMneon_VSAL:
+      case ARMneon_VQSAL:
+         return ".s";
+      /* ... */
+      default: vpanic("showARMNeonShiftOpDataType");
+   }
+}
+
+const HChar* showARMNeonDualOp ( ARMNeonDualOp op ) {
+   switch (op) {
+      case ARMneon_TRN: return "vtrn";
+      case ARMneon_ZIP: return "vzip";
+      case ARMneon_UZP: return "vuzp";
+      /* ... */
+      default: vpanic("showARMNeonDualOp");
+   }
+}
+
+const HChar* showARMNeonDualOpDataType ( ARMNeonDualOp op ) {
+   switch (op) {
+      case ARMneon_TRN:
+      case ARMneon_ZIP:
+      case ARMneon_UZP:
+         return "i";
+      /* ... */
+      default: vpanic("showARMNeonDualOp");
+   }
+}
+
+static const HChar* showARMNeonDataSize_wrk ( UInt size )
+{
+   switch (size) {
+      case 0: return "8";
+      case 1: return "16";
+      case 2: return "32";
+      case 3: return "64";
+      default: vpanic("showARMNeonDataSize");
+   }
+}
+
/* Lane-size suffix ("8"/"16"/"32"/"64", or "" where the printed form
   carries no size) for a Neon instruction.  Most instructions use
   the common 0..3 encoding handled by showARMNeonDataSize_wrk; the
   VQSHLN* and VDUP cases pack the size into the .size field
   differently and are decoded inline. */
static const HChar* showARMNeonDataSize ( const ARMInstr* i )
{
   switch (i->tag) {
      case ARMin_NBinary:
         /* VEXT always operates on bytes; the bitwise ops take no
            size at all. */
         if (i->ARMin.NBinary.op == ARMneon_VEXT)
            return "8";
         if (i->ARMin.NBinary.op == ARMneon_VAND ||
             i->ARMin.NBinary.op == ARMneon_VORR ||
             i->ARMin.NBinary.op == ARMneon_VXOR)
            return "";
         return showARMNeonDataSize_wrk(i->ARMin.NBinary.size);
      case ARMin_NUnary:
         /* These print no size (the conversions carry a full
            "<dst>.<src>" suffix instead). */
         if (i->ARMin.NUnary.op == ARMneon_COPY ||
             i->ARMin.NUnary.op == ARMneon_NOT ||
             i->ARMin.NUnary.op == ARMneon_VCVTF32toF16||
             i->ARMin.NUnary.op == ARMneon_VCVTF16toF32||
             i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedS ||
             i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedU ||
             i->ARMin.NUnary.op == ARMneon_VCVTFixedStoF ||
             i->ARMin.NUnary.op == ARMneon_VCVTFixedUtoF ||
             i->ARMin.NUnary.op == ARMneon_VCVTFtoS ||
             i->ARMin.NUnary.op == ARMneon_VCVTFtoU ||
             i->ARMin.NUnary.op == ARMneon_VCVTStoF ||
             i->ARMin.NUnary.op == ARMneon_VCVTUtoF)
            return "";
         if (i->ARMin.NUnary.op == ARMneon_VQSHLNSS ||
             i->ARMin.NUnary.op == ARMneon_VQSHLNUU ||
             i->ARMin.NUnary.op == ARMneon_VQSHLNUS) {
            /* For VQSHLN*, .size holds a shift-amount encoding in
               which the highest set marker bit selects the lane
               width. */
            UInt size;
            size = i->ARMin.NUnary.size;
            if (size & 0x40)
               return "64";
            if (size & 0x20)
               return "32";
            if (size & 0x10)
               return "16";
            if (size & 0x08)
               return "8";
            vpanic("showARMNeonDataSize");
         }
         return showARMNeonDataSize_wrk(i->ARMin.NUnary.size);
      case ARMin_NUnaryS:
         if (i->ARMin.NUnaryS.op == ARMneon_VDUP) {
            /* For VDUP, the position of the lowest set bit in .size
               selects the lane width. */
            int size;
            size = i->ARMin.NUnaryS.size;
            if ((size & 1) == 1)
               return "8";
            if ((size & 3) == 2)
               return "16";
            if ((size & 7) == 4)
               return "32";
            vpanic("showARMNeonDataSize");
         }
         return showARMNeonDataSize_wrk(i->ARMin.NUnaryS.size);
      case ARMin_NShift:
         return showARMNeonDataSize_wrk(i->ARMin.NShift.size);
      case ARMin_NDual:
         return showARMNeonDataSize_wrk(i->ARMin.NDual.size);
      default:
         vpanic("showARMNeonDataSize");
   }
}
+
+/* --------- Constructors for ARMInstr. --------- */
+/* Each ARMInstr_* helper allocates one ARMInstr node via
+   LibVEX_Alloc_inline (presumably infallible — allocation failure is
+   handled inside LibVEX; confirm), sets the tag, fills the
+   tag-specific union fields, and returns the node.  vasserts enforce
+   build-time invariants on the operands. */
+
+ARMInstr* ARMInstr_Alu ( ARMAluOp op,
+                         HReg dst, HReg argL, ARMRI84* argR ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag            = ARMin_Alu;
+   i->ARMin.Alu.op   = op;
+   i->ARMin.Alu.dst  = dst;
+   i->ARMin.Alu.argL = argL;
+   i->ARMin.Alu.argR = argR;
+   return i;
+}
+ARMInstr* ARMInstr_Shift  ( ARMShiftOp op,
+                            HReg dst, HReg argL, ARMRI5* argR ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag              = ARMin_Shift;
+   i->ARMin.Shift.op   = op;
+   i->ARMin.Shift.dst  = dst;
+   i->ARMin.Shift.argL = argL;
+   i->ARMin.Shift.argR = argR;
+   return i;
+}
+ARMInstr* ARMInstr_Unary ( ARMUnaryOp op, HReg dst, HReg src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag             = ARMin_Unary;
+   i->ARMin.Unary.op  = op;
+   i->ARMin.Unary.dst = dst;
+   i->ARMin.Unary.src = src;
+   return i;
+}
+ARMInstr* ARMInstr_CmpOrTst ( Bool isCmp, HReg argL, ARMRI84* argR ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                  = ARMin_CmpOrTst;
+   i->ARMin.CmpOrTst.isCmp = isCmp;
+   i->ARMin.CmpOrTst.argL  = argL;
+   i->ARMin.CmpOrTst.argR  = argR;
+   return i;
+}
+ARMInstr* ARMInstr_Mov ( HReg dst, ARMRI84* src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag           = ARMin_Mov;
+   i->ARMin.Mov.dst = dst;
+   i->ARMin.Mov.src = src;
+   return i;
+}
+ARMInstr* ARMInstr_Imm32  ( HReg dst, UInt imm32 ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag               = ARMin_Imm32;
+   i->ARMin.Imm32.dst   = dst;
+   i->ARMin.Imm32.imm32 = imm32;
+   return i;
+}
+/* The load/store constructors reject ARMcc_NV: the "never" condition
+   is reserved/unpredictable in ARM and must not be generated. */
+ARMInstr* ARMInstr_LdSt32 ( ARMCondCode cc,
+                            Bool isLoad, HReg rD, ARMAMode1* amode ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                 = ARMin_LdSt32;
+   i->ARMin.LdSt32.cc     = cc;
+   i->ARMin.LdSt32.isLoad = isLoad;
+   i->ARMin.LdSt32.rD     = rD;
+   i->ARMin.LdSt32.amode  = amode;
+   vassert(cc != ARMcc_NV);
+   return i;
+}
+ARMInstr* ARMInstr_LdSt16 ( ARMCondCode cc,
+                            Bool isLoad, Bool signedLoad,
+                            HReg rD, ARMAMode2* amode ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                     = ARMin_LdSt16;
+   i->ARMin.LdSt16.cc         = cc;
+   i->ARMin.LdSt16.isLoad     = isLoad;
+   i->ARMin.LdSt16.signedLoad = signedLoad;
+   i->ARMin.LdSt16.rD         = rD;
+   i->ARMin.LdSt16.amode      = amode;
+   vassert(cc != ARMcc_NV);
+   return i;
+}
+ARMInstr* ARMInstr_LdSt8U ( ARMCondCode cc,
+                            Bool isLoad, HReg rD, ARMAMode1* amode ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                 = ARMin_LdSt8U;
+   i->ARMin.LdSt8U.cc     = cc;
+   i->ARMin.LdSt8U.isLoad = isLoad;
+   i->ARMin.LdSt8U.rD     = rD;
+   i->ARMin.LdSt8U.amode  = amode;
+   vassert(cc != ARMcc_NV);
+   return i;
+}
+/* Signed 8-bit load only -- there is no signed 8-bit store. */
+ARMInstr* ARMInstr_Ld8S ( ARMCondCode cc, HReg rD, ARMAMode2* amode ) {
+   ARMInstr* i         = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag              = ARMin_Ld8S;
+   i->ARMin.Ld8S.cc    = cc;
+   i->ARMin.Ld8S.rD    = rD;
+   i->ARMin.Ld8S.amode = amode;
+   vassert(cc != ARMcc_NV);
+   return i;
+}
+/* Translation-chaining exits: XDirect (known target, patchable),
+   XIndir (target in a register), XAssisted (exit via the dispatcher
+   with a jump-kind code). */
+ARMInstr* ARMInstr_XDirect ( Addr32 dstGA, ARMAMode1* amR15T,
+                             ARMCondCode cond, Bool toFastEP ) {
+   ARMInstr* i               = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                    = ARMin_XDirect;
+   i->ARMin.XDirect.dstGA    = dstGA;
+   i->ARMin.XDirect.amR15T   = amR15T;
+   i->ARMin.XDirect.cond     = cond;
+   i->ARMin.XDirect.toFastEP = toFastEP;
+   return i;
+}
+ARMInstr* ARMInstr_XIndir ( HReg dstGA, ARMAMode1* amR15T,
+                            ARMCondCode cond ) {
+   ARMInstr* i            = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                 = ARMin_XIndir;
+   i->ARMin.XIndir.dstGA  = dstGA;
+   i->ARMin.XIndir.amR15T = amR15T;
+   i->ARMin.XIndir.cond   = cond;
+   return i;
+}
+ARMInstr* ARMInstr_XAssisted ( HReg dstGA, ARMAMode1* amR15T,
+                               ARMCondCode cond, IRJumpKind jk ) {
+   ARMInstr* i               = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                    = ARMin_XAssisted;
+   i->ARMin.XAssisted.dstGA  = dstGA;
+   i->ARMin.XAssisted.amR15T = amR15T;
+   i->ARMin.XAssisted.cond   = cond;
+   i->ARMin.XAssisted.jk     = jk;
+   return i;
+}
+/* Conditional move: an unconditional (AL) CMov would just be a Mov,
+   so it is rejected here. */
+ARMInstr* ARMInstr_CMov ( ARMCondCode cond, HReg dst, ARMRI84* src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag             = ARMin_CMov;
+   i->ARMin.CMov.cond = cond;
+   i->ARMin.CMov.dst  = dst;
+   i->ARMin.CMov.src  = src;
+   vassert(cond != ARMcc_AL);
+   return i;
+}
+ARMInstr* ARMInstr_Call ( ARMCondCode cond, Addr32 target, Int nArgRegs,
+                          RetLoc rloc ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                 = ARMin_Call;
+   i->ARMin.Call.cond     = cond;
+   i->ARMin.Call.target   = target;
+   i->ARMin.Call.nArgRegs = nArgRegs;
+   i->ARMin.Call.rloc     = rloc;
+   vassert(is_sane_RetLoc(rloc));
+   return i;
+}
+ARMInstr* ARMInstr_Mul ( ARMMulOp op ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag          = ARMin_Mul;
+   i->ARMin.Mul.op = op;
+   return i;
+}
+/* Exclusive load/store: only the 4 hardware-supported access widths
+   are representable. */
+ARMInstr* ARMInstr_LdrEX ( Int szB ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag             = ARMin_LdrEX;
+   i->ARMin.LdrEX.szB = szB;
+   vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
+   return i;
+}
+ARMInstr* ARMInstr_StrEX ( Int szB ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag             = ARMin_StrEX;
+   i->ARMin.StrEX.szB = szB;
+   vassert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
+   return i;
+}
+/* VFP double/single loads-stores and arithmetic. */
+ARMInstr* ARMInstr_VLdStD ( Bool isLoad, HReg dD, ARMAModeV* am ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                 = ARMin_VLdStD;
+   i->ARMin.VLdStD.isLoad = isLoad;
+   i->ARMin.VLdStD.dD     = dD;
+   i->ARMin.VLdStD.amode  = am;
+   return i;
+}
+ARMInstr* ARMInstr_VLdStS ( Bool isLoad, HReg fD, ARMAModeV* am ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                 = ARMin_VLdStS;
+   i->ARMin.VLdStS.isLoad = isLoad;
+   i->ARMin.VLdStS.fD     = fD;
+   i->ARMin.VLdStS.amode  = am;
+   return i;
+}
+ARMInstr* ARMInstr_VAluD ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag              = ARMin_VAluD;
+   i->ARMin.VAluD.op   = op;
+   i->ARMin.VAluD.dst  = dst;
+   i->ARMin.VAluD.argL = argL;
+   i->ARMin.VAluD.argR = argR;
+   return i;
+}
+ARMInstr* ARMInstr_VAluS ( ARMVfpOp op, HReg dst, HReg argL, HReg argR ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag              = ARMin_VAluS;
+   i->ARMin.VAluS.op   = op;
+   i->ARMin.VAluS.dst  = dst;
+   i->ARMin.VAluS.argL = argL;
+   i->ARMin.VAluS.argR = argR;
+   return i;
+}
+ARMInstr* ARMInstr_VUnaryD ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag               = ARMin_VUnaryD;
+   i->ARMin.VUnaryD.op  = op;
+   i->ARMin.VUnaryD.dst = dst;
+   i->ARMin.VUnaryD.src = src;
+   return i;
+}
+ARMInstr* ARMInstr_VUnaryS ( ARMVfpUnaryOp op, HReg dst, HReg src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag               = ARMin_VUnaryS;
+   i->ARMin.VUnaryS.op  = op;
+   i->ARMin.VUnaryS.dst = dst;
+   i->ARMin.VUnaryS.src = src;
+   return i;
+}
+ARMInstr* ARMInstr_VCmpD ( HReg argL, HReg argR ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag              = ARMin_VCmpD;
+   i->ARMin.VCmpD.argL = argL;
+   i->ARMin.VCmpD.argR = argR;
+   return i;
+}
+ARMInstr* ARMInstr_VCMovD ( ARMCondCode cond, HReg dst, HReg src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag               = ARMin_VCMovD;
+   i->ARMin.VCMovD.cond = cond;
+   i->ARMin.VCMovD.dst  = dst;
+   i->ARMin.VCMovD.src  = src;
+   vassert(cond != ARMcc_AL);
+   return i;
+}
+ARMInstr* ARMInstr_VCMovS ( ARMCondCode cond, HReg dst, HReg src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag               = ARMin_VCMovS;
+   i->ARMin.VCMovS.cond = cond;
+   i->ARMin.VCMovS.dst  = dst;
+   i->ARMin.VCMovS.src  = src;
+   vassert(cond != ARMcc_AL);
+   return i;
+}
+ARMInstr* ARMInstr_VCvtSD ( Bool sToD, HReg dst, HReg src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag               = ARMin_VCvtSD;
+   i->ARMin.VCvtSD.sToD = sToD;
+   i->ARMin.VCvtSD.dst  = dst;
+   i->ARMin.VCvtSD.src  = src;
+   return i;
+}
+/* Core<->VFP register transfers: D regs move as an rHi:rLo pair, S
+   regs as a single core register. */
+ARMInstr* ARMInstr_VXferD ( Bool toD, HReg dD, HReg rHi, HReg rLo ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag              = ARMin_VXferD;
+   i->ARMin.VXferD.toD = toD;
+   i->ARMin.VXferD.dD  = dD;
+   i->ARMin.VXferD.rHi = rHi;
+   i->ARMin.VXferD.rLo = rLo;
+   return i;
+}
+ARMInstr* ARMInstr_VXferS ( Bool toS, HReg fD, HReg rLo ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag              = ARMin_VXferS;
+   i->ARMin.VXferS.toS = toS;
+   i->ARMin.VXferS.fD  = fD;
+   i->ARMin.VXferS.rLo = rLo;
+   return i;
+}
+ARMInstr* ARMInstr_VCvtID ( Bool iToD, Bool syned,
+                            HReg dst, HReg src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                = ARMin_VCvtID;
+   i->ARMin.VCvtID.iToD  = iToD;
+   i->ARMin.VCvtID.syned = syned;
+   i->ARMin.VCvtID.dst   = dst;
+   i->ARMin.VCvtID.src   = src;
+   return i;
+}
+ARMInstr* ARMInstr_FPSCR ( Bool toFPSCR, HReg iReg ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                 = ARMin_FPSCR;
+   i->ARMin.FPSCR.toFPSCR = toFPSCR;
+   i->ARMin.FPSCR.iReg    = iReg;
+   return i;
+}
+/* Operand-less pseudo-instructions: full memory fence, and clearing
+   of the local exclusive-access monitor. */
+ARMInstr* ARMInstr_MFence ( void ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag      = ARMin_MFence;
+   return i;
+}
+ARMInstr* ARMInstr_CLREX( void ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag      = ARMin_CLREX;
+   return i;
+}
+
+/* --------- Constructors for Neon (SIMD) ARMInstrs. --------- */
+
+ARMInstr* ARMInstr_NLdStQ ( Bool isLoad, HReg dQ, ARMAModeN *amode ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                  = ARMin_NLdStQ;
+   i->ARMin.NLdStQ.isLoad  = isLoad;
+   i->ARMin.NLdStQ.dQ      = dQ;
+   i->ARMin.NLdStQ.amode   = amode;
+   return i;
+}
+
+ARMInstr* ARMInstr_NLdStD ( Bool isLoad, HReg dD, ARMAModeN *amode ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                  = ARMin_NLdStD;
+   i->ARMin.NLdStD.isLoad  = isLoad;
+   i->ARMin.NLdStD.dD      = dD;
+   i->ARMin.NLdStD.amode   = amode;
+   return i;
+}
+
+/* Q selects 128-bit (Q-register) rather than 64-bit (D-register)
+   operation; "size" is op-specific (lane width or immediate). */
+ARMInstr* ARMInstr_NUnary ( ARMNeonUnOp op, HReg dQ, HReg nQ,
+                            UInt size, Bool Q ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                = ARMin_NUnary;
+   i->ARMin.NUnary.op   = op;
+   i->ARMin.NUnary.src  = nQ;
+   i->ARMin.NUnary.dst  = dQ;
+   i->ARMin.NUnary.size = size;
+   i->ARMin.NUnary.Q    = Q;
+   return i;
+}
+
+ARMInstr* ARMInstr_NUnaryS ( ARMNeonUnOpS op, ARMNRS* dst, ARMNRS* src,
+                             UInt size, Bool Q ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                = ARMin_NUnaryS;
+   i->ARMin.NUnaryS.op   = op;
+   i->ARMin.NUnaryS.src  = src;
+   i->ARMin.NUnaryS.dst  = dst;
+   i->ARMin.NUnaryS.size = size;
+   i->ARMin.NUnaryS.Q    = Q;
+   return i;
+}
+
+/* Dual ops (vtrn/vzip/vuzp) modify both registers in place. */
+ARMInstr* ARMInstr_NDual ( ARMNeonDualOp op, HReg nQ, HReg mQ,
+                           UInt size, Bool Q ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                = ARMin_NDual;
+   i->ARMin.NDual.op   = op;
+   i->ARMin.NDual.arg1 = nQ;
+   i->ARMin.NDual.arg2 = mQ;
+   i->ARMin.NDual.size = size;
+   i->ARMin.NDual.Q    = Q;
+   return i;
+}
+
+ARMInstr* ARMInstr_NBinary ( ARMNeonBinOp op,
+                             HReg dst, HReg argL, HReg argR,
+                             UInt size, Bool Q ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                = ARMin_NBinary;
+   i->ARMin.NBinary.op   = op;
+   i->ARMin.NBinary.argL = argL;
+   i->ARMin.NBinary.argR = argR;
+   i->ARMin.NBinary.dst  = dst;
+   i->ARMin.NBinary.size = size;
+   i->ARMin.NBinary.Q    = Q;
+   return i;
+}
+
+ARMInstr* ARMInstr_NeonImm (HReg dst, ARMNImm* imm ) {
+   ARMInstr *i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag         = ARMin_NeonImm;
+   i->ARMin.NeonImm.dst = dst;
+   i->ARMin.NeonImm.imm = imm;
+   return i;
+}
+
+/* Conditional 128-bit move; as with CMov, an AL condition would just
+   be a plain move and is rejected. */
+ARMInstr* ARMInstr_NCMovQ ( ARMCondCode cond, HReg dst, HReg src ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag               = ARMin_NCMovQ;
+   i->ARMin.NCMovQ.cond = cond;
+   i->ARMin.NCMovQ.dst  = dst;
+   i->ARMin.NCMovQ.src  = src;
+   vassert(cond != ARMcc_AL);
+   return i;
+}
+
+ARMInstr* ARMInstr_NShift ( ARMNeonShiftOp op,
+                            HReg dst, HReg argL, HReg argR,
+                            UInt size, Bool Q ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                = ARMin_NShift;
+   i->ARMin.NShift.op   = op;
+   i->ARMin.NShift.argL = argL;
+   i->ARMin.NShift.argR = argR;
+   i->ARMin.NShift.dst  = dst;
+   i->ARMin.NShift.size = size;
+   i->ARMin.NShift.Q    = Q;
+   return i;
+}
+
+/* 64-bit immediate shift left; a zero shift would be a no-op and 64+
+   is unencodable, hence the 1..63 restriction. */
+ARMInstr* ARMInstr_NShl64 ( HReg dst, HReg src, UInt amt )
+{
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag              = ARMin_NShl64;
+   i->ARMin.NShl64.dst = dst;
+   i->ARMin.NShl64.src = src;
+   i->ARMin.NShl64.amt = amt;
+   vassert(amt >= 1 && amt <= 63);
+   return i;
+}
+
+/* Helper copy-pasted from isel.c */
+/* Test whether u is representable as an ARM "modified immediate":
+   an 8-bit value (*u8) combined with a 4-bit rotation count (*u4),
+   per the A32 data-processing immediate encoding.  Each iteration
+   checks whether the current rotation of u fits in 8 bits, then
+   rotates by ROR32(u, 30) (presumably rotate-right by 30, i.e. two
+   bits left, matching the encoding's 2*rot granularity — confirm
+   against the ROR32 definition).  On success returns True with
+   *u8/*u4 filled in; on failure returns False and leaves the outputs
+   untouched. */
+static Bool fitsIn8x4 ( UInt* u8, UInt* u4, UInt u )
+{
+   UInt i;
+   for (i = 0; i < 16; i++) {
+      if (0 == (u & 0xFFFFFF00)) {
+         *u8 = u;
+         *u4 = i;
+         return True;
+      }
+      u = ROR32(u, 30);
+   }
+   /* Loop ran to completion; the assert merely documents that. */
+   vassert(i == 16);
+   return False;
+}
+
+/* Build rD = rN + imm32.  If imm32 encodes as an ARM modified
+   immediate, a single plain ADD (ARMin_Alu) is produced; otherwise
+   the multi-instruction ARMin_Add32 pseudo-op carries the full
+   32-bit immediate to be materialised at emit time. */
+ARMInstr* ARMInstr_Add32 ( HReg rD, HReg rN, UInt imm32 ) {
+   UInt u8, u4;
+   ARMInstr *i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   /* Try to generate single ADD if possible */
+   if (fitsIn8x4(&u8, &u4, imm32)) {
+      i->tag            = ARMin_Alu;
+      i->ARMin.Alu.op   = ARMalu_ADD;
+      i->ARMin.Alu.dst  = rD;
+      i->ARMin.Alu.argL = rN;
+      i->ARMin.Alu.argR = ARMRI84_I84(u8, u4);
+   } else {
+      i->tag               = ARMin_Add32;
+      i->ARMin.Add32.rD    = rD;
+      i->ARMin.Add32.rN    = rN;
+      i->ARMin.Add32.imm32 = imm32;
+   }
+   return i;
+}
+
+/* Event check: decrement the counter at amCounter and, when it goes
+   negative, branch to the address held at amFailAddr (see ppARMInstr
+   for the expansion). */
+ARMInstr* ARMInstr_EvCheck ( ARMAMode1* amCounter,
+                             ARMAMode1* amFailAddr ) {
+   ARMInstr* i                 = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag                      = ARMin_EvCheck;
+   i->ARMin.EvCheck.amCounter  = amCounter;
+   i->ARMin.EvCheck.amFailAddr = amFailAddr;
+   return i;
+}
+
+/* 64-bit profile-counter increment; the counter's address is patched
+   in later, hence no operands here. */
+ARMInstr* ARMInstr_ProfInc ( void ) {
+   ARMInstr* i = LibVEX_Alloc_inline(sizeof(ARMInstr));
+   i->tag      = ARMin_ProfInc;
+   return i;
+}
+
+/* ... */
+
+/* Pretty-print an ARMInstr to the VEX log.  The output approximates
+   ARM assembly; pseudo-instructions (XDirect/XIndir/XAssisted,
+   EvCheck, ProfInc) are shown as the instruction sequences they
+   expand to at emit time.  Fixes versus the previous version:
+   XAssisted now prints the "; " separator after the amode (matching
+   XDirect/XIndir); ProfInc prints "movt" for the HI16 load (two
+   movw's cannot build a 32-bit constant); UInt values are printed
+   with %u rather than %d. */
+void ppARMInstr ( const ARMInstr* i ) {
+   switch (i->tag) {
+      case ARMin_Alu:
+         vex_printf("%-4s  ", showARMAluOp(i->ARMin.Alu.op));
+         ppHRegARM(i->ARMin.Alu.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.Alu.argL);
+         vex_printf(", ");
+         ppARMRI84(i->ARMin.Alu.argR);
+         return;
+      case ARMin_Shift:
+         vex_printf("%s   ", showARMShiftOp(i->ARMin.Shift.op));
+         ppHRegARM(i->ARMin.Shift.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.Shift.argL);
+         vex_printf(", ");
+         ppARMRI5(i->ARMin.Shift.argR);
+         return;
+      case ARMin_Unary:
+         vex_printf("%s   ", showARMUnaryOp(i->ARMin.Unary.op));
+         ppHRegARM(i->ARMin.Unary.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.Unary.src);
+         return;
+      case ARMin_CmpOrTst:
+         vex_printf("%s   ", i->ARMin.CmpOrTst.isCmp ? "cmp" : "tst");
+         ppHRegARM(i->ARMin.CmpOrTst.argL);
+         vex_printf(", ");
+         ppARMRI84(i->ARMin.CmpOrTst.argR);
+         return;
+      case ARMin_Mov:
+         vex_printf("mov   ");
+         ppHRegARM(i->ARMin.Mov.dst);
+         vex_printf(", ");
+         ppARMRI84(i->ARMin.Mov.src);
+         return;
+      case ARMin_Imm32:
+         vex_printf("imm   ");
+         ppHRegARM(i->ARMin.Imm32.dst);
+         vex_printf(", 0x%x", i->ARMin.Imm32.imm32);
+         return;
+      /* Loads print "rD, amode"; stores print "amode, rD". */
+      case ARMin_LdSt32:
+         if (i->ARMin.LdSt32.isLoad) {
+            vex_printf("ldr%s ", i->ARMin.LdSt32.cc == ARMcc_AL ? "  "
+                                    : showARMCondCode(i->ARMin.LdSt32.cc));
+            ppHRegARM(i->ARMin.LdSt32.rD);
+            vex_printf(", ");
+            ppARMAMode1(i->ARMin.LdSt32.amode);
+         } else {
+            vex_printf("str%s ", i->ARMin.LdSt32.cc == ARMcc_AL ? "  "
+                                    : showARMCondCode(i->ARMin.LdSt32.cc));
+            ppARMAMode1(i->ARMin.LdSt32.amode);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.LdSt32.rD);
+         }
+         return;
+      case ARMin_LdSt16:
+         if (i->ARMin.LdSt16.isLoad) {
+            vex_printf("%s%s%s",
+                       i->ARMin.LdSt16.signedLoad ? "ldrsh" : "ldrh",
+                       i->ARMin.LdSt16.cc == ARMcc_AL ? "  "
+                          : showARMCondCode(i->ARMin.LdSt16.cc),
+                       i->ARMin.LdSt16.signedLoad ? " " : "  ");
+            ppHRegARM(i->ARMin.LdSt16.rD);
+            vex_printf(", ");
+            ppARMAMode2(i->ARMin.LdSt16.amode);
+         } else {
+            vex_printf("strh%s  ",
+                       i->ARMin.LdSt16.cc == ARMcc_AL ? "  "
+                          : showARMCondCode(i->ARMin.LdSt16.cc));
+            ppARMAMode2(i->ARMin.LdSt16.amode);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.LdSt16.rD);
+         }
+         return;
+      case ARMin_LdSt8U:
+         if (i->ARMin.LdSt8U.isLoad) {
+            vex_printf("ldrb%s  ", i->ARMin.LdSt8U.cc == ARMcc_AL ? "  "
+                                      : showARMCondCode(i->ARMin.LdSt8U.cc));
+            ppHRegARM(i->ARMin.LdSt8U.rD);
+            vex_printf(", ");
+            ppARMAMode1(i->ARMin.LdSt8U.amode);
+         } else {
+            vex_printf("strb%s  ", i->ARMin.LdSt8U.cc == ARMcc_AL ? "  "
+                                      : showARMCondCode(i->ARMin.LdSt8U.cc));
+            ppARMAMode1(i->ARMin.LdSt8U.amode);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.LdSt8U.rD);
+         }
+         return;
+      case ARMin_Ld8S:
+         vex_printf("ldrsb%s ", i->ARMin.Ld8S.cc == ARMcc_AL ? "  "
+                                   : showARMCondCode(i->ARMin.Ld8S.cc));
+         ppARMAMode2(i->ARMin.Ld8S.amode);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.Ld8S.rD);
+         return;
+      case ARMin_XDirect:
+         vex_printf("(xDirect) ");
+         vex_printf("if (%%cpsr.%s) { ",
+                    showARMCondCode(i->ARMin.XDirect.cond));
+         vex_printf("movw r12,0x%x; ",
+                    (UInt)(i->ARMin.XDirect.dstGA & 0xFFFF));
+         vex_printf("movt r12,0x%x; ",
+                    (UInt)((i->ARMin.XDirect.dstGA >> 16) & 0xFFFF));
+         vex_printf("str r12,");
+         ppARMAMode1(i->ARMin.XDirect.amR15T);
+         vex_printf("; movw r12,LO16($disp_cp_chain_me_to_%sEP); ",
+                    i->ARMin.XDirect.toFastEP ? "fast" : "slow");
+         vex_printf("movt r12,HI16($disp_cp_chain_me_to_%sEP); ",
+                    i->ARMin.XDirect.toFastEP ? "fast" : "slow");
+         vex_printf("blx r12 }");
+         return;
+      case ARMin_XIndir:
+         vex_printf("(xIndir) ");
+         vex_printf("if (%%cpsr.%s) { ",
+                    showARMCondCode(i->ARMin.XIndir.cond));
+         vex_printf("str ");
+         ppHRegARM(i->ARMin.XIndir.dstGA);
+         vex_printf(",");
+         ppARMAMode1(i->ARMin.XIndir.amR15T);
+         vex_printf("; movw r12,LO16($disp_cp_xindir); ");
+         vex_printf("movt r12,HI16($disp_cp_xindir); ");
+         vex_printf("blx r12 }");
+         return;
+      case ARMin_XAssisted:
+         vex_printf("(xAssisted) ");
+         vex_printf("if (%%cpsr.%s) { ",
+                    showARMCondCode(i->ARMin.XAssisted.cond));
+         vex_printf("str ");
+         ppHRegARM(i->ARMin.XAssisted.dstGA);
+         vex_printf(",");
+         ppARMAMode1(i->ARMin.XAssisted.amR15T);
+         /* "; " separator was missing here, unlike XDirect/XIndir. */
+         vex_printf("; movw r8,$IRJumpKind_to_TRCVAL(%d); ",
+                    (Int)i->ARMin.XAssisted.jk);
+         vex_printf("movw r12,LO16($disp_cp_xassisted); ");
+         vex_printf("movt r12,HI16($disp_cp_xassisted); ");
+         vex_printf("blx r12 }");
+         return;
+      case ARMin_CMov:
+         vex_printf("mov%s ", showARMCondCode(i->ARMin.CMov.cond));
+         ppHRegARM(i->ARMin.CMov.dst);
+         vex_printf(", ");
+         ppARMRI84(i->ARMin.CMov.src);
+         return;
+      case ARMin_Call:
+         vex_printf("call%s  ",
+                    i->ARMin.Call.cond==ARMcc_AL
+                       ? "" : showARMCondCode(i->ARMin.Call.cond));
+         vex_printf("0x%x [nArgRegs=%d, ",
+                    i->ARMin.Call.target, i->ARMin.Call.nArgRegs);
+         ppRetLoc(i->ARMin.Call.rloc);
+         vex_printf("]");
+         return;
+      case ARMin_Mul:
+         /* Mul uses fixed registers: args in r2/r3, result in r0
+            (and r1 for the widening forms). */
+         vex_printf("%-5s ", showARMMulOp(i->ARMin.Mul.op));
+         if (i->ARMin.Mul.op == ARMmul_PLAIN) {
+            vex_printf("r0, r2, r3");
+         } else {
+            vex_printf("r1:r0, r2, r3");
+         }
+         return;
+      case ARMin_LdrEX: {
+         const HChar* sz = "";
+         switch (i->ARMin.LdrEX.szB) {
+            case 1: sz = "b"; break; case 2: sz = "h"; break;
+            case 8: sz = "d"; break; case 4: break;
+            default: vassert(0);
+         }
+         vex_printf("ldrex%s %sr2, [r4]",
+                    sz, i->ARMin.LdrEX.szB == 8 ? "r3:" : "");
+         return;
+      }
+      case ARMin_StrEX: {
+         const HChar* sz = "";
+         switch (i->ARMin.StrEX.szB) {
+            case 1: sz = "b"; break; case 2: sz = "h"; break;
+            case 8: sz = "d"; break; case 4: break;
+            default: vassert(0);
+         }
+         vex_printf("strex%s r0, %sr2, [r4]",
+                    sz, i->ARMin.StrEX.szB == 8 ? "r3:" : "");
+         return;
+      }
+      case ARMin_VLdStD:
+         if (i->ARMin.VLdStD.isLoad) {
+            vex_printf("fldd  ");
+            ppHRegARM(i->ARMin.VLdStD.dD);
+            vex_printf(", ");
+            ppARMAModeV(i->ARMin.VLdStD.amode);
+         } else {
+            vex_printf("fstd  ");
+            ppARMAModeV(i->ARMin.VLdStD.amode);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.VLdStD.dD);
+         }
+         return;
+      case ARMin_VLdStS:
+         if (i->ARMin.VLdStS.isLoad) {
+            vex_printf("flds  ");
+            ppHRegARM(i->ARMin.VLdStS.fD);
+            vex_printf(", ");
+            ppARMAModeV(i->ARMin.VLdStS.amode);
+         } else {
+            vex_printf("fsts  ");
+            ppARMAModeV(i->ARMin.VLdStS.amode);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.VLdStS.fD);
+         }
+         return;
+      case ARMin_VAluD:
+         vex_printf("f%-3sd ", showARMVfpOp(i->ARMin.VAluD.op));
+         ppHRegARM(i->ARMin.VAluD.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VAluD.argL);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VAluD.argR);
+         return;
+      case ARMin_VAluS:
+         vex_printf("f%-3ss ", showARMVfpOp(i->ARMin.VAluS.op));
+         ppHRegARM(i->ARMin.VAluS.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VAluS.argL);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VAluS.argR);
+         return;
+      case ARMin_VUnaryD:
+         vex_printf("f%-3sd ", showARMVfpUnaryOp(i->ARMin.VUnaryD.op));
+         ppHRegARM(i->ARMin.VUnaryD.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VUnaryD.src);
+         return;
+      case ARMin_VUnaryS:
+         vex_printf("f%-3ss ", showARMVfpUnaryOp(i->ARMin.VUnaryS.op));
+         ppHRegARM(i->ARMin.VUnaryS.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VUnaryS.src);
+         return;
+      case ARMin_VCmpD:
+         vex_printf("fcmpd ");
+         ppHRegARM(i->ARMin.VCmpD.argL);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VCmpD.argR);
+         vex_printf(" ; fmstat");
+         return;
+      case ARMin_VCMovD:
+         vex_printf("fcpyd%s ", showARMCondCode(i->ARMin.VCMovD.cond));
+         ppHRegARM(i->ARMin.VCMovD.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VCMovD.src);
+         return;
+      case ARMin_VCMovS:
+         vex_printf("fcpys%s ", showARMCondCode(i->ARMin.VCMovS.cond));
+         ppHRegARM(i->ARMin.VCMovS.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VCMovS.src);
+         return;
+      case ARMin_VCvtSD:
+         vex_printf("fcvt%s ", i->ARMin.VCvtSD.sToD ? "ds" : "sd");
+         ppHRegARM(i->ARMin.VCvtSD.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VCvtSD.src);
+         return;
+      case ARMin_VXferD:
+         vex_printf("vmov  ");
+         if (i->ARMin.VXferD.toD) {
+            ppHRegARM(i->ARMin.VXferD.dD);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.VXferD.rLo);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.VXferD.rHi);
+         } else {
+            ppHRegARM(i->ARMin.VXferD.rLo);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.VXferD.rHi);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.VXferD.dD);
+         }
+         return;
+      case ARMin_VXferS:
+         vex_printf("vmov  ");
+         if (i->ARMin.VXferS.toS) {
+            ppHRegARM(i->ARMin.VXferS.fD);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.VXferS.rLo);
+         } else {
+            ppHRegARM(i->ARMin.VXferS.rLo);
+            vex_printf(", ");
+            ppHRegARM(i->ARMin.VXferS.fD);
+         }
+         return;
+      case ARMin_VCvtID: {
+         const HChar* nm = "?";
+         if (i->ARMin.VCvtID.iToD) {
+            nm = i->ARMin.VCvtID.syned ? "fsitod" : "fuitod";
+         } else {
+            nm = i->ARMin.VCvtID.syned ? "ftosid" : "ftouid";
+         }
+         vex_printf("%s ", nm);
+         ppHRegARM(i->ARMin.VCvtID.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.VCvtID.src);
+         return;
+      }
+      case ARMin_FPSCR:
+         if (i->ARMin.FPSCR.toFPSCR) {
+            vex_printf("fmxr  fpscr, ");
+            ppHRegARM(i->ARMin.FPSCR.iReg);
+         } else {
+            vex_printf("fmrx  ");
+            ppHRegARM(i->ARMin.FPSCR.iReg);
+            vex_printf(", fpscr");
+         }
+         return;
+      case ARMin_MFence:
+         vex_printf("(mfence) dsb sy; dmb sy; isb");
+         return;
+      case ARMin_CLREX:
+         vex_printf("clrex");
+         return;
+      case ARMin_NLdStQ:
+         if (i->ARMin.NLdStQ.isLoad)
+            vex_printf("vld1.32 {");
+         else
+            vex_printf("vst1.32 {");
+         ppHRegARM(i->ARMin.NLdStQ.dQ);
+         vex_printf("} ");
+         ppARMAModeN(i->ARMin.NLdStQ.amode);
+         return;
+      case ARMin_NLdStD:
+         if (i->ARMin.NLdStD.isLoad)
+            vex_printf("vld1.32 {");
+         else
+            vex_printf("vst1.32 {");
+         ppHRegARM(i->ARMin.NLdStD.dD);
+         vex_printf("} ");
+         ppARMAModeN(i->ARMin.NLdStD.amode);
+         return;
+      case ARMin_NUnary:
+         vex_printf("%s%s%s  ",
+                    showARMNeonUnOp(i->ARMin.NUnary.op),
+                    showARMNeonUnOpDataType(i->ARMin.NUnary.op),
+                    showARMNeonDataSize(i));
+         ppHRegARM(i->ARMin.NUnary.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.NUnary.src);
+         if (i->ARMin.NUnary.op == ARMneon_EQZ)
+            vex_printf(", #0");
+         if (i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedS ||
+             i->ARMin.NUnary.op == ARMneon_VCVTFtoFixedU ||
+             i->ARMin.NUnary.op == ARMneon_VCVTFixedStoF ||
+             i->ARMin.NUnary.op == ARMneon_VCVTFixedUtoF) {
+            /* %u: size is a UInt (was printed with %d). */
+            vex_printf(", #%u", i->ARMin.NUnary.size);
+         }
+         if (i->ARMin.NUnary.op == ARMneon_VQSHLNSS ||
+             i->ARMin.NUnary.op == ARMneon_VQSHLNUU ||
+             i->ARMin.NUnary.op == ARMneon_VQSHLNUS) {
+            /* Strip the lane-width marker bit to recover the shift
+               amount (see showARMNeonDataSize). */
+            UInt size;
+            size = i->ARMin.NUnary.size;
+            if (size & 0x40) {
+               vex_printf(", #%u", size - 64);
+            } else if (size & 0x20) {
+               vex_printf(", #%u", size - 32);
+            } else if (size & 0x10) {
+               vex_printf(", #%u", size - 16);
+            } else if (size & 0x08) {
+               vex_printf(", #%u", size - 8);
+            }
+         }
+         return;
+      case ARMin_NUnaryS:
+         vex_printf("%s%s%s  ",
+                    showARMNeonUnOpS(i->ARMin.NUnaryS.op),
+                    showARMNeonUnOpSDataType(i->ARMin.NUnaryS.op),
+                    showARMNeonDataSize(i));
+         ppARMNRS(i->ARMin.NUnaryS.dst);
+         vex_printf(", ");
+         ppARMNRS(i->ARMin.NUnaryS.src);
+         return;
+      case ARMin_NShift:
+         vex_printf("%s%s%s  ",
+                    showARMNeonShiftOp(i->ARMin.NShift.op),
+                    showARMNeonShiftOpDataType(i->ARMin.NShift.op),
+                    showARMNeonDataSize(i));
+         ppHRegARM(i->ARMin.NShift.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.NShift.argL);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.NShift.argR);
+         return;
+      case ARMin_NShl64:
+         vex_printf("vshl.i64 ");
+         ppHRegARM(i->ARMin.NShl64.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.NShl64.src);
+         vex_printf(", #%u", i->ARMin.NShl64.amt);
+         return;
+      case ARMin_NDual:
+         vex_printf("%s%s%s  ",
+                    showARMNeonDualOp(i->ARMin.NDual.op),
+                    showARMNeonDualOpDataType(i->ARMin.NDual.op),
+                    showARMNeonDataSize(i));
+         ppHRegARM(i->ARMin.NDual.arg1);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.NDual.arg2);
+         return;
+      case ARMin_NBinary:
+         vex_printf("%s%s%s",
+                    showARMNeonBinOp(i->ARMin.NBinary.op),
+                    showARMNeonBinOpDataType(i->ARMin.NBinary.op),
+                    showARMNeonDataSize(i));
+         vex_printf("  ");
+         ppHRegARM(i->ARMin.NBinary.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.NBinary.argL);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.NBinary.argR);
+         return;
+      case ARMin_NeonImm:
+         vex_printf("vmov  ");
+         ppHRegARM(i->ARMin.NeonImm.dst);
+         vex_printf(", ");
+         ppARMNImm(i->ARMin.NeonImm.imm);
+         return;
+      case ARMin_NCMovQ:
+         vex_printf("vmov%s ", showARMCondCode(i->ARMin.NCMovQ.cond));
+         ppHRegARM(i->ARMin.NCMovQ.dst);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.NCMovQ.src);
+         return;
+      case ARMin_Add32:
+         vex_printf("add32 ");
+         ppHRegARM(i->ARMin.Add32.rD);
+         vex_printf(", ");
+         ppHRegARM(i->ARMin.Add32.rN);
+         vex_printf(", ");
+         /* %u: imm32 is a UInt (was printed with %d). */
+         vex_printf("%u", i->ARMin.Add32.imm32);
+         return;
+      case ARMin_EvCheck:
+         vex_printf("(evCheck) ldr r12,");
+         ppARMAMode1(i->ARMin.EvCheck.amCounter);
+         vex_printf("; subs r12,r12,$1; str r12,");
+         ppARMAMode1(i->ARMin.EvCheck.amCounter);
+         vex_printf("; bpl nofail; ldr r12,");
+         ppARMAMode1(i->ARMin.EvCheck.amFailAddr);
+         vex_printf("; bx r12; nofail:");
+         return;
+      case ARMin_ProfInc:
+         /* HI16 must be loaded with movt, not a second movw. */
+         vex_printf("(profInc) movw r12,LO16($NotKnownYet); "
+                    "movt r12,HI16($NotKnownYet); "
+                    "ldr r11,[r12]; "
+                    "adds r11,r11,$1; "
+                    "str r11,[r12]; "
+                    "ldr r11,[r12+4]; "
+                    "adc r11,r11,$0; "
+                    "str r11,[r12+4]")
+;
+         return;
+      default:
+         vex_printf("ppARMInstr: unhandled case (tag %d)", (Int)i->tag);
+         vpanic("ppARMInstr(1)");
+         return;
+   }
+}
+
+
+/* --------- Helpers for register allocation. --------- */
+
+/* Tell the register allocator which registers this instruction reads
+   and writes.  Conventions visible in the cases below:
+   - "dst" fields are written; "src"/"argL"/"argR" fields are read;
+   - conditional loads (cc != ARMcc_AL) also read their destination,
+     because the old value survives when the condition fails;
+   - Call/Mul/LdrEX/StrEX/EvCheck/ProfInc use fixed real registers
+     (r0..r4, r11, r12) rather than allocatable operands. */
+void getRegUsage_ARMInstr ( HRegUsage* u, const ARMInstr* i, Bool mode64 )
+{
+   vassert(mode64 == False);
+   initHRegUsage(u);
+   switch (i->tag) {
+      case ARMin_Alu:
+         addHRegUse(u, HRmWrite, i->ARMin.Alu.dst);
+         addHRegUse(u, HRmRead, i->ARMin.Alu.argL);
+         addRegUsage_ARMRI84(u, i->ARMin.Alu.argR);
+         return;
+      case ARMin_Shift:
+         addHRegUse(u, HRmWrite, i->ARMin.Shift.dst);
+         addHRegUse(u, HRmRead, i->ARMin.Shift.argL);
+         addRegUsage_ARMRI5(u, i->ARMin.Shift.argR);
+         return;
+      case ARMin_Unary:
+         addHRegUse(u, HRmWrite, i->ARMin.Unary.dst);
+         addHRegUse(u, HRmRead, i->ARMin.Unary.src);
+         return;
+      case ARMin_CmpOrTst:
+         addHRegUse(u, HRmRead, i->ARMin.CmpOrTst.argL);
+         addRegUsage_ARMRI84(u, i->ARMin.CmpOrTst.argR);
+         return;
+      case ARMin_Mov:
+         addHRegUse(u, HRmWrite, i->ARMin.Mov.dst);
+         addRegUsage_ARMRI84(u, i->ARMin.Mov.src);
+         return;
+      case ARMin_Imm32:
+         addHRegUse(u, HRmWrite, i->ARMin.Imm32.dst);
+         return;
+      case ARMin_LdSt32:
+         addRegUsage_ARMAMode1(u, i->ARMin.LdSt32.amode);
+         if (i->ARMin.LdSt32.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARMin.LdSt32.rD);
+            /* conditional load: old rD value may survive */
+            if (i->ARMin.LdSt32.cc != ARMcc_AL)
+               addHRegUse(u, HRmRead, i->ARMin.LdSt32.rD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARMin.LdSt32.rD);
+         }
+         return;
+      case ARMin_LdSt16:
+         addRegUsage_ARMAMode2(u, i->ARMin.LdSt16.amode);
+         if (i->ARMin.LdSt16.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARMin.LdSt16.rD);
+            if (i->ARMin.LdSt16.cc != ARMcc_AL)
+               addHRegUse(u, HRmRead, i->ARMin.LdSt16.rD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARMin.LdSt16.rD);
+         }
+         return;
+      case ARMin_LdSt8U:
+         addRegUsage_ARMAMode1(u, i->ARMin.LdSt8U.amode);
+         if (i->ARMin.LdSt8U.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARMin.LdSt8U.rD);
+            if (i->ARMin.LdSt8U.cc != ARMcc_AL)
+               addHRegUse(u, HRmRead, i->ARMin.LdSt8U.rD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARMin.LdSt8U.rD);
+         }
+         return;
+      case ARMin_Ld8S:
+         addRegUsage_ARMAMode2(u, i->ARMin.Ld8S.amode);
+         addHRegUse(u, HRmWrite, i->ARMin.Ld8S.rD);
+         if (i->ARMin.Ld8S.cc != ARMcc_AL)
+            addHRegUse(u, HRmRead, i->ARMin.Ld8S.rD);
+         return;
+      /* XDirect/XIndir/XAssisted are also a bit subtle.  They
+         conditionally exit the block.  Hence we only need to list (1)
+         the registers that they read, and (2) the registers that they
+         write in the case where the block is not exited.  (2) is
+         empty, hence only (1) is relevant here. */
+      case ARMin_XDirect:
+         addRegUsage_ARMAMode1(u, i->ARMin.XDirect.amR15T);
+         return;
+      case ARMin_XIndir:
+         addHRegUse(u, HRmRead, i->ARMin.XIndir.dstGA);
+         addRegUsage_ARMAMode1(u, i->ARMin.XIndir.amR15T);
+         return;
+      case ARMin_XAssisted:
+         addHRegUse(u, HRmRead, i->ARMin.XAssisted.dstGA);
+         addRegUsage_ARMAMode1(u, i->ARMin.XAssisted.amR15T);
+         return;
+      case ARMin_CMov:
+         /* conditional write, so dst is a use as well as a def */
+         addHRegUse(u, HRmWrite, i->ARMin.CMov.dst);
+         addHRegUse(u, HRmRead,  i->ARMin.CMov.dst);
+         addRegUsage_ARMRI84(u, i->ARMin.CMov.src);
+         return;
+      case ARMin_Call:
+         /* logic and comments copied/modified from x86 back end */
+         /* This is a bit subtle. */
+         /* First off, claim it trashes all the caller-saved regs
+            which fall within the register allocator's jurisdiction.
+            These I believe to be r0,1,2,3.  If it turns out that r9
+            is also caller-saved, then we'll have to add that here
+            too. */
+         addHRegUse(u, HRmWrite, hregARM_R0());
+         addHRegUse(u, HRmWrite, hregARM_R1());
+         addHRegUse(u, HRmWrite, hregARM_R2());
+         addHRegUse(u, HRmWrite, hregARM_R3());
+         /* Now we have to state any parameter-carrying registers
+            which might be read.  This depends on nArgRegs. */
+         switch (i->ARMin.Call.nArgRegs) {
+            case 4: addHRegUse(u, HRmRead, hregARM_R3()); /*fallthru*/
+            case 3: addHRegUse(u, HRmRead, hregARM_R2()); /*fallthru*/
+            case 2: addHRegUse(u, HRmRead, hregARM_R1()); /*fallthru*/
+            case 1: addHRegUse(u, HRmRead, hregARM_R0()); break;
+            case 0: break;
+            default: vpanic("getRegUsage_ARM:Call:regparms");
+         }
+         /* Finally, there is the issue that the insn trashes a
+            register because the literal target address has to be
+            loaded into a register.  Fortunately, for the nArgRegs=
+            0/1/2/3 case, we can use r0, r1, r2 or r3 respectively, so
+            this does not cause any further damage.  For the
+            nArgRegs=4 case, we'll have to choose another register
+            arbitrarily since all the caller saved regs are used for
+            parameters, and so we might as well choose r11.
+            */
+         if (i->ARMin.Call.nArgRegs == 4)
+            addHRegUse(u, HRmWrite, hregARM_R11());
+         /* Upshot of this is that the assembler really must observe
+            the here-stated convention of which register to use as an
+            address temporary, depending on nArgRegs: 0==r0,
+            1==r1, 2==r2, 3==r3, 4==r11 */
+         return;
+      case ARMin_Mul:
+         /* fixed-register convention: operands in r2/r3, results in
+            r0 (and r1 for the widening variants) */
+         addHRegUse(u, HRmRead, hregARM_R2());
+         addHRegUse(u, HRmRead, hregARM_R3());
+         addHRegUse(u, HRmWrite, hregARM_R0());
+         if (i->ARMin.Mul.op != ARMmul_PLAIN)
+            addHRegUse(u, HRmWrite, hregARM_R1());
+         return;
+      case ARMin_LdrEX:
+         /* fixed-register convention: address in r4, data to r2
+            (and r3 for the 8-byte case) */
+         addHRegUse(u, HRmRead, hregARM_R4());
+         addHRegUse(u, HRmWrite, hregARM_R2());
+         if (i->ARMin.LdrEX.szB == 8)
+            addHRegUse(u, HRmWrite, hregARM_R3());
+         return;
+      case ARMin_StrEX:
+         /* fixed-register convention: address in r4, data from r2
+            (and r3 for the 8-byte case), success flag to r0 */
+         addHRegUse(u, HRmRead, hregARM_R4());
+         addHRegUse(u, HRmWrite, hregARM_R0());
+         addHRegUse(u, HRmRead, hregARM_R2());
+         if (i->ARMin.StrEX.szB == 8)
+            addHRegUse(u, HRmRead, hregARM_R3());
+         return;
+      case ARMin_VLdStD:
+         addRegUsage_ARMAModeV(u, i->ARMin.VLdStD.amode);
+         if (i->ARMin.VLdStD.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARMin.VLdStD.dD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARMin.VLdStD.dD);
+         }
+         return;
+      case ARMin_VLdStS:
+         addRegUsage_ARMAModeV(u, i->ARMin.VLdStS.amode);
+         if (i->ARMin.VLdStS.isLoad) {
+            addHRegUse(u, HRmWrite, i->ARMin.VLdStS.fD);
+         } else {
+            addHRegUse(u, HRmRead, i->ARMin.VLdStS.fD);
+         }
+         return;
+      case ARMin_VAluD:
+         addHRegUse(u, HRmWrite, i->ARMin.VAluD.dst);
+         addHRegUse(u, HRmRead, i->ARMin.VAluD.argL);
+         addHRegUse(u, HRmRead, i->ARMin.VAluD.argR);
+         return;
+      case ARMin_VAluS:
+         addHRegUse(u, HRmWrite, i->ARMin.VAluS.dst);
+         addHRegUse(u, HRmRead, i->ARMin.VAluS.argL);
+         addHRegUse(u, HRmRead, i->ARMin.VAluS.argR);
+         return;
+      case ARMin_VUnaryD:
+         addHRegUse(u, HRmWrite, i->ARMin.VUnaryD.dst);
+         addHRegUse(u, HRmRead, i->ARMin.VUnaryD.src);
+         return;
+      case ARMin_VUnaryS:
+         addHRegUse(u, HRmWrite, i->ARMin.VUnaryS.dst);
+         addHRegUse(u, HRmRead, i->ARMin.VUnaryS.src);
+         return;
+      case ARMin_VCmpD:
+         addHRegUse(u, HRmRead, i->ARMin.VCmpD.argL);
+         addHRegUse(u, HRmRead, i->ARMin.VCmpD.argR);
+         return;
+      case ARMin_VCMovD:
+         /* conditional write, so dst is a use as well as a def */
+         addHRegUse(u, HRmWrite, i->ARMin.VCMovD.dst);
+         addHRegUse(u, HRmRead,  i->ARMin.VCMovD.dst);
+         addHRegUse(u, HRmRead,  i->ARMin.VCMovD.src);
+         return;
+      case ARMin_VCMovS:
+         addHRegUse(u, HRmWrite, i->ARMin.VCMovS.dst);
+         addHRegUse(u, HRmRead,  i->ARMin.VCMovS.dst);
+         addHRegUse(u, HRmRead,  i->ARMin.VCMovS.src);
+         return;
+      case ARMin_VCvtSD:
+         addHRegUse(u, HRmWrite, i->ARMin.VCvtSD.dst);
+         addHRegUse(u, HRmRead,  i->ARMin.VCvtSD.src);
+         return;
+      case ARMin_VXferD:
+         if (i->ARMin.VXferD.toD) {
+            addHRegUse(u, HRmWrite, i->ARMin.VXferD.dD);
+            addHRegUse(u, HRmRead,  i->ARMin.VXferD.rHi);
+            addHRegUse(u, HRmRead,  i->ARMin.VXferD.rLo);
+         } else {
+            addHRegUse(u, HRmRead,  i->ARMin.VXferD.dD);
+            addHRegUse(u, HRmWrite, i->ARMin.VXferD.rHi);
+            addHRegUse(u, HRmWrite, i->ARMin.VXferD.rLo);
+         }
+         return;
+      case ARMin_VXferS:
+         if (i->ARMin.VXferS.toS) {
+            addHRegUse(u, HRmWrite, i->ARMin.VXferS.fD);
+            addHRegUse(u, HRmRead,  i->ARMin.VXferS.rLo);
+         } else {
+            addHRegUse(u, HRmRead,  i->ARMin.VXferS.fD);
+            addHRegUse(u, HRmWrite, i->ARMin.VXferS.rLo);
+         }
+         return;
+      case ARMin_VCvtID:
+         addHRegUse(u, HRmWrite, i->ARMin.VCvtID.dst);
+         addHRegUse(u, HRmRead,  i->ARMin.VCvtID.src);
+         return;
+      case ARMin_FPSCR:
+         if (i->ARMin.FPSCR.toFPSCR)
+            addHRegUse(u, HRmRead, i->ARMin.FPSCR.iReg);
+         else
+            addHRegUse(u, HRmWrite, i->ARMin.FPSCR.iReg);
+         return;
+      case ARMin_MFence:
+         return;
+      case ARMin_CLREX:
+         return;
+      case ARMin_NLdStQ:
+         if (i->ARMin.NLdStQ.isLoad)
+            addHRegUse(u, HRmWrite, i->ARMin.NLdStQ.dQ);
+         else
+            addHRegUse(u, HRmRead, i->ARMin.NLdStQ.dQ);
+         addRegUsage_ARMAModeN(u, i->ARMin.NLdStQ.amode);
+         return;
+      case ARMin_NLdStD:
+         if (i->ARMin.NLdStD.isLoad)
+            addHRegUse(u, HRmWrite, i->ARMin.NLdStD.dD);
+         else
+            addHRegUse(u, HRmRead, i->ARMin.NLdStD.dD);
+         addRegUsage_ARMAModeN(u, i->ARMin.NLdStD.amode);
+         return;
+      case ARMin_NUnary:
+         addHRegUse(u, HRmWrite, i->ARMin.NUnary.dst);
+         addHRegUse(u, HRmRead, i->ARMin.NUnary.src);
+         return;
+      case ARMin_NUnaryS:
+         addHRegUse(u, HRmWrite, i->ARMin.NUnaryS.dst->reg);
+         addHRegUse(u, HRmRead, i->ARMin.NUnaryS.src->reg);
+         return;
+      case ARMin_NShift:
+         addHRegUse(u, HRmWrite, i->ARMin.NShift.dst);
+         addHRegUse(u, HRmRead, i->ARMin.NShift.argL);
+         addHRegUse(u, HRmRead, i->ARMin.NShift.argR);
+         return;
+      case ARMin_NShl64:
+         addHRegUse(u, HRmWrite, i->ARMin.NShl64.dst);
+         addHRegUse(u, HRmRead, i->ARMin.NShl64.src);
+         return;
+      case ARMin_NDual:
+         /* both operands are read-modify-write */
+         addHRegUse(u, HRmWrite, i->ARMin.NDual.arg1);
+         addHRegUse(u, HRmWrite, i->ARMin.NDual.arg2);
+         addHRegUse(u, HRmRead, i->ARMin.NDual.arg1);
+         addHRegUse(u, HRmRead, i->ARMin.NDual.arg2);
+         return;
+      case ARMin_NBinary:
+         addHRegUse(u, HRmWrite, i->ARMin.NBinary.dst);
+         /* TODO: sometimes dst is also being read! */
+         // XXX fix this
+         addHRegUse(u, HRmRead, i->ARMin.NBinary.argL);
+         addHRegUse(u, HRmRead, i->ARMin.NBinary.argR);
+         return;
+      case ARMin_NeonImm:
+         addHRegUse(u, HRmWrite, i->ARMin.NeonImm.dst);
+         return;
+      case ARMin_NCMovQ:
+         /* conditional write, so dst is a use as well as a def */
+         addHRegUse(u, HRmWrite, i->ARMin.NCMovQ.dst);
+         addHRegUse(u, HRmRead,  i->ARMin.NCMovQ.dst);
+         addHRegUse(u, HRmRead,  i->ARMin.NCMovQ.src);
+         return;
+      case ARMin_Add32:
+         addHRegUse(u, HRmWrite, i->ARMin.Add32.rD);
+         addHRegUse(u, HRmRead, i->ARMin.Add32.rN);
+         return;
+      case ARMin_EvCheck:
+         /* We expect both amodes only to mention r8, so this is in
+            fact pointless, since r8 isn't allocatable, but
+            anyway.. */
+         addRegUsage_ARMAMode1(u, i->ARMin.EvCheck.amCounter);
+         addRegUsage_ARMAMode1(u, i->ARMin.EvCheck.amFailAddr);
+         addHRegUse(u, HRmWrite, hregARM_R12()); /* also unavail to RA */
+         return;
+      case ARMin_ProfInc:
+         addHRegUse(u, HRmWrite, hregARM_R12());
+         addHRegUse(u, HRmWrite, hregARM_R11());
+         return;
+      default:
+         ppARMInstr(i);
+         vpanic("getRegUsage_ARMInstr");
+   }
+}
+
+
+/* Apply the virtual-to-real register mapping m, in place, to every
+   register field of i.  Cases that simply return (Call, Mul, LdrEX,
+   StrEX, MFence, CLREX, ProfInc) have no allocatable register
+   operands: they either use fixed real registers or none at all. */
+void mapRegs_ARMInstr ( HRegRemap* m, ARMInstr* i, Bool mode64 )
+{
+   vassert(mode64 == False);
+   switch (i->tag) {
+      case ARMin_Alu:
+         i->ARMin.Alu.dst = lookupHRegRemap(m, i->ARMin.Alu.dst);
+         i->ARMin.Alu.argL = lookupHRegRemap(m, i->ARMin.Alu.argL);
+         mapRegs_ARMRI84(m, i->ARMin.Alu.argR);
+         return;
+      case ARMin_Shift:
+         i->ARMin.Shift.dst = lookupHRegRemap(m, i->ARMin.Shift.dst);
+         i->ARMin.Shift.argL = lookupHRegRemap(m, i->ARMin.Shift.argL);
+         mapRegs_ARMRI5(m, i->ARMin.Shift.argR);
+         return;
+      case ARMin_Unary:
+         i->ARMin.Unary.dst = lookupHRegRemap(m, i->ARMin.Unary.dst);
+         i->ARMin.Unary.src = lookupHRegRemap(m, i->ARMin.Unary.src);
+         return;
+      case ARMin_CmpOrTst:
+         i->ARMin.CmpOrTst.argL = lookupHRegRemap(m, i->ARMin.CmpOrTst.argL);
+         mapRegs_ARMRI84(m, i->ARMin.CmpOrTst.argR);
+         return;
+      case ARMin_Mov:
+         i->ARMin.Mov.dst = lookupHRegRemap(m, i->ARMin.Mov.dst);
+         mapRegs_ARMRI84(m, i->ARMin.Mov.src);
+         return;
+      case ARMin_Imm32:
+         i->ARMin.Imm32.dst = lookupHRegRemap(m, i->ARMin.Imm32.dst);
+         return;
+      case ARMin_LdSt32:
+         i->ARMin.LdSt32.rD = lookupHRegRemap(m, i->ARMin.LdSt32.rD);
+         mapRegs_ARMAMode1(m, i->ARMin.LdSt32.amode);
+         return;
+      case ARMin_LdSt16:
+         i->ARMin.LdSt16.rD = lookupHRegRemap(m, i->ARMin.LdSt16.rD);
+         mapRegs_ARMAMode2(m, i->ARMin.LdSt16.amode);
+         return;
+      case ARMin_LdSt8U:
+         i->ARMin.LdSt8U.rD = lookupHRegRemap(m, i->ARMin.LdSt8U.rD);
+         mapRegs_ARMAMode1(m, i->ARMin.LdSt8U.amode);
+         return;
+      case ARMin_Ld8S:
+         i->ARMin.Ld8S.rD = lookupHRegRemap(m, i->ARMin.Ld8S.rD);
+         mapRegs_ARMAMode2(m, i->ARMin.Ld8S.amode);
+         return;
+      case ARMin_XDirect:
+         mapRegs_ARMAMode1(m, i->ARMin.XDirect.amR15T);
+         return;
+      case ARMin_XIndir:
+         i->ARMin.XIndir.dstGA
+            = lookupHRegRemap(m, i->ARMin.XIndir.dstGA);
+         mapRegs_ARMAMode1(m, i->ARMin.XIndir.amR15T);
+         return;
+      case ARMin_XAssisted:
+         i->ARMin.XAssisted.dstGA
+            = lookupHRegRemap(m, i->ARMin.XAssisted.dstGA);
+         mapRegs_ARMAMode1(m, i->ARMin.XAssisted.amR15T);
+         return;
+      case ARMin_CMov:
+         i->ARMin.CMov.dst = lookupHRegRemap(m, i->ARMin.CMov.dst);
+         mapRegs_ARMRI84(m, i->ARMin.CMov.src);
+         return;
+      case ARMin_Call:
+         return;
+      case ARMin_Mul:
+         return;
+      case ARMin_LdrEX:
+         return;
+      case ARMin_StrEX:
+         return;
+      case ARMin_VLdStD:
+         i->ARMin.VLdStD.dD = lookupHRegRemap(m, i->ARMin.VLdStD.dD);
+         mapRegs_ARMAModeV(m, i->ARMin.VLdStD.amode);
+         return;
+      case ARMin_VLdStS:
+         i->ARMin.VLdStS.fD = lookupHRegRemap(m, i->ARMin.VLdStS.fD);
+         mapRegs_ARMAModeV(m, i->ARMin.VLdStS.amode);
+         return;
+      case ARMin_VAluD:
+         i->ARMin.VAluD.dst  = lookupHRegRemap(m, i->ARMin.VAluD.dst);
+         i->ARMin.VAluD.argL = lookupHRegRemap(m, i->ARMin.VAluD.argL);
+         i->ARMin.VAluD.argR = lookupHRegRemap(m, i->ARMin.VAluD.argR);
+         return;
+      case ARMin_VAluS:
+         i->ARMin.VAluS.dst  = lookupHRegRemap(m, i->ARMin.VAluS.dst);
+         i->ARMin.VAluS.argL = lookupHRegRemap(m, i->ARMin.VAluS.argL);
+         i->ARMin.VAluS.argR = lookupHRegRemap(m, i->ARMin.VAluS.argR);
+         return;
+      case ARMin_VUnaryD:
+         i->ARMin.VUnaryD.dst = lookupHRegRemap(m, i->ARMin.VUnaryD.dst);
+         i->ARMin.VUnaryD.src = lookupHRegRemap(m, i->ARMin.VUnaryD.src);
+         return;
+      case ARMin_VUnaryS:
+         i->ARMin.VUnaryS.dst = lookupHRegRemap(m, i->ARMin.VUnaryS.dst);
+         i->ARMin.VUnaryS.src = lookupHRegRemap(m, i->ARMin.VUnaryS.src);
+         return;
+      case ARMin_VCmpD:
+         i->ARMin.VCmpD.argL = lookupHRegRemap(m, i->ARMin.VCmpD.argL);
+         i->ARMin.VCmpD.argR = lookupHRegRemap(m, i->ARMin.VCmpD.argR);
+         return;
+      case ARMin_VCMovD:
+         i->ARMin.VCMovD.dst = lookupHRegRemap(m, i->ARMin.VCMovD.dst);
+         i->ARMin.VCMovD.src = lookupHRegRemap(m, i->ARMin.VCMovD.src);
+         return;
+      case ARMin_VCMovS:
+         i->ARMin.VCMovS.dst = lookupHRegRemap(m, i->ARMin.VCMovS.dst);
+         i->ARMin.VCMovS.src = lookupHRegRemap(m, i->ARMin.VCMovS.src);
+         return;
+      case ARMin_VCvtSD:
+         i->ARMin.VCvtSD.dst = lookupHRegRemap(m, i->ARMin.VCvtSD.dst);
+         i->ARMin.VCvtSD.src = lookupHRegRemap(m, i->ARMin.VCvtSD.src);
+         return;
+      case ARMin_VXferD:
+         i->ARMin.VXferD.dD  = lookupHRegRemap(m, i->ARMin.VXferD.dD);
+         i->ARMin.VXferD.rHi = lookupHRegRemap(m, i->ARMin.VXferD.rHi);
+         i->ARMin.VXferD.rLo = lookupHRegRemap(m, i->ARMin.VXferD.rLo);
+         return;
+      case ARMin_VXferS:
+         i->ARMin.VXferS.fD  = lookupHRegRemap(m, i->ARMin.VXferS.fD);
+         i->ARMin.VXferS.rLo = lookupHRegRemap(m, i->ARMin.VXferS.rLo);
+         return;
+      case ARMin_VCvtID:
+         i->ARMin.VCvtID.dst = lookupHRegRemap(m, i->ARMin.VCvtID.dst);
+         i->ARMin.VCvtID.src = lookupHRegRemap(m, i->ARMin.VCvtID.src);
+         return;
+      case ARMin_FPSCR:
+         i->ARMin.FPSCR.iReg = lookupHRegRemap(m, i->ARMin.FPSCR.iReg);
+         return;
+      case ARMin_MFence:
+         return;
+      case ARMin_CLREX:
+         return;
+      case ARMin_NLdStQ:
+         i->ARMin.NLdStQ.dQ = lookupHRegRemap(m, i->ARMin.NLdStQ.dQ);
+         mapRegs_ARMAModeN(m, i->ARMin.NLdStQ.amode);
+         return;
+      case ARMin_NLdStD:
+         i->ARMin.NLdStD.dD = lookupHRegRemap(m, i->ARMin.NLdStD.dD);
+         mapRegs_ARMAModeN(m, i->ARMin.NLdStD.amode);
+         return;
+      case ARMin_NUnary:
+         i->ARMin.NUnary.src = lookupHRegRemap(m, i->ARMin.NUnary.src);
+         i->ARMin.NUnary.dst = lookupHRegRemap(m, i->ARMin.NUnary.dst);
+         return;
+      case ARMin_NUnaryS:
+         /* operands are ARMNRS*; only their embedded reg is remapped */
+         i->ARMin.NUnaryS.src->reg
+            = lookupHRegRemap(m, i->ARMin.NUnaryS.src->reg);
+         i->ARMin.NUnaryS.dst->reg
+            = lookupHRegRemap(m, i->ARMin.NUnaryS.dst->reg);
+         return;
+      case ARMin_NShift:
+         i->ARMin.NShift.dst = lookupHRegRemap(m, i->ARMin.NShift.dst);
+         i->ARMin.NShift.argL = lookupHRegRemap(m, i->ARMin.NShift.argL);
+         i->ARMin.NShift.argR = lookupHRegRemap(m, i->ARMin.NShift.argR);
+         return;
+      case ARMin_NShl64:
+         i->ARMin.NShl64.dst = lookupHRegRemap(m, i->ARMin.NShl64.dst);
+         i->ARMin.NShl64.src = lookupHRegRemap(m, i->ARMin.NShl64.src);
+         return;
+      case ARMin_NDual:
+         i->ARMin.NDual.arg1 = lookupHRegRemap(m, i->ARMin.NDual.arg1);
+         i->ARMin.NDual.arg2 = lookupHRegRemap(m, i->ARMin.NDual.arg2);
+         return;
+      case ARMin_NBinary:
+         i->ARMin.NBinary.argL = lookupHRegRemap(m, i->ARMin.NBinary.argL);
+         i->ARMin.NBinary.argR = lookupHRegRemap(m, i->ARMin.NBinary.argR);
+         i->ARMin.NBinary.dst  = lookupHRegRemap(m, i->ARMin.NBinary.dst);
+         return;
+      case ARMin_NeonImm:
+         i->ARMin.NeonImm.dst = lookupHRegRemap(m, i->ARMin.NeonImm.dst);
+         return;
+      case ARMin_NCMovQ:
+         i->ARMin.NCMovQ.dst = lookupHRegRemap(m, i->ARMin.NCMovQ.dst);
+         i->ARMin.NCMovQ.src = lookupHRegRemap(m, i->ARMin.NCMovQ.src);
+         return;
+      case ARMin_Add32:
+         i->ARMin.Add32.rD = lookupHRegRemap(m, i->ARMin.Add32.rD);
+         i->ARMin.Add32.rN = lookupHRegRemap(m, i->ARMin.Add32.rN);
+         return;
+      case ARMin_EvCheck:
+         /* We expect both amodes only to mention r8, so this is in
+            fact pointless, since r8 isn't allocatable, but
+            anyway.. */
+         mapRegs_ARMAMode1(m, i->ARMin.EvCheck.amCounter);
+         mapRegs_ARMAMode1(m, i->ARMin.EvCheck.amFailAddr);
+         return;
+      case ARMin_ProfInc:
+         /* hardwires r11 and r12 -- nothing to modify. */
+         return;
+      default:
+         ppARMInstr(i);
+         vpanic("mapRegs_ARMInstr");
+   }
+}
+
+/* Figure out if i represents a reg-reg move, and if so assign the
+   source and destination to *src and *dst.  If in doubt say No.  Used
+   by the register allocator to do move coalescing. 
+*/
+Bool isMove_ARMInstr ( const ARMInstr* i, HReg* src, HReg* dst )
+{
+   /* A reg-reg copy takes one of four shapes: an integer MOV whose
+      shifter operand is a plain register, a VFP D- or S-register
+      copy, or a Neon Q-register copy.  Anything else is not a
+      coalescable move. */
+   if (i->tag == ARMin_Mov && i->ARMin.Mov.src->tag == ARMri84_R) {
+      *src = i->ARMin.Mov.src->ARMri84.R.reg;
+      *dst = i->ARMin.Mov.dst;
+      return True;
+   }
+   if (i->tag == ARMin_VUnaryD && i->ARMin.VUnaryD.op == ARMvfpu_COPY) {
+      *src = i->ARMin.VUnaryD.src;
+      *dst = i->ARMin.VUnaryD.dst;
+      return True;
+   }
+   if (i->tag == ARMin_VUnaryS && i->ARMin.VUnaryS.op == ARMvfpu_COPY) {
+      *src = i->ARMin.VUnaryS.src;
+      *dst = i->ARMin.VUnaryS.dst;
+      return True;
+   }
+   if (i->tag == ARMin_NUnary && i->ARMin.NUnary.op == ARMneon_COPY) {
+      *src = i->ARMin.NUnary.src;
+      *dst = i->ARMin.NUnary.dst;
+      return True;
+   }
+   return False;
+}
+
+
+/* Generate arm spill/reload instructions under the direction of the
+   register allocator.  Note it's critical these don't write the
+   condition codes. */
+
+void genSpill_ARM ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                    HReg rreg, Int offsetB, Bool mode64 )
+{
+   /* Emit one or two instructions that store real register rreg at
+      byte offset offsetB from the baseblock pointer (r8).  Up to two
+      insns come back in *i1/*i2; unused slots are left NULL. */
+   HRegClass rclass;
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+   vassert(mode64 == False);
+   *i1 = *i2 = NULL;
+   rclass = hregClass(rreg);
+   switch (rclass) {
+      case HRcInt32:
+         /* A single str; the amode's immediate reaches 4095 at most. */
+         vassert(offsetB <= 4095);
+         *i1 = ARMInstr_LdSt32( ARMcc_AL, False/*!isLoad*/, 
+                                rreg, 
+                                ARMAMode1_RI(hregARM_R8(), offsetB) );
+         return;
+      case HRcFlt32:
+      case HRcFlt64: {
+         HReg r8   = hregARM_R8();  /* baseblock */
+         HReg r12  = hregARM_R12(); /* spill temp */
+         HReg base = r8;
+         vassert(0 == (offsetB & 3));
+         /* The VFP amode only reaches 1020 bytes, so for larger
+            offsets first materialise r8 + 1024*K in r12 and use the
+            remainder as the amode offset. */
+         if (offsetB >= 1024) {
+            Int offsetKB = offsetB / 1024;
+            /* r12 = r8 + (1024 * offsetKB) */
+            *i1 = ARMInstr_Alu(ARMalu_ADD, r12, r8,
+                               ARMRI84_I84(offsetKB, 11));
+            offsetB -= (1024 * offsetKB);
+            base = r12;
+         }
+         vassert(offsetB <= 1020);
+         if (rclass == HRcFlt32) {
+            *i2 = ARMInstr_VLdStS( False/*!isLoad*/,
+                                   rreg,
+                                   mkARMAModeV(base, offsetB) );
+         } else {
+            *i2 = ARMInstr_VLdStD( False/*!isLoad*/,
+                                   rreg,
+                                   mkARMAModeV(base, offsetB) );
+         }
+         return;
+      }
+      case HRcVec128: {
+         /* The Neon amode used here is register-only, so compute the
+            full address r8+offsetB into r12 first. */
+         HReg r8  = hregARM_R8();
+         HReg r12 = hregARM_R12();
+         *i1 = ARMInstr_Add32(r12, r8, offsetB);
+         *i2 = ARMInstr_NLdStQ(False, rreg, mkARMAModeN_R(r12));
+         return;
+      }
+      default:
+         ppHRegClass(rclass);
+         vpanic("genSpill_ARM: unimplemented regclass");
+   }
+}
+
+void genReload_ARM ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                     HReg rreg, Int offsetB, Bool mode64 )
+{
+   /* Exact mirror of genSpill_ARM: load real register rreg from byte
+      offset offsetB off the baseblock pointer (r8), using at most two
+      insns returned in *i1/*i2. */
+   HRegClass rclass;
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+   vassert(mode64 == False);
+   *i1 = *i2 = NULL;
+   rclass = hregClass(rreg);
+   switch (rclass) {
+      case HRcInt32:
+         /* A single ldr; the amode's immediate reaches 4095 at most. */
+         vassert(offsetB <= 4095);
+         *i1 = ARMInstr_LdSt32( ARMcc_AL, True/*isLoad*/, 
+                                rreg, 
+                                ARMAMode1_RI(hregARM_R8(), offsetB) );
+         return;
+      case HRcFlt32:
+      case HRcFlt64: {
+         HReg r8   = hregARM_R8();  /* baseblock */
+         HReg r12  = hregARM_R12(); /* spill temp */
+         HReg base = r8;
+         vassert(0 == (offsetB & 3));
+         /* VFP amode only reaches 1020 bytes; step the base into r12
+            first for larger offsets. */
+         if (offsetB >= 1024) {
+            Int offsetKB = offsetB / 1024;
+            /* r12 = r8 + (1024 * offsetKB) */
+            *i1 = ARMInstr_Alu(ARMalu_ADD, r12, r8,
+                               ARMRI84_I84(offsetKB, 11));
+            offsetB -= (1024 * offsetKB);
+            base = r12;
+         }
+         vassert(offsetB <= 1020);
+         if (rclass == HRcFlt32) {
+            *i2 = ARMInstr_VLdStS( True/*isLoad*/,
+                                   rreg,
+                                   mkARMAModeV(base, offsetB) );
+         } else {
+            *i2 = ARMInstr_VLdStD( True/*isLoad*/,
+                                   rreg,
+                                   mkARMAModeV(base, offsetB) );
+         }
+         return;
+      }
+      case HRcVec128: {
+         /* Register-only Neon amode: compute r8+offsetB into r12. */
+         HReg r8  = hregARM_R8();
+         HReg r12 = hregARM_R12();
+         *i1 = ARMInstr_Add32(r12, r8, offsetB);
+         *i2 = ARMInstr_NLdStQ(True, rreg, mkARMAModeN_R(r12));
+         return;
+      }
+      default:
+         ppHRegClass(rclass);
+         vpanic("genReload_ARM: unimplemented regclass");
+   }
+}
+
+
+/* Emit an instruction into buf and return the number of bytes used.
+   Note that buf is not the insn's final place, and therefore it is
+   imperative to emit position-independent code. */
+
+/* Hardware encoding of a real 32-bit integer register (r0..r15). */
+static inline UInt iregEnc ( HReg r )
+{
+   vassert(hregClass(r) == HRcInt32);
+   vassert(!hregIsVirtual(r));
+   UInt enc = hregEncoding(r);
+   vassert(enc <= 15);
+   return enc;
+}
+
+/* Hardware encoding of a real 64-bit FP register (d0..d31). */
+static inline UInt dregEnc ( HReg r )
+{
+   vassert(hregClass(r) == HRcFlt64);
+   vassert(!hregIsVirtual(r));
+   UInt enc = hregEncoding(r);
+   vassert(enc <= 31);
+   return enc;
+}
+
+/* Hardware encoding of a real 32-bit FP register (s0..s31). */
+static inline UInt fregEnc ( HReg r )
+{
+   vassert(hregClass(r) == HRcFlt32);
+   vassert(!hregIsVirtual(r));
+   UInt enc = hregEncoding(r);
+   vassert(enc <= 31);
+   return enc;
+}
+
+/* Hardware encoding of a real 128-bit vector register (q0..q15). */
+static inline UInt qregEnc ( HReg r )
+{
+   vassert(hregClass(r) == HRcVec128);
+   vassert(!hregIsVirtual(r));
+   UInt enc = hregEncoding(r);
+   vassert(enc <= 15);
+   return enc;
+}
+
+/* Build a 4-bit value from four individual bit arguments. */
+#define BITS4(zzb3,zzb2,zzb1,zzb0) \
+   (((zzb3) << 3) | ((zzb2) << 2) | ((zzb1) << 1) | (zzb0))
+/* Named constants for all sixteen 4-bit values: X<binary digits>. */
+#define X0000  BITS4(0,0,0,0)
+#define X0001  BITS4(0,0,0,1)
+#define X0010  BITS4(0,0,1,0)
+#define X0011  BITS4(0,0,1,1)
+#define X0100  BITS4(0,1,0,0)
+#define X0101  BITS4(0,1,0,1)
+#define X0110  BITS4(0,1,1,0)
+#define X0111  BITS4(0,1,1,1)
+#define X1000  BITS4(1,0,0,0)
+#define X1001  BITS4(1,0,0,1)
+#define X1010  BITS4(1,0,1,0)
+#define X1011  BITS4(1,0,1,1)
+#define X1100  BITS4(1,1,0,0)
+#define X1101  BITS4(1,1,0,1)
+#define X1110  BITS4(1,1,1,0)
+#define X1111  BITS4(1,1,1,1)
+
+/* Pack 4-bit fields into a 32-bit instruction word.  Each 'X' in the
+   macro name marks a nibble (MSB first) supplied by an argument; each
+   '_' marks a nibble left as zero.  Arguments are masked to 4 bits. */
+#define XXXXX___(zzx7,zzx6,zzx5,zzx4,zzx3) \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) |  \
+    (((zzx5) & 0xF) << 20) | (((zzx4) & 0xF) << 16) |  \
+    (((zzx3) & 0xF) << 12))
+
+#define XXXXXX__(zzx7,zzx6,zzx5,zzx4,zzx3,zzx2)        \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) |  \
+    (((zzx5) & 0xF) << 20) | (((zzx4) & 0xF) << 16) |  \
+    (((zzx3) & 0xF) << 12) | (((zzx2) & 0xF) <<  8))
+
+#define XXXXX__X(zzx7,zzx6,zzx5,zzx4,zzx3,zzx0)        \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) |  \
+    (((zzx5) & 0xF) << 20) | (((zzx4) & 0xF) << 16) |  \
+    (((zzx3) & 0xF) << 12) | (((zzx0) & 0xF) <<  0))
+
+#define XXX___XX(zzx7,zzx6,zzx5,zzx1,zzx0) \
+  ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) | \
+   (((zzx5) & 0xF) << 20) | (((zzx1) & 0xF) << 4) | \
+   (((zzx0) & 0xF) << 0))
+
+#define XXXXXXXX(zzx7,zzx6,zzx5,zzx4,zzx3,zzx2,zzx1,zzx0)  \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24) |  \
+    (((zzx5) & 0xF) << 20) | (((zzx4) & 0xF) << 16) |  \
+    (((zzx3) & 0xF) << 12) | (((zzx2) & 0xF) <<  8) |  \
+    (((zzx1) & 0xF) <<  4) | (((zzx0) & 0xF) <<  0))
+
+#define XX______(zzx7,zzx6) \
+   ((((zzx7) & 0xF) << 28) | (((zzx6) & 0xF) << 24))
+
+/* Generate a skeletal insn that involves an RI84 shifter operand.
+   Returns a word which is all zeroes apart from bits 25 and 11..0,
+   since it is those that encode the shifter operand (at least to the
+   extent that we care about it.) */
+static UInt skeletal_RI84 ( ARMRI84* ri )
+{
+   UInt w;
+   if (ri->tag != ARMri84_I84) {
+      /* Register form: bit 25 clear, register number in bits 3..0. */
+      w = iregEnc(ri->ARMri84.R.reg);
+   } else {
+      /* Immediate form: bit 25 set, rotate nibble in bits 11..8,
+         8-bit immediate in bits 7..0. */
+      vassert(0 == (ri->ARMri84.I84.imm4 & ~0x0F));
+      vassert(0 == (ri->ARMri84.I84.imm8 & ~0xFF));
+      w = (1 << 25) | (ri->ARMri84.I84.imm4 << 8) | ri->ARMri84.I84.imm8;
+   }
+   return w;
+}
+
+/* Ditto for RI5.  Resulting word is zeroes apart from bit 4 and bits
+   11..7. */
+static UInt skeletal_RI5 ( ARMRI5* ri )
+{
+   if (ri->tag == ARMri5_I5) {
+      /* Immediate shift: bit 4 clear, amount (1..31) in bits 11..7. */
+      UInt amt = ri->ARMri5.I5.imm5;
+      vassert(amt >= 1 && amt <= 31);
+      return amt << 7;
+   }
+   /* Register shift: bit 4 set, register number in bits 11..8. */
+   return (1 << 4) | (iregEnc(ri->ARMri5.R.reg) << 8);
+}
+
+
+/* Get an immediate into a register, using only that 
+   register.  (very lame..) */
+static UInt* imm32_to_ireg ( UInt* p, Int rD, UInt imm32 )
+{
+   UInt instr;
+   vassert(rD >= 0 && rD <= 14); // r15 not good to mess with!
+#if 0
+   /* Old, disabled scheme: a single MOV for 8-bit values, otherwise a
+      PC-relative load of a literal embedded in the code stream (hence
+      the D-cache pollution complaint below). */
+   if (0 == (imm32 & ~0xFF)) {
+      /* mov with an immediate shifter operand of (0, imm32) (??) */
+      instr = XXXXXX__(X1110,X0011,X1010,X0000,rD,X0000);
+      instr |= imm32;
+      *p++ = instr;
+   } else {
+      // this is very bad; causes Dcache pollution
+      // ldr  rD, [pc]
+      instr = XXXXX___(X1110,X0101,X1001,X1111,rD);
+      *p++ = instr;
+      // b .+8
+      instr = 0xEA000000;
+      *p++ = instr;
+      // .word imm32
+      *p++ = imm32;
+   }
+#else
+   if (VEX_ARM_ARCHLEVEL(arm_hwcaps) > 6) {
+      /* Generate movw rD, #low16.  Then, if the high 16 are
+         nonzero, generate movt rD, #high16. */
+      UInt lo16 = imm32 & 0xFFFF;
+      UInt hi16 = (imm32 >> 16) & 0xFFFF;
+      instr = XXXXXXXX(0xE, 0x3, 0x0, (lo16 >> 12) & 0xF, rD,
+                       (lo16 >> 8) & 0xF, (lo16 >> 4) & 0xF,
+                       lo16 & 0xF);
+      *p++ = instr;
+      if (hi16 != 0) {
+         instr = XXXXXXXX(0xE, 0x3, 0x4, (hi16 >> 12) & 0xF, rD,
+                          (hi16 >> 8) & 0xF, (hi16 >> 4) & 0xF,
+                          hi16 & 0xF);
+         *p++ = instr;
+      }
+   } else {
+      /* Pre-v7: no movw/movt available.  Build the value one byte at
+         a time using the rotated 8-bit immediate form.  The first
+         emitted insn is MOV rD, #imm8 (op == X1010, rN == 0); each
+         later one is ORR rD, rD, #imm8,ror (op flips to X1000 and rN
+         to rD).  rot = 4, 8, 12 rotate the byte into bits 31..24,
+         23..16 and 15..8 respectively. */
+      UInt imm, rot;
+      UInt op = X1010;
+      UInt rN = 0;
+      if ((imm32 & 0xFF) || (imm32 == 0)) {
+         /* Low byte; the (imm32 == 0) test guarantees at least one
+            instruction is emitted even for a zero immediate. */
+         imm = imm32 & 0xFF;
+         rot = 0;
+         instr = XXXXXXXX(0xE, 0x3, op, rN, rD, rot, imm >> 4, imm & 0xF);
+         *p++ = instr;
+         op = X1000;
+         rN = rD;
+      }
+      if (imm32 & 0xFF000000) {
+         /* Byte 3 (bits 31..24). */
+         imm = (imm32 >> 24) & 0xFF;
+         rot = 4;
+         instr = XXXXXXXX(0xE, 0x3, op, rN, rD, rot, imm >> 4, imm & 0xF);
+         *p++ = instr;
+         op = X1000;
+         rN = rD;
+      }
+      if (imm32 & 0xFF0000) {
+         /* Byte 2 (bits 23..16). */
+         imm = (imm32 >> 16) & 0xFF;
+         rot = 8;
+         instr = XXXXXXXX(0xE, 0x3, op, rN, rD, rot, imm >> 4, imm & 0xF);
+         *p++ = instr;
+         op = X1000;
+         rN = rD;
+      }
+      if (imm32 & 0xFF00) {
+         /* Byte 1 (bits 15..8). */
+         imm = (imm32 >> 8) & 0xFF;
+         rot = 12;
+         instr = XXXXXXXX(0xE, 0x3, op, rN, rD, rot, imm >> 4, imm & 0xF);
+         *p++ = instr;
+         op = X1000;
+         rN = rD;
+      }
+   }
+#endif
+   return p;
+}
+
+/* Get an immediate into a register, using only that register, and
+   generating exactly 2 instructions, regardless of the value of the
+   immediate. This is used when generating sections of code that need
+   to be patched later, so as to guarantee a specific size. */
+static UInt* imm32_to_ireg_EXACTLY2 ( UInt* p, Int rD, UInt imm32 )
+{
+   if (VEX_ARM_ARCHLEVEL(arm_hwcaps) > 6) {
+      /* Emit movw rD, #low16 followed unconditionally by
+         movt rD, #high16 -- always exactly two words, so the
+         sequence can later be located and patched. */
+      UInt lo = imm32 & 0xFFFF;
+      UInt hi = imm32 >> 16;
+      *p++ = XXXXXXXX(0xE, 0x3, 0x0, (lo >> 12) & 0xF, rD,
+                      (lo >> 8) & 0xF, (lo >> 4) & 0xF, lo & 0xF);
+      *p++ = XXXXXXXX(0xE, 0x3, 0x4, (hi >> 12) & 0xF, rD,
+                      (hi >> 8) & 0xF, (hi >> 4) & 0xF, hi & 0xF);
+   } else {
+      vassert(0); /* lose */
+   }
+   return p;
+}
+
+/* Check whether p points at a 2-insn sequence cooked up by
+   imm32_to_ireg_EXACTLY2(). */
+static Bool is_imm32_to_ireg_EXACTLY2 ( UInt* p, Int rD, UInt imm32 )
+{
+   if (VEX_ARM_ARCHLEVEL(arm_hwcaps) > 6) {
+      /* Recompute the movw rD, #low16 ; movt rD, #high16 pair that
+         imm32_to_ireg_EXACTLY2 would have emitted, and compare it
+         word-for-word with what is actually at p[0] and p[1]. */
+      UInt lo16 = imm32 & 0xFFFF;
+      UInt hi16 = (imm32 >> 16) & 0xFFFF;
+      UInt i0, i1;
+      i0 = XXXXXXXX(0xE, 0x3, 0x0, (lo16 >> 12) & 0xF, rD,
+                    (lo16 >> 8) & 0xF, (lo16 >> 4) & 0xF,
+                    lo16 & 0xF);
+      i1 = XXXXXXXX(0xE, 0x3, 0x4, (hi16 >> 12) & 0xF, rD,
+                    (hi16 >> 8) & 0xF, (hi16 >> 4) & 0xF,
+                    hi16 & 0xF);
+      return p[0] == i0 && p[1] == i1;
+   } else {
+      /* Pre-v7 hosts never emit such a sequence, so asking is a bug. */
+      vassert(0); /* lose */
+   }
+}
+
+
+static UInt* do_load_or_store32 ( UInt* p,
+                                  Bool isLoad, UInt rD, ARMAMode1* am )
+{
+   vassert(rD <= 12);
+   vassert(am->tag == ARMam1_RI); // RR case is not handled
+   /* bP = 1 adds the (non-negative) offset, bP = 0 subtracts its
+      magnitude, for a negative simm13. */
+   Int  simm13 = am->ARMam1.RI.simm13;
+   UInt bP     = simm13 < 0 ? 0 : 1;
+   Int  simm12 = simm13 < 0 ? -simm13 : simm13;
+   UInt bL     = isLoad ? 1 : 0;
+   vassert(simm12 >= 0 && simm12 <= 4095);
+   /* ldr/str rD, [rN, #+/-simm12] -- word access, so bB == 0. */
+   UInt instr = XXXXX___(X1110, X0101, BITS4(bP,/*bB*/0,0,bL),
+                         iregEnc(am->ARMam1.RI.reg), rD);
+   *p++ = instr | simm12;
+   return p;
+}
+
+
+/* Emit an instruction into buf and return the number of bytes used.
+   Note that buf is not the insn's final place, and therefore it is
+   imperative to emit position-independent code.  If the emitted
+   instruction was a profiler inc, set *is_profInc to True, else
+   leave it unchanged. */
+
+Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
+                    UChar* buf, Int nbuf, const ARMInstr* i, 
+                    Bool mode64, VexEndness endness_host,
+                    const void* disp_cp_chain_me_to_slowEP,
+                    const void* disp_cp_chain_me_to_fastEP,
+                    const void* disp_cp_xindir,
+                    const void* disp_cp_xassisted )
+{
+   UInt* p = (UInt*)buf;
+   vassert(nbuf >= 32);
+   vassert(mode64 == False);
+   vassert(0 == (((HWord)buf) & 3));
+
+   switch (i->tag) {
+      case ARMin_Alu: {
+         UInt     instr, subopc;
+         UInt     rD   = iregEnc(i->ARMin.Alu.dst);
+         UInt     rN   = iregEnc(i->ARMin.Alu.argL);
+         ARMRI84* argR = i->ARMin.Alu.argR;
+         switch (i->ARMin.Alu.op) {
+            case ARMalu_ADDS: /* fallthru */
+            case ARMalu_ADD:  subopc = X0100; break;
+            case ARMalu_ADC:  subopc = X0101; break;
+            case ARMalu_SUBS: /* fallthru */
+            case ARMalu_SUB:  subopc = X0010; break;
+            case ARMalu_SBC:  subopc = X0110; break;
+            case ARMalu_AND:  subopc = X0000; break;
+            case ARMalu_BIC:  subopc = X1110; break;
+            case ARMalu_OR:   subopc = X1100; break;
+            case ARMalu_XOR:  subopc = X0001; break;
+            default: goto bad;
+         }
+         instr = skeletal_RI84(argR);
+         instr |= XXXXX___(X1110, (1 & (subopc >> 3)),
+                           (subopc << 1) & 0xF, rN, rD);
+         if (i->ARMin.Alu.op == ARMalu_ADDS
+             || i->ARMin.Alu.op == ARMalu_SUBS) {
+            instr |= 1<<20;  /* set the S bit */
+         }
+         *p++ = instr;
+         goto done;
+      }
+      case ARMin_Shift: {
+         UInt    instr, subopc;
+         UInt    rD   = iregEnc(i->ARMin.Shift.dst);
+         UInt    rM   = iregEnc(i->ARMin.Shift.argL);
+         ARMRI5* argR = i->ARMin.Shift.argR;
+         switch (i->ARMin.Shift.op) {
+            case ARMsh_SHL: subopc = X0000; break;
+            case ARMsh_SHR: subopc = X0001; break;
+            case ARMsh_SAR: subopc = X0010; break;
+            default: goto bad;
+         }
+         instr = skeletal_RI5(argR);
+         instr |= XXXXX__X(X1110,X0001,X1010,X0000,rD, /* _ _ */ rM);
+         instr |= (subopc & 3) << 5;
+         *p++ = instr;
+         goto done;
+      }
+      case ARMin_Unary: {
+         UInt instr;
+         UInt rDst = iregEnc(i->ARMin.Unary.dst);
+         UInt rSrc = iregEnc(i->ARMin.Unary.src);
+         switch (i->ARMin.Unary.op) {
+            case ARMun_CLZ:
+               instr = XXXXXXXX(X1110,X0001,X0110,X1111,
+                                rDst,X1111,X0001,rSrc);
+               *p++ = instr;
+               goto done;
+            case ARMun_NEG: /* RSB rD,rS,#0 */
+               instr = XXXXX___(X1110,0x2,0x6,rSrc,rDst);
+               *p++ = instr;
+               goto done;
+            case ARMun_NOT: {
+               UInt subopc = X1111; /* MVN */
+               instr = rSrc;
+               instr |= XXXXX___(X1110, (1 & (subopc >> 3)),
+                                 (subopc << 1) & 0xF, 0, rDst);
+               *p++ = instr;
+               goto done;
+            }
+            default:
+               break;
+         }
+         goto bad;
+      }
+      case ARMin_CmpOrTst: {
+         UInt instr  = skeletal_RI84(i->ARMin.CmpOrTst.argR);
+         UInt subopc = i->ARMin.CmpOrTst.isCmp ? X1010 : X1000;
+         UInt SBZ    = 0;
+         instr |= XXXXX___(X1110, (1 & (subopc >> 3)),
+                           ((subopc << 1) & 0xF) | 1,
+                           iregEnc(i->ARMin.CmpOrTst.argL), SBZ );
+         *p++ = instr;
+         goto done;
+      }
+      case ARMin_Mov: {
+         UInt instr  = skeletal_RI84(i->ARMin.Mov.src);
+         UInt subopc = X1101; /* MOV */
+         UInt SBZ    = 0;
+         instr |= XXXXX___(X1110, (1 & (subopc >> 3)),
+                           (subopc << 1) & 0xF, SBZ,
+                           iregEnc(i->ARMin.Mov.dst));
+         *p++ = instr;
+         goto done;
+      }
+      case ARMin_Imm32: {
+         p = imm32_to_ireg( (UInt*)p, iregEnc(i->ARMin.Imm32.dst),
+                                      i->ARMin.Imm32.imm32 );
+         goto done;
+      }
+      case ARMin_LdSt32:
+      case ARMin_LdSt8U: {
+         UInt        bL, bB;
+         HReg        rD;
+         ARMAMode1*  am;
+         ARMCondCode cc;
+         if (i->tag == ARMin_LdSt32) {
+            bB = 0;
+            bL = i->ARMin.LdSt32.isLoad ? 1 : 0;
+            am = i->ARMin.LdSt32.amode;
+            rD = i->ARMin.LdSt32.rD;
+            cc = i->ARMin.LdSt32.cc;
+         } else {
+            bB = 1;
+            bL = i->ARMin.LdSt8U.isLoad ? 1 : 0;
+            am = i->ARMin.LdSt8U.amode;
+            rD = i->ARMin.LdSt8U.rD;
+            cc = i->ARMin.LdSt8U.cc;
+         }
+         vassert(cc != ARMcc_NV);
+         if (am->tag == ARMam1_RI) {
+            Int  simm12;
+            UInt instr, bP;
+            if (am->ARMam1.RI.simm13 < 0) {
+               bP = 0;
+               simm12 = -am->ARMam1.RI.simm13;
+            } else {
+               bP = 1;
+               simm12 = am->ARMam1.RI.simm13;
+            }
+            vassert(simm12 >= 0 && simm12 <= 4095);
+            instr = XXXXX___(cc,X0101,BITS4(bP,bB,0,bL),
+                             iregEnc(am->ARMam1.RI.reg),
+                             iregEnc(rD));
+            instr |= simm12;
+            *p++ = instr;
+            goto done;
+         } else {
+            // RR case
+            goto bad;
+         }
+      }
+      case ARMin_LdSt16: {
+         HReg        rD = i->ARMin.LdSt16.rD;
+         UInt        bS = i->ARMin.LdSt16.signedLoad ? 1 : 0;
+         UInt        bL = i->ARMin.LdSt16.isLoad ? 1 : 0;
+         ARMAMode2*  am = i->ARMin.LdSt16.amode;
+         ARMCondCode cc = i->ARMin.LdSt16.cc;
+         vassert(cc != ARMcc_NV);
+         if (am->tag == ARMam2_RI) {
+            HReg rN = am->ARMam2.RI.reg;
+            Int  simm8;
+            UInt bP, imm8hi, imm8lo, instr;
+            if (am->ARMam2.RI.simm9 < 0) {
+               bP = 0;
+               simm8 = -am->ARMam2.RI.simm9;
+            } else {
+               bP = 1;
+               simm8 = am->ARMam2.RI.simm9;
+            }
+            vassert(simm8 >= 0 && simm8 <= 255);
+            imm8hi = (simm8 >> 4) & 0xF;
+            imm8lo = simm8 & 0xF;
+            vassert(!(bL == 0 && bS == 1)); // "! signed store"
+            /**/ if (bL == 0 && bS == 0) {
+               // strh
+               instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,0), iregEnc(rN),
+                                iregEnc(rD), imm8hi, X1011, imm8lo);
+               *p++ = instr;
+               goto done;
+            }
+            else if (bL == 1 && bS == 0) {
+               // ldrh
+               instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,1), iregEnc(rN),
+                                iregEnc(rD), imm8hi, X1011, imm8lo);
+               *p++ = instr;
+               goto done;
+            }
+            else if (bL == 1 && bS == 1) {
+               // ldrsh
+               instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,1), iregEnc(rN),
+                                iregEnc(rD), imm8hi, X1111, imm8lo);
+               *p++ = instr;
+               goto done;
+            }
+            else vassert(0); // ill-constructed insn
+         } else {
+            // RR case
+            goto bad;
+         }
+      }
+      case ARMin_Ld8S: {
+         HReg        rD = i->ARMin.Ld8S.rD;
+         ARMAMode2*  am = i->ARMin.Ld8S.amode;
+         ARMCondCode cc = i->ARMin.Ld8S.cc;
+         vassert(cc != ARMcc_NV);
+         if (am->tag == ARMam2_RI) {
+            HReg rN = am->ARMam2.RI.reg;
+            Int  simm8;
+            UInt bP, imm8hi, imm8lo, instr;
+            if (am->ARMam2.RI.simm9 < 0) {
+               bP = 0;
+               simm8 = -am->ARMam2.RI.simm9;
+            } else {
+               bP = 1;
+               simm8 = am->ARMam2.RI.simm9;
+            }
+            vassert(simm8 >= 0 && simm8 <= 255);
+            imm8hi = (simm8 >> 4) & 0xF;
+            imm8lo = simm8 & 0xF;
+            // ldrsb
+            instr = XXXXXXXX(cc,X0001, BITS4(bP,1,0,1), iregEnc(rN),
+                             iregEnc(rD), imm8hi, X1101, imm8lo);
+            *p++ = instr;
+            goto done;
+         } else {
+            // RR case
+            goto bad;
+         }
+      }
+
+      case ARMin_XDirect: {
+         /* NB: what goes on here has to be very closely coordinated
+            with the chainXDirect_ARM and unchainXDirect_ARM below. */
+         /* We're generating chain-me requests here, so we need to be
+            sure this is actually allowed -- no-redir translations
+            can't use chain-me's.  Hence: */
+         vassert(disp_cp_chain_me_to_slowEP != NULL);
+         vassert(disp_cp_chain_me_to_fastEP != NULL);
+
+         /* Use ptmp for backpatching conditional jumps. */
+         UInt* ptmp = NULL;
+
+         /* First off, if this is conditional, create a conditional
+            jump over the rest of it.  Or at least, leave a space for
+            it that we will shortly fill in. */
+         if (i->ARMin.XDirect.cond != ARMcc_AL) {
+            vassert(i->ARMin.XDirect.cond != ARMcc_NV);
+            ptmp = p;
+            *p++ = 0;
+         }
+
+         /* Update the guest R15T. */
+         /* movw r12, lo16(dstGA) */
+         /* movt r12, hi16(dstGA) */
+         /* str r12, amR15T */
+         p = imm32_to_ireg(p, /*r*/12, i->ARMin.XDirect.dstGA);
+         p = do_load_or_store32(p, False/*!isLoad*/,
+                                /*r*/12, i->ARMin.XDirect.amR15T);
+
+         /* --- FIRST PATCHABLE BYTE follows --- */
+         /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're
+            calling to) backs up the return address, so as to find the
+            address of the first patchable byte.  So: don't change the
+            number of instructions (3) below. */
+         /* movw r12, lo16(VG_(disp_cp_chain_me_to_{slowEP,fastEP})) */
+         /* movt r12, hi16(VG_(disp_cp_chain_me_to_{slowEP,fastEP})) */
+         /* blx  r12  (A1) */
+         const void* disp_cp_chain_me
+                  = i->ARMin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP 
+                                              : disp_cp_chain_me_to_slowEP;
+         p = imm32_to_ireg_EXACTLY2(p, /*r*/12,
+                                    (UInt)(Addr)disp_cp_chain_me);
+         *p++ = 0xE12FFF3C;
+         /* --- END of PATCHABLE BYTES --- */
+
+         /* Fix up the conditional jump, if there was one. */
+         if (i->ARMin.XDirect.cond != ARMcc_AL) {
+            Int delta = (UChar*)p - (UChar*)ptmp; /* must be signed */
+            vassert(delta > 0 && delta < 40);
+            vassert((delta & 3) == 0);
+            UInt notCond = 1 ^ (UInt)i->ARMin.XDirect.cond;
+            vassert(notCond <= 13); /* Neither AL nor NV */
+            delta = (delta >> 2) - 2;
+            *ptmp = XX______(notCond, X1010) | (delta & 0xFFFFFF);
+         }
+         goto done;
+      }
+
+      case ARMin_XIndir: {
+         /* We're generating transfers that could lead indirectly to a
+            chain-me, so we need to be sure this is actually allowed
+            -- no-redir translations are not allowed to reach normal
+            translations without going through the scheduler.  That
+            means no XDirects or XIndirs out from no-redir
+            translations.  Hence: */
+         vassert(disp_cp_xindir != NULL);
+
+         /* Use ptmp for backpatching conditional jumps. */
+         UInt* ptmp = NULL;
+
+         /* First off, if this is conditional, create a conditional
+            jump over the rest of it.  Or at least, leave a space for
+            it that we will shortly fill in. */
+         if (i->ARMin.XIndir.cond != ARMcc_AL) {
+            vassert(i->ARMin.XIndir.cond != ARMcc_NV);
+            ptmp = p;
+            *p++ = 0;
+         }
+
+         /* Update the guest R15T. */
+         /* str r-dstGA, amR15T */
+         p = do_load_or_store32(p, False/*!isLoad*/,
+                                iregEnc(i->ARMin.XIndir.dstGA),
+                                i->ARMin.XIndir.amR15T);
+
+         /* movw r12, lo16(VG_(disp_cp_xindir)) */
+         /* movt r12, hi16(VG_(disp_cp_xindir)) */
+         /* bx   r12  (A1) */
+         p = imm32_to_ireg(p, /*r*/12, (UInt)(Addr)disp_cp_xindir);
+         *p++ = 0xE12FFF1C;
+
+         /* Fix up the conditional jump, if there was one. */
+         if (i->ARMin.XIndir.cond != ARMcc_AL) {
+            Int delta = (UChar*)p - (UChar*)ptmp; /* must be signed */
+            vassert(delta > 0 && delta < 40);
+            vassert((delta & 3) == 0);
+            UInt notCond = 1 ^ (UInt)i->ARMin.XIndir.cond;
+            vassert(notCond <= 13); /* Neither AL nor NV */
+            delta = (delta >> 2) - 2;
+            *ptmp = XX______(notCond, X1010) | (delta & 0xFFFFFF);
+         }
+         goto done;
+      }
+
+      case ARMin_XAssisted: {
+         /* Use ptmp for backpatching conditional jumps. */
+         UInt* ptmp = NULL;
+
+         /* First off, if this is conditional, create a conditional
+            jump over the rest of it.  Or at least, leave a space for
+            it that we will shortly fill in. */
+         if (i->ARMin.XAssisted.cond != ARMcc_AL) {
+            vassert(i->ARMin.XAssisted.cond != ARMcc_NV);
+            ptmp = p;
+            *p++ = 0;
+         }
+
+         /* Update the guest R15T. */
+         /* str r-dstGA, amR15T */
+         p = do_load_or_store32(p, False/*!isLoad*/,
+                                iregEnc(i->ARMin.XAssisted.dstGA),
+                                i->ARMin.XAssisted.amR15T);
+
+         /* movw r8,  $magic_number */
+         UInt trcval = 0;
+         switch (i->ARMin.XAssisted.jk) {
+            case Ijk_ClientReq:   trcval = VEX_TRC_JMP_CLIENTREQ;   break;
+            case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
+            //case Ijk_Sys_int128:  trcval = VEX_TRC_JMP_SYS_INT128;  break;
+            case Ijk_Yield:       trcval = VEX_TRC_JMP_YIELD;       break;
+            //case Ijk_EmWarn:      trcval = VEX_TRC_JMP_EMWARN;      break;
+            //case Ijk_MapFail:     trcval = VEX_TRC_JMP_MAPFAIL;     break;
+            case Ijk_NoDecode:    trcval = VEX_TRC_JMP_NODECODE;    break;
+            case Ijk_InvalICache: trcval = VEX_TRC_JMP_INVALICACHE; break;
+            case Ijk_NoRedir:     trcval = VEX_TRC_JMP_NOREDIR;     break;
+            //case Ijk_SigTRAP:     trcval = VEX_TRC_JMP_SIGTRAP;     break;
+            //case Ijk_SigSEGV:     trcval = VEX_TRC_JMP_SIGSEGV;     break;
+            case Ijk_Boring:      trcval = VEX_TRC_JMP_BORING;      break;
+            /* We don't expect to see the following being assisted. */
+            //case Ijk_Ret:
+            //case Ijk_Call:
+            /* fallthrough */
+            default: 
+               ppIRJumpKind(i->ARMin.XAssisted.jk);
+               vpanic("emit_ARMInstr.ARMin_XAssisted: unexpected jump kind");
+         }
+         vassert(trcval != 0);
+         p = imm32_to_ireg(p, /*r*/8, trcval);
+
+         /* movw r12, lo16(VG_(disp_cp_xassisted)) */
+         /* movt r12, hi16(VG_(disp_cp_xassisted)) */
+         /* bx   r12  (A1) */
+         p = imm32_to_ireg(p, /*r*/12, (UInt)(Addr)disp_cp_xassisted);
+         *p++ = 0xE12FFF1C;
+
+         /* Fix up the conditional jump, if there was one. */
+         if (i->ARMin.XAssisted.cond != ARMcc_AL) {
+            Int delta = (UChar*)p - (UChar*)ptmp; /* must be signed */
+            vassert(delta > 0 && delta < 40);
+            vassert((delta & 3) == 0);
+            UInt notCond = 1 ^ (UInt)i->ARMin.XAssisted.cond;
+            vassert(notCond <= 13); /* Neither AL nor NV */
+            delta = (delta >> 2) - 2;
+            *ptmp = XX______(notCond, X1010) | (delta & 0xFFFFFF);
+         }
+         goto done;
+      }
+
+      case ARMin_CMov: {
+         UInt instr  = skeletal_RI84(i->ARMin.CMov.src);
+         UInt subopc = X1101; /* MOV */
+         UInt SBZ    = 0;
+         instr |= XXXXX___(i->ARMin.CMov.cond, (1 & (subopc >> 3)),
+                           (subopc << 1) & 0xF, SBZ,
+                           iregEnc(i->ARMin.CMov.dst));
+         *p++ = instr;
+         goto done;
+      }
+
+      case ARMin_Call: {
+         UInt instr;
+         /* Decide on a scratch reg used to hold to the call address.
+            This has to be done as per the comments in getRegUsage. */
+         Int scratchNo;
+         switch (i->ARMin.Call.nArgRegs) {
+            case 0:  scratchNo = 0;  break;
+            case 1:  scratchNo = 1;  break;
+            case 2:  scratchNo = 2;  break;
+            case 3:  scratchNo = 3;  break;
+            case 4:  scratchNo = 11; break;
+            default: vassert(0);
+         }
+         /* If we don't need to do any fixup actions in the case that
+            the call doesn't happen, just do the simple thing and emit
+            straight-line code.  We hope this is the common case. */
+         if (i->ARMin.Call.cond == ARMcc_AL/*call always happens*/
+             || i->ARMin.Call.rloc.pri == RLPri_None/*no fixup action*/) {
+            // r"scratchNo" = &target
+            p = imm32_to_ireg( (UInt*)p,
+                               scratchNo, (UInt)i->ARMin.Call.target );
+            // blx{cond} r"scratchNo"
+            instr = XXX___XX(i->ARMin.Call.cond, X0001, X0010, /*___*/
+                             X0011, scratchNo);
+            instr |= 0xFFF << 8; // stick in the SBOnes
+            *p++ = instr;
+         } else {
+            Int delta;
+            /* Complex case.  We have to generate an if-then-else
+               diamond. */
+            // before:
+            //   b{!cond} else:
+            //   r"scratchNo" = &target
+            //   blx{AL} r"scratchNo"
+            // preElse:
+            //   b after:
+            // else:
+            //   mov r0, #0x55555555  // possibly
+            //   mov r1, r0           // possibly
+            // after:
+
+            // before:
+            UInt* pBefore = p;
+
+            //   b{!cond} else:  // ptmp1 points here
+            *p++ = 0; // filled in later
+
+            //   r"scratchNo" = &target
+            p = imm32_to_ireg( (UInt*)p,
+                               scratchNo, (UInt)i->ARMin.Call.target );
+
+            //   blx{AL} r"scratchNo"
+            instr = XXX___XX(ARMcc_AL, X0001, X0010, /*___*/
+                             X0011, scratchNo);
+            instr |= 0xFFF << 8; // stick in the SBOnes
+            *p++ = instr;
+
+            // preElse:
+            UInt* pPreElse = p;
+
+            //   b after:
+            *p++ = 0; // filled in later
+
+            // else:
+            delta = (UChar*)p - (UChar*)pBefore;
+            delta = (delta >> 2) - 2;
+            *pBefore
+               = XX______(1 ^ i->ARMin.Call.cond, X1010) | (delta & 0xFFFFFF);
+
+            /* Do the 'else' actions */
+            switch (i->ARMin.Call.rloc.pri) {
+               case RLPri_Int:
+                  p = imm32_to_ireg_EXACTLY2(p, /*r*/0, 0x55555555);
+                  break;
+               case RLPri_2Int:
+                  vassert(0); //ATC
+                  p = imm32_to_ireg_EXACTLY2(p, /*r*/0, 0x55555555);
+                  /* mov r1, r0 */
+                  *p++ = 0xE1A01000;
+                  break;
+               case RLPri_None: case RLPri_INVALID: default:
+                  vassert(0);
+            }
+
+            // after:
+            delta = (UChar*)p - (UChar*)pPreElse;
+            delta = (delta >> 2) - 2;
+            *pPreElse = XX______(ARMcc_AL, X1010) | (delta & 0xFFFFFF);
+         }
+
+         goto done;
+      }
+
+      case ARMin_Mul: {
+         /* E0000392   mul     r0, r2, r3
+            E0810392   umull   r0(LO), r1(HI), r2, r3
+            E0C10392   smull   r0(LO), r1(HI), r2, r3
+         */
+         switch (i->ARMin.Mul.op) {
+            case ARMmul_PLAIN: *p++ = 0xE0000392; goto done;
+            case ARMmul_ZX:    *p++ = 0xE0810392; goto done;
+            case ARMmul_SX:    *p++ = 0xE0C10392; goto done;
+            default: vassert(0);
+         }
+         goto bad;
+      }
+      case ARMin_LdrEX: {
+         /* E1D42F9F   ldrexb r2, [r4]
+            E1F42F9F   ldrexh r2, [r4]
+            E1942F9F   ldrex  r2, [r4]
+            E1B42F9F   ldrexd r2, r3, [r4]
+         */
+         switch (i->ARMin.LdrEX.szB) {
+            case 1: *p++ = 0xE1D42F9F; goto done;
+            case 2: *p++ = 0xE1F42F9F; goto done;
+            case 4: *p++ = 0xE1942F9F; goto done;
+            case 8: *p++ = 0xE1B42F9F; goto done;
+            default: break;
+         }
+         goto bad;
+      }
+      case ARMin_StrEX: {
+         /* E1C40F92   strexb r0, r2, [r4]
+            E1E40F92   strexh r0, r2, [r4]
+            E1840F92   strex  r0, r2, [r4]
+            E1A40F92   strexd r0, r2, r3, [r4]
+         */
+         switch (i->ARMin.StrEX.szB) {
+            case 1: *p++ = 0xE1C40F92; goto done;
+            case 2: *p++ = 0xE1E40F92; goto done;
+            case 4: *p++ = 0xE1840F92; goto done;
+            case 8: *p++ = 0xE1A40F92; goto done;
+            default: break;
+         }
+         goto bad;
+      }
+      case ARMin_VLdStD: {
+         UInt dD     = dregEnc(i->ARMin.VLdStD.dD);
+         UInt rN     = iregEnc(i->ARMin.VLdStD.amode->reg);
+         Int  simm11 = i->ARMin.VLdStD.amode->simm11;
+         UInt off8   = simm11 >= 0 ? simm11 : ((UInt)(-simm11));
+         UInt bU     = simm11 >= 0 ? 1 : 0;
+         UInt bL     = i->ARMin.VLdStD.isLoad ? 1 : 0;
+         UInt insn;
+         vassert(0 == (off8 & 3));
+         off8 >>= 2;
+         vassert(0 == (off8 & 0xFFFFFF00));
+         insn = XXXXXX__(0xE,X1101,BITS4(bU,0,0,bL),rN,dD,X1011);
+         insn |= off8;
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VLdStS: {
+         UInt fD     = fregEnc(i->ARMin.VLdStS.fD);
+         UInt rN     = iregEnc(i->ARMin.VLdStS.amode->reg);
+         Int  simm11 = i->ARMin.VLdStS.amode->simm11;
+         UInt off8   = simm11 >= 0 ? simm11 : ((UInt)(-simm11));
+         UInt bU     = simm11 >= 0 ? 1 : 0;
+         UInt bL     = i->ARMin.VLdStS.isLoad ? 1 : 0;
+         UInt bD     = fD & 1;
+         UInt insn;
+         vassert(0 == (off8 & 3));
+         off8 >>= 2;
+         vassert(0 == (off8 & 0xFFFFFF00));
+         insn = XXXXXX__(0xE,X1101,BITS4(bU,bD,0,bL),rN, (fD >> 1), X1010);
+         insn |= off8;
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VAluD: {
+         UInt dN = dregEnc(i->ARMin.VAluD.argL);
+         UInt dD = dregEnc(i->ARMin.VAluD.dst);
+         UInt dM = dregEnc(i->ARMin.VAluD.argR);
+         UInt pqrs = X1111; /* undefined */
+         switch (i->ARMin.VAluD.op) {
+            case ARMvfp_ADD: pqrs = X0110; break;
+            case ARMvfp_SUB: pqrs = X0111; break;
+            case ARMvfp_MUL: pqrs = X0100; break;
+            case ARMvfp_DIV: pqrs = X1000; break;
+            default: goto bad;
+         }
+         vassert(pqrs != X1111);
+         UInt bP  = (pqrs >> 3) & 1;
+         UInt bQ  = (pqrs >> 2) & 1;
+         UInt bR  = (pqrs >> 1) & 1;
+         UInt bS  = (pqrs >> 0) & 1;
+         UInt insn = XXXXXXXX(0xE, X1110, BITS4(bP,0,bQ,bR), dN, dD,
+                              X1011, BITS4(0,bS,0,0), dM);
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VAluS: {
+         UInt dN = fregEnc(i->ARMin.VAluS.argL);
+         UInt dD = fregEnc(i->ARMin.VAluS.dst);
+         UInt dM = fregEnc(i->ARMin.VAluS.argR);
+         UInt bN = dN & 1;
+         UInt bD = dD & 1;
+         UInt bM = dM & 1;
+         UInt pqrs = X1111; /* undefined */
+         switch (i->ARMin.VAluS.op) {
+            case ARMvfp_ADD: pqrs = X0110; break;
+            case ARMvfp_SUB: pqrs = X0111; break;
+            case ARMvfp_MUL: pqrs = X0100; break;
+            case ARMvfp_DIV: pqrs = X1000; break;
+            default: goto bad;
+         }
+         vassert(pqrs != X1111);
+         UInt bP  = (pqrs >> 3) & 1;
+         UInt bQ  = (pqrs >> 2) & 1;
+         UInt bR  = (pqrs >> 1) & 1;
+         UInt bS  = (pqrs >> 0) & 1;
+         UInt insn = XXXXXXXX(0xE, X1110, BITS4(bP,bD,bQ,bR),
+                              (dN >> 1), (dD >> 1),
+                              X1010, BITS4(bN,bS,bM,0), (dM >> 1));
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VUnaryD: {
+         UInt dD   = dregEnc(i->ARMin.VUnaryD.dst);
+         UInt dM   = dregEnc(i->ARMin.VUnaryD.src);
+         UInt insn = 0;
+         switch (i->ARMin.VUnaryD.op) {
+            case ARMvfpu_COPY:
+               insn = XXXXXXXX(0xE, X1110,X1011,X0000,dD,X1011,X0100,dM);
+               break;
+            case ARMvfpu_ABS:
+               insn = XXXXXXXX(0xE, X1110,X1011,X0000,dD,X1011,X1100,dM);
+               break;
+            case ARMvfpu_NEG:
+               insn = XXXXXXXX(0xE, X1110,X1011,X0001,dD,X1011,X0100,dM);
+               break;
+            case ARMvfpu_SQRT:
+               insn = XXXXXXXX(0xE, X1110,X1011,X0001,dD,X1011,X1100,dM);
+               break;
+            default:
+               goto bad;
+         }
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VUnaryS: {
+         UInt fD   = fregEnc(i->ARMin.VUnaryS.dst);
+         UInt fM   = fregEnc(i->ARMin.VUnaryS.src);
+         UInt insn = 0;
+         switch (i->ARMin.VUnaryS.op) {
+            case ARMvfpu_COPY:
+               insn = XXXXXXXX(0xE, X1110, BITS4(1,(fD & 1),1,1), X0000,
+                               (fD >> 1), X1010, BITS4(0,1,(fM & 1),0),
+                               (fM >> 1));
+               break;
+            case ARMvfpu_ABS:
+               insn = XXXXXXXX(0xE, X1110, BITS4(1,(fD & 1),1,1), X0000,
+                               (fD >> 1), X1010, BITS4(1,1,(fM & 1),0),
+                               (fM >> 1));
+               break;
+            case ARMvfpu_NEG:
+               insn = XXXXXXXX(0xE, X1110, BITS4(1,(fD & 1),1,1), X0001,
+                               (fD >> 1), X1010, BITS4(0,1,(fM & 1),0),
+                               (fM >> 1));
+               break;
+            case ARMvfpu_SQRT:
+               insn = XXXXXXXX(0xE, X1110, BITS4(1,(fD & 1),1,1), X0001,
+                               (fD >> 1), X1010, BITS4(1,1,(fM & 1),0),
+                               (fM >> 1));
+               break;
+            default:
+               goto bad;
+         }
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VCmpD: {
+         UInt dD   = dregEnc(i->ARMin.VCmpD.argL);
+         UInt dM   = dregEnc(i->ARMin.VCmpD.argR);
+         UInt insn = XXXXXXXX(0xE, X1110, X1011, X0100, dD, X1011, X0100, dM);
+         *p++ = insn;       /* FCMPD dD, dM */
+         *p++ = 0xEEF1FA10; /* FMSTAT */
+         goto done;
+      }
+      case ARMin_VCMovD: {
+         UInt cc = (UInt)i->ARMin.VCMovD.cond;
+         UInt dD = dregEnc(i->ARMin.VCMovD.dst);
+         UInt dM = dregEnc(i->ARMin.VCMovD.src);
+         vassert(cc < 16 && cc != ARMcc_AL);
+         UInt insn = XXXXXXXX(cc, X1110,X1011,X0000,dD,X1011,X0100,dM);
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VCMovS: {
+         UInt cc = (UInt)i->ARMin.VCMovS.cond;
+         UInt fD = fregEnc(i->ARMin.VCMovS.dst);
+         UInt fM = fregEnc(i->ARMin.VCMovS.src);
+         vassert(cc < 16 && cc != ARMcc_AL);
+         UInt insn = XXXXXXXX(cc, X1110, BITS4(1,(fD & 1),1,1),
+                              X0000,(fD >> 1),X1010,
+                              BITS4(0,1,(fM & 1),0), (fM >> 1));
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VCvtSD: {
+         if (i->ARMin.VCvtSD.sToD) {
+            UInt dD = dregEnc(i->ARMin.VCvtSD.dst);
+            UInt fM = fregEnc(i->ARMin.VCvtSD.src);
+            UInt insn = XXXXXXXX(0xE, X1110, X1011, X0111, dD, X1010,
+                                 BITS4(1,1, (fM & 1), 0),
+                                 (fM >> 1));
+            *p++ = insn;
+            goto done;
+         } else {
+            UInt fD = fregEnc(i->ARMin.VCvtSD.dst);
+            UInt dM = dregEnc(i->ARMin.VCvtSD.src);
+            UInt insn = XXXXXXXX(0xE, X1110, BITS4(1,(fD & 1),1,1),
+                                 X0111, (fD >> 1),
+                                 X1011, X1100, dM);
+            *p++ = insn;
+            goto done;
+         }
+      }
+      case ARMin_VXferD: {
+         UInt dD  = dregEnc(i->ARMin.VXferD.dD);
+         UInt rHi = iregEnc(i->ARMin.VXferD.rHi);
+         UInt rLo = iregEnc(i->ARMin.VXferD.rLo);
+         /* vmov dD, rLo, rHi is
+            E C 4 rHi rLo B (0,0,dD[4],1) dD[3:0]
+            vmov rLo, rHi, dD is
+            E C 5 rHi rLo B (0,0,dD[4],1) dD[3:0]
+         */
+         UInt insn
+            = XXXXXXXX(0xE, 0xC, i->ARMin.VXferD.toD ? 4 : 5,
+                       rHi, rLo, 0xB,
+                       BITS4(0,0, ((dD >> 4) & 1), 1), (dD & 0xF));
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VXferS: {
+         UInt fD  = fregEnc(i->ARMin.VXferS.fD);
+         UInt rLo = iregEnc(i->ARMin.VXferS.rLo);
+         /* vmov fD, rLo is
+            E E 0 fD[4:1] rLo A (fD[0],0,0,1) 0
+            vmov rLo, fD is
+            E E 1 fD[4:1] rLo A (fD[0],0,0,1) 0
+         */
+         UInt insn
+            = XXXXXXXX(0xE, 0xE, i->ARMin.VXferS.toS ? 0 : 1,
+                       (fD >> 1) & 0xF, rLo, 0xA, 
+                       BITS4((fD & 1),0,0,1), 0);
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_VCvtID: {
+         Bool iToD = i->ARMin.VCvtID.iToD;
+         Bool syned = i->ARMin.VCvtID.syned;
+         if (iToD && syned) {
+            // FSITOD: I32S-in-freg to F64-in-dreg
+            UInt regF = fregEnc(i->ARMin.VCvtID.src);
+            UInt regD = dregEnc(i->ARMin.VCvtID.dst);
+            UInt insn = XXXXXXXX(0xE, X1110, X1011, X1000, regD,
+                                 X1011, BITS4(1,1,(regF & 1),0),
+                                 (regF >> 1) & 0xF);
+            *p++ = insn;
+            goto done;
+         }
+         if (iToD && (!syned)) {
+            // FUITOD: I32U-in-freg to F64-in-dreg
+            UInt regF = fregEnc(i->ARMin.VCvtID.src);
+            UInt regD = dregEnc(i->ARMin.VCvtID.dst);
+            UInt insn = XXXXXXXX(0xE, X1110, X1011, X1000, regD,
+                                 X1011, BITS4(0,1,(regF & 1),0),
+                                 (regF >> 1) & 0xF);
+            *p++ = insn;
+            goto done;
+         }
+         if ((!iToD) && syned) {
+            // FTOSID: F64-in-dreg to I32S-in-freg
+            UInt regD = dregEnc(i->ARMin.VCvtID.src);
+            UInt regF = fregEnc(i->ARMin.VCvtID.dst);
+            UInt insn = XXXXXXXX(0xE, X1110, BITS4(1,(regF & 1),1,1),
+                                 X1101, (regF >> 1) & 0xF,
+                                 X1011, X0100, regD);
+            *p++ = insn;
+            goto done;
+         }
+         if ((!iToD) && (!syned)) {
+            // FTOUID: F64-in-dreg to I32U-in-freg
+            UInt regD = dregEnc(i->ARMin.VCvtID.src);
+            UInt regF = fregEnc(i->ARMin.VCvtID.dst);
+            UInt insn = XXXXXXXX(0xE, X1110, BITS4(1,(regF & 1),1,1),
+                                 X1100, (regF >> 1) & 0xF,
+                                 X1011, X0100, regD);
+            *p++ = insn;
+            goto done;
+         }
+         /*UNREACHED*/
+         vassert(0);
+      }
+      case ARMin_FPSCR: {
+         Bool toFPSCR = i->ARMin.FPSCR.toFPSCR;
+         UInt iReg    = iregEnc(i->ARMin.FPSCR.iReg);
+         if (toFPSCR) {
+            /* fmxr fpscr, iReg is EEE1 iReg A10 */
+            *p++ = 0xEEE10A10 | ((iReg & 0xF) << 12);
+            goto done;
+         }
+         goto bad; // FPSCR -> iReg case currently ATC
+      }
+      case ARMin_MFence: {
+         // It's not clear (to me) how these relate to the ARMv7
+         // versions, so let's just use the v7 versions as they
+         // are at least well documented.
+         //*p++ = 0xEE070F9A; /* mcr 15,0,r0,c7,c10,4 (DSB) */
+         //*p++ = 0xEE070FBA; /* mcr 15,0,r0,c7,c10,5 (DMB) */
+         //*p++ = 0xEE070F95; /* mcr 15,0,r0,c7,c5,4  (ISB) */
+         *p++ = 0xF57FF04F; /* DSB sy */
+         *p++ = 0xF57FF05F; /* DMB sy */
+         *p++ = 0xF57FF06F; /* ISB */
+         goto done;
+      }
+      case ARMin_CLREX: {
+         *p++ = 0xF57FF01F; /* clrex */
+         goto done;
+      }
+
+      case ARMin_NLdStQ: {
+         UInt regD = qregEnc(i->ARMin.NLdStQ.dQ) << 1;
+         UInt regN, regM;
+         UInt D = regD >> 4;
+         UInt bL = i->ARMin.NLdStQ.isLoad ? 1 : 0;
+         UInt insn;
+         vassert(hregClass(i->ARMin.NLdStQ.dQ) == HRcVec128);
+         regD &= 0xF;
+         if (i->ARMin.NLdStQ.amode->tag == ARMamN_RR) {
+            regN = iregEnc(i->ARMin.NLdStQ.amode->ARMamN.RR.rN);
+            regM = iregEnc(i->ARMin.NLdStQ.amode->ARMamN.RR.rM);
+         } else {
+            regN = iregEnc(i->ARMin.NLdStQ.amode->ARMamN.R.rN);
+            regM = 15;
+         }
+         insn = XXXXXXXX(0xF, X0100, BITS4(0, D, bL, 0),
+                              regN, regD, X1010, X1000, regM);
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_NLdStD: {
+         UInt regD = dregEnc(i->ARMin.NLdStD.dD);
+         UInt regN, regM;
+         UInt D = regD >> 4;
+         UInt bL = i->ARMin.NLdStD.isLoad ? 1 : 0;
+         UInt insn;
+         vassert(hregClass(i->ARMin.NLdStD.dD) == HRcFlt64);
+         regD &= 0xF;
+         if (i->ARMin.NLdStD.amode->tag == ARMamN_RR) {
+            regN = iregEnc(i->ARMin.NLdStD.amode->ARMamN.RR.rN);
+            regM = iregEnc(i->ARMin.NLdStD.amode->ARMamN.RR.rM);
+         } else {
+            regN = iregEnc(i->ARMin.NLdStD.amode->ARMamN.R.rN);
+            regM = 15;
+         }
+         insn = XXXXXXXX(0xF, X0100, BITS4(0, D, bL, 0),
+                              regN, regD, X0111, X1000, regM);
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_NUnaryS: {
+         UInt Q = i->ARMin.NUnaryS.Q ? 1 : 0;
+         UInt regD, D;
+         UInt regM, M;
+         UInt size = i->ARMin.NUnaryS.size;
+         UInt insn;
+         UInt opc, opc1, opc2;
+         switch (i->ARMin.NUnaryS.op) {
+	    case ARMneon_VDUP:
+               if (i->ARMin.NUnaryS.size >= 16)
+                  goto bad;
+               if (i->ARMin.NUnaryS.dst->tag != ARMNRS_Reg)
+                  goto bad;
+               if (i->ARMin.NUnaryS.src->tag != ARMNRS_Scalar)
+                  goto bad;
+               regD = (hregClass(i->ARMin.NUnaryS.dst->reg) == HRcVec128)
+                        ? (qregEnc(i->ARMin.NUnaryS.dst->reg) << 1)
+                        : dregEnc(i->ARMin.NUnaryS.dst->reg);
+               regM = (hregClass(i->ARMin.NUnaryS.src->reg) == HRcVec128)
+                        ? (qregEnc(i->ARMin.NUnaryS.src->reg) << 1)
+                        : dregEnc(i->ARMin.NUnaryS.src->reg);
+               D = regD >> 4;
+               M = regM >> 4;
+               regD &= 0xf;
+               regM &= 0xf;
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1),
+                               (i->ARMin.NUnaryS.size & 0xf), regD,
+                               X1100, BITS4(0,Q,M,0), regM);
+               *p++ = insn;
+               goto done; 
+            case ARMneon_SETELEM:
+               regD = Q ? (qregEnc(i->ARMin.NUnaryS.dst->reg) << 1) :
+                                dregEnc(i->ARMin.NUnaryS.dst->reg);
+               regM = iregEnc(i->ARMin.NUnaryS.src->reg);
+               M = regM >> 4;
+               D = regD >> 4;
+               regM &= 0xF;
+               regD &= 0xF;
+               if (i->ARMin.NUnaryS.dst->tag != ARMNRS_Scalar)
+                  goto bad;
+               switch (size) {
+                  case 0:
+                     if (i->ARMin.NUnaryS.dst->index > 7)
+                        goto bad;
+                     opc = X1000 | i->ARMin.NUnaryS.dst->index;
+                     break;
+                  case 1:
+                     if (i->ARMin.NUnaryS.dst->index > 3)
+                        goto bad;
+                     opc = X0001 | (i->ARMin.NUnaryS.dst->index << 1);
+                     break;
+                  case 2:
+                     if (i->ARMin.NUnaryS.dst->index > 1)
+                        goto bad;
+                     opc = X0000 | (i->ARMin.NUnaryS.dst->index << 2);
+                     break;
+                  default:
+                     goto bad;
+               }
+               opc1 = (opc >> 2) & 3;
+               opc2 = opc & 3;
+               insn = XXXXXXXX(0xE, X1110, BITS4(0,(opc1 >> 1),(opc1 & 1),0),
+                               regD, regM, X1011,
+                               BITS4(D,(opc2 >> 1),(opc2 & 1),1), X0000);
+               *p++ = insn;
+               goto done;
+            case ARMneon_GETELEMU:
+               regM = Q ? (qregEnc(i->ARMin.NUnaryS.src->reg) << 1) :
+                                dregEnc(i->ARMin.NUnaryS.src->reg);
+               regD = iregEnc(i->ARMin.NUnaryS.dst->reg);
+               M = regM >> 4;
+               D = regD >> 4;
+               regM &= 0xF;
+               regD &= 0xF;
+               if (i->ARMin.NUnaryS.src->tag != ARMNRS_Scalar)
+                  goto bad;
+               switch (size) {
+                  case 0:
+                     if (Q && i->ARMin.NUnaryS.src->index > 7) {
+                        regM++;
+                        i->ARMin.NUnaryS.src->index -= 8;
+                     }
+                     if (i->ARMin.NUnaryS.src->index > 7)
+                        goto bad;
+                     opc = X1000 | i->ARMin.NUnaryS.src->index;
+                     break;
+                  case 1:
+                     if (Q && i->ARMin.NUnaryS.src->index > 3) {
+                        regM++;
+                        i->ARMin.NUnaryS.src->index -= 4;
+                     }
+                     if (i->ARMin.NUnaryS.src->index > 3)
+                        goto bad;
+                     opc = X0001 | (i->ARMin.NUnaryS.src->index << 1);
+                     break;
+                  case 2:
+                     goto bad;
+                  default:
+                     goto bad;
+               }
+               opc1 = (opc >> 2) & 3;
+               opc2 = opc & 3;
+               insn = XXXXXXXX(0xE, X1110, BITS4(1,(opc1 >> 1),(opc1 & 1),1),
+                               regM, regD, X1011,
+                               BITS4(M,(opc2 >> 1),(opc2 & 1),1), X0000);
+               *p++ = insn;
+               goto done;
+            case ARMneon_GETELEMS:
+               regM = Q ? (qregEnc(i->ARMin.NUnaryS.src->reg) << 1) :
+                                dregEnc(i->ARMin.NUnaryS.src->reg);
+               regD = iregEnc(i->ARMin.NUnaryS.dst->reg);
+               M = regM >> 4;
+               D = regD >> 4;
+               regM &= 0xF;
+               regD &= 0xF;
+               if (i->ARMin.NUnaryS.src->tag != ARMNRS_Scalar)
+                  goto bad;
+               switch (size) {
+                  case 0:
+                     if (Q && i->ARMin.NUnaryS.src->index > 7) {
+                        regM++;
+                        i->ARMin.NUnaryS.src->index -= 8;
+                     }
+                     if (i->ARMin.NUnaryS.src->index > 7)
+                        goto bad;
+                     opc = X1000 | i->ARMin.NUnaryS.src->index;
+                     break;
+                  case 1:
+                     if (Q && i->ARMin.NUnaryS.src->index > 3) {
+                        regM++;
+                        i->ARMin.NUnaryS.src->index -= 4;
+                     }
+                     if (i->ARMin.NUnaryS.src->index > 3)
+                        goto bad;
+                     opc = X0001 | (i->ARMin.NUnaryS.src->index << 1);
+                     break;
+                  case 2:
+                     if (Q && i->ARMin.NUnaryS.src->index > 1) {
+                        regM++;
+                        i->ARMin.NUnaryS.src->index -= 2;
+                     }
+                     if (i->ARMin.NUnaryS.src->index > 1)
+                        goto bad;
+                     opc = X0000 | (i->ARMin.NUnaryS.src->index << 2);
+                     break;
+                  default:
+                     goto bad;
+               }
+               opc1 = (opc >> 2) & 3;
+               opc2 = opc & 3;
+               insn = XXXXXXXX(0xE, X1110, BITS4(0,(opc1 >> 1),(opc1 & 1),1),
+                               regM, regD, X1011,
+                               BITS4(M,(opc2 >> 1),(opc2 & 1),1), X0000);
+               *p++ = insn;
+               goto done;
+            default:
+               goto bad;
+         }
+      }
+      case ARMin_NUnary: {
+         UInt Q = i->ARMin.NUnary.Q ? 1 : 0;
+         UInt regD = (hregClass(i->ARMin.NUnary.dst) == HRcVec128)
+                       ? (qregEnc(i->ARMin.NUnary.dst) << 1)
+                       : dregEnc(i->ARMin.NUnary.dst);
+         UInt regM, M;
+         UInt D = regD >> 4;
+         UInt sz1 = i->ARMin.NUnary.size >> 1;
+         UInt sz2 = i->ARMin.NUnary.size & 1;
+         UInt sz = i->ARMin.NUnary.size;
+         UInt insn;
+         UInt F = 0; /* TODO: floating point EQZ ??? */
+         if (i->ARMin.NUnary.op != ARMneon_DUP) {
+            regM = (hregClass(i->ARMin.NUnary.src) == HRcVec128) 
+                     ? (qregEnc(i->ARMin.NUnary.src) << 1)
+                     : dregEnc(i->ARMin.NUnary.src);
+            M = regM >> 4;
+         } else {
+            regM = iregEnc(i->ARMin.NUnary.src);
+            M = regM >> 4;
+         }
+         regD &= 0xF;
+         regM &= 0xF;
+         switch (i->ARMin.NUnary.op) {
+            case ARMneon_COPY: /* VMOV reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,1,0), regM, regD, X0001,
+                               BITS4(M,Q,M,1), regM);
+               break;
+            case ARMneon_COPYN: /* VMOVN regD, regQ */
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,1,0),
+                               regD, X0010, BITS4(0,0,M,0), regM);
+               break;
+            case ARMneon_COPYQNSS: /* VQMOVN regD, regQ */
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,1,0),
+                               regD, X0010, BITS4(1,0,M,0), regM);
+               break;
+            case ARMneon_COPYQNUS: /* VQMOVUN regD, regQ */
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,1,0),
+                               regD, X0010, BITS4(0,1,M,0), regM);
+               break;
+            case ARMneon_COPYQNUU: /* VQMOVN regD, regQ */
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,1,0),
+                               regD, X0010, BITS4(1,1,M,0), regM);
+               break;
+            case ARMneon_COPYLS: /* VMOVL regQ, regD */
+               if (sz >= 3)
+                  goto bad;
+               insn = XXXXXXXX(0xF, X0010,
+                               BITS4(1,D,(sz == 2) ? 1 : 0,(sz == 1) ? 1 : 0),
+                               BITS4((sz == 0) ? 1 : 0,0,0,0),
+                               regD, X1010, BITS4(0,0,M,1), regM);
+               break;
+            case ARMneon_COPYLU: /* VMOVL regQ, regD */
+               if (sz >= 3)
+                  goto bad;
+               insn = XXXXXXXX(0xF, X0011,
+                               BITS4(1,D,(sz == 2) ? 1 : 0,(sz == 1) ? 1 : 0),
+                               BITS4((sz == 0) ? 1 : 0,0,0,0),
+                               regD, X1010, BITS4(0,0,M,1), regM);
+               break;
+            case ARMneon_NOT: /* VMVN reg, reg*/
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X0000, regD, X0101,
+                               BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_EQZ:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,0,1),
+                               regD, BITS4(0,F,0,1), BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_CNT:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X0000, regD, X0101,
+                               BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_CLZ:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,0,0),
+                               regD, X0100, BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_CLS:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,0,0),
+                               regD, X0100, BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_ABS:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,0,1),
+                               regD, X0011, BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_DUP:
+               sz1 = i->ARMin.NUnary.size == 0 ? 1 : 0;
+               sz2 = i->ARMin.NUnary.size == 1 ? 1 : 0;
+               vassert(sz1 + sz2 < 2);
+               insn = XXXXXXXX(0xE, X1110, BITS4(1, sz1, Q, 0), regD, regM,
+                               X1011, BITS4(D,0,sz2,1), X0000);
+               break;
+            case ARMneon_REV16:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,0,0),
+                               regD, BITS4(0,0,0,1), BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_REV32:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,0,0),
+                               regD, BITS4(0,0,0,0), BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_REV64:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,0,0),
+                               regD, BITS4(0,0,0,0), BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_PADDLU:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,0,0),
+                               regD, X0010, BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_PADDLS:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,0,0),
+                               regD, X0010, BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_VQSHLNUU:
+               insn = XXXXXXXX(0xF, X0011,
+                               (1 << 3) | (D << 2) | ((sz >> 4) & 3),
+                               sz & 0xf, regD, X0111,
+                               BITS4(sz >> 6,Q,M,1), regM);
+               break;
+            case ARMneon_VQSHLNSS:
+               insn = XXXXXXXX(0xF, X0010,
+                               (1 << 3) | (D << 2) | ((sz >> 4) & 3),
+                               sz & 0xf, regD, X0111,
+                               BITS4(sz >> 6,Q,M,1), regM);
+               break;
+            case ARMneon_VQSHLNUS:
+               insn = XXXXXXXX(0xF, X0011,
+                               (1 << 3) | (D << 2) | ((sz >> 4) & 3),
+                               sz & 0xf, regD, X0110,
+                               BITS4(sz >> 6,Q,M,1), regM);
+               break;
+            case ARMneon_VCVTFtoS:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1011, regD, X0111,
+                               BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_VCVTFtoU:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1011, regD, X0111,
+                               BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_VCVTStoF:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1011, regD, X0110,
+                               BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_VCVTUtoF:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1011, regD, X0110,
+                               BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_VCVTFtoFixedU:
+               sz1 = (sz >> 5) & 1;
+               sz2 = (sz >> 4) & 1;
+               sz &= 0xf;
+               insn = XXXXXXXX(0xF, X0011,
+                               BITS4(1,D,sz1,sz2), sz, regD, X1111,
+                               BITS4(0,Q,M,1), regM);
+               break;
+            case ARMneon_VCVTFtoFixedS:
+               sz1 = (sz >> 5) & 1;
+               sz2 = (sz >> 4) & 1;
+               sz &= 0xf;
+               insn = XXXXXXXX(0xF, X0010,
+                               BITS4(1,D,sz1,sz2), sz, regD, X1111,
+                               BITS4(0,Q,M,1), regM);
+               break;
+            case ARMneon_VCVTFixedUtoF:
+               sz1 = (sz >> 5) & 1;
+               sz2 = (sz >> 4) & 1;
+               sz &= 0xf;
+               insn = XXXXXXXX(0xF, X0011,
+                               BITS4(1,D,sz1,sz2), sz, regD, X1110,
+                               BITS4(0,Q,M,1), regM);
+               break;
+            case ARMneon_VCVTFixedStoF:
+               sz1 = (sz >> 5) & 1;
+               sz2 = (sz >> 4) & 1;
+               sz &= 0xf;
+               insn = XXXXXXXX(0xF, X0010,
+                               BITS4(1,D,sz1,sz2), sz, regD, X1110,
+                               BITS4(0,Q,M,1), regM);
+               break;
+            case ARMneon_VCVTF32toF16:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X0110, regD, X0110,
+                               BITS4(0,0,M,0), regM);
+               break;
+            case ARMneon_VCVTF16toF32:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X0110, regD, X0111,
+                               BITS4(0,0,M,0), regM);
+               break;
+            case ARMneon_VRECIP:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1011, regD, X0100,
+                               BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_VRECIPF:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1011, regD, X0101,
+                               BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_VABSFP:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1001, regD, X0111,
+                               BITS4(0,Q,M,0), regM);
+               break;
+            case ARMneon_VRSQRTEFP:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1011, regD, X0101,
+                               BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_VRSQRTE:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1011, regD, X0100,
+                               BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_VNEGF:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), X1001, regD, X0111,
+                               BITS4(1,Q,M,0), regM);
+               break;
+
+            default:
+               goto bad;
+         }
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_NDual: {
+         UInt Q = i->ARMin.NDual.Q ? 1 : 0;
+         UInt regD = (hregClass(i->ARMin.NDual.arg1) == HRcVec128)
+                       ? (qregEnc(i->ARMin.NDual.arg1) << 1)
+                       : dregEnc(i->ARMin.NDual.arg1);
+         UInt regM = (hregClass(i->ARMin.NDual.arg2) == HRcVec128)
+                       ? (qregEnc(i->ARMin.NDual.arg2) << 1)
+                       : dregEnc(i->ARMin.NDual.arg2);
+         UInt D = regD >> 4;
+         UInt M = regM >> 4;
+         UInt sz1 = i->ARMin.NDual.size >> 1;
+         UInt sz2 = i->ARMin.NDual.size & 1;
+         UInt insn;
+         regD &= 0xF;
+         regM &= 0xF;
+         switch (i->ARMin.NDual.op) {
+            case ARMneon_TRN: /* VTRN reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,1,0),
+                               regD, X0000, BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_ZIP: /* VZIP reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,1,0),
+                               regD, X0001, BITS4(1,Q,M,0), regM);
+               break;
+            case ARMneon_UZP: /* VUZP reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), BITS4(sz1,sz2,1,0),
+                               regD, X0001, BITS4(0,Q,M,0), regM);
+               break;
+            default:
+               goto bad;
+         }
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_NBinary: {
+         UInt Q = i->ARMin.NBinary.Q ? 1 : 0;
+         UInt regD = (hregClass(i->ARMin.NBinary.dst) == HRcVec128)
+                       ? (qregEnc(i->ARMin.NBinary.dst) << 1)
+                       : dregEnc(i->ARMin.NBinary.dst);
+         UInt regN = (hregClass(i->ARMin.NBinary.argL) == HRcVec128)
+                       ? (qregEnc(i->ARMin.NBinary.argL) << 1)
+                       : dregEnc(i->ARMin.NBinary.argL);
+         UInt regM = (hregClass(i->ARMin.NBinary.argR) == HRcVec128)
+                       ? (qregEnc(i->ARMin.NBinary.argR) << 1)
+                       : dregEnc(i->ARMin.NBinary.argR);
+         UInt sz1 = i->ARMin.NBinary.size >> 1;
+         UInt sz2 = i->ARMin.NBinary.size & 1;
+         UInt D = regD >> 4;
+         UInt N = regN >> 4;
+         UInt M = regM >> 4;
+         UInt insn;
+         regD &= 0xF;
+         regM &= 0xF;
+         regN &= 0xF;
+         switch (i->ARMin.NBinary.op) {
+            case ARMneon_VAND: /* VAND reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,0,0), regN, regD, X0001,
+                               BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VORR: /* VORR reg, reg, reg*/
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,1,0), regN, regD, X0001,
+                               BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VXOR: /* VEOR reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,0,0), regN, regD, X0001,
+                               BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VADD: /* VADD reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1000, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VSUB: /* VSUB reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1000, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VMINU: /* VMIN.Uxx reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0110, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VMINS: /* VMIN.Sxx reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0110, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VMAXU: /* VMAX.Uxx reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0110, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VMAXS: /* VMAX.Sxx reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0110, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VRHADDS: /* VRHADD.Sxx reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0001, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VRHADDU: /* VRHADD.Uxx reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0001, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VQADDU: /* VQADD unsigned reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0000, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VQADDS: /* VQADD signed reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0000, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VQSUBU: /* VQSUB unsigned reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0010, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VQSUBS: /* VQSUB signed reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0010, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VCGTU: /* VCGT unsigned reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0011, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VCGTS: /* VCGT signed reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0011, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VCGEU: /* VCGE unsigned reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0011, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VCGES: /* VCGE signed reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0011, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VCEQ: /* VCEQ reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1000, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VEXT: /* VEXT.8 reg, reg, #imm4*/
+               if (i->ARMin.NBinary.size >= 16)
+                  goto bad;
+               insn = XXXXXXXX(0xF, X0010, BITS4(1,D,1,1), regN, regD,
+                               i->ARMin.NBinary.size & 0xf, BITS4(N,Q,M,0),
+                               regM);
+               break;
+            case ARMneon_VMUL:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1001, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VMULLU:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,sz1,sz2), regN, regD,
+                               X1100, BITS4(N,0,M,0), regM);
+               break;
+            case ARMneon_VMULLS:
+               insn = XXXXXXXX(0xF, X0010, BITS4(1,D,sz1,sz2), regN, regD,
+                               X1100, BITS4(N,0,M,0), regM);
+               break;
+            case ARMneon_VMULP:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1001, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VMULFP:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,0,0), regN, regD,
+                               X1101, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VMULLP:
+               insn = XXXXXXXX(0xF, X0010, BITS4(1,D,sz1,sz2), regN, regD,
+                               X1110, BITS4(N,0,M,0), regM);
+               break;
+            case ARMneon_VQDMULH:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1011, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VQRDMULH:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1011, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VQDMULL:
+               insn = XXXXXXXX(0xF, X0010, BITS4(1,D,sz1,sz2), regN, regD,
+                               X1101, BITS4(N,0,M,0), regM);
+               break;
+            case ARMneon_VTBL:
+               insn = XXXXXXXX(0xF, X0011, BITS4(1,D,1,1), regN, regD,
+                               X1000, BITS4(N,0,M,0), regM);
+               break;
+            case ARMneon_VPADD:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1011, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VPADDFP:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,0,0), regN, regD,
+                               X1101, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VPMINU:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1010, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VPMINS:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1010, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VPMAXU:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1010, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VPMAXS:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X1010, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VADDFP: /* VADD reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,0,0), regN, regD,
+                               X1101, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VSUBFP: /* VSUB reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,1,0), regN, regD,
+                               X1101, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VABDFP: /* VABD reg, reg, reg */
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,1,0), regN, regD,
+                               X1101, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VMINF:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,1,0), regN, regD,
+                               X1111, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VMAXF:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,0,0), regN, regD,
+                               X1111, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VPMINF:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,1,0), regN, regD,
+                               X1111, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VPMAXF:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,0,0), regN, regD,
+                               X1111, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VRECPS:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,0,0), regN, regD, X1111,
+                               BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VCGTF:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,1,0), regN, regD, X1110,
+                               BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VCGEF:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,0,0), regN, regD, X1110,
+                               BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VCEQF:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,0,0), regN, regD, X1110,
+                               BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VRSQRTS:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,1,0), regN, regD, X1111,
+                               BITS4(N,Q,M,1), regM);
+               break;
+            default:
+               goto bad;
+         }
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_NShift: {
+         UInt Q = i->ARMin.NShift.Q ? 1 : 0;
+         UInt regD = (hregClass(i->ARMin.NShift.dst) == HRcVec128)
+                       ? (qregEnc(i->ARMin.NShift.dst) << 1)
+                       : dregEnc(i->ARMin.NShift.dst);
+         UInt regM = (hregClass(i->ARMin.NShift.argL) == HRcVec128)
+                       ? (qregEnc(i->ARMin.NShift.argL) << 1)
+                       : dregEnc(i->ARMin.NShift.argL);
+         UInt regN = (hregClass(i->ARMin.NShift.argR) == HRcVec128)
+                       ? (qregEnc(i->ARMin.NShift.argR) << 1)
+                       : dregEnc(i->ARMin.NShift.argR);
+         UInt sz1 = i->ARMin.NShift.size >> 1;
+         UInt sz2 = i->ARMin.NShift.size & 1;
+         UInt D = regD >> 4;
+         UInt N = regN >> 4;
+         UInt M = regM >> 4;
+         UInt insn;
+         regD &= 0xF;
+         regM &= 0xF;
+         regN &= 0xF;
+         switch (i->ARMin.NShift.op) {
+            case ARMneon_VSHL:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0100, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VSAL:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0100, BITS4(N,Q,M,0), regM);
+               break;
+            case ARMneon_VQSHL:
+               insn = XXXXXXXX(0xF, X0011, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0100, BITS4(N,Q,M,1), regM);
+               break;
+            case ARMneon_VQSAL:
+               insn = XXXXXXXX(0xF, X0010, BITS4(0,D,sz1,sz2), regN, regD,
+                               X0100, BITS4(N,Q,M,1), regM);
+               break;
+            default:
+               goto bad;
+         }
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_NShl64: {
+         HReg regDreg = i->ARMin.NShl64.dst;
+         HReg regMreg = i->ARMin.NShl64.src;
+         UInt amt     = i->ARMin.NShl64.amt;
+         vassert(amt >= 1 && amt <= 63);
+         vassert(hregClass(regDreg) == HRcFlt64);
+         vassert(hregClass(regMreg) == HRcFlt64);
+         UInt regD = dregEnc(regDreg);
+         UInt regM = dregEnc(regMreg);
+         UInt D    = (regD >> 4) & 1;
+         UInt Vd   = regD & 0xF;
+         UInt L    = 1;
+         UInt Q    = 0; /* always 64-bit */
+         UInt M    = (regM >> 4) & 1;
+         UInt Vm   = regM & 0xF;
+         UInt insn = XXXXXXXX(X1111,X0010, BITS4(1,D,(amt>>5)&1,(amt>>4)&1),
+                              amt & 0xF, Vd, X0101, BITS4(L,Q,M,1), Vm);
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_NeonImm: {
+         UInt Q = (hregClass(i->ARMin.NeonImm.dst) == HRcVec128) ? 1 : 0;
+         UInt regD = Q ? (qregEnc(i->ARMin.NeonImm.dst) << 1) :
+                          dregEnc(i->ARMin.NeonImm.dst);
+         UInt D = regD >> 4;
+         UInt imm = i->ARMin.NeonImm.imm->imm8;
+         UInt tp = i->ARMin.NeonImm.imm->type;
+         UInt j = imm >> 7;
+         UInt imm3 = (imm >> 4) & 0x7;
+         UInt imm4 = imm & 0xF;
+         UInt cmode, op;
+         UInt insn;
+         regD &= 0xF;
+         if (tp == 9)
+            op = 1;
+         else
+            op = 0;
+         switch (tp) {
+            case 0:
+            case 1:
+            case 2:
+            case 3:
+            case 4:
+            case 5:
+               cmode = tp << 1;
+               break;
+            case 9:
+            case 6:
+               cmode = 14;
+               break;
+            case 7:
+               cmode = 12;
+               break;
+            case 8:
+               cmode = 13;
+               break;
+            case 10:
+               cmode = 15;
+               break;
+            default:
+               vpanic("ARMin_NeonImm");
+
+         }
+         insn = XXXXXXXX(0xF, BITS4(0,0,1,j), BITS4(1,D,0,0), imm3, regD,
+                         cmode, BITS4(0,Q,op,1), imm4);
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_NCMovQ: {
+         UInt cc = (UInt)i->ARMin.NCMovQ.cond;
+         UInt qM = qregEnc(i->ARMin.NCMovQ.src) << 1;
+         UInt qD = qregEnc(i->ARMin.NCMovQ.dst) << 1;
+         UInt vM = qM & 0xF;
+         UInt vD = qD & 0xF;
+         UInt M  = (qM >> 4) & 1;
+         UInt D  = (qD >> 4) & 1;
+         vassert(cc < 16 && cc != ARMcc_AL && cc != ARMcc_NV);
+         /* b!cc here+8: !cc A00 0000 */
+         UInt insn = XXXXXXXX(cc ^ 1, 0xA, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0);
+         *p++ = insn;
+         /* vmov qD, qM */
+         insn = XXXXXXXX(0xF, 0x2, BITS4(0,D,1,0),
+                         vM, vD, BITS4(0,0,0,1), BITS4(M,1,M,1), vM);
+         *p++ = insn;
+         goto done;
+      }
+      case ARMin_Add32: {
+         UInt regD = iregEnc(i->ARMin.Add32.rD);
+         UInt regN = iregEnc(i->ARMin.Add32.rN);
+         UInt imm32 = i->ARMin.Add32.imm32;
+         vassert(regD != regN);
+         /* MOV regD, imm32 */
+         p = imm32_to_ireg((UInt *)p, regD, imm32);
+         /* ADD regD, regN, regD */
+         UInt insn = XXXXXXXX(0xE, 0, X1000, regN, regD, 0, 0, regD);
+         *p++ = insn;
+         goto done;
+      }
+
+      case ARMin_EvCheck: {
+         /* We generate:
+               ldr  r12, [r8 + #4]   4 == offsetof(host_EvC_COUNTER)
+               subs r12, r12, #1  (A1)
+               str  r12, [r8 + #4]   4 == offsetof(host_EvC_COUNTER)
+               bpl  nofail
+               ldr  r12, [r8 + #0]   0 == offsetof(host_EvC_FAILADDR)
+               bx   r12
+              nofail:
+         */
+         UInt* p0 = p;
+         p = do_load_or_store32(p, True/*isLoad*/, /*r*/12,
+                                i->ARMin.EvCheck.amCounter);
+         *p++ = 0xE25CC001; /* subs r12, r12, #1 */
+         p = do_load_or_store32(p, False/*!isLoad*/, /*r*/12,
+                                i->ARMin.EvCheck.amCounter);
+         *p++ = 0x5A000001; /* bpl nofail */
+         p = do_load_or_store32(p, True/*isLoad*/, /*r*/12,
+                                i->ARMin.EvCheck.amFailAddr);
+         *p++ = 0xE12FFF1C; /* bx r12 */
+         /* nofail: */
+
+         /* Crosscheck */
+         vassert(evCheckSzB_ARM() == (UChar*)p - (UChar*)p0);
+         goto done;
+      }
+
+      case ARMin_ProfInc: {
+         /* We generate:
+              (ctrP is unknown now, so use 0x65556555 in the
+              expectation that a later call to LibVEX_patchProfCtr
+              will be used to fill in the immediate fields once the
+              right value is known.)
+            movw r12, lo16(0x65556555)
+            movt r12, hi16(0x65556555)
+            ldr  r11, [r12]
+            adds r11, r11, #1
+            str  r11, [r12]
+            ldr  r11, [r12+4]
+            adc  r11, r11, #0
+            str  r11, [r12+4]
+         */
+         p = imm32_to_ireg_EXACTLY2(p, /*r*/12, 0x65556555);
+         *p++ = 0xE59CB000;
+         *p++ = 0xE29BB001;
+         *p++ = 0xE58CB000;
+         *p++ = 0xE59CB004;
+         *p++ = 0xE2ABB000;
+         *p++ = 0xE58CB004;
+         /* Tell the caller .. */
+         vassert(!(*is_profInc));
+         *is_profInc = True;
+         goto done;
+      }
+
+      /* ... */
+      default: 
+         goto bad;
+    }
+
+  bad:
+   ppARMInstr(i);
+   vpanic("emit_ARMInstr");
+   /*NOTREACHED*/
+
+  done:
+   vassert(((UChar*)p) - &buf[0] <= 32);
+   return ((UChar*)p) - &buf[0];
+}
+
+
+/* How big is an event check?  See case for ARMin_EvCheck in
+   emit_ARMInstr just above.  That crosschecks what this returns, so
+   we can tell if we're inconsistent. */
+Int evCheckSzB_ARM (void)
+{
+   /* The event check is six 4-byte ARM instructions:
+      ldr / subs / str / bpl / ldr / bx -- see ARMin_EvCheck above. */
+   return 6 * 4;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+/* Rewrite the XDirect call site at |place_to_chain| so that, instead
+   of calling disp_cp_chain_me, it jumps directly to
+   |place_to_jump_to|.  Returns the 12-byte range that was modified,
+   so the caller can invalidate the instruction cache. */
+VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+                                 void* place_to_chain,
+                                 const void* disp_cp_chain_me_EXPECTED,
+                                 const void* place_to_jump_to )
+{
+   vassert(endness_host == VexEndnessLE);
+
+   /* What we're expecting to see is:
+        movw r12, lo16(disp_cp_chain_me_to_EXPECTED)
+        movt r12, hi16(disp_cp_chain_me_to_EXPECTED)
+        blx  r12
+      viz
+        <8 bytes generated by imm32_to_ireg_EXACTLY2>
+        E1 2F FF 3C
+   */
+   UInt* p = (UInt*)place_to_chain;
+   vassert(0 == (3 & (HWord)p));   /* site must be 32-bit aligned */
+   vassert(is_imm32_to_ireg_EXACTLY2(
+              p, /*r*/12, (UInt)(Addr)disp_cp_chain_me_EXPECTED));
+   vassert(p[2] == 0xE12FFF3C);    /* blx r12 */
+   /* And what we want to change it to is either:
+        (general case)
+          movw r12, lo16(place_to_jump_to)
+          movt r12, hi16(place_to_jump_to)
+          bx   r12
+        viz
+          <8 bytes generated by imm32_to_ireg_EXACTLY2>
+          E1 2F FF 1C
+      ---OR---
+        in the case where the displacement falls within 26 bits
+          b disp24; undef; undef
+        viz
+          EA <3 bytes == disp24>
+          FF 00 00 00
+          FF 00 00 00
+
+      In both cases the replacement has the same length as the original.
+      To remain sane & verifiable,
+      (1) limit the displacement for the short form to 
+          (say) +/- 30 million, so as to avoid wraparound
+          off-by-ones
+      (2) even if the short form is applicable, once every (say)
+          1024 times use the long form anyway, so as to maintain
+          verifiability
+   */
+
+   /* This is the delta we need to put into a B insn.  It's relative
+      to the start of the next-but-one insn, hence the -8.  */
+   Long delta   = (Long)((const UChar *)place_to_jump_to - (const UChar*)p) - 8;
+   Bool shortOK = delta >= -30*1000*1000 && delta < 30*1000*1000;
+   vassert(0 == (delta & (Long)3));   /* targets are insn-aligned */
+
+   static UInt shortCTR = 0; /* DO NOT MAKE NON-STATIC */
+   if (shortOK) {
+      shortCTR++; // thread safety bleh
+      /* Force the long form once every 1024 uses, per (2) above. */
+      if (0 == (shortCTR & 0x3FF)) {
+         shortOK = False;
+         if (0)
+            vex_printf("QQQ chainXDirect_ARM: shortCTR = %u, "
+                       "using long form\n", shortCTR);
+      }
+   }
+
+   /* And make the modifications. */
+   if (shortOK) {
+      Int simm24 = (Int)(delta >> 2);
+      /* Check the displacement really fits in signed 24 bits. */
+      vassert(simm24 == ((simm24 << 8) >> 8));
+      p[0] = 0xEA000000 | (simm24 & 0x00FFFFFF);   /* b simm24 */
+      p[1] = 0xFF000000;                           /* undef filler */
+      p[2] = 0xFF000000;                           /* undef filler */
+   } else {
+      (void)imm32_to_ireg_EXACTLY2(
+               p, /*r*/12, (UInt)(Addr)place_to_jump_to);
+      p[2] = 0xE12FFF1C;                           /* bx r12 */
+   }
+
+   VexInvalRange vir = {(HWord)p, 12};
+   return vir;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+/* Undo the effect of chainXDirect_ARM: restore the call site at
+   |place_to_unchain| so it once again calls |disp_cp_chain_me|.
+   Accepts either the long (movw/movt/bx) or short (b disp24) form
+   previously installed.  Returns the 12-byte modified range. */
+VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+                                   void* place_to_unchain,
+                                   const void* place_to_jump_to_EXPECTED,
+                                   const void* disp_cp_chain_me )
+{
+   vassert(endness_host == VexEndnessLE);
+
+   /* What we're expecting to see is:
+        (general case)
+          movw r12, lo16(place_to_jump_to_EXPECTED)
+          movt r12, hi16(place_to_jump_to_EXPECTED)
+          bx   r12
+        viz
+          <8 bytes generated by imm32_to_ireg_EXACTLY2>
+          E1 2F FF 1C
+      ---OR---
+        in the case where the displacement falls within 26 bits
+          b disp24; undef; undef
+        viz
+          EA <3 bytes == disp24>
+          FF 00 00 00
+          FF 00 00 00
+   */
+   UInt* p = (UInt*)place_to_unchain;
+   vassert(0 == (3 & (HWord)p));   /* site must be 32-bit aligned */
+
+   Bool valid = False;
+   if (is_imm32_to_ireg_EXACTLY2(
+          p, /*r*/12, (UInt)(Addr)place_to_jump_to_EXPECTED)
+       && p[2] == 0xE12FFF1C) {
+      valid = True; /* it's the long form */
+      if (0)
+         vex_printf("QQQ unchainXDirect_ARM: found long form\n");
+   } else
+   if ((p[0] >> 24) == 0xEA && p[1] == 0xFF000000 && p[2] == 0xFF000000) {
+      /* It's the short form.  Check the displacement is right. */
+      Int simm24 = p[0] & 0x00FFFFFF;
+      simm24 <<= 8; simm24 >>= 8;   /* sign-extend from 24 bits */
+      /* +8 because a B insn's displacement is relative to pc+8. */
+      if ((UChar*)p + (simm24 << 2) + 8 == place_to_jump_to_EXPECTED) {
+         valid = True;
+         if (0)
+            vex_printf("QQQ unchainXDirect_ARM: found short form\n");
+      }
+   }
+   vassert(valid);
+
+   /* And what we want to change it to is:
+        movw r12, lo16(disp_cp_chain_me)
+        movt r12, hi16(disp_cp_chain_me)
+        blx  r12
+      viz
+        <8 bytes generated by imm32_to_ireg_EXACTLY2>
+        E1 2F FF 3C
+   */
+   (void)imm32_to_ireg_EXACTLY2(
+            p, /*r*/12, (UInt)(Addr)disp_cp_chain_me);
+   p[2] = 0xE12FFF3C;              /* blx r12 */
+   VexInvalRange vir = {(HWord)p, 12};
+   return vir;
+}
+
+
+/* Patch the counter address into a profile inc point, as previously
+   created by the ARMin_ProfInc case for emit_ARMInstr.  Replaces the
+   0x65556555 placeholder in the movw/movt pair with the real
+   64-bit-counter address; the remaining six insns are untouched.
+   Returns the 8-byte modified range. */
+VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+                                 void*  place_to_patch,
+                                 const ULong* location_of_counter )
+{
+   vassert(endness_host == VexEndnessLE);
+   vassert(sizeof(ULong*) == 4);   /* 32-bit host pointers only */
+   UInt* p = (UInt*)place_to_patch;
+   vassert(0 == (3 & (HWord)p));   /* site must be 32-bit aligned */
+   /* Check this really is the sequence emitted by ARMin_ProfInc,
+      with the placeholder still in the movw/movt pair. */
+   vassert(is_imm32_to_ireg_EXACTLY2(p, /*r*/12, 0x65556555));
+   vassert(p[2] == 0xE59CB000);    /* ldr  r11, [r12]   */
+   vassert(p[3] == 0xE29BB001);    /* adds r11, r11, #1 */
+   vassert(p[4] == 0xE58CB000);    /* str  r11, [r12]   */
+   vassert(p[5] == 0xE59CB004);    /* ldr  r11, [r12+4] */
+   vassert(p[6] == 0xE2ABB000);    /* adc  r11, r11, #0 */
+   vassert(p[7] == 0xE58CB004);    /* str  r11, [r12+4] */
+   imm32_to_ireg_EXACTLY2(p, /*r*/12, (UInt)(Addr)location_of_counter);
+   VexInvalRange vir = {(HWord)p, 8};
+   return vir;
+}
+
+
+#undef BITS4
+#undef X0000
+#undef X0001
+#undef X0010
+#undef X0011
+#undef X0100
+#undef X0101
+#undef X0110
+#undef X0111
+#undef X1000
+#undef X1001
+#undef X1010
+#undef X1011
+#undef X1100
+#undef X1101
+#undef X1110
+#undef X1111
+#undef XXXXX___
+#undef XXXXXX__
+#undef XXX___XX
+#undef XXXXX__X
+#undef XXXXXXXX
+#undef XX______
+
+/*---------------------------------------------------------------*/
+/*--- end                                     host_arm_defs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_arm_defs.h b/VEX/priv/host_arm_defs.h
new file mode 100644
index 0000000..f0b172b
--- /dev/null
+++ b/VEX/priv/host_arm_defs.h
@@ -0,0 +1,1082 @@
+/*---------------------------------------------------------------*/
+/*--- begin                                   host_arm_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_HOST_ARM_DEFS_H
+#define __VEX_HOST_ARM_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex.h"                      // VexArch
+#include "host_generic_regs.h"           // HReg
+
+extern UInt arm_hwcaps;
+
+
+/* --------- Registers. --------- */
+
+#define ST_IN static inline
+/* Constructors for the ARM host registers.  mkHReg args are
+   (isVirtual, regclass, encoding, index); the final index is a
+   unique dense number, 0 .. 33.
+   NOTE(review): the ordering appears to be allocation-preference
+   order, with the trailing group (r8, r12-r15, q13-q15) being
+   non-allocatable machine registers -- confirm against the register
+   universe definition elsewhere in this backend. */
+ST_IN HReg hregARM_R4  ( void ) { return mkHReg(False, HRcInt32,  4,  0);  }
+ST_IN HReg hregARM_R5  ( void ) { return mkHReg(False, HRcInt32,  5,  1);  }
+ST_IN HReg hregARM_R6  ( void ) { return mkHReg(False, HRcInt32,  6,  2);  }
+ST_IN HReg hregARM_R7  ( void ) { return mkHReg(False, HRcInt32,  7,  3);  }
+ST_IN HReg hregARM_R10 ( void ) { return mkHReg(False, HRcInt32,  10, 4);  }
+ST_IN HReg hregARM_R11 ( void ) { return mkHReg(False, HRcInt32,  11, 5);  }
+
+ST_IN HReg hregARM_R0  ( void ) { return mkHReg(False, HRcInt32,  0,  6);  }
+ST_IN HReg hregARM_R1  ( void ) { return mkHReg(False, HRcInt32,  1,  7);  }
+ST_IN HReg hregARM_R2  ( void ) { return mkHReg(False, HRcInt32,  2,  8);  }
+ST_IN HReg hregARM_R3  ( void ) { return mkHReg(False, HRcInt32,  3,  9);  }
+ST_IN HReg hregARM_R9  ( void ) { return mkHReg(False, HRcInt32,  9,  10); }
+
+/* 64-bit floating point registers. */
+ST_IN HReg hregARM_D8  ( void ) { return mkHReg(False, HRcFlt64,  8,  11); }
+ST_IN HReg hregARM_D9  ( void ) { return mkHReg(False, HRcFlt64,  9,  12); }
+ST_IN HReg hregARM_D10 ( void ) { return mkHReg(False, HRcFlt64,  10, 13); }
+ST_IN HReg hregARM_D11 ( void ) { return mkHReg(False, HRcFlt64,  11, 14); }
+ST_IN HReg hregARM_D12 ( void ) { return mkHReg(False, HRcFlt64,  12, 15); }
+
+/* 32-bit floating point registers. */
+ST_IN HReg hregARM_S26 ( void ) { return mkHReg(False, HRcFlt32,  26, 16); }
+ST_IN HReg hregARM_S27 ( void ) { return mkHReg(False, HRcFlt32,  27, 17); }
+ST_IN HReg hregARM_S28 ( void ) { return mkHReg(False, HRcFlt32,  28, 18); }
+ST_IN HReg hregARM_S29 ( void ) { return mkHReg(False, HRcFlt32,  29, 19); }
+ST_IN HReg hregARM_S30 ( void ) { return mkHReg(False, HRcFlt32,  30, 20); }
+
+/* 128-bit vector registers. */
+ST_IN HReg hregARM_Q8  ( void ) { return mkHReg(False, HRcVec128, 8,  21); }
+ST_IN HReg hregARM_Q9  ( void ) { return mkHReg(False, HRcVec128, 9,  22); }
+ST_IN HReg hregARM_Q10 ( void ) { return mkHReg(False, HRcVec128, 10, 23); }
+ST_IN HReg hregARM_Q11 ( void ) { return mkHReg(False, HRcVec128, 11, 24); }
+ST_IN HReg hregARM_Q12 ( void ) { return mkHReg(False, HRcVec128, 12, 25); }
+
+ST_IN HReg hregARM_R8  ( void ) { return mkHReg(False, HRcInt32,  8,  26); }
+ST_IN HReg hregARM_R12 ( void ) { return mkHReg(False, HRcInt32,  12, 27); }
+ST_IN HReg hregARM_R13 ( void ) { return mkHReg(False, HRcInt32,  13, 28); }
+ST_IN HReg hregARM_R14 ( void ) { return mkHReg(False, HRcInt32,  14, 29); }
+ST_IN HReg hregARM_R15 ( void ) { return mkHReg(False, HRcInt32,  15, 30); }
+ST_IN HReg hregARM_Q13 ( void ) { return mkHReg(False, HRcVec128, 13, 31); }
+ST_IN HReg hregARM_Q14 ( void ) { return mkHReg(False, HRcVec128, 14, 32); }
+ST_IN HReg hregARM_Q15 ( void ) { return mkHReg(False, HRcVec128, 15, 33); }
+#undef ST_IN
+
+extern void ppHRegARM ( HReg );
+
+/* Number of registers used arg passing in function calls */
+#define ARM_N_ARGREGS 4   /* r0, r1, r2, r3 */
+
+
+/* --------- Condition codes. --------- */
+
+/* ARM condition codes.  The enum values are the 4-bit condition-field
+   encodings and are emitted directly into instruction words (see e.g.
+   the ARMin_NCMovQ case in emit_ARMInstr, which also relies on
+   cc ^ 1 giving the inverse condition). */
+typedef
+   enum {
+      ARMcc_EQ  = 0,  /* equal                          : Z=1 */
+      ARMcc_NE  = 1,  /* not equal                      : Z=0 */
+
+      ARMcc_HS  = 2,  /* >=u (higher or same)           : C=1 */
+      ARMcc_LO  = 3,  /* <u  (lower)                    : C=0 */
+
+      ARMcc_MI  = 4,  /* minus (negative)               : N=1 */
+      ARMcc_PL  = 5,  /* plus (zero or +ve)             : N=0 */
+
+      ARMcc_VS  = 6,  /* overflow                       : V=1 */
+      ARMcc_VC  = 7,  /* no overflow                    : V=0 */
+
+      ARMcc_HI  = 8,  /* >u   (higher)                  : C=1 && Z=0 */
+      ARMcc_LS  = 9,  /* <=u  (lower or same)           : C=0 || Z=1 */
+
+      ARMcc_GE  = 10, /* >=s (signed greater or equal)  : N=V */
+      ARMcc_LT  = 11, /* <s  (signed less than)         : N!=V */
+
+      ARMcc_GT  = 12, /* >s  (signed greater)           : Z=0 && N=V */
+      ARMcc_LE  = 13, /* <=s (signed less or equal)     : Z=1 || N!=V */
+
+      ARMcc_AL  = 14, /* always (unconditional) */
+      ARMcc_NV  = 15  /* never (basically undefined meaning), deprecated */
+   }
+   ARMCondCode;
+
+extern const HChar* showARMCondCode ( ARMCondCode );
+
+
+
+/* --------- Memory address expressions (amodes). --------- */
+
+/* --- Addressing Mode 1 --- */
+typedef
+   enum {
+      ARMam1_RI=1,   /* reg +/- imm12 */
+      ARMam1_RRS     /* reg1 + (reg2 << 0, 1 2 or 3) */
+   }
+   ARMAMode1Tag;
+
+typedef
+   struct {
+      ARMAMode1Tag tag;
+      union {
+         struct {
+            HReg reg;
+            /* NB: despite the name, the range is a 12-bit magnitude
+               plus sign, i.e. -4095 .. +4095 */
+            Int  simm13; /* -4095 .. +4095 */
+         } RI;
+         struct {
+            HReg base;
+            HReg index;
+            UInt shift; /* 0, 1 2 or 3 */
+         } RRS;
+      } ARMam1;
+   }
+   ARMAMode1;
+
+extern ARMAMode1* ARMAMode1_RI  ( HReg reg, Int simm13 );
+extern ARMAMode1* ARMAMode1_RRS ( HReg base, HReg index, UInt shift );
+
+extern void ppARMAMode1 ( ARMAMode1* );
+
+
+/* --- Addressing Mode 2 --- */
+typedef
+   enum {
+      ARMam2_RI=3,   /* reg +/- imm8 */
+      ARMam2_RR      /* reg1 + reg2 */
+   }
+   ARMAMode2Tag;
+
+typedef
+   struct {
+      ARMAMode2Tag tag;
+      union {
+         struct {
+            HReg reg;
+            /* 8-bit magnitude plus sign: -255 .. 255 */
+            Int  simm9; /* -255 .. 255 */
+         } RI;
+         struct {
+            HReg base;
+            HReg index;
+         } RR;
+      } ARMam2;
+   }
+   ARMAMode2;
+
+extern ARMAMode2* ARMAMode2_RI ( HReg reg, Int simm9 );
+extern ARMAMode2* ARMAMode2_RR ( HReg base, HReg index );
+
+extern void ppARMAMode2 ( ARMAMode2* );
+
+
+/* --- Addressing Mode suitable for VFP --- */
+/* The simm11 is encoded as 8 bits + 1 sign bit,
+   so can only be 0 % 4 (a multiple of 4). */
+typedef
+   struct {
+      HReg reg;
+      Int  simm11; /* -1020, -1016 .. 1016, 1020 */
+   }
+   ARMAModeV;
+
+extern ARMAModeV* mkARMAModeV ( HReg reg, Int simm11 );
+
+extern void ppARMAModeV ( ARMAModeV* );
+
+/* --- Addressing Mode suitable for Neon --- */
+typedef
+   enum {
+      ARMamN_R=5,    /* [rN] */
+      ARMamN_RR      /* [rN], rM */
+      /* ... */
+   }
+   ARMAModeNTag;
+
+typedef
+   struct {
+      ARMAModeNTag tag;
+      union {
+         struct {
+            HReg rN;
+            HReg rM;
+         } RR;
+         struct {
+            HReg rN;
+         } R;
+         /* ... */
+      } ARMamN;
+   }
+   ARMAModeN;
+
+extern ARMAModeN* mkARMAModeN_RR ( HReg, HReg );
+extern ARMAModeN* mkARMAModeN_R ( HReg );
+extern void ppARMAModeN ( ARMAModeN* );
+
+/* --------- Reg or imm-8x4 operands --------- */
+/* a.k.a (a very restricted form of) Shifter Operand,
+   in the ARM parlance. */
+
+typedef
+   enum {
+      ARMri84_I84=7,   /* imm8 `ror` (2 * imm4) */
+      ARMri84_R        /* reg */
+   }
+   ARMRI84Tag;
+
+typedef
+   struct {
+      ARMRI84Tag tag;
+      union {
+         struct {
+            UShort imm8;
+            UShort imm4;
+         } I84;
+         struct {
+            HReg reg;
+         } R;
+      } ARMri84;
+   }
+   ARMRI84;
+
+extern ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 );
+extern ARMRI84* ARMRI84_R   ( HReg );
+
+extern void ppARMRI84 ( ARMRI84* );
+
+
+/* --------- Reg or imm5 operands --------- */
+/* Shift-amount operand: either a register or a 5-bit immediate. */
+typedef
+   enum {
+      ARMri5_I5=9,   /* imm5, 1 .. 31 only (no zero!) */
+      ARMri5_R       /* reg */
+   }
+   ARMRI5Tag;
+
+typedef
+   struct {
+      ARMRI5Tag tag;
+      union {
+         struct {
+            UInt imm5;
+         } I5;
+         struct {
+            HReg reg;
+         } R;
+      } ARMri5;
+   }
+   ARMRI5;
+
+extern ARMRI5* ARMRI5_I5 ( UInt imm5 );
+extern ARMRI5* ARMRI5_R  ( HReg );
+
+extern void ppARMRI5 ( ARMRI5* );
+
+/* -------- Neon Immediate operand -------- */
+
+/* imm8 = abcdefgh, B = NOT(b);
+
+type | value (64bit binary)
+-----+-------------------------------------------------------------------------
+   0 | 00000000 00000000 00000000 abcdefgh 00000000 00000000 00000000 abcdefgh
+   1 | 00000000 00000000 abcdefgh 00000000 00000000 00000000 abcdefgh 00000000
+   2 | 00000000 abcdefgh 00000000 00000000 00000000 abcdefgh 00000000 00000000
+   3 | abcdefgh 00000000 00000000 00000000 abcdefgh 00000000 00000000 00000000
+   4 | 00000000 abcdefgh 00000000 abcdefgh 00000000 abcdefgh 00000000 abcdefgh
+   5 | abcdefgh 00000000 abcdefgh 00000000 abcdefgh 00000000 abcdefgh 00000000
+   6 | abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh abcdefgh
+   7 | 00000000 00000000 abcdefgh 11111111 00000000 00000000 abcdefgh 11111111
+   8 | 00000000 abcdefgh 11111111 11111111 00000000 abcdefgh 11111111 11111111
+   9 | aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
+  10 | aBbbbbbc defgh000 00000000 00000000 aBbbbbbc defgh000 00000000 00000000
+-----+-------------------------------------------------------------------------
+
+Type 10 is:
+   (-1)^S * 2^exp * mantissa
+where S = a, exp = UInt(B:c:d) - 3, mantissa = (16 + UInt(e:f:g:h)) / 16
+*/
+
+/* An 8-bit immediate plus a replication/expansion type, per the table
+   above.  Conversion to/from the expanded 64-bit value is provided by
+   ARMNImm_to_Imm64 / Imm64_to_ARMNImm below. */
+typedef
+   struct {
+      UInt type;
+      UInt imm8;
+   }
+   ARMNImm;
+
+extern ARMNImm* ARMNImm_TI ( UInt type, UInt imm8 );
+extern ULong ARMNImm_to_Imm64 ( ARMNImm* );
+extern ARMNImm* Imm64_to_ARMNImm ( ULong );
+
+extern void ppARMNImm ( ARMNImm* );
+
+/* ------ Neon Register or Scalar Operand ------ */
+
+typedef
+   enum {
+      ARMNRS_Reg=11,     /* whole register */
+      ARMNRS_Scalar      /* reg plus an index (presumably the lane
+                            number -- confirm against users) */
+   }
+   ARMNRS_tag;
+
+typedef
+   struct {
+      ARMNRS_tag tag;
+      HReg reg;
+      UInt index;
+   }
+   ARMNRS;
+
+extern ARMNRS* mkARMNRS(ARMNRS_tag, HReg reg, UInt index);
+extern void ppARMNRS ( ARMNRS* );
+
+/* --------- Instructions. --------- */
+
+/* --------- */
+/* Integer ALU operations. */
+typedef
+   enum {
+      ARMalu_ADD=20,   /* plain 32-bit add */
+      ARMalu_ADDS,     /* 32-bit add, and set the flags */
+      ARMalu_ADC,      /* 32-bit add with carry */
+      ARMalu_SUB,      /* plain 32-bit subtract */
+      ARMalu_SUBS,     /* 32-bit subtract, and set the flags */
+      ARMalu_SBC,      /* 32-bit subtract with carry */
+      ARMalu_AND,      /* bitwise and */
+      ARMalu_BIC,      /* bitwise and-not (bit clear) */
+      ARMalu_OR,       /* bitwise or */
+      ARMalu_XOR       /* bitwise exclusive-or */
+   }
+   ARMAluOp;
+
+extern const HChar* showARMAluOp ( ARMAluOp op );
+
+
+/* Integer shift operations. */
+typedef
+   enum {
+      ARMsh_SHL=40,    /* shift left */
+      ARMsh_SHR,       /* logical shift right */
+      ARMsh_SAR        /* arithmetic shift right */
+   }
+   ARMShiftOp;
+
+extern const HChar* showARMShiftOp ( ARMShiftOp op );
+
+
+/* Integer unary operations. */
+typedef
+   enum {
+      ARMun_NEG=50,    /* negate */
+      ARMun_NOT,       /* bitwise complement */
+      ARMun_CLZ        /* count leading zeroes */
+   }
+   ARMUnaryOp;
+
+extern const HChar* showARMUnaryOp ( ARMUnaryOp op );
+
+
+/* Multiply variants. */
+typedef
+   enum {
+      ARMmul_PLAIN=60, /* 32x32 -> 32 */
+      ARMmul_ZX,       /* widening, zero-extending (unsigned) */
+      ARMmul_SX        /* widening, sign-extending (signed) */
+   }
+   ARMMulOp;
+
+extern const HChar* showARMMulOp ( ARMMulOp op );
+
+
+/* VFP binary floating point operations. */
+typedef
+   enum {
+      ARMvfp_ADD=70,
+      ARMvfp_SUB,
+      ARMvfp_MUL,
+      ARMvfp_DIV
+   }
+   ARMVfpOp;
+
+extern const HChar* showARMVfpOp ( ARMVfpOp op );
+
+
+/* VFP unary floating point operations. */
+typedef
+   enum {
+      ARMvfpu_COPY=80,
+      ARMvfpu_NEG,
+      ARMvfpu_ABS,
+      ARMvfpu_SQRT
+   }
+   ARMVfpUnaryOp;
+
+extern const HChar* showARMVfpUnaryOp ( ARMVfpUnaryOp op );
+
+/* Binary Neon operations.  Names follow the ARM mnemonics; the U/S
+   suffixes distinguish unsigned/signed variants and F/FP marks the
+   floating-point forms.  See the ARMin_NBinary cases in emit_ARMInstr
+   for the corresponding encodings. */
+typedef
+   enum {
+      ARMneon_VAND=90,
+      ARMneon_VORR,
+      ARMneon_VXOR,
+      ARMneon_VADD,
+      ARMneon_VADDFP,
+      ARMneon_VRHADDS,
+      ARMneon_VRHADDU,
+      ARMneon_VPADDFP,
+      ARMneon_VABDFP,
+      ARMneon_VSUB,
+      ARMneon_VSUBFP,
+      ARMneon_VMAXU,
+      ARMneon_VMAXS,
+      ARMneon_VMAXF,
+      ARMneon_VMINU,
+      ARMneon_VMINS,
+      ARMneon_VMINF,
+      ARMneon_VQADDU,
+      ARMneon_VQADDS,
+      ARMneon_VQSUBU,
+      ARMneon_VQSUBS,
+      ARMneon_VCGTU,
+      ARMneon_VCGTS,
+      ARMneon_VCGEU,
+      ARMneon_VCGES,
+      ARMneon_VCGTF,
+      ARMneon_VCGEF,
+      ARMneon_VCEQ,
+      ARMneon_VCEQF,
+      ARMneon_VEXT,
+      ARMneon_VMUL,
+      ARMneon_VMULFP,
+      ARMneon_VMULLU,
+      ARMneon_VMULLS,
+      ARMneon_VMULP,
+      ARMneon_VMULLP,
+      ARMneon_VQDMULH,
+      ARMneon_VQRDMULH,
+      ARMneon_VPADD,
+      ARMneon_VPMINU,
+      ARMneon_VPMINS,
+      ARMneon_VPMINF,
+      ARMneon_VPMAXU,
+      ARMneon_VPMAXS,
+      ARMneon_VPMAXF,
+      ARMneon_VTBL,
+      ARMneon_VQDMULL,
+      ARMneon_VRECPS,
+      ARMneon_VRSQRTS,
+      ARMneon_INVALID
+      /* ... */
+   }
+   ARMNeonBinOp;
+
+/* Neon shift operations. */
+typedef
+   enum {
+      ARMneon_VSHL=150,
+      ARMneon_VSAL, /* Yah, not SAR but SAL */
+      ARMneon_VQSHL,
+      ARMneon_VQSAL
+   }
+   ARMNeonShiftOp;
+
+typedef
+   enum {
+      ARMneon_COPY=160,
+      ARMneon_COPYLU,
+      ARMneon_COPYLS,
+      ARMneon_COPYN,
+      ARMneon_COPYQNSS,
+      ARMneon_COPYQNUS,
+      ARMneon_COPYQNUU,
+      ARMneon_NOT,
+      ARMneon_EQZ,
+      ARMneon_DUP,
+      ARMneon_PADDLS,
+      ARMneon_PADDLU,
+      ARMneon_CNT,
+      ARMneon_CLZ,
+      ARMneon_CLS,
+      ARMneon_VCVTxFPxINT,
+      ARMneon_VQSHLNSS,
+      ARMneon_VQSHLNUU,
+      ARMneon_VQSHLNUS,
+      ARMneon_VCVTFtoU,
+      ARMneon_VCVTFtoS,
+      ARMneon_VCVTUtoF,
+      ARMneon_VCVTStoF,
+      ARMneon_VCVTFtoFixedU,
+      ARMneon_VCVTFtoFixedS,
+      ARMneon_VCVTFixedUtoF,
+      ARMneon_VCVTFixedStoF,
+      ARMneon_VCVTF16toF32,
+      ARMneon_VCVTF32toF16,
+      ARMneon_REV16,
+      ARMneon_REV32,
+      ARMneon_REV64,
+      ARMneon_ABS,
+      ARMneon_VNEGF,
+      ARMneon_VRECIP,
+      ARMneon_VRECIPF,
+      ARMneon_VABSFP,
+      ARMneon_VRSQRTEFP,
+      ARMneon_VRSQRTE
+      /* ... */
+   }
+   ARMNeonUnOp;
+
+typedef
+   enum {
+      ARMneon_SETELEM=200,
+      ARMneon_GETELEMU,
+      ARMneon_GETELEMS,
+      ARMneon_VDUP,
+   }
+   ARMNeonUnOpS;
+
+typedef
+   enum {
+      ARMneon_TRN=210,
+      ARMneon_ZIP,
+      ARMneon_UZP
+      /* ... */
+   }
+   ARMNeonDualOp;
+
+extern const HChar* showARMNeonBinOp ( ARMNeonBinOp op );
+extern const HChar* showARMNeonUnOp ( ARMNeonUnOp op );
+extern const HChar* showARMNeonUnOpS ( ARMNeonUnOpS op );
+extern const HChar* showARMNeonShiftOp ( ARMNeonShiftOp op );
+extern const HChar* showARMNeonDualOp ( ARMNeonDualOp op );
+extern const HChar* showARMNeonBinOpDataType ( ARMNeonBinOp op );
+extern const HChar* showARMNeonUnOpDataType ( ARMNeonUnOp op );
+extern const HChar* showARMNeonUnOpSDataType ( ARMNeonUnOpS op );
+extern const HChar* showARMNeonShiftOpDataType ( ARMNeonShiftOp op );
+extern const HChar* showARMNeonDualOpDataType ( ARMNeonDualOp op );
+
+typedef
+   enum {
+      /* baseline */
+      ARMin_Alu=220,
+      ARMin_Shift,
+      ARMin_Unary,
+      ARMin_CmpOrTst,
+      ARMin_Mov,
+      ARMin_Imm32,
+      ARMin_LdSt32,
+      ARMin_LdSt16,
+      ARMin_LdSt8U,
+      ARMin_Ld8S,
+      ARMin_XDirect,     /* direct transfer to GA */
+      ARMin_XIndir,      /* indirect transfer to GA */
+      ARMin_XAssisted,   /* assisted transfer to GA */
+      ARMin_CMov,
+      ARMin_Call,
+      ARMin_Mul,
+      ARMin_LdrEX,
+      ARMin_StrEX,
+      /* vfp */
+      ARMin_VLdStD,
+      ARMin_VLdStS,
+      ARMin_VAluD,
+      ARMin_VAluS,
+      ARMin_VUnaryD,
+      ARMin_VUnaryS,
+      ARMin_VCmpD,
+      ARMin_VCMovD,
+      ARMin_VCMovS,
+      ARMin_VCvtSD,
+      ARMin_VXferD,
+      ARMin_VXferS,
+      ARMin_VCvtID,
+      ARMin_FPSCR,
+      ARMin_MFence,
+      ARMin_CLREX,
+      /* Neon */
+      ARMin_NLdStQ,
+      ARMin_NLdStD,
+      ARMin_NUnary,
+      ARMin_NUnaryS,
+      ARMin_NDual,
+      ARMin_NBinary,
+      ARMin_NBinaryS,
+      ARMin_NShift,
+      ARMin_NShl64, // special case 64-bit shift of Dreg by immediate
+      ARMin_NeonImm,
+      ARMin_NCMovQ,
+      /* This is not a NEON instruction. Actually there is no corresponding
+         instruction in ARM instruction set at all. We need this one to
+         generate spill/reload of 128-bit registers since current register
+         allocator demands them to consist of no more than two instructions.
+         We will split this instruction into 2 or 3 ARM instructions on the
+         emitting phase.
+         NOTE: source and destination registers should be different! */
+      ARMin_Add32,
+      ARMin_EvCheck,     /* Event check */
+      ARMin_ProfInc      /* 64-bit profile counter increment */
+   }
+   ARMInstrTag;
+
+/* Destinations are on the LEFT (first operand) */
+
+typedef
+   struct {
+      ARMInstrTag tag;
+      union {
+         /* ADD/SUB/AND/OR/XOR, vanilla ALU op */
+         struct {
+            ARMAluOp op;
+            HReg     dst;
+            HReg     argL;
+            ARMRI84* argR;
+         } Alu;
+         /* SHL/SHR/SAR, 2nd arg is reg or imm */
+         struct {
+            ARMShiftOp op;
+            HReg       dst;
+            HReg       argL;
+            ARMRI5*    argR;
+         } Shift;
+         /* NOT/NEG/CLZ */
+         struct {
+            ARMUnaryOp op;
+            HReg       dst;
+            HReg       src;
+         } Unary;
+         /* CMP/TST; subtract/and, discard result, set NZCV */
+         struct {
+            Bool     isCmp;
+            HReg     argL;
+            ARMRI84* argR;
+         } CmpOrTst;
+         /* MOV dst, src -- reg-reg (or reg-imm8x4) move */
+         struct {
+            HReg     dst;
+            ARMRI84* src;
+         } Mov;
+         /* Pseudo-insn; make a 32-bit immediate */
+         struct {
+            HReg dst;
+            UInt imm32;
+         } Imm32;
+         /* 32-bit load or store, may be conditional */
+         struct {
+            ARMCondCode cc; /* ARMcc_NV is not allowed */
+            Bool        isLoad;
+            HReg        rD;
+            ARMAMode1*  amode;
+         } LdSt32;
+         /* 16-bit load or store, may be conditional */
+         struct {
+            ARMCondCode cc; /* ARMcc_NV is not allowed */
+            Bool        isLoad;
+            Bool        signedLoad;
+            HReg        rD;
+            ARMAMode2*  amode;
+         } LdSt16;
+         /* 8-bit (unsigned) load or store, may be conditional */
+         struct {
+            ARMCondCode cc; /* ARMcc_NV is not allowed */
+            Bool        isLoad;
+            HReg        rD;
+            ARMAMode1*  amode;
+         } LdSt8U;
+         /* 8-bit signed load, may be conditional */
+         struct {
+            ARMCondCode cc; /* ARMcc_NV is not allowed */
+            HReg        rD;
+            ARMAMode2*  amode;
+         } Ld8S;
+         /* Update the guest R15T value, then exit requesting to chain
+            to it.  May be conditional.  Urr, use of Addr32 implicitly
+            assumes that wordsize(guest) == wordsize(host). */
+         struct {
+            Addr32      dstGA;    /* next guest address */
+            ARMAMode1*  amR15T;   /* amode in guest state for R15T */
+            ARMCondCode cond;     /* can be ARMcc_AL */
+            Bool        toFastEP; /* chain to the slow or fast point? */
+         } XDirect;
+         /* Boring transfer to a guest address not known at JIT time.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg        dstGA;
+            ARMAMode1*  amR15T;
+            ARMCondCode cond; /* can be ARMcc_AL */
+         } XIndir;
+         /* Assisted transfer to a guest address, most general case.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg        dstGA;
+            ARMAMode1*  amR15T;
+            ARMCondCode cond; /* can be ARMcc_AL */
+            IRJumpKind  jk;
+         } XAssisted;
+         /* Mov src to dst on the given condition, which may not
+            be ARMcc_AL. */
+         struct {
+            ARMCondCode cond;
+            HReg        dst;
+            ARMRI84*    src;
+         } CMov;
+         /* Pseudo-insn.  Call target (an absolute address), on given
+            condition (which could be ARMcc_AL). */
+         struct {
+            ARMCondCode cond;
+            Addr32      target;
+            Int         nArgRegs; /* # regs carrying args: 0 .. 4 */
+            RetLoc      rloc;     /* where the return value will be */
+         } Call;
+         /* (PLAIN) 32 *  32 -> 32:  r0    = r2 * r3
+            (ZX)    32 *u 32 -> 64:  r1:r0 = r2 *u r3
+            (SX)    32 *s 32 -> 64:  r1:r0 = r2 *s r3
+            Why hardwired registers?  Because the ARM ARM specifies
+            (eg for straight MUL) the result (Rd) and the left arg (Rm)
+            may not be the same register.  That's not a constraint we
+            can enforce in the register allocator (without mucho extra
+            complexity).  Hence hardwire it.  At least using caller-saves
+            registers, which are less likely to be in use. */
+         struct {
+            ARMMulOp op;
+         } Mul;
+         /* LDREX{,H,B} r2, [r4]  and
+            LDREXD r2, r3, [r4]   (on LE hosts, transferred value is r3:r2)
+            Again, hardwired registers since this is not performance
+            critical, and there are possibly constraints on the
+            registers that we can't express in the register allocator.*/
+         struct {
+            Int  szB; /* 1, 2, 4 or 8 */
+         } LdrEX;
+         /* STREX{,H,B} r0, r2, [r4]  and  
+            STREXD r0, r2, r3, [r4]   (on LE hosts, transferred value is r3:r2)
+            r0 = SC( [r4] = r2 )      (8, 16, 32 bit transfers)
+            r0 = SC( [r4] = r3:r2)    (64 bit transfers)
+            Ditto comment re fixed registers. */
+         struct {
+            Int  szB; /* 1, 2, 4 or 8 */
+         } StrEX;
+         /* VFP INSTRUCTIONS */
+         /* 64-bit Fp load/store */
+         struct {
+            Bool       isLoad;
+            HReg       dD;
+            ARMAModeV* amode;
+         } VLdStD;
+         /* 32-bit Fp load/store */
+         struct {
+            Bool       isLoad;
+            HReg       fD;
+            ARMAModeV* amode;
+         } VLdStS;
+         /* 64-bit FP binary arithmetic */
+         struct {
+            ARMVfpOp op;
+            HReg     dst;
+            HReg     argL;
+            HReg     argR;
+         } VAluD;
+         /* 32-bit FP binary arithmetic */
+         struct {
+            ARMVfpOp op;
+            HReg     dst;
+            HReg     argL;
+            HReg     argR;
+         } VAluS;
+         /* 64-bit FP unary, also reg-reg move */
+         struct {
+            ARMVfpUnaryOp op;
+            HReg          dst;
+            HReg          src;
+         } VUnaryD;
+         /* 32-bit FP unary, also reg-reg move */
+         struct {
+            ARMVfpUnaryOp op;
+            HReg          dst;
+            HReg          src;
+         } VUnaryS;
+         /* 64-bit FP compare and move results to CPSR (FCMPD;FMSTAT) */
+         struct {
+            HReg argL;
+            HReg argR;
+         } VCmpD;
+         /* 64-bit FP mov src to dst on the given condition, which may
+            not be ARMcc_AL. */
+         struct {
+            ARMCondCode cond;
+            HReg        dst;
+            HReg        src;
+         } VCMovD;
+         /* 32-bit FP mov src to dst on the given condition, which may
+            not be ARMcc_AL. */
+         struct {
+            ARMCondCode cond;
+            HReg        dst;
+            HReg        src;
+         } VCMovS;
+         /* Convert between 32-bit and 64-bit FP values (both ways).
+            (FCVTSD, FCVTDS) */
+         struct {
+            Bool sToD; /* True: F32->F64.  False: F64->F32 */
+            HReg dst;
+            HReg src;
+         } VCvtSD;
+         /* Transfer a VFP D reg to/from two integer registers (VMOV) */
+         struct {
+            Bool toD;
+            HReg dD;
+            HReg rHi;
+            HReg rLo;
+         } VXferD;
+         /* Transfer a VFP S reg to/from an integer register (VMOV) */
+         struct {
+            Bool toS;
+            HReg fD;
+            HReg rLo;
+         } VXferS;
+         /* Convert between 32-bit ints and 64-bit FP values (both ways
+            and both signednesses). (FSITOD, FUITOD, FTOSID, FTOUID) */
+         struct {
+            Bool iToD; /* True: I32->F64.  False: F64->I32 */
+            Bool syned; /* True: I32 is signed.  False: I32 is unsigned */
+            HReg dst;
+            HReg src;
+         } VCvtID;
+         /* Move a 32-bit value to/from the FPSCR (FMXR, FMRX) */
+         struct {
+            Bool toFPSCR;
+            HReg iReg;
+         } FPSCR;
+         /* Mem fence.  An insn which fences all loads and stores as
+            much as possible before continuing.  On ARM we emit the
+            sequence
+               mcr 15,0,r0,c7,c10,4 (DSB)
+               mcr 15,0,r0,c7,c10,5 (DMB)
+               mcr 15,0,r0,c7,c5,4 (ISB)
+            which is probably total overkill, but better safe than
+            sorry.
+         */
+         struct {
+         } MFence;
+         /* A CLREX instruction. */
+         struct {
+         } CLREX;
+         /* Neon data processing instruction: 3 registers of the same
+            length */
+         struct {
+            ARMNeonBinOp op;
+            HReg dst;
+            HReg argL;
+            HReg argR;
+            UInt size;
+            Bool Q;
+         } NBinary;
+         struct {
+            ARMNeonBinOp op;
+            ARMNRS* dst;
+            ARMNRS* argL;
+            ARMNRS* argR;
+            UInt size;
+            Bool Q;
+         } NBinaryS;
+         struct {
+            ARMNeonShiftOp op;
+            HReg dst;
+            HReg argL;
+            HReg argR;
+            UInt size;
+            Bool Q;
+         } NShift;
+         struct {
+            HReg dst;
+            HReg src;
+            UInt amt; /* 1..63 only */
+         } NShl64;
+         struct {
+            Bool isLoad;
+            HReg dQ;
+            ARMAModeN *amode;
+         } NLdStQ;
+         struct {
+            Bool isLoad;
+            HReg dD;
+            ARMAModeN *amode;
+         } NLdStD;
+         struct {
+            ARMNeonUnOpS op;
+            ARMNRS*  dst;
+            ARMNRS*  src;
+            UInt size;
+            Bool Q;
+         } NUnaryS;
+         struct {
+            ARMNeonUnOp op;
+            HReg  dst;
+            HReg  src;
+            UInt size;
+            Bool Q;
+         } NUnary;
+         /* Takes two arguments and modifies them both. */
+         struct {
+            ARMNeonDualOp op;
+            HReg  arg1;
+            HReg  arg2;
+            UInt size;
+            Bool Q;
+         } NDual;
+         struct {
+            HReg dst;
+            ARMNImm* imm;
+         } NeonImm;
+         /* 128-bit Neon move src to dst on the given condition, which
+            may not be ARMcc_AL. */
+         struct {
+            ARMCondCode cond;
+            HReg        dst;
+            HReg        src;
+         } NCMovQ;
+         struct {
+            /* Note: rD != rN */
+            HReg rD;
+            HReg rN;
+            UInt imm32;
+         } Add32;
+         struct {
+            ARMAMode1* amCounter;
+            ARMAMode1* amFailAddr;
+         } EvCheck;
+         struct {
+            /* No fields.  The address of the counter to inc is
+               installed later, post-translation, by patching it in,
+               as it is not known at translation time. */
+         } ProfInc;
+      } ARMin;
+   }
+   ARMInstr;
+
+
+extern ARMInstr* ARMInstr_Alu      ( ARMAluOp, HReg, HReg, ARMRI84* );
+extern ARMInstr* ARMInstr_Shift    ( ARMShiftOp, HReg, HReg, ARMRI5* );
+extern ARMInstr* ARMInstr_Unary    ( ARMUnaryOp, HReg, HReg );
+extern ARMInstr* ARMInstr_CmpOrTst ( Bool isCmp, HReg, ARMRI84* );
+extern ARMInstr* ARMInstr_Mov      ( HReg, ARMRI84* );
+extern ARMInstr* ARMInstr_Imm32    ( HReg, UInt );
+extern ARMInstr* ARMInstr_LdSt32   ( ARMCondCode,
+                                     Bool isLoad, HReg, ARMAMode1* );
+extern ARMInstr* ARMInstr_LdSt16   ( ARMCondCode,
+                                     Bool isLoad, Bool signedLoad,
+                                     HReg, ARMAMode2* );
+extern ARMInstr* ARMInstr_LdSt8U   ( ARMCondCode,
+                                     Bool isLoad, HReg, ARMAMode1* );
+extern ARMInstr* ARMInstr_Ld8S     ( ARMCondCode, HReg, ARMAMode2* );
+extern ARMInstr* ARMInstr_XDirect  ( Addr32 dstGA, ARMAMode1* amR15T,
+                                     ARMCondCode cond, Bool toFastEP );
+extern ARMInstr* ARMInstr_XIndir   ( HReg dstGA, ARMAMode1* amR15T,
+                                     ARMCondCode cond );
+extern ARMInstr* ARMInstr_XAssisted ( HReg dstGA, ARMAMode1* amR15T,
+                                      ARMCondCode cond, IRJumpKind jk );
+extern ARMInstr* ARMInstr_CMov     ( ARMCondCode, HReg dst, ARMRI84* src );
+extern ARMInstr* ARMInstr_Call     ( ARMCondCode, Addr32, Int nArgRegs,
+                                     RetLoc rloc );
+extern ARMInstr* ARMInstr_Mul      ( ARMMulOp op );
+extern ARMInstr* ARMInstr_LdrEX    ( Int szB );
+extern ARMInstr* ARMInstr_StrEX    ( Int szB );
+extern ARMInstr* ARMInstr_VLdStD   ( Bool isLoad, HReg, ARMAModeV* );
+extern ARMInstr* ARMInstr_VLdStS   ( Bool isLoad, HReg, ARMAModeV* );
+extern ARMInstr* ARMInstr_VAluD    ( ARMVfpOp op, HReg, HReg, HReg );
+extern ARMInstr* ARMInstr_VAluS    ( ARMVfpOp op, HReg, HReg, HReg );
+extern ARMInstr* ARMInstr_VUnaryD  ( ARMVfpUnaryOp, HReg dst, HReg src );
+extern ARMInstr* ARMInstr_VUnaryS  ( ARMVfpUnaryOp, HReg dst, HReg src );
+extern ARMInstr* ARMInstr_VCmpD    ( HReg argL, HReg argR );
+extern ARMInstr* ARMInstr_VCMovD   ( ARMCondCode, HReg dst, HReg src );
+extern ARMInstr* ARMInstr_VCMovS   ( ARMCondCode, HReg dst, HReg src );
+extern ARMInstr* ARMInstr_VCvtSD   ( Bool sToD, HReg dst, HReg src );
+extern ARMInstr* ARMInstr_VXferD   ( Bool toD, HReg dD, HReg rHi, HReg rLo );
+extern ARMInstr* ARMInstr_VXferS   ( Bool toS, HReg fD, HReg rLo );
+extern ARMInstr* ARMInstr_VCvtID   ( Bool iToD, Bool syned,
+                                     HReg dst, HReg src );
+extern ARMInstr* ARMInstr_FPSCR    ( Bool toFPSCR, HReg iReg );
+extern ARMInstr* ARMInstr_MFence   ( void );
+extern ARMInstr* ARMInstr_CLREX    ( void );
+extern ARMInstr* ARMInstr_NLdStQ   ( Bool isLoad, HReg, ARMAModeN* );
+extern ARMInstr* ARMInstr_NLdStD   ( Bool isLoad, HReg, ARMAModeN* );
+extern ARMInstr* ARMInstr_NUnary   ( ARMNeonUnOp, HReg, HReg, UInt, Bool );
+extern ARMInstr* ARMInstr_NUnaryS  ( ARMNeonUnOpS, ARMNRS*, ARMNRS*,
+                                     UInt, Bool );
+extern ARMInstr* ARMInstr_NDual    ( ARMNeonDualOp, HReg, HReg, UInt, Bool );
+extern ARMInstr* ARMInstr_NBinary  ( ARMNeonBinOp, HReg, HReg, HReg,
+                                     UInt, Bool );
+extern ARMInstr* ARMInstr_NShift   ( ARMNeonShiftOp, HReg, HReg, HReg,
+                                     UInt, Bool );
+extern ARMInstr* ARMInstr_NShl64   ( HReg, HReg, UInt );
+extern ARMInstr* ARMInstr_NeonImm  ( HReg, ARMNImm* );
+extern ARMInstr* ARMInstr_NCMovQ   ( ARMCondCode, HReg, HReg );
+extern ARMInstr* ARMInstr_Add32    ( HReg rD, HReg rN, UInt imm32 );
+extern ARMInstr* ARMInstr_EvCheck  ( ARMAMode1* amCounter,
+                                     ARMAMode1* amFailAddr );
+extern ARMInstr* ARMInstr_ProfInc  ( void );
+
+extern void ppARMInstr ( const ARMInstr* );
+
+
+/* Some functions that insulate the register allocator from details
+   of the underlying instruction set. */
+extern void getRegUsage_ARMInstr ( HRegUsage*, const ARMInstr*, Bool );
+extern void mapRegs_ARMInstr     ( HRegRemap*, ARMInstr*, Bool );
+extern Bool isMove_ARMInstr      ( const ARMInstr*, HReg*, HReg* );
+extern Int  emit_ARMInstr        ( /*MB_MOD*/Bool* is_profInc,
+                                   UChar* buf, Int nbuf, const ARMInstr* i, 
+                                   Bool mode64,
+                                   VexEndness endness_host,
+                                   const void* disp_cp_chain_me_to_slowEP,
+                                   const void* disp_cp_chain_me_to_fastEP,
+                                   const void* disp_cp_xindir,
+                                   const void* disp_cp_xassisted );
+
+extern void genSpill_ARM  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                            HReg rreg, Int offset, Bool );
+extern void genReload_ARM ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                            HReg rreg, Int offset, Bool );
+
+extern const RRegUniverse* getRRegUniverse_ARM ( void );
+
+extern HInstrArray* iselSB_ARM   ( const IRSB*, 
+                                   VexArch,
+                                   const VexArchInfo*,
+                                   const VexAbiInfo*,
+                                   Int offs_Host_EvC_Counter,
+                                   Int offs_Host_EvC_FailAddr,
+                                   Bool chainingAllowed,
+                                   Bool addProfInc,
+                                   Addr max_ga );
+
+/* How big is an event check?  This is kind of a kludge because it
+   depends on the offsets of host_EvC_FAILADDR and
+   host_EvC_COUNTER. */
+extern Int evCheckSzB_ARM (void);
+
+/* Perform a chaining and unchaining of an XDirect jump. */
+extern VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+                                        void* place_to_chain,
+                                        const void* disp_cp_chain_me_EXPECTED,
+                                        const void* place_to_jump_to );
+
+extern VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+                                          void* place_to_unchain,
+                                          const void* place_to_jump_to_EXPECTED,
+                                          const void* disp_cp_chain_me );
+
+/* Patch the counter location into an existing ProfInc point. */
+extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+                                        void*  place_to_patch,
+                                        const ULong* location_of_counter );
+
+
+#endif /* ndef __VEX_HOST_ARM_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                     host_arm_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_arm_isel.c b/VEX/priv/host_arm_isel.c
new file mode 100644
index 0000000..b867c3d
--- /dev/null
+++ b/VEX/priv/host_arm_isel.c
@@ -0,0 +1,6437 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                   host_arm_isel.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   NEON support is
+   Copyright (C) 2010-2013 Samsung Electronics
+   contributed by Dmitry Zhurikhin <zhur@ispras.ru>
+              and Kirill Batuzov <batuzovk@ispras.ru>
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "ir_match.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "host_generic_regs.h"
+#include "host_generic_simd64.h"  // for 32-bit SIMD helpers
+#include "host_arm_defs.h"
+
+
+/*---------------------------------------------------------*/
+/*--- ARMvfp control word stuff                         ---*/
+/*---------------------------------------------------------*/
+
+/* Vex-generated code expects to run with the FPU set as follows: all
+   exceptions masked, round-to-nearest, non-vector mode, with the NZCV
+   flags cleared, and FZ (flush to zero) disabled.  Curiously enough,
+   this corresponds to a FPSCR value of zero.
+
+   fpscr should therefore be zero on entry to Vex-generated code, and
+   should be unchanged at exit.  (Or at least the bottom 28 bits
+   should be zero).
+*/
+
+#define DEFAULT_FPSCR 0
+
+
+/*---------------------------------------------------------*/
+/*--- ISelEnv                                           ---*/
+/*---------------------------------------------------------*/
+
+/* This carries around:
+
+   - A mapping from IRTemp to IRType, giving the type of any IRTemp we
+     might encounter.  This is computed before insn selection starts,
+     and does not change.
+
+   - A mapping from IRTemp to HReg.  This tells the insn selector
+     which virtual register(s) are associated with each IRTemp
+     temporary.  This is computed before insn selection starts, and
+     does not change.  We expect this mapping to map precisely the
+     same set of IRTemps as the type mapping does.
+
+        - vregmap   holds the primary register for the IRTemp.
+        - vregmapHI is only used for 64-bit integer-typed
+             IRTemps.  It holds the identity of a second
+             32-bit virtual HReg, which holds the high half
+             of the value.
+
+   - The code array, that is, the insns selected so far.
+
+   - A counter, for generating new virtual registers.
+
+   - The host hardware capabilities word.  This is set at the start
+     and does not change.
+
+   - A Bool for indicating whether we may generate chain-me
+     instructions for control flow transfers, or whether we must use
+     XAssisted.
+
+   - The maximum guest address of any guest insn in this block.
+     Actually, the address of the highest-addressed byte from any insn
+     in this block.  Is set at the start and does not change.  This is
+     used for detecting jumps which are definitely forward-edges from
+     this block, and therefore can be made (chained) to the fast entry
+     point of the destination, thereby avoiding the destination's
+     event check.
+
+   Note, this is all (well, mostly) host-independent.
+*/
+
+typedef
+   struct {
+      /* Constant -- are set at the start and do not change. */
+      IRTypeEnv*   type_env;
+
+      HReg*        vregmap;
+      HReg*        vregmapHI;
+      Int          n_vregmap;
+
+      UInt         hwcaps;
+
+      Bool         chainingAllowed;
+      Addr32       max_ga;
+
+      /* These are modified as we go along. */
+      HInstrArray* code;
+      Int          vreg_ctr;
+   }
+   ISelEnv;
+
+static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp )
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   return env->vregmap[tmp];
+}
+
+static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp )
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
+   *vrLO = env->vregmap[tmp];
+   *vrHI = env->vregmapHI[tmp];
+}
+
+static void addInstr ( ISelEnv* env, ARMInstr* instr )
+{
+   addHInstr(env->code, instr);
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      ppARMInstr(instr);
+      vex_printf("\n");
+   }
+}
+
+static HReg newVRegI ( ISelEnv* env )
+{
+   HReg reg = mkHReg(True/*virtual reg*/, HRcInt32, 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+static HReg newVRegD ( ISelEnv* env )
+{
+   HReg reg = mkHReg(True/*virtual reg*/, HRcFlt64, 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+static HReg newVRegF ( ISelEnv* env )
+{
+   HReg reg = mkHReg(True/*virtual reg*/, HRcFlt32, 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+static HReg newVRegV ( ISelEnv* env )
+{
+   HReg reg = mkHReg(True/*virtual reg*/, HRcVec128, 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+/* These are duplicated in guest_arm_toIR.c */
+static IRExpr* unop ( IROp op, IRExpr* a )
+{
+   return IRExpr_Unop(op, a);
+}
+
+static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
+{
+   return IRExpr_Binop(op, a1, a2);
+}
+
+static IRExpr* bind ( Int binder )
+{
+   return IRExpr_Binder(binder);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Forward declarations                        ---*/
+/*---------------------------------------------------------*/
+
+/* These are organised as iselXXX and iselXXX_wrk pairs.  The
+   iselXXX_wrk do the real work, but are not to be called directly.
+   For each XXX, iselXXX calls its iselXXX_wrk counterpart, then
+   checks that all returned registers are virtual.  You should not
+   call the _wrk version directly.
+*/
+static ARMAMode1*  iselIntExpr_AMode1_wrk ( ISelEnv* env, IRExpr* e );
+static ARMAMode1*  iselIntExpr_AMode1     ( ISelEnv* env, IRExpr* e );
+
+static ARMAMode2*  iselIntExpr_AMode2_wrk ( ISelEnv* env, IRExpr* e );
+static ARMAMode2*  iselIntExpr_AMode2     ( ISelEnv* env, IRExpr* e );
+
+static ARMAModeV*  iselIntExpr_AModeV_wrk ( ISelEnv* env, IRExpr* e );
+static ARMAModeV*  iselIntExpr_AModeV     ( ISelEnv* env, IRExpr* e );
+
+static ARMAModeN*  iselIntExpr_AModeN_wrk ( ISelEnv* env, IRExpr* e );
+static ARMAModeN*  iselIntExpr_AModeN     ( ISelEnv* env, IRExpr* e );
+
+static ARMRI84*    iselIntExpr_RI84_wrk
+        ( /*OUT*/Bool* didInv, Bool mayInv, ISelEnv* env, IRExpr* e );
+static ARMRI84*    iselIntExpr_RI84
+        ( /*OUT*/Bool* didInv, Bool mayInv, ISelEnv* env, IRExpr* e );
+
+static ARMRI5*     iselIntExpr_RI5_wrk    ( ISelEnv* env, IRExpr* e );
+static ARMRI5*     iselIntExpr_RI5        ( ISelEnv* env, IRExpr* e );
+
+static ARMCondCode iselCondCode_wrk       ( ISelEnv* env, IRExpr* e );
+static ARMCondCode iselCondCode           ( ISelEnv* env, IRExpr* e );
+
+static HReg        iselIntExpr_R_wrk      ( ISelEnv* env, IRExpr* e );
+static HReg        iselIntExpr_R          ( ISelEnv* env, IRExpr* e );
+
+static void        iselInt64Expr_wrk      ( HReg* rHi, HReg* rLo, 
+                                            ISelEnv* env, IRExpr* e );
+static void        iselInt64Expr          ( HReg* rHi, HReg* rLo, 
+                                            ISelEnv* env, IRExpr* e );
+
+static HReg        iselDblExpr_wrk        ( ISelEnv* env, IRExpr* e );
+static HReg        iselDblExpr            ( ISelEnv* env, IRExpr* e );
+
+static HReg        iselFltExpr_wrk        ( ISelEnv* env, IRExpr* e );
+static HReg        iselFltExpr            ( ISelEnv* env, IRExpr* e );
+
+static HReg        iselNeon64Expr_wrk     ( ISelEnv* env, IRExpr* e );
+static HReg        iselNeon64Expr         ( ISelEnv* env, IRExpr* e );
+
+static HReg        iselNeonExpr_wrk       ( ISelEnv* env, IRExpr* e );
+static HReg        iselNeonExpr           ( ISelEnv* env, IRExpr* e );
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Misc helpers                                ---*/
+/*---------------------------------------------------------*/
+
+/* Rotate the 32-bit value 'x' right by 'sh' places, for 0 <= sh < 32.
+   The sh == 0 case is handled specially since both "x << 32" and
+   "x >> 32" would be undefined behaviour in C. */
+static UInt ROR32 ( UInt x, UInt sh ) {
+   /* 'sh' is unsigned, so the old "sh >= 0" test was vacuously true
+      (and provokes -Wtype-limits); only the upper bound matters. */
+   vassert(sh < 32);
+   if (sh == 0)
+      return x;
+   else
+      return (x << (32-sh)) | (x >> sh);
+}
+
+/* Figure out if 'u' fits in the special shifter-operand 8x4 immediate
+   form (an 8-bit value rotated right by twice a 4-bit count), and if
+   so return the components. */
+static Bool fitsIn8x4 ( /*OUT*/UInt* u8, /*OUT*/UInt* u4, UInt u )
+{
+   UInt rot;
+   for (rot = 0; rot < 16; rot++) {
+      /* After 'rot' left-rotations by 2, is only the low byte set? */
+      if ((u & 0xFFFFFF00) == 0) {
+         *u8 = u;
+         *u4 = rot;
+         return True;
+      }
+      /* Rotate left by 2 == rotate right by 30; try the next count. */
+      u = ROR32(u, 30);
+   }
+   vassert(rot == 16);
+   return False;
+}
+
+/* Make an integer reg-reg move (MOV dst, src). */
+static ARMInstr* mk_iMOVds_RR ( HReg dst, HReg src )
+{
+   /* Both operands must be 32-bit integer registers. */
+   vassert(hregClass(dst) == HRcInt32);
+   vassert(hregClass(src) == HRcInt32);
+   return ARMInstr_Mov(dst, ARMRI84_R(src));
+}
+
+/* Set the VFP unit's rounding mode to default (round to nearest). */
+static void set_VFP_rounding_default ( ISelEnv* env )
+{
+   /* Emits:
+         mov  rD, #DEFAULT_FPSCR
+         fmxr fpscr, rD
+   */
+   HReg rD = newVRegI(env);
+   addInstr(env, ARMInstr_Imm32(rD, DEFAULT_FPSCR));
+   addInstr(env, ARMInstr_FPSCR(True/*toFPSCR*/, rD));
+}
+
+/* Mess with the VFP unit's rounding mode: 'mode' is an I32-typed
+   expression denoting a value in the range 0 .. 3, indicating a round
+   mode encoded as per type IRRoundingMode.  Set FPSCR to have the
+   same rounding.
+*/
+static
+void set_VFP_rounding_mode ( ISelEnv* env, IRExpr* mode )
+{
+   /* 'mode' carries an IR rounding encoding, which must be translated
+      to the ARMvfp one:
+         IR:     00 nearest   10 +inf   01 -inf   11 zero
+         ARMvfp: 00 nearest   01 +inf   10 -inf   11 zero
+      So the two low bits just need swapping. */
+   HReg irrm = iselIntExpr_R(env, mode);
+   HReg loUp = newVRegI(env);
+   HReg hiDn = newVRegI(env);
+   HReg fin  = newVRegI(env);
+   /* loUp = (irrm << 1) & 2   -- bit 0 moved up
+      hiDn = (irrm >> 1) & 1   -- bit 1 moved down
+      fin  = (loUp | hiDn) << 22  -- into FPSCR's RMode position
+      fmxr fpscr, fin
+      The two shifts, then the two ANDs, may dual-issue. */
+   addInstr(env, ARMInstr_Shift(ARMsh_SHL, loUp, irrm, ARMRI5_I5(1)));
+   addInstr(env, ARMInstr_Shift(ARMsh_SHR, hiDn, irrm, ARMRI5_I5(1)));
+   addInstr(env, ARMInstr_Alu(ARMalu_AND, loUp, loUp, ARMRI84_I84(2,0)));
+   addInstr(env, ARMInstr_Alu(ARMalu_AND, hiDn, hiDn, ARMRI84_I84(1,0)));
+   addInstr(env, ARMInstr_Alu(ARMalu_OR, fin, loUp, ARMRI84_R(hiDn)));
+   addInstr(env, ARMInstr_Shift(ARMsh_SHL, fin, fin, ARMRI5_I5(22)));
+   addInstr(env, ARMInstr_FPSCR(True/*toFPSCR*/, fin));
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Function call helpers                       ---*/
+/*---------------------------------------------------------*/
+
+/* Used only in doHelperCall.  See big comment in doHelperCall re
+   handling of register-parameter args.  This function figures out
+   whether evaluation of an expression might require use of a fixed
+   register.  If in doubt return True (safe but suboptimal).
+*/
+static
+Bool mightRequireFixedRegs ( IRExpr* e )
+{
+   if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e))) {
+      // These are always "safe" -- either a copy of r13(sp) in some
+      // arbitrary vreg, or a copy of r8, respectively.
+      return False;
+   }
+   /* A "normal" expression: only the trivially evaluable forms are
+      known not to touch fixed registers. */
+   switch (e->tag) {
+      case Iex_RdTmp:
+      case Iex_Const:
+      case Iex_Get:
+         return False;
+      default:
+         /* Anything else might; be conservative. */
+         return True;
+   }
+}
+
+
+/* Do a complete function call.  |guard| is a Ity_Bit expression
+   indicating whether or not the call happens.  If guard==NULL, the
+   call is unconditional.  |retloc| is set to indicate where the
+   return value is after the call.  The caller (of this fn) must
+   generate code to add |stackAdjustAfterCall| to the stack pointer
+   after the call is done.  Returns True iff it managed to handle this
+   combination of arg/return types, else returns False. */
+
+static
+Bool doHelperCall ( /*OUT*/UInt*   stackAdjustAfterCall,
+                    /*OUT*/RetLoc* retloc,
+                    ISelEnv* env,
+                    IRExpr* guard,
+                    IRCallee* cee, IRType retTy, IRExpr** args )
+{
+   ARMCondCode cc;
+   HReg        argregs[ARM_N_ARGREGS];
+   HReg        tmpregs[ARM_N_ARGREGS];
+   Bool        go_fast;
+   Int         n_args, i, nextArgReg;
+   Addr32      target;
+
+   vassert(ARM_N_ARGREGS == 4);
+
+   /* Set default returns.  We'll update them later if needed. */
+   *stackAdjustAfterCall = 0;
+   *retloc               = mk_RetLoc_INVALID();
+
+   /* These are used for cross-checking that IR-level constraints on
+      the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+   UInt nVECRETs = 0;
+   UInt nBBPTRs  = 0;
+
+   /* Marshal args for a call and do the call.
+
+      This function only deals with a tiny set of possibilities, which
+      cover all helpers in practice.  The restrictions are that only
+      arguments in registers are supported, hence only ARM_N_REGPARMS
+      x 32 integer bits in total can be passed.  In fact the only
+      supported arg types are I32 and I64.
+
+      The return type can be I{64,32} or V128.  In the V128 case, it
+      is expected that |args| will contain the special node
+      IRExpr_VECRET(), in which case this routine generates code to
+      allocate space on the stack for the vector return value.  Since
+      we are not passing any scalars on the stack, it is enough to
+      preallocate the return space before marshalling any arguments,
+      in this case.
+
+      |args| may also contain IRExpr_BBPTR(), in which case the
+      value in r8 is passed as the corresponding argument.
+
+      Generating code which is both efficient and correct when
+      parameters are to be passed in registers is difficult, for the
+      reasons elaborated in detail in comments attached to
+      doHelperCall() in priv/host-x86/isel.c.  Here, we use a variant
+      of the method described in those comments.
+
+      The problem is split into two cases: the fast scheme and the
+      slow scheme.  In the fast scheme, arguments are computed
+      directly into the target (real) registers.  This is only safe
+      when we can be sure that computation of each argument will not
+      trash any real registers set by computation of any other
+      argument.
+
+      In the slow scheme, all args are first computed into vregs, and
+      once they are all done, they are moved to the relevant real
+      regs.  This always gives correct code, but it also gives a bunch
+      of vreg-to-rreg moves which are usually redundant but are hard
+      for the register allocator to get rid of.
+
+      To decide which scheme to use, all argument expressions are
+      first examined.  If they are all so simple that it is clear they
+      will be evaluated without use of any fixed registers, use the
+      fast scheme, else use the slow scheme.  Note also that only
+      unconditional calls may use the fast scheme, since having to
+      compute a condition expression could itself trash real
+      registers.
+
+      Note this requires being able to examine an expression and
+      determine whether or not evaluation of it might use a fixed
+      register.  That requires knowledge of how the rest of this insn
+      selector works.  Currently just the following 3 are regarded as
+      safe -- hopefully they cover the majority of arguments in
+      practice: IRExpr_Tmp IRExpr_Const IRExpr_Get.
+   */
+
+   /* Note that the cee->regparms field is meaningless on ARM hosts
+      (since there is only one calling convention) and so we always
+      ignore it. */
+
+   n_args = 0;
+   for (i = 0; args[i]; i++) {
+      IRExpr* arg = args[i];
+      if (UNLIKELY(arg->tag == Iex_VECRET)) {
+         nVECRETs++;
+      } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+         nBBPTRs++;
+      }
+      n_args++;
+   }
+
+   argregs[0] = hregARM_R0();
+   argregs[1] = hregARM_R1();
+   argregs[2] = hregARM_R2();
+   argregs[3] = hregARM_R3();
+
+   tmpregs[0] = tmpregs[1] = tmpregs[2] =
+   tmpregs[3] = INVALID_HREG;
+
+   /* First decide which scheme (slow or fast) is to be used.  First
+      assume the fast scheme, and select slow if any contraindications
+      (wow) appear. */
+
+   go_fast = True;
+
+   if (guard) {
+      if (guard->tag == Iex_Const
+          && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+         /* unconditional */
+      } else {
+         /* Not manifestly unconditional -- be conservative. */
+         go_fast = False;
+      }
+   }
+
+   if (go_fast) {
+      for (i = 0; i < n_args; i++) {
+         if (mightRequireFixedRegs(args[i])) {
+            go_fast = False;
+            break;
+         }
+      }
+   }
+
+   if (go_fast) {
+      if (retTy == Ity_V128 || retTy == Ity_V256)
+         go_fast = False;
+   }
+
+   /* At this point the scheme to use has been established.  Generate
+      code to get the arg values into the argument rregs.  If we run
+      out of arg regs, give up. */
+
+   if (go_fast) {
+
+      /* FAST SCHEME */
+      nextArgReg = 0;
+
+      for (i = 0; i < n_args; i++) {
+         IRExpr* arg = args[i];
+
+         IRType  aTy = Ity_INVALID;
+         if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+            aTy = typeOfIRExpr(env->type_env, arg);
+
+         if (nextArgReg >= ARM_N_ARGREGS)
+            return False; /* out of argregs */
+
+         if (aTy == Ity_I32) {
+            addInstr(env, mk_iMOVds_RR( argregs[nextArgReg],
+                                        iselIntExpr_R(env, arg) ));
+            nextArgReg++;
+         }
+         else if (aTy == Ity_I64) {
+            /* 64-bit args must be passed in an a reg-pair of the form
+               n:n+1, where n is even.  Hence either r0:r1 or r2:r3.
+               On a little-endian host, the less significant word is
+               passed in the lower-numbered register. */
+            if (nextArgReg & 1) {
+               if (nextArgReg >= ARM_N_ARGREGS)
+                  return False; /* out of argregs */
+               addInstr(env, ARMInstr_Imm32( argregs[nextArgReg], 0xAA ));
+               nextArgReg++;
+            }
+            if (nextArgReg >= ARM_N_ARGREGS)
+               return False; /* out of argregs */
+            HReg raHi, raLo;
+            iselInt64Expr(&raHi, &raLo, env, arg);
+            addInstr(env, mk_iMOVds_RR( argregs[nextArgReg], raLo ));
+            nextArgReg++;
+            addInstr(env, mk_iMOVds_RR( argregs[nextArgReg], raHi ));
+            nextArgReg++;
+         }
+         else if (arg->tag == Iex_BBPTR) {
+            vassert(0); //ATC
+            addInstr(env, mk_iMOVds_RR( argregs[nextArgReg],
+                                        hregARM_R8() ));
+            nextArgReg++;
+         }
+         else if (arg->tag == Iex_VECRET) {
+            // If this happens, it denotes ill-formed IR
+            vassert(0);
+         }
+         else
+            return False; /* unhandled arg type */
+      }
+
+      /* Fast scheme only applies for unconditional calls.  Hence: */
+      cc = ARMcc_AL;
+
+   } else {
+
+      /* SLOW SCHEME; move via temporaries */
+      nextArgReg = 0;
+
+      for (i = 0; i < n_args; i++) {
+         IRExpr* arg = args[i];
+
+         IRType  aTy = Ity_INVALID;
+         if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+            aTy  = typeOfIRExpr(env->type_env, arg);
+
+         if (nextArgReg >= ARM_N_ARGREGS)
+            return False; /* out of argregs */
+
+         if (aTy == Ity_I32) {
+            tmpregs[nextArgReg] = iselIntExpr_R(env, args[i]);
+            nextArgReg++;
+         }
+         else if (aTy == Ity_I64) {
+            /* Same comment applies as in the Fast-scheme case. */
+            if (nextArgReg & 1)
+               nextArgReg++;
+            if (nextArgReg + 1 >= ARM_N_ARGREGS)
+               return False; /* out of argregs */
+            HReg raHi, raLo;
+            iselInt64Expr(&raHi, &raLo, env, args[i]);
+            tmpregs[nextArgReg] = raLo;
+            nextArgReg++;
+            tmpregs[nextArgReg] = raHi;
+            nextArgReg++;
+         }
+         else if (arg->tag == Iex_BBPTR) {
+            vassert(0); //ATC
+            tmpregs[nextArgReg] = hregARM_R8();
+            nextArgReg++;
+         }
+         else if (arg->tag == Iex_VECRET) {
+            // If this happens, it denotes ill-formed IR
+            vassert(0);
+         }
+         else
+            return False; /* unhandled arg type */
+      }
+
+      /* Now we can compute the condition.  We can't do it earlier
+         because the argument computations could trash the condition
+         codes.  Be a bit clever to handle the common case where the
+         guard is 1:Bit. */
+      cc = ARMcc_AL;
+      if (guard) {
+         if (guard->tag == Iex_Const
+             && guard->Iex.Const.con->tag == Ico_U1
+             && guard->Iex.Const.con->Ico.U1 == True) {
+            /* unconditional -- do nothing */
+         } else {
+            cc = iselCondCode( env, guard );
+         }
+      }
+
+      /* Move the args to their final destinations. */
+      for (i = 0; i < nextArgReg; i++) {
+         if (hregIsInvalid(tmpregs[i])) { // Skip invalid regs
+            addInstr(env, ARMInstr_Imm32( argregs[i], 0xAA ));
+            continue;
+         }
+         /* None of these insns, including any spill code that might
+            be generated, may alter the condition codes. */
+         addInstr( env, mk_iMOVds_RR( argregs[i], tmpregs[i] ) );
+      }
+
+   }
+
+   /* Should be assured by checks above */
+   vassert(nextArgReg <= ARM_N_ARGREGS);
+
+   /* Do final checks, set the return values, and generate the call
+      instruction proper. */
+   vassert(nBBPTRs == 0 || nBBPTRs == 1);
+   /* Note: the ?: must be parenthesised.  Without the parens this
+      parses as "(nVECRETs == cond) ? 1 : 0" -- which only checked the
+      right thing by accident, because the condition is itself 0/1. */
+   vassert(nVECRETs == ((retTy == Ity_V128 || retTy == Ity_V256) ? 1 : 0));
+   vassert(*stackAdjustAfterCall == 0);
+   vassert(is_RetLoc_INVALID(*retloc));
+   switch (retTy) {
+         case Ity_INVALID:
+            /* Function doesn't return a value. */
+            *retloc = mk_RetLoc_simple(RLPri_None);
+            break;
+         case Ity_I64:
+            *retloc = mk_RetLoc_simple(RLPri_2Int);
+            break;
+         case Ity_I32: case Ity_I16: case Ity_I8:
+            *retloc = mk_RetLoc_simple(RLPri_Int);
+            break;
+         case Ity_V128:
+            vassert(0); // ATC
+            *retloc = mk_RetLoc_spRel(RLPri_V128SpRel, 0);
+            *stackAdjustAfterCall = 16;
+            break;
+         case Ity_V256:
+            vassert(0); // ATC
+            *retloc = mk_RetLoc_spRel(RLPri_V256SpRel, 0);
+            *stackAdjustAfterCall = 32;
+            break;
+         default:
+            /* IR can denote other possible return types, but we don't
+               handle those here. */
+           vassert(0);
+   }
+
+   /* Finally, generate the call itself.  This needs the *retloc value
+      set in the switch above, which is why it's at the end. */
+
+   /* nextArgReg doles out argument registers.  Since these are
+      assigned in the order r0, r1, r2, r3, its numeric value at this
+      point, which must be between 0 and 4 inclusive, is going to be
+      equal to the number of arg regs in use for the call.  Hence bake
+      that number into the call (we'll need to know it when doing
+      register allocation, to know what regs the call reads.)
+
+      There is a bit of a twist -- harmless but worth recording.
+      Suppose the arg types are (Ity_I32, Ity_I64).  Then we will have
+      the first arg in r0 and the second in r3:r2, but r1 isn't used.
+      We nevertheless have nextArgReg==4 and bake that into the call
+      instruction.  This will mean the register allocator wil believe
+      this insn reads r1 when in fact it doesn't.  But that's
+      harmless; it just artificially extends the live range of r1
+      unnecessarily.  The best fix would be to put into the
+      instruction, a bitmask indicating which of r0/1/2/3 carry live
+      values.  But that's too much hassle. */
+
+   target = (Addr)cee->addr;
+   addInstr(env, ARMInstr_Call( cc, target, nextArgReg, *retloc ));
+
+   return True; /* success */
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (32/16/8 bit)           ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for an integer-typed expression, and add them to the
+   code list.  Return a reg holding the result.  This reg will be a
+   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
+   want to modify it, ask for a new vreg, copy it in there, and modify
+   the copy.  The register allocator will do its best to map both
+   vregs to the same real register, so the copies will often disappear
+   later in the game.
+
+   This should handle expressions of 32, 16 and 8-bit type.  All
+   results are returned in a 32-bit register.  For 16- and 8-bit
+   expressions, the upper 16/24 bits are arbitrary, so you should mask
+   or sign extend partial values if necessary.
+*/
+
+/* --------------------- AMode1 --------------------- */
+
+/* Return an AMode1 which computes the value of the specified
+   expression, possibly also adding insns to the code list as a
+   result.  The expression may only be a 32-bit one.
+*/
+
+/* Check an AMode1 is well-formed: register operands must be 32-bit
+   vregs (or, for the RI form, possibly r8), and the immediate/shift
+   fields must be in range for ARM's mode-1 addressing. */
+static Bool sane_AMode1 ( ARMAMode1* am )
+{
+   switch (am->tag) {
+      case ARMam1_RI:
+         return
+            toBool( hregClass(am->ARMam1.RI.reg) == HRcInt32
+                    && (hregIsVirtual(am->ARMam1.RI.reg)
+                        || sameHReg(am->ARMam1.RI.reg, hregARM_R8()))
+                    && am->ARMam1.RI.simm13 >= -4095
+                    && am->ARMam1.RI.simm13 <= 4095 );
+      case ARMam1_RRS:
+         return
+            toBool( hregClass(am->ARMam1.RRS.base) == HRcInt32
+                    && hregIsVirtual(am->ARMam1.RRS.base)
+                    && hregClass(am->ARMam1.RRS.index) == HRcInt32
+                    && hregIsVirtual(am->ARMam1.RRS.index)
+                    && am->ARMam1.RRS.shift >= 0
+                    && am->ARMam1.RRS.shift <= 3 );
+      default:
+         /* Name this function precisely in the panic message; the old
+            "sane_AMode" was indistinguishable from sane_AMode2's. */
+         vpanic("sane_AMode1: unknown ARM AMode1 tag");
+   }
+}
+
+static ARMAMode1* iselIntExpr_AMode1 ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check its result. */
+   ARMAMode1* result = iselIntExpr_AMode1_wrk(env, e);
+   vassert(sane_AMode1(result));
+   return result;
+}
+
+static ARMAMode1* iselIntExpr_AMode1_wrk ( ISelEnv* env, IRExpr* e )
+{
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I32);
+
+   /* FIXME: add RRS matching */
+
+   /* Pattern: {Add32,Sub32}(expr,simm13) */
+   if (e->tag == Iex_Binop
+       && (e->Iex.Binop.op == Iop_Add32 || e->Iex.Binop.op == Iop_Sub32)
+       && e->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U32) {
+      Int offset = (Int)e->Iex.Binop.arg2->Iex.Const.con->Ico.U32;
+      if (offset >= -4095 && offset <= 4095) {
+         /* Fold a subtraction in by negating the offset. */
+         if (e->Iex.Binop.op == Iop_Sub32)
+            offset = -offset;
+         HReg base = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         return ARMAMode1_RI(base, offset);
+      }
+   }
+
+   /* No pattern matched: evaluate the whole expression into a
+      register and use a zero offset. */
+   {
+      HReg base = iselIntExpr_R(env, e);
+      return ARMAMode1_RI(base, 0);
+   }
+}
+
+
+/* --------------------- AMode2 --------------------- */
+
+/* Return an AMode2 which computes the value of the specified
+   expression, possibly also adding insns to the code list as a
+   result.  The expression may only be a 32-bit one.
+*/
+
+/* Check an AMode2 is well-formed: register operands must be 32-bit
+   vregs and the immediate must fit ARM's mode-2 range (-255..255). */
+static Bool sane_AMode2 ( ARMAMode2* am )
+{
+   switch (am->tag) {
+      case ARMam2_RI:
+         return
+            toBool( hregClass(am->ARMam2.RI.reg) == HRcInt32
+                    && hregIsVirtual(am->ARMam2.RI.reg)
+                    && am->ARMam2.RI.simm9 >= -255
+                    && am->ARMam2.RI.simm9 <= 255 );
+      case ARMam2_RR:
+         return
+            toBool( hregClass(am->ARMam2.RR.base) == HRcInt32
+                    && hregIsVirtual(am->ARMam2.RR.base)
+                    && hregClass(am->ARMam2.RR.index) == HRcInt32
+                    && hregIsVirtual(am->ARMam2.RR.index) );
+      default:
+         /* Name this function precisely in the panic message; the old
+            "sane_AMode" was indistinguishable from sane_AMode1's. */
+         vpanic("sane_AMode2: unknown ARM AMode2 tag");
+   }
+}
+
+static ARMAMode2* iselIntExpr_AMode2 ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check its result. */
+   ARMAMode2* result = iselIntExpr_AMode2_wrk(env, e);
+   vassert(sane_AMode2(result));
+   return result;
+}
+
+static ARMAMode2* iselIntExpr_AMode2_wrk ( ISelEnv* env, IRExpr* e )
+{
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I32);
+
+   /* FIXME: add RR matching */
+
+   /* Pattern: {Add32,Sub32}(expr,simm8) */
+   if (e->tag == Iex_Binop
+       && (e->Iex.Binop.op == Iop_Add32 || e->Iex.Binop.op == Iop_Sub32)
+       && e->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U32) {
+      Int offset = (Int)e->Iex.Binop.arg2->Iex.Const.con->Ico.U32;
+      if (offset >= -255 && offset <= 255) {
+         /* Fold a subtraction in by negating the offset. */
+         if (e->Iex.Binop.op == Iop_Sub32)
+            offset = -offset;
+         HReg base = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         return ARMAMode2_RI(base, offset);
+      }
+   }
+
+   /* No pattern matched: evaluate the whole expression into a
+      register and use a zero offset. */
+   {
+      HReg base = iselIntExpr_R(env, e);
+      return ARMAMode2_RI(base, 0);
+   }
+}
+
+
+/* --------------------- AModeV --------------------- */
+
+/* Return an AModeV which computes the value of the specified
+   expression, possibly also adding insns to the code list as a
+   result.  The expression may only be a 32-bit one.
+*/
+
+/* Check an AModeV: base must be a 32-bit vreg and the offset a
+   word-aligned value in -1020 .. +1020 (the VFP load/store range). */
+static Bool sane_AModeV ( ARMAModeV* am )
+{
+   Bool regOK = hregClass(am->reg) == HRcInt32
+                && hregIsVirtual(am->reg);
+   Bool immOK = am->simm11 >= -1020 && am->simm11 <= 1020
+                && (am->simm11 & 3) == 0;
+   return toBool(regOK && immOK);
+}
+
+static ARMAModeV* iselIntExpr_AModeV ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check its result. */
+   ARMAModeV* result = iselIntExpr_AModeV_wrk(env, e);
+   vassert(sane_AModeV(result));
+   return result;
+}
+
+static ARMAModeV* iselIntExpr_AModeV_wrk ( ISelEnv* env, IRExpr* e )
+{
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I32);
+
+   /* Pattern: {Add32,Sub32}(expr, simm8 << 2) -- a word-aligned
+      offset in the VFP addressing range. */
+   if (e->tag == Iex_Binop
+       && (e->Iex.Binop.op == Iop_Add32 || e->Iex.Binop.op == Iop_Sub32)
+       && e->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U32) {
+      Int offset = (Int)e->Iex.Binop.arg2->Iex.Const.con->Ico.U32;
+      if (offset >= -1020 && offset <= 1020 && (offset & 3) == 0) {
+         /* Fold a subtraction in by negating the offset. */
+         if (e->Iex.Binop.op == Iop_Sub32)
+            offset = -offset;
+         HReg base = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         return mkARMAModeV(base, offset);
+      }
+   }
+
+   /* No pattern matched: evaluate the whole expression into a
+      register and use a zero offset. */
+   {
+      HReg base = iselIntExpr_R(env, e);
+      return mkARMAModeV(base, 0);
+   }
+}
+
+/* -------------------- AModeN -------------------- */
+
+static ARMAModeN* iselIntExpr_AModeN ( ISelEnv* env, IRExpr* e )
+{
+   /* No extra sanity checking beyond what the worker guarantees. */
+   return iselIntExpr_AModeN_wrk(env, e);
+}
+
+static ARMAModeN* iselIntExpr_AModeN_wrk ( ISelEnv* env, IRExpr* e )
+{
+   /* Only the plain register form is ever generated here. */
+   return mkARMAModeN_R( iselIntExpr_R(env, e) );
+}
+
+
+/* --------------------- RI84 --------------------- */
+
+/* Select instructions to generate 'e' into a RI84.  If mayInv is
+   true, then the caller will also accept an I84 form that denotes
+   'not e'.  In this case didInv may not be NULL, and *didInv is set
+   to True.  This complication is so as to allow generation of an RI84
+   which is suitable for use in either an AND or BIC instruction,
+   without knowing (before this call) which one.
+*/
+static ARMRI84* iselIntExpr_RI84 ( /*OUT*/Bool* didInv, Bool mayInv,
+                                   ISelEnv* env, IRExpr* e )
+{
+   if (mayInv)
+      vassert(didInv != NULL);
+   ARMRI84* result = iselIntExpr_RI84_wrk(didInv, mayInv, env, e);
+   /* Sanity-check the worker's result. */
+   switch (result->tag) {
+      case ARMri84_I84:
+         break;
+      case ARMri84_R:
+         vassert(hregClass(result->ARMri84.R.reg) == HRcInt32);
+         vassert(hregIsVirtual(result->ARMri84.R.reg));
+         break;
+      default:
+         vpanic("iselIntExpr_RI84: unknown arm RI84 tag");
+   }
+   return result;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static ARMRI84* iselIntExpr_RI84_wrk ( /*OUT*/Bool* didInv, Bool mayInv,
+                                       ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
+
+   if (didInv) *didInv = False;
+
+   /* Immediate?  Try the 8x4 form, directly and (if allowed)
+      inverted. */
+   if (e->tag == Iex_Const) {
+      UInt val, imm8 = 0x100, rot4 = 0x10; /* both invalid */
+      switch (e->Iex.Const.con->tag) {
+         case Ico_U32: val = e->Iex.Const.con->Ico.U32; break;
+         case Ico_U16: val = 0xFFFF & (e->Iex.Const.con->Ico.U16); break;
+         case Ico_U8:  val = 0xFF   & (e->Iex.Const.con->Ico.U8); break;
+         default: vpanic("iselIntExpr_RI84.Iex_Const(armh)");
+      }
+      if (fitsIn8x4(&imm8, &rot4, val)) {
+         return ARMRI84_I84( (UShort)imm8, (UShort)rot4 );
+      }
+      if (mayInv && fitsIn8x4(&imm8, &rot4, ~val)) {
+         vassert(didInv);
+         *didInv = True;
+         return ARMRI84_I84( (UShort)imm8, (UShort)rot4 );
+      }
+      /* Not encodable as an immediate; fall through. */
+   }
+
+   /* Default: evaluate into a register and return that. */
+   {
+      HReg r = iselIntExpr_R ( env, e );
+      return ARMRI84_R(r);
+   }
+}
+
+
+/* --------------------- RI5 --------------------- */
+
+/* Select instructions to generate 'e' into a RI5. */
+
+static ARMRI5* iselIntExpr_RI5 ( ISelEnv* env, IRExpr* e )
+{
+   ARMRI5* result = iselIntExpr_RI5_wrk(env, e);
+   /* Sanity-check the worker's result. */
+   switch (result->tag) {
+      case ARMri5_I5:
+         break;
+      case ARMri5_R:
+         vassert(hregClass(result->ARMri5.R.reg) == HRcInt32);
+         vassert(hregIsVirtual(result->ARMri5.R.reg));
+         break;
+      default:
+         vpanic("iselIntExpr_RI5: unknown arm RI5 tag");
+   }
+   return result;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static ARMRI5* iselIntExpr_RI5_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I32 || ty == Ity_I8);
+
+   /* special case: immediate */
+   if (e->tag == Iex_Const) {
+      UInt u; /* always set by the switch below, else it panics */
+      switch (e->Iex.Const.con->tag) {
+         case Ico_U32: u = e->Iex.Const.con->Ico.U32; break;
+         case Ico_U16: u = 0xFFFF & (e->Iex.Const.con->Ico.U16); break;
+         case Ico_U8:  u = 0xFF   & (e->Iex.Const.con->Ico.U8); break;
+         default: vpanic("iselIntExpr_RI5.Iex_Const(armh)");
+      }
+      /* Only shift amounts 1..31 are encodable in the I5 form; 0 and
+         anything larger fall through to the register form. */
+      if (u >= 1 && u <= 31) {
+         return ARMRI5_I5(u);
+      }
+      /* else fail, fall through to default case */
+   }
+
+   /* default case: calculate into a register and return that */
+   {
+      HReg r = iselIntExpr_R ( env, e );
+      return ARMRI5_R(r);
+   }
+}
+
+
+/* ------------------- CondCode ------------------- */
+
+/* Generate code to evaluate a bit-typed expression, returning the
+   condition code which would correspond when the expression would
+   notionally have returned 1. */
+
+static ARMCondCode iselCondCode ( ISelEnv* env, IRExpr* e )
+{
+   ARMCondCode cond = iselCondCode_wrk(env,e);
+   /* The "never" condition must not escape the worker. */
+   vassert(cond != ARMcc_NV);
+   return cond;
+}
+
+/* Worker for iselCondCode: pattern-matches the Ity_I1-typed
+   expression 'e', emits flag-setting instruction(s), and returns the
+   ARM condition code that holds exactly when 'e' evaluates to 1.
+   Patterns are tried in order; the final catch-all panics. */
+static ARMCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I1);
+
+   /* var: test the low bit of the temp's register */
+   if (e->tag == Iex_RdTmp) {
+      HReg rTmp = lookupIRTemp(env, e->Iex.RdTmp.tmp);
+      /* CmpOrTst doesn't modify rTmp; so this is OK. */
+      ARMRI84* one  = ARMRI84_I84(1,0);
+      addInstr(env, ARMInstr_CmpOrTst(False/*test*/, rTmp, one));
+      return ARMcc_NE;
+   }
+
+   /* Not1(e) */
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) {
+      /* Generate code for the arg, and negate the test condition.
+         XOR with 1 flips an ARM condition code to its inverse. */
+      return 1 ^ iselCondCode(env, e->Iex.Unop.arg);
+   }
+
+   /* --- patterns rooted at: 32to1 --- */
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_32to1) {
+      HReg     rTmp = iselIntExpr_R(env, e->Iex.Unop.arg);
+      ARMRI84* one  = ARMRI84_I84(1,0);
+      addInstr(env, ARMInstr_CmpOrTst(False/*test*/, rTmp, one));
+      return ARMcc_NE;
+   }
+
+   /* --- patterns rooted at: CmpNEZ8 --- */
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_CmpNEZ8) {
+      /* TST with 0xFF: only the low byte is meaningful. */
+      HReg     r1   = iselIntExpr_R(env, e->Iex.Unop.arg)
+      ARMRI84* xFF  = ARMRI84_I84(0xFF,0);
+      addInstr(env, ARMInstr_CmpOrTst(False/*!isCmp*/, r1, xFF));
+      return ARMcc_NE;
+   }
+
+   /* --- patterns rooted at: CmpNEZ32 --- */
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_CmpNEZ32) {
+      HReg     r1   = iselIntExpr_R(env, e->Iex.Unop.arg);
+      ARMRI84* zero = ARMRI84_I84(0,0);
+      addInstr(env, ARMInstr_CmpOrTst(True/*isCmp*/, r1, zero));
+      return ARMcc_NE;
+   }
+
+   /* --- patterns rooted at: CmpNEZ64 --- */
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_CmpNEZ64) {
+      /* OR the two halves together; the result is nonzero iff the
+         64-bit value is nonzero. */
+      HReg     tHi, tLo;
+      HReg     tmp  = newVRegI(env);
+      ARMRI84* zero = ARMRI84_I84(0,0);
+      iselInt64Expr(&tHi, &tLo, env, e->Iex.Unop.arg);
+      addInstr(env, ARMInstr_Alu(ARMalu_OR, tmp, tHi, ARMRI84_R(tLo)));
+      addInstr(env, ARMInstr_CmpOrTst(True/*isCmp*/, tmp, zero));
+      return ARMcc_NE;
+   }
+
+   /* --- Cmp*32*(x,y) --- */
+   if (e->tag == Iex_Binop
+       && (e->Iex.Binop.op == Iop_CmpEQ32
+           || e->Iex.Binop.op == Iop_CmpNE32
+           || e->Iex.Binop.op == Iop_CmpLT32S
+           || e->Iex.Binop.op == Iop_CmpLT32U
+           || e->Iex.Binop.op == Iop_CmpLE32S
+           || e->Iex.Binop.op == Iop_CmpLE32U)) {
+      HReg     argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      ARMRI84* argR = iselIntExpr_RI84(NULL,False, 
+                                       env, e->Iex.Binop.arg2);
+      addInstr(env, ARMInstr_CmpOrTst(True/*isCmp*/, argL, argR));
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpEQ32:  return ARMcc_EQ;
+         case Iop_CmpNE32:  return ARMcc_NE;
+         case Iop_CmpLT32S: return ARMcc_LT;
+         case Iop_CmpLT32U: return ARMcc_LO;
+         case Iop_CmpLE32S: return ARMcc_LE;
+         case Iop_CmpLE32U: return ARMcc_LS;
+         default: vpanic("iselCondCode(arm): CmpXX32");
+      }
+   }
+
+   /* const */
+   /* Constant 1:Bit -- compare a register against itself (always
+      equal) and pick EQ or NE according to the constant's value. */
+   if (e->tag == Iex_Const) {
+      HReg r;
+      vassert(e->Iex.Const.con->tag == Ico_U1);
+      vassert(e->Iex.Const.con->Ico.U1 == True 
+              || e->Iex.Const.con->Ico.U1 == False);
+      r = newVRegI(env);
+      addInstr(env, ARMInstr_Imm32(r, 0));
+      addInstr(env, ARMInstr_CmpOrTst(True/*isCmp*/, r, ARMRI84_R(r)));
+      return e->Iex.Const.con->Ico.U1 ? ARMcc_EQ : ARMcc_NE;
+   }
+
+   // JRS 2013-Jan-03: this seems completely nonsensical
+   /* --- CasCmpEQ* --- */
+   /* Ist_Cas has a dummy argument to compare with, so comparison is
+      always true. */
+   //if (e->tag == Iex_Binop
+   //    && (e->Iex.Binop.op == Iop_CasCmpEQ32
+   //        || e->Iex.Binop.op == Iop_CasCmpEQ16
+   //        || e->Iex.Binop.op == Iop_CasCmpEQ8)) {
+   //   return ARMcc_AL;
+   //}
+
+   ppIRExpr(e);
+   vpanic("iselCondCode");
+}
+
+
+/* --------------------- Reg --------------------- */
+
+static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
+{
+   HReg res = iselIntExpr_R_wrk(env, e);
+   /* sanity checks ... */
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* Results always land in a 32-bit virtual register. */
+   vassert(hregClass(res) == HRcInt32);
+   vassert(hregIsVirtual(res));
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
+{
+   /* Generate ARM code to compute the I32/I16/I8 expression 'e' into a
+      freshly allocated (or looked-up) virtual register, which is
+      returned.  Falls through to the 'irreducible' panic if no pattern
+      matches. */
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
+
+   switch (e->tag) {
+
+   /* --------- TEMP --------- */
+   case Iex_RdTmp: {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg dst  = newVRegI(env);
+
+      if (e->Iex.Load.end != Iend_LE)
+         goto irreducible;
+
+      if (ty == Ity_I32) {
+         ARMAMode1* amode = iselIntExpr_AMode1 ( env, e->Iex.Load.addr );
+         addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/, dst, amode));
+         return dst;
+      }
+      if (ty == Ity_I16) {
+         ARMAMode2* amode = iselIntExpr_AMode2 ( env, e->Iex.Load.addr );
+         addInstr(env, ARMInstr_LdSt16(ARMcc_AL,
+                                       True/*isLoad*/, False/*!signedLoad*/,
+                                       dst, amode));
+         return dst;
+      }
+      if (ty == Ity_I8) {
+         ARMAMode1* amode = iselIntExpr_AMode1 ( env, e->Iex.Load.addr );
+         addInstr(env, ARMInstr_LdSt8U(ARMcc_AL, True/*isLoad*/, dst, amode));
+         return dst;
+      }
+      break;
+   }
+
+//zz   /* --------- TERNARY OP --------- */
+//zz   case Iex_Triop: {
+//zz      IRTriop *triop = e->Iex.Triop.details;
+//zz      /* C3210 flags following FPU partial remainder (fprem), both
+//zz         IEEE compliant (PREM1) and non-IEEE compliant (PREM). */
+//zz      if (triop->op == Iop_PRemC3210F64
+//zz          || triop->op == Iop_PRem1C3210F64) {
+//zz         HReg junk = newVRegF(env);
+//zz         HReg dst  = newVRegI(env);
+//zz         HReg srcL = iselDblExpr(env, triop->arg2);
+//zz         HReg srcR = iselDblExpr(env, triop->arg3);
+//zz         /* XXXROUNDINGFIXME */
+//zz         /* set roundingmode here */
+//zz         addInstr(env, X86Instr_FpBinary(
+//zz                           e->Iex.Binop.op==Iop_PRemC3210F64 
+//zz                              ? Xfp_PREM : Xfp_PREM1,
+//zz                           srcL,srcR,junk
+//zz                 ));
+//zz         /* The previous pseudo-insn will have left the FPU's C3210
+//zz            flags set correctly.  So bag them. */
+//zz         addInstr(env, X86Instr_FpStSW_AX());
+//zz         addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), dst));
+//zz         addInstr(env, X86Instr_Alu32R(Xalu_AND, X86RMI_Imm(0x4700), dst));
+//zz         return dst;
+//zz      }
+//zz
+//zz      break;
+//zz   }
+
+   /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+
+      ARMAluOp   aop = 0; /* invalid */
+      ARMShiftOp sop = 0; /* invalid */
+
+      /* ADD/SUB/AND/OR/XOR */
+      switch (e->Iex.Binop.op) {
+         case Iop_And32: {
+            /* AND gets special treatment: if the RHS immediate is more
+               cheaply encoded inverted, emit BIC instead. */
+            Bool     didInv = False;
+            HReg     dst    = newVRegI(env);
+            HReg     argL   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            ARMRI84* argR   = iselIntExpr_RI84(&didInv, True/*mayInv*/,
+                                               env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_Alu(didInv ? ARMalu_BIC : ARMalu_AND,
+                                       dst, argL, argR));
+            return dst;
+         }
+         case Iop_Or32:  aop = ARMalu_OR;  goto std_binop;
+         case Iop_Xor32: aop = ARMalu_XOR; goto std_binop;
+         case Iop_Sub32: aop = ARMalu_SUB; goto std_binop;
+         case Iop_Add32: aop = ARMalu_ADD; goto std_binop;
+         std_binop: {
+            HReg     dst  = newVRegI(env);
+            HReg     argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            ARMRI84* argR = iselIntExpr_RI84(NULL, False/*mayInv*/,
+                                             env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_Alu(aop, dst, argL, argR));
+            return dst;
+         }
+         default: break;
+      }
+
+      /* SHL/SHR/SAR */
+      switch (e->Iex.Binop.op) {
+         case Iop_Shl32: sop = ARMsh_SHL; goto sh_binop;
+         case Iop_Shr32: sop = ARMsh_SHR; goto sh_binop;
+         case Iop_Sar32: sop = ARMsh_SAR; goto sh_binop;
+         sh_binop: {
+            HReg    dst  = newVRegI(env);
+            HReg    argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            ARMRI5* argR = iselIntExpr_RI5(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_Shift(sop, dst, argL, argR));
+            vassert(ty == Ity_I32); /* else the IR is ill-typed */
+            return dst;
+         }
+         default: break;
+      }
+
+      /* MUL */
+      if (e->Iex.Binop.op == Iop_Mul32) {
+         /* ARMInstr_Mul implicitly uses r2/r3 as operands and r0 as
+            result, so marshal through those fixed registers. */
+         HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         HReg dst  = newVRegI(env);
+         addInstr(env, mk_iMOVds_RR(hregARM_R2(), argL));
+         addInstr(env, mk_iMOVds_RR(hregARM_R3(), argR));
+         addInstr(env, ARMInstr_Mul(ARMmul_PLAIN));
+         addInstr(env, mk_iMOVds_RR(dst, hregARM_R0()));
+         return dst;
+      }
+
+      /* Handle misc other ops. */
+
+      if (e->Iex.Binop.op == Iop_Max32U) {
+         /* dst = argL; if (argL <u argR) dst = argR */
+         HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         HReg dst  = newVRegI(env);
+         addInstr(env, ARMInstr_CmpOrTst(True/*isCmp*/, argL,
+                                         ARMRI84_R(argR)));
+         addInstr(env, mk_iMOVds_RR(dst, argL));
+         addInstr(env, ARMInstr_CMov(ARMcc_LO, dst, ARMRI84_R(argR)));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_CmpF64) {
+         HReg dL = iselDblExpr(env, e->Iex.Binop.arg1);
+         HReg dR = iselDblExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegI(env);
+         /* Do the compare (FCMPD) and set NZCV in FPSCR.  Then also do
+            FMSTAT, so we can examine the results directly. */
+         addInstr(env, ARMInstr_VCmpD(dL, dR));
+         /* Create in dst, the IRCmpF64Result encoded result. */
+         addInstr(env, ARMInstr_Imm32(dst, 0));
+         addInstr(env, ARMInstr_CMov(ARMcc_EQ, dst, ARMRI84_I84(0x40,0))); //EQ
+         addInstr(env, ARMInstr_CMov(ARMcc_MI, dst, ARMRI84_I84(0x01,0))); //LT
+         addInstr(env, ARMInstr_CMov(ARMcc_GT, dst, ARMRI84_I84(0x00,0))); //GT
+         addInstr(env, ARMInstr_CMov(ARMcc_VS, dst, ARMRI84_I84(0x45,0))); //UN
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_F64toI32S
+          || e->Iex.Binop.op == Iop_F64toI32U) {
+         /* Wretched uglyness all round, due to having to deal
+            with rounding modes.  Oh well. */
+         /* FIXME: if arg1 is a constant indicating round-to-zero,
+            then we could skip all this arsing around with FPSCR and
+            simply emit FTO{S,U}IZD. */
+         Bool syned = e->Iex.Binop.op == Iop_F64toI32S;
+         HReg valD  = iselDblExpr(env, e->Iex.Binop.arg2);
+         set_VFP_rounding_mode(env, e->Iex.Binop.arg1);
+         /* FTO{S,U}ID valF, valD */
+         HReg valF = newVRegF(env);
+         addInstr(env, ARMInstr_VCvtID(False/*!iToD*/, syned,
+                                       valF, valD));
+         set_VFP_rounding_default(env);
+         /* VMOV dst, valF */
+         HReg dst = newVRegI(env);
+         addInstr(env, ARMInstr_VXferS(False/*!toS*/, valF, dst));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_GetElem8x8
+          || e->Iex.Binop.op == Iop_GetElem16x4
+          || e->Iex.Binop.op == Iop_GetElem32x2) {
+         if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+            HReg res = newVRegI(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            UInt index, size;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+               vpanic("ARM target supports GetElem with constant "
+                      "second argument only (neon)\n");
+            }
+            index = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch (e->Iex.Binop.op) {
+               case Iop_GetElem8x8: vassert(index < 8); size = 0; break;
+               case Iop_GetElem16x4: vassert(index < 4); size = 1; break;
+               case Iop_GetElem32x2: vassert(index < 2); size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnaryS(ARMneon_GETELEMS,
+                                           mkARMNRS(ARMNRS_Reg, res, 0),
+                                           mkARMNRS(ARMNRS_Scalar, arg, index),
+                                           size, False));
+            return res;
+         }
+      }
+
+      if (e->Iex.Binop.op == Iop_GetElem32x2
+          && e->Iex.Binop.arg2->tag == Iex_Const
+          && !(env->hwcaps & VEX_HWCAPS_ARM_NEON)) {
+         /* We may have to do GetElem32x2 on a non-NEON capable
+            target. */
+         IRConst* con = e->Iex.Binop.arg2->Iex.Const.con;
+         vassert(con->tag == Ico_U8); /* else IR is ill-typed */
+         UInt index = con->Ico.U8;
+         /* index is unsigned, so ">= 0" would be vacuously true;
+            just bound it from above. */
+         if (index <= 1) {
+            HReg rHi, rLo;
+            iselInt64Expr(&rHi, &rLo, env, e->Iex.Binop.arg1);
+            return index == 0 ? rLo : rHi;
+         }
+      }
+
+      if (e->Iex.Binop.op == Iop_GetElem8x16
+          || e->Iex.Binop.op == Iop_GetElem16x8
+          || e->Iex.Binop.op == Iop_GetElem32x4) {
+         if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+            HReg res = newVRegI(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Binop.arg1);
+            UInt index, size;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+               vpanic("ARM target supports GetElem with constant "
+                      "second argument only (neon)\n");
+            }
+            index = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch (e->Iex.Binop.op) {
+               case Iop_GetElem8x16: vassert(index < 16); size = 0; break;
+               case Iop_GetElem16x8: vassert(index < 8); size = 1; break;
+               case Iop_GetElem32x4: vassert(index < 4); size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnaryS(ARMneon_GETELEMS,
+                                           mkARMNRS(ARMNRS_Reg, res, 0),
+                                           mkARMNRS(ARMNRS_Scalar, arg, index),
+                                           size, True));
+            return res;
+         }
+      }
+
+      /* All cases involving host-side helper calls. */
+      void* fn = NULL;
+      switch (e->Iex.Binop.op) {
+         case Iop_Add16x2:
+            fn = &h_generic_calc_Add16x2; break;
+         case Iop_Sub16x2:
+            fn = &h_generic_calc_Sub16x2; break;
+         case Iop_HAdd16Ux2:
+            fn = &h_generic_calc_HAdd16Ux2; break;
+         case Iop_HAdd16Sx2:
+            fn = &h_generic_calc_HAdd16Sx2; break;
+         case Iop_HSub16Ux2:
+            fn = &h_generic_calc_HSub16Ux2; break;
+         case Iop_HSub16Sx2:
+            fn = &h_generic_calc_HSub16Sx2; break;
+         case Iop_QAdd16Sx2:
+            fn = &h_generic_calc_QAdd16Sx2; break;
+         case Iop_QAdd16Ux2:
+            fn = &h_generic_calc_QAdd16Ux2; break;
+         case Iop_QSub16Sx2:
+            fn = &h_generic_calc_QSub16Sx2; break;
+         case Iop_Add8x4:
+            fn = &h_generic_calc_Add8x4; break;
+         case Iop_Sub8x4:
+            fn = &h_generic_calc_Sub8x4; break;
+         case Iop_HAdd8Ux4:
+            fn = &h_generic_calc_HAdd8Ux4; break;
+         case Iop_HAdd8Sx4:
+            fn = &h_generic_calc_HAdd8Sx4; break;
+         case Iop_HSub8Ux4:
+            fn = &h_generic_calc_HSub8Ux4; break;
+         case Iop_HSub8Sx4:
+            fn = &h_generic_calc_HSub8Sx4; break;
+         case Iop_QAdd8Sx4:
+            fn = &h_generic_calc_QAdd8Sx4; break;
+         case Iop_QAdd8Ux4:
+            fn = &h_generic_calc_QAdd8Ux4; break;
+         case Iop_QSub8Sx4:
+            fn = &h_generic_calc_QSub8Sx4; break;
+         case Iop_QSub8Ux4:
+            fn = &h_generic_calc_QSub8Ux4; break;
+         case Iop_Sad8Ux4:
+            fn = &h_generic_calc_Sad8Ux4; break;
+         case Iop_QAdd32S:
+            fn = &h_generic_calc_QAdd32S; break;
+         case Iop_QSub32S:
+            fn = &h_generic_calc_QSub32S; break;
+         case Iop_QSub16Ux2:
+            fn = &h_generic_calc_QSub16Ux2; break;
+         case Iop_DivU32:
+            fn = &h_calc_udiv32_w_arm_semantics; break;
+         case Iop_DivS32:
+            fn = &h_calc_sdiv32_w_arm_semantics; break;
+         default:
+            break;
+      }
+
+      if (fn) {
+         /* Standard AAPCS-style call: args in r0/r1, result in r0. */
+         HReg regL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg regR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         HReg res  = newVRegI(env);
+         addInstr(env, mk_iMOVds_RR(hregARM_R0(), regL));
+         addInstr(env, mk_iMOVds_RR(hregARM_R1(), regR));
+         addInstr(env, ARMInstr_Call( ARMcc_AL, (Addr)fn,
+                                      2, mk_RetLoc_simple(RLPri_Int) ));
+         addInstr(env, mk_iMOVds_RR(res, hregARM_R0()));
+         return res;
+      }
+
+      break;
+   }
+
+   /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+
+//zz      /* 1Uto8(32to1(expr32)) */
+//zz      if (e->Iex.Unop.op == Iop_1Uto8) { 
+//zz         DECLARE_PATTERN(p_32to1_then_1Uto8);
+//zz         DEFINE_PATTERN(p_32to1_then_1Uto8,
+//zz                        unop(Iop_1Uto8,unop(Iop_32to1,bind(0))));
+//zz         if (matchIRExpr(&mi,p_32to1_then_1Uto8,e)) {
+//zz            IRExpr* expr32 = mi.bindee[0];
+//zz            HReg dst = newVRegI(env);
+//zz            HReg src = iselIntExpr_R(env, expr32);
+//zz            addInstr(env, mk_iMOVsd_RR(src,dst) );
+//zz            addInstr(env, X86Instr_Alu32R(Xalu_AND,
+//zz                                          X86RMI_Imm(1), dst));
+//zz            return dst;
+//zz         }
+//zz      }
+//zz
+//zz      /* 8Uto32(LDle(expr32)) */
+//zz      if (e->Iex.Unop.op == Iop_8Uto32) {
+//zz         DECLARE_PATTERN(p_LDle8_then_8Uto32);
+//zz         DEFINE_PATTERN(p_LDle8_then_8Uto32,
+//zz                        unop(Iop_8Uto32,
+//zz                             IRExpr_Load(Iend_LE,Ity_I8,bind(0))) );
+//zz         if (matchIRExpr(&mi,p_LDle8_then_8Uto32,e)) {
+//zz            HReg dst = newVRegI(env);
+//zz            X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
+//zz            addInstr(env, X86Instr_LoadEX(1,False,amode,dst));
+//zz            return dst;
+//zz         }
+//zz      }
+//zz
+//zz      /* 8Sto32(LDle(expr32)) */
+//zz      if (e->Iex.Unop.op == Iop_8Sto32) {
+//zz         DECLARE_PATTERN(p_LDle8_then_8Sto32);
+//zz         DEFINE_PATTERN(p_LDle8_then_8Sto32,
+//zz                        unop(Iop_8Sto32,
+//zz                             IRExpr_Load(Iend_LE,Ity_I8,bind(0))) );
+//zz         if (matchIRExpr(&mi,p_LDle8_then_8Sto32,e)) {
+//zz            HReg dst = newVRegI(env);
+//zz            X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
+//zz            addInstr(env, X86Instr_LoadEX(1,True,amode,dst));
+//zz            return dst;
+//zz         }
+//zz      }
+//zz
+//zz      /* 16Uto32(LDle(expr32)) */
+//zz      if (e->Iex.Unop.op == Iop_16Uto32) {
+//zz         DECLARE_PATTERN(p_LDle16_then_16Uto32);
+//zz         DEFINE_PATTERN(p_LDle16_then_16Uto32,
+//zz                        unop(Iop_16Uto32,
+//zz                             IRExpr_Load(Iend_LE,Ity_I16,bind(0))) );
+//zz         if (matchIRExpr(&mi,p_LDle16_then_16Uto32,e)) {
+//zz            HReg dst = newVRegI(env);
+//zz            X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
+//zz            addInstr(env, X86Instr_LoadEX(2,False,amode,dst));
+//zz            return dst;
+//zz         }
+//zz      }
+//zz
+//zz      /* 8Uto32(GET:I8) */
+//zz      if (e->Iex.Unop.op == Iop_8Uto32) {
+//zz         if (e->Iex.Unop.arg->tag == Iex_Get) {
+//zz            HReg      dst;
+//zz            X86AMode* amode;
+//zz            vassert(e->Iex.Unop.arg->Iex.Get.ty == Ity_I8);
+//zz            dst = newVRegI(env);
+//zz            amode = X86AMode_IR(e->Iex.Unop.arg->Iex.Get.offset,
+//zz                                hregX86_EBP());
+//zz            addInstr(env, X86Instr_LoadEX(1,False,amode,dst));
+//zz            return dst;
+//zz         }
+//zz      }
+//zz
+//zz      /* 16to32(GET:I16) */
+//zz      if (e->Iex.Unop.op == Iop_16Uto32) {
+//zz         if (e->Iex.Unop.arg->tag == Iex_Get) {
+//zz            HReg      dst;
+//zz            X86AMode* amode;
+//zz            vassert(e->Iex.Unop.arg->Iex.Get.ty == Ity_I16);
+//zz            dst = newVRegI(env);
+//zz            amode = X86AMode_IR(e->Iex.Unop.arg->Iex.Get.offset,
+//zz                                hregX86_EBP());
+//zz            addInstr(env, X86Instr_LoadEX(2,False,amode,dst));
+//zz            return dst;
+//zz         }
+//zz      }
+
+      switch (e->Iex.Unop.op) {
+         case Iop_8Uto32: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_Alu(ARMalu_AND,
+                                       dst, src, ARMRI84_I84(0xFF,0)));
+            return dst;
+         }
+//zz         case Iop_8Uto16:
+//zz         case Iop_8Uto32:
+//zz         case Iop_16Uto32: {
+//zz            HReg dst = newVRegI(env);
+//zz            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+//zz            UInt mask = e->Iex.Unop.op==Iop_16Uto32 ? 0xFFFF : 0xFF;
+//zz            addInstr(env, mk_iMOVsd_RR(src,dst) );
+//zz            addInstr(env, X86Instr_Alu32R(Xalu_AND,
+//zz                                          X86RMI_Imm(mask), dst));
+//zz            return dst;
+//zz         }
+//zz         case Iop_8Sto16:
+//zz         case Iop_8Sto32:
+         case Iop_16Uto32: {
+            /* Zero-extend: shift up 16 then logical-shift back down. */
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            ARMRI5* amt = ARMRI5_I5(16);
+            addInstr(env, ARMInstr_Shift(ARMsh_SHL, dst, src, amt));
+            addInstr(env, ARMInstr_Shift(ARMsh_SHR, dst, dst, amt));
+            return dst;
+         }
+         case Iop_8Sto32:
+         case Iop_16Sto32: {
+            /* Sign-extend: shift up then arithmetic-shift back down. */
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            ARMRI5* amt = ARMRI5_I5(e->Iex.Unop.op==Iop_16Sto32 ? 16 : 24);
+            addInstr(env, ARMInstr_Shift(ARMsh_SHL, dst, src, amt));
+            addInstr(env, ARMInstr_Shift(ARMsh_SAR, dst, dst, amt));
+            return dst;
+         }
+//zz         case Iop_Not8:
+//zz         case Iop_Not16:
+         case Iop_Not32: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_Unary(ARMun_NOT, dst, src));
+            return dst;
+         }
+         case Iop_64HIto32: {
+            HReg rHi, rLo;
+            iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+            return rHi; /* and abandon rLo .. poor wee thing :-) */
+         }
+         case Iop_64to32: {
+            HReg rHi, rLo;
+            iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+            return rLo; /* similar stupid comment to the above ... */
+         }
+         case Iop_64to8: {
+            HReg rHi, rLo;
+            if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+               HReg tHi = newVRegI(env);
+               HReg tLo = newVRegI(env);
+               HReg tmp = iselNeon64Expr(env, e->Iex.Unop.arg);
+               addInstr(env, ARMInstr_VXferD(False, tmp, tHi, tLo));
+               rHi = tHi;
+               rLo = tLo;
+            } else {
+               iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+            }
+            return rLo;
+         }
+
+         case Iop_1Uto32:
+            /* 1Uto32(tmp).  Since I1 values generated into registers
+               are guaranteed to have value either only zero or one,
+               we can simply return the value of the register in this
+               case. */
+            if (e->Iex.Unop.arg->tag == Iex_RdTmp) {
+               HReg dst = lookupIRTemp(env, e->Iex.Unop.arg->Iex.RdTmp.tmp);
+               return dst;
+            }
+            /* else fall through */
+         case Iop_1Uto8: {
+            HReg        dst  = newVRegI(env);
+            ARMCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_Mov(dst, ARMRI84_I84(0,0)));
+            addInstr(env, ARMInstr_CMov(cond, dst, ARMRI84_I84(1,0)));
+            return dst;
+         }
+
+         case Iop_1Sto32: {
+            HReg        dst  = newVRegI(env);
+            ARMCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+            ARMRI5*     amt  = ARMRI5_I5(31);
+            /* This is really rough.  We could do much better here;
+               perhaps mvn{cond} dst, #0 as the second insn?
+               (same applies to 1Sto64) */
+            addInstr(env, ARMInstr_Mov(dst, ARMRI84_I84(0,0)));
+            addInstr(env, ARMInstr_CMov(cond, dst, ARMRI84_I84(1,0)));
+            addInstr(env, ARMInstr_Shift(ARMsh_SHL, dst, dst, amt));
+            addInstr(env, ARMInstr_Shift(ARMsh_SAR, dst, dst, amt));
+            return dst;
+         }
+
+
+//zz         case Iop_1Sto8:
+//zz         case Iop_1Sto16:
+//zz         case Iop_1Sto32: {
+//zz            /* could do better than this, but for now ... */
+//zz            HReg dst         = newVRegI(env);
+//zz            X86CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+//zz            addInstr(env, X86Instr_Set32(cond,dst));
+//zz            addInstr(env, X86Instr_Sh32(Xsh_SHL, 31, dst));
+//zz            addInstr(env, X86Instr_Sh32(Xsh_SAR, 31, dst));
+//zz            return dst;
+//zz         }
+//zz         case Iop_Ctz32: {
+//zz            /* Count trailing zeroes, implemented by x86 'bsfl' */
+//zz            HReg dst = newVRegI(env);
+//zz            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+//zz            addInstr(env, X86Instr_Bsfr32(True,src,dst));
+//zz            return dst;
+//zz         }
+         case Iop_Clz32: {
+            /* Count leading zeroes; easy on ARM. */
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_Unary(ARMun_CLZ, dst, src));
+            return dst;
+         }
+
+         case Iop_CmpwNEZ32: {
+            /* (src | -src) >>s 31 : all-ones if src != 0, else 0. */
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_Unary(ARMun_NEG, dst, src));
+            addInstr(env, ARMInstr_Alu(ARMalu_OR, dst, dst, ARMRI84_R(src)));
+            addInstr(env, ARMInstr_Shift(ARMsh_SAR, dst, dst, ARMRI5_I5(31)));
+            return dst;
+         }
+
+         case Iop_Left32: {
+            /* src | -src */
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_Unary(ARMun_NEG, dst, src));
+            addInstr(env, ARMInstr_Alu(ARMalu_OR, dst, dst, ARMRI84_R(src)));
+            return dst;
+         }
+
+//zz         case Iop_V128to32: {
+//zz            HReg      dst  = newVRegI(env);
+//zz            HReg      vec  = iselVecExpr(env, e->Iex.Unop.arg);
+//zz            X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
+//zz            sub_from_esp(env, 16);
+//zz            addInstr(env, X86Instr_SseLdSt(False/*store*/, vec, esp0));
+//zz            addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(esp0), dst ));
+//zz            add_to_esp(env, 16);
+//zz            return dst;
+//zz         }
+//zz
+         case Iop_ReinterpF32asI32: {
+            HReg dst = newVRegI(env);
+            HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_VXferS(False/*!toS*/, src, dst));
+            return dst;
+         }
+
+//zz
+//zz         case Iop_16to8:
+         case Iop_32to8:
+         case Iop_32to16:
+            /* These are no-ops. */
+            return iselIntExpr_R(env, e->Iex.Unop.arg);
+
+         default:
+            break;
+      }
+
+      /* All Unop cases involving host-side helper calls. */
+      void* fn = NULL;
+      switch (e->Iex.Unop.op) {
+         case Iop_CmpNEZ16x2:
+            fn = &h_generic_calc_CmpNEZ16x2; break;
+         case Iop_CmpNEZ8x4:
+            fn = &h_generic_calc_CmpNEZ8x4; break;
+         default:
+            break;
+      }
+
+      if (fn) {
+         HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
+         HReg res = newVRegI(env);
+         addInstr(env, mk_iMOVds_RR(hregARM_R0(), arg));
+         addInstr(env, ARMInstr_Call( ARMcc_AL, (Addr)fn,
+                                      1, mk_RetLoc_simple(RLPri_Int) ));
+         addInstr(env, mk_iMOVds_RR(res, hregARM_R0()));
+         return res;
+      }
+
+      break;
+   }
+
+   /* --------- GET --------- */
+   case Iex_Get: {
+      /* r8 is the guest state pointer; LDR imm offsets must fit in
+         12 bits, hence the 4096-4 bound. */
+      if (ty == Ity_I32 
+          && 0 == (e->Iex.Get.offset & 3)
+          && e->Iex.Get.offset < 4096-4) {
+         HReg dst = newVRegI(env);
+         addInstr(env, ARMInstr_LdSt32(
+                          ARMcc_AL, True/*isLoad*/,
+                          dst,
+                          ARMAMode1_RI(hregARM_R8(), e->Iex.Get.offset)));
+         return dst;
+      }
+//zz      if (ty == Ity_I8 || ty == Ity_I16) {
+//zz         HReg dst = newVRegI(env);
+//zz         addInstr(env, X86Instr_LoadEX(
+//zz                          toUChar(ty==Ity_I8 ? 1 : 2),
+//zz                          False,
+//zz                          X86AMode_IR(e->Iex.Get.offset,hregX86_EBP()),
+//zz                          dst));
+//zz         return dst;
+//zz      }
+      break;
+   }
+
+//zz   case Iex_GetI: {
+//zz      X86AMode* am 
+//zz         = genGuestArrayOffset(
+//zz              env, e->Iex.GetI.descr, 
+//zz                   e->Iex.GetI.ix, e->Iex.GetI.bias );
+//zz      HReg dst = newVRegI(env);
+//zz      if (ty == Ity_I8) {
+//zz         addInstr(env, X86Instr_LoadEX( 1, False, am, dst ));
+//zz         return dst;
+//zz      }
+//zz      if (ty == Ity_I32) {
+//zz         addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(am), dst));
+//zz         return dst;
+//zz      }
+//zz      break;
+//zz   }
+
+   /* --------- CCALL --------- */
+   case Iex_CCall: {
+      HReg    dst = newVRegI(env);
+      vassert(ty == e->Iex.CCall.retty);
+
+      /* be very restrictive for now.  Only 32/64-bit ints allowed for
+         args, and 32 bits for return type.  Don't forget to change
+         the RetLoc if more types are allowed in future. */
+      if (e->Iex.CCall.retty != Ity_I32)
+         goto irreducible;
+
+      /* Marshal args, do the call, clear stack. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      Bool   ok      = doHelperCall( &addToSp, &rloc, env, NULL/*guard*/,
+                                     e->Iex.CCall.cee, e->Iex.CCall.retty,
+                                     e->Iex.CCall.args );
+      /* */
+      if (ok) {
+         vassert(is_sane_RetLoc(rloc));
+         vassert(rloc.pri == RLPri_Int);
+         vassert(addToSp == 0);
+         addInstr(env, mk_iMOVds_RR(dst, hregARM_R0()));
+         return dst;
+      }
+      /* doHelperCall failed.  Jump to the irreducible label explicitly:
+         simply falling out of this case would drop into Iex_Const and
+         misinterpret the expression union as a constant. */
+      goto irreducible;
+   }
+
+   /* --------- LITERAL --------- */
+   /* 32 literals */
+   case Iex_Const: {
+      UInt u   = 0;
+      HReg dst = newVRegI(env);
+      switch (e->Iex.Const.con->tag) {
+         case Ico_U32: u = e->Iex.Const.con->Ico.U32; break;
+         case Ico_U16: u = 0xFFFF & (e->Iex.Const.con->Ico.U16); break;
+         case Ico_U8:  u = 0xFF   & (e->Iex.Const.con->Ico.U8); break;
+         default: ppIRExpr(e); vpanic("iselIntExpr_R.Iex_Const(arm)");
+      }
+      addInstr(env, ARMInstr_Imm32(dst, u));
+      return dst;
+   }
+
+   /* --------- MULTIPLEX --------- */
+   case Iex_ITE: { // VFD
+      /* ITE(ccexpr, iftrue, iffalse) */
+      if (ty == Ity_I32) {
+         ARMCondCode cc;
+         HReg     r1  = iselIntExpr_R(env, e->Iex.ITE.iftrue);
+         ARMRI84* r0  = iselIntExpr_RI84(NULL, False, env, e->Iex.ITE.iffalse);
+         HReg     dst = newVRegI(env);
+         addInstr(env, mk_iMOVds_RR(dst, r1));
+         cc = iselCondCode(env, e->Iex.ITE.cond);
+         /* cc ^ 1 is the logical inverse of cc, so overwrite dst with
+            the iffalse value only when the condition is false. */
+         addInstr(env, ARMInstr_CMov(cc ^ 1, dst, r0));
+         return dst;
+      }
+      break;
+   }
+
+   default: 
+      break;
+   } /* switch (e->tag) */
+
+   /* We get here if no pattern matched. */
+  irreducible:
+   ppIRExpr(e);
+   vpanic("iselIntExpr_R: cannot reduce tree");
+}
+
+
+/* -------------------- 64-bit -------------------- */
+
+/* Compute a 64-bit value into a register pair, which is returned as
+   the first two parameters.  As with iselIntExpr_R, these may be
+   either real or virtual regs; in any case they must not be changed
+   by subsequent code emitted by the caller.  */
+
+static void iselInt64Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then verify both halves of the result
+      pair: each must be a virtual register of 32-bit integer class. */
+   iselInt64Expr_wrk(rHi, rLo, env, e);
+#  if 0
+   /* Debug aid: dump the expression just selected. */
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(*rLo) == HRcInt32);
+   vassert(hregIsVirtual(*rLo));
+   vassert(hregClass(*rHi) == HRcInt32);
+   vassert(hregIsVirtual(*rHi));
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I64);
+
+   /* 64-bit literal */
+   if (e->tag == Iex_Const) {
+      ULong   w64 = e->Iex.Const.con->Ico.U64;
+      UInt    wHi = toUInt(w64 >> 32);
+      UInt    wLo = toUInt(w64);
+      HReg    tHi = newVRegI(env);
+      HReg    tLo = newVRegI(env);
+      vassert(e->Iex.Const.con->tag == Ico_U64);
+      addInstr(env, ARMInstr_Imm32(tHi, wHi));
+      addInstr(env, ARMInstr_Imm32(tLo, wLo));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* read 64-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+         HReg tHi = newVRegI(env);
+         HReg tLo = newVRegI(env);
+         HReg tmp = iselNeon64Expr(env, e);
+         addInstr(env, ARMInstr_VXferD(False, tmp, tHi, tLo));
+         *rHi = tHi;
+         *rLo = tLo;
+      } else {
+         lookupIRTemp64( rHi, rLo, env, e->Iex.RdTmp.tmp);
+      }
+      return;
+   }
+
+   /* 64-bit load */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      HReg      tLo, tHi, rA;
+      vassert(e->Iex.Load.ty == Ity_I64);
+      rA  = iselIntExpr_R(env, e->Iex.Load.addr);
+      tHi = newVRegI(env);
+      tLo = newVRegI(env);
+      addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/,
+                                    tHi, ARMAMode1_RI(rA, 4)));
+      addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/,
+                                    tLo, ARMAMode1_RI(rA, 0)));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* 64-bit GET */
+   if (e->tag == Iex_Get) {
+      ARMAMode1* am0 = ARMAMode1_RI(hregARM_R8(), e->Iex.Get.offset + 0);
+      ARMAMode1* am4 = ARMAMode1_RI(hregARM_R8(), e->Iex.Get.offset + 4);
+      HReg tHi = newVRegI(env);
+      HReg tLo = newVRegI(env);
+      addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/, tHi, am4));
+      addInstr(env, ARMInstr_LdSt32(ARMcc_AL, True/*isLoad*/, tLo, am0));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* --------- BINARY ops --------- */
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+
+         /* 32 x 32 -> 64 multiply */
+         case Iop_MullS32:
+         case Iop_MullU32: {
+            HReg     argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            HReg     argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            HReg     tHi  = newVRegI(env);
+            HReg     tLo  = newVRegI(env);
+            ARMMulOp mop  = e->Iex.Binop.op == Iop_MullS32
+                               ? ARMmul_SX : ARMmul_ZX;
+            addInstr(env, mk_iMOVds_RR(hregARM_R2(), argL));
+            addInstr(env, mk_iMOVds_RR(hregARM_R3(), argR));
+            addInstr(env, ARMInstr_Mul(mop));
+            addInstr(env, mk_iMOVds_RR(tHi, hregARM_R1()));
+            addInstr(env, mk_iMOVds_RR(tLo, hregARM_R0()));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         case Iop_Or64: {
+            HReg xLo, xHi, yLo, yHi;
+            HReg tHi = newVRegI(env);
+            HReg tLo = newVRegI(env);
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_Alu(ARMalu_OR, tHi, xHi, ARMRI84_R(yHi)));
+            addInstr(env, ARMInstr_Alu(ARMalu_OR, tLo, xLo, ARMRI84_R(yLo)));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         case Iop_Add64: {
+            HReg xLo, xHi, yLo, yHi;
+            HReg tHi = newVRegI(env);
+            HReg tLo = newVRegI(env);
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_Alu(ARMalu_ADDS, tLo, xLo, ARMRI84_R(yLo)));
+            addInstr(env, ARMInstr_Alu(ARMalu_ADC,  tHi, xHi, ARMRI84_R(yHi)));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 32HLto64(e1,e2) */
+         case Iop_32HLto64: {
+            *rHi = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            *rLo = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            return;
+         }
+
+         default:
+            break;
+      }
+   }
+
+   /* --------- UNARY ops --------- */
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+
+         /* ReinterpF64asI64 */
+         case Iop_ReinterpF64asI64: {
+            HReg dstHi = newVRegI(env);
+            HReg dstLo = newVRegI(env);
+            HReg src   = iselDblExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_VXferD(False/*!toD*/, src, dstHi, dstLo));
+            *rHi = dstHi;
+            *rLo = dstLo;
+            return;
+         }
+
+         /* Left64(e) */
+         case Iop_Left64: {
+            HReg yLo, yHi;
+            HReg tHi  = newVRegI(env);
+            HReg tLo  = newVRegI(env);
+            HReg zero = newVRegI(env);
+            /* yHi:yLo = arg */
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Unop.arg);
+            /* zero = 0 */
+            addInstr(env, ARMInstr_Imm32(zero, 0));
+            /* tLo = 0 - yLo, and set carry */
+            addInstr(env, ARMInstr_Alu(ARMalu_SUBS,
+                                       tLo, zero, ARMRI84_R(yLo)));
+            /* tHi = 0 - yHi - carry */
+            addInstr(env, ARMInstr_Alu(ARMalu_SBC,
+                                       tHi, zero, ARMRI84_R(yHi)));
+            /* So now we have tHi:tLo = -arg.  To finish off, or 'arg'
+               back in, so as to give the final result 
+               tHi:tLo = arg | -arg. */
+            addInstr(env, ARMInstr_Alu(ARMalu_OR, tHi, tHi, ARMRI84_R(yHi)));
+            addInstr(env, ARMInstr_Alu(ARMalu_OR, tLo, tLo, ARMRI84_R(yLo)));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* CmpwNEZ64(e) */
+         case Iop_CmpwNEZ64: {
+            HReg srcLo, srcHi;
+            HReg tmp1 = newVRegI(env);
+            HReg tmp2 = newVRegI(env);
+            /* srcHi:srcLo = arg */
+            iselInt64Expr(&srcHi, &srcLo, env, e->Iex.Unop.arg);
+            /* tmp1 = srcHi | srcLo */
+            addInstr(env, ARMInstr_Alu(ARMalu_OR,
+                                       tmp1, srcHi, ARMRI84_R(srcLo)));
+            /* tmp2 = (tmp1 | -tmp1) >>s 31 */
+            addInstr(env, ARMInstr_Unary(ARMun_NEG, tmp2, tmp1));
+            addInstr(env, ARMInstr_Alu(ARMalu_OR,
+                                       tmp2, tmp2, ARMRI84_R(tmp1)));
+            addInstr(env, ARMInstr_Shift(ARMsh_SAR,
+                                         tmp2, tmp2, ARMRI5_I5(31)));
+            *rHi = tmp2;
+            *rLo = tmp2;
+            return;
+         }
+
+         case Iop_1Sto64: {
+            HReg        dst  = newVRegI(env);
+            ARMCondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+            ARMRI5*     amt  = ARMRI5_I5(31);
+            /* This is really rough.  We could do much better here;
+               perhaps mvn{cond} dst, #0 as the second insn?
+               (same applies to 1Sto32) */
+            addInstr(env, ARMInstr_Mov(dst, ARMRI84_I84(0,0)));
+            addInstr(env, ARMInstr_CMov(cond, dst, ARMRI84_I84(1,0)));
+            addInstr(env, ARMInstr_Shift(ARMsh_SHL, dst, dst, amt));
+            addInstr(env, ARMInstr_Shift(ARMsh_SAR, dst, dst, amt));
+            *rHi = dst;
+            *rLo = dst;
+            return;
+         }
+
+         default: 
+            break;
+      }
+   } /* if (e->tag == Iex_Unop) */
+
+   /* --------- MULTIPLEX --------- */
+   if (e->tag == Iex_ITE) { // VFD
+      IRType tyC;
+      HReg   r1hi, r1lo, r0hi, r0lo, dstHi, dstLo;
+      ARMCondCode cc;
+      tyC = typeOfIRExpr(env->type_env,e->Iex.ITE.cond);
+      vassert(tyC == Ity_I1);
+      iselInt64Expr(&r1hi, &r1lo, env, e->Iex.ITE.iftrue);
+      iselInt64Expr(&r0hi, &r0lo, env, e->Iex.ITE.iffalse);
+      dstHi = newVRegI(env);
+      dstLo = newVRegI(env);
+      addInstr(env, mk_iMOVds_RR(dstHi, r1hi));
+      addInstr(env, mk_iMOVds_RR(dstLo, r1lo));
+      cc = iselCondCode(env, e->Iex.ITE.cond);
+      addInstr(env, ARMInstr_CMov(cc ^ 1, dstHi, ARMRI84_R(r0hi)));
+      addInstr(env, ARMInstr_CMov(cc ^ 1, dstLo, ARMRI84_R(r0lo)));
+      *rHi = dstHi;
+      *rLo = dstLo;
+      return;
+   }
+
+   /* It is convenient sometimes to call iselInt64Expr even when we
+      have NEON support (e.g. in do_helper_call we need 64-bit
+      arguments as 2 x 32 regs). */
+   if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+      HReg tHi = newVRegI(env);
+      HReg tLo = newVRegI(env);
+      HReg tmp = iselNeon64Expr(env, e);
+      addInstr(env, ARMInstr_VXferD(False, tmp, tHi, tLo));
+      *rHi = tHi;
+      *rLo = tLo;
+      return ;
+   }
+
+   ppIRExpr(e);
+   vpanic("iselInt64Expr");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Vector (NEON) expressions (64 or 128 bit)   ---*/
+/*---------------------------------------------------------*/
+
+static HReg iselNeon64Expr ( ISelEnv* env, IRExpr* e )
+{
+   HReg res;
+   /* Selecting a 64-bit value into a NEON D register only makes
+      sense when the host actually has NEON. */
+   vassert(env->hwcaps & VEX_HWCAPS_ARM_NEON);
+   res = iselNeon64Expr_wrk( env, e );
+   /* Sanity-check the work function's result: it must be a
+      virtual register of the 64-bit FP/NEON class. */
+   vassert(hregIsVirtual(res));
+   vassert(hregClass(res) == HRcFlt64);
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselNeon64Expr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env, e);
+   MatchInfo mi;
+   vassert(e);
+   vassert(ty == Ity_I64);
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Const) {
+      HReg rLo, rHi;
+      HReg res = newVRegD(env);
+      iselInt64Expr(&rHi, &rLo, env, e);
+      addInstr(env, ARMInstr_VXferD(True/*toD*/, res, rHi, rLo));
+      return res;
+   }
+
+   /* 64-bit load */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      HReg res = newVRegD(env);
+      ARMAModeN* am = iselIntExpr_AModeN(env, e->Iex.Load.addr);
+      vassert(ty == Ity_I64);
+      addInstr(env, ARMInstr_NLdStD(True, res, am));
+      return res;
+   }
+
+   /* 64-bit GET */
+   if (e->tag == Iex_Get) {
+      HReg addr = newVRegI(env);
+      HReg res = newVRegD(env);
+      vassert(ty == Ity_I64);
+      addInstr(env, ARMInstr_Add32(addr, hregARM_R8(), e->Iex.Get.offset));
+      addInstr(env, ARMInstr_NLdStD(True, res, mkARMAModeN_R(addr)));
+      return res;
+   }
+
+   /* --------- BINARY ops --------- */
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+
+         /* 32 x 32 -> 64 multiply */
+         case Iop_MullS32:
+         case Iop_MullU32: {
+            HReg rLo, rHi;
+            HReg res = newVRegD(env);
+            iselInt64Expr(&rHi, &rLo, env, e);
+            addInstr(env, ARMInstr_VXferD(True/*toD*/, res, rHi, rLo));
+            return res;
+         }
+
+         case Iop_And64: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VAND,
+                                           res, argL, argR, 4, False));
+            return res;
+         }
+         case Iop_Or64: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VORR,
+                                           res, argL, argR, 4, False));
+            return res;
+         }
+         case Iop_Xor64: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VXOR,
+                                           res, argL, argR, 4, False));
+            return res;
+         }
+
+         /* 32HLto64(e1,e2) */
+         case Iop_32HLto64: {
+            HReg rHi = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            HReg rLo = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            HReg res = newVRegD(env);
+            addInstr(env, ARMInstr_VXferD(True/*toD*/, res, rHi, rLo));
+            return res;
+         }
+
+         case Iop_Add8x8:
+         case Iop_Add16x4:
+         case Iop_Add32x2:
+         case Iop_Add64: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Add8x8: size = 0; break;
+               case Iop_Add16x4: size = 1; break;
+               case Iop_Add32x2: size = 2; break;
+               case Iop_Add64: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VADD,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_Add32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VADDFP,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_RecipStep32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VRECPS,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_RSqrtStep32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VRSQRTS,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+
+         // These 6 verified 18 Apr 2013
+         case Iop_InterleaveHI32x2:
+         case Iop_InterleaveLO32x2:
+         case Iop_InterleaveOddLanes8x8:
+         case Iop_InterleaveEvenLanes8x8:
+         case Iop_InterleaveOddLanes16x4:
+         case Iop_InterleaveEvenLanes16x4: {
+            HReg rD   = newVRegD(env);
+            HReg rM   = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            Bool resRd;  // is the result in rD or rM ?
+            switch (e->Iex.Binop.op) {
+               case Iop_InterleaveOddLanes8x8:   resRd = False; size = 0; break;
+               case Iop_InterleaveEvenLanes8x8:  resRd = True;  size = 0; break;
+               case Iop_InterleaveOddLanes16x4:  resRd = False; size = 1; break;
+               case Iop_InterleaveEvenLanes16x4: resRd = True;  size = 1; break;
+               case Iop_InterleaveHI32x2:        resRd = False; size = 2; break;
+               case Iop_InterleaveLO32x2:        resRd = True;  size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rM, argL, 4, False));
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rD, argR, 4, False));
+            addInstr(env, ARMInstr_NDual(ARMneon_TRN, rD, rM, size, False));
+            return resRd ? rD : rM;
+         }
+
+         // These 4 verified 18 Apr 2013
+         case Iop_InterleaveHI8x8:
+         case Iop_InterleaveLO8x8:
+         case Iop_InterleaveHI16x4:
+         case Iop_InterleaveLO16x4: {
+            HReg rD   = newVRegD(env);
+            HReg rM   = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            Bool resRd;  // is the result in rD or rM ?
+            switch (e->Iex.Binop.op) {
+               case Iop_InterleaveHI8x8:  resRd = False; size = 0; break;
+               case Iop_InterleaveLO8x8:  resRd = True;  size = 0; break;
+               case Iop_InterleaveHI16x4: resRd = False; size = 1; break;
+               case Iop_InterleaveLO16x4: resRd = True;  size = 1; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rM, argL, 4, False));
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rD, argR, 4, False));
+            addInstr(env, ARMInstr_NDual(ARMneon_ZIP, rD, rM, size, False));
+            return resRd ? rD : rM;
+         }
+
+         // These 4 verified 18 Apr 2013
+         case Iop_CatOddLanes8x8:
+         case Iop_CatEvenLanes8x8:
+         case Iop_CatOddLanes16x4:
+         case Iop_CatEvenLanes16x4: {
+            HReg rD   = newVRegD(env);
+            HReg rM   = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            Bool resRd;  // is the result in rD or rM ?
+            switch (e->Iex.Binop.op) {
+               case Iop_CatOddLanes8x8:   resRd = False; size = 0; break;
+               case Iop_CatEvenLanes8x8:  resRd = True;  size = 0; break;
+               case Iop_CatOddLanes16x4:  resRd = False; size = 1; break;
+               case Iop_CatEvenLanes16x4: resRd = True;  size = 1; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rM, argL, 4, False));
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rD, argR, 4, False));
+            addInstr(env, ARMInstr_NDual(ARMneon_UZP, rD, rM, size, False));
+            return resRd ? rD : rM;
+         }
+
+         case Iop_QAdd8Ux8:
+         case Iop_QAdd16Ux4:
+         case Iop_QAdd32Ux2:
+         case Iop_QAdd64Ux1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QAdd8Ux8: size = 0; break;
+               case Iop_QAdd16Ux4: size = 1; break;
+               case Iop_QAdd32Ux2: size = 2; break;
+               case Iop_QAdd64Ux1: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQADDU,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_QAdd8Sx8:
+         case Iop_QAdd16Sx4:
+         case Iop_QAdd32Sx2:
+         case Iop_QAdd64Sx1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QAdd8Sx8: size = 0; break;
+               case Iop_QAdd16Sx4: size = 1; break;
+               case Iop_QAdd32Sx2: size = 2; break;
+               case Iop_QAdd64Sx1: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQADDS,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_Sub8x8:
+         case Iop_Sub16x4:
+         case Iop_Sub32x2:
+         case Iop_Sub64: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Sub8x8: size = 0; break;
+               case Iop_Sub16x4: size = 1; break;
+               case Iop_Sub32x2: size = 2; break;
+               case Iop_Sub64: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VSUB,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_Sub32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VSUBFP,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_QSub8Ux8:
+         case Iop_QSub16Ux4:
+         case Iop_QSub32Ux2:
+         case Iop_QSub64Ux1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QSub8Ux8: size = 0; break;
+               case Iop_QSub16Ux4: size = 1; break;
+               case Iop_QSub32Ux2: size = 2; break;
+               case Iop_QSub64Ux1: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQSUBU,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_QSub8Sx8:
+         case Iop_QSub16Sx4:
+         case Iop_QSub32Sx2:
+         case Iop_QSub64Sx1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QSub8Sx8: size = 0; break;
+               case Iop_QSub16Sx4: size = 1; break;
+               case Iop_QSub32Sx2: size = 2; break;
+               case Iop_QSub64Sx1: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQSUBS,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_Max8Ux8:
+         case Iop_Max16Ux4:
+         case Iop_Max32Ux2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Max8Ux8: size = 0; break;
+               case Iop_Max16Ux4: size = 1; break;
+               case Iop_Max32Ux2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMAXU,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_Max8Sx8:
+         case Iop_Max16Sx4:
+         case Iop_Max32Sx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Max8Sx8: size = 0; break;
+               case Iop_Max16Sx4: size = 1; break;
+               case Iop_Max32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMAXS,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_Min8Ux8:
+         case Iop_Min16Ux4:
+         case Iop_Min32Ux2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Min8Ux8: size = 0; break;
+               case Iop_Min16Ux4: size = 1; break;
+               case Iop_Min32Ux2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMINU,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_Min8Sx8:
+         case Iop_Min16Sx4:
+         case Iop_Min32Sx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Min8Sx8: size = 0; break;
+               case Iop_Min16Sx4: size = 1; break;
+               case Iop_Min32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMINS,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_Sar8x8:
+         case Iop_Sar16x4:
+         case Iop_Sar32x2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            HReg argR2 = newVRegD(env);
+            HReg zero = newVRegD(env);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Sar8x8: size = 0; break;
+               case Iop_Sar16x4: size = 1; break;
+               case Iop_Sar32x2: size = 2; break;
+               case Iop_Sar64: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NeonImm(zero, ARMNImm_TI(0,0)));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VSUB,
+                                           argR2, zero, argR, size, False));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSAL,
+                                          res, argL, argR2, size, False));
+            return res;
+         }
+         case Iop_Sal8x8:
+         case Iop_Sal16x4:
+         case Iop_Sal32x2:
+         case Iop_Sal64x1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Sal8x8: size = 0; break;
+               case Iop_Sal16x4: size = 1; break;
+               case Iop_Sal32x2: size = 2; break;
+               case Iop_Sal64x1: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NShift(ARMneon_VSAL,
+                                          res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_Shr8x8:
+         case Iop_Shr16x4:
+         case Iop_Shr32x2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            HReg argR2 = newVRegD(env);
+            HReg zero = newVRegD(env);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Shr8x8: size = 0; break;
+               case Iop_Shr16x4: size = 1; break;
+               case Iop_Shr32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NeonImm(zero, ARMNImm_TI(0,0)));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VSUB,
+                                           argR2, zero, argR, size, False));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          res, argL, argR2, size, False));
+            return res;
+         }
+         case Iop_Shl8x8:
+         case Iop_Shl16x4:
+         case Iop_Shl32x2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Shl8x8: size = 0; break;
+               case Iop_Shl16x4: size = 1; break;
+               case Iop_Shl32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_QShl8x8:
+         case Iop_QShl16x4:
+         case Iop_QShl32x2:
+         case Iop_QShl64x1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QShl8x8: size = 0; break;
+               case Iop_QShl16x4: size = 1; break;
+               case Iop_QShl32x2: size = 2; break;
+               case Iop_QShl64x1: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NShift(ARMneon_VQSHL,
+                                          res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_QSal8x8:
+         case Iop_QSal16x4:
+         case Iop_QSal32x2:
+         case Iop_QSal64x1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QSal8x8: size = 0; break;
+               case Iop_QSal16x4: size = 1; break;
+               case Iop_QSal32x2: size = 2; break;
+               case Iop_QSal64x1: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NShift(ARMneon_VQSAL,
+                                          res, argL, argR, size, False));
+            return res;
+         }
+         case Iop_QShlNsatUU8x8:
+         case Iop_QShlNsatUU16x4:
+         case Iop_QShlNsatUU32x2:
+         case Iop_QShlNsatUU64x1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            UInt size, imm;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+               vpanic("ARM target supports Iop_QShlNsatUUAxB with constant "
+                      "second argument only\n");
+            }
+            imm = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch (e->Iex.Binop.op) {
+               case Iop_QShlNsatUU8x8: size = 8 | imm; break;
+               case Iop_QShlNsatUU16x4: size = 16 | imm; break;
+               case Iop_QShlNsatUU32x2: size = 32 | imm; break;
+               case Iop_QShlNsatUU64x1: size = 64 | imm; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_VQSHLNUU,
+                                          res, argL, size, False));
+            return res;
+         }
+         case Iop_QShlNsatSU8x8:
+         case Iop_QShlNsatSU16x4:
+         case Iop_QShlNsatSU32x2:
+         case Iop_QShlNsatSU64x1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            UInt size, imm;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+               vpanic("ARM target supports Iop_QShlNsatSUAxB with constant "
+                      "second argument only\n");
+            }
+            imm = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch (e->Iex.Binop.op) {
+               case Iop_QShlNsatSU8x8: size = 8 | imm; break;
+               case Iop_QShlNsatSU16x4: size = 16 | imm; break;
+               case Iop_QShlNsatSU32x2: size = 32 | imm; break;
+               case Iop_QShlNsatSU64x1: size = 64 | imm; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_VQSHLNUS,
+                                          res, argL, size, False));
+            return res;
+         }
+         case Iop_QShlNsatSS8x8:
+         case Iop_QShlNsatSS16x4:
+         case Iop_QShlNsatSS32x2:
+         case Iop_QShlNsatSS64x1: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            UInt size, imm;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+               vpanic("ARM target supports Iop_QShlNsatSSAxB with constant "
+                      "second argument only\n");
+            }
+            imm = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch (e->Iex.Binop.op) {
+               case Iop_QShlNsatSS8x8: size = 8 | imm; break;
+               case Iop_QShlNsatSS16x4: size = 16 | imm; break;
+               case Iop_QShlNsatSS32x2: size = 32 | imm; break;
+               case Iop_QShlNsatSS64x1: size = 64 | imm; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_VQSHLNSS,
+                                          res, argL, size, False));
+            return res;
+         }
+         /* Vector logical shift right by a scalar amount.  Neon's VSHL only
+            shifts left by a (possibly negative) per-lane amount, so negate
+            the amount, broadcast it with DUP, and use VSHL. */
+         case Iop_ShrN8x8:
+         case Iop_ShrN16x4:
+         case Iop_ShrN32x2:
+         case Iop_Shr64: {
+            HReg res = newVRegD(env);
+            HReg tmp = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            HReg argR2 = newVRegI(env);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_ShrN8x8: size = 0; break;
+               case Iop_ShrN16x4: size = 1; break;
+               case Iop_ShrN32x2: size = 2; break;
+               case Iop_Shr64: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_Unary(ARMun_NEG, argR2, argR));
+            addInstr(env, ARMInstr_NUnary(ARMneon_DUP, tmp, argR2, 0, False));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          res, argL, tmp, size, False));
+            return res;
+         }
+         /* Vector shift left by a scalar amount (broadcast + VSHL). */
+         case Iop_ShlN8x8:
+         case Iop_ShlN16x4:
+         case Iop_ShlN32x2:
+         case Iop_Shl64: {
+            HReg res = newVRegD(env);
+            HReg tmp = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            /* special-case Shl64(x, imm8) since the Neon front
+               end produces a lot of those for V{LD,ST}{1,2,3,4}. */
+            if (e->Iex.Binop.op == Iop_Shl64 
+                && e->Iex.Binop.arg2->tag == Iex_Const) {
+               vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
+               Int nshift = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+               if (nshift >= 1 && nshift <= 63) {
+                  addInstr(env, ARMInstr_NShl64(res, argL, nshift));
+                  return res;
+               }
+               /* else fall through to general case */
+            }
+            HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_ShlN8x8:  size = 0; break;
+               case Iop_ShlN16x4: size = 1; break;
+               case Iop_ShlN32x2: size = 2; break;
+               case Iop_Shl64:    size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_DUP,
+                                          tmp, argR, 0, False));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          res, argL, tmp, size, False));
+            return res;
+         }
+         /* Vector arithmetic shift right: negate the amount and use the
+            signed variable shift (VSAL), as for ShrN above. */
+         case Iop_SarN8x8:
+         case Iop_SarN16x4:
+         case Iop_SarN32x2:
+         case Iop_Sar64: {
+            HReg res = newVRegD(env);
+            HReg tmp = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            HReg argR2 = newVRegI(env);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_SarN8x8: size = 0; break;
+               case Iop_SarN16x4: size = 1; break;
+               case Iop_SarN32x2: size = 2; break;
+               case Iop_Sar64: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_Unary(ARMun_NEG, argR2, argR));
+            addInstr(env, ARMInstr_NUnary(ARMneon_DUP, tmp, argR2, 0, False));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSAL,
+                                          res, argL, tmp, size, False));
+            return res;
+         }
+         /* Unsigned per-lane compare-greater-than (VCGT.U). */
+         case Iop_CmpGT8Ux8:
+         case Iop_CmpGT16Ux4:
+         case Iop_CmpGT32Ux2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_CmpGT8Ux8: size = 0; break;
+               case Iop_CmpGT16Ux4: size = 1; break;
+               case Iop_CmpGT32Ux2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCGTU,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Signed per-lane compare-greater-than (VCGT.S). */
+         case Iop_CmpGT8Sx8:
+         case Iop_CmpGT16Sx4:
+         case Iop_CmpGT32Sx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_CmpGT8Sx8: size = 0; break;
+               case Iop_CmpGT16Sx4: size = 1; break;
+               case Iop_CmpGT32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCGTS,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Per-lane compare-equal (VCEQ). */
+         case Iop_CmpEQ8x8:
+         case Iop_CmpEQ16x4:
+         case Iop_CmpEQ32x2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_CmpEQ8x8: size = 0; break;
+               case Iop_CmpEQ16x4: size = 1; break;
+               case Iop_CmpEQ32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCEQ,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Per-lane integer multiply (VMUL). */
+         case Iop_Mul8x8:
+         case Iop_Mul16x4:
+         case Iop_Mul32x2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Mul8x8: size = 0; break;
+               case Iop_Mul16x4: size = 1; break;
+               case Iop_Mul32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMUL,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* 2-lane F32 multiply (VMUL.F32). */
+         case Iop_Mul32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMULFP,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Saturating doubling multiply, high half (VQDMULH). */
+         case Iop_QDMulHi16Sx4:
+         case Iop_QDMulHi32Sx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_QDMulHi16Sx4: size = 1; break;
+               case Iop_QDMulHi32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQDMULH,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+
+         /* Rounding variant of the above (VQRDMULH). */
+         case Iop_QRDMulHi16Sx4:
+         case Iop_QRDMulHi32Sx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_QRDMulHi16Sx4: size = 1; break;
+               case Iop_QRDMulHi32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQRDMULH,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+
+         /* Pairwise integer add (VPADD). */
+         case Iop_PwAdd8x8:
+         case Iop_PwAdd16x4:
+         case Iop_PwAdd32x2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwAdd8x8: size = 0; break;
+               case Iop_PwAdd16x4: size = 1; break;
+               case Iop_PwAdd32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPADD,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Pairwise F32 add (VPADD.F32). */
+         case Iop_PwAdd32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPADDFP,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Pairwise unsigned minimum (VPMIN.U). */
+         case Iop_PwMin8Ux8:
+         case Iop_PwMin16Ux4:
+         case Iop_PwMin32Ux2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwMin8Ux8: size = 0; break;
+               case Iop_PwMin16Ux4: size = 1; break;
+               case Iop_PwMin32Ux2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPMINU,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Pairwise signed minimum (VPMIN.S). */
+         case Iop_PwMin8Sx8:
+         case Iop_PwMin16Sx4:
+         case Iop_PwMin32Sx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwMin8Sx8: size = 0; break;
+               case Iop_PwMin16Sx4: size = 1; break;
+               case Iop_PwMin32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPMINS,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Pairwise unsigned maximum (VPMAX.U). */
+         case Iop_PwMax8Ux8:
+         case Iop_PwMax16Ux4:
+         case Iop_PwMax32Ux2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwMax8Ux8: size = 0; break;
+               case Iop_PwMax16Ux4: size = 1; break;
+               case Iop_PwMax32Ux2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPMAXU,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Pairwise signed maximum (VPMAX.S). */
+         case Iop_PwMax8Sx8:
+         case Iop_PwMax16Sx4:
+         case Iop_PwMax32Sx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwMax8Sx8: size = 0; break;
+               case Iop_PwMax16Sx4: size = 1; break;
+               case Iop_PwMax32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPMAXS,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* Byte-wise table lookup (VTBL): argL is the table, argR the
+            indices. */
+         case Iop_Perm8x8: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VTBL,
+                                           res, argL, argR, 0, False));
+            return res;
+         }
+         /* Polynomial (carry-less) 8-bit multiply (VMUL.P8). */
+         case Iop_PolynomialMul8x8: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMULP,
+                                           res, argL, argR, size, False));
+            return res;
+         }
+         /* The following F32x2 ops all use lane-size code 2 (32-bit). */
+         case Iop_Max32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMAXF,
+                                           res, argL, argR, 2, False));
+            return res;
+         }
+         case Iop_Min32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMINF,
+                                           res, argL, argR, 2, False));
+            return res;
+         }
+         case Iop_PwMax32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPMAXF,
+                                           res, argL, argR, 2, False));
+            return res;
+         }
+         case Iop_PwMin32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPMINF,
+                                           res, argL, argR, 2, False));
+            return res;
+         }
+         case Iop_CmpGT32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCGTF,
+                                           res, argL, argR, 2, False));
+            return res;
+         }
+         case Iop_CmpGE32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCGEF,
+                                           res, argL, argR, 2, False));
+            return res;
+         }
+         case Iop_CmpEQ32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCEQF,
+                                           res, argL, argR, 2, False));
+            return res;
+         }
+         /* F32 <-> 32-bit fixed-point conversion (VCVT with #fracbits).
+            The fraction-bit count must be a constant I8 in 1..32. */
+         case Iop_F32ToFixed32Ux2_RZ:
+         case Iop_F32ToFixed32Sx2_RZ:
+         case Iop_Fixed32UToF32x2_RN:
+         case Iop_Fixed32SToF32x2_RN: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            ARMNeonUnOp op;
+            UInt imm6;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+               typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+                  vpanic("ARM supports FP <-> Fixed conversion with constant "
+                         "second argument less than 33 only\n");
+            }
+            imm6 = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            vassert(imm6 <= 32 && imm6 > 0);
+            /* VCVT's imm6 field encodes the count as 64 - fracbits. */
+            imm6 = 64 - imm6;
+            switch(e->Iex.Binop.op) {
+               case Iop_F32ToFixed32Ux2_RZ: op = ARMneon_VCVTFtoFixedU; break;
+               case Iop_F32ToFixed32Sx2_RZ: op = ARMneon_VCVTFtoFixedS; break;
+               case Iop_Fixed32UToF32x2_RN: op = ARMneon_VCVTFixedUtoF; break;
+               case Iop_Fixed32SToF32x2_RN: op = ARMneon_VCVTFixedStoF; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(op, res, arg, imm6, False));
+            return res;
+         }
+         /*
+         FIXME: this Iop_VDup handler is disabled -- confirm whether the
+         scalar-index VDUP handling in the Iop_Dup8x8/16x4/32x2 unary cases
+         below makes it redundant before deleting it.
+         case Iop_VDup8x8:
+         case Iop_VDup16x4:
+         case Iop_VDup32x2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            UInt index;
+            UInt imm4;
+            UInt size = 0;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+               typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+                  vpanic("ARM supports Iop_VDup with constant "
+                         "second argument less than 16 only\n");
+            }
+            index = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch(e->Iex.Binop.op) {
+               case Iop_VDup8x8: imm4 = (index << 1) + 1; break;
+               case Iop_VDup16x4: imm4 = (index << 2) + 2; break;
+               case Iop_VDup32x2: imm4 = (index << 3) + 4; break;
+               default: vassert(0);
+            }
+            if (imm4 >= 16) {
+               vpanic("ARM supports Iop_VDup with constant "
+                      "second argument less than 16 only\n");
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_VDUP,
+                                          res, argL, imm4, False));
+            return res;
+         }
+         */
+         default:
+            break;
+      }
+   }
+
+   /* --------- UNARY ops --------- */
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+
+         /* 32Uto64 */
+         /* 32Uto64: zero-extend by pairing the value with a zero high
+            word and moving both into a D register. */
+         case Iop_32Uto64: {
+            HReg rLo = iselIntExpr_R(env, e->Iex.Unop.arg);
+            HReg rHi = newVRegI(env);
+            HReg res = newVRegD(env);
+            addInstr(env, ARMInstr_Imm32(rHi, 0));
+            addInstr(env, ARMInstr_VXferD(True/*toD*/, res, rHi, rLo));
+            return res;
+         }
+
+         /* 32Sto64: sign-extend -- the high word is the low word
+            arithmetically shifted right by 31 (all sign bits). */
+         case Iop_32Sto64: {
+            HReg rLo = iselIntExpr_R(env, e->Iex.Unop.arg);
+            HReg rHi = newVRegI(env);
+            addInstr(env, mk_iMOVds_RR(rHi, rLo));
+            addInstr(env, ARMInstr_Shift(ARMsh_SAR, rHi, rHi, ARMRI5_I5(31)));
+            HReg res = newVRegD(env);
+            addInstr(env, ARMInstr_VXferD(True/*toD*/, res, rHi, rLo));
+            return res;
+         }
+
+         /* The next 3 are pass-throughs: evaluate the whole expression as
+            a 32:32 register pair and move it into a D register. */
+         /* ReinterpF64asI64 */
+         case Iop_ReinterpF64asI64:
+         /* Left64(e) */
+         case Iop_Left64:
+         /* 1Sto64(e) */
+         case Iop_1Sto64: {
+            HReg rLo, rHi;
+            HReg res = newVRegD(env);
+            iselInt64Expr(&rHi, &rLo, env, e);
+            addInstr(env, ARMInstr_VXferD(True/*toD*/, res, rHi, rLo));
+            return res;
+         }
+
+         /* Not64: first try to fuse Not(CmpNEZ) into VCEQ-with-zero and
+            Not(CmpGT) into VCGE (note the swapped operand order in the
+            CmpGT patterns: not(a > b) == b >= a).  Otherwise emit a plain
+            bitwise NOT. */
+         case Iop_Not64: {
+            DECLARE_PATTERN(p_veqz_8x8);
+            DECLARE_PATTERN(p_veqz_16x4);
+            DECLARE_PATTERN(p_veqz_32x2);
+            DECLARE_PATTERN(p_vcge_8sx8);
+            DECLARE_PATTERN(p_vcge_16sx4);
+            DECLARE_PATTERN(p_vcge_32sx2);
+            DECLARE_PATTERN(p_vcge_8ux8);
+            DECLARE_PATTERN(p_vcge_16ux4);
+            DECLARE_PATTERN(p_vcge_32ux2);
+            DEFINE_PATTERN(p_veqz_8x8,
+                  unop(Iop_Not64, unop(Iop_CmpNEZ8x8, bind(0))));
+            DEFINE_PATTERN(p_veqz_16x4,
+                  unop(Iop_Not64, unop(Iop_CmpNEZ16x4, bind(0))));
+            DEFINE_PATTERN(p_veqz_32x2,
+                  unop(Iop_Not64, unop(Iop_CmpNEZ32x2, bind(0))));
+            DEFINE_PATTERN(p_vcge_8sx8,
+                  unop(Iop_Not64, binop(Iop_CmpGT8Sx8, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_16sx4,
+                  unop(Iop_Not64, binop(Iop_CmpGT16Sx4, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_32sx2,
+                  unop(Iop_Not64, binop(Iop_CmpGT32Sx2, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_8ux8,
+                  unop(Iop_Not64, binop(Iop_CmpGT8Ux8, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_16ux4,
+                  unop(Iop_Not64, binop(Iop_CmpGT16Ux4, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_32ux2,
+                  unop(Iop_Not64, binop(Iop_CmpGT32Ux2, bind(1), bind(0))));
+            if (matchIRExpr(&mi, p_veqz_8x8, e)) {
+               HReg res = newVRegD(env);
+               HReg arg = iselNeon64Expr(env, mi.bindee[0]);
+               addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, res, arg, 0, False));
+               return res;
+            } else if (matchIRExpr(&mi, p_veqz_16x4, e)) {
+               HReg res = newVRegD(env);
+               HReg arg = iselNeon64Expr(env, mi.bindee[0]);
+               addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, res, arg, 1, False));
+               return res;
+            } else if (matchIRExpr(&mi, p_veqz_32x2, e)) {
+               HReg res = newVRegD(env);
+               HReg arg = iselNeon64Expr(env, mi.bindee[0]);
+               addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, res, arg, 2, False));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_8sx8, e)) {
+               HReg res = newVRegD(env);
+               HReg argL = iselNeon64Expr(env, mi.bindee[0]);
+               HReg argR = iselNeon64Expr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGES,
+                                              res, argL, argR, 0, False));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_16sx4, e)) {
+               HReg res = newVRegD(env);
+               HReg argL = iselNeon64Expr(env, mi.bindee[0]);
+               HReg argR = iselNeon64Expr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGES,
+                                              res, argL, argR, 1, False));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_32sx2, e)) {
+               HReg res = newVRegD(env);
+               HReg argL = iselNeon64Expr(env, mi.bindee[0]);
+               HReg argR = iselNeon64Expr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGES,
+                                              res, argL, argR, 2, False));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_8ux8, e)) {
+               HReg res = newVRegD(env);
+               HReg argL = iselNeon64Expr(env, mi.bindee[0]);
+               HReg argR = iselNeon64Expr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGEU,
+                                              res, argL, argR, 0, False));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_16ux4, e)) {
+               HReg res = newVRegD(env);
+               HReg argL = iselNeon64Expr(env, mi.bindee[0]);
+               HReg argR = iselNeon64Expr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGEU,
+                                              res, argL, argR, 1, False));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_32ux2, e)) {
+               HReg res = newVRegD(env);
+               HReg argL = iselNeon64Expr(env, mi.bindee[0]);
+               HReg argR = iselNeon64Expr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGEU,
+                                              res, argL, argR, 2, False));
+               return res;
+            } else {
+               /* No fusible pattern: plain bitwise NOT. */
+               HReg res = newVRegD(env);
+               HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+               addInstr(env, ARMInstr_NUnary(ARMneon_NOT, res, arg, 4, False));
+               return res;
+            }
+         }
+         /* Dup: broadcast a scalar to all lanes.  If the scalar is
+            GetElem(vector, const-index) with an in-range index, use the
+            scalar-source form of VDUP directly; otherwise materialise the
+            scalar in a core register and DUP that.  imm4 encodes both the
+            lane size (low set bit) and the index (bits above it). */
+         case Iop_Dup8x8:
+         case Iop_Dup16x4:
+         case Iop_Dup32x2: {
+            HReg res, arg;
+            UInt size;
+            DECLARE_PATTERN(p_vdup_8x8);
+            DECLARE_PATTERN(p_vdup_16x4);
+            DECLARE_PATTERN(p_vdup_32x2);
+            DEFINE_PATTERN(p_vdup_8x8,
+                  unop(Iop_Dup8x8, binop(Iop_GetElem8x8, bind(0), bind(1))));
+            DEFINE_PATTERN(p_vdup_16x4,
+                  unop(Iop_Dup16x4, binop(Iop_GetElem16x4, bind(0), bind(1))));
+            DEFINE_PATTERN(p_vdup_32x2,
+                  unop(Iop_Dup32x2, binop(Iop_GetElem32x2, bind(0), bind(1))));
+            if (matchIRExpr(&mi, p_vdup_8x8, e)) {
+               UInt index;
+               UInt imm4;
+               if (mi.bindee[1]->tag == Iex_Const &&
+                  typeOfIRExpr(env->type_env, mi.bindee[1]) == Ity_I8) {
+                  index = mi.bindee[1]->Iex.Const.con->Ico.U8;
+                  imm4 = (index << 1) + 1;
+                  if (index < 8) {
+                     res = newVRegD(env);
+                     arg = iselNeon64Expr(env, mi.bindee[0]);
+                     addInstr(env, ARMInstr_NUnaryS(
+                                      ARMneon_VDUP,
+                                      mkARMNRS(ARMNRS_Reg, res, 0),
+                                      mkARMNRS(ARMNRS_Scalar, arg, index),
+                                      imm4, False
+                             ));
+                     return res;
+                  }
+               }
+            } else if (matchIRExpr(&mi, p_vdup_16x4, e)) {
+               UInt index;
+               UInt imm4;
+               if (mi.bindee[1]->tag == Iex_Const &&
+                  typeOfIRExpr(env->type_env, mi.bindee[1]) == Ity_I8) {
+                  index = mi.bindee[1]->Iex.Const.con->Ico.U8;
+                  imm4 = (index << 2) + 2;
+                  if (index < 4) {
+                     res = newVRegD(env);
+                     arg = iselNeon64Expr(env, mi.bindee[0]);
+                     addInstr(env, ARMInstr_NUnaryS(
+                                      ARMneon_VDUP,
+                                      mkARMNRS(ARMNRS_Reg, res, 0),
+                                      mkARMNRS(ARMNRS_Scalar, arg, index),
+                                      imm4, False
+                             ));
+                     return res;
+                  }
+               }
+            } else if (matchIRExpr(&mi, p_vdup_32x2, e)) {
+               UInt index;
+               UInt imm4;
+               if (mi.bindee[1]->tag == Iex_Const &&
+                  typeOfIRExpr(env->type_env, mi.bindee[1]) == Ity_I8) {
+                  index = mi.bindee[1]->Iex.Const.con->Ico.U8;
+                  imm4 = (index << 3) + 4;
+                  if (index < 2) {
+                     res = newVRegD(env);
+                     arg = iselNeon64Expr(env, mi.bindee[0]);
+                     addInstr(env, ARMInstr_NUnaryS(
+                                      ARMneon_VDUP,
+                                      mkARMNRS(ARMNRS_Reg, res, 0),
+                                      mkARMNRS(ARMNRS_Scalar, arg, index),
+                                      imm4, False
+                             ));
+                     return res;
+                  }
+               }
+            }
+            /* Fallback: broadcast from a core register. */
+            arg = iselIntExpr_R(env, e->Iex.Unop.arg);
+            res = newVRegD(env);
+            switch (e->Iex.Unop.op) {
+               case Iop_Dup8x8: size = 0; break;
+               case Iop_Dup16x4: size = 1; break;
+               case Iop_Dup32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_DUP, res, arg, size, False));
+            return res;
+         }
+         /* Per-lane integer absolute value (VABS). */
+         case Iop_Abs8x8:
+         case Iop_Abs16x4:
+         case Iop_Abs32x2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            /* This is a Unop, so select on Iex.Unop.op.  (Reading
+               Iex.Binop.op here only worked by accident, because both
+               union members place 'op' at the same offset.) */
+            switch(e->Iex.Unop.op) {
+               case Iop_Abs8x8: size = 0; break;
+               case Iop_Abs16x4: size = 1; break;
+               case Iop_Abs32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_ABS, res, arg, size, False));
+            return res;
+         }
+         /* Reverse 8/16/32-bit sub-elements within the 64-bit value
+            (VREV64). */
+         case Iop_Reverse8sIn64_x1:
+         case Iop_Reverse16sIn64_x1:
+         case Iop_Reverse32sIn64_x1: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            /* Unop handler: use Iex.Unop.op (the original's Iex.Binop.op
+               relied on union layout). */
+            switch(e->Iex.Unop.op) {
+               case Iop_Reverse8sIn64_x1: size = 0; break;
+               case Iop_Reverse16sIn64_x1: size = 1; break;
+               case Iop_Reverse32sIn64_x1: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_REV64,
+                                          res, arg, size, False));
+            return res;
+         }
+         /* Reverse 8/16-bit sub-elements within each 32-bit lane
+            (VREV32). */
+         case Iop_Reverse8sIn32_x2:
+         case Iop_Reverse16sIn32_x2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            /* Unop handler: use Iex.Unop.op (the original's Iex.Binop.op
+               relied on union layout). */
+            switch(e->Iex.Unop.op) {
+               case Iop_Reverse8sIn32_x2: size = 0; break;
+               case Iop_Reverse16sIn32_x2: size = 1; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_REV32,
+                                          res, arg, size, False));
+            return res;
+         }
+         /* Reverse bytes within each 16-bit lane (VREV16). */
+         case Iop_Reverse8sIn16_x4: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NUnary(ARMneon_REV16,
+                                          res, arg, size, False));
+            return res;
+         }
+         /* CmpwNEZ64: all-ones if the 64-bit value is nonzero, else zero.
+            Neon has no 64-bit EQZ, so: compute x = NOT(EQZ.32(arg))
+            (per-32-bit-lane nonzero mask), then OR together x, x << 32 and
+            x >> 32 so either lane's result fills the whole doubleword. */
+         case Iop_CmpwNEZ64: {
+            HReg x_lsh = newVRegD(env);
+            HReg x_rsh = newVRegD(env);
+            HReg lsh_amt = newVRegD(env);
+            HReg rsh_amt = newVRegD(env);
+            HReg zero = newVRegD(env);
+            HReg tmp = newVRegD(env);
+            HReg tmp2 = newVRegD(env);
+            HReg res = newVRegD(env);
+            HReg x = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, tmp2, arg, 2, False));
+            addInstr(env, ARMInstr_NUnary(ARMneon_NOT, x, tmp2, 4, False));
+            /* Shift amounts: +32 and (0 - 32) = -32, built as Neon
+               immediates since VSHL takes the amount in a register. */
+            addInstr(env, ARMInstr_NeonImm(lsh_amt, ARMNImm_TI(0, 32)));
+            addInstr(env, ARMInstr_NeonImm(zero, ARMNImm_TI(0, 0)));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VSUB,
+                                           rsh_amt, zero, lsh_amt, 2, False));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          x_lsh, x, lsh_amt, 3, False));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          x_rsh, x, rsh_amt, 3, False));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VORR,
+                                           tmp, x_lsh, x_rsh, 0, False));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VORR,
+                                           res, tmp, x, 0, False));
+            return res;
+         }
+         /* Per-lane nonzero test: NOT(EQZ(arg)). */
+         case Iop_CmpNEZ8x8:
+         case Iop_CmpNEZ16x4:
+         case Iop_CmpNEZ32x2: {
+            HReg res = newVRegD(env);
+            HReg tmp = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size;
+            switch (e->Iex.Unop.op) {
+               case Iop_CmpNEZ8x8: size = 0; break;
+               case Iop_CmpNEZ16x4: size = 1; break;
+               case Iop_CmpNEZ32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, tmp, arg, size, False));
+            addInstr(env, ARMInstr_NUnary(ARMneon_NOT, res, tmp, 4, False));
+            return res;
+         }
+         /* Truncating narrow from a Q register to a D register (VMOVN). */
+         case Iop_NarrowUn16to8x8:
+         case Iop_NarrowUn32to16x4:
+         case Iop_NarrowUn64to32x2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            /* Unop handler: use Iex.Unop.op (the original's Iex.Binop.op
+               relied on union layout). */
+            switch(e->Iex.Unop.op) {
+               case Iop_NarrowUn16to8x8:  size = 0; break;
+               case Iop_NarrowUn32to16x4: size = 1; break;
+               case Iop_NarrowUn64to32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPYN,
+                                          res, arg, size, False));
+            return res;
+         }
+         case Iop_QNarrowUn16Sto8Sx8:
+         case Iop_QNarrowUn32Sto16Sx4:
+         case Iop_QNarrowUn64Sto32Sx2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_QNarrowUn16Sto8Sx8:  size = 0; break;
+               case Iop_QNarrowUn32Sto16Sx4: size = 1; break;
+               case Iop_QNarrowUn64Sto32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPYQNSS,
+                                          res, arg, size, False));
+            return res;
+         }
+         case Iop_QNarrowUn16Sto8Ux8:
+         case Iop_QNarrowUn32Sto16Ux4:
+         case Iop_QNarrowUn64Sto32Ux2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_QNarrowUn16Sto8Ux8:  size = 0; break;
+               case Iop_QNarrowUn32Sto16Ux4: size = 1; break;
+               case Iop_QNarrowUn64Sto32Ux2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPYQNUS,
+                                          res, arg, size, False));
+            return res;
+         }
+         case Iop_QNarrowUn16Uto8Ux8:
+         case Iop_QNarrowUn32Uto16Ux4:
+         case Iop_QNarrowUn64Uto32Ux2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_QNarrowUn16Uto8Ux8:  size = 0; break;
+               case Iop_QNarrowUn32Uto16Ux4: size = 1; break;
+               case Iop_QNarrowUn64Uto32Ux2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPYQNUU,
+                                          res, arg, size, False));
+            return res;
+         }
+         case Iop_PwAddL8Sx8:
+         case Iop_PwAddL16Sx4:
+         case Iop_PwAddL32Sx2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwAddL8Sx8: size = 0; break;
+               case Iop_PwAddL16Sx4: size = 1; break;
+               case Iop_PwAddL32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_PADDLS,
+                                          res, arg, size, False));
+            return res;
+         }
+         case Iop_PwAddL8Ux8:
+         case Iop_PwAddL16Ux4:
+         case Iop_PwAddL32Ux2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwAddL8Ux8: size = 0; break;
+               case Iop_PwAddL16Ux4: size = 1; break;
+               case Iop_PwAddL32Ux2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_PADDLU,
+                                          res, arg, size, False));
+            return res;
+         }
+         case Iop_Cnt8x8: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NUnary(ARMneon_CNT,
+                                          res, arg, size, False));
+            return res;
+         }
+         case Iop_Clz8x8:
+         case Iop_Clz16x4:
+         case Iop_Clz32x2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Clz8x8: size = 0; break;
+               case Iop_Clz16x4: size = 1; break;
+               case Iop_Clz32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_CLZ,
+                                          res, arg, size, False));
+            return res;
+         }
+         case Iop_Cls8x8:
+         case Iop_Cls16x4:
+         case Iop_Cls32x2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Cls8x8: size = 0; break;
+               case Iop_Cls16x4: size = 1; break;
+               case Iop_Cls32x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_CLS,
+                                          res, arg, size, False));
+            return res;
+         }
+         case Iop_FtoI32Sx2_RZ: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTFtoS,
+                                          res, arg, 2, False));
+            return res;
+         }
+         case Iop_FtoI32Ux2_RZ: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTFtoU,
+                                          res, arg, 2, False));
+            return res;
+         }
+         case Iop_I32StoFx2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTStoF,
+                                          res, arg, 2, False));
+            return res;
+         }
+         case Iop_I32UtoFx2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTUtoF,
+                                          res, arg, 2, False));
+            return res;
+         }
+         case Iop_F32toF16x4: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTF32toF16,
+                                          res, arg, 2, False));
+            return res;
+         }
+         case Iop_RecipEst32Fx2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VRECIPF,
+                                          res, argL, 0, False));
+            return res;
+         }
+         case Iop_RecipEst32Ux2: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VRECIP,
+                                          res, argL, 0, False));
+            return res;
+         }
+         case Iop_Abs32Fx2: {
+            DECLARE_PATTERN(p_vabd_32fx2);
+            DEFINE_PATTERN(p_vabd_32fx2,
+                           unop(Iop_Abs32Fx2,
+                                binop(Iop_Sub32Fx2,
+                                      bind(0),
+                                      bind(1))));
+            if (matchIRExpr(&mi, p_vabd_32fx2, e)) {
+               HReg res = newVRegD(env);
+               HReg argL = iselNeon64Expr(env, mi.bindee[0]);
+               HReg argR = iselNeon64Expr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VABDFP,
+                                              res, argL, argR, 0, False));
+               return res;
+            } else {
+               HReg res = newVRegD(env);
+               HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+               addInstr(env, ARMInstr_NUnary(ARMneon_VABSFP,
+                                             res, arg, 0, False));
+               return res;
+            }
+         }
+         case Iop_RSqrtEst32Fx2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VRSQRTEFP,
+                                          res, arg, 0, False));
+            return res;
+         }
+         case Iop_RSqrtEst32Ux2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VRSQRTE,
+                                          res, arg, 0, False));
+            return res;
+         }
+         case Iop_Neg32Fx2: {
+            HReg res = newVRegD(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VNEGF,
+                                          res, arg, 0, False));
+            return res;
+         }
+         default:
+            break;
+      }
+   } /* if (e->tag == Iex_Unop) */
+
+   if (e->tag == Iex_Triop) {
+      IRTriop *triop = e->Iex.Triop.details;
+
+      switch (triop->op) {
+         case Iop_Slice64: {
+            HReg res = newVRegD(env);
+            HReg argL = iselNeon64Expr(env, triop->arg2);
+            HReg argR = iselNeon64Expr(env, triop->arg1);
+            UInt imm4;
+            if (triop->arg3->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, triop->arg3) != Ity_I8) {
+               vpanic("ARM target supports Iop_Extract64 with constant "
+                      "third argument less than 16 only\n");
+            }
+            imm4 = triop->arg3->Iex.Const.con->Ico.U8;
+            if (imm4 >= 8) {
+               vpanic("ARM target supports Iop_Extract64 with constant "
+                      "third argument less than 16 only\n");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VEXT,
+                                           res, argL, argR, imm4, False));
+            return res;
+         }
+         case Iop_SetElem8x8:
+         case Iop_SetElem16x4:
+         case Iop_SetElem32x2: {
+            HReg res = newVRegD(env);
+            HReg dreg = iselNeon64Expr(env, triop->arg1);
+            HReg arg = iselIntExpr_R(env, triop->arg3);
+            UInt index, size;
+            if (triop->arg2->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, triop->arg2) != Ity_I8) {
+               vpanic("ARM target supports SetElem with constant "
+                      "second argument only\n");
+            }
+            index = triop->arg2->Iex.Const.con->Ico.U8;
+            switch (triop->op) {
+               case Iop_SetElem8x8: vassert(index < 8); size = 0; break;
+               case Iop_SetElem16x4: vassert(index < 4); size = 1; break;
+               case Iop_SetElem32x2: vassert(index < 2); size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, res, dreg, 4, False));
+            addInstr(env, ARMInstr_NUnaryS(ARMneon_SETELEM,
+                                           mkARMNRS(ARMNRS_Scalar, res, index),
+                                           mkARMNRS(ARMNRS_Reg, arg, 0),
+                                           size, False));
+            return res;
+         }
+         default:
+            break;
+      }
+   }
+
+   /* --------- MULTIPLEX --------- */
+   if (e->tag == Iex_ITE) { // VFD
+      HReg rLo, rHi;
+      HReg res = newVRegD(env);
+      iselInt64Expr(&rHi, &rLo, env, e);
+      addInstr(env, ARMInstr_VXferD(True/*toD*/, res, rHi, rLo));
+      return res;
+   }
+
+   ppIRExpr(e);
+   vpanic("iselNeon64Expr");
+}
+
+
+static HReg iselNeonExpr ( ISelEnv* env, IRExpr* e )
+{
+   HReg r;
+   vassert(env->hwcaps & VEX_HWCAPS_ARM_NEON);
+   r = iselNeonExpr_wrk( env, e );
+   vassert(hregClass(r) == HRcVec128);
+   vassert(hregIsVirtual(r));
+   return r;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselNeonExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env, e);
+   MatchInfo mi;
+   vassert(e);
+   vassert(ty == Ity_V128);
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Const) {
+      /* At the moment there should be no 128-bit constants in IR for ARM
+         generated during disassemble. They are represented as Iop_64HLtoV128
+         binary operation and are handled among binary ops. */
+      /* But zero can be created by valgrind internal optimizer */
+      if (e->Iex.Const.con->Ico.V128 == 0x0000) {
+         HReg res = newVRegV(env);
+         addInstr(env, ARMInstr_NeonImm(res, ARMNImm_TI(6, 0)));
+         return res;
+      }
+      if (e->Iex.Const.con->Ico.V128 == 0xFFFF) {
+         HReg res = newVRegV(env);
+         addInstr(env, ARMInstr_NeonImm(res, ARMNImm_TI(6, 255)));
+         return res;
+      }
+      ppIRExpr(e);
+      vpanic("128-bit constant is not implemented");
+   }
+
+   if (e->tag == Iex_Load) {
+      HReg res = newVRegV(env);
+      ARMAModeN* am = iselIntExpr_AModeN(env, e->Iex.Load.addr);
+      vassert(ty == Ity_V128);
+      addInstr(env, ARMInstr_NLdStQ(True, res, am));
+      return res;
+   }
+
+   if (e->tag == Iex_Get) {
+      HReg addr = newVRegI(env);
+      HReg res = newVRegV(env);
+      vassert(ty == Ity_V128);
+      addInstr(env, ARMInstr_Add32(addr, hregARM_R8(), e->Iex.Get.offset));
+      addInstr(env, ARMInstr_NLdStQ(True, res, mkARMAModeN_R(addr)));
+      return res;
+   }
+
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+         case Iop_NotV128: {
+            DECLARE_PATTERN(p_veqz_8x16);
+            DECLARE_PATTERN(p_veqz_16x8);
+            DECLARE_PATTERN(p_veqz_32x4);
+            DECLARE_PATTERN(p_vcge_8sx16);
+            DECLARE_PATTERN(p_vcge_16sx8);
+            DECLARE_PATTERN(p_vcge_32sx4);
+            DECLARE_PATTERN(p_vcge_8ux16);
+            DECLARE_PATTERN(p_vcge_16ux8);
+            DECLARE_PATTERN(p_vcge_32ux4);
+            DEFINE_PATTERN(p_veqz_8x16,
+                  unop(Iop_NotV128, unop(Iop_CmpNEZ8x16, bind(0))));
+            DEFINE_PATTERN(p_veqz_16x8,
+                  unop(Iop_NotV128, unop(Iop_CmpNEZ16x8, bind(0))));
+            DEFINE_PATTERN(p_veqz_32x4,
+                  unop(Iop_NotV128, unop(Iop_CmpNEZ32x4, bind(0))));
+            DEFINE_PATTERN(p_vcge_8sx16,
+                  unop(Iop_NotV128, binop(Iop_CmpGT8Sx16, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_16sx8,
+                  unop(Iop_NotV128, binop(Iop_CmpGT16Sx8, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_32sx4,
+                  unop(Iop_NotV128, binop(Iop_CmpGT32Sx4, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_8ux16,
+                  unop(Iop_NotV128, binop(Iop_CmpGT8Ux16, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_16ux8,
+                  unop(Iop_NotV128, binop(Iop_CmpGT16Ux8, bind(1), bind(0))));
+            DEFINE_PATTERN(p_vcge_32ux4,
+                  unop(Iop_NotV128, binop(Iop_CmpGT32Ux4, bind(1), bind(0))));
+            if (matchIRExpr(&mi, p_veqz_8x16, e)) {
+               HReg res = newVRegV(env);
+               HReg arg = iselNeonExpr(env, mi.bindee[0]);
+               addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, res, arg, 0, True));
+               return res;
+            } else if (matchIRExpr(&mi, p_veqz_16x8, e)) {
+               HReg res = newVRegV(env);
+               HReg arg = iselNeonExpr(env, mi.bindee[0]);
+               addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, res, arg, 1, True));
+               return res;
+            } else if (matchIRExpr(&mi, p_veqz_32x4, e)) {
+               HReg res = newVRegV(env);
+               HReg arg = iselNeonExpr(env, mi.bindee[0]);
+               addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, res, arg, 2, True));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_8sx16, e)) {
+               HReg res = newVRegV(env);
+               HReg argL = iselNeonExpr(env, mi.bindee[0]);
+               HReg argR = iselNeonExpr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGES,
+                                              res, argL, argR, 0, True));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_16sx8, e)) {
+               HReg res = newVRegV(env);
+               HReg argL = iselNeonExpr(env, mi.bindee[0]);
+               HReg argR = iselNeonExpr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGES,
+                                              res, argL, argR, 1, True));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_32sx4, e)) {
+               HReg res = newVRegV(env);
+               HReg argL = iselNeonExpr(env, mi.bindee[0]);
+               HReg argR = iselNeonExpr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGES,
+                                              res, argL, argR, 2, True));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_8ux16, e)) {
+               HReg res = newVRegV(env);
+               HReg argL = iselNeonExpr(env, mi.bindee[0]);
+               HReg argR = iselNeonExpr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGEU,
+                                              res, argL, argR, 0, True));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_16ux8, e)) {
+               HReg res = newVRegV(env);
+               HReg argL = iselNeonExpr(env, mi.bindee[0]);
+               HReg argR = iselNeonExpr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGEU,
+                                              res, argL, argR, 1, True));
+               return res;
+            } else if (matchIRExpr(&mi, p_vcge_32ux4, e)) {
+               HReg res = newVRegV(env);
+               HReg argL = iselNeonExpr(env, mi.bindee[0]);
+               HReg argR = iselNeonExpr(env, mi.bindee[1]);
+               addInstr(env, ARMInstr_NBinary(ARMneon_VCGEU,
+                                              res, argL, argR, 2, True));
+               return res;
+            } else {
+               HReg res = newVRegV(env);
+               HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+               addInstr(env, ARMInstr_NUnary(ARMneon_NOT, res, arg, 4, True));
+               return res;
+            }
+         }
+         case Iop_Dup8x16:
+         case Iop_Dup16x8:
+         case Iop_Dup32x4: {
+            HReg res, arg;
+            UInt size;
+            DECLARE_PATTERN(p_vdup_8x16);
+            DECLARE_PATTERN(p_vdup_16x8);
+            DECLARE_PATTERN(p_vdup_32x4);
+            DEFINE_PATTERN(p_vdup_8x16,
+                  unop(Iop_Dup8x16, binop(Iop_GetElem8x8, bind(0), bind(1))));
+            DEFINE_PATTERN(p_vdup_16x8,
+                  unop(Iop_Dup16x8, binop(Iop_GetElem16x4, bind(0), bind(1))));
+            DEFINE_PATTERN(p_vdup_32x4,
+                  unop(Iop_Dup32x4, binop(Iop_GetElem32x2, bind(0), bind(1))));
+            if (matchIRExpr(&mi, p_vdup_8x16, e)) {
+               UInt index;
+               UInt imm4;
+               if (mi.bindee[1]->tag == Iex_Const &&
+                  typeOfIRExpr(env->type_env, mi.bindee[1]) == Ity_I8) {
+                  index = mi.bindee[1]->Iex.Const.con->Ico.U8;
+                  imm4 = (index << 1) + 1;
+                  if (index < 8) {
+                     res = newVRegV(env);
+                     arg = iselNeon64Expr(env, mi.bindee[0]);
+                     addInstr(env, ARMInstr_NUnaryS(
+                                      ARMneon_VDUP,
+                                      mkARMNRS(ARMNRS_Reg, res, 0),
+                                      mkARMNRS(ARMNRS_Scalar, arg, index),
+                                      imm4, True
+                             ));
+                     return res;
+                  }
+               }
+            } else if (matchIRExpr(&mi, p_vdup_16x8, e)) {
+               UInt index;
+               UInt imm4;
+               if (mi.bindee[1]->tag == Iex_Const &&
+                  typeOfIRExpr(env->type_env, mi.bindee[1]) == Ity_I8) {
+                  index = mi.bindee[1]->Iex.Const.con->Ico.U8;
+                  imm4 = (index << 2) + 2;
+                  if (index < 4) {
+                     res = newVRegV(env);
+                     arg = iselNeon64Expr(env, mi.bindee[0]);
+                     addInstr(env, ARMInstr_NUnaryS(
+                                      ARMneon_VDUP,
+                                      mkARMNRS(ARMNRS_Reg, res, 0),
+                                      mkARMNRS(ARMNRS_Scalar, arg, index),
+                                      imm4, True
+                             ));
+                     return res;
+                  }
+               }
+            } else if (matchIRExpr(&mi, p_vdup_32x4, e)) {
+               UInt index;
+               UInt imm4;
+               if (mi.bindee[1]->tag == Iex_Const &&
+                  typeOfIRExpr(env->type_env, mi.bindee[1]) == Ity_I8) {
+                  index = mi.bindee[1]->Iex.Const.con->Ico.U8;
+                  imm4 = (index << 3) + 4;
+                  if (index < 2) {
+                     res = newVRegV(env);
+                     arg = iselNeon64Expr(env, mi.bindee[0]);
+                     addInstr(env, ARMInstr_NUnaryS(
+                                      ARMneon_VDUP,
+                                      mkARMNRS(ARMNRS_Reg, res, 0),
+                                      mkARMNRS(ARMNRS_Scalar, arg, index),
+                                      imm4, True
+                             ));
+                     return res;
+                  }
+               }
+            }
+            arg = iselIntExpr_R(env, e->Iex.Unop.arg);
+            res = newVRegV(env);
+            switch (e->Iex.Unop.op) {
+               case Iop_Dup8x16: size = 0; break;
+               case Iop_Dup16x8: size = 1; break;
+               case Iop_Dup32x4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_DUP, res, arg, size, True));
+            return res;
+         }
+         case Iop_Abs8x16:
+         case Iop_Abs16x8:
+         case Iop_Abs32x4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Abs8x16: size = 0; break;
+               case Iop_Abs16x8: size = 1; break;
+               case Iop_Abs32x4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_ABS, res, arg, size, True));
+            return res;
+         }
+         case Iop_Reverse8sIn64_x2:
+         case Iop_Reverse16sIn64_x2:
+         case Iop_Reverse32sIn64_x2: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Reverse8sIn64_x2: size = 0; break;
+               case Iop_Reverse16sIn64_x2: size = 1; break;
+               case Iop_Reverse32sIn64_x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_REV64,
+                                          res, arg, size, True));
+            return res;
+         }
+         case Iop_Reverse8sIn32_x4:
+         case Iop_Reverse16sIn32_x4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Reverse8sIn32_x4: size = 0; break;
+               case Iop_Reverse16sIn32_x4: size = 1; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_REV32,
+                                          res, arg, size, True));
+            return res;
+         }
+         case Iop_Reverse8sIn16_x8: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NUnary(ARMneon_REV16,
+                                          res, arg, size, True));
+            return res;
+         }
+         case Iop_CmpNEZ64x2: {
+            HReg x_lsh = newVRegV(env);
+            HReg x_rsh = newVRegV(env);
+            HReg lsh_amt = newVRegV(env);
+            HReg rsh_amt = newVRegV(env);
+            HReg zero = newVRegV(env);
+            HReg tmp = newVRegV(env);
+            HReg tmp2 = newVRegV(env);
+            HReg res = newVRegV(env);
+            HReg x = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, tmp2, arg, 2, True));
+            addInstr(env, ARMInstr_NUnary(ARMneon_NOT, x, tmp2, 4, True));
+            addInstr(env, ARMInstr_NeonImm(lsh_amt, ARMNImm_TI(0, 32)));
+            addInstr(env, ARMInstr_NeonImm(zero, ARMNImm_TI(0, 0)));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VSUB,
+                                           rsh_amt, zero, lsh_amt, 2, True));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          x_lsh, x, lsh_amt, 3, True));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          x_rsh, x, rsh_amt, 3, True));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VORR,
+                                           tmp, x_lsh, x_rsh, 0, True));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VORR,
+                                           res, tmp, x, 0, True));
+            return res;
+         }
+         case Iop_CmpNEZ8x16:
+         case Iop_CmpNEZ16x8:
+         case Iop_CmpNEZ32x4: {
+            HReg res = newVRegV(env);
+            HReg tmp = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size;
+            switch (e->Iex.Unop.op) {
+               case Iop_CmpNEZ8x16: size = 0; break;
+               case Iop_CmpNEZ16x8: size = 1; break;
+               case Iop_CmpNEZ32x4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_EQZ, tmp, arg, size, True));
+            addInstr(env, ARMInstr_NUnary(ARMneon_NOT, res, tmp, 4, True));
+            return res;
+         }
+         case Iop_Widen8Uto16x8:
+         case Iop_Widen16Uto32x4:
+         case Iop_Widen32Uto64x2: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size;
+            switch (e->Iex.Unop.op) {
+               case Iop_Widen8Uto16x8:  size = 0; break;
+               case Iop_Widen16Uto32x4: size = 1; break;
+               case Iop_Widen32Uto64x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPYLU,
+                                          res, arg, size, True));
+            return res;
+         }
+         case Iop_Widen8Sto16x8:
+         case Iop_Widen16Sto32x4:
+         case Iop_Widen32Sto64x2: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            UInt size;
+            switch (e->Iex.Unop.op) {
+               case Iop_Widen8Sto16x8:  size = 0; break;
+               case Iop_Widen16Sto32x4: size = 1; break;
+               case Iop_Widen32Sto64x2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPYLS,
+                                          res, arg, size, True));
+            return res;
+         }
+         case Iop_PwAddL8Sx16:
+         case Iop_PwAddL16Sx8:
+         case Iop_PwAddL32Sx4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwAddL8Sx16: size = 0; break;
+               case Iop_PwAddL16Sx8: size = 1; break;
+               case Iop_PwAddL32Sx4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_PADDLS,
+                                          res, arg, size, True));
+            return res;
+         }
+         case Iop_PwAddL8Ux16:
+         case Iop_PwAddL16Ux8:
+         case Iop_PwAddL32Ux4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwAddL8Ux16: size = 0; break;
+               case Iop_PwAddL16Ux8: size = 1; break;
+               case Iop_PwAddL32Ux4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_PADDLU,
+                                          res, arg, size, True));
+            return res;
+         }
+         case Iop_Cnt8x16: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NUnary(ARMneon_CNT, res, arg, size, True));
+            return res;
+         }
+         case Iop_Clz8x16:
+         case Iop_Clz16x8:
+         case Iop_Clz32x4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Clz8x16: size = 0; break;
+               case Iop_Clz16x8: size = 1; break;
+               case Iop_Clz32x4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_CLZ, res, arg, size, True));
+            return res;
+         }
+         case Iop_Cls8x16:
+         case Iop_Cls16x8:
+         case Iop_Cls32x4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Cls8x16: size = 0; break;
+               case Iop_Cls16x8: size = 1; break;
+               case Iop_Cls32x4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_CLS, res, arg, size, True));
+            return res;
+         }
+         case Iop_FtoI32Sx4_RZ: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTFtoS,
+                                          res, arg, 2, True));
+            return res;
+         }
+         case Iop_FtoI32Ux4_RZ: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTFtoU,
+                                          res, arg, 2, True));
+            return res;
+         }
+         case Iop_I32StoFx4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTStoF,
+                                          res, arg, 2, True));
+            return res;
+         }
+         case Iop_I32UtoFx4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTUtoF,
+                                          res, arg, 2, True));
+            return res;
+         }
+         case Iop_F16toF32x4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VCVTF16toF32,
+                                          res, arg, 2, True));
+            return res;
+         }
+         case Iop_RecipEst32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VRECIPF,
+                                          res, argL, 0, True));
+            return res;
+         }
+         case Iop_RecipEst32Ux4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VRECIP,
+                                          res, argL, 0, True));
+            return res;
+         }
+         case Iop_Abs32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VABSFP,
+                                          res, argL, 0, True));
+            return res;
+         }
+         case Iop_RSqrtEst32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VRSQRTEFP,
+                                          res, argL, 0, True));
+            return res;
+         }
+         case Iop_RSqrtEst32Ux4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VRSQRTE,
+                                          res, argL, 0, True));
+            return res;
+         }
+         case Iop_Neg32Fx4: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_NUnary(ARMneon_VNEGF,
+                                          res, arg, 0, True));
+            return res;
+         }
+         /* ... */
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         case Iop_64HLtoV128:
+            /* Try to match into single "VMOV reg, imm" instruction */
+            if (e->Iex.Binop.arg1->tag == Iex_Const &&
+                e->Iex.Binop.arg2->tag == Iex_Const &&
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg1) == Ity_I64 &&
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) == Ity_I64 &&
+                e->Iex.Binop.arg1->Iex.Const.con->Ico.U64 ==
+                           e->Iex.Binop.arg2->Iex.Const.con->Ico.U64) {
+               ULong imm64 = e->Iex.Binop.arg2->Iex.Const.con->Ico.U64;
+               ARMNImm *imm = Imm64_to_ARMNImm(imm64);
+               if (imm) {
+                  HReg res = newVRegV(env);
+                  addInstr(env, ARMInstr_NeonImm(res, imm));
+                  return res;
+               }
+               if ((imm64 >> 32) == 0LL &&
+                   (imm = Imm64_to_ARMNImm(imm64 | (imm64 << 32))) != NULL) {
+                  HReg tmp1 = newVRegV(env);
+                  HReg tmp2 = newVRegV(env);
+                  HReg res = newVRegV(env);
+                  if (imm->type < 10) {
+                     addInstr(env, ARMInstr_NeonImm(tmp1, ARMNImm_TI(9,0x0f)));
+                     addInstr(env, ARMInstr_NeonImm(tmp2, imm));
+                     addInstr(env, ARMInstr_NBinary(ARMneon_VAND,
+                                                    res, tmp1, tmp2, 4, True));
+                     return res;
+                  }
+               }
+               if ((imm64 & 0xFFFFFFFFLL) == 0LL &&
+                   (imm = Imm64_to_ARMNImm(imm64 | (imm64 >> 32))) != NULL) {
+                  HReg tmp1 = newVRegV(env);
+                  HReg tmp2 = newVRegV(env);
+                  HReg res = newVRegV(env);
+                  if (imm->type < 10) {
+                     addInstr(env, ARMInstr_NeonImm(tmp1, ARMNImm_TI(9,0xf0)));
+                     addInstr(env, ARMInstr_NeonImm(tmp2, imm));
+                     addInstr(env, ARMInstr_NBinary(ARMneon_VAND,
+                                                    res, tmp1, tmp2, 4, True));
+                     return res;
+                  }
+               }
+            }
+            /* Does not match "VMOV Reg, Imm" form.  We'll have to do
+               it the slow way. */
+            { 
+               /* local scope */
+               /* Done via the stack for ease of use. */
+               /* FIXME: assumes little endian host */
+               HReg       w3, w2, w1, w0;
+               HReg       res  = newVRegV(env);
+               ARMAMode1* sp_0  = ARMAMode1_RI(hregARM_R13(), 0);
+               ARMAMode1* sp_4  = ARMAMode1_RI(hregARM_R13(), 4);
+               ARMAMode1* sp_8  = ARMAMode1_RI(hregARM_R13(), 8);
+               ARMAMode1* sp_12 = ARMAMode1_RI(hregARM_R13(), 12);
+               ARMRI84*   c_16  = ARMRI84_I84(16,0);
+               /* Make space for SP */
+               addInstr(env, ARMInstr_Alu(ARMalu_SUB, hregARM_R13(),
+                                                      hregARM_R13(), c_16));
+
+               /* Store the less significant 64 bits */
+               iselInt64Expr(&w1, &w0, env, e->Iex.Binop.arg2);
+               addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*store*/,
+                                             w0, sp_0));
+               addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*store*/,
+                                             w1, sp_4));
+         
+               /* Store the more significant 64 bits */
+               iselInt64Expr(&w3, &w2, env, e->Iex.Binop.arg1);
+               addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*store*/,
+                                             w2, sp_8));
+               addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*store*/,
+                                             w3, sp_12));
+         
+                /* Load result back from stack. */
+                addInstr(env, ARMInstr_NLdStQ(True/*load*/, res,
+                                              mkARMAModeN_R(hregARM_R13())));
+
+                /* Restore SP */
+                addInstr(env, ARMInstr_Alu(ARMalu_ADD, hregARM_R13(),
+                                           hregARM_R13(), c_16));
+                return res;
+            } /* local scope */
+            goto neon_expr_bad;
+         case Iop_AndV128: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VAND,
+                                           res, argL, argR, 4, True));
+            return res;
+         }
+         case Iop_OrV128: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VORR,
+                                           res, argL, argR, 4, True));
+            return res;
+         }
+         case Iop_XorV128: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VXOR,
+                                           res, argL, argR, 4, True));
+            return res;
+         }
+         case Iop_Add8x16:
+         case Iop_Add16x8:
+         case Iop_Add32x4:
+         case Iop_Add64x2: {
+            /* 
+            FIXME: remove this if not used
+            DECLARE_PATTERN(p_vrhadd_32sx4);
+            ULong one = (1LL << 32) | 1LL;
+            DEFINE_PATTERN(p_vrhadd_32sx4,
+                  binop(Iop_Add32x4,
+                        binop(Iop_Add32x4,
+                              binop(Iop_SarN32x4,
+                                    bind(0),
+                                    mkU8(1)),
+                              binop(Iop_SarN32x4,
+                                    bind(1),
+                                    mkU8(1))),
+                        binop(Iop_SarN32x4,
+                              binop(Iop_Add32x4,
+                                    binop(Iop_Add32x4,
+                                          binop(Iop_AndV128,
+                                                bind(0),
+                                                mkU128(one)),
+                                          binop(Iop_AndV128,
+                                                bind(1),
+                                                mkU128(one))),
+                                    mkU128(one)),
+                              mkU8(1))));
+            */
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Add8x16: size = 0; break;
+               case Iop_Add16x8: size = 1; break;
+               case Iop_Add32x4: size = 2; break;
+               case Iop_Add64x2: size = 3; break;
+               default:
+                  ppIROp(e->Iex.Binop.op);
+                  vpanic("Illegal element size in VADD");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VADD,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_RecipStep32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VRECPS,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_RSqrtStep32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VRSQRTS,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+
+         // These 6 verified 18 Apr 2013
+         case Iop_InterleaveEvenLanes8x16:
+         case Iop_InterleaveOddLanes8x16:
+         case Iop_InterleaveEvenLanes16x8:
+         case Iop_InterleaveOddLanes16x8:
+         case Iop_InterleaveEvenLanes32x4:
+         case Iop_InterleaveOddLanes32x4: {
+            HReg rD   = newVRegV(env);
+            HReg rM   = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            Bool resRd;  // is the result in rD or rM ?
+            switch (e->Iex.Binop.op) {
+               case Iop_InterleaveOddLanes8x16:  resRd = False; size = 0; break;
+               case Iop_InterleaveEvenLanes8x16: resRd = True;  size = 0; break;
+               case Iop_InterleaveOddLanes16x8:  resRd = False; size = 1; break;
+               case Iop_InterleaveEvenLanes16x8: resRd = True;  size = 1; break;
+               case Iop_InterleaveOddLanes32x4:  resRd = False; size = 2; break;
+               case Iop_InterleaveEvenLanes32x4: resRd = True;  size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rM, argL, 4, True));
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rD, argR, 4, True));
+            addInstr(env, ARMInstr_NDual(ARMneon_TRN, rD, rM, size, True));
+            return resRd ? rD : rM;
+         }
+
+         // These 6 verified 18 Apr 2013
+         case Iop_InterleaveHI8x16:
+         case Iop_InterleaveLO8x16:
+         case Iop_InterleaveHI16x8:
+         case Iop_InterleaveLO16x8:
+         case Iop_InterleaveHI32x4:
+         case Iop_InterleaveLO32x4: {
+            HReg rD   = newVRegV(env);
+            HReg rM   = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            Bool resRd;  // is the result in rD or rM ?
+            switch (e->Iex.Binop.op) {
+               case Iop_InterleaveHI8x16: resRd = False; size = 0; break;
+               case Iop_InterleaveLO8x16: resRd = True;  size = 0; break;
+               case Iop_InterleaveHI16x8: resRd = False; size = 1; break;
+               case Iop_InterleaveLO16x8: resRd = True;  size = 1; break;
+               case Iop_InterleaveHI32x4: resRd = False; size = 2; break;
+               case Iop_InterleaveLO32x4: resRd = True;  size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rM, argL, 4, True));
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rD, argR, 4, True));
+            addInstr(env, ARMInstr_NDual(ARMneon_ZIP, rD, rM, size, True));
+            return resRd ? rD : rM;
+         }
+
+         // These 6 verified 18 Apr 2013
+         case Iop_CatOddLanes8x16:
+         case Iop_CatEvenLanes8x16:
+         case Iop_CatOddLanes16x8:
+         case Iop_CatEvenLanes16x8:
+         case Iop_CatOddLanes32x4:
+         case Iop_CatEvenLanes32x4: {
+            HReg rD   = newVRegV(env);
+            HReg rM   = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            Bool resRd;  // is the result in rD or rM ?
+            switch (e->Iex.Binop.op) {
+               case Iop_CatOddLanes8x16:  resRd = False; size = 0; break;
+               case Iop_CatEvenLanes8x16: resRd = True;  size = 0; break;
+               case Iop_CatOddLanes16x8:  resRd = False; size = 1; break;
+               case Iop_CatEvenLanes16x8: resRd = True;  size = 1; break;
+               case Iop_CatOddLanes32x4:  resRd = False; size = 2; break;
+               case Iop_CatEvenLanes32x4: resRd = True;  size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rM, argL, 4, True));
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, rD, argR, 4, True));
+            addInstr(env, ARMInstr_NDual(ARMneon_UZP, rD, rM, size, True));
+            return resRd ? rD : rM;
+         }
+
+         case Iop_QAdd8Ux16:
+         case Iop_QAdd16Ux8:
+         case Iop_QAdd32Ux4:
+         case Iop_QAdd64Ux2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QAdd8Ux16: size = 0; break;
+               case Iop_QAdd16Ux8: size = 1; break;
+               case Iop_QAdd32Ux4: size = 2; break;
+               case Iop_QAdd64Ux2: size = 3; break;
+               default:
+                  ppIROp(e->Iex.Binop.op);
+                  vpanic("Illegal element size in VQADDU");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQADDU,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_QAdd8Sx16:
+         case Iop_QAdd16Sx8:
+         case Iop_QAdd32Sx4:
+         case Iop_QAdd64Sx2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QAdd8Sx16: size = 0; break;
+               case Iop_QAdd16Sx8: size = 1; break;
+               case Iop_QAdd32Sx4: size = 2; break;
+               case Iop_QAdd64Sx2: size = 3; break;
+               default:
+                  ppIROp(e->Iex.Binop.op);
+                  vpanic("Illegal element size in VQADDS");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQADDS,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Sub8x16:
+         case Iop_Sub16x8:
+         case Iop_Sub32x4:
+         case Iop_Sub64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Sub8x16: size = 0; break;
+               case Iop_Sub16x8: size = 1; break;
+               case Iop_Sub32x4: size = 2; break;
+               case Iop_Sub64x2: size = 3; break;
+               default:
+                  ppIROp(e->Iex.Binop.op);
+                  vpanic("Illegal element size in VSUB");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VSUB,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_QSub8Ux16:
+         case Iop_QSub16Ux8:
+         case Iop_QSub32Ux4:
+         case Iop_QSub64Ux2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QSub8Ux16: size = 0; break;
+               case Iop_QSub16Ux8: size = 1; break;
+               case Iop_QSub32Ux4: size = 2; break;
+               case Iop_QSub64Ux2: size = 3; break;
+               default:
+                  ppIROp(e->Iex.Binop.op);
+                  vpanic("Illegal element size in VQSUBU");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQSUBU,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_QSub8Sx16:
+         case Iop_QSub16Sx8:
+         case Iop_QSub32Sx4:
+         case Iop_QSub64Sx2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QSub8Sx16: size = 0; break;
+               case Iop_QSub16Sx8: size = 1; break;
+               case Iop_QSub32Sx4: size = 2; break;
+               case Iop_QSub64Sx2: size = 3; break;
+               default:
+                  ppIROp(e->Iex.Binop.op);
+                  vpanic("Illegal element size in VQSUBS");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQSUBS,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Max8Ux16:
+         case Iop_Max16Ux8:
+         case Iop_Max32Ux4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Max8Ux16: size = 0; break;
+               case Iop_Max16Ux8: size = 1; break;
+               case Iop_Max32Ux4: size = 2; break;
+               default: vpanic("Illegal element size in VMAXU");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMAXU,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Max8Sx16:
+         case Iop_Max16Sx8:
+         case Iop_Max32Sx4: {
+            /* Signed per-lane max.  Emits Neon VMAX (signed). */
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Max8Sx16: size = 0; break;
+               case Iop_Max16Sx8: size = 1; break;
+               case Iop_Max32Sx4: size = 2; break;
+               /* Message previously said "VMAXU" (copy/paste from the
+                  unsigned case); this arm emits VMAXS. */
+               default: vpanic("Illegal element size in VMAXS");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMAXS,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Min8Ux16:
+         case Iop_Min16Ux8:
+         case Iop_Min32Ux4: {
+            /* Unsigned per-lane min.  Emits Neon VMIN (unsigned). */
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Min8Ux16: size = 0; break;
+               case Iop_Min16Ux8: size = 1; break;
+               case Iop_Min32Ux4: size = 2; break;
+               /* Message previously said "VMAXU" (copy/paste); this arm
+                  emits VMINU. */
+               default: vpanic("Illegal element size in VMINU");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMINU,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Min8Sx16:
+         case Iop_Min16Sx8:
+         case Iop_Min32Sx4: {
+            /* Signed per-lane min.  Emits Neon VMIN (signed). */
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Min8Sx16: size = 0; break;
+               case Iop_Min16Sx8: size = 1; break;
+               case Iop_Min32Sx4: size = 2; break;
+               /* Message previously said "VMAXU" (copy/paste); this arm
+                  emits VMINS. */
+               default: vpanic("Illegal element size in VMINS");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMINS,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Sar8x16:
+         case Iop_Sar16x8:
+         case Iop_Sar32x4:
+         case Iop_Sar64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            HReg argR2 = newVRegV(env);
+            HReg zero = newVRegV(env);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Sar8x16: size = 0; break;
+               case Iop_Sar16x8: size = 1; break;
+               case Iop_Sar32x4: size = 2; break;
+               case Iop_Sar64x2: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NeonImm(zero, ARMNImm_TI(0,0)));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VSUB,
+                                           argR2, zero, argR, size, True));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSAL,
+                                          res, argL, argR2, size, True));
+            return res;
+         }
+         case Iop_Sal8x16:
+         case Iop_Sal16x8:
+         case Iop_Sal32x4:
+         case Iop_Sal64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Sal8x16: size = 0; break;
+               case Iop_Sal16x8: size = 1; break;
+               case Iop_Sal32x4: size = 2; break;
+               case Iop_Sal64x2: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NShift(ARMneon_VSAL,
+                                          res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Shr8x16:
+         case Iop_Shr16x8:
+         case Iop_Shr32x4:
+         case Iop_Shr64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            HReg argR2 = newVRegV(env);
+            HReg zero = newVRegV(env);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Shr8x16: size = 0; break;
+               case Iop_Shr16x8: size = 1; break;
+               case Iop_Shr32x4: size = 2; break;
+               case Iop_Shr64x2: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NeonImm(zero, ARMNImm_TI(0,0)));
+            addInstr(env, ARMInstr_NBinary(ARMneon_VSUB,
+                                           argR2, zero, argR, size, True));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          res, argL, argR2, size, True));
+            return res;
+         }
+         case Iop_Shl8x16:
+         case Iop_Shl16x8:
+         case Iop_Shl32x4:
+         case Iop_Shl64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_Shl8x16: size = 0; break;
+               case Iop_Shl16x8: size = 1; break;
+               case Iop_Shl32x4: size = 2; break;
+               case Iop_Shl64x2: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_QShl8x16:
+         case Iop_QShl16x8:
+         case Iop_QShl32x4:
+         case Iop_QShl64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QShl8x16: size = 0; break;
+               case Iop_QShl16x8: size = 1; break;
+               case Iop_QShl32x4: size = 2; break;
+               case Iop_QShl64x2: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NShift(ARMneon_VQSHL,
+                                          res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_QSal8x16:
+         case Iop_QSal16x8:
+         case Iop_QSal32x4:
+         case Iop_QSal64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_QSal8x16: size = 0; break;
+               case Iop_QSal16x8: size = 1; break;
+               case Iop_QSal32x4: size = 2; break;
+               case Iop_QSal64x2: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NShift(ARMneon_VQSAL,
+                                          res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_QShlNsatUU8x16:
+         case Iop_QShlNsatUU16x8:
+         case Iop_QShlNsatUU32x4:
+         case Iop_QShlNsatUU64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            UInt size, imm;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+               vpanic("ARM target supports Iop_QShlNsatUUAxB with constant "
+                      "second argument only\n");
+            }
+            imm = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch (e->Iex.Binop.op) {
+               case Iop_QShlNsatUU8x16: size = 8 | imm; break;
+               case Iop_QShlNsatUU16x8: size = 16 | imm; break;
+               case Iop_QShlNsatUU32x4: size = 32 | imm; break;
+               case Iop_QShlNsatUU64x2: size = 64 | imm; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_VQSHLNUU,
+                                          res, argL, size, True));
+            return res;
+         }
+         case Iop_QShlNsatSU8x16:
+         case Iop_QShlNsatSU16x8:
+         case Iop_QShlNsatSU32x4:
+         case Iop_QShlNsatSU64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            UInt size, imm;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+               vpanic("ARM target supports Iop_QShlNsatSUAxB with constant "
+                      "second argument only\n");
+            }
+            imm = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch (e->Iex.Binop.op) {
+               case Iop_QShlNsatSU8x16: size = 8 | imm; break;
+               case Iop_QShlNsatSU16x8: size = 16 | imm; break;
+               case Iop_QShlNsatSU32x4: size = 32 | imm; break;
+               case Iop_QShlNsatSU64x2: size = 64 | imm; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_VQSHLNUS,
+                                          res, argL, size, True));
+            return res;
+         }
+         case Iop_QShlNsatSS8x16:
+         case Iop_QShlNsatSS16x8:
+         case Iop_QShlNsatSS32x4:
+         case Iop_QShlNsatSS64x2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            UInt size, imm;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+               vpanic("ARM target supports Iop_QShlNsatSSAxB with constant "
+                      "second argument only\n");
+            }
+            imm = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch (e->Iex.Binop.op) {
+               case Iop_QShlNsatSS8x16: size = 8 | imm; break;
+               case Iop_QShlNsatSS16x8: size = 16 | imm; break;
+               case Iop_QShlNsatSS32x4: size = 32 | imm; break;
+               case Iop_QShlNsatSS64x2: size = 64 | imm; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_VQSHLNSS,
+                                          res, argL, size, True));
+            return res;
+         }
+         case Iop_ShrN8x16:
+         case Iop_ShrN16x8:
+         case Iop_ShrN32x4:
+         case Iop_ShrN64x2: {
+            HReg res = newVRegV(env);
+            HReg tmp = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            HReg argR2 = newVRegI(env);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_ShrN8x16: size = 0; break;
+               case Iop_ShrN16x8: size = 1; break;
+               case Iop_ShrN32x4: size = 2; break;
+               case Iop_ShrN64x2: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_Unary(ARMun_NEG, argR2, argR));
+            addInstr(env, ARMInstr_NUnary(ARMneon_DUP,
+                                          tmp, argR2, 0, True));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          res, argL, tmp, size, True));
+            return res;
+         }
+         case Iop_ShlN8x16:
+         case Iop_ShlN16x8:
+         case Iop_ShlN32x4:
+         case Iop_ShlN64x2: {
+            HReg res = newVRegV(env);
+            HReg tmp = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_ShlN8x16: size = 0; break;
+               case Iop_ShlN16x8: size = 1; break;
+               case Iop_ShlN32x4: size = 2; break;
+               case Iop_ShlN64x2: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_DUP, tmp, argR, 0, True));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSHL,
+                                          res, argL, tmp, size, True));
+            return res;
+         }
+         case Iop_SarN8x16:
+         case Iop_SarN16x8:
+         case Iop_SarN32x4:
+         case Iop_SarN64x2: {
+            HReg res = newVRegV(env);
+            HReg tmp = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            HReg argR2 = newVRegI(env);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_SarN8x16: size = 0; break;
+               case Iop_SarN16x8: size = 1; break;
+               case Iop_SarN32x4: size = 2; break;
+               case Iop_SarN64x2: size = 3; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_Unary(ARMun_NEG, argR2, argR));
+            addInstr(env, ARMInstr_NUnary(ARMneon_DUP, tmp, argR2, 0, True));
+            addInstr(env, ARMInstr_NShift(ARMneon_VSAL,
+                                          res, argL, tmp, size, True));
+            return res;
+         }
+         case Iop_CmpGT8Ux16:
+         case Iop_CmpGT16Ux8:
+         case Iop_CmpGT32Ux4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_CmpGT8Ux16: size = 0; break;
+               case Iop_CmpGT16Ux8: size = 1; break;
+               case Iop_CmpGT32Ux4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCGTU,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_CmpGT8Sx16:
+         case Iop_CmpGT16Sx8:
+         case Iop_CmpGT32Sx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_CmpGT8Sx16: size = 0; break;
+               case Iop_CmpGT16Sx8: size = 1; break;
+               case Iop_CmpGT32Sx4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCGTS,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_CmpEQ8x16:
+         case Iop_CmpEQ16x8:
+         case Iop_CmpEQ32x4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size;
+            switch (e->Iex.Binop.op) {
+               case Iop_CmpEQ8x16: size = 0; break;
+               case Iop_CmpEQ16x8: size = 1; break;
+               case Iop_CmpEQ32x4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCEQ,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Mul8x16:
+         case Iop_Mul16x8:
+         case Iop_Mul32x4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Mul8x16: size = 0; break;
+               case Iop_Mul16x8: size = 1; break;
+               case Iop_Mul32x4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMUL,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Mull8Ux8:
+         case Iop_Mull16Ux4:
+         case Iop_Mull32Ux2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Mull8Ux8: size = 0; break;
+               case Iop_Mull16Ux4: size = 1; break;
+               case Iop_Mull32Ux2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMULLU,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+
+         case Iop_Mull8Sx8:
+         case Iop_Mull16Sx4:
+         case Iop_Mull32Sx2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_Mull8Sx8: size = 0; break;
+               case Iop_Mull16Sx4: size = 1; break;
+               case Iop_Mull32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMULLS,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+
+         case Iop_QDMulHi16Sx8:
+         case Iop_QDMulHi32Sx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_QDMulHi16Sx8: size = 1; break;
+               case Iop_QDMulHi32Sx4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQDMULH,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+
+         case Iop_QRDMulHi16Sx8:
+         case Iop_QRDMulHi32Sx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_QRDMulHi16Sx8: size = 1; break;
+               case Iop_QRDMulHi32Sx4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQRDMULH,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+
+         case Iop_QDMull16Sx4:
+         case Iop_QDMull32Sx2: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_QDMull16Sx4: size = 1; break;
+               case Iop_QDMull32Sx2: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VQDMULL,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_PolynomialMul8x16: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMULP,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_Max32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMAXF,
+                                           res, argL, argR, 2, True));
+            return res;
+         }
+         case Iop_Min32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMINF,
+                                           res, argL, argR, 2, True));
+            return res;
+         }
+         case Iop_PwMax32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPMAXF,
+                                           res, argL, argR, 2, True));
+            return res;
+         }
+         case Iop_PwMin32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPMINF,
+                                           res, argL, argR, 2, True));
+            return res;
+         }
+         case Iop_CmpGT32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCGTF,
+                                           res, argL, argR, 2, True));
+            return res;
+         }
+         case Iop_CmpGE32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCGEF,
+                                           res, argL, argR, 2, True));
+            return res;
+         }
+         case Iop_CmpEQ32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            addInstr(env, ARMInstr_NBinary(ARMneon_VCEQF,
+                                           res, argL, argR, 2, True));
+            return res;
+         }
+
+         case Iop_PolynomialMull8x8: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeon64Expr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            addInstr(env, ARMInstr_NBinary(ARMneon_VMULLP,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         case Iop_F32ToFixed32Ux4_RZ:
+         case Iop_F32ToFixed32Sx4_RZ:
+         case Iop_Fixed32UToF32x4_RN:
+         case Iop_Fixed32SToF32x4_RN: {
+            HReg res = newVRegV(env);
+            HReg arg = iselNeonExpr(env, e->Iex.Binop.arg1);
+            ARMNeonUnOp op;
+            UInt imm6;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+               typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+                  vpanic("ARM supports FP <-> Fixed conversion with constant "
+                         "second argument less than 33 only\n");
+            }
+            imm6 = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            vassert(imm6 <= 32 && imm6 > 0);
+            imm6 = 64 - imm6;
+            switch(e->Iex.Binop.op) {
+               case Iop_F32ToFixed32Ux4_RZ: op = ARMneon_VCVTFtoFixedU; break;
+               case Iop_F32ToFixed32Sx4_RZ: op = ARMneon_VCVTFtoFixedS; break;
+               case Iop_Fixed32UToF32x4_RN: op = ARMneon_VCVTFixedUtoF; break;
+               case Iop_Fixed32SToF32x4_RN: op = ARMneon_VCVTFixedStoF; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NUnary(op, res, arg, imm6, True));
+            return res;
+         }
+         /*
+         FIXME remove if not used
+         case Iop_VDup8x16:
+         case Iop_VDup16x8:
+         case Iop_VDup32x4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeon64Expr(env, e->Iex.Binop.arg1);
+            UInt imm4;
+            UInt index;
+            if (e->Iex.Binop.arg2->tag != Iex_Const ||
+               typeOfIRExpr(env->type_env, e->Iex.Binop.arg2) != Ity_I8) {
+                  vpanic("ARM supports Iop_VDup with constant "
+                         "second argument less than 16 only\n");
+            }
+            index = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            switch(e->Iex.Binop.op) {
+               case Iop_VDup8x16: imm4 = (index << 1) + 1; break;
+               case Iop_VDup16x8: imm4 = (index << 2) + 2; break;
+               case Iop_VDup32x4: imm4 = (index << 3) + 4; break;
+               default: vassert(0);
+            }
+            if (imm4 >= 16) {
+               vpanic("ARM supports Iop_VDup with constant "
+                      "second argument less than 16 only\n");
+            }
+            addInstr(env, ARMInstr_NUnary(ARMneon_VDUP,
+                                          res, argL, imm4, True));
+            return res;
+         }
+         */
+         case Iop_PwAdd8x16:
+         case Iop_PwAdd16x8:
+         case Iop_PwAdd32x4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, e->Iex.Binop.arg1);
+            HReg argR = iselNeonExpr(env, e->Iex.Binop.arg2);
+            UInt size = 0;
+            switch(e->Iex.Binop.op) {
+               case Iop_PwAdd8x16: size = 0; break;
+               case Iop_PwAdd16x8: size = 1; break;
+               case Iop_PwAdd32x4: size = 2; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VPADD,
+                                           res, argL, argR, size, True));
+            return res;
+         }
+         /* ... */
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Triop) {
+      IRTriop *triop = e->Iex.Triop.details;
+
+      switch (triop->op) {
+         case Iop_SliceV128: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, triop->arg2);
+            HReg argR = iselNeonExpr(env, triop->arg1);
+            UInt imm4;
+            if (triop->arg3->tag != Iex_Const ||
+                typeOfIRExpr(env->type_env, triop->arg3) != Ity_I8) {
+               vpanic("ARM target supports Iop_ExtractV128 with constant "
+                      "third argument less than 16 only\n");
+            }
+            imm4 = triop->arg3->Iex.Const.con->Ico.U8;
+            if (imm4 >= 16) {
+               vpanic("ARM target supports Iop_ExtractV128 with constant "
+                      "third argument less than 16 only\n");
+            }
+            addInstr(env, ARMInstr_NBinary(ARMneon_VEXT,
+                                           res, argL, argR, imm4, True));
+            return res;
+         }
+         case Iop_Mul32Fx4:
+         case Iop_Sub32Fx4:
+         case Iop_Add32Fx4: {
+            HReg res = newVRegV(env);
+            HReg argL = iselNeonExpr(env, triop->arg2);
+            HReg argR = iselNeonExpr(env, triop->arg3);
+            UInt size = 0;
+            ARMNeonBinOp op = ARMneon_INVALID;
+            switch (triop->op) {
+               case Iop_Mul32Fx4: op = ARMneon_VMULFP; break;
+               case Iop_Sub32Fx4: op = ARMneon_VSUBFP; break;
+               case Iop_Add32Fx4: op = ARMneon_VADDFP; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_NBinary(op, res, argL, argR, size, True));
+            return res;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_ITE) { // VFD
+      ARMCondCode cc;
+      HReg r1  = iselNeonExpr(env, e->Iex.ITE.iftrue);
+      HReg r0  = iselNeonExpr(env, e->Iex.ITE.iffalse);
+      HReg dst = newVRegV(env);
+      addInstr(env, ARMInstr_NUnary(ARMneon_COPY, dst, r1, 4, True));
+      cc = iselCondCode(env, e->Iex.ITE.cond);
+      addInstr(env, ARMInstr_NCMovQ(cc ^ 1, dst, r0));
+      return dst;
+   }
+
+  neon_expr_bad:
+   ppIRExpr(e);
+   vpanic("iselNeonExpr_wrk");
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (64 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 64-bit floating point value into a register, the identity
+   of which is returned.  As with iselIntExpr_R, the reg may be either
+   real or virtual; in any case it must not be changed by subsequent
+   code emitted by the caller.  */
+
+static HReg iselDblExpr ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then enforce its result invariants. */
+   HReg r = iselDblExpr_wrk( env, e );
+#  if 0
+   /* Debug aid: dump the expression being selected. */
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* The worker must return a *virtual* 64-bit FP register; real
+      registers or other classes indicate a selector bug. */
+   vassert(hregClass(r) == HRcFlt64);
+   vassert(hregIsVirtual(r));
+   return r;
+}
+
+/* DO NOT CALL THIS DIRECTLY.  Worker for iselDblExpr: dispatch on the
+   expression tag/opcode and emit ARM VFP (plus NEON, where the host
+   has it) instructions leaving the F64 result in a fresh virtual
+   register, which is returned.  Falls through to vpanic for any
+   F64-typed expression form not handled here. */
+static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_F64);
+
+   /* Reference to an IR temporary: return the vreg already bound
+      to it. */
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Const) {
+      /* Just handle the zero case. */
+      IRConst* con = e->Iex.Const.con;
+      if (con->tag == Ico_F64i && con->Ico.F64i == 0ULL) {
+         /* Materialise +0.0 by moving an all-zero integer register
+            into both 32-bit halves of a D register. */
+         HReg z32 = newVRegI(env);
+         HReg dst = newVRegD(env);
+         addInstr(env, ARMInstr_Imm32(z32, 0));
+         addInstr(env, ARMInstr_VXferD(True/*toD*/, dst, z32, z32));
+         return dst;
+      }
+      /* Other F64 constants fall through to the vpanic at the end. */
+   }
+
+   /* Little-endian 64-bit FP load. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      ARMAModeV* am;
+      HReg res = newVRegD(env);
+      vassert(e->Iex.Load.ty == Ity_F64);
+      am = iselIntExpr_AModeV(env, e->Iex.Load.addr);
+      addInstr(env, ARMInstr_VLdStD(True/*isLoad*/, res, am));
+      return res;
+   }
+
+   /* Read of guest state at a fixed offset, addressed relative to r8
+      (which this backend evidently uses as the guest-state pointer). */
+   if (e->tag == Iex_Get) {
+      // XXX This won't work if offset > 1020 or is not 0 % 4.
+      // In which case we'll have to generate more longwinded code.
+      ARMAModeV* am  = mkARMAModeV(hregARM_R8(), e->Iex.Get.offset);
+      HReg       res = newVRegD(env);
+      addInstr(env, ARMInstr_VLdStD(True/*isLoad*/, res, am));
+      return res;
+   }
+
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+         case Iop_ReinterpI64asF64: {
+            /* With NEON, the I64 already lives in a D register, so
+               the reinterpretation is free; otherwise assemble the
+               D register from the two 32-bit integer halves. */
+            if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+               return iselNeon64Expr(env, e->Iex.Unop.arg);
+            } else {
+               HReg srcHi, srcLo;
+               HReg dst = newVRegD(env);
+               iselInt64Expr(&srcHi, &srcLo, env, e->Iex.Unop.arg);
+               addInstr(env, ARMInstr_VXferD(True/*toD*/, dst, srcHi, srcLo));
+               return dst;
+            }
+         }
+         case Iop_NegF64: {
+            HReg src = iselDblExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARMInstr_VUnaryD(ARMvfpu_NEG, dst, src));
+            return dst;
+         }
+         case Iop_AbsF64: {
+            HReg src = iselDblExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARMInstr_VUnaryD(ARMvfpu_ABS, dst, src));
+            return dst;
+         }
+         case Iop_F32toF64: {
+            /* Widening F32 -> F64 conversion (exact, so no rounding
+               mode is involved). */
+            HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARMInstr_VCvtSD(True/*sToD*/, dst, src));
+            return dst;
+         }
+         case Iop_I32UtoF64:
+         case Iop_I32StoF64: {
+            /* Integer -> F64: route the int through an S register,
+               then convert in the FP unit. */
+            HReg src   = iselIntExpr_R(env, e->Iex.Unop.arg);
+            HReg f32   = newVRegF(env);
+            HReg dst   = newVRegD(env);
+            Bool syned = e->Iex.Unop.op == Iop_I32StoF64;
+            /* VMOV f32, src */
+            addInstr(env, ARMInstr_VXferS(True/*toS*/, f32, src));
+            /* FSITOD dst, f32 */
+            addInstr(env, ARMInstr_VCvtID(True/*iToD*/, syned,
+                                          dst, f32));
+            return dst;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         case Iop_SqrtF64: {
+            /* first arg is rounding mode; we ignore it. */
+            HReg src = iselDblExpr(env, e->Iex.Binop.arg2);
+            HReg dst = newVRegD(env);
+            addInstr(env, ARMInstr_VUnaryD(ARMvfpu_SQRT, dst, src));
+            return dst;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Triop) {
+      IRTriop *triop = e->Iex.Triop.details;
+
+      switch (triop->op) {
+         /* Binary FP arithmetic; arg1 is the rounding mode, which is
+            not consulted here (same policy as SqrtF64 above). */
+         case Iop_DivF64:
+         case Iop_MulF64:
+         case Iop_AddF64:
+         case Iop_SubF64: {
+            ARMVfpOp op = 0; /*INVALID*/
+            HReg argL = iselDblExpr(env, triop->arg2);
+            HReg argR = iselDblExpr(env, triop->arg3);
+            HReg dst  = newVRegD(env);
+            switch (triop->op) {
+               case Iop_DivF64: op = ARMvfp_DIV; break;
+               case Iop_MulF64: op = ARMvfp_MUL; break;
+               case Iop_AddF64: op = ARMvfp_ADD; break;
+               case Iop_SubF64: op = ARMvfp_SUB; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_VAluD(op, dst, argL, argR));
+            return dst;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_ITE) { // VFD
+      if (ty == Ity_F64
+          && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+         /* Copy the if-true value into dst, then conditionally
+            overwrite it with the if-false value; cc ^ 1 inverts the
+            condition so the move fires when cond is false. */
+         HReg r1  = iselDblExpr(env, e->Iex.ITE.iftrue);
+         HReg r0  = iselDblExpr(env, e->Iex.ITE.iffalse);
+         HReg dst = newVRegD(env);
+         addInstr(env, ARMInstr_VUnaryD(ARMvfpu_COPY, dst, r1));
+         ARMCondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+         addInstr(env, ARMInstr_VCMovD(cc ^ 1, dst, r0));
+         return dst;
+      }
+   }
+
+   /* No rule matched: dump the offending expression and give up. */
+   ppIRExpr(e);
+   vpanic("iselDblExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (32 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 32-bit floating point value into a register, the identity
+   of which is returned.  As with iselIntExpr_R, the reg may be either
+   real or virtual; in any case it must not be changed by subsequent
+   code emitted by the caller.  */
+
+static HReg iselFltExpr ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then enforce its result invariants. */
+   HReg r = iselFltExpr_wrk( env, e );
+#  if 0
+   /* Debug aid: dump the expression being selected. */
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* The worker must return a *virtual* 32-bit FP register. */
+   vassert(hregClass(r) == HRcFlt32);
+   vassert(hregIsVirtual(r));
+   return r;
+}
+
+/* DO NOT CALL THIS DIRECTLY.  Worker for iselFltExpr: dispatch on the
+   expression tag/opcode and emit ARM VFP instructions leaving the F32
+   result in a fresh virtual register, which is returned.  Mirrors the
+   structure of iselDblExpr_wrk; falls through to vpanic for anything
+   not handled. */
+static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_F32);
+
+   /* Reference to an IR temporary: return the vreg already bound
+      to it. */
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* Little-endian 32-bit FP load. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      ARMAModeV* am;
+      HReg res = newVRegF(env);
+      vassert(e->Iex.Load.ty == Ity_F32);
+      am = iselIntExpr_AModeV(env, e->Iex.Load.addr);
+      addInstr(env, ARMInstr_VLdStS(True/*isLoad*/, res, am));
+      return res;
+   }
+
+   /* Read of guest state at a fixed offset, addressed relative to r8
+      (which this backend evidently uses as the guest-state pointer). */
+   if (e->tag == Iex_Get) {
+      // XXX This won't work if offset > 1020 or is not 0 % 4.
+      // In which case we'll have to generate more longwinded code.
+      ARMAModeV* am  = mkARMAModeV(hregARM_R8(), e->Iex.Get.offset);
+      HReg       res = newVRegF(env);
+      addInstr(env, ARMInstr_VLdStS(True/*isLoad*/, res, am));
+      return res;
+   }
+
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+         case Iop_ReinterpI32asF32: {
+            /* Bit-for-bit move of an integer register into an S
+               register; no conversion is performed. */
+            HReg dst = newVRegF(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, ARMInstr_VXferS(True/*toS*/, dst, src));
+            return dst;
+         }
+         case Iop_NegF32: {
+            HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegF(env);
+            addInstr(env, ARMInstr_VUnaryS(ARMvfpu_NEG, dst, src));
+            return dst;
+         }
+         case Iop_AbsF32: {
+            HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegF(env);
+            addInstr(env, ARMInstr_VUnaryS(ARMvfpu_ABS, dst, src));
+            return dst;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         case Iop_SqrtF32: {
+            /* first arg is rounding mode; we ignore it. */
+            HReg src = iselFltExpr(env, e->Iex.Binop.arg2);
+            HReg dst = newVRegF(env);
+            addInstr(env, ARMInstr_VUnaryS(ARMvfpu_SQRT, dst, src));
+            return dst;
+         }
+         case Iop_F64toF32: {
+            /* Narrowing F64 -> F32: this one honours the requested
+               rounding mode (arg1), setting FPSCR around the convert
+               and restoring the default afterwards. */
+            HReg valD = iselDblExpr(env, e->Iex.Binop.arg2);
+            set_VFP_rounding_mode(env, e->Iex.Binop.arg1);
+            HReg valS = newVRegF(env);
+            /* FCVTSD valS, valD */
+            addInstr(env, ARMInstr_VCvtSD(False/*!sToD*/, valS, valD));
+            set_VFP_rounding_default(env);
+            return valS;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Triop) {
+      IRTriop *triop = e->Iex.Triop.details;
+
+      switch (triop->op) {
+         /* Binary FP arithmetic; arg1 is the rounding mode, which is
+            not consulted here (same policy as SqrtF32 above). */
+         case Iop_DivF32:
+         case Iop_MulF32:
+         case Iop_AddF32:
+         case Iop_SubF32: {
+            ARMVfpOp op = 0; /*INVALID*/
+            HReg argL = iselFltExpr(env, triop->arg2);
+            HReg argR = iselFltExpr(env, triop->arg3);
+            HReg dst  = newVRegF(env);
+            switch (triop->op) {
+               case Iop_DivF32: op = ARMvfp_DIV; break;
+               case Iop_MulF32: op = ARMvfp_MUL; break;
+               case Iop_AddF32: op = ARMvfp_ADD; break;
+               case Iop_SubF32: op = ARMvfp_SUB; break;
+               default: vassert(0);
+            }
+            addInstr(env, ARMInstr_VAluS(op, dst, argL, argR));
+            return dst;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_ITE) { // VFD
+      if (ty == Ity_F32
+          && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+         /* Copy the if-true value into dst, then conditionally
+            overwrite it with the if-false value; cc ^ 1 inverts the
+            condition so the move fires when cond is false. */
+         ARMCondCode cc;
+         HReg r1  = iselFltExpr(env, e->Iex.ITE.iftrue);
+         HReg r0  = iselFltExpr(env, e->Iex.ITE.iffalse);
+         HReg dst = newVRegF(env);
+         addInstr(env, ARMInstr_VUnaryS(ARMvfpu_COPY, dst, r1));
+         cc = iselCondCode(env, e->Iex.ITE.cond);
+         addInstr(env, ARMInstr_VCMovS(cc ^ 1, dst, r0));
+         return dst;
+      }
+   }
+
+   /* No rule matched: dump the offending expression and give up. */
+   ppIRExpr(e);
+   vpanic("iselFltExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Statements                                  ---*/
+/*---------------------------------------------------------*/
+
+static void iselStmt ( ISelEnv* env, IRStmt* stmt )
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n-- ");
+      ppIRStmt(stmt);
+      vex_printf("\n");
+   }
+   switch (stmt->tag) {
+
+   /* --------- STORE --------- */
+   /* little-endian write to memory */
+   case Ist_Store: {
+      IRType    tya  = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
+      IRType    tyd  = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
+      IREndness end  = stmt->Ist.Store.end;
+
+      if (tya != Ity_I32 || end != Iend_LE) 
+         goto stmt_fail;
+
+      if (tyd == Ity_I32) {
+         HReg       rD = iselIntExpr_R(env, stmt->Ist.Store.data);
+         ARMAMode1* am = iselIntExpr_AMode1(env, stmt->Ist.Store.addr);
+         addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!isLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_I16) {
+         HReg       rD = iselIntExpr_R(env, stmt->Ist.Store.data);
+         ARMAMode2* am = iselIntExpr_AMode2(env, stmt->Ist.Store.addr);
+         addInstr(env, ARMInstr_LdSt16(ARMcc_AL,
+                                       False/*!isLoad*/,
+                                       False/*!isSignedLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_I8) {
+         HReg       rD = iselIntExpr_R(env, stmt->Ist.Store.data);
+         ARMAMode1* am = iselIntExpr_AMode1(env, stmt->Ist.Store.addr);
+         addInstr(env, ARMInstr_LdSt8U(ARMcc_AL, False/*!isLoad*/, rD, am));
+         return;
+      }
+      if (tyd == Ity_I64) {
+         if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+            HReg dD = iselNeon64Expr(env, stmt->Ist.Store.data);
+            ARMAModeN* am = iselIntExpr_AModeN(env, stmt->Ist.Store.addr);
+            addInstr(env, ARMInstr_NLdStD(False, dD, am));
+         } else {
+            HReg rDhi, rDlo, rA;
+            iselInt64Expr(&rDhi, &rDlo, env, stmt->Ist.Store.data);
+            rA = iselIntExpr_R(env, stmt->Ist.Store.addr);
+            addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!load*/, rDhi,
+                                          ARMAMode1_RI(rA,4)));
+            addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!load*/, rDlo,
+                                          ARMAMode1_RI(rA,0)));
+         }
+         return;
+      }
+      if (tyd == Ity_F64) {
+         HReg       dD = iselDblExpr(env, stmt->Ist.Store.data);
+         ARMAModeV* am = iselIntExpr_AModeV(env, stmt->Ist.Store.addr);
+         addInstr(env, ARMInstr_VLdStD(False/*!isLoad*/, dD, am));
+         return;
+      }
+      if (tyd == Ity_F32) {
+         HReg       fD = iselFltExpr(env, stmt->Ist.Store.data);
+         ARMAModeV* am = iselIntExpr_AModeV(env, stmt->Ist.Store.addr);
+         addInstr(env, ARMInstr_VLdStS(False/*!isLoad*/, fD, am));
+         return;
+      }
+      if (tyd == Ity_V128) {
+         HReg       qD = iselNeonExpr(env, stmt->Ist.Store.data);
+         ARMAModeN* am = iselIntExpr_AModeN(env, stmt->Ist.Store.addr);
+         addInstr(env, ARMInstr_NLdStQ(False, qD, am));
+         return;
+      }
+
+      break;
+   }
+
+   /* --------- CONDITIONAL STORE --------- */
+   /* conditional little-endian write to memory */
+   case Ist_StoreG: {
+      IRStoreG* sg   = stmt->Ist.StoreG.details;
+      IRType    tya  = typeOfIRExpr(env->type_env, sg->addr);
+      IRType    tyd  = typeOfIRExpr(env->type_env, sg->data);
+      IREndness end  = sg->end;
+
+      if (tya != Ity_I32 || end != Iend_LE) 
+         goto stmt_fail;
+
+      switch (tyd) {
+         case Ity_I8:
+         case Ity_I32: {
+            HReg        rD = iselIntExpr_R(env, sg->data);
+            ARMAMode1*  am = iselIntExpr_AMode1(env, sg->addr);
+            ARMCondCode cc = iselCondCode(env, sg->guard);
+            addInstr(env, (tyd == Ity_I32 ? ARMInstr_LdSt32 : ARMInstr_LdSt8U)
+                             (cc, False/*!isLoad*/, rD, am));
+            return;
+         }
+         case Ity_I16: {
+            HReg        rD = iselIntExpr_R(env, sg->data);
+            ARMAMode2*  am = iselIntExpr_AMode2(env, sg->addr);
+            ARMCondCode cc = iselCondCode(env, sg->guard);
+            addInstr(env, ARMInstr_LdSt16(cc, 
+                                          False/*!isLoad*/,
+                                          False/*!isSignedLoad*/, rD, am));
+            return;
+         }
+         default:
+            break;
+      }
+      break;
+   }
+
+   /* --------- CONDITIONAL LOAD --------- */
+   /* conditional little-endian load from memory */
+   case Ist_LoadG: {
+      IRLoadG*  lg   = stmt->Ist.LoadG.details;
+      IRType    tya  = typeOfIRExpr(env->type_env, lg->addr);
+      IREndness end  = lg->end;
+
+      if (tya != Ity_I32 || end != Iend_LE) 
+         goto stmt_fail;
+
+      switch (lg->cvt) {
+         case ILGop_8Uto32:
+         case ILGop_Ident32: {
+            HReg        rAlt = iselIntExpr_R(env, lg->alt);
+            ARMAMode1*  am   = iselIntExpr_AMode1(env, lg->addr);
+            HReg        rD   = lookupIRTemp(env, lg->dst);
+            addInstr(env, mk_iMOVds_RR(rD, rAlt));
+            ARMCondCode cc   = iselCondCode(env, lg->guard);
+            addInstr(env, (lg->cvt == ILGop_Ident32 ? ARMInstr_LdSt32
+                                                    : ARMInstr_LdSt8U)
+                             (cc, True/*isLoad*/, rD, am));
+            return;
+         }
+         case ILGop_16Sto32:
+         case ILGop_16Uto32:
+         case ILGop_8Sto32: {
+            HReg        rAlt = iselIntExpr_R(env, lg->alt);
+            ARMAMode2*  am   = iselIntExpr_AMode2(env, lg->addr);
+            HReg        rD   = lookupIRTemp(env, lg->dst);
+            addInstr(env, mk_iMOVds_RR(rD, rAlt));
+            ARMCondCode cc   = iselCondCode(env, lg->guard);
+            if (lg->cvt == ILGop_8Sto32) {
+               addInstr(env, ARMInstr_Ld8S(cc, rD, am));
+            } else {
+               vassert(lg->cvt == ILGop_16Sto32 || lg->cvt == ILGop_16Uto32);
+               Bool sx = lg->cvt == ILGop_16Sto32;
+               addInstr(env, ARMInstr_LdSt16(cc, True/*isLoad*/, sx, rD, am));
+            }
+            return;
+         }
+         default:
+            break;
+      }
+      break;
+   }
+
+   /* --------- PUT --------- */
+   /* write guest state, fixed offset */
+   case Ist_Put: {
+       IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
+
+       if (tyd == Ity_I32) {
+           HReg       rD = iselIntExpr_R(env, stmt->Ist.Put.data);
+           ARMAMode1* am = ARMAMode1_RI(hregARM_R8(), stmt->Ist.Put.offset);
+           addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!isLoad*/, rD, am));
+           return;
+       }
+       if (tyd == Ity_I64) {
+          if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+             HReg addr = newVRegI(env);
+             HReg qD = iselNeon64Expr(env, stmt->Ist.Put.data);
+             addInstr(env, ARMInstr_Add32(addr, hregARM_R8(),
+                                                stmt->Ist.Put.offset));
+             addInstr(env, ARMInstr_NLdStD(False, qD, mkARMAModeN_R(addr)));
+          } else {
+             HReg rDhi, rDlo;
+             ARMAMode1* am0 = ARMAMode1_RI(hregARM_R8(),
+                                           stmt->Ist.Put.offset + 0);
+             ARMAMode1* am4 = ARMAMode1_RI(hregARM_R8(),
+                                           stmt->Ist.Put.offset + 4);
+             iselInt64Expr(&rDhi, &rDlo, env, stmt->Ist.Put.data);
+             addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!isLoad*/,
+                                           rDhi, am4));
+             addInstr(env, ARMInstr_LdSt32(ARMcc_AL, False/*!isLoad*/,
+                                           rDlo, am0));
+          }
+          return;
+       }
+       if (tyd == Ity_F64) {
+          // XXX This won't work if offset > 1020 or is not 0 % 4.
+          // In which case we'll have to generate more longwinded code.
+          ARMAModeV* am = mkARMAModeV(hregARM_R8(), stmt->Ist.Put.offset);
+          HReg       rD = iselDblExpr(env, stmt->Ist.Put.data);
+          addInstr(env, ARMInstr_VLdStD(False/*!isLoad*/, rD, am));
+          return;
+       }
+       if (tyd == Ity_F32) {
+          // XXX This won't work if offset > 1020 or is not 0 % 4.
+          // In which case we'll have to generate more longwinded code.
+          ARMAModeV* am = mkARMAModeV(hregARM_R8(), stmt->Ist.Put.offset);
+          HReg       rD = iselFltExpr(env, stmt->Ist.Put.data);
+          addInstr(env, ARMInstr_VLdStS(False/*!isLoad*/, rD, am));
+          return;
+       }
+       if (tyd == Ity_V128) {
+          HReg addr = newVRegI(env);
+          HReg qD = iselNeonExpr(env, stmt->Ist.Put.data);
+          addInstr(env, ARMInstr_Add32(addr, hregARM_R8(),
+                                       stmt->Ist.Put.offset));
+          addInstr(env, ARMInstr_NLdStQ(False, qD, mkARMAModeN_R(addr)));
+          return;
+       }
+       break;
+   }
+
+   /* --------- TMP --------- */
+   /* assign value to temporary */
+   case Ist_WrTmp: {
+      IRTemp tmp = stmt->Ist.WrTmp.tmp;
+      IRType ty = typeOfIRTemp(env->type_env, tmp);
+
+      if (ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) {
+         ARMRI84* ri84 = iselIntExpr_RI84(NULL, False,
+                                          env, stmt->Ist.WrTmp.data);
+         HReg     dst  = lookupIRTemp(env, tmp);
+         addInstr(env, ARMInstr_Mov(dst,ri84));
+         return;
+      }
+      if (ty == Ity_I1) {
+         /* Here, we are generating a I1 value into a 32 bit register.
+            Make sure the value in the register is only zero or one,
+            but no other.  This allows optimisation of the
+            1Uto32(tmp:I1) case, by making it simply a copy of the
+            register holding 'tmp'.  The point being that the value in
+            the register holding 'tmp' can only have been created
+            here. */
+         HReg        dst  = lookupIRTemp(env, tmp);
+         ARMCondCode cond = iselCondCode(env, stmt->Ist.WrTmp.data);
+         addInstr(env, ARMInstr_Mov(dst, ARMRI84_I84(0,0)));
+         addInstr(env, ARMInstr_CMov(cond, dst, ARMRI84_I84(1,0)));
+         return;
+      }
+      if (ty == Ity_I64) {
+         if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+            HReg src = iselNeon64Expr(env, stmt->Ist.WrTmp.data);
+            HReg dst = lookupIRTemp(env, tmp);
+            addInstr(env, ARMInstr_NUnary(ARMneon_COPY, dst, src, 4, False));
+         } else {
+            HReg rHi, rLo, dstHi, dstLo;
+            iselInt64Expr(&rHi,&rLo, env, stmt->Ist.WrTmp.data);
+            lookupIRTemp64( &dstHi, &dstLo, env, tmp);
+            addInstr(env, mk_iMOVds_RR(dstHi, rHi) );
+            addInstr(env, mk_iMOVds_RR(dstLo, rLo) );
+         }
+         return;
+      }
+      if (ty == Ity_F64) {
+         HReg src = iselDblExpr(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, ARMInstr_VUnaryD(ARMvfpu_COPY, dst, src));
+         return;
+      }
+      if (ty == Ity_F32) {
+         HReg src = iselFltExpr(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, ARMInstr_VUnaryS(ARMvfpu_COPY, dst, src));
+         return;
+      }
+      if (ty == Ity_V128) {
+         HReg src = iselNeonExpr(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, ARMInstr_NUnary(ARMneon_COPY, dst, src, 4, True));
+         return;
+      }
+      break;
+   }
+
+   /* --------- Call to DIRTY helper --------- */
+   /* call complex ("dirty") helper function */
+   case Ist_Dirty: {
+      IRDirty* d = stmt->Ist.Dirty.details;
+
+      /* Figure out the return type, if any. */
+      IRType retty = Ity_INVALID;
+      if (d->tmp != IRTemp_INVALID)
+         retty = typeOfIRTemp(env->type_env, d->tmp);
+
+      Bool retty_ok = False;
+      switch (retty) {
+         case Ity_INVALID: /* function doesn't return anything */
+         case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8:
+         //case Ity_V128: //ATC
+            retty_ok = True; break;
+         default:
+            break;
+      }
+      if (!retty_ok)
+         break; /* will go to stmt_fail: */
+
+      /* Marshal args, do the call, and set the return value to 0x555..555
+         if this is a conditional call that returns a value and the
+         call is skipped. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, d->guard, d->cee, retty, d->args );
+      vassert(is_sane_RetLoc(rloc));
+
+      /* Now figure out what to do with the returned value, if any. */
+      switch (retty) {
+         case Ity_INVALID: {
+            /* No return value.  Nothing to do. */
+            vassert(d->tmp == IRTemp_INVALID);
+            vassert(rloc.pri == RLPri_None);
+            vassert(addToSp == 0);
+            return;
+         }
+         case Ity_I64: {
+            vassert(rloc.pri == RLPri_2Int);
+            vassert(addToSp == 0);
+            if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+               HReg tmp = lookupIRTemp(env, d->tmp);
+               addInstr(env, ARMInstr_VXferD(True, tmp, hregARM_R1(),
+                                                        hregARM_R0()));
+            } else {
+               HReg dstHi, dstLo;
+               /* The returned value is in r1:r0.  Park it in the
+                  register-pair associated with tmp. */
+               lookupIRTemp64( &dstHi, &dstLo, env, d->tmp);
+               addInstr(env, mk_iMOVds_RR(dstHi, hregARM_R1()) );
+               addInstr(env, mk_iMOVds_RR(dstLo, hregARM_R0()) );
+            }
+            return;
+         }
+         case Ity_I32: case Ity_I16: case Ity_I8: {
+            vassert(rloc.pri == RLPri_Int);
+            vassert(addToSp == 0);
+            /* The returned value is in r0.  Park it in the register
+               associated with tmp. */
+            HReg dst = lookupIRTemp(env, d->tmp);
+            addInstr(env, mk_iMOVds_RR(dst, hregARM_R0()) );
+            return;
+         }
+         case Ity_V128: {
+            vassert(0); // ATC.  The code that this produces really
+            // needs to be looked at, to verify correctness.
+            // I don't think this can ever happen though, since the
+            // ARM front end never produces 128-bit loads/stores.
+            // Hence the following is mostly theoretical.
+            /* The returned value is on the stack, and *retloc tells
+               us where.  Fish it off the stack and then move the
+               stack pointer upwards to clear it, as directed by
+               doHelperCall. */
+            vassert(rloc.pri == RLPri_V128SpRel);
+            vassert(rloc.spOff < 256); // else ARMRI84_I84(_,0) can't encode it
+            vassert(addToSp >= 16);
+            vassert(addToSp < 256); // ditto reason as for rloc.spOff
+            HReg dst = lookupIRTemp(env, d->tmp);
+            HReg tmp = newVRegI(env);
+            HReg r13 = hregARM_R13(); // sp
+            addInstr(env, ARMInstr_Alu(ARMalu_ADD,
+                                       tmp, r13, ARMRI84_I84(rloc.spOff,0)));
+            ARMAModeN* am = mkARMAModeN_R(tmp);
+            addInstr(env, ARMInstr_NLdStQ(True/*load*/, dst, am));
+            addInstr(env, ARMInstr_Alu(ARMalu_ADD,
+                                       r13, r13, ARMRI84_I84(addToSp,0)));
+            return;
+         }
+         default:
+            /*NOTREACHED*/
+            vassert(0);
+      }
+      break;
+   }
+
+   /* --------- Load Linked and Store Conditional --------- */
+   case Ist_LLSC: {
+      if (stmt->Ist.LLSC.storedata == NULL) {
+         /* LL */
+         IRTemp res = stmt->Ist.LLSC.result;
+         IRType ty  = typeOfIRTemp(env->type_env, res);
+         if (ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) {
+            Int  szB   = 0;
+            HReg r_dst = lookupIRTemp(env, res);
+            HReg raddr = iselIntExpr_R(env, stmt->Ist.LLSC.addr);
+            switch (ty) {
+               case Ity_I8:  szB = 1; break;
+               case Ity_I16: szB = 2; break;
+               case Ity_I32: szB = 4; break;
+               default:      vassert(0);
+            }
+            addInstr(env, mk_iMOVds_RR(hregARM_R4(), raddr));
+            addInstr(env, ARMInstr_LdrEX(szB));
+            addInstr(env, mk_iMOVds_RR(r_dst, hregARM_R2()));
+            return;
+         }
+         if (ty == Ity_I64) {
+            HReg raddr = iselIntExpr_R(env, stmt->Ist.LLSC.addr);
+            addInstr(env, mk_iMOVds_RR(hregARM_R4(), raddr));
+            addInstr(env, ARMInstr_LdrEX(8));
+            /* Result is in r3:r2.  On a non-NEON capable CPU, we must
+               move it into a result register pair.  On a NEON capable
+               CPU, the result register will be a 64 bit NEON
+               register, so we must move it there instead. */
+            if (env->hwcaps & VEX_HWCAPS_ARM_NEON) {
+               HReg dst = lookupIRTemp(env, res);
+               addInstr(env, ARMInstr_VXferD(True, dst, hregARM_R3(),
+                                                        hregARM_R2()));
+            } else {
+               HReg r_dst_hi, r_dst_lo;
+               lookupIRTemp64(&r_dst_hi, &r_dst_lo, env, res);
+               addInstr(env, mk_iMOVds_RR(r_dst_lo, hregARM_R2()));
+               addInstr(env, mk_iMOVds_RR(r_dst_hi, hregARM_R3()));
+            }
+            return;
+         }
+         /*NOTREACHED*/
+         vassert(0); 
+      } else {
+         /* SC */
+         IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.storedata);
+         if (tyd == Ity_I32 || tyd == Ity_I16 || tyd == Ity_I8) {
+            Int  szB = 0;
+            HReg rD  = iselIntExpr_R(env, stmt->Ist.LLSC.storedata);
+            HReg rA  = iselIntExpr_R(env, stmt->Ist.LLSC.addr);
+            switch (tyd) {
+               case Ity_I8:  szB = 1; break;
+               case Ity_I16: szB = 2; break;
+               case Ity_I32: szB = 4; break;
+               default:      vassert(0);
+            }
+            addInstr(env, mk_iMOVds_RR(hregARM_R2(), rD));
+            addInstr(env, mk_iMOVds_RR(hregARM_R4(), rA));
+            addInstr(env, ARMInstr_StrEX(szB));
+         } else {
+            vassert(tyd == Ity_I64);
+            /* This is really ugly.  There is no is/is-not NEON
+               decision akin to the case for LL, because iselInt64Expr
+               fudges this for us, and always gets the result into two
+               GPRs even if this means moving it from a NEON
+               register. */
+            HReg rDhi, rDlo;
+            iselInt64Expr(&rDhi, &rDlo, env, stmt->Ist.LLSC.storedata);
+            HReg rA = iselIntExpr_R(env, stmt->Ist.LLSC.addr);
+            addInstr(env, mk_iMOVds_RR(hregARM_R2(), rDlo));
+            addInstr(env, mk_iMOVds_RR(hregARM_R3(), rDhi));
+            addInstr(env, mk_iMOVds_RR(hregARM_R4(), rA));
+            addInstr(env, ARMInstr_StrEX(8));
+         }
+         /* now r0 is 1 if failed, 0 if success.  Change to IR
+            conventions (0 is fail, 1 is success).  Also transfer
+            result to r_res. */
+         IRTemp   res   = stmt->Ist.LLSC.result;
+         IRType   ty    = typeOfIRTemp(env->type_env, res);
+         HReg     r_res = lookupIRTemp(env, res);
+         ARMRI84* one   = ARMRI84_I84(1,0);
+         vassert(ty == Ity_I1);
+         addInstr(env, ARMInstr_Alu(ARMalu_XOR, r_res, hregARM_R0(), one));
+         /* And be conservative -- mask off all but the lowest bit */
+         addInstr(env, ARMInstr_Alu(ARMalu_AND, r_res, r_res, one));
+         return;
+      }
+      break;
+   }
+
+   /* --------- MEM FENCE --------- */
+   case Ist_MBE:
+      switch (stmt->Ist.MBE.event) {
+         case Imbe_Fence:
+            addInstr(env, ARMInstr_MFence());
+            return;
+         case Imbe_CancelReservation:
+            addInstr(env, ARMInstr_CLREX());
+            return;
+         default:
+            break;
+      }
+      break;
+
+   /* --------- INSTR MARK --------- */
+   /* Doesn't generate any executable code ... */
+   case Ist_IMark:
+       return;
+
+   /* --------- NO-OP --------- */
+   case Ist_NoOp:
+       return;
+
+   /* --------- EXIT --------- */
+   case Ist_Exit: {
+      if (stmt->Ist.Exit.dst->tag != Ico_U32)
+         vpanic("isel_arm: Ist_Exit: dst is not a 32-bit value");
+
+      ARMCondCode cc     = iselCondCode(env, stmt->Ist.Exit.guard);
+      ARMAMode1*  amR15T = ARMAMode1_RI(hregARM_R8(),
+                                        stmt->Ist.Exit.offsIP);
+
+      /* Case: boring transfer to known address */
+      if (stmt->Ist.Exit.jk == Ijk_Boring
+          || stmt->Ist.Exit.jk == Ijk_Call
+          || stmt->Ist.Exit.jk == Ijk_Ret) {
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = stmt->Ist.Exit.dst->Ico.U32 > env->max_ga;
+            if (0) vex_printf("%s", toFastEP ? "Y" : ",");
+            addInstr(env, ARMInstr_XDirect(stmt->Ist.Exit.dst->Ico.U32,
+                                           amR15T, cc, toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselIntExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, ARMInstr_XAssisted(r, amR15T, cc, Ijk_Boring));
+         }
+         return;
+      }
+
+      /* Case: assisted transfer to arbitrary address */
+      switch (stmt->Ist.Exit.jk) {
+         /* Keep this list in sync with that in iselNext below */
+         case Ijk_ClientReq:
+         case Ijk_NoDecode:
+         case Ijk_NoRedir:
+         case Ijk_Sys_syscall:
+         case Ijk_InvalICache:
+         case Ijk_Yield:
+         {
+            HReg r = iselIntExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, ARMInstr_XAssisted(r, amR15T, cc,
+                                             stmt->Ist.Exit.jk));
+            return;
+         }
+         default:
+            break;
+      }
+
+      /* Do we ever expect to see any other kind? */
+      goto stmt_fail;
+   }
+
+   default: break;
+   }
+  stmt_fail:
+   ppIRStmt(stmt);
+   vpanic("iselStmt");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Basic block terminators (Nexts)             ---*/
+/*---------------------------------------------------------*/
+
+/* Generate code for the block-end control transfer: write the
+   successor guest address 'next' into the guest state at offset
+   'offsIP' (relative to r8, the guest state pointer) and emit the
+   exit instruction appropriate for jump kind 'jk'.  Constant
+   destinations use a chainable XDirect when env->chainingAllowed,
+   otherwise an assisted transfer; register destinations use XIndir
+   or XAssisted similarly.  Panics on any unhandled jump kind. */
+static void iselNext ( ISelEnv* env,
+                       IRExpr* next, IRJumpKind jk, Int offsIP )
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf( "\n-- PUT(%d) = ", offsIP);
+      ppIRExpr( next );
+      vex_printf( "; exit-");
+      ppIRJumpKind(jk);
+      vex_printf( "\n");
+   }
+
+   /* Case: boring transfer to known address */
+   if (next->tag == Iex_Const) {
+      IRConst* cdst = next->Iex.Const.con;
+      vassert(cdst->tag == Ico_U32);
+      if (jk == Ijk_Boring || jk == Ijk_Call) {
+         /* Boring transfer to known address */
+         ARMAMode1* amR15T = ARMAMode1_RI(hregARM_R8(), offsIP);
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = cdst->Ico.U32 > env->max_ga;
+            if (0) vex_printf("%s", toFastEP ? "X" : ".");
+            addInstr(env, ARMInstr_XDirect(cdst->Ico.U32,
+                                           amR15T, ARMcc_AL, 
+                                           toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselIntExpr_R(env, next);
+            addInstr(env, ARMInstr_XAssisted(r, amR15T, ARMcc_AL,
+                                             Ijk_Boring));
+         }
+         return;
+      }
+   }
+
+   /* Case: call/return (==boring) transfer to any address */
+   switch (jk) {
+      case Ijk_Boring: case Ijk_Ret: case Ijk_Call: {
+         HReg       r      = iselIntExpr_R(env, next);
+         ARMAMode1* amR15T = ARMAMode1_RI(hregARM_R8(), offsIP);
+         if (env->chainingAllowed) {
+            addInstr(env, ARMInstr_XIndir(r, amR15T, ARMcc_AL));
+         } else {
+            addInstr(env, ARMInstr_XAssisted(r, amR15T, ARMcc_AL,
+                                                Ijk_Boring));
+         }
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* Case: assisted transfer to arbitrary address */
+   switch (jk) {
+      /* Keep this list in sync with that for Ist_Exit above */
+      case Ijk_ClientReq:
+      case Ijk_NoDecode:
+      case Ijk_NoRedir:
+      case Ijk_Sys_syscall:
+      case Ijk_InvalICache:
+      case Ijk_Yield:
+      {
+         HReg       r      = iselIntExpr_R(env, next);
+         ARMAMode1* amR15T = ARMAMode1_RI(hregARM_R8(), offsIP);
+         addInstr(env, ARMInstr_XAssisted(r, amR15T, ARMcc_AL, jk));
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* No case matched: dump the offending next/jk and give up. */
+   vex_printf( "\n-- PUT(%d) = ", offsIP);
+   ppIRExpr( next );
+   vex_printf( "; exit-");
+   ppIRJumpKind(jk);
+   vex_printf( "\n");
+   vassert(0); // are we expecting any other kind?
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Insn selector top-level                           ---*/
+/*---------------------------------------------------------*/
+
+/* Translate an entire SB to arm code.  Returns a fresh HInstrArray of
+   virtual-registerised ARM instructions; the caller subsequently runs
+   register allocation over it.  Throughout, r8 is used as the guest
+   state pointer (all guest-state accesses go through
+   ARMAMode1_RI(hregARM_R8(), offset)). */
+
+HInstrArray* iselSB_ARM ( const IRSB* bb,
+                          VexArch      arch_host,
+                          const VexArchInfo* archinfo_host,
+                          const VexAbiInfo*  vbi/*UNUSED*/,
+                          Int offs_Host_EvC_Counter,
+                          Int offs_Host_EvC_FailAddr,
+                          Bool chainingAllowed,
+                          Bool addProfInc,
+                          Addr max_ga )
+{
+   Int       i, j;
+   HReg      hreg, hregHI;
+   ISelEnv*  env;
+   UInt      hwcaps_host = archinfo_host->hwcaps;
+   ARMAMode1 *amCounter, *amFailAddr;
+
+   /* sanity ... */
+   vassert(arch_host == VexArchARM);
+
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
+   /* guard against unexpected space regressions */
+   vassert(sizeof(ARMInstr) <= 28);
+
+   /* hwcaps should not change from one ISEL call to another. */
+   arm_hwcaps = hwcaps_host; // JRS 2012 Mar 31: FIXME (RM)
+
+   /* Make up an initial environment to use. */
+   env = LibVEX_Alloc_inline(sizeof(ISelEnv));
+   env->vreg_ctr = 0;
+
+   /* Set up output code array. */
+   env->code = newHInstrArray();
+    
+   /* Copy BB's type env. */
+   env->type_env = bb->tyenv;
+
+   /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
+      change as we go along. */
+   env->n_vregmap = bb->tyenv->types_used;
+   env->vregmap   = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+
+   /* and finally ... */
+   env->chainingAllowed = chainingAllowed;
+   env->hwcaps          = hwcaps_host;
+   env->max_ga          = max_ga;
+
+   /* For each IR temporary, allocate a suitably-kinded virtual
+      register.  Note: on a non-NEON host, an Ity_I64 temp occupies a
+      *pair* of 32-bit vregs, with the high half kept in vregmapHI. */
+   j = 0;
+   for (i = 0; i < env->n_vregmap; i++) {
+      hregHI = hreg = INVALID_HREG;
+      switch (bb->tyenv->types[i]) {
+         case Ity_I1:
+         case Ity_I8:
+         case Ity_I16:
+         case Ity_I32:  hreg   = mkHReg(True, HRcInt32, 0, j++); break;
+         case Ity_I64:
+            if (hwcaps_host & VEX_HWCAPS_ARM_NEON) {
+               hreg = mkHReg(True, HRcFlt64, 0, j++);
+            } else {
+               hregHI = mkHReg(True, HRcInt32, 0, j++);
+               hreg   = mkHReg(True, HRcInt32, 0, j++);
+            }
+            break;
+         case Ity_F32:  hreg   = mkHReg(True, HRcFlt32,  0, j++); break;
+         case Ity_F64:  hreg   = mkHReg(True, HRcFlt64,  0, j++); break;
+         case Ity_V128: hreg   = mkHReg(True, HRcVec128, 0, j++); break;
+         default: ppIRType(bb->tyenv->types[i]);
+                  vpanic("iselBB: IRTemp type");
+      }
+      env->vregmap[i]   = hreg;
+      env->vregmapHI[i] = hregHI;
+   }
+   env->vreg_ctr = j;
+
+   /* The very first instruction must be an event check. */
+   amCounter  = ARMAMode1_RI(hregARM_R8(), offs_Host_EvC_Counter);
+   amFailAddr = ARMAMode1_RI(hregARM_R8(), offs_Host_EvC_FailAddr);
+   addInstr(env, ARMInstr_EvCheck(amCounter, amFailAddr));
+
+   /* Possibly a block counter increment (for profiling).  At this
+      point we don't know the address of the counter, so just pretend
+      it is zero.  It will have to be patched later, but before this
+      translation is used, by a call to LibVEX_patchProfCtr. */
+   if (addProfInc) {
+      addInstr(env, ARMInstr_ProfInc());
+   }
+
+   /* Ok, finally we can iterate over the statements. */
+   for (i = 0; i < bb->stmts_used; i++)
+      iselStmt(env, bb->stmts[i]);
+
+   iselNext(env, bb->next, bb->jumpkind, bb->offsIP);
+
+   /* record the number of vregs we used. */
+   env->code->n_vregs = env->vreg_ctr;
+   return env->code;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                     host_arm_isel.c ---*/
+/*---------------------------------------------------------------*/
+
diff --git a/VEX/priv/host_generic_maddf.c b/VEX/priv/host_generic_maddf.c
new file mode 100644
index 0000000..d4e9fb7
--- /dev/null
+++ b/VEX/priv/host_generic_maddf.c
@@ -0,0 +1,320 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                              host_generic_maddf.c ---*/
+/*---------------------------------------------------------------*/
+
+/* 
+   Compute x * y + z as ternary operation.
+   Copyright (C) 2010-2013 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2010.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.
+*/
+
+/* Generic helper functions for doing FMA, i.e. compute x * y + z
+   as ternary operation.
+   These are purely back-end entities and cannot be seen/referenced
+   from IR. */
+
+#include "libvex_basictypes.h"
+#include "host_generic_maddf.h"
+#include "main_util.h"
+
+/* This implementation relies on Double being more than twice as
+   precise as Float and uses rounding to odd in order to avoid problems
+   with double rounding.
+   See a paper by Boldo and Melquiond:
+   http://www.lri.fr/~melquion/doc/08-tc.pdf  */
+
+/* Force X to be evaluated and stored now: an empty asm taking X as a
+   memory operand acts as a compiler scheduling barrier, so the FP op
+   producing X cannot be moved past subsequent status-flag reads. */
+#define FORCE_EVAL(X) __asm __volatile__ ("" : : "m" (X))
+
+/* On x86_64 with SSE2 maths, rounding is controlled via MXCSR.
+   NOTE(review): per the Intel SDM, MXCSR bit 5 is the inexact (PE)
+   status flag, bits 5:0 are the status flags, bits 12:7 the exception
+   masks and bits 14:13 the rounding-control field; hence
+   (x | 0x7f80) & ~0x3f masks all exceptions, selects round-toward-
+   zero and clears the status flags.  Without this support the FMA
+   helpers below fall back to a non-fused computation (#ifndef
+   ENV_TYPE paths). */
+#if defined(__x86_64__) && defined(__SSE2_MATH__)
+# define ENV_TYPE unsigned int
+/* Save current rounding mode into ENV, hold exceptions, set rounding
+   mode to rounding toward zero.  */
+# define ROUNDTOZERO(env) \
+   do {							\
+      unsigned int mxcsr;				\
+      __asm __volatile__ ("stmxcsr %0" : "=m" (mxcsr));	\
+      (env) = mxcsr;					\
+      mxcsr = (mxcsr | 0x7f80) & ~0x3f;			\
+      __asm __volatile__ ("ldmxcsr %0" : : "m" (mxcsr));\
+   } while (0)
+/* Restore exceptions from ENV, return if inexact exception has been raised
+   since ROUNDTOZERO.  */
+# define RESET_TESTINEXACT(env) \
+   ({							\
+      unsigned int mxcsr, ret;				\
+      __asm __volatile__ ("stmxcsr %0" : "=m" (mxcsr));	\
+      ret = (mxcsr >> 5) & 1;				\
+      mxcsr = (mxcsr & 0x3d) | (env);			\
+      __asm __volatile__ ("ldmxcsr %0" : : "m" (mxcsr));\
+      ret;						\
+   })
+/* Return if inexact exception has been raised since ROUNDTOZERO.  */
+# define TESTINEXACT() \
+   ({							\
+      unsigned int mxcsr;				\
+      __asm __volatile__ ("stmxcsr %0" : "=m" (mxcsr));	\
+      (mxcsr >> 5) & 1;					\
+   })
+#endif
+
+/* 53 = mantissa bits in an IEEE754 double (incl. the implicit leading
+   one); 0x3ff = the double exponent bias. */
+#define DBL_MANT_DIG 53
+#define IEEE754_DOUBLE_BIAS 0x3ff
+
+/* View of an IEEE754 double as sign/exponent/mantissa bit-fields.
+   mantissa1 holds the low 32 mantissa bits (the field order flips on
+   big-endian hosts), so its bit 0 is the LSB of the value -- the bit
+   that the round-to-odd fixups in the FMA helpers OR into. */
+union vg_ieee754_double {
+   Double d;
+
+   /* This is the IEEE 754 double-precision format.  */
+   struct {
+#ifdef VKI_BIG_ENDIAN
+      unsigned int negative:1;
+      unsigned int exponent:11;
+      unsigned int mantissa0:20;
+      unsigned int mantissa1:32;
+#else
+      unsigned int mantissa1:32;
+      unsigned int mantissa0:20;
+      unsigned int exponent:11;
+      unsigned int negative:1;
+#endif
+   } ieee;
+};
+
+/* Compute *res = *argX * *argY + *argZ with a single rounding, i.e.
+   fused multiply-add semantics for 32-bit floats.  The operands are
+   widened to Double, so the product is exact; the addition is then
+   done in round-toward-zero with a "round to odd" sticky-bit fixup,
+   which makes the final narrowing back to Float immune to double
+   rounding (Boldo & Melquiond technique, see file header).  When no
+   ENV_TYPE (host rounding-mode control) is available, falls back to
+   a plain, non-fused x*y+z. */
+void VEX_REGPARM(3)
+     h_generic_calc_MAddF32 ( /*OUT*/Float* res,
+                               Float* argX, Float* argY, Float* argZ )
+{
+#ifndef ENV_TYPE
+   /* Lame fallback implementation.  */
+   *res = *argX * *argY + *argZ;
+#else
+   ENV_TYPE env;
+   /* Multiplication is always exact.  */
+   Double temp = (Double) *argX * (Double) *argY;
+   union vg_ieee754_double u;
+
+   ROUNDTOZERO (env);
+
+   /* Perform addition with round to odd.  */
+   u.d = temp + (Double) *argZ;
+   /* Ensure the addition is not scheduled after fetestexcept call.  */
+   FORCE_EVAL (u.d);
+
+   /* Reset rounding mode and test for inexact simultaneously.  */
+   int j = RESET_TESTINEXACT (env);
+
+   /* Round-to-odd: if the truncated sum was inexact, force its LSB to
+      1.  Skip Inf/NaN (exponent 0x7ff), whose payload must not be
+      disturbed. */
+   if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7ff)
+      u.ieee.mantissa1 |= j;
+
+   /* And finally truncation with round to nearest.  */
+   *res = (Float) u.d;
+#endif
+}
+
+
+void VEX_REGPARM(3)
+     h_generic_calc_MAddF64 ( /*OUT*/Double* res,
+                               Double* argX, Double* argY, Double* argZ )
+{
+#ifndef ENV_TYPE
+   /* Lame fallback implementation.  */
+   *res = *argX * *argY + *argZ;
+#else
+   Double x = *argX, y = *argY, z = *argZ;
+   union vg_ieee754_double u, v, w;
+   int adjust = 0;
+   u.d = x;
+   v.d = y;
+   w.d = z;
+   if (UNLIKELY (u.ieee.exponent + v.ieee.exponent
+                 >= 0x7ff + IEEE754_DOUBLE_BIAS - DBL_MANT_DIG)
+       || UNLIKELY (u.ieee.exponent >= 0x7ff - DBL_MANT_DIG)
+       || UNLIKELY (v.ieee.exponent >= 0x7ff - DBL_MANT_DIG)
+       || UNLIKELY (w.ieee.exponent >= 0x7ff - DBL_MANT_DIG)
+       || UNLIKELY (u.ieee.exponent + v.ieee.exponent
+                    <= IEEE754_DOUBLE_BIAS + DBL_MANT_DIG)) {
+      /* If z is Inf, but x and y are finite, the result should be
+         z rather than NaN.  */
+      if (w.ieee.exponent == 0x7ff
+          && u.ieee.exponent != 0x7ff
+          && v.ieee.exponent != 0x7ff) {
+         *res = (z + x) + y;
+         return;
+      }
+      /* If x or y or z is Inf/NaN, or if fma will certainly overflow,
+         or if x * y is less than half of DBL_DENORM_MIN,
+         compute as x * y + z.  */
+      if (u.ieee.exponent == 0x7ff
+          || v.ieee.exponent == 0x7ff
+          || w.ieee.exponent == 0x7ff
+          || u.ieee.exponent + v.ieee.exponent > 0x7ff + IEEE754_DOUBLE_BIAS
+          || u.ieee.exponent + v.ieee.exponent
+             < IEEE754_DOUBLE_BIAS - DBL_MANT_DIG - 2) {
+         *res = x * y + z;
+         return;
+      }
+      if (u.ieee.exponent + v.ieee.exponent
+          >= 0x7ff + IEEE754_DOUBLE_BIAS - DBL_MANT_DIG) {
+         /* Compute 1p-53 times smaller result and multiply
+            at the end.  */
+         if (u.ieee.exponent > v.ieee.exponent)
+            u.ieee.exponent -= DBL_MANT_DIG;
+         else
+            v.ieee.exponent -= DBL_MANT_DIG;
+         /* If x + y exponent is very large and z exponent is very small,
+            it doesn't matter if we don't adjust it.  */
+         if (w.ieee.exponent > DBL_MANT_DIG)
+            w.ieee.exponent -= DBL_MANT_DIG;
+         adjust = 1;
+      } else if (w.ieee.exponent >= 0x7ff - DBL_MANT_DIG) {
+         /* Similarly.
+            If z exponent is very large and x and y exponents are
+            very small, it doesn't matter if we don't adjust it.  */
+         if (u.ieee.exponent > v.ieee.exponent) {
+            if (u.ieee.exponent > DBL_MANT_DIG)
+               u.ieee.exponent -= DBL_MANT_DIG;
+         } else if (v.ieee.exponent > DBL_MANT_DIG)
+            v.ieee.exponent -= DBL_MANT_DIG;
+         w.ieee.exponent -= DBL_MANT_DIG;
+         adjust = 1;
+      } else if (u.ieee.exponent >= 0x7ff - DBL_MANT_DIG) {
+         u.ieee.exponent -= DBL_MANT_DIG;
+         if (v.ieee.exponent)
+            v.ieee.exponent += DBL_MANT_DIG;
+         else
+            v.d *= 0x1p53;
+      } else if (v.ieee.exponent >= 0x7ff - DBL_MANT_DIG) {
+         v.ieee.exponent -= DBL_MANT_DIG;
+         if (u.ieee.exponent)
+            u.ieee.exponent += DBL_MANT_DIG;
+         else
+            u.d *= 0x1p53;
+      } else /* if (u.ieee.exponent + v.ieee.exponent
+                    <= IEEE754_DOUBLE_BIAS + DBL_MANT_DIG) */ {
+         if (u.ieee.exponent > v.ieee.exponent)
+            u.ieee.exponent += 2 * DBL_MANT_DIG;
+         else
+            v.ieee.exponent += 2 * DBL_MANT_DIG;
+         if (w.ieee.exponent <= 4 * DBL_MANT_DIG + 4) {
+            if (w.ieee.exponent)
+               w.ieee.exponent += 2 * DBL_MANT_DIG;
+            else
+               w.d *= 0x1p106;
+            adjust = -1;
+         }
+         /* Otherwise x * y should just affect inexact
+            and nothing else.  */
+      }
+      x = u.d;
+      y = v.d;
+      z = w.d;
+   }
+   /* Multiplication m1 + m2 = x * y using Dekker's algorithm.  */
+#  define C ((1 << (DBL_MANT_DIG + 1) / 2) + 1)
+   Double x1 = x * C;
+   Double y1 = y * C;
+   Double m1 = x * y;
+   x1 = (x - x1) + x1;
+   y1 = (y - y1) + y1;
+   Double x2 = x - x1;
+   Double y2 = y - y1;
+   Double m2 = (((x1 * y1 - m1) + x1 * y2) + x2 * y1) + x2 * y2;
+#  undef C
+
+   /* Addition a1 + a2 = z + m1 using Knuth's algorithm.  */
+   Double a1 = z + m1;
+   Double t1 = a1 - z;
+   Double t2 = a1 - t1;
+   t1 = m1 - t1;
+   t2 = z - t2;
+   Double a2 = t1 + t2;
+
+   ENV_TYPE env;
+   ROUNDTOZERO (env);
+
+   /* Perform m2 + a2 addition with round to odd.  */
+   u.d = a2 + m2;
+
+   if (UNLIKELY (adjust < 0)) {
+      if ((u.ieee.mantissa1 & 1) == 0)
+         u.ieee.mantissa1 |= TESTINEXACT ();
+      v.d = a1 + u.d;
+      /* Ensure the addition is not scheduled after fetestexcept call.  */
+      FORCE_EVAL (v.d);
+   }
+
+   /* Reset rounding mode and test for inexact simultaneously.  */
+   int j = RESET_TESTINEXACT (env) != 0;
+
+   if (LIKELY (adjust == 0)) {
+      if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7ff)
+         u.ieee.mantissa1 |= j;
+      /* Result is a1 + u.d.  */
+      *res = a1 + u.d;
+   } else if (LIKELY (adjust > 0)) {
+      if ((u.ieee.mantissa1 & 1) == 0 && u.ieee.exponent != 0x7ff)
+         u.ieee.mantissa1 |= j;
+      /* Result is a1 + u.d, scaled up.  */
+      *res = (a1 + u.d) * 0x1p53;
+   } else {
+      /* If a1 + u.d is exact, the only rounding happens during
+         scaling down.  */
+      if (j == 0) {
+         *res = v.d * 0x1p-106;
+         return;
+      }
+      /* If result rounded to zero is not subnormal, no double
+         rounding will occur.  */
+      if (v.ieee.exponent > 106) {
+         *res = (a1 + u.d) * 0x1p-106;
+         return;
+      }
+      /* If v.d * 0x1p-106 with round to zero is a subnormal above
+         or equal to DBL_MIN / 2, then v.d * 0x1p-106 shifts mantissa
+         down just by 1 bit, which means v.ieee.mantissa1 |= j would
+         change the round bit, not sticky or guard bit.
+         v.d * 0x1p-106 never normalizes by shifting up,
+         so round bit plus sticky bit should be already enough
+         for proper rounding.  */
+      if (v.ieee.exponent == 106) {
+         /* v.ieee.mantissa1 & 2 is LSB bit of the result before rounding,
+            v.ieee.mantissa1 & 1 is the round bit and j is our sticky
+            bit.  In round-to-nearest 001 rounds down like 00,
+            011 rounds up, even though 01 rounds down (thus we need
+            to adjust), 101 rounds down like 10 and 111 rounds up
+            like 11.  */
+         if ((v.ieee.mantissa1 & 3) == 1) {
+            v.d *= 0x1p-106;
+            if (v.ieee.negative)
+               *res = v.d - 0x1p-1074;
+            else
+               *res = v.d + 0x1p-1074;
+         } else
+            *res = v.d * 0x1p-106;
+         return;
+      }
+      v.ieee.mantissa1 |= j;
+      *res = v.d * 0x1p-106;
+      return;
+    }
+#endif
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                 host_generic_maddf.c --*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_maddf.h b/VEX/priv/host_generic_maddf.h
new file mode 100644
index 0000000..6757f74
--- /dev/null
+++ b/VEX/priv/host_generic_maddf.h
@@ -0,0 +1,48 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                              host_generic_maddf.h ---*/
+/*---------------------------------------------------------------*/
+
+/* 
+   Compute x * y + z as ternary operation.
+   Copyright (C) 2010-2013 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Jakub Jelinek <jakub@redhat.com>, 2010.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.
+*/
+
+/* Generic helper functions for doing FMA, i.e. compute x * y + z
+   as ternary operation.
+   These are purely back-end entities and cannot be seen/referenced
+   from IR. */
+
+#ifndef __VEX_HOST_GENERIC_MADDF_H
+#define __VEX_HOST_GENERIC_MADDF_H
+
+#include "libvex_basictypes.h"
+
+extern VEX_REGPARM(3)
+       void h_generic_calc_MAddF32 ( /*OUT*/Float*, Float*, Float*, Float* );
+
+extern VEX_REGPARM(3)
+       void h_generic_calc_MAddF64 ( /*OUT*/Double*, Double*, Double*,
+                                     Double* );
+
+#endif /* ndef __VEX_HOST_GENERIC_MADDF_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                 host_generic_maddf.h --*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_reg_alloc2.c b/VEX/priv/host_generic_reg_alloc2.c
new file mode 100644
index 0000000..b303606
--- /dev/null
+++ b/VEX/priv/host_generic_reg_alloc2.c
@@ -0,0 +1,1614 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                 host_reg_alloc2.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+
+/* Set to 1 for lots of debugging output. */
+#define DEBUG_REGALLOC 0
+
+
+/* TODO 27 Oct 04:
+
+   Better consistency checking from what isMove tells us.
+
+   We can possibly do V-V coalescing even when the src is spilled,
+   providing we can arrange for the dst to have the same spill slot.
+
+   Note that state[].hreg is the same as the available real regs.
+
+   Generally rationalise data structures.  */
+
+
+/* Records information on virtual register live ranges.  Computed once
+   and remains unchanged after that. */
+typedef
+   struct {
+      /* Becomes live for the first time after this insn ... */
+      Short live_after;
+      /* Becomes dead for the last time before this insn ... */
+      Short dead_before;
+      /* The "home" spill slot, if needed.  Never changes. */
+      Short spill_offset;
+      Short spill_size;
+      /* What kind of register this is. */
+      HRegClass reg_class;
+   }
+   VRegLR;
+
+
+/* Records information on real-register live ranges.  Computed once
+   and remains unchanged after that. */
+typedef
+   struct {
+      HReg rreg;
+      /* Becomes live after this insn ... */
+      Short live_after;
+      /* Becomes dead before this insn ... */
+      Short dead_before;
+   }
+   RRegLR;
+
+
+/* An array of the following structs (rreg_state) comprises the
+   running state of the allocator.  It indicates what the current
+   disposition of each allocatable real register is.  The array gets
+   updated as the allocator processes instructions.  The identity of
+   the register is not recorded here, because the index of this
+   structure in doRegisterAllocation()'s |rreg_state| is the index
+   number of the register, and the register itself can be extracted
+   from the RRegUniverse supplied to doRegisterAllocation(). */
+typedef
+   struct {
+      /* ------ FIELDS WHICH DO NOT CHANGE ------ */
+      /* Is this involved in any HLRs?  (only an optimisation hint) */
+      Bool has_hlrs;
+      /* ------ FIELDS WHICH DO CHANGE ------ */
+      /* 6 May 07: rearranged fields below so the whole struct fits
+         into 16 bytes on both x86 and amd64. */
+      /* Used when .disp == Bound and we are looking for vregs to
+         spill. */
+      Bool is_spill_cand;
+      /* Optimisation: used when .disp == Bound.  Indicates when the
+         rreg has the same value as the spill slot for the associated
+         vreg.  Is safely left at False, and becomes True after a
+         spill store or reload for this rreg. */
+      Bool eq_spill_slot;
+      /* What's its current disposition? */
+      enum { Free,     /* available for use */
+             Unavail,  /* in a real-reg live range */
+             Bound     /* in use (holding value of some vreg) */
+           }
+           disp;
+      /* If .disp == Bound, what vreg is it bound to? */
+      HReg vreg;
+   }
+   RRegState;
+
+
+/* The allocator also maintains a redundant array of indexes
+   (vreg_state) from vreg numbers back to entries in rreg_state.  It
+   is redundant because iff vreg_state[i] == j then
+   hregNumber(rreg_state[j].vreg) == i -- that is, the two entries
+   point at each other.  The purpose of this is to speed up activities
+   which involve looking for a particular vreg: there is no need to
+   scan the rreg_state looking for it, just index directly into
+   vreg_state.  The FAQ "does this vreg already have an associated
+   rreg" is the main beneficiary.  
+
+   To indicate, in vreg_state[i], that a given vreg is not currently
+   associated with any rreg, that entry can be set to INVALID_RREG_NO.
+
+   Because the vreg_state entries are signed Shorts, the max number
+   of vregs that can be handled by regalloc is 32767.
+*/
+
+#define INVALID_RREG_NO ((Short)(-1))
+
+#define IS_VALID_VREGNO(_zz) ((_zz) >= 0 && (_zz) < n_vregs)
+#define IS_VALID_RREGNO(_zz) ((_zz) >= 0 && (_zz) < n_rregs)
+
+
+/* Search forward from some given point in the incoming instruction
+   sequence.  Point is to select a virtual register to spill, by
+   finding the vreg which is mentioned as far ahead as possible, in
+   the hope that this will minimise the number of consequent reloads.
+
+   Only do the search for vregs which are Bound in the running state,
+   and for which the .is_spill_cand field is set.  This allows the
+   caller to arbitrarily restrict the set of spill candidates to be
+   considered.
+
+   To do this we don't actually need to see the incoming instruction
+   stream.  Rather, what we need is the HRegUsage records for the
+   incoming instruction stream.  Hence that is passed in.
+
+   Returns an index into the state array indicating the (v,r) pair to
+   spill, or -1 if none was found.  */
+static
+Int findMostDistantlyMentionedVReg ( 
+   HRegUsage*   reg_usages_in,
+   Int          search_from_instr,
+   Int          num_instrs,
+   RRegState*   state,
+   Int          n_state
+)
+{
+   Int k, m;
+   Int furthest_k = -1;
+   Int furthest   = -1;
+   vassert(search_from_instr >= 0);
+   for (k = 0; k < n_state; k++) {
+      if (!state[k].is_spill_cand)
+         continue;
+      vassert(state[k].disp == Bound);
+      for (m = search_from_instr; m < num_instrs; m++) {
+         if (HRegUsage__contains(&reg_usages_in[m], state[k].vreg))
+            break;
+      }
+      if (m > furthest) {
+         furthest   = m;
+         furthest_k = k;
+      }
+   }
+   return furthest_k;
+}
+
+
+/* Check that this vreg has been assigned a sane spill offset. */
+inline
+static void sanity_check_spill_offset ( VRegLR* vreg )
+{
+   switch (vreg->reg_class) {
+      case HRcVec128: case HRcFlt64:
+         vassert(0 == ((UShort)vreg->spill_offset % 16)); break;
+      default:
+         vassert(0 == ((UShort)vreg->spill_offset % 8)); break;
+   }
+}
+
+
+/* Double the size of the real-reg live-range array, if needed. */
+__attribute__((noinline)) 
+static void ensureRRLRspace_SLOW ( RRegLR** info, Int* size, Int used )
+{
+   Int     k;
+   RRegLR* arr2;
+   if (0)
+      vex_printf("ensureRRISpace: %d -> %d\n", *size, 2 * *size);
+   vassert(used == *size);
+   arr2 = LibVEX_Alloc_inline(2 * *size * sizeof(RRegLR));
+   for (k = 0; k < *size; k++)
+      arr2[k] = (*info)[k];
+   *size *= 2;
+   *info = arr2;
+}
+inline
+static void ensureRRLRspace ( RRegLR** info, Int* size, Int used )
+{
+   if (LIKELY(used < *size)) return;
+   ensureRRLRspace_SLOW(info, size, used);
+}
+
+
+/* Sort an array of RRegLR entries by either the .live_after or
+   .dead_before fields.  This is performance-critical. */
+static void sortRRLRarray ( RRegLR* arr, 
+                            Int size, Bool by_live_after )
+{
+   Int    incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280,
+                       9841, 29524, 88573, 265720,
+                       797161, 2391484 };
+   Int    lo = 0;
+   Int    hi = size-1;
+   Int    i, j, h, bigN, hp;
+   RRegLR v;
+
+   vassert(size >= 0);
+   if (size == 0)
+      return;
+
+   bigN = hi - lo + 1; if (bigN < 2) return;
+   hp = 0; while (hp < 14 && incs[hp] < bigN) hp++; hp--;
+
+   if (by_live_after) {
+
+      for ( ; hp >= 0; hp--) {
+         h = incs[hp];
+         for (i = lo + h; i <= hi; i++) {
+            v = arr[i];
+            j = i;
+            while (arr[j-h].live_after > v.live_after) {
+               arr[j] = arr[j-h];
+               j = j - h;
+               if (j <= (lo + h - 1)) break;
+            }
+            arr[j] = v;
+         }
+      }
+
+   } else {
+
+      for ( ; hp >= 0; hp--) {
+         h = incs[hp];
+         for (i = lo + h; i <= hi; i++) {
+            v = arr[i];
+            j = i;
+            while (arr[j-h].dead_before > v.dead_before) {
+               arr[j] = arr[j-h];
+               j = j - h;
+               if (j <= (lo + h - 1)) break;
+            }
+            arr[j] = v;
+         }
+      }
+
+   }
+}
+
+
+/* Compute the index of the highest and lowest 1 in a ULong,
+   respectively.  Results are undefined if the argument is zero.
+   Don't pass it zero :) */
+static inline UInt ULong__maxIndex ( ULong w64 ) {
+   return 63 - __builtin_clzll(w64);
+}
+
+static inline UInt ULong__minIndex ( ULong w64 ) {
+   return __builtin_ctzll(w64);
+}
+
+
+/* Vectorised memset, copied from Valgrind's m_libcbase.c. */
+static void* local_memset ( void *destV, Int c, SizeT sz )
+{
+#  define IS_4_ALIGNED(aaa_p) (0 == (((HWord)(aaa_p)) & ((HWord)0x3)))
+
+   UInt   c4;
+   UChar* d = destV;
+   UChar  uc = c;
+
+   while ((!IS_4_ALIGNED(d)) && sz >= 1) {
+      d[0] = uc;
+      d++;
+      sz--;
+   }
+   if (sz == 0)
+      return destV;
+   c4 = uc;
+   c4 |= (c4 << 8);
+   c4 |= (c4 << 16);
+   while (sz >= 16) {
+      ((UInt*)d)[0] = c4;
+      ((UInt*)d)[1] = c4;
+      ((UInt*)d)[2] = c4;
+      ((UInt*)d)[3] = c4;
+      d += 16;
+      sz -= 16;
+   }
+   while (sz >= 4) {
+      ((UInt*)d)[0] = c4;
+      d += 4;
+      sz -= 4;
+   }
+   while (sz >= 1) {
+      d[0] = c;
+      d++;
+      sz--;
+   }
+   return destV;
+
+#  undef IS_4_ALIGNED
+}
+
+
+/* A target-independent register allocator.  Requires various
+   functions which it uses to deal abstractly with instructions and
+   registers, since it cannot have any target-specific knowledge.
+
+   Returns a new list of instructions, which, as a result of the
+   behaviour of mapRegs, will be in-place modifications of the
+   original instructions.
+
+   Requires that the incoming code has been generated using
+   vreg numbers 0, 1 .. n_vregs-1.  Appearance of a vreg outside
+   that range is a checked run-time error.
+
+   Takes an expandable array of pointers to unallocated insns.
+   Returns an expandable array of pointers to allocated insns.
+*/
+HInstrArray* doRegisterAllocation (
+
+   /* Incoming virtual-registerised code. */ 
+   HInstrArray* instrs_in,
+
+   /* The real-register universe to use.  This contains facts about
+      real registers, one of which is the set of registers available
+      for allocation. */
+   const RRegUniverse* univ,
+
+   /* Return True iff the given insn is a reg-reg move, in which
+      case also return the src and dst regs. */
+   Bool (*isMove) ( const HInstr*, HReg*, HReg* ),
+
+   /* Get info about register usage in this insn. */
+   void (*getRegUsage) ( HRegUsage*, const HInstr*, Bool ),
+
+   /* Apply a reg-reg mapping to an insn. */
+   void (*mapRegs) ( HRegRemap*, HInstr*, Bool ),
+
+   /* Return one, or, if we're unlucky, two insn(s) to spill/restore a
+      real reg to a spill slot byte offset.  The two leading HInstr**
+      args are out parameters, through which the generated insns are
+      returned.  Also (optionally) a 'directReload' function, which
+      attempts to replace a given instruction by one which reads
+      directly from a specified spill slot.  May be NULL, in which
+      case the optimisation is not attempted. */
+   void    (*genSpill)  ( HInstr**, HInstr**, HReg, Int, Bool ),
+   void    (*genReload) ( HInstr**, HInstr**, HReg, Int, Bool ),
+   HInstr* (*directReload) ( HInstr*, HReg, Short ),
+   Int     guest_sizeB,
+
+   /* For debug printing only. */
+   void (*ppInstr) ( const HInstr*, Bool ),
+   void (*ppReg) ( HReg ),
+
+   /* 32/64bit mode */
+   Bool mode64
+)
+{
+#  define N_SPILL64S  (LibVEX_N_SPILL_BYTES / 8)
+
+   const Bool eq_spill_opt = True;
+
+   /* Info on vregs and rregs.  Computed once and remains
+      unchanged. */
+   Int     n_vregs;
+   VRegLR* vreg_lrs; /* [0 .. n_vregs-1] */
+
+   /* We keep two copies of the real-reg live range info, one sorted
+      by .live_after and the other by .dead_before.  First the
+      unsorted info is created in the _la variant and copied into the
+      _db variant.  Once that's done both of them are sorted. 
+      We also need two integer cursors which record the next
+      location in the two arrays to consider. */
+   RRegLR* rreg_lrs_la;
+   RRegLR* rreg_lrs_db;
+   Int     rreg_lrs_size;
+   Int     rreg_lrs_used;
+   Int     rreg_lrs_la_next;
+   Int     rreg_lrs_db_next;
+
+   /* Info on register usage in the incoming instruction array.
+      Computed once and remains unchanged, more or less; updated
+      sometimes by the direct-reload optimisation. */
+   HRegUsage* reg_usage_arr; /* [0 .. instrs_in->arr_used-1] */
+
+   /* Used when constructing vreg_lrs (for allocating stack
+      slots). */
+   Short ss_busy_until_before[N_SPILL64S];
+
+   /* Used when constructing rreg_lrs. */
+   Int* rreg_live_after;
+   Int* rreg_dead_before;
+
+   /* Running state of the core allocation algorithm. */
+   RRegState* rreg_state;  /* [0 .. n_rregs-1] */
+   Int        n_rregs;
+
+   /* .. and the redundant backward map */
+   /* Each value is 0 .. n_rregs-1 or is INVALID_RREG_NO.
+      This implies n_rregs must be <= 32768.
+   Short*     vreg_state;  /* [0 .. n_vregs-1] */
+
+   /* The vreg -> rreg map constructed and then applied to each
+      instr. */
+   HRegRemap remap;
+
+   /* The output array of instructions. */
+   HInstrArray* instrs_out;
+
+   /* Sanity checks are expensive.  They are only done periodically,
+      not at each insn processed. */
+   Bool do_sanity_check;
+
+   vassert(0 == (guest_sizeB % LibVEX_GUEST_STATE_ALIGN));
+   vassert(0 == (LibVEX_N_SPILL_BYTES % LibVEX_GUEST_STATE_ALIGN));
+   vassert(0 == (N_SPILL64S % 2));
+
+   /* The live range numbers are signed shorts, and so limiting the
+      number of insns to 15000 comfortably guards against them
+      overflowing 32k. */
+   vassert(instrs_in->arr_used <= 15000);
+
+#  define INVALID_INSTRNO (-2)
+
+#  define EMIT_INSTR(_instr)                  \
+      do {                                    \
+        HInstr* _tmp = (_instr);              \
+        if (DEBUG_REGALLOC) {                 \
+           vex_printf("**  ");                \
+           (*ppInstr)(_tmp, mode64);          \
+           vex_printf("\n\n");                \
+        }                                     \
+        addHInstr ( instrs_out, _tmp );       \
+      } while (0)
+
+#   define PRINT_STATE						   \
+      do {							   \
+         Int z, q;						   \
+         for (z = 0; z < n_rregs; z++) {			   \
+            vex_printf("  rreg_state[%2d] = ", z);		   \
+            (*ppReg)(univ->regs[z]);	       			   \
+            vex_printf("  \t");					   \
+            switch (rreg_state[z].disp) {			   \
+               case Free:    vex_printf("Free\n"); break;	   \
+               case Unavail: vex_printf("Unavail\n"); break;	   \
+               case Bound:   vex_printf("BoundTo "); 		   \
+                             (*ppReg)(rreg_state[z].vreg);	   \
+                             vex_printf("\n"); break;		   \
+            }							   \
+         }							   \
+         vex_printf("\n  vreg_state[0 .. %d]:\n    ", n_vregs-1);  \
+         q = 0;                                                    \
+         for (z = 0; z < n_vregs; z++) {                           \
+            if (vreg_state[z] == INVALID_RREG_NO)                  \
+               continue;                                           \
+            vex_printf("[%d] -> %d   ", z, vreg_state[z]);         \
+            q++;                                                   \
+            if (q > 0 && (q % 6) == 0)                             \
+               vex_printf("\n    ");                               \
+         }                                                         \
+         vex_printf("\n");                                         \
+      } while (0)
+
+
+   /* --------- Stage 0: set up output array --------- */
+   /* --------- and allocate/initialise running state. --------- */
+
+   instrs_out = newHInstrArray();
+
+   /* ... and initialise running state. */
+   /* n_rregs is no more than a short name for n_available_real_regs. */
+   n_rregs = univ->allocable;
+   n_vregs = instrs_in->n_vregs;
+
+   /* If this is not so, vreg_state entries will overflow. */
+   vassert(n_vregs < 32767);
+
+   /* If this is not so, the universe we have is nonsensical. */
+   vassert(n_rregs > 0);
+
+   rreg_state = LibVEX_Alloc_inline(n_rregs * sizeof(RRegState));
+   vreg_state = LibVEX_Alloc_inline(n_vregs * sizeof(Short));
+
+   for (Int j = 0; j < n_rregs; j++) {
+      rreg_state[j].has_hlrs      = False;
+      rreg_state[j].disp          = Free;
+      rreg_state[j].vreg          = INVALID_HREG;
+      rreg_state[j].is_spill_cand = False;
+      rreg_state[j].eq_spill_slot = False;
+   }
+
+   for (Int j = 0; j < n_vregs; j++)
+      vreg_state[j] = INVALID_RREG_NO;
+
+
+   /* --------- Stage 1: compute vreg live ranges. --------- */
+   /* --------- Stage 2: compute rreg live ranges. --------- */
+
+   /* ------ start of SET UP TO COMPUTE VREG LIVE RANGES ------ */
+
+   /* This is relatively simple, because (1) we only seek the complete
+      end-to-end live range of each vreg, and are not interested in
+      any holes in it, and (2) the vregs are conveniently numbered 0
+      .. n_vregs-1, so we can just dump the results in a
+      pre-allocated array. */
+
+   vreg_lrs = NULL;
+   if (n_vregs > 0)
+      vreg_lrs = LibVEX_Alloc_inline(sizeof(VRegLR) * n_vregs);
+
+   for (Int j = 0; j < n_vregs; j++) {
+      vreg_lrs[j].live_after     = INVALID_INSTRNO;
+      vreg_lrs[j].dead_before    = INVALID_INSTRNO;
+      vreg_lrs[j].spill_offset   = 0;
+      vreg_lrs[j].spill_size     = 0;
+      vreg_lrs[j].reg_class      = HRcINVALID;
+   }
+
+   /* An array to hold the reg-usage info for the incoming
+      instructions. */
+   reg_usage_arr
+      = LibVEX_Alloc_inline(sizeof(HRegUsage) * instrs_in->arr_used);
+
+   /* ------ end of SET UP TO COMPUTE VREG LIVE RANGES ------ */
+
+   /* ------ start of SET UP TO COMPUTE RREG LIVE RANGES ------ */
+
+   /* This is more complex than Stage 1, because we need to compute
+      exactly all the live ranges of all the allocatable real regs,
+      and we don't know in advance how many there will be. */
+
+   rreg_lrs_used = 0;
+   rreg_lrs_size = 4;
+   rreg_lrs_la = LibVEX_Alloc_inline(rreg_lrs_size * sizeof(RRegLR));
+   rreg_lrs_db = NULL; /* we'll create this later */
+
+   /* We'll need to track live range start/end points separately for
+      each rreg.  Sigh. */
+   vassert(n_rregs > 0);
+   rreg_live_after  = LibVEX_Alloc_inline(n_rregs * sizeof(Int));
+   rreg_dead_before = LibVEX_Alloc_inline(n_rregs * sizeof(Int));
+
+   for (Int j = 0; j < n_rregs; j++) {
+      rreg_live_after[j] = 
+      rreg_dead_before[j] = INVALID_INSTRNO;
+   }
+
+   /* ------ end of SET UP TO COMPUTE RREG LIVE RANGES ------ */
+
+   /* ------ start of ITERATE OVER INSNS ------ */
+
+   for (Int ii = 0; ii < instrs_in->arr_used; ii++) {
+
+      (*getRegUsage)( &reg_usage_arr[ii], instrs_in->arr[ii], mode64 );
+
+      if (0) {
+         vex_printf("\n%d  stage1: ", ii);
+         (*ppInstr)(instrs_in->arr[ii], mode64);
+         vex_printf("\n");
+         ppHRegUsage(univ, &reg_usage_arr[ii]);
+      }
+
+      /* ------ start of DEAL WITH VREG LIVE RANGES ------ */
+
+      /* for each virtual reg mentioned in the insn ... */
+      for (Int j = 0; j < reg_usage_arr[ii].n_vRegs; j++) {
+
+         HReg vreg = reg_usage_arr[ii].vRegs[j];
+         vassert(hregIsVirtual(vreg));
+
+         Int k = hregIndex(vreg);
+         if (k < 0 || k >= n_vregs) {
+            vex_printf("\n");
+            (*ppInstr)(instrs_in->arr[ii], mode64);
+            vex_printf("\n");
+            vex_printf("vreg %d, n_vregs %d\n", k, n_vregs);
+            vpanic("doRegisterAllocation: out-of-range vreg");
+         }
+
+         /* Take the opportunity to note its regclass.  We'll need
+            that when allocating spill slots. */
+         if (vreg_lrs[k].reg_class == HRcINVALID) {
+            /* First mention of this vreg. */
+            vreg_lrs[k].reg_class = hregClass(vreg);
+         } else {
+            /* Seen it before, so check for consistency. */
+            vassert(vreg_lrs[k].reg_class == hregClass(vreg));
+         }
+
+         /* Now consider live ranges. */
+         switch (reg_usage_arr[ii].vMode[j]) {
+            case HRmRead: 
+               if (vreg_lrs[k].live_after == INVALID_INSTRNO) {
+                  vex_printf("\n\nOFFENDING VREG = %d\n", k);
+                  vpanic("doRegisterAllocation: "
+                         "first event for vreg is Read");
+               }
+               vreg_lrs[k].dead_before = toShort(ii + 1);
+               break;
+            case HRmWrite:
+               if (vreg_lrs[k].live_after == INVALID_INSTRNO)
+                  vreg_lrs[k].live_after = toShort(ii);
+               vreg_lrs[k].dead_before = toShort(ii + 1);
+               break;
+            case HRmModify:
+               if (vreg_lrs[k].live_after == INVALID_INSTRNO) {
+                  vex_printf("\n\nOFFENDING VREG = %d\n", k);
+                  vpanic("doRegisterAllocation: "
+                         "first event for vreg is Modify");
+               }
+               vreg_lrs[k].dead_before = toShort(ii + 1);
+               break;
+            default:
+               vpanic("doRegisterAllocation(1)");
+         } /* switch */
+
+      } /* iterate over virtual registers */
+
+      /* ------ end of DEAL WITH VREG LIVE RANGES ------ */
+
+      /* ------ start of DEAL WITH RREG LIVE RANGES ------ */
+
+      /* If this doesn't hold, the following iteration over real registers
+         will fail miserably. */
+      vassert(N_RREGUNIVERSE_REGS == 64);
+
+      const ULong rRead      = reg_usage_arr[ii].rRead;
+      const ULong rWritten   = reg_usage_arr[ii].rWritten;
+      const ULong rMentioned = rRead | rWritten;
+
+      UInt rReg_minIndex;
+      UInt rReg_maxIndex;
+      if (rMentioned == 0) {
+         /* There are no real register uses in this insn.  Set
+            rReg_{min,max}Index so that the following loop doesn't iterate
+            at all, so as to avoid wasting time. */
+         rReg_minIndex = 1;
+         rReg_maxIndex = 0;
+      } else {
+         rReg_minIndex = ULong__minIndex(rMentioned);
+         rReg_maxIndex = ULong__maxIndex(rMentioned);
+         /* Don't bother to look at registers which are not available
+            to the allocator.  We asserted above that n_rregs > 0, so
+            n_rregs-1 is safe. */
+         if (rReg_maxIndex >= n_rregs)
+            rReg_maxIndex = n_rregs-1;
+      }
+
+      /* for each allocator-available real reg mentioned in the insn ... */
+      /* Note.  We are allocating only over the real regs available to
+         the allocator.  Others, eg the stack or baseblock pointers,
+         are unavailable to allocation and so we never visit them.
+         Hence the iteration is cut off at n_rregs-1, since n_rregs ==
+         univ->allocable. */
+      for (Int j = rReg_minIndex; j <= rReg_maxIndex; j++) {
+
+         const ULong jMask = 1ULL << j;
+         if (LIKELY((rMentioned & jMask) == 0))
+            continue;
+
+         const Bool isR = (rRead    & jMask) != 0;
+         const Bool isW = (rWritten & jMask) != 0;
+
+         /* Dummy initialisations of flush_la and flush_db to avoid
+            possible bogus uninit-var warnings from gcc. */
+         Int  flush_la = INVALID_INSTRNO, flush_db = INVALID_INSTRNO;
+         Bool flush = False;
+
+         if (isW && !isR) {
+            flush_la = rreg_live_after[j];
+            flush_db = rreg_dead_before[j];
+            if (flush_la != INVALID_INSTRNO && flush_db != INVALID_INSTRNO)
+               flush = True;
+            rreg_live_after[j]  = ii;
+            rreg_dead_before[j] = ii+1;
+         } else if (!isW && isR) {
+            if (rreg_live_after[j] == INVALID_INSTRNO) {
+               vex_printf("\nOFFENDING RREG = ");
+               (*ppReg)(univ->regs[j]);
+               vex_printf("\n");
+               vex_printf("\nOFFENDING instr = ");
+               (*ppInstr)(instrs_in->arr[ii], mode64);
+               vex_printf("\n");
+               vpanic("doRegisterAllocation: "
+                      "first event for rreg is Read");
+            }
+            rreg_dead_before[j] = ii+1;
+         } else {
+            vassert(isR && isW);
+            if (rreg_live_after[j] == INVALID_INSTRNO) {
+               vex_printf("\nOFFENDING RREG = ");
+               (*ppReg)(univ->regs[j]);
+               vex_printf("\n");
+               vex_printf("\nOFFENDING instr = ");
+               (*ppInstr)(instrs_in->arr[ii], mode64);
+               vex_printf("\n");
+               vpanic("doRegisterAllocation: "
+                      "first event for rreg is Modify");
+            }
+            rreg_dead_before[j] = ii+1;
+         }
+
+         if (flush) {
+            vassert(flush_la != INVALID_INSTRNO);
+            vassert(flush_db != INVALID_INSTRNO);
+            ensureRRLRspace(&rreg_lrs_la, &rreg_lrs_size, rreg_lrs_used);
+            if (0) 
+               vex_printf("FLUSH 1 (%d,%d)\n", flush_la, flush_db);
+            rreg_lrs_la[rreg_lrs_used].rreg        = univ->regs[j];
+            rreg_lrs_la[rreg_lrs_used].live_after  = toShort(flush_la);
+            rreg_lrs_la[rreg_lrs_used].dead_before = toShort(flush_db);
+            rreg_lrs_used++;
+         }
+
+      } /* iterate over rregs in the instr */
+
+      /* ------ end of DEAL WITH RREG LIVE RANGES ------ */
+
+   } /* iterate over insns */
+
+   /* ------ end of ITERATE OVER INSNS ------ */
+
+   /* ------ start of FINALISE RREG LIVE RANGES ------ */
+
+   /* Now finish up any live ranges left over. */
+   for (Int j = 0; j < n_rregs; j++) {
+
+      if (0) {
+         vex_printf("residual %d:  %d %d\n", j, rreg_live_after[j],
+                                                rreg_dead_before[j]);
+      }
+      vassert( (rreg_live_after[j] == INVALID_INSTRNO 
+                && rreg_dead_before[j] == INVALID_INSTRNO)
+              ||
+               (rreg_live_after[j] != INVALID_INSTRNO 
+                && rreg_dead_before[j] != INVALID_INSTRNO)
+            );
+
+      if (rreg_live_after[j] == INVALID_INSTRNO)
+         continue;
+
+      ensureRRLRspace(&rreg_lrs_la, &rreg_lrs_size, rreg_lrs_used);
+      if (0)
+         vex_printf("FLUSH 2 (%d,%d)\n", 
+                    rreg_live_after[j], rreg_dead_before[j]);
+      rreg_lrs_la[rreg_lrs_used].rreg        = univ->regs[j];
+      rreg_lrs_la[rreg_lrs_used].live_after  = toShort(rreg_live_after[j]);
+      rreg_lrs_la[rreg_lrs_used].dead_before = toShort(rreg_dead_before[j]);
+      rreg_lrs_used++;
+   }
+
+   /* Compute summary hints for choosing real regs.  If a real reg is
+      involved in a hard live range, record that fact in the fixed
+      part of the running rreg_state.  Later, when offered a choice between
+      rregs, it's better to choose one which is not marked as having
+      any HLRs, since ones with HLRs may need to be spilled around
+      their HLRs.  Correctness of final assignment is unaffected by
+      this mechanism -- it is only an optimisation. */
+
+   for (Int j = 0; j < rreg_lrs_used; j++) {
+      HReg rreg = rreg_lrs_la[j].rreg;
+      vassert(!hregIsVirtual(rreg));
+      /* rreg is involved in a HLR.  Record this info in the array, if
+         there is space. */
+      UInt ix = hregIndex(rreg);
+      vassert(ix < n_rregs);
+      rreg_state[ix].has_hlrs = True;
+   }
+   if (0) {
+      for (Int j = 0; j < n_rregs; j++) {
+         if (!rreg_state[j].has_hlrs)
+            continue;
+         ppReg(univ->regs[j]);
+         vex_printf(" hinted\n");
+      }
+   }
+
+   /* Finally, copy the _la variant into the _db variant and
+      sort both by their respective fields. */
+   rreg_lrs_db = LibVEX_Alloc_inline(rreg_lrs_used * sizeof(RRegLR));
+   for (Int j = 0; j < rreg_lrs_used; j++)
+      rreg_lrs_db[j] = rreg_lrs_la[j];
+
+   sortRRLRarray( rreg_lrs_la, rreg_lrs_used, True /* by .live_after*/  );
+   sortRRLRarray( rreg_lrs_db, rreg_lrs_used, False/* by .dead_before*/ );
+
+   /* And set up the cursors. */
+   rreg_lrs_la_next = 0;
+   rreg_lrs_db_next = 0;
+
+   for (Int j = 1; j < rreg_lrs_used; j++) {
+      vassert(rreg_lrs_la[j-1].live_after  <= rreg_lrs_la[j].live_after);
+      vassert(rreg_lrs_db[j-1].dead_before <= rreg_lrs_db[j].dead_before);
+   }
+
+   /* ------ end of FINALISE RREG LIVE RANGES ------ */
+
+   if (DEBUG_REGALLOC) {
+      for (Int j = 0; j < n_vregs; j++) {
+         vex_printf("vreg %d:  la = %d,  db = %d\n", 
+                    j, vreg_lrs[j].live_after, vreg_lrs[j].dead_before );
+      }
+   }
+
+   if (DEBUG_REGALLOC) {
+      vex_printf("RRegLRs by LA:\n");
+      for (Int j = 0; j < rreg_lrs_used; j++) {
+         vex_printf("  ");
+         (*ppReg)(rreg_lrs_la[j].rreg);
+         vex_printf("      la = %d,  db = %d\n",
+                    rreg_lrs_la[j].live_after, rreg_lrs_la[j].dead_before );
+      }
+      vex_printf("RRegLRs by DB:\n");
+      for (Int j = 0; j < rreg_lrs_used; j++) {
+         vex_printf("  ");
+         (*ppReg)(rreg_lrs_db[j].rreg);
+         vex_printf("      la = %d,  db = %d\n",
+                    rreg_lrs_db[j].live_after, rreg_lrs_db[j].dead_before );
+      }
+   }
+
+   /* --------- Stage 3: allocate spill slots. --------- */
+
+   /* Each spill slot is 8 bytes long.  For vregs which take more than
+      64 bits to spill (classes Flt64 and Vec128), we have to allocate
+      two consecutive spill slots.  For 256 bit registers (class
+      Vec256), we have to allocate four consecutive spill slots.
+
+      For Vec128-class on PowerPC, the spill slot's actual address
+      must be 16-byte aligned.  Since the spill slot's address is
+      computed as an offset from the guest state pointer, and since
+      the user of the generated code must set that pointer to a
+      32-aligned value, we have the residual obligation here of
+      choosing a 16-aligned spill slot offset for Vec128-class values.
+      Since each spill slot is 8 bytes long, that means for
+      Vec128-class values we must allocate a spill slot number which
+      is zero mod 2.
+
+      Similarly, for Vec256 class on amd64, find a spill slot number
+      which is zero mod 4.  This guarantees it will be 32 byte
+      aligned, which isn't actually necessary on amd64 (we use movUpd
+      etc to spill), but seems like good practice.
+
+      Do a rank-based allocation of vregs to spill slot numbers.  We
+      put as few values as possible in spill slots, but nevertheless
+      need to have a spill slot available for all vregs, just in case.
+   */
+   /* Int max_ss_no = -1; */
+
+   local_memset(ss_busy_until_before, 0, sizeof(ss_busy_until_before));
+
+   for (Int j = 0; j < n_vregs; j++) {
+
+      /* True iff this vreg is unused.  In which case we also expect
+         that the reg_class field for it has not been set.  */
+      if (vreg_lrs[j].live_after == INVALID_INSTRNO) {
+         vassert(vreg_lrs[j].reg_class == HRcINVALID);
+         continue;
+      }
+
+      /* The spill slots are 64 bits in size.  As per the comment on
+         definition of HRegClass in host_generic_regs.h, that means,
+         to spill a vreg of class Flt64 or Vec128, we'll need to find
+         two adjacent spill slots to use.  For Vec256, we'll need to
+         find four adjacent slots to use.  Note, this logic needs to be
+         kept in sync with the size info on the definition of
+         HRegClass. */
+      Int ss_no = -1;
+      switch (vreg_lrs[j].reg_class) {
+
+         case HRcVec128: case HRcFlt64:
+            /* Find two adjacent free slots which between them
+               provide up to 128 bits in which to spill the vreg.
+               Since we are trying to find an even:odd pair, move
+               along in steps of 2 (slots). */
+            for (ss_no = 0; ss_no < N_SPILL64S-1; ss_no += 2)
+               if (ss_busy_until_before[ss_no+0] <= vreg_lrs[j].live_after
+                   && ss_busy_until_before[ss_no+1] <= vreg_lrs[j].live_after)
+                  break;
+            if (ss_no >= N_SPILL64S-1) {
+               vpanic("LibVEX_N_SPILL_BYTES is too low.  " 
+                      "Increase and recompile.");
+            }
+            ss_busy_until_before[ss_no+0] = vreg_lrs[j].dead_before;
+            ss_busy_until_before[ss_no+1] = vreg_lrs[j].dead_before;
+            break;
+
+         default:
+            /* The ordinary case -- just find a single spill slot. */
+            /* Find the lowest-numbered spill slot which is available
+               at the start point of this interval, and assign the
+               interval to it. */
+            for (ss_no = 0; ss_no < N_SPILL64S; ss_no++)
+               if (ss_busy_until_before[ss_no] <= vreg_lrs[j].live_after)
+                  break;
+            if (ss_no == N_SPILL64S) {
+               vpanic("LibVEX_N_SPILL_BYTES is too low.  " 
+                      "Increase and recompile.");
+            }
+            ss_busy_until_before[ss_no] = vreg_lrs[j].dead_before;
+            break;
+
+      } /* switch (vreg_lrs[j].reg_class) */
+
+      /* This reflects LibVEX's hard-wired knowledge of the baseBlock
+         layout: the guest state, then two equal sized areas following
+         it for two sets of shadow state, and then the spill area. */
+      vreg_lrs[j].spill_offset = toShort(guest_sizeB * 3 + ss_no * 8);
+
+      /* Independent check that we've made a sane choice of slot */
+      sanity_check_spill_offset( &vreg_lrs[j] );
+      /* if (j > max_ss_no) */
+      /*    max_ss_no = j; */
+   }
+
+   if (0) {
+      vex_printf("\n\n");
+      for (Int j = 0; j < n_vregs; j++)
+         vex_printf("vreg %d    --> spill offset %d\n",
+                    j, vreg_lrs[j].spill_offset);
+   }
+
+   /* --------- Stage 4: establish rreg preferences --------- */
+
+   /* It may be advantageous to allocate certain vregs to specific
+      rregs, as a way of avoiding reg-reg moves later.  Here we
+      establish which, if any, rreg each vreg would prefer to be in.
+      Note that this constrains the allocator -- ideally we end up
+      with as few as possible vregs expressing a preference.  
+
+      This is an optimisation: if the .preferred_rreg field is never
+      set to anything different from INVALID_HREG, the allocator still
+      works. */
+
+   /* 30 Dec 04: removed this mechanism as it does not seem to
+      help. */
+
+   /* --------- Stage 5: process instructions --------- */
+
+   /* This is the main loop of the allocator.  First, we need to
+      correctly set up our running state, which tracks the status of
+      each real register. */
+
+   /* ------ BEGIN: Process each insn in turn. ------ */
+
+   for (Int ii = 0; ii < instrs_in->arr_used; ii++) {
+
+      if (DEBUG_REGALLOC) {
+         vex_printf("\n====----====---- Insn %d ----====----====\n", ii);
+         vex_printf("---- ");
+         (*ppInstr)(instrs_in->arr[ii], mode64);
+         vex_printf("\n\nInitial state:\n");
+         PRINT_STATE;
+         vex_printf("\n");
+      }
+
+      /* ------------ Sanity checks ------------ */
+
+      /* Sanity checks are expensive.  So they are done only once
+         every 13 instructions, and just before the last
+         instruction. */
+      do_sanity_check
+         = toBool(
+              False /* Set to True for sanity checking of all insns. */
+              || ii == instrs_in->arr_used-1
+              || (ii > 0 && (ii % 13) == 0)
+           );
+
+      if (do_sanity_check) {
+
+         /* Sanity check 1: all rregs with a hard live range crossing
+            this insn must be marked as unavailable in the running
+            state. */
+         for (Int j = 0; j < rreg_lrs_used; j++) {
+            if (rreg_lrs_la[j].live_after < ii
+                && ii < rreg_lrs_la[j].dead_before) {
+               /* ii is the middle of a hard live range for some real
+                  reg.  Check it's marked as such in the running
+                  state. */
+               HReg reg = rreg_lrs_la[j].rreg;
+
+               if (0) {
+                  vex_printf("considering la %d .. db %d   reg = ", 
+                             rreg_lrs_la[j].live_after, 
+                             rreg_lrs_la[j].dead_before);
+                  (*ppReg)(reg);
+                  vex_printf("\n");
+               }
+
+               /* assert that this rreg is marked as unavailable */
+               vassert(!hregIsVirtual(reg));
+               vassert(rreg_state[hregIndex(reg)].disp == Unavail);
+            }
+         }
+
+         /* Sanity check 2: conversely, all rregs marked as
+            unavailable in the running rreg_state must have a
+            corresponding hard live range entry in the rreg_lrs
+            array. */
+         for (Int j = 0; j < n_rregs; j++) {
+            vassert(rreg_state[j].disp == Bound
+                    || rreg_state[j].disp == Free
+                    || rreg_state[j].disp == Unavail);
+            if (rreg_state[j].disp != Unavail)
+               continue;
+            Int k;
+            for (k = 0; k < rreg_lrs_used; k++) {
+               HReg reg = rreg_lrs_la[k].rreg;
+               vassert(!hregIsVirtual(reg));
+               if (hregIndex(reg) == j
+                   && rreg_lrs_la[k].live_after < ii 
+                   && ii < rreg_lrs_la[k].dead_before) 
+                  break;
+            }
+            /* If this vassertion fails, we couldn't find a
+               corresponding HLR. */
+            vassert(k < rreg_lrs_used);
+         }
+
+         /* Sanity check 3: all vreg-rreg bindings must bind registers
+            of the same class. */
+         for (Int j = 0; j < n_rregs; j++) {
+            if (rreg_state[j].disp != Bound) {
+               vassert(rreg_state[j].eq_spill_slot == False);
+               continue;
+            }
+            vassert(hregClass(univ->regs[j]) 
+                    == hregClass(rreg_state[j].vreg));
+            vassert( hregIsVirtual(rreg_state[j].vreg));
+         }
+
+         /* Sanity check 4: the vreg_state and rreg_state
+            mutually-redundant mappings are consistent.  If
+            rreg_state[j].vreg points at some vreg_state entry then
+            that vreg_state entry should point back at
+            rreg_state[j]. */
+         for (Int j = 0; j < n_rregs; j++) {
+            if (rreg_state[j].disp != Bound)
+               continue;
+            Int k = hregIndex(rreg_state[j].vreg);
+            vassert(IS_VALID_VREGNO(k));
+            vassert(vreg_state[k] == j);
+         }
+         for (Int j = 0; j < n_vregs; j++) {
+            Int k = vreg_state[j];
+            if (k == INVALID_RREG_NO)
+               continue;
+            vassert(IS_VALID_RREGNO(k));
+            vassert(rreg_state[k].disp == Bound);
+            vassert(hregIndex(rreg_state[k].vreg) == j);
+         }
+
+      } /* if (do_sanity_check) */
+
+      /* ------------ end of Sanity checks ------------ */
+
+      /* Do various optimisations pertaining to register coalescing
+         and preferencing:
+            MOV  v <-> v   coalescing (done here).
+            MOV  v <-> r   coalescing (not yet, if ever)
+      */
+      /* If doing a reg-reg move between two vregs, and the src's live
+         range ends here and the dst's live range starts here, bind
+         the dst to the src's rreg, and that's all. */
+      HReg vregS = INVALID_HREG;
+      HReg vregD = INVALID_HREG;
+      if ( (*isMove)( instrs_in->arr[ii], &vregS, &vregD ) ) {
+         if (!hregIsVirtual(vregS)) goto cannot_coalesce;
+         if (!hregIsVirtual(vregD)) goto cannot_coalesce;
+         /* Check that *isMove is not telling us a bunch of lies ... */
+         vassert(hregClass(vregS) == hregClass(vregD));
+         Int k = hregIndex(vregS);
+         Int m = hregIndex(vregD);
+         vassert(IS_VALID_VREGNO(k));
+         vassert(IS_VALID_VREGNO(m));
+         if (vreg_lrs[k].dead_before != ii + 1) goto cannot_coalesce;
+         if (vreg_lrs[m].live_after != ii) goto cannot_coalesce;
+         if (DEBUG_REGALLOC) {
+         vex_printf("COALESCE ");
+            (*ppReg)(vregS);
+            vex_printf(" -> ");
+            (*ppReg)(vregD);
+            vex_printf("\n\n");
+         }
+         /* Find the state entry for vregS. */
+         Int n = vreg_state[k]; /* k is the index of vregS */
+         if (n == INVALID_RREG_NO) {
+            /* vregS is not currently in a real register.  So we can't
+               do the coalescing.  Give up. */
+            goto cannot_coalesce;
+         }
+         vassert(IS_VALID_RREGNO(n));
+
+         /* Finally, we can do the coalescing.  It's trivial -- merely
+            claim vregS's register for vregD. */
+         rreg_state[n].vreg = vregD;
+         vassert(IS_VALID_VREGNO(hregIndex(vregD)));
+         vassert(IS_VALID_VREGNO(hregIndex(vregS)));
+         vreg_state[hregIndex(vregD)] = toShort(n);
+         vreg_state[hregIndex(vregS)] = INVALID_RREG_NO;
+
+         /* This rreg has become associated with a different vreg and
+            hence with a different spill slot.  Play safe. */
+         rreg_state[n].eq_spill_slot = False;
+
+         /* Move on to the next insn.  We skip the post-insn stuff for
+            fixed registers, since this move should not interact with
+            them in any way. */
+         continue;
+      }
+     cannot_coalesce:
+
+      /* ------ Free up rregs bound to dead vregs ------ */
+
+      /* Look for vregs whose live range has just ended, and 
+	 mark the associated rreg as free. */
+
+      for (Int j = 0; j < n_rregs; j++) {
+         if (rreg_state[j].disp != Bound)
+            continue;
+         UInt vregno = hregIndex(rreg_state[j].vreg);
+         vassert(IS_VALID_VREGNO(vregno));
+         if (vreg_lrs[vregno].dead_before <= ii) {
+            rreg_state[j].disp = Free;
+            rreg_state[j].eq_spill_slot = False;
+            Int m = hregIndex(rreg_state[j].vreg);
+            vassert(IS_VALID_VREGNO(m));
+            vreg_state[m] = INVALID_RREG_NO;
+            if (DEBUG_REGALLOC) {
+               vex_printf("free up "); 
+               (*ppReg)(univ->regs[j]); 
+               vex_printf("\n");
+            }
+         }
+      }
+
+      /* ------ Pre-instruction actions for fixed rreg uses ------ */
+
+      /* Now we have to deal with rregs which are about to be made
+         live by this instruction -- in other words, are entering into
+         one of their live ranges.  If any such rreg holds a vreg, we
+         will have to free up the rreg.  The simplest solution which
+         is correct is to spill the rreg.
+
+         Note we could do better:
+         * Could move it into some other free rreg, if one is available 
+
+         Do this efficiently, by incrementally stepping along an array
+         of rreg HLRs that are known to be sorted by start point
+         (their .live_after field).
+      */
+      while (True) {
+         vassert(rreg_lrs_la_next >= 0);
+         vassert(rreg_lrs_la_next <= rreg_lrs_used);
+         if (rreg_lrs_la_next == rreg_lrs_used)
+            break; /* no more real reg live ranges to consider */
+         if (ii < rreg_lrs_la[rreg_lrs_la_next].live_after)
+            break; /* next live range does not yet start */
+         vassert(ii == rreg_lrs_la[rreg_lrs_la_next].live_after);
+         /* rreg_lrs_la[rreg_lrs_la_next].rreg needs to be freed up.
+            Find the associated rreg_state entry. */
+         /* Note, re ii == rreg_lrs_la[rreg_lrs_la_next].live_after.
+            Real register live ranges are guaranteed to be well-formed
+            in that they start with a write to the register -- Stage 2
+            rejects any code not satisfying this.  So the correct
+            question to ask is whether
+            rreg_lrs_la[rreg_lrs_la_next].live_after == ii, that is,
+            whether the reg becomes live after this insn -- rather
+            than before it. */
+         if (DEBUG_REGALLOC) {
+            vex_printf("need to free up rreg: ");
+            (*ppReg)(rreg_lrs_la[rreg_lrs_la_next].rreg);
+            vex_printf("\n\n");
+         }
+         Int k = hregIndex(rreg_lrs_la[rreg_lrs_la_next].rreg);
+
+         /* If this fails, we don't have an entry for this rreg.
+            Which we should. */
+         vassert(IS_VALID_RREGNO(k));
+         Int m = hregIndex(rreg_state[k].vreg);
+         if (rreg_state[k].disp == Bound) {
+            /* Yes, there is an associated vreg.  Spill it if it's
+               still live. */
+            vassert(IS_VALID_VREGNO(m));
+            vreg_state[m] = INVALID_RREG_NO;
+            if (vreg_lrs[m].dead_before > ii) {
+               vassert(vreg_lrs[m].reg_class != HRcINVALID);
+               if ((!eq_spill_opt) || !rreg_state[k].eq_spill_slot) {
+                  HInstr* spill1 = NULL;
+                  HInstr* spill2 = NULL;
+                  (*genSpill)( &spill1, &spill2, univ->regs[k],
+                               vreg_lrs[m].spill_offset, mode64 );
+                  vassert(spill1 || spill2); /* can't both be NULL */
+                  if (spill1)
+                     EMIT_INSTR(spill1);
+                  if (spill2)
+                     EMIT_INSTR(spill2);
+               }
+               rreg_state[k].eq_spill_slot = True;
+            }
+         }
+         rreg_state[k].disp = Unavail;
+         rreg_state[k].vreg = INVALID_HREG;
+         rreg_state[k].eq_spill_slot = False;
+
+         /* check for further rregs entering HLRs at this point */
+         rreg_lrs_la_next++;
+      }
+
+      if (DEBUG_REGALLOC) {
+         vex_printf("After pre-insn actions for fixed regs:\n");
+         PRINT_STATE;
+         vex_printf("\n");
+      }
+
+      /* ------ Deal with the current instruction. ------ */
+
+      /* Finally we can begin the processing of this instruction
+         itself.  The aim is to free up enough rregs for this insn.
+         This may generate spill stores since we may have to evict
+         some vregs currently in rregs.  Also generates spill loads.
+         We also build up the final vreg->rreg mapping to be applied
+         to the insn. */
+
+      initHRegRemap(&remap);
+
+      /* ------------ BEGIN directReload optimisation ----------- */
+
+      /* If the instruction reads exactly one vreg which is currently
+         in a spill slot, and this is last use of that vreg, see if we
+         can convert the instruction into one that reads directly from
+         the spill slot.  This is clearly only possible for x86 and
+         amd64 targets, since ppc and arm are load-store
+         architectures.  If successful, replace instrs_in->arr[ii]
+         with this new instruction, and recompute its reg usage, so
+         that the change is invisible to the standard-case handling
+         that follows. */
+      
+      if (directReload && reg_usage_arr[ii].n_vRegs <= 2) {
+         Bool  debug_direct_reload = False;
+         HReg  cand     = INVALID_HREG;
+         Bool  nreads   = 0;
+         Short spilloff = 0;
+
+         for (Int j = 0; j < reg_usage_arr[ii].n_vRegs; j++) {
+
+            HReg vreg = reg_usage_arr[ii].vRegs[j];
+            vassert(hregIsVirtual(vreg));
+
+            if (reg_usage_arr[ii].vMode[j] == HRmRead) {
+               nreads++;
+               Int m = hregIndex(vreg);
+               vassert(IS_VALID_VREGNO(m));
+               Int k = vreg_state[m];
+               if (!IS_VALID_RREGNO(k)) {
+                  /* ok, it is spilled.  Now, is this its last use? */
+                  vassert(vreg_lrs[m].dead_before >= ii+1);
+                  if (vreg_lrs[m].dead_before == ii+1
+                      && hregIsInvalid(cand)) {
+                     spilloff = vreg_lrs[m].spill_offset;
+                     cand = vreg;
+                  }
+               }
+            }
+         }
+
+         if (nreads == 1 && ! hregIsInvalid(cand)) {
+            HInstr* reloaded;
+            if (reg_usage_arr[ii].n_vRegs == 2)
+               vassert(! sameHReg(reg_usage_arr[ii].vRegs[0],
+                                  reg_usage_arr[ii].vRegs[1]));
+
+            reloaded = directReload ( instrs_in->arr[ii], cand, spilloff );
+            if (debug_direct_reload && !reloaded) {
+               vex_printf("[%3d] ", spilloff); ppHReg(cand); vex_printf(" "); 
+               ppInstr(instrs_in->arr[ii], mode64); 
+            }
+            if (reloaded) {
+               /* Update info about the insn, so it looks as if it had
+                  been in this form all along. */
+               instrs_in->arr[ii] = reloaded;
+               (*getRegUsage)( &reg_usage_arr[ii], instrs_in->arr[ii], mode64 );
+               if (debug_direct_reload && !reloaded) {
+                  vex_printf("  -->  ");
+                  ppInstr(reloaded, mode64);
+               }
+            }
+
+            if (debug_direct_reload && !reloaded)
+               vex_printf("\n");
+         }
+
+      }
+
+      /* ------------ END directReload optimisation ------------ */
+
+      /* for each virtual reg mentioned in the insn ... */
+      for (Int j = 0; j < reg_usage_arr[ii].n_vRegs; j++) {
+
+         HReg vreg = reg_usage_arr[ii].vRegs[j];
+         vassert(hregIsVirtual(vreg));
+
+         if (0) {
+            vex_printf("considering "); (*ppReg)(vreg); vex_printf("\n");
+         }
+
+         /* Now we're trying to find a rreg for "vreg".  First of all,
+            if it already has an rreg assigned, we don't need to do
+            anything more.  Inspect the current state to find out. */
+         Int m = hregIndex(vreg);
+         vassert(IS_VALID_VREGNO(m));
+         Int n = vreg_state[m];
+         if (IS_VALID_RREGNO(n)) {
+            vassert(rreg_state[n].disp == Bound);
+            addToHRegRemap(&remap, vreg, univ->regs[n]);
+            /* If this rreg is written or modified, mark it as different
+               from any spill slot value. */
+            if (reg_usage_arr[ii].vMode[j] != HRmRead)
+               rreg_state[n].eq_spill_slot = False;
+            continue;
+         } else {
+            vassert(n == INVALID_RREG_NO);
+         }
+
+         /* No luck.  The next thing to do is see if there is a
+            currently free rreg available, of the correct class.  If
+            so, bag it.  NOTE, we could improve this by selecting an
+            rreg for which the next live-range event is as far ahead
+            as possible. */
+         Int k_suboptimal = -1;
+         Int k;
+         for (k = 0; k < n_rregs; k++) {
+            if (rreg_state[k].disp != Free
+                || hregClass(univ->regs[k]) != hregClass(vreg))
+               continue;
+            if (rreg_state[k].has_hlrs) {
+               /* Well, at least we can use k_suboptimal if we really
+                  have to.  Keep on looking for a better candidate. */
+               k_suboptimal = k;
+            } else {
+               /* Found a preferable reg.  Use it. */
+               k_suboptimal = -1;
+               break;
+            }
+         }
+         if (k_suboptimal >= 0)
+            k = k_suboptimal;
+
+         if (k < n_rregs) {
+            rreg_state[k].disp = Bound;
+            rreg_state[k].vreg = vreg;
+            Int p = hregIndex(vreg);
+            vassert(IS_VALID_VREGNO(p));
+            vreg_state[p] = toShort(k);
+            addToHRegRemap(&remap, vreg, univ->regs[k]);
+            /* Generate a reload if needed.  This only creates needed
+               reloads because the live range builder for vregs will
+               guarantee that the first event for a vreg is a write.
+               Hence, if this reference is not a write, it cannot be
+               the first reference for this vreg, and so a reload is
+               indeed needed. */
+            if (reg_usage_arr[ii].vMode[j] != HRmWrite) {
+               vassert(vreg_lrs[p].reg_class != HRcINVALID);
+               HInstr* reload1 = NULL;
+               HInstr* reload2 = NULL;
+               (*genReload)( &reload1, &reload2, univ->regs[k],
+                             vreg_lrs[p].spill_offset, mode64 );
+               vassert(reload1 || reload2); /* can't both be NULL */
+               if (reload1)
+                  EMIT_INSTR(reload1);
+               if (reload2)
+                  EMIT_INSTR(reload2);
+               /* This rreg is read or modified by the instruction.
+                  If it's merely read we can claim it now equals the
+                  spill slot, but not so if it is modified. */
+               if (reg_usage_arr[ii].vMode[j] == HRmRead) {
+                  rreg_state[k].eq_spill_slot = True;
+               } else {
+                  vassert(reg_usage_arr[ii].vMode[j] == HRmModify);
+                  rreg_state[k].eq_spill_slot = False;
+               }
+            } else {
+               rreg_state[k].eq_spill_slot = False;
+            }
+
+            continue;
+         }
+
+         /* Well, now we have no option but to spill a vreg.  It's
+            important to make a good choice of vreg to spill, and of
+            course we need to be careful not to spill a vreg which is
+            needed by this insn. */
+
+         /* First, mark in the rreg_state, those rregs which are not spill
+            candidates, due to holding a vreg mentioned by this
+            instruction.  Or being of the wrong class. */
+         for (k = 0; k < n_rregs; k++) {
+            rreg_state[k].is_spill_cand = False;
+            if (rreg_state[k].disp != Bound)
+               continue;
+            if (hregClass(univ->regs[k]) != hregClass(vreg))
+               continue;
+            rreg_state[k].is_spill_cand = True;
+            /* Note, the following loop visits only the virtual regs
+               mentioned by the instruction. */
+            for (m = 0; m < reg_usage_arr[ii].n_vRegs; m++) {
+               if (sameHReg(rreg_state[k].vreg, reg_usage_arr[ii].vRegs[m])) {
+                  rreg_state[k].is_spill_cand = False;
+                  break;
+               }
+            }
+         }
+
+         /* We can choose to spill any rreg satisfying
+            rreg_state[r].is_spill_cand (so to speak).  Choose r so that
+            the next use of its associated vreg is as far ahead as
+            possible, in the hope that this will minimise the number
+            of consequent reloads required. */
+         Int spillee
+            = findMostDistantlyMentionedVReg ( 
+                 reg_usage_arr, ii+1, instrs_in->arr_used, rreg_state, n_rregs );
+
+         if (spillee == -1) {
+            /* Hmmmmm.  There don't appear to be any spill candidates.
+               We're hosed. */
+            vex_printf("reg_alloc: can't find a register in class: ");
+            ppHRegClass(hregClass(vreg));
+            vex_printf("\n");
+            vpanic("reg_alloc: can't create a free register.");
+         }
+
+         /* Right.  So we're going to spill rreg_state[spillee]. */
+         vassert(IS_VALID_RREGNO(spillee));
+         vassert(rreg_state[spillee].disp == Bound);
+         /* check it's the right class */
+         vassert(hregClass(univ->regs[spillee]) == hregClass(vreg));
+         /* check we're not ejecting the vreg for which we are trying
+            to free up a register. */
+         vassert(! sameHReg(rreg_state[spillee].vreg, vreg));
+
+         m = hregIndex(rreg_state[spillee].vreg);
+         vassert(IS_VALID_VREGNO(m));
+
+         /* So here's the spill store.  Assert that we're spilling a
+            live vreg. */
+         vassert(vreg_lrs[m].dead_before > ii);
+         vassert(vreg_lrs[m].reg_class != HRcINVALID);
+         if ((!eq_spill_opt) || !rreg_state[spillee].eq_spill_slot) {
+            HInstr* spill1 = NULL;
+            HInstr* spill2 = NULL;
+            (*genSpill)( &spill1, &spill2, univ->regs[spillee],
+                         vreg_lrs[m].spill_offset, mode64 );
+            vassert(spill1 || spill2); /* can't both be NULL */
+            if (spill1)
+               EMIT_INSTR(spill1);
+            if (spill2)
+               EMIT_INSTR(spill2);
+         }
+
+         /* Update the rreg_state to reflect the new assignment for this
+            rreg. */
+         rreg_state[spillee].vreg = vreg;
+         vreg_state[m] = INVALID_RREG_NO;
+
+         rreg_state[spillee].eq_spill_slot = False; /* be safe */
+
+         m = hregIndex(vreg);
+         vassert(IS_VALID_VREGNO(m));
+         vreg_state[m] = toShort(spillee);
+
+         /* Now, if this vreg is being read or modified (as opposed to
+            written), we have to generate a reload for it. */
+         if (reg_usage_arr[ii].vMode[j] != HRmWrite) {
+            vassert(vreg_lrs[m].reg_class != HRcINVALID);
+            HInstr* reload1 = NULL;
+            HInstr* reload2 = NULL;
+            (*genReload)( &reload1, &reload2, univ->regs[spillee],
+                          vreg_lrs[m].spill_offset, mode64 );
+            vassert(reload1 || reload2); /* can't both be NULL */
+            if (reload1)
+               EMIT_INSTR(reload1);
+            if (reload2)
+               EMIT_INSTR(reload2);
+            /* This rreg is read or modified by the instruction.
+               If it's merely read we can claim it now equals the
+               spill slot, but not so if it is modified. */
+            if (reg_usage_arr[ii].vMode[j] == HRmRead) {
+               rreg_state[spillee].eq_spill_slot = True;
+            } else {
+               vassert(reg_usage_arr[ii].vMode[j] == HRmModify);
+               rreg_state[spillee].eq_spill_slot = False;
+            }
+         }
+
+         /* So after much twisting and turning, we have vreg mapped to
+            rreg_state[spillee].rreg.  Note that in the map. */
+         addToHRegRemap(&remap, vreg, univ->regs[spillee]);
+
+      } /* iterate over virtual registers in this instruction. */
+
+      /* We've finished clowning around with registers in this instruction.
+         Three results:
+         - the running rreg_state[] has been updated
+         - a suitable vreg->rreg mapping for this instruction has been 
+           constructed
+         - spill and reload instructions may have been emitted.
+
+        The final step is to apply the mapping to the instruction, 
+        and emit that.
+      */
+
+      /* NOTE, DESTRUCTIVELY MODIFIES instrs_in->arr[ii]. */
+      (*mapRegs)( &remap, instrs_in->arr[ii], mode64 );
+      EMIT_INSTR( instrs_in->arr[ii] );
+
+      if (DEBUG_REGALLOC) {
+         vex_printf("After dealing with current insn:\n");
+         PRINT_STATE;
+         vex_printf("\n");
+      }
+
+      /* ------ Post-instruction actions for fixed rreg uses ------ */
+
+      /* Now we need to check for rregs exiting fixed live ranges
+         after this instruction, and if so mark them as free. */
+      while (True) {
+         vassert(rreg_lrs_db_next >= 0);
+         vassert(rreg_lrs_db_next <= rreg_lrs_used);
+         if (rreg_lrs_db_next == rreg_lrs_used)
+            break; /* no more real reg live ranges to consider */
+         if (ii+1 < rreg_lrs_db[rreg_lrs_db_next].dead_before)
+            break; /* next live range does not yet start */
+         vassert(ii+1 == rreg_lrs_db[rreg_lrs_db_next].dead_before);
+         /* rreg_lrs_db[[rreg_lrs_db_next].rreg is exiting a hard live
+            range.  Mark it as such in the main rreg_state array. */
+         HReg reg = rreg_lrs_db[rreg_lrs_db_next].rreg;
+         vassert(!hregIsVirtual(reg));
+         Int k = hregIndex(reg);
+         vassert(IS_VALID_RREGNO(k));
+         vassert(rreg_state[k].disp == Unavail);
+         rreg_state[k].disp = Free;
+         rreg_state[k].vreg = INVALID_HREG;
+         rreg_state[k].eq_spill_slot = False;
+
+         /* check for further rregs leaving HLRs at this point */
+         rreg_lrs_db_next++;
+      }
+
+      if (DEBUG_REGALLOC) {
+         vex_printf("After post-insn actions for fixed regs:\n");
+         PRINT_STATE;
+         vex_printf("\n");
+      }
+
+   } /* iterate over insns */
+
+   /* ------ END: Process each insn in turn. ------ */
+
+   /* free(rreg_state); */
+   /* free(rreg_lrs); */
+   /* if (vreg_lrs) free(vreg_lrs); */
+
+   /* Paranoia */
+   vassert(rreg_lrs_la_next == rreg_lrs_used);
+   vassert(rreg_lrs_db_next == rreg_lrs_used);
+
+   return instrs_out;
+
+#  undef INVALID_INSTRNO
+#  undef EMIT_INSTR
+#  undef PRINT_STATE
+}
+
+
+
+/*---------------------------------------------------------------*/
+/*---                                       host_reg_alloc2.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_regs.c b/VEX/priv/host_generic_regs.c
new file mode 100644
index 0000000..a6192f7
--- /dev/null
+++ b/VEX/priv/host_generic_regs.c
@@ -0,0 +1,337 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                               host_generic_regs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+
+
+/*---------------------------------------------------------*/
+/*--- Representing HOST REGISTERS                       ---*/
+/*---------------------------------------------------------*/
+
+/* Print the symbolic name of a register class; panics on any value
+   outside the enum. */
+void ppHRegClass ( HRegClass hrc )
+{
+   const HChar* nm = NULL;
+   switch (hrc) {
+      case HRcInt32:  nm = "HRcInt32";  break;
+      case HRcInt64:  nm = "HRcInt64";  break;
+      case HRcFlt32:  nm = "HRcFlt32";  break;
+      case HRcFlt64:  nm = "HRcFlt64";  break;
+      case HRcVec64:  nm = "HRcVec64";  break;
+      case HRcVec128: nm = "HRcVec128"; break;
+      default: vpanic("ppHRegClass");
+   }
+   vex_printf("%s", nm);
+}
+
+/* Generic printing for registers. */
+void ppHReg ( HReg r )
+{
+   if (hregIsInvalid(r)) {
+      vex_printf("HReg_INVALID");
+      return;
+   }
+   /* Virtual registers are shown by index; real ones by hardware
+      encoding.  A vreg's encoding is always zero (pointless to show),
+      and a real reg's index is meaningless without the universe. */
+   const Bool   isV = hregIsVirtual(r);
+   const HChar* pfx = isV ? "v" : "";
+   const UInt   nn  = isV ? hregIndex(r) : hregEncoding(r);
+   const HChar* fmt = NULL;
+   switch (hregClass(r)) {
+      case HRcInt32:  fmt = "%%%sr%u"; break;
+      case HRcInt64:  fmt = "%%%sR%u"; break;
+      case HRcFlt32:  fmt = "%%%sF%u"; break;
+      case HRcFlt64:  fmt = "%%%sD%u"; break;
+      case HRcVec64:  fmt = "%%%sv%u"; break;
+      case HRcVec128: fmt = "%%%sV%u"; break;
+      default: vpanic("ppHReg");
+   }
+   vex_printf(fmt, pfx, nn);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Real register Universes.                          ---*/
+/*---------------------------------------------------------*/
+
+/* Nominally initialise (zero out) an RRegUniverse: no registers, none
+   allocable, every slot marked invalid. */
+void RRegUniverse__init ( /*OUT*/RRegUniverse* univ )
+{
+   /* The previous |*univ = (RRegUniverse){};| used an empty
+      brace-enclosed initializer, which is a GCC extension (only legal
+      in standard C as of C23) and was redundant anyway: all three
+      fields of the struct are explicitly initialised below. */
+   univ->size      = 0;
+   univ->allocable = 0;
+   for (UInt i = 0; i < N_RREGUNIVERSE_REGS; i++) {
+      univ->regs[i] = INVALID_HREG;
+   }
+}
+
+/* Check Real-Register-Universe invariants, asserting on any
+   violation.  All of these matter to the register allocator. */
+void RRegUniverse__check_is_sane ( const RRegUniverse* univ )
+{
+   vassert(univ->size > 0);
+   vassert(univ->size <= N_RREGUNIVERSE_REGS);
+   vassert(univ->allocable <= univ->size);
+   for (UInt i = 0; i < N_RREGUNIVERSE_REGS; i++) {
+      HReg reg = univ->regs[i];
+      if (i < univ->size) {
+         /* In-use slots hold real registers whose index equals the
+            slot number -- this array defines the index mapping. */
+         vassert(!hregIsInvalid(reg));
+         vassert(!hregIsVirtual(reg));
+         vassert(hregIndex(reg) == i);
+      } else {
+         /* Trailing unused slots must be marked invalid. */
+         vassert(hregIsInvalid(reg));
+      }
+   }
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Helpers for recording reg usage (for reg-alloc)   ---*/
+/*---------------------------------------------------------*/
+
+/* Print a usage table for debugging: one line per mentioned real
+   register (decoded from the rRead/rWritten bitmasks, interpreted via
+   |univ|), then one line per mentioned virtual register. */
+void ppHRegUsage ( const RRegUniverse* univ, HRegUsage* tab )
+{
+   /* This is going to fail miserably if N_RREGUNIVERSE_REGS exceeds
+      64.  So let's cause it to fail in an obvious way. */
+   vassert(N_RREGUNIVERSE_REGS == 64);
+
+   vex_printf("HRegUsage {\n");
+   /* First print the real regs */
+   for (UInt i = 0; i < N_RREGUNIVERSE_REGS; i++) {
+      Bool rRd = (tab->rRead    & (1ULL << i)) != 0;
+      Bool rWr = (tab->rWritten & (1ULL << i)) != 0;
+      const HChar* str = "Modify ";
+      /**/ if (!rRd && !rWr) { continue; }  /* reg not mentioned at all */
+      else if ( rRd && !rWr) { str = "Read   "; }
+      else if (!rRd &&  rWr) { str = "Write  "; }
+      /* else "Modify" is correct */
+      vex_printf("   %s ", str);
+      ppHReg(univ->regs[i]);
+      vex_printf("\n");
+   }
+   /* and now the virtual registers */
+   for (UInt i = 0; i < tab->n_vRegs; i++) {
+      const HChar* str = NULL;
+      switch (tab->vMode[i]) {
+         case HRmRead:   str = "Read   "; break;
+         case HRmWrite:  str = "Write  "; break;
+         case HRmModify: str = "Modify "; break;
+         default: vpanic("ppHRegUsage");
+      }
+      vex_printf("   %s ", str);
+      ppHReg(tab->vRegs[i]);
+      vex_printf("\n");
+   }
+   vex_printf("}\n");
+}
+
+
+/* Add a register to a usage table.  Combines incoming read uses with
+   existing write uses into a modify use, and vice versa.  Does not
+   create duplicate entries -- each reg is only mentioned once.  
+*/
+void addHRegUse ( HRegUsage* tab, HRegMode mode, HReg reg )
+{
+   /* Because real and virtual registers are represented differently,
+      they have completely different paths here. */
+   if (LIKELY(hregIsVirtual(reg))) {
+      /* Virtual register */
+      UInt i;
+      /* Find it ... */
+      for (i = 0; i < tab->n_vRegs; i++)
+         if (sameHReg(tab->vRegs[i], reg))
+            break;
+      if (i == tab->n_vRegs) {
+         /* Not found, add new entry. */
+         vassert(tab->n_vRegs < N_HREGUSAGE_VREGS);
+         tab->vRegs[tab->n_vRegs] = reg;
+         tab->vMode[tab->n_vRegs] = mode;
+         tab->n_vRegs++;
+      } else {
+         /* Found: combine or ignore. */
+         /* This is a greatest-lower-bound operation in the poset:
+
+               R   W
+                \ /
+                 M
+
+            Need to do: tab->mode[i] = GLB(tab->mode, mode).  In this
+            case very simple -- if tab->mode[i] != mode then result must
+            be M.
+         */
+         if (tab->vMode[i] == mode) {
+            /* duplicate, ignore */
+         } else {
+            tab->vMode[i] = HRmModify;
+         }
+      }
+   } else {
+      /* Real register: recorded as bits in the rRead/rWritten masks,
+         indexed by the register's universe index.  A Modify sets both
+         bits, so the GLB combining above happens implicitly here. */
+      UInt ix = hregIndex(reg);
+      vassert(ix < N_RREGUNIVERSE_REGS);
+      ULong mask = 1ULL << ix;
+      switch (mode) {
+         case HRmRead:   tab->rRead |= mask; break;
+         case HRmWrite:  tab->rWritten |= mask; break;
+         case HRmModify: tab->rRead |= mask; tab->rWritten |= mask; break;
+         default: vassert(0);
+      }
+   }
+}
+
+/* Does |tab| mention |reg| at all (read, written or modified)? */
+Bool HRegUsage__contains ( const HRegUsage* tab, HReg reg )
+{
+   vassert(!hregIsInvalid(reg));
+   if (!hregIsVirtual(reg)) {
+      /* Real: mentioned iff its bit is set in either bitmask. */
+      UInt ix = hregIndex(reg);
+      vassert(ix < N_RREGUNIVERSE_REGS);
+      ULong mentioned = tab->rRead | tab->rWritten;
+      return (mentioned & (1ULL << ix)) != 0;
+   }
+   /* Virtual: linear scan of the (small) vreg array. */
+   for (UInt k = 0; k < tab->n_vRegs; k++) {
+      if (sameHReg(reg, tab->vRegs[k]))
+         return True;
+   }
+   return False;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Indicating register remappings (for reg-alloc)    ---*/
+/*---------------------------------------------------------*/
+
+/* Print a vreg -> rreg remapping table, for debugging. */
+void ppHRegRemap ( HRegRemap* map )
+{
+   vex_printf("HRegRemap {\n");
+   for (Int k = 0; k < map->n_used; k++) {
+      vex_printf("   ");
+      ppHReg(map->orig[k]);
+      vex_printf("  -->  ");
+      ppHReg(map->replacement[k]);
+      vex_printf("\n");
+   }
+   vex_printf("}\n");
+}
+
+
+/* Add the binding |orig| -> |replacement| to the map.  |orig| must be
+   a virtual register, |replacement| a real one, and each vreg may be
+   bound at most once; violations panic.  (NOTE(review): the panic
+   messages say "addToHRegMap"; left byte-identical here.) */
+void addToHRegRemap ( HRegRemap* map, HReg orig, HReg replacement )
+{
+   Int i;
+   for (i = 0; i < map->n_used; i++)
+      if (sameHReg(map->orig[i], orig))
+         vpanic("addToHRegMap: duplicate entry");
+   if (!hregIsVirtual(orig))
+      vpanic("addToHRegMap: orig is not a vreg");
+   if (hregIsVirtual(replacement))
+      vpanic("addToHRegMap: replacement is a vreg");
+
+   /* Capacity check.  Was |map->n_used+1 < N_HREG_REMAP|, an
+      off-by-one which rejected a legal insertion into the final slot
+      and so silently wasted one of the N_HREG_REMAP entries. */
+   vassert(map->n_used < N_HREG_REMAP);
+   map->orig[map->n_used]        = orig;
+   map->replacement[map->n_used] = replacement;
+   map->n_used++;
+}
+
+
+/* Apply the map to |orig|.  Real registers are never remapped and
+   come back unchanged; an unmapped virtual register is a panic. */
+HReg lookupHRegRemap ( HRegRemap* map, HReg orig )
+{
+   if (!hregIsVirtual(orig))
+      return orig;
+   for (Int k = 0; k < map->n_used; k++) {
+      if (sameHReg(map->orig[k], orig))
+         return map->replacement[k];
+   }
+   vpanic("lookupHRegRemap: not found");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Abstract instructions                             ---*/
+/*---------------------------------------------------------*/
+
+/* Allocate a new, empty instruction array with a small initial
+   capacity (4); addHInstr grows it by doubling on demand.  Storage
+   comes from LibVEX_Alloc_inline -- presumably VEX's transient
+   allocation area, so no explicit free (confirm against main_util). */
+HInstrArray* newHInstrArray ( void )
+{
+   HInstrArray* ha = LibVEX_Alloc_inline(sizeof(HInstrArray));
+   ha->arr_size = 4;
+   ha->arr_used = 0;
+   ha->arr      = LibVEX_Alloc_inline(ha->arr_size * sizeof(HInstr*));
+   ha->n_vregs  = 0;
+   return ha;
+}
+
+/* Out-of-line grow path for addHInstr: double the capacity, copy the
+   existing entries across, then retry the append (which now takes the
+   fast path).  Only ever called when the array is exactly full. */
+__attribute__((noinline))
+void addHInstr_SLOW ( HInstrArray* ha, HInstr* instr )
+{
+   vassert(ha->arr_used == ha->arr_size);
+   Int      newSize = ha->arr_size * 2;
+   HInstr** newArr  = LibVEX_Alloc_inline(newSize * sizeof(HInstr*));
+   for (Int k = 0; k < ha->arr_size; k++)
+      newArr[k] = ha->arr[k];
+   ha->arr      = newArr;
+   ha->arr_size = newSize;
+   addHInstr(ha, instr);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- C-Call return-location actions                    ---*/
+/*---------------------------------------------------------*/
+
+/* Print a C-call return-location descriptor.  The stack offset is
+   shown only for the SpRel variants, where it is meaningful. */
+void ppRetLoc ( RetLoc ska )
+{
+   switch (ska.pri) {
+      case RLPri_INVALID:
+         vex_printf("RLPri_INVALID"); return;
+      case RLPri_None:
+         vex_printf("RLPri_None");    return;
+      case RLPri_Int:
+         vex_printf("RLPri_Int");     return;
+      case RLPri_2Int:
+         vex_printf("RLPri_2Int");    return;
+      case RLPri_V128SpRel:
+         vex_printf("RLPri_V128SpRel(%d)", ska.spOff); return;
+      case RLPri_V256SpRel:
+         vex_printf("RLPri_V256SpRel(%d)", ska.spOff); return;
+      default:
+         vpanic("ppRetLoc");
+   }
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                 host_generic_regs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_regs.h b/VEX/priv/host_generic_regs.h
new file mode 100644
index 0000000..b6de07b
--- /dev/null
+++ b/VEX/priv/host_generic_regs.h
@@ -0,0 +1,486 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                               host_generic_regs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __VEX_HOST_GENERIC_REGS_H
+#define __VEX_HOST_GENERIC_REGS_H
+
+#include "libvex_basictypes.h"
+
+
+/*---------------------------------------------------------*/
+/*--- Representing HOST REGISTERS                       ---*/
+/*---------------------------------------------------------*/
+
+/* Host registers.  Stuff to represent:
+
+   - The register index.  This is a zero-based, sequential index that
+     facilitates indexing into arrays or virtual or real registers.
+     Virtual and real registers both have indices starting at zero.
+     Interpreting a real register index requires having the host's
+     RRegUniverse to hand.
+
+   - The register's hardware encoding.  This applies only for real
+     registers and should be zero for virtual registers.  This is the
+     number as used in a target architecture encoding.
+
+   - The register class
+
+   - Whether or not the register is a virtual reg.
+
+   Registers are sized so as to fit into 32 bits.
+
+   Note that since the class field is never 1111b, no valid register
+   can have the value INVALID_HREG.
+
+   There are currently 6 register classes:
+
+     int32 int64 float32 float64 simd64 simd128
+*/
+
+/* Registers are represented as 32 bit integers, with the following layout:
+
+   31     30..27  26..20  19..0
+   isV:1  rc:4    enc:7   ix:20
+
+   where
+      UInt      ix:20;   // Zero based index
+      UInt      enc:7;   // Hardware encoding number
+      HRegClass rc:4;    // the register's HRegClass
+      Bool      isV:1;   // is it a virtual register?
+
+   The obvious thing to do here would be to use bitfields.  But gcc
+   seems to have problems constant folding calls to mkHReg() with all
+   4 parameters constant to a 32 bit number, when using bitfields.
+   Hence the use of the traditional shift-and-mask by-hand bitfields
+   instead.
+*/
+typedef  struct { UInt u32; }  HReg;
+
+/* HRegClass describes host register classes which the instruction
+   selectors can speak about.  We would not expect all of them to be
+   available on any specific host.  For example on x86, the available
+   classes are: Int32, Flt64, Vec128 only.
+
+   IMPORTANT NOTE: host_generic_reg_alloc2.c needs to know how much space is
+   needed to spill each class of register.  It allocates the following
+   amount of space:
+
+      HRcInt32     64 bits
+      HRcInt64     64 bits
+      HRcFlt32     64 bits
+      HRcFlt64     128 bits (on x86 these are spilled by fstpt/fldt and
+                             so won't fit in a 64-bit slot)
+      HRcVec64     64 bits
+      HRcVec128    128 bits
+
+   If you add another regclass, you must remember to update
+   host_generic_reg_alloc2.c accordingly.  
+
+   When adding entries to enum HRegClass, do not use any value > 14 or < 1.
+*/
+typedef
+   enum { 
+      HRcINVALID=1,   /* NOT A VALID REGISTER CLASS */
+      HRcInt32=3,     /* 32-bit int */
+      HRcInt64=4,     /* 64-bit int */
+      HRcFlt32=5,     /* 32-bit float */
+      HRcFlt64=6,     /* 64-bit float */
+      HRcVec64=7,     /* 64-bit SIMD */
+      HRcVec128=8     /* 128-bit SIMD */
+   }
+   HRegClass;
+
+extern void ppHRegClass ( HRegClass );
+
+
+/* Print an HReg in a generic (non-target-specific) way. */
+extern void ppHReg ( HReg );
+
+/* Construct.  The goal here is that compiler can fold this down to a
+   constant in the case where the four arguments are constants, which
+   is often the case. */
+static inline HReg mkHReg ( Bool virtual, HRegClass rc, UInt enc, UInt ix )
+{
+   /* Range-check each field against the documented bit layout:
+      ix:20, enc:7, rc:4, isV:1. */
+   vassert(ix <= 0xFFFFF);
+   vassert(enc <= 0x7F);
+   vassert(((UInt)rc) <= 0xF);
+   vassert(((UInt)virtual) <= 1);
+   /* Virtual registers carry no hardware encoding. */
+   if (virtual) vassert(enc == 0);
+   /* NOTE(review): |virtual| is a C++ keyword; the parameter would
+      need renaming if this header were ever included from C++. */
+   HReg r;
+   r.u32 = ((((UInt)virtual) & 1)       << 31)  |
+           ((((UInt)rc)      & 0xF)     << 27)  |
+           ((((UInt)enc)     & 0x7F)    << 20)  |
+           ((((UInt)ix)      & 0xFFFFF) << 0);
+   return r;
+}
+
+/* Extract the register class (bits 30..27).  Asserts the stored value
+   is a valid, non-INVALID class. */
+static inline HRegClass hregClass ( HReg r )
+{
+   HRegClass rc = (HRegClass)((r.u32 >> 27) & 0xF);
+   vassert(rc >= HRcInt32 && rc <= HRcVec128);
+   return rc;
+}
+
+/* Extract the zero-based index (bits 19..0). */
+static inline UInt hregIndex ( HReg r )
+{
+   return r.u32 & 0xFFFFF;
+}
+
+/* Extract the hardware encoding (bits 26..20); always zero for
+   virtual registers, per mkHReg. */
+static inline UInt hregEncoding ( HReg r )
+{
+   return (r.u32 >> 20) & 0x7F;
+}
+
+/* Is this a virtual register (bit 31)? */
+static inline Bool hregIsVirtual ( HReg r )
+{
+   return toBool((r.u32 >> 31) & 1);
+}
+
+/* Equality is bitwise equality of the whole 32-bit representation. */
+static inline Bool sameHReg ( HReg r1, HReg r2 )
+{
+   return toBool(r1.u32 == r2.u32);
+}
+
+/* All-ones can never be a valid register, since the class field is
+   never 1111b (see the layout comment above). */
+static const HReg INVALID_HREG = { .u32 = 0xFFFFFFFF };
+
+static inline Bool hregIsInvalid ( HReg r )
+{
+   return sameHReg(r, INVALID_HREG);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Real register Universes.                          ---*/
+/*---------------------------------------------------------*/
+
+/* A "Real Register Universe" is a read-only structure that contains
+   all information about real registers on a given host.  It serves
+   several purposes:
+
+   * defines the mapping from real register indices to the registers
+     themselves
+
+   * defines the size of the initial section of that mapping that is
+     available to the register allocator for use, so that the register
+     allocator can treat the registers under its control as a zero
+     based, contiguous array.  This is important for its efficiency.
+
+   * gives meaning to RRegSets, which otherwise would merely be a
+     bunch of bits.
+
+   This is a big structure, but it's readonly, and we expect to
+   allocate only one instance for each run of Valgrind.  It is sized
+   so as to be able to deal with up to 64 real registers.  AFAICS none
+   of the back ends actually mention more than 64, despite the fact
+   that many of the host architectures have more than 64 registers
+   when all classes are taken into consideration.
+*/
+
+#define N_RREGUNIVERSE_REGS 64
+
+typedef
+   struct {
+      /* Total number of registers in this universe .. */
+      UInt size;
+      /* .. of which the first |allocable| are available to regalloc. */
+      UInt allocable;
+      /* The registers themselves.  All must be real registers, and
+         all must have their index number (.s.ix) equal to the array
+         index here, since this is the only place where we map index
+         numbers to actual registers. */
+      HReg regs[N_RREGUNIVERSE_REGS];
+   }
+   RRegUniverse;
+
+/* Nominally initialise (zero out) an RRegUniverse. */
+void RRegUniverse__init ( /*OUT*/RRegUniverse* );
+
+/* Check an RRegUniverse is valid, and assert if not.*/
+void RRegUniverse__check_is_sane ( const RRegUniverse* );
+
+/* Print an RRegUniverse, for debugging. */
+void RRegUniverse__show ( const RRegUniverse* );
+
+
+/*---------------------------------------------------------*/
+/*--- Real register sets.                               ---*/
+/*---------------------------------------------------------*/
+
+/* Represents sets of real registers.  |bitset| is interpreted in the
+   context of |univ|.  That is, each bit index |i| in |bitset|
+   corresponds to the register |univ->regs[i]|.  This relies
+   entirely on the fact that N_RREGUNIVERSE_REGS <= 64. */
+typedef
+   struct {
+      ULong         bitset;
+      RRegUniverse* univ;
+   }
+   RRegSet;
+
+
+/*---------------------------------------------------------*/
+/*--- Recording register usage (for reg-alloc)          ---*/
+/*---------------------------------------------------------*/
+
+typedef
+   enum { HRmRead, HRmWrite, HRmModify }
+   HRegMode;
+
+
+/* This isn't entirely general, and is specialised towards being fast,
+   for the reg-alloc.  It represents real registers using a bitmask
+   and can also represent up to N_HREGUSAGE_VREGS (currently five)
+   virtual registers, in an unordered array.  This is based on the
+   observation that no instruction that we generate can mention more
+   than that many registers at once.
+*/
+#define N_HREGUSAGE_VREGS 5
+
+typedef
+   struct {
+      /* The real registers.  The associated universe is not stored
+         here -- callers will have to pass it around separately, as
+         needed. */
+      ULong    rRead;     /* real regs that are read */
+      ULong    rWritten;  /* real regs that are written */
+      /* The virtual registers. */
+      HReg     vRegs[N_HREGUSAGE_VREGS];
+      HRegMode vMode[N_HREGUSAGE_VREGS];
+      UInt     n_vRegs;
+   }
+   HRegUsage;
+
+extern void ppHRegUsage ( const RRegUniverse*, HRegUsage* );
+
+/* Reset a usage table to empty: no real regs read or written, no
+   virtual regs mentioned.  (The vRegs/vMode arrays need no clearing;
+   only the first n_vRegs entries are ever read.) */
+static inline void initHRegUsage ( HRegUsage* tab )
+{
+   tab->rRead    = 0;
+   tab->rWritten = 0;
+   tab->n_vRegs  = 0;
+}
+
+/* Add a register to a usage table.  Combine incoming read uses with
+   existing write uses into a modify use, and vice versa.  Do not
+   create duplicate entries -- each reg should only be mentioned once.  
+*/
+extern void addHRegUse ( HRegUsage*, HRegMode, HReg );
+
+extern Bool HRegUsage__contains ( const HRegUsage*, HReg );
+
+
+/*---------------------------------------------------------*/
+/*--- Indicating register remappings (for reg-alloc)    ---*/
+/*---------------------------------------------------------*/
+
+/* Note that such maps can only map virtual regs to real regs.
+   addToHRegRemap will barf if given a pair not of that form.  As a
+   result, no valid HRegRemap will bind a real reg to anything, and so
+   if lookupHRegRemap is given a real reg, it returns it unchanged.
+   This is precisely the behaviour that the register allocator needs
+   to impose its decisions on the instructions it processes.  */
+
+#define N_HREG_REMAP 6
+
+typedef
+   struct {
+      HReg orig       [N_HREG_REMAP];
+      HReg replacement[N_HREG_REMAP];
+      Int  n_used;
+   }
+   HRegRemap;
+
+extern void ppHRegRemap     ( HRegRemap* );
+extern void addToHRegRemap  ( HRegRemap*, HReg, HReg );
+extern HReg lookupHRegRemap ( HRegRemap*, HReg );
+
+static inline void initHRegRemap ( HRegRemap* map )
+{
+   map->n_used = 0;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Abstract instructions                             ---*/
+/*---------------------------------------------------------*/
+
+/* A type is needed to refer to pointers to instructions of any
+   target.  Defining it like this means that HInstr* can stand in for
+   X86Instr*, ArmInstr*, etc. */
+
+typedef  void  HInstr;
+
+
+/* An expandable array of HInstr*'s.  Handy for insn selection and
+   register allocation.  n_vregs indicates the number of virtual
+   registers mentioned in the code, something that reg-alloc needs to
+   know.  These are required to be numbered 0 .. n_vregs-1. 
+*/
+typedef
+   struct {
+      HInstr** arr;
+      Int      arr_size;
+      Int      arr_used;
+      Int      n_vregs;
+   }
+   HInstrArray;
+
+extern HInstrArray* newHInstrArray ( void );
+
+/* Never call this directly.  It's the slow and incomplete path for
+   addHInstr. */
+__attribute__((noinline))
+extern void addHInstr_SLOW ( HInstrArray*, HInstr* );
+
+/* Append |instr| to |ha|.  Inline fast path when there is spare
+   capacity; otherwise fall through to the out-of-line grow-and-retry
+   slow path. */
+static inline void addHInstr ( HInstrArray* ha, HInstr* instr )
+{
+   if (LIKELY(ha->arr_used < ha->arr_size)) {
+      ha->arr[ha->arr_used] = instr;
+      ha->arr_used++;
+   } else {
+      addHInstr_SLOW(ha, instr);
+   }
+}
+
+
+/*---------------------------------------------------------*/
+/*--- C-Call return-location descriptions               ---*/
+/*---------------------------------------------------------*/
+
+/* This is common to all back ends.  It describes where the return
+   value from a C call is located.  This is important in the case that
+   the call is conditional, since the return locations will need to be
+   set to 0x555..555 in the case that the call does not happen. */
+
+typedef
+   enum {
+      RLPri_INVALID,   /* INVALID */
+      RLPri_None,      /* no return value (a.k.a C "void") */
+      RLPri_Int,       /* in the primary int return reg */
+      RLPri_2Int,      /* in both primary and secondary int ret regs */
+      RLPri_V128SpRel, /* 128-bit value, on the stack */
+      RLPri_V256SpRel  /* 256-bit value, on the stack */
+   }
+   RetLocPrimary;
+
+typedef
+   struct {
+      /* Primary description */
+      RetLocPrimary pri;
+      /* For .pri == RLPri_V128SpRel or RLPri_V256SpRel only, gives
+         the offset of the lowest addressed byte of the value,
+         relative to the stack pointer.  For all other .how values,
+         has no meaning and should be zero. */
+      Int spOff;
+   }
+   RetLoc;
+
+extern void ppRetLoc ( RetLoc rloc );
+
+/* Make a RetLoc carrying no stack offset.  NOTE(review): the range
+   check also admits RLPri_INVALID even though mk_RetLoc_INVALID
+   exists for that purpose -- confirm before tightening. */
+static inline RetLoc mk_RetLoc_simple ( RetLocPrimary pri ) {
+   vassert(pri >= RLPri_INVALID && pri <= RLPri_2Int);
+   return (RetLoc){pri, 0};
+}
+
+/* Make a stack-relative RetLoc; |off| is the SP-relative offset of
+   the lowest addressed byte of the value. */
+static inline RetLoc mk_RetLoc_spRel ( RetLocPrimary pri, Int off ) {
+   vassert(pri >= RLPri_V128SpRel && pri <= RLPri_V256SpRel);
+   return (RetLoc){pri, off};
+}
+
+/* Sanity check: non-stack-relative locations must carry a zero
+   offset; INVALID is never sane. */
+static inline Bool is_sane_RetLoc ( RetLoc rloc ) {
+   switch (rloc.pri) {
+      case RLPri_None: case RLPri_Int: case RLPri_2Int:
+         return rloc.spOff == 0;
+      case RLPri_V128SpRel: case RLPri_V256SpRel:
+         return True;
+      default:
+         return False;
+   }
+}
+
+/* The canonical invalid RetLoc value. */
+static inline RetLoc mk_RetLoc_INVALID ( void ) {
+   return (RetLoc){RLPri_INVALID, 0};
+}
+
+static inline Bool is_RetLoc_INVALID ( RetLoc rl ) {
+   return rl.pri == RLPri_INVALID && rl.spOff == 0;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Reg alloc: TODO: move somewhere else              ---*/
+/*---------------------------------------------------------*/
+
+extern
+HInstrArray* doRegisterAllocation (
+
+   /* Incoming virtual-registerised code. */ 
+   HInstrArray* instrs_in,
+
+   /* The real-register universe to use.  This contains facts about
+      real registers, one of which is the set of registers available
+      for allocation. */
+   const RRegUniverse* univ,
+
+   /* Return True iff the given insn is a reg-reg move, in which
+      case also return the src and dst regs. */
+   Bool (*isMove) (const HInstr*, HReg*, HReg*),
+
+   /* Get info about register usage in this insn. */
+   void (*getRegUsage) (HRegUsage*, const HInstr*, Bool),
+
+   /* Apply a reg-reg mapping to an insn. */
+   void (*mapRegs) (HRegRemap*, HInstr*, Bool),
+
+   /* Return insn(s) to spill/restore a real reg to a spill slot
+      offset.  And optionally a function to do direct reloads. */
+   void    (*genSpill) (  HInstr**, HInstr**, HReg, Int, Bool ),
+   void    (*genReload) ( HInstr**, HInstr**, HReg, Int, Bool ),
+   HInstr* (*directReload) ( HInstr*, HReg, Short ),
+   Int     guest_sizeB,
+
+   /* For debug printing only. */
+   void (*ppInstr) ( const HInstr*, Bool ),
+   void (*ppReg) ( HReg ),
+
+   /* 32/64bit mode */
+   Bool mode64
+);
+
+
+#endif /* ndef __VEX_HOST_GENERIC_REGS_H */
+
+/*---------------------------------------------------------------*/
+/*---                                     host_generic_regs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_simd128.c b/VEX/priv/host_generic_simd128.c
new file mode 100644
index 0000000..22df708
--- /dev/null
+++ b/VEX/priv/host_generic_simd128.c
@@ -0,0 +1,396 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                            host_generic_simd128.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 OpenWorks GbR
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Generic helper functions for doing 128-bit SIMD arithmetic in cases
+   where the instruction selectors cannot generate code in-line.
+   These are purely back-end entities and cannot be seen/referenced
+   from IR. */
+
+#include "libvex_basictypes.h"
+#include "host_generic_simd128.h"
+
+
+/* Primitive helpers always take args of the real type (signed vs
+   unsigned) but return an unsigned result, so there's no conversion
+   weirdness when stuffing results back in the V128 union fields,
+   which are all unsigned. */
+
+/* 32x32 -> signed 64 multiply, truncated back to the low 32 bits, so
+   lane semantics match a wraparound 32-bit multiply. */
+static inline UInt mul32 ( Int xx, Int yy )
+{
+   Long t = ((Long)xx) * ((Long)yy);
+   return toUInt(t);
+}
+
+/* Per-lane max/min, compared at the argument's real signedness but
+   returned unsigned so the result drops straight into the (unsigned)
+   V128 union fields. */
+static inline UInt max32S ( Int xx, Int yy )
+{
+   return toUInt((xx > yy) ? xx : yy);
+}
+
+static inline UInt min32S ( Int xx, Int yy )
+{
+   return toUInt((xx < yy) ? xx : yy);
+}
+
+static inline UInt max32U ( UInt xx, UInt yy )
+{
+   return toUInt((xx > yy) ? xx : yy);
+}
+
+static inline UInt min32U ( UInt xx, UInt yy )
+{
+   return toUInt((xx < yy) ? xx : yy);
+}
+
+static inline UShort max16U ( UShort xx, UShort yy )
+{
+   return toUShort((xx > yy) ? xx : yy);
+}
+
+static inline UShort min16U ( UShort xx, UShort yy )
+{
+   return toUShort((xx < yy) ? xx : yy);
+}
+
+static inline UChar max8S ( Char xx, Char yy )
+{
+   return toUChar((xx > yy) ? xx : yy);
+}
+
+static inline UChar min8S ( Char xx, Char yy )
+{
+   return toUChar((xx < yy) ? xx : yy);
+}
+
+/* 64-bit lane compares: all-ones on true, all-zeroes on false. */
+static inline ULong cmpEQ64 ( Long xx, Long yy )
+{
+   return (((Long)xx) == ((Long)yy))
+             ? 0xFFFFFFFFFFFFFFFFULL : 0ULL;
+}
+
+static inline ULong cmpGT64S ( Long xx, Long yy )
+{
+   return (((Long)xx) > ((Long)yy))
+             ? 0xFFFFFFFFFFFFFFFFULL : 0ULL;
+}
+
+/* Arithmetic right shifts.  NOTE(review): relies on >> of a negative
+   signed value being an arithmetic shift, which is
+   implementation-defined in C (true for gcc/clang). */
+static inline ULong sar64 ( ULong v, UInt n )
+{
+   return ((Long)v) >> n;
+}
+
+static inline UChar sar8 ( UChar v, UInt n )
+{
+   return toUChar(((Char)v) >> n);
+}
+
+/* Narrow signed 32 to 16 with unsigned saturation (clamp to
+   0..65535), as used by h_generic_calc_QNarrowBin32Sto16Ux8 below. */
+static inline UShort qnarrow32Sto16U ( UInt xx0 )
+{
+   Int xx = (Int)xx0;
+   if (xx < 0)     xx = 0;
+   if (xx > 65535) xx = 65535;
+   return (UShort)xx;
+}
+
+/* Plain truncating (non-saturating) narrowing. */
+static inline UShort narrow32to16 ( UInt xx )
+{
+   return (UShort)xx;
+}
+
+static inline UChar narrow16to8 ( UShort xx )
+{
+   return (UChar)xx;
+}
+
+
+/* Lane-wise 32x4 multiply: res[i] = low 32 bits of argL[i]*argR[i]. */
+void VEX_REGPARM(3)
+     h_generic_calc_Mul32x4 ( /*OUT*/V128* res,
+                              V128* argL, V128* argR )
+{
+   res->w32[0] = mul32(argL->w32[0], argR->w32[0]);
+   res->w32[1] = mul32(argL->w32[1], argR->w32[1]);
+   res->w32[2] = mul32(argL->w32[2], argR->w32[2]);
+   res->w32[3] = mul32(argL->w32[3], argR->w32[3]);
+}
+
+/* Lane-wise signed/unsigned 32x4 max and min. */
+void VEX_REGPARM(3)
+     h_generic_calc_Max32Sx4 ( /*OUT*/V128* res,
+                               V128* argL, V128* argR )
+{
+   res->w32[0] = max32S(argL->w32[0], argR->w32[0]);
+   res->w32[1] = max32S(argL->w32[1], argR->w32[1]);
+   res->w32[2] = max32S(argL->w32[2], argR->w32[2]);
+   res->w32[3] = max32S(argL->w32[3], argR->w32[3]);
+}
+
+void VEX_REGPARM(3)
+     h_generic_calc_Min32Sx4 ( /*OUT*/V128* res,
+                               V128* argL, V128* argR )
+{
+   res->w32[0] = min32S(argL->w32[0], argR->w32[0]);
+   res->w32[1] = min32S(argL->w32[1], argR->w32[1]);
+   res->w32[2] = min32S(argL->w32[2], argR->w32[2]);
+   res->w32[3] = min32S(argL->w32[3], argR->w32[3]);
+}
+
+void VEX_REGPARM(3)
+     h_generic_calc_Max32Ux4 ( /*OUT*/V128* res,
+                               V128* argL, V128* argR )
+{
+   res->w32[0] = max32U(argL->w32[0], argR->w32[0]);
+   res->w32[1] = max32U(argL->w32[1], argR->w32[1]);
+   res->w32[2] = max32U(argL->w32[2], argR->w32[2]);
+   res->w32[3] = max32U(argL->w32[3], argR->w32[3]);
+}
+
+void VEX_REGPARM(3)
+     h_generic_calc_Min32Ux4 ( /*OUT*/V128* res,
+                               V128* argL, V128* argR )
+{
+   res->w32[0] = min32U(argL->w32[0], argR->w32[0]);
+   res->w32[1] = min32U(argL->w32[1], argR->w32[1]);
+   res->w32[2] = min32U(argL->w32[2], argR->w32[2]);
+   res->w32[3] = min32U(argL->w32[3], argR->w32[3]);
+}
+
+/* Lane-wise unsigned 16x8 max and min. */
+void VEX_REGPARM(3)
+     h_generic_calc_Max16Ux8 ( /*OUT*/V128* res,
+                               V128* argL, V128* argR )
+{
+   res->w16[0] = max16U(argL->w16[0], argR->w16[0]);
+   res->w16[1] = max16U(argL->w16[1], argR->w16[1]);
+   res->w16[2] = max16U(argL->w16[2], argR->w16[2]);
+   res->w16[3] = max16U(argL->w16[3], argR->w16[3]);
+   res->w16[4] = max16U(argL->w16[4], argR->w16[4]);
+   res->w16[5] = max16U(argL->w16[5], argR->w16[5]);
+   res->w16[6] = max16U(argL->w16[6], argR->w16[6]);
+   res->w16[7] = max16U(argL->w16[7], argR->w16[7]);
+}
+
+void VEX_REGPARM(3)
+     h_generic_calc_Min16Ux8 ( /*OUT*/V128* res,
+                               V128* argL, V128* argR )
+{
+   res->w16[0] = min16U(argL->w16[0], argR->w16[0]);
+   res->w16[1] = min16U(argL->w16[1], argR->w16[1]);
+   res->w16[2] = min16U(argL->w16[2], argR->w16[2]);
+   res->w16[3] = min16U(argL->w16[3], argR->w16[3]);
+   res->w16[4] = min16U(argL->w16[4], argR->w16[4]);
+   res->w16[5] = min16U(argL->w16[5], argR->w16[5]);
+   res->w16[6] = min16U(argL->w16[6], argR->w16[6]);
+   res->w16[7] = min16U(argL->w16[7], argR->w16[7]);
+}
+
+/* Lane-wise signed 8x16 max and min. */
+void VEX_REGPARM(3)
+     h_generic_calc_Max8Sx16 ( /*OUT*/V128* res,
+                               V128* argL, V128* argR )
+{
+   res->w8[ 0] = max8S(argL->w8[ 0], argR->w8[ 0]);
+   res->w8[ 1] = max8S(argL->w8[ 1], argR->w8[ 1]);
+   res->w8[ 2] = max8S(argL->w8[ 2], argR->w8[ 2]);
+   res->w8[ 3] = max8S(argL->w8[ 3], argR->w8[ 3]);
+   res->w8[ 4] = max8S(argL->w8[ 4], argR->w8[ 4]);
+   res->w8[ 5] = max8S(argL->w8[ 5], argR->w8[ 5]);
+   res->w8[ 6] = max8S(argL->w8[ 6], argR->w8[ 6]);
+   res->w8[ 7] = max8S(argL->w8[ 7], argR->w8[ 7]);
+   res->w8[ 8] = max8S(argL->w8[ 8], argR->w8[ 8]);
+   res->w8[ 9] = max8S(argL->w8[ 9], argR->w8[ 9]);
+   res->w8[10] = max8S(argL->w8[10], argR->w8[10]);
+   res->w8[11] = max8S(argL->w8[11], argR->w8[11]);
+   res->w8[12] = max8S(argL->w8[12], argR->w8[12]);
+   res->w8[13] = max8S(argL->w8[13], argR->w8[13]);
+   res->w8[14] = max8S(argL->w8[14], argR->w8[14]);
+   res->w8[15] = max8S(argL->w8[15], argR->w8[15]);
+}
+
+void VEX_REGPARM(3)
+     h_generic_calc_Min8Sx16 ( /*OUT*/V128* res,
+                               V128* argL, V128* argR )
+{
+   res->w8[ 0] = min8S(argL->w8[ 0], argR->w8[ 0]);
+   res->w8[ 1] = min8S(argL->w8[ 1], argR->w8[ 1]);
+   res->w8[ 2] = min8S(argL->w8[ 2], argR->w8[ 2]);
+   res->w8[ 3] = min8S(argL->w8[ 3], argR->w8[ 3]);
+   res->w8[ 4] = min8S(argL->w8[ 4], argR->w8[ 4]);
+   res->w8[ 5] = min8S(argL->w8[ 5], argR->w8[ 5]);
+   res->w8[ 6] = min8S(argL->w8[ 6], argR->w8[ 6]);
+   res->w8[ 7] = min8S(argL->w8[ 7], argR->w8[ 7]);
+   res->w8[ 8] = min8S(argL->w8[ 8], argR->w8[ 8]);
+   res->w8[ 9] = min8S(argL->w8[ 9], argR->w8[ 9]);
+   res->w8[10] = min8S(argL->w8[10], argR->w8[10]);
+   res->w8[11] = min8S(argL->w8[11], argR->w8[11]);
+   res->w8[12] = min8S(argL->w8[12], argR->w8[12]);
+   res->w8[13] = min8S(argL->w8[13], argR->w8[13]);
+   res->w8[14] = min8S(argL->w8[14], argR->w8[14]);
+   res->w8[15] = min8S(argL->w8[15], argR->w8[15]);
+}
+
+/* 64x2 lane compares; each result lane is all-ones or all-zeroes. */
+void VEX_REGPARM(3)
+     h_generic_calc_CmpEQ64x2 ( /*OUT*/V128* res,
+                                V128* argL, V128* argR )
+{
+   res->w64[0] = cmpEQ64(argL->w64[0], argR->w64[0]);
+   res->w64[1] = cmpEQ64(argL->w64[1], argR->w64[1]);
+}
+
+void VEX_REGPARM(3)
+     h_generic_calc_CmpGT64Sx2 ( /*OUT*/V128* res,
+                                 V128* argL, V128* argR )
+{
+   res->w64[0] = cmpGT64S(argL->w64[0], argR->w64[0]);
+   res->w64[1] = cmpGT64S(argL->w64[1], argR->w64[1]);
+}
+
+/* ------------ Shifting ------------ */
+/* Note that because these primops are undefined if the shift amount
+   equals or exceeds the lane width, the shift amount is masked so
+   that the scalar shifts are always in range.  In fact, given the
+   semantics of these primops (Sar64x2, etc) it is an error if in
+   fact we are ever given an out-of-range shift amount. 
+*/
+void /*not-regparm*/
+     h_generic_calc_SarN64x2 ( /*OUT*/V128* res,
+                               V128* argL, UInt nn)
+{
+   /* vassert(nn < 64); */
+   nn &= 63;   /* mask keeps the scalar shift in range; see note above */
+   res->w64[0] = sar64(argL->w64[0], nn);
+   res->w64[1] = sar64(argL->w64[1], nn);
+}
+
+void /*not-regparm*/
+     h_generic_calc_SarN8x16 ( /*OUT*/V128* res,
+                              V128* argL, UInt nn)
+{
+   /* vassert(nn < 8); */
+   nn &= 7;   /* mask keeps the scalar shift in range; see note above */
+   res->w8[ 0] = sar8(argL->w8[ 0], nn);
+   res->w8[ 1] = sar8(argL->w8[ 1], nn);
+   res->w8[ 2] = sar8(argL->w8[ 2], nn);
+   res->w8[ 3] = sar8(argL->w8[ 3], nn);
+   res->w8[ 4] = sar8(argL->w8[ 4], nn);
+   res->w8[ 5] = sar8(argL->w8[ 5], nn);
+   res->w8[ 6] = sar8(argL->w8[ 6], nn);
+   res->w8[ 7] = sar8(argL->w8[ 7], nn);
+   res->w8[ 8] = sar8(argL->w8[ 8], nn);
+   res->w8[ 9] = sar8(argL->w8[ 9], nn);
+   res->w8[10] = sar8(argL->w8[10], nn);
+   res->w8[11] = sar8(argL->w8[11], nn);
+   res->w8[12] = sar8(argL->w8[12], nn);
+   res->w8[13] = sar8(argL->w8[13], nn);
+   res->w8[14] = sar8(argL->w8[14], nn);
+   res->w8[15] = sar8(argL->w8[15], nn);
+}
+
+/* Binary narrowing ops.  Note the lane order: argR supplies the low
+   half of the result and argL the high half. */
+void VEX_REGPARM(3)
+     h_generic_calc_QNarrowBin32Sto16Ux8 ( /*OUT*/V128* res,
+                                           V128* argL, V128* argR )
+{
+   res->w16[0] = qnarrow32Sto16U(argR->w32[0]);
+   res->w16[1] = qnarrow32Sto16U(argR->w32[1]);
+   res->w16[2] = qnarrow32Sto16U(argR->w32[2]);
+   res->w16[3] = qnarrow32Sto16U(argR->w32[3]);
+   res->w16[4] = qnarrow32Sto16U(argL->w32[0]);
+   res->w16[5] = qnarrow32Sto16U(argL->w32[1]);
+   res->w16[6] = qnarrow32Sto16U(argL->w32[2]);
+   res->w16[7] = qnarrow32Sto16U(argL->w32[3]);
+}
+
+void VEX_REGPARM(3)
+     h_generic_calc_NarrowBin16to8x16 ( /*OUT*/V128* res,
+                                        V128* argL, V128* argR )
+{
+   res->w8[ 0] = narrow16to8(argR->w16[0]);
+   res->w8[ 1] = narrow16to8(argR->w16[1]);
+   res->w8[ 2] = narrow16to8(argR->w16[2]);
+   res->w8[ 3] = narrow16to8(argR->w16[3]);
+   res->w8[ 4] = narrow16to8(argR->w16[4]);
+   res->w8[ 5] = narrow16to8(argR->w16[5]);
+   res->w8[ 6] = narrow16to8(argR->w16[6]);
+   res->w8[ 7] = narrow16to8(argR->w16[7]);
+   res->w8[ 8] = narrow16to8(argL->w16[0]);
+   res->w8[ 9] = narrow16to8(argL->w16[1]);
+   res->w8[10] = narrow16to8(argL->w16[2]);
+   res->w8[11] = narrow16to8(argL->w16[3]);
+   res->w8[12] = narrow16to8(argL->w16[4]);
+   res->w8[13] = narrow16to8(argL->w16[5]);
+   res->w8[14] = narrow16to8(argL->w16[6]);
+   res->w8[15] = narrow16to8(argL->w16[7]);
+}
+
+void VEX_REGPARM(3)
+     h_generic_calc_NarrowBin32to16x8 ( /*OUT*/V128* res,
+                                        V128* argL, V128* argR )
+{
+   res->w16[0] = narrow32to16(argR->w32[0]);
+   res->w16[1] = narrow32to16(argR->w32[1]);
+   res->w16[2] = narrow32to16(argR->w32[2]);
+   res->w16[3] = narrow32to16(argR->w32[3]);
+   res->w16[4] = narrow32to16(argL->w32[0]);
+   res->w16[5] = narrow32to16(argL->w32[1]);
+   res->w16[6] = narrow32to16(argL->w32[2]);
+   res->w16[7] = narrow32to16(argL->w32[3]);
+}
+
+/* res[i] = argL[ low 2 bits of argR[i] ] -- a 32-bit lane permute of
+   argL under control of argR. */
+void VEX_REGPARM(3)
+     h_generic_calc_Perm32x4 ( /*OUT*/V128* res,
+                               V128* argL, V128* argR )
+{
+   res->w32[0] = argL->w32[ argR->w32[0] & 3 ];
+   res->w32[1] = argL->w32[ argR->w32[1] & 3 ];
+   res->w32[2] = argL->w32[ argR->w32[2] & 3 ];
+   res->w32[3] = argL->w32[ argR->w32[3] & 3 ];
+}
+
+/* Gather the most significant bit of each of the 16 bytes of the
+   128-bit value (w64hi:w64lo) into a 16-bit mask: bit i of the result
+   is the top bit (bit 7) of byte i, byte 0 being the lowest byte of
+   w64lo. */
+UInt /*not-regparm*/
+     h_generic_calc_GetMSBs8x16 ( ULong w64hi, ULong w64lo )
+{
+   UInt r = 0;
+   if (w64hi & (1ULL << (64-1))) r |= (1<<15);
+   if (w64hi & (1ULL << (56-1))) r |= (1<<14);
+   if (w64hi & (1ULL << (48-1))) r |= (1<<13);
+   if (w64hi & (1ULL << (40-1))) r |= (1<<12);
+   if (w64hi & (1ULL << (32-1))) r |= (1<<11);
+   if (w64hi & (1ULL << (24-1))) r |= (1<<10);
+   if (w64hi & (1ULL << (16-1))) r |= (1<<9);
+   if (w64hi & (1ULL << ( 8-1))) r |= (1<<8);
+   if (w64lo & (1ULL << (64-1))) r |= (1<<7);
+   if (w64lo & (1ULL << (56-1))) r |= (1<<6);
+   if (w64lo & (1ULL << (48-1))) r |= (1<<5);
+   if (w64lo & (1ULL << (40-1))) r |= (1<<4);
+   if (w64lo & (1ULL << (32-1))) r |= (1<<3);
+   if (w64lo & (1ULL << (24-1))) r |= (1<<2);
+   if (w64lo & (1ULL << (16-1))) r |= (1<<1);
+   if (w64lo & (1ULL << ( 8-1))) r |= (1<<0);
+   return r;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                              host_generic_simd128.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_simd128.h b/VEX/priv/host_generic_simd128.h
new file mode 100644
index 0000000..c30c0df
--- /dev/null
+++ b/VEX/priv/host_generic_simd128.h
@@ -0,0 +1,96 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                            host_generic_simd128.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 OpenWorks GbR
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Generic helper functions for doing 128-bit SIMD arithmetic in cases
+   where the instruction selectors cannot generate code in-line.
+   These are purely back-end entities and cannot be seen/referenced
+   as clean helper functions from IR.
+
+   These will get called from generated code and therefore should be
+   well behaved -- no floating point or mmx insns, just straight
+   integer code.
+
+   Each function implements the correspondingly-named IR primop.
+*/
+
+#ifndef __VEX_HOST_GENERIC_SIMD128_H
+#define __VEX_HOST_GENERIC_SIMD128_H
+
+#include "libvex_basictypes.h"
+
+/* All of the following write their result into the first (/*OUT*/)
+   argument.  Implementations are in host_generic_simd128.c. */
+extern VEX_REGPARM(3)
+       void h_generic_calc_Mul32x4    ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_Max32Sx4   ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_Min32Sx4   ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_Max32Ux4   ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_Min32Ux4   ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_Max16Ux8   ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_Min16Ux8   ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_Max8Sx16   ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_Min8Sx16   ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_CmpEQ64x2  ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_CmpGT64Sx2 ( /*OUT*/V128*, V128*, V128* );
+
+/* Shift-by-immediate helpers; the shift amount must be < lane width. */
+extern /*not-regparm*/
+       void h_generic_calc_SarN64x2   ( /*OUT*/V128*, V128*, UInt );
+extern /*not-regparm*/
+       void h_generic_calc_SarN8x16   ( /*OUT*/V128*, V128*, UInt );
+
+extern VEX_REGPARM(3)
+       void h_generic_calc_QNarrowBin32Sto16Ux8
+                                      ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_NarrowBin16to8x16
+                                      ( /*OUT*/V128*, V128*, V128* );
+extern VEX_REGPARM(3)
+       void h_generic_calc_NarrowBin32to16x8
+                                      ( /*OUT*/V128*, V128*, V128* );
+
+extern VEX_REGPARM(3)
+       void h_generic_calc_Perm32x4   ( /*OUT*/V128*, V128*, V128* );
+
+/* Returns the 16-bit MSB mask of the 128-bit value (w64hi:w64lo). */
+extern /*not-regparm*/
+       UInt  h_generic_calc_GetMSBs8x16 ( ULong w64hi, ULong w64lo );
+
+#endif /* ndef __VEX_HOST_GENERIC_SIMD128_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                              host_generic_simd128.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_simd256.c b/VEX/priv/host_generic_simd256.c
new file mode 100644
index 0000000..c5dd7b6
--- /dev/null
+++ b/VEX/priv/host_generic_simd256.c
@@ -0,0 +1,57 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                            host_generic_simd256.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2012-2013 OpenWorks GbR
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Generic helper functions for doing 256-bit SIMD arithmetic in cases
+   where the instruction selectors cannot generate code in-line.
+   These are purely back-end entities and cannot be seen/referenced
+   from IR. */
+
+#include "libvex_basictypes.h"
+#include "host_generic_simd256.h"
+
+
+/* res[i] = argL[ low 3 bits of argR[i] ] -- a 32-bit lane permute of
+   argL under control of argR, across all 8 lanes of the 256-bit
+   value. */
+void VEX_REGPARM(3)
+     h_generic_calc_Perm32x8 ( /*OUT*/V256* res,
+                               V256* argL, V256* argR )
+{
+   res->w32[0] = argL->w32[ argR->w32[0] & 7 ];
+   res->w32[1] = argL->w32[ argR->w32[1] & 7 ];
+   res->w32[2] = argL->w32[ argR->w32[2] & 7 ];
+   res->w32[3] = argL->w32[ argR->w32[3] & 7 ];
+   res->w32[4] = argL->w32[ argR->w32[4] & 7 ];
+   res->w32[5] = argL->w32[ argR->w32[5] & 7 ];
+   res->w32[6] = argL->w32[ argR->w32[6] & 7 ];
+   res->w32[7] = argL->w32[ argR->w32[7] & 7 ];
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                              host_generic_simd256.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_simd256.h b/VEX/priv/host_generic_simd256.h
new file mode 100644
index 0000000..2913435
--- /dev/null
+++ b/VEX/priv/host_generic_simd256.h
@@ -0,0 +1,55 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                            host_generic_simd256.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2012-2013 OpenWorks GbR
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Generic helper functions for doing 256-bit SIMD arithmetic in cases
+   where the instruction selectors cannot generate code in-line.
+   These are purely back-end entities and cannot be seen/referenced
+   as clean helper functions from IR.
+
+   These will get called from generated code and therefore should be
+   well behaved -- no floating point or mmx insns, just straight
+   integer code.
+
+   Each function implements the correspondingly-named IR primop.
+*/
+
+#ifndef __VEX_HOST_GENERIC_SIMD256_H
+#define __VEX_HOST_GENERIC_SIMD256_H
+
+#include "libvex_basictypes.h"
+
+/* 32-bit lane permute of the second argument under control of the
+   third; result written to the first.  Implemented in
+   host_generic_simd256.c. */
+extern VEX_REGPARM(3)
+       void h_generic_calc_Perm32x8   ( /*OUT*/V256*, V256*, V256* );
+
+#endif /* ndef __VEX_HOST_GENERIC_SIMD256_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                              host_generic_simd256.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_simd64.c b/VEX/priv/host_generic_simd64.c
new file mode 100644
index 0000000..367491f
--- /dev/null
+++ b/VEX/priv/host_generic_simd64.c
@@ -0,0 +1,1612 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                             host_generic_simd64.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* Generic helper functions for doing 64-bit SIMD arithmetic in cases
+   where the instruction selectors cannot generate code in-line.
+   These are purely back-end entities and cannot be seen/referenced
+   from IR.  There are also helpers for 32-bit arithmetic in here. */
+
+#include "libvex_basictypes.h"
+#include "main_util.h"              // LIKELY, UNLIKELY
+#include "host_generic_simd64.h"
+
+
+
+/* Tuple/select functions for 32x2 vectors. */
+
+/* Pack two 32-bit lanes into a ULong (w1 is the high lane). */
+static inline ULong mk32x2 ( UInt w1, UInt w0 ) {
+   return (((ULong)w1) << 32) | ((ULong)w0);
+}
+
+/* Extract lane 1 (high) / lane 0 (low) of a 32x2 vector. */
+static inline UInt sel32x2_1 ( ULong w64 ) {
+   return 0xFFFFFFFF & toUInt(w64 >> 32);
+}
+static inline UInt sel32x2_0 ( ULong w64 ) {
+   return 0xFFFFFFFF & toUInt(w64);
+}
+
+
+/* Tuple/select functions for 16x4 vectors.  gcc is pretty hopeless
+   with 64-bit shifts so we give it a hand. */
+
+/* Pack four 16-bit lanes into a ULong (w3 is the highest lane). */
+static inline ULong mk16x4 ( UShort w3, UShort w2, 
+                             UShort w1, UShort w0 ) {
+   UInt hi32 = (((UInt)w3) << 16) | ((UInt)w2);
+   UInt lo32 = (((UInt)w1) << 16) | ((UInt)w0);
+   return mk32x2(hi32, lo32);
+}
+
+/* Extract lane i of a 16x4 vector (lane 3 is the highest). */
+static inline UShort sel16x4_3 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUShort(0xFFFF & (hi32 >> 16));
+}
+static inline UShort sel16x4_2 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUShort(0xFFFF & hi32);
+}
+static inline UShort sel16x4_1 ( ULong w64 ) {
+   UInt lo32 = (UInt)w64;
+   return toUShort(0xFFFF & (lo32 >> 16));
+}
+static inline UShort sel16x4_0 ( ULong w64 ) {
+   UInt lo32 = (UInt)w64;
+   return toUShort(0xFFFF & lo32);
+}
+
+
+/* Tuple/select functions for 8x8 vectors. */
+
+/* Pack eight 8-bit lanes into a ULong (w7 is the highest lane). */
+static inline ULong mk8x8 ( UChar w7, UChar w6,
+                            UChar w5, UChar w4,
+                            UChar w3, UChar w2,
+                            UChar w1, UChar w0 ) {
+   UInt hi32 =   (((UInt)w7) << 24) | (((UInt)w6) << 16)
+               | (((UInt)w5) << 8)  | (((UInt)w4) << 0);
+   UInt lo32 =   (((UInt)w3) << 24) | (((UInt)w2) << 16)
+               | (((UInt)w1) << 8)  | (((UInt)w0) << 0);
+   return mk32x2(hi32, lo32);
+}
+
+/* Extract lane i of an 8x8 vector (lane 7 is the highest). */
+static inline UChar sel8x8_7 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUChar(0xFF & (hi32 >> 24));
+}
+static inline UChar sel8x8_6 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUChar(0xFF & (hi32 >> 16));
+}
+static inline UChar sel8x8_5 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUChar(0xFF & (hi32 >> 8));
+}
+static inline UChar sel8x8_4 ( ULong w64 ) {
+   UInt hi32 = toUInt(w64 >> 32);
+   return toUChar(0xFF & (hi32 >> 0));
+}
+static inline UChar sel8x8_3 ( ULong w64 ) {
+   UInt lo32 = (UInt)w64;
+   return toUChar(0xFF & (lo32 >> 24));
+}
+static inline UChar sel8x8_2 ( ULong w64 ) {
+   UInt lo32 = (UInt)w64;
+   return toUChar(0xFF & (lo32 >> 16));
+}
+static inline UChar sel8x8_1 ( ULong w64 ) {
+   UInt lo32 = (UInt)w64;
+   return toUChar(0xFF & (lo32 >> 8));
+}
+static inline UChar sel8x8_0 ( ULong w64 ) {
+   UInt lo32 = (UInt)w64;
+   return toUChar(0xFF & (lo32 >> 0));
+}
+
+/* Extract byte lane ix (taken modulo 8) of an 8x8 vector. */
+static inline UChar index8x8 ( ULong w64, UChar ix ) {
+   ix &= 7;
+   return toUChar((w64 >> (8*ix)) & 0xFF);
+}
+
+
+/* Scalar helpers. */
+
+/* Saturating adds: compute in a wider type, then clamp to the lane's
+   representable range. */
+static inline Int qadd32S ( Int xx, Int yy ) 
+{
+   Long t = ((Long)xx) + ((Long)yy);
+   const Long loLim = -0x80000000LL;
+   const Long hiLim =  0x7FFFFFFFLL;
+   if (t < loLim) t = loLim;
+   if (t > hiLim) t = hiLim;
+   return (Int)t;
+}
+
+static inline Short qadd16S ( Short xx, Short yy ) 
+{
+   Int t = ((Int)xx) + ((Int)yy);
+   if (t < -32768) t = -32768;
+   if (t > 32767)  t = 32767;
+   return (Short)t;
+}
+
+static inline Char qadd8S ( Char xx, Char yy )
+{
+   Int t = ((Int)xx) + ((Int)yy);
+   if (t < -128) t = -128;
+   if (t > 127)  t = 127;
+   return (Char)t;
+}
+
+static inline UShort qadd16U ( UShort xx, UShort yy )
+{
+   UInt t = ((UInt)xx) + ((UInt)yy);
+   if (t > 0xFFFF) t = 0xFFFF;
+   return (UShort)t;
+}
+
+static inline UChar qadd8U ( UChar xx, UChar yy )
+{
+   UInt t = ((UInt)xx) + ((UInt)yy);
+   if (t > 0xFF) t = 0xFF;
+   return (UChar)t;
+}
+
+/* Saturating subtracts, same scheme as the adds above. */
+static inline Int qsub32S ( Int xx, Int yy ) 
+{
+   Long t = ((Long)xx) - ((Long)yy);
+   const Long loLim = -0x80000000LL;
+   const Long hiLim =  0x7FFFFFFFLL;
+   if (t < loLim) t = loLim;
+   if (t > hiLim) t = hiLim;
+   return (Int)t;
+}
+
+static inline Short qsub16S ( Short xx, Short yy )
+{
+   Int t = ((Int)xx) - ((Int)yy);
+   if (t < -32768) t = -32768;
+   if (t > 32767)  t = 32767;
+   return (Short)t;
+}
+
+static inline Char qsub8S ( Char xx, Char yy )
+{
+   Int t = ((Int)xx) - ((Int)yy);
+   if (t < -128) t = -128;
+   if (t > 127)  t = 127;
+   return (Char)t;
+}
+
+static inline UShort qsub16U ( UShort xx, UShort yy )
+{
+   Int t = ((Int)xx) - ((Int)yy);
+   if (t < 0)      t = 0;
+   if (t > 0xFFFF) t = 0xFFFF;
+   return (UShort)t;
+}
+
+static inline UChar qsub8U ( UChar xx, UChar yy )
+{
+   Int t = ((Int)xx) - ((Int)yy);
+   if (t < 0)    t = 0;
+   if (t > 0xFF) t = 0xFF;
+   return (UChar)t;
+}
+
+/* Truncating lane multiplies. */
+static inline Short mul16 ( Short xx, Short yy )
+{
+   Int t = ((Int)xx) * ((Int)yy);
+   return (Short)t;
+}
+
+/* NOTE(review): this multiply is done at Int width, so it can overflow
+   (formally UB for signed types); the 128-bit variant widens to Long
+   first.  Presumably relied on to wrap on the supported compilers --
+   confirm. */
+static inline Int mul32 ( Int xx, Int yy )
+{
+   Int t = ((Int)xx) * ((Int)yy);
+   return (Int)t;
+}
+
+/* High halves of widening 16x16 multiplies, signed and unsigned. */
+static inline Short mulhi16S ( Short xx, Short yy )
+{
+   Int t = ((Int)xx) * ((Int)yy);
+   t >>=/*s*/ 16;
+   return (Short)t;
+}
+
+static inline UShort mulhi16U ( UShort xx, UShort yy )
+{
+   UInt t = ((UInt)xx) * ((UInt)yy);
+   t >>=/*u*/ 16;
+   return (UShort)t;
+}
+
+/* Lane compares: all-ones on true, all-zeroes on false. */
+static inline UInt cmpeq32 ( UInt xx, UInt yy )
+{
+   return xx==yy ? 0xFFFFFFFF : 0;
+}
+
+static inline UShort cmpeq16 ( UShort xx, UShort yy )
+{
+   return toUShort(xx==yy ? 0xFFFF : 0);
+}
+
+static inline UChar cmpeq8 ( UChar xx, UChar yy )
+{
+   return toUChar(xx==yy ? 0xFF : 0);
+}
+
+static inline UInt cmpgt32S ( Int xx, Int yy )
+{
+   return xx>yy ? 0xFFFFFFFF : 0;
+}
+
+static inline UShort cmpgt16S ( Short xx, Short yy )
+{
+   return toUShort(xx>yy ? 0xFFFF : 0);
+}
+
+static inline UChar cmpgt8S ( Char xx, Char yy )
+{
+   return toUChar(xx>yy ? 0xFF : 0);
+}
+
+/* Lane "non-zero" tests: all-ones iff the lane is non-zero. */
+static inline UInt cmpnez32 ( UInt xx )
+{
+   return xx==0 ? 0 : 0xFFFFFFFF;
+}
+
+static inline UShort cmpnez16 ( UShort xx )
+{
+   return toUShort(xx==0 ? 0 : 0xFFFF);
+}
+
+static inline UChar cmpnez8 ( UChar xx )
+{
+   return toUChar(xx==0 ? 0 : 0xFF);
+}
+
+/* Saturating narrowing: reinterpret the input as signed, then clamp
+   to the destination range (signed or unsigned as named). */
+static inline Short qnarrow32Sto16S ( UInt xx0 )
+{
+   Int xx = (Int)xx0;
+   if (xx < -32768) xx = -32768;
+   if (xx > 32767)  xx = 32767;
+   return (Short)xx;
+}
+
+static inline Char qnarrow16Sto8S ( UShort xx0 )
+{
+   Short xx = (Short)xx0;
+   if (xx < -128) xx = -128;
+   if (xx > 127)  xx = 127;
+   return (Char)xx;
+}
+
+static inline UChar qnarrow16Sto8U ( UShort xx0 )
+{
+   Short xx = (Short)xx0;
+   if (xx < 0)   xx = 0;
+   if (xx > 255) xx = 255;
+   return (UChar)xx;
+}
+
+/* Plain truncating (non-saturating) narrowing. */
+static inline UShort narrow32to16 ( UInt xx )
+{
+   return (UShort)xx;
+}
+
+static inline UChar narrow16to8 ( UShort xx )
+{
+   return (UChar)xx;
+}
+
+/* shifts: we don't care about out-of-range ones, since
+   that is dealt with at a higher level. */
+
+/* Left / logical-right / arithmetic-right shifts per lane width.
+   NOTE(review): the sar variants rely on >> of a negative signed value
+   being an arithmetic shift (implementation-defined in C; true for
+   gcc/clang). */
+static inline UChar shl8 ( UChar v, UInt n )
+{
+   return toUChar(v << n);
+}
+
+static inline UChar sar8 ( UChar v, UInt n )
+{
+   return toUChar(((Char)v) >> n);
+}
+
+static inline UShort shl16 ( UShort v, UInt n )
+{
+   return toUShort(v << n);
+}
+
+static inline UShort shr16 ( UShort v, UInt n )
+{
+   return toUShort((((UShort)v) >> n));
+}
+
+static inline UShort sar16 ( UShort v, UInt n )
+{
+   return toUShort(((Short)v) >> n);
+}
+
+static inline UInt shl32 ( UInt v, UInt n )
+{
+   return v << n;
+}
+
+static inline UInt shr32 ( UInt v, UInt n )
+{
+   return (((UInt)v) >> n);
+}
+
+static inline UInt sar32 ( UInt v, UInt n )
+{
+   return ((Int)v) >> n;
+}
+
+/* Rounding averages: (x + y + 1) / 2, computed without overflow by
+   widening to UInt first. */
+static inline UChar avg8U ( UChar xx, UChar yy )
+{
+   UInt xxi = (UInt)xx;
+   UInt yyi = (UInt)yy;
+   UInt r   = (xxi + yyi + 1) >> 1;
+   return (UChar)r;
+}
+
+static inline UShort avg16U ( UShort xx, UShort yy )
+{
+   UInt xxi = (UInt)xx;
+   UInt yyi = (UInt)yy;
+   UInt r   = (xxi + yyi + 1) >> 1;
+   return (UShort)r;
+}
+
+/* Per-lane max/min at the named signedness. */
+static inline Short max16S ( Short xx, Short yy )
+{
+   return toUShort((xx > yy) ? xx : yy);
+}
+
+static inline UChar max8U ( UChar xx, UChar yy )
+{
+   return toUChar((xx > yy) ? xx : yy);
+}
+
+static inline Short min16S ( Short xx, Short yy )
+{
+   return toUShort((xx < yy) ? xx : yy);
+}
+
+static inline UChar min8U ( UChar xx, UChar yy )
+{
+   return toUChar((xx < yy) ? xx : yy);
+}
+
+/* Halving add/sub: (x +/- y) / 2 without the rounding term, computed
+   in a wider type so the intermediate cannot overflow. */
+static inline UShort hadd16U ( UShort xx, UShort yy )
+{
+   UInt xxi = (UInt)xx;
+   UInt yyi = (UInt)yy;
+   UInt r   = (xxi + yyi) >> 1;
+   return (UShort)r;
+}
+
+static inline Short hadd16S ( Short xx, Short yy )
+{
+   Int xxi = (Int)xx;
+   Int yyi = (Int)yy;
+   Int r   = (xxi + yyi) >> 1;
+   return (Short)r;
+}
+
+static inline UShort hsub16U ( UShort xx, UShort yy )
+{
+   UInt xxi = (UInt)xx;
+   UInt yyi = (UInt)yy;
+   UInt r   = (xxi - yyi) >> 1;
+   return (UShort)r;
+}
+
+static inline Short hsub16S ( Short xx, Short yy )
+{
+   Int xxi = (Int)xx;
+   Int yyi = (Int)yy;
+   Int r   = (xxi - yyi) >> 1;
+   return (Short)r;
+}
+
+static inline UChar hadd8U ( UChar xx, UChar yy )
+{
+   UInt xxi = (UInt)xx;
+   UInt yyi = (UInt)yy;
+   UInt r   = (xxi + yyi) >> 1;
+   return (UChar)r;
+}
+
+static inline Char hadd8S ( Char xx, Char yy )
+{
+   Int xxi = (Int)xx;
+   Int yyi = (Int)yy;
+   Int r   = (xxi + yyi) >> 1;
+   return (Char)r;
+}
+
+static inline UChar hsub8U ( UChar xx, UChar yy )
+{
+   UInt xxi = (UInt)xx;
+   UInt yyi = (UInt)yy;
+   UInt r   = (xxi - yyi) >> 1;
+   return (UChar)r;
+}
+
+static inline Char hsub8S ( Char xx, Char yy )
+{
+   Int xxi = (Int)xx;
+   Int yyi = (Int)yy;
+   Int r   = (xxi - yyi) >> 1;
+   return (Char)r;
+}
+
+static inline UInt absdiff8U ( UChar xx, UChar yy )
+{
+   UInt xxu = (UChar)xx;
+   UInt yyu = (UChar)yy;
+   return xxu >= yyu  ? xxu - yyu  : yyu - xxu;
+}
+
+/* ----------------------------------------------------- */
+/* Start of the externally visible functions.  These simply
+   implement the corresponding IR primops. */
+/* ----------------------------------------------------- */
+
/* ------------ Normal addition ------------ */

/* Lane-wise wrapping addition of two 2x32 vectors packed in 64 bits. */
ULong h_generic_calc_Add32x2 ( ULong xx, ULong yy )
{
   return mk32x2(
             sel32x2_1(xx) + sel32x2_1(yy),
             sel32x2_0(xx) + sel32x2_0(yy)
          );
}

/* Lane-wise wrapping addition, 4 lanes of 16 bits. */
ULong h_generic_calc_Add16x4 ( ULong xx, ULong yy )
{
   return mk16x4(
             toUShort( sel16x4_3(xx) + sel16x4_3(yy) ),
             toUShort( sel16x4_2(xx) + sel16x4_2(yy) ),
             toUShort( sel16x4_1(xx) + sel16x4_1(yy) ),
             toUShort( sel16x4_0(xx) + sel16x4_0(yy) )
          );
}

/* Lane-wise wrapping addition, 8 lanes of 8 bits. */
ULong h_generic_calc_Add8x8 ( ULong xx, ULong yy )
{
   return mk8x8(
             toUChar( sel8x8_7(xx) + sel8x8_7(yy) ),
             toUChar( sel8x8_6(xx) + sel8x8_6(yy) ),
             toUChar( sel8x8_5(xx) + sel8x8_5(yy) ),
             toUChar( sel8x8_4(xx) + sel8x8_4(yy) ),
             toUChar( sel8x8_3(xx) + sel8x8_3(yy) ),
             toUChar( sel8x8_2(xx) + sel8x8_2(yy) ),
             toUChar( sel8x8_1(xx) + sel8x8_1(yy) ),
             toUChar( sel8x8_0(xx) + sel8x8_0(yy) )
          );
}

/* ------------ Saturating addition ------------ */

/* Lane-wise signed saturating addition, 4x16. */
ULong h_generic_calc_QAdd16Sx4 ( ULong xx, ULong yy )
{
   return mk16x4(
             qadd16S( sel16x4_3(xx), sel16x4_3(yy) ),
             qadd16S( sel16x4_2(xx), sel16x4_2(yy) ),
             qadd16S( sel16x4_1(xx), sel16x4_1(yy) ),
             qadd16S( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

/* Lane-wise signed saturating addition, 8x8. */
ULong h_generic_calc_QAdd8Sx8 ( ULong xx, ULong yy )
{
   return mk8x8(
             qadd8S( sel8x8_7(xx), sel8x8_7(yy) ),
             qadd8S( sel8x8_6(xx), sel8x8_6(yy) ),
             qadd8S( sel8x8_5(xx), sel8x8_5(yy) ),
             qadd8S( sel8x8_4(xx), sel8x8_4(yy) ),
             qadd8S( sel8x8_3(xx), sel8x8_3(yy) ),
             qadd8S( sel8x8_2(xx), sel8x8_2(yy) ),
             qadd8S( sel8x8_1(xx), sel8x8_1(yy) ),
             qadd8S( sel8x8_0(xx), sel8x8_0(yy) )
          );
}

/* Lane-wise unsigned saturating addition, 4x16. */
ULong h_generic_calc_QAdd16Ux4 ( ULong xx, ULong yy )
{
   return mk16x4(
             qadd16U( sel16x4_3(xx), sel16x4_3(yy) ),
             qadd16U( sel16x4_2(xx), sel16x4_2(yy) ),
             qadd16U( sel16x4_1(xx), sel16x4_1(yy) ),
             qadd16U( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

/* Lane-wise unsigned saturating addition, 8x8. */
ULong h_generic_calc_QAdd8Ux8 ( ULong xx, ULong yy )
{
   return mk8x8(
             qadd8U( sel8x8_7(xx), sel8x8_7(yy) ),
             qadd8U( sel8x8_6(xx), sel8x8_6(yy) ),
             qadd8U( sel8x8_5(xx), sel8x8_5(yy) ),
             qadd8U( sel8x8_4(xx), sel8x8_4(yy) ),
             qadd8U( sel8x8_3(xx), sel8x8_3(yy) ),
             qadd8U( sel8x8_2(xx), sel8x8_2(yy) ),
             qadd8U( sel8x8_1(xx), sel8x8_1(yy) ),
             qadd8U( sel8x8_0(xx), sel8x8_0(yy) )
          );
}
+
/* ------------ Normal subtraction ------------ */

/* Lane-wise wrapping subtraction, 2x32. */
ULong h_generic_calc_Sub32x2 ( ULong xx, ULong yy )
{
   return mk32x2(
             sel32x2_1(xx) - sel32x2_1(yy),
             sel32x2_0(xx) - sel32x2_0(yy)
          );
}

/* Lane-wise wrapping subtraction, 4x16. */
ULong h_generic_calc_Sub16x4 ( ULong xx, ULong yy )
{
   return mk16x4(
             toUShort( sel16x4_3(xx) - sel16x4_3(yy) ),
             toUShort( sel16x4_2(xx) - sel16x4_2(yy) ),
             toUShort( sel16x4_1(xx) - sel16x4_1(yy) ),
             toUShort( sel16x4_0(xx) - sel16x4_0(yy) )
          );
}

/* Lane-wise wrapping subtraction, 8x8. */
ULong h_generic_calc_Sub8x8 ( ULong xx, ULong yy )
{
   return mk8x8(
             toUChar( sel8x8_7(xx) - sel8x8_7(yy) ),
             toUChar( sel8x8_6(xx) - sel8x8_6(yy) ),
             toUChar( sel8x8_5(xx) - sel8x8_5(yy) ),
             toUChar( sel8x8_4(xx) - sel8x8_4(yy) ),
             toUChar( sel8x8_3(xx) - sel8x8_3(yy) ),
             toUChar( sel8x8_2(xx) - sel8x8_2(yy) ),
             toUChar( sel8x8_1(xx) - sel8x8_1(yy) ),
             toUChar( sel8x8_0(xx) - sel8x8_0(yy) )
          );
}

/* ------------ Saturating subtraction ------------ */

/* Lane-wise signed saturating subtraction, 4x16. */
ULong h_generic_calc_QSub16Sx4 ( ULong xx, ULong yy )
{
   return mk16x4(
             qsub16S( sel16x4_3(xx), sel16x4_3(yy) ),
             qsub16S( sel16x4_2(xx), sel16x4_2(yy) ),
             qsub16S( sel16x4_1(xx), sel16x4_1(yy) ),
             qsub16S( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

/* Lane-wise signed saturating subtraction, 8x8. */
ULong h_generic_calc_QSub8Sx8 ( ULong xx, ULong yy )
{
   return mk8x8(
             qsub8S( sel8x8_7(xx), sel8x8_7(yy) ),
             qsub8S( sel8x8_6(xx), sel8x8_6(yy) ),
             qsub8S( sel8x8_5(xx), sel8x8_5(yy) ),
             qsub8S( sel8x8_4(xx), sel8x8_4(yy) ),
             qsub8S( sel8x8_3(xx), sel8x8_3(yy) ),
             qsub8S( sel8x8_2(xx), sel8x8_2(yy) ),
             qsub8S( sel8x8_1(xx), sel8x8_1(yy) ),
             qsub8S( sel8x8_0(xx), sel8x8_0(yy) )
          );
}

/* Lane-wise unsigned saturating subtraction, 4x16. */
ULong h_generic_calc_QSub16Ux4 ( ULong xx, ULong yy )
{
   return mk16x4(
             qsub16U( sel16x4_3(xx), sel16x4_3(yy) ),
             qsub16U( sel16x4_2(xx), sel16x4_2(yy) ),
             qsub16U( sel16x4_1(xx), sel16x4_1(yy) ),
             qsub16U( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

/* Lane-wise unsigned saturating subtraction, 8x8. */
ULong h_generic_calc_QSub8Ux8 ( ULong xx, ULong yy )
{
   return mk8x8(
             qsub8U( sel8x8_7(xx), sel8x8_7(yy) ),
             qsub8U( sel8x8_6(xx), sel8x8_6(yy) ),
             qsub8U( sel8x8_5(xx), sel8x8_5(yy) ),
             qsub8U( sel8x8_4(xx), sel8x8_4(yy) ),
             qsub8U( sel8x8_3(xx), sel8x8_3(yy) ),
             qsub8U( sel8x8_2(xx), sel8x8_2(yy) ),
             qsub8U( sel8x8_1(xx), sel8x8_1(yy) ),
             qsub8U( sel8x8_0(xx), sel8x8_0(yy) )
          );
}
+
/* ------------ Multiplication ------------ */

/* Lane-wise multiply, low 16 bits of each 16x16 product. */
ULong h_generic_calc_Mul16x4 ( ULong xx, ULong yy )
{
   return mk16x4(
             mul16( sel16x4_3(xx), sel16x4_3(yy) ),
             mul16( sel16x4_2(xx), sel16x4_2(yy) ),
             mul16( sel16x4_1(xx), sel16x4_1(yy) ),
             mul16( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

/* Lane-wise multiply, low 32 bits of each 32x32 product. */
ULong h_generic_calc_Mul32x2 ( ULong xx, ULong yy )
{
   return mk32x2(
             mul32( sel32x2_1(xx), sel32x2_1(yy) ),
             mul32( sel32x2_0(xx), sel32x2_0(yy) )
          );
}

/* Lane-wise multiply, high 16 bits of each signed 16x16 product. */
ULong h_generic_calc_MulHi16Sx4 ( ULong xx, ULong yy )
{
   return mk16x4(
             mulhi16S( sel16x4_3(xx), sel16x4_3(yy) ),
             mulhi16S( sel16x4_2(xx), sel16x4_2(yy) ),
             mulhi16S( sel16x4_1(xx), sel16x4_1(yy) ),
             mulhi16S( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

/* Lane-wise multiply, high 16 bits of each unsigned 16x16 product. */
ULong h_generic_calc_MulHi16Ux4 ( ULong xx, ULong yy )
{
   return mk16x4(
             mulhi16U( sel16x4_3(xx), sel16x4_3(yy) ),
             mulhi16U( sel16x4_2(xx), sel16x4_2(yy) ),
             mulhi16U( sel16x4_1(xx), sel16x4_1(yy) ),
             mulhi16U( sel16x4_0(xx), sel16x4_0(yy) )
          );
}
+
/* ------------ Comparison ------------ */
/* Each comparison produces, per lane, an all-ones mask when the
   predicate holds and an all-zeroes mask otherwise. */

ULong h_generic_calc_CmpEQ32x2 ( ULong xx, ULong yy )
{
   return mk32x2(
             cmpeq32( sel32x2_1(xx), sel32x2_1(yy) ),
             cmpeq32( sel32x2_0(xx), sel32x2_0(yy) )
          );
}

ULong h_generic_calc_CmpEQ16x4 ( ULong xx, ULong yy )
{
   return mk16x4(
             cmpeq16( sel16x4_3(xx), sel16x4_3(yy) ),
             cmpeq16( sel16x4_2(xx), sel16x4_2(yy) ),
             cmpeq16( sel16x4_1(xx), sel16x4_1(yy) ),
             cmpeq16( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

ULong h_generic_calc_CmpEQ8x8 ( ULong xx, ULong yy )
{
   return mk8x8(
             cmpeq8( sel8x8_7(xx), sel8x8_7(yy) ),
             cmpeq8( sel8x8_6(xx), sel8x8_6(yy) ),
             cmpeq8( sel8x8_5(xx), sel8x8_5(yy) ),
             cmpeq8( sel8x8_4(xx), sel8x8_4(yy) ),
             cmpeq8( sel8x8_3(xx), sel8x8_3(yy) ),
             cmpeq8( sel8x8_2(xx), sel8x8_2(yy) ),
             cmpeq8( sel8x8_1(xx), sel8x8_1(yy) ),
             cmpeq8( sel8x8_0(xx), sel8x8_0(yy) )
          );
}

/* Signed "greater than" comparisons. */
ULong h_generic_calc_CmpGT32Sx2 ( ULong xx, ULong yy )
{
   return mk32x2(
             cmpgt32S( sel32x2_1(xx), sel32x2_1(yy) ),
             cmpgt32S( sel32x2_0(xx), sel32x2_0(yy) )
          );
}

ULong h_generic_calc_CmpGT16Sx4 ( ULong xx, ULong yy )
{
   return mk16x4(
             cmpgt16S( sel16x4_3(xx), sel16x4_3(yy) ),
             cmpgt16S( sel16x4_2(xx), sel16x4_2(yy) ),
             cmpgt16S( sel16x4_1(xx), sel16x4_1(yy) ),
             cmpgt16S( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

ULong h_generic_calc_CmpGT8Sx8 ( ULong xx, ULong yy )
{
   return mk8x8(
             cmpgt8S( sel8x8_7(xx), sel8x8_7(yy) ),
             cmpgt8S( sel8x8_6(xx), sel8x8_6(yy) ),
             cmpgt8S( sel8x8_5(xx), sel8x8_5(yy) ),
             cmpgt8S( sel8x8_4(xx), sel8x8_4(yy) ),
             cmpgt8S( sel8x8_3(xx), sel8x8_3(yy) ),
             cmpgt8S( sel8x8_2(xx), sel8x8_2(yy) ),
             cmpgt8S( sel8x8_1(xx), sel8x8_1(yy) ),
             cmpgt8S( sel8x8_0(xx), sel8x8_0(yy) )
          );
}

/* "Not equal to zero" tests (unary). */
ULong h_generic_calc_CmpNEZ32x2 ( ULong xx )
{
   return mk32x2(
             cmpnez32( sel32x2_1(xx) ),
             cmpnez32( sel32x2_0(xx) )
          );
}

ULong h_generic_calc_CmpNEZ16x4 ( ULong xx )
{
   return mk16x4(
             cmpnez16( sel16x4_3(xx) ),
             cmpnez16( sel16x4_2(xx) ),
             cmpnez16( sel16x4_1(xx) ),
             cmpnez16( sel16x4_0(xx) )
          );
}

ULong h_generic_calc_CmpNEZ8x8 ( ULong xx )
{
   return mk8x8(
             cmpnez8( sel8x8_7(xx) ),
             cmpnez8( sel8x8_6(xx) ),
             cmpnez8( sel8x8_5(xx) ),
             cmpnez8( sel8x8_4(xx) ),
             cmpnez8( sel8x8_3(xx) ),
             cmpnez8( sel8x8_2(xx) ),
             cmpnez8( sel8x8_1(xx) ),
             cmpnez8( sel8x8_0(xx) )
          );
}
+
/* ------------ Saturating narrowing ------------ */
/* Binary narrowing: the result's high lanes come from aa and its low
   lanes from bb, each lane independently saturated. */

ULong h_generic_calc_QNarrowBin32Sto16Sx4 ( ULong aa, ULong bb )
{
   UInt d = sel32x2_1(aa);
   UInt c = sel32x2_0(aa);
   UInt b = sel32x2_1(bb);
   UInt a = sel32x2_0(bb);
   return mk16x4(
             qnarrow32Sto16S(d),
             qnarrow32Sto16S(c),
             qnarrow32Sto16S(b),
             qnarrow32Sto16S(a)
          );
}

ULong h_generic_calc_QNarrowBin16Sto8Sx8 ( ULong aa, ULong bb )
{
   UShort h = sel16x4_3(aa);
   UShort g = sel16x4_2(aa);
   UShort f = sel16x4_1(aa);
   UShort e = sel16x4_0(aa);
   UShort d = sel16x4_3(bb);
   UShort c = sel16x4_2(bb);
   UShort b = sel16x4_1(bb);
   UShort a = sel16x4_0(bb);
   return mk8x8(
             qnarrow16Sto8S(h),
             qnarrow16Sto8S(g),
             qnarrow16Sto8S(f),
             qnarrow16Sto8S(e),
             qnarrow16Sto8S(d),
             qnarrow16Sto8S(c),
             qnarrow16Sto8S(b),
             qnarrow16Sto8S(a)
          );
}

/* Signed inputs narrowed with unsigned saturation (cf. x86 PACKUSWB). */
ULong h_generic_calc_QNarrowBin16Sto8Ux8 ( ULong aa, ULong bb )
{
   UShort h = sel16x4_3(aa);
   UShort g = sel16x4_2(aa);
   UShort f = sel16x4_1(aa);
   UShort e = sel16x4_0(aa);
   UShort d = sel16x4_3(bb);
   UShort c = sel16x4_2(bb);
   UShort b = sel16x4_1(bb);
   UShort a = sel16x4_0(bb);
   return mk8x8(
             qnarrow16Sto8U(h),
             qnarrow16Sto8U(g),
             qnarrow16Sto8U(f),
             qnarrow16Sto8U(e),
             qnarrow16Sto8U(d),
             qnarrow16Sto8U(c),
             qnarrow16Sto8U(b),
             qnarrow16Sto8U(a)
          );
}

/* ------------ Truncating narrowing ------------ */
/* Same lane arrangement as above, but lanes are simply truncated. */

ULong h_generic_calc_NarrowBin32to16x4 ( ULong aa, ULong bb )
{
   UInt d = sel32x2_1(aa);
   UInt c = sel32x2_0(aa);
   UInt b = sel32x2_1(bb);
   UInt a = sel32x2_0(bb);
   return mk16x4(
             narrow32to16(d),
             narrow32to16(c),
             narrow32to16(b),
             narrow32to16(a)
          );
}

ULong h_generic_calc_NarrowBin16to8x8 ( ULong aa, ULong bb )
{
   UShort h = sel16x4_3(aa);
   UShort g = sel16x4_2(aa);
   UShort f = sel16x4_1(aa);
   UShort e = sel16x4_0(aa);
   UShort d = sel16x4_3(bb);
   UShort c = sel16x4_2(bb);
   UShort b = sel16x4_1(bb);
   UShort a = sel16x4_0(bb);
   return mk8x8(
             narrow16to8(h),
             narrow16to8(g),
             narrow16to8(f),
             narrow16to8(e),
             narrow16to8(d),
             narrow16to8(c),
             narrow16to8(b),
             narrow16to8(a)
          );
}
+
/* ------------ Interleaving ------------ */
/* HI variants interleave the upper halves of the two operands,
   LO variants the lower halves; aa supplies the higher lane of
   each pair. */

ULong h_generic_calc_InterleaveHI8x8 ( ULong aa, ULong bb )
{
   return mk8x8(
             sel8x8_7(aa),
             sel8x8_7(bb),
             sel8x8_6(aa),
             sel8x8_6(bb),
             sel8x8_5(aa),
             sel8x8_5(bb),
             sel8x8_4(aa),
             sel8x8_4(bb)
          );
}

ULong h_generic_calc_InterleaveLO8x8 ( ULong aa, ULong bb )
{
   return mk8x8(
             sel8x8_3(aa),
             sel8x8_3(bb),
             sel8x8_2(aa),
             sel8x8_2(bb),
             sel8x8_1(aa),
             sel8x8_1(bb),
             sel8x8_0(aa),
             sel8x8_0(bb)
          );
}

ULong h_generic_calc_InterleaveHI16x4 ( ULong aa, ULong bb )
{
   return mk16x4(
             sel16x4_3(aa),
             sel16x4_3(bb),
             sel16x4_2(aa),
             sel16x4_2(bb)
          );
}

ULong h_generic_calc_InterleaveLO16x4 ( ULong aa, ULong bb )
{
   return mk16x4(
             sel16x4_1(aa),
             sel16x4_1(bb),
             sel16x4_0(aa),
             sel16x4_0(bb)
          );
}

ULong h_generic_calc_InterleaveHI32x2 ( ULong aa, ULong bb )
{
   return mk32x2(
             sel32x2_1(aa),
             sel32x2_1(bb)
          );
}

ULong h_generic_calc_InterleaveLO32x2 ( ULong aa, ULong bb )
{
   return mk32x2(
             sel32x2_0(aa),
             sel32x2_0(bb)
          );
}

/* ------------ Concatenation ------------ */
/* Gather the odd- (resp. even-) numbered 16-bit lanes of aa into the
   high half of the result and those of bb into the low half. */

ULong h_generic_calc_CatOddLanes16x4 ( ULong aa, ULong bb )
{
   return mk16x4(
             sel16x4_3(aa),
             sel16x4_1(aa),
             sel16x4_3(bb),
             sel16x4_1(bb)
          );
}

ULong h_generic_calc_CatEvenLanes16x4 ( ULong aa, ULong bb )
{
   return mk16x4(
             sel16x4_2(aa),
             sel16x4_0(aa),
             sel16x4_2(bb),
             sel16x4_0(bb)
          );
}

/* misc hack looking for a proper home */
/* Byte-level permute: each lane of bb selects (by index, low 3 bits
   per index8x8's contract -- TODO confirm against its definition)
   a byte of aa for the corresponding result lane. */
ULong h_generic_calc_Perm8x8 ( ULong aa, ULong bb )
{
   return mk8x8(
             index8x8(aa, sel8x8_7(bb)),
             index8x8(aa, sel8x8_6(bb)),
             index8x8(aa, sel8x8_5(bb)),
             index8x8(aa, sel8x8_4(bb)),
             index8x8(aa, sel8x8_3(bb)),
             index8x8(aa, sel8x8_2(bb)),
             index8x8(aa, sel8x8_1(bb)),
             index8x8(aa, sel8x8_0(bb))
          );
}
+
/* ------------ Shifting ------------ */
/* Note that because these primops are undefined if the shift amount
   equals or exceeds the lane width, the shift amount is masked so
   that the scalar shifts are always in range.  In fact, given the
   semantics of these primops (ShlN16x4, etc) it is an error if in
   fact we are ever given an out-of-range shift amount.
*/

/* Shift each 32-bit lane left by nn (masked to 0..31). */
ULong h_generic_calc_ShlN32x2 ( ULong xx, UInt nn )
{
   /* vassert(nn < 32); */
   nn &= 31;
   return mk32x2(
             shl32( sel32x2_1(xx), nn ),
             shl32( sel32x2_0(xx), nn )
          );
}

/* Shift each 16-bit lane left by nn (masked to 0..15). */
ULong h_generic_calc_ShlN16x4 ( ULong xx, UInt nn )
{
   /* vassert(nn < 16); */
   nn &= 15;
   return mk16x4(
             shl16( sel16x4_3(xx), nn ),
             shl16( sel16x4_2(xx), nn ),
             shl16( sel16x4_1(xx), nn ),
             shl16( sel16x4_0(xx), nn )
          );
}

/* Shift each 8-bit lane left by nn (masked to 0..7). */
ULong h_generic_calc_ShlN8x8  ( ULong xx, UInt nn )
{
   /* vassert(nn < 8); */
   nn &= 7;
   return mk8x8(
             shl8( sel8x8_7(xx), nn ),
             shl8( sel8x8_6(xx), nn ),
             shl8( sel8x8_5(xx), nn ),
             shl8( sel8x8_4(xx), nn ),
             shl8( sel8x8_3(xx), nn ),
             shl8( sel8x8_2(xx), nn ),
             shl8( sel8x8_1(xx), nn ),
             shl8( sel8x8_0(xx), nn )
          );
}

/* Logical (zero-filling) right shift of each 32-bit lane. */
ULong h_generic_calc_ShrN32x2 ( ULong xx, UInt nn )
{
   /* vassert(nn < 32); */
   nn &= 31;
   return mk32x2(
             shr32( sel32x2_1(xx), nn ),
             shr32( sel32x2_0(xx), nn )
          );
}

/* Logical (zero-filling) right shift of each 16-bit lane. */
ULong h_generic_calc_ShrN16x4 ( ULong xx, UInt nn )
{
   /* vassert(nn < 16); */
   nn &= 15;
   return mk16x4(
             shr16( sel16x4_3(xx), nn ),
             shr16( sel16x4_2(xx), nn ),
             shr16( sel16x4_1(xx), nn ),
             shr16( sel16x4_0(xx), nn )
          );
}

/* Arithmetic (sign-filling) right shift of each 32-bit lane. */
ULong h_generic_calc_SarN32x2 ( ULong xx, UInt nn )
{
   /* vassert(nn < 32); */
   nn &= 31;
   return mk32x2(
             sar32( sel32x2_1(xx), nn ),
             sar32( sel32x2_0(xx), nn )
          );
}

/* Arithmetic (sign-filling) right shift of each 16-bit lane. */
ULong h_generic_calc_SarN16x4 ( ULong xx, UInt nn )
{
   /* vassert(nn < 16); */
   nn &= 15;
   return mk16x4(
             sar16( sel16x4_3(xx), nn ),
             sar16( sel16x4_2(xx), nn ),
             sar16( sel16x4_1(xx), nn ),
             sar16( sel16x4_0(xx), nn )
          );
}

/* Arithmetic (sign-filling) right shift of each 8-bit lane. */
ULong h_generic_calc_SarN8x8 ( ULong xx, UInt nn )
{
   /* vassert(nn < 8); */
   nn &= 7;
   return mk8x8(
             sar8( sel8x8_7(xx), nn ),
             sar8( sel8x8_6(xx), nn ),
             sar8( sel8x8_5(xx), nn ),
             sar8( sel8x8_4(xx), nn ),
             sar8( sel8x8_3(xx), nn ),
             sar8( sel8x8_2(xx), nn ),
             sar8( sel8x8_1(xx), nn ),
             sar8( sel8x8_0(xx), nn )
          );
}
+
/* ------------ Averaging ------------ */
/* Unsigned rounding average per lane: (x + y + 1) >> 1. */

ULong h_generic_calc_Avg8Ux8 ( ULong xx, ULong yy )
{
   return mk8x8(
             avg8U( sel8x8_7(xx), sel8x8_7(yy) ),
             avg8U( sel8x8_6(xx), sel8x8_6(yy) ),
             avg8U( sel8x8_5(xx), sel8x8_5(yy) ),
             avg8U( sel8x8_4(xx), sel8x8_4(yy) ),
             avg8U( sel8x8_3(xx), sel8x8_3(yy) ),
             avg8U( sel8x8_2(xx), sel8x8_2(yy) ),
             avg8U( sel8x8_1(xx), sel8x8_1(yy) ),
             avg8U( sel8x8_0(xx), sel8x8_0(yy) )
          );
}

ULong h_generic_calc_Avg16Ux4 ( ULong xx, ULong yy )
{
   return mk16x4(
             avg16U( sel16x4_3(xx), sel16x4_3(yy) ),
             avg16U( sel16x4_2(xx), sel16x4_2(yy) ),
             avg16U( sel16x4_1(xx), sel16x4_1(yy) ),
             avg16U( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

/* ------------ max/min ------------ */
/* Lane-wise maximum/minimum, signed 16-bit and unsigned 8-bit. */

ULong h_generic_calc_Max16Sx4 ( ULong xx, ULong yy )
{
   return mk16x4(
             max16S( sel16x4_3(xx), sel16x4_3(yy) ),
             max16S( sel16x4_2(xx), sel16x4_2(yy) ),
             max16S( sel16x4_1(xx), sel16x4_1(yy) ),
             max16S( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

ULong h_generic_calc_Max8Ux8 ( ULong xx, ULong yy )
{
   return mk8x8(
             max8U( sel8x8_7(xx), sel8x8_7(yy) ),
             max8U( sel8x8_6(xx), sel8x8_6(yy) ),
             max8U( sel8x8_5(xx), sel8x8_5(yy) ),
             max8U( sel8x8_4(xx), sel8x8_4(yy) ),
             max8U( sel8x8_3(xx), sel8x8_3(yy) ),
             max8U( sel8x8_2(xx), sel8x8_2(yy) ),
             max8U( sel8x8_1(xx), sel8x8_1(yy) ),
             max8U( sel8x8_0(xx), sel8x8_0(yy) )
          );
}

ULong h_generic_calc_Min16Sx4 ( ULong xx, ULong yy )
{
   return mk16x4(
             min16S( sel16x4_3(xx), sel16x4_3(yy) ),
             min16S( sel16x4_2(xx), sel16x4_2(yy) ),
             min16S( sel16x4_1(xx), sel16x4_1(yy) ),
             min16S( sel16x4_0(xx), sel16x4_0(yy) )
          );
}

ULong h_generic_calc_Min8Ux8 ( ULong xx, ULong yy )
{
   return mk8x8(
             min8U( sel8x8_7(xx), sel8x8_7(yy) ),
             min8U( sel8x8_6(xx), sel8x8_6(yy) ),
             min8U( sel8x8_5(xx), sel8x8_5(yy) ),
             min8U( sel8x8_4(xx), sel8x8_4(yy) ),
             min8U( sel8x8_3(xx), sel8x8_3(yy) ),
             min8U( sel8x8_2(xx), sel8x8_2(yy) ),
             min8U( sel8x8_1(xx), sel8x8_1(yy) ),
             min8U( sel8x8_0(xx), sel8x8_0(yy) )
          );
}
+
+UInt h_generic_calc_GetMSBs8x8 ( ULong xx )
+{
+   UInt r = 0;
+   if (xx & (1ULL << (64-1))) r |= (1<<7);
+   if (xx & (1ULL << (56-1))) r |= (1<<6);
+   if (xx & (1ULL << (48-1))) r |= (1<<5);
+   if (xx & (1ULL << (40-1))) r |= (1<<4);
+   if (xx & (1ULL << (32-1))) r |= (1<<3);
+   if (xx & (1ULL << (24-1))) r |= (1<<2);
+   if (xx & (1ULL << (16-1))) r |= (1<<1);
+   if (xx & (1ULL << ( 8-1))) r |= (1<<0);
+   return r;
+}
+
+/* ------------ SOME 32-bit SIMD HELPERS TOO ------------ */
+
+/* Tuple/select functions for 16x2 vectors. */
+static inline UInt mk16x2 ( UShort w1, UShort w2 ) {
+   return (((UInt)w1) << 16) | ((UInt)w2);
+}
+
+static inline UShort sel16x2_1 ( UInt w32 ) {
+   return 0xFFFF & (UShort)(w32 >> 16);
+}
+static inline UShort sel16x2_0 ( UInt w32 ) {
+   return 0xFFFF & (UShort)(w32);
+}
+
+static inline UInt mk8x4 ( UChar w3, UChar w2,
+                           UChar w1, UChar w0 ) {
+   UInt w32 =   (((UInt)w3) << 24) | (((UInt)w2) << 16)
+              | (((UInt)w1) << 8)  | (((UInt)w0) << 0);
+   return w32;
+}
+
+static inline UChar sel8x4_3 ( UInt w32 ) {
+   return toUChar(0xFF & (w32 >> 24));
+}
+static inline UChar sel8x4_2 ( UInt w32 ) {
+   return toUChar(0xFF & (w32 >> 16));
+}
+static inline UChar sel8x4_1 ( UInt w32 ) {
+   return toUChar(0xFF & (w32 >> 8));
+}
+static inline UChar sel8x4_0 ( UInt w32 ) {
+   return toUChar(0xFF & (w32 >> 0));
+}
+
+
+/* ----------------------------------------------------- */
+/* More externally visible functions.  These simply
+   implement the corresponding IR primops. */
+/* ----------------------------------------------------- */
+
/* ------ 16x2 ------ */
/* 32-bit SIMD: two 16-bit lanes packed in a UInt.  Each function maps
   the corresponding scalar helper across both lanes. */

UInt h_generic_calc_Add16x2 ( UInt xx, UInt yy )
{
   return mk16x2( sel16x2_1(xx) + sel16x2_1(yy),
                  sel16x2_0(xx) + sel16x2_0(yy) );
}

UInt h_generic_calc_Sub16x2 ( UInt xx, UInt yy )
{
   return mk16x2( sel16x2_1(xx) - sel16x2_1(yy),
                  sel16x2_0(xx) - sel16x2_0(yy) );
}

/* Halving (non-rounding) adds and subtracts. */
UInt h_generic_calc_HAdd16Ux2 ( UInt xx, UInt yy )
{
   return mk16x2( hadd16U( sel16x2_1(xx), sel16x2_1(yy) ),
                  hadd16U( sel16x2_0(xx), sel16x2_0(yy) ) );
}

UInt h_generic_calc_HAdd16Sx2 ( UInt xx, UInt yy )
{
   return mk16x2( hadd16S( sel16x2_1(xx), sel16x2_1(yy) ),
                  hadd16S( sel16x2_0(xx), sel16x2_0(yy) ) );
}

UInt h_generic_calc_HSub16Ux2 ( UInt xx, UInt yy )
{
   return mk16x2( hsub16U( sel16x2_1(xx), sel16x2_1(yy) ),
                  hsub16U( sel16x2_0(xx), sel16x2_0(yy) ) );
}

UInt h_generic_calc_HSub16Sx2 ( UInt xx, UInt yy )
{
   return mk16x2( hsub16S( sel16x2_1(xx), sel16x2_1(yy) ),
                  hsub16S( sel16x2_0(xx), sel16x2_0(yy) ) );
}

/* Saturating adds and subtracts. */
UInt h_generic_calc_QAdd16Ux2 ( UInt xx, UInt yy )
{
   return mk16x2( qadd16U( sel16x2_1(xx), sel16x2_1(yy) ),
                  qadd16U( sel16x2_0(xx), sel16x2_0(yy) ) );
}

UInt h_generic_calc_QAdd16Sx2 ( UInt xx, UInt yy )
{
   return mk16x2( qadd16S( sel16x2_1(xx), sel16x2_1(yy) ),
                  qadd16S( sel16x2_0(xx), sel16x2_0(yy) ) );
}

UInt h_generic_calc_QSub16Ux2 ( UInt xx, UInt yy )
{
   return mk16x2( qsub16U( sel16x2_1(xx), sel16x2_1(yy) ),
                  qsub16U( sel16x2_0(xx), sel16x2_0(yy) ) );
}

UInt h_generic_calc_QSub16Sx2 ( UInt xx, UInt yy )
{
   return mk16x2( qsub16S( sel16x2_1(xx), sel16x2_1(yy) ),
                  qsub16S( sel16x2_0(xx), sel16x2_0(yy) ) );
}
+
/* ------ 8x4 ------ */
/* 32-bit SIMD: four 8-bit lanes packed in a UInt. */

UInt h_generic_calc_Add8x4 ( UInt xx, UInt yy )
{
   return mk8x4(
             sel8x4_3(xx) + sel8x4_3(yy),
             sel8x4_2(xx) + sel8x4_2(yy),
             sel8x4_1(xx) + sel8x4_1(yy),
             sel8x4_0(xx) + sel8x4_0(yy)
          );
}

UInt h_generic_calc_Sub8x4 ( UInt xx, UInt yy )
{
   return mk8x4(
             sel8x4_3(xx) - sel8x4_3(yy),
             sel8x4_2(xx) - sel8x4_2(yy),
             sel8x4_1(xx) - sel8x4_1(yy),
             sel8x4_0(xx) - sel8x4_0(yy)
          );
}

/* Halving (non-rounding) adds and subtracts. */
UInt h_generic_calc_HAdd8Ux4 ( UInt xx, UInt yy )
{
   return mk8x4(
             hadd8U( sel8x4_3(xx), sel8x4_3(yy) ),
             hadd8U( sel8x4_2(xx), sel8x4_2(yy) ),
             hadd8U( sel8x4_1(xx), sel8x4_1(yy) ),
             hadd8U( sel8x4_0(xx), sel8x4_0(yy) )
          );
}

UInt h_generic_calc_HAdd8Sx4 ( UInt xx, UInt yy )
{
   return mk8x4(
             hadd8S( sel8x4_3(xx), sel8x4_3(yy) ),
             hadd8S( sel8x4_2(xx), sel8x4_2(yy) ),
             hadd8S( sel8x4_1(xx), sel8x4_1(yy) ),
             hadd8S( sel8x4_0(xx), sel8x4_0(yy) )
          );
}

UInt h_generic_calc_HSub8Ux4 ( UInt xx, UInt yy )
{
   return mk8x4(
             hsub8U( sel8x4_3(xx), sel8x4_3(yy) ),
             hsub8U( sel8x4_2(xx), sel8x4_2(yy) ),
             hsub8U( sel8x4_1(xx), sel8x4_1(yy) ),
             hsub8U( sel8x4_0(xx), sel8x4_0(yy) )
          );
}

UInt h_generic_calc_HSub8Sx4 ( UInt xx, UInt yy )
{
   return mk8x4(
             hsub8S( sel8x4_3(xx), sel8x4_3(yy) ),
             hsub8S( sel8x4_2(xx), sel8x4_2(yy) ),
             hsub8S( sel8x4_1(xx), sel8x4_1(yy) ),
             hsub8S( sel8x4_0(xx), sel8x4_0(yy) )
          );
}

/* Saturating adds and subtracts. */
UInt h_generic_calc_QAdd8Ux4 ( UInt xx, UInt yy )
{
   return mk8x4(
             qadd8U( sel8x4_3(xx), sel8x4_3(yy) ),
             qadd8U( sel8x4_2(xx), sel8x4_2(yy) ),
             qadd8U( sel8x4_1(xx), sel8x4_1(yy) ),
             qadd8U( sel8x4_0(xx), sel8x4_0(yy) )
          );
}

UInt h_generic_calc_QAdd8Sx4 ( UInt xx, UInt yy )
{
   return mk8x4(
             qadd8S( sel8x4_3(xx), sel8x4_3(yy) ),
             qadd8S( sel8x4_2(xx), sel8x4_2(yy) ),
             qadd8S( sel8x4_1(xx), sel8x4_1(yy) ),
             qadd8S( sel8x4_0(xx), sel8x4_0(yy) )
          );
}

UInt h_generic_calc_QSub8Ux4 ( UInt xx, UInt yy )
{
   return mk8x4(
             qsub8U( sel8x4_3(xx), sel8x4_3(yy) ),
             qsub8U( sel8x4_2(xx), sel8x4_2(yy) ),
             qsub8U( sel8x4_1(xx), sel8x4_1(yy) ),
             qsub8U( sel8x4_0(xx), sel8x4_0(yy) )
          );
}

UInt h_generic_calc_QSub8Sx4 ( UInt xx, UInt yy )
{
   return mk8x4(
             qsub8S( sel8x4_3(xx), sel8x4_3(yy) ),
             qsub8S( sel8x4_2(xx), sel8x4_2(yy) ),
             qsub8S( sel8x4_1(xx), sel8x4_1(yy) ),
             qsub8S( sel8x4_0(xx), sel8x4_0(yy) )
          );
}

/* "Not equal to zero" mask per lane. */
UInt h_generic_calc_CmpNEZ16x2 ( UInt xx )
{
   return mk16x2(
             cmpnez16( sel16x2_1(xx) ),
             cmpnez16( sel16x2_0(xx) )
          );
}

UInt h_generic_calc_CmpNEZ8x4 ( UInt xx )
{
   return mk8x4(
             cmpnez8( sel8x4_3(xx) ),
             cmpnez8( sel8x4_2(xx) ),
             cmpnez8( sel8x4_1(xx) ),
             cmpnez8( sel8x4_0(xx) )
          );
}

/* Sum of absolute differences across the four 8-bit lanes. */
UInt h_generic_calc_Sad8Ux4 ( UInt xx, UInt yy )
{
   return absdiff8U( sel8x4_3(xx), sel8x4_3(yy) )
          + absdiff8U( sel8x4_2(xx), sel8x4_2(yy) )
          + absdiff8U( sel8x4_1(xx), sel8x4_1(yy) )
          + absdiff8U( sel8x4_0(xx), sel8x4_0(yy) );
}

/* Scalar 32-bit saturating add/subtract, delegated to the helpers. */
UInt h_generic_calc_QAdd32S ( UInt xx, UInt yy )
{
   return qadd32S( xx, yy );
}

UInt h_generic_calc_QSub32S ( UInt xx, UInt yy )
{
   return qsub32S( xx, yy );
}
+
+
/*------------------------------------------------------------------*/
/* Decimal Floating Point (DFP) externally visible helper functions */
/* that implement Iop_BCDtoDPB and Iop_DPBtoBCD                     */
/*------------------------------------------------------------------*/

/* Bit-level helpers for the DPD <-> BCD boolean equations. */
#define NOT( x )    ( ( ( x ) == 0) ? 1 : 0)
#define GET( x, y ) ( ( ( x ) & ( 0x1UL << ( y ) ) ) >> ( y ) )
#define PUT( x, y ) ( ( x )<< ( y ) )

/* Expand one 10-bit Densely Packed Decimal (DPD) group, in the low
   10 bits of 'chunk', into a 12-bit (3-digit) BCD value.  The bit
   names and boolean equations mirror the Power ISA description:
   DPD bits are [pqrstuvwxy], BCD bits are [abcdefghijkm]. */
static ULong dpb_to_bcd( ULong chunk )
{
   Short a, b, c, d, e, f, g, h, i, j, k, m;
   Short p, q, r, s, t, u, v, w, x, y;
   ULong value;

   /* Extract the 10 DPD input bits, most significant first. */
   p = GET( chunk, 9 );
   q = GET( chunk, 8 );
   r = GET( chunk, 7 );
   s = GET( chunk, 6 );
   t = GET( chunk, 5 );
   u = GET( chunk, 4 );
   v = GET( chunk, 3 );
   w = GET( chunk, 2 );
   x = GET( chunk, 1 );
   y = GET( chunk, 0 );

   /* The BCD bit values are given by the following boolean equations.*/
   a = ( NOT(s) & v & w ) | ( t & v & w & s ) | ( v & w & NOT(x) );
   b = ( p & s & x & NOT(t) ) | ( p & NOT(w) ) | ( p & NOT(v) );
   c = ( q & s & x & NOT(t) ) | ( q & NOT(w) ) | ( q & NOT(v) );
   d = r;
   e = ( v & NOT(w) & x ) | ( s & v & w & x ) | ( NOT(t) & v & x & w );
   f = ( p & t & v & w & x & NOT(s) ) | ( s & NOT(x) & v ) | ( s & NOT(v) );
   g = ( q & t & w & v & x & NOT(s) ) | ( t & NOT(x) & v ) | ( t & NOT(v) );
   h = u;
   i = ( t & v & w & x ) | ( s & v & w & x ) | ( v & NOT(w) & NOT(x) );
   j = ( p & NOT(s) & NOT(t) & w & v ) | ( s & v & NOT(w) & x )
            | ( p & w & NOT(x) & v ) | ( w & NOT(v) );
   k = ( q & NOT(s) & NOT(t) & v & w ) | ( t & v & NOT(w) & x )
            | ( q & v & w & NOT(x) ) | ( x & NOT(v) );
   m = y;

   /* Assemble the 12 BCD output bits, 'a' most significant. */
   value = PUT(a, 11) | PUT(b, 10) | PUT(c, 9) | PUT(d, 8) | PUT(e, 7)
            | PUT(f, 6) | PUT(g, 5) | PUT(h, 4) | PUT(i, 3) | PUT(j, 2)
            | PUT(k, 1) | PUT(m, 0);
   return value;
}
+
/* Compress one 12-bit (3-digit) BCD value, in the low 12 bits of
   'chunk', into a 10-bit Densely Packed Decimal (DPD) group. */
static ULong bcd_to_dpb( ULong chunk )
{
   Short a, b, c, d, e, f, g, h, i, j, k, m;
   Short p, q, r, s, t, u, v, w, x, y;
   ULong value;
   /* Convert a 3 digit BCD value to a 10 bit Densely Packed Binary (DPD) value
    The boolean equations to calculate the value of each of the DPD bit
    is given in Appendix B  of Book 1: Power ISA User Instruction set.  The
    bits for the DPD number are [abcdefghijkm].  The bits for the BCD value
    are [pqrstuvwxy].  The boolean logic equations in pseudo C code are:
    */
   a = GET( chunk, 11 );
   b = GET( chunk, 10 );
   c = GET( chunk, 9 );
   d = GET( chunk, 8 );
   e = GET( chunk, 7 );
   f = GET( chunk, 6 );
   g = GET( chunk, 5 );
   h = GET( chunk, 4 );
   i = GET( chunk, 3 );
   j = GET( chunk, 2 );
   k = GET( chunk, 1 );
   m = GET( chunk, 0 );

   p = ( f & a & i & NOT(e) ) | ( j & a & NOT(i) ) | ( b & NOT(a) );
   q = ( g & a & i & NOT(e) ) | ( k & a & NOT(i) ) | ( c & NOT(a) );
   r = d;
   s = ( j & NOT(a) & e & NOT(i) ) | ( f & NOT(i) & NOT(e) )
            | ( f & NOT(a) & NOT(e) ) | ( e & i );
   t = ( k & NOT(a) & e & NOT(i) ) | ( g & NOT(i) & NOT(e) )
            | ( g & NOT(a) & NOT(e) ) | ( a & i );
   u = h;
   v = a | e | i;
   w = ( NOT(e) & j & NOT(i) ) | ( e & i ) | a;
   x = ( NOT(a) & k & NOT(i) ) | ( a & i ) | e;
   y = m;

   /* Assemble the 10 DPD output bits, 'p' most significant. */
   value = PUT(p, 9) | PUT(q, 8) | PUT(r, 7) | PUT(s, 6) | PUT(t, 5) 
            | PUT(u, 4) | PUT(v, 3) | PUT(w, 2) | PUT(x, 1) | y;

   return value;
}
+
+ULong h_calc_DPBtoBCD( ULong dpb )
+{
+   ULong result, chunk;
+   Int i;
+
+   result = 0;
+
+   for (i = 0; i < 5; i++) {
+      chunk = dpb >> ( 4 - i ) * 10;
+      result = result << 12;
+      result |= dpb_to_bcd( chunk & 0x3FF );
+   }
+   return result;
+}
+
+ULong h_calc_BCDtoDPB( ULong bcd )
+{
+   ULong result, chunk;
+   Int i;
+
+   result = 0;
+
+   for (i = 0; i < 5; i++) {
+      chunk = bcd >> ( 4 - i ) * 12;
+      result = result << 10;
+      result |= bcd_to_dpb( chunk & 0xFFF );
+   }
+   return result;
+}
+#undef NOT
+#undef GET
+#undef PUT
+
+
+/* ----------------------------------------------------- */
+/* Signed and unsigned integer division, that behave like
+   the ARMv7 UDIV and SDIV instructions.
+
+   sdiv32 also behaves like 64-bit v8 SDIV on w-regs.
+   udiv32 also behaves like 64-bit v8 UDIV on w-regs.
+*/
+/* ----------------------------------------------------- */
+
+UInt h_calc_udiv32_w_arm_semantics ( UInt x, UInt y )
+{
+   // Division by zero --> zero
+   if (UNLIKELY(y == 0)) return 0;
+   // C requires rounding towards zero, which is also what we need.
+   return x / y;
+}
+
+ULong h_calc_udiv64_w_arm_semantics ( ULong x, ULong y )
+{
+   // Division by zero --> zero
+   if (UNLIKELY(y == 0)) return 0;
+   // C requires rounding towards zero, which is also what we need.
+   return x / y;
+}
+
+Int h_calc_sdiv32_w_arm_semantics ( Int x, Int y )
+{
+   // Division by zero --> zero
+   if (UNLIKELY(y == 0)) return 0;
+   // The single case that produces an unrepresentable result
+   if (UNLIKELY( ((UInt)x) == ((UInt)0x80000000)
+                 && ((UInt)y) == ((UInt)0xFFFFFFFF) ))
+      return (Int)(UInt)0x80000000;
+   // Else return the result rounded towards zero.  C89 says
+   // this is implementation defined (in the signed case), but gcc
+   // promises to round towards zero.  Nevertheless, at startup,
+   // in main_main.c, do a check for that.
+   return x / y;
+}
+
+Long h_calc_sdiv64_w_arm_semantics ( Long x, Long y )
+{
+   // Division by zero --> zero
+   if (UNLIKELY(y == 0)) return 0;
+   // The single case that produces an unrepresentable result
+   if (UNLIKELY( ((ULong)x) == ((ULong)0x8000000000000000ULL )
+                 && ((ULong)y) == ((ULong)0xFFFFFFFFFFFFFFFFULL ) ))
+      return (Long)(ULong)0x8000000000000000ULL;
+   // Else return the result rounded towards zero.  C89 says
+   // this is implementation defined (in the signed case), but gcc
+   // promises to round towards zero.  Nevertheless, at startup,
+   // in main_main.c, do a check for that.
+   return x / y;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                               host_generic_simd64.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_generic_simd64.h b/VEX/priv/host_generic_simd64.h
new file mode 100644
index 0000000..e8d33df
--- /dev/null
+++ b/VEX/priv/host_generic_simd64.h
@@ -0,0 +1,179 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                             host_generic_simd64.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* Generic helper functions for doing 64-bit SIMD arithmetic in cases
+   where the instruction selectors cannot generate code in-line.
+   These are purely back-end entities and cannot be seen/referenced
+   as clean helper functions from IR.
+
+   These will get called from generated code and therefore should be
+   well behaved -- no floating point or mmx insns, just straight
+   integer code.
+
+   Each function implements the correspondingly-named IR primop.
+*/
+
+#ifndef __VEX_HOST_GENERIC_SIMD64_H
+#define __VEX_HOST_GENERIC_SIMD64_H
+
+#include "libvex_basictypes.h"
+
+/* DO NOT MAKE THESE INTO REGPARM FNS!  THIS WILL BREAK CALLING
+   SEQUENCES GENERATED BY host-x86/isel.c. */
+
+extern ULong h_generic_calc_Add32x2 ( ULong, ULong );
+extern ULong h_generic_calc_Add16x4 ( ULong, ULong );
+extern ULong h_generic_calc_Add8x8  ( ULong, ULong );
+
+extern ULong h_generic_calc_QAdd16Sx4 ( ULong, ULong );
+extern ULong h_generic_calc_QAdd8Sx8  ( ULong, ULong );
+extern ULong h_generic_calc_QAdd16Ux4 ( ULong, ULong );
+extern ULong h_generic_calc_QAdd8Ux8  ( ULong, ULong );
+
+extern ULong h_generic_calc_Sub32x2 ( ULong, ULong );
+extern ULong h_generic_calc_Sub16x4 ( ULong, ULong );
+extern ULong h_generic_calc_Sub8x8  ( ULong, ULong );
+
+extern ULong h_generic_calc_QSub16Sx4 ( ULong, ULong );
+extern ULong h_generic_calc_QSub8Sx8  ( ULong, ULong );
+extern ULong h_generic_calc_QSub16Ux4 ( ULong, ULong );
+extern ULong h_generic_calc_QSub8Ux8  ( ULong, ULong );
+
+extern ULong h_generic_calc_Mul16x4    ( ULong, ULong );
+extern ULong h_generic_calc_Mul32x2    ( ULong, ULong );
+extern ULong h_generic_calc_MulHi16Sx4 ( ULong, ULong );
+extern ULong h_generic_calc_MulHi16Ux4 ( ULong, ULong );
+
+extern ULong h_generic_calc_CmpEQ32x2  ( ULong, ULong );
+extern ULong h_generic_calc_CmpEQ16x4  ( ULong, ULong );
+extern ULong h_generic_calc_CmpEQ8x8   ( ULong, ULong );
+extern ULong h_generic_calc_CmpGT32Sx2 ( ULong, ULong );
+extern ULong h_generic_calc_CmpGT16Sx4 ( ULong, ULong );
+extern ULong h_generic_calc_CmpGT8Sx8  ( ULong, ULong );
+
+extern ULong h_generic_calc_CmpNEZ32x2 ( ULong );
+extern ULong h_generic_calc_CmpNEZ16x4 ( ULong );
+extern ULong h_generic_calc_CmpNEZ8x8  ( ULong );
+
+extern ULong h_generic_calc_QNarrowBin32Sto16Sx4 ( ULong, ULong );
+extern ULong h_generic_calc_QNarrowBin16Sto8Sx8  ( ULong, ULong );
+extern ULong h_generic_calc_QNarrowBin16Sto8Ux8  ( ULong, ULong );
+extern ULong h_generic_calc_NarrowBin32to16x4    ( ULong, ULong );
+extern ULong h_generic_calc_NarrowBin16to8x8     ( ULong, ULong );
+
+extern ULong h_generic_calc_InterleaveHI8x8 ( ULong, ULong );
+extern ULong h_generic_calc_InterleaveLO8x8 ( ULong, ULong );
+extern ULong h_generic_calc_InterleaveHI16x4 ( ULong, ULong );
+extern ULong h_generic_calc_InterleaveLO16x4 ( ULong, ULong );
+extern ULong h_generic_calc_InterleaveHI32x2 ( ULong, ULong );
+extern ULong h_generic_calc_InterleaveLO32x2 ( ULong, ULong );
+
+extern ULong h_generic_calc_CatOddLanes16x4 ( ULong, ULong );
+extern ULong h_generic_calc_CatEvenLanes16x4 ( ULong, ULong );
+extern ULong h_generic_calc_Perm8x8 ( ULong, ULong );
+
+extern ULong h_generic_calc_ShlN8x8  ( ULong, UInt );
+extern ULong h_generic_calc_ShlN16x4 ( ULong, UInt );
+extern ULong h_generic_calc_ShlN32x2 ( ULong, UInt );
+
+extern ULong h_generic_calc_ShrN16x4 ( ULong, UInt );
+extern ULong h_generic_calc_ShrN32x2 ( ULong, UInt );
+
+extern ULong h_generic_calc_SarN8x8  ( ULong, UInt );
+extern ULong h_generic_calc_SarN16x4 ( ULong, UInt );
+extern ULong h_generic_calc_SarN32x2 ( ULong, UInt );
+
+extern ULong h_generic_calc_Avg8Ux8  ( ULong, ULong );
+extern ULong h_generic_calc_Avg16Ux4 ( ULong, ULong );
+
+extern ULong h_generic_calc_Max16Sx4 ( ULong, ULong );
+extern ULong h_generic_calc_Max8Ux8  ( ULong, ULong );
+extern ULong h_generic_calc_Min16Sx4 ( ULong, ULong );
+extern ULong h_generic_calc_Min8Ux8  ( ULong, ULong );
+
+extern UInt  h_generic_calc_GetMSBs8x8 ( ULong );
+
+/* 32-bit SIMD HELPERS */
+
+extern UInt h_generic_calc_Add16x2   ( UInt, UInt );
+extern UInt h_generic_calc_Sub16x2   ( UInt, UInt );
+
+extern UInt h_generic_calc_HAdd16Ux2 ( UInt, UInt );
+extern UInt h_generic_calc_HAdd16Sx2 ( UInt, UInt );
+extern UInt h_generic_calc_HSub16Ux2 ( UInt, UInt );
+extern UInt h_generic_calc_HSub16Sx2 ( UInt, UInt );
+
+extern UInt h_generic_calc_QAdd16Ux2 ( UInt, UInt );
+extern UInt h_generic_calc_QAdd16Sx2 ( UInt, UInt );
+extern UInt h_generic_calc_QSub16Ux2 ( UInt, UInt );
+extern UInt h_generic_calc_QSub16Sx2 ( UInt, UInt );
+
+extern UInt h_generic_calc_Add8x4   ( UInt, UInt );
+extern UInt h_generic_calc_Sub8x4   ( UInt, UInt );
+
+extern UInt h_generic_calc_HAdd8Ux4 ( UInt, UInt );
+extern UInt h_generic_calc_HAdd8Sx4 ( UInt, UInt );
+extern UInt h_generic_calc_HSub8Ux4 ( UInt, UInt );
+extern UInt h_generic_calc_HSub8Sx4 ( UInt, UInt );
+
+extern UInt h_generic_calc_QAdd8Ux4 ( UInt, UInt );
+extern UInt h_generic_calc_QAdd8Sx4 ( UInt, UInt );
+extern UInt h_generic_calc_QSub8Ux4 ( UInt, UInt );
+extern UInt h_generic_calc_QSub8Sx4 ( UInt, UInt );
+
+extern UInt h_generic_calc_Sad8Ux4  ( UInt, UInt );
+
+extern UInt h_generic_calc_QAdd32S  ( UInt, UInt );
+extern UInt h_generic_calc_QSub32S  ( UInt, UInt );
+
+extern UInt h_generic_calc_CmpNEZ16x2 ( UInt );
+extern UInt h_generic_calc_CmpNEZ8x4  ( UInt );
+
+extern ULong h_calc_DPBtoBCD ( ULong dpb );
+extern ULong h_calc_BCDtoDPB ( ULong bcd );
+
+// Signed and unsigned integer division, that behave like
+// the ARMv7 UDIV and SDIV instructions.
+extern UInt  h_calc_udiv32_w_arm_semantics ( UInt,  UInt  );
+extern ULong h_calc_udiv64_w_arm_semantics ( ULong, ULong );
+extern Int   h_calc_sdiv32_w_arm_semantics ( Int,   Int   );
+extern Long  h_calc_sdiv64_w_arm_semantics ( Long,  Long  );
+
+
+#endif /* ndef __VEX_HOST_GENERIC_SIMD64_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                               host_generic_simd64.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_mips_defs.c b/VEX/priv/host_mips_defs.c
new file mode 100644
index 0000000..d85a306
--- /dev/null
+++ b/VEX/priv/host_mips_defs.c
@@ -0,0 +1,4050 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                  host_mips_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 RT-RK
+      mips-valgrind@rt-rk.com
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "host_mips_defs.h"
+
+/* guest_COND offset. */
+#define COND_OFFSET(__mode64) (__mode64 ? 612 : 448)
+
+/* Register number for guest state pointer in host code. */
+#define GuestSP 23
+
+
+/*---------------- Registers ----------------*/
+
/* Return the universe of real registers available on MIPS, for the
   requested mode (32- or 64-bit).  The result is a lazily-initialised
   static; the insertion order below is significant, since the initial
   segment (up to ru->allocable) defines the registers the allocator
   may hand out, in preference order. */
const RRegUniverse* getRRegUniverse_MIPS ( Bool mode64 )
{
   /* The real-register universe is a big constant, so we just want to
      initialise it once.  rRegUniverse_MIPS_initted values: 0=not initted,
      1=initted for 32-bit-mode, 2=initted for 64-bit-mode */
   static RRegUniverse rRegUniverse_MIPS;
   static UInt         rRegUniverse_MIPS_initted = 0;

   /* Handy shorthand, nothing more */
   RRegUniverse* ru = &rRegUniverse_MIPS;

   /* This isn't thread-safe.  Sigh. */
   UInt howNeeded = mode64 ? 2 : 1;
   if (LIKELY(rRegUniverse_MIPS_initted == howNeeded))
      return ru;

   RRegUniverse__init(ru);

   /* Add the registers.  The initial segment of this array must be
      those available for allocation by reg-alloc, and those that
      follow are not available for allocation. */
   /* Callee-saved integer registers s0..s6 ($16..$22). */
   ru->regs[ru->size++] = hregMIPS_GPR16(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR17(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR18(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR19(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR20(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR21(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR22(mode64);

   /* Temporaries t4..t7 ($12..$15) and t8 ($24). */
   ru->regs[ru->size++] = hregMIPS_GPR12(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR13(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR14(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR15(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR24(mode64);
   /* s7  (=guest_state) */
   /* Even-numbered floating-point registers f16..f30. */
   ru->regs[ru->size++] = hregMIPS_F16(mode64);
   ru->regs[ru->size++] = hregMIPS_F18(mode64);
   ru->regs[ru->size++] = hregMIPS_F20(mode64);
   ru->regs[ru->size++] = hregMIPS_F22(mode64);
   ru->regs[ru->size++] = hregMIPS_F24(mode64);
   ru->regs[ru->size++] = hregMIPS_F26(mode64);
   ru->regs[ru->size++] = hregMIPS_F28(mode64);
   ru->regs[ru->size++] = hregMIPS_F30(mode64);
   if (!mode64) {
      /* Fake double floating point */
      ru->regs[ru->size++] = hregMIPS_D0(mode64);
      ru->regs[ru->size++] = hregMIPS_D1(mode64);
      ru->regs[ru->size++] = hregMIPS_D2(mode64);
      ru->regs[ru->size++] = hregMIPS_D3(mode64);
      ru->regs[ru->size++] = hregMIPS_D4(mode64);
      ru->regs[ru->size++] = hregMIPS_D5(mode64);
      ru->regs[ru->size++] = hregMIPS_D6(mode64);
      ru->regs[ru->size++] = hregMIPS_D7(mode64);
   }

   /* Everything added so far is available to the allocator. */
   ru->allocable = ru->size;
   /* And other regs, not available to the allocator. */

   ru->regs[ru->size++] = hregMIPS_HI(mode64);
   ru->regs[ru->size++] = hregMIPS_LO(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR0(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR1(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR2(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR3(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR4(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR5(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR6(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR7(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR8(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR9(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR10(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR11(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR23(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR25(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR29(mode64);
   ru->regs[ru->size++] = hregMIPS_GPR31(mode64);

   /* Remember which mode we initialised for, so a repeat call for the
      same mode is a fast path. */
   rRegUniverse_MIPS_initted = howNeeded;

   RRegUniverse__check_is_sane(ru);
   return ru;
}
+
+
+void ppHRegMIPS(HReg reg, Bool mode64)
+{
+   Int r;
+   static const HChar *ireg32_names[35]
+       = { "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
+      "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
+      "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
+      "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
+      "%32", "%33", "%34",
+   };
+
+   static const HChar *freg32_names[32]
+       = { "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+      "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+      "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+      "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "f30", "$f31"
+   };
+
+   static const HChar *freg64_names[32]
+       = { "$d0", "$d1", "$d2", "$d3", "$d4", "$d5", "$d6", "$d7",
+      "$d8", "$d9", "$d10", "$d11", "$d12", "$d13", "$d14", "$d15",
+   };
+
+   /* Be generic for all virtual regs. */
+   if (hregIsVirtual(reg)) {
+      ppHReg(reg);
+      return;
+   }
+
+   /* But specific for real regs. */
+   vassert(hregClass(reg) == HRcInt32 || hregClass(reg) == HRcInt64 ||
+           hregClass(reg) == HRcFlt32 || hregClass(reg) == HRcFlt64);
+
+   /* But specific for real regs. */
+   switch (hregClass(reg)) {
+      case HRcInt32:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 32);
+         vex_printf("%s", ireg32_names[r]);
+         return;
+      case HRcInt64:
+         r = hregEncoding (reg);
+         vassert (r >= 0 && r < 32);
+         vex_printf ("%s", ireg32_names[r]);
+         return;
+      case HRcFlt32:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 32);
+         vex_printf("%s", freg32_names[r]);
+         return;
+      case HRcFlt64:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 32);
+         vex_printf("%s", freg64_names[r]);
+         return;
+      default:
+         vpanic("ppHRegMIPS");
+         break;
+   }
+
+   return;
+}
+
+
+/*----------------- Condition Codes ----------------------*/
+
+const HChar *showMIPSCondCode(MIPSCondCode cond)
+{
+   const HChar* ret;
+   switch (cond) {
+      case MIPScc_EQ:
+         ret = "EQ";  /* equal */
+         break;
+      case MIPScc_NE:
+         ret = "NEQ";  /* not equal */
+         break;
+      case MIPScc_HS:
+         ret = "GE";  /* >=u (Greater Than or Equal) */
+         break;
+      case MIPScc_LO:
+         ret = "LT";  /* <u  (lower) */
+         break;
+      case MIPScc_MI:
+         ret = "MI";  /* minus (negative) */
+         break;
+      case MIPScc_PL:
+         ret = "PL";  /* plus (zero or +ve) */
+         break;
+      case MIPScc_VS:
+         ret = "VS";  /* overflow */
+         break;
+      case MIPScc_VC:
+         ret = "VC";  /* no overflow */
+         break;
+      case MIPScc_HI:
+         ret = "HI";  /* >u   (higher) */
+         break;
+      case MIPScc_LS:
+         ret = "LS";  /* <=u  (lower or same) */
+         break;
+      case MIPScc_GE:
+         ret = "GE";  /* >=s (signed greater or equal) */
+         break;
+      case MIPScc_LT:
+         ret = "LT";  /* <s  (signed less than) */
+         break;
+      case MIPScc_GT:
+         ret = "GT";  /* >s  (signed greater) */
+         break;
+      case MIPScc_LE:
+         ret = "LE";  /* <=s (signed less or equal) */
+         break;
+      case MIPScc_AL:
+         ret = "AL";  /* always (unconditional) */
+         break;
+      case MIPScc_NV:
+         ret = "NV";  /* never (unconditional): */
+         break;
+      default:
+         vpanic("showMIPSCondCode");
+         break;
+   }
+   return ret;
+}
+
+const HChar *showMIPSFpOp(MIPSFpOp op)
+{
+   const HChar *ret;
+   switch (op) {
+      case Mfp_ADDD:
+         ret = "add.d";
+         break;
+      case Mfp_SUBD:
+         ret = "sub.d";
+         break;
+      case Mfp_MULD:
+         ret = "mul.d";
+         break;
+      case Mfp_DIVD:
+         ret = "div.d";
+         break;
+      case Mfp_MADDD:
+         ret = "madd.d";
+         break;
+      case Mfp_MSUBD:
+         ret = "msub.d";
+         break;
+      case Mfp_MADDS:
+         ret = "madd.s";
+         break;
+      case Mfp_MSUBS:
+         ret = "msub.s";
+         break;
+      case Mfp_ADDS:
+         ret = "add.s";
+         break;
+      case Mfp_SUBS:
+         ret = "sub.s";
+         break;
+      case Mfp_MULS:
+         ret = "mul.s";
+         break;
+      case Mfp_DIVS:
+         ret = "div.s";
+         break;
+      case Mfp_SQRTS:
+         ret = "sqrt.s";
+         break;
+      case Mfp_SQRTD:
+         ret = "sqrt.d";
+         break;
+      case Mfp_ABSS:
+         ret = "abs.s";
+         break;
+      case Mfp_ABSD:
+         ret = "abs.d";
+         break;
+      case Mfp_NEGS:
+         ret = "neg.s";
+         break;
+      case Mfp_NEGD:
+         ret = "neg.d";
+         break;
+      case Mfp_MOVS:
+         ret = "mov.s";
+         break;
+      case Mfp_MOVD:
+         ret = "mov.d";
+         break;
+      case Mfp_ROUNDWS:
+         ret = "round.w.s";
+         break;
+      case Mfp_ROUNDWD:
+         ret = "round.w.d";
+         break;
+      case Mfp_ROUNDLD:
+         ret = "round.l.d";
+         break;
+      case Mfp_FLOORWS:
+         ret = "floor.w.s";
+         break;
+      case Mfp_FLOORWD:
+         ret = "floor.w.d";
+         break;
+      case Mfp_CVTDW:
+         ret = "cvt.d.w";
+         break;
+      case Mfp_CVTDL:
+         ret = "cvt.d.l";
+         break;
+      case Mfp_CVTDS:
+         ret = "cvt.d.s";
+         break;
+      case Mfp_CVTSD:
+         ret = "cvt.s.d";
+         break;
+      case Mfp_CVTSW:
+         ret = "cvt.s.w";
+         break;
+      case Mfp_CVTWS:
+         ret = "cvt.w.s";
+         break;
+      case Mfp_CVTWD:
+         ret = "cvt.w.d";
+         break;
+      case Mfp_CVTLD:
+         ret = "cvt.l.d";
+         break;
+      case Mfp_CVTLS:
+         ret = "cvt.l.s";
+         break;
+      case Mfp_TRUWD:
+         ret = "trunc.w.d";
+         break;
+      case Mfp_TRUWS:
+         ret = "trunc.w.s";
+         break;
+      case Mfp_TRULD:
+         ret = "trunc.l.d";
+         break;
+      case Mfp_TRULS:
+         ret = "trunc.l.s";
+         break;
+      case Mfp_CEILWS:
+         ret = "ceil.w.s";
+         break;
+      case Mfp_CEILWD:
+         ret = "ceil.w.d";
+         break;
+      case Mfp_CEILLS:
+         ret = "ceil.l.s";
+         break;
+      case Mfp_CEILLD:
+         ret = "ceil.l.d";
+         break;
+      case Mfp_CMP_UN:
+         ret = "c.un.d";
+         break;
+      case Mfp_CMP_EQ:
+         ret = "c.eq.d";
+         break;
+      case Mfp_CMP_LT:
+         ret = "c.lt.d";
+         break;
+      case Mfp_CMP_NGT:
+         ret = "c.ngt.d";
+         break;
+      default:
+         vex_printf("Unknown op: %d", op);
+         vpanic("showMIPSFpOp");
+         break;
+   }
+   return ret;
+}
+
+/* Show move from/to fpr to/from gpr */
+const HChar* showMIPSFpGpMoveOp ( MIPSFpGpMoveOp op )
+{
+   const HChar *ret;
+   switch (op) {
+      case MFpGpMove_mfc1:
+         ret = "mfc1";
+         break;
+      case MFpGpMove_dmfc1:
+         ret = "dmfc1";
+         break;
+      case MFpGpMove_mtc1:
+         ret = "mtc1";
+         break;
+      case MFpGpMove_dmtc1:
+         ret = "dmtc1";
+         break;
+      default:
+         vpanic("showMIPSFpGpMoveOp");
+         break;
+   }
+   return ret;
+}
+
+/* Show floating point move conditional */
+const HChar* showMIPSMoveCondOp ( MIPSMoveCondOp op )
+{
+   const HChar *ret;
+   switch (op) {
+      case MFpMoveCond_movns:
+         ret = "movn.s";
+         break;
+      case MFpMoveCond_movnd:
+         ret = "movn.d";
+         break;
+      case MMoveCond_movn:
+         ret = "movn";
+         break;
+      default:
+         vpanic("showMIPSFpMoveCondOp");
+         break;
+   }
+   return ret;
+}
+
+/* --------- MIPSAMode: memory address expressions. --------- */
+
+MIPSAMode *MIPSAMode_IR(Int idx, HReg base)
+{
+   MIPSAMode *am = LibVEX_Alloc_inline(sizeof(MIPSAMode));
+   am->tag = Mam_IR;
+   am->Mam.IR.base = base;
+   am->Mam.IR.index = idx;
+
+   return am;
+}
+
+MIPSAMode *MIPSAMode_RR(HReg idx, HReg base)
+{
+   MIPSAMode *am = LibVEX_Alloc_inline(sizeof(MIPSAMode));
+   am->tag = Mam_RR;
+   am->Mam.RR.base = base;
+   am->Mam.RR.index = idx;
+
+   return am;
+}
+
+MIPSAMode *dopyMIPSAMode(MIPSAMode * am)
+{
+   MIPSAMode* ret;
+   switch (am->tag) {
+      case Mam_IR:
+         ret = MIPSAMode_IR(am->Mam.IR.index, am->Mam.IR.base);
+         break;
+      case Mam_RR:
+         ret = MIPSAMode_RR(am->Mam.RR.index, am->Mam.RR.base);
+         break;
+      default:
+         vpanic("dopyMIPSAMode");
+         break;
+   }
+   return ret;
+}
+
+MIPSAMode *nextMIPSAModeFloat(MIPSAMode * am)
+{
+   MIPSAMode* ret;
+   switch (am->tag) {
+      case Mam_IR:
+         ret = MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
+         break;
+      case Mam_RR:
+         /* We can't do anything with the RR case, so if it appears
+            we simply have to give up. */
+         /* fallthrough */
+      default:
+         vpanic("nextMIPSAModeFloat");
+         break;
+   }
+   return ret;
+}
+
+MIPSAMode *nextMIPSAModeInt(MIPSAMode * am)
+{
+   MIPSAMode* ret;
+   switch (am->tag) {
+      case Mam_IR:
+         ret = MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
+         break;
+      case Mam_RR:
+         /* We can't do anything with the RR case, so if it appears
+            we simply have to give up. */
+         /* fallthrough */
+      default:
+         vpanic("nextMIPSAModeInt");
+         break;
+   }
+   return ret;
+}
+
+void ppMIPSAMode(MIPSAMode * am, Bool mode64)
+{
+   switch (am->tag) {
+      case Mam_IR:
+         if (am->Mam.IR.index == 0)
+            vex_printf("0(");
+         else
+            vex_printf("%d(", (Int) am->Mam.IR.index);
+         ppHRegMIPS(am->Mam.IR.base, mode64);
+         vex_printf(")");
+         return;
+      case Mam_RR:
+         ppHRegMIPS(am->Mam.RR.base, mode64);
+         vex_printf(", ");
+         ppHRegMIPS(am->Mam.RR.index, mode64);
+         return;
+      default:
+         vpanic("ppMIPSAMode");
+         break;
+   }
+}
+
+static void addRegUsage_MIPSAMode(HRegUsage * u, MIPSAMode * am)
+{
+   switch (am->tag) {
+      case Mam_IR:
+         addHRegUse(u, HRmRead, am->Mam.IR.base);
+         return;
+      case Mam_RR:
+         addHRegUse(u, HRmRead, am->Mam.RR.base);
+         addHRegUse(u, HRmRead, am->Mam.RR.index);
+         return;
+      default:
+         vpanic("addRegUsage_MIPSAMode");
+         break;
+   }
+}
+
+static void mapRegs_MIPSAMode(HRegRemap * m, MIPSAMode * am)
+{
+   switch (am->tag) {
+      case Mam_IR:
+         am->Mam.IR.base = lookupHRegRemap(m, am->Mam.IR.base);
+         return;
+      case Mam_RR:
+         am->Mam.RR.base = lookupHRegRemap(m, am->Mam.RR.base);
+         am->Mam.RR.index = lookupHRegRemap(m, am->Mam.RR.index);
+         return;
+      default:
+         vpanic("mapRegs_MIPSAMode");
+         break;
+   }
+}
+
+/* --------- Operand, which can be a reg or a u16/s16. --------- */
+
+MIPSRH *MIPSRH_Imm(Bool syned, UShort imm16)
+{
+   MIPSRH *op = LibVEX_Alloc_inline(sizeof(MIPSRH));
+   op->tag = Mrh_Imm;
+   op->Mrh.Imm.syned = syned;
+   op->Mrh.Imm.imm16 = imm16;
+   /* If this is a signed value, ensure it's not -32768, so that we
+      are guaranteed always to be able to negate if needed. */
+   if (syned)
+      vassert(imm16 != 0x8000);
+   vassert(syned == True || syned == False);
+   return op;
+}
+
+MIPSRH *MIPSRH_Reg(HReg reg)
+{
+   MIPSRH *op = LibVEX_Alloc_inline(sizeof(MIPSRH));
+   op->tag = Mrh_Reg;
+   op->Mrh.Reg.reg = reg;
+   return op;
+}
+
+void ppMIPSRH(MIPSRH * op, Bool mode64)
+{
+   MIPSRHTag tag = op->tag;
+   switch (tag) {
+      case Mrh_Imm:
+         if (op->Mrh.Imm.syned)
+            vex_printf("%d", (Int) (Short) op->Mrh.Imm.imm16);
+         else
+            vex_printf("%u", (UInt) (UShort) op->Mrh.Imm.imm16);
+         return;
+      case Mrh_Reg:
+         ppHRegMIPS(op->Mrh.Reg.reg, mode64);
+         return;
+      default:
+         vpanic("ppMIPSRH");
+         break;
+   }
+}
+
+/* An MIPSRH can only be used in a "read" context (what would it mean
+   to write or modify a literal?) and so we enumerate its registers
+   accordingly. */
+static void addRegUsage_MIPSRH(HRegUsage * u, MIPSRH * op)
+{
+   switch (op->tag) {
+      case Mrh_Imm:
+         return;
+      case Mrh_Reg:
+         addHRegUse(u, HRmRead, op->Mrh.Reg.reg);
+         return;
+      default:
+         vpanic("addRegUsage_MIPSRH");
+         break;
+   }
+}
+
+static void mapRegs_MIPSRH(HRegRemap * m, MIPSRH * op)
+{
+   switch (op->tag) {
+      case Mrh_Imm:
+         return;
+      case Mrh_Reg:
+         op->Mrh.Reg.reg = lookupHRegRemap(m, op->Mrh.Reg.reg);
+         return;
+      default:
+         vpanic("mapRegs_MIPSRH");
+         break;
+   }
+}
+
+/* --------- Instructions. --------- */
+
+const HChar *showMIPSUnaryOp(MIPSUnaryOp op)
+{
+   const HChar* ret;
+   switch (op) {
+      case Mun_CLO:
+         ret = "clo";
+         break;
+      case Mun_CLZ:
+         ret = "clz";
+         break;
+      case Mun_NOP:
+         ret = "nop";
+         break;
+      case Mun_DCLO:
+         ret = "dclo";
+         break;
+      case Mun_DCLZ:
+         ret = "dclz";
+         break;
+      default:
+         vpanic("showMIPSUnaryOp");
+         break;
+   }
+   return ret;
+}
+
+const HChar *showMIPSAluOp(MIPSAluOp op, Bool immR)
+{
+   const HChar* ret;
+   switch (op) {
+      case Malu_ADD:
+         ret = immR ? "addiu" : "addu";
+         break;
+      case Malu_SUB:
+         ret = "subu";
+         break;
+      case Malu_AND:
+         ret = immR ? "andi" : "and";
+         break;
+      case Malu_OR:
+         ret = immR ? "ori" : "or";
+         break;
+      case Malu_NOR:
+         vassert(immR == False); /*there's no nor with an immediate operand!? */
+         ret = "nor";
+         break;
+      case Malu_XOR:
+         ret = immR ? "xori" : "xor";
+         break;
+      case Malu_DADD:
+         ret = immR ? "daddi" : "dadd";
+         break;
+      case Malu_DSUB:
+         ret = immR ? "dsubi" : "dsub";
+         break;
+      case Malu_SLT:
+         ret = immR ? "slti" : "slt";
+         break;
+      default:
+         vpanic("showMIPSAluOp");
+         break;
+   }
+   return ret;
+}
+
+const HChar *showMIPSShftOp(MIPSShftOp op, Bool immR, Bool sz32)
+{
+   const HChar *ret;
+   switch (op) {
+      case Mshft_SRA:
+         ret = immR ? (sz32 ? "sra" : "dsra") : (sz32 ? "srav" : "dsrav");
+         break;
+      case Mshft_SLL:
+         ret = immR ? (sz32 ? "sll" : "dsll") : (sz32 ? "sllv" : "dsllv");
+         break;
+      case Mshft_SRL:
+         ret = immR ? (sz32 ? "srl" : "dsrl") : (sz32 ? "srlv" : "dsrlv");
+         break;
+      default:
+         vpanic("showMIPSShftOp");
+         break;
+   }
+   return ret;
+}
+
+const HChar *showMIPSMaccOp(MIPSMaccOp op, Bool variable)
+{
+   const HChar *ret;
+   switch (op) {
+      case Macc_ADD:
+         ret = variable ? "madd" : "maddu";
+         break;
+      case Macc_SUB:
+         ret = variable ? "msub" : "msubu";
+         break;
+      default:
+         vpanic("showMIPSAccOp");
+         break;
+   }
+   return ret;
+}
+
+MIPSInstr *MIPSInstr_LI(HReg dst, ULong imm)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_LI;
+   i->Min.LI.dst = dst;
+   i->Min.LI.imm = imm;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Alu(MIPSAluOp op, HReg dst, HReg srcL, MIPSRH * srcR)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Alu;
+   i->Min.Alu.op = op;
+   i->Min.Alu.dst = dst;
+   i->Min.Alu.srcL = srcL;
+   i->Min.Alu.srcR = srcR;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Shft(MIPSShftOp op, Bool sz32, HReg dst, HReg srcL,
+                          MIPSRH * srcR)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Shft;
+   i->Min.Shft.op = op;
+   i->Min.Shft.sz32 = sz32;
+   i->Min.Shft.dst = dst;
+   i->Min.Shft.srcL = srcL;
+   i->Min.Shft.srcR = srcR;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Unary;
+   i->Min.Unary.op = op;
+   i->Min.Unary.dst = dst;
+   i->Min.Unary.src = src;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Cmp(Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR,
+                         MIPSCondCode cond)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Cmp;
+   i->Min.Cmp.syned = syned;
+   i->Min.Cmp.sz32 = sz32;
+   i->Min.Cmp.dst = dst;
+   i->Min.Cmp.srcL = srcL;
+   i->Min.Cmp.srcR = srcR;
+   i->Min.Cmp.cond = cond;
+   return i;
+}
+
+/* multiply */
+MIPSInstr *MIPSInstr_Mul(Bool syned, Bool wid, Bool sz32, HReg dst, HReg srcL,
+                         HReg srcR)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Mul;
+   i->Min.Mul.syned = syned;
+   i->Min.Mul.widening = wid; /* widen=True else False */
+   i->Min.Mul.sz32 = sz32; /* True = 32 bits */
+   i->Min.Mul.dst = dst;
+   i->Min.Mul.srcL = srcL;
+   i->Min.Mul.srcR = srcR;
+   return i;
+}
+
+/* msub */
+MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Macc;
+
+   i->Min.Macc.op = Macc_SUB;
+   i->Min.Macc.syned = syned;
+   i->Min.Macc.srcL = srcL;
+   i->Min.Macc.srcR = srcR;
+   return i;
+}
+
+/* madd */
+MIPSInstr *MIPSInstr_Madd(Bool syned, HReg srcL, HReg srcR)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Macc;
+
+   i->Min.Macc.op = Macc_ADD;
+   i->Min.Macc.syned = syned;
+   i->Min.Macc.srcL = srcL;
+   i->Min.Macc.srcR = srcR;
+   return i;
+}
+
+/* div */
+MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg srcL, HReg srcR)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Div;
+   i->Min.Div.syned = syned;
+   i->Min.Div.sz32 = sz32; /* True = 32 bits */
+   i->Min.Div.srcL = srcL;
+   i->Min.Div.srcR = srcR;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Call ( MIPSCondCode cond, Addr64 target, UInt argiregs,
+                            HReg src, RetLoc rloc )
+{
+   UInt mask;
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Call;
+   i->Min.Call.cond = cond;
+   i->Min.Call.target = target;
+   i->Min.Call.argiregs = argiregs;
+   i->Min.Call.src = src;
+   i->Min.Call.rloc = rloc;
+   /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
+   mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
+          | (1 << 10) | (1 << 11);
+   vassert(0 == (argiregs & ~mask));
+   vassert(is_sane_RetLoc(rloc));
+   return i;
+}
+
+MIPSInstr *MIPSInstr_CallAlways ( MIPSCondCode cond, Addr64 target,
+                                  UInt argiregs, RetLoc rloc )
+{
+   UInt mask;
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Call;
+   i->Min.Call.cond = cond;
+   i->Min.Call.target = target;
+   i->Min.Call.argiregs = argiregs;
+   i->Min.Call.rloc = rloc;
+   /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
+   mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
+          | (1 << 10) | (1 << 11);
+   vassert(0 == (argiregs & ~mask));
+   vassert(is_sane_RetLoc(rloc));
+   return i;
+}
+
+MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC,
+                               MIPSCondCode cond, Bool toFastEP ) {
+   MIPSInstr* i               = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag                     = Min_XDirect;
+   i->Min.XDirect.dstGA       = dstGA;
+   i->Min.XDirect.amPC        = amPC;
+   i->Min.XDirect.cond        = cond;
+   i->Min.XDirect.toFastEP    = toFastEP;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_XIndir ( HReg dstGA, MIPSAMode* amPC,
+                              MIPSCondCode cond ) {
+   MIPSInstr* i            = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag                  = Min_XIndir;
+   i->Min.XIndir.dstGA     = dstGA;
+   i->Min.XIndir.amPC      = amPC;
+   i->Min.XIndir.cond      = cond;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_XAssisted ( HReg dstGA, MIPSAMode* amPC,
+                                 MIPSCondCode cond, IRJumpKind jk ) {
+   MIPSInstr* i               = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag                     = Min_XAssisted;
+   i->Min.XAssisted.dstGA     = dstGA;
+   i->Min.XAssisted.amPC      = amPC;
+   i->Min.XAssisted.cond      = cond;
+   i->Min.XAssisted.jk        = jk;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Load;
+   i->Min.Load.sz = sz;
+   i->Min.Load.src = src;
+   i->Min.Load.dst = dst;
+   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+
+   if (sz == 8)
+      vassert(mode64);
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Store;
+   i->Min.Store.sz = sz;
+   i->Min.Store.src = src;
+   i->Min.Store.dst = dst;
+   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+
+   if (sz == 8)
+      vassert(mode64);
+   return i;
+}
+
+MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_LoadL;
+   i->Min.LoadL.sz  = sz;
+   i->Min.LoadL.src = src;
+   i->Min.LoadL.dst = dst;
+   vassert(sz == 4 || sz == 8);
+
+   if (sz == 8)
+      vassert(mode64);
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Cas(UChar sz, HReg old, HReg addr,
+                         HReg expd, HReg data, Bool mode64)
+{
+   MIPSInstr *i    = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag          = Min_Cas;
+   i->Min.Cas.sz   = sz;
+   i->Min.Cas.old  = old;
+   i->Min.Cas.addr = addr;
+   i->Min.Cas.expd = expd;
+   i->Min.Cas.data = data;
+   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+
+   if (sz == 8)
+      vassert(mode64);
+   return i;
+}
+
+MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_StoreC;
+   i->Min.StoreC.sz  = sz;
+   i->Min.StoreC.src = src;
+   i->Min.StoreC.dst = dst;
+   vassert(sz == 4 || sz == 8);
+
+   if (sz == 8)
+      vassert(mode64);
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Mthi(HReg src)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Mthi;
+   i->Min.MtHL.src = src;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Mtlo(HReg src)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Mtlo;
+   i->Min.MtHL.src = src;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Mfhi(HReg dst)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Mfhi;
+   i->Min.MfHL.dst = dst;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_Mflo(HReg dst)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_Mflo;
+   i->Min.MfHL.dst = dst;
+   return i;
+}
+
+/* Read/Write Link Register */
+MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_RdWrLR;
+   i->Min.RdWrLR.wrLR = wrLR;
+   i->Min.RdWrLR.gpr = gpr;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg reg, MIPSAMode * addr)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_FpLdSt;
+   i->Min.FpLdSt.isLoad = isLoad;
+   i->Min.FpLdSt.sz = sz;
+   i->Min.FpLdSt.reg = reg;
+   i->Min.FpLdSt.addr = addr;
+   vassert(sz == 4 || sz == 8);
+   return i;
+}
+
+MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_FpUnary;
+   i->Min.FpUnary.op = op;
+   i->Min.FpUnary.dst = dst;
+   i->Min.FpUnary.src = src;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_FpBinary;
+   i->Min.FpBinary.op = op;
+   i->Min.FpBinary.dst = dst;
+   i->Min.FpBinary.srcL = srcL;
+   i->Min.FpBinary.srcR = srcR;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1, HReg src2,
+                                 HReg src3 )
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_FpTernary;
+   i->Min.FpTernary.op = op;
+   i->Min.FpTernary.dst = dst;
+   i->Min.FpTernary.src1 = src1;
+   i->Min.FpTernary.src2 = src2;
+   i->Min.FpTernary.src3 = src3;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_FpConvert;
+   i->Min.FpConvert.op = op;
+   i->Min.FpConvert.dst = dst;
+   i->Min.FpConvert.src = src;
+   return i;
+
+}
+
+MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_FpCompare;
+   i->Min.FpCompare.op = op;
+   i->Min.FpCompare.dst = dst;
+   i->Min.FpCompare.srcL = srcL;
+   i->Min.FpCompare.srcR = srcR;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_MtFCSR(HReg src)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_MtFCSR;
+   i->Min.MtFCSR.src = src;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_MfFCSR(HReg dst)
+{
+   MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag = Min_MfFCSR;
+   i->Min.MfFCSR.dst = dst;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src )
+{
+   MIPSInstr *i        = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag              = Min_FpGpMove;
+   i->Min.FpGpMove.op  = op;
+   i->Min.FpGpMove.dst = dst;
+   i->Min.FpGpMove.src = src;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst, HReg src,
+                                HReg cond )
+{
+   MIPSInstr *i        = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag              = Min_MoveCond;
+   i->Min.MoveCond.op  = op;
+   i->Min.MoveCond.dst = dst;
+   i->Min.MoveCond.src = src;
+   i->Min.MoveCond.cond = cond;
+   return i;
+}
+
+MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter,
+                            MIPSAMode* amFailAddr ) {
+   MIPSInstr* i                 = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag                       = Min_EvCheck;
+   i->Min.EvCheck.amCounter     = amCounter;
+   i->Min.EvCheck.amFailAddr    = amFailAddr;
+   return i;
+}
+
+MIPSInstr* MIPSInstr_ProfInc ( void ) {
+   MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
+   i->tag       = Min_ProfInc;
+   return i;
+}
+
/* -------- Pretty Print instructions ------------- */
/* Print "li dst,0x<imm>"; the immediate is always shown as 16 hex
   digits regardless of mode64. */
static void ppLoadImm(HReg dst, ULong imm, Bool mode64)
{
   vex_printf("li ");
   ppHRegMIPS(dst, mode64);
   vex_printf(",0x%016llx", imm);
}
+
+void ppMIPSInstr(const MIPSInstr * i, Bool mode64)
+{
+   switch (i->tag) {
+      case Min_LI:
+         ppLoadImm(i->Min.LI.dst, i->Min.LI.imm, mode64);
+         break;
+      case Min_Alu: {
+         HReg r_srcL = i->Min.Alu.srcL;
+         MIPSRH *rh_srcR = i->Min.Alu.srcR;
+         /* generic */
+         vex_printf("%s ", showMIPSAluOp(i->Min.Alu.op,
+                                         toBool(rh_srcR->tag == Mrh_Imm)));
+         ppHRegMIPS(i->Min.Alu.dst, mode64);
+         vex_printf(",");
+         ppHRegMIPS(r_srcL, mode64);
+         vex_printf(",");
+         ppMIPSRH(rh_srcR, mode64);
+         return;
+      }
+      case Min_Shft: {
+         HReg r_srcL = i->Min.Shft.srcL;
+         MIPSRH *rh_srcR = i->Min.Shft.srcR;
+         vex_printf("%s ", showMIPSShftOp(i->Min.Shft.op,
+                                          toBool(rh_srcR->tag == Mrh_Imm),
+                                          i->Min.Shft.sz32));
+         ppHRegMIPS(i->Min.Shft.dst, mode64);
+         vex_printf(",");
+         ppHRegMIPS(r_srcL, mode64);
+         vex_printf(",");
+         ppMIPSRH(rh_srcR, mode64);
+         return;
+      }
+      case Min_Unary: {
+         vex_printf("%s ", showMIPSUnaryOp(i->Min.Unary.op));
+         ppHRegMIPS(i->Min.Unary.dst, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.Unary.src, mode64);
+         return;
+      }
+      case Min_Cmp: {
+         vex_printf("word_compare ");
+         ppHRegMIPS(i->Min.Cmp.dst, mode64);
+         vex_printf(" = %s ( ", showMIPSCondCode(i->Min.Cmp.cond));
+         ppHRegMIPS(i->Min.Cmp.srcL, mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.Cmp.srcR, mode64);
+         vex_printf(" )");
+
+         return;
+      }
+      case Min_Mul: {
+         switch (i->Min.Mul.widening) {
+            case False:
+               vex_printf("mul ");
+               ppHRegMIPS(i->Min.Mul.dst, mode64);
+               vex_printf(", ");
+               ppHRegMIPS(i->Min.Mul.srcL, mode64);
+               vex_printf(", ");
+               ppHRegMIPS(i->Min.Mul.srcR, mode64);
+               return;
+            case True:
+               vex_printf("%s%s ", i->Min.Mul.sz32 ? "mult" : "dmult",
+                                   i->Min.Mul.syned ? "" : "u");
+               ppHRegMIPS(i->Min.Mul.dst, mode64);
+               vex_printf(", ");
+               ppHRegMIPS(i->Min.Mul.srcL, mode64);
+               vex_printf(", ");
+               ppHRegMIPS(i->Min.Mul.srcR, mode64);
+               return;
+            }
+         break;
+      }
+      case Min_Mthi: {
+         vex_printf("mthi ");
+         ppHRegMIPS(i->Min.MtHL.src, mode64);
+         return;
+      }
+      case Min_Mtlo: {
+         vex_printf("mtlo ");
+         ppHRegMIPS(i->Min.MtHL.src, mode64);
+         return;
+      }
+      case Min_Mfhi: {
+         vex_printf("mfhi ");
+         ppHRegMIPS(i->Min.MfHL.dst, mode64);
+         return;
+      }
+      case Min_Mflo: {
+         vex_printf("mflo ");
+         ppHRegMIPS(i->Min.MfHL.dst, mode64);
+         return;
+      }
+      case Min_Macc: {
+         vex_printf("%s ", showMIPSMaccOp(i->Min.Macc.op, i->Min.Macc.syned));
+         ppHRegMIPS(i->Min.Macc.srcL, mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.Macc.srcR, mode64);
+         return;
+      }
+      case Min_Div: {
+         if (!i->Min.Div.sz32)
+            vex_printf("d");
+         vex_printf("div");
+         vex_printf("%s ", i->Min.Div.syned ? "s" : "u");
+         ppHRegMIPS(i->Min.Div.srcL, mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.Div.srcR, mode64);
+         return;
+      }
+      case Min_Call: {
+         Int n;
+         vex_printf("call: ");
+         if (i->Min.Call.cond != MIPScc_AL) {
+            vex_printf("if (%s) ", showMIPSCondCode(i->Min.Call.cond));
+         }
+         vex_printf(" {");
+         if (!mode64)
+            vex_printf(" addiu $29, $29, -16");
+
+         ppLoadImm(hregMIPS_GPR25(mode64), i->Min.Call.target, mode64);
+
+         vex_printf(" ; jarl $31, $25; # args [");
+         for (n = 0; n < 32; n++) {
+            if (i->Min.Call.argiregs & (1 << n)) {
+               vex_printf("$%d", n);
+               if ((i->Min.Call.argiregs >> n) > 1)
+                  vex_printf(",");
+            }
+         }
+         vex_printf("] nop; ");
+         if (!mode64)
+            vex_printf("addiu $29, $29, 16; ]");
+
+         break;
+      }
+      case Min_XDirect:
+         vex_printf("(xDirect) ");
+         vex_printf("if (guest_COND.%s) { ",
+                    showMIPSCondCode(i->Min.XDirect.cond));
+         vex_printf("move $9, 0x%x,", (UInt)i->Min.XDirect.dstGA);
+         vex_printf("; sw $9, ");
+         ppMIPSAMode(i->Min.XDirect.amPC, mode64);
+         vex_printf("; move $9, $disp_cp_chain_me_to_%sEP; jalr $9; nop}",
+                    i->Min.XDirect.toFastEP ? "fast" : "slow");
+         return;
+      case Min_XIndir:
+         vex_printf("(xIndir) ");
+         vex_printf("if (guest_COND.%s) { sw ",
+                    showMIPSCondCode(i->Min.XIndir.cond));
+         ppHRegMIPS(i->Min.XIndir.dstGA, mode64);
+         vex_printf(", ");
+         ppMIPSAMode(i->Min.XIndir.amPC, mode64);
+         vex_printf("; move $9, $disp_indir; jalr $9; nop}");
+         return;
+      case Min_XAssisted:
+         vex_printf("(xAssisted) ");
+         vex_printf("if (guest_COND.%s) { ",
+                    showMIPSCondCode(i->Min.XAssisted.cond));
+         vex_printf("sw ");
+         ppHRegMIPS(i->Min.XAssisted.dstGA, mode64);
+         vex_printf(", ");
+         ppMIPSAMode(i->Min.XAssisted.amPC, mode64);
+         vex_printf("; move $9, $IRJumpKind_to_TRCVAL(%d)",
+                    (Int)i->Min.XAssisted.jk);
+         vex_printf("; move $9, $disp_assisted; jalr $9; nop; }");
+         return;
+      case Min_Load: {
+         Bool idxd = toBool(i->Min.Load.src->tag == Mam_RR);
+         UChar sz = i->Min.Load.sz;
+         HChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
+         vex_printf("l%c%s ", c_sz, idxd ? "x" : "");
+         ppHRegMIPS(i->Min.Load.dst, mode64);
+         vex_printf(",");
+         ppMIPSAMode(i->Min.Load.src, mode64);
+         return;
+      }
+      case Min_Store: {
+         UChar sz = i->Min.Store.sz;
+         Bool idxd = toBool(i->Min.Store.dst->tag == Mam_RR);
+         HChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
+         vex_printf("s%c%s ", c_sz, idxd ? "x" : "");
+         ppHRegMIPS(i->Min.Store.src, mode64);
+         vex_printf(",");
+         ppMIPSAMode(i->Min.Store.dst, mode64);
+         return;
+      }
+      case Min_LoadL: {
+         vex_printf("ll ");
+         ppHRegMIPS(i->Min.LoadL.dst, mode64);
+         vex_printf(",");
+         ppMIPSAMode(i->Min.LoadL.src, mode64);
+         return;
+      }
+      case Min_Cas: {
+          Bool sz8  = toBool(i->Min.Cas.sz == 8);
+          /*
+           * ll(d)    old,  0(addr)
+           * bne      old,  expd, end
+           * nop
+           * (d)addiu old,  old,  1
+           * sc(d)    data, 0(addr)
+           * movn     old,  expd, data
+           * end:
+           */
+          // ll(d) old, 0(addr)
+         vex_printf("cas: ");
+
+         vex_printf("%s ", sz8 ? "lld" : "ll");
+         ppHRegMIPS(i->Min.Cas.old , mode64);
+         vex_printf(", 0(");
+         ppHRegMIPS(i->Min.Cas.addr , mode64);
+         vex_printf(")\n");
+
+         vex_printf("bne ");
+         ppHRegMIPS(i->Min.Cas.old , mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.Cas.expd , mode64);
+         vex_printf(", end\n");
+
+         vex_printf("nop\n");
+
+         vex_printf("%s ", sz8 ? "daddiu" : "addiu");
+         ppHRegMIPS(i->Min.Cas.old , mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.Cas.old , mode64);
+         vex_printf(", 1\n");
+
+         vex_printf("%s ", sz8 ? "scd" : "sc");
+         ppHRegMIPS(i->Min.Cas.data , mode64);
+         vex_printf(", 0(");
+         ppHRegMIPS(i->Min.Cas.addr , mode64);
+         vex_printf(")\n");
+
+         vex_printf("movn ");
+         ppHRegMIPS(i->Min.Cas.old , mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.Cas.expd , mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.Cas.data , mode64);
+         vex_printf("\nend:");
+         return;
+      }
+      case Min_StoreC: {
+         vex_printf("sc ");
+         ppHRegMIPS(i->Min.StoreC.src, mode64);
+         vex_printf(",");
+         ppMIPSAMode(i->Min.StoreC.dst, mode64);
+         return;
+      }
+      case Min_RdWrLR: {
+         vex_printf("%s ", i->Min.RdWrLR.wrLR ? "mtlr" : "mflr");
+         ppHRegMIPS(i->Min.RdWrLR.gpr, mode64);
+         return;
+      }
+      case Min_FpUnary:
+         vex_printf("%s ", showMIPSFpOp(i->Min.FpUnary.op));
+         ppHRegMIPS(i->Min.FpUnary.dst, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpUnary.src, mode64);
+         return;
+      case Min_FpBinary:
+         vex_printf("%s", showMIPSFpOp(i->Min.FpBinary.op));
+         ppHRegMIPS(i->Min.FpBinary.dst, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpBinary.srcL, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpBinary.srcR, mode64);
+         return;
+      case Min_FpTernary:
+         vex_printf("%s", showMIPSFpOp(i->Min.FpTernary.op));
+         ppHRegMIPS(i->Min.FpTernary.dst, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpTernary.src1, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpTernary.src2, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpTernary.src3, mode64);
+         return;
+      case Min_FpConvert:
+         vex_printf("%s", showMIPSFpOp(i->Min.FpConvert.op));
+         ppHRegMIPS(i->Min.FpConvert.dst, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpConvert.src, mode64);
+         return;
+      case Min_FpCompare:
+         vex_printf("%s ", showMIPSFpOp(i->Min.FpCompare.op));
+         ppHRegMIPS(i->Min.FpCompare.srcL, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpCompare.srcR, mode64);
+         return;
+      case Min_FpMulAcc:
+         vex_printf("%s ", showMIPSFpOp(i->Min.FpMulAcc.op));
+         ppHRegMIPS(i->Min.FpMulAcc.dst, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpMulAcc.srcML, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpMulAcc.srcMR, mode64);
+         vex_printf(",");
+         ppHRegMIPS(i->Min.FpMulAcc.srcAcc, mode64);
+         return;
+      case Min_FpLdSt: {
+         if (i->Min.FpLdSt.sz == 4) {
+            if (i->Min.FpLdSt.isLoad) {
+               vex_printf("lwc1 ");
+               ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
+               vex_printf(",");
+               ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
+            } else {
+               vex_printf("swc1 ");
+               ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
+               vex_printf(",");
+               ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
+            }
+         } else if (i->Min.FpLdSt.sz == 8) {
+            if (i->Min.FpLdSt.isLoad) {
+               vex_printf("ldc1 ");
+               ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
+               vex_printf(",");
+               ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
+            } else {
+               vex_printf("sdc1 ");
+               ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
+               vex_printf(",");
+               ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
+            }
+         }
+         return;
+      }
+      case Min_MtFCSR: {
+         vex_printf("ctc1 ");
+         ppHRegMIPS(i->Min.MtFCSR.src, mode64);
+         vex_printf(", $31");
+         return;
+      }
+      case Min_MfFCSR: {
+         vex_printf("ctc1 ");
+         ppHRegMIPS(i->Min.MfFCSR.dst, mode64);
+         vex_printf(", $31");
+         return;
+      }
+      case Min_FpGpMove: {
+         vex_printf("%s ", showMIPSFpGpMoveOp(i->Min.FpGpMove.op));
+         ppHRegMIPS(i->Min.FpGpMove.dst, mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.FpGpMove.src, mode64);
+         return;
+      }
+      case Min_MoveCond: {
+         vex_printf("%s", showMIPSMoveCondOp(i->Min.MoveCond.op));
+         ppHRegMIPS(i->Min.MoveCond.dst, mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.MoveCond.src, mode64);
+         vex_printf(", ");
+         ppHRegMIPS(i->Min.MoveCond.cond, mode64);
+         return;
+      }
+      case Min_EvCheck:
+         vex_printf("(evCheck) lw $9, ");
+         ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
+         vex_printf("; addiu $9, $9, -1");
+         vex_printf("; sw $9, ");
+         ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
+         vex_printf("; bgez $t9, nofail; jalr *");
+         ppMIPSAMode(i->Min.EvCheck.amFailAddr, mode64);
+         vex_printf("; nofail:");
+         return;
+      case Min_ProfInc:
+         if (mode64)
+            vex_printf("(profInc) move $9, ($NotKnownYet); "
+                       "ld $8, 0($9); "
+                       "daddiu $8, $8, 1; "
+                       "sd $8, 0($9); " );
+         else
+            vex_printf("(profInc) move $9, ($NotKnownYet); "
+                       "lw $8, 0($9); "
+                       "addiu $8, $8, 1; "
+                       "sw $8, 0($9); "
+                       "sltiu $1, $8, 1; "
+                       "lw $8, 4($9); "
+                       "addu $8, $8, $1; "
+                       "sw $8, 4($9); " );
+         return;
+      default:
+         vpanic("ppMIPSInstr");
+         break;
+   }
+}
+
+/* --------- Helpers for register allocation. --------- */
+
+void getRegUsage_MIPSInstr(HRegUsage * u, const MIPSInstr * i, Bool mode64)
+{
+   initHRegUsage(u);
+   switch (i->tag) {
+      case Min_LI:
+         addHRegUse(u, HRmWrite, i->Min.LI.dst);
+         break;
+      case Min_Alu:
+         addHRegUse(u, HRmRead, i->Min.Alu.srcL);
+         addRegUsage_MIPSRH(u, i->Min.Alu.srcR);
+         addHRegUse(u, HRmWrite, i->Min.Alu.dst);
+         return;
+      case Min_Shft:
+         addHRegUse(u, HRmRead, i->Min.Shft.srcL);
+         addRegUsage_MIPSRH(u, i->Min.Shft.srcR);
+         addHRegUse(u, HRmWrite, i->Min.Shft.dst);
+         return;
+      case Min_Cmp:
+         addHRegUse(u, HRmRead, i->Min.Cmp.srcL);
+         addHRegUse(u, HRmRead, i->Min.Cmp.srcR);
+         addHRegUse(u, HRmWrite, i->Min.Cmp.dst);
+         return;
+      case Min_Unary:
+         addHRegUse(u, HRmRead, i->Min.Unary.src);
+         addHRegUse(u, HRmWrite, i->Min.Unary.dst);
+         return;
+      case Min_Mul:
+         addHRegUse(u, HRmWrite, i->Min.Mul.dst);
+         addHRegUse(u, HRmRead, i->Min.Mul.srcL);
+         addHRegUse(u, HRmRead, i->Min.Mul.srcR);
+         return;
+      case Min_Mthi:
+      case Min_Mtlo:
+         addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
+         addHRegUse(u, HRmRead, i->Min.MtHL.src);
+         return;
+      case Min_Mfhi:
+      case Min_Mflo:
+         addHRegUse(u, HRmRead, hregMIPS_HI(mode64));
+         addHRegUse(u, HRmRead, hregMIPS_LO(mode64));
+         addHRegUse(u, HRmWrite, i->Min.MfHL.dst);
+         return;
+      case Min_MtFCSR:
+         addHRegUse(u, HRmRead, i->Min.MtFCSR.src);
+         return;
+      case Min_MfFCSR:
+         addHRegUse(u, HRmWrite, i->Min.MfFCSR.dst);
+         return;
+      case Min_Macc:
+         addHRegUse(u, HRmModify, hregMIPS_HI(mode64));
+         addHRegUse(u, HRmModify, hregMIPS_LO(mode64));
+         addHRegUse(u, HRmRead, i->Min.Macc.srcL);
+         addHRegUse(u, HRmRead, i->Min.Macc.srcR);
+         return;
+      case Min_Div:
+         addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
+         addHRegUse(u, HRmRead, i->Min.Div.srcL);
+         addHRegUse(u, HRmRead, i->Min.Div.srcR);
+         return;
+      case Min_Call: {
+         /* Logic and comments copied/modified from x86, ppc and arm back end.
+            First off, claim it trashes all the caller-saved regs
+            which fall within the register allocator's jurisdiction. */
+         if (i->Min.Call.cond != MIPScc_AL)
+            addHRegUse(u, HRmRead, i->Min.Call.src);
+         UInt argir;
+         addHRegUse(u, HRmWrite, hregMIPS_GPR1(mode64));
+
+         addHRegUse(u, HRmWrite, hregMIPS_GPR2(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR3(mode64));
+
+         addHRegUse(u, HRmWrite, hregMIPS_GPR4(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR5(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR6(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR7(mode64));
+
+         addHRegUse(u, HRmWrite, hregMIPS_GPR8(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR9(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR10(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR11(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR12(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR13(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR14(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR15(mode64));
+
+         addHRegUse(u, HRmWrite, hregMIPS_GPR24(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR25(mode64));
+         addHRegUse(u, HRmWrite, hregMIPS_GPR31(mode64));
+
+         /* Now we have to state any parameter-carrying registers
+            which might be read. This depends on the argiregs field. */
+         argir = i->Min.Call.argiregs;
+         if (argir & (1<<11)) addHRegUse(u, HRmRead, hregMIPS_GPR11(mode64));
+         if (argir & (1<<10)) addHRegUse(u, HRmRead, hregMIPS_GPR10(mode64));
+         if (argir & (1<<9)) addHRegUse(u, HRmRead, hregMIPS_GPR9(mode64));
+         if (argir & (1<<8)) addHRegUse(u, HRmRead, hregMIPS_GPR8(mode64));
+         if (argir & (1<<7)) addHRegUse(u, HRmRead, hregMIPS_GPR7(mode64));
+         if (argir & (1<<6)) addHRegUse(u, HRmRead, hregMIPS_GPR6(mode64));
+         if (argir & (1<<5)) addHRegUse(u, HRmRead, hregMIPS_GPR5(mode64));
+         if (argir & (1<<4)) addHRegUse(u, HRmRead, hregMIPS_GPR4(mode64));
+
+         vassert(0 == (argir & ~((1 << 4) | (1 << 5) | (1 << 6)
+                                 | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10)
+                                 | (1 << 11))));
+
+         return;
+      }
+      /* XDirect/XIndir/XAssisted are also a bit subtle.  They
+         conditionally exit the block.  Hence we only need to list (1)
+         the registers that they read, and (2) the registers that they
+         write in the case where the block is not exited.  (2) is
+         empty, hence only (1) is relevant here. */
+      case Min_XDirect:
+         addRegUsage_MIPSAMode(u, i->Min.XDirect.amPC);
+         return;
+      case Min_XIndir:
+         addHRegUse(u, HRmRead, i->Min.XIndir.dstGA);
+         addRegUsage_MIPSAMode(u, i->Min.XIndir.amPC);
+         return;
+      case Min_XAssisted:
+         addHRegUse(u, HRmRead, i->Min.XAssisted.dstGA);
+         addRegUsage_MIPSAMode(u, i->Min.XAssisted.amPC);
+         return;
+      case Min_Load:
+         addRegUsage_MIPSAMode(u, i->Min.Load.src);
+         addHRegUse(u, HRmWrite, i->Min.Load.dst);
+         return;
+      case Min_Store:
+         addHRegUse(u, HRmRead, i->Min.Store.src);
+         addRegUsage_MIPSAMode(u, i->Min.Store.dst);
+         return;
+      case Min_LoadL:
+         addRegUsage_MIPSAMode(u, i->Min.LoadL.src);
+         addHRegUse(u, HRmWrite, i->Min.LoadL.dst);
+         return;
+      case Min_Cas:
+         addHRegUse(u, HRmWrite, i->Min.Cas.old);
+         addHRegUse(u, HRmRead, i->Min.Cas.addr);
+         addHRegUse(u, HRmRead, i->Min.Cas.expd);
+         addHRegUse(u, HRmModify, i->Min.Cas.data);
+         return;
+      case Min_StoreC:
+         addHRegUse(u, HRmWrite, i->Min.StoreC.src);
+         addHRegUse(u, HRmRead, i->Min.StoreC.src);
+         addRegUsage_MIPSAMode(u, i->Min.StoreC.dst);
+         return;
+      case Min_RdWrLR:
+         addHRegUse(u, (i->Min.RdWrLR.wrLR ? HRmRead : HRmWrite),
+                        i->Min.RdWrLR.gpr);
+         return;
+      case Min_FpLdSt:
+         if (i->Min.FpLdSt.sz == 4) {
+            addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
+                           i->Min.FpLdSt.reg);
+            addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
+            return;
+         } else if (i->Min.FpLdSt.sz == 8) {
+            addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
+                           i->Min.FpLdSt.reg);
+            addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
+            return;
+         }
+         break;
+      case Min_FpUnary:
+         addHRegUse(u, HRmWrite, i->Min.FpUnary.dst);
+         addHRegUse(u, HRmRead, i->Min.FpUnary.src);
+         return;
+      case Min_FpBinary:
+         addHRegUse(u, HRmWrite, i->Min.FpBinary.dst);
+         addHRegUse(u, HRmRead, i->Min.FpBinary.srcL);
+         addHRegUse(u, HRmRead, i->Min.FpBinary.srcR);
+         return;
+      case Min_FpTernary:
+         addHRegUse(u, HRmWrite, i->Min.FpTernary.dst);
+         addHRegUse(u, HRmRead, i->Min.FpTernary.src1);
+         addHRegUse(u, HRmRead, i->Min.FpTernary.src2);
+         addHRegUse(u, HRmRead, i->Min.FpTernary.src3);
+         return;
+      case Min_FpConvert:
+         addHRegUse(u, HRmWrite, i->Min.FpConvert.dst);
+         addHRegUse(u, HRmRead, i->Min.FpConvert.src);
+         return;
+      case Min_FpCompare:
+         addHRegUse(u, HRmWrite, i->Min.FpCompare.dst);
+         addHRegUse(u, HRmRead, i->Min.FpCompare.srcL);
+         addHRegUse(u, HRmRead, i->Min.FpCompare.srcR);
+         return;
+      case Min_FpGpMove:
+         addHRegUse(u, HRmWrite, i->Min.FpGpMove.dst);
+         addHRegUse(u, HRmRead, i->Min.FpGpMove.src);
+         return;
+      case Min_MoveCond:
+         addHRegUse(u, HRmModify, i->Min.MoveCond.dst);
+         addHRegUse(u, HRmRead, i->Min.MoveCond.src);
+         addHRegUse(u, HRmRead, i->Min.MoveCond.cond);
+         return;
+      case Min_EvCheck:
+         /* We expect both amodes only to mention %ebp, so this is in
+            fact pointless, since %ebp isn't allocatable, but anyway.. */
+         addRegUsage_MIPSAMode(u, i->Min.EvCheck.amCounter);
+         addRegUsage_MIPSAMode(u, i->Min.EvCheck.amFailAddr);
+         return;
+      case Min_ProfInc:
+         /* does not use any registers. */
+         return;
+      default:
+         ppMIPSInstr(i, mode64);
+         vpanic("getRegUsage_MIPSInstr");
+         break;
+   }
+}
+
+/* Local helper: rewrite *r in place with the real register that the
+   allocator assigned to it, as recorded in the remap table m. */
+static void mapReg(HRegRemap * m, HReg * r)
+{
+   *r = lookupHRegRemap(m, *r);
+}
+
+/* Apply the allocator's vreg->rreg mapping m to every register field of
+   instruction i, in place.  Each case must remap exactly the registers
+   that getRegUsage_MIPSInstr reported for the same instruction tag;
+   keep the two functions in sync when adding instructions. */
+void mapRegs_MIPSInstr(HRegRemap * m, MIPSInstr * i, Bool mode64)
+{
+   switch (i->tag) {
+      case Min_LI:
+         mapReg(m, &i->Min.LI.dst);
+         break;
+      case Min_Alu:
+         mapReg(m, &i->Min.Alu.srcL);
+         mapRegs_MIPSRH(m, i->Min.Alu.srcR);
+         mapReg(m, &i->Min.Alu.dst);
+         return;
+      case Min_Shft:
+         mapReg(m, &i->Min.Shft.srcL);
+         mapRegs_MIPSRH(m, i->Min.Shft.srcR);
+         mapReg(m, &i->Min.Shft.dst);
+         return;
+      case Min_Cmp:
+         mapReg(m, &i->Min.Cmp.srcL);
+         mapReg(m, &i->Min.Cmp.srcR);
+         mapReg(m, &i->Min.Cmp.dst);
+         return;
+      case Min_Unary:
+         mapReg(m, &i->Min.Unary.src);
+         mapReg(m, &i->Min.Unary.dst);
+         return;
+      case Min_Mul:
+         mapReg(m, &i->Min.Mul.dst);
+         mapReg(m, &i->Min.Mul.srcL);
+         mapReg(m, &i->Min.Mul.srcR);
+         return;
+      /* Mthi/Mtlo share the MtHL view; Mfhi/Mflo share MfHL. */
+      case Min_Mthi:
+      case Min_Mtlo:
+         mapReg(m, &i->Min.MtHL.src);
+         return;
+      case Min_Mfhi:
+      case Min_Mflo:
+         mapReg(m, &i->Min.MfHL.dst);
+         return;
+      case Min_Macc:
+         mapReg(m, &i->Min.Macc.srcL);
+         mapReg(m, &i->Min.Macc.srcR);
+         return;
+      case Min_Div:
+         mapReg(m, &i->Min.Div.srcL);
+         mapReg(m, &i->Min.Div.srcR);
+         return;
+      case Min_Call:
+         {
+            /* Only a conditional call carries a guard register. */
+            if (i->Min.Call.cond != MIPScc_AL)
+               mapReg(m, &i->Min.Call.src);
+            return;
+         }
+      case Min_XDirect:
+         mapRegs_MIPSAMode(m, i->Min.XDirect.amPC);
+         return;
+      case Min_XIndir:
+         mapReg(m, &i->Min.XIndir.dstGA);
+         mapRegs_MIPSAMode(m, i->Min.XIndir.amPC);
+         return;
+      case Min_XAssisted:
+         mapReg(m, &i->Min.XAssisted.dstGA);
+         mapRegs_MIPSAMode(m, i->Min.XAssisted.amPC);
+         return;
+      case Min_Load:
+         mapRegs_MIPSAMode(m, i->Min.Load.src);
+         mapReg(m, &i->Min.Load.dst);
+         return;
+      case Min_Store:
+         mapReg(m, &i->Min.Store.src);
+         mapRegs_MIPSAMode(m, i->Min.Store.dst);
+         return;
+      case Min_LoadL:
+         mapRegs_MIPSAMode(m, i->Min.LoadL.src);
+         mapReg(m, &i->Min.LoadL.dst);
+         return;
+      case Min_Cas:
+         mapReg(m, &i->Min.Cas.old);
+         mapReg(m, &i->Min.Cas.addr);
+         mapReg(m, &i->Min.Cas.expd);
+         mapReg(m, &i->Min.Cas.data);
+         return;
+      case Min_StoreC:
+         mapReg(m, &i->Min.StoreC.src);
+         mapRegs_MIPSAMode(m, i->Min.StoreC.dst);
+         return;
+      case Min_RdWrLR:
+         mapReg(m, &i->Min.RdWrLR.gpr);
+         return;
+      case Min_FpLdSt:
+         /* Both the 4- and 8-byte cases remap identically. */
+         if (i->Min.FpLdSt.sz == 4) {
+            mapReg(m, &i->Min.FpLdSt.reg);
+            mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
+            return;
+         } else if (i->Min.FpLdSt.sz == 8) {
+            mapReg(m, &i->Min.FpLdSt.reg);
+            mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
+            return;
+         }
+         break;
+      case Min_FpUnary:
+         mapReg(m, &i->Min.FpUnary.dst);
+         mapReg(m, &i->Min.FpUnary.src);
+         return;
+      case Min_FpBinary:
+         mapReg(m, &i->Min.FpBinary.dst);
+         mapReg(m, &i->Min.FpBinary.srcL);
+         mapReg(m, &i->Min.FpBinary.srcR);
+         return;
+      case Min_FpTernary:
+         mapReg(m, &i->Min.FpTernary.dst);
+         mapReg(m, &i->Min.FpTernary.src1);
+         mapReg(m, &i->Min.FpTernary.src2);
+         mapReg(m, &i->Min.FpTernary.src3);
+         return;
+      case Min_FpConvert:
+         mapReg(m, &i->Min.FpConvert.dst);
+         mapReg(m, &i->Min.FpConvert.src);
+         return;
+      case Min_FpCompare:
+         mapReg(m, &i->Min.FpCompare.dst);
+         mapReg(m, &i->Min.FpCompare.srcL);
+         mapReg(m, &i->Min.FpCompare.srcR);
+         return;
+      case Min_MtFCSR:
+         mapReg(m, &i->Min.MtFCSR.src);
+         return;
+      case Min_MfFCSR:
+         mapReg(m, &i->Min.MfFCSR.dst);
+         return;
+      case Min_FpGpMove:
+         mapReg(m, &i->Min.FpGpMove.dst);
+         mapReg(m, &i->Min.FpGpMove.src);
+         return;
+      case Min_MoveCond:
+         mapReg(m, &i->Min.MoveCond.dst);
+         mapReg(m, &i->Min.MoveCond.src);
+         mapReg(m, &i->Min.MoveCond.cond);
+         return;
+      case Min_EvCheck:
+         /* We expect both amodes only to mention %ebp, so this is in
+            fact pointless, since %ebp isn't allocatable, but anyway..
+            NOTE(review): "%ebp" looks like a comment copied from the
+            x86 backend; on MIPS the guest-state pointer register is
+            meant here -- confirm and reword upstream. */
+         mapRegs_MIPSAMode(m, i->Min.EvCheck.amCounter);
+         mapRegs_MIPSAMode(m, i->Min.EvCheck.amFailAddr);
+         return;
+      case Min_ProfInc:
+         /* does not use any registers. */
+         return;
+      default:
+         ppMIPSInstr(i, mode64);
+         vpanic("mapRegs_MIPSInstr");
+         break;
+   }
+
+}
+
+/* Figure out if i represents a reg-reg move, and if so assign the
+   source and destination to *src and *dst.  If in doubt say No.  Used
+   by the register allocator to do move coalescing.
+   Only integer moves of the form "or rd, rs, rs" are recognised;
+   FP moves are never coalesced by this test. */
+Bool isMove_MIPSInstr(const MIPSInstr * i, HReg * src, HReg * dst)
+{
+   /* Moves between integer regs */
+   if (i->tag == Min_Alu) {
+      /* or Rd,Rs,Rs == mr Rd,Rs */
+      if (i->Min.Alu.op != Malu_OR)
+         return False;
+      if (i->Min.Alu.srcR->tag != Mrh_Reg)
+         return False;
+      /* Both operands must be the same register for this to be a
+         pure copy. */
+      if (!sameHReg(i->Min.Alu.srcR->Mrh.Reg.reg, i->Min.Alu.srcL))
+         return False;
+      *src = i->Min.Alu.srcL;
+      *dst = i->Min.Alu.dst;
+      return True;
+   }
+   return False;
+}
+
+/* Generate mips spill/reload instructions under the direction of the
+   register allocator.  Spills real register rreg to the slot at byte
+   offset offsetB from the guest state pointer.  At most one
+   instruction is produced (*i2 is always left NULL). */
+void genSpill_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
+                    Int offsetB, Bool mode64)
+{
+   MIPSAMode *am;
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+   *i1 = *i2 = NULL;
+   am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));
+
+   switch (hregClass(rreg)) {
+      case HRcInt64:
+         vassert(mode64);
+         *i1 = MIPSInstr_Store(8, am, rreg, mode64);
+         break;
+      case HRcInt32:
+         vassert(!mode64);
+         *i1 = MIPSInstr_Store(4, am, rreg, mode64);
+         break;
+      case HRcFlt32:
+         vassert(!mode64);
+         *i1 = MIPSInstr_FpLdSt(False /*Store */ , 4, rreg, am);
+         break;
+      case HRcFlt64:
+         *i1 = MIPSInstr_FpLdSt(False /*Store */ , 8, rreg, am);
+         break;
+      default:
+         ppHRegClass(hregClass(rreg));
+         vpanic("genSpill_MIPS: unimplemented regclass");
+         break;
+   }
+}
+
+/* Counterpart of genSpill_MIPS: reload rreg from the slot at byte
+   offset offsetB from the guest state pointer.
+   NOTE(review): unlike genSpill_MIPS this does not assert
+   offsetB >= 0 and does not pre-set *i1 = *i2 = NULL -- presumably
+   callers initialise them; confirm and consider making the two
+   functions symmetrical.  Also note the HRcFlt32 case here allows
+   mode64 (loading 8 bytes) whereas genSpill's HRcFlt32 asserts
+   !mode64 -- verify this asymmetry is intended. */
+void genReload_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
+                     Int offsetB, Bool mode64)
+{
+   MIPSAMode *am;
+   vassert(!hregIsVirtual(rreg));
+   am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));
+
+   switch (hregClass(rreg)) {
+      case HRcInt64:
+         vassert(mode64);
+         *i1 = MIPSInstr_Load(8, rreg, am, mode64);
+         break;
+      case HRcInt32:
+         vassert(!mode64);
+         *i1 = MIPSInstr_Load(4, rreg, am, mode64);
+         break;
+      case HRcFlt32:
+         if (mode64)
+            *i1 = MIPSInstr_FpLdSt(True /*Load */ , 8, rreg, am);
+         else
+            *i1 = MIPSInstr_FpLdSt(True /*Load */ , 4, rreg, am);
+         break;
+      case HRcFlt64:
+         *i1 = MIPSInstr_FpLdSt(True /*Load */ , 8, rreg, am);
+         break;
+      default:
+         ppHRegClass(hregClass(rreg));
+         vpanic("genReload_MIPS: unimplemented regclass");
+         break;
+   }
+}
+
+/* --------- The mips assembler --------- */
+
+/* Return the hardware encoding of integer register r, checking that it
+   is a real (not virtual) register of the class expected for the
+   current mode.
+   NOTE(review): the bound is n <= 32, not n < 32 as for a plain GPR --
+   presumably to admit a pseudo-encoding; values 33/34 (HI/LO) are
+   handled separately in doAMode_IR/doAMode_RR below.  Confirm which
+   encodings beyond 31 are actually expected here. */
+inline static UInt iregNo(HReg r, Bool mode64)
+{
+   UInt n;
+   vassert(hregClass(r) == (mode64 ? HRcInt64 : HRcInt32));
+   vassert(!hregIsVirtual(r));
+   n = hregEncoding(r);
+   vassert(n <= 32);
+   return n;
+}
+
+/* Return the hardware encoding (0..31) of FP register r.  The mode64
+   parameter is currently unused; no register-class check is made here,
+   unlike iregNo. */
+inline static UInt fregNo(HReg r, Bool mode64)
+{
+   UInt n;
+   vassert(!hregIsVirtual(r));
+   n = hregEncoding(r);
+   vassert(n <= 31);
+   return n;
+}
+
+/* Return the hardware encoding (0..31) of double-precision FP
+   register r. */
+inline static UInt dregNo(HReg r)
+{
+   UInt n;
+   vassert(!hregIsVirtual(r));
+   n = hregEncoding(r);
+   vassert(n <= 31);
+   return n;
+}
+
+/* Emit 32bit instruction w32 at p in host byte order; returns the
+   advanced pointer.  Little-endian when _MIPSEL is defined, otherwise
+   (including _MIPSEB and cross-arch builds where neither is defined)
+   big-endian -- see the HACK note below. */
+static UChar *emit32(UChar * p, UInt w32)
+{
+#if defined (_MIPSEL)
+   *p++ = toUChar(w32 & 0x000000FF);
+   *p++ = toUChar((w32 >> 8) & 0x000000FF);
+   *p++ = toUChar((w32 >> 16) & 0x000000FF);
+   *p++ = toUChar((w32 >> 24) & 0x000000FF);
+/* HACK !!!!
+   MIPS endianess is decided at compile time using gcc defined
+   symbols _MIPSEL or _MIPSEB. When compiling libvex in a cross-arch
+   setup, then none of these is defined. We just choose here by default
+   mips Big Endian to allow libvexmultiarch_test to work when using
+   a mips host architecture.
+   A cleaner way would be to either have mips using 'dynamic endness'
+   (like ppc64be or le, decided at runtime) or at least defining
+   by default _MIPSEB when compiling on a non mips system.
+#elif defined (_MIPSEB).
+*/
+#else
+   *p++ = toUChar((w32 >> 24) & 0x000000FF);
+   *p++ = toUChar((w32 >> 16) & 0x000000FF);
+   *p++ = toUChar((w32 >> 8) & 0x000000FF);
+   *p++ = toUChar(w32 & 0x000000FF);
+#endif
+   return p;
+}
+/* Fetch an instruction (inverse of emit32).
+   NOTE(review): there is no #else branch here, so when neither
+   _MIPSEL nor _MIPSEB is defined this silently returns 0, whereas
+   emit32 above falls back to big-endian in that case.  For
+   cross-arch builds the two are therefore inconsistent -- confirm
+   whether a big-endian default branch should be added to match
+   emit32. */
+static UInt fetch32 ( UChar* p )
+{
+   UInt w32 = 0;
+#if defined (_MIPSEL)
+   w32 |= ((0xFF & (UInt)p[0]) << 0);
+   w32 |= ((0xFF & (UInt)p[1]) << 8);
+   w32 |= ((0xFF & (UInt)p[2]) << 16);
+   w32 |= ((0xFF & (UInt)p[3]) << 24);
+#elif defined (_MIPSEB)
+   w32 |= ((0xFF & (UInt)p[0]) << 24);
+   w32 |= ((0xFF & (UInt)p[1]) << 16);
+   w32 |= ((0xFF & (UInt)p[2]) <<  8);
+   w32 |= ((0xFF & (UInt)p[3]) <<  0);
+#endif
+   return w32;
+}
+
+/* physical structure of mips instructions */
+/* type I : opcode    - 6 bits
+         rs         - 5 bits
+         rt         - 5 bits
+         immediate - 16 bits
+*/
+/* Assemble one I-type instruction at p; imm is silently truncated to
+   its low 16 bits.  Returns the advanced pointer. */
+static UChar *mkFormI(UChar * p, UInt opc, UInt rs, UInt rt, UInt imm)
+{
+   UInt theInstr;
+   vassert(opc < 0x40);
+   vassert(rs < 0x20);
+   vassert(rt < 0x20);
+   imm = imm & 0xFFFF;
+   theInstr = ((opc << 26) | (rs << 21) | (rt << 16) | (imm));
+   return emit32(p, theInstr);
+}
+
+/* type R: opcode    - 6 bits
+         rs    - 5 bits
+         rt    - 5 bits
+         rd    - 5 bits
+         sa    - 5 bits
+         func  - 6 bits
+*/
+/* Assemble one R-type instruction at p; returns the advanced
+   pointer. */
+static UChar *mkFormR(UChar * p, UInt opc, UInt rs, UInt rt, UInt rd, UInt sa,
+            UInt func)
+{
+   /* Debug aid: print the offending value before the vassert below
+      aborts. */
+   if (rs >= 0x20)
+      vex_printf("rs = %d\n", rs);
+   UInt theInstr;
+   vassert(opc < 0x40);
+   vassert(rs < 0x20);
+   vassert(rt < 0x20);
+   vassert(rd < 0x20);
+   vassert(sa < 0x20);
+   /* NOTE(review): the func field is only 6 bits, yet this masks to 16
+      bits (0xFFFF); a func > 0x3F would corrupt the sa/rd fields.
+      Presumably all callers pass func < 0x40 -- confirm, or tighten
+      the mask to 0x3F with an accompanying vassert. */
+   func = func & 0xFFFF;
+   theInstr = ((opc << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6) |
+               (func));
+
+   return emit32(p, theInstr);
+}
+
+/* Assemble a shift-style instruction: like mkFormR but with the rd/rs
+   operand order of the shift encodings, and sa allowed up to 0x3F
+   (only its low 5 bits are encoded; the 6th bit selects the "+32"
+   opcode variants chosen by the caller via opc2). */
+static UChar *mkFormS(UChar * p, UInt opc1, UInt rRD, UInt rRS, UInt rRT,
+                      UInt sa, UInt opc2)
+{
+   UInt theInstr;
+   vassert(opc1 <= 0x3F);
+   vassert(rRD < 0x20);
+   vassert(rRS < 0x20);
+   vassert(rRT < 0x20);
+   vassert(opc2 <= 0x3F);
+   /* sa is unsigned, so sa >= 0 is always true; the real constraint is
+      the upper bound. */
+   vassert(sa >= 0 && sa <= 0x3F);
+
+   theInstr = ((opc1 << 26) | (rRS << 21) | (rRT << 16) | (rRD << 11) |
+              ((sa & 0x1F) << 6) | (opc2));
+
+   return emit32(p, theInstr);
+}
+
+/* Emit a load or store of register rSD through an immediate+register
+   amode.  opc1 < 40 means a load opcode, >= 40 a store (MIPS opcode
+   numbering: lb=32..lwr=38, sb=40..sw=43, ld=55, sd=63).  The
+   pseudo-encodings rSD == 33 (HI) and rSD == 34 (LO) are routed
+   through $24 (t8) as a scratch register with mfhi/mflo/mthi/mtlo
+   fixups.
+   NOTE(review): the HI/LO fixups look inverted: a *load* into HI
+   emits mfhi t8 *before* the memory load (so HI is never written),
+   and a *store* of HI emits mthi t8 *after* the memory store (so t8
+   is stored before ever being filled from HI).  Expected would be
+   load-then-mthi and mfhi-then-store.  Confirm against the upstream
+   VEX sources / callers before relying on the HI/LO paths. */
+static UChar *doAMode_IR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am,
+                         Bool mode64)
+{
+   UInt rA, idx, r_dst;
+   vassert(am->tag == Mam_IR);
+   vassert(am->Mam.IR.index < 0x10000);
+
+   rA = iregNo(am->Mam.IR.base, mode64);
+   idx = am->Mam.IR.index;
+
+   if (rSD == 33 || rSD == 34)
+      r_dst = 24;
+   else
+      r_dst = rSD;
+
+   if (opc1 < 40) {
+      /* load */
+      if (rSD == 33)
+         /* mfhi */
+         p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
+      else if (rSD == 34)
+         /* mflo */
+         p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
+   }
+
+   p = mkFormI(p, opc1, rA, r_dst, idx);
+
+   if (opc1 >= 40) {
+      /* store */
+      if (rSD == 33)
+         /* mthi */
+         p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
+      else if (rSD == 34)
+         /* mtlo */
+         p = mkFormR(p, 0, r_dst, 0, 0, 0, 19);
+   }
+
+   return p;
+}
+
+/* Emit a load or store of register rSD through a register+register
+   amode.  Since MIPS has no reg+reg addressing for ordinary
+   loads/stores, the base is temporarily modified:
+      (d)addu rA, rA, rB ; <ld/st> r_dst, 0(rA) ; (d)subu rA, rA, rB
+   rSD == 33/34 are the HI/LO pseudo-encodings, routed through $24
+   (t8) exactly as in doAMode_IR -- including the same suspicious
+   ordering of the mfhi/mthi fixups; see the NOTE(review) there. */
+static UChar *doAMode_RR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am,
+                         Bool mode64)
+{
+   UInt rA, rB, r_dst;
+   vassert(am->tag == Mam_RR);
+
+   rA = iregNo(am->Mam.RR.base, mode64);
+   rB = iregNo(am->Mam.RR.index, mode64);
+
+   if (rSD == 33 || rSD == 34)
+      r_dst = 24;
+   else
+      r_dst = rSD;
+
+   if (opc1 < 40) {
+      /* load */
+      if (rSD == 33)
+         /* mfhi */
+         p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
+      else if (rSD == 34)
+         /* mflo */
+         p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
+   }
+
+   if (mode64) {
+      /* daddu rA, rA, rB$
+         sd/ld r_dst, 0(rA)$
+         dsubu rA, rA, rB */
+      p = mkFormR(p, 0, rA, rB, rA, 0, 45);
+      p = mkFormI(p, opc1, rA, r_dst, 0);
+      p = mkFormR(p, 0, rA, rB, rA, 0, 47);
+   } else {
+      /* addu rA, rA, rB
+         sw/lw r_dst, 0(rA)
+         subu rA, rA, rB */
+      p = mkFormR(p, 0, rA, rB, rA, 0, 33);
+      p = mkFormI(p, opc1, rA, r_dst, 0);
+      p = mkFormR(p, 0, rA, rB, rA, 0, 35);
+   }
+   if (opc1 >= 40) {
+      /* store */
+      if (rSD == 33)
+         /* mthi */
+         p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
+      else if (rSD == 34)
+         /* mtlo */
+         p = mkFormR(p, 0, r_dst, 0, 0, 0, 19);
+   }
+
+   return p;
+}
+
+/* Load imm to r_dst, using the shortest of three sequences:
+   1 insn  (addiu)            if imm sign-extends from 16 bits,
+   2 insns (lui; ori)         if imm sign-extends from 32 bits,
+   6 insns (lui;ori;dsll;ori;dsll;ori)  otherwise (mode64 only). */
+static UChar *mkLoadImm(UChar * p, UInt r_dst, ULong imm, Bool mode64)
+{
+   if (!mode64) {
+      vassert(r_dst < 0x20);
+      /* In 32-bit mode, sign-extend imm so the range tests below work
+         on the 64-bit value. */
+      UInt u32 = (UInt) imm;
+      Int s32 = (Int) u32;
+      Long s64 = (Long) s32;
+      imm = (ULong) s64;
+   }
+
+   if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
+      /* sign-extendable from 16 bits
+         addiu r_dst, 0, imm  => li r_dst, imm */
+      p = mkFormI(p, 9, 0, r_dst, imm & 0xFFFF);
+   } else {
+      if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
+         /* sign-extendable from 32 bits
+            addiu r_dst, r0, (imm >> 16) => lis r_dst, (imm >> 16)
+            lui r_dst, (imm >> 16) */
+         p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
+         /* ori r_dst, r_dst, (imm & 0xFFFF) */
+         p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
+      } else {
+         vassert(mode64);
+         /* lui load in upper half of low word */
+         p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
+         /* ori */
+         p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
+         /* shift (dsll r_dst, r_dst, 16) */
+         p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+         /* ori */
+         p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
+         /* shift (dsll r_dst, r_dst, 16) */
+         p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+         /* ori */
+         p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
+      }
+   }
+   return p;
+}
+
+/* A simplified version of mkLoadImm that always generates 2 or 6
+   instructions (32 or 64 bits respectively) even if it could generate
+   fewer.  This is needed for generating fixed sized patchable
+   sequences (chain/unchain of XDirect blocks); isLoadImm_EXACTLY2or6
+   below must recognise exactly what this emits. */
+static UChar* mkLoadImm_EXACTLY2or6 ( UChar* p,
+                                      UInt r_dst, ULong imm, Bool mode64)
+{
+   vassert(r_dst < 0x20);
+
+   if (!mode64) {
+      /* In 32-bit mode, make sure the top 32 bits of imm are a sign
+         extension of the bottom 32 bits. (Probably unnecessary.) */
+      UInt u32 = (UInt)imm;
+      Int  s32 = (Int)u32;
+      Long s64 = (Long)s32;
+      imm = (ULong)s64;
+   }
+
+   if (!mode64) {
+      /* sign-extendable from 32 bits
+         addiu r_dst, r0, (imm >> 16) => lis r_dst, (imm >> 16)
+         lui r_dst, (imm >> 16) */
+      p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
+      /* ori r_dst, r_dst, (imm & 0xFFFF) */
+      p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
+   } else {
+      /* full 64bit immediate load: 6 (six!) insns. */
+      vassert(mode64);
+      /* lui load in upper half of low word */
+      p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
+      /* ori */
+      p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
+      /* shift (dsll r_dst, r_dst, 16) */
+      p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+      /* ori */
+      p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
+      /* shift (dsll r_dst, r_dst, 16) */
+      p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+      /* ori */
+      p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
+   }
+   return p;
+}
+
+/* Checks whether the sequence of bytes at p was indeed created
+   by mkLoadImm_EXACTLY2or6 with the given parameters.  Works by
+   re-assembling the expected sequence into a local buffer and
+   comparing word-by-word via fetch32, so it is endianness-consistent
+   with emit32. */
+static Bool isLoadImm_EXACTLY2or6 ( UChar* p_to_check,
+                                    UInt r_dst, ULong imm, Bool mode64 )
+{
+   vassert(r_dst < 0x20);
+   Bool ret;
+   if (!mode64) {
+      /* In 32-bit mode, make sure the top 32 bits of imm are a sign
+         extension of the bottom 32 bits.  (Probably unnecessary.) */
+      UInt u32 = (UInt)imm;
+      Int  s32 = (Int)u32;
+      Long s64 = (Long)s32;
+      imm = (ULong)s64;
+   }
+
+   if (!mode64) {
+      UInt   expect[2] = { 0, 0 };
+      UChar* p         = (UChar*)&expect[0];
+      /* lui r_dst, (immi >> 16) */
+      p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
+      /* ori r_dst, r_dst, (imm & 0xFFFF) */
+      p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
+      vassert(p == (UChar*)&expect[2]);
+
+      ret = fetch32(p_to_check + 0) == expect[0]
+            && fetch32(p_to_check + 4) == expect[1];
+   } else {
+      UInt   expect[6] = { 0, 0, 0, 0, 0, 0};
+      UChar* p         = (UChar*)&expect[0];
+      /* lui load in upper half of low word */
+      p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
+      /* ori */
+      p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
+      /* shift */
+      p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+      /* ori */
+      p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
+      /* shift */
+      p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
+      /* ori */
+      p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
+      vassert(p == (UChar*)&expect[6]);
+
+      ret = fetch32(p_to_check + 0) == expect[0]
+            && fetch32(p_to_check + 4) == expect[1]
+            && fetch32(p_to_check + 8) == expect[2]
+            && fetch32(p_to_check + 12) == expect[3]
+            && fetch32(p_to_check + 16) == expect[4]
+            && fetch32(p_to_check + 20) == expect[5];
+   }
+   return ret;
+}
+
+/* Generate a machine-word sized load or store. Simplified version of
+   the Min_Load and Min_Store cases below.
+   This will generate 32-bit load/store on MIPS32, and 64-bit load/store on
+   MIPS64 platforms.
+   Opcodes: ld=55 / lw=35 for loads, sd=63 / sw=43 for stores.
+   Only Mam_IR amodes are supported; Mam_RR asserts. */
+static UChar* do_load_or_store_machine_word ( UChar* p, Bool isLoad, UInt reg,
+                                              MIPSAMode* am, Bool mode64 )
+{
+   if (isLoad) { /* load */
+      switch (am->tag) {
+         case Mam_IR:
+            if (mode64) {
+               vassert(0 == (am->Mam.IR.index & 3));
+            }
+            p = doAMode_IR(p, mode64 ? 55 : 35, reg, am, mode64);
+            break;
+         case Mam_RR:
+            /* we could handle this case, but we don't expect to ever
+               need to. */
+            vassert(0);
+            break;
+         default:
+            vassert(0);
+            break;
+      }
+   } else /* store */ {
+      switch (am->tag) {
+         case Mam_IR:
+            if (mode64) {
+               vassert(0 == (am->Mam.IR.index & 3));
+            }
+            p = doAMode_IR(p, mode64 ? 63 : 43, reg, am, mode64);
+            break;
+         case Mam_RR:
+            /* we could handle this case, but we don't expect to ever
+               need to. */
+            vassert(0);
+            break;
+         default:
+            vassert(0);
+            break;
+      }
+   }
+   return p;
+}
+
+/* Generate a 32-bit sized load or store. Simplified version of
+   do_load_or_store_machine_word above: always lw (35) / sw (43)
+   regardless of mode.  Only Mam_IR amodes are supported; Mam_RR
+   asserts. */
+static UChar* do_load_or_store_word32 ( UChar* p, Bool isLoad, UInt reg,
+                                        MIPSAMode* am, Bool mode64 )
+{
+   if (isLoad) { /* load */
+      switch (am->tag) {
+         case Mam_IR:
+            if (mode64) {
+               vassert(0 == (am->Mam.IR.index & 3));
+            }
+            p = doAMode_IR(p, 35, reg, am, mode64);
+            break;
+         case Mam_RR:
+            /* we could handle this case, but we don't expect to ever
+               need to. */
+            vassert(0);
+            break;
+         default:
+            vassert(0);
+            break;
+      }
+   } else /* store */ {
+      switch (am->tag) {
+         case Mam_IR:
+            if (mode64) {
+               vassert(0 == (am->Mam.IR.index & 3));
+            }
+            p = doAMode_IR(p, 43, reg, am, mode64);
+            break;
+         case Mam_RR:
+            /* we could handle this case, but we don't expect to ever
+               need to. */
+            vassert(0);
+            break;
+         default:
+            vassert(0);
+            break;
+      }
+   }
+   return p;
+}
+
+/* Move r_src to r_dst ("or r_dst, r_src, r_src"); no-op when the two
+   registers coincide.  (The original comment had the direction
+   reversed.) */
+static UChar *mkMoveReg(UChar * p, UInt r_dst, UInt r_src)
+{
+   vassert(r_dst < 0x20);
+   vassert(r_src < 0x20);
+
+   if (r_dst != r_src) {
+      /* or r_dst, r_src, r_src */
+      p = mkFormR(p, 0, r_src, r_src, r_dst, 0, 37);
+   }
+   return p;
+}
+
+/* Emit an instruction into buf and return the number of bytes used.
+   Note that buf is not the insn's final place, and therefore it is
+   imperative to emit position-independent code.  If the emitted
+   instruction was a profiler inc, set *is_profInc to True, else
+   leave it unchanged. */
+Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc,
+                     UChar* buf, Int nbuf, const MIPSInstr* i,
+                     Bool mode64,
+                     VexEndness endness_host,
+                     const void* disp_cp_chain_me_to_slowEP,
+                     const void* disp_cp_chain_me_to_fastEP,
+                     const void* disp_cp_xindir,
+                     const void* disp_cp_xassisted )
+{
+   UChar *p = &buf[0];
+   UChar *ptmp = p;
+   vassert(nbuf >= 32);
+
+   switch (i->tag) {
+      case Min_LI:
+         p = mkLoadImm(p, iregNo(i->Min.LI.dst, mode64), i->Min.LI.imm, mode64);
+         goto done;
+
+      case Min_Alu: {
+         MIPSRH *srcR = i->Min.Alu.srcR;
+         Bool immR = toBool(srcR->tag == Mrh_Imm);
+         UInt r_dst = iregNo(i->Min.Alu.dst, mode64);
+         UInt r_srcL = iregNo(i->Min.Alu.srcL, mode64);
+         UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg,
+                                                       mode64);
+         switch (i->Min.Alu.op) {
+            /* Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR, Malu_SLT */
+            case Malu_ADD:
+               if (immR) {
+                  vassert(srcR->Mrh.Imm.imm16 != 0x8000);
+                  if (srcR->Mrh.Imm.syned)
+                     /* addi */
+                     p = mkFormI(p, 9, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
+                  else
+                     /* addiu */
+                     p = mkFormI(p, 9, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
+               } else {
+                  /* addu */
+                  p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 33);
+               }
+               break;
+            case Malu_SUB:
+               if (immR) {
+                  /* addi , but with negated imm */
+                  vassert(srcR->Mrh.Imm.syned);
+                  vassert(srcR->Mrh.Imm.imm16 != 0x8000);
+                  p = mkFormI(p, 8, r_srcL, r_dst, (-srcR->Mrh.Imm.imm16));
+               } else {
+                  /* subu */
+                  p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 35);
+               }
+               break;
+            case Malu_AND:
+               if (immR) {
+                  /* andi */
+                  vassert(!srcR->Mrh.Imm.syned);
+                  p = mkFormI(p, 12, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
+               } else {
+                  /* and */
+                  p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 36);
+               }
+               break;
+            case Malu_OR:
+               if (immR) {
+                  /* ori */
+                  vassert(!srcR->Mrh.Imm.syned);
+                  p = mkFormI(p, 13, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
+               } else {
+                  /* or */
+                  if (r_srcL == 33)
+                     /* MFHI */
+                     p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
+                  else if (r_srcL == 34)
+                     /* MFLO */
+                     p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
+                  else if (r_dst == 33)
+                     /* MTHI */
+                     p = mkFormR(p, 0, r_srcL, 0, 0, 0, 17);
+                  else if (r_dst == 34)
+                     /* MTLO */
+                     p = mkFormR(p, 0, r_srcL, 0, 0, 0, 19);
+                  else
+                     p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 37);
+               }
+               break;
+            case Malu_NOR:
+               /* nor */
+               vassert(!immR);
+               p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 39);
+               break;
+            case Malu_XOR:
+               if (immR) {
+                  /* xori */
+                  vassert(!srcR->Mrh.Imm.syned);
+                  p = mkFormI(p, 14, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
+               } else {
+                  /* xor */
+                  p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
+               }
+               break;
+            case Malu_DADD:
+               if (immR) {
+                  vassert(srcR->Mrh.Imm.syned);
+                  vassert(srcR->Mrh.Imm.imm16 != 0x8000);
+                  p = mkFormI(p, 25, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
+               } else {
+                  p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 45);
+               }
+               break;
+            case Malu_DSUB:
+               if (immR) {
+                  p = mkFormI(p, 25, r_srcL, r_dst, (-srcR->Mrh.Imm.imm16));
+               } else {
+                  p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 47);
+               }
+               break;
+            case Malu_SLT:
+               if (immR) {
+                  goto bad;
+               } else {
+                  p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42);
+               }
+               break;
+
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+
+      case Min_Shft: {
+         MIPSRH *srcR = i->Min.Shft.srcR;
+         Bool sz32 = i->Min.Shft.sz32;
+         Bool immR = toBool(srcR->tag == Mrh_Imm);
+         UInt r_dst = iregNo(i->Min.Shft.dst, mode64);
+         UInt r_srcL = iregNo(i->Min.Shft.srcL, mode64);
+         UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg,
+                                                       mode64);
+         if (!mode64)
+            vassert(sz32);
+         switch (i->Min.Shft.op) {
+            case Mshft_SLL:
+               if (sz32) {
+                  if (immR) {
+                     UInt n = srcR->Mrh.Imm.imm16;
+                     vassert(n >= 0 && n <= 32);
+                     p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 0);
+                  } else {
+                     /* shift variable */
+                     p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 4);
+                  }
+               } else {
+                  if (immR) {
+                     UInt n = srcR->Mrh.Imm.imm16;
+                     vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
+                     if (n >= 0 && n < 32) {
+                        p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 56);
+                     } else {
+                        p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 60);
+                     }
+                  } else {
+                     p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 20);
+                  }
+               }
+               break;
+
+            case Mshft_SRL:
+               if (sz32) {
+                  /* SRL, SRLV */
+                  if (immR) {
+                     UInt n = srcR->Mrh.Imm.imm16;
+                     vassert(n >= 0 && n < 32);
+                     p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 2);
+                  } else {
+                     /* shift variable */
+                     p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 6);
+                  }
+               } else {
+                  /* DSRL, DSRL32, DSRLV */
+                  if (immR) {
+                     UInt n = srcR->Mrh.Imm.imm16;
+                     vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
+                     if (n >= 0 && n < 32) {
+                        p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 58);
+                     } else {
+                        p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 62);
+                     }
+                  } else {
+                     p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 22);
+                  }
+               }
+               break;
+
+            case Mshft_SRA:
+               /* Arithmetic right shift.  32-bit: SRA/SRAV.  64-bit:
+                  DSRA/DSRA32/DSRAV, chosen by whether the shift amount
+                  is an immediate (immR) or lives in register r_srcR. */
+               if (sz32) {
+                  /* SRA, SRAV */
+                  if (immR) {
+                     UInt n = srcR->Mrh.Imm.imm16;
+                     /* n is unsigned, so only the upper bound needs
+                        checking (5-bit shamt field). */
+                     vassert(n < 32);
+                     p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 3);       /* sra */
+                  } else {
+                     /* shift amount in register */
+                     p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 7);  /* srav */
+                  }
+               } else {
+                  /* DSRA, DSRA32, DSRAV */
+                  if (immR) {
+                     UInt n = srcR->Mrh.Imm.imm16;
+                     /* unsigned, so equivalent to the old
+                        (n >= 0 && n < 32) || (n > 31 && n < 64) */
+                     vassert(n < 64);
+                     if (n < 32) {
+                        p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 59);      /* dsra */
+                     } else {
+                        /* shamt holds n-32; DSRA32 supplies the +32 */
+                        p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 63); /* dsra32 */
+                     }
+                  } else {
+                     p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 23);    /* dsrav */
+                  }
+               }
+               break;
+
+            default:
+               goto bad;
+         }
+
+         goto done;
+      }
+
+      case Min_Unary: {
+         /* Unary integer ops: count-leading-ones/zeros in 32- and
+            64-bit widths, plus NOP.  The CL* forms use opcode 28
+            (SPECIAL2) with both rt and rd set to the destination. */
+         UInt r_dst = iregNo(i->Min.Unary.dst, mode64);
+         UInt r_src = iregNo(i->Min.Unary.src, mode64);
+
+         switch (i->Min.Unary.op) {
+            /* Mun_CLO, Mun_CLZ, Mun_NOP, Mun_DCLO, Mun_DCLZ */
+            case Mun_CLO:  /* clo */
+               p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 33);
+               break;
+            case Mun_CLZ:  /* clz */
+               p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 32);
+               break;
+            case Mun_NOP:  /* nop (sll r0,r0,0) */
+               p = mkFormR(p, 0, 0, 0, 0, 0, 0);
+               break;
+            case Mun_DCLO:  /* dclo */
+               p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 37);
+               break;
+            case Mun_DCLZ:  /* dclz */
+               p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 36);
+               break;
+         }
+         /* NOTE(review): no default arm — an unrecognised op emits
+            nothing and silently reaches done; confirm that is intended
+            rather than goto bad. */
+         goto done;
+      }
+
+      case Min_Cmp: {
+         /* Materialise a comparison result (0 or 1) in r_dst.  MIPS has
+            no single set-on-condition for EQ/NE/LE/LS, so those are
+            two-instruction sequences built from xor/slt(u)/sltiu/xori. */
+         UInt r_srcL = iregNo(i->Min.Cmp.srcL, mode64);
+         UInt r_srcR = iregNo(i->Min.Cmp.srcR, mode64);
+         UInt r_dst = iregNo(i->Min.Cmp.dst, mode64);
+
+         switch (i->Min.Cmp.cond) {
+            case MIPScc_EQ:
+               /* xor r_dst, r_srcL, r_srcR
+                  sltiu r_dst, r_dst, 1 */
+               p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
+               p = mkFormI(p, 11, r_dst, r_dst, 1);
+               break;
+            case MIPScc_NE:
+               /* xor r_dst, r_srcL, r_srcR
+                  sltu r_dst, zero, r_dst */
+               p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
+               p = mkFormR(p, 0, 0, r_dst, r_dst, 0, 43);
+               break;
+            case MIPScc_LT:
+               /* slt r_dst, r_srcL, r_srcR */
+               p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42);
+               break;
+            case MIPScc_LO:
+               /* unsigned below: sltu r_dst, r_srcL, r_srcR */
+               p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 43);
+               break;
+            case MIPScc_LE:
+               /* L <= R computed as !(R < L):
+                  slt r_dst, r_srcR, r_srcL
+                  xori r_dst, r_dst, 1 */
+               p = mkFormR(p, 0, r_srcR, r_srcL, r_dst, 0, 42);
+               p = mkFormI(p, 14, r_dst, r_dst, 1);
+               break;
+            case MIPScc_LS:
+               /* unsigned L <= R computed as !(R <u L):
+                  sltu r_dst, r_srcR, r_srcL
+                  xori r_dst, r_dst, 1 */
+               p = mkFormR(p, 0, r_srcR, r_srcL, r_dst, 0, 43);
+               p = mkFormI(p, 14, r_dst, r_dst, 1);
+               break;
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+
+      case Min_Mul: {
+         /* Integer multiply.  Widening forms write the HI/LO pair (the
+            rd field is 0); the non-widening form writes r_dst directly
+            via the SPECIAL2 MUL encoding. */
+         Bool syned = i->Min.Mul.syned;
+         Bool widening = i->Min.Mul.widening;
+         Bool sz32 = i->Min.Mul.sz32;
+         UInt r_srcL = iregNo(i->Min.Mul.srcL, mode64);
+         UInt r_srcR = iregNo(i->Min.Mul.srcR, mode64);
+         UInt r_dst = iregNo(i->Min.Mul.dst, mode64);
+         if (widening) {
+            if (sz32) {
+               if (syned)
+                  /* mult */
+                  p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 24);
+               else
+                  /* multu */
+                  p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 25);
+            } else {
+               if (syned)  /* DMULT  r_dst,r_srcL,r_srcR */
+                  p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 28);
+               else  /* DMULTU r_dst,r_srcL,r_srcR */
+                  p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 29);
+            }
+         } else {
+            if (sz32)
+               /* mul */
+               p = mkFormR(p, 28, r_srcL, r_srcR, r_dst, 0, 2);
+            else if (mode64 && !sz32)
+               /* NOTE(review): identical encoding to the 32-bit MUL just
+                  above (SPECIAL2, funct 2) — confirm this is the intended
+                  instruction for a non-widening 64-bit multiply. */
+               p = mkFormR(p, 28, r_srcL, r_srcR, r_dst, 0, 2);
+            else
+               goto bad;
+         }
+         goto done;
+      }
+
+      case Min_Macc: {
+         /* Multiply-accumulate into the HI/LO pair: madd/msub (signed)
+            or maddu/msubu (unsigned), all SPECIAL2 (opcode 28).  No
+            destination register field — the accumulator is implicit. */
+         Bool syned = i->Min.Macc.syned;
+         UInt r_srcL = iregNo(i->Min.Macc.srcL, mode64);
+         UInt r_srcR = iregNo(i->Min.Macc.srcR, mode64);
+
+         if (syned) {
+            switch (i->Min.Macc.op) {
+               case Macc_ADD:
+                  /* madd */
+                  p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 0);
+                  break;
+               case Macc_SUB:
+                  /* msub */
+                  p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
+                         4);
+                  break;
+               default:
+                  goto bad;
+            }
+         } else {
+            switch (i->Min.Macc.op) {
+               case Macc_ADD:
+                  /* maddu */
+                  p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
+                         1);
+                  break;
+               case Macc_SUB:
+                  /* msubu */
+                  p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
+                         5);
+                  break;
+               default:
+                  goto bad;
+            }
+         }
+
+         goto done;
+      }
+
+      case Min_Div: {
+         /* Integer divide; quotient/remainder land in the implicit
+            LO/HI pair, so no destination register is encoded. */
+         Bool syned = i->Min.Div.syned;
+         Bool sz32 = i->Min.Div.sz32;
+         UInt r_srcL = iregNo(i->Min.Div.srcL, mode64);
+         UInt r_srcR = iregNo(i->Min.Div.srcR, mode64);
+         if (sz32) {
+            if (syned) {
+               /* div */
+               p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 26);
+            } else
+               /* divu */
+               p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 27);
+            goto done;
+         } else {
+            if (syned) {
+               /* ddiv */
+               p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 30);
+            } else
+               /* ddivu */
+               p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 31);
+            goto done;
+         }
+      }
+
+      case Min_Mthi: {
+         /* mthi: copy r_src into the HI accumulator (funct 17). */
+         UInt r_src = iregNo(i->Min.MtHL.src, mode64);
+         p = mkFormR(p, 0, r_src, 0, 0, 0, 17);
+         goto done;
+      }
+
+      case Min_Mtlo: {
+         /* mtlo: copy r_src into the LO accumulator (funct 19). */
+         UInt r_src = iregNo(i->Min.MtHL.src, mode64);
+         p = mkFormR(p, 0, r_src, 0, 0, 0, 19);
+         goto done;
+      }
+
+      case Min_Mfhi: {
+         /* mfhi: copy the HI accumulator into r_dst (funct 16). */
+         UInt r_dst = iregNo(i->Min.MfHL.dst, mode64);
+         p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
+         goto done;
+      }
+
+      case Min_Mflo: {
+         /* mflo: copy the LO accumulator into r_dst (funct 18). */
+         UInt r_dst = iregNo(i->Min.MfHL.dst, mode64);
+         p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
+         goto done;
+      }
+
+      case Min_MtFCSR: {
+         /* ctc1: move r_src into FP control register 31 (the FCSR). */
+         UInt r_src = iregNo(i->Min.MtFCSR.src, mode64);
+         /* ctc1 */
+         p = mkFormR(p, 17, 6, r_src, 31, 0, 0);
+         goto done;
+      }
+
+      case Min_MfFCSR: {
+         /* cfc1: move FP control register 31 (the FCSR) into r_dst. */
+         UInt r_dst = iregNo(i->Min.MfFCSR.dst, mode64);
+         /* cfc1 */
+         p = mkFormR(p, 17, 2, r_dst, 31, 0, 0);
+         goto done;
+      }
+
+      case Min_Call: {
+         /* Call a helper: load the target address into $25 (t9) and
+            jalr it, optionally guarded by a condition.  The conditional
+            skip is back-patched once the sequence length is known. */
+         if (i->Min.Call.cond != MIPScc_AL
+             && i->Min.Call.rloc.pri != RLPri_None) {
+            /* The call might not happen (it isn't unconditional) and
+               it returns a result.  In this case we will need to
+               generate a control flow diamond to put 0x555..555 in
+               the return register(s) in the case where the call
+               doesn't happen.  If this ever becomes necessary, maybe
+               copy code from the ARM equivalent.  Until that day,
+               just give up. */
+            goto bad;
+         }
+         MIPSCondCode cond = i->Min.Call.cond;
+         UInt r_dst = 25;  /* using %r25 as address temporary -
+                              see getRegUsage_MIPSInstr */
+
+         /* jump over the following insns if condition does not hold */
+         if (cond != MIPScc_AL) {
+            /* jmp fwds if !condition */
+            /* don't know how many bytes to jump over yet...
+               make space for a jump instruction + nop!!! and fill in later. */
+            ptmp = p;  /* fill in this bit later */
+            p += 8;    /* p += 8 */
+         }
+
+         if (!mode64) {
+            /* 32-bit mode: make a 16-byte outgoing-argument area.
+               0xFFF0 is -16 as a signed 16-bit immediate. */
+            /* addiu $29, $29, -16 */
+            p = mkFormI(p, 9, 29, 29, 0xFFF0);
+         }
+
+         /* load target to r_dst; p += 4|8 */
+         p = mkLoadImm(p, r_dst, i->Min.Call.target, mode64);
+
+         /* jalr r_dst — return address goes to $31; followed by a
+            nop in the branch delay slot. */
+         p = mkFormR(p, 0, r_dst, 0, 31, 0, 9);  /* p += 4 */
+         p = mkFormR(p, 0, 0, 0, 0, 0, 0);       /* p += 4 */
+
+         if (!mode64) {
+            /* pop the argument area again */
+            /* addiu $29, $29, 16 */
+            p = mkFormI(p, 9, 29, 29, 0x0010);
+         }
+
+         /* Fix up the conditional jump, if there was one. */
+         if (cond != MIPScc_AL) {
+            UInt r_src = iregNo(i->Min.Call.src, mode64);
+            Int delta = p - ptmp;
+
+            vassert(delta >= 20 && delta <= 32);
+            /* blez r_src, delta/4-1
+               nop
+               (skips the call when r_src <= 0, i.e. the guard value
+               is false; presumably the guard is 0/1 — confirm) */
+            ptmp = mkFormI(ptmp, 6, r_src, 0, delta / 4 - 1);
+            mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
+         }
+         goto done;
+      }
+
+      case Min_XDirect: {
+         /* Direct (chainable) translation exit: store the destination
+            guest PC, then call the chain-me stub via $9.  Layout must
+            stay in lockstep with chainXDirect_MIPS/unchainXDirect_MIPS. */
+         /* NB: what goes on here has to be very closely coordinated
+            with the chainXDirect_MIPS and unchainXDirect_MIPS below. */
+         /* We're generating chain-me requests here, so we need to be
+            sure this is actually allowed -- no-redir translations
+            can't use chain-me's.  Hence: */
+         vassert(disp_cp_chain_me_to_slowEP != NULL);
+         vassert(disp_cp_chain_me_to_fastEP != NULL);
+
+         /* Use ptmp for backpatching conditional jumps. */
+         ptmp = NULL;
+
+         /* First off, if this is conditional, create a conditional
+            jump over the rest of it.  Or at least, leave a space for
+            it that we will shortly fill in. */
+         if (i->Min.XDirect.cond != MIPScc_AL) {
+            vassert(i->Min.XDirect.cond != MIPScc_NV);
+            ptmp = p;
+            p += 12;   /* reserve 3 insns: lw / beq / nop */
+         }
+
+         /* Update the guest PC. */
+         /* move r9, dstGA */
+         /* sw/sd r9, amPC */
+         p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, (ULong)i->Min.XDirect.dstGA,
+                                   mode64);
+         p = do_load_or_store_machine_word(p, False /*!isLoad*/ , /*r*/ 9,
+                                           i->Min.XDirect.amPC, mode64);
+
+         /* --- FIRST PATCHABLE BYTE follows --- */
+         /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're
+            calling to) backs up the return address, so as to find the
+            address of the first patchable byte.  So: don't change the
+            number of instructions (3) below. */
+         /* move r9, VG_(disp_cp_chain_me_to_{slowEP,fastEP}) */
+         /* jr  r9  */
+         const void* disp_cp_chain_me
+                  = i->Min.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
+                                              : disp_cp_chain_me_to_slowEP;
+         p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
+                                   (Addr)disp_cp_chain_me, mode64);
+         /* jalr $9 */
+         /* nop */
+         p = mkFormR(p, 0, 9, 0, 31, 0, 9);  /* p += 4 */
+         p = mkFormR(p, 0, 0, 0, 0, 0, 0);   /* p += 4 */
+         /* --- END of PATCHABLE BYTES --- */
+
+         /* Fix up the conditional jump, if there was one. */
+         if (i->Min.XDirect.cond != MIPScc_AL) {
+            Int delta = p - ptmp;
+            /* convert byte distance to a branch offset in insns,
+               accounting for the 3 reserved insns themselves */
+            delta = delta / 4 - 3;
+            vassert(delta > 0 && delta < 40);
+
+            /* lw $9, COND_OFFSET(GuestSP)
+               beq $9, $0, 2
+               nop */
+            ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
+            ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
+            mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
+         }
+         goto done;
+      }
+
+      case Min_XIndir: {
+         /* Indirect translation exit: store dstGA (a register) to the
+            guest PC slot, then jump to the dispatcher's xindir entry. */
+         /* We're generating transfers that could lead indirectly to a
+            chain-me, so we need to be sure this is actually allowed --
+            no-redir translations are not allowed to reach normal
+            translations without going through the scheduler.  That means
+            no XDirects or XIndirs out from no-redir translations.
+            Hence: */
+         vassert(disp_cp_xindir != NULL);
+
+         /* Use ptmp for backpatching conditional jumps. */
+         ptmp = NULL;
+
+         /* First off, if this is conditional, create a conditional
+            jump over the rest of it. */
+         if (i->Min.XIndir.cond != MIPScc_AL) {
+            vassert(i->Min.XIndir.cond != MIPScc_NV);
+            ptmp = p;
+            p += 12;   /* reserve 3 insns: lw / beq / nop */
+         }
+
+         /* Update the guest PC. */
+         /* sw/sd r-dstGA, amPC */
+         p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
+                                           iregNo(i->Min.XIndir.dstGA, mode64),
+                                           i->Min.XIndir.amPC, mode64);
+
+         /* move r9, VG_(disp_cp_xindir) */
+         /* jalr   r9 */
+         /* nop */
+         p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
+                                   (Addr)disp_cp_xindir, mode64);
+         p = mkFormR(p, 0, 9, 0, 31, 0, 9);  /* p += 4 */
+         p = mkFormR(p, 0, 0, 0, 0, 0, 0);   /* p += 4 */
+
+         /* Fix up the conditional jump, if there was one. */
+         if (i->Min.XIndir.cond != MIPScc_AL) {
+            Int delta = p - ptmp;
+            /* byte distance -> insn count, minus the 3 reserved insns */
+            delta = delta / 4 - 3;
+            vassert(delta > 0 && delta < 40);
+
+            /* lw $9, COND_OFFSET($GuestSP)
+               beq $9, $0, 2
+               nop */
+            ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
+            ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
+            mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
+         }
+         goto done;
+      }
+
+      case Min_XAssisted: {
+         /* Assisted translation exit: store the destination guest PC,
+            load a TRC_* magic number describing the jump kind, and call
+            the dispatcher's xassisted entry. */
+         /* First off, if this is conditional, create a conditional jump
+            over the rest of it.  Or at least, leave a space for it that
+            we will shortly fill in. */
+         ptmp = NULL;
+         if (i->Min.XAssisted.cond != MIPScc_AL) {
+            vassert(i->Min.XAssisted.cond != MIPScc_NV);
+            ptmp = p;
+            p += 12;   /* reserve 3 insns: lw / beq / nop */
+         }
+
+         /* Update the guest PC. */
+         /* sw/sd r-dstGA, amPC */
+         /* NOTE(review): reads i->Min.XIndir.* fields inside the
+            XAssisted case — presumably the union layouts coincide, but
+            i->Min.XAssisted.* would be the consistent spelling. */
+         p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
+                                           iregNo(i->Min.XIndir.dstGA, mode64),
+                                           i->Min.XIndir.amPC, mode64);
+
+         /* imm32/64 r31, $magic_number */
+         UInt trcval = 0;
+         switch (i->Min.XAssisted.jk) {
+            case Ijk_ClientReq:     trcval = VEX_TRC_JMP_CLIENTREQ;     break;
+            case Ijk_Sys_syscall:   trcval = VEX_TRC_JMP_SYS_SYSCALL;   break;
+            /* case Ijk_Sys_int128: trcval = VEX_TRC_JMP_SYS_INT128;   break; */
+            case Ijk_Yield:         trcval = VEX_TRC_JMP_YIELD;       break;
+            case Ijk_EmWarn:        trcval = VEX_TRC_JMP_EMWARN;        break;
+            case Ijk_EmFail:        trcval = VEX_TRC_JMP_EMFAIL;        break;
+            /* case Ijk_MapFail:   trcval = VEX_TRC_JMP_MAPFAIL;       break; */
+            case Ijk_NoDecode:      trcval = VEX_TRC_JMP_NODECODE;      break;
+            case Ijk_InvalICache:   trcval = VEX_TRC_JMP_INVALICACHE;   break;
+            case Ijk_NoRedir:       trcval = VEX_TRC_JMP_NOREDIR;       break;
+            case Ijk_SigILL:        trcval = VEX_TRC_JMP_SIGILL;        break;
+            case Ijk_SigTRAP:       trcval = VEX_TRC_JMP_SIGTRAP;       break;
+            /* case Ijk_SigSEGV:   trcval = VEX_TRC_JMP_SIGSEGV;       break; */
+            case Ijk_SigBUS:        trcval = VEX_TRC_JMP_SIGBUS;        break;
+            case Ijk_SigFPE_IntDiv: trcval = VEX_TRC_JMP_SIGFPE_INTDIV; break;
+            case Ijk_SigFPE_IntOvf: trcval = VEX_TRC_JMP_SIGFPE_INTOVF; break;
+            case Ijk_Boring:        trcval = VEX_TRC_JMP_BORING;        break;
+            /* We don't expect to see the following being assisted.
+               case Ijk_Ret:
+               case Ijk_Call:
+               fallthrough */
+            default:
+               ppIRJumpKind(i->Min.XAssisted.jk);
+               vpanic("emit_MIPSInstr.Min_XAssisted: unexpected jump kind");
+         }
+         vassert(trcval != 0);
+         /* NOTE(review): the comment above says r31, but trcval is
+            loaded into GuestSP here — confirm which register the
+            dispatcher expects the TRC value in. */
+         p = mkLoadImm_EXACTLY2or6(p, /*r*/ GuestSP, trcval, mode64);
+
+         /* move r9, VG_(disp_cp_xassisted) */
+         p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
+                                   (ULong)(Addr)disp_cp_xassisted, mode64);
+         /* jalr $9
+             nop */
+         p = mkFormR(p, 0, 9, 0, 31, 0, 9);  /* p += 4 */
+         p = mkFormR(p, 0, 0, 0, 0, 0, 0);   /* p += 4 */
+
+         /* Fix up the conditional jump, if there was one. */
+         if (i->Min.XAssisted.cond != MIPScc_AL) {
+            Int delta = p - ptmp;
+            /* byte distance -> insn count, minus the 3 reserved insns */
+            delta = delta / 4 - 3;
+            vassert(delta > 0 && delta < 40);
+
+            /* lw $9, COND_OFFSET($GuestSP)
+               beq $9, $0, 2
+               nop */
+            ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
+            ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
+            mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
+         }
+         goto done;
+      }
+
+      case Min_Load: {
+         /* Integer load of 1/2/4/8 bytes from an IR (base+imm) or RR
+            (base+index) address.  Opcodes: 32=lb, 33=lh, 35=lw, 55=ld. */
+         MIPSAMode *am_addr = i->Min.Load.src;
+         if (am_addr->tag == Mam_IR) {
+            UInt r_dst = iregNo(i->Min.Load.dst, mode64);
+            UInt opc, sz = i->Min.Load.sz;
+            if (mode64 && (sz == 4 || sz == 8)) {
+               /* should be guaranteed to us by iselWordExpr_AMode */
+               vassert(0 == (am_addr->Mam.IR.index & 3));
+            }
+            switch (sz) {
+               case 1:
+                  opc = 32;   /* lb */
+                  break;
+               case 2:
+                  opc = 33;   /* lh */
+                  break;
+               case 4:
+                  opc = 35;   /* lw */
+                  break;
+               case 8:
+                  opc = 55;   /* ld, 64-bit only */
+                  vassert(mode64);
+                  break;
+               default:
+                  goto bad;
+            }
+
+            p = doAMode_IR(p, opc, r_dst, am_addr, mode64);
+            goto done;
+         } else if (am_addr->tag == Mam_RR) {
+            UInt r_dst = iregNo(i->Min.Load.dst, mode64);
+            UInt opc, sz = i->Min.Load.sz;
+
+            switch (sz) {
+               case 1:
+                  opc = 32;   /* lb */
+                  break;
+               case 2:
+                  opc = 33;   /* lh */
+                  break;
+               case 4:
+                  opc = 35;   /* lw */
+                  break;
+               case 8:
+                  opc = 55;   /* ld, 64-bit only */
+                  vassert(mode64);
+                  break;
+               default:
+                  goto bad;
+            }
+
+            p = doAMode_RR(p, opc, r_dst, am_addr, mode64);
+            goto done;
+         }
+         /* unknown address-mode tag: fall through to bad */
+         break;
+      }
+
+      case Min_Store: {
+         /* Integer store of 1/2/4/8 bytes to an IR (base+imm) or RR
+            (base+index) address.  Opcodes: 40=sb, 41=sh, 43=sw, 63=sd. */
+         MIPSAMode *am_addr = i->Min.Store.dst;
+         if (am_addr->tag == Mam_IR) {
+            UInt r_src = iregNo(i->Min.Store.src, mode64);
+            UInt opc, sz = i->Min.Store.sz;
+            if (mode64 && (sz == 4 || sz == 8)) {
+               /* should be guaranteed to us by iselWordExpr_AMode */
+               vassert(0 == (am_addr->Mam.IR.index & 3));
+            }
+            switch (sz) {
+               case 1:
+                  opc = 40;   /* sb */
+                  break;
+               case 2:
+                  opc = 41;   /* sh */
+                  break;
+               case 4:
+                  opc = 43;   /* sw */
+                  break;
+               case 8:
+                  vassert(mode64);
+                  opc = 63;   /* sd, 64-bit only */
+                  break;
+               default:
+                  goto bad;
+            }
+
+            p = doAMode_IR(p, opc, r_src, am_addr, mode64);
+            goto done;
+         } else if (am_addr->tag == Mam_RR) {
+            UInt r_src = iregNo(i->Min.Store.src, mode64);
+            UInt opc, sz = i->Min.Store.sz;
+
+            switch (sz) {
+               case 1:
+                  opc = 40;   /* sb */
+                  break;
+               case 2:
+                  opc = 41;   /* sh */
+                  break;
+               case 4:
+                  opc = 43;   /* sw */
+                  break;
+               case 8:
+                  vassert(mode64);
+                  opc = 63;   /* sd, 64-bit only */
+                  break;
+               default:
+                  goto bad;
+            }
+
+            p = doAMode_RR(p, opc, r_src, am_addr, mode64);
+            goto done;
+         }
+         /* unknown address-mode tag: fall through to bad */
+         break;
+      }
+      case Min_LoadL: {
+         /* Load-linked: ll (opcode 0x30) for 4 bytes, lld (0x34) for 8.
+            Address mode is assumed to be Mam_IR (base + immediate). */
+         MIPSAMode *am_addr = i->Min.LoadL.src;
+         UInt r_src = iregNo(am_addr->Mam.IR.base, mode64);
+         UInt idx = am_addr->Mam.IR.index;
+         UInt r_dst = iregNo(i->Min.LoadL.dst, mode64);
+
+         if (i->Min.LoadL.sz == 4)
+            p = mkFormI(p, 0x30, r_src, r_dst, idx);
+         else
+            p = mkFormI(p, 0x34, r_src, r_dst, idx);
+         goto done;
+      }
+      case Min_StoreC: {
+         /* Store-conditional: sc (opcode 0x38) for 4 bytes, scd (0x3C)
+            for 8.  Address mode is assumed to be Mam_IR. */
+         MIPSAMode *am_addr = i->Min.StoreC.dst;
+         UInt r_src = iregNo(i->Min.StoreC.src, mode64);
+         UInt idx = am_addr->Mam.IR.index;
+         UInt r_dst = iregNo(am_addr->Mam.IR.base, mode64);
+
+         if (i->Min.StoreC.sz == 4)
+            p = mkFormI(p, 0x38, r_dst, r_src, idx);
+         else
+            p = mkFormI(p, 0x3C, r_dst, r_src, idx);
+         goto done;
+      }
+      case Min_Cas: {
+         /* Compare-and-swap via an LL/SC sequence.  Afterwards the
+            caller can test old == expd for success: on a value mismatch
+            we branch out with old unchanged (!= expd); on a match, old
+            is bumped by 1 so that a failed sc still leaves old != expd,
+            and movn restores old = expd only when the sc succeeded
+            (sc writes its success flag back into the data register). */
+         if (i->Min.Cas.sz != 8 && i->Min.Cas.sz != 4)
+            goto bad;
+         UInt old  = iregNo(i->Min.Cas.old, mode64);
+         UInt addr = iregNo(i->Min.Cas.addr, mode64);
+         UInt expd = iregNo(i->Min.Cas.expd, mode64);
+         UInt data = iregNo(i->Min.Cas.data, mode64);
+         Bool sz8  = toBool(i->Min.Cas.sz == 8);
+
+         /*
+          * ll(d)    old,  0(addr)
+          * bne      old,  expd, end
+          * nop
+          * (d)addiu old,  old,  1
+          * sc(d)    data, 0(addr)
+          * movn     old,  expd, data
+          * end:
+          */
+         // ll(d) old, 0(addr)
+         p = mkFormI(p, sz8 ? 0x34 : 0x30, addr, old, 0);
+         // bne  old,  expd, end
+         p = mkFormI(p, 5, old, expd, 4);
+         // nop
+         p = mkFormR(p, 0, 0, 0, 0, 0, 0);
+         // (d)addiu old,  old,  1
+         p = mkFormI(p, sz8 ? 25 : 9, old, old, 1);
+         // sc(d)  data, 0(addr)
+         p = mkFormI(p, sz8 ? 0x3C : 0x38, addr, data, 0);
+         // movn old,  expd, data
+         p = mkFormR(p, 0, expd, data, old, 0, 0xb);
+
+         goto done;
+      }
+      case Min_RdWrLR: {
+         /* Read or write the link register $31 (ra): wrLR copies gpr
+            into $31, otherwise $31 is copied into gpr. */
+         UInt reg = iregNo(i->Min.RdWrLR.gpr, mode64);
+         Bool wrLR = i->Min.RdWrLR.wrLR;
+         if (wrLR)
+            p = mkMoveReg(p, 31, reg);
+         else
+            p = mkMoveReg(p, reg, 31);
+         goto done;
+      }
+
+      /* Floating point */
+      case Min_FpLdSt: {
+         MIPSAMode *am_addr = i->Min.FpLdSt.addr;
+         UChar sz = i->Min.FpLdSt.sz;
+         vassert(sz == 4 || sz == 8);
+         if (sz == 4) {
+            UInt f_reg = fregNo(i->Min.FpLdSt.reg, mode64);
+            if (i->Min.FpLdSt.isLoad) {
+               if (am_addr->tag == Mam_IR)
+                  p = doAMode_IR(p, 0x31, f_reg, am_addr, mode64);
+               else if (am_addr->tag == Mam_RR)
+                  p = doAMode_RR(p, 0x31, f_reg, am_addr, mode64);
+            } else {
+               if (am_addr->tag == Mam_IR)
+                  p = doAMode_IR(p, 0x39, f_reg, am_addr, mode64);
+               else if (am_addr->tag == Mam_RR)
+                  p = doAMode_RR(p, 0x39, f_reg, am_addr, mode64);
+            }
+         } else if (sz == 8) {
+            UInt f_reg = dregNo(i->Min.FpLdSt.reg);
+            if (i->Min.FpLdSt.isLoad) {
+               if (am_addr->tag == Mam_IR) {
+                  p = doAMode_IR(p, 0x35, f_reg, am_addr, mode64);
+               } else if (am_addr->tag == Mam_RR) {
+                  p = doAMode_RR(p, 0x35, f_reg, am_addr, mode64);
+               }
+            } else {
+               if (am_addr->tag == Mam_IR) {
+                  p = doAMode_IR(p, 0x3d, f_reg, am_addr, mode64);
+               } else if (am_addr->tag == Mam_RR) {
+                  p = doAMode_RR(p, 0x3d, f_reg, am_addr, mode64);
+               }
+            }
+         }
+         goto done;
+      }
+
+      case Min_FpUnary: {
+         /* FP unary ops, COP1 (opcode 0x11) with fmt 0x10 = single,
+            0x11 = double.  Functs: 6=mov, 5=abs, 7=neg, 4=sqrt. */
+         switch (i->Min.FpUnary.op) {
+            case Mfp_MOVS: {  /* FP move */
+               UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
+               UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x6);
+               break;
+            }
+            case Mfp_MOVD: {  /* FP move */
+                UInt fr_dst = dregNo(i->Min.FpUnary.dst);
+                UInt fr_src = dregNo(i->Min.FpUnary.src);
+                p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x6);
+                break;
+             }
+            case Mfp_ABSS: {  /* ABS.S */
+               UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
+               UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x5);
+               break;
+            }
+            case Mfp_ABSD: {  /* ABS.D */
+               UInt fr_dst = dregNo(i->Min.FpUnary.dst);
+               UInt fr_src = dregNo(i->Min.FpUnary.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x5);
+               break;
+            }
+            case Mfp_NEGS: {  /* NEG.S */
+               UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
+               UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x7);
+               break;
+            }
+            case Mfp_NEGD: {  /* NEG.D */
+               UInt fr_dst = dregNo(i->Min.FpUnary.dst);
+               UInt fr_src = dregNo(i->Min.FpUnary.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x7);
+               break;
+            }
+            case Mfp_SQRTS: {  /* SQRT.S */
+               UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
+               UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x04);
+               break;
+            }
+            case Mfp_SQRTD: {  /* SQRT.D */
+               UInt fr_dst = dregNo(i->Min.FpUnary.dst);
+               UInt fr_src = dregNo(i->Min.FpUnary.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x04);
+               break;
+            }
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+
+      case Min_FpBinary: {
+         /* FP binary ops, COP1 (opcode 0x11) with fmt 0x10 = single,
+            0x11 = double.  Functs: 0=add, 1=sub, 2=mul, 3=div.  Note
+            the ft field carries srcR and fs carries srcL. */
+         switch (i->Min.FpBinary.op) {
+            case Mfp_ADDS: {
+               UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
+               UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
+               UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
+               p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 0);  /* add.s */
+               break;
+            }
+            case Mfp_SUBS: {
+               UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
+               UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
+               UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
+               p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 1);  /* sub.s */
+               break;
+            }
+            case Mfp_MULS: {
+               UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
+               UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
+               UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
+               p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 2);  /* mul.s */
+               break;
+            }
+            case Mfp_DIVS: {
+               UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
+               UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
+               UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
+               p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 3);  /* div.s */
+               break;
+            }
+            case Mfp_ADDD: {
+               UInt fr_dst = dregNo(i->Min.FpBinary.dst);
+               UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
+               UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
+               p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 0);  /* add.d */
+               break;
+            }
+            case Mfp_SUBD: {
+               UInt fr_dst = dregNo(i->Min.FpBinary.dst);
+               UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
+               UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
+               p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 1);  /* sub.d */
+               break;
+            }
+            case Mfp_MULD: {
+               UInt fr_dst = dregNo(i->Min.FpBinary.dst);
+               UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
+               UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
+               p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 2);  /* mul.d */
+               break;
+            }
+            case Mfp_DIVD: {
+               UInt fr_dst = dregNo(i->Min.FpBinary.dst);
+               UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
+               UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
+               p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 3);  /* div.d */
+               break;
+            }
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+
+      case Min_FpTernary: {
+         /* Fused multiply-add/sub via COP1X (opcode 0x13).  Functs:
+            0x20=madd.s, 0x21=madd.d, 0x28=msub.s, 0x29=msub.d.  Field
+            order is fr(src1), ft(src2), fs(src3), fd(dst). */
+         switch (i->Min.FpTernary.op) {
+            case Mfp_MADDS: {
+               UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64);
+               UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
+               UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
+               UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
+               p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x20);
+               break;
+            }
+            case Mfp_MADDD: {
+               UInt fr_dst = dregNo(i->Min.FpTernary.dst);
+               UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
+               UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
+               UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
+               p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x21);
+               break;
+            }
+            case Mfp_MSUBS: {
+               UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64);
+               UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
+               UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
+               UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
+               p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x28);
+               break;
+            }
+            case Mfp_MSUBD: {
+               UInt fr_dst = dregNo(i->Min.FpTernary.dst);
+               UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
+               UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
+               UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
+               p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x29);
+               break;
+            }
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+
+      case Min_FpConvert: {
+         switch (i->Min.FpConvert.op) {
+            UInt fr_dst, fr_src;
+            case Mfp_CVTSD:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x20);
+               break;
+            case Mfp_CVTSW:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x20);
+               break;
+            case Mfp_CVTWD:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x24);
+               break;
+            case Mfp_CVTWS:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x24);
+               break;
+            case Mfp_CVTDW:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x21);
+               break;
+            case Mfp_CVTDL:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x21);
+               break;
+            case Mfp_CVTDS:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x21);
+               break;
+            case Mfp_CVTSL:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x20);
+               break;
+            case Mfp_CVTLS:
+               if (mode64) {
+                  fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+                  fr_src = dregNo(i->Min.FpConvert.src);
+               } else {
+                  fr_dst = dregNo(i->Min.FpConvert.dst);
+                  fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               }
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x25);
+               break;
+            case Mfp_CVTLD:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x25);
+               break;
+            case Mfp_TRUWS:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0D);
+               break;
+            case Mfp_TRUWD:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0D);
+               break;
+            case Mfp_TRULS:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x09);
+               break;
+            case Mfp_TRULD:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x09);
+               break;
+            case Mfp_CEILWS:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0E);
+               break;
+            case Mfp_CEILWD:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0E);
+               break;
+            case Mfp_CEILLS:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0A);
+               break;
+            case Mfp_CEILLD:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0A);
+               break;
+            case Mfp_ROUNDWS:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0C);
+               break;
+            case Mfp_ROUNDWD:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0C);
+               break;
+            case Mfp_ROUNDLD:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x08);
+               break;
+            case Mfp_FLOORWS:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = fregNo(i->Min.FpConvert.src, mode64);
+               p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0F);
+               break;
+            case Mfp_FLOORWD:
+               fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0F);
+               break;
+            case Mfp_FLOORLD:
+               fr_dst = dregNo(i->Min.FpConvert.dst);
+               fr_src = dregNo(i->Min.FpConvert.src);
+               p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0B);
+               break;
+
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+
+      case Min_FpCompare: {
+         /* FP compare: emit c.cond.d on the two double operands, then
+            extract FP condition bit FCC0 (bit 23 of FCSR) into the
+            integer destination register. */
+         UInt r_dst   = iregNo(i->Min.FpCompare.dst, mode64);
+         UInt fr_srcL = dregNo(i->Min.FpCompare.srcL);
+         UInt fr_srcR = dregNo(i->Min.FpCompare.srcR);
+
+         UInt op;
+         /* BUGFIX: select on FpCompare.op (this case's own union
+            member), not FpConvert.op.  The old code only worked by
+            accident of the union members sharing their leading layout. */
+         switch (i->Min.FpCompare.op) {
+            case Mfp_CMP_UN:
+               op = 1;
+               break;
+            case Mfp_CMP_EQ:
+               op = 2;
+               break;
+            case Mfp_CMP_LT:
+               op = 12;
+               break;
+            case Mfp_CMP_NGT:
+               op = 15;
+               break;
+            default:
+               goto bad;
+         }
+         /* c.cond.d fr_srcL, fr_srcR
+            cfc1     r_dst,   $31
+            srl      r_dst,   r_dst, 23
+            andi     r_dst,   r_dst, 1 */
+         p = mkFormR(p, 0x11, 0x11, fr_srcL, fr_srcR, 0, op + 48);
+         p = mkFormR(p, 0x11, 0x2, r_dst, 31, 0, 0);
+         p = mkFormS(p, 0, r_dst, 0, r_dst, 23, 2);
+         p = mkFormI(p, 12, r_dst, r_dst, 1);
+         goto done;
+      }
+
+      case Min_FpGpMove: {
+         switch (i->Min.FpGpMove.op) {
+            UInt rt, fs;
+            case MFpGpMove_mfc1: {
+               rt = iregNo(i->Min.FpGpMove.dst, mode64);
+               fs = fregNo(i->Min.FpGpMove.src, mode64);
+               p = mkFormR(p, 0x11, 0x0, rt, fs, 0x0, 0x0);
+               break;
+            }
+            case MFpGpMove_dmfc1: {
+               vassert(mode64);
+               rt = iregNo(i->Min.FpGpMove.dst, mode64);
+               fs = fregNo(i->Min.FpGpMove.src, mode64);
+               p = mkFormR(p, 0x11, 0x1, rt, fs, 0x0, 0x0);
+               break;
+            }
+            case MFpGpMove_mtc1: {
+               rt = iregNo(i->Min.FpGpMove.src, mode64);
+               fs = fregNo(i->Min.FpGpMove.dst, mode64);
+               p = mkFormR(p, 0x11, 0x4, rt, fs, 0x0, 0x0);
+               break;
+            }
+            case MFpGpMove_dmtc1: {
+               vassert(mode64);
+               rt = iregNo(i->Min.FpGpMove.src, mode64);
+               fs = fregNo(i->Min.FpGpMove.dst, mode64);
+               p = mkFormR(p, 0x11, 0x5, rt, fs, 0x0, 0x0);
+               break;
+            }
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+
+      case Min_MoveCond: {
+         switch (i->Min.MoveCond.op) {
+            UInt d, s, t;
+            case MFpMoveCond_movns: {
+               d = fregNo(i->Min.MoveCond.dst, mode64);
+               s = fregNo(i->Min.MoveCond.src, mode64);
+               t = iregNo(i->Min.MoveCond.cond, mode64);
+               p = mkFormR(p, 0x11, 0x10, t, s, d, 0x13);
+               break;
+            }
+            case MFpMoveCond_movnd: {
+               d = dregNo(i->Min.MoveCond.dst);
+               s = dregNo(i->Min.MoveCond.src);
+               t = iregNo(i->Min.MoveCond.cond, mode64);
+               p = mkFormR(p, 0x11, 0x11, t, s, d, 0x13);
+               break;
+            }
+            case MMoveCond_movn: {
+               d = iregNo(i->Min.MoveCond.dst, mode64);
+               s = iregNo(i->Min.MoveCond.src, mode64);
+               t = iregNo(i->Min.MoveCond.cond, mode64);
+               p = mkFormR(p, 0, s, t, d, 0, 0xb);
+               break;
+            }
+            default:
+               goto bad;
+         }
+         goto done;
+      }
+
+      case Min_EvCheck: {
+         /* This requires a 32-bit dec/test in 32 mode. */
+         /* We generate:
+               lw      r9, amCounter
+               addiu   r9, r9, -1
+               sw      r9, amCounter
+               bgez    r9, nofail
+               lw      r9, amFailAddr
+               jalr    r9
+               nop
+              nofail:
+         */
+         UChar* p0 = p;
+         /* lw  r9, amCounter */
+         p = do_load_or_store_word32(p, True /*isLoad*/ , /*r*/ 9,
+                                     i->Min.EvCheck.amCounter, mode64);
+         /* addiu r9,r9,-1  (imm 0xFFFF sign-extends to -1) */
+         p = mkFormI(p, 9, 9, 9, 0xFFFF);
+         /* sw r9, amCounter */
+         p = do_load_or_store_word32(p, False /*!isLoad*/ , /*r*/ 9,
+                                     i->Min.EvCheck.amCounter, mode64);
+         /* bgez r9, nofail -- skip the 3 failure insns below */
+         p = mkFormI(p, 1, 9, 1, 3);
+         /* lw/ld r9, amFailAddr */
+         p = do_load_or_store_machine_word(p, True /*isLoad*/ , /*r*/ 9,
+                                           i->Min.EvCheck.amFailAddr, mode64);
+         /* jalr $9 */
+         p = mkFormR(p, 0, 9, 0, 31, 0, 9);  /* p += 4 */
+         p = mkFormR(p, 0, 0, 0, 0, 0, 0);   /* p += 4, branch delay nop */
+         /* nofail: */
+
+         /* Crosscheck: emitted size must agree with evCheckSzB_MIPS */
+         vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0);
+         goto done;
+      }
+
+      case Min_ProfInc: {
+         /* Generate a code template to increment a memory location whose
+            address will be known later as an immediate value. This code
+            template will be patched once the memory location is known.
+            For now we do this with address == 0x65556555. */
+         if (mode64) {
+            /* 64-bit:
+               move r9, 0x6555655565556555ULL
+               ld r8, 0(r9)
+               daddiu r8, r8, 1
+               sd r8, 0(r9) */
+
+            /* move r9, 0x6555655565556555ULL */
+            p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x6555655565556555ULL,
+                                      True /*mode64*/);
+            /* ld r8, 0(r9) */
+            p = mkFormI(p, 55, 9, 8, 0);
+
+            /* daddiu r8, r8, 1 */
+            p = mkFormI(p, 25, 8, 8, 1);
+
+            /* sd r8, 0(r9) */
+            p = mkFormI(p, 63, 9, 8, 0);
+         } else {
+            /* 32-bit:
+               move r9, 0x65556555
+               lw r8, 0(r9)
+               addiu r8, r8, 1         # add least significant word
+               sw r8, 0(r9)
+               sltiu r1, r8, 1         # set carry-in bit
+               lw r8, 4(r9)
+               addu r8, r8, r1
+               sw r8, 4(r9) */
+
+            /* move r9, 0x65556555 */
+            p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x65556555ULL,
+                                      False /*!mode64*/);
+            /* lw r8, 0(r9) */
+            p = mkFormI(p, 35, 9, 8, 0);
+
+            /* addiu r8, r8, 1         # add least significant word */
+            p = mkFormI(p, 9, 8, 8, 1);
+
+            /* sw r8, 0(r9) */
+            p = mkFormI(p, 43, 9, 8, 0);
+
+            /* sltiu r1, r8, 1         # set carry-in bit */
+            p = mkFormI(p, 11, 8, 1, 1);
+
+            /* lw r8, 4(r9) */
+            p = mkFormI(p, 35, 9, 8, 4);
+
+            /* addu r8, r8, r1 */
+            p = mkFormR(p, 0, 8, 1, 8, 0, 33);
+
+            /*  sw r8, 4(r9) */
+            p = mkFormI(p, 43, 9, 8, 4);
+
+         }
+         /* Tell the caller .. */
+         vassert(!(*is_profInc));
+         *is_profInc = True;
+         goto done;
+      }
+
+      default:
+         goto bad;
+
+   }
+
+   bad:
+      vex_printf("\n=> ");
+      ppMIPSInstr(i, mode64);
+      vpanic("emit_MIPSInstr");
+      /* NOTREACHED */ done:
+      vassert(p - &buf[0] <= 128);
+      return p - &buf[0];
+}
+
+/* How big is an event check?  See case for Min_EvCheck in
+   emit_MIPSInstr just above.  That crosschecks what this returns, so
+   we can tell if we're inconsistent. */
+Int evCheckSzB_MIPS (void)
+{
+  UInt kInstrSize = 4;   /* all MIPS instructions are 4 bytes */
+  return 7*kInstrSize;   /* the Min_EvCheck case emits exactly 7 insns */
+}
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+/* Re-point a chained XDirect jump site: verify it currently calls
+   disp_cp_chain_me_EXPECTED, then overwrite it to jump straight to
+   place_to_jump_to.  Returns the address range that must be
+   invalidated in the instruction cache. */
+VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+                                  void* place_to_chain,
+                                  const void* disp_cp_chain_me_EXPECTED,
+                                  const void* place_to_jump_to,
+                                  Bool  mode64 )
+{
+   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
+   /* What we're expecting to see is:
+        move r9, disp_cp_chain_me_to_EXPECTED
+        jalr r9
+        nop
+      viz
+        <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
+        0x120F809   # jalr r9
+        0x00000000  # nop
+   */
+   UChar* p = (UChar*)place_to_chain;
+   vassert(0 == (3 & (HWord)p));
+   vassert(isLoadImm_EXACTLY2or6(p, /*r*/9,
+                                 (UInt)(Addr)disp_cp_chain_me_EXPECTED,
+                                 mode64));
+   vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
+   vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
+   /* And what we want to change it to is either:
+          move r9, place_to_jump_to
+          jalr r9
+          nop
+        viz
+          <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
+          0x120F809   # jalr r9
+          0x00000000  # nop
+
+      The replacement has the same length as the original.
+   */
+
+   p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
+                             (Addr)place_to_jump_to, mode64);
+   p = emit32(p, 0x120F809);
+   p = emit32(p, 0x00000000);
+
+   Int len = p - (UChar*)place_to_chain;
+   vassert(len == (mode64 ? 32 : 16)); /* stay sane */
+   VexInvalRange vir = {(HWord)place_to_chain, len};
+   return vir;
+}
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+/* Inverse of chainXDirect_MIPS: verify the site currently jumps to
+   place_to_jump_to_EXPECTED, then rewrite it to call back into
+   disp_cp_chain_me.  Returns the icache-invalidation range. */
+VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+                                    void* place_to_unchain,
+                                    const void* place_to_jump_to_EXPECTED,
+                                    const void* disp_cp_chain_me,
+                                    Bool  mode64 )
+{
+   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
+   /* What we're expecting to see is:
+        move r9, place_to_jump_to_EXPECTED
+        jalr r9
+        nop
+      viz
+        <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
+        0x120F809   # jalr r9
+        0x00000000  # nop
+   */
+   UChar* p = (UChar*)place_to_unchain;
+   vassert(0 == (3 & (HWord)p));
+   vassert(isLoadImm_EXACTLY2or6(p, /*r*/ 9,
+                                 (Addr)place_to_jump_to_EXPECTED,
+                                 mode64));
+   vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
+   vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
+   /* And what we want to change it to is:
+        move r9, disp_cp_chain_me
+        jalr r9
+        nop
+      viz
+        <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
+        0x120F809   # jalr r9
+        0x00000000  # nop
+      The replacement has the same length as the original.
+   */
+   p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
+                             (Addr)disp_cp_chain_me, mode64);
+   p = emit32(p, 0x120F809);
+   p = emit32(p, 0x00000000);
+
+   Int len = p - (UChar*)place_to_unchain;
+   vassert(len == (mode64 ? 32 : 16)); /* stay sane */
+   VexInvalRange vir = {(HWord)place_to_unchain, len};
+   return vir;
+}
+
+/* Patch the counter address into a profile inc point, as previously
+   created by the Min_ProfInc case for emit_MIPSInstr.  Verifies the
+   dummy-immediate template is in place, rewrites the load-immediate of
+   r9 with the real counter address, and returns the icache range that
+   must be invalidated. */
+VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+                                  void*  place_to_patch,
+                                  const ULong* location_of_counter,
+                                  Bool mode64 )
+{
+   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
+   if (mode64) {
+      vassert(sizeof(ULong*) == 8);
+   } else {
+      vassert(sizeof(ULong*) == 4);
+   }
+   UChar* p = (UChar*)place_to_patch;
+   vassert(0 == (3 & (HWord)p));
+   /* Check the Min_ProfInc template: load of the 0x65556555... dummy
+      address into r9, followed by the fixed load/inc/store sequence. */
+   vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9,
+                                 mode64 ? 0x6555655565556555ULL : 0x65556555,
+                                 mode64));
+
+   if (mode64) {
+      vassert(fetch32(p + 24 + 0) == 0xDD280000);   /* ld r8, 0(r9) */
+      vassert(fetch32(p + 24 + 4) == 0x65080001);   /* daddiu r8, r8, 1 */
+      vassert(fetch32(p + 24 + 8) == 0xFD280000);   /* sd r8, 0(r9) */
+   } else {
+      vassert(fetch32(p + 8 + 0) == 0x8D280000);    /* lw r8, 0(r9) */
+      vassert(fetch32(p + 8 + 4) == 0x25080001);    /* addiu r8, r8, 1 */
+      vassert(fetch32(p + 8 + 8) == 0xAD280000);    /* sw r8, 0(r9) */
+      vassert(fetch32(p + 8 + 12) == 0x2d010001);   /* sltiu r1, r8, 1 */
+      vassert(fetch32(p + 8 + 16) == 0x8d280004);   /* lw r8, 4(r9) */
+      vassert(fetch32(p + 8 + 20) == 0x01014021);   /* addu r8, r8, r1 */
+      vassert(fetch32(p + 8 + 24) == 0xad280004);   /* sw r8, 4(r9) */
+   }
+
+   p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
+                             (Addr)location_of_counter, mode64);
+
+   /* BUGFIX: invalidate the bytes actually rewritten -- the load-imm
+      sequence starting at place_to_patch (8 bytes in 32-bit mode, 24 in
+      64-bit mode) -- instead of 8 bytes starting just past it.  This
+      matches what chainXDirect_MIPS / unchainXDirect_MIPS do. */
+   VexInvalRange vir = {(HWord)place_to_patch,
+                        (HWord)((UChar*)p - (UChar*)place_to_patch)};
+   return vir;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                    host_mips_defs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_mips_defs.h b/VEX/priv/host_mips_defs.h
new file mode 100644
index 0000000..500c97f
--- /dev/null
+++ b/VEX/priv/host_mips_defs.h
@@ -0,0 +1,743 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                  host_mips_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 RT-RK
+      mips-valgrind@rt-rk.com
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_HOST_MIPS_DEFS_H
+#define __VEX_HOST_MIPS_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex.h"             /* VexArch */
+#include "host_generic_regs.h"  /* HReg */
+
+
+/* --------- Registers. --------- */
+
+#define ST_IN static inline
+
+/* Build an HReg for a general-purpose register.  _enc is the hardware
+   register encoding; _ix64 / _ix32 are the register-allocator index to
+   use in 64-bit and 32-bit mode respectively. */
+#define GPR(_mode64, _enc, _ix64, _ix32) \
+  mkHReg(False,  (_mode64) ? HRcInt64 : HRcInt32, \
+         (_enc), (_mode64) ? (_ix64) : (_ix32))
+
+/* Same, for a floating-point register. */
+#define FR(_mode64, _enc, _ix64, _ix32) \
+  mkHReg(False,  (_mode64) ? HRcFlt64 : HRcFlt32, \
+         (_enc), (_mode64) ? (_ix64) : (_ix32))
+
+/* Same, for a double register; always register class HRcFlt64,
+   regardless of mode. */
+#define DR(_mode64, _enc, _ix64, _ix32) \
+  mkHReg(False,  HRcFlt64, \
+         (_enc), (_mode64) ? (_ix64) : (_ix32))
+
+/* The ordering of these definitions fixes the allocator index of each
+   register (second/third argument pairs).  NOTE(review): this first
+   group appears to be the allocatable set -- confirm against the
+   universe-construction code in host_mips_defs.c. */
+ST_IN HReg hregMIPS_GPR16 ( Bool mode64 ) { return GPR(mode64, 16,  0,  0); }
+ST_IN HReg hregMIPS_GPR17 ( Bool mode64 ) { return GPR(mode64, 17,  1,  1); }
+ST_IN HReg hregMIPS_GPR18 ( Bool mode64 ) { return GPR(mode64, 18,  2,  2); }
+ST_IN HReg hregMIPS_GPR19 ( Bool mode64 ) { return GPR(mode64, 19,  3,  3); }
+ST_IN HReg hregMIPS_GPR20 ( Bool mode64 ) { return GPR(mode64, 20,  4,  4); }
+ST_IN HReg hregMIPS_GPR21 ( Bool mode64 ) { return GPR(mode64, 21,  5,  5); }
+ST_IN HReg hregMIPS_GPR22 ( Bool mode64 ) { return GPR(mode64, 22,  6,  6); }
+
+ST_IN HReg hregMIPS_GPR12 ( Bool mode64 ) { return GPR(mode64, 12,  7,  7); }
+ST_IN HReg hregMIPS_GPR13 ( Bool mode64 ) { return GPR(mode64, 13,  8,  8); }
+ST_IN HReg hregMIPS_GPR14 ( Bool mode64 ) { return GPR(mode64, 14,  9,  9); }
+ST_IN HReg hregMIPS_GPR15 ( Bool mode64 ) { return GPR(mode64, 15, 10, 10); }
+ST_IN HReg hregMIPS_GPR24 ( Bool mode64 ) { return GPR(mode64, 24, 11, 11); }
+
+/* Even-numbered FP registers only. */
+ST_IN HReg hregMIPS_F16   ( Bool mode64 ) { return FR (mode64, 16, 12, 12); }
+ST_IN HReg hregMIPS_F18   ( Bool mode64 ) { return FR (mode64, 18, 13, 13); }
+ST_IN HReg hregMIPS_F20   ( Bool mode64 ) { return FR (mode64, 20, 14, 14); }
+ST_IN HReg hregMIPS_F22   ( Bool mode64 ) { return FR (mode64, 22, 15, 15); }
+ST_IN HReg hregMIPS_F24   ( Bool mode64 ) { return FR (mode64, 24, 16, 16); }
+ST_IN HReg hregMIPS_F26   ( Bool mode64 ) { return FR (mode64, 26, 17, 17); }
+ST_IN HReg hregMIPS_F28   ( Bool mode64 ) { return FR (mode64, 28, 18, 18); }
+ST_IN HReg hregMIPS_F30   ( Bool mode64 ) { return FR (mode64, 30, 19, 19); }
+
+// DRs are only allocatable in 32-bit mode, so the 64-bit index numbering
+// doesn't advance here.
+// The hardware encodings are the even singles 0,2,...,14 -- presumably
+// because each double overlays a pair of 32-bit FP registers.
+ST_IN HReg hregMIPS_D0    ( Bool mode64 ) { vassert(!mode64);
+                                            return DR (mode64,  0,  0, 20); }
+ST_IN HReg hregMIPS_D1    ( Bool mode64 ) { vassert(!mode64);
+                                            return DR (mode64,  2,  0, 21); }
+ST_IN HReg hregMIPS_D2    ( Bool mode64 ) { vassert(!mode64);
+                                            return DR (mode64,  4,  0, 22); }
+ST_IN HReg hregMIPS_D3    ( Bool mode64 ) { vassert(!mode64);
+                                            return DR (mode64,  6,  0, 23); }
+ST_IN HReg hregMIPS_D4    ( Bool mode64 ) { vassert(!mode64);
+                                            return DR (mode64,  8,  0, 24); }
+ST_IN HReg hregMIPS_D5    ( Bool mode64 ) { vassert(!mode64);
+                                            return DR (mode64, 10,  0, 25); }
+ST_IN HReg hregMIPS_D6    ( Bool mode64 ) { vassert(!mode64);
+                                            return DR (mode64, 12,  0, 26); }
+ST_IN HReg hregMIPS_D7    ( Bool mode64 ) { vassert(!mode64);
+                                            return DR (mode64, 14,  0, 27); }
+
+// HI and LO use pseudo-encodings 33 and 34, one past the 32
+// architectural FP registers.  NOTE(review): they are carried in the
+// float register class (FR) -- confirm that is intentional.
+ST_IN HReg hregMIPS_HI    ( Bool mode64 ) { return FR (mode64, 33, 20, 28); }
+ST_IN HReg hregMIPS_LO    ( Bool mode64 ) { return FR (mode64, 34, 21, 29); }
+
+/* Remaining GPRs -- by standard MIPS naming these include $zero (0),
+   $at (1), the result/argument registers, $sp (29) and $ra (31);
+   presumably non-allocatable.  Index numbering continues from above. */
+ST_IN HReg hregMIPS_GPR0  ( Bool mode64 ) { return GPR(mode64,  0, 22, 30); }
+ST_IN HReg hregMIPS_GPR1  ( Bool mode64 ) { return GPR(mode64,  1, 23, 31); }
+ST_IN HReg hregMIPS_GPR2  ( Bool mode64 ) { return GPR(mode64,  2, 24, 32); }
+ST_IN HReg hregMIPS_GPR3  ( Bool mode64 ) { return GPR(mode64,  3, 25, 33); }
+ST_IN HReg hregMIPS_GPR4  ( Bool mode64 ) { return GPR(mode64,  4, 26, 34); }
+ST_IN HReg hregMIPS_GPR5  ( Bool mode64 ) { return GPR(mode64,  5, 27, 35); }
+ST_IN HReg hregMIPS_GPR6  ( Bool mode64 ) { return GPR(mode64,  6, 28, 36); }
+ST_IN HReg hregMIPS_GPR7  ( Bool mode64 ) { return GPR(mode64,  7, 29, 37); }
+ST_IN HReg hregMIPS_GPR8  ( Bool mode64 ) { return GPR(mode64,  8, 30, 38); }
+ST_IN HReg hregMIPS_GPR9  ( Bool mode64 ) { return GPR(mode64,  9, 31, 39); }
+ST_IN HReg hregMIPS_GPR10 ( Bool mode64 ) { return GPR(mode64, 10, 32, 40); }
+ST_IN HReg hregMIPS_GPR11 ( Bool mode64 ) { return GPR(mode64, 11, 33, 41); }
+ST_IN HReg hregMIPS_GPR23 ( Bool mode64 ) { return GPR(mode64, 23, 34, 42); }
+ST_IN HReg hregMIPS_GPR25 ( Bool mode64 ) { return GPR(mode64, 25, 35, 43); }
+ST_IN HReg hregMIPS_GPR29 ( Bool mode64 ) { return GPR(mode64, 29, 36, 44); }
+ST_IN HReg hregMIPS_GPR31 ( Bool mode64 ) { return GPR(mode64, 31, 37, 45); }
+
+#undef ST_IN
+#undef GPR
+#undef FR
+#undef DR
+
+/* Dedicated-role registers for generated code. */
+#define GuestStatePointer(_mode64)     hregMIPS_GPR23(_mode64)
+/* NOTE(review): hregMIPS_GPR30 is not among the ST_IN definitions
+   above -- confirm it is defined elsewhere, otherwise any use of
+   StackFramePointer fails to compile. */
+#define StackFramePointer(_mode64)     hregMIPS_GPR30(_mode64)
+#define StackPointer(_mode64)          hregMIPS_GPR29(_mode64)
+
+/* Num registers used for function calls */
+#if defined(VGP_mips32_linux)
+  /* a0, a1, a2, a3 */
+# define MIPS_N_REGPARMS 4
+#else
+  /* a0, a1, a2, a3, a4, a5, a6, a7 */
+# define MIPS_N_REGPARMS 8
+#endif
+
+extern void ppHRegMIPS ( HReg, Bool );
+
+
+/* --------- Condition codes. ---------
+   NOTE(review): the heading previously said "Intel encoding"; the
+   names below follow ARM condition-code conventions (EQ/NE/HS/LO/...),
+   so that looks like a copy-paste leftover from another backend. */
+typedef enum {
+   MIPScc_EQ = 0,   /* equal */
+   MIPScc_NE = 1,   /* not equal */
+
+   MIPScc_HS = 2,   /* >=u (higher or same) */
+   MIPScc_LO = 3,   /* <u  (lower) */
+
+   MIPScc_MI = 4,   /* minus (negative) */
+   MIPScc_PL = 5,   /* plus (zero or +ve) */
+
+   MIPScc_VS = 6,   /* overflow */
+   MIPScc_VC = 7,   /* no overflow */
+
+   MIPScc_HI = 8,   /* >u   (higher) */
+   MIPScc_LS = 9,   /* <=u  (lower or same) */
+
+   MIPScc_GE = 10,  /* >=s (signed greater or equal) */
+   MIPScc_LT = 11,  /* <s  (signed less than) */
+
+   MIPScc_GT = 12,  /* >s  (signed greater) */
+   MIPScc_LE = 13,  /* <=s (signed less or equal) */
+
+   MIPScc_AL = 14,  /* always (unconditional) */
+   MIPScc_NV = 15   /* never (unconditional): */
+} MIPSCondCode;
+
+extern const HChar *showMIPSCondCode(MIPSCondCode);
+
+/* --------- Memory address expressions (amodes). --------- */
+typedef enum {
+   Mam_IR,        /* Immediate (signed 16-bit) + Reg */
+   Mam_RR         /* Reg1 + Reg2 */
+} MIPSAMode;
+
+extern MIPSAMode *MIPSAMode_IR(Int, HReg);
+extern MIPSAMode *MIPSAMode_RR(HReg, HReg);
+
+extern MIPSAMode *dopyMIPSAMode(MIPSAMode *);
+extern MIPSAMode *nextMIPSAModeFloat(MIPSAMode *);
+extern MIPSAMode *nextMIPSAModeInt(MIPSAMode *);
+
+extern void ppMIPSAMode(MIPSAMode *, Bool);
+
+/* --------- Operand, which can be a reg or a u16/s16. --------- */
+/* ("RH" == "Register or Halfword immediate") */
+typedef enum {
+   Mrh_Imm,
+   Mrh_Reg
+} MIPSRHTag;
+
+typedef struct {
+   MIPSRHTag tag;
+   union {
+      struct {
+         Bool syned;     /* True => imm16 is treated as signed */
+         UShort imm16;   /* 16-bit immediate operand */
+      } Imm;
+      struct {
+         HReg reg;       /* register operand */
+      } Reg;
+   } Mrh;
+} MIPSRH;
+
+extern void ppMIPSRH(MIPSRH *, Bool);
+
+extern MIPSRH *MIPSRH_Imm(Bool, UShort);
+extern MIPSRH *MIPSRH_Reg(HReg);
+
+/* --------- Instructions. --------- */
+
+/*Tags for operations*/
+
+/* --------- */
+typedef enum {
+   Mun_CLO,    /* count leading ones */
+   Mun_CLZ,    /* count leading zeroes */
+   Mun_DCLO,   /* count leading ones, doubleword */
+   Mun_DCLZ,   /* count leading zeroes, doubleword */
+   Mun_NOP     /* no-op; trailing comma removed for consistency with
+                  the other enums in this file */
+} MIPSUnaryOp;
+
+extern const HChar *showMIPSUnaryOp(MIPSUnaryOp);
+/* --------- */
+
+/* --------- */
+
+typedef enum {
+   Malu_INVALID,
+   Malu_ADD, Malu_SUB,
+   Malu_AND, Malu_OR, Malu_NOR, Malu_XOR,
+   Malu_DADD, Malu_DSUB,   /* doubleword (64-bit) add/sub */
+   Malu_SLT                /* set on less than */
+} MIPSAluOp;
+
+extern const HChar *showMIPSAluOp(MIPSAluOp,
+                            Bool /* is the 2nd operand an immediate? */ );
+
+/* --------- */
+typedef enum {
+   Mshft_INVALID,
+   Mshft_SLL, Mshft_SRL,   /* shift left / right logical */
+   Mshft_SRA               /* shift right arithmetic */
+} MIPSShftOp;
+
+extern const HChar *showMIPSShftOp(MIPSShftOp,
+                             Bool /* is the 2nd operand an immediate? */ ,
+                             Bool /* is this a 32bit or 64bit op? */ );
+
+/* --------- */
+typedef enum {
+   Macc_ADD,   /* multiply-add */
+   Macc_SUB    /* multiply-subtract */
+} MIPSMaccOp;
+
+extern const HChar *showMIPSMaccOp(MIPSMaccOp, Bool);
+/* --------- */
+
+/* ----- Instruction tags ----- */
+typedef enum {
+   Min_LI,         /* load word (32/64-bit) immediate (fake insn) */
+   Min_Alu,        /* word add/sub/and/or/xor/nor/others? */
+   Min_Shft,       /* word sll/srl/sra */
+   Min_Unary,      /* clo, clz, nop, neg */
+
+   Min_Cmp,        /* word compare (fake insn) */
+
+   Min_Mul,        /* widening/non-widening multiply */
+   Min_Div,        /* div */
+
+   Min_Call,       /* call to address in register */
+
+   /* The following 5 insns are mandated by translation chaining */
+   Min_XDirect,    /* direct transfer to GA */
+   Min_XIndir,     /* indirect transfer to GA */
+   Min_XAssisted,  /* assisted transfer to GA */
+   Min_EvCheck,    /* Event check */
+   Min_ProfInc,    /* 64-bit profile counter increment */
+
+   Min_RdWrLR,     /* Read/Write Link Register */
+   Min_Mthi,       /* Move to HI from GP register */
+   Min_Mtlo,       /* Move to LO from GP register */
+   Min_Mfhi,       /* Move from HI to GP register */
+   Min_Mflo,       /* Move from LO to GP register */
+   Min_Macc,       /* Multiply and accumulate */
+
+   Min_Load,       /* zero-extending load a 8|16|32 bit value from mem */
+   Min_Store,      /* store a 8|16|32 bit value to mem */
+   Min_Cas,        /* compare and swap */
+   Min_LoadL,      /* mips Load Linked Word - LL */
+   Min_StoreC,     /* mips Store Conditional Word - SC */
+
+   Min_FpUnary,    /* FP unary op */
+   Min_FpBinary,   /* FP binary op */
+   Min_FpTernary,  /* FP ternary op */
+   Min_FpConvert,  /* FP conversion op */
+   Min_FpMulAcc,   /* FP multiply-accumulate style op */
+   Min_FpLdSt,     /* FP load/store */
+   Min_FpSTFIW,    /* stfiwx -- NOTE(review): PPC mnemonic, apparently
+                      inherited from the PPC backend; confirm meaning */
+   Min_FpRSP,      /* FP round IEEE754 double to IEEE754 single */
+   Min_FpCftI,     /* fcfid/fctid/fctiw -- NOTE(review): PPC mnemonics,
+                      apparently inherited; confirm meaning */
+   Min_FpCMov,     /* FP floating point conditional move */
+   Min_MtFCSR,     /* set FCSR register */
+   Min_MfFCSR,     /* get FCSR register */
+   Min_FpCompare,  /* FP compare, generating value into int reg */
+
+   Min_FpGpMove,   /* Move from/to fpr to/from gpr */
+   Min_MoveCond    /* Move Conditional */
+} MIPSInstrTag;
+
+/* --------- */
+typedef enum {
+   Mfp_INVALID,
+
+   /* Ternary */
+   Mfp_MADDD, Mfp_MSUBD,
+   Mfp_MADDS, Mfp_MSUBS,
+
+   /* Binary */
+   Mfp_ADDD, Mfp_SUBD, Mfp_MULD, Mfp_DIVD,
+   Mfp_ADDS, Mfp_SUBS, Mfp_MULS, Mfp_DIVS,
+
+   /* Unary */
+   Mfp_SQRTS, Mfp_SQRTD,
+   Mfp_ABSS, Mfp_ABSD, Mfp_NEGS, Mfp_NEGD, Mfp_MOVS, Mfp_MOVD,
+
+   /* FP convert.  Naming: Mfp_CVTxy converts to format x from format y
+      (S=single, D=double, W=word, L=long); TRU/CEIL/ROUND/FLOOR are
+      FP-to-integer conversions with the corresponding rounding. */
+   Mfp_CVTSD, Mfp_CVTSW, Mfp_CVTWD,
+   Mfp_CVTWS, Mfp_CVTDL, Mfp_CVTSL, Mfp_CVTLS, Mfp_CVTLD, Mfp_TRULS, Mfp_TRULD,
+   Mfp_TRUWS, Mfp_TRUWD, Mfp_FLOORWS, Mfp_FLOORWD, Mfp_ROUNDWS, Mfp_ROUNDWD,
+   Mfp_CVTDW, Mfp_CEILWS, Mfp_CEILWD, Mfp_CEILLS, Mfp_CEILLD, Mfp_CVTDS,
+   Mfp_ROUNDLD, Mfp_FLOORLD,
+
+   /* FP compare: unordered, equal, less-than, not-greater-than */
+   Mfp_CMP_UN, Mfp_CMP_EQ, Mfp_CMP_LT, Mfp_CMP_NGT
+
+} MIPSFpOp;
+
+extern const HChar *showMIPSFpOp(MIPSFpOp);
+
+/* Move between FP registers and general-purpose registers
+   (used by the Min_FpGpMove instruction form). */
+typedef enum {
+   MFpGpMove_mfc1,   /* Move Word From Floating Point - MIPS32 */
+   MFpGpMove_dmfc1,  /* Doubleword Move from Floating Point - MIPS64 */
+   MFpGpMove_mtc1,   /* Move Word to Floating Point - MIPS32 */
+   MFpGpMove_dmtc1   /* Doubleword Move to Floating Point - MIPS64 */
+} MIPSFpGpMoveOp;
+
+extern const HChar *showMIPSFpGpMoveOp ( MIPSFpGpMoveOp );
+
+/* Conditional-move variants (used by the Min_MoveCond form). */
+typedef enum {
+   MFpMoveCond_movns,  /* FP Move Conditional on Not Zero - MIPS32 */
+   MFpMoveCond_movnd,  /* FP Move Conditional on Not Zero, double variant
+                          of movns above -- TODO confirm in emitter */
+   MMoveCond_movn      /* Move Conditional on Not Zero */
+} MIPSMoveCondOp;
+
+extern const HChar *showMIPSMoveCondOp ( MIPSMoveCondOp );
+
+/*--------- Structure for instructions ----------*/
+/* Destinations are on the LEFT (first operand) */
+
+/* Tagged union describing one host MIPS instruction; |tag| selects
+   which member of |Min| is valid.  Built via the MIPSInstr_* ctors
+   declared below. */
+typedef struct {
+   MIPSInstrTag tag;
+   union {
+      /* Get a 32/64-bit literal into a register.
+         May turn into a number of real insns. */
+      struct {
+         HReg dst;
+         ULong imm;
+      } LI;
+      /* Integer add/sub/and/or/xor.  Limitations:
+         - For add, the immediate, if it exists, is a signed 16.
+         - For sub, the immediate, if it exists, is a signed 16
+         which may not be -32768, since no such instruction 
+         exists, and so we have to emit addi with +32768, but 
+         that is not possible.
+         - For and/or/xor,  the immediate, if it exists, 
+         is an unsigned 16.
+       */
+      struct {
+         MIPSAluOp op;
+         HReg dst;
+         HReg srcL;
+         MIPSRH *srcR;
+      } Alu;
+      /* Integer shl/shr/sar.
+         Limitations: the immediate, if it exists,
+         is a signed 5-bit value between 1 and 31 inclusive.
+       */
+      struct {
+         MIPSShftOp op;
+         Bool sz32;  /* mode64 has both 32 and 64bit shft */
+         HReg dst;
+         HReg srcL;
+         MIPSRH *srcR;
+      } Shft;
+      /* Clz, Clo, nop */
+      struct {
+         MIPSUnaryOp op;
+         HReg dst;
+         HReg src;
+      } Unary;
+      /* Word compare. Fake instruction, used for basic block ending */
+      struct {
+         Bool syned;
+         Bool sz32;
+         HReg dst;
+         HReg srcL;
+         HReg srcR;
+
+         MIPSCondCode cond;
+      } Cmp;
+      struct {
+         Bool widening;  /* True => widening, False => non-widening */
+         Bool syned;     /* signed/unsigned - meaningless if widening = False */
+         Bool sz32;
+         HReg dst;
+         HReg srcL;
+         HReg srcR;
+      } Mul;
+      /* Integer divide.  Note there is no dst field: on MIPS, div/divu
+         deposit quotient/remainder in the LO/HI pair, read back via
+         Min_Mflo / Min_Mfhi. */
+      struct {
+         Bool syned;  /* signed or unsigned divide */
+         Bool sz32;
+         HReg srcL;
+         HReg srcR;
+      } Div;
+      /* Pseudo-insn.  Call target (an absolute address), on given
+         condition (which could be Mcc_ALWAYS).  argiregs indicates
+         which of $4 .. $7 (mips32) or $4 .. $11 (mips64)
+         carries argument values for this call,
+         using a bit mask (1<<N is set if $N holds an arg, for N in
+         $4 .. $7 or $4 .. $11 inclusive). 
+         If cond is != Mcc_ALWAYS, src is checked.
+         Otherwise, unconditional call */
+      struct {
+         MIPSCondCode cond;
+         Addr64 target;
+         UInt argiregs;
+         HReg src;
+         RetLoc rloc;     /* where the return value will be */
+      } Call;
+      /* Update the guest EIP value, then exit requesting to chain
+         to it.  May be conditional.  Urr, use of Addr32 implicitly
+         assumes that wordsize(guest) == wordsize(host). */
+      struct {
+         Addr64       dstGA;     /* next guest address */
+         MIPSAMode*   amPC;      /* amode in guest state for PC */
+         MIPSCondCode cond;      /* can be MIPScc_AL */
+         Bool         toFastEP;  /* chain to the slow or fast point? */
+      } XDirect;
+      /* Boring transfer to a guest address not known at JIT time.
+         Not chainable.  May be conditional. */
+      struct {
+         HReg        dstGA;
+         MIPSAMode*   amPC;
+         MIPSCondCode cond; /* can be MIPScc_AL */
+      } XIndir;
+      /* Assisted transfer to a guest address, most general case.
+         Not chainable.  May be conditional. */
+      struct {
+         HReg        dstGA;
+         MIPSAMode*   amPC;
+         MIPSCondCode cond; /* can be MIPScc_AL */
+         IRJumpKind  jk;
+      } XAssisted;
+      /* Zero extending loads.  Dst size is host word size */
+      struct {
+         UChar sz;   /* 1|2|4|8 */
+         HReg dst;
+         MIPSAMode *src;
+      } Load;
+      /* 64/32/16/8 bit stores */
+      struct {
+         UChar sz;   /* 1|2|4|8 */
+         MIPSAMode *dst;
+         HReg src;
+      } Store;
+      /* Load Linked (LL) -- see Min_LoadL above. */
+      struct {
+         UChar sz;   /* 4|8 */
+         HReg dst;
+         MIPSAMode *src;
+      } LoadL;
+      /* Compare-and-swap. */
+      struct {
+         UChar sz;   /* 4|8 */
+         HReg  old;   /* presumably receives the previous memory value
+                         -- TODO confirm against the emitter */
+         HReg  addr;  /* location operated on */
+         HReg  expd;  /* expected value */
+         HReg  data;  /* new value to store on match */
+      } Cas;
+      /* Store Conditional (SC) -- see Min_StoreC above. */
+      struct {
+         UChar sz;   /* 4|8 */
+         MIPSAMode *dst;
+         HReg src;
+      } StoreC;
+      /* Move from HI/LO register to GP register. */
+      struct {
+         HReg dst;
+      } MfHL;
+
+      /* Move to HI/LO register from GP register. */
+      struct {
+         HReg src;
+      } MtHL;
+
+      /* Read/Write Link Register */
+      struct {
+         Bool wrLR;
+         HReg gpr;
+      } RdWrLR;
+
+      /* MIPS Multiply and accumulate instructions.
+         No dst: presumably accumulates into the HI/LO pair
+         -- TODO confirm in emitter. */
+      struct {
+         MIPSMaccOp op;
+         Bool syned;
+
+         HReg srcL;
+         HReg srcR;
+      } Macc;
+
+      /* MIPS Floating point */
+      struct {
+         MIPSFpOp op;
+         HReg dst;
+         HReg src;
+      } FpUnary;
+      struct {
+         MIPSFpOp op;
+         HReg dst;
+         HReg srcL;
+         HReg srcR;
+      } FpBinary;
+      struct {
+         MIPSFpOp op;
+         HReg dst;
+         HReg src1;
+         HReg src2;
+         HReg src3;
+      } FpTernary;
+      struct {
+         MIPSFpOp op;
+         HReg dst;
+         HReg srcML;
+         HReg srcMR;
+         HReg srcAcc;
+      } FpMulAcc;
+      struct {
+         Bool isLoad;
+         UChar sz;   /* only 4 (IEEE single) or 8 (IEEE double) */
+         HReg reg;
+         MIPSAMode *addr;
+      } FpLdSt;
+
+      struct {
+         MIPSFpOp op;
+         HReg dst;
+         HReg src;
+      } FpConvert;
+      struct {
+         MIPSFpOp op;
+         HReg dst;
+         HReg srcL;
+         HReg srcR;
+         UChar cond1;
+      } FpCompare;
+      /* Move from GP register to FCSR register. */
+      struct {
+         HReg src;
+      } MtFCSR;
+      /* Move from FCSR register to GP register. */
+      struct {
+         HReg dst;
+      } MfFCSR;
+      /* Event check: counter decrement + failure branch amodes. */
+      struct {
+         MIPSAMode* amCounter;
+         MIPSAMode* amFailAddr;
+      } EvCheck;
+      struct {
+         /* No fields.  The address of the counter to inc is
+            installed later, post-translation, by patching it in,
+            as it is not known at translation time. */
+      } ProfInc;
+
+      /* Move from/to fpr to/from gpr */
+      struct {
+         MIPSFpGpMoveOp op;
+         HReg dst;
+         HReg src;
+      } FpGpMove;
+      struct {
+         MIPSMoveCondOp op;
+         HReg dst;
+         HReg src;
+         HReg cond;
+      } MoveCond;
+
+   } Min;
+} MIPSInstr;
+
+extern MIPSInstr *MIPSInstr_LI(HReg, ULong);
+extern MIPSInstr *MIPSInstr_Alu(MIPSAluOp, HReg, HReg, MIPSRH *);
+extern MIPSInstr *MIPSInstr_Shft(MIPSShftOp, Bool sz32, HReg, HReg, MIPSRH *);
+extern MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_Cmp(Bool, Bool, HReg, HReg, HReg, MIPSCondCode);
+
+extern MIPSInstr *MIPSInstr_Mul(Bool syned, Bool hi32, Bool sz32, HReg,
+                                HReg, HReg);
+extern MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg, HReg);
+extern MIPSInstr *MIPSInstr_Madd(Bool, HReg, HReg);
+extern MIPSInstr *MIPSInstr_Msub(Bool, HReg, HReg);
+
+extern MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src,
+                                 Bool mode64);
+extern MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src,
+                                  Bool mode64);
+
+extern MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src,
+                                  Bool mode64);
+extern MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src,
+                                   Bool mode64);
+extern MIPSInstr *MIPSInstr_Cas(UChar sz, HReg old, HReg addr,
+                                HReg expd, HReg data, Bool mode64);
+
+extern MIPSInstr *MIPSInstr_Call ( MIPSCondCode, Addr64, UInt, HReg, RetLoc );
+extern MIPSInstr *MIPSInstr_CallAlways ( MIPSCondCode, Addr64, UInt, RetLoc );
+
+extern MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC,
+                                      MIPSCondCode cond, Bool toFastEP );
+extern MIPSInstr *MIPSInstr_XIndir(HReg dstGA, MIPSAMode* amPC,
+                                     MIPSCondCode cond);
+extern MIPSInstr *MIPSInstr_XAssisted(HReg dstGA, MIPSAMode* amPC,
+                                      MIPSCondCode cond, IRJumpKind jk);
+
+extern MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL,
+                                     HReg srcR);
+extern MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1,
+                                        HReg src2, HReg src3 );
+extern MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL,
+                                      HReg srcR);
+extern MIPSInstr *MIPSInstr_FpMulAcc(MIPSFpOp op, HReg dst, HReg srcML,
+                                     HReg srcMR, HReg srcAcc);
+extern MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg, MIPSAMode *);
+extern MIPSInstr *MIPSInstr_FpSTFIW(HReg addr, HReg data);
+extern MIPSInstr *MIPSInstr_FpRSP(HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_FpCftI(Bool fromI, Bool int32, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_FpCMov(MIPSCondCode, HReg dst, HReg src);
+extern MIPSInstr *MIPSInstr_MtFCSR(HReg src);
+extern MIPSInstr *MIPSInstr_MfFCSR(HReg dst);
+extern MIPSInstr *MIPSInstr_FpCmp(HReg dst, HReg srcL, HReg srcR);
+
+extern MIPSInstr *MIPSInstr_Mfhi(HReg dst);
+extern MIPSInstr *MIPSInstr_Mflo(HReg dst);
+extern MIPSInstr *MIPSInstr_Mthi(HReg src);
+extern MIPSInstr *MIPSInstr_Mtlo(HReg src);
+
+extern MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr);
+
+extern MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst,
+                                       HReg src, HReg cond );
+
+extern MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src );
+
+extern MIPSInstr *MIPSInstr_EvCheck(MIPSAMode* amCounter,
+                                    MIPSAMode* amFailAddr );
+extern MIPSInstr *MIPSInstr_ProfInc( void );
+
+extern void ppMIPSInstr(const MIPSInstr *, Bool mode64);
+
+/* Some functions that insulate the register allocator from details
+   of the underlying instruction set. */
+extern void getRegUsage_MIPSInstr (HRegUsage *, const MIPSInstr *, Bool);
+extern void mapRegs_MIPSInstr     (HRegRemap *, MIPSInstr *, Bool mode64);
+extern Bool isMove_MIPSInstr      (const MIPSInstr *, HReg *, HReg *);
+extern Int        emit_MIPSInstr (/*MB_MOD*/Bool* is_profInc,
+                                  UChar* buf, Int nbuf, const MIPSInstr* i,
+                                  Bool mode64,
+                                  VexEndness endness_host,
+                                  const void* disp_cp_chain_me_to_slowEP,
+                                  const void* disp_cp_chain_me_to_fastEP,
+                                  const void* disp_cp_xindir,
+                                  const void* disp_cp_xassisted );
+
+extern void genSpill_MIPS ( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2,
+                            HReg rreg, Int offset, Bool);
+extern void genReload_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2,
+                            HReg rreg, Int offset, Bool);
+
+extern const RRegUniverse* getRRegUniverse_MIPS ( Bool mode64 );
+
+extern HInstrArray *iselSB_MIPS          ( const IRSB*,
+                                           VexArch,
+                                           const VexArchInfo*,
+                                           const VexAbiInfo*,
+                                           Int offs_Host_EvC_Counter,
+                                           Int offs_Host_EvC_FailAddr,
+                                           Bool chainingAllowed,
+                                           Bool addProfInc,
+                                           Addr max_ga );
+
+/* How big is an event check?  This is kind of a kludge because it
+   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
+   and so assumes that they are both <= 128, and so can use the short
+   offset encoding.  This is all checked with assertions, so in the
+   worst case we will merely assert at startup. */
+extern Int evCheckSzB_MIPS (void);
+
+/* Perform a chaining and unchaining of an XDirect jump. */
+extern VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+                                         void* place_to_chain,
+                                         const void* disp_cp_chain_me_EXPECTED,
+                                         const void* place_to_jump_to,
+                                         Bool  mode64 );
+
+extern VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+                                           void* place_to_unchain,
+                                           const void* place_to_jump_to_EXPECTED,
+                                           const void* disp_cp_chain_me,
+                                           Bool  mode64 );
+
+/* Patch the counter location into an existing ProfInc point. */
+extern VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+                                         void*  place_to_patch,
+                                         const ULong* location_of_counter,
+                                         Bool  mode64 );
+
+#endif /* ndef __VEX_HOST_MIPS_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                    host-mips_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_mips_isel.c b/VEX/priv/host_mips_isel.c
new file mode 100644
index 0000000..213c52a
--- /dev/null
+++ b/VEX/priv/host_mips_isel.c
@@ -0,0 +1,4291 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                  host_mips_isel.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 RT-RK
+      mips-valgrind@rt-rk.com
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "host_generic_regs.h"
+#include "host_generic_simd64.h"  /* for 64-bit SIMD helpers */
+#include "host_mips_defs.h"
+
+/*---------------------------------------------------------*/
+/*--- Register Usage Conventions                        ---*/
+/*---------------------------------------------------------*/
+
+/* Integer Regs
+   ------------
+   ZERO0       Reserved
+   GPR12:22    Allocatable
+   23          GuestStatePointer
+   SP          StackFramePointer
+   RA          LinkRegister */
+
+/* File-scope copy of the current host word-size mode, mirroring
+   ISelEnv.mode64 so small helpers need not thread it through
+   -- presumably set once per superblock; TODO confirm where. */
+static Bool mode64 = False;
+
+/* Host CPU has FPU and 32 dbl. prec. FP registers. */
+static Bool fp_mode64 = False;
+
+/* GPR register class for mips32/64 */
+#define HRcGPR(_mode64) ((_mode64) ? HRcInt64 : HRcInt32)
+
+/* FPR register class for mips32/64 */
+#define HRcFPR(_mode64) ((_mode64) ? HRcFlt64 : HRcFlt32)
+
+/* guest_COND offset: magic constants that must stay in sync with the
+   mips64 (612) / mips32 (448) guest state layouts. */
+#define COND_OFFSET(_mode64) ((_mode64) ? 612 : 448)
+
+/*---------------------------------------------------------*/
+/*--- ISelEnv                                           ---*/
+/*---------------------------------------------------------*/
+
+/* This carries around:
+
+   - A mapping from IRTemp to IRType, giving the type of any IRTemp we
+     might encounter.  This is computed before insn selection starts,
+     and does not change.
+
+   - A mapping from IRTemp to HReg.  This tells the insn selector
+     which virtual register(s) are associated with each IRTemp
+     temporary.  This is computed before insn selection starts, and
+     does not change.  We expect this mapping to map precisely the
+     same set of IRTemps as the type mapping does.
+
+        - vregmap   holds the primary register for the IRTemp.
+        - vregmapHI is only used for 64-bit integer-typed
+             IRTemps.  It holds the identity of a second
+             32-bit virtual HReg, which holds the high half
+             of the value.
+
+   - The code array, that is, the insns selected so far.
+
+   - A counter, for generating new virtual registers.
+
+   - The host subarchitecture we are selecting insns for.
+     This is set at the start and does not change.
+
+   - A Bool for indicating whether we may generate chain-me
+     instructions for control flow transfers, or whether we must use
+     XAssisted.
+
+   - The maximum guest address of any guest insn in this block.
+     Actually, the address of the highest-addressed byte from any insn
+     in this block.  Is set at the start and does not change.  This is
+     used for detecting jumps which are definitely forward-edges from
+     this block, and therefore can be made (chained) to the fast entry
+     point of the destination, thereby avoiding the destination's
+     event check.
+
+   Note, this is all (well, mostly) host-independent.
+*/
+
+/* Instruction-selection environment; see the big comment above for
+   the full description of each group of fields. */
+typedef
+   struct {
+      /* Constant -- are set at the start and do not change. */
+      IRTypeEnv*   type_env;
+
+      HReg*        vregmap;    /* IRTemp -> primary vreg */
+      HReg*        vregmapHI;  /* IRTemp -> high-half vreg, for temps
+                                  needing a register pair */
+      Int          n_vregmap;
+
+      UInt         hwcaps;
+      Bool         mode64;     /* selecting for a 64-bit host? */
+      Bool         fp_mode64;
+
+      Bool         chainingAllowed;  /* may emit chain-me transfers? */
+      Addr64       max_ga;           /* highest guest addr in this SB */
+
+      /* These are modified as we go along. */
+      HInstrArray* code;
+      Int          vreg_ctr;   /* next virtual register number */
+   }
+   ISelEnv;
+
+/* Return the (single) virtual register bound to IRTemp |tmp|.
+   |tmp| must be within the mapping built before selection began. */
+static HReg lookupIRTemp(ISelEnv * env, IRTemp tmp)
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   return env->vregmap[tmp];
+}
+
+/* Return the vreg pair bound to a 64-bit IRTemp: low half in *vrLO,
+   high half in *vrHI.  The temp must actually have a HI mapping. */
+static void lookupIRTemp64(HReg * vrHI, HReg * vrLO, ISelEnv * env, IRTemp tmp)
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
+   *vrLO = env->vregmap[tmp];
+   *vrHI = env->vregmapHI[tmp];
+}
+
+/* 64-bit mode only: return the GPR64 pair bound to an IRTemp
+   (low 64 bits in *vrLO, high 64 bits in *vrHI); cf. iselInt128Expr. */
+static void
+lookupIRTempPair(HReg * vrHI, HReg * vrLO, ISelEnv * env, IRTemp tmp)
+{
+   vassert(env->mode64);
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
+   *vrLO = env->vregmap[tmp];
+   *vrHI = env->vregmapHI[tmp];
+}
+
+/* Append |instr| to the code being generated, printing it when
+   vcode tracing is enabled. */
+static void addInstr(ISelEnv * env, MIPSInstr * instr)
+{
+   addHInstr(env->code, instr);
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      ppMIPSInstr(instr, mode64);
+      vex_printf("\n");
+   }
+}
+
+static HReg newVRegI(ISelEnv * env)
+{
+   HReg reg = mkHReg(True/*virtual reg*/,
+                     HRcGPR(env->mode64), 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+static HReg newVRegD(ISelEnv * env)
+{
+   HReg reg = mkHReg(True/*virtual reg*/,
+                     HRcFlt64, 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+static HReg newVRegF(ISelEnv * env)
+{
+   HReg reg = mkHReg(True/*virtual reg*/,
+                     HRcFPR(env->mode64), 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+/* Move the stack pointer up by |n| bytes (n < 256 and 8-aligned),
+   i.e. pop space reserved earlier by sub_from_sp. */
+static void add_to_sp(ISelEnv * env, UInt n)
+{
+   HReg sp = StackPointer(mode64);
+   vassert(n < 256 && (n % 8) == 0);
+   if (mode64)
+      addInstr(env, MIPSInstr_Alu(Malu_DADD, sp, sp, MIPSRH_Imm(True,
+                                                                toUShort(n))));
+   else
+      addInstr(env, MIPSInstr_Alu(Malu_ADD, sp, sp, MIPSRH_Imm(True,
+                                                               toUShort(n))));
+}
+
+/* Move the stack pointer down by |n| bytes (n < 256 and 8-aligned),
+   reserving scratch stack space; paired with add_to_sp. */
+static void sub_from_sp(ISelEnv * env, UInt n)
+{
+   HReg sp = StackPointer(mode64);
+   vassert(n < 256 && (n % 8) == 0);
+   if (mode64)
+      addInstr(env, MIPSInstr_Alu(Malu_DSUB, sp, sp,
+                                  MIPSRH_Imm(True, toUShort(n))));
+   else
+      addInstr(env, MIPSInstr_Alu(Malu_SUB, sp, sp,
+                                  MIPSRH_Imm(True, toUShort(n))));
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Forward declarations                        ---*/
+/*---------------------------------------------------------*/
+
+/* These are organised as iselXXX and iselXXX_wrk pairs.  The
+   iselXXX_wrk do the real work, but are not to be called directly.
+   For each XXX, iselXXX calls its iselXXX_wrk counterpart, then
+   checks that all returned registers are virtual.  You should not
+   call the _wrk version directly.
+*/
+/* 32-bit mode: Compute an I8/I16/I32 into a RH
+                (reg-or-halfword-immediate).
+   It's important to specify whether the immediate is to be regarded
+   as signed or not.  If yes, this will never return -32768 as an
+   immediate; this guarantees that all signed immediates that are
+   returned can have their sign inverted if need be.
+*/
+static MIPSRH *iselWordExpr_RH_wrk(ISelEnv * env, Bool syned, IRExpr * e);
+static MIPSRH *iselWordExpr_RH(ISelEnv * env, Bool syned, IRExpr * e);
+
+/* Compute an I8 into a reg-or-5-bit-unsigned-immediate, the latter being an
+   immediate in the range 1 .. 31 inclusive.  Used for doing shift amounts. */
+static MIPSRH *iselWordExpr_RH5u_wrk(ISelEnv * env, IRExpr * e);
+static MIPSRH *iselWordExpr_RH5u(ISelEnv * env, IRExpr * e);
+
+/* Compute an I8 into a reg-or-6-bit-unsigned-immediate, the latter being an
+   immediate in the range 1 .. 63 inclusive.  Used for doing shift amounts. */
+static MIPSRH *iselWordExpr_RH6u_wrk(ISelEnv * env, IRExpr * e);
+static MIPSRH *iselWordExpr_RH6u(ISelEnv * env, IRExpr * e);
+
+/* compute an I8/I16/I32 into a GPR*/
+static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e);
+static HReg iselWordExpr_R(ISelEnv * env, IRExpr * e);
+
+/* compute an I32 into an AMode. */
+static MIPSAMode *iselWordExpr_AMode_wrk(ISelEnv * env, IRExpr * e,
+                                         IRType xferTy);
+static MIPSAMode *iselWordExpr_AMode(ISelEnv * env, IRExpr * e, IRType xferTy);
+
+static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env,
+                              IRExpr * e);
+static void iselInt64Expr(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e);
+
+/* 64-bit mode ONLY: compute an I128 into a GPR64 pair. */
+static void iselInt128Expr_wrk(HReg * rHi, HReg * rLo,
+                               ISelEnv * env, IRExpr * e);
+static void iselInt128Expr(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e);
+
+static MIPSCondCode iselCondCode_wrk(ISelEnv * env, IRExpr * e);
+static MIPSCondCode iselCondCode(ISelEnv * env, IRExpr * e);
+
+static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e);
+static HReg iselDblExpr(ISelEnv * env, IRExpr * e);
+
+static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e);
+static HReg iselFltExpr(ISelEnv * env, IRExpr * e);
+
+/* Emit code to switch the FPU rounding mode to the IR-specified |mode|.
+   Saves the previous FCSR value at 0($sp) after moving SP down 8 bytes;
+   set_MIPS_rounding_default() reloads that slot and pops it, so the two
+   calls must be paired. */
+static void set_MIPS_rounding_mode(ISelEnv * env, IRExpr * mode)
+{
+   /*
+      rounding mode | MIPS | IR
+      ------------------------
+      to nearest    | 00  | 00
+      to zero       | 01  | 11
+      to +infinity  | 10  | 10
+      to -infinity  | 11  | 01
+    */
+   /* rm_MIPS32  = XOR(rm_IR , (rm_IR << 1)) & 2 */
+   HReg irrm = iselWordExpr_R(env, mode);
+   HReg tmp = newVRegI(env);
+   HReg fcsr_old = newVRegI(env);
+   MIPSAMode *am_addr;
+
+   /* remap the IR encoding to the MIPS encoding per the table above */
+   addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, tmp, irrm,
+                                MIPSRH_Imm(False, 1)));
+   addInstr(env, MIPSInstr_Alu(Malu_XOR, tmp, irrm, MIPSRH_Reg(tmp)));
+   addInstr(env, MIPSInstr_Alu(Malu_AND, irrm, tmp, MIPSRH_Imm(False, 3)));
+   /* save old value of FCSR */
+   addInstr(env, MIPSInstr_MfFCSR(fcsr_old));
+   sub_from_sp(env, 8); /*  Move SP down 8 bytes */
+   am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+   /* store old FCSR to stack */
+   addInstr(env, MIPSInstr_Store(4, am_addr, fcsr_old, mode64));
+
+   /* set new value of FCSR */
+   addInstr(env, MIPSInstr_MtFCSR(irrm));
+}
+
+/* Emit code to restore the FCSR value that set_MIPS_rounding_mode()
+   saved at 0($sp), then pop the 8-byte stack slot.  Must only be
+   called after a matching set_MIPS_rounding_mode(). */
+static void set_MIPS_rounding_default(ISelEnv * env)
+{
+   HReg fcsr = newVRegI(env);
+   /* load as float */
+   MIPSAMode *am_addr;
+   am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+   addInstr(env, MIPSInstr_Load(4, fcsr, am_addr, mode64));
+
+   add_to_sp(env, 8);  /* Reset SP */
+
+   /* set new value of FCSR*/
+   addInstr(env, MIPSInstr_MtFCSR(fcsr));
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Misc helpers                                ---*/
+/*---------------------------------------------------------*/
+
+/* Make an int reg-reg move. */
+static MIPSInstr *mk_iMOVds_RR(HReg r_dst, HReg r_src)
+{
+   vassert(hregClass(r_dst) == hregClass(r_src));
+   vassert(hregClass(r_src) == HRcInt32 || hregClass(r_src) == HRcInt64);
+   return MIPSInstr_Alu(Malu_OR, r_dst, r_src, MIPSRH_Reg(r_src));
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Function call helpers                       ---*/
+/*---------------------------------------------------------*/
+
+/* Used only in doHelperCall.  See big comment in doHelperCall re
+   handling of register-parameter args.  This function figures out
+   whether evaluation of an expression might require use of a fixed
+   register.  If in doubt return True (safe but suboptimal).
+*/
+/* Used only in doHelperCall.  See big comment in doHelperCall re
+   handling of register-parameter args.  This function figures out
+   whether evaluation of an expression might require use of a fixed
+   register.  If in doubt return True (safe but suboptimal).
+   Only leaf-ish expressions (temp/const/guest-state read) are
+   considered definitely safe. */
+static Bool mightRequireFixedRegs(IRExpr * e)
+{
+   switch (e->tag) {
+      case Iex_RdTmp:
+      case Iex_Const:
+      case Iex_Get:
+         return False;
+      default:
+         return True;
+   }
+}
+
+/* Load 2*I32 regs to fp reg */
+static HReg mk_LoadRR32toFPR(ISelEnv * env, HReg r_srcHi, HReg r_srcLo)
+{
+   HReg fr_dst = newVRegD(env);
+   MIPSAMode *am_addr0, *am_addr1;
+
+   vassert(hregClass(r_srcHi) == HRcInt32);
+   vassert(hregClass(r_srcLo) == HRcInt32);
+
+   sub_from_sp(env, 16);  /* Move SP down 16 bytes */
+   am_addr0 = MIPSAMode_IR(0, StackPointer(mode64));
+   am_addr1 = MIPSAMode_IR(4, StackPointer(mode64));
+
+   /* store hi,lo as Ity_I32's */
+#if defined (_MIPSEL)
+   addInstr(env, MIPSInstr_Store(4, am_addr0, r_srcLo, mode64));
+   addInstr(env, MIPSInstr_Store(4, am_addr1, r_srcHi, mode64));
+#elif defined (_MIPSEB)
+   addInstr(env, MIPSInstr_Store(4, am_addr0, r_srcHi, mode64));
+   addInstr(env, MIPSInstr_Store(4, am_addr1, r_srcLo, mode64));
+#else
+   /* Stop gcc on other platforms complaining about am_addr1 being set
+      but not used. */
+   (void)am_addr1;
+#endif
+
+   /* load as float */
+   addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, fr_dst, am_addr0));
+
+   add_to_sp(env, 16);  /* Reset SP */
+   return fr_dst;
+}
+
+/* Do a complete function call.  |guard| is a Ity_Bit expression
+   indicating whether or not the call happens.  If guard==NULL, the
+   call is unconditional.  |retloc| is set to indicate where the
+   return value is after the call.  The caller (of this fn) must
+   generate code to add |stackAdjustAfterCall| to the stack pointer
+   after the call is done. */
+
+static void doHelperCall(/*OUT*/UInt*   stackAdjustAfterCall,
+                         /*OUT*/RetLoc* retloc,
+                         ISelEnv* env,
+                         IRExpr* guard,
+                         IRCallee* cee, IRType retTy, IRExpr** args )
+{
+   MIPSCondCode cc;
+   HReg argregs[MIPS_N_REGPARMS];
+   HReg tmpregs[MIPS_N_REGPARMS];
+   Bool go_fast;
+   Int n_args, i, argreg;
+   UInt argiregs;
+   HReg src = INVALID_HREG;
+
+   /* Set default returns.  We'll update them later if needed. */
+   *stackAdjustAfterCall = 0;
+   *retloc               = mk_RetLoc_INVALID();
+
+   /* These are used for cross-checking that IR-level constraints on
+      the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+   UInt nVECRETs = 0;
+   UInt nBBPTRs  = 0;
+
+   /* MIPS O32 calling convention: up to four registers ($a0 ... $a3)
+      are allowed to be used for passing integer arguments. They correspond
+      to regs GPR4 ... GPR7. Note that the cee->regparms field is meaningless
+      on MIPS host (since we only implement one calling convention) and so we
+      always ignore it. */
+
+   /* MIPS 64 calling convention: up to four registers ($a0 ... $a7)
+      are allowed to be used for passing integer arguments. They correspond
+      to regs GPR4 ... GPR11. Note that the cee->regparms field is meaningless
+      on MIPS host (since we only implement one calling convention) and so we
+      always ignore it. */
+
+   /* The return type can be I{64,32,16,8} or V{128,256}.  In the
+      latter two cases, it is expected that |args| will contain the
+      special node IRExpr_VECRET(), in which case this routine
+      generates code to allocate space on the stack for the vector
+      return value.  Since we are not passing any scalars on the
+      stack, it is enough to preallocate the return space before
+      marshalling any arguments, in this case.
+
+      |args| may also contain IRExpr_BBPTR(), in which case the value
+      in the guest state pointer register is passed as the
+      corresponding argument. */
+
+   n_args = 0;
+   for (i = 0; args[i]; i++) {
+      IRExpr* arg = args[i];
+      if (UNLIKELY(arg->tag == Iex_VECRET)) {
+         nVECRETs++;
+      } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+         nBBPTRs++;
+      }
+      n_args++;
+   }
+
+   if (n_args > MIPS_N_REGPARMS) {
+      vpanic("doHelperCall(MIPS): cannot currently handle > 4 or 8 args");
+   }
+   if (mode64) {
+      argregs[0] = hregMIPS_GPR4(mode64);
+      argregs[1] = hregMIPS_GPR5(mode64);
+      argregs[2] = hregMIPS_GPR6(mode64);
+      argregs[3] = hregMIPS_GPR7(mode64);
+      argregs[4] = hregMIPS_GPR8(mode64);
+      argregs[5] = hregMIPS_GPR9(mode64);
+      argregs[6] = hregMIPS_GPR10(mode64);
+      argregs[7] = hregMIPS_GPR11(mode64);
+      argiregs = 0;
+      tmpregs[0] = tmpregs[1] = tmpregs[2] =
+      tmpregs[3] = tmpregs[4] = tmpregs[5] =
+      tmpregs[6] = tmpregs[7] = INVALID_HREG;
+   } else {
+      argregs[0] = hregMIPS_GPR4(mode64);
+      argregs[1] = hregMIPS_GPR5(mode64);
+      argregs[2] = hregMIPS_GPR6(mode64);
+      argregs[3] = hregMIPS_GPR7(mode64);
+      argiregs = 0;
+      tmpregs[0] = tmpregs[1] = tmpregs[2] = tmpregs[3] = INVALID_HREG;
+   }
+
+   /* First decide which scheme (slow or fast) is to be used. First assume the
+      fast scheme, and select slow if any contraindications (wow) appear. */
+
+   go_fast = True;
+
+   /* We'll need space on the stack for the return value.  Avoid
+      possible complications with nested calls by using the slow
+      scheme. */
+   if (retTy == Ity_V128 || retTy == Ity_V256)
+      go_fast = False;
+
+   if (go_fast && guard) {
+      if (guard->tag == Iex_Const && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+         /* unconditional */
+      } else {
+         /* Not manifestly unconditional -- be conservative. */
+         go_fast = False;
+      }
+   }
+
+   if (go_fast) {
+      for (i = 0; i < n_args; i++) {
+         if (mightRequireFixedRegs(args[i])) {
+            go_fast = False;
+            break;
+         }
+      }
+   }
+
+   /* At this point the scheme to use has been established.  Generate
+      code to get the arg values into the argument rregs. */
+   if (go_fast) {
+      /* FAST SCHEME */
+      argreg = 0;
+
+      for (i = 0; i < n_args; i++) {
+         IRExpr* arg = args[i];
+         vassert(argreg < MIPS_N_REGPARMS);
+
+         IRType  aTy = Ity_INVALID;
+         if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+            aTy = typeOfIRExpr(env->type_env, arg);
+
+         if (aTy == Ity_I32 || mode64) {
+            argiregs |= (1 << (argreg + 4));
+            addInstr(env, mk_iMOVds_RR(argregs[argreg],
+                                       iselWordExpr_R(env, arg)));
+            argreg++;
+         } else if (aTy == Ity_I64) {  /* Ity_I64 */
+            if (argreg & 1) {
+               argreg++;
+               argiregs |= (1 << (argreg + 4));
+            }
+            HReg rHi, rLo;
+            iselInt64Expr(&rHi, &rLo, env, arg);
+            argiregs |= (1 << (argreg + 4));
+            addInstr(env, mk_iMOVds_RR( argregs[argreg++], rHi ));
+            argiregs |= (1 << (argreg + 4));
+            addInstr(env, mk_iMOVds_RR( argregs[argreg], rLo));
+            argreg++;
+         } else if (arg->tag == Iex_BBPTR) {
+            vassert(0);  // ATC
+            addInstr(env, mk_iMOVds_RR(argregs[argreg],
+                                       GuestStatePointer(mode64)));
+            argreg++;
+         } else if (arg->tag == Iex_VECRET) {
+            // If this happens, it denotes ill-formed IR.
+            vassert(0);
+         }
+      }
+      /* Fast scheme only applies for unconditional calls.  Hence: */
+      cc = MIPScc_AL;
+   } else {
+      /* SLOW SCHEME; move via temporaries */
+      argreg = 0;
+
+      for (i = 0; i < n_args; i++) {
+         vassert(argreg < MIPS_N_REGPARMS);
+         IRExpr* arg = args[i];
+
+         IRType  aTy = Ity_INVALID;
+         if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+            aTy  = typeOfIRExpr(env->type_env, arg);
+
+         if (aTy == Ity_I32 || (mode64 && arg->tag != Iex_BBPTR)) {
+            tmpregs[argreg] = iselWordExpr_R(env, arg);
+            argreg++;
+         } else if (aTy == Ity_I64) {  /* Ity_I64 */
+            if (argreg & 1)
+               argreg++;
+            if (argreg + 1 >= MIPS_N_REGPARMS)
+               vassert(0);  /* out of argregs */
+            HReg raHi, raLo;
+            iselInt64Expr(&raHi, &raLo, env, arg);
+            tmpregs[argreg] = raLo;
+            argreg++;
+            tmpregs[argreg] = raHi;
+            argreg++;
+         } else if (arg->tag == Iex_BBPTR) {
+            tmpregs[argreg] = GuestStatePointer(mode64);
+            argreg++;
+         }
+         else if (arg->tag == Iex_VECRET) {
+            // If this happens, it denotes ill-formed IR
+            vassert(0);
+         }
+      }
+
+      /* Now we can compute the condition.  We can't do it earlier
+         because the argument computations could trash the condition
+         codes.  Be a bit clever to handle the common case where the
+         guard is 1:Bit. */
+      cc = MIPScc_AL;
+      if (guard) {
+         if (guard->tag == Iex_Const && guard->Iex.Const.con->tag == Ico_U1
+             && guard->Iex.Const.con->Ico.U1 == True) {
+            /* unconditional -- do nothing */
+         } else {
+            cc = iselCondCode(env, guard);
+            src = iselWordExpr_R(env, guard);
+         }
+      }
+      /* Move the args to their final destinations. */
+      for (i = 0; i < argreg; i++) {
+         if (hregIsInvalid(tmpregs[i]))  /* Skip invalid regs */
+            continue;
+         /* None of these insns, including any spill code that might
+            be generated, may alter the condition codes. */
+         argiregs |= (1 << (i + 4));
+         addInstr(env, mk_iMOVds_RR(argregs[i], tmpregs[i]));
+      }
+   }
+
+   /* Do final checks, set the return values, and generate the call
+      instruction proper. */
+   vassert(nBBPTRs == 0 || nBBPTRs == 1);
+   vassert(nVECRETs == (retTy == Ity_V128 || retTy == Ity_V256) ? 1 : 0);
+   vassert(*stackAdjustAfterCall == 0);
+   vassert(is_RetLoc_INVALID(*retloc));
+   switch (retTy) {
+      case Ity_INVALID:
+         /* Function doesn't return a value. */
+         *retloc = mk_RetLoc_simple(RLPri_None);
+         break;
+      case Ity_I64:
+         *retloc = mk_RetLoc_simple(mode64 ? RLPri_Int : RLPri_2Int);
+         break;
+      case Ity_I32: case Ity_I16: case Ity_I8:
+         *retloc = mk_RetLoc_simple(RLPri_Int);
+         break;
+      case Ity_V128:
+         vassert(0); // ATC
+         *retloc = mk_RetLoc_spRel(RLPri_V128SpRel, 0);
+         *stackAdjustAfterCall = 16;
+         break;
+      case Ity_V256:
+         vassert(0); // ATC
+         *retloc = mk_RetLoc_spRel(RLPri_V256SpRel, 0);
+         *stackAdjustAfterCall = 32;
+         break;
+      default:
+         /* IR can denote other possible return types, but we don't
+            handle those here. */
+        vassert(0);
+   }
+
+   Addr64 target = mode64 ? (Addr)cee->addr :
+                            toUInt((Addr)cee->addr);
+
+   /* Finally, generate the call itself.  This needs the *retloc value
+      set in the switch above, which is why it's at the end. */
+   if (cc == MIPScc_AL)
+      addInstr(env, MIPSInstr_CallAlways(cc, target, argiregs,
+                                         *retloc));
+   else
+      addInstr(env, MIPSInstr_Call(cc, target, argiregs, src, *retloc));
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expression auxiliaries              ---*/
+/*---------------------------------------------------------*/
+
+/* --------------------- AMODEs --------------------- */
+
+/* Return an AMode which computes the value of the specified
+   expression, possibly also adding insns to the code list as a
+   result.  The expression may only be a word-size one.
+*/
+
+/* True iff |u|, viewed as a 32-bit value, equals the sign-extension of
+   its own low 16 bits -- i.e. it is encodable in a signed 16-bit
+   immediate field.  Implemented with unsigned arithmetic only: the
+   previous version left-shifted a signed Int, which is undefined
+   behaviour when bit 15 of the masked value is set (C99 6.5.7). */
+static Bool uInt_fits_in_16_bits(UInt u)
+{
+   UInt v = u & 0xFFFF;
+   if (v & 0x8000)
+      v |= 0xFFFF0000;   /* propagate bit 15 through bits 31..16 */
+   return toBool(u == v);
+}
+
+/* True iff |u|, viewed as a 64-bit value, equals the sign-extension of
+   its own low 16 bits -- i.e. it is encodable in a signed 16-bit
+   immediate field.  Implemented with unsigned arithmetic only: the
+   previous version left-shifted a signed Long by 48, which is
+   undefined behaviour when bit 15 of the masked value is set
+   (C99 6.5.7). */
+static Bool uLong_fits_in_16_bits ( ULong u )
+{
+   ULong v = u & 0xFFFFULL;
+   if (v & 0x8000ULL)
+      v |= 0xFFFFFFFFFFFF0000ULL;   /* propagate bit 15 through bits 63..16 */
+   return toBool(u == v);
+}
+
+/* True iff |u| is a multiple of 4 (its two lowest bits are clear). */
+static Bool uLong_is_4_aligned ( ULong u )
+{
+   return toBool(0ULL == (u % 4ULL));
+}
+
+/* Sanity-check an amode: its base (and, for RR, index) register must
+   be a virtual GPR of the host word size; for IR the displacement must
+   fit a signed 16-bit field. */
+static Bool sane_AMode(ISelEnv * env, MIPSAMode * am)
+{
+   if (am->tag == Mam_IR) {
+      Bool baseOK = toBool(hregClass(am->Mam.IR.base) == HRcGPR(mode64)
+                           && hregIsVirtual(am->Mam.IR.base));
+      return toBool(baseOK && uInt_fits_in_16_bits(am->Mam.IR.index));
+   }
+   if (am->tag == Mam_RR) {
+      Bool baseOK  = toBool(hregClass(am->Mam.RR.base) == HRcGPR(mode64)
+                            && hregIsVirtual(am->Mam.RR.base));
+      Bool indexOK = toBool(hregClass(am->Mam.RR.index) == HRcGPR(mode64)
+                            && hregIsVirtual(am->Mam.RR.index));
+      return toBool(baseOK && indexOK);
+   }
+   vpanic("sane_AMode: unknown mips amode tag");
+}
+
+/* Compute an amode for |e|, then sanity-check it before handing it
+   back to the caller. */
+static MIPSAMode *iselWordExpr_AMode(ISelEnv * env, IRExpr * e, IRType xferTy)
+{
+   MIPSAMode *result = iselWordExpr_AMode_wrk(env, e, xferTy);
+   vassert(sane_AMode(env, result));
+   return result;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static MIPSAMode *iselWordExpr_AMode_wrk(ISelEnv * env, IRExpr * e,
+                                         IRType xferTy)
+{
+   IRType ty = typeOfIRExpr(env->type_env, e);
+
+   if (env->mode64) {
+      /* For 32- and 64-bit transfers, an immediate displacement must
+         additionally be 4-aligned. */
+      Bool aligned4imm = toBool(xferTy == Ity_I32 || xferTy == Ity_I64);
+      vassert(ty == Ity_I64);
+
+      if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add64) {
+         IRExpr* rhs = e->Iex.Binop.arg2;
+         /* Add64(expr, i), where i == sign-extend of (i & 0xFFFF) */
+         if (rhs->tag == Iex_Const
+             && rhs->Iex.Const.con->tag == Ico_U64
+             && (aligned4imm
+                 ? uLong_is_4_aligned(rhs->Iex.Const.con->Ico.U64)
+                 : True)
+             && uLong_fits_in_16_bits(rhs->Iex.Const.con->Ico.U64)) {
+            return MIPSAMode_IR((Int) rhs->Iex.Const.con->Ico.U64,
+                                iselWordExpr_R(env, e->Iex.Binop.arg1));
+         }
+         /* Add64(expr, expr).  Evaluate base before index, matching the
+            instruction emission order callers expect. */
+         HReg rBase  = iselWordExpr_R(env, e->Iex.Binop.arg1);
+         HReg rIndex = iselWordExpr_R(env, rhs);
+         return MIPSAMode_RR(rIndex, rBase);
+      }
+   } else {
+      vassert(ty == Ity_I32);
+
+      if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add32) {
+         IRExpr* rhs = e->Iex.Binop.arg2;
+         /* Add32(expr, i), where i == sign-extend of (i & 0xFFFF) */
+         if (rhs->tag == Iex_Const
+             && rhs->Iex.Const.con->tag == Ico_U32
+             && uInt_fits_in_16_bits(rhs->Iex.Const.con->Ico.U32)) {
+            return MIPSAMode_IR((Int) rhs->Iex.Const.con->Ico.U32,
+                                iselWordExpr_R(env, e->Iex.Binop.arg1));
+         }
+         /* Add32(expr, expr).  Evaluate base before index, matching the
+            instruction emission order callers expect. */
+         HReg rBase  = iselWordExpr_R(env, e->Iex.Binop.arg1);
+         HReg rIndex = iselWordExpr_R(env, rhs);
+         return MIPSAMode_RR(rIndex, rBase);
+      }
+   }
+
+   /* No special pattern matched: evaluate |e| into a register and use
+      a zero displacement. */
+   return MIPSAMode_IR(0, iselWordExpr_R(env, e));
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64/32/16/8 bit)        ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for an integer-typed expression, and add them to the
+   code list.  Return a reg holding the result.  This reg will be a
+   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
+   want to modify it, ask for a new vreg, copy it in there, and modify
+   the copy.  The register allocator will do its best to map both
+   vregs to the same real register, so the copies will often disappear
+   later in the game.
+
+   This should handle expressions of 64, 32, 16 and 8-bit type.
+   All results are returned in a (mode64 ? 64bit : 32bit) register.
+   For 16- and 8-bit expressions, the upper (32/48/56 : 16/24) bits
+   are arbitrary, so you should mask or sign extend partial values
+   if necessary.
+*/
+/* Select insns for |e| via the worker, then sanity-check that the
+   result is a virtual GPR of the host word size. */
+static HReg iselWordExpr_R(ISelEnv * env, IRExpr * e)
+{
+   HReg result = iselWordExpr_R_wrk(env, e);
+   vassert(hregClass(result) == HRcGPR(env->mode64));
+   vassert(hregIsVirtual(result));
+   return result;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static HReg iselWordExpr_R_wrk(ISelEnv * env, IRExpr * e)
+{
+   UInt argiregs = 0;
+   IRType ty = typeOfIRExpr(env->type_env, e);
+   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ty == Ity_I1
+           || ty == Ity_F32 || (ty == Ity_I64 && mode64)
+           || (ty == Ity_I128 && mode64));
+
+   switch (e->tag) {
+      /* --------- TEMP --------- */
+      case Iex_RdTmp:
+         return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+
+      /* --------- LOAD --------- */
+      case Iex_Load: {
+         HReg r_dst = newVRegI(env);
+         MIPSAMode *am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, ty);
+
+         if (e->Iex.Load.end != Iend_LE
+             && e->Iex.Load.end != Iend_BE)
+            goto irreducible;
+
+         addInstr(env, MIPSInstr_Load(toUChar(sizeofIRType(ty)),
+                                      r_dst, am_addr, mode64));
+         return r_dst;
+      }
+
+      /* --------- BINARY OP --------- */
+      case Iex_Binop: {
+         MIPSAluOp aluOp;
+         MIPSShftOp shftOp;
+
+         /* Is it an addition or logical style op? */
+         switch (e->Iex.Binop.op) {
+            case Iop_Add8:
+            case Iop_Add16:
+            case Iop_Add32:
+               aluOp = Malu_ADD;
+               break;
+
+            case Iop_Sub8:
+            case Iop_Sub16:
+            case Iop_Sub32:
+               aluOp = Malu_SUB;
+               break;
+
+            case Iop_Sub64:
+               aluOp = Malu_DSUB;
+               break;
+
+            case Iop_And8:
+            case Iop_And16:
+            case Iop_And32:
+            case Iop_And64:
+               aluOp = Malu_AND;
+               break;
+
+            case Iop_Or8:
+            case Iop_Or16:
+            case Iop_Or32:
+            case Iop_Or64:
+               aluOp = Malu_OR;
+               break;
+
+            case Iop_Xor8:
+            case Iop_Xor16:
+            case Iop_Xor32:
+            case Iop_Xor64:
+               aluOp = Malu_XOR;
+               break;
+
+            case Iop_Add64:
+               aluOp = Malu_DADD;
+               break;
+
+            default:
+               aluOp = Malu_INVALID;
+               break;
+         }
+
+         /* For commutative ops we assume any literal
+            values are on the second operand. */
+         if (aluOp != Malu_INVALID) {
+            HReg r_dst = newVRegI(env);
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            MIPSRH *ri_srcR = NULL;
+            /* get right arg into an RH, in the appropriate way */
+            switch (aluOp) {
+               case Malu_ADD:
+               case Malu_SUB:
+               case Malu_DADD:
+               case Malu_DSUB:
+                  ri_srcR = iselWordExpr_RH(env, True /*signed */ ,
+                                            e->Iex.Binop.arg2);
+                  break;
+               case Malu_AND:
+               case Malu_OR:
+               case Malu_XOR:
+                  ri_srcR = iselWordExpr_RH(env, False /*unsigned */,
+                                            e->Iex.Binop.arg2);
+                  break;
+               default:
+                  vpanic("iselWordExpr_R_wrk-aluOp-arg2");
+            }
+            addInstr(env, MIPSInstr_Alu(aluOp, r_dst, r_srcL, ri_srcR));
+            return r_dst;
+         }
+
+         /* a shift? */
+         switch (e->Iex.Binop.op) {
+            case Iop_Shl32:
+            case Iop_Shl64:
+               shftOp = Mshft_SLL;
+               break;
+            case Iop_Shr32:
+            case Iop_Shr64:
+               shftOp = Mshft_SRL;
+               break;
+            case Iop_Sar32:
+            case Iop_Sar64:
+               shftOp = Mshft_SRA;
+               break;
+            default:
+               shftOp = Mshft_INVALID;
+               break;
+         }
+
+         /* we assume any literal values are on the second operand. */
+         if (shftOp != Mshft_INVALID) {
+            HReg r_dst = newVRegI(env);
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            MIPSRH *ri_srcR;
+            if (mode64)
+               ri_srcR = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
+            else
+               ri_srcR = iselWordExpr_RH5u(env, e->Iex.Binop.arg2);
+
+            if (ty == Ity_I8) {
+               vassert(0);
+            } else if (ty == Ity_I32) {
+               if (mode64 && (shftOp == Mshft_SRA || shftOp == Mshft_SRL)) {
+                  HReg tmp = newVRegI(env);
+                  HReg r_srcL_se = newVRegI(env);
+                  /* SRA, SRAV, SRL, SRLV: On 64-bit processors, if GPR rt does
+                     not contain a sign-extended 32-bit value (bits 63..31
+                     equal), then the result of the operation is UNPREDICTABLE.
+                     So we need to sign-extend r_srcL:
+                     DSLLV tmp, r_srcL, 32
+                     DSRAV r_srcL_se, tmp, 32
+                  */
+                  addInstr(env, MIPSInstr_Shft(Mshft_SLL, False, tmp,
+                                               r_srcL, MIPSRH_Imm(False, 32)));
+                  addInstr(env, MIPSInstr_Shft(Mshft_SRA, False, r_srcL_se,
+                                               tmp, MIPSRH_Imm(False, 32)));
+                  /* And finally do the shift. */
+                  addInstr(env, MIPSInstr_Shft(shftOp, True /*32bit shift */,
+                                               r_dst, r_srcL_se, ri_srcR));
+               } else
+                  addInstr(env, MIPSInstr_Shft(shftOp, True /*32bit shift */,
+                                               r_dst, r_srcL, ri_srcR));
+            } else if (ty == Ity_I64) {
+               vassert(mode64);
+               addInstr(env, MIPSInstr_Shft(shftOp, False/*64bit shift */,
+                                            r_dst, r_srcL, ri_srcR));
+            } else
+               goto irreducible;
+            return r_dst;
+         }
+
+         /* Cmp*32*(x,y) ? */
+         if (e->Iex.Binop.op == Iop_CmpEQ32
+             || e->Iex.Binop.op == Iop_CmpEQ16
+             || e->Iex.Binop.op == Iop_CmpNE32
+             || e->Iex.Binop.op == Iop_CmpNE64
+             || e->Iex.Binop.op == Iop_CmpLT32S
+             || e->Iex.Binop.op == Iop_CmpLT32U
+             || e->Iex.Binop.op == Iop_CmpLT64U
+             || e->Iex.Binop.op == Iop_CmpLE32U
+             || e->Iex.Binop.op == Iop_CmpLE32S
+             || e->Iex.Binop.op == Iop_CmpLE64S
+             || e->Iex.Binop.op == Iop_CmpLT64S
+             || e->Iex.Binop.op == Iop_CmpEQ64
+             || e->Iex.Binop.op == Iop_CasCmpEQ32
+             || e->Iex.Binop.op == Iop_CasCmpEQ64) {
+
+            Bool syned = (e->Iex.Binop.op == Iop_CmpLT32S
+                         || e->Iex.Binop.op == Iop_CmpLE32S
+                         || e->Iex.Binop.op == Iop_CmpLT64S
+                         || e->Iex.Binop.op == Iop_CmpLE64S);
+            Bool size32;
+            HReg dst = newVRegI(env);
+            HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg r2 = iselWordExpr_R(env, e->Iex.Binop.arg2);
+
+            MIPSCondCode cc;
+
+            switch (e->Iex.Binop.op) {
+               case Iop_CmpEQ32:
+               case Iop_CasCmpEQ32:
+                  cc = MIPScc_EQ;
+                  size32 = True;
+                  break;
+               case Iop_CmpEQ16:
+                  cc = MIPScc_EQ;
+                  size32 = True;
+                  break;
+               case Iop_CmpNE32:
+                  cc = MIPScc_NE;
+                  size32 = True;
+                  break;
+               case Iop_CmpNE64:
+                  cc = MIPScc_NE;
+                  size32 = True;
+                  break;
+               case Iop_CmpLT32S:
+                  cc = MIPScc_LT;
+                  size32 = True;
+                  break;
+               case Iop_CmpLT32U:
+                  cc = MIPScc_LO;
+                  size32 = True;
+                  break;
+               case Iop_CmpLT64U:
+                  cc = MIPScc_LO;
+                  size32 = False;
+                  break;
+               case Iop_CmpLE32U:
+                  cc = MIPScc_LE;
+                  size32 = True;
+                  break;
+               case Iop_CmpLE32S:
+                  cc = MIPScc_LE;
+                  size32 = True;
+                  break;
+               case Iop_CmpLE64S:
+                  cc = MIPScc_LE;
+                  size32 = False;
+                  break;
+               case Iop_CmpLT64S:
+                  cc = MIPScc_LT;
+                  size32 = False;
+                  break;
+               case Iop_CmpEQ64:
+               case Iop_CasCmpEQ64:
+                  cc = MIPScc_EQ;
+                  size32 = False;
+                  break;
+               default:
+                  vpanic("iselCondCode(mips): CmpXX32 or CmpXX64");
+            }
+
+            addInstr(env, MIPSInstr_Cmp(syned, size32, dst, r1, r2, cc));
+            return dst;
+         }
+
+         if (e->Iex.Binop.op == Iop_Max32U) {
+            HReg tmp = newVRegI(env);
+            HReg r_dst = newVRegI(env);
+            HReg argL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg argR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            MIPSRH *argRH = iselWordExpr_RH(env, False /*signed */ ,
+                                           e->Iex.Binop.arg2);
+            /* max (v0, s0)
+               ------------
+               slt v1, v0, s0
+               movn v0, s0, v1 */
+
+            addInstr(env, MIPSInstr_Alu(Malu_SLT, tmp, argL, argRH));
+            addInstr(env, mk_iMOVds_RR(r_dst, argL));
+            addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dst, argR, tmp));
+            return r_dst;
+         }
+
+         if (e->Iex.Binop.op == Iop_Mul32 || e->Iex.Binop.op == Iop_Mul64) {
+            Bool sz32 = (e->Iex.Binop.op == Iop_Mul32);
+            HReg r_dst = newVRegI(env);
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            addInstr(env, MIPSInstr_Mul(False/*Unsigned or Signed */ ,
+                                       False /*widen */ ,
+                                       sz32 /*32bit or 64bit */,
+                                       r_dst, r_srcL, r_srcR));
+            return r_dst;
+         }
+
+         if (e->Iex.Binop.op == Iop_MullU32 || e->Iex.Binop.op == Iop_MullS32) {
+            HReg r_dst = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg tLo = newVRegI(env);
+            HReg tLo_1 = newVRegI(env);
+            HReg tHi_1 = newVRegI(env);
+            HReg mask = newVRegI(env);
+
+            Bool syned = toBool(e->Iex.Binop.op == Iop_MullS32);
+            Bool size = toBool(e->Iex.Binop.op == Iop_MullS32)
+                        || toBool(e->Iex.Binop.op == Iop_MullU32);
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            addInstr(env, MIPSInstr_Mul(syned /*Unsigned or Signed */ ,
+                                        True /*widen */ ,
+                                        size /*32bit or 64bit mul */ ,
+                                        r_dst, r_srcL, r_srcR));
+
+            addInstr(env, MIPSInstr_Mfhi(tHi));
+            addInstr(env, MIPSInstr_Mflo(tLo));
+
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, False, tHi_1,
+                          tHi, MIPSRH_Imm(False, 32)));
+
+            addInstr(env, MIPSInstr_LI(mask, 0xffffffff));
+            addInstr(env, MIPSInstr_Alu(Malu_AND, tLo_1, tLo,
+                          MIPSRH_Reg(mask)));
+
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_dst, tHi_1,
+                          MIPSRH_Reg(tLo_1)));
+
+            return r_dst;
+         }
+
+         if (e->Iex.Binop.op == Iop_CmpF64) {
+            HReg r_srcL, r_srcR;
+            if (mode64) {
+               r_srcL = iselFltExpr(env, e->Iex.Binop.arg1);
+               r_srcR = iselFltExpr(env, e->Iex.Binop.arg2);
+            } else {
+               r_srcL = iselDblExpr(env, e->Iex.Binop.arg1);
+               r_srcR = iselDblExpr(env, e->Iex.Binop.arg2);
+            }
+            HReg tmp = newVRegI(env);
+            HReg r_ccMIPS = newVRegI(env);
+            HReg r_ccIR = newVRegI(env);
+            HReg r_ccIR_b0 = newVRegI(env);
+            HReg r_ccIR_b2 = newVRegI(env);
+            HReg r_ccIR_b6 = newVRegI(env);
+
+            /* Create in dst, the IRCmpF64Result encoded result. */
+            /* chech for EQ */
+            addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_EQ, tmp, r_srcL, r_srcR));
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, r_ccMIPS, tmp,
+                                         MIPSRH_Imm(False, 1)));
+            /* chech for UN */
+            addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_UN, tmp, r_srcL, r_srcR));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccMIPS, r_ccMIPS,
+                                        MIPSRH_Reg(tmp)));
+            /* chech for LT */
+            addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_LT, tmp, r_srcL, r_srcR));
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, tmp,
+                                         tmp, MIPSRH_Imm(False, 2)));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccMIPS, r_ccMIPS,
+                                        MIPSRH_Reg(tmp)));
+            /* chech for GT */
+            addInstr(env, MIPSInstr_FpCompare(Mfp_CMP_NGT,
+                                              tmp, r_srcL, r_srcR));
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, tmp, tmp,
+                                         MIPSRH_Imm(False, 3)));
+
+            addInstr(env, MIPSInstr_Alu(Malu_NOR, tmp, tmp, MIPSRH_Reg(tmp)));
+            addInstr(env, MIPSInstr_Alu(Malu_AND, tmp, tmp,
+                                        MIPSRH_Imm(False, 8)));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccMIPS, r_ccMIPS,
+                                        MIPSRH_Reg(tmp)));
+            /* Map compare result from MIPS to IR,
+               conforming to CmpF64 definition.
+               FP cmp result | MIPS | IR
+               --------------------------
+               UN            | 0x1 | 0x45
+               EQ            | 0x2 | 0x40
+               GT            | 0x4 | 0x00
+               LT            | 0x8 | 0x01
+             */
+
+            /* r_ccIR_b0 = r_ccMIPS[0] | r_ccMIPS[3] */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True, r_ccIR_b0, r_ccMIPS,
+                          MIPSRH_Imm(False, 0x3)));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR_b0, r_ccMIPS,
+                          MIPSRH_Reg(r_ccIR_b0)));
+            addInstr(env, MIPSInstr_Alu(Malu_AND, r_ccIR_b0, r_ccIR_b0,
+                          MIPSRH_Imm(False, 0x1)));
+
+            /* r_ccIR_b2 = r_ccMIPS[0] */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, r_ccIR_b2, r_ccMIPS,
+                          MIPSRH_Imm(False, 0x2)));
+            addInstr(env, MIPSInstr_Alu(Malu_AND, r_ccIR_b2, r_ccIR_b2,
+                          MIPSRH_Imm(False, 0x4)));
+
+            /* r_ccIR_b6 = r_ccMIPS[0] | r_ccMIPS[1] */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True, r_ccIR_b6,
+                          r_ccMIPS, MIPSRH_Imm(False, 0x1)));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR_b6, r_ccMIPS,
+                          MIPSRH_Reg(r_ccIR_b6)));
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, r_ccIR_b6, r_ccIR_b6,
+                          MIPSRH_Imm(False, 0x6)));
+            addInstr(env, MIPSInstr_Alu(Malu_AND, r_ccIR_b6, r_ccIR_b6,
+                          MIPSRH_Imm(False, 0x40)));
+
+            /* r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6 */
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR, r_ccIR_b0,
+                          MIPSRH_Reg(r_ccIR_b2)));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_ccIR, r_ccIR,
+                          MIPSRH_Reg(r_ccIR_b6)));
+            return r_ccIR;
+         }
+
+         if (e->Iex.Binop.op == Iop_DivModU64to32 ||
+             e->Iex.Binop.op == Iop_DivModS64to32) {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg mask = newVRegI(env);
+            HReg tLo_1 = newVRegI(env);
+            HReg tHi_1 = newVRegI(env);
+            HReg r_dst = newVRegI(env);
+            Bool syned = toBool(e->Iex.Binop.op == Iop_DivModS64to32);
+
+            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+
+            addInstr(env, MIPSInstr_Div(syned, True, r_srcL, r_srcR));
+            addInstr(env, MIPSInstr_Mfhi(tHi));
+            addInstr(env, MIPSInstr_Mflo(tLo));
+
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, False, tHi_1, tHi,
+                                         MIPSRH_Imm(False, 32)));
+
+            addInstr(env, MIPSInstr_LI(mask, 0xffffffff));
+            addInstr(env, MIPSInstr_Alu(Malu_AND, tLo_1, tLo,
+                          MIPSRH_Reg(mask)));
+
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_dst, tHi_1,
+                          MIPSRH_Reg(tLo_1)));
+
+            return r_dst;
+         }
+
+         if (e->Iex.Binop.op == Iop_8HLto16
+             || e->Iex.Binop.op == Iop_16HLto32) {
+            HReg tHi   = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg tLo   = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            HReg tLo_1 = newVRegI(env);
+            HReg tHi_1 = newVRegI(env);
+            HReg r_dst = newVRegI(env);
+            UInt shift = 0;
+            UInt mask  = 0;
+            switch (e->Iex.Binop.op) {
+               case Iop_8HLto16:
+                  shift = 8;
+                  mask  = 0xff;
+                  break;
+               case Iop_16HLto32:
+                  shift = 16;
+                  mask  = 0xffff;
+                  break;
+               default:
+                  break;
+            }
+
+            /* sll tHi_1, tHi,   shift
+               and tLo_1, tLo,   mask
+               or  r_dst, tHi_1, tLo_1 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, tHi_1, tHi,
+                                         MIPSRH_Imm(False, shift)));
+            addInstr(env, MIPSInstr_Alu(Malu_AND, tLo_1, tLo,
+                          MIPSRH_Imm(False, mask)));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_dst, tHi_1,
+                          MIPSRH_Reg(tLo_1)));
+            return r_dst;
+         }
+
+         if (e->Iex.Binop.op == Iop_32HLto64) {
+            vassert(mode64);
+            HReg tHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg tLo = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            HReg tLo_1 = newVRegI(env);
+            HReg tHi_1 = newVRegI(env);
+            HReg r_dst = newVRegI(env);
+            HReg mask = newVRegI(env);
+
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, False, tHi_1, tHi,
+                                         MIPSRH_Imm(False, 32)));
+
+            addInstr(env, MIPSInstr_LI(mask, 0xffffffff));
+            addInstr(env, MIPSInstr_Alu(Malu_AND, tLo_1, tLo,
+                          MIPSRH_Reg(mask)));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_dst, tHi_1,
+                          MIPSRH_Reg(tLo_1)));
+
+            return r_dst;
+         }
+
+         if (e->Iex.Binop.op == Iop_F32toI64S) {
+            vassert(mode64);
+            HReg valS = newVRegI(env);
+            HReg tmpF = newVRegF(env);
+            HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
+
+            /* CVTLS tmpF, valF */
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLS, tmpF, valF));
+            set_MIPS_rounding_default(env);
+
+            /* Doubleword Move from Floating Point
+               dmfc1 valS, tmpF */
+            addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmfc1, valS, tmpF));
+
+            return valS;
+         }
+
+         if (e->Iex.Binop.op == Iop_F64toI32S) {
+            HReg valD;
+            if (mode64)
+               valD = iselFltExpr(env, e->Iex.Binop.arg2);
+            else
+               valD = iselDblExpr(env, e->Iex.Binop.arg2);
+            HReg valS = newVRegF(env);
+            HReg r_dst = newVRegI(env);
+
+            /* CVTWD valS, valD */
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWD, valS, valD));
+            set_MIPS_rounding_default(env);
+
+            /* Move Word From Floating Point
+               mfc1 r_dst, valS */
+            addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, valS));
+
+            return r_dst;
+         }
+
+         /* -------- DSP ASE -------- */
+         /* All used cases involving host-side helper calls. */
+         void* fn = NULL;
+         switch (e->Iex.Binop.op) {
+            case Iop_HAdd8Ux4:
+               fn = &h_generic_calc_HAdd8Ux4; break;
+            case Iop_HSub8Ux4:
+               fn = &h_generic_calc_HSub8Ux4; break;
+            case Iop_HSub16Sx2:
+               fn = &h_generic_calc_HSub16Sx2; break;
+            case Iop_QSub8Ux4:
+               fn = &h_generic_calc_QSub8Ux4; break;
+            default:
+                  break;
+         }
+
+         /* What's the retloc? */
+         RetLoc rloc = mk_RetLoc_INVALID();
+         if (ty == Ity_I32) {
+            rloc = mk_RetLoc_simple(RLPri_Int);
+         }
+         else if (ty == Ity_I64) {
+            rloc = mode64 ? mk_RetLoc_simple(RLPri_Int) :
+                            mk_RetLoc_simple(RLPri_2Int);
+         }
+         else {
+            goto irreducible;
+         }
+
+         if (fn) {
+            HReg regL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg regR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            HReg res  = newVRegI(env);
+            addInstr(env, mk_iMOVds_RR(hregMIPS_GPR4(env->mode64), regL));
+            addInstr(env, mk_iMOVds_RR(hregMIPS_GPR5(env->mode64), regR));
+            argiregs |= (1 << 4);
+            argiregs |= (1 << 5);
+            addInstr(env, MIPSInstr_CallAlways( MIPScc_AL,
+                                                (Addr)fn,
+                                                argiregs, rloc));
+            addInstr(env, mk_iMOVds_RR(res, hregMIPS_GPR2(env->mode64)));
+            return res;
+         }
+      break;
+   }
+
+   /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      IROp op_unop = e->Iex.Unop.op;
+
+      switch (op_unop) {
+         case Iop_1Sto8:
+         case Iop_1Sto16:
+         case Iop_1Sto32:
+         case Iop_8Sto16:
+         case Iop_8Sto32:
+         case Iop_16Sto32:
+         case Iop_16Sto64:
+         case Iop_8Sto64:
+         case Iop_1Sto64: {
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            Bool sz32;
+            UShort amt;
+            switch (op_unop) {
+               case Iop_1Sto8:
+                  amt = 31;
+                  sz32 = True;
+                  break;
+               case Iop_1Sto16:
+                  amt = 31;
+                  sz32 = True;
+                  break;
+               case Iop_1Sto32:
+                  amt = 31;
+                  sz32 = True;
+                  break;
+               case Iop_16Sto32:
+                  amt = 16;
+                  sz32 = True;
+                  break;
+               case Iop_16Sto64:
+                  amt = 48;
+                  sz32 = False;
+                  break;
+               case Iop_8Sto16:
+                  amt = 24;
+                  sz32 = True;
+                  break;
+               case Iop_8Sto32:
+                  amt = 24;
+                  sz32 = True;
+                  break;
+               case Iop_8Sto64:
+                  amt = 56;
+                  sz32 = False;
+                  break;
+               case Iop_1Sto64:
+                  amt = 63;
+                  sz32 = False;
+                  break;
+               default:
+                  vassert(0);
+            }
+
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, sz32, r_dst, r_src,
+                                         MIPSRH_Imm(False, amt)));
+            addInstr(env, MIPSInstr_Shft(Mshft_SRA, sz32, r_dst, r_dst,
+                                         MIPSRH_Imm(False, amt)));
+            return r_dst;
+         }
+
+         /* not(x) = nor(x,x) */
+         case Iop_Not1: {
+            HReg r_dst = newVRegI(env);
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Unop.arg);
+            MIPSRH *r_srcR = MIPSRH_Reg(r_srcL);
+
+            addInstr(env, MIPSInstr_LI(r_dst, 0x1));
+            addInstr(env, MIPSInstr_Alu(Malu_SUB, r_dst, r_dst, r_srcR));
+            return r_dst;
+         }
+
+         case Iop_Not8:
+         case Iop_Not16:
+         case Iop_Not32:
+         case Iop_Not64: {
+            HReg r_dst = newVRegI(env);
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Unop.arg);
+            MIPSRH *r_srcR = MIPSRH_Reg(r_srcL);
+
+            addInstr(env, MIPSInstr_Alu(Malu_NOR, r_dst, r_srcL, r_srcR));
+            return r_dst;
+         }
+
+         case Iop_ReinterpF32asI32: {
+            HReg fr_src = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg r_dst = newVRegI(env);
+
+            /* Move Word From Floating Point
+               mfc1 r_dst, fr_src */
+            addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, fr_src));
+
+            return r_dst;
+         }
+
+         case Iop_ReinterpF64asI64: {
+            vassert(mode64);
+            HReg fr_src = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg r_dst = newVRegI(env);
+
+            /* Doubleword Move from Floating Point
+               mfc1 r_dst, fr_src */
+            addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmfc1, r_dst, fr_src));
+
+            return r_dst;
+         }
+
+         case Iop_F64toI32S: {
+            HReg valD;
+            if (mode64)
+               valD = iselFltExpr(env, e->Iex.Binop.arg2);
+            else
+               valD = iselDblExpr(env, e->Iex.Binop.arg2);
+            HReg valS = newVRegF(env);
+            HReg r_dst = newVRegI(env);
+
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWD, valS, valD));
+            set_MIPS_rounding_default(env);
+
+            /* Move Word From Floating Point
+               mfc1 r_dst, valS */
+            addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mfc1, r_dst, valS));
+
+            return r_dst;
+         }
+
+         case Iop_16to8:
+         case Iop_32to1:
+         case Iop_32to8:
+         case Iop_32to16:
+            return iselWordExpr_R(env, e->Iex.Unop.arg);
+
+         case Iop_32HIto16: {
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
+                                         r_dst, r_src, MIPSRH_Imm(False, 16)));
+            return r_dst;
+         }
+
+         case Iop_64to1:
+         case Iop_64to8: {
+            vassert(mode64);
+            HReg r_src, r_dst;
+            UShort mask = (op_unop == Iop_64to1) ? 0x1 : 0xFF;
+            r_dst = newVRegI(env);
+            r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, MIPSInstr_Alu(Malu_AND, r_dst, r_src,
+                          MIPSRH_Imm(False, mask)));
+            return r_dst;
+         }
+
+         case Iop_16HIto8: {
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
+                                         r_dst, r_src, MIPSRH_Imm(False, 8)));
+            return r_dst;
+         }
+
+         case Iop_1Uto8:
+         case Iop_1Uto32:
+         case Iop_1Uto64:
+         case Iop_8Uto16:
+         case Iop_8Uto32:
+         case Iop_8Uto64:
+         case Iop_16Uto32:
+         case Iop_16Uto64: {
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            UShort mask = 0;
+            switch (op_unop) {
+               case Iop_1Uto64:
+                  vassert(mode64);
+               case Iop_1Uto8:
+               case Iop_1Uto32:
+                  mask = toUShort(0x1);
+                  break;
+               case Iop_8Uto64:
+                  vassert(mode64);
+               case Iop_8Uto16:
+               case Iop_8Uto32:
+                  mask = toUShort(0xFF);
+                  break;
+               case Iop_16Uto64:
+                  vassert(mode64);
+               case Iop_16Uto32:
+                  mask = toUShort(0xFFFF);
+                  break;
+               default:
+                  vassert(0);
+                  break;
+            }
+            addInstr(env, MIPSInstr_Alu(Malu_AND, r_dst, r_src,
+                          MIPSRH_Imm(False, mask)));
+            return r_dst;
+         }
+
+         case Iop_32Uto64: {
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            vassert(mode64);
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, False /*!32bit shift */,
+                                         r_dst, r_src, MIPSRH_Imm(False, 32)));
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, False /*!32bit shift */,
+                                         r_dst, r_dst, MIPSRH_Imm(False, 32)));
+            return r_dst;
+         }
+
+         case Iop_64HIto32: {
+            if (env->mode64) {
+               HReg r_dst = newVRegI(env);
+               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+               addInstr(env, MIPSInstr_Shft(Mshft_SRA, False /*64bit shift */,
+                       r_dst, r_src, MIPSRH_Imm(True, 32)));
+               return r_dst;
+            } else {
+               HReg rHi, rLo;
+               iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
+               return rHi;
+            }
+         }
+
+         case Iop_64to32: {
+            if (env->mode64) {
+               HReg r_dst = newVRegI(env);
+               r_dst = iselWordExpr_R(env, e->Iex.Unop.arg);
+               return r_dst;
+            } else {
+               HReg rHi, rLo;
+               iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
+               return rLo;
+            }
+         }
+
+         case Iop_64to16: {
+            vassert(env->mode64);
+            HReg r_dst = newVRegI(env);
+            r_dst = iselWordExpr_R(env, e->Iex.Unop.arg);
+            return r_dst;
+         }
+
+         case Iop_32Sto64: {
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            vassert(mode64);
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /*!32bit shift */,
+                                         r_dst, r_src, MIPSRH_Imm(True, 0)));
+            return r_dst;
+         }
+
+         case Iop_CmpNEZ8:
+         case Iop_CmpNEZ16: {
+            HReg r_dst = newVRegI(env);
+            HReg tmp = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            UShort mask = (op_unop == Iop_CmpNEZ8) ? 0xFF : 0xFFFF;
+
+            addInstr(env, MIPSInstr_Alu(Malu_AND, tmp, r_src,
+                                        MIPSRH_Imm(False, mask)));
+            addInstr(env, MIPSInstr_Cmp(False, True, r_dst, tmp,
+                                        hregMIPS_GPR0(mode64), MIPScc_NE));
+            return r_dst;
+         }
+
+         case Iop_CmpNEZ32: {
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+
+            addInstr(env, MIPSInstr_Cmp(False, True, r_dst, r_src,
+                                        hregMIPS_GPR0(mode64), MIPScc_NE));
+            return r_dst;
+         }
+
+         case Iop_CmpwNEZ32: {
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+
+            addInstr(env, MIPSInstr_Alu(Malu_SUB, r_dst, hregMIPS_GPR0(mode64),
+                          MIPSRH_Reg(r_src)));
+
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_dst, r_dst,
+                                        MIPSRH_Reg(r_src)));
+            addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, r_dst, r_dst,
+                                         MIPSRH_Imm(False, 31)));
+            return r_dst;
+         }
+
+         case Iop_Left8:
+         case Iop_Left16:
+         case Iop_Left32:
+         case Iop_Left64: {
+            if (op_unop == Iop_Left64 && !mode64)
+               goto irreducible;
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            MIPSAluOp op = (op_unop == Iop_Left64) ? Malu_DSUB : Malu_SUB;
+            addInstr(env, MIPSInstr_Alu(op, r_dst,
+                                        hregMIPS_GPR0(mode64),
+                                        MIPSRH_Reg(r_src)));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, r_dst, r_dst,
+                          MIPSRH_Reg(r_src)));
+            return r_dst;
+         }
+
+         case Iop_Clz64:
+            vassert(mode64);
+         case Iop_Clz32: {
+            HReg r_dst = newVRegI(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            MIPSUnaryOp op = (op_unop == Iop_Clz64) ? Mun_DCLZ : Mun_CLZ;
+            addInstr(env, MIPSInstr_Unary(op, r_dst, r_src));
+            return r_dst;
+         }
+
+         case Iop_CmpNEZ64: {
+            HReg hi, lo;
+            HReg r_dst = newVRegI(env);
+            HReg r_src;
+            if (env->mode64) {
+               r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            } else {
+               r_src = newVRegI(env);
+               iselInt64Expr(&hi, &lo, env, e->Iex.Unop.arg);
+               addInstr(env, MIPSInstr_Alu(Malu_OR, r_src, lo, MIPSRH_Reg(hi)));
+            }
+            addInstr(env, MIPSInstr_Cmp(False, !(env->mode64), r_dst, r_src,
+                                        hregMIPS_GPR0(mode64), MIPScc_NE));
+            return r_dst;
+         }
+
+         case Iop_CmpwNEZ64: {
+            HReg tmp1;
+            HReg tmp2 = newVRegI(env);
+            vassert(env->mode64);
+            tmp1 = iselWordExpr_R(env, e->Iex.Unop.arg);
+
+            addInstr(env, MIPSInstr_Alu(Malu_DSUB, tmp2, hregMIPS_GPR0(mode64),
+                          MIPSRH_Reg(tmp1)));
+
+            addInstr(env, MIPSInstr_Alu(Malu_OR, tmp2, tmp2, MIPSRH_Reg(tmp1)));
+            addInstr(env, MIPSInstr_Shft(Mshft_SRA, False, tmp2, tmp2,
+                                         MIPSRH_Imm (False, 63)));
+            return tmp2;
+         }
+
+         case Iop_128HIto64: {
+            vassert(mode64);
+            HReg rHi, rLo;
+            iselInt128Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
+            return rHi;  /* and abandon rLo .. poor wee thing :-) */
+         }
+
+         case Iop_128to64: {
+            vassert(mode64);
+            HReg rHi, rLo;
+            iselInt128Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
+            return rLo;  /* and abandon rHi .. poor wee thing :-) */
+         }
+
+         default:
+            break;
+      }
+
+      /* -------- DSP ASE -------- */
+      /* All Unop cases involving host-side helper calls. */
+      void* fn = NULL;
+      switch (e->Iex.Unop.op) {
+         case Iop_CmpNEZ16x2:
+            fn = &h_generic_calc_CmpNEZ16x2; break;
+         case Iop_CmpNEZ8x4:
+            fn = &h_generic_calc_CmpNEZ8x4; break;
+         default:
+            break;
+      }
+
+      RetLoc rloc = mk_RetLoc_INVALID();
+      if (ty == Ity_I32) {
+         rloc = mk_RetLoc_simple(RLPri_Int);
+      }
+      else if (ty == Ity_I64) {
+         rloc = mode64 ? mk_RetLoc_simple(RLPri_Int) :
+                         mk_RetLoc_simple(RLPri_2Int);
+      }
+      else {
+         goto irreducible;
+      }
+
+      if (fn) {
+         HReg regL = iselWordExpr_R(env, e->Iex.Unop.arg);
+         HReg res  = newVRegI(env);
+         addInstr(env, mk_iMOVds_RR(hregMIPS_GPR4(env->mode64), regL));
+         argiregs |= (1 << 4);
+         addInstr(env, MIPSInstr_CallAlways( MIPScc_AL,
+                                             (Addr)fn,
+                                             argiregs, rloc));
+         addInstr(env, mk_iMOVds_RR(res, hregMIPS_GPR2(env->mode64)));
+         return res;
+      }
+
+      break;
+   }
+
+   /* --------- GET --------- */
+   case Iex_Get: {
+      if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32
+          || ((ty == Ity_I64) && mode64)) {
+         HReg r_dst = newVRegI(env);
+
+         MIPSAMode *am_addr = MIPSAMode_IR(e->Iex.Get.offset,
+                                           GuestStatePointer(mode64));
+         addInstr(env, MIPSInstr_Load(toUChar(sizeofIRType(ty)), r_dst, am_addr,
+                                      mode64));
+         return r_dst;
+      }
+      break;
+   }
+
+   /* --------- ITE --------- */
+   case Iex_ITE: {
+      if ((ty == Ity_I8 || ty == Ity_I16 ||
+           ty == Ity_I32 || ((ty == Ity_I64))) &&
+           typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) {
+         HReg r_dst  = iselWordExpr_R(env, e->Iex.ITE.iffalse);
+         HReg r1     = iselWordExpr_R(env, e->Iex.ITE.iftrue);
+         HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
+         /*
+          * r_dst = r0
+          * movn r_dst, r1, r_cond
+          */
+         addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, r_dst, r1, r_cond));
+         return r_dst;
+      }
+      break;
+   }
+
+   /* --------- LITERAL --------- */
+   /* 32/16/8-bit literals */
+   case Iex_Const: {
+      Long l;
+      HReg r_dst = newVRegI(env);
+      IRConst *con = e->Iex.Const.con;
+      switch (con->tag) {
+         case Ico_U64:
+            if (!mode64)
+               goto irreducible;
+            l = (Long) con->Ico.U64;
+            break;
+         case Ico_U32:
+            l = (Long) (Int) con->Ico.U32;
+            break;
+         case Ico_U16:
+            l = (Long) (Int) (Short) con->Ico.U16;
+            break;
+         case Ico_U8:
+            l = (Long) (Int) (Char) con->Ico.U8;
+            break;
+         default:
+            vpanic("iselIntExpr_R.const(mips)");
+      }
+      addInstr(env, MIPSInstr_LI(r_dst, (ULong) l));
+      return r_dst;
+   }
+
+   /* --------- CCALL --------- */
+   case Iex_CCall: {
+      HReg r_dst = newVRegI(env);
+      vassert(ty == e->Iex.CCall.retty);
+
+      /* be very restrictive for now.  Only 32/64-bit ints allowed for
+         args, and 64 and 32 bits for return type.  Don't forget to change
+         the RetLoc if more return types are allowed in future. */
+      if (e->Iex.CCall.retty != Ity_I64 && e->Iex.CCall.retty != Ity_I32)
+         goto irreducible;
+
+      /* Marshal args, do the call, clear stack. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall(&addToSp, &rloc, env, NULL/*guard*/, e->Iex.CCall.cee,
+                   e->Iex.CCall.retty, e->Iex.CCall.args );
+
+      vassert(is_sane_RetLoc(rloc));
+      vassert(rloc.pri == RLPri_Int);
+      vassert(addToSp == 0);
+      addInstr(env, mk_iMOVds_RR(r_dst, hregMIPS_GPR2(mode64)));
+      return r_dst;
+   }
+
+   default:
+      break;
+   }  /* end switch(e->tag) */
+
+   /* We get here if no pattern matched. */
+   irreducible:
+      vex_printf("--------------->\n");
+      if (e->tag == Iex_RdTmp)
+         vex_printf("Iex_RdTmp \n");
+      ppIRExpr(e);
+
+      vpanic("iselWordExpr_R(mips): cannot reduce tree");
+}
+
+/* --------------------- RH --------------------- */
+
+/* Compute an I8/I16/I32 (and I64, in 64-bit mode) into an RH
+   (reg-or-halfword-immediate).  It's important to specify whether the
+   immediate is to be regarded as signed or not.  If yes, this will
+   never return -32768 as an immediate; this guarantees that all
+   signed immediates that are returned can have their sign inverted if
+   need be. */
+
+static MIPSRH *iselWordExpr_RH(ISelEnv * env, Bool syned, IRExpr * e)
+{
+   /* Delegate to the worker, then sanity-check what came back
+      before handing it to the caller. */
+   MIPSRH *rh = iselWordExpr_RH_wrk(env, syned, e);
+   if (rh->tag == Mrh_Imm) {
+      vassert(rh->Mrh.Imm.syned == syned);
+      /* -32768 must never be returned as a signed immediate; its
+         sign could not be inverted within 16 bits. */
+      if (syned)
+         vassert(rh->Mrh.Imm.imm16 != 0x8000);
+   } else if (rh->tag == Mrh_Reg) {
+      vassert(hregClass(rh->Mrh.Reg.reg) == HRcGPR(env->mode64));
+      vassert(hregIsVirtual(rh->Mrh.Reg.reg));
+   } else {
+      vpanic("iselIntExpr_RH: unknown mips RH tag");
+   }
+   return rh;
+}
+
+/* DO NOT CALL THIS DIRECTLY !  Worker for iselWordExpr_RH: try to
+   fold a constant 'e' into a 16-bit immediate operand; otherwise
+   evaluate it into a register. */
+static MIPSRH *iselWordExpr_RH_wrk(ISelEnv * env, Bool syned, IRExpr * e)
+{
+   ULong u;
+   Long l;
+   IRType ty = typeOfIRExpr(env->type_env, e);
+   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 ||
+          ((ty == Ity_I64) && env->mode64));
+
+   /* special case: immediate */
+   if (e->tag == Iex_Const) {
+      IRConst *con = e->Iex.Const.con;
+      /* What value are we aiming to generate? */
+      switch (con->tag) {
+         /* Note: Not sign-extending - we carry 'syned' around */
+         case Ico_U64:
+            vassert(env->mode64);
+            u = con->Ico.U64;
+            break;
+         case Ico_U32:
+            u = 0xFFFFFFFF & con->Ico.U32;
+            break;
+         case Ico_U16:
+            u = 0x0000FFFF & con->Ico.U16;
+            break;
+         case Ico_U8:
+            u = 0x000000FF & con->Ico.U8;
+            break;
+         default:
+            vpanic("iselIntExpr_RH.Iex_Const(mips)");
+      }
+      l = (Long) u;
+      /* Now figure out if it's representable. */
+      if (!syned && u <= 65535) {
+         return MIPSRH_Imm(False /*unsigned */ , toUShort(u & 0xFFFF));
+      }
+      /* Range deliberately excludes -32768 so any signed immediate
+         returned can have its sign inverted and still fit in 16 bits.
+         NOTE(review): 'u' is zero-extended above, so negative U32/U16/U8
+         constants never satisfy this test and take the register path;
+         only Ico_U64 can yield a negative 'l' here -- confirm intended. */
+      if (syned && l >= -32767 && l <= 32767) {
+         return MIPSRH_Imm(True /*signed */ , toUShort(u & 0xFFFF));
+      }
+      /* no luck; use the Slow Way. */
+   }
+   /* default case: calculate into a register and return that */
+   return MIPSRH_Reg(iselWordExpr_R(env, e));
+}
+
+/* --------------------- RH5u --------------------- */
+
+/* Compute an I8 into a reg-or-5-bit-unsigned-immediate, the latter
+   being an immediate in the range 1 .. 31 inclusive.  Used for doing
+   shift amounts. */
+
+static MIPSRH *iselWordExpr_RH5u(ISelEnv * env, IRExpr * e)
+{
+   /* Run the worker, then check the result is well-formed:
+      either an unsigned immediate in 1..31 or a virtual register. */
+   MIPSRH *rh = iselWordExpr_RH5u_wrk(env, e);
+   if (rh->tag == Mrh_Imm) {
+      vassert(rh->Mrh.Imm.imm16 >= 1 && rh->Mrh.Imm.imm16 <= 31);
+      vassert(!rh->Mrh.Imm.syned);
+   } else if (rh->tag == Mrh_Reg) {
+      vassert(hregClass(rh->Mrh.Reg.reg) == HRcInt32);
+      vassert(hregIsVirtual(rh->Mrh.Reg.reg));
+   } else {
+      vpanic("iselIntExpr_RH5u: unknown mips RH tag");
+   }
+   return rh;
+}
+
+/* DO NOT CALL THIS DIRECTLY !  Worker: fold a constant shift amount
+   in 1..31 into an unsigned immediate, else compute into a register. */
+static MIPSRH *iselWordExpr_RH5u_wrk(ISelEnv * env, IRExpr * e)
+{
+   IRType ty = typeOfIRExpr(env->type_env, e);
+   vassert(ty == Ity_I8);
+
+   /* special case: immediate shift amount */
+   if (e->tag == Iex_Const) {
+      IRConst *con = e->Iex.Const.con;
+      if (con->tag == Ico_U8 && con->Ico.U8 >= 1 && con->Ico.U8 <= 31)
+         return MIPSRH_Imm(False /*unsigned */ , con->Ico.U8);
+   }
+
+   /* default case: calculate into a register and return that */
+   return MIPSRH_Reg(iselWordExpr_R(env, e));
+}
+
+/* --------------------- RH6u --------------------- */
+
+/* Only used in 64-bit mode. */
+static MIPSRH *iselWordExpr_RH6u ( ISelEnv * env, IRExpr * e )
+{
+   /* Run the worker, then check the result is well-formed:
+      either an unsigned immediate in 1..63 or a virtual register. */
+   MIPSRH *rh = iselWordExpr_RH6u_wrk(env, e);
+   if (rh->tag == Mrh_Imm) {
+      vassert(rh->Mrh.Imm.imm16 >= 1 && rh->Mrh.Imm.imm16 <= 63);
+      vassert(!rh->Mrh.Imm.syned);
+   } else if (rh->tag == Mrh_Reg) {
+      vassert(hregClass(rh->Mrh.Reg.reg) == HRcGPR(env->mode64));
+      vassert(hregIsVirtual(rh->Mrh.Reg.reg));
+   } else {
+      vpanic("iselIntExpr_RH6u: unknown mips64 RI tag");
+   }
+   return rh;
+}
+
+/* DO NOT CALL THIS DIRECTLY !  Worker: fold a constant shift amount
+   in 1..63 into an unsigned immediate, else compute into a register. */
+static MIPSRH *iselWordExpr_RH6u_wrk ( ISelEnv * env, IRExpr * e )
+{
+   IRType ty = typeOfIRExpr(env->type_env, e);
+   vassert(ty == Ity_I8);
+
+   /* special case: immediate shift amount */
+   if (e->tag == Iex_Const) {
+      IRConst *con = e->Iex.Const.con;
+      if (con->tag == Ico_U8 && con->Ico.U8 >= 1 && con->Ico.U8 <= 63)
+         return MIPSRH_Imm(False /*unsigned */ , con->Ico.U8);
+   }
+
+   /* default case: calculate into a register and return that */
+   return MIPSRH_Reg(iselWordExpr_R(env, e));
+}
+
+/* --------------------- CONDCODE --------------------- */
+
+/* Generate code to evaluate a bit-typed expression, returning the
+   condition code which would correspond to the expression notionally
+   returning 1. */
+
+static MIPSCondCode iselCondCode(ISelEnv * env, IRExpr * e)
+{
+   /* Run the worker and make sure it never hands back the
+      never-taken condition. */
+   MIPSCondCode condcode = iselCondCode_wrk(env, e);
+   vassert(condcode != MIPScc_NV);
+   return condcode;
+}
+
+/* DO NOT CALL THIS DIRECTLY !  Worker for iselCondCode: evaluates the
+   Ity_I1 expression 'e', stores the 0/1 result to the guest_COND
+   pseudo-register, and returns the MIPS condition code under which
+   the stored value counts as "true". */
+static MIPSCondCode iselCondCode_wrk(ISelEnv * env, IRExpr * e)
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env, e) == Ity_I1);
+
+   /* Cmp*32*(x,y) or Cmp*64*(x,y)?  Guard on the tag before touching
+      e->Iex.Binop: the members of the Iex union are only meaningful
+      for the matching tag, so reading Binop.op through (say) an
+      Iex_RdTmp could alias the temp number and mis-select here. */
+   if (e->tag == Iex_Binop
+       && (e->Iex.Binop.op == Iop_CmpEQ32
+           || e->Iex.Binop.op == Iop_CmpNE32
+           || e->Iex.Binop.op == Iop_CmpNE64
+           || e->Iex.Binop.op == Iop_CmpLT32S
+           || e->Iex.Binop.op == Iop_CmpLT32U
+           || e->Iex.Binop.op == Iop_CmpLT64U
+           || e->Iex.Binop.op == Iop_CmpLE32S
+           || e->Iex.Binop.op == Iop_CmpLE64S
+           || e->Iex.Binop.op == Iop_CmpLT64S
+           || e->Iex.Binop.op == Iop_CmpEQ64
+           || e->Iex.Binop.op == Iop_CasCmpEQ32
+           || e->Iex.Binop.op == Iop_CasCmpEQ64)) {
+
+      /* Signed comparisons need a signed machine compare. */
+      Bool syned = (e->Iex.Binop.op == Iop_CmpLT32S
+                   || e->Iex.Binop.op == Iop_CmpLE32S
+                   || e->Iex.Binop.op == Iop_CmpLT64S
+                   || e->Iex.Binop.op == Iop_CmpLE64S);
+      Bool size32;
+      HReg dst = newVRegI(env);
+      HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1);
+      HReg r2 = iselWordExpr_R(env, e->Iex.Binop.arg2);
+
+      MIPSCondCode cc;
+
+      /* Map the IR comparison to a condition code and operand size. */
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpEQ32:
+         case Iop_CasCmpEQ32:
+            cc = MIPScc_EQ;
+            size32 = True;
+            break;
+         case Iop_CmpNE32:
+            cc = MIPScc_NE;
+            size32 = True;
+            break;
+         case Iop_CmpNE64:
+            /* NOTE(review): size32 is True here although this is a
+               64-bit compare; kept as in the original -- confirm
+               against MIPSInstr_Cmp's handling of NE. */
+            cc = MIPScc_NE;
+            size32 = True;
+            break;
+         case Iop_CmpLT32S:
+            cc = MIPScc_LT;
+            size32 = True;
+            break;
+         case Iop_CmpLT32U:
+            cc = MIPScc_LO;
+            size32 = True;
+            break;
+         case Iop_CmpLT64U:
+            cc = MIPScc_LO;
+            size32 = False;
+            break;
+         case Iop_CmpLE32S:
+            cc = MIPScc_LE;
+            size32 = True;
+            break;
+         case Iop_CmpLE64S:
+            cc = MIPScc_LE;
+            size32 = False;
+            break;
+         case Iop_CmpLT64S:
+            cc = MIPScc_LT;
+            size32 = False;
+            break;
+         case Iop_CmpEQ64:
+         case Iop_CasCmpEQ64:
+            cc = MIPScc_EQ;
+            size32 = False;
+            break;
+         default:
+            vpanic("iselCondCode(mips): CmpXX32 or CmpXX64");
+            break;
+      }
+
+      addInstr(env, MIPSInstr_Cmp(syned, size32, dst, r1, r2, cc));
+      /* Store result to guest_COND */
+      MIPSAMode *am_addr = MIPSAMode_IR(0, GuestStatePointer(mode64));
+
+      addInstr(env, MIPSInstr_Store(4,
+               MIPSAMode_IR(am_addr->Mam.IR.index + COND_OFFSET(mode64),
+                            am_addr->Mam.IR.base),
+               dst, mode64));
+      return cc;
+   }
+
+   /* Not1(x): compute 1 - x, true when the result is nonzero.
+      The original tested e->Iex.Binop.op but read e->Iex.Unop.arg;
+      Iop_Not1 is a unary op, so guard and access via the Unop arm
+      consistently. */
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) {
+      HReg r_dst = newVRegI(env);
+      HReg r_srcL = iselWordExpr_R(env, e->Iex.Unop.arg);
+      MIPSRH *r_srcR = MIPSRH_Reg(r_srcL);
+
+      addInstr(env, MIPSInstr_LI(r_dst, 0x1));
+      addInstr(env, MIPSInstr_Alu(Malu_SUB, r_dst, r_dst, r_srcR));
+      /* Store result to guest_COND */
+      MIPSAMode *am_addr = MIPSAMode_IR(0, GuestStatePointer(mode64));
+
+      addInstr(env, MIPSInstr_Store(4,
+               MIPSAMode_IR(am_addr->Mam.IR.index + COND_OFFSET(mode64),
+                            am_addr->Mam.IR.base),
+               r_dst, mode64));
+      return MIPScc_NE;
+   }
+
+   /* Anything else expressible as a word: evaluate it and test for
+      equality with the stored value. */
+   if (e->tag == Iex_RdTmp || e->tag == Iex_Unop) {
+      HReg r_dst = iselWordExpr_R_wrk(env, e);
+      /* Store result to guest_COND */
+      MIPSAMode *am_addr = MIPSAMode_IR(0, GuestStatePointer(mode64));
+
+      addInstr(env, MIPSInstr_Store(4,
+               MIPSAMode_IR(am_addr->Mam.IR.index + COND_OFFSET(mode64),
+                            am_addr->Mam.IR.base),
+               r_dst, mode64));
+      return MIPScc_EQ;
+   }
+
+   vex_printf("iselCondCode(mips): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselCondCode(mips)");
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (128 bit)               ---*/
+/*---------------------------------------------------------*/
+
+/* 64-bit mode ONLY: compute a 128-bit value into a register pair,
+   which is returned as the first two parameters.  As with
+   iselWordExpr_R, these may be either real or virtual regs; in any
+   case they must not be changed by subsequent code emitted by the
+   caller.  */
+
+static void iselInt128Expr(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e)
+{
+   vassert(env->mode64);
+   iselInt128Expr_wrk(rHi, rLo, env, e);
+   /* Both halves must come back as virtual GPRs. */
+   vassert(hregClass(*rHi) == HRcGPR(env->mode64));
+   vassert(hregClass(*rLo) == HRcGPR(env->mode64));
+   vassert(hregIsVirtual(*rHi));
+   vassert(hregIsVirtual(*rLo));
+}
+
+/* DO NOT CALL THIS DIRECTLY !  Worker for iselInt128Expr: compute a
+   128-bit value into the register pair (*rHi, *rLo). */
+static void iselInt128Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env,
+                               IRExpr * e)
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env, e) == Ity_I128);
+
+   /* read 128-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTempPair(rHi, rLo, env, e->Iex.RdTmp.tmp);
+      return;
+   }
+
+   /* --------- BINARY ops --------- */
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         /* 64 x 64 -> 128 multiply */
+         case Iop_MullU64:
+         case Iop_MullS64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            Bool syned = toBool(e->Iex.Binop.op == Iop_MullS64);
+            /* r_dst receives the ALU-visible result of the mul insn,
+               but the full 128-bit product is picked up from the
+               HI/LO registers below; r_dst itself is not used. */
+            HReg r_dst = newVRegI(env);
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            addInstr(env, MIPSInstr_Mul(syned, True, False /*64bit mul */ ,
+                                        r_dst, r_srcL, r_srcR));
+            addInstr(env, MIPSInstr_Mfhi(tHi));
+            addInstr(env, MIPSInstr_Mflo(tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 64HLto128(e1,e2) */
+         case Iop_64HLto128:
+            *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            return;
+
+         /* 64-bit divide: remainder lands in HI, quotient in LO. */
+         case Iop_DivModS64to64: {
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            /* Only Iop_DivModS64to64 reaches this case, so syned is
+               always True here; kept in this form for symmetry with
+               the case below. */
+            Bool syned = toBool(e->Iex.Binop.op == Iop_DivModS64to64);
+
+            addInstr(env, MIPSInstr_Div(syned, False, r_srcL, r_srcR));
+            addInstr(env, MIPSInstr_Mfhi(tHi));
+            addInstr(env, MIPSInstr_Mflo(tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         case Iop_DivModU128to64:
+         case Iop_DivModS128to64: {
+            vassert(mode64);
+            HReg rHi1, rLo1;
+            iselInt128Expr(&rHi1, &rLo1, env, e->Iex.Binop.arg1);
+
+            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            Bool syned = toBool(e->Iex.Binop.op == Iop_DivModS128to64);
+
+            /* NOTE(review): only the low half (rLo1) of the 128-bit
+               dividend is used; rHi1 is ignored.  Correct only when
+               the high half carries no significant bits -- confirm
+               against the front ends that emit these ops. */
+            addInstr(env, MIPSInstr_Div(syned, False, rLo1, r_srcR));
+            addInstr(env, MIPSInstr_Mfhi(tHi));
+            addInstr(env, MIPSInstr_Mflo(tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         default:
+            break;
+      }
+   }
+   vex_printf("iselInt128Expr(mips64): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselInt128Expr(mips64)");
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64 bit)                ---*/
+/*---------------------------------------------------------*/
+
+/* 32-bit mode ONLY. Compute a 64-bit value into the register
+ * pair HI, LO. HI and LO must not be changed by subsequent
+ *  code emitted by the caller. */
+
+static void iselInt64Expr(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e)
+{
+   vassert(!env->mode64);
+   iselInt64Expr_wrk(rHi, rLo, env, e);
+   /* Sanity: both halves must come back as virtual 32-bit int registers. */
+   vassert(hregClass(*rLo) == HRcInt32);
+   vassert(hregIsVirtual(*rLo));
+   vassert(hregClass(*rHi) == HRcInt32);
+   vassert(hregIsVirtual(*rHi));
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+/* Compute a 64-bit value into the virtual register pair *rHi:*rLo.
+   Handles RdTmp/Load/Const/Get/ITE plus the 64-bit binary and unary
+   ops needed in 32-bit mode; panics on anything else. */
+static void iselInt64Expr_wrk(HReg * rHi, HReg * rLo, ISelEnv * env, IRExpr * e)
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env, e) == Ity_I64);
+
+   /* read 64-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTemp64(rHi, rLo, env, e->Iex.RdTmp.tmp);
+      return;
+   }
+   /* 64-bit load, as two 32-bit loads at offsets 0 and 4. */
+   if (e->tag == Iex_Load) {
+      HReg tLo = newVRegI(env);
+      HReg tHi = newVRegI(env);
+      HReg r_addr = iselWordExpr_R(env, e->Iex.Load.addr);
+      addInstr(env, MIPSInstr_Load(4, tHi, MIPSAMode_IR(0, r_addr), mode64));
+      addInstr(env, MIPSInstr_Load(4, tLo, MIPSAMode_IR(4, r_addr), mode64));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* 64-bit literal */
+   if (e->tag == Iex_Const) {
+      /* Check the constant's tag before reading the union member. */
+      vassert(e->Iex.Const.con->tag == Ico_U64);
+      ULong w64 = e->Iex.Const.con->Ico.U64;
+      UInt wHi = toUInt(w64 >> 32);
+      UInt wLo = toUInt(w64);
+      HReg tLo = newVRegI(env);
+      HReg tHi = newVRegI(env);
+
+      if (wLo == wHi) {
+         /* Save a precious Int register in this special case. */
+         addInstr(env, MIPSInstr_LI(tLo, (ULong) wLo));
+         *rHi = tLo;
+         *rLo = tLo;
+      } else {
+         addInstr(env, MIPSInstr_LI(tHi, (ULong) wHi));
+         addInstr(env, MIPSInstr_LI(tLo, (ULong) wLo));
+         *rHi = tHi;
+         *rLo = tLo;
+      }
+
+      return;
+   }
+
+   /* 64-bit GET from the guest state, as two 32-bit loads. */
+   if (e->tag == Iex_Get) {
+      HReg tLo = newVRegI(env);
+      HReg tHi = newVRegI(env);
+
+      MIPSAMode *am_addr = MIPSAMode_IR(e->Iex.Get.offset,
+                                        GuestStatePointer(mode64));
+      addInstr(env, MIPSInstr_Load(4, tLo, am_addr, mode64));
+      addInstr(env, MIPSInstr_Load(4, tHi, nextMIPSAModeInt(am_addr), mode64));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* 64-bit ITE: select each half with movn on the condition. */
+   if (e->tag == Iex_ITE) {
+      vassert(typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1);
+      HReg expr0Lo, expr0Hi;
+      HReg expr1Lo, expr1Hi;
+      HReg desLo  = newVRegI(env);
+      HReg desHi  = newVRegI(env);
+      HReg cond = iselWordExpr_R(env, e->Iex.ITE.cond);
+
+      /* expr0Hi:expr0Lo = iffalse */
+      /* expr1Hi:expr1Lo = iftrue */
+      iselInt64Expr(&expr0Hi, &expr0Lo, env, e->Iex.ITE.iffalse);
+      iselInt64Expr(&expr1Hi, &expr1Lo, env, e->Iex.ITE.iftrue);
+
+      /* move desLo, expr0Lo
+       * move desHi, expr0Hi
+       * movn desLo, expr1Lo, cond
+       * movn desHi, expr1Hi, cond */
+      addInstr(env, mk_iMOVds_RR(desLo, expr0Lo));
+      addInstr(env, mk_iMOVds_RR(desHi, expr0Hi));
+      addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, desLo, expr1Lo, cond));
+      addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, desHi, expr1Hi, cond));
+
+      *rHi = desHi;
+      *rLo = desLo;
+      return;
+   }
+
+   /* --------- BINARY ops --------- */
+   if (e->tag == Iex_Binop) {
+      IROp op_binop = e->Iex.Binop.op;
+      switch (op_binop) {
+         /* Add64: add the halves; carry out of the low add is detected
+            as (tLo <u xLo) and folded into the high half. */
+         case Iop_Add64: {
+            HReg xLo, xHi, yLo, yHi, carryBit;
+
+            HReg tHi = newVRegI(env);
+            HReg tHi1 = newVRegI(env);
+            HReg tLo = newVRegI(env);
+
+            carryBit = newVRegI(env);
+
+            Bool size32 = True;
+            MIPSCondCode cc = MIPScc_LO;
+
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
+            addInstr(env, MIPSInstr_Alu(Malu_ADD, tLo, xLo, MIPSRH_Reg(yLo)));
+
+            /* Check carry: carryBit = (tLo <u xLo), i.e. the low add
+               wrapped around. */
+            addInstr(env, MIPSInstr_Cmp(False, size32, carryBit, tLo, xLo, cc));
+
+            addInstr(env, MIPSInstr_Alu(Malu_ADD, tHi1, xHi, MIPSRH_Reg(yHi)));
+            addInstr(env, MIPSInstr_Alu(Malu_ADD, tHi, tHi1,
+                                        MIPSRH_Reg(carryBit)));
+
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+         case Iop_Sub64: {
+            HReg xLo, xHi, yLo, yHi, borrow;
+            Bool size32 = True;
+            MIPSCondCode cc = MIPScc_LO;
+
+            HReg tHi = newVRegI(env);
+            HReg tLo = newVRegI(env);
+
+            borrow = newVRegI(env);
+
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
+
+            addInstr(env, MIPSInstr_Alu(Malu_SUB, tLo, xLo, MIPSRH_Reg(yLo)));
+
+            /* Check if borrow is needed: borrow = (xLo <u yLo). */
+            addInstr(env, MIPSInstr_Cmp(False, size32, borrow, xLo, yLo, cc));
+
+            /* tHi = xHi - yHi - borrow.  Accumulate into the fresh tHi
+               rather than adding the borrow into yHi: yHi may be the
+               virtual register holding a live IRTemp, and clobbering it
+               would corrupt that temp's value for later uses. */
+            addInstr(env, MIPSInstr_Alu(Malu_SUB, tHi, xHi, MIPSRH_Reg(yHi)));
+            addInstr(env, MIPSInstr_Alu(Malu_SUB, tHi, tHi,
+                                        MIPSRH_Reg(borrow)));
+
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+         /* 32 x 32 -> 64 multiply: result comes back in HI:LO. */
+         case Iop_MullU32:
+         case Iop_MullS32: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg r_dst = newVRegI(env);
+            Bool syned = toBool(op_binop == Iop_MullS32);
+            HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+
+            addInstr(env, MIPSInstr_Mul(syned /*Unsigned or Signed */,
+                                        True /*widen */ , True,
+                                        r_dst, r_srcL, r_srcR));
+            addInstr(env, MIPSInstr_Mfhi(tHi));
+            addInstr(env, MIPSInstr_Mflo(tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+
+            return;
+         }
+         /* div/mod: quotient in LO, remainder in HI. */
+         case Iop_DivModS64to32:
+         case Iop_DivModU64to32: {
+            HReg r_sHi, r_sLo;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            Bool syned = toBool(op_binop == Iop_DivModS64to32);
+            HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
+
+            iselInt64Expr(&r_sHi, &r_sLo, env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_Div(syned, True, r_sLo, r_srcR));
+            addInstr(env, MIPSInstr_Mfhi(tHi));
+            addInstr(env, MIPSInstr_Mflo(tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+
+            return;
+         }
+
+         /* 32HLto64(e1,e2): halves are already the answer. */
+         case Iop_32HLto64:
+            *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
+            *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2);
+
+            return;
+         /* Or64/And64/Xor64: apply the op to each half independently. */
+         case Iop_Or64:
+         case Iop_And64:
+         case Iop_Xor64: {
+            HReg xLo, xHi, yLo, yHi;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            MIPSAluOp op = (op_binop == Iop_Or64) ? Malu_OR :
+                           (op_binop == Iop_And64) ? Malu_AND : Malu_XOR;
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
+            addInstr(env, MIPSInstr_Alu(op, tHi, xHi, MIPSRH_Reg(yHi)));
+            addInstr(env, MIPSInstr_Alu(op, tLo, xLo, MIPSRH_Reg(yLo)));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         case Iop_Shr64: {
+#if defined (_MIPSEL)
+            /* 64-bit logical shift right based on what gcc generates:
+               <shift>:
+               nor  v0, zero, a2
+               sll  a3, a1, 0x1
+               sllv a3, a3, v0
+               srlv v0, a0, a2
+               srlv v1, a1, a2
+               andi a0, a2, 0x20
+               or   v0, a3, v0
+               movn v0, v1, a0
+               jr   ra
+               movn v1, zero, a0
+            */
+            HReg a0, a1;
+            HReg a0tmp = newVRegI(env);
+            HReg a2 = newVRegI(env);
+            HReg a3 = newVRegI(env);
+            HReg v0 = newVRegI(env);
+            HReg v1 = newVRegI(env);
+            HReg zero = newVRegI(env);
+            MIPSRH *sa = NULL;
+
+            iselInt64Expr(&a1, &a0, env, e->Iex.Binop.arg1);
+            sa = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
+
+            /* Materialise the shift amount (mod 64) into a2. */
+            if (sa->tag == Mrh_Imm) {
+               addInstr(env, MIPSInstr_LI(a2, sa->Mrh.Imm.imm16));
+            }
+            else {
+               addInstr(env, MIPSInstr_Alu(Malu_AND, a2, sa->Mrh.Reg.reg,
+                                           MIPSRH_Imm(False, 0x3f)));
+            }
+
+            addInstr(env, MIPSInstr_LI(zero, 0x00000000));
+            /* nor  v0, zero, a2 */
+            addInstr(env, MIPSInstr_Alu(Malu_NOR, v0, zero, MIPSRH_Reg(a2)));
+            /* sll  a3, a1, 0x1 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
+                                         a3, a1, MIPSRH_Imm(False, 0x1)));
+            /* sllv a3, a3, v0 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
+                                         a3, a3, MIPSRH_Reg(v0)));
+            /* srlv v0, a0, a2 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
+                                         v0, a0, MIPSRH_Reg(a2)));
+            /* srlv v1, a1, a2 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
+                                         v1, a1, MIPSRH_Reg(a2)));
+            /* andi a0, a2, 0x20 */
+            addInstr(env, MIPSInstr_Alu(Malu_AND, a0tmp, a2,
+                                        MIPSRH_Imm(False, 0x20)));
+            /* or   v0, a3, v0 */
+            addInstr(env, MIPSInstr_Alu(Malu_OR, v0, a3, MIPSRH_Reg(v0)));
+
+            /* movn    v0, v1, a0 */
+            addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v0, v1, a0tmp));
+            /* movn    v1, zero, a0 */
+            addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v1, zero, a0tmp));
+
+            *rHi = v1;
+            *rLo = v0;
+            return;
+#elif defined (_MIPSEB)
+            /* 64-bit logical shift right based on what gcc generates:
+               <shift>:
+               nor  v0, zero, a2
+               sll  a3, a0, 0x1
+               sllv a3, a3, v0
+               srlv v1, a1, a2
+               andi v0, a2, 0x20
+               or   v1, a3, v1
+               srlv a2, a0, a2
+               movn v1, a2, v0
+               movn a2, zero, v0
+               jr   ra
+               move v0, a2
+            */
+            HReg a0, a1;
+            HReg a2 = newVRegI(env);
+            HReg a2tmp = newVRegI(env);
+            HReg a3 = newVRegI(env);
+            HReg v0 = newVRegI(env);
+            HReg v1 = newVRegI(env);
+            HReg zero = newVRegI(env);
+            MIPSRH *sa = NULL;
+
+            iselInt64Expr(&a0, &a1, env, e->Iex.Binop.arg1);
+            sa = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
+
+            /* Materialise the shift amount (mod 64) into a2. */
+            if (sa->tag == Mrh_Imm) {
+               addInstr(env, MIPSInstr_LI(a2, sa->Mrh.Imm.imm16));
+            }
+            else {
+               addInstr(env, MIPSInstr_Alu(Malu_AND, a2, sa->Mrh.Reg.reg,
+                                           MIPSRH_Imm(False, 0x3f)));
+            }
+
+            addInstr(env, MIPSInstr_LI(zero, 0x00000000));
+            /* nor v0, zero, a2 */
+            addInstr(env, MIPSInstr_Alu(Malu_NOR, v0, zero, MIPSRH_Reg(a2)));
+            /* sll a3, a0, 0x1 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
+                                         a3, a0, MIPSRH_Imm(False, 0x1)));
+            /* sllv a3, a3, v0 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
+                                         a3, a3, MIPSRH_Reg(v0)));
+            /* srlv v1, a1, a2 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
+                                         v1, a1, MIPSRH_Reg(a2)));
+            /* andi v0, a2, 0x20 */
+            addInstr(env, MIPSInstr_Alu(Malu_AND, v0, a2,
+                                        MIPSRH_Imm(False, 0x20)));
+            /* or v1, a3, v1 */
+            addInstr(env, MIPSInstr_Alu(Malu_OR, v1, a3, MIPSRH_Reg(v1)));
+            /* srlv a2, a0, a2 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
+                             a2tmp, a0, MIPSRH_Reg(a2)));
+
+            /* movn v1, a2, v0 */
+            addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v1, a2tmp, v0));
+            /* movn  a2, zero, v0 */
+            addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, a2tmp, zero, v0));
+            /* move v0, a2 */
+            addInstr(env, mk_iMOVds_RR(v0, a2tmp));
+
+            *rHi = v0;
+            *rLo = v1;
+            return;
+#endif
+            /* NOTE(review): if neither _MIPSEL nor _MIPSEB is defined,
+               this case falls through into Iop_Shl64 -- presumably one of
+               the two is always defined for MIPS builds; confirm. */
+         }
+
+         case Iop_Shl64: {
+            /* 64-bit shift left based on what gcc generates:
+               <shift>:
+               nor  v0,zero,a2
+               srl  a3,a0,0x1
+               srlv a3,a3,v0
+               sllv v1,a1,a2
+               andi v0,a2,0x20
+               or   v1,a3,v1
+               sllv a2,a0,a2
+               movn v1,a2,v0
+               movn a2,zero,v0
+               jr   ra
+               move v0,a2
+            */
+            HReg a0, a1;
+            HReg a2 = newVRegI(env);
+            HReg a3 = newVRegI(env);
+            HReg v0 = newVRegI(env);
+            HReg v1 = newVRegI(env);
+            HReg zero = newVRegI(env);
+            MIPSRH *sa = NULL;
+
+            iselInt64Expr(&a1, &a0, env, e->Iex.Binop.arg1);
+            sa = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
+
+            /* Materialise the shift amount (mod 64) into a2. */
+            if (sa->tag == Mrh_Imm) {
+               addInstr(env, MIPSInstr_LI(a2, sa->Mrh.Imm.imm16));
+            }
+            else {
+               addInstr(env, MIPSInstr_Alu(Malu_AND, a2, sa->Mrh.Reg.reg,
+                                           MIPSRH_Imm(False, 0x3f)));
+            }
+
+            addInstr(env, MIPSInstr_LI(zero, 0x00000000));
+            /* nor v0, zero, a2 */
+            addInstr(env, MIPSInstr_Alu(Malu_NOR, v0, zero, MIPSRH_Reg(a2)));
+            /* srl a3, a0, 0x1 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
+                                         a3, a0, MIPSRH_Imm(False, 0x1)));
+            /* srlv a3, a3, v0 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
+                                         a3, a3, MIPSRH_Reg(v0)));
+            /* sllv v1, a1, a2 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
+                                         v1, a1, MIPSRH_Reg(a2)));
+            /* andi v0, a2, 0x20 */
+            addInstr(env, MIPSInstr_Alu(Malu_AND, v0, a2,
+                                        MIPSRH_Imm(False, 0x20)));
+            /* or v1, a3, v1 */
+            addInstr(env, MIPSInstr_Alu(Malu_OR, v1, a3, MIPSRH_Reg(v1)));
+            /* sllv a2, a0, a2 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
+                                         a2, a0, MIPSRH_Reg(a2)));
+
+            /* movn v1, a2, v0 */
+            addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v1, a2, v0));
+            /* movn a2, zero, v0 */
+            addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, a2, zero, v0));
+            addInstr(env, mk_iMOVds_RR(v0, a2));
+
+            *rHi = v1;
+            *rLo = v0;
+            return;
+         }
+
+         case Iop_Sar64: {
+            /* 64-bit arithmetic shift right based on what gcc generates:
+               <shift>:
+               nor  v0, zero, a2
+               sll  a3, a1, 0x1
+               sllv a3, a3, v0
+               srlv v0, a0, a2
+               srav v1, a1, a2
+               andi a0, a2, 0x20
+               sra  a1, a1, 0x1f
+               or   v0, a3, v0
+               movn v0, v1, a0
+               jr   ra
+               movn v1, a1, a0
+            */
+            HReg a0, a1;
+            HReg a0tmp = newVRegI(env);
+            HReg a1tmp = newVRegI(env);
+            HReg a2 = newVRegI(env);
+            HReg a3 = newVRegI(env);
+            HReg v0 = newVRegI(env);
+            HReg v1 = newVRegI(env);
+            HReg zero = newVRegI(env);
+            MIPSRH *sa = NULL;
+
+            iselInt64Expr(&a1, &a0, env, e->Iex.Binop.arg1);
+            sa = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
+
+            /* Materialise the shift amount (mod 64) into a2. */
+            if (sa->tag == Mrh_Imm) {
+               addInstr(env, MIPSInstr_LI(a2, sa->Mrh.Imm.imm16));
+            }
+            else {
+               addInstr(env, MIPSInstr_Alu(Malu_AND, a2, sa->Mrh.Reg.reg,
+                                           MIPSRH_Imm(False, 0x3f)));
+            }
+
+            addInstr(env, MIPSInstr_LI(zero, 0x00000000));
+            /* nor  v0, zero, a2 */
+            addInstr(env, MIPSInstr_Alu(Malu_NOR, v0, zero, MIPSRH_Reg(a2)));
+            /* sll  a3, a1, 0x1 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
+                                         a3, a1, MIPSRH_Imm(False, 0x1)));
+            /* sllv a3, a3, v0 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True /* 32bit shift */,
+                                         a3, a3, MIPSRH_Reg(v0)));
+            /* srlv v0, a0, a2 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRL, True /* 32bit shift */,
+                                         v0, a0, MIPSRH_Reg(a2)));
+            /* srav v1, a1, a2 */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRA, True /* 32bit shift */,
+                                         v1, a1, MIPSRH_Reg(a2)));
+            /* andi a0, a2, 0x20 */
+            addInstr(env, MIPSInstr_Alu(Malu_AND, a0tmp, a2,
+                                        MIPSRH_Imm(False, 0x20)));
+            /* sra a1, a1, 0x1f */
+            addInstr(env, MIPSInstr_Shft(Mshft_SRA, True /* 32bit shift */,
+                                         a1tmp, a1, MIPSRH_Imm(False, 0x1f)));
+            /* or   v0, a3, v0 */
+            addInstr(env, MIPSInstr_Alu(Malu_OR, v0, a3, MIPSRH_Reg(v0)));
+
+            /* movn    v0, v1, a0 */
+            addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v0, v1, a0tmp));
+            /* movn    v1, a1, a0 */
+            addInstr(env, MIPSInstr_MoveCond(MMoveCond_movn, v1, a1tmp, a0tmp));
+
+            *rHi = v1;
+            *rLo = v0;
+            return;
+         }
+
+         /* F32 -> I64S: convert in the FPU, spill the F64 result to the
+            stack and reload it as two 32-bit halves. */
+         case Iop_F32toI64S: {
+            HReg tmpD = newVRegD(env);
+            HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
+            HReg tLo  = newVRegI(env);
+            HReg tHi  = newVRegI(env);
+            MIPSAMode *am_addr;
+
+            /* CVTLS tmpD, valF */
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLS, tmpD, valF));
+            set_MIPS_rounding_default(env);
+
+            sub_from_sp(env, 16);  /* Move SP down 16 bytes */
+            am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+            /* store as F64 */
+            addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, tmpD,
+                                           am_addr));
+            /* load as 2xI32; half order depends on host endianness. */
+#if defined (_MIPSEL)
+            addInstr(env, MIPSInstr_Load(4, tLo, am_addr, mode64));
+            addInstr(env, MIPSInstr_Load(4, tHi, nextMIPSAModeFloat(am_addr),
+                                         mode64));
+#elif defined (_MIPSEB)
+            addInstr(env, MIPSInstr_Load(4, tHi, am_addr, mode64));
+            addInstr(env, MIPSInstr_Load(4, tLo, nextMIPSAModeFloat(am_addr),
+                                         mode64));
+#endif
+
+            /* Reset SP */
+            add_to_sp(env, 16);
+
+            *rHi = tHi;
+            *rLo = tLo;
+
+            return;
+         }
+
+         default:
+            break;
+      }
+   }
+
+   /* --------- UNARY ops --------- */
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+         /* 1Sto64: replicate bit 0 of src across a 32-bit word
+            (src << 31 >>s 31) and use it for both halves. */
+         case Iop_1Sto64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            HReg tmp = newVRegI(env);
+
+            addInstr(env, MIPSInstr_Shft(Mshft_SLL, True, tmp, src,
+                          MIPSRH_Imm(False, 31)));
+            addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, tmp, tmp,
+                          MIPSRH_Imm(False, 31)));
+
+            addInstr(env, mk_iMOVds_RR(tHi, tmp));
+            addInstr(env, mk_iMOVds_RR(tLo, tmp));
+
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 32Sto64(e): low half is src, high half is src >>s 31. */
+         case Iop_32Sto64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVds_RR(tHi, src));
+            addInstr(env, mk_iMOVds_RR(tLo, src));
+            addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, tHi, tHi,
+                          MIPSRH_Imm(False, 31)));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 8Uto64(e): mask low byte; zero the high half via GPR0+GPR0. */
+         case Iop_8Uto64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, MIPSInstr_Alu(Malu_AND, tLo, src,
+                                        MIPSRH_Imm(False, 0xFF)));
+            addInstr(env, MIPSInstr_Alu(Malu_ADD, tHi, hregMIPS_GPR0(mode64),
+                                        MIPSRH_Reg(hregMIPS_GPR0(mode64))));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 32Uto64(e): copy src low; zero the high half via GPR0+GPR0. */
+         case Iop_32Uto64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVds_RR(tLo, src));
+            addInstr(env, MIPSInstr_Alu(Malu_ADD, tHi, hregMIPS_GPR0(mode64),
+                          MIPSRH_Reg(hregMIPS_GPR0(mode64))));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* Left64(e) = e | -e. */
+         case Iop_Left64: {
+            HReg yHi, yLo;
+            HReg tHi  = newVRegI(env);
+            HReg tLo  = newVRegI(env);
+            HReg tmp  = newVRegI(env);
+            HReg tmp1  = newVRegI(env);
+            HReg tmp2  = newVRegI(env);
+            HReg zero = newVRegI(env);
+            MIPSCondCode cc = MIPScc_LO;
+
+            /* yHi:yLo = arg */
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Unop.arg);
+            /* zero = 0 */
+            addInstr(env, MIPSInstr_LI(zero, 0x00000000));
+
+            /* tmp2:tmp1 = 0 - (yHi:yLo)*/
+            addInstr(env, MIPSInstr_Alu(Malu_SUB, tmp2, zero, MIPSRH_Reg(yLo)));
+            addInstr(env, MIPSInstr_Cmp(False, True, tmp1, zero, tmp2, cc));
+            addInstr(env, MIPSInstr_Alu(Malu_SUB, tmp, zero, MIPSRH_Reg(yHi)));
+            addInstr(env, MIPSInstr_Alu(Malu_SUB, tmp1, tmp, MIPSRH_Reg(tmp1)));
+
+            /* So now we have tmp2:tmp1 = -arg.  To finish off, or 'arg'
+               back in, so as to give the final result
+               tHi:tLo = arg | -arg. */
+            addInstr(env, MIPSInstr_Alu(Malu_OR, tHi, yHi, MIPSRH_Reg(tmp1)));
+            addInstr(env, MIPSInstr_Alu(Malu_OR, tLo, yLo, MIPSRH_Reg(tmp2)));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* CmpwNEZ64(e) = all-ones if e != 0, else all-zeroes. */
+         case Iop_CmpwNEZ64: {
+            HReg srcLo, srcHi;
+            HReg tmp1 = newVRegI(env);
+            HReg tmp2 = newVRegI(env);
+            /* srcHi:srcLo = arg */
+            iselInt64Expr(&srcHi, &srcLo, env, e->Iex.Unop.arg);
+            /* tmp1 = srcHi | srcLo */
+            addInstr(env, MIPSInstr_Alu(Malu_OR, tmp1, srcLo,
+                                        MIPSRH_Reg(srcHi)));
+            /* tmp2 = (tmp1 | -tmp1) >>s 31 */
+
+            addInstr(env, MIPSInstr_Alu(Malu_SUB, tmp2, hregMIPS_GPR0(mode64),
+                                        MIPSRH_Reg(tmp1)));
+
+            addInstr(env, MIPSInstr_Alu(Malu_OR, tmp2, tmp2, MIPSRH_Reg(tmp1)));
+            addInstr(env, MIPSInstr_Shft(Mshft_SRA, True, tmp2, tmp2,
+                          MIPSRH_Imm(False, 31)));
+            *rHi = tmp2;
+            *rLo = tmp2;
+            return;
+
+         }
+         /* Reinterpret F64 bits as I64: spill to stack, reload as 2xI32. */
+         case Iop_ReinterpF64asI64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            MIPSAMode *am_addr;
+            HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
+
+            sub_from_sp(env, 16);  /* Move SP down 16 bytes */
+            am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+            /* store as F64 */
+            addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
+                                           am_addr));
+            /* load as 2xI32; half order depends on host endianness. */
+#if defined (_MIPSEL)
+            addInstr(env, MIPSInstr_Load(4, tLo, am_addr, mode64));
+            addInstr(env, MIPSInstr_Load(4, tHi, nextMIPSAModeFloat(am_addr),
+                                         mode64));
+#elif defined (_MIPSEB)
+            addInstr(env, MIPSInstr_Load(4, tHi, am_addr, mode64));
+            addInstr(env, MIPSInstr_Load(4, tLo, nextMIPSAModeFloat(am_addr),
+                                         mode64));
+#endif
+
+            /* Reset SP */
+            add_to_sp(env, 16);
+
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         default:
+            vex_printf("UNARY: No such op: ");
+            ppIROp(e->Iex.Unop.op);
+            vex_printf("\n");
+            break;
+      }
+   }
+
+   vex_printf("iselInt64Expr(mips): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselInt64Expr(mips)");
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (32 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Nothing interesting here; really just wrappers for
+   64-bit stuff. */
+static HReg iselFltExpr(ISelEnv * env, IRExpr * e)
+{
+   /* All the real work happens in iselFltExpr_wrk; here we only check
+      the invariant that a virtual register came back. */
+   HReg res = iselFltExpr_wrk(env, e);
+   vassert(hregIsVirtual(res));
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselFltExpr_wrk(ISelEnv * env, IRExpr * e)
+{
+   IRType ty = typeOfIRExpr(env->type_env, e);
+   vassert(ty == Ity_F32 || (ty == Ity_F64 && fp_mode64));
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Load) {
+      vassert(e->Iex.Load.ty == Ity_F32
+              || (e->Iex.Load.ty == Ity_F64 && fp_mode64));
+      HReg r_dst;
+      MIPSAMode *am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, ty);
+      if (e->Iex.Load.ty == Ity_F64) {
+         r_dst = newVRegD(env);
+         addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, r_dst, am_addr));
+      } else {
+         r_dst = newVRegF(env);
+         addInstr(env, MIPSInstr_FpLdSt(True /*load */, 4, r_dst, am_addr));
+      }
+      return r_dst;
+   }
+
+   if (e->tag == Iex_Get) {
+      MIPSAMode *am_addr = MIPSAMode_IR(e->Iex.Get.offset,
+                                        GuestStatePointer(mode64));
+      HReg r_dst;
+      if (e->Iex.Load.ty == Ity_F64) {
+         r_dst = newVRegD(env);
+         addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, r_dst, am_addr));
+      } else {
+         r_dst = newVRegF(env);
+         addInstr(env, MIPSInstr_FpLdSt(True /*load */, 4, r_dst, am_addr));
+      }
+      return r_dst;
+   }
+
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+      case Iop_ReinterpI32asF32: {
+         HReg fr_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+         HReg r_dst = newVRegF(env);
+
+         /* Move Word to Floating Point
+            mtc1 r_dst, valS */
+         addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, r_dst, fr_src));
+
+         return r_dst;
+      }
+      case Iop_F32toF64: {
+         vassert(fp_mode64);
+         HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegD(env);
+
+         addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDS, dst, src));
+         return dst;
+      }
+      case Iop_ReinterpI64asF64: {
+         HReg r_dst;
+         if (mode64) {
+            HReg fr_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+            r_dst = newVRegF(env);
+            /* Move Doubleword to Floating Point
+               dmtc1 r_dst, fr_src */
+            addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_dmtc1, r_dst, fr_src));
+         } else {
+             HReg Hi, Lo;
+             r_dst = newVRegD(env);
+             iselInt64Expr(&Hi, &Lo, env, e->Iex.Unop.arg);
+             r_dst = mk_LoadRR32toFPR(env, Hi, Lo);  /* 2*I32 -> F64 */
+         }
+         return r_dst;
+      }
+      case Iop_I32StoF64: {
+         vassert(fp_mode64);
+         HReg dst = newVRegF(env);
+         HReg tmp = newVRegF(env);
+         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+
+         /* Move Word to Floating Point
+            mtc1 tmp, r_src */
+         addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, tmp, r_src));
+
+         /* and do convert */
+         addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDW, dst, tmp));
+
+         return dst;
+      }
+      case Iop_AbsF32:
+      case Iop_AbsF64: {
+         Bool sz32 = e->Iex.Unop.op == Iop_AbsF32;
+         HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegF(env);
+         addInstr(env, MIPSInstr_FpUnary(sz32 ? Mfp_ABSS : Mfp_ABSD, dst, src));
+         return dst;
+      }
+      case Iop_NegF32:
+      case Iop_NegF64: {
+         Bool sz32 = e->Iex.Unop.op == Iop_NegF32;
+         HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegF(env);
+         addInstr(env, MIPSInstr_FpUnary(sz32 ? Mfp_NEGS : Mfp_NEGD, dst, src));
+         return dst;
+      }
+      case Iop_RoundF64toF64_ZERO: {
+         vassert(mode64);
+         HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegF(env);
+         addInstr(env, MIPSInstr_FpConvert(Mfp_TRULD, dst, src));
+         return dst;
+      }
+      case Iop_RoundF64toF64_NEAREST: {
+         vassert(mode64);
+         HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegF(env);
+         addInstr(env, MIPSInstr_FpConvert(Mfp_ROUNDLD, dst, src));
+         return dst;
+      }
+      case Iop_RoundF64toF64_NegINF: {
+         vassert(mode64);
+         HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegF(env);
+         addInstr(env, MIPSInstr_FpConvert(Mfp_FLOORLD, dst, src));
+         return dst;
+      }
+      case Iop_RoundF64toF64_PosINF: {
+         vassert(mode64);
+         HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegF(env);
+         addInstr(env, MIPSInstr_FpConvert(Mfp_CEILLD, dst, src));
+         return dst;
+      }
+
+      default:
+         break;
+      }
+   }
+
+   if (e->tag == Iex_Triop) {
+      switch (e->Iex.Triop.details->op) {
+         case Iop_DivF32:
+         case Iop_DivF64:
+         case Iop_MulF32:
+         case Iop_MulF64:
+         case Iop_AddF32:
+         case Iop_AddF64:
+         case Iop_SubF32:
+         case Iop_SubF64: {
+            MIPSFpOp op = 0;
+            HReg argL = iselFltExpr(env, e->Iex.Triop.details->arg2);
+            HReg argR = iselFltExpr(env, e->Iex.Triop.details->arg3);
+            HReg dst = newVRegF(env);
+            switch (e->Iex.Triop.details->op) {
+               case Iop_DivF32:
+                  op = Mfp_DIVS;
+                  break;
+               case Iop_DivF64:
+                  vassert(fp_mode64);
+                  op = Mfp_DIVD;
+                  break;
+               case Iop_MulF32:
+                  op = Mfp_MULS;
+                  break;
+               case Iop_MulF64:
+                  vassert(fp_mode64);
+                  op = Mfp_MULD;
+                  break;
+               case Iop_AddF32:
+                  op = Mfp_ADDS;
+                  break;
+               case Iop_AddF64:
+                  vassert(fp_mode64);
+                  op = Mfp_ADDD;
+                  break;
+               case Iop_SubF32:
+                  op = Mfp_SUBS;
+                  break;
+               case Iop_SubF64:
+                  vassert(fp_mode64);
+                  op = Mfp_SUBD;
+                  break;
+               default:
+                  vassert(0);
+            }
+            set_MIPS_rounding_mode(env, e->Iex.Triop.details->arg1);
+            addInstr(env, MIPSInstr_FpBinary(op, dst, argL, argR));
+            set_MIPS_rounding_default(env);
+            return dst;
+         }
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         case Iop_F64toF32: {
+            HReg valD;
+            if (mode64)
+               valD = iselFltExpr(env, e->Iex.Binop.arg2);
+            else
+               valD = iselDblExpr(env, e->Iex.Binop.arg2);
+            HReg valS = newVRegF(env);
+
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSD, valS, valD));
+            set_MIPS_rounding_default(env);
+            return valS;
+         }
+
+         case Iop_RoundF32toInt: {
+               HReg valS = newVRegF(env);
+               HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
+
+               set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+               addInstr(env, MIPSInstr_FpConvert(Mfp_CVTWS, valS, valF));
+               set_MIPS_rounding_default(env);
+               return valS;
+            }
+
+         case Iop_RoundF64toInt: {
+            HReg valS = newVRegF(env);
+            HReg valF = iselFltExpr(env, e->Iex.Binop.arg2);
+
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLD, valS, valF));
+            set_MIPS_rounding_default(env);
+            return valS;
+         }
+
+         case Iop_I32StoF32: {
+            HReg r_dst = newVRegF(env);
+            HReg fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2);
+            HReg tmp = newVRegF(env);
+
+            /* Move Word to Floating Point
+               mtc1 tmp, fr_src */
+            addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, tmp, fr_src));
+
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSW, r_dst, tmp));
+            set_MIPS_rounding_default(env);
+
+            return r_dst;
+         }
+
+         case Iop_I64StoF64: {
+            HReg r_dst = newVRegF(env);
+            MIPSAMode *am_addr;
+            HReg tmp, fr_src;
+            if (mode64) {
+               tmp = newVRegF(env);
+               fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2);
+               /* Move SP down 8 bytes */
+               sub_from_sp(env, 8);
+               am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+               /* store as I64 */
+               addInstr(env, MIPSInstr_Store(8, am_addr, fr_src, mode64));
+
+               /* load as Ity_F64 */
+               addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, tmp, am_addr));
+
+               /* Reset SP */
+               add_to_sp(env, 8);
+            } else {
+               HReg Hi, Lo;
+               tmp = newVRegD(env);
+               iselInt64Expr(&Hi, &Lo, env, e->Iex.Binop.arg2);
+               tmp = mk_LoadRR32toFPR(env, Hi, Lo);  /* 2*I32 -> F64 */
+            }
+
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDL, r_dst, tmp));
+            set_MIPS_rounding_default(env);
+
+            return r_dst;
+         }
+
+         case Iop_I64StoF32: {
+            HReg r_dst = newVRegF(env);
+            MIPSAMode *am_addr;
+            HReg fr_src, tmp;
+            if (mode64) {
+               tmp = newVRegF(env);
+               fr_src = iselWordExpr_R(env, e->Iex.Binop.arg2);
+               /* Move SP down 8 bytes */
+               sub_from_sp(env, 8);
+               am_addr = MIPSAMode_IR(0, StackPointer(mode64));
+
+               /* store as I64 */
+               addInstr(env, MIPSInstr_Store(8, am_addr, fr_src, mode64));
+
+               /* load as Ity_F64 */
+               addInstr(env, MIPSInstr_FpLdSt(True /*load */, 8, tmp, am_addr));
+
+               /* Reset SP */
+               add_to_sp(env, 8);
+            } else {
+               HReg Hi, Lo;
+               tmp = newVRegD(env);
+               iselInt64Expr(&Hi, &Lo, env, e->Iex.Binop.arg2);
+               tmp = mk_LoadRR32toFPR(env, Hi, Lo);  /* 2*I32 -> F64 */
+            }
+
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTSL, r_dst, tmp));
+            set_MIPS_rounding_default(env);
+
+            return r_dst;
+         }
+
+         case Iop_SqrtF32:
+         case Iop_SqrtF64: {
+            Bool sz32 = e->Iex.Binop.op == Iop_SqrtF32;
+            HReg src = iselFltExpr(env, e->Iex.Binop.arg2);
+            HReg dst = newVRegF(env);
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpUnary(sz32 ? Mfp_SQRTS : Mfp_SQRTD, dst,
+                                            src));
+            set_MIPS_rounding_default(env);
+            return dst;
+         }
+
+         default:
+            break;
+      }
+   }
+
+   if (e->tag == Iex_Qop) {
+      switch (e->Iex.Qop.details->op) {
+         case Iop_MAddF32:
+         case Iop_MAddF64:
+         case Iop_MSubF32:
+         case Iop_MSubF64: {
+            MIPSFpOp op = 0;
+            switch (e->Iex.Qop.details->op) {
+               case Iop_MAddF32:
+                  op = Mfp_MADDS;
+                  break;
+               case Iop_MAddF64:
+                  op = Mfp_MADDD;
+                  break;
+               case Iop_MSubF32:
+                  op = Mfp_MSUBS;
+                  break;
+               case Iop_MSubF64:
+                  op = Mfp_MSUBD;
+                  break;
+               default:
+                  vassert(0);
+            }
+            HReg dst = newVRegF(env);
+            HReg src1 = iselFltExpr(env, e->Iex.Qop.details->arg2);
+            HReg src2 = iselFltExpr(env, e->Iex.Qop.details->arg3);
+            HReg src3 = iselFltExpr(env, e->Iex.Qop.details->arg4);
+            set_MIPS_rounding_mode(env, e->Iex.Qop.details->arg1);
+            addInstr(env, MIPSInstr_FpTernary(op, dst,
+                                              src1, src2, src3));
+            set_MIPS_rounding_default(env);
+            return dst;
+         }
+
+         default:
+         break;
+      }
+   }
+
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_TruncF64asF32) {
+      /* This is quite subtle.  The only way to do the relevant
+         truncation is to do a single-precision store and then a
+         double precision load to get it back into a register.  The
+         problem is, if the data is then written to memory a second
+         time, as in
+
+         STbe(...) = TruncF64asF32(...)
+
+         then will the second truncation further alter the value?  The
+         answer is no: flds (as generated here) followed by fsts
+         (generated for the STbe) is the identity function on 32-bit
+         floats, so we are safe.
+
+         Another upshot of this is that if iselStmt can see the
+         entirety of
+
+         STbe(...) = TruncF64asF32(arg)
+
+         then it can short circuit having to deal with TruncF64asF32
+         individually; instead just compute arg into a 64-bit FP
+         register and do 'fsts' (since that itself does the
+         truncation).
+
+         We generate pretty poor code here (should be ok both for
+         32-bit and 64-bit mode); but it is expected that for the most
+         part the latter optimisation will apply and hence this code
+         will not often be used.
+       */
+      HReg fsrc = iselDblExpr(env, e->Iex.Unop.arg);
+      HReg fdst = newVRegF(env);
+      MIPSAMode *zero_r1 = MIPSAMode_IR(0, StackPointer(mode64));
+
+      sub_from_sp(env, 16);
+      /* store as F32, hence truncating */
+      addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 4, fsrc, zero_r1));
+      /* and reload.  Good huh?! (sigh) */
+      addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 4, fdst, zero_r1));
+      add_to_sp(env, 16);
+      return fdst;
+   }
+
+   /* --------- ITE --------- */
+   if (e->tag == Iex_ITE) {
+      if (ty == Ity_F64
+          && typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) {
+         vassert(mode64);
+         HReg r0 = iselFltExpr(env, e->Iex.ITE.iffalse);
+         HReg r1 = iselFltExpr(env, e->Iex.ITE.iftrue);
+         HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
+         HReg r_dst = newVRegF(env);
+         addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, r_dst, r0));
+         addInstr(env, MIPSInstr_MoveCond(MFpMoveCond_movnd, r_dst, r1,
+                                            r_cond));
+         return r_dst;
+      }
+   }
+
+   vex_printf("iselFltExpr(mips): No such tag(0x%x)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselFltExpr_wrk(mips)");
+}
+
+/* Compute a double-precision floating point value (Ity_F64) into a
+   register of class HRcFlt64, returning its identity.  This is the
+   sanity-checking wrapper around iselDblExpr_wrk: all external
+   callers go through here so that every selected register is
+   guaranteed to be a virtual 64-bit FP register. */
+static HReg iselDblExpr(ISelEnv * env, IRExpr * e)
+{
+   HReg r = iselDblExpr_wrk(env, e);
+   /* Enforce the isel invariants on the worker's result. */
+   vassert(hregClass(r) == HRcFlt64);
+   vassert(hregIsVirtual(r));
+   return r;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+/* Worker for iselDblExpr: select MIPS instructions computing the
+   F64-typed IR expression 'e' into a new virtual FP register, which
+   is returned.  Used on the 32-bit-FPU path (double values held in
+   even/odd FP register pairs); in mode64 most F64 work is routed
+   through iselFltExpr instead -- TODO confirm against the callers. */
+static HReg iselDblExpr_wrk(ISelEnv * env, IRExpr * e)
+{
+   IRType ty = typeOfIRExpr(env->type_env, e);
+   vassert(e);
+   vassert(ty == Ity_F64);
+
+   /* --------- TEMP --------- */
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* --------- LOAD --------- */
+   if (e->tag == Iex_Load) {
+      HReg r_dst = newVRegD(env);
+      MIPSAMode *am_addr;
+      vassert(e->Iex.Load.ty == Ity_F64);
+      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, ty);
+      addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, r_dst, am_addr));
+      return r_dst;
+   }
+
+   /* --------- GET --------- */
+   if (e->tag == Iex_Get) {
+
+      /* 8-byte load from the guest state at the given offset. */
+      HReg r_dst = newVRegD(env);
+      MIPSAMode *am_addr = MIPSAMode_IR(e->Iex.Get.offset,
+                                        GuestStatePointer(mode64));
+      addInstr(env, MIPSInstr_FpLdSt(True /*load */ , 8, r_dst, am_addr));
+      return r_dst;
+   }
+
+   /* --------- UNARY OPS --------- */
+   if (e->tag == Iex_Unop) {
+      MIPSFpOp fpop = Mfp_INVALID;
+      switch (e->Iex.Unop.op) {
+         /* Simple same-format unaries fall through to the shared
+            FpUnary emission below. */
+         case Iop_NegF64:
+            fpop = Mfp_NEGD;
+            break;
+         case Iop_AbsF64:
+            fpop = Mfp_ABSD;
+            break;
+         case Iop_F32toF64: {
+            /* Widening never loses precision, so no rounding mode is
+               needed.  Only reachable in 32-bit mode here. */
+            vassert(!mode64);
+            HReg src = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg dst = newVRegD(env);
+
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDS, dst, src));
+            return dst;
+         }
+         case Iop_ReinterpI64asF64: {
+            /* Bit-level move: evaluate the I64 as a 32-bit register
+               pair, then shuffle both halves into one FP register. */
+            HReg Hi, Lo;
+            HReg dst = newVRegD(env);
+
+            iselInt64Expr(&Hi, &Lo, env, e->Iex.Unop.arg);
+
+            dst = mk_LoadRR32toFPR(env, Hi, Lo);  /* 2*I32 -> F64 */
+            return dst;
+         }
+         case Iop_I32StoF64: {
+            vassert(!mode64);
+            HReg dst = newVRegD(env);
+            HReg tmp = newVRegF(env);
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
+
+            /* Move Word to Floating Point
+               mtc1 tmp, r_src */
+            addInstr(env, MIPSInstr_FpGpMove(MFpGpMove_mtc1, tmp, r_src));
+
+            /* and do convert */
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTDW, dst, tmp));
+
+            return dst;
+         }
+         default:
+            break;
+      }
+
+      /* Shared emission for the simple NEGD/ABSD cases above. */
+      if (fpop != Mfp_INVALID) {
+         HReg src = iselDblExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegD(env);
+         addInstr(env, MIPSInstr_FpUnary(fpop, dst, src));
+         return dst;
+      }
+   }
+
+   /* --------- BINARY OPS --------- */
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         case Iop_RoundF64toInt: {
+            /* Convert to a 64-bit integer value held in an FP
+               register, rounding as directed by arg1 (presumably
+               cvt.l.d -- confirm against the Mfp_CVTLD encoding). */
+            HReg src = iselDblExpr(env, e->Iex.Binop.arg2);
+            HReg dst = newVRegD(env);
+
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpConvert(Mfp_CVTLD, dst, src));
+            set_MIPS_rounding_default(env);
+
+            return dst;
+         }
+
+         case Iop_SqrtF64: {
+            HReg src = iselDblExpr(env, e->Iex.Binop.arg2);
+            HReg dst = newVRegD(env);
+            set_MIPS_rounding_mode(env, e->Iex.Binop.arg1);
+            addInstr(env, MIPSInstr_FpUnary(Mfp_SQRTD, dst, src));
+            set_MIPS_rounding_default(env);
+            return dst;
+         }
+
+         default:
+            break;
+
+      }
+   }
+
+   /* --------- TERNARY OPS (rounding mode + 2 args) --------- */
+   if (e->tag == Iex_Triop) {
+      switch (e->Iex.Triop.details->op) {
+         case Iop_DivF64:
+         /* NOTE(review): Iop_DivF32 in an F64-typed selector looks
+            odd -- its operands would be F32, yet they are selected
+            via iselDblExpr.  Confirm whether this case is ever
+            reachable. */
+         case Iop_DivF32:
+         case Iop_MulF64:
+         case Iop_AddF64:
+         case Iop_SubF64: {
+            MIPSFpOp op = 0;
+            /* arg1 is the IR rounding mode; arg2/arg3 are operands. */
+            HReg argL = iselDblExpr(env, e->Iex.Triop.details->arg2);
+            HReg argR = iselDblExpr(env, e->Iex.Triop.details->arg3);
+            HReg dst = newVRegD(env);
+            switch (e->Iex.Triop.details->op) {
+               case Iop_DivF64:
+                  op = Mfp_DIVD;
+                  break;
+               case Iop_DivF32:
+                  op = Mfp_DIVS;
+                  break;
+               case Iop_MulF64:
+                  op = Mfp_MULD;
+                  break;
+               case Iop_AddF64:
+                  op = Mfp_ADDD;
+                  break;
+               case Iop_SubF64:
+                  op = Mfp_SUBD;
+                  break;
+               default:
+                  vassert(0);
+            }
+            set_MIPS_rounding_mode(env, e->Iex.Triop.details->arg1);
+            addInstr(env, MIPSInstr_FpBinary(op, dst, argL, argR));
+            set_MIPS_rounding_default(env);
+            return dst;
+         }
+         default:
+            break;
+      }
+   }
+
+   /* --------- QUATERNARY OPS (fused multiply-add/sub) --------- */
+   if (e->tag == Iex_Qop) {
+      switch (e->Iex.Qop.details->op) {
+         case Iop_MAddF32:
+         case Iop_MAddF64:
+         case Iop_MSubF32:
+         case Iop_MSubF64: {
+            MIPSFpOp op = 0;
+            switch (e->Iex.Qop.details->op) {
+               case Iop_MAddF32:
+                  op = Mfp_MADDS;
+                  break;
+               case Iop_MAddF64:
+                  op = Mfp_MADDD;
+                  break;
+               case Iop_MSubF32:
+                  op = Mfp_MSUBS;
+                  break;
+               case Iop_MSubF64:
+                  op = Mfp_MSUBD;
+                  break;
+               default:
+                  vassert(0);
+            }
+            /* arg1 is the rounding mode; arg2..arg4 are operands. */
+            HReg dst = newVRegD(env);
+            HReg src1 = iselDblExpr(env, e->Iex.Qop.details->arg2);
+            HReg src2 = iselDblExpr(env, e->Iex.Qop.details->arg3);
+            HReg src3 = iselDblExpr(env, e->Iex.Qop.details->arg4);
+            set_MIPS_rounding_mode(env, e->Iex.Qop.details->arg1);
+            addInstr(env, MIPSInstr_FpTernary(op, dst,
+                                              src1, src2, src3));
+            set_MIPS_rounding_default(env);
+            return dst;
+         }
+
+         default:
+         break;
+      }
+   }
+
+   /* --------- ITE --------- */
+   if (e->tag == Iex_ITE) {
+      if (ty == Ity_F64
+          && typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) {
+         HReg r0 = iselDblExpr(env, e->Iex.ITE.iffalse);
+         HReg r1 = iselDblExpr(env, e->Iex.ITE.iftrue);
+         HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
+         HReg r_dst = newVRegD(env);
+
+         /* dst = iffalse; then conditionally overwrite with iftrue
+            (movn.d semantics: move when cond is non-zero). */
+         addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, r_dst, r0));
+         addInstr(env, MIPSInstr_MoveCond(MFpMoveCond_movnd, r_dst, r1,
+                                            r_cond));
+         return r_dst;
+      }
+   }
+
+   vex_printf("iselDblExpr(mips): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselDblExpr_wrk(mips)");
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Statements                                  ---*/
+/*---------------------------------------------------------*/
+
+/* Select MIPS instructions for a single IR statement.  Emits code
+   into 'env' and returns; panics (via stmt_fail) on any statement
+   shape the backend does not handle. */
+static void iselStmt(ISelEnv * env, IRStmt * stmt)
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n-- ");
+
+      ppIRStmt(stmt);
+      vex_printf("\n");
+   }
+
+   switch (stmt->tag) {
+      /* --------- STORE --------- */
+      case Ist_Store: {
+         MIPSAMode *am_addr;
+         IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
+
+         /*constructs addressing mode from address provided */
+         am_addr = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd);
+
+         /* Integer stores that fit in one machine store. */
+         if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 ||
+             (mode64 && (tyd == Ity_I64))) {
+            HReg r_src = iselWordExpr_R(env, stmt->Ist.Store.data);
+            addInstr(env, MIPSInstr_Store(toUChar(sizeofIRType(tyd)),
+                     am_addr, r_src, mode64));
+            return;
+         }
+         /* 64-bit store on a 32-bit host: two 32-bit stores.  Hi word
+            goes at offset 0, Lo at offset 4 -- assumes big-endian
+            layout; TODO confirm for little-endian MIPS targets. */
+         if (!mode64 && (tyd == Ity_I64)) {
+            HReg vHi, vLo;
+            HReg r_addr = iselWordExpr_R(env, stmt->Ist.Store.addr);
+
+            iselInt64Expr(&vHi, &vLo, env, stmt->Ist.Store.data);
+
+            addInstr(env, MIPSInstr_Store(toUChar(sizeofIRType(Ity_I32)),
+                          MIPSAMode_IR(0, r_addr), vHi, mode64));
+            addInstr(env, MIPSInstr_Store(toUChar(sizeofIRType(Ity_I32)),
+                          MIPSAMode_IR(4, r_addr), vLo, mode64));
+            return;
+         }
+         if (tyd == Ity_F32) {
+            HReg fr_src = iselFltExpr(env, stmt->Ist.Store.data);
+            addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 4, fr_src,
+                                           am_addr));
+            return;
+         }
+         if (tyd == Ity_F64 && mode64) {
+            HReg fr_src = iselFltExpr(env, stmt->Ist.Store.data);
+            addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
+                                           am_addr));
+            return;
+         }
+         if (!mode64 && (tyd == Ity_F64)) {
+            HReg fr_src = iselDblExpr(env, stmt->Ist.Store.data);
+            addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
+                                           am_addr));
+            return;
+         }
+
+         break;
+      }
+
+      /* --------- PUT --------- */
+      case Ist_Put: {
+         /* Write to the guest state at a fixed offset. */
+         IRType ty = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
+
+         if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 ||
+             (ty == Ity_I64 && mode64)) {
+            HReg r_src = iselWordExpr_R(env, stmt->Ist.Put.data);
+            MIPSAMode *am_addr = MIPSAMode_IR(stmt->Ist.Put.offset,
+                                              GuestStatePointer(mode64));
+            addInstr(env, MIPSInstr_Store(toUChar(sizeofIRType(ty)),
+                                          am_addr, r_src, mode64));
+            return;
+         }
+
+         /* 64-bit Put on a 32-bit host: Lo word at offset, Hi word at
+            offset+4 (note: opposite half ordering to the Ist_Store
+            I64 path above -- guest state layout vs memory layout). */
+         if (ty == Ity_I64 && !mode64) {
+            HReg vHi, vLo;
+            MIPSAMode *am_addr = MIPSAMode_IR(stmt->Ist.Put.offset,
+                                              GuestStatePointer(mode64));
+            MIPSAMode *am_addr4 = MIPSAMode_IR(stmt->Ist.Put.offset + 4,
+                                               GuestStatePointer(mode64));
+            iselInt64Expr(&vHi, &vLo, env, stmt->Ist.Put.data);
+            addInstr(env, MIPSInstr_Store(toUChar(sizeofIRType(Ity_I32)),
+                                          am_addr, vLo, mode64));
+            addInstr(env, MIPSInstr_Store(toUChar(sizeofIRType(Ity_I32)),
+                                          am_addr4, vHi, mode64));
+            return;
+
+         }
+
+         if (ty == Ity_F32) {
+            HReg fr_src = iselFltExpr(env, stmt->Ist.Put.data);
+            MIPSAMode *am_addr = MIPSAMode_IR(stmt->Ist.Put.offset,
+                                              GuestStatePointer(mode64));
+            addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 4, fr_src,
+                                           am_addr));
+            return;
+         }
+
+         /* NOTE(review): uses iselFltExpr for F64 in both modes,
+            unlike the Ist_Store F64 path which uses iselDblExpr when
+            !mode64 -- confirm this is intended. */
+         if (ty == Ity_F64) {
+            HReg fr_src = iselFltExpr(env, stmt->Ist.Put.data);
+            MIPSAMode *am_addr = MIPSAMode_IR(stmt->Ist.Put.offset,
+                                              GuestStatePointer(mode64));
+            addInstr(env, MIPSInstr_FpLdSt(False /*store */ , 8, fr_src,
+                                           am_addr));
+            return;
+         }
+         break;
+      }
+
+      /* --------- TMP --------- */
+      case Ist_WrTmp: {
+         /* Assign a computed value to an IR temporary's register(s). */
+         IRTemp tmp = stmt->Ist.WrTmp.tmp;
+         IRType ty = typeOfIRTemp(env->type_env, tmp);
+
+         if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ty == Ity_I1) {
+            HReg r_dst = lookupIRTemp(env, tmp);
+            HReg r_src = iselWordExpr_R(env, stmt->Ist.WrTmp.data);
+            addInstr(env, mk_iMOVds_RR(r_dst, r_src));
+            return;
+         }
+
+         if (ty == Ity_I64) {
+            if (mode64) {
+               HReg r_dst = lookupIRTemp(env, tmp);
+               HReg r_src = iselWordExpr_R(env, stmt->Ist.WrTmp.data);
+               addInstr(env, mk_iMOVds_RR(r_dst, r_src));
+               return;
+            } else {
+               /* 64-bit temp on a 32-bit host lives in a register
+                  pair; move both halves. */
+               HReg rHi, rLo, dstHi, dstLo;
+               iselInt64Expr(&rHi, &rLo, env, stmt->Ist.WrTmp.data);
+               lookupIRTemp64(&dstHi, &dstLo, env, tmp);
+               addInstr(env, mk_iMOVds_RR(dstHi, rHi));
+               addInstr(env, mk_iMOVds_RR(dstLo, rLo));
+               return;
+            }
+         }
+
+         if (mode64 && ty == Ity_I128) {
+            HReg rHi, rLo, dstHi, dstLo;
+            iselInt128Expr(&rHi, &rLo, env, stmt->Ist.WrTmp.data);
+            lookupIRTempPair(&dstHi, &dstLo, env, tmp);
+            addInstr(env, mk_iMOVds_RR(dstHi, rHi));
+            addInstr(env, mk_iMOVds_RR(dstLo, rLo));
+            return;
+         }
+
+         if (ty == Ity_F32) {
+            HReg fr_dst = lookupIRTemp(env, tmp);
+            HReg fr_src = iselFltExpr(env, stmt->Ist.WrTmp.data);
+            addInstr(env, MIPSInstr_FpUnary(Mfp_MOVS, fr_dst, fr_src));
+            return;
+         }
+
+         if (ty == Ity_F64) {
+            if (mode64) {
+               HReg src = iselFltExpr(env, stmt->Ist.WrTmp.data);
+               HReg dst = lookupIRTemp(env, tmp);
+               addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, dst, src));
+               return;
+            } else {
+               HReg src = iselDblExpr(env, stmt->Ist.WrTmp.data);
+               HReg dst = lookupIRTemp(env, tmp);
+               addInstr(env, MIPSInstr_FpUnary(Mfp_MOVD, dst, src));
+               return;
+            }
+         }
+         break;
+      }
+
+      /* --------- Call to DIRTY helper --------- */
+      case Ist_Dirty: {
+         IRDirty *d = stmt->Ist.Dirty.details;
+
+         /* Figure out the return type, if any. */
+         IRType retty = Ity_INVALID;
+         if (d->tmp != IRTemp_INVALID)
+            retty = typeOfIRTemp(env->type_env, d->tmp);
+
+         /* Throw out any return types we don't know about. */
+         Bool retty_ok = False;
+         switch (retty) {
+            case Ity_INVALID: /* Function doesn't return anything. */
+            case Ity_V128:
+            case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8:
+               retty_ok = True; break;
+            default:
+               break;
+         }
+
+         if (!retty_ok)
+            break; /* will go to stmt_fail: */
+
+         /* Marshal args, do the call, clear stack, set the return value
+            to 0x555..555 if this is a conditional call that returns a
+            value and the call is skipped. */
+         UInt   addToSp = 0;
+         RetLoc rloc    = mk_RetLoc_INVALID();
+         doHelperCall( &addToSp, &rloc, env, d->guard, d->cee, retty, d->args );
+         vassert(is_sane_RetLoc(rloc));
+
+         /* Now figure out what to do with the returned value, if any. */
+         switch (retty) {
+            case Ity_INVALID: {
+               /* No return value.  Nothing to do. */
+               vassert(d->tmp == IRTemp_INVALID);
+               vassert(rloc.pri == RLPri_None);
+               vassert(addToSp == 0);
+               return;
+            }
+            case Ity_I32: case Ity_I16: case Ity_I8: {
+               /* The returned value is in $v0.  Park it in the register
+                  associated with tmp. */
+               HReg r_dst = lookupIRTemp(env, d->tmp);
+               addInstr(env, mk_iMOVds_RR(r_dst, hregMIPS_GPR2(mode64)));
+               vassert(rloc.pri == RLPri_Int);
+               vassert(addToSp == 0);
+               return;
+            }
+            case Ity_I64: {
+               if (mode64) {
+                  /* The returned value is in $v0.  Park it in the register
+                     associated with tmp. */
+                  HReg r_dst = lookupIRTemp(env, d->tmp);
+                  addInstr(env, mk_iMOVds_RR(r_dst, hregMIPS_GPR2(mode64)));
+                  vassert(rloc.pri == RLPri_Int);
+                  vassert(addToSp == 0);
+                  return;
+               } else {
+                  /* 64-bit result on a 32-bit host: low half in $v0
+                     (GPR2), high half in $v1 (GPR3). */
+                  HReg rHi = newVRegI(env);
+                  HReg rLo = newVRegI(env);
+                  HReg dstHi, dstLo;
+                  addInstr(env, mk_iMOVds_RR(rLo, hregMIPS_GPR2(mode64)));
+                  addInstr(env, mk_iMOVds_RR(rHi, hregMIPS_GPR3(mode64)));
+                  lookupIRTemp64(&dstHi, &dstLo, env, d->tmp);
+                  addInstr(env, mk_iMOVds_RR(dstHi, rHi));
+                  addInstr(env, mk_iMOVds_RR(dstLo, rLo));
+                  return;
+               }
+            }
+            case Ity_V128: {
+               /* ATC. The code that this produces really
+                  needs to be looked at, to verify correctness.
+                  I don't think this can ever happen though, since the
+                  MIPS front end never produces 128-bit loads/stores.
+                  The vassert(0) below makes everything after it dead
+                  code, kept only as a sketch. */
+               vassert(0);
+               vassert(rloc.pri == RLPri_V128SpRel);
+               vassert(addToSp >= 16);
+               HReg       dst = lookupIRTemp(env, d->tmp);
+               MIPSAMode* am  = MIPSAMode_IR(rloc.spOff, StackPointer(mode64));
+               addInstr(env, MIPSInstr_Load(mode64 ? 8 : 4, dst, am, mode64));
+               add_to_sp(env, addToSp);
+               return;
+
+            }
+            default:
+               /*NOTREACHED*/
+               vassert(0);
+         }
+      }
+
+      /* --------- Load Linked or Store Conditional --------- */
+      case Ist_LLSC: {
+         /* Temporary solution; this needs to be rewritten for MIPS.
+            On MIPS you cannot read from an address that is locked with
+            LL before the SC completes: any intervening load from the
+            locked address causes the SC to fail. */
+         IRTemp res = stmt->Ist.LLSC.result;
+         IRType tyRes = typeOfIRTemp(env->type_env, res);
+         IRType tyAddr = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.addr);
+
+         if (!mode64 && (tyAddr != Ity_I32))
+            goto stmt_fail;
+
+         if (stmt->Ist.LLSC.storedata == NULL) {
+            /* LL */
+            MIPSAMode *r_addr;
+            /* constructs addressing mode from address provided */
+            r_addr = iselWordExpr_AMode(env, stmt->Ist.LLSC.addr, tyAddr);
+
+            HReg r_dst = lookupIRTemp(env, res);
+            if (tyRes == Ity_I32) {
+               addInstr(env, MIPSInstr_LoadL(4, r_dst, r_addr, mode64));
+               return;
+            } else if (tyRes == Ity_I64 && mode64) {
+               addInstr(env, MIPSInstr_LoadL(8, r_dst, r_addr, mode64));
+               return;
+            }
+         } else {
+            /* SC.  The success/failure flag lands in r_dst, which is
+               first loaded with the data to store (sc writes its
+               result back into the source register). */
+            MIPSAMode *r_addr;
+            r_addr = iselWordExpr_AMode(env, stmt->Ist.LLSC.addr, tyAddr);
+            HReg r_src = iselWordExpr_R(env, stmt->Ist.LLSC.storedata);
+            HReg r_dst = lookupIRTemp(env, res);
+            IRType tyData = typeOfIRExpr(env->type_env,
+                                         stmt->Ist.LLSC.storedata);
+
+            if (tyData == Ity_I32) {
+               addInstr(env, mk_iMOVds_RR(r_dst, r_src));
+               addInstr(env, MIPSInstr_StoreC(4, r_addr, r_dst, mode64));
+               return;
+            } else if (tyData == Ity_I64 && mode64) {
+               addInstr(env, mk_iMOVds_RR(r_dst, r_src));
+               addInstr(env, MIPSInstr_StoreC(8, r_addr, r_dst, mode64));
+               return;
+            }
+         }
+         goto stmt_fail;
+       /* NOTREACHED */}
+
+   /* --------- CAS --------- */
+   /* NOTE(review): only single-word CAS (oldHi == IRTemp_INVALID) is
+      handled; a double-word CAS falls through to 'return' with NO code
+      emitted.  Confirm the front end never produces one, or route it
+      to stmt_fail instead. */
+   case Ist_CAS:
+      if (stmt->Ist.CAS.details->oldHi == IRTemp_INVALID) {
+         IRCAS *cas = stmt->Ist.CAS.details;
+         HReg old   = lookupIRTemp(env, cas->oldLo);
+         HReg addr  = iselWordExpr_R(env, cas->addr);
+         HReg expd  = iselWordExpr_R(env, cas->expdLo);
+         HReg data  = iselWordExpr_R(env, cas->dataLo);
+         if (typeOfIRTemp(env->type_env, cas->oldLo) == Ity_I64) {
+            addInstr(env, MIPSInstr_Cas(8, old, addr, expd, data, mode64));
+         } else if (typeOfIRTemp(env->type_env, cas->oldLo) == Ity_I32) {
+            addInstr(env, MIPSInstr_Cas(4, old, addr, expd, data, mode64));
+         }
+      }
+      return;
+
+   /* --------- INSTR MARK --------- */
+   /* Doesn't generate any executable code ... */
+   case Ist_IMark:
+      return;
+
+   /* --------- ABI HINT --------- */
+   /* These have no meaning (denotation in the IR) and so we ignore
+      them ... if any actually made it this far. */
+   case Ist_AbiHint:
+      return;
+
+   /* --------- NO-OP --------- */
+   /* Fairly self-explanatory, wouldn't you say? */
+   case Ist_NoOp:
+      return;
+
+   /* --------- EXIT --------- */
+   case Ist_Exit: {
+      IRConst* dst = stmt->Ist.Exit.dst;
+      if (!mode64 && dst->tag != Ico_U32)
+         vpanic("iselStmt(mips32): Ist_Exit: dst is not a 32-bit value");
+      if (mode64 && dst->tag != Ico_U64)
+         vpanic("iselStmt(mips64): Ist_Exit: dst is not a 64-bit value");
+
+      MIPSCondCode cc   = iselCondCode(env, stmt->Ist.Exit.guard);
+      MIPSAMode*   amPC = MIPSAMode_IR(stmt->Ist.Exit.offsIP,
+                                      GuestStatePointer(mode64));
+
+      /* Case: boring transfer to known address */
+      if (stmt->Ist.Exit.jk == Ijk_Boring
+          || stmt->Ist.Exit.jk == Ijk_Call
+          /* || stmt->Ist.Exit.jk == Ijk_Ret */) {
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = mode64
+               ? (((Addr64)stmt->Ist.Exit.dst->Ico.U64) > (Addr64)env->max_ga)
+               : (((Addr32)stmt->Ist.Exit.dst->Ico.U32) > (Addr32)env->max_ga);
+            if (0) vex_printf("%s", toFastEP ? "Y" : ",");
+            addInstr(env, MIPSInstr_XDirect(
+                             mode64 ? (Addr64)stmt->Ist.Exit.dst->Ico.U64
+                                    : (Addr64)stmt->Ist.Exit.dst->Ico.U32,
+                             amPC, cc, toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, MIPSInstr_XAssisted(r, amPC, cc, Ijk_Boring));
+         }
+         return;
+      }
+
+      /* Case: assisted transfer to arbitrary address */
+      switch (stmt->Ist.Exit.jk) {
+         /* Keep this list in sync with that in iselNext below */
+         case Ijk_ClientReq:
+         case Ijk_EmFail:
+         case Ijk_EmWarn:
+         case Ijk_NoDecode:
+         case Ijk_NoRedir:
+         case Ijk_SigBUS:
+         case Ijk_Yield:
+         case Ijk_SigTRAP:
+         case Ijk_SigFPE_IntDiv:
+         case Ijk_SigFPE_IntOvf:
+         case Ijk_Sys_syscall:
+         case Ijk_InvalICache:
+         {
+            HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, MIPSInstr_XAssisted(r, amPC, cc,
+                                             stmt->Ist.Exit.jk));
+            return;
+         }
+         default:
+            break;
+      }
+
+      /* Do we ever expect to see any other kind? */
+      goto stmt_fail;
+   }
+
+   default:
+      break;
+   }
+
+   stmt_fail:
+      vex_printf("stmt_fail tag: 0x%x\n", stmt->tag);
+      ppIRStmt(stmt);
+      vpanic("iselStmt:\n");
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Basic block terminators (Nexts)             ---*/
+/*---------------------------------------------------------*/
+
+static void iselNext ( ISelEnv* env,
+                       IRExpr* next, IRJumpKind jk, Int offsIP )
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf( "\n-- PUT(%d) = ", offsIP);
+      ppIRExpr( next );
+      vex_printf( "; exit-");
+      ppIRJumpKind(jk);
+      vex_printf( "\n");
+   }
+
+   /* Case: boring transfer to known address */
+   if (next->tag == Iex_Const) {
+      IRConst* cdst = next->Iex.Const.con;
+      vassert(cdst->tag == (env->mode64 ? Ico_U64 : Ico_U32));
+      if (jk == Ijk_Boring || jk == Ijk_Call) {
+         /* Boring transfer to known address */
+         MIPSAMode* amPC = MIPSAMode_IR(offsIP, GuestStatePointer(env->mode64));
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = env->mode64
+               ? (((Addr64)cdst->Ico.U64) > (Addr64)env->max_ga)
+               : (((Addr32)cdst->Ico.U32) > (Addr32)env->max_ga);
+            if (0) vex_printf("%s", toFastEP ? "X" : ".");
+            addInstr(env, MIPSInstr_XDirect(
+                             env->mode64 ? (Addr64)cdst->Ico.U64
+                                         : (Addr64)cdst->Ico.U32,
+                             amPC, MIPScc_AL, toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselWordExpr_R(env, next);
+            addInstr(env, MIPSInstr_XAssisted(r, amPC, MIPScc_AL,
+                                              Ijk_Boring));
+         }
+         return;
+      }
+   }
+
+   /* Case: call/return (==boring) transfer to any address */
+   switch (jk) {
+      case Ijk_Boring: case Ijk_Ret: case Ijk_Call: {
+         HReg       r     = iselWordExpr_R(env, next);
+         MIPSAMode*  amPC = MIPSAMode_IR(offsIP,
+                                         GuestStatePointer(env->mode64));
+         if (env->chainingAllowed) {
+            addInstr(env, MIPSInstr_XIndir(r, amPC, MIPScc_AL));
+         } else {
+            addInstr(env, MIPSInstr_XAssisted(r, amPC, MIPScc_AL,
+                                              Ijk_Boring));
+         }
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* Case: assisted transfer to arbitrary address */
+   switch (jk) {
+      /* Keep this list in sync with that for Ist_Exit above */
+      case Ijk_ClientReq:
+      case Ijk_EmFail:
+      case Ijk_EmWarn:
+      case Ijk_NoDecode:
+      case Ijk_NoRedir:
+      case Ijk_SigBUS:
+      case Ijk_SigILL:
+      case Ijk_SigTRAP:
+      case Ijk_SigFPE_IntDiv:
+      case Ijk_SigFPE_IntOvf:
+      case Ijk_Sys_syscall:
+      case Ijk_InvalICache: {
+         HReg      r     = iselWordExpr_R(env, next);
+         MIPSAMode* amPC = MIPSAMode_IR(offsIP, GuestStatePointer(env->mode64));
+         addInstr(env, MIPSInstr_XAssisted(r, amPC, MIPScc_AL, jk));
+         return;
+      }
+      default:
+         break;
+   }
+
+   vex_printf("\n-- PUT(%d) = ", offsIP);
+   ppIRExpr(next );
+   vex_printf("; exit-");
+   ppIRJumpKind(jk);
+   vex_printf("\n");
+   vassert(0);  /* are we expecting any other kind? */
+}
+
+/*---------------------------------------------------------*/
+/*--- Insn selector top-level                           ---*/
+/*---------------------------------------------------------*/
+
+/* Translate an entire BB to mips code. */
+HInstrArray *iselSB_MIPS ( const IRSB* bb,
+                           VexArch arch_host,
+                           const VexArchInfo* archinfo_host,
+                           const VexAbiInfo* vbi,
+                           Int offs_Host_EvC_Counter,
+                           Int offs_Host_EvC_FailAddr,
+                           Bool chainingAllowed,
+                           Bool addProfInc,
+                           Addr max_ga )
+{
+   Int      i, j;
+   HReg     hreg, hregHI;
+   ISelEnv* env;
+   UInt     hwcaps_host = archinfo_host->hwcaps;
+   MIPSAMode *amCounter, *amFailAddr;
+
+   /* sanity ... */
+   vassert(arch_host == VexArchMIPS32 || arch_host == VexArchMIPS64);
+   vassert(VEX_PRID_COMP_MIPS == hwcaps_host
+           || VEX_PRID_COMP_BROADCOM == hwcaps_host
+           || VEX_PRID_COMP_NETLOGIC == hwcaps_host);
+
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE
+           || archinfo_host->endness == VexEndnessBE);
+
+   mode64 = arch_host != VexArchMIPS32;
+#if (__mips_fpr==64)
+   fp_mode64 = ((VEX_MIPS_REV(hwcaps_host) == VEX_PRID_CPU_32FPR)
+                || arch_host == VexArchMIPS64);
+#endif
+
+   /* Make up an initial environment to use. */
+   env = LibVEX_Alloc_inline(sizeof(ISelEnv));
+   env->vreg_ctr = 0;
+   env->mode64 = mode64;
+   env->fp_mode64 = fp_mode64;
+
+   /* Set up output code array. */
+   env->code = newHInstrArray();
+
+   /* Copy BB's type env. */
+   env->type_env = bb->tyenv;
+
+   /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
+      change as we go along. */
+   env->n_vregmap = bb->tyenv->types_used;
+   env->vregmap = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+
+   /* and finally ... */
+   env->hwcaps          = hwcaps_host;
+   env->chainingAllowed = chainingAllowed;
+   /* (env->hwcaps already set above; duplicate assignment removed) */
+   env->max_ga          = max_ga;
+
+   /* For each IR temporary, allocate a suitably-kinded virtual
+      register. */
+   j = 0;
+   for (i = 0; i < env->n_vregmap; i++) {
+      hregHI = hreg = INVALID_HREG;
+      switch (bb->tyenv->types[i]) {
+         case Ity_I1:
+         case Ity_I8:
+         case Ity_I16:
+         case Ity_I32:
+            if (mode64) {
+               hreg = mkHReg(True, HRcInt64, 0, j++);
+               break;
+            } else {
+               hreg = mkHReg(True, HRcInt32, 0, j++);
+               break;
+            }
+         case Ity_I64:
+            if (mode64) {
+               hreg = mkHReg(True, HRcInt64, 0, j++);
+               break;
+            } else {
+               hreg   = mkHReg(True, HRcInt32, 0, j++);
+               hregHI = mkHReg(True, HRcInt32, 0, j++);
+               break;
+            }
+         case Ity_I128:
+            vassert(mode64);
+            hreg   = mkHReg(True, HRcInt64, 0, j++);
+            hregHI = mkHReg(True, HRcInt64, 0, j++);
+            break;
+         case Ity_F32:
+            if (mode64) {
+               hreg = mkHReg(True, HRcFlt64, 0, j++);
+               break;
+            } else {
+               hreg = mkHReg(True, HRcFlt32, 0, j++);
+               break;
+            }
+         case Ity_F64:
+            hreg = mkHReg(True, HRcFlt64, 0, j++);
+            break;
+         default:
+            ppIRType(bb->tyenv->types[i]);
+            vpanic("iselBB(mips): IRTemp type");
+            break;
+      }
+      env->vregmap[i] = hreg;
+      env->vregmapHI[i] = hregHI;
+   }
+   env->vreg_ctr = j;
+
+   /* The very first instruction must be an event check. */
+   amCounter = MIPSAMode_IR(offs_Host_EvC_Counter, GuestStatePointer(mode64));
+   amFailAddr = MIPSAMode_IR(offs_Host_EvC_FailAddr, GuestStatePointer(mode64));
+   addInstr(env, MIPSInstr_EvCheck(amCounter, amFailAddr));
+
+   /* Possibly a block counter increment (for profiling).  At this
+      point we don't know the address of the counter, so just pretend
+      it is zero.  It will have to be patched later, but before this
+      translation is used, by a call to LibVEX_patchProfCtr. */
+   if (addProfInc) {
+      addInstr(env, MIPSInstr_ProfInc());
+   }
+
+   /* Ok, finally we can iterate over the statements. */
+   for (i = 0; i < bb->stmts_used; i++)
+      iselStmt(env, bb->stmts[i]);
+
+   iselNext(env, bb->next, bb->jumpkind, bb->offsIP);
+
+   /* record the number of vregs we used. */
+   env->code->n_vregs = env->vreg_ctr;
+   return env->code;
+
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                    host_mips_isel.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_ppc_defs.c b/VEX/priv/host_ppc_defs.c
new file mode 100644
index 0000000..e9de08b
--- /dev/null
+++ b/VEX/priv/host_ppc_defs.c
@@ -0,0 +1,6020 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                   host_ppc_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "host_ppc_defs.h"
+
+
+/* --------- Registers. --------- */
+
+const RRegUniverse* getRRegUniverse_PPC ( Bool mode64 )
+{
+   /* The real-register universe is a big constant, so we just want to
+      initialise it once.  rRegUniverse_PPC_initted values: 0=not initted,
+      1=initted for 32-bit-mode, 2=initted for 64-bit-mode */
+   static RRegUniverse rRegUniverse_PPC;
+   static UInt         rRegUniverse_PPC_initted = 0;
+
+   /* Handy shorthand, nothing more */
+   RRegUniverse* ru = &rRegUniverse_PPC;
+
+   /* This isn't thread-safe.  Sigh. */
+   UInt howNeeded = mode64 ? 2 : 1;
+   if (LIKELY(rRegUniverse_PPC_initted == howNeeded))
+      return ru;
+
+   RRegUniverse__init(ru);
+
+   /* Add the registers.  The initial segment of this array must be
+      those available for allocation by reg-alloc, and those that
+      follow are not available for allocation. */
+   // GPR0 = scratch reg where poss. - some ops interpret as value zero
+   // GPR1 = stack pointer
+   // GPR2 = TOC pointer
+   ru->regs[ru->size++] = hregPPC_GPR3(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR4(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR5(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR6(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR7(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR8(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR9(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR10(mode64);
+   if (!mode64) {
+      /* in mode64: 
+         r11 used for calls by ptr / env ptr for some langs
+         r12 used for exception handling and global linkage code */
+      ru->regs[ru->size++] = hregPPC_GPR11(mode64);
+      ru->regs[ru->size++] = hregPPC_GPR12(mode64);
+   }
+   // GPR13 = thread specific pointer
+   // GPR14 and above are callee save.  Yay.
+   ru->regs[ru->size++] = hregPPC_GPR14(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR15(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR16(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR17(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR18(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR19(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR20(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR21(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR22(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR23(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR24(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR25(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR26(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR27(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR28(mode64);
+   // GPR29 is reserved for the dispatcher
+   // GPR30 is reserved as AltiVec spill reg temporary
+   // GPR31 is reserved for the GuestStatePtr
+
+   /* Don't waste the reg-allocs's time trawling through zillions of
+      FP registers - they mostly will never be used.  We'll tolerate
+      the occasional extra spill instead. */
+   /* For both ppc32-linux and ppc64-linux, f14-f31 are callee save.
+      So use them. */
+   ru->regs[ru->size++] = hregPPC_FPR14(mode64);
+   ru->regs[ru->size++] = hregPPC_FPR15(mode64);
+   ru->regs[ru->size++] = hregPPC_FPR16(mode64);
+   ru->regs[ru->size++] = hregPPC_FPR17(mode64);
+   ru->regs[ru->size++] = hregPPC_FPR18(mode64);
+   ru->regs[ru->size++] = hregPPC_FPR19(mode64);
+   ru->regs[ru->size++] = hregPPC_FPR20(mode64);
+   ru->regs[ru->size++] = hregPPC_FPR21(mode64);
+
+   /* Same deal re Altivec */
+   /* For both ppc32-linux and ppc64-linux, v20-v31 are callee save.
+      So use them. */
+   /* NB, vr29 is used as a scratch temporary -- do not allocate */
+   ru->regs[ru->size++] = hregPPC_VR20(mode64);
+   ru->regs[ru->size++] = hregPPC_VR21(mode64);
+   ru->regs[ru->size++] = hregPPC_VR22(mode64);
+   ru->regs[ru->size++] = hregPPC_VR23(mode64);
+   ru->regs[ru->size++] = hregPPC_VR24(mode64);
+   ru->regs[ru->size++] = hregPPC_VR25(mode64);
+   ru->regs[ru->size++] = hregPPC_VR26(mode64);
+   ru->regs[ru->size++] = hregPPC_VR27(mode64);
+   ru->allocable = ru->size;
+
+   /* And other regs, not available to the allocator. */
+   ru->regs[ru->size++] = hregPPC_GPR1(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR29(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR30(mode64);
+   ru->regs[ru->size++] = hregPPC_GPR31(mode64);
+   ru->regs[ru->size++] = hregPPC_VR29(mode64);
+
+   rRegUniverse_PPC_initted = howNeeded;
+
+   RRegUniverse__check_is_sane(ru);
+   return ru;
+}
+
+
+void ppHRegPPC ( HReg reg ) 
+{
+   Int r;
+   static const HChar* ireg32_names[32] 
+      = { "%r0",  "%r1",  "%r2",  "%r3",
+          "%r4",  "%r5",  "%r6",  "%r7",
+          "%r8",  "%r9",  "%r10", "%r11",
+          "%r12", "%r13", "%r14", "%r15",
+          "%r16", "%r17", "%r18", "%r19",
+          "%r20", "%r21", "%r22", "%r23",
+          "%r24", "%r25", "%r26", "%r27",
+          "%r28", "%r29", "%r30", "%r31" };
+   /* Be generic for all virtual regs. */
+   if (hregIsVirtual(reg)) {
+      ppHReg(reg);
+      return;
+   }
+   /* But specific for real regs. */
+   switch (hregClass(reg)) {
+   case HRcInt64:
+      r = hregEncoding(reg);
+      vassert(r >= 0 && r < 32);
+      vex_printf("%s", ireg32_names[r]);
+      return;
+   case HRcInt32:
+      r = hregEncoding(reg);
+      vassert(r >= 0 && r < 32);
+      vex_printf("%s", ireg32_names[r]);
+      return;
+   case HRcFlt64:
+      r = hregEncoding(reg);
+      vassert(r >= 0 && r < 32);
+      vex_printf("%%fr%d", r);
+      return;
+   case HRcVec128:
+      r = hregEncoding(reg);
+      vassert(r >= 0 && r < 32);
+      vex_printf("%%v%d", r);
+      return;
+   default:
+      vpanic("ppHRegPPC");
+   }
+}
+
+
+/* --------- Condition codes, Intel encoding. --------- */
+
+const HChar* showPPCCondCode ( PPCCondCode cond )
+{
+   if (cond.test == Pct_ALWAYS) return "always";
+
+   switch (cond.flag) {
+   case Pcf_7SO:
+      return (cond.test == Pct_TRUE) ? "cr7.so=1" : "cr7.so=0";
+   case Pcf_7EQ:
+      return (cond.test == Pct_TRUE) ? "cr7.eq=1" : "cr7.eq=0";
+   case Pcf_7GT:
+      return (cond.test == Pct_TRUE) ? "cr7.gt=1" : "cr7.gt=0";
+   case Pcf_7LT:
+      return (cond.test == Pct_TRUE) ? "cr7.lt=1" : "cr7.lt=0";
+   case Pcf_NONE:
+      return "no-flag";
+   default: vpanic("ppPPCCondCode");
+   }
+}
+
+/* construct condition code */
+PPCCondCode mk_PPCCondCode ( PPCCondTest test, PPCCondFlag flag )
+{
+   PPCCondCode cc;
+   cc.flag = flag;
+   cc.test = test;
+   if (test == Pct_ALWAYS) { 
+      vassert(flag == Pcf_NONE);
+   } else {
+      vassert(flag != Pcf_NONE);
+   }
+   return cc;
+}
+
+/* false->true, true->false */
+PPCCondTest invertCondTest ( PPCCondTest ct )
+{
+   vassert(ct != Pct_ALWAYS);
+   return (ct == Pct_TRUE) ? Pct_FALSE : Pct_TRUE;
+}
+
+
+/* --------- PPCAMode: memory address expressions. --------- */
+
+PPCAMode* PPCAMode_IR ( Int idx, HReg base ) {
+   PPCAMode* am = LibVEX_Alloc_inline(sizeof(PPCAMode));
+   vassert(idx >= -0x8000 && idx < 0x8000);
+   am->tag = Pam_IR;
+   am->Pam.IR.base = base;
+   am->Pam.IR.index = idx;
+   return am;
+}
+PPCAMode* PPCAMode_RR ( HReg idx, HReg base ) {
+   PPCAMode* am = LibVEX_Alloc_inline(sizeof(PPCAMode));
+   am->tag = Pam_RR;
+   am->Pam.RR.base = base;
+   am->Pam.RR.index = idx;
+   return am;
+}
+
+PPCAMode* dopyPPCAMode ( PPCAMode* am ) {
+   switch (am->tag) {
+   case Pam_IR: 
+      return PPCAMode_IR( am->Pam.IR.index, am->Pam.IR.base );
+   case Pam_RR: 
+      return PPCAMode_RR( am->Pam.RR.index, am->Pam.RR.base );
+   default:
+      vpanic("dopyPPCAMode");
+   }
+}
+
+void ppPPCAMode ( PPCAMode* am ) {
+   switch (am->tag) {
+   case Pam_IR: 
+      if (am->Pam.IR.index == 0)
+         vex_printf("0(");
+      else
+         vex_printf("%d(", (Int)am->Pam.IR.index);
+      ppHRegPPC(am->Pam.IR.base);
+      vex_printf(")");
+      return;
+   case Pam_RR:
+      ppHRegPPC(am->Pam.RR.base);
+      vex_printf(",");
+      ppHRegPPC(am->Pam.RR.index);
+      return;
+   default:
+      vpanic("ppPPCAMode");
+   }
+}
+
+static void addRegUsage_PPCAMode ( HRegUsage* u, PPCAMode* am ) {
+   switch (am->tag) {
+   case Pam_IR: 
+      addHRegUse(u, HRmRead, am->Pam.IR.base);
+      return;
+   case Pam_RR:
+      addHRegUse(u, HRmRead, am->Pam.RR.base);
+      addHRegUse(u, HRmRead, am->Pam.RR.index);
+      return;
+   default:
+      vpanic("addRegUsage_PPCAMode");
+   }
+}
+
+static void mapRegs_PPCAMode ( HRegRemap* m, PPCAMode* am ) {
+   switch (am->tag) {
+   case Pam_IR: 
+      am->Pam.IR.base = lookupHRegRemap(m, am->Pam.IR.base);
+      return;
+   case Pam_RR:
+      am->Pam.RR.base = lookupHRegRemap(m, am->Pam.RR.base);
+      am->Pam.RR.index = lookupHRegRemap(m, am->Pam.RR.index);
+      return;
+   default:
+      vpanic("mapRegs_PPCAMode");
+   }
+}
+
+/* --------- Operand, which can be a reg or a u16/s16. --------- */
+
+PPCRH* PPCRH_Imm ( Bool syned, UShort imm16 ) {
+   PPCRH* op         = LibVEX_Alloc_inline(sizeof(PPCRH));
+   op->tag           = Prh_Imm;
+   op->Prh.Imm.syned = syned;
+   op->Prh.Imm.imm16 = imm16;
+   /* If this is a signed value, ensure it's not -32768, so that we
+      are guaranteed always to be able to negate if needed. */
+   if (syned)
+      vassert(imm16 != 0x8000);
+   vassert(syned == True || syned == False);
+   return op;
+}
+PPCRH* PPCRH_Reg ( HReg reg ) {
+   PPCRH* op       = LibVEX_Alloc_inline(sizeof(PPCRH));
+   op->tag         = Prh_Reg;
+   op->Prh.Reg.reg = reg;
+   return op;
+}
+
+void ppPPCRH ( PPCRH* op ) {
+   switch (op->tag) {
+   case Prh_Imm: 
+      if (op->Prh.Imm.syned)
+         vex_printf("%d", (Int)(Short)op->Prh.Imm.imm16);
+      else
+         vex_printf("%u", (UInt)(UShort)op->Prh.Imm.imm16);
+      return;
+   case Prh_Reg: 
+      ppHRegPPC(op->Prh.Reg.reg);
+      return;
+   default: 
+      vpanic("ppPPCRH");
+   }
+}
+
+/* An PPCRH can only be used in a "read" context (what would it mean
+   to write or modify a literal?) and so we enumerate its registers
+   accordingly. */
+static void addRegUsage_PPCRH ( HRegUsage* u, PPCRH* op ) {
+   switch (op->tag) {
+   case Prh_Imm: 
+      return;
+   case Prh_Reg: 
+      addHRegUse(u, HRmRead, op->Prh.Reg.reg);
+      return;
+   default: 
+      vpanic("addRegUsage_PPCRH");
+   }
+}
+
+static void mapRegs_PPCRH ( HRegRemap* m, PPCRH* op ) {
+   switch (op->tag) {
+   case Prh_Imm: 
+      return;
+   case Prh_Reg: 
+      op->Prh.Reg.reg = lookupHRegRemap(m, op->Prh.Reg.reg);
+      return;
+   default: 
+      vpanic("mapRegs_PPCRH");
+   }
+}
+
+
+/* --------- Operand, which can be a reg or a u32/64. --------- */
+
+PPCRI* PPCRI_Imm ( ULong imm64 ) {
+   PPCRI* op   = LibVEX_Alloc_inline(sizeof(PPCRI));
+   op->tag     = Pri_Imm;
+   op->Pri.Imm = imm64;
+   return op;
+}
+PPCRI* PPCRI_Reg ( HReg reg ) {
+   PPCRI* op   = LibVEX_Alloc_inline(sizeof(PPCRI));
+   op->tag     = Pri_Reg;
+   op->Pri.Reg = reg;
+   return op;
+}
+
+void ppPPCRI ( PPCRI* dst ) {
+   switch (dst->tag) {
+      case Pri_Imm: 
+         vex_printf("0x%llx", dst->Pri.Imm);
+         break;
+      case Pri_Reg: 
+         ppHRegPPC(dst->Pri.Reg);
+         break;
+      default: 
+         vpanic("ppPPCRI");
+   }
+}
+
+/* An PPCRI can only be used in a "read" context (what would it
+   mean to write or modify a literal?) and so we enumerate its
+   registers accordingly. */
+static void addRegUsage_PPCRI ( HRegUsage* u, PPCRI* dst ) {
+   switch (dst->tag) {
+      case Pri_Imm: 
+         return;
+      case Pri_Reg: 
+         addHRegUse(u, HRmRead, dst->Pri.Reg);
+         return;
+      default: 
+         vpanic("addRegUsage_PPCRI");
+   }
+}
+
+static void mapRegs_PPCRI ( HRegRemap* m, PPCRI* dst ) {
+   switch (dst->tag) {
+      case Pri_Imm: 
+         return;
+      case Pri_Reg: 
+         dst->Pri.Reg = lookupHRegRemap(m, dst->Pri.Reg);
+         return;
+      default: 
+         vpanic("mapRegs_PPCRI");
+   }
+}
+
+
+/* --------- Operand, which can be a vector reg or a simm5. --------- */
+
+PPCVI5s* PPCVI5s_Imm ( Char simm5 ) {
+   PPCVI5s* op   = LibVEX_Alloc_inline(sizeof(PPCVI5s));
+   op->tag       = Pvi_Imm;
+   op->Pvi.Imm5s = simm5;
+   vassert(simm5 >= -16 && simm5 <= 15);
+   return op;
+}
+PPCVI5s* PPCVI5s_Reg ( HReg reg ) {
+   PPCVI5s* op = LibVEX_Alloc_inline(sizeof(PPCVI5s));
+   op->tag     = Pvi_Reg;
+   op->Pvi.Reg = reg;
+   vassert(hregClass(reg) == HRcVec128);
+   return op;
+}
+
+void ppPPCVI5s ( PPCVI5s* src ) {
+   switch (src->tag) {
+      case Pvi_Imm: 
+         vex_printf("%d", (Int)src->Pvi.Imm5s);
+         break;
+      case Pvi_Reg: 
+         ppHRegPPC(src->Pvi.Reg);
+         break;
+      default: 
+         vpanic("ppPPCVI5s");
+   }
+}
+
+/* An PPCVI5s can only be used in a "read" context (what would it
+   mean to write or modify a literal?) and so we enumerate its
+   registers accordingly. */
+static void addRegUsage_PPCVI5s ( HRegUsage* u, PPCVI5s* dst ) {
+   switch (dst->tag) {
+      case Pvi_Imm: 
+         return;
+      case Pvi_Reg: 
+         addHRegUse(u, HRmRead, dst->Pvi.Reg);
+         return;
+      default: 
+         vpanic("addRegUsage_PPCVI5s");
+   }
+}
+
+static void mapRegs_PPCVI5s ( HRegRemap* m, PPCVI5s* dst ) {
+   switch (dst->tag) {
+      case Pvi_Imm: 
+         return;
+      case Pvi_Reg: 
+         dst->Pvi.Reg = lookupHRegRemap(m, dst->Pvi.Reg);
+         return;
+      default: 
+         vpanic("mapRegs_PPCVI5s");
+   }
+}
+
+
+/* --------- Instructions. --------- */
+
+const HChar* showPPCUnaryOp ( PPCUnaryOp op ) {
+   switch (op) {
+   case Pun_NOT:   return "not";
+   case Pun_NEG:   return "neg";
+   case Pun_CLZ32: return "cntlzw";
+   case Pun_CLZ64: return "cntlzd";
+   case Pun_EXTSW: return "extsw";
+   default: vpanic("showPPCUnaryOp");
+   }
+}
+
+const HChar* showPPCAluOp ( PPCAluOp op, Bool immR ) {
+   switch (op) {
+      case Palu_ADD: return immR ? "addi"  : "add";
+      case Palu_SUB: return immR ? "subi"  : "sub";
+      case Palu_AND: return immR ? "andi." : "and";
+      case Palu_OR:  return immR ? "ori"   : "or";
+      case Palu_XOR: return immR ? "xori"  : "xor";
+      default: vpanic("showPPCAluOp");
+   }
+}
+
+const HChar* showPPCShftOp ( PPCShftOp op, Bool immR, Bool sz32 ) {
+   switch (op) {
+      case Pshft_SHL: return sz32 ? (immR ? "slwi"  : "slw") : 
+                                    (immR ? "sldi"  : "sld");
+      case Pshft_SHR: return sz32 ? (immR ? "srwi"  : "srw") :
+                                    (immR ? "srdi"  : "srd");
+      case Pshft_SAR: return sz32 ? (immR ? "srawi" : "sraw") :
+                                    (immR ? "sradi" : "srad");
+      default: vpanic("showPPCShftOp");
+   }
+}
+
+const HChar* showPPCFpOp ( PPCFpOp op ) {
+   switch (op) {
+      case Pfp_ADDD:   return "fadd";
+      case Pfp_SUBD:   return "fsub";
+      case Pfp_MULD:   return "fmul";
+      case Pfp_DIVD:   return "fdiv";
+      case Pfp_MADDD:  return "fmadd";
+      case Pfp_MSUBD:  return "fmsub";
+      case Pfp_MADDS:  return "fmadds";
+      case Pfp_MSUBS:  return "fmsubs";
+      case Pfp_ADDS:   return "fadds";
+      case Pfp_SUBS:   return "fsubs";
+      case Pfp_MULS:   return "fmuls";
+      case Pfp_DIVS:   return "fdivs";
+      case Pfp_SQRT:   return "fsqrt";
+      case Pfp_ABS:    return "fabs";
+      case Pfp_NEG:    return "fneg";
+      case Pfp_MOV:    return "fmr";
+      case Pfp_RES:    return "fres";
+      case Pfp_RSQRTE: return "frsqrte";
+      case Pfp_FRIM:   return "frim";
+      case Pfp_FRIN:   return "frin";
+      case Pfp_FRIP:   return "frip";
+      case Pfp_FRIZ:   return "friz";
+      case Pfp_DFPADD:     return "dadd";
+      case Pfp_DFPADDQ:    return "daddq";
+      case Pfp_DFPSUB:     return "dsub";
+      case Pfp_DFPSUBQ:    return "dsubq";
+      case Pfp_DFPMUL:     return "dmul";
+      case Pfp_DFPMULQ:    return "dmulq";
+      case Pfp_DFPDIV:     return "ddivd";
+      case Pfp_DFPDIVQ:    return "ddivq";
+      case Pfp_DCTDP:      return "dctdp";
+      case Pfp_DRSP:       return "drsp";
+      case Pfp_DCTFIX:     return "dctfix";
+      case Pfp_DCFFIX:     return "dcffix";
+      case Pfp_DCTQPQ:     return "dctqpq";
+      case Pfp_DCFFIXQ:    return "dcffixq";
+      case Pfp_DQUA:       return "dqua";
+      case Pfp_DQUAQ:      return "dquaq";
+      case Pfp_DXEX:       return "dxex";
+      case Pfp_DXEXQ:      return "dxexq";
+      case Pfp_DIEX:       return "diex";
+      case Pfp_DIEXQ:      return "diexq";
+      case Pfp_RRDTR:      return "rrdtr";
+      default: vpanic("showPPCFpOp");
+   }
+}
+
+const HChar* showPPCAvOp ( PPCAvOp op ) {
+   switch (op) {
+
+   /* Unary */
+   case Pav_MOV:       return "vmr";      /* Mov */
+     
+   case Pav_AND:       return "vand";     /* Bitwise */
+   case Pav_OR:        return "vor";
+   case Pav_XOR:       return "vxor";
+   case Pav_NOT:       return "vnot";
+
+   case Pav_UNPCKH8S:  return "vupkhsb";  /* Unpack */
+   case Pav_UNPCKH16S: return "vupkhsh";
+   case Pav_UNPCKL8S:  return "vupklsb";
+   case Pav_UNPCKL16S: return "vupklsh";
+   case Pav_UNPCKHPIX: return "vupkhpx";
+   case Pav_UNPCKLPIX: return "vupklpx";
+
+   /* Integer binary */
+   case Pav_ADDU:      return "vaddu_m";  // b,h,w,dw
+   case Pav_QADDU:     return "vaddu_s";  // b,h,w,dw
+   case Pav_QADDS:     return "vadds_s";  // b,h,w,dw
+     
+   case Pav_SUBU:      return "vsubu_m";  // b,h,w,dw
+   case Pav_QSUBU:     return "vsubu_s";  // b,h,w,dw
+   case Pav_QSUBS:     return "vsubs_s";  // b,h,w,dw
+     
+   case Pav_MULU:      return "vmulu";    // w
+   case Pav_OMULU:     return "vmulou";   // b,h,w
+   case Pav_OMULS:     return "vmulos";   // b,h,w
+   case Pav_EMULU:     return "vmuleu";   // b,h,w
+   case Pav_EMULS:     return "vmules";   // b,h,w
+  
+   case Pav_AVGU:      return "vavgu";    // b,h,w
+   case Pav_AVGS:      return "vavgs";    // b,h,w
+     
+   case Pav_MAXU:      return "vmaxu";    // b,h,w
+   case Pav_MAXS:      return "vmaxs";    // b,h,w
+     
+   case Pav_MINU:      return "vminu";    // b,h,w
+   case Pav_MINS:      return "vmins";    // b,h,w
+     
+   /* Compare (always affects CR field 6) */
+   case Pav_CMPEQU:    return "vcmpequ";  // b,h,w
+   case Pav_CMPGTU:    return "vcmpgtu";  // b,h,w
+   case Pav_CMPGTS:    return "vcmpgts";  // b,h,w
+
+   /* Shift */
+   case Pav_SHL:       return "vsl";      // ' ',b,h,w,dw
+   case Pav_SHR:       return "vsr";      // ' ',b,h,w,dw
+   case Pav_SAR:       return "vsra";     // b,h,w,dw
+   case Pav_ROTL:      return "vrl";      // b,h,w,dw
+
+   /* Pack */
+   case Pav_PACKUU:    return "vpku_um";  // h,w,dw
+   case Pav_QPACKUU:   return "vpku_us";  // h,w
+   case Pav_QPACKSU:   return "vpks_us";  // h,w
+   case Pav_QPACKSS:   return "vpks_ss";  // h,w
+   case Pav_PACKPXL:   return "vpkpx";
+
+   /* Merge */
+   case Pav_MRGHI:     return "vmrgh";    // b,h,w
+   case Pav_MRGLO:     return "vmrgl";    // b,h,w
+
+   /* Concatenation */
+   case Pav_CATODD:     return "vmrgow";    // w
+   case Pav_CATEVEN:    return "vmrgew";    // w
+
+   /* SHA */
+   case Pav_SHA256:     return "vshasigmaw"; // w
+   case Pav_SHA512:     return "vshasigmaw"; // dw
+
+   /* BCD */
+   case Pav_BCDAdd:     return "bcdadd.";  // qw
+   case Pav_BCDSub:     return "bcdsub.";  // qw
+
+   /* Polynomial arith */
+   case Pav_POLYMULADD: return "vpmsum";   // b, h, w, d
+
+   /* Cipher */
+   case Pav_CIPHERV128:  case Pav_CIPHERLV128:
+   case Pav_NCIPHERV128: case Pav_NCIPHERLV128:
+   case Pav_CIPHERSUBV128: return "v_cipher_";  // qw
+
+   /* zero count */
+   case Pav_ZEROCNTBYTE: case Pav_ZEROCNTWORD:
+   case Pav_ZEROCNTHALF: case Pav_ZEROCNTDBL:
+      return "vclz_";                           // b, h, w, d
+
+   /* vector gather (byte-by-byte bit matrix transpose) */
+   case Pav_BITMTXXPOSE:
+      return "vgbbd";
+
+   default: vpanic("showPPCAvOp");
+   }
+}
+
+const HChar* showPPCAvFpOp ( PPCAvFpOp op ) {
+   switch (op) {
+   /* Floating Point Binary */
+   case Pavfp_ADDF:      return "vaddfp";
+   case Pavfp_SUBF:      return "vsubfp";
+   case Pavfp_MULF:      return "vmaddfp";
+   case Pavfp_MAXF:      return "vmaxfp";
+   case Pavfp_MINF:      return "vminfp";
+   case Pavfp_CMPEQF:    return "vcmpeqfp";
+   case Pavfp_CMPGTF:    return "vcmpgtfp";
+   case Pavfp_CMPGEF:    return "vcmpgefp";
+     
+   /* Floating Point Unary */
+   case Pavfp_RCPF:      return "vrefp";
+   case Pavfp_RSQRTF:    return "vrsqrtefp";
+   case Pavfp_CVTU2F:    return "vcfux";
+   case Pavfp_CVTS2F:    return "vcfsx";
+   case Pavfp_QCVTF2U:   return "vctuxs";
+   case Pavfp_QCVTF2S:   return "vctsxs";
+   case Pavfp_ROUNDM:    return "vrfim";
+   case Pavfp_ROUNDP:    return "vrfip";
+   case Pavfp_ROUNDN:    return "vrfin";
+   case Pavfp_ROUNDZ:    return "vrfiz";
+
+   default: vpanic("showPPCAvFpOp");
+   }
+}
+
+PPCInstr* PPCInstr_LI ( HReg dst, ULong imm64, Bool mode64 )
+{
+   PPCInstr* i     = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag          = Pin_LI;
+   i->Pin.LI.dst   = dst;
+   i->Pin.LI.imm64 = imm64;
+   if (!mode64)
+      vassert( (Long)imm64 == (Long)(Int)(UInt)imm64 );
+   return i;
+}
+PPCInstr* PPCInstr_Alu ( PPCAluOp op, HReg dst, 
+                         HReg srcL, PPCRH* srcR ) {
+   PPCInstr* i     = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag          = Pin_Alu;
+   i->Pin.Alu.op   = op;
+   i->Pin.Alu.dst  = dst;
+   i->Pin.Alu.srcL = srcL;
+   i->Pin.Alu.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_Shft ( PPCShftOp op, Bool sz32, 
+                          HReg dst, HReg srcL, PPCRH* srcR ) {
+   PPCInstr* i      = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag           = Pin_Shft;
+   i->Pin.Shft.op   = op;
+   i->Pin.Shft.sz32 = sz32;
+   i->Pin.Shft.dst  = dst;
+   i->Pin.Shft.srcL = srcL;
+   i->Pin.Shft.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_AddSubC ( Bool isAdd, Bool setC,
+                             HReg dst, HReg srcL, HReg srcR ) {
+   PPCInstr* i          = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag               = Pin_AddSubC;
+   i->Pin.AddSubC.isAdd = isAdd;
+   i->Pin.AddSubC.setC  = setC;
+   i->Pin.AddSubC.dst   = dst;
+   i->Pin.AddSubC.srcL  = srcL;
+   i->Pin.AddSubC.srcR  = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_Cmp ( Bool syned, Bool sz32, 
+                         UInt crfD, HReg srcL, PPCRH* srcR ) {
+   PPCInstr* i      = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag           = Pin_Cmp;
+   i->Pin.Cmp.syned = syned;
+   i->Pin.Cmp.sz32  = sz32;
+   i->Pin.Cmp.crfD  = crfD;
+   i->Pin.Cmp.srcL  = srcL;
+   i->Pin.Cmp.srcR  = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_Unary ( PPCUnaryOp op, HReg dst, HReg src ) {
+   PPCInstr* i      = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag           = Pin_Unary;
+   i->Pin.Unary.op  = op;
+   i->Pin.Unary.dst = dst;
+   i->Pin.Unary.src = src;
+   return i;
+}
+PPCInstr* PPCInstr_MulL ( Bool syned, Bool hi, Bool sz32, 
+                          HReg dst, HReg srcL, HReg srcR ) {
+   PPCInstr* i       = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag            = Pin_MulL;
+   i->Pin.MulL.syned = syned;
+   i->Pin.MulL.hi    = hi;
+   i->Pin.MulL.sz32  = sz32;
+   i->Pin.MulL.dst   = dst;
+   i->Pin.MulL.srcL  = srcL;
+   i->Pin.MulL.srcR  = srcR;
+   /* if doing the low word, the signedness is irrelevant, but tie it
+      down anyway. */
+   if (!hi) vassert(!syned);
+   return i;
+}
+PPCInstr* PPCInstr_Div ( Bool extended, Bool syned, Bool sz32,
+                         HReg dst, HReg srcL, HReg srcR ) {
+   PPCInstr* i      = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag           = Pin_Div;
+   i->Pin.Div.extended = extended;
+   i->Pin.Div.syned = syned;
+   i->Pin.Div.sz32  = sz32;
+   i->Pin.Div.dst   = dst;
+   i->Pin.Div.srcL  = srcL;
+   i->Pin.Div.srcR  = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_Call ( PPCCondCode cond, 
+                          Addr64 target, UInt argiregs, RetLoc rloc ) {
+   UInt mask;
+   PPCInstr* i          = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag               = Pin_Call;
+   i->Pin.Call.cond     = cond;
+   i->Pin.Call.target   = target;
+   i->Pin.Call.argiregs = argiregs;
+   i->Pin.Call.rloc     = rloc;
+   /* Only r3 .. r10 inclusive may be used as arg regs. Hence: */
+   mask = (1<<3)|(1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)|(1<<10);
+   vassert(0 == (argiregs & ~mask));
+   vassert(is_sane_RetLoc(rloc));
+   return i;
+}
+PPCInstr* PPCInstr_XDirect ( Addr64 dstGA, PPCAMode* amCIA,
+                             PPCCondCode cond, Bool toFastEP ) {
+   PPCInstr* i             = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                  = Pin_XDirect;
+   i->Pin.XDirect.dstGA    = dstGA;
+   i->Pin.XDirect.amCIA    = amCIA;
+   i->Pin.XDirect.cond     = cond;
+   i->Pin.XDirect.toFastEP = toFastEP;
+   return i;
+}
+PPCInstr* PPCInstr_XIndir ( HReg dstGA, PPCAMode* amCIA,
+                            PPCCondCode cond ) {
+   PPCInstr* i         = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag              = Pin_XIndir;
+   i->Pin.XIndir.dstGA = dstGA;
+   i->Pin.XIndir.amCIA = amCIA;
+   i->Pin.XIndir.cond  = cond;
+   return i;
+}
+PPCInstr* PPCInstr_XAssisted ( HReg dstGA, PPCAMode* amCIA,
+                               PPCCondCode cond, IRJumpKind jk ) {
+   PPCInstr* i            = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                 = Pin_XAssisted;
+   i->Pin.XAssisted.dstGA = dstGA;
+   i->Pin.XAssisted.amCIA = amCIA;
+   i->Pin.XAssisted.cond  = cond;
+   i->Pin.XAssisted.jk    = jk;
+   return i;
+}
+PPCInstr* PPCInstr_CMov  ( PPCCondCode cond, 
+                           HReg dst, PPCRI* src ) {
+   PPCInstr* i      = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag           = Pin_CMov;
+   i->Pin.CMov.cond = cond;
+   i->Pin.CMov.src  = src;
+   i->Pin.CMov.dst  = dst;
+   vassert(cond.test != Pct_ALWAYS);
+   return i;
+}
+PPCInstr* PPCInstr_Load ( UChar sz,
+                          HReg dst, PPCAMode* src, Bool mode64 ) {
+   PPCInstr* i       = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag            = Pin_Load;
+   i->Pin.Load.sz    = sz;
+   i->Pin.Load.src   = src;
+   i->Pin.Load.dst   = dst;
+   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+   if (sz == 8) vassert(mode64);
+   return i;
+}
+PPCInstr* PPCInstr_LoadL ( UChar sz,
+                           HReg dst, HReg src, Bool mode64 )
+{
+   PPCInstr* i       = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag            = Pin_LoadL;
+   i->Pin.LoadL.sz   = sz;
+   i->Pin.LoadL.src  = src;
+   i->Pin.LoadL.dst  = dst;
+   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+   if (sz == 8) vassert(mode64);
+   return i;
+}
+PPCInstr* PPCInstr_Store ( UChar sz, PPCAMode* dst, HReg src,
+                           Bool mode64 ) {
+   PPCInstr* i      = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag           = Pin_Store;
+   i->Pin.Store.sz  = sz;
+   i->Pin.Store.src = src;
+   i->Pin.Store.dst = dst;
+   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+   if (sz == 8) vassert(mode64);
+   return i;
+}
+PPCInstr* PPCInstr_StoreC ( UChar sz, HReg dst, HReg src, Bool mode64 ) {
+   PPCInstr* i       = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag            = Pin_StoreC;
+   i->Pin.StoreC.sz  = sz;
+   i->Pin.StoreC.src = src;
+   i->Pin.StoreC.dst = dst;
+   vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+   if (sz == 8) vassert(mode64);
+   return i;
+}
+PPCInstr* PPCInstr_Set ( PPCCondCode cond, HReg dst ) {
+   PPCInstr* i     = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag          = Pin_Set;
+   i->Pin.Set.cond = cond;
+   i->Pin.Set.dst  = dst;
+   return i;
+}
+PPCInstr* PPCInstr_MfCR ( HReg dst )
+{
+   PPCInstr* i     = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag          = Pin_MfCR;
+   i->Pin.MfCR.dst = dst;
+   return i;
+}
+PPCInstr* PPCInstr_MFence ( void )
+{
+   PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag      = Pin_MFence;
+   return i;
+}
+
+PPCInstr* PPCInstr_FpUnary ( PPCFpOp op, HReg dst, HReg src ) {
+   PPCInstr* i        = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag             = Pin_FpUnary;
+   i->Pin.FpUnary.op  = op;
+   i->Pin.FpUnary.dst = dst;
+   i->Pin.FpUnary.src = src;
+   return i;
+}
+PPCInstr* PPCInstr_FpBinary ( PPCFpOp op, HReg dst,
+                              HReg srcL, HReg srcR ) {
+   PPCInstr* i          = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag               = Pin_FpBinary;
+   i->Pin.FpBinary.op   = op;
+   i->Pin.FpBinary.dst  = dst;
+   i->Pin.FpBinary.srcL = srcL;
+   i->Pin.FpBinary.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_FpMulAcc ( PPCFpOp op, HReg dst, HReg srcML, 
+                                          HReg srcMR, HReg srcAcc )
+{
+   PPCInstr* i            = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                 = Pin_FpMulAcc;
+   i->Pin.FpMulAcc.op     = op;
+   i->Pin.FpMulAcc.dst    = dst;
+   i->Pin.FpMulAcc.srcML  = srcML;
+   i->Pin.FpMulAcc.srcMR  = srcMR;
+   i->Pin.FpMulAcc.srcAcc = srcAcc;
+   return i;
+}
+PPCInstr* PPCInstr_FpLdSt ( Bool isLoad, UChar sz,
+                            HReg reg, PPCAMode* addr ) {
+   PPCInstr* i          = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag               = Pin_FpLdSt;
+   i->Pin.FpLdSt.isLoad = isLoad;
+   i->Pin.FpLdSt.sz     = sz;
+   i->Pin.FpLdSt.reg    = reg;
+   i->Pin.FpLdSt.addr   = addr;
+   vassert(sz == 4 || sz == 8);
+   return i;
+}
+PPCInstr* PPCInstr_FpSTFIW ( HReg addr, HReg data )
+{
+   PPCInstr* i         = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag              = Pin_FpSTFIW;
+   i->Pin.FpSTFIW.addr = addr;
+   i->Pin.FpSTFIW.data = data;
+   return i;
+}
+PPCInstr* PPCInstr_FpRSP ( HReg dst, HReg src ) {
+   PPCInstr* i      = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag           = Pin_FpRSP;
+   i->Pin.FpRSP.dst = dst;
+   i->Pin.FpRSP.src = src;
+   return i;
+}
+PPCInstr* PPCInstr_Dfp64Unary(PPCFpOp op, HReg dst, HReg src) {
+   PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) );
+   i->tag = Pin_Dfp64Unary;
+   i->Pin.Dfp64Unary.op = op;
+   i->Pin.Dfp64Unary.dst = dst;
+   i->Pin.Dfp64Unary.src = src;
+   return i;
+}
+PPCInstr* PPCInstr_Dfp64Binary(PPCFpOp op, HReg dst, HReg srcL, HReg srcR) {
+   PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) );
+   i->tag = Pin_Dfp64Binary;
+   i->Pin.Dfp64Binary.op = op;
+   i->Pin.Dfp64Binary.dst = dst;
+   i->Pin.Dfp64Binary.srcL = srcL;
+   i->Pin.Dfp64Binary.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_DfpShift ( PPCFpOp op, HReg dst, HReg src, PPCRI* shift ) {
+   PPCInstr* i            = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                 = Pin_DfpShift;
+   i->Pin.DfpShift.op     = op;
+   i->Pin.DfpShift.shift  = shift;
+   i->Pin.DfpShift.src    = src;
+   i->Pin.DfpShift.dst    = dst;
+   return i;
+}
+PPCInstr* PPCInstr_Dfp128Unary(PPCFpOp op, HReg dst_hi, HReg dst_lo,
+                                HReg src_hi, HReg src_lo) {
+   PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) );
+   i->tag = Pin_Dfp128Unary;
+   i->Pin.Dfp128Unary.op = op;
+   i->Pin.Dfp128Unary.dst_hi = dst_hi;
+   i->Pin.Dfp128Unary.dst_lo = dst_lo;
+   i->Pin.Dfp128Unary.src_hi = src_hi;
+   i->Pin.Dfp128Unary.src_lo = src_lo;
+   return i;
+}
+PPCInstr* PPCInstr_Dfp128Binary(PPCFpOp op, HReg dst_hi, HReg dst_lo,
+                                HReg srcR_hi, HReg srcR_lo) {
+   /* dst is used to pass the srcL argument and return the result */
+   PPCInstr* i = LibVEX_Alloc_inline( sizeof(PPCInstr) );
+   i->tag = Pin_Dfp128Binary;
+   i->Pin.Dfp128Binary.op = op;
+   i->Pin.Dfp128Binary.dst_hi = dst_hi;
+   i->Pin.Dfp128Binary.dst_lo = dst_lo;
+   i->Pin.Dfp128Binary.srcR_hi = srcR_hi;
+   i->Pin.Dfp128Binary.srcR_lo = srcR_lo;
+   return i;
+}
+PPCInstr* PPCInstr_DfpShift128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo, 
+                                 HReg src_hi, HReg src_lo,
+                                 PPCRI* shift ) {
+   PPCInstr* i               = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                    = Pin_DfpShift128;
+   i->Pin.DfpShift128.op     = op;
+   i->Pin.DfpShift128.shift  = shift;
+   i->Pin.DfpShift128.src_hi = src_hi;
+   i->Pin.DfpShift128.src_lo = src_lo;
+   i->Pin.DfpShift128.dst_hi = dst_hi;
+   i->Pin.DfpShift128.dst_lo = dst_lo;
+   return i;
+}
+PPCInstr* PPCInstr_DfpRound ( HReg dst, HReg src, PPCRI* r_rmc ) {
+   PPCInstr* i           = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                = Pin_DfpRound;
+   i->Pin.DfpRound.dst   = dst;
+   i->Pin.DfpRound.src   = src;
+   i->Pin.DfpRound.r_rmc = r_rmc;
+   return i;
+}
+PPCInstr* PPCInstr_DfpRound128 ( HReg dst_hi, HReg dst_lo, HReg src_hi, 
+                                 HReg src_lo, PPCRI* r_rmc ) {
+   PPCInstr* i               = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                    = Pin_DfpRound128;
+   i->Pin.DfpRound128.dst_hi = dst_hi;
+   i->Pin.DfpRound128.dst_lo = dst_lo;
+   i->Pin.DfpRound128.src_hi = src_hi;
+   i->Pin.DfpRound128.src_lo = src_lo;
+   i->Pin.DfpRound128.r_rmc  = r_rmc;
+   return i;
+}
+PPCInstr* PPCInstr_DfpQuantize ( PPCFpOp op, HReg dst, HReg srcL, HReg srcR,
+                                 PPCRI* rmc ) {
+   PPCInstr* i             = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                  = Pin_DfpQuantize;
+   i->Pin.DfpQuantize.op   = op;
+   i->Pin.DfpQuantize.dst  = dst;
+   i->Pin.DfpQuantize.srcL = srcL;
+   i->Pin.DfpQuantize.srcR = srcR;
+   i->Pin.DfpQuantize.rmc  = rmc;
+   return i;
+}
+PPCInstr* PPCInstr_DfpQuantize128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo,
+                                    HReg src_hi, HReg src_lo, PPCRI* rmc ) {
+   /* dst is used to pass left operand in and return result */
+   PPCInstr* i                  = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                       = Pin_DfpQuantize128;
+   i->Pin.DfpQuantize128.op     = op;
+   i->Pin.DfpQuantize128.dst_hi = dst_hi;
+   i->Pin.DfpQuantize128.dst_lo = dst_lo;
+   i->Pin.DfpQuantize128.src_hi = src_hi;
+   i->Pin.DfpQuantize128.src_lo = src_lo;
+   i->Pin.DfpQuantize128.rmc    = rmc;
+   return i;
+}
+PPCInstr* PPCInstr_DfpD128toD64 ( PPCFpOp op, HReg dst,
+                                  HReg src_hi, HReg src_lo ) {
+   PPCInstr* i                = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                     = Pin_DfpD128toD64;
+   i->Pin.DfpD128toD64.op     = op;
+   i->Pin.DfpD128toD64.src_hi = src_hi;
+   i->Pin.DfpD128toD64.src_lo = src_lo;
+   i->Pin.DfpD128toD64.dst    = dst;
+   return i;
+}
+PPCInstr* PPCInstr_DfpI64StoD128 ( PPCFpOp op, HReg dst_hi,
+                                   HReg dst_lo, HReg src ) {
+   PPCInstr* i                 = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                      = Pin_DfpI64StoD128;
+   i->Pin.DfpI64StoD128.op     = op;
+   i->Pin.DfpI64StoD128.src    = src;
+   i->Pin.DfpI64StoD128.dst_hi = dst_hi;
+   i->Pin.DfpI64StoD128.dst_lo = dst_lo;
+   return i;
+}
+PPCInstr* PPCInstr_ExtractExpD128 ( PPCFpOp op, HReg dst,
+                                    HReg src_hi, HReg src_lo ) {
+   /* dst is used to pass the srcL argument */                             
+   PPCInstr* i                  = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                       = Pin_ExtractExpD128;
+   i->Pin.ExtractExpD128.op     = op;
+   i->Pin.ExtractExpD128.dst    = dst;
+   i->Pin.ExtractExpD128.src_hi = src_hi;
+   i->Pin.ExtractExpD128.src_lo = src_lo;
+   return i;
+}
+PPCInstr* PPCInstr_InsertExpD128 ( PPCFpOp op, HReg dst_hi, HReg dst_lo,   
+                                   HReg srcL, HReg srcR_hi, HReg srcR_lo ) {
+   /* dst is used to pass the srcL argument */                             
+   PPCInstr* i                  = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                       = Pin_InsertExpD128;
+   i->Pin.InsertExpD128.op      = op;
+   i->Pin.InsertExpD128.dst_hi  = dst_hi;
+   i->Pin.InsertExpD128.dst_lo  = dst_lo;
+   i->Pin.InsertExpD128.srcL    = srcL;
+   i->Pin.InsertExpD128.srcR_hi = srcR_hi;
+   i->Pin.InsertExpD128.srcR_lo = srcR_lo;
+   return i;
+}
+PPCInstr* PPCInstr_Dfp64Cmp (/* UInt crfD,*/ HReg dst, HReg srcL, HReg srcR ) {
+   PPCInstr* i          = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag               = Pin_Dfp64Cmp;
+   i->Pin.Dfp64Cmp.dst = dst;
+   i->Pin.Dfp64Cmp.srcL = srcL;
+   i->Pin.Dfp64Cmp.srcR = srcR;
+   return i;                                                   
+}
+PPCInstr* PPCInstr_Dfp128Cmp ( HReg dst, HReg srcL_hi, HReg srcL_lo,
+                               HReg srcR_hi, HReg srcR_lo ) {
+   PPCInstr* i               = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                    = Pin_Dfp128Cmp;
+   i->Pin.Dfp128Cmp.dst      = dst;
+   i->Pin.Dfp128Cmp.srcL_hi  = srcL_hi;
+   i->Pin.Dfp128Cmp.srcL_lo  = srcL_lo;
+   i->Pin.Dfp128Cmp.srcR_hi  = srcR_hi;
+   i->Pin.Dfp128Cmp.srcR_lo  = srcR_lo;
+   return i;                                                   
+}
+PPCInstr* PPCInstr_EvCheck ( PPCAMode* amCounter,
+                             PPCAMode* amFailAddr ) {
+   PPCInstr* i               = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                    = Pin_EvCheck;
+   i->Pin.EvCheck.amCounter  = amCounter;
+   i->Pin.EvCheck.amFailAddr = amFailAddr;
+   return i;
+}
+PPCInstr* PPCInstr_ProfInc ( void ) {
+   PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag      = Pin_ProfInc;
+   return i;
+}
+
+/*
+Valid combo | fromI | int32 | syned | flt64 |
+--------------------------------------------
+            |  n       n       n       n    |
+--------------------------------------------
+ F64->I64U  |  n       n       n       y    |
+--------------------------------------------
+            |  n       n       y       n    |
+--------------------------------------------
+ F64->I64S  |  n       n       y       y    |
+--------------------------------------------
+            |  n       y       n       n    |
+--------------------------------------------
+ F64->I32U  |  n       y       n       y    |
+--------------------------------------------
+            |  n       y       y       n    |
+--------------------------------------------
+ F64->I32S  |  n       y       y       y    |
+--------------------------------------------
+ I64U->F32  |  y       n       n       n    |
+--------------------------------------------
+ I64U->F64  |  y       n       n       y    |
+--------------------------------------------
+            |  y       n       y       n    |
+--------------------------------------------
+ I64S->F64  |  y       n       y       y    |
+--------------------------------------------
+            |  y       y       n       n    |
+--------------------------------------------
+            |  y       y       n       y    |
+--------------------------------------------
+            |  y       y       y       n    |
+--------------------------------------------
+            |  y       y       y       y    |
+--------------------------------------------
+*/
+PPCInstr* PPCInstr_FpCftI ( Bool fromI, Bool int32, Bool syned,
+                            Bool flt64, HReg dst, HReg src ) {
+   Bool tmp = fromI | int32 | syned | flt64;
+   vassert(tmp == True || tmp == False); // iow, no high bits set
+   UShort conversion = 0;
+   conversion = (fromI << 3) | (int32 << 2) | (syned << 1) | flt64;
+   switch (conversion) {
+      // Supported conversion operations
+      case 1: case 3: case 5: case 7:
+      case 8: case 9: case 11:
+         break;
+      default:
+         vpanic("PPCInstr_FpCftI(ppc_host)");
+   }
+   PPCInstr* i         = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag              = Pin_FpCftI;
+   i->Pin.FpCftI.fromI = fromI;
+   i->Pin.FpCftI.int32 = int32;
+   i->Pin.FpCftI.syned = syned;
+   i->Pin.FpCftI.flt64 = flt64;
+   i->Pin.FpCftI.dst   = dst;
+   i->Pin.FpCftI.src   = src;
+   return i;
+}
+PPCInstr* PPCInstr_FpCMov ( PPCCondCode cond, HReg dst, HReg src ) {
+   PPCInstr* i        = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag             = Pin_FpCMov;
+   i->Pin.FpCMov.cond = cond;
+   i->Pin.FpCMov.dst  = dst;
+   i->Pin.FpCMov.src  = src;
+   vassert(cond.test != Pct_ALWAYS);
+   return i;
+}
+PPCInstr* PPCInstr_FpLdFPSCR ( HReg src, Bool dfp_rm ) {
+   PPCInstr* i          = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag               = Pin_FpLdFPSCR;
+   i->Pin.FpLdFPSCR.src = src;
+   i->Pin.FpLdFPSCR.dfp_rm = dfp_rm ? 1 : 0;
+   return i;
+}
+PPCInstr* PPCInstr_FpCmp ( HReg dst, HReg srcL, HReg srcR ) {
+   PPCInstr* i       = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag            = Pin_FpCmp;
+   i->Pin.FpCmp.dst  = dst;
+   i->Pin.FpCmp.srcL = srcL;
+   i->Pin.FpCmp.srcR = srcR;
+   return i;
+}
+
+/* Read/Write Link Register */
+PPCInstr* PPCInstr_RdWrLR ( Bool wrLR, HReg gpr ) {
+   PPCInstr* i        = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag             = Pin_RdWrLR;
+   i->Pin.RdWrLR.wrLR = wrLR;
+   i->Pin.RdWrLR.gpr  = gpr;
+   return i;
+}
+
+/* AltiVec */
+PPCInstr* PPCInstr_AvLdSt ( Bool isLoad, UChar sz,
+                            HReg reg, PPCAMode* addr ) {
+   PPCInstr* i          = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag               = Pin_AvLdSt;
+   i->Pin.AvLdSt.isLoad = isLoad;
+   i->Pin.AvLdSt.sz     = sz;
+   i->Pin.AvLdSt.reg    = reg;
+   i->Pin.AvLdSt.addr   = addr;
+   return i;
+}
+PPCInstr* PPCInstr_AvUnary ( PPCAvOp op, HReg dst, HReg src ) {
+   PPCInstr* i        = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag             = Pin_AvUnary;
+   i->Pin.AvUnary.op  = op;
+   i->Pin.AvUnary.dst = dst;
+   i->Pin.AvUnary.src = src;
+   return i;
+}
+PPCInstr* PPCInstr_AvBinary ( PPCAvOp op, HReg dst,
+                              HReg srcL, HReg srcR ) {
+   PPCInstr* i          = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag               = Pin_AvBinary;
+   i->Pin.AvBinary.op   = op;
+   i->Pin.AvBinary.dst  = dst;
+   i->Pin.AvBinary.srcL = srcL;
+   i->Pin.AvBinary.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_AvBin8x16 ( PPCAvOp op, HReg dst,
+                               HReg srcL, HReg srcR ) {
+   PPCInstr* i           = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                = Pin_AvBin8x16;
+   i->Pin.AvBin8x16.op   = op;
+   i->Pin.AvBin8x16.dst  = dst;
+   i->Pin.AvBin8x16.srcL = srcL;
+   i->Pin.AvBin8x16.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_AvBin16x8 ( PPCAvOp op, HReg dst,
+                               HReg srcL, HReg srcR ) {
+   PPCInstr* i           = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                = Pin_AvBin16x8;
+   i->Pin.AvBin16x8.op   = op;
+   i->Pin.AvBin16x8.dst  = dst;
+   i->Pin.AvBin16x8.srcL = srcL;
+   i->Pin.AvBin16x8.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_AvBin32x4 ( PPCAvOp op, HReg dst,
+                               HReg srcL, HReg srcR ) {
+   PPCInstr* i           = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                = Pin_AvBin32x4;
+   i->Pin.AvBin32x4.op   = op;
+   i->Pin.AvBin32x4.dst  = dst;
+   i->Pin.AvBin32x4.srcL = srcL;
+   i->Pin.AvBin32x4.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_AvBin64x2 ( PPCAvOp op, HReg dst,
+                               HReg srcL, HReg srcR ) {
+   PPCInstr* i           = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                = Pin_AvBin64x2;
+   i->Pin.AvBin64x2.op   = op;
+   i->Pin.AvBin64x2.dst  = dst;
+   i->Pin.AvBin64x2.srcL = srcL;
+   i->Pin.AvBin64x2.srcR = srcR;
+   return i;
+}
+
+PPCInstr* PPCInstr_AvBin32Fx4 ( PPCAvFpOp op, HReg dst,
+                                HReg srcL, HReg srcR ) {
+   PPCInstr* i            = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                 = Pin_AvBin32Fx4;
+   i->Pin.AvBin32Fx4.op   = op;
+   i->Pin.AvBin32Fx4.dst  = dst;
+   i->Pin.AvBin32Fx4.srcL = srcL;
+   i->Pin.AvBin32Fx4.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_AvUn32Fx4 ( PPCAvFpOp op, HReg dst, HReg src ) {
+   PPCInstr* i          = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag               = Pin_AvUn32Fx4;
+   i->Pin.AvUn32Fx4.op  = op;
+   i->Pin.AvUn32Fx4.dst = dst;
+   i->Pin.AvUn32Fx4.src = src;
+   return i;
+}
+PPCInstr* PPCInstr_AvPerm ( HReg dst, HReg srcL, HReg srcR, HReg ctl ) {
+   PPCInstr* i        = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag             = Pin_AvPerm;
+   i->Pin.AvPerm.dst  = dst;
+   i->Pin.AvPerm.srcL = srcL;
+   i->Pin.AvPerm.srcR = srcR;
+   i->Pin.AvPerm.ctl  = ctl;
+   return i;
+}
+
+PPCInstr* PPCInstr_AvSel ( HReg ctl, HReg dst, HReg srcL, HReg srcR ) {
+   PPCInstr* i       = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag            = Pin_AvSel;
+   i->Pin.AvSel.ctl  = ctl;
+   i->Pin.AvSel.dst  = dst;
+   i->Pin.AvSel.srcL = srcL;
+   i->Pin.AvSel.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_AvSh ( Bool shLeft, HReg dst, PPCAMode* addr ) {
+   PPCInstr*  i       = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag             = Pin_AvSh;
+   i->Pin.AvSh.shLeft = shLeft;
+   i->Pin.AvSh.dst    = dst;
+   i->Pin.AvSh.addr   = addr;
+   return i;
+}
+PPCInstr* PPCInstr_AvShlDbl ( UChar shift, HReg dst,
+                              HReg srcL, HReg srcR ) {
+   PPCInstr* i           = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                = Pin_AvShlDbl;
+   i->Pin.AvShlDbl.shift = shift;
+   i->Pin.AvShlDbl.dst   = dst;
+   i->Pin.AvShlDbl.srcL  = srcL;
+   i->Pin.AvShlDbl.srcR  = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_AvSplat ( UChar sz, HReg dst, PPCVI5s* src ) {
+   PPCInstr* i        = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag             = Pin_AvSplat;
+   i->Pin.AvSplat.sz  = sz;
+   i->Pin.AvSplat.dst = dst;
+   i->Pin.AvSplat.src = src;
+   return i;
+}
+PPCInstr* PPCInstr_AvCMov ( PPCCondCode cond, HReg dst, HReg src ) {
+   PPCInstr* i        = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag             = Pin_AvCMov;
+   i->Pin.AvCMov.cond = cond;
+   i->Pin.AvCMov.dst  = dst;
+   i->Pin.AvCMov.src  = src;
+   vassert(cond.test != Pct_ALWAYS);
+   return i;
+}
+PPCInstr* PPCInstr_AvLdVSCR ( HReg src ) {
+   PPCInstr* i         = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag              = Pin_AvLdVSCR;
+   i->Pin.AvLdVSCR.src = src;
+   return i;
+}
+PPCInstr* PPCInstr_AvCipherV128Unary ( PPCAvOp op, HReg dst, HReg src ) {
+   PPCInstr* i              = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                   = Pin_AvCipherV128Unary;
+   i->Pin.AvCipherV128Unary.op   = op;
+   i->Pin.AvCipherV128Unary.dst  = dst;
+   i->Pin.AvCipherV128Unary.src  = src;
+   return i;
+}
+PPCInstr* PPCInstr_AvCipherV128Binary ( PPCAvOp op, HReg dst,
+                                        HReg srcL, HReg srcR ) {
+   PPCInstr* i              = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                   = Pin_AvCipherV128Binary;
+   i->Pin.AvCipherV128Binary.op   = op;
+   i->Pin.AvCipherV128Binary.dst  = dst;
+   i->Pin.AvCipherV128Binary.srcL = srcL;
+   i->Pin.AvCipherV128Binary.srcR = srcR;
+   return i;
+}
+PPCInstr* PPCInstr_AvHashV128Binary ( PPCAvOp op, HReg dst,
+                                      HReg src, PPCRI* s_field ) {
+   PPCInstr* i              = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag                   = Pin_AvHashV128Binary;
+   i->Pin.AvHashV128Binary.op  = op;
+   i->Pin.AvHashV128Binary.dst = dst;
+   i->Pin.AvHashV128Binary.src = src;
+   i->Pin.AvHashV128Binary.s_field = s_field;
+   return i;
+}
+PPCInstr* PPCInstr_AvBCDV128Trinary ( PPCAvOp op, HReg dst,
+                                      HReg src1, HReg src2, PPCRI* ps ) {
+   PPCInstr* i = LibVEX_Alloc_inline(sizeof(PPCInstr));
+   i->tag      = Pin_AvBCDV128Trinary;
+   i->Pin.AvBCDV128Trinary.op   = op;
+   i->Pin.AvBCDV128Trinary.dst  = dst;
+   i->Pin.AvBCDV128Trinary.src1 = src1;
+   i->Pin.AvBCDV128Trinary.src2 = src2;
+   i->Pin.AvBCDV128Trinary.ps   = ps;
+   return i;
+}
+
+
+/* Pretty Print instructions */
+static void ppLoadImm ( HReg dst, ULong imm, Bool mode64 ) {
+   vex_printf("li_word ");
+   ppHRegPPC(dst);
+   if (!mode64) {
+      vex_printf(",0x%08x", (UInt)imm);
+   } else {
+      vex_printf(",0x%016llx", imm);
+   }
+}
+
+static void ppMovReg ( HReg dst, HReg src ) {
+   if (!sameHReg(dst, src)) {
+      vex_printf("mr ");
+      ppHRegPPC(dst);
+      vex_printf(",");
+      ppHRegPPC(src);
+   }
+}
+
+void ppPPCInstr ( const PPCInstr* i, Bool mode64 )
+{
+   switch (i->tag) {
+   case Pin_LI:
+      ppLoadImm(i->Pin.LI.dst, i->Pin.LI.imm64, mode64);
+      break;
+   case Pin_Alu: {
+      HReg   r_srcL  = i->Pin.Alu.srcL;
+      PPCRH* rh_srcR = i->Pin.Alu.srcR;
+      /* special-case "mr" */
+      if (i->Pin.Alu.op == Palu_OR &&   // or Rd,Rs,Rs == mr Rd,Rs
+          rh_srcR->tag == Prh_Reg &&
+          sameHReg(rh_srcR->Prh.Reg.reg, r_srcL)) {
+         vex_printf("mr ");
+         ppHRegPPC(i->Pin.Alu.dst);
+         vex_printf(",");
+         ppHRegPPC(r_srcL);
+         return;
+      }
+      /* special-case "li" */
+      if (i->Pin.Alu.op == Palu_ADD &&   // addi Rd,0,imm == li Rd,imm
+          rh_srcR->tag == Prh_Imm &&
+          hregEncoding(r_srcL) == 0) {
+         vex_printf("li ");
+         ppHRegPPC(i->Pin.Alu.dst);
+         vex_printf(",");
+         ppPPCRH(rh_srcR);
+         return;
+      }
+      /* generic */
+      vex_printf("%s ", showPPCAluOp(i->Pin.Alu.op,
+                                     toBool(rh_srcR->tag == Prh_Imm)));
+      ppHRegPPC(i->Pin.Alu.dst);
+      vex_printf(",");
+      ppHRegPPC(r_srcL);
+      vex_printf(",");
+      ppPPCRH(rh_srcR);
+      return;
+   }
+   case Pin_Shft: {
+      HReg   r_srcL  = i->Pin.Shft.srcL;
+      PPCRH* rh_srcR = i->Pin.Shft.srcR;
+      vex_printf("%s ", showPPCShftOp(i->Pin.Shft.op,
+                                      toBool(rh_srcR->tag == Prh_Imm),
+                                      i->Pin.Shft.sz32));
+      ppHRegPPC(i->Pin.Shft.dst);
+      vex_printf(",");
+      ppHRegPPC(r_srcL);
+      vex_printf(",");
+      ppPPCRH(rh_srcR);
+      return;
+   }
+   case Pin_AddSubC:
+      vex_printf("%s%s ",
+                 i->Pin.AddSubC.isAdd ? "add" : "sub",
+                 i->Pin.AddSubC.setC ? "c" : "e");
+      ppHRegPPC(i->Pin.AddSubC.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AddSubC.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AddSubC.srcR);
+      return;
+   case Pin_Cmp:
+      vex_printf("%s%c%s %%cr%u,",
+                 i->Pin.Cmp.syned ? "cmp" : "cmpl",
+                 i->Pin.Cmp.sz32 ? 'w' : 'd',
+                 i->Pin.Cmp.srcR->tag == Prh_Imm ? "i" : "",
+                 i->Pin.Cmp.crfD);
+      ppHRegPPC(i->Pin.Cmp.srcL);
+      vex_printf(",");
+      ppPPCRH(i->Pin.Cmp.srcR);
+      return;
+   case Pin_Unary:
+      vex_printf("%s ", showPPCUnaryOp(i->Pin.Unary.op));
+      ppHRegPPC(i->Pin.Unary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Unary.src);
+      return;
+   case Pin_MulL:
+      vex_printf("mul%c%c%s ",
+                 i->Pin.MulL.hi ? 'h' : 'l',
+                 i->Pin.MulL.sz32 ? 'w' : 'd',
+                 i->Pin.MulL.hi ? (i->Pin.MulL.syned ? "s" : "u") : "");
+      ppHRegPPC(i->Pin.MulL.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.MulL.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.MulL.srcR);
+      return;
+   case Pin_Div:
+      vex_printf("div%c%s%s ",
+                 i->Pin.Div.sz32 ? 'w' : 'd',
+                 i->Pin.Div.extended ? "e" : "",
+                 i->Pin.Div.syned ? "" : "u");
+      ppHRegPPC(i->Pin.Div.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Div.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Div.srcR);
+      return;
+   case Pin_Call: {
+      Int n;
+      vex_printf("call: ");
+      if (i->Pin.Call.cond.test != Pct_ALWAYS) {
+         vex_printf("if (%s) ", showPPCCondCode(i->Pin.Call.cond));
+      }
+      vex_printf("{ ");
+      ppLoadImm(hregPPC_GPR10(mode64), i->Pin.Call.target, mode64);
+      vex_printf(" ; mtctr r10 ; bctrl [");
+      for (n = 0; n < 32; n++) {
+         if (i->Pin.Call.argiregs & (1<<n)) {
+            vex_printf("r%d", n);
+            if ((i->Pin.Call.argiregs >> n) > 1)
+               vex_printf(",");
+         }
+      }
+      vex_printf(",");
+      ppRetLoc(i->Pin.Call.rloc);
+      vex_printf("] }");
+      break;
+   }
+   case Pin_XDirect:
+      vex_printf("(xDirect) ");
+      vex_printf("if (%s) { ",
+                 showPPCCondCode(i->Pin.XDirect.cond));
+      if (mode64) {
+         vex_printf("imm64 r30,0x%llx; ", i->Pin.XDirect.dstGA);
+         vex_printf("std r30,");
+      } else {
+         vex_printf("imm32 r30,0x%llx; ", i->Pin.XDirect.dstGA);
+         vex_printf("stw r30,");
+      }
+      ppPPCAMode(i->Pin.XDirect.amCIA);
+      vex_printf("; ");
+      if (mode64) {
+         vex_printf("imm64-fixed5 r30,$disp_cp_chain_me_to_%sEP; ",
+                    i->Pin.XDirect.toFastEP ? "fast" : "slow");
+      } else {
+         vex_printf("imm32-fixed2 r30,$disp_cp_chain_me_to_%sEP; ",
+                    i->Pin.XDirect.toFastEP ? "fast" : "slow");
+      }
+      vex_printf("mtctr r30; bctrl }");
+      return;
+   case Pin_XIndir:
+      vex_printf("(xIndir) ");
+      vex_printf("if (%s) { ",
+                 showPPCCondCode(i->Pin.XIndir.cond));
+      vex_printf("%s ", mode64 ? "std" : "stw");
+      ppHRegPPC(i->Pin.XIndir.dstGA);
+      vex_printf(",");
+      ppPPCAMode(i->Pin.XIndir.amCIA);
+      vex_printf("; ");
+      vex_printf("imm%s r30,$disp_cp_xindir; ", mode64 ? "64" : "32");
+      vex_printf("mtctr r30; bctr }");
+      return;
+   case Pin_XAssisted:
+      vex_printf("(xAssisted) ");
+      vex_printf("if (%s) { ",
+                 showPPCCondCode(i->Pin.XAssisted.cond));
+      vex_printf("%s ", mode64 ? "std" : "stw");
+      ppHRegPPC(i->Pin.XAssisted.dstGA);
+      vex_printf(",");
+      ppPPCAMode(i->Pin.XAssisted.amCIA);
+      vex_printf("; ");
+      vex_printf("li r31,$IRJumpKind_to_TRCVAL(%d); ",                            
+                 (Int)i->Pin.XAssisted.jk);
+      vex_printf("imm%s r30,$disp_cp_xindir; ", mode64 ? "64" : "32");
+      vex_printf("mtctr r30; bctr }");
+      return;
+   case Pin_CMov:
+      vex_printf("cmov (%s) ", showPPCCondCode(i->Pin.CMov.cond));
+      ppHRegPPC(i->Pin.CMov.dst);
+      vex_printf(",");
+      ppPPCRI(i->Pin.CMov.src);
+      vex_printf(": ");
+      if (i->Pin.CMov.cond.test != Pct_ALWAYS) {
+         vex_printf("if (%s) ", showPPCCondCode(i->Pin.CMov.cond));
+      }
+      vex_printf("{ ");
+      if (i->Pin.CMov.src->tag == Pri_Imm) {
+         ppLoadImm(i->Pin.CMov.dst, i->Pin.CMov.src->Pri.Imm, mode64);
+      } else {
+         ppMovReg(i->Pin.CMov.dst, i->Pin.CMov.src->Pri.Reg);
+      }
+      vex_printf(" }");
+      return;
+   case Pin_Load: {
+      Bool idxd = toBool(i->Pin.Load.src->tag == Pam_RR);
+      UChar sz = i->Pin.Load.sz;
+      HChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : 'd';
+      vex_printf("l%c%s%s ", c_sz, sz==8 ? "" : "z", idxd ? "x" : "" );
+      ppHRegPPC(i->Pin.Load.dst);
+      vex_printf(",");
+      ppPPCAMode(i->Pin.Load.src);
+      return;
+   }
+   case Pin_LoadL: {
+      UChar sz = i->Pin.LoadL.sz;
+      HChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : 'd';
+      vex_printf("l%carx ", c_sz);
+      ppHRegPPC(i->Pin.LoadL.dst);
+      vex_printf(",%%r0,");
+      ppHRegPPC(i->Pin.LoadL.src);
+      return;
+   }
+   case Pin_Store: {
+      UChar sz = i->Pin.Store.sz;
+      Bool idxd = toBool(i->Pin.Store.dst->tag == Pam_RR);
+      HChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : /*8*/ 'd';
+      vex_printf("st%c%s ", c_sz, idxd ? "x" : "" );
+      ppHRegPPC(i->Pin.Store.src);
+      vex_printf(",");
+      ppPPCAMode(i->Pin.Store.dst);
+      return;
+   }
+   case Pin_StoreC: {
+      UChar sz = i->Pin.StoreC.sz;
+      HChar c_sz = sz==1 ? 'b' : sz==2 ? 'h' : sz==4 ? 'w' : 'd';
+      vex_printf("st%ccx. ", c_sz);
+      ppHRegPPC(i->Pin.StoreC.src);
+      vex_printf(",%%r0,");
+      ppHRegPPC(i->Pin.StoreC.dst);
+      return;
+   }
+   case Pin_Set: {
+      PPCCondCode cc = i->Pin.Set.cond;
+      vex_printf("set (%s),", showPPCCondCode(cc));
+      ppHRegPPC(i->Pin.Set.dst);
+      if (cc.test == Pct_ALWAYS) {
+         vex_printf(": { li ");
+         ppHRegPPC(i->Pin.Set.dst);
+         vex_printf(",1 }");
+      } else {
+         vex_printf(": { mfcr r0 ; rlwinm ");
+         ppHRegPPC(i->Pin.Set.dst);
+         vex_printf(",r0,%u,31,31", cc.flag+1);
+         if (cc.test == Pct_FALSE) {
+            vex_printf("; xori ");
+            ppHRegPPC(i->Pin.Set.dst);
+            vex_printf(",");
+            ppHRegPPC(i->Pin.Set.dst);
+            vex_printf(",1");
+         }
+         vex_printf(" }");
+      }
+      return;
+   }
+   case Pin_MfCR:
+      vex_printf("mfcr ");
+      ppHRegPPC(i->Pin.MfCR.dst);
+      break;
+   case Pin_MFence:
+      vex_printf("mfence (=sync)");
+      return;
+
+   case Pin_FpUnary:
+      vex_printf("%s ", showPPCFpOp(i->Pin.FpUnary.op));
+      ppHRegPPC(i->Pin.FpUnary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpUnary.src);
+      return;
+   case Pin_FpBinary:
+      vex_printf("%s ", showPPCFpOp(i->Pin.FpBinary.op));
+      ppHRegPPC(i->Pin.FpBinary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpBinary.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpBinary.srcR);
+      return;
+   case Pin_FpMulAcc:
+      vex_printf("%s ", showPPCFpOp(i->Pin.FpMulAcc.op));
+      ppHRegPPC(i->Pin.FpMulAcc.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpMulAcc.srcML);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpMulAcc.srcMR);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpMulAcc.srcAcc);
+      return;
+   case Pin_FpLdSt: {
+      UChar sz = i->Pin.FpLdSt.sz;
+      Bool idxd = toBool(i->Pin.FpLdSt.addr->tag == Pam_RR);
+      if (i->Pin.FpLdSt.isLoad) {
+         vex_printf("lf%c%s ",
+                    (sz==4 ? 's' : 'd'),
+                    idxd ? "x" : "" );
+         ppHRegPPC(i->Pin.FpLdSt.reg);
+         vex_printf(",");
+         ppPPCAMode(i->Pin.FpLdSt.addr);
+      } else {
+         vex_printf("stf%c%s ",
+                    (sz==4 ? 's' : 'd'),
+                    idxd ? "x" : "" );
+         ppHRegPPC(i->Pin.FpLdSt.reg);
+         vex_printf(",");
+         ppPPCAMode(i->Pin.FpLdSt.addr);
+      }
+      return;
+   }
+   case Pin_FpSTFIW:
+      vex_printf("stfiwz ");
+      ppHRegPPC(i->Pin.FpSTFIW.data);
+      vex_printf(",0(");
+      ppHRegPPC(i->Pin.FpSTFIW.addr);
+      vex_printf(")");
+      return;
+   case Pin_FpRSP:
+      vex_printf("frsp ");
+      ppHRegPPC(i->Pin.FpRSP.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpRSP.src);
+      return;
+   case Pin_FpCftI: {
+      const HChar* str = "fc?????";
+      /* Note that "fcfids" is missing from below. That instruction would
+       * satisfy the predicate:
+       *    (i->Pin.FpCftI.fromI == True && i->Pin.FpCftI.int32 == False)
+       * which would go into a final "else" clause to make this if-else
+       * block balanced.  But we're able to implement fcfids by leveraging
+       * the fcfid implementation, so it wasn't necessary to include it here.
+       */
+      if (i->Pin.FpCftI.fromI == False && i->Pin.FpCftI.int32 == False)
+         if (i->Pin.FpCftI.syned == True)
+            str = "fctid";
+         else
+            str = "fctidu";
+      else if (i->Pin.FpCftI.fromI == False && i->Pin.FpCftI.int32 == True)
+         if (i->Pin.FpCftI.syned == True)
+            str = "fctiw";
+         else
+            str = "fctiwu";
+      else if (i->Pin.FpCftI.fromI == True && i->Pin.FpCftI.int32 == False) {
+         if (i->Pin.FpCftI.syned == True) {
+            str = "fcfid";
+         } else {
+            if (i->Pin.FpCftI.flt64 == True)
+               str = "fcfidu";
+            else
+               str = "fcfidus";
+         }
+      }
+      vex_printf("%s ", str);
+      ppHRegPPC(i->Pin.FpCftI.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpCftI.src);
+      return;
+   }
+   case Pin_FpCMov:
+      vex_printf("fpcmov (%s) ", showPPCCondCode(i->Pin.FpCMov.cond));
+      ppHRegPPC(i->Pin.FpCMov.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpCMov.src);
+      vex_printf(": ");
+      vex_printf("if (fr_dst != fr_src) { ");
+      if (i->Pin.FpCMov.cond.test != Pct_ALWAYS) {
+         vex_printf("if (%s) { ", showPPCCondCode(i->Pin.FpCMov.cond));
+      }
+      vex_printf("fmr ");
+      ppHRegPPC(i->Pin.FpCMov.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpCMov.src);
+      if (i->Pin.FpCMov.cond.test != Pct_ALWAYS)
+         vex_printf(" }");
+      vex_printf(" }");
+      return;
+   case Pin_FpLdFPSCR:
+      vex_printf("mtfsf 0xFF,");
+      ppHRegPPC(i->Pin.FpLdFPSCR.src);
+      vex_printf(",0, %s", i->Pin.FpLdFPSCR.dfp_rm ? "1" : "0");
+      return;
+   case Pin_FpCmp:
+      vex_printf("fcmpo %%cr1,");
+      ppHRegPPC(i->Pin.FpCmp.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpCmp.srcR);
+      vex_printf("; mfcr ");
+      ppHRegPPC(i->Pin.FpCmp.dst);
+      vex_printf("; rlwinm ");
+      ppHRegPPC(i->Pin.FpCmp.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.FpCmp.dst);
+      vex_printf(",8,28,31");
+      return;
+
+   case Pin_RdWrLR:
+      vex_printf("%s ", i->Pin.RdWrLR.wrLR ? "mtlr" : "mflr");
+      ppHRegPPC(i->Pin.RdWrLR.gpr);
+      return;
+
+   case Pin_AvLdSt: {
+      UChar  sz = i->Pin.AvLdSt.sz;
+      const HChar* str_size;
+      if (i->Pin.AvLdSt.addr->tag == Pam_IR) {
+         ppLoadImm(hregPPC_GPR30(mode64),
+                   i->Pin.AvLdSt.addr->Pam.IR.index, mode64);
+         vex_printf(" ; ");
+      }
+      str_size = sz==1 ? "eb" : sz==2 ? "eh" : sz==4 ? "ew" : "";
+      if (i->Pin.AvLdSt.isLoad)
+         vex_printf("lv%sx ", str_size);
+      else
+         vex_printf("stv%sx ", str_size);
+      ppHRegPPC(i->Pin.AvLdSt.reg);
+      vex_printf(",");
+      if (i->Pin.AvLdSt.addr->tag == Pam_IR)
+         vex_printf("%%r30");
+      else 
+         ppHRegPPC(i->Pin.AvLdSt.addr->Pam.RR.index);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvLdSt.addr->Pam.RR.base);
+      return;
+   }
+   case Pin_AvUnary:
+      vex_printf("%s ", showPPCAvOp(i->Pin.AvUnary.op));
+      ppHRegPPC(i->Pin.AvUnary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvUnary.src);
+      return;
+   case Pin_AvBinary:
+      vex_printf("%s ", showPPCAvOp(i->Pin.AvBinary.op));
+      ppHRegPPC(i->Pin.AvBinary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBinary.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBinary.srcR);
+      return;
+   case Pin_AvBin8x16:
+      vex_printf("%s(b) ", showPPCAvOp(i->Pin.AvBin8x16.op));
+      ppHRegPPC(i->Pin.AvBin8x16.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin8x16.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin8x16.srcR);
+      return;
+   case Pin_AvBin16x8:
+      vex_printf("%s(h) ", showPPCAvOp(i->Pin.AvBin16x8.op));
+      ppHRegPPC(i->Pin.AvBin16x8.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin16x8.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin16x8.srcR);
+      return;
+   case Pin_AvBin32x4:
+      vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvBin32x4.op));
+      ppHRegPPC(i->Pin.AvBin32x4.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin32x4.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin32x4.srcR);
+      return;
+   case Pin_AvBin64x2:
+      vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvBin64x2.op));
+      ppHRegPPC(i->Pin.AvBin64x2.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin64x2.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin64x2.srcR);
+      return;
+   case Pin_AvBin32Fx4:
+      vex_printf("%s ", showPPCAvFpOp(i->Pin.AvBin32Fx4.op));
+      ppHRegPPC(i->Pin.AvBin32Fx4.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin32Fx4.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBin32Fx4.srcR);
+      return;
+   case Pin_AvUn32Fx4:
+      vex_printf("%s ", showPPCAvFpOp(i->Pin.AvUn32Fx4.op));
+      ppHRegPPC(i->Pin.AvUn32Fx4.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvUn32Fx4.src);
+      return;
+   case Pin_AvPerm:
+      vex_printf("vperm ");
+      ppHRegPPC(i->Pin.AvPerm.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvPerm.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvPerm.srcR);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvPerm.ctl);
+      return;
+
+   case Pin_AvSel:
+      vex_printf("vsel ");
+      ppHRegPPC(i->Pin.AvSel.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvSel.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvSel.srcR);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvSel.ctl);
+      return;
+
+   case Pin_AvSh:
+      /* This only generates the following instructions with RA
+       * register number set to 0.
+       */
+      if (i->Pin.AvSh.addr->tag == Pam_IR) {
+         ppLoadImm(hregPPC_GPR30(mode64),
+                   i->Pin.AvSh.addr->Pam.IR.index, mode64);
+         vex_printf(" ; ");
+      }
+
+      if (i->Pin.AvSh.shLeft)
+         vex_printf("lvsl ");
+      else
+         vex_printf("lvsr ");
+
+      ppHRegPPC(i->Pin.AvSh.dst);
+      if (i->Pin.AvSh.addr->tag == Pam_IR)
+         vex_printf("%%r30");
+      else
+         ppHRegPPC(i->Pin.AvSh.addr->Pam.RR.index);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvSh.addr->Pam.RR.base);
+      return;
+
+   case Pin_AvShlDbl:
+      vex_printf("vsldoi ");
+      ppHRegPPC(i->Pin.AvShlDbl.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvShlDbl.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvShlDbl.srcR);
+      vex_printf(",%d", i->Pin.AvShlDbl.shift);
+      return;
+
+   case Pin_AvSplat: {
+      UChar sz = i->Pin.AvSplat.sz;
+      HChar ch_sz = toUChar( (sz == 8) ? 'b' : (sz == 16) ? 'h' : 'w' );
+      vex_printf("vsplt%s%c ",
+                 i->Pin.AvSplat.src->tag == Pvi_Imm ? "is" : "", ch_sz);
+      ppHRegPPC(i->Pin.AvSplat.dst);
+      vex_printf(",");
+      ppPPCVI5s(i->Pin.AvSplat.src);
+      if (i->Pin.AvSplat.src->tag == Pvi_Reg)
+         vex_printf(", %d", (128/sz)-1);   /* louis lane */
+      return;
+   }
+
+   case Pin_AvCMov:
+      vex_printf("avcmov (%s) ", showPPCCondCode(i->Pin.AvCMov.cond));
+      ppHRegPPC(i->Pin.AvCMov.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvCMov.src);
+      vex_printf(": ");
+      vex_printf("if (v_dst != v_src) { ");
+      if (i->Pin.AvCMov.cond.test != Pct_ALWAYS) {
+         vex_printf("if (%s) { ", showPPCCondCode(i->Pin.AvCMov.cond));
+      }
+      vex_printf("vmr ");
+      ppHRegPPC(i->Pin.AvCMov.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvCMov.src);
+      if (i->Pin.FpCMov.cond.test != Pct_ALWAYS)
+         vex_printf(" }");
+      vex_printf(" }");
+      return;
+
+   case Pin_AvLdVSCR:
+      vex_printf("mtvscr ");
+      ppHRegPPC(i->Pin.AvLdVSCR.src);
+      return;
+
+   case Pin_AvCipherV128Unary:
+      vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvCipherV128Unary.op));
+      ppHRegPPC(i->Pin.AvCipherV128Unary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvCipherV128Unary.src);
+      return;
+
+   case Pin_AvCipherV128Binary:
+      vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvCipherV128Binary.op));
+      ppHRegPPC(i->Pin.AvCipherV128Binary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvCipherV128Binary.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvCipherV128Binary.srcR);
+      return;
+
+   case Pin_AvHashV128Binary:
+      vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvHashV128Binary.op));
+      ppHRegPPC(i->Pin.AvHashV128Binary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvHashV128Binary.src);
+      vex_printf(",");
+      ppPPCRI(i->Pin.AvHashV128Binary.s_field);
+      return;
+
+   case Pin_AvBCDV128Trinary:
+      vex_printf("%s(w) ", showPPCAvOp(i->Pin.AvBCDV128Trinary.op));
+      ppHRegPPC(i->Pin.AvBCDV128Trinary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBCDV128Trinary.src1);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.AvBCDV128Trinary.src2);
+      vex_printf(",");
+      ppPPCRI(i->Pin.AvBCDV128Trinary.ps);
+      return;
+
+   case Pin_Dfp64Unary:
+      vex_printf("%s ", showPPCFpOp(i->Pin.Dfp64Unary.op));
+      ppHRegPPC(i->Pin.Dfp64Unary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Dfp64Unary.src);
+      return;
+
+   case Pin_Dfp64Binary:
+      vex_printf("%s ", showPPCFpOp(i->Pin.Dfp64Binary.op));
+      ppHRegPPC(i->Pin.Dfp64Binary.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Dfp64Binary.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Dfp64Binary.srcR);
+      return;
+
+   case Pin_DfpShift:
+      vex_printf("%s ", showPPCFpOp(i->Pin.DfpShift.op));
+      ppHRegPPC(i->Pin.DfpShift.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpShift.src);
+      vex_printf(",");
+      ppPPCRI(i->Pin.DfpShift.shift);
+      return;
+
+   case Pin_Dfp128Unary:
+      vex_printf("%s ", showPPCFpOp(i->Pin.Dfp128Unary.op));
+      ppHRegPPC(i->Pin.Dfp128Unary.dst_hi);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Dfp128Unary.src_hi);
+      return;
+
+   case Pin_Dfp128Binary:
+      vex_printf("%s ", showPPCFpOp(i->Pin.Dfp128Binary.op));
+      ppHRegPPC(i->Pin.Dfp128Binary.dst_hi);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Dfp128Binary.srcR_hi);
+      return;
+
+   case Pin_DfpShift128:
+      vex_printf("%s ", showPPCFpOp(i->Pin.DfpShift128.op));
+      ppHRegPPC(i->Pin.DfpShift128.dst_hi);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpShift128.src_hi);
+      vex_printf(",");
+      ppPPCRI(i->Pin.DfpShift128.shift);
+      return;
+
+   case Pin_DfpRound:
+      vex_printf("drintx ");
+      ppHRegPPC(i->Pin.DfpRound.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpRound.src);
+      vex_printf(",");
+      ppPPCRI(i->Pin.DfpRound.r_rmc); /*  R in bit 3 and RMC in bits 2:0 */
+      return;
+
+   case Pin_DfpRound128:
+      vex_printf("drintxq ");
+      ppHRegPPC(i->Pin.DfpRound128.dst_hi);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpRound128.src_hi);
+      vex_printf(",");
+      ppPPCRI(i->Pin.DfpRound128.r_rmc); /*  R in bit 3 and RMC in bits 2:0 */
+      return;
+
+   case Pin_DfpQuantize:
+      vex_printf("%s ", showPPCFpOp(i->Pin.DfpQuantize.op));
+      ppHRegPPC(i->Pin.DfpQuantize.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpQuantize.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpQuantize.srcR);
+      vex_printf(",");
+      ppPPCRI(i->Pin.DfpQuantize.rmc);
+      return;
+
+   case Pin_DfpQuantize128:
+      /*  Dst is used to pass in left source and return result */
+      vex_printf("dquaq ");
+      ppHRegPPC(i->Pin.DfpQuantize128.dst_hi);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpQuantize128.dst_hi);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpQuantize128.src_hi);
+      vex_printf(",");
+      ppPPCRI(i->Pin.DfpQuantize128.rmc);
+      return;
+
+   case Pin_DfpD128toD64:
+      vex_printf("%s ", showPPCFpOp(i->Pin.DfpD128toD64.op));
+      ppHRegPPC(i->Pin.DfpD128toD64.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpD128toD64.src_hi);
+      vex_printf(",");
+      return;
+
+   case Pin_DfpI64StoD128:
+      vex_printf("%s ", showPPCFpOp(i->Pin.DfpI64StoD128.op));
+      ppHRegPPC(i->Pin.DfpI64StoD128.dst_hi);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.DfpI64StoD128.src);
+      vex_printf(",");
+      return;
+   case Pin_ExtractExpD128:
+      vex_printf("dxexq ");
+      ppHRegPPC(i->Pin.ExtractExpD128.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.ExtractExpD128.src_hi);
+      return;
+   case Pin_InsertExpD128:
+      vex_printf("diexq ");
+      ppHRegPPC(i->Pin.InsertExpD128.dst_hi);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.InsertExpD128.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.InsertExpD128.srcR_hi);
+      return;
+   case Pin_Dfp64Cmp:
+      vex_printf("dcmpo %%cr1,");
+      ppHRegPPC(i->Pin.Dfp64Cmp.srcL);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Dfp64Cmp.srcR);
+      vex_printf("; mfcr ");
+      ppHRegPPC(i->Pin.Dfp64Cmp.dst);
+      vex_printf("; rlwinm ");
+      ppHRegPPC(i->Pin.Dfp64Cmp.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Dfp64Cmp.dst);
+      vex_printf(",8,28,31");
+      return;
+   case Pin_Dfp128Cmp:
+      vex_printf("dcmpoq %%cr1,");
+      ppHRegPPC(i->Pin.Dfp128Cmp.srcL_hi);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Dfp128Cmp.srcR_hi);
+      vex_printf("; mfcr ");
+      ppHRegPPC(i->Pin.Dfp128Cmp.dst);
+      vex_printf("; rlwinm ");
+      ppHRegPPC(i->Pin.Dfp128Cmp.dst);
+      vex_printf(",");
+      ppHRegPPC(i->Pin.Dfp128Cmp.dst);
+      vex_printf(",8,28,31");
+      return;
+   case Pin_EvCheck:
+      /* Note that the counter dec is 32 bit even in 64-bit mode. */
+      vex_printf("(evCheck) ");
+      vex_printf("lwz r30,");
+      ppPPCAMode(i->Pin.EvCheck.amCounter);
+      vex_printf("; addic. r30,r30,-1; ");
+      vex_printf("stw r30,");
+      ppPPCAMode(i->Pin.EvCheck.amCounter);
+      vex_printf("; bge nofail; lwz r30,");
+      ppPPCAMode(i->Pin.EvCheck.amFailAddr);
+      vex_printf("; mtctr r30; bctr; nofail:");
+      return;
+   case Pin_ProfInc:
+      if (mode64) {
+         vex_printf("(profInc) imm64-fixed5 r30,$NotKnownYet; ");
+         vex_printf("ld r29,(r30); addi r29,r29,1; std r29,(r30)");
+      } else {
+         vex_printf("(profInc) imm32-fixed2 r30,$NotKnownYet; ");
+         vex_printf("lwz r29,4(r30); addic. r29,r29,1; stw r29,4(r30)");
+         vex_printf("lwz r29,0(r30); addze r29,r29; stw r29,0(r30)");
+      }
+      break;
+   default:
+      vex_printf("\nppPPCInstr: No such tag(%d)\n", (Int)i->tag);
+      vpanic("ppPPCInstr");
+   }
+}
+
+/* --------- Helpers for register allocation. --------- */
+
+void getRegUsage_PPCInstr ( HRegUsage* u, const PPCInstr* i, Bool mode64 )
+{
+   initHRegUsage(u);
+   switch (i->tag) {
+   case Pin_LI:
+      addHRegUse(u, HRmWrite, i->Pin.LI.dst);
+      break;
+   case Pin_Alu:
+      addHRegUse(u, HRmRead,  i->Pin.Alu.srcL);
+      addRegUsage_PPCRH(u,    i->Pin.Alu.srcR);
+      addHRegUse(u, HRmWrite, i->Pin.Alu.dst);
+      return;
+   case Pin_Shft:
+      addHRegUse(u, HRmRead,  i->Pin.Shft.srcL);
+      addRegUsage_PPCRH(u,    i->Pin.Shft.srcR);
+      addHRegUse(u, HRmWrite, i->Pin.Shft.dst);
+      return;
+   case Pin_AddSubC:
+      addHRegUse(u, HRmWrite, i->Pin.AddSubC.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AddSubC.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AddSubC.srcR);
+      return;
+   case Pin_Cmp:
+      addHRegUse(u, HRmRead, i->Pin.Cmp.srcL);
+      addRegUsage_PPCRH(u,   i->Pin.Cmp.srcR);
+      return;
+   case Pin_Unary:
+      addHRegUse(u, HRmWrite, i->Pin.Unary.dst);
+      addHRegUse(u, HRmRead,  i->Pin.Unary.src);
+      return;
+   case Pin_MulL:
+      addHRegUse(u, HRmWrite, i->Pin.MulL.dst);
+      addHRegUse(u, HRmRead,  i->Pin.MulL.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.MulL.srcR);
+      return;
+   case Pin_Div:
+      addHRegUse(u, HRmWrite, i->Pin.Div.dst);
+      addHRegUse(u, HRmRead,  i->Pin.Div.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.Div.srcR);
+      return;
+   case Pin_Call: {
+      UInt argir;
+      /* This is a bit subtle. */
+      /* First off, claim it trashes all the caller-saved regs
+         which fall within the register allocator's jurisdiction.
+         These I believe to be:
+         mode32: r3 to r12
+         mode64: r3 to r10
+      */
+      /* XXXXXXXXXXXXXXXXX BUG! This doesn't say anything about the FP
+         or Altivec registers.  We get away with this ONLY because
+         getAllocatableRegs_PPC gives the allocator callee-saved fp
+         and Altivec regs, and no caller-save ones. */
+      addHRegUse(u, HRmWrite, hregPPC_GPR3(mode64));
+      addHRegUse(u, HRmWrite, hregPPC_GPR4(mode64));
+      addHRegUse(u, HRmWrite, hregPPC_GPR5(mode64));
+      addHRegUse(u, HRmWrite, hregPPC_GPR6(mode64));
+      addHRegUse(u, HRmWrite, hregPPC_GPR7(mode64));
+      addHRegUse(u, HRmWrite, hregPPC_GPR8(mode64));
+      addHRegUse(u, HRmWrite, hregPPC_GPR9(mode64));
+      addHRegUse(u, HRmWrite, hregPPC_GPR10(mode64));
+      if (!mode64) {
+         addHRegUse(u, HRmWrite, hregPPC_GPR11(mode64));
+         addHRegUse(u, HRmWrite, hregPPC_GPR12(mode64));
+      }
+
+      /* Now we have to state any parameter-carrying registers
+         which might be read.  This depends on the argiregs field. */
+      argir = i->Pin.Call.argiregs;
+      if (argir &(1<<10)) addHRegUse(u, HRmRead, hregPPC_GPR10(mode64));
+      if (argir & (1<<9)) addHRegUse(u, HRmRead, hregPPC_GPR9(mode64));
+      if (argir & (1<<8)) addHRegUse(u, HRmRead, hregPPC_GPR8(mode64));
+      if (argir & (1<<7)) addHRegUse(u, HRmRead, hregPPC_GPR7(mode64));
+      if (argir & (1<<6)) addHRegUse(u, HRmRead, hregPPC_GPR6(mode64));
+      if (argir & (1<<5)) addHRegUse(u, HRmRead, hregPPC_GPR5(mode64));
+      if (argir & (1<<4)) addHRegUse(u, HRmRead, hregPPC_GPR4(mode64));
+      if (argir & (1<<3)) addHRegUse(u, HRmRead, hregPPC_GPR3(mode64));
+
+      vassert(0 == (argir & ~((1<<3)|(1<<4)|(1<<5)|(1<<6)
+                              |(1<<7)|(1<<8)|(1<<9)|(1<<10))));
+
+      /* Finally, there is the issue that the insn trashes a
+         register because the literal target address has to be
+         loaded into a register.  %r10 seems a suitable victim.
+         (Can't use %r0, as some insns interpret it as value zero). */
+      addHRegUse(u, HRmWrite, hregPPC_GPR10(mode64));
+      /* Upshot of this is that the assembler really must use %r10,
+         and no other, as a destination temporary. */
+      return;
+   }
+   /* XDirect/XIndir/XAssisted are also a bit subtle.  They
+      conditionally exit the block.  Hence we only need to list (1)
+      the registers that they read, and (2) the registers that they
+      write in the case where the block is not exited.  (2) is empty,
+      hence only (1) is relevant here. */
+   case Pin_XDirect:
+      addRegUsage_PPCAMode(u, i->Pin.XDirect.amCIA);
+      return;
+   case Pin_XIndir:
+      addHRegUse(u, HRmRead, i->Pin.XIndir.dstGA);
+      addRegUsage_PPCAMode(u, i->Pin.XIndir.amCIA);
+      return;
+   case Pin_XAssisted:
+      addHRegUse(u, HRmRead, i->Pin.XAssisted.dstGA);
+      addRegUsage_PPCAMode(u, i->Pin.XAssisted.amCIA);
+      return;
+   case Pin_CMov:
+      addRegUsage_PPCRI(u,  i->Pin.CMov.src);
+      addHRegUse(u, HRmWrite, i->Pin.CMov.dst);
+      return;
+   case Pin_Load:
+      addRegUsage_PPCAMode(u, i->Pin.Load.src);
+      addHRegUse(u, HRmWrite, i->Pin.Load.dst);
+      return;
+   case Pin_LoadL:
+      addHRegUse(u, HRmRead,  i->Pin.LoadL.src);
+      addHRegUse(u, HRmWrite, i->Pin.LoadL.dst);
+      return;
+   case Pin_Store:
+      addHRegUse(u, HRmRead,  i->Pin.Store.src);
+      addRegUsage_PPCAMode(u, i->Pin.Store.dst);
+      return;
+   case Pin_StoreC:
+      addHRegUse(u, HRmRead, i->Pin.StoreC.src);
+      addHRegUse(u, HRmRead, i->Pin.StoreC.dst);
+      return;
+   case Pin_Set:
+      addHRegUse(u, HRmWrite, i->Pin.Set.dst);
+      return;
+   case Pin_MfCR:
+      addHRegUse(u, HRmWrite, i->Pin.MfCR.dst);
+      return;
+   case Pin_MFence:
+      return;
+
+   case Pin_FpUnary:
+      addHRegUse(u, HRmWrite, i->Pin.FpUnary.dst);
+      addHRegUse(u, HRmRead,  i->Pin.FpUnary.src);
+      return;
+   case Pin_FpBinary:
+      addHRegUse(u, HRmWrite, i->Pin.FpBinary.dst);
+      addHRegUse(u, HRmRead,  i->Pin.FpBinary.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.FpBinary.srcR);
+      return;
+   case Pin_FpMulAcc:
+      addHRegUse(u, HRmWrite, i->Pin.FpMulAcc.dst);
+      addHRegUse(u, HRmRead,  i->Pin.FpMulAcc.srcML);
+      addHRegUse(u, HRmRead,  i->Pin.FpMulAcc.srcMR);
+      addHRegUse(u, HRmRead,  i->Pin.FpMulAcc.srcAcc);
+      return;
+   case Pin_FpLdSt:
+      addHRegUse(u, (i->Pin.FpLdSt.isLoad ? HRmWrite : HRmRead),
+                 i->Pin.FpLdSt.reg);
+      addRegUsage_PPCAMode(u, i->Pin.FpLdSt.addr);
+      return;
+   case Pin_FpSTFIW:
+      addHRegUse(u, HRmRead, i->Pin.FpSTFIW.addr);
+      addHRegUse(u, HRmRead, i->Pin.FpSTFIW.data);
+      return;
+   case Pin_FpRSP:
+      addHRegUse(u, HRmWrite, i->Pin.FpRSP.dst);
+      addHRegUse(u, HRmRead,  i->Pin.FpRSP.src);
+      return;
+   case Pin_FpCftI:
+      addHRegUse(u, HRmWrite, i->Pin.FpCftI.dst);
+      addHRegUse(u, HRmRead,  i->Pin.FpCftI.src);
+      return;
+   case Pin_FpCMov:
+      addHRegUse(u, HRmModify, i->Pin.FpCMov.dst);
+      addHRegUse(u, HRmRead,   i->Pin.FpCMov.src);
+      return;
+   case Pin_FpLdFPSCR:
+      addHRegUse(u, HRmRead, i->Pin.FpLdFPSCR.src);
+      return;
+   case Pin_FpCmp:
+      addHRegUse(u, HRmWrite, i->Pin.FpCmp.dst);
+      addHRegUse(u, HRmRead,  i->Pin.FpCmp.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.FpCmp.srcR);
+      return;
+
+   case Pin_RdWrLR:
+      addHRegUse(u, (i->Pin.RdWrLR.wrLR ? HRmRead : HRmWrite),
+                 i->Pin.RdWrLR.gpr);
+      return;
+
+   case Pin_AvLdSt:
+      addHRegUse(u, (i->Pin.AvLdSt.isLoad ? HRmWrite : HRmRead),
+                 i->Pin.AvLdSt.reg);
+      if (i->Pin.AvLdSt.addr->tag == Pam_IR)
+         addHRegUse(u, HRmWrite, hregPPC_GPR30(mode64));
+      addRegUsage_PPCAMode(u, i->Pin.AvLdSt.addr);
+      return;
+   case Pin_AvUnary:
+      addHRegUse(u, HRmWrite, i->Pin.AvUnary.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvUnary.src);
+      return;
+   case Pin_AvBinary:
+      if (i->Pin.AvBinary.op == Pav_XOR
+          && sameHReg(i->Pin.AvBinary.dst, i->Pin.AvBinary.srcL)
+          && sameHReg(i->Pin.AvBinary.dst, i->Pin.AvBinary.srcR)) {
+         /* reg-alloc needs to understand 'xor r,r,r' as a write of r */
+         /* (as opposed to a rite of passage :-) */
+         addHRegUse(u, HRmWrite, i->Pin.AvBinary.dst);
+      } else {
+         addHRegUse(u, HRmWrite, i->Pin.AvBinary.dst);
+         addHRegUse(u, HRmRead,  i->Pin.AvBinary.srcL);
+         addHRegUse(u, HRmRead,  i->Pin.AvBinary.srcR);
+      }
+      return;
+   case Pin_AvBin8x16:
+      addHRegUse(u, HRmWrite, i->Pin.AvBin8x16.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin8x16.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin8x16.srcR);
+      return;
+   case Pin_AvBin16x8:
+      addHRegUse(u, HRmWrite, i->Pin.AvBin16x8.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin16x8.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin16x8.srcR);
+      return;
+   case Pin_AvBin32x4:
+      addHRegUse(u, HRmWrite, i->Pin.AvBin32x4.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin32x4.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin32x4.srcR);
+      return;
+   case Pin_AvBin64x2:
+      addHRegUse(u, HRmWrite, i->Pin.AvBin64x2.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin64x2.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin64x2.srcR);
+      return;
+   case Pin_AvBin32Fx4:
+      addHRegUse(u, HRmWrite, i->Pin.AvBin32Fx4.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin32Fx4.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AvBin32Fx4.srcR);
+      if (i->Pin.AvBin32Fx4.op == Pavfp_MULF)
+         addHRegUse(u, HRmWrite, hregPPC_VR29(mode64));
+      return;
+   case Pin_AvUn32Fx4:
+      addHRegUse(u, HRmWrite, i->Pin.AvUn32Fx4.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvUn32Fx4.src);
+      return;
+   case Pin_AvPerm:
+      addHRegUse(u, HRmWrite, i->Pin.AvPerm.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvPerm.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AvPerm.srcR);
+      addHRegUse(u, HRmRead,  i->Pin.AvPerm.ctl);
+      return;
+   case Pin_AvSel:
+      addHRegUse(u, HRmWrite, i->Pin.AvSel.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvSel.ctl);
+      addHRegUse(u, HRmRead,  i->Pin.AvSel.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AvSel.srcR);
+      return;
+   case Pin_AvSh:
+      addHRegUse(u, HRmWrite, i->Pin.AvSh.dst);
+      if (i->Pin.AvSh.addr->tag == Pam_IR)
+         addHRegUse(u, HRmWrite, hregPPC_GPR30(mode64));
+      addRegUsage_PPCAMode(u, i->Pin.AvSh.addr);
+      return;
+   case Pin_AvShlDbl:
+      addHRegUse(u, HRmWrite, i->Pin.AvShlDbl.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvShlDbl.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AvShlDbl.srcR);
+      return;
+   case Pin_AvSplat:
+      addHRegUse(u, HRmWrite, i->Pin.AvSplat.dst);
+      addRegUsage_PPCVI5s(u,  i->Pin.AvSplat.src);
+      return;
+   case Pin_AvCMov:
+      addHRegUse(u, HRmModify, i->Pin.AvCMov.dst);
+      addHRegUse(u, HRmRead,   i->Pin.AvCMov.src);
+      return;
+   case Pin_AvLdVSCR:
+      addHRegUse(u, HRmRead, i->Pin.AvLdVSCR.src);
+      return;
+   case Pin_AvCipherV128Unary:
+      addHRegUse(u, HRmWrite, i->Pin.AvCipherV128Unary.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvCipherV128Unary.src);
+      return;
+   case Pin_AvCipherV128Binary:
+      addHRegUse(u, HRmWrite, i->Pin.AvCipherV128Binary.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvCipherV128Binary.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.AvCipherV128Binary.srcR);
+      return;
+   case Pin_AvHashV128Binary:
+      addHRegUse(u, HRmWrite, i->Pin.AvHashV128Binary.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvHashV128Binary.src);
+      addRegUsage_PPCRI(u,    i->Pin.AvHashV128Binary.s_field);
+      return;
+   case Pin_AvBCDV128Trinary:
+      addHRegUse(u, HRmWrite, i->Pin.AvBCDV128Trinary.dst);
+      addHRegUse(u, HRmRead,  i->Pin.AvBCDV128Trinary.src1);
+      addHRegUse(u, HRmRead,  i->Pin.AvBCDV128Trinary.src2);
+      addRegUsage_PPCRI(u,    i->Pin.AvBCDV128Trinary.ps);
+      return;
+   case Pin_Dfp64Unary:
+      addHRegUse(u, HRmWrite, i->Pin.Dfp64Unary.dst);
+      addHRegUse(u, HRmRead, i->Pin.Dfp64Unary.src);
+      return;
+   case Pin_Dfp64Binary:
+      addHRegUse(u, HRmWrite, i->Pin.Dfp64Binary.dst);
+      addHRegUse(u, HRmRead, i->Pin.Dfp64Binary.srcL);
+      addHRegUse(u, HRmRead, i->Pin.Dfp64Binary.srcR);
+      return;
+   case Pin_DfpShift:
+      addRegUsage_PPCRI(u,    i->Pin.DfpShift.shift);
+      addHRegUse(u, HRmWrite, i->Pin.DfpShift.src);
+      addHRegUse(u, HRmWrite, i->Pin.DfpShift.dst);
+      return;
+   case Pin_Dfp128Unary:
+      addHRegUse(u, HRmWrite, i->Pin.Dfp128Unary.dst_hi);
+      addHRegUse(u, HRmWrite, i->Pin.Dfp128Unary.dst_lo);
+      addHRegUse(u, HRmRead,  i->Pin.Dfp128Unary.src_hi);
+      addHRegUse(u, HRmRead,  i->Pin.Dfp128Unary.src_lo);
+      return;
+   case Pin_Dfp128Binary:
+      addHRegUse(u, HRmWrite, i->Pin.Dfp128Binary.dst_hi);
+      addHRegUse(u, HRmWrite, i->Pin.Dfp128Binary.dst_lo);
+      addHRegUse(u, HRmRead, i->Pin.Dfp128Binary.srcR_hi);
+      addHRegUse(u, HRmRead, i->Pin.Dfp128Binary.srcR_lo);
+      return;
+   case Pin_DfpRound:
+      addHRegUse(u, HRmWrite, i->Pin.DfpRound.dst);
+      addHRegUse(u, HRmRead,  i->Pin.DfpRound.src);
+      return;
+   case Pin_DfpRound128:
+      addHRegUse(u, HRmWrite, i->Pin.DfpRound128.dst_hi);
+      addHRegUse(u, HRmWrite, i->Pin.DfpRound128.dst_lo);
+      addHRegUse(u, HRmRead,  i->Pin.DfpRound128.src_hi);
+      addHRegUse(u, HRmRead,  i->Pin.DfpRound128.src_lo);
+      return;
+   case Pin_DfpQuantize:
+      addRegUsage_PPCRI(u,  i->Pin.DfpQuantize.rmc);
+      addHRegUse(u, HRmWrite, i->Pin.DfpQuantize.dst);
+      addHRegUse(u, HRmRead,  i->Pin.DfpQuantize.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.DfpQuantize.srcR);
+      return;
+   case Pin_DfpQuantize128:
+      addHRegUse(u, HRmWrite, i->Pin.DfpQuantize128.dst_hi);
+      addHRegUse(u, HRmWrite, i->Pin.DfpQuantize128.dst_lo);
+      addHRegUse(u, HRmRead,  i->Pin.DfpQuantize128.src_hi);
+      addHRegUse(u, HRmRead,  i->Pin.DfpQuantize128.src_lo);
+      return;
+   case Pin_DfpShift128:
+      addRegUsage_PPCRI(u,    i->Pin.DfpShift128.shift);
+      addHRegUse(u, HRmWrite, i->Pin.DfpShift128.src_hi);
+      addHRegUse(u, HRmWrite, i->Pin.DfpShift128.src_lo);
+      addHRegUse(u, HRmWrite, i->Pin.DfpShift128.dst_hi);
+      addHRegUse(u, HRmWrite, i->Pin.DfpShift128.dst_lo);
+      return;
+   case Pin_DfpD128toD64:
+      addHRegUse(u, HRmWrite, i->Pin.DfpD128toD64.src_hi);
+      addHRegUse(u, HRmWrite, i->Pin.DfpD128toD64.src_lo);
+      addHRegUse(u, HRmWrite, i->Pin.DfpD128toD64.dst);
+      return;
+   case Pin_DfpI64StoD128:
+      addHRegUse(u, HRmWrite, i->Pin.DfpI64StoD128.src);
+      addHRegUse(u, HRmWrite, i->Pin.DfpI64StoD128.dst_hi);
+      addHRegUse(u, HRmWrite, i->Pin.DfpI64StoD128.dst_lo);
+      return;
+   case Pin_ExtractExpD128:
+      addHRegUse(u, HRmWrite, i->Pin.ExtractExpD128.dst);
+      addHRegUse(u, HRmRead,  i->Pin.ExtractExpD128.src_hi);
+      addHRegUse(u, HRmRead,  i->Pin.ExtractExpD128.src_lo);
+      return;
+   case Pin_InsertExpD128:
+      addHRegUse(u, HRmWrite, i->Pin.InsertExpD128.dst_hi);
+      addHRegUse(u, HRmWrite, i->Pin.InsertExpD128.dst_lo);
+      addHRegUse(u, HRmRead,  i->Pin.InsertExpD128.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.InsertExpD128.srcR_hi);
+      addHRegUse(u, HRmRead,  i->Pin.InsertExpD128.srcR_lo);
+      return;
+   case Pin_Dfp64Cmp:
+      addHRegUse(u, HRmWrite, i->Pin.Dfp64Cmp.dst);
+      addHRegUse(u, HRmRead,  i->Pin.Dfp64Cmp.srcL);
+      addHRegUse(u, HRmRead,  i->Pin.Dfp64Cmp.srcR);
+      return;
+   case Pin_Dfp128Cmp:
+      addHRegUse(u, HRmWrite, i->Pin.Dfp128Cmp.dst);
+      addHRegUse(u, HRmRead,  i->Pin.Dfp128Cmp.srcL_hi);
+      addHRegUse(u, HRmRead,  i->Pin.Dfp128Cmp.srcL_lo);
+      addHRegUse(u, HRmRead,  i->Pin.Dfp128Cmp.srcR_hi);
+      addHRegUse(u, HRmRead,  i->Pin.Dfp128Cmp.srcR_lo);
+      return;                                           
+   case Pin_EvCheck:
+      /* We expect both amodes only to mention the GSP (r31), so this
+         is in fact pointless, since GSP isn't allocatable, but
+         anyway.. */
+      addRegUsage_PPCAMode(u, i->Pin.EvCheck.amCounter);
+      addRegUsage_PPCAMode(u, i->Pin.EvCheck.amFailAddr);
+      addHRegUse(u, HRmWrite, hregPPC_GPR30(mode64)); /* also unavail to RA */
+      return;
+   case Pin_ProfInc:
+      addHRegUse(u, HRmWrite, hregPPC_GPR29(mode64));
+      addHRegUse(u, HRmWrite, hregPPC_GPR30(mode64));
+      return;
+   default:
+      ppPPCInstr(i, mode64);
+      vpanic("getRegUsage_PPCInstr");
+   }
+}
+
+/* local helper */
+static void mapReg( HRegRemap* m, HReg* r )
+{
+   *r = lookupHRegRemap(m, *r);
+}
+
/* Apply the register allocator's virtual->real mapping |m| to every
   HReg field of |i|, in place.  The case structure mirrors
   getRegUsage_PPCInstr: every register that function reports (other
   than the hardwired real registers) must be remapped here.  |mode64|
   is only needed on the panic path, for the pretty-printer. */
void mapRegs_PPCInstr ( HRegRemap* m, PPCInstr* i, Bool mode64 )
{
   switch (i->tag) {
   /* ---- integer insns ---- */
   case Pin_LI:
      mapReg(m, &i->Pin.LI.dst);
      return;
   case Pin_Alu:
      mapReg(m, &i->Pin.Alu.dst);
      mapReg(m, &i->Pin.Alu.srcL);
      mapRegs_PPCRH(m, i->Pin.Alu.srcR);
      return;
   case Pin_Shft:
      mapReg(m, &i->Pin.Shft.dst);
      mapReg(m, &i->Pin.Shft.srcL);
      mapRegs_PPCRH(m, i->Pin.Shft.srcR);
      return;
   case Pin_AddSubC:
      mapReg(m, &i->Pin.AddSubC.dst);
      mapReg(m, &i->Pin.AddSubC.srcL);
      mapReg(m, &i->Pin.AddSubC.srcR);
      return;
   case Pin_Cmp:
      mapReg(m, &i->Pin.Cmp.srcL);
      mapRegs_PPCRH(m, i->Pin.Cmp.srcR);
      return;
   case Pin_Unary:
      mapReg(m, &i->Pin.Unary.dst);
      mapReg(m, &i->Pin.Unary.src);
      return;
   case Pin_MulL:
      mapReg(m, &i->Pin.MulL.dst);
      mapReg(m, &i->Pin.MulL.srcL);
      mapReg(m, &i->Pin.MulL.srcR);
      return;
   case Pin_Div:
      mapReg(m, &i->Pin.Div.dst);
      mapReg(m, &i->Pin.Div.srcL);
      mapReg(m, &i->Pin.Div.srcR);
      return;
   case Pin_Call:
      /* No HReg fields here; nothing to remap. */
      return;
   case Pin_XDirect:
      mapRegs_PPCAMode(m, i->Pin.XDirect.amCIA);
      return;
   case Pin_XIndir:
      mapReg(m, &i->Pin.XIndir.dstGA);
      mapRegs_PPCAMode(m, i->Pin.XIndir.amCIA);
      return;
   case Pin_XAssisted:
      mapReg(m, &i->Pin.XAssisted.dstGA);
      mapRegs_PPCAMode(m, i->Pin.XAssisted.amCIA);
      return;
   case Pin_CMov:
      mapRegs_PPCRI(m, i->Pin.CMov.src);
      mapReg(m, &i->Pin.CMov.dst);
      return;
   case Pin_Load:
      mapRegs_PPCAMode(m, i->Pin.Load.src);
      mapReg(m, &i->Pin.Load.dst);
      return;
   case Pin_LoadL:
      mapReg(m, &i->Pin.LoadL.src);
      mapReg(m, &i->Pin.LoadL.dst);
      return;
   case Pin_Store:
      mapReg(m, &i->Pin.Store.src);
      mapRegs_PPCAMode(m, i->Pin.Store.dst);
      return;
   case Pin_StoreC:
      mapReg(m, &i->Pin.StoreC.src);
      mapReg(m, &i->Pin.StoreC.dst);
      return;
   case Pin_Set:
      mapReg(m, &i->Pin.Set.dst);
      return;
   case Pin_MfCR:
      mapReg(m, &i->Pin.MfCR.dst);
      return;
   case Pin_MFence:
      /* No HReg fields here; nothing to remap. */
      return;
   /* ---- floating point insns ---- */
   case Pin_FpUnary:
      mapReg(m, &i->Pin.FpUnary.dst);
      mapReg(m, &i->Pin.FpUnary.src);
      return;
   case Pin_FpBinary:
      mapReg(m, &i->Pin.FpBinary.dst);
      mapReg(m, &i->Pin.FpBinary.srcL);
      mapReg(m, &i->Pin.FpBinary.srcR);
      return;
   case Pin_FpMulAcc:
      mapReg(m, &i->Pin.FpMulAcc.dst);
      mapReg(m, &i->Pin.FpMulAcc.srcML);
      mapReg(m, &i->Pin.FpMulAcc.srcMR);
      mapReg(m, &i->Pin.FpMulAcc.srcAcc);
      return;
   case Pin_FpLdSt:
      mapReg(m, &i->Pin.FpLdSt.reg);
      mapRegs_PPCAMode(m, i->Pin.FpLdSt.addr);
      return;
   case Pin_FpSTFIW:
      mapReg(m, &i->Pin.FpSTFIW.addr);
      mapReg(m, &i->Pin.FpSTFIW.data);
      return;
   case Pin_FpRSP:
      mapReg(m, &i->Pin.FpRSP.dst);
      mapReg(m, &i->Pin.FpRSP.src);
      return;
   case Pin_FpCftI:
      mapReg(m, &i->Pin.FpCftI.dst);
      mapReg(m, &i->Pin.FpCftI.src);
      return;
   case Pin_FpCMov:
      mapReg(m, &i->Pin.FpCMov.dst);
      mapReg(m, &i->Pin.FpCMov.src);
      return;
   case Pin_FpLdFPSCR:
      mapReg(m, &i->Pin.FpLdFPSCR.src);
      return;
   case Pin_FpCmp:
      mapReg(m, &i->Pin.FpCmp.dst);
      mapReg(m, &i->Pin.FpCmp.srcL);
      mapReg(m, &i->Pin.FpCmp.srcR);
      return;
   case Pin_RdWrLR:
      mapReg(m, &i->Pin.RdWrLR.gpr);
      return;
   /* ---- AltiVec insns ---- */
   case Pin_AvLdSt:
      mapReg(m, &i->Pin.AvLdSt.reg);
      mapRegs_PPCAMode(m, i->Pin.AvLdSt.addr);
      return;
   case Pin_AvUnary:
      mapReg(m, &i->Pin.AvUnary.dst);
      mapReg(m, &i->Pin.AvUnary.src);
      return;
   case Pin_AvBinary:
      mapReg(m, &i->Pin.AvBinary.dst);
      mapReg(m, &i->Pin.AvBinary.srcL);
      mapReg(m, &i->Pin.AvBinary.srcR);
      return;
   case Pin_AvBin8x16:
      mapReg(m, &i->Pin.AvBin8x16.dst);
      mapReg(m, &i->Pin.AvBin8x16.srcL);
      mapReg(m, &i->Pin.AvBin8x16.srcR);
      return;
   case Pin_AvBin16x8:
      mapReg(m, &i->Pin.AvBin16x8.dst);
      mapReg(m, &i->Pin.AvBin16x8.srcL);
      mapReg(m, &i->Pin.AvBin16x8.srcR);
      return;
   case Pin_AvBin32x4:
      mapReg(m, &i->Pin.AvBin32x4.dst);
      mapReg(m, &i->Pin.AvBin32x4.srcL);
      mapReg(m, &i->Pin.AvBin32x4.srcR);
      return;
   case Pin_AvBin64x2:
      mapReg(m, &i->Pin.AvBin64x2.dst);
      mapReg(m, &i->Pin.AvBin64x2.srcL);
      mapReg(m, &i->Pin.AvBin64x2.srcR);
      return;
   case Pin_AvBin32Fx4:
      mapReg(m, &i->Pin.AvBin32Fx4.dst);
      mapReg(m, &i->Pin.AvBin32Fx4.srcL);
      mapReg(m, &i->Pin.AvBin32Fx4.srcR);
      return;
   case Pin_AvUn32Fx4:
      mapReg(m, &i->Pin.AvUn32Fx4.dst);
      mapReg(m, &i->Pin.AvUn32Fx4.src);
      return;
   case Pin_AvPerm:
      mapReg(m, &i->Pin.AvPerm.dst);
      mapReg(m, &i->Pin.AvPerm.srcL);
      mapReg(m, &i->Pin.AvPerm.srcR);
      mapReg(m, &i->Pin.AvPerm.ctl);
      return;
   case Pin_AvSel:
      mapReg(m, &i->Pin.AvSel.dst);
      mapReg(m, &i->Pin.AvSel.srcL);
      mapReg(m, &i->Pin.AvSel.srcR);
      mapReg(m, &i->Pin.AvSel.ctl);
      return;
   case Pin_AvSh:
      mapReg(m, &i->Pin.AvSh.dst);
      mapRegs_PPCAMode(m, i->Pin.AvSh.addr);
      return;
   case Pin_AvShlDbl:
      mapReg(m, &i->Pin.AvShlDbl.dst);
      mapReg(m, &i->Pin.AvShlDbl.srcL);
      mapReg(m, &i->Pin.AvShlDbl.srcR);
      return;
   case Pin_AvSplat:
      mapReg(m, &i->Pin.AvSplat.dst);
      mapRegs_PPCVI5s(m, i->Pin.AvSplat.src);
      return;
   case Pin_AvCMov:
     mapReg(m, &i->Pin.AvCMov.dst);
     mapReg(m, &i->Pin.AvCMov.src);
     return;
   case Pin_AvLdVSCR:
      mapReg(m, &i->Pin.AvLdVSCR.src);
      return;
   case Pin_AvCipherV128Unary:
      mapReg(m, &i->Pin.AvCipherV128Unary.dst);
      mapReg(m, &i->Pin.AvCipherV128Unary.src);
      return;
   case Pin_AvCipherV128Binary:
      mapReg(m, &i->Pin.AvCipherV128Binary.dst);
      mapReg(m, &i->Pin.AvCipherV128Binary.srcL);
      mapReg(m, &i->Pin.AvCipherV128Binary.srcR);
      return;
   case Pin_AvHashV128Binary:
      mapRegs_PPCRI(m, i->Pin.AvHashV128Binary.s_field);
      mapReg(m, &i->Pin.AvHashV128Binary.dst);
      mapReg(m, &i->Pin.AvHashV128Binary.src);
      return;
   case Pin_AvBCDV128Trinary:
      mapReg(m, &i->Pin.AvBCDV128Trinary.dst);
      mapReg(m, &i->Pin.AvBCDV128Trinary.src1);
      mapReg(m, &i->Pin.AvBCDV128Trinary.src2);
      mapRegs_PPCRI(m, i->Pin.AvBCDV128Trinary.ps);
      return;
   /* ---- decimal floating point insns ---- */
   case Pin_Dfp64Unary:
      mapReg(m, &i->Pin.Dfp64Unary.dst);
      mapReg(m, &i->Pin.Dfp64Unary.src);
      return;
   case Pin_Dfp64Binary:
      mapReg(m, &i->Pin.Dfp64Binary.dst);
      mapReg(m, &i->Pin.Dfp64Binary.srcL);
      mapReg(m, &i->Pin.Dfp64Binary.srcR);
      return;
   case Pin_DfpShift:
      mapRegs_PPCRI(m, i->Pin.DfpShift.shift);
      mapReg(m, &i->Pin.DfpShift.src);
      mapReg(m, &i->Pin.DfpShift.dst);
      return;
   case Pin_Dfp128Unary:
      mapReg(m, &i->Pin.Dfp128Unary.dst_hi);
      mapReg(m, &i->Pin.Dfp128Unary.dst_lo);
      mapReg(m, &i->Pin.Dfp128Unary.src_hi);
      mapReg(m, &i->Pin.Dfp128Unary.src_lo);
     return;
   case Pin_Dfp128Binary:
      /* NOTE(review): only the srcR halves are remapped here -- this
         matches getRegUsage_PPCInstr, which likewise mentions only
         dst and srcR for this insn. */
      mapReg(m, &i->Pin.Dfp128Binary.dst_hi);
      mapReg(m, &i->Pin.Dfp128Binary.dst_lo);
      mapReg(m, &i->Pin.Dfp128Binary.srcR_hi);
      mapReg(m, &i->Pin.Dfp128Binary.srcR_lo);
      return;
   case Pin_DfpShift128:
      mapRegs_PPCRI(m, i->Pin.DfpShift128.shift);
      mapReg(m, &i->Pin.DfpShift128.src_hi);
      mapReg(m, &i->Pin.DfpShift128.src_lo);
      mapReg(m, &i->Pin.DfpShift128.dst_hi);
      mapReg(m, &i->Pin.DfpShift128.dst_lo);
      return;
   case Pin_DfpRound:
      mapReg(m, &i->Pin.DfpRound.dst);
      mapReg(m, &i->Pin.DfpRound.src);
      return;
   case Pin_DfpRound128:
      mapReg(m, &i->Pin.DfpRound128.dst_hi);
      mapReg(m, &i->Pin.DfpRound128.dst_lo);
      mapReg(m, &i->Pin.DfpRound128.src_hi);
      mapReg(m, &i->Pin.DfpRound128.src_lo);
      return;
   case Pin_DfpQuantize:
      mapRegs_PPCRI(m, i->Pin.DfpQuantize.rmc);
      mapReg(m, &i->Pin.DfpQuantize.dst);
      mapReg(m, &i->Pin.DfpQuantize.srcL);
      mapReg(m, &i->Pin.DfpQuantize.srcR);
      return;
   case Pin_DfpQuantize128:
      mapRegs_PPCRI(m, i->Pin.DfpQuantize128.rmc);
      mapReg(m, &i->Pin.DfpQuantize128.dst_hi);
      mapReg(m, &i->Pin.DfpQuantize128.dst_lo);
      mapReg(m, &i->Pin.DfpQuantize128.src_hi);
      mapReg(m, &i->Pin.DfpQuantize128.src_lo);
      return;
   case Pin_DfpD128toD64:
      mapReg(m, &i->Pin.DfpD128toD64.src_hi);
      mapReg(m, &i->Pin.DfpD128toD64.src_lo);
      mapReg(m, &i->Pin.DfpD128toD64.dst);
      return;
   case Pin_DfpI64StoD128:
      mapReg(m, &i->Pin.DfpI64StoD128.src);
      mapReg(m, &i->Pin.DfpI64StoD128.dst_hi);
      mapReg(m, &i->Pin.DfpI64StoD128.dst_lo);
      return;
   case Pin_ExtractExpD128:
      mapReg(m, &i->Pin.ExtractExpD128.dst);
      mapReg(m, &i->Pin.ExtractExpD128.src_hi);
      mapReg(m, &i->Pin.ExtractExpD128.src_lo);
      return;
   case Pin_InsertExpD128:
      mapReg(m, &i->Pin.InsertExpD128.dst_hi);
      mapReg(m, &i->Pin.InsertExpD128.dst_lo);
      mapReg(m, &i->Pin.InsertExpD128.srcL);
      mapReg(m, &i->Pin.InsertExpD128.srcR_hi);
      mapReg(m, &i->Pin.InsertExpD128.srcR_lo);
      return;
   case Pin_Dfp64Cmp:
      mapReg(m, &i->Pin.Dfp64Cmp.dst);
      mapReg(m, &i->Pin.Dfp64Cmp.srcL);
      mapReg(m, &i->Pin.Dfp64Cmp.srcR);
      return;
   case Pin_Dfp128Cmp:
      mapReg(m, &i->Pin.Dfp128Cmp.dst);
      mapReg(m, &i->Pin.Dfp128Cmp.srcL_hi);
      mapReg(m, &i->Pin.Dfp128Cmp.srcL_lo);
      mapReg(m, &i->Pin.Dfp128Cmp.srcR_hi);
      mapReg(m, &i->Pin.Dfp128Cmp.srcR_lo);
      return;
   /* ---- event check / profiling ---- */
   case Pin_EvCheck:
      /* We expect both amodes only to mention the GSP (r31), so this
         is in fact pointless, since GSP isn't allocatable, but
         anyway.. */
      mapRegs_PPCAMode(m, i->Pin.EvCheck.amCounter);
      mapRegs_PPCAMode(m, i->Pin.EvCheck.amFailAddr);
      return;
   case Pin_ProfInc:
      /* hardwires r29 and r30 -- nothing to modify. */
      return;
   default:
      ppPPCInstr(i, mode64);
      vpanic("mapRegs_PPCInstr");
   }
}
+
+/* Figure out if i represents a reg-reg move, and if so assign the
+   source and destination to *src and *dst.  If in doubt say No.  Used
+   by the register allocator to do move coalescing. 
+*/
+Bool isMove_PPCInstr ( const PPCInstr* i, HReg* src, HReg* dst )
+{
+   /* Moves between integer regs */
+   if (i->tag == Pin_Alu) {
+      // or Rd,Rs,Rs == mr Rd,Rs
+      if (i->Pin.Alu.op != Palu_OR)
+         return False;
+      if (i->Pin.Alu.srcR->tag != Prh_Reg)
+         return False;
+      if (! sameHReg(i->Pin.Alu.srcR->Prh.Reg.reg, i->Pin.Alu.srcL))
+         return False;
+      *src = i->Pin.Alu.srcL;
+      *dst = i->Pin.Alu.dst;
+      return True;
+   }
+   /* Moves between FP regs */
+   if (i->tag == Pin_FpUnary) {
+      if (i->Pin.FpUnary.op != Pfp_MOV)
+         return False;
+      *src = i->Pin.FpUnary.src;
+      *dst = i->Pin.FpUnary.dst;
+      return True;
+   }
+   return False;
+}
+
+
+/* Generate ppc spill/reload instructions under the direction of the
+   register allocator.  Note it's critical these don't write the
+   condition codes. */
+
+void genSpill_PPC ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                    HReg rreg, Int offsetB, Bool mode64 )
+{
+   PPCAMode* am;
+   vassert(!hregIsVirtual(rreg));
+   *i1 = *i2 = NULL;
+   am = PPCAMode_IR( offsetB, GuestStatePtr(mode64) );
+   switch (hregClass(rreg)) {
+      case HRcInt64:
+         vassert(mode64);
+         *i1 = PPCInstr_Store( 8, am, rreg, mode64 );
+         return;
+      case HRcInt32:
+         vassert(!mode64);
+         *i1 = PPCInstr_Store( 4, am, rreg, mode64 );
+         return;
+      case HRcFlt64:
+         *i1 = PPCInstr_FpLdSt ( False/*store*/, 8, rreg, am );
+         return;
+      case HRcVec128:
+         // XXX: GPR30 used as spill register to kludge AltiVec
+         // AMode_IR
+         *i1 = PPCInstr_AvLdSt ( False/*store*/, 16, rreg, am );
+         return;
+      default: 
+         ppHRegClass(hregClass(rreg));
+         vpanic("genSpill_PPC: unimplemented regclass");
+   }
+}
+
+void genReload_PPC ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                     HReg rreg, Int offsetB, Bool mode64 )
+{
+   PPCAMode* am;
+   vassert(!hregIsVirtual(rreg));
+   *i1 = *i2 = NULL;
+   am = PPCAMode_IR( offsetB, GuestStatePtr(mode64) );
+   switch (hregClass(rreg)) {
+      case HRcInt64:
+         vassert(mode64);
+         *i1 = PPCInstr_Load( 8, rreg, am, mode64 );
+         return;
+      case HRcInt32:
+         vassert(!mode64);
+         *i1 = PPCInstr_Load( 4, rreg, am, mode64 );
+         return;
+      case HRcFlt64:
+         *i1 = PPCInstr_FpLdSt ( True/*load*/, 8, rreg, am );
+         return;
+      case HRcVec128:
+         // XXX: GPR30 used as spill register to kludge AltiVec AMode_IR
+         *i1 = PPCInstr_AvLdSt ( True/*load*/, 16, rreg, am );
+         return;
+      default: 
+         ppHRegClass(hregClass(rreg));
+         vpanic("genReload_PPC: unimplemented regclass");
+   }
+}
+
+
+/* --------- The ppc assembler (bleh.) --------- */
+
+inline static UInt iregEnc ( HReg r, Bool mode64 )
+{
+   UInt n;
+   vassert(hregClass(r) == (mode64 ? HRcInt64 : HRcInt32));
+   vassert(!hregIsVirtual(r));
+   n = hregEncoding(r);
+   vassert(n <= 32);
+   return n;
+}
+
+inline static UInt fregEnc ( HReg fr )
+{
+   UInt n;
+   vassert(hregClass(fr) == HRcFlt64);
+   vassert(!hregIsVirtual(fr));
+   n = hregEncoding(fr);
+   vassert(n <= 32);
+   return n;
+}
+
+inline static UInt vregEnc ( HReg v )
+{
+   UInt n;
+   vassert(hregClass(v) == HRcVec128);
+   vassert(!hregIsVirtual(v));
+   n = hregEncoding(v);
+   vassert(n <= 32);
+   return n;
+}
+
+/* Emit an instruction ppc-endianly */
+static UChar* emit32 ( UChar* p, UInt w32, VexEndness endness_host )
+{
+  if (endness_host == VexEndnessBE) {
+    *p++ = toUChar((w32 >> 24) & 0x000000FF);
+    *p++ = toUChar((w32 >> 16) & 0x000000FF);
+    *p++ = toUChar((w32 >>  8) & 0x000000FF);
+    *p++ = toUChar((w32)       & 0x000000FF);
+  } else {
+    *p++ = toUChar((w32)       & 0x000000FF);
+    *p++ = toUChar((w32 >>  8) & 0x000000FF);
+    *p++ = toUChar((w32 >> 16) & 0x000000FF);
+    *p++ = toUChar((w32 >> 24) & 0x000000FF);
+  }
+   return p;
+}
+
+/* Fetch an instruction ppc-endianly */
+static UInt fetch32 ( UChar* p, VexEndness endness_host )
+{
+   UInt w32 = 0;
+   if (endness_host == VexEndnessBE) {
+      w32 |= ((0xFF & (UInt)p[0]) << 24);
+      w32 |= ((0xFF & (UInt)p[1]) << 16);
+      w32 |= ((0xFF & (UInt)p[2]) <<  8);
+      w32 |= ((0xFF & (UInt)p[3]) <<  0);
+  } else {
+      w32 |= ((0xFF & (UInt)p[3]) << 24);
+      w32 |= ((0xFF & (UInt)p[2]) << 16);
+      w32 |= ((0xFF & (UInt)p[1]) <<  8);
+      w32 |= ((0xFF & (UInt)p[0]) <<  0);
+  }
+   return w32;
+}
+
+/* The following mkForm[...] functions refer to ppc instruction forms
+   as per PPC32 p576
+ */
+
+static UChar* mkFormD ( UChar* p, UInt opc1,
+                        UInt r1, UInt r2, UInt imm, VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   imm = imm & 0xFFFF;
+   theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (imm));
+   return emit32(p, theInstr, endness_host);
+}
+
/* MD-form (64-bit rotate-immediate insns, e.g. the rldicr used by
   mkLoadImm below).  Both 6-bit immediates are "split" fields: bit 5
   is stored separately from the low five bits, as the MD format
   requires. */
static UChar* mkFormMD ( UChar* p, UInt opc1, UInt r1, UInt r2,
                         UInt imm1, UInt imm2, UInt opc2,
                         VexEndness endness_host )
{
   UInt theInstr;
   vassert(opc1 < 0x40);
   vassert(r1   < 0x20);
   vassert(r2   < 0x20);
   vassert(imm1 < 0x40);
   vassert(imm2 < 0x40);
   vassert(opc2 < 0x08);
   /* Pre-split imm2: low five bits shifted up one, bit 5 moved to the
      bottom, ready to drop into bits [10:5] of the word. */
   imm2 = ((imm2 & 0x1F) << 1) | (imm2 >> 5);
   /* imm1's low five bits go at [15:11]; its bit 5 goes at bit 1. */
   theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
               ((imm1 & 0x1F)<<11) | (imm2<<5) |
               (opc2<<2) | ((imm1 >> 5)<<1));
   return emit32(p, theInstr, endness_host);
}
+
+static UChar* mkFormX ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                        UInt r3, UInt opc2, UInt b0, VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(r3   < 0x20);
+   vassert(opc2 < 0x400);
+   vassert(b0   < 0x2);
+   theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+               (r3<<11) | (opc2<<1) | (b0));
+   return emit32(p, theInstr, endness_host);
+}
+
+static UChar* mkFormXO ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                         UInt r3, UInt b10, UInt opc2, UInt b0,
+                         VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(r3   < 0x20);
+   vassert(b10  < 0x2);
+   vassert(opc2 < 0x200);
+   vassert(b0   < 0x2);
+   theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+               (r3<<11) | (b10 << 10) | (opc2<<1) | (b0));
+   return emit32(p, theInstr, endness_host);
+}
+
+static UChar* mkFormXL ( UChar* p, UInt opc1, UInt f1, UInt f2,
+                         UInt f3, UInt opc2, UInt b0, VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(opc1 < 0x40);
+   vassert(f1   < 0x20);
+   vassert(f2   < 0x20);
+   vassert(f3   < 0x20);
+   vassert(opc2 < 0x400);
+   vassert(b0   < 0x2);
+   theInstr = ((opc1<<26) | (f1<<21) | (f2<<16) |
+               (f3<<11) | (opc2<<1) | (b0));
+   return emit32(p, theInstr, endness_host);
+}
+
+// Note: for split field ops, give mnemonic arg
+static UChar* mkFormXFX ( UChar* p, UInt r1, UInt f2, UInt opc2,
+                          VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(r1   < 0x20);
+   vassert(f2   < 0x20);
+   vassert(opc2 < 0x400);
+   switch (opc2) {
+   case 144:  // mtcrf
+      vassert(f2 < 0x100);
+      f2 = f2 << 1;
+      break;
+   case 339:  // mfspr
+   case 371:  // mftb
+   case 467:  // mtspr
+      vassert(f2 < 0x400);
+      // re-arrange split field
+      f2 = ((f2>>5) & 0x1F) | ((f2 & 0x1F)<<5);
+      break;
+   default: vpanic("mkFormXFX(ppch)");
+   }
+   theInstr = ((31<<26) | (r1<<21) | (f2<<11) | (opc2<<1));
+   return emit32(p, theInstr, endness_host);
+}
+
+// Only used by mtfsf
+static UChar* mkFormXFL ( UChar* p, UInt FM, UInt freg, UInt dfp_rm,
+                          VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(FM   < 0x100);
+   vassert(freg < 0x20);
+   theInstr = ((63<<26) | (FM<<17) | (dfp_rm<<16) | (freg<<11) | (711<<1));
+   return emit32(p, theInstr, endness_host);
+}
+
+static UChar* mkFormXS ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                         UInt imm, UInt opc2, UInt b0,
+                         VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(imm  < 0x40);
+   vassert(opc2 < 0x400);
+   vassert(b0   < 0x2);
+   theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+               ((imm & 0x1F)<<11) | (opc2<<2) | ((imm>>5)<<1) | (b0));
+   return emit32(p, theInstr, endness_host);
+}
+
+
#if 0
// 'b' -- I-form unconditional branch.  Currently unused (hence the
// #if 0); kept as reference for the encoding.
static UChar* mkFormI ( UChar* p, UInt LI, UInt AA, UInt LK,
                        VexEndness endness_host )
{
   UInt theInstr;
   vassert(LI  < 0x1000000);
   vassert(AA  < 0x2);
   vassert(LK  < 0x2);
   theInstr = ((18<<26) | (LI<<2) | (AA<<1) | (LK));
   return emit32(p, theInstr, endness_host);
}
#endif
+
+// 'bc'
+static UChar* mkFormB ( UChar* p, UInt BO, UInt BI,
+                        UInt BD, UInt AA, UInt LK, VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(BO  < 0x20);
+   vassert(BI  < 0x20);
+   vassert(BD  < 0x4000);
+   vassert(AA  < 0x2);
+   vassert(LK  < 0x2);
+   theInstr = ((16<<26) | (BO<<21) | (BI<<16) |
+               (BD<<2) | (AA<<1) | (LK));
+   return emit32(p, theInstr, endness_host);
+}
+
+// rotates
+static UChar* mkFormM ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                        UInt f3, UInt MB, UInt ME, UInt Rc,
+                        VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(f3   < 0x20);
+   vassert(MB   < 0x20);
+   vassert(ME   < 0x20);
+   vassert(Rc   < 0x2);
+   theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+               (f3<<11) | (MB<<6) | (ME<<1) | (Rc));
+   return emit32(p, theInstr, endness_host);
+}
+
+static UChar* mkFormA ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                        UInt r3, UInt r4, UInt opc2, UInt b0,
+                        VexEndness endness_host )
+{
+   UInt theInstr;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(r3   < 0x20);
+   vassert(r4   < 0x20);
+   vassert(opc2 < 0x20);
+   vassert(b0   < 0x2 );
+   theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (r3<<11) |
+               (r4<<6) | (opc2<<1) | (b0));
+   return emit32(p, theInstr, endness_host);
+}
+
+static UChar* mkFormZ22 ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                          UInt constant, UInt opc2, UInt b0,
+                          VexEndness endness_host)
+{
+   UInt theInstr;
+   vassert(opc1     < 0x40);
+   vassert(r1       < 0x20);
+   vassert(r2       < 0x20);
+   vassert(constant < 0x40);   /* 6 bit constant */
+   vassert(opc2     < 0x200);  /* 9 bit field */
+   vassert(b0       < 0x2);
+   theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+               (constant<<10) | (opc2<<1) | (b0));
+   return emit32(p, theInstr, endness_host);
+}
+
+static UChar* mkFormZ23 ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                          UInt r3, UInt rmc, UInt opc2, UInt b0,
+                          VexEndness endness_host)
+{
+   UInt theInstr;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(r3   < 0x20);
+   vassert(rmc  < 0x4);
+   vassert(opc2 < 0x100);
+   vassert(b0   < 0x2);
+   theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+               (r3<<11) | (rmc<<9) | (opc2<<1) | (b0));
+   return emit32(p, theInstr, endness_host);
+}
+
+static UChar* doAMode_IR ( UChar* p, UInt opc1, UInt rSD,
+                           PPCAMode* am, Bool mode64, VexEndness endness_host )
+{
+   UInt rA, idx;
+   vassert(am->tag == Pam_IR);
+   vassert(am->Pam.IR.index < 0x10000);
+
+   rA  = iregEnc(am->Pam.IR.base, mode64);
+   idx = am->Pam.IR.index;
+
+   if (opc1 == 58 || opc1 == 62) { // ld/std: mode64 only
+      vassert(mode64);
+      /* stay sane with DS form: lowest 2 bits must be 00.  This
+         should be guaranteed to us by iselWordExpr_AMode. */
+      vassert(0 == (idx & 3));
+   }
+   p = mkFormD(p, opc1, rSD, rA, idx, endness_host);
+   return p;
+}
+
+static UChar* doAMode_RR ( UChar* p, UInt opc1, UInt opc2,
+                           UInt rSD, PPCAMode* am, Bool mode64,
+                           VexEndness endness_host )
+{
+   UInt rA, rB;
+   vassert(am->tag == Pam_RR);
+
+   rA  = iregEnc(am->Pam.RR.base, mode64);
+   rB  = iregEnc(am->Pam.RR.index, mode64);
+   
+   p = mkFormX(p, opc1, rSD, rA, rB, opc2, 0, endness_host);
+   return p;
+}
+
+
/* Load |imm| into r_dst using as few instructions as possible:
   1 insn (li) if imm sign-extends from 16 bits, 2 (lis;ori) if from
   32 bits, otherwise up to 5 insns for a full 64-bit constant
   (64-bit mode only). */
static UChar* mkLoadImm ( UChar* p, UInt r_dst, ULong imm, Bool mode64,
                          VexEndness endness_host )
{
   vassert(r_dst < 0x20);

   if (!mode64) {
      /* In 32-bit mode, make sure the top 32 bits of imm are a sign
         extension of the bottom 32 bits, so that the range tests
         below work correctly. */
      UInt u32 = (UInt)imm;
      Int  s32 = (Int)u32;
      Long s64 = (Long)s32;
      imm = (ULong)s64;
   }

   if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
      // sign-extendable from 16 bits

      // addi r_dst,0,imm  => li r_dst,imm
      p = mkFormD(p, 14, r_dst, 0, imm & 0xFFFF, endness_host);
   } else {
      if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
         // sign-extendable from 32 bits

         // addis r_dst,r0,(imm>>16) => lis r_dst, (imm>>16)
         p = mkFormD(p, 15, r_dst, 0, (imm>>16) & 0xFFFF, endness_host);
         // ori r_dst, r_dst, (imm & 0xFFFF)
         p = mkFormD(p, 24, r_dst, r_dst, imm & 0xFFFF, endness_host);
      } else {
         // full 64bit immediate load: 5 (five!) insns.
         vassert(mode64);

         // load high word

         // lis r_dst, (imm>>48) & 0xFFFF
         p = mkFormD(p, 15, r_dst, 0, (imm>>48) & 0xFFFF, endness_host);

         // ori r_dst, r_dst, (imm>>32) & 0xFFFF
         // (skipped when that halfword is zero -- lis already cleared it)
         if ((imm>>32) & 0xFFFF)
            p = mkFormD(p, 24, r_dst, r_dst, (imm>>32) & 0xFFFF, endness_host);

         // shift r_dst low word to high word => rldicr
         p = mkFormMD(p, 30, r_dst, r_dst, 32, 31, 1, endness_host);

         // load low word

         // oris r_dst, r_dst, (imm>>16) & 0xFFFF  (skipped if zero)
         if ((imm>>16) & 0xFFFF)
            p = mkFormD(p, 25, r_dst, r_dst, (imm>>16) & 0xFFFF, endness_host);

         // ori r_dst, r_dst, (imm) & 0xFFFF  (skipped if zero)
         if (imm & 0xFFFF)
            p = mkFormD(p, 24, r_dst, r_dst, imm & 0xFFFF, endness_host);
      }
   }
   return p;
}
+
/* A simplified version of mkLoadImm that ALWAYS generates exactly 2
   instructions (32-bit mode) or exactly 5 (64-bit mode), even when it
   could generate fewer.  The fixed length is essential: these
   sequences get patched in place later, so nothing here may be
   conditionally skipped (contrast mkLoadImm above). */
static UChar* mkLoadImm_EXACTLY2or5 ( UChar* p,
                                      UInt r_dst, ULong imm, Bool mode64,
                                      VexEndness endness_host )
{
   vassert(r_dst < 0x20);

   if (!mode64) {
      /* In 32-bit mode, make sure the top 32 bits of imm are a sign
         extension of the bottom 32 bits.  (Probably unnecessary.) */
      UInt u32 = (UInt)imm;
      Int  s32 = (Int)u32;
      Long s64 = (Long)s32;
      imm = (ULong)s64;
   }

   if (!mode64) {
      // addis r_dst,r0,(imm>>16) => lis r_dst, (imm>>16)
      p = mkFormD(p, 15, r_dst, 0, (imm>>16) & 0xFFFF, endness_host);
      // ori r_dst, r_dst, (imm & 0xFFFF)
      p = mkFormD(p, 24, r_dst, r_dst, imm & 0xFFFF, endness_host);

   } else {
      // full 64bit immediate load: 5 (five!) insns.

      // load high word
      // lis r_dst, (imm>>48) & 0xFFFF
      p = mkFormD(p, 15, r_dst, 0, (imm>>48) & 0xFFFF, endness_host);

      // ori r_dst, r_dst, (imm>>32) & 0xFFFF
      p = mkFormD(p, 24, r_dst, r_dst, (imm>>32) & 0xFFFF, endness_host);

      // shift r_dst low word to high word => rldicr
      p = mkFormMD(p, 30, r_dst, r_dst, 32, 31, 1, endness_host);

      // load low word
      // oris r_dst, r_dst, (imm>>16) & 0xFFFF
      p = mkFormD(p, 25, r_dst, r_dst, (imm>>16) & 0xFFFF, endness_host);

      // ori r_dst, r_dst, (imm) & 0xFFFF
      p = mkFormD(p, 24, r_dst, r_dst, imm & 0xFFFF, endness_host);
   }
   return p;
}
+
+/* Checks whether the sequence of bytes at p was indeed created
+   by mkLoadImm_EXACTLY2or5 with the given parameters.  Works by
+   re-generating the expected 2- or 5-insn sequence into a local
+   buffer and comparing it word by word against the code at
+   p_to_check; this must be kept in exact sync with
+   mkLoadImm_EXACTLY2or5 above. */
+static Bool isLoadImm_EXACTLY2or5 ( UChar* p_to_check,
+                                    UInt r_dst, ULong imm, Bool mode64,
+                                    VexEndness endness_host )
+{
+   vassert(r_dst < 0x20);
+
+   if (!mode64) {
+      /* In 32-bit mode, make sure the top 32 bits of imm are a sign
+         extension of the bottom 32 bits.  (Probably unnecessary.) */
+      UInt u32 = (UInt)imm;
+      Int  s32 = (Int)u32;
+      Long s64 = (Long)s32;
+      imm = (ULong)s64;
+   }
+
+   if (!mode64) {
+      /* 32-bit case: regenerate the 2-insn lis/ori pair. */
+      UInt   expect[2] = { 0, 0 };
+      UChar* p         = (UChar*)&expect[0];
+      // addis r_dst,r0,(imm>>16) => lis r_dst, (imm>>16)
+      p = mkFormD(p, 15, r_dst, 0, (imm>>16) & 0xFFFF, endness_host);
+      // ori r_dst, r_dst, (imm & 0xFFFF)
+      p = mkFormD(p, 24, r_dst, r_dst, imm & 0xFFFF, endness_host);
+      /* sanity: exactly 8 bytes were generated */
+      vassert(p == (UChar*)&expect[2]);
+
+      return fetch32(p_to_check + 0, endness_host) == expect[0]
+             && fetch32(p_to_check + 4, endness_host) == expect[1];
+
+   } else {
+      /* 64-bit case: regenerate the 5-insn sequence. */
+      UInt   expect[5] = { 0, 0, 0, 0, 0 };
+      UChar* p         = (UChar*)&expect[0];
+      // full 64bit immediate load: 5 (five!) insns.
+
+      // load high word
+      // lis r_dst, (imm>>48) & 0xFFFF
+      p = mkFormD(p, 15, r_dst, 0, (imm>>48) & 0xFFFF, endness_host);
+
+      // ori r_dst, r_dst, (imm>>32) & 0xFFFF
+      p = mkFormD(p, 24, r_dst, r_dst, (imm>>32) & 0xFFFF, endness_host);
+         
+      // shift r_dst low word to high word => rldicr
+      p = mkFormMD(p, 30, r_dst, r_dst, 32, 31, 1, endness_host);
+
+      // load low word
+      // oris r_dst, r_dst, (imm>>16) & 0xFFFF
+      p = mkFormD(p, 25, r_dst, r_dst, (imm>>16) & 0xFFFF, endness_host);
+
+      // ori r_dst, r_dst, (imm) & 0xFFFF
+      p = mkFormD(p, 24, r_dst, r_dst, imm & 0xFFFF, endness_host);
+
+      /* sanity: exactly 20 bytes were generated */
+      vassert(p == (UChar*)&expect[5]);
+
+      return fetch32(p_to_check + 0, endness_host) == expect[0]
+             && fetch32(p_to_check + 4,  endness_host) == expect[1]
+             && fetch32(p_to_check + 8,  endness_host) == expect[2]
+             && fetch32(p_to_check + 12, endness_host) == expect[3]
+             && fetch32(p_to_check + 16, endness_host) == expect[4];
+   }
+}
+
+
+/* Generate a machine-word sized load or store.  Simplified version of
+   the Pin_Load and Pin_Store cases below.  Only reg+imm (Pam_IR)
+   addressing is supported. */
+static UChar* do_load_or_store_machine_word ( 
+                 UChar* p, Bool isLoad,
+                 UInt reg, PPCAMode* am, Bool mode64, VexEndness endness_host )
+{
+   /* Pick the primary opcode up front: lwz/ld for loads,
+      stw/std for stores, according to host word size. */
+   UInt opc1;
+   if (isLoad)
+      opc1 = mode64 ? 58 /*ld*/  : 32 /*lwz*/;
+   else
+      opc1 = mode64 ? 62 /*std*/ : 36 /*stw*/;
+
+   switch (am->tag) {
+      case Pam_IR:
+         if (mode64) {
+            /* DS-form insns require a 4-aligned displacement. */
+            vassert(0 == (am->Pam.IR.index & 3));
+         }
+         p = doAMode_IR(p, opc1, reg, am, mode64, endness_host);
+         break;
+      case Pam_RR:
+         /* we could handle this case, but we don't expect to ever
+            need to. */
+         vassert(0);
+      default:
+         vassert(0);
+   }
+   return p;
+}
+
+/* Generate a 32-bit sized load or store.  Simplified version of
+   do_load_or_store_machine_word above.  Only reg+imm (Pam_IR)
+   addressing is supported. */
+static UChar* do_load_or_store_word32 ( 
+                 UChar* p, Bool isLoad,
+                 UInt reg, PPCAMode* am, Bool mode64, VexEndness endness_host )
+{
+   /* lwz for a load, stw for a store. */
+   UInt opc1 = isLoad ? 32 /*lwz*/ : 36 /*stw*/;
+
+   switch (am->tag) {
+      case Pam_IR:
+         if (mode64) {
+            /* keep the displacement 4-aligned in 64-bit mode */
+            vassert(0 == (am->Pam.IR.index & 3));
+         }
+         p = doAMode_IR(p, opc1, reg, am, mode64, endness_host);
+         break;
+      case Pam_RR:
+         /* we could handle this case, but we don't expect to ever
+            need to. */
+         vassert(0);
+      default:
+         vassert(0);
+   }
+   return p;
+}
+
+/* Move r_src to r_dst, via "or r_dst,r_src,r_src".  Emits nothing
+   when source and destination are the same register.
+   (Fixes the previous comment, which had the direction backwards:
+   the data flows from r_src INTO r_dst.) */
+static UChar* mkMoveReg ( UChar* p, UInt r_dst, UInt r_src,
+                          VexEndness endness_host )
+{
+   vassert(r_dst < 0x20);
+   vassert(r_src < 0x20);
+
+   if (r_dst != r_src) {
+      /* or r_dst, r_src, r_src */
+      p = mkFormX(p, 31, r_src, r_dst, r_src, 444, 0, endness_host );
+   }
+   return p;
+}
+
+/* Assemble and emit a VX-form instruction:
+   opc1(6) | r1(5) | r2(5) | r3(5) | opc2(11). */
+static UChar* mkFormVX ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                         UInt r3, UInt opc2, VexEndness endness_host )
+{
+   UInt w;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(r3   < 0x20);
+   vassert(opc2 < 0x800);
+   w  = (opc1 << 26) | (r1 << 21);
+   w |= (r2 << 16) | (r3 << 11) | opc2;
+   return emit32(p, w, endness_host);
+}
+
+/* Assemble and emit a VX-form instruction whose secondary opcode
+   occupies bits [10:1] (note the opc2 << 1 placement). */
+static UChar* mkFormVXI ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                          UInt r3, UInt opc2, VexEndness endness_host )
+{
+   UInt w;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(r3   < 0x20);
+   vassert(opc2 < 0x27);
+   w  = (opc1 << 26) | (r1 << 21) | (r2 << 16);
+   w |= (r3 << 11) | (opc2 << 1);
+   return emit32(p, w, endness_host);
+}
+
+/* Assemble and emit a VXR-form instruction:
+   opc1(6) | r1(5) | r2(5) | r3(5) | Rc(1) | opc2(10). */
+static UChar* mkFormVXR ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                          UInt r3, UInt Rc, UInt opc2,
+                          VexEndness endness_host )
+{
+   UInt w;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(r3   < 0x20);
+   vassert(Rc   < 0x2);
+   vassert(opc2 < 0x400);
+   w  = (opc1 << 26) | (r1 << 21) | (r2 << 16) | (r3 << 11);
+   w |= (Rc << 10) | opc2;
+   return emit32(p, w, endness_host);
+}
+
+/* Assemble and emit a VA-form instruction:
+   opc1(6) | r1(5) | r2(5) | r3(5) | r4(5) | opc2(6). */
+static UChar* mkFormVA ( UChar* p, UInt opc1, UInt r1, UInt r2,
+                         UInt r3, UInt r4, UInt opc2, VexEndness endness_host )
+{
+   UInt w;
+   vassert(opc1 < 0x40);
+   vassert(r1   < 0x20);
+   vassert(r2   < 0x20);
+   vassert(r3   < 0x20);
+   vassert(r4   < 0x20);
+   vassert(opc2 < 0x40);
+   w  = (opc1 << 26) | (r1 << 21) | (r2 << 16);
+   w |= (r3 << 11) | (r4 << 6) | opc2;
+   return emit32(p, w, endness_host);
+}
+
+
+
+/* Emit an instruction into buf and return the number of bytes used.
+   Note that buf is not the insn's final place, and therefore it is
+   imperative to emit position-independent code.  If the emitted
+   instruction was a profiler inc, set *is_profInc to True, else leave
+   it unchanged.
+*/
+Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
+                    UChar* buf, Int nbuf, const PPCInstr* i, 
+                    Bool mode64, VexEndness endness_host,
+                    const void* disp_cp_chain_me_to_slowEP,
+                    const void* disp_cp_chain_me_to_fastEP,
+                    const void* disp_cp_xindir,
+                    const void* disp_cp_xassisted)
+{
+   UChar* p = &buf[0];
+   vassert(nbuf >= 32);
+
+   if (0) {
+      vex_printf("asm  ");ppPPCInstr(i, mode64); vex_printf("\n");
+   }
+
+   switch (i->tag) {
+
+   case Pin_LI:
+      p = mkLoadImm(p, iregEnc(i->Pin.LI.dst, mode64),
+                    i->Pin.LI.imm64, mode64, endness_host);
+      goto done;
+
+   case Pin_Alu: {
+      PPCRH* srcR   = i->Pin.Alu.srcR;
+      Bool   immR   = toBool(srcR->tag == Prh_Imm);
+      UInt   r_dst  = iregEnc(i->Pin.Alu.dst, mode64);
+      UInt   r_srcL = iregEnc(i->Pin.Alu.srcL, mode64);
+      UInt   r_srcR = immR ? (-1)/*bogus*/ :
+                             iregEnc(srcR->Prh.Reg.reg, mode64);
+
+      switch (i->Pin.Alu.op) {
+      case Palu_ADD:
+         if (immR) {
+            /* addi (PPC32 p350) */
+            vassert(srcR->Prh.Imm.syned);
+            vassert(srcR->Prh.Imm.imm16 != 0x8000);
+            p = mkFormD(p, 14, r_dst, r_srcL, srcR->Prh.Imm.imm16, endness_host);
+         } else {
+            /* add (PPC32 p347) */
+            p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 266, 0, endness_host);
+         }
+         break;
+
+      case Palu_SUB:
+         if (immR) {
+            /* addi (PPC32 p350), but with negated imm */
+            vassert(srcR->Prh.Imm.syned);
+            vassert(srcR->Prh.Imm.imm16 != 0x8000);
+            p = mkFormD(p, 14, r_dst, r_srcL, (- srcR->Prh.Imm.imm16),
+                        endness_host);
+         } else {
+            /* subf (PPC32 p537), with args the "wrong" way round */
+            p = mkFormXO(p, 31, r_dst, r_srcR, r_srcL, 0, 40, 0, endness_host);
+         }
+         break;
+
+      case Palu_AND:
+         if (immR) {
+            /* andi. (PPC32 p358) */
+            vassert(!srcR->Prh.Imm.syned);
+            p = mkFormD(p, 28, r_srcL, r_dst, srcR->Prh.Imm.imm16, endness_host);
+         } else {
+            /* and (PPC32 p356) */
+            p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 28, 0, endness_host);
+         }
+         break;
+
+      case Palu_OR:
+         if (immR) {
+            /* ori (PPC32 p497) */
+            vassert(!srcR->Prh.Imm.syned);
+            p = mkFormD(p, 24, r_srcL, r_dst, srcR->Prh.Imm.imm16, endness_host);
+         } else {
+            /* or (PPC32 p495) */
+            p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 444, 0, endness_host);
+         }
+         break;
+
+      case Palu_XOR:
+         if (immR) {
+            /* xori (PPC32 p550) */
+            vassert(!srcR->Prh.Imm.syned);
+            p = mkFormD(p, 26, r_srcL, r_dst, srcR->Prh.Imm.imm16, endness_host);
+         } else {
+            /* xor (PPC32 p549) */
+            p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 316, 0, endness_host);
+         }
+         break;
+
+      default:
+         goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_Shft: {
+      PPCRH* srcR   = i->Pin.Shft.srcR;
+      Bool   sz32   = i->Pin.Shft.sz32;
+      Bool   immR   = toBool(srcR->tag == Prh_Imm);
+      UInt   r_dst  = iregEnc(i->Pin.Shft.dst, mode64);
+      UInt   r_srcL = iregEnc(i->Pin.Shft.srcL, mode64);
+      UInt   r_srcR = immR ? (-1)/*bogus*/ :
+                             iregEnc(srcR->Prh.Reg.reg, mode64);
+      if (!mode64)
+         vassert(sz32);
+
+      switch (i->Pin.Shft.op) {
+      case Pshft_SHL:
+         if (sz32) {
+            if (immR) {
+               /* rd = rs << n, 1 <= n <= 31
+                  is
+                  rlwinm rd,rs,n,0,31-n  (PPC32 p501)
+               */
+               UInt n = srcR->Prh.Imm.imm16;
+               vassert(!srcR->Prh.Imm.syned);
+               vassert(n > 0 && n < 32);
+               p = mkFormM(p, 21, r_srcL, r_dst, n, 0, 31-n, 0, endness_host);
+            } else {
+               /* slw (PPC32 p505) */
+               p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 24, 0, endness_host);
+            }
+         } else {
+            if (immR) {
+               /* rd = rs << n, 1 <= n <= 63
+                  is
+                  rldicr rd,rs,n,63-n  (PPC64 p559)
+               */
+               UInt n = srcR->Prh.Imm.imm16;
+               vassert(!srcR->Prh.Imm.syned);
+               vassert(n > 0 && n < 64);
+               p = mkFormMD(p, 30, r_srcL, r_dst, n, 63-n, 1, endness_host);
+            } else {
+               /* sld (PPC64 p568) */
+               p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 27, 0, endness_host);
+            }
+         }
+         break;
+
+      case Pshft_SHR:
+         if (sz32) {
+             if (immR) {
+               /* rd = rs >>u n, 1 <= n <= 31
+                  is
+                  rlwinm rd,rs,32-n,n,31  (PPC32 p501)
+               */
+               UInt n = srcR->Prh.Imm.imm16;
+               vassert(!srcR->Prh.Imm.syned);
+               vassert(n > 0 && n < 32);
+               p = mkFormM(p, 21, r_srcL, r_dst, 32-n, n, 31, 0, endness_host);
+            } else {
+               /* srw (PPC32 p508) */
+               p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 536, 0, endness_host);
+            }
+         } else {
+            if (immR) {
+               /* rd = rs >>u n, 1 <= n <= 63
+                  is
+                  rldicl rd,rs,64-n,n  (PPC64 p558)
+               */
+               UInt n = srcR->Prh.Imm.imm16;
+               vassert(!srcR->Prh.Imm.syned);
+               vassert(n > 0 && n < 64);
+               p = mkFormMD(p, 30, r_srcL, r_dst, 64-n, n, 0, endness_host);
+            } else {
+               /* srd (PPC64 p574) */
+               p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 539, 0, endness_host);
+            }
+         }
+         break;
+
+      case Pshft_SAR:
+         if (sz32) {
+            if (immR) {
+               /* srawi (PPC32 p507) */
+               UInt n = srcR->Prh.Imm.imm16;
+               vassert(!srcR->Prh.Imm.syned);
+               /* In 64-bit mode, we allow right shifts by zero bits
+                  as that is a handy way to sign extend the lower 32
+                  bits into the upper 32 bits. */
+               if (mode64)
+                  vassert(n >= 0 && n < 32);
+               else 
+                  vassert(n > 0 && n < 32);
+               p = mkFormX(p, 31, r_srcL, r_dst, n, 824, 0, endness_host);
+            } else {
+               /* sraw (PPC32 p506) */
+               p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 792, 0, endness_host);
+            }
+         } else {
+            if (immR) {
+               /* sradi (PPC64 p571) */
+               UInt n = srcR->Prh.Imm.imm16;
+               vassert(!srcR->Prh.Imm.syned);
+               vassert(n > 0 && n < 64);
+               p = mkFormXS(p, 31, r_srcL, r_dst, n, 413, 0, endness_host);
+            } else {
+               /* srad (PPC32 p570) */
+               p = mkFormX(p, 31, r_srcL, r_dst, r_srcR, 794, 0, endness_host);
+            }
+         }
+         break;
+
+      default:
+         goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_AddSubC: {
+      Bool isAdd  = i->Pin.AddSubC.isAdd;
+      Bool setC   = i->Pin.AddSubC.setC;
+      UInt r_srcL = iregEnc(i->Pin.AddSubC.srcL, mode64);
+      UInt r_srcR = iregEnc(i->Pin.AddSubC.srcR, mode64);
+      UInt r_dst  = iregEnc(i->Pin.AddSubC.dst, mode64);
+      
+      if (isAdd) {
+         if (setC) /* addc (PPC32 p348) */
+            p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 10, 0, endness_host);
+         else          /* adde (PPC32 p349) */
+            p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 138, 0, endness_host);
+      } else {
+         /* subfX, with args the "wrong" way round */
+         if (setC) /* subfc (PPC32 p538) */
+            p = mkFormXO(p, 31, r_dst, r_srcR, r_srcL, 0, 8, 0, endness_host);
+         else          /* subfe (PPC32 p539) */
+            p = mkFormXO(p, 31, r_dst, r_srcR, r_srcL, 0, 136, 0, endness_host);
+      }
+      goto done;
+   }
+
+   case Pin_Cmp: {
+      Bool syned  = i->Pin.Cmp.syned;
+      Bool sz32   = i->Pin.Cmp.sz32;
+      UInt fld1   = i->Pin.Cmp.crfD << 2;
+      UInt r_srcL = iregEnc(i->Pin.Cmp.srcL, mode64);
+      UInt r_srcR, imm_srcR;
+      PPCRH* srcR = i->Pin.Cmp.srcR;
+
+      if (!mode64)        // cmp double word invalid for mode32
+         vassert(sz32);      
+      else if (!sz32)     // mode64 && cmp64: set L=1
+         fld1 |= 1;
+ 
+      switch (srcR->tag) {
+      case Prh_Imm:
+         vassert(syned == srcR->Prh.Imm.syned);
+         imm_srcR = srcR->Prh.Imm.imm16;
+         if (syned) {  // cmpw/di  (signed)   (PPC32 p368)
+            vassert(imm_srcR != 0x8000);
+            p = mkFormD(p, 11, fld1, r_srcL, imm_srcR, endness_host);
+         } else {      // cmplw/di (unsigned) (PPC32 p370)
+            p = mkFormD(p, 10, fld1, r_srcL, imm_srcR, endness_host);
+         }
+         break;
+      case Prh_Reg:
+         r_srcR = iregEnc(srcR->Prh.Reg.reg, mode64);
+         if (syned)  // cmpwi  (signed)   (PPC32 p367)
+            p = mkFormX(p, 31, fld1, r_srcL, r_srcR, 0, 0, endness_host);
+         else        // cmplwi (unsigned) (PPC32 p379)
+            p = mkFormX(p, 31, fld1, r_srcL, r_srcR, 32, 0, endness_host);
+         break;
+      default: 
+         goto bad;
+      }        
+      goto done;
+   }
+
+   case Pin_Unary: {
+      UInt r_dst = iregEnc(i->Pin.Unary.dst, mode64);
+      UInt r_src = iregEnc(i->Pin.Unary.src, mode64);
+
+      switch (i->Pin.Unary.op) {
+      case Pun_NOT:  // nor r_dst,r_src,r_src
+         p = mkFormX(p, 31, r_src, r_dst, r_src, 124, 0, endness_host);
+         break;
+      case Pun_NEG:  // neg r_dst,r_src
+         p = mkFormXO(p, 31, r_dst, r_src, 0, 0, 104, 0, endness_host);
+         break;
+      case Pun_CLZ32:  // cntlzw r_dst, r_src
+         p = mkFormX(p, 31, r_src, r_dst, 0, 26, 0, endness_host);
+         break;
+      case Pun_CLZ64:  // cntlzd r_dst, r_src
+         vassert(mode64);
+         p = mkFormX(p, 31, r_src, r_dst, 0, 58, 0, endness_host);
+         break;
+      case Pun_EXTSW:  // extsw r_dst, r_src
+         vassert(mode64);
+         p = mkFormX(p, 31, r_src, r_dst, 0, 986, 0, endness_host);
+         break;
+      default: goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_MulL: {
+      Bool syned  = i->Pin.MulL.syned;
+      Bool sz32   = i->Pin.MulL.sz32;
+      UInt r_dst  = iregEnc(i->Pin.MulL.dst, mode64);
+      UInt r_srcL = iregEnc(i->Pin.MulL.srcL, mode64);
+      UInt r_srcR = iregEnc(i->Pin.MulL.srcR, mode64);
+
+      if (!mode64)
+         vassert(sz32);
+
+      if (i->Pin.MulL.hi) {
+         // mul hi words, must consider sign
+         if (sz32) {
+            if (syned)  // mulhw r_dst,r_srcL,r_srcR
+               p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 75, 0,
+                            endness_host);
+            else        // mulhwu r_dst,r_srcL,r_srcR
+               p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 11, 0,
+                            endness_host);
+         } else {
+            if (syned)  // mulhd r_dst,r_srcL,r_srcR
+               p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 73, 0,
+                            endness_host);
+            else        // mulhdu r_dst,r_srcL,r_srcR
+               p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 9, 0, endness_host);
+         }
+      } else {
+         // mul low word, sign is irrelevant
+         vassert(!i->Pin.MulL.syned);
+         if (sz32)      // mullw r_dst,r_srcL,r_srcR
+            p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 235, 0, endness_host);
+         else           // mulld r_dst,r_srcL,r_srcR
+            p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 233, 0, endness_host);
+      }
+      goto done;
+   }
+
+   case Pin_Div: {
+      Bool syned  = i->Pin.Div.syned;
+      Bool sz32   = i->Pin.Div.sz32;
+      UInt r_dst  = iregEnc(i->Pin.Div.dst, mode64);
+      UInt r_srcL = iregEnc(i->Pin.Div.srcL, mode64);
+      UInt r_srcR = iregEnc(i->Pin.Div.srcR, mode64);
+
+      if (!mode64)
+         vassert(sz32);
+
+      if (i->Pin.Div.extended) {
+         if (sz32) {
+            if (syned)
+               // divwe r_dst,r_srcL,r_srcR
+               p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 427, 0,
+                            endness_host);
+            else
+               // divweu r_dst,r_srcL,r_srcR
+               p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 395, 0,
+                            endness_host);
+         } else {
+            if (syned)
+               // divde r_dst,r_srcL,r_srcR
+               p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 425, 0,
+                            endness_host);
+            else
+               // divdeu r_dst,r_srcL,r_srcR
+               p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 393, 0,
+                            endness_host);
+         }
+      } else if (sz32) {
+         if (syned)  // divw r_dst,r_srcL,r_srcR
+            p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 491, 0, endness_host);
+         else        // divwu r_dst,r_srcL,r_srcR
+            p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 459, 0, endness_host);
+      } else {
+         if (syned)  // divd r_dst,r_srcL,r_srcR
+            p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 489, 0, endness_host);
+         else        // divdu r_dst,r_srcL,r_srcR
+            p = mkFormXO(p, 31, r_dst, r_srcL, r_srcR, 0, 457, 0, endness_host);
+      }
+      goto done;
+   }
+
+   case Pin_Call: {
+      if (i->Pin.Call.cond.test != Pct_ALWAYS
+          && i->Pin.Call.rloc.pri != RLPri_None) {
+         /* The call might not happen (it isn't unconditional) and it
+            returns a result.  In this case we will need to generate a
+            control flow diamond to put 0x555..555 in the return
+            register(s) in the case where the call doesn't happen.  If
+            this ever becomes necessary, maybe copy code from the ARM
+            equivalent.  Until that day, just give up. */
+         goto bad;
+      }
+      PPCCondCode cond  = i->Pin.Call.cond;
+      UInt        r_dst = 10;
+      /* As per detailed comment for Pin_Call in
+         getRegUsage_PPCInstr above, %r10 is used as an address temp */
+
+      /* jump over the following insns if condition does not hold */
+      UChar* ptmp = NULL;
+      if (cond.test != Pct_ALWAYS) {
+         /* jmp fwds if !condition */
+         /* don't know how many bytes to jump over yet...
+            make space for a jump instruction and fill in later. */
+         ptmp = p; /* fill in this bit later */
+         p += 4;                                          // p += 4
+      }
+
+      /* load target to r_dst */                          // p += 4|8|20
+      p = mkLoadImm(p, r_dst, i->Pin.Call.target, mode64, endness_host);
+
+      /* mtspr 9,r_dst => move r_dst to count register */
+      p = mkFormXFX(p, r_dst, 9, 467, endness_host);               // p += 4
+      
+      /* bctrl => branch to count register (and save to lr) */
+      p = mkFormXL(p, 19, Pct_ALWAYS, 0, 0, 528, 1, endness_host); // p += 4
+
+      /* Fix up the conditional jump, if there was one. */
+      if (cond.test != Pct_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta >= 16 && delta <= 32);
+         /* bc !ct,cf,delta */
+         mkFormB(ptmp, invertCondTest(cond.test),
+                 cond.flag, (delta>>2), 0, 0, endness_host);
+      }
+      goto done;
+   }
+
+   case Pin_XDirect: {
+      /* NB: what goes on here has to be very closely coordinated
+         with the chainXDirect_PPC and unchainXDirect_PPC below. */
+      /* We're generating chain-me requests here, so we need to be
+            sure this is actually allowed -- no-redir translations
+            can't use chain-me's.  Hence: */
+      vassert(disp_cp_chain_me_to_slowEP != NULL);
+      vassert(disp_cp_chain_me_to_fastEP != NULL);
+
+      /* First off, if this is conditional, create a conditional jump
+         over the rest of it.  Or at least, leave a space for it that
+         we will shortly fill in. */
+      UChar* ptmp = NULL;
+      if (i->Pin.XDirect.cond.test != Pct_ALWAYS) {
+         vassert(i->Pin.XDirect.cond.flag != Pcf_NONE);
+         ptmp = p;
+         p += 4;
+      } else {
+         vassert(i->Pin.XDirect.cond.flag == Pcf_NONE);
+      }
+
+      /* Update the guest CIA. */
+      /* imm32/64 r30, dstGA */
+      if (!mode64) vassert(0 == (((ULong)i->Pin.XDirect.dstGA) >> 32));
+      p = mkLoadImm(p, /*r*/30, (ULong)i->Pin.XDirect.dstGA, mode64,
+                    endness_host);
+      /* stw/std r30, amCIA */
+      p = do_load_or_store_machine_word(
+             p, False/*!isLoad*/,
+             /*r*/30, i->Pin.XDirect.amCIA, mode64, endness_host
+          );
+
+      /* --- FIRST PATCHABLE BYTE follows --- */
+      /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're calling
+         to) backs up the return address, so as to find the address of
+         the first patchable byte.  So: don't change the number of
+         instructions (32-bit: 4, 64-bit: 7) below. */
+      /* imm32/64-fixed r30, VG_(disp_cp_chain_me_to_{slowEP,fastEP} */
+      const void* disp_cp_chain_me
+               = i->Pin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP 
+                                         : disp_cp_chain_me_to_slowEP;
+      p = mkLoadImm_EXACTLY2or5(
+             p, /*r*/30, (Addr)disp_cp_chain_me, mode64, endness_host);
+      /* mtctr r30 */
+      p = mkFormXFX(p, /*r*/30, 9, 467, endness_host);
+      /* bctrl */
+      p = mkFormXL(p, 19, Pct_ALWAYS, 0, 0, 528, 1, endness_host);
+      /* --- END of PATCHABLE BYTES --- */
+
+      /* Fix up the conditional jump, if there was one. */
+      if (i->Pin.XDirect.cond.test != Pct_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta >= 16 && delta <= 64 && 0 == (delta & 3));
+         /* bc !ct,cf,delta */
+         mkFormB(ptmp, invertCondTest(i->Pin.XDirect.cond.test),
+                 i->Pin.XDirect.cond.flag, (delta>>2), 0, 0, endness_host);
+      }
+      goto done;
+   }
+
+   case Pin_XIndir: {
+      /* We're generating transfers that could lead indirectly to a
+         chain-me, so we need to be sure this is actually allowed --
+         no-redir translations are not allowed to reach normal
+         translations without going through the scheduler.  That means
+         no XDirects or XIndirs out from no-redir translations.
+         Hence: */
+      vassert(disp_cp_xindir != NULL);
+
+      /* First off, if this is conditional, create a conditional jump
+         over the rest of it.  Or at least, leave a space for it that
+         we will shortly fill in. */
+      UChar* ptmp = NULL;
+      if (i->Pin.XIndir.cond.test != Pct_ALWAYS) {
+         vassert(i->Pin.XIndir.cond.flag != Pcf_NONE);
+         ptmp = p;
+         p += 4;
+      } else {
+         vassert(i->Pin.XIndir.cond.flag == Pcf_NONE);
+      }
+
+      /* Update the guest CIA. */
+      /* stw/std r-dstGA, amCIA */
+      p = do_load_or_store_machine_word(
+             p, False/*!isLoad*/,
+             iregEnc(i->Pin.XIndir.dstGA, mode64),
+             i->Pin.XIndir.amCIA, mode64, endness_host
+          );
+
+      /* imm32/64 r30, VG_(disp_cp_xindir) */
+      p = mkLoadImm(p, /*r*/30, (ULong)(Addr)disp_cp_xindir, mode64,
+                    endness_host);
+      /* mtctr r30 */
+      p = mkFormXFX(p, /*r*/30, 9, 467, endness_host);
+      /* bctr */
+      p = mkFormXL(p, 19, Pct_ALWAYS, 0, 0, 528, 0, endness_host);
+
+      /* Fix up the conditional jump, if there was one. */
+      if (i->Pin.XIndir.cond.test != Pct_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta >= 16 && delta <= 32 && 0 == (delta & 3));
+         /* bc !ct,cf,delta */
+         mkFormB(ptmp, invertCondTest(i->Pin.XIndir.cond.test),
+                 i->Pin.XIndir.cond.flag, (delta>>2), 0, 0, endness_host);
+      }
+      goto done;
+   }
+
+   case Pin_XAssisted: {
+      /* First off, if this is conditional, create a conditional jump
+         over the rest of it.  Or at least, leave a space for it that
+         we will shortly fill in. */
+      UChar* ptmp = NULL;
+      if (i->Pin.XAssisted.cond.test != Pct_ALWAYS) {
+         vassert(i->Pin.XAssisted.cond.flag != Pcf_NONE);
+         ptmp = p;
+         p += 4;
+      } else {
+         vassert(i->Pin.XAssisted.cond.flag == Pcf_NONE);
+      }
+
+      /* Update the guest CIA. */
+      /* stw/std r-dstGA, amCIA */
+      p = do_load_or_store_machine_word(
+             p, False/*!isLoad*/,
+             iregEnc(i->Pin.XIndir.dstGA, mode64),
+             i->Pin.XIndir.amCIA, mode64, endness_host
+          );
+
+      /* imm32/64 r31, $magic_number */
+      UInt trcval = 0;
+      switch (i->Pin.XAssisted.jk) {
+         case Ijk_ClientReq:   trcval = VEX_TRC_JMP_CLIENTREQ;   break;
+         case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
+         //case Ijk_Sys_int128:  trcval = VEX_TRC_JMP_SYS_INT128;  break;
+         //case Ijk_Yield:       trcval = VEX_TRC_JMP_YIELD;       break;
+         case Ijk_EmWarn:      trcval = VEX_TRC_JMP_EMWARN;      break;
+         case Ijk_EmFail:      trcval = VEX_TRC_JMP_EMFAIL;      break;
+         //case Ijk_MapFail:     trcval = VEX_TRC_JMP_MAPFAIL;     break;
+         case Ijk_NoDecode:    trcval = VEX_TRC_JMP_NODECODE;    break;
+         case Ijk_InvalICache: trcval = VEX_TRC_JMP_INVALICACHE; break;
+         case Ijk_NoRedir:     trcval = VEX_TRC_JMP_NOREDIR;     break;
+         case Ijk_SigTRAP:     trcval = VEX_TRC_JMP_SIGTRAP;     break;
+         //case Ijk_SigSEGV:     trcval = VEX_TRC_JMP_SIGSEGV;     break;
+         case Ijk_SigBUS:        trcval = VEX_TRC_JMP_SIGBUS;    break;
+         case Ijk_Boring:      trcval = VEX_TRC_JMP_BORING;      break;
+         /* We don't expect to see the following being assisted. */
+         //case Ijk_Ret:
+         //case Ijk_Call:
+         /* fallthrough */
+         default: 
+            ppIRJumpKind(i->Pin.XAssisted.jk);
+            vpanic("emit_ARMInstr.Pin_XAssisted: unexpected jump kind");
+      }
+      vassert(trcval != 0);
+      p = mkLoadImm(p, /*r*/31, trcval, mode64, endness_host);
+
+      /* imm32/64 r30, VG_(disp_cp_xassisted) */
+      p = mkLoadImm(p, /*r*/30,
+                    (ULong)(Addr)disp_cp_xassisted, mode64,
+                     endness_host);
+      /* mtctr r30 */
+      p = mkFormXFX(p, /*r*/30, 9, 467, endness_host);
+      /* bctr */
+      p = mkFormXL(p, 19, Pct_ALWAYS, 0, 0, 528, 0, endness_host);
+
+      /* Fix up the conditional jump, if there was one. */
+      if (i->Pin.XAssisted.cond.test != Pct_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta >= 16 && delta <= 32 && 0 == (delta & 3));
+         /* bc !ct,cf,delta */
+         mkFormB(ptmp, invertCondTest(i->Pin.XAssisted.cond.test),
+                 i->Pin.XAssisted.cond.flag, (delta>>2), 0, 0, endness_host);
+      }
+      goto done;
+   }
+
+   case Pin_CMov: {
+      /* Conditional move: dst := src if cond holds, else dst unchanged.
+         Emitted as a conditional branch (on the inverted condition)
+         over the move instruction(s). */
+      UInt  r_dst, r_src;
+      ULong imm_src;
+      PPCCondCode cond;
+      vassert(i->Pin.CMov.cond.test != Pct_ALWAYS);
+
+      r_dst = iregEnc(i->Pin.CMov.dst, mode64);
+      cond = i->Pin.CMov.cond;
+
+      /* branch (if cond fails) over move instrs */
+      UChar* ptmp = NULL;
+      if (cond.test != Pct_ALWAYS) {
+         /* don't know how many bytes to jump over yet...
+            make space for a jump instruction and fill in later. */
+         ptmp = p; /* fill in this bit later */
+         p += 4;
+      }
+
+      // cond true: move src => dst
+      switch (i->Pin.CMov.src->tag) {
+      case Pri_Imm:
+         imm_src = i->Pin.CMov.src->Pri.Imm;
+         p = mkLoadImm(p, r_dst, imm_src, mode64, endness_host);  // p += 4|8|20
+         break;
+      case Pri_Reg:
+         r_src = iregEnc(i->Pin.CMov.src->Pri.Reg, mode64);
+         p = mkMoveReg(p, r_dst, r_src, endness_host);            // p += 4
+         break;
+      default: goto bad;
+      }
+
+      /* Fix up the conditional jump, if there was one. */
+      if (cond.test != Pct_ALWAYS) {
+         Int delta = p - ptmp;
+         /* delta bounded by the worst-case mkLoadImm length plus the
+            branch itself (see "p += 4|8|20" above). */
+         vassert(delta >= 8 && delta <= 24);
+         /* bc !ct,cf,delta */
+         mkFormB(ptmp, invertCondTest(cond.test),
+                 cond.flag, (delta>>2), 0, 0, endness_host);
+      }
+      goto done;
+   }
+
+   case Pin_Load: {
+      /* Integer load of 1/2/4/8 bytes from either an imm-displacement
+         (Pam_IR) or register-indexed (Pam_RR) amode.  opc1 selects the
+         D-form opcode (lbz/lhz/lwz/ld); opc2 the X-form extended
+         opcode (lbzx/lhzx/lwzx/ldx). */
+      PPCAMode* am_addr = i->Pin.Load.src;
+      UInt r_dst = iregEnc(i->Pin.Load.dst, mode64);
+      UInt opc1, opc2, sz = i->Pin.Load.sz;
+      switch (am_addr->tag) {
+      case Pam_IR:
+         if (mode64 && (sz == 4 || sz == 8)) {
+            /* should be guaranteed to us by iselWordExpr_AMode */
+            vassert(0 == (am_addr->Pam.IR.index & 3));
+         }
+         switch(sz) {
+            case 1:  opc1 = 34; break;
+            case 2:  opc1 = 40; break;
+            case 4:  opc1 = 32; break;
+            case 8:  opc1 = 58; vassert(mode64); break;
+            default: goto bad;
+         }
+         p = doAMode_IR(p, opc1, r_dst, am_addr, mode64, endness_host);
+         goto done;
+      case Pam_RR:
+         switch(sz) {
+            case 1:  opc2 = 87;  break;
+            case 2:  opc2 = 279; break;
+            case 4:  opc2 = 23;  break;
+            case 8:  opc2 = 21; vassert(mode64); break;
+            default: goto bad;
+         }
+         p = doAMode_RR(p, 31, opc2, r_dst, am_addr, mode64, endness_host);
+         goto done;
+      default:
+         goto bad;
+      }
+   }
+
+   case Pin_LoadL: {
+      /* Load-linked (load with reservation): lbarx/lharx/lwarx/ldarx,
+         paired with Pin_StoreC below.  rA is 0, so EA == rB (src). */
+      if (i->Pin.LoadL.sz == 1) {
+         p = mkFormX(p, 31, iregEnc(i->Pin.LoadL.dst, mode64),
+                     0, iregEnc(i->Pin.LoadL.src, mode64), 52, 0, endness_host);
+         goto done;
+      }
+      if (i->Pin.LoadL.sz == 2) {
+         p = mkFormX(p, 31, iregEnc(i->Pin.LoadL.dst, mode64),
+                     0, iregEnc(i->Pin.LoadL.src, mode64), 116, 0, endness_host);
+         goto done;
+      }
+      if (i->Pin.LoadL.sz == 4) {
+         p = mkFormX(p, 31, iregEnc(i->Pin.LoadL.dst, mode64),
+                     0, iregEnc(i->Pin.LoadL.src, mode64), 20, 0, endness_host);
+         goto done;
+      }
+      if (i->Pin.LoadL.sz == 8 && mode64) {
+         p = mkFormX(p, 31, iregEnc(i->Pin.LoadL.dst, mode64),
+                     0, iregEnc(i->Pin.LoadL.src, mode64), 84, 0, endness_host);
+         goto done;
+      }
+      goto bad;
+   }
+
+   case Pin_Set: {
+      /* Make the destination register be 1 or 0, depending on whether
+         the relevant condition holds. */
+      UInt        r_dst = iregEnc(i->Pin.Set.dst, mode64);
+      PPCCondCode cond  = i->Pin.Set.cond;
+      UInt rot_imm, r_tmp;
+
+      if (cond.test == Pct_ALWAYS) {
+         // Just load 1 to dst => li dst,1
+         p = mkFormD(p, 14, r_dst, 0, 1, endness_host);
+      } else {
+         vassert(cond.flag != Pcf_NONE);
+         /* rotate amount puts the selected CR bit into the LSB */
+         rot_imm = 1 + cond.flag;
+         r_tmp = 0;  // Not set in getAllocable, so no need to declare.
+
+         // r_tmp = CR  => mfcr r_tmp
+         p = mkFormX(p, 31, r_tmp, 0, 0, 19, 0, endness_host);
+
+         // r_dst = flag (rotate left and mask)
+         //  => rlwinm r_dst,r_tmp,rot_imm,31,31
+         p = mkFormM(p, 21, r_tmp, r_dst, rot_imm, 31, 31, 0, endness_host);
+
+         if (cond.test == Pct_FALSE) {
+            // flip bit  => xori r_dst,r_dst,1
+            p = mkFormD(p, 26, r_dst, r_dst, 1, endness_host);
+         }
+      }
+      goto done;
+   }
+
+   case Pin_MfCR:
+      /* Copy the whole condition register into an integer register. */
+      // mfcr dst
+      p = mkFormX(p, 31, iregEnc(i->Pin.MfCR.dst, mode64), 0, 0, 19, 0,
+                  endness_host);
+      goto done;
+
+   case Pin_MFence: {
+      /* Full memory barrier. */
+      p = mkFormX(p, 31, 0, 0, 0, 598, 0, endness_host);   // sync, PPC32 p616
+      // CAB: Should this be isync?
+      //    p = mkFormXL(p, 19, 0, 0, 0, 150, 0);  // isync, PPC32 p467
+      goto done;
+   }
+
+   case Pin_Store: {
+      /* Integer store of 1/2/4/8 bytes; mirror image of Pin_Load.
+         opc1: stb/sth/stw/std (D-form); opc2: stbx/sthx/stwx/stdx
+         (X-form). */
+      PPCAMode* am_addr = i->Pin.Store.dst;
+      UInt r_src = iregEnc(i->Pin.Store.src, mode64);
+      UInt opc1, opc2, sz = i->Pin.Store.sz;
+      switch (i->Pin.Store.dst->tag) {
+      case Pam_IR:
+         if (mode64 && (sz == 4 || sz == 8)) {
+            /* should be guaranteed to us by iselWordExpr_AMode */
+            vassert(0 == (am_addr->Pam.IR.index & 3));
+         }
+         switch(sz) {
+         case 1: opc1 = 38; break;
+         case 2: opc1 = 44; break;
+         case 4: opc1 = 36; break;
+         case 8: vassert(mode64);
+                 opc1 = 62; break;
+         default:
+            goto bad;
+         }
+         p = doAMode_IR(p, opc1, r_src, am_addr, mode64, endness_host);
+         goto done;
+      case Pam_RR:
+         switch(sz) {
+         case 1: opc2 = 215; break;
+         case 2: opc2 = 407; break;
+         case 4: opc2 = 151; break;
+         case 8: vassert(mode64);
+                 opc2 = 149; break;
+         default:
+            goto bad;
+         }
+         p = doAMode_RR(p, 31, opc2, r_src, am_addr, mode64, endness_host);
+         goto done;
+      default:
+         goto bad;
+      }
+      goto done; /* unreachable: every switch arm above already exits */
+   }
+
+   case Pin_StoreC: {
+      /* Store-conditional (stbcx./sthcx./stwcx./stdcx.), paired with
+         Pin_LoadL.  The trailing 1 sets the Rc bit, which these
+         instructions require. */
+      if (i->Pin.StoreC.sz == 1) {
+         p = mkFormX(p, 31, iregEnc(i->Pin.StoreC.src, mode64),
+                     0, iregEnc(i->Pin.StoreC.dst, mode64), 694, 1, endness_host);
+         goto done;
+      }
+      if (i->Pin.StoreC.sz == 2) {
+         p = mkFormX(p, 31, iregEnc(i->Pin.StoreC.src, mode64),
+                     0, iregEnc(i->Pin.StoreC.dst, mode64), 726, 1, endness_host);
+         goto done;
+      }
+
+      if (i->Pin.StoreC.sz == 4) {
+         p = mkFormX(p, 31, iregEnc(i->Pin.StoreC.src, mode64),
+                     0, iregEnc(i->Pin.StoreC.dst, mode64), 150, 1, endness_host);
+         goto done;
+      }
+      if (i->Pin.StoreC.sz == 8 && mode64) {
+         p = mkFormX(p, 31, iregEnc(i->Pin.StoreC.src, mode64),
+                     0, iregEnc(i->Pin.StoreC.dst, mode64), 214, 1, endness_host);
+         goto done;
+      }
+      goto bad;
+   }
+
+   case Pin_FpUnary: {
+      /* Scalar FP unary op, FPR -> FPR. */
+      UInt fr_dst = fregEnc(i->Pin.FpUnary.dst);
+      UInt fr_src = fregEnc(i->Pin.FpUnary.src);
+      switch (i->Pin.FpUnary.op) {
+      case Pfp_RSQRTE: // frsqrte, PPC32 p424
+         p = mkFormA( p, 63, fr_dst, 0, fr_src, 0, 26, 0, endness_host );
+         break;
+      case Pfp_RES:   // fres, PPC32 p421
+         p = mkFormA( p, 59, fr_dst, 0, fr_src, 0, 24, 0, endness_host );
+         break;
+      case Pfp_SQRT:  // fsqrt, PPC32 p427
+         p = mkFormA( p, 63, fr_dst, 0, fr_src, 0, 22, 0, endness_host );
+         break;
+      case Pfp_ABS:   // fabs, PPC32 p399
+         p = mkFormX(p, 63, fr_dst, 0, fr_src, 264, 0, endness_host);
+         break;
+      case Pfp_NEG:   // fneg, PPC32 p416
+         p = mkFormX(p, 63, fr_dst, 0, fr_src, 40, 0, endness_host);
+         break;
+      case Pfp_MOV:   // fmr, PPC32 p410
+         p = mkFormX(p, 63, fr_dst, 0, fr_src, 72, 0, endness_host);
+         break;
+      case Pfp_FRIM:  // frim, PPC ISA 2.05 p137
+         p = mkFormX(p, 63, fr_dst, 0, fr_src, 488, 0, endness_host);
+         break;
+      case Pfp_FRIP:  // frip, PPC ISA 2.05 p137
+         p = mkFormX(p, 63, fr_dst, 0, fr_src, 456, 0, endness_host);
+         break;
+      case Pfp_FRIN:  // frin, PPC ISA 2.05 p137
+         p = mkFormX(p, 63, fr_dst, 0, fr_src, 392, 0, endness_host);
+         break;
+      case Pfp_FRIZ:  // friz, PPC ISA 2.05 p137
+         p = mkFormX(p, 63, fr_dst, 0, fr_src, 424, 0, endness_host);
+         break;
+      default:
+         goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_FpBinary: {
+      /* Scalar FP binary op.  Primary opcode 63 = double precision,
+         59 = single precision. */
+      UInt fr_dst  = fregEnc(i->Pin.FpBinary.dst);
+      UInt fr_srcL = fregEnc(i->Pin.FpBinary.srcL);
+      UInt fr_srcR = fregEnc(i->Pin.FpBinary.srcR);
+      switch (i->Pin.FpBinary.op) {
+      case Pfp_ADDD:   // fadd, PPC32 p400
+         p = mkFormA( p, 63, fr_dst, fr_srcL, fr_srcR, 0, 21, 0, endness_host );
+         break;
+      case Pfp_ADDS:   // fadds, PPC32 p401
+         p = mkFormA( p, 59, fr_dst, fr_srcL, fr_srcR, 0, 21, 0, endness_host );
+         break;
+      case Pfp_SUBD:   // fsub, PPC32 p429
+         p = mkFormA( p, 63, fr_dst, fr_srcL, fr_srcR, 0, 20, 0, endness_host );
+         break;
+      case Pfp_SUBS:   // fsubs, PPC32 p430
+         p = mkFormA( p, 59, fr_dst, fr_srcL, fr_srcR, 0, 20, 0, endness_host );
+         break;
+      case Pfp_MULD:   // fmul, PPC32 p413
+         /* note: fmul takes its multiplier in the frC field, hence
+            fr_srcR appears one argument later than for add/sub/div */
+         p = mkFormA( p, 63, fr_dst, fr_srcL, 0, fr_srcR, 25, 0, endness_host );
+         break;
+      case Pfp_MULS:   // fmuls, PPC32 p414
+         p = mkFormA( p, 59, fr_dst, fr_srcL, 0, fr_srcR, 25, 0, endness_host );
+         break;
+      case Pfp_DIVD:   // fdiv, PPC32 p406
+         p = mkFormA( p, 63, fr_dst, fr_srcL, fr_srcR, 0, 18, 0, endness_host );
+         break;
+      case Pfp_DIVS:   // fdivs, PPC32 p407
+         p = mkFormA( p, 59, fr_dst, fr_srcL, fr_srcR, 0, 18, 0, endness_host );
+         break;
+      default:
+         goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_FpMulAcc: {
+      /* Fused multiply-add/sub: dst = srcML * srcMR (+|-) srcAcc. */
+      UInt fr_dst    = fregEnc(i->Pin.FpMulAcc.dst);
+      UInt fr_srcML  = fregEnc(i->Pin.FpMulAcc.srcML);
+      UInt fr_srcMR  = fregEnc(i->Pin.FpMulAcc.srcMR);
+      UInt fr_srcAcc = fregEnc(i->Pin.FpMulAcc.srcAcc);
+      switch (i->Pin.FpMulAcc.op) {
+      case Pfp_MADDD:   // fmadd, PPC32 p408
+         p = mkFormA( p, 63, fr_dst, fr_srcML, fr_srcAcc, fr_srcMR, 29, 0,
+                      endness_host );
+         break;
+      case Pfp_MADDS:   // fmadds, PPC32 p409
+         p = mkFormA( p, 59, fr_dst, fr_srcML, fr_srcAcc, fr_srcMR, 29, 0,
+                      endness_host );
+         break;
+      case Pfp_MSUBD:   // fmsub, PPC32 p411
+         p = mkFormA( p, 63, fr_dst, fr_srcML, fr_srcAcc, fr_srcMR, 28, 0,
+                      endness_host );
+         break;
+      case Pfp_MSUBS:   // fmsubs, PPC32 p412
+         p = mkFormA( p, 59, fr_dst, fr_srcML, fr_srcAcc, fr_srcMR, 28, 0,
+                      endness_host );
+         break;
+      default:
+         goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_FpLdSt: {
+      /* FP load/store of 4 or 8 bytes, via IR (displacement) or RR
+         (indexed) amode. */
+      PPCAMode* am_addr = i->Pin.FpLdSt.addr;
+      UInt f_reg = fregEnc(i->Pin.FpLdSt.reg);
+      Bool idxd = toBool(i->Pin.FpLdSt.addr->tag == Pam_RR);
+      UChar sz = i->Pin.FpLdSt.sz;
+      UInt opc;
+      vassert(sz == 4 || sz == 8);
+
+      if (i->Pin.FpLdSt.isLoad) {   // Load from memory
+         if (idxd) {  // lf[s|d]x, PPC32 p444|440
+            opc = (sz == 4) ? 535 : 599;
+            p = doAMode_RR(p, 31, opc, f_reg, am_addr, mode64, endness_host);
+         } else {     // lf[s|d], PPC32 p441|437
+            opc = (sz == 4) ? 48 : 50;
+            p = doAMode_IR(p, opc, f_reg, am_addr, mode64, endness_host);
+         }
+      } else {                      // Store to memory
+         if (idxd) { // stf[s|d]x, PPC32 p521|516
+            opc = (sz == 4) ? 663 : 727;
+            p = doAMode_RR(p, 31, opc, f_reg, am_addr, mode64, endness_host);
+         } else {    // stf[s|d], PPC32 p518|513
+            opc = (sz == 4) ? 52 : 54;
+            p = doAMode_IR(p, opc, f_reg, am_addr, mode64, endness_host);
+         }
+      }
+      goto done;
+   }
+
+   case Pin_FpSTFIW: {
+      UInt ir_addr = iregEnc(i->Pin.FpSTFIW.addr, mode64);
+      UInt fr_data = fregEnc(i->Pin.FpSTFIW.data);
+      // stfiwx (store fp64[lo32] as int32), PPC32 p517
+      // Use rA==0, so that EA == rB == ir_addr
+      p = mkFormX(p, 31, fr_data, 0/*rA=0*/, ir_addr, 983, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_FpRSP: {
+      /* Round double to single precision. */
+      UInt fr_dst = fregEnc(i->Pin.FpRSP.dst);
+      UInt fr_src = fregEnc(i->Pin.FpRSP.src);
+      // frsp, PPC32 p423
+      p = mkFormX(p, 63, fr_dst, 0, fr_src, 12, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_FpCftI: {
+      /* FP <-> integer conversions, selected by the (fromI, int32,
+         syned, flt64) flags.  Note: fromI==True with int32==True has no
+         emitted form here and falls through to "bad". */
+      UInt fr_dst = fregEnc(i->Pin.FpCftI.dst);
+      UInt fr_src = fregEnc(i->Pin.FpCftI.src);
+      if (i->Pin.FpCftI.fromI == False && i->Pin.FpCftI.int32 == True) {
+         if (i->Pin.FpCftI.syned == True) {
+            // fctiw (conv f64 to i32), PPC32 p404
+            p = mkFormX(p, 63, fr_dst, 0, fr_src, 14, 0, endness_host);
+            goto done;
+         } else {
+            // fctiwu (conv f64 to u32)
+            p = mkFormX(p, 63, fr_dst, 0, fr_src, 142, 0, endness_host);
+            goto done;
+         }
+      }
+      if (i->Pin.FpCftI.fromI == False && i->Pin.FpCftI.int32 == False) {
+         if (i->Pin.FpCftI.syned == True) {
+            // fctid (conv f64 to i64), PPC64 p437
+            p = mkFormX(p, 63, fr_dst, 0, fr_src, 814, 0, endness_host);
+            goto done;
+         } else {
+            // fctidu (conv f64 to u64)
+            p = mkFormX(p, 63, fr_dst, 0, fr_src, 942, 0, endness_host);
+            goto done;
+         }
+      }
+      if (i->Pin.FpCftI.fromI == True && i->Pin.FpCftI.int32 == False) {
+         if (i->Pin.FpCftI.syned == True) {
+            // fcfid (conv i64 to f64), PPC64 p434
+            p = mkFormX(p, 63, fr_dst, 0, fr_src, 846, 0, endness_host);
+            goto done;
+         } else if (i->Pin.FpCftI.flt64 == True) {
+            // fcfidu (conv u64 to f64)
+            p = mkFormX(p, 63, fr_dst, 0, fr_src, 974, 0, endness_host);
+            goto done;
+         } else {
+            // fcfidus (conv u64 to f32)
+            p = mkFormX(p, 59, fr_dst, 0, fr_src, 974, 0, endness_host);
+            goto done;
+         }
+      }
+      goto bad;
+   }
+
+   case Pin_FpCMov: {
+      /* FP conditional move: dst := src if cond holds.  Emitted as a
+         conditional branch over a single fmr. */
+      UInt        fr_dst = fregEnc(i->Pin.FpCMov.dst);
+      UInt        fr_src = fregEnc(i->Pin.FpCMov.src);
+      PPCCondCode cc     = i->Pin.FpCMov.cond;
+
+      if (fr_dst == fr_src) goto done;
+      
+      vassert(cc.test != Pct_ALWAYS);
+
+      /* jmp fwds if !condition */
+      /* (the guard is redundant given the vassert above, but kept) */
+      if (cc.test != Pct_ALWAYS) {
+         /* bc !ct,cf,n_bytes>>2 */
+         p = mkFormB(p, invertCondTest(cc.test), cc.flag, 8>>2, 0, 0,
+                     endness_host);
+      }
+
+      // fmr, PPC32 p410
+      p = mkFormX(p, 63, fr_dst, 0, fr_src, 72, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_FpLdFPSCR: {
+      /* Write an FPR into the FPSCR (field mask 0xFF = all fields). */
+      UInt fr_src = fregEnc(i->Pin.FpLdFPSCR.src);
+      p = mkFormXFL(p, 0xFF, fr_src, i->Pin.FpLdFPSCR.dfp_rm, endness_host); // mtfsf, PPC32 p480
+      goto done;
+   }
+
+   case Pin_FpCmp: {
+      /* Ordered FP compare into CR field 1, then extract that 4-bit
+         field into the low bits of an integer register. */
+      UChar crfD    = 1;
+      UInt  r_dst   = iregEnc(i->Pin.FpCmp.dst, mode64);
+      UInt  fr_srcL = fregEnc(i->Pin.FpCmp.srcL);
+      UInt  fr_srcR = fregEnc(i->Pin.FpCmp.srcR);
+      vassert(crfD < 8);
+      // fcmpo, PPC32 p402
+      p = mkFormX(p, 63, crfD<<2, fr_srcL, fr_srcR, 32, 0, endness_host);
+
+      // mfcr (mv CR to r_dst), PPC32 p467
+      p = mkFormX(p, 31, r_dst, 0, 0, 19, 0, endness_host);
+      
+      // rlwinm r_dst,r_dst,8,28,31, PPC32 p501
+      //  => rotate field 1 to bottomw of word, masking out upper 28
+      p = mkFormM(p, 21, r_dst, r_dst, 8, 28, 31, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_RdWrLR: {
+      /* Move between a GPR and the link register. */
+      UInt reg = iregEnc(i->Pin.RdWrLR.gpr, mode64);
+      /* wrLR==True ? mtlr r4 : mflr r4 */
+      p = mkFormXFX(p, reg, 8, (i->Pin.RdWrLR.wrLR==True) ? 467 : 339,
+                    endness_host);
+      goto done;
+   }
+
+
+   /* AltiVec */
+   case Pin_AvLdSt: {
+      /* AltiVec vector load/store of 1/2/4/16 bytes.  Only the
+         register-indexed (RR) form exists, so an IR amode is kludged by
+         first materialising the displacement in r30. */
+      UInt opc2, v_reg, r_idx, r_base;
+      UChar sz   = i->Pin.AvLdSt.sz;
+      Bool  idxd = toBool(i->Pin.AvLdSt.addr->tag == Pam_RR);
+      vassert(sz == 1 || sz == 2 || sz == 4 || sz == 16);
+
+      v_reg  = vregEnc(i->Pin.AvLdSt.reg);
+      r_base = iregEnc(i->Pin.AvLdSt.addr->Pam.RR.base, mode64);
+
+      // Only have AltiVec AMode_RR: kludge AMode_IR
+      if (!idxd) {
+         r_idx = 30;                       // XXX: Using r30 as temp
+         p = mkLoadImm(p, r_idx,
+                       i->Pin.AvLdSt.addr->Pam.IR.index, mode64, endness_host);
+      } else {
+         r_idx  = iregEnc(i->Pin.AvLdSt.addr->Pam.RR.index, mode64);
+      }
+
+      /* Fixed copy-paste bug: this tested i->Pin.FpLdSt.isLoad in the
+         AvLdSt case.  It presumably only worked because the two union
+         members alias at the same offset — now read the correct one. */
+      if (i->Pin.AvLdSt.isLoad) {  // Load from memory (1,2,4,16)
+         opc2 = (sz==1) ?   7 : (sz==2) ?  39 : (sz==4) ?  71 : 103;
+         p = mkFormX(p, 31, v_reg, r_idx, r_base, opc2, 0, endness_host);
+      } else {                      // Store to memory (1,2,4,16)
+         opc2 = (sz==1) ? 135 : (sz==2) ? 167 : (sz==4) ? 199 : 231;
+         p = mkFormX(p, 31, v_reg, r_idx, r_base, opc2, 0, endness_host);
+      }
+      goto done;
+   }
+
+   case Pin_AvUnary: {
+      /* AltiVec unary op.  MOV/NOT are synthesised as vor/vnor with
+         both source operands equal; the rest are genuine one-source
+         VX-form ops. */
+      UInt v_dst = vregEnc(i->Pin.AvUnary.dst);
+      UInt v_src = vregEnc(i->Pin.AvUnary.src);
+      UInt opc2;
+      switch (i->Pin.AvUnary.op) {
+      case Pav_MOV:       opc2 = 1156; break; // vor vD,vS,vS
+      case Pav_NOT:       opc2 = 1284; break; // vnor vD,vS,vS
+      case Pav_UNPCKH8S:  opc2 =  526; break; // vupkhsb
+      case Pav_UNPCKH16S: opc2 =  590; break; // vupkhsh
+      case Pav_UNPCKL8S:  opc2 =  654; break; // vupklsb
+      case Pav_UNPCKL16S: opc2 =  718; break; // vupklsh
+      case Pav_UNPCKHPIX: opc2 =  846; break; // vupkhpx
+      case Pav_UNPCKLPIX: opc2 =  974; break; // vupklpx
+
+      case Pav_ZEROCNTBYTE: opc2 = 1794; break; // vclzb
+      case Pav_ZEROCNTHALF: opc2 = 1858; break; // vclzh
+      case Pav_ZEROCNTWORD: opc2 = 1922; break; // vclzw
+      case Pav_ZEROCNTDBL:  opc2 = 1986; break; // vclzd
+      case Pav_BITMTXXPOSE: opc2 = 1292; break; // vgbbd
+      default:
+         goto bad;
+      }
+      switch (i->Pin.AvUnary.op) {
+      case Pav_MOV:
+      case Pav_NOT:
+         p = mkFormVX( p, 4, v_dst, v_src, v_src, opc2, endness_host );
+         break;
+      default:
+         p = mkFormVX( p, 4, v_dst, 0, v_src, opc2, endness_host );
+         break;
+      }
+      goto done;
+   }
+
+   case Pin_AvBinary: {
+      /* AltiVec 128-bit binary op.  Whole-vector shifts need two
+         instructions (octet shift then bit shift); the bitwise ops are
+         single VX-form instructions. */
+      UInt v_dst  = vregEnc(i->Pin.AvBinary.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvBinary.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvBinary.srcR);
+      UInt opc2;
+      if (i->Pin.AvBinary.op == Pav_SHL) {
+         p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 1036, endness_host ); // vslo
+         p = mkFormVX( p, 4, v_dst, v_dst,  v_srcR, 452, endness_host );  // vsl
+         goto done;
+      }
+      if (i->Pin.AvBinary.op == Pav_SHR) {
+         p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 1100, endness_host ); // vsro
+         p = mkFormVX( p, 4, v_dst, v_dst,  v_srcR, 708, endness_host );  // vsr
+         goto done;
+      }
+      switch (i->Pin.AvBinary.op) {
+      /* Bitwise */
+      case Pav_AND:       opc2 = 1028; break; // vand
+      case Pav_OR:        opc2 = 1156; break; // vor
+      case Pav_XOR:       opc2 = 1220; break; // vxor
+      default:
+         goto bad;
+      }
+      p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, opc2, endness_host );
+      goto done;
+   }
+
+   case Pin_AvBin8x16: {
+      /* AltiVec binary op on 16 lanes of 8 bits; pure opcode lookup
+         followed by one VX-form emission. */
+      UInt v_dst  = vregEnc(i->Pin.AvBin8x16.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvBin8x16.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvBin8x16.srcR);
+      UInt opc2;
+      switch (i->Pin.AvBin8x16.op) {
+
+      case Pav_ADDU:     opc2 =    0; break; // vaddubm
+      case Pav_QADDU:    opc2 =  512; break; // vaddubs
+      case Pav_QADDS:    opc2 =  768; break; // vaddsbs
+
+      case Pav_SUBU:     opc2 = 1024; break; // vsububm
+      case Pav_QSUBU:    opc2 = 1536; break; // vsububs
+      case Pav_QSUBS:    opc2 = 1792; break; // vsubsbs
+
+      case Pav_OMULU:   opc2 =    8; break; // vmuloub
+      case Pav_OMULS:   opc2 =  264; break; // vmulosb
+      case Pav_EMULU:   opc2 =  520; break; // vmuleub
+      case Pav_EMULS:   opc2 =  776; break; // vmulesb
+
+      case Pav_AVGU:     opc2 = 1026; break; // vavgub
+      case Pav_AVGS:     opc2 = 1282; break; // vavgsb
+      case Pav_MAXU:     opc2 =    2; break; // vmaxub
+      case Pav_MAXS:     opc2 =  258; break; // vmaxsb
+      case Pav_MINU:     opc2 =  514; break; // vminub
+      case Pav_MINS:     opc2 =  770; break; // vminsb
+
+      case Pav_CMPEQU:   opc2 =    6; break; // vcmpequb
+      case Pav_CMPGTU:   opc2 =  518; break; // vcmpgtub
+      case Pav_CMPGTS:   opc2 =  774; break; // vcmpgtsb
+
+      case Pav_SHL:      opc2 =  260; break; // vslb
+      case Pav_SHR:      opc2 =  516; break; // vsrb
+      case Pav_SAR:      opc2 =  772; break; // vsrab
+      case Pav_ROTL:     opc2 =    4; break; // vrlb
+
+      case Pav_MRGHI:    opc2 =   12; break; // vmrghb
+      case Pav_MRGLO:    opc2 =  268; break; // vmrglb
+
+      case Pav_POLYMULADD: opc2 = 1032; break; // vpmsumb
+
+      default:
+         goto bad;
+      }
+      p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, opc2, endness_host );
+      goto done;
+   }
+
+   case Pin_AvBin16x8: {
+      /* AltiVec binary op on 8 lanes of 16 bits; same shape as the
+         8x16 case above. */
+      UInt v_dst  = vregEnc(i->Pin.AvBin16x8.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvBin16x8.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvBin16x8.srcR);
+      UInt opc2;
+      switch (i->Pin.AvBin16x8.op) {
+
+      case Pav_ADDU:    opc2 =   64; break; // vadduhm
+      case Pav_QADDU:   opc2 =  576; break; // vadduhs
+      case Pav_QADDS:   opc2 =  832; break; // vaddshs
+
+      case Pav_SUBU:    opc2 = 1088; break; // vsubuhm
+      case Pav_QSUBU:   opc2 = 1600; break; // vsubuhs
+      case Pav_QSUBS:   opc2 = 1856; break; // vsubshs
+
+      case Pav_OMULU:   opc2 =   72; break; // vmulouh
+      case Pav_OMULS:   opc2 =  328; break; // vmulosh
+      case Pav_EMULU:   opc2 =  584; break; // vmuleuh
+      case Pav_EMULS:   opc2 =  840; break; // vmulesh
+
+      case Pav_AVGU:    opc2 = 1090; break; // vavguh
+      case Pav_AVGS:    opc2 = 1346; break; // vavgsh
+      case Pav_MAXU:    opc2 =   66; break; // vmaxuh
+      case Pav_MAXS:    opc2 =  322; break; // vmaxsh
+      case Pav_MINS:    opc2 =  834; break; // vminsh
+      case Pav_MINU:    opc2 =  578; break; // vminuh
+
+      case Pav_CMPEQU:  opc2 =   70; break; // vcmpequh
+      case Pav_CMPGTU:  opc2 =  582; break; // vcmpgtuh
+      case Pav_CMPGTS:  opc2 =  838; break; // vcmpgtsh
+
+      case Pav_SHL:     opc2 =  324; break; // vslh
+      case Pav_SHR:     opc2 =  580; break; // vsrh
+      case Pav_SAR:     opc2 =  836; break; // vsrah
+      case Pav_ROTL:    opc2 =   68; break; // vrlh
+
+      case Pav_PACKUU:  opc2 =   14; break; // vpkuhum
+      case Pav_QPACKUU: opc2 =  142; break; // vpkuhus
+      case Pav_QPACKSU: opc2 =  270; break; // vpkshus
+      case Pav_QPACKSS: opc2 =  398; break; // vpkshss
+      case Pav_PACKPXL: opc2 =  782; break; // vpkpx
+
+      case Pav_MRGHI:   opc2 =   76; break; // vmrghh
+      case Pav_MRGLO:   opc2 =  332; break; // vmrglh
+
+      case Pav_POLYMULADD: opc2 = 1224; break; // vpmsumh
+
+      default:
+         goto bad;
+      }
+      p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, opc2, endness_host );
+      goto done;
+   }
+
+   case Pin_AvBin32x4: {
+      /* AltiVec binary op on 4 lanes of 32 bits. */
+      UInt v_dst  = vregEnc(i->Pin.AvBin32x4.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvBin32x4.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvBin32x4.srcR);
+      UInt opc2;
+      switch (i->Pin.AvBin32x4.op) {
+
+      case Pav_ADDU:    opc2 =  128; break; // vadduwm
+      case Pav_QADDU:   opc2 =  640; break; // vadduws
+      case Pav_QADDS:   opc2 =  896; break; // vaddsws
+
+      case Pav_SUBU:    opc2 = 1152; break; // vsubuwm
+      case Pav_QSUBU:   opc2 = 1664; break; // vsubuws
+      case Pav_QSUBS:   opc2 = 1920; break; // vsubsws
+
+      case Pav_MULU:    opc2 =  137; break; // vmuluwm
+      case Pav_OMULU:   opc2 =  136; break; // vmulouw
+      case Pav_OMULS:   opc2 =  392; break; // vmulosw
+      case Pav_EMULU:   opc2 =  648; break; // vmuleuw
+      case Pav_EMULS:   opc2 =  904; break; // vmulesw
+
+      case Pav_AVGU:    opc2 = 1154; break; // vavguw
+      case Pav_AVGS:    opc2 = 1410; break; // vavgsw
+
+      case Pav_MAXU:    opc2 =  130; break; // vmaxuw
+      case Pav_MAXS:    opc2 =  386; break; // vmaxsw
+
+      case Pav_MINS:    opc2 =  898; break; // vminsw
+      case Pav_MINU:    opc2 =  642; break; // vminuw
+
+      case Pav_CMPEQU:  opc2 =  134; break; // vcmpequw
+      case Pav_CMPGTS:  opc2 =  902; break; // vcmpgtsw
+      case Pav_CMPGTU:  opc2 =  646; break; // vcmpgtuw
+
+      case Pav_SHL:     opc2 =  388; break; // vslw
+      case Pav_SHR:     opc2 =  644; break; // vsrw
+      case Pav_SAR:     opc2 =  900; break; // vsraw
+      case Pav_ROTL:    opc2 =  132; break; // vrlw
+
+      case Pav_PACKUU:  opc2 =   78; break; // vpkuwum
+      case Pav_QPACKUU: opc2 =  206; break; // vpkuwus
+      case Pav_QPACKSU: opc2 =  334; break; // vpkswus
+      case Pav_QPACKSS: opc2 =  462; break; // vpkswss
+
+      case Pav_MRGHI:   opc2 =  140; break; // vmrghw
+      case Pav_MRGLO:   opc2 =  396; break; // vmrglw
+
+      case Pav_CATODD:  opc2 = 1676; break; // vmrgow
+      case Pav_CATEVEN: opc2 = 1932; break; // vmrgew
+
+      case Pav_POLYMULADD: opc2 = 1160; break; // vpmsumw
+
+      default:
+         goto bad;
+      }
+      p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, opc2, endness_host );
+      goto done;
+   }
+
+   case Pin_AvBin64x2: {
+      /* AltiVec binary op on 2 lanes of 64 bits (ISA 2.07 doubleword
+         vector ops). */
+      UInt v_dst  = vregEnc(i->Pin.AvBin64x2.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvBin64x2.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvBin64x2.srcR);
+      UInt opc2;
+      switch (i->Pin.AvBin64x2.op) {
+      case Pav_ADDU:    opc2 =  192; break; // vaddudm  vector double add
+      case Pav_SUBU:    opc2 = 1216; break; // vsubudm  vector double add
+      case Pav_MAXU:    opc2 =  194; break; // vmaxud   vector double max
+      case Pav_MAXS:    opc2 =  450; break; // vmaxsd   vector double max
+      case Pav_MINU:    opc2 =  706; break; // vminud   vector double min
+      case Pav_MINS:    opc2 =  962; break; // vminsd   vector double min
+      case Pav_CMPEQU:  opc2 =  199; break; // vcmpequd vector double compare
+      case Pav_CMPGTU:  opc2 =  711; break; // vcmpgtud vector double compare
+      case Pav_CMPGTS:  opc2 =  967; break; // vcmpgtsd vector double compare
+      case Pav_SHL:     opc2 = 1476; break; // vsld
+      case Pav_SHR:     opc2 = 1732; break; // vsrd
+      case Pav_SAR:     opc2 =  964; break; // vsrad
+      case Pav_ROTL:    opc2 =  196; break; // vrld
+      case Pav_PACKUU:  opc2 = 1102; break; // vpkudum
+      case Pav_QPACKUU: opc2 = 1230; break; // vpkudus, vpksdus (emulated)
+      case Pav_QPACKSS: opc2 = 1486; break; // vpksdsm
+      case Pav_MRGHI:   opc2 = 1614; break; // NOTE(review): comment said vmrghw, but vmrghw is opc2 140 — confirm intended mnemonic for 1614
+      case Pav_MRGLO:   opc2 = 1742; break; // NOTE(review): comment said vmrglw, but vmrglw is opc2 396 — confirm intended mnemonic for 1742
+      case Pav_POLYMULADD: opc2 = 1096; break; // vpmsumd
+      default:
+         goto bad;
+      }
+      p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, opc2, endness_host );
+      goto done;
+   }
+   case Pin_AvCipherV128Unary: {
+      /* AES S-box substitution (ISA 2.07 crypto). */
+      UInt v_dst = vregEnc(i->Pin.AvCipherV128Unary.dst);
+      UInt v_src = vregEnc(i->Pin.AvCipherV128Unary.src);
+      UInt opc2;
+      switch (i->Pin.AvCipherV128Unary.op) {
+      case Pav_CIPHERSUBV128:   opc2 =  1480; break; // vsbox
+      default:
+         goto bad;
+      }
+      p = mkFormVX( p, 4, v_dst, v_src, 0, opc2, endness_host );
+      goto done;
+   }
+   case Pin_AvCipherV128Binary: {
+      /* AES round primitives (ISA 2.07 crypto). */
+      UInt v_dst  = vregEnc(i->Pin.AvCipherV128Binary.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvCipherV128Binary.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvCipherV128Binary.srcR);
+      UInt opc2;
+      switch (i->Pin.AvCipherV128Binary.op) {
+      case Pav_CIPHERV128:     opc2 =  1288; break; // vcipher
+      case Pav_CIPHERLV128:    opc2 =  1289; break; // vcipherlast
+      case Pav_NCIPHERV128:    opc2 =  1352; break; // vncipher
+      case Pav_NCIPHERLV128:   opc2 =  1353; break; // vncipherlast
+      default:
+         goto bad;
+      }
+      p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, opc2, endness_host );
+      goto done;
+   }
+   case Pin_AvHashV128Binary: {
+      /* SHA sigma functions; s_field is an immediate selecting the
+         sigma variant and is encoded directly in the vB position. */
+      UInt v_dst = vregEnc(i->Pin.AvHashV128Binary.dst);
+      UInt v_src = vregEnc(i->Pin.AvHashV128Binary.src);
+      PPCRI* s_field = i->Pin.AvHashV128Binary.s_field;
+      UInt opc2;
+      switch (i->Pin.AvHashV128Binary.op) {
+      case Pav_SHA256:   opc2 =  1666; break; // vshasigmaw
+      case Pav_SHA512:   opc2 =  1730; break; // vshasigmad
+      default:
+         goto bad;
+      }
+      p = mkFormVX( p, 4, v_dst, v_src, s_field->Pri.Imm, opc2, endness_host );
+      goto done;
+   }
+   case Pin_AvBCDV128Trinary: {
+      /* BCD add/subtract; the PS immediate is folded into the VXR
+         opcode field. */
+      UInt v_dst  = vregEnc(i->Pin.AvBCDV128Trinary.dst);
+      UInt v_src1 = vregEnc(i->Pin.AvBCDV128Trinary.src1);
+      UInt v_src2 = vregEnc(i->Pin.AvBCDV128Trinary.src2);
+      PPCRI* ps   = i->Pin.AvBCDV128Trinary.ps;
+      UInt opc2;
+      switch (i->Pin.AvBCDV128Trinary.op) {
+      case Pav_BCDAdd:   opc2 =  1; break; // bcdadd
+      case Pav_BCDSub:   opc2 = 65; break; // bcdsub
+      default:
+         goto bad;
+      }
+      p = mkFormVXR( p, 4, v_dst, v_src1, v_src2,
+                     0x1, (ps->Pri.Imm << 9) | opc2, endness_host );
+      goto done;
+   }
+   case Pin_AvBin32Fx4: {
+      /* AltiVec binary op on 4 lanes of 32-bit floats.  There is no
+         vmulfp instruction, so MULF is synthesised with vmaddfp plus a
+         -0.0 addend built in a scratch register. */
+      UInt v_dst  = vregEnc(i->Pin.AvBin32Fx4.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvBin32Fx4.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvBin32Fx4.srcR);
+      switch (i->Pin.AvBin32Fx4.op) {
+
+      case Pavfp_ADDF:
+         p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 10, endness_host );   // vaddfp
+         break;
+      case Pavfp_SUBF:
+         p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 74, endness_host );   // vsubfp
+         break;
+      case Pavfp_MAXF:
+         p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 1034, endness_host ); // vmaxfp
+         break;
+      case Pavfp_MINF:
+         p = mkFormVX( p, 4, v_dst, v_srcL, v_srcR, 1098, endness_host ); // vminfp
+         break;
+
+      case Pavfp_MULF: {
+         /* Make a vmulfp from a vmaddfp:
+            load -0.0 (0x8000_0000) to each 32-bit word of vB
+            this makes the add a noop.
+         */
+         UInt vB = 29;  // XXX: Using v29 for temp do not change
+                        // without also changing
+                        // getRegUsage_PPCInstr
+         UInt konst = 0x1F;
+
+         // Better way to load -0.0 (0x80000000) ?
+         // vspltisw vB,0x1F   (0x1F => each word of vB)
+         p = mkFormVX( p, 4, vB, konst, 0, 908, endness_host );
+
+         // vslw vB,vB,vB (each word of vB = (0x1F << 0x1F) = 0x80000000
+         p = mkFormVX( p, 4, vB, vB, vB, 388, endness_host );
+
+         // Finally, do the multiply:
+         p = mkFormVA( p, 4, v_dst, v_srcL, vB, v_srcR, 46, endness_host );
+         break;
+      }
+      case Pavfp_CMPEQF:  // vcmpeqfp
+         p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 198, endness_host);
+         break;
+      case Pavfp_CMPGTF:  // vcmpgtfp
+         p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 710, endness_host );
+         break;
+      case Pavfp_CMPGEF:  // vcmpgefp
+         p = mkFormVXR( p, 4, v_dst, v_srcL, v_srcR, 0, 454, endness_host );
+         break;
+
+      default:
+         goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_AvUn32Fx4: {
+      UInt v_dst = vregEnc(i->Pin.AvUn32Fx4.dst);
+      UInt v_src = vregEnc(i->Pin.AvUn32Fx4.src);
+      UInt opc2;
+      switch (i->Pin.AvUn32Fx4.op) {
+      case Pavfp_RCPF:    opc2 =  266; break; // vrefp
+      case Pavfp_RSQRTF:  opc2 =  330; break; // vrsqrtefp
+      case Pavfp_CVTU2F:  opc2 =  778; break; // vcfux
+      case Pavfp_CVTS2F:  opc2 =  842; break; // vcfsx
+      case Pavfp_QCVTF2U: opc2 =  906; break; // vctuxs
+      case Pavfp_QCVTF2S: opc2 =  970; break; // vctsxs
+      case Pavfp_ROUNDM:  opc2 =  714; break; // vrfim
+      case Pavfp_ROUNDP:  opc2 =  650; break; // vrfip
+      case Pavfp_ROUNDN:  opc2 =  522; break; // vrfin
+      case Pavfp_ROUNDZ:  opc2 =  586; break; // vrfiz
+      default:
+         goto bad;
+      }
+      p = mkFormVX( p, 4, v_dst, 0, v_src, opc2, endness_host );
+      goto done;
+   }
+
+   case Pin_AvPerm: {  // vperm
+      UInt v_dst  = vregEnc(i->Pin.AvPerm.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvPerm.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvPerm.srcR);
+      UInt v_ctl  = vregEnc(i->Pin.AvPerm.ctl);
+      p = mkFormVA( p, 4, v_dst, v_srcL, v_srcR, v_ctl, 43, endness_host );
+      goto done;
+   }
+
+   case Pin_AvSel: {  // vsel
+      UInt v_ctl  = vregEnc(i->Pin.AvSel.ctl);
+      UInt v_dst  = vregEnc(i->Pin.AvSel.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvSel.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvSel.srcR);
+      p = mkFormVA( p, 4, v_dst, v_srcL, v_srcR, v_ctl, 42, endness_host );
+      goto done;
+   }
+
+   case Pin_AvSh: {  // vsl or vsr
+      UInt v_dst  = vregEnc(i->Pin.AvSh.dst);
+      Bool  idxd = toBool(i->Pin.AvSh.addr->tag == Pam_RR);
+      UInt r_idx, r_base;
+
+      r_base = iregEnc(i->Pin.AvSh.addr->Pam.RR.base, mode64);
+
+      if (!idxd) {
+         r_idx = 30; // XXX: Using r30 as temp
+         p = mkLoadImm(p, r_idx,
+                       i->Pin.AvSh.addr->Pam.IR.index, mode64, endness_host);
+      } else {
+         r_idx  = iregEnc(i->Pin.AvSh.addr->Pam.RR.index, mode64);
+      }
+
+      if (i->Pin.AvSh.shLeft)
+         //vsl VRT,RA,RB
+         p = mkFormVXI( p, 31, v_dst, r_idx, r_base, 6, endness_host );
+      else
+         //vsr VRT,RA,RB
+         p = mkFormVXI( p, 31, v_dst, r_idx, r_base, 38, endness_host );
+      goto done;
+   }
+
+   case Pin_AvShlDbl: {  // vsldoi
+      UInt shift  = i->Pin.AvShlDbl.shift;
+      UInt v_dst  = vregEnc(i->Pin.AvShlDbl.dst);
+      UInt v_srcL = vregEnc(i->Pin.AvShlDbl.srcL);
+      UInt v_srcR = vregEnc(i->Pin.AvShlDbl.srcR);
+      vassert(shift <= 0xF);
+      p = mkFormVA( p, 4, v_dst, v_srcL, v_srcR, shift, 44, endness_host );
+      goto done;
+   }
+
+   case Pin_AvSplat: { // vsplt(is)(b,h,w)
+      UInt v_dst = vregEnc(i->Pin.AvShlDbl.dst);
+      UChar sz   = i->Pin.AvSplat.sz;
+      UInt v_src, opc2;
+      vassert(sz == 8 || sz == 16 || sz == 32);
+
+      if (i->Pin.AvSplat.src->tag == Pvi_Imm) {
+         Char simm5;
+         opc2 = (sz == 8) ? 780 : (sz == 16) ? 844 : 908;   // 8,16,32
+         /* expects 5-bit-signed-imm */
+         simm5 = i->Pin.AvSplat.src->Pvi.Imm5s;
+         vassert(simm5 >= -16 && simm5 <= 15);
+         simm5 = simm5 & 0x1F;
+         p = mkFormVX( p, 4, v_dst, (UInt)simm5, 0, opc2, endness_host );
+      }
+      else {  // Pri_Reg
+         UInt lowest_lane;
+         opc2 = (sz == 8) ? 524 : (sz == 16) ? 588 : 652;  // 8,16,32
+         vassert(hregClass(i->Pin.AvSplat.src->Pvi.Reg) == HRcVec128);
+         v_src = vregEnc(i->Pin.AvSplat.src->Pvi.Reg);
+         lowest_lane = (128/sz)-1;
+         p = mkFormVX( p, 4, v_dst, lowest_lane, v_src, opc2, endness_host );
+      }
+      goto done;
+   }
+
+   case Pin_AvCMov: {
+      UInt v_dst     = vregEnc(i->Pin.AvCMov.dst);
+      UInt v_src     = vregEnc(i->Pin.AvCMov.src);
+      PPCCondCode cc = i->Pin.AvCMov.cond;
+
+      if (v_dst == v_src) goto done;
+      
+      vassert(cc.test != Pct_ALWAYS);
+
+      /* jmp fwds 2 insns if !condition */
+      if (cc.test != Pct_ALWAYS) {
+         /* bc !ct,cf,n_bytes>>2 */
+         p = mkFormB(p, invertCondTest(cc.test), cc.flag, 8>>2, 0, 0,
+                     endness_host);
+      }
+      /* vmr */
+      p = mkFormVX( p, 4, v_dst, v_src, v_src, 1156, endness_host );
+      goto done;
+   }
+
+   case Pin_AvLdVSCR: {  // mtvscr
+      UInt v_src = vregEnc(i->Pin.AvLdVSCR.src);
+      p = mkFormVX( p, 4, 0, 0, v_src, 1604, endness_host );
+      goto done;
+   }
+
+   case Pin_Dfp64Unary: {
+      UInt fr_dst = fregEnc( i->Pin.FpUnary.dst );
+      UInt fr_src = fregEnc( i->Pin.FpUnary.src );
+
+      switch (i->Pin.Dfp64Unary.op) {
+      case Pfp_MOV: // fmr, PPC32 p410
+         p = mkFormX( p, 63, fr_dst, 0, fr_src, 72, 0, endness_host );
+         break;
+      case Pfp_DCTDP:   // D32 to D64
+         p = mkFormX( p, 59, fr_dst, 0, fr_src, 258, 0, endness_host );
+         break;
+      case Pfp_DRSP:    // D64 to D32
+         p = mkFormX( p, 59, fr_dst, 0, fr_src, 770, 0, endness_host );
+         break;
+      case Pfp_DCFFIX:   // I64 to D64 conversion
+         /* ONLY WORKS ON POWER7 */
+         p = mkFormX( p, 59, fr_dst, 0, fr_src, 802, 0, endness_host );
+         break;
+      case Pfp_DCTFIX:   // D64 to I64 conversion
+         p = mkFormX( p, 59, fr_dst, 0, fr_src, 290, 0, endness_host );
+         break;
+      case Pfp_DXEX:     // Extract exponent
+         p = mkFormX( p, 59, fr_dst, 0, fr_src, 354, 0, endness_host );
+         break;                                
+      default:
+         goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_Dfp64Binary: {
+      UInt fr_dst = fregEnc( i->Pin.Dfp64Binary.dst );
+      UInt fr_srcL = fregEnc( i->Pin.Dfp64Binary.srcL );
+      UInt fr_srcR = fregEnc( i->Pin.Dfp64Binary.srcR );
+      switch (i->Pin.Dfp64Binary.op) {
+      case Pfp_DFPADD: /* dadd, dfp add, use default RM from reg ignore mode
+                        * from the Iop instruction. */
+         p = mkFormX( p, 59, fr_dst, fr_srcL, fr_srcR, 2, 0, endness_host );
+         break;
+      case Pfp_DFPSUB: /* dsub, dfp subtract, use default RM from reg ignore
+                        * mode from the Iop instruction. */
+         p = mkFormX( p, 59, fr_dst, fr_srcL, fr_srcR, 514, 0, endness_host );
+         break;
+      case Pfp_DFPMUL: /* dmul, dfp multipy, use default RM from reg ignore
+                        * mode from the Iop instruction. */
+         p = mkFormX( p, 59, fr_dst, fr_srcL, fr_srcR, 34, 0, endness_host );
+         break;
+      case Pfp_DFPDIV: /* ddiv, dfp divide, use default RM from reg ignore
+                        * mode from the Iop instruction. */
+         p = mkFormX( p, 59, fr_dst, fr_srcL, fr_srcR, 546, 0, endness_host );
+         break;
+      case Pfp_DIEX:  /* diex, insert exponent */
+         p = mkFormX( p, 59, fr_dst, fr_srcL, fr_srcR, 866, 0, endness_host );
+         break;
+      default:
+         goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_DfpShift: {
+      UInt fr_src = fregEnc(i->Pin.DfpShift.src);
+      UInt fr_dst = fregEnc(i->Pin.DfpShift.dst);
+      UInt shift;
+
+      shift =  i->Pin.DfpShift.shift->Pri.Imm;
+
+      switch (i->Pin.DfpShift.op) {
+      case Pfp_DSCLI:    /* dscli, DFP shift left by fr_srcR */
+         p = mkFormZ22( p, 59, fr_dst, fr_src, shift,  66, 0, endness_host );
+         break;
+      case Pfp_DSCRI:    /* dscri, DFP shift right by fr_srcR */
+         p = mkFormZ22( p, 59, fr_dst, fr_src, shift,  98, 0, endness_host );
+         break;
+      default:
+         vex_printf("ERROR: emit_PPCInstr default case\n");
+         goto bad;
+      }
+      goto done;
+   }
+
+   case Pin_ExtractExpD128: {
+      UInt fr_dst   = fregEnc(i->Pin.ExtractExpD128.dst);
+      UInt fr_srcHi = fregEnc(i->Pin.ExtractExpD128.src_hi);
+      UInt fr_srcLo = fregEnc(i->Pin.ExtractExpD128.src_lo);
+
+      switch (i->Pin.ExtractExpD128.op) {
+      case Pfp_DXEXQ:                                                          
+         /* Setup the upper and lower registers of the source operand
+          * register pair.
+          */
+         p = mkFormX( p, 63, 12, 0, fr_srcHi, 72, 0, endness_host );
+         p = mkFormX( p, 63, 13, 0, fr_srcLo, 72, 0, endness_host );
+         p = mkFormX( p, 63, 10, 0, 12, 354, 0, endness_host );
+
+         /* The instruction will put the 64-bit result in
+          * register 10.
+          */
+         p = mkFormX(p, 63, fr_dst, 0, 10,  72, 0, endness_host);
+         break;
+      default:
+         vex_printf("Error: emit_PPCInstr case Pin_DfpExtractExp, case inst Default\n");
+         goto bad;
+      }
+      goto done;
+   }
+   case Pin_Dfp128Unary: {
+     UInt fr_dstHi = fregEnc(i->Pin.Dfp128Unary.dst_hi);
+     UInt fr_dstLo = fregEnc(i->Pin.Dfp128Unary.dst_lo);
+     UInt fr_srcLo = fregEnc(i->Pin.Dfp128Unary.src_lo);
+
+     /* Do instruction with 128-bit source operands in registers (10,11)       
+      * and (12,13).                                                           
+      */
+     switch (i->Pin.Dfp128Unary.op) {
+     case Pfp_DCTQPQ: // D64 to D128, srcLo holds 64 bit operand              
+        p = mkFormX( p, 63, 12, 0, fr_srcLo, 72, 0, endness_host );
+
+        p = mkFormX( p, 63, 10, 0, 12, 258, 0, endness_host );
+
+        /* The instruction will put the 128-bit result in
+         * registers (10,11).  Note, the operand in the instruction only
+         * reference the first of the two registers in the pair.
+         */
+        p = mkFormX(p, 63, fr_dstHi, 0, 10,  72, 0, endness_host);
+        p = mkFormX(p, 63, fr_dstLo, 0, 11,  72, 0, endness_host);
+        break;
+     default:
+        vex_printf("Error: emit_PPCInstr case Pin_Dfp128Unary, case inst Default\
+\n");
+        goto bad;
+     }
+     goto done;
+   }
+
+   case Pin_Dfp128Binary: {
+      /* dst is used to supply the  left source operand and return
+       * the result.
+       */
+      UInt fr_dstHi = fregEnc( i->Pin.Dfp128Binary.dst_hi );
+      UInt fr_dstLo = fregEnc( i->Pin.Dfp128Binary.dst_lo );
+      UInt fr_srcRHi = fregEnc( i->Pin.Dfp128Binary.srcR_hi );
+      UInt fr_srcRLo = fregEnc( i->Pin.Dfp128Binary.srcR_lo );
+
+      /* Setup the upper and lower registers of the source operand
+       * register pair.
+       */
+      p = mkFormX( p, 63, 10, 0, fr_dstHi, 72, 0, endness_host );
+      p = mkFormX( p, 63, 11, 0, fr_dstLo, 72, 0, endness_host );
+      p = mkFormX( p, 63, 12, 0, fr_srcRHi, 72, 0, endness_host );
+      p = mkFormX( p, 63, 13, 0, fr_srcRLo, 72, 0, endness_host );
+
+      /* Do instruction with 128-bit source operands in registers (10,11)
+       * and (12,13).
+       */
+      switch (i->Pin.Dfp128Binary.op) {
+      case Pfp_DFPADDQ:
+         p = mkFormX( p, 63, 10, 10, 12, 2, 0, endness_host );
+         break;
+      case Pfp_DFPSUBQ:
+         p = mkFormX( p, 63, 10, 10, 12, 514, 0, endness_host );
+         break;
+      case Pfp_DFPMULQ:
+         p = mkFormX( p, 63, 10, 10, 12, 34, 0, endness_host );
+         break;
+      case Pfp_DFPDIVQ:
+         p = mkFormX( p, 63, 10, 10, 12, 546, 0, endness_host );
+         break;
+      default:
+         goto bad;
+      }
+
+      /* The instruction will put the 128-bit result in
+       * registers (10,11).  Note, the operand in the instruction only
+       * reference the first of the two registers in the pair.
+       */
+      p = mkFormX(p, 63, fr_dstHi, 0, 10,  72, 0, endness_host);
+      p = mkFormX(p, 63, fr_dstLo, 0, 11,  72, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_DfpShift128: {
+      UInt fr_src_hi = fregEnc(i->Pin.DfpShift128.src_hi);
+      UInt fr_src_lo = fregEnc(i->Pin.DfpShift128.src_lo);
+      UInt fr_dst_hi = fregEnc(i->Pin.DfpShift128.dst_hi);
+      UInt fr_dst_lo = fregEnc(i->Pin.DfpShift128.dst_lo);
+      UInt shift;
+
+      shift =  i->Pin.DfpShift128.shift->Pri.Imm;
+
+      /* setup source operand in register 12, 13 pair */
+      p = mkFormX(p, 63, 12, 0, fr_src_hi, 72, 0, endness_host);
+      p = mkFormX(p, 63, 13, 0, fr_src_lo, 72, 0, endness_host);
+
+      /* execute instruction putting result in register 10, 11 pair */
+      switch (i->Pin.DfpShift128.op) {
+      case Pfp_DSCLIQ:    /* dscliq, DFP shift left, fr_srcR is the integer
+                           * shift amount.
+                           */
+         p = mkFormZ22( p, 63, 10, 12, shift,  66, 0, endness_host );
+         break;
+      case Pfp_DSCRIQ:    /* dscriq, DFP shift right, fr_srcR is the integer
+                           * shift amount.
+                           */
+         p = mkFormZ22( p, 63, 10, 12, shift,  98, 0, endness_host );
+         break;
+      default:
+         vex_printf("ERROR: emit_PPCInstr quad default case %d \n",
+                    i->Pin.DfpShift128.op);
+         goto bad;
+      }
+
+      /* The instruction put the 128-bit result in registers (10,11). 
+       * Note, the operand in the instruction only reference the first of 
+       * the two registers in the pair.
+       */
+      p = mkFormX(p, 63, fr_dst_hi, 0, 10,  72, 0, endness_host);
+      p = mkFormX(p, 63, fr_dst_lo, 0, 11,  72, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_DfpRound: {
+      UInt fr_dst = fregEnc(i->Pin.DfpRound.dst);
+      UInt fr_src = fregEnc(i->Pin.DfpRound.src);
+      UInt r_rmc, r, rmc;
+
+      r_rmc =  i->Pin.DfpRound.r_rmc->Pri.Imm;
+      r = (r_rmc & 0x8) >> 3;
+      rmc = r_rmc & 0x3;
+
+      // drintx
+      p = mkFormZ23(p, 59, fr_dst, r, fr_src, rmc, 99, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_DfpRound128: {
+      UInt fr_dstHi = fregEnc(i->Pin.DfpRound128.dst_hi);
+      UInt fr_dstLo = fregEnc(i->Pin.DfpRound128.dst_lo);
+      UInt fr_srcHi = fregEnc(i->Pin.DfpRound128.src_hi);
+      UInt fr_srcLo = fregEnc(i->Pin.DfpRound128.src_lo);
+      UInt r_rmc, r, rmc;
+
+      r_rmc =  i->Pin.DfpRound128.r_rmc->Pri.Imm;
+      r = (r_rmc & 0x8) >> 3;
+      rmc = r_rmc & 0x3;
+
+      /* Setup the upper and lower registers of the source operand 
+       * register pair.
+       */
+      p = mkFormX(p, 63, 12, 0, fr_srcHi, 72, 0, endness_host);
+      p = mkFormX(p, 63, 13, 0, fr_srcLo, 72, 0, endness_host);
+
+      /* Do drintx instruction with 128-bit source operands in 
+       * registers (12,13).  
+       */
+      p = mkFormZ23(p, 63, 10, r, 12, rmc, 99, 0, endness_host);
+
+      /* The instruction will put the 128-bit result in 
+       * registers (10,11).  Note, the operand in the instruction only 
+       * reference the first of the two registers in the pair.
+       */
+      p = mkFormX(p, 63, fr_dstHi, 0, 10,  72, 0, endness_host);
+      p = mkFormX(p, 63, fr_dstLo, 0, 11,  72, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_DfpQuantize: {
+      UInt fr_dst  = fregEnc(i->Pin.DfpQuantize.dst);
+      UInt fr_srcL = fregEnc(i->Pin.DfpQuantize.srcL);
+      UInt fr_srcR = fregEnc(i->Pin.DfpQuantize.srcR);
+      UInt rmc;
+
+      rmc =  i->Pin.DfpQuantize.rmc->Pri.Imm;
+
+      switch (i->Pin.DfpQuantize.op) {
+      case Pfp_DQUA:
+         p = mkFormZ23(p, 59, fr_dst, fr_srcL, fr_srcR, rmc, 3, 0, endness_host);
+         break;
+      case Pfp_RRDTR:
+         p = mkFormZ23(p, 59, fr_dst, fr_srcL, fr_srcR, rmc, 35, 0, endness_host);
+         break;
+      default:
+         break;
+      }
+      goto done;
+   }
+
+   case Pin_DfpQuantize128: {
+      UInt fr_dst_hi = fregEnc(i->Pin.DfpQuantize128.dst_hi);
+      UInt fr_dst_lo = fregEnc(i->Pin.DfpQuantize128.dst_lo);
+      UInt fr_src_hi = fregEnc(i->Pin.DfpQuantize128.src_hi);
+      UInt fr_src_lo = fregEnc(i->Pin.DfpQuantize128.src_lo);
+      UInt rmc;
+
+      rmc =  i->Pin.DfpQuantize128.rmc->Pri.Imm;
+      /* Setup the upper and lower registers of the source operand 
+       * register pairs.  Note, left source operand passed in via the
+       * dst register pair.
+       */
+      p = mkFormX(p, 63, 10, 0, fr_dst_hi, 72, 0, endness_host);
+      p = mkFormX(p, 63, 11, 0, fr_dst_lo, 72, 0, endness_host);
+      p = mkFormX(p, 63, 12, 0, fr_src_hi, 72, 0, endness_host);
+      p = mkFormX(p, 63, 13, 0, fr_src_lo, 72, 0, endness_host);
+
+      /* Do dquaq instruction with 128-bit source operands in 
+       * registers (12,13).  
+       */
+      switch (i->Pin.DfpQuantize128.op) {
+      case Pfp_DQUAQ:
+         p = mkFormZ23(p, 63, 10, 10, 12, rmc, 3, 0, endness_host);
+         break;
+      case Pfp_DRRNDQ:
+         p = mkFormZ23(p, 63, 10, 10, 12, rmc, 35, 0, endness_host);
+         break;
+      default:
+         vpanic("Pin_DfpQuantize128: default case, couldn't find inst to issue \n");
+         break;
+      }
+
+      /* The instruction will put the 128-bit result in 
+       * registers (10,11).  Note, the operand in the instruction only 
+       * reference the first of the two registers in the pair.
+       */
+      p = mkFormX(p, 63, fr_dst_hi, 0, 10,  72, 0, endness_host);
+      p = mkFormX(p, 63, fr_dst_lo, 0, 11,  72, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_DfpD128toD64: {
+      UInt fr_dst   = fregEnc( i->Pin.DfpD128toD64.dst );
+      UInt fr_srcHi = fregEnc( i->Pin.DfpD128toD64.src_hi );
+      UInt fr_srcLo = fregEnc( i->Pin.DfpD128toD64.src_lo );
+
+      /* Setup the upper and lower registers of the source operand
+       * register pair.
+       */
+      p = mkFormX( p, 63, 10, 0, fr_dst, 72, 0, endness_host );
+      p = mkFormX( p, 63, 12, 0, fr_srcHi, 72, 0, endness_host );
+      p = mkFormX( p, 63, 13, 0, fr_srcLo, 72, 0, endness_host );
+
+      /* Do instruction with 128-bit source operands in registers (10,11) */
+      switch (i->Pin.Dfp128Binary.op) {
+      case Pfp_DRDPQ:
+         p = mkFormX( p, 63, 10, 0, 12, 770, 0, endness_host );
+         break;
+      case Pfp_DCTFIXQ:
+         p = mkFormX( p, 63, 10, 0, 12, 290, 0, endness_host );
+         break;
+      default:
+         goto bad;
+      }
+
+      /* The instruction will put the 64-bit result in registers 10. */
+      p = mkFormX(p, 63, fr_dst, 0, 10,  72, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_DfpI64StoD128: {
+      UInt fr_dstHi = fregEnc( i->Pin.DfpI64StoD128.dst_hi );
+      UInt fr_dstLo = fregEnc( i->Pin.DfpI64StoD128.dst_lo );
+      UInt fr_src   = fregEnc( i->Pin.DfpI64StoD128.src );
+
+      switch (i->Pin.Dfp128Binary.op) {
+      case Pfp_DCFFIXQ:
+         p = mkFormX( p, 63, 10, 11, fr_src, 802, 0, endness_host );
+         break;
+      default:
+         goto bad;
+      }
+
+      /* The instruction will put the 64-bit result in registers 10, 11. */
+      p = mkFormX(p, 63, fr_dstHi, 0, 10,  72, 0, endness_host);
+      p = mkFormX(p, 63, fr_dstLo, 0, 11,  72, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_InsertExpD128: {
+      UInt fr_dstHi  = fregEnc(i->Pin.InsertExpD128.dst_hi);
+      UInt fr_dstLo  = fregEnc(i->Pin.InsertExpD128.dst_lo);
+      UInt fr_srcL   = fregEnc(i->Pin.InsertExpD128.srcL);
+      UInt fr_srcRHi = fregEnc(i->Pin.InsertExpD128.srcR_hi);
+      UInt fr_srcRLo = fregEnc(i->Pin.InsertExpD128.srcR_lo);
+
+      /* The left operand is a single F64 value, the right is an F128
+       * register pair.
+       */
+      p = mkFormX(p, 63, 10, 0, fr_srcL, 72, 0, endness_host);
+      p = mkFormX(p, 63, 12, 0, fr_srcRHi, 72, 0, endness_host);
+      p = mkFormX(p, 63, 13, 0, fr_srcRLo, 72, 0, endness_host);
+      p = mkFormX(p, 63, 10, 10, 12, 866, 0, endness_host );
+
+      /* The instruction will put the 128-bit result into
+       * registers (10,11).  Note, the operand in the instruction only
+       * reference the first of the two registers in the pair.
+       */
+      p = mkFormX(p, 63, fr_dstHi, 0, 10,  72, 0, endness_host);
+      p = mkFormX(p, 63, fr_dstLo, 0, 11,  72, 0, endness_host);
+      goto done;
+   }                                                                           
+
+   case Pin_Dfp64Cmp:{
+      UChar crfD    = 1;
+      UInt  r_dst   = iregEnc(i->Pin.Dfp64Cmp.dst, mode64);
+      UInt  fr_srcL = fregEnc(i->Pin.Dfp64Cmp.srcL);
+      UInt  fr_srcR = fregEnc(i->Pin.Dfp64Cmp.srcR);
+      vassert(crfD < 8);
+      // dcmpo, dcmpu
+      p = mkFormX(p, 59, crfD<<2, fr_srcL, fr_srcR, 130, 0, endness_host);
+
+      // mfcr (mv CR to r_dst)
+      p = mkFormX(p, 31, r_dst, 0, 0, 19, 0, endness_host);
+
+      // rlwinm r_dst,r_dst,8,28,31
+      //  => rotate field 1 to bottomw of word, masking out upper 28
+      p = mkFormM(p, 21, r_dst, r_dst, 8, 28, 31, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_Dfp128Cmp: {
+      UChar crfD       = 1;
+      UInt  r_dst      = iregEnc(i->Pin.Dfp128Cmp.dst, mode64);
+      UInt  fr_srcL_hi = fregEnc(i->Pin.Dfp128Cmp.srcL_hi);
+      UInt  fr_srcL_lo = fregEnc(i->Pin.Dfp128Cmp.srcL_lo);
+      UInt  fr_srcR_hi = fregEnc(i->Pin.Dfp128Cmp.srcR_hi);
+      UInt  fr_srcR_lo = fregEnc(i->Pin.Dfp128Cmp.srcR_lo);
+      vassert(crfD < 8);
+      // dcmpoq, dcmpuq
+      /* Setup the upper and lower registers of the source operand
+       * register pair.
+       */
+      p = mkFormX(p, 63, 10, 0, fr_srcL_hi, 72, 0, endness_host);
+      p = mkFormX(p, 63, 11, 0, fr_srcL_lo, 72, 0, endness_host);
+      p = mkFormX(p, 63, 12, 0, fr_srcR_hi, 72, 0, endness_host);
+      p = mkFormX(p, 63, 13, 0, fr_srcR_lo, 72, 0, endness_host);
+
+      p = mkFormX(p, 63, crfD<<2, 10, 12, 130, 0, endness_host);
+
+      // mfcr (mv CR to r_dst)
+      p = mkFormX(p, 31, r_dst, 0, 0, 19, 0, endness_host);
+
+      // rlwinm r_dst,r_dst,8,28,31
+      //  => rotate field 1 to bottomw of word, masking out upper 28
+      p = mkFormM(p, 21, r_dst, r_dst, 8, 28, 31, 0, endness_host);
+      goto done;
+   }
+
+   case Pin_EvCheck: {
+      /* This requires a 32-bit dec/test in both 32- and 64-bit
+         modes. */
+      /* We generate:
+            lwz     r30, amCounter
+            addic.  r30, r30, -1
+            stw     r30, amCounter
+            bge     nofail
+            lwz/ld  r30, amFailAddr
+            mtctr   r30
+            bctr
+           nofail:
+      */
+      UChar* p0 = p;
+      /* lwz r30, amCounter */
+      p = do_load_or_store_word32(p, True/*isLoad*/, /*r*/30,
+                                  i->Pin.EvCheck.amCounter, mode64,
+                                  endness_host);
+      /* addic. r30,r30,-1 */
+      p = emit32(p, 0x37DEFFFF, endness_host);
+      /* stw r30, amCounter */
+      p = do_load_or_store_word32(p, False/*!isLoad*/, /*r*/30,
+                                  i->Pin.EvCheck.amCounter, mode64,
+                                  endness_host);
+      /* bge nofail */
+      p = emit32(p, 0x40800010, endness_host);
+      /* lwz/ld r30, amFailAddr */
+      p = do_load_or_store_machine_word(p, True/*isLoad*/, /*r*/30,
+                                        i->Pin.EvCheck.amFailAddr, mode64,
+                                        endness_host);
+      /* mtctr r30 */
+      p = mkFormXFX(p, /*r*/30, 9, 467, endness_host);
+      /* bctr */
+      p = mkFormXL(p, 19, Pct_ALWAYS, 0, 0, 528, 0, endness_host);
+      /* nofail: */
+
+      /* Crosscheck */
+      vassert(evCheckSzB_PPC() == (UChar*)p - (UChar*)p0);
+      goto done;
+   }
+
+   case Pin_ProfInc: {
+      /* We generate:
+               (ctrP is unknown now, so use 0x65556555(65556555) in the
+               expectation that a later call to LibVEX_patchProfCtr
+               will be used to fill in the immediate fields once the
+               right value is known.)
+            32-bit:
+              imm32-exactly r30, 0x65556555
+              lwz     r29, 4(r30)
+              addic.  r29, r29, 1
+              stw     r29, 4(r30)
+              lwz     r29, 0(r30)
+              addze   r29, r29
+              stw     r29, 0(r30)
+            64-bit:
+              imm64-exactly r30, 0x6555655565556555
+              ld      r29, 0(r30)
+              addi    r29, r29, 1
+              std     r29, 0(r30)
+      */
+      if (mode64) {
+         p = mkLoadImm_EXACTLY2or5(
+                p, /*r*/30, 0x6555655565556555ULL, True/*mode64*/, endness_host);
+         p = emit32(p, 0xEBBE0000, endness_host);
+         p = emit32(p, 0x3BBD0001, endness_host);
+         p = emit32(p, 0xFBBE0000, endness_host);
+      } else {
+         p = mkLoadImm_EXACTLY2or5(
+                p, /*r*/30, 0x65556555ULL, False/*!mode64*/, endness_host);
+         p = emit32(p, 0x83BE0004, endness_host);
+         p = emit32(p, 0x37BD0001, endness_host);
+         p = emit32(p, 0x93BE0004, endness_host);
+         p = emit32(p, 0x83BE0000, endness_host);
+         p = emit32(p, 0x7FBD0194, endness_host);
+         p = emit32(p, 0x93BE0000, endness_host);
+      }
+      /* Tell the caller .. */
+      vassert(!(*is_profInc));
+      *is_profInc = True;
+      goto done;
+   }
+
+   default: 
+      goto bad;
+   }
+
+  bad:
+   vex_printf("\n=> ");
+   ppPPCInstr(i, mode64);
+   vpanic("emit_PPCInstr");
+   /*NOTREACHED*/
+   
+  done:
+   vassert(p - &buf[0] <= 64);
+   return p - &buf[0];
+}
+
+
+/* How big is an event check?  See case for Pin_EvCheck in
+   emit_PPCInstr just above.  That crosschecks what this returns, so
+   we can tell if we're inconsistent. */
+Int evCheckSzB_PPC (void)
+{
+  return 28;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+                                 void* place_to_chain,
+                                 const void* disp_cp_chain_me_EXPECTED,
+                                 const void* place_to_jump_to,
+                                 Bool  mode64 )
+{
+   if (mode64) {
+      vassert((endness_host == VexEndnessBE) ||
+              (endness_host == VexEndnessLE));
+   } else {
+      vassert(endness_host == VexEndnessBE);
+   }
+
+   /* What we're expecting to see is:
+        imm32/64-fixed r30, disp_cp_chain_me_to_EXPECTED
+        mtctr r30
+        bctrl
+      viz
+        <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5>
+        7F C9 03 A6
+        4E 80 04 21
+   */
+   UChar* p = (UChar*)place_to_chain;
+   vassert(0 == (3 & (HWord)p));
+   vassert(isLoadImm_EXACTLY2or5(p, /*r*/30,
+                                 (Addr)disp_cp_chain_me_EXPECTED,
+                                 mode64, endness_host));
+   vassert(fetch32(p + (mode64 ? 20 : 8) + 0, endness_host) == 0x7FC903A6);
+   vassert(fetch32(p + (mode64 ? 20 : 8) + 4, endness_host) == 0x4E800421);
+   /* And what we want to change it to is:
+        imm32/64-fixed r30, place_to_jump_to
+        mtctr r30
+        bctr
+      viz
+        <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5>
+        7F C9 03 A6
+        4E 80 04 20
+      The replacement has the same length as the original.
+   */
+   p = mkLoadImm_EXACTLY2or5(p, /*r*/30,
+                             (Addr)place_to_jump_to, mode64, 
+                             endness_host);
+   p = emit32(p, 0x7FC903A6, endness_host);
+   p = emit32(p, 0x4E800420, endness_host);
+
+   Int len = p - (UChar*)place_to_chain;
+   vassert(len == (mode64 ? 28 : 16)); /* stay sane */
+   VexInvalRange vir = {(HWord)place_to_chain, len};
+   return vir;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+                                   void* place_to_unchain,
+                                   const void* place_to_jump_to_EXPECTED,
+                                   const void* disp_cp_chain_me,
+                                   Bool  mode64 )
+{
+   if (mode64) {
+      vassert((endness_host == VexEndnessBE) ||
+              (endness_host == VexEndnessLE));
+   } else {
+      vassert(endness_host == VexEndnessBE);
+   }
+
+   /* What we're expecting to see is:
+        imm32/64-fixed r30, place_to_jump_to_EXPECTED
+        mtctr r30
+        bctr
+      viz
+        <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5>
+        7F C9 03 A6
+        4E 80 04 20
+   */
+   UChar* p = (UChar*)place_to_unchain;
+   vassert(0 == (3 & (HWord)p));
+   vassert(isLoadImm_EXACTLY2or5(p, /*r*/30,
+                                 (Addr)place_to_jump_to_EXPECTED,
+                                 mode64, endness_host));
+   vassert(fetch32(p + (mode64 ? 20 : 8) + 0, endness_host) == 0x7FC903A6);
+   vassert(fetch32(p + (mode64 ? 20 : 8) + 4, endness_host) == 0x4E800420);
+   /* And what we want to change it to is:
+        imm32/64-fixed r30, disp_cp_chain_me
+        mtctr r30
+        bctrl
+      viz
+        <8 or 20 bytes generated by mkLoadImm_EXACTLY2or5>
+        7F C9 03 A6
+        4E 80 04 21
+      The replacement has the same length as the original.
+   */
+   p = mkLoadImm_EXACTLY2or5(p, /*r*/30,
+                             (Addr)disp_cp_chain_me, mode64, 
+                             endness_host);
+   p = emit32(p, 0x7FC903A6, endness_host);
+   p = emit32(p, 0x4E800421, endness_host);
+
+   Int len = p - (UChar*)place_to_unchain;
+   vassert(len == (mode64 ? 28 : 16)); /* stay sane */
+   VexInvalRange vir = {(HWord)place_to_unchain, len};
+   return vir;
+}
+
+
+/* Patch the counter address into a profile inc point, as previously
+   created by the Pin_ProfInc case for emit_PPCInstr. */
+VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+                                 void*  place_to_patch,
+                                 const ULong* location_of_counter,
+                                 Bool   mode64 )
+{
+   if (mode64) {
+      vassert((endness_host == VexEndnessBE) ||
+              (endness_host == VexEndnessLE));
+   } else {
+      vassert(endness_host == VexEndnessBE);
+   }
+
+   UChar* p = (UChar*)place_to_patch;
+   vassert(0 == (3 & (HWord)p));
+
+   Int len = 0;
+   if (mode64) {
+      vassert(isLoadImm_EXACTLY2or5(p, /*r*/30,
+                                    0x6555655565556555ULL, True/*mode64*/,
+                                    endness_host));
+      vassert(fetch32(p + 20, endness_host) == 0xEBBE0000);
+      vassert(fetch32(p + 24, endness_host) == 0x3BBD0001);
+      vassert(fetch32(p + 28, endness_host) == 0xFBBE0000);
+      p = mkLoadImm_EXACTLY2or5(p, /*r*/30,
+                                (Addr)location_of_counter,
+                                True/*mode64*/, endness_host);
+      len = p - (UChar*)place_to_patch;
+      vassert(len == 20);
+   } else {
+      vassert(isLoadImm_EXACTLY2or5(p, /*r*/30,
+                                    0x65556555ULL, False/*!mode64*/, 
+                                    endness_host));
+      vassert(fetch32(p +  8, endness_host) == 0x83BE0004);
+      vassert(fetch32(p + 12, endness_host) == 0x37BD0001);
+      vassert(fetch32(p + 16, endness_host) == 0x93BE0004);
+      vassert(fetch32(p + 20, endness_host) == 0x83BE0000);
+      vassert(fetch32(p + 24, endness_host) == 0x7FBD0194);
+      vassert(fetch32(p + 28, endness_host) == 0x93BE0000);
+      p = mkLoadImm_EXACTLY2or5(p, /*r*/30,
+                                (Addr)location_of_counter,
+                                False/*!mode64*/, endness_host);
+      len = p - (UChar*)place_to_patch;
+      vassert(len == 8);
+   }
+   VexInvalRange vir = {(HWord)place_to_patch, len};
+   return vir;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                     host_ppc_defs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_ppc_defs.h b/VEX/priv/host_ppc_defs.h
new file mode 100644
index 0000000..8f41190
--- /dev/null
+++ b/VEX/priv/host_ppc_defs.h
@@ -0,0 +1,1169 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                   host_ppc_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __VEX_HOST_PPC_DEFS_H
+#define __VEX_HOST_PPC_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex.h"                      // VexArch
+#include "host_generic_regs.h"           // HReg
+
+
+/* --------- Registers. --------- */
+
+#define ST_IN static inline
+
+#define GPR(_mode64, _enc, _ix64, _ix32) \
+  mkHReg(False,  (_mode64) ? HRcInt64 : HRcInt32, \
+         (_enc), (_mode64) ? (_ix64) : (_ix32))
+
+#define FPR(_mode64, _enc, _ix64, _ix32) \
+  mkHReg(False,  HRcFlt64, \
+         (_enc), (_mode64) ? (_ix64) : (_ix32))
+
+#define VR(_mode64, _enc, _ix64, _ix32) \
+  mkHReg(False,  HRcVec128, \
+         (_enc), (_mode64) ? (_ix64) : (_ix32))
+
+ST_IN HReg hregPPC_GPR3  ( Bool mode64 ) { return GPR(mode64,  3,   0,  0); }
+ST_IN HReg hregPPC_GPR4  ( Bool mode64 ) { return GPR(mode64,  4,   1,  1); }
+ST_IN HReg hregPPC_GPR5  ( Bool mode64 ) { return GPR(mode64,  5,   2,  2); }
+ST_IN HReg hregPPC_GPR6  ( Bool mode64 ) { return GPR(mode64,  6,   3,  3); }
+ST_IN HReg hregPPC_GPR7  ( Bool mode64 ) { return GPR(mode64,  7,   4,  4); }
+ST_IN HReg hregPPC_GPR8  ( Bool mode64 ) { return GPR(mode64,  8,   5,  5); }
+ST_IN HReg hregPPC_GPR9  ( Bool mode64 ) { return GPR(mode64,  9,   6,  6); }
+ST_IN HReg hregPPC_GPR10 ( Bool mode64 ) { return GPR(mode64, 10,   7,  7); }
+
+// r11 and r12 are only allocatable in 32-bit mode.  Hence the 64-bit
+// index numbering doesn't advance for these two.
+ST_IN HReg hregPPC_GPR11 ( Bool mode64 ) { return GPR(mode64, 11,   0,  8); }
+ST_IN HReg hregPPC_GPR12 ( Bool mode64 ) { return GPR(mode64, 12,   0,  9); }
+
+ST_IN HReg hregPPC_GPR14 ( Bool mode64 ) { return GPR(mode64, 14,   8, 10); }
+ST_IN HReg hregPPC_GPR15 ( Bool mode64 ) { return GPR(mode64, 15,   9, 11); }
+ST_IN HReg hregPPC_GPR16 ( Bool mode64 ) { return GPR(mode64, 16,  10, 12); }
+ST_IN HReg hregPPC_GPR17 ( Bool mode64 ) { return GPR(mode64, 17,  11, 13); }
+ST_IN HReg hregPPC_GPR18 ( Bool mode64 ) { return GPR(mode64, 18,  12, 14); }
+ST_IN HReg hregPPC_GPR19 ( Bool mode64 ) { return GPR(mode64, 19,  13, 15); }
+ST_IN HReg hregPPC_GPR20 ( Bool mode64 ) { return GPR(mode64, 20,  14, 16); }
+ST_IN HReg hregPPC_GPR21 ( Bool mode64 ) { return GPR(mode64, 21,  15, 17); }
+ST_IN HReg hregPPC_GPR22 ( Bool mode64 ) { return GPR(mode64, 22,  16, 18); }
+ST_IN HReg hregPPC_GPR23 ( Bool mode64 ) { return GPR(mode64, 23,  17, 19); }
+ST_IN HReg hregPPC_GPR24 ( Bool mode64 ) { return GPR(mode64, 24,  18, 20); }
+ST_IN HReg hregPPC_GPR25 ( Bool mode64 ) { return GPR(mode64, 25,  19, 21); }
+ST_IN HReg hregPPC_GPR26 ( Bool mode64 ) { return GPR(mode64, 26,  20, 22); }
+ST_IN HReg hregPPC_GPR27 ( Bool mode64 ) { return GPR(mode64, 27,  21, 23); }
+ST_IN HReg hregPPC_GPR28 ( Bool mode64 ) { return GPR(mode64, 28,  22, 24); }
+
+ST_IN HReg hregPPC_FPR14 ( Bool mode64 ) { return FPR(mode64, 14,  23, 25); }
+ST_IN HReg hregPPC_FPR15 ( Bool mode64 ) { return FPR(mode64, 15,  24, 26); }
+ST_IN HReg hregPPC_FPR16 ( Bool mode64 ) { return FPR(mode64, 16,  25, 27); }
+ST_IN HReg hregPPC_FPR17 ( Bool mode64 ) { return FPR(mode64, 17,  26, 28); }
+ST_IN HReg hregPPC_FPR18 ( Bool mode64 ) { return FPR(mode64, 18,  27, 29); }
+ST_IN HReg hregPPC_FPR19 ( Bool mode64 ) { return FPR(mode64, 19,  28, 30); }
+ST_IN HReg hregPPC_FPR20 ( Bool mode64 ) { return FPR(mode64, 20,  29, 31); }
+ST_IN HReg hregPPC_FPR21 ( Bool mode64 ) { return FPR(mode64, 21,  30, 32); }
+
+ST_IN HReg hregPPC_VR20  ( Bool mode64 ) { return VR (mode64, 20,  31, 33); }
+ST_IN HReg hregPPC_VR21  ( Bool mode64 ) { return VR (mode64, 21,  32, 34); }
+ST_IN HReg hregPPC_VR22  ( Bool mode64 ) { return VR (mode64, 22,  33, 35); }
+ST_IN HReg hregPPC_VR23  ( Bool mode64 ) { return VR (mode64, 23,  34, 36); }
+ST_IN HReg hregPPC_VR24  ( Bool mode64 ) { return VR (mode64, 24,  35, 37); }
+ST_IN HReg hregPPC_VR25  ( Bool mode64 ) { return VR (mode64, 25,  36, 38); }
+ST_IN HReg hregPPC_VR26  ( Bool mode64 ) { return VR (mode64, 26,  37, 39); }
+ST_IN HReg hregPPC_VR27  ( Bool mode64 ) { return VR (mode64, 27,  38, 40); }
+
+ST_IN HReg hregPPC_GPR1  ( Bool mode64 ) { return GPR(mode64,  1,  39, 41); }
+ST_IN HReg hregPPC_GPR29 ( Bool mode64 ) { return GPR(mode64, 29,  40, 42); }
+ST_IN HReg hregPPC_GPR30 ( Bool mode64 ) { return GPR(mode64, 30,  41, 43); }
+ST_IN HReg hregPPC_GPR31 ( Bool mode64 ) { return GPR(mode64, 31,  42, 44); }
+ST_IN HReg hregPPC_VR29  ( Bool mode64 ) { return VR (mode64, 29,  43, 45); }
+
+#undef ST_IN
+#undef GPR
+#undef FPR
+#undef VR
+
+#define StackFramePtr(_mode64) hregPPC_GPR1(_mode64)
+#define GuestStatePtr(_mode64) hregPPC_GPR31(_mode64)
+
+/* Num registers used for function calls */
+#define PPC_N_REGPARMS 8
+
+extern void ppHRegPPC ( HReg );
+
+
+/* --------- Condition codes --------- */
+
+/* This gives names from bitfields in CR; hence it names BI numbers */
+/* Using IBM/hardware indexing convention */
+typedef
+   enum {
+      // CR7, which we use for integer compares
+      Pcf_7LT  = 28,  /* neg  | lt          */
+      Pcf_7GT  = 29,  /* pos  | gt          */
+      Pcf_7EQ  = 30,  /* zero | equal       */
+      Pcf_7SO  = 31,  /* summary overflow   */
+      Pcf_NONE = 32   /* no condition; used with Pct_ALWAYS */
+   }
+   PPCCondFlag;
+
+typedef
+   enum {   /* Maps bc bitfield BO */
+      Pct_FALSE  = 0x4, /* associated PPCCondFlag must not be Pcf_NONE */
+      Pct_TRUE   = 0xC, /* associated PPCCondFlag must not be Pcf_NONE */
+      Pct_ALWAYS = 0x14 /* associated PPCCondFlag must be Pcf_NONE */
+   }
+   PPCCondTest;
+
+typedef
+   struct {
+      PPCCondFlag flag;
+      PPCCondTest test;
+   }
+   PPCCondCode;
+
+extern const HChar* showPPCCondCode ( PPCCondCode );
+
+/* constructor */
+extern PPCCondCode mk_PPCCondCode ( PPCCondTest, PPCCondFlag );
+
+/* false->true, true->false */
+extern PPCCondTest invertCondTest ( PPCCondTest );
+
+
+
+
+/* --------- Memory address expressions (amodes). --------- */
+
+typedef
+   enum {
+     Pam_IR=1,      /* Immediate (signed 16-bit) + Reg */
+     Pam_RR=2       /* Reg1 + Reg2     */
+   }
+   PPCAModeTag;
+
+typedef
+   struct {
+      PPCAModeTag tag;
+      union {
+         struct {
+            HReg base;
+            Int  index;
+         } IR;
+         struct {
+            HReg base;
+            HReg index;
+         } RR;
+      } Pam;
+   }
+   PPCAMode;
+
+extern PPCAMode* PPCAMode_IR ( Int,  HReg );
+extern PPCAMode* PPCAMode_RR ( HReg, HReg );
+
+extern PPCAMode* dopyPPCAMode ( PPCAMode* );
+
+extern void ppPPCAMode ( PPCAMode* );
+
+
+/* --------- Operand, which can be a reg or a u16/s16. --------- */
+/* ("RH" == "Register or Halfword immediate") */
+typedef 
+   enum {
+      Prh_Imm=3,
+      Prh_Reg=4
+   }
+   PPCRHTag;
+
+typedef
+   struct {
+      PPCRHTag tag;
+      union {
+         struct {
+            Bool   syned;
+            UShort imm16;
+         } Imm;
+         struct {
+            HReg reg;
+         } Reg;
+      }
+      Prh;
+   }
+   PPCRH;
+
+extern PPCRH* PPCRH_Imm ( Bool, UShort );
+extern PPCRH* PPCRH_Reg ( HReg );
+
+extern void ppPPCRH ( PPCRH* );
+
+
+/* --------- Operand, which can be a reg or a u32/64. --------- */
+
+typedef
+   enum {
+      Pri_Imm=5,
+      Pri_Reg=6
+   } 
+   PPCRITag;
+
+typedef
+   struct {
+      PPCRITag tag;
+      union {
+         ULong Imm;
+         HReg  Reg;
+      }
+      Pri;
+   }
+   PPCRI;
+
+extern PPCRI* PPCRI_Imm ( ULong );
+extern PPCRI* PPCRI_Reg( HReg );
+
+extern void ppPPCRI ( PPCRI* );
+
+
+/* --------- Operand, which can be a vector reg or a s6. --------- */
+/* ("VI" == "Vector Register or Immediate") */
+typedef
+   enum {
+      Pvi_Imm=7,
+      Pvi_Reg=8
+   } 
+   PPCVI5sTag;
+
+typedef
+   struct {
+      PPCVI5sTag tag;
+      union {
+         Char Imm5s;
+         HReg Reg;
+      }
+      Pvi;
+   }
+   PPCVI5s;
+
+extern PPCVI5s* PPCVI5s_Imm ( Char );
+extern PPCVI5s* PPCVI5s_Reg ( HReg );
+
+extern void ppPPCVI5s ( PPCVI5s* );
+
+
+/* --------- Instructions. --------- */
+
+/* --------- */
+typedef
+   enum {
+      Pun_NEG,
+      Pun_NOT,
+      Pun_CLZ32,
+      Pun_CLZ64,
+      Pun_EXTSW
+   }
+   PPCUnaryOp;
+
+extern const HChar* showPPCUnaryOp ( PPCUnaryOp );
+
+
+/* --------- */
+typedef 
+   enum {
+      Palu_INVALID,
+      Palu_ADD, Palu_SUB,
+      Palu_AND, Palu_OR, Palu_XOR,
+   }
+   PPCAluOp;
+
+extern 
+const HChar* showPPCAluOp ( PPCAluOp, 
+                            Bool /* is the 2nd operand an immediate? */);
+
+
+/* --------- */
+typedef 
+   enum {
+      Pshft_INVALID,
+      Pshft_SHL, Pshft_SHR, Pshft_SAR, 
+   }
+   PPCShftOp;
+
+extern 
+const HChar* showPPCShftOp ( PPCShftOp, 
+                             Bool /* is the 2nd operand an immediate? */,
+                             Bool /* is this a 32bit or 64bit op? */ );
+
+
+/* --------- */
+typedef
+   enum {
+      Pfp_INVALID,
+
+      /* Ternary */
+      Pfp_MADDD,  Pfp_MSUBD,
+      Pfp_MADDS,  Pfp_MSUBS,
+      Pfp_DFPADD, Pfp_DFPADDQ,
+      Pfp_DFPSUB, Pfp_DFPSUBQ,
+      Pfp_DFPMUL, Pfp_DFPMULQ,
+      Pfp_DFPDIV, Pfp_DFPDIVQ,
+      Pfp_DQUAQ,  Pfp_DRRNDQ,
+
+      /* Binary */
+      Pfp_ADDD, Pfp_SUBD, Pfp_MULD, Pfp_DIVD,
+      Pfp_ADDS, Pfp_SUBS, Pfp_MULS, Pfp_DIVS,
+      Pfp_DRSP, Pfp_DRDPQ, Pfp_DCTFIX, Pfp_DCTFIXQ, Pfp_DCFFIX, 
+      Pfp_DQUA, Pfp_RRDTR, Pfp_DIEX, Pfp_DIEXQ, Pfp_DRINTN,
+
+      /* Unary */
+      Pfp_SQRT, Pfp_ABS, Pfp_NEG, Pfp_MOV, Pfp_RES, Pfp_RSQRTE,
+      Pfp_FRIN, Pfp_FRIM, Pfp_FRIP, Pfp_FRIZ, 
+      Pfp_DSCLI, Pfp_DSCRI, Pfp_DSCLIQ, Pfp_DSCRIQ, Pfp_DCTDP,
+      Pfp_DCTQPQ, Pfp_DCFFIXQ, Pfp_DXEX, Pfp_DXEXQ, 
+
+   }
+   PPCFpOp;
+
+extern const HChar* showPPCFpOp ( PPCFpOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Pav_INVALID,
+
+      /* Integer Unary */
+      Pav_MOV,                             /* Mov */
+      Pav_NOT,                             /* Bitwise */
+      Pav_UNPCKH8S,  Pav_UNPCKH16S,        /* Unpack */
+      Pav_UNPCKL8S,  Pav_UNPCKL16S,
+      Pav_UNPCKHPIX, Pav_UNPCKLPIX,
+
+      /* Integer Binary */
+      Pav_AND, Pav_OR, Pav_XOR,            /* Bitwise */
+      Pav_ADDU, Pav_QADDU, Pav_QADDS,
+      Pav_SUBU, Pav_QSUBU, Pav_QSUBS,
+      Pav_MULU,
+      Pav_OMULU, Pav_OMULS, Pav_EMULU, Pav_EMULS,
+      Pav_AVGU, Pav_AVGS,
+      Pav_MAXU, Pav_MAXS,
+      Pav_MINU, Pav_MINS,
+
+      /* Compare (always affects CR field 6) */
+      Pav_CMPEQU, Pav_CMPGTU, Pav_CMPGTS,
+
+      /* Shift */
+      Pav_SHL, Pav_SHR, Pav_SAR, Pav_ROTL,
+
+      /* Pack */
+      Pav_PACKUU, Pav_QPACKUU, Pav_QPACKSU, Pav_QPACKSS,
+      Pav_PACKPXL,
+
+      /* Merge */
+      Pav_MRGHI, Pav_MRGLO,
+
+      /* Concatenation */
+      Pav_CATODD, Pav_CATEVEN,
+
+      /* Polynomial Multiply-Add */
+      Pav_POLYMULADD,
+
+      /* Cipher */
+      Pav_CIPHERV128, Pav_CIPHERLV128, Pav_NCIPHERV128, Pav_NCIPHERLV128,
+      Pav_CIPHERSUBV128,
+
+      /* Hash */
+      Pav_SHA256, Pav_SHA512,
+
+      /* BCD Arithmetic */
+      Pav_BCDAdd, Pav_BCDSub,
+
+      /* zero count */
+      Pav_ZEROCNTBYTE, Pav_ZEROCNTWORD, Pav_ZEROCNTHALF, Pav_ZEROCNTDBL,
+
+      /* Vector bit matrix transpose by byte */
+      Pav_BITMTXXPOSE,
+   }
+   PPCAvOp;
+
+extern const HChar* showPPCAvOp ( PPCAvOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Pavfp_INVALID,
+
+      /* Floating point binary */
+      Pavfp_ADDF, Pavfp_SUBF, Pavfp_MULF,
+      Pavfp_MAXF, Pavfp_MINF,
+      Pavfp_CMPEQF, Pavfp_CMPGTF, Pavfp_CMPGEF,
+
+      /* Floating point unary */
+      Pavfp_RCPF, Pavfp_RSQRTF,
+      Pavfp_CVTU2F, Pavfp_CVTS2F, Pavfp_QCVTF2U, Pavfp_QCVTF2S,
+      Pavfp_ROUNDM, Pavfp_ROUNDP, Pavfp_ROUNDN, Pavfp_ROUNDZ,
+   }
+   PPCAvFpOp;
+
+extern const HChar* showPPCAvFpOp ( PPCAvFpOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Pin_LI,         /* load word (32/64-bit) immediate (fake insn) */
+      Pin_Alu,        /* word add/sub/and/or/xor */
+      Pin_Shft,       /* word shl/shr/sar */
+      Pin_AddSubC,    /* add/sub with read/write carry */
+      Pin_Cmp,        /* word compare */
+      Pin_Unary,      /* not, neg, clz */
+      Pin_MulL,       /* widening multiply */
+      Pin_Div,        /* div */
+      Pin_Call,       /* call to address in register */
+      Pin_XDirect,    /* direct transfer to GA */
+      Pin_XIndir,     /* indirect transfer to GA */
+      Pin_XAssisted,  /* assisted transfer to GA */
+      Pin_CMov,       /* conditional move */
+      Pin_Load,       /* zero-extending load a 8|16|32|64 bit value from mem */
+      Pin_LoadL,      /* load-linked (lwarx/ldarx) 32|64 bit value from mem */
+      Pin_Store,      /* store a 8|16|32|64 bit value to mem */
+      Pin_StoreC,     /* store-conditional (stwcx./stdcx.) 32|64 bit val */
+      Pin_Set,        /* convert condition code to value 0 or 1 */
+      Pin_MfCR,       /* move from condition register to GPR */
+      Pin_MFence,     /* mem fence */
+
+      Pin_FpUnary,    /* FP unary op */
+      Pin_FpBinary,   /* FP binary op */
+      Pin_FpMulAcc,   /* FP multiply-accumulate style op */
+      Pin_FpLdSt,     /* FP load/store */
+      Pin_FpSTFIW,    /* stfiwx */
+      Pin_FpRSP,      /* FP round IEEE754 double to IEEE754 single */
+      Pin_FpCftI,     /* fcfid[u,s,us]/fctid[u]/fctiw[u] */
+      Pin_FpCMov,     /* FP floating point conditional move */
+      Pin_FpLdFPSCR,  /* mtfsf */
+      Pin_FpCmp,      /* FP compare, generating value into int reg */
+
+      Pin_RdWrLR,     /* Read/Write Link Register */
+
+      Pin_AvLdSt,     /* AV load/store (kludging for AMode_IR) */
+      Pin_AvUnary,    /* AV unary general reg=>reg */
+
+      Pin_AvBinary,   /* AV binary general reg,reg=>reg */
+      Pin_AvBin8x16,  /* AV binary, 8x16 */
+      Pin_AvBin16x8,  /* AV binary, 16x8 */
+      Pin_AvBin32x4,  /* AV binary, 32x4 */
+      Pin_AvBin64x2,  /* AV binary, 64x2 */
+
+      Pin_AvBin32Fx4, /* AV FP binary, 32Fx4 */
+      Pin_AvUn32Fx4,  /* AV FP unary,  32Fx4 */
+
+      Pin_AvPerm,     /* AV permute (shuffle) */
+      Pin_AvSel,      /* AV select */
+      Pin_AvSh,       /* AV shift left or right */
+      Pin_AvShlDbl,   /* AV shift-left double by imm */
+      Pin_AvSplat,    /* One elem repeated throughout dst */
+      Pin_AvLdVSCR,   /* mtvscr */
+      Pin_AvCMov,     /* AV conditional move */
+      Pin_AvCipherV128Unary,  /* AV Vector unary Cipher */
+      Pin_AvCipherV128Binary, /* AV Vector binary Cipher */
+      Pin_AvHashV128Binary, /* AV Vector binary Hash */
+      Pin_AvBCDV128Trinary, /* BCD Arithmetic */
+      Pin_Dfp64Unary,   /* DFP64  unary op */
+      Pin_Dfp128Unary,  /* DFP128 unary op */
+      Pin_DfpShift,     /* Decimal floating point shift by immediate value */
+      Pin_Dfp64Binary,  /* DFP64  binary op */
+      Pin_Dfp128Binary, /* DFP128 binary op */
+      Pin_DfpShift128,  /* 128-bit Decimal floating point shift by 
+                         * immediate value */
+      Pin_DfpD128toD64, /* DFP 128 to DFP 64 op */
+      Pin_DfpI64StoD128, /* DFP signed integer to DFP 128 */
+      Pin_DfpRound,       /* D64 round to D64 */
+      Pin_DfpRound128,    /* D128 round to D128 */
+      Pin_ExtractExpD128, /* DFP, extract 64 bit exponent */
+      Pin_InsertExpD128,  /* DFP, insert 64 bit exponent and 128 bit binary 
+                           * significand into a DFP 128-bit value*/
+      Pin_Dfp64Cmp,       /* DFP 64-bit compare, generating value into
+                           * int reg */
+      Pin_Dfp128Cmp,      /* DFP 128-bit  compare, generating value into
+                           * int reg */
+      Pin_DfpQuantize,    /* D64 quantize using register value, significance 
+                           * round */
+      Pin_DfpQuantize128, /* D128 quantize using register value, significance
+                           * round */
+      Pin_EvCheck,    /* Event check */
+      Pin_ProfInc     /* 64-bit profile counter increment */
+   }
+   PPCInstrTag;
+
+/* Destinations are on the LEFT (first operand) */
+
+typedef
+   struct {
+      PPCInstrTag tag;
+      union {
+         /* Get a 32/64-bit literal into a register.
+            May turn into a number of real insns. */
+         struct {
+            HReg dst;
+            ULong imm64;
+         } LI;
+         /* Integer add/sub/and/or/xor.  Limitations:
+            - For add, the immediate, if it exists, is a signed 16.
+            - For sub, the immediate, if it exists, is a signed 16
+              which may not be -32768, since no such instruction 
+              exists, and so we have to emit addi with +32768, but 
+              that is not possible.
+            - For and/or/xor,  the immediate, if it exists, 
+              is an unsigned 16.
+         */
+         struct {
+            PPCAluOp op;
+            HReg     dst;
+            HReg     srcL;
+            PPCRH*   srcR;
+         } Alu;
+         /* Integer shl/shr/sar.
+            Limitations: the immediate, if it exists,
+            is a signed 5-bit value between 1 and 31 inclusive.
+         */
+         struct {
+            PPCShftOp op;
+            Bool      sz32;   /* mode64 has both 32 and 64bit shft */
+            HReg      dst;
+            HReg      srcL;
+            PPCRH*    srcR;
+         } Shft;
+         /*  */
+         struct {
+            Bool isAdd;  /* else sub */
+            Bool setC;   /* else read carry */
+            HReg dst;
+            HReg srcL;
+            HReg srcR;
+         } AddSubC;
+         /* If signed, the immediate, if it exists, is a signed 16,
+            else it is an unsigned 16. */
+         struct {
+            Bool   syned;
+            Bool   sz32;    /* mode64 has both 32 and 64bit cmp */
+            UInt   crfD;
+            HReg   srcL;
+            PPCRH* srcR;
+         } Cmp;
+         /* Not, Neg, Clz32/64, Extsw */
+         struct {
+            PPCUnaryOp op;
+            HReg       dst;
+            HReg       src;
+         } Unary;
+         struct {
+            Bool syned;  /* meaningless if hi==False */
+            Bool hi;     /* False=>low, True=>high */
+            Bool sz32;   /* mode64 has both 32 & 64bit mull */
+            HReg dst;
+            HReg srcL;
+            HReg srcR;
+         } MulL;
+         /* ppc32 div/divu instruction. */
+         struct {
+            Bool extended;
+            Bool syned;
+            Bool sz32;   /* mode64 has both 32 & 64bit div */
+            HReg dst;
+            HReg srcL;
+            HReg srcR;
+         } Div;
+         /* Pseudo-insn.  Call target (an absolute address), on given
+            condition (which could be Pct_ALWAYS).  argiregs indicates
+            which of r3 .. r10 carries argument values for this call,
+            using a bit mask (1<<N is set if rN holds an arg, for N in
+            3 .. 10 inclusive). */
+         struct {
+            PPCCondCode cond;
+            Addr64      target;
+            UInt        argiregs;
+            RetLoc      rloc;     /* where the return value will be */
+         } Call;
+         /* Update the guest CIA value, then exit requesting to chain
+            to it.  May be conditional.  Use of Addr64 in order to cope
+            with 64-bit hosts. */
+         struct {
+            Addr64      dstGA;    /* next guest address */
+            PPCAMode*   amCIA;    /* amode in guest state for CIA */
+            PPCCondCode cond;     /* can be ALWAYS */
+            Bool        toFastEP; /* chain to the slow or fast point? */
+         } XDirect;
+         /* Boring transfer to a guest address not known at JIT time.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg        dstGA;
+            PPCAMode*   amCIA;
+            PPCCondCode cond; /* can be ALWAYS */
+         } XIndir;
+         /* Assisted transfer to a guest address, most general case.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg        dstGA;
+            PPCAMode*   amCIA;
+            PPCCondCode cond; /* can be ALWAYS */
+            IRJumpKind  jk;
+         } XAssisted;
+         /* Mov src to dst on the given condition, which may not
+            be the bogus Pct_ALWAYS. */
+         struct {
+            PPCCondCode cond;
+            HReg        dst;
+            PPCRI*      src;
+         } CMov;
+         /* Zero extending loads.  Dst size is host word size */
+         struct {
+            UChar     sz; /* 1|2|4|8 */
+            HReg      dst;
+            PPCAMode* src;
+         } Load;
+         /* Load-and-reserve (lwarx, ldarx) */
+         struct {
+            UChar sz; /* 4|8 */
+            HReg  dst;
+            HReg  src;
+         } LoadL;
+         /* 64/32/16/8 bit stores */
+         struct {
+            UChar     sz; /* 1|2|4|8 */
+            PPCAMode* dst;
+            HReg      src;
+         } Store;
+         /* Store-conditional (stwcx., stdcx.) */
+         struct {
+            UChar sz; /* 4|8 */
+            HReg  dst;
+            HReg  src;
+         } StoreC;
+         /* Convert a ppc condition code to value 0 or 1. */
+         struct {
+            PPCCondCode cond;
+            HReg        dst;
+         } Set;
+         /* Move the entire CR to a GPR */
+         struct {
+            HReg dst;
+         } MfCR;
+         /* Mem fence.  In short, an insn which flushes all preceding
+            loads and stores as much as possible before continuing.
+            On PPC we emit a "sync". */
+         struct {
+         } MFence;
+
+         /* PPC Floating point */
+         struct {
+            PPCFpOp op;
+            HReg    dst;
+            HReg    src;
+         } FpUnary;
+         struct {
+            PPCFpOp op;
+            HReg    dst;
+            HReg    srcL;
+            HReg    srcR;
+         } FpBinary;
+         struct {
+            PPCFpOp op;
+            HReg    dst;
+            HReg    srcML;
+            HReg    srcMR;
+            HReg    srcAcc;
+         } FpMulAcc;
+         struct {
+            Bool      isLoad;
+            UChar     sz; /* only 4 (IEEE single) or 8 (IEEE double) */
+            HReg      reg;
+            PPCAMode* addr;
+         } FpLdSt;
+         struct {
+            HReg addr; /* int reg */
+            HReg data; /* float reg */
+         } FpSTFIW;
+         /* Round 64-bit FP value to 32-bit FP value in an FP reg. */
+         struct {
+            HReg src;
+            HReg dst;
+         } FpRSP;
+         /* fcfid[u,s,us]/fctid[u]/fctiw[u].  Only some combinations
+            of the various fields are allowed.  This is asserted for
+            and documented in the code for the constructor,
+            PPCInstr_FpCftI, in host_ppc_defs.c.  */
+         struct {
+            Bool fromI; /* True== I->F,    False== F->I */
+            Bool int32; /* True== I is 32, False== I is 64 */
+            Bool syned;
+            Bool flt64; /* True== F is 64, False== F is 32 */
+            HReg src;
+            HReg dst;
+         } FpCftI;
+         /* FP mov src to dst on the given condition. */
+         struct {
+            PPCCondCode cond;
+            HReg        dst;
+            HReg        src;
+         } FpCMov;
+         /* Load FP Status & Control Register */
+         struct {
+            HReg src;
+            UInt dfp_rm;
+         } FpLdFPSCR;
+         /* Do a compare, generating result into an int register. */
+         struct {
+            UChar crfD;
+            HReg  dst;
+            HReg  srcL;
+            HReg  srcR;
+         } FpCmp;
+
+         /* Read/Write Link Register */
+         struct {
+            Bool wrLR;
+            HReg gpr;
+         } RdWrLR;
+
+         /* Simplistic AltiVec */
+         struct {
+            Bool      isLoad;
+            UChar     sz;      /* 8|16|32|128 */
+            HReg      reg;
+            PPCAMode* addr;
+         } AvLdSt;
+         struct {
+            PPCAvOp op;
+            HReg    dst;
+            HReg    src;
+         } AvUnary;
+         struct {
+            PPCAvOp op;
+            HReg    dst;
+            HReg    srcL;
+            HReg    srcR;
+         } AvBinary;
+         struct {
+            PPCAvOp op;
+            HReg    dst;
+            HReg    srcL;
+            HReg    srcR;
+         } AvBin8x16;
+         struct {
+            PPCAvOp op;
+            HReg    dst;
+            HReg    srcL;
+            HReg    srcR;
+         } AvBin16x8;
+         struct {
+            PPCAvOp op;
+            HReg    dst;
+            HReg    srcL;
+            HReg    srcR;
+         } AvBin32x4;
+         /* Can only be generated for CPUs capable of ISA 2.07 or above */
+         struct {
+            PPCAvOp op;
+            HReg    dst;
+            HReg    srcL;
+            HReg    srcR;
+         } AvBin64x2;
+         struct {
+            PPCAvFpOp op;
+            HReg      dst;
+            HReg      srcL;
+            HReg      srcR;
+         } AvBin32Fx4;
+         struct {
+            PPCAvFpOp op;
+            HReg      dst;
+            HReg      src;
+         } AvUn32Fx4;
+         /* Perm,Sel,SlDbl,Splat are all weird AV permutations */
+         struct {
+            HReg dst;
+            HReg srcL;
+            HReg srcR;
+            HReg ctl;
+         } AvPerm;
+         struct {
+            HReg dst;
+            HReg srcL;
+            HReg srcR;
+            HReg ctl;
+         } AvSel;
+         struct {
+            Bool  shLeft;
+            HReg  dst;
+            PPCAMode* addr;
+         } AvSh;
+         struct {
+            UChar shift;
+            HReg  dst;
+            HReg  srcL;
+            HReg  srcR;
+         } AvShlDbl;
+         struct {
+            UChar    sz;   /* 8,16,32 */
+            HReg     dst;
+            PPCVI5s* src; 
+         } AvSplat;
+         /* Mov src to dst on the given condition, which may not
+            be the bogus Xcc_ALWAYS. */
+         struct {
+            PPCCondCode cond;
+            HReg        dst;
+            HReg        src;
+         } AvCMov;
+         /* Load AltiVec Status & Control Register */
+         struct {
+            HReg src;
+         } AvLdVSCR;
+         struct {
+            PPCAvOp   op;
+            HReg      dst;
+            HReg      src;
+         } AvCipherV128Unary;
+         struct {
+            PPCAvOp     op;
+            HReg       dst;
+            HReg       src;
+            PPCRI* s_field;
+         } AvHashV128Binary;
+         struct {
+            PPCAvOp     op;
+            HReg       dst;
+            HReg      src1;
+            HReg      src2;
+            PPCRI*      ps;
+         } AvBCDV128Trinary;
+         struct {
+            PPCAvOp   op;
+            HReg      dst;
+            HReg      srcL;
+            HReg      srcR;
+         } AvCipherV128Binary;
+         struct {
+            PPCFpOp op;
+            HReg dst;
+            HReg src;
+         } Dfp64Unary;
+         struct {
+            PPCFpOp op;
+            HReg dst;
+            HReg srcL;
+            HReg srcR;
+         } Dfp64Binary;
+         struct {
+            PPCFpOp op;
+            HReg   dst;
+            HReg   src;
+            PPCRI* shift;
+         } DfpShift;
+         struct {
+            PPCFpOp op;
+            HReg dst_hi;
+            HReg dst_lo;
+            HReg src_hi;
+            HReg src_lo;
+         } Dfp128Unary;
+         struct {
+            /* The dst is used to pass the left source operand in and return
+             * the result.
+             */
+            PPCFpOp op;
+            HReg dst_hi;
+            HReg dst_lo;
+            HReg srcR_hi;
+            HReg srcR_lo;
+         } Dfp128Binary;
+         struct {
+            PPCFpOp op;
+            HReg   dst_hi;
+            HReg   dst_lo;
+            HReg   src_hi;
+            HReg   src_lo;
+            PPCRI* shift;
+         } DfpShift128;
+         struct {
+            HReg dst;
+            HReg src;
+            PPCRI* r_rmc;
+         } DfpRound;
+         struct {
+            HReg dst_hi;
+            HReg dst_lo;
+            HReg src_hi;
+            HReg src_lo;
+            PPCRI* r_rmc;
+         } DfpRound128;
+         struct {
+	    PPCFpOp op;
+            HReg dst;
+            HReg srcL;
+            HReg srcR;
+            PPCRI* rmc;
+         } DfpQuantize;
+         struct {
+	    PPCFpOp op;
+            HReg dst_hi;
+            HReg dst_lo;
+            HReg src_hi;
+            HReg src_lo;
+  	    PPCRI* rmc;
+         } DfpQuantize128;
+         struct {
+            PPCFpOp op;
+            HReg dst;
+            HReg src_hi;
+            HReg src_lo;
+         } ExtractExpD128;
+         struct {
+	    PPCFpOp op;
+            HReg dst_hi;
+            HReg dst_lo;
+            HReg srcL;
+            HReg srcR_hi;
+            HReg srcR_lo;
+         } InsertExpD128;
+         struct {
+            PPCFpOp op;
+            HReg   dst;
+            HReg   src_hi;
+            HReg   src_lo;
+         } DfpD128toD64;
+         struct {
+            PPCFpOp op;
+            HReg   dst_hi;
+            HReg   dst_lo;
+            HReg   src;
+         } DfpI64StoD128;
+         struct {
+            UChar crfD;
+            HReg  dst;
+            HReg  srcL;
+            HReg  srcR;
+         } Dfp64Cmp;
+         struct {         
+            UChar crfD;   
+            HReg  dst;    
+            HReg  srcL_hi;
+            HReg  srcL_lo;
+            HReg  srcR_hi;
+            HReg  srcR_lo;
+         } Dfp128Cmp;     
+         struct {
+            PPCAMode* amCounter;
+            PPCAMode* amFailAddr;
+         } EvCheck;
+         struct {
+            /* No fields.  The address of the counter to inc is
+               installed later, post-translation, by patching it in,
+               as it is not known at translation time. */
+         } ProfInc;
+      } Pin;
+   }
+   PPCInstr;
+
+
+extern PPCInstr* PPCInstr_LI         ( HReg, ULong, Bool );
+extern PPCInstr* PPCInstr_Alu        ( PPCAluOp, HReg, HReg, PPCRH* );
+extern PPCInstr* PPCInstr_Shft       ( PPCShftOp, Bool sz32, HReg, HReg, PPCRH* );
+extern PPCInstr* PPCInstr_AddSubC    ( Bool, Bool, HReg, HReg, HReg );
+extern PPCInstr* PPCInstr_Cmp        ( Bool, Bool, UInt, HReg, PPCRH* );
+extern PPCInstr* PPCInstr_Unary      ( PPCUnaryOp op, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_MulL       ( Bool syned, Bool hi32, Bool sz32, HReg, HReg, HReg );
+extern PPCInstr* PPCInstr_Div        ( Bool extended, Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_Call       ( PPCCondCode, Addr64, UInt, RetLoc );
+extern PPCInstr* PPCInstr_XDirect    ( Addr64 dstGA, PPCAMode* amCIA,
+                                       PPCCondCode cond, Bool toFastEP );
+extern PPCInstr* PPCInstr_XIndir     ( HReg dstGA, PPCAMode* amCIA,
+                                       PPCCondCode cond );
+extern PPCInstr* PPCInstr_XAssisted  ( HReg dstGA, PPCAMode* amCIA,
+                                       PPCCondCode cond, IRJumpKind jk );
+extern PPCInstr* PPCInstr_CMov       ( PPCCondCode, HReg dst, PPCRI* src );
+extern PPCInstr* PPCInstr_Load       ( UChar sz,
+                                       HReg dst, PPCAMode* src, Bool mode64 );
+extern PPCInstr* PPCInstr_LoadL      ( UChar sz,
+                                       HReg dst, HReg src, Bool mode64 );
+extern PPCInstr* PPCInstr_Store      ( UChar sz, PPCAMode* dst,
+                                       HReg src, Bool mode64 );
+extern PPCInstr* PPCInstr_StoreC     ( UChar sz, HReg dst, HReg src,
+                                       Bool mode64 );
+extern PPCInstr* PPCInstr_Set        ( PPCCondCode cond, HReg dst );
+extern PPCInstr* PPCInstr_MfCR       ( HReg dst );
+extern PPCInstr* PPCInstr_MFence     ( void );
+
+extern PPCInstr* PPCInstr_FpUnary    ( PPCFpOp op, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpBinary   ( PPCFpOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_FpMulAcc   ( PPCFpOp op, HReg dst, HReg srcML, 
+                                                   HReg srcMR, HReg srcAcc );
+extern PPCInstr* PPCInstr_FpLdSt     ( Bool isLoad, UChar sz, HReg, PPCAMode* );
+extern PPCInstr* PPCInstr_FpSTFIW    ( HReg addr, HReg data );
+extern PPCInstr* PPCInstr_FpRSP      ( HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpCftI     ( Bool fromI, Bool int32, Bool syned,
+                                       Bool dst64, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpCMov     ( PPCCondCode, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_FpLdFPSCR  ( HReg src, Bool dfp_rm );
+extern PPCInstr* PPCInstr_FpCmp      ( HReg dst, HReg srcL, HReg srcR );
+
+extern PPCInstr* PPCInstr_RdWrLR     ( Bool wrLR, HReg gpr );
+
+extern PPCInstr* PPCInstr_AvLdSt     ( Bool isLoad, UChar sz, HReg, PPCAMode* );
+extern PPCInstr* PPCInstr_AvUnary    ( PPCAvOp op, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_AvBinary   ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvBin8x16  ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvBin16x8  ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvBin32x4  ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvBin64x2  ( PPCAvOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvBin32Fx4 ( PPCAvFpOp op, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvUn32Fx4  ( PPCAvFpOp op, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_AvPerm     ( HReg dst, HReg srcL, HReg srcR, HReg ctl );
+extern PPCInstr* PPCInstr_AvSel      ( HReg ctl, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvSh       ( Bool shLeft, HReg dst, PPCAMode* am_addr );
+extern PPCInstr* PPCInstr_AvShlDbl   ( UChar shift, HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvSplat    ( UChar sz, HReg dst, PPCVI5s* src );
+extern PPCInstr* PPCInstr_AvCMov     ( PPCCondCode, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_AvLdVSCR   ( HReg src );
+extern PPCInstr* PPCInstr_AvCipherV128Unary  ( PPCAvOp op, HReg dst,
+                                               HReg srcR );
+extern PPCInstr* PPCInstr_AvCipherV128Binary ( PPCAvOp op, HReg dst,
+                                               HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_AvHashV128Binary ( PPCAvOp op, HReg dst,
+                                             HReg src, PPCRI* s_field );
+extern PPCInstr* PPCInstr_AvBCDV128Trinary ( PPCAvOp op, HReg dst,
+                                             HReg src1, HReg src2,
+                                             PPCRI* ps );
+extern PPCInstr* PPCInstr_Dfp64Unary  ( PPCFpOp op, HReg dst, HReg src );
+extern PPCInstr* PPCInstr_Dfp64Binary ( PPCFpOp op, HReg dst, HReg srcL,
+                                        HReg srcR );
+extern PPCInstr* PPCInstr_DfpShift    ( PPCFpOp op, HReg dst, HReg src,
+                                        PPCRI* shift );
+extern PPCInstr* PPCInstr_Dfp128Unary  ( PPCFpOp op, HReg dst_hi, HReg dst_lo,
+                                         HReg srcR_hi, HReg srcR_lo );
+extern PPCInstr* PPCInstr_Dfp128Binary ( PPCFpOp op, HReg dst_hi, HReg dst_lo,
+                                         HReg srcR_hi, HReg srcR_lo );
+extern PPCInstr* PPCInstr_DfpShift128  ( PPCFpOp op, HReg dst_hi, HReg src_hi,
+                                         HReg dst_lo, HReg src_lo,
+                                         PPCRI* shift );
+extern PPCInstr* PPCInstr_DfpD128toD64 ( PPCFpOp op, HReg dst,
+                                         HReg dst_lo, HReg src_lo);
+extern PPCInstr* PPCInstr_DfpI64StoD128  ( PPCFpOp op, HReg dst_hi,
+                                           HReg dst_lo, HReg src);
+extern PPCInstr* PPCInstr_DfpRound       ( HReg dst, HReg src, PPCRI* r_rmc );
+extern PPCInstr* PPCInstr_DfpRound128    ( HReg dst_hi, HReg dst_lo, HReg src_hi,
+                                           HReg src_lo, PPCRI* r_rmc );
+extern PPCInstr* PPCInstr_DfpQuantize    ( PPCFpOp op, HReg dst, HReg srcL,
+                                           HReg srcR, PPCRI* rmc );
+extern PPCInstr* PPCInstr_DfpQuantize128 ( PPCFpOp op, HReg dst_hi,
+                                           HReg dst_lo,
+                                           HReg src_hi,
+                                           HReg src_lo, PPCRI* rmc );
+extern PPCInstr* PPCInstr_ExtractExpD128 ( PPCFpOp op,   HReg dst, 
+                                           HReg src_hi, HReg src_lo );
+extern PPCInstr* PPCInstr_InsertExpD128  ( PPCFpOp op,   HReg dst_hi, 
+                                           HReg dst_lo,  HReg srcL,
+                                           HReg srcR_hi, HReg srcR_lo );
+extern PPCInstr* PPCInstr_Dfp64Cmp       ( HReg dst, HReg srcL, HReg srcR );
+extern PPCInstr* PPCInstr_Dfp128Cmp      ( HReg dst, HReg srcL_hi, HReg srcL_lo,
+                                           HReg srcR_hi, HReg srcR_lo );
+extern PPCInstr* PPCInstr_EvCheck     ( PPCAMode* amCounter,
+                                        PPCAMode* amFailAddr );
+extern PPCInstr* PPCInstr_ProfInc     ( void );
+
+extern void ppPPCInstr(const PPCInstr*, Bool mode64);
+
+
+/* Some functions that insulate the register allocator from details
+   of the underlying instruction set. */
+extern void getRegUsage_PPCInstr ( HRegUsage*, const PPCInstr*, Bool mode64 );
+extern void mapRegs_PPCInstr     ( HRegRemap*, PPCInstr* , Bool mode64);
+extern Bool isMove_PPCInstr      ( const PPCInstr*, HReg*, HReg* );
+extern Int          emit_PPCInstr   ( /*MB_MOD*/Bool* is_profInc,
+                                      UChar* buf, Int nbuf, const PPCInstr* i, 
+                                      Bool mode64,
+                                      VexEndness endness_host,
+                                      const void* disp_cp_chain_me_to_slowEP,
+                                      const void* disp_cp_chain_me_to_fastEP,
+                                      const void* disp_cp_xindir,
+                                      const void* disp_cp_xassisted );
+
+extern void genSpill_PPC  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                            HReg rreg, Int offsetB, Bool mode64 );
+extern void genReload_PPC ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                            HReg rreg, Int offsetB, Bool mode64 );
+
+extern const RRegUniverse* getRRegUniverse_PPC ( Bool mode64 );
+
+extern HInstrArray* iselSB_PPC           ( const IRSB*,
+                                           VexArch,
+                                           const VexArchInfo*,
+                                           const VexAbiInfo*,
+                                           Int offs_Host_EvC_Counter,
+                                           Int offs_Host_EvC_FailAddr,
+                                           Bool chainingAllowed,
+                                           Bool addProfInc,
+                                           Addr max_ga );
+
+/* How big is an event check?  This is kind of a kludge because it
+   depends on the offsets of host_EvC_FAILADDR and
+   host_EvC_COUNTER. */
+extern Int evCheckSzB_PPC (void);
+
+/* Perform a chaining and unchaining of an XDirect jump. */
+extern VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+                                        void* place_to_chain,
+                                        const void* disp_cp_chain_me_EXPECTED,
+                                        const void* place_to_jump_to,
+                                        Bool  mode64 );
+
+extern VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+                                          void* place_to_unchain,
+                                          const void* place_to_jump_to_EXPECTED,
+                                          const void* disp_cp_chain_me,
+                                          Bool  mode64 );
+
+/* Patch the counter location into an existing ProfInc point. */
+extern VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+                                        void*  place_to_patch,
+                                        const ULong* location_of_counter,
+                                        Bool   mode64 );
+
+
+#endif /* ndef __VEX_HOST_PPC_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                     host_ppc_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_ppc_isel.c b/VEX/priv/host_ppc_isel.c
new file mode 100644
index 0000000..40fe895
--- /dev/null
+++ b/VEX/priv/host_ppc_isel.c
@@ -0,0 +1,6293 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                   host_ppc_isel.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "ir_match.h"
+#include "main_util.h"
+#include "main_globals.h"
+#include "host_generic_regs.h"
+#include "host_generic_simd64.h"
+#include "host_ppc_defs.h"
+
+/* GPR register class for ppc32/64 */
+#define HRcGPR(_mode64) ((_mode64) ? HRcInt64 : HRcInt32)
+
+
+/*---------------------------------------------------------*/
+/*--- Register Usage Conventions                        ---*/
+/*---------------------------------------------------------*/
+/*
+  Integer Regs
+  ------------
+  GPR0       Reserved
+  GPR1       Stack Pointer
+  GPR2       not used - TOC pointer
+  GPR3:10    Allocateable
+  GPR11      if mode64: not used - calls by ptr / env ptr for some langs
+  GPR12      if mode64: not used - exceptions / global linkage code
+  GPR13      not used - Thread-specific pointer
+  GPR14:28   Allocateable
+  GPR29      Unused by us (reserved for the dispatcher)
+  GPR30      AltiVec temp spill register
+  GPR31      GuestStatePointer
+
+  Of Allocateable regs:
+  if (mode64)
+    GPR3:10  Caller-saved regs
+  else
+    GPR3:12  Caller-saved regs
+  GPR14:29   Callee-saved regs
+
+  GPR3       [Return | Parameter] - carrying reg
+  GPR4:10    Parameter-carrying regs
+
+
+  Floating Point Regs
+  -------------------
+  FPR0:31    Allocateable
+
+  FPR0       Caller-saved - scratch reg
+  if (mode64)
+    FPR1:13  Caller-saved - param & return regs
+  else
+    FPR1:8   Caller-saved - param & return regs
+    FPR9:13  Caller-saved regs
+  FPR14:31   Callee-saved regs
+
+
+  Vector Regs (on processors with the VMX feature)
+  -----------
+  VR0-VR1    Volatile scratch registers
+  VR2-VR13   Volatile vector parameters registers
+  VR14-VR19  Volatile scratch registers
+  VR20-VR31  Non-volatile registers
+  VRSAVE     Non-volatile 32-bit register
+*/
+
+
+/*---------------------------------------------------------*/
+/*--- PPC FP Status & Control Register Conventions      ---*/
+/*---------------------------------------------------------*/
+/*
+  Vex-generated code expects to run with the FPU set as follows: all
+  exceptions masked.  The rounding mode is set appropriately before
+  each floating point insn emitted (or left unchanged if known to be
+  correct already).  There are a few fp insns (fmr,fneg,fabs,fnabs),
+  which are unaffected by the rm and so the rounding mode is not set
+  prior to them.  
+
+  At least on MPC7447A (Mac Mini), frsqrte is also not affected by
+  rounding mode.  At some point the ppc docs get sufficiently vague
+  that the only way to find out is to write test programs.
+*/
+/* Notes on the FP instruction set, 6 Feb 06.
+
+What                 exns -> CR1 ?   Sets FPRF ?   Observes RM ?
+-------------------------------------------------------------
+
+fmr[.]                   if .             n             n
+fneg[.]                  if .             n             n
+fabs[.]                  if .             n             n
+fnabs[.]                 if .             n             n
+
+fadd[.]                  if .             y             y
+fadds[.]                 if .             y             y
+fcfid[.] (Si64->dbl)     if .             y             y
+fcfidU[.] (Ui64->dbl)    if .             y             y
+fcfids[.] (Si64->sngl)   if .             Y             Y
+fcfidus[.] (Ui64->sngl)  if .             Y             Y
+fcmpo (cmp, result       n                n             n
+fcmpu  to crfD)          n                n             n
+fctid[.]  (dbl->i64)     if .       ->undef             y
+fctidz[.] (dbl->i64)     if .       ->undef    rounds-to-zero
+fctiw[.]  (dbl->i32)     if .       ->undef             y
+fctiwz[.] (dbl->i32)     if .       ->undef    rounds-to-zero
+fdiv[.]                  if .             y             y
+fdivs[.]                 if .             y             y
+fmadd[.]                 if .             y             y
+fmadds[.]                if .             y             y
+fmsub[.]                 if .             y             y
+fmsubs[.]                if .             y             y
+fmul[.]                  if .             y             y
+fmuls[.]                 if .             y             y
+
+(note: for fnm*, rounding happens before final negation)
+fnmadd[.]                if .             y             y
+fnmadds[.]               if .             y             y
+fnmsub[.]                if .             y             y
+fnmsubs[.]               if .             y             y
+
+fre[.]                   if .             y             y
+fres[.]                  if .             y             y
+
+frsqrte[.]               if .             y       apparently not
+
+fsqrt[.]                 if .             y             y
+fsqrts[.]                if .             y             y
+fsub[.]                  if .             y             y
+fsubs[.]                 if .             y             y
+
+
+fpscr: bits 30-31 (ibm) is RM
+            24-29 (ibm) are exnmasks/non-IEEE bit, all zero
+	    15-19 (ibm) is FPRF: class, <, =, >, UNord
+
+ppc fe(guest) makes fpscr read as all zeros except RM (and maybe FPRF
+in future) 
+
+mcrfs     - move fpscr field to CR field
+mtfsfi[.] - 4 bit imm moved to fpscr field
+mtfsf[.]  - move frS[low 1/2] to fpscr but using 8-bit field mask
+mtfsb1[.] - set given fpscr bit
+mtfsb0[.] - clear given fpscr bit
+mffs[.]   - move all fpscr to frD[low 1/2]
+
+For [.] presumably cr1 is set with exn summary bits, as per 
+main FP insns
+
+A single precision store truncates/denormalises the in-register value,
+but does not round it.  This is so that flds followed by fsts is
+always the identity.
+*/
+
+
+/*---------------------------------------------------------*/
+/*--- misc helpers                                      ---*/
+/*---------------------------------------------------------*/
+
+/* These are duplicated in guest-ppc/toIR.c */
+/* Build an IRExpr_Unop node applying 'op' to 'a'. */
+static IRExpr* unop ( IROp op, IRExpr* a )
+{
+   return IRExpr_Unop(op, a);
+}
+
+/* Build an IRExpr holding the 32-bit constant 'i'. */
+static IRExpr* mkU32 ( UInt i )
+{
+   return IRExpr_Const(IRConst_U32(i));
+}
+
+/* Build a binder node, for use with the ir_match.h pattern matcher. */
+static IRExpr* bind ( Int binder )
+{
+   return IRExpr_Binder(binder);
+}
+
+/* True iff 'e' is the 8-bit constant zero. */
+static Bool isZeroU8 ( IRExpr* e )
+{
+   return e->tag == Iex_Const
+          && e->Iex.Const.con->tag == Ico_U8
+          && e->Iex.Const.con->Ico.U8 == 0;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISelEnv                                           ---*/
+/*---------------------------------------------------------*/
+
+/* This carries around:
+
+   - A mapping from IRTemp to IRType, giving the type of any IRTemp we
+     might encounter.  This is computed before insn selection starts,
+     and does not change.
+
+   - A mapping from IRTemp to HReg.  This tells the insn selector
+     which virtual register(s) are associated with each IRTemp
+     temporary.  This is computed before insn selection starts, and
+     does not change.  We expect this mapping to map precisely the
+     same set of IRTemps as the type mapping does.
+ 
+         - vregmapLo    holds the primary register for the IRTemp.
+         - vregmapMedLo holds the secondary register for the IRTemp,
+              if any is needed.  That's only for Ity_I64 temps
+              in 32 bit mode or Ity_I128 temps in 64-bit mode.
+         - vregmapMedHi is only for dealing with Ity_I128 temps in
+              32 bit mode.  It holds bits 95:64 (Intel numbering)
+              of the IRTemp.
+         - vregmapHi is also only for dealing with Ity_I128 temps
+              in 32 bit mode.  It holds the most significant bits
+              (127:96 in Intel numbering) of the IRTemp.
+
+    - The code array, that is, the insns selected so far.
+ 
+    - A counter, for generating new virtual registers.
+ 
+    - The host subarchitecture we are selecting insns for.  
+      This is set at the start and does not change.
+ 
+    - A Bool to tell us if the host is 32 or 64bit.
+      This is set at the start and does not change.
+ 
+    - An IRExpr*, which may be NULL, holding the IR expression (an
+      IRRoundingMode-encoded value) to which the FPU's rounding mode
+      was most recently set.  Setting to NULL is always safe.  Used to
+      avoid redundant settings of the FPU's rounding mode, as
+      described in set_FPU_rounding_mode below.
+
+    - A VexMiscInfo*, needed for knowing how to generate
+      function calls for this target.
+
+    - The maximum guest address of any guest insn in this block.
+      Actually, the address of the highest-addressed byte from any
+      insn in this block.  Is set at the start and does not change.
+      This is used for detecting jumps which are definitely
+      forward-edges from this block, and therefore can be made
+      (chained) to the fast entry point of the destination, thereby
+      avoiding the destination's event check.
+*/
+ 
+typedef
+   struct {
+      /* Constant -- are set at the start and do not change. */
+      IRTypeEnv* type_env;
+                              //    64-bit mode              32-bit mode
+      HReg*    vregmapLo;     // Low 64-bits [63:0]    Low 32-bits     [31:0]
+      HReg*    vregmapMedLo;  // high 64-bits[127:64]  Next 32-bits    [63:32]
+      HReg*    vregmapMedHi;  // unused                Next 32-bits    [95:64]
+      HReg*    vregmapHi;     // unused                highest 32-bits [127:96]
+      Int      n_vregmap;
+
+      /* 27 Jan 06: Not currently used, but should be */
+      UInt         hwcaps;
+
+      Bool         mode64;
+
+      const VexAbiInfo*  vbi;   // unused
+
+      /* May jumps be chained directly to other translations? */
+      Bool         chainingAllowed;
+      /* Address of the highest-addressed guest byte in this block;
+         used to detect definitely-forward edges (see comment above). */
+      Addr64       max_ga;
+
+      /* These are modified as we go along. */
+      HInstrArray* code;
+      Int          vreg_ctr;
+
+      /* IRRoundingMode-encoded expr most recently written to the FPU's
+         rounding mode, or NULL if unknown; lets us skip redundant
+         rounding-mode sets.  NULL is always safe. */
+      IRExpr*      previous_rm;
+   }
+   ISelEnv;
+ 
+ 
+/* Return the (primary) virtual register allocated to 'tmp'. */
+static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp )
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   return env->vregmapLo[tmp];
+}
+
+/* Return the register pair allocated to 'tmp': *vrLO gets the low
+   half, *vrHI the high half. */
+static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO,
+                               ISelEnv* env, IRTemp tmp )
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmapMedLo[tmp]));
+   *vrLO = env->vregmapLo[tmp];
+   *vrHI = env->vregmapMedLo[tmp];
+}
+
+/* Only for use in 32-bit mode */
+static void lookupIRTempQuad ( HReg* vrHi, HReg* vrMedHi, HReg* vrMedLo,
+                               HReg* vrLo, ISelEnv* env, IRTemp tmp )
+{
+   vassert(!env->mode64);   /* 32-bit mode only */
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmapMedLo[tmp]));
+   *vrHi    = env->vregmapHi[tmp];      /* bits 127:96 */
+   *vrMedHi = env->vregmapMedHi[tmp];   /* bits 95:64  */
+   *vrMedLo = env->vregmapMedLo[tmp];   /* bits 63:32  */
+   *vrLo    = env->vregmapLo[tmp];      /* bits 31:0   */
+}
+
+/* Append 'instr' to the code generated so far, printing it first if
+   vcode tracing is enabled. */
+static void addInstr ( ISelEnv* env, PPCInstr* instr )
+{
+   addHInstr(env->code, instr);
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      ppPPCInstr(instr, env->mode64);
+      vex_printf("\n");
+   }
+}
+
+/* Allocate a new virtual integer register (32- or 64-bit class,
+   according to the host mode). */
+static HReg newVRegI ( ISelEnv* env )
+{
+   HReg reg
+      = mkHReg(True/*vreg*/, HRcGPR(env->mode64), 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+/* Allocate a new virtual 64-bit floating-point register. */
+static HReg newVRegF ( ISelEnv* env )
+{
+   HReg reg = mkHReg(True/*vreg*/, HRcFlt64, 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+/* Allocate a new virtual 128-bit vector register. */
+static HReg newVRegV ( ISelEnv* env )
+{
+   HReg reg = mkHReg(True/*vreg*/, HRcVec128, 0/*enc*/, env->vreg_ctr);
+   env->vreg_ctr++;
+   return reg;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Forward declarations                        ---*/
+/*---------------------------------------------------------*/
+
+/* These are organised as iselXXX and iselXXX_wrk pairs.  The
+   iselXXX_wrk do the real work, but are not to be called directly.
+   For each XXX, iselXXX calls its iselXXX_wrk counterpart, then
+   checks that all returned registers are virtual.  You should not
+   call the _wrk version directly.
+
+   'Word' refers to the size of the native machine word, that is,
+   32-bit int in 32-bit mode and 64-bit int in 64-bit mode.  '2Word'
+   therefore refers to a double-width (64/128-bit) quantity in two
+   integer registers.
+*/
+/* 32-bit mode: compute an I8/I16/I32 into a GPR.
+   64-bit mode: compute an I8/I16/I32/I64 into a GPR. */
+static HReg          iselWordExpr_R_wrk ( ISelEnv* env, IRExpr* e,
+                                          IREndness IEndianess );
+static HReg          iselWordExpr_R     ( ISelEnv* env, IRExpr* e,
+                                          IREndness IEndianess );
+
+/* 32-bit mode: Compute an I8/I16/I32 into a RH
+                (reg-or-halfword-immediate).
+   64-bit mode: Compute an I8/I16/I32/I64 into a RH
+                (reg-or-halfword-immediate).
+   It's important to specify whether the immediate is to be regarded
+   as signed or not.  If yes, this will never return -32768 as an
+   immediate; this guarantees that all signed immediates that are
+   returned can have their sign inverted if need be. 
+*/
+static PPCRH*        iselWordExpr_RH_wrk ( ISelEnv* env, 
+                                           Bool syned, IRExpr* e,
+                                           IREndness IEndianess );
+static PPCRH*        iselWordExpr_RH     ( ISelEnv* env, 
+                                           Bool syned, IRExpr* e,
+                                           IREndness IEndianess );
+
+/* 32-bit mode: compute an I32 into a RI (reg or 32-bit immediate).
+   64-bit mode: compute an I64 into a RI (reg or 64-bit immediate). */
+static PPCRI*        iselWordExpr_RI_wrk ( ISelEnv* env, IRExpr* e,
+                                           IREndness IEndianess );
+static PPCRI*        iselWordExpr_RI     ( ISelEnv* env, IRExpr* e,
+                                           IREndness IEndianess );
+
+/* In 32 bit mode ONLY, compute an I8 into a
+   reg-or-5-bit-unsigned-immediate, the latter being an immediate in
+   the range 1 .. 31 inclusive.  Used for doing shift amounts. */
+static PPCRH*        iselWordExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e,
+                                             IREndness IEndianess );
+static PPCRH*        iselWordExpr_RH5u     ( ISelEnv* env, IRExpr* e,
+                                             IREndness IEndianess );
+
+/* In 64-bit mode ONLY, compute an I8 into a
+   reg-or-6-bit-unsigned-immediate, the latter being an immediate in
+   the range 1 .. 63 inclusive.  Used for doing shift amounts. */
+static PPCRH*        iselWordExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e,
+                                             IREndness IEndianess );
+static PPCRH*        iselWordExpr_RH6u     ( ISelEnv* env, IRExpr* e,
+                                             IREndness IEndianess );
+
+/* 32-bit mode: compute an I32 into an AMode.
+   64-bit mode: compute an I64 into an AMode.
+
+   Requires knowing (xferTy) the type of data to be loaded/stored
+   using this amode.  That is so that, for 64-bit code generation, any
+   PPCAMode_IR returned will have an index (immediate offset) field
+   that is guaranteed to be 4-aligned, if there is any chance that the
+   amode is to be used in ld/ldu/lda/std/stdu.
+
+   Since there are no such restrictions on 32-bit insns, xferTy is
+   ignored for 32-bit code generation. */
+static PPCAMode*     iselWordExpr_AMode_wrk ( ISelEnv* env, IRExpr* e,
+                                              IRType xferTy,
+                                              IREndness IEndianess );
+static PPCAMode*     iselWordExpr_AMode     ( ISelEnv* env, IRExpr* e,
+                                              IRType xferTy,
+                                              IREndness IEndianess );
+
+static void iselInt128Expr_to_32x4_wrk ( HReg* rHi, HReg* rMedHi,
+                                         HReg* rMedLo, HReg* rLo,
+                                         ISelEnv* env, IRExpr* e,
+                                         IREndness IEndianess );
+static void iselInt128Expr_to_32x4     ( HReg* rHi, HReg* rMedHi,
+                                         HReg* rMedLo, HReg* rLo,
+                                         ISelEnv* env, IRExpr* e,
+                                         IREndness IEndianess );
+
+
+/* 32-bit mode ONLY: compute an I64 into a GPR pair. */
+static void          iselInt64Expr_wrk ( HReg* rHi, HReg* rLo,
+                                         ISelEnv* env, IRExpr* e,
+                                         IREndness IEndianess );
+static void          iselInt64Expr     ( HReg* rHi, HReg* rLo,
+                                         ISelEnv* env, IRExpr* e,
+                                         IREndness IEndianess );
+
+/* 64-bit mode ONLY: compute an I128 into a GPR64 pair. */
+static void          iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, 
+                                          ISelEnv* env, IRExpr* e,
+                                          IREndness IEndianess );
+
+static void          iselInt128Expr     ( HReg* rHi, HReg* rLo, 
+                                          ISelEnv* env, IRExpr* e,
+                                          IREndness IEndianess );
+
+static PPCCondCode   iselCondCode_wrk ( ISelEnv* env, IRExpr* e,
+                                        IREndness IEndianess );
+static PPCCondCode   iselCondCode     ( ISelEnv* env, IRExpr* e,
+                                        IREndness IEndianess );
+
+static HReg          iselDblExpr_wrk ( ISelEnv* env, IRExpr* e,
+                                       IREndness IEndianess );
+static HReg          iselDblExpr     ( ISelEnv* env, IRExpr* e,
+                                       IREndness IEndianess );
+
+static HReg          iselFltExpr_wrk ( ISelEnv* env, IRExpr* e,
+                                       IREndness IEndianess );
+static HReg          iselFltExpr     ( ISelEnv* env, IRExpr* e,
+                                       IREndness IEndianess );
+
+static HReg          iselVecExpr_wrk ( ISelEnv* env, IRExpr* e,
+                                       IREndness IEndianess );
+static HReg          iselVecExpr     ( ISelEnv* env, IRExpr* e,
+                                       IREndness IEndianess );
+
+/* 64-bit mode ONLY. */
+static HReg          iselDfp32Expr_wrk ( ISelEnv* env, IRExpr* e,
+                                         IREndness IEndianess );
+static HReg          iselDfp32Expr     ( ISelEnv* env, IRExpr* e,
+                                         IREndness IEndianess );
+static HReg          iselDfp64Expr_wrk ( ISelEnv* env, IRExpr* e,
+                                         IREndness IEndianess );
+static HReg          iselDfp64Expr     ( ISelEnv* env, IRExpr* e,
+                                         IREndness IEndianess );
+
+/* 64-bit mode ONLY: compute an D128 into a GPR64 pair. */
+static void iselDfp128Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env,
+                                 IRExpr* e, IREndness IEndianess );
+static void iselDfp128Expr     ( HReg* rHi, HReg* rLo, ISelEnv* env,
+                                 IRExpr* e, IREndness IEndianess );
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Misc helpers                                ---*/
+/*---------------------------------------------------------*/
+
+/* Make an int reg-reg move. */
+
+static PPCInstr* mk_iMOVds_RR ( HReg r_dst, HReg r_src )
+{
+   vassert(hregClass(r_dst) == hregClass(r_src));
+   vassert(hregClass(r_src) ==  HRcInt32 ||
+           hregClass(r_src) ==  HRcInt64);
+   /* "mr dst,src" is expressed as "or dst,src,src". */
+   return PPCInstr_Alu(Palu_OR, r_dst, r_src, PPCRH_Reg(r_src));
+}
+
+/* Advance/retreat %r1 by n. */
+
+/* sp := sp + n.  n must be a multiple of 16 and at most 1024. */
+static void add_to_sp ( ISelEnv* env, UInt n )
+{
+   HReg sp = StackFramePtr(env->mode64);
+   vassert(n <= 1024 && (n%16) == 0);
+   addInstr(env, PPCInstr_Alu( Palu_ADD, sp, sp,
+                               PPCRH_Imm(True,toUShort(n)) ));
+}
+
+/* sp := sp - n.  n must be a multiple of 16 and at most 1024. */
+static void sub_from_sp ( ISelEnv* env, UInt n )
+{
+   HReg sp = StackFramePtr(env->mode64);
+   vassert(n <= 1024 && (n%16) == 0);
+   addInstr(env, PPCInstr_Alu( Palu_SUB, sp, sp,
+                               PPCRH_Imm(True,toUShort(n)) ));
+}
+
+/*
+  Returns a quadword (16-byte) aligned address on the stack:
+   - copies SP, adds 16 bytes, aligns down to a quadword boundary.
+  Call sub_from_sp(32) before calling this, as it expects to have
+  32 bytes to play with.
+*/
+static HReg get_sp_aligned16 ( ISelEnv* env )
+{
+   HReg       r = newVRegI(env);
+   HReg align16 = newVRegI(env);
+   addInstr(env, mk_iMOVds_RR(r, StackFramePtr(env->mode64)));
+   // add 16
+   addInstr(env, PPCInstr_Alu( Palu_ADD, r, r,
+                               PPCRH_Imm(True,toUShort(16)) ));
+   // mask to quadword: clear the low 4 bits of r
+   addInstr(env,
+            PPCInstr_LI(align16, 0xFFFFFFFFFFFFFFF0ULL, env->mode64));
+   addInstr(env, PPCInstr_Alu(Palu_AND, r,r, PPCRH_Reg(align16)));
+   return r;
+}
+
+
+
+/* Load 2*I32 regs to fp reg */
+static HReg mk_LoadRR32toFPR ( ISelEnv* env,
+                               HReg r_srcHi, HReg r_srcLo )
+{
+   /* 32-bit mode only: materialise an F64 from two I32 GPR halves by
+      storing them adjacently on the stack and reloading the 8 bytes
+      through the FP unit. */
+   vassert(!env->mode64);
+   vassert(hregClass(r_srcHi) == HRcInt32);
+   vassert(hregClass(r_srcLo) == HRcInt32);
+
+   HReg fr_res = newVRegF(env);
+
+   sub_from_sp( env, 16 );        /* claim scratch space */
+   PPCAMode* am_word0 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+   PPCAMode* am_word1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );
+
+   /* write the two 32-bit halves ... */
+   addInstr(env, PPCInstr_Store( 4, am_word0, r_srcHi, env->mode64 ));
+   addInstr(env, PPCInstr_Store( 4, am_word1, r_srcLo, env->mode64 ));
+
+   /* ... and reload the pair as a single double */
+   addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_res, am_word0));
+
+   add_to_sp( env, 16 );          /* release scratch space */
+   return fr_res;
+}
+
+/* Load I64 reg to fp reg */
+static HReg mk_LoadR64toFPR ( ISelEnv* env, HReg r_src )
+{
+   /* 64-bit mode only: move an I64 GPR into an F64 register by
+      bouncing the value through 8 bytes of stack. */
+   vassert(env->mode64);
+   vassert(hregClass(r_src) == HRcInt64);
+
+   HReg fr_res = newVRegF(env);
+
+   sub_from_sp( env, 16 );        /* claim scratch space */
+   PPCAMode* am_slot = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+
+   /* write the integer value ... */
+   addInstr(env, PPCInstr_Store( 8, am_slot, r_src, env->mode64 ));
+
+   /* ... and reload it as a double */
+   addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_res, am_slot));
+
+   add_to_sp( env, 16 );          /* release scratch space */
+   return fr_res;
+}
+
+
+/* Given an amode, return one which references 4 bytes further
+   along. */
+
+static PPCAMode* advance4 ( ISelEnv* env, PPCAMode* am )
+{
+   /* Clone |am| and bump its displacement by 4 bytes, panicking if
+      the amode is not reg+imm or the new displacement would overflow
+      the signed 16-bit immediate field. */
+   PPCAMode* next = dopyPPCAMode( am );
+   if (next->tag != Pam_IR || next->Pam.IR.index + 4 > 32767)
+      vpanic("advance4(ppc,host)");
+   next->Pam.IR.index += 4;
+   return next;
+}
+
+
+/* Given a guest-state array descriptor, an index expression and a
+   bias, generate a PPCAMode pointing at the relevant piece of 
+   guest state.  */
+static
+PPCAMode* genGuestArrayOffset ( ISelEnv* env, IRRegArray* descr,
+                                IRExpr* off, Int bias, IREndness IEndianess )
+{
+   HReg rtmp, roff;
+   Int  elemSz = sizeofIRType(descr->elemTy);
+   Int  nElems = descr->nElems;
+   Int  shift  = 0;
+
+   /* Throw out any cases we don't need.  In theory there might be a
+      day where we need to handle others, but not today. */
+
+   if (nElems != 16 && nElems != 32)
+      vpanic("genGuestArrayOffset(ppc host)(1)");
+
+   /* Only 4- and 8-byte elements are supported; the element size
+      turns into the left-shift amount used to scale the index. */
+   switch (elemSz) {
+      case 4:  shift = 2; break;
+      case 8:  shift = 3; break;
+      default: vpanic("genGuestArrayOffset(ppc host)(2)");
+   }
+
+   /* Keep |bias| and the array base small enough that the 16-bit
+      signed immediates emitted below cannot overflow. */
+   if (bias < -100 || bias > 100) /* somewhat arbitrarily */
+      vpanic("genGuestArrayOffset(ppc host)(3)");
+   if (descr->base < 0 || descr->base > 5000) /* somewhat arbitrarily */
+      vpanic("genGuestArrayOffset(ppc host)(4)");
+
+   /* Compute off into a reg, %off.  Then return:
+
+         addi %tmp, %off, bias (if bias != 0)
+         andi %tmp, nElems-1
+         sldi %tmp, shift
+         addi %tmp, %tmp, base
+         ... Baseblockptr + %tmp ...
+   */
+   roff = iselWordExpr_R(env, off, IEndianess);
+   rtmp = newVRegI(env);
+   /* rtmp = roff + bias */
+   addInstr(env, PPCInstr_Alu(
+                    Palu_ADD, 
+                    rtmp, roff, 
+                    PPCRH_Imm(True/*signed*/, toUShort(bias))));
+   /* rtmp &= nElems-1 : wrap the biased index into the array */
+   addInstr(env, PPCInstr_Alu(
+                    Palu_AND, 
+                    rtmp, rtmp, 
+                    PPCRH_Imm(False/*unsigned*/, toUShort(nElems-1))));
+   /* rtmp <<= shift : scale element index to a byte offset */
+   addInstr(env, PPCInstr_Shft(
+                    Pshft_SHL, 
+                    env->mode64 ? False : True/*F:64-bit, T:32-bit shift*/,
+                    rtmp, rtmp, 
+                    PPCRH_Imm(False/*unsigned*/, toUShort(shift))));
+   /* rtmp += base : add the array's offset within the guest state */
+   addInstr(env, PPCInstr_Alu(
+                    Palu_ADD, 
+                    rtmp, rtmp, 
+                    PPCRH_Imm(True/*signed*/, toUShort(descr->base))));
+   return
+      PPCAMode_RR( GuestStatePtr(env->mode64), rtmp );
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Function call helpers                       ---*/
+/*---------------------------------------------------------*/
+
+/* Used only in doHelperCall.  See big comment in doHelperCall re
+   handling of register-parameter args.  This function figures out
+   whether evaluation of an expression might require use of a fixed
+   register.  If in doubt return True (safe but suboptimal).
+*/
+static
+Bool mightRequireFixedRegs ( IRExpr* e )
+{
+   /* Temporaries, constants and guest-state reads are evaluated
+      without touching any fixed register; everything else is
+      conservatively assumed to need one. */
+   IRExprTag t = e->tag;
+   if (t == Iex_RdTmp || t == Iex_Const || t == Iex_Get)
+      return False;
+   return True;
+}
+
+
+/* Do a complete function call.  |guard| is a Ity_Bit expression
+   indicating whether or not the call happens.  If guard==NULL, the
+   call is unconditional.  |retloc| is set to indicate where the
+   return value is after the call.  The caller (of this fn) must
+   generate code to add |stackAdjustAfterCall| to the stack pointer
+   after the call is done. */
+
+static
+void doHelperCall ( /*OUT*/UInt*   stackAdjustAfterCall,
+                    /*OUT*/RetLoc* retloc,
+                    ISelEnv* env,
+                    IRExpr* guard,
+                    IRCallee* cee, IRType retTy, IRExpr** args,
+                    IREndness IEndianess)
+{
+   PPCCondCode cc;
+   HReg        argregs[PPC_N_REGPARMS];
+   HReg        tmpregs[PPC_N_REGPARMS];
+   Bool        go_fast;
+   Int         n_args, i, argreg;
+   UInt        argiregs;
+   Bool        mode64 = env->mode64;
+
+   /* Set default returns.  We'll update them later if needed. */
+   *stackAdjustAfterCall = 0;
+   *retloc               = mk_RetLoc_INVALID();
+
+   /* These are used for cross-checking that IR-level constraints on
+      the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+   UInt nVECRETs = 0;
+   UInt nBBPTRs  = 0;
+
+   /* Marshal args for a call and do the call.
+
+      This function only deals with a tiny set of possibilities, which
+      cover all helpers in practice.  The restrictions are that only
+      arguments in registers are supported, hence only PPC_N_REGPARMS x
+      (mode32:32 | mode64:64) integer bits in total can be passed.
+      In fact the only supported arg type is (mode32:I32 | mode64:I64).
+
+      The return type can be I{64,32,16,8} or V{128,256}.  In the
+      latter two cases, it is expected that |args| will contain the
+      special node IRExpr_VECRET(), in which case this routine
+      generates code to allocate space on the stack for the vector
+      return value.  Since we are not passing any scalars on the
+      stack, it is enough to preallocate the return space before
+      marshalling any arguments, in this case.
+
+      |args| may also contain IRExpr_BBPTR(), in which case the value
+      in the guest state pointer register is passed as the
+      corresponding argument.
+
+      Generating code which is both efficient and correct when
+      parameters are to be passed in registers is difficult, for the
+      reasons elaborated in detail in comments attached to
+      doHelperCall() in priv/host-x86/isel.c.  Here, we use a variant
+      of the method described in those comments.
+
+      The problem is split into two cases: the fast scheme and the
+      slow scheme.  In the fast scheme, arguments are computed
+      directly into the target (real) registers.  This is only safe
+      when we can be sure that computation of each argument will not
+      trash any real registers set by computation of any other
+      argument.
+
+      In the slow scheme, all args are first computed into vregs, and
+      once they are all done, they are moved to the relevant real
+      regs.  This always gives correct code, but it also gives a bunch
+      of vreg-to-rreg moves which are usually redundant but are hard
+      for the register allocator to get rid of.
+
+      To decide which scheme to use, all argument expressions are
+      first examined.  If they are all so simple that it is clear they
+      will be evaluated without use of any fixed registers, use the
+      fast scheme, else use the slow scheme.  Note also that only
+      unconditional calls may use the fast scheme, since having to
+      compute a condition expression could itself trash real
+      registers.
+
+      Note this requires being able to examine an expression and
+      determine whether or not evaluation of it might use a fixed
+      register.  That requires knowledge of how the rest of this insn
+      selector works.  Currently just the following 3 are regarded as
+      safe -- hopefully they cover the majority of arguments in
+      practice: IRExpr_Tmp IRExpr_Const IRExpr_Get.
+   */
+
+   /* Note that the cee->regparms field is meaningless on PPC32/64 host
+      (since there is only one calling convention) and so we always
+      ignore it. */
+
+   n_args = 0;
+   for (i = 0; args[i]; i++)
+      n_args++;
+
+   if (n_args > PPC_N_REGPARMS) {
+      vpanic("doHelperCall(PPC): cannot currently handle > 8 args");
+      // PPC_N_REGPARMS
+   }
+
+   /* This is kind of stupid .. the arrays are sized as PPC_N_REGPARMS
+      but we then assume that that value is 8. */
+   vassert(PPC_N_REGPARMS == 8);
+   
+   /* Integer args go in GPR3..GPR10, per the PPC ELF ABIs. */
+   argregs[0] = hregPPC_GPR3(mode64);
+   argregs[1] = hregPPC_GPR4(mode64);
+   argregs[2] = hregPPC_GPR5(mode64);
+   argregs[3] = hregPPC_GPR6(mode64);
+   argregs[4] = hregPPC_GPR7(mode64);
+   argregs[5] = hregPPC_GPR8(mode64);
+   argregs[6] = hregPPC_GPR9(mode64);
+   argregs[7] = hregPPC_GPR10(mode64);
+   argiregs = 0;
+
+   tmpregs[0] = tmpregs[1] = tmpregs[2] =
+   tmpregs[3] = tmpregs[4] = tmpregs[5] =
+   tmpregs[6] = tmpregs[7] = INVALID_HREG;
+
+   /* First decide which scheme (slow or fast) is to be used.  First
+      assume the fast scheme, and select slow if any contraindications
+      (wow) appear. */
+
+   go_fast = True;
+
+   /* We'll need space on the stack for the return value.  Avoid
+      possible complications with nested calls by using the slow
+      scheme. */
+   if (retTy == Ity_V128 || retTy == Ity_V256)
+      go_fast = False;
+
+   if (go_fast && guard) {
+      if (guard->tag == Iex_Const 
+          && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+         /* unconditional */
+      } else {
+         /* Not manifestly unconditional -- be conservative. */
+         go_fast = False;
+      }
+   }
+
+   if (go_fast) {
+      for (i = 0; i < n_args; i++) {
+         IRExpr* arg = args[i];
+         if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+            /* that's OK */
+         } 
+         else if (UNLIKELY(arg->tag == Iex_VECRET)) {
+            /* This implies ill-formed IR, since if the IR was
+               well-formed, the return-type test above would have
+               filtered it out. */
+            vpanic("doHelperCall(PPC): invalid IR");
+         }
+         else if (mightRequireFixedRegs(arg)) {
+            go_fast = False;
+            break;
+         }
+      }
+   }
+
+   /* At this point the scheme to use has been established.  Generate
+      code to get the arg values into the argument rregs. */
+
+   if (go_fast) {
+
+      /* FAST SCHEME */
+      argreg = 0;
+
+      for (i = 0; i < n_args; i++) {
+         IRExpr* arg = args[i];
+         vassert(argreg < PPC_N_REGPARMS);
+
+         if (arg->tag == Iex_BBPTR) {
+            /* bit (r+3) of argiregs marks GPR(r+3) as carrying an arg */
+            argiregs |= (1 << (argreg+3));
+            addInstr(env, mk_iMOVds_RR( argregs[argreg],
+                                        GuestStatePtr(mode64) ));
+            argreg++;
+         } else {
+            vassert(arg->tag != Iex_VECRET);
+            IRType ty = typeOfIRExpr(env->type_env, arg);
+            vassert(ty == Ity_I32 || ty == Ity_I64);
+            if (!mode64) {
+               if (ty == Ity_I32) { 
+                  argiregs |= (1 << (argreg+3));
+                  addInstr(env,
+                           mk_iMOVds_RR( argregs[argreg],
+                                         iselWordExpr_R(env, arg,
+                                                        IEndianess) ));
+               } else { // Ity_I64 in 32-bit mode
+                  HReg rHi, rLo;
+                  if ((argreg%2) == 1)
+                                 // ppc32 ELF abi spec for passing LONG_LONG
+                     argreg++;   // XXX: odd argreg => even rN
+                  vassert(argreg < PPC_N_REGPARMS-1);
+                  iselInt64Expr(&rHi,&rLo, env, arg, IEndianess);
+                  argiregs |= (1 << (argreg+3));
+                  addInstr(env, mk_iMOVds_RR( argregs[argreg++], rHi ));
+                  argiregs |= (1 << (argreg+3));
+                  addInstr(env, mk_iMOVds_RR( argregs[argreg], rLo));
+               }
+            } else { // mode64
+               argiregs |= (1 << (argreg+3));
+               addInstr(env, mk_iMOVds_RR( argregs[argreg],
+                                           iselWordExpr_R(env, arg,
+                                                          IEndianess) ));
+            }
+            argreg++;
+         } /* if (arg == IRExprP__BBPR) */
+      }
+
+      /* Fast scheme only applies for unconditional calls.  Hence: */
+      cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
+
+   } else {
+
+      /* SLOW SCHEME; move via temporaries */
+      argreg = 0;
+
+      /* If we have a vector return type, allocate a place for it on
+         the stack and record its address.  Rather than figure out the
+         complexities of PPC{32,64} ELF ABI stack frame layout, simply
+         drop the SP by 1024 and allocate the return point in the
+         middle.  I think this should comfortably clear any ABI
+         mandated register save areas.  Note that it doesn't maintain
+         the backchain as it should, since we're not doing st{d,w}u to
+         adjust the SP, but .. that doesn't seem to be a big deal.
+         Since we're not expecting to have to unwind out of here. */
+      HReg r_vecRetAddr = INVALID_HREG;
+      if (retTy == Ity_V128) {
+         r_vecRetAddr = newVRegI(env);
+         sub_from_sp(env, 512);
+         addInstr(env, mk_iMOVds_RR( r_vecRetAddr, StackFramePtr(mode64) ));
+         sub_from_sp(env, 512);
+      }
+      else if (retTy == Ity_V256) {
+         vassert(0); //ATC
+         r_vecRetAddr = newVRegI(env);
+         sub_from_sp(env, 512);
+         addInstr(env, mk_iMOVds_RR( r_vecRetAddr, StackFramePtr(mode64) ));
+         sub_from_sp(env, 512);
+      }
+
+      vassert(n_args >= 0 && n_args <= 8);
+      for (i = 0; i < n_args; i++) {
+         IRExpr* arg = args[i];
+         vassert(argreg < PPC_N_REGPARMS);
+         if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+            tmpregs[argreg] = newVRegI(env);
+            addInstr(env, mk_iMOVds_RR( tmpregs[argreg],
+                                        GuestStatePtr(mode64) ));
+            nBBPTRs++;
+         }
+         else if (UNLIKELY(arg->tag == Iex_VECRET)) {
+            /* We stashed the address of the return slot earlier, so just
+               retrieve it now. */
+            vassert(!hregIsInvalid(r_vecRetAddr));
+            /* NOTE(review): indexes with |i| rather than |argreg|; the
+               two differ once a 64-bit arg pair in 32-bit mode has
+               advanced argreg past i -- confirm this is intended. */
+            tmpregs[i] = r_vecRetAddr;
+            nVECRETs++;
+         }
+         else {
+            IRType ty = typeOfIRExpr(env->type_env, arg);
+            vassert(ty == Ity_I32 || ty == Ity_I64);
+            if (!mode64) {
+               if (ty == Ity_I32) { 
+                  tmpregs[argreg] = iselWordExpr_R(env, arg, IEndianess);
+               } else { // Ity_I64 in 32-bit mode
+                  HReg rHi, rLo;
+                  if ((argreg%2) == 1)
+                                // ppc32 ELF abi spec for passing LONG_LONG
+                     argreg++;  // XXX: odd argreg => even rN
+                  vassert(argreg < PPC_N_REGPARMS-1);
+                  iselInt64Expr(&rHi,&rLo, env, arg, IEndianess);
+                  tmpregs[argreg++] = rHi;
+                  tmpregs[argreg]   = rLo;
+               }
+            } else { // mode64
+               tmpregs[argreg] = iselWordExpr_R(env, arg, IEndianess);
+            }
+         }
+         argreg++;
+      }
+
+      /* Now we can compute the condition.  We can't do it earlier
+         because the argument computations could trash the condition
+         codes.  Be a bit clever to handle the common case where the
+         guard is 1:Bit. */
+      cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
+      if (guard) {
+         if (guard->tag == Iex_Const 
+             && guard->Iex.Const.con->tag == Ico_U1
+             && guard->Iex.Const.con->Ico.U1 == True) {
+            /* unconditional -- do nothing */
+         } else {
+            cc = iselCondCode( env, guard, IEndianess );
+         }
+      }
+
+      /* Move the args to their final destinations. */
+      for (i = 0; i < argreg; i++) {
+         if (hregIsInvalid(tmpregs[i]))  // Skip invalid regs
+            continue;
+         /* None of these insns, including any spill code that might
+            be generated, may alter the condition codes. */
+         argiregs |= (1 << (i+3));
+         addInstr( env, mk_iMOVds_RR( argregs[i], tmpregs[i] ) );
+      }
+
+   }
+
+   /* Do final checks, set the return values, and generate the call
+      instruction proper. */
+   if (retTy == Ity_V128 || retTy == Ity_V256) {
+      vassert(nVECRETs == 1);
+   } else {
+      vassert(nVECRETs == 0);
+   }
+
+   vassert(nBBPTRs == 0 || nBBPTRs == 1);
+
+   vassert(*stackAdjustAfterCall == 0);
+   vassert(is_RetLoc_INVALID(*retloc));
+   switch (retTy) {
+      case Ity_INVALID:
+         /* Function doesn't return a value. */
+         *retloc = mk_RetLoc_simple(RLPri_None);
+         break;
+      case Ity_I64:
+         /* In 32-bit mode a 64-bit result comes back in a GPR pair. */
+         *retloc = mk_RetLoc_simple(mode64 ? RLPri_Int : RLPri_2Int);
+         break;
+      case Ity_I32: case Ity_I16: case Ity_I8:
+         *retloc = mk_RetLoc_simple(RLPri_Int);
+         break;
+      case Ity_V128:
+         /* Result is 512 bytes up the stack, and after it has been
+            retrieved, adjust SP upwards by 1024. */
+         *retloc = mk_RetLoc_spRel(RLPri_V128SpRel, 512);
+         *stackAdjustAfterCall = 1024;
+         break;
+      case Ity_V256:
+         vassert(0); // ATC
+         /* Ditto */
+         *retloc = mk_RetLoc_spRel(RLPri_V256SpRel, 512);
+         *stackAdjustAfterCall = 1024;
+         break;
+      default:
+         /* IR can denote other possible return types, but we don't
+            handle those here. */
+         vassert(0);
+   }
+
+   /* Finally, generate the call itself.  This needs the *retloc value
+      set in the switch above, which is why it's at the end. */
+
+   Addr64 target = mode64 ? (Addr)cee->addr
+                          : toUInt((Addr)(cee->addr));
+   addInstr(env, PPCInstr_Call( cc, target, argiregs, *retloc ));
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: FP rounding mode helpers                    ---*/
+/*---------------------------------------------------------*/
+
+///* Set FPU's rounding mode to the default */
+//static 
+//void set_FPU_rounding_default ( ISelEnv* env )
+//{
+//   HReg fr_src = newVRegF(env);
+//   HReg r_src  = newVRegI(env);
+//
+//   /* Default rounding mode = 0x0
+//      Only supporting the rounding-mode bits - the rest of FPSCR is 0x0
+//       - so we can set the whole register at once (faster)
+//      note: upper 32 bits ignored by FpLdFPSCR
+//   */
+//   addInstr(env, PPCInstr_LI(r_src, 0x0, env->mode64));
+//   if (env->mode64) {
+//      fr_src = mk_LoadR64toFPR( env, r_src );         // 1*I64 -> F64
+//   } else {
+//      fr_src = mk_LoadRR32toFPR( env, r_src, r_src ); // 2*I32 -> F64
+//   }
+//   addInstr(env, PPCInstr_FpLdFPSCR( fr_src ));
+//}
+
+/* Convert IR rounding mode to PPC encoding */
+static HReg roundModeIRtoPPC ( ISelEnv* env, HReg r_rmIR )
+{
+   /* 
+   rounding mode                     | PPC  |  IR
+   -----------------------------------------------
+   to nearest, ties to even          | 000  | 000
+   to zero                           | 001  | 011
+   to +infinity                      | 010  | 010
+   to -infinity                      | 011  | 001
+   +++++ Below are the extended rounding modes for decimal floating point +++++
+   to nearest, ties away from 0      | 100  | 100
+   to nearest, ties toward 0         | 101  | 111
+   to away from 0                    | 110  | 110
+   to prepare for shorter precision  | 111  | 101
+   */
+   HReg r_rmPPC = newVRegI(env);
+   HReg r_tmp1  = newVRegI(env);
+   HReg r_tmp2  = newVRegI(env);
+
+   vassert(hregClass(r_rmIR) == HRcGPR(env->mode64));
+
+   // r_rmPPC = r_rmIR XOR ((r_rmIR << 1) & 3)
+   //
+   // slwi  tmp1,    r_rmIR, 1
+   // andi  tmp2,    tmp1, 3
+   // xor   r_rmPPC, r_rmIR, tmp2
+   //
+   // Masking before the XOR (rather than after it) leaves bit 2 of
+   // r_rmIR intact, which is what makes the mapping work for the
+   // extended DFP rounding modes (4..7) in the table above as well.
+
+   addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+                               r_tmp1, r_rmIR, PPCRH_Imm(False,1)));
+
+   addInstr( env, PPCInstr_Alu( Palu_AND,
+                                r_tmp2, r_tmp1, PPCRH_Imm( False, 3 ) ) );
+
+   addInstr( env, PPCInstr_Alu( Palu_XOR,
+                                r_rmPPC, r_rmIR, PPCRH_Reg( r_tmp2 ) ) );
+
+   return r_rmPPC;
+}
+
+
+/* Set the FPU's rounding mode: 'mode' is an I32-typed expression
+   denoting a value in the range 0 .. 7, indicating a round mode
+   encoded as per type IRRoundingMode.  Set the PPC FPSCR to have the
+   same rounding.  When the dfp_rm arg is True, set the decimal
+   floating point rounding mode bits (29:31); otherwise, set the
+   binary floating point rounding mode bits (62:63).
+
+   For speed & simplicity, we're setting the *entire* FPSCR here.
+
+   Setting the rounding mode is expensive.  So this function tries to
+   avoid repeatedly setting the rounding mode to the same thing by
+   first comparing 'mode' to the 'mode' tree supplied in the previous
+   call to this function, if any.  (The previous value is stored in
+   env->previous_rm.)  If 'mode' is a single IR temporary 't' and
+   env->previous_rm is also just 't', then the setting is skipped.
+
+   This is safe because of the SSA property of IR: an IR temporary can
+   only be defined once and so will have the same value regardless of
+   where it appears in the block.  Cool stuff, SSA.
+
+   A safety condition: all attempts to set the RM must be aware of
+   this mechanism - by being routed through the functions here.
+
+   Of course this only helps if blocks where the RM is set more than
+   once and it is set to the same value each time, *and* that value is
+   held in the same IR temporary each time.  In order to assure the
+   latter as much as possible, the IR optimiser takes care to do CSE
+   on any block with any sign of floating point activity.
+*/
+static
+void _set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode, Bool dfp_rm,
+                              IREndness IEndianess )
+{
+   HReg fr_src = newVRegF(env);
+   HReg r_src;
+
+   vassert(typeOfIRExpr(env->type_env,mode) == Ity_I32);
+   
+   /* Do we need to do anything?  If the requested mode is the same IR
+      temporary as last time, the FPSCR already holds the right value
+      (safe by SSA: a temp is defined exactly once per block). */
+   if (env->previous_rm
+       && env->previous_rm->tag == Iex_RdTmp
+       && mode->tag == Iex_RdTmp
+       && env->previous_rm->Iex.RdTmp.tmp == mode->Iex.RdTmp.tmp) {
+      /* no - setting it to what it was before.  */
+      vassert(typeOfIRExpr(env->type_env, env->previous_rm) == Ity_I32);
+      return;
+   }
+
+   /* No luck - we better set it, and remember what we set it to. */
+   env->previous_rm = mode;
+
+   /* Only supporting the rounding-mode bits - the rest of FPSCR is
+      0x0 - so we can set the whole register at once (faster). */
+
+   // Resolve rounding mode and convert to PPC representation
+   r_src = roundModeIRtoPPC( env, iselWordExpr_R(env, mode, IEndianess) );
+
+   // gpr -> fpr: the value must transit memory since there is no
+   // direct GPR-to-FPR move in this instruction set model.
+   if (env->mode64) {
+      if (dfp_rm) {
+         /* Shift the RM up 32 bits -- presumably to land in the DFP
+            RM field (bits 29:31) of the 64-bit FPSCR image; see the
+            header comment above. */
+         HReg r_tmp1 = newVRegI( env );
+         addInstr( env,
+                   PPCInstr_Shft( Pshft_SHL, False/*64bit shift*/,
+                                  r_tmp1, r_src, PPCRH_Imm( False, 32 ) ) );
+         fr_src = mk_LoadR64toFPR( env, r_tmp1 );
+      } else {
+         fr_src = mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64
+      }
+   } else {
+      if (dfp_rm) {
+         /* 32-bit mode: RM goes in the high word, zero in the low. */
+         HReg r_zero = newVRegI( env );
+         addInstr( env, PPCInstr_LI( r_zero, 0, env->mode64 ) );
+         fr_src = mk_LoadRR32toFPR( env, r_src, r_zero );
+      } else {
+         fr_src = mk_LoadRR32toFPR( env, r_src, r_src ); // 2*I32 -> F64
+      }
+   }
+
+   // Move to FPSCR
+   addInstr(env, PPCInstr_FpLdFPSCR( fr_src, dfp_rm ));
+}
+
+/* Set the binary floating point rounding mode (FPSCR bits 62:63). */
+static void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode,
+                                    IREndness IEndianess )
+{
+   _set_FPU_rounding_mode(env, mode, False, IEndianess);
+}
+
+/* Set the decimal floating point rounding mode (FPSCR bits 29:31). */
+static void set_FPU_DFP_rounding_mode ( ISelEnv* env, IRExpr* mode,
+                                        IREndness IEndianess )
+{
+   _set_FPU_rounding_mode(env, mode, True, IEndianess);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: vector helpers                              ---*/
+/*---------------------------------------------------------*/
+
+/* Generate all-zeroes into a new vector register.
+*/
+static HReg generate_zeroes_V128 ( ISelEnv* env )
+{
+   /* XORing a register with itself always yields zero, so vxor v,v,v
+      fills the new vreg with all-zeroes without needing any input. */
+   HReg v_zero = newVRegV(env);
+   addInstr(env, PPCInstr_AvBinary(Pav_XOR, v_zero, v_zero, v_zero));
+   return v_zero;
+}
+
+/* Generate all-ones into a new vector register.
+*/
+static HReg generate_ones_V128 ( ISelEnv* env )
+{
+   /* Splatting the signed-5-bit immediate -1 across 8-bit lanes sets
+      every byte to 0xFF, i.e. an all-ones vector. */
+   HReg v_ones = newVRegV(env);
+   addInstr(env, PPCInstr_AvSplat(8, v_ones, PPCVI5s_Imm(-1)));
+   return v_ones;
+}
+
+
+/*
+  Generates code for AvSplat
+  - takes in IRExpr* of type 8|16|32
+    returns vector reg of duplicated lanes of input
+  - uses AvSplat(imm) for imms up to simm6.
+    otherwise must use store reg & load vector
+*/
+static HReg mk_AvDuplicateRI( ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   HReg   r_src;
+   HReg   dst = newVRegV(env);
+   PPCRI* ri  = iselWordExpr_RI(env, e, IEndianess);
+   IRType ty  = typeOfIRExpr(env->type_env,e);
+   UInt   sz  = (ty == Ity_I8) ? 8 : (ty == Ity_I16) ? 16 : 32;
+   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);
+
+   /* special case: immediate */
+   if (ri->tag == Pri_Imm) {
+      Int simm32 = (Int)ri->Pri.Imm;
+
+      /* figure out if it's do-able with imm splats. */
+      if (simm32 >= -32 && simm32 <= 31) {
+         Char simm6 = (Char)simm32;
+         if (simm6 > 15) {           /* 16:31 inclusive */
+            /* The splat immediate field only holds -16..15, so build
+               the value as splat(simm6-16) - splat(-16), lanewise. */
+            HReg v1 = newVRegV(env);
+            HReg v2 = newVRegV(env);
+            addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16)));
+            addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6-16)));
+            addInstr(env,
+               (sz== 8) ? PPCInstr_AvBin8x16(Pav_SUBU, dst, v2, v1) :
+               (sz==16) ? PPCInstr_AvBin16x8(Pav_SUBU, dst, v2, v1)
+                        : PPCInstr_AvBin32x4(Pav_SUBU, dst, v2, v1) );
+            return dst;
+         }
+         if (simm6 < -16) {          /* -32:-17 inclusive */
+            /* Similarly, build it as splat(simm6+16) + splat(-16). */
+            HReg v1 = newVRegV(env);
+            HReg v2 = newVRegV(env);
+            addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16)));
+            addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6+16)));
+            addInstr(env,
+               (sz== 8) ? PPCInstr_AvBin8x16(Pav_ADDU, dst, v2, v1) :
+               (sz==16) ? PPCInstr_AvBin16x8(Pav_ADDU, dst, v2, v1)
+                        : PPCInstr_AvBin32x4(Pav_ADDU, dst, v2, v1) );
+            return dst;
+         }
+         /* simplest form:              -16:15 inclusive */
+         addInstr(env, PPCInstr_AvSplat(sz, dst, PPCVI5s_Imm(simm6)));
+         return dst;
+      }
+
+      /* no luck; use the Slow way.  Materialise the immediate into a
+         GPR and fall through to the store-then-vector-load path. */
+      r_src = newVRegI(env);
+      addInstr(env, PPCInstr_LI(r_src, (Long)simm32, env->mode64));
+   }
+   else {
+      r_src = ri->Pri.Reg;
+   }
+
+   {
+      /* Store r_src multiple times (sz dependent); then load the dest vector. */
+      HReg r_aligned16;
+      PPCAMode *am_offset, *am_offset_zero;
+
+      sub_from_sp( env, 32 );     // Move SP down
+      /* Get a 16-aligned address within our stack space */
+      r_aligned16 = get_sp_aligned16( env );
+
+      Int i;
+      Int stride = (sz == 8) ? 1 : (sz == 16) ? 2 : 4;
+      UChar num_bytes_to_store = stride;
+      am_offset_zero = PPCAMode_IR( 0, r_aligned16 );
+      am_offset = am_offset_zero;
+      /* Fill all 16 bytes of the aligned slot with copies of r_src. */
+      for (i = 0; i < 16; i+=stride, am_offset = PPCAMode_IR( i, r_aligned16)) {
+         addInstr(env, PPCInstr_Store( num_bytes_to_store, am_offset, r_src, env->mode64 ));
+      }
+
+      /* Effectively splat the r_src value to dst */
+      addInstr(env, PPCInstr_AvLdSt( True/*ld*/, 16, dst, am_offset_zero ) );
+      add_to_sp( env, 32 );       // Reset SP
+
+      return dst;
+   }
+}
+
+
+/* for each lane of vSrc: lane == nan ? laneX = all 1's : all 0's */
+static HReg isNan ( ISelEnv* env, HReg vSrc, IREndness IEndianess )
+{
+   /* Per 32-bit lane, an IEEE single is a NaN iff its exponent field
+      is all ones AND its mantissa field is non-zero.  Produce all-1s
+      in each lane that holds a NaN and all-0s elsewhere. */
+   vassert(hregClass(vSrc) == HRcVec128);
+
+   HReg v_zero    = mk_AvDuplicateRI(env, mkU32(0), IEndianess);
+   HReg v_expMask = mk_AvDuplicateRI(env, mkU32(0x7F800000), IEndianess);
+   HReg v_mntMask = mk_AvDuplicateRI(env, mkU32(0x7FFFFF), IEndianess);
+   HReg v_exp     = newVRegV(env);
+   HReg v_mnt     = newVRegV(env);
+   HReg v_isNan   = newVRegV(env);
+
+   /* v_exp = (lane & expMask) == expMask  (exponent all ones?) */
+   addInstr(env, PPCInstr_AvBinary(Pav_AND, v_exp, vSrc, v_expMask));
+   addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, v_exp, v_exp, v_expMask));
+   /* v_mnt = (lane & mntMask) > 0  (mantissa non-zero?) */
+   addInstr(env, PPCInstr_AvBinary(Pav_AND, v_mnt, vSrc, v_mntMask));
+   addInstr(env, PPCInstr_AvBin32x4(Pav_CMPGTU, v_mnt, v_mnt, v_zero));
+   /* NaN iff both conditions hold */
+   addInstr(env, PPCInstr_AvBinary(Pav_AND, v_isNan, v_exp, v_mnt));
+   return v_isNan;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64/32/16/8 bit)        ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for an integer-typed expression, and add them to the
+   code list.  Return a reg holding the result.  This reg will be a
+   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
+   want to modify it, ask for a new vreg, copy it in there, and modify
+   the copy.  The register allocator will do its best to map both
+   vregs to the same real register, so the copies will often disappear
+   later in the game.
+
+   This should handle expressions of 64, 32, 16 and 8-bit type.
+   All results are returned in a (mode64 ? 64bit : 32bit) register.
+   For 16- and 8-bit expressions, the upper (32/48/56 : 16/24) bits
+   are arbitrary, so you should mask or sign extend partial values
+   if necessary.
+*/
+
+static HReg iselWordExpr_R ( ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   /* Public entry point: delegate to the worker, then check that it
+      handed back a virtual GPR of the natural word size. */
+   HReg res = iselWordExpr_R_wrk(env, e, IEndianess);
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+
+   vassert(hregClass(res) == HRcGPR(env->mode64));
+   vassert(hregIsVirtual(res));
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static HReg iselWordExpr_R_wrk ( ISelEnv* env, IRExpr* e,
+                                 IREndness IEndianess )
+{
+   Bool mode64 = env->mode64;
+   MatchInfo mi;
+   DECLARE_PATTERN(p_32to1_then_1Uto8);
+
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I8 || ty == Ity_I16 ||
+           ty == Ity_I32 || ((ty == Ity_I64) && mode64));
+
+   switch (e->tag) {
+
+   /* --------- TEMP --------- */
+   case Iex_RdTmp:
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+
+   /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg      r_dst;
+      PPCAMode* am_addr;
+      if (e->Iex.Load.end != IEndianess)
+         goto irreducible;
+      r_dst   = newVRegI(env);
+      am_addr = iselWordExpr_AMode( env, e->Iex.Load.addr, ty/*of xfer*/,
+                                    IEndianess );
+      addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)), 
+                                   r_dst, am_addr, mode64 ));
+      return r_dst;
+      /*NOTREACHED*/
+   }
+
+   /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      PPCAluOp  aluOp;
+      PPCShftOp shftOp;
+
+      /* Is it an addition or logical style op? */
+      switch (e->Iex.Binop.op) {
+      case Iop_Add8: case Iop_Add16: case Iop_Add32: case Iop_Add64:
+         aluOp = Palu_ADD; break;
+      case Iop_Sub8: case Iop_Sub16: case Iop_Sub32: case Iop_Sub64:
+         aluOp = Palu_SUB; break;
+      case Iop_And8: case Iop_And16: case Iop_And32: case Iop_And64:
+         aluOp = Palu_AND; break;
+      case Iop_Or8:  case Iop_Or16:  case Iop_Or32:  case Iop_Or64:
+         aluOp = Palu_OR; break;
+      case Iop_Xor8: case Iop_Xor16: case Iop_Xor32: case Iop_Xor64:
+         aluOp = Palu_XOR; break;
+      default:
+         aluOp = Palu_INVALID; break;
+      }
+      /* For commutative ops we assume any literal
+         values are on the second operand. */
+      if (aluOp != Palu_INVALID) {
+         HReg   r_dst   = newVRegI(env);
+         HReg   r_srcL  = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         PPCRH* ri_srcR = NULL;
+         /* get right arg into an RH, in the appropriate way */
+         switch (aluOp) {
+         case Palu_ADD: case Palu_SUB:
+            ri_srcR = iselWordExpr_RH(env, True/*signed*/, 
+                                      e->Iex.Binop.arg2, IEndianess);
+            break;
+         case Palu_AND: case Palu_OR: case Palu_XOR:
+            ri_srcR = iselWordExpr_RH(env, False/*signed*/,
+                                      e->Iex.Binop.arg2, IEndianess);
+            break;
+         default:
+            vpanic("iselWordExpr_R_wrk-aluOp-arg2");
+         }
+         addInstr(env, PPCInstr_Alu(aluOp, r_dst, r_srcL, ri_srcR));
+         return r_dst;
+      }
+
+      /* a shift? */
+      switch (e->Iex.Binop.op) {
+      case Iop_Shl8: case Iop_Shl16: case Iop_Shl32: case Iop_Shl64:
+         shftOp = Pshft_SHL; break;
+      case Iop_Shr8: case Iop_Shr16: case Iop_Shr32: case Iop_Shr64:
+         shftOp = Pshft_SHR; break;
+      case Iop_Sar8: case Iop_Sar16: case Iop_Sar32: case Iop_Sar64:
+         shftOp = Pshft_SAR; break;
+      default:
+         shftOp = Pshft_INVALID; break;
+      }
+      /* we assume any literal values are on the second operand. */
+      if (shftOp != Pshft_INVALID) {
+         HReg   r_dst   = newVRegI(env);
+         HReg   r_srcL  = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         PPCRH* ri_srcR = NULL;
+         /* get right arg into an RH, in the appropriate way */
+         switch (shftOp) {
+         case Pshft_SHL: case Pshft_SHR: case Pshft_SAR:
+            if (!mode64)
+               ri_srcR = iselWordExpr_RH5u(env, e->Iex.Binop.arg2, IEndianess);
+            else
+               ri_srcR = iselWordExpr_RH6u(env, e->Iex.Binop.arg2, IEndianess);
+            break;
+         default:
+            vpanic("iselIntExpr_R_wrk-shftOp-arg2");
+         }
+         /* widen the left arg if needed */
+         if (shftOp == Pshft_SHR || shftOp == Pshft_SAR) {
+            if (ty == Ity_I8 || ty == Ity_I16) {
+               PPCRH* amt = PPCRH_Imm(False,
+                                      toUShort(ty == Ity_I8 ? 24 : 16));
+               HReg   tmp = newVRegI(env);
+               addInstr(env, PPCInstr_Shft(Pshft_SHL,
+                                           True/*32bit shift*/,
+                                           tmp, r_srcL, amt));
+               addInstr(env, PPCInstr_Shft(shftOp,
+                                           True/*32bit shift*/,
+                                           tmp, tmp,    amt));
+               r_srcL = tmp;
+               vassert(0); /* AWAITING TEST CASE */
+            }
+         }
+         /* Only 64 expressions need 64bit shifts,
+            32bit shifts are fine for all others */
+         if (ty == Ity_I64) {
+            vassert(mode64);
+            addInstr(env, PPCInstr_Shft(shftOp, False/*64bit shift*/,
+                                        r_dst, r_srcL, ri_srcR));
+         } else {
+            addInstr(env, PPCInstr_Shft(shftOp, True/*32bit shift*/,
+                                        r_dst, r_srcL, ri_srcR));
+         }
+         return r_dst;
+      }
+
+      /* How about a div? */
+      if (e->Iex.Binop.op == Iop_DivS32 || 
+          e->Iex.Binop.op == Iop_DivU32 ||
+          e->Iex.Binop.op == Iop_DivS32E ||
+          e->Iex.Binop.op == Iop_DivU32E) {
+         Bool syned  = toBool((e->Iex.Binop.op == Iop_DivS32) || (e->Iex.Binop.op == Iop_DivS32E));
+         HReg r_dst  = newVRegI(env);
+         HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr( env,
+                      PPCInstr_Div( ( ( e->Iex.Binop.op == Iop_DivU32E )
+                                             || ( e->Iex.Binop.op == Iop_DivS32E ) ) ? True
+                                                                                     : False,
+                                    syned,
+                                    True/*32bit div*/,
+                                    r_dst,
+                                    r_srcL,
+                                    r_srcR ) );
+         return r_dst;
+      }
+      if (e->Iex.Binop.op == Iop_DivS64 || 
+          e->Iex.Binop.op == Iop_DivU64 || e->Iex.Binop.op == Iop_DivS64E
+          || e->Iex.Binop.op == Iop_DivU64E ) {
+         Bool syned  = toBool((e->Iex.Binop.op == Iop_DivS64) ||(e->Iex.Binop.op == Iop_DivS64E));
+         HReg r_dst  = newVRegI(env);
+         HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+         vassert(mode64);
+         addInstr( env,
+                      PPCInstr_Div( ( ( e->Iex.Binop.op == Iop_DivS64E )
+                                             || ( e->Iex.Binop.op
+                                                      == Iop_DivU64E ) ) ? True
+                                                                         : False,
+                                    syned,
+                                    False/*64bit div*/,
+                                    r_dst,
+                                    r_srcL,
+                                    r_srcR ) );
+         return r_dst;
+      }
+
+      /* No? Anyone for a mul? */
+      if (e->Iex.Binop.op == Iop_Mul32
+          || e->Iex.Binop.op == Iop_Mul64) {
+         Bool syned       = False;
+         Bool sz32        = (e->Iex.Binop.op != Iop_Mul64);
+         HReg r_dst       = newVRegI(env);
+         HReg r_srcL      = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         HReg r_srcR      = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr(env, PPCInstr_MulL(syned, False/*lo32*/, sz32,
+                                     r_dst, r_srcL, r_srcR));
+         return r_dst;
+      }      
+
+      /* 32 x 32 -> 64 multiply */
+      if (mode64
+          && (e->Iex.Binop.op == Iop_MullU32
+              || e->Iex.Binop.op == Iop_MullS32)) {
+         HReg tLo    = newVRegI(env);
+         HReg tHi    = newVRegI(env);
+         HReg r_dst  = newVRegI(env);
+         Bool syned  = toBool(e->Iex.Binop.op == Iop_MullS32);
+         HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/, 
+                                     False/*lo32*/, True/*32bit mul*/,
+                                     tLo, r_srcL, r_srcR));
+         addInstr(env, PPCInstr_MulL(syned,
+                                     True/*hi32*/, True/*32bit mul*/,
+                                     tHi, r_srcL, r_srcR));
+         addInstr(env, PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/,
+                                     r_dst, tHi, PPCRH_Imm(False,32)));
+         addInstr(env, PPCInstr_Alu(Palu_OR,
+                                    r_dst, r_dst, PPCRH_Reg(tLo)));
+         return r_dst;
+      }
+
+      /* El-mutanto 3-way compare? */
+      if (e->Iex.Binop.op == Iop_CmpORD32S
+          || e->Iex.Binop.op == Iop_CmpORD32U) {
+         Bool   syned = toBool(e->Iex.Binop.op == Iop_CmpORD32S);
+         HReg   dst   = newVRegI(env);
+         HReg   srcL  = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         PPCRH* srcR  = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2,
+                                        IEndianess);
+         addInstr(env, PPCInstr_Cmp(syned, True/*32bit cmp*/,
+                                    7/*cr*/, srcL, srcR));
+         addInstr(env, PPCInstr_MfCR(dst));
+         addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst,
+                                    PPCRH_Imm(False,7<<1)));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_CmpORD64S
+          || e->Iex.Binop.op == Iop_CmpORD64U) {
+         Bool   syned = toBool(e->Iex.Binop.op == Iop_CmpORD64S);
+         HReg   dst   = newVRegI(env);
+         HReg   srcL  = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         PPCRH* srcR  = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2,
+                                        IEndianess);
+         vassert(mode64);
+         addInstr(env, PPCInstr_Cmp(syned, False/*64bit cmp*/,
+                                    7/*cr*/, srcL, srcR));
+         addInstr(env, PPCInstr_MfCR(dst));
+         addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst,
+                                    PPCRH_Imm(False,7<<1)));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_Max32U) {
+         HReg        r1   = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         HReg        r2   = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+         HReg        rdst = newVRegI(env);
+         PPCCondCode cc   = mk_PPCCondCode( Pct_TRUE, Pcf_7LT );
+         addInstr(env, mk_iMOVds_RR(rdst, r1));
+         addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+                                    7/*cr*/, rdst, PPCRH_Reg(r2)));
+         addInstr(env, PPCInstr_CMov(cc, rdst, PPCRI_Reg(r2)));
+         return rdst;
+      }
+
+      if (e->Iex.Binop.op == Iop_32HLto64) {
+         HReg   r_Hi  = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         HReg   r_Lo  = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+         HReg   r_Tmp = newVRegI(env);
+         HReg   r_dst = newVRegI(env);
+         HReg   msk   = newVRegI(env);
+         vassert(mode64);
+         /* r_dst = OR( r_Hi<<32, r_Lo ) */
+         addInstr(env, PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/,
+                                     r_dst, r_Hi, PPCRH_Imm(False,32)));
+         addInstr(env, PPCInstr_LI(msk, 0xFFFFFFFF, mode64));
+         addInstr(env, PPCInstr_Alu( Palu_AND, r_Tmp, r_Lo,
+                                     PPCRH_Reg(msk) ));
+         addInstr(env, PPCInstr_Alu( Palu_OR, r_dst, r_dst,
+                                     PPCRH_Reg(r_Tmp) ));
+         return r_dst;
+      }
+
+      if ((e->Iex.Binop.op == Iop_CmpF64) ||
+          (e->Iex.Binop.op == Iop_CmpD64) ||
+          (e->Iex.Binop.op == Iop_CmpD128)) {
+         HReg fr_srcL;
+         HReg fr_srcL_lo;
+         HReg fr_srcR;
+         HReg fr_srcR_lo;
+
+         HReg r_ccPPC   = newVRegI(env);
+         HReg r_ccIR    = newVRegI(env);
+         HReg r_ccIR_b0 = newVRegI(env);
+         HReg r_ccIR_b2 = newVRegI(env);
+         HReg r_ccIR_b6 = newVRegI(env);
+
+         if (e->Iex.Binop.op == Iop_CmpF64) {
+            fr_srcL = iselDblExpr(env, e->Iex.Binop.arg1, IEndianess);
+            fr_srcR = iselDblExpr(env, e->Iex.Binop.arg2, IEndianess);
+            addInstr(env, PPCInstr_FpCmp(r_ccPPC, fr_srcL, fr_srcR));
+
+         } else if (e->Iex.Binop.op == Iop_CmpD64) {
+            fr_srcL = iselDfp64Expr(env, e->Iex.Binop.arg1, IEndianess);
+            fr_srcR = iselDfp64Expr(env, e->Iex.Binop.arg2, IEndianess);
+            addInstr(env, PPCInstr_Dfp64Cmp(r_ccPPC, fr_srcL, fr_srcR));
+
+         } else {    //  e->Iex.Binop.op == Iop_CmpD128
+            iselDfp128Expr(&fr_srcL, &fr_srcL_lo, env, e->Iex.Binop.arg1,
+                           IEndianess);
+            iselDfp128Expr(&fr_srcR, &fr_srcR_lo, env, e->Iex.Binop.arg2,
+                           IEndianess);
+            addInstr(env, PPCInstr_Dfp128Cmp(r_ccPPC, fr_srcL, fr_srcL_lo,
+                                             fr_srcR, fr_srcR_lo));
+         }
+
+         /* Map compare result from PPC to IR,
+            conforming to CmpF64 definition. */
+         /*
+           FP cmp result | PPC | IR
+           --------------------------
+           UN            | 0x1 | 0x45
+           EQ            | 0x2 | 0x40
+           GT            | 0x4 | 0x00
+           LT            | 0x8 | 0x01
+         */
+
+         // r_ccIR_b0 = r_ccPPC[0] | r_ccPPC[3]
+         addInstr(env, PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/,
+                                     r_ccIR_b0, r_ccPPC,
+                                     PPCRH_Imm(False,0x3)));
+         addInstr(env, PPCInstr_Alu(Palu_OR,  r_ccIR_b0,
+                                    r_ccPPC,   PPCRH_Reg(r_ccIR_b0)));
+         addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b0,
+                                    r_ccIR_b0, PPCRH_Imm(False,0x1)));
+         
+         // r_ccIR_b2 = r_ccPPC[0]
+         addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+                                     r_ccIR_b2, r_ccPPC,
+                                     PPCRH_Imm(False,0x2)));
+         addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b2,
+                                    r_ccIR_b2, PPCRH_Imm(False,0x4)));
+
+         // r_ccIR_b6 = r_ccPPC[0] | r_ccPPC[1]
+         addInstr(env, PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/,
+                                     r_ccIR_b6, r_ccPPC,
+                                     PPCRH_Imm(False,0x1)));
+         addInstr(env, PPCInstr_Alu(Palu_OR,  r_ccIR_b6,
+                                    r_ccPPC, PPCRH_Reg(r_ccIR_b6)));
+         addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+                                     r_ccIR_b6, r_ccIR_b6,
+                                     PPCRH_Imm(False,0x6)));
+         addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b6,
+                                    r_ccIR_b6, PPCRH_Imm(False,0x40)));
+
+         // r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6
+         addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR,
+                                    r_ccIR_b0, PPCRH_Reg(r_ccIR_b2)));
+         addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR,
+                                    r_ccIR,    PPCRH_Reg(r_ccIR_b6)));
+         return r_ccIR;
+      }
+
+      if ( e->Iex.Binop.op == Iop_F64toI32S ||
+               e->Iex.Binop.op == Iop_F64toI32U ) {
+         /* This works in both mode64 and mode32. */
+         HReg      r1      = StackFramePtr(env->mode64);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
+         HReg      fsrc    = iselDblExpr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg      ftmp    = newVRegF(env);
+         HReg      idst    = newVRegI(env);
+
+         /* Set host rounding mode */
+         set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+
+         sub_from_sp( env, 16 );
+         addInstr(env, PPCInstr_FpCftI(False/*F->I*/, True/*int32*/,
+                                       e->Iex.Binop.op == Iop_F64toI32S ? True/*syned*/
+                                                                     : False,
+                                       True/*flt64*/,
+                                       ftmp, fsrc));
+         addInstr(env, PPCInstr_FpSTFIW(r1, ftmp));
+         addInstr(env, PPCInstr_Load(4, idst, zero_r1, mode64));
+
+         /* in 64-bit mode we need to sign-widen idst. */
+         if (mode64)
+            addInstr(env, PPCInstr_Unary(Pun_EXTSW, idst, idst));
+
+         add_to_sp( env, 16 );
+
+         ///* Restore default FPU rounding. */
+         //set_FPU_rounding_default( env );
+         return idst;
+      }
+
+      if (e->Iex.Binop.op == Iop_F64toI64S || e->Iex.Binop.op == Iop_F64toI64U ) {
+         if (mode64) {
+            HReg      r1      = StackFramePtr(env->mode64);
+            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
+            HReg      fsrc    = iselDblExpr(env, e->Iex.Binop.arg2,
+                                            IEndianess);
+            HReg      idst    = newVRegI(env);         
+            HReg      ftmp    = newVRegF(env);
+
+            /* Set host rounding mode */
+            set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+
+            sub_from_sp( env, 16 );
+            addInstr(env, PPCInstr_FpCftI(False/*F->I*/, False/*int64*/,
+                                          ( e->Iex.Binop.op == Iop_F64toI64S ) ? True
+                                                                            : False,
+                                          True, ftmp, fsrc));
+            addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, ftmp, zero_r1));
+            addInstr(env, PPCInstr_Load(8, idst, zero_r1, True/*mode64*/));
+            add_to_sp( env, 16 );
+
+            ///* Restore default FPU rounding. */
+            //set_FPU_rounding_default( env );
+            return idst;
+         }
+      }
+
+      if (e->Iex.Binop.op == Iop_D64toI64S ) {
+         HReg      r1      = StackFramePtr(env->mode64);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
+         HReg      fr_src  = iselDfp64Expr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg      idst    = newVRegI(env);
+         HReg      ftmp    = newVRegF(env);
+
+         /* Set host rounding mode */
+         set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+         addInstr(env, PPCInstr_Dfp64Unary(Pfp_DCTFIX, ftmp, fr_src));
+         sub_from_sp( env, 16 );
+         addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, ftmp, zero_r1));
+         addInstr(env, PPCInstr_Load(8, idst, zero_r1, mode64));
+
+         add_to_sp( env, 16 );
+
+         ///* Restore default FPU rounding. */
+         //set_FPU_rounding_default( env );
+         return idst;
+      }
+
+      if (e->Iex.Binop.op == Iop_D128toI64S ) {
+         PPCFpOp fpop = Pfp_DCTFIXQ;
+         HReg r_srcHi = newVRegF(env);
+         HReg r_srcLo = newVRegF(env);
+         HReg idst    = newVRegI(env);
+         HReg ftmp    = newVRegF(env);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+
+         set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2,
+                        IEndianess);
+         addInstr(env, PPCInstr_DfpD128toD64(fpop, ftmp, r_srcHi, r_srcLo));
+
+         // put the D64 result into an integer register
+         sub_from_sp( env, 16 );
+         addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, ftmp, zero_r1));
+         addInstr(env, PPCInstr_Load(8, idst, zero_r1, True/*mode64*/));
+         add_to_sp( env, 16 );
+         return idst;
+      }
+      break;
+   }
+
+   /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      IROp op_unop = e->Iex.Unop.op;
+
+      /* 1Uto8(32to1(expr32)) */
+      DEFINE_PATTERN(p_32to1_then_1Uto8,
+                     unop(Iop_1Uto8,unop(Iop_32to1,bind(0))));
+      if (matchIRExpr(&mi,p_32to1_then_1Uto8,e)) {
+         IRExpr* expr32 = mi.bindee[0];
+         HReg r_dst = newVRegI(env);
+         HReg r_src = iselWordExpr_R(env, expr32, IEndianess);
+         addInstr(env, PPCInstr_Alu(Palu_AND, r_dst,
+                                    r_src, PPCRH_Imm(False,1)));
+         return r_dst;
+      }
+
+      /* 16Uto32(LDbe:I16(expr32)) */
+      {
+         DECLARE_PATTERN(p_LDbe16_then_16Uto32);
+         DEFINE_PATTERN(p_LDbe16_then_16Uto32,
+                        unop(Iop_16Uto32,
+                             IRExpr_Load(IEndianess,Ity_I16,bind(0))) );
+         if (matchIRExpr(&mi,p_LDbe16_then_16Uto32,e)) {
+            HReg r_dst = newVRegI(env);
+            PPCAMode* amode
+               = iselWordExpr_AMode( env, mi.bindee[0], Ity_I16/*xfer*/,
+                                     IEndianess );
+            addInstr(env, PPCInstr_Load(2,r_dst,amode, mode64));
+            return r_dst;
+         }
+      }
+
+      switch (op_unop) {
+      case Iop_8Uto16:
+      case Iop_8Uto32:
+      case Iop_8Uto64:
+      case Iop_16Uto32:
+      case Iop_16Uto64: {
+         HReg   r_dst = newVRegI(env);
+         HReg   r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         UShort mask  = toUShort(op_unop==Iop_16Uto64 ? 0xFFFF :
+                                 op_unop==Iop_16Uto32 ? 0xFFFF : 0xFF);
+         addInstr(env, PPCInstr_Alu(Palu_AND,r_dst,r_src,
+                                    PPCRH_Imm(False,mask)));
+         return r_dst;
+      }
+      case Iop_32Uto64: {
+         HReg r_dst = newVRegI(env);
+         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         vassert(mode64);
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/,
+                                r_dst, r_src, PPCRH_Imm(False,32)));
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SHR, False/*64bit shift*/,
+                                r_dst, r_dst, PPCRH_Imm(False,32)));
+         return r_dst;
+      }
+      case Iop_8Sto16:
+      case Iop_8Sto32:
+      case Iop_16Sto32: {
+         HReg   r_dst = newVRegI(env);
+         HReg   r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         UShort amt   = toUShort(op_unop==Iop_16Sto32 ? 16 : 24);
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+                                r_dst, r_src, PPCRH_Imm(False,amt)));
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/,
+                                r_dst, r_dst, PPCRH_Imm(False,amt)));
+         return r_dst;
+      }
+      case Iop_8Sto64:
+      case Iop_16Sto64: {
+         HReg   r_dst = newVRegI(env);
+         HReg   r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         UShort amt   = toUShort(op_unop==Iop_8Sto64  ? 56 : 48);
+         vassert(mode64);
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/,
+                                r_dst, r_src, PPCRH_Imm(False,amt)));
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SAR, False/*64bit shift*/,
+                                r_dst, r_dst, PPCRH_Imm(False,amt)));
+         return r_dst;
+      }
+      case Iop_32Sto64: {
+         HReg   r_dst = newVRegI(env);
+         HReg   r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+	 vassert(mode64);
+         /* According to the IBM docs, in 64 bit mode, srawi r,r,0
+            sign extends the lower 32 bits into the upper 32 bits. */
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/,
+                                r_dst, r_src, PPCRH_Imm(False,0)));
+         return r_dst;
+      }
+      case Iop_Not8:
+      case Iop_Not16:
+      case Iop_Not32:
+      case Iop_Not64: {
+         if (op_unop == Iop_Not64) vassert(mode64);
+         HReg r_dst = newVRegI(env);
+         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_Unary(Pun_NOT,r_dst,r_src));
+         return r_dst;
+      }
+      case Iop_64HIto32: {
+         if (!mode64) {
+            HReg rHi, rLo;
+            iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg, IEndianess);
+            return rHi; /* and abandon rLo .. poor wee thing :-) */
+         } else {
+            HReg   r_dst = newVRegI(env);
+            HReg   r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+            addInstr(env,
+                     PPCInstr_Shft(Pshft_SHR, False/*64bit shift*/,
+                                   r_dst, r_src, PPCRH_Imm(False,32)));
+            return r_dst;
+         }
+      }
+      case Iop_64to32: {
+         if (!mode64) {
+            HReg rHi, rLo;
+            iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg, IEndianess);
+            return rLo; /* similar stupid comment to the above ... */
+         } else {
+            /* This is a no-op. */
+            return iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         }
+      }
+      case Iop_64to16: {
+         if (mode64) { /* This is a no-op. */
+            return iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         }
+         break; /* evidently not used in 32-bit mode */
+      }
+      case Iop_16HIto8:
+      case Iop_32HIto16: {
+         HReg   r_dst = newVRegI(env);
+         HReg   r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         UShort shift = toUShort(op_unop == Iop_16HIto8 ? 8 : 16);
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/,
+                                r_dst, r_src, PPCRH_Imm(False,shift)));
+         return r_dst;
+      }
+      case Iop_128HIto64: 
+         if (mode64) {
+            HReg rHi, rLo;
+            iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg, IEndianess);
+            return rHi; /* and abandon rLo .. poor wee thing :-) */
+         }
+         break;
+      case Iop_128to64:
+         if (mode64) {
+            HReg rHi, rLo;
+            iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg, IEndianess);
+            return rLo; /* similar stupid comment to the above ... */
+         }
+         break;
+      case Iop_1Uto64:
+      case Iop_1Uto32:
+      case Iop_1Uto8:
+         if ((op_unop != Iop_1Uto64) || mode64) {
+            HReg        r_dst = newVRegI(env);
+            PPCCondCode cond  = iselCondCode(env, e->Iex.Unop.arg, IEndianess);
+            addInstr(env, PPCInstr_Set(cond,r_dst));
+            return r_dst;
+         }
+         break;
+      case Iop_1Sto8:
+      case Iop_1Sto16:
+      case Iop_1Sto32: {
+         /* could do better than this, but for now ... */
+         HReg        r_dst = newVRegI(env);
+         PPCCondCode cond  = iselCondCode(env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_Set(cond,r_dst));
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+                                r_dst, r_dst, PPCRH_Imm(False,31)));
+         addInstr(env,
+                  PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/,
+                                r_dst, r_dst, PPCRH_Imm(False,31)));
+         return r_dst;
+      }
+      case Iop_1Sto64: 
+         if (mode64) {
+            /* could do better than this, but for now ... */
+            HReg        r_dst = newVRegI(env);
+            PPCCondCode cond  = iselCondCode(env, e->Iex.Unop.arg, IEndianess);
+            addInstr(env, PPCInstr_Set(cond,r_dst));
+            addInstr(env, PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/,
+                                        r_dst, r_dst, PPCRH_Imm(False,63)));
+            addInstr(env, PPCInstr_Shft(Pshft_SAR, False/*64bit shift*/,
+                                        r_dst, r_dst, PPCRH_Imm(False,63)));
+            return r_dst;
+         }
+         break;
+      case Iop_Clz32:
+      case Iop_Clz64: {
+         HReg r_src, r_dst;
+         PPCUnaryOp op_clz = (op_unop == Iop_Clz32) ? Pun_CLZ32 :
+                                                      Pun_CLZ64;
+         if (op_unop == Iop_Clz64 && !mode64)
+            goto irreducible;
+         /* Count leading zeroes. */
+         r_dst = newVRegI(env);
+         r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_Unary(op_clz,r_dst,r_src));
+         return r_dst;
+      }
+
+      case Iop_Left8:
+      case Iop_Left16:
+      case Iop_Left32: 
+      case Iop_Left64: {
+         HReg r_src, r_dst;
+         if (op_unop == Iop_Left64 && !mode64)
+            goto irreducible;
+         r_dst = newVRegI(env);
+         r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src));
+         addInstr(env, PPCInstr_Alu(Palu_OR, r_dst, r_dst, PPCRH_Reg(r_src)));
+         return r_dst;
+      }
+
+      case Iop_CmpwNEZ32: {
+         HReg r_dst = newVRegI(env);
+         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src));
+         addInstr(env, PPCInstr_Alu(Palu_OR, r_dst, r_dst, PPCRH_Reg(r_src)));
+         addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, 
+                                     r_dst, r_dst, PPCRH_Imm(False, 31)));
+         return r_dst;
+      }
+
+      case Iop_CmpwNEZ64: {
+         HReg r_dst = newVRegI(env);
+         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         if (!mode64) goto irreducible;
+         addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src));
+         addInstr(env, PPCInstr_Alu(Palu_OR, r_dst, r_dst, PPCRH_Reg(r_src)));
+         addInstr(env, PPCInstr_Shft(Pshft_SAR, False/*64bit shift*/, 
+                                     r_dst, r_dst, PPCRH_Imm(False, 63)));
+         return r_dst;
+      }
+
+      case Iop_V128to32: {
+         HReg        r_aligned16;
+         HReg        dst  = newVRegI(env);
+         HReg        vec  = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+         PPCAMode *am_off0, *am_off_word0;
+         sub_from_sp( env, 32 );     // Move SP down 32 bytes
+
+         // get a quadword aligned address within our stack space
+         r_aligned16 = get_sp_aligned16( env );
+         am_off0  = PPCAMode_IR( 0, r_aligned16 );
+
+         /* Note that the store below (done via PPCInstr_AvLdSt) uses
+          * stvx, which stores the vector in proper LE format,
+          * with byte zero (far right byte of the register in LE format)
+          * stored at the lowest memory address.  Therefore, to obtain
+          * integer word zero, we need to use that lowest memory address
+          * as the base for the load.
+          */
+         if (IEndianess == Iend_LE)
+            am_off_word0 = am_off0;
+         else
+            am_off_word0 = PPCAMode_IR( 12,r_aligned16 );
+
+         // store vec, load low word to dst
+         addInstr(env,
+                  PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
+         addInstr(env,
+                  PPCInstr_Load( 4, dst, am_off_word0, mode64 ));
+
+         add_to_sp( env, 32 );       // Reset SP
+         return dst;
+      }
+
+      case Iop_V128to64:
+      case Iop_V128HIto64: 
+         if (mode64) {
+            HReg     r_aligned16;
+            HReg     dst = newVRegI(env);
+            HReg     vec = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+            PPCAMode *am_off0, *am_off8, *am_off_arg;
+            sub_from_sp( env, 32 );     // Move SP down 32 bytes
+
+            // get a quadword aligned address within our stack space
+            r_aligned16 = get_sp_aligned16( env );
+            am_off0 = PPCAMode_IR( 0, r_aligned16 );
+            am_off8 = PPCAMode_IR( 8 ,r_aligned16 );
+
+            // store vec, load low word or high to dst
+            addInstr(env,
+                     PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
+            if (IEndianess == Iend_LE) {
+               if (op_unop == Iop_V128HIto64)
+                  am_off_arg = am_off8;
+               else
+                  am_off_arg = am_off0;
+            } else {
+               if (op_unop == Iop_V128HIto64)
+                  am_off_arg = am_off0;
+               else
+                  am_off_arg = am_off8;
+            }
+            addInstr(env,
+                     PPCInstr_Load( 
+                        8, dst, 
+                        am_off_arg,
+                        mode64 ));
+
+            add_to_sp( env, 32 );       // Reset SP
+            return dst;
+         }
+         break;
+      case Iop_16to8:
+      case Iop_32to8:
+      case Iop_32to16:
+      case Iop_64to8:
+         /* These are no-ops. */
+         return iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         
+      /* ReinterpF64asI64(e) */
+      /* Given an IEEE754 double, produce an I64 with the same bit
+         pattern. */
+      case Iop_ReinterpF64asI64: 
+         if (mode64) {
+            PPCAMode *am_addr;
+            HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg, IEndianess);
+            HReg r_dst  = newVRegI(env);
+
+            sub_from_sp( env, 16 );     // Move SP down 16 bytes
+            am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
+
+            // store as F64
+            addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8,
+                                           fr_src, am_addr ));
+            // load as Ity_I64
+            addInstr(env, PPCInstr_Load( 8, r_dst, am_addr, mode64 ));
+
+            add_to_sp( env, 16 );       // Reset SP
+            return r_dst;
+         }
+         break;
+
+      /* ReinterpF32asI32(e) */
+      /* Given an IEEE754 float, produce an I32 with the same bit
+         pattern. */
+      case Iop_ReinterpF32asI32: {
+         /* I believe this generates correct code for both 32- and
+            64-bit hosts. */
+         PPCAMode *am_addr;
+         HReg fr_src = iselFltExpr(env, e->Iex.Unop.arg, IEndianess);
+         HReg r_dst  = newVRegI(env);
+
+         sub_from_sp( env, 16 );     // Move SP down 16 bytes
+         am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
+
+         // store as F32
+         addInstr(env, PPCInstr_FpLdSt( False/*store*/, 4,
+                                        fr_src, am_addr ));
+         // load as Ity_I32
+         addInstr(env, PPCInstr_Load( 4, r_dst, am_addr, mode64 ));
+
+         add_to_sp( env, 16 );       // Reset SP
+         return r_dst;
+      }
+      break;
+
+      case Iop_ReinterpD64asI64:
+         if (mode64) {
+            PPCAMode *am_addr;
+            HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg, IEndianess);
+            HReg r_dst  = newVRegI(env);
+
+            sub_from_sp( env, 16 );     // Move SP down 16 bytes
+            am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) );
+
+            // store as D64
+            addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8,
+                                           fr_src, am_addr ));
+            // load as Ity_I64
+            addInstr(env, PPCInstr_Load( 8, r_dst, am_addr, mode64 ));
+            add_to_sp( env, 16 );       // Reset SP
+            return r_dst;
+         } 
+         break;
+
+      case Iop_BCDtoDPB: {
+         /* the following is only valid in 64 bit mode */
+         if (!mode64) break;
+
+         PPCCondCode cc;
+         UInt        argiregs;
+         HReg        argregs[1];
+         HReg        r_dst  = newVRegI(env);
+         Int         argreg;
+
+         argiregs = 0;
+         argreg = 0;
+         argregs[0] = hregPPC_GPR3(mode64);
+
+         argiregs |= (1 << (argreg+3));
+         addInstr(env, mk_iMOVds_RR( argregs[argreg++],
+                                     iselWordExpr_R(env, e->Iex.Unop.arg,
+                                                    IEndianess) ) );
+
+         cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
+         if (IEndianess == Iend_LE) {
+             addInstr(env, PPCInstr_Call( cc, (Addr)h_calc_BCDtoDPB,
+                                          argiregs,
+                                          mk_RetLoc_simple(RLPri_Int)) );
+         } else {
+             HWord*      fdescr;
+             fdescr = (HWord*)h_calc_BCDtoDPB;
+             addInstr(env, PPCInstr_Call( cc, (Addr64)(fdescr[0]),
+                                          argiregs,
+                                          mk_RetLoc_simple(RLPri_Int)) );
+         }
+
+         addInstr(env, mk_iMOVds_RR(r_dst, argregs[0]));
+         return r_dst;
+      }
+
+      case Iop_DPBtoBCD: {
+         /* the following is only valid in 64 bit mode */
+         if (!mode64) break;
+
+         PPCCondCode cc;
+         UInt        argiregs;
+         HReg        argregs[1];
+         HReg        r_dst  = newVRegI(env);
+         Int         argreg;
+
+         argiregs = 0;
+         argreg = 0;
+         argregs[0] = hregPPC_GPR3(mode64);
+
+         argiregs |= (1 << (argreg+3));
+         addInstr(env, mk_iMOVds_RR( argregs[argreg++],
+                                     iselWordExpr_R(env, e->Iex.Unop.arg,
+                                                    IEndianess) ) );
+
+         cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
+
+        if (IEndianess == Iend_LE) {
+            addInstr(env, PPCInstr_Call( cc, (Addr)h_calc_DPBtoBCD,
+                                         argiregs, 
+                                         mk_RetLoc_simple(RLPri_Int) ) );
+	} else {
+            HWord*      fdescr;
+            fdescr = (HWord*)h_calc_DPBtoBCD;
+            addInstr(env, PPCInstr_Call( cc, (Addr64)(fdescr[0]),
+                                         argiregs,
+                                         mk_RetLoc_simple(RLPri_Int) ) );
+         }
+
+         addInstr(env, mk_iMOVds_RR(r_dst, argregs[0]));
+         return r_dst;
+      }
+
+      default: 
+         break;
+      }
+
+     switch (e->Iex.Unop.op) {
+        case Iop_ExtractExpD64: {
+
+            HReg fr_dst = newVRegI(env);
+            HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg, IEndianess);
+            HReg tmp    = newVRegF(env);
+            PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+            addInstr(env, PPCInstr_Dfp64Unary(Pfp_DXEX, tmp, fr_src));
+
+            // put the D64 result into a integer register
+            sub_from_sp( env, 16 );
+            addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, tmp, zero_r1));
+            addInstr(env, PPCInstr_Load(8, fr_dst, zero_r1, env->mode64));
+            add_to_sp( env, 16 );
+            return fr_dst;
+         }
+         case Iop_ExtractExpD128: {
+            HReg fr_dst = newVRegI(env);
+            HReg r_srcHi;
+            HReg r_srcLo;
+            HReg tmp    = newVRegF(env);
+            PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+
+            iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Unop.arg,
+                           IEndianess);
+            addInstr(env, PPCInstr_ExtractExpD128(Pfp_DXEXQ, tmp,
+                                                  r_srcHi, r_srcLo));
+
+            sub_from_sp( env, 16 );
+            addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, tmp, zero_r1));
+            addInstr(env, PPCInstr_Load(8, fr_dst, zero_r1, env->mode64));
+            add_to_sp( env, 16 );
+            return fr_dst;
+         }
+         default: 
+            break;
+      }
+
+      break;
+   }
+
+   /* --------- GET --------- */
+   case Iex_Get: {
+      if (ty == Ity_I8  || ty == Ity_I16 ||
+          ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
+         HReg r_dst = newVRegI(env);
+         PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
+                                          GuestStatePtr(mode64) );
+         addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)), 
+                                      r_dst, am_addr, mode64 ));
+         return r_dst;
+      }
+      break;
+   }
+
+   case Iex_GetI: {
+      PPCAMode* src_am
+         = genGuestArrayOffset( env, e->Iex.GetI.descr,
+                                e->Iex.GetI.ix, e->Iex.GetI.bias,
+                                IEndianess );
+      HReg r_dst = newVRegI(env);
+      if (mode64 && ty == Ity_I64) {
+         addInstr(env, PPCInstr_Load( toUChar(8),
+                                      r_dst, src_am, mode64 ));
+         return r_dst;
+      }
+      if ((!mode64) && ty == Ity_I32) {
+         addInstr(env, PPCInstr_Load( toUChar(4),
+                                      r_dst, src_am, mode64 ));
+         return r_dst;
+      }
+      break;
+   }
+
+   /* --------- CCALL --------- */
+   case Iex_CCall: {
+      vassert(ty == e->Iex.CCall.retty); /* well-formedness of IR */
+
+      /* be very restrictive for now.  Only 32/64-bit ints allowed for
+         args, and 32 bits or host machine word for return type. */
+      if (!(ty == Ity_I32 || (mode64 && ty == Ity_I64)))
+         goto irreducible;
+
+      /* Marshal args, do the call, clear stack. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, NULL/*guard*/,
+                    e->Iex.CCall.cee, e->Iex.CCall.retty, e->Iex.CCall.args,
+                    IEndianess );
+      vassert(is_sane_RetLoc(rloc));
+      vassert(rloc.pri == RLPri_Int);
+      vassert(addToSp == 0);
+
+      /* GPR3 now holds the destination address from Pin_Goto */
+      HReg r_dst = newVRegI(env);
+      addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64)));
+      return r_dst;
+   }
+      
+   /* --------- LITERAL --------- */
+   /* 32/16/8-bit literals */
+   case Iex_Const: {
+      Long l;
+      HReg r_dst = newVRegI(env);
+      IRConst* con = e->Iex.Const.con;
+      switch (con->tag) {
+         case Ico_U64: if (!mode64) goto irreducible;
+                       l = (Long)            con->Ico.U64; break;
+         case Ico_U32: l = (Long)(Int)       con->Ico.U32; break;
+         case Ico_U16: l = (Long)(Int)(Short)con->Ico.U16; break;
+         case Ico_U8:  l = (Long)(Int)(Char )con->Ico.U8;  break;
+         default:      vpanic("iselIntExpr_R.const(ppc)");
+      }
+      addInstr(env, PPCInstr_LI(r_dst, (ULong)l, mode64));
+      return r_dst;
+   }
+
+   /* --------- MULTIPLEX --------- */
+   case Iex_ITE: { // VFD
+      if ((ty == Ity_I8  || ty == Ity_I16 ||
+           ty == Ity_I32 || ((ty == Ity_I64) && mode64)) &&
+          typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+         PPCRI* r1    = iselWordExpr_RI(env, e->Iex.ITE.iftrue, IEndianess);
+         HReg   r0    = iselWordExpr_R(env, e->Iex.ITE.iffalse, IEndianess);
+         HReg   r_dst = newVRegI(env);
+         addInstr(env, mk_iMOVds_RR(r_dst,r0));
+         PPCCondCode cc = iselCondCode(env, e->Iex.ITE.cond, IEndianess);
+         addInstr(env, PPCInstr_CMov(cc, r_dst, r1));
+         return r_dst;
+      }
+      break;
+   }
+      
+   default: 
+      break;
+   } /* switch (e->tag) */
+
+
+   /* We get here if no pattern matched. */
+ irreducible:
+   ppIRExpr(e);
+   vpanic("iselIntExpr_R(ppc): cannot reduce tree");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expression auxiliaries              ---*/
+/*---------------------------------------------------------*/
+
+/* --------------------- AMODEs --------------------- */
+
+/* Return an AMode which computes the value of the specified
+   expression, possibly also adding insns to the code list as a
+   result.  The expression may only be a word-size one.
+*/
+
+/* Return True iff u equals the 32-bit sign-extension of its low 16
+   bits, i.e. iff u is representable as a 16-bit signed immediate. */
+static Bool uInt_fits_in_16_bits ( UInt u ) 
+{
+   /* Is u the same as the sign-extend of its lower 16 bits?
+      Computed with unsigned arithmetic only: the previous
+      formulation left-shifted a signed Int by 16, which is undefined
+      behaviour when bit 15 of u is set. */
+   UInt sext = u & 0xFFFF;
+   if (sext & 0x8000)
+      sext |= 0xFFFF0000;
+   return toBool(u == sext);
+}
+
+/* Return True iff u equals the 64-bit sign-extension of its low 16
+   bits, i.e. iff u is representable as a 16-bit signed immediate. */
+static Bool uLong_fits_in_16_bits ( ULong u ) 
+{
+   /* Is u the same as the sign-extend of its lower 16 bits?
+      Unsigned-only formulation: left-shifting a signed Long by 48
+      is undefined behaviour when bit 15 of u is set. */
+   ULong sext = u & 0xFFFFULL;
+   if (sext & 0x8000ULL)
+      sext |= 0xFFFFFFFFFFFF0000ULL;
+   return toBool(u == sext);
+}
+
+/* True iff u is a multiple of 4 (required for displacement fields of
+   DS-form loads/stores such as ld/std). */
+static Bool uLong_is_4_aligned ( ULong u )
+{
+   return toBool(0ULL == (u & 3ULL));
+}
+
+/* Sanity-check an amode: base (and index) must be virtual GPRs of
+   the host word size, and any immediate displacement must fit in 16
+   signed bits. */
+static Bool sane_AMode ( ISelEnv* env, PPCAMode* am )
+{
+   Bool mode64 = env->mode64;
+   switch (am->tag) {
+   case Pam_IR: {
+      /* Using uInt_fits_in_16_bits in 64-bit mode seems a bit bogus,
+         somehow, but I think it's OK. */
+      Bool baseOK = toBool( hregClass(am->Pam.IR.base) == HRcGPR(mode64)
+                            && hregIsVirtual(am->Pam.IR.base) );
+      return toBool( baseOK
+                     && uInt_fits_in_16_bits(am->Pam.IR.index) );
+   }
+   case Pam_RR: {
+      Bool baseOK = toBool( hregClass(am->Pam.RR.base) == HRcGPR(mode64)
+                            && hregIsVirtual(am->Pam.RR.base) );
+      Bool idxOK  = toBool( hregClass(am->Pam.RR.index) == HRcGPR(mode64)
+                            && hregIsVirtual(am->Pam.RR.index) );
+      return toBool( baseOK && idxOK );
+   }
+   default:
+      vpanic("sane_AMode: unknown ppc amode tag");
+   }
+}
+
+static 
+PPCAMode* iselWordExpr_AMode ( ISelEnv* env, IRExpr* e, IRType xferTy,
+                               IREndness IEndianess )
+{
+   /* Delegate to the worker, then insist the result is sane.  xferTy
+      is the type of the data moved through the amode; the worker uses
+      it to decide whether the displacement must be 4-aligned. */
+   PPCAMode* result = iselWordExpr_AMode_wrk( env, e, xferTy, IEndianess );
+   vassert(sane_AMode(env, result));
+   return result;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static PPCAMode* iselWordExpr_AMode_wrk ( ISelEnv* env, IRExpr* e,
+                                          IRType xferTy, IREndness IEndianess )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+
+   if (env->mode64) {
+
+      /* If the data load/store type is I32 or I64, this amode might
+         be destined for use in ld/ldu/lwa/st/stu.  In which case
+         insist that if it comes out as an _IR, the immediate must
+         have its bottom two bits be zero.  This does assume that for
+         any other type (I8/I16/I128/F32/F64/V128) the amode will not
+         be parked in any such instruction.  But that seems a
+         reasonable assumption.  */
+      Bool aligned4imm = toBool(xferTy == Ity_I32 || xferTy == Ity_I64);
+
+      vassert(ty == Ity_I64);
+
+      if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add64) {
+         IRExpr* rhs = e->Iex.Binop.arg2;
+         /* Add64(expr,i), where i == sign-extend of (i & 0xFFFF) */
+         if (rhs->tag == Iex_Const
+             && rhs->Iex.Const.con->tag == Ico_U64) {
+            ULong imm = rhs->Iex.Const.con->Ico.U64;
+            if ((aligned4imm ? uLong_is_4_aligned(imm) : True)
+                && uLong_fits_in_16_bits(imm)) {
+               return PPCAMode_IR( (Int)imm,
+                                   iselWordExpr_R(env, e->Iex.Binop.arg1,
+                                                  IEndianess) );
+            }
+         }
+         /* Add64(expr,expr) */
+         {
+            HReg rA = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+            HReg rB = iselWordExpr_R(env, rhs, IEndianess);
+            return PPCAMode_RR( rB, rA );
+         }
+      }
+
+   } else {
+
+      vassert(ty == Ity_I32);
+
+      if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_Add32) {
+         IRExpr* rhs = e->Iex.Binop.arg2;
+         /* Add32(expr,i), where i == sign-extend of (i & 0xFFFF) */
+         if (rhs->tag == Iex_Const
+             && rhs->Iex.Const.con->tag == Ico_U32
+             && uInt_fits_in_16_bits(rhs->Iex.Const.con->Ico.U32)) {
+            return PPCAMode_IR( (Int)rhs->Iex.Const.con->Ico.U32,
+                                iselWordExpr_R(env, e->Iex.Binop.arg1,
+                                               IEndianess) );
+         }
+         /* Add32(expr,expr) */
+         {
+            HReg rA = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+            HReg rB = iselWordExpr_R(env, rhs, IEndianess);
+            return PPCAMode_RR( rB, rA );
+         }
+      }
+
+   }
+
+   /* Doesn't match anything in particular.  Generate it into
+      a register and use that. */
+   return PPCAMode_IR( 0, iselWordExpr_R(env,e,IEndianess) );
+}
+
+
+/* --------------------- RH --------------------- */
+
+/* Compute an I8/I16/I32 (and I64, in 64-bit mode) into a RH
+   (reg-or-halfword-immediate).  It's important to specify whether the
+   immediate is to be regarded as signed or not.  If yes, this will
+   never return -32768 as an immediate; this guarantees that all
+   signed immediates that are returned can have their sign inverted if
+   need be. */
+
+static PPCRH* iselWordExpr_RH ( ISelEnv* env, Bool syned, IRExpr* e,
+                                IREndness IEndianess )
+{
+   PPCRH* rh = iselWordExpr_RH_wrk(env, syned, e, IEndianess);
+   /* sanity checks ... */
+   switch (rh->tag) {
+   case Prh_Imm:
+      /* The worker must agree about signedness, and a signed
+         immediate must never be -32768 (it cannot be negated). */
+      vassert(rh->Prh.Imm.syned == syned);
+      if (syned)
+         vassert(rh->Prh.Imm.imm16 != 0x8000);
+      return rh;
+   case Prh_Reg:
+      /* Register results must be virtual GPRs of host word size. */
+      vassert(hregClass(rh->Prh.Reg.reg) == HRcGPR(env->mode64));
+      vassert(hregIsVirtual(rh->Prh.Reg.reg));
+      return rh;
+   default:
+      vpanic("iselIntExpr_RH: unknown ppc RH tag");
+   }
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static PPCRH* iselWordExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e,
+                                    IREndness IEndianess )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I8  || ty == Ity_I16 ||
+           ty == Ity_I32 || ((ty == Ity_I64) && env->mode64));
+
+   /* special case: immediate */
+   if (e->tag == Iex_Const) {
+      ULong u;
+      Long  l;
+      IRConst* con = e->Iex.Const.con;
+      /* What value are we aiming to generate? */
+      switch (con->tag) {
+      /* Note: Not sign-extending - we carry 'syned' around */
+      case Ico_U64: vassert(env->mode64);
+                    u =              con->Ico.U64; break;
+      case Ico_U32: u = 0xFFFFFFFF & con->Ico.U32; break;
+      case Ico_U16: u = 0x0000FFFF & con->Ico.U16; break;
+      case Ico_U8:  u = 0x000000FF & con->Ico.U8; break;
+      default:      vpanic("iselIntExpr_RH.Iex_Const(ppch)");
+      }
+      l = (Long)u;
+      /* Now figure out if it's representable. */
+      if (!syned && u <= 65535)
+         return PPCRH_Imm(False/*unsigned*/, toUShort(u & 0xFFFF));
+      /* Exclude -32768, per the contract stated at iselWordExpr_RH. */
+      if (syned && l >= -32767 && l <= 32767)
+         return PPCRH_Imm(True/*signed*/, toUShort(u & 0xFFFF));
+      /* no luck; use the Slow Way. */
+   }
+
+   /* default case: calculate into a register and return that */
+   return PPCRH_Reg( iselWordExpr_R ( env, e, IEndianess ) );
+}
+
+
+/* --------------------- RIs --------------------- */
+
+/* Calculate an expression into an PPCRI operand.  As with
+   iselIntExpr_R, the expression can have type 32, 16 or 8 bits, or,
+   in 64-bit mode, 64 bits. */
+
+static PPCRI* iselWordExpr_RI ( ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   PPCRI* result = iselWordExpr_RI_wrk(env, e, IEndianess);
+   /* sanity checks ... */
+   switch (result->tag) {
+   case Pri_Imm:
+      /* Any immediate is acceptable. */
+      return result;
+   case Pri_Reg:
+      /* Register results must be virtual GPRs of host word size. */
+      vassert(hregClass(result->Pri.Reg) == HRcGPR(env->mode64));
+      vassert(hregIsVirtual(result->Pri.Reg));
+      return result;
+   default:
+      vpanic("iselIntExpr_RI: unknown ppc RI tag");
+   }
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static PPCRI* iselWordExpr_RI_wrk ( ISelEnv* env, IRExpr* e,
+                                    IREndness IEndianess )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I8  || ty == Ity_I16 ||
+           ty == Ity_I32 || ((ty == Ity_I64) && env->mode64));
+
+   /* special case: immediate.  Sign-extend the constant to a Long,
+      since a PPCRI immediate carries the full host word. */
+   if (e->tag == Iex_Const) {
+      Long val;
+      IRConst* con = e->Iex.Const.con;
+      switch (con->tag) {
+      case Ico_U64:
+         vassert(env->mode64);
+         val = (Long)con->Ico.U64;
+         break;
+      case Ico_U32:
+         val = (Long)(Int)con->Ico.U32;
+         break;
+      case Ico_U16:
+         val = (Long)(Int)(Short)con->Ico.U16;
+         break;
+      case Ico_U8:
+         val = (Long)(Int)(Char)con->Ico.U8;
+         break;
+      default:
+         vpanic("iselIntExpr_RI.Iex_Const(ppch)");
+      }
+      return PPCRI_Imm((ULong)val);
+   }
+
+   /* default case: calculate into a register and return that */
+   return PPCRI_Reg( iselWordExpr_R ( env, e, IEndianess ) );
+}
+
+
+/* --------------------- RH5u --------------------- */
+
+/* Compute an I8 into a reg-or-5-bit-unsigned-immediate, the latter
+   being an immediate in the range 1 .. 31 inclusive.  Used for doing
+   shift amounts.  Only used in 32-bit mode. */
+
+static PPCRH* iselWordExpr_RH5u ( ISelEnv* env, IRExpr* e,
+                                  IREndness IEndianess )
+{
+   PPCRH* ri;
+   vassert(!env->mode64);
+   ri = iselWordExpr_RH5u_wrk(env, e, IEndianess);
+   /* sanity checks ... */
+   switch (ri->tag) {
+   case Prh_Imm:
+      /* Immediate shift amounts must be unsigned and in 1 .. 31. */
+      vassert(ri->Prh.Imm.imm16 >= 1 && ri->Prh.Imm.imm16 <= 31);
+      vassert(!ri->Prh.Imm.syned);
+      return ri;
+   case Prh_Reg:
+      vassert(hregClass(ri->Prh.Reg.reg) == HRcGPR(env->mode64));
+      vassert(hregIsVirtual(ri->Prh.Reg.reg));
+      return ri;
+   default:
+      /* Previously said "unknown ppc RI tag", but this function deals
+         in RH operands; name them correctly in the panic text. */
+      vpanic("iselIntExpr_RH5u: unknown ppc RH tag");
+   }
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static PPCRH* iselWordExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e,
+                                      IREndness IEndianess )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I8);
+
+   /* special case: immediate shift amount in the range 1 .. 31 */
+   if (e->tag == Iex_Const && e->Iex.Const.con->tag == Ico_U8) {
+      UChar amt = e->Iex.Const.con->Ico.U8;
+      if (amt >= 1 && amt <= 31)
+         return PPCRH_Imm(False/*unsigned*/, amt);
+   }
+
+   /* default case: calculate into a register and return that */
+   return PPCRH_Reg( iselWordExpr_R ( env, e, IEndianess ) );
+}
+
+
+/* --------------------- RH6u --------------------- */
+
+/* Compute an I8 into a reg-or-6-bit-unsigned-immediate, the latter
+   being an immediate in the range 1 .. 63 inclusive.  Used for doing
+   shift amounts.  Only used in 64-bit mode. */
+
+static PPCRH* iselWordExpr_RH6u ( ISelEnv* env, IRExpr* e,
+                                  IREndness IEndianess )
+{
+   PPCRH* ri; 
+   vassert(env->mode64);
+   ri = iselWordExpr_RH6u_wrk(env, e, IEndianess);
+   /* sanity checks ... */
+   switch (ri->tag) {
+   case Prh_Imm:
+      /* Immediate shift amounts must be unsigned and in 1 .. 63. */
+      vassert(ri->Prh.Imm.imm16 >= 1 && ri->Prh.Imm.imm16 <= 63);
+      vassert(!ri->Prh.Imm.syned);
+      return ri;
+   case Prh_Reg:
+      vassert(hregClass(ri->Prh.Reg.reg) == HRcGPR(env->mode64));
+      vassert(hregIsVirtual(ri->Prh.Reg.reg));
+      return ri;
+   default:
+      /* Previously said "unknown ppc64 RI tag", but this function
+         deals in RH operands; name them correctly in the panic text. */
+      vpanic("iselIntExpr_RH6u: unknown ppc64 RH tag");
+   }
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static PPCRH* iselWordExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e,
+                                      IREndness IEndianess )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I8);
+
+   /* special case: immediate shift amount in the range 1 .. 63 */
+   if (e->tag == Iex_Const && e->Iex.Const.con->tag == Ico_U8) {
+      UChar amt = e->Iex.Const.con->Ico.U8;
+      if (amt >= 1 && amt <= 63)
+         return PPCRH_Imm(False/*unsigned*/, amt);
+   }
+
+   /* default case: calculate into a register and return that */
+   return PPCRH_Reg( iselWordExpr_R ( env, e, IEndianess ) );
+}
+
+
+/* --------------------- CONDCODE --------------------- */
+
+/* Generate code to evaluate a bit-typed expression, returning the
+   condition code that would correspond to the expression notionally
+   having returned 1. */
+
+static PPCCondCode iselCondCode ( ISelEnv* env, IRExpr* e,
+                                  IREndness IEndianess )
+{
+   /* A PPCCondCode offers nothing to sanity-check, so just delegate
+      to the worker. */
+   return iselCondCode_wrk( env, e, IEndianess );
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static PPCCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e,
+                                      IREndness IEndianess )
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I1);
+
+   /* Constant 1:Bit */
+   if (e->tag == Iex_Const && e->Iex.Const.con->Ico.U1 == True) {
+      // Make a compare that will always be true:
+      HReg r_zero = newVRegI(env);
+      addInstr(env, PPCInstr_LI(r_zero, 0, env->mode64));
+      addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+                                 7/*cr*/, r_zero, PPCRH_Reg(r_zero)));
+      return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
+   }
+
+   /* Not1(...) */
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) {
+      /* Generate code for the arg, and negate the test condition */
+      PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg, IEndianess);
+      cond.test = invertCondTest(cond.test);
+      return cond;
+   }
+
+   /* --- patterns rooted at: 32to1 or 64to1 --- */
+
+   /* 32to1, 64to1 */
+   if (e->tag == Iex_Unop &&
+       (e->Iex.Unop.op == Iop_32to1 || e->Iex.Unop.op == Iop_64to1)) {
+      HReg src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+      HReg tmp = newVRegI(env);
+      /* could do better, probably -- andi. */
+      addInstr(env, PPCInstr_Alu(Palu_AND, tmp,
+                                 src, PPCRH_Imm(False,1)));
+      addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+                                 7/*cr*/, tmp, PPCRH_Imm(False,1)));
+      return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
+   }
+
+   /* --- patterns rooted at: CmpNEZ8 --- */
+
+   /* CmpNEZ8(x) */
+   /* Note this cloned as CmpNE8(x,0) below. */
+   /* could do better -- andi. */
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_CmpNEZ8) {
+      HReg arg = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+      HReg tmp = newVRegI(env);
+      addInstr(env, PPCInstr_Alu(Palu_AND, tmp, arg,
+                                 PPCRH_Imm(False,0xFF)));
+      addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+                                 7/*cr*/, tmp, PPCRH_Imm(False,0)));
+      return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+   }
+
+   /* --- patterns rooted at: CmpNEZ32 --- */
+
+   /* CmpNEZ32(x) */
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_CmpNEZ32) {
+      HReg r1 = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+      addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+                                 7/*cr*/, r1, PPCRH_Imm(False,0)));
+      return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+   }
+
+   /* --- patterns rooted at: Cmp*32* --- */
+
+   /* Cmp*32*(x,y) */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpEQ32
+           || e->Iex.Binop.op == Iop_CmpNE32
+           || e->Iex.Binop.op == Iop_CmpLT32S
+           || e->Iex.Binop.op == Iop_CmpLT32U
+           || e->Iex.Binop.op == Iop_CmpLE32S
+           || e->Iex.Binop.op == Iop_CmpLE32U)) {
+      Bool syned = (e->Iex.Binop.op == Iop_CmpLT32S ||
+                    e->Iex.Binop.op == Iop_CmpLE32S);
+      HReg   r1  = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+      PPCRH* ri2 = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2, IEndianess);
+      addInstr(env, PPCInstr_Cmp(syned, True/*32bit cmp*/,
+                                 7/*cr*/, r1, ri2));
+
+      switch (e->Iex.Binop.op) {
+      case Iop_CmpEQ32:  return mk_PPCCondCode( Pct_TRUE,  Pcf_7EQ );
+      case Iop_CmpNE32:  return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+      case Iop_CmpLT32U: case Iop_CmpLT32S:
+         return mk_PPCCondCode( Pct_TRUE,  Pcf_7LT );
+      case Iop_CmpLE32U: case Iop_CmpLE32S:
+         return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
+      default: vpanic("iselCondCode(ppc): CmpXX32");
+      }
+   }
+
+   /* --- patterns rooted at: CmpNEZ64 --- */
+
+   /* CmpNEZ64 */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ64) {
+      if (!env->mode64) {
+         /* 32-bit host: OR the two halves together and test that. */
+         HReg hi, lo;
+         HReg tmp = newVRegI(env);
+         iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg, IEndianess );
+         addInstr(env, PPCInstr_Alu(Palu_OR, tmp, lo, PPCRH_Reg(hi)));
+         addInstr(env, PPCInstr_Cmp(False/*sign*/, True/*32bit cmp*/,
+                                    7/*cr*/, tmp,PPCRH_Imm(False,0)));
+         return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+      } else {  // mode64
+         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_Cmp(False/*sign*/, False/*64bit cmp*/,
+                                    7/*cr*/, r_src,PPCRH_Imm(False,0)));
+         return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+      }
+   }
+
+   /* --- patterns rooted at: Cmp*64* --- */
+
+   /* Cmp*64*(x,y) */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpEQ64
+           || e->Iex.Binop.op == Iop_CmpNE64
+           || e->Iex.Binop.op == Iop_CmpLT64S
+           || e->Iex.Binop.op == Iop_CmpLT64U
+           || e->Iex.Binop.op == Iop_CmpLE64S
+           || e->Iex.Binop.op == Iop_CmpLE64U)) {
+      Bool   syned = (e->Iex.Binop.op == Iop_CmpLT64S ||
+                      e->Iex.Binop.op == Iop_CmpLE64S);
+      HReg    r1 = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+      PPCRH* ri2 = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2, IEndianess);
+      vassert(env->mode64);
+      addInstr(env, PPCInstr_Cmp(syned, False/*64bit cmp*/,
+                                 7/*cr*/, r1, ri2));
+
+      /* The signed variants were previously missing from this switch
+         and fell into the vpanic default, even though the guard above
+         admits them and 'syned' selects the signed compare.  Handle
+         them together with the unsigned ones, as the 32-bit case
+         above does; the CR field tested is the same either way. */
+      switch (e->Iex.Binop.op) {
+      case Iop_CmpEQ64:  return mk_PPCCondCode( Pct_TRUE,  Pcf_7EQ );
+      case Iop_CmpNE64:  return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+      case Iop_CmpLT64U: case Iop_CmpLT64S:
+         return mk_PPCCondCode( Pct_TRUE,  Pcf_7LT );
+      case Iop_CmpLE64U: case Iop_CmpLE64S:
+         return mk_PPCCondCode( Pct_FALSE, Pcf_7GT );
+      default: vpanic("iselCondCode(ppc): CmpXX64");
+      }
+   }
+
+   /* --- patterns rooted at: CmpNE8 --- */
+
+   /* CmpNE8(x,0) */
+   /* Note this is a direct copy of CmpNEZ8 above. */
+   /* could do better -- andi. */
+   if (e->tag == Iex_Binop
+       && e->Iex.Binop.op == Iop_CmpNE8
+       && isZeroU8(e->Iex.Binop.arg2)) {
+      HReg arg = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+      HReg tmp = newVRegI(env);
+      addInstr(env, PPCInstr_Alu(Palu_AND, tmp, arg,
+                                 PPCRH_Imm(False,0xFF)));
+      addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+                                 7/*cr*/, tmp, PPCRH_Imm(False,0)));
+      return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ );
+   }
+
+   /* var */
+   if (e->tag == Iex_RdTmp) {
+      HReg r_src      = lookupIRTemp(env, e->Iex.RdTmp.tmp);
+      HReg src_masked = newVRegI(env);
+      addInstr(env,
+               PPCInstr_Alu(Palu_AND, src_masked,
+                            r_src, PPCRH_Imm(False,1)));
+      addInstr(env,
+               PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
+                            7/*cr*/, src_masked, PPCRH_Imm(False,1)));
+      return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
+   }
+
+   vex_printf("iselCondCode(ppc): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselCondCode(ppc)");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (128 bit)               ---*/
+/*---------------------------------------------------------*/
+
+/* 64-bit mode ONLY: compute a 128-bit value into a register pair,
+   which is returned as the first two parameters.  As with
+   iselWordExpr_R, these may be either real or virtual regs; in any
+   case they must not be changed by subsequent code emitted by the
+   caller.  */
+
+static void iselInt128Expr ( HReg* rHi, HReg* rLo,
+                             ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   vassert(env->mode64);
+   iselInt128Expr_wrk(rHi, rLo, env, e, IEndianess);
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* Both halves must come back as virtual GPRs of host word size. */
+   vassert(hregClass(*rHi) == HRcGPR(env->mode64));
+   vassert(hregIsVirtual(*rHi));
+   vassert(hregClass(*rLo) == HRcGPR(env->mode64));
+   vassert(hregIsVirtual(*rLo));
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+/* Worker for iselInt128Expr: computes a 128-bit value into two 64-bit
+   registers (*rHi = bits 127:64, *rLo = bits 63:0).  64-bit mode only
+   (asserted by the wrapper). */
+static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo,
+                                 ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I128);
+
+   /* read 128-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp);
+      return;
+   }
+
+   /* --------- BINARY ops --------- */
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+      /* 64 x 64 -> 128 multiply */
+      case Iop_MullU64:
+      case Iop_MullS64: {
+         HReg     tLo     = newVRegI(env);
+         HReg     tHi     = newVRegI(env);
+         Bool     syned   = toBool(e->Iex.Binop.op == Iop_MullS64);
+         HReg     r_srcL  = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         HReg     r_srcR  = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+         /* Low 64 bits are the same for signed and unsigned multiply;
+            only the high-half instruction needs the signedness. */
+         addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/, 
+                                     False/*lo64*/, False/*64bit mul*/,
+                                     tLo, r_srcL, r_srcR));
+         addInstr(env, PPCInstr_MulL(syned,
+                                     True/*hi64*/, False/*64bit mul*/,
+                                     tHi, r_srcL, r_srcR));
+         *rHi = tHi;
+         *rLo = tLo;
+         return;
+      }
+
+      /* 64HLto128(e1,e2) */
+      case Iop_64HLto128:
+         /* Just evaluate each half into its own register; no glue
+            instructions needed. */
+         *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+         *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+         return;
+      default: 
+         break;
+      }
+   } /* if (e->tag == Iex_Binop) */
+
+
+   /* --------- UNARY ops --------- */
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+      default:
+         break;
+      }
+   } /* if (e->tag == Iex_Unop) */
+
+   vex_printf("iselInt128Expr(ppc64): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselInt128Expr(ppc64)");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64 bit)                ---*/
+/*---------------------------------------------------------*/
+
+/* 32-bit mode ONLY: compute a 128-bit value into a register quad */
+static void iselInt128Expr_to_32x4 ( HReg* rHi, HReg* rMedHi, HReg* rMedLo,
+                                     HReg* rLo, ISelEnv* env, IRExpr* e,
+                                     IREndness IEndianess )
+{
+   /* 32-bit mode only: run the worker, then check that all four
+      32-bit quarters are virtual integer registers. */
+   vassert(!env->mode64);
+   iselInt128Expr_to_32x4_wrk(rHi, rMedHi, rMedLo, rLo, env, e, IEndianess);
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(*rHi)    == HRcInt32);
+   vassert(hregClass(*rMedHi) == HRcInt32);
+   vassert(hregClass(*rMedLo) == HRcInt32);
+   vassert(hregClass(*rLo)    == HRcInt32);
+   vassert(hregIsVirtual(*rHi));
+   vassert(hregIsVirtual(*rMedHi));
+   vassert(hregIsVirtual(*rMedLo));
+   vassert(hregIsVirtual(*rLo));
+}
+
+/* DO NOT CALL THIS DIRECTLY !  Worker for iselInt128Expr_to_32x4:
+   computes a 128-bit value into four 32-bit registers, most
+   significant quarter first (*rHi, *rMedHi, *rMedLo, *rLo). */
+static void iselInt128Expr_to_32x4_wrk ( HReg* rHi, HReg* rMedHi,
+                                         HReg* rMedLo, HReg* rLo,
+                                         ISelEnv* env, IRExpr* e,
+                                         IREndness IEndianess )
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I128);
+
+   /* read 128-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTempQuad( rHi, rMedHi, rMedLo, rLo, env, e->Iex.RdTmp.tmp);
+      return;
+   }
+
+   if (e->tag == Iex_Binop) {
+
+      IROp op_binop = e->Iex.Binop.op;
+      switch (op_binop) {
+      case Iop_64HLto128:
+         /* Each 64-bit argument supplies two of the four quarters. */
+         iselInt64Expr(rHi, rMedHi, env, e->Iex.Binop.arg1, IEndianess);
+         iselInt64Expr(rMedLo, rLo, env, e->Iex.Binop.arg2, IEndianess);
+         return;
+      default:
+         break;
+      }
+   } 
+
+   /* Must panic here: returning without setting the four output
+      registers would leave the caller's vasserts reading
+      uninitialised HRegs. */
+   vex_printf("iselInt128Expr_to_32x4_wrk(ppc): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselInt128Expr_to_32x4_wrk(ppc)");
+}
+
+/* 32-bit mode ONLY: compute a 64-bit value into a register pair,
+   which is returned as the first two parameters.  As with
+   iselIntExpr_R, these may be either real or virtual regs; in any
+   case they must not be changed by subsequent code emitted by the
+   caller.  */
+
+static void iselInt64Expr ( HReg* rHi, HReg* rLo,
+                            ISelEnv* env, IRExpr* e,
+                            IREndness IEndianess )
+{
+   /* 32-bit mode only: delegate to the worker, then check that both
+      halves of the result are virtual 32-bit integer registers. */
+   vassert(!env->mode64);
+   iselInt64Expr_wrk(rHi, rLo, env, e, IEndianess);
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(*rLo) == HRcInt32);
+   vassert(hregClass(*rHi) == HRcInt32);
+   vassert(hregIsVirtual(*rLo));
+   vassert(hregIsVirtual(*rHi));
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+/* Worker for iselInt64Expr: 32-bit mode only.  Computes a 64-bit value
+   into a pair of 32-bit registers (*rHi = bits 63:32, *rLo = bits 31:0).
+   Since mode64 is False here, the guest words in memory are laid out
+   big-endian: high word at the lower address. */
+static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo,
+                                ISelEnv* env, IRExpr* e,
+                                IREndness IEndianess )
+{
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I64);
+
+   /* 64-bit load */
+   if (e->tag == Iex_Load && e->Iex.Load.end == IEndianess) {
+      HReg tLo    = newVRegI(env);
+      HReg tHi    = newVRegI(env);
+      HReg r_addr = iselWordExpr_R(env, e->Iex.Load.addr, IEndianess);
+      vassert(!env->mode64);
+      /* Two 4-byte word loads: high word at addr+0, low at addr+4. */
+      addInstr(env, PPCInstr_Load( 4/*4-byte load*/,
+                                   tHi, PPCAMode_IR( 0, r_addr ), 
+                                   False/*32-bit insn please*/) );
+      addInstr(env, PPCInstr_Load( 4/*4-byte load*/, 
+                                   tLo, PPCAMode_IR( 4, r_addr ), 
+                                   False/*32-bit insn please*/) );
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* 64-bit literal */
+   if (e->tag == Iex_Const) {
+      ULong w64 = e->Iex.Const.con->Ico.U64;
+      UInt  wHi = ((UInt)(w64 >> 32)) & 0xFFFFFFFF;
+      UInt  wLo = ((UInt)w64) & 0xFFFFFFFF;
+      HReg  tLo = newVRegI(env);
+      HReg  tHi = newVRegI(env);
+      vassert(e->Iex.Const.con->tag == Ico_U64);
+      addInstr(env, PPCInstr_LI(tHi, (Long)(Int)wHi, False/*mode32*/));
+      addInstr(env, PPCInstr_LI(tLo, (Long)(Int)wLo, False/*mode32*/));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* read 64-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp);
+      return;
+   }
+
+   /* 64-bit GET */
+   if (e->tag == Iex_Get) {
+      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
+                                       GuestStatePtr(False/*mode32*/) );
+      PPCAMode* am_addr4 = advance4(env, am_addr);
+      HReg tLo = newVRegI(env);
+      HReg tHi = newVRegI(env);
+      addInstr(env, PPCInstr_Load( 4, tHi, am_addr,  False/*mode32*/ ));
+      addInstr(env, PPCInstr_Load( 4, tLo, am_addr4, False/*mode32*/ ));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* 64-bit ITE */
+   if (e->tag == Iex_ITE) { // VFD
+      HReg e0Lo, e0Hi, eXLo, eXHi;
+      iselInt64Expr(&eXHi, &eXLo, env, e->Iex.ITE.iftrue, IEndianess);
+      iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.ITE.iffalse, IEndianess);
+      HReg tLo = newVRegI(env);
+      HReg tHi = newVRegI(env);
+      /* Copy the 'iffalse' value in, then conditionally overwrite with
+         the 'iftrue' value. */
+      addInstr(env, mk_iMOVds_RR(tHi,e0Hi));
+      addInstr(env, mk_iMOVds_RR(tLo,e0Lo));
+      PPCCondCode cc = iselCondCode(env, e->Iex.ITE.cond, IEndianess);
+      addInstr(env, PPCInstr_CMov(cc,tHi,PPCRI_Reg(eXHi)));
+      addInstr(env, PPCInstr_CMov(cc,tLo,PPCRI_Reg(eXLo)));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* --------- BINARY ops --------- */
+   if (e->tag == Iex_Binop) {
+      IROp op_binop = e->Iex.Binop.op;
+      switch (op_binop) {
+         /* 32 x 32 -> 64 multiply */
+         case Iop_MullU32:
+         case Iop_MullS32: {
+            HReg     tLo     = newVRegI(env);
+            HReg     tHi     = newVRegI(env);
+            Bool     syned   = toBool(op_binop == Iop_MullS32);
+            HReg     r_srcL  = iselWordExpr_R(env, e->Iex.Binop.arg1,
+                                              IEndianess);
+            HReg     r_srcR  = iselWordExpr_R(env, e->Iex.Binop.arg2,
+                                              IEndianess);
+            /* Low half is signedness-independent; only the high-half
+               multiply distinguishes signed from unsigned. */
+            addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/, 
+                                        False/*lo32*/, True/*32bit mul*/,
+                                        tLo, r_srcL, r_srcR));
+            addInstr(env, PPCInstr_MulL(syned,
+                                        True/*hi32*/, True/*32bit mul*/,
+                                        tHi, r_srcL, r_srcR));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* Or64/And64/Xor64 */
+         case Iop_Or64:
+         case Iop_And64:
+         case Iop_Xor64: {
+            /* Bitwise ops act independently on each half. */
+            HReg xLo, xHi, yLo, yHi;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            PPCAluOp op = (op_binop == Iop_Or64) ? Palu_OR :
+                          (op_binop == Iop_And64) ? Palu_AND : Palu_XOR;
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1, IEndianess);
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2, IEndianess);
+            addInstr(env, PPCInstr_Alu(op, tHi, xHi, PPCRH_Reg(yHi)));
+            addInstr(env, PPCInstr_Alu(op, tLo, xLo, PPCRH_Reg(yLo)));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* Add64 */
+         case Iop_Add64: {
+            HReg xLo, xHi, yLo, yHi;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1, IEndianess);
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2, IEndianess);
+            /* Low-half add sets the carry; high-half add consumes it.
+               The order of these two instructions is essential. */
+            addInstr(env, PPCInstr_AddSubC( True/*add*/, True /*set carry*/,
+                                            tLo, xLo, yLo));
+            addInstr(env, PPCInstr_AddSubC( True/*add*/, False/*read carry*/,
+                                            tHi, xHi, yHi));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 32HLto64(e1,e2) */
+         case Iop_32HLto64:
+            *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+            *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+            return;
+
+         /* F64toI64[S|U] */
+         case Iop_F64toI64S: case Iop_F64toI64U: {
+            HReg      tLo     = newVRegI(env);
+            HReg      tHi     = newVRegI(env);
+            HReg      r1      = StackFramePtr(env->mode64);
+            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
+            PPCAMode* four_r1 = PPCAMode_IR( 4, r1 );
+            HReg      fsrc    = iselDblExpr(env, e->Iex.Binop.arg2,
+                                            IEndianess);
+            HReg      ftmp    = newVRegF(env);
+
+            vassert(!env->mode64);
+            /* Set host rounding mode */
+            set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+
+            /* Convert in an FP reg, store the 8-byte result on the
+               stack, then reload it as two integer words. */
+            sub_from_sp( env, 16 );
+            addInstr(env, PPCInstr_FpCftI(False/*F->I*/, False/*int64*/,
+                                          (op_binop == Iop_F64toI64S) ? True : False,
+                                          True, ftmp, fsrc));
+            addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, ftmp, zero_r1));
+            addInstr(env, PPCInstr_Load(4, tHi, zero_r1, False/*mode32*/));
+            addInstr(env, PPCInstr_Load(4, tLo, four_r1, False/*mode32*/));
+            add_to_sp( env, 16 );
+
+            ///* Restore default FPU rounding. */
+            //set_FPU_rounding_default( env );
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+         case Iop_D64toI64S: {
+            HReg      tLo     = newVRegI(env);
+            HReg      tHi     = newVRegI(env);
+            HReg      r1      = StackFramePtr(env->mode64);
+            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
+            PPCAMode* four_r1 = PPCAMode_IR( 4, r1 );
+            HReg fr_src = iselDfp64Expr(env, e->Iex.Binop.arg2, IEndianess);
+            HReg tmp    = newVRegF(env);
+
+            vassert(!env->mode64);
+            set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+            addInstr(env, PPCInstr_Dfp64Unary(Pfp_DCTFIX, tmp, fr_src));
+
+            /* Same stack round-trip as the F64 case above. */
+            sub_from_sp( env, 16 );
+            addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, tmp, zero_r1));
+            addInstr(env, PPCInstr_Load(4, tHi, zero_r1, False/*mode32*/));
+            addInstr(env, PPCInstr_Load(4, tLo, four_r1, False/*mode32*/));
+            add_to_sp( env, 16 );
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+         case Iop_D128toI64S: {
+            PPCFpOp fpop = Pfp_DCTFIXQ;
+            HReg r_srcHi = newVRegF(env);
+            HReg r_srcLo = newVRegF(env);
+            HReg tLo     = newVRegI(env);
+            HReg tHi     = newVRegI(env);
+            HReg ftmp    = newVRegF(env);
+            PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+            PPCAMode* four_r1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );
+
+            set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+            iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2,
+                           IEndianess);
+            addInstr(env, PPCInstr_DfpD128toD64(fpop, ftmp, r_srcHi, r_srcLo));
+
+            // put the D64 result into an integer register pair
+            sub_from_sp( env, 16 );
+            addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, ftmp, zero_r1));
+            addInstr(env, PPCInstr_Load(4, tHi, zero_r1, False/*mode32*/));
+            addInstr(env, PPCInstr_Load(4, tLo, four_r1, False/*mode32*/));
+            add_to_sp( env, 16 );
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+         default: 
+            break;
+      }
+   } /* if (e->tag == Iex_Binop) */
+
+
+   /* --------- UNARY ops --------- */
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+
+      /* CmpwNEZ64(e) */
+      case Iop_CmpwNEZ64: {
+         HReg argHi, argLo;
+         HReg tmp1  = newVRegI(env);
+         HReg tmp2  = newVRegI(env);
+         iselInt64Expr(&argHi, &argLo, env, e->Iex.Unop.arg, IEndianess);
+         /* tmp1 = argHi | argLo */
+         addInstr(env, PPCInstr_Alu(Palu_OR, tmp1, argHi, PPCRH_Reg(argLo)));
+         /* tmp2 = (tmp1 | -tmp1) >>s 31 */
+         /* Gives all-zeroes if the input was 0, all-ones otherwise. */
+         addInstr(env, PPCInstr_Unary(Pun_NEG,tmp2,tmp1));
+         addInstr(env, PPCInstr_Alu(Palu_OR, tmp2, tmp2, PPCRH_Reg(tmp1)));
+         addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, 
+                                     tmp2, tmp2, PPCRH_Imm(False, 31)));
+         *rHi = tmp2;
+         *rLo = tmp2; /* yes, really tmp2 */
+         return;
+      }
+
+      /* Left64 */
+      case Iop_Left64: {
+         HReg argHi, argLo;
+         HReg zero32 = newVRegI(env);
+         HReg resHi  = newVRegI(env);
+         HReg resLo  = newVRegI(env);
+         iselInt64Expr(&argHi, &argLo, env, e->Iex.Unop.arg, IEndianess);
+         vassert(env->mode64 == False);
+         addInstr(env, PPCInstr_LI(zero32, 0, env->mode64));
+         /* resHi:resLo = - argHi:argLo */
+         addInstr(env, PPCInstr_AddSubC( False/*sub*/, True/*set carry*/,
+                                         resLo, zero32, argLo ));
+         addInstr(env, PPCInstr_AddSubC( False/*sub*/, False/*read carry*/,
+                                         resHi, zero32, argHi ));
+         /* resHi:resLo |= srcHi:srcLo */
+         addInstr(env, PPCInstr_Alu(Palu_OR, resLo, resLo, PPCRH_Reg(argLo)));
+         addInstr(env, PPCInstr_Alu(Palu_OR, resHi, resHi, PPCRH_Reg(argHi)));
+         *rHi = resHi;
+         *rLo = resLo;
+         return;
+      }
+
+      /* 32Sto64(e) */
+      case Iop_32Sto64: {
+         HReg tHi = newVRegI(env);
+         HReg src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         /* High half is the sign bit of the source replicated 32x. */
+         addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/,
+                                     tHi, src, PPCRH_Imm(False,31)));
+         *rHi = tHi;
+         *rLo = src;
+         return;
+      }
+      case Iop_ExtractExpD64: {
+         HReg tmp    = newVRegF(env);
+         HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg, IEndianess);
+         HReg      tLo     = newVRegI(env);
+         HReg      tHi     = newVRegI(env);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+         PPCAMode* four_r1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );
+
+         addInstr(env, PPCInstr_Dfp64Unary(Pfp_DXEX, tmp, fr_src));
+
+         // put the D64 result into a integer register pair
+         sub_from_sp( env, 16 );
+         addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, tmp, zero_r1));
+         addInstr(env, PPCInstr_Load(4, tHi, zero_r1, False/*mode32*/));
+         addInstr(env, PPCInstr_Load(4, tLo, four_r1, False/*mode32*/));
+         add_to_sp( env, 16 );
+         *rHi = tHi;
+         *rLo = tLo;
+         return;
+      }
+      case Iop_ExtractExpD128: {
+         HReg      r_srcHi;
+         HReg      r_srcLo;
+         HReg      tmp     = newVRegF(env);
+         HReg      tLo     = newVRegI(env);
+         HReg      tHi     = newVRegI(env);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+         PPCAMode* four_r1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );
+
+         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_ExtractExpD128(Pfp_DXEXQ, tmp,
+                                                  r_srcHi, r_srcLo));
+
+         // put the D64 result into a integer register pair
+         sub_from_sp( env, 16 );
+         addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, tmp, zero_r1));
+         addInstr(env, PPCInstr_Load(4, tHi, zero_r1, False/*mode32*/));
+         addInstr(env, PPCInstr_Load(4, tLo, four_r1, False/*mode32*/));
+         add_to_sp( env, 16 );
+         *rHi = tHi;
+         *rLo = tLo;
+         return;
+      }
+
+      /* 32Uto64(e) */
+      case Iop_32Uto64: {
+         HReg tHi = newVRegI(env);
+         HReg tLo = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         /* Zero-extend: high half is simply constant zero. */
+         addInstr(env, PPCInstr_LI(tHi, 0, False/*mode32*/));
+         *rHi = tHi;
+         *rLo = tLo;
+         return;
+      }
+
+      case Iop_128to64: {
+         /* Narrow, return the low 64-bit half as a 32-bit
+          * register pair */
+         HReg r_Hi    = INVALID_HREG;
+         HReg r_MedHi = INVALID_HREG;
+         HReg r_MedLo = INVALID_HREG;
+         HReg r_Lo    = INVALID_HREG;
+
+         iselInt128Expr_to_32x4(&r_Hi, &r_MedHi, &r_MedLo, &r_Lo,
+                                env, e->Iex.Unop.arg, IEndianess);
+         *rHi = r_MedLo;
+         *rLo = r_Lo;
+         return;
+      }
+
+      case Iop_128HIto64: {
+         /* Narrow, return the high 64-bit half as a 32-bit
+          *  register pair */
+         HReg r_Hi    = INVALID_HREG;
+         HReg r_MedHi = INVALID_HREG;
+         HReg r_MedLo = INVALID_HREG;
+         HReg r_Lo    = INVALID_HREG;
+
+         iselInt128Expr_to_32x4(&r_Hi, &r_MedHi, &r_MedLo, &r_Lo,
+                                env, e->Iex.Unop.arg, IEndianess);
+         *rHi = r_Hi;
+         *rLo = r_MedHi;
+         return;
+      }
+
+      /* V128{HI}to64 */
+      case Iop_V128HIto64:
+      case Iop_V128to64: {
+         HReg r_aligned16;
+         /* NOTE(review): offset 0 for the HI half assumes big-endian
+            vector layout in memory — consistent with this being the
+            32-bit (big-endian) path, but confirm for any LE reuse. */
+         Int  off = e->Iex.Unop.op==Iop_V128HIto64 ? 0 : 8;
+         HReg tLo = newVRegI(env);
+         HReg tHi = newVRegI(env);
+         HReg vec = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+         PPCAMode *am_off0, *am_offLO, *am_offHI;
+         sub_from_sp( env, 32 );     // Move SP down 32 bytes
+         
+         // get a quadword aligned address within our stack space
+         r_aligned16 = get_sp_aligned16( env );
+         am_off0  = PPCAMode_IR( 0,     r_aligned16 );
+         am_offHI = PPCAMode_IR( off,   r_aligned16 );
+         am_offLO = PPCAMode_IR( off+4, r_aligned16 );
+         
+         // store as Vec128
+         addInstr(env,
+                  PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 ));
+         
+         // load hi,lo words (of hi/lo half of vec) as Ity_I32's
+         addInstr(env,
+                  PPCInstr_Load( 4, tHi, am_offHI, False/*mode32*/ ));
+         addInstr(env,
+                  PPCInstr_Load( 4, tLo, am_offLO, False/*mode32*/ ));
+         
+         add_to_sp( env, 32 );       // Reset SP
+         *rHi = tHi;
+         *rLo = tLo;
+         return;
+      }
+
+      /* could do better than this, but for now ... */
+      case Iop_1Sto64: {
+         HReg tLo = newVRegI(env);
+         HReg tHi = newVRegI(env);
+         PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg, IEndianess);
+         /* Set 0/1 from the condition, then SHL+SAR by 31 to smear
+            bit 0 across the whole word. */
+         addInstr(env, PPCInstr_Set(cond,tLo));
+         addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
+                                     tLo, tLo, PPCRH_Imm(False,31)));
+         addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/,
+                                     tLo, tLo, PPCRH_Imm(False,31)));
+         addInstr(env, mk_iMOVds_RR(tHi, tLo));
+         *rHi = tHi;
+         *rLo = tLo;
+         return;
+      }
+
+      case Iop_Not64: {
+         HReg xLo, xHi;
+         HReg tmpLo = newVRegI(env);
+         HReg tmpHi = newVRegI(env);
+         iselInt64Expr(&xHi, &xLo, env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_Unary(Pun_NOT,tmpLo,xLo));
+         addInstr(env, PPCInstr_Unary(Pun_NOT,tmpHi,xHi));
+         *rHi = tmpHi;
+         *rLo = tmpLo;
+         return;
+      }
+
+      /* ReinterpF64asI64(e) */
+      /* Given an IEEE754 double, produce an I64 with the same bit
+         pattern. */
+      case Iop_ReinterpF64asI64: {
+         PPCAMode *am_addr0, *am_addr1;
+         HReg fr_src  = iselDblExpr(env, e->Iex.Unop.arg, IEndianess);
+         HReg r_dstLo = newVRegI(env);
+         HReg r_dstHi = newVRegI(env);
+         
+         sub_from_sp( env, 16 );     // Move SP down 16 bytes
+         am_addr0 = PPCAMode_IR( 0, StackFramePtr(False/*mode32*/) );
+         am_addr1 = PPCAMode_IR( 4, StackFramePtr(False/*mode32*/) );
+
+         // store as F64
+         addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8,
+                                        fr_src, am_addr0 ));
+         
+         // load hi,lo as Ity_I32's
+         addInstr(env, PPCInstr_Load( 4, r_dstHi,
+                                      am_addr0, False/*mode32*/ ));
+         addInstr(env, PPCInstr_Load( 4, r_dstLo,
+                                      am_addr1, False/*mode32*/ ));
+         *rHi = r_dstHi;
+         *rLo = r_dstLo;
+         
+         add_to_sp( env, 16 );       // Reset SP
+         return;
+      }
+
+      case Iop_ReinterpD64asI64: {
+         HReg fr_src  = iselDfp64Expr(env, e->Iex.Unop.arg, IEndianess);
+         PPCAMode *am_addr0, *am_addr1;
+         HReg r_dstLo = newVRegI(env);
+         HReg r_dstHi = newVRegI(env);
+
+
+         sub_from_sp( env, 16 );     // Move SP down 16 bytes
+         am_addr0 = PPCAMode_IR( 0, StackFramePtr(False/*mode32*/) );
+         am_addr1 = PPCAMode_IR( 4, StackFramePtr(False/*mode32*/) );
+
+         // store as D64
+         addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8,
+                                        fr_src, am_addr0 ));
+
+         // load hi,lo as Ity_I32's
+         addInstr(env, PPCInstr_Load( 4, r_dstHi,
+                                      am_addr0, False/*mode32*/ ));
+         addInstr(env, PPCInstr_Load( 4, r_dstLo,
+                                      am_addr1, False/*mode32*/ ));
+         *rHi = r_dstHi;
+         *rLo = r_dstLo;
+
+         add_to_sp( env, 16 );       // Reset SP
+
+         return;
+      }
+
+      case Iop_BCDtoDPB: {
+         /* No single-instruction form: call the helper
+            h_calc_BCDtoDPB, passing the 64-bit value in GPR3:GPR4 and
+            receiving the result in the same pair (RLPri_2Int). */
+         PPCCondCode cc;
+         UInt        argiregs;
+         HReg        argregs[2];
+         Int         argreg;
+         HReg        tLo = newVRegI(env);
+         HReg        tHi = newVRegI(env);
+         HReg        tmpHi;
+         HReg        tmpLo;
+         Bool        mode64 = env->mode64;
+
+         argregs[0] = hregPPC_GPR3(mode64);
+         argregs[1] = hregPPC_GPR4(mode64);
+
+         argiregs = 0;
+         argreg = 0;
+
+         iselInt64Expr( &tmpHi, &tmpLo, env, e->Iex.Unop.arg, IEndianess );
+
+         argiregs |= ( 1 << (argreg+3 ) );
+         addInstr( env, mk_iMOVds_RR( argregs[argreg++], tmpHi ) );
+
+         argiregs |= ( 1 << (argreg+3 ) );
+         addInstr( env, mk_iMOVds_RR( argregs[argreg], tmpLo ) );
+
+         cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
+
+         if (IEndianess == Iend_LE) {
+             addInstr( env, PPCInstr_Call( cc, (Addr)h_calc_BCDtoDPB,
+                                           argiregs,
+                                           mk_RetLoc_simple(RLPri_2Int) ) );
+         } else {
+             Addr64 target;
+             target = mode64 ? (Addr)h_calc_BCDtoDPB :
+               toUInt( (Addr)h_calc_BCDtoDPB );
+             addInstr( env, PPCInstr_Call( cc, target,
+                                           argiregs,
+                                           mk_RetLoc_simple(RLPri_2Int) ) );
+         }
+
+         addInstr( env, mk_iMOVds_RR( tHi, argregs[argreg-1] ) );
+         addInstr( env, mk_iMOVds_RR( tLo, argregs[argreg] ) );
+
+         *rHi = tHi;
+         *rLo = tLo;
+         return;
+      }
+
+      case Iop_DPBtoBCD: {
+         /* Mirror image of Iop_BCDtoDPB above, calling
+            h_calc_DPBtoBCD instead. */
+         PPCCondCode cc;
+         UInt        argiregs;
+         HReg        argregs[2];
+         Int         argreg;
+         HReg        tLo = newVRegI(env);
+         HReg        tHi = newVRegI(env);
+         HReg        tmpHi;
+         HReg        tmpLo;
+         Bool        mode64 = env->mode64;
+
+         argregs[0] = hregPPC_GPR3(mode64);
+         argregs[1] = hregPPC_GPR4(mode64);
+
+         argiregs = 0;
+         argreg = 0;
+
+         iselInt64Expr(&tmpHi, &tmpLo, env, e->Iex.Unop.arg, IEndianess);
+
+         argiregs |= (1 << (argreg+3));
+         addInstr(env, mk_iMOVds_RR( argregs[argreg++], tmpHi ));
+
+         argiregs |= (1 << (argreg+3));
+         addInstr(env, mk_iMOVds_RR( argregs[argreg], tmpLo));
+
+         cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
+
+         if (IEndianess == Iend_LE) {
+             addInstr(env, PPCInstr_Call( cc, (Addr)h_calc_DPBtoBCD,
+                                          argiregs,
+                                          mk_RetLoc_simple(RLPri_2Int) ) );
+         } else {
+             Addr64 target;
+             target = mode64 ? (Addr)h_calc_DPBtoBCD :
+               toUInt( (Addr)h_calc_DPBtoBCD );
+             addInstr(env, PPCInstr_Call( cc, target, argiregs,
+                                          mk_RetLoc_simple(RLPri_2Int) ) );
+         }
+
+         addInstr(env, mk_iMOVds_RR(tHi, argregs[argreg-1]));
+         addInstr(env, mk_iMOVds_RR(tLo, argregs[argreg]));
+
+         *rHi = tHi;
+         *rLo = tLo;
+         return;
+      }
+
+      default:
+         break;
+      }
+   } /* if (e->tag == Iex_Unop) */
+
+   vex_printf("iselInt64Expr(ppc): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselInt64Expr(ppc)");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (32 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Nothing interesting here; really just wrappers for
+   64-bit stuff. */
+
+static HReg iselFltExpr ( ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   /* Delegate to the worker, then check the result is a virtual
+      float register. */
+   HReg r = iselFltExpr_wrk( env, e, IEndianess );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregIsVirtual(r));
+   vassert(hregClass(r) == HRcFlt64); /* yes, really Flt64 */
+   return r;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+/* Worker for iselFltExpr: computes an Ity_F32 value into a (64-bit
+   capable) FP register.  Works in both 32- and 64-bit modes unless a
+   case asserts otherwise. */
+static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   Bool        mode64 = env->mode64;
+
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_F32);
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* F32 load: the 4-byte FP load widens to double precision in the
+      register. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == IEndianess) {
+      PPCAMode* am_addr;
+      HReg r_dst = newVRegF(env);
+      vassert(e->Iex.Load.ty == Ity_F32);
+      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, Ity_F32/*xfer*/,
+                                   IEndianess);
+      addInstr(env, PPCInstr_FpLdSt(True/*load*/, 4, r_dst, am_addr));
+      return r_dst;
+   }
+
+   /* F32 read from the guest state. */
+   if (e->tag == Iex_Get) {
+      HReg r_dst = newVRegF(env);
+      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
+                                       GuestStatePtr(env->mode64) );
+      addInstr(env, PPCInstr_FpLdSt( True/*load*/, 4, r_dst, am_addr ));
+      return r_dst;
+   }
+
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_TruncF64asF32) {
+      /* This is quite subtle.  The only way to do the relevant
+         truncation is to do a single-precision store and then a
+         double precision load to get it back into a register.  The
+         problem is, if the data is then written to memory a second
+         time, as in
+
+            STbe(...) = TruncF64asF32(...)
+
+         then will the second truncation further alter the value?  The
+         answer is no: flds (as generated here) followed by fsts
+         (generated for the STbe) is the identity function on 32-bit
+         floats, so we are safe.
+
+         Another upshot of this is that if iselStmt can see the
+         entirety of
+
+            STbe(...) = TruncF64asF32(arg)
+
+         then it can short circuit having to deal with TruncF64asF32
+         individually; instead just compute arg into a 64-bit FP
+         register and do 'fsts' (since that itself does the
+         truncation).
+
+         We generate pretty poor code here (should be ok both for
+         32-bit and 64-bit mode); but it is expected that for the most
+         part the latter optimisation will apply and hence this code
+         will not often be used.
+      */
+      HReg      fsrc    = iselDblExpr(env, e->Iex.Unop.arg, IEndianess);
+      HReg      fdst    = newVRegF(env);
+      PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+
+      sub_from_sp( env, 16 );
+      // store as F32, hence truncating
+      addInstr(env, PPCInstr_FpLdSt( False/*store*/, 4,
+                                     fsrc, zero_r1 ));
+      // and reload.  Good huh?! (sigh)
+      addInstr(env, PPCInstr_FpLdSt( True/*load*/, 4,
+                                     fdst, zero_r1 ));
+      add_to_sp( env, 16 );
+      return fdst;
+   }
+
+   /* I64UtoF32: move the integer bits to an FP register via a stack
+      round-trip, then convert in the FP unit under the rounding mode
+      given by arg1. */
+   if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_I64UtoF32) {
+      if (mode64) {
+         HReg fdst = newVRegF(env);
+         HReg isrc = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+         HReg r1   = StackFramePtr(env->mode64);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
+
+         /* Set host rounding mode */
+         set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+
+         sub_from_sp( env, 16 );
+
+         addInstr(env, PPCInstr_Store(8, zero_r1, isrc, True/*mode64*/));
+         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fdst, zero_r1));
+         addInstr(env, PPCInstr_FpCftI(True/*I->F*/, False/*int64*/, 
+                                       False, False,
+                                       fdst, fdst));
+
+         add_to_sp( env, 16 );
+
+         ///* Restore default FPU rounding. */
+         //set_FPU_rounding_default( env );
+         return fdst;
+      } else {
+         /* 32-bit mode */
+         HReg fdst = newVRegF(env);
+         HReg isrcHi, isrcLo;
+         HReg r1   = StackFramePtr(env->mode64);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
+         PPCAMode* four_r1 = PPCAMode_IR( 4, r1 );
+
+         iselInt64Expr(&isrcHi, &isrcLo, env, e->Iex.Binop.arg2, IEndianess);
+
+         /* Set host rounding mode */
+         set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+
+         sub_from_sp( env, 16 );
+
+         /* Store the two 32-bit halves (high word at the lower
+            address), then reload as one 8-byte FP value. */
+         addInstr(env, PPCInstr_Store(4, zero_r1, isrcHi, False/*mode32*/));
+         addInstr(env, PPCInstr_Store(4, four_r1, isrcLo, False/*mode32*/));
+         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fdst, zero_r1));
+         addInstr(env, PPCInstr_FpCftI(True/*I->F*/, False/*int64*/, 
+                                       False, False,
+                                       fdst, fdst));
+
+         add_to_sp( env, 16 );
+
+         ///* Restore default FPU rounding. */
+         //set_FPU_rounding_default( env );
+         return fdst;
+      }
+
+   }
+
+   vex_printf("iselFltExpr(ppc): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselFltExpr_wrk(ppc)");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (64 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 64-bit floating point value into a register, the identity
+   of which is returned.  As with iselIntExpr_R, the reg may be either
+   real or virtual; in any case it must not be changed by subsequent
+   code emitted by the caller.  */
+
+/* IEEE 754 formats.  From http://www.freesoft.org/CIE/RFC/1832/32.htm:
+
+    Type                  S (1 bit)   E (11 bits)   F (52 bits)
+    ----                  ---------   -----------   -----------
+    signalling NaN        u           2047 (max)    .0uuuuu---u
+                                                    (with at least
+                                                     one 1 bit)
+    quiet NaN             u           2047 (max)    .1uuuuu---u
+
+    negative infinity     1           2047 (max)    .000000---0
+
+    positive infinity     0           2047 (max)    .000000---0
+
+    negative zero         1           0             .000000---0
+
+    positive zero         0           0             .000000---0
+*/
+
+static HReg iselDblExpr ( ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   /* Hand the expression to the worker, then check the invariants
+      every isel result must satisfy: a *virtual* register of class
+      F64. */
+   HReg dst = iselDblExpr_wrk( env, e, IEndianess );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregIsVirtual(dst));
+   vassert(hregClass(dst) == HRcFlt64);
+   return dst;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+/* Worker for iselDblExpr: select instructions computing the F64-typed
+   expression |e| into a freshly allocated virtual FP register, which
+   is returned.  Unhandled expression forms fall through to vpanic. */
+static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   Bool mode64 = env->mode64;
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_F64);
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* --------- LITERAL --------- */
+   if (e->tag == Iex_Const) {
+      /* Type-pun the literal's bits through a union, materialise them
+         in integer register(s), then transfer to an FP register via
+         memory (mk_Load*toFPR). */
+      union { UInt u32x2[2]; ULong u64; Double f64; } u;
+      vassert(sizeof(u) == 8);
+      vassert(sizeof(u.u64) == 8);
+      vassert(sizeof(u.f64) == 8);
+      vassert(sizeof(u.u32x2) == 8);
+
+      if (e->Iex.Const.con->tag == Ico_F64) {
+         u.f64 = e->Iex.Const.con->Ico.F64;
+      }
+      else if (e->Iex.Const.con->tag == Ico_F64i) {
+         u.u64 = e->Iex.Const.con->Ico.F64i;
+      }
+      else
+         vpanic("iselDblExpr(ppc): const");
+
+      if (!mode64) {
+         /* NOTE(review): treating u.u32x2[0] as the high word assumes
+            a big-endian host layout (true for 32-bit PPC hosts) --
+            confirm if this code is ever built for an LE 32-bit host. */
+         HReg r_srcHi = newVRegI(env);
+         HReg r_srcLo = newVRegI(env);
+         addInstr(env, PPCInstr_LI(r_srcHi, u.u32x2[0], mode64));
+         addInstr(env, PPCInstr_LI(r_srcLo, u.u32x2[1], mode64));
+         return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
+      } else { // mode64
+         HReg r_src = newVRegI(env);
+         addInstr(env, PPCInstr_LI(r_src, u.u64, mode64));
+         return mk_LoadR64toFPR( env, r_src );         // 1*I64 -> F64
+      }
+   }
+
+   /* --------- LOAD --------- */
+   /* Only loads whose endianness matches the guest's are handled. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == IEndianess) {
+      HReg r_dst = newVRegF(env);
+      PPCAMode* am_addr;
+      vassert(e->Iex.Load.ty == Ity_F64);
+      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, Ity_F64/*xfer*/,
+                                   IEndianess);
+      addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, r_dst, am_addr));
+      return r_dst;
+   }
+
+   /* --------- GET --------- */
+   if (e->tag == Iex_Get) {
+      HReg r_dst = newVRegF(env);
+      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
+                                       GuestStatePtr(mode64) );
+      addInstr(env, PPCInstr_FpLdSt( True/*load*/, 8, r_dst, am_addr ));
+      return r_dst;
+   }
+
+   /* --------- OPS --------- */
+   /* Fused multiply-add/sub: arg1 is the rounding mode, args 2-4 are
+      the multiplicands and the accumulator. */
+   if (e->tag == Iex_Qop) {
+      PPCFpOp fpop = Pfp_INVALID;
+      switch (e->Iex.Qop.details->op) {
+         case Iop_MAddF64:    fpop = Pfp_MADDD; break;
+         case Iop_MAddF64r32: fpop = Pfp_MADDS; break;
+         case Iop_MSubF64:    fpop = Pfp_MSUBD; break;
+         case Iop_MSubF64r32: fpop = Pfp_MSUBS; break;
+         default: break;
+      }
+      if (fpop != Pfp_INVALID) {
+         HReg r_dst  = newVRegF(env);
+         HReg r_srcML  = iselDblExpr(env, e->Iex.Qop.details->arg2,
+                                     IEndianess);
+         HReg r_srcMR  = iselDblExpr(env, e->Iex.Qop.details->arg3,
+                                     IEndianess);
+         HReg r_srcAcc = iselDblExpr(env, e->Iex.Qop.details->arg4,
+                                     IEndianess);
+         set_FPU_rounding_mode( env, e->Iex.Qop.details->arg1, IEndianess );
+         addInstr(env, PPCInstr_FpMulAcc(fpop, r_dst,
+                                               r_srcML, r_srcMR, r_srcAcc));
+         return r_dst;
+      }
+   }
+
+   if (e->tag == Iex_Triop) {
+      IRTriop *triop = e->Iex.Triop.details;
+      PPCFpOp fpop = Pfp_INVALID;
+      switch (triop->op) {
+         case Iop_AddF64:    fpop = Pfp_ADDD; break;
+         case Iop_SubF64:    fpop = Pfp_SUBD; break;
+         case Iop_MulF64:    fpop = Pfp_MULD; break;
+         case Iop_DivF64:    fpop = Pfp_DIVD; break;
+         case Iop_AddF64r32: fpop = Pfp_ADDS; break;
+         case Iop_SubF64r32: fpop = Pfp_SUBS; break;
+         case Iop_MulF64r32: fpop = Pfp_MULS; break;
+         case Iop_DivF64r32: fpop = Pfp_DIVS; break;
+         default: break;
+      }
+      if (fpop != Pfp_INVALID) {
+         HReg r_dst  = newVRegF(env);
+         HReg r_srcL = iselDblExpr(env, triop->arg2, IEndianess);
+         HReg r_srcR = iselDblExpr(env, triop->arg3, IEndianess);
+         /* arg1 carries the IR rounding mode for the operation. */
+         set_FPU_rounding_mode( env, triop->arg1, IEndianess );
+         addInstr(env, PPCInstr_FpBinary(fpop, r_dst, r_srcL, r_srcR));
+         return r_dst;
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+      PPCFpOp fpop = Pfp_INVALID;
+      switch (e->Iex.Binop.op) {
+      case Iop_SqrtF64:   fpop = Pfp_SQRT;   break;
+      default: break;
+      }
+      if (fpop == Pfp_SQRT) {
+         HReg fr_dst = newVRegF(env);
+         HReg fr_src = iselDblExpr(env, e->Iex.Binop.arg2, IEndianess);
+         set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+         addInstr(env, PPCInstr_FpUnary(fpop, fr_dst, fr_src));
+         return fr_dst;
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+
+      if (e->Iex.Binop.op == Iop_RoundF64toF32) {
+         HReg r_dst = newVRegF(env);
+         HReg r_src = iselDblExpr(env, e->Iex.Binop.arg2, IEndianess);
+         set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+         addInstr(env, PPCInstr_FpRSP(r_dst, r_src));
+         //set_FPU_rounding_default( env );
+         return r_dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_I64StoF64 || e->Iex.Binop.op == Iop_I64UtoF64) {
+         /* I64 -> F64: stage the integer through the stack into an FP
+            register, then convert in-register (signedness chosen by
+            the third PPCInstr_FpCftI flag). */
+         if (mode64) {
+            HReg fdst = newVRegF(env);
+            HReg isrc = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+            HReg r1   = StackFramePtr(env->mode64);
+            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
+
+            /* Set host rounding mode */
+            set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+
+            sub_from_sp( env, 16 );
+
+            addInstr(env, PPCInstr_Store(8, zero_r1, isrc, True/*mode64*/));
+            addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fdst, zero_r1));
+            addInstr(env, PPCInstr_FpCftI(True/*I->F*/, False/*int64*/,
+                                          e->Iex.Binop.op == Iop_I64StoF64,
+                                          True/*fdst is 64 bit*/,
+                                          fdst, fdst));
+
+            add_to_sp( env, 16 );
+
+            ///* Restore default FPU rounding. */
+            //set_FPU_rounding_default( env );
+            return fdst;
+         } else {
+            /* 32-bit mode */
+            HReg fdst = newVRegF(env);
+            HReg isrcHi, isrcLo;
+            HReg r1   = StackFramePtr(env->mode64);
+            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
+            PPCAMode* four_r1 = PPCAMode_IR( 4, r1 );
+
+            iselInt64Expr(&isrcHi, &isrcLo, env, e->Iex.Binop.arg2,
+                          IEndianess);
+
+            /* Set host rounding mode */
+            set_FPU_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+
+            sub_from_sp( env, 16 );
+
+            addInstr(env, PPCInstr_Store(4, zero_r1, isrcHi, False/*mode32*/));
+            addInstr(env, PPCInstr_Store(4, four_r1, isrcLo, False/*mode32*/));
+            addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fdst, zero_r1));
+            addInstr(env, PPCInstr_FpCftI(True/*I->F*/, False/*int64*/,
+                                          e->Iex.Binop.op == Iop_I64StoF64,
+                                          True/*fdst is 64 bit*/,
+                                          fdst, fdst));
+
+            add_to_sp( env, 16 );
+
+            ///* Restore default FPU rounding. */
+            //set_FPU_rounding_default( env );
+            return fdst;
+         }
+      }
+
+   }
+
+   if (e->tag == Iex_Unop) {
+      PPCFpOp fpop = Pfp_INVALID;
+      switch (e->Iex.Unop.op) {
+         case Iop_NegF64:     fpop = Pfp_NEG; break;
+         case Iop_AbsF64:     fpop = Pfp_ABS; break;
+         case Iop_RSqrtEst5GoodF64:      fpop = Pfp_RSQRTE; break;
+         case Iop_RoundF64toF64_NegINF:  fpop = Pfp_FRIM; break;
+         case Iop_RoundF64toF64_PosINF:  fpop = Pfp_FRIP; break;
+         case Iop_RoundF64toF64_NEAREST: fpop = Pfp_FRIN; break;
+         case Iop_RoundF64toF64_ZERO:    fpop = Pfp_FRIZ; break;
+         default: break;
+      }
+      if (fpop != Pfp_INVALID) {
+         HReg fr_dst = newVRegF(env);
+         HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_FpUnary(fpop, fr_dst, fr_src));
+         return fr_dst;
+      }
+   }
+
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+         case Iop_ReinterpI64asF64: {
+            /* Given an I64, produce an IEEE754 double with the same
+               bit pattern. */
+            if (!mode64) {
+               HReg r_srcHi, r_srcLo;
+               iselInt64Expr( &r_srcHi, &r_srcLo, env, e->Iex.Unop.arg,
+                               IEndianess);
+               return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
+            } else {
+               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+               return mk_LoadR64toFPR( env, r_src );
+            }
+         }
+
+         case Iop_F32toF64: {
+            /* Special-case F32toF64(ReinterpI32asF32(x)): store x as a
+               32-bit int and reload it with a 4-byte FP load, which
+               widens to F64 for free. */
+            if (e->Iex.Unop.arg->tag == Iex_Unop &&
+                     e->Iex.Unop.arg->Iex.Unop.op == Iop_ReinterpI32asF32 ) {
+               e = e->Iex.Unop.arg;
+
+               HReg src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+               HReg fr_dst = newVRegF(env);
+               PPCAMode *am_addr;
+
+               sub_from_sp( env, 16 );        // Move SP down 16 bytes
+               am_addr = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+
+               // store src as Ity_I32's
+               addInstr(env, PPCInstr_Store( 4, am_addr, src, env->mode64 ));
+
+               // load single precision float; the result lands in a
+               // 64-bit FP register -- i.e., F64.
+               addInstr(env, PPCInstr_FpLdSt(True/*load*/, 4, fr_dst, am_addr));
+
+               add_to_sp( env, 16 );          // Reset SP
+               return fr_dst;
+            }
+
+
+            /* this is a no-op */
+            HReg res = iselFltExpr(env, e->Iex.Unop.arg, IEndianess);
+            return res;
+         }
+         default:
+            break;
+      }
+   }
+
+   /* --------- MULTIPLEX --------- */
+   if (e->tag == Iex_ITE) { // VFD
+      /* Compute both arms, copy the iffalse value into the result,
+         then conditionally overwrite it with the iftrue value. */
+      if (ty == Ity_F64
+          && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+         HReg fr1    = iselDblExpr(env, e->Iex.ITE.iftrue, IEndianess);
+         HReg fr0    = iselDblExpr(env, e->Iex.ITE.iffalse, IEndianess);
+         HReg fr_dst = newVRegF(env);
+         addInstr(env, PPCInstr_FpUnary( Pfp_MOV, fr_dst, fr0 ));
+         PPCCondCode cc = iselCondCode(env, e->Iex.ITE.cond, IEndianess);
+         addInstr(env, PPCInstr_FpCMov( cc, fr_dst, fr1 ));
+         return fr_dst;
+      }
+   }
+
+   vex_printf("iselDblExpr(ppc): No such tag(%u)\n", e->tag);
+   ppIRExpr(e);
+   vpanic("iselDblExpr_wrk(ppc)");
+}
+
+/* Compute a D32-typed expression into a virtual FP register.
+   Thin checking wrapper around iselDfp32Expr_wrk. */
+static HReg iselDfp32Expr(ISelEnv* env, IRExpr* e, IREndness IEndianess)
+{
+   HReg dst = iselDfp32Expr_wrk( env, e, IEndianess );
+   vassert( hregIsVirtual(dst) );
+   vassert( hregClass(dst) == HRcFlt64 );
+   return dst;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+/* Worker for iselDfp32Expr.  Handles guest-state reads, memory loads
+   and D64->D32 narrowing; anything else panics. */
+static HReg iselDfp32Expr_wrk(ISelEnv* env, IRExpr* e, IREndness IEndianess)
+{
+   Bool mode64 = env->mode64;
+
+   vassert( e );
+   vassert( typeOfIRExpr( env->type_env, e ) == Ity_D32 );
+
+   switch (e->tag) {
+
+      /* --------- GET --------- */
+      case Iex_Get: {
+         HReg dst = newVRegF( env );
+         PPCAMode* am
+            = PPCAMode_IR( e->Iex.Get.offset, GuestStatePtr(mode64) );
+         addInstr( env, PPCInstr_FpLdSt( True/*load*/, 8, dst, am ) );
+         return dst;
+      }
+
+      /* --------- LOAD --------- */
+      case Iex_Load:
+         if (e->Iex.Load.end == IEndianess) {
+            vassert(e->Iex.Load.ty == Ity_D32);
+            HReg dst = newVRegF(env);
+            PPCAMode* am = iselWordExpr_AMode(env, e->Iex.Load.addr,
+                                              Ity_D32/*xfer*/, IEndianess);
+            addInstr(env, PPCInstr_FpLdSt(True/*load*/, 4, dst, am));
+            return dst;
+         }
+         break;
+
+      /* --------- OPS --------- */
+      case Iex_Binop:
+         if (e->Iex.Binop.op == Iop_D64toD32) {
+            /* Narrow D64 to D32 under the rounding mode in arg1. */
+            HReg dst = newVRegF(env);
+            HReg src = iselDfp64Expr(env, e->Iex.Binop.arg2, IEndianess);
+            set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+            addInstr(env, PPCInstr_Dfp64Unary(Pfp_DRSP, dst, src));
+            return dst;
+         }
+         break;
+
+      default:
+         break;
+   }
+
+   ppIRExpr( e );
+   vpanic( "iselDfp32Expr_wrk(ppc)" );
+}
+
+/* Compute a D64-typed expression into a virtual FP register.
+   Thin checking wrapper around iselDfp64Expr_wrk. */
+static HReg iselDfp64Expr(ISelEnv* env, IRExpr* e, IREndness IEndianess)
+{
+   HReg dst = iselDfp64Expr_wrk( env, e, IEndianess );
+   vassert( hregIsVirtual(dst) );
+   vassert( hregClass(dst) == HRcFlt64 );
+   return dst;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+/* Worker for iselDfp64Expr: select instructions computing the
+   D64-typed (decimal FP) expression |e| into a virtual FP register,
+   which is returned. */
+static HReg iselDfp64Expr_wrk(ISelEnv* env, IRExpr* e, IREndness IEndianess)
+{
+   Bool mode64 = env->mode64;
+   IRType ty = typeOfIRExpr( env->type_env, e );
+   HReg r_dstHi, r_dstLo;
+
+   vassert( e );
+   vassert( ty == Ity_D64 );
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp( env, e->Iex.RdTmp.tmp );
+   }
+
+   /* --------- GET --------- */
+   if (e->tag == Iex_Get) {
+      HReg r_dst = newVRegF( env );
+      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
+                                       GuestStatePtr(mode64) );
+      addInstr( env, PPCInstr_FpLdSt( True/*load*/, 8, r_dst, am_addr ) );
+      return r_dst;
+   }
+
+   if (e->tag == Iex_Load && e->Iex.Load.end == IEndianess) {
+      PPCAMode* am_addr;
+      HReg r_dst = newVRegF(env);
+      vassert(e->Iex.Load.ty == Ity_D64);
+      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, Ity_D64/*xfer*/,
+                                   IEndianess);
+      addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, r_dst, am_addr));
+      return r_dst;
+   }
+
+   /* --------- OPS --------- */
+   /* NOTE(review): this returns a brand-new, *uninitialised* virtual
+      register for ANY Iex_Qop, so a D64 Qop silently yields an
+      undefined value instead of reaching the vpanic below.  Looks
+      like stub code -- confirm whether any D64 Qop can reach here. */
+   if (e->tag == Iex_Qop) {
+      HReg r_dst = newVRegF( env );
+      return r_dst;
+   }
+
+   if (e->tag == Iex_Unop) {
+      HReg fr_dst = newVRegF(env);
+      switch (e->Iex.Unop.op) {
+      case Iop_ReinterpI64asD64: {
+         /* Given an I64, produce an IEEE754 DFP with the same
+               bit pattern. */
+         if (!mode64) {
+            HReg r_srcHi, r_srcLo;
+            iselInt64Expr( &r_srcHi, &r_srcLo, env, e->Iex.Unop.arg,
+                           IEndianess);
+            return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
+         } else {
+            HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+            return mk_LoadR64toFPR( env, r_src );
+         }
+      }
+      case Iop_D32toD64: {
+         /* Widen D32 -> D64 (dctdp); no rounding needed. */
+         HReg fr_src = iselDfp32Expr(env, e->Iex.Unop.arg, IEndianess);
+         addInstr(env, PPCInstr_Dfp64Unary(Pfp_DCTDP, fr_dst, fr_src));
+         return fr_dst;
+      }
+      case Iop_D128HItoD64:
+         iselDfp128Expr( &r_dstHi, &r_dstLo, env, e->Iex.Unop.arg,
+                         IEndianess );
+         return r_dstHi;
+      case Iop_D128LOtoD64:
+         iselDfp128Expr( &r_dstHi, &r_dstLo, env, e->Iex.Unop.arg,
+                         IEndianess );
+         return r_dstLo;
+      case Iop_InsertExpD64: {
+         /* NOTE(review): Iop_InsertExpD64 is a *binary* op, yet this
+            case sits inside the Iex_Unop switch and reads
+            e->Iex.Binop.arg1 / .arg2 from a Unop node.  It appears
+            unreachable (the working handling is in the Iex_Binop
+            section below) -- confirm and consider removing. */
+         HReg fr_srcL = iselDblExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg fr_srcR = iselDblExpr(env, e->Iex.Binop.arg2, IEndianess);
+
+         addInstr(env, PPCInstr_Dfp64Binary(Pfp_DIEX, fr_dst, fr_srcL,
+                                            fr_srcR));
+         return fr_dst;
+       }
+      default:
+         vex_printf( "ERROR: iselDfp64Expr_wrk, UNKNOWN unop case %d\n",
+                     e->Iex.Unop.op );
+      }
+   }
+
+   if (e->tag == Iex_Binop) {
+      PPCFpOp fpop = Pfp_INVALID;
+      HReg fr_dst = newVRegF(env);
+
+      switch (e->Iex.Binop.op) {
+      case Iop_D128toD64:     fpop = Pfp_DRDPQ;  break;
+      case Iop_D64toD32:      fpop = Pfp_DRSP;   break;
+      case Iop_I64StoD64:     fpop = Pfp_DCFFIX; break;
+      case Iop_RoundD64toInt: fpop = Pfp_DRINTN; break;
+      default: break;
+      }
+      if (fpop == Pfp_DRDPQ) {
+         /* D128 -> D64: round the 128-bit value down to 64 bits. */
+         HReg r_srcHi = newVRegF(env);
+         HReg r_srcLo = newVRegF(env);
+
+         set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2,
+                        IEndianess);
+         addInstr(env, PPCInstr_DfpD128toD64(fpop, fr_dst, r_srcHi, r_srcLo));
+         return fr_dst;
+
+      } else if (fpop == Pfp_DRINTN) {
+         HReg fr_src = newVRegF(env);
+         PPCRI* r_rmc = iselWordExpr_RI(env, e->Iex.Binop.arg1, IEndianess);
+
+         /* NOTE, this IOP takes a DFP value and rounds to the
+          * nearest floating point integer value, i.e. fractional part
+          * is zero.  The result is a decimal floating point number.
+          * the INT in the name is a bit misleading.
+          */
+         fr_src = iselDfp64Expr(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr(env, PPCInstr_DfpRound(fr_dst, fr_src, r_rmc));
+         return fr_dst;
+
+      } else if (fpop == Pfp_DRSP) {
+         HReg fr_src = iselDfp64Expr(env, e->Iex.Binop.arg2, IEndianess);
+         set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+         addInstr(env, PPCInstr_Dfp64Unary(fpop, fr_dst, fr_src));
+         return fr_dst;
+
+      } else if (fpop == Pfp_DCFFIX) {
+         /* I64 -> D64: stage the integer through the stack into an FP
+            register, then convert in-register. */
+         HReg fr_src = newVRegF(env);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+
+         set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+         sub_from_sp( env, 16 );
+
+         // put the I64 value into a floating point register
+         if (mode64) {
+           HReg tmp = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+
+           addInstr(env, PPCInstr_Store(8, zero_r1, tmp, True/*mode64*/));
+         } else {
+            HReg tmpHi, tmpLo;
+            PPCAMode* four_r1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );
+
+            iselInt64Expr(&tmpHi, &tmpLo, env, e->Iex.Binop.arg2,
+                          IEndianess);
+            addInstr(env, PPCInstr_Store(4, zero_r1, tmpHi, False/*mode32*/));
+            addInstr(env, PPCInstr_Store(4, four_r1, tmpLo, False/*mode32*/));
+         }
+
+         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8,  fr_src, zero_r1));
+         addInstr(env, PPCInstr_Dfp64Unary(fpop, fr_dst, fr_src));
+         add_to_sp( env, 16 );
+         return fr_dst;
+      }
+
+      switch (e->Iex.Binop.op) {
+      /* shift instructions D64, I32 -> D64 */
+      case Iop_ShlD64: fpop = Pfp_DSCLI; break;
+      case Iop_ShrD64: fpop = Pfp_DSCRI; break;
+      default: break;
+      }
+      if (fpop != Pfp_INVALID) {
+         HReg fr_src = iselDfp64Expr(env, e->Iex.Binop.arg1, IEndianess);
+         PPCRI* shift = iselWordExpr_RI(env, e->Iex.Binop.arg2, IEndianess);
+
+         /* shift value must be an immediate value */
+         vassert(shift->tag == Pri_Imm);
+
+         addInstr(env, PPCInstr_DfpShift(fpop, fr_dst, fr_src, shift));
+         return fr_dst;
+      }
+
+      switch (e->Iex.Binop.op) {
+      case Iop_InsertExpD64:
+         fpop = Pfp_DIEX;
+         break;
+      default: 	break;
+      }
+      if (fpop != Pfp_INVALID) {
+         /* Insert exponent: arg1 (the I64 exponent) is staged through
+            memory into an FP register; arg2 supplies the significand. */
+         HReg fr_srcL = newVRegF(env);
+         HReg fr_srcR = iselDfp64Expr(env, e->Iex.Binop.arg2, IEndianess);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+         sub_from_sp( env, 16 );
+
+         if (env->mode64) {
+            // put the I64 value into a floating point reg
+            HReg tmp = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+
+            addInstr(env, PPCInstr_Store(8, zero_r1, tmp, True/*mode64*/));
+         } else {
+            // put the I64 register pair into a floating point reg
+            HReg tmpHi;
+            HReg tmpLo;
+            PPCAMode* four_r1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );
+
+            iselInt64Expr(&tmpHi, &tmpLo, env, e->Iex.Binop.arg1,
+                          IEndianess);
+            addInstr(env, PPCInstr_Store(4, zero_r1, tmpHi, False/*!mode64*/));
+            addInstr(env, PPCInstr_Store(4, four_r1, tmpLo, False/*!mode64*/));
+         }
+         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_srcL, zero_r1));
+         addInstr(env, PPCInstr_Dfp64Binary(fpop, fr_dst, fr_srcL,
+                                            fr_srcR));
+         add_to_sp( env, 16 );
+         return fr_dst;
+      }
+   }
+
+   if (e->tag == Iex_Triop) {
+      IRTriop *triop = e->Iex.Triop.details;
+      PPCFpOp fpop = Pfp_INVALID;
+
+      switch (triop->op) {
+      case Iop_AddD64:
+         fpop = Pfp_DFPADD;
+         break;
+      case Iop_SubD64:
+         fpop = Pfp_DFPSUB;
+         break;
+      case Iop_MulD64:
+         fpop = Pfp_DFPMUL;
+         break;
+      case Iop_DivD64:
+         fpop = Pfp_DFPDIV;
+         break;
+      default:
+         break;
+      }
+      if (fpop != Pfp_INVALID) {
+         /* Straight D64 arithmetic; arg1 is the rounding mode. */
+         HReg r_dst = newVRegF( env );
+         HReg r_srcL = iselDfp64Expr( env, triop->arg2, IEndianess );
+         HReg r_srcR = iselDfp64Expr( env, triop->arg3, IEndianess );
+
+         set_FPU_DFP_rounding_mode( env, triop->arg1, IEndianess );
+         addInstr( env, PPCInstr_Dfp64Binary( fpop, r_dst, r_srcL, r_srcR ) );
+         return r_dst;
+      }
+
+      switch (triop->op) {
+      case Iop_QuantizeD64:          fpop = Pfp_DQUA;  break;
+      case Iop_SignificanceRoundD64: fpop = Pfp_RRDTR; break;
+      default: break;
+      }
+      if (fpop == Pfp_DQUA) {
+         HReg r_dst = newVRegF(env);
+         HReg r_srcL = iselDfp64Expr(env, triop->arg2, IEndianess);
+         HReg r_srcR = iselDfp64Expr(env, triop->arg3, IEndianess);
+         PPCRI* rmc  = iselWordExpr_RI(env, triop->arg1, IEndianess);
+         addInstr(env, PPCInstr_DfpQuantize(fpop, r_dst, r_srcL, r_srcR,
+                                            rmc));
+         return r_dst;
+
+      } else if (fpop == Pfp_RRDTR) {
+         HReg r_dst = newVRegF(env);
+         HReg r_srcL = newVRegF(env);
+         HReg r_srcR = iselDfp64Expr(env, triop->arg3, IEndianess);
+         PPCRI* rmc  = iselWordExpr_RI(env, triop->arg1, IEndianess);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+         HReg i8_val = iselWordExpr_R(env, triop->arg2, IEndianess);
+
+         /* Move I8 to float register to issue instruction */
+         sub_from_sp( env, 16 );
+         if (mode64)
+            addInstr(env, PPCInstr_Store(8, zero_r1, i8_val, True/*mode64*/));
+         else
+            addInstr(env, PPCInstr_Store(4, zero_r1, i8_val, False/*mode32*/));
+
+         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, r_srcL, zero_r1));
+         add_to_sp( env, 16 );
+
+         // will set TE and RMC when issuing instruction
+         addInstr(env, PPCInstr_DfpQuantize(fpop, r_dst, r_srcL, r_srcR, rmc));
+         return r_dst;
+      }
+   }
+
+   ppIRExpr( e );
+   vpanic( "iselDfp64Expr_wrk(ppc)" );
+}
+
+/* Compute a D128-typed expression into a pair of virtual FP registers
+   (high half in *rHi, low half in *rLo).  Thin checking wrapper
+   around iselDfp128Expr_wrk. */
+static void iselDfp128Expr(HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e,
+                           IREndness IEndianess)
+{
+   iselDfp128Expr_wrk( rHi, rLo, env, e, IEndianess );
+   vassert( hregIsVirtual(*rLo) );
+   vassert( hregIsVirtual(*rHi) );
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static void iselDfp128Expr_wrk(HReg* rHi, HReg *rLo, ISelEnv* env, IRExpr* e,
+                               IREndness IEndianess)
+{
+   vassert( e );
+   vassert( typeOfIRExpr(env->type_env,e) == Ity_D128 );
+
+   /* read 128-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp );
+      return;
+   }
+
+   if (e->tag == Iex_Unop) {
+      HReg r_dstHi = newVRegF(env);
+      HReg r_dstLo = newVRegF(env);
+
+      if (e->Iex.Unop.op == Iop_I64StoD128) {
+         HReg fr_src = newVRegF(env);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+
+         // put the I64 value into a floating point reg
+         if (env->mode64) {
+            HReg tmp   = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+            addInstr(env, PPCInstr_Store(8, zero_r1, tmp, True/*mode64*/));
+         } else {
+            HReg tmpHi, tmpLo;
+            PPCAMode* four_r1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );
+
+            iselInt64Expr(&tmpHi, &tmpLo, env, e->Iex.Unop.arg,
+                          IEndianess);
+            addInstr(env, PPCInstr_Store(4, zero_r1, tmpHi, False/*mode32*/));
+            addInstr(env, PPCInstr_Store(4, four_r1, tmpLo, False/*mode32*/));
+         }
+
+         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_src, zero_r1));
+         addInstr(env, PPCInstr_DfpI64StoD128(Pfp_DCFFIXQ, r_dstHi, r_dstLo,
+                                              fr_src));
+      }
+
+      if (e->Iex.Unop.op == Iop_D64toD128) {
+         HReg r_src = iselDfp64Expr(env, e->Iex.Unop.arg, IEndianess);
+
+         /* Source is 64bit, result is 128 bit.  High 64bit source arg,
+          * is ignored by the instruction.  Set high arg to r_src just
+          * to meet the vassert tests.
+          */
+         addInstr(env, PPCInstr_Dfp128Unary(Pfp_DCTQPQ, r_dstHi, r_dstLo,
+                                            r_src, r_src));
+      }
+      *rHi = r_dstHi;
+      *rLo = r_dstLo;
+      return;
+   }
+
+   /* --------- OPS --------- */
+   if (e->tag == Iex_Binop) {
+      HReg r_srcHi;
+      HReg r_srcLo;
+
+      switch (e->Iex.Binop.op) {
+      case Iop_D64HLtoD128:
+         r_srcHi = iselDfp64Expr( env, e->Iex.Binop.arg1, IEndianess );
+         r_srcLo = iselDfp64Expr( env, e->Iex.Binop.arg2, IEndianess );
+         *rHi = r_srcHi;
+         *rLo = r_srcLo;
+         return;
+         break;
+      case Iop_D128toD64: {
+         PPCFpOp fpop = Pfp_DRDPQ;
+         HReg fr_dst  = newVRegF(env);
+
+         set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1, IEndianess );
+         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2,
+                        IEndianess);
+         addInstr(env, PPCInstr_DfpD128toD64(fpop, fr_dst, r_srcHi, r_srcLo));
+
+         /* Need to meet the interface spec but the result is
+          * just 64-bits so send the result back in both halfs.
+          */
+         *rHi = fr_dst;
+         *rLo = fr_dst;
+         return;
+      }
+      case Iop_ShlD128: 
+      case Iop_ShrD128: {
+         HReg fr_dst_hi = newVRegF(env);  
+         HReg fr_dst_lo = newVRegF(env);
+         PPCRI* shift = iselWordExpr_RI(env, e->Iex.Binop.arg2, IEndianess);
+         PPCFpOp fpop = Pfp_DSCLIQ;  /* fix later if necessary */
+
+         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg1,
+                        IEndianess);
+
+         if (e->Iex.Binop.op == Iop_ShrD128)
+            fpop = Pfp_DSCRIQ;
+
+         addInstr(env, PPCInstr_DfpShift128(fpop, fr_dst_hi, fr_dst_lo,
+                                            r_srcHi, r_srcLo, shift));
+
+         *rHi = fr_dst_hi;
+         *rLo = fr_dst_lo;
+         return;
+      }
+      case Iop_RoundD128toInt: {
+         HReg r_dstHi = newVRegF(env);
+         HReg r_dstLo = newVRegF(env);
+         PPCRI* r_rmc = iselWordExpr_RI(env, e->Iex.Binop.arg1, IEndianess);
+
+         // will set R and RMC when issuing instruction
+         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2,
+                        IEndianess);
+
+         addInstr(env, PPCInstr_DfpRound128(r_dstHi, r_dstLo,
+                                            r_srcHi, r_srcLo, r_rmc));
+         *rHi = r_dstHi;
+         *rLo = r_dstLo;
+         return;
+      }
+      case Iop_InsertExpD128: {
+         HReg r_dstHi = newVRegF(env);
+         HReg r_dstLo = newVRegF(env);
+         HReg r_srcL  = newVRegF(env);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+         r_srcHi = newVRegF(env);
+         r_srcLo = newVRegF(env);
+
+         iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2,
+                        IEndianess);
+
+         /* Move I64 to float register to issue instruction */
+         if (env->mode64) {
+            HReg tmp = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+            addInstr(env, PPCInstr_Store(8, zero_r1, tmp, True/*mode64*/));
+         } else {
+            HReg tmpHi, tmpLo;
+            PPCAMode* four_r1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );
+
+            iselInt64Expr(&tmpHi, &tmpLo, env, e->Iex.Unop.arg,
+                          IEndianess);
+            addInstr(env, PPCInstr_Store(4, zero_r1, tmpHi, False/*mode32*/));
+            addInstr(env, PPCInstr_Store(4, four_r1, tmpLo, False/*mode32*/));
+         }
+
+         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, r_srcL, zero_r1));
+         addInstr(env, PPCInstr_InsertExpD128(Pfp_DIEXQ,
+                                              r_dstHi, r_dstLo,
+                                              r_srcL, r_srcHi, r_srcLo));
+         *rHi = r_dstHi;
+         *rLo = r_dstLo;
+         return;
+      }
+      default:
+         vex_printf( "ERROR: iselDfp128Expr_wrk, UNKNOWN binop case %d\n",
+                     e->Iex.Binop.op );
+         break;
+      }
+   }
+
+   if (e->tag == Iex_Triop) {
+      IRTriop *triop = e->Iex.Triop.details;
+      PPCFpOp fpop = Pfp_INVALID;
+      HReg r_dstHi = newVRegF(env);
+      HReg r_dstLo = newVRegF(env);
+
+      switch (triop->op) {
+      case Iop_AddD128:
+         fpop = Pfp_DFPADDQ;
+         break;
+      case Iop_SubD128:
+         fpop = Pfp_DFPSUBQ;
+         break;
+      case Iop_MulD128:
+         fpop = Pfp_DFPMULQ;
+         break;
+      case Iop_DivD128:
+         fpop = Pfp_DFPDIVQ;
+         break;
+      default:
+         break;
+      }
+
+      if (fpop != Pfp_INVALID) {
+         HReg r_srcRHi = newVRegV( env );
+         HReg r_srcRLo = newVRegV( env );
+
+         /* dst will be used to pass in the left operand and get the result. */
+         iselDfp128Expr( &r_dstHi, &r_dstLo, env, triop->arg2, IEndianess );
+         iselDfp128Expr( &r_srcRHi, &r_srcRLo, env, triop->arg3, IEndianess );
+         set_FPU_DFP_rounding_mode( env, triop->arg1, IEndianess );
+         addInstr( env,
+                   PPCInstr_Dfp128Binary( fpop, r_dstHi, r_dstLo,
+                                          r_srcRHi, r_srcRLo ) );
+         *rHi = r_dstHi;
+         *rLo = r_dstLo;
+         return;
+      }
+      switch (triop->op) {
+      case Iop_QuantizeD128:          fpop = Pfp_DQUAQ;  break;
+      case Iop_SignificanceRoundD128: fpop = Pfp_DRRNDQ; break;
+      default: break;
+      }
+      if (fpop == Pfp_DQUAQ) {
+         HReg r_srcHi = newVRegF(env);
+         HReg r_srcLo = newVRegF(env);
+         PPCRI* rmc = iselWordExpr_RI(env, triop->arg1, IEndianess);
+
+         /* dst will be used to pass in the left operand and get the result */
+         iselDfp128Expr(&r_dstHi, &r_dstLo, env, triop->arg2, IEndianess);
+         iselDfp128Expr(&r_srcHi, &r_srcLo, env, triop->arg3, IEndianess);
+
+         // will set RMC when issuing instruction
+         addInstr(env, PPCInstr_DfpQuantize128(fpop, r_dstHi, r_dstLo,
+                                               r_srcHi, r_srcLo, rmc));
+        *rHi = r_dstHi;
+        *rLo = r_dstLo;
+         return;
+
+      } else if (fpop == Pfp_DRRNDQ) {
+         HReg r_srcHi = newVRegF(env);
+         HReg r_srcLo = newVRegF(env);
+         PPCRI* rmc = iselWordExpr_RI(env, triop->arg1, IEndianess);
+         PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
+         PPCAMode* four_r1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );
+         HReg i8_val = iselWordExpr_R(env, triop->arg2, IEndianess);
+         HReg r_zero = newVRegI( env );
+
+         iselDfp128Expr(&r_srcHi, &r_srcLo, env, triop->arg3, IEndianess);
+
+         /* dst will be used to pass in the left operand and get the result */
+         /* Move I8 to float register to issue instruction.  Note, the
+          * instruction only looks at the bottom 6 bits so we really don't
+          * have to clear the upper bits since the iselWordExpr_R sets the
+          * bottom 8-bits.
+          */
+         sub_from_sp( env, 16 );
+
+         if (env->mode64)
+            addInstr(env, PPCInstr_Store(4, four_r1, i8_val, True/*mode64*/));
+         else
+            addInstr(env, PPCInstr_Store(4, four_r1, i8_val, False/*mode32*/));
+
+         /* Have to write to the upper bits to ensure they have been
+          * initialized. The instruction ignores all but the lower 6-bits.
+          */
+         addInstr( env, PPCInstr_LI( r_zero, 0, env->mode64 ) );
+         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, r_dstHi, zero_r1));
+         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, r_dstLo, zero_r1));
+
+         add_to_sp( env, 16 );
+
+         // will set RMC when issuing instruction
+         addInstr(env, PPCInstr_DfpQuantize128(fpop, r_dstHi, r_dstLo,
+                                               r_srcHi, r_srcLo, rmc));
+         *rHi = r_dstHi;
+         *rLo = r_dstLo;
+         return;
+      }
+ }
+
+   ppIRExpr( e );
+   vpanic( "iselDfp128Expr(ppc64)" );
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: SIMD (Vector) expressions, 128 bit.         ---*/
+/*---------------------------------------------------------*/
+
+/* Select instructions computing the V128-typed expression 'e' and
+   return the virtual vector register holding the result.  This is the
+   public entry point; it only sanity-checks what the worker handed
+   back. */
+static HReg iselVecExpr ( ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   HReg res = iselVecExpr_wrk( env, e, IEndianess );
+#  if 0
+   /* Debug aid: dump the expression being selected. */
+   vex_printf("\n");
+   ppIRExpr(e);
+   vex_printf("\n");
+#  endif
+   /* The worker must produce a virtual register of vector class. */
+   vassert(hregIsVirtual(res));
+   vassert(hregClass(res) == HRcVec128);
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+/* Worker for iselVecExpr.  Selects PPC/AltiVec instructions computing
+   the 128-bit vector expression 'e' and returns a virtual vector
+   register holding the result.  Handles temps, guest-state reads,
+   unaligned V128 loads, unary/binary/ternary vector ops and the
+   all-zeroes / all-ones constants; anything else panics.  Call only
+   via iselVecExpr, which checks the returned register's class. */
+static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e, IREndness IEndianess )
+{
+   Bool mode64 = env->mode64;
+   PPCAvOp op = Pav_INVALID;
+   PPCAvFpOp fpop = Pavfp_INVALID;
+   IRType  ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_V128);
+
+   /* Reads of an IR temp simply return the register already bound to it. */
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Get) {
+      /* Guest state vectors are 16byte aligned,
+         so don't need to worry here */
+      HReg dst = newVRegV(env);
+      addInstr(env,
+               PPCInstr_AvLdSt( True/*load*/, 16, dst,
+                                PPCAMode_IR( e->Iex.Get.offset,
+                                             GuestStatePtr(mode64) )));
+      return dst;
+   }
+
+   if (e->tag == Iex_Load && e->Iex.Load.end == IEndianess) {
+      /* Need to be able to do V128 unaligned loads. The BE unaligned load
+       * can be accomplished using the following code sequence from the ISA.
+       * It uses the lvx instruction that does two aligned loads and then
+       * permute the data to store the required data as if it had been an
+       * unaligned load.
+       *
+       *   lvx  Vhi,0,Rb        # load MSQ, using the unaligned address in Rb
+       *   lvsl Vp, 0,Rb        # Set permute control vector
+       *   addi Rb,Rb,15        # Address of LSQ
+       *   lvx  Vlo,0,Rb        # load LSQ
+       *   vperm Vt,Vhi,Vlo,Vp  # align the data as requested
+       */
+
+      HReg Vhi   = newVRegV(env);
+      HReg Vlo   = newVRegV(env);
+      HReg Vp    = newVRegV(env);
+      HReg v_dst = newVRegV(env);
+      HReg rB;
+      HReg rB_plus_15 = newVRegI(env);
+
+      vassert(e->Iex.Load.ty == Ity_V128);
+      rB = iselWordExpr_R( env, e->Iex.Load.addr, IEndianess );
+
+      // lvx  Vhi, 0, Rb
+      addInstr(env, PPCInstr_AvLdSt( True/*load*/, 16, Vhi,
+                                     PPCAMode_IR(0, rB)) );
+
+      /* LE uses lvsr + swapped vperm operands; BE uses the ISA's lvsl
+         sequence shown above. */
+      if (IEndianess == Iend_LE)
+         // lvsr Vp, 0, Rb
+         addInstr(env, PPCInstr_AvSh( False/*right shift*/, Vp,
+                                      PPCAMode_IR(0, rB)) );
+      else
+         // lvsl Vp, 0, Rb
+         addInstr(env, PPCInstr_AvSh( True/*left shift*/, Vp,
+                                      PPCAMode_IR(0, rB)) );
+
+      // addi Rb_plus_15, Rb, 15
+      addInstr(env, PPCInstr_Alu( Palu_ADD, rB_plus_15,
+                                  rB, PPCRH_Imm(True, toUShort(15))) );
+
+      // lvx  Vlo, 0, Rb_plus_15
+      addInstr(env, PPCInstr_AvLdSt( True/*load*/, 16, Vlo,
+                                     PPCAMode_IR(0, rB_plus_15)) );
+
+      if (IEndianess == Iend_LE)
+         // vperm Vt, Vhi, Vlo, Vp
+         addInstr(env, PPCInstr_AvPerm( v_dst, Vlo, Vhi, Vp ));
+      else
+         // vperm Vt, Vhi, Vlo, Vp
+         addInstr(env, PPCInstr_AvPerm( v_dst, Vhi, Vlo, Vp ));
+
+      return v_dst;
+   }
+
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+
+      case Iop_NotV128: {
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+         HReg dst = newVRegV(env);
+         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, arg));
+         return dst;
+      }
+
+      /* CmpNEZ is built as NOT(CMPEQ(arg, 0)).  The zero vector is made
+         by XORing a fresh vreg with itself: v XOR v == 0 regardless of
+         the register's initial (undefined) contents. */
+      case Iop_CmpNEZ8x16: {
+         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+         HReg zero = newVRegV(env);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
+         addInstr(env, PPCInstr_AvBin8x16(Pav_CMPEQU, dst, arg, zero));
+         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
+         return dst;
+      }
+
+      case Iop_CmpNEZ16x8: {
+         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+         HReg zero = newVRegV(env);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
+         addInstr(env, PPCInstr_AvBin16x8(Pav_CMPEQU, dst, arg, zero));
+         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
+         return dst;
+      }
+
+      case Iop_CmpNEZ32x4: {
+         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+         HReg zero = newVRegV(env);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
+         addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, dst, arg, zero));
+         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
+         return dst;
+      }
+
+      case Iop_CmpNEZ64x2: {
+         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+         HReg zero = newVRegV(env);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
+         addInstr(env, PPCInstr_AvBin64x2(Pav_CMPEQU, dst, arg, zero));
+         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
+         return dst;
+      }
+
+      /* Single-instruction 32Fx4 unary ops. */
+      case Iop_RecipEst32Fx4: fpop = Pavfp_RCPF;    goto do_32Fx4_unary;
+      case Iop_RSqrtEst32Fx4: fpop = Pavfp_RSQRTF;  goto do_32Fx4_unary;
+      case Iop_I32UtoFx4:     fpop = Pavfp_CVTU2F;  goto do_32Fx4_unary;
+      case Iop_I32StoFx4:     fpop = Pavfp_CVTS2F;  goto do_32Fx4_unary;
+      case Iop_QFtoI32Ux4_RZ: fpop = Pavfp_QCVTF2U; goto do_32Fx4_unary;
+      case Iop_QFtoI32Sx4_RZ: fpop = Pavfp_QCVTF2S; goto do_32Fx4_unary;
+      case Iop_RoundF32x4_RM: fpop = Pavfp_ROUNDM;  goto do_32Fx4_unary;
+      case Iop_RoundF32x4_RP: fpop = Pavfp_ROUNDP;  goto do_32Fx4_unary;
+      case Iop_RoundF32x4_RN: fpop = Pavfp_ROUNDN;  goto do_32Fx4_unary;
+      case Iop_RoundF32x4_RZ: fpop = Pavfp_ROUNDZ;  goto do_32Fx4_unary;
+      do_32Fx4_unary:
+      {
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+         HReg dst = newVRegV(env);
+         addInstr(env, PPCInstr_AvUn32Fx4(fpop, dst, arg));
+         return dst;
+      }
+
+      case Iop_32UtoV128: {
+         /* Zero-extend an I32 into the low word of a V128, going via a
+            16-aligned stack slot: store the word plus three words of
+            zeroes, then load the quadword back into a vector reg. */
+         HReg r_aligned16, r_zeros;
+         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg, IEndianess);
+         HReg   dst = newVRegV(env);
+         PPCAMode *am_off0, *am_off4, *am_off8, *am_off12;
+         sub_from_sp( env, 32 );     // Move SP down
+
+         /* Get a quadword aligned address within our stack space */
+         r_aligned16 = get_sp_aligned16( env );
+         am_off0  = PPCAMode_IR( 0,  r_aligned16 );
+         am_off4  = PPCAMode_IR( 4,  r_aligned16 );
+         am_off8  = PPCAMode_IR( 8,  r_aligned16 );
+         am_off12 = PPCAMode_IR( 12, r_aligned16 );
+
+         /* Store zeros */
+         r_zeros = newVRegI(env);
+         addInstr(env, PPCInstr_LI(r_zeros, 0x0, mode64));
+         /* On LE the "low" word sits at offset 0, on BE at offset 12. */
+         if (IEndianess == Iend_LE)
+            addInstr(env, PPCInstr_Store( 4, am_off0, r_src, mode64 ));
+         else
+            addInstr(env, PPCInstr_Store( 4, am_off0, r_zeros, mode64 ));
+         addInstr(env, PPCInstr_Store( 4, am_off4, r_zeros, mode64 ));
+         addInstr(env, PPCInstr_Store( 4, am_off8, r_zeros, mode64 ));
+
+         /* Store r_src in low word of quadword-aligned mem */
+         if (IEndianess == Iend_LE)
+            addInstr(env, PPCInstr_Store( 4, am_off12, r_zeros, mode64 ));
+         else
+            addInstr(env, PPCInstr_Store( 4, am_off12, r_src, mode64 ));
+
+         /* Load word into low word of quadword vector reg */
+         if (IEndianess == Iend_LE)
+            addInstr(env, PPCInstr_AvLdSt( True/*ld*/, 4, dst, am_off0 ));
+         else
+            addInstr(env, PPCInstr_AvLdSt( True/*ld*/, 4, dst, am_off12 ));
+
+         add_to_sp( env, 32 );       // Reset SP
+         return dst;
+      }
+
+      /* Splat a scalar into all lanes. */
+      case Iop_Dup8x16:
+      case Iop_Dup16x8:
+      case Iop_Dup32x4:
+         return mk_AvDuplicateRI(env, e->Iex.Unop.arg, IEndianess);
+
+      case Iop_CipherSV128: op = Pav_CIPHERSUBV128; goto do_AvCipherV128Un;
+      do_AvCipherV128Un: {
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+         HReg dst = newVRegV(env);
+         addInstr(env, PPCInstr_AvCipherV128Unary(op, dst, arg));
+         return dst;
+      }
+
+      /* Per-lane count-leading-zeroes, plus the bit-matrix transpose,
+         all of which map to a single AV unary instruction. */
+      case Iop_Clz8x16: op = Pav_ZEROCNTBYTE;   goto do_zerocnt;
+      case Iop_Clz16x8: op = Pav_ZEROCNTHALF;   goto do_zerocnt;
+      case Iop_Clz32x4: op = Pav_ZEROCNTWORD;   goto do_zerocnt;
+      case Iop_Clz64x2: op = Pav_ZEROCNTDBL;    goto do_zerocnt;
+      case Iop_PwBitMtxXpose64x2: op = Pav_BITMTXXPOSE;  goto do_zerocnt;
+      do_zerocnt:
+      {
+        HReg arg = iselVecExpr(env, e->Iex.Unop.arg, IEndianess);
+        HReg dst = newVRegV(env);
+        addInstr(env, PPCInstr_AvUnary(op, dst, arg));
+        return dst;
+      }
+
+      default:
+         break;
+      } /* switch (e->Iex.Unop.op) */
+   } /* if (e->tag == Iex_Unop) */
+
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+
+      case Iop_64HLtoV128: {
+         /* Glue two I64s into a V128 via the stack.  arg1 is the more
+            significant half. */
+         if (!mode64) {
+            /* 32-bit mode: each I64 arrives as a register pair. */
+            HReg     r3, r2, r1, r0, r_aligned16;
+            PPCAMode *am_off0, *am_off4, *am_off8, *am_off12;
+            HReg     dst = newVRegV(env);
+            /* do this via the stack (easy, convenient, etc) */
+            sub_from_sp( env, 32 );        // Move SP down
+            
+            // get a quadword aligned address within our stack space
+            r_aligned16 = get_sp_aligned16( env );
+            am_off0  = PPCAMode_IR( 0,  r_aligned16 );
+            am_off4  = PPCAMode_IR( 4,  r_aligned16 );
+            am_off8  = PPCAMode_IR( 8,  r_aligned16 );
+            am_off12 = PPCAMode_IR( 12, r_aligned16 );
+            
+            /* Do the less significant 64 bits */
+            iselInt64Expr(&r1, &r0, env, e->Iex.Binop.arg2, IEndianess);
+            addInstr(env, PPCInstr_Store( 4, am_off12, r0, mode64 ));
+            addInstr(env, PPCInstr_Store( 4, am_off8,  r1, mode64 ));
+            /* Do the more significant 64 bits */
+            iselInt64Expr(&r3, &r2, env, e->Iex.Binop.arg1, IEndianess);
+            addInstr(env, PPCInstr_Store( 4, am_off4, r2, mode64 ));
+            addInstr(env, PPCInstr_Store( 4, am_off0, r3, mode64 ));
+            
+            /* Fetch result back from stack. */
+            addInstr(env, PPCInstr_AvLdSt(True/*ld*/, 16, dst, am_off0));
+            
+            add_to_sp( env, 32 );          // Reset SP
+            return dst;
+         } else {
+            HReg     rHi = iselWordExpr_R(env, e->Iex.Binop.arg1, IEndianess);
+            HReg     rLo = iselWordExpr_R(env, e->Iex.Binop.arg2, IEndianess);
+            HReg     dst = newVRegV(env);
+            HReg     r_aligned16;
+            PPCAMode *am_off0, *am_off8;
+            /* do this via the stack (easy, convenient, etc) */
+            sub_from_sp( env, 32 );        // Move SP down
+            
+            // get a quadword aligned address within our stack space
+            r_aligned16 = get_sp_aligned16( env );
+            am_off0  = PPCAMode_IR( 0,  r_aligned16 );
+            am_off8  = PPCAMode_IR( 8,  r_aligned16 );
+            
+            /* Store 2*I64 to stack */
+            if (IEndianess == Iend_LE) {
+               addInstr(env, PPCInstr_Store( 8, am_off0, rLo, mode64 ));
+               addInstr(env, PPCInstr_Store( 8, am_off8, rHi, mode64 ));
+            } else {
+               addInstr(env, PPCInstr_Store( 8, am_off0, rHi, mode64 ));
+               addInstr(env, PPCInstr_Store( 8, am_off8, rLo, mode64 ));
+            }
+            /* Fetch result back from stack. */
+            addInstr(env, PPCInstr_AvLdSt(True/*ld*/, 16, dst, am_off0));
+            
+            add_to_sp( env, 32 );          // Reset SP
+            return dst;
+         }
+      }
+
+      case Iop_Max32Fx4:   fpop = Pavfp_MAXF;   goto do_32Fx4;
+      case Iop_Min32Fx4:   fpop = Pavfp_MINF;   goto do_32Fx4;
+      case Iop_CmpEQ32Fx4: fpop = Pavfp_CMPEQF; goto do_32Fx4;
+      case Iop_CmpGT32Fx4: fpop = Pavfp_CMPGTF; goto do_32Fx4;
+      case Iop_CmpGE32Fx4: fpop = Pavfp_CMPGEF; goto do_32Fx4;
+      do_32Fx4:
+      {
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg dst = newVRegV(env);
+         addInstr(env, PPCInstr_AvBin32Fx4(fpop, dst, argL, argR));
+         return dst;
+      }
+
+      case Iop_CmpLE32Fx4: {
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg dst = newVRegV(env);
+         
+         /* stay consistent with native ppc compares:
+            if a left/right lane holds a nan, return zeros for that lane
+            so: le == NOT(gt OR isNan)
+          */
+         HReg isNanLR = newVRegV(env);
+         HReg isNanL = isNan(env, argL, IEndianess);
+         HReg isNanR = isNan(env, argR, IEndianess);
+         addInstr(env, PPCInstr_AvBinary(Pav_OR, isNanLR,
+                                         isNanL, isNanR));
+
+         addInstr(env, PPCInstr_AvBin32Fx4(Pavfp_CMPGTF, dst,
+                                           argL, argR));
+         addInstr(env, PPCInstr_AvBinary(Pav_OR, dst, dst, isNanLR));
+         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
+         return dst;
+      }
+
+      /* Whole-register bitwise ops. */
+      case Iop_AndV128:    op = Pav_AND;      goto do_AvBin;
+      case Iop_OrV128:     op = Pav_OR;       goto do_AvBin;
+      case Iop_XorV128:    op = Pav_XOR;      goto do_AvBin;
+      do_AvBin: {
+         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvBinary(op, dst, arg1, arg2));
+         return dst;
+      }
+
+      /* 8-bit-lane binary ops, all one AV instruction each. */
+      case Iop_Shl8x16:    op = Pav_SHL;    goto do_AvBin8x16;
+      case Iop_Shr8x16:    op = Pav_SHR;    goto do_AvBin8x16;
+      case Iop_Sar8x16:    op = Pav_SAR;    goto do_AvBin8x16;
+      case Iop_Rol8x16:    op = Pav_ROTL;   goto do_AvBin8x16;
+      case Iop_InterleaveHI8x16: op = Pav_MRGHI;  goto do_AvBin8x16;
+      case Iop_InterleaveLO8x16: op = Pav_MRGLO;  goto do_AvBin8x16;
+      case Iop_Add8x16:    op = Pav_ADDU;   goto do_AvBin8x16;
+      case Iop_QAdd8Ux16:  op = Pav_QADDU;  goto do_AvBin8x16;
+      case Iop_QAdd8Sx16:  op = Pav_QADDS;  goto do_AvBin8x16;
+      case Iop_Sub8x16:    op = Pav_SUBU;   goto do_AvBin8x16;
+      case Iop_QSub8Ux16:  op = Pav_QSUBU;  goto do_AvBin8x16;
+      case Iop_QSub8Sx16:  op = Pav_QSUBS;  goto do_AvBin8x16;
+      case Iop_Avg8Ux16:   op = Pav_AVGU;   goto do_AvBin8x16;
+      case Iop_Avg8Sx16:   op = Pav_AVGS;   goto do_AvBin8x16;
+      case Iop_Max8Ux16:   op = Pav_MAXU;   goto do_AvBin8x16;
+      case Iop_Max8Sx16:   op = Pav_MAXS;   goto do_AvBin8x16;
+      case Iop_Min8Ux16:   op = Pav_MINU;   goto do_AvBin8x16;
+      case Iop_Min8Sx16:   op = Pav_MINS;   goto do_AvBin8x16;
+      case Iop_MullEven8Ux16: op = Pav_OMULU;  goto do_AvBin8x16;
+      case Iop_MullEven8Sx16: op = Pav_OMULS;  goto do_AvBin8x16;
+      case Iop_CmpEQ8x16:  op = Pav_CMPEQU; goto do_AvBin8x16;
+      case Iop_CmpGT8Ux16: op = Pav_CMPGTU; goto do_AvBin8x16;
+      case Iop_CmpGT8Sx16: op = Pav_CMPGTS; goto do_AvBin8x16;
+      case Iop_PolynomialMulAdd8x16: op = Pav_POLYMULADD; goto do_AvBin8x16;
+      do_AvBin8x16: {
+         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvBin8x16(op, dst, arg1, arg2));
+         return dst;
+      }
+
+      /* 16-bit-lane binary ops. */
+      case Iop_Shl16x8:    op = Pav_SHL;    goto do_AvBin16x8;
+      case Iop_Shr16x8:    op = Pav_SHR;    goto do_AvBin16x8;
+      case Iop_Sar16x8:    op = Pav_SAR;    goto do_AvBin16x8;
+      case Iop_Rol16x8:    op = Pav_ROTL;   goto do_AvBin16x8;
+      case Iop_NarrowBin16to8x16:    op = Pav_PACKUU;  goto do_AvBin16x8;
+      case Iop_QNarrowBin16Uto8Ux16: op = Pav_QPACKUU; goto do_AvBin16x8;
+      case Iop_QNarrowBin16Sto8Sx16: op = Pav_QPACKSS; goto do_AvBin16x8;
+      case Iop_InterleaveHI16x8:  op = Pav_MRGHI;  goto do_AvBin16x8;
+      case Iop_InterleaveLO16x8:  op = Pav_MRGLO;  goto do_AvBin16x8;
+      case Iop_Add16x8:    op = Pav_ADDU;   goto do_AvBin16x8;
+      case Iop_QAdd16Ux8:  op = Pav_QADDU;  goto do_AvBin16x8;
+      case Iop_QAdd16Sx8:  op = Pav_QADDS;  goto do_AvBin16x8;
+      case Iop_Sub16x8:    op = Pav_SUBU;   goto do_AvBin16x8;
+      case Iop_QSub16Ux8:  op = Pav_QSUBU;  goto do_AvBin16x8;
+      case Iop_QSub16Sx8:  op = Pav_QSUBS;  goto do_AvBin16x8;
+      case Iop_Avg16Ux8:   op = Pav_AVGU;   goto do_AvBin16x8;
+      case Iop_Avg16Sx8:   op = Pav_AVGS;   goto do_AvBin16x8;
+      case Iop_Max16Ux8:   op = Pav_MAXU;   goto do_AvBin16x8;
+      case Iop_Max16Sx8:   op = Pav_MAXS;   goto do_AvBin16x8;
+      case Iop_Min16Ux8:   op = Pav_MINU;   goto do_AvBin16x8;
+      case Iop_Min16Sx8:   op = Pav_MINS;   goto do_AvBin16x8;
+      case Iop_MullEven16Ux8: op = Pav_OMULU;  goto do_AvBin16x8;
+      case Iop_MullEven16Sx8: op = Pav_OMULS;  goto do_AvBin16x8;
+      case Iop_CmpEQ16x8:  op = Pav_CMPEQU; goto do_AvBin16x8;
+      case Iop_CmpGT16Ux8: op = Pav_CMPGTU; goto do_AvBin16x8;
+      case Iop_CmpGT16Sx8: op = Pav_CMPGTS; goto do_AvBin16x8;
+      case Iop_PolynomialMulAdd16x8: op = Pav_POLYMULADD; goto do_AvBin16x8;
+      do_AvBin16x8: {
+         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvBin16x8(op, dst, arg1, arg2));
+         return dst;
+      }
+
+      /* 32-bit-lane binary ops. */
+      case Iop_Shl32x4:    op = Pav_SHL;    goto do_AvBin32x4;
+      case Iop_Shr32x4:    op = Pav_SHR;    goto do_AvBin32x4;
+      case Iop_Sar32x4:    op = Pav_SAR;    goto do_AvBin32x4;
+      case Iop_Rol32x4:    op = Pav_ROTL;   goto do_AvBin32x4;
+      case Iop_NarrowBin32to16x8:    op = Pav_PACKUU;  goto do_AvBin32x4;
+      case Iop_QNarrowBin32Uto16Ux8: op = Pav_QPACKUU; goto do_AvBin32x4;
+      case Iop_QNarrowBin32Sto16Sx8: op = Pav_QPACKSS; goto do_AvBin32x4;
+      case Iop_InterleaveHI32x4:  op = Pav_MRGHI;  goto do_AvBin32x4;
+      case Iop_InterleaveLO32x4:  op = Pav_MRGLO;  goto do_AvBin32x4;
+      case Iop_Add32x4:    op = Pav_ADDU;   goto do_AvBin32x4;
+      case Iop_QAdd32Ux4:  op = Pav_QADDU;  goto do_AvBin32x4;
+      case Iop_QAdd32Sx4:  op = Pav_QADDS;  goto do_AvBin32x4;
+      case Iop_Sub32x4:    op = Pav_SUBU;   goto do_AvBin32x4;
+      case Iop_QSub32Ux4:  op = Pav_QSUBU;  goto do_AvBin32x4;
+      case Iop_QSub32Sx4:  op = Pav_QSUBS;  goto do_AvBin32x4;
+      case Iop_Avg32Ux4:   op = Pav_AVGU;   goto do_AvBin32x4;
+      case Iop_Avg32Sx4:   op = Pav_AVGS;   goto do_AvBin32x4;
+      case Iop_Max32Ux4:   op = Pav_MAXU;   goto do_AvBin32x4;
+      case Iop_Max32Sx4:   op = Pav_MAXS;   goto do_AvBin32x4;
+      case Iop_Min32Ux4:   op = Pav_MINU;   goto do_AvBin32x4;
+      case Iop_Min32Sx4:   op = Pav_MINS;   goto do_AvBin32x4;
+      case Iop_Mul32x4:    op = Pav_MULU;   goto do_AvBin32x4;
+      case Iop_MullEven32Ux4: op = Pav_OMULU;  goto do_AvBin32x4;
+      case Iop_MullEven32Sx4: op = Pav_OMULS;  goto do_AvBin32x4;
+      case Iop_CmpEQ32x4:  op = Pav_CMPEQU; goto do_AvBin32x4;
+      case Iop_CmpGT32Ux4: op = Pav_CMPGTU; goto do_AvBin32x4;
+      case Iop_CmpGT32Sx4: op = Pav_CMPGTS; goto do_AvBin32x4;
+      case Iop_CatOddLanes32x4:  op = Pav_CATODD;  goto do_AvBin32x4;
+      case Iop_CatEvenLanes32x4: op = Pav_CATEVEN; goto do_AvBin32x4;
+      case Iop_PolynomialMulAdd32x4: op = Pav_POLYMULADD; goto do_AvBin32x4;
+      do_AvBin32x4: {
+         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvBin32x4(op, dst, arg1, arg2));
+         return dst;
+      }
+
+      /* 64-bit-lane binary ops. */
+      case Iop_Shl64x2:    op = Pav_SHL;    goto do_AvBin64x2;
+      case Iop_Shr64x2:    op = Pav_SHR;    goto do_AvBin64x2;
+      case Iop_Sar64x2:    op = Pav_SAR;    goto do_AvBin64x2;
+      case Iop_Rol64x2:    op = Pav_ROTL;   goto do_AvBin64x2;
+      case Iop_NarrowBin64to32x4:    op = Pav_PACKUU;  goto do_AvBin64x2;
+      case Iop_QNarrowBin64Sto32Sx4: op = Pav_QPACKSS; goto do_AvBin64x2;
+      case Iop_QNarrowBin64Uto32Ux4: op = Pav_QPACKUU; goto do_AvBin64x2;
+      case Iop_InterleaveHI64x2:  op = Pav_MRGHI;  goto do_AvBin64x2;
+      case Iop_InterleaveLO64x2:  op = Pav_MRGLO;  goto do_AvBin64x2;
+      case Iop_Add64x2:    op = Pav_ADDU;   goto do_AvBin64x2;
+      case Iop_Sub64x2:    op = Pav_SUBU;   goto do_AvBin64x2;
+      case Iop_Max64Ux2:   op = Pav_MAXU;   goto do_AvBin64x2;
+      case Iop_Max64Sx2:   op = Pav_MAXS;   goto do_AvBin64x2;
+      case Iop_Min64Ux2:   op = Pav_MINU;   goto do_AvBin64x2;
+      case Iop_Min64Sx2:   op = Pav_MINS;   goto do_AvBin64x2;
+      case Iop_CmpEQ64x2:  op = Pav_CMPEQU; goto do_AvBin64x2;
+      case Iop_CmpGT64Ux2: op = Pav_CMPGTU; goto do_AvBin64x2;
+      case Iop_CmpGT64Sx2: op = Pav_CMPGTS; goto do_AvBin64x2;
+      case Iop_PolynomialMulAdd64x2: op = Pav_POLYMULADD; goto do_AvBin64x2;
+      do_AvBin64x2: {
+         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvBin64x2(op, dst, arg1, arg2));
+         return dst;
+      }
+
+      /* Shift-by-immediate forms: duplicate the shift amount into every
+         lane of a vector, then use the per-lane vector shift. */
+      case Iop_ShlN8x16: op = Pav_SHL; goto do_AvShift8x16;
+      case Iop_SarN8x16: op = Pav_SAR; goto do_AvShift8x16;
+      do_AvShift8x16: {
+         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg dst    = newVRegV(env);
+         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr(env, PPCInstr_AvBin8x16(op, dst, r_src, v_shft));
+         return dst;
+      }
+
+      case Iop_ShlN16x8: op = Pav_SHL; goto do_AvShift16x8;
+      case Iop_ShrN16x8: op = Pav_SHR; goto do_AvShift16x8;
+      case Iop_SarN16x8: op = Pav_SAR; goto do_AvShift16x8;
+      do_AvShift16x8: {
+         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg dst    = newVRegV(env);
+         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr(env, PPCInstr_AvBin16x8(op, dst, r_src, v_shft));
+         return dst;
+      }
+
+      case Iop_ShlN32x4: op = Pav_SHL; goto do_AvShift32x4;
+      case Iop_ShrN32x4: op = Pav_SHR; goto do_AvShift32x4;
+      case Iop_SarN32x4: op = Pav_SAR; goto do_AvShift32x4;
+      do_AvShift32x4: {
+         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg dst    = newVRegV(env);
+         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr(env, PPCInstr_AvBin32x4(op, dst, r_src, v_shft));
+         return dst;
+      }
+
+      case Iop_ShlN64x2: op = Pav_SHL; goto do_AvShift64x2;
+      case Iop_ShrN64x2: op = Pav_SHR; goto do_AvShift64x2;
+      case Iop_SarN64x2: op = Pav_SAR; goto do_AvShift64x2;
+      do_AvShift64x2: {
+         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg dst    = newVRegV(env);
+         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr(env, PPCInstr_AvBin64x2(op, dst, r_src, v_shft));
+         return dst;
+      }
+
+      case Iop_ShrV128: op = Pav_SHR; goto do_AvShiftV128;
+      case Iop_ShlV128: op = Pav_SHL; goto do_AvShiftV128;
+      do_AvShiftV128: {
+         HReg dst    = newVRegV(env);
+         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2, IEndianess);
+         /* Note: shift value gets masked by 127 */
+         addInstr(env, PPCInstr_AvBinary(op, dst, r_src, v_shft));
+         return dst;
+      }
+
+      case Iop_Perm8x16: {
+         /* One-source permute: pass v_src as both vperm inputs. */
+         HReg dst   = newVRegV(env);
+         HReg v_src = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg v_ctl = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr(env, PPCInstr_AvPerm(dst, v_src, v_src, v_ctl));
+         return dst;
+      }
+
+      case Iop_CipherV128:  op = Pav_CIPHERV128;   goto do_AvCipherV128;
+      case Iop_CipherLV128: op = Pav_CIPHERLV128;  goto do_AvCipherV128;
+      case Iop_NCipherV128: op = Pav_NCIPHERV128;  goto do_AvCipherV128;
+      case Iop_NCipherLV128:op = Pav_NCIPHERLV128; goto do_AvCipherV128;
+      do_AvCipherV128: {
+         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2, IEndianess);
+         HReg dst  = newVRegV(env);
+         addInstr(env, PPCInstr_AvCipherV128Binary(op, dst, arg1, arg2));
+         return dst;
+      }
+
+      case Iop_SHA256:op = Pav_SHA256; goto do_AvHashV128;
+      case Iop_SHA512:op = Pav_SHA512; goto do_AvHashV128;
+      do_AvHashV128: {
+         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1, IEndianess);
+         HReg dst  = newVRegV(env);
+         PPCRI* s_field = iselWordExpr_RI(env, e->Iex.Binop.arg2, IEndianess);
+         addInstr(env, PPCInstr_AvHashV128Binary(op, dst, arg1, s_field));
+         return dst;
+      }
+      default:
+         break;
+      } /* switch (e->Iex.Binop.op) */
+   } /* if (e->tag == Iex_Binop) */
+
+   if (e->tag == Iex_Triop) {
+      IRTriop *triop = e->Iex.Triop.details;
+      switch (triop->op) {
+      /* BCD add/sub take a "preferred sign" (ps) as the third operand. */
+      case Iop_BCDAdd:op = Pav_BCDAdd; goto do_AvBCDV128;
+      case Iop_BCDSub:op = Pav_BCDSub; goto do_AvBCDV128;
+      do_AvBCDV128: {
+         HReg arg1 = iselVecExpr(env, triop->arg1, IEndianess);
+         HReg arg2 = iselVecExpr(env, triop->arg2, IEndianess);
+         HReg dst  = newVRegV(env);
+         PPCRI* ps = iselWordExpr_RI(env, triop->arg3, IEndianess);
+         addInstr(env, PPCInstr_AvBCDV128Trinary(op, dst, arg1, arg2, ps));
+         return dst;
+      }
+
+      /* arg1 is the IR rounding mode; arg2/arg3 are the operands. */
+      case Iop_Add32Fx4: fpop = Pavfp_ADDF; goto do_32Fx4_with_rm;
+      case Iop_Sub32Fx4: fpop = Pavfp_SUBF; goto do_32Fx4_with_rm;
+      case Iop_Mul32Fx4: fpop = Pavfp_MULF; goto do_32Fx4_with_rm;
+      do_32Fx4_with_rm:
+      {
+         HReg argL = iselVecExpr(env, triop->arg2, IEndianess);
+         HReg argR = iselVecExpr(env, triop->arg3, IEndianess);
+         HReg dst  = newVRegV(env);
+         /* FIXME: this is bogus, in the sense that Altivec ignores
+            FPSCR.RM, at least for some FP operations.  So setting the
+            RM is pointless.  This is only really correct in the case
+            where the RM is known, at JIT time, to be Irrm_NEAREST,
+            since -- at least for Altivec FP add/sub/mul -- the
+            emitted insn is hardwired to round to nearest. */
+         set_FPU_rounding_mode(env, triop->arg1, IEndianess);
+         addInstr(env, PPCInstr_AvBin32Fx4(fpop, dst, argL, argR));
+         return dst;
+      }
+
+      default:
+         break;
+      } /* switch (e->Iex.Triop.op) */
+   } /* if (e->tag == Iex_Triop) */
+
+
+   if (e->tag == Iex_Const ) {
+      /* Only the all-zeroes and all-ones V128 constants are handled;
+         any other constant falls through to the panic below.
+         NOTE(review): 0x0000/0xffff appear to be the per-byte bitmask
+         encoding of Ico_V128 -- confirm against the IR definitions. */
+      vassert(e->Iex.Const.con->tag == Ico_V128);
+      if (e->Iex.Const.con->Ico.V128 == 0x0000) {
+         return generate_zeroes_V128(env);
+      } 
+      else if (e->Iex.Const.con->Ico.V128 == 0xffff) {
+         return generate_ones_V128(env);
+      }
+   }
+
+   /* No case matched: report the hwcaps and the offending expression,
+      then give up. */
+   vex_printf("iselVecExpr(ppc) (subarch = %s): can't reduce\n",
+              LibVEX_ppVexHwCaps(mode64 ? VexArchPPC64 : VexArchPPC32,
+                                 env->hwcaps));
+   ppIRExpr(e);
+   vpanic("iselVecExpr_wrk(ppc)");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Statements                                  ---*/
+/*---------------------------------------------------------*/
+
+/* Generate host (PPC) code for a single IR statement, appending the
+   resulting instructions to env->code.  IEndianess is the guest
+   endianness; stores and LL/SC whose annotated endianness differs
+   from it are rejected.  Any statement form not handled below falls
+   through to stmt_fail, which panics. */
+static void iselStmt ( ISelEnv* env, IRStmt* stmt, IREndness IEndianess )
+{
+   Bool mode64 = env->mode64;
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n -- ");
+      ppIRStmt(stmt);
+      vex_printf("\n");
+   }
+
+   switch (stmt->tag) {
+
+   /* --------- STORE --------- */
+   case Ist_Store: {
+      IRType    tya   = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
+      IRType    tyd   = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
+      IREndness end   = stmt->Ist.Store.end;
+
+      /* Reject stores whose endianness or address width doesn't match
+         the mode we're compiling for. */
+      if (end != IEndianess)
+         goto stmt_fail;
+      if (!mode64 && (tya != Ity_I32))
+         goto stmt_fail;
+      if (mode64 && (tya != Ity_I64))
+         goto stmt_fail;
+
+      if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 ||
+          (mode64 && (tyd == Ity_I64))) {
+         PPCAMode* am_addr
+            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/,
+                                 IEndianess);
+         HReg r_src = iselWordExpr_R(env, stmt->Ist.Store.data, IEndianess);
+         addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(tyd)), 
+                                       am_addr, r_src, mode64 ));
+         return;
+      }
+      if (tyd == Ity_F64) {
+         PPCAMode* am_addr
+            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/,
+                                 IEndianess);
+         HReg fr_src = iselDblExpr(env, stmt->Ist.Store.data, IEndianess);
+         addInstr(env,
+                  PPCInstr_FpLdSt(False/*store*/, 8, fr_src, am_addr));
+         return;
+      }
+      if (tyd == Ity_F32) {
+         PPCAMode* am_addr
+            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/,
+                                 IEndianess);
+         HReg fr_src = iselFltExpr(env, stmt->Ist.Store.data, IEndianess);
+         addInstr(env,
+                  PPCInstr_FpLdSt(False/*store*/, 4, fr_src, am_addr));
+         return;
+      }
+      if (tyd == Ity_D64) {
+         PPCAMode* am_addr
+            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/,
+                                 IEndianess);
+         HReg fr_src = iselDfp64Expr(env, stmt->Ist.Store.data, IEndianess);
+         addInstr(env,
+                  PPCInstr_FpLdSt(False/*store*/, 8, fr_src, am_addr));
+         return;
+      }
+      if (tyd == Ity_D32) {
+         PPCAMode* am_addr
+            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/,
+                                 IEndianess);
+         HReg fr_src = iselDfp32Expr(env, stmt->Ist.Store.data, IEndianess);
+         addInstr(env,
+                  PPCInstr_FpLdSt(False/*store*/, 4, fr_src, am_addr));
+         return;
+      }
+      if (tyd == Ity_V128) {
+         PPCAMode* am_addr
+            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/,
+                                 IEndianess);
+         HReg v_src = iselVecExpr(env, stmt->Ist.Store.data, IEndianess);
+         addInstr(env,
+                  PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr));
+         return;
+      }
+      if (tyd == Ity_I64 && !mode64) {
+         /* Just calculate the address in the register.  Life is too
+            short to arse around trying and possibly failing to adjust
+            the offset in a 'reg+offset' style amode. */
+         HReg rHi32, rLo32;
+         HReg r_addr = iselWordExpr_R(env, stmt->Ist.Store.addr, IEndianess);
+         iselInt64Expr( &rHi32, &rLo32, env, stmt->Ist.Store.data,
+                        IEndianess );
+         addInstr(env, PPCInstr_Store( 4/*4-byte store*/,
+                                       PPCAMode_IR( 0, r_addr ), 
+                                       rHi32,
+                                       False/*32-bit insn please*/) );
+         addInstr(env, PPCInstr_Store( 4/*4-byte store*/, 
+                                       PPCAMode_IR( 4, r_addr ), 
+                                       rLo32,
+                                       False/*32-bit insn please*/) );
+         return;
+      }
+      break;
+   }
+
+   /* --------- PUT --------- */
+   case Ist_Put: {
+      IRType ty = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
+      if (ty == Ity_I8  || ty == Ity_I16 ||
+          ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
+         HReg r_src = iselWordExpr_R(env, stmt->Ist.Put.data, IEndianess);
+         PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
+                                          GuestStatePtr(mode64) );
+         addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(ty)), 
+                                       am_addr, r_src, mode64 ));
+         return;
+      }
+      if (!mode64 && ty == Ity_I64) {
+         /* 64-bit value in 32-bit mode: write as two adjacent 32-bit
+            words, high half first. */
+         HReg rHi, rLo;
+         PPCAMode* am_addr  = PPCAMode_IR( stmt->Ist.Put.offset,
+                                           GuestStatePtr(mode64) );
+         PPCAMode* am_addr4 = advance4(env, am_addr);
+         iselInt64Expr(&rHi,&rLo, env, stmt->Ist.Put.data, IEndianess);
+         addInstr(env, PPCInstr_Store( 4, am_addr,  rHi, mode64 ));
+         addInstr(env, PPCInstr_Store( 4, am_addr4, rLo, mode64 ));
+         return;
+     }
+     if (ty == Ity_V128) {
+         /* Guest state vectors are 16byte aligned,
+            so don't need to worry here */
+         HReg v_src = iselVecExpr(env, stmt->Ist.Put.data, IEndianess);
+         PPCAMode* am_addr  = PPCAMode_IR( stmt->Ist.Put.offset,
+                                           GuestStatePtr(mode64) );
+         addInstr(env,
+                  PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr));
+         return;
+      }
+      if (ty == Ity_F64) {
+         HReg fr_src = iselDblExpr(env, stmt->Ist.Put.data, IEndianess);
+         PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
+                                          GuestStatePtr(mode64) );
+         addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8,
+                                        fr_src, am_addr ));
+         return;
+      }
+      if (ty == Ity_D32) {
+         /* The 32-bit value is stored in a 64-bit register */
+         HReg fr_src = iselDfp32Expr( env, stmt->Ist.Put.data, IEndianess );
+         PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
+                                          GuestStatePtr(mode64) );
+         addInstr( env, PPCInstr_FpLdSt( False/*store*/, 8,
+                                         fr_src, am_addr ) );
+         return;
+      }
+      if (ty == Ity_D64) {
+         HReg fr_src = iselDfp64Expr( env, stmt->Ist.Put.data, IEndianess );
+         PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset,
+                                          GuestStatePtr(mode64) );
+         addInstr( env, PPCInstr_FpLdSt( False/*store*/, 8, fr_src, am_addr ) );
+         return;
+      }
+      break;
+   }
+      
+   /* --------- Indexed PUT --------- */
+   case Ist_PutI: {
+      IRPutI *puti = stmt->Ist.PutI.details;
+
+      PPCAMode* dst_am
+         = genGuestArrayOffset(
+              env, puti->descr, 
+              puti->ix, puti->bias,
+              IEndianess );
+      IRType ty = typeOfIRExpr(env->type_env, puti->data);
+      if (mode64 && ty == Ity_I64) {
+         HReg r_src = iselWordExpr_R(env, puti->data, IEndianess);
+         addInstr(env, PPCInstr_Store( toUChar(8),
+                                       dst_am, r_src, mode64 ));
+         return;
+      }
+      if ((!mode64) && ty == Ity_I32) {
+         HReg r_src = iselWordExpr_R(env, puti->data, IEndianess);
+         addInstr(env, PPCInstr_Store( toUChar(4),
+                                       dst_am, r_src, mode64 ));
+         return;
+      }
+      break;
+   }
+
+   /* --------- TMP --------- */
+   /* Assign an expression's value to an IR temporary.  Wide types use
+      register pairs/quads; see the vregmap setup in iselSB_PPC. */
+   case Ist_WrTmp: {
+      IRTemp tmp = stmt->Ist.WrTmp.tmp;
+      IRType ty = typeOfIRTemp(env->type_env, tmp);
+      if (ty == Ity_I8  || ty == Ity_I16 ||
+          ty == Ity_I32 || ((ty == Ity_I64) && mode64)) {
+         HReg r_dst = lookupIRTemp(env, tmp);
+         HReg r_src = iselWordExpr_R(env, stmt->Ist.WrTmp.data, IEndianess);
+         addInstr(env, mk_iMOVds_RR( r_dst, r_src ));
+         return;
+      }
+      if (!mode64 && ty == Ity_I64) {
+         HReg r_srcHi, r_srcLo, r_dstHi, r_dstLo;
+
+         iselInt64Expr(&r_srcHi,&r_srcLo, env, stmt->Ist.WrTmp.data,
+                       IEndianess);
+         lookupIRTempPair( &r_dstHi, &r_dstLo, env, tmp);
+         addInstr(env, mk_iMOVds_RR(r_dstHi, r_srcHi) );
+         addInstr(env, mk_iMOVds_RR(r_dstLo, r_srcLo) );
+         return;
+      }
+      if (mode64 && ty == Ity_I128) {
+         HReg r_srcHi, r_srcLo, r_dstHi, r_dstLo;
+         iselInt128Expr(&r_srcHi,&r_srcLo, env, stmt->Ist.WrTmp.data,
+                        IEndianess);
+         lookupIRTempPair( &r_dstHi, &r_dstLo, env, tmp);
+         addInstr(env, mk_iMOVds_RR(r_dstHi, r_srcHi) );
+         addInstr(env, mk_iMOVds_RR(r_dstLo, r_srcLo) );
+         return;
+      }
+      if (!mode64 && ty == Ity_I128) {
+         /* I128 in 32-bit mode lives in four 32-bit registers. */
+         HReg r_srcHi, r_srcMedHi, r_srcMedLo, r_srcLo;
+         HReg r_dstHi, r_dstMedHi, r_dstMedLo, r_dstLo;
+
+         iselInt128Expr_to_32x4(&r_srcHi, &r_srcMedHi,
+                                &r_srcMedLo, &r_srcLo,
+                                env, stmt->Ist.WrTmp.data, IEndianess);
+
+         lookupIRTempQuad( &r_dstHi, &r_dstMedHi, &r_dstMedLo,
+                           &r_dstLo, env, tmp);
+
+         addInstr(env, mk_iMOVds_RR(r_dstHi,    r_srcHi) );
+         addInstr(env, mk_iMOVds_RR(r_dstMedHi, r_srcMedHi) );
+         addInstr(env, mk_iMOVds_RR(r_dstMedLo, r_srcMedLo) );
+         addInstr(env, mk_iMOVds_RR(r_dstLo,    r_srcLo) );
+         return;
+      }
+      if (ty == Ity_I1) {
+         PPCCondCode cond = iselCondCode(env, stmt->Ist.WrTmp.data,
+                                         IEndianess);
+         HReg r_dst = lookupIRTemp(env, tmp);
+         addInstr(env, PPCInstr_Set(cond, r_dst));
+         return;
+      }
+      if (ty == Ity_F64) {
+         HReg fr_dst = lookupIRTemp(env, tmp);
+         HReg fr_src = iselDblExpr(env, stmt->Ist.WrTmp.data, IEndianess);
+         addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src));
+         return;
+      }
+      if (ty == Ity_F32) {
+         HReg fr_dst = lookupIRTemp(env, tmp);
+         HReg fr_src = iselFltExpr(env, stmt->Ist.WrTmp.data, IEndianess);
+         addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src));
+         return;
+      }
+      if (ty == Ity_D32) {
+         HReg fr_dst = lookupIRTemp(env, tmp);
+         HReg fr_src = iselDfp32Expr(env, stmt->Ist.WrTmp.data, IEndianess);
+         addInstr(env, PPCInstr_Dfp64Unary(Pfp_MOV, fr_dst, fr_src));
+         return;
+      }
+      if (ty == Ity_V128) {
+         HReg v_dst = lookupIRTemp(env, tmp);
+         HReg v_src = iselVecExpr(env, stmt->Ist.WrTmp.data, IEndianess);
+         addInstr(env, PPCInstr_AvUnary(Pav_MOV, v_dst, v_src));
+         return;
+      }
+      if (ty == Ity_D64) {
+         HReg fr_dst = lookupIRTemp( env, tmp );
+         HReg fr_src = iselDfp64Expr( env, stmt->Ist.WrTmp.data, IEndianess );
+         addInstr( env, PPCInstr_Dfp64Unary( Pfp_MOV, fr_dst, fr_src ) );
+         return;
+      }
+      if (ty == Ity_D128) {
+         HReg fr_srcHi, fr_srcLo, fr_dstHi, fr_dstLo;
+	 //         lookupDfp128IRTempPair( &fr_dstHi, &fr_dstLo, env, tmp );
+         lookupIRTempPair( &fr_dstHi, &fr_dstLo, env, tmp );
+         iselDfp128Expr( &fr_srcHi, &fr_srcLo, env, stmt->Ist.WrTmp.data,
+                         IEndianess );
+         addInstr( env, PPCInstr_Dfp64Unary( Pfp_MOV, fr_dstHi, fr_srcHi ) );
+         addInstr( env, PPCInstr_Dfp64Unary( Pfp_MOV, fr_dstLo, fr_srcLo ) );
+         return;
+      }
+      break;
+   }
+
+   /* --------- Load Linked or Store Conditional --------- */
+   case Ist_LLSC: {
+      IRTemp res    = stmt->Ist.LLSC.result;
+      IRType tyRes  = typeOfIRTemp(env->type_env, res);
+      IRType tyAddr = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.addr);
+
+      if (stmt->Ist.LLSC.end != IEndianess)
+         goto stmt_fail;
+      if (!mode64 && (tyAddr != Ity_I32))
+         goto stmt_fail;
+      if (mode64 && (tyAddr != Ity_I64))
+         goto stmt_fail;
+
+      if (stmt->Ist.LLSC.storedata == NULL) {
+         /* LL */
+         HReg r_addr = iselWordExpr_R( env, stmt->Ist.LLSC.addr, IEndianess );
+         HReg r_dst  = lookupIRTemp(env, res);
+         if (tyRes == Ity_I8) {
+            addInstr(env, PPCInstr_LoadL( 1, r_dst, r_addr, mode64 ));
+            return;
+         }
+         if (tyRes == Ity_I16) {
+            addInstr(env, PPCInstr_LoadL( 2, r_dst, r_addr, mode64 ));
+            return;
+         }
+         if (tyRes == Ity_I32) {
+            addInstr(env, PPCInstr_LoadL( 4, r_dst, r_addr, mode64 ));
+            return;
+         }
+         if (tyRes == Ity_I64 && mode64) {
+            addInstr(env, PPCInstr_LoadL( 8, r_dst, r_addr, mode64 ));
+            return;
+         }
+         /* fallthru */;
+      } else {
+         /* SC */
+         HReg   r_res  = lookupIRTemp(env, res); /* :: Ity_I1 */
+         HReg   r_a    = iselWordExpr_R(env, stmt->Ist.LLSC.addr, IEndianess);
+         HReg   r_src  = iselWordExpr_R(env, stmt->Ist.LLSC.storedata,
+                                        IEndianess);
+         HReg   r_tmp  = newVRegI(env);
+         IRType tyData = typeOfIRExpr(env->type_env,
+                                      stmt->Ist.LLSC.storedata);
+         vassert(tyRes == Ity_I1);
+         if (tyData == Ity_I8 || tyData == Ity_I16 || tyData == Ity_I32 ||
+            (tyData == Ity_I64 && mode64)) {
+            int size = 0;
+
+            if (tyData == Ity_I64)
+               size = 8;
+            else if (tyData == Ity_I32)
+               size = 4;
+            else if (tyData == Ity_I16)
+               size = 2;
+            else if (tyData == Ity_I8)
+               size = 1;
+
+            addInstr(env, PPCInstr_StoreC( size,
+                                           r_a, r_src, mode64 ));
+            /* mfcr: CR0.EQ (the SC success bit) ends up at bit 29 of
+               r_tmp; shift it down to bit 0 and mask to a single bit. */
+            addInstr(env, PPCInstr_MfCR( r_tmp ));
+            addInstr(env, PPCInstr_Shft(
+                             Pshft_SHR,
+                             env->mode64 ? False : True
+                                /*F:64-bit, T:32-bit shift*/,
+                             r_tmp, r_tmp, 
+                             PPCRH_Imm(False/*unsigned*/, 29)));
+            /* Probably unnecessary, since the IR dest type is Ity_I1,
+               and so we are entitled to leave whatever junk we like
+               drifting round in the upper 31 or 63 bits of r_res.
+               However, for the sake of conservativeness .. */
+            addInstr(env, PPCInstr_Alu(
+                             Palu_AND, 
+                             r_res, r_tmp, 
+                             PPCRH_Imm(False/*unsigned*/, 1)));
+            return;
+         }
+         /* fallthru */
+      }
+      goto stmt_fail;
+      /*NOTREACHED*/
+   }
+
+   /* --------- Call to DIRTY helper --------- */
+   case Ist_Dirty: {
+      IRDirty* d = stmt->Ist.Dirty.details;
+
+      /* Figure out the return type, if any. */
+      IRType retty = Ity_INVALID;
+      if (d->tmp != IRTemp_INVALID)
+         retty = typeOfIRTemp(env->type_env, d->tmp);
+
+      /* Throw out any return types we don't know about.  The set of
+         acceptable return types is the same in both 32- and 64-bit
+         mode, so we don't need to inspect mode64 to make a
+         decision. */
+      Bool retty_ok = False;
+      switch (retty) {
+         case Ity_INVALID: /* function doesn't return anything */
+         case Ity_V128:
+         case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8:
+            retty_ok = True; break;
+         default:
+            break;
+      }
+      if (!retty_ok)
+         break; /* will go to stmt_fail: */
+
+      /* Marshal args, do the call, clear stack, set the return value
+         to 0x555..555 if this is a conditional call that returns a
+         value and the call is skipped. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, d->guard, d->cee, retty, d->args,
+                    IEndianess );
+      vassert(is_sane_RetLoc(rloc));
+
+      /* Now figure out what to do with the returned value, if any. */
+      switch (retty) {
+         case Ity_INVALID: {
+            /* No return value.  Nothing to do. */
+            vassert(d->tmp == IRTemp_INVALID);
+            vassert(rloc.pri == RLPri_None);
+            vassert(addToSp == 0);
+            return;
+         }
+         case Ity_I32: case Ity_I16: case Ity_I8: {
+            /* The returned value is in %r3.  Park it in the register
+               associated with tmp. */
+            HReg r_dst = lookupIRTemp(env, d->tmp);
+            addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64)));
+            vassert(rloc.pri == RLPri_Int);
+            vassert(addToSp == 0);
+            return;
+         }
+         case Ity_I64:
+            if (mode64) {
+               /* The returned value is in %r3.  Park it in the register
+                  associated with tmp. */
+               HReg r_dst = lookupIRTemp(env, d->tmp);
+               addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64)));
+               vassert(rloc.pri == RLPri_Int);
+               vassert(addToSp == 0);
+            } else {
+               /* The returned value is in %r3:%r4.  Park it in the
+                  register-pair associated with tmp. */
+               HReg r_dstHi = INVALID_HREG;
+               HReg r_dstLo = INVALID_HREG;
+               lookupIRTempPair( &r_dstHi, &r_dstLo, env, d->tmp);
+               addInstr(env, mk_iMOVds_RR(r_dstHi, hregPPC_GPR3(mode64)));
+               addInstr(env, mk_iMOVds_RR(r_dstLo, hregPPC_GPR4(mode64)));
+               vassert(rloc.pri == RLPri_2Int);
+               vassert(addToSp == 0);
+            }
+            return;
+         case Ity_V128: {
+            /* The returned value is on the stack, and *retloc tells
+               us where.  Fish it off the stack and then move the
+               stack pointer upwards to clear it, as directed by
+               doHelperCall. */
+            vassert(rloc.pri == RLPri_V128SpRel);
+            vassert(addToSp >= 16);
+            HReg      dst = lookupIRTemp(env, d->tmp);
+            PPCAMode* am  = PPCAMode_IR(rloc.spOff, StackFramePtr(mode64));
+            addInstr(env, PPCInstr_AvLdSt( True/*load*/, 16, dst, am ));
+            add_to_sp(env, addToSp);
+            return;
+         }
+         default:
+            /*NOTREACHED*/
+            vassert(0);
+      }
+   }
+
+   /* --------- MEM FENCE --------- */
+   case Ist_MBE:
+      switch (stmt->Ist.MBE.event) {
+         case Imbe_Fence:
+            addInstr(env, PPCInstr_MFence());
+            return;
+         default:
+            break;
+      }
+      break;
+
+   /* --------- INSTR MARK --------- */
+   /* Doesn't generate any executable code ... */
+   case Ist_IMark:
+       return;
+
+   /* --------- ABI HINT --------- */
+   /* These have no meaning (denotation in the IR) and so we ignore
+      them ... if any actually made it this far. */
+   case Ist_AbiHint:
+       return;
+
+   /* --------- NO-OP --------- */
+   /* Fairly self-explanatory, wouldn't you say? */
+   case Ist_NoOp:
+       return;
+
+   /* --------- EXIT --------- */
+   case Ist_Exit: {
+      IRConst* dst = stmt->Ist.Exit.dst;
+      if (!mode64 && dst->tag != Ico_U32)
+         vpanic("iselStmt(ppc): Ist_Exit: dst is not a 32-bit value");
+      if (mode64 && dst->tag != Ico_U64)
+         vpanic("iselStmt(ppc64): Ist_Exit: dst is not a 64-bit value");
+
+      PPCCondCode cc    = iselCondCode(env, stmt->Ist.Exit.guard, IEndianess);
+      PPCAMode*   amCIA = PPCAMode_IR(stmt->Ist.Exit.offsIP,
+                                      hregPPC_GPR31(mode64));
+
+      /* Case: boring transfer to known address */
+      if (stmt->Ist.Exit.jk == Ijk_Boring
+          || stmt->Ist.Exit.jk == Ijk_Call
+          /* || stmt->Ist.Exit.jk == Ijk_Ret */) {
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = mode64
+               ? (((Addr64)stmt->Ist.Exit.dst->Ico.U64) > (Addr64)env->max_ga)
+               : (((Addr32)stmt->Ist.Exit.dst->Ico.U32) > (Addr32)env->max_ga);
+            if (0) vex_printf("%s", toFastEP ? "Y" : ",");
+            addInstr(env, PPCInstr_XDirect(
+                             mode64 ? (Addr64)stmt->Ist.Exit.dst->Ico.U64
+                                    : (Addr64)stmt->Ist.Exit.dst->Ico.U32,
+                             amCIA, cc, toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst),
+                                    IEndianess);
+            addInstr(env, PPCInstr_XAssisted(r, amCIA, cc, Ijk_Boring));
+         }
+         return;
+      }
+
+      /* Case: assisted transfer to arbitrary address */
+      switch (stmt->Ist.Exit.jk) {
+         /* Keep this list in sync with that in iselNext below */
+         case Ijk_ClientReq:
+         case Ijk_EmFail:
+         case Ijk_EmWarn:
+         case Ijk_NoDecode:
+         case Ijk_NoRedir:
+         case Ijk_SigBUS:
+         case Ijk_SigTRAP:
+         case Ijk_Sys_syscall:
+         case Ijk_InvalICache:
+         {
+            HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst),
+                                    IEndianess);
+            addInstr(env, PPCInstr_XAssisted(r, amCIA, cc,
+                                             stmt->Ist.Exit.jk));
+            return;
+         }
+         default:
+            break;
+      }
+
+      /* Do we ever expect to see any other kind? */
+      goto stmt_fail;
+   }
+
+   default: break;
+   }
+  stmt_fail:
+   ppIRStmt(stmt);
+   vpanic("iselStmt(ppc)");
+}
+ 
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Basic block terminators (Nexts)             ---*/
+/*---------------------------------------------------------*/
+
+/* Generate code for the superblock's terminator: assign 'next' to the
+   guest IP at guest-state offset offsIP and transfer control with
+   jump kind jk.  The assisted-transfer jump-kind list here must be
+   kept in sync with the Ist_Exit list in iselStmt above.  Panics on
+   any jump kind not covered below. */
+static void iselNext ( ISelEnv* env,
+                       IRExpr* next, IRJumpKind jk, Int offsIP,
+                       IREndness IEndianess)
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf( "\n-- PUT(%d) = ", offsIP);
+      ppIRExpr( next );
+      vex_printf( "; exit-");
+      ppIRJumpKind(jk);
+      vex_printf( "\n");
+   }
+
+   PPCCondCode always = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
+
+   /* Case: boring transfer to known address */
+   if (next->tag == Iex_Const) {
+      IRConst* cdst = next->Iex.Const.con;
+      vassert(cdst->tag == (env->mode64 ? Ico_U64 :Ico_U32));
+      if (jk == Ijk_Boring || jk == Ijk_Call) {
+         /* Boring transfer to known address */
+         PPCAMode* amCIA = PPCAMode_IR(offsIP, hregPPC_GPR31(env->mode64));
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = env->mode64
+               ? (((Addr64)cdst->Ico.U64) > (Addr64)env->max_ga)
+               : (((Addr32)cdst->Ico.U32) > (Addr32)env->max_ga);
+            if (0) vex_printf("%s", toFastEP ? "X" : ".");
+            addInstr(env, PPCInstr_XDirect(
+                             env->mode64 ? (Addr64)cdst->Ico.U64
+                                         : (Addr64)cdst->Ico.U32,
+                             amCIA, always, toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselWordExpr_R(env, next, IEndianess);
+            addInstr(env, PPCInstr_XAssisted(r, amCIA, always,
+                                             Ijk_Boring));
+         }
+         return;
+      }
+   }
+
+   /* Case: call/return (==boring) transfer to any address */
+   switch (jk) {
+      case Ijk_Boring: case Ijk_Ret: case Ijk_Call: {
+         HReg       r     = iselWordExpr_R(env, next, IEndianess);
+         PPCAMode*  amCIA = PPCAMode_IR(offsIP, hregPPC_GPR31(env->mode64));
+         if (env->chainingAllowed) {
+            addInstr(env, PPCInstr_XIndir(r, amCIA, always));
+         } else {
+            addInstr(env, PPCInstr_XAssisted(r, amCIA, always,
+                                             Ijk_Boring));
+         }
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* Case: assisted transfer to arbitrary address */
+   switch (jk) {
+      /* Keep this list in sync with that for Ist_Exit above */
+      case Ijk_ClientReq:
+      case Ijk_EmFail:
+      case Ijk_EmWarn:
+      case Ijk_NoDecode:
+      case Ijk_NoRedir:
+      case Ijk_SigBUS:
+      case Ijk_SigTRAP:
+      case Ijk_Sys_syscall:
+      case Ijk_InvalICache:
+      {
+         HReg      r     = iselWordExpr_R(env, next, IEndianess);
+         PPCAMode* amCIA = PPCAMode_IR(offsIP, hregPPC_GPR31(env->mode64));
+         addInstr(env, PPCInstr_XAssisted(r, amCIA, always, jk));
+         return;
+      }
+      default:
+         break;
+   }
+
+   vex_printf( "\n-- PUT(%d) = ", offsIP);
+   ppIRExpr( next );
+   vex_printf( "; exit-");
+   ppIRJumpKind(jk);
+   vex_printf( "\n");
+   vassert(0); // are we expecting any other kind?
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Insn selector top-level                           ---*/
+/*---------------------------------------------------------*/
+
+/* Translate an entire SB to ppc code.  Returns the array of selected
+   host instructions (with virtual registers; register allocation
+   happens later).  offs_Host_EvC_Counter/FailAddr are guest-state
+   offsets for the leading event check; max_ga is the highest guest
+   address in this SB, used by iselStmt/iselNext to choose fast vs
+   slow chaining entry points; addProfInc requests a profiling
+   counter increment whose address is patched in later. */
+HInstrArray* iselSB_PPC ( const IRSB* bb,
+                          VexArch      arch_host,
+                          const VexArchInfo* archinfo_host,
+                          const VexAbiInfo*  vbi,
+                          Int offs_Host_EvC_Counter,
+                          Int offs_Host_EvC_FailAddr,
+                          Bool chainingAllowed,
+                          Bool addProfInc,
+                          Addr max_ga)
+
+{
+   Int       i, j;
+   HReg      hregLo, hregMedLo, hregMedHi, hregHi;
+   ISelEnv*  env;
+   UInt      hwcaps_host = archinfo_host->hwcaps;
+   Bool      mode64 = False;
+   UInt      mask32, mask64;
+   PPCAMode *amCounter, *amFailAddr;
+   IREndness IEndianess;
+
+   vassert(arch_host == VexArchPPC32 || arch_host == VexArchPPC64);
+   mode64 = arch_host == VexArchPPC64;
+
+   /* do some sanity checks: the 32-bit capability bits must not be
+      set in 64-bit mode, and vice versa */
+   mask32 = VEX_HWCAPS_PPC32_F | VEX_HWCAPS_PPC32_V
+            | VEX_HWCAPS_PPC32_FX | VEX_HWCAPS_PPC32_GX | VEX_HWCAPS_PPC32_VX
+            | VEX_HWCAPS_PPC32_DFP | VEX_HWCAPS_PPC32_ISA2_07;
+
+
+   mask64 = VEX_HWCAPS_PPC64_V | VEX_HWCAPS_PPC64_FX
+            | VEX_HWCAPS_PPC64_GX | VEX_HWCAPS_PPC64_VX | VEX_HWCAPS_PPC64_DFP
+            | VEX_HWCAPS_PPC64_ISA2_07;
+
+   if (mode64) {
+      vassert((hwcaps_host & mask32) == 0);
+   } else {
+      vassert((hwcaps_host & mask64) == 0);
+   }
+
+   /* Check that the host's endianness is as expected. */
+   vassert((archinfo_host->endness == VexEndnessBE) ||
+	   (archinfo_host->endness == VexEndnessLE));
+
+   if (archinfo_host->endness == VexEndnessBE)
+     IEndianess = Iend_BE;
+   else
+     IEndianess = Iend_LE;
+
+   /* Make up an initial environment to use. */
+   env = LibVEX_Alloc_inline(sizeof(ISelEnv));
+   env->vreg_ctr = 0;
+
+   /* Are we being ppc32 or ppc64? */
+   env->mode64 = mode64;
+
+   /* Set up output code array. */
+   env->code = newHInstrArray();
+
+   /* Copy BB's type env. */
+   env->type_env = bb->tyenv;
+
+   /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
+    * change as we go along. 
+    *
+    * vregmap2 and vregmap3 are only used in 32 bit mode 
+    * for supporting I128 in 32-bit mode
+    */
+   env->n_vregmap = bb->tyenv->types_used;
+   env->vregmapLo    = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   env->vregmapMedLo = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   if (mode64) {
+      env->vregmapMedHi = NULL;
+      env->vregmapHi    = NULL;
+   } else {
+      env->vregmapMedHi = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+      env->vregmapHi    = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   }
+
+   /* and finally ... */
+   env->chainingAllowed = chainingAllowed;
+   env->max_ga          = max_ga;
+   env->hwcaps          = hwcaps_host;
+   env->previous_rm     = NULL;
+   env->vbi             = vbi;
+
+   /* For each IR temporary, allocate a suitably-kinded virtual
+      register.  j counts virtual registers handed out; wide types
+      consume two or four consecutive vreg indices. */
+   j = 0;
+   for (i = 0; i < env->n_vregmap; i++) {
+      hregLo = hregMedLo = hregMedHi = hregHi = INVALID_HREG;
+      switch (bb->tyenv->types[i]) {
+      case Ity_I1:
+      case Ity_I8:
+      case Ity_I16:
+      case Ity_I32:
+         if (mode64) {
+            hregLo = mkHReg(True, HRcInt64, 0, j++);
+         } else {
+            hregLo = mkHReg(True, HRcInt32, 0, j++);
+         }
+         break;
+      case Ity_I64:  
+         if (mode64) {
+            hregLo    = mkHReg(True, HRcInt64, 0, j++);
+         } else {
+            /* 64-bit value needs a register pair in 32-bit mode */
+            hregLo    = mkHReg(True, HRcInt32, 0, j++);
+            hregMedLo = mkHReg(True, HRcInt32, 0, j++);
+         }
+         break;
+      case Ity_I128:
+         if (mode64) {
+            hregLo    = mkHReg(True, HRcInt64, 0, j++);
+            hregMedLo = mkHReg(True, HRcInt64, 0, j++);
+         } else {
+            /* 128-bit value needs a register quad in 32-bit mode */
+            hregLo    = mkHReg(True, HRcInt32, 0, j++);
+            hregMedLo = mkHReg(True, HRcInt32, 0, j++);
+            hregMedHi = mkHReg(True, HRcInt32, 0, j++);
+            hregHi    = mkHReg(True, HRcInt32, 0, j++);
+         }
+         break;
+      case Ity_F32:
+      case Ity_F64:
+         hregLo = mkHReg(True, HRcFlt64, 0, j++);
+         break;
+      case Ity_V128:
+         hregLo = mkHReg(True, HRcVec128, 0, j++);
+         break;
+      case Ity_D32:
+      case Ity_D64:
+         hregLo = mkHReg(True, HRcFlt64, 0, j++);
+         break;
+      case Ity_D128:
+         hregLo    = mkHReg(True, HRcFlt64, 0, j++);
+         hregMedLo = mkHReg(True, HRcFlt64, 0, j++);
+         break;
+      default:
+         ppIRType(bb->tyenv->types[i]);
+         vpanic("iselBB(ppc): IRTemp type");
+      }
+      env->vregmapLo[i]    = hregLo;
+      env->vregmapMedLo[i] = hregMedLo;
+      if (!mode64) {
+         env->vregmapMedHi[i] = hregMedHi;
+         env->vregmapHi[i]    = hregHi;
+      }
+   }
+   env->vreg_ctr = j;
+
+   /* The very first instruction must be an event check. */
+   amCounter  = PPCAMode_IR(offs_Host_EvC_Counter, hregPPC_GPR31(mode64));
+   amFailAddr = PPCAMode_IR(offs_Host_EvC_FailAddr, hregPPC_GPR31(mode64));
+   addInstr(env, PPCInstr_EvCheck(amCounter, amFailAddr));
+
+   /* Possibly a block counter increment (for profiling).  At this
+      point we don't know the address of the counter, so just pretend
+      it is zero.  It will have to be patched later, but before this
+      translation is used, by a call to LibVEX_patchProfCtr. */
+   if (addProfInc) {
+      addInstr(env, PPCInstr_ProfInc());
+   }
+
+   /* Ok, finally we can iterate over the statements. */
+   for (i = 0; i < bb->stmts_used; i++)
+      iselStmt(env, bb->stmts[i], IEndianess);
+
+   iselNext(env, bb->next, bb->jumpkind, bb->offsIP, IEndianess);
+
+   /* record the number of vregs we used. */
+   env->code->n_vregs = env->vreg_ctr;
+   return env->code;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                     host_ppc_isel.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_s390_defs.c b/VEX/priv/host_s390_defs.c
new file mode 100644
index 0000000..974ec6d
--- /dev/null
+++ b/VEX/priv/host_s390_defs.c
@@ -0,0 +1,10155 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                  host_s390_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+   Copyright (C) 2012-2013  Florian Krohm   (britzel@acm.org)
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+#include "libvex_s390x_common.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "host_generic_regs.h"
+#include "host_s390_defs.h"
+#include "s390_disasm.h"
+#include "guest_s390_defs.h"    /* S390X_GUEST_OFFSET */
+#include <stdarg.h>
+
+/*------------------------------------------------------------*/
+/*--- Forward declarations                                 ---*/
+/*------------------------------------------------------------*/
+
+static Bool s390_insn_is_reg_reg_move(const s390_insn *, HReg *src, HReg *dst);
+static void s390_insn_map_regs(HRegRemap *, s390_insn *);
+static void s390_insn_get_reg_usage(HRegUsage *u, const s390_insn *);
+static UInt s390_tchain_load64_len(void);
+
+
+/*------------------------------------------------------------*/
+/*--- Registers                                            ---*/
+/*------------------------------------------------------------*/
+
+/* A mapping from register number to register index */
+static Int gpr_index[16];  // GPR regno -> register index
+static Int fpr_index[16];  // FPR regno -> register index
+
+/* Return the real (non-virtual) HReg for general purpose register REGNO.
+   The register-universe index comes from gpr_index, which must have been
+   initialised (index >= 0) by getRRegUniverse_S390. */
+HReg
+s390_hreg_gpr(UInt regno)
+{
+   Int index = gpr_index[regno];
+
+   vassert(index >= 0);
+
+   return mkHReg(/*virtual*/False, HRcInt64, regno, index);
+}
+
+/* Return the real (non-virtual) HReg for floating point register REGNO.
+   The register-universe index comes from fpr_index, which must have been
+   initialised (index >= 0) by getRRegUniverse_S390. */
+HReg
+s390_hreg_fpr(UInt regno)
+{
+   Int index = fpr_index[regno];
+
+   vassert(index >= 0);
+
+   return mkHReg(/*virtual*/False, HRcFlt64, regno, index);
+}
+
+/* Return the encoding of REG; thin legacy-named alias for hregEncoding.
+   For real registers this is the hardware register number. */
+static __inline__ UInt
+hregNumber(HReg reg)
+{
+   return hregEncoding(reg);
+}
+
+/* Decompile the given register into a static buffer and return it.
+   Virtual registers print as %vR<n> (GPR) or %vF<n> (FPR); real registers
+   print their architectural name.  NOTE: the returned pointer may refer to
+   a static buffer, so the result is only valid until the next call and the
+   function is not reentrant. */
+const HChar *
+s390_hreg_as_string(HReg reg)
+{
+   static HChar buf[10];
+
+   static const HChar ireg_names[16][5] = {
+      "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
+      "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
+   };
+
+   static const HChar freg_names[16][5] = {
+      "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
+      "%f8",  "%f9",  "%f10", "%f11", "%f12", "%f13", "%f14", "%f15"
+   };
+
+   UInt r;  /* hregNumber() returns an UInt */
+
+   r = hregNumber(reg);
+
+   /* Be generic for all virtual regs. */
+   if (hregIsVirtual(reg)) {
+      buf[0] = '\0';
+      switch (hregClass(reg)) {
+      case HRcInt64: vex_sprintf(buf, "%%vR%d", r); break;
+      case HRcFlt64: vex_sprintf(buf, "%%vF%d", r); break;
+      default:       goto fail;
+      }
+      return buf;
+   }
+
+   /* But specific for real regs. */
+   vassert(r < 16);
+
+   switch (hregClass(reg)) {
+   case HRcInt64: return ireg_names[r];
+   case HRcFlt64: return freg_names[r];
+   default:       goto fail;
+   }
+
+ fail: vpanic("s390_hreg_as_string");
+}
+
+
+/* Return the real register that holds the guest state pointer
+   (S390_REGNO_GUEST_STATE_POINTER; r13 per the register-universe notes
+   in getRRegUniverse_S390). */
+HReg
+s390_hreg_guest_state_pointer(void)
+{
+   return s390_hreg_gpr(S390_REGNO_GUEST_STATE_POINTER);
+}
+
+
+/* Is VALUE within the domain of a 20-bit signed integer,
+   i.e. -2^19 <= VALUE <= 2^19 - 1 ?  Written as an explicit range test;
+   the previous shift-based formulation ((Int)(uval << 12) >> 12) relied
+   on implementation-defined behaviour (right shift of a negative signed
+   value, ISO C 6.5.7). */
+static __inline__ Bool
+fits_signed_20bit(Int value)
+{
+   return value >= -0x80000 && value <= 0x7FFFF;
+}
+
+
+/* Is VALUE within the domain of a 12-bit unsigned integer,
+   i.e. 0 <= VALUE <= 0xFFF ? */
+static __inline__ Bool
+fits_unsigned_12bit(Int value)
+{
+   return value >= 0 && value <= 0xFFF;
+}
+
+/*------------------------------------------------------------*/
+/*--- Addressing modes (amodes)                            ---*/
+/*------------------------------------------------------------*/
+
+/* Construct a b12 amode: base register B plus unsigned 12-bit
+   displacement D.  D must satisfy fits_unsigned_12bit.  The index
+   register is set to r0, which encodes "no index". */
+s390_amode *
+s390_amode_b12(Int d, HReg b)
+{
+   s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode));
+
+   vassert(fits_unsigned_12bit(d));
+
+   am->tag = S390_AMODE_B12;
+   am->d = d;
+   am->b = b;
+   am->x = s390_hreg_gpr(0);  /* hregNumber(am->x) == 0 */
+
+   return am;
+}
+
+
+/* Construct a b20 amode: base register B plus signed 20-bit
+   displacement D.  D must satisfy fits_signed_20bit.  The index
+   register is set to r0, which encodes "no index". */
+s390_amode *
+s390_amode_b20(Int d, HReg b)
+{
+   s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode));
+
+   vassert(fits_signed_20bit(d));
+
+   am->tag = S390_AMODE_B20;
+   am->d = d;
+   am->b = b;
+   am->x = s390_hreg_gpr(0);  /* hregNumber(am->x) == 0 */
+
+   return am;
+}
+
+
+/* Construct a bx12 amode: base register B, index register X, plus
+   unsigned 12-bit displacement D.  Neither B nor X may be r0 (r0 in
+   either slot would encode "no register"). */
+s390_amode *
+s390_amode_bx12(Int d, HReg b, HReg x)
+{
+   s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode));
+
+   vassert(fits_unsigned_12bit(d));
+   vassert(hregNumber(b) != 0);
+   vassert(hregNumber(x) != 0);
+
+   am->tag = S390_AMODE_BX12;
+   am->d = d;
+   am->b = b;
+   am->x = x;
+
+   return am;
+}
+
+
+/* Construct a bx20 amode: base register B, index register X, plus
+   signed 20-bit displacement D.  Neither B nor X may be r0 (r0 in
+   either slot would encode "no register"). */
+s390_amode *
+s390_amode_bx20(Int d, HReg b, HReg x)
+{
+   s390_amode *am = LibVEX_Alloc_inline(sizeof(s390_amode));
+
+   vassert(fits_signed_20bit(d));
+   vassert(hregNumber(b) != 0);
+   vassert(hregNumber(x) != 0);
+
+   am->tag = S390_AMODE_BX20;
+   am->d = d;
+   am->b = b;
+   am->x = x;
+
+   return am;
+}
+
+
+/* Construct an AMODE for accessing the guest state at OFFSET.
+   OFFSET can be at most 3 * sizeof(VexGuestS390XState) + LibVEX_N_SPILL_BYTES
+   which may be too large for a B12 addressing mode.
+   Use a B20 amode as a fallback which will be safe for any offset.
+   Panics if OFFSET does not even fit a signed 20-bit displacement. */
+s390_amode *
+s390_amode_for_guest_state(Int offset)
+{
+   /* Prefer the shorter B12 encoding when the offset allows it. */
+   if (fits_unsigned_12bit(offset))
+      return s390_amode_b12(offset, s390_hreg_guest_state_pointer());
+
+   if (fits_signed_20bit(offset))
+      return s390_amode_b20(offset, s390_hreg_guest_state_pointer());
+
+   vpanic("invalid guest state offset");
+}
+
+
+/* Decompile the given amode into a static buffer and return it.
+   NOTE: returns a pointer to a static buffer; the result is only valid
+   until the next call and the function is not reentrant. */
+const HChar *
+s390_amode_as_string(const s390_amode *am)
+{
+   static HChar buf[30];
+   HChar *p;
+
+   buf[0] = '\0';
+   p = buf;
+
+   switch (am->tag) {
+   case S390_AMODE_B12:
+   case S390_AMODE_B20:
+      vex_sprintf(p, "%d(%s)", am->d, s390_hreg_as_string(am->b));
+      break;
+
+   case S390_AMODE_BX12:
+   case S390_AMODE_BX20:
+      /* s390_hreg_as_string returns pointer to local buffer. Need to
+         split this into two printfs */
+      p += vex_sprintf(p, "%d(%s,", am->d, s390_hreg_as_string(am->x));
+      vex_sprintf(p, "%s)", s390_hreg_as_string(am->b));
+      break;
+
+   default:
+      vpanic("s390_amode_as_string");
+   }
+
+   return buf;
+}
+
+
+/* Helper for s390_amode_is_sane: True iff REG is a virtual GPR
+   (virtual register of class HRcInt64). */
+static __inline__ Bool
+is_virtual_gpr(HReg reg)
+{
+   if (! hregIsVirtual(reg))
+      return False;
+
+   return hregClass(reg) == HRcInt64;
+}
+
+
+/* Sanity check for an amode: the displacement must fit the encoding for
+   the tag, and base/index must be virtual GPRs -- i.e. this check applies
+   to amodes as built by the instruction selector, before register
+   allocation has replaced virtual registers with real ones. */
+Bool
+s390_amode_is_sane(const s390_amode *am)
+{
+   switch (am->tag) {
+   case S390_AMODE_B12:
+      return is_virtual_gpr(am->b) && fits_unsigned_12bit(am->d);
+
+   case S390_AMODE_B20:
+      return is_virtual_gpr(am->b) && fits_signed_20bit(am->d);
+
+   case S390_AMODE_BX12:
+      return is_virtual_gpr(am->b) && is_virtual_gpr(am->x) &&
+             fits_unsigned_12bit(am->d);
+
+   case S390_AMODE_BX20:
+      return is_virtual_gpr(am->b) && is_virtual_gpr(am->x) &&
+             fits_signed_20bit(am->d);
+
+   default:
+      vpanic("s390_amode_is_sane");
+   }
+}
+
+
+/* Record the register use of an amode: the base register (and index
+   register, for BX variants) are read when the amode is evaluated. */
+static void
+s390_amode_get_reg_usage(HRegUsage *u, const s390_amode *am)
+{
+   switch (am->tag) {
+   case S390_AMODE_B12:
+   case S390_AMODE_B20:
+      addHRegUse(u, HRmRead, am->b);
+      return;
+
+   case S390_AMODE_BX12:
+   case S390_AMODE_BX20:
+      addHRegUse(u, HRmRead, am->b);
+      addHRegUse(u, HRmRead, am->x);
+      return;
+
+   default:
+      vpanic("s390_amode_get_reg_usage");
+   }
+}
+
+
+/* Replace the virtual registers in AM (base, and index for BX variants)
+   with the real registers assigned by the register allocator. */
+static void
+s390_amode_map_regs(HRegRemap *m, s390_amode *am)
+{
+   switch (am->tag) {
+   case S390_AMODE_B12:
+   case S390_AMODE_B20:
+      am->b = lookupHRegRemap(m, am->b);
+      return;
+
+   case S390_AMODE_BX12:
+   case S390_AMODE_BX20:
+      am->b = lookupHRegRemap(m, am->b);
+      am->x = lookupHRegRemap(m, am->x);
+      return;
+
+   default:
+      vpanic("s390_amode_map_regs");
+   }
+}
+
+
+/* Pretty-print the given amode. */
+void
+ppS390AMode(const s390_amode *am)
+{
+   vex_printf("%s", s390_amode_as_string(am));
+}
+
+/* Pretty-print the given instruction.  MODE64 is part of the generic
+   backend interface but is unused here. */
+void
+ppS390Instr(const s390_insn *insn, Bool mode64)
+{
+   vex_printf("%s", s390_insn_as_string(insn));
+}
+
+/* Pretty-print the given register. */
+void
+ppHRegS390(HReg reg)
+{
+   vex_printf("%s", s390_hreg_as_string(reg));
+}
+
+/*------------------------------------------------------------*/
+/*--- Helpers for register allocation                      ---*/
+/*------------------------------------------------------------*/
+
+/* Initialise and return the "register universe", i.e. a list of all
+   hardware registers.  The universe is built on the first call and
+   cached; subsequent calls return the cached copy.  As a side effect
+   this populates gpr_index / fpr_index, which map register numbers to
+   their position in the universe. */
+const RRegUniverse *
+getRRegUniverse_S390(void)
+{
+   static RRegUniverse all_regs;
+   static Bool initialised = False;
+   RRegUniverse *ru = &all_regs;
+
+   if (LIKELY(initialised))
+      return ru;
+
+   RRegUniverse__init(ru);
+
+   /* Mark every index slot as unassigned. */
+   UInt i;
+   for (i = 0; i < sizeof gpr_index / sizeof gpr_index[0]; ++i)
+      gpr_index[i] = -1;
+   for (i = 0; i < sizeof fpr_index / sizeof fpr_index[0]; ++i)
+      fpr_index[i] = -1;
+
+   /* Registers available to the register allocator:
+      GPRs:  r1..r11
+      FPRs:  f0..f15
+             FPR12 - FPR15 are also used as register pairs for 128-bit
+             floating point operations */
+   UInt regno;
+   for (regno = 1; regno <= 11; ++regno) {
+      gpr_index[regno] = ru->size;
+      ru->regs[ru->size++] = s390_hreg_gpr(regno);
+   }
+   for (regno = 0; regno <= 15; ++regno) {
+      fpr_index[regno] = ru->size;
+      ru->regs[ru->size++] = s390_hreg_fpr(regno);
+   }
+   ru->allocable = ru->size;
+
+   /* Registers not available for allocation:
+      r0  -- cannot be used as a base or index register
+      r12 -- scratch register for translation chaining support
+      r13 -- guest state pointer
+      r14 -- link register
+      r15 -- stack pointer */
+   static const UInt not_allocable[] = { 0, 12, 13, 14, 15 };
+   for (i = 0; i < sizeof not_allocable / sizeof not_allocable[0]; ++i) {
+      gpr_index[not_allocable[i]] = ru->size;
+      ru->regs[ru->size++] = s390_hreg_gpr(not_allocable[i]);
+   }
+
+   /* Sanity check: every register number must have received an index. */
+   for (i = 0; i < sizeof gpr_index / sizeof gpr_index[0]; ++i)
+      vassert(gpr_index[i] >= 0);
+   for (i = 0; i < sizeof fpr_index / sizeof fpr_index[0]; ++i)
+      vassert(fpr_index[i] >= 0);
+
+   initialised = True;
+   return ru;
+}
+
+/* Tell the register allocator how the given instruction uses the registers
+   it refers to.  MODE64 is part of the generic backend interface but is
+   unused here; the work is delegated to s390_insn_get_reg_usage. */
+void
+getRegUsage_S390Instr(HRegUsage *u, const s390_insn *insn, Bool mode64)
+{
+   s390_insn_get_reg_usage(u, insn);
+}
+
+
+/* Map the registers of the given instruction, replacing virtual registers
+   with the real ones chosen by the allocator.  MODE64 is unused here;
+   the work is delegated to s390_insn_map_regs. */
+void
+mapRegs_S390Instr(HRegRemap *m, s390_insn *insn, Bool mode64)
+{
+   s390_insn_map_regs(m, insn);
+}
+
+
+/* Figure out if the given insn represents a reg-reg move, and if so
+   assign the source and destination to *src and *dst.  If in doubt say No.
+   Used by the register allocator to do move coalescing.
+   Delegates to s390_insn_is_reg_reg_move. */
+Bool
+isMove_S390Instr(const s390_insn *insn, HReg *src, HReg *dst)
+{
+   return s390_insn_is_reg_reg_move(insn, src, dst);
+}
+
+
+/* Generate s390 spill/reload instructions under the direction of the
+   register allocator.  Note it's critical these don't write the
+   condition codes. This is like an Ist_Put.
+   Spill slots are addressed relative to the guest state pointer
+   (see s390_amode_for_guest_state).  At most one insn is emitted,
+   through *i1; *i2 is always set to NULL. */
+void
+genSpill_S390(HInstr **i1, HInstr **i2, HReg rreg, Int offsetB, Bool mode64)
+{
+   s390_amode *am;
+
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+
+   *i1 = *i2 = NULL;
+
+   am = s390_amode_for_guest_state(offsetB);
+
+   switch (hregClass(rreg)) {
+   case HRcInt64:
+   case HRcFlt64:
+      /* Both GPRs and FPRs are 8 bytes wide here. */
+      *i1 = s390_insn_store(8, am, rreg);
+      return;
+
+   default:
+      ppHRegClass(hregClass(rreg));
+      vpanic("genSpill_S390: unimplemented regclass");
+   }
+}
+
+
+/* This is like an Iex_Get: reload a previously spilled register from its
+   spill slot, addressed relative to the guest state pointer.  Counterpart
+   of genSpill_S390.  At most one insn is emitted, through *i1; *i2 is
+   always set to NULL. */
+void
+genReload_S390(HInstr **i1, HInstr **i2, HReg rreg, Int offsetB, Bool mode64)
+{
+   s390_amode *am;
+
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+
+   *i1 = *i2 = NULL;
+
+   am = s390_amode_for_guest_state(offsetB);
+
+   switch (hregClass(rreg)) {
+   case HRcInt64:
+   case HRcFlt64:
+      /* Both GPRs and FPRs are 8 bytes wide here. */
+      *i1 = s390_insn_load(8, rreg, am);
+      return;
+
+   default:
+      ppHRegClass(hregClass(rreg));
+      vpanic("genReload_S390: unimplemented regclass");
+   }
+}
+
+/* Helper function for s390_insn_get_reg_usage: record the register use
+   of an RMI (register / memory / immediate) operand.  An operand is only
+   ever read; immediates use no registers at all. */
+static void
+s390_opnd_RMI_get_reg_usage(HRegUsage *u, s390_opnd_RMI op)
+{
+   switch (op.tag) {
+   case S390_OPND_REG:
+      addHRegUse(u, HRmRead, op.variant.reg);
+      break;
+
+   case S390_OPND_AMODE:
+      s390_amode_get_reg_usage(u, op.variant.am);
+      break;
+
+   case S390_OPND_IMMEDIATE:
+      break;
+
+   default:
+      vpanic("s390_opnd_RMI_get_reg_usage");
+   }
+}
+
+
+/* Tell the register allocator how the given insn uses the registers.
+   For every register the insn touches, record whether it is read,
+   written, or both.  For two-address style insns (e.g. ALU, MUL, BFP
+   binops) the destination is also the first operand and is therefore
+   recorded as both read and written. */
+static void
+s390_insn_get_reg_usage(HRegUsage *u, const s390_insn *insn)
+{
+   initHRegUsage(u);
+
+   switch (insn->tag) {
+   case S390_INSN_LOAD:
+      addHRegUse(u, HRmWrite, insn->variant.load.dst);
+      s390_amode_get_reg_usage(u, insn->variant.load.src);
+      break;
+
+   case S390_INSN_LOAD_IMMEDIATE:
+      addHRegUse(u, HRmWrite, insn->variant.load_immediate.dst);
+      break;
+
+   case S390_INSN_STORE:
+      addHRegUse(u, HRmRead, insn->variant.store.src);
+      s390_amode_get_reg_usage(u, insn->variant.store.dst);
+      break;
+
+   case S390_INSN_MOVE:
+      addHRegUse(u, HRmRead,  insn->variant.move.src);
+      addHRegUse(u, HRmWrite, insn->variant.move.dst);
+      break;
+
+   case S390_INSN_MEMCPY:
+      s390_amode_get_reg_usage(u, insn->variant.memcpy.src);
+      s390_amode_get_reg_usage(u, insn->variant.memcpy.dst);
+      break;
+
+   case S390_INSN_COND_MOVE:
+      s390_opnd_RMI_get_reg_usage(u, insn->variant.cond_move.src);
+      addHRegUse(u, HRmWrite, insn->variant.cond_move.dst);
+      break;
+
+   case S390_INSN_ALU:
+      addHRegUse(u, HRmWrite, insn->variant.alu.dst);
+      addHRegUse(u, HRmRead,  insn->variant.alu.dst);  /* op1 */
+      s390_opnd_RMI_get_reg_usage(u, insn->variant.alu.op2);
+      break;
+
+   case S390_INSN_SMUL:
+   case S390_INSN_UMUL:
+      addHRegUse(u, HRmRead,  insn->variant.mul.dst_lo);  /* op1 */
+      addHRegUse(u, HRmWrite, insn->variant.mul.dst_lo);
+      addHRegUse(u, HRmWrite, insn->variant.mul.dst_hi);
+      s390_opnd_RMI_get_reg_usage(u, insn->variant.mul.op2);
+      break;
+
+   case S390_INSN_SDIV:
+   case S390_INSN_UDIV:
+      addHRegUse(u, HRmRead,  insn->variant.div.op1_lo);
+      addHRegUse(u, HRmRead,  insn->variant.div.op1_hi);
+      addHRegUse(u, HRmWrite, insn->variant.div.op1_lo);
+      addHRegUse(u, HRmWrite, insn->variant.div.op1_hi);
+      s390_opnd_RMI_get_reg_usage(u, insn->variant.div.op2);
+      break;
+
+   case S390_INSN_DIVS:
+      addHRegUse(u, HRmRead,  insn->variant.divs.op1);
+      addHRegUse(u, HRmWrite, insn->variant.divs.op1); /* quotient */
+      addHRegUse(u, HRmWrite, insn->variant.divs.rem); /* remainder */
+      s390_opnd_RMI_get_reg_usage(u, insn->variant.divs.op2);
+      break;
+
+   case S390_INSN_CLZ:
+      addHRegUse(u, HRmWrite, insn->variant.clz.num_bits);
+      addHRegUse(u, HRmWrite, insn->variant.clz.clobber);
+      s390_opnd_RMI_get_reg_usage(u, insn->variant.clz.src);
+      break;
+
+   case S390_INSN_UNOP:
+      addHRegUse(u, HRmWrite, insn->variant.unop.dst);
+      s390_opnd_RMI_get_reg_usage(u, insn->variant.unop.src);
+      break;
+
+   case S390_INSN_TEST:
+      s390_opnd_RMI_get_reg_usage(u, insn->variant.test.src);
+      break;
+
+   case S390_INSN_CC2BOOL:
+      addHRegUse(u, HRmWrite, insn->variant.cc2bool.dst);
+      break;
+
+   case S390_INSN_CAS:
+      addHRegUse(u, HRmRead,  insn->variant.cas.op1);
+      s390_amode_get_reg_usage(u, insn->variant.cas.op2);
+      addHRegUse(u, HRmRead,  insn->variant.cas.op3);
+      addHRegUse(u, HRmWrite,  insn->variant.cas.old_mem);
+      break;
+
+   case S390_INSN_CDAS: {
+      s390_cdas *cdas = insn->variant.cdas.details;
+
+      addHRegUse(u, HRmRead,  cdas->op1_high);
+      addHRegUse(u, HRmRead,  cdas->op1_low);
+      s390_amode_get_reg_usage(u, cdas->op2);
+      addHRegUse(u, HRmRead,  cdas->op3_high);
+      addHRegUse(u, HRmRead,  cdas->op3_low);
+      addHRegUse(u, HRmWrite, cdas->old_mem_high);
+      addHRegUse(u, HRmWrite, cdas->old_mem_low);
+      addHRegUse(u, HRmWrite, cdas->scratch);
+      break;
+   }
+
+   case S390_INSN_COMPARE:
+      addHRegUse(u, HRmRead, insn->variant.compare.src1);
+      s390_opnd_RMI_get_reg_usage(u, insn->variant.compare.src2);
+      break;
+
+   case S390_INSN_HELPER_CALL: {
+      UInt i;
+
+      /* Assume that all volatile registers are clobbered. ABI says,
+         volatile registers are: r0 - r5. Valgrind's register allocator
+         does not know about r0, so we can leave that out */
+      for (i = 1; i <= 5; ++i) {
+         addHRegUse(u, HRmWrite, s390_hreg_gpr(i));
+      }
+
+      /* Ditto for floating point registers. f0 - f7 are volatile */
+      for (i = 0; i <= 7; ++i) {
+         addHRegUse(u, HRmWrite, s390_hreg_fpr(i));
+      }
+
+      /* The registers that are used for passing arguments will be read.
+         Not all of them may, but in general we need to assume that. */
+      for (i = 0; i < insn->variant.helper_call.details->num_args; ++i) {
+         addHRegUse(u, HRmRead, s390_hreg_gpr(s390_gprno_from_arg_index(i)));
+      }
+
+      /* s390_insn_helper_call_emit also reads / writes the link register
+         and stack pointer. But those registers are not visible to the
+         register allocator. So we don't need to do anything for them. */
+      break;
+   }
+
+   case S390_INSN_BFP_TRIOP:
+      addHRegUse(u, HRmWrite, insn->variant.bfp_triop.dst);
+      addHRegUse(u, HRmRead,  insn->variant.bfp_triop.dst);  /* first */
+      addHRegUse(u, HRmRead,  insn->variant.bfp_triop.op2);  /* second */
+      addHRegUse(u, HRmRead,  insn->variant.bfp_triop.op3);  /* third */
+      break;
+
+   case S390_INSN_BFP_BINOP:
+      addHRegUse(u, HRmWrite, insn->variant.bfp_binop.dst_hi);
+      addHRegUse(u, HRmRead,  insn->variant.bfp_binop.dst_hi);  /* left */
+      addHRegUse(u, HRmRead,  insn->variant.bfp_binop.op2_hi);  /* right */
+      if (insn->size == 16) {
+         /* 128-bit values live in register pairs; cover the low halves */
+         addHRegUse(u, HRmWrite, insn->variant.bfp_binop.dst_lo);
+         addHRegUse(u, HRmRead,  insn->variant.bfp_binop.dst_lo);  /* left */
+         addHRegUse(u, HRmRead,  insn->variant.bfp_binop.op2_lo);  /* right */
+      }
+      break;
+
+   case S390_INSN_BFP_UNOP:
+      addHRegUse(u, HRmWrite, insn->variant.bfp_unop.dst_hi);
+      addHRegUse(u, HRmRead,  insn->variant.bfp_unop.op_hi);  /* operand */
+      if (insn->size == 16) {
+         addHRegUse(u, HRmWrite, insn->variant.bfp_unop.dst_lo);
+         addHRegUse(u, HRmRead,  insn->variant.bfp_unop.op_lo);  /* operand */
+      }
+      break;
+
+   case S390_INSN_BFP_COMPARE:
+      addHRegUse(u, HRmWrite, insn->variant.bfp_compare.dst);
+      addHRegUse(u, HRmRead,  insn->variant.bfp_compare.op1_hi);  /* left */
+      addHRegUse(u, HRmRead,  insn->variant.bfp_compare.op2_hi);  /* right */
+      if (insn->size == 16) {
+         addHRegUse(u, HRmRead,  insn->variant.bfp_compare.op1_lo);  /* left */
+         addHRegUse(u, HRmRead,  insn->variant.bfp_compare.op2_lo);  /* right */
+      }
+      break;
+
+   case S390_INSN_BFP_CONVERT:
+      /* The _lo halves are only present for 128-bit values; an invalid
+         HReg marks their absence */
+      addHRegUse(u, HRmWrite, insn->variant.bfp_convert.dst_hi);
+      if (! hregIsInvalid(insn->variant.bfp_convert.dst_lo))
+         addHRegUse(u, HRmWrite, insn->variant.bfp_convert.dst_lo);
+      addHRegUse(u, HRmRead,  insn->variant.bfp_convert.op_hi);
+      if (! hregIsInvalid(insn->variant.bfp_convert.op_lo))
+         addHRegUse(u, HRmRead, insn->variant.bfp_convert.op_lo);
+      break;
+
+   case S390_INSN_DFP_BINOP: {
+      s390_dfp_binop *dfp_binop = insn->variant.dfp_binop.details;
+
+      addHRegUse(u, HRmWrite, dfp_binop->dst_hi);
+      addHRegUse(u, HRmRead,  dfp_binop->op2_hi);  /* left */
+      addHRegUse(u, HRmRead,  dfp_binop->op3_hi);  /* right */
+      if (insn->size == 16) {
+         addHRegUse(u, HRmWrite, dfp_binop->dst_lo);
+         addHRegUse(u, HRmRead,  dfp_binop->op2_lo);  /* left */
+         addHRegUse(u, HRmRead,  dfp_binop->op3_lo);  /* right */
+      }
+      break;
+   }
+
+   case S390_INSN_DFP_UNOP:
+      addHRegUse(u, HRmWrite, insn->variant.dfp_unop.dst_hi);
+      addHRegUse(u, HRmRead,  insn->variant.dfp_unop.op_hi);  /* operand */
+      if (insn->size == 16) {
+         addHRegUse(u, HRmWrite, insn->variant.dfp_unop.dst_lo);
+         addHRegUse(u, HRmRead,  insn->variant.dfp_unop.op_lo);  /* operand */
+      }
+      break;
+
+   case S390_INSN_DFP_INTOP:
+      addHRegUse(u, HRmWrite, insn->variant.dfp_intop.dst_hi);
+      addHRegUse(u, HRmRead,  insn->variant.dfp_intop.op2);
+      addHRegUse(u, HRmRead,  insn->variant.dfp_intop.op3_hi);
+      if (insn->size == 16) {
+         addHRegUse(u, HRmWrite, insn->variant.dfp_intop.dst_lo);
+         addHRegUse(u, HRmRead,  insn->variant.dfp_intop.op3_lo);
+      }
+      break;
+
+   case S390_INSN_DFP_COMPARE:
+      addHRegUse(u, HRmWrite, insn->variant.dfp_compare.dst);
+      addHRegUse(u, HRmRead,  insn->variant.dfp_compare.op1_hi);  /* left */
+      addHRegUse(u, HRmRead,  insn->variant.dfp_compare.op2_hi);  /* right */
+      if (insn->size == 16) {
+         addHRegUse(u, HRmRead,  insn->variant.dfp_compare.op1_lo);  /* left */
+         addHRegUse(u, HRmRead,  insn->variant.dfp_compare.op2_lo);  /* right */
+      }
+      break;
+
+   case S390_INSN_DFP_CONVERT:
+      addHRegUse(u, HRmWrite, insn->variant.dfp_convert.dst_hi);
+      if (! hregIsInvalid(insn->variant.dfp_convert.dst_lo))
+         addHRegUse(u, HRmWrite, insn->variant.dfp_convert.dst_lo);
+      addHRegUse(u, HRmRead,  insn->variant.dfp_convert.op_hi);  /* operand */
+      if (! hregIsInvalid(insn->variant.dfp_convert.op_lo))
+         addHRegUse(u, HRmRead, insn->variant.dfp_convert.op_lo); /* operand */
+      break;
+
+   case S390_INSN_DFP_REROUND:
+      addHRegUse(u, HRmWrite, insn->variant.dfp_reround.dst_hi);
+      addHRegUse(u, HRmRead,  insn->variant.dfp_reround.op2);     /* left */
+      addHRegUse(u, HRmRead,  insn->variant.dfp_reround.op3_hi);  /* right */
+      if (insn->size == 16) {
+         addHRegUse(u, HRmWrite, insn->variant.dfp_reround.dst_lo);
+         addHRegUse(u, HRmRead,  insn->variant.dfp_reround.op3_lo); /* right */
+      }
+      break;
+
+   case S390_INSN_FP_CONVERT: {
+      s390_fp_convert *fp_convert = insn->variant.fp_convert.details;
+
+      addHRegUse(u, HRmWrite, fp_convert->dst_hi);
+      if (! hregIsInvalid(fp_convert->dst_lo))
+         addHRegUse(u, HRmWrite, fp_convert->dst_lo);
+      addHRegUse(u, HRmRead,  fp_convert->op_hi);
+      if (! hregIsInvalid(fp_convert->op_lo))
+         addHRegUse(u, HRmRead, fp_convert->op_lo);
+      addHRegUse(u, HRmWrite, fp_convert->r1);
+      break;
+   }
+
+   case S390_INSN_MIMM:
+      s390_amode_get_reg_usage(u, insn->variant.mimm.dst);
+      break;
+
+   case S390_INSN_MADD:
+      s390_amode_get_reg_usage(u, insn->variant.madd.dst);
+      break;
+
+   case S390_INSN_MFENCE:
+      break;
+
+   case S390_INSN_SET_FPC_BFPRM:
+      addHRegUse(u, HRmRead,  insn->variant.set_fpc_bfprm.mode);
+      break;
+
+   case S390_INSN_SET_FPC_DFPRM:
+      addHRegUse(u, HRmRead,  insn->variant.set_fpc_dfprm.mode);
+      break;
+
+   case S390_INSN_EVCHECK:
+      s390_amode_get_reg_usage(u, insn->variant.evcheck.counter);
+      s390_amode_get_reg_usage(u, insn->variant.evcheck.fail_addr);
+      break;
+
+   case S390_INSN_PROFINC:
+      /* Does not use any register visible to the register allocator */
+      break;
+
+   case S390_INSN_XDIRECT:
+      s390_amode_get_reg_usage(u, insn->variant.xdirect.guest_IA);
+      break;
+
+   case S390_INSN_XINDIR:
+      addHRegUse(u, HRmRead, insn->variant.xindir.dst);
+      s390_amode_get_reg_usage(u, insn->variant.xindir.guest_IA);
+      break;
+
+   case S390_INSN_XASSISTED:
+      addHRegUse(u, HRmRead, insn->variant.xassisted.dst);
+      s390_amode_get_reg_usage(u, insn->variant.xassisted.guest_IA);
+      break;
+
+   default:
+      vpanic("s390_insn_get_reg_usage");
+   }
+}
+
+
+/* Helper function for s390_insn_map_regs: replace the virtual registers
+   in an RMI (register / memory / immediate) operand with the real ones
+   chosen by the allocator.  Immediates need no mapping. */
+static void
+s390_opnd_RMI_map_regs(HRegRemap *m, s390_opnd_RMI *op)
+{
+   switch (op->tag) {
+   case S390_OPND_REG:
+      op->variant.reg = lookupHRegRemap(m, op->variant.reg);
+      break;
+
+   case S390_OPND_IMMEDIATE:
+      break;
+
+   case S390_OPND_AMODE:
+      s390_amode_map_regs(m, op->variant.am);
+      break;
+
+   default:
+      vpanic("s390_opnd_RMI_map_regs");
+   }
+}
+
+
+static void
+s390_insn_map_regs(HRegRemap *m, s390_insn *insn)
+{
+   switch (insn->tag) {
+   case S390_INSN_LOAD:
+      insn->variant.load.dst = lookupHRegRemap(m, insn->variant.load.dst);
+      s390_amode_map_regs(m, insn->variant.load.src);
+      break;
+
+   case S390_INSN_STORE:
+      s390_amode_map_regs(m, insn->variant.store.dst);
+      insn->variant.store.src = lookupHRegRemap(m, insn->variant.store.src);
+      break;
+
+   case S390_INSN_MOVE:
+      insn->variant.move.dst = lookupHRegRemap(m, insn->variant.move.dst);
+      insn->variant.move.src = lookupHRegRemap(m, insn->variant.move.src);
+      break;
+
+   case S390_INSN_MEMCPY:
+      s390_amode_map_regs(m, insn->variant.memcpy.dst);
+      s390_amode_map_regs(m, insn->variant.memcpy.src);
+      break;
+
+   case S390_INSN_COND_MOVE:
+      insn->variant.cond_move.dst = lookupHRegRemap(m, insn->variant.cond_move.dst);
+      s390_opnd_RMI_map_regs(m, &insn->variant.cond_move.src);
+      break;
+
+   case S390_INSN_LOAD_IMMEDIATE:
+      insn->variant.load_immediate.dst =
+         lookupHRegRemap(m, insn->variant.load_immediate.dst);
+      break;
+
+   case S390_INSN_ALU:
+      insn->variant.alu.dst = lookupHRegRemap(m, insn->variant.alu.dst);
+      s390_opnd_RMI_map_regs(m, &insn->variant.alu.op2);
+      break;
+
+   case S390_INSN_SMUL:
+   case S390_INSN_UMUL:
+      insn->variant.mul.dst_hi = lookupHRegRemap(m, insn->variant.mul.dst_hi);
+      insn->variant.mul.dst_lo = lookupHRegRemap(m, insn->variant.mul.dst_lo);
+      s390_opnd_RMI_map_regs(m, &insn->variant.mul.op2);
+      break;
+
+   case S390_INSN_SDIV:
+   case S390_INSN_UDIV:
+      insn->variant.div.op1_hi = lookupHRegRemap(m, insn->variant.div.op1_hi);
+      insn->variant.div.op1_lo = lookupHRegRemap(m, insn->variant.div.op1_lo);
+      s390_opnd_RMI_map_regs(m, &insn->variant.div.op2);
+      break;
+
+   case S390_INSN_DIVS:
+      insn->variant.divs.op1 = lookupHRegRemap(m, insn->variant.divs.op1);
+      insn->variant.divs.rem = lookupHRegRemap(m, insn->variant.divs.rem);
+      s390_opnd_RMI_map_regs(m, &insn->variant.divs.op2);
+      break;
+
+   case S390_INSN_CLZ:
+      insn->variant.clz.num_bits = lookupHRegRemap(m, insn->variant.clz.num_bits);
+      insn->variant.clz.clobber  = lookupHRegRemap(m, insn->variant.clz.clobber);
+      s390_opnd_RMI_map_regs(m, &insn->variant.clz.src);
+      break;
+
+   case S390_INSN_UNOP:
+      insn->variant.unop.dst = lookupHRegRemap(m, insn->variant.unop.dst);
+      s390_opnd_RMI_map_regs(m, &insn->variant.unop.src);
+      break;
+
+   case S390_INSN_TEST:
+      s390_opnd_RMI_map_regs(m, &insn->variant.test.src);
+      break;
+
+   case S390_INSN_CC2BOOL:
+      insn->variant.cc2bool.dst = lookupHRegRemap(m, insn->variant.cc2bool.dst);
+      break;
+
+   case S390_INSN_CAS:
+      insn->variant.cas.op1 = lookupHRegRemap(m, insn->variant.cas.op1);
+      s390_amode_map_regs(m, insn->variant.cas.op2);
+      insn->variant.cas.op3 = lookupHRegRemap(m, insn->variant.cas.op3);
+      insn->variant.cas.old_mem = lookupHRegRemap(m, insn->variant.cas.old_mem);
+      break;
+
+   case S390_INSN_CDAS: {
+      s390_cdas *cdas = insn->variant.cdas.details;
+
+      cdas->op1_high = lookupHRegRemap(m, cdas->op1_high);
+      cdas->op1_low  = lookupHRegRemap(m, cdas->op1_low);
+      s390_amode_map_regs(m, cdas->op2);
+      cdas->op3_high = lookupHRegRemap(m, cdas->op3_high);
+      cdas->op3_low  = lookupHRegRemap(m, cdas->op3_low);
+      cdas->old_mem_high = lookupHRegRemap(m, cdas->old_mem_high);
+      cdas->old_mem_low  = lookupHRegRemap(m, cdas->old_mem_low);
+      cdas->scratch  = lookupHRegRemap(m, cdas->scratch);
+      break;
+   }
+
+   case S390_INSN_COMPARE:
+      insn->variant.compare.src1 = lookupHRegRemap(m, insn->variant.compare.src1);
+      s390_opnd_RMI_map_regs(m, &insn->variant.compare.src2);
+      break;
+
+   case S390_INSN_HELPER_CALL:
+      /* s390_insn_helper_call_emit also reads / writes the link register
+         and stack pointer. But those registers are not visible to the
+         register allocator. So we don't need to do anything for them.
+         As for the arguments of the helper call -- they will be loaded into
+         non-virtual registers. Again, we don't need to do anything for those
+         here. */
+      break;
+
+   case S390_INSN_BFP_TRIOP:
+      insn->variant.bfp_triop.dst =
+         lookupHRegRemap(m, insn->variant.bfp_triop.dst);
+      insn->variant.bfp_triop.op2 =
+         lookupHRegRemap(m, insn->variant.bfp_triop.op2);
+      insn->variant.bfp_triop.op3 =
+         lookupHRegRemap(m, insn->variant.bfp_triop.op3);
+      break;
+
+   case S390_INSN_BFP_BINOP:
+      insn->variant.bfp_binop.dst_hi =
+         lookupHRegRemap(m, insn->variant.bfp_binop.dst_hi);
+      insn->variant.bfp_binop.op2_hi =
+         lookupHRegRemap(m, insn->variant.bfp_binop.op2_hi);
+      if (insn->size == 16) {
+         insn->variant.bfp_binop.dst_lo =
+            lookupHRegRemap(m, insn->variant.bfp_binop.dst_lo);
+         insn->variant.bfp_binop.op2_lo  =
+            lookupHRegRemap(m, insn->variant.bfp_binop.op2_lo);
+      }
+      break;
+
+   case S390_INSN_BFP_UNOP:
+      insn->variant.bfp_unop.dst_hi =
+         lookupHRegRemap(m, insn->variant.bfp_unop.dst_hi);
+      insn->variant.bfp_unop.op_hi  =
+         lookupHRegRemap(m, insn->variant.bfp_unop.op_hi);
+      if (insn->size == 16) {
+         insn->variant.bfp_unop.dst_lo =
+            lookupHRegRemap(m, insn->variant.bfp_unop.dst_lo);
+         insn->variant.bfp_unop.op_lo  =
+            lookupHRegRemap(m, insn->variant.bfp_unop.op_lo);
+      }
+      break;
+
+   case S390_INSN_BFP_COMPARE:
+      insn->variant.bfp_compare.dst =
+         lookupHRegRemap(m, insn->variant.bfp_compare.dst);
+      insn->variant.bfp_compare.op1_hi =
+         lookupHRegRemap(m, insn->variant.bfp_compare.op1_hi);
+      insn->variant.bfp_compare.op2_hi =
+         lookupHRegRemap(m, insn->variant.bfp_compare.op2_hi);
+      if (insn->size == 16) {
+         insn->variant.bfp_compare.op1_lo =
+            lookupHRegRemap(m, insn->variant.bfp_compare.op1_lo);
+         insn->variant.bfp_compare.op2_lo =
+            lookupHRegRemap(m, insn->variant.bfp_compare.op2_lo);
+      }
+      break;
+
+   case S390_INSN_BFP_CONVERT:
+      insn->variant.bfp_convert.dst_hi =
+         lookupHRegRemap(m, insn->variant.bfp_convert.dst_hi);
+      if (! hregIsInvalid(insn->variant.bfp_convert.dst_lo))
+         insn->variant.bfp_convert.dst_lo =
+            lookupHRegRemap(m, insn->variant.bfp_convert.dst_lo);
+      insn->variant.bfp_convert.op_hi =
+         lookupHRegRemap(m, insn->variant.bfp_convert.op_hi);
+      if (! hregIsInvalid(insn->variant.bfp_convert.op_lo))
+         insn->variant.bfp_convert.op_lo =
+            lookupHRegRemap(m, insn->variant.bfp_convert.op_lo);
+      break;
+
+   case S390_INSN_DFP_BINOP: {
+      s390_dfp_binop *dfp_binop = insn->variant.dfp_binop.details;
+
+      dfp_binop->dst_hi = lookupHRegRemap(m, dfp_binop->dst_hi);
+      dfp_binop->op2_hi = lookupHRegRemap(m, dfp_binop->op2_hi);
+      dfp_binop->op3_hi = lookupHRegRemap(m, dfp_binop->op3_hi);
+      if (insn->size == 16) {
+         dfp_binop->dst_lo = lookupHRegRemap(m, dfp_binop->dst_lo);
+         dfp_binop->op2_lo = lookupHRegRemap(m, dfp_binop->op2_lo);
+         dfp_binop->op3_lo = lookupHRegRemap(m, dfp_binop->op3_lo);
+      }
+      break;
+   }
+
+   case S390_INSN_DFP_UNOP:
+      insn->variant.dfp_unop.dst_hi =
+         lookupHRegRemap(m, insn->variant.dfp_unop.dst_hi);
+      insn->variant.dfp_unop.op_hi  =
+         lookupHRegRemap(m, insn->variant.dfp_unop.op_hi);
+      if (insn->size == 16) {
+         insn->variant.dfp_unop.dst_lo =
+            lookupHRegRemap(m, insn->variant.dfp_unop.dst_lo);
+         insn->variant.dfp_unop.op_lo  =
+            lookupHRegRemap(m, insn->variant.dfp_unop.op_lo);
+      }
+      break;
+
+   case S390_INSN_DFP_INTOP:
+      insn->variant.dfp_intop.dst_hi =
+         lookupHRegRemap(m, insn->variant.dfp_intop.dst_hi);
+      insn->variant.dfp_intop.op2    =
+         lookupHRegRemap(m, insn->variant.dfp_intop.op2);
+      insn->variant.dfp_intop.op3_hi =
+         lookupHRegRemap(m, insn->variant.dfp_intop.op3_hi);
+      if (insn->size == 16) {
+         insn->variant.dfp_intop.dst_lo =
+            lookupHRegRemap(m, insn->variant.dfp_intop.dst_lo);
+         insn->variant.dfp_intop.op3_lo =
+            lookupHRegRemap(m, insn->variant.dfp_intop.op3_lo);
+      }
+      break;
+
+   case S390_INSN_DFP_COMPARE:
+      insn->variant.dfp_compare.dst =
+         lookupHRegRemap(m, insn->variant.dfp_compare.dst);
+      insn->variant.dfp_compare.op1_hi =
+         lookupHRegRemap(m, insn->variant.dfp_compare.op1_hi);
+      insn->variant.dfp_compare.op2_hi =
+         lookupHRegRemap(m, insn->variant.dfp_compare.op2_hi);
+      if (insn->size == 16) {
+         insn->variant.dfp_compare.op1_lo =
+            lookupHRegRemap(m, insn->variant.dfp_compare.op1_lo);
+         insn->variant.dfp_compare.op2_lo =
+            lookupHRegRemap(m, insn->variant.dfp_compare.op2_lo);
+      }
+      break;
+
+   case S390_INSN_DFP_CONVERT:
+      insn->variant.dfp_convert.dst_hi =
+         lookupHRegRemap(m, insn->variant.dfp_convert.dst_hi);
+      if (! hregIsInvalid(insn->variant.dfp_convert.dst_lo))
+         insn->variant.dfp_convert.dst_lo =
+            lookupHRegRemap(m, insn->variant.dfp_convert.dst_lo);
+      insn->variant.dfp_convert.op_hi =
+         lookupHRegRemap(m, insn->variant.dfp_convert.op_hi);
+      if (! hregIsInvalid(insn->variant.dfp_convert.op_lo))
+         insn->variant.dfp_convert.op_lo =
+            lookupHRegRemap(m, insn->variant.dfp_convert.op_lo);
+      break;
+
+   case S390_INSN_DFP_REROUND:
+      insn->variant.dfp_reround.dst_hi =
+         lookupHRegRemap(m, insn->variant.dfp_reround.dst_hi);
+      insn->variant.dfp_reround.op2    =
+         lookupHRegRemap(m, insn->variant.dfp_reround.op2);
+      insn->variant.dfp_reround.op3_hi =
+         lookupHRegRemap(m, insn->variant.dfp_reround.op3_hi);
+      if (insn->size == 16) {
+         insn->variant.dfp_reround.dst_lo =
+            lookupHRegRemap(m, insn->variant.dfp_reround.dst_lo);
+         insn->variant.dfp_reround.op3_lo =
+            lookupHRegRemap(m, insn->variant.dfp_reround.op3_lo);
+      }
+      break;
+
+   case S390_INSN_FP_CONVERT: {
+      s390_fp_convert *fp_convert = insn->variant.fp_convert.details;
+
+      fp_convert->dst_hi = lookupHRegRemap(m, fp_convert->dst_hi);
+      if (! hregIsInvalid(fp_convert->dst_lo))
+         fp_convert->dst_lo = lookupHRegRemap(m, fp_convert->dst_lo);
+      fp_convert->op_hi = lookupHRegRemap(m, fp_convert->op_hi);
+      if (! hregIsInvalid(fp_convert->op_lo))
+         fp_convert->op_lo = lookupHRegRemap(m, fp_convert->op_lo);
+      fp_convert->r1 = lookupHRegRemap(m, fp_convert->r1);
+      break;
+   }
+
+   case S390_INSN_MIMM:
+      s390_amode_map_regs(m, insn->variant.mimm.dst);
+      break;
+
+   case S390_INSN_MADD:
+      s390_amode_map_regs(m, insn->variant.madd.dst);
+      break;
+
+   case S390_INSN_MFENCE:
+      break;
+
+   case S390_INSN_SET_FPC_BFPRM:
+      insn->variant.set_fpc_bfprm.mode =
+         lookupHRegRemap(m, insn->variant.set_fpc_bfprm.mode);
+      break;
+
+   case S390_INSN_SET_FPC_DFPRM:
+      insn->variant.set_fpc_dfprm.mode =
+         lookupHRegRemap(m, insn->variant.set_fpc_dfprm.mode);
+      break;
+
+   case S390_INSN_EVCHECK:
+      s390_amode_map_regs(m, insn->variant.evcheck.counter);
+      s390_amode_map_regs(m, insn->variant.evcheck.fail_addr);
+      break;
+
+   case S390_INSN_PROFINC:
+      /* Does not use any register visible to the register allocator */
+      break;
+
+   case S390_INSN_XDIRECT:
+      s390_amode_map_regs(m, insn->variant.xdirect.guest_IA);
+      break;
+
+   case S390_INSN_XINDIR:
+      s390_amode_map_regs(m, insn->variant.xindir.guest_IA);
+      insn->variant.xindir.dst =
+         lookupHRegRemap(m, insn->variant.xindir.dst);
+      break;
+
+   case S390_INSN_XASSISTED:
+      s390_amode_map_regs(m, insn->variant.xassisted.guest_IA);
+      insn->variant.xassisted.dst =
+         lookupHRegRemap(m, insn->variant.xassisted.dst);
+      break;
+
+   default:
+      vpanic("s390_insn_map_regs");
+   }
+}
+
+
+/* Return True, if INSN is a move between two registers of the same class.
+   In that case assign the source and destination registers to SRC and DST,
+   respectively. */
+static Bool
+s390_insn_is_reg_reg_move(const s390_insn *insn, HReg *src, HReg *dst)
+{
+   if (insn->tag == S390_INSN_MOVE &&
+       hregClass(insn->variant.move.src) == hregClass(insn->variant.move.dst)) {
+      *src = insn->variant.move.src;
+      *dst = insn->variant.move.dst;
+      return True;
+   }
+
+   return False;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Functions to emit a sequence of bytes                ---*/
+/*------------------------------------------------------------*/
+
/* Write the low-order 2 bytes of VAL to P and return the pointer
   advanced past the emitted bytes.
   NOTE(review): taking the bytes at offset 6 of the 8-byte ULong selects
   the low-order halfword only on a big-endian host. That holds here --
   this backend emits code for (and runs on) s390, which is big-endian --
   but the helper is not endian-portable. */
static __inline__ UChar *
emit_2bytes(UChar *p, ULong val)
{
   return (UChar *)__builtin_memcpy(p, ((UChar *)&val) + 6, 2) + 2;
}
+
+
/* Write the low-order 4 bytes of VAL to P and return the pointer
   advanced past the emitted bytes. Offset 4 selects the low-order word
   only on a big-endian host (s390) -- see emit_2bytes. */
static __inline__ UChar *
emit_4bytes(UChar *p, ULong val)
{
   return (UChar *)__builtin_memcpy(p, ((UChar *)&val) + 4, 4) + 4;
}
+
+
/* Write the low-order 6 bytes of VAL to P and return the pointer
   advanced past the emitted bytes. Offset 2 selects the low-order
   6 bytes only on a big-endian host (s390) -- see emit_2bytes. */
static __inline__ UChar *
emit_6bytes(UChar *p, ULong val)
{
   return (UChar *)__builtin_memcpy(p, ((UChar *)&val) + 2, 6) + 6;
}
+
+
+/*------------------------------------------------------------*/
+/*--- Functions to emit various instruction formats        ---*/
+/*------------------------------------------------------------*/
+
+static UChar *
+emit_RI(UChar *p, UInt op, UChar r1, UShort i2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r1) << 20;
+   the_insn |= ((ULong)i2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RIL(UChar *p, ULong op, UChar r1, UInt i2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r1) << 36;
+   the_insn |= ((ULong)i2) << 0;
+
+   return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RR(UChar *p, UInt op, UChar r1, UChar r2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r1) << 4;
+   the_insn |= ((ULong)r2) << 0;
+
+   return emit_2bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RRE(UChar *p, UInt op, UChar r1, UChar r2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r1) << 4;
+   the_insn |= ((ULong)r2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RRF(UChar *p, UInt op, UChar r1, UChar r3, UChar r2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r1) << 12;
+   the_insn |= ((ULong)r3) << 4;
+   the_insn |= ((ULong)r2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RRF2(UChar *p, UInt op, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)m3) << 12;
+   the_insn |= ((ULong)m4) << 8;
+   the_insn |= ((ULong)r1) << 4;
+   the_insn |= ((ULong)r2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RRF3(UChar *p, UInt op, UChar r3, UChar r1, UChar r2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r3) << 12;
+   the_insn |= ((ULong)r1) << 4;
+   the_insn |= ((ULong)r2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RRF4(UChar *p, UInt op, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r3) << 12;
+   the_insn |= ((ULong)m4) << 8;
+   the_insn |= ((ULong)r1) << 4;
+   the_insn |= ((ULong)r2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RRF5(UChar *p, UInt op, UChar m4, UChar r1, UChar r2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)m4) << 8;
+   the_insn |= ((ULong)r1) << 4;
+   the_insn |= ((ULong)r2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RS(UChar *p, UInt op, UChar r1, UChar r3, UChar b2, UShort d2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r1) << 20;
+   the_insn |= ((ULong)r3) << 16;
+   the_insn |= ((ULong)b2) << 12;
+   the_insn |= ((ULong)d2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RSY(UChar *p, ULong op, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r1) << 36;
+   the_insn |= ((ULong)r3) << 32;
+   the_insn |= ((ULong)b2) << 28;
+   the_insn |= ((ULong)dl2) << 16;
+   the_insn |= ((ULong)dh2) << 8;
+
+   return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RX(UChar *p, UInt op, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r1) << 20;
+   the_insn |= ((ULong)x2) << 16;
+   the_insn |= ((ULong)b2) << 12;
+   the_insn |= ((ULong)d2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RXF(UChar *p, ULong op, UChar r3, UChar x2, UChar b2, UShort d2, UChar r1)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r3) << 36;
+   the_insn |= ((ULong)x2) << 32;
+   the_insn |= ((ULong)b2) << 28;
+   the_insn |= ((ULong)d2) << 16;
+   the_insn |= ((ULong)r1) << 12;
+
+   return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_RXY(UChar *p, ULong op, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)r1) << 36;
+   the_insn |= ((ULong)x2) << 32;
+   the_insn |= ((ULong)b2) << 28;
+   the_insn |= ((ULong)dl2) << 16;
+   the_insn |= ((ULong)dh2) << 8;
+
+   return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_S(UChar *p, UInt op, UChar b2, UShort d2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)b2) << 12;
+   the_insn |= ((ULong)d2) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_SI(UChar *p, UInt op, UChar i2, UChar b1, UShort d1)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)i2) << 16;
+   the_insn |= ((ULong)b1) << 12;
+   the_insn |= ((ULong)d1) << 0;
+
+   return emit_4bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_SIL(UChar *p, ULong op, UChar b1, UShort d1, UShort i2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)b1) << 28;
+   the_insn |= ((ULong)d1) << 16;
+   the_insn |= ((ULong)i2) << 0;
+
+   return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_SIY(UChar *p, ULong op, UChar i2, UChar b1, UShort dl1, UChar dh1)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)i2) << 32;
+   the_insn |= ((ULong)b1) << 28;
+   the_insn |= ((ULong)dl1) << 16;
+   the_insn |= ((ULong)dh1) << 8;
+
+   return emit_6bytes(p, the_insn);
+}
+
+
+static UChar *
+emit_SSa(UChar *p, ULong op, UChar l, UChar b1, UShort d1, UChar b2, UShort d2)
+{
+   ULong the_insn = op;
+
+   the_insn |= ((ULong)l)  << 32;
+   the_insn |= ((ULong)b1) << 28;
+   the_insn |= ((ULong)d1) << 16;
+   the_insn |= ((ULong)b2) << 12;
+   the_insn |= ((ULong)d2) << 0;
+
+   return emit_6bytes(p, the_insn);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Functions to emit particular instructions            ---*/
+/*------------------------------------------------------------*/
+
/* ar -- ADD (32-bit), register-register. */
static UChar *
s390_emit_AR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "ar", r1, r2);

   return emit_RR(p, 0x1a00, r1, r2);
}


/* agr -- ADD (64-bit), register-register. */
static UChar *
s390_emit_AGR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "agr", r1, r2);

   return emit_RRE(p, 0xb9080000, r1, r2);
}


/* a -- ADD (32-bit), register-storage, 12-bit displacement. */
static UChar *
s390_emit_A(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UDXB), "a", r1, d2, x2, b2);

   return emit_RX(p, 0x5a000000, r1, x2, b2, d2);
}


/* ay -- ADD (32-bit), register-storage, 20-bit displacement. */
static UChar *
s390_emit_AY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "ay", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe3000000005aULL, r1, x2, b2, dl2, dh2);
}


/* ag -- ADD (64-bit), register-storage, 20-bit displacement. */
static UChar *
s390_emit_AG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "ag", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000008ULL, r1, x2, b2, dl2, dh2);
}


/* afi -- ADD IMMEDIATE (32-bit); requires the extended-immediate
   facility. */
static UChar *
s390_emit_AFI(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, INT), "afi", r1, i2);

   return emit_RIL(p, 0xc20900000000ULL, r1, i2);
}


/* agfi -- ADD IMMEDIATE (64 <- 32); requires the extended-immediate
   facility. */
static UChar *
s390_emit_AGFI(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, INT), "agfi", r1, i2);

   return emit_RIL(p, 0xc20800000000ULL, r1, i2);
}


/* ah -- ADD HALFWORD, 12-bit displacement. */
static UChar *
s390_emit_AH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UDXB), "ah", r1, d2, x2, b2);

   return emit_RX(p, 0x4a000000, r1, x2, b2, d2);
}


/* ahy -- ADD HALFWORD, 20-bit displacement. */
static UChar *
s390_emit_AHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "ahy", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe3000000007aULL, r1, x2, b2, dl2, dh2);
}


/* ahi -- ADD HALFWORD IMMEDIATE (32-bit); i2 is sign-extended for the
   trace output. */
static UChar *
s390_emit_AHI(UChar *p, UChar r1, UShort i2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, INT), "ahi", r1, (Int)(Short)i2);

   return emit_RI(p, 0xa70a0000, r1, i2);
}


/* aghi -- ADD HALFWORD IMMEDIATE (64-bit). */
static UChar *
s390_emit_AGHI(UChar *p, UChar r1, UShort i2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, INT), "aghi", r1, (Int)(Short)i2);

   return emit_RI(p, 0xa70b0000, r1, i2);
}


/* agsi -- ADD IMMEDIATE to 64-bit storage operand; requires the
   general-instructions-extension facility. */
static UChar *
s390_emit_AGSI(UChar *p, UChar i2, UChar b1, UShort dl1, UChar dh1)
{
   vassert(s390_host_has_gie);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, SDXB, INT), "agsi", dh1, dl1, 0, b1, (Int)(Char)i2);

   return emit_SIY(p, 0xeb000000007aULL, i2, b1, dl1, dh1);
}


/* asi -- ADD IMMEDIATE to 32-bit storage operand; requires the
   general-instructions-extension facility. */
static UChar *
s390_emit_ASI(UChar *p, UChar i2, UChar b1, UShort dl1, UChar dh1)
{
   vassert(s390_host_has_gie);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, SDXB, INT), "asi", dh1, dl1, 0, b1, (Int)(Char)i2);

   return emit_SIY(p, 0xeb000000006aULL, i2, b1, dl1, dh1);
}
+
+
/* nr -- AND (32-bit), register-register. */
static UChar *
s390_emit_NR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "nr", r1, r2);

   return emit_RR(p, 0x1400, r1, r2);
}


/* ngr -- AND (64-bit), register-register. */
static UChar *
s390_emit_NGR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "ngr", r1, r2);

   return emit_RRE(p, 0xb9800000, r1, r2);
}


/* n -- AND (32-bit), register-storage, 12-bit displacement. */
static UChar *
s390_emit_N(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UDXB), "n", r1, d2, x2, b2);

   return emit_RX(p, 0x54000000, r1, x2, b2, d2);
}


/* ny -- AND (32-bit), register-storage, 20-bit displacement. */
static UChar *
s390_emit_NY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "ny", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000054ULL, r1, x2, b2, dl2, dh2);
}


/* ng -- AND (64-bit), register-storage, 20-bit displacement. */
static UChar *
s390_emit_NG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "ng", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000080ULL, r1, x2, b2, dl2, dh2);
}


/* nihf -- AND IMMEDIATE into the high word of a 64-bit register;
   requires the extended-immediate facility. */
static UChar *
s390_emit_NIHF(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "nihf", r1, i2);

   return emit_RIL(p, 0xc00a00000000ULL, r1, i2);
}


/* nilf -- AND IMMEDIATE into the low word of a 64-bit register;
   requires the extended-immediate facility. */
static UChar *
s390_emit_NILF(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "nilf", r1, i2);

   return emit_RIL(p, 0xc00b00000000ULL, r1, i2);
}


/* nill -- AND IMMEDIATE into the low-low halfword of a register. */
static UChar *
s390_emit_NILL(UChar *p, UChar r1, UShort i2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "nill", r1, i2);

   return emit_RI(p, 0xa5070000, r1, i2);
}
+
+
/* basr -- BRANCH AND SAVE, register form. */
static UChar *
s390_emit_BASR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "basr", r1, r2);

   return emit_RR(p, 0x0d00, r1, r2);
}


/* bcr -- BRANCH ON CONDITION, register form; r1 is the condition
   mask (rendered via the extended-mnemonic path in the trace). */
static UChar *
s390_emit_BCR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC2(XMNM, GPR), S390_XMNM_BCR, r1, r2);

   return emit_RR(p, 0x0700, r1, r2);
}


/* brc -- BRANCH RELATIVE ON CONDITION; i2 is a 16-bit pc-relative
   halfword offset, sign-extended for the trace. */
static UChar *
s390_emit_BRC(UChar *p, UChar r1, UShort i2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC2(XMNM, PCREL), S390_XMNM_BRC, r1, (Int)(Short)i2);

   return emit_RI(p, 0xa7040000, r1, i2);
}


/* brcl -- BRANCH RELATIVE ON CONDITION LONG.
   NOTE(review): i2 is a ULong here but emit_RIL takes a UInt, so the
   high 32 bits are silently dropped; presumably all callers pass a
   value fitting in 32 bits -- confirm at call sites. */
static UChar *
s390_emit_BRCL(UChar *p, UChar r1, ULong i2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC2(XMNM, PCREL), S390_XMNM_BRCL, r1, i2);

   return emit_RIL(p, 0xc00400000000ULL, r1, i2);
}
+
+
/* cr -- COMPARE (32-bit signed), register-register. */
static UChar *
s390_emit_CR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "cr", r1, r2);

   return emit_RR(p, 0x1900, r1, r2);
}


/* cgr -- COMPARE (64-bit signed), register-register. */
static UChar *
s390_emit_CGR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "cgr", r1, r2);

   return emit_RRE(p, 0xb9200000, r1, r2);
}


/* c -- COMPARE (32-bit signed), register-storage, 12-bit displacement. */
static UChar *
s390_emit_C(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UDXB), "c", r1, d2, x2, b2);

   return emit_RX(p, 0x59000000, r1, x2, b2, d2);
}


/* cy -- COMPARE (32-bit signed), register-storage, 20-bit displacement. */
static UChar *
s390_emit_CY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "cy", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000059ULL, r1, x2, b2, dl2, dh2);
}


/* cg -- COMPARE (64-bit signed), register-storage, 20-bit displacement. */
static UChar *
s390_emit_CG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "cg", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000020ULL, r1, x2, b2, dl2, dh2);
}


/* cfi -- COMPARE IMMEDIATE (32-bit signed); requires the
   extended-immediate facility. */
static UChar *
s390_emit_CFI(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, INT), "cfi", r1, i2);

   return emit_RIL(p, 0xc20d00000000ULL, r1, i2);
}


/* cgfi -- COMPARE IMMEDIATE (64 <- 32 signed); requires the
   extended-immediate facility. */
static UChar *
s390_emit_CGFI(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, INT), "cgfi", r1, i2);

   return emit_RIL(p, 0xc20c00000000ULL, r1, i2);
}
+
+
/* cs -- COMPARE AND SWAP (32-bit), 12-bit displacement. */
static UChar *
s390_emit_CS(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC4(MNM, GPR, GPR, UDXB), "cs", r1, r3, d2, 0, b2);

   return emit_RS(p, 0xba000000, r1, r3, b2, d2);
}


/* csy -- COMPARE AND SWAP (32-bit), 20-bit displacement. */
static UChar *
s390_emit_CSY(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "csy", r1, r3, dh2, dl2, 0, b2);

   return emit_RSY(p, 0xeb0000000014ULL, r1, r3, b2, dl2, dh2);
}


/* csg -- COMPARE AND SWAP (64-bit), 20-bit displacement. */
static UChar *
s390_emit_CSG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "csg", r1, r3, dh2, dl2, 0, b2);

   return emit_RSY(p, 0xeb0000000030ULL, r1, r3, b2, dl2, dh2);
}


/* cds -- COMPARE DOUBLE AND SWAP (2 x 32-bit), 12-bit displacement. */
static UChar *
s390_emit_CDS(UChar *p, UChar r1, UChar r3, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC4(MNM, GPR, GPR, UDXB), "cds", r1, r3, d2, 0, b2);

   return emit_RS(p, 0xbb000000, r1, r3, b2, d2);
}


/* cdsy -- COMPARE DOUBLE AND SWAP (2 x 32-bit), 20-bit displacement. */
static UChar *
s390_emit_CDSY(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "cdsy", r1, r3, dh2, dl2, 0, b2);

   return emit_RSY(p, 0xeb0000000031ULL, r1, r3, b2, dl2, dh2);
}


/* cdsg -- COMPARE DOUBLE AND SWAP (2 x 64-bit), 20-bit displacement. */
static UChar *
s390_emit_CDSG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "cdsg", r1, r3, dh2, dl2, 0, b2);

   return emit_RSY(p, 0xeb000000003eULL, r1, r3, b2, dl2, dh2);
}
+
+
/* clr -- COMPARE LOGICAL (32-bit unsigned), register-register. */
static UChar *
s390_emit_CLR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "clr", r1, r2);

   return emit_RR(p, 0x1500, r1, r2);
}


/* clgr -- COMPARE LOGICAL (64-bit unsigned), register-register. */
static UChar *
s390_emit_CLGR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "clgr", r1, r2);

   return emit_RRE(p, 0xb9210000, r1, r2);
}


/* cl -- COMPARE LOGICAL (32-bit unsigned), 12-bit displacement. */
static UChar *
s390_emit_CL(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UDXB), "cl", r1, d2, x2, b2);

   return emit_RX(p, 0x55000000, r1, x2, b2, d2);
}


/* cly -- COMPARE LOGICAL (32-bit unsigned), 20-bit displacement. */
static UChar *
s390_emit_CLY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "cly", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000055ULL, r1, x2, b2, dl2, dh2);
}


/* clg -- COMPARE LOGICAL (64-bit unsigned), 20-bit displacement. */
static UChar *
s390_emit_CLG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "clg", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000021ULL, r1, x2, b2, dl2, dh2);
}


/* clfi -- COMPARE LOGICAL IMMEDIATE (32-bit); requires the
   extended-immediate facility. */
static UChar *
s390_emit_CLFI(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "clfi", r1, i2);

   return emit_RIL(p, 0xc20f00000000ULL, r1, i2);
}


/* clgfi -- COMPARE LOGICAL IMMEDIATE (64 <- 32); requires the
   extended-immediate facility. */
static UChar *
s390_emit_CLGFI(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "clgfi", r1, i2);

   return emit_RIL(p, 0xc20e00000000ULL, r1, i2);
}
+
+
/* dr -- DIVIDE (signed, 64/32 into an even-odd register pair). */
static UChar *
s390_emit_DR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "dr", r1, r2);

   return emit_RR(p, 0x1d00, r1, r2);
}


/* d -- DIVIDE (signed), register-storage, 12-bit displacement. */
static UChar *
s390_emit_D(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UDXB), "d", r1, d2, x2, b2);

   return emit_RX(p, 0x5d000000, r1, x2, b2, d2);
}


/* dlr -- DIVIDE LOGICAL (unsigned, 32-bit), register-register. */
static UChar *
s390_emit_DLR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "dlr", r1, r2);

   return emit_RRE(p, 0xb9970000, r1, r2);
}


/* dlgr -- DIVIDE LOGICAL (unsigned, 64-bit), register-register. */
static UChar *
s390_emit_DLGR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "dlgr", r1, r2);

   return emit_RRE(p, 0xb9870000, r1, r2);
}


/* dl -- DIVIDE LOGICAL (unsigned, 32-bit), register-storage. */
static UChar *
s390_emit_DL(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "dl", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000097ULL, r1, x2, b2, dl2, dh2);
}


/* dlg -- DIVIDE LOGICAL (unsigned, 64-bit), register-storage. */
static UChar *
s390_emit_DLG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "dlg", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000087ULL, r1, x2, b2, dl2, dh2);
}


/* dsgr -- DIVIDE SINGLE (signed, 64-bit), register-register. */
static UChar *
s390_emit_DSGR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "dsgr", r1, r2);

   return emit_RRE(p, 0xb90d0000, r1, r2);
}


/* dsg -- DIVIDE SINGLE (signed, 64-bit), register-storage. */
static UChar *
s390_emit_DSG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "dsg", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe3000000000dULL, r1, x2, b2, dl2, dh2);
}
+
+
/* xr -- EXCLUSIVE OR (32-bit), register-register. */
static UChar *
s390_emit_XR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "xr", r1, r2);

   return emit_RR(p, 0x1700, r1, r2);
}


/* xgr -- EXCLUSIVE OR (64-bit), register-register. */
static UChar *
s390_emit_XGR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "xgr", r1, r2);

   return emit_RRE(p, 0xb9820000, r1, r2);
}


/* x -- EXCLUSIVE OR (32-bit), register-storage, 12-bit displacement. */
static UChar *
s390_emit_X(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UDXB), "x", r1, d2, x2, b2);

   return emit_RX(p, 0x57000000, r1, x2, b2, d2);
}


/* xy -- EXCLUSIVE OR (32-bit), register-storage, 20-bit displacement. */
static UChar *
s390_emit_XY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "xy", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000057ULL, r1, x2, b2, dl2, dh2);
}


/* xg -- EXCLUSIVE OR (64-bit), register-storage, 20-bit displacement. */
static UChar *
s390_emit_XG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "xg", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000082ULL, r1, x2, b2, dl2, dh2);
}


/* xihf -- EXCLUSIVE OR IMMEDIATE, high word; requires the
   extended-immediate facility. */
static UChar *
s390_emit_XIHF(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "xihf", r1, i2);

   return emit_RIL(p, 0xc00600000000ULL, r1, i2);
}


/* xilf -- EXCLUSIVE OR IMMEDIATE, low word; requires the
   extended-immediate facility. */
static UChar *
s390_emit_XILF(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "xilf", r1, i2);

   return emit_RIL(p, 0xc00700000000ULL, r1, i2);
}


/* xc -- EXCLUSIVE OR storage-to-storage over l+1 bytes (SS-a format);
   commonly used with identical operands to zero a storage area. */
static UChar *
s390_emit_XC(UChar *p, UInt l, UChar b1, UShort d1, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, UDLB, UDXB), "xc", d1, l, b1, d2, 0, b2);

   return emit_SSa(p, 0xd70000000000ULL, l, b1, d1, b2, d2);
}
+
+
/* flogr -- FIND LEFTMOST ONE; requires the extended-immediate
   facility (flogr was introduced alongside it). */
static UChar *
s390_emit_FLOGR(UChar *p, UChar r1, UChar r2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "flogr", r1, r2);

   return emit_RRE(p, 0xb9830000, r1, r2);
}
+
+
/* ic -- INSERT CHARACTER, 12-bit displacement. */
static UChar *
s390_emit_IC(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UDXB), "ic", r1, d2, x2, b2);

   return emit_RX(p, 0x43000000, r1, x2, b2, d2);
}


/* icy -- INSERT CHARACTER, 20-bit displacement. */
static UChar *
s390_emit_ICY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "icy", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000073ULL, r1, x2, b2, dl2, dh2);
}
+
+
/* iihf -- INSERT IMMEDIATE, high word (32 bits); requires the
   extended-immediate facility. */
static UChar *
s390_emit_IIHF(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "iihf", r1, i2);

   return emit_RIL(p, 0xc00800000000ULL, r1, i2);
}


/* iihh -- INSERT IMMEDIATE, high-high halfword (bits 0-15). */
static UChar *
s390_emit_IIHH(UChar *p, UChar r1, UShort i2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "iihh", r1, i2);

   return emit_RI(p, 0xa5000000, r1, i2);
}


/* iihl -- INSERT IMMEDIATE, high-low halfword (bits 16-31). */
static UChar *
s390_emit_IIHL(UChar *p, UChar r1, UShort i2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "iihl", r1, i2);

   return emit_RI(p, 0xa5010000, r1, i2);
}


/* iilf -- INSERT IMMEDIATE, low word (32 bits); requires the
   extended-immediate facility. */
static UChar *
s390_emit_IILF(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "iilf", r1, i2);

   return emit_RIL(p, 0xc00900000000ULL, r1, i2);
}


/* iilh -- INSERT IMMEDIATE, low-high halfword (bits 32-47). */
static UChar *
s390_emit_IILH(UChar *p, UChar r1, UShort i2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "iilh", r1, i2);

   return emit_RI(p, 0xa5020000, r1, i2);
}


/* iill -- INSERT IMMEDIATE, low-low halfword (bits 48-63). */
static UChar *
s390_emit_IILL(UChar *p, UChar r1, UShort i2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UINT), "iill", r1, i2);

   return emit_RI(p, 0xa5030000, r1, i2);
}
+
+
/* ipm -- INSERT PROGRAM MASK into r1.
   NOTE(review): r2 is encoded into the RRE word but deliberately not
   shown in the trace output (the disasm uses only r1); presumably
   callers pass 0 for r2 -- confirm at call sites. */
static UChar *
s390_emit_IPM(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC2(MNM, GPR), "ipm", r1);

   return emit_RRE(p, 0xb2220000, r1, r2);
}
+
+
/* lr -- LOAD (32-bit), register-register. */
static UChar *
s390_emit_LR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "lr", r1, r2);

   return emit_RR(p, 0x1800, r1, r2);
}


/* lgr -- LOAD (64-bit), register-register. */
static UChar *
s390_emit_LGR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "lgr", r1, r2);

   return emit_RRE(p, 0xb9040000, r1, r2);
}


/* lgfr -- LOAD (64 <- 32, sign-extending), register-register. */
static UChar *
s390_emit_LGFR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, GPR), "lgfr", r1, r2);

   return emit_RRE(p, 0xb9140000, r1, r2);
}


/* l -- LOAD (32-bit), register-storage, 12-bit displacement. */
static UChar *
s390_emit_L(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, UDXB), "l", r1, d2, x2, b2);

   return emit_RX(p, 0x58000000, r1, x2, b2, d2);
}


/* ly -- LOAD (32-bit), register-storage, 20-bit displacement. */
static UChar *
s390_emit_LY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "ly", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000058ULL, r1, x2, b2, dl2, dh2);
}


/* lg -- LOAD (64-bit), register-storage, 20-bit displacement. */
static UChar *
s390_emit_LG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "lg", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000004ULL, r1, x2, b2, dl2, dh2);
}


/* lgf -- LOAD (64 <- 32, sign-extending), register-storage. */
static UChar *
s390_emit_LGF(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, SDXB), "lgf", r1, dh2, dl2, x2, b2);

   return emit_RXY(p, 0xe30000000014ULL, r1, x2, b2, dl2, dh2);
}


/* lgfi -- LOAD IMMEDIATE (64 <- 32, sign-extending); requires the
   extended-immediate facility. */
static UChar *
s390_emit_LGFI(UChar *p, UChar r1, UInt i2)
{
   vassert(s390_host_has_eimm);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, GPR, INT), "lgfi", r1, i2);

   return emit_RIL(p, 0xc00100000000ULL, r1, i2);
}
+
+
+static UChar *
+s390_emit_LTR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "ltr", r1, r2);
+
+   return emit_RR(p, 0x1200, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LTGR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "ltgr", r1, r2);
+
+   return emit_RRE(p, 0xb9020000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LT(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "lt", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000012ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LTG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "ltg", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000002ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LBR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "lbr", r1, r2);
+
+   return emit_RRE(p, 0xb9260000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LGBR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "lgbr", r1, r2);
+
+   return emit_RRE(p, 0xb9060000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LB(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "lb", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000076ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LGB(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "lgb", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000077ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LCR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "lcr", r1, r2);
+
+   return emit_RR(p, 0x1300, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LCGR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "lcgr", r1, r2);
+
+   return emit_RRE(p, 0xb9030000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LHR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "lhr", r1, r2);
+
+   return emit_RRE(p, 0xb9270000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LGHR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "lghr", r1, r2);
+
+   return emit_RRE(p, 0xb9070000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "lh", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x48000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_LHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "lhy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000078ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LGH(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "lgh", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000015ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LHI(UChar *p, UChar r1, UShort i2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, INT), "lhi", r1, (Int)(Short)i2);
+
+   return emit_RI(p, 0xa7080000, r1, i2);
+}
+
+
+static UChar *
+s390_emit_LGHI(UChar *p, UChar r1, UShort i2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, INT), "lghi", r1, (Int)(Short)i2);
+
+   return emit_RI(p, 0xa7090000, r1, i2);
+}
+
+
+static UChar *
+s390_emit_LLGFR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "llgfr", r1, r2);
+
+   return emit_RRE(p, 0xb9160000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LLGF(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "llgf", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000016ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LLCR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "llcr", r1, r2);
+
+   return emit_RRE(p, 0xb9940000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LLGCR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "llgcr", r1, r2);
+
+   return emit_RRE(p, 0xb9840000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LLC(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "llc", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000094ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LLGC(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "llgc", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000090ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LLHR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "llhr", r1, r2);
+
+   return emit_RRE(p, 0xb9950000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LLGHR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "llghr", r1, r2);
+
+   return emit_RRE(p, 0xb9850000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LLH(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "llh", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000095ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LLGH(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "llgh", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000091ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LLILF(UChar *p, UChar r1, UInt i2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UINT), "llilf", r1, i2);
+
+   return emit_RIL(p, 0xc00f00000000ULL, r1, i2);
+}
+
+
+static UChar *
+s390_emit_LLILH(UChar *p, UChar r1, UShort i2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UINT), "llilh", r1, i2);
+
+   return emit_RI(p, 0xa50e0000, r1, i2);
+}
+
+
+static UChar *
+s390_emit_LLILL(UChar *p, UChar r1, UShort i2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UINT), "llill", r1, i2);
+
+   return emit_RI(p, 0xa50f0000, r1, i2);
+}
+
+
+static UChar *
+s390_emit_MR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "mr", r1, r2);
+
+   return emit_RR(p, 0x1c00, r1, r2);
+}
+
+
+static UChar *
+s390_emit_M(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "m", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x5c000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_MFY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   vassert(s390_host_has_gie);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "mfy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe3000000005cULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_MH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "mh", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x4c000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_MHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   vassert(s390_host_has_gie);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "mhy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe3000000007cULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_MHI(UChar *p, UChar r1, UShort i2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, INT), "mhi", r1, (Int)(Short)i2);
+
+   return emit_RI(p, 0xa70c0000, r1, i2);
+}
+
+
+static UChar *
+s390_emit_MLR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "mlr", r1, r2);
+
+   return emit_RRE(p, 0xb9960000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_MLGR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "mlgr", r1, r2);
+
+   return emit_RRE(p, 0xb9860000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_ML(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "ml", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000096ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_MLG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "mlg", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000086ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_MSR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "msr", r1, r2);
+
+   return emit_RRE(p, 0xb2520000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_MSGR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "msgr", r1, r2);
+
+   return emit_RRE(p, 0xb90c0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_MS(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "ms", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x71000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_MSY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "msy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000051ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_MSG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "msg", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe3000000000cULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_MSFI(UChar *p, UChar r1, UInt i2)
+{
+   vassert(s390_host_has_gie);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, INT), "msfi", r1, i2);
+
+   return emit_RIL(p, 0xc20100000000ULL, r1, i2);
+}
+
+
+static UChar *
+s390_emit_MSGFI(UChar *p, UChar r1, UInt i2)
+{
+   vassert(s390_host_has_gie);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, INT), "msgfi", r1, i2);
+
+   return emit_RIL(p, 0xc20000000000ULL, r1, i2);
+}
+
+
+static UChar *
+s390_emit_MVC(UChar *p, UInt l, UChar b1, UShort d1, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, UDLB, UDXB), "mvc", d1, l, b1, d2, 0, b2);
+
+   return emit_SSa(p, 0xd20000000000ULL, l, b1, d1, b2, d2);
+}
+
+
+static UChar *
+s390_emit_MVI(UChar *p, UChar i2, UChar b1, UShort d1)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, UDXB, INT), "mvi", d1, 0, b1, i2);
+
+   return emit_SI(p, 0x92000000, i2, b1, d1);
+}
+
+
+static UChar *
+s390_emit_MVHHI(UChar *p, UChar b1, UShort d1, UShort i2)
+{
+   vassert(s390_host_has_gie);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, UDXB, INT), "mvhhi", d1, 0, b1, i2);
+
+   return emit_SIL(p, 0xe54400000000ULL, b1, d1, i2);
+}
+
+
+static UChar *
+s390_emit_MVHI(UChar *p, UChar b1, UShort d1, UShort i2)
+{
+   vassert(s390_host_has_gie);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, UDXB, INT), "mvhi", d1, 0, b1, i2);
+
+   return emit_SIL(p, 0xe54c00000000ULL, b1, d1, i2);
+}
+
+
+static UChar *
+s390_emit_MVGHI(UChar *p, UChar b1, UShort d1, UShort i2)
+{
+   vassert(s390_host_has_gie);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, UDXB, INT), "mvghi", d1, 0, b1, i2);
+
+   return emit_SIL(p, 0xe54800000000ULL, b1, d1, i2);
+}
+
+
+static UChar *
+s390_emit_OR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "or", r1, r2);
+
+   return emit_RR(p, 0x1600, r1, r2);
+}
+
+
+static UChar *
+s390_emit_OGR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "ogr", r1, r2);
+
+   return emit_RRE(p, 0xb9810000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_O(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "o", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x56000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_OY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "oy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000056ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_OG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "og", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000081ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_OIHF(UChar *p, UChar r1, UInt i2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UINT), "oihf", r1, i2);
+
+   return emit_RIL(p, 0xc00c00000000ULL, r1, i2);
+}
+
+
+static UChar *
+s390_emit_OILF(UChar *p, UChar r1, UInt i2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UINT), "oilf", r1, i2);
+
+   return emit_RIL(p, 0xc00d00000000ULL, r1, i2);
+}
+
+
+static UChar *
+s390_emit_OILL(UChar *p, UChar r1, UShort i2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UINT), "oill", r1, i2);
+
+   return emit_RI(p, 0xa50b0000, r1, i2);
+}
+
+
+static UChar *
+s390_emit_SLL(UChar *p, UChar r1, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "sll", r1, d2, 0, b2);
+
+   return emit_RS(p, 0x89000000, r1, 0, b2, d2);
+}
+
+
+static UChar *
+s390_emit_SLLG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "sllg", r1, r3, dh2, dl2, 0, b2);
+
+   return emit_RSY(p, 0xeb000000000dULL, r1, r3, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_SRA(UChar *p, UChar r1, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "sra", r1, d2, 0, b2);
+
+   return emit_RS(p, 0x8a000000, r1, 0, b2, d2);
+}
+
+
+static UChar *
+s390_emit_SRAG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "srag", r1, r3, dh2, dl2, 0, b2);
+
+   return emit_RSY(p, 0xeb000000000aULL, r1, r3, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_SRL(UChar *p, UChar r1, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "srl", r1, d2, 0, b2);
+
+   return emit_RS(p, 0x88000000, r1, 0, b2, d2);
+}
+
+
+static UChar *
+s390_emit_SRLG(UChar *p, UChar r1, UChar r3, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, GPR, SDXB), "srlg", r1, r3, dh2, dl2, 0, b2);
+
+   return emit_RSY(p, 0xeb000000000cULL, r1, r3, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_ST(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "st", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x50000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_STY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "sty", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000050ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_STG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "stg", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000024ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_STC(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "stc", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x42000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_STCY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "stcy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000072ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_STH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "sth", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x40000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_STHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "sthy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000070ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_SR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "sr", r1, r2);
+
+   return emit_RR(p, 0x1b00, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SGR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, GPR), "sgr", r1, r2);
+
+   return emit_RRE(p, 0xb9090000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_S(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "s", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x5b000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_SY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "sy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe3000000005bULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_SG(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "sg", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe30000000009ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_SH(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UDXB), "sh", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x4b000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_SHY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, SDXB), "shy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xe3000000007bULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_SLFI(UChar *p, UChar r1, UInt i2)
+{
+   vassert(s390_host_has_eimm);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UINT), "slfi", r1, i2);
+
+   return emit_RIL(p, 0xc20500000000ULL, r1, i2);
+}
+
+
+static UChar *
+s390_emit_SLGFI(UChar *p, UChar r1, UInt i2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, UINT), "slgfi", r1, i2);
+
+   return emit_RIL(p, 0xc20400000000ULL, r1, i2);
+}
+
+
+static UChar *
+s390_emit_LDR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "ldr", r1, r2);
+
+   return emit_RR(p, 0x2800, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LE(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, UDXB), "le", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x78000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_LD(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, UDXB), "ld", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x68000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_LEY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, SDXB), "ley", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xed0000000064ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LDY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, SDXB), "ldy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xed0000000065ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_LFPC(UChar *p, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC2(MNM, UDXB), "lfpc", d2, 0, b2);
+
+   return emit_S(p, 0xb29d0000, b2, d2);
+}
+
+
+static UChar *
+s390_emit_LDGR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_fgx);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, GPR), "ldgr", r1, r2);
+
+   return emit_RRE(p, 0xb3c10000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LGDR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_fgx);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, FPR), "lgdr", r1, r2);
+
+   return emit_RRE(p, 0xb3cd0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LZER(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC2(MNM, FPR), "lzer", r1);
+
+   return emit_RRE(p, 0xb3740000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LZDR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC2(MNM, FPR), "lzdr", r1);
+
+   return emit_RRE(p, 0xb3750000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SFPC(UChar *p, UChar r1)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC2(MNM, GPR), "sfpc", r1);
+
+   return emit_RRE(p, 0xb3840000, r1, 0);
+}
+
+
+static UChar *
+s390_emit_STE(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, UDXB), "ste", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x70000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_STD(UChar *p, UChar r1, UChar x2, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, UDXB), "std", r1, d2, x2, b2);
+
+   return emit_RX(p, 0x60000000, r1, x2, b2, d2);
+}
+
+
+static UChar *
+s390_emit_STEY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, SDXB), "stey", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xed0000000066ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_STDY(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl2, UChar dh2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, SDXB), "stdy", r1, dh2, dl2, x2, b2);
+
+   return emit_RXY(p, 0xed0000000067ULL, r1, x2, b2, dl2, dh2);
+}
+
+
+static UChar *
+s390_emit_STFPC(UChar *p, UChar b2, UShort d2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC2(MNM, UDXB), "stfpc", d2, 0, b2);
+
+   return emit_S(p, 0xb29c0000, b2, d2);
+}
+
+
+static UChar *
+s390_emit_AEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "aebr", r1, r2);
+
+   return emit_RRE(p, 0xb30a0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_ADBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "adbr", r1, r2);
+
+   return emit_RRE(p, 0xb31a0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_AXBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "axbr", r1, r2);
+
+   return emit_RRE(p, 0xb34a0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "cebr", r1, r2);
+
+   return emit_RRE(p, 0xb3090000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "cdbr", r1, r2);
+
+   return emit_RRE(p, 0xb3190000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "cxbr", r1, r2);
+
+   return emit_RRE(p, 0xb3490000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CEFBRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, GPR), "cefbr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT),
+                     "cefbra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3940000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDFBRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, GPR), "cdfbr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT),
+                     "cdfbra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3950000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXFBRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, GPR), "cxfbr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT),
+                     "cxfbra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3960000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CEGBRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, GPR), "cegbr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT),
+                     "cegbra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3a40000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDGBRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, GPR), "cdgbr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT),
+                     "cdgbra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3a50000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXGBRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, GPR), "cxgbr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT),
+                     "cxgbra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3a60000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CELFBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "celfbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3900000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDLFBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdlfbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3910000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXLFBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxlfbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3920000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CELGBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "celgbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3a00000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDLGBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdlgbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3a10000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXLGBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxlgbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3a20000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLFEBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfebr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb39c0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLFDBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfdbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb39d0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLFXBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfxbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb39e0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLGEBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgebr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3ac0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLGDBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgdbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3ad0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLGXBR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgxbr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3ae0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CFEBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cfebr", r1, r3, r2);
+
+   return emit_RRF3(p, 0xb3980000, r3, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CFDBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cfdbr", r1, r3, r2);
+
+   return emit_RRF3(p, 0xb3990000, r3, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CFXBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cfxbr", r1, r3, r2);
+
+   return emit_RRF3(p, 0xb39a0000, r3, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CGEBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cgebr", r1, r3, r2);
+
+   return emit_RRF3(p, 0xb3a80000, r3, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CGDBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cgdbr", r1, r3, r2);
+
+   return emit_RRF3(p, 0xb3a90000, r3, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CGXBR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cgxbr", r1, r3, r2);
+
+   return emit_RRF3(p, 0xb3aa0000, r3, r1, r2);
+}
+
+
/* Emit DEBR (RRE format, opcode 0xb30d), operands r1 and r2, both
   FPRs. */
static UChar *
s390_emit_DEBR(UChar *p, UChar r1, UChar r2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, FPR, FPR), "debr", r1, r2);

   return emit_RRE(p, 0xb30d0000, r1, r2);
}
+
+
+static UChar *
+s390_emit_DDBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "ddbr", r1, r2);
+
+   return emit_RRE(p, 0xb31d0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_DXBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "dxbr", r1, r2);
+
+   return emit_RRE(p, 0xb34d0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LCEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lcebr", r1, r2);
+
+   return emit_RRE(p, 0xb3030000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LCDBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lcdbr", r1, r2);
+
+   return emit_RRE(p, 0xb3130000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LCXBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lcxbr", r1, r2);
+
+   return emit_RRE(p, 0xb3430000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LDEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "ldebr", r1, r2);
+
+   return emit_RRE(p, 0xb3040000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LXDBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lxdbr", r1, r2);
+
+   return emit_RRE(p, 0xb3050000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LXEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lxebr", r1, r2);
+
+   return emit_RRE(p, 0xb3060000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LNEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lnebr", r1, r2);
+
+   return emit_RRE(p, 0xb3010000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LNDBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lndbr", r1, r2);
+
+   return emit_RRE(p, 0xb3110000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LNXBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lnxbr", r1, r2);
+
+   return emit_RRE(p, 0xb3410000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LPEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lpebr", r1, r2);
+
+   return emit_RRE(p, 0xb3000000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LPDBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lpdbr", r1, r2);
+
+   return emit_RRE(p, 0xb3100000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LPXBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "lpxbr", r1, r2);
+
+   return emit_RRE(p, 0xb3400000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LEDBRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, FPR), "ledbr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, FPR, UINT),
+                     "ledbra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3440000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LDXBRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, FPR), "ldxbr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, FPR, UINT),
+                     "ldxbra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3450000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LEXBRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, FPR), "lexbr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, FPR, UINT),
+                     "lexbra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3460000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_MEEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "meebr", r1, r2);
+
+   return emit_RRE(p, 0xb3170000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_MDBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "mdbr", r1, r2);
+
+   return emit_RRE(p, 0xb31c0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_MXBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "mxbr", r1, r2);
+
+   return emit_RRE(p, 0xb34c0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_MAEBR(UChar *p, UChar r1, UChar r3, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, FPR), "maebr", r1, r3, r2);
+
+   return emit_RRF(p, 0xb30e0000, r1, r3, r2);
+}
+
+
+static UChar *
+s390_emit_MADBR(UChar *p, UChar r1, UChar r3, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, FPR), "madbr", r1, r3, r2);
+
+   return emit_RRF(p, 0xb31e0000, r1, r3, r2);
+}
+
+
+static UChar *
+s390_emit_MSEBR(UChar *p, UChar r1, UChar r3, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, FPR), "msebr", r1, r3, r2);
+
+   return emit_RRF(p, 0xb30f0000, r1, r3, r2);
+}
+
+
+static UChar *
+s390_emit_MSDBR(UChar *p, UChar r1, UChar r3, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, FPR), "msdbr", r1, r3, r2);
+
+   return emit_RRF(p, 0xb31f0000, r1, r3, r2);
+}
+
+
+static UChar *
+s390_emit_SQEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "sqebr", r1, r2);
+
+   return emit_RRE(p, 0xb3140000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SQDBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "sqdbr", r1, r2);
+
+   return emit_RRE(p, 0xb3150000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SQXBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "sqxbr", r1, r2);
+
+   return emit_RRE(p, 0xb3160000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SEBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "sebr", r1, r2);
+
+   return emit_RRE(p, 0xb30b0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SDBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "sdbr", r1, r2);
+
+   return emit_RRE(p, 0xb31b0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SXBR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "sxbr", r1, r2);
+
+   return emit_RRE(p, 0xb34b0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_ADTRA(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0 || s390_host_has_fpext);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m4 == 0)
+         s390_disasm(ENC4(MNM, FPR, FPR, FPR), "adtr", r1, r2, r3);
+      else
+         s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "adtra", r1, r2, r3, m4);
+   }
+
+   return emit_RRF4(p, 0xb3d20000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_AXTRA(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0 || s390_host_has_fpext);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m4 == 0)
+         s390_disasm(ENC4(MNM, FPR, FPR, FPR), "axtr", r1, r2, r3);
+      else
+         s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "axtra", r1, r2, r3, m4);
+   }
+
+   return emit_RRF4(p, 0xb3da0000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDTR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "cdtr", r1, r2);
+
+   return emit_RRE(p, 0xb3e40000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXTR(UChar *p, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "cxtr", r1, r2);
+
+   return emit_RRE(p, 0xb3ec0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDGTRA(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0);
+   vassert(m3 == 0 || s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m3 == 0)
+         s390_disasm(ENC3(MNM, FPR, GPR), "cdgtr", r1, r2);
+      else
+         s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdgtra", r1, m3, r2, m4);
+   }
+
+   return emit_RRF2(p, 0xb3f10000, m3, m4, r1, r2);
+}
+
+
/* Emit CXGTR (RRF format, opcode 0xb3f9): FPR r1 receives the
   conversion of GPR r2.  Requires the DFP facility; m3 and m4 must
   both be 0 (see inner comment). */
static UChar *
s390_emit_CXGTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
{
   vassert(s390_host_has_dfp);
   vassert(m4 == 0);
   /* rounding mode m3 is not considered, as the corresponding
      IRop (Iop_I64StoD128) does not take rounding mode. */
   vassert(m3 == 0);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC3(MNM, FPR, GPR), "cxgtr", r1, r2);

   return emit_RRF2(p, 0xb3f90000, m3, m4, r1, r2);
}
+
+
+static UChar *
+s390_emit_CDFTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdftr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb9510000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXFTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxftr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb9590000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDLFTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdlftr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb9530000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXLFTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxlftr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb95b0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CDLGTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cdlgtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb9520000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CXLGTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, GPR, UINT), "cxlgtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb95a0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CEDTR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "cedtr", r1, r2);
+
+   return emit_RRE(p, 0xb3f40000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CEXTR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, FPR, FPR), "cextr", r1, r2);
+
+   return emit_RRE(p, 0xb3fc0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CFDTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "cfdtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb9410000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CFXTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "cfxtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb9490000, m3, m4, r1, r2);
+}
+
+
/* Emit CGDTR (RRF format, opcode 0xb3e1): GPR r1 receives the
   conversion of FPR r2, with rounding-mode field m3; m4 must be 0.
   The assertion admits m3 values 1..7 only when the floating-point
   extension facility is present -- those encodings are not available
   otherwise. */
static UChar *
s390_emit_CGDTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
{
   vassert(s390_host_has_dfp);
   vassert(m4 == 0);
   vassert(s390_host_has_fpext || m3 < 1 || m3 > 7);

   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cgdtr", r1, m3, r2);

   return emit_RRF2(p, 0xb3e10000, m3, m4, r1, r2);
}
+
+
+static UChar *
+s390_emit_CGXTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext || m3 < 1 || m3 > 7);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, UINT, FPR), "cgxtr", r1, m3, r2);
+
+   return emit_RRF2(p, 0xb3e90000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLFDTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfdtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb9430000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLFXTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clfxtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb94b0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLGDTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgdtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb9420000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_CLGXTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(m4 == 0);
+   vassert(s390_host_has_dfp);
+   vassert(s390_host_has_fpext);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, GPR, UINT, FPR, UINT), "clgxtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb94a0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_DDTRA(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0 || s390_host_has_fpext);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m4 == 0)
+         s390_disasm(ENC4(MNM, FPR, FPR, FPR), "ddtr", r1, r2, r3);
+      else
+         s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "ddtra", r1, r2, r3, m4);
+   }
+
+   return emit_RRF4(p, 0xb3d10000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_DXTRA(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0 || s390_host_has_fpext);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m4 == 0)
+         s390_disasm(ENC4(MNM, FPR, FPR, FPR), "dxtr", r1, r2, r3);
+      else
+         s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "dxtra", r1, r2, r3, m4);
+   }
+
+   return emit_RRF4(p, 0xb3d90000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_EEDTR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, FPR), "eedtr", r1, r2);
+
+   return emit_RRE(p, 0xb3e50000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_EEXTR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, FPR), "eextr", r1, r2);
+
+   return emit_RRE(p, 0xb3ed0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_ESDTR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, FPR), "esdtr", r1, r2);
+
+   return emit_RRE(p, 0xb3e70000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_ESXTR(UChar *p, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC3(MNM, GPR, FPR), "esxtr", r1, r2);
+
+   return emit_RRE(p, 0xb3ef0000, r1, r2);
+}
+
+
+static UChar *
+s390_emit_IEDTR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, GPR), "iedtr", r1, r3, r2);
+
+   return emit_RRF(p, 0xb3f60000, r3, r1, r2);
+}
+
+
+static UChar *
+s390_emit_IEXTR(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, GPR), "iextr", r1, r3, r2);
+
+   return emit_RRF(p, 0xb3fe0000, r3, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LDETR(UChar *p, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, UINT), "ldetr", r1, r2, m4);
+
+   return emit_RRF5(p, 0xb3d40000, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LXDTR(UChar *p, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, UINT), "lxdtr", r1, r2, m4);
+
+   return emit_RRF5(p, 0xb3dc0000, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LEDTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext || m3 < 1 || m3 > 7);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, FPR, UINT), "ledtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3d50000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_LDXTR(UChar *p, UChar m3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0);
+   vassert(s390_host_has_fpext || m3 < 1 || m3 > 7);
+
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, UINT, FPR, UINT), "ldxtr", r1, m3, r2, m4);
+
+   return emit_RRF2(p, 0xb3dd0000, m3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_MDTRA(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0 || s390_host_has_fpext);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m4 == 0)
+         s390_disasm(ENC4(MNM, FPR, FPR, FPR), "mdtr", r1, r2, r3);
+      else
+         s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "mdtra", r1, r2, r3, m4);
+   }
+
+   return emit_RRF4(p, 0xb3d00000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_MXTRA(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0 || s390_host_has_fpext);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m4 == 0)
+         s390_disasm(ENC4(MNM, FPR, FPR, FPR), "mxtr", r1, r2, r3);
+      else
+         s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "mxtra", r1, r2, r3, m4);
+   }
+
+   return emit_RRF4(p, 0xb3d80000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+emit_E(UChar *p, UInt op)
+{
+   ULong the_insn = op;
+
+   return emit_2bytes(p, the_insn);
+}
+
+
/* Emit PFPO (E format, opcode 0x010a).  The operation and its
   operands are implicit (GR0 and fixed FPRs, per the architecture).
   Requires the PFPO facility. */
static UChar *
s390_emit_PFPO(UChar *p)
{
   vassert(s390_host_has_pfpo);
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
      s390_disasm(ENC1(MNM), "pfpo");
   }

   return emit_E(p, 0x010a);
}
+
+
+static UChar *
+s390_emit_QADTR(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "qadtr", r1, r3, r2, m4);
+
+   return emit_RRF4(p, 0xb3f50000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_QAXTR(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "qaxtr", r1, r3, r2, m4);
+
+   return emit_RRF4(p, 0xb3fd0000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_RRDTR(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, FPR, GPR, UINT), "rrdtr", r1, r3, r2, m4);
+
+   return emit_RRF4(p, 0xb3f70000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_RRXTR(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC5(MNM, FPR, FPR, GPR, UINT), "rrxtr", r1, r3, r2, m4);
+
+   return emit_RRF4(p, 0xb3ff0000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SDTRA(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0 || s390_host_has_fpext);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m4 == 0)
+         s390_disasm(ENC4(MNM, FPR, FPR, FPR), "sdtr", r1, r2, r3);
+      else
+         s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "sdtra", r1, r2, r3, m4);
+   }
+
+   return emit_RRF4(p, 0xb3d30000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SXTRA(UChar *p, UChar r3, UChar m4, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   vassert(m4 == 0 || s390_host_has_fpext);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+      if (m4 == 0)
+         s390_disasm(ENC4(MNM, FPR, FPR, FPR), "sxtr", r1, r2, r3);
+      else
+         s390_disasm(ENC5(MNM, FPR, FPR, FPR, UINT), "sxtra", r1, r2, r3, m4);
+   }
+
+   return emit_RRF4(p, 0xb3db0000, r3, m4, r1, r2);
+}
+
+
+static UChar *
+s390_emit_SLDT(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, UDXB), "sldt", r1, r3, 0, 0, r2);
+
+   return emit_RXF(p, 0xED0000000040ULL, r3, 0, r2, 0, r1);
+}
+
+
+static UChar *
+s390_emit_SLXT(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, UDXB), "slxt", r1, r3, 0, 0, r2);
+
+   return emit_RXF(p, 0xED0000000048ULL, r3, 0, r2, 0, r1);
+}
+
+
+static UChar *
+s390_emit_SRDT(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, UDXB), "srdt", r1, r3, 0, 0, r2);
+
+   return emit_RXF(p, 0xED0000000041ULL, r3, 0, r2, 0, r1);
+}
+
+
+static UChar *
+s390_emit_SRXT(UChar *p, UChar r3, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_dfp);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, FPR, FPR, UDXB), "srxt", r1, r3, 0, 0, r2);
+
+   return emit_RXF(p, 0xED0000000049ULL, r3, 0, r2, 0, r1);
+}
+
+
+static UChar *
+s390_emit_LOCGR(UChar *p, UChar m3, UChar r1, UChar r2)
+{
+   vassert(s390_host_has_lsc);
+   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
+      s390_disasm(ENC4(MNM, GPR, GPR, UINT), "locgr", r1, r2, m3);
+
+   return emit_RRF3(p, 0xb9e20000, m3, r1, r2);
+}
+
+
/* Emit LOC (RSY format, opcode 0xeb..f2): load-on-condition of the
   word at b2 + displacement (dh2:dl2) into GPR r1, gated on condition
   mask m3.  NOTE(review): unlike s390_emit_LOCGR, there is no
   vassert(s390_host_has_lsc) here -- presumably all callers guard on
   the load/store-on-condition facility themselves; verify. */
static UChar *
s390_emit_LOC(UChar *p, UChar r1, UChar m3, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC4(MNM, GPR, UINT, SDXB), "loc", r1, m3, dh2, dl2, 0, b2);

   return emit_RSY(p, 0xeb00000000f2ULL, r1, m3, b2, dl2, dh2);
}
+
+
/* Emit LOCG (RSY format, opcode 0xeb..e2): 64-bit variant of LOC --
   load-on-condition of the doubleword at b2 + (dh2:dl2) into GPR r1,
   gated on condition mask m3.  NOTE(review): no
   vassert(s390_host_has_lsc) here either (cf. s390_emit_LOCGR);
   presumably callers guard on the facility -- verify. */
static UChar *
s390_emit_LOCG(UChar *p, UChar r1, UChar m3, UChar b2, UShort dl2, UChar dh2)
{
   if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM))
      s390_disasm(ENC4(MNM, GPR, UINT, SDXB), "locg", r1, m3, dh2, dl2, 0, b2);

   return emit_RSY(p, 0xeb00000000e2ULL, r1, m3, b2, dl2, dh2);
}
+
+
+/* Provide a symbolic name for register "R0" */
+#define R0 0
+
/* Split up a 20-bit displacement into its high and low piece
   suitable for passing as function arguments.  The argument is
   parenthesised so that an expression such as DISP20(a + b) expands
   correctly (previously the cast bound only to the first operand).
   Note: the expansion is TWO comma-separated values; use it only in
   argument lists. */
#define DISP20(d) (((UInt)(d)) & 0xFFF), ((((UInt)(d)) >> 12) & 0xFF)
+
+/*---------------------------------------------------------------*/
+/*--- Helper functions                                        ---*/
+/*---------------------------------------------------------------*/
+
+static __inline__ Bool
+uint_fits_signed_16bit(UInt val)
+{
+   UInt v = val & 0xFFFFu;
+
+   /* sign extend */
+   v = (Int)(v << 16) >> 16;
+
+   return val == v;
+}
+
+
+static __inline__ Bool
+ulong_fits_signed_16bit(ULong val)
+{
+   ULong v = val & 0xFFFFu;
+
+   /* sign extend */
+   v = (Long)(v << 48) >> 48;
+
+   return val == v;
+}
+
+
+static __inline__ Bool
+ulong_fits_signed_32bit(ULong val)
+{
+   ULong v = val & 0xFFFFFFFFu;
+
+   /* sign extend */
+   v = (Long)(v << 32) >> 32;
+
+   return val == v;
+}
+
+
+static __inline__ Bool
+ulong_fits_unsigned_32bit(ULong val)
+{
+   return (val & 0xFFFFFFFFu) == val;
+}
+
+
/* Load a 64-bit immediate VAL into register REG.  Picks the shortest
   sequence the host facilities allow:
     - one insn  : LGHI (signed 16-bit); LLILF / LGFI (32-bit, with
                   the extended-immediate facility)
     - two insns : IIHF+IILF (eimm, full 64 bits), or LLILH+IILL
                   (no eimm, unsigned 32-bit value)
     - four insns: IIHH+IIHL+IILH+IILL (no eimm, full 64 bits,
                   inserting VAL 16 bits at a time). */
static UChar *
s390_emit_load_64imm(UChar *p, UChar reg, ULong val)
{
   if (ulong_fits_signed_16bit(val)) {
      return s390_emit_LGHI(p, reg, val);
   }

   if (s390_host_has_eimm) {
      if (ulong_fits_unsigned_32bit(val)) {
         return s390_emit_LLILF(p, reg, val);
      }
      if (ulong_fits_signed_32bit(val)) {
         /* LGFI's sign extension will recreate the correct 64-bit value */
         return s390_emit_LGFI(p, reg, val);
      }
      /* Do it in two steps: upper half [0:31] and lower half [32:63] */
      p =  s390_emit_IIHF(p, reg, val >> 32);
      return s390_emit_IILF(p, reg, val & 0xFFFFFFFF);
   }

   /* Fall back */
   if (ulong_fits_unsigned_32bit(val)) {
      p = s390_emit_LLILH(p, reg, (val >> 16) & 0xFFFF); /* sets val[32:47]
                                                            val[0:31] = 0 */
      p = s390_emit_IILL(p, reg, val & 0xFFFF);          /* sets val[48:63] */
      return p;
   }

   p = s390_emit_IIHH(p, reg, (val >> 48) & 0xFFFF);
   p = s390_emit_IIHL(p, reg, (val >> 32) & 0xFFFF);
   p = s390_emit_IILH(p, reg, (val >> 16) & 0xFFFF);
   p = s390_emit_IILL(p, reg, val & 0xFFFF);

   return p;
}
+
+/* Load a 32-bit immediate VAL into register REG. */
+static UChar *
+s390_emit_load_32imm(UChar *p, UChar reg, UInt val)
+{
+   if (uint_fits_signed_16bit(val)) {
+      /* LHI's sign extension will recreate the correct 32-bit value */
+      return s390_emit_LHI(p, reg, val);
+   }
+   if (s390_host_has_eimm) {
+      return s390_emit_IILF(p, reg, val);
+   }
+   /* Without the extended-immediate facility, build the value from two
+      halfword inserts:
+      val[0:15]  --> (val >> 16) & 0xFFFF
+      val[16:31] --> val & 0xFFFF */
+   p = s390_emit_IILH(p, reg, (val >> 16) & 0xFFFF);
+   return s390_emit_IILL(p, reg, val & 0xFFFF);
+}
+
+/*------------------------------------------------------------*/
+/*--- Wrapper functions                                    ---*/
+/*------------------------------------------------------------*/
+
+/* r1[32:63],r1+1[32:63] = r1+1[32:63] * memory[op2addr][0:31]
+   Note: all the fallback paths below clobber R0. */
+static UChar *
+s390_emit_MFYw(UChar *p, UChar r1, UChar x, UChar b,  UShort dl, UChar dh)
+{
+   if (s390_host_has_gie) {
+      return s390_emit_MFY(p, r1, x, b, dl, dh);
+   }
+
+   /* Load from memory into R0, then MULTIPLY with R1 */
+   p = s390_emit_LY(p, R0, x, b, dl, dh);
+   return s390_emit_MR(p, r1, R0);
+}
+
+/* r1[32:63] = r1[32:63] * memory[op2addr][0:15] */
+static UChar *
+s390_emit_MHYw(UChar *p, UChar r1, UChar x, UChar b,  UShort dl, UChar dh)
+{
+   if (s390_host_has_gie) {
+      return s390_emit_MHY(p, r1, x, b, dl, dh);
+   }
+
+   /* Load from memory into R0, then MULTIPLY with R1 */
+   p = s390_emit_LHY(p, R0, x, b, dl, dh);
+   return s390_emit_MSR(p, r1, R0);
+}
+
+/* r1[32:63] = r1[32:63] * i2 */
+static UChar *
+s390_emit_MSFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_gie) {
+      return s390_emit_MSFI(p, r1, i2);
+   }
+
+   /* Load I2 into R0; then MULTIPLY R0 with R1 */
+   p = s390_emit_load_32imm(p, R0, i2);
+   return s390_emit_MSR(p, r1, R0);
+}
+
+
+/* r1[32:63] = r1[32:63] & i2 */
+static UChar *
+s390_emit_NILFw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_NILF(p, r1, i2);
+   }
+
+   /* Load I2 into R0; then AND R0 with R1 */
+   p = s390_emit_load_32imm(p, R0, i2);
+   return s390_emit_NR(p, r1, R0);
+}
+
+
+/* r1[32:63] = r1[32:63] | i2 */
+static UChar *
+s390_emit_OILFw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_OILF(p, r1, i2);
+   }
+
+   /* Load I2 into R0; then OR R0 with R1 */
+   p = s390_emit_load_32imm(p, R0, i2);
+   return s390_emit_OR(p, r1, R0);
+}
+
+
+/* r1[32:63] = r1[32:63] ^ i2 */
+static UChar *
+s390_emit_XILFw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_XILF(p, r1, i2);
+   }
+
+   /* Load I2 into R0; then XOR R0 with R1 */
+   p = s390_emit_load_32imm(p, R0, i2);
+   return s390_emit_XR(p, r1, R0);
+}
+
+
+/*  r1[32:63] = sign_extend(r2[56:63]) */
+static UChar *
+s390_emit_LBRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LBR(p, r1, r2);
+   }
+
+   /* No LBR available: sign extend by a shift-left / arithmetic
+      shift-right pair.  R0 as the shift base means "no register"; the
+      shift amount is the immediate displacement. */
+   p = s390_emit_LR(p, r1, r2);               /* r1 = r2 */
+   p = s390_emit_SLL(p, r1, R0, 24);          /* r1 = r1 << 24  */
+   return s390_emit_SRA(p, r1, R0, 24);       /* r1 = r1 >>a 24 */
+}
+
+
+/*  r1[0:63] = sign_extend(r2[56:63]) */
+static UChar *
+s390_emit_LGBRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LGBR(p, r1, r2);
+   }
+
+   p = s390_emit_LR(p, r1, r2);                       /* r1 = r2 */
+   p = s390_emit_SLLG(p, r1, r1, R0, DISP20(56));     /* r1 = r1 << 56  */
+   return s390_emit_SRAG(p, r1, r1, R0, DISP20(56));  /* r1 = r1 >>a 56 */
+}
+
+
+/* r1[32:63] = sign_extend(r2[48:63]) */
+static UChar *
+s390_emit_LHRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LHR(p, r1, r2);
+   }
+
+   p = s390_emit_LR(p, r1, r2);               /* r1 = r2 */
+   p = s390_emit_SLL(p, r1, R0, 16);          /* r1 = r1 << 16  */
+   return s390_emit_SRA(p, r1, R0, 16);       /* r1 = r1 >>a 16 */
+}
+
+
+/* r1[0:63] = sign_extend(r2[48:63]) */
+static UChar *
+s390_emit_LGHRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LGHR(p, r1, r2);
+   }
+
+   p = s390_emit_LR(p, r1, r2);               /* r1 = r2 */
+   p = s390_emit_SLLG(p, r1, r1, R0, DISP20(48));     /* r1 = r1 << 48  */
+   return s390_emit_SRAG(p, r1, r1, R0, DISP20(48));  /* r1 = r1 >>a 48 */
+}
+
+
+/* r1[0:63] = sign_extend(i2) */
+static UChar *
+s390_emit_LGFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LGFI(p, r1, i2);
+   }
+
+   /* Fallback clobbers R0. */
+   p = s390_emit_load_32imm(p, R0, i2);
+   return s390_emit_LGFR(p, r1, R0);
+}
+
+
+/* r1[32:63] = zero_extend(r2[56:63]) */
+static UChar *
+s390_emit_LLCRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLCR(p, r1, r2);
+   }
+
+   /* No LLCR: copy, then mask out everything but the low byte.
+      Clobbers R0. */
+   p = s390_emit_LR(p, r1, r2);
+   p = s390_emit_LHI(p, R0, 0xFF);
+   return s390_emit_NR(p, r1, R0);
+}
+
+
+/* r1[0:63] = zero_extend(r2[56:63]) */
+static UChar *
+s390_emit_LLGCRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLGCR(p, r1, r2);
+   }
+
+   p = s390_emit_LR(p, r1, r2);
+   p = s390_emit_LLILL(p, R0, 0xFF);
+   return s390_emit_NGR(p, r1, R0);
+}
+
+
+/* r1[32:63] = zero_extend(r2[48:63]) */
+static UChar *
+s390_emit_LLHRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLHR(p, r1, r2);
+   }
+
+   p = s390_emit_LR(p, r1, r2);
+   p = s390_emit_LLILL(p, R0, 0xFFFF);
+   return s390_emit_NR(p, r1, R0);
+}
+
+
+/* r1[0:63] = zero_extend(r2[48:63]) */
+static UChar *
+s390_emit_LLGHRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLGHR(p, r1, r2);
+   }
+
+   p = s390_emit_LR(p, r1, r2);
+   p = s390_emit_LLILL(p, R0, 0xFFFF);
+   return s390_emit_NGR(p, r1, R0);
+}
+
+
+/* r1[32:63] = zero_extend(mem[op2addr][0:7]) */
+static UChar *
+s390_emit_LLCw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl, UChar dh)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLC(p, r1, x2, b2, dl, dh);
+   }
+
+   /* IC only takes a 12-bit displacement; use ICY when the high part
+      of the 20-bit displacement is non-zero. */
+   if (dh == 0) {
+      p = s390_emit_IC(p, r1, x2, b2, dl);
+   } else {
+      p = s390_emit_ICY(p, r1, x2, b2, dl, dh);
+   }
+   p = s390_emit_LLILL(p, R0, 0xFF);
+   return s390_emit_NR(p, r1, R0);
+}
+
+
+/* r1[32:63] = zero_extend(mem[op2addr][0:15]) */
+static UChar *
+s390_emit_LLHw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl, UChar dh)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLH(p, r1, x2, b2, dl, dh);
+   }
+
+   p = s390_emit_LLGH(p, r1, x2, b2, dl, dh);
+   p = s390_emit_LLILL(p, R0, 0xFFFF);
+   return s390_emit_NR(p, r1, R0);
+}
+
+
+/* r1[0:63] = zero_extend(i2) */
+static UChar *
+s390_emit_LLILFw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LLILF(p, r1, i2);
+   }
+
+   p = s390_emit_LLILH(p, r1, (i2 >> 16) & 0xFFFF);  /* i2[0:15] */
+   return s390_emit_OILL(p, r1, i2 & 0xFFFF);
+}
+
+
+/* r1[32:63] = r1[32:63] + i2
+   Note: all the fallback paths below clobber R0. */
+static UChar *
+s390_emit_AFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_AFI(p, r1, i2);
+   }
+   /* Load 32 bit immediate to R0 then add */
+   p = s390_emit_load_32imm(p, R0, i2);
+   return s390_emit_AR(p, r1, R0);
+}
+
+
+/* r1[32:63] = r1[32:63] - i2 */
+static UChar *
+s390_emit_SLFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_SLFI(p, r1, i2);
+   }
+
+   /* Load 32 bit immediate to R0 then subtract */
+   p = s390_emit_load_32imm(p, R0, i2);
+   return s390_emit_SR(p, r1, R0);
+}
+
+
+/* r1[0:63] = r1[0:63] - zero_extend(i2) */
+static UChar *
+s390_emit_SLGFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_SLGFI(p, r1, i2);
+   }
+
+   /* Load zero-extended 32 bit immediate to R0 then subtract.
+      (i2 is zero extended by the UInt -> ULong conversion.) */
+   p = s390_emit_load_64imm(p, R0, i2);
+   return s390_emit_SGR(p, r1, R0);
+}
+
+
+/* Load-and-test of a 32-bit memory operand into r1. */
+static UChar *
+s390_emit_LTw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl, UChar dh)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LT(p, r1, x2, b2, dl, dh);
+   }
+   /* Load 32 bit from memory to R0 then compare.
+      L takes a 12-bit displacement only; use LY when the high part of
+      the 20-bit displacement is non-zero. */
+   if (dh == 0) {
+      p = s390_emit_L(p, R0, x2, b2, dl);
+   } else {
+      p = s390_emit_LY(p, R0, x2, b2, dl, dh);
+   }
+   return s390_emit_LTR(p, r1, R0);
+}
+
+
+/* Load-and-test of a 64-bit memory operand into r1. */
+static UChar *
+s390_emit_LTGw(UChar *p, UChar r1, UChar x2, UChar b2, UShort dl, UChar dh)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_LTG(p, r1, x2, b2, dl, dh);
+   }
+   /* Load 64 bit from memory to R0 then compare */
+   p = s390_emit_LG(p, R0, x2, b2, dl, dh);
+   return s390_emit_LTGR(p, r1, R0);
+}
+
+
+/* Signed compare of r1 against 32-bit immediate i2. */
+static UChar *
+s390_emit_CFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_CFI(p, r1, i2);
+   }
+   /* Load 32 bit immediate to R0 then compare */
+   p = s390_emit_load_32imm(p, R0, i2);
+   return s390_emit_CR(p, r1, R0);
+}
+
+
+/* Unsigned (logical) compare of r1 against 32-bit immediate i2. */
+static UChar *
+s390_emit_CLFIw(UChar *p, UChar r1, UInt i2)
+{
+   if (s390_host_has_eimm) {
+      return s390_emit_CLFI(p, r1, i2);
+   }
+   /* Load 32 bit immediate to R0 then compare */
+   p = s390_emit_load_32imm(p, R0, i2);
+   return s390_emit_CLR(p, r1, R0);
+}
+
+
+/* Transfer the bits of FPR r2 into GPR r1 unchanged. */
+static UChar *
+s390_emit_LGDRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_fgx) {
+      return s390_emit_LGDR(p, r1, r2);
+   }
+
+   /* Store the FPR at memory[sp - 8]. This is safe because SP grows towards
+      smaller addresses and is 8-byte aligned. Then load the GPR from that
+      memory location. */
+   p = s390_emit_STDY(p, r2, R0, S390_REGNO_STACK_POINTER, DISP20(-8));
+   return s390_emit_LG(p, r1, R0, S390_REGNO_STACK_POINTER, DISP20(-8));
+}
+
+
+/* Transfer the bits of GPR r2 into FPR r1 unchanged. */
+static UChar *
+s390_emit_LDGRw(UChar *p, UChar r1, UChar r2)
+{
+   if (s390_host_has_fgx) {
+      return s390_emit_LDGR(p, r1, r2);
+   }
+
+   /* Store the GPR at memory[sp - 8]. This is safe because SP grows towards
+      smaller addresses and is 8-byte aligned. Then load the FPR from that
+      memory location. */
+   p = s390_emit_STG(p, r2, R0, S390_REGNO_STACK_POINTER, DISP20(-8));
+   return s390_emit_LDY(p, r1, R0, S390_REGNO_STACK_POINTER, DISP20(-8));
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Constructors for the various s390_insn kinds            ---*/
+/*---------------------------------------------------------------*/
+
+/* Build an insn: dst = load of SIZE bytes from address SRC. */
+s390_insn *
+s390_insn_load(UChar size, HReg dst, s390_amode *src)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_LOAD;
+   insn->size = size;
+   insn->variant.load.src  = src;
+   insn->variant.load.dst  = dst;
+
+   vassert(size == 1 || size == 2 || size == 4 || size == 8);
+
+   return insn;
+}
+
+
+/* Build an insn: store the SIZE-byte register SRC at address DST. */
+s390_insn *
+s390_insn_store(UChar size, s390_amode *dst, HReg src)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_STORE;
+   insn->size = size;
+   insn->variant.store.src  = src;
+   insn->variant.store.dst  = dst;
+
+   vassert(size == 1 || size == 2 || size == 4 || size == 8);
+
+   return insn;
+}
+
+
+/* Build a register-to-register move insn of SIZE bytes. */
+s390_insn *
+s390_insn_move(UChar size, HReg dst, HReg src)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_MOVE;
+   insn->size = size;
+   insn->variant.move.src  = src;
+   insn->variant.move.dst  = dst;
+
+   vassert(size == 1 || size == 2 || size == 4 || size == 8);
+
+   return insn;
+}
+
+
+/* Build a memory-to-memory copy insn of SIZE bytes. */
+s390_insn *
+s390_insn_memcpy(UChar size, s390_amode *dst, s390_amode *src)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   /* This insn will be mapped to MVC which requires base register
+      plus 12-bit displacement */
+   vassert(src->tag == S390_AMODE_B12);
+   vassert(dst->tag == S390_AMODE_B12);
+
+   insn->tag  = S390_INSN_MEMCPY;
+   insn->size = size;
+   insn->variant.memcpy.src = src;
+   insn->variant.memcpy.dst = dst;
+
+   vassert(size == 1 || size == 2 || size == 4 || size == 8);
+
+   return insn;
+}
+
+
+/* Build an insn: if COND holds, dst = src. */
+s390_insn *
+s390_insn_cond_move(UChar size, s390_cc_t cond, HReg dst, s390_opnd_RMI src)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_COND_MOVE;
+   insn->size = size;
+   insn->variant.cond_move.cond = cond;
+   insn->variant.cond_move.src  = src;
+   insn->variant.cond_move.dst  = dst;
+
+   vassert(size == 1 || size == 2 || size == 4 || size == 8);
+
+   return insn;
+}
+
+
+/* Build an insn: dst = VALUE (SIZE-byte immediate). */
+s390_insn *
+s390_insn_load_immediate(UChar size, HReg dst, ULong value)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_LOAD_IMMEDIATE;
+   insn->size = size;
+   insn->variant.load_immediate.dst   = dst;
+   insn->variant.load_immediate.value = value;
+
+   return insn;
+}
+
+
+/* Build an ALU insn: dst = dst <op> op2, with <op> selected by TAG. */
+s390_insn *
+s390_insn_alu(UChar size, s390_alu_t tag, HReg dst, s390_opnd_RMI op2)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_ALU;
+   insn->size = size;
+   insn->variant.alu.tag = tag;
+   insn->variant.alu.dst = dst;
+   insn->variant.alu.op2 = op2;
+
+   return insn;
+}
+
+
+/* Build a widening multiply insn; the product is written to the
+   (real) register pair DST_HI:DST_LO. */
+s390_insn *
+s390_insn_mul(UChar size, HReg dst_hi, HReg dst_lo, s390_opnd_RMI op2,
+              Bool signed_multiply)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   /* The destination pair must be real registers. */
+   vassert(! hregIsVirtual(dst_hi));
+   vassert(! hregIsVirtual(dst_lo));
+
+   insn->tag  = signed_multiply ? S390_INSN_SMUL : S390_INSN_UMUL;
+   insn->size = size;
+   insn->variant.mul.dst_hi = dst_hi;
+   insn->variant.mul.dst_lo = dst_lo;
+   insn->variant.mul.op2 = op2;
+
+   return insn;
+}
+
+
+/* Build a divide insn; the dividend is the (real) register pair
+   OP1_HI:OP1_LO, which also receives remainder and quotient. */
+s390_insn *
+s390_insn_div(UChar size, HReg op1_hi, HReg op1_lo, s390_opnd_RMI op2,
+              Bool signed_divide)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+   vassert(! hregIsVirtual(op1_hi));
+   vassert(! hregIsVirtual(op1_lo));
+
+   insn->tag  = signed_divide ? S390_INSN_SDIV : S390_INSN_UDIV;
+   insn->size = size;
+   insn->variant.div.op1_hi = op1_hi;
+   insn->variant.div.op1_lo = op1_lo;
+   insn->variant.div.op2 = op2;
+
+   return insn;
+}
+
+
+/* Build a 64-bit signed divide-single insn (DSGR-style). */
+s390_insn *
+s390_insn_divs(UChar size, HReg rem, HReg op1, s390_opnd_RMI op2)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 8);
+   vassert(! hregIsVirtual(op1));
+   vassert(! hregIsVirtual(rem));
+
+   insn->tag  = S390_INSN_DIVS;
+   insn->size = size;
+   insn->variant.divs.rem = rem;   /* remainder */
+   insn->variant.divs.op1 = op1;   /* also quotient */
+   insn->variant.divs.op2 = op2;
+
+   return insn;
+}
+
+
+/* Build a count-leading-zeroes insn; CLOBBER is a scratch register
+   that will be overwritten. */
+s390_insn *
+s390_insn_clz(UChar size, HReg num_bits, HReg clobber, s390_opnd_RMI src)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 8);
+   vassert(! hregIsVirtual(num_bits));
+   vassert(! hregIsVirtual(clobber));
+
+   insn->tag  = S390_INSN_CLZ;
+   insn->size = size;
+   insn->variant.clz.num_bits = num_bits;
+   insn->variant.clz.clobber  = clobber;
+   insn->variant.clz.src = src;
+
+   return insn;
+}
+
+
+/* Build a unary-operation insn: dst = <op>(opnd), <op> selected by TAG. */
+s390_insn *
+s390_insn_unop(UChar size, s390_unop_t tag, HReg dst, s390_opnd_RMI opnd)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_UNOP;
+   insn->size = size;
+   insn->variant.unop.tag = tag;
+   insn->variant.unop.dst = dst;
+   insn->variant.unop.src = opnd;
+
+   return insn;
+}
+
+
+/* Build an insn that sets the condition code according to SRC. */
+s390_insn *
+s390_insn_test(UChar size, s390_opnd_RMI src)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag  = S390_INSN_TEST;
+   insn->size = size;
+   insn->variant.test.src = src;
+
+   return insn;
+}
+
+
+/* Build an insn: dst = (condition code satisfies COND) ? 1 : 0. */
+s390_insn *
+s390_insn_cc2bool(HReg dst, s390_cc_t cond)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_CC2BOOL;
+   insn->size = 0;   /* does not matter */
+   insn->variant.cc2bool.cond = cond;
+   insn->variant.cc2bool.dst  = dst;
+
+   return insn;
+}
+
+
+/* Build a compare-and-swap insn. OLD_MEM receives the previous
+   memory contents. */
+s390_insn *
+s390_insn_cas(UChar size, HReg op1, s390_amode *op2, HReg op3, HReg old_mem)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+   /* The address must not use an index register. */
+   vassert(hregNumber(op2->x) == 0);
+   vassert(op2->tag == S390_AMODE_B12 || op2->tag == S390_AMODE_B20);
+
+   insn->tag  = S390_INSN_CAS;
+   insn->size = size;
+   insn->variant.cas.op1 = op1;
+   insn->variant.cas.op2 = op2;
+   insn->variant.cas.op3 = op3;
+   insn->variant.cas.old_mem = old_mem;
+
+   return insn;
+}
+
+
+/* Build a compare-double-and-swap insn operating on a register pair;
+   details live in a separately allocated s390_cdas node. */
+s390_insn *
+s390_insn_cdas(UChar size, HReg op1_high, HReg op1_low, s390_amode *op2,
+               HReg op3_high, HReg op3_low, HReg old_mem_high, HReg old_mem_low,
+               HReg scratch)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+   s390_cdas *cdas = LibVEX_Alloc_inline(sizeof(s390_cdas));
+
+   vassert(size == 4 || size == 8);
+   vassert(hregNumber(op2->x) == 0);
+   vassert(hregNumber(scratch) == 1);  /* r0,r1 used as scratch reg pair */
+   vassert(op2->tag == S390_AMODE_B12 || op2->tag == S390_AMODE_B20);
+
+   insn->tag  = S390_INSN_CDAS;
+   insn->size = size;
+   insn->variant.cdas.details = cdas;
+
+   cdas->op1_high = op1_high;
+   cdas->op1_low  = op1_low;
+   cdas->op2 = op2;
+   cdas->op3_high = op3_high;
+   cdas->op3_low  = op3_low;
+   cdas->old_mem_high = old_mem_high;
+   cdas->old_mem_low  = old_mem_low;
+   cdas->scratch = scratch;
+
+   return insn;
+}
+
+
+/* Build a compare insn: compare SRC1 with SRC2, signed or unsigned. */
+s390_insn *
+s390_insn_compare(UChar size, HReg src1, s390_opnd_RMI src2,
+                  Bool signed_comparison)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag  = S390_INSN_COMPARE;
+   insn->size = size;
+   insn->variant.compare.src1 = src1;
+   insn->variant.compare.src2 = src2;
+   insn->variant.compare.signed_comparison = signed_comparison;
+
+   return insn;
+}
+
+
+/* Build a (conditional) call to the helper function at TARGET;
+   details live in a separately allocated s390_helper_call node. */
+s390_insn *
+s390_insn_helper_call(s390_cc_t cond, Addr64 target, UInt num_args,
+                      const HChar *name, RetLoc rloc)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+   s390_helper_call *helper_call = LibVEX_Alloc_inline(sizeof(s390_helper_call));
+
+   insn->tag  = S390_INSN_HELPER_CALL;
+   insn->size = 0;  /* does not matter */
+   insn->variant.helper_call.details = helper_call;
+
+   helper_call->cond = cond;
+   helper_call->target = target;
+   helper_call->num_args = num_args;
+   helper_call->name = name;
+   helper_call->rloc = rloc;
+
+   vassert(is_sane_RetLoc(rloc));
+
+   return insn;
+}
+
+
+/* Build a ternary BFP insn: dst = <op>(dst, op2, op3). */
+s390_insn *
+s390_insn_bfp_triop(UChar size, s390_bfp_triop_t tag, HReg dst, HReg op2,
+                    HReg op3)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag  = S390_INSN_BFP_TRIOP;
+   insn->size = size;
+   insn->variant.bfp_triop.tag = tag;
+   insn->variant.bfp_triop.dst = dst;
+   insn->variant.bfp_triop.op2 = op2;
+   insn->variant.bfp_triop.op3 = op3;
+
+   return insn;
+}
+
+
+/* Build a binary BFP insn for 4/8-byte operands; the _lo register
+   fields are unused and set to INVALID_HREG. */
+s390_insn *
+s390_insn_bfp_binop(UChar size, s390_bfp_binop_t tag, HReg dst, HReg op2)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag  = S390_INSN_BFP_BINOP;
+   insn->size = size;
+   insn->variant.bfp_binop.tag = tag;
+   insn->variant.bfp_binop.dst_hi = dst;
+   insn->variant.bfp_binop.op2_hi = op2;
+   insn->variant.bfp_binop.dst_lo = INVALID_HREG;
+   insn->variant.bfp_binop.op2_lo = INVALID_HREG;
+
+   return insn;
+}
+
+
+/* Build a unary BFP insn for 4/8-byte operands. */
+s390_insn *
+s390_insn_bfp_unop(UChar size, s390_bfp_unop_t tag, HReg dst, HReg op)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag  = S390_INSN_BFP_UNOP;
+   insn->size = size;
+   insn->variant.bfp_unop.tag = tag;
+   insn->variant.bfp_unop.dst_hi = dst;
+   insn->variant.bfp_unop.op_hi  = op;
+   insn->variant.bfp_unop.dst_lo = INVALID_HREG;
+   insn->variant.bfp_unop.op_lo  = INVALID_HREG;
+
+   return insn;
+}
+
+
+/* Build a BFP compare insn for 4/8-byte operands; DST receives the
+   comparison result. */
+s390_insn *
+s390_insn_bfp_compare(UChar size, HReg dst, HReg op1, HReg op2)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag  = S390_INSN_BFP_COMPARE;
+   insn->size = size;
+   insn->variant.bfp_compare.dst = dst;
+   insn->variant.bfp_compare.op1_hi = op1;
+   insn->variant.bfp_compare.op2_hi = op2;
+   insn->variant.bfp_compare.op1_lo = INVALID_HREG;
+   insn->variant.bfp_compare.op2_lo = INVALID_HREG;
+
+   return insn;
+}
+
+
+/* Build a BFP conversion insn for 4/8-byte operands. */
+s390_insn *
+s390_insn_bfp_convert(UChar size, s390_bfp_conv_t tag, HReg dst, HReg op,
+                      s390_bfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag  = S390_INSN_BFP_CONVERT;
+   insn->size = size;
+   insn->variant.bfp_convert.tag = tag;
+   insn->variant.bfp_convert.dst_hi = dst;
+   insn->variant.bfp_convert.op_hi  = op;
+   insn->variant.bfp_convert.dst_lo = INVALID_HREG;
+   insn->variant.bfp_convert.op_lo  = INVALID_HREG;
+   insn->variant.bfp_convert.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+/* Check validity of a register pair for 128-bit FP. Valid register
+   pairs are (0,2), (1,3), (4, 6), (5, 7), (8, 10), (9, 11), (12, 14),
+   and (13, 15). */
+static Bool
+is_valid_fp128_regpair(HReg hi, HReg lo)
+{
+   UInt hi_regno = hregNumber(hi);
+   UInt lo_regno = hregNumber(lo);
+
+   /* lo must be hi + 2, and bit 1 of hi must be clear -- this admits
+      exactly the pairs listed above. */
+   if (lo_regno != hi_regno + 2) return False;
+   if ((hi_regno & 0x2) != 0) return False;
+
+   return True;
+}
+
+/* Build a binary BFP insn on 16-byte operands held in register pairs. */
+s390_insn *
+s390_insn_bfp128_binop(UChar size, s390_bfp_binop_t tag, HReg dst_hi,
+                       HReg dst_lo, HReg op2_hi, HReg op2_lo)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 16);
+   vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
+   vassert(is_valid_fp128_regpair(op2_hi, op2_lo));
+
+   insn->tag  = S390_INSN_BFP_BINOP;
+   insn->size = size;
+   insn->variant.bfp_binop.tag = tag;
+   insn->variant.bfp_binop.dst_hi = dst_hi;
+   insn->variant.bfp_binop.dst_lo = dst_lo;
+   insn->variant.bfp_binop.op2_hi = op2_hi;
+   insn->variant.bfp_binop.op2_lo = op2_lo;
+
+   return insn;
+}
+
+
+/* Build a unary BFP insn on 16-byte operands held in register pairs. */
+s390_insn *
+s390_insn_bfp128_unop(UChar size, s390_bfp_unop_t tag, HReg dst_hi,
+                      HReg dst_lo, HReg op_hi, HReg op_lo)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 16);
+   vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
+   vassert(is_valid_fp128_regpair(op_hi, op_lo));
+
+   insn->tag  = S390_INSN_BFP_UNOP;
+   insn->size = size;
+   insn->variant.bfp_unop.tag = tag;
+   insn->variant.bfp_unop.dst_hi = dst_hi;
+   insn->variant.bfp_unop.dst_lo = dst_lo;
+   insn->variant.bfp_unop.op_hi = op_hi;
+   insn->variant.bfp_unop.op_lo = op_lo;
+
+   return insn;
+}
+
+
+/* Build a BFP compare insn on 16-byte operands held in register pairs. */
+s390_insn *
+s390_insn_bfp128_compare(UChar size, HReg dst, HReg op1_hi, HReg op1_lo,
+                         HReg op2_hi, HReg op2_lo)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 16);
+   vassert(is_valid_fp128_regpair(op1_hi, op1_lo));
+   vassert(is_valid_fp128_regpair(op2_hi, op2_lo));
+
+   insn->tag  = S390_INSN_BFP_COMPARE;
+   insn->size = size;
+   insn->variant.bfp_compare.dst = dst;
+   insn->variant.bfp_compare.op1_hi = op1_hi;
+   insn->variant.bfp_compare.op1_lo = op1_lo;
+   insn->variant.bfp_compare.op2_hi = op2_hi;
+   insn->variant.bfp_compare.op2_lo = op2_lo;
+
+   return insn;
+}
+
+
+/* Common worker for the 128-bit BFP conversions below. SIZE is the
+   size of the non-128-bit operand. */
+static s390_insn *
+s390_insn_bfp128_convert(UChar size, s390_bfp_conv_t tag, HReg dst_hi,
+                         HReg dst_lo, HReg op_hi, HReg op_lo,
+                         s390_bfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   if (size == 16) {
+      /* From smaller size to 16 bytes */
+      vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
+      vassert(hregIsInvalid(op_lo));
+   } else {
+      /* From 16 bytes to smaller size */
+      vassert(is_valid_fp128_regpair(op_hi, op_lo));
+   }
+
+   insn->tag  = S390_INSN_BFP_CONVERT;
+   insn->size = size;
+   insn->variant.bfp_convert.tag = tag;
+   insn->variant.bfp_convert.dst_hi = dst_hi;
+   insn->variant.bfp_convert.dst_lo = dst_lo;
+   insn->variant.bfp_convert.op_hi = op_hi;
+   insn->variant.bfp_convert.op_lo = op_lo;
+   insn->variant.bfp_convert.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+/* Build a conversion from a smaller BFP value into a 128-bit one. */
+s390_insn *
+s390_insn_bfp128_convert_to(UChar size, s390_bfp_conv_t tag, HReg dst_hi,
+                            HReg dst_lo, HReg op)
+{
+   /* Conversion to bfp128 never requires a rounding mode. Provide default
+      rounding mode. It will not be used when emitting insns. */
+   s390_bfp_round_t rounding_mode = S390_BFP_ROUND_NEAREST_EVEN;
+
+   return s390_insn_bfp128_convert(size, tag, dst_hi, dst_lo, op,
+                                   INVALID_HREG, rounding_mode);
+}
+
+
+/* Build a conversion from a 128-bit BFP value into a smaller one. */
+s390_insn *
+s390_insn_bfp128_convert_from(UChar size, s390_bfp_conv_t tag, HReg dst_hi,
+                              HReg dst_lo, HReg op_hi, HReg op_lo,
+                              s390_bfp_round_t rounding_mode)
+{
+   return s390_insn_bfp128_convert(size, tag, dst_hi, dst_lo, op_hi, op_lo,
+                                   rounding_mode);
+}
+
+
+/* Build a binary DFP insn for 8-byte operands; details live in a
+   separately allocated s390_dfp_binop node. */
+s390_insn *
+s390_insn_dfp_binop(UChar size, s390_dfp_binop_t tag, HReg dst, HReg op2,
+                    HReg op3, s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+   s390_dfp_binop *dfp_binop = LibVEX_Alloc_inline(sizeof(s390_dfp_binop));
+
+   vassert(size == 8);
+
+   insn->tag  = S390_INSN_DFP_BINOP;
+   insn->size = size;
+   insn->variant.dfp_binop.details = dfp_binop;
+
+   dfp_binop->tag = tag;
+   dfp_binop->dst_hi = dst;
+   dfp_binop->op2_hi = op2;
+   dfp_binop->op3_hi = op3;
+   dfp_binop->dst_lo = INVALID_HREG;
+   dfp_binop->op2_lo = INVALID_HREG;
+   dfp_binop->op3_lo = INVALID_HREG;
+   dfp_binop->rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+/* Build a unary DFP insn for 8-byte operands. */
+s390_insn *
+s390_insn_dfp_unop(UChar size, s390_dfp_unop_t tag, HReg dst, HReg op)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 8);
+
+   insn->tag  = S390_INSN_DFP_UNOP;
+   insn->size = size;
+   insn->variant.dfp_unop.tag = tag;
+   insn->variant.dfp_unop.dst_hi = dst;
+   insn->variant.dfp_unop.op_hi  = op;
+   insn->variant.dfp_unop.dst_lo = INVALID_HREG;
+   insn->variant.dfp_unop.op_lo  = INVALID_HREG;
+
+   return insn;
+}
+
+
+/* Build a DFP insn combining an integer operand OP2 with a DFP
+   operand OP3 (8-byte operands). */
+s390_insn *
+s390_insn_dfp_intop(UChar size, s390_dfp_intop_t tag, HReg dst, HReg op2,
+                    HReg op3)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 8);
+
+   insn->tag  = S390_INSN_DFP_INTOP;
+   insn->size = size;
+   insn->variant.dfp_intop.tag = tag;
+   insn->variant.dfp_intop.dst_hi = dst;
+   insn->variant.dfp_intop.op2    = op2;
+   insn->variant.dfp_intop.op3_hi = op3;
+   insn->variant.dfp_intop.dst_lo = INVALID_HREG;
+   insn->variant.dfp_intop.op3_lo = INVALID_HREG;
+
+   return insn;
+}
+
+
+/* Build a DFP compare insn for 8-byte operands. */
+s390_insn *
+s390_insn_dfp_compare(UChar size, s390_dfp_cmp_t tag, HReg dst,
+                      HReg op1, HReg op2)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 8);
+
+   insn->tag  = S390_INSN_DFP_COMPARE;
+   insn->size = size;
+   insn->variant.dfp_compare.tag = tag;
+   insn->variant.dfp_compare.dst = dst;
+   insn->variant.dfp_compare.op1_hi = op1;
+   insn->variant.dfp_compare.op2_hi = op2;
+   insn->variant.dfp_compare.op1_lo = INVALID_HREG;
+   insn->variant.dfp_compare.op2_lo = INVALID_HREG;
+
+   return insn;
+}
+
+
+/* Build a DFP conversion insn for 4/8-byte operands. */
+s390_insn *
+s390_insn_dfp_convert(UChar size, s390_dfp_conv_t tag, HReg dst, HReg op,
+                      s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag  = S390_INSN_DFP_CONVERT;
+   insn->size = size;
+   insn->variant.dfp_convert.tag = tag;
+   insn->variant.dfp_convert.dst_hi = dst;
+   insn->variant.dfp_convert.op_hi  = op;
+   insn->variant.dfp_convert.dst_lo = INVALID_HREG;
+   insn->variant.dfp_convert.op_lo  = INVALID_HREG;
+   insn->variant.dfp_convert.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+/* Build a DFP reround insn: dst = op3 rerounded per op2 (8-byte). */
+s390_insn *
+s390_insn_dfp_reround(UChar size, HReg dst, HReg op2, HReg op3,
+                      s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 8);
+
+   insn->tag  = S390_INSN_DFP_REROUND;
+   insn->size = size;
+   insn->variant.dfp_reround.dst_hi = dst;
+   insn->variant.dfp_reround.op2 = op2;
+   insn->variant.dfp_reround.op3_hi = op3;
+   insn->variant.dfp_reround.dst_lo = INVALID_HREG;
+   insn->variant.dfp_reround.op3_lo = INVALID_HREG;
+   insn->variant.dfp_reround.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+/* Build an FP conversion insn for 4/8-byte operands; details live in
+   a separately allocated s390_fp_convert node. R1 is an auxiliary
+   GPR used by the conversion. */
+s390_insn *
+s390_insn_fp_convert(UChar size, s390_fp_conv_t tag, HReg dst, HReg op,
+                     HReg r1, s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+   s390_fp_convert *fp_convert = LibVEX_Alloc_inline(sizeof(s390_fp_convert));
+
+   vassert(size == 4 || size == 8);
+
+   insn->tag  = S390_INSN_FP_CONVERT;
+   insn->size = size;
+   insn->variant.fp_convert.details = fp_convert;
+
+   fp_convert->tag = tag;
+   fp_convert->dst_hi = dst;
+   fp_convert->op_hi  = op;
+   fp_convert->r1 = r1;
+   fp_convert->dst_lo = INVALID_HREG;
+   fp_convert->op_lo  = INVALID_HREG;
+   fp_convert->rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+/* Build an FP conversion insn on 16-byte operands held in register
+   pairs. */
+s390_insn *
+s390_insn_fp128_convert(UChar size, s390_fp_conv_t tag, HReg dst_hi,
+                        HReg dst_lo, HReg op_hi, HReg op_lo, HReg r1,
+                        s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+   s390_fp_convert *fp_convert = LibVEX_Alloc_inline(sizeof(s390_fp_convert));
+
+   vassert(size == 16);
+
+   insn->tag  = S390_INSN_FP_CONVERT;
+   insn->size = size;
+   insn->variant.fp_convert.details = fp_convert;
+
+   fp_convert->tag = tag;
+   fp_convert->dst_hi = dst_hi;
+   fp_convert->dst_lo = dst_lo;
+   fp_convert->op_hi  = op_hi;
+   fp_convert->r1 = r1;
+   fp_convert->op_lo  = op_lo;
+   fp_convert->rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+/* Build a binary DFP insn on 16-byte operands held in register pairs. */
+s390_insn *
+s390_insn_dfp128_binop(UChar size, s390_dfp_binop_t tag, HReg dst_hi,
+                       HReg dst_lo, HReg op2_hi, HReg op2_lo, HReg op3_hi,
+                       HReg op3_lo, s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+   s390_dfp_binop *dfp_binop = LibVEX_Alloc_inline(sizeof(s390_dfp_binop));
+
+   vassert(size == 16);
+   vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
+   vassert(is_valid_fp128_regpair(op2_hi, op2_lo));
+   vassert(is_valid_fp128_regpair(op3_hi, op3_lo));
+
+   insn->tag  = S390_INSN_DFP_BINOP;
+   insn->size = size;
+   insn->variant.dfp_binop.details = dfp_binop;
+
+   dfp_binop->tag = tag;
+   dfp_binop->dst_hi = dst_hi;
+   dfp_binop->dst_lo = dst_lo;
+   dfp_binop->op2_hi = op2_hi;
+   dfp_binop->op2_lo = op2_lo;
+   dfp_binop->op3_hi = op3_hi;
+   dfp_binop->op3_lo = op3_lo;
+   dfp_binop->rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+/* Build a unary DFP insn taking a 16-byte operand in a register pair
+   and producing an 8-byte result. */
+s390_insn *
+s390_insn_dfp128_unop(UChar size, s390_dfp_unop_t tag, HReg dst,
+                      HReg op_hi, HReg op_lo)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   /* destination is an 8 byte integer value */
+   vassert(size == 8);
+   vassert(is_valid_fp128_regpair(op_hi, op_lo));
+
+   insn->tag  = S390_INSN_DFP_UNOP;
+   insn->size = size;
+   insn->variant.dfp_unop.tag = tag;
+   insn->variant.dfp_unop.dst_hi = dst;
+   insn->variant.dfp_unop.dst_lo = INVALID_HREG;
+   insn->variant.dfp_unop.op_hi = op_hi;
+   insn->variant.dfp_unop.op_lo = op_lo;
+
+   return insn;
+}
+
+
+/* Build a DFP insn combining an integer operand OP2 with a 16-byte
+   DFP operand held in a register pair. */
+s390_insn *
+s390_insn_dfp128_intop(UChar size, s390_dfp_intop_t tag, HReg dst_hi,
+                       HReg dst_lo, HReg op2, HReg op3_hi, HReg op3_lo)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 16);
+   vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
+   vassert(is_valid_fp128_regpair(op3_hi, op3_lo));
+
+   insn->tag  = S390_INSN_DFP_INTOP;
+   insn->size = size;
+   insn->variant.dfp_intop.tag = tag;
+   insn->variant.dfp_intop.dst_hi = dst_hi;
+   insn->variant.dfp_intop.dst_lo = dst_lo;
+   insn->variant.dfp_intop.op2    = op2;
+   insn->variant.dfp_intop.op3_hi = op3_hi;
+   insn->variant.dfp_intop.op3_lo = op3_lo;
+
+   return insn;
+}
+
+
+/* Build a DFP compare insn on 16-byte operands held in register pairs. */
+s390_insn *
+s390_insn_dfp128_compare(UChar size, s390_dfp_cmp_t tag, HReg dst, HReg op1_hi,
+                         HReg op1_lo, HReg op2_hi, HReg op2_lo)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 16);
+   vassert(is_valid_fp128_regpair(op1_hi, op1_lo));
+   vassert(is_valid_fp128_regpair(op2_hi, op2_lo));
+
+   insn->tag  = S390_INSN_DFP_COMPARE;
+   insn->size = size;
+   insn->variant.dfp_compare.tag = tag;
+   insn->variant.dfp_compare.dst = dst;
+   insn->variant.dfp_compare.op1_hi = op1_hi;
+   insn->variant.dfp_compare.op1_lo = op1_lo;
+   insn->variant.dfp_compare.op2_hi = op2_hi;
+   insn->variant.dfp_compare.op2_lo = op2_lo;
+
+   return insn;
+}
+
+
+static s390_insn *
+s390_insn_dfp128_convert(UChar size, s390_dfp_conv_t tag, HReg dst_hi,
+                         HReg dst_lo, HReg op_hi, HReg op_lo,
+                         s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   if (size == 16) {
+      /* From smaller size to 16 bytes */
+      vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
+      vassert(hregIsInvalid(op_lo));
+   } else {
+      /* From 16 bytes to smaller size */
+      vassert(is_valid_fp128_regpair(op_hi, op_lo));
+   }
+
+   insn->tag  = S390_INSN_DFP_CONVERT;
+   insn->size = size;
+   insn->variant.dfp_convert.tag = tag;
+   insn->variant.dfp_convert.dst_hi = dst_hi;
+   insn->variant.dfp_convert.dst_lo = dst_lo;
+   insn->variant.dfp_convert.op_hi = op_hi;
+   insn->variant.dfp_convert.op_lo = op_lo;
+   insn->variant.dfp_convert.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_dfp128_convert_to(UChar size, s390_dfp_conv_t tag, HReg dst_hi,
+                            HReg dst_lo, HReg op)
+{
+   /* Conversion to dfp128 never requires a rounding mode. Provide default
+      rounding mode. It will not be used when emitting insns. */
+   s390_dfp_round_t rounding_mode = S390_DFP_ROUND_NEAREST_EVEN_4;
+
+   return s390_insn_dfp128_convert(size, tag, dst_hi, dst_lo, op,
+                                   INVALID_HREG, rounding_mode);
+}
+
+
+s390_insn *
+s390_insn_dfp128_convert_from(UChar size, s390_dfp_conv_t tag, HReg dst_hi,
+                              HReg dst_lo, HReg op_hi, HReg op_lo,
+                              s390_dfp_round_t rounding_mode)
+{
+   return s390_insn_dfp128_convert(size, tag, dst_hi, dst_lo, op_hi, op_lo,
+                                   rounding_mode);
+}
+
+
+s390_insn *
+s390_insn_dfp128_reround(UChar size, HReg dst_hi, HReg dst_lo, HReg op2,
+                         HReg op3_hi, HReg op3_lo,
+                         s390_dfp_round_t rounding_mode)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 16);
+   vassert(is_valid_fp128_regpair(dst_hi, dst_lo));
+   vassert(is_valid_fp128_regpair(op3_hi, op3_lo));
+
+   insn->tag  = S390_INSN_DFP_REROUND;
+   insn->size = size;
+   insn->variant.dfp_reround.dst_hi = dst_hi;
+   insn->variant.dfp_reround.dst_lo = dst_lo;
+   insn->variant.dfp_reround.op2    = op2;
+   insn->variant.dfp_reround.op3_hi = op3_hi;
+   insn->variant.dfp_reround.op3_lo = op3_lo;
+   insn->variant.dfp_reround.rounding_mode = rounding_mode;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_mfence(void)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_MFENCE;
+   insn->size = 0;   /* not needed */
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_mimm(UChar size, s390_amode *dst, ULong value)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   /* This insn will be mapped to insns that require base register
+      plus 12-bit displacement */
+   vassert(dst->tag == S390_AMODE_B12);
+
+   insn->tag  = S390_INSN_MIMM;
+   insn->size = size;
+   insn->variant.mimm.dst = dst;
+   insn->variant.mimm.value = value;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_madd(UChar size, s390_amode *dst, UChar delta, ULong value)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(size == 4 || size == 8);
+
+   /* This insn will be mapped to an ASI or AGSI so we can only allow base
+      register plus 12-bit / 20-bit displacement. */
+   vassert(dst->tag == S390_AMODE_B12 || dst->tag == S390_AMODE_B20);
+   /* ASI and AGSI require the GIE facility */
+   vassert(s390_host_has_gie);
+
+   insn->tag  = S390_INSN_MADD;
+   insn->size = size;
+   insn->variant.madd.dst   = dst;
+   insn->variant.madd.delta = delta;
+   insn->variant.madd.value = value;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_set_fpc_bfprm(UChar size, HReg mode)
+{
+   vassert(size == 4);
+
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_SET_FPC_BFPRM;
+   insn->size = size;
+   insn->variant.set_fpc_bfprm.mode = mode;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_set_fpc_dfprm(UChar size, HReg mode)
+{
+   vassert(size == 4);
+
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_SET_FPC_DFPRM;
+   insn->size = size;
+   insn->variant.set_fpc_dfprm.mode = mode;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_xdirect(s390_cc_t cond, Addr64 dst, s390_amode *guest_IA,
+                  Bool to_fast_entry)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(guest_IA->tag == S390_AMODE_B12);
+
+   insn->tag  = S390_INSN_XDIRECT;
+   insn->size = 0;   /* does not matter */
+
+   insn->variant.xdirect.cond = cond;
+   insn->variant.xdirect.dst = dst;
+   insn->variant.xdirect.guest_IA = guest_IA;
+   insn->variant.xdirect.to_fast_entry = to_fast_entry;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_xindir(s390_cc_t cond, HReg dst, s390_amode *guest_IA)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(guest_IA->tag == S390_AMODE_B12);
+
+   insn->tag  = S390_INSN_XINDIR;
+   insn->size = 0;   /* does not matter */
+
+   insn->variant.xindir.cond = cond;
+   insn->variant.xindir.dst = dst;
+   insn->variant.xindir.guest_IA = guest_IA;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_xassisted(s390_cc_t cond, HReg dst, s390_amode *guest_IA,
+                    IRJumpKind kind)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(guest_IA->tag == S390_AMODE_B12);
+
+   insn->tag  = S390_INSN_XASSISTED;
+   insn->size = 0;   /* does not matter */
+
+   insn->variant.xassisted.cond = cond;
+   insn->variant.xassisted.dst = dst;
+   insn->variant.xassisted.guest_IA = guest_IA;
+   insn->variant.xassisted.kind = kind;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_evcheck(s390_amode *counter, s390_amode *fail_addr)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   vassert(counter->tag   == S390_AMODE_B12);
+   vassert(fail_addr->tag == S390_AMODE_B12);
+
+   insn->tag  = S390_INSN_EVCHECK;
+   insn->size = 0;   /* does not matter */
+
+   insn->variant.evcheck.counter = counter;
+   insn->variant.evcheck.fail_addr = fail_addr;
+
+   return insn;
+}
+
+
+s390_insn *
+s390_insn_profinc(void)
+{
+   s390_insn *insn = LibVEX_Alloc_inline(sizeof(s390_insn));
+
+   insn->tag  = S390_INSN_PROFINC;
+   insn->size = 0;   /* does not matter */
+
+   return insn;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Debug print                                             ---*/
+/*---------------------------------------------------------------*/
+
/* Return a human-readable name for condition code CC, for use in debug
   output.  Panics on an out-of-range value. */
static const HChar *
s390_cc_as_string(s390_cc_t cc)
{
   switch (cc) {
   case S390_CC_NEVER:  return "never";
   case S390_CC_OVFL:   return "overflow";
   case S390_CC_H:      return "greater than";     /* A > B ; high */
   case S390_CC_NLE:    return "not low or equal";
   case S390_CC_L:      return "less than";        /* A < B ; low */
   case S390_CC_NHE:    return "not high or equal";
   case S390_CC_LH:     return "low or high";
   case S390_CC_NE:     return "not equal";        /* A != B ; not zero */
   case S390_CC_E:      return "equal";            /* A == B ; zero */
   case S390_CC_NLH:    return "not low or high";
   case S390_CC_HE:     return "greater or equal"; /* A >= B ; high or equal*/
   case S390_CC_NL:     return "not low";          /* not low */
   case S390_CC_LE:     return "less or equal";    /* A <= B ; low or equal */
   case S390_CC_NH:     return "not high";
   case S390_CC_NO:     return "not overflow";
   case S390_CC_ALWAYS: return "always";
   default:
      vpanic("s390_cc_as_string");
   }
}
+
+
/* Return a human-readable name for IR jump kind KIND, for use in debug
   output.  Panics on a kind this backend does not handle. */
static const HChar *
s390_jump_kind_as_string(IRJumpKind kind)
{
   switch (kind) {
   case Ijk_Boring:      return "Boring";
   case Ijk_Call:        return "Call";
   case Ijk_Ret:         return "Return";
   case Ijk_ClientReq:   return "ClientReq";
   case Ijk_Yield:       return "Yield";
   case Ijk_EmWarn:      return "EmWarn";
   case Ijk_EmFail:      return "EmFail";
   case Ijk_NoDecode:    return "NoDecode";
   case Ijk_MapFail:     return "MapFail";
   case Ijk_InvalICache: return "Invalidate";
   case Ijk_NoRedir:     return "NoRedir";
   case Ijk_SigTRAP:     return "SigTRAP";
   case Ijk_SigSEGV:     return "SigSEGV";
   case Ijk_SigBUS:      return "SigBUS";
   case Ijk_Sys_syscall: return "Sys_syscall";
   default:
      vpanic("s390_jump_kind_as_string");
   }
}
+
+
+/* Helper function for writing out a V insn */
+static void
+s390_sprintf(HChar *buf, const HChar *fmt, ...)
+{
+   HChar *p;
+   ULong value;
+   va_list args;
+   va_start(args, fmt);
+
+   p = buf;
+   for ( ; *fmt; ++fmt) {
+      Int c = *fmt;
+
+      if (c != '%') {
+         *p++ = c;
+         continue;
+      }
+
+      c = *++fmt;  /* next char */
+      switch (c) {
+      case '%':
+         *p++ = c;   /* %% */
+         continue;
+
+      case 's':     /* %s */
+         p += vex_sprintf(p, "%s", va_arg(args, HChar *));
+         continue;
+
+      case 'M':     /* %M = mnemonic */
+         p += vex_sprintf(p, "%-8s", va_arg(args, HChar *));
+         continue;
+
+      case 'R':     /* %R = register */
+         p += vex_sprintf(p, "%s", s390_hreg_as_string(va_arg(args, HReg)));
+         continue;
+
+      case 'A':     /* %A = amode */
+         p += vex_sprintf(p, "%s",
+                          s390_amode_as_string(va_arg(args, s390_amode *)));
+         continue;
+
+      case 'G':     /* %G = guest state @ offset */
+         p += vex_sprintf(p, "guest[%d]", va_arg(args, UInt));
+         continue;
+
+      case 'C':     /* %C = condition code */
+         p += vex_sprintf(p, "%s", s390_cc_as_string(va_arg(args, s390_cc_t)));
+         continue;
+
+      case 'J':     /* &J = jump kind */
+         p += vex_sprintf(p, "%s",
+                          s390_jump_kind_as_string(va_arg(args, IRJumpKind)));
+         continue;
+
+      case 'L': {   /* %L = argument list in helper call*/
+         UInt i, num_args;
+
+         num_args = va_arg(args, UInt);
+
+         for (i = 0; i < num_args; ++i) {
+            if (i != 0) p += vex_sprintf(p, ", ");
+            p += vex_sprintf(p, "r%d", s390_gprno_from_arg_index(i));
+         }
+         continue;
+      }
+
+      case 'O': {   /* %O = RMI operand */
+         s390_opnd_RMI *op = va_arg(args, s390_opnd_RMI *);
+
+         switch (op->tag) {
+         case S390_OPND_REG:
+            p += vex_sprintf(p, "%s", s390_hreg_as_string(op->variant.reg));
+            continue;
+
+         case S390_OPND_AMODE:
+            p += vex_sprintf(p, "%s", s390_amode_as_string(op->variant.am));
+            continue;
+
+         case S390_OPND_IMMEDIATE:
+            value = op->variant.imm;
+            goto print_value;
+
+         default:
+            goto fail;
+         }
+      }
+
+      case 'I':     /* %I = immediate value */
+         value = va_arg(args, ULong);
+         goto print_value;
+
+      print_value:
+         if ((Long)value < 0)
+            p += vex_sprintf(p, "%lld", (Long)value);
+         else if (value < 100)
+            p += vex_sprintf(p, "%llu", value);
+         else
+            p += vex_sprintf(p, "0x%llx", value);
+         continue;
+
+      default:
+         goto fail;
+      }
+   }
+   *p = '\0';
+   va_end(args);
+
+   return;
+
+ fail: vpanic("s390_printf");
+}
+
+
/* Decompile the given insn into a static buffer and return it.  The
   buffer is overwritten by the next call, so the caller must consume the
   result first. */
const HChar *
s390_insn_as_string(const s390_insn *insn)
{
   static HChar buf[300];  // large enough
   const HChar *op;
   HChar *p;

   buf[0] = '\0';

   /* First part: mnemonic and operands. */
   switch (insn->tag) {
   case S390_INSN_LOAD:
      s390_sprintf(buf, "%M %R,%A", "v-load", insn->variant.load.dst,
                   insn->variant.load.src);
      break;

   case S390_INSN_STORE:
      s390_sprintf(buf, "%M %R,%A", "v-store", insn->variant.store.src,
                   insn->variant.store.dst);
      break;

   case S390_INSN_MOVE:
      s390_sprintf(buf, "%M %R,%R", "v-move", insn->variant.move.dst,
                   insn->variant.move.src);
      break;

   case S390_INSN_MEMCPY:
      s390_sprintf(buf, "%M %A,%A", "v-memcpy", insn->variant.memcpy.dst,
                   insn->variant.memcpy.src);
      break;

   case S390_INSN_COND_MOVE:
      s390_sprintf(buf, "%M if (%C) %R,%O", "v-move",
                   insn->variant.cond_move.cond, insn->variant.cond_move.dst,
                   &insn->variant.cond_move.src);
      break;

   case S390_INSN_LOAD_IMMEDIATE:
      s390_sprintf(buf, "%M %R,%I", "v-loadi", insn->variant.load_immediate.dst,
                   insn->variant.load_immediate.value);
      break;

   case S390_INSN_ALU:
      switch (insn->variant.alu.tag) {
      case S390_ALU_ADD:  op = "v-add";  break;
      case S390_ALU_SUB:  op = "v-sub";  break;
      case S390_ALU_MUL:  op = "v-mul";  break;
      case S390_ALU_AND:  op = "v-and";  break;
      case S390_ALU_OR:   op = "v-or";   break;
      case S390_ALU_XOR:  op = "v-xor";  break;
      case S390_ALU_LSH:  op = "v-lsh";  break;
      case S390_ALU_RSH:  op = "v-rsh";  break;
      case S390_ALU_RSHA: op = "v-rsha"; break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%O", op, insn->variant.alu.dst, /* also op1 */
                   &insn->variant.alu.op2);
      break;

   case S390_INSN_SMUL:
   case S390_INSN_UMUL:
      if (insn->tag == S390_INSN_SMUL) {
         op = "v-muls";
      } else {
         op = "v-mulu";
      }
      s390_sprintf(buf, "%M %R,%O", op, insn->variant.mul.dst_hi,
                   &insn->variant.mul.op2);
      break;

   case S390_INSN_SDIV:
   case S390_INSN_UDIV:
      if (insn->tag == S390_INSN_SDIV) {
         op = "v-divs";
      } else {
         op = "v-divu";
      }
      s390_sprintf(buf, "%M %R,%O", op, insn->variant.div.op1_hi,
                   &insn->variant.div.op2);
      break;

   case S390_INSN_DIVS:
      s390_sprintf(buf, "%M %R,%O", "v-divsi", insn->variant.divs.op1,
                   &insn->variant.divs.op2);
      break;

   case S390_INSN_CLZ:
      s390_sprintf(buf, "%M %R,%O", "v-clz", insn->variant.clz.num_bits,
                   &insn->variant.clz.src);
      break;

   case S390_INSN_UNOP:
      switch (insn->variant.unop.tag) {
      case S390_ZERO_EXTEND_8:
      case S390_ZERO_EXTEND_16:
      case S390_ZERO_EXTEND_32:
         op = "v-zerox";
         break;

      case S390_SIGN_EXTEND_8:
      case S390_SIGN_EXTEND_16:
      case S390_SIGN_EXTEND_32:
         op = "v-signx";
         break;

      case S390_NEGATE:
         op = "v-neg";
         break;

      default:
         goto fail;
      }
      s390_sprintf(buf, "%M %R,%O", op, insn->variant.unop.dst,
                   &insn->variant.unop.src);
      break;

   case S390_INSN_TEST:
      s390_sprintf(buf, "%M %O", "v-test", &insn->variant.test.src);
      break;

   case S390_INSN_CC2BOOL:
      s390_sprintf(buf, "%M %R,%C", "v-cc2b", insn->variant.cc2bool.dst,
                   insn->variant.cc2bool.cond);
      break;

   case S390_INSN_CAS:
      s390_sprintf(buf, "%M %R,%A,%R,%R", "v-cas", insn->variant.cas.op1,
                   insn->variant.cas.op2, insn->variant.cas.op3,
                   insn->variant.cas.old_mem);
      break;

   case S390_INSN_CDAS: {
      s390_cdas *cdas = insn->variant.cdas.details;

      s390_sprintf(buf, "%M %R,%R,%A,%R,%R,%R,%R", "v-cdas",
                   cdas->op1_high, cdas->op1_low, cdas->op2, cdas->op3_high,
                   cdas->op3_low, cdas->old_mem_high, cdas->old_mem_low);
      break;
   }

   case S390_INSN_COMPARE:
      if (insn->variant.compare.signed_comparison) {
         op = "v-cmps";
      } else {
         op = "v-cmpu";
      }
      s390_sprintf(buf, "%M %R,%O", op, insn->variant.compare.src1,
                   &insn->variant.compare.src2);
      break;

   case S390_INSN_HELPER_CALL: {
      s390_helper_call *helper_call = insn->variant.helper_call.details;
      s390_sprintf(buf, "%M if (%C) %s{%I}(%L)", "v-call",
                   helper_call->cond,
                   helper_call->name,
                   helper_call->target,
                   helper_call->num_args);
      return buf;   /* avoid printing "size = ..." which is meaningless */
   }

   case S390_INSN_BFP_TRIOP:
      switch (insn->variant.bfp_triop.tag) {
      case S390_BFP_MADD:  op = "v-fmadd";  break;
      case S390_BFP_MSUB:  op = "v-fmsub";  break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R,%R", op,
                   insn->variant.bfp_triop.dst  /* op1 same as dst */,
                   insn->variant.bfp_triop.op2, insn->variant.bfp_triop.op3);
      break;

   case S390_INSN_BFP_BINOP:
      switch (insn->variant.bfp_binop.tag) {
      case S390_BFP_ADD:      op = "v-fadd";  break;
      case S390_BFP_SUB:      op = "v-fsub";  break;
      case S390_BFP_MUL:      op = "v-fmul";  break;
      case S390_BFP_DIV:      op = "v-fdiv";  break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R", op,
                   insn->variant.bfp_binop.dst_hi  /* op1 same as dst */,
                   insn->variant.bfp_binop.op2_hi);
      break;

   case S390_INSN_BFP_COMPARE:
      s390_sprintf(buf, "%M %R,%R,%R", "v-fcmp", insn->variant.bfp_compare.dst,
                   insn->variant.bfp_compare.op1_hi,
                   insn->variant.bfp_compare.op2_hi);
      break;

   case S390_INSN_BFP_UNOP:
      switch (insn->variant.bfp_unop.tag) {
      case S390_BFP_ABS:         op = "v-fabs";  break;
      case S390_BFP_NABS:        op = "v-fnabs"; break;
      case S390_BFP_NEG:         op = "v-fneg";  break;
      case S390_BFP_SQRT:        op = "v-fsqrt"; break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R", op, insn->variant.bfp_unop.dst_hi,
                   insn->variant.bfp_unop.op_hi);
      break;

   case S390_INSN_BFP_CONVERT:
      switch (insn->variant.bfp_convert.tag) {
      case S390_BFP_I32_TO_F32:
      case S390_BFP_I32_TO_F64:
      case S390_BFP_I32_TO_F128:
      case S390_BFP_I64_TO_F32:
      case S390_BFP_I64_TO_F64:
      case S390_BFP_I64_TO_F128: op = "v-i2f"; break;
      case S390_BFP_U32_TO_F32:
      case S390_BFP_U32_TO_F64:
      case S390_BFP_U32_TO_F128:
      case S390_BFP_U64_TO_F32:
      case S390_BFP_U64_TO_F64:
      case S390_BFP_U64_TO_F128: op = "v-u2f"; break;
      case S390_BFP_F32_TO_I32:
      case S390_BFP_F32_TO_I64:
      case S390_BFP_F64_TO_I32:
      case S390_BFP_F64_TO_I64:
      case S390_BFP_F128_TO_I32:
      case S390_BFP_F128_TO_I64: op = "v-f2i"; break;
      case S390_BFP_F32_TO_U32:
      case S390_BFP_F32_TO_U64:
      case S390_BFP_F64_TO_U32:
      case S390_BFP_F64_TO_U64:
      case S390_BFP_F128_TO_U32:
      case S390_BFP_F128_TO_U64: op = "v-f2u"; break;
      case S390_BFP_F32_TO_F64:
      case S390_BFP_F32_TO_F128:
      case S390_BFP_F64_TO_F32:
      case S390_BFP_F64_TO_F128:
      case S390_BFP_F128_TO_F32:
      case S390_BFP_F128_TO_F64: op = "v-f2f"; break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R", op, insn->variant.bfp_convert.dst_hi,
                   insn->variant.bfp_convert.op_hi);
      break;

   case S390_INSN_DFP_BINOP: {
      s390_dfp_binop *dfp_binop = insn->variant.dfp_binop.details;

      switch (dfp_binop->tag) {
      case S390_DFP_ADD:  op = "v-dadd";  break;
      case S390_DFP_SUB:  op = "v-dsub";  break;
      case S390_DFP_MUL:  op = "v-dmul";  break;
      case S390_DFP_DIV:  op = "v-ddiv";  break;
      case S390_DFP_QUANTIZE:  op = "v-dqua";  break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R,%R", op, dfp_binop->dst_hi,
                   dfp_binop->op2_hi, dfp_binop->op3_hi);
      break;
   }

   case S390_INSN_DFP_UNOP:
      switch (insn->variant.dfp_unop.tag) {
      case S390_DFP_EXTRACT_EXP_D64:
      case S390_DFP_EXTRACT_EXP_D128:  op = "v-d2exp";  break;
      case S390_DFP_EXTRACT_SIG_D64:
      case S390_DFP_EXTRACT_SIG_D128:  op = "v-d2sig";  break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R", op, insn->variant.dfp_unop.dst_hi,
                   insn->variant.dfp_unop.op_hi);
      break;

   case S390_INSN_DFP_INTOP:
      switch (insn->variant.dfp_intop.tag) {
      case S390_DFP_SHIFT_LEFT:  op = "v-dshl"; break;
      case S390_DFP_SHIFT_RIGHT: op = "v-dshr"; break;
      case S390_DFP_INSERT_EXP:  op = "v-diexp"; break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R,%R", op, insn->variant.dfp_intop.dst_hi,
                   insn->variant.dfp_intop.op2,
                   insn->variant.dfp_intop.op3_hi);
      break;

   case S390_INSN_DFP_COMPARE:
      switch (insn->variant.dfp_compare.tag) {
      case S390_DFP_COMPARE:     op = "v-dcmp"; break;
      case S390_DFP_COMPARE_EXP: op = "v-dcmpexp"; break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R,%R", op, insn->variant.dfp_compare.dst,
                   insn->variant.dfp_compare.op1_hi,
                   insn->variant.dfp_compare.op2_hi);
      break;

   case S390_INSN_DFP_CONVERT:
      switch (insn->variant.dfp_convert.tag) {
      case S390_DFP_D32_TO_D64:
      case S390_DFP_D64_TO_D32:
      case S390_DFP_D64_TO_D128:
      case S390_DFP_D128_TO_D64: op = "v-d2d"; break;
      case S390_DFP_I32_TO_D64:
      case S390_DFP_I32_TO_D128:
      case S390_DFP_I64_TO_D64:
      case S390_DFP_I64_TO_D128: op = "v-i2d"; break;
      case S390_DFP_U32_TO_D64:
      case S390_DFP_U32_TO_D128:
      case S390_DFP_U64_TO_D64:
      case S390_DFP_U64_TO_D128: op = "v-u2d"; break;
      case S390_DFP_D64_TO_I32:
      case S390_DFP_D128_TO_I32:
      case S390_DFP_D64_TO_I64:
      case S390_DFP_D128_TO_I64: op = "v-d2i"; break;
      case S390_DFP_D64_TO_U32:
      case S390_DFP_D64_TO_U64:
      case S390_DFP_D128_TO_U32:
      case S390_DFP_D128_TO_U64: op = "v-d2u"; break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R", op, insn->variant.dfp_convert.dst_hi,
                   insn->variant.dfp_convert.op_hi);
      break;

   case S390_INSN_DFP_REROUND:
      s390_sprintf(buf, "%M %R,%R,%R", "v-drrnd",
                   insn->variant.dfp_reround.dst_hi,
                   insn->variant.dfp_reround.op2,
                   insn->variant.dfp_reround.op3_hi);
      break;

   case S390_INSN_FP_CONVERT: {
      s390_fp_convert *fp_convert = insn->variant.fp_convert.details;

      switch (fp_convert->tag) {
      case S390_FP_F32_TO_D32:
      case S390_FP_F32_TO_D64:
      case S390_FP_F32_TO_D128:
      case S390_FP_F64_TO_D32:
      case S390_FP_F64_TO_D64:
      case S390_FP_F64_TO_D128:
      case S390_FP_F128_TO_D32:
      case S390_FP_F128_TO_D64:
      case S390_FP_F128_TO_D128: op = "v-f2d"; break;
      case S390_FP_D32_TO_F32:
      case S390_FP_D32_TO_F64:
      case S390_FP_D32_TO_F128:
      case S390_FP_D64_TO_F32:
      case S390_FP_D64_TO_F64:
      case S390_FP_D64_TO_F128:
      case S390_FP_D128_TO_F32:
      case S390_FP_D128_TO_F64:
      case S390_FP_D128_TO_F128: op = "v-d2f"; break;
      default: goto fail;
      }
      s390_sprintf(buf, "%M %R,%R", op, fp_convert->dst_hi,
                   fp_convert->op_hi);
      break;
   }

   case S390_INSN_MFENCE:
      s390_sprintf(buf, "%M", "v-mfence");
      return buf;   /* avoid printing "size = ..." which is meaningless */

   case S390_INSN_MIMM:
      s390_sprintf(buf, "%M %A,%I", "v-mimm", insn->variant.mimm.dst,
                   insn->variant.mimm.value);
      break;

   case S390_INSN_MADD:
      s390_sprintf(buf, "%M %A += %I  (= %I)", "v-madd",
                   insn->variant.madd.dst,
                   (Long)(Char)insn->variant.madd.delta,
                   insn->variant.madd.value);
      break;

   case S390_INSN_SET_FPC_BFPRM:
      s390_sprintf(buf, "%M %R", "v-set-fpc-bfprm",
                   insn->variant.set_fpc_bfprm.mode);
      break;

   case S390_INSN_SET_FPC_DFPRM:
      s390_sprintf(buf, "%M %R", "v-set-fpc-dfprm",
                   insn->variant.set_fpc_dfprm.mode);
      break;

   case S390_INSN_EVCHECK:
      s390_sprintf(buf, "%M counter = %A, fail-addr = %A", "v-evcheck",
                   insn->variant.evcheck.counter,
                   insn->variant.evcheck.fail_addr);
      return buf;   /* avoid printing "size = ..." which is meaningless */

   case S390_INSN_PROFINC:
      s390_sprintf(buf, "%M", "v-profinc");
      return buf;   /* avoid printing "size = ..." which is meaningless */

   case S390_INSN_XDIRECT:
      s390_sprintf(buf, "%M if (%C) %A = %I  %s", "v-xdirect",
                   insn->variant.xdirect.cond,
                   insn->variant.xdirect.guest_IA,
                   insn->variant.xdirect.dst,
                   insn->variant.xdirect.to_fast_entry ? "fast" : "slow");
      return buf;   /* avoid printing "size = ..." which is meaningless */

   case S390_INSN_XINDIR:
      s390_sprintf(buf, "%M if (%C) %A = %R", "v-xindir",
                   insn->variant.xindir.cond,
                   insn->variant.xindir.guest_IA,
                   insn->variant.xindir.dst);
      return buf;   /* avoid printing "size = ..." which is meaningless */

   case S390_INSN_XASSISTED:
      s390_sprintf(buf, "%M if (%C) %J %A = %R", "v-xassisted",
                   insn->variant.xassisted.cond,
                   insn->variant.xassisted.kind,
                   insn->variant.xassisted.guest_IA,
                   insn->variant.xassisted.dst);
      return buf;   /* avoid printing "size = ..." which is meaningless */

   default: goto fail;
   }

   /* Write out how many bytes are involved in the operation */

   {
      UInt len, i;

      /* Find the end of the text written so far and pad with blanks up
         to column 32 so the size annotations line up. */
      for (p = buf; *p; ++p)
         continue;

      len = p - buf;

      if (len < 32) {
         for (i = len; i < 32; ++i)
            p += vex_sprintf(p, " ");
      } else {
         p += vex_sprintf(p, "\t");
      }
   }

   /* Special cases first: conversions also print the source size, so the
      annotation reads e.g. "4 -> 8 bytes". */
   switch (insn->tag) {
   case S390_INSN_UNOP:
      switch (insn->variant.unop.tag) {
      case S390_SIGN_EXTEND_8:
      case S390_ZERO_EXTEND_8:  p += vex_sprintf(p, "1 -> "); goto common;
      case S390_SIGN_EXTEND_16:
      case S390_ZERO_EXTEND_16: p += vex_sprintf(p, "2 -> "); goto common;
      case S390_SIGN_EXTEND_32:
      case S390_ZERO_EXTEND_32: p += vex_sprintf(p, "4 -> "); goto common;
      default:
         goto common;
      }

   case S390_INSN_BFP_CONVERT:
      switch (insn->variant.bfp_convert.tag) {
      case S390_BFP_I32_TO_F32:
      case S390_BFP_I32_TO_F64:
      case S390_BFP_I32_TO_F128:
      case S390_BFP_U32_TO_F32:
      case S390_BFP_U32_TO_F64:
      case S390_BFP_U32_TO_F128:
      case S390_BFP_F32_TO_I32:
      case S390_BFP_F32_TO_I64:
      case S390_BFP_F32_TO_U32:
      case S390_BFP_F32_TO_U64:
      case S390_BFP_F32_TO_F64:
      case S390_BFP_F32_TO_F128: p += vex_sprintf(p, "4 -> "); goto common;
      case S390_BFP_I64_TO_F32:
      case S390_BFP_I64_TO_F64:
      case S390_BFP_I64_TO_F128:
      case S390_BFP_U64_TO_F32:
      case S390_BFP_U64_TO_F64:
      case S390_BFP_U64_TO_F128:
      case S390_BFP_F64_TO_I32:
      case S390_BFP_F64_TO_I64:
      case S390_BFP_F64_TO_U32:
      case S390_BFP_F64_TO_U64:
      case S390_BFP_F64_TO_F32:
      case S390_BFP_F64_TO_F128: p += vex_sprintf(p, "8 -> "); goto common;
      case S390_BFP_F128_TO_I32:
      case S390_BFP_F128_TO_I64:
      case S390_BFP_F128_TO_U32:
      case S390_BFP_F128_TO_U64:
      case S390_BFP_F128_TO_F32:
      case S390_BFP_F128_TO_F64: p += vex_sprintf(p, "16 -> "); goto common;
      default:
         goto common;
      }

   case S390_INSN_DFP_CONVERT:
      switch (insn->variant.dfp_convert.tag) {
      case S390_DFP_D32_TO_D64:
      case S390_DFP_I32_TO_D64:
      case S390_DFP_I32_TO_D128:
      case S390_DFP_U32_TO_D64:
      case S390_DFP_U32_TO_D128: p += vex_sprintf(p, "4 -> "); goto common;
      case S390_DFP_D64_TO_D32:
      case S390_DFP_D64_TO_D128:
      case S390_DFP_I64_TO_D64:
      case S390_DFP_I64_TO_D128:
      case S390_DFP_U64_TO_D64:
      case S390_DFP_U64_TO_D128:
      case S390_DFP_D64_TO_I32:
      case S390_DFP_D64_TO_I64:
      case S390_DFP_D64_TO_U32:
      case S390_DFP_D64_TO_U64:  p += vex_sprintf(p, "8 -> "); goto common;
      case S390_DFP_D128_TO_D64:
      case S390_DFP_D128_TO_I32:
      case S390_DFP_D128_TO_I64:
      case S390_DFP_D128_TO_U32:
      case S390_DFP_D128_TO_U64: p += vex_sprintf(p, "16 -> "); goto common;
      default:
         goto common;
      }

   case S390_INSN_FP_CONVERT: {
      s390_fp_convert *fp_convert = insn->variant.fp_convert.details;

      switch (fp_convert->tag) {
      case S390_FP_F32_TO_D32:
      case S390_FP_F32_TO_D64:
      case S390_FP_F32_TO_D128:
      case S390_FP_D32_TO_F32:
      case S390_FP_D32_TO_F64:
      case S390_FP_D32_TO_F128:  p += vex_sprintf(p, "4 -> "); goto common;
      case S390_FP_F64_TO_D32:
      case S390_FP_F64_TO_D64:
      case S390_FP_F64_TO_D128:
      case S390_FP_D64_TO_F32:
      case S390_FP_D64_TO_F64:
      case S390_FP_D64_TO_F128:  p += vex_sprintf(p, "8 -> "); goto common;
      case S390_FP_F128_TO_D32:
      case S390_FP_F128_TO_D64:
      case S390_FP_F128_TO_D128:
      case S390_FP_D128_TO_F32:
      case S390_FP_D128_TO_F64:
      case S390_FP_D128_TO_F128: p += vex_sprintf(p, "16 -> "); goto common;
      default:
         goto common;
      }
   }

   default:
      goto common;
   }

   /* Common case */
 common:
   vex_sprintf(p, "%u bytes", (UInt)insn->size);

   return buf;

 fail: vpanic("s390_insn_as_string");
}
+
+
+
+/* Load NUM bytes from memory into register REG using addressing mode AM. */
+/* NUM must be 1, 2, 4, or 8; any other value panics. P is the code
+   emission cursor; the cursor advanced past the emitted insn is
+   returned. The opcode chosen depends on both the operand width and
+   on whether AM carries a 12-bit or a 20-bit displacement. */
+static UChar *
+s390_emit_load_mem(UChar *p, UInt num, UChar reg, const s390_amode *am)
+{
+   UInt b = hregNumber(am->b);
+   UInt x = hregNumber(am->x);  /* 0 for B12 and B20 */
+   UInt d = am->d;
+
+   switch (am->tag) {
+   case S390_AMODE_B12:
+   case S390_AMODE_BX12:
+      switch (num) {
+      case 1: return s390_emit_IC(p, reg, x, b, d);
+      case 2: return s390_emit_LH(p, reg, x, b, d);
+      case 4: return s390_emit_L(p, reg, x, b, d);
+      /* The 8-byte load uses LG (20-bit displacement form) even for
+         short amodes; a 12-bit displacement fits in DISP20. */
+      case 8: return s390_emit_LG(p, reg, x, b, DISP20(d));
+      default: goto fail;
+      }
+      break;
+
+   case S390_AMODE_B20:
+   case S390_AMODE_BX20:
+      switch (num) {
+      case 1: return s390_emit_ICY(p, reg, x, b, DISP20(d));
+      case 2: return s390_emit_LHY(p, reg, x, b, DISP20(d));
+      case 4: return s390_emit_LY(p, reg, x, b, DISP20(d));
+      case 8: return s390_emit_LG(p, reg, x, b, DISP20(d));
+      default: goto fail;
+      }
+      break;
+
+   default: goto fail;
+   }
+
+ fail:
+   vpanic("s390_emit_load_mem");
+}
+
+
+/* Materialize the current condition code (a value in 0..3) in
+   register REG. Returns the advanced code-emission cursor. */
+static UChar *
+s390_emit_load_cc(UChar *buf, UChar r)
+{
+   /* Zero the register first; LGHI does not disturb the cc. */
+   buf = s390_emit_LGHI(buf, r, 0);
+   /* IPM deposits the program mask, including the cc, in the
+      high-order byte of the low word. */
+   buf = s390_emit_IPM(buf, r, r);
+   /* Bring the cc down to the least significant bits: r >>= 28. */
+   return s390_emit_SRL(buf, r, 0, 28);
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Code generation                                         ---*/
+/*---------------------------------------------------------------*/
+
+/* Do not load more bytes than requested. */
+static UChar *
+s390_insn_load_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r1, b, x, d;
+   Bool short_disp, long_disp;
+   const s390_amode *am;
+
+   am = insn->variant.load.src;
+   r1 = hregNumber(insn->variant.load.dst);
+
+   if (hregClass(insn->variant.load.dst) != HRcFlt64) {
+      /* Integer load; the generic helper covers all widths. */
+      return s390_emit_load_mem(buf, insn->size, r1, am);
+   }
+
+   /* Floating point load: only 4 and 8 bytes are supported. */
+   b = hregNumber(am->b);
+   x = hregNumber(am->x);   /* 0 for B12 and B20 */
+   d = am->d;
+
+   short_disp = am->tag == S390_AMODE_B12 || am->tag == S390_AMODE_BX12;
+   long_disp  = am->tag == S390_AMODE_B20 || am->tag == S390_AMODE_BX20;
+
+   if (insn->size == 4) {
+      if (short_disp) return s390_emit_LE(buf, r1, x, b, d);
+      if (long_disp)  return s390_emit_LEY(buf, r1, x, b, DISP20(d));
+   } else if (insn->size == 8) {
+      if (short_disp) return s390_emit_LD(buf, r1, x, b, d);
+      if (long_disp)  return s390_emit_LDY(buf, r1, x, b, DISP20(d));
+   }
+
+   vpanic("s390_insn_load_emit");
+}
+
+
+/* Emit a store of register insn->variant.store.src to the memory
+   location described by insn->variant.store.dst. FPR stores support
+   4 and 8 bytes; GPR stores support 1, 2, 4 and 8 bytes. Any other
+   size, or an unexpected amode tag, panics. */
+static UChar *
+s390_insn_store_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r, x, b, d;
+   const s390_amode *dst;
+
+   dst = insn->variant.store.dst;
+
+   r = hregNumber(insn->variant.store.src);
+   b = hregNumber(dst->b);
+   x = hregNumber(dst->x);  /* 0 for B12 and B20 */
+   d = dst->d;
+
+   if (hregClass(insn->variant.store.src) == HRcFlt64) {
+      switch (insn->size) {
+
+      case 4:
+         switch (dst->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            return s390_emit_STE(buf, r, x, b, d);
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            return s390_emit_STEY(buf, r, x, b, DISP20(d));
+         }
+         break;
+
+      case 8:
+         switch (dst->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            return s390_emit_STD(buf, r, x, b, d);
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            return s390_emit_STDY(buf, r, x, b, DISP20(d));
+         }
+         break;
+      }
+      vpanic("s390_insn_store_emit");
+   }
+
+   /* Integer stuff */
+   switch (insn->size) {
+   case 1:
+      switch (dst->tag) {
+      case S390_AMODE_B12:
+      case S390_AMODE_BX12:
+         return s390_emit_STC(buf, r, x, b, d);
+
+      case S390_AMODE_B20:
+      case S390_AMODE_BX20:
+         return s390_emit_STCY(buf, r, x, b, DISP20(d));
+      }
+      break;
+
+   case 2:
+      switch (dst->tag) {
+      case S390_AMODE_B12:
+      case S390_AMODE_BX12:
+         return s390_emit_STH(buf, r, x, b, d);
+
+      case S390_AMODE_B20:
+      case S390_AMODE_BX20:
+         return s390_emit_STHY(buf, r, x, b, DISP20(d));
+      }
+      break;
+
+   case 4:
+      switch (dst->tag) {
+      case S390_AMODE_B12:
+      case S390_AMODE_BX12:
+         return s390_emit_ST(buf, r, x, b, d);
+
+      case S390_AMODE_B20:
+      case S390_AMODE_BX20:
+         return s390_emit_STY(buf, r, x, b, DISP20(d));
+      }
+      break;
+
+   /* 8-byte stores always use STG (20-bit displacement form),
+      regardless of the amode tag. */
+   case 8:
+      return s390_emit_STG(buf, r, x, b, DISP20(d));
+
+   default:
+      break;
+   }
+
+   vpanic("s390_insn_store_emit");
+}
+
+
+/* Emit a register-to-register move. Same-class moves are plain
+   copies; GPR<->FPR moves of 4-byte values additionally shift the
+   payload between the low half of the GPR and the high half of the
+   FPR, using R0 as scratch where needed. */
+static UChar *
+s390_insn_move_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r_dst, r_src;
+   HRegClass c_dst, c_src;
+
+   r_dst = hregNumber(insn->variant.move.dst);
+   r_src = hregNumber(insn->variant.move.src);
+   c_dst = hregClass(insn->variant.move.dst);
+   c_src = hregClass(insn->variant.move.src);
+
+   /* Same-class copies. */
+   if (c_dst == HRcInt64 && c_src == HRcInt64)
+      return s390_emit_LGR(buf, r_dst, r_src);
+   if (c_dst == HRcFlt64 && c_src == HRcFlt64)
+      return s390_emit_LDR(buf, r_dst, r_src);
+
+   /* GPR -> FPR */
+   if (c_dst == HRcFlt64 && c_src == HRcInt64) {
+      if (insn->size != 4)
+         return s390_emit_LDGRw(buf, r_dst, r_src);
+      buf = s390_emit_SLLG(buf, R0, r_src, 0, DISP20(32)); /* r0 = src << 32 */
+      return s390_emit_LDGRw(buf, r_dst, R0);
+   }
+
+   /* FPR -> GPR */
+   if (c_dst == HRcInt64 && c_src == HRcFlt64) {
+      if (insn->size != 4)
+         return s390_emit_LGDRw(buf, r_dst, r_src);
+      buf = s390_emit_LGDRw(buf, r_dst, r_src);
+      return s390_emit_SRLG(buf, r_dst, r_dst, 0, DISP20(32)); /* dst >>= 32 */
+   }
+
+   /* A move between floating point registers and general purpose
+      registers of different size should never occur and indicates
+      an error elsewhere. */
+   vpanic("s390_insn_move_emit");
+}
+
+
+/* Emit an MVC to copy insn->size bytes between two base+displacement
+   amodes. */
+static UChar *
+s390_insn_memcpy_emit(UChar *buf, const s390_insn *insn)
+{
+   const s390_amode *to   = insn->variant.memcpy.dst;
+   const s390_amode *from = insn->variant.memcpy.src;
+
+   /* MVC encodes the length minus one. */
+   return s390_emit_MVC(buf, insn->size - 1, hregNumber(to->b), to->d,
+                        hregNumber(from->b), from->d);
+}
+
+
+/* Emit code to place an immediate value in the destination register.
+   For FPRs only the value 0 is representable (LZER / LZDR). */
+static UChar *
+s390_insn_load_immediate_emit(UChar *buf, const s390_insn *insn)
+{
+   ULong imm = insn->variant.load_immediate.value;
+   UInt  r1  = hregNumber(insn->variant.load_immediate.dst);
+
+   if (hregClass(insn->variant.load_immediate.dst) == HRcFlt64) {
+      /* Only a zero can be loaded into an FPR as an immediate. */
+      vassert(imm == 0);
+      if (insn->size == 4)
+         return s390_emit_LZER(buf, r1, imm);
+      if (insn->size == 8)
+         return s390_emit_LZDR(buf, r1, imm);
+      vpanic("s390_insn_load_immediate_emit");
+   }
+
+   if (insn->size == 1 || insn->size == 2 || insn->size == 4) {
+      /* 1- and 2-byte values are loaded as 4-byte values; the extra
+         bytes are never looked at. */
+      return s390_emit_load_32imm(buf, r1, imm);
+   }
+   if (insn->size == 8)
+      return s390_emit_load_64imm(buf, r1, imm);
+
+   vpanic("s390_insn_load_immediate_emit");
+}
+
+
+/* There is no easy way to do ALU operations on 1-byte or 2-byte operands.
+   So we simply perform a 4-byte operation. Doing so uses possibly undefined
+   bits and produces an undefined result in those extra bit positions. But
+   upstream does not look at those positions, so this is OK. */
+/* BUF is the code emission cursor; the advanced cursor is returned.
+   The operation, destination register and second operand come from
+   insn->variant.alu. R0 is used as a scratch register in several
+   paths below, so it must not carry a live value across this insn.
+   Unsupported size/op/operand combinations panic. */
+static UChar *
+s390_insn_alu_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_opnd_RMI op2;
+   UInt dst;
+
+   dst = hregNumber(insn->variant.alu.dst);
+   op2 = insn->variant.alu.op2;
+
+   /* Second operand is in a register */
+   if (op2.tag == S390_OPND_REG) {
+      UInt r2 = hregNumber(op2.variant.reg);
+
+      switch (insn->size) {
+      /* 1- and 2-byte operands use the 32-bit opcodes; see the
+         comment at the top of this function. */
+      case 1:
+      case 2:
+      case 4:
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:  return s390_emit_AR(buf, dst, r2);
+         case S390_ALU_SUB:  return s390_emit_SR(buf, dst, r2);
+         case S390_ALU_MUL:  return s390_emit_MSR(buf, dst, r2);
+         case S390_ALU_AND:  return s390_emit_NR(buf, dst, r2);
+         case S390_ALU_OR:   return s390_emit_OR(buf, dst, r2);
+         case S390_ALU_XOR:  return s390_emit_XR(buf, dst, r2);
+         case S390_ALU_LSH:  return s390_emit_SLL(buf, dst, r2, 0);
+         case S390_ALU_RSH:  return s390_emit_SRL(buf, dst, r2, 0);
+         case S390_ALU_RSHA: return s390_emit_SRA(buf, dst, r2, 0);
+         }
+         goto fail;
+
+      case 8:
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:  return s390_emit_AGR(buf, dst, r2);
+         case S390_ALU_SUB:  return s390_emit_SGR(buf, dst, r2);
+         case S390_ALU_MUL:  return s390_emit_MSGR(buf, dst, r2);
+         case S390_ALU_AND:  return s390_emit_NGR(buf, dst, r2);
+         case S390_ALU_OR:   return s390_emit_OGR(buf, dst, r2);
+         case S390_ALU_XOR:  return s390_emit_XGR(buf, dst, r2);
+         case S390_ALU_LSH:  return s390_emit_SLLG(buf, dst, dst, r2, DISP20(0));
+         case S390_ALU_RSH:  return s390_emit_SRLG(buf, dst, dst, r2, DISP20(0));
+         case S390_ALU_RSHA: return s390_emit_SRAG(buf, dst, dst, r2, DISP20(0));
+         }
+         goto fail;
+      }
+      goto fail;
+   }
+
+   /* 2nd operand is in memory */
+   if (op2.tag == S390_OPND_AMODE) {
+      UInt b, x, d;
+      const s390_amode *src = op2.variant.am;
+
+      b = hregNumber(src->b);
+      x = hregNumber(src->x);  /* 0 for B12 and B20 */
+      d = src->d;
+
+      /* Shift operands are special here as there are no opcodes that
+         allow a memory operand. So we first load the 2nd operand into
+         some register. R0 is used to save restore the contents of the
+         chosen register.. */
+
+      if (insn->variant.alu.tag == S390_ALU_LSH ||
+          insn->variant.alu.tag == S390_ALU_RSH ||
+          insn->variant.alu.tag == S390_ALU_RSHA) {
+         UInt b2;
+
+         /* Choose a register (other than DST or R0) into which to stick the
+            shift amount. The following works because r15 is reserved and
+            thusly dst != 15. */
+         vassert(dst != 15);  /* extra paranoia */
+         b2 = (dst + 1) % 16;
+
+         buf = s390_emit_LGR(buf, R0, b2);  /* save */
+
+         /* Loading SRC to B2 does not modify R0. */
+         buf = s390_emit_load_mem(buf, insn->size, b2, src);
+
+         if (insn->size == 8) {
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_LSH:
+               buf = s390_emit_SLLG(buf, dst, dst, b2, DISP20(0));
+               break;
+            case S390_ALU_RSH:
+               buf = s390_emit_SRLG(buf, dst, dst, b2, DISP20(0));
+               break;
+            case S390_ALU_RSHA:
+               buf = s390_emit_SRAG(buf, dst, dst, b2, DISP20(0));
+               break;
+            default: /* unreachable */
+               goto fail;
+            }
+         } else {
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_LSH:
+               buf = s390_emit_SLL(buf, dst, b2, 0);
+               break;
+            case S390_ALU_RSH:
+               buf = s390_emit_SRL(buf, dst, b2, 0);
+               break;
+            case S390_ALU_RSHA:
+               buf = s390_emit_SRA(buf, dst, b2, 0);
+               break;
+            default: /* unreachable */
+               goto fail;
+            }
+         }
+         return s390_emit_LGR(buf, b2, R0);  /* restore */
+      }
+
+      switch (insn->size) {
+      case 1:
+         /* Move the byte from memory into scratch register r0 */
+         buf = s390_emit_load_mem(buf, 1, R0, src);
+
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD: return s390_emit_AR(buf, dst, R0);
+         case S390_ALU_SUB: return s390_emit_SR(buf, dst, R0);
+         case S390_ALU_MUL: return s390_emit_MSR(buf, dst, R0);
+         case S390_ALU_AND: return s390_emit_NR(buf, dst, R0);
+         case S390_ALU_OR:  return s390_emit_OR(buf, dst, R0);
+         case S390_ALU_XOR: return s390_emit_XR(buf, dst, R0);
+         case S390_ALU_LSH:
+         case S390_ALU_RSH:
+         case S390_ALU_RSHA: ; /* avoid GCC warning */
+         }
+         goto fail;
+
+      case 2:
+         switch (src->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_ADD:
+               return s390_emit_AH(buf, dst, x, b, d);
+
+            case S390_ALU_SUB:
+               return s390_emit_SH(buf, dst, x, b, d);
+
+            case S390_ALU_MUL:
+               return s390_emit_MH(buf, dst, x, b, d);
+
+               /* For bitwise operations: Move two bytes from memory into scratch
+                  register r0; then perform operation */
+            case S390_ALU_AND:
+               buf = s390_emit_LH(buf, R0, x, b, d);
+               return s390_emit_NR(buf, dst, R0);
+
+            case S390_ALU_OR:
+               buf = s390_emit_LH(buf, R0, x, b, d);
+               return s390_emit_OR(buf, dst, R0);
+
+            case S390_ALU_XOR:
+               buf = s390_emit_LH(buf, R0, x, b, d);
+               return s390_emit_XR(buf, dst, R0);
+
+            case S390_ALU_LSH:
+            case S390_ALU_RSH:
+            case S390_ALU_RSHA: ; /* avoid GCC warning */
+            }
+            goto fail;
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_ADD:
+               return s390_emit_AHY(buf, dst, x, b, DISP20(d));
+
+            case S390_ALU_SUB:
+               return s390_emit_SHY(buf, dst, x, b, DISP20(d));
+
+            case S390_ALU_MUL:
+               return s390_emit_MHYw(buf, dst, x, b, DISP20(d));
+
+               /* For bitwise operations: Move two bytes from memory into scratch
+                  register r0; then perform operation */
+            case S390_ALU_AND:
+               buf = s390_emit_LHY(buf, R0, x, b, DISP20(d));
+               return s390_emit_NR(buf, dst, R0);
+
+            case S390_ALU_OR:
+               buf = s390_emit_LHY(buf, R0, x, b, DISP20(d));
+               return s390_emit_OR(buf, dst, R0);
+
+            case S390_ALU_XOR:
+               buf = s390_emit_LHY(buf, R0, x, b, DISP20(d));
+               return s390_emit_XR(buf, dst, R0);
+
+            case S390_ALU_LSH:
+            case S390_ALU_RSH:
+            case S390_ALU_RSHA: ; /* avoid GCC warning */
+            }
+            goto fail;
+         }
+         goto fail;
+
+      case 4:
+         switch (src->tag) {
+         case S390_AMODE_B12:
+         case S390_AMODE_BX12:
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_ADD: return s390_emit_A(buf, dst, x, b, d);
+            case S390_ALU_SUB: return s390_emit_S(buf, dst, x, b, d);
+            case S390_ALU_MUL: return s390_emit_MS(buf, dst, x, b, d);
+            case S390_ALU_AND: return s390_emit_N(buf, dst, x, b, d);
+            case S390_ALU_OR:  return s390_emit_O(buf, dst, x, b, d);
+            case S390_ALU_XOR: return s390_emit_X(buf, dst, x, b, d);
+            case S390_ALU_LSH:
+            case S390_ALU_RSH:
+            case S390_ALU_RSHA: ; /* avoid GCC warning */
+            }
+            goto fail;
+
+         case S390_AMODE_B20:
+         case S390_AMODE_BX20:
+            switch (insn->variant.alu.tag) {
+            case S390_ALU_ADD: return s390_emit_AY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_SUB: return s390_emit_SY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_MUL: return s390_emit_MSY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_AND: return s390_emit_NY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_OR:  return s390_emit_OY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_XOR: return s390_emit_XY(buf, dst, x, b, DISP20(d));
+            case S390_ALU_LSH:
+            case S390_ALU_RSH:
+            case S390_ALU_RSHA: ; /* avoid GCC warning */
+            }
+            goto fail;
+         }
+         goto fail;
+
+      case 8:
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD: return s390_emit_AG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_SUB: return s390_emit_SG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_MUL: return s390_emit_MSG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_AND: return s390_emit_NG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_OR:  return s390_emit_OG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_XOR: return s390_emit_XG(buf, dst, x, b, DISP20(d));
+         case S390_ALU_LSH:
+         case S390_ALU_RSH:
+         case S390_ALU_RSHA: ; /* avoid GCC warning */
+         }
+         goto fail;
+      }
+      goto fail;
+   }
+
+   /* 2nd operand is an immediate value */
+   if (op2.tag == S390_OPND_IMMEDIATE) {
+      ULong value;
+
+      /* No masking of the value is required as it is not sign extended */
+      value = op2.variant.imm;
+
+      switch (insn->size) {
+      case 1:
+      case 2:
+         /* There is no 1-byte opcode. Do the computation in
+            2 bytes. The extra byte will be ignored. */
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:
+            return s390_emit_AHI(buf, dst, value);
+
+         case S390_ALU_SUB:
+            return s390_emit_SLFIw(buf, dst, value);
+
+         case S390_ALU_MUL:
+            return s390_emit_MHI(buf, dst, value);
+
+         case S390_ALU_AND: return s390_emit_NILL(buf, dst, value);
+         case S390_ALU_OR:  return s390_emit_OILL(buf, dst, value);
+         case S390_ALU_XOR:
+            /* There is no XILL instruction.  Load the immediate value into
+               R0 and combine with the destination register. */
+            buf = s390_emit_LHI(buf, R0, value);
+            return s390_emit_XR(buf, dst, R0);
+
+         case S390_ALU_LSH:
+            return s390_emit_SLL(buf, dst, 0, value);
+
+         case S390_ALU_RSH:
+            return s390_emit_SRL(buf, dst, 0, value);
+
+         case S390_ALU_RSHA:
+            return s390_emit_SRA(buf, dst, 0, value);
+         }
+         goto fail;
+
+      case 4:
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:
+            /* Prefer the shorter AHI encoding when the value fits. */
+            if (uint_fits_signed_16bit(value)) {
+               return s390_emit_AHI(buf, dst, value);
+            }
+            return s390_emit_AFIw(buf, dst, value);
+
+         case S390_ALU_SUB:  return s390_emit_SLFIw(buf, dst, value);
+         case S390_ALU_MUL:  return s390_emit_MSFIw(buf, dst, value);
+         case S390_ALU_AND:  return s390_emit_NILFw(buf, dst, value);
+         case S390_ALU_OR:   return s390_emit_OILFw(buf, dst, value);
+         case S390_ALU_XOR:  return s390_emit_XILFw(buf, dst, value);
+         case S390_ALU_LSH:  return s390_emit_SLL(buf, dst, 0, value);
+         case S390_ALU_RSH:  return s390_emit_SRL(buf, dst, 0, value);
+         case S390_ALU_RSHA: return s390_emit_SRA(buf, dst, 0, value);
+         }
+         goto fail;
+
+      case 8:
+         switch (insn->variant.alu.tag) {
+         case S390_ALU_ADD:
+            if (ulong_fits_signed_16bit(value)) {
+               return s390_emit_AGHI(buf, dst, value);
+            }
+            if (ulong_fits_signed_32bit(value) && s390_host_has_eimm) {
+               return s390_emit_AGFI(buf, dst, value);
+            }
+            /* Load constant into R0 then add */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_AGR(buf, dst, R0);
+
+         case S390_ALU_SUB:
+            if (ulong_fits_unsigned_32bit(value)) {
+               return s390_emit_SLGFIw(buf, dst, value);
+            }
+            /* Load value into R0; then subtract from destination reg */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_SGR(buf, dst, R0);
+
+         case S390_ALU_MUL:
+            if (ulong_fits_signed_32bit(value) && s390_host_has_gie) {
+               return s390_emit_MSGFI(buf, dst, value);
+            }
+            /* Load constant into R0 then add */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_MSGR(buf, dst, R0);
+
+            /* Do it in two steps: upper half [0:31] and lower half [32:63] */
+         case S390_ALU_AND:
+            if (s390_host_has_eimm) {
+               buf  = s390_emit_NIHF(buf, dst, value >> 32);
+               return s390_emit_NILF(buf, dst, value & 0xFFFFFFFF);
+            }
+            /* Load value into R0; then combine with destination reg */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_NGR(buf, dst, R0);
+
+         case S390_ALU_OR:
+            if (s390_host_has_eimm) {
+               buf  = s390_emit_OIHF(buf, dst, value >> 32);
+               return s390_emit_OILF(buf, dst, value & 0xFFFFFFFF);
+            }
+            /* Load value into R0; then combine with destination reg */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_OGR(buf, dst, R0);
+
+         case S390_ALU_XOR:
+            if (s390_host_has_eimm) {
+               buf  = s390_emit_XIHF(buf, dst, value >> 32);
+               return s390_emit_XILF(buf, dst, value & 0xFFFFFFFF);
+            }
+            /* Load value into R0; then combine with destination reg */
+            buf = s390_emit_load_64imm(buf, R0, value);
+            return s390_emit_XGR(buf, dst, R0);
+
+            /* No special considerations for long displacement here. Only the six
+               least significant bits of VALUE will be taken; all other bits are
+               ignored. So the DH2 bits are irrelevant and do not influence the
+               shift operation, independent of whether long-displacement is available
+               or not. */
+         case S390_ALU_LSH:  return s390_emit_SLLG(buf, dst, dst, 0, DISP20(value));
+         case S390_ALU_RSH:  return s390_emit_SRLG(buf, dst, dst, 0, DISP20(value));
+         case S390_ALU_RSHA: return s390_emit_SRAG(buf, dst, dst, 0, DISP20(value));
+         }
+         goto fail;
+      }
+      goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_alu_emit");
+}
+
+
+/* Emit a widening conversion (zero or sign extension) for the unop
+   whose source operand is insn->variant.unop.src. FROM_SIZE is the
+   source width in bytes (1, 2 or 4); insn->size is the target width.
+   SIGN_EXTEND != 0 selects sign extension, 0 selects zero extension.
+   Unexpected width combinations panic. */
+static UChar *
+s390_widen_emit(UChar *buf, const s390_insn *insn, UInt from_size,
+                Bool sign_extend)
+{
+   s390_opnd_RMI opnd = insn->variant.unop.src;
+
+   switch (opnd.tag) {
+   case S390_OPND_REG: {
+      UChar r1 = hregNumber(insn->variant.unop.dst);
+      UChar r2 = hregNumber(opnd.variant.reg);
+
+      switch (from_size) {
+      case 1:
+         /* Widening to a half-word is implemented like widening to a word
+            because the upper half-word will not be looked at. */
+         if (insn->size == 4 || insn->size == 2) {  /* 8 --> 32    8 --> 16 */
+            if (sign_extend)
+               return s390_emit_LBRw(buf, r1, r2);
+            else
+               return s390_emit_LLCRw(buf, r1, r2);
+         }
+         if (insn->size == 8) {  /* 8 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGBRw(buf, r1, r2);
+            else
+               return s390_emit_LLGCRw(buf, r1, r2);
+         }
+         goto fail;
+
+      case 2:
+         if (insn->size == 4) {  /* 16 --> 32 */
+            if (sign_extend)
+               return s390_emit_LHRw(buf, r1, r2);
+            else
+               return s390_emit_LLHRw(buf, r1, r2);
+         }
+         if (insn->size == 8) {  /* 16 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGHRw(buf, r1, r2);
+            else
+               return s390_emit_LLGHRw(buf, r1, r2);
+         }
+         goto fail;
+
+      case 4:
+         if (insn->size == 8) {  /* 32 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGFR(buf, r1, r2);
+            else
+               return s390_emit_LLGFR(buf, r1, r2);
+         }
+         goto fail;
+
+      default: /* unexpected "from" size */
+         goto fail;
+      }
+   }
+
+   case S390_OPND_AMODE: {
+      UChar r1 = hregNumber(insn->variant.unop.dst);
+      const s390_amode *src = opnd.variant.am;
+      UChar b = hregNumber(src->b);
+      UChar x = hregNumber(src->x);
+      Int   d = src->d;
+
+      switch (from_size) {
+      case 1:
+         if (insn->size == 4 || insn->size == 2) {
+            if (sign_extend)
+               return s390_emit_LB(buf, r1, x, b, DISP20(d));
+            else
+               return s390_emit_LLCw(buf, r1, x, b, DISP20(d));
+         }
+         if (insn->size == 8) {
+            if (sign_extend)
+               return s390_emit_LGB(buf, r1, x, b, DISP20(d));
+            else
+               return s390_emit_LLGC(buf, r1, x, b, DISP20(d));
+         }
+         goto fail;
+
+      case 2:
+         if (insn->size == 4) {  /* 16 --> 32 */
+            if (sign_extend == 0)
+               return s390_emit_LLHw(buf, r1, x, b, DISP20(d));
+
+            /* Sign-extending loads have both short- and long-displacement
+               forms; pick by amode tag. */
+            switch (src->tag) {
+            case S390_AMODE_B12:
+            case S390_AMODE_BX12:
+               return s390_emit_LH(buf, r1, x, b, d);
+
+            case S390_AMODE_B20:
+            case S390_AMODE_BX20:
+               return s390_emit_LHY(buf, r1, x, b, DISP20(d));
+            }
+            goto fail;
+         }
+         if (insn->size == 8) {  /* 16 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGH(buf, r1, x, b, DISP20(d));
+            else
+               return s390_emit_LLGH(buf, r1, x, b, DISP20(d));
+         }
+         goto fail;
+
+      case 4:
+         if (insn->size == 8) {  /* 32 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGF(buf, r1, x, b, DISP20(d));
+            else
+               return s390_emit_LLGF(buf, r1, x, b, DISP20(d));
+         }
+         goto fail;
+
+      default: /* unexpected "from" size */
+         goto fail;
+      }
+   }
+
+   case S390_OPND_IMMEDIATE: {
+      UChar r1 = hregNumber(insn->variant.unop.dst);
+      ULong value = opnd.variant.imm;
+
+      switch (from_size) {
+      case 1:
+         if (insn->size == 4 || insn->size == 2) {  /* 8 --> 32   8 --> 16 */
+            if (sign_extend) {
+               /* host can do the sign extension to 16-bit; LHI does the rest */
+               return s390_emit_LHI(buf, r1, (Short)(Char)(UChar)value);
+            } else {
+               return s390_emit_LHI(buf, r1, value);
+            }
+         }
+         if (insn->size == 8) {  /* 8 --> 64 */
+            if (sign_extend) {
+               /* host can do the sign extension to 16-bit; LGHI does the rest */
+               return s390_emit_LGHI(buf, r1, (Short)(Char)(UChar)value);
+            } else {
+               return s390_emit_LGHI(buf, r1, value);
+            }
+         }
+         goto fail;
+
+      case 2:
+         if (insn->size == 4) {  /* 16 --> 32 */
+            /* NOTE(review): LHI sign-extends its 16-bit immediate; for
+               the zero-extension case this presumes upstream never looks
+               at the propagated upper bits -- confirm. */
+            return s390_emit_LHI(buf, r1, value);
+         }
+         if (insn->size == 8) {  /* 16 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGHI(buf, r1, value);
+            else
+               return s390_emit_LLILL(buf, r1, value);
+         }
+         goto fail;
+
+      case 4:
+         if (insn->size == 8) {  /* 32 --> 64 */
+            if (sign_extend)
+               return s390_emit_LGFIw(buf, r1, value);
+            else
+               return s390_emit_LLILFw(buf, r1, value);
+         }
+         goto fail;
+
+      default: /* unexpected "from" size */
+         goto fail;
+      }
+   }
+   }
+
+ fail:
+   vpanic("s390_widen_emit");
+}
+
+
+/* Emit code to place the two's complement of the unop source operand
+   in the destination register. */
+static UChar *
+s390_negate_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_opnd_RMI src = insn->variant.unop.src;
+   UChar r1 = hregNumber(insn->variant.unop.dst);
+
+   switch (src.tag) {
+   case S390_OPND_REG: {
+      UChar r2 = hregNumber(src.variant.reg);
+
+      if (insn->size == 1 || insn->size == 2 || insn->size == 4)
+         return s390_emit_LCR(buf, r1, r2);
+      if (insn->size == 8)
+         return s390_emit_LCGR(buf, r1, r2);
+      break;
+   }
+
+   case S390_OPND_AMODE:
+      /* Fetch the operand into scratch register R0, then negate it. */
+      buf = s390_emit_load_mem(buf, insn->size, R0, src.variant.am);
+
+      if (insn->size == 1 || insn->size == 2 || insn->size == 4)
+         return s390_emit_LCR(buf, r1, R0);
+      if (insn->size == 8)
+         return s390_emit_LCGR(buf, r1, R0);
+      break;
+
+   case S390_OPND_IMMEDIATE: {
+      ULong neg = ~src.variant.imm + 1;   /* two's complement */
+
+      /* 1- and 2-byte values are loaded as 4-byte values; the extra
+         bytes are never looked at. */
+      if (insn->size == 1 || insn->size == 2 || insn->size == 4)
+         return s390_emit_load_32imm(buf, r1, neg);
+      if (insn->size == 8)
+         return s390_emit_load_64imm(buf, r1, neg);
+      break;
+   }
+   }
+
+   vpanic("s390_negate_emit");
+}
+
+
+/* Dispatch a unary operation to its emitter. Widening ops hand off to
+   s390_widen_emit with the source width in bytes and a sign/zero
+   flag; negation has a dedicated emitter. */
+static UChar *
+s390_insn_unop_emit(UChar *buf, const s390_insn *insn)
+{
+   switch (insn->variant.unop.tag) {
+   case S390_SIGN_EXTEND_8:  return s390_widen_emit(buf, insn, 1, 1);
+   case S390_SIGN_EXTEND_16: return s390_widen_emit(buf, insn, 2, 1);
+   case S390_SIGN_EXTEND_32: return s390_widen_emit(buf, insn, 4, 1);
+
+   case S390_ZERO_EXTEND_8:  return s390_widen_emit(buf, insn, 1, 0);
+   case S390_ZERO_EXTEND_16: return s390_widen_emit(buf, insn, 2, 0);
+   case S390_ZERO_EXTEND_32: return s390_widen_emit(buf, insn, 4, 0);
+
+   case S390_NEGATE:         return s390_negate_emit(buf, insn);
+   }
+
+   vpanic("s390_insn_unop_emit");
+}
+
+
+/* Only 4-byte and 8-byte operands are handled. 1-byte and 2-byte
+   comparisons will have been converted to 4-byte comparisons in
+   s390_isel_cc and should not occur here. */
+static UChar *
+s390_insn_test_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_opnd_RMI src = insn->variant.test.src;
+   UInt size = insn->size;
+
+   if (size != 4 && size != 8)
+      vpanic("s390_insn_test_emit");
+
+   switch (src.tag) {
+   case S390_OPND_REG: {
+      UInt r = hregNumber(src.variant.reg);
+
+      /* LOAD AND TEST of the register onto itself sets the cc. */
+      if (size == 4)
+         return s390_emit_LTR(buf, r, r);
+      return s390_emit_LTGR(buf, r, r);
+   }
+
+   case S390_OPND_AMODE: {
+      const s390_amode *am = src.variant.am;
+      UChar b = hregNumber(am->b);
+      UChar x = hregNumber(am->x);
+      Int   d = am->d;
+
+      /* Load-and-test straight from memory; R0 receives the value. */
+      if (size == 4)
+         return s390_emit_LTw(buf, R0, x, b, DISP20(d));
+      return s390_emit_LTGw(buf, R0, x, b, DISP20(d));
+   }
+
+   case S390_OPND_IMMEDIATE: {
+      ULong value = src.variant.imm;
+
+      /* Materialize the immediate in R0, then test it there. */
+      if (size == 4) {
+         buf = s390_emit_load_32imm(buf, R0, value);
+         return s390_emit_LTR(buf, R0, R0);
+      }
+      buf = s390_emit_load_64imm(buf, R0, value);
+      return s390_emit_LTGR(buf, R0, R0);
+   }
+
+   default:
+      break;
+   }
+
+   vpanic("s390_insn_test_emit");
+}
+
+
+/* Set the destination register to 1 if condition COND holds for the
+   current cc, and to 0 otherwise. A 64-bit value is produced. */
+static UChar *
+s390_insn_cc2bool_emit(UChar *buf, const s390_insn *insn)
+{
+   UChar dst = hregNumber(insn->variant.cc2bool.dst);
+   s390_cc_t cond = insn->variant.cc2bool.cond;
+
+   /* An always-true condition needs no cc inspection at all. */
+   if (cond == S390_CC_ALWAYS)
+      return s390_emit_LGHI(buf, dst, 1);
+
+   /* Shift the 4-bit condition mask left by the actual cc value and
+      pick out bit 3 of the result. */
+   buf = s390_emit_load_cc(buf, dst);                  /* dst = cc */
+   buf = s390_emit_LGHI(buf, R0, cond);                /* r0 = mask */
+   buf = s390_emit_SLLG(buf, dst, R0, dst, DISP20(0)); /* dst = mask << cc */
+   buf = s390_emit_SRLG(buf, dst, dst, 0,  DISP20(3)); /* dst >>= 3 */
+   return s390_emit_NILL(buf, dst, 1);                 /* dst &= 0x1 */
+}
+
+
+/* Only 4-byte and 8-byte operands are handled. */
+static UChar *
+s390_insn_cas_emit(UChar *buf, const s390_insn *insn)
+{
+   UChar r1, r3, b, old;
+   Int d;
+   s390_amode *am;
+
+   r1 = hregNumber(insn->variant.cas.op1); /* expected value */
+   r3 = hregNumber(insn->variant.cas.op3);
+   old= hregNumber(insn->variant.cas.old_mem);
+   am = insn->variant.cas.op2;
+   b  = hregNumber(am->b);
+   d  = am->d;
+
+   vassert(am->tag == S390_AMODE_B12 || am->tag == S390_AMODE_B20);
+
+   switch (insn->size) {
+   case 4:
+      /* r1 must not be overwritten. So copy it to R0 and let CS clobber it */
+      buf = s390_emit_LR(buf, R0, r1);
+      if (am->tag == S390_AMODE_B12)
+         buf = s390_emit_CS(buf, R0, r3, b, d);
+      else
+         buf = s390_emit_CSY(buf, R0, r3, b, DISP20(d));
+      /* Now copy R0 which has the old memory value to OLD */
+      return s390_emit_LR(buf, old, R0);
+
+   case 8:
+      /* r1 must not be overwritten. So copy it to R0 and let CS clobber it */
+      buf = s390_emit_LGR(buf, R0, r1);
+      buf = s390_emit_CSG(buf, R0, r3, b, DISP20(d));
+      /* Now copy R0 which has the old memory value to OLD */
+      return s390_emit_LGR(buf, old, R0);
+
+   default:
+      goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_cas_emit");
+}
+
+
+/* Only 4-byte and 8-byte operands are handled. */
static UChar *
s390_insn_cdas_emit(UChar *buf, const s390_insn *insn)
{
   UChar r1, r1p1, r3, /*r3p1,*/ b, old_high, old_low, scratch;
   Int d;
   s390_amode *am;
   s390_cdas *cdas = insn->variant.cdas.details;

   r1   = hregNumber(cdas->op1_high); /* expected value */
   r1p1 = hregNumber(cdas->op1_low);  /* expected value */
   r3   = hregNumber(cdas->op3_high);
   /* r3p1 = hregNumber(cdas->op3_low); */ /* unused */
   old_high = hregNumber(cdas->old_mem_high);
   old_low  = hregNumber(cdas->old_mem_low);
   scratch  = hregNumber(cdas->scratch);
   am = cdas->op2;
   b  = hregNumber(am->b);
   d  = am->d;

   /* The scratch register is hard-wired to r1; together with R0 it forms
      the even/odd pair (R0,R1) that the double compare-and-swap works on. */
   vassert(scratch == 1);
   vassert(am->tag == S390_AMODE_B12 || am->tag == S390_AMODE_B20);

   switch (insn->size) {
   case 4:
      /* r1, r1+1 must not be overwritten. So copy them to R0,scratch
         and let CDS/CDSY clobber it */
      buf = s390_emit_LR(buf, R0, r1);
      buf = s390_emit_LR(buf, scratch, r1p1);

      /* CDS takes a 12-bit displacement, CDSY a 20-bit one. */
      if (am->tag == S390_AMODE_B12)
         buf = s390_emit_CDS(buf, R0, r3, b, d);
      else
         buf = s390_emit_CDSY(buf, R0, r3, b, DISP20(d));

      /* Now copy R0,scratch which has the old memory value to OLD */
      buf = s390_emit_LR(buf, old_high, R0);
      buf = s390_emit_LR(buf, old_low,  scratch);
      return buf;

   case 8:
      /* r1, r1+1 must not be overwritten. So copy them to R0,scratch
         and let CDSG clobber it */
      buf = s390_emit_LGR(buf, R0, r1);
      buf = s390_emit_LGR(buf, scratch, r1p1);

      buf = s390_emit_CDSG(buf, R0, r3, b, DISP20(d));

      /* Now copy R0,scratch which has the old memory value to OLD */
      buf = s390_emit_LGR(buf, old_high, R0);
      buf = s390_emit_LGR(buf, old_low,  scratch);
      return buf;

   default:
      goto fail;
   }

 fail:
   vpanic("s390_insn_cdas_emit");
}
+
+
+/* Only 4-byte and 8-byte comparisons are handled. 1-byte and 2-byte
+   comparisons will have been converted to 4-byte comparisons in
+   s390_isel_cc and should not occur here. */
static UChar *
s390_insn_compare_emit(UChar *buf, const s390_insn *insn)
{
   s390_opnd_RMI op2;
   HReg op1;
   Bool signed_comparison;

   op1 = insn->variant.compare.src1;
   op2 = insn->variant.compare.src2;
   signed_comparison = insn->variant.compare.signed_comparison;

   /* Pick the compare insn by the form of the 2nd operand, operand size
      (4 or 8 bytes) and signedness. Only the condition code is set; R0
      is clobbered as scratch for wide immediates. */
   switch (op2.tag) {
   case S390_OPND_REG: {
      UInt r1 = hregNumber(op1);
      UInt r2 = hregNumber(op2.variant.reg);

      switch (insn->size) {
      case 4:
         if (signed_comparison)
            return s390_emit_CR(buf, r1, r2);
         else
            return s390_emit_CLR(buf, r1, r2);

      case 8:
         if (signed_comparison)
            return s390_emit_CGR(buf, r1, r2);
         else
            return s390_emit_CLGR(buf, r1, r2);

      default:
         goto fail;
      }
   }

   case S390_OPND_AMODE: {
      UChar r1 = hregNumber(op1);
      const s390_amode *am = op2.variant.am;
      UChar b = hregNumber(am->b);
      UChar x = hregNumber(am->x);
      Int   d = am->d;

      switch (insn->size) {
      case 4:
         /* The amode tag matters here: C/CL carry a 12-bit displacement,
            CY/CLY a 20-bit one. */
         switch (am->tag) {
         case S390_AMODE_B12:
         case S390_AMODE_BX12:
            if (signed_comparison)
               return s390_emit_C(buf, r1, x, b, d);
            else
               return s390_emit_CL(buf, r1, x, b, d);

         case S390_AMODE_B20:
         case S390_AMODE_BX20:
            if (signed_comparison)
               return s390_emit_CY(buf, r1, x, b, DISP20(d));
            else
               return s390_emit_CLY(buf, r1, x, b, DISP20(d));
         }
         goto fail;

      case 8:
         /* CG/CLG take a 20-bit displacement; a 12-bit one fits as well. */
         if (signed_comparison)
            return s390_emit_CG(buf, r1, x, b, DISP20(d));
         else
            return s390_emit_CLG(buf, r1, x, b, DISP20(d));

      default:
         goto fail;
      }
   }

   case S390_OPND_IMMEDIATE: {
      UChar r1 = hregNumber(op1);
      ULong value = op2.variant.imm;

      switch (insn->size) {
      case 4:
         if (signed_comparison)
            return s390_emit_CFIw(buf, r1, value);
         else
            return s390_emit_CLFIw(buf, r1, value);

      case 8:
         /* Use compare-immediate when the extended-immediate facility is
            present and the value fits the insn's 32-bit field; otherwise
            materialise the value in R0 and compare registers. */
         if (s390_host_has_eimm) {
            if (signed_comparison) {
               if (ulong_fits_signed_32bit(value))
                  return s390_emit_CGFI(buf, r1, value);
            } else {
               if (ulong_fits_unsigned_32bit(value))
                  return s390_emit_CLGFI(buf, r1, value);
            }
         }
         buf = s390_emit_load_64imm(buf, R0, value);
         if (signed_comparison)
            return s390_emit_CGR(buf, r1, R0);
         else
            return s390_emit_CLGR(buf, r1, R0);

      default:
         goto fail;
      }
   }

   default:
      goto fail;
   }

 fail:
   vpanic("s390_insn_compare_emit");
}
+
+
static UChar *
s390_insn_mul_emit(UChar *buf, const s390_insn *insn)
{
   s390_opnd_RMI op2;
   UChar r1;
   Bool signed_multiply;

   /* The register number identifying the register pair */
   r1  = hregNumber(insn->variant.mul.dst_hi);

   op2 = insn->variant.mul.op2;
   signed_multiply = insn->tag == S390_INSN_SMUL;

   switch (op2.tag) {
   case S390_OPND_REG: {
      UInt r2 = hregNumber(op2.variant.reg);

      switch (insn->size) {
      case 1:
      case 2:
      case 4:
         /* 1- and 2-byte operands are handled with the 32-bit multiply. */
         if (signed_multiply)
            return s390_emit_MR(buf, r1, r2);
         else
            return s390_emit_MLR(buf, r1, r2);

      case 8:
         /* No signed 64 x 64 -> 128 bit multiply is emitted here; the
            instruction selector must not generate this case. */
         if (signed_multiply)
            vpanic("s390_insn_mul_emit");
         else
            return s390_emit_MLGR(buf, r1, r2);

      default:
         goto fail;
      }
   }

   case S390_OPND_AMODE: {
      const s390_amode *am = op2.variant.am;
      UChar b = hregNumber(am->b);
      UChar x = hregNumber(am->x);
      Int   d = am->d;

      switch (insn->size) {
      case 1:
      case 2:
         /* Load bytes into scratch register R0, then multiply */
         buf = s390_emit_load_mem(buf, insn->size, R0, am);
         if (signed_multiply)
            return s390_emit_MR(buf, r1, R0);
         else
            return s390_emit_MLR(buf, r1, R0);

      case 4:
         /* M carries only a 12-bit displacement; MFYw/ML take 20 bits. */
         switch (am->tag) {
         case S390_AMODE_B12:
         case S390_AMODE_BX12:
            if (signed_multiply)
               return s390_emit_M(buf, r1, x, b, d);
            else
               return s390_emit_ML(buf, r1, x, b, DISP20(d));

         case S390_AMODE_B20:
         case S390_AMODE_BX20:
            if (signed_multiply)
               return s390_emit_MFYw(buf, r1, x, b, DISP20(d));
            else
               return s390_emit_ML(buf, r1, x, b, DISP20(d));
         }
         goto fail;

      case 8:
         if (signed_multiply)
            vpanic("s390_insn_mul_emit");
         else
            return s390_emit_MLG(buf, r1, x, b, DISP20(d));

      default:
         goto fail;
      }
   }

   case S390_OPND_IMMEDIATE: {
      ULong value = op2.variant.imm;

      switch (insn->size) {
      case 1:
      case 2:
      case 4:
         /* Materialise the immediate in R0, then multiply. */
         buf = s390_emit_load_32imm(buf, R0, value);
         if (signed_multiply)
            return s390_emit_MR(buf, r1, R0);
         else
            return s390_emit_MLR(buf, r1, R0);

      case 8:
         buf = s390_emit_load_64imm(buf, R0, value);
         if (signed_multiply)
            vpanic("s390_insn_mul_emit");
         else
            return s390_emit_MLGR(buf, r1, R0);

      default:
         goto fail;
      }
   }

   default:
      goto fail;
   }

 fail:
   vpanic("s390_insn_mul_emit");
}
+
+
static UChar *
s390_insn_div_emit(UChar *buf, const s390_insn *insn)
{
   s390_opnd_RMI op2;
   UChar r1;
   Bool signed_divide;

   /* r1 identifies the register pair holding the dividend. */
   r1  = hregNumber(insn->variant.div.op1_hi);
   op2 = insn->variant.div.op2;
   signed_divide = insn->tag == S390_INSN_SDIV;

   switch (op2.tag) {
   case S390_OPND_REG: {
      UInt r2 = hregNumber(op2.variant.reg);

      switch (insn->size) {
      case 4:
         if (signed_divide)
            return s390_emit_DR(buf, r1, r2);
         else
            return s390_emit_DLR(buf, r1, r2);

      case 8:
         /* No signed 128 / 64 bit divide is emitted here; the instruction
            selector must not generate this case. */
         if (signed_divide)
            vpanic("s390_insn_div_emit");
         else
            return s390_emit_DLGR(buf, r1, r2);

      default:
         goto fail;
      }
   }

   case S390_OPND_AMODE: {
      const s390_amode *am = op2.variant.am;
      UChar b = hregNumber(am->b);
      UChar x = hregNumber(am->x);
      Int   d = am->d;

      switch (insn->size) {
      case 4:
         switch (am->tag) {
         case S390_AMODE_B12:
         case S390_AMODE_BX12:
            if (signed_divide)
               return s390_emit_D(buf, r1, x, b, d);
            else
               return s390_emit_DL(buf, r1, x, b, DISP20(d));

         case S390_AMODE_B20:
         case S390_AMODE_BX20:
            /* D has no long-displacement form, so for 20-bit displacements
               load the divisor into R0 first and divide register-wise. */
            if (signed_divide) {
               buf = s390_emit_LY(buf, R0, x, b, DISP20(d));
               return s390_emit_DR(buf, r1, R0);
            } else
               return s390_emit_DL(buf, r1, x, b, DISP20(d));
         }
         goto fail;

      case 8:
         if (signed_divide)
            vpanic("s390_insn_div_emit");
         else
            return s390_emit_DLG(buf, r1, x, b, DISP20(d));

      default:
         goto fail;
      }
   }

   case S390_OPND_IMMEDIATE: {
      ULong value = op2.variant.imm;

      switch (insn->size) {
      case 4:
         /* Materialise the divisor in R0 first. */
         buf = s390_emit_load_32imm(buf, R0, value);
         if (signed_divide)
            return s390_emit_DR(buf, r1, R0);
         else
            return s390_emit_DLR(buf, r1, R0);

      case 8:
         buf = s390_emit_load_64imm(buf, R0, value);
         if (signed_divide)
            vpanic("s390_insn_div_emit");
         else
            return s390_emit_DLGR(buf, r1, R0);

      default:
         goto fail;
      }
   }

   default:
      goto fail;
   }

 fail:
   vpanic("s390_insn_div_emit");
}
+
+
+static UChar *
+s390_insn_divs_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_opnd_RMI op2;
+   UChar r1;
+
+   r1  = hregNumber(insn->variant.divs.rem);
+   op2 = insn->variant.divs.op2;
+
+   switch (op2.tag) {
+   case S390_OPND_REG: {
+      UInt r2 = hregNumber(op2.variant.reg);
+
+      return s390_emit_DSGR(buf, r1, r2);
+   }
+
+   case S390_OPND_AMODE: {
+      const s390_amode *am = op2.variant.am;
+      UChar b = hregNumber(am->b);
+      UChar x = hregNumber(am->x);
+      Int   d = am->d;
+
+      return s390_emit_DSG(buf, r1, x, b, DISP20(d));
+   }
+
+   case S390_OPND_IMMEDIATE: {
+      ULong value = op2.variant.imm;
+
+      buf = s390_emit_load_64imm(buf, R0, value);
+      return s390_emit_DSGR(buf, r1, R0);
+   }
+
+   default:
+      goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_divs_emit");
+}
+
+
static UChar *
s390_insn_clz_emit(UChar *buf, const s390_insn *insn)
{
   s390_opnd_RMI src;
   UChar r1, r1p1, r2, *p;

   r1   = hregNumber(insn->variant.clz.num_bits);
   r1p1 = hregNumber(insn->variant.clz.clobber);

   /* The destination must be the even half of an even/odd register pair;
      r1+1 is clobbered (FLOGR writes a pair -- see below). */
   vassert((r1 & 0x1) == 0);
   vassert(r1p1 == r1 + 1);

   p = buf;
   src = insn->variant.clz.src;

   /* Get operand and move it to r2 */
   switch (src.tag) {
   case S390_OPND_REG:
      r2 = hregNumber(src.variant.reg);
      break;

   case S390_OPND_AMODE: {
      const s390_amode *am = src.variant.am;
      UChar b = hregNumber(am->b);
      UChar x = hregNumber(am->x);
      Int   d = am->d;

      p  = s390_emit_LG(p, R0, x, b, DISP20(d));
      r2 = R0;
      break;
   }

   case S390_OPND_IMMEDIATE: {
      ULong value = src.variant.imm;

      p  = s390_emit_load_64imm(p, R0, value);
      r2 = R0;
      break;
   }

   default:
      goto fail;
   }

   /* Use FLOGR if you can */
   if (s390_host_has_eimm) {
      return s390_emit_FLOGR(p, r1, r2);
   }

   /* Fallback without the extended-immediate facility: shift the operand
      right until it becomes zero, counting down from 64. A zero operand
      leaves r1 == 64 (the BRC below skips the loop entirely).
      r0 = r2;
      r1 = 64;
      while (r0 != 0) {
        r1 -= 1;
        r0 >>= 1;
      }
   */
   p = s390_emit_LTGR(p, R0, r2);
   p = s390_emit_LLILL(p, r1,  64);

   /* BRC offsets are in half-words, hence the divisions by 2; the byte
      counts in the comments are the encoded lengths of the insns below. */
   p = s390_emit_BRC(p, S390_CC_E, (4 + 4 + 6 + 4 + 4)/ 2);  /* 4 bytes */
   p = s390_emit_AGHI(p, r1, (UShort)-1);         /* r1  -= 1;  4 bytes */
   p = s390_emit_SRLG(p, R0, R0, R0, DISP20(1));  /* r0 >>= 1;  6 bytes */
   p = s390_emit_LTGR(p, R0, R0);                 /* set cc     4 bytes */
   p = s390_emit_BRC(p, S390_CC_NE,               /*            4 bytes */
                     (UShort)(-(4 + 6 + 4) / 2));
   return p;

 fail:
   vpanic("s390_insn_clz_emit");
}
+
+
+/* Returns a value == BUF to denote failure, != BUF to denote success. */
static UChar *
s390_insn_helper_call_emit(UChar *buf, const s390_insn *insn)
{
   s390_cc_t cond;
   ULong target;
   UChar *ptmp = buf;   /* remembers the BRC slot for back-patching */
   s390_helper_call *helper_call = insn->variant.helper_call.details;

   cond = helper_call->cond;
   target = helper_call->target;

   if (cond != S390_CC_ALWAYS
       && helper_call->rloc.pri != RLPri_None) {
      /* The call might not happen (it isn't unconditional) and it
         returns a result.  In this case we will need to generate a
         control flow diamond to put 0x555..555 in the return
         register(s) in the case where the call doesn't happen.  If
         this ever becomes necessary, maybe copy code from the ARM
         equivalent.  Until that day, just give up. */
      return buf; /* To denote failure. */
   }

   if (cond != S390_CC_ALWAYS) {
      /* So we have something like this
         if (cond) call X;
         Y: ...
         We convert this into
         if (! cond) goto Y;        // BRC opcode; 4 bytes
         call X;
         Y:
      */
      /* 4 bytes (a BRC insn) to be filled in here */
      buf += 4;
   }

   /* Load the target address into a register, that
      (a) is not used for passing parameters to the helper and
      (b) can be clobbered by the callee
      (c) is not special to the BASR insn
      r1 is the only choice.
      Also, need to arrange for the return address be put into the
      link-register */
   buf = s390_emit_load_64imm(buf, 1, target);

   /* Stash away the client's FPC register because the helper might change it. */
   buf = s390_emit_STFPC(buf, S390_REGNO_STACK_POINTER, S390_OFFSET_SAVED_FPC_C);

   buf = s390_emit_BASR(buf, S390_REGNO_LINK_REGISTER, 1);      // call helper

   buf = s390_emit_LFPC(buf, S390_REGNO_STACK_POINTER,          // restore FPC
                        S390_OFFSET_SAVED_FPC_C);

   if (cond != S390_CC_ALWAYS) {
      /* Back-patch the BRC slot reserved above, now that the distance to
         skip is known. */
      Int delta = buf - ptmp;

      delta >>= 1;  /* immediate constant is #half-words */
      vassert(delta > 0 && delta < (1 << 16));
      s390_emit_BRC(ptmp, s390_cc_invert(cond), delta);
   }

   return buf;
}
+
+
+static UChar *
+s390_insn_cond_move_emit(UChar *buf, const s390_insn *insn)
+{
+   HReg dst;
+   s390_opnd_RMI src;
+   s390_cc_t cond;
+   UChar *p, *ptmp = 0;   /* avoid compiler warnings */
+
+   cond = insn->variant.cond_move.cond;
+   dst  = insn->variant.cond_move.dst;
+   src  = insn->variant.cond_move.src;
+
+   if (cond == S390_CC_NEVER) return buf;
+
+   p = buf;
+
+   if (s390_host_has_lsc) {
+      /* LOCx is not the preferred way to implement an unconditional load. */
+      if (cond != S390_CC_ALWAYS) goto use_branch_insn;
+
+      switch (src.tag) {
+      case S390_OPND_REG:
+         return s390_emit_LOCGR(p, cond, hregNumber(dst),
+                                hregNumber(src.variant.reg));
+
+      case S390_OPND_AMODE: {
+         const s390_amode *am = src.variant.am;
+
+         /* We cannot use LOCx for loads less than 4 bytes. In that case
+            load into R0 and then use LOCGR. Do the same if the amode uses
+            an index register. */
+         if (insn->size < 4 ||
+             am->tag == S390_AMODE_BX12 || am->tag == S390_AMODE_BX20) {
+            p = s390_emit_load_mem(p, insn->size, R0, am);
+            p = s390_emit_LOCGR(p, cond, hregNumber(dst), R0);
+            return p;
+         }
+
+         vassert(am->tag == S390_AMODE_B12 || am->tag == S390_AMODE_B20);
+         vassert(insn->size == 4 || insn->size == 8);
+
+         UInt b = hregNumber(am->b);
+         UInt d = am->d;
+
+         if (insn->size == 4) {
+            return s390_emit_LOC(p, hregNumber(dst), cond, b, DISP20(d));
+         }
+         return s390_emit_LOCG(p, hregNumber(dst), cond, b, DISP20(d));
+      }
+
+      case S390_OPND_IMMEDIATE: {
+         ULong value = src.variant.imm;
+
+         /* Load value into R0, then use LOCGR */
+         if (insn->size <= 4) {
+            p = s390_emit_load_32imm(p, R0, value);
+            return s390_emit_LOCGR(p, cond, hregNumber(dst), R0);
+         }
+
+         vassert(insn->size == 8);
+         p = s390_emit_load_64imm(p, R0, value);
+         return s390_emit_LOCGR(p, cond, hregNumber(dst), R0);
+      }
+      }
+   }
+
+use_branch_insn:
+   /* Branch (if cond fails) over move instrs */
+   if (cond != S390_CC_ALWAYS) {
+      /* Don't know how many bytes to jump over yet.
+         Make space for a BRC instruction (4 bytes) and fill in later. */
+      ptmp = p;   /*  to be filled in here */
+      p += 4;
+   }
+
+   // cond true: move src => dst
+
+   switch (src.tag) {
+   case S390_OPND_REG:
+      p = s390_emit_LGR(p, hregNumber(dst), hregNumber(src.variant.reg));
+      break;
+
+   case S390_OPND_AMODE:
+      p = s390_emit_load_mem(p, insn->size, hregNumber(dst), src.variant.am);
+      break;
+
+   case S390_OPND_IMMEDIATE: {
+      ULong value = src.variant.imm;
+      UInt  r = hregNumber(dst);
+
+      switch (insn->size) {
+      case 1:
+      case 2:
+         /* Load the immediate values as a 4 byte value. That does not hurt as
+            those extra bytes will not be looked at. Fall through .... */
+      case 4:
+         p = s390_emit_load_32imm(p, r, value);
+         break;
+
+      case 8:
+         p = s390_emit_load_64imm(p, r, value);
+         break;
+      }
+      break;
+   }
+
+   default:
+      goto fail;
+   }
+
+   if (cond != S390_CC_ALWAYS) {
+      Int delta = p - ptmp;
+
+      delta >>= 1;  /* immediate constant is #half-words */
+      vassert(delta > 0 && delta < (1 << 16));
+      s390_emit_BRC(ptmp, s390_cc_invert(cond), delta);
+   }
+
+   return p;
+
+ fail:
+   vpanic("s390_insn_cond_move_emit");
+}
+
+
+static UChar *
+s390_insn_bfp_triop_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r1 = hregNumber(insn->variant.bfp_triop.dst);
+   UInt r2 = hregNumber(insn->variant.bfp_triop.op2);
+   UInt r3 = hregNumber(insn->variant.bfp_triop.op3);
+
+   switch (insn->size) {
+   case 4:
+      switch (insn->variant.bfp_triop.tag) {
+      case S390_BFP_MADD:  return s390_emit_MAEBR(buf, r1, r3, r2);
+      case S390_BFP_MSUB:  return s390_emit_MSEBR(buf, r1, r3, r2);
+      default:  goto fail;
+      }
+      break;
+
+   case 8:
+      switch (insn->variant.bfp_triop.tag) {
+      case S390_BFP_MADD:  return s390_emit_MADBR(buf, r1, r3, r2);
+      case S390_BFP_MSUB:  return s390_emit_MSDBR(buf, r1, r3, r2);
+      default:  goto fail;
+      }
+      break;
+
+   default:  goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_bfp_triop_emit");
+}
+
+
+static UChar *
+s390_insn_bfp_binop_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r1 = hregNumber(insn->variant.bfp_binop.dst_hi);
+   UInt r2 = hregNumber(insn->variant.bfp_binop.op2_hi);
+
+   switch (insn->size) {
+   case 4:
+      switch (insn->variant.bfp_binop.tag) {
+      case S390_BFP_ADD:     return s390_emit_AEBR(buf, r1, r2);
+      case S390_BFP_SUB:     return s390_emit_SEBR(buf, r1, r2);
+      case S390_BFP_MUL:     return s390_emit_MEEBR(buf, r1, r2);
+      case S390_BFP_DIV:     return s390_emit_DEBR(buf, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   case 8:
+      switch (insn->variant.bfp_binop.tag) {
+      case S390_BFP_ADD:     return s390_emit_ADBR(buf, r1, r2);
+      case S390_BFP_SUB:     return s390_emit_SDBR(buf, r1, r2);
+      case S390_BFP_MUL:     return s390_emit_MDBR(buf, r1, r2);
+      case S390_BFP_DIV:     return s390_emit_DDBR(buf, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   case 16:
+      switch (insn->variant.bfp_binop.tag) {
+      case S390_BFP_ADD:     return s390_emit_AXBR(buf, r1, r2);
+      case S390_BFP_SUB:     return s390_emit_SXBR(buf, r1, r2);
+      case S390_BFP_MUL:     return s390_emit_MXBR(buf, r1, r2);
+      case S390_BFP_DIV:     return s390_emit_DXBR(buf, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   default:  goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_bfp_binop_emit");
+}
+
+
+static UChar *
+s390_insn_bfp_unop_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt  r1 = hregNumber(insn->variant.bfp_unop.dst_hi);
+   UInt  r2 = hregNumber(insn->variant.bfp_unop.op_hi);
+
+   switch (insn->variant.bfp_unop.tag) {
+   case S390_BFP_ABS:
+      switch (insn->size) {
+      case 4:   return s390_emit_LPEBR(buf, r1, r2);
+      case 8:   return s390_emit_LPDBR(buf, r1, r2);
+      case 16:  return s390_emit_LPXBR(buf, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   case S390_BFP_NABS:
+      switch (insn->size) {
+      case 4:   return s390_emit_LNEBR(buf, r1, r2);
+      case 8:   return s390_emit_LNDBR(buf, r1, r2);
+      case 16:  return s390_emit_LNXBR(buf, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   case S390_BFP_NEG:
+      switch (insn->size) {
+      case 4:   return s390_emit_LCEBR(buf, r1, r2);
+      case 8:   return s390_emit_LCDBR(buf, r1, r2);
+      case 16:  return s390_emit_LCXBR(buf, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   case S390_BFP_SQRT:
+      switch (insn->size) {
+      case 4:   return s390_emit_SQEBR(buf, r1, r2);
+      case 8:   return s390_emit_SQDBR(buf, r1, r2);
+      case 16:  return s390_emit_SQXBR(buf, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   default: goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_bfp_unop_emit");
+}
+
+
static UChar *
s390_insn_bfp_convert_emit(UChar *buf, const s390_insn *insn)
{
   UInt  r1 = hregNumber(insn->variant.bfp_convert.dst_hi);
   UInt  r2 = hregNumber(insn->variant.bfp_convert.op_hi);
   s390_bfp_round_t m3 = insn->variant.bfp_convert.rounding_mode;
   /* The IEEE-inexact-exception control is not modelled. So the
      m4 field is 0 (which is what GCC does, too) */
   const UInt m4 = 0;

   switch (insn->variant.bfp_convert.tag) {
      /* Convert to fixed */
   case S390_BFP_F32_TO_I32:  return s390_emit_CFEBR(buf, m3, r1, r2);
   case S390_BFP_F64_TO_I32:  return s390_emit_CFDBR(buf, m3, r1, r2);
   case S390_BFP_F128_TO_I32: return s390_emit_CFXBR(buf, m3, r1, r2);
   case S390_BFP_F32_TO_I64:  return s390_emit_CGEBR(buf, m3, r1, r2);
   case S390_BFP_F64_TO_I64:  return s390_emit_CGDBR(buf, m3, r1, r2);
   case S390_BFP_F128_TO_I64: return s390_emit_CGXBR(buf, m3, r1, r2);

      /* Convert to logical */
   case S390_BFP_F32_TO_U32:  return s390_emit_CLFEBR(buf, m3, m4, r1, r2);
   case S390_BFP_F64_TO_U32:  return s390_emit_CLFDBR(buf, m3, m4, r1, r2);
   case S390_BFP_F128_TO_U32: return s390_emit_CLFXBR(buf, m3, m4, r1, r2);
   case S390_BFP_F32_TO_U64:  return s390_emit_CLGEBR(buf, m3, m4, r1, r2);
   case S390_BFP_F64_TO_U64:  return s390_emit_CLGDBR(buf, m3, m4, r1, r2);
   case S390_BFP_F128_TO_U64: return s390_emit_CLGXBR(buf, m3, m4, r1, r2);

      /* Convert from fixed. A rounding mode of 0 is passed where the
         conversion is always exact (the target format can represent
         every source value), so the mode is irrelevant. */
   case S390_BFP_I32_TO_F32:  return s390_emit_CEFBRA(buf, m3, m4, r1, r2);
   case S390_BFP_I32_TO_F64:  return s390_emit_CDFBRA(buf,  0, m4, r1, r2);
   case S390_BFP_I32_TO_F128: return s390_emit_CXFBRA(buf,  0, m4, r1, r2);
   case S390_BFP_I64_TO_F32:  return s390_emit_CEGBRA(buf, m3, m4, r1, r2);
   case S390_BFP_I64_TO_F64:  return s390_emit_CDGBRA(buf, m3, m4, r1, r2);
   case S390_BFP_I64_TO_F128: return s390_emit_CXGBRA(buf,  0, m4, r1, r2);

      /* Convert from logical */
   case S390_BFP_U32_TO_F32:  return s390_emit_CELFBR(buf, m3, m4, r1, r2);
   case S390_BFP_U32_TO_F64:  return s390_emit_CDLFBR(buf, m3, m4, r1, r2);
   case S390_BFP_U32_TO_F128: return s390_emit_CXLFBR(buf, m3, m4, r1, r2);
   case S390_BFP_U64_TO_F32:  return s390_emit_CELGBR(buf, m3, m4, r1, r2);
   case S390_BFP_U64_TO_F64:  return s390_emit_CDLGBR(buf, m3, m4, r1, r2);
   case S390_BFP_U64_TO_F128: return s390_emit_CXLGBR(buf, m3, m4, r1, r2);

      /* Load lengthened (widening; always exact, no rounding fields) */
   case S390_BFP_F32_TO_F64:  return s390_emit_LDEBR(buf, r1, r2);
   case S390_BFP_F32_TO_F128: return s390_emit_LXEBR(buf, r1, r2);
   case S390_BFP_F64_TO_F128: return s390_emit_LXDBR(buf, r1, r2);

      /* Load rounded (narrowing; honours the rounding mode) */
   case S390_BFP_F64_TO_F32:  return s390_emit_LEDBRA(buf, m3, m4, r1, r2);
   case S390_BFP_F128_TO_F32: return s390_emit_LEXBRA(buf, m3, m4, r1, r2);
   case S390_BFP_F128_TO_F64: return s390_emit_LDXBRA(buf, m3, m4, r1, r2);

   default: goto fail;
   }

 fail:
   vpanic("s390_insn_bfp_convert_emit");
}
+
+
+static UChar *
+s390_insn_bfp_compare_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt dst = hregNumber(insn->variant.bfp_compare.dst);
+   UInt r1  = hregNumber(insn->variant.bfp_compare.op1_hi);
+   UInt r2  = hregNumber(insn->variant.bfp_compare.op2_hi);
+
+   switch (insn->size) {
+   case 4:  buf = s390_emit_CEBR(buf, r1, r2); break;
+   case 8:  buf = s390_emit_CDBR(buf, r1, r2); break;
+   case 16: buf = s390_emit_CXBR(buf, r1, r2); break;
+   default:  goto fail;
+   }
+
+   return s390_emit_load_cc(buf, dst);  /* Load condition code into DST */
+
+ fail:
+   vpanic("s390_insn_bfp_compare_emit");
+}
+
+
+static UChar *
+s390_insn_dfp_binop_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_dfp_binop *dfp_binop = insn->variant.dfp_binop.details;
+
+   UInt r1 = hregNumber(dfp_binop->dst_hi);
+   UInt r2 = hregNumber(dfp_binop->op2_hi);
+   UInt r3 = hregNumber(dfp_binop->op3_hi);
+   s390_dfp_round_t m4 = dfp_binop->rounding_mode;
+
+   switch (insn->size) {
+   case 8:
+      switch (dfp_binop->tag) {
+      case S390_DFP_ADD: return s390_emit_ADTRA(buf, r3, m4, r1, r2);
+      case S390_DFP_SUB: return s390_emit_SDTRA(buf, r3, m4, r1, r2);
+      case S390_DFP_MUL: return s390_emit_MDTRA(buf, r3, m4, r1, r2);
+      case S390_DFP_DIV: return s390_emit_DDTRA(buf, r3, m4, r1, r2);
+      case S390_DFP_QUANTIZE: return s390_emit_QADTR(buf, r3, m4, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   case 16:
+      switch (dfp_binop->tag) {
+      case S390_DFP_ADD:     return s390_emit_AXTRA(buf, r3, m4, r1, r2);
+      case S390_DFP_SUB:     return s390_emit_SXTRA(buf, r3, m4, r1, r2);
+      case S390_DFP_MUL:     return s390_emit_MXTRA(buf, r3, m4, r1, r2);
+      case S390_DFP_DIV:     return s390_emit_DXTRA(buf, r3, m4, r1, r2);
+      case S390_DFP_QUANTIZE: return s390_emit_QAXTR(buf, r3, m4, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   default:  goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_dfp_binop_emit");
+}
+
+
+static UChar *
+s390_insn_dfp_reround_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r1 = hregNumber(insn->variant.dfp_reround.dst_hi);
+   UInt r2 = hregNumber(insn->variant.dfp_reround.op2);
+   UInt r3 = hregNumber(insn->variant.dfp_reround.op3_hi);
+   s390_dfp_round_t m4 = insn->variant.dfp_reround.rounding_mode;
+
+   switch (insn->size) {
+   case 8:
+      return s390_emit_RRDTR(buf, r3, m4, r1, r2);
+
+   case 16:
+      return s390_emit_RRXTR(buf, r3, m4, r1, r2);
+
+   default: goto fail;
+   }
+ fail:
+   vpanic("s390_insn_dfp_reround_emit");
+}
+
+
+static UChar *
+s390_insn_dfp_unop_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt  r1 = hregNumber(insn->variant.dfp_unop.dst_hi);
+   UInt  r2 = hregNumber(insn->variant.dfp_unop.op_hi);
+
+   switch (insn->variant.dfp_unop.tag) {
+   case S390_DFP_EXTRACT_EXP_D64:  return s390_emit_EEDTR(buf, r1, r2); break;
+   case S390_DFP_EXTRACT_EXP_D128: return s390_emit_EEXTR(buf, r1, r2); break;
+   case S390_DFP_EXTRACT_SIG_D64:  return s390_emit_ESDTR(buf, r1, r2); break;
+   case S390_DFP_EXTRACT_SIG_D128: return s390_emit_ESXTR(buf, r1, r2); break;
+   default: goto fail;
+   }
+ fail:
+   vpanic("s390_insn_dfp_unop_emit");
+}
+
+
+static UChar *
+s390_insn_dfp_intop_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt r1 = hregNumber(insn->variant.dfp_intop.dst_hi);
+   UInt r2 = hregNumber(insn->variant.dfp_intop.op2);
+   UInt r3 = hregNumber(insn->variant.dfp_intop.op3_hi);
+
+   switch (insn->size) {
+   case 8:
+      switch (insn->variant.dfp_intop.tag) {
+      case S390_DFP_SHIFT_LEFT:  return s390_emit_SLDT(buf, r3, r1, r2);
+      case S390_DFP_SHIFT_RIGHT: return s390_emit_SRDT(buf, r3, r1, r2);
+      case S390_DFP_INSERT_EXP:  return s390_emit_IEDTR(buf, r3, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   case 16:
+      switch (insn->variant.dfp_intop.tag) {
+      case S390_DFP_SHIFT_LEFT:  return s390_emit_SLXT(buf, r3, r1, r2);
+      case S390_DFP_SHIFT_RIGHT: return s390_emit_SRXT(buf, r3, r1, r2);
+      case S390_DFP_INSERT_EXP:  return s390_emit_IEXTR(buf, r3, r1, r2);
+      default:  goto fail;
+      }
+      break;
+
+   default: goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_dfp_intop_emit");
+}
+
+
static UChar *
s390_insn_dfp_compare_emit(UChar *buf, const s390_insn *insn)
{
   UInt dst = hregNumber(insn->variant.dfp_compare.dst);
   UInt r1  = hregNumber(insn->variant.dfp_compare.op1_hi);
   UInt r2  = hregNumber(insn->variant.dfp_compare.op2_hi);

   /* Select the D64 or D128 compare; the _EXP tags select the
      exponent-comparing variants (CEDTR/CEXTR). */
   switch (insn->size) {
   case 8:
      switch(insn->variant.dfp_compare.tag) {
      case S390_DFP_COMPARE:     buf = s390_emit_CDTR(buf, r1, r2); break;
      case S390_DFP_COMPARE_EXP: buf = s390_emit_CEDTR(buf, r1, r2); break;
      default: goto fail;
      }
      break;

   case 16:
      switch(insn->variant.dfp_compare.tag) {
      case S390_DFP_COMPARE:     buf = s390_emit_CXTR(buf, r1, r2); break;
      case S390_DFP_COMPARE_EXP: buf = s390_emit_CEXTR(buf, r1, r2); break;
      default: goto fail;
      }
      break;

   default:  goto fail;
   }

   return s390_emit_load_cc(buf, dst);  /* Load condition code into DST */

 fail:
   vpanic("s390_insn_dfp_compare_emit");
}
+
+
+/* Emit a DFP conversion: to/from fixed and logical integers, and
+   load-lengthened / load-rounded between DFP formats.  M3 carries the
+   requested rounding mode where the instruction takes one. */
+static UChar *
+s390_insn_dfp_convert_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt  r1 = hregNumber(insn->variant.dfp_convert.dst_hi);
+   UInt  r2 = hregNumber(insn->variant.dfp_convert.op_hi);
+   s390_dfp_round_t m3 = insn->variant.dfp_convert.rounding_mode;
+   /* The IEEE-inexact-exception control is not modelled. So the
+      m4 field is 0 (which is what GCC does, too) */
+   const UInt m4 = 0;
+
+   switch (insn->variant.dfp_convert.tag) {
+
+      /* Convert to fixed */
+   case S390_DFP_D64_TO_I32:  return s390_emit_CFDTR(buf, m3, m4, r1, r2);
+   case S390_DFP_D128_TO_I32: return s390_emit_CFXTR(buf, m3, m4, r1, r2);
+   case S390_DFP_D64_TO_I64:  return s390_emit_CGDTR(buf, m3, m4, r1, r2);
+   case S390_DFP_D128_TO_I64: return s390_emit_CGXTR(buf, m3, m4, r1, r2);
+
+      /* Convert to logical */
+   case S390_DFP_D64_TO_U32:  return s390_emit_CLFDTR(buf, m3, m4, r1, r2);
+   case S390_DFP_D128_TO_U32: return s390_emit_CLFXTR(buf, m3, m4, r1, r2);
+   case S390_DFP_D64_TO_U64:  return s390_emit_CLGDTR(buf, m3, m4, r1, r2);
+   case S390_DFP_D128_TO_U64: return s390_emit_CLGXTR(buf, m3, m4, r1, r2);
+
+      /* Convert from fixed.  NOTE(review): three of these pass a literal 0
+         instead of m3 -- presumably because widening from a 32/64-bit int
+         into a wider DFP format is always exact -- while I64->D64 passes
+         m3.  Confirm against the PoO that this asymmetry is intended. */
+   case S390_DFP_I32_TO_D64:  return s390_emit_CDFTR(buf, 0, m4, r1, r2);
+   case S390_DFP_I32_TO_D128: return s390_emit_CXFTR(buf, 0, m4, r1, r2);
+   case S390_DFP_I64_TO_D64:  return s390_emit_CDGTRA(buf, m3, m4, r1, r2);
+   case S390_DFP_I64_TO_D128: return s390_emit_CXGTR(buf, 0, m4, r1, r2);
+
+      /* Convert from logical */
+   case S390_DFP_U32_TO_D64:  return s390_emit_CDLFTR(buf, m3, m4, r1, r2);
+   case S390_DFP_U64_TO_D64:  return s390_emit_CDLGTR(buf, m3, m4, r1, r2);
+   case S390_DFP_U32_TO_D128: return s390_emit_CXLFTR(buf, m3, m4, r1, r2);
+   case S390_DFP_U64_TO_D128: return s390_emit_CXLGTR(buf, m3, m4, r1, r2);
+
+      /* Load lengthened */
+   case S390_DFP_D32_TO_D64:   return s390_emit_LDETR(buf, m4, r1, r2);
+   case S390_DFP_D64_TO_D128:  return s390_emit_LXDTR(buf, m4, r1, r2);
+
+      /* Load rounded */
+   case S390_DFP_D64_TO_D32:   return s390_emit_LEDTR(buf, m3, m4, r1, r2);
+   case S390_DFP_D128_TO_D64:  return s390_emit_LDXTR(buf, m3, m4, r1, r2);
+
+   default: goto fail;
+   }
+
+ fail:
+   vpanic("s390_insn_dfp_convert_emit");
+}
+
+
+/* Emit a BFP <-> DFP format conversion by way of the PFPO instruction.
+   The function code (conversion kind, shifted left by 8) combined with
+   the rounding mode is passed to PFPO in R0. */
+static UChar *
+s390_insn_fp_convert_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt pfpo;
+   s390_fp_convert *fp_convert = insn->variant.fp_convert.details;
+   s390_dfp_round_t rm = fp_convert->rounding_mode;
+
+   /* Only rounding-mode values 0, 1 and 8..15 are accepted; 2..7 are
+      presumably not valid in the PFPO rounding field -- confirm against
+      the PoO description of PFPO. */
+   vassert(rm < 2 || rm > 7);
+
+   switch (fp_convert->tag) {
+   case S390_FP_F32_TO_D32:   pfpo = S390_PFPO_F32_TO_D32   << 8; break;
+   case S390_FP_F32_TO_D64:   pfpo = S390_PFPO_F32_TO_D64   << 8; break;
+   case S390_FP_F32_TO_D128:  pfpo = S390_PFPO_F32_TO_D128  << 8; break;
+   case S390_FP_F64_TO_D32:   pfpo = S390_PFPO_F64_TO_D32   << 8; break;
+   case S390_FP_F64_TO_D64:   pfpo = S390_PFPO_F64_TO_D64   << 8; break;
+   case S390_FP_F64_TO_D128:  pfpo = S390_PFPO_F64_TO_D128  << 8; break;
+   case S390_FP_F128_TO_D32:  pfpo = S390_PFPO_F128_TO_D32  << 8; break;
+   case S390_FP_F128_TO_D64:  pfpo = S390_PFPO_F128_TO_D64  << 8; break;
+   case S390_FP_F128_TO_D128: pfpo = S390_PFPO_F128_TO_D128 << 8; break;
+   case S390_FP_D32_TO_F32:   pfpo = S390_PFPO_D32_TO_F32   << 8; break;
+   case S390_FP_D32_TO_F64:   pfpo = S390_PFPO_D32_TO_F64   << 8; break;
+   case S390_FP_D32_TO_F128:  pfpo = S390_PFPO_D32_TO_F128  << 8; break;
+   case S390_FP_D64_TO_F32:   pfpo = S390_PFPO_D64_TO_F32   << 8; break;
+   case S390_FP_D64_TO_F64:   pfpo = S390_PFPO_D64_TO_F64   << 8; break;
+   case S390_FP_D64_TO_F128:  pfpo = S390_PFPO_D64_TO_F128  << 8; break;
+   case S390_FP_D128_TO_F32:  pfpo = S390_PFPO_D128_TO_F32  << 8; break;
+   case S390_FP_D128_TO_F64:  pfpo = S390_PFPO_D128_TO_F64  << 8; break;
+   case S390_FP_D128_TO_F128: pfpo = S390_PFPO_D128_TO_F128 << 8; break;
+   default: goto fail;
+   }
+
+   /* Combine function code with rounding mode and hand to PFPO via R0. */
+   pfpo = pfpo | rm;
+   buf = s390_emit_load_32imm(buf, R0, pfpo);
+   buf = s390_emit_PFPO(buf);
+   return buf;
+
+ fail:
+   vpanic("s390_insn_fp_convert_emit");
+}
+
+
+/* Emit a memory fence.  BCR with mask 15 and register 0 acts as a
+   serialization point on s390. */
+static UChar *
+s390_insn_mfence_emit(UChar *buf, const s390_insn *insn)
+{
+   UChar *p = s390_emit_BCR(buf, 0xF, 0x0);
+
+   return p;
+}
+
+
+/* Emit code to store immediate VALUE to the memory location DST.
+   Strategy: XC dst,dst for zero (any size); MVI for a single byte;
+   MVHHI/MVHI/MVGHI when the general-instructions-extension facility is
+   present and the value fits in a signed 16-bit field; otherwise load
+   the value into R0 and store it.  Panics on unsupported sizes. */
+static UChar *
+s390_insn_mimm_emit(UChar *buf, const s390_insn *insn)
+{
+   s390_amode *am = insn->variant.mimm.dst;
+   UChar b = hregNumber(am->b);
+   Int   d = am->d;
+   ULong value = insn->variant.mimm.value;
+
+   if (value == 0) {
+      /* XC of a region with itself zeroes it; the length field is size-1. */
+      return s390_emit_XC(buf, insn->size - 1, b, d, b, d);
+   }
+
+   if (insn->size == 1) {
+      return s390_emit_MVI(buf, value & 0xFF, b, d);
+   }
+
+   if (s390_host_has_gie && ulong_fits_signed_16bit(value)) {
+      value &= 0xFFFF;
+      switch (insn->size) {
+      case 2: return s390_emit_MVHHI(buf, b, d, value);
+      case 4: return s390_emit_MVHI(buf,  b, d, value);
+      case 8: return s390_emit_MVGHI(buf, b, d, value);
+      }
+   } else {
+      // Load value to R0, then store.
+      switch (insn->size) {
+      case 2:
+         buf = s390_emit_LHI(buf, R0, value & 0xFFFF);
+         return s390_emit_STH(buf, R0, 0, b, d);
+      case 4:
+         buf = s390_emit_load_32imm(buf, R0, value);
+         return s390_emit_ST(buf, R0, 0, b, d);
+      case 8:
+         buf = s390_emit_load_64imm(buf, R0, value);
+         return s390_emit_STG(buf, R0, 0, b, DISP20(d));
+      }
+   }
+
+   /* Reached only for an unsupported operand size. */
+   vpanic("s390_insn_mimm_emit");
+}
+
+
+/* Emit code to add a small immediate (delta) to the integer at memory
+   location DST.  Uses ASI for 4-byte operands and AGSI for 8-byte
+   operands. */
+static UChar *
+s390_insn_madd_emit(UChar *buf, const s390_insn *insn)
+{
+   const s390_amode *dst = insn->variant.madd.dst;
+   UChar base = hregNumber(dst->b);
+   Int   disp = dst->d;
+
+   if (insn->size != 4) {
+      /* 8-byte operand */
+      return s390_emit_AGSI(buf, insn->variant.madd.delta, base, DISP20(disp));
+   }
+
+   /* 4-byte operand */
+   return s390_emit_ASI(buf, insn->variant.madd.delta, base, DISP20(disp));
+}
+
+
+/* Emit code to set the BFP rounding mode in the FPC register to the
+   value held in GPR MODE.  Clobbers R0 (used as scratch). */
+static UChar *
+s390_insn_set_fpc_bfprm_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt mode = hregNumber(insn->variant.set_fpc_bfprm.mode);
+
+   /* Copy FPC from guest state to R0 and OR in the new rounding mode */
+   buf = s390_emit_L(buf, R0, 0, S390_REGNO_GUEST_STATE_POINTER,
+                     S390X_GUEST_OFFSET(guest_fpc));   // r0 = guest_fpc
+
+   buf = s390_emit_NILL(buf, R0, 0xFFF8); /* Clear out right-most 3 bits */
+   buf = s390_emit_OR(buf, R0, mode);     /* OR in the new rounding mode */
+   buf = s390_emit_SFPC(buf, R0);         /* Load FPC register from R0 */
+
+   return buf;
+}
+
+
+/* Emit code to set the DFP rounding mode in the FPC register to the
+   value held in GPR MODE.  Clobbers R0, and note that the SLL below
+   shifts the MODE register itself in place. */
+static UChar *
+s390_insn_set_fpc_dfprm_emit(UChar *buf, const s390_insn *insn)
+{
+   UInt mode = hregNumber(insn->variant.set_fpc_dfprm.mode);
+
+   /* Copy FPC from guest state to R0 and OR in the new rounding mode */
+   buf = s390_emit_L(buf, R0, 0, S390_REGNO_GUEST_STATE_POINTER,
+                     S390X_GUEST_OFFSET(guest_fpc));   // r0 = guest_fpc
+
+   /* DFP rounding mode is set at bit position 25:27 in FPC register */
+   buf = s390_emit_NILL(buf, R0, 0xFF8F); /* Clear out 25:27 bits */
+   buf = s390_emit_SLL(buf, mode, 0, 4);  /* bring mode to 25:27 bits */
+   buf = s390_emit_OR(buf, R0, mode);     /* OR in the new rounding mode */
+   buf = s390_emit_SFPC(buf, R0);         /* Load FPC register from R0 */
+
+   return buf;
+}
+
+
+/* Define convenience functions needed for translation chaining.
+   Any changes need to be applied to the functions in concert. */
+
+/* Check whether the bytes at P begin a BRCL insn whose mask equals
+   CONDITION.  BRCL has opcode byte 0xc0 and a second byte whose low
+   nibble is 4 and whose high nibble carries the mask. */
+static __inline__ Bool
+s390_insn_is_BRCL(const UChar *p, UChar condition)
+{
+   if (p[0] != 0xc0)
+      return False;
+
+   return p[1] == ((condition << 4) | 0x04);
+}
+
+/* Check whether the two bytes at P are "BCR 15,REG", i.e. an
+   unconditional branch to the address in REG. */
+static __inline__ Bool
+s390_insn_is_BR(const UChar *p, UChar reg)
+{
+   if (p[0] != 0x07)
+      return False;
+
+   return p[1] == (0xF0 | reg);
+}
+
+
+/* The length of the BASR insn */
+#define S390_BASR_LEN  2
+
+
+/* Load the 64-bit VALUE into REG. Note that this function must NOT
+   optimise the generated code by looking at the value. I.e. using
+   LGHI if value == 0 would be very wrong.  The sequence must always
+   have the fixed length reported by s390_tchain_load64_len, because
+   it is later verified and patched in place. */
+static UChar *
+s390_tchain_load64(UChar *buf, UChar regno, ULong value)
+{
+   UChar *begin = buf;
+
+   if (s390_host_has_eimm) {
+      /* Do it in two steps: upper half [0:31] and lower half [32:63] */
+      buf = s390_emit_IIHF(buf, regno, value >> 32);
+      buf = s390_emit_IILF(buf, regno, value & 0xFFFFFFFF);
+   } else {
+      /* Insert the four 16-bit halfwords one at a time, lowest first. */
+      buf = s390_emit_IILL(buf, regno, value & 0xFFFF);
+      value >>= 16;
+      buf = s390_emit_IILH(buf, regno, value & 0xFFFF);
+      value >>= 16;
+      buf = s390_emit_IIHL(buf, regno, value & 0xFFFF);
+      value >>= 16;
+      buf = s390_emit_IIHH(buf, regno, value & 0xFFFF);
+   }
+
+   /* The emitted length must agree with the advertised fixed length. */
+   vassert(buf - begin == s390_tchain_load64_len());
+
+   return buf;
+}
+
+/* Return number of bytes generated by s390_tchain_load64 */
+static UInt
+s390_tchain_load64_len(void)
+{
+   /* With the extended-immediate facility: IIHF + IILF, 6 bytes each.
+      Otherwise: IIHH + IIHL + IILH + IILL, 4 bytes each. */
+   return s390_host_has_eimm ? 12 : 16;
+}
+
+/* Verify that CODE is the code sequence generated by s390_tchain_load64
+   to load VALUE into REGNO. Return pointer to the byte following the
+   insn sequence.  The register number is encoded in the high nibble of
+   each insn's second byte, hence REGMASK. */
+static const UChar *
+s390_tchain_verify_load64(const UChar *code, UChar regno, ULong value)
+{
+   UInt regmask = regno << 4;
+   UInt hw;
+
+   if (s390_host_has_eimm) {
+      /* Check for IIHF */
+      vassert(code[0]  ==  0xC0);
+      vassert(code[1]  == (0x08 | regmask));
+      /* NOTE(review): reading 4 bytes through a UInt pointer here is
+         potentially misaligned and sidesteps strict aliasing; it works
+         with GCC on s390 but a byte-wise compare (as in the else branch)
+         would be cleaner. */
+      vassert(*(const UInt *)&code[2] == (value >> 32));
+      /* Check for IILF */
+      vassert(code[6]  ==  0xC0);
+      vassert(code[7]  == (0x09 | regmask));
+      vassert(*(const UInt *)&code[8] == (value & 0xFFFFFFFF));
+   } else {
+      /* Check for IILL */
+      hw = value & 0xFFFF;
+      vassert(code[0]  ==  0xA5);
+      vassert(code[1]  == (0x03 | regmask));
+      vassert(code[2]  == (hw >> 8));
+      vassert(code[3]  == (hw & 0xFF));
+
+      /* Check for IILH */
+      hw = (value >> 16) & 0xFFFF;
+      vassert(code[4]  ==  0xA5);
+      vassert(code[5]  == (0x02 | regmask));
+      vassert(code[6]  == (hw >> 8));
+      vassert(code[7]  == (hw & 0xFF));
+
+      /* Check for IIHL */
+      hw = (value >> 32) & 0xFFFF;
+      vassert(code[8]  ==  0xA5);
+      vassert(code[9]  == (0x01 | regmask));
+      vassert(code[10] == (hw >> 8));
+      vassert(code[11] == (hw & 0xFF));
+
+      /* Check for IIHH */
+      hw = (value >> 48) & 0xFFFF;
+      vassert(code[12] ==  0xA5);
+      vassert(code[13] == (0x00 | regmask));
+      vassert(code[14] == (hw >> 8));
+      vassert(code[15] == (hw & 0xFF));
+   }
+
+   return code + s390_tchain_load64_len();
+}
+
+/* CODE points to the code sequence as generated by s390_tchain_load64.
+   Change the loaded value to IMM64. Return pointer to the byte following
+   the patched code sequence. */
+static UChar *
+s390_tchain_patch_load64(UChar *code, ULong imm64)
+{
+   if (s390_host_has_eimm) {
+      /* Patch IIHF */
+      /* NOTE(review): these 4-byte stores through a UInt pointer are at
+         odd offsets into the insn stream (misaligned) and bypass strict
+         aliasing; works with GCC on s390, but byte stores as in the else
+         branch would be cleaner. */
+      *(UInt *)&code[2] = imm64 >> 32;
+      /* Patch IILF */
+      *(UInt *)&code[8] = imm64 & 0xFFFFFFFF;
+   } else {
+      /* Patch the 16-bit immediates of IILL, IILH, IIHL, IIHH in turn,
+         consuming IMM64 from its low byte upwards. */
+      code[3]  = imm64 & 0xFF; imm64 >>= 8;
+      code[2]  = imm64 & 0xFF; imm64 >>= 8;
+      code[7]  = imm64 & 0xFF; imm64 >>= 8;
+      code[6]  = imm64 & 0xFF; imm64 >>= 8;
+      code[11] = imm64 & 0xFF; imm64 >>= 8;
+      code[10] = imm64 & 0xFF; imm64 >>= 8;
+      code[15] = imm64 & 0xFF; imm64 >>= 8;
+      code[14] = imm64 & 0xFF; imm64 >>= 8;
+   }
+
+   return code + s390_tchain_load64_len();
+}
+
+
+/* Emit a direct transfer to a known guest address: store the new guest
+   IA, then call through the dispatcher's chain-me stub.  The stub call
+   (BASR + load64 + BCR) is the patchable region that chainXDirect_S390
+   later rewrites.
+   NB: what goes on here has to be very closely coordinated with the
+   chainXDirect_S390 and unchainXDirect_S390 below. */
+static UChar *
+s390_insn_xdirect_emit(UChar *buf, const s390_insn *insn,
+                       const void *disp_cp_chain_me_to_slowEP,
+                       const void *disp_cp_chain_me_to_fastEP)
+{
+   /* We're generating chain-me requests here, so we need to be
+      sure this is actually allowed -- no-redir translations can't
+      use chain-me's.  Hence: */
+   vassert(disp_cp_chain_me_to_slowEP != NULL);
+   vassert(disp_cp_chain_me_to_fastEP != NULL);
+
+   /* Use ptmp for backpatching conditional jumps. */
+   UChar *ptmp = buf;
+
+   /* First off, if this is conditional, create a conditional
+      jump over the rest of it. */
+   s390_cc_t cond = insn->variant.xdirect.cond;
+
+   if (cond != S390_CC_ALWAYS) {
+      /* So we have something like this
+         if (cond) do_xdirect;
+         Y: ...
+         We convert this into
+         if (! cond) goto Y;        // BRC opcode; 4 bytes
+         do_xdirect;
+         Y:
+      */
+      /* 4 bytes (a BRC insn) to be filled in here */
+      buf += 4;
+   }
+
+   /* Update the guest IA. */
+   buf = s390_emit_load_64imm(buf, R0, insn->variant.xdirect.dst);
+
+   const s390_amode *amode = insn->variant.xdirect.guest_IA;
+   vassert(amode->tag == S390_AMODE_B12);
+   UInt b = hregNumber(amode->b);
+   UInt d = amode->d;
+
+   buf = s390_emit_STG(buf, R0, 0, b, DISP20(d));
+
+   /* Load the chosen entry point into the scratch reg */
+   const void *disp_cp_chain_me;
+
+   disp_cp_chain_me =
+      insn->variant.xdirect.to_fast_entry ? disp_cp_chain_me_to_fastEP 
+                                          : disp_cp_chain_me_to_slowEP;
+   /* Get the address of the beginning of the load64 code sequence into %r1.
+      Do not change the register! This is part of the protocol with the
+      dispatcher. */
+   buf = s390_emit_BASR(buf, 1, R0);
+
+   /* --- FIRST PATCHABLE BYTE follows (must not modify %r1) --- */
+   Addr64 addr = (Addr)disp_cp_chain_me;
+   buf = s390_tchain_load64(buf, S390_REGNO_TCHAIN_SCRATCH, addr);
+
+   /* goto *tchain_scratch */
+   buf = s390_emit_BCR(buf, S390_CC_ALWAYS, S390_REGNO_TCHAIN_SCRATCH);
+
+   /* --- END of PATCHABLE BYTES --- */
+
+   /* Fix up the conditional jump, if there was one. */
+   if (cond != S390_CC_ALWAYS) {
+      Int delta = buf - ptmp;
+
+      delta >>= 1;  /* immediate constant is #half-words */
+      vassert(delta > 0 && delta < (1 << 16));
+      s390_emit_BRC(ptmp, s390_cc_invert(cond), delta);
+   }
+
+   return buf;
+}
+
+/* Return the number of patchable bytes from an xdirect insn. */
+static UInt
+s390_xdirect_patchable_len(void)
+{
+   /* The patchable region consists of the BASR plus the load-64
+      code sequence (see s390_insn_xdirect_emit). */
+   UInt len = S390_BASR_LEN;
+
+   return len + s390_tchain_load64_len();
+}
+
+
+/* Emit an indirect transfer: store the guest IA held in a register,
+   then jump to the dispatcher's xindir entry point. */
+static UChar *
+s390_insn_xindir_emit(UChar *buf, const s390_insn *insn,
+                      const void *disp_cp_xindir)
+{
+   /* We're generating transfers that could lead indirectly to a
+      chain-me, so we need to be sure this is actually allowed --
+      no-redir translations are not allowed to reach normal
+      translations without going through the scheduler.  That means
+      no XDirects or XIndirs out from no-redir translations.
+      Hence: */
+   vassert(disp_cp_xindir != NULL);
+
+   /* Use ptmp for backpatching conditional jumps. */
+   UChar *ptmp = buf;
+
+   /* First off, if this is conditional, create a conditional
+      jump over the rest of it. */
+   /* NOTE(review): the condition is read via variant.xdirect rather than
+      variant.xindir; this relies on COND having the same offset in both
+      union members -- confirm, or switch to variant.xindir.cond. */
+   s390_cc_t cond = insn->variant.xdirect.cond;
+
+   if (cond != S390_CC_ALWAYS) {
+      /* So we have something like this
+         if (cond) do_xindir;
+         Y: ...
+         We convert this into
+         if (! cond) goto Y;        // BRC opcode; 4 bytes
+         do_xindir;
+         Y:
+      */
+      /* 4 bytes (a BRC insn) to be filled in here */
+      buf += 4;
+   }
+
+   /* Update the guest IA with the address in xindir.dst. */
+   const s390_amode *amode = insn->variant.xindir.guest_IA;
+
+   vassert(amode->tag == S390_AMODE_B12);
+   UInt b = hregNumber(amode->b);
+   UInt d = amode->d;
+   UInt regno = hregNumber(insn->variant.xindir.dst);
+
+   buf = s390_emit_STG(buf, regno, 0, b, DISP20(d));
+
+   /* load tchain_scratch, #disp_indir */
+   buf = s390_tchain_load64(buf, S390_REGNO_TCHAIN_SCRATCH,
+                            (Addr)disp_cp_xindir);
+   /* goto *tchain_scratch */
+   buf = s390_emit_BCR(buf, S390_CC_ALWAYS, S390_REGNO_TCHAIN_SCRATCH);
+
+   /* Fix up the conditional jump, if there was one. */
+   if (cond != S390_CC_ALWAYS) {
+      Int delta = buf - ptmp;
+
+      delta >>= 1;  /* immediate constant is #half-words */
+      vassert(delta > 0 && delta < (1 << 16));
+      s390_emit_BRC(ptmp, s390_cc_invert(cond), delta);
+   }
+
+   return buf;
+}
+
+/* Emit an assisted transfer: store the new guest IA, put the
+   translated jump-kind (TRC value) into the guest-state-pointer
+   register, and jump to the dispatcher's xassisted entry point. */
+static UChar *
+s390_insn_xassisted_emit(UChar *buf, const s390_insn *insn,
+                         const void *disp_cp_xassisted)
+{
+   /* Use ptmp for backpatching conditional jumps. */
+   UChar *ptmp = buf;
+
+   /* First off, if this is conditional, create a conditional
+      jump over the rest of it. */
+   /* NOTE(review): the condition is read via variant.xdirect rather than
+      variant.xassisted; this relies on COND having the same offset in
+      both union members -- confirm, or switch to variant.xassisted.cond. */
+   s390_cc_t cond = insn->variant.xdirect.cond;
+
+   if (cond != S390_CC_ALWAYS) {
+      /* So we have something like this
+         if (cond) do_xassisted;
+         Y: ...
+         We convert this into
+         if (! cond) goto Y;        // BRC opcode; 4 bytes
+         do_xassisted;
+         Y:
+      */
+      /* 4 bytes (a BRC insn) to be filled in here */
+      buf += 4;
+   }
+
+   /* Update the guest IA with the address in xassisted.dst. */
+   const s390_amode *amode = insn->variant.xassisted.guest_IA;
+
+   vassert(amode->tag == S390_AMODE_B12);
+   UInt b = hregNumber(amode->b);
+   UInt d = amode->d;
+   UInt regno = hregNumber(insn->variant.xassisted.dst);
+
+   buf = s390_emit_STG(buf, regno, 0, b, DISP20(d));
+
+   UInt trcval = 0;
+
+   switch (insn->variant.xassisted.kind) {
+   case Ijk_ClientReq:   trcval = VEX_TRC_JMP_CLIENTREQ;   break;
+   case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
+   case Ijk_Yield:       trcval = VEX_TRC_JMP_YIELD;       break;
+   case Ijk_EmWarn:      trcval = VEX_TRC_JMP_EMWARN;      break;
+   case Ijk_EmFail:      trcval = VEX_TRC_JMP_EMFAIL;      break;
+   case Ijk_MapFail:     trcval = VEX_TRC_JMP_MAPFAIL;     break;
+   case Ijk_NoDecode:    trcval = VEX_TRC_JMP_NODECODE;    break;
+   case Ijk_InvalICache: trcval = VEX_TRC_JMP_INVALICACHE; break;
+   case Ijk_NoRedir:     trcval = VEX_TRC_JMP_NOREDIR;     break;
+   case Ijk_SigTRAP:     trcval = VEX_TRC_JMP_SIGTRAP;     break;
+   case Ijk_SigSEGV:     trcval = VEX_TRC_JMP_SIGSEGV;     break;
+   case Ijk_Boring:      trcval = VEX_TRC_JMP_BORING;      break;
+      /* We don't expect to see the following being assisted. */
+   case Ijk_Ret:
+   case Ijk_Call:
+      /* fallthrough */
+   default: 
+      ppIRJumpKind(insn->variant.xassisted.kind);
+      vpanic("s390_insn_xassisted_emit: unexpected jump kind");
+   }
+
+   vassert(trcval != 0);
+
+   /* guest_state_pointer = trcval */
+   buf = s390_emit_LGHI(buf, S390_REGNO_GUEST_STATE_POINTER, trcval);
+
+   /* load tchain_scratch, #disp_assisted */
+   buf = s390_tchain_load64(buf, S390_REGNO_TCHAIN_SCRATCH,
+                            (Addr)disp_cp_xassisted);
+
+   /* goto *tchain_scratch */
+   buf = s390_emit_BCR(buf, S390_CC_ALWAYS, S390_REGNO_TCHAIN_SCRATCH);
+
+   /* Fix up the conditional jump, if there was one. */
+   if (cond != S390_CC_ALWAYS) {
+      Int delta = buf - ptmp;
+
+      delta >>= 1;  /* immediate constant is #half-words */
+      vassert(delta > 0 && delta < (1 << 16));
+      s390_emit_BRC(ptmp, s390_cc_invert(cond), delta);
+   }
+
+   return buf;
+}
+
+
+/* Pseudo code:
+
+   guest_state[host_EvC_COUNTER] -= 1;
+   if (guest_state[host_EvC_COUNTER] >= 0) goto nofail;
+   goto guest_state[host_EvC_FAILADDR];
+   nofail: ;
+
+   The dispatch counter is a 32-bit value.
+   ENDNESS_HOST is accepted for interface uniformity but unused here. */
+static UChar *
+s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn,
+                       VexEndness endness_host)
+{
+   s390_amode *amode;
+   UInt b, d;
+   UChar *code_begin, *code_end;
+
+   code_begin = buf;
+
+   amode = insn->variant.evcheck.counter;
+   vassert(amode->tag == S390_AMODE_B12);
+   b = hregNumber(amode->b);
+   d = amode->d;
+
+   /* Decrement the dispatch counter in the guest state */
+   if (s390_host_has_gie) {
+      buf = s390_emit_ASI(buf, -1, b, DISP20(d));   /* 6 bytes */
+   } else {
+      buf = s390_emit_LHI(buf, R0, -1);             /* 4 bytes */
+      buf = s390_emit_A(buf, R0, 0, b, d);          /* 4 bytes */
+      buf = s390_emit_ST(buf, R0, 0, b, d);         /* 4 bytes */
+   }
+
+   /* Jump over the next insn if >= 0.  Distance in half-words: the BRC
+      itself (4 bytes) plus the LG (6) and BCR (2) that follow. */
+   buf = s390_emit_BRC(buf, S390_CC_HE, (4 + 6 + 2) / 2);  /* 4 bytes */
+
+   /* Computed goto to fail_address */
+   amode = insn->variant.evcheck.fail_addr;
+   b = hregNumber(amode->b);
+   d = amode->d;
+   buf = s390_emit_LG(buf, S390_REGNO_TCHAIN_SCRATCH, 0, b, DISP20(d));  /* 6 bytes */
+   buf = s390_emit_BCR(buf, S390_CC_ALWAYS, S390_REGNO_TCHAIN_SCRATCH);  /* 2 bytes */
+
+   code_end = buf;
+
+   /* Make sure the size of the generated code is identical to the size
+      returned by evCheckSzB_S390 */
+   vassert(evCheckSzB_S390() == code_end - code_begin);
+
+   return buf;
+}
+
+
+/* Emit a profile-counter increment.  The counter's address is unknown
+   at emit time, so a load64-of-zero template is emitted and later
+   rewritten by patchProfInc_S390. */
+static UChar *
+s390_insn_profinc_emit(UChar *buf,
+                       const s390_insn *insn __attribute__((unused)))
+{
+   /* Generate a code template to increment a memory location whose
+      address will be known later as an immediate value. This code
+      template will be patched once the memory location is known.
+      For now we do this with address == 0. */
+   buf = s390_tchain_load64(buf, S390_REGNO_TCHAIN_SCRATCH, 0);
+   if (s390_host_has_gie) {
+      buf = s390_emit_AGSI(buf, 1, S390_REGNO_TCHAIN_SCRATCH, DISP20(0));
+   } else {
+      /* No add-immediate-to-storage available: go through R0. */
+      buf = s390_emit_LGHI(buf, R0, 1);
+      buf = s390_emit_AG( buf, R0, 0, S390_REGNO_TCHAIN_SCRATCH, DISP20(0));
+      buf = s390_emit_STG(buf, R0, 0, S390_REGNO_TCHAIN_SCRATCH, DISP20(0));
+   }
+
+   return buf;
+}
+
+
+/* Emit the machine code for INSN into BUF, which has room for NBUF
+   bytes, and return the number of bytes written.  For a PROFINC insn,
+   *IS_PROFINC is set so the caller can record the patch location.
+   Dispatches on the insn tag to the per-insn emitters above. */
+Int
+emit_S390Instr(Bool *is_profinc, UChar *buf, Int nbuf, const s390_insn *insn,
+               Bool mode64, VexEndness endness_host,
+               const void *disp_cp_chain_me_to_slowEP,
+               const void *disp_cp_chain_me_to_fastEP,
+               const void *disp_cp_xindir,
+               const void *disp_cp_xassisted)
+{
+   UChar *end;
+
+   /* Used to be 48 bytes. Make sure it stays low */
+   vassert(sizeof(s390_insn) == 32);
+
+   switch (insn->tag) {
+   case S390_INSN_LOAD:
+      end = s390_insn_load_emit(buf, insn);
+      break;
+
+   case S390_INSN_STORE:
+      end = s390_insn_store_emit(buf, insn);
+      break;
+
+   case S390_INSN_MOVE:
+      end = s390_insn_move_emit(buf, insn);
+      break;
+
+   case S390_INSN_MEMCPY:
+      end = s390_insn_memcpy_emit(buf, insn);
+      break;
+
+   case S390_INSN_COND_MOVE:
+      end = s390_insn_cond_move_emit(buf, insn);
+      break;
+
+   case S390_INSN_LOAD_IMMEDIATE:
+      end = s390_insn_load_immediate_emit(buf, insn);
+      break;
+
+   case S390_INSN_ALU:
+      end = s390_insn_alu_emit(buf, insn);
+      break;
+
+   case S390_INSN_SMUL:
+   case S390_INSN_UMUL:
+      end = s390_insn_mul_emit(buf, insn);
+      break;
+
+   case S390_INSN_SDIV:
+   case S390_INSN_UDIV:
+      end = s390_insn_div_emit(buf, insn);
+      break;
+
+   case S390_INSN_DIVS:
+      end = s390_insn_divs_emit(buf, insn);
+      break;
+
+   case S390_INSN_CLZ:
+      end = s390_insn_clz_emit(buf, insn);
+      break;
+
+   case S390_INSN_UNOP:
+      end = s390_insn_unop_emit(buf, insn);
+      break;
+
+   case S390_INSN_TEST:
+      end = s390_insn_test_emit(buf, insn);
+      break;
+
+   case S390_INSN_CC2BOOL:
+      end = s390_insn_cc2bool_emit(buf, insn);
+      break;
+
+   case S390_INSN_CAS:
+      end = s390_insn_cas_emit(buf, insn);
+      break;
+
+   case S390_INSN_CDAS:
+      end = s390_insn_cdas_emit(buf, insn);
+      break;
+
+   case S390_INSN_COMPARE:
+      end = s390_insn_compare_emit(buf, insn);
+      break;
+
+   case S390_INSN_HELPER_CALL:
+      end = s390_insn_helper_call_emit(buf, insn);
+      /* The helper-call emitter signals failure by returning BUF
+         unchanged. */
+      if (end == buf) goto fail;
+      break;
+
+   case S390_INSN_BFP_TRIOP:
+      end = s390_insn_bfp_triop_emit(buf, insn);
+      break;
+
+   case S390_INSN_BFP_BINOP:
+      end = s390_insn_bfp_binop_emit(buf, insn);
+      break;
+
+   case S390_INSN_BFP_UNOP:
+      end = s390_insn_bfp_unop_emit(buf, insn);
+      break;
+
+   case S390_INSN_BFP_COMPARE:
+      end = s390_insn_bfp_compare_emit(buf, insn);
+      break;
+
+   case S390_INSN_BFP_CONVERT:
+      end = s390_insn_bfp_convert_emit(buf, insn);
+      break;
+
+   case S390_INSN_DFP_BINOP:
+      end = s390_insn_dfp_binop_emit(buf, insn);
+      break;
+
+   case S390_INSN_DFP_UNOP:
+      end = s390_insn_dfp_unop_emit(buf, insn);
+      break;
+
+   case S390_INSN_DFP_INTOP:
+      end = s390_insn_dfp_intop_emit(buf, insn);
+      break;
+
+   case S390_INSN_DFP_COMPARE:
+      end = s390_insn_dfp_compare_emit(buf, insn);
+      break;
+
+   case S390_INSN_DFP_CONVERT:
+      end = s390_insn_dfp_convert_emit(buf, insn);
+      break;
+
+   case S390_INSN_DFP_REROUND:
+      end = s390_insn_dfp_reround_emit(buf, insn);
+      break;
+
+   case S390_INSN_FP_CONVERT:
+      end = s390_insn_fp_convert_emit(buf, insn);
+      break;
+
+   case S390_INSN_MFENCE:
+      end = s390_insn_mfence_emit(buf, insn);
+      break;
+
+   case S390_INSN_MIMM:
+      end = s390_insn_mimm_emit(buf, insn);
+      break;
+
+   case S390_INSN_MADD:
+      end = s390_insn_madd_emit(buf, insn);
+      break;
+
+   case S390_INSN_SET_FPC_BFPRM:
+      end = s390_insn_set_fpc_bfprm_emit(buf, insn);
+      break;
+
+   case S390_INSN_SET_FPC_DFPRM:
+      end = s390_insn_set_fpc_dfprm_emit(buf, insn);
+      break;
+
+   case S390_INSN_PROFINC:
+      end = s390_insn_profinc_emit(buf, insn);
+      /* Tell the caller .. */
+      vassert(*is_profinc == False);
+      *is_profinc = True;
+      break;
+
+   case S390_INSN_EVCHECK:
+      end = s390_insn_evcheck_emit(buf, insn, endness_host);
+      break;
+
+   case S390_INSN_XDIRECT:
+      end = s390_insn_xdirect_emit(buf, insn, disp_cp_chain_me_to_slowEP,
+                                   disp_cp_chain_me_to_fastEP);
+      break;
+
+   case S390_INSN_XINDIR:
+      end = s390_insn_xindir_emit(buf, insn, disp_cp_xindir);
+      break;
+
+   case S390_INSN_XASSISTED:
+      end = s390_insn_xassisted_emit(buf, insn, disp_cp_xassisted);
+      break;
+
+   fail:
+   default:
+      vpanic("emit_S390Instr");
+   }
+
+   vassert(end - buf <= nbuf);
+
+   return end - buf;
+}
+
+
+/* Return the number of bytes emitted for an S390_INSN_EVCHECK.
+   See s390_insn_evcheck_emit */
+Int
+evCheckSzB_S390(void)
+{
+   /* With GIE:    ASI(6)               + BRC(4) + LG(6) + BCR(2) = 18
+      Without GIE: LHI(4) + A(4) + ST(4) + BRC(4) + LG(6) + BCR(2) = 24 */
+   if (s390_host_has_gie)
+      return 18;
+
+   return 24;
+}
+
+
+/* Patch the counter address into CODE_TO_PATCH as previously
+   generated by s390_insn_profinc_emit.  The template was emitted with
+   address 0, which is what the verification below checks for. */
+VexInvalRange
+patchProfInc_S390(VexEndness endness_host,
+                  void *code_to_patch, const ULong *location_of_counter)
+{
+   vassert(sizeof(ULong *) == 8);
+
+   /* The template loads 0 into the scratch register; verify that. */
+   s390_tchain_verify_load64(code_to_patch, S390_REGNO_TCHAIN_SCRATCH, 0);
+
+   UChar *p = s390_tchain_patch_load64(code_to_patch,
+                                       (Addr)location_of_counter);
+
+   UInt len = p - (UChar *)code_to_patch;
+   VexInvalRange vir = { (HWord)code_to_patch, len };
+   return vir;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   s390_insn_xdirect_emit code above. */
+VexInvalRange
+chainXDirect_S390(VexEndness endness_host,
+                  void *place_to_chain,
+                  const void *disp_cp_chain_me_EXPECTED,
+                  const void *place_to_jump_to)
+{
+   vassert(endness_host == VexEndnessBE);
+
+   /* What we're expecting to see @ PLACE_TO_CHAIN is:
+
+        load  tchain_scratch, #disp_cp_chain_me_EXPECTED
+        goto *tchain_scratch
+   */
+   const UChar *next;
+   next = s390_tchain_verify_load64(place_to_chain, S390_REGNO_TCHAIN_SCRATCH,
+                                    (Addr)disp_cp_chain_me_EXPECTED);
+   vassert(s390_insn_is_BR(next, S390_REGNO_TCHAIN_SCRATCH));
+
+   /* And what we want to change it to is either:
+        (general case):
+
+          load  tchain_scratch, #place_to_jump_to
+          goto *tchain_scratch
+
+      ---OR---
+
+        in the case where the displacement is small enough
+
+          BRCL delta       where delta is in half-words
+          invalid opcodes
+
+      In both cases the replacement has the same length as the original.
+      To remain sane & verifiable,
+      (1) limit the displacement for the short form to 
+          (say) +/- one billion, so as to avoid wraparound
+          off-by-ones
+      (2) even if the short form is applicable, once every (say)
+          1024 times use the long form anyway, so as to maintain
+          verifiability
+   */
+
+   /* This is the delta we need to put into a BRCL insn. Note, that the
+      offset in BRCL is in half-words. Hence division by 2. */
+   Long delta =
+      (Long)((const UChar *)place_to_jump_to - (const UChar *)place_to_chain) / 2;
+   Bool shortOK = delta >= -1000*1000*1000 && delta < 1000*1000*1000;
+
+   static UInt shortCTR = 0; /* DO NOT MAKE NON-STATIC */
+   if (shortOK) {
+      shortCTR++; // thread safety bleh
+      if (0 == (shortCTR & 0x3FF)) {
+         shortOK = False;
+         if (0)
+            vex_printf("QQQ chainXDirect_S390: shortCTR = %u, "
+                       "using long jmp\n", shortCTR);
+      }
+   }
+
+   /* And make the modifications. */
+   UChar *p = (UChar *)place_to_chain;
+   if (shortOK) {
+      p = s390_emit_BRCL(p, S390_CC_ALWAYS, delta);  /* 6 bytes */
+
+      /* Make sure that BRCL fits into the patchable part of an xdirect
+         code sequence */
+      vassert(6 <= s390_xdirect_patchable_len());
+
+      /* Fill remaining bytes with 0x00 (invalid opcode) */
+      Int i;
+      for (i = 0; i < s390_xdirect_patchable_len() - 6; ++i)
+         p[i] = 0x00;
+   } else {
+      /*
+          load  tchain_scratch, #place_to_jump_to
+          goto *tchain_scratch
+      */
+      Addr64 addr = (Addr)place_to_jump_to;
+      p = s390_tchain_load64(p, S390_REGNO_TCHAIN_SCRATCH, addr);
+      /* There is no need to emit a BCR here, as it is already there. */
+   }
+
+   UInt len = p - (UChar *)place_to_chain;
+   VexInvalRange vir = { (HWord)place_to_chain, len };
+   return vir;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   s390_insn_xdirect_emit code above. */
+VexInvalRange
+unchainXDirect_S390(VexEndness endness_host,
+                    void *place_to_unchain,
+                    const void *place_to_jump_to_EXPECTED,
+                    const void *disp_cp_chain_me)
+{
+   vassert(endness_host == VexEndnessBE);
+
+   /* What we're expecting to see @ PLACE_TO_UNCHAIN:
+
+          load  tchain_scratch, #place_to_jump_to_EXPECTED
+          goto *tchain_scratch
+
+      ---OR---
+        in the case where the displacement falls within 32 bits
+
+          BRCL delta
+          invalid opcodes
+   */
+   UChar *p = place_to_unchain;
+
+   Bool uses_short_form = False;
+
+   if (s390_insn_is_BRCL(p, S390_CC_ALWAYS)) {
+      /* Looks like the short form */
+      Int num_hw = *(Int *)&p[2];
+      Int delta = 2 *num_hw;
+
+      vassert(p + delta == place_to_jump_to_EXPECTED);
+
+      /* The bytes after the BRCL must be the 0x00 filler written by
+         chainXDirect_S390. */
+      Int i;
+      for (i = 0; i < s390_xdirect_patchable_len() - 6; ++i)
+         vassert(p[6+i] == 0x00);
+      uses_short_form = True;
+   } else {
+      /* Should be the long form */
+      const UChar *next;
+
+      next = s390_tchain_verify_load64(p, S390_REGNO_TCHAIN_SCRATCH,
+                                       (Addr)place_to_jump_to_EXPECTED);
+      /* Check for BR *tchain_scratch */
+      vassert(s390_insn_is_BR(next, S390_REGNO_TCHAIN_SCRATCH));
+   }
+
+   /* And what we want to change it to is:
+
+        load  tchain_scratch, #disp_cp_chain_me
+        goto *tchain_scratch
+   */
+
+   /* Get the address of the beginning of the load64 code sequence into %r1.
+      Do not change the register! This is part of the protocol with the
+      dispatcher.
+      Note: the incoming argument PLACE_TO_UNCHAIN points to the beginning of
+      the load64 insn sequence. That sequence is prefixed with a BASR to get
+      its address (see s390_insn_xdirect_emit).  */
+   p = s390_emit_BASR(p - S390_BASR_LEN, 1, R0);
+
+   Addr64 addr = (Addr)disp_cp_chain_me;
+   p = s390_tchain_load64(p, S390_REGNO_TCHAIN_SCRATCH, addr);
+
+   /* Emit the BCR in case the short form was used. In case of the long
+      form, the BCR is already there. */
+   /* NOTE(review): in the short-form case the BCR written here lies beyond
+      LEN below, so the returned invalidation range does not cover those two
+      bytes -- confirm callers flush enough, or extend LEN accordingly. */
+   if (uses_short_form)
+      s390_emit_BCR(p, S390_CC_ALWAYS, S390_REGNO_TCHAIN_SCRATCH);
+
+   UInt len = p - (UChar *)place_to_unchain;
+   VexInvalRange vir = { (HWord)place_to_unchain, len };
+   return vir;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                    host_s390_defs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_s390_defs.h b/VEX/priv/host_s390_defs.h
new file mode 100644
index 0000000..9d732f9
--- /dev/null
+++ b/VEX/priv/host_s390_defs.h
@@ -0,0 +1,801 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                  host_s390_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#ifndef __VEX_HOST_S390_DEFS_H
+#define __VEX_HOST_S390_DEFS_H
+
+#include "libvex_basictypes.h"            /* Bool */
+#include "libvex.h"                       /* VexArchInfo */
+#include "host_generic_regs.h"            /* HReg */
+#include "s390_defs.h"                    /* s390_cc_t */
+
+/* --------- Registers --------- */
+const HChar *s390_hreg_as_string(HReg);
+HReg s390_hreg_gpr(UInt regno);
+HReg s390_hreg_fpr(UInt regno);
+
+/* Dedicated registers */
+HReg s390_hreg_guest_state_pointer(void);
+
+
+/* Given the index of a function argument, return the number of the
+   general purpose register in which it is being passed. Arguments are
+   counted 0, 1, 2, ... and they are being passed in r2, r3, r4, ... */
+static __inline__ UInt
+s390_gprno_from_arg_index(UInt ix)
+{
+   return ix + 2;
+}
+
+/* --------- Memory address expressions (amodes). --------- */
+
+/* These are the address modes:
+   (1) b12:  base register + 12-bit unsigned offset   (e.g. RS)
+   (2) b20:  base register + 20-bit signed offset     (e.g. RSY)
+   (3) bx12: base register + index register + 12-bit unsigned offset (e.g. RX)
+   (4) bx20: base register + index register + 20-bit signed offset   (e.g. RXY)
+   fixs390: There is also pc-relative stuff, e.g. LARL
+*/
+
+typedef enum {
+   S390_AMODE_B12,
+   S390_AMODE_B20,
+   S390_AMODE_BX12,
+   S390_AMODE_BX20
+} s390_amode_t;
+
+typedef struct {
+   s390_amode_t tag;
+   HReg b;
+   HReg x;       /* hregNumber(x) == 0  for S390_AMODE_B12/B20 kinds */
+   Int  d;       /* 12 bit unsigned or 20 bit signed */
+} s390_amode;
+
+
+s390_amode *s390_amode_b12(Int d, HReg b);
+s390_amode *s390_amode_b20(Int d, HReg b);
+s390_amode *s390_amode_bx12(Int d, HReg b, HReg x);
+s390_amode *s390_amode_bx20(Int d, HReg b, HReg x);
+s390_amode *s390_amode_for_guest_state(Int d);
+Bool        s390_amode_is_sane(const s390_amode *);
+
+const HChar *s390_amode_as_string(const s390_amode *);
+
+/* ------------- 2nd (right) operand of binary operation ---------------- */
+
+typedef enum {
+   S390_OPND_REG,
+   S390_OPND_IMMEDIATE,
+   S390_OPND_AMODE
+} s390_opnd_t;
+
+
+/* Naming convention for operand locations:
+   R    - GPR
+   I    - immediate value
+   M    - memory (any Amode may be used)
+*/
+
+/* An operand that is either in a GPR or is addressable via a BX20 amode */
+typedef struct {
+   s390_opnd_t tag;
+   union {
+      HReg        reg;
+      s390_amode *am;
+      ULong       imm;
+   } variant;
+} s390_opnd_RMI;
+
+
+/* The kind of instructions */
+typedef enum {
+   S390_INSN_LOAD,   /* load register from memory */
+   S390_INSN_STORE,  /* store register to memory */
+   S390_INSN_MOVE,   /* from register to register */
+   S390_INSN_MEMCPY, /* from memory to memory */
+   S390_INSN_COND_MOVE, /* conditional "move" to register */
+   S390_INSN_LOAD_IMMEDIATE,
+   S390_INSN_ALU,
+   S390_INSN_SMUL,   /*   signed multiply; n-bit operands; 2n-bit result */
+   S390_INSN_UMUL,   /* unsigned multiply; n-bit operands; 2n-bit result */
+   S390_INSN_SDIV,   /*   signed division; 2n-bit / n-bit -> n-bit quot/rem */
+   S390_INSN_UDIV,   /* unsigned division; 2n-bit / n-bit -> n-bit quot/rem */
+   S390_INSN_DIVS,   /* n-bit dividend; n-bit divisor; n-bit quot/rem */
+   S390_INSN_CLZ,    /* count left-most zeroes */
+   S390_INSN_UNOP,
+   S390_INSN_TEST,   /* test operand and set cc */
+   S390_INSN_CC2BOOL,/* convert condition code to 0/1 */
+   S390_INSN_COMPARE,
+   S390_INSN_HELPER_CALL,
+   S390_INSN_CAS,    /* compare and swap */
+   S390_INSN_CDAS,   /* compare double and swap */
+   S390_INSN_BFP_BINOP, /* Binary floating point */
+   S390_INSN_BFP_UNOP,
+   S390_INSN_BFP_TRIOP,
+   S390_INSN_BFP_COMPARE,
+   S390_INSN_BFP_CONVERT,
+   S390_INSN_DFP_BINOP, /* Decimal floating point */
+   S390_INSN_DFP_UNOP,
+   S390_INSN_DFP_INTOP,
+   S390_INSN_DFP_COMPARE,
+   S390_INSN_DFP_CONVERT,
+   S390_INSN_DFP_REROUND,
+   S390_INSN_FP_CONVERT,
+   S390_INSN_MFENCE,
+   S390_INSN_MIMM,    /* Assign an immediate constant to a memory location */
+   S390_INSN_MADD,    /* Add a value to a memory location */
+   S390_INSN_SET_FPC_BFPRM, /* Set the bfp rounding mode in the FPC */
+   S390_INSN_SET_FPC_DFPRM, /* Set the dfp rounding mode in the FPC */
+   /* The following 5 insns are mandated by translation chaining */
+   S390_INSN_XDIRECT,     /* direct transfer to guest address */
+   S390_INSN_XINDIR,      /* indirect transfer to guest address */
+   S390_INSN_XASSISTED,   /* assisted transfer to guest address */
+   S390_INSN_EVCHECK,     /* Event check */
+   S390_INSN_PROFINC      /* 64-bit profile counter increment */
+} s390_insn_tag;
+
+
+/* The kind of ALU instructions */
+typedef enum {
+   S390_ALU_ADD,
+   S390_ALU_SUB,
+   S390_ALU_MUL,   /* n-bit operands; result is lower n-bit of product */
+   S390_ALU_AND,
+   S390_ALU_OR,
+   S390_ALU_XOR,
+   S390_ALU_LSH,
+   S390_ALU_RSH,
+   S390_ALU_RSHA   /* arithmetic */
+} s390_alu_t;
+
+
+/* The kind of unary integer operations */
+typedef enum {
+   S390_ZERO_EXTEND_8,
+   S390_ZERO_EXTEND_16,
+   S390_ZERO_EXTEND_32,
+   S390_SIGN_EXTEND_8,
+   S390_SIGN_EXTEND_16,
+   S390_SIGN_EXTEND_32,
+   S390_NEGATE
+} s390_unop_t;
+
+/* The kind of ternary BFP operations */
+typedef enum {
+   S390_BFP_MADD,
+   S390_BFP_MSUB,
+} s390_bfp_triop_t;
+
+/* The kind of binary BFP operations */
+typedef enum {
+   S390_BFP_ADD,
+   S390_BFP_SUB,
+   S390_BFP_MUL,
+   S390_BFP_DIV
+} s390_bfp_binop_t;
+
+/* The kind of unary BFP operations */
+typedef enum {
+   S390_BFP_ABS,
+   S390_BFP_NABS,
+   S390_BFP_NEG,
+   S390_BFP_SQRT
+} s390_bfp_unop_t;
+
+/* Type conversion operations: to and/or from binary floating point */
+typedef enum {
+   S390_BFP_I32_TO_F32,
+   S390_BFP_I32_TO_F64,
+   S390_BFP_I32_TO_F128,
+   S390_BFP_I64_TO_F32,
+   S390_BFP_I64_TO_F64,
+   S390_BFP_I64_TO_F128,
+   S390_BFP_U32_TO_F32,
+   S390_BFP_U32_TO_F64,
+   S390_BFP_U32_TO_F128,
+   S390_BFP_U64_TO_F32,
+   S390_BFP_U64_TO_F64,
+   S390_BFP_U64_TO_F128,
+   S390_BFP_F32_TO_I32,
+   S390_BFP_F32_TO_I64,
+   S390_BFP_F32_TO_U32,
+   S390_BFP_F32_TO_U64,
+   S390_BFP_F32_TO_F64,
+   S390_BFP_F32_TO_F128,
+   S390_BFP_F64_TO_I32,
+   S390_BFP_F64_TO_I64,
+   S390_BFP_F64_TO_U32,
+   S390_BFP_F64_TO_U64,
+   S390_BFP_F64_TO_F32,
+   S390_BFP_F64_TO_F128,
+   S390_BFP_F128_TO_I32,
+   S390_BFP_F128_TO_I64,
+   S390_BFP_F128_TO_U32,
+   S390_BFP_F128_TO_U64,
+   S390_BFP_F128_TO_F32,
+   S390_BFP_F128_TO_F64
+} s390_bfp_conv_t;
+
+/* Type conversion operations: to and/or from decimal floating point */
+typedef enum {
+   S390_DFP_D32_TO_D64,
+   S390_DFP_D64_TO_D32,
+   S390_DFP_D64_TO_D128,
+   S390_DFP_D128_TO_D64,
+   S390_DFP_I32_TO_D64,
+   S390_DFP_I32_TO_D128,
+   S390_DFP_I64_TO_D64,
+   S390_DFP_I64_TO_D128,
+   S390_DFP_U32_TO_D64,
+   S390_DFP_U32_TO_D128,
+   S390_DFP_U64_TO_D64,
+   S390_DFP_U64_TO_D128,
+   S390_DFP_D64_TO_I32,
+   S390_DFP_D64_TO_I64,
+   S390_DFP_D64_TO_U32,
+   S390_DFP_D64_TO_U64,
+   S390_DFP_D128_TO_I32,
+   S390_DFP_D128_TO_I64,
+   S390_DFP_D128_TO_U32,
+   S390_DFP_D128_TO_U64
+} s390_dfp_conv_t;
+
+typedef enum {
+   S390_FP_F32_TO_D32,
+   S390_FP_F32_TO_D64,
+   S390_FP_F32_TO_D128,
+   S390_FP_F64_TO_D32,
+   S390_FP_F64_TO_D64,
+   S390_FP_F64_TO_D128,
+   S390_FP_F128_TO_D32,
+   S390_FP_F128_TO_D64,
+   S390_FP_F128_TO_D128,
+   S390_FP_D32_TO_F32,
+   S390_FP_D32_TO_F64,
+   S390_FP_D32_TO_F128,
+   S390_FP_D64_TO_F32,
+   S390_FP_D64_TO_F64,
+   S390_FP_D64_TO_F128,
+   S390_FP_D128_TO_F32,
+   S390_FP_D128_TO_F64,
+   S390_FP_D128_TO_F128
+} s390_fp_conv_t;
+
+/* The kind of binary DFP operations */
+typedef enum {
+   S390_DFP_ADD,
+   S390_DFP_SUB,
+   S390_DFP_MUL,
+   S390_DFP_DIV,
+   S390_DFP_QUANTIZE
+} s390_dfp_binop_t;
+
+/* The kind of unary DFP operations */
+typedef enum {
+   S390_DFP_EXTRACT_EXP_D64,
+   S390_DFP_EXTRACT_EXP_D128,
+   S390_DFP_EXTRACT_SIG_D64,
+   S390_DFP_EXTRACT_SIG_D128,
+} s390_dfp_unop_t;
+
+/* The DFP operations with 2 operands one of them being integer */
+typedef enum {
+   S390_DFP_SHIFT_LEFT,
+   S390_DFP_SHIFT_RIGHT,
+   S390_DFP_INSERT_EXP
+} s390_dfp_intop_t;
+
+/* The kind of DFP compare operations */
+typedef enum {
+   S390_DFP_COMPARE,
+   S390_DFP_COMPARE_EXP,
+} s390_dfp_cmp_t;
+
+/* The details of a CDAS insn. Carved out to keep the size of
+   s390_insn low */
+typedef struct {
+   HReg        op1_high;
+   HReg        op1_low;
+   s390_amode *op2;
+   HReg        op3_high;
+   HReg        op3_low;
+   HReg        old_mem_high;
+   HReg        old_mem_low;
+   HReg        scratch;
+} s390_cdas;
+
+/* The details of a binary DFP insn. Carved out to keep the size of
+   s390_insn low */
+typedef struct {
+   s390_dfp_binop_t tag;
+   s390_dfp_round_t rounding_mode;
+   HReg         dst_hi; /* 128-bit result high part; 64-bit result */
+   HReg         dst_lo; /* 128-bit result low part */
+   HReg         op2_hi; /* 128-bit operand high part; 64-bit opnd 1 */
+   HReg         op2_lo; /* 128-bit operand low part */
+   HReg         op3_hi; /* 128-bit operand high part; 64-bit opnd 2 */
+   HReg         op3_lo; /* 128-bit operand low part */
+} s390_dfp_binop;
+
+typedef struct {
+   s390_fp_conv_t  tag;
+   s390_dfp_round_t rounding_mode;
+   HReg         dst_hi; /* 128-bit result high part; 32/64-bit result */
+   HReg         dst_lo; /* 128-bit result low part */
+   HReg         op_hi;  /* 128-bit operand high part; 32/64-bit opnd */
+   HReg         op_lo;  /* 128-bit operand low part */
+   HReg         r1;     /* clobbered register GPR #1 */
+} s390_fp_convert;
+
+/* Pseudo-insn for representing a helper call.
+   TARGET is the absolute address of the helper function
+   NUM_ARGS says how many arguments are being passed.
+   All arguments have integer type and are being passed according to ABI,
+   i.e. in registers r2, r3, r4, r5, and r6, with argument #0 being
+   passed in r2 and so forth. */
+typedef struct {
+   s390_cc_t    cond     : 16;
+   UInt         num_args : 16;
+   RetLoc       rloc;     /* where the return value will be */
+   Addr64       target;
+   const HChar *name;      /* callee's name (for debugging) */
+} s390_helper_call;
+
+typedef struct {
+   s390_insn_tag tag;
+   /* Usually, this is the size of the result of an operation.
+      Exceptions are:
+      - for comparisons it is the size of the operand
+   */
+   UChar size;
+   union {
+      struct {
+         HReg        dst;
+         s390_amode *src;
+      } load;
+      struct {
+         s390_amode *dst;
+         HReg        src;
+      } store;
+      struct {
+         HReg        dst;
+         HReg        src;
+      } move;
+      struct {
+         s390_amode *dst;
+         s390_amode *src;
+      } memcpy;
+      struct {
+         s390_cc_t     cond;
+         HReg          dst;
+         s390_opnd_RMI src;
+      } cond_move;
+      struct {
+         HReg        dst;
+         ULong       value;  /* not sign extended */
+      } load_immediate;
+      /* add, and, or, xor */
+      struct {
+         s390_alu_t    tag;
+         HReg          dst; /* op1 */
+         s390_opnd_RMI op2;
+      } alu;
+      struct {
+         HReg          dst_hi;  /*           r10 */
+         HReg          dst_lo;  /* also op1  r11 */
+         s390_opnd_RMI op2;
+      } mul;
+      struct {
+         HReg          op1_hi;  /* also remainder   r10 */
+         HReg          op1_lo;  /* also quotient    r11 */
+         s390_opnd_RMI op2;
+      } div;
+      struct {
+         HReg          rem; /* remainder      r10 */
+         HReg          op1; /* also quotient  r11 */
+         s390_opnd_RMI op2;
+      } divs;
+      struct {
+         HReg          num_bits; /* number of leftmost '0' bits  r10 */
+         HReg          clobber;  /* unspecified                  r11 */
+         s390_opnd_RMI src;
+      } clz;
+      struct {
+         s390_unop_t   tag;
+         HReg          dst;
+         s390_opnd_RMI src;
+      } unop;
+      struct {
+         Bool          signed_comparison;
+         HReg          src1;
+         s390_opnd_RMI src2;
+      } compare;
+      struct {
+         s390_opnd_RMI src;
+      } test;
+      /* Convert the condition code to a boolean value. */
+      struct {
+         s390_cc_t cond;
+         HReg      dst;
+      } cc2bool;
+      struct {
+         HReg        op1;
+         s390_amode *op2;
+         HReg        op3;
+         HReg        old_mem;
+      } cas;
+      struct {
+         s390_cdas *details;
+      } cdas;
+      struct {
+         s390_helper_call *details;
+      } helper_call;
+
+      /* Floating point instructions (including conversion to/from floating
+         point
+
+         128-bit floating point requires register pairs. As the registers
+         in a register pair cannot be chosen independently it would suffice
+         to store only one register of the pair in order to represent it.
+         We chose not to do that as being explicit about all registers
+         helps with debugging and does not require special handling in
+         e.g. s390_insn_get_reg_usage. It'd be all too easy to forget about
+         the "other" register in a pair if it is implicit.
+
+         The convention for all fp s390_insn is that the _hi register will
+         be used to store the result / operand of a 32/64-bit operation.
+         The _hi register holds the  8 bytes of HIgher significance of a
+         128-bit value (hence the suffix). However, it is the lower numbered
+         register of a register pair. POP says that the lower numbered
+         register is used to identify the pair in an insn encoding. So,
+         when an insn is emitted, only the _hi registers need to be looked
+         at. Nothing special is needed for 128-bit BFP which is nice.
+      */
+
+      /* There are currently no ternary 128-bit BFP operations. */
+      struct {
+         s390_bfp_triop_t tag;
+         HReg         dst;
+         HReg         op2;
+         HReg         op3;
+      } bfp_triop;
+      struct {
+         s390_bfp_binop_t tag;
+         HReg         dst_hi; /* 128-bit result high part; 32/64-bit result */
+         HReg         dst_lo; /* 128-bit result low part */
+         HReg         op2_hi; /* 128-bit operand high part; 32/64-bit opnd */
+         HReg         op2_lo; /* 128-bit operand low part */
+      } bfp_binop;
+      struct {
+         s390_bfp_unop_t  tag;
+         HReg         dst_hi; /* 128-bit result high part; 32/64-bit result */
+         HReg         dst_lo; /* 128-bit result low part */
+         HReg         op_hi;  /* 128-bit operand high part; 32/64-bit opnd */
+         HReg         op_lo;  /* 128-bit operand low part */
+      } bfp_unop;
+      struct {
+         s390_bfp_conv_t  tag;
+         s390_bfp_round_t rounding_mode;
+         HReg         dst_hi; /* 128-bit result high part; 32/64-bit result */
+         HReg         dst_lo; /* 128-bit result low part */
+         HReg         op_hi;  /* 128-bit operand high part; 32/64-bit opnd */
+         HReg         op_lo;  /* 128-bit operand low part */
+      } bfp_convert;
+      struct {
+         HReg         dst;     /* condition code in s390 encoding */
+         HReg         op1_hi;  /* 128-bit operand high part; 32/64-bit opnd */
+         HReg         op1_lo;  /* 128-bit operand low part */
+         HReg         op2_hi;  /* 128-bit operand high part; 32/64-bit opnd */
+         HReg         op2_lo;  /* 128-bit operand low part */
+      } bfp_compare;
+      struct {
+         s390_dfp_binop *details;
+      } dfp_binop;
+      struct {
+         s390_dfp_unop_t tag;
+         HReg         dst_hi; /* 128-bit result high part; 64-bit result */
+         HReg         dst_lo; /* 128-bit result low part */
+         HReg         op_hi;  /* 128-bit operand high part; 64-bit opnd */
+         HReg         op_lo;  /* 128-bit operand low part */
+      } dfp_unop;
+      struct {
+         s390_dfp_intop_t tag;
+         HReg         dst_hi; /* 128-bit result high part; 64-bit result */
+         HReg         dst_lo; /* 128-bit result low part */
+         HReg         op2;    /* integer operand */
+         HReg         op3_hi; /* 128-bit operand high part; 64-bit opnd */
+         HReg         op3_lo; /* 128-bit operand low part */
+      } dfp_intop;
+      struct {
+         s390_dfp_conv_t  tag;
+         s390_dfp_round_t rounding_mode;
+         HReg         dst_hi; /* 128-bit result high part; 64-bit result */
+         HReg         dst_lo; /* 128-bit result low part */
+         HReg         op_hi;  /* 128-bit operand high part; 64-bit opnd */
+         HReg         op_lo;  /* 128-bit operand low part */
+      } dfp_convert;
+      struct {
+         s390_fp_convert *details;
+      } fp_convert;
+      struct {
+         s390_dfp_cmp_t tag;
+         HReg         dst;     /* condition code in s390 encoding */
+         HReg         op1_hi;  /* 128-bit operand high part; 64-bit opnd 1 */
+         HReg         op1_lo;  /* 128-bit operand low part */
+         HReg         op2_hi;  /* 128-bit operand high part; 64-bit opnd 2 */
+         HReg         op2_lo;  /* 128-bit operand low part */
+      } dfp_compare;
+      struct {
+         s390_dfp_round_t rounding_mode;
+         HReg         dst_hi; /* 128-bit result high part; 64-bit result */
+         HReg         dst_lo; /* 128-bit result low part */
+         HReg         op2;    /* integer operand */
+         HReg         op3_hi; /* 128-bit operand high part; 64-bit opnd */
+         HReg         op3_lo; /* 128-bit operand low part */
+      } dfp_reround;
+
+      /* Miscellaneous */
+      struct {
+         s390_amode      *dst;
+         ULong            value;  /* sign extended */
+      } mimm;
+      struct {
+         s390_amode      *dst;
+         UChar            delta;
+         ULong            value;  /* for debugging only */
+      } madd;
+      struct {
+         HReg             mode;
+      } set_fpc_bfprm;
+      struct {
+         HReg             mode;
+      } set_fpc_dfprm;
+
+      /* The next 5 entries are generic to support translation chaining */
+
+      /* Update the guest IA value, then exit requesting to chain
+         to it.  May be conditional. */
+      struct {
+         s390_cc_t     cond;
+         Bool          to_fast_entry;  /* chain to which entry point? */
+         Addr64        dst;            /* next guest address */
+         s390_amode   *guest_IA;
+      } xdirect;
+      /* Boring transfer to a guest address not known at JIT time.
+         Not chainable.  May be conditional. */
+      struct {
+         s390_cc_t     cond;
+         HReg          dst;
+         s390_amode   *guest_IA;
+      } xindir;
+      /* Assisted transfer to a guest address, most general case.
+         Not chainable.  May be conditional. */
+      struct {
+         s390_cc_t     cond;
+         IRJumpKind    kind;
+         HReg          dst;
+         s390_amode   *guest_IA;
+      } xassisted;
+      struct {
+         /* fixs390: I don't think these are really needed
+            as the gsp and the offset are fixed  no ? */
+         s390_amode   *counter;    /* dispatch counter */
+         s390_amode   *fail_addr;
+      } evcheck;
+      struct {
+         /* No fields.  The address of the counter to increment is
+            installed later, post-translation, by patching it in,
+            as it is not known at translation time. */
+      } profinc;
+
+   } variant;
+} s390_insn;
+
+s390_insn *s390_insn_load(UChar size, HReg dst, s390_amode *src);
+s390_insn *s390_insn_store(UChar size, s390_amode *dst, HReg src);
+s390_insn *s390_insn_move(UChar size, HReg dst, HReg src);
+s390_insn *s390_insn_memcpy(UChar size, s390_amode *dst, s390_amode *src);
+s390_insn *s390_insn_cond_move(UChar size, s390_cc_t cond, HReg dst,
+                               s390_opnd_RMI src);
+s390_insn *s390_insn_load_immediate(UChar size, HReg dst, ULong val);
+s390_insn *s390_insn_alu(UChar size, s390_alu_t, HReg dst,
+                         s390_opnd_RMI op2);
+s390_insn *s390_insn_mul(UChar size, HReg dst_hi, HReg dst_lo,
+                         s390_opnd_RMI op2, Bool signed_multiply);
+s390_insn *s390_insn_div(UChar size, HReg op1_hi, HReg op1_lo,
+                         s390_opnd_RMI op2, Bool signed_divide);
+s390_insn *s390_insn_divs(UChar size, HReg rem, HReg op1, s390_opnd_RMI op2);
+s390_insn *s390_insn_clz(UChar size, HReg num_bits, HReg clobber,
+                         s390_opnd_RMI op);
+s390_insn *s390_insn_cas(UChar size, HReg op1, s390_amode *op2, HReg op3,
+                         HReg old);
+s390_insn *s390_insn_cdas(UChar size, HReg op1_high, HReg op1_low,
+                          s390_amode *op2, HReg op3_high, HReg op3_low,
+                          HReg old_high, HReg old_low, HReg scratch);
+s390_insn *s390_insn_unop(UChar size, s390_unop_t tag, HReg dst,
+                          s390_opnd_RMI opnd);
+s390_insn *s390_insn_cc2bool(HReg dst, s390_cc_t src);
+s390_insn *s390_insn_test(UChar size, s390_opnd_RMI src);
+s390_insn *s390_insn_compare(UChar size, HReg dst, s390_opnd_RMI opnd,
+                             Bool signed_comparison);
+s390_insn *s390_insn_helper_call(s390_cc_t cond, Addr64 target, UInt num_args,
+                                 const HChar *name, RetLoc rloc);
+s390_insn *s390_insn_bfp_triop(UChar size, s390_bfp_triop_t, HReg dst,
+                               HReg op2, HReg op3);
+s390_insn *s390_insn_bfp_binop(UChar size, s390_bfp_binop_t, HReg dst,
+                               HReg op2);
+s390_insn *s390_insn_bfp_unop(UChar size, s390_bfp_unop_t tag, HReg dst,
+                              HReg op);
+s390_insn *s390_insn_bfp_compare(UChar size, HReg dst, HReg op1, HReg op2);
+s390_insn *s390_insn_bfp_convert(UChar size, s390_bfp_conv_t tag, HReg dst,
+                                 HReg op, s390_bfp_round_t);
+s390_insn *s390_insn_bfp128_binop(UChar size, s390_bfp_binop_t, HReg dst_hi,
+                                  HReg dst_lo, HReg op2_hi, HReg op2_lo);
+s390_insn *s390_insn_bfp128_unop(UChar size, s390_bfp_unop_t, HReg dst_hi,
+                                 HReg dst_lo, HReg op_hi, HReg op_lo);
+s390_insn *s390_insn_bfp128_compare(UChar size, HReg dst, HReg op1_hi,
+                                    HReg op1_lo, HReg op2_hi, HReg op2_lo);
+s390_insn *s390_insn_bfp128_convert_to(UChar size, s390_bfp_conv_t,
+                                       HReg dst_hi, HReg dst_lo, HReg op);
+s390_insn *s390_insn_bfp128_convert_from(UChar size, s390_bfp_conv_t,
+                                         HReg dst_hi, HReg dst_lo, HReg op_hi,
+                                         HReg op_lo, s390_bfp_round_t);
+s390_insn *s390_insn_dfp_binop(UChar size, s390_dfp_binop_t, HReg dst,
+                               HReg op2, HReg op3,
+                               s390_dfp_round_t rounding_mode);
+s390_insn *s390_insn_dfp_unop(UChar size, s390_dfp_unop_t, HReg dst, HReg op);
+s390_insn *s390_insn_dfp_intop(UChar size, s390_dfp_intop_t, HReg dst,
+                               HReg op2, HReg op3);
+s390_insn *s390_insn_dfp_compare(UChar size, s390_dfp_cmp_t, HReg dst,
+                                 HReg op1, HReg op2);
+s390_insn *s390_insn_dfp_convert(UChar size, s390_dfp_conv_t tag, HReg dst,
+                                 HReg op, s390_dfp_round_t);
+s390_insn *s390_insn_dfp_reround(UChar size, HReg dst, HReg op2, HReg op3,
+                                 s390_dfp_round_t);
+s390_insn *s390_insn_fp_convert(UChar size, s390_fp_conv_t tag,
+                                HReg dst, HReg op, HReg r1, s390_dfp_round_t);
+s390_insn *s390_insn_fp128_convert(UChar size, s390_fp_conv_t tag,
+                                   HReg dst_hi, HReg dst_lo, HReg op_hi,
+                                   HReg op_lo, HReg r1, s390_dfp_round_t);
+s390_insn *s390_insn_dfp128_binop(UChar size, s390_dfp_binop_t, HReg dst_hi,
+                                  HReg dst_lo, HReg op2_hi, HReg op2_lo,
+                                  HReg op3_hi, HReg op3_lo,
+                                  s390_dfp_round_t rounding_mode);
+s390_insn *s390_insn_dfp128_unop(UChar size, s390_dfp_unop_t, HReg dst,
+                                 HReg op_hi, HReg op_lo);
+s390_insn *s390_insn_dfp128_intop(UChar size, s390_dfp_intop_t, HReg dst_hi,
+                                  HReg dst_lo, HReg op2,
+                                  HReg op3_hi, HReg op3_lo);
+s390_insn *s390_insn_dfp128_compare(UChar size, s390_dfp_cmp_t, HReg dst,
+                                    HReg op1_hi, HReg op1_lo, HReg op2_hi,
+                                    HReg op2_lo);
+s390_insn *s390_insn_dfp128_convert_to(UChar size, s390_dfp_conv_t,
+                                       HReg dst_hi, HReg dst_lo, HReg op);
+s390_insn *s390_insn_dfp128_convert_from(UChar size, s390_dfp_conv_t,
+                                         HReg dst_hi, HReg dst_lo, HReg op_hi,
+                                         HReg op_lo, s390_dfp_round_t);
+s390_insn *s390_insn_dfp128_reround(UChar size, HReg dst_hi, HReg dst_lo,
+                                    HReg op2, HReg op3_hi, HReg op3_lo,
+                                    s390_dfp_round_t);
+s390_insn *s390_insn_mfence(void);
+s390_insn *s390_insn_mimm(UChar size, s390_amode *dst, ULong value);
+s390_insn *s390_insn_madd(UChar size, s390_amode *dst, UChar delta,
+                          ULong value);
+s390_insn *s390_insn_set_fpc_bfprm(UChar size, HReg mode);
+s390_insn *s390_insn_set_fpc_dfprm(UChar size, HReg mode);
+
+/* Five for translation chaining */
+s390_insn *s390_insn_xdirect(s390_cc_t cond, Addr64 dst, s390_amode *guest_IA,
+                             Bool to_fast_entry);
+s390_insn *s390_insn_xindir(s390_cc_t cond, HReg dst, s390_amode *guest_IA);
+s390_insn *s390_insn_xassisted(s390_cc_t cond, HReg dst, s390_amode *guest_IA,
+                               IRJumpKind kind);
+s390_insn *s390_insn_evcheck(s390_amode *counter, s390_amode *fail_addr);
+s390_insn *s390_insn_profinc(void);
+
+const HChar *s390_insn_as_string(const s390_insn *);
+
+/*--------------------------------------------------------*/
+/* --- Interface exposed to VEX                       --- */
+/*--------------------------------------------------------*/
+
+void ppS390AMode(const s390_amode *);
+void ppS390Instr(const s390_insn *, Bool mode64);
+void ppHRegS390(HReg);
+
+/* Some functions that insulate the register allocator from details
+   of the underlying instruction set. */
+void  getRegUsage_S390Instr( HRegUsage *, const s390_insn *, Bool );
+void  mapRegs_S390Instr    ( HRegRemap *, s390_insn *, Bool );
+Bool  isMove_S390Instr     ( const s390_insn *, HReg *, HReg * );
+Int   emit_S390Instr       ( Bool *, UChar *, Int, const s390_insn *, Bool,
+                             VexEndness, const void *, const void *,
+                             const void *, const void *);
+const RRegUniverse *getRRegUniverse_S390( void );
+void  genSpill_S390        ( HInstr **, HInstr **, HReg , Int , Bool );
+void  genReload_S390       ( HInstr **, HInstr **, HReg , Int , Bool );
+HInstrArray *iselSB_S390   ( const IRSB *, VexArch, const VexArchInfo *,
+                             const VexAbiInfo *, Int, Int, Bool, Bool, Addr);
+
+/* Return the number of bytes of code needed for an event check */
+Int evCheckSzB_S390(void);
+
+/* Perform a chaining and unchaining of an XDirect jump. */
+VexInvalRange chainXDirect_S390(VexEndness endness_host,
+                                void *place_to_chain,
+                                const void *disp_cp_chain_me_EXPECTED,
+                                const void *place_to_jump_to);
+
+VexInvalRange unchainXDirect_S390(VexEndness endness_host,
+                                  void *place_to_unchain,
+                                  const void *place_to_jump_to_EXPECTED,
+                                  const void *disp_cp_chain_me);
+
+/* Patch the counter location into an existing ProfInc point. */
+VexInvalRange patchProfInc_S390(VexEndness endness_host,
+                                void  *code_to_patch,
+                                const ULong *location_of_counter);
+
+/* KLUDGE: See detailed comment in host_s390_defs.c. */
+extern UInt s390_host_hwcaps;
+
+/* Convenience macros to test installed facilities */
+#define s390_host_has_ldisp \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_LDISP))
+#define s390_host_has_eimm \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_EIMM))
+#define s390_host_has_gie \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_GIE))
+#define s390_host_has_dfp \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_DFP))
+#define s390_host_has_fgx \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_FGX))
+#define s390_host_has_etf2 \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_ETF2))
+#define s390_host_has_stfle \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_STFLE))
+#define s390_host_has_etf3 \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_ETF3))
+#define s390_host_has_stckf \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_STCKF))
+#define s390_host_has_fpext \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_FPEXT))
+#define s390_host_has_lsc \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_LSC))
+#define s390_host_has_pfpo \
+                      (s390_host_hwcaps & (VEX_HWCAPS_S390X_PFPO))
+
+#endif /* ndef __VEX_HOST_S390_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                    host_s390_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_s390_isel.c b/VEX/priv/host_s390_isel.c
new file mode 100644
index 0000000..dee892a
--- /dev/null
+++ b/VEX/priv/host_s390_isel.c
@@ -0,0 +1,4179 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                  host_s390_isel.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+   Copyright (C) 2012-2013  Florian Krohm   (britzel@acm.org)
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "libvex_s390x_common.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "guest_s390_defs.h"   /* S390X_GUEST_OFFSET */
+#include "host_generic_regs.h"
+#include "host_s390_defs.h"
+
+/*---------------------------------------------------------*/
+/*--- ISelEnv                                           ---*/
+/*---------------------------------------------------------*/
+
+/* This carries around:
+
+   - A mapping from IRTemp to IRType, giving the type of any IRTemp we
+     might encounter.  This is computed before insn selection starts,
+     and does not change.
+
+   - A mapping from IRTemp to HReg.  This tells the insn selector
+     which virtual register(s) are associated with each IRTemp
+      temporary.  This is computed before insn selection starts, and
+      does not change.  We expect this mapping to map precisely the
+      same set of IRTemps as the type mapping does.
+
+         - vregmap   holds the primary register for the IRTemp.
+         - vregmapHI holds the secondary register for the IRTemp,
+              if any is needed.  That's only for Ity_I64 temps
+              in 32 bit mode or Ity_I128 temps in 64-bit mode.
+
+    - The code array, that is, the insns selected so far.
+
+    - A counter, for generating new virtual registers.
+
+    - The host subarchitecture we are selecting insns for.
+      This is set at the start and does not change.
+
+   - A Bool for indicating whether we may generate chain-me
+     instructions for control flow transfers, or whether we must use
+     XAssisted.
+
+   - The maximum guest address of any guest insn in this block.
+     Actually, the address of the highest-addressed byte from any insn
+     in this block.  Is set at the start and does not change.  This is
+     used for detecting jumps which are definitely forward-edges from
+     this block, and therefore can be made (chained) to the fast entry
+     point of the destination, thereby avoiding the destination's
+     event check.
+
+    - Values of certain guest registers which are often assigned constants.
+*/
+
+/* Symbolic names for guest registers whose value we're tracking */
+/* These symbolic values index ISelEnv::old_value[] and
+   ISelEnv::old_value_valid[] below. */
+enum {
+   GUEST_IA,
+   GUEST_CC_OP,
+   GUEST_CC_DEP1,
+   GUEST_CC_DEP2,
+   GUEST_CC_NDEP,
+   GUEST_SYSNO,
+   GUEST_COUNTER,
+   GUEST_UNKNOWN    /* must be the last entry */
+};
+
+/* Number of registers we're tracking. */
+#define NUM_TRACKED_REGS GUEST_UNKNOWN
+
+
+typedef struct {
+   IRTypeEnv   *type_env;       /* type of each IRTemp; fixed before selection */
+
+   HInstrArray *code;           /* the insns selected so far */
+   HReg        *vregmap;        /* primary vreg for each IRTemp */
+   HReg        *vregmapHI;      /* secondary vreg, if the IRTemp needs two */
+   UInt         n_vregmap;      /* number of entries in the two maps above */
+   UInt         vreg_ctr;       /* counter for allocating new virtual regs */
+   UInt         hwcaps;         /* host hardware capabilities */
+
+   /* Last rounding-mode expression written to the FPC; used to elide
+      redundant FPC updates (see set_*_rounding_mode_in_fpc). */
+   IRExpr      *previous_bfp_rounding_mode;
+   IRExpr      *previous_dfp_rounding_mode;
+
+   /* Last known values of the tracked guest registers (see enum above). */
+   ULong        old_value[NUM_TRACKED_REGS];
+
+   /* The next two are for translation chaining */
+   Addr64       max_ga;
+   Bool         chaining_allowed;
+
+   /* Whether the corresponding old_value[] entry is meaningful. */
+   Bool         old_value_valid[NUM_TRACKED_REGS];
+} ISelEnv;
+
+
+/* Forward declarations */
+static HReg          s390_isel_int_expr(ISelEnv *, IRExpr *);
+static s390_amode   *s390_isel_amode(ISelEnv *, IRExpr *);
+static s390_amode   *s390_isel_amode_b12_b20(ISelEnv *, IRExpr *);
+static s390_cc_t     s390_isel_cc(ISelEnv *, IRExpr *);
+static s390_opnd_RMI s390_isel_int_expr_RMI(ISelEnv *, IRExpr *);
+static void          s390_isel_int128_expr(HReg *, HReg *, ISelEnv *, IRExpr *);
+static HReg          s390_isel_float_expr(ISelEnv *, IRExpr *);
+static void          s390_isel_float128_expr(HReg *, HReg *, ISelEnv *, IRExpr *);
+static HReg          s390_isel_dfp_expr(ISelEnv *, IRExpr *);
+static void          s390_isel_dfp128_expr(HReg *, HReg *, ISelEnv *, IRExpr *);
+
+
+/* Map a guest state OFFSET to the symbolic index of the tracked guest
+   register (GUEST_IA etc.), or return GUEST_UNKNOWN if the offset does
+   not name a tracked register.  Panics on a partial access to a
+   tracked register. */
+static Int
+get_guest_reg(Int offset)
+{
+   switch (offset) {
+   case S390X_GUEST_OFFSET(guest_IA):        return GUEST_IA;
+   case S390X_GUEST_OFFSET(guest_CC_OP):     return GUEST_CC_OP;
+   case S390X_GUEST_OFFSET(guest_CC_DEP1):   return GUEST_CC_DEP1;
+   case S390X_GUEST_OFFSET(guest_CC_DEP2):   return GUEST_CC_DEP2;
+   case S390X_GUEST_OFFSET(guest_CC_NDEP):   return GUEST_CC_NDEP;
+   case S390X_GUEST_OFFSET(guest_SYSNO):     return GUEST_SYSNO;
+   case S390X_GUEST_OFFSET(guest_counter):   return GUEST_COUNTER;
+
+      /* Also make sure there is never a partial write to one of
+         these registers. That would complicate matters.
+         (The '...' case ranges are a GCC extension.) */
+   case S390X_GUEST_OFFSET(guest_IA)+1      ... S390X_GUEST_OFFSET(guest_IA)+7:
+   case S390X_GUEST_OFFSET(guest_CC_OP)+1   ... S390X_GUEST_OFFSET(guest_CC_OP)+7:
+   case S390X_GUEST_OFFSET(guest_CC_DEP1)+1 ... S390X_GUEST_OFFSET(guest_CC_DEP1)+7:
+   case S390X_GUEST_OFFSET(guest_CC_DEP2)+1 ... S390X_GUEST_OFFSET(guest_CC_DEP2)+7:
+   case S390X_GUEST_OFFSET(guest_CC_NDEP)+1 ... S390X_GUEST_OFFSET(guest_CC_NDEP)+7:
+   case S390X_GUEST_OFFSET(guest_SYSNO)+1   ... S390X_GUEST_OFFSET(guest_SYSNO)+7:
+      /* counter is used both as 4-byte and as 8-byte entity */
+   case S390X_GUEST_OFFSET(guest_counter)+1 ... S390X_GUEST_OFFSET(guest_counter)+3:
+   case S390X_GUEST_OFFSET(guest_counter)+5 ... S390X_GUEST_OFFSET(guest_counter)+7:
+      vpanic("partial update of this guest state register is not allowed");
+      break;
+
+   default: break;
+   }
+
+   return GUEST_UNKNOWN;
+}
+
+/* Add an instruction */
+static void
+addInstr(ISelEnv *env, s390_insn *insn)
+{
+   /* Append INSN to the code selected so far. */
+   addHInstr(env->code, insn);
+
+   /* Trace the insn in textual form, if requested. */
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("%s\n", s390_insn_as_string(insn));
+   }
+}
+
+
+/* Construct an IR expression for the 64-bit constant VALUE. */
+static __inline__ IRExpr *
+mkU64(ULong value)
+{
+   return IRExpr_Const(IRConst_U64(value));
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Registers                                         ---*/
+/*---------------------------------------------------------*/
+
+/* Return the virtual register to which a given IRTemp is mapped. */
+static HReg
+lookupIRTemp(ISelEnv *env, IRTemp tmp)
+{
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmap[tmp]));  /* must be mapped already */
+
+   return env->vregmap[tmp];
+}
+
+
+/* Return the two virtual registers to which the IRTemp is mapped. */
+static void
+lookupIRTemp128(HReg *hi, HReg *lo, ISelEnv *env, IRTemp tmp)
+{
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));  /* needs a register pair */
+
+   *lo = env->vregmap[tmp];     /* primary register */
+   *hi = env->vregmapHI[tmp];   /* secondary register */
+}
+
+
+/* Allocate a new virtual integer register */
+static __inline__ HReg
+mkVRegI(UInt ix)
+{
+   /* Virtual regs carry no encoding; IX is the virtual register number. */
+   return mkHReg(/*virtual*/True, HRcInt64, /*encoding*/0, ix);
+}
+
+static __inline__ HReg
+newVRegI(ISelEnv *env)
+{
+   /* Hand out the next free virtual integer register. */
+   return mkVRegI(env->vreg_ctr++);
+}
+
+
+/* Allocate a new virtual floating point register */
+static __inline__ HReg
+mkVRegF(UInt ix)
+{
+   /* Virtual regs carry no encoding; IX is the virtual register number. */
+   return mkHReg(/*virtual*/True, HRcFlt64, /*encoding*/0, ix);
+}
+
+static __inline__ HReg
+newVRegF(ISelEnv *env)
+{
+   /* Hand out the next free virtual floating point register. */
+   return mkVRegF(env->vreg_ctr++);
+}
+
+
+/* Construct a non-virtual general purpose register */
+static __inline__ HReg
+make_gpr(UInt regno)
+{
+   /* REGNO is the hardware register number. */
+   return s390_hreg_gpr(regno);
+}
+
+
+/* Construct a non-virtual floating point register */
+static __inline__ HReg
+make_fpr(UInt regno)
+{
+   /* REGNO is the hardware register number. */
+   return s390_hreg_fpr(regno);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Amode                                             ---*/
+/*---------------------------------------------------------*/
+
+/* True iff VAL is representable as an unsigned 12-bit quantity,
+   i.e. no bits above bit 11 are set. */
+static __inline__ Bool
+ulong_fits_unsigned_12bit(ULong val)
+{
+   return val <= 0xFFFu;
+}
+
+
+/* True iff VAL equals the sign-extension of its low 20 bits, i.e. VAL
+   fits into a signed 20-bit displacement field. */
+static __inline__ Bool
+ulong_fits_signed_20bit(ULong val)
+{
+   ULong v = val & 0xFFFFFu;
+
+   v = (Long)(v << 44) >> 44;  /* sign extend */
+
+   return val == v;
+}
+
+
+/* True iff VAL equals the sign-extension of its low 8 bits, i.e. VAL
+   fits into a signed 8-bit field. */
+static __inline__ Bool
+ulong_fits_signed_8bit(ULong val)
+{
+   ULong v = val & 0xFFu;
+
+   v = (Long)(v << 56) >> 56;  /* sign extend */
+
+   return val == v;
+}
+
+/* EXPR is an expression that is used as an address. Return an s390_amode
+   for it. If select_b12_b20_only is true the returned amode must be either
+   S390_AMODE_B12 or S390_AMODE_B20. */
+static s390_amode *
+s390_isel_amode_wrk(ISelEnv *env, IRExpr *expr,
+                    Bool select_b12_b20_only __attribute__((unused)))
+{
+   /* NOTE: this implementation only ever builds B12 or B20 amodes, so
+      the SELECT_B12_B20_ONLY constraint is trivially satisfied and the
+      parameter is unused here. */
+   if (expr->tag == Iex_Binop && expr->Iex.Binop.op == Iop_Add64) {
+      IRExpr *arg1 = expr->Iex.Binop.arg1;
+      IRExpr *arg2 = expr->Iex.Binop.arg2;
+
+      /* Move constant into right subtree */
+      if (arg1->tag == Iex_Const) {
+         IRExpr *tmp;
+         tmp  = arg1;
+         arg1 = arg2;
+         arg2 = tmp;
+      }
+
+      /* r + constant: Check for b12 first, then b20 */
+      if (arg2->tag == Iex_Const && arg2->Iex.Const.con->tag == Ico_U64) {
+         ULong value = arg2->Iex.Const.con->Ico.U64;
+
+         if (ulong_fits_unsigned_12bit(value)) {
+            return s390_amode_b12((Int)value, s390_isel_int_expr(env, arg1));
+         }
+         if (ulong_fits_signed_20bit(value)) {
+            return s390_amode_b20((Int)value, s390_isel_int_expr(env, arg1));
+         }
+      }
+   }
+
+   /* Doesn't match anything in particular.  Generate it into
+      a register and use that. */
+   return s390_amode_b12(0, s390_isel_int_expr(env, expr));
+}
+
+
+/* Compute an amode for EXPR without restricting the amode kind. */
+static s390_amode *
+s390_isel_amode(ISelEnv *env, IRExpr *expr)
+{
+   s390_amode *am;
+
+   /* Address computation should yield a 64-bit value */
+   vassert(typeOfIRExpr(env->type_env, expr) == Ity_I64);
+
+   am = s390_isel_amode_wrk(env, expr, /* B12, B20 only */ False);
+
+   /* Check post-condition */
+   vassert(s390_amode_is_sane(am));
+
+   return am;
+}
+
+
+/* Sometimes we must compile an expression into an amode that is either
+   S390_AMODE_B12 or S390_AMODE_B20. An example is the compare-and-swap
+   opcode. These opcodes do not have a variant that accepts an addressing
+   mode with an index register.
+   Now, in theory we could, when emitting the compare-and-swap insn,
+   hack a, say, BX12 amode into a B12 amode like so:
+
+      r0 = b       # save away base register
+      b  = b + x   # add index register to base register
+      cas(b,d,...) # emit compare-and-swap using b12 amode
+      b  = r0      # restore base register
+
+   Unfortunately, emitting the compare-and-swap insn already utilises r0
+   under the covers, so the trick above is off limits, sadly. */
+static s390_amode *
+s390_isel_amode_b12_b20(ISelEnv *env, IRExpr *expr)
+{
+   s390_amode *am;
+
+   /* Address computation should yield a 64-bit value */
+   vassert(typeOfIRExpr(env->type_env, expr) == Ity_I64);
+
+   am = s390_isel_amode_wrk(env, expr, /* B12, B20 only */ True);
+
+   /* Check post-condition: caller relies on not getting an amode with
+      an index register. */
+   vassert(s390_amode_is_sane(am) &&
+           (am->tag == S390_AMODE_B12 || am->tag == S390_AMODE_B20));
+
+   return am;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Helper functions                                  ---*/
+/*---------------------------------------------------------*/
+
+/* Constants and memory accesses should be right operands */
+/* Swap LEFT and RIGHT when LEFT is a constant, a load, or a guest-state
+   read.  NOTE: the arguments are expanded multiple times -- only pass
+   plain lvalues, never expressions with side effects. */
+#define order_commutative_operands(left, right)                   \
+        do {                                                      \
+          if (left->tag == Iex_Const || left->tag == Iex_Load ||  \
+              left->tag == Iex_Get) {                             \
+            IRExpr *tmp;                                          \
+            tmp   = left;                                         \
+            left  = right;                                        \
+            right = tmp;                                          \
+          }                                                       \
+        } while (0)
+
+
+/* Copy an RMI operand to the DST register */
+static s390_insn *
+s390_opnd_copy(UChar size, HReg dst, s390_opnd_RMI opnd)
+{
+   /* Dispatch on the operand kind: memory, register, or immediate. */
+   if (opnd.tag == S390_OPND_AMODE)
+      return s390_insn_load(size, dst, opnd.variant.am);
+
+   if (opnd.tag == S390_OPND_REG)
+      return s390_insn_move(size, dst, opnd.variant.reg);
+
+   if (opnd.tag == S390_OPND_IMMEDIATE)
+      return s390_insn_load_immediate(size, dst, opnd.variant.imm);
+
+   vpanic("s390_opnd_copy");
+}
+
+
+/* Construct a RMI operand for a register */
+static __inline__ s390_opnd_RMI
+s390_opnd_reg(HReg reg)
+{
+   /* Wrap REG into a register-flavoured RMI operand. */
+   s390_opnd_RMI opnd = { .tag = S390_OPND_REG,
+                          .variant = { .reg = reg } };
+
+   return opnd;
+}
+
+
+/* Construct a RMI operand for an immediate constant */
+static __inline__ s390_opnd_RMI
+s390_opnd_imm(ULong value)
+{
+   /* Wrap VALUE into an immediate-flavoured RMI operand. */
+   s390_opnd_RMI opnd = { .tag = S390_OPND_IMMEDIATE,
+                          .variant = { .imm = value } };
+
+   return opnd;
+}
+
+
+/* Return True, if EXPR represents the constant 0 */
+static Bool
+s390_expr_is_const_zero(IRExpr *expr)
+{
+   ULong value;
+
+   if (expr->tag == Iex_Const) {
+      switch (expr->Iex.Const.con->tag) {
+      case Ico_U1:  value = expr->Iex.Const.con->Ico.U1;  break;
+      case Ico_U8:  value = expr->Iex.Const.con->Ico.U8;  break;
+      case Ico_U16: value = expr->Iex.Const.con->Ico.U16; break;
+      case Ico_U32: value = expr->Iex.Const.con->Ico.U32; break;
+      case Ico_U64: value = expr->Iex.Const.con->Ico.U64; break;
+      default:
+         vpanic("s390_expr_is_const_zero");
+      }
+      return value == 0;
+   }
+
+   /* Not a constant at all */
+   return False;
+}
+
+
+/* Return the value of CON as a sign-extended ULong value */
+static ULong
+get_const_value_as_ulong(const IRConst *con)
+{
+   ULong value;
+
+   /* Shift left to put the value's sign bit at bit 63, then shift
+      right arithmetically to sign-extend. */
+   case Ico_U1:  value = con->Ico.U1;  return ((Long)(value << 63) >> 63);
+   case Ico_U8:  value = con->Ico.U8;  return ((Long)(value << 56) >> 56);
+   case Ico_U16: value = con->Ico.U16; return ((Long)(value << 48) >> 48);
+   case Ico_U32: value = con->Ico.U32; return ((Long)(value << 32) >> 32);
+   case Ico_U64: return con->Ico.U64;
+   default:
+      vpanic("get_const_value_as_ulong");
+   }
+}
+
+
+/* Call a helper (clean or dirty)
+   Arguments must satisfy the following conditions:
+
+   (a) they are expressions yielding an integer result
+   (b) there can be no more than S390_NUM_GPRPARMS arguments
+
+   guard is a Ity_Bit expression indicating whether or not the
+   call happens.  If guard == NULL, the call is unconditional.
+
+   Calling the helper function proceeds as follows:
+
+   (1) The helper arguments are evaluated and their value stored in
+       virtual registers.
+   (2) The condition code is evaluated
+   (3) The argument values are copied from the virtual registers to the
+       registers mandated by the ABI.
+   (4) Call the helper function.
+
+   This is not the most efficient way as step 3 generates register-to-register
+   moves. But it is the least fragile way as the only hidden dependency here
+   is that register-to-register moves (step 3) must not clobber the condition
+   code. Other schemes (e.g. VEX r2326) that attempt to avoid the register-
+   to-register moves add more such dependencies. Not good. Besides, it's the job
+   of the register allocator to throw out those reg-to-reg moves.
+*/
+static void
+doHelperCall(/*OUT*/UInt *stackAdjustAfterCall,
+             /*OUT*/RetLoc *retloc,
+             ISelEnv *env, IRExpr *guard,
+             IRCallee *callee, IRType retTy, IRExpr **args)
+{
+   UInt n_args, i, argreg, size;
+   Addr64 target;
+   HReg tmpregs[S390_NUM_GPRPARMS];
+   s390_cc_t cc;
+
+   /* Set default returns.  We'll update them later if needed. */
+   *stackAdjustAfterCall = 0;
+   *retloc               = mk_RetLoc_INVALID();
+
+   /* The return type can be I{64,32,16,8} or V{128,256}.  In the
+      latter two cases, it is expected that |args| will contain the
+      special node IRExpr_VECRET(). For s390, however, V128 and V256 return
+      values do not occur as we generally do not support vector types.
+
+      |args| may also contain IRExpr_BBPTR(), in which case the value
+      in the guest state pointer register is passed as the
+      corresponding argument.
+
+      These are used for cross-checking that IR-level constraints on
+      the use of IRExpr_VECRET() and IRExpr_BBPTR() are observed. */
+   UInt nVECRETs = 0;
+   UInt nBBPTRs  = 0;
+
+   /* Count the arguments (args is NULL-terminated). */
+   n_args = 0;
+   for (i = 0; args[i]; i++)
+      ++n_args;
+
+   if (n_args > S390_NUM_GPRPARMS) {
+      vpanic("doHelperCall: too many arguments");
+   }
+
+   /* All arguments must have Ity_I64. For two reasons:
+      (1) We do not handle floating point arguments.
+      (2) The ABI requires that integer values are sign- or zero-extended
+           to 64 bit.
+   */
+   Int arg_errors = 0;
+   for (i = 0; i < n_args; ++i) {
+      if (UNLIKELY(args[i]->tag == Iex_VECRET)) {
+         nVECRETs++;
+      } else if (UNLIKELY(args[i]->tag == Iex_BBPTR)) {
+         nBBPTRs++;
+      } else {
+         IRType type = typeOfIRExpr(env->type_env, args[i]);
+         if (type != Ity_I64) {
+            ++arg_errors;
+            vex_printf("calling %s: argument #%d has type ", callee->name, i);
+            ppIRType(type);
+            vex_printf("; Ity_I64 is required\n");
+         }
+      }
+   }
+
+   if (arg_errors)
+      vpanic("cannot continue due to errors in argument passing");
+
+   /* If these fail, the IR is ill-formed */
+   vassert(nBBPTRs == 0 || nBBPTRs == 1);
+   vassert(nVECRETs == 0);
+
+   argreg = 0;
+
+   /* Compute the function arguments into a temporary register each */
+   for (i = 0; i < n_args; i++) {
+      IRExpr *arg = args[i];
+      if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+         /* If we need the guest state pointer put it in a temporary arg reg */
+         tmpregs[argreg] = newVRegI(env);
+         addInstr(env, s390_insn_move(sizeof(ULong), tmpregs[argreg],
+                                      s390_hreg_guest_state_pointer()));
+      } else {
+         tmpregs[argreg] = s390_isel_int_expr(env, args[i]);
+      }
+      argreg++;
+   }
+   /* Here argreg == n_args. */
+
+   /* Compute the condition */
+   cc = S390_CC_ALWAYS;
+   if (guard) {
+      if (guard->tag == Iex_Const
+          && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+         /* unconditional -- do nothing */
+      } else {
+         cc = s390_isel_cc(env, guard);
+      }
+   }
+
+   /* Move the args to the final register. It is paramount, that the
+      code to move the registers does not clobber the condition code ! */
+   for (i = 0; i < argreg; i++) {
+      HReg finalreg;
+
+      finalreg = make_gpr(s390_gprno_from_arg_index(i));
+      size = sizeofIRType(Ity_I64);
+      addInstr(env, s390_insn_move(size, finalreg, tmpregs[i]));
+   }
+
+   target = (Addr)callee->addr;
+
+   /* Do final checks, set the return values, and generate the call
+      instruction proper. */
+   vassert(*stackAdjustAfterCall == 0);
+   vassert(is_RetLoc_INVALID(*retloc));
+   switch (retTy) {
+   case Ity_INVALID:
+      /* Function doesn't return a value. */
+      *retloc = mk_RetLoc_simple(RLPri_None);
+      break;
+   case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8:
+      *retloc = mk_RetLoc_simple(RLPri_Int);
+      break;
+   default:
+      /* IR can denote other possible return types, but we don't
+         handle those here. */
+      vex_printf("calling %s: return type is ", callee->name);
+      ppIRType(retTy);
+      vex_printf("; an integer type is required\n");
+      vassert(0);
+   }
+
+   /* Finally, the call itself. */
+   addInstr(env, s390_insn_helper_call(cc, target, n_args,
+                                       callee->name, *retloc));
+}
+
+
+/*---------------------------------------------------------*/
+/*--- BFP helper functions                              ---*/
+/*---------------------------------------------------------*/
+
+/* Set the BFP rounding mode in the FPC. This function is called for
+   all non-conversion BFP instructions as those will always get the
+   rounding mode from the FPC. */
+static void
+set_bfp_rounding_mode_in_fpc(ISelEnv *env, IRExpr *irrm)
+{
+   vassert(typeOfIRExpr(env->type_env, irrm) == Ity_I32);
+
+   /* Do we need to do anything? */
+   if (env->previous_bfp_rounding_mode &&
+       env->previous_bfp_rounding_mode->tag == Iex_RdTmp &&
+       irrm->tag == Iex_RdTmp &&
+       env->previous_bfp_rounding_mode->Iex.RdTmp.tmp == irrm->Iex.RdTmp.tmp) {
+      /* No - new mode is identical to previous mode.  */
+      return;
+   }
+
+   /* No luck - we better set it, and remember what we set it to. */
+   env->previous_bfp_rounding_mode = irrm;
+
+   /* The incoming rounding mode is in VEX IR encoding. Need to change
+      to s390.
+
+      rounding mode | s390 | IR
+      -------------------------
+      to nearest    |  00  | 00
+      to zero       |  01  | 11
+      to +infinity  |  10  | 10
+      to -infinity  |  11  | 01
+
+      So: s390 = (4 - IR) & 3
+   */
+   HReg ir = s390_isel_int_expr(env, irrm);
+
+   HReg mode = newVRegI(env);
+
+   addInstr(env, s390_insn_load_immediate(4, mode, 4));                  /* mode = 4          */
+   addInstr(env, s390_insn_alu(4, S390_ALU_SUB, mode, s390_opnd_reg(ir))); /* mode = 4 - ir     */
+   addInstr(env, s390_insn_alu(4, S390_ALU_AND, mode, s390_opnd_imm(3))); /* mode = (4-ir) & 3 */
+
+   addInstr(env, s390_insn_set_fpc_bfprm(4, mode));
+}
+
+
+/* This function is invoked for insns that support a specification of
+   a rounding mode in the insn itself. In that case there is no need to
+   stick the rounding mode into the FPC -- a good thing. However, the
+   rounding mode must be known. */
+static s390_bfp_round_t
+get_bfp_rounding_mode(ISelEnv *env, IRExpr *irrm)
+{
+   if (irrm->tag == Iex_Const) {          /* rounding mode is known */
+      vassert(irrm->Iex.Const.con->tag == Ico_U32);
+      IRRoundingMode mode = irrm->Iex.Const.con->Ico.U32;
+
+      switch (mode) {
+      case Irrm_NEAREST:  return S390_BFP_ROUND_NEAREST_EVEN;
+      case Irrm_ZERO:     return S390_BFP_ROUND_ZERO;
+      case Irrm_PosINF:   return S390_BFP_ROUND_POSINF;
+      case Irrm_NegINF:   return S390_BFP_ROUND_NEGINF;
+      default:
+         vpanic("get_bfp_rounding_mode");
+      }
+   }
+
+   /* Mode not known at selection time: put it into the FPC and tell
+      the insn to round per FPC. */
+   set_bfp_rounding_mode_in_fpc(env, irrm);
+   return S390_BFP_ROUND_PER_FPC;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- DFP helper functions                              ---*/
+/*---------------------------------------------------------*/
+
+/* Set the DFP rounding mode in the FPC. This function is called for
+   all non-conversion DFP instructions as those will always get the
+   rounding mode from the FPC. */
+static void
+set_dfp_rounding_mode_in_fpc(ISelEnv *env, IRExpr *irrm)
+{
+   vassert(typeOfIRExpr(env->type_env, irrm) == Ity_I32);
+
+   /* Do we need to do anything? */
+   if (env->previous_dfp_rounding_mode &&
+       env->previous_dfp_rounding_mode->tag == Iex_RdTmp &&
+       irrm->tag == Iex_RdTmp &&
+       env->previous_dfp_rounding_mode->Iex.RdTmp.tmp == irrm->Iex.RdTmp.tmp) {
+      /* No - new mode is identical to previous mode.  */
+      return;
+   }
+
+   /* No luck - we better set it, and remember what we set it to. */
+   env->previous_dfp_rounding_mode = irrm;
+
+   /* The incoming rounding mode is in VEX IR encoding. Need to change
+      to s390.
+
+      rounding mode                     | S390 |  IR
+      -----------------------------------------------
+      to nearest, ties to even          | 000  | 000
+      to zero                           | 001  | 011
+      to +infinity                      | 010  | 010
+      to -infinity                      | 011  | 001
+      to nearest, ties away from 0      | 100  | 100
+      to nearest, ties toward 0         | 101  | 111
+      to away from 0                    | 110  | 110
+      to prepare for shorter precision  | 111  | 101
+
+      So: s390 = (IR ^ ((IR << 1) & 2))
+   */
+   HReg ir = s390_isel_int_expr(env, irrm);
+
+   HReg mode = newVRegI(env);
+
+   addInstr(env, s390_insn_move(4, mode, ir));                            /* mode = ir            */
+   addInstr(env, s390_insn_alu(4, S390_ALU_LSH, mode, s390_opnd_imm(1))); /* mode = ir << 1       */
+   addInstr(env, s390_insn_alu(4, S390_ALU_AND, mode, s390_opnd_imm(2))); /* mode = (ir << 1) & 2 */
+   addInstr(env, s390_insn_alu(4, S390_ALU_XOR, mode, s390_opnd_reg(ir))); /* mode ^= ir          */
+
+   addInstr(env, s390_insn_set_fpc_dfprm(4, mode));
+}
+
+
+/* This function is invoked for insns that support a specification of
+   a rounding mode in the insn itself. In that case there is no need to
+   stick the rounding mode into the FPC -- a good thing. However, the
+   rounding mode must be known.
+
+   When mapping an Irrm_XYZ value to an S390_DFP_ROUND_ value there is
+   often a choice. For instance, Irrm_ZERO could be mapped to either
+   S390_DFP_ROUND_ZERO_5 or S390_DFP_ROUND_ZERO_9. The difference between
+   those two is that with S390_DFP_ROUND_ZERO_9 the recognition of the
+   quantum exception is suppressed whereas with S390_DFP_ROUND_ZERO_5 it
+   is not.  As the quantum exception is not modelled we can choose either
+   value. The choice is to use S390_DFP_ROUND_.. values in the range [8:15],
+   because values in the range [1:7] have unpredictable rounding behaviour
+   when the floating point exception facility is not installed.
+
+   Translation table of
+   s390 DFP rounding mode to IRRoundingMode to s390 DFP rounding mode
+
+   s390(S390_DFP_ROUND_)  |  IR(Irrm_)           |  s390(S390_DFP_ROUND_)
+   --------------------------------------------------------------------
+   NEAREST_TIE_AWAY_0_1   |  NEAREST_TIE_AWAY_0  |  NEAREST_TIE_AWAY_0_12
+   NEAREST_TIE_AWAY_0_12  |     "                |     "
+   PREPARE_SHORT_3        |  PREPARE_SHORTER     |  PREPARE_SHORT_15
+   PREPARE_SHORT_15       |     "                |     "
+   NEAREST_EVEN_4         |  NEAREST             |  NEAREST_EVEN_8
+   NEAREST_EVEN_8         |     "                |     "
+   ZERO_5                 |  ZERO                |  ZERO_9
+   ZERO_9                 |     "                |     "
+   POSINF_6               |  PosINF              |  POSINF_10
+   POSINF_10              |     "                |     "
+   NEGINF_7               |  NegINF              |  NEGINF_11
+   NEGINF_11              |     "                |     "
+   NEAREST_TIE_TOWARD_0   |  NEAREST_TIE_TOWARD_0|  NEAREST_TIE_TOWARD_0
+   AWAY_0                 |  AWAY_FROM_ZERO      |  AWAY_0
+*/
+static s390_dfp_round_t
+get_dfp_rounding_mode(ISelEnv *env, IRExpr *irrm)
+{
+   if (irrm->tag == Iex_Const) {          /* rounding mode is known */
+      vassert(irrm->Iex.Const.con->tag == Ico_U32);
+      IRRoundingMode mode = irrm->Iex.Const.con->Ico.U32;
+
+      switch (mode) {
+      case Irrm_NEAREST:
+         return S390_DFP_ROUND_NEAREST_EVEN_8;
+      case Irrm_NegINF:
+         return S390_DFP_ROUND_NEGINF_11;
+      case Irrm_PosINF:
+         return S390_DFP_ROUND_POSINF_10;
+      case Irrm_ZERO:
+         return S390_DFP_ROUND_ZERO_9;
+      case Irrm_NEAREST_TIE_AWAY_0:
+         return S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12;
+      case Irrm_PREPARE_SHORTER:
+          return S390_DFP_ROUND_PREPARE_SHORT_15;
+      case Irrm_AWAY_FROM_ZERO:
+         return S390_DFP_ROUND_AWAY_0;
+      case Irrm_NEAREST_TIE_TOWARD_0:
+         return S390_DFP_ROUND_NEAREST_TIE_TOWARD_0;
+      default:
+         vpanic("get_dfp_rounding_mode");
+      }
+   }
+
+   /* Mode not known at selection time: put it into the FPC and tell
+      the insn to round per FPC. */
+   set_dfp_rounding_mode_in_fpc(env, irrm);
+   return S390_DFP_ROUND_PER_FPC_0;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Condition code helper functions                   ---*/
+/*---------------------------------------------------------*/
+
+/* CC_S390 holds the condition code in s390 encoding. Convert it to
+   VEX encoding (IRCmpFResult)
+
+   s390     VEX              b6 b2 b0   cc.1  cc.0
+   0      0x40 EQ             1  0  0     0     0
+   1      0x01 LT             0  0  1     0     1
+   2      0x00 GT             0  0  0     1     0
+   3      0x45 Unordered      1  1  1     1     1
+
+   b0 = cc.0
+   b2 = cc.0 & cc.1
+   b6 = ~(cc.0 ^ cc.1)   // ((cc.0 - cc.1) + 0x1 ) & 0x1
+
+   VEX = b0 | (b2 << 2) | (b6 << 6);
+*/
+static HReg
+convert_s390_to_vex_bfpcc(ISelEnv *env, HReg cc_s390)
+{
+   HReg cc0, cc1, b2, b6, cc_vex;
+
+   /* cc0 = bit 0 of the s390 cc */
+   cc0 = newVRegI(env);
+   addInstr(env, s390_insn_move(4, cc0, cc_s390));
+   addInstr(env, s390_insn_alu(4, S390_ALU_AND, cc0, s390_opnd_imm(1)));
+
+   /* cc1 = bit 1 of the s390 cc */
+   cc1 = newVRegI(env);
+   addInstr(env, s390_insn_move(4, cc1, cc_s390));
+   addInstr(env, s390_insn_alu(4, S390_ALU_RSH, cc1, s390_opnd_imm(1)));
+
+   /* b2 = (cc0 & cc1) << 2 */
+   b2 = newVRegI(env);
+   addInstr(env, s390_insn_move(4, b2, cc0));
+   addInstr(env, s390_insn_alu(4, S390_ALU_AND, b2, s390_opnd_reg(cc1)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_LSH, b2, s390_opnd_imm(2)));
+
+   /* b6 = (((cc0 - cc1) + 1) & 1) << 6   -- i.e. ~(cc0 ^ cc1) in bit 6 */
+   b6 = newVRegI(env);
+   addInstr(env, s390_insn_move(4, b6, cc0));
+   addInstr(env, s390_insn_alu(4, S390_ALU_SUB, b6, s390_opnd_reg(cc1)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_ADD, b6, s390_opnd_imm(1)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_AND, b6, s390_opnd_imm(1)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_LSH, b6, s390_opnd_imm(6)));
+
+   /* cc_vex = b0 | b2 | b6 */
+   cc_vex = newVRegI(env);
+   addInstr(env, s390_insn_move(4, cc_vex, cc0));
+   addInstr(env, s390_insn_alu(4, S390_ALU_OR, cc_vex, s390_opnd_reg(b2)));
+   addInstr(env, s390_insn_alu(4, S390_ALU_OR, cc_vex, s390_opnd_reg(b6)));
+
+   return cc_vex;
+}
+
+/* CC_S390 holds the condition code in s390 encoding. Convert it to
+   VEX encoding (IRCmpDResult) */
+static HReg
+convert_s390_to_vex_dfpcc(ISelEnv *env, HReg cc_s390)
+{
+   /* The encodings for IRCmpFResult and IRCmpDResult are the same. */
+   return convert_s390_to_vex_bfpcc(env, cc_s390);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (128 bit)               ---*/
+/*---------------------------------------------------------*/
+/* Worker for s390_isel_int128_expr. Evaluate a 128-bit integer
+   expression; the high half of the result is written to *DST_HI and
+   the low half to *DST_LO. */
+static void
+s390_isel_int128_expr_wrk(HReg *dst_hi, HReg *dst_lo, ISelEnv *env,
+                          IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+
+   vassert(ty == Ity_I128);
+
+   /* No need to consider the following
+      - 128-bit constants (they do not exist in VEX)
+      - 128-bit loads from memory (will not be generated)
+   */
+
+   /* Read 128-bit IRTemp */
+   if (expr->tag == Iex_RdTmp) {
+      lookupIRTemp128(dst_hi, dst_lo, env, expr->Iex.RdTmp.tmp);
+      return;
+   }
+
+   if (expr->tag == Iex_Binop) {
+      IRExpr *arg1 = expr->Iex.Binop.arg1;
+      IRExpr *arg2 = expr->Iex.Binop.arg2;
+      Bool is_signed_multiply, is_signed_divide;
+
+      switch (expr->Iex.Binop.op) {
+      case Iop_MullU64:
+         is_signed_multiply = False;
+         goto do_multiply64;
+
+      case Iop_MullS64:
+         is_signed_multiply = True;
+         goto do_multiply64;
+
+      case Iop_DivModU128to64:
+         is_signed_divide = False;
+         goto do_divide64;
+
+      case Iop_DivModS128to64:
+         is_signed_divide = True;
+         goto do_divide64;
+
+      case Iop_64HLto128:
+         /* Simply pair up the two 64-bit halves. */
+         *dst_hi = s390_isel_int_expr(env, arg1);
+         *dst_lo = s390_isel_int_expr(env, arg2);
+         return;
+
+      case Iop_DivModS64to64: {
+         HReg r10, r11, h1;
+         s390_opnd_RMI op2;
+
+         h1  = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+         op2 = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+         /* We use non-virtual registers r10 and r11 as pair */
+         r10  = make_gpr(10);
+         r11  = make_gpr(11);
+
+         /* Move 1st operand into r11 and */
+         addInstr(env, s390_insn_move(8, r11, h1));
+
+         /* Divide */
+         addInstr(env, s390_insn_divs(8, r10, r11, op2));
+
+         /* The result is in registers r10 (remainder) and r11 (quotient).
+            Move the result into the reg pair that is being returned such
+            that the low 64 bits are the quotient and the upper 64 bits
+            are the remainder (see libvex_ir.h). */
+         *dst_hi = newVRegI(env);
+         *dst_lo = newVRegI(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, r10));
+         addInstr(env, s390_insn_move(8, *dst_lo, r11));
+         return;
+      }
+
+      default:
+         break;
+
+      do_multiply64: {
+            HReg r10, r11, h1;
+            s390_opnd_RMI op2;
+
+            order_commutative_operands(arg1, arg2);
+
+            h1   = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+            op2  = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+            /* We use non-virtual registers r10 and r11 as pair */
+            r10  = make_gpr(10);
+            r11  = make_gpr(11);
+
+            /* Move the first operand to r11 */
+            addInstr(env, s390_insn_move(8, r11, h1));
+
+            /* Multiply */
+            addInstr(env, s390_insn_mul(8, r10, r11, op2, is_signed_multiply));
+
+            /* The result is in registers r10 and r11. Assign to two virtual regs
+               and return. */
+            *dst_hi = newVRegI(env);
+            *dst_lo = newVRegI(env);
+            addInstr(env, s390_insn_move(8, *dst_hi, r10));
+            addInstr(env, s390_insn_move(8, *dst_lo, r11));
+            return;
+         }
+
+      do_divide64: {
+         HReg r10, r11, hi, lo;
+         s390_opnd_RMI op2;
+
+         s390_isel_int128_expr(&hi, &lo, env, arg1);
+         op2  = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+         /* We use non-virtual registers r10 and r11 as pair */
+         r10  = make_gpr(10);
+         r11  = make_gpr(11);
+
+         /* Move high 64 bits of the 1st operand into r10 and
+            the low 64 bits into r11. */
+         addInstr(env, s390_insn_move(8, r10, hi));
+         addInstr(env, s390_insn_move(8, r11, lo));
+
+         /* Divide */
+         addInstr(env, s390_insn_div(8, r10, r11, op2, is_signed_divide));
+
+         /* The result is in registers r10 (remainder) and r11 (quotient).
+            Move the result into the reg pair that is being returned such
+            that the low 64 bits are the quotient and the upper 64 bits
+            are the remainder (see libvex_ir.h). */
+         *dst_hi = newVRegI(env);
+         *dst_lo = newVRegI(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, r10));
+         addInstr(env, s390_insn_move(8, *dst_lo, r11));
+         return;
+      }
+      }
+   }
+
+   vpanic("s390_isel_int128_expr");
+}
+
+
+/* Evaluate a 128-bit expression into a pair of 64-bit registers
+   (*DST_HI holds the high half, *DST_LO the low half). The registers
+   must not be modified by subsequently emitted code in the caller. */
+static void
+s390_isel_int128_expr(HReg *dst_hi, HReg *dst_lo, ISelEnv *env, IRExpr *expr)
+{
+   s390_isel_int128_expr_wrk(dst_hi, dst_lo, env, expr);
+
+   /* Sanity checks: each half must be a virtual 64-bit integer reg. */
+   vassert(hregClass(*dst_hi) == HRcInt64);
+   vassert(hregIsVirtual(*dst_hi));
+   vassert(hregClass(*dst_lo) == HRcInt64);
+   vassert(hregIsVirtual(*dst_lo));
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64/32/16/8 bit)        ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for an integer-typed expression, and add them to the
+   code list.  Return a reg holding the result.  This reg will be a
+   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
+   want to modify it, ask for a new vreg, copy it in there, and modify
+   the copy.  The register allocator will do its best to map both
+   vregs to the same real register, so the copies will often disappear
+   later in the game.
+
+   This should handle expressions of 64, 32, 16 and 8-bit type.
+   All results are returned in a 64-bit register.  For 32-, 16- and
+   8-bit expressions, the upper 32, 48 or 56 bits respectively are
+   arbitrary, so callers must mask or sign-extend partial values
+   where necessary.
+*/
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static HReg
+s390_isel_int_expr_wrk(ISelEnv *env, IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+   UChar size;
+   s390_bfp_conv_t conv;
+   s390_dfp_conv_t dconv;
+
+   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 || ty == Ity_I64);
+
+   size = sizeofIRType(ty);   /* size of the result after evaluating EXPR */
+
+   switch (expr->tag) {
+
+      /* --------- TEMP --------- */
+   case Iex_RdTmp:
+      /* Return the virtual register that holds the temporary. */
+      return lookupIRTemp(env, expr->Iex.RdTmp.tmp);
+
+      /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg        dst = newVRegI(env);
+      s390_amode *am  = s390_isel_amode(env, expr->Iex.Load.addr);
+
+      /* Only big-endian loads occur on s390. */
+      if (expr->Iex.Load.end != Iend_BE)
+         goto irreducible;
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+      /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      IRExpr *arg1 = expr->Iex.Binop.arg1;
+      IRExpr *arg2 = expr->Iex.Binop.arg2;
+      HReg h1, res;
+      s390_alu_t opkind;
+      s390_opnd_RMI op2, value, opnd;
+      s390_insn *insn;
+      Bool is_commutative, is_signed_multiply, is_signed_divide;
+
+      is_commutative = True;
+
+      switch (expr->Iex.Binop.op) {
+      case Iop_MullU8:
+      case Iop_MullU16:
+      case Iop_MullU32:
+         is_signed_multiply = False;
+         goto do_multiply;
+
+      case Iop_MullS8:
+      case Iop_MullS16:
+      case Iop_MullS32:
+         is_signed_multiply = True;
+         goto do_multiply;
+
+      do_multiply: {
+            HReg r10, r11;
+            UInt arg_size = size / 2;
+
+            order_commutative_operands(arg1, arg2);
+
+            h1   = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+            op2  = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+            /* We use non-virtual registers r10 and r11 as pair */
+            r10  = make_gpr(10);
+            r11  = make_gpr(11);
+
+            /* Move the first operand to r11 */
+            addInstr(env, s390_insn_move(arg_size, r11, h1));
+
+            /* Multiply */
+            addInstr(env, s390_insn_mul(arg_size, r10, r11, op2, is_signed_multiply));
+
+            /* The result is in registers r10 and r11. Combine them into a
+               SIZE-bit value in the destination register. */
+            res  = newVRegI(env);
+            addInstr(env, s390_insn_move(arg_size, res, r10));
+            value = s390_opnd_imm(arg_size * 8);
+            addInstr(env, s390_insn_alu(size, S390_ALU_LSH, res, value));
+            value = s390_opnd_imm((((ULong)1) << arg_size * 8) - 1);
+            addInstr(env, s390_insn_alu(size, S390_ALU_AND, r11, value));
+            opnd = s390_opnd_reg(r11);
+            addInstr(env, s390_insn_alu(size, S390_ALU_OR,  res, opnd));
+            return res;
+         }
+
+      case Iop_DivModS64to32:
+         is_signed_divide = True;
+         goto do_divide;
+
+      case Iop_DivModU64to32:
+         is_signed_divide = False;
+         goto do_divide;
+
+      do_divide: {
+            HReg r10, r11;
+
+            h1   = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+            op2  = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+
+            /* We use non-virtual registers r10 and r11 as pair */
+            r10  = make_gpr(10);
+            r11  = make_gpr(11);
+
+            /* Split the first operand and put the high 32 bits into r10 and
+               the low 32 bits into r11. */
+            addInstr(env, s390_insn_move(8, r10, h1));
+            addInstr(env, s390_insn_move(8, r11, h1));
+            value = s390_opnd_imm(32);
+            addInstr(env, s390_insn_alu(8, S390_ALU_RSH, r10, value));
+
+            /* Divide */
+            addInstr(env, s390_insn_div(4, r10, r11, op2, is_signed_divide));
+
+            /* The result is in registers r10 (remainder) and r11 (quotient).
+               Combine them into a 64-bit value such that the low 32 bits are
+               the quotient and the upper 32 bits are the remainder. (see
+               libvex_ir.h). */
+            res  = newVRegI(env);
+            addInstr(env, s390_insn_move(8, res, r10));
+            value = s390_opnd_imm(32);
+            addInstr(env, s390_insn_alu(8, S390_ALU_LSH, res, value));
+            value = s390_opnd_imm((((ULong)1) << 32) - 1);
+            addInstr(env, s390_insn_alu(8, S390_ALU_AND, r11, value));
+            opnd = s390_opnd_reg(r11);
+            addInstr(env, s390_insn_alu(8, S390_ALU_OR,  res, opnd));
+            return res;
+         }
+
+      case Iop_F32toI32S:  conv = S390_BFP_F32_TO_I32;  goto do_convert;
+      case Iop_F32toI64S:  conv = S390_BFP_F32_TO_I64;  goto do_convert;
+      case Iop_F32toI32U:  conv = S390_BFP_F32_TO_U32;  goto do_convert;
+      case Iop_F32toI64U:  conv = S390_BFP_F32_TO_U64;  goto do_convert;
+      case Iop_F64toI32S:  conv = S390_BFP_F64_TO_I32;  goto do_convert;
+      case Iop_F64toI64S:  conv = S390_BFP_F64_TO_I64;  goto do_convert;
+      case Iop_F64toI32U:  conv = S390_BFP_F64_TO_U32;  goto do_convert;
+      case Iop_F64toI64U:  conv = S390_BFP_F64_TO_U64;  goto do_convert;
+      case Iop_F128toI32S: conv = S390_BFP_F128_TO_I32; goto do_convert_128;
+      case Iop_F128toI64S: conv = S390_BFP_F128_TO_I64; goto do_convert_128;
+      case Iop_F128toI32U: conv = S390_BFP_F128_TO_U32; goto do_convert_128;
+      case Iop_F128toI64U: conv = S390_BFP_F128_TO_U64; goto do_convert_128;
+
+      case Iop_D64toI32S:  dconv = S390_DFP_D64_TO_I32;  goto do_convert_dfp;
+      case Iop_D64toI64S:  dconv = S390_DFP_D64_TO_I64;  goto do_convert_dfp;
+      case Iop_D64toI32U:  dconv = S390_DFP_D64_TO_U32;  goto do_convert_dfp;
+      case Iop_D64toI64U:  dconv = S390_DFP_D64_TO_U64;  goto do_convert_dfp;
+      case Iop_D128toI32S: dconv = S390_DFP_D128_TO_I32; goto do_convert_dfp128;
+      case Iop_D128toI64S: dconv = S390_DFP_D128_TO_I64; goto do_convert_dfp128;
+      case Iop_D128toI32U: dconv = S390_DFP_D128_TO_U32; goto do_convert_dfp128;
+      case Iop_D128toI64U: dconv = S390_DFP_D128_TO_U64; goto do_convert_dfp128;
+
+      do_convert: {
+         s390_bfp_round_t rounding_mode;
+
+         res  = newVRegI(env);
+         h1   = s390_isel_float_expr(env, arg2);   /* Process operand */
+
+         rounding_mode = get_bfp_rounding_mode(env, arg1);
+         addInstr(env, s390_insn_bfp_convert(size, conv, res, h1,
+                                             rounding_mode));
+         return res;
+      }
+
+      do_convert_128: {
+         s390_bfp_round_t rounding_mode;
+         HReg op_hi, op_lo, f13, f15;
+
+         res = newVRegI(env);
+         s390_isel_float128_expr(&op_hi, &op_lo, env, arg2); /* operand */
+
+         /* We use non-virtual registers r13 and r15 as pair */
+         f13 = make_fpr(13);
+         f15 = make_fpr(15);
+
+         /* operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op_hi));
+         addInstr(env, s390_insn_move(8, f15, op_lo));
+
+         rounding_mode = get_bfp_rounding_mode(env, arg1);
+         addInstr(env, s390_insn_bfp128_convert_from(size, conv, res,
+                                                     INVALID_HREG, f13, f15,
+                                                     rounding_mode));
+         return res;
+      }
+
+      do_convert_dfp: {
+            s390_dfp_round_t rounding_mode;
+
+            res  = newVRegI(env);
+            h1   = s390_isel_dfp_expr(env, arg2);   /* Process operand */
+
+            rounding_mode = get_dfp_rounding_mode(env, arg1);
+            addInstr(env, s390_insn_dfp_convert(size, dconv, res, h1,
+                                                rounding_mode));
+            return res;
+         }
+
+      do_convert_dfp128: {
+            s390_dfp_round_t rounding_mode;
+            HReg op_hi, op_lo, f13, f15;
+
+            res = newVRegI(env);
+            s390_isel_dfp128_expr(&op_hi, &op_lo, env, arg2); /* operand */
+
+            /* We use non-virtual registers r13 and r15 as pair */
+            f13 = make_fpr(13);
+            f15 = make_fpr(15);
+
+            /* operand --> (f13, f15) */
+            addInstr(env, s390_insn_move(8, f13, op_hi));
+            addInstr(env, s390_insn_move(8, f15, op_lo));
+
+            rounding_mode = get_dfp_rounding_mode(env, arg1);
+            addInstr(env, s390_insn_dfp128_convert_from(size, dconv, res,
+                                                        INVALID_HREG, f13,
+                                                        f15, rounding_mode));
+            return res;
+         }
+
+      case Iop_8HLto16:
+      case Iop_16HLto32:
+      case Iop_32HLto64: {
+         HReg h2;
+         UInt arg_size = size / 2;
+
+         res  = newVRegI(env);
+         h1   = s390_isel_int_expr(env, arg1);   /* Process 1st operand */
+         h2   = s390_isel_int_expr(env, arg2);   /* Process 2nd operand */
+
+         addInstr(env, s390_insn_move(arg_size, res, h1));
+         value = s390_opnd_imm(arg_size * 8);
+         addInstr(env, s390_insn_alu(size, S390_ALU_LSH, res, value));
+         value = s390_opnd_imm((((ULong)1) << arg_size * 8) - 1);
+         addInstr(env, s390_insn_alu(size, S390_ALU_AND, h2, value));
+         opnd = s390_opnd_reg(h2);
+         addInstr(env, s390_insn_alu(size, S390_ALU_OR,  res, opnd));
+         return res;
+      }
+
+      case Iop_Max32U: {
+         /* arg1 > arg2 ? arg1 : arg2   using uint32_t arguments */
+         res = newVRegI(env);
+         h1  = s390_isel_int_expr(env, arg1);
+         op2 = s390_isel_int_expr_RMI(env, arg2);
+
+         addInstr(env, s390_insn_move(size, res, h1));
+         addInstr(env, s390_insn_compare(size, res, op2, False /* signed */));
+         addInstr(env, s390_insn_cond_move(size, S390_CC_L, res, op2));
+         return res;
+      }
+
+      case Iop_CmpF32:
+      case Iop_CmpF64: {
+         HReg cc_s390, h2;
+
+         h1 = s390_isel_float_expr(env, arg1);
+         h2 = s390_isel_float_expr(env, arg2);
+         cc_s390 = newVRegI(env);
+
+         size = (expr->Iex.Binop.op == Iop_CmpF32) ? 4 : 8;
+
+         addInstr(env, s390_insn_bfp_compare(size, cc_s390, h1, h2));
+
+         return convert_s390_to_vex_bfpcc(env, cc_s390);
+      }
+
+      case Iop_CmpF128: {
+         HReg op1_hi, op1_lo, op2_hi, op2_lo, f12, f13, f14, f15, cc_s390;
+
+         s390_isel_float128_expr(&op1_hi, &op1_lo, env, arg1); /* 1st operand */
+         s390_isel_float128_expr(&op2_hi, &op2_lo, env, arg2); /* 2nd operand */
+         cc_s390 = newVRegI(env);
+
+         /* We use non-virtual registers as pairs (f13, f15) and (f12, f14) */
+         f12 = make_fpr(12);
+         f13 = make_fpr(13);
+         f14 = make_fpr(14);
+         f15 = make_fpr(15);
+
+         /* 1st operand --> (f12, f14) */
+         addInstr(env, s390_insn_move(8, f12, op1_hi));
+         addInstr(env, s390_insn_move(8, f14, op1_lo));
+
+         /* 2nd operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op2_hi));
+         addInstr(env, s390_insn_move(8, f15, op2_lo));
+
+         addInstr(env, s390_insn_bfp128_compare(16, cc_s390, f12, f14, f13, f15));
+
+         return convert_s390_to_vex_bfpcc(env, cc_s390);
+      }
+
+      case Iop_CmpD64:
+      case Iop_CmpExpD64: {
+         HReg cc_s390, h2;
+         s390_dfp_cmp_t cmp;
+
+         h1 = s390_isel_dfp_expr(env, arg1);
+         h2 = s390_isel_dfp_expr(env, arg2);
+         cc_s390 = newVRegI(env);
+
+         switch(expr->Iex.Binop.op) {
+         case Iop_CmpD64:    cmp = S390_DFP_COMPARE; break;
+         case Iop_CmpExpD64: cmp = S390_DFP_COMPARE_EXP; break;
+         default: goto irreducible;
+         }
+         addInstr(env, s390_insn_dfp_compare(8, cmp, cc_s390, h1, h2));
+
+         return convert_s390_to_vex_dfpcc(env, cc_s390);
+      }
+
+      case Iop_CmpD128:
+      case Iop_CmpExpD128: {
+         HReg op1_hi, op1_lo, op2_hi, op2_lo, f12, f13, f14, f15, cc_s390;
+         s390_dfp_cmp_t cmp;
+
+         s390_isel_dfp128_expr(&op1_hi, &op1_lo, env, arg1); /* 1st operand */
+         s390_isel_dfp128_expr(&op2_hi, &op2_lo, env, arg2); /* 2nd operand */
+         cc_s390 = newVRegI(env);
+
+         /* We use non-virtual registers as pairs (f13, f15) and (f12, f14) */
+         f12 = make_fpr(12);
+         f13 = make_fpr(13);
+         f14 = make_fpr(14);
+         f15 = make_fpr(15);
+
+         /* 1st operand --> (f12, f14) */
+         addInstr(env, s390_insn_move(8, f12, op1_hi));
+         addInstr(env, s390_insn_move(8, f14, op1_lo));
+
+         /* 2nd operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op2_hi));
+         addInstr(env, s390_insn_move(8, f15, op2_lo));
+
+         switch(expr->Iex.Binop.op) {
+         case Iop_CmpD128:    cmp = S390_DFP_COMPARE; break;
+         case Iop_CmpExpD128: cmp = S390_DFP_COMPARE_EXP; break;
+         default: goto irreducible;
+         }
+         addInstr(env, s390_insn_dfp128_compare(16, cmp, cc_s390, f12, f14,
+                                                f13, f15));
+
+         return convert_s390_to_vex_dfpcc(env, cc_s390);
+      }
+
+      case Iop_Add8:
+      case Iop_Add16:
+      case Iop_Add32:
+      case Iop_Add64:
+         opkind = S390_ALU_ADD;
+         break;
+
+      case Iop_Sub8:
+      case Iop_Sub16:
+      case Iop_Sub32:
+      case Iop_Sub64:
+         opkind = S390_ALU_SUB;
+         is_commutative = False;
+         break;
+
+      case Iop_And8:
+      case Iop_And16:
+      case Iop_And32:
+      case Iop_And64:
+         opkind = S390_ALU_AND;
+         break;
+
+      case Iop_Or8:
+      case Iop_Or16:
+      case Iop_Or32:
+      case Iop_Or64:
+         opkind = S390_ALU_OR;
+         break;
+
+      case Iop_Xor8:
+      case Iop_Xor16:
+      case Iop_Xor32:
+      case Iop_Xor64:
+         opkind = S390_ALU_XOR;
+         break;
+
+      case Iop_Shl8:
+      case Iop_Shl16:
+      case Iop_Shl32:
+      case Iop_Shl64:
+         opkind = S390_ALU_LSH;
+         is_commutative = False;
+         break;
+
+      case Iop_Shr8:
+      case Iop_Shr16:
+      case Iop_Shr32:
+      case Iop_Shr64:
+         opkind = S390_ALU_RSH;
+         is_commutative = False;
+         break;
+
+      case Iop_Sar8:
+      case Iop_Sar16:
+      case Iop_Sar32:
+      case Iop_Sar64:
+         opkind = S390_ALU_RSHA;
+         is_commutative = False;
+         break;
+
+      default:
+         goto irreducible;
+      }
+
+      /* Pattern match: 0 - arg1  -->  -arg1   */
+      if (opkind == S390_ALU_SUB && s390_expr_is_const_zero(arg1)) {
+         res  = newVRegI(env);
+         op2  = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+         insn = s390_insn_unop(size, S390_NEGATE, res, op2);
+         addInstr(env, insn);
+
+         return res;
+      }
+
+      if (is_commutative) {
+         order_commutative_operands(arg1, arg2);
+      }
+
+      h1   = s390_isel_int_expr(env, arg1);       /* Process 1st operand */
+      op2  = s390_isel_int_expr_RMI(env, arg2);   /* Process 2nd operand */
+      res  = newVRegI(env);
+
+      /* As right shifts of one/two byte operands are implemented using a
+         4-byte shift op, we first need to zero/sign-extend the shiftee. */
+      switch (expr->Iex.Binop.op) {
+      case Iop_Shr8:
+         insn = s390_insn_unop(4, S390_ZERO_EXTEND_8, res, s390_opnd_reg(h1));
+         break;
+      case Iop_Shr16:
+         insn = s390_insn_unop(4, S390_ZERO_EXTEND_16, res, s390_opnd_reg(h1));
+         break;
+      case Iop_Sar8:
+         insn = s390_insn_unop(4, S390_SIGN_EXTEND_8, res, s390_opnd_reg(h1));
+         break;
+      case Iop_Sar16:
+         insn = s390_insn_unop(4, S390_SIGN_EXTEND_16, res, s390_opnd_reg(h1));
+         break;
+      default:
+         insn = s390_insn_move(size, res, h1);
+         break;
+      }
+      addInstr(env, insn);
+
+      insn = s390_insn_alu(size, opkind, res, op2);
+
+      addInstr(env, insn);
+
+      return res;
+   }
+
+      /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      static s390_opnd_RMI mask  = { S390_OPND_IMMEDIATE };
+      static s390_opnd_RMI shift = { S390_OPND_IMMEDIATE };
+      s390_opnd_RMI opnd;
+      s390_insn    *insn;
+      IRExpr *arg;
+      HReg    dst, h1;
+      IROp    unop, binop;
+
+      arg = expr->Iex.Unop.arg;
+
+      /* Special cases are handled here */
+
+      /* 32-bit multiply with 32-bit result or
+         64-bit multiply with 64-bit result */
+      unop  = expr->Iex.Unop.op;
+      /* Only inspect the Binop fields after checking the tag; ARG need
+         not be a binary expression, in which case Iex.Binop.op would be
+         an inactive union member. */
+      binop = (arg->tag == Iex_Binop) ? arg->Iex.Binop.op : Iop_INVALID;
+
+      if ((arg->tag == Iex_Binop &&
+           ((unop == Iop_64to32 &&
+             (binop == Iop_MullS32 || binop == Iop_MullU32)) ||
+            (unop == Iop_128to64 &&
+             (binop == Iop_MullS64 || binop == Iop_MullU64))))) {
+         h1   = s390_isel_int_expr(env, arg->Iex.Binop.arg1);     /* 1st opnd */
+         opnd = s390_isel_int_expr_RMI(env, arg->Iex.Binop.arg2); /* 2nd opnd */
+         dst  = newVRegI(env);     /* Result goes into a new register */
+         addInstr(env, s390_insn_move(size, dst, h1));
+         addInstr(env, s390_insn_alu(size, S390_ALU_MUL, dst, opnd));
+
+         return dst;
+      }
+
+      if (unop == Iop_ReinterpF64asI64 || unop == Iop_ReinterpF32asI32) {
+         dst = newVRegI(env);
+         h1  = s390_isel_float_expr(env, arg);     /* Process the operand */
+         addInstr(env, s390_insn_move(size, dst, h1));
+
+         return dst;
+      }
+
+      if (unop == Iop_ReinterpD64asI64) {
+         dst = newVRegI(env);
+         h1  = s390_isel_dfp_expr(env, arg);     /* Process the operand */
+         addInstr(env, s390_insn_move(size, dst, h1));
+
+         return dst;
+      }
+
+      if (unop == Iop_ExtractExpD64 || unop == Iop_ExtractSigD64) {
+         s390_dfp_unop_t dfpop;
+         switch(unop) {
+         case Iop_ExtractExpD64: dfpop = S390_DFP_EXTRACT_EXP_D64; break;
+         case Iop_ExtractSigD64: dfpop = S390_DFP_EXTRACT_SIG_D64; break;
+         default: goto irreducible;
+         }
+         dst = newVRegI(env);
+         h1  = s390_isel_dfp_expr(env, arg);     /* Process the operand */
+         addInstr(env, s390_insn_dfp_unop(size, dfpop, dst, h1));
+         return dst;
+      }
+
+      if (unop == Iop_ExtractExpD128 || unop == Iop_ExtractSigD128) {
+         s390_dfp_unop_t dfpop;
+         HReg op_hi, op_lo, f13, f15;
+
+         switch(unop) {
+         case Iop_ExtractExpD128: dfpop = S390_DFP_EXTRACT_EXP_D128; break;
+         case Iop_ExtractSigD128: dfpop = S390_DFP_EXTRACT_SIG_D128; break;
+         default: goto irreducible;
+         }
+         dst = newVRegI(env);
+         s390_isel_dfp128_expr(&op_hi, &op_lo, env, arg); /* Process operand */
+
+         /* We use non-virtual registers r13 and r15 as pair */
+         f13 = make_fpr(13);
+         f15 = make_fpr(15);
+
+         /* operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op_hi));
+         addInstr(env, s390_insn_move(8, f15, op_lo));
+
+         addInstr(env, s390_insn_dfp128_unop(size, dfpop, dst, f13, f15));
+         return dst;
+      }
+
+      /* Expressions whose argument is 1-bit wide */
+      if (typeOfIRExpr(env->type_env, arg) == Ity_I1) {
+         s390_cc_t cond = s390_isel_cc(env, arg);
+         dst = newVRegI(env);     /* Result goes into a new register */
+         addInstr(env, s390_insn_cc2bool(dst, cond));
+
+         switch (unop) {
+         case Iop_1Uto8:
+         case Iop_1Uto32:
+            /* Zero extend */
+            mask.variant.imm = 1;
+            addInstr(env, s390_insn_alu(4, S390_ALU_AND,  dst, mask));
+            break;
+
+         case Iop_1Uto64:
+            /* Zero extend */
+            mask.variant.imm = 1;
+            addInstr(env, s390_insn_alu(8, S390_ALU_AND,  dst, mask));
+            break;
+
+         case Iop_1Sto8:
+         case Iop_1Sto16:
+         case Iop_1Sto32:
+            /* Sign extend: replicate the bool into all 32 bits. */
+            shift.variant.imm = 31;
+            addInstr(env, s390_insn_alu(4, S390_ALU_LSH,  dst, shift));
+            addInstr(env, s390_insn_alu(4, S390_ALU_RSHA, dst, shift));
+            break;
+
+         case Iop_1Sto64:
+            /* Sign extend: replicate the bool into all 64 bits. */
+            shift.variant.imm = 63;
+            addInstr(env, s390_insn_alu(8, S390_ALU_LSH,  dst, shift));
+            addInstr(env, s390_insn_alu(8, S390_ALU_RSHA, dst, shift));
+            break;
+
+         default:
+            goto irreducible;
+         }
+
+         return dst;
+      }
+
+      /* Regular processing */
+
+      if (unop == Iop_128to64) {
+         HReg dst_hi, dst_lo;
+
+         s390_isel_int128_expr(&dst_hi, &dst_lo, env, arg);
+         return dst_lo;
+      }
+
+      if (unop == Iop_128HIto64) {
+         HReg dst_hi, dst_lo;
+
+         s390_isel_int128_expr(&dst_hi, &dst_lo, env, arg);
+         return dst_hi;
+      }
+
+      dst  = newVRegI(env);     /* Result goes into a new register */
+      opnd = s390_isel_int_expr_RMI(env, arg);     /* Process the operand */
+
+      switch (unop) {
+      case Iop_8Uto16:
+      case Iop_8Uto32:
+      case Iop_8Uto64:
+         insn = s390_insn_unop(size, S390_ZERO_EXTEND_8, dst, opnd);
+         break;
+
+      case Iop_16Uto32:
+      case Iop_16Uto64:
+         insn = s390_insn_unop(size, S390_ZERO_EXTEND_16, dst, opnd);
+         break;
+
+      case Iop_32Uto64:
+         insn = s390_insn_unop(size, S390_ZERO_EXTEND_32, dst, opnd);
+         break;
+
+      case Iop_8Sto16:
+      case Iop_8Sto32:
+      case Iop_8Sto64:
+         insn = s390_insn_unop(size, S390_SIGN_EXTEND_8, dst, opnd);
+         break;
+
+      case Iop_16Sto32:
+      case Iop_16Sto64:
+         insn = s390_insn_unop(size, S390_SIGN_EXTEND_16, dst, opnd);
+         break;
+
+      case Iop_32Sto64:
+         insn = s390_insn_unop(size, S390_SIGN_EXTEND_32, dst, opnd);
+         break;
+
+      case Iop_64to8:
+      case Iop_64to16:
+      case Iop_64to32:
+      case Iop_32to8:
+      case Iop_32to16:
+      case Iop_16to8:
+         /* Down-casts are no-ops. Upstream operations will only look at
+            the bytes that make up the result of the down-cast. So there
+            is no point setting the other bytes to 0. */
+         insn = s390_opnd_copy(8, dst, opnd);
+         break;
+
+      case Iop_64HIto32:
+         addInstr(env, s390_opnd_copy(8, dst, opnd));
+         shift.variant.imm = 32;
+         insn = s390_insn_alu(8, S390_ALU_RSH, dst, shift);
+         break;
+
+      case Iop_32HIto16:
+         addInstr(env, s390_opnd_copy(4, dst, opnd));
+         shift.variant.imm = 16;
+         insn = s390_insn_alu(4, S390_ALU_RSH, dst, shift);
+         break;
+
+      case Iop_16HIto8:
+         addInstr(env, s390_opnd_copy(2, dst, opnd));
+         shift.variant.imm = 8;
+         insn = s390_insn_alu(2, S390_ALU_RSH, dst, shift);
+         break;
+
+      case Iop_Not8:
+      case Iop_Not16:
+      case Iop_Not32:
+      case Iop_Not64:
+         /* XOR with ffff... */
+         mask.variant.imm = ~(ULong)0;
+         addInstr(env, s390_opnd_copy(size, dst, opnd));
+         insn = s390_insn_alu(size, S390_ALU_XOR, dst, mask);
+         break;
+
+      case Iop_Left8:
+      case Iop_Left16:
+      case Iop_Left32:
+      case Iop_Left64:
+         addInstr(env, s390_insn_unop(size, S390_NEGATE, dst, opnd));
+         insn = s390_insn_alu(size, S390_ALU_OR, dst, opnd);
+         break;
+
+      case Iop_CmpwNEZ32:
+      case Iop_CmpwNEZ64: {
+         /* Use the fact that x | -x == 0 iff x == 0. Otherwise, either X
+            or -X will have a 1 in the MSB. */
+         addInstr(env, s390_insn_unop(size, S390_NEGATE, dst, opnd));
+         addInstr(env, s390_insn_alu(size, S390_ALU_OR,  dst, opnd));
+         shift.variant.imm = (unop == Iop_CmpwNEZ32) ? 31 : 63;
+         addInstr(env, s390_insn_alu(size, S390_ALU_RSHA,  dst, shift));
+         return dst;
+      }
+
+      case Iop_Clz64: {
+         HReg r10, r11;
+
+         /* This will be implemented using FLOGR, if possible. So we need to
+            set aside a pair of non-virtual registers. The result (number of
+            left-most zero bits) will be in r10. The value in r11 is unspecified
+            and must not be used. */
+         r10  = make_gpr(10);
+         r11  = make_gpr(11);
+
+         addInstr(env, s390_insn_clz(8, r10, r11, opnd));
+         addInstr(env, s390_insn_move(8, dst, r10));
+         return dst;
+      }
+
+      default:
+         goto irreducible;
+      }
+
+      addInstr(env, insn);
+
+      return dst;
+   }
+
+      /* --------- GET --------- */
+   case Iex_Get: {
+      HReg dst = newVRegI(env);
+      s390_amode *am = s390_amode_for_guest_state(expr->Iex.Get.offset);
+
+      /* We never load more than 8 bytes from the guest state, because the
+         floating point register pair is not contiguous. */
+      vassert(size <= 8);
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+   case Iex_GetI:
+      /* not needed */
+      break;
+
+      /* --------- CCALL --------- */
+   case Iex_CCall: {
+      HReg dst = newVRegI(env);
+      HReg ret = make_gpr(S390_REGNO_RETURN_VALUE);
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+
+      doHelperCall(&addToSp, &rloc, env, NULL, expr->Iex.CCall.cee,
+                   expr->Iex.CCall.retty, expr->Iex.CCall.args);
+      vassert(is_sane_RetLoc(rloc));
+      vassert(rloc.pri == RLPri_Int);
+      vassert(addToSp == 0);
+      addInstr(env, s390_insn_move(sizeof(ULong), dst, ret));
+
+      return dst;
+   }
+
+      /* --------- LITERAL --------- */
+
+      /* Load a literal into a register. Create a "load immediate"
+         v-insn and return the register. */
+   case Iex_Const: {
+      ULong value;
+      HReg  dst = newVRegI(env);
+      const IRConst *con = expr->Iex.Const.con;
+
+      /* Bitwise copy of the value. No sign/zero-extension */
+      switch (con->tag) {
+      case Ico_U64: value = con->Ico.U64; break;
+      case Ico_U32: value = con->Ico.U32; break;
+      case Ico_U16: value = con->Ico.U16; break;
+      case Ico_U8:  value = con->Ico.U8;  break;
+      default:      vpanic("s390_isel_int_expr: invalid constant");
+      }
+
+      addInstr(env, s390_insn_load_immediate(size, dst, value));
+
+      return dst;
+   }
+
+      /* --------- MULTIPLEX --------- */
+   case Iex_ITE: {
+      IRExpr *cond_expr;
+      HReg dst, r1;
+      s390_opnd_RMI r0;
+
+      cond_expr = expr->Iex.ITE.cond;
+
+      vassert(typeOfIRExpr(env->type_env, cond_expr) == Ity_I1);
+
+      dst  = newVRegI(env);
+      r0   = s390_isel_int_expr_RMI(env, expr->Iex.ITE.iffalse);
+      r1   = s390_isel_int_expr(env, expr->Iex.ITE.iftrue);
+      size = sizeofIRType(typeOfIRExpr(env->type_env, expr->Iex.ITE.iftrue));
+
+      s390_cc_t cc = s390_isel_cc(env, cond_expr);
+
+      addInstr(env, s390_insn_move(size, dst, r1));
+      addInstr(env, s390_insn_cond_move(size, s390_cc_invert(cc), dst, r0));
+      return dst;
+   }
+
+   default:
+      break;
+   }
+
+   /* We get here if no pattern matched. */
+ irreducible:
+   ppIRExpr(expr);
+   vpanic("s390_isel_int_expr: cannot reduce tree");
+}
+
+
+/* Compute an integer expression into a virtual 64-bit GPR and return it. */
+static HReg
+s390_isel_int_expr(ISelEnv *env, IRExpr *expr)
+{
+   HReg reg = s390_isel_int_expr_wrk(env, expr);
+
+   /* The worker must hand back a virtual register of integer class. */
+   vassert(hregIsVirtual(reg));
+   vassert(hregClass(reg) == HRcInt64);
+
+   return reg;
+}
+
+
+/* Compute an integer expression as a register / memory (amode) / immediate
+   operand, picking whichever form avoids an unnecessary register move. */
+static s390_opnd_RMI
+s390_isel_int_expr_RMI(ISelEnv *env, IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+   s390_opnd_RMI opnd;
+
+   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 ||
+           ty == Ity_I64);
+
+   switch (expr->tag) {
+   case Iex_Load:
+      /* A memory load becomes an address-mode operand. */
+      opnd.tag = S390_OPND_AMODE;
+      opnd.variant.am = s390_isel_amode(env, expr->Iex.Load.addr);
+      break;
+
+   case Iex_Get:
+      /* A guest-state read is likewise expressible as an amode. */
+      opnd.tag = S390_OPND_AMODE;
+      opnd.variant.am = s390_amode_for_guest_state(expr->Iex.Get.offset);
+      break;
+
+   case Iex_Const: {
+      ULong value;
+
+      /* The bit pattern for the value will be stored as is in the least
+         significant bits of VALUE. */
+      switch (expr->Iex.Const.con->tag) {
+      case Ico_U1:  value = expr->Iex.Const.con->Ico.U1;  break;
+      case Ico_U8:  value = expr->Iex.Const.con->Ico.U8;  break;
+      case Ico_U16: value = expr->Iex.Const.con->Ico.U16; break;
+      case Ico_U32: value = expr->Iex.Const.con->Ico.U32; break;
+      case Ico_U64: value = expr->Iex.Const.con->Ico.U64; break;
+      default:
+         vpanic("s390_isel_int_expr_RMI");
+      }
+
+      opnd.tag = S390_OPND_IMMEDIATE;
+      opnd.variant.imm = value;
+      break;
+   }
+
+   default:
+      /* Anything else gets evaluated into a register. */
+      opnd.tag = S390_OPND_REG;
+      opnd.variant.reg = s390_isel_int_expr(env, expr);
+      break;
+   }
+
+   return opnd;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (128 bit)        ---*/
+/*---------------------------------------------------------*/
+/* Compute a 128-bit floating point value into a pair of virtual 64-bit
+   FPRs: *DST_HI receives the high half, *DST_LO the low half.  Panics on
+   IR trees it cannot reduce. */
+static void
+s390_isel_float128_expr_wrk(HReg *dst_hi, HReg *dst_lo, ISelEnv *env,
+                            IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+
+   vassert(ty == Ity_F128);
+
+   switch (expr->tag) {
+   case Iex_RdTmp:
+      /* Return the virtual registers that hold the temporary. */
+      lookupIRTemp128(dst_hi, dst_lo, env, expr->Iex.RdTmp.tmp);
+      return;
+
+      /* --------- LOAD --------- */
+   case Iex_Load: {
+      IRExpr *addr_hi, *addr_lo;
+      s390_amode *am_hi, *am_lo;
+
+      if (expr->Iex.Load.end != Iend_BE)
+         goto irreducible;
+
+      addr_hi = expr->Iex.Load.addr;
+      addr_lo = IRExpr_Binop(Iop_Add64, addr_hi, mkU64(8));
+
+      am_hi  = s390_isel_amode(env, addr_hi);
+      am_lo  = s390_isel_amode(env, addr_lo);
+
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_load(8, *dst_hi, am_hi));
+      /* BUGFIX: the low half must go into *DST_LO.  Previously the second
+         load overwrote *DST_HI and left *DST_LO uninitialised. */
+      addInstr(env, s390_insn_load(8, *dst_lo, am_lo));
+      return;
+   }
+
+
+      /* --------- GET --------- */
+   case Iex_Get:
+      /* This is not supported because loading 128-bit from the guest
+         state is almost certainly wrong. Use get_fpr_pair instead. */
+      vpanic("Iex_Get with F128 data");
+
+      /* --------- 4-ary OP --------- */
+   case Iex_Qop:
+      vpanic("Iex_Qop with F128 data");
+
+      /* --------- TERNARY OP --------- */
+   case Iex_Triop: {
+      IRTriop *triop = expr->Iex.Triop.details;
+      IROp    op     = triop->op;
+      IRExpr *left   = triop->arg2;
+      IRExpr *right  = triop->arg3;
+      s390_bfp_binop_t bfpop;
+      HReg op1_hi, op1_lo, op2_hi, op2_lo, f12, f13, f14, f15;
+
+      s390_isel_float128_expr(&op1_hi, &op1_lo, env, left);  /* 1st operand */
+      s390_isel_float128_expr(&op2_hi, &op2_lo, env, right); /* 2nd operand */
+
+      /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+      f12 = make_fpr(12);
+      f13 = make_fpr(13);
+      f14 = make_fpr(14);
+      f15 = make_fpr(15);
+
+      /* 1st operand --> (f12, f14) */
+      addInstr(env, s390_insn_move(8, f12, op1_hi));
+      addInstr(env, s390_insn_move(8, f14, op1_lo));
+
+      /* 2nd operand --> (f13, f15) */
+      addInstr(env, s390_insn_move(8, f13, op2_hi));
+      addInstr(env, s390_insn_move(8, f15, op2_lo));
+
+      switch (op) {
+      case Iop_AddF128: bfpop = S390_BFP_ADD; break;
+      case Iop_SubF128: bfpop = S390_BFP_SUB; break;
+      case Iop_MulF128: bfpop = S390_BFP_MUL; break;
+      case Iop_DivF128: bfpop = S390_BFP_DIV; break;
+      default:
+         goto irreducible;
+      }
+
+      /* arg1 carries the IR rounding mode; install it in the FPC. */
+      set_bfp_rounding_mode_in_fpc(env, triop->arg1);
+      addInstr(env, s390_insn_bfp128_binop(16, bfpop, f12, f14, f13, f15));
+
+      /* Move result to virtual destination register */
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_move(8, *dst_hi, f12));
+      addInstr(env, s390_insn_move(8, *dst_lo, f14));
+
+      return;
+   }
+
+      /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      switch (expr->Iex.Binop.op) {
+      case Iop_SqrtF128: {
+         HReg op_hi, op_lo, f12, f13, f14, f15;
+
+         /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+         f12 = make_fpr(12);
+         f13 = make_fpr(13);
+         f14 = make_fpr(14);
+         f15 = make_fpr(15);
+
+         s390_isel_float128_expr(&op_hi, &op_lo, env, expr->Iex.Binop.arg2);
+
+         /* operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op_hi));
+         addInstr(env, s390_insn_move(8, f15, op_lo));
+
+         set_bfp_rounding_mode_in_fpc(env, expr->Iex.Binop.arg1);
+         addInstr(env, s390_insn_bfp128_unop(16, S390_BFP_SQRT, f12, f14,
+                                             f13, f15));
+
+         /* Move result to virtual destination registers */
+         *dst_hi = newVRegF(env);
+         *dst_lo = newVRegF(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, f12));
+         addInstr(env, s390_insn_move(8, *dst_lo, f14));
+         return;
+      }
+
+      case Iop_F64HLtoF128:
+         /* Simply pair up the two 64-bit halves; no code is emitted. */
+         *dst_hi = s390_isel_float_expr(env, expr->Iex.Binop.arg1);
+         *dst_lo = s390_isel_float_expr(env, expr->Iex.Binop.arg2);
+         return;
+
+      case Iop_D32toF128:
+      case Iop_D64toF128: {
+         IRExpr *irrm;
+         IRExpr *left;
+         s390_dfp_round_t rm;
+         HReg h1; /* virtual reg. to hold source */
+         HReg f0, f2, f4, r1; /* real registers used by PFPO */
+         s390_fp_conv_t fpconv;
+
+         switch (expr->Iex.Binop.op) {
+         case Iop_D32toF128:
+            fpconv = S390_FP_D32_TO_F128;
+            break;
+         case Iop_D64toF128:
+            fpconv = S390_FP_D64_TO_F128;
+            break;
+         default: goto irreducible;
+         }
+
+         f4 = make_fpr(4); /* source */
+         f0 = make_fpr(0); /* destination */
+         f2 = make_fpr(2); /* destination */
+         r1 = make_gpr(1); /* GPR #1 clobbered */
+         irrm = expr->Iex.Binop.arg1;
+         left = expr->Iex.Binop.arg2;
+         rm = get_dfp_rounding_mode(env, irrm);
+         h1 = s390_isel_dfp_expr(env, left);
+         addInstr(env, s390_insn_move(8, f4, h1));
+         addInstr(env, s390_insn_fp128_convert(16, fpconv, f0, f2,
+                                               f4, INVALID_HREG, r1, rm));
+         /* (f0, f2) --> destination */
+         *dst_hi = newVRegF(env);
+         *dst_lo = newVRegF(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, f0));
+         addInstr(env, s390_insn_move(8, *dst_lo, f2));
+
+         return;
+      }
+
+      case Iop_D128toF128: {
+         IRExpr *irrm;
+         IRExpr *left;
+         s390_dfp_round_t rm;
+         HReg op_hi, op_lo;
+         HReg f0, f2, f4, f6, r1; /* real registers used by PFPO */
+
+         f4 = make_fpr(4); /* source */
+         f6 = make_fpr(6); /* source */
+         f0 = make_fpr(0); /* destination */
+         f2 = make_fpr(2); /* destination */
+         r1 = make_gpr(1); /* GPR #1 clobbered */
+
+         irrm = expr->Iex.Binop.arg1;
+         left = expr->Iex.Binop.arg2;
+         rm = get_dfp_rounding_mode(env, irrm);
+         s390_isel_dfp128_expr(&op_hi, &op_lo, env, left);
+         /* operand --> (f4, f6) */
+         addInstr(env, s390_insn_move(8, f4, op_hi));
+         addInstr(env, s390_insn_move(8, f6, op_lo));
+         addInstr(env, s390_insn_fp128_convert(16, S390_FP_D128_TO_F128, f0, f2,
+                                               f4, f6, r1, rm));
+         /* (f0, f2) --> destination */
+         *dst_hi = newVRegF(env);
+         *dst_lo = newVRegF(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, f0));
+         addInstr(env, s390_insn_move(8, *dst_lo, f2));
+
+         return;
+      }
+
+      default:
+         goto irreducible;
+      }
+   }
+
+      /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      IRExpr *left = expr->Iex.Unop.arg;
+      s390_bfp_unop_t bfpop;
+      s390_bfp_conv_t conv;
+      HReg op_hi, op_lo, op, f12, f13, f14, f15;
+
+      /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+      f12 = make_fpr(12);
+      f13 = make_fpr(13);
+      f14 = make_fpr(14);
+      f15 = make_fpr(15);
+
+      switch (expr->Iex.Unop.op) {
+      case Iop_NegF128:
+         /* Fold neg(abs(x)) into a single "load negative" style op. */
+         if (left->tag == Iex_Unop &&
+             (left->Iex.Unop.op == Iop_AbsF32 ||
+              left->Iex.Unop.op == Iop_AbsF64))
+            bfpop = S390_BFP_NABS;
+         else
+            bfpop = S390_BFP_NEG;
+         goto float128_opnd;
+      case Iop_AbsF128:     bfpop = S390_BFP_ABS;         goto float128_opnd;
+      case Iop_I32StoF128:  conv = S390_BFP_I32_TO_F128;  goto convert_int;
+      case Iop_I64StoF128:  conv = S390_BFP_I64_TO_F128;  goto convert_int;
+      case Iop_I32UtoF128:  conv = S390_BFP_U32_TO_F128;  goto convert_int;
+      case Iop_I64UtoF128:  conv = S390_BFP_U64_TO_F128;  goto convert_int;
+      case Iop_F32toF128:   conv = S390_BFP_F32_TO_F128;  goto convert_float;
+      case Iop_F64toF128:   conv = S390_BFP_F64_TO_F128;  goto convert_float;
+      default:
+         goto irreducible;
+      }
+
+   float128_opnd:
+      s390_isel_float128_expr(&op_hi, &op_lo, env, left);
+
+      /* operand --> (f13, f15) */
+      addInstr(env, s390_insn_move(8, f13, op_hi));
+      addInstr(env, s390_insn_move(8, f15, op_lo));
+
+      addInstr(env, s390_insn_bfp128_unop(16, bfpop, f12, f14, f13, f15));
+      goto move_dst;
+
+   convert_float:
+      op  = s390_isel_float_expr(env, left);
+      addInstr(env, s390_insn_bfp128_convert_to(16, conv, f12, f14, op));
+      goto move_dst;
+
+   convert_int:
+      op  = s390_isel_int_expr(env, left);
+      addInstr(env, s390_insn_bfp128_convert_to(16, conv, f12, f14, op));
+      goto move_dst;
+
+   move_dst:
+      /* Move result to virtual destination registers */
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_move(8, *dst_hi, f12));
+      addInstr(env, s390_insn_move(8, *dst_lo, f14));
+      return;
+   }
+
+   default:
+      goto irreducible;
+   }
+
+   /* We get here if no pattern matched. */
+ irreducible:
+   ppIRExpr(expr);
+   vpanic("s390_isel_float128_expr: cannot reduce tree");
+}
+
+/* Compute a 128-bit value into two 64-bit registers.  The registers
+   returned are virtual (the sanity checks below require it) and must not
+   be changed by subsequent code emitted by the caller. */
+static void
+s390_isel_float128_expr(HReg *dst_hi, HReg *dst_lo, ISelEnv *env, IRExpr *expr)
+{
+   s390_isel_float128_expr_wrk(dst_hi, dst_lo, env, expr);
+
+   /* Sanity checks ... */
+   vassert(hregIsVirtual(*dst_hi));
+   vassert(hregIsVirtual(*dst_lo));
+   vassert(hregClass(*dst_hi) == HRcFlt64);
+   vassert(hregClass(*dst_lo) == HRcFlt64);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (64 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 32-bit or 64-bit floating point expression into a virtual FP
+   register and return that register.  Panics ("irreducible") on IR trees
+   this selector cannot handle. */
+static HReg
+s390_isel_float_expr_wrk(ISelEnv *env, IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+   UChar size;
+
+   vassert(ty == Ity_F32 || ty == Ity_F64);
+
+   size = sizeofIRType(ty);
+
+   switch (expr->tag) {
+   case Iex_RdTmp:
+      /* Return the virtual register that holds the temporary. */
+      return lookupIRTemp(env, expr->Iex.RdTmp.tmp);
+
+      /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg        dst = newVRegF(env);
+      /* NOTE(review): the amode is computed before the endianness check
+         below; any insns it emits on the failure path are harmless since
+         that path panics anyway. */
+      s390_amode *am  = s390_isel_amode(env, expr->Iex.Load.addr);
+
+      if (expr->Iex.Load.end != Iend_BE)
+         goto irreducible;
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+      /* --------- GET --------- */
+   case Iex_Get: {
+      HReg dst = newVRegF(env);
+      s390_amode *am = s390_amode_for_guest_state(expr->Iex.Get.offset);
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+      /* --------- LITERAL --------- */
+
+      /* Load a literal into a register. Create a "load immediate"
+         v-insn and return the register. */
+   case Iex_Const: {
+      ULong value;
+      HReg  dst = newVRegF(env);
+      const IRConst *con = expr->Iex.Const.con;
+
+      /* Bitwise copy of the value. No sign/zero-extension */
+      switch (con->tag) {
+      case Ico_F32i: value = con->Ico.F32i; break;
+      case Ico_F64i: value = con->Ico.F64i; break;
+      default:       vpanic("s390_isel_float_expr: invalid constant");
+      }
+
+      /* Only the all-zero bit pattern (+0.0) can be synthesised via
+         load-immediate here. */
+      if (value != 0) vpanic("cannot load immediate floating point constant");
+
+      addInstr(env, s390_insn_load_immediate(size, dst, value));
+
+      return dst;
+   }
+
+      /* --------- 4-ary OP --------- */
+   case Iex_Qop: {
+      HReg op1, op2, op3, dst;
+      s390_bfp_triop_t bfpop;
+
+      /* Note the operand shuffle: arg4 seeds the destination, arg2/arg3
+         feed the multiply — presumably matching the s390 multiply-and-add
+         operand order; confirm against s390_insn_bfp_triop. */
+      op3 = s390_isel_float_expr(env, expr->Iex.Qop.details->arg2);
+      op2 = s390_isel_float_expr(env, expr->Iex.Qop.details->arg3);
+      op1 = s390_isel_float_expr(env, expr->Iex.Qop.details->arg4);
+      dst = newVRegF(env);
+      addInstr(env, s390_insn_move(size, dst, op1));
+
+      switch (expr->Iex.Qop.details->op) {
+      case Iop_MAddF32:
+      case Iop_MAddF64:  bfpop = S390_BFP_MADD; break;
+      case Iop_MSubF32:
+      case Iop_MSubF64:  bfpop = S390_BFP_MSUB; break;
+
+      default:
+         goto irreducible;
+      }
+
+      /* arg1 carries the IR rounding mode; install it in the FPC. */
+      set_bfp_rounding_mode_in_fpc(env, expr->Iex.Qop.details->arg1);
+      addInstr(env, s390_insn_bfp_triop(size, bfpop, dst, op2, op3));
+      return dst;
+   }
+
+      /* --------- TERNARY OP --------- */
+   case Iex_Triop: {
+      IRTriop *triop = expr->Iex.Triop.details;
+      IROp    op     = triop->op;
+      IRExpr *left   = triop->arg2;
+      IRExpr *right  = triop->arg3;
+      s390_bfp_binop_t bfpop;
+      HReg h1, op2, dst;
+
+      h1   = s390_isel_float_expr(env, left);  /* Process 1st operand */
+      op2  = s390_isel_float_expr(env, right); /* Process 2nd operand */
+      dst  = newVRegF(env);
+      /* The binop is two-address: dst is both 1st source and result. */
+      addInstr(env, s390_insn_move(size, dst, h1));
+      switch (op) {
+      case Iop_AddF32:
+      case Iop_AddF64:  bfpop = S390_BFP_ADD; break;
+      case Iop_SubF32:
+      case Iop_SubF64:  bfpop = S390_BFP_SUB; break;
+      case Iop_MulF32:
+      case Iop_MulF64:  bfpop = S390_BFP_MUL; break;
+      case Iop_DivF32:
+      case Iop_DivF64:  bfpop = S390_BFP_DIV; break;
+
+      default:
+         goto irreducible;
+      }
+
+      set_bfp_rounding_mode_in_fpc(env, triop->arg1);
+      addInstr(env, s390_insn_bfp_binop(size, bfpop, dst, op2));
+      return dst;
+   }
+
+      /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      IROp    op   = expr->Iex.Binop.op;
+      IRExpr *irrm = expr->Iex.Binop.arg1;
+      IRExpr *left = expr->Iex.Binop.arg2;
+      HReg h1, dst;
+      s390_bfp_conv_t  conv;
+      s390_fp_conv_t fpconv;
+
+      switch (op) {
+      case Iop_SqrtF32:
+      case Iop_SqrtF64:
+         h1  = s390_isel_float_expr(env, left);
+         dst = newVRegF(env);
+         set_bfp_rounding_mode_in_fpc(env, irrm);
+         addInstr(env, s390_insn_bfp_unop(size, S390_BFP_SQRT, dst, h1));
+         return dst;
+
+      case Iop_F64toF32:  conv = S390_BFP_F64_TO_F32; goto convert_float;
+      case Iop_I32StoF32: conv = S390_BFP_I32_TO_F32; goto convert_int;
+      case Iop_I32UtoF32: conv = S390_BFP_U32_TO_F32; goto convert_int;
+      case Iop_I64StoF32: conv = S390_BFP_I64_TO_F32; goto convert_int;
+      case Iop_I64StoF64: conv = S390_BFP_I64_TO_F64; goto convert_int;
+      case Iop_I64UtoF32: conv = S390_BFP_U64_TO_F32; goto convert_int;
+      case Iop_I64UtoF64: conv = S390_BFP_U64_TO_F64; goto convert_int;
+      case Iop_D32toF32:  fpconv = S390_FP_D32_TO_F32;  goto convert_dfp;
+      case Iop_D32toF64:  fpconv = S390_FP_D32_TO_F64;  goto convert_dfp;
+      case Iop_D64toF32:  fpconv = S390_FP_D64_TO_F32;  goto convert_dfp;
+      case Iop_D64toF64:  fpconv = S390_FP_D64_TO_F64;  goto convert_dfp;
+      case Iop_D128toF32: fpconv = S390_FP_D128_TO_F32; goto convert_dfp128;
+      case Iop_D128toF64: fpconv = S390_FP_D128_TO_F64; goto convert_dfp128;
+
+      convert_float:
+         h1 = s390_isel_float_expr(env, left);
+         goto convert;
+
+      convert_int:
+         h1 = s390_isel_int_expr(env, left);
+         goto convert;
+
+      convert: {
+         s390_bfp_round_t rounding_mode;
+         /* convert-from-fixed and load-rounded have a rounding mode field
+            when the floating point extension facility is installed. */
+         dst = newVRegF(env);
+         if (s390_host_has_fpext) {
+            rounding_mode = get_bfp_rounding_mode(env, irrm);
+         } else {
+            set_bfp_rounding_mode_in_fpc(env, irrm);
+            rounding_mode = S390_BFP_ROUND_PER_FPC;
+         }
+         addInstr(env, s390_insn_bfp_convert(size, conv, dst, h1,
+                                             rounding_mode));
+         return dst;
+      }
+
+      convert_dfp: {
+         s390_dfp_round_t rm;
+         HReg f0, f4, r1; /* real registers used by PFPO */
+
+         f4 = make_fpr(4); /* source */
+         f0 = make_fpr(0); /* destination */
+         r1 = make_gpr(1); /* GPR #1 clobbered */
+         h1 = s390_isel_dfp_expr(env, left);
+         dst = newVRegF(env);
+         rm = get_dfp_rounding_mode(env, irrm);
+         /* operand --> f4 */
+         addInstr(env, s390_insn_move(8, f4, h1));
+         addInstr(env, s390_insn_fp_convert(size, fpconv, f0, f4, r1, rm));
+         /* f0 --> destination */
+         addInstr(env, s390_insn_move(8, dst, f0));
+         return dst;
+      }
+
+      convert_dfp128: {
+         s390_dfp_round_t rm;
+         HReg op_hi, op_lo;
+         HReg f0, f4, f6, r1; /* real registers used by PFPO */
+
+         f4 = make_fpr(4); /* source */
+         f6 = make_fpr(6); /* source */
+         f0 = make_fpr(0); /* destination */
+         r1 = make_gpr(1); /* GPR #1 clobbered */
+         s390_isel_dfp128_expr(&op_hi, &op_lo, env, left);
+         dst = newVRegF(env);
+         rm = get_dfp_rounding_mode(env, irrm);
+         /* operand --> (f4, f6) */
+         addInstr(env, s390_insn_move(8, f4, op_hi));
+         addInstr(env, s390_insn_move(8, f6, op_lo));
+         addInstr(env, s390_insn_fp128_convert(16, fpconv, f0, INVALID_HREG,
+                                               f4, f6, r1, rm));
+         /* f0 --> destination */
+         addInstr(env, s390_insn_move(8, dst, f0));
+         return dst;
+      }
+
+      /* Note: two more cases follow the default label; this is valid C
+         since case order within a switch is immaterial. */
+      default:
+         goto irreducible;
+
+      case Iop_F128toF64:
+      case Iop_F128toF32: {
+         HReg op_hi, op_lo, f12, f13, f14, f15;
+         s390_bfp_round_t rounding_mode;
+
+         conv = op == Iop_F128toF32 ? S390_BFP_F128_TO_F32
+                                    : S390_BFP_F128_TO_F64;
+
+         s390_isel_float128_expr(&op_hi, &op_lo, env, left);
+
+         /* We use non-virtual registers as pairs (f13, f15) and (f12, f14)) */
+         f12 = make_fpr(12);
+         f13 = make_fpr(13);
+         f14 = make_fpr(14);
+         f15 = make_fpr(15);
+
+         /* operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op_hi));
+         addInstr(env, s390_insn_move(8, f15, op_lo));
+
+         /* result --> (f12, f14) */
+
+         /* load-rounded has a rounding mode field when the floating point
+            extension facility is installed. */
+         if (s390_host_has_fpext) {
+            rounding_mode = get_bfp_rounding_mode(env, irrm);
+         } else {
+            set_bfp_rounding_mode_in_fpc(env, irrm);
+            rounding_mode = S390_BFP_ROUND_PER_FPC;
+         }
+
+         addInstr(env, s390_insn_bfp128_convert_from(size, conv, f12, f14,
+                                                     f13, f15, rounding_mode));
+         dst = newVRegF(env);
+         addInstr(env, s390_insn_move(8, dst, f12));
+
+         return dst;
+      }
+      }
+   }
+
+      /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      IROp    op   = expr->Iex.Unop.op;
+      IRExpr *left = expr->Iex.Unop.arg;
+      s390_bfp_unop_t bfpop;
+      s390_bfp_conv_t conv;
+      HReg h1, dst;
+
+      /* Extracting a half of an F128 needs no code: just pick the
+         appropriate register of the pair. */
+      if (op == Iop_F128HItoF64 || op == Iop_F128LOtoF64) {
+         HReg dst_hi, dst_lo;
+
+         s390_isel_float128_expr(&dst_hi, &dst_lo, env, left);
+         return op == Iop_F128LOtoF64 ? dst_lo : dst_hi;
+      }
+
+      /* Reinterpretation is a plain GPR -> FPR bit copy. */
+      if (op == Iop_ReinterpI64asF64 || op == Iop_ReinterpI32asF32) {
+         dst = newVRegF(env);
+         h1  = s390_isel_int_expr(env, left);     /* Process the operand */
+         addInstr(env, s390_insn_move(size, dst, h1));
+
+         return dst;
+      }
+
+      switch (op) {
+      case Iop_NegF32:
+      case Iop_NegF64:
+         /* Fold neg(abs(x)) into a single "load negative" style op. */
+         if (left->tag == Iex_Unop &&
+             (left->Iex.Unop.op == Iop_AbsF32 ||
+              left->Iex.Unop.op == Iop_AbsF64))
+            bfpop = S390_BFP_NABS;
+         else
+            bfpop = S390_BFP_NEG;
+         break;
+
+      case Iop_AbsF32:
+      case Iop_AbsF64:
+         bfpop = S390_BFP_ABS;
+         break;
+
+      case Iop_I32StoF64:  conv = S390_BFP_I32_TO_F64;  goto convert_int1;
+      case Iop_I32UtoF64:  conv = S390_BFP_U32_TO_F64;  goto convert_int1;
+      case Iop_F32toF64:   conv = S390_BFP_F32_TO_F64;  goto convert_float1;
+
+      convert_float1:
+         h1 = s390_isel_float_expr(env, left);
+         goto convert1;
+
+      convert_int1:
+         h1 = s390_isel_int_expr(env, left);
+         goto convert1;
+
+      convert1:
+         dst = newVRegF(env);
+         /* No rounding mode is needed for these conversions. Just stick
+            one in. It won't be used later on. */
+         addInstr(env, s390_insn_bfp_convert(size, conv, dst, h1,
+                                             S390_BFP_ROUND_NEAREST_EVEN));
+         return dst;
+
+      default:
+         goto irreducible;
+      }
+
+      /* Process operand */
+      h1  = s390_isel_float_expr(env, left);
+      dst = newVRegF(env);
+      addInstr(env, s390_insn_bfp_unop(size, bfpop, dst, h1));
+      return dst;
+   }
+
+   default:
+      goto irreducible;
+   }
+
+   /* We get here if no pattern matched. */
+ irreducible:
+   ppIRExpr(expr);
+   vpanic("s390_isel_float_expr: cannot reduce tree");
+}
+
+
+/* Compute a floating point expression into a virtual FP register. */
+static HReg
+s390_isel_float_expr(ISelEnv *env, IRExpr *expr)
+{
+   HReg reg = s390_isel_float_expr_wrk(env, expr);
+
+   /* The worker must hand back a virtual register of FP class. */
+   vassert(hregIsVirtual(reg));
+   vassert(hregClass(reg) == HRcFlt64);
+
+   return reg;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Decimal point expressions (128 bit)         ---*/
+/*---------------------------------------------------------*/
+static void
+s390_isel_dfp128_expr_wrk(HReg *dst_hi, HReg *dst_lo, ISelEnv *env,
+                          IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+
+   vassert(ty == Ity_D128);
+
+   switch (expr->tag) {
+   case Iex_RdTmp:
+      /* Return the virtual registers that hold the temporary. */
+      lookupIRTemp128(dst_hi, dst_lo, env, expr->Iex.RdTmp.tmp);
+      return;
+
+      /* --------- LOAD --------- */
+   case Iex_Load: {
+      IRExpr *addr_hi, *addr_lo;
+      s390_amode *am_hi, *am_lo;
+
+      if (expr->Iex.Load.end != Iend_BE)
+         goto irreducible;
+
+      addr_hi = expr->Iex.Load.addr;
+      addr_lo = IRExpr_Binop(Iop_Add64, addr_hi, mkU64(8));
+
+      am_hi  = s390_isel_amode(env, addr_hi);
+      am_lo  = s390_isel_amode(env, addr_lo);
+
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_load(8, *dst_hi, am_hi));
+      addInstr(env, s390_insn_load(8, *dst_hi, am_lo));
+      return;
+   }
+
+      /* --------- GET --------- */
+   case Iex_Get:
+      /* This is not supported because loading 128-bit from the guest
+         state is almost certainly wrong. Use get_dpr_pair instead. */
+      vpanic("Iex_Get with D128 data");
+
+      /* --------- 4-ary OP --------- */
+   case Iex_Qop:
+      vpanic("Iex_Qop with D128 data");
+
+      /* --------- TERNARY OP --------- */
+   case Iex_Triop: {
+      IRTriop *triop = expr->Iex.Triop.details;
+      IROp    op     = triop->op;
+      IRExpr *irrm   = triop->arg1;
+      IRExpr *left   = triop->arg2;
+      IRExpr *right  = triop->arg3;
+      s390_dfp_round_t rounding_mode;
+      s390_dfp_binop_t dfpop;
+      HReg op1_hi, op1_lo, op2_hi, op2_lo, f9, f11, f12, f13, f14, f15;
+
+      /* We use non-virtual registers as pairs with (f9, f11) as op1,
+         (f12, f14) as op2 and (f13, f15)  as destination) */
+      f9  = make_fpr(9);
+      f11 = make_fpr(11);
+      f12 = make_fpr(12);
+      f13 = make_fpr(13);
+      f14 = make_fpr(14);
+      f15 = make_fpr(15);
+
+      switch (op) {
+      case Iop_AddD128:       dfpop = S390_DFP_ADD;      goto evaluate_dfp128;
+      case Iop_SubD128:       dfpop = S390_DFP_SUB;      goto evaluate_dfp128;
+      case Iop_MulD128:       dfpop = S390_DFP_MUL;      goto evaluate_dfp128;
+      case Iop_DivD128:       dfpop = S390_DFP_DIV;      goto evaluate_dfp128;
+      case Iop_QuantizeD128:  dfpop = S390_DFP_QUANTIZE; goto evaluate_dfp128;
+
+      evaluate_dfp128: {
+         /* Process 1st operand */
+         s390_isel_dfp128_expr(&op1_hi, &op1_lo, env, left);
+         /* 1st operand --> (f9, f11) */
+         addInstr(env, s390_insn_move(8, f9,  op1_hi));
+         addInstr(env, s390_insn_move(8, f11, op1_lo));
+
+         /* Process 2nd operand */
+         s390_isel_dfp128_expr(&op2_hi, &op2_lo, env, right);
+         /* 2nd operand --> (f12, f14) */
+         addInstr(env, s390_insn_move(8, f12, op2_hi));
+         addInstr(env, s390_insn_move(8, f14, op2_lo));
+
+         /* DFP arithmetic ops take rounding mode only when fpext is
+            installed. But, DFP quantize operation takes rm irrespective
+            of fpext facility . */
+         if (s390_host_has_fpext || op == Iop_QuantizeD128) {
+            rounding_mode = get_dfp_rounding_mode(env, irrm);
+         } else {
+            set_dfp_rounding_mode_in_fpc(env, irrm);
+            rounding_mode = S390_DFP_ROUND_PER_FPC_0;
+         }
+         addInstr(env, s390_insn_dfp128_binop(16, dfpop, f13, f15, f9, f11,
+                                              f12, f14, rounding_mode));
+         /* Move result to virtual destination register */
+         *dst_hi = newVRegF(env);
+         *dst_lo = newVRegF(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, f13));
+         addInstr(env, s390_insn_move(8, *dst_lo, f15));
+         return;
+      }
+
+      case Iop_SignificanceRoundD128: {
+         /* Process 1st operand */
+         HReg op1 = s390_isel_int_expr(env, left);
+         /* Process 2nd operand */
+         s390_isel_dfp128_expr(&op2_hi, &op2_lo, env, right);
+         /* 2nd operand --> (f12, f14) */
+         addInstr(env, s390_insn_move(8, f12, op2_hi));
+         addInstr(env, s390_insn_move(8, f14, op2_lo));
+
+         rounding_mode = get_dfp_rounding_mode(env, irrm);
+         addInstr(env, s390_insn_dfp128_reround(16, f13, f15, op1, f12, f14,
+                                                rounding_mode));
+         /* Move result to virtual destination register */
+         *dst_hi = newVRegF(env);
+         *dst_lo = newVRegF(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, f13));
+         addInstr(env, s390_insn_move(8, *dst_lo, f15));
+         return;
+      }
+
+      default:
+         goto irreducible;
+      }
+   }
+
+      /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+
+      switch (expr->Iex.Binop.op) {
+      case Iop_D64HLtoD128:
+         *dst_hi = s390_isel_dfp_expr(env, expr->Iex.Binop.arg1);
+         *dst_lo = s390_isel_dfp_expr(env, expr->Iex.Binop.arg2);
+         return;
+
+      case Iop_ShlD128:
+      case Iop_ShrD128:
+      case Iop_InsertExpD128: {
+         HReg op1_hi, op1_lo, op2, f9, f11, f13, f15;
+         s390_dfp_intop_t intop;
+         IRExpr *dfp_op;
+         IRExpr *int_op;
+
+         switch (expr->Iex.Binop.op) {
+         case Iop_ShlD128:       /* (D128, I64) -> D128 */
+            intop = S390_DFP_SHIFT_LEFT;
+            dfp_op = expr->Iex.Binop.arg1;
+            int_op = expr->Iex.Binop.arg2;
+            break;
+         case Iop_ShrD128:       /* (D128, I64) -> D128 */
+            intop = S390_DFP_SHIFT_RIGHT;
+            dfp_op = expr->Iex.Binop.arg1;
+            int_op = expr->Iex.Binop.arg2;
+            break;
+         case Iop_InsertExpD128: /* (I64, D128) -> D128 */
+            intop = S390_DFP_INSERT_EXP;
+            int_op = expr->Iex.Binop.arg1;
+            dfp_op = expr->Iex.Binop.arg2;
+            break;
+         default: goto irreducible;
+         }
+
+         /* We use non-virtual registers as pairs (f9, f11) and (f13, f15)) */
+         f9  = make_fpr(9); /* 128 bit dfp operand */
+         f11 = make_fpr(11);
+
+         f13 = make_fpr(13); /* 128 bit dfp destination */
+         f15 = make_fpr(15);
+
+         /* Process dfp operand */
+         s390_isel_dfp128_expr(&op1_hi, &op1_lo, env, dfp_op);
+         /* op1 -> (f9,f11) */
+         addInstr(env, s390_insn_move(8, f9,  op1_hi));
+         addInstr(env, s390_insn_move(8, f11, op1_lo));
+
+         op2 = s390_isel_int_expr(env, int_op);  /* int operand */
+
+         addInstr(env,
+                  s390_insn_dfp128_intop(16, intop, f13, f15, op2, f9, f11));
+
+         /* Move result to virtual destination register */
+         *dst_hi = newVRegF(env);
+         *dst_lo = newVRegF(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, f13));
+         addInstr(env, s390_insn_move(8, *dst_lo, f15));
+         return;
+      }
+
+      case Iop_F32toD128:
+      case Iop_F64toD128: {
+         IRExpr *irrm;
+         IRExpr *left;
+         s390_dfp_round_t rm;
+         HReg h1; /* virtual reg. to hold source */
+         HReg f0, f2, f4, r1; /* real registers used by PFPO */
+         s390_fp_conv_t fpconv;
+
+         switch (expr->Iex.Binop.op) {
+         case Iop_F32toD128:       /* (D128, I64) -> D128 */
+            fpconv = S390_FP_F32_TO_D128;
+            break;
+         case Iop_F64toD128:       /* (D128, I64) -> D128 */
+            fpconv = S390_FP_F64_TO_D128;
+            break;
+         default: goto irreducible;
+         }
+
+         f4 = make_fpr(4); /* source */
+         f0 = make_fpr(0); /* destination */
+         f2 = make_fpr(2); /* destination */
+         r1 = make_gpr(1); /* GPR #1 clobbered */
+         irrm = expr->Iex.Binop.arg1;
+         left = expr->Iex.Binop.arg2;
+         rm = get_dfp_rounding_mode(env, irrm);
+         h1 = s390_isel_float_expr(env, left);
+         addInstr(env, s390_insn_move(8, f4, h1));
+         addInstr(env, s390_insn_fp128_convert(16, fpconv, f0, f2,
+                                               f4, INVALID_HREG, r1, rm));
+         /* (f0, f2) --> destination */
+         *dst_hi = newVRegF(env);
+         *dst_lo = newVRegF(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, f0));
+         addInstr(env, s390_insn_move(8, *dst_lo, f2));
+
+         return;
+      }
+
+      case Iop_F128toD128: {
+         IRExpr *irrm;
+         IRExpr *left;
+         s390_dfp_round_t rm;
+         HReg op_hi, op_lo;
+         HReg f0, f2, f4, f6, r1; /* real registers used by PFPO */
+
+         f4 = make_fpr(4); /* source */
+         f6 = make_fpr(6); /* source */
+         f0 = make_fpr(0); /* destination */
+         f2 = make_fpr(2); /* destination */
+         r1 = make_gpr(1); /* GPR #1 clobbered */
+
+         irrm = expr->Iex.Binop.arg1;
+         left = expr->Iex.Binop.arg2;
+         rm = get_dfp_rounding_mode(env, irrm);
+         s390_isel_float128_expr(&op_hi, &op_lo, env, left);
+         /* operand --> (f4, f6) */
+         addInstr(env, s390_insn_move(8, f4, op_hi));
+         addInstr(env, s390_insn_move(8, f6, op_lo));
+         addInstr(env, s390_insn_fp128_convert(16, S390_FP_F128_TO_D128, f0, f2,
+                                               f4, f6, r1, rm));
+         /* (f0, f2) --> destination */
+         *dst_hi = newVRegF(env);
+         *dst_lo = newVRegF(env);
+         addInstr(env, s390_insn_move(8, *dst_hi, f0));
+         addInstr(env, s390_insn_move(8, *dst_lo, f2));
+
+         return;
+      }
+
+      default:
+         goto irreducible;
+      }
+   }
+
+      /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      IRExpr *left = expr->Iex.Unop.arg;
+      s390_dfp_conv_t conv;
+      HReg op, f12, f14;
+
+      /* We use non-virtual registers as the pair (f12, f14) */
+      f12 = make_fpr(12);
+      f14 = make_fpr(14);
+
+      switch (expr->Iex.Unop.op) {
+      case Iop_D64toD128:   conv = S390_DFP_D64_TO_D128;  goto convert_dfp;
+      case Iop_I32StoD128:  conv = S390_DFP_I32_TO_D128;  goto convert_int;
+      case Iop_I64StoD128:  conv = S390_DFP_I64_TO_D128;  goto convert_int;
+      case Iop_I32UtoD128:  conv = S390_DFP_U32_TO_D128;  goto convert_int;
+      case Iop_I64UtoD128:  conv = S390_DFP_U64_TO_D128;  goto convert_int;
+      default:
+         goto irreducible;
+      }
+
+   convert_dfp:
+      op  = s390_isel_dfp_expr(env, left);
+      addInstr(env, s390_insn_dfp128_convert_to(16, conv, f12, f14, op));
+      goto move_dst;
+
+   convert_int:
+      op  = s390_isel_int_expr(env, left);
+      addInstr(env, s390_insn_dfp128_convert_to(16, conv, f12, f14, op));
+      goto move_dst;
+
+   move_dst:
+      /* Move result to virtual destination registers */
+      *dst_hi = newVRegF(env);
+      *dst_lo = newVRegF(env);
+      addInstr(env, s390_insn_move(8, *dst_hi, f12));
+      addInstr(env, s390_insn_move(8, *dst_lo, f14));
+      return;
+   }
+
+   default:
+      goto irreducible;
+   }
+
+   /* We get here if no pattern matched. */
+ irreducible:
+   ppIRExpr(expr);
+   vpanic("s390_isel_dfp128_expr_wrk: cannot reduce tree");
+
+}
+
+
+/* Compute a 128-bit value into two 64-bit registers. These may be either
+   real or virtual regs; in any case they must not be changed by subsequent
+   code emitted by the caller. */
+static void
+s390_isel_dfp128_expr(HReg *dst_hi, HReg *dst_lo, ISelEnv *env, IRExpr *expr)
+{
+   /* Delegate all the work to the worker function. */
+   s390_isel_dfp128_expr_wrk(dst_hi, dst_lo, env, expr);
+
+   /* Sanity checks ... the worker must hand back two virtual registers
+      of class HRcFlt64 holding the two halves of the 128-bit value. */
+   vassert(hregIsVirtual(*dst_hi));
+   vassert(hregIsVirtual(*dst_lo));
+   vassert(hregClass(*dst_hi) == HRcFlt64);
+   vassert(hregClass(*dst_lo) == HRcFlt64);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Decimal point expressions (64 bit)          ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 64-bit (Ity_D64) or 32-bit (Ity_D32) decimal floating point
+   value into a virtual floating point register and return that register.
+   Worker function; the wrapper s390_isel_dfp_expr performs the sanity
+   checks.  Panics at "irreducible" if the IR tree cannot be handled. */
+static HReg
+s390_isel_dfp_expr_wrk(ISelEnv *env, IRExpr *expr)
+{
+   IRType ty = typeOfIRExpr(env->type_env, expr);
+   UChar size;
+
+   vassert(ty == Ity_D64 || ty == Ity_D32);
+
+   size = sizeofIRType(ty);
+
+   switch (expr->tag) {
+   case Iex_RdTmp:
+      /* Return the virtual register that holds the temporary. */
+      return lookupIRTemp(env, expr->Iex.RdTmp.tmp);
+
+      /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg        dst = newVRegF(env);
+      s390_amode *am  = s390_isel_amode(env, expr->Iex.Load.addr);
+
+      /* Only big-endian loads are supported here. */
+      if (expr->Iex.Load.end != Iend_BE)
+         goto irreducible;
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+      /* --------- GET --------- */
+   case Iex_Get: {
+      /* Read the value from the guest state. */
+      HReg dst = newVRegF(env);
+      s390_amode *am = s390_amode_for_guest_state(expr->Iex.Get.offset);
+
+      addInstr(env, s390_insn_load(size, dst, am));
+
+      return dst;
+   }
+
+      /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      IROp    op   = expr->Iex.Binop.op;
+      IRExpr *irrm = expr->Iex.Binop.arg1;   /* rounding mode */
+      IRExpr *left = expr->Iex.Binop.arg2;   /* value to be converted */
+      HReg h1, dst;
+      s390_dfp_conv_t  conv;
+      s390_fp_conv_t  fpconv;
+
+      switch (op) {
+      case Iop_D64toD32:  conv = S390_DFP_D64_TO_D32; goto convert_dfp;
+      case Iop_I64StoD64: conv = S390_DFP_I64_TO_D64; goto convert_int;
+      case Iop_I64UtoD64: conv = S390_DFP_U64_TO_D64; goto convert_int;
+      case Iop_F32toD32:  fpconv = S390_FP_F32_TO_D32; goto convert_bfp;
+      case Iop_F32toD64:  fpconv = S390_FP_F32_TO_D64; goto convert_bfp;
+      case Iop_F64toD32:  fpconv = S390_FP_F64_TO_D32; goto convert_bfp;
+      case Iop_F64toD64:  fpconv = S390_FP_F64_TO_D64; goto convert_bfp;
+      case Iop_F128toD32: fpconv = S390_FP_F128_TO_D32; goto convert_bfp128;
+      case Iop_F128toD64: fpconv = S390_FP_F128_TO_D64; goto convert_bfp128;
+
+      convert_dfp:
+         /* Source operand is a DFP value. */
+         h1 = s390_isel_dfp_expr(env, left);
+         goto convert;
+
+      convert_int:
+         /* Source operand is an integer value. */
+         h1 = s390_isel_int_expr(env, left);
+         goto convert;
+
+      convert: {
+            s390_dfp_round_t rounding_mode;
+            /* convert-from-fixed and load-rounded have a rounding mode field
+               when the floating point extension facility is installed. */
+            dst = newVRegF(env);
+            if (s390_host_has_fpext) {
+               rounding_mode = get_dfp_rounding_mode(env, irrm);
+            } else {
+               /* No fpext: set the rounding mode in the FPC instead and
+                  request round-per-FPC in the insn. */
+               set_dfp_rounding_mode_in_fpc(env, irrm);
+               rounding_mode = S390_DFP_ROUND_PER_FPC_0;
+            }
+            addInstr(env, s390_insn_dfp_convert(size, conv, dst, h1,
+                                                rounding_mode));
+            return dst;
+         }
+
+      convert_bfp: {
+         /* BFP -> DFP conversion; the PFPO insn requires its operands
+            in fixed (non-virtual) registers. */
+         s390_dfp_round_t rm;
+         HReg f0, f4, r1; /* real registers used by PFPO */
+
+         f4 = make_fpr(4); /* source */
+         f0 = make_fpr(0); /* destination */
+         r1 = make_gpr(1); /* GPR #1 clobbered */
+         h1 = s390_isel_float_expr(env, left);
+         dst = newVRegF(env);
+         rm = get_dfp_rounding_mode(env, irrm);
+         /* operand --> f4 */
+         addInstr(env, s390_insn_move(8, f4, h1));
+         addInstr(env, s390_insn_fp_convert(size, fpconv, f0, f4, r1, rm));
+         /* f0 --> destination */
+         addInstr(env, s390_insn_move(8, dst, f0));
+         return dst;
+      }
+
+      convert_bfp128: {
+         /* Like convert_bfp, but the 128-bit source occupies the
+            register pair (f4, f6). */
+         s390_dfp_round_t rm;
+         HReg op_hi, op_lo;
+         HReg f0, f4, f6, r1; /* real registers used by PFPO */
+
+         f4 = make_fpr(4); /* source */
+         f6 = make_fpr(6); /* source */
+         f0 = make_fpr(0); /* destination */
+         r1 = make_gpr(1); /* GPR #1 clobbered */
+         s390_isel_float128_expr(&op_hi, &op_lo, env, left);
+         dst = newVRegF(env);
+         rm = get_dfp_rounding_mode(env, irrm);
+         /* operand --> (f4, f6) */
+         addInstr(env, s390_insn_move(8, f4, op_hi));
+         addInstr(env, s390_insn_move(8, f6, op_lo));
+         addInstr(env, s390_insn_fp128_convert(16, fpconv, f0, INVALID_HREG,
+                                               f4, f6, r1, rm));
+         /* f0 --> destination */
+         addInstr(env, s390_insn_move(8, dst, f0));
+         return dst;
+      }
+
+      case Iop_D128toD64: {
+         /* Narrow a D128 (held in a register pair) to a D64. */
+         HReg op_hi, op_lo, f12, f13, f14, f15;
+         s390_dfp_round_t rounding_mode;
+
+         conv = S390_DFP_D128_TO_D64;
+
+         s390_isel_dfp128_expr(&op_hi, &op_lo, env, left);
+
+         /* We use non-virtual registers as pairs (f13, f15) and (f12, f14) */
+         f12 = make_fpr(12);
+         f13 = make_fpr(13);
+         f14 = make_fpr(14);
+         f15 = make_fpr(15);
+
+         /* operand --> (f13, f15) */
+         addInstr(env, s390_insn_move(8, f13, op_hi));
+         addInstr(env, s390_insn_move(8, f15, op_lo));
+
+         /* result --> (f12, f14) */
+
+         /* load-rounded has a rounding mode field when the floating point
+            extension facility is installed. */
+         if (s390_host_has_fpext) {
+            rounding_mode = get_dfp_rounding_mode(env, irrm);
+         } else {
+            set_dfp_rounding_mode_in_fpc(env, irrm);
+            rounding_mode = S390_DFP_ROUND_PER_FPC_0;
+         }
+         addInstr(env, s390_insn_dfp128_convert_from(size, conv, f12, f14,
+                                                     f13, f15, rounding_mode));
+         /* Only the high half (f12) of the pair carries the D64 result. */
+         dst = newVRegF(env);
+         addInstr(env, s390_insn_move(8, dst, f12));
+
+         return dst;
+      }
+
+      case Iop_ShlD64:
+      case Iop_ShrD64:
+      case Iop_InsertExpD64: {
+         /* DFP operations combining a DFP operand with an integer
+            operand.  Note the differing argument order below. */
+         HReg op2;
+         HReg op3;
+         IRExpr *dfp_op;
+         IRExpr *int_op;
+         s390_dfp_intop_t intop;
+
+         switch (expr->Iex.Binop.op) {
+         case Iop_ShlD64:       /* (D64, I64) -> D64 */
+            intop = S390_DFP_SHIFT_LEFT;
+            dfp_op = expr->Iex.Binop.arg1;
+            int_op = expr->Iex.Binop.arg2;
+            break;
+         case Iop_ShrD64:       /* (D64, I64) -> D64 */
+            intop = S390_DFP_SHIFT_RIGHT;
+            dfp_op = expr->Iex.Binop.arg1;
+            int_op = expr->Iex.Binop.arg2;
+            break;
+         case Iop_InsertExpD64: /* (I64, D64) -> D64 */
+            intop = S390_DFP_INSERT_EXP;
+            int_op = expr->Iex.Binop.arg1;
+            dfp_op = expr->Iex.Binop.arg2;
+            break;
+         default: goto irreducible;
+         }
+
+         op2 = s390_isel_int_expr(env, int_op);
+         op3 = s390_isel_dfp_expr(env, dfp_op);
+         dst = newVRegF(env);
+
+         addInstr(env, s390_insn_dfp_intop(size, intop, dst, op2, op3));
+         return dst;
+      }
+
+      default:
+         goto irreducible;
+      }
+   }
+
+      /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+      IROp    op   = expr->Iex.Unop.op;
+      IRExpr *left = expr->Iex.Unop.arg;
+      s390_dfp_conv_t conv;
+      HReg h1, dst;
+
+      if (op == Iop_D128HItoD64 || op == Iop_D128LOtoD64) {
+         /* Select one half of a D128 register pair; no code needed
+            beyond evaluating the D128 operand. */
+         HReg dst_hi, dst_lo;
+
+         s390_isel_dfp128_expr(&dst_hi, &dst_lo, env, left);
+         return op == Iop_D128LOtoD64 ? dst_lo : dst_hi;
+      }
+
+      if (op == Iop_ReinterpI64asD64) {
+         /* Reinterpretation: just copy the bit pattern into an FPR. */
+         dst = newVRegF(env);
+         h1  = s390_isel_int_expr(env, left);     /* Process the operand */
+         addInstr(env, s390_insn_move(size, dst, h1));
+
+         return dst;
+      }
+
+      switch (op) {
+      case Iop_D32toD64:  conv = S390_DFP_D32_TO_D64;  goto convert_dfp1;
+      case Iop_I32StoD64: conv = S390_DFP_I32_TO_D64;  goto convert_int1;
+      case Iop_I32UtoD64: conv = S390_DFP_U32_TO_D64;  goto convert_int1;
+
+      convert_dfp1:
+         /* Source operand is a DFP value. */
+         h1 = s390_isel_dfp_expr(env, left);
+         goto convert1;
+
+      convert_int1:
+         /* Source operand is an integer value. */
+         h1 = s390_isel_int_expr(env, left);
+         goto convert1;
+
+      convert1:
+         dst = newVRegF(env);
+         /* No rounding mode is needed for these conversions. Just stick
+            one in. It won't be used later on. */
+         addInstr(env, s390_insn_dfp_convert(size, conv, dst, h1,
+                                             S390_DFP_ROUND_NEAREST_EVEN_4));
+         return dst;
+
+      default:
+         goto irreducible;
+      }
+   }
+
+      /* --------- TERNARY OP --------- */
+   case Iex_Triop: {
+      IRTriop *triop = expr->Iex.Triop.details;
+      IROp    op     = triop->op;
+      IRExpr *irrm   = triop->arg1;   /* rounding mode */
+      IRExpr *left   = triop->arg2;
+      IRExpr *right  = triop->arg3;
+      s390_dfp_round_t rounding_mode;
+      s390_dfp_binop_t dfpop;
+      HReg op2, op3, dst;
+
+      switch (op) {
+      case Iop_AddD64:      dfpop = S390_DFP_ADD;      goto evaluate_dfp;
+      case Iop_SubD64:      dfpop = S390_DFP_SUB;      goto evaluate_dfp;
+      case Iop_MulD64:      dfpop = S390_DFP_MUL;      goto evaluate_dfp;
+      case Iop_DivD64:      dfpop = S390_DFP_DIV;      goto evaluate_dfp;
+      case Iop_QuantizeD64: dfpop = S390_DFP_QUANTIZE; goto evaluate_dfp;
+
+      evaluate_dfp: {
+         op2  = s390_isel_dfp_expr(env, left);  /* Process 1st operand */
+         op3  = s390_isel_dfp_expr(env, right); /* Process 2nd operand */
+         dst  = newVRegF(env);
+         /* DFP arithmetic ops take rounding mode only when fpext is
+            installed. But, DFP quantize operation takes rm irrespective
+            of fpext facility. */
+         if (s390_host_has_fpext || dfpop == S390_DFP_QUANTIZE) {
+            rounding_mode = get_dfp_rounding_mode(env, irrm);
+         } else {
+            set_dfp_rounding_mode_in_fpc(env, irrm);
+            rounding_mode = S390_DFP_ROUND_PER_FPC_0;
+         }
+         addInstr(env, s390_insn_dfp_binop(size, dfpop, dst, op2, op3,
+                                           rounding_mode));
+         return dst;
+      }
+
+      case Iop_SignificanceRoundD64:
+         /* Reround: the 1st (integer) operand gives the significance. */
+         op2  = s390_isel_int_expr(env, left);  /* Process 1st operand */
+         op3  = s390_isel_dfp_expr(env, right); /* Process 2nd operand */
+         dst  = newVRegF(env);
+         rounding_mode = get_dfp_rounding_mode(env, irrm);
+         addInstr(env, s390_insn_dfp_reround(size, dst, op2, op3,
+                                             rounding_mode));
+         return dst;
+
+      default:
+         goto irreducible;
+      }
+   }
+
+   default:
+      goto irreducible;
+   }
+
+   /* We get here if no pattern matched. */
+ irreducible:
+   ppIRExpr(expr);
+   vpanic("s390_isel_dfp_expr: cannot reduce tree");
+}
+
+/* Compute a DFP value of type D32/D64 into a virtual register and return
+   it; wrapper around s390_isel_dfp_expr_wrk adding sanity checks. */
+static HReg
+s390_isel_dfp_expr(ISelEnv *env, IRExpr *expr)
+{
+   HReg dst = s390_isel_dfp_expr_wrk(env, expr);
+
+   /* Sanity checks ... the worker must return a virtual register of
+      class HRcFlt64. */
+   vassert(hregClass(dst) == HRcFlt64);
+   vassert(hregIsVirtual(dst));
+
+   return dst;
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Condition Code                              ---*/
+/*---------------------------------------------------------*/
+
+/* This function handles all operators that produce a 1-bit result */
+/* Select instructions for a 1-bit (Ity_I1) condition expression COND and
+   return the s390 condition code under which the condition holds.
+   Panics at "fail" for operators it cannot handle. */
+static s390_cc_t
+s390_isel_cc(ISelEnv *env, IRExpr *cond)
+{
+   UChar size;
+
+   vassert(typeOfIRExpr(env->type_env, cond) == Ity_I1);
+
+   /* Constant: either 1 or 0 */
+   if (cond->tag == Iex_Const) {
+      vassert(cond->Iex.Const.con->tag == Ico_U1);
+      vassert(cond->Iex.Const.con->Ico.U1 == True
+              || cond->Iex.Const.con->Ico.U1 == False);
+
+      /* Map the constant to an always / never satisfied condition. */
+      return cond->Iex.Const.con->Ico.U1 == True ? S390_CC_ALWAYS : S390_CC_NEVER;
+   }
+
+   /* Variable: values are 1 or 0 */
+   if (cond->tag == Iex_RdTmp) {
+      IRTemp tmp = cond->Iex.RdTmp.tmp;
+      HReg   reg = lookupIRTemp(env, tmp);
+
+      /* Load-and-test does not modify REG; so this is OK. */
+      if (typeOfIRTemp(env->type_env, tmp) == Ity_I1)
+         size = 4;   /* an I1 temp is tested as a 4-byte value */
+      else
+         size = sizeofIRType(typeOfIRTemp(env->type_env, tmp));
+      addInstr(env, s390_insn_test(size, s390_opnd_reg(reg)));
+      return S390_CC_NE;   /* condition holds iff value != 0 */
+   }
+
+   /* Unary operators */
+   if (cond->tag == Iex_Unop) {
+      IRExpr *arg = cond->Iex.Unop.arg;
+
+      switch (cond->Iex.Unop.op) {
+      case Iop_Not1:  /* Not1(cond) */
+         /* Generate code for EXPR, and negate the test condition */
+         return s390_cc_invert(s390_isel_cc(env, arg));
+
+         /* Iop_32/64to1  select the LSB from their operand */
+      case Iop_32to1:
+      case Iop_64to1: {
+         HReg dst = newVRegI(env);
+         HReg h1  = s390_isel_int_expr(env, arg);
+
+         size = sizeofIRType(typeOfIRExpr(env->type_env, arg));
+
+         /* Isolate the LSB by ANDing with 1, then test it. */
+         addInstr(env, s390_insn_move(size, dst, h1));
+         addInstr(env, s390_insn_alu(size, S390_ALU_AND, dst, s390_opnd_imm(1)));
+         addInstr(env, s390_insn_test(size, s390_opnd_reg(dst)));
+         return S390_CC_NE;
+      }
+
+      case Iop_CmpNEZ8:
+      case Iop_CmpNEZ16: {
+         /* Zero-extend the 8/16-bit operand to 32 bits, then test. */
+         s390_opnd_RMI src;
+         s390_unop_t   op;
+         HReg dst;
+
+         op  = (cond->Iex.Unop.op == Iop_CmpNEZ8) ? S390_ZERO_EXTEND_8
+            : S390_ZERO_EXTEND_16;
+         dst = newVRegI(env);
+         src = s390_isel_int_expr_RMI(env, arg);
+         addInstr(env, s390_insn_unop(4, op, dst, src));
+         addInstr(env, s390_insn_test(4, s390_opnd_reg(dst)));
+         return S390_CC_NE;
+      }
+
+      case Iop_CmpNEZ32:
+      case Iop_CmpNEZ64: {
+         /* 32/64-bit operands can be tested directly. */
+         s390_opnd_RMI src;
+
+         src = s390_isel_int_expr_RMI(env, arg);
+         size = sizeofIRType(typeOfIRExpr(env->type_env, arg));
+         addInstr(env, s390_insn_test(size, src));
+         return S390_CC_NE;
+      }
+
+      default:
+         goto fail;
+      }
+   }
+
+   /* Binary operators */
+   if (cond->tag == Iex_Binop) {
+      IRExpr *arg1 = cond->Iex.Binop.arg1;
+      IRExpr *arg2 = cond->Iex.Binop.arg2;
+      HReg reg1, reg2;
+
+      size = sizeofIRType(typeOfIRExpr(env->type_env, arg1));
+
+      switch (cond->Iex.Binop.op) {
+         s390_unop_t op;
+         s390_cc_t   result;
+
+      case Iop_CmpEQ8:
+      case Iop_CasCmpEQ8:
+         op     = S390_ZERO_EXTEND_8;
+         result = S390_CC_E;
+         goto do_compare_ze;
+
+      case Iop_CmpNE8:
+      case Iop_CasCmpNE8:
+         op     = S390_ZERO_EXTEND_8;
+         result = S390_CC_NE;
+         goto do_compare_ze;
+
+      case Iop_CmpEQ16:
+      case Iop_CasCmpEQ16:
+         op     = S390_ZERO_EXTEND_16;
+         result = S390_CC_E;
+         goto do_compare_ze;
+
+      case Iop_CmpNE16:
+      case Iop_CasCmpNE16:
+         op     = S390_ZERO_EXTEND_16;
+         result = S390_CC_NE;
+         goto do_compare_ze;
+
+      do_compare_ze: {
+            /* 8/16-bit equality test: zero-extend both operands to
+               32 bits, then compare the full registers. */
+            s390_opnd_RMI op1, op2;
+
+            op1  = s390_isel_int_expr_RMI(env, arg1);
+            reg1 = newVRegI(env);
+            addInstr(env, s390_insn_unop(4, op, reg1, op1));
+
+            op2  = s390_isel_int_expr_RMI(env, arg2);
+            reg2 = newVRegI(env);
+            addInstr(env, s390_insn_unop(4, op, reg2, op2));  /* zero extend */
+
+            op2 = s390_opnd_reg(reg2);
+            addInstr(env, s390_insn_compare(4, reg1, op2, False));
+
+            return result;
+         }
+
+      case Iop_CmpEQ32:
+      case Iop_CmpEQ64:
+      case Iop_CasCmpEQ32:
+      case Iop_CasCmpEQ64:
+         result = S390_CC_E;
+         goto do_compare;
+
+      case Iop_CmpNE32:
+      case Iop_CmpNE64:
+      case Iop_CasCmpNE32:
+      case Iop_CasCmpNE64:
+         result = S390_CC_NE;
+         goto do_compare;
+
+      do_compare: {
+            HReg op1;
+            s390_opnd_RMI op2;
+
+            /* Equality/inequality is commutative, so the operands may
+               be reordered (presumably to obtain a better operand
+               encoding -- see order_commutative_operands). */
+            order_commutative_operands(arg1, arg2);
+
+            op1 = s390_isel_int_expr(env, arg1);
+            op2 = s390_isel_int_expr_RMI(env, arg2);
+
+            addInstr(env, s390_insn_compare(size, op1, op2, False));
+
+            return result;
+         }
+
+      case Iop_CmpLT32S:
+      case Iop_CmpLE32S:
+      case Iop_CmpLT64S:
+      case Iop_CmpLE64S: {
+         HReg op1;
+         s390_opnd_RMI op2;
+
+         op1 = s390_isel_int_expr(env, arg1);
+         op2 = s390_isel_int_expr_RMI(env, arg2);
+
+         /* True = signed compare (unsigned ops below pass False) */
+         addInstr(env, s390_insn_compare(size, op1, op2, True));
+
+         return (cond->Iex.Binop.op == Iop_CmpLT32S ||
+                 cond->Iex.Binop.op == Iop_CmpLT64S) ? S390_CC_L : S390_CC_LE;
+      }
+
+      case Iop_CmpLT32U:
+      case Iop_CmpLE32U:
+      case Iop_CmpLT64U:
+      case Iop_CmpLE64U: {
+         HReg op1;
+         s390_opnd_RMI op2;
+
+         op1 = s390_isel_int_expr(env, arg1);
+         op2 = s390_isel_int_expr_RMI(env, arg2);
+
+         /* False = unsigned compare */
+         addInstr(env, s390_insn_compare(size, op1, op2, False));
+
+         return (cond->Iex.Binop.op == Iop_CmpLT32U ||
+                 cond->Iex.Binop.op == Iop_CmpLT64U) ? S390_CC_L : S390_CC_LE;
+      }
+
+      default:
+         goto fail;
+      }
+   }
+
+ fail:
+   ppIRExpr(cond);
+   vpanic("s390_isel_cc: unexpected operator");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Statements                                  ---*/
+/*---------------------------------------------------------*/
+
+static void
+s390_isel_stmt(ISelEnv *env, IRStmt *stmt)
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n -- ");
+      ppIRStmt(stmt);
+      vex_printf("\n");
+   }
+
+   switch (stmt->tag) {
+
+      /* --------- STORE --------- */
+   case Ist_Store: {
+      IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
+      s390_amode *am;
+      HReg src;
+
+      if (stmt->Ist.Store.end != Iend_BE) goto stmt_fail;
+
+      am = s390_isel_amode(env, stmt->Ist.Store.addr);
+
+      switch (tyd) {
+      case Ity_I8:
+      case Ity_I16:
+      case Ity_I32:
+      case Ity_I64:
+         /* fixs390: We could check for INSN_MADD here. */
+         if (am->tag == S390_AMODE_B12 &&
+             stmt->Ist.Store.data->tag == Iex_Const) {
+            ULong value =
+               get_const_value_as_ulong(stmt->Ist.Store.data->Iex.Const.con);
+            addInstr(env, s390_insn_mimm(sizeofIRType(tyd), am, value));
+            return;
+         }
+         /* Check whether we can use a memcpy here. Currently, the restriction
+            is that both amodes need to be B12, so MVC can be emitted.
+            We do not consider a store whose data expression is a load because
+            we don't want to deal with overlapping locations. */
+         /* store(get) never overlaps */
+         if (am->tag == S390_AMODE_B12 &&
+             stmt->Ist.Store.data->tag == Iex_Get) {
+            UInt offset = stmt->Ist.Store.data->Iex.Get.offset;
+            s390_amode *from = s390_amode_for_guest_state(offset);
+            addInstr(env, s390_insn_memcpy(sizeofIRType(tyd), am, from));
+            return;
+         }
+         /* General case: compile data into a register */
+         src = s390_isel_int_expr(env, stmt->Ist.Store.data);
+         break;
+
+      case Ity_F32:
+      case Ity_F64:
+         src = s390_isel_float_expr(env, stmt->Ist.Store.data);
+         break;
+
+      case Ity_D32:
+      case Ity_D64:
+         src = s390_isel_dfp_expr(env, stmt->Ist.Store.data);
+         break;
+
+      case Ity_F128:
+      case Ity_D128:
+         /* Cannot occur. No such instruction */
+         vpanic("Ist_Store with 128-bit floating point data");
+
+      default:
+         goto stmt_fail;
+      }
+
+      addInstr(env, s390_insn_store(sizeofIRType(tyd), am, src));
+      return;
+   }
+
+      /* --------- PUT --------- */
+   case Ist_Put: {
+      IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
+      HReg src;
+      s390_amode *am;
+      ULong new_value, old_value, difference;
+
+      /* Detect updates to certain guest registers. We track the contents
+         of those registers as long as they contain constants. If the new
+         constant is either zero or in the 8-bit neighbourhood of the
+         current value we can use a memory-to-memory insn to do the update. */
+
+      Int offset = stmt->Ist.Put.offset;
+
+      /* Check necessary conditions:
+         (1) must be one of the registers we care about
+         (2) assigned value must be a constant */
+      Int guest_reg = get_guest_reg(offset);
+
+      if (guest_reg == GUEST_UNKNOWN) goto not_special;
+
+      if (stmt->Ist.Put.data->tag != Iex_Const) {
+         /* Invalidate guest register contents */
+         env->old_value_valid[guest_reg] = False;
+         goto not_special;
+      }
+
+      /* We can only handle Ity_I64, but the CC_DEPS field can have floats */
+      if (tyd != Ity_I64)
+         goto not_special;
+
+      /* OK. Necessary conditions are satisfied. */
+
+      old_value = env->old_value[guest_reg];
+      new_value = stmt->Ist.Put.data->Iex.Const.con->Ico.U64;
+      env->old_value[guest_reg] = new_value;
+
+      Bool old_value_is_valid = env->old_value_valid[guest_reg];
+      env->old_value_valid[guest_reg] = True;
+
+      /* If the register already contains the new value, there is nothing
+         to do here. */
+      if (old_value_is_valid && new_value == old_value) {
+         return;
+      }
+
+      if (old_value_is_valid == False) goto not_special;
+
+      /* If the new value is in the neighbourhood of the old value
+         we can use a memory-to-memory insn */
+      difference = new_value - old_value;
+
+      if (s390_host_has_gie && ulong_fits_signed_8bit(difference)) {
+         am = s390_amode_for_guest_state(offset);
+         addInstr(env, s390_insn_madd(sizeofIRType(tyd), am,
+                                      (difference & 0xFF), new_value));
+         return;
+      }
+
+      /* If the high word is the same it is sufficient to load the low word. */
+      if ((old_value >> 32) == (new_value >> 32)) {
+         am = s390_amode_for_guest_state(offset + 4);
+         addInstr(env, s390_insn_mimm(4, am, new_value & 0xFFFFFFFF));
+         return;
+      }
+
+      /* No special case applies... fall through */
+
+   not_special:
+      am = s390_amode_for_guest_state(offset);
+
+      switch (tyd) {
+      case Ity_I8:
+      case Ity_I16:
+      case Ity_I32:
+      case Ity_I64:
+         if (am->tag == S390_AMODE_B12 &&
+             stmt->Ist.Put.data->tag == Iex_Const) {
+            ULong value =
+               get_const_value_as_ulong(stmt->Ist.Put.data->Iex.Const.con);
+            addInstr(env, s390_insn_mimm(sizeofIRType(tyd), am, value));
+            return;
+         }
+         /* Check whether we can use a memcpy here. Currently, the restriction
+            is that both amodes need to be B12, so MVC can be emitted. */
+         /* put(load) never overlaps */
+         if (am->tag == S390_AMODE_B12 &&
+             stmt->Ist.Put.data->tag == Iex_Load) {
+            if (stmt->Ist.Put.data->Iex.Load.end != Iend_BE) goto stmt_fail;
+            IRExpr *data = stmt->Ist.Put.data->Iex.Load.addr;
+            s390_amode *from = s390_isel_amode(env, data);
+            UInt size = sizeofIRType(tyd);
+
+            if (from->tag == S390_AMODE_B12) {
+               /* Source can be compiled into a B12 amode. */
+               addInstr(env, s390_insn_memcpy(size, am, from));
+               return;
+            }
+
+            src = newVRegI(env);
+            addInstr(env, s390_insn_load(size, src, from));
+            break;
+         }
+         /* put(get) */
+         if (am->tag == S390_AMODE_B12 &&
+             stmt->Ist.Put.data->tag == Iex_Get) {
+            UInt put_offset = am->d;
+            UInt get_offset = stmt->Ist.Put.data->Iex.Get.offset;
+            UInt size = sizeofIRType(tyd);
+            /* don't memcpy in case of overlap */
+            if (put_offset + size <= get_offset ||
+                get_offset + size <= put_offset) {
+               s390_amode *from = s390_amode_for_guest_state(get_offset);
+               addInstr(env, s390_insn_memcpy(size, am, from));
+               return;
+            }
+            goto no_memcpy_put;
+         }
+         /* General case: compile data into a register */
+no_memcpy_put:
+         src = s390_isel_int_expr(env, stmt->Ist.Put.data);
+         break;
+
+      case Ity_F32:
+      case Ity_F64:
+         src = s390_isel_float_expr(env, stmt->Ist.Put.data);
+         break;
+
+      case Ity_F128:
+      case Ity_D128:
+         /* Does not occur. See function put_(f|d)pr_pair. */
+         vpanic("Ist_Put with 128-bit floating point data");
+
+      case Ity_D32:
+      case Ity_D64:
+         src = s390_isel_dfp_expr(env, stmt->Ist.Put.data);
+         break;
+
+      default:
+         goto stmt_fail;
+      }
+
+      addInstr(env, s390_insn_store(sizeofIRType(tyd), am, src));
+      return;
+   }
+
+      /* --------- TMP --------- */
+   case Ist_WrTmp: {
+      IRTemp tmp = stmt->Ist.WrTmp.tmp;
+      IRType tyd = typeOfIRTemp(env->type_env, tmp);
+      HReg src, dst;
+
+      switch (tyd) {
+      case Ity_I128: {
+         HReg dst_hi, dst_lo, res_hi, res_lo;
+
+         s390_isel_int128_expr(&res_hi, &res_lo, env, stmt->Ist.WrTmp.data);
+         lookupIRTemp128(&dst_hi, &dst_lo, env, tmp);
+
+         addInstr(env, s390_insn_move(8, dst_hi, res_hi));
+         addInstr(env, s390_insn_move(8, dst_lo, res_lo));
+         return;
+      }
+
+      case Ity_I8:
+      case Ity_I16:
+      case Ity_I32:
+      case Ity_I64:
+         src = s390_isel_int_expr(env, stmt->Ist.WrTmp.data);
+         dst = lookupIRTemp(env, tmp);
+         break;
+
+      case Ity_I1: {
+         s390_cc_t cond = s390_isel_cc(env, stmt->Ist.WrTmp.data);
+         dst = lookupIRTemp(env, tmp);
+         addInstr(env, s390_insn_cc2bool(dst, cond));
+         return;
+      }
+
+      case Ity_F32:
+      case Ity_F64:
+         src = s390_isel_float_expr(env, stmt->Ist.WrTmp.data);
+         dst = lookupIRTemp(env, tmp);
+         break;
+
+      case Ity_F128: {
+         HReg dst_hi, dst_lo, res_hi, res_lo;
+
+         s390_isel_float128_expr(&res_hi, &res_lo, env, stmt->Ist.WrTmp.data);
+         lookupIRTemp128(&dst_hi, &dst_lo, env, tmp);
+
+         addInstr(env, s390_insn_move(8, dst_hi, res_hi));
+         addInstr(env, s390_insn_move(8, dst_lo, res_lo));
+         return;
+      }
+
+      case Ity_D32:
+      case Ity_D64:
+         src = s390_isel_dfp_expr(env, stmt->Ist.WrTmp.data);
+         dst = lookupIRTemp(env, tmp);
+         break;
+
+      case Ity_D128: {
+         HReg dst_hi, dst_lo, res_hi, res_lo;
+
+         s390_isel_dfp128_expr(&res_hi, &res_lo, env, stmt->Ist.WrTmp.data);
+         lookupIRTemp128(&dst_hi, &dst_lo, env, tmp);
+
+         addInstr(env, s390_insn_move(8, dst_hi, res_hi));
+         addInstr(env, s390_insn_move(8, dst_lo, res_lo));
+         return;
+      }
+
+      default:
+         goto stmt_fail;
+      }
+
+      addInstr(env, s390_insn_move(sizeofIRType(tyd), dst, src));
+      return;
+   }
+
+      /* --------- Call to DIRTY helper --------- */
+   case Ist_Dirty: {
+      IRType   retty;
+      IRDirty* d = stmt->Ist.Dirty.details;
+      HReg dst;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      UInt   addToSp = 0;
+      Int i;
+
+      /* Invalidate tracked values of those guest state registers that are
+         modified by this helper. */
+      for (i = 0; i < d->nFxState; ++i) {
+         /* JRS 1 June 2012: AFAICS, s390 guest doesn't use 'repeat'
+            descriptors in guest state effect descriptions.  Hence: */
+         vassert(d->fxState[i].nRepeats == 0 && d->fxState[i].repeatLen == 0);
+         if ((d->fxState[i].fx == Ifx_Write || d->fxState[i].fx == Ifx_Modify)) {
+            Int guest_reg = get_guest_reg(d->fxState[i].offset);
+            if (guest_reg != GUEST_UNKNOWN)
+               env->old_value_valid[guest_reg] = False;
+         }
+      }
+
+      if (d->tmp == IRTemp_INVALID) {
+         /* No return value. */
+         retty = Ity_INVALID;
+         doHelperCall(&addToSp, &rloc, env, d->guard,  d->cee, retty,
+                      d->args);
+         vassert(is_sane_RetLoc(rloc));
+         vassert(rloc.pri == RLPri_None);
+         vassert(addToSp == 0);
+
+         return;
+      }
+
+      retty = typeOfIRTemp(env->type_env, d->tmp);
+      if (retty == Ity_I64 || retty == Ity_I32
+          || retty == Ity_I16 || retty == Ity_I8) {
+         /* Move the returned value to the destination register */
+         HReg ret = make_gpr(S390_REGNO_RETURN_VALUE);
+
+         dst = lookupIRTemp(env, d->tmp);
+         doHelperCall(&addToSp, &rloc, env, d->guard,  d->cee, retty,
+                      d->args);
+         vassert(is_sane_RetLoc(rloc));
+         vassert(rloc.pri == RLPri_Int);
+         vassert(addToSp == 0);
+         addInstr(env, s390_insn_move(sizeof(ULong), dst, ret));
+
+         return;
+      }
+      break;
+   }
+
+   case Ist_CAS:
+      if (stmt->Ist.CAS.details->oldHi == IRTemp_INVALID) {
+         IRCAS *cas = stmt->Ist.CAS.details;
+         s390_amode *op2 = s390_isel_amode_b12_b20(env, cas->addr);
+         HReg op3 = s390_isel_int_expr(env, cas->dataLo);  /* new value */
+         HReg op1 = s390_isel_int_expr(env, cas->expdLo);  /* expected value */
+         HReg old = lookupIRTemp(env, cas->oldLo);
+
+         if (typeOfIRTemp(env->type_env, cas->oldLo) == Ity_I32) {
+            addInstr(env, s390_insn_cas(4, op1, op2, op3, old));
+         } else {
+            addInstr(env, s390_insn_cas(8, op1, op2, op3, old));
+         }
+         return;
+      } else {
+         IRCAS *cas = stmt->Ist.CAS.details;
+         s390_amode *op2 = s390_isel_amode_b12_b20(env, cas->addr);
+         HReg r8, r9, r10, r11, r1;
+         HReg op3_high = s390_isel_int_expr(env, cas->dataHi);  /* new value */
+         HReg op3_low  = s390_isel_int_expr(env, cas->dataLo);  /* new value */
+         HReg op1_high = s390_isel_int_expr(env, cas->expdHi);  /* expected value */
+         HReg op1_low  = s390_isel_int_expr(env, cas->expdLo);  /* expected value */
+         HReg old_low  = lookupIRTemp(env, cas->oldLo);
+         HReg old_high = lookupIRTemp(env, cas->oldHi);
+
+         /* Use non-virtual registers r8 and r9 as pair for op1
+            and move op1 there */
+         r8 = make_gpr(8);
+         r9 = make_gpr(9);
+         addInstr(env, s390_insn_move(8, r8, op1_high));
+         addInstr(env, s390_insn_move(8, r9, op1_low));
+
+         /* Use non-virtual registers r10 and r11 as pair for op3
+            and move op3 there */
+         r10 = make_gpr(10);
+         r11 = make_gpr(11);
+         addInstr(env, s390_insn_move(8, r10, op3_high));
+         addInstr(env, s390_insn_move(8, r11, op3_low));
+
+         /* Register r1 is used as a scratch register */
+         r1 = make_gpr(1);
+
+         if (typeOfIRTemp(env->type_env, cas->oldLo) == Ity_I32) {
+            addInstr(env, s390_insn_cdas(4, r8, r9, op2, r10, r11,
+                                         old_high, old_low, r1));
+         } else {
+            addInstr(env, s390_insn_cdas(8, r8, r9, op2, r10, r11,
+                                         old_high, old_low, r1));
+         }
+         addInstr(env, s390_insn_move(8, op1_high, r8));
+         addInstr(env, s390_insn_move(8, op1_low,  r9));
+         addInstr(env, s390_insn_move(8, op3_high, r10));
+         addInstr(env, s390_insn_move(8, op3_low,  r11));
+         return;
+      }
+      break;
+
+      /* --------- EXIT --------- */
+   case Ist_Exit: {
+      s390_cc_t cond;
+      IRConstTag tag = stmt->Ist.Exit.dst->tag;
+
+      if (tag != Ico_U64)
+         vpanic("s390_isel_stmt: Ist_Exit: dst is not a 64-bit value");
+
+      s390_amode *guest_IA = s390_amode_for_guest_state(stmt->Ist.Exit.offsIP);
+      cond = s390_isel_cc(env, stmt->Ist.Exit.guard);
+
+      /* Case: boring transfer to known address */
+      if (stmt->Ist.Exit.jk == Ijk_Boring) {
+         if (env->chaining_allowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool to_fast_entry
+               = ((Addr64)stmt->Ist.Exit.dst->Ico.U64) > env->max_ga;
+            if (0) vex_printf("%s", to_fast_entry ? "Y" : ",");
+            addInstr(env, s390_insn_xdirect(cond, stmt->Ist.Exit.dst->Ico.U64,
+                                            guest_IA, to_fast_entry));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg dst = s390_isel_int_expr(env,
+                                          IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, s390_insn_xassisted(cond, dst, guest_IA, Ijk_Boring));
+         }
+         return;
+      }
+
+      /* Case: assisted transfer to arbitrary address */
+      switch (stmt->Ist.Exit.jk) {
+      case Ijk_EmFail:
+      case Ijk_EmWarn:
+      case Ijk_NoDecode:
+      case Ijk_InvalICache:
+      case Ijk_Sys_syscall:
+      case Ijk_ClientReq:
+      case Ijk_NoRedir:
+      case Ijk_Yield:
+      case Ijk_SigTRAP: {
+         HReg dst = s390_isel_int_expr(env, IRExpr_Const(stmt->Ist.Exit.dst));
+         addInstr(env, s390_insn_xassisted(cond, dst, guest_IA,
+                                           stmt->Ist.Exit.jk));
+         return;
+      }
+      default:
+         break;
+      }
+
+      /* Do we ever expect to see any other kind? */
+      goto stmt_fail;
+   }
+
+      /* --------- MEM FENCE --------- */
+   case Ist_MBE:
+      switch (stmt->Ist.MBE.event) {
+         case Imbe_Fence:
+            addInstr(env, s390_insn_mfence());
+            return;
+         default:
+            break;
+      }
+      break;
+
+      /* --------- Miscellaneous --------- */
+
+   case Ist_PutI:    /* Not needed */
+   case Ist_IMark:   /* Doesn't generate any executable code */
+   case Ist_NoOp:    /* Doesn't generate any executable code */
+   case Ist_AbiHint: /* Meaningless in IR */
+      return;
+
+   default:
+      break;
+   }
+
+ stmt_fail:
+   ppIRStmt(stmt);
+   vpanic("s390_isel_stmt");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Basic block terminators (Nexts)             ---*/
+/*---------------------------------------------------------*/
+
+static void
+iselNext(ISelEnv *env, IRExpr *next, IRJumpKind jk, Int offsIP)
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n-- PUT(%d) = ", offsIP);
+      ppIRExpr(next);
+      vex_printf("; exit-");
+      ppIRJumpKind(jk);
+      vex_printf("\n");
+   }
+
+   s390_amode *guest_IA = s390_amode_for_guest_state(offsIP);
+
+   /* Case: boring transfer to known address */
+   if (next->tag == Iex_Const) {
+      IRConst *cdst = next->Iex.Const.con;
+      vassert(cdst->tag == Ico_U64);
+      if (jk == Ijk_Boring || jk == Ijk_Call) {
+         /* Boring transfer to known address */
+         if (env->chaining_allowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool to_fast_entry
+               = ((Addr64)cdst->Ico.U64) > env->max_ga;
+            if (0) vex_printf("%s", to_fast_entry ? "X" : ".");
+            addInstr(env, s390_insn_xdirect(S390_CC_ALWAYS, cdst->Ico.U64,
+                                            guest_IA, to_fast_entry));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an indirect transfer,
+               as that's the cheapest alternative that is allowable. */
+            HReg dst = s390_isel_int_expr(env, next);
+            addInstr(env, s390_insn_xassisted(S390_CC_ALWAYS, dst, guest_IA,
+                                              Ijk_Boring));
+         }
+         return;
+      }
+   }
+
+   /* Case: call/return (==boring) transfer to any address */
+   switch (jk) {
+   case Ijk_Boring:
+   case Ijk_Ret:
+   case Ijk_Call: {
+      HReg dst = s390_isel_int_expr(env, next);
+      if (env->chaining_allowed) {
+         addInstr(env, s390_insn_xindir(S390_CC_ALWAYS, dst, guest_IA));
+      } else {
+         addInstr(env, s390_insn_xassisted(S390_CC_ALWAYS, dst, guest_IA,
+                                           Ijk_Boring));
+      }
+      return;
+   }
+   default:
+      break;
+   }
+
+   /* Case: some other kind of transfer to any address */
+   switch (jk) {
+   case Ijk_EmFail:
+   case Ijk_EmWarn:
+   case Ijk_NoDecode:
+   case Ijk_InvalICache:
+   case Ijk_Sys_syscall:
+   case Ijk_ClientReq:
+   case Ijk_NoRedir:
+   case Ijk_Yield:
+   case Ijk_SigTRAP: {
+      HReg dst = s390_isel_int_expr(env, next);
+      addInstr(env, s390_insn_xassisted(S390_CC_ALWAYS, dst, guest_IA, jk));
+      return;
+   }
+   default:
+      break;
+   }
+
+   vpanic("iselNext");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Insn selector top-level                           ---*/
+/*---------------------------------------------------------*/
+
/* Translate an entire SB to s390 code.
   Note: archinfo_host is a pointer to a stack-allocated variable.
   Do not assign it to a global variable!

   bb          - the IR superblock to translate
   archinfo_host - host capability/endianness info (hwcaps and endness
                 are read here; nothing else visible in this function)
   offset_host_evcheck_counter / offset_host_evcheck_fail_addr
               - guest-state offsets used to build the event check that
                 is emitted as the very first instruction
   chaining_allowed, add_profinc, max_ga - recorded in the ISelEnv and
                 used by the statement/terminator selectors.
   Returns the array of selected host instructions, with n_vregs set to
   the number of virtual registers used.
   (arch_host and vbi are not referenced in this function body.) */

HInstrArray *
iselSB_S390(const IRSB *bb, VexArch arch_host, const VexArchInfo *archinfo_host,
            const VexAbiInfo *vbi, Int offset_host_evcheck_counter,
            Int offset_host_evcheck_fail_addr, Bool chaining_allowed,
            Bool add_profinc, Addr max_ga)
{
   UInt     i, j;
   HReg     hreg, hregHI;
   ISelEnv *env;
   UInt     hwcaps_host = archinfo_host->hwcaps;

   /* Do some sanity checks */
   vassert((VEX_HWCAPS_S390X(hwcaps_host) & ~(VEX_HWCAPS_S390X_ALL)) == 0);

   /* Check that the host's endianness is as expected. */
   vassert(archinfo_host->endness == VexEndnessBE);

   /* Make up an initial environment to use. */
   env = LibVEX_Alloc_inline(sizeof(ISelEnv));
   env->vreg_ctr = 0;

   /* Set up output code array. */
   env->code = newHInstrArray();

   /* Copy BB's type env. */
   env->type_env = bb->tyenv;

   /* Set up data structures for tracking guest register values. */
   for (i = 0; i < NUM_TRACKED_REGS; ++i) {
      env->old_value[i] = 0;  /* just something to have a defined value */
      env->old_value_valid[i] = False;
   }

   /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
      change as we go along. For some reason types_used has Int type -- but
      it should be unsigned. Internally we use an unsigned type; so we
      assert it here. */
   vassert(bb->tyenv->types_used >= 0);

   env->n_vregmap = bb->tyenv->types_used;
   env->vregmap   = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
   env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));

   /* No rounding mode has been established yet in this translation. */
   env->previous_bfp_rounding_mode = NULL;
   env->previous_dfp_rounding_mode = NULL;

   /* and finally ... */
   env->hwcaps    = hwcaps_host;

   env->max_ga = max_ga;
   env->chaining_allowed = chaining_allowed;

   /* For each IR temporary, allocate a suitably-kinded virtual
      register.  j counts virtual registers handed out; 128-bit types
      need a pair (vregmap + vregmapHI). */
   j = 0;
   for (i = 0; i < env->n_vregmap; i++) {
      hregHI = hreg = INVALID_HREG;
      switch (bb->tyenv->types[i]) {
      case Ity_I1:
      case Ity_I8:
      case Ity_I16:
      case Ity_I32:
      case Ity_I64:
         hreg = mkVRegI(j++);
         break;

      case Ity_I128:
         hreg   = mkVRegI(j++);
         hregHI = mkVRegI(j++);
         break;

      case Ity_F32:
      case Ity_F64:
      case Ity_D32:
      case Ity_D64:
         hreg = mkVRegF(j++);
         break;

      case Ity_F128:
      case Ity_D128:
         hreg   = mkVRegF(j++);
         hregHI = mkVRegF(j++);
         break;

      case Ity_V128: /* fall through */
      default:
         ppIRType(bb->tyenv->types[i]);
         vpanic("iselSB_S390: IRTemp type");
      }

      env->vregmap[i]   = hreg;
      env->vregmapHI[i] = hregHI;
   }
   env->vreg_ctr = j;

   /* The very first instruction must be an event check. */
   s390_amode *counter, *fail_addr;
   counter   = s390_amode_for_guest_state(offset_host_evcheck_counter);
   fail_addr = s390_amode_for_guest_state(offset_host_evcheck_fail_addr);
   addInstr(env, s390_insn_evcheck(counter, fail_addr));

   /* Possibly a block counter increment (for profiling).  At this
      point we don't know the address of the counter, so just pretend
      it is zero.  It will have to be patched later, but before this
      translation is used, by a call to LibVEX_patchProfInc. */
   if (add_profinc) {
      addInstr(env, s390_insn_profinc());
   }

   /* Ok, finally we can iterate over the statements. */
   for (i = 0; i < bb->stmts_used; i++)
      if (bb->stmts[i])
         s390_isel_stmt(env, bb->stmts[i]);

   iselNext(env, bb->next, bb->jumpkind, bb->offsIP);

   /* Record the number of vregs we used. */
   env->code->n_vregs = env->vreg_ctr;

   return env->code;
}
+
+/*---------------------------------------------------------------*/
+/*--- end                                    host_s390_isel.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_tilegx_defs.c b/VEX/priv/host_tilegx_defs.c
new file mode 100644
index 0000000..92ca2c7
--- /dev/null
+++ b/VEX/priv/host_tilegx_defs.c
@@ -0,0 +1,2628 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                host_tilegx_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2010-2013 Tilera Corp.
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "host_tilegx_defs.h"
+#include "tilegx_disasm.h"
+
+/* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+/* Register number for guest state pointer in host code, r50 */
+#define GuestSP     ( 50)
+/* CONTEXT_EX0 offset */
+#define OFFSET_EX0  (576)
+/* CONTEXT_EX1 offset */
+#define OFFSET_EX1  (584)
+/* COND offset */
+#define OFFSET_COND (608)
+/* PC offset */
+#define OFFSET_PC   (512)
+
+/* guest_COND offset. */
+#define COND_OFFSET() OFFSET_COND
+
+/*---------------- Registers ----------------*/
+
+void ppHRegTILEGX ( HReg reg )
+{
+  Int r;
+  static const HChar *ireg_names[64] = {
+    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",  "r8",  "r9",
+    "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
+    "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29",
+    "r30", "r31", "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
+    "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", "r48", "r49",
+    "r50", "r51", "r52", "r53", "r54", "r55",
+    "sn",  "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero"
+  };
+
+  /* Be generic for all virtual regs. */
+  if (hregIsVirtual(reg)) {
+    ppHReg(reg);
+    return;
+  }
+
+  /* But specific for real regs. */
+  vassert(hregClass(reg) == HRcInt32 || hregClass(reg) == HRcInt64 ||
+          hregClass(reg) == HRcFlt32 || hregClass(reg) == HRcFlt64);
+
+  /* But specific for real regs. */
+  switch (hregClass(reg)) {
+  case HRcInt32:
+  case HRcInt64:
+    r = hregEncoding(reg);
+    vassert(r >= 0 && r < 64);
+    vex_printf("%s", ireg_names[r]);
+    return;
+  case HRcFlt32:
+    r = hregEncoding(reg);
+    vassert(r >= 0 && r < 64);
+    vex_printf("%s", ireg_names[r]);
+    return;
+  case HRcFlt64:
+    r = hregEncoding(reg);
+    vassert(r >= 0 && r < 64);
+    vex_printf("%s", ireg_names[r]);
+    return;
+  default:
+    vpanic("ppHRegTILEGX");
+  }
+
+  return;
+}
+
/* Printable names for TILEGXUnaryOp values.
   NOTE(review): these tables are indexed directly by the corresponding
   op enums (see ppTILEGXInstr) -- the entry order must match the enum
   declaration order in host_tilegx_defs.h; verify when adding ops. */
static const HChar* tilegxUnaryOp [] =
  {
    "clz ",
    "ctz ",
    "nop "
  };

/* Printable names for TILEGXAluOp values (slot 0 is the invalid op). */
static const HChar* tilegxAluOp [] =
  {  "Alu_invalid",
     "Add ",
     "Sub ",
     "And ",
     "Or  ",
     "Nor ",
     "Xor "
  };

/* Printable names for TILEGXShftOp values (slot 0 is the invalid op). */
static const HChar* tilegxShftOp [] =
  {
    "Shft_invalid",
    "Sll    ",
    "Srl    ",
    "Sra    ",
    "Sll8x8 ",
    "Srl8x8 ",
  };

/* Printable names for TILEGXBfOp (bitfield ops) values. */
static const HChar* tilegxBfOp [] =
  {
    "BfExts ",
    "BfEtxu ",
    "BfIns  "
  };


/* Printable names for TILEGXAcasOp (atomic compare-and-swap family)
   values. */
static const HChar* tilegxAcasOp [] =
  {
    "CmpExch    ",
    "Exch       ",
    "FetchAnd   ",
    "FetchAdd   ",
    "FetchAddgez",
    "FetchOr    "
  };

/* Printable names for TILEGXInstr tags, indexed by instr->tag in
   ppTILEGXInstr. */
static const HChar* tilegxInstrTag [] =
  {
    "Imm      ",
    "ALU      ",
    "Shift    ",
    "Unary    ",
    "Cmp      ",
    "CmpI     ",
    "Mul      ",
    "Call     ",
    "XDirect  ",
    "XIndir   ",
    "XAssisted",
    "EvCheck  ",
    "ProfInc  ",
    "RdWrLR   ",
    "Load     ",
    "Store    ",
    "MovCond  ",
    "BitField ",
    "ACAS     "
  };
+
+/* -------- Pretty Print instructions ------------- */
+static void ppLoadImm ( HReg dst, ULong imm )
+{
+  vex_printf("li ");
+  ppHRegTILEGX(dst);
+  vex_printf(",0x%016lx", (unsigned long)imm);
+}
+
+void ppTILEGXInstr ( const TILEGXInstr * instr )
+{
+  vex_printf("%s ", tilegxInstrTag[instr->tag]);
+  switch (instr->tag) {
+  case GXin_LI:  {
+    ppHRegTILEGX(instr->GXin.LI.dst);
+    vex_printf(",0x%016llx", instr->GXin.LI.imm);
+  }
+    break;
+
+  case GXin_Alu: {
+    HReg r_srcL = instr->GXin.Alu.srcL;
+    TILEGXRH *rh_srcR = instr->GXin.Alu.srcR;
+    /* generic */
+    vex_printf("%s ", tilegxAluOp[instr->GXin.Alu.op]);
+    ppHRegTILEGX(instr->GXin.Alu.dst);
+    vex_printf(",");
+    ppHRegTILEGX(r_srcL);
+    vex_printf(",");
+    ppTILEGXRH(rh_srcR);
+  }
+    break;
+
+  case GXin_Shft: {
+    HReg r_srcL = instr->GXin.Shft.srcL;
+    TILEGXRH *rh_srcR = instr->GXin.Shft.srcR;
+    vex_printf("%s ", tilegxShftOp[instr->GXin.Shft.op]);
+    ppHRegTILEGX(instr->GXin.Shft.dst);
+    vex_printf(",");
+    ppHRegTILEGX(r_srcL);
+    vex_printf(",");
+    ppTILEGXRH(rh_srcR);
+  }
+    break;
+
+  case GXin_Unary: {
+    vex_printf("%s ", tilegxUnaryOp[instr->GXin.Unary.op]);
+    ppHRegTILEGX(instr->GXin.Unary.dst);
+    vex_printf(",");
+    ppHRegTILEGX(instr->GXin.Unary.src);
+  }
+    break;
+
+  case GXin_Cmp: {
+    ppHRegTILEGX(instr->GXin.Cmp.dst);
+    vex_printf(" = %s ( ", showTILEGXCondCode(instr->GXin.Cmp.cond));
+    ppHRegTILEGX(instr->GXin.Cmp.srcL);
+    vex_printf(", ");
+    ppHRegTILEGX(instr->GXin.Cmp.srcR);
+    vex_printf(" )");
+  }
+    break;
+
+  case GXin_CmpI: {
+    ppHRegTILEGX(instr->GXin.CmpI.dst);
+    vex_printf(" = %s ( ", showTILEGXCondCode(instr->GXin.CmpI.cond));
+    ppHRegTILEGX(instr->GXin.CmpI.srcL);
+    vex_printf(", ");
+    ppTILEGXRH(instr->GXin.CmpI.srcR);
+    vex_printf(" )");
+  }
+    break;
+
+  case GXin_Mul: {
+    if (instr->GXin.Mul.widening == False) {
+      vex_printf("mul ");
+      ppHRegTILEGX(instr->GXin.Mul.dst);
+      vex_printf(", ");
+      ppHRegTILEGX(instr->GXin.Mul.srcL);
+      vex_printf(", ");
+      ppHRegTILEGX(instr->GXin.Mul.srcR);
+
+    } else {
+      vex_printf("%s ", instr->GXin.Mul.syned ? "mull32s" : "mull32u");
+      ppHRegTILEGX(instr->GXin.Mul.dst);
+      vex_printf(", ");
+      ppHRegTILEGX(instr->GXin.Mul.srcL);
+      vex_printf(", ");
+      ppHRegTILEGX(instr->GXin.Mul.srcR);
+    }
+  }
+    break;
+
+  case GXin_Call: {
+    Int n;
+    if (instr->GXin.Call.cond != TILEGXcc_AL) {
+      vex_printf("if (%s (", showTILEGXCondCode(instr->GXin.Call.cond));
+      ppHRegTILEGX(instr->GXin.Call.src);
+      vex_printf(",zero))");
+    }
+    else
+      vex_printf("(always) ");
+
+    vex_printf("{ ");
+    ppLoadImm(hregTILEGX_R11(), instr->GXin.Call.target);
+
+    vex_printf(" ; [");
+    for (n = 0; n < 56; n++) {
+      if (instr->GXin.Call.argiregs & (1ULL << n)) {
+        vex_printf("r%d", n);
+        if ((instr->GXin.Call.argiregs >> n) > 1)
+          vex_printf(",");
+      }
+    }
+    vex_printf("] }");
+  }
+    break;
+
+  case GXin_XDirect:
+    vex_printf("(xDirect) ");
+    vex_printf("if (guest_COND.%s) { ",
+               showTILEGXCondCode(instr->GXin.XDirect.cond));
+    vex_printf("move r11, 0x%x,", (UInt)instr->GXin.XDirect.dstGA);
+    vex_printf("; st r11, ");
+    ppTILEGXAMode(instr->GXin.XDirect.amPC);
+    vex_printf("; move r11, $disp_cp_chain_me_to_%sEP; jalr r11; nop}",
+               instr->GXin.XDirect.toFastEP ? "fast" : "slow");
+    return;
+  case GXin_XIndir:
+    vex_printf("(xIndir) ");
+    vex_printf("if (guest_COND.%s) { st ",
+               showTILEGXCondCode(instr->GXin.XIndir.cond));
+    ppHRegTILEGX(instr->GXin.XIndir.dstGA);
+    vex_printf(", ");
+    ppTILEGXAMode(instr->GXin.XIndir.amPC);
+    vex_printf("; move r11, $disp_indir; jalr r11; nop}");
+    return;
+  case GXin_XAssisted:
+    vex_printf("(xAssisted) ");
+    vex_printf("if (guest_COND.%s) { ",
+               showTILEGXCondCode(instr->GXin.XAssisted.cond));
+    vex_printf("st ");
+    ppHRegTILEGX(instr->GXin.XAssisted.dstGA);
+    vex_printf(", ");
+    ppTILEGXAMode(instr->GXin.XAssisted.amPC);
+    vex_printf("; move r50, $IRJumpKind_to_TRCVAL(%d)",
+               (Int)instr->GXin.XAssisted.jk);
+    vex_printf("; move r11, $disp_assisted; jalr r11; nop; }");
+    return;
+
+  case GXin_EvCheck:
+    vex_printf("(evCheck) ld r11, ");
+    ppTILEGXAMode(instr->GXin.EvCheck.amCounter);
+    vex_printf("; addli r11, r11, -1");
+    vex_printf("; st r11, ");
+    ppTILEGXAMode(instr->GXin.EvCheck.amCounter);
+    vex_printf("; bgez r11, nofail; jalr *");
+    ppTILEGXAMode(instr->GXin.EvCheck.amFailAddr);
+    vex_printf("; nofail:");
+    return;
+  case GXin_ProfInc:
+    vex_printf("(profInc) move r11, ($NotKnownYet); "
+               "ld r8, r11; "
+               "addi r8, r8, 1; "
+               "st r11, r8; " );
+    return;
+  case GXin_Load: {
+    UChar sz = instr->GXin.Load.sz;
+    UChar c_sz = sz == 1 ? '1' : sz == 2 ? '2' : sz == 4 ? '4' : '8';
+    vex_printf("ld%c ", c_sz);
+    ppHRegTILEGX(instr->GXin.Load.dst);
+    vex_printf(",");
+    ppTILEGXAMode(instr->GXin.Load.src);
+  }
+    break;
+
+  case GXin_Store: {
+    UChar sz = instr->GXin.Store.sz;
+    UChar c_sz = sz == 1 ? '1' : sz == 2 ? '2' : sz == 4 ? '4' : '8';
+    vex_printf("st%c ", c_sz);
+    ppTILEGXAMode(instr->GXin.Store.dst);
+    vex_printf(",");
+    ppHRegTILEGX(instr->GXin.Store.src);
+  }
+    break;
+
+  case GXin_MovCond: {
+    ppHRegTILEGX(instr->GXin.MovCond.dst);
+    vex_printf("=");
+    showTILEGXCondCode(instr->GXin.MovCond.cond);
+    vex_printf("?");
+    ppHRegTILEGX(instr->GXin.MovCond.srcL);
+    vex_printf(":");
+    ppTILEGXRH(instr->GXin.MovCond.srcR);
+  }
+    break;
+
+  case GXin_Acas: {
+    vex_printf("%s ",  tilegxAcasOp[instr->GXin.Acas.op]);
+    ppHRegTILEGX(instr->GXin.Acas.old);
+    vex_printf(",");
+    if (instr->GXin.Acas.op == GXacas_CMPEXCH) {
+      ppHRegTILEGX(instr->GXin.Acas.exp);
+      vex_printf(",");
+    }
+    ppHRegTILEGX(instr->GXin.Acas.new);
+  }
+    break;
+
+  case GXin_Bf: {
+    vex_printf("%s ",  tilegxBfOp[instr->GXin.Bf.op]);
+    ppHRegTILEGX(instr->GXin.Bf.dst);
+    vex_printf(",");
+    ppHRegTILEGX(instr->GXin.Bf.src);
+    vex_printf(",");
+    vex_printf("%d,%d", (Int)instr->GXin.Bf.Start, (Int)instr->GXin.Bf.End);
+  }
+    break;
+
+  default:
+    vassert(0);
+  }
+}
+
+
/* Build (once) and return the fixed set of real registers available to
   the register allocator on TILE-Gx.  The order of registration is the
   allocator's preference order; registers added after 'allocable' is
   set are visible but never allocated. */
const RRegUniverse* getRRegUniverse_TILEGX ( void )
{
  /* The 'universe' is constant and BIG, do it statically. */
  static RRegUniverse rRegUniverse_TILEGX;
  /* NOTE(review): declared UInt but used as a Bool flag. */
  static UInt         rRegUniverse_TILEGX_initted = False;

  /* Get a pointer of the 'universe' */
  RRegUniverse* ru = &rRegUniverse_TILEGX;

  if (LIKELY(rRegUniverse_TILEGX_initted))
    return ru;

  RRegUniverse__init(ru);

  /* Callee saves ones are listed first, since we prefer them
     if they're available */

  ru->regs[ru->size++] = hregTILEGX_R30();
  ru->regs[ru->size++] = hregTILEGX_R31();
  ru->regs[ru->size++] = hregTILEGX_R32();
  ru->regs[ru->size++] = hregTILEGX_R33();
  ru->regs[ru->size++] = hregTILEGX_R34();
  ru->regs[ru->size++] = hregTILEGX_R35();
  ru->regs[ru->size++] = hregTILEGX_R36();
  ru->regs[ru->size++] = hregTILEGX_R37();
  ru->regs[ru->size++] = hregTILEGX_R38();
  ru->regs[ru->size++] = hregTILEGX_R39();

  ru->regs[ru->size++] = hregTILEGX_R40();
  ru->regs[ru->size++] = hregTILEGX_R41();
  ru->regs[ru->size++] = hregTILEGX_R42();
  ru->regs[ru->size++] = hregTILEGX_R43();
  ru->regs[ru->size++] = hregTILEGX_R44();
  ru->regs[ru->size++] = hregTILEGX_R45();
  ru->regs[ru->size++] = hregTILEGX_R46();
  ru->regs[ru->size++] = hregTILEGX_R47();
  ru->regs[ru->size++] = hregTILEGX_R48();
  ru->regs[ru->size++] = hregTILEGX_R49();

  /* GPR 50 is reserved as Guest state */
  /* GPR 51 is reserved register, mainly used to do memory
     load and store since TileGx has no pre-displacement
     addressing mode */

  ru->regs[ru->size++] = hregTILEGX_R10();

  /* GPR 11 is reserved as next guest address */

  ru->regs[ru->size++] = hregTILEGX_R13();
  ru->regs[ru->size++] = hregTILEGX_R14();
  ru->regs[ru->size++] = hregTILEGX_R15();
  ru->regs[ru->size++] = hregTILEGX_R16();
  ru->regs[ru->size++] = hregTILEGX_R17();
  ru->regs[ru->size++] = hregTILEGX_R18();
  ru->regs[ru->size++] = hregTILEGX_R19();
  ru->regs[ru->size++] = hregTILEGX_R20();
  ru->regs[ru->size++] = hregTILEGX_R21();
  ru->regs[ru->size++] = hregTILEGX_R22();
  ru->regs[ru->size++] = hregTILEGX_R23();
  ru->regs[ru->size++] = hregTILEGX_R24();
  ru->regs[ru->size++] = hregTILEGX_R25();
  ru->regs[ru->size++] = hregTILEGX_R26();
  ru->regs[ru->size++] = hregTILEGX_R27();
  ru->regs[ru->size++] = hregTILEGX_R28();
  ru->regs[ru->size++] = hregTILEGX_R29();

  /* Everything registered up to here may be allocated. */
  ru->allocable = ru->size;

  /* And other unallocable registers. */
  ru->regs[ru->size++] = hregTILEGX_R0();
  ru->regs[ru->size++] = hregTILEGX_R1();
  ru->regs[ru->size++] = hregTILEGX_R2();
  ru->regs[ru->size++] = hregTILEGX_R3();
  ru->regs[ru->size++] = hregTILEGX_R4();
  ru->regs[ru->size++] = hregTILEGX_R5();
  ru->regs[ru->size++] = hregTILEGX_R6();
  ru->regs[ru->size++] = hregTILEGX_R7();
  ru->regs[ru->size++] = hregTILEGX_R8();
  ru->regs[ru->size++] = hregTILEGX_R9();
  ru->regs[ru->size++] = hregTILEGX_R11();
  ru->regs[ru->size++] = hregTILEGX_R12();
  ru->regs[ru->size++] = hregTILEGX_R50();
  ru->regs[ru->size++] = hregTILEGX_R51();
  ru->regs[ru->size++] = hregTILEGX_R52();
  ru->regs[ru->size++] = hregTILEGX_R53();
  ru->regs[ru->size++] = hregTILEGX_R54();
  ru->regs[ru->size++] = hregTILEGX_R55();
  ru->regs[ru->size++] = hregTILEGX_R63();

  rRegUniverse_TILEGX_initted = True;

  RRegUniverse__check_is_sane(ru);

  return ru;
}
+
+/*----------------- Condition Codes ----------------------*/
+
+const HChar *showTILEGXCondCode ( TILEGXCondCode cond )
+{
+  switch (cond) {
+  case TILEGXcc_EQ:
+    return "e"; /* equal */
+  case TILEGXcc_EQ8x8:
+    return "e8x8"; /* equal */
+
+  case TILEGXcc_NE:
+    return "ne";   /* not equal */
+  case TILEGXcc_NE8x8:
+    return "ne8x8";   /* not equal */
+
+  case TILEGXcc_HS:
+    return "hs";   /* >=u (higher or same) */
+  case TILEGXcc_LO:
+    return "lo";   /* <u  (lower) */
+
+  case TILEGXcc_MI:
+    return "mi";   /* minus (negative) */
+  case TILEGXcc_PL:
+    return "pl";   /* plus (zero or +ve) */
+
+  case TILEGXcc_VS:
+    return "vs";   /* overflow */
+  case TILEGXcc_VC:
+    return "vc";   /* no overflow */
+
+  case TILEGXcc_HI:
+    return "hi";   /* >u   (higher) */
+  case TILEGXcc_LS:
+    return "ls";   /* <=u  (lower or same) */
+
+  case TILEGXcc_GE:
+    return "ge";   /* >=s (signed greater or equal) */
+  case TILEGXcc_LT:
+    return "lt";   /* <s  (signed less than) */
+
+  case TILEGXcc_GT:
+    return "gt";   /* >s  (signed greater) */
+  case TILEGXcc_LE:
+    return "le";   /* <=s (signed less or equal) */
+
+  case TILEGXcc_AL:
+    return "al";   /* always (unconditional) */
+  case TILEGXcc_NV:
+    return "nv";   /* never (unconditional): */
+  case TILEGXcc_EZ:
+    return "ez"; /* equal 0 */
+  case TILEGXcc_NZ:
+    return "nz"; /* not equal 0 */
+
+  default:
+    vpanic("showTILEGXCondCode");
+  }
+}
+
+
+/* --------- TILEGXAMode: memory address expressions. --------- */
+
+TILEGXAMode *TILEGXAMode_IR ( Int idx, HReg base )
+{
+  TILEGXAMode *am = LibVEX_Alloc(sizeof(TILEGXAMode));
+  am->tag = GXam_IR;
+  am->GXam.IR.base = base;
+  am->GXam.IR.index = idx;
+
+  return am;
+}
+
+TILEGXAMode *nextTILEGXAModeInt ( TILEGXAMode * am )
+{
+  if (am->tag == GXam_IR)
+    return TILEGXAMode_IR(am->GXam.IR.index + 4, am->GXam.IR.base);
+
+  vpanic("dopyTILEGXAMode");
+}
+
+void ppTILEGXAMode ( const TILEGXAMode * am )
+{
+  if (am->tag == GXam_IR)
+  {
+    if (am->GXam.IR.index == 0)
+      vex_printf("(");
+    else
+      vex_printf("%d(", (Int) am->GXam.IR.index);
+    ppHRegTILEGX(am->GXam.IR.base);
+    vex_printf(")");
+    return;
+  }
+  vpanic("ppTILEGXAMode");
+}
+
+static void addRegUsage_TILEGXAMode ( HRegUsage * u, TILEGXAMode * am )
+{
+  if (am->tag == GXam_IR)
+  {
+    addHRegUse(u, HRmRead, am->GXam.IR.base);
+    return;
+  }
+
+  vpanic("addRegUsage_TILEGXAMode");
+}
+
+static void mapRegs_TILEGXAMode ( HRegRemap * m, TILEGXAMode * am )
+{
+  if (am->tag == GXam_IR)
+  {
+    am->GXam.IR.base = lookupHRegRemap(m, am->GXam.IR.base);
+    return;
+  }
+
+  vpanic("mapRegs_TILEGXAMode");
+}
+
+/* --------- Operand, which can be a reg or a u16/s16. --------- */
+
+TILEGXRH *TILEGXRH_Imm ( Bool syned, UShort imm16 )
+{
+  TILEGXRH *op = LibVEX_Alloc(sizeof(TILEGXRH));
+  op->tag = GXrh_Imm;
+  op->GXrh.Imm.syned = syned;
+  op->GXrh.Imm.imm16 = imm16;
+  /* If this is a signed value, ensure it's not -32768, so that we
+     are guaranteed always to be able to negate if needed. */
+  if (syned)
+    vassert(imm16 != 0x8000);
+  vassert(syned == True || syned == False);
+  return op;
+}
+
+TILEGXRH *TILEGXRH_Reg ( HReg reg )
+{
+  TILEGXRH *op = LibVEX_Alloc(sizeof(TILEGXRH));
+  op->tag = GXrh_Reg;
+  op->GXrh.Reg.reg = reg;
+  return op;
+}
+
+void ppTILEGXRH ( const TILEGXRH * op )
+{
+  TILEGXRHTag tag = op->tag;
+  switch (tag) {
+  case GXrh_Imm:
+    if (op->GXrh.Imm.syned)
+      vex_printf("%d", (Int) (Short) op->GXrh.Imm.imm16);
+    else
+      vex_printf("%u", (UInt) (UShort) op->GXrh.Imm.imm16);
+    return;
+  case GXrh_Reg:
+    ppHRegTILEGX(op->GXrh.Reg.reg);
+    return;
+  default:
+    vpanic("ppTILEGXRH");
+  }
+}
+
+/* An TILEGXRH can only be used in a "read" context (what would it mean
+   to write or modify a literal?) and so we enumerate its registers
+   accordingly. */
+static void addRegUsage_TILEGXRH ( HRegUsage * u, TILEGXRH * op )
+{
+  switch (op->tag) {
+  case GXrh_Imm:
+    return;
+  case GXrh_Reg:
+    addHRegUse(u, HRmRead, op->GXrh.Reg.reg);
+    return;
+  default:
+    vpanic("addRegUsage_TILEGXRH");
+  }
+}
+
+static void mapRegs_TILEGXRH ( HRegRemap * m, TILEGXRH * op )
+{
+  switch (op->tag) {
+  case GXrh_Imm:
+    return;
+  case GXrh_Reg:
+    op->GXrh.Reg.reg = lookupHRegRemap(m, op->GXrh.Reg.reg);
+    return;
+  default:
+    vpanic("mapRegs_TILEGXRH");
+  }
+}
+
+TILEGXInstr *TILEGXInstr_LI ( HReg dst, ULong imm )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_LI;
+  i->GXin.LI.dst = dst;
+  i->GXin.LI.imm = imm;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_Alu ( TILEGXAluOp op, HReg dst, HReg srcL,
+                               TILEGXRH * srcR )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Alu;
+  i->GXin.Alu.op = op;
+  i->GXin.Alu.dst = dst;
+  i->GXin.Alu.srcL = srcL;
+  i->GXin.Alu.srcR = srcR;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_Shft ( TILEGXShftOp op, Bool sz32, HReg dst, HReg srcL,
+                                TILEGXRH * srcR )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Shft;
+  i->GXin.Shft.op = op;
+  i->GXin.Shft.sz32 = sz32;
+  i->GXin.Shft.dst = dst;
+  i->GXin.Shft.srcL = srcL;
+  i->GXin.Shft.srcR = srcR;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_Unary ( TILEGXUnaryOp op, HReg dst, HReg src )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Unary;
+  i->GXin.Unary.op = op;
+  i->GXin.Unary.dst = dst;
+  i->GXin.Unary.src = src;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_Cmp ( Bool syned, Bool sz32, HReg dst,
+                               HReg srcL, HReg srcR, TILEGXCondCode cond )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Cmp;
+  i->GXin.Cmp.syned = syned;
+  i->GXin.Cmp.sz32 = sz32;
+  i->GXin.Cmp.dst = dst;
+  i->GXin.Cmp.srcL = srcL;
+  i->GXin.Cmp.srcR = srcR;
+  i->GXin.Cmp.cond = cond;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_CmpI ( Bool syned, Bool sz32, HReg dst,
+                                HReg srcL, TILEGXRH * srcR,
+                                TILEGXCondCode cond )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_CmpI;
+  i->GXin.CmpI.syned = syned;
+  i->GXin.CmpI.sz32 = sz32;
+  i->GXin.CmpI.dst = dst;
+  i->GXin.CmpI.srcL = srcL;
+  i->GXin.CmpI.srcR = srcR;
+  i->GXin.CmpI.cond = cond;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_Bf ( TILEGXBfOp op, HReg dst, HReg src,
+                              UInt Start, UInt End )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Bf;
+  i->GXin.Bf.op = op;
+  i->GXin.Bf.dst = dst;
+  i->GXin.Bf.src = src;
+  i->GXin.Bf.Start = Start;
+  i->GXin.Bf.End = End;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_Acas ( TILEGXAcasOp op, HReg old,
+                                HReg addr, HReg exp, HReg new, UInt sz )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Acas;
+  i->GXin.Acas.op = op;
+  i->GXin.Acas.old = old;
+  i->GXin.Acas.addr = addr;
+  i->GXin.Acas.exp = exp;
+  i->GXin.Acas.new = new;
+  i->GXin.Acas.sz = sz;
+  return i;
+}
+
+/* multiply */
+TILEGXInstr *TILEGXInstr_Mul ( Bool syned, Bool wid, Bool sz32,
+                               HReg dst, HReg srcL,
+                               HReg srcR )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Mul;
+  i->GXin.Mul.syned = syned;
+  i->GXin.Mul.widening = wid; /* widen=True else False */
+  i->GXin.Mul.sz32 = sz32;    /* True = 32 bits */
+  i->GXin.Mul.dst = dst;
+  i->GXin.Mul.srcL = srcL;
+  i->GXin.Mul.srcR = srcR;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_Call ( TILEGXCondCode cond, Addr64 target,
+                                ULong argiregs,
+                                HReg src )
+{
+  ULong mask;
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Call;
+  i->GXin.Call.cond = cond;
+  i->GXin.Call.target = target;
+  i->GXin.Call.argiregs = argiregs;
+  i->GXin.Call.src = src;
+
+  /* Only r0 .. r9 inclusive may be used as arg regs. Hence: */
+  mask = (1ULL << 10) - 1;
+  vassert(0 == (argiregs & ~mask));
+  return i;
+}
+
/* Build a call to 'target' without a guard-value register.
   NOTE(review): unlike TILEGXInstr_Call, GXin.Call.src is left
   uninitialised here.  getRegUsage_TILEGXInstr and
   mapRegs_TILEGXInstr only touch .src when cond != TILEGXcc_AL, so
   this constructor is presumably only used with cond == TILEGXcc_AL
   -- confirm at call sites. */
TILEGXInstr *TILEGXInstr_CallAlways ( TILEGXCondCode cond, Addr64 target,
                                      ULong argiregs )
{
  ULong mask;
  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
  i->tag = GXin_Call;
  i->GXin.Call.cond = cond;
  i->GXin.Call.target = target;
  i->GXin.Call.argiregs = argiregs;

  /* Only r0 .. r9 inclusive may be used as arg regs. Hence: */
  mask = (1ULL << 10) - 1;
  vassert(0 == (argiregs & ~mask));
  return i;
}
+
+TILEGXInstr *TILEGXInstr_XDirect ( Addr64 dstGA, TILEGXAMode* amPC,
+                                   TILEGXCondCode cond, Bool toFastEP )
+{
+  TILEGXInstr* i             = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag                     = GXin_XDirect;
+  i->GXin.XDirect.dstGA      = dstGA;
+  i->GXin.XDirect.amPC       = amPC;
+  i->GXin.XDirect.cond       = cond;
+  i->GXin.XDirect.toFastEP   = toFastEP;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_XIndir ( HReg dstGA, TILEGXAMode* amPC,
+                                  TILEGXCondCode cond )
+{
+  TILEGXInstr* i           = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag                   = GXin_XIndir;
+  i->GXin.XIndir.dstGA     = dstGA;
+  i->GXin.XIndir.amPC      = amPC;
+  i->GXin.XIndir.cond      = cond;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_XAssisted ( HReg dstGA, TILEGXAMode* amPC,
+                                     TILEGXCondCode cond, IRJumpKind jk )
+{
+  TILEGXInstr* i              = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag                      = GXin_XAssisted;
+  i->GXin.XAssisted.dstGA     = dstGA;
+  i->GXin.XAssisted.amPC      = amPC;
+  i->GXin.XAssisted.cond      = cond;
+  i->GXin.XAssisted.jk        = jk;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_EvCheck ( TILEGXAMode* amCounter,
+                                   TILEGXAMode* amFailAddr ) {
+  TILEGXInstr* i               = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag                       = GXin_EvCheck;
+  i->GXin.EvCheck.amCounter     = amCounter;
+  i->GXin.EvCheck.amFailAddr    = amFailAddr;
+  return i;
+}
+
+TILEGXInstr* TILEGXInstr_ProfInc ( void ) {
+  TILEGXInstr* i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag       = GXin_ProfInc;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_Load ( UChar sz, HReg dst, TILEGXAMode * src )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Load;
+  i->GXin.Load.sz = sz;
+  i->GXin.Load.src = src;
+  i->GXin.Load.dst = dst;
+  vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_Store(UChar sz, TILEGXAMode * dst, HReg src)
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_Store;
+  i->GXin.Store.sz = sz;
+  i->GXin.Store.src = src;
+  i->GXin.Store.dst = dst;
+  vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
+  return i;
+}
+
+/* Read/Write Link Register */
+TILEGXInstr *TILEGXInstr_RdWrLR ( Bool wrLR, HReg gpr )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_RdWrLR;
+  i->GXin.RdWrLR.wrLR = wrLR;
+  i->GXin.RdWrLR.gpr = gpr;
+  return i;
+}
+
+TILEGXInstr *TILEGXInstr_MovCond ( HReg dst, HReg argL, TILEGXRH * argR,
+                                   HReg condR, TILEGXCondCode cond )
+{
+  TILEGXInstr *i = LibVEX_Alloc(sizeof(TILEGXInstr));
+  i->tag = GXin_MovCond;
+  i->GXin.MovCond.dst = dst;
+  i->GXin.MovCond.srcL = argL;
+  i->GXin.MovCond.srcR = argR;
+  i->GXin.MovCond.condR = condR;
+  i->GXin.MovCond.cond = cond;
+  return i;
+}
+
/* --------- Helpers for register allocation. --------- */

/* Record in *u every host register that instruction 'i' reads, writes
   or modifies.  Consumed by the register allocator to compute
   liveness. */
void getRegUsage_TILEGXInstr ( HRegUsage * u, TILEGXInstr * i )
{
  initHRegUsage(u);
  switch (i->tag) {
  case GXin_LI:
    /* Load-immediate only writes its destination. */
    addHRegUse(u, HRmWrite, i->GXin.LI.dst);
    break;
  case GXin_Alu:
    addHRegUse(u, HRmRead, i->GXin.Alu.srcL);
    addRegUsage_TILEGXRH(u, i->GXin.Alu.srcR);
    addHRegUse(u, HRmWrite, i->GXin.Alu.dst);
    return;
  case GXin_CmpI:
    addHRegUse(u, HRmRead, i->GXin.CmpI.srcL);
    addRegUsage_TILEGXRH(u, i->GXin.CmpI.srcR);
    addHRegUse(u, HRmWrite, i->GXin.CmpI.dst);
    return;
  case GXin_Shft:
    addHRegUse(u, HRmRead, i->GXin.Shft.srcL);
    addRegUsage_TILEGXRH(u, i->GXin.Shft.srcR);
    addHRegUse(u, HRmWrite, i->GXin.Shft.dst);
    return;
  case GXin_Cmp:
    addHRegUse(u, HRmRead, i->GXin.Cmp.srcL);
    addHRegUse(u, HRmRead, i->GXin.Cmp.srcR);
    addHRegUse(u, HRmWrite, i->GXin.Cmp.dst);
    return;
  case GXin_Bf:
    addHRegUse(u, HRmRead, i->GXin.Bf.src);
    addHRegUse(u, HRmWrite, i->GXin.Bf.dst);
    return;
  case GXin_Acas:
    addHRegUse(u, HRmRead, i->GXin.Acas.addr);
    addHRegUse(u, HRmRead, i->GXin.Acas.new);
    /* The expected value is only consulted by compare-exchange. */
    if (i->GXin.Acas.op == GXacas_CMPEXCH)
      addHRegUse(u, HRmRead, i->GXin.Acas.exp);
    addHRegUse(u, HRmWrite, i->GXin.Acas.old);
    return;
  case GXin_Unary:
    addHRegUse(u, HRmRead, i->GXin.Unary.src);
    addHRegUse(u, HRmWrite, i->GXin.Unary.dst);
    return;
  case GXin_Mul:
    addHRegUse(u, HRmWrite, i->GXin.Mul.dst);
    addHRegUse(u, HRmRead, i->GXin.Mul.srcL);
    addHRegUse(u, HRmRead, i->GXin.Mul.srcR);
    return;
  case GXin_Call: {
    /* The guard value is read only for conditional calls. */
    if (i->GXin.Call.cond != TILEGXcc_AL)
      addHRegUse(u, HRmRead, i->GXin.Call.src);
    ULong argir;

    // Only need save r10-r29, and r0-r9 is not allocable.
    addHRegUse(u, HRmWrite, hregTILEGX_R10());
    addHRegUse(u, HRmWrite, hregTILEGX_R11());
    addHRegUse(u, HRmWrite, hregTILEGX_R12());
    addHRegUse(u, HRmWrite, hregTILEGX_R13());
    addHRegUse(u, HRmWrite, hregTILEGX_R14());
    addHRegUse(u, HRmWrite, hregTILEGX_R15());

    addHRegUse(u, HRmWrite, hregTILEGX_R16());
    addHRegUse(u, HRmWrite, hregTILEGX_R17());
    addHRegUse(u, HRmWrite, hregTILEGX_R18());
    addHRegUse(u, HRmWrite, hregTILEGX_R19());
    addHRegUse(u, HRmWrite, hregTILEGX_R20());
    addHRegUse(u, HRmWrite, hregTILEGX_R21());
    addHRegUse(u, HRmWrite, hregTILEGX_R22());
    addHRegUse(u, HRmWrite, hregTILEGX_R23());

    addHRegUse(u, HRmWrite, hregTILEGX_R24());
    addHRegUse(u, HRmWrite, hregTILEGX_R25());
    addHRegUse(u, HRmWrite, hregTILEGX_R26());
    addHRegUse(u, HRmWrite, hregTILEGX_R27());

    addHRegUse(u, HRmWrite, hregTILEGX_R28());
    addHRegUse(u, HRmWrite, hregTILEGX_R29());

    /* Now we have to state any parameter-carrying registers
       which might be read.  This depends on the argiregs field. */
    argir = i->GXin.Call.argiregs;
    if (argir & (1 << 9))
      addHRegUse(u, HRmRead, hregTILEGX_R9());
    if (argir & (1 << 8))
      addHRegUse(u, HRmRead, hregTILEGX_R8());
    if (argir & (1 << 7))
      addHRegUse(u, HRmRead, hregTILEGX_R7());
    if (argir & (1 << 6))
      addHRegUse(u, HRmRead, hregTILEGX_R6());
    if (argir & (1 << 5))
      addHRegUse(u, HRmRead, hregTILEGX_R5());
    if (argir & (1 << 4))
      addHRegUse(u, HRmRead, hregTILEGX_R4());
    if (argir & (1 << 3))
      addHRegUse(u, HRmRead, hregTILEGX_R3());
    if (argir & (1 << 2))
      addHRegUse(u, HRmRead, hregTILEGX_R2());
    if (argir & (1 << 1))
      addHRegUse(u, HRmRead, hregTILEGX_R1());
    if (argir & (1 << 0))
      addHRegUse(u, HRmRead, hregTILEGX_R0());

    /* Only r0..r9 may carry arguments (same mask as the ctor). */
    vassert(0 == (argir & ~((1ULL << 10) - 1)));
    return;
  }
  case GXin_XDirect:
    addRegUsage_TILEGXAMode(u, i->GXin.XDirect.amPC);
    return;
  case GXin_XIndir:
    addHRegUse(u, HRmRead, i->GXin.XIndir.dstGA);
    addRegUsage_TILEGXAMode(u, i->GXin.XIndir.amPC);
    return;
  case GXin_XAssisted:
    addHRegUse(u, HRmRead, i->GXin.XAssisted.dstGA);
    addRegUsage_TILEGXAMode(u, i->GXin.XAssisted.amPC);
    return;

  case GXin_EvCheck:
    addRegUsage_TILEGXAMode(u, i->GXin.EvCheck.amCounter);
    addRegUsage_TILEGXAMode(u, i->GXin.EvCheck.amFailAddr);
    return;
  case GXin_ProfInc:
    /* No register operands. */
    return;
  case GXin_Load:
    addRegUsage_TILEGXAMode(u, i->GXin.Load.src);
    addHRegUse(u, HRmWrite, i->GXin.Load.dst);
    return;
  case GXin_Store:
    addHRegUse(u, HRmRead, i->GXin.Store.src);
    addRegUsage_TILEGXAMode(u, i->GXin.Store.dst);
    return;
  case GXin_RdWrLR:
    /* Writing LR reads the gpr; reading LR writes the gpr. */
    addHRegUse(u, (i->GXin.RdWrLR.wrLR ? HRmRead : HRmWrite),
               i->GXin.RdWrLR.gpr);
    return;
  case GXin_MovCond:
    if (i->GXin.MovCond.srcR->tag == GXrh_Reg) {
      addHRegUse(u, HRmRead, i->GXin.MovCond.srcR->GXrh.Reg.reg);
    }
    addHRegUse(u, HRmRead, i->GXin.MovCond.srcL);
    addHRegUse(u, HRmRead, i->GXin.MovCond.condR);
    addHRegUse(u, HRmWrite, i->GXin.MovCond.dst);
    return;
  default:
    vpanic("getRegUsage_TILEGXInstr");
  }
}
+
+/* local helper */
+static void mapReg ( HRegRemap * m, HReg * r )
+{
+  *r = lookupHRegRemap(m, *r);
+}
+
/* Rewrite, in place, every register operand of instruction 'i'
   through the allocator's vreg->rreg map 'm'.  Mirrors the structure
   of getRegUsage_TILEGXInstr above. */
void mapRegs_TILEGXInstr ( HRegRemap * m, TILEGXInstr * i )
{
  switch (i->tag) {
  case GXin_LI:
    mapReg(m, &i->GXin.LI.dst);
    break;
  case GXin_Alu:
    mapReg(m, &i->GXin.Alu.srcL);
    mapRegs_TILEGXRH(m, i->GXin.Alu.srcR);
    mapReg(m, &i->GXin.Alu.dst);
    return;
  case GXin_CmpI:
    mapReg(m, &i->GXin.CmpI.srcL);
    mapRegs_TILEGXRH(m, i->GXin.CmpI.srcR);
    mapReg(m, &i->GXin.CmpI.dst);
    return;
  case GXin_Shft:
    mapReg(m, &i->GXin.Shft.srcL);
    mapRegs_TILEGXRH(m, i->GXin.Shft.srcR);
    mapReg(m, &i->GXin.Shft.dst);
    return;
  case GXin_Cmp:
    mapReg(m, &i->GXin.Cmp.srcL);
    mapReg(m, &i->GXin.Cmp.srcR);
    mapReg(m, &i->GXin.Cmp.dst);
    return;
  case GXin_Acas:
    mapReg(m, &i->GXin.Acas.old);
    mapReg(m, &i->GXin.Acas.addr);
    mapReg(m, &i->GXin.Acas.new);
    /* 'exp' is only meaningful for compare-exchange. */
    if (i->GXin.Acas.op == GXacas_CMPEXCH)
      mapReg(m, &i->GXin.Acas.exp);
    return;
  case GXin_Bf:
    mapReg(m, &i->GXin.Bf.src);
    mapReg(m, &i->GXin.Bf.dst);
    return;
  case GXin_Unary:
    mapReg(m, &i->GXin.Unary.src);
    mapReg(m, &i->GXin.Unary.dst);
    return;
  case GXin_Mul:
    mapReg(m, &i->GXin.Mul.dst);
    mapReg(m, &i->GXin.Mul.srcL);
    mapReg(m, &i->GXin.Mul.srcR);
    return;
  case GXin_Call:
    {
      /* Only the guard register (if conditional) is allocatable; the
         argument registers r0..r9 are fixed. */
      if (i->GXin.Call.cond != TILEGXcc_AL)
        mapReg(m, &i->GXin.Call.src);
      return;
    }
  case GXin_XDirect:
    mapRegs_TILEGXAMode(m, i->GXin.XDirect.amPC);
    return;
  case GXin_XIndir:
    mapReg(m, &i->GXin.XIndir.dstGA);
    mapRegs_TILEGXAMode(m, i->GXin.XIndir.amPC);
    return;
  case GXin_XAssisted:
    mapReg(m, &i->GXin.XAssisted.dstGA);
    mapRegs_TILEGXAMode(m, i->GXin.XAssisted.amPC);
    return;
  case GXin_EvCheck:
    mapRegs_TILEGXAMode(m, i->GXin.EvCheck.amCounter);
    mapRegs_TILEGXAMode(m, i->GXin.EvCheck.amFailAddr);
    return;
  case GXin_ProfInc:
    /* No register operands. */
    return;
  case GXin_Load:
    mapRegs_TILEGXAMode(m, i->GXin.Load.src);
    mapReg(m, &i->GXin.Load.dst);
    return;
  case GXin_Store:
    mapReg(m, &i->GXin.Store.src);
    mapRegs_TILEGXAMode(m, i->GXin.Store.dst);
    return;
  case GXin_RdWrLR:
    mapReg(m, &i->GXin.RdWrLR.gpr);
    return;
  case GXin_MovCond:
    if (i->GXin.MovCond.srcR->tag == GXrh_Reg) {
      mapReg(m, &(i->GXin.MovCond.srcR->GXrh.Reg.reg));
    }
    mapReg(m, &i->GXin.MovCond.srcL);
    mapReg(m, &i->GXin.MovCond.condR);
    mapReg(m, &i->GXin.MovCond.dst);

    return;
  default:
    vpanic("mapRegs_TILEGXInstr");
  }
}
+
+/* Figure out if i represents a reg-reg move, and if so assign the
+   source and destination to *src and *dst.  If in doubt say No.  Used
+   by the register allocator to do move coalescing.
+*/
+Bool isMove_TILEGXInstr ( TILEGXInstr * i, HReg * src, HReg * dst )
+{
+  /* Moves between integer regs */
+  if (i->tag == GXin_Alu) {
+    // or Rd,Rs,Rs == mov Rd, Rs
+    if (i->GXin.Alu.op != GXalu_OR)
+      return False;
+    if (i->GXin.Alu.srcR->tag != GXrh_Reg)
+      return False;
+    if (!sameHReg(i->GXin.Alu.srcR->GXrh.Reg.reg, i->GXin.Alu.srcL))
+      return False;
+    *src = i->GXin.Alu.srcL;
+    *dst = i->GXin.Alu.dst;
+    return True;
+  }
+  return False;
+}
+
+/* Generate tilegx spill/reload instructions under the direction of the
+   register allocator.
+*/
+void genSpill_TILEGX ( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
+                       Int offsetB )
+{
+  TILEGXAMode *am;
+  vassert(offsetB >= 0);
+  vassert(!hregIsVirtual(rreg));
+  *i1 = *i2 = NULL;
+  am = TILEGXAMode_IR(offsetB, TILEGXGuestStatePointer());
+
+  switch (hregClass(rreg)) {
+  case HRcInt64:
+    *i1 = TILEGXInstr_Store(8, am, rreg);
+    break;
+  case HRcInt32:
+    *i1 = TILEGXInstr_Store(4, am, rreg);
+    break;
+  default:
+    ppHRegClass(hregClass(rreg));
+    vpanic("genSpill_TILEGX: unimplemented regclass");
+  }
+}
+
+void genReload_TILEGX ( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
+                        Int offsetB )
+{
+  TILEGXAMode *am;
+  vassert(!hregIsVirtual(rreg));
+  am = TILEGXAMode_IR(offsetB, TILEGXGuestStatePointer());
+
+  switch (hregClass(rreg)) {
+  case HRcInt64:
+    *i1 = TILEGXInstr_Load(8, rreg, am);
+    break;
+  case HRcInt32:
+    *i1 = TILEGXInstr_Load(4, rreg, am);
+    break;
+  default:
+    ppHRegClass(hregClass(rreg));
+    vpanic("genReload_TILEGX: unimplemented regclass");
+    break;
+  }
+}
+
+/* --------- The tilegx assembler --------- */
+
+static UChar *mkInsnBin ( UChar * p, ULong insn )
+{
+  vassert(insn != (ULong)(-1));
+  if (((Addr)p) & 7) {
+    vex_printf("p=%p\n", p);
+    vassert((((Addr)p) & 7) == 0);
+  }
+  *((ULong *)(Addr)p) = insn;
+  p += 8;
+  return p;
+}
+
+static Int display_insn ( struct tilegx_decoded_instruction
+                          decoded[1] )
+{
+  Int i;
+  for (i = 0;
+       decoded[i].opcode && (i < 1);
+       i++) {
+    Int n;
+    vex_printf("%s ", decoded[i].opcode->name);
+
+    for (n = 0; n < decoded[i].opcode->num_operands; n++) {
+      const struct tilegx_operand *op = decoded[i].operands[n];
+
+      if (op->type == TILEGX_OP_TYPE_REGISTER)
+        vex_printf("r%d", (Int) decoded[i].operand_values[n]);
+      else
+        vex_printf("%ld", (unsigned long)decoded[i].operand_values[n]);
+
+      if (n != (decoded[i].opcode->num_operands - 1))
+        vex_printf(", ");
+    }
+    vex_printf(" ");
+  }
+  return i;
+}
+
+
+Int decode_and_display ( tilegx_bundle_bits *p, Int count, ULong pc )
+{
+  struct tilegx_decoded_instruction
+    decode[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
+  Int i;
+
+#ifdef TILEGX_DEBUG
+  vex_printf("Insn@0x%lx\n", (ULong)p);
+#endif
+
+  if (count > 0x1000) {
+    vex_printf("insn count: %d", count);
+    vassert(0);
+  }
+
+  for (i = 0 ; i < count ; i++) {
+    if (pc) {
+      vex_printf("%012llx %016llx  ", pc, (ULong)p[i]);
+      pc += 8;
+    }
+    parse_insn_tilegx(p[i], 0, decode);
+
+    Int n, k, bundled = 0;
+
+    for(k = 0; decode[k].opcode && (k <TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE);
+        k++) {
+      if (decode[k].opcode->mnemonic != TILEGX_OPC_FNOP)
+        bundled++;
+    }
+
+    /* Print "{", ";" and "}" only if multiple instructions are bundled. */
+    if (bundled > 1)
+      vex_printf("{ ");
+
+    n = bundled;
+    for(k = 0; decode[k].opcode && (k <TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE);
+        k++) {
+      if (decode[k].opcode->mnemonic == TILEGX_OPC_FNOP)
+        continue;
+
+      display_insn(&decode[k]);
+
+      if (--n > 0)
+        vex_printf("; ");
+    }
+
+    if (bundled > 1)
+      vex_printf(" }");
+
+    vex_printf("\n");
+  }
+  return count;
+}
+
+static UInt iregNo ( HReg r )
+{
+  UInt n;
+  vassert(hregClass(r) == HRcInt64);
+  vassert(!hregIsVirtual(r));
+  n = hregEncoding(r);
+  vassert(n <= 63);
+  return n;
+}
+
/* Emit the load or store selected by opc1 for register rSD through
   the IR-form address 'am' (base register + immediate index).  When a
   nonzero index is present, the effective address is first formed in
   scratch register r51 via ADDLI.  Returns the advanced emit
   pointer. */
static UChar *doAMode_IR ( UChar * p, UInt opc1, UInt rSD, TILEGXAMode * am )
{
  UInt rA;
  vassert(am->tag == GXam_IR);

  rA = iregNo(am->GXam.IR.base);

  /* Stores take (address, value) operand order; loads take
     (dest, address).  Hence the two mirrored branches. */
  if (opc1 == TILEGX_OPC_ST1 || opc1 == TILEGX_OPC_ST2 ||
      opc1 == TILEGX_OPC_ST4 || opc1 == TILEGX_OPC_ST) {
    if ( am->GXam.IR.index ) {
      /* r51 is reserved scratch registers. */
      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
                                    51, rA, am->GXam.IR.index));
      /* store rSD to address in r51 */
      p = mkInsnBin(p, mkTileGxInsn(opc1, 2, 51, rSD));
    } else {
      /* store rSD to address in rA */
      p = mkInsnBin(p, mkTileGxInsn(opc1, 2, rA, rSD));
    }
  } else {
    if ( am->GXam.IR.index ) {
      /* r51 is reserved scratch registers. */
      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
                                    51, rA, am->GXam.IR.index));
      /* load from address in r51 to rSD. */
      p = mkInsnBin(p, mkTileGxInsn(opc1, 2, rSD, 51));
    } else {
      /* load from address in rA to rSD. */
      p = mkInsnBin(p, mkTileGxInsn(opc1, 2, rSD, rA));
    }
  }
  return p;
}
+
+/* Generate a machine-word sized load or store using exact 2 bundles.
+   Simplified version of the GXin_Load and GXin_Store cases below. */
+static UChar* do_load_or_store_machine_word ( UChar* p, Bool isLoad, UInt reg,
+                                              TILEGXAMode* am )
+{
+  UInt rA = iregNo(am->GXam.IR.base);
+
+  if (am->tag != GXam_IR)
+    vpanic(__func__);
+
+  if (isLoad) /* load */ {
+     /* r51 is reserved scratch registers. */
+     p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+				   51, rA, am->GXam.IR.index));
+     /* load from address in r51 to rSD. */
+     p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_LD, 2, reg, 51));
+  } else /* store */ {
+     /* r51 is reserved scratch registers. */
+     p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+				   51, rA, am->GXam.IR.index));
+     /* store rSD to address in r51 */
+     p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ST, 2, 51, reg));
+  }
+  return p;
+}
+
/* Load the 64-bit immediate 'imm' into r_dst, using the shortest of
   four encodings (1, 1, 2 or 4 bundles).  Returns the advanced emit
   pointer. */
static UChar *mkLoadImm ( UChar * p, UInt r_dst, ULong imm )
{
  vassert(r_dst < 0x40);

  if (imm == 0)
  {
    /* A special case, use r63 - zero register. */
    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MOVE, 2, r_dst, 63));
  }
  else if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000)
  {
    /* imm fits in 16 signed bits: one sign-extending moveli
       instruction suffices. */
    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MOVELI, 2,
                                  r_dst, imm & 0xFFFF));

  }
  else if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL)
  {
    /* imm fits in 32 signed bits: sign-extending moveli of the high
       half, then shl16insli to splice in the low half. */
    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MOVELI, 2,
                                  r_dst,
                                  (imm >> 16) & 0xFFFF));

    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHL16INSLI, 3,
                                  r_dst, r_dst,
                                  (imm & 0xFFFF)));

  }
  else
  {
    /* A general slower and rare case, use 4 instructions/bundles:
       moveli     r_dst, imm[63:48]
       shl16insli r_dst, imm[47:32]
       shl16insli r_dst, imm[31:16]
       shl16insli r_dst, imm[15: 0]
    */
    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MOVELI, 2,
                                  r_dst,
                                  (imm >> 48) & 0xFFFF));

    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHL16INSLI, 3,
                                  r_dst, r_dst,
                                  (imm >> 32) & 0xFFFF));

    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHL16INSLI, 3,
                                  r_dst, r_dst,
                                  (imm >> 16) & 0xFFFF));

    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHL16INSLI, 3,
                                  r_dst, r_dst,
                                  imm & 0xFFFF));
  }
  return p;
}
+
+/* Load imm to r_dst using exact 4 bundles. A special case of above
+   mkLoadImm(...). */
+static UChar *mkLoadImm_EXACTLY4 ( UChar * p, UInt r_dst, ULong imm )
+{
+  p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MOVELI, 2,
+                                r_dst,
+                                (imm >> 48) & 0xFFFF));
+
+  p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHL16INSLI, 3,
+                                r_dst, r_dst,
+                                (imm >> 32) & 0xFFFF));
+
+  p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHL16INSLI, 3,
+                                r_dst, r_dst,
+                                (imm >> 16) & 0xFFFF));
+
+  p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHL16INSLI, 3,
+                                r_dst, r_dst,
+                                (imm) & 0xFFFF));
+  return p;
+}
+
+/* Move r_dst to r_src */
+static UChar *mkMoveReg ( UChar * p, UInt r_dst, UInt r_src )
+{
+  vassert(r_dst < 0x40);
+  vassert(r_src < 0x40);
+
+  if (r_dst != r_src) {
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MOVE, 2,
+                                  r_dst, r_src));
+  }
+  return p;
+}
+
+/* Emit an instruction into buf and return the number of bytes used.
+   Note that buf is not the insn's final place, and therefore it is
+   imperative to emit position-independent code. */
+Int emit_TILEGXInstr ( Bool*  is_profInc,
+                       UChar* buf,
+                       Int    nbuf,
+                       TILEGXInstr* i,
+                       Bool   mode64,
+                       VexEndness endness_host,
+                       void*  disp_cp_chain_me_to_slowEP,
+                       void*  disp_cp_chain_me_to_fastEP,
+                       void*  disp_cp_xindir,
+                       void*  disp_cp_xassisted )
+{
+  Int instr_bytes = 0;
+  UChar *p = &buf[0];
+  UChar *ptmp = p;
+  vassert(nbuf >= 32);
+  vassert(!((Addr)p & 0x7));
+  vassert (mode64);
+
+  switch (i->tag) {
+  case GXin_MovCond: {
+
+    TILEGXRH *srcR = i->GXin.MovCond.srcR;
+    UInt condR = iregNo(i->GXin.MovCond.condR);
+    UInt dst = iregNo(i->GXin.MovCond.dst);
+
+    UInt srcL = iregNo(i->GXin.MovCond.srcL);
+
+    if (i->GXin.MovCond.cond == TILEGXcc_EZ) {
+      if (srcR->tag == GXrh_Reg) {
+        p = mkMoveReg(p, dst, iregNo(srcR->GXrh.Reg.reg));
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMOVEQZ, 3,
+                                      dst, condR, srcL));
+      } else {
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MOVELI, 2,
+                                      dst, srcR->GXrh.Imm.imm16));
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMOVEQZ, 3,
+                                      dst, condR, srcL));
+      }
+    } else {
+      vassert(0);
+    }
+
+    goto done;
+  }
+  case GXin_LI:
+
+    // Tilegx, load literal
+    p = mkLoadImm(p, iregNo(i->GXin.LI.dst), i->GXin.LI.imm);
+    goto done;
+
+  case GXin_Alu: {
+    TILEGXRH *srcR = i->GXin.Alu.srcR;
+    Bool immR = toBool(srcR->tag == GXrh_Imm);
+    UInt r_dst = iregNo(i->GXin.Alu.dst);
+    UInt r_srcL = iregNo(i->GXin.Alu.srcL);
+    UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->GXrh.Reg.reg);
+
+    switch (i->GXin.Alu.op) {
+      /*GXalu_ADD, GXalu_SUB, GXalu_AND, GXalu_OR, GXalu_NOR, GXalu_XOR */
+    case GXalu_ADD:
+      if (immR) {
+        vassert(srcR->GXrh.Imm.imm16 != 0x8000);
+        if (srcR->GXrh.Imm.syned)
+          /* addi */
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+                                        r_dst, r_srcL,
+                                        srcR->GXrh.Imm.imm16));
+        else
+          /* addiu, use shil16insli for tilegx  */
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHL16INSLI, 3,
+                                        r_dst, 63,
+                                        srcR->GXrh.Imm.imm16));
+      } else {
+        /* addu */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADD, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      }
+      break;
+    case GXalu_SUB:
+      if (immR) {
+        /* addi , but with negated imm */
+        vassert(srcR->GXrh.Imm.syned);
+        vassert(srcR->GXrh.Imm.imm16 != 0x8000);
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+                                      r_dst, r_srcL,
+                                      -srcR->GXrh.Imm.imm16));
+      } else {
+        /* subu */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SUB, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      }
+      break;
+    case GXalu_AND:
+      if (immR) {
+        /* andi */
+        vassert((srcR->GXrh.Imm.imm16 >> 8 == 0) ||
+                (srcR->GXrh.Imm.imm16 >> 8 == 0xFF));
+
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ANDI, 3,
+                                      r_dst, r_srcL,
+                                      srcR->GXrh.Imm.imm16));
+
+      } else {
+        /* and */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_AND, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      }
+      break;
+    case GXalu_OR:
+      if (immR) {
+        /* ori */
+        vassert((srcR->GXrh.Imm.imm16 >> 8 == 0) ||
+                (srcR->GXrh.Imm.imm16 >> 8 == 0xFF));
+
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ORI, 3,
+                                      r_dst, r_srcL,
+                                      srcR->GXrh.Imm.imm16));
+      } else {
+        /* or */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_OR, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      }
+      break;
+    case GXalu_NOR:
+      /* nor */
+      vassert(!immR);
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_NOR, 3,
+                                    r_dst, r_srcL,
+                                    r_srcR));
+      break;
+    case GXalu_XOR:
+      if (immR) {
+        /* xori */
+        vassert(srcR->GXrh.Imm.syned);
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_XORI, 3,
+                                      r_dst, r_srcL,
+                                      srcR->GXrh.Imm.imm16));
+      } else {
+        /* xor */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_XOR, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      }
+      break;
+
+    default:
+      goto bad;
+    }
+    goto done;
+  }
+
+  case GXin_Shft: {
+    TILEGXRH *srcR = i->GXin.Shft.srcR;
+    Bool sz32 = i->GXin.Shft.sz32;
+    Bool immR = toBool(srcR->tag == GXrh_Imm);
+    UInt r_dst = iregNo(i->GXin.Shft.dst);
+    UInt r_srcL = iregNo(i->GXin.Shft.srcL);
+    UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->GXrh.Reg.reg);
+
+    switch (i->GXin.Shft.op) {
+    case GXshft_SLL:
+      if (sz32) {
+        if (immR) {
+          UInt n = srcR->GXrh.Imm.imm16;
+          vassert(n >= 0 && n < 64);
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHLXI, 3,
+                                        r_dst, r_srcL,
+                                        srcR->GXrh.Imm.imm16));
+        } else {
+          /* shift variable */
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHLX, 3,
+                                        r_dst, r_srcL,
+                                        r_srcR));
+        }
+      } else {
+        if (immR) {
+          UInt n = srcR->GXrh.Imm.imm16;
+          vassert(n >= 0 && n < 64);
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHLI, 3,
+                                        r_dst, r_srcL,
+                                        srcR->GXrh.Imm.imm16));
+        } else {
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHL, 3,
+                                        r_dst, r_srcL,
+                                        r_srcR));
+        }
+      }
+      break;
+
+    case GXshft_SLL8x8:
+      if (immR) {
+        UInt n = srcR->GXrh.Imm.imm16;
+        vassert(n >= 0 && n < 64);
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_V1SHLI, 3,
+                                      r_dst, r_srcL,
+                                      srcR->GXrh.Imm.imm16));
+      } else {
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_V1SHL, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      }
+      break;
+
+    case GXshft_SRL8x8:
+      if (immR) {
+        UInt n = srcR->GXrh.Imm.imm16;
+        vassert(n >= 0 && n < 64);
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_V1SHRUI, 3,
+                                      r_dst, r_srcL,
+                                      srcR->GXrh.Imm.imm16));
+      } else {
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_V1SHRU, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      }
+      break;
+
+    case GXshft_SRL:
+      if (sz32) {
        // 32-bit logical right shift: SHRUXI (imm) / SHRUX (reg)
+        if (immR) {
+          UInt n = srcR->GXrh.Imm.imm16;
+          vassert(n >= 0 && n < 32);
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHRUXI, 3,
+                                        r_dst, r_srcL,
+                                        srcR->GXrh.Imm.imm16));
+        } else {
+          /* shift variable */
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHRUX, 3,
+                                        r_dst, r_srcL,
+                                        r_srcR));
+        }
+      } else {
        // 64-bit logical right shift: SHRUI (imm) / SHRU (reg)
+        if (immR) {
+          UInt n = srcR->GXrh.Imm.imm16;
+          vassert((n >= 0 && n < 64));
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHRUI, 3,
+                                        r_dst, r_srcL,
+                                        srcR->GXrh.Imm.imm16));
+        } else {
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHRU, 3,
+                                        r_dst, r_srcL,
+                                        r_srcR));
+        }
+      }
+      break;
+
+    case GXshft_SRA:
+      if (sz32) {
        // 32-bit arithmetic right shift: SHRSI (imm) / SHRS (reg)
+        if (immR) {
+          UInt n = srcR->GXrh.Imm.imm16;
+          vassert(n >= 0 && n < 64);
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHRSI, 3,
+                                        r_dst, r_srcL,
+                                        srcR->GXrh.Imm.imm16));
+
+        } else {
+          /* shift variable */
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHRS, 3,
+                                        r_dst, r_srcL,
+                                        r_srcR));
+        }
+      } else {
        // 64-bit arithmetic right shift: SHRSI (imm) / SHRS (reg)
+        if (immR) {
+
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHRSI, 3,
+                                        r_dst, r_srcL,
+                                        srcR->GXrh.Imm.imm16));
+        } else {
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_SHRS, 3,
+                                        r_dst, r_srcL,
+                                        r_srcR));
+        }
+      }
+      break;
+
+    default:
+      goto bad;
+    }
+
+    goto done;
+  }
+
+  case GXin_Unary: {
+    UInt r_dst = iregNo(i->GXin.Unary.dst);
+    UInt r_src = iregNo(i->GXin.Unary.src);
+
+    switch (i->GXin.Unary.op) {
+      /* GXun_CLZ, GXun_NOP */
+    case GXun_CLZ:  //clz
+
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CLZ, 2,
+                                    r_dst, r_src));
+      break;
+    case GXun_CTZ:  //ctz
+
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CTZ, 2,
+                                    r_dst, r_src));
+      break;
+
+    case GXun_NOP:
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_NOP, 0));
+      break;
+    }
+    goto done;
+  }
+
+  case GXin_Cmp: {
+
+    Bool syned = i->GXin.Cmp.syned;
+    UInt r_srcL = iregNo(i->GXin.Cmp.srcL);
+    UInt r_srcR = iregNo(i->GXin.Cmp.srcR);
+    UInt r_dst = iregNo(i->GXin.Cmp.dst);
+
+    switch (i->GXin.Cmp.cond) {
+    case TILEGXcc_EQ:
+
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPEQ, 3,
+                                    r_dst, r_srcL,
+                                    r_srcR));
+
+      break;
+
+    case TILEGXcc_NE:
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPNE, 3,
+                                    r_dst, r_srcL,
+                                    r_srcR));
+
+      break;
+    case TILEGXcc_LT:
+      /*  slt r_dst, r_srcL, r_srcR */
+
+      if (syned)
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPLTS, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      else
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPLTU, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+
+      break;
+    case TILEGXcc_LO:
+      /*  sltu r_dst, r_srcL, r_srcR */
+
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPLTU, 3,
+                                    r_dst, r_srcL,
+                                    r_srcR));
+
+      break;
+    case TILEGXcc_LE:
+      if (syned)
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPLES, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      else
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPLEU, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      break;
+    case TILEGXcc_LS:
+
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPLTU, 3,
+                                    r_dst, r_srcL,
+                                    r_srcR));
+      break;
+    default:
+      goto bad;
+    }
+    goto done;
+  }
+
+  case GXin_CmpI: {
+
+    TILEGXRH *srcR = i->GXin.CmpI.srcR;
+    Bool immR = toBool(srcR->tag == GXrh_Imm);
+    UInt r_dst = iregNo(i->GXin.CmpI.dst);
+    UInt r_srcL = iregNo(i->GXin.CmpI.srcL);
+    UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->GXrh.Reg.reg);
+
+    switch (i->GXin.CmpI.cond) {
+    case TILEGXcc_EQ8x8:
+      if (immR) {
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_V1CMPEQI, 3,
+                                      r_dst, r_srcL,
+                                      srcR->GXrh.Imm.imm16));
+      } else {
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_V1CMPEQ, 3,
+                                      r_dst, r_srcL,
+                                      r_srcR));
+      }
+      break;
+
+    case TILEGXcc_NE8x8:
+      if (immR) {
+        vassert(0);
+      } else {
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_V1CMPNE, 3,
+                                      r_dst, r_srcR,
+                                      r_srcL));
+      }
+      break;
+    default:
+      vassert(0);
+    }
+    goto done;
+    break;
+  }
+
+  case GXin_Bf: {
+
+    /* Bit Field */
+    UInt r_dst = iregNo(i->GXin.Bf.dst);
+    UInt r_src = iregNo(i->GXin.Bf.src);
+    UInt Start = i->GXin.Bf.Start;
+    UInt End   = i->GXin.Bf.End;
+
+    switch (i->GXin.Bf.op) {
+    case GXbf_EXTS:
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_BFEXTS, 4,
+                                    r_dst, r_src,
+                                    Start, End));
+
+      break;
+    case GXbf_EXTU:
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_BFEXTU, 4,
+                                    r_dst, r_src,
+                                    Start, End));
+
+      break;
+    case GXbf_INS:
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_BFINS, 4,
+                                    r_dst, r_src,
+                                    Start, End));
+
+      break;
+    default:
+      vassert(0);
+    }
+    goto done;
+    break;
+  }
+
+  case GXin_Acas: {
+
+    /* Atomic */
+    UInt sz =  i->GXin.Acas.sz;
+    UInt old = iregNo(i->GXin.Acas.old);
+    UInt addr= iregNo(i->GXin.Acas.addr);
+    UInt new = iregNo(i->GXin.Acas.new);
+
+    switch (i->GXin.Acas.op) {
+    case GXacas_CMPEXCH:
+      {
+        UInt exp = iregNo(i->GXin.Acas.exp);
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MTSPR, 2,
+                                      0x2780, exp));
+        if (sz == 8)
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPEXCH, 3,
+                                        old, addr, new));
+        else
+          p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_CMPEXCH4, 3,
+                                        old, addr, new));
+      }
+      break;
+
+    case GXacas_EXCH:
+      if (sz == 8)
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_EXCH, 3,
+                                      old, addr, new));
+      else
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_EXCH4, 3,
+                                      old, addr, new));
+      break;
+
+    case GXacas_FetchAnd:
+      if (sz == 8)
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_FETCHAND, 3,
+                                      old, addr, new));
+      else
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_FETCHAND4, 3,
+                                      old, addr, new));
+      break;
+
+    case GXacas_FetchAdd:
+      if (sz == 8)
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_FETCHADD, 3,
+                                      old, addr, new));
+      else
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_FETCHADD4, 3,
+                                      old, addr, new));
+      break;
+
+    case GXacas_FetchAddgez:
+      if (sz == 8)
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_FETCHADDGEZ, 3,
+                                      old, addr, new));
+      else
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_FETCHADDGEZ4, 3,
+                                      old, addr, new));
+      break;
+
+    case GXacas_FetchOr:
+      if (sz == 8)
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_FETCHOR, 3,
+                                      old, addr, new));
+      else
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_FETCHOR4, 3,
+                                      old, addr, new));
+      break;
+
+    default: vassert(0);
+    }
+    goto done;
+    break;
+  }
+
+  case GXin_Mul: {
+
+    /* Multiplication */
+    Bool syned = i->GXin.Mul.syned;
+    Bool widening = i->GXin.Mul.widening;
+    Bool sz32 = i->GXin.Mul.sz32;
+    UInt r_srcL = iregNo(i->GXin.Mul.srcL);
+    UInt r_srcR = iregNo(i->GXin.Mul.srcR);
+    UInt r_dst = iregNo(i->GXin.Mul.dst);
+
+    vassert(widening);  // always widen.
+    vassert(!sz32);   // always be 64 bits.
+
+    if (syned) {
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MUL_LS_LS, 3,
+                                    r_dst, r_srcL, r_srcR));
+    } else {
+      p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MUL_LU_LU, 3,
+                                    r_dst, r_srcL, r_srcR));
+    }
+    goto done;
+  }
+
+  case GXin_Call: {
+
+    /* Function Call. */
+    TILEGXCondCode cond = i->GXin.Call.cond;
+    UInt r_dst = 11;  /* using r11 as address temporary */
+
+    /* jump over the following insns if conditional. */
+    if (cond != TILEGXcc_AL) {
+      /* jmp fwds if !condition */
+      /* don't know how many bytes to jump over yet...
+         make space for a jump instruction + nop!!! and fill in later. */
+      ptmp = p;   /* fill in this bit later */
+      p += 8;
+    }
+
+    /* load target to r_dst */
+    p = mkLoadImm(p, r_dst, i->GXin.Call.target);
+
+    /* jalr %r_dst */
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_JALRP, 1,
+                                  r_dst));
+
+    /* Fix up the conditional jump, if there was one. */
+    if (cond != TILEGXcc_AL) {
+      UInt r_src = iregNo(i->GXin.Call.src);
+      Int delta = p - ptmp;
+
+      vassert(cond == TILEGXcc_EQ);
+
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_BEQZ, 2,
+                                          r_src, delta / 8));
+   }
+    goto done;
+  }
+
+  case GXin_XDirect: {
+    /* NB: what goes on here has to be very closely coordinated
+       with the chainXDirect_TILEGX and unchainXDirect_TILEGX below. */
+    /* We're generating chain-me requests here, so we need to be
+       sure this is actually allowed -- no-redir translations
+       can't use chain-me's.  Hence: */
+    vassert(disp_cp_chain_me_to_slowEP != NULL);
+    vassert(disp_cp_chain_me_to_fastEP != NULL);
+
+    /* Use ptmp for backpatching conditional jumps. */
+    ptmp = NULL;
+
+    /* First, if this is conditional, create a conditional
+       jump over the rest of it.  Or at least, leave a space for
+       it that we will shortly fill in. */
+    if (i->GXin.XDirect.cond != TILEGXcc_AL) {
+      vassert(i->GXin.XDirect.cond != TILEGXcc_NV);
+      ptmp = p;
+      p += 24;
+    }
+
+    /* Update the guest PC. */
+    /* move r11, dstGA */
+    /* st   amPC, r11  */
+    p = mkLoadImm_EXACTLY4(p, /*r*/ 11, (ULong)i->GXin.XDirect.dstGA);
+
+    p = do_load_or_store_machine_word(p, False /*!isLoad*/ , /*r*/ 11,
+                                      i->GXin.XDirect.amPC);
+
+    /* --- FIRST PATCHABLE BYTE follows --- */
+    /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're
+       calling to) backs up the return address, so as to find the
+       address of the first patchable byte.  So: don't change the
+       number of instructions (3) below. */
+    /* move r9, VG_(disp_cp_chain_me_to_{slowEP,fastEP}) */
+    /* jr  r11  */
+    void* disp_cp_chain_me
+      = i->GXin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
+      : disp_cp_chain_me_to_slowEP;
+    p = mkLoadImm_EXACTLY4(p, /*r*/ 11,
+                           (Addr)disp_cp_chain_me);
+    /* jalr r11 */
+    /* nop */
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_JALR, 1, 11));
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_NOP, 0));
+
+    /* --- END of PATCHABLE BYTES --- */
+
+    /* Fix up the conditional jump, if there was one. */
+    if (i->GXin.XDirect.cond != TILEGXcc_AL) {
+      Int delta = p - ptmp;
+      delta = delta / 8 - 3;
+
+      /* ld r11, COND_OFFSET(GuestSP=r50)
+         beqz r11, delta
+      */
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+                                          11, 50, COND_OFFSET()));
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_LD, 2,
+                                          11, 11));
+
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_BEQZ, 2,
+                                          11, delta));
+
+    }
+    goto done;
+  }
+
+  case GXin_XIndir: {
+    /* We're generating transfers that could lead indirectly to a
+       chain-me, so we need to be sure this is actually allowed --
+       no-redir translations are not allowed to reach normal
+       translations without going through the scheduler.  That means
+       no XDirects or XIndirs out from no-redir translations.
+       Hence: */
+    vassert(disp_cp_xindir != NULL);
+
+    /* Use ptmp for backpatching conditional jumps. */
+    ptmp = NULL;
+
+    /* First off, if this is conditional, create a conditional
+       jump over the rest of it. */
+    if (i->GXin.XIndir.cond != TILEGXcc_AL) {
+      vassert(i->GXin.XIndir.cond != TILEGXcc_NV);
+      ptmp = p;
+      p += 24;
+    }
+
+    /* Update the guest PC. */
+    /* st amPC, dstGA */
+    p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
+                                      iregNo(i->GXin.XIndir.dstGA),
+                                      i->GXin.XIndir.amPC);
+
+    /* move r11, VG_(disp_cp_xindir), 4 bundles. */
+    /* jalr r11 */
+    /* nop */
+    p = mkLoadImm_EXACTLY4(p, /*r*/ 11,
+                           (Addr)disp_cp_xindir);
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_JALR, 1, 11));
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_NOP, 0));
+
+    /* Fix up the conditional jump, if there was one. */
+    if (i->GXin.XIndir.cond != TILEGXcc_AL) {
+      Int delta = p - ptmp;
+      delta = delta / 8 - 3;
+      vassert(delta > 0 && delta < 40);
+
+      /* ld r11, COND_OFFSET($GuestSP)
+         beqz r11, delta  */
+
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+                                          11, 50, COND_OFFSET()));
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_LD, 2,
+                                          11, 11));
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_BEQZ, 2,
+                                          11, delta));
+    }
+    goto done;
+  }
+
+  case GXin_XAssisted: {
+    /* First off, if this is conditional, create a conditional jump
+       over the rest of it.  Or at least, leave a space for it that
+       we will shortly fill in. */
+    ptmp = NULL;
+    if (i->GXin.XAssisted.cond != TILEGXcc_AL) {
+      vassert(i->GXin.XAssisted.cond != TILEGXcc_NV);
+      ptmp = p;
+      p += 24;
+    }
+
+    /* Update the guest PC. */
+    /* st amPC, dstGA */
+    p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
+                                      iregNo(i->GXin.XIndir.dstGA),
+                                      i->GXin.XIndir.amPC);
+
+    UInt trcval = 0;
+    switch (i->GXin.XAssisted.jk) {
+    case Ijk_ClientReq:     trcval = VEX_TRC_JMP_CLIENTREQ;     break;
+    case Ijk_Sys_syscall:   trcval = VEX_TRC_JMP_SYS_SYSCALL;   break;
+    case Ijk_Yield:         trcval = VEX_TRC_JMP_YIELD;         break;
+    case Ijk_EmWarn:        trcval = VEX_TRC_JMP_EMWARN;        break;
+    case Ijk_EmFail:        trcval = VEX_TRC_JMP_EMFAIL;        break;
+    case Ijk_NoDecode:      trcval = VEX_TRC_JMP_NODECODE;      break;
+    case Ijk_InvalICache:   trcval = VEX_TRC_JMP_INVALICACHE;   break;
+    case Ijk_NoRedir:       trcval = VEX_TRC_JMP_NOREDIR;       break;
+    case Ijk_SigILL:        trcval = VEX_TRC_JMP_SIGILL;        break;
+    case Ijk_SigTRAP:       trcval = VEX_TRC_JMP_SIGTRAP;       break;
+    case Ijk_SigBUS:        trcval = VEX_TRC_JMP_SIGBUS;        break;
+    case Ijk_SigFPE_IntDiv: trcval = VEX_TRC_JMP_SIGFPE_INTDIV; break;
+    case Ijk_SigFPE_IntOvf: trcval = VEX_TRC_JMP_SIGFPE_INTOVF; break;
+    case Ijk_Boring:        trcval = VEX_TRC_JMP_BORING;        break;
+    case Ijk_Ret:
+      {
+        /* Tilegx "iret" instruction. */
+        trcval = VEX_TRC_JMP_BORING;
+        /* Interrupt return "iret", setup the jump address into EX_CONTRXT_0_0.
+           Read context_0_1 from guest_state */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+                                      51, 50, OFFSET_EX1));
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_LD, 2,
+                                      11, 51));
+        /* Write into host cpu's context_0_1 spr. */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MTSPR, 2,
+                                      0x2581, 11));
+        /* Read context_0_0 from guest_state */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+                                      51, 50, OFFSET_EX0));
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_LD, 2,
+                                      11, 51));
+        /* Write into host cpu's context_0_0 spr */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_MTSPR, 2,
+                                      0x2580, 11));
+        /* Update the guest PC  so branch to the iret target address
+           in EX_CONTEXT_0. */
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+                                      51, 50, 512));
+        p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ST, 2,
+                                      51, 11));
+      }
+      break;
+      /* We don't expect to see the following being assisted.
+         case Ijk_Call:
+         fallthrough */
+    default:
+      ppIRJumpKind(i->GXin.XAssisted.jk);
+      vpanic("emit_TILEGXInstr.GXin_XAssisted: unexpected jump kind");
+    }
+    vassert(trcval != 0);
+
+    /* moveli r50, trcval */
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDLI, 3, 50, 63, trcval));
+
+    /* move r11, VG_(disp_cp_xassisted) */
+
+    p = mkLoadImm_EXACTLY4(p, /*r*/ 11,
+                           (Addr)disp_cp_xassisted);
+    /* jalr r11
+       nop  */
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_JALR, 1, 11));
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_NOP, 0));
+
+    /* Fix up the conditional jump, if there was one. */
+    if (i->GXin.XAssisted.cond != TILEGXcc_AL) {
+      Int delta = p - ptmp;
+      delta = delta / 8 - 3;
+      vassert(delta > 0 && delta < 40);
+
+      /* ld  r11, COND_OFFSET($GuestSP)
+         beqz r11, delta
+         nop  */
+
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_ADDLI, 3,
+                                          11, 50, COND_OFFSET()));
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_LD, 2,
+                                          11, 11));
+      ptmp = mkInsnBin(ptmp, mkTileGxInsn(TILEGX_OPC_BEQZ, 2,
+                                          11, delta));
+    }
+    goto done;
+  }
+
+  case GXin_EvCheck: {
+    /* We generate:
+       ld      r11, amCounter
+       addi    r11, r11, -1
+       st      amCounter, r11
+       bgez    r11, nofail
+       ld      r11, amFailAddr
+       jalr    r11
+       nop
+       nofail:
+    */
+    UChar* p0 = p;
+    /* ld  r11, amCounter */
+    p = do_load_or_store_machine_word(p, True /*isLoad*/ , /*r*/ 11,
+                                      i->GXin.EvCheck.amCounter);
+
+    /* addi r11,r11,-1 */
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDI, 3,
+                                  11, 11, -1));
+
+    /* st amCounter, 11 */
+    p = do_load_or_store_machine_word(p, False /*!isLoad*/ , /*r*/ 11,
+                                      i->GXin.EvCheck.amCounter);
+
+    /* Reserve a bundle, fill it after the do_load_or_store_machine_word.
+       since we are not sure how many bundles it takes. */
+    UChar* p1 = p;
+    p += 8;
    /* bgez r11, nofail -- written into the reserved bundle at p1 below */
+
+    /* lw/ld r9, amFailAddr */
+    p = do_load_or_store_machine_word(p, True /*isLoad*/ , /*r*/ 11,
+                                      i->GXin.EvCheck.amFailAddr);
+
+    mkInsnBin(p1, mkTileGxInsn(TILEGX_OPC_BGEZ, 2,
+                               11, 2 + (p - p1) / 8));
+
+    /* jalr r11 */
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_JALR, 1, 11));
+
+    /* nop */
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_NOP, 0));
+
+    /* nofail: */
+
+    /* Crosscheck */
+    vassert(evCheckSzB_TILEGX() == (UChar*)p - (UChar*)p0);
+    goto done;
+  }
+
+  case GXin_ProfInc: {
+    /* Generate a code template to increment a memory location whose
+       address will be known later as an immediate value. This code
+       template will be patched once the memory location is known.
+       For now we do this with address == 0x65556555. */
+    /* 64-bit:
+       move r11, 0x6555655565556555ULL
+       ld r51, r11
+       addi r51, r51, 1
+       st  r11, r51
+    */
+
+    /* move r11, 0x6555655565556555ULL */
+    p = mkLoadImm_EXACTLY4(p, /*r*/ 11, 0x6555655565556555ULL);
+
+    /* ld r51, r11 */
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_LD, 2, 51, 11));
+
+    /* addi r51, r51, 1 */
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ADDI, 3, 51, 51, 1));
+
+    /* st r11, r51 */
+
+    p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_ST, 2, 11, 51));
+
+    /* Tell the caller .. */
+    vassert(!(*is_profInc));
+    *is_profInc = True;
+    goto done;
+  }
+
+  case GXin_Load: {
+    TILEGXAMode *am_addr = i->GXin.Load.src;
+    if (am_addr->tag == GXam_IR) {
+      UInt r_dst = iregNo(i->GXin.Load.dst);
+      UInt opc, sz = i->GXin.Load.sz;
+      if ((sz == 4 || sz == 8)) {
+        /* should be guaranteed to us by iselWordExpr_AMode */
+        vassert(0 == (am_addr->GXam.IR.index & 3));
+      }
+
+      // Note: Valgrind memory load has no sign-extend. We extend explicitly.
+      switch (sz) {
+      case 1:
+        opc = TILEGX_OPC_LD1U;
+        break;
+      case 2:
+        opc = TILEGX_OPC_LD2U;
+        break;
+      case 4:
+        opc = TILEGX_OPC_LD4U;
+        break;
+      case 8:
+        opc = TILEGX_OPC_LD;
+        break;
+      default:
+        goto bad;
+      }
+
+      p = doAMode_IR(p, opc, r_dst, am_addr);
+      goto done;
+
+    }
+  }
+
+  case GXin_Store: {
+    TILEGXAMode *am_addr = i->GXin.Store.dst;
+    if (am_addr->tag == GXam_IR) {
+      UInt r_src = iregNo(i->GXin.Store.src);
+      UInt opc, sz = i->GXin.Store.sz;
+      switch (sz) {
+      case 1:
+        opc = TILEGX_OPC_ST1;
+        break;
+      case 2:
+        opc = TILEGX_OPC_ST2;
+        break;
+      case 4:
+        opc = TILEGX_OPC_ST4;
+        break;
+      case 8:
+        opc = TILEGX_OPC_ST;
+        break;
+      default:
+        goto bad;
+      }
+
+      p = doAMode_IR(p, opc, r_src, am_addr);
+      goto done;
+    } else {
+      vassert(0);
+    }
+  }
+
+  case GXin_RdWrLR: {
+    UInt reg = iregNo(i->GXin.RdWrLR.gpr);
+    Bool wrLR = i->GXin.RdWrLR.wrLR;
+    if (wrLR)
+      p = mkMoveReg(p, 55, reg);
+    else
+      p = mkMoveReg(p, reg, 55);
+    goto done;
+  }
+
+  default:
+    goto bad;
+  }
+
+ bad:
+  vex_printf("\n=> ");
+  vpanic("emit_TILEGXInstr");
+  /*NOTREACHED*/
+
+ done:
+  instr_bytes = p - &buf[0];
+  /* Instr byte count must be modular of 8. */
+  vassert(0 == (instr_bytes & 0x7));
+
+  if (  0) {
+    Int k;
+    for (k = 0; k < instr_bytes; k += 8)
+      decode_and_display((ULong *)(Addr)&buf[k], 1, 0);
+  }
+
+  /* Limit the JIT size. */
+  vassert(instr_bytes <= 256);
+  return instr_bytes;
+}
+
+
+Int evCheckSzB_TILEGX ( void )
+{
+  UInt kInstrSize = 8;
+  return 10*kInstrSize;
+}
+
+VexInvalRange chainXDirect_TILEGX ( VexEndness endness_host,
+                                    void* place_to_chain,
+                                    const void* disp_cp_chain_me_EXPECTED,
+                                    const void* place_to_jump_to,
+                                    Bool  mode64 )
+{
+  vassert(mode64);
+  vassert(endness_host == VexEndnessLE);
+  /* What we're expecting to see is:
+     move r11, disp_cp_chain_me_to_EXPECTED
+     jalr r11
+     nop
+     viz
+     <32 bytes generated by mkLoadImm_EXACTLY4>
+     jalr r11
+     nop
+  */
+  UChar* p = (UChar*)place_to_chain;
+  vassert(0 == (7 & (HWord)p));
+
+#ifdef TILEGX_DEBUG
+  vex_printf("chainXDirect_TILEGX: disp_cp_chain_me_EXPECTED=%p\n",
+             disp_cp_chain_me_EXPECTED);
+  decode_and_display(p, 6, p);
+
+  vex_printf("chainXDirect_TILEGX: place_to_jump_to=%p\n",
+             place_to_jump_to);
+#endif
+
+  /* And what we want to change it to is either:
+     move r11, place_to_jump_to
+     jalr r11
+     nop
+     viz
+     <32 bytes generated by mkLoadImm_EXACTLY4>
+     jalr r11
+     nop
+
+     The replacement has the same length as the original.
+  */
+
+  p = mkLoadImm_EXACTLY4(p, /*r*/ 11,
+                         (Addr)place_to_jump_to);
+
+
+  p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_JALR, 1, 11));
+
+  p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_NOP, 0));
+
+#ifdef TILEGX_DEBUG
+  decode_and_display((UChar*)place_to_chain, 8, place_to_chain);
+#endif
+
+  Int len = p - (UChar*)place_to_chain;
+  vassert(len == 48); /* stay sane */
+  VexInvalRange vir = {(HWord)place_to_chain, len};
+  return vir;
+}
+
+VexInvalRange unchainXDirect_TILEGX ( VexEndness endness_host,
+                                      void* place_to_unchain,
+                                      const void* place_to_jump_to_EXPECTED,
+                                      const void* disp_cp_chain_me,
+                                      Bool  mode64 )
+{
+  vassert(mode64);
+  vassert(endness_host == VexEndnessLE);
+  /* What we're expecting to see is:
+     move r11, place_to_jump_to_EXPECTED
+     jalr r11
+     nop
+     viz
+     <32 bytes generated by mkLoadImm_EXACTLY4>
+     jalr r11
+     nop
+  */
+  UChar* p = (UChar*)place_to_unchain;
+  vassert(0 == (7 & (HWord)p));
+
+  /* And what we want to change it to is:
+     move r11, disp_cp_chain_me
+     jalr r11
+     nop
+     viz
+     <32 bytes generated by mkLoadImm_EXACTLY4>
+     jalr r11
+     nop
+     The replacement has the same length as the original.
+  */
+  p = mkLoadImm_EXACTLY4(p, /*r*/ 11,
+                         (Addr)disp_cp_chain_me);
+
+
+  p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_JALR, 1, 11));
+
+  p = mkInsnBin(p, mkTileGxInsn(TILEGX_OPC_NOP, 0));
+
+  Int len = p - (UChar*)place_to_unchain;
+  vassert(len == 48); /* stay sane */
+  VexInvalRange vir = {(HWord)place_to_unchain, len};
+  return vir;
+}
+
+/* Patch the counter address into a profile inc point, as previously
+   created by the GXin_ProfInc case for emit_TILEGXInstr. */
+VexInvalRange patchProfInc_TILEGX ( VexEndness endness_host,
+                                    void*  place_to_patch,
+                                    const ULong* location_of_counter,
+                                    Bool mode64 )
+{
+  vassert(mode64);
+  vassert(endness_host == VexEndnessLE);
+  UChar* p = (UChar*)place_to_patch;
+  vassert(0 == (7 & (HWord)p));
+
+  p = mkLoadImm_EXACTLY4(p, /*r*/ 11,
+                         (Addr)location_of_counter);
+
+  VexInvalRange vir = {(HWord)p, 32};
+  return vir;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                    host_tilegx_defs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_tilegx_defs.h b/VEX/priv/host_tilegx_defs.h
new file mode 100644
index 0000000..8788e1a
--- /dev/null
+++ b/VEX/priv/host_tilegx_defs.h
@@ -0,0 +1,562 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                host_tilegx_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2010-2013 Tilera Corp.
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+ /* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+#ifndef __VEX_HOST_TILEGX_DEFS_H
+#define __VEX_HOST_TILEGX_DEFS_H
+
+#include "tilegx_disasm.h"
+
+/* Num registers used for function calls */
+#define TILEGX_N_REGPARMS 10
+
+/* --------- Registers. --------- */
+
+/* The usual HReg abstraction.
+   There are 56 general purpose regs.
+*/
+
+#define ST_IN static inline
+
+ST_IN HReg hregTILEGX_R30 ( void ) { return mkHReg(False, HRcInt64,  30,  0); }
+ST_IN HReg hregTILEGX_R31 ( void ) { return mkHReg(False, HRcInt64,  31,  1); }
+ST_IN HReg hregTILEGX_R32 ( void ) { return mkHReg(False, HRcInt64,  32,  2); }
+ST_IN HReg hregTILEGX_R33 ( void ) { return mkHReg(False, HRcInt64,  33,  3); }
+ST_IN HReg hregTILEGX_R34 ( void ) { return mkHReg(False, HRcInt64,  34,  4); }
+ST_IN HReg hregTILEGX_R35 ( void ) { return mkHReg(False, HRcInt64,  35,  5); }
+ST_IN HReg hregTILEGX_R36 ( void ) { return mkHReg(False, HRcInt64,  36,  6); }
+ST_IN HReg hregTILEGX_R37 ( void ) { return mkHReg(False, HRcInt64,  37,  7); }
+ST_IN HReg hregTILEGX_R38 ( void ) { return mkHReg(False, HRcInt64,  38,  8); }
+ST_IN HReg hregTILEGX_R39 ( void ) { return mkHReg(False, HRcInt64,  39,  9); }
+
+ST_IN HReg hregTILEGX_R40 ( void ) { return mkHReg(False, HRcInt64,  40,  10); }
+ST_IN HReg hregTILEGX_R41 ( void ) { return mkHReg(False, HRcInt64,  41,  11); }
+ST_IN HReg hregTILEGX_R42 ( void ) { return mkHReg(False, HRcInt64,  42,  12); }
+ST_IN HReg hregTILEGX_R43 ( void ) { return mkHReg(False, HRcInt64,  43,  13); }
+ST_IN HReg hregTILEGX_R44 ( void ) { return mkHReg(False, HRcInt64,  44,  14); }
+ST_IN HReg hregTILEGX_R45 ( void ) { return mkHReg(False, HRcInt64,  45,  15); }
+ST_IN HReg hregTILEGX_R46 ( void ) { return mkHReg(False, HRcInt64,  46,  16); }
+ST_IN HReg hregTILEGX_R47 ( void ) { return mkHReg(False, HRcInt64,  47,  17); }
+ST_IN HReg hregTILEGX_R48 ( void ) { return mkHReg(False, HRcInt64,  48,  18); }
+ST_IN HReg hregTILEGX_R49 ( void ) { return mkHReg(False, HRcInt64,  49,  19); }
+
+ST_IN HReg hregTILEGX_R10 ( void ) { return mkHReg(False, HRcInt64,  10,  20); }
+ST_IN HReg hregTILEGX_R13 ( void ) { return mkHReg(False, HRcInt64,  13,  21); }
+ST_IN HReg hregTILEGX_R14 ( void ) { return mkHReg(False, HRcInt64,  14,  22); }
+ST_IN HReg hregTILEGX_R15 ( void ) { return mkHReg(False, HRcInt64,  15,  23); }
+ST_IN HReg hregTILEGX_R16 ( void ) { return mkHReg(False, HRcInt64,  16,  24); }
+ST_IN HReg hregTILEGX_R17 ( void ) { return mkHReg(False, HRcInt64,  17,  25); }
+ST_IN HReg hregTILEGX_R18 ( void ) { return mkHReg(False, HRcInt64,  18,  26); }
+ST_IN HReg hregTILEGX_R19 ( void ) { return mkHReg(False, HRcInt64,  19,  27); }
+ST_IN HReg hregTILEGX_R20 ( void ) { return mkHReg(False, HRcInt64,  20,  28); }
+
+ST_IN HReg hregTILEGX_R21 ( void ) { return mkHReg(False, HRcInt64,  21,  29); }
+ST_IN HReg hregTILEGX_R22 ( void ) { return mkHReg(False, HRcInt64,  22,  30); }
+ST_IN HReg hregTILEGX_R23 ( void ) { return mkHReg(False, HRcInt64,  23,  31); }
+ST_IN HReg hregTILEGX_R24 ( void ) { return mkHReg(False, HRcInt64,  24,  32); }
+ST_IN HReg hregTILEGX_R25 ( void ) { return mkHReg(False, HRcInt64,  25,  33); }
+ST_IN HReg hregTILEGX_R26 ( void ) { return mkHReg(False, HRcInt64,  26,  34); }
+ST_IN HReg hregTILEGX_R27 ( void ) { return mkHReg(False, HRcInt64,  27,  35); }
+ST_IN HReg hregTILEGX_R28 ( void ) { return mkHReg(False, HRcInt64,  28,  36); }
+ST_IN HReg hregTILEGX_R29 ( void ) { return mkHReg(False, HRcInt64,  29,  37); }
+
+ST_IN HReg hregTILEGX_R0 ( void ) { return mkHReg(False, HRcInt64,  0,  38); }
+ST_IN HReg hregTILEGX_R1 ( void ) { return mkHReg(False, HRcInt64,  1,  39); }
+ST_IN HReg hregTILEGX_R2 ( void ) { return mkHReg(False, HRcInt64,  2,  40); }
+ST_IN HReg hregTILEGX_R3 ( void ) { return mkHReg(False, HRcInt64,  3,  41); }
+ST_IN HReg hregTILEGX_R4 ( void ) { return mkHReg(False, HRcInt64,  4,  42); }
+ST_IN HReg hregTILEGX_R5 ( void ) { return mkHReg(False, HRcInt64,  5,  43); }
+ST_IN HReg hregTILEGX_R6 ( void ) { return mkHReg(False, HRcInt64,  6,  44); }
+ST_IN HReg hregTILEGX_R7 ( void ) { return mkHReg(False, HRcInt64,  7,  45); }
+ST_IN HReg hregTILEGX_R8 ( void ) { return mkHReg(False, HRcInt64,  8,  46); }
+ST_IN HReg hregTILEGX_R9 ( void ) { return mkHReg(False, HRcInt64,  9,  47); }
+
+ST_IN HReg hregTILEGX_R11 ( void ) { return mkHReg(False, HRcInt64,  11,  48); }
+ST_IN HReg hregTILEGX_R12 ( void ) { return mkHReg(False, HRcInt64,  12,  49); }
+ST_IN HReg hregTILEGX_R50 ( void ) { return mkHReg(False, HRcInt64,  50,  50); }
+ST_IN HReg hregTILEGX_R51 ( void ) { return mkHReg(False, HRcInt64,  51,  51); }
+ST_IN HReg hregTILEGX_R52 ( void ) { return mkHReg(False, HRcInt64,  52,  52); }
+ST_IN HReg hregTILEGX_R53 ( void ) { return mkHReg(False, HRcInt64,  53,  53); }
+ST_IN HReg hregTILEGX_R54 ( void ) { return mkHReg(False, HRcInt64,  54,  54); }
+ST_IN HReg hregTILEGX_R55 ( void ) { return mkHReg(False, HRcInt64,  55,  55); }
+ST_IN HReg hregTILEGX_R63 ( void ) { return mkHReg(False, HRcInt64,  63,  56); }
+
+extern void ppHRegTILEGX ( HReg );
+
+#define TILEGXGuestStatePointer()     hregTILEGX_R50()
+#define TILEGXStackFramePointer()     hregTILEGX_R52()
+#define TILEGXLinkRegister()          hregTILEGX_R55()
+#define TILEGXStackPointer()          hregTILEGX_R54()
+
+/* r0, r1, r2, r3 ... r9 */
+#define TILEGX_N_ARGREGS 10
+
+/* --------- Condition codes, Tilegx encoding. --------- */
+typedef enum {
+  TILEGXcc_EQ = 0,    /* equal */
+  TILEGXcc_NE = 1,    /* not equal */
+  TILEGXcc_HS = 2,    /* >=u (higher or same) */
+  TILEGXcc_LO = 3,    /* <u  (lower) */
+  TILEGXcc_MI = 4,    /* minus (negative) */
+  TILEGXcc_PL = 5,    /* plus (zero or +ve) */
+  TILEGXcc_VS = 6,    /* overflow */
+  TILEGXcc_VC = 7,    /* no overflow */
+  TILEGXcc_HI = 8,    /* >u   (higher) */
+  TILEGXcc_LS = 9,    /* <=u  (lower or same) */
+  TILEGXcc_GE = 10,   /* >=s (signed greater or equal) */
+  TILEGXcc_LT = 11,   /* <s  (signed less than) */
+  TILEGXcc_GT = 12,   /* >s  (signed greater) */
+  TILEGXcc_LE = 13,   /* <=s (signed less or equal) */
+  TILEGXcc_AL = 14,   /* always (unconditional) */
+  TILEGXcc_NV = 15,   /* never (unconditional): */
+  TILEGXcc_EQ8x8 = 16,/* V1 equal */
+  TILEGXcc_NE8x8 = 17,/* V1 not equal */
+  TILEGXcc_EZ = 18,   /* equal 0 */
+  TILEGXcc_NZ = 19,   /* not equal 0 */
+
+} TILEGXCondCode;
+
+/* --------- Memory address expressions (amodes). --------- */
+typedef enum {
+  GXam_IR,        /* Immediate (signed 16-bit) + Reg */
+} TILEGXAModeTag;
+
+typedef struct {
+  TILEGXAModeTag tag;
+  union {
+    struct {
+      HReg base;
+      Int index;
+    } IR;
+    struct {
+      HReg base;
+      HReg index;
+    } RR;
+  } GXam;
+} TILEGXAMode;
+
+extern TILEGXAMode *TILEGXAMode_IR ( Int, HReg );
+extern TILEGXAMode *TILEGXAMode_RR ( HReg, HReg );
+extern TILEGXAMode *dopyTILEGXAMode ( TILEGXAMode * );
+extern TILEGXAMode *nextTILEGXAModeFloat ( TILEGXAMode * );
+extern TILEGXAMode *nextTILEGXAModeInt ( TILEGXAMode * );
+extern void ppTILEGXAMode ( const TILEGXAMode * );
+
+/* --------- Operand, which can be a reg or a u16/s16. --------- */
+/* ("RH" == "Register or Halfword immediate") */
+typedef enum {
+  GXrh_Imm,
+  GXrh_Reg
+} TILEGXRHTag;
+
+typedef struct {
+  TILEGXRHTag tag;
+  union {
+    struct {
+      Bool syned;
+      UShort imm16;
+    } Imm;
+    struct {
+      HReg reg;
+    } Reg;
+  } GXrh;
+} TILEGXRH;
+
+extern void ppTILEGXRH ( const TILEGXRH * );
+extern TILEGXRH *TILEGXRH_Imm ( Bool, UShort );
+extern TILEGXRH *TILEGXRH_Reg ( HReg );
+
+/* --------- Reg or imm5 operands --------- */
+typedef enum {
+  TILEGXri5_I5 = 7,      /* imm5, 1 .. 31 only (no zero!) */
+  TILEGXri5_R      /* reg */
+} TILEGXRI5Tag;
+
+typedef struct {
+  TILEGXRI5Tag tag;
+  union {
+    struct {
+      UInt imm5;
+    } I5;
+    struct {
+      HReg reg;
+    } R;
+  } TILEGXri5;
+} TILEGXRI5;
+
+extern TILEGXRI5 *TILEGXRI5_I5 ( UInt imm5 );
+extern TILEGXRI5 *TILEGXRI5_R ( HReg );
+
+extern void ppTILEGXRI5 ( const TILEGXRI5 * );
+
+/* --------- Instructions. --------- */
+
+/*Tags for operations*/
+
+/* --------- */
+typedef enum {
+  GXun_CLZ,
+  GXun_CTZ,
+  GXun_NOP,
+} TILEGXUnaryOp;
+
+/* --------- */
+
+typedef enum {
+  GXalu_INVALID,
+  GXalu_ADD,
+  GXalu_SUB,
+  GXalu_AND,
+  GXalu_OR,
+  GXalu_NOR,
+  GXalu_XOR,
+} TILEGXAluOp;
+
+/* --------- */
+
+typedef enum {
+  GXshft_INVALID,
+  GXshft_SLL,
+  GXshft_SRL,
+  GXshft_SRA,
+  GXshft_SLL8x8,
+  GXshft_SRL8x8,
+
+} TILEGXShftOp;
+
+
+/* --------- */
+typedef enum {
+  GXbf_EXTS,
+  GXbf_EXTU,
+  GXbf_INS
+} TILEGXBfOp;
+
+/* --------- */
+
+
+/* --------- */
+typedef enum {
+  GXacas_CMPEXCH,
+  GXacas_EXCH,
+  GXacas_FetchAnd,
+  GXacas_FetchAdd,
+  GXacas_FetchAddgez,
+  GXacas_FetchOr,
+} TILEGXAcasOp;
+
+/* --------- */
+
+/* ----- Instruction tags ----- */
+typedef enum {
+  GXin_LI,        /* load word (32/64-bit) immediate (fake insn) */
+  GXin_Alu,    /* word add/sub/and/or/xor/nor/others? */
+  GXin_Shft,      /* word sll/srl/sra */
+  GXin_Unary,     /* clo, clz, nop, neg */
+
+  GXin_Cmp,    /* word compare (fake insn) */
+  GXin_CmpI,
+
+  GXin_Mul,    /* widening/non-widening multiply */
+
+  GXin_Call,      /* call to address in register */
+
+  GXin_XDirect,    /* direct transfer to GA */
+  GXin_XIndir,     /* indirect transfer to GA */
+  GXin_XAssisted,  /* assisted transfer to GA */
+  GXin_EvCheck,    /* Event check */
+  GXin_ProfInc,    /* 64-bit profile counter increment */ 
+
+  GXin_RdWrLR,    /* Read/Write Link Register */
+
+  GXin_Load,      /* zero-extending load a 8|16|32|64 bit value from mem */
+  GXin_Store,     /* store a 8|16|32|64 bit value to mem */
+
+  GXin_MovCond,
+  GXin_Bf,           /* Bitfield operations */
+  GXin_Acas,          /* Atomic Compare and swap. */
+
+} TILEGXInstrTag;
+
+/*--------- Structure for instructions ----------*/
+/* Destinations are on the LEFT (first operand) */
+
+typedef struct {
+  TILEGXInstrTag tag;
+  union {
+    /* Get a 32/64-bit literal into a register.
+       May turn into a number of real insns. */
+    struct {
+      HReg dst;
+      ULong imm;
+    } LI;
+    /* Integer add/sub/and/or/xor.  Limitations:
+       - For add, the immediate, if it exists, is a signed 16.
+       - For sub, the immediate, if it exists, is a signed 16
+       which may not be -32768, since no such instruction
+       exists, and so we have to emit addi with +32768, but
+       that is not possible.
+       - For and/or/xor,  the immediate, if it exists,
+       is an unsigned 16.
+    */
+    struct {
+      TILEGXAluOp op;
+      HReg dst;
+      HReg srcL;
+      TILEGXRH *srcR;
+    } Alu;
+
+    struct {
+      TILEGXBfOp op;
+      HReg dst;
+      HReg src;
+      UInt Start;
+      UInt End;
+    } Bf;
+
+    struct {
+      TILEGXAcasOp op;
+      HReg addr;
+      HReg exp;
+      HReg new;
+      HReg old;
+      UInt sz;
+    } Acas;
+
+    /* Integer shl/shr/sar.
+       Limitations: the immediate, if it exists,
+       is a signed 5-bit value between 1 and 31 inclusive.
+    */
+    struct {
+      TILEGXShftOp op;
+      Bool sz32;
+      HReg dst;
+      HReg srcL;
+      TILEGXRH *srcR;
+    } Shft;
+    /* Clz, Ctz, Clo, nop */
+    struct {
+      TILEGXUnaryOp op;
+      HReg dst;
+      HReg src;
+    } Unary;
+    /* Word compare. Fake instruction, used for basic block ending */
+    struct {
+      Bool syned;
+      Bool sz32;
+      HReg dst;
+      HReg srcL;
+      HReg srcR;
+      TILEGXCondCode cond;
+    } Cmp;
+    struct {
+      Bool syned;
+      Bool sz32;
+      HReg dst;
+      HReg srcL;
+      TILEGXRH *srcR;
+      TILEGXCondCode cond;
+    } CmpI;
+    struct {
+      Bool widening; //True => widening, False => non-widening
+      Bool syned; //signed/unsigned - meaningless if widening = False
+      Bool sz32;
+      HReg dst;
+      HReg srcL;
+      HReg srcR;
+    } Mul;
+    /* Pseudo-insn.  Call target (an absolute address), on given
+       condition (which could be Mcc_ALWAYS).  argiregs indicates
+       which of r0 .. r9
+       carries argument values for this call,
+       using a bit mask (1<<N is set if rN holds an arg, for N in
+       0 .. 9 inclusive).
+       If cond is != Mcc_ALWAYS, src is checked.
+       Otherwise, unconditional call */
+    struct {
+      TILEGXCondCode cond;
+      Addr64 target;
+      ULong argiregs;
+      HReg src;
+      RetLoc rloc; /* where the return value is saved. */
+    } Call;
+
+    /* Update the guest IP value, then exit requesting to chain
+       to it.  May be conditional.  Urr, use of Addr32 implicitly
+       assumes that wordsize(guest) == wordsize(host). */
+    struct {
+      Addr64         dstGA;     /* next guest address */
+      TILEGXAMode*   amPC;      /* amode in guest state for PC */
+      TILEGXCondCode cond;      /* can be TILEGXcc_AL */
+      Bool           toFastEP;  /* chain to the slow or fast point? */
+    } XDirect;
+
+    /* Boring transfer to a guest address not known at JIT time.
+       Not chainable.  May be conditional. */
+    struct {
+      HReg           dstGA;
+      TILEGXAMode*   amPC;
+      TILEGXCondCode cond; /* can be TILEGXcc_AL */
+    } XIndir;
+
+    /* Assisted transfer to a guest address, most general case.
+       Not chainable.  May be conditional. */
+    struct {
+      HReg           dstGA;
+      TILEGXAMode*   amPC;
+      TILEGXCondCode cond; /* can be TILEGXcc_AL */
+      IRJumpKind     jk;
+    } XAssisted;
+
+    struct {
+      TILEGXAMode* amCounter;
+      TILEGXAMode* amFailAddr;
+    } EvCheck;
+
+    struct {
+      /* No fields.  The address of the counter to inc is
+         installed later, post-translation, by patching it in,
+         as it is not known at translation time. */
+    } ProfInc;
+    /* Zero extending loads.  Dst size is host word size */
+    struct {
+      UChar sz;   /* 1|2|4|8 */
+      HReg dst;
+      TILEGXAMode *src;
+    } Load;
+    /* 64/32/16/8 bit stores */
+    struct {
+      UChar sz;   /* 1|2|4|8 */
+      TILEGXAMode *dst;
+      HReg src;
+    } Store;
+    /* Read/Write Link Register */
+    struct {
+      Bool wrLR;
+      HReg gpr;
+    } RdWrLR;
+    struct {
+      HReg dst;
+      HReg srcL;
+      TILEGXRH *srcR;
+      HReg condR;
+      TILEGXCondCode cond;
+    } MovCond;
+  } GXin;
+} TILEGXInstr;
+extern TILEGXInstr *TILEGXInstr_LI ( HReg, ULong );
+extern TILEGXInstr *TILEGXInstr_Alu ( TILEGXAluOp, HReg, HReg, TILEGXRH * );
+extern TILEGXInstr *TILEGXInstr_Shft ( TILEGXShftOp, Bool sz32, HReg, HReg,
+                                       TILEGXRH * );
+extern TILEGXInstr *TILEGXInstr_Unary ( TILEGXUnaryOp op, HReg dst, HReg src );
+extern TILEGXInstr *TILEGXInstr_Cmp ( Bool, Bool, HReg, HReg, HReg,
+                                      TILEGXCondCode );
+extern TILEGXInstr *TILEGXInstr_CmpI ( Bool, Bool, HReg, HReg, TILEGXRH *,
+                                       TILEGXCondCode );
+extern TILEGXInstr *TILEGXInstr_Bf ( TILEGXBfOp op, HReg dst, HReg src,
+                                     UInt Start, UInt End );
+extern TILEGXInstr *TILEGXInstr_Acas ( TILEGXAcasOp op, HReg old, HReg addr,
+                                       HReg exp, HReg new, UInt sz );
+extern TILEGXInstr *TILEGXInstr_Mul ( Bool syned, Bool hi32, Bool sz32, HReg,
+                                      HReg, HReg );
+extern TILEGXInstr *TILEGXInstr_Div ( Bool syned, Bool sz32, HReg, HReg );
+extern TILEGXInstr *TILEGXInstr_Madd ( Bool, HReg, HReg );
+extern TILEGXInstr *TILEGXInstr_Msub ( Bool, HReg, HReg );
+
+extern TILEGXInstr *TILEGXInstr_Load ( UChar sz, HReg dst, TILEGXAMode * src );
+
+extern TILEGXInstr *TILEGXInstr_Store ( UChar sz, TILEGXAMode * dst, HReg src );
+
+extern TILEGXInstr *TILEGXInstr_LoadL ( UChar sz, HReg dst, TILEGXAMode * src );
+
+extern TILEGXInstr *TILEGXInstr_StoreC ( UChar sz, TILEGXAMode * dst, HReg src );
+
+extern TILEGXInstr *TILEGXInstr_Call ( TILEGXCondCode, Addr64, ULong, HReg );
+extern TILEGXInstr *TILEGXInstr_CallAlways ( TILEGXCondCode, Addr64, ULong );
+extern TILEGXInstr *TILEGXInstr_XDirect ( Addr64 dstGA, TILEGXAMode* amPC,
+                                          TILEGXCondCode cond, Bool toFastEP );
+extern TILEGXInstr *TILEGXInstr_XIndir ( HReg dstGA, TILEGXAMode* amPC,
+                                         TILEGXCondCode cond );
+extern TILEGXInstr *TILEGXInstr_XAssisted ( HReg dstGA, TILEGXAMode* amPC,
+                                            TILEGXCondCode cond, IRJumpKind jk );
+extern TILEGXInstr *TILEGXInstr_EvCheck ( TILEGXAMode* amCounter,
+                                          TILEGXAMode* amFailAddr );
+extern TILEGXInstr* TILEGXInstr_ProfInc (void);
+
+extern TILEGXInstr *TILEGXInstr_Goto ( IRJumpKind, TILEGXCondCode,
+                                       TILEGXRH * dst, HReg );
+extern TILEGXInstr *TILEGXInstr_GotoAlways ( IRJumpKind, TILEGXRH * );
+extern TILEGXInstr *TILEGXInstr_RdWrLR ( Bool wrLR, HReg gpr );
+extern TILEGXInstr *TILEGXInstr_MovCond ( HReg dst, HReg srcL, TILEGXRH * src,
+                                          HReg condR, TILEGXCondCode cond );
+extern void ppTILEGXInstr ( const TILEGXInstr * );
+
+/* Some functions that insulate the register allocator from details
+   of the underlying instruction set. */
+extern void getRegUsage_TILEGXInstr ( HRegUsage *, TILEGXInstr *);
+extern void mapRegs_TILEGXInstr ( HRegRemap *, TILEGXInstr *);
+extern Bool isMove_TILEGXInstr ( TILEGXInstr *, HReg *, HReg * );
+extern Int  emit_TILEGXInstr ( Bool*, UChar*, Int, TILEGXInstr*, Bool, VexEndness,
+                               void*, void*, void*, void* );
+extern void genSpill_TILEGX ( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2,
+                              HReg rreg, Int offset );
+extern void genReload_TILEGX ( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2,
+                               HReg rreg, Int offset );
+
+extern const RRegUniverse* getRRegUniverse_TILEGX ( void );
+
+extern HInstrArray *iselSB_TILEGX ( const IRSB*, VexArch,
+                                    const VexArchInfo*,
+                                    const VexAbiInfo*,
+                                    Int, Int, Bool, Bool, Addr);
+extern const HChar *showTILEGXCondCode ( TILEGXCondCode cond );
+extern Int evCheckSzB_TILEGX (void);
+extern VexInvalRange chainXDirect_TILEGX ( VexEndness endness_host,
+                                           void* place_to_chain,
+                                           const void* disp_cp_chain_me_EXPECTED,
+                                           const void* place_to_jump_to,
+                                           Bool  mode64 );
+extern VexInvalRange unchainXDirect_TILEGX ( VexEndness endness_host,
+                                             void* place_to_unchain,
+                                             const void* place_to_jump_to_EXPECTED,
+                                             const void* disp_cp_chain_me,
+                                             Bool  mode64 );
+extern VexInvalRange patchProfInc_TILEGX ( VexEndness endness_host,
+                                           void*  place_to_patch,
+                                           const ULong* location_of_counter,
+                                           Bool  mode64 );
+
+extern Int decode_and_display ( tilegx_bundle_bits *p, Int count, ULong pc );
+
+#endif  /* __VEX_HOST_TILEGX_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                  host_tilegx_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_tilegx_isel.c b/VEX/priv/host_tilegx_isel.c
new file mode 100644
index 0000000..7e4e6eb
--- /dev/null
+++ b/VEX/priv/host_tilegx_isel.c
@@ -0,0 +1,1863 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                host_tilegx_isel.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2010-2013 Tilera Corp.
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+  02110-1301, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+ /* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "host_generic_regs.h"
+#include "host_tilegx_defs.h"
+#include "tilegx_disasm.h"
+
+/*---------------------------------------------------------*/
+/*--- Register Usage Conventions                        ---*/
+/*---------------------------------------------------------*/
+
+/* GPR register class for tilegx */
+#define HRcGPR()     HRcInt64
+
+/* guest_COND offset. */
+#define COND_OFFSET()    (608)
+
+/*---------------------------------------------------------*/
+/*--- ISelEnv                                           ---*/
+/*---------------------------------------------------------*/
+
+/* This carries around:
+
+   - A mapping from IRTemp to IRType, giving the type of any IRTemp we
+   might encounter.  This is computed before insn selection starts,
+   and does not change.
+
+   - A mapping from IRTemp to HReg.  This tells the insn selector
+   which virtual register(s) are associated with each IRTemp
+   temporary.  This is computed before insn selection starts, and
+   does not change.  We expect this mapping to map precisely the
+   same set of IRTemps as the type mapping does.
+
+   - vregmap   holds the primary register for the IRTemp.
+   - vregmapHI holds the secondary register for the IRTemp,
+   if any is needed.  That's only for Ity_I64 temps
+   in 32 bit mode or Ity_I128 temps in 64-bit mode.
+
+   - The name of the vreg in which we stash a copy of the link reg,
+   so helper functions don't kill it.
+
+   - The code array, that is, the insns selected so far.
+
+   - A counter, for generating new virtual registers.
+
+   - The host subarchitecture we are selecting insns for.
+   This is set at the start and does not change.
+
+   - A Bool to tell us if the host is 32 or 64bit.
+   This is set at the start and does not change.
+
+   - An IRExpr*, which may be NULL, holding the IR expression (an
+   IRRoundingMode-encoded value) to which the FPU's rounding mode
+   was most recently set.  Setting to NULL is always safe.  Used to
+   avoid redundant settings of the FPU's rounding mode, as
+   described in set_FPU_rounding_mode below.
+
+   - A VexMiscInfo*, needed for knowing how to generate
+   function calls for this target
+*/
+typedef struct {
+  IRTypeEnv *type_env;
+
+  HReg *vregmap;
+
+  Int n_vregmap;
+
+  HInstrArray *code;
+
+  Int vreg_ctr;
+
+  UInt hwcaps;
+
+  Bool mode64;
+
+  Bool   chainingAllowed;
+
+  Addr64 max_ga;
+
+  IRExpr *previous_rm;
+
+  VexAbiInfo *vbi;
+} ISelEnv;
+
+static HReg lookupIRTemp ( ISelEnv * env, IRTemp tmp )
+{
+  vassert(tmp >= 0);
+  vassert(tmp < env->n_vregmap);
+  return env->vregmap[tmp];
+}
+
+static void addInstr ( ISelEnv * env, TILEGXInstr * instr )
+{
+  addHInstr(env->code, instr);
+  if (vex_traceflags & VEX_TRACE_VCODE) {
+    ppTILEGXInstr(instr);
+    vex_printf("\n");
+  }
+}
+
+static HReg newVRegI ( ISelEnv * env )
+{
+  HReg reg = mkHReg(True /*virtual R*/, HRcGPR(), 0, env->vreg_ctr);
+  env->vreg_ctr++;
+  return reg;
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Forward declarations                        ---*/
+/*---------------------------------------------------------*/
+
+/* These are organised as iselXXX and iselXXX_wrk pairs.  The
+   iselXXX_wrk do the real work, but are not to be called directly.
+   For each XXX, iselXXX calls its iselXXX_wrk counterpart, then
+   checks that all returned registers are virtual.  You should not
+   call the _wrk version directly.
+*/
+/* Compute an I8/I16/I32/I64 into a RH (reg-or-halfword-immediate).
+   It's important to specify whether the immediate is to be regarded
+   as signed or not.  If yes, this will never return -32768 as an
+   immediate; this guarantees that all signed immediates that are
+   returned can have their sign inverted if need be.
+*/
+static TILEGXRH *iselWordExpr_RH_wrk ( ISelEnv * env, Bool syned, IRExpr * e );
+static TILEGXRH *iselWordExpr_RH ( ISelEnv * env, Bool syned, IRExpr * e );
+
+static TILEGXRH *iselWordExpr_RH6u_wrk ( ISelEnv * env, IRExpr * e );
+static TILEGXRH *iselWordExpr_RH6u ( ISelEnv * env, IRExpr * e );
+
+/* compute an I8/I16/I32/I64 into a GPR*/
+static HReg iselWordExpr_R_wrk ( ISelEnv * env, IRExpr * e );
+static HReg iselWordExpr_R ( ISelEnv * env, IRExpr * e );
+
+/* compute an I64 into an AMode. */
+static TILEGXAMode *iselWordExpr_AMode_wrk ( ISelEnv * env, IRExpr * e,
+                                             IRType xferTy );
+static TILEGXAMode *iselWordExpr_AMode ( ISelEnv * env, IRExpr * e,
+                                         IRType xferTy );
+
+static TILEGXCondCode iselCondCode_wrk ( ISelEnv * env, IRExpr * e );
+static TILEGXCondCode iselCondCode ( ISelEnv * env, IRExpr * e );
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Misc helpers                                ---*/
+/*---------------------------------------------------------*/
+
+/* Make an int reg-reg move. */
+static TILEGXInstr *mk_iMOVds_RR ( HReg r_dst, HReg r_src )
+{
+  vassert(hregClass(r_dst) == hregClass(r_src));
+  vassert(hregClass(r_src) == HRcInt32 || hregClass(r_src) == HRcInt64);
+  return TILEGXInstr_Alu(GXalu_OR, r_dst, r_src, TILEGXRH_Reg(r_src));
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Function call helpers                       ---*/
+/*---------------------------------------------------------*/
+
+/* Used only in doHelperCall. See big comment in doHelperCall
+   handling of register-parameter args.  This function figures out
+   whether evaluation of an expression might require use of a fixed
+   register.
+*/
+static Bool mightRequireFixedRegs ( IRExpr * e )
+{
+  switch (e->tag) {
+  case Iex_RdTmp:
+  case Iex_Const:
+  case Iex_Get:
+    return False;
+  default:
+    return True;
+  }
+}
+
+/* Do a complete function call.  guard is a Ity_Bit expression
+   indicating whether or not the call happens.  If guard==NULL, the
+   call is unconditional. */
+
+static void doHelperCall ( ISelEnv * env, IRExpr * guard, IRCallee * cee,
+                           IRExpr ** args, IRType retTy )
+{
+  TILEGXCondCode cc;
+  HReg argregs[TILEGX_N_REGPARMS];
+  HReg tmpregs[TILEGX_N_REGPARMS];
+  Bool go_fast;
+  Long  n_args, i, argreg;
+  ULong argiregs;
+  ULong target;
+  HReg src = INVALID_HREG;
+
+
+  UInt nVECRETs = 0;
+  UInt nBBPTRs  = 0;
+
+  /* TILEGX calling convention: up to 10 registers (r0 ... r9)
+     are allowed to be used for passing integer arguments. They correspond
+     to regs GPR0 ... GPR9. */
+
+  /* Note that the cee->regparms field is meaningless on ARM64 hosts
+     (since there is only one calling convention) and so we always
+     ignore it. */
+
+  n_args = 0;
+  for (i = 0; args[i]; i++) {
+    n_args++;
+    IRExpr* arg = args[i];
+    if (UNLIKELY(arg->tag == Iex_VECRET)) {
+      nVECRETs++;
+    } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+      nBBPTRs++;
+    }
+  }
+
+  if (nVECRETs || nBBPTRs)
+    vex_printf("nVECRETs=%d, nBBPTRs=%d\n",
+               nVECRETs, nBBPTRs);
+
+  if (TILEGX_N_REGPARMS < n_args) {
+    vpanic("doHelperCall(TILEGX): cannot currently handle > 10 args");
+  }
+  argregs[0] = hregTILEGX_R0();
+  argregs[1] = hregTILEGX_R1();
+  argregs[2] = hregTILEGX_R2();
+  argregs[3] = hregTILEGX_R3();
+  argregs[4] = hregTILEGX_R4();
+  argregs[5] = hregTILEGX_R5();
+  argregs[6] = hregTILEGX_R6();
+  argregs[7] = hregTILEGX_R7();
+  argregs[8] = hregTILEGX_R8();
+  argregs[9] = hregTILEGX_R9();
+  argiregs = 0;
+
+  for (i = 0; i < TILEGX_N_REGPARMS; i++)
+    tmpregs[i] = INVALID_HREG;
+
+  /* First decide which scheme (slow or fast) is to be used.  First
+     assume the fast scheme, and select slow if any contraindications
+     (wow) appear. */
+
+  go_fast = True;
+
+  if (guard) {
+    if (guard->tag == Iex_Const && guard->Iex.Const.con->tag == Ico_U1
+        && guard->Iex.Const.con->Ico.U1 == True) {
+      /* unconditional */
+    } else {
+      /* Not manifestly unconditional -- be conservative. */
+      go_fast = False;
+    }
+  }
+
+  if (go_fast) {
+    for (i = 0; i < n_args; i++) {
+      if (mightRequireFixedRegs(args[i])) {
+        go_fast = False;
+        break;
+      }
+    }
+  }
+
+  /* At this point the scheme to use has been established.  Generate
+     code to get the arg values into the argument rregs. */
+  if (go_fast) {
+    /* FAST SCHEME */
+    argreg = 0;
+
+    for (i = 0; i < n_args; i++) {
+      vassert(argreg < TILEGX_N_REGPARMS);
+      vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32 ||
+              typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
+
+      argiregs |= (1 << (argreg));
+      addInstr(env, mk_iMOVds_RR(argregs[argreg],
+                                 iselWordExpr_R(env,
+                                                args[i])));
+      argreg++;
+    }
+    /* Fast scheme only applies for unconditional calls.  Hence: */
+    cc = TILEGXcc_AL;
+  } else {
+    /* SLOW SCHEME; move via temporaries */
+    argreg = 0;
+
+    for (i = 0; i < n_args; i++) {
+      vassert(argreg < TILEGX_N_REGPARMS);
+      vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32
+              || typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
+      tmpregs[argreg] = iselWordExpr_R(env, args[i]);
+      argreg++;
+    }
+
+    /* Now we can compute the condition.  We can't do it earlier
+       because the argument computations could trash the condition
+       codes.  Be a bit clever to handle the common case where the
+       guard is 1:Bit. */
+    cc = TILEGXcc_AL;
+    if (guard) {
+      if (guard->tag == Iex_Const && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+        /* unconditional -- do nothing */
+      } else {
+        cc = iselCondCode(env, guard);
+        src = iselWordExpr_R(env, guard);
+      }
+    }
+    /* Move the args to their final destinations. */
+    for (i = 0; i < argreg; i++) {
+      if (hregIsInvalid(tmpregs[i]))  // Skip invalid regs
+        continue;
+      /* None of these insns, including any spill code that might
+         be generated, may alter the condition codes. */
+      argiregs |= (1 << (i));
+      addInstr(env, mk_iMOVds_RR(argregs[i], tmpregs[i]));
+    }
+  }
+
+  target = (Addr)(cee->addr);
+
+  /* Finally, the call itself. */
+  if (cc == TILEGXcc_AL)
+    addInstr(env, TILEGXInstr_CallAlways(cc, target, argiregs));
+  else
+    addInstr(env, TILEGXInstr_Call(cc, target, argiregs, src));
+}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expression auxiliaries              ---*/
+/*---------------------------------------------------------*/
+
+/* --------------------- AMODEs --------------------- */
+
+/* Return an AMode which computes the value of the specified
+   expression, possibly also adding insns to the code list as a
+   result.  The expression may only be a word-size one.
+*/
+
+static Bool uInt_fits_in_16_bits ( UInt u )
+{
+  Int i = u & 0xFFFF;
+  i <<= 16;
+  i >>= 16;
+  return toBool(u == (UInt) i);
+}
+
+static Bool sane_AMode ( ISelEnv * env, TILEGXAMode * am )
+{
+  if (am->tag == GXam_IR)
+    return toBool(hregClass(am->GXam.IR.base) == HRcGPR() &&
+                  hregIsVirtual(am->GXam.IR.base) &&
+                  uInt_fits_in_16_bits(am->GXam.IR.index));
+
+  vpanic("sane_AMode: unknown tilegx amode tag");
+}
+
+static TILEGXAMode *iselWordExpr_AMode ( ISelEnv * env, IRExpr * e,
+                                         IRType xferTy )
+{
+  TILEGXAMode *am = iselWordExpr_AMode_wrk(env, e, xferTy);
+  vassert(sane_AMode(env, am));
+  return am;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static TILEGXAMode *iselWordExpr_AMode_wrk ( ISelEnv * env, IRExpr * e,
+                                             IRType xferTy )
+{
+  IRType ty = typeOfIRExpr(env->type_env, e);
+
+  vassert(ty == Ity_I64);
+  /* Add64(expr,i), where i == sign-extend of (i & 0xFFFF) */
+  if (e->tag == Iex_Binop
+      && e->Iex.Binop.op == Iop_Add64
+      && e->Iex.Binop.arg2->tag == Iex_Const
+      && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U64
+      && uInt_fits_in_16_bits(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)) {
+
+    return TILEGXAMode_IR((Long) e->Iex.Binop.arg2->Iex.Const.con->Ico.U64,
+                          iselWordExpr_R(env, e->Iex.Binop.arg1));
+  }
+
+  /* Doesn't match anything in particular.  Generate it into
+     a register and use that. */
+  return TILEGXAMode_IR(0, iselWordExpr_R(env, e));
+}
+
+
+
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64/32/16/8 bit)        ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for an integer-typed expression, and add them to the
+   code list.  Return a reg holding the result.  This reg will be a
+   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
+   want to modify it, ask for a new vreg, copy it in there, and modify
+   the copy.  The register allocator will do its best to map both
+   add vregs to the same real register, so the copies will often disappear
+   later in the game.
+
+   This should handle expressions of 64, 32, 16 and 8-bit type.
+   All results are returned in a  64bit register.
+*/
+static HReg iselWordExpr_R ( ISelEnv * env, IRExpr * e )
+{
+  HReg r = iselWordExpr_R_wrk(env, e);
+  /* sanity checks ... */
+
+  vassert(hregClass(r) == HRcGPR());
+  vassert(hregIsVirtual(r));
+  return r;
+}
+
/* DO NOT CALL THIS DIRECTLY ! */
/* Worker for iselWordExpr_R: select instructions for an integer-typed
   expression and return a virtual register holding the result (held
   widened in a 64-bit register).  Any expression shape not handled
   falls through to 'irreducible', which panics. */
static HReg iselWordExpr_R_wrk ( ISelEnv * env, IRExpr * e )
{
  IRType ty = typeOfIRExpr(env->type_env, e);
  vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 ||
          ty == Ity_I1 || ty == Ity_I64);

  switch (e->tag) {
    /* --------- TEMP --------- */
  case Iex_RdTmp:
    /* A temporary already lives in a vreg; just hand that back. */
    return lookupIRTemp(env, e->Iex.RdTmp.tmp);

    /* --------- LOAD --------- */
  case Iex_Load: {
    HReg r_dst = newVRegI(env);
    TILEGXAMode *am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, ty);

    /* NOTE(review): the endianness check happens after the amode
       has already been selected, so address-computation insns may
       have been emitted before bailing out here.  Harmless in
       practice because 'irreducible' panics. */
    if (e->Iex.Load.end != Iend_LE
        && e->Iex.Load.end != Iend_BE)
      goto irreducible;

    addInstr(env, TILEGXInstr_Load(toUChar(sizeofIRType(ty)),
                                   r_dst, am_addr));
    return r_dst;
    break;
  }
    /* --------- BINARY OP --------- */
  case Iex_Binop: {
    TILEGXAluOp aluOp;
    TILEGXShftOp shftOp;

    /* First, try to map the IR op onto a plain ALU operation. */
    switch (e->Iex.Binop.op) {

    case Iop_Add8:
    case Iop_Add16:
    case Iop_Add32:
    case Iop_Add64:
      aluOp = GXalu_ADD;
      break;

    case Iop_Sub8:
    case Iop_Sub16:
    case Iop_Sub32:
    case Iop_Sub64:
      aluOp = GXalu_SUB;
      break;

    case Iop_And8:
    case Iop_And16:
    case Iop_And32:
    case Iop_And64:
      aluOp = GXalu_AND;
      break;

    case Iop_Or8:
    case Iop_Or16:
    case Iop_Or32:
    case Iop_Or64:
      aluOp = GXalu_OR;
      break;

    case Iop_Xor8:
    case Iop_Xor16:
    case Iop_Xor32:
    case Iop_Xor64:
      aluOp = GXalu_XOR;
      break;

    default:
      /* Not a simple ALU op; try the later pattern groups. */
      aluOp = GXalu_INVALID;
      break;
    }

    /* For commutative ops we assume any literal
       values are on the second operand. */
    if (aluOp != GXalu_INVALID) {
      HReg r_dst = newVRegI(env);
      HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
      TILEGXRH *ri_srcR = NULL;
      /* get right arg into an RH, in the appropriate way */
      switch (aluOp) {
      case GXalu_ADD:
      case GXalu_SUB:
        ri_srcR = iselWordExpr_RH(env, True /*signed */ ,
                                  e->Iex.Binop.arg2);
        break;
      case GXalu_AND:
      case GXalu_OR:
      case GXalu_XOR:
        /* NOTE(review): logical ops also request a *signed*
           immediate here; presumably fine for the tilegx encodings,
           but worth confirming against the emitter. */
        ri_srcR = iselWordExpr_RH(env, True /*signed */,
                                  e->Iex.Binop.arg2);
        break;
      default:
        vpanic("iselWordExpr_R_wrk-aluOp-arg2");
      }
      addInstr(env, TILEGXInstr_Alu(aluOp, r_dst, r_srcL, ri_srcR));
      return r_dst;
    }

    /* a shift? */
    switch (e->Iex.Binop.op) {
    case Iop_Shl32:
    case Iop_Shl64:
      shftOp = GXshft_SLL;
      break;
    case Iop_Shr32:
    case Iop_Shr64:
      shftOp = GXshft_SRL;
      break;
    case Iop_Sar64:
      shftOp = GXshft_SRA;
      break;
    case Iop_Shl8x8:
      shftOp = GXshft_SLL8x8;
      break;
    case Iop_Shr8x8:
      shftOp = GXshft_SRL8x8;
      break;
    default:
      shftOp = GXshft_INVALID;
      break;
    }

    /* we assume any literal values are on the second operand. */
    if (shftOp != GXshft_INVALID) {
      HReg r_dst = newVRegI(env);
      HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
      TILEGXRH *ri_srcR = NULL;
      /* get right arg into an RH, in the appropriate way */
      switch (shftOp) {
      case GXshft_SLL:
      case GXshft_SRL:
      case GXshft_SRA:
        /* fallthrough: all shift forms take an RH6u (reg or
           0..63 immediate) shift amount */
      case GXshft_SLL8x8:
      case GXshft_SRL8x8:
        ri_srcR = iselWordExpr_RH6u(env, e->Iex.Binop.arg2);
        break;
      default:
        vpanic("iselIntExpr_R_wrk-shftOp-arg2");
      }
      /* Widen the left arg if needed.  TODO: is this necessary?
         8/16-bit shifts currently just bail out. */
      if (ty == Ity_I8 || ty == Ity_I16)
        goto irreducible;
      if (ty == Ity_I64) {
        addInstr(env, TILEGXInstr_Shft(shftOp, False/*64bit shift */,
                                       r_dst, r_srcL, ri_srcR));
      } else {
        addInstr(env, TILEGXInstr_Shft(shftOp, True /*32bit shift */,
                                       r_dst, r_srcL, ri_srcR));
      }
      return r_dst;
    }

    /* Cmp*32*(x,y) ? */
    if (e->Iex.Binop.op == Iop_CasCmpEQ32
        || e->Iex.Binop.op == Iop_CmpEQ32
        || e->Iex.Binop.op == Iop_CasCmpNE32
        || e->Iex.Binop.op == Iop_CmpNE32
        || e->Iex.Binop.op == Iop_CmpNE64
        || e->Iex.Binop.op == Iop_CmpLT32S
        || e->Iex.Binop.op == Iop_CmpLT32U
        || e->Iex.Binop.op == Iop_CmpLT64U
        || e->Iex.Binop.op == Iop_CmpLE32S
        || e->Iex.Binop.op == Iop_CmpLE64S
        || e->Iex.Binop.op == Iop_CmpLE64U
        || e->Iex.Binop.op == Iop_CmpLT64S
        || e->Iex.Binop.op == Iop_CmpEQ64
        || e->Iex.Binop.op == Iop_CasCmpEQ64
        || e->Iex.Binop.op == Iop_CasCmpNE64) {

      Bool syned = (e->Iex.Binop.op == Iop_CmpLT32S
                    || e->Iex.Binop.op == Iop_CmpLE32S
                    || e->Iex.Binop.op == Iop_CmpLT64S
                    || e->Iex.Binop.op == Iop_CmpLE64S);
      Bool size32;
      HReg dst = newVRegI(env);
      HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1);
      HReg r2 = iselWordExpr_R(env, e->Iex.Binop.arg2);
      TILEGXCondCode cc;

      switch (e->Iex.Binop.op) {
      case Iop_CasCmpEQ32:
      case Iop_CmpEQ32:
        cc = TILEGXcc_EQ;
        size32 = True;
        break;
      case Iop_CasCmpNE32:
      case Iop_CmpNE32:
        cc = TILEGXcc_NE;
        size32 = True;
        break;
      case Iop_CasCmpNE64:
      case Iop_CmpNE64:
        cc = TILEGXcc_NE;
        /* NOTE(review): size32 is True for a 64-bit compare here
           (iselCondCode_wrk does the same).  Looks wrong if the
           emitter honours the flag -- confirm against
           TILEGXInstr_Cmp before changing. */
        size32 = True;
        break;
      case Iop_CmpLT32S:
        cc = TILEGXcc_LT;
        size32 = True;
        break;
      case Iop_CmpLT32U:
        cc = TILEGXcc_LO;
        size32 = True;
        break;
      case Iop_CmpLT64U:
        /* NOTE(review): uses the signed LT condition, whereas
           iselCondCode_wrk maps Iop_CmpLT64U to TILEGXcc_LO
           (unsigned).  One of the two is probably wrong -- verify
           how the emitter consumes 'syned' and 'cc'. */
        cc = TILEGXcc_LT;
        size32 = False;
        break;
      case Iop_CmpLE32S:
        cc = TILEGXcc_LE;
        size32 = True;
        break;
      case Iop_CmpLE64S:
        cc = TILEGXcc_LE;
        size32 = False;
        break;
      case Iop_CmpLE64U:
        /* NOTE(review): also uses the signed LE condition for an
           unsigned compare; see remark on Iop_CmpLT64U above. */
        cc = TILEGXcc_LE;
        size32 = False;
        break;
      case Iop_CmpLT64S:
        cc = TILEGXcc_LT;
        size32 = False;
        break;
      case Iop_CasCmpEQ64:
      case Iop_CmpEQ64:
        cc = TILEGXcc_EQ;
        size32 = False;
        break;
      default:
        vpanic
          ("iselCondCode(tilegx): CmpXX32 or CmpXX64");
      }

      addInstr(env, TILEGXInstr_Cmp(syned, size32, dst, r1, r2, cc));
      return dst;

      break;

    }

    if (e->Iex.Binop.op == Iop_CmpEQ8x8) {

      Bool syned = False;

      Bool size32;
      HReg dst = newVRegI(env);
      HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1);
      TILEGXRH *r2 = iselWordExpr_RH(env, True, e->Iex.Binop.arg2);
      TILEGXCondCode cc;

      /* Single-case switch kept for symmetry with the compare
         group above. */
      switch (e->Iex.Binop.op) {
      case Iop_CmpEQ8x8:
        cc = TILEGXcc_EQ8x8;
        size32 = False;
        break;

      default:
        vassert(0);
      }

      addInstr(env, TILEGXInstr_CmpI(syned, size32, dst, r1, r2, cc));
      return dst;

      break;
    }

    if (e->Iex.Binop.op == Iop_Max32U) {
      /*
        tmp = argL - argR
        tmp = bit 31 of tmp
        dst = (tmp == 0) ? argL : argR
        NOTE(review): bit-31-of-difference is not a correct unsigned
        comparison for all 32-bit inputs (e.g. 0xFFFFFFFF vs 1)
        unless the inputs are normalised in a particular way --
        verify MovCond's operand order and the value representation.
      */
      HReg argL = iselWordExpr_R(env, e->Iex.Binop.arg1);
      TILEGXRH *argR = iselWordExpr_RH(env, False /*signed */ ,
                                       e->Iex.Binop.arg2);
      HReg dst = newVRegI(env);
      HReg tmp = newVRegI(env);
      // temp = argL - argR
      addInstr(env, TILEGXInstr_Alu(GXalu_SUB, tmp, argL, argR));
      // tmp &= bit31
      addInstr(env, TILEGXInstr_Bf(GXbf_EXTU, tmp, tmp , 31, 31));
      // (tmp == 0) ? (argL) : (argR)
      addInstr(env, TILEGXInstr_MovCond(dst, argL, argR, tmp, TILEGXcc_EZ));
      return dst;
    }

    if (e->Iex.Binop.op == Iop_MullS32 || e->Iex.Binop.op == Iop_MullU32) {
      Bool syned = (e->Iex.Binop.op == Iop_MullS32);
      /* NOTE(review): op can only be MullS32/MullU32 here, so this
         comparison with Iop_Mul32 is always False and sz32 is
         constantly False.  Confirm a widening 32x32->64 multiply is
         really meant to be emitted with the "64bit" flag. */
      Bool sz32 = (e->Iex.Binop.op == Iop_Mul32);
      HReg r_dst = newVRegI(env);
      HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1);
      HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2);
      addInstr(env, TILEGXInstr_Mul(syned /*Unsigned or Signed */ ,
                                    True /*widen */ ,
                                    sz32 /*32bit or 64bit */,
                                    r_dst, r_srcL, r_srcR));
      return r_dst;
    }

    if (e->Iex.Binop.op == Iop_32HLto64) {
      /* Build a 64-bit value from two 32-bit halves:
         dst = (arg1 << 32) | (arg2 & 0xFFFFFFFF). */
      HReg tHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
      HReg tLo = iselWordExpr_R(env, e->Iex.Binop.arg2);
      HReg tLo_1 = newVRegI(env);
      HReg tHi_1 = newVRegI(env);
      HReg r_dst = newVRegI(env);
      HReg mask = newVRegI(env);

      addInstr(env, TILEGXInstr_Shft(GXshft_SLL, False, tHi_1, tHi,
                                     TILEGXRH_Imm(False, 32)));

      addInstr(env, TILEGXInstr_LI(mask, 0xffffffff));
      addInstr(env, TILEGXInstr_Alu(GXalu_AND, tLo_1, tLo,
                                    TILEGXRH_Reg(mask)));
      addInstr(env, TILEGXInstr_Alu(GXalu_OR, r_dst, tHi_1,
                                    TILEGXRH_Reg(tLo_1)));

      return r_dst;
    }

    /* Nothing matched; give up on this binop. */
    goto irreducible;
  }

    /* --------- UNARY OP --------- */
  case Iex_Unop: {

    IROp op_unop = e->Iex.Unop.op;

    switch (op_unop) {
    case Iop_Not1: {
      /* not1 x = 1 - x, correct for x in {0,1}. */
      HReg r_dst = newVRegI(env);
      HReg r_srcL = iselWordExpr_R(env, e->Iex.Unop.arg);
      TILEGXRH *r_srcR = TILEGXRH_Reg(r_srcL);

      addInstr(env, TILEGXInstr_LI(r_dst, 0x1));
      addInstr(env, TILEGXInstr_Alu(GXalu_SUB, r_dst, r_dst, r_srcR));
      return r_dst;
    }

    case Iop_Not8:
    case Iop_Not16:
    case Iop_Not32:
    case Iop_Not64: {
      /* not x = nor x, x */
      HReg r_dst = newVRegI(env);
      HReg r_srcL = iselWordExpr_R(env, e->Iex.Unop.arg);
      TILEGXRH *r_srcR = TILEGXRH_Reg(r_srcL);

      addInstr(env, TILEGXInstr_Alu(GXalu_NOR, r_dst, r_srcL, r_srcR));
      return r_dst;
    }

    case Iop_CmpNEZ8x8: {
      /* Per-byte compare against zero (r63 reads as zero). */
      Bool syned = False;
      Bool size32;
      HReg dst = newVRegI(env);
      HReg r1;
      TILEGXCondCode cc =  TILEGXcc_NE8x8;
      size32 = False;
      r1 = iselWordExpr_R(env, e->Iex.Unop.arg);
      addInstr(env, TILEGXInstr_CmpI(syned, size32, dst, hregTILEGX_R63(),
                                     TILEGXRH_Reg(r1), cc));

      return dst;
      break;
    }

    case Iop_16to8:
    case Iop_32to8:
    case Iop_64to8:
    case Iop_32to16:
    case Iop_64to16:
    case Iop_64to32:
    case Iop_128to64:
      /* Narrowing is a no-op: all values live widened in 64-bit
         registers and consumers re-narrow as needed. */
      return iselWordExpr_R(env, e->Iex.Unop.arg);

    case Iop_1Uto64:
    case Iop_1Uto32:
    case Iop_1Uto8: {
      /* Zero-extend a 1-bit value by masking with 1. */
      HReg dst = newVRegI(env);
      HReg src = iselWordExpr_R(env, e->Iex.Unop.arg);
      addInstr(env, TILEGXInstr_Alu(GXalu_AND, dst, src, TILEGXRH_Imm(False, 1)));
      return dst;
    }
    case Iop_8Uto16:
    case Iop_8Uto32:
    case Iop_8Uto64:
    case Iop_16Uto32:
    case Iop_16Uto64: {
      /* Zero-extend via an unsigned bitfield extract of the low
         8 or 16 bits. */
      HReg dst     = newVRegI(env);
      HReg src     = iselWordExpr_R(env, e->Iex.Unop.arg);
      Bool srcIs16 = toBool( e->Iex.Unop.op==Iop_16Uto32
                             || e->Iex.Unop.op==Iop_16Uto64 );

      addInstr(env, TILEGXInstr_Bf(GXbf_EXTU, dst, src,
                                   0,
                                   srcIs16 ? 15 : 7));

      return dst;
    }

    case Iop_32to1:
    case Iop_64to1:
      {
        /* Keep only bit 0. */
        HReg r_dst = newVRegI(env);
        HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

        addInstr(env, TILEGXInstr_Bf(GXbf_EXTU, r_dst, r_src, 0, 0));
        return r_dst;
      }
    case Iop_1Sto32:
    case Iop_1Sto64:
      {
        /* Sign-extend bit 0 across the whole register. */
        HReg r_dst = newVRegI(env);
        HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

        addInstr(env, TILEGXInstr_Bf(GXbf_EXTS, r_dst, r_src, 0, 0));
        return r_dst;
      }
    case Iop_8Sto16:
    case Iop_8Sto32:
    case Iop_8Sto64:
      {
        /* Sign-extend from 8 bits. */
        HReg r_dst = newVRegI(env);
        HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

        addInstr(env, TILEGXInstr_Bf(GXbf_EXTS, r_dst, r_src, 0, 7));
        return r_dst;
      }
    case Iop_16Sto32:
    case Iop_16Sto64:
      {
        /* Sign-extend from 16 bits. */
        HReg r_dst = newVRegI(env);
        HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

        addInstr(env, TILEGXInstr_Bf(GXbf_EXTS, r_dst, r_src, 0, 15));
        return r_dst;
      }
    case Iop_32Uto64:
      {
        /* Zero-extend from 32 bits. */
        HReg r_dst = newVRegI(env);
        HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

        addInstr(env, TILEGXInstr_Bf(GXbf_EXTU, r_dst, r_src, 0, 31));
        return r_dst;
      }
    case Iop_32Sto64:
      {
        /* Sign-extend from 32 bits. */
        HReg r_dst = newVRegI(env);
        HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

        addInstr(env, TILEGXInstr_Bf(GXbf_EXTS, r_dst, r_src, 0, 31));
        return r_dst;
      }

    case Iop_CmpNEZ8: {
      /* dst = ((src & 0xFF) != 0), compared against r63 (zero). */
      HReg r_dst = newVRegI(env);
      HReg tmp = newVRegI(env);
      HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

      TILEGXCondCode cc;

      cc = TILEGXcc_NE;
      addInstr(env, TILEGXInstr_Alu(GXalu_AND, tmp, r_src,
                                    TILEGXRH_Imm(False, 0xFF)));
      addInstr(env, TILEGXInstr_Cmp(False, True, r_dst, tmp,
                                    hregTILEGX_R63(), cc));
      return r_dst;
    }

    case Iop_CmpNEZ32: {
      /* dst = (src != 0), 32-bit compare against r63 (zero). */
      HReg r_dst = newVRegI(env);
      HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

      TILEGXCondCode cc;

      cc = TILEGXcc_NE;

      addInstr(env, TILEGXInstr_Cmp(False, True, r_dst, r_src,
                                    hregTILEGX_R63(), cc));
      return r_dst;
    }

    case Iop_CmpwNEZ32: {
      /* dst = (src == 0) ? 0 : all-ones, via (-src | src) >> 31
         arithmetic: the sign bit is set iff src != 0. */
      HReg r_dst = newVRegI(env);
      HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

      addInstr(env, TILEGXInstr_Alu(GXalu_SUB, r_dst, hregTILEGX_R63(),
                                    TILEGXRH_Reg(r_src)));

      addInstr(env, TILEGXInstr_Alu(GXalu_OR, r_dst, r_dst,
                                    TILEGXRH_Reg(r_src)));
      addInstr(env, TILEGXInstr_Shft(GXshft_SRA, True, r_dst, r_dst,
                                     TILEGXRH_Imm(False, 31)));
      return r_dst;
    }

    case Iop_Left8:
    case Iop_Left16:
    case Iop_Left32:
    case Iop_Left64: {
      /* Left(x) = x | -x (standard VEX "left" widening op). */
      HReg r_dst = newVRegI(env);
      HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
      addInstr(env, TILEGXInstr_Alu(GXalu_SUB, r_dst, hregTILEGX_R63(),
                                    TILEGXRH_Reg(r_src)));
      addInstr(env, TILEGXInstr_Alu(GXalu_OR, r_dst, r_dst,
                                    TILEGXRH_Reg(r_src)));
      return r_dst;
    }

    case Iop_Ctz64:
    case Iop_Clz64: {
      /* Count leading/trailing zeroes via dedicated unary insns. */
      HReg r_dst = newVRegI(env);
      HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
      if (op_unop == Iop_Clz64)
        addInstr(env, TILEGXInstr_Unary(GXun_CLZ, r_dst, r_src));
      else
        addInstr(env, TILEGXInstr_Unary(GXun_CTZ, r_dst, r_src));
      return r_dst;
    }

    case Iop_CmpNEZ64: {
      /* dst = (src != 0), 64-bit compare against r63 (zero). */
      HReg r_dst = newVRegI(env);
      HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);

      TILEGXCondCode cc;

      cc = TILEGXcc_NE;

      addInstr(env, TILEGXInstr_Cmp(False, False, r_dst, r_src,
                                    hregTILEGX_R63(), cc));
      return r_dst;
    }

    case Iop_CmpwNEZ64: {
      /* 64-bit analogue of CmpwNEZ32: (-x | x) >> 63 arithmetic. */
      HReg tmp1;
      HReg tmp2 = newVRegI(env);

      tmp1 = iselWordExpr_R(env, e->Iex.Unop.arg);

      addInstr(env, TILEGXInstr_Alu(GXalu_SUB, tmp2, hregTILEGX_R63(),
                                    TILEGXRH_Reg(tmp1)));

      addInstr(env, TILEGXInstr_Alu(GXalu_OR, tmp2, tmp2, TILEGXRH_Reg(tmp1)));
      addInstr(env, TILEGXInstr_Shft(GXshft_SRA, False, tmp2, tmp2,
                                     TILEGXRH_Imm (False, 63)));
      return tmp2;
    }

    default:
      goto irreducible;
      break;
    }
    break;
  }

    /* --------- GET --------- */
  case Iex_Get: {
    if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32
        || ((ty == Ity_I64))) {
      HReg r_dst;
      TILEGXAMode *am_addr;
      r_dst = newVRegI(env);
      am_addr = TILEGXAMode_IR(e->Iex.Get.offset,
                               TILEGXGuestStatePointer());
      addInstr(env, TILEGXInstr_Load(toUChar(sizeofIRType(ty)),
                                     r_dst, am_addr));
      return r_dst;
    }
  }
  /* NOTE(review): no 'break' here -- an Iex_Get whose type fails
     the test above falls through into the Iex_ITE case below.  The
     ty vassert at the top may make this unreachable (only Ity_I1
     could slip through), but an explicit break or goto irreducible
     would be safer. */

    /* --------- ITE --------- */
  case Iex_ITE: {
    if ((ty == Ity_I8 || ty == Ity_I16 ||
         ty == Ity_I32 || ((ty == Ity_I64))) &&
        typeOfIRExpr(env->type_env, e->Iex.ITE.cond) == Ity_I1) {

      HReg r0 = iselWordExpr_R(env, e->Iex.ITE.iffalse);
      HReg r1 = iselWordExpr_R(env, e->Iex.ITE.iftrue);
      HReg r_cond = iselWordExpr_R(env, e->Iex.ITE.cond);
      HReg r_dst = newVRegI(env);

      /* r_dst = (r_cond) ? r1 : r0 */

      addInstr(env, TILEGXInstr_MovCond(r_dst, r0, TILEGXRH_Reg(r1),
                                        r_cond, TILEGXcc_EZ));

      return r_dst;
    }
  }
  /* NOTE(review): same missing-break fallthrough as Iex_Get above;
     an unhandled ITE falls into the Iex_Const case. */

    /* --------- LITERAL --------- */
    /* 64/32/16/8-bit literals */
  case Iex_Const: {
    Long l;
    HReg r_dst = newVRegI(env);
    IRConst *con = e->Iex.Const.con;
    switch (con->tag) {
    case Ico_U64:

      l = (Long) con->Ico.U64;
      break;
    case Ico_U32:
      l = (Long) (Int) con->Ico.U32;
      break;
    case Ico_U16:
      l = (Long) (Int) (Short) con->Ico.U16;
      break;
    case Ico_U8:
      l = (Long) (Int) (Char) con->Ico.U8;
      break;
    default:
      vpanic("iselIntExpr_R.const(tilegx)");
    }
    addInstr(env, TILEGXInstr_LI(r_dst, (ULong) l));
    return r_dst;
  }

    /* --------- CCALL --------- */
  case Iex_CCall: {
    HReg r_dst = newVRegI(env);
    vassert(ty == e->Iex.CCall.retty);

    /* Marshal args, do the call, clear stack.  NULL guard ==
       unconditional call. */
    doHelperCall(env, NULL, e->Iex.CCall.cee, e->Iex.CCall.args,
                 e->Iex.CCall.retty);

    /* r0 is the return value. */
    addInstr(env, mk_iMOVds_RR(r_dst, hregTILEGX_R0()));

    return r_dst;
  }

  default:
    goto irreducible;
    break;
  }        /* end switch(e->tag) */

  /* We get here if no pattern matched. */
 irreducible:
  vex_printf("--------------->\n");
  if (e->tag == Iex_RdTmp)
    vex_printf("Iex_RdTmp \n");
  ppIRExpr(e);

  vpanic("iselWordExpr_R(tilegx): cannot reduce tree");
}
+
+/* --------------------- RH --------------------- */
+
+/* Compute an I8/I16/I32/I64 into a RH
+   (reg-or-halfword-immediate).  It's important to specify whether the
+   immediate is to be regarded as signed or not.  If yes, this will
+   never return -32768 as an immediate; this guaranteed that all
+   signed immediates that are return can have their sign inverted if
+   need be. */
+
+static TILEGXRH *iselWordExpr_RH ( ISelEnv * env, Bool syned, IRExpr * e )
+{
+  TILEGXRH *ri = iselWordExpr_RH_wrk(env, syned, e);
+  /* sanity checks ... */
+  switch (ri->tag) {
+  case GXrh_Imm:
+    vassert(ri->GXrh.Imm.syned == syned);
+    if (syned)
+      vassert(ri->GXrh.Imm.imm16 != 0x8000);
+    return ri;
+  case GXrh_Reg:
+    vassert(hregClass(ri->GXrh.Reg.reg) == HRcGPR());
+    vassert(hregIsVirtual(ri->GXrh.Reg.reg));
+    return ri;
+  default:
+    vpanic("iselIntExpr_RH: unknown tilegx RH tag");
+  }
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static TILEGXRH *iselWordExpr_RH_wrk ( ISelEnv * env, Bool syned, IRExpr * e )
+{
+  ULong u;
+  Long l;
+  IRType ty = typeOfIRExpr(env->type_env, e);
+  vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 ||
+          ((ty == Ity_I64)));
+
+  /* special case: immediate */
+  if (e->tag == Iex_Const) {
+    IRConst *con = e->Iex.Const.con;
+    /* What value are we aiming to generate? */
+    switch (con->tag) {
+      /* Note: Not sign-extending - we carry 'syned' around */
+    case Ico_U64:
+      u = con->Ico.U64;
+      break;
+    case Ico_U32:
+      u = 0xFFFFFFFF & con->Ico.U32;
+      break;
+    case Ico_U16:
+      u = 0x0000FFFF & con->Ico.U16;
+      break;
+    case Ico_U8:
+      u = 0x000000FF & con->Ico.U8;
+      break;
+    default:
+      vpanic("iselIntExpr_RH.Iex_Const(tilegx)");
+    }
+    l = (Long) u;
+    /* Now figure out if it's representable. */
+    if (!syned && u <= 255) {
+      return TILEGXRH_Imm(False /*unsigned */ , toUShort(u & 0xFFFF));
+    }
+    if (syned && l >= -127 && l <= 127) {
+      return TILEGXRH_Imm(True /*signed */ , toUShort(u & 0xFFFF));
+    }
+    /* no luck; use the Slow Way. */
+  }
+  /* default case: calculate into a register and return that */
+  return TILEGXRH_Reg(iselWordExpr_R(env, e));
+}
+
+/* --------------------- RH6u --------------------- */
+
+/* Compute an I8 into a reg-or-6-bit-unsigned-immediate, the latter
+   being an immediate in the range 0 .. 63 inclusive.  Used for doing
+   shift amounts. */
+
+static TILEGXRH *iselWordExpr_RH6u ( ISelEnv * env, IRExpr * e )
+{
+  TILEGXRH *ri;
+  ri = iselWordExpr_RH6u_wrk(env, e);
+  /* sanity checks ... */
+  switch (ri->tag) {
+  case GXrh_Imm:
+    vassert(ri->GXrh.Imm.imm16 >= 1 && ri->GXrh.Imm.imm16 <= 63);
+    vassert(!ri->GXrh.Imm.syned);
+    return ri;
+  case GXrh_Reg:
+    vassert(hregClass(ri->GXrh.Reg.reg) == HRcInt64);
+    vassert(hregIsVirtual(ri->GXrh.Reg.reg));
+    return ri;
+  default:
+    vpanic("iselIntExpr_RH6u: unknown tilegx RH tag");
+  }
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static TILEGXRH *iselWordExpr_RH6u_wrk ( ISelEnv * env, IRExpr * e )
+{
+  IRType ty = typeOfIRExpr(env->type_env, e);
+
+  /* special case: immediate */
+  if (e->tag == Iex_Const)
+  {
+    if (ty == Ity_I8)
+    {
+      if(e->Iex.Const.con->tag == Ico_U8
+         && e->Iex.Const.con->Ico.U8 >= 1 && e->Iex.Const.con->Ico.U8 <= 63)
+        return TILEGXRH_Imm(False /*unsigned */ , e->Iex.Const.con->Ico.U8);
+    }
+    else if (ty == Ity_I64)
+    {
+      if(e->Iex.Const.con->tag == Ico_U64
+         && e->Iex.Const.con->Ico.U64 >= 1
+         && e->Iex.Const.con->Ico.U64 <= 63)
+        return TILEGXRH_Imm(False /*unsigned */, e->Iex.Const.con->Ico.U64);
+    }
+  }
+
+  /* default case: calculate into a register and return that */
+  return TILEGXRH_Reg(iselWordExpr_R(env, e));
+}
+
+/* --------------------- CONDCODE --------------------- */
+
+/* Generate code to evaluated a bit-typed expression, returning the
+   condition code which would correspond when the expression would
+   notionally have returned 1. */
+
+static TILEGXCondCode iselCondCode(ISelEnv * env, IRExpr * e)
+{
+  TILEGXCondCode cc = iselCondCode_wrk(env,e);
+  vassert(cc != TILEGXcc_NV);
+  return cc;
+}
+
/* DO NOT CALL THIS DIRECTLY ! */
/* Worker for iselCondCode: evaluates a 1-bit expression, stores the
   boolean result into the guest state at COND_OFFSET(), and returns
   the TILEGX condition code a consumer should test.

   NOTE(review): this function reads e->Iex.Binop.op before ever
   checking e->tag.  That only works because Binop.op and Unop.op
   occupy overlapping storage in the Iex union; an expression of a
   different kind could alias one of the tested opcode values.  A
   tag check before the opcode dispatch would be safer -- confirm
   against all callers before restructuring. */
static TILEGXCondCode iselCondCode_wrk ( ISelEnv * env, IRExpr * e )
{
  vassert(e);
  vassert(typeOfIRExpr(env->type_env, e) == Ity_I1);

  /* Cmp*(x,y) ? */
  if (e->Iex.Binop.op == Iop_CmpEQ32
      || e->Iex.Binop.op == Iop_CmpNE32
      || e->Iex.Binop.op == Iop_CmpNE64
      || e->Iex.Binop.op == Iop_CmpLT32S
      || e->Iex.Binop.op == Iop_CmpLT32U
      || e->Iex.Binop.op == Iop_CmpLT64U
      || e->Iex.Binop.op == Iop_CmpLE32S
      || e->Iex.Binop.op == Iop_CmpLE64S
      || e->Iex.Binop.op == Iop_CmpLT64S
      || e->Iex.Binop.op == Iop_CmpEQ64
      || e->Iex.Binop.op == Iop_CasCmpEQ32
      || e->Iex.Binop.op == Iop_CasCmpEQ64) {

    Bool syned = (e->Iex.Binop.op == Iop_CmpLT32S
                  || e->Iex.Binop.op == Iop_CmpLE32S
                  || e->Iex.Binop.op == Iop_CmpLT64S
                  || e->Iex.Binop.op == Iop_CmpLE64S);
    Bool size32;
    HReg dst = newVRegI(env);
    HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1);
    HReg r2 = iselWordExpr_R(env, e->Iex.Binop.arg2);

    TILEGXCondCode cc;

    switch (e->Iex.Binop.op) {
    case Iop_CmpEQ32:
    case Iop_CasCmpEQ32:
      cc = TILEGXcc_EQ;
      size32 = True;
      break;
    case Iop_CmpNE32:
      cc = TILEGXcc_NE;
      size32 = True;
      break;
    case Iop_CmpNE64:
      cc = TILEGXcc_NE;
      /* NOTE(review): size32 is True for a 64-bit compare --
         matches the same oddity in iselWordExpr_R_wrk; verify
         against TILEGXInstr_Cmp's emitter. */
      size32 = True;
      break;
    case Iop_CmpLT32S:
      cc = TILEGXcc_LT;
      size32 = True;
      break;
    case Iop_CmpLT32U:
      cc = TILEGXcc_LO;
      size32 = True;
      break;
    case Iop_CmpLT64U:
      /* Unsigned below -- note iselWordExpr_R_wrk maps the same op
         to the signed TILEGXcc_LT; the two should agree. */
      cc = TILEGXcc_LO;
      size32 = False;
      break;
    case Iop_CmpLE32S:
      cc = TILEGXcc_LE;
      size32 = True;
      break;
    case Iop_CmpLE64S:
      cc = TILEGXcc_LE;
      size32 = False;
      break;
    case Iop_CmpLT64S:
      cc = TILEGXcc_LT;
      size32 = False;
      break;
    case Iop_CmpEQ64:
    case Iop_CasCmpEQ64:
      cc = TILEGXcc_EQ;
      size32 = False;
      break;
    default:
      vpanic("iselCondCode(tilegx): CmpXX32 or CmpXX64");
      break;
    }

    addInstr(env, TILEGXInstr_Cmp(syned, size32, dst, r1, r2, cc));
    /* Store result to guest_COND */
    TILEGXAMode *am_addr = TILEGXAMode_IR(0, TILEGXGuestStatePointer());

    addInstr(env, TILEGXInstr_Store(8,
                                    TILEGXAMode_IR(am_addr->GXam.IR.index +
                                                   COND_OFFSET(),
                                                   am_addr->GXam.IR.base),
                                    dst));
    return cc;
  }

  /* Not1: computed as (1 - x).  NOTE(review): Iop_Not1 is a unary
     op, so this test relies on Binop.op aliasing Unop.op in the
     union; the argument is (correctly) read via Unop.arg below. */
  if (e->Iex.Binop.op == Iop_Not1) {
    HReg r_dst = newVRegI(env);
    HReg r_srcL = iselWordExpr_R(env, e->Iex.Unop.arg);
    TILEGXRH *r_srcR = TILEGXRH_Reg(r_srcL);

    addInstr(env, TILEGXInstr_LI(r_dst, 0x1));
    addInstr(env, TILEGXInstr_Alu(GXalu_SUB, r_dst, r_dst, r_srcR));

   /* Store result to guest_COND */
    TILEGXAMode *am_addr = TILEGXAMode_IR(0, TILEGXGuestStatePointer());

    addInstr(env, TILEGXInstr_Store(8,
                                    TILEGXAMode_IR(am_addr->GXam.IR.index +
                                                   COND_OFFSET(),
                                                   am_addr->GXam.IR.base),
                                    r_dst));
    return TILEGXcc_NE;
  }

  /* Temporaries and remaining unops: evaluate to a register and
     store the value to guest_COND; the caller tests for EQ. */
  if (e->tag == Iex_RdTmp || e->tag == Iex_Unop) {
    HReg r_dst = iselWordExpr_R_wrk(env, e);
    /* Store result to guest_COND */
    TILEGXAMode *am_addr = TILEGXAMode_IR(0, TILEGXGuestStatePointer());

    addInstr(env, TILEGXInstr_Store(8,
                                    TILEGXAMode_IR(am_addr->GXam.IR.index +
                                                   COND_OFFSET(),
                                                   am_addr->GXam.IR.base),
                                    r_dst));
    return TILEGXcc_EQ;
  }

  vex_printf("iselCondCode(tilegx): No such tag(%u)\n", e->tag);
  ppIRExpr(e);
  vpanic("iselCondCode(tilegx)");

  /* NOTE(review): everything below is dead code -- the vpanic just
     above never returns.  Either delete it, or move it above the
     panic if these fallback classifications are still wanted. */
  /* Constant 1:Bit */
  if (e->tag == Iex_Const && e->Iex.Const.con->Ico.U1 == True)
    return TILEGXcc_AL;

  if (e->tag == Iex_RdTmp)
    return TILEGXcc_EQ;

  if (e->tag == Iex_Binop)
    return TILEGXcc_EQ;

  if (e->tag == Iex_Unop)
    return TILEGXcc_EQ;

  vex_printf("iselCondCode(tilegx): No such tag(%u)\n", e->tag);
  ppIRExpr(e);
  vpanic("iselCondCode(tilegx)");
}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Statements                                  ---*/
+/*---------------------------------------------------------*/
+
/* Select TILE-Gx host instructions for one IR statement, appending
   them to env->code.  Panics (via vpanic) on any statement form this
   backend does not handle. */
static void iselStmt ( ISelEnv * env, IRStmt * stmt )
{
  /* Optional front-end trace of the statement being selected. */
  if (vex_traceflags & VEX_TRACE_VCODE) {
    vex_printf("\n-- ");
    ppIRStmt(stmt);
    vex_printf("\n");
  }

  switch (stmt->tag) {
    /* --------- STORE --------- */
  case Ist_Store: {
    TILEGXAMode *am_addr;
    IRType tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);

    /*constructs addressing mode from address provided */
    am_addr = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd);

    /* Integer stores of 1/2/4/8 bytes; the store width comes from the
       IR type of the data. */
    if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 ||
        (tyd == Ity_I64)) {
      HReg r_src = iselWordExpr_R(env, stmt->Ist.Store.data);
      addInstr(env, TILEGXInstr_Store(toUChar(sizeofIRType(tyd)),
                                      am_addr, r_src));
      return;
    }
    break;
  }

    /* --------- PUT --------- */
  case Ist_Put: {
    IRType ty = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);

    /* Guest-state write: a store at a fixed offset from the guest
       state pointer. */
    if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32 ||
        (ty == Ity_I64)) {
      HReg r_src = iselWordExpr_R(env, stmt->Ist.Put.data);
      TILEGXAMode *am_addr = TILEGXAMode_IR(stmt->Ist.Put.offset,
                                            TILEGXGuestStatePointer());
      addInstr(env, TILEGXInstr_Store(toUChar(sizeofIRType(ty)),
                                      am_addr, r_src));
      return;
    }
    break;
  }

    /* --------- TMP --------- */
  case Ist_WrTmp: {
    IRTemp tmp = stmt->Ist.WrTmp.tmp;
    IRType ty = typeOfIRTemp(env->type_env, tmp);
    HReg r_dst = lookupIRTemp(env, tmp);
    HReg r_src = iselWordExpr_R(env, stmt->Ist.WrTmp.data);
    IRType dty = typeOfIRExpr(env->type_env, stmt->Ist.WrTmp.data);

    /* Any integral destination (or matching src/dst types): a plain
       register-to-register move suffices. */
    if (ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8 ||
        (ty == dty))
    {
      addInstr(env, mk_iMOVds_RR(r_dst, r_src));
      return;
    }
    else if (ty == Ity_I1) {
      /* Destination is a 1-bit value: first mask the source down to
         its significant low bits ... */
      switch (dty)
      {
      case Ity_I32:
        addInstr(env, TILEGXInstr_Bf(GXbf_EXTU, r_src, r_src, 0, 31));
        break;
      case Ity_I16:
        addInstr(env, TILEGXInstr_Bf(GXbf_EXTU, r_src, r_src, 0, 15));
        break;
      case Ity_I8:
        addInstr(env, TILEGXInstr_Bf(GXbf_EXTU, r_src, r_src, 0, 7));
        break;
      default:
        vassert(0);
      }

      /* ... then normalise to 0/1 with a conditional move against the
         hardwired zero register r63.  NOTE(review): exact operand
         semantics depend on TILEGXInstr_MovCond — confirm against its
         definition. */
      addInstr(env, TILEGXInstr_MovCond(r_dst,
                                        hregTILEGX_R63(),
                                        TILEGXRH_Imm(False, 1),
                                        r_src,
                                        TILEGXcc_EZ));
      return;
    }
    break;
  }

    /* --------- Call to DIRTY helper --------- */
  case Ist_Dirty: {
    IRType retty;
    IRDirty *d = stmt->Ist.Dirty.details;

    /* Marshal args, do the call, clear stack. */
    doHelperCall(env, d->guard, d->cee, d->args, -1);

    /* Now figure out what to do with the returned value, if any. */
    if (d->tmp == IRTemp_INVALID)
      /* No return value.  Nothing to do. */
      return;

    retty = typeOfIRTemp(env->type_env, d->tmp);

    if (retty == Ity_I8 || retty == Ity_I16 || retty == Ity_I32
        || (retty == Ity_I64)) {
      /* The returned value is in r0.  Park it in the register
         associated with tmp. */
      HReg r_dst = lookupIRTemp(env, d->tmp);
      addInstr(env, mk_iMOVds_RR(r_dst, hregTILEGX_R0()));
      return;
    }
    break;
  }


    /* --------- ACAS --------- */
  case Ist_CAS:
    {
      UChar  sz;
      IRCAS* cas = stmt->Ist.CAS.details;
      IRType ty  = typeOfIRExpr(env->type_env, cas->dataLo);

      TILEGXAMode *r_addr = iselWordExpr_AMode(env, cas->addr, Ity_I64);
      HReg r_new  = iselWordExpr_R(env, cas->dataLo);
      HReg r_old  = lookupIRTemp(env,   cas->oldLo);
      HReg r_exp =  INVALID_HREG;

      /* Only single (non-double) CAS with a reg-only amode is
         supported. */
      vassert(cas->expdHi == NULL);
      vassert(cas->dataHi == NULL);
      vassert(r_addr->tag == GXam_IR);
      vassert(r_addr->GXam.IR.index == 0);

      switch (ty)
      {
      case Ity_I64: sz = 8; break;
      case Ity_I32: sz = 4; break;
      default: vassert(0);
      }

      if (cas->expdLo->tag != Iex_Const)
      {
        /* Genuine compare-and-exchange against a runtime expected
           value. */
        r_exp = iselWordExpr_R(env, cas->expdLo);
        addInstr(env, TILEGXInstr_Acas(GXacas_CMPEXCH, r_old,
                                       r_addr->GXam.IR.base, r_exp,
                                       r_new, sz));
      }
      else
      {
        /* Constant expected value is used here as an opcode selector
           for the other TILE-Gx atomic ops (0=exch, 2=fetchand,
           3=fetchadd, 4=fetchor, 5=fetchaddgez) — presumably a
           front-end/back-end private convention; confirm against the
           guest-side encoder. */
        if((sz == 8 && cas->expdLo->Iex.Const.con->Ico.U64 == 0) ||
           (sz == 4 && cas->expdLo->Iex.Const.con->Ico.U32 == 0))
        {
          addInstr(env, TILEGXInstr_Acas(GXacas_EXCH, r_old,
                                         r_addr->GXam.IR.base,
                                         r_exp, r_new, sz));
        }
        else if((sz == 8 && cas->expdLo->Iex.Const.con->Ico.U64 == 2) ||
                (sz == 4 && cas->expdLo->Iex.Const.con->Ico.U32 == 2))
        {
          addInstr(env, TILEGXInstr_Acas(GXacas_FetchAnd, r_old,
                                         r_addr->GXam.IR.base, r_exp,
                                         r_new, sz));
        }
        else if((sz == 8 && cas->expdLo->Iex.Const.con->Ico.U64 == 3) ||
                (sz == 4 && cas->expdLo->Iex.Const.con->Ico.U32 == 3))
        {
          addInstr(env, TILEGXInstr_Acas(GXacas_FetchAdd, r_old,
                                         r_addr->GXam.IR.base,
                                         r_exp, r_new, sz));
        }
        else if((sz == 8 && cas->expdLo->Iex.Const.con->Ico.U64 == 4) ||
                (sz == 4 && cas->expdLo->Iex.Const.con->Ico.U32 == 4))
        {
          addInstr(env, TILEGXInstr_Acas(GXacas_FetchOr, r_old,
                                         r_addr->GXam.IR.base, r_exp,
                                         r_new, sz));
        }
        else if((sz == 8 && cas->expdLo->Iex.Const.con->Ico.U64 == 5) ||
                (sz == 4 && cas->expdLo->Iex.Const.con->Ico.U32 == 5))
        {
          addInstr(env, TILEGXInstr_Acas(GXacas_FetchAddgez, r_old,
                                         r_addr->GXam.IR.base, r_exp,
                                         r_new, sz));
        }
        else
        {
          vassert(0);
        }
      }
      return;
    }

    /* --------- INSTR MARK --------- */
    /* Doesn't generate any executable code ... */
  case Ist_IMark:
    return;

    /* --------- ABI HINT --------- */
    /* These have no meaning (denotation in the IR) and so we ignore
       them ... if any actually made it this far. */
  case Ist_AbiHint:
    return;

    /* --------- NO-OP --------- */
    /* Fairly self-explanatory, wouldn't you say? */
  case Ist_NoOp:
    return;

    /* --------- EXIT --------- */
  case Ist_Exit: {

    TILEGXCondCode cc   = iselCondCode(env, stmt->Ist.Exit.guard);
    TILEGXAMode*   amPC = TILEGXAMode_IR(stmt->Ist.Exit.offsIP,
                                         TILEGXGuestStatePointer());

    /* Case: boring transfer to known address */
    if (stmt->Ist.Exit.jk == Ijk_Boring
        || stmt->Ist.Exit.jk == Ijk_Call
        /* || stmt->Ist.Exit.jk == Ijk_Ret */) {
      if (env->chainingAllowed) {
        /* .. almost always true .. */
        /* Skip the event check at the dst if this is a forwards
           edge. */
        Bool toFastEP  =
          ((Addr64)stmt->Ist.Exit.dst->Ico.U64) > ((Addr64)env->max_ga);

        if (0) vex_printf("%s", toFastEP ? "Y" : ",");
        addInstr(env, TILEGXInstr_XDirect(
                   (Addr64)stmt->Ist.Exit.dst->Ico.U64,
                   amPC, cc, toFastEP));
      } else {
        /* .. very occasionally .. */
        /* We can't use chaining, so ask for an assisted transfer,
           as that's the only alternative that is allowable. */
        HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
        addInstr(env, TILEGXInstr_XAssisted(r, amPC, cc, Ijk_Boring));
      }
      return;
    }

    /* Case: assisted transfer to arbitrary address */
    switch (stmt->Ist.Exit.jk) {
      /* Keep this list in sync with that in iselNext below */
      /* NOTE(review): iselNext's list also contains Ijk_SigILL, which
         is absent here — confirm whether that asymmetry is
         intentional. */
    case Ijk_ClientReq:
    case Ijk_EmFail:
    case Ijk_EmWarn:
    case Ijk_NoDecode:
    case Ijk_NoRedir:
    case Ijk_SigBUS:
    case Ijk_Yield:
    case Ijk_SigTRAP:
    case Ijk_SigFPE_IntDiv:
    case Ijk_SigFPE_IntOvf:
    case Ijk_Sys_syscall:
    case Ijk_InvalICache:
    case Ijk_Ret:
      {
        HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
        addInstr(env, TILEGXInstr_XAssisted(r, amPC, cc,
                                            stmt->Ist.Exit.jk));
        return;
      }
    default:
      break;
    }

    /* Do we ever expect to see any other kind? */
    goto stmt_fail;
  }

  default:
    break;
  }

 stmt_fail:
  vex_printf("stmt_fail tag: 0x%x\n", stmt->tag);
  ppIRStmt(stmt);
  vpanic("iselStmt:\n");
}
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Basic block terminators (Nexts)             ---*/
+/*---------------------------------------------------------*/
+
/* Generate code for the block's final transfer: write the next guest
   PC (expression 'next') at guest-state offset offsIP and emit the
   appropriate direct / indirect / assisted jump for jump-kind 'jk'. */
static void iselNext ( ISelEnv * env, IRExpr * next, IRJumpKind jk,
                       Int offsIP )
{

  if (vex_traceflags & VEX_TRACE_VCODE) {
    vex_printf("\n-- PUT(%d) = ", offsIP);
    ppIRExpr(next);
    vex_printf( "; exit-");
    ppIRJumpKind(jk);
    vex_printf( "\n");
  }

  /* Case: boring transfer to known address */
  if (next->tag == Iex_Const) {
    IRConst* cdst = next->Iex.Const.con;
    if (jk == Ijk_Boring || jk == Ijk_Call) {
      /* Boring transfer to known address */
      TILEGXAMode* amPC = TILEGXAMode_IR(offsIP, TILEGXGuestStatePointer());
      if (env->chainingAllowed) {
        /* .. almost always true .. */
        /* Skip the event check at the dst if this is a forwards
           edge. */
        Bool toFastEP = ((Addr64)cdst->Ico.U64) > ((Addr64)env->max_ga);

        if (0) vex_printf("%s", toFastEP ? "X" : ".");
        addInstr(env, TILEGXInstr_XDirect((Addr64)cdst->Ico.U64,
                                          amPC, TILEGXcc_AL, toFastEP));
      } else {
        /* .. very occasionally .. */
        /* We can't use chaining, so ask for an assisted transfer,
           as that's the only alternative that is allowable. */
        HReg r = iselWordExpr_R(env, next);
        addInstr(env, TILEGXInstr_XAssisted(r, amPC, TILEGXcc_AL,
                                            Ijk_Boring));
      }
      return;
    }
  }

  /* Case: call/return (==boring) transfer to any address */
  switch (jk) {
  case Ijk_Boring: case Ijk_Call: {
    HReg       r     = iselWordExpr_R(env, next);
    TILEGXAMode*  amPC = TILEGXAMode_IR(offsIP,
                                        TILEGXGuestStatePointer());
    /* Chained indirect jump if allowed, otherwise assisted. */
    if (env->chainingAllowed)
      addInstr(env, TILEGXInstr_XIndir(r, amPC, TILEGXcc_AL));
    else
      addInstr(env, TILEGXInstr_XAssisted(r, amPC, TILEGXcc_AL,
                                          Ijk_Boring));
    return;
  }
  default:
    break;
  }

  /* Case: assisted transfer to arbitrary address */
  switch (jk) {
    /* Keep this list in sync with that for Ist_Exit above */
    /* NOTE(review): this list has Ijk_SigILL but not Ijk_Yield, while
       the Ist_Exit list is the other way round — confirm intent. */
  case Ijk_ClientReq:
  case Ijk_EmFail:
  case Ijk_EmWarn:
  case Ijk_NoDecode:
  case Ijk_NoRedir:
  case Ijk_SigBUS:
  case Ijk_SigILL:
  case Ijk_SigTRAP:
  case Ijk_SigFPE_IntDiv:
  case Ijk_SigFPE_IntOvf:
  case Ijk_Sys_syscall:
  case Ijk_InvalICache:
  case Ijk_Ret: {
    HReg  r = iselWordExpr_R(env, next);
    TILEGXAMode* amPC = TILEGXAMode_IR(offsIP, TILEGXGuestStatePointer());
    addInstr(env, TILEGXInstr_XAssisted(r, amPC, TILEGXcc_AL, jk));
    return;
  }
  default:
    break;
  }

  /* Anything else is unexpected: dump the IR and abort. */
  vex_printf("\n-- PUT(%d) = ", offsIP);
  ppIRExpr(next );
  vex_printf("; exit-");
  ppIRJumpKind(jk);
  vex_printf("\n");
  vassert(0);  /* are we expecting any other kind? */
}
+
+/*---------------------------------------------------------*/
+/*--- Insn selector top-level                           ---*/
+/*---------------------------------------------------------*/
+
+/* Translate an entire BB to tilegx code. */
+HInstrArray *iselSB_TILEGX ( const IRSB* bb,
+                             VexArch arch_host,
+                             const VexArchInfo* archinfo_host,
+                             const VexAbiInfo* vbi,
+                             Int offs_Host_EvC_Counter,
+                             Int offs_Host_EvC_FailAddr,
+                             Bool chainingAllowed,
+                             Bool addProfInc,
+                             Addr max_ga )
+{
+  Int i, j;
+  HReg hreg;
+  ISelEnv *env;
+  UInt hwcaps_host = archinfo_host->hwcaps;
+  TILEGXAMode *amCounter, *amFailAddr;
+
+  /* sanity ... */
+  vassert(arch_host == VexArchTILEGX);
+
+  /* Make up an initial environment to use. */
+  env = LibVEX_Alloc(sizeof(ISelEnv));
+  env->vreg_ctr = 0;
+  env->mode64 = True;
+
+  /* Set up output code array. */
+  env->code = newHInstrArray();
+
+  /* Copy BB's type env. */
+  env->type_env = bb->tyenv;
+
+  /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
+     change as we go along. */
+  env->n_vregmap = bb->tyenv->types_used;
+  env->vregmap = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
+
+  /* and finally ... */
+  env->hwcaps = hwcaps_host;
+  env->chainingAllowed = chainingAllowed;
+  env->hwcaps          = hwcaps_host;
+  env->max_ga          = max_ga;
+
+  /* For each IR temporary, allocate a suitably-kinded virtual
+     register. */
+  j = 0;
+
+  for (i = 0; i < env->n_vregmap; i++) {
+    hreg = INVALID_HREG;
+    switch (bb->tyenv->types[i]) {
+    case Ity_I1:
+    case Ity_I8:
+    case Ity_I16:
+    case Ity_I32:
+      hreg = mkHReg(True, HRcInt64, 0, j++);
+      break;
+    case Ity_I64:
+      hreg = mkHReg(True, HRcInt64, 0, j++);
+      break;
+    default:
+      ppIRType(bb->tyenv->types[i]);
+      vpanic("iselBB(tilegx): IRTemp type");
+    }
+    env->vregmap[i] = hreg;
+  }
+  env->vreg_ctr = j;
+
+  /* The very first instruction must be an event check. */
+  amCounter = TILEGXAMode_IR(offs_Host_EvC_Counter,
+                             TILEGXGuestStatePointer());
+  amFailAddr = TILEGXAMode_IR(offs_Host_EvC_FailAddr,
+                              TILEGXGuestStatePointer());
+  addInstr(env, TILEGXInstr_EvCheck(amCounter, amFailAddr));
+
+  /* Possibly a block counter increment (for profiling).  At this
+     point we don't know the address of the counter, so just pretend
+     it is zero.  It will have to be patched later, but before this
+     translation is used, by a call to LibVEX_patchProfCtr. */
+  if (addProfInc) {
+    addInstr(env, TILEGXInstr_ProfInc());
+  }
+
+  /* Ok, finally we can iterate over the statements. */
+  for (i = 0; i < bb->stmts_used; i++)
+    iselStmt(env, bb->stmts[i]);
+
+  iselNext(env, bb->next, bb->jumpkind, bb->offsIP);
+
+  /* record the number of vregs we used. */
+  env->code->n_vregs = env->vreg_ctr;
+  return env->code;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                  host_tilegx_isel.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_x86_defs.c b/VEX/priv/host_x86_defs.c
new file mode 100644
index 0000000..792381e
--- /dev/null
+++ b/VEX/priv/host_x86_defs.c
@@ -0,0 +1,3479 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                   host_x86_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+#include "libvex_trc_values.h"
+
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "host_x86_defs.h"
+
+
+/* --------- Registers. --------- */
+
+const RRegUniverse* getRRegUniverse_X86 ( void )
+{
+   /* The real-register universe is a big constant, so we just want to
+      initialise it once. */
+   static RRegUniverse rRegUniverse_X86;
+   static Bool         rRegUniverse_X86_initted = False;
+
+   /* Handy shorthand, nothing more */
+   RRegUniverse* ru = &rRegUniverse_X86;
+
+   /* This isn't thread-safe.  Sigh. */
+   if (LIKELY(rRegUniverse_X86_initted))
+      return ru;
+
+   RRegUniverse__init(ru);
+
+   /* Add the registers.  The initial segment of this array must be
+      those available for allocation by reg-alloc, and those that
+      follow are not available for allocation. */
+   ru->regs[ru->size++] = hregX86_EAX();
+   ru->regs[ru->size++] = hregX86_EBX();
+   ru->regs[ru->size++] = hregX86_ECX();
+   ru->regs[ru->size++] = hregX86_EDX();
+   ru->regs[ru->size++] = hregX86_ESI();
+   ru->regs[ru->size++] = hregX86_EDI();
+   ru->regs[ru->size++] = hregX86_FAKE0();
+   ru->regs[ru->size++] = hregX86_FAKE1();
+   ru->regs[ru->size++] = hregX86_FAKE2();
+   ru->regs[ru->size++] = hregX86_FAKE3();
+   ru->regs[ru->size++] = hregX86_FAKE4();
+   ru->regs[ru->size++] = hregX86_FAKE5();
+   ru->regs[ru->size++] = hregX86_XMM0();
+   ru->regs[ru->size++] = hregX86_XMM1();
+   ru->regs[ru->size++] = hregX86_XMM2();
+   ru->regs[ru->size++] = hregX86_XMM3();
+   ru->regs[ru->size++] = hregX86_XMM4();
+   ru->regs[ru->size++] = hregX86_XMM5();
+   ru->regs[ru->size++] = hregX86_XMM6();
+   ru->regs[ru->size++] = hregX86_XMM7();
+   ru->allocable = ru->size;
+   /* And other regs, not available to the allocator. */
+   ru->regs[ru->size++] = hregX86_ESP();
+   ru->regs[ru->size++] = hregX86_EBP();
+
+   rRegUniverse_X86_initted = True;
+
+   RRegUniverse__check_is_sane(ru);
+   return ru;
+}
+
+
+void ppHRegX86 ( HReg reg ) 
+{
+   Int r;
+   static const HChar* ireg32_names[8] 
+     = { "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi" };
+   /* Be generic for all virtual regs. */
+   if (hregIsVirtual(reg)) {
+      ppHReg(reg);
+      return;
+   }
+   /* But specific for real regs. */
+   switch (hregClass(reg)) {
+      case HRcInt32:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 8);
+         vex_printf("%s", ireg32_names[r]);
+         return;
+      case HRcFlt64:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 6);
+         vex_printf("%%fake%d", r);
+         return;
+      case HRcVec128:
+         r = hregEncoding(reg);
+         vassert(r >= 0 && r < 8);
+         vex_printf("%%xmm%d", r);
+         return;
+      default:
+         vpanic("ppHRegX86");
+   }
+}
+
+
+/* --------- Condition codes, Intel encoding. --------- */
+
+const HChar* showX86CondCode ( X86CondCode cond )
+{
+   switch (cond) {
+      case Xcc_O:      return "o";
+      case Xcc_NO:     return "no";
+      case Xcc_B:      return "b";
+      case Xcc_NB:     return "nb";
+      case Xcc_Z:      return "z";
+      case Xcc_NZ:     return "nz";
+      case Xcc_BE:     return "be";
+      case Xcc_NBE:    return "nbe";
+      case Xcc_S:      return "s";
+      case Xcc_NS:     return "ns";
+      case Xcc_P:      return "p";
+      case Xcc_NP:     return "np";
+      case Xcc_L:      return "l";
+      case Xcc_NL:     return "nl";
+      case Xcc_LE:     return "le";
+      case Xcc_NLE:    return "nle";
+      case Xcc_ALWAYS: return "ALWAYS";
+      default: vpanic("ppX86CondCode");
+   }
+}
+
+
+/* --------- X86AMode: memory address expressions. --------- */
+
+X86AMode* X86AMode_IR ( UInt imm32, HReg reg ) {
+   X86AMode* am = LibVEX_Alloc_inline(sizeof(X86AMode));
+   am->tag = Xam_IR;
+   am->Xam.IR.imm = imm32;
+   am->Xam.IR.reg = reg;
+   return am;
+}
+X86AMode* X86AMode_IRRS ( UInt imm32, HReg base, HReg indEx, Int shift ) {
+   X86AMode* am = LibVEX_Alloc_inline(sizeof(X86AMode));
+   am->tag = Xam_IRRS;
+   am->Xam.IRRS.imm = imm32;
+   am->Xam.IRRS.base = base;
+   am->Xam.IRRS.index = indEx;
+   am->Xam.IRRS.shift = shift;
+   vassert(shift >= 0 && shift <= 3);
+   return am;
+}
+
+X86AMode* dopyX86AMode ( X86AMode* am ) {
+   switch (am->tag) {
+      case Xam_IR: 
+         return X86AMode_IR( am->Xam.IR.imm, am->Xam.IR.reg );
+      case Xam_IRRS: 
+         return X86AMode_IRRS( am->Xam.IRRS.imm, am->Xam.IRRS.base, 
+                               am->Xam.IRRS.index, am->Xam.IRRS.shift );
+      default:
+         vpanic("dopyX86AMode");
+   }
+}
+
+void ppX86AMode ( X86AMode* am ) {
+   switch (am->tag) {
+      case Xam_IR: 
+         if (am->Xam.IR.imm == 0)
+            vex_printf("(");
+         else
+            vex_printf("0x%x(", am->Xam.IR.imm);
+         ppHRegX86(am->Xam.IR.reg);
+         vex_printf(")");
+         return;
+      case Xam_IRRS:
+         vex_printf("0x%x(", am->Xam.IRRS.imm);
+         ppHRegX86(am->Xam.IRRS.base);
+         vex_printf(",");
+         ppHRegX86(am->Xam.IRRS.index);
+         vex_printf(",%d)", 1 << am->Xam.IRRS.shift);
+         return;
+      default:
+         vpanic("ppX86AMode");
+   }
+}
+
+static void addRegUsage_X86AMode ( HRegUsage* u, X86AMode* am ) {
+   switch (am->tag) {
+      case Xam_IR: 
+         addHRegUse(u, HRmRead, am->Xam.IR.reg);
+         return;
+      case Xam_IRRS:
+         addHRegUse(u, HRmRead, am->Xam.IRRS.base);
+         addHRegUse(u, HRmRead, am->Xam.IRRS.index);
+         return;
+      default:
+         vpanic("addRegUsage_X86AMode");
+   }
+}
+
+static void mapRegs_X86AMode ( HRegRemap* m, X86AMode* am ) {
+   switch (am->tag) {
+      case Xam_IR: 
+         am->Xam.IR.reg = lookupHRegRemap(m, am->Xam.IR.reg);
+         return;
+      case Xam_IRRS:
+         am->Xam.IRRS.base = lookupHRegRemap(m, am->Xam.IRRS.base);
+         am->Xam.IRRS.index = lookupHRegRemap(m, am->Xam.IRRS.index);
+         return;
+      default:
+         vpanic("mapRegs_X86AMode");
+   }
+}
+
+/* --------- Operand, which can be reg, immediate or memory. --------- */
+
+X86RMI* X86RMI_Imm ( UInt imm32 ) {
+   X86RMI* op         = LibVEX_Alloc_inline(sizeof(X86RMI));
+   op->tag            = Xrmi_Imm;
+   op->Xrmi.Imm.imm32 = imm32;
+   return op;
+}
+X86RMI* X86RMI_Reg ( HReg reg ) {
+   X86RMI* op       = LibVEX_Alloc_inline(sizeof(X86RMI));
+   op->tag          = Xrmi_Reg;
+   op->Xrmi.Reg.reg = reg;
+   return op;
+}
+X86RMI* X86RMI_Mem ( X86AMode* am ) {
+   X86RMI* op      = LibVEX_Alloc_inline(sizeof(X86RMI));
+   op->tag         = Xrmi_Mem;
+   op->Xrmi.Mem.am = am;
+   return op;
+}
+
+void ppX86RMI ( X86RMI* op ) {
+   switch (op->tag) {
+      case Xrmi_Imm: 
+         vex_printf("$0x%x", op->Xrmi.Imm.imm32);
+         return;
+      case Xrmi_Reg: 
+         ppHRegX86(op->Xrmi.Reg.reg);
+         return;
+      case Xrmi_Mem: 
+         ppX86AMode(op->Xrmi.Mem.am);
+         return;
+     default: 
+         vpanic("ppX86RMI");
+   }
+}
+
+/* An X86RMI can only be used in a "read" context (what would it mean
+   to write or modify a literal?) and so we enumerate its registers
+   accordingly. */
+static void addRegUsage_X86RMI ( HRegUsage* u, X86RMI* op ) {
+   switch (op->tag) {
+      case Xrmi_Imm: 
+         return;
+      case Xrmi_Reg: 
+         addHRegUse(u, HRmRead, op->Xrmi.Reg.reg);
+         return;
+      case Xrmi_Mem: 
+         addRegUsage_X86AMode(u, op->Xrmi.Mem.am);
+         return;
+      default: 
+         vpanic("addRegUsage_X86RMI");
+   }
+}
+
+static void mapRegs_X86RMI ( HRegRemap* m, X86RMI* op ) {
+   switch (op->tag) {
+      case Xrmi_Imm: 
+         return;
+      case Xrmi_Reg: 
+         op->Xrmi.Reg.reg = lookupHRegRemap(m, op->Xrmi.Reg.reg);
+         return;
+      case Xrmi_Mem: 
+         mapRegs_X86AMode(m, op->Xrmi.Mem.am);
+         return;
+      default: 
+         vpanic("mapRegs_X86RMI");
+   }
+}
+
+
+/* --------- Operand, which can be reg or immediate only. --------- */
+
+X86RI* X86RI_Imm ( UInt imm32 ) {
+   X86RI* op         = LibVEX_Alloc_inline(sizeof(X86RI));
+   op->tag           = Xri_Imm;
+   op->Xri.Imm.imm32 = imm32;
+   return op;
+}
+X86RI* X86RI_Reg ( HReg reg ) {
+   X86RI* op       = LibVEX_Alloc_inline(sizeof(X86RI));
+   op->tag         = Xri_Reg;
+   op->Xri.Reg.reg = reg;
+   return op;
+}
+
+void ppX86RI ( X86RI* op ) {
+   switch (op->tag) {
+      case Xri_Imm: 
+         vex_printf("$0x%x", op->Xri.Imm.imm32);
+         return;
+      case Xri_Reg: 
+         ppHRegX86(op->Xri.Reg.reg);
+         return;
+     default: 
+         vpanic("ppX86RI");
+   }
+}
+
+/* An X86RI can only be used in a "read" context (what would it mean
+   to write or modify a literal?) and so we enumerate its registers
+   accordingly. */
+static void addRegUsage_X86RI ( HRegUsage* u, X86RI* op ) {
+   switch (op->tag) {
+      case Xri_Imm: 
+         return;
+      case Xri_Reg: 
+         addHRegUse(u, HRmRead, op->Xri.Reg.reg);
+         return;
+      default: 
+         vpanic("addRegUsage_X86RI");
+   }
+}
+
+static void mapRegs_X86RI ( HRegRemap* m, X86RI* op ) {
+   switch (op->tag) {
+      case Xri_Imm: 
+         return;
+      case Xri_Reg: 
+         op->Xri.Reg.reg = lookupHRegRemap(m, op->Xri.Reg.reg);
+         return;
+      default: 
+         vpanic("mapRegs_X86RI");
+   }
+}
+
+
+/* --------- Operand, which can be reg or memory only. --------- */
+
+X86RM* X86RM_Reg ( HReg reg ) {
+   X86RM* op       = LibVEX_Alloc_inline(sizeof(X86RM));
+   op->tag         = Xrm_Reg;
+   op->Xrm.Reg.reg = reg;
+   return op;
+}
+X86RM* X86RM_Mem ( X86AMode* am ) {
+   X86RM* op      = LibVEX_Alloc_inline(sizeof(X86RM));
+   op->tag        = Xrm_Mem;
+   op->Xrm.Mem.am = am;
+   return op;
+}
+
+void ppX86RM ( X86RM* op ) {
+   switch (op->tag) {
+      case Xrm_Mem: 
+         ppX86AMode(op->Xrm.Mem.am);
+         return;
+      case Xrm_Reg: 
+         ppHRegX86(op->Xrm.Reg.reg);
+         return;
+     default: 
+         vpanic("ppX86RM");
+   }
+}
+
+/* Because an X86RM can be both a source or destination operand, we
+   have to supply a mode -- pertaining to the operand as a whole --
+   indicating how it's being used. */
+static void addRegUsage_X86RM ( HRegUsage* u, X86RM* op, HRegMode mode ) {
+   switch (op->tag) {
+      case Xrm_Mem: 
+         /* Memory is read, written or modified.  So we just want to
+            know the regs read by the amode. */
+         addRegUsage_X86AMode(u, op->Xrm.Mem.am);
+         return;
+      case Xrm_Reg: 
+         /* reg is read, written or modified.  Add it in the
+            appropriate way. */
+         addHRegUse(u, mode, op->Xrm.Reg.reg);
+         return;
+     default: 
+         vpanic("addRegUsage_X86RM");
+   }
+}
+
+static void mapRegs_X86RM ( HRegRemap* m, X86RM* op )
+{
+   switch (op->tag) {
+      case Xrm_Mem: 
+         mapRegs_X86AMode(m, op->Xrm.Mem.am);
+         return;
+      case Xrm_Reg: 
+         op->Xrm.Reg.reg = lookupHRegRemap(m, op->Xrm.Reg.reg);
+         return;
+     default: 
+         vpanic("mapRegs_X86RM");
+   }
+}
+
+
+/* --------- Instructions. --------- */
+
+const HChar* showX86UnaryOp ( X86UnaryOp op ) {
+   switch (op) {
+      case Xun_NOT: return "not";
+      case Xun_NEG: return "neg";
+      default: vpanic("showX86UnaryOp");
+   }
+}
+
+const HChar* showX86AluOp ( X86AluOp op ) {
+   switch (op) {
+      case Xalu_MOV:  return "mov";
+      case Xalu_CMP:  return "cmp";
+      case Xalu_ADD:  return "add";
+      case Xalu_SUB:  return "sub";
+      case Xalu_ADC:  return "adc";
+      case Xalu_SBB:  return "sbb";
+      case Xalu_AND:  return "and";
+      case Xalu_OR:   return "or";
+      case Xalu_XOR:  return "xor";
+      case Xalu_MUL:  return "mul";
+      default: vpanic("showX86AluOp");
+   }
+}
+
+const HChar* showX86ShiftOp ( X86ShiftOp op ) {
+   switch (op) {
+      case Xsh_SHL: return "shl";
+      case Xsh_SHR: return "shr";
+      case Xsh_SAR: return "sar";
+      default: vpanic("showX86ShiftOp");
+   }
+}
+
+const HChar* showX86FpOp ( X86FpOp op ) {
+   switch (op) {
+      case Xfp_ADD:    return "add";
+      case Xfp_SUB:    return "sub";
+      case Xfp_MUL:    return "mul";
+      case Xfp_DIV:    return "div";
+      case Xfp_SCALE:  return "scale";
+      case Xfp_ATAN:   return "atan";
+      case Xfp_YL2X:   return "yl2x";
+      case Xfp_YL2XP1: return "yl2xp1";
+      case Xfp_PREM:   return "prem";
+      case Xfp_PREM1:  return "prem1";
+      case Xfp_SQRT:   return "sqrt";
+      case Xfp_ABS:    return "abs";
+      case Xfp_NEG:    return "chs";
+      case Xfp_MOV:    return "mov";
+      case Xfp_SIN:    return "sin";
+      case Xfp_COS:    return "cos";
+      case Xfp_TAN:    return "tan";
+      case Xfp_ROUND:  return "round";
+      case Xfp_2XM1:   return "2xm1";
+      default: vpanic("showX86FpOp");
+   }
+}
+
/* Mnemonic for an SSE op.  Xsse_MOV has no single corresponding
   instruction, hence the "mov(?!)" marker. */
const HChar* showX86SseOp ( X86SseOp op ) {
   switch (op) {
      case Xsse_MOV:      return "mov(?!)";
      case Xsse_ADDF:     return "add";
      case Xsse_SUBF:     return "sub";
      case Xsse_MULF:     return "mul";
      case Xsse_DIVF:     return "div";
      case Xsse_MAXF:     return "max";
      case Xsse_MINF:     return "min";
      case Xsse_CMPEQF:   return "cmpFeq";
      case Xsse_CMPLTF:   return "cmpFlt";
      case Xsse_CMPLEF:   return "cmpFle";
      case Xsse_CMPUNF:   return "cmpFun";
      case Xsse_RCPF:     return "rcp";
      case Xsse_RSQRTF:   return "rsqrt";
      case Xsse_SQRTF:    return "sqrt";
      case Xsse_AND:      return "and";
      case Xsse_OR:       return "or";
      case Xsse_XOR:      return "xor";
      case Xsse_ANDN:     return "andn";
      case Xsse_ADD8:     return "paddb";
      case Xsse_ADD16:    return "paddw";
      case Xsse_ADD32:    return "paddd";
      case Xsse_ADD64:    return "paddq";
      case Xsse_QADD8U:   return "paddusb";
      case Xsse_QADD16U:  return "paddusw";
      case Xsse_QADD8S:   return "paddsb";
      case Xsse_QADD16S:  return "paddsw";
      case Xsse_SUB8:     return "psubb";
      case Xsse_SUB16:    return "psubw";
      case Xsse_SUB32:    return "psubd";
      case Xsse_SUB64:    return "psubq";
      case Xsse_QSUB8U:   return "psubusb";
      case Xsse_QSUB16U:  return "psubusw";
      case Xsse_QSUB8S:   return "psubsb";
      case Xsse_QSUB16S:  return "psubsw";
      case Xsse_MUL16:    return "pmullw";
      case Xsse_MULHI16U: return "pmulhuw";
      case Xsse_MULHI16S: return "pmulhw";
      case Xsse_AVG8U:    return "pavgb";
      case Xsse_AVG16U:   return "pavgw";
      case Xsse_MAX16S:   return "pmaxw";
      case Xsse_MAX8U:    return "pmaxub";
      case Xsse_MIN16S:   return "pminw";
      case Xsse_MIN8U:    return "pminub";
      case Xsse_CMPEQ8:   return "pcmpeqb";
      case Xsse_CMPEQ16:  return "pcmpeqw";
      case Xsse_CMPEQ32:  return "pcmpeqd";
      case Xsse_CMPGT8S:  return "pcmpgtb";
      case Xsse_CMPGT16S: return "pcmpgtw";
      case Xsse_CMPGT32S: return "pcmpgtd";
      case Xsse_SHL16:    return "psllw";
      case Xsse_SHL32:    return "pslld";
      case Xsse_SHL64:    return "psllq";
      case Xsse_SHR16:    return "psrlw";
      case Xsse_SHR32:    return "psrld";
      case Xsse_SHR64:    return "psrlq";
      case Xsse_SAR16:    return "psraw";
      case Xsse_SAR32:    return "psrad";
      case Xsse_PACKSSD:  return "packssdw";
      case Xsse_PACKSSW:  return "packsswb";
      case Xsse_PACKUSW:  return "packuswb";
      case Xsse_UNPCKHB:  return "punpckhb";
      case Xsse_UNPCKHW:  return "punpckhw";
      case Xsse_UNPCKHD:  return "punpckhd";
      case Xsse_UNPCKHQ:  return "punpckhq";
      case Xsse_UNPCKLB:  return "punpcklb";
      case Xsse_UNPCKLW:  return "punpcklw";
      case Xsse_UNPCKLD:  return "punpckld";
      case Xsse_UNPCKLQ:  return "punpcklq";
      default: vpanic("showX86SseOp");
   }
}
+
+X86Instr* X86Instr_Alu32R ( X86AluOp op, X86RMI* src, HReg dst ) {
+   X86Instr* i       = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag            = Xin_Alu32R;
+   i->Xin.Alu32R.op  = op;
+   i->Xin.Alu32R.src = src;
+   i->Xin.Alu32R.dst = dst;
+   return i;
+}
+X86Instr* X86Instr_Alu32M ( X86AluOp op, X86RI* src, X86AMode* dst ) {
+   X86Instr* i       = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag            = Xin_Alu32M;
+   i->Xin.Alu32M.op  = op;
+   i->Xin.Alu32M.src = src;
+   i->Xin.Alu32M.dst = dst;
+   vassert(op != Xalu_MUL);
+   return i;
+}
+X86Instr* X86Instr_Sh32 ( X86ShiftOp op, UInt src, HReg dst ) {
+   X86Instr* i     = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag          = Xin_Sh32;
+   i->Xin.Sh32.op  = op;
+   i->Xin.Sh32.src = src;
+   i->Xin.Sh32.dst = dst;
+   return i;
+}
+X86Instr* X86Instr_Test32 ( UInt imm32, X86RM* dst ) {
+   X86Instr* i         = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag              = Xin_Test32;
+   i->Xin.Test32.imm32 = imm32;
+   i->Xin.Test32.dst   = dst;
+   return i;
+}
+X86Instr* X86Instr_Unary32 ( X86UnaryOp op, HReg dst ) {
+   X86Instr* i        = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag             = Xin_Unary32;
+   i->Xin.Unary32.op  = op;
+   i->Xin.Unary32.dst = dst;
+   return i;
+}
+X86Instr* X86Instr_Lea32 ( X86AMode* am, HReg dst ) {
+   X86Instr* i        = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag             = Xin_Lea32;
+   i->Xin.Lea32.am    = am;
+   i->Xin.Lea32.dst   = dst;
+   return i;
+}
+X86Instr* X86Instr_MulL ( Bool syned, X86RM* src ) {
+   X86Instr* i        = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag             = Xin_MulL;
+   i->Xin.MulL.syned  = syned;
+   i->Xin.MulL.src    = src;
+   return i;
+}
+X86Instr* X86Instr_Div ( Bool syned, X86RM* src ) {
+   X86Instr* i      = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag           = Xin_Div;
+   i->Xin.Div.syned = syned;
+   i->Xin.Div.src   = src;
+   return i;
+}
+X86Instr* X86Instr_Sh3232  ( X86ShiftOp op, UInt amt, HReg src, HReg dst ) {
+   X86Instr* i       = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag            = Xin_Sh3232;
+   i->Xin.Sh3232.op  = op;
+   i->Xin.Sh3232.amt = amt;
+   i->Xin.Sh3232.src = src;
+   i->Xin.Sh3232.dst = dst;
+   vassert(op == Xsh_SHL || op == Xsh_SHR);
+   return i;
+}
+X86Instr* X86Instr_Push( X86RMI* src ) {
+   X86Instr* i     = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag          = Xin_Push;
+   i->Xin.Push.src = src;
+   return i;
+}
+X86Instr* X86Instr_Call ( X86CondCode cond, Addr32 target, Int regparms,
+                          RetLoc rloc ) {
+   X86Instr* i          = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag               = Xin_Call;
+   i->Xin.Call.cond     = cond;
+   i->Xin.Call.target   = target;
+   i->Xin.Call.regparms = regparms;
+   i->Xin.Call.rloc     = rloc;
+   vassert(regparms >= 0 && regparms <= 3);
+   vassert(is_sane_RetLoc(rloc));
+   return i;
+}
+X86Instr* X86Instr_XDirect ( Addr32 dstGA, X86AMode* amEIP,
+                             X86CondCode cond, Bool toFastEP ) {
+   X86Instr* i             = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag                  = Xin_XDirect;
+   i->Xin.XDirect.dstGA    = dstGA;
+   i->Xin.XDirect.amEIP    = amEIP;
+   i->Xin.XDirect.cond     = cond;
+   i->Xin.XDirect.toFastEP = toFastEP;
+   return i;
+}
+X86Instr* X86Instr_XIndir ( HReg dstGA, X86AMode* amEIP,
+                            X86CondCode cond ) {
+   X86Instr* i         = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag              = Xin_XIndir;
+   i->Xin.XIndir.dstGA = dstGA;
+   i->Xin.XIndir.amEIP = amEIP;
+   i->Xin.XIndir.cond  = cond;
+   return i;
+}
+X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP,
+                               X86CondCode cond, IRJumpKind jk ) {
+   X86Instr* i            = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag                 = Xin_XAssisted;
+   i->Xin.XAssisted.dstGA = dstGA;
+   i->Xin.XAssisted.amEIP = amEIP;
+   i->Xin.XAssisted.cond  = cond;
+   i->Xin.XAssisted.jk    = jk;
+   return i;
+}
+X86Instr* X86Instr_CMov32  ( X86CondCode cond, X86RM* src, HReg dst ) {
+   X86Instr* i        = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag             = Xin_CMov32;
+   i->Xin.CMov32.cond = cond;
+   i->Xin.CMov32.src  = src;
+   i->Xin.CMov32.dst  = dst;
+   vassert(cond != Xcc_ALWAYS);
+   return i;
+}
+X86Instr* X86Instr_LoadEX ( UChar szSmall, Bool syned,
+                            X86AMode* src, HReg dst ) {
+   X86Instr* i           = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag                = Xin_LoadEX;
+   i->Xin.LoadEX.szSmall = szSmall;
+   i->Xin.LoadEX.syned   = syned;
+   i->Xin.LoadEX.src     = src;
+   i->Xin.LoadEX.dst     = dst;
+   vassert(szSmall == 1 || szSmall == 2);
+   return i;
+}
+X86Instr* X86Instr_Store ( UChar sz, HReg src, X86AMode* dst ) {
+   X86Instr* i      = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag           = Xin_Store;
+   i->Xin.Store.sz  = sz;
+   i->Xin.Store.src = src;
+   i->Xin.Store.dst = dst;
+   vassert(sz == 1 || sz == 2);
+   return i;
+}
+X86Instr* X86Instr_Set32 ( X86CondCode cond, HReg dst ) {
+   X86Instr* i       = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag            = Xin_Set32;
+   i->Xin.Set32.cond = cond;
+   i->Xin.Set32.dst  = dst;
+   return i;
+}
+X86Instr* X86Instr_Bsfr32 ( Bool isFwds, HReg src, HReg dst ) {
+   X86Instr* i          = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag               = Xin_Bsfr32;
+   i->Xin.Bsfr32.isFwds = isFwds;
+   i->Xin.Bsfr32.src    = src;
+   i->Xin.Bsfr32.dst    = dst;
+   return i;
+}
+X86Instr* X86Instr_MFence ( UInt hwcaps ) {
+   X86Instr* i          = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag               = Xin_MFence;
+   i->Xin.MFence.hwcaps = hwcaps;
+   vassert(0 == (hwcaps & ~(VEX_HWCAPS_X86_MMXEXT
+                            |VEX_HWCAPS_X86_SSE1
+                            |VEX_HWCAPS_X86_SSE2
+                            |VEX_HWCAPS_X86_SSE3
+                            |VEX_HWCAPS_X86_LZCNT)));
+   return i;
+}
+X86Instr* X86Instr_ACAS ( X86AMode* addr, UChar sz ) {
+   X86Instr* i      = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag           = Xin_ACAS;
+   i->Xin.ACAS.addr = addr;
+   i->Xin.ACAS.sz   = sz;
+   vassert(sz == 4 || sz == 2 || sz == 1);
+   return i;
+}
+X86Instr* X86Instr_DACAS ( X86AMode* addr ) {
+   X86Instr* i       = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag            = Xin_DACAS;
+   i->Xin.DACAS.addr = addr;
+   return i;
+}
+
+X86Instr* X86Instr_FpUnary ( X86FpOp op, HReg src, HReg dst ) {
+   X86Instr* i        = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag             = Xin_FpUnary;
+   i->Xin.FpUnary.op  = op;
+   i->Xin.FpUnary.src = src;
+   i->Xin.FpUnary.dst = dst;
+   return i;
+}
+X86Instr* X86Instr_FpBinary ( X86FpOp op, HReg srcL, HReg srcR, HReg dst ) {
+   X86Instr* i          = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag               = Xin_FpBinary;
+   i->Xin.FpBinary.op   = op;
+   i->Xin.FpBinary.srcL = srcL;
+   i->Xin.FpBinary.srcR = srcR;
+   i->Xin.FpBinary.dst  = dst;
+   return i;
+}
+X86Instr* X86Instr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, X86AMode* addr ) {
+   X86Instr* i          = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag               = Xin_FpLdSt;
+   i->Xin.FpLdSt.isLoad = isLoad;
+   i->Xin.FpLdSt.sz     = sz;
+   i->Xin.FpLdSt.reg    = reg;
+   i->Xin.FpLdSt.addr   = addr;
+   vassert(sz == 4 || sz == 8 || sz == 10);
+   return i;
+}
+X86Instr* X86Instr_FpLdStI ( Bool isLoad, UChar sz,  
+                             HReg reg, X86AMode* addr ) {
+   X86Instr* i           = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag                = Xin_FpLdStI;
+   i->Xin.FpLdStI.isLoad = isLoad;
+   i->Xin.FpLdStI.sz     = sz;
+   i->Xin.FpLdStI.reg    = reg;
+   i->Xin.FpLdStI.addr   = addr;
+   vassert(sz == 2 || sz == 4 || sz == 8);
+   return i;
+}
+X86Instr* X86Instr_Fp64to32 ( HReg src, HReg dst ) {
+   X86Instr* i         = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag              = Xin_Fp64to32;
+   i->Xin.Fp64to32.src = src;
+   i->Xin.Fp64to32.dst = dst;
+   return i;
+}
+X86Instr* X86Instr_FpCMov ( X86CondCode cond, HReg src, HReg dst ) {
+   X86Instr* i        = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag             = Xin_FpCMov;
+   i->Xin.FpCMov.cond = cond;
+   i->Xin.FpCMov.src  = src;
+   i->Xin.FpCMov.dst  = dst;
+   vassert(cond != Xcc_ALWAYS);
+   return i;
+}
+X86Instr* X86Instr_FpLdCW ( X86AMode* addr ) {
+   X86Instr* i          = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag               = Xin_FpLdCW;
+   i->Xin.FpLdCW.addr   = addr;
+   return i;
+}
+X86Instr* X86Instr_FpStSW_AX ( void ) {
+   X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag      = Xin_FpStSW_AX;
+   return i;
+}
+X86Instr* X86Instr_FpCmp ( HReg srcL, HReg srcR, HReg dst ) {
+   X86Instr* i       = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag            = Xin_FpCmp;
+   i->Xin.FpCmp.srcL = srcL;
+   i->Xin.FpCmp.srcR = srcR;
+   i->Xin.FpCmp.dst  = dst;
+   return i;
+}
+X86Instr* X86Instr_SseConst ( UShort con, HReg dst ) {
+   X86Instr* i            = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag                 = Xin_SseConst;
+   i->Xin.SseConst.con    = con;
+   i->Xin.SseConst.dst    = dst;
+   vassert(hregClass(dst) == HRcVec128);
+   return i;
+}
+X86Instr* X86Instr_SseLdSt ( Bool isLoad, HReg reg, X86AMode* addr ) {
+   X86Instr* i           = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag                = Xin_SseLdSt;
+   i->Xin.SseLdSt.isLoad = isLoad;
+   i->Xin.SseLdSt.reg    = reg;
+   i->Xin.SseLdSt.addr   = addr;
+   return i;
+}
+X86Instr* X86Instr_SseLdzLO  ( Int sz, HReg reg, X86AMode* addr )
+{
+   X86Instr* i           = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag                = Xin_SseLdzLO;
+   i->Xin.SseLdzLO.sz    = toUChar(sz);
+   i->Xin.SseLdzLO.reg   = reg;
+   i->Xin.SseLdzLO.addr  = addr;
+   vassert(sz == 4 || sz == 8);
+   return i;
+}
+X86Instr* X86Instr_Sse32Fx4 ( X86SseOp op, HReg src, HReg dst ) {
+   X86Instr* i         = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag              = Xin_Sse32Fx4;
+   i->Xin.Sse32Fx4.op  = op;
+   i->Xin.Sse32Fx4.src = src;
+   i->Xin.Sse32Fx4.dst = dst;
+   vassert(op != Xsse_MOV);
+   return i;
+}
+X86Instr* X86Instr_Sse32FLo ( X86SseOp op, HReg src, HReg dst ) {
+   X86Instr* i         = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag              = Xin_Sse32FLo;
+   i->Xin.Sse32FLo.op  = op;
+   i->Xin.Sse32FLo.src = src;
+   i->Xin.Sse32FLo.dst = dst;
+   vassert(op != Xsse_MOV);
+   return i;
+}
+X86Instr* X86Instr_Sse64Fx2 ( X86SseOp op, HReg src, HReg dst ) {
+   X86Instr* i         = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag              = Xin_Sse64Fx2;
+   i->Xin.Sse64Fx2.op  = op;
+   i->Xin.Sse64Fx2.src = src;
+   i->Xin.Sse64Fx2.dst = dst;
+   vassert(op != Xsse_MOV);
+   return i;
+}
+X86Instr* X86Instr_Sse64FLo ( X86SseOp op, HReg src, HReg dst ) {
+   X86Instr* i         = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag              = Xin_Sse64FLo;
+   i->Xin.Sse64FLo.op  = op;
+   i->Xin.Sse64FLo.src = src;
+   i->Xin.Sse64FLo.dst = dst;
+   vassert(op != Xsse_MOV);
+   return i;
+}
+X86Instr* X86Instr_SseReRg ( X86SseOp op, HReg re, HReg rg ) {
+   X86Instr* i        = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag             = Xin_SseReRg;
+   i->Xin.SseReRg.op  = op;
+   i->Xin.SseReRg.src = re;
+   i->Xin.SseReRg.dst = rg;
+   return i;
+}
+X86Instr* X86Instr_SseCMov ( X86CondCode cond, HReg src, HReg dst ) {
+   X86Instr* i         = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag              = Xin_SseCMov;
+   i->Xin.SseCMov.cond = cond;
+   i->Xin.SseCMov.src  = src;
+   i->Xin.SseCMov.dst  = dst;
+   vassert(cond != Xcc_ALWAYS);
+   return i;
+}
+X86Instr* X86Instr_SseShuf ( Int order, HReg src, HReg dst ) {
+   X86Instr* i          = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag               = Xin_SseShuf;
+   i->Xin.SseShuf.order = order;
+   i->Xin.SseShuf.src   = src;
+   i->Xin.SseShuf.dst   = dst;
+   vassert(order >= 0 && order <= 0xFF);
+   return i;
+}
+X86Instr* X86Instr_EvCheck ( X86AMode* amCounter,
+                             X86AMode* amFailAddr ) {
+   X86Instr* i               = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag                    = Xin_EvCheck;
+   i->Xin.EvCheck.amCounter  = amCounter;
+   i->Xin.EvCheck.amFailAddr = amFailAddr;
+   return i;
+}
+X86Instr* X86Instr_ProfInc ( void ) {
+   X86Instr* i = LibVEX_Alloc_inline(sizeof(X86Instr));
+   i->tag      = Xin_ProfInc;
+   return i;
+}
+
/* Pretty-print an X86Instr to the VEX log, in roughly AT&T syntax.
   This backend only supports 32-bit mode, hence the mode64 check.
   NOTE(review): a few arms end with 'break' instead of 'return';
   since nothing follows the switch, behaviour is identical. */
void ppX86Instr ( const X86Instr* i, Bool mode64 ) {
   vassert(mode64 == False);
   switch (i->tag) {
      case Xin_Alu32R:
         vex_printf("%sl ", showX86AluOp(i->Xin.Alu32R.op));
         ppX86RMI(i->Xin.Alu32R.src);
         vex_printf(",");
         ppHRegX86(i->Xin.Alu32R.dst);
         return;
      case Xin_Alu32M:
         vex_printf("%sl ", showX86AluOp(i->Xin.Alu32M.op));
         ppX86RI(i->Xin.Alu32M.src);
         vex_printf(",");
         ppX86AMode(i->Xin.Alu32M.dst);
         return;
      case Xin_Sh32:
         vex_printf("%sl ", showX86ShiftOp(i->Xin.Sh32.op));
         /* src == 0 encodes a shift amount taken from %cl */
         if (i->Xin.Sh32.src == 0)
           vex_printf("%%cl,"); 
         else 
            vex_printf("$%d,", (Int)i->Xin.Sh32.src);
         ppHRegX86(i->Xin.Sh32.dst);
         return;
      case Xin_Test32:
         vex_printf("testl $%d,", (Int)i->Xin.Test32.imm32);
         ppX86RM(i->Xin.Test32.dst);
         return;
      case Xin_Unary32:
         vex_printf("%sl ", showX86UnaryOp(i->Xin.Unary32.op));
         ppHRegX86(i->Xin.Unary32.dst);
         return;
      case Xin_Lea32:
         vex_printf("leal ");
         ppX86AMode(i->Xin.Lea32.am);
         vex_printf(",");
         ppHRegX86(i->Xin.Lea32.dst);
         return;
      case Xin_MulL:
         /* 's'/'u' prefix distinguishes signed from unsigned */
         vex_printf("%cmull ", i->Xin.MulL.syned ? 's' : 'u');
         ppX86RM(i->Xin.MulL.src);
         return;
      case Xin_Div:
         vex_printf("%cdivl ", i->Xin.Div.syned ? 's' : 'u');
         ppX86RM(i->Xin.Div.src);
         return;
      case Xin_Sh3232:
         vex_printf("%sdl ", showX86ShiftOp(i->Xin.Sh3232.op));
         /* amt == 0 encodes a shift amount taken from %cl */
         if (i->Xin.Sh3232.amt == 0)
           vex_printf(" %%cl,"); 
         else 
            vex_printf(" $%d,", (Int)i->Xin.Sh3232.amt);
         ppHRegX86(i->Xin.Sh3232.src);
         vex_printf(",");
         ppHRegX86(i->Xin.Sh3232.dst);
         return;
      case Xin_Push:
         vex_printf("pushl ");
         ppX86RMI(i->Xin.Push.src);
         return;
      case Xin_Call:
         vex_printf("call%s[%d,", 
                    i->Xin.Call.cond==Xcc_ALWAYS 
                       ? "" : showX86CondCode(i->Xin.Call.cond), 
                    i->Xin.Call.regparms);
         ppRetLoc(i->Xin.Call.rloc);
         vex_printf("] 0x%x", i->Xin.Call.target);
         break;
      case Xin_XDirect:
         vex_printf("(xDirect) ");
         vex_printf("if (%%eflags.%s) { ",
                    showX86CondCode(i->Xin.XDirect.cond));
         vex_printf("movl $0x%x,", i->Xin.XDirect.dstGA);
         ppX86AMode(i->Xin.XDirect.amEIP);
         vex_printf("; ");
         vex_printf("movl $disp_cp_chain_me_to_%sEP,%%edx; call *%%edx }",
                    i->Xin.XDirect.toFastEP ? "fast" : "slow");
         return;
      case Xin_XIndir:
         vex_printf("(xIndir) ");
         vex_printf("if (%%eflags.%s) { movl ",
                    showX86CondCode(i->Xin.XIndir.cond));
         ppHRegX86(i->Xin.XIndir.dstGA);
         vex_printf(",");
         ppX86AMode(i->Xin.XIndir.amEIP);
         vex_printf("; movl $disp_indir,%%edx; jmp *%%edx }");
         return;
      case Xin_XAssisted:
         vex_printf("(xAssisted) ");
         vex_printf("if (%%eflags.%s) { ",
                    showX86CondCode(i->Xin.XAssisted.cond));
         vex_printf("movl ");
         ppHRegX86(i->Xin.XAssisted.dstGA);
         vex_printf(",");
         ppX86AMode(i->Xin.XAssisted.amEIP);
         vex_printf("; movl $IRJumpKind_to_TRCVAL(%d),%%ebp",
                    (Int)i->Xin.XAssisted.jk);
         vex_printf("; movl $disp_assisted,%%edx; jmp *%%edx }");
         return;
      case Xin_CMov32:
         vex_printf("cmov%s ", showX86CondCode(i->Xin.CMov32.cond));
         ppX86RM(i->Xin.CMov32.src);
         vex_printf(",");
         ppHRegX86(i->Xin.CMov32.dst);
         return;
      case Xin_LoadEX:
         vex_printf("mov%c%cl ",
                    i->Xin.LoadEX.syned ? 's' : 'z',
                    i->Xin.LoadEX.szSmall==1 ? 'b' : 'w');
         ppX86AMode(i->Xin.LoadEX.src);
         vex_printf(",");
         ppHRegX86(i->Xin.LoadEX.dst);
         return;
      case Xin_Store:
         vex_printf("mov%c ", i->Xin.Store.sz==1 ? 'b' : 'w');
         ppHRegX86(i->Xin.Store.src);
         vex_printf(",");
         ppX86AMode(i->Xin.Store.dst);
         return;
      case Xin_Set32:
         vex_printf("setl%s ", showX86CondCode(i->Xin.Set32.cond));
         ppHRegX86(i->Xin.Set32.dst);
         return;
      case Xin_Bsfr32:
         vex_printf("bs%cl ", i->Xin.Bsfr32.isFwds ? 'f' : 'r');
         ppHRegX86(i->Xin.Bsfr32.src);
         vex_printf(",");
         ppHRegX86(i->Xin.Bsfr32.dst);
         return;
      case Xin_MFence:
         vex_printf("mfence(%s)",
                    LibVEX_ppVexHwCaps(VexArchX86,i->Xin.MFence.hwcaps));
         return;
      case Xin_ACAS:
         vex_printf("lock cmpxchg%c ",
                     i->Xin.ACAS.sz==1 ? 'b' 
                                       : i->Xin.ACAS.sz==2 ? 'w' : 'l');
         vex_printf("{%%eax->%%ebx},");
         ppX86AMode(i->Xin.ACAS.addr);
         return;
      case Xin_DACAS:
         vex_printf("lock cmpxchg8b {%%edx:%%eax->%%ecx:%%ebx},");
         ppX86AMode(i->Xin.DACAS.addr);
         return;
      case Xin_FpUnary:
         vex_printf("g%sD ", showX86FpOp(i->Xin.FpUnary.op));
         ppHRegX86(i->Xin.FpUnary.src);
         vex_printf(",");
         ppHRegX86(i->Xin.FpUnary.dst);
         break;
      case Xin_FpBinary:
         vex_printf("g%sD ", showX86FpOp(i->Xin.FpBinary.op));
         ppHRegX86(i->Xin.FpBinary.srcL);
         vex_printf(",");
         ppHRegX86(i->Xin.FpBinary.srcR);
         vex_printf(",");
         ppHRegX86(i->Xin.FpBinary.dst);
         break;
      case Xin_FpLdSt:
         /* size letter: T = 10-byte extended, D = double, F = float */
         if (i->Xin.FpLdSt.isLoad) {
            vex_printf("gld%c " ,  i->Xin.FpLdSt.sz==10 ? 'T'
                                   : (i->Xin.FpLdSt.sz==8 ? 'D' : 'F'));
            ppX86AMode(i->Xin.FpLdSt.addr);
            vex_printf(", ");
            ppHRegX86(i->Xin.FpLdSt.reg);
         } else {
            vex_printf("gst%c " , i->Xin.FpLdSt.sz==10 ? 'T'
                                  : (i->Xin.FpLdSt.sz==8 ? 'D' : 'F'));
            ppHRegX86(i->Xin.FpLdSt.reg);
            vex_printf(", ");
            ppX86AMode(i->Xin.FpLdSt.addr);
         }
         return;
      case Xin_FpLdStI:
         if (i->Xin.FpLdStI.isLoad) {
            vex_printf("gild%s ", i->Xin.FpLdStI.sz==8 ? "ll" : 
                                  i->Xin.FpLdStI.sz==4 ? "l" : "w");
            ppX86AMode(i->Xin.FpLdStI.addr);
            vex_printf(", ");
            ppHRegX86(i->Xin.FpLdStI.reg);
         } else {
            vex_printf("gist%s ", i->Xin.FpLdStI.sz==8 ? "ll" : 
                                  i->Xin.FpLdStI.sz==4 ? "l" : "w");
            ppHRegX86(i->Xin.FpLdStI.reg);
            vex_printf(", ");
            ppX86AMode(i->Xin.FpLdStI.addr);
         }
         return;
      case Xin_Fp64to32:
         vex_printf("gdtof ");
         ppHRegX86(i->Xin.Fp64to32.src);
         vex_printf(",");
         ppHRegX86(i->Xin.Fp64to32.dst);
         return;
      case Xin_FpCMov:
         vex_printf("gcmov%s ", showX86CondCode(i->Xin.FpCMov.cond));
         ppHRegX86(i->Xin.FpCMov.src);
         vex_printf(",");
         ppHRegX86(i->Xin.FpCMov.dst);
         return;
      case Xin_FpLdCW:
         vex_printf("fldcw ");
         ppX86AMode(i->Xin.FpLdCW.addr);
         return;
      case Xin_FpStSW_AX:
         vex_printf("fstsw %%ax");
         return;
      case Xin_FpCmp:
         vex_printf("gcmp ");
         ppHRegX86(i->Xin.FpCmp.srcL);
         vex_printf(",");
         ppHRegX86(i->Xin.FpCmp.srcR);
         vex_printf(",");
         ppHRegX86(i->Xin.FpCmp.dst);
         break;
      case Xin_SseConst:
         vex_printf("const $0x%04x,", (Int)i->Xin.SseConst.con);
         ppHRegX86(i->Xin.SseConst.dst);
         break;
      case Xin_SseLdSt:
         vex_printf("movups ");
         if (i->Xin.SseLdSt.isLoad) {
            ppX86AMode(i->Xin.SseLdSt.addr);
            vex_printf(",");
            ppHRegX86(i->Xin.SseLdSt.reg);
         } else {
            ppHRegX86(i->Xin.SseLdSt.reg);
            vex_printf(",");
            ppX86AMode(i->Xin.SseLdSt.addr);
         }
         return;
      case Xin_SseLdzLO:
         vex_printf("movs%s ", i->Xin.SseLdzLO.sz==4 ? "s" : "d");
         ppX86AMode(i->Xin.SseLdzLO.addr);
         vex_printf(",");
         ppHRegX86(i->Xin.SseLdzLO.reg);
         return;
      case Xin_Sse32Fx4:
         vex_printf("%sps ", showX86SseOp(i->Xin.Sse32Fx4.op));
         ppHRegX86(i->Xin.Sse32Fx4.src);
         vex_printf(",");
         ppHRegX86(i->Xin.Sse32Fx4.dst);
         return;
      case Xin_Sse32FLo:
         vex_printf("%sss ", showX86SseOp(i->Xin.Sse32FLo.op));
         ppHRegX86(i->Xin.Sse32FLo.src);
         vex_printf(",");
         ppHRegX86(i->Xin.Sse32FLo.dst);
         return;
      case Xin_Sse64Fx2:
         vex_printf("%spd ", showX86SseOp(i->Xin.Sse64Fx2.op));
         ppHRegX86(i->Xin.Sse64Fx2.src);
         vex_printf(",");
         ppHRegX86(i->Xin.Sse64Fx2.dst);
         return;
      case Xin_Sse64FLo:
         vex_printf("%ssd ", showX86SseOp(i->Xin.Sse64FLo.op));
         ppHRegX86(i->Xin.Sse64FLo.src);
         vex_printf(",");
         ppHRegX86(i->Xin.Sse64FLo.dst);
         return;
      case Xin_SseReRg:
         vex_printf("%s ", showX86SseOp(i->Xin.SseReRg.op));
         ppHRegX86(i->Xin.SseReRg.src);
         vex_printf(",");
         ppHRegX86(i->Xin.SseReRg.dst);
         return;
      case Xin_SseCMov:
         vex_printf("cmov%s ", showX86CondCode(i->Xin.SseCMov.cond));
         ppHRegX86(i->Xin.SseCMov.src);
         vex_printf(",");
         ppHRegX86(i->Xin.SseCMov.dst);
         return;
      case Xin_SseShuf:
         vex_printf("pshufd $0x%x,", i->Xin.SseShuf.order);
         ppHRegX86(i->Xin.SseShuf.src);
         vex_printf(",");
         ppHRegX86(i->Xin.SseShuf.dst);
         return;
      case Xin_EvCheck:
         vex_printf("(evCheck) decl ");
         ppX86AMode(i->Xin.EvCheck.amCounter);
         vex_printf("; jns nofail; jmp *");
         ppX86AMode(i->Xin.EvCheck.amFailAddr);
         vex_printf("; nofail:");
         return;
      case Xin_ProfInc:
         /* counter address is patched in later, hence 'NotKnownYet' */
         vex_printf("(profInc) addl $1,NotKnownYet; "
                    "adcl $0,NotKnownYet+4");
         return;
      default:
         vpanic("ppX86Instr");
   }
}
+
+/* --------- Helpers for register allocation. --------- */
+
+void getRegUsage_X86Instr (HRegUsage* u, const X86Instr* i, Bool mode64)
+{
+   Bool unary;
+   vassert(mode64 == False);
+   initHRegUsage(u);
+   switch (i->tag) {
+      case Xin_Alu32R:
+         addRegUsage_X86RMI(u, i->Xin.Alu32R.src);
+         if (i->Xin.Alu32R.op == Xalu_MOV) {
+            addHRegUse(u, HRmWrite, i->Xin.Alu32R.dst);
+            return;
+         }
+         if (i->Xin.Alu32R.op == Xalu_CMP) { 
+            addHRegUse(u, HRmRead, i->Xin.Alu32R.dst);
+            return;
+         }
+         addHRegUse(u, HRmModify, i->Xin.Alu32R.dst);
+         return;
+      case Xin_Alu32M:
+         addRegUsage_X86RI(u, i->Xin.Alu32M.src);
+         addRegUsage_X86AMode(u, i->Xin.Alu32M.dst);
+         return;
+      case Xin_Sh32:
+         addHRegUse(u, HRmModify, i->Xin.Sh32.dst);
+         if (i->Xin.Sh32.src == 0)
+            addHRegUse(u, HRmRead, hregX86_ECX());
+         return;
+      case Xin_Test32:
+         addRegUsage_X86RM(u, i->Xin.Test32.dst, HRmRead);
+         return;
+      case Xin_Unary32:
+         addHRegUse(u, HRmModify, i->Xin.Unary32.dst);
+         return;
+      case Xin_Lea32:
+         addRegUsage_X86AMode(u, i->Xin.Lea32.am);
+         addHRegUse(u, HRmWrite, i->Xin.Lea32.dst);
+         return;
+      case Xin_MulL:
+         addRegUsage_X86RM(u, i->Xin.MulL.src, HRmRead);
+         addHRegUse(u, HRmModify, hregX86_EAX());
+         addHRegUse(u, HRmWrite, hregX86_EDX());
+         return;
+      case Xin_Div:
+         addRegUsage_X86RM(u, i->Xin.Div.src, HRmRead);
+         addHRegUse(u, HRmModify, hregX86_EAX());
+         addHRegUse(u, HRmModify, hregX86_EDX());
+         return;
+      case Xin_Sh3232:
+         addHRegUse(u, HRmRead, i->Xin.Sh3232.src);
+         addHRegUse(u, HRmModify, i->Xin.Sh3232.dst);
+         if (i->Xin.Sh3232.amt == 0)
+            addHRegUse(u, HRmRead, hregX86_ECX());
+         return;
+      case Xin_Push:
+         addRegUsage_X86RMI(u, i->Xin.Push.src);
+         addHRegUse(u, HRmModify, hregX86_ESP());
+         return;
+      case Xin_Call:
+         /* This is a bit subtle. */
+         /* First off, claim it trashes all the caller-saved regs
+            which fall within the register allocator's jurisdiction.
+            These I believe to be %eax %ecx %edx and all the xmm
+            registers. */
+         addHRegUse(u, HRmWrite, hregX86_EAX());
+         addHRegUse(u, HRmWrite, hregX86_ECX());
+         addHRegUse(u, HRmWrite, hregX86_EDX());
+         addHRegUse(u, HRmWrite, hregX86_XMM0());
+         addHRegUse(u, HRmWrite, hregX86_XMM1());
+         addHRegUse(u, HRmWrite, hregX86_XMM2());
+         addHRegUse(u, HRmWrite, hregX86_XMM3());
+         addHRegUse(u, HRmWrite, hregX86_XMM4());
+         addHRegUse(u, HRmWrite, hregX86_XMM5());
+         addHRegUse(u, HRmWrite, hregX86_XMM6());
+         addHRegUse(u, HRmWrite, hregX86_XMM7());
+         /* Now we have to state any parameter-carrying registers
+            which might be read.  This depends on the regparmness. */
+         switch (i->Xin.Call.regparms) {
+            case 3: addHRegUse(u, HRmRead, hregX86_ECX()); /*fallthru*/
+            case 2: addHRegUse(u, HRmRead, hregX86_EDX()); /*fallthru*/
+            case 1: addHRegUse(u, HRmRead, hregX86_EAX()); break;
+            case 0: break;
+            default: vpanic("getRegUsage_X86Instr:Call:regparms");
+         }
+         /* Finally, there is the issue that the insn trashes a
+            register because the literal target address has to be
+            loaded into a register.  Fortunately, for the 0/1/2
+            regparm case, we can use EAX, EDX and ECX respectively, so
+            this does not cause any further damage.  For the 3-regparm
+            case, we'll have to choose another register arbitrarily --
+            since A, D and C are used for parameters -- and so we might
+            as well choose EDI. */
+         if (i->Xin.Call.regparms == 3)
+            addHRegUse(u, HRmWrite, hregX86_EDI());
+         /* Upshot of this is that the assembler really must observe
+            the here-stated convention of which register to use as an
+            address temporary, depending on the regparmness: 0==EAX,
+            1==EDX, 2==ECX, 3==EDI. */
+         return;
+      /* XDirect/XIndir/XAssisted are also a bit subtle.  They
+         conditionally exit the block.  Hence we only need to list (1)
+         the registers that they read, and (2) the registers that they
+         write in the case where the block is not exited.  (2) is
+         empty, hence only (1) is relevant here. */
+      case Xin_XDirect:
+         addRegUsage_X86AMode(u, i->Xin.XDirect.amEIP);
+         return;
+      case Xin_XIndir:
+         addHRegUse(u, HRmRead, i->Xin.XIndir.dstGA);
+         addRegUsage_X86AMode(u, i->Xin.XIndir.amEIP);
+         return;
+      case Xin_XAssisted:
+         addHRegUse(u, HRmRead, i->Xin.XAssisted.dstGA);
+         addRegUsage_X86AMode(u, i->Xin.XAssisted.amEIP);
+         return;
+      case Xin_CMov32:
+         addRegUsage_X86RM(u, i->Xin.CMov32.src, HRmRead);
+         addHRegUse(u, HRmModify, i->Xin.CMov32.dst);
+         return;
+      case Xin_LoadEX:
+         addRegUsage_X86AMode(u, i->Xin.LoadEX.src);
+         addHRegUse(u, HRmWrite, i->Xin.LoadEX.dst);
+         return;
+      case Xin_Store:
+         addHRegUse(u, HRmRead, i->Xin.Store.src);
+         addRegUsage_X86AMode(u, i->Xin.Store.dst);
+         return;
+      case Xin_Set32:
+         addHRegUse(u, HRmWrite, i->Xin.Set32.dst);
+         return;
+      case Xin_Bsfr32:
+         addHRegUse(u, HRmRead, i->Xin.Bsfr32.src);
+         addHRegUse(u, HRmWrite, i->Xin.Bsfr32.dst);
+         return;
+      case Xin_MFence:
+         return;
+      case Xin_ACAS:
+         addRegUsage_X86AMode(u, i->Xin.ACAS.addr);
+         addHRegUse(u, HRmRead, hregX86_EBX());
+         addHRegUse(u, HRmModify, hregX86_EAX());
+         return;
+      case Xin_DACAS:
+         addRegUsage_X86AMode(u, i->Xin.DACAS.addr);
+         addHRegUse(u, HRmRead, hregX86_ECX());
+         addHRegUse(u, HRmRead, hregX86_EBX());
+         addHRegUse(u, HRmModify, hregX86_EDX());
+         addHRegUse(u, HRmModify, hregX86_EAX());
+         return;
+      case Xin_FpUnary:
+         addHRegUse(u, HRmRead, i->Xin.FpUnary.src);
+         addHRegUse(u, HRmWrite, i->Xin.FpUnary.dst);
+         return;
+      case Xin_FpBinary:
+         addHRegUse(u, HRmRead, i->Xin.FpBinary.srcL);
+         addHRegUse(u, HRmRead, i->Xin.FpBinary.srcR);
+         addHRegUse(u, HRmWrite, i->Xin.FpBinary.dst);
+         return;
+      case Xin_FpLdSt:
+         addRegUsage_X86AMode(u, i->Xin.FpLdSt.addr);
+         addHRegUse(u, i->Xin.FpLdSt.isLoad ? HRmWrite : HRmRead,
+                       i->Xin.FpLdSt.reg);
+         return;
+      case Xin_FpLdStI:
+         addRegUsage_X86AMode(u, i->Xin.FpLdStI.addr);
+         addHRegUse(u, i->Xin.FpLdStI.isLoad ? HRmWrite : HRmRead,
+                       i->Xin.FpLdStI.reg);
+         return;
+      case Xin_Fp64to32:
+         addHRegUse(u, HRmRead,  i->Xin.Fp64to32.src);
+         addHRegUse(u, HRmWrite, i->Xin.Fp64to32.dst);
+         return;
+      case Xin_FpCMov:
+         addHRegUse(u, HRmRead,   i->Xin.FpCMov.src);
+         addHRegUse(u, HRmModify, i->Xin.FpCMov.dst);
+         return;
+      case Xin_FpLdCW:
+         addRegUsage_X86AMode(u, i->Xin.FpLdCW.addr);
+         return;
+      case Xin_FpStSW_AX:
+         addHRegUse(u, HRmWrite, hregX86_EAX());
+         return;
+      case Xin_FpCmp:
+         addHRegUse(u, HRmRead, i->Xin.FpCmp.srcL);
+         addHRegUse(u, HRmRead, i->Xin.FpCmp.srcR);
+         addHRegUse(u, HRmWrite, i->Xin.FpCmp.dst);
+         addHRegUse(u, HRmWrite, hregX86_EAX());
+         return;
+      case Xin_SseLdSt:
+         addRegUsage_X86AMode(u, i->Xin.SseLdSt.addr);
+         addHRegUse(u, i->Xin.SseLdSt.isLoad ? HRmWrite : HRmRead,
+                       i->Xin.SseLdSt.reg);
+         return;
+      case Xin_SseLdzLO:
+         addRegUsage_X86AMode(u, i->Xin.SseLdzLO.addr);
+         addHRegUse(u, HRmWrite, i->Xin.SseLdzLO.reg);
+         return;
+      case Xin_SseConst:
+         addHRegUse(u, HRmWrite, i->Xin.SseConst.dst);
+         return;
+      case Xin_Sse32Fx4:
+         vassert(i->Xin.Sse32Fx4.op != Xsse_MOV);
+         unary = toBool( i->Xin.Sse32Fx4.op == Xsse_RCPF
+                         || i->Xin.Sse32Fx4.op == Xsse_RSQRTF
+                         || i->Xin.Sse32Fx4.op == Xsse_SQRTF );
+         addHRegUse(u, HRmRead, i->Xin.Sse32Fx4.src);
+         addHRegUse(u, unary ? HRmWrite : HRmModify, 
+                       i->Xin.Sse32Fx4.dst);
+         return;
+      case Xin_Sse32FLo:
+         vassert(i->Xin.Sse32FLo.op != Xsse_MOV);
+         unary = toBool( i->Xin.Sse32FLo.op == Xsse_RCPF
+                         || i->Xin.Sse32FLo.op == Xsse_RSQRTF
+                         || i->Xin.Sse32FLo.op == Xsse_SQRTF );
+         addHRegUse(u, HRmRead, i->Xin.Sse32FLo.src);
+         addHRegUse(u, unary ? HRmWrite : HRmModify, 
+                       i->Xin.Sse32FLo.dst);
+         return;
+      case Xin_Sse64Fx2:
+         vassert(i->Xin.Sse64Fx2.op != Xsse_MOV);
+         unary = toBool( i->Xin.Sse64Fx2.op == Xsse_RCPF
+                         || i->Xin.Sse64Fx2.op == Xsse_RSQRTF
+                         || i->Xin.Sse64Fx2.op == Xsse_SQRTF );
+         addHRegUse(u, HRmRead, i->Xin.Sse64Fx2.src);
+         addHRegUse(u, unary ? HRmWrite : HRmModify, 
+                       i->Xin.Sse64Fx2.dst);
+         return;
+      case Xin_Sse64FLo:
+         vassert(i->Xin.Sse64FLo.op != Xsse_MOV);
+         unary = toBool( i->Xin.Sse64FLo.op == Xsse_RCPF
+                         || i->Xin.Sse64FLo.op == Xsse_RSQRTF
+                         || i->Xin.Sse64FLo.op == Xsse_SQRTF );
+         addHRegUse(u, HRmRead, i->Xin.Sse64FLo.src);
+         addHRegUse(u, unary ? HRmWrite : HRmModify, 
+                       i->Xin.Sse64FLo.dst);
+         return;
+      case Xin_SseReRg:
+         if (i->Xin.SseReRg.op == Xsse_XOR
+             && sameHReg(i->Xin.SseReRg.src, i->Xin.SseReRg.dst)) {
+            /* reg-alloc needs to understand 'xor r,r' as a write of r */
+            /* (as opposed to a rite of passage :-) */
+            addHRegUse(u, HRmWrite, i->Xin.SseReRg.dst);
+         } else {
+            addHRegUse(u, HRmRead, i->Xin.SseReRg.src);
+            addHRegUse(u, i->Xin.SseReRg.op == Xsse_MOV 
+                             ? HRmWrite : HRmModify, 
+                          i->Xin.SseReRg.dst);
+         }
+         return;
+      case Xin_SseCMov:
+         addHRegUse(u, HRmRead,   i->Xin.SseCMov.src);
+         addHRegUse(u, HRmModify, i->Xin.SseCMov.dst);
+         return;
+      case Xin_SseShuf:
+         addHRegUse(u, HRmRead,  i->Xin.SseShuf.src);
+         addHRegUse(u, HRmWrite, i->Xin.SseShuf.dst);
+         return;
+      case Xin_EvCheck:
+         /* We expect both amodes only to mention %ebp, so this is in
+            fact pointless, since %ebp isn't allocatable, but anyway.. */
+         addRegUsage_X86AMode(u, i->Xin.EvCheck.amCounter);
+         addRegUsage_X86AMode(u, i->Xin.EvCheck.amFailAddr);
+         return;
+      case Xin_ProfInc:
+         /* does not use any registers. */
+         return;
+      default:
+         ppX86Instr(i, False);
+         vpanic("getRegUsage_X86Instr");
+   }
+}
+
+/* Local helper: rewrite *r in place through the allocator's remap
+   table (virtual register -> allocated real register). */
+static void mapReg( HRegRemap* m, HReg* r )
+{
+   *r = lookupHRegRemap(m, *r);
+}
+
+/* Apply the vreg->rreg mapping 'm' to every register field of 'i',
+   in place, under the direction of the register allocator.  The set
+   of registers visited here must agree exactly with the set reported
+   by getRegUsage_X86Instr.  x86 is 32-bit only, so mode64 is always
+   False.  Fix: the SseLdSt/SseLdzLO cases used 'break' where every
+   other case uses 'return' -- harmless only because nothing follows
+   the switch; made consistent to remove the trap for future edits. */
+void mapRegs_X86Instr ( HRegRemap* m, X86Instr* i, Bool mode64 )
+{
+   vassert(mode64 == False);
+   switch (i->tag) {
+      case Xin_Alu32R:
+         mapRegs_X86RMI(m, i->Xin.Alu32R.src);
+         mapReg(m, &i->Xin.Alu32R.dst);
+         return;
+      case Xin_Alu32M:
+         mapRegs_X86RI(m, i->Xin.Alu32M.src);
+         mapRegs_X86AMode(m, i->Xin.Alu32M.dst);
+         return;
+      case Xin_Sh32:
+         mapReg(m, &i->Xin.Sh32.dst);
+         return;
+      case Xin_Test32:
+         mapRegs_X86RM(m, i->Xin.Test32.dst);
+         return;
+      case Xin_Unary32:
+         mapReg(m, &i->Xin.Unary32.dst);
+         return;
+      case Xin_Lea32:
+         mapRegs_X86AMode(m, i->Xin.Lea32.am);
+         mapReg(m, &i->Xin.Lea32.dst);
+         return;
+      case Xin_MulL:
+         mapRegs_X86RM(m, i->Xin.MulL.src);
+         return;
+      case Xin_Div:
+         mapRegs_X86RM(m, i->Xin.Div.src);
+         return;
+      case Xin_Sh3232:
+         mapReg(m, &i->Xin.Sh3232.src);
+         mapReg(m, &i->Xin.Sh3232.dst);
+         return;
+      case Xin_Push:
+         mapRegs_X86RMI(m, i->Xin.Push.src);
+         return;
+      case Xin_Call:
+         /* target/args are fixed real regs; nothing to remap */
+         return;
+      case Xin_XDirect:
+         mapRegs_X86AMode(m, i->Xin.XDirect.amEIP);
+         return;
+      case Xin_XIndir:
+         mapReg(m, &i->Xin.XIndir.dstGA);
+         mapRegs_X86AMode(m, i->Xin.XIndir.amEIP);
+         return;
+      case Xin_XAssisted:
+         mapReg(m, &i->Xin.XAssisted.dstGA);
+         mapRegs_X86AMode(m, i->Xin.XAssisted.amEIP);
+         return;
+      case Xin_CMov32:
+         mapRegs_X86RM(m, i->Xin.CMov32.src);
+         mapReg(m, &i->Xin.CMov32.dst);
+         return;
+      case Xin_LoadEX:
+         mapRegs_X86AMode(m, i->Xin.LoadEX.src);
+         mapReg(m, &i->Xin.LoadEX.dst);
+         return;
+      case Xin_Store:
+         mapReg(m, &i->Xin.Store.src);
+         mapRegs_X86AMode(m, i->Xin.Store.dst);
+         return;
+      case Xin_Set32:
+         mapReg(m, &i->Xin.Set32.dst);
+         return;
+      case Xin_Bsfr32:
+         mapReg(m, &i->Xin.Bsfr32.src);
+         mapReg(m, &i->Xin.Bsfr32.dst);
+         return;
+      case Xin_MFence:
+         return;
+      case Xin_ACAS:
+         mapRegs_X86AMode(m, i->Xin.ACAS.addr);
+         return;
+      case Xin_DACAS:
+         mapRegs_X86AMode(m, i->Xin.DACAS.addr);
+         return;
+      case Xin_FpUnary:
+         mapReg(m, &i->Xin.FpUnary.src);
+         mapReg(m, &i->Xin.FpUnary.dst);
+         return;
+      case Xin_FpBinary:
+         mapReg(m, &i->Xin.FpBinary.srcL);
+         mapReg(m, &i->Xin.FpBinary.srcR);
+         mapReg(m, &i->Xin.FpBinary.dst);
+         return;
+      case Xin_FpLdSt:
+         mapRegs_X86AMode(m, i->Xin.FpLdSt.addr);
+         mapReg(m, &i->Xin.FpLdSt.reg);
+         return;
+      case Xin_FpLdStI:
+         mapRegs_X86AMode(m, i->Xin.FpLdStI.addr);
+         mapReg(m, &i->Xin.FpLdStI.reg);
+         return;
+      case Xin_Fp64to32:
+         mapReg(m, &i->Xin.Fp64to32.src);
+         mapReg(m, &i->Xin.Fp64to32.dst);
+         return;
+      case Xin_FpCMov:
+         mapReg(m, &i->Xin.FpCMov.src);
+         mapReg(m, &i->Xin.FpCMov.dst);
+         return;
+      case Xin_FpLdCW:
+         mapRegs_X86AMode(m, i->Xin.FpLdCW.addr);
+         return;
+      case Xin_FpStSW_AX:
+         /* only touches %ax, a fixed real register */
+         return;
+      case Xin_FpCmp:
+         mapReg(m, &i->Xin.FpCmp.srcL);
+         mapReg(m, &i->Xin.FpCmp.srcR);
+         mapReg(m, &i->Xin.FpCmp.dst);
+         return;
+      case Xin_SseConst:
+         mapReg(m, &i->Xin.SseConst.dst);
+         return;
+      case Xin_SseLdSt:
+         mapReg(m, &i->Xin.SseLdSt.reg);
+         mapRegs_X86AMode(m, i->Xin.SseLdSt.addr);
+         return; /* was 'break'; identical behaviour, now consistent */
+      case Xin_SseLdzLO:
+         mapReg(m, &i->Xin.SseLdzLO.reg);
+         mapRegs_X86AMode(m, i->Xin.SseLdzLO.addr);
+         return; /* was 'break'; identical behaviour, now consistent */
+      case Xin_Sse32Fx4:
+         mapReg(m, &i->Xin.Sse32Fx4.src);
+         mapReg(m, &i->Xin.Sse32Fx4.dst);
+         return;
+      case Xin_Sse32FLo:
+         mapReg(m, &i->Xin.Sse32FLo.src);
+         mapReg(m, &i->Xin.Sse32FLo.dst);
+         return;
+      case Xin_Sse64Fx2:
+         mapReg(m, &i->Xin.Sse64Fx2.src);
+         mapReg(m, &i->Xin.Sse64Fx2.dst);
+         return;
+      case Xin_Sse64FLo:
+         mapReg(m, &i->Xin.Sse64FLo.src);
+         mapReg(m, &i->Xin.Sse64FLo.dst);
+         return;
+      case Xin_SseReRg:
+         mapReg(m, &i->Xin.SseReRg.src);
+         mapReg(m, &i->Xin.SseReRg.dst);
+         return;
+      case Xin_SseCMov:
+         mapReg(m, &i->Xin.SseCMov.src);
+         mapReg(m, &i->Xin.SseCMov.dst);
+         return;
+      case Xin_SseShuf:
+         mapReg(m, &i->Xin.SseShuf.src);
+         mapReg(m, &i->Xin.SseShuf.dst);
+         return;
+      case Xin_EvCheck:
+         /* We expect both amodes only to mention %ebp, so this is in
+            fact pointless, since %ebp isn't allocatable, but anyway.. */
+         mapRegs_X86AMode(m, i->Xin.EvCheck.amCounter);
+         mapRegs_X86AMode(m, i->Xin.EvCheck.amFailAddr);
+         return;
+      case Xin_ProfInc:
+         /* does not use any registers. */
+         return;
+
+      default:
+         ppX86Instr(i, mode64);
+         vpanic("mapRegs_X86Instr");
+   }
+}
+
+/* Figure out if i represents a reg-reg move, and if so assign the
+   source and destination to *src and *dst.  If in doubt say No.  Used
+   by the register allocator to do move coalescing. 
+*/
+Bool isMove_X86Instr ( const X86Instr* i, HReg* src, HReg* dst )
+{
+   switch (i->tag) {
+      /* movl %reg, %reg : only when src is the register form */
+      case Xin_Alu32R:
+         if (i->Xin.Alu32R.op == Xalu_MOV
+             && i->Xin.Alu32R.src->tag == Xrmi_Reg) {
+            *src = i->Xin.Alu32R.src->Xrmi.Reg.reg;
+            *dst = i->Xin.Alu32R.dst;
+            return True;
+         }
+         return False;
+      /* FP reg -> FP reg move */
+      case Xin_FpUnary:
+         if (i->Xin.FpUnary.op == Xfp_MOV) {
+            *src = i->Xin.FpUnary.src;
+            *dst = i->Xin.FpUnary.dst;
+            return True;
+         }
+         return False;
+      /* SSE reg -> SSE reg move */
+      case Xin_SseReRg:
+         if (i->Xin.SseReRg.op == Xsse_MOV) {
+            *src = i->Xin.SseReRg.src;
+            *dst = i->Xin.SseReRg.dst;
+            return True;
+         }
+         return False;
+      default:
+         return False;
+   }
+}
+
+
+/* Generate x86 spill/reload instructions under the direction of the
+   register allocator.  Note it's critical these don't write the
+   condition codes. */
+
+/* Emit at most two instructions (*i1, *i2; unused slots left NULL)
+   that store real register 'rreg' to the spill slot at offsetB bytes
+   off the guest-state/baseblock pointer %ebp.  Must not write the
+   condition codes (see comment above).  mode64 is always False on x86. */
+void genSpill_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                    HReg rreg, Int offsetB, Bool mode64 )
+{
+   X86AMode* am;
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+   vassert(mode64 == False);
+   *i1 = *i2 = NULL;
+   am = X86AMode_IR(offsetB, hregX86_EBP());
+   switch (hregClass(rreg)) {
+      case HRcInt32:
+         /* movl %rreg, offsetB(%ebp) */
+         *i1 = X86Instr_Alu32M ( Xalu_MOV, X86RI_Reg(rreg), am );
+         return;
+      case HRcFlt64:
+         /* FP store; '10' is presumably the size in bytes (80-bit
+            extended real) -- confirm against X86Instr_FpLdSt */
+         *i1 = X86Instr_FpLdSt ( False/*store*/, 10, rreg, am );
+         return;
+      case HRcVec128:
+         *i1 = X86Instr_SseLdSt ( False/*store*/, rreg, am );
+         return;
+      default: 
+         ppHRegClass(hregClass(rreg));
+         vpanic("genSpill_X86: unimplemented regclass");
+   }
+}
+
+/* Mirror of genSpill_X86: load real register 'rreg' back from the
+   spill slot at offsetB(%ebp).  Must not write the condition codes.
+   mode64 is always False on x86. */
+void genReload_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                     HReg rreg, Int offsetB, Bool mode64 )
+{
+   X86AMode* am;
+   vassert(offsetB >= 0);
+   vassert(!hregIsVirtual(rreg));
+   vassert(mode64 == False);
+   *i1 = *i2 = NULL;
+   am = X86AMode_IR(offsetB, hregX86_EBP());
+   switch (hregClass(rreg)) {
+      case HRcInt32:
+         /* movl offsetB(%ebp), %rreg */
+         *i1 = X86Instr_Alu32R ( Xalu_MOV, X86RMI_Mem(am), rreg );
+         return;
+      case HRcFlt64:
+         /* '10' matches the size used in genSpill_X86 */
+         *i1 = X86Instr_FpLdSt ( True/*load*/, 10, rreg, am );
+         return;
+      case HRcVec128:
+         *i1 = X86Instr_SseLdSt ( True/*load*/, rreg, am );
+         return;
+      default: 
+         ppHRegClass(hregClass(rreg));
+         vpanic("genReload_X86: unimplemented regclass");
+   }
+}
+
+/* The given instruction reads the specified vreg exactly once, and
+   that vreg is currently located at the given spill offset.  If
+   possible, return a variant of the instruction to one which instead
+   references the spill slot directly. */
+
+/* See block comment above: try to fold a single-use spilled vreg
+   directly into 'i' as a memory operand, avoiding a reload insn.
+   Returns a fresh instruction on success, NULL if no folding applies. */
+X86Instr* directReload_X86( X86Instr* i, HReg vreg, Short spill_off )
+{
+   vassert(spill_off >= 0 && spill_off < 10000); /* let's say */
+
+   /* Deal with form: src=RMI_Reg, dst=Reg where src == vreg 
+      Convert to: src=RMI_Mem, dst=Reg 
+   */
+   if (i->tag == Xin_Alu32R
+       && (i->Xin.Alu32R.op == Xalu_MOV || i->Xin.Alu32R.op == Xalu_OR
+           || i->Xin.Alu32R.op == Xalu_XOR)
+       && i->Xin.Alu32R.src->tag == Xrmi_Reg
+       && sameHReg(i->Xin.Alu32R.src->Xrmi.Reg.reg, vreg)) {
+      /* caller promised vreg is read exactly once, so it can't also
+         be the destination */
+      vassert(! sameHReg(i->Xin.Alu32R.dst, vreg));
+      return X86Instr_Alu32R( 
+                i->Xin.Alu32R.op, 
+                X86RMI_Mem( X86AMode_IR( spill_off, hregX86_EBP())),
+                i->Xin.Alu32R.dst
+             );
+   }
+
+   /* Deal with form: src=RMI_Imm, dst=Reg where dst == vreg 
+      Convert to: src=RI_Imm, dst=Mem
+      (only for CMP, which merely reads dst and sets flags)
+   */
+   if (i->tag == Xin_Alu32R
+       && (i->Xin.Alu32R.op == Xalu_CMP)
+       && i->Xin.Alu32R.src->tag == Xrmi_Imm
+       && sameHReg(i->Xin.Alu32R.dst, vreg)) {
+      return X86Instr_Alu32M( 
+                i->Xin.Alu32R.op,
+		X86RI_Imm( i->Xin.Alu32R.src->Xrmi.Imm.imm32 ),
+                X86AMode_IR( spill_off, hregX86_EBP())
+             );
+   }
+
+   /* Deal with form: Push(RMI_Reg)
+      Convert to: Push(RMI_Mem) 
+   */
+   if (i->tag == Xin_Push
+       && i->Xin.Push.src->tag == Xrmi_Reg
+       && sameHReg(i->Xin.Push.src->Xrmi.Reg.reg, vreg)) {
+      return X86Instr_Push(
+                X86RMI_Mem( X86AMode_IR( spill_off, hregX86_EBP()))
+             );
+   }
+
+   /* Deal with form: CMov32(src=RM_Reg, dst) where vreg == src
+      Convert to CMov32(RM_Mem, dst) */
+   if (i->tag == Xin_CMov32
+       && i->Xin.CMov32.src->tag == Xrm_Reg
+       && sameHReg(i->Xin.CMov32.src->Xrm.Reg.reg, vreg)) {
+      vassert(! sameHReg(i->Xin.CMov32.dst, vreg));
+      return X86Instr_CMov32( 
+                i->Xin.CMov32.cond,
+                X86RM_Mem( X86AMode_IR( spill_off, hregX86_EBP() )),
+                i->Xin.CMov32.dst
+             );
+   }
+
+   /* Deal with form: Test32(imm,RM_Reg vreg) -> Test32(imm,amode) */
+   if (i->tag == Xin_Test32
+       && i->Xin.Test32.dst->tag == Xrm_Reg
+       && sameHReg(i->Xin.Test32.dst->Xrm.Reg.reg, vreg)) {
+      return X86Instr_Test32(
+                i->Xin.Test32.imm32,
+                X86RM_Mem( X86AMode_IR( spill_off, hregX86_EBP() ) )
+             );
+   }
+
+   /* no applicable folding */
+   return NULL;
+}
+
+
+/* --------- The x86 assembler (bleh.) --------- */
+
+/* Hardware encoding (0..7) of an allocated 32-bit integer register. */
+inline static UInt iregEnc ( HReg r )
+{
+   UInt encoding;
+   vassert(hregClass(r) == HRcInt32);
+   vassert(!hregIsVirtual(r));
+   encoding = hregEncoding(r);
+   vassert(encoding <= 7);
+   return encoding;
+}
+
+/* Hardware encoding of an allocated x87 FP register; the asserted
+   range 0..5 suggests two stack slots are reserved elsewhere --
+   confirm against the FP register model in this file. */
+inline static UInt fregEnc ( HReg r )
+{
+   UInt n;
+   vassert(hregClass(r) == HRcFlt64);
+   vassert(!hregIsVirtual(r));
+   n = hregEncoding(r);
+   vassert(n <= 5);
+   return n;
+}
+
+/* Hardware encoding (0..7, %xmm0..%xmm7) of an allocated SSE register. */
+inline static UInt vregEnc ( HReg r )
+{
+   UInt n;
+   vassert(hregClass(r) == HRcVec128);
+   vassert(!hregIsVirtual(r));
+   n = hregEncoding(r);
+   vassert(n <= 7);
+   return n;
+}
+
+/* Assemble a ModRM byte: mod in bits 7:6, reg in 5:3, r/m in 2:0. */
+inline static UChar mkModRegRM ( UInt mod, UInt reg, UInt regmem )
+{
+   UInt b;
+   vassert(mod < 4);
+   vassert((reg|regmem) < 8);
+   b = ((mod & 3) << 6) | ((reg & 7) << 3) | (regmem & 7);
+   return (UChar)b;
+}
+
+/* Assemble a SIB byte: scale in bits 7:6, index in 5:3, base in 2:0. */
+inline static UChar mkSIB ( UInt shift, UInt regindex, UInt regbase )
+{
+   vassert(shift < 4);
+   vassert((regindex|regbase) < 8);
+   return (UChar)( ((shift & 3) << 6) | ((regindex & 7) << 3) | (regbase & 7) );
+}
+
+/* Emit a 32-bit word at p, least-significant byte first (x86 is
+   little-endian); return the advanced pointer. */
+static UChar* emit32 ( UChar* p, UInt w32 )
+{
+   Int k;
+   for (k = 0; k < 4; k++) {
+      *p++ = toUChar(w32 & 0x000000FF);
+      w32 >>= 8;
+   }
+   return p;
+}
+
+/* True iff sign-extending the lowest 8 bits of w32 reproduces w32,
+   i.e. the value can be encoded as an imm8. */
+static Bool fits8bits ( UInt w32 )
+{
+   Int sext8;
+   /* shift the low byte to the top, then arithmetic-shift back down */
+   sext8 = (Int)(w32 << 24) >> 24;
+   return toBool(sext8 == (Int)w32);
+}
+
+
+/* Forming mod-reg-rm bytes and scale-index-base bytes.
+
+     greg,  0(ereg)    |  ereg != ESP && ereg != EBP
+                       =  00 greg ereg
+
+     greg,  d8(ereg)   |  ereg != ESP
+                       =  01 greg ereg, d8
+
+     greg,  d32(ereg)  |  ereg != ESP
+                       =  10 greg ereg, d32
+
+     greg,  d8(%esp)   =  01 greg 100, 0x24, d8
+
+     -----------------------------------------------
+
+     greg,  d8(base,index,scale)  
+               |  index != ESP
+               =  01 greg 100, scale index base, d8
+
+     greg,  d32(base,index,scale)
+               |  index != ESP
+               =  10 greg 100, scale index base, d32
+*/
+/* Emit the ModRM (+ optional SIB, displacement) bytes selecting
+   memory operand 'am' with reg-field encoding 'gregEnc', per the
+   scheme documented in the comment above.  Returns advanced p. */
+static UChar* doAMode_M__wrk ( UChar* p, UInt gregEnc, X86AMode* am )
+{
+   if (am->tag == Xam_IR) {
+      /* 0(ereg), ereg not ESP/EBP: mod=00, no displacement */
+      if (am->Xam.IR.imm == 0 
+          && ! sameHReg(am->Xam.IR.reg, hregX86_ESP())
+          && ! sameHReg(am->Xam.IR.reg, hregX86_EBP()) ) {
+         *p++ = mkModRegRM(0, gregEnc, iregEnc(am->Xam.IR.reg));
+         return p;
+      }
+      /* d8(ereg), ereg not ESP: mod=01, 8-bit displacement */
+      if (fits8bits(am->Xam.IR.imm)
+          && ! sameHReg(am->Xam.IR.reg, hregX86_ESP())) {
+         *p++ = mkModRegRM(1, gregEnc, iregEnc(am->Xam.IR.reg));
+         *p++ = toUChar(am->Xam.IR.imm & 0xFF);
+         return p;
+      }
+      /* d32(ereg), ereg not ESP: mod=10, 32-bit displacement */
+      if (! sameHReg(am->Xam.IR.reg, hregX86_ESP())) {
+         *p++ = mkModRegRM(2, gregEnc, iregEnc(am->Xam.IR.reg));
+         p = emit32(p, am->Xam.IR.imm);
+         return p;
+      }
+      /* d8(%esp): needs SIB byte 0x24 because rm=100 selects SIB */
+      if (sameHReg(am->Xam.IR.reg, hregX86_ESP())
+          && fits8bits(am->Xam.IR.imm)) {
+ 	 *p++ = mkModRegRM(1, gregEnc, 4);
+         *p++ = 0x24;
+         *p++ = toUChar(am->Xam.IR.imm & 0xFF);
+         return p;
+      }
+      /* note: d32(%esp) is not handled */
+      ppX86AMode(am);
+      vpanic("doAMode_M: can't emit amode IR");
+      /*NOTREACHED*/
+   }
+   if (am->tag == Xam_IRRS) {
+      /* d8(base,index,scale), index not ESP: mod=01 + SIB + d8 */
+      if (fits8bits(am->Xam.IRRS.imm)
+          && ! sameHReg(am->Xam.IRRS.index, hregX86_ESP())) {
+         *p++ = mkModRegRM(1, gregEnc, 4);
+         *p++ = mkSIB(am->Xam.IRRS.shift, iregEnc(am->Xam.IRRS.index),
+                                          iregEnc(am->Xam.IRRS.base));
+         *p++ = toUChar(am->Xam.IRRS.imm & 0xFF);
+         return p;
+      }
+      /* d32(base,index,scale), index not ESP: mod=10 + SIB + d32 */
+      if (! sameHReg(am->Xam.IRRS.index, hregX86_ESP())) {
+         *p++ = mkModRegRM(2, gregEnc, 4);
+         *p++ = mkSIB(am->Xam.IRRS.shift, iregEnc(am->Xam.IRRS.index),
+                                          iregEnc(am->Xam.IRRS.base));
+         p = emit32(p, am->Xam.IRRS.imm);
+         return p;
+      }
+      ppX86AMode(am);
+      vpanic("doAMode_M: can't emit amode IRRS");
+      /*NOTREACHED*/
+   }
+   vpanic("doAMode_M: unknown amode");
+   /*NOTREACHED*/
+}
+
+/* As doAMode_M__wrk, with the greg field given as an HReg. */
+static UChar* doAMode_M ( UChar* p, HReg greg, X86AMode* am )
+{
+   return doAMode_M__wrk(p, iregEnc(greg), am);
+}
+
+/* As doAMode_M__wrk, with the greg field given as a raw encoding
+   (used for opcode-extension /digit forms). */
+static UChar* doAMode_M_enc ( UChar* p, UInt gregEnc, X86AMode* am )
+{
+   vassert(gregEnc < 8);
+   return doAMode_M__wrk(p, gregEnc, am);
+}
+
+
+/* Emit a mod-reg-rm byte when the rm bit denotes a reg (mod=11,
+   register-direct form). */
+inline static UChar* doAMode_R__wrk ( UChar* p, UInt gregEnc, UInt eregEnc ) 
+{
+   *p++ = mkModRegRM(3, gregEnc, eregEnc);
+   return p;
+}
+
+/* Register-direct ModRM with both fields given as HRegs. */
+static UChar* doAMode_R ( UChar* p, HReg greg, HReg ereg )
+{
+   return doAMode_R__wrk(p, iregEnc(greg), iregEnc(ereg));
+}
+
+/* Register-direct ModRM: raw encoding (opcode extension) in the reg
+   field, HReg in the r/m field. */
+static UChar* doAMode_R_enc_reg ( UChar* p, UInt gregEnc, HReg ereg )
+{
+   vassert(gregEnc < 8);
+   return doAMode_R__wrk(p, gregEnc, iregEnc(ereg));
+}
+
+/* Register-direct ModRM with both fields given as raw encodings. */
+static UChar* doAMode_R_enc_enc ( UChar* p, UInt gregEnc, UInt eregEnc )
+{
+   vassert( (gregEnc|eregEnc) < 8);
+   return doAMode_R__wrk(p, gregEnc, eregEnc);
+}
+
+
+/* Emit ffree %st(7) -- frees the bottom FP stack slot so a
+   subsequent push cannot overflow the x87 stack.  Bytes: DD C7. */
+static UChar* do_ffree_st7 ( UChar* p )
+{
+   *p++ = 0xDD;
+   *p++ = 0xC7;
+   return p;
+}
+
+/* Emit fstp %st(i), 1 <= i <= 7.  Bytes: DD D8+i. */
+static UChar* do_fstp_st ( UChar* p, Int i )
+{
+   vassert(1 <= i && i <= 7);
+   *p++ = 0xDD;
+   *p++ = toUChar(0xD8+i);
+   return p;
+}
+
+/* Emit fld %st(i), 0 <= i <= 6.  Bytes: D9 C0+i. */
+static UChar* do_fld_st ( UChar* p, Int i )
+{
+   vassert(0 <= i && i <= 6);
+   *p++ = 0xD9;
+   *p++ = toUChar(0xC0+i);
+   return p;
+}
+
+/* Emit an x87 unary op applied to %st(0).  For Xfp_MOV nothing is
+   emitted (the value is already in %st(0)).  Xfp_TAN needs a fixup
+   sequence, described inline.  Returns advanced p. */
+static UChar* do_fop1_st ( UChar* p, X86FpOp op )
+{
+   switch (op) {
+      case Xfp_NEG:    *p++ = 0xD9; *p++ = 0xE0; break;  /* fchs */
+      case Xfp_ABS:    *p++ = 0xD9; *p++ = 0xE1; break;  /* fabs */
+      case Xfp_SQRT:   *p++ = 0xD9; *p++ = 0xFA; break;  /* fsqrt */
+      case Xfp_ROUND:  *p++ = 0xD9; *p++ = 0xFC; break;  /* frndint */
+      case Xfp_SIN:    *p++ = 0xD9; *p++ = 0xFE; break;  /* fsin */
+      case Xfp_COS:    *p++ = 0xD9; *p++ = 0xFF; break;  /* fcos */
+      case Xfp_2XM1:   *p++ = 0xD9; *p++ = 0xF0; break;  /* f2xm1 */
+      case Xfp_MOV:    break;  /* no code needed */
+      case Xfp_TAN:
+         /* fptan pushes 1.0 on the FP stack, except when the argument
+            is out of range.  Hence we have to do the instruction,
+            then inspect C2 to see if there is an out of range
+            condition.  If there is, we skip the fincstp that is used
+            by the in-range case to get rid of this extra 1.0
+            value. */
+         p = do_ffree_st7(p); /* since fptan sometimes pushes 1.0 */
+         *p++ = 0xD9; *p++ = 0xF2; // fptan
+         *p++ = 0x50;              // pushl %eax
+         *p++ = 0xDF; *p++ = 0xE0; // fnstsw %ax
+         *p++ = 0x66; *p++ = 0xA9; 
+         *p++ = 0x00; *p++ = 0x04; // testw $0x400,%ax
+         *p++ = 0x75; *p++ = 0x02; // jnz after_fincstp
+         *p++ = 0xD9; *p++ = 0xF7; // fincstp
+         *p++ = 0x58;              // after_fincstp: popl %eax
+         break;
+      default:
+         vpanic("do_fop1_st: unknown op");
+   }
+   return p;
+}
+
+/* Emit an x87 binary op %st(0) := %st(0) <op> %st(i), 1 <= i <= 5.
+   The subopcode goes in the reg field of the D8-group ModRM byte
+   (per the Intel D8 /digit table: /0 fadd, /1 fmul, /4 fsub, /6 fdiv). */
+static UChar* do_fop2_st ( UChar* p, X86FpOp op, Int i )
+{
+   Int subopc;
+   switch (op) {
+      case Xfp_ADD: subopc = 0; break;
+      case Xfp_SUB: subopc = 4; break;
+      case Xfp_MUL: subopc = 1; break;
+      case Xfp_DIV: subopc = 6; break;
+      default: vpanic("do_fop2_st: unknown op");
+   }
+   *p++ = 0xD8;
+   p    = doAMode_R_enc_enc(p, subopc, i);
+   return p;
+}
+
+/* Push a 32-bit word on the stack, built from tags[3:0]: byte k of
+   the word is 0xFF if tag bit k is set, else 0x00.  Only the all-zero
+   and all-ones cases occur in practice; the mixed case is emitted but
+   guarded by vassert(0) until a test case exercises it. */
+static UChar* push_word_from_tags ( UChar* p, UShort tags )
+{
+   UInt w;
+   vassert(0 == (tags & ~0xF));
+   if (tags == 0) {
+      /* pushl $0x00000000 */
+      *p++ = 0x6A;
+      *p++ = 0x00;
+   }
+   else 
+   /* pushl $0xFFFFFFFF */
+   if (tags == 0xF) {
+      *p++ = 0x6A;
+      *p++ = 0xFF;
+   } else {
+      vassert(0); /* awaiting test case */
+      /* general case: build the word byte by byte, pushl $imm32 */
+      w = 0;
+      if (tags & 1) w |= 0x000000FF;
+      if (tags & 2) w |= 0x0000FF00;
+      if (tags & 4) w |= 0x00FF0000;
+      if (tags & 8) w |= 0xFF000000;
+      *p++ = 0x68;
+      p = emit32(p, w);
+   }
+   return p;
+}
+
+/* Emit an instruction into buf and return the number of bytes used.
+   Note that buf is not the insn's final place, and therefore it is
+   imperative to emit position-independent code.  If the emitted
+   instruction was a profiler inc, set *is_profInc to True, else
+   leave it unchanged. */
+
+Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
+                    UChar* buf, Int nbuf, const X86Instr* i, 
+                    Bool mode64, VexEndness endness_host,
+                    const void* disp_cp_chain_me_to_slowEP,
+                    const void* disp_cp_chain_me_to_fastEP,
+                    const void* disp_cp_xindir,
+                    const void* disp_cp_xassisted )
+{
+   UInt irno, opc, opc_rr, subopc_imm, opc_imma, opc_cl, opc_imm, subopc;
+
+   UInt   xtra;
+   UChar* p = &buf[0];
+   UChar* ptmp;
+   vassert(nbuf >= 32);
+   vassert(mode64 == False);
+
+   /* vex_printf("asm  ");ppX86Instr(i, mode64); vex_printf("\n"); */
+
+   switch (i->tag) {
+
+   case Xin_Alu32R:
+      /* Deal specially with MOV */
+      if (i->Xin.Alu32R.op == Xalu_MOV) {
+         switch (i->Xin.Alu32R.src->tag) {
+            case Xrmi_Imm:
+               *p++ = toUChar(0xB8 + iregEnc(i->Xin.Alu32R.dst));
+               p = emit32(p, i->Xin.Alu32R.src->Xrmi.Imm.imm32);
+               goto done;
+            case Xrmi_Reg:
+               *p++ = 0x89;
+               p = doAMode_R(p, i->Xin.Alu32R.src->Xrmi.Reg.reg,
+                                i->Xin.Alu32R.dst);
+               goto done;
+            case Xrmi_Mem:
+               *p++ = 0x8B;
+               p = doAMode_M(p, i->Xin.Alu32R.dst, 
+                                i->Xin.Alu32R.src->Xrmi.Mem.am);
+               goto done;
+            default:
+               goto bad;
+         }
+      }
+      /* MUL */
+      if (i->Xin.Alu32R.op == Xalu_MUL) {
+         switch (i->Xin.Alu32R.src->tag) {
+            case Xrmi_Reg:
+               *p++ = 0x0F;
+               *p++ = 0xAF;
+               p = doAMode_R(p, i->Xin.Alu32R.dst,
+                                i->Xin.Alu32R.src->Xrmi.Reg.reg);
+               goto done;
+            case Xrmi_Mem:
+               *p++ = 0x0F;
+               *p++ = 0xAF;
+               p = doAMode_M(p, i->Xin.Alu32R.dst,
+                                i->Xin.Alu32R.src->Xrmi.Mem.am);
+               goto done;
+            case Xrmi_Imm:
+               if (fits8bits(i->Xin.Alu32R.src->Xrmi.Imm.imm32)) {
+                  *p++ = 0x6B;
+                  p = doAMode_R(p, i->Xin.Alu32R.dst, i->Xin.Alu32R.dst);
+                  *p++ = toUChar(0xFF & i->Xin.Alu32R.src->Xrmi.Imm.imm32);
+               } else {
+                  *p++ = 0x69;
+                  p = doAMode_R(p, i->Xin.Alu32R.dst, i->Xin.Alu32R.dst);
+                  p = emit32(p, i->Xin.Alu32R.src->Xrmi.Imm.imm32);
+               }
+               goto done;
+            default:
+               goto bad;
+         }
+      }
+      /* ADD/SUB/ADC/SBB/AND/OR/XOR/CMP */
+      opc = opc_rr = subopc_imm = opc_imma = 0;
+      switch (i->Xin.Alu32R.op) {
+         case Xalu_ADC: opc = 0x13; opc_rr = 0x11; 
+                        subopc_imm = 2; opc_imma = 0x15; break;
+         case Xalu_ADD: opc = 0x03; opc_rr = 0x01; 
+                        subopc_imm = 0; opc_imma = 0x05; break;
+         case Xalu_SUB: opc = 0x2B; opc_rr = 0x29; 
+                        subopc_imm = 5; opc_imma = 0x2D; break;
+         case Xalu_SBB: opc = 0x1B; opc_rr = 0x19; 
+                        subopc_imm = 3; opc_imma = 0x1D; break;
+         case Xalu_AND: opc = 0x23; opc_rr = 0x21; 
+                        subopc_imm = 4; opc_imma = 0x25; break;
+         case Xalu_XOR: opc = 0x33; opc_rr = 0x31; 
+                        subopc_imm = 6; opc_imma = 0x35; break;
+         case Xalu_OR:  opc = 0x0B; opc_rr = 0x09; 
+                        subopc_imm = 1; opc_imma = 0x0D; break;
+         case Xalu_CMP: opc = 0x3B; opc_rr = 0x39; 
+                        subopc_imm = 7; opc_imma = 0x3D; break;
+         default: goto bad;
+      }
+      switch (i->Xin.Alu32R.src->tag) {
+         case Xrmi_Imm:
+            if (sameHReg(i->Xin.Alu32R.dst, hregX86_EAX())
+                && !fits8bits(i->Xin.Alu32R.src->Xrmi.Imm.imm32)) {
+               *p++ = toUChar(opc_imma);
+               p = emit32(p, i->Xin.Alu32R.src->Xrmi.Imm.imm32);
+            } else
+            if (fits8bits(i->Xin.Alu32R.src->Xrmi.Imm.imm32)) {
+               *p++ = 0x83; 
+               p    = doAMode_R_enc_reg(p, subopc_imm, i->Xin.Alu32R.dst);
+               *p++ = toUChar(0xFF & i->Xin.Alu32R.src->Xrmi.Imm.imm32);
+            } else {
+               *p++ = 0x81; 
+               p    = doAMode_R_enc_reg(p, subopc_imm, i->Xin.Alu32R.dst);
+               p    = emit32(p, i->Xin.Alu32R.src->Xrmi.Imm.imm32);
+            }
+            goto done;
+         case Xrmi_Reg:
+            *p++ = toUChar(opc_rr);
+            p = doAMode_R(p, i->Xin.Alu32R.src->Xrmi.Reg.reg,
+                             i->Xin.Alu32R.dst);
+            goto done;
+         case Xrmi_Mem:
+            *p++ = toUChar(opc);
+            p = doAMode_M(p, i->Xin.Alu32R.dst,
+                             i->Xin.Alu32R.src->Xrmi.Mem.am);
+            goto done;
+         default: 
+            goto bad;
+      }
+      break;
+
+   case Xin_Alu32M:
+      /* Deal specially with MOV */
+      if (i->Xin.Alu32M.op == Xalu_MOV) {
+         switch (i->Xin.Alu32M.src->tag) {
+            case Xri_Reg:
+               *p++ = 0x89;
+               p = doAMode_M(p, i->Xin.Alu32M.src->Xri.Reg.reg,
+                                i->Xin.Alu32M.dst);
+               goto done;
+            case Xri_Imm:
+               *p++ = 0xC7;
+               p = doAMode_M_enc(p, 0, i->Xin.Alu32M.dst);
+               p = emit32(p, i->Xin.Alu32M.src->Xri.Imm.imm32);
+               goto done;
+            default: 
+               goto bad;
+         }
+      }
+      /* ADD/SUB/ADC/SBB/AND/OR/XOR/CMP.  MUL is not
+         allowed here. */
+      opc = subopc_imm = opc_imma = 0;
+      switch (i->Xin.Alu32M.op) {
+         case Xalu_ADD: opc = 0x01; subopc_imm = 0; break;
+         case Xalu_SUB: opc = 0x29; subopc_imm = 5; break;
+         case Xalu_CMP: opc = 0x39; subopc_imm = 7; break;
+         default: goto bad;
+      }
+      switch (i->Xin.Alu32M.src->tag) {
+         case Xri_Reg:
+            *p++ = toUChar(opc);
+            p = doAMode_M(p, i->Xin.Alu32M.src->Xri.Reg.reg,
+                             i->Xin.Alu32M.dst);
+            goto done;
+         case Xri_Imm:
+            if (fits8bits(i->Xin.Alu32M.src->Xri.Imm.imm32)) {
+               *p++ = 0x83;
+               p    = doAMode_M_enc(p, subopc_imm, i->Xin.Alu32M.dst);
+               *p++ = toUChar(0xFF & i->Xin.Alu32M.src->Xri.Imm.imm32);
+               goto done;
+            } else {
+               *p++ = 0x81;
+               p    = doAMode_M_enc(p, subopc_imm, i->Xin.Alu32M.dst);
+               p    = emit32(p, i->Xin.Alu32M.src->Xri.Imm.imm32);
+               goto done;
+            }
+         default: 
+            goto bad;
+      }
+      break;
+
+   case Xin_Sh32:
+      opc_cl = opc_imm = subopc = 0;
+      switch (i->Xin.Sh32.op) {
+         case Xsh_SHR: opc_cl = 0xD3; opc_imm = 0xC1; subopc = 5; break;
+         case Xsh_SAR: opc_cl = 0xD3; opc_imm = 0xC1; subopc = 7; break;
+         case Xsh_SHL: opc_cl = 0xD3; opc_imm = 0xC1; subopc = 4; break;
+         default: goto bad;
+      }
+      if (i->Xin.Sh32.src == 0) {
+         *p++ = toUChar(opc_cl);
+         p = doAMode_R_enc_reg(p, subopc, i->Xin.Sh32.dst);
+      } else {
+         *p++ = toUChar(opc_imm);
+         p = doAMode_R_enc_reg(p, subopc, i->Xin.Sh32.dst);
+         *p++ = (UChar)(i->Xin.Sh32.src);
+      }
+      goto done;
+
+   case Xin_Test32:
+      /* TEST r/m32, imm32: opcode 0xF7 with ModRM reg field /0. */
+      if (i->Xin.Test32.dst->tag == Xrm_Reg) {
+         /* testl $imm32, %reg */
+         *p++ = 0xF7;
+         p = doAMode_R_enc_reg(p, 0, i->Xin.Test32.dst->Xrm.Reg.reg);
+         p = emit32(p, i->Xin.Test32.imm32);
+         goto done;
+      } else {
+         /* testl $imm32, amode */
+         *p++ = 0xF7;
+         p = doAMode_M_enc(p, 0, i->Xin.Test32.dst->Xrm.Mem.am);
+         p = emit32(p, i->Xin.Test32.imm32);
+         goto done;
+      }
+
+   case Xin_Unary32:
+      /* Unary op on a register: NOT is 0xF7 /2, NEG is 0xF7 /3.
+         Any other unary op falls through to 'bad'. */
+      if (i->Xin.Unary32.op == Xun_NOT) {
+         *p++ = 0xF7;
+         p = doAMode_R_enc_reg(p, 2, i->Xin.Unary32.dst);
+         goto done;
+      }
+      if (i->Xin.Unary32.op == Xun_NEG) {
+         *p++ = 0xF7;
+         p = doAMode_R_enc_reg(p, 3, i->Xin.Unary32.dst);
+         goto done;
+      }
+      break;
+
+   case Xin_Lea32:
+      /* leal amode, %dst (opcode 0x8D). */
+      *p++ = 0x8D;
+      p = doAMode_M(p, i->Xin.Lea32.dst, i->Xin.Lea32.am);
+      goto done;
+
+   case Xin_MulL:
+      /* One-operand widening multiply: 0xF7 /4 (mul, unsigned) or
+         /5 (imul, signed); src may be register or memory. */
+      subopc = i->Xin.MulL.syned ? 5 : 4;
+      *p++ = 0xF7;
+      switch (i->Xin.MulL.src->tag)  {
+         case Xrm_Mem:
+            p = doAMode_M_enc(p, subopc, i->Xin.MulL.src->Xrm.Mem.am);
+            goto done;
+         case Xrm_Reg:
+            p = doAMode_R_enc_reg(p, subopc, i->Xin.MulL.src->Xrm.Reg.reg);
+            goto done;
+         default:
+            goto bad;
+      }
+      break;
+
+   case Xin_Div:
+      /* 0xF7 /6 (div, unsigned) or /7 (idiv, signed). */
+      subopc = i->Xin.Div.syned ? 7 : 6;
+      *p++ = 0xF7;
+      switch (i->Xin.Div.src->tag)  {
+         case Xrm_Mem:
+            p = doAMode_M_enc(p, subopc, i->Xin.Div.src->Xrm.Mem.am);
+            goto done;
+         case Xrm_Reg:
+            p = doAMode_R_enc_reg(p, subopc, i->Xin.Div.src->Xrm.Reg.reg);
+            goto done;
+         default:
+            goto bad;
+      }
+      break;
+
+   case Xin_Sh3232:
+      /* Double-precision shift.  Only the %cl-counted forms are
+         handled (amt == 0 means the count is in %cl): 0F A5 = shldl,
+         0F AD = shrdl.  Immediate-count forms fall through to 'bad'. */
+      vassert(i->Xin.Sh3232.op == Xsh_SHL || i->Xin.Sh3232.op == Xsh_SHR);
+      if (i->Xin.Sh3232.amt == 0) {
+         /* shldl/shrdl by %cl */
+         *p++ = 0x0F;
+         if (i->Xin.Sh3232.op == Xsh_SHL) {
+            *p++ = 0xA5;
+         } else {
+            *p++ = 0xAD;
+         }
+         p = doAMode_R(p, i->Xin.Sh3232.src, i->Xin.Sh3232.dst);
+         goto done;
+      }
+      break;
+
+   case Xin_Push:
+      /* push mem (FF /6), push $imm32 (68 id), or push reg (50+r). */
+      switch (i->Xin.Push.src->tag) {
+         case Xrmi_Mem: 
+            *p++ = 0xFF;
+            p = doAMode_M_enc(p, 6, i->Xin.Push.src->Xrmi.Mem.am);
+            goto done;
+         case Xrmi_Imm:
+            *p++ = 0x68;
+            p = emit32(p, i->Xin.Push.src->Xrmi.Imm.imm32);
+            goto done;
+         case Xrmi_Reg:
+            *p++ = toUChar(0x50 + iregEnc(i->Xin.Push.src->Xrmi.Reg.reg));
+            goto done;
+        default: 
+            goto bad;
+      }
+      /* NOTE(review): no 'break' after this switch.  Unreachable in
+         practice since every arm does a goto, but a break would guard
+         against future edits falling into Xin_Call below. */
+
+   case Xin_Call:
+      /* Call to a fixed target, routed through a scratch register
+         (movl $target, %tmp ; call *%tmp).  The scratch register is
+         chosen so it is not one of the argument-passing registers for
+         the regparms convention in use. */
+      if (i->Xin.Call.cond != Xcc_ALWAYS
+          && i->Xin.Call.rloc.pri != RLPri_None) {
+         /* The call might not happen (it isn't unconditional) and it
+            returns a result.  In this case we will need to generate a
+            control flow diamond to put 0x555..555 in the return
+            register(s) in the case where the call doesn't happen.  If
+            this ever becomes necessary, maybe copy code from the ARM
+            equivalent.  Until that day, just give up. */
+         goto bad;
+      }
+      /* See detailed comment for Xin_Call in getRegUsage_X86Instr above
+         for explanation of this. */
+      switch (i->Xin.Call.regparms) {
+         case 0: irno = iregEnc(hregX86_EAX()); break;
+         case 1: irno = iregEnc(hregX86_EDX()); break;
+         case 2: irno = iregEnc(hregX86_ECX()); break;
+         case 3: irno = iregEnc(hregX86_EDI()); break;
+         default: vpanic(" emit_X86Instr:call:regparms");
+      }
+      /* jump over the following two insns if the condition does not
+         hold */
+      if (i->Xin.Call.cond != Xcc_ALWAYS) {
+         /* Jcc rel8 is 0x70+cc; cc^1 inverts the condition. */
+         *p++ = toUChar(0x70 + (0xF & (i->Xin.Call.cond ^ 1)));
+         *p++ = 0x07; /* 7 bytes in the next two insns */
+      }
+      /* movl $target, %tmp */
+      *p++ = toUChar(0xB8 + irno);
+      p = emit32(p, i->Xin.Call.target);
+      /* call *%tmp */
+      *p++ = 0xFF;
+      *p++ = toUChar(0xD0 + irno);
+      goto done;
+
+   case Xin_XDirect: {
+      /* Direct guest-state transfer: write the known destination GA
+         into the guest EIP slot, then call the (patchable) chain-me
+         dispatcher stub. */
+      /* NB: what goes on here has to be very closely coordinated with the
+         chainXDirect_X86 and unchainXDirect_X86 below. */
+      /* We're generating chain-me requests here, so we need to be
+         sure this is actually allowed -- no-redir translations can't
+         use chain-me's.  Hence: */
+      vassert(disp_cp_chain_me_to_slowEP != NULL);
+      vassert(disp_cp_chain_me_to_fastEP != NULL);
+
+      /* Use ptmp for backpatching conditional jumps. */
+      ptmp = NULL;
+
+      /* First off, if this is conditional, create a conditional
+         jump over the rest of it. */
+      if (i->Xin.XDirect.cond != Xcc_ALWAYS) {
+         /* jmp fwds if !condition */
+         *p++ = toUChar(0x70 + (0xF & (i->Xin.XDirect.cond ^ 1)));
+         ptmp = p; /* fill in this bit later */
+         *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+      }
+
+      /* Update the guest EIP. */
+      /* movl $dstGA, amEIP */
+      *p++ = 0xC7;
+      p    = doAMode_M_enc(p, 0, i->Xin.XDirect.amEIP);
+      p    = emit32(p, i->Xin.XDirect.dstGA);
+
+      /* --- FIRST PATCHABLE BYTE follows --- */
+      /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're calling
+         to) backs up the return address, so as to find the address of
+         the first patchable byte.  So: don't change the length of the
+         two instructions below. */
+      /* movl $disp_cp_chain_me_to_{slow,fast}EP,%edx; */
+      *p++ = 0xBA;
+      const void* disp_cp_chain_me
+               = i->Xin.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP 
+                                         : disp_cp_chain_me_to_slowEP;
+      p = emit32(p, (UInt)(Addr)disp_cp_chain_me);
+      /* call *%edx */
+      *p++ = 0xFF;
+      *p++ = 0xD2;
+      /* --- END of PATCHABLE BYTES --- */
+
+      /* Fix up the conditional jump, if there was one. */
+      if (i->Xin.XDirect.cond != Xcc_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta > 0 && delta < 40);
+         /* rel8 is measured from the end of the Jcc insn (ptmp+1),
+            hence delta-1. */
+         *ptmp = toUChar(delta-1);
+      }
+      goto done;
+   }
+
+   case Xin_XIndir: {
+      /* Indirect transfer: guest EIP comes from a register; jump to
+         the (non-patchable) xindir dispatcher. */
+      /* We're generating transfers that could lead indirectly to a
+         chain-me, so we need to be sure this is actually allowed --
+         no-redir translations are not allowed to reach normal
+         translations without going through the scheduler.  That means
+         no XDirects or XIndirs out from no-redir translations.
+         Hence: */
+      vassert(disp_cp_xindir != NULL);
+
+      /* Use ptmp for backpatching conditional jumps. */
+      ptmp = NULL;
+
+      /* First off, if this is conditional, create a conditional
+         jump over the rest of it. */
+      if (i->Xin.XIndir.cond != Xcc_ALWAYS) {
+         /* jmp fwds if !condition */
+         *p++ = toUChar(0x70 + (0xF & (i->Xin.XIndir.cond ^ 1)));
+         ptmp = p; /* fill in this bit later */
+         *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+      }
+
+      /* movl dstGA(a reg), amEIP -- copied from Alu32M MOV case */
+      *p++ = 0x89;
+      p = doAMode_M(p, i->Xin.XIndir.dstGA, i->Xin.XIndir.amEIP);
+
+      /* movl $disp_indir, %edx */
+      *p++ = 0xBA;
+      p = emit32(p, (UInt)(Addr)disp_cp_xindir);
+      /* jmp *%edx */
+      *p++ = 0xFF;
+      *p++ = 0xE2;
+
+      /* Fix up the conditional jump, if there was one. */
+      if (i->Xin.XIndir.cond != Xcc_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta > 0 && delta < 40);
+         *ptmp = toUChar(delta-1);
+      }
+      goto done;
+   }
+
+   case Xin_XAssisted: {
+      /* Assisted transfer: set guest EIP from a register, put a
+         trap-code magic number in %ebp, then jump to the assisted
+         dispatcher entry point. */
+      /* Use ptmp for backpatching conditional jumps. */
+      ptmp = NULL;
+
+      /* First off, if this is conditional, create a conditional
+         jump over the rest of it. */
+      if (i->Xin.XAssisted.cond != Xcc_ALWAYS) {
+         /* jmp fwds if !condition */
+         *p++ = toUChar(0x70 + (0xF & (i->Xin.XAssisted.cond ^ 1)));
+         ptmp = p; /* fill in this bit later */
+         *p++ = 0; /* # of bytes to jump over; don't know how many yet. */
+      }
+
+      /* movl dstGA(a reg), amEIP -- copied from Alu32M MOV case */
+      *p++ = 0x89;
+      /* NOTE(review): reads the Xin.XIndir union member rather than
+         Xin.XAssisted.  This only works if both union variants lay out
+         dstGA/amEIP identically -- confirm against the X86Instr union
+         declaration, or switch to the XAssisted fields. */
+      p = doAMode_M(p, i->Xin.XIndir.dstGA, i->Xin.XIndir.amEIP);
+      /* movl $magic_number, %ebp. */
+      UInt trcval = 0;
+      switch (i->Xin.XAssisted.jk) {
+         case Ijk_ClientReq:    trcval = VEX_TRC_JMP_CLIENTREQ;    break;
+         case Ijk_Sys_syscall:  trcval = VEX_TRC_JMP_SYS_SYSCALL;  break;
+         case Ijk_Sys_int128:   trcval = VEX_TRC_JMP_SYS_INT128;   break;
+         case Ijk_Sys_int129:   trcval = VEX_TRC_JMP_SYS_INT129;   break;
+         case Ijk_Sys_int130:   trcval = VEX_TRC_JMP_SYS_INT130;   break;
+         case Ijk_Sys_sysenter: trcval = VEX_TRC_JMP_SYS_SYSENTER; break;
+         case Ijk_Yield:        trcval = VEX_TRC_JMP_YIELD;        break;
+         case Ijk_EmWarn:       trcval = VEX_TRC_JMP_EMWARN;       break;
+         case Ijk_MapFail:      trcval = VEX_TRC_JMP_MAPFAIL;      break;
+         case Ijk_NoDecode:     trcval = VEX_TRC_JMP_NODECODE;     break;
+         case Ijk_InvalICache:  trcval = VEX_TRC_JMP_INVALICACHE;  break;
+         case Ijk_NoRedir:      trcval = VEX_TRC_JMP_NOREDIR;      break;
+         case Ijk_SigTRAP:      trcval = VEX_TRC_JMP_SIGTRAP;      break;
+         case Ijk_SigSEGV:      trcval = VEX_TRC_JMP_SIGSEGV;      break;
+         case Ijk_Boring:       trcval = VEX_TRC_JMP_BORING;       break;
+         /* We don't expect to see the following being assisted. */
+         case Ijk_Ret:
+         case Ijk_Call:
+         /* fallthrough */
+         default: 
+            ppIRJumpKind(i->Xin.XAssisted.jk);
+            vpanic("emit_X86Instr.Xin_XAssisted: unexpected jump kind");
+      }
+      vassert(trcval != 0);
+      /* 0xBD = movl $imm32, %ebp (B8+r, r=5). */
+      *p++ = 0xBD;
+      p = emit32(p, trcval);
+
+      /* movl $disp_indir, %edx */
+      *p++ = 0xBA;
+      p = emit32(p, (UInt)(Addr)disp_cp_xassisted);
+      /* jmp *%edx */
+      *p++ = 0xFF;
+      *p++ = 0xE2;
+
+      /* Fix up the conditional jump, if there was one. */
+      if (i->Xin.XAssisted.cond != Xcc_ALWAYS) {
+         Int delta = p - ptmp;
+         vassert(delta > 0 && delta < 40);
+         *ptmp = toUChar(delta-1);
+      }
+      goto done;
+   }
+
+   case Xin_CMov32:
+      /* Conditional move, synthesised as a conditional short jump
+         over a plain movl so it runs on pre-cmov CPUs. */
+      vassert(i->Xin.CMov32.cond != Xcc_ALWAYS);
+
+      /* This generates cmov, which is illegal on P54/P55. */
+      /*
+      *p++ = 0x0F;
+      *p++ = toUChar(0x40 + (0xF & i->Xin.CMov32.cond));
+      if (i->Xin.CMov32.src->tag == Xrm_Reg) {
+         p = doAMode_R(p, i->Xin.CMov32.dst, i->Xin.CMov32.src->Xrm.Reg.reg);
+         goto done;
+      }
+      if (i->Xin.CMov32.src->tag == Xrm_Mem) {
+         p = doAMode_M(p, i->Xin.CMov32.dst, i->Xin.CMov32.src->Xrm.Mem.am);
+         goto done;
+      }
+      */
+
+      /* Alternative version which works on any x86 variant. */
+      /* jmp fwds if !condition */
+      /* NOTE(review): unlike the other Jcc emissions in this function,
+         cond^1 is not masked with 0xF here; harmless as long as cond
+         is always a valid 4-bit condition code. */
+      *p++ = toUChar(0x70 + (i->Xin.CMov32.cond ^ 1));
+      *p++ = 0; /* # of bytes in the next bit, which we don't know yet */
+      ptmp = p;
+
+      switch (i->Xin.CMov32.src->tag) {
+         case Xrm_Reg:
+            /* Big sigh.  This is movl E -> G ... */
+            *p++ = 0x89;
+            p = doAMode_R(p, i->Xin.CMov32.src->Xrm.Reg.reg,
+                             i->Xin.CMov32.dst);
+
+            break;
+         case Xrm_Mem:
+            /* ... whereas this is movl G -> E.  That's why the args
+               to doAMode_R appear to be the wrong way round in the
+               Xrm_Reg case. */
+            *p++ = 0x8B;
+            p = doAMode_M(p, i->Xin.CMov32.dst,
+                             i->Xin.CMov32.src->Xrm.Mem.am);
+            break;
+         default:
+            goto bad;
+      }
+      /* Fill in the jump offset. */
+      *(ptmp-1) = toUChar(p - ptmp);
+      goto done;
+
+      /* (unreachable after the goto above) */
+      break;
+
+   case Xin_LoadEX:
+      /* Widening loads: movzbl (0F B6), movzwl (0F B7), movsbl (0F BE). */
+      if (i->Xin.LoadEX.szSmall == 1 && !i->Xin.LoadEX.syned) {
+         /* movzbl */
+         *p++ = 0x0F;
+         *p++ = 0xB6;
+         p = doAMode_M(p, i->Xin.LoadEX.dst, i->Xin.LoadEX.src); 
+         goto done;
+      }
+      if (i->Xin.LoadEX.szSmall == 2 && !i->Xin.LoadEX.syned) {
+         /* movzwl */
+         *p++ = 0x0F;
+         *p++ = 0xB7;
+         p = doAMode_M(p, i->Xin.LoadEX.dst, i->Xin.LoadEX.src); 
+         goto done;
+      }
+      if (i->Xin.LoadEX.szSmall == 1 && i->Xin.LoadEX.syned) {
+         /* movsbl */
+         *p++ = 0x0F;
+         *p++ = 0xBE;
+         p = doAMode_M(p, i->Xin.LoadEX.dst, i->Xin.LoadEX.src); 
+         goto done;
+      }
+      /* No movswl (szSmall==2 && syned) case: such an insn falls
+         through to 'bad'. */
+      break;
+
+   case Xin_Set32:
+      /* Make the destination register be 1 or 0, depending on whether
+         the relevant condition holds.  We have to dodge and weave
+         when the destination is %esi or %edi as we cannot directly
+         emit the native 'setb %reg' for those.  Further complication:
+         the top 24 bits of the destination should be forced to zero,
+         but doing 'xor %r,%r' kills the flag(s) we are about to read.
+         Sigh.  So start off my moving $0 into the dest. */
+
+      /* Do we need to swap in %eax? */
+      if (iregEnc(i->Xin.Set32.dst) >= 4) {
+         /* encodings >= 4 are %esp/%ebp/%esi/%edi, which have no
+            byte-addressable low 8 bits; go via %eax instead. */
+         /* xchg %eax, %dst */
+         *p++ = toUChar(0x90 + iregEnc(i->Xin.Set32.dst));
+         /* movl $0, %eax */
+         *p++ =toUChar(0xB8 + iregEnc(hregX86_EAX()));
+         p = emit32(p, 0);
+         /* setb lo8(%eax) */
+         *p++ = 0x0F; 
+         *p++ = toUChar(0x90 + (0xF & i->Xin.Set32.cond));
+         p = doAMode_R_enc_reg(p, 0, hregX86_EAX());
+         /* xchg %eax, %dst */
+         *p++ = toUChar(0x90 + iregEnc(i->Xin.Set32.dst));
+      } else {
+         /* movl $0, %dst */
+         *p++ = toUChar(0xB8 + iregEnc(i->Xin.Set32.dst));
+         p = emit32(p, 0);
+         /* setb lo8(%dst) */
+         *p++ = 0x0F; 
+         *p++ = toUChar(0x90 + (0xF & i->Xin.Set32.cond));
+         p = doAMode_R_enc_reg(p, 0, i->Xin.Set32.dst);
+      }
+      goto done;
+
+   case Xin_Bsfr32:
+      /* Bit scan: 0F BC = bsf (forwards), 0F BD = bsr (backwards). */
+      *p++ = 0x0F;
+      if (i->Xin.Bsfr32.isFwds) {
+         *p++ = 0xBC;
+      } else {
+         *p++ = 0xBD;
+      }
+      p = doAMode_R(p, i->Xin.Bsfr32.dst, i->Xin.Bsfr32.src);
+      goto done;
+
+   case Xin_MFence:
+      /* see comment in hdefs.h re this insn */
+      /* Strongest fence available for the host: mfence on SSE2/SSE3,
+         sfence + locked add on MMXEXT, locked add alone on baseline. */
+      if (0) vex_printf("EMIT FENCE\n");
+      if (i->Xin.MFence.hwcaps & (VEX_HWCAPS_X86_SSE3
+                                  |VEX_HWCAPS_X86_SSE2)) {
+         /* mfence */
+         *p++ = 0x0F; *p++ = 0xAE; *p++ = 0xF0;
+         goto done;
+      }
+      if (i->Xin.MFence.hwcaps & VEX_HWCAPS_X86_MMXEXT) {
+         /* sfence */
+         *p++ = 0x0F; *p++ = 0xAE; *p++ = 0xF8;
+         /* lock addl $0,0(%esp) */
+         *p++ = 0xF0; *p++ = 0x83; *p++ = 0x44; 
+         *p++ = 0x24; *p++ = 0x00; *p++ = 0x00;
+         goto done;
+      }
+      if (i->Xin.MFence.hwcaps == 0/*baseline, no SSE*/) {
+         /* lock addl $0,0(%esp) */
+         *p++ = 0xF0; *p++ = 0x83; *p++ = 0x44; 
+         *p++ = 0x24; *p++ = 0x00; *p++ = 0x00;
+         goto done;
+      }
+      vpanic("emit_X86Instr:mfence:hwcaps");
+      /*NOTREACHED*/
+      break;
+
+   case Xin_ACAS:
+      /* lock */
+      *p++ = 0xF0;
+      /* cmpxchg{b,w,l} %ebx,mem.  Expected-value in %eax, new value
+         in %ebx.  The new-value register is hardwired to be %ebx
+         since letting it be any integer register gives the problem
+         that %sil and %dil are unaddressible on x86 and hence we
+         would have to resort to the same kind of trickery as with
+         byte-sized Xin.Store, just below.  Given that this isn't
+         performance critical, it is simpler just to force the
+         register operand to %ebx (could equally be %ecx or %edx).
+         (Although %ebx is more consistent with cmpxchg8b.) */
+      if (i->Xin.ACAS.sz == 2) *p++ = 0x66; 
+      *p++ = 0x0F;
+      if (i->Xin.ACAS.sz == 1) *p++ = 0xB0; else *p++ = 0xB1;
+      p = doAMode_M(p, hregX86_EBX(), i->Xin.ACAS.addr);
+      goto done;
+
+   case Xin_DACAS:
+      /* lock */
+      *p++ = 0xF0;
+      /* cmpxchg8b m64.  Expected-value in %edx:%eax, new value
+         in %ecx:%ebx.  All 4 regs are hardwired in the ISA, so
+         aren't encoded in the insn. */
+      *p++ = 0x0F;
+      *p++ = 0xC7;
+      p = doAMode_M_enc(p, 1, i->Xin.DACAS.addr);
+      goto done;
+
+   case Xin_Store:
+      /* 16-bit store: 66 89.  8-bit store: 88, but only registers
+         with encodings 0..3 have addressable low-byte forms, so %esi
+         and %edi must be temporarily swapped into a usable register. */
+      if (i->Xin.Store.sz == 2) {
+         /* This case, at least, is simple, given that we can
+            reference the low 16 bits of any integer register. */
+         *p++ = 0x66;
+         *p++ = 0x89;
+         p = doAMode_M(p, i->Xin.Store.src, i->Xin.Store.dst);
+         goto done;
+      }
+
+      if (i->Xin.Store.sz == 1) {
+         /* We have to do complex dodging and weaving if src is not
+            the low 8 bits of %eax/%ebx/%ecx/%edx. */
+         if (iregEnc(i->Xin.Store.src) < 4) {
+            /* we're OK, can do it directly */
+            *p++ = 0x88;
+            p = doAMode_M(p, i->Xin.Store.src, i->Xin.Store.dst);
+           goto done;
+         } else {
+            /* Bleh.  This means the source is %edi or %esi.  Since
+               the address mode can only mention three registers, at
+               least one of %eax/%ebx/%ecx/%edx must be available to
+               temporarily swap the source into, so the store can
+               happen.  So we have to look at the regs mentioned
+               in the amode. */
+            HReg swap = INVALID_HREG;
+            HReg  eax = hregX86_EAX(), ebx = hregX86_EBX(), 
+                  ecx = hregX86_ECX(), edx = hregX86_EDX();
+            HRegUsage u;
+            initHRegUsage(&u);
+            addRegUsage_X86AMode(&u, i->Xin.Store.dst);
+            /**/ if (! HRegUsage__contains(&u, eax)) { swap = eax; }
+            else if (! HRegUsage__contains(&u, ebx)) { swap = ebx; }
+            else if (! HRegUsage__contains(&u, ecx)) { swap = ecx; }
+            else if (! HRegUsage__contains(&u, edx)) { swap = edx; }
+            vassert(! hregIsInvalid(swap));
+            /* xchgl %source, %swap. Could do better if swap is %eax. */
+            *p++ = 0x87;
+            p = doAMode_R(p, i->Xin.Store.src, swap);
+            /* movb lo8{%swap}, (dst) */
+            *p++ = 0x88;
+            p = doAMode_M(p, swap, i->Xin.Store.dst);
+            /* xchgl %source, %swap. Could do better if swap is %eax. */
+            *p++ = 0x87;
+            p = doAMode_R(p, i->Xin.Store.src, swap);
+            goto done;
+         }
+      } /* if (i->Xin.Store.sz == 1) */
+      break;
+
+   case Xin_FpUnary:
+      /* x87 regs are modelled flat ("fake" regs); each op loads the
+         operand to the top of the FP stack, operates, and stores back,
+         freeing %st7 first so the push cannot overflow the stack. */
+      /* gop %src, %dst
+         --> ffree %st7 ; fld %st(src) ; fop %st(0) ; fstp %st(1+dst)
+      */
+      p = do_ffree_st7(p);
+      p = do_fld_st(p, 0+fregEnc(i->Xin.FpUnary.src));
+      p = do_fop1_st(p, i->Xin.FpUnary.op);
+      p = do_fstp_st(p, 1+fregEnc(i->Xin.FpUnary.dst));
+      goto done;
+
+   case Xin_FpBinary:
+      if (i->Xin.FpBinary.op == Xfp_YL2X
+          || i->Xin.FpBinary.op == Xfp_YL2XP1) {
+         /* Have to do this specially. */
+         /* ffree %st7 ; fld %st(srcL) ; 
+            ffree %st7 ; fld %st(srcR+1) ; fyl2x{p1} ; fstp(1+dst) */
+         p = do_ffree_st7(p);
+         p = do_fld_st(p, 0+fregEnc(i->Xin.FpBinary.srcL));
+         p = do_ffree_st7(p);
+         p = do_fld_st(p, 1+fregEnc(i->Xin.FpBinary.srcR));
+         /* D9 F1 = fyl2x, D9 F9 = fyl2xp1. */
+         *p++ = 0xD9; 
+         *p++ = toUChar(i->Xin.FpBinary.op==Xfp_YL2X ? 0xF1 : 0xF9);
+         p = do_fstp_st(p, 1+fregEnc(i->Xin.FpBinary.dst));
+         goto done;
+      }
+      if (i->Xin.FpBinary.op == Xfp_ATAN) {
+         /* Have to do this specially. */
+         /* ffree %st7 ; fld %st(srcL) ; 
+            ffree %st7 ; fld %st(srcR+1) ; fpatan ; fstp(1+dst) */
+         p = do_ffree_st7(p);
+         p = do_fld_st(p, 0+fregEnc(i->Xin.FpBinary.srcL));
+         p = do_ffree_st7(p);
+         p = do_fld_st(p, 1+fregEnc(i->Xin.FpBinary.srcR));
+         /* D9 F3 = fpatan. */
+         *p++ = 0xD9; *p++ = 0xF3;
+         p = do_fstp_st(p, 1+fregEnc(i->Xin.FpBinary.dst));
+         goto done;
+      }
+      if (i->Xin.FpBinary.op == Xfp_PREM
+          || i->Xin.FpBinary.op == Xfp_PREM1
+          || i->Xin.FpBinary.op == Xfp_SCALE) {
+         /* Have to do this specially. */
+         /* ffree %st7 ; fld %st(srcR) ; 
+            ffree %st7 ; fld %st(srcL+1) ; fprem/fprem1/fscale ; fstp(2+dst) ; 
+            fincstp ; ffree %st7 */
+         p = do_ffree_st7(p);
+         p = do_fld_st(p, 0+fregEnc(i->Xin.FpBinary.srcR));
+         p = do_ffree_st7(p);
+         p = do_fld_st(p, 1+fregEnc(i->Xin.FpBinary.srcL));
+         *p++ = 0xD9;
+         switch (i->Xin.FpBinary.op) {
+            case Xfp_PREM: *p++ = 0xF8; break;
+            case Xfp_PREM1: *p++ = 0xF5; break;
+            case Xfp_SCALE: *p++ =  0xFD; break;
+            default: vpanic("emitX86Instr(FpBinary,PREM/PREM1/SCALE)");
+         }
+         p = do_fstp_st(p, 2+fregEnc(i->Xin.FpBinary.dst));
+         /* D9 F7 = fincstp: pop the leftover operand off the stack. */
+         *p++ = 0xD9; *p++ = 0xF7;
+         p = do_ffree_st7(p);
+         goto done;
+      }
+      /* General case */
+      /* gop %srcL, %srcR, %dst
+         --> ffree %st7 ; fld %st(srcL) ; fop %st(1+srcR) ; fstp %st(1+dst)
+      */
+      p = do_ffree_st7(p);
+      p = do_fld_st(p, 0+fregEnc(i->Xin.FpBinary.srcL));
+      p = do_fop2_st(p, i->Xin.FpBinary.op, 
+                        1+fregEnc(i->Xin.FpBinary.srcR));
+      p = do_fstp_st(p, 1+fregEnc(i->Xin.FpBinary.dst));
+      goto done;
+
+   case Xin_FpLdSt:
+      /* FP load/store of 4/8/10 bytes via the x87 stack:
+         flds/fldl/fldt on load, fstps/fstpl/fstpt on store. */
+      if (i->Xin.FpLdSt.isLoad) {
+         /* Load from memory into %fakeN.  
+            --> ffree %st(7) ; fld{s/l/t} amode ; fstp st(N+1) 
+         */
+         p = do_ffree_st7(p);
+         switch (i->Xin.FpLdSt.sz) {
+            case 4:
+               *p++ = 0xD9;
+               p = doAMode_M_enc(p, 0/*subopcode*/, i->Xin.FpLdSt.addr);
+               break;
+            case 8:
+               *p++ = 0xDD;
+               p = doAMode_M_enc(p, 0/*subopcode*/, i->Xin.FpLdSt.addr);
+               break;
+            case 10:
+               *p++ = 0xDB;
+               p = doAMode_M_enc(p, 5/*subopcode*/, i->Xin.FpLdSt.addr);
+               break;
+            default:
+               vpanic("emitX86Instr(FpLdSt,load)");
+         }
+         p = do_fstp_st(p, 1+fregEnc(i->Xin.FpLdSt.reg));
+         goto done;
+      } else {
+         /* Store from %fakeN into memory.
+            --> ffree %st(7) ; fld st(N) ; fstp{l|s} amode
+	 */
+         p = do_ffree_st7(p);
+         p = do_fld_st(p, 0+fregEnc(i->Xin.FpLdSt.reg));
+         switch (i->Xin.FpLdSt.sz) {
+            case 4:
+               *p++ = 0xD9;
+               p = doAMode_M_enc(p, 3/*subopcode*/, i->Xin.FpLdSt.addr);
+               break;
+            case 8:
+               *p++ = 0xDD;
+               p = doAMode_M_enc(p, 3/*subopcode*/, i->Xin.FpLdSt.addr);
+               break;
+            case 10:
+               *p++ = 0xDB;
+               p = doAMode_M_enc(p, 7/*subopcode*/, i->Xin.FpLdSt.addr);
+               break;
+            default:
+               vpanic("emitX86Instr(FpLdSt,store)");
+         }
+         goto done;
+      }
+      break;
+
+   case Xin_FpLdStI:
+      /* Integer<->FP conversion load/store: fild/fistp of 2/4/8 bytes.
+         Note the sz==2 load is asserted unreachable below. */
+      if (i->Xin.FpLdStI.isLoad) {
+         /* Load from memory into %fakeN, converting from an int.  
+            --> ffree %st(7) ; fild{w/l/ll} amode ; fstp st(N+1) 
+         */
+         switch (i->Xin.FpLdStI.sz) {
+            case 8:  opc = 0xDF; subopc_imm = 5; break;
+            case 4:  opc = 0xDB; subopc_imm = 0; break;
+            case 2:  vassert(0); opc = 0xDF; subopc_imm = 0; break;
+            default: vpanic("emitX86Instr(Xin_FpLdStI-load)");
+         }
+         p = do_ffree_st7(p);
+         *p++ = toUChar(opc);
+         p = doAMode_M_enc(p, subopc_imm/*subopcode*/, i->Xin.FpLdStI.addr);
+         p = do_fstp_st(p, 1+fregEnc(i->Xin.FpLdStI.reg));
+         goto done;
+      } else {
+         /* Store from %fakeN into memory, converting to an int.
+            --> ffree %st(7) ; fld st(N) ; fistp{w/l/ll} amode
+	 */
+         switch (i->Xin.FpLdStI.sz) {
+            case 8:  opc = 0xDF; subopc_imm = 7; break;
+            case 4:  opc = 0xDB; subopc_imm = 3; break;
+            case 2:  opc = 0xDF; subopc_imm = 3; break;
+            default: vpanic("emitX86Instr(Xin_FpLdStI-store)");
+         }
+         p = do_ffree_st7(p);
+         p = do_fld_st(p, 0+fregEnc(i->Xin.FpLdStI.reg));
+         *p++ = toUChar(opc);
+         p = doAMode_M_enc(p, subopc_imm/*subopcode*/, i->Xin.FpLdStI.addr);
+         goto done;
+      }
+      break;
+
+   case Xin_Fp64to32:
+      /* Round F64 to F32 by bouncing the value through a 4-byte slot
+         on the stack (fstps then flds). */
+      /* ffree %st7 ; fld %st(src) */
+      p = do_ffree_st7(p);
+      p = do_fld_st(p, 0+fregEnc(i->Xin.Fp64to32.src));
+      /* subl $4, %esp */
+      *p++ = 0x83; *p++ = 0xEC; *p++ = 0x04;
+      /* fstps (%esp) */
+      *p++ = 0xD9; *p++ = 0x1C; *p++ = 0x24;
+      /* flds (%esp) */
+      *p++ = 0xD9; *p++ = 0x04; *p++ = 0x24;
+      /* addl $4, %esp */
+      *p++ = 0x83; *p++ = 0xC4; *p++ = 0x04;
+      /* fstp %st(1+dst) */
+      p = do_fstp_st(p, 1+fregEnc(i->Xin.Fp64to32.dst));
+      goto done;
+
+   case Xin_FpCMov:
+      /* Conditional FP move, synthesised (like CMov32) as a Jcc over
+         an unconditional copy. */
+      /* jmp fwds if !condition */
+      /* NOTE(review): cond^1 is not masked with 0xF here, unlike most
+         other Jcc emissions in this function; harmless if cond is
+         always a valid 4-bit condition code. */
+      *p++ = toUChar(0x70 + (i->Xin.FpCMov.cond ^ 1));
+      *p++ = 0; /* # of bytes in the next bit, which we don't know yet */
+      ptmp = p;
+
+      /* ffree %st7 ; fld %st(src) ; fstp %st(1+dst) */
+      p = do_ffree_st7(p);
+      p = do_fld_st(p, 0+fregEnc(i->Xin.FpCMov.src));
+      p = do_fstp_st(p, 1+fregEnc(i->Xin.FpCMov.dst));
+
+      /* Fill in the jump offset. */
+      *(ptmp-1) = toUChar(p - ptmp);
+      goto done;
+
+   case Xin_FpLdCW:
+      /* fldcw amode (D9 /5): load the x87 control word. */
+      *p++ = 0xD9;
+      p = doAMode_M_enc(p, 5/*subopcode*/, i->Xin.FpLdCW.addr);
+      goto done;
+
+   case Xin_FpStSW_AX:
+      /* note, this emits fnstsw %ax, not fstsw %ax */
+      *p++ = 0xDF;
+      *p++ = 0xE0;
+      goto done;
+
+   case Xin_FpCmp:
+      /* gcmp %fL, %fR, %dst
+         -> ffree %st7; fpush %fL ; fucomp %(fR+1) ; 
+            fnstsw %ax ; movl %eax, %dst 
+      */
+      /* ffree %st7 */
+      p = do_ffree_st7(p);
+      /* fpush %fL */
+      p = do_fld_st(p, 0+fregEnc(i->Xin.FpCmp.srcL));
+      /* fucomp %(fR+1): DD E8+i */
+      *p++ = 0xDD;
+      *p++ = toUChar(0xE8 + (7 & (1+fregEnc(i->Xin.FpCmp.srcR))));
+      /* fnstsw %ax */
+      *p++ = 0xDF;
+      *p++ = 0xE0;
+      /*  movl %eax, %dst */
+      *p++ = 0x89;
+      p = doAMode_R(p, hregX86_EAX(), i->Xin.FpCmp.dst);
+      goto done;
+
+   case Xin_SseConst: {
+      /* Materialise a 128-bit constant: push four 32-bit words built
+         from the 4-bit-per-word tag pattern, load them into the xmm
+         destination, then pop the 16 bytes back off the stack. */
+      UShort con = i->Xin.SseConst.con;
+      p = push_word_from_tags(p, toUShort((con >> 12) & 0xF));
+      p = push_word_from_tags(p, toUShort((con >> 8) & 0xF));
+      p = push_word_from_tags(p, toUShort((con >> 4) & 0xF));
+      p = push_word_from_tags(p, toUShort(con & 0xF));
+      /* movl (%esp), %xmm-dst */
+      /* NOTE(review): 0F 10 is actually movups (128-bit load), not
+         movl; the ModRM 04+8*reg with SIB 24 addresses (%esp). */
+      *p++ = 0x0F;
+      *p++ = 0x10;
+      *p++ = toUChar(0x04 + 8 * (7 & vregEnc(i->Xin.SseConst.dst)));
+      *p++ = 0x24;
+      /* addl $16, %esp */
+      *p++ = 0x83;
+      *p++ = 0xC4;
+      *p++ = 0x10;
+      goto done;
+   }
+
+   case Xin_SseLdSt:
+      /* movups: 0F 10 = load, 0F 11 = store. */
+      *p++ = 0x0F; 
+      *p++ = toUChar(i->Xin.SseLdSt.isLoad ? 0x10 : 0x11);
+      p = doAMode_M_enc(p, vregEnc(i->Xin.SseLdSt.reg), i->Xin.SseLdSt.addr);
+      goto done;
+
+   case Xin_SseLdzLO:
+      vassert(i->Xin.SseLdzLO.sz == 4 || i->Xin.SseLdzLO.sz == 8);
+      /* movs[sd] amode, %xmm-dst */
+      *p++ = toUChar(i->Xin.SseLdzLO.sz==4 ? 0xF3 : 0xF2);
+      *p++ = 0x0F; 
+      *p++ = 0x10; 
+      p = doAMode_M_enc(p, vregEnc(i->Xin.SseLdzLO.reg), i->Xin.SseLdzLO.addr);
+      goto done;
+
+   case Xin_Sse32Fx4:
+      /* Packed single FP ops (no prefix).  For the CMP* ops, xtra's
+         0x100 bit flags that a trailing imm8 predicate byte must be
+         emitted (low byte: 0=eq, 1=lt, 2=le, 3=unord). */
+      xtra = 0;
+      *p++ = 0x0F;
+      switch (i->Xin.Sse32Fx4.op) {
+         case Xsse_ADDF:   *p++ = 0x58; break;
+         case Xsse_DIVF:   *p++ = 0x5E; break;
+         case Xsse_MAXF:   *p++ = 0x5F; break;
+         case Xsse_MINF:   *p++ = 0x5D; break;
+         case Xsse_MULF:   *p++ = 0x59; break;
+         case Xsse_RCPF:   *p++ = 0x53; break;
+         case Xsse_RSQRTF: *p++ = 0x52; break;
+         case Xsse_SQRTF:  *p++ = 0x51; break;
+         case Xsse_SUBF:   *p++ = 0x5C; break;
+         case Xsse_CMPEQF: *p++ = 0xC2; xtra = 0x100; break;
+         case Xsse_CMPLTF: *p++ = 0xC2; xtra = 0x101; break;
+         case Xsse_CMPLEF: *p++ = 0xC2; xtra = 0x102; break;
+         case Xsse_CMPUNF: *p++ = 0xC2; xtra = 0x103; break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc(i->Xin.Sse32Fx4.dst),
+                               vregEnc(i->Xin.Sse32Fx4.src) );
+      if (xtra & 0x100)
+         *p++ = toUChar(xtra & 0xFF);
+      goto done;
+
+   case Xin_Sse64Fx2:
+      /* Packed double FP ops: same table as Sse32Fx4, 0x66 prefix. */
+      xtra = 0;
+      *p++ = 0x66;
+      *p++ = 0x0F;
+      switch (i->Xin.Sse64Fx2.op) {
+         case Xsse_ADDF:   *p++ = 0x58; break;
+         case Xsse_DIVF:   *p++ = 0x5E; break;
+         case Xsse_MAXF:   *p++ = 0x5F; break;
+         case Xsse_MINF:   *p++ = 0x5D; break;
+         case Xsse_MULF:   *p++ = 0x59; break;
+         case Xsse_RCPF:   *p++ = 0x53; break;
+         case Xsse_RSQRTF: *p++ = 0x52; break;
+         case Xsse_SQRTF:  *p++ = 0x51; break;
+         case Xsse_SUBF:   *p++ = 0x5C; break;
+         case Xsse_CMPEQF: *p++ = 0xC2; xtra = 0x100; break;
+         case Xsse_CMPLTF: *p++ = 0xC2; xtra = 0x101; break;
+         case Xsse_CMPLEF: *p++ = 0xC2; xtra = 0x102; break;
+         case Xsse_CMPUNF: *p++ = 0xC2; xtra = 0x103; break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc(i->Xin.Sse64Fx2.dst),
+                               vregEnc(i->Xin.Sse64Fx2.src) );
+      if (xtra & 0x100)
+         *p++ = toUChar(xtra & 0xFF);
+      goto done;
+
+   case Xin_Sse32FLo:
+      /* Scalar single FP ops: same table, 0xF3 prefix. */
+      xtra = 0;
+      *p++ = 0xF3;
+      *p++ = 0x0F;
+      switch (i->Xin.Sse32FLo.op) {
+         case Xsse_ADDF:   *p++ = 0x58; break;
+         case Xsse_DIVF:   *p++ = 0x5E; break;
+         case Xsse_MAXF:   *p++ = 0x5F; break;
+         case Xsse_MINF:   *p++ = 0x5D; break;
+         case Xsse_MULF:   *p++ = 0x59; break;
+         case Xsse_RCPF:   *p++ = 0x53; break;
+         case Xsse_RSQRTF: *p++ = 0x52; break;
+         case Xsse_SQRTF:  *p++ = 0x51; break;
+         case Xsse_SUBF:   *p++ = 0x5C; break;
+         case Xsse_CMPEQF: *p++ = 0xC2; xtra = 0x100; break;
+         case Xsse_CMPLTF: *p++ = 0xC2; xtra = 0x101; break;
+         case Xsse_CMPLEF: *p++ = 0xC2; xtra = 0x102; break;
+         case Xsse_CMPUNF: *p++ = 0xC2; xtra = 0x103; break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc(i->Xin.Sse32FLo.dst),
+                               vregEnc(i->Xin.Sse32FLo.src) );
+      if (xtra & 0x100)
+         *p++ = toUChar(xtra & 0xFF);
+      goto done;
+
+   case Xin_Sse64FLo:
+      /* Scalar double FP ops: same table, 0xF2 prefix. */
+      xtra = 0;
+      *p++ = 0xF2;
+      *p++ = 0x0F;
+      switch (i->Xin.Sse64FLo.op) {
+         case Xsse_ADDF:   *p++ = 0x58; break;
+         case Xsse_DIVF:   *p++ = 0x5E; break;
+         case Xsse_MAXF:   *p++ = 0x5F; break;
+         case Xsse_MINF:   *p++ = 0x5D; break;
+         case Xsse_MULF:   *p++ = 0x59; break;
+         case Xsse_RCPF:   *p++ = 0x53; break;
+         case Xsse_RSQRTF: *p++ = 0x52; break;
+         case Xsse_SQRTF:  *p++ = 0x51; break;
+         case Xsse_SUBF:   *p++ = 0x5C; break;
+         case Xsse_CMPEQF: *p++ = 0xC2; xtra = 0x100; break;
+         case Xsse_CMPLTF: *p++ = 0xC2; xtra = 0x101; break;
+         case Xsse_CMPLEF: *p++ = 0xC2; xtra = 0x102; break;
+         case Xsse_CMPUNF: *p++ = 0xC2; xtra = 0x103; break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc(i->Xin.Sse64FLo.dst),
+                               vregEnc(i->Xin.Sse64FLo.src) );
+      if (xtra & 0x100)
+         *p++ = toUChar(xtra & 0xFF);
+      goto done;
+
+   case Xin_SseReRg:
+#     define XX(_n) *p++ = (_n)
+      switch (i->Xin.SseReRg.op) {
+         case Xsse_MOV:     /*movups*/ XX(0x0F); XX(0x10); break;
+         case Xsse_OR:                 XX(0x0F); XX(0x56); break;
+         case Xsse_XOR:                XX(0x0F); XX(0x57); break;
+         case Xsse_AND:                XX(0x0F); XX(0x54); break;
+         case Xsse_PACKSSD:  XX(0x66); XX(0x0F); XX(0x6B); break;
+         case Xsse_PACKSSW:  XX(0x66); XX(0x0F); XX(0x63); break;
+         case Xsse_PACKUSW:  XX(0x66); XX(0x0F); XX(0x67); break;
+         case Xsse_ADD8:     XX(0x66); XX(0x0F); XX(0xFC); break;
+         case Xsse_ADD16:    XX(0x66); XX(0x0F); XX(0xFD); break;
+         case Xsse_ADD32:    XX(0x66); XX(0x0F); XX(0xFE); break;
+         case Xsse_ADD64:    XX(0x66); XX(0x0F); XX(0xD4); break;
+         case Xsse_QADD8S:   XX(0x66); XX(0x0F); XX(0xEC); break;
+         case Xsse_QADD16S:  XX(0x66); XX(0x0F); XX(0xED); break;
+         case Xsse_QADD8U:   XX(0x66); XX(0x0F); XX(0xDC); break;
+         case Xsse_QADD16U:  XX(0x66); XX(0x0F); XX(0xDD); break;
+         case Xsse_AVG8U:    XX(0x66); XX(0x0F); XX(0xE0); break;
+         case Xsse_AVG16U:   XX(0x66); XX(0x0F); XX(0xE3); break;
+         case Xsse_CMPEQ8:   XX(0x66); XX(0x0F); XX(0x74); break;
+         case Xsse_CMPEQ16:  XX(0x66); XX(0x0F); XX(0x75); break;
+         case Xsse_CMPEQ32:  XX(0x66); XX(0x0F); XX(0x76); break;
+         case Xsse_CMPGT8S:  XX(0x66); XX(0x0F); XX(0x64); break;
+         case Xsse_CMPGT16S: XX(0x66); XX(0x0F); XX(0x65); break;
+         case Xsse_CMPGT32S: XX(0x66); XX(0x0F); XX(0x66); break;
+         case Xsse_MAX16S:   XX(0x66); XX(0x0F); XX(0xEE); break;
+         case Xsse_MAX8U:    XX(0x66); XX(0x0F); XX(0xDE); break;
+         case Xsse_MIN16S:   XX(0x66); XX(0x0F); XX(0xEA); break;
+         case Xsse_MIN8U:    XX(0x66); XX(0x0F); XX(0xDA); break;
+         case Xsse_MULHI16U: XX(0x66); XX(0x0F); XX(0xE4); break;
+         case Xsse_MULHI16S: XX(0x66); XX(0x0F); XX(0xE5); break;
+         case Xsse_MUL16:    XX(0x66); XX(0x0F); XX(0xD5); break;
+         case Xsse_SHL16:    XX(0x66); XX(0x0F); XX(0xF1); break;
+         case Xsse_SHL32:    XX(0x66); XX(0x0F); XX(0xF2); break;
+         case Xsse_SHL64:    XX(0x66); XX(0x0F); XX(0xF3); break;
+         case Xsse_SAR16:    XX(0x66); XX(0x0F); XX(0xE1); break;
+         case Xsse_SAR32:    XX(0x66); XX(0x0F); XX(0xE2); break;
+         case Xsse_SHR16:    XX(0x66); XX(0x0F); XX(0xD1); break;
+         case Xsse_SHR32:    XX(0x66); XX(0x0F); XX(0xD2); break;
+         case Xsse_SHR64:    XX(0x66); XX(0x0F); XX(0xD3); break;
+         case Xsse_SUB8:     XX(0x66); XX(0x0F); XX(0xF8); break;
+         case Xsse_SUB16:    XX(0x66); XX(0x0F); XX(0xF9); break;
+         case Xsse_SUB32:    XX(0x66); XX(0x0F); XX(0xFA); break;
+         case Xsse_SUB64:    XX(0x66); XX(0x0F); XX(0xFB); break;
+         case Xsse_QSUB8S:   XX(0x66); XX(0x0F); XX(0xE8); break;
+         case Xsse_QSUB16S:  XX(0x66); XX(0x0F); XX(0xE9); break;
+         case Xsse_QSUB8U:   XX(0x66); XX(0x0F); XX(0xD8); break;
+         case Xsse_QSUB16U:  XX(0x66); XX(0x0F); XX(0xD9); break;
+         case Xsse_UNPCKHB:  XX(0x66); XX(0x0F); XX(0x68); break;
+         case Xsse_UNPCKHW:  XX(0x66); XX(0x0F); XX(0x69); break;
+         case Xsse_UNPCKHD:  XX(0x66); XX(0x0F); XX(0x6A); break;
+         case Xsse_UNPCKHQ:  XX(0x66); XX(0x0F); XX(0x6D); break;
+         case Xsse_UNPCKLB:  XX(0x66); XX(0x0F); XX(0x60); break;
+         case Xsse_UNPCKLW:  XX(0x66); XX(0x0F); XX(0x61); break;
+         case Xsse_UNPCKLD:  XX(0x66); XX(0x0F); XX(0x62); break;
+         case Xsse_UNPCKLQ:  XX(0x66); XX(0x0F); XX(0x6C); break;
+         default: goto bad;
+      }
+      p = doAMode_R_enc_enc(p, vregEnc(i->Xin.SseReRg.dst),
+                               vregEnc(i->Xin.SseReRg.src) );
+#     undef XX
+      goto done;
+
+   case Xin_SseCMov:
+      /* jmp fwds if !condition */
+      *p++ = toUChar(0x70 + (i->Xin.SseCMov.cond ^ 1));
+      *p++ = 0; /* # of bytes in the next bit, which we don't know yet */
+      ptmp = p;
+
+      /* movaps %src, %dst */
+      *p++ = 0x0F; 
+      *p++ = 0x28; 
+      p = doAMode_R_enc_enc(p, vregEnc(i->Xin.SseCMov.dst),
+                               vregEnc(i->Xin.SseCMov.src) );
+
+      /* Fill in the jump offset. */
+      *(ptmp-1) = toUChar(p - ptmp);
+      goto done;
+
+   case Xin_SseShuf:
+      *p++ = 0x66; 
+      *p++ = 0x0F; 
+      *p++ = 0x70; 
+      p = doAMode_R_enc_enc(p, vregEnc(i->Xin.SseShuf.dst),
+                               vregEnc(i->Xin.SseShuf.src) );
+      *p++ = (UChar)(i->Xin.SseShuf.order);
+      goto done;
+
+   case Xin_EvCheck: {
+      /* We generate:
+            (3 bytes)  decl 4(%ebp)    4 == offsetof(host_EvC_COUNTER)
+            (2 bytes)  jns  nofail     expected taken
+            (3 bytes)  jmp* 0(%ebp)    0 == offsetof(host_EvC_FAILADDR)
+            nofail:
+      */
+      /* This is heavily asserted re instruction lengths.  It needs to
+         be.  If we get given unexpected forms of .amCounter or
+         .amFailAddr -- basically, anything that's not of the form
+         uimm7(%ebp) -- they are likely to fail. */
+      /* Note also that after the decl we must be very careful not to
+         read the carry flag, else we get a partial flags stall.
+         js/jns avoids that, though. */
+      UChar* p0 = p;
+      /* ---  decl 4(%ebp) --- */
+      /* "1" because there's no register in this encoding; instead
+         the register field is used as a sub opcode.  The encoding
+         for "decl r/m32" is FF /1, hence the "1". */
+      *p++ = 0xFF;
+      p = doAMode_M_enc(p, 1, i->Xin.EvCheck.amCounter);
+      vassert(p - p0 == 3);
+      /* --- jns nofail --- */
+      *p++ = 0x79;
+      *p++ = 0x03; /* need to check this 0x03 after the next insn */
+      vassert(p - p0 == 5);
+      /* --- jmp* 0(%ebp) --- */
+      /* The encoding is FF /4. */
+      *p++ = 0xFF;
+      p = doAMode_M_enc(p, 4, i->Xin.EvCheck.amFailAddr);
+      vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
+      /* And crosscheck .. */
+      vassert(evCheckSzB_X86() == 8);
+      goto done;
+   }
+
+   case Xin_ProfInc: {
+      /* We generate   addl $1,NotKnownYet
+                       adcl $0,NotKnownYet+4
+         in the expectation that a later call to LibVEX_patchProfCtr
+         will be used to fill in the immediate fields once the right
+         value is known.
+           83 05  00 00 00 00  01
+           83 15  00 00 00 00  00
+      */
+      *p++ = 0x83; *p++ = 0x05;
+      *p++ = 0x00; *p++ = 0x00; *p++ = 0x00; *p++ = 0x00;
+      *p++ = 0x01;
+      *p++ = 0x83; *p++ = 0x15;
+      *p++ = 0x00; *p++ = 0x00; *p++ = 0x00; *p++ = 0x00;
+      *p++ = 0x00;
+      /* Tell the caller .. */
+      vassert(!(*is_profInc));
+      *is_profInc = True;
+      goto done;
+   }
+
+   default: 
+      goto bad;
+   }
+
+  bad:
+   ppX86Instr(i, mode64);
+   vpanic("emit_X86Instr");
+   /*NOTREACHED*/
+   
+  done:
+   vassert(p - &buf[0] <= 32);
+   return p - &buf[0];
+}
+
+
+/* How big is an event check?  See case for Xin_EvCheck in
+   emit_X86Instr just above.  That crosschecks what this returns, so
+   we can tell if we're inconsistent. */
+Int evCheckSzB_X86 ( void )
+{
+   /* Byte count of the sequence emitted for Xin_EvCheck:
+      3 (decl 4(%ebp)) + 2 (jns) + 3 (jmp* 0(%ebp)).  The emitter
+      crosschecks this value with vassert. */
+   return 3 + 2 + 3;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
+                                 void* place_to_chain,
+                                 const void* disp_cp_chain_me_EXPECTED,
+                                 const void* place_to_jump_to )
+{
+   vassert(endness_host == VexEndnessLE);
+
+   /* What we're expecting to see is:
+        movl $disp_cp_chain_me_EXPECTED, %edx
+        call *%edx
+      viz
+        BA <4 bytes value == disp_cp_chain_me_EXPECTED>
+        FF D2
+   */
+   UChar* p = (UChar*)place_to_chain;
+   vassert(p[0] == 0xBA);
+   /* Misaligned 32-bit read through a UChar buffer -- acceptable here
+      since this code only ever runs on an x86 host. */
+   vassert(*(UInt*)(&p[1]) == (UInt)(Addr)disp_cp_chain_me_EXPECTED);
+   vassert(p[5] == 0xFF);
+   vassert(p[6] == 0xD2);
+   /* And what we want to change it to is:
+          jmp disp32   where disp32 is relative to the next insn
+          ud2;
+        viz
+          E9 <4 bytes == disp32>
+          0F 0B
+      The replacement has the same length as the original.
+   */
+   /* This is the delta we need to put into a JMP d32 insn.  It's
+      relative to the start of the next insn, hence the -5.  */
+   Long delta = (Long)((const UChar *)place_to_jump_to - p) - 5;
+
+   /* And make the modifications. */
+   p[0] = 0xE9;
+   p[1] = (delta >> 0) & 0xFF;
+   p[2] = (delta >> 8) & 0xFF;
+   p[3] = (delta >> 16) & 0xFF;
+   p[4] = (delta >> 24) & 0xFF;
+   p[5] = 0x0F; p[6]  = 0x0B;
+   /* sanity check on the delta -- top 32 are all 0 or all 1 */
+   /* NOTE(review): this check runs after the bytes were written.  On a
+      32-bit host the pointer subtraction above cannot exceed 32 bits,
+      so the assert can only fire for genuinely impossible values.  It
+      also relies on arithmetic right-shift of a negative Long, which
+      is implementation-defined per the C standard but universal on the
+      compilers this targets. */
+   delta >>= 32;
+   vassert(delta == 0LL || delta == -1LL);
+   /* Report the 7 rewritten bytes so the caller can invalidate any
+      cached/translated copies of them. */
+   VexInvalRange vir = { (HWord)place_to_chain, 7 };
+   return vir;
+}
+
+
+/* NB: what goes on here has to be very closely coordinated with the
+   emitInstr case for XDirect, above. */
+VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
+                                   void* place_to_unchain,
+                                   const void* place_to_jump_to_EXPECTED,
+                                   const void* disp_cp_chain_me )
+{
+   vassert(endness_host == VexEndnessLE);
+
+   /* What we're expecting to see is:
+          jmp d32
+          ud2;
+       viz
+          E9 <4 bytes == disp32>
+          0F 0B
+   */
+   UChar* p     = (UChar*)place_to_unchain;
+   Bool   valid = False;
+   if (p[0] == 0xE9 
+       && p[5]  == 0x0F && p[6]  == 0x0B) {
+      /* Check the offset is right. */
+      /* The E9 disp32 is relative to the insn following the jmp,
+         i.e. p+5; misaligned read is fine on an x86 host. */
+      Int s32 = *(Int*)(&p[1]);
+      if ((UChar*)p + 5 + s32 == place_to_jump_to_EXPECTED) {
+         valid = True;
+         if (0)
+            vex_printf("QQQ unchainXDirect_X86: found valid\n");
+      }
+   }
+   vassert(valid);
+   /* And what we want to change it to is:
+         movl $disp_cp_chain_me, %edx
+         call *%edx
+      viz
+         BA <4 bytes value == disp_cp_chain_me_EXPECTED>
+         FF D2
+      So it's the same length (convenient, huh).
+   */
+   p[0] = 0xBA;
+   *(UInt*)(&p[1]) = (UInt)(Addr)disp_cp_chain_me;
+   p[5] = 0xFF;
+   p[6] = 0xD2;
+   /* 7 bytes at place_to_unchain were modified. */
+   VexInvalRange vir = { (HWord)place_to_unchain, 7 };
+   return vir;
+}
+
+
+/* Patch the counter address into a profile inc point, as previously
+   created by the Xin_ProfInc case for emit_X86Instr. */
+VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
+                                 void*  place_to_patch,
+                                 const ULong* location_of_counter )
+{
+   vassert(endness_host == VexEndnessLE);
+   /* Only meaningful on a 32-bit host, where a pointer fits in the
+      imm32 fields patched below. */
+   vassert(sizeof(ULong*) == 4);
+   UChar* p = (UChar*)place_to_patch;
+   /* Check we are looking at the exact addl/adcl pair that
+      Xin_ProfInc emitted (83 05 imm32 01 / 83 15 imm32 00), still
+      with zeroed address fields. */
+   vassert(p[0] == 0x83);
+   vassert(p[1] == 0x05);
+   vassert(p[2] == 0x00);
+   vassert(p[3] == 0x00);
+   vassert(p[4] == 0x00);
+   vassert(p[5] == 0x00);
+   vassert(p[6] == 0x01);
+   vassert(p[7] == 0x83);
+   vassert(p[8] == 0x15);
+   vassert(p[9] == 0x00);
+   vassert(p[10] == 0x00);
+   vassert(p[11] == 0x00);
+   vassert(p[12] == 0x00);
+   vassert(p[13] == 0x00);
+   /* Patch the low-word address into "addl $1, <addr>" ... */
+   UInt imm32 = (UInt)(Addr)location_of_counter;
+   p[2] = imm32 & 0xFF; imm32 >>= 8;
+   p[3] = imm32 & 0xFF; imm32 >>= 8;
+   p[4] = imm32 & 0xFF; imm32 >>= 8;
+   p[5] = imm32 & 0xFF; imm32 >>= 8;
+   /* ... and the high-word address (counter+4) into "adcl $0, <addr>". */
+   imm32 = 4 + (UInt)(Addr)location_of_counter;
+   p[9]  = imm32 & 0xFF; imm32 >>= 8;
+   p[10] = imm32 & 0xFF; imm32 >>= 8;
+   p[11] = imm32 & 0xFF; imm32 >>= 8;
+   p[12] = imm32 & 0xFF; imm32 >>= 8;
+   /* 14 bytes (both insns) were modified. */
+   VexInvalRange vir = { (HWord)place_to_patch, 14 };
+   return vir;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                     host_x86_defs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_x86_defs.h b/VEX/priv/host_x86_defs.h
new file mode 100644
index 0000000..f1f737f
--- /dev/null
+++ b/VEX/priv/host_x86_defs.h
@@ -0,0 +1,776 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                   host_x86_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __VEX_HOST_X86_DEFS_H
+#define __VEX_HOST_X86_DEFS_H
+
+#include "libvex_basictypes.h"
+#include "libvex.h"                      // VexArch
+#include "host_generic_regs.h"           // HReg
+
+/* --------- Registers. --------- */
+
+/* The usual HReg abstraction.  There are 8 real int regs,
+   6 real float regs, and 8 real vector regs. 
+*/
+
+#define ST_IN static inline
+/* Integer registers.  The 3rd mkHReg field is the real x86 register
+   encoding (EAX=0, ECX=1, EDX=2, EBX=3, ESI=6, EDI=7); the 4th is the
+   unique index in the register universe. */
+ST_IN HReg hregX86_EAX   ( void ) { return mkHReg(False, HRcInt32,  0,  0); }
+ST_IN HReg hregX86_EBX   ( void ) { return mkHReg(False, HRcInt32,  3,  1); }
+ST_IN HReg hregX86_ECX   ( void ) { return mkHReg(False, HRcInt32,  1,  2); }
+ST_IN HReg hregX86_EDX   ( void ) { return mkHReg(False, HRcInt32,  2,  3); }
+ST_IN HReg hregX86_ESI   ( void ) { return mkHReg(False, HRcInt32,  6,  4); }
+ST_IN HReg hregX86_EDI   ( void ) { return mkHReg(False, HRcInt32,  7,  5); }
+
+/* Six "fake" 64-bit float registers -- presumably a flat-file model of
+   the x87 stack; confirm against the FP isel/emit code. */
+ST_IN HReg hregX86_FAKE0 ( void ) { return mkHReg(False, HRcFlt64,  0,  6); }
+ST_IN HReg hregX86_FAKE1 ( void ) { return mkHReg(False, HRcFlt64,  1,  7); }
+ST_IN HReg hregX86_FAKE2 ( void ) { return mkHReg(False, HRcFlt64,  2,  8); }
+ST_IN HReg hregX86_FAKE3 ( void ) { return mkHReg(False, HRcFlt64,  3,  9); }
+ST_IN HReg hregX86_FAKE4 ( void ) { return mkHReg(False, HRcFlt64,  4, 10); }
+ST_IN HReg hregX86_FAKE5 ( void ) { return mkHReg(False, HRcFlt64,  5, 11); }
+
+/* The eight SSE vector registers. */
+ST_IN HReg hregX86_XMM0  ( void ) { return mkHReg(False, HRcVec128, 0, 12); }
+ST_IN HReg hregX86_XMM1  ( void ) { return mkHReg(False, HRcVec128, 1, 13); }
+ST_IN HReg hregX86_XMM2  ( void ) { return mkHReg(False, HRcVec128, 2, 14); }
+ST_IN HReg hregX86_XMM3  ( void ) { return mkHReg(False, HRcVec128, 3, 15); }
+ST_IN HReg hregX86_XMM4  ( void ) { return mkHReg(False, HRcVec128, 4, 16); }
+ST_IN HReg hregX86_XMM5  ( void ) { return mkHReg(False, HRcVec128, 5, 17); }
+ST_IN HReg hregX86_XMM6  ( void ) { return mkHReg(False, HRcVec128, 6, 18); }
+ST_IN HReg hregX86_XMM7  ( void ) { return mkHReg(False, HRcVec128, 7, 19); }
+
+/* ESP/EBP come last -- presumably so they are never handed out by the
+   register allocator; confirm against getRRegUniverse_X86. */
+ST_IN HReg hregX86_ESP   ( void ) { return mkHReg(False, HRcInt32,  4, 20); }
+ST_IN HReg hregX86_EBP   ( void ) { return mkHReg(False, HRcInt32,  5, 21); }
+#undef ST_IN
+
+extern void ppHRegX86 ( HReg );
+
+
+/* --------- Condition codes, Intel encoding. --------- */
+
+typedef
+   enum {
+      Xcc_O      = 0,  /* overflow           */
+      Xcc_NO     = 1,  /* no overflow        */
+
+      Xcc_B      = 2,  /* below              */
+      Xcc_NB     = 3,  /* not below          */
+
+      Xcc_Z      = 4,  /* zero               */
+      Xcc_NZ     = 5,  /* not zero           */
+
+      Xcc_BE     = 6,  /* below or equal     */
+      Xcc_NBE    = 7,  /* not below or equal */
+
+      Xcc_S      = 8,  /* negative           */
+      Xcc_NS     = 9,  /* not negative       */
+
+      Xcc_P      = 10, /* parity even        */
+      Xcc_NP     = 11, /* not parity even    */
+
+      Xcc_L      = 12, /* jump less          */
+      Xcc_NL     = 13, /* not less           */
+
+      Xcc_LE     = 14, /* less or equal      */
+      Xcc_NLE    = 15, /* not less or equal  */
+
+      Xcc_ALWAYS = 16  /* the usual hack     */
+   }
+   X86CondCode;
+
+extern const HChar* showX86CondCode ( X86CondCode );
+
+
+/* --------- Memory address expressions (amodes). --------- */
+
+typedef
+   enum {
+     Xam_IR,        /* Immediate + Reg */
+     Xam_IRRS       /* Immediate + Reg1 + (Reg2 << Shift) */
+   }
+   X86AModeTag;
+
+typedef
+   struct {
+      X86AModeTag tag;
+      union {
+         struct {
+            UInt imm;
+            HReg reg;
+         } IR;
+         struct {
+            UInt imm;
+            HReg base;
+            HReg index;
+            Int  shift; /* 0, 1, 2 or 3 only */
+         } IRRS;
+      } Xam;
+   }
+   X86AMode;
+
+extern X86AMode* X86AMode_IR   ( UInt, HReg );
+extern X86AMode* X86AMode_IRRS ( UInt, HReg, HReg, Int );
+
+extern X86AMode* dopyX86AMode ( X86AMode* );
+
+extern void ppX86AMode ( X86AMode* );
+
+
+/* --------- Operand, which can be reg, immediate or memory. --------- */
+
+typedef 
+   enum {
+      Xrmi_Imm,
+      Xrmi_Reg,
+      Xrmi_Mem
+   }
+   X86RMITag;
+
+typedef
+   struct {
+      X86RMITag tag;
+      union {
+         struct {
+            UInt imm32;
+         } Imm;
+         struct {
+            HReg reg;
+         } Reg;
+         struct {
+            X86AMode* am;
+         } Mem;
+      }
+      Xrmi;
+   }
+   X86RMI;
+
+extern X86RMI* X86RMI_Imm ( UInt );
+extern X86RMI* X86RMI_Reg ( HReg );
+extern X86RMI* X86RMI_Mem ( X86AMode* );
+
+extern void ppX86RMI ( X86RMI* );
+
+
+/* --------- Operand, which can be reg or immediate only. --------- */
+
+typedef 
+   enum {
+      Xri_Imm,
+      Xri_Reg
+   }
+   X86RITag;
+
+typedef
+   struct {
+      X86RITag tag;
+      union {
+         struct {
+            UInt imm32;
+         } Imm;
+         struct {
+            HReg reg;
+         } Reg;
+      }
+      Xri;
+   }
+   X86RI;
+
+extern X86RI* X86RI_Imm ( UInt );
+extern X86RI* X86RI_Reg ( HReg );
+
+extern void ppX86RI ( X86RI* );
+
+
+/* --------- Operand, which can be reg or memory only. --------- */
+
+typedef 
+   enum {
+      Xrm_Reg,
+      Xrm_Mem
+   }
+   X86RMTag;
+
+typedef
+   struct {
+      X86RMTag tag;
+      union {
+         struct {
+            HReg reg;
+         } Reg;
+         struct {
+            X86AMode* am;
+         } Mem;
+      }
+      Xrm;
+   }
+   X86RM;
+
+extern X86RM* X86RM_Reg ( HReg );
+extern X86RM* X86RM_Mem ( X86AMode* );
+
+extern void ppX86RM ( X86RM* );
+
+
+/* --------- Instructions. --------- */
+
+/* --------- */
+typedef
+   enum {
+      Xun_NEG,
+      Xun_NOT
+   }
+   X86UnaryOp;
+
+extern const HChar* showX86UnaryOp ( X86UnaryOp );
+
+
+/* --------- */
+typedef 
+   enum {
+      Xalu_INVALID,
+      Xalu_MOV,
+      Xalu_CMP,
+      Xalu_ADD, Xalu_SUB, Xalu_ADC, Xalu_SBB, 
+      Xalu_AND, Xalu_OR, Xalu_XOR,
+      Xalu_MUL
+   }
+   X86AluOp;
+
+extern const HChar* showX86AluOp ( X86AluOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Xsh_INVALID,
+      Xsh_SHL, Xsh_SHR, Xsh_SAR
+   }
+   X86ShiftOp;
+
+extern const HChar* showX86ShiftOp ( X86ShiftOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Xfp_INVALID,
+      /* Binary */
+      Xfp_ADD, Xfp_SUB, Xfp_MUL, Xfp_DIV, 
+      Xfp_SCALE, Xfp_ATAN, Xfp_YL2X, Xfp_YL2XP1, Xfp_PREM, Xfp_PREM1,
+      /* Unary */
+      Xfp_SQRT, Xfp_ABS, Xfp_NEG, Xfp_MOV, Xfp_SIN, Xfp_COS, Xfp_TAN,
+      Xfp_ROUND, Xfp_2XM1
+   }
+   X86FpOp;
+
+extern const HChar* showX86FpOp ( X86FpOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Xsse_INVALID,
+      /* mov */
+      Xsse_MOV,
+      /* Floating point binary */
+      Xsse_ADDF, Xsse_SUBF, Xsse_MULF, Xsse_DIVF,
+      Xsse_MAXF, Xsse_MINF,
+      Xsse_CMPEQF, Xsse_CMPLTF, Xsse_CMPLEF, Xsse_CMPUNF,
+      /* Floating point unary */
+      Xsse_RCPF, Xsse_RSQRTF, Xsse_SQRTF, 
+      /* Bitwise */
+      Xsse_AND, Xsse_OR, Xsse_XOR, Xsse_ANDN,
+      /* Integer binary */
+      Xsse_ADD8,   Xsse_ADD16,   Xsse_ADD32,   Xsse_ADD64,
+      Xsse_QADD8U, Xsse_QADD16U,
+      Xsse_QADD8S, Xsse_QADD16S,
+      Xsse_SUB8,   Xsse_SUB16,   Xsse_SUB32,   Xsse_SUB64,
+      Xsse_QSUB8U, Xsse_QSUB16U,
+      Xsse_QSUB8S, Xsse_QSUB16S,
+      Xsse_MUL16,
+      Xsse_MULHI16U,
+      Xsse_MULHI16S,
+      Xsse_AVG8U, Xsse_AVG16U,
+      Xsse_MAX16S,
+      Xsse_MAX8U,
+      Xsse_MIN16S,
+      Xsse_MIN8U,
+      Xsse_CMPEQ8,  Xsse_CMPEQ16,  Xsse_CMPEQ32,
+      Xsse_CMPGT8S, Xsse_CMPGT16S, Xsse_CMPGT32S,
+      Xsse_SHL16, Xsse_SHL32, Xsse_SHL64,
+      Xsse_SHR16, Xsse_SHR32, Xsse_SHR64,
+      Xsse_SAR16, Xsse_SAR32, 
+      Xsse_PACKSSD, Xsse_PACKSSW, Xsse_PACKUSW,
+      Xsse_UNPCKHB, Xsse_UNPCKHW, Xsse_UNPCKHD, Xsse_UNPCKHQ,
+      Xsse_UNPCKLB, Xsse_UNPCKLW, Xsse_UNPCKLD, Xsse_UNPCKLQ
+   }
+   X86SseOp;
+
+extern const HChar* showX86SseOp ( X86SseOp );
+
+
+/* --------- */
+typedef
+   enum {
+      Xin_Alu32R,    /* 32-bit mov/arith/logical, dst=REG */
+      Xin_Alu32M,    /* 32-bit mov/arith/logical, dst=MEM */
+      Xin_Sh32,      /* 32-bit shift/rotate, dst=REG */
+      Xin_Test32,    /* 32-bit test of REG or MEM against imm32 (AND, set
+                        flags, discard result) */
+      Xin_Unary32,   /* 32-bit not and neg */
+      Xin_Lea32,     /* 32-bit compute EA into a reg */
+      Xin_MulL,      /* 32 x 32 -> 64 multiply */
+      Xin_Div,       /* 64/32 -> (32,32) div and mod */
+      Xin_Sh3232,    /* shldl or shrdl */
+      Xin_Push,      /* push (32-bit?) value on stack */
+      Xin_Call,      /* call to address in register */
+      Xin_XDirect,   /* direct transfer to GA */
+      Xin_XIndir,    /* indirect transfer to GA */
+      Xin_XAssisted, /* assisted transfer to GA */
+      Xin_CMov32,    /* conditional move */
+      Xin_LoadEX,    /* mov{s,z}{b,w}l from mem to reg */
+      Xin_Store,     /* store 16/8 bit value in memory */
+      Xin_Set32,     /* convert condition code to 32-bit value */
+      Xin_Bsfr32,    /* 32-bit bsf/bsr */
+      Xin_MFence,    /* mem fence (not just sse2, but sse0 and 1/mmxext too) */
+      Xin_ACAS,      /* 8/16/32-bit lock;cmpxchg */
+      Xin_DACAS,     /* lock;cmpxchg8b (doubleword ACAS, 2 x 32-bit only) */
+
+      Xin_FpUnary,   /* FP fake unary op */
+      Xin_FpBinary,  /* FP fake binary op */
+      Xin_FpLdSt,    /* FP fake load/store */
+      Xin_FpLdStI,   /* FP fake load/store, converting to/from Int */
+      Xin_Fp64to32,  /* FP round IEEE754 double to IEEE754 single */
+      Xin_FpCMov,    /* FP fake floating point conditional move */
+      Xin_FpLdCW,    /* fldcw */
+      Xin_FpStSW_AX, /* fstsw %ax */
+      Xin_FpCmp,     /* FP compare, generating a C3210 value into int reg */
+
+      Xin_SseConst,  /* Generate restricted SSE literal */
+      Xin_SseLdSt,   /* SSE load/store, no alignment constraints */
+      Xin_SseLdzLO,  /* SSE load low 32/64 bits, zero remainder of reg */
+      Xin_Sse32Fx4,  /* SSE binary, 32Fx4 */
+      Xin_Sse32FLo,  /* SSE binary, 32F in lowest lane only */
+      Xin_Sse64Fx2,  /* SSE binary, 64Fx2 */
+      Xin_Sse64FLo,  /* SSE binary, 64F in lowest lane only */
+      Xin_SseReRg,   /* SSE binary general reg-reg, Re, Rg */
+      Xin_SseCMov,   /* SSE conditional move */
+      Xin_SseShuf,   /* SSE2 shuffle (pshufd) */
+      Xin_EvCheck,   /* Event check */
+      Xin_ProfInc    /* 64-bit profile counter increment */
+   }
+   X86InstrTag;
+
+/* Destinations are on the RIGHT (second operand) */
+
+typedef
+   struct {
+      X86InstrTag tag;
+      union {
+         struct {
+            X86AluOp op;
+            X86RMI*  src;
+            HReg     dst;
+         } Alu32R;
+         struct {
+            X86AluOp  op;
+            X86RI*    src;
+            X86AMode* dst;
+         } Alu32M;
+         struct {
+            X86ShiftOp op;
+            UInt  src;  /* shift amount, or 0 means %cl */
+            HReg  dst;
+         } Sh32;
+         struct {
+            UInt   imm32;
+            X86RM* dst; /* not written, only read */
+         } Test32;
+         /* Not and Neg */
+         struct {
+            X86UnaryOp op;
+            HReg       dst;
+         } Unary32;
+         /* 32-bit compute EA into a reg */
+         struct {
+            X86AMode* am;
+            HReg      dst;
+         } Lea32;
+         /* EDX:EAX = EAX *s/u r/m32 */
+         struct {
+            Bool   syned;
+            X86RM* src;
+         } MulL;
+         /* x86 div/idiv instruction.  Modifies EDX and EAX and reads src. */
+         struct {
+            Bool   syned;
+            X86RM* src;
+         } Div;
+         /* shld/shrd.  op may only be Xsh_SHL or Xsh_SHR */
+         struct {
+            X86ShiftOp op;
+            UInt       amt;   /* shift amount, or 0 means %cl */
+            HReg       src;
+            HReg       dst;
+         } Sh3232;
+         struct {
+            X86RMI* src;
+         } Push;
+         /* Pseudo-insn.  Call target (an absolute address), on given
+            condition (which could be Xcc_ALWAYS). */
+         struct {
+            X86CondCode cond;
+            Addr32      target;
+            Int         regparms; /* 0 .. 3 */
+            RetLoc      rloc;     /* where the return value will be */
+         } Call;
+         /* Update the guest EIP value, then exit requesting to chain
+            to it.  May be conditional.  Urr, use of Addr32 implicitly
+            assumes that wordsize(guest) == wordsize(host). */
+         struct {
+            Addr32      dstGA;    /* next guest address */
+            X86AMode*   amEIP;    /* amode in guest state for EIP */
+            X86CondCode cond;     /* can be Xcc_ALWAYS */
+            Bool        toFastEP; /* chain to the slow or fast point? */
+         } XDirect;
+         /* Boring transfer to a guest address not known at JIT time.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg        dstGA;
+            X86AMode*   amEIP;
+            X86CondCode cond; /* can be Xcc_ALWAYS */
+         } XIndir;
+         /* Assisted transfer to a guest address, most general case.
+            Not chainable.  May be conditional. */
+         struct {
+            HReg        dstGA;
+            X86AMode*   amEIP;
+            X86CondCode cond; /* can be Xcc_ALWAYS */
+            IRJumpKind  jk;
+         } XAssisted;
+         /* Mov src to dst on the given condition, which may not
+            be the bogus Xcc_ALWAYS. */
+         struct {
+            X86CondCode cond;
+            X86RM*      src;
+            HReg        dst;
+         } CMov32;
+         /* Sign/Zero extending loads.  Dst size is always 32 bits. */
+         struct {
+            UChar     szSmall;
+            Bool      syned;
+            X86AMode* src;
+            HReg      dst;
+         } LoadEX;
+         /* 16/8 bit stores, which are troublesome (particularly
+            8-bit) */
+         struct {
+            UChar     sz; /* only 1 or 2 */
+            HReg      src;
+            X86AMode* dst;
+         } Store;
+         /* Convert a x86 condition code to a 32-bit value (0 or 1). */
+         struct {
+            X86CondCode cond;
+            HReg        dst;
+         } Set32;
+         /* 32-bit bsf or bsr. */
+         struct {
+            Bool isFwds;
+            HReg src;
+            HReg dst;
+         } Bsfr32;
+         /* Mem fence (not just sse2, but sse0 and sse1/mmxext too).
+            In short, an insn which flushes all preceding loads and
+            stores as much as possible before continuing.  On SSE2
+            we emit a real "mfence", on SSE1 or the MMXEXT subset
+            "sfence ; lock addl $0,0(%esp)" and on SSE0
+            "lock addl $0,0(%esp)".  This insn therefore carries the
+            host's hwcaps so the assembler knows what to emit. */
+         struct {
+            UInt hwcaps;
+         } MFence;
+         /* "lock;cmpxchg": mem address in .addr,
+             expected value in %eax, new value in %ebx */
+         struct {
+            X86AMode* addr;
+            UChar     sz; /* 1, 2 or 4 */
+         } ACAS;
+         /* "lock;cmpxchg8b": mem address in .addr, expected value in
+            %edx:%eax, new value in %ecx:%ebx */
+         struct {
+            X86AMode* addr;
+         } DACAS;
+
+         /* X86 Floating point (fake 3-operand, "flat reg file" insns) */
+         struct {
+            X86FpOp op;
+            HReg    src;
+            HReg    dst;
+         } FpUnary;
+         struct {
+            X86FpOp op;
+            HReg    srcL;
+            HReg    srcR;
+            HReg    dst;
+         } FpBinary;
+         struct {
+            Bool      isLoad;
+            UChar     sz; /* only 4 (IEEE single) or 8 (IEEE double) */
+            HReg      reg;
+            X86AMode* addr;
+         } FpLdSt;
+         /* Move 64-bit float to/from memory, converting to/from
+            signed int on the way.  Note the conversions will observe
+            the host FPU rounding mode currently in force. */
+         struct {
+            Bool      isLoad;
+            UChar     sz; /* only 2, 4 or 8 */
+            HReg      reg;
+            X86AMode* addr;
+         } FpLdStI;
+         /* By observing the current FPU rounding mode, round (etc)
+            src into dst given that dst should be interpreted as an
+            IEEE754 32-bit (float) type. */
+         struct {
+            HReg src;
+            HReg dst;
+         } Fp64to32;
+         /* Mov src to dst on the given condition, which may not
+            be the bogus Xcc_ALWAYS. */
+         struct {
+            X86CondCode cond;
+            HReg        src;
+            HReg        dst;
+         } FpCMov;
+         /* Load the FPU's 16-bit control word (fldcw) */
+         struct {
+            X86AMode* addr;
+         }
+         FpLdCW;
+         /* fstsw %ax */
+         struct {
+            /* no fields */
+         }
+         FpStSW_AX;
+         /* Do a compare, generating the C3210 bits into the dst. */
+         struct {
+            HReg    srcL;
+            HReg    srcR;
+            HReg    dst;
+         } FpCmp;
+
+         /* Simplistic SSE[123] */
+         struct {
+            UShort  con;
+            HReg    dst;
+         } SseConst;
+         struct {
+            Bool      isLoad;
+            HReg      reg;
+            X86AMode* addr;
+         } SseLdSt;
+         struct {
+            UChar     sz; /* 4 or 8 only */
+            HReg      reg;
+            X86AMode* addr;
+         } SseLdzLO;
+         struct {
+            X86SseOp op;
+            HReg     src;
+            HReg     dst;
+         } Sse32Fx4;
+         struct {
+            X86SseOp op;
+            HReg     src;
+            HReg     dst;
+         } Sse32FLo;
+         struct {
+            X86SseOp op;
+            HReg     src;
+            HReg     dst;
+         } Sse64Fx2;
+         struct {
+            X86SseOp op;
+            HReg     src;
+            HReg     dst;
+         } Sse64FLo;
+         struct {
+            X86SseOp op;
+            HReg     src;
+            HReg     dst;
+         } SseReRg;
+         /* Mov src to dst on the given condition, which may not
+            be the bogus Xcc_ALWAYS. */
+         struct {
+            X86CondCode cond;
+            HReg        src;
+            HReg        dst;
+         } SseCMov;
+         struct {
+            Int    order; /* 0 <= order <= 0xFF */
+            HReg   src;
+            HReg   dst;
+         } SseShuf;
+         struct {
+            X86AMode* amCounter;
+            X86AMode* amFailAddr;
+         } EvCheck;
+         struct {
+            /* No fields.  The address of the counter to inc is
+               installed later, post-translation, by patching it in,
+               as it is not known at translation time. */
+         } ProfInc;
+
+      } Xin;
+   }
+   X86Instr;
+
+/* Constructors for the X86Instr kinds above.  Each returns a freshly
+   allocated instruction node. */
+
+/* Integer ALU ops, unary ops and address computation. */
+extern X86Instr* X86Instr_Alu32R    ( X86AluOp, X86RMI*, HReg );
+extern X86Instr* X86Instr_Alu32M    ( X86AluOp, X86RI*,  X86AMode* );
+extern X86Instr* X86Instr_Unary32   ( X86UnaryOp op, HReg dst );
+extern X86Instr* X86Instr_Lea32     ( X86AMode* am, HReg dst );
+
+/* Shifts, tests, multiplies/divides, stack ops, calls and
+   control-flow transfers (direct / indirect / assisted). */
+extern X86Instr* X86Instr_Sh32      ( X86ShiftOp, UInt, HReg );
+extern X86Instr* X86Instr_Test32    ( UInt imm32, X86RM* dst );
+extern X86Instr* X86Instr_MulL      ( Bool syned, X86RM* );
+extern X86Instr* X86Instr_Div       ( Bool syned, X86RM* );
+extern X86Instr* X86Instr_Sh3232    ( X86ShiftOp, UInt amt, HReg src, HReg dst );
+extern X86Instr* X86Instr_Push      ( X86RMI* );
+extern X86Instr* X86Instr_Call      ( X86CondCode, Addr32, Int, RetLoc );
+extern X86Instr* X86Instr_XDirect   ( Addr32 dstGA, X86AMode* amEIP,
+                                      X86CondCode cond, Bool toFastEP );
+extern X86Instr* X86Instr_XIndir    ( HReg dstGA, X86AMode* amEIP,
+                                      X86CondCode cond );
+extern X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP,
+                                      X86CondCode cond, IRJumpKind jk );
+extern X86Instr* X86Instr_CMov32    ( X86CondCode, X86RM* src, HReg dst );
+extern X86Instr* X86Instr_LoadEX    ( UChar szSmall, Bool syned,
+                                      X86AMode* src, HReg dst );
+extern X86Instr* X86Instr_Store     ( UChar sz, HReg src, X86AMode* dst );
+extern X86Instr* X86Instr_Set32     ( X86CondCode cond, HReg dst );
+extern X86Instr* X86Instr_Bsfr32    ( Bool isFwds, HReg src, HReg dst );
+extern X86Instr* X86Instr_MFence    ( UInt hwcaps );
+extern X86Instr* X86Instr_ACAS      ( X86AMode* addr, UChar sz );
+extern X86Instr* X86Instr_DACAS     ( X86AMode* addr );
+
+/* x87 floating point. */
+extern X86Instr* X86Instr_FpUnary   ( X86FpOp op, HReg src, HReg dst );
+extern X86Instr* X86Instr_FpBinary  ( X86FpOp op, HReg srcL, HReg srcR, HReg dst );
+extern X86Instr* X86Instr_FpLdSt    ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
+extern X86Instr* X86Instr_FpLdStI   ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
+extern X86Instr* X86Instr_Fp64to32  ( HReg src, HReg dst );
+extern X86Instr* X86Instr_FpCMov    ( X86CondCode, HReg src, HReg dst );
+extern X86Instr* X86Instr_FpLdCW    ( X86AMode* );
+extern X86Instr* X86Instr_FpStSW_AX ( void );
+extern X86Instr* X86Instr_FpCmp     ( HReg srcL, HReg srcR, HReg dst );
+
+/* SSE ops, event checks and profile-counter increments. */
+extern X86Instr* X86Instr_SseConst  ( UShort con, HReg dst );
+extern X86Instr* X86Instr_SseLdSt   ( Bool isLoad, HReg, X86AMode* );
+extern X86Instr* X86Instr_SseLdzLO  ( Int sz, HReg, X86AMode* );
+extern X86Instr* X86Instr_Sse32Fx4  ( X86SseOp, HReg, HReg );
+extern X86Instr* X86Instr_Sse32FLo  ( X86SseOp, HReg, HReg );
+extern X86Instr* X86Instr_Sse64Fx2  ( X86SseOp, HReg, HReg );
+extern X86Instr* X86Instr_Sse64FLo  ( X86SseOp, HReg, HReg );
+extern X86Instr* X86Instr_SseReRg   ( X86SseOp, HReg, HReg );
+extern X86Instr* X86Instr_SseCMov   ( X86CondCode, HReg src, HReg dst );
+extern X86Instr* X86Instr_SseShuf   ( Int order, HReg src, HReg dst );
+extern X86Instr* X86Instr_EvCheck   ( X86AMode* amCounter,
+                                      X86AMode* amFailAddr );
+extern X86Instr* X86Instr_ProfInc   ( void );
+
+
+/* Pretty-print an instruction; the Bool is the usual mode64 flag
+   passed through the generic backend interface. */
+extern void ppX86Instr ( const X86Instr*, Bool );
+
+/* Some functions that insulate the register allocator from details
+   of the underlying instruction set. */
+extern void         getRegUsage_X86Instr ( HRegUsage*, const X86Instr*, Bool );
+extern void         mapRegs_X86Instr     ( HRegRemap*, X86Instr*, Bool );
+extern Bool         isMove_X86Instr      ( const X86Instr*, HReg*, HReg* );
+/* Assemble one instruction into buf (at most nbuf bytes).  *is_profInc
+   is set when the instruction is a ProfInc, since its counter address
+   must be patched in later (see patchProfInc_X86 below).  Return value
+   is presumably the number of bytes emitted -- confirm against the
+   emitter's implementation. */
+extern Int          emit_X86Instr   ( /*MB_MOD*/Bool* is_profInc,
+                                      UChar* buf, Int nbuf, const X86Instr* i, 
+                                      Bool mode64,
+                                      VexEndness endness_host,
+                                      const void* disp_cp_chain_me_to_slowEP,
+                                      const void* disp_cp_chain_me_to_fastEP,
+                                      const void* disp_cp_xindir,
+                                      const void* disp_cp_xassisted );
+
+/* Generate up to two instructions (in *i1/*i2) to spill/reload rreg
+   at the given offset. */
+extern void genSpill_X86  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                            HReg rreg, Int offset, Bool );
+extern void genReload_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
+                            HReg rreg, Int offset, Bool );
+
+/* Try to rewrite 'i' to use vreg's spill slot directly, avoiding a
+   separate reload instruction. */
+extern X86Instr* directReload_X86 ( X86Instr* i, HReg vreg, Short spill_off );
+
+/* The set of real registers available to the register allocator. */
+extern const RRegUniverse* getRRegUniverse_X86 ( void );
+
+/* Instruction selection: translate one IR superblock into x86 code. */
+extern HInstrArray* iselSB_X86           ( const IRSB*,
+                                           VexArch,
+                                           const VexArchInfo*,
+                                           const VexAbiInfo*,
+                                           Int offs_Host_EvC_Counter,
+                                           Int offs_Host_EvC_FailAddr,
+                                           Bool chainingAllowed,
+                                           Bool addProfInc,
+                                           Addr max_ga );
+
+/* How big is an event check?  This is kind of a kludge because it
+   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
+   and so assumes that they are both <= 128, and so can use the short
+   offset encoding.  This is all checked with assertions, so in the
+   worst case we will merely assert at startup. */
+extern Int evCheckSzB_X86 (void);
+
+/* Perform a chaining and unchaining of an XDirect jump. */
+extern VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
+                                        void* place_to_chain,
+                                        const void* disp_cp_chain_me_EXPECTED,
+                                        const void* place_to_jump_to );
+
+extern VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
+                                          void* place_to_unchain,
+                                          const void* place_to_jump_to_EXPECTED,
+                                          const void* disp_cp_chain_me );
+
+/* Patch the counter location into an existing ProfInc point. */
+extern VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
+                                        void*  place_to_patch,
+                                        const ULong* location_of_counter );
+
+
+#endif /* ndef __VEX_HOST_X86_DEFS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                     host_x86_defs.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/host_x86_isel.c b/VEX/priv/host_x86_isel.c
new file mode 100644
index 0000000..011cba5
--- /dev/null
+++ b/VEX/priv/host_x86_isel.c
@@ -0,0 +1,4515 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                   host_x86_isel.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "ir_match.h"
+#include "main_util.h"
+#include "main_globals.h"
+#include "host_generic_regs.h"
+#include "host_generic_simd64.h"
+#include "host_generic_simd128.h"
+#include "host_x86_defs.h"
+
+/* TODO 21 Apr 2005:
+
+   -- (Really an assembler issue) don't emit CMov32 as a cmov
+      insn, since that's expensive on P4 and conditional branch
+      is cheaper if (as we expect) the condition is highly predictable
+
+   -- preserve xmm registers across function calls (by declaring them
+      as trashed by call insns)
+
+   -- preserve x87 ST stack discipline across function calls.  Sigh.
+
+   -- Check doHelperCall: if a call is conditional, we cannot safely
+      compute any regparm args directly to registers.  Hence, the
+      fast-regparm marshalling should be restricted to unconditional
+      calls only.
+*/
+
+/*---------------------------------------------------------*/
+/*--- x87 control word stuff                            ---*/
+/*---------------------------------------------------------*/
+
+/* Vex-generated code expects to run with the FPU set as follows: all
+   exceptions masked, round-to-nearest, precision = 53 bits.  This
+   corresponds to a FPU control word value of 0x027F.
+
+   Similarly the SSE control word (%mxcsr) should be 0x1F80.
+
+   %fpucw and %mxcsr should have these values on entry to
+   Vex-generated code, and those values should remain
+   unchanged at exit.
+*/
+
+#define DEFAULT_FPUCW 0x027F
+
+/* debugging only, do not use */
+/* define DEFAULT_FPUCW 0x037F */
+
+
+/*---------------------------------------------------------*/
+/*--- misc helpers                                      ---*/
+/*---------------------------------------------------------*/
+
+/* These are duplicated in guest-x86/toIR.c */
+/* Construct a unary IR expression node. */
+static IRExpr* unop ( IROp op, IRExpr* a )
+{
+   IRExpr* res = IRExpr_Unop(op, a);
+   return res;
+}
+
+/* Construct a binary IR expression node. */
+static IRExpr* binop ( IROp op, IRExpr* a1, IRExpr* a2 )
+{
+   IRExpr* res = IRExpr_Binop(op, a1, a2);
+   return res;
+}
+
+/* Construct a pattern-matching binder node, for use with ir_match. */
+static IRExpr* bind ( Int binder )
+{
+   IRExpr* res = IRExpr_Binder(binder);
+   return res;
+}
+
+/* Is 'e' the 8-bit constant zero? */
+static Bool isZeroU8 ( IRExpr* e )
+{
+   if (e->tag != Iex_Const)
+      return False;
+   IRConst* con = e->Iex.Const.con;
+   return con->tag == Ico_U8 && con->Ico.U8 == 0;
+}
+
+/* Is 'e' the 32-bit constant zero? */
+static Bool isZeroU32 ( IRExpr* e )
+{
+   if (e->tag != Iex_Const)
+      return False;
+   IRConst* con = e->Iex.Const.con;
+   return con->tag == Ico_U32 && con->Ico.U32 == 0;
+}
+
+//static Bool isZeroU64 ( IRExpr* e )
+//{
+//   return e->tag == Iex_Const
+//          && e->Iex.Const.con->tag == Ico_U64
+//          && e->Iex.Const.con->Ico.U64 == 0ULL;
+//}
+
+
+/*---------------------------------------------------------*/
+/*--- ISelEnv                                           ---*/
+/*---------------------------------------------------------*/
+
+/* This carries around:
+
+   - A mapping from IRTemp to IRType, giving the type of any IRTemp we
+     might encounter.  This is computed before insn selection starts,
+     and does not change.
+
+   - A mapping from IRTemp to HReg.  This tells the insn selector
+     which virtual register(s) are associated with each IRTemp
+     temporary.  This is computed before insn selection starts, and
+     does not change.  We expect this mapping to map precisely the
+     same set of IRTemps as the type mapping does.
+
+        - vregmap   holds the primary register for the IRTemp.
+        - vregmapHI is only used for 64-bit integer-typed
+             IRTemps.  It holds the identity of a second
+             32-bit virtual HReg, which holds the high half
+             of the value.
+
+   - The code array, that is, the insns selected so far.
+
+   - A counter, for generating new virtual registers.
+
+   - The host subarchitecture we are selecting insns for.  
+     This is set at the start and does not change.
+
+   - A Bool for indicating whether we may generate chain-me
+     instructions for control flow transfers, or whether we must use
+     XAssisted.
+
+   - The maximum guest address of any guest insn in this block.
+     Actually, the address of the highest-addressed byte from any insn
+     in this block.  Is set at the start and does not change.  This is
+     used for detecting jumps which are definitely forward-edges from
+     this block, and therefore can be made (chained) to the fast entry
+     point of the destination, thereby avoiding the destination's
+     event check.
+
+   Note, this is all (well, mostly) host-independent.
+*/
+
+typedef
+   struct {
+      /* Constant -- are set at the start and do not change. */
+      IRTypeEnv*   type_env;         /* the type of each IRTemp */
+
+      HReg*        vregmap;          /* IRTemp -> primary vreg */
+      HReg*        vregmapHI;        /* IRTemp -> high-half vreg, for
+                                        64-bit temps only */
+      Int          n_vregmap;        /* entries in both maps above */
+
+      UInt         hwcaps;           /* host capability bits */
+
+      Bool         chainingAllowed;  /* may we emit chain-me transfers? */
+      Addr32       max_ga;           /* highest guest addr in this block */
+
+      /* These are modified as we go along. */
+      HInstrArray* code;             /* the instructions selected so far */
+      Int          vreg_ctr;         /* counter for fresh vregs */
+   }
+   ISelEnv;
+
+
+/* Look up the vreg assigned to IRTemp 'tmp'. */
+static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp )
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   HReg res = env->vregmap[tmp];
+   return res;
+}
+
+/* Look up the vreg pair assigned to a 64-bit IRTemp 'tmp': high half
+   in *vrHI, low half in *vrLO. */
+static void lookupIRTemp64 ( HReg* vrHI, HReg* vrLO, ISelEnv* env, IRTemp tmp )
+{
+   vassert(tmp >= 0);
+   vassert(tmp < env->n_vregmap);
+   vassert(! hregIsInvalid(env->vregmapHI[tmp]));
+   *vrHI = env->vregmapHI[tmp];
+   *vrLO = env->vregmap[tmp];
+}
+
+/* Append 'instr' to the code being generated, tracing it if vcode
+   tracing is enabled. */
+static void addInstr ( ISelEnv* env, X86Instr* instr )
+{
+   addHInstr(env->code, instr);
+   if (0 != (vex_traceflags & VEX_TRACE_VCODE)) {
+      ppX86Instr(instr, False);
+      vex_printf("\n");
+   }
+}
+
+/* Allocate a fresh 32-bit integer virtual register. */
+static HReg newVRegI ( ISelEnv* env )
+{
+   return mkHReg(True/*virtual reg*/, HRcInt32, 0/*enc*/, env->vreg_ctr++);
+}
+
+/* Allocate a fresh 64-bit float virtual register. */
+static HReg newVRegF ( ISelEnv* env )
+{
+   return mkHReg(True/*virtual reg*/, HRcFlt64, 0/*enc*/, env->vreg_ctr++);
+}
+
+/* Allocate a fresh 128-bit vector virtual register. */
+static HReg newVRegV ( ISelEnv* env )
+{
+   return mkHReg(True/*virtual reg*/, HRcVec128, 0/*enc*/, env->vreg_ctr++);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Forward declarations                        ---*/
+/*---------------------------------------------------------*/
+
+/* These are organised as iselXXX and iselXXX_wrk pairs.  The
+   iselXXX_wrk do the real work, but are not to be called directly.
+   For each XXX, iselXXX calls its iselXXX_wrk counterpart, then
+   checks that all returned registers are virtual.  You should not
+   call the _wrk version directly.
+*/
+/* 32/16/8-bit integer expressions, result as a reg/mem/imm operand. */
+static X86RMI*     iselIntExpr_RMI_wrk ( ISelEnv* env, IRExpr* e );
+static X86RMI*     iselIntExpr_RMI     ( ISelEnv* env, IRExpr* e );
+
+/* ... result as a reg/imm operand. */
+static X86RI*      iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e );
+static X86RI*      iselIntExpr_RI     ( ISelEnv* env, IRExpr* e );
+
+/* ... result as a reg/mem operand. */
+static X86RM*      iselIntExpr_RM_wrk ( ISelEnv* env, IRExpr* e );
+static X86RM*      iselIntExpr_RM     ( ISelEnv* env, IRExpr* e );
+
+/* ... result in a (virtual) register. */
+static HReg        iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e );
+static HReg        iselIntExpr_R     ( ISelEnv* env, IRExpr* e );
+
+/* Address expressions, as an x86 addressing mode. */
+static X86AMode*   iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e );
+static X86AMode*   iselIntExpr_AMode     ( ISelEnv* env, IRExpr* e );
+
+/* 64-bit integer expressions, result as a (high, low) register pair. */
+static void        iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, 
+                                       ISelEnv* env, IRExpr* e );
+static void        iselInt64Expr     ( HReg* rHi, HReg* rLo, 
+                                       ISelEnv* env, IRExpr* e );
+
+/* 1-bit expressions, result as an x86 condition code. */
+static X86CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e );
+static X86CondCode iselCondCode     ( ISelEnv* env, IRExpr* e );
+
+/* F64 expressions. */
+static HReg        iselDblExpr_wrk ( ISelEnv* env, IRExpr* e );
+static HReg        iselDblExpr     ( ISelEnv* env, IRExpr* e );
+
+/* F32 expressions. */
+static HReg        iselFltExpr_wrk ( ISelEnv* env, IRExpr* e );
+static HReg        iselFltExpr     ( ISelEnv* env, IRExpr* e );
+
+/* V128 expressions. */
+static HReg        iselVecExpr_wrk ( ISelEnv* env, IRExpr* e );
+static HReg        iselVecExpr     ( ISelEnv* env, IRExpr* e );
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Misc helpers                                ---*/
+/*---------------------------------------------------------*/
+
+/* Make a int reg-reg move. */
+
+/* Build (but do not emit) an integer reg-reg move. */
+static X86Instr* mk_iMOVsd_RR ( HReg r_src, HReg r_dst )
+{
+   vassert(hregClass(r_src) == HRcInt32);
+   vassert(hregClass(r_dst) == HRcInt32);
+   return X86Instr_Alu32R(Xalu_MOV, X86RMI_Reg(r_src), r_dst);
+}
+
+
+/* Make a vector reg-reg move. */
+
+/* Build (but do not emit) a vector reg-reg move. */
+static X86Instr* mk_vMOVsd_RR ( HReg r_src, HReg r_dst )
+{
+   vassert(hregClass(r_src) == HRcVec128);
+   vassert(hregClass(r_dst) == HRcVec128);
+   return X86Instr_SseReRg(Xsse_MOV, r_src, r_dst);
+}
+
+/* Advance/retreat %esp by n. */
+
+/* Release n bytes of stack by adding n to %esp (n in 4..252, a
+   multiple of 4). */
+static void add_to_esp ( ISelEnv* env, Int n )
+{
+   vassert(n > 0 && n < 256 && (n%4) == 0);
+   X86Instr* i = X86Instr_Alu32R(Xalu_ADD, X86RMI_Imm(n), hregX86_ESP());
+   addInstr(env, i);
+}
+
+/* Reserve n bytes of stack by subtracting n from %esp (n in 4..252,
+   a multiple of 4). */
+static void sub_from_esp ( ISelEnv* env, Int n )
+{
+   vassert(n > 0 && n < 256 && (n%4) == 0);
+   X86Instr* i = X86Instr_Alu32R(Xalu_SUB, X86RMI_Imm(n), hregX86_ESP());
+   addInstr(env, i);
+}
+
+
+/* Given an amode, return one which references 4 bytes further
+   along. */
+
+/* Return a copy of 'am' whose displacement is 4 bytes larger. */
+static X86AMode* advance4 ( X86AMode* am )
+{
+   X86AMode* res = dopyX86AMode(am);
+   if (res->tag == Xam_IRRS) {
+      res->Xam.IRRS.imm += 4;
+   } else if (res->tag == Xam_IR) {
+      res->Xam.IR.imm += 4;
+   } else {
+      vpanic("advance4(x86,host)");
+   }
+   return res;
+}
+
+
+/* Push an arg onto the host stack, in preparation for a call to a
+   helper function of some kind.  Returns the number of 32-bit words
+   pushed.  If we encounter an IRExpr_VECRET() then we expect that
+   r_vecRetAddr will be a valid register, that holds the relevant
+   address. 
+*/
+static Int pushArg ( ISelEnv* env, IRExpr* arg, HReg r_vecRetAddr )
+{
+   /* Special argument markers first. */
+   if (UNLIKELY(arg->tag == Iex_VECRET)) {
+      vassert(0); //ATC
+      vassert(!hregIsInvalid(r_vecRetAddr));
+      addInstr(env, X86Instr_Push(X86RMI_Reg(r_vecRetAddr)));
+      return 1;
+   }
+   if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+      addInstr(env, X86Instr_Push(X86RMI_Reg(hregX86_EBP())));
+      return 1;
+   }
+   /* A normal expression: I32 takes one word, I64 takes two (pushed
+      high half first, so the value sits little-end-first in memory). */
+   IRType ty = typeOfIRExpr(env->type_env, arg);
+   switch (ty) {
+      case Ity_I32:
+         addInstr(env, X86Instr_Push(iselIntExpr_RMI(env, arg)));
+         return 1;
+      case Ity_I64: {
+         HReg rHi, rLo;
+         iselInt64Expr(&rHi, &rLo, env, arg);
+         addInstr(env, X86Instr_Push(X86RMI_Reg(rHi)));
+         addInstr(env, X86Instr_Push(X86RMI_Reg(rLo)));
+         return 2;
+      }
+      default:
+         break;
+   }
+   ppIRExpr(arg);
+   vpanic("pushArg(x86): can't handle arg of this type");
+}
+
+
+/* Complete the call to a helper function, by calling the 
+   helper and clearing the args off the stack. */
+
+static 
+void callHelperAndClearArgs ( ISelEnv* env, X86CondCode cc, 
+                              IRCallee* cee, Int n_arg_ws,
+                              RetLoc rloc )
+{
+   /* Complication.  Need to decide which reg to use as the fn address
+      pointer, in a way that doesn't trash regparm-passed
+      parameters. */
+   vassert(sizeof(void*) == 4);
+
+   Addr target = (Addr)cee->addr;
+   addInstr(env, X86Instr_Call(cc, target, cee->regparms, rloc));
+   /* Pop the stack-passed argument words, if any. */
+   if (n_arg_ws > 0) {
+      add_to_esp(env, 4*n_arg_ws);
+   }
+}
+
+
+/* Used only in doHelperCall.  See big comment in doHelperCall re
+   handling of regparm args.  This function figures out whether
+   evaluation of an expression might require use of a fixed register.
+   If in doubt return True (safe but suboptimal).  
+*/
+static
+Bool mightRequireFixedRegs ( IRExpr* e )
+{
+   if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e))) {
+      // These are always "safe" -- either a copy of %esp in some
+      // arbitrary vreg, or a copy of %ebp, respectively.
+      return False;
+   }
+   /* Trivial expressions are evaluated without touching any fixed
+      register; anything else might not be. */
+   if (e->tag == Iex_RdTmp || e->tag == Iex_Const || e->tag == Iex_Get)
+      return False;
+   return True;
+}
+
+
+/* Do a complete function call.  |guard| is a Ity_Bit expression
+   indicating whether or not the call happens.  If guard==NULL, the
+   call is unconditional.  |retloc| is set to indicate where the
+   return value is after the call.  The caller (of this fn) must
+   generate code to add |stackAdjustAfterCall| to the stack pointer
+   after the call is done. */
+
+static
+void doHelperCall ( /*OUT*/UInt*   stackAdjustAfterCall,
+                    /*OUT*/RetLoc* retloc,
+                    ISelEnv* env,
+                    IRExpr* guard,
+                    IRCallee* cee, IRType retTy, IRExpr** args )
+{
+   X86CondCode cc;
+   HReg        argregs[3];
+   HReg        tmpregs[3];
+   Bool        danger;
+   Int         not_done_yet, n_args, n_arg_ws, stack_limit, 
+               i, argreg, argregX;
+
+   /* Set default returns.  We'll update them later if needed. */
+   *stackAdjustAfterCall = 0;
+   *retloc               = mk_RetLoc_INVALID();
+
+   /* These are used for cross-checking that IR-level constraints on
+      the use of Iex_VECRET and Iex_BBPTR are observed. */
+   UInt nVECRETs = 0;
+   UInt nBBPTRs  = 0;
+
+   /* Marshal args for a call, do the call, and clear the stack.
+      Complexities to consider:
+
+      * The return type can be I{64,32,16,8} or V128.  In the V128
+        case, it is expected that |args| will contain the special
+        node IRExpr_VECRET(), in which case this routine generates
+        code to allocate space on the stack for the vector return
+        value.  Since we are not passing any scalars on the stack, it
+        is enough to preallocate the return space before marshalling
+        any arguments, in this case.
+
+        |args| may also contain IRExpr_BBPTR(), in which case the
+        value in %ebp is passed as the corresponding argument.
+
+      * If the callee claims regparmness of 1, 2 or 3, we must pass the
+        first 1, 2 or 3 args in registers (EAX, EDX, and ECX
+        respectively).  To keep things relatively simple, only args of
+        type I32 may be passed as regparms -- just bomb out if anything
+        else turns up.  Clearly this depends on the front ends not
+        trying to pass any other types as regparms.  
+   */
+
+   /* 16 Nov 2004: the regparm handling is complicated by the
+      following problem.
+
+      Consider a call to a function with two regparm parameters:
+      f(e1,e2).  We need to compute e1 into %eax and e2 into %edx.
+      Suppose code is first generated to compute e1 into %eax.  Then,
+      code is generated to compute e2 into %edx.  Unfortunately, if
+      the latter code sequence uses %eax, it will trash the value of
+      e1 computed by the former sequence.  This could happen if (for
+      example) e2 itself involved a function call.  In the code below,
+      args are evaluated right-to-left, not left-to-right, but the
+      principle and the problem are the same.
+
+      One solution is to compute all regparm-bound args into vregs
+      first, and once they are all done, move them to the relevant
+      real regs.  This always gives correct code, but it also gives
+      a bunch of vreg-to-rreg moves which are usually redundant but 
+      are hard for the register allocator to get rid of.
+
+      A compromise is to first examine all regparm'd argument 
+      expressions.  If they are all so simple that it is clear 
+      they will be evaluated without use of any fixed registers,
+      use the old compute-directly-to-fixed-target scheme.  If not,
+      be safe and use the via-vregs scheme.
+
+      Note this requires being able to examine an expression and
+      determine whether or not evaluation of it might use a fixed
+      register.  That requires knowledge of how the rest of this
+      insn selector works.  Currently just the following 3 are 
+      regarded as safe -- hopefully they cover the majority of
+      arguments in practice: IRExpr_Tmp IRExpr_Const IRExpr_Get.
+   */
+   vassert(cee->regparms >= 0 && cee->regparms <= 3);
+
+   /* Count the number of args and also the VECRETs */
+   n_args = n_arg_ws = 0;
+   while (args[n_args]) {
+      IRExpr* arg = args[n_args];
+      n_args++;
+      if (UNLIKELY(arg->tag == Iex_VECRET)) {
+         nVECRETs++;
+      } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+         nBBPTRs++;
+      }
+   }
+
+   /* If this fails, the IR is ill-formed */
+   vassert(nBBPTRs == 0 || nBBPTRs == 1);
+
+   /* If we have a VECRET, allocate space on the stack for the return
+      value, and record the stack pointer after that. */
+   HReg r_vecRetAddr = INVALID_HREG;
+   if (nVECRETs == 1) {
+      vassert(retTy == Ity_V128 || retTy == Ity_V256);
+      vassert(retTy != Ity_V256); // we don't handle that yet (if ever)
+      r_vecRetAddr = newVRegI(env);
+      sub_from_esp(env, 16);
+      addInstr(env, mk_iMOVsd_RR( hregX86_ESP(), r_vecRetAddr ));
+   } else {
+      // If either of these fail, the IR is ill-formed
+      vassert(retTy != Ity_V128 && retTy != Ity_V256);
+      vassert(nVECRETs == 0);
+   }
+
+   /* Cross-check counter: how many args remain to be marshalled. */
+   not_done_yet = n_args;
+
+   /* Args [0 .. stack_limit-1] go in registers; the rest are pushed. */
+   stack_limit = cee->regparms;
+
+   /* ------ BEGIN marshall all arguments ------ */
+
+   /* Push (R to L) the stack-passed args, [n_args-1 .. stack_limit] */
+   for (i = n_args-1; i >= stack_limit; i--) {
+      n_arg_ws += pushArg(env, args[i], r_vecRetAddr);
+      not_done_yet--;
+   }
+
+   /* args [stack_limit-1 .. 0] and possibly %ebp are to be passed in
+      registers. */
+
+   if (cee->regparms > 0) {
+
+      /* ------ BEGIN deal with regparms ------ */
+
+      /* deal with regparms, not forgetting %ebp if needed. */
+      argregs[0] = hregX86_EAX();
+      argregs[1] = hregX86_EDX();
+      argregs[2] = hregX86_ECX();
+      tmpregs[0] = tmpregs[1] = tmpregs[2] = INVALID_HREG;
+
+      argreg = cee->regparms;
+
+      /* In keeping with big comment above, detect potential danger
+         and use the via-vregs scheme if needed. */
+      danger = False;
+      for (i = stack_limit-1; i >= 0; i--) {
+         if (mightRequireFixedRegs(args[i])) {
+            danger = True;
+            break;
+         }
+      }
+
+      if (danger) {
+
+         /* Move via temporaries */
+         argregX = argreg;
+         for (i = stack_limit-1; i >= 0; i--) {
+
+            if (0) {
+               vex_printf("x86 host: register param is complex: ");
+               ppIRExpr(args[i]);
+               vex_printf("\n");
+            }
+
+            IRExpr* arg = args[i];
+            argreg--;
+            vassert(argreg >= 0);
+            if (UNLIKELY(arg->tag == Iex_VECRET)) {
+               vassert(0); //ATC
+            }
+            else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+               vassert(0); //ATC
+            } else {
+               vassert(typeOfIRExpr(env->type_env, arg) == Ity_I32);
+               tmpregs[argreg] = iselIntExpr_R(env, arg);
+            }
+            not_done_yet--;
+         }
+         /* Now all args are in vregs; move them to the fixed target
+            registers in one final pass. */
+         for (i = stack_limit-1; i >= 0; i--) {
+            argregX--;
+            vassert(argregX >= 0);
+            addInstr( env, mk_iMOVsd_RR( tmpregs[argregX], argregs[argregX] ) );
+         }
+
+      } else {
+         /* It's safe to compute all regparm args directly into their
+            target registers. */
+         for (i = stack_limit-1; i >= 0; i--) {
+            IRExpr* arg = args[i];
+            argreg--;
+            vassert(argreg >= 0);
+            if (UNLIKELY(arg->tag == Iex_VECRET)) {
+               vassert(!hregIsInvalid(r_vecRetAddr));
+               addInstr(env, X86Instr_Alu32R(Xalu_MOV,
+                                             X86RMI_Reg(r_vecRetAddr),
+                                             argregs[argreg]));
+            }
+            else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+               vassert(0); //ATC
+            } else {
+               vassert(typeOfIRExpr(env->type_env, arg) == Ity_I32);
+               addInstr(env, X86Instr_Alu32R(Xalu_MOV, 
+                                             iselIntExpr_RMI(env, arg),
+                                             argregs[argreg]));
+            }
+            not_done_yet--;
+         }
+
+      }
+
+      /* ------ END deal with regparms ------ */
+
+   }
+
+   vassert(not_done_yet == 0);
+
+   /* ------ END marshall all arguments ------ */
+
+   /* Now we can compute the condition.  We can't do it earlier
+      because the argument computations could trash the condition
+      codes.  Be a bit clever to handle the common case where the
+      guard is 1:Bit. */
+   cc = Xcc_ALWAYS;
+   if (guard) {
+      if (guard->tag == Iex_Const 
+          && guard->Iex.Const.con->tag == Ico_U1
+          && guard->Iex.Const.con->Ico.U1 == True) {
+         /* unconditional -- do nothing */
+      } else {
+         cc = iselCondCode( env, guard );
+      }
+   }
+
+   /* Do final checks, set the return values, and generate the call
+      instruction proper. */
+   vassert(*stackAdjustAfterCall == 0);
+   vassert(is_RetLoc_INVALID(*retloc));
+   switch (retTy) {
+         case Ity_INVALID:
+            /* Function doesn't return a value. */
+            *retloc = mk_RetLoc_simple(RLPri_None);
+            break;
+         case Ity_I64:
+            *retloc = mk_RetLoc_simple(RLPri_2Int);
+            break;
+         case Ity_I32: case Ity_I16: case Ity_I8:
+            *retloc = mk_RetLoc_simple(RLPri_Int);
+            break;
+         case Ity_V128:
+            *retloc = mk_RetLoc_spRel(RLPri_V128SpRel, 0);
+            *stackAdjustAfterCall = 16;
+            break;
+         case Ity_V256:
+            vassert(0); // ATC
+            *retloc = mk_RetLoc_spRel(RLPri_V256SpRel, 0);
+            *stackAdjustAfterCall = 32;
+            break;
+         default:
+            /* IR can denote other possible return types, but we don't
+               handle those here. */
+           vassert(0);
+   }
+
+   /* Finally, generate the call itself.  This needs the *retloc value
+      set in the switch above, which is why it's at the end. */
+   callHelperAndClearArgs( env, cc, cee, n_arg_ws, *retloc );
+}
+
+
+/* Given a guest-state array descriptor, an index expression and a
+   bias, generate an X86AMode holding the relevant guest state
+   offset. */
+
+static
+X86AMode* genGuestArrayOffset ( ISelEnv* env, IRRegArray* descr, 
+                                IRExpr* off, Int bias )
+{
+   HReg tmp, roff;
+   Int  elemSz = sizeofIRType(descr->elemTy);
+   Int  nElems = descr->nElems;
+   Int  shift  = 0;
+
+   /* throw out any cases not generated by an x86 front end.  In
+      theory there might be a day where we need to handle them -- if
+      we ever run non-x86-guest on x86 host. */
+
+   if (nElems != 8) 
+      vpanic("genGuestArrayOffset(x86 host)(1)");
+
+   /* shift = log2(elemSz), for use in the scaled addressing mode. */
+   switch (elemSz) {
+      case 1:  shift = 0; break;
+      case 4:  shift = 2; break;
+      case 8:  shift = 3; break;
+      default: vpanic("genGuestArrayOffset(x86 host)(2)");
+   }
+
+   /* Compute off into a reg, %off.  Then return:
+
+         movl %off, %tmp
+         addl $bias, %tmp  (if bias != 0)
+         andl $7, %tmp
+         ... base(%ebp, %tmp, shift) ...
+   */
+   tmp  = newVRegI(env);
+   roff = iselIntExpr_R(env, off);
+   addInstr(env, mk_iMOVsd_RR(roff, tmp));
+   if (bias != 0) {
+      addInstr(env, 
+               X86Instr_Alu32R(Xalu_ADD, X86RMI_Imm(bias), tmp));
+   }
+   /* Masking with 7 wraps the index into the array; this relies on
+      nElems == 8, asserted above. */
+   addInstr(env, 
+            X86Instr_Alu32R(Xalu_AND, X86RMI_Imm(7), tmp));
+   return
+      X86AMode_IRRS( descr->base, hregX86_EBP(), tmp, shift );
+}
+
+
+/* Mess with the FPU's rounding mode: set to the default rounding mode
+   (DEFAULT_FPUCW). */
+static 
+void set_FPU_rounding_default ( ISelEnv* env )
+{
+   /* Emit:  pushl $DEFAULT_FPUCW ; fldcw 0(%esp) ; addl $4, %esp */
+   X86AMode* am_esp0 = X86AMode_IR(0, hregX86_ESP());
+   addInstr(env, X86Instr_Push(X86RMI_Imm(DEFAULT_FPUCW)));
+   addInstr(env, X86Instr_FpLdCW(am_esp0));
+   add_to_esp(env, 4);
+}
+
+
+/* Mess with the FPU's rounding mode: 'mode' is an I32-typed
+   expression denoting a value in the range 0 .. 3, indicating a round
+   mode encoded as per type IRRoundingMode.  Set the x87 FPU to have
+   the same rounding.
+*/
+static
+void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode )
+{
+   HReg rm_src = iselIntExpr_R(env, mode);
+   HReg rm_cw  = newVRegI(env);
+   X86AMode* am_esp0 = X86AMode_IR(0, hregX86_ESP());
+
+   /* Build the full control word in rm_cw:
+        movl  %rm_src, %rm_cw
+        andl  $3, %rm_cw        -- shouldn't be needed; paranoia
+        shll  $10, %rm_cw
+        orl   $DEFAULT_FPUCW, %rm_cw
+      then push it and load it into the FPU:
+        pushl %rm_cw
+        fldcw 0(%esp)
+        addl  $4, %esp
+   */
+   addInstr(env, mk_iMOVsd_RR(rm_src, rm_cw));
+   addInstr(env, X86Instr_Alu32R(Xalu_AND, X86RMI_Imm(3), rm_cw));
+   addInstr(env, X86Instr_Sh32(Xsh_SHL, 10, rm_cw));
+   addInstr(env, X86Instr_Alu32R(Xalu_OR, X86RMI_Imm(DEFAULT_FPUCW), rm_cw));
+   addInstr(env, X86Instr_Push(X86RMI_Reg(rm_cw)));
+   addInstr(env, X86Instr_FpLdCW(am_esp0));
+   add_to_esp(env, 4);
+}
+
+
+/* Compute the bitwise complement of a 128-bit vector into a new
+   vector register, using only SSE1-class instructions.  SSE1 has no
+   'not', so: zero a fresh register, compare it for equality with
+   itself (yielding all-ones), then xor the source into that.
+*/
+static HReg do_sse_Not128 ( ISelEnv* env, HReg src )
+{
+   HReg dst = newVRegV(env);
+   /* dst := 0.  The zeroing matters: comparing uninitialised junk
+      could involve NaNs, and we need a predictable all-ones result
+      from the compare below. */
+   addInstr(env, X86Instr_SseReRg(Xsse_XOR, dst, dst));
+   /* dst := all 1s, since 0 == 0 holds in every lane. */
+   addInstr(env, X86Instr_Sse32Fx4(Xsse_CMPEQF, dst, dst));
+   /* dst := dst ^ src, which is ~src. */
+   addInstr(env, X86Instr_SseReRg(Xsse_XOR, src, dst));
+   return dst;
+}
+
+
+/* Round an x87 FPU value to 53-bit-mantissa precision, to be used
+   after most non-simple FPU operations (simple = +, -, *, / and
+   sqrt).
+
+   This could be done a lot more efficiently if needed, by loading
+   zero and adding it to the value to be rounded (fldz ; faddp?).
+*/
+static void roundToF64 ( ISelEnv* env, HReg reg )
+{
+   /* Bounce the value through an 8-byte stack slot: storing it as a
+      64-bit double and reloading it forces the rounding. */
+   X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
+   sub_from_esp(env, 8);
+   addInstr(env, X86Instr_FpLdSt(False/*store*/, 8, reg, esp0));
+   addInstr(env, X86Instr_FpLdSt(True/*load*/,   8, reg, esp0));
+   add_to_esp(env, 8);
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (32/16/8 bit)           ---*/
+/*---------------------------------------------------------*/
+
+/* Select insns for an integer-typed expression, and add them to the
+   code list.  Return a reg holding the result.  This reg will be a
+   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
+   want to modify it, ask for a new vreg, copy it in there, and modify
+   the copy.  The register allocator will do its best to map both
+   vregs to the same real register, so the copies will often disappear
+   later in the game.
+
+   This should handle expressions of 32, 16 and 8-bit type.  All
+   results are returned in a 32-bit register.  For 16- and 8-bit
+   expressions, the upper 16/24 bits are arbitrary, so you should mask
+   or sign extend partial values if necessary.
+*/
+
+static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
+{
+   HReg res = iselIntExpr_R_wrk(env, e);
+   /* sanity-check what the worker handed back ... */
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(res) == HRcInt32);
+   vassert(hregIsVirtual(res));
+   return res;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
+{
+   MatchInfo mi;
+
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
+
+   switch (e->tag) {
+
+   /* --------- TEMP --------- */
+   case Iex_RdTmp: {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* --------- LOAD --------- */
+   case Iex_Load: {
+      HReg dst = newVRegI(env);
+
+      /* We can't handle big-endian loads, nor load-linked.  Check
+         this before computing the amode, so we don't emit dead
+         address-computation insns on the irreducible path. */
+      if (e->Iex.Load.end != Iend_LE)
+         goto irreducible;
+
+      X86AMode* amode = iselIntExpr_AMode ( env, e->Iex.Load.addr );
+
+      if (ty == Ity_I32) {
+         addInstr(env, X86Instr_Alu32R(Xalu_MOV,
+                                       X86RMI_Mem(amode), dst) );
+         return dst;
+      }
+      if (ty == Ity_I16) {
+         addInstr(env, X86Instr_LoadEX(2,False,amode,dst));
+         return dst;
+      }
+      if (ty == Ity_I8) {
+         addInstr(env, X86Instr_LoadEX(1,False,amode,dst));
+         return dst;
+      }
+      break;
+   }
+
+   /* --------- TERNARY OP --------- */
+   case Iex_Triop: {
+      IRTriop *triop = e->Iex.Triop.details;
+      /* C3210 flags following FPU partial remainder (fprem), both
+         IEEE compliant (PREM1) and non-IEEE compliant (PREM). */
+      if (triop->op == Iop_PRemC3210F64
+          || triop->op == Iop_PRem1C3210F64) {
+         HReg junk = newVRegF(env);
+         HReg dst  = newVRegI(env);
+         HReg srcL = iselDblExpr(env, triop->arg2);
+         HReg srcR = iselDblExpr(env, triop->arg3);
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         /* BUGFIX: select on triop->op, not e->Iex.Binop.op.  'e' is
+            a Triop, so the Binop view of the union misreads the
+            'details' pointer as an IROp, making the PREM vs PREM1
+            choice effectively arbitrary. */
+         addInstr(env, X86Instr_FpBinary(
+                           triop->op==Iop_PRemC3210F64
+                              ? Xfp_PREM : Xfp_PREM1,
+                           srcL,srcR,junk
+                 ));
+         /* The previous pseudo-insn will have left the FPU's C3210
+            flags set correctly.  So bag them. */
+         addInstr(env, X86Instr_FpStSW_AX());
+         addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), dst));
+         addInstr(env, X86Instr_Alu32R(Xalu_AND, X86RMI_Imm(0x4700), dst));
+         return dst;
+      }
+
+      break;
+   }
+
+   /* --------- BINARY OP --------- */
+   case Iex_Binop: {
+      X86AluOp   aluOp;
+      X86ShiftOp shOp;
+
+      /* Pattern: Sub32(0,x) */
+      if (e->Iex.Binop.op == Iop_Sub32 && isZeroU32(e->Iex.Binop.arg1)) {
+         HReg dst = newVRegI(env);
+         HReg reg = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(reg,dst));
+         addInstr(env, X86Instr_Unary32(Xun_NEG,dst));
+         return dst;
+      }
+
+      /* Is it an addition or logical style op? */
+      switch (e->Iex.Binop.op) {
+         case Iop_Add8: case Iop_Add16: case Iop_Add32:
+            aluOp = Xalu_ADD; break;
+         case Iop_Sub8: case Iop_Sub16: case Iop_Sub32: 
+            aluOp = Xalu_SUB; break;
+         case Iop_And8: case Iop_And16: case Iop_And32: 
+            aluOp = Xalu_AND; break;
+         case Iop_Or8: case Iop_Or16: case Iop_Or32:  
+            aluOp = Xalu_OR; break;
+         case Iop_Xor8: case Iop_Xor16: case Iop_Xor32: 
+            aluOp = Xalu_XOR; break;
+         case Iop_Mul16: case Iop_Mul32: 
+            aluOp = Xalu_MUL; break;
+         default:
+            aluOp = Xalu_INVALID; break;
+      }
+      /* For commutative ops we assume any literal
+         values are on the second operand. */
+      if (aluOp != Xalu_INVALID) {
+         HReg dst    = newVRegI(env);
+         HReg reg    = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         X86RMI* rmi = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(reg,dst));
+         addInstr(env, X86Instr_Alu32R(aluOp, rmi, dst));
+         return dst;
+      }
+      /* Could do better here; forcing the first arg into a reg
+         isn't always clever.
+         -- t70 = Xor32(And32(Xor32(LDle:I32(Add32(t41,0xFFFFFFA0:I32)),
+                        LDle:I32(Add32(t41,0xFFFFFFA4:I32))),LDle:I32(Add32(
+                        t41,0xFFFFFFA8:I32))),LDle:I32(Add32(t41,0xFFFFFFA0:I32)))
+            movl 0xFFFFFFA0(%vr41),%vr107
+            movl 0xFFFFFFA4(%vr41),%vr108
+            movl %vr107,%vr106
+            xorl %vr108,%vr106
+            movl 0xFFFFFFA8(%vr41),%vr109
+            movl %vr106,%vr105
+            andl %vr109,%vr105
+            movl 0xFFFFFFA0(%vr41),%vr110
+            movl %vr105,%vr104
+            xorl %vr110,%vr104
+            movl %vr104,%vr70
+      */
+
+      /* Perhaps a shift op? */
+      switch (e->Iex.Binop.op) {
+         case Iop_Shl32: case Iop_Shl16: case Iop_Shl8:
+            shOp = Xsh_SHL; break;
+         case Iop_Shr32: case Iop_Shr16: case Iop_Shr8: 
+            shOp = Xsh_SHR; break;
+         case Iop_Sar32: case Iop_Sar16: case Iop_Sar8: 
+            shOp = Xsh_SAR; break;
+         default:
+            shOp = Xsh_INVALID; break;
+      }
+      if (shOp != Xsh_INVALID) {
+         HReg dst = newVRegI(env);
+
+         /* regL = the value to be shifted */
+         HReg regL   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         addInstr(env, mk_iMOVsd_RR(regL,dst));
+
+         /* Do any necessary widening for 16/8 bit operands */
+         switch (e->Iex.Binop.op) {
+            case Iop_Shr8:
+               addInstr(env, X86Instr_Alu32R(
+                                Xalu_AND, X86RMI_Imm(0xFF), dst));
+               break;
+            case Iop_Shr16:
+               addInstr(env, X86Instr_Alu32R(
+                                Xalu_AND, X86RMI_Imm(0xFFFF), dst));
+               break;
+            case Iop_Sar8:
+               addInstr(env, X86Instr_Sh32(Xsh_SHL, 24, dst));
+               addInstr(env, X86Instr_Sh32(Xsh_SAR, 24, dst));
+               break;
+            case Iop_Sar16:
+               addInstr(env, X86Instr_Sh32(Xsh_SHL, 16, dst));
+               addInstr(env, X86Instr_Sh32(Xsh_SAR, 16, dst));
+               break;
+            default: break;
+         }
+
+         /* Now consider the shift amount.  If it's a literal, we
+            can do a much better job than the general case. */
+         if (e->Iex.Binop.arg2->tag == Iex_Const) {
+            /* assert that the IR is well-typed */
+            Int nshift;
+            vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
+            nshift = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+            vassert(nshift >= 0);
+            if (nshift > 0)
+               /* Can't allow nshift==0 since that means %cl */
+               addInstr(env, X86Instr_Sh32( shOp, nshift, dst ));
+         } else {
+            /* General case; we have to force the amount into %cl. */
+            HReg regR = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            addInstr(env, mk_iMOVsd_RR(regR,hregX86_ECX()));
+            addInstr(env, X86Instr_Sh32(shOp, 0/* %cl */, dst));
+         }
+         return dst;
+      }
+
+      /* Handle misc other ops. */
+
+      if (e->Iex.Binop.op == Iop_Max32U) {
+         HReg src1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg dst  = newVRegI(env);
+         HReg src2 = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(src1,dst));
+         addInstr(env, X86Instr_Alu32R(Xalu_CMP, X86RMI_Reg(src2), dst));
+         addInstr(env, X86Instr_CMov32(Xcc_B, X86RM_Reg(src2), dst));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_8HLto16) {
+         HReg hi8  = newVRegI(env);
+         HReg lo8  = newVRegI(env);
+         HReg hi8s = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg lo8s = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(hi8s, hi8));
+         addInstr(env, mk_iMOVsd_RR(lo8s, lo8));
+         addInstr(env, X86Instr_Sh32(Xsh_SHL, 8, hi8));
+         addInstr(env, X86Instr_Alu32R(Xalu_AND, X86RMI_Imm(0xFF), lo8));
+         addInstr(env, X86Instr_Alu32R(Xalu_OR, X86RMI_Reg(lo8), hi8));
+         return hi8;
+      }
+
+      if (e->Iex.Binop.op == Iop_16HLto32) {
+         HReg hi16  = newVRegI(env);
+         HReg lo16  = newVRegI(env);
+         HReg hi16s = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg lo16s = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         addInstr(env, mk_iMOVsd_RR(hi16s, hi16));
+         addInstr(env, mk_iMOVsd_RR(lo16s, lo16));
+         addInstr(env, X86Instr_Sh32(Xsh_SHL, 16, hi16));
+         addInstr(env, X86Instr_Alu32R(Xalu_AND, X86RMI_Imm(0xFFFF), lo16));
+         addInstr(env, X86Instr_Alu32R(Xalu_OR, X86RMI_Reg(lo16), hi16));
+         return hi16;
+      }
+
+      if (e->Iex.Binop.op == Iop_MullS16 || e->Iex.Binop.op == Iop_MullS8
+          || e->Iex.Binop.op == Iop_MullU16 || e->Iex.Binop.op == Iop_MullU8) {
+         HReg a16   = newVRegI(env);
+         HReg b16   = newVRegI(env);
+         HReg a16s  = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg b16s  = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         Int  shift = (e->Iex.Binop.op == Iop_MullS8 
+                       || e->Iex.Binop.op == Iop_MullU8)
+                         ? 24 : 16;
+         X86ShiftOp shr_op = (e->Iex.Binop.op == Iop_MullS8 
+                              || e->Iex.Binop.op == Iop_MullS16)
+                                ? Xsh_SAR : Xsh_SHR;
+
+         addInstr(env, mk_iMOVsd_RR(a16s, a16));
+         addInstr(env, mk_iMOVsd_RR(b16s, b16));
+         addInstr(env, X86Instr_Sh32(Xsh_SHL, shift, a16));
+         addInstr(env, X86Instr_Sh32(Xsh_SHL, shift, b16));
+         addInstr(env, X86Instr_Sh32(shr_op,  shift, a16));
+         addInstr(env, X86Instr_Sh32(shr_op,  shift, b16));
+         addInstr(env, X86Instr_Alu32R(Xalu_MUL, X86RMI_Reg(a16), b16));
+         return b16;
+      }
+
+      if (e->Iex.Binop.op == Iop_CmpF64) {
+         HReg fL = iselDblExpr(env, e->Iex.Binop.arg1);
+         HReg fR = iselDblExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegI(env);
+         addInstr(env, X86Instr_FpCmp(fL,fR,dst));
+         /* shift this right 8 bits so as to conform to CmpF64
+            definition. */
+         addInstr(env, X86Instr_Sh32(Xsh_SHR, 8, dst));
+         return dst;
+      }
+
+      if (e->Iex.Binop.op == Iop_F64toI32S
+          || e->Iex.Binop.op == Iop_F64toI16S) {
+         Int  sz  = e->Iex.Binop.op == Iop_F64toI16S ? 2 : 4;
+         HReg rf  = iselDblExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegI(env);
+
+         /* Used several times ... */
+         X86AMode* zero_esp = X86AMode_IR(0, hregX86_ESP());
+
+         /* rf now holds the value to be converted, and rrm holds the
+            rounding mode value, encoded as per the IRRoundingMode
+            enum.  The first thing to do is set the FPU's rounding
+            mode accordingly. */
+
+         /* Create a space for the format conversion. */
+         /* subl $4, %esp */
+         sub_from_esp(env, 4);
+
+         /* Set host rounding mode */
+         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
+
+         /* gistw/l %rf, 0(%esp) */
+         addInstr(env, X86Instr_FpLdStI(False/*store*/, 
+                                        toUChar(sz), rf, zero_esp));
+
+         if (sz == 2) {
+            /* movzwl 0(%esp), %dst */
+            addInstr(env, X86Instr_LoadEX(2,False,zero_esp,dst));
+         } else {
+            /* movl 0(%esp), %dst */
+            vassert(sz == 4);
+            addInstr(env, X86Instr_Alu32R(
+                             Xalu_MOV, X86RMI_Mem(zero_esp), dst));
+         }
+
+         /* Restore default FPU rounding. */
+         set_FPU_rounding_default( env );
+
+         /* addl $4, %esp */
+         add_to_esp(env, 4);
+         return dst;
+      }
+
+      break;
+   }
+
+   /* --------- UNARY OP --------- */
+   case Iex_Unop: {
+
+      /* 1Uto8(32to1(expr32)) */
+      if (e->Iex.Unop.op == Iop_1Uto8) { 
+         DECLARE_PATTERN(p_32to1_then_1Uto8);
+         DEFINE_PATTERN(p_32to1_then_1Uto8,
+                        unop(Iop_1Uto8,unop(Iop_32to1,bind(0))));
+         if (matchIRExpr(&mi,p_32to1_then_1Uto8,e)) {
+            IRExpr* expr32 = mi.bindee[0];
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, expr32);
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, X86Instr_Alu32R(Xalu_AND,
+                                          X86RMI_Imm(1), dst));
+            return dst;
+         }
+      }
+
+      /* 8Uto32(LDle(expr32)) */
+      if (e->Iex.Unop.op == Iop_8Uto32) {
+         DECLARE_PATTERN(p_LDle8_then_8Uto32);
+         DEFINE_PATTERN(p_LDle8_then_8Uto32,
+                        unop(Iop_8Uto32,
+                             IRExpr_Load(Iend_LE,Ity_I8,bind(0))) );
+         if (matchIRExpr(&mi,p_LDle8_then_8Uto32,e)) {
+            HReg dst = newVRegI(env);
+            X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
+            addInstr(env, X86Instr_LoadEX(1,False,amode,dst));
+            return dst;
+         }
+      }
+
+      /* 8Sto32(LDle(expr32)) */
+      if (e->Iex.Unop.op == Iop_8Sto32) {
+         DECLARE_PATTERN(p_LDle8_then_8Sto32);
+         DEFINE_PATTERN(p_LDle8_then_8Sto32,
+                        unop(Iop_8Sto32,
+                             IRExpr_Load(Iend_LE,Ity_I8,bind(0))) );
+         if (matchIRExpr(&mi,p_LDle8_then_8Sto32,e)) {
+            HReg dst = newVRegI(env);
+            X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
+            addInstr(env, X86Instr_LoadEX(1,True,amode,dst));
+            return dst;
+         }
+      }
+
+      /* 16Uto32(LDle(expr32)) */
+      if (e->Iex.Unop.op == Iop_16Uto32) {
+         DECLARE_PATTERN(p_LDle16_then_16Uto32);
+         DEFINE_PATTERN(p_LDle16_then_16Uto32,
+                        unop(Iop_16Uto32,
+                             IRExpr_Load(Iend_LE,Ity_I16,bind(0))) );
+         if (matchIRExpr(&mi,p_LDle16_then_16Uto32,e)) {
+            HReg dst = newVRegI(env);
+            X86AMode* amode = iselIntExpr_AMode ( env, mi.bindee[0] );
+            addInstr(env, X86Instr_LoadEX(2,False,amode,dst));
+            return dst;
+         }
+      }
+
+      /* 8Uto32(GET:I8) */
+      if (e->Iex.Unop.op == Iop_8Uto32) {
+         if (e->Iex.Unop.arg->tag == Iex_Get) {
+            HReg      dst;
+            X86AMode* amode;
+            vassert(e->Iex.Unop.arg->Iex.Get.ty == Ity_I8);
+            dst = newVRegI(env);
+            amode = X86AMode_IR(e->Iex.Unop.arg->Iex.Get.offset,
+                                hregX86_EBP());
+            addInstr(env, X86Instr_LoadEX(1,False,amode,dst));
+            return dst;
+         }
+      }
+
+      /* 16to32(GET:I16) */
+      if (e->Iex.Unop.op == Iop_16Uto32) {
+         if (e->Iex.Unop.arg->tag == Iex_Get) {
+            HReg      dst;
+            X86AMode* amode;
+            vassert(e->Iex.Unop.arg->Iex.Get.ty == Ity_I16);
+            dst = newVRegI(env);
+            amode = X86AMode_IR(e->Iex.Unop.arg->Iex.Get.offset,
+                                hregX86_EBP());
+            addInstr(env, X86Instr_LoadEX(2,False,amode,dst));
+            return dst;
+         }
+      }
+
+      switch (e->Iex.Unop.op) {
+         case Iop_8Uto16:
+         case Iop_8Uto32:
+         case Iop_16Uto32: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            UInt mask = e->Iex.Unop.op==Iop_16Uto32 ? 0xFFFF : 0xFF;
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, X86Instr_Alu32R(Xalu_AND,
+                                          X86RMI_Imm(mask), dst));
+            return dst;
+         }
+         case Iop_8Sto16:
+         case Iop_8Sto32:
+         case Iop_16Sto32: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            UInt amt = e->Iex.Unop.op==Iop_16Sto32 ? 16 : 24;
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, X86Instr_Sh32(Xsh_SHL, amt, dst));
+            addInstr(env, X86Instr_Sh32(Xsh_SAR, amt, dst));
+            return dst;
+         }
+         case Iop_Not8:
+         case Iop_Not16:
+         case Iop_Not32: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, X86Instr_Unary32(Xun_NOT,dst));
+            return dst;
+         }
+         case Iop_64HIto32: {
+            HReg rHi, rLo;
+            iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+            return rHi; /* and abandon rLo .. poor wee thing :-) */
+         }
+         case Iop_64to32: {
+            HReg rHi, rLo;
+            iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
+            return rLo; /* similar stupid comment to the above ... */
+         }
+         case Iop_16HIto8:
+         case Iop_32HIto16: {
+            HReg dst  = newVRegI(env);
+            HReg src  = iselIntExpr_R(env, e->Iex.Unop.arg);
+            Int shift = e->Iex.Unop.op == Iop_16HIto8 ? 8 : 16;
+            addInstr(env, mk_iMOVsd_RR(src,dst) );
+            addInstr(env, X86Instr_Sh32(Xsh_SHR, shift, dst));
+            return dst;
+         }
+         case Iop_1Uto32:
+         case Iop_1Uto8: {
+            HReg dst         = newVRegI(env);
+            X86CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+            addInstr(env, X86Instr_Set32(cond,dst));
+            return dst;
+         }
+         case Iop_1Sto8:
+         case Iop_1Sto16:
+         case Iop_1Sto32: {
+            /* could do better than this, but for now ... */
+            HReg dst         = newVRegI(env);
+            X86CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+            addInstr(env, X86Instr_Set32(cond,dst));
+            addInstr(env, X86Instr_Sh32(Xsh_SHL, 31, dst));
+            addInstr(env, X86Instr_Sh32(Xsh_SAR, 31, dst));
+            return dst;
+         }
+         case Iop_Ctz32: {
+            /* Count trailing zeroes, implemented by x86 'bsfl' */
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, X86Instr_Bsfr32(True,src,dst));
+            return dst;
+         }
+         case Iop_Clz32: {
+            /* Count leading zeroes.  Do 'bsrl' to establish the index
+               of the highest set bit, and subtract that value from
+               31. */
+            HReg tmp = newVRegI(env);
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, X86Instr_Bsfr32(False,src,tmp));
+            addInstr(env, X86Instr_Alu32R(Xalu_MOV, 
+                                          X86RMI_Imm(31), dst));
+            addInstr(env, X86Instr_Alu32R(Xalu_SUB,
+                                          X86RMI_Reg(tmp), dst));
+            return dst;
+         }
+
+         case Iop_CmpwNEZ32: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(src,dst));
+            addInstr(env, X86Instr_Unary32(Xun_NEG,dst));
+            addInstr(env, X86Instr_Alu32R(Xalu_OR,
+                                          X86RMI_Reg(src), dst));
+            addInstr(env, X86Instr_Sh32(Xsh_SAR, 31, dst));
+            return dst;
+         }
+         case Iop_Left8:
+         case Iop_Left16:
+         case Iop_Left32: {
+            HReg dst = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(src, dst));
+            addInstr(env, X86Instr_Unary32(Xun_NEG, dst));
+            addInstr(env, X86Instr_Alu32R(Xalu_OR, X86RMI_Reg(src), dst));
+            return dst;
+         }
+
+         case Iop_V128to32: {
+            HReg      dst  = newVRegI(env);
+            HReg      vec  = iselVecExpr(env, e->Iex.Unop.arg);
+            X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
+            sub_from_esp(env, 16);
+            addInstr(env, X86Instr_SseLdSt(False/*store*/, vec, esp0));
+            addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(esp0), dst ));
+            add_to_esp(env, 16);
+            return dst;
+         }
+
+         /* ReinterpF32asI32(e) */
+         /* Given an IEEE754 single, produce an I32 with the same bit
+            pattern.  Keep stack 8-aligned even though only using 4
+            bytes. */
+         case Iop_ReinterpF32asI32: {
+            HReg rf   = iselFltExpr(env, e->Iex.Unop.arg);
+            HReg dst  = newVRegI(env);
+            X86AMode* zero_esp = X86AMode_IR(0, hregX86_ESP());
+            /* paranoia */
+            set_FPU_rounding_default(env);
+            /* subl $8, %esp */
+            sub_from_esp(env, 8);
+            /* gstF %rf, 0(%esp) */
+            addInstr(env,
+                     X86Instr_FpLdSt(False/*store*/, 4, rf, zero_esp));
+            /* movl 0(%esp), %dst */
+            addInstr(env, 
+                     X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(zero_esp), dst));
+            /* addl $8, %esp */
+            add_to_esp(env, 8);
+            return dst;
+         }
+
+         case Iop_16to8:
+         case Iop_32to8:
+         case Iop_32to16:
+            /* These are no-ops. */
+            return iselIntExpr_R(env, e->Iex.Unop.arg);
+
+         case Iop_GetMSBs8x8: {
+            /* Note: the following assumes the helper is of
+               signature
+                  UInt fn ( ULong ), and is not a regparm fn.
+            */
+            HReg  xLo, xHi;
+            HReg  dst = newVRegI(env);
+            Addr fn = (Addr)h_generic_calc_GetMSBs8x8;
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Unop.arg);
+            addInstr(env, X86Instr_Push(X86RMI_Reg(xHi)));
+            addInstr(env, X86Instr_Push(X86RMI_Reg(xLo)));
+            addInstr(env, X86Instr_Call( Xcc_ALWAYS, (Addr32)fn,
+                                         0, mk_RetLoc_simple(RLPri_Int) ));
+            add_to_esp(env, 2*4);
+            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), dst));
+            return dst;
+         }
+
+         default: 
+            break;
+      }
+      break;
+   }
+
+   /* --------- GET --------- */
+   case Iex_Get: {
+      if (ty == Ity_I32) {
+         HReg dst = newVRegI(env);
+         addInstr(env, X86Instr_Alu32R(
+                          Xalu_MOV, 
+                          X86RMI_Mem(X86AMode_IR(e->Iex.Get.offset,
+                                                 hregX86_EBP())),
+                          dst));
+         return dst;
+      }
+      if (ty == Ity_I8 || ty == Ity_I16) {
+         HReg dst = newVRegI(env);
+         addInstr(env, X86Instr_LoadEX(
+                          toUChar(ty==Ity_I8 ? 1 : 2),
+                          False,
+                          X86AMode_IR(e->Iex.Get.offset,hregX86_EBP()),
+                          dst));
+         return dst;
+      }
+      break;
+   }
+
+   case Iex_GetI: {
+      X86AMode* am 
+         = genGuestArrayOffset(
+              env, e->Iex.GetI.descr, 
+                   e->Iex.GetI.ix, e->Iex.GetI.bias );
+      HReg dst = newVRegI(env);
+      if (ty == Ity_I8) {
+         addInstr(env, X86Instr_LoadEX( 1, False, am, dst ));
+         return dst;
+      }
+      if (ty == Ity_I32) {
+         addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(am), dst));
+         return dst;
+      }
+      break;
+   }
+
+   /* --------- CCALL --------- */
+   case Iex_CCall: {
+      HReg    dst = newVRegI(env);
+      vassert(ty == e->Iex.CCall.retty);
+
+      /* be very restrictive for now.  Only 32/64-bit ints allowed for
+         args, and 32 bits for return type.  Don't forget to change
+         the RetLoc if more return types are allowed in future. */
+      if (e->Iex.CCall.retty != Ity_I32)
+         goto irreducible;
+
+      /* Marshal args, do the call, clear stack. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, NULL/*guard*/,
+                    e->Iex.CCall.cee, e->Iex.CCall.retty, e->Iex.CCall.args );
+      vassert(is_sane_RetLoc(rloc));
+      vassert(rloc.pri == RLPri_Int);
+      vassert(addToSp == 0);
+
+      addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), dst));
+      return dst;
+   }
+
+   /* --------- LITERAL --------- */
+   /* 32/16/8-bit literals */
+   case Iex_Const: {
+      X86RMI* rmi = iselIntExpr_RMI ( env, e );
+      HReg    r   = newVRegI(env);
+      addInstr(env, X86Instr_Alu32R(Xalu_MOV, rmi, r));
+      return r;
+   }
+
+   /* --------- MULTIPLEX --------- */
+   case Iex_ITE: { // VFD
+      if ((ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
+          && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+         HReg   r1  = iselIntExpr_R(env, e->Iex.ITE.iftrue);
+         X86RM* r0  = iselIntExpr_RM(env, e->Iex.ITE.iffalse);
+         HReg   dst = newVRegI(env);
+         addInstr(env, mk_iMOVsd_RR(r1,dst));
+         X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+         /* cc ^ 1 is the inverse condition: overwrite dst with the
+            'iffalse' value when the condition does NOT hold. */
+         addInstr(env, X86Instr_CMov32(cc ^ 1, r0, dst));
+         return dst;
+      }
+      break;
+   }
+
+   default: 
+      break;
+   } /* switch (e->tag) */
+
+   /* We get here if no pattern matched. */
+  irreducible:
+   ppIRExpr(e);
+   vpanic("iselIntExpr_R: cannot reduce tree");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expression auxiliaries              ---*/
+/*---------------------------------------------------------*/
+
+/* --------------------- AMODEs --------------------- */
+
+/* Return an AMode which computes the value of the specified
+   expression, possibly also adding insns to the code list as a
+   result.  The expression may only be a 32-bit one.
+*/
+
+static Bool sane_AMode ( X86AMode* am )
+{
+   /* An amode is acceptable if its registers are 32-bit ints and are
+      virtual, except that %ebp may appear as the base of an IR
+      amode (it addresses the guest state). */
+   if (am->tag == Xam_IR) {
+      HReg r = am->Xam.IR.reg;
+      return toBool( hregClass(r) == HRcInt32
+                     && (hregIsVirtual(r)
+                         || sameHReg(r, hregX86_EBP())) );
+   }
+   if (am->tag == Xam_IRRS) {
+      HReg b = am->Xam.IRRS.base;
+      HReg x = am->Xam.IRRS.index;
+      return toBool( hregClass(b) == HRcInt32
+                     && hregIsVirtual(b)
+                     && hregClass(x) == HRcInt32
+                     && hregIsVirtual(x) );
+   }
+   vpanic("sane_AMode: unknown x86 amode tag");
+}
+
+static X86AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check what it produced. */
+   X86AMode* am = iselIntExpr_AMode_wrk(env, e);
+   vassert(sane_AMode(am));
+   return am;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static X86AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I32);
+
+   /* Add32( Add32(expr1, Shl32(expr2, simm)), imm32 ) */
+   if (e->tag == Iex_Binop
+       && e->Iex.Binop.op == Iop_Add32
+       && e->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U32
+       && e->Iex.Binop.arg1->tag == Iex_Binop
+       && e->Iex.Binop.arg1->Iex.Binop.op == Iop_Add32
+       && e->Iex.Binop.arg1->Iex.Binop.arg2->tag == Iex_Binop
+       && e->Iex.Binop.arg1->Iex.Binop.arg2->Iex.Binop.op == Iop_Shl32
+       && e->Iex.Binop.arg1
+           ->Iex.Binop.arg2->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg1
+           ->Iex.Binop.arg2->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8) {
+      UInt shift = e->Iex.Binop.arg1
+                    ->Iex.Binop.arg2->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+      UInt imm32 = e->Iex.Binop.arg2->Iex.Const.con->Ico.U32;
+      if (shift == 1 || shift == 2 || shift == 3) {
+         HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1->Iex.Binop.arg1);
+         HReg r2 = iselIntExpr_R(env, e->Iex.Binop.arg1
+                                       ->Iex.Binop.arg2->Iex.Binop.arg1 );
+         return X86AMode_IRRS(imm32, r1, r2, shift);
+      }
+   }
+
+   /* Add32(expr1, Shl32(expr2, imm)) */
+   if (e->tag == Iex_Binop
+       && e->Iex.Binop.op == Iop_Add32
+       && e->Iex.Binop.arg2->tag == Iex_Binop
+       && e->Iex.Binop.arg2->Iex.Binop.op == Iop_Shl32
+       && e->Iex.Binop.arg2->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8) {
+      UInt shift = e->Iex.Binop.arg2->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
+      if (shift == 1 || shift == 2 || shift == 3) {
+         HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         HReg r2 = iselIntExpr_R(env, e->Iex.Binop.arg2->Iex.Binop.arg1 );
+         return X86AMode_IRRS(0, r1, r2, shift);
+      }
+   }
+
+   /* Add32(expr,i) */
+   if (e->tag == Iex_Binop 
+       && e->Iex.Binop.op == Iop_Add32
+       && e->Iex.Binop.arg2->tag == Iex_Const
+       && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U32) {
+      HReg r1 = iselIntExpr_R(env,  e->Iex.Binop.arg1);
+      return X86AMode_IR(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32, r1);
+   }
+
+   /* Doesn't match anything in particular.  Generate it into
+      a register and use that. */
+   {
+      HReg r1 = iselIntExpr_R(env, e);
+      return X86AMode_IR(0, r1);
+   }
+}
+
+
+/* --------------------- RMIs --------------------- */
+
+/* Similarly, calculate an expression into an X86RMI operand.  As with
+   iselIntExpr_R, the expression can have type 32, 16 or 8 bits.  */
+
+static X86RMI* iselIntExpr_RMI ( ISelEnv* env, IRExpr* e )
+{
+   X86RMI* rmi = iselIntExpr_RMI_wrk(env, e);
+   /* sanity-check the operand the worker produced ... */
+   switch (rmi->tag) {
+      case Xrmi_Imm:
+         break;
+      case Xrmi_Reg:
+         vassert(hregClass(rmi->Xrmi.Reg.reg) == HRcInt32);
+         vassert(hregIsVirtual(rmi->Xrmi.Reg.reg));
+         break;
+      case Xrmi_Mem:
+         vassert(sane_AMode(rmi->Xrmi.Mem.am));
+         break;
+      default:
+         vpanic("iselIntExpr_RMI: unknown x86 RMI tag");
+   }
+   return rmi;
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static X86RMI* iselIntExpr_RMI_wrk ( ISelEnv* env, IRExpr* e )
+{
+   /* Cases are tried in priority order; the first match wins, and
+      the final fallthrough case always succeeds. */
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
+
+   /* special case: immediate */
+   if (e->tag == Iex_Const) {
+      UInt u;
+      /* Narrow (8/16-bit) constants are zero-extended to 32 bits. */
+      switch (e->Iex.Const.con->tag) {
+         case Ico_U32: u = e->Iex.Const.con->Ico.U32; break;
+         case Ico_U16: u = 0xFFFF & (e->Iex.Const.con->Ico.U16); break;
+         case Ico_U8:  u = 0xFF   & (e->Iex.Const.con->Ico.U8); break;
+         default: vpanic("iselIntExpr_RMI.Iex_Const(x86h)");
+      }
+      return X86RMI_Imm(u);
+   }
+
+   /* special case: 32-bit GET */
+   if (e->tag == Iex_Get && ty == Ity_I32) {
+      /* Guest state is addressed at a fixed offset from %ebp. */
+      return X86RMI_Mem(X86AMode_IR(e->Iex.Get.offset,
+                                    hregX86_EBP()));
+   }
+
+   /* special case: 32-bit load from memory */
+   if (e->tag == Iex_Load && ty == Ity_I32 
+       && e->Iex.Load.end == Iend_LE) {
+      X86AMode* am = iselIntExpr_AMode(env, e->Iex.Load.addr);
+      return X86RMI_Mem(am);
+   }
+
+   /* default case: calculate into a register and return that */
+   {
+      HReg r = iselIntExpr_R ( env, e );
+      return X86RMI_Reg(r);
+   }
+}
+
+
+/* --------------------- RIs --------------------- */
+
+/* Calculate an expression into an X86RI operand.  As with
+   iselIntExpr_R, the expression can have type 32, 16 or 8 bits. */
+
+static X86RI* iselIntExpr_RI ( ISelEnv* env, IRExpr* e )
+{
+   /* Compute via the worker, then vet the result before
+      returning it. */
+   X86RI* result = iselIntExpr_RI_wrk(env, e);
+   if (result->tag == Xri_Imm) {
+      /* Immediates carry no state that needs checking. */
+      return result;
+   }
+   if (result->tag == Xri_Reg) {
+      /* Register operands must be 32-bit virtual registers. */
+      vassert(hregClass(result->Xri.Reg.reg) == HRcInt32);
+      vassert(hregIsVirtual(result->Xri.Reg.reg));
+      return result;
+   }
+   vpanic("iselIntExpr_RI: unknown x86 RI tag");
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+/* Worker for iselIntExpr_RI: produce either an immediate or a
+   register operand for expression e (32/16/8-bit int typed).
+   Narrow constants are zero-extended to 32 bits. */
+static X86RI* iselIntExpr_RI_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
+
+   /* special case: immediate */
+   if (e->tag == Iex_Const) {
+      UInt u;
+      switch (e->Iex.Const.con->tag) {
+         case Ico_U32: u = e->Iex.Const.con->Ico.U32; break;
+         case Ico_U16: u = 0xFFFF & (e->Iex.Const.con->Ico.U16); break;
+         case Ico_U8:  u = 0xFF   & (e->Iex.Const.con->Ico.U8); break;
+         /* Bug fix: the panic message previously said
+            "iselIntExpr_RMI..." — a copy-paste from the RMI worker —
+            which misidentified the failing function in diagnostics. */
+         default: vpanic("iselIntExpr_RI.Iex_Const(x86h)");
+      }
+      return X86RI_Imm(u);
+   }
+
+   /* default case: calculate into a register and return that */
+   {
+      HReg r = iselIntExpr_R ( env, e );
+      return X86RI_Reg(r);
+   }
+}
+
+
+/* --------------------- RMs --------------------- */
+
+/* Similarly, calculate an expression into an X86RM operand.  As with
+   iselIntExpr_R, the expression can have type 32, 16 or 8 bits.  */
+
+static X86RM* iselIntExpr_RM ( ISelEnv* env, IRExpr* e )
+{
+   /* Compute via the worker, then vet the result before
+      returning it. */
+   X86RM* result = iselIntExpr_RM_wrk(env, e);
+   if (result->tag == Xrm_Reg) {
+      /* Register operands must be 32-bit virtual registers. */
+      vassert(hregClass(result->Xrm.Reg.reg) == HRcInt32);
+      vassert(hregIsVirtual(result->Xrm.Reg.reg));
+      return result;
+   }
+   if (result->tag == Xrm_Mem) {
+      /* Memory operands must carry a well-formed amode. */
+      vassert(sane_AMode(result->Xrm.Mem.am));
+      return result;
+   }
+   vpanic("iselIntExpr_RM: unknown x86 RM tag");
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static X86RM* iselIntExpr_RM_wrk ( ISelEnv* env, IRExpr* e )
+{
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);
+
+   /* special case: 32-bit GET */
+   if (e->tag == Iex_Get && ty == Ity_I32) {
+      /* Guest state is addressed at a fixed offset from %ebp. */
+      return X86RM_Mem(X86AMode_IR(e->Iex.Get.offset,
+                                   hregX86_EBP()));
+   }
+
+   /* special case: load from memory */
+   /* NOTE(review): this case is an unimplemented placeholder —
+      loads currently fall through to the register case below.
+      Presumably harmless (just a missed optimisation); confirm. */
+
+   /* default case: calculate into a register and return that */
+   {
+      HReg r = iselIntExpr_R ( env, e );
+      return X86RM_Reg(r);
+   }
+}
+
+
+/* --------------------- CONDCODE --------------------- */
+
+/* Generate code to evaluated a bit-typed expression, returning the
+   condition code which would correspond when the expression would
+   notionally have returned 1. */
+
+static X86CondCode iselCondCode ( ISelEnv* env, IRExpr* e )
+{
+   /* Thin wrapper: the result is a plain condition-code value, so
+      unlike the other isel wrappers there is no register-class or
+      amode invariant to vassert here. */
+   /* Uh, there's nothing we can sanity check here, unfortunately. */
+   return iselCondCode_wrk(env,e);
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static X86CondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e )
+{
+   /* Emit code that leaves the x86 flags set so that the returned
+      condition code is taken exactly when e evaluates to 1.
+      Cases are tried in priority order; more specific patterns
+      come before the generic fallbacks. */
+   MatchInfo mi;
+
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I1);
+
+   /* var */
+   if (e->tag == Iex_RdTmp) {
+      HReg r32 = lookupIRTemp(env, e->Iex.RdTmp.tmp);
+      /* Test32 doesn't modify r32; so this is OK. */
+      /* Only bit 0 is meaningful for an Ity_I1 value. */
+      addInstr(env, X86Instr_Test32(1,X86RM_Reg(r32)));
+      return Xcc_NZ;
+   }
+
+   /* Constant 1:Bit */
+   if (e->tag == Iex_Const) {
+      HReg r;
+      vassert(e->Iex.Const.con->tag == Ico_U1);
+      vassert(e->Iex.Const.con->Ico.U1 == True 
+              || e->Iex.Const.con->Ico.U1 == False);
+      r = newVRegI(env);
+      /* xor r,r always sets ZF, so Z is the always-taken condition
+         and NZ the never-taken one; pick whichever matches the
+         constant. */
+      addInstr(env, X86Instr_Alu32R(Xalu_MOV,X86RMI_Imm(0),r));
+      addInstr(env, X86Instr_Alu32R(Xalu_XOR,X86RMI_Reg(r),r));
+      return e->Iex.Const.con->Ico.U1 ? Xcc_Z : Xcc_NZ;
+   }
+
+   /* Not1(e) */
+   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) {
+      /* Generate code for the arg, and negate the test condition */
+      /* NOTE(review): relies on Xcc_* being laid out so that
+         xor-ing with 1 flips a condition to its complement (e.g.
+         Z <-> NZ) — confirm against the X86CondCode enum. */
+      return 1 ^ iselCondCode(env, e->Iex.Unop.arg);
+   }
+
+   /* --- patterns rooted at: 32to1 --- */
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_32to1) {
+      /* Truncation to 1 bit == test bit 0 of the 32-bit value. */
+      X86RM* rm = iselIntExpr_RM(env, e->Iex.Unop.arg);
+      addInstr(env, X86Instr_Test32(1,rm));
+      return Xcc_NZ;
+   }
+
+   /* --- patterns rooted at: CmpNEZ8 --- */
+
+   /* CmpNEZ8(x) */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ8) {
+      /* Mask down to the low 8 bits before testing. */
+      X86RM* rm = iselIntExpr_RM(env, e->Iex.Unop.arg);
+      addInstr(env, X86Instr_Test32(0xFF,rm));
+      return Xcc_NZ;
+   }
+
+   /* --- patterns rooted at: CmpNEZ16 --- */
+
+   /* CmpNEZ16(x) */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ16) {
+      /* Mask down to the low 16 bits before testing. */
+      X86RM* rm = iselIntExpr_RM(env, e->Iex.Unop.arg);
+      addInstr(env, X86Instr_Test32(0xFFFF,rm));
+      return Xcc_NZ;
+   }
+
+   /* --- patterns rooted at: CmpNEZ32 --- */
+
+   /* CmpNEZ32(And32(x,y)) */
+   {
+      DECLARE_PATTERN(p_CmpNEZ32_And32);
+      DEFINE_PATTERN(p_CmpNEZ32_And32,
+                     unop(Iop_CmpNEZ32, binop(Iop_And32, bind(0), bind(1))));
+      if (matchIRExpr(&mi, p_CmpNEZ32_And32, e)) {
+         /* AND into a fresh copy: ZF is set iff (x & y) == 0, and
+            r0 itself is left unmodified. */
+         HReg    r0   = iselIntExpr_R(env, mi.bindee[0]);
+         X86RMI* rmi1 = iselIntExpr_RMI(env, mi.bindee[1]);
+         HReg    tmp  = newVRegI(env);
+         addInstr(env, mk_iMOVsd_RR(r0, tmp));
+         addInstr(env, X86Instr_Alu32R(Xalu_AND,rmi1,tmp));
+         return Xcc_NZ;
+      }
+   }
+
+   /* CmpNEZ32(Or32(x,y)) */
+   {
+      DECLARE_PATTERN(p_CmpNEZ32_Or32);
+      DEFINE_PATTERN(p_CmpNEZ32_Or32,
+                     unop(Iop_CmpNEZ32, binop(Iop_Or32, bind(0), bind(1))));
+      if (matchIRExpr(&mi, p_CmpNEZ32_Or32, e)) {
+         /* As above, but with OR: ZF set iff (x | y) == 0. */
+         HReg    r0   = iselIntExpr_R(env, mi.bindee[0]);
+         X86RMI* rmi1 = iselIntExpr_RMI(env, mi.bindee[1]);
+         HReg    tmp  = newVRegI(env);
+         addInstr(env, mk_iMOVsd_RR(r0, tmp));
+         addInstr(env, X86Instr_Alu32R(Xalu_OR,rmi1,tmp));
+         return Xcc_NZ;
+      }
+   }
+
+   /* CmpNEZ32(GET(..):I32) */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ32
+       && e->Iex.Unop.arg->tag == Iex_Get) {
+      /* Compare the guest-state word against zero directly in
+         memory, saving a load into a register. */
+      X86AMode* am = X86AMode_IR(e->Iex.Unop.arg->Iex.Get.offset, 
+                                 hregX86_EBP());
+      addInstr(env, X86Instr_Alu32M(Xalu_CMP, X86RI_Imm(0), am));
+      return Xcc_NZ;
+   }
+
+   /* CmpNEZ32(x) */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ32) {
+      HReg    r1   = iselIntExpr_R(env, e->Iex.Unop.arg);
+      X86RMI* rmi2 = X86RMI_Imm(0);
+      addInstr(env, X86Instr_Alu32R(Xalu_CMP,rmi2,r1));
+      return Xcc_NZ;
+   }
+
+   /* --- patterns rooted at: CmpNEZ64 --- */
+
+   /* CmpNEZ64(Or64(x,y)) */
+   {
+      DECLARE_PATTERN(p_CmpNEZ64_Or64);
+      DEFINE_PATTERN(p_CmpNEZ64_Or64,
+                     unop(Iop_CmpNEZ64, binop(Iop_Or64, bind(0), bind(1))));
+      if (matchIRExpr(&mi, p_CmpNEZ64_Or64, e)) {
+         /* OR all four 32-bit halves into tmp: the result is
+            nonzero iff (x | y) != 0.  tmp is a fresh vreg, so the
+            instructions emitted for bindee[1] in between do not
+            disturb it. */
+         HReg    hi1, lo1, hi2, lo2;
+         HReg    tmp  = newVRegI(env);
+         iselInt64Expr( &hi1, &lo1, env, mi.bindee[0] );
+         addInstr(env, mk_iMOVsd_RR(hi1, tmp));
+         addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(lo1),tmp));
+         iselInt64Expr( &hi2, &lo2, env, mi.bindee[1] );
+         addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(hi2),tmp));
+         addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(lo2),tmp));
+         return Xcc_NZ;
+      }
+   }
+
+   /* CmpNEZ64(x) */
+   if (e->tag == Iex_Unop 
+       && e->Iex.Unop.op == Iop_CmpNEZ64) {
+      /* hi|lo is nonzero iff the 64-bit value is nonzero. */
+      HReg hi, lo;
+      HReg tmp = newVRegI(env);
+      iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg );
+      addInstr(env, mk_iMOVsd_RR(hi, tmp));
+      addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(lo), tmp));
+      return Xcc_NZ;
+   }
+
+   /* --- patterns rooted at: Cmp{EQ,NE}{8,16} --- */
+
+   /* CmpEQ8 / CmpNE8 */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpEQ8
+           || e->Iex.Binop.op == Iop_CmpNE8
+           || e->Iex.Binop.op == Iop_CasCmpEQ8
+           || e->Iex.Binop.op == Iop_CasCmpNE8)) {
+      if (isZeroU8(e->Iex.Binop.arg2)) {
+         /* Comparison against zero: a single TEST of the low byte
+            suffices, no XOR needed. */
+         HReg    r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         addInstr(env, X86Instr_Test32(0xFF,X86RM_Reg(r1)));
+         switch (e->Iex.Binop.op) {
+            case Iop_CmpEQ8: case Iop_CasCmpEQ8: return Xcc_Z;
+            case Iop_CmpNE8: case Iop_CasCmpNE8: return Xcc_NZ;
+            default: vpanic("iselCondCode(x86): CmpXX8(expr,0:I8)");
+         }
+      } else {
+         /* General case: XOR the operands, then test whether the
+            low 8 bits of the difference are all zero. */
+         HReg    r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+         X86RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+         HReg    r    = newVRegI(env);
+         addInstr(env, mk_iMOVsd_RR(r1,r));
+         addInstr(env, X86Instr_Alu32R(Xalu_XOR,rmi2,r));
+         addInstr(env, X86Instr_Test32(0xFF,X86RM_Reg(r)));
+         switch (e->Iex.Binop.op) {
+            case Iop_CmpEQ8: case Iop_CasCmpEQ8: return Xcc_Z;
+            case Iop_CmpNE8: case Iop_CasCmpNE8: return Xcc_NZ;
+            default: vpanic("iselCondCode(x86): CmpXX8(expr,expr)");
+         }
+      }
+   }
+
+   /* CmpEQ16 / CmpNE16 */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpEQ16
+           || e->Iex.Binop.op == Iop_CmpNE16
+           || e->Iex.Binop.op == Iop_CasCmpEQ16
+           || e->Iex.Binop.op == Iop_CasCmpNE16
+           || e->Iex.Binop.op == Iop_ExpCmpNE16)) {
+      /* Same XOR-then-TEST scheme as the 8-bit case, but masking
+         the low 16 bits. */
+      HReg    r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      X86RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+      HReg    r    = newVRegI(env);
+      addInstr(env, mk_iMOVsd_RR(r1,r));
+      addInstr(env, X86Instr_Alu32R(Xalu_XOR,rmi2,r));
+      addInstr(env, X86Instr_Test32(0xFFFF,X86RM_Reg(r)));
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpEQ16: case Iop_CasCmpEQ16:
+            return Xcc_Z;
+         case Iop_CmpNE16: case Iop_CasCmpNE16: case Iop_ExpCmpNE16:
+            return Xcc_NZ;
+         default:
+            vpanic("iselCondCode(x86): CmpXX16");
+      }
+   }
+
+   /* CmpNE32(ccall, 32-bit constant) (--smc-check=all optimisation).
+      Saves a "movl %eax, %tmp" compared to the default route. */
+   if (e->tag == Iex_Binop 
+       && e->Iex.Binop.op == Iop_CmpNE32
+       && e->Iex.Binop.arg1->tag == Iex_CCall
+       && e->Iex.Binop.arg2->tag == Iex_Const) {
+      IRExpr* cal = e->Iex.Binop.arg1;
+      IRExpr* con = e->Iex.Binop.arg2;
+      /* clone & partial-eval of generic Iex_CCall and Iex_Const cases */
+      vassert(cal->Iex.CCall.retty == Ity_I32); /* else ill-typed IR */
+      vassert(con->Iex.Const.con->tag == Ico_U32);
+      /* Marshal args, do the call. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, NULL/*guard*/,
+                    cal->Iex.CCall.cee,
+                    cal->Iex.CCall.retty, cal->Iex.CCall.args );
+      vassert(is_sane_RetLoc(rloc));
+      vassert(rloc.pri == RLPri_Int);
+      vassert(addToSp == 0);
+      /* */
+      /* Compare the call result (in %eax) against the constant
+         directly, without copying it to a temporary first. */
+      addInstr(env, X86Instr_Alu32R(Xalu_CMP,
+                                    X86RMI_Imm(con->Iex.Const.con->Ico.U32),
+                                    hregX86_EAX()));
+      return Xcc_NZ;
+   }
+
+   /* Cmp*32*(x,y) */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpEQ32
+           || e->Iex.Binop.op == Iop_CmpNE32
+           || e->Iex.Binop.op == Iop_CmpLT32S
+           || e->Iex.Binop.op == Iop_CmpLT32U
+           || e->Iex.Binop.op == Iop_CmpLE32S
+           || e->Iex.Binop.op == Iop_CmpLE32U
+           || e->Iex.Binop.op == Iop_CasCmpEQ32
+           || e->Iex.Binop.op == Iop_CasCmpNE32
+           || e->Iex.Binop.op == Iop_ExpCmpNE32)) {
+      /* One CMP, then map the IR comparison onto the matching x86
+         condition (signed -> L/LE, unsigned -> B/BE). */
+      HReg    r1   = iselIntExpr_R(env, e->Iex.Binop.arg1);
+      X86RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+      addInstr(env, X86Instr_Alu32R(Xalu_CMP,rmi2,r1));
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpEQ32: case Iop_CasCmpEQ32: return Xcc_Z;
+         case Iop_CmpNE32:
+         case Iop_CasCmpNE32: case Iop_ExpCmpNE32: return Xcc_NZ;
+         case Iop_CmpLT32S: return Xcc_L;
+         case Iop_CmpLT32U: return Xcc_B;
+         case Iop_CmpLE32S: return Xcc_LE;
+         case Iop_CmpLE32U: return Xcc_BE;
+         default: vpanic("iselCondCode(x86): CmpXX32");
+      }
+   }
+
+   /* CmpNE64 */
+   if (e->tag == Iex_Binop 
+       && (e->Iex.Binop.op == Iop_CmpNE64
+           || e->Iex.Binop.op == Iop_CmpEQ64)) {
+      /* XOR the two halves pairwise, OR the results together: the
+         accumulated value is zero iff the 64-bit operands are
+         equal. */
+      HReg hi1, hi2, lo1, lo2;
+      HReg tHi = newVRegI(env);
+      HReg tLo = newVRegI(env);
+      iselInt64Expr( &hi1, &lo1, env, e->Iex.Binop.arg1 );
+      iselInt64Expr( &hi2, &lo2, env, e->Iex.Binop.arg2 );
+      addInstr(env, mk_iMOVsd_RR(hi1, tHi));
+      addInstr(env, X86Instr_Alu32R(Xalu_XOR,X86RMI_Reg(hi2), tHi));
+      addInstr(env, mk_iMOVsd_RR(lo1, tLo));
+      addInstr(env, X86Instr_Alu32R(Xalu_XOR,X86RMI_Reg(lo2), tLo));
+      addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(tHi), tLo));
+      switch (e->Iex.Binop.op) {
+         case Iop_CmpNE64: return Xcc_NZ;
+         case Iop_CmpEQ64: return Xcc_Z;
+         default: vpanic("iselCondCode(x86): CmpXX64");
+      }
+   }
+
+   /* No pattern matched: dump the offending expression and die. */
+   ppIRExpr(e);
+   vpanic("iselCondCode");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Integer expressions (64 bit)                ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 64-bit value into a register pair, which is returned as
+   the first two parameters.  As with iselIntExpr_R, these may be
+   either real or virtual regs; in any case they must not be changed
+   by subsequent code emitted by the caller.  */
+
+static void iselInt64Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+{
+   iselInt64Expr_wrk(rHi, rLo, env, e);
+#  if 0
+   /* Debug aid (disabled): dump the expression just selected. */
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* Both halves of the result must be 32-bit virtual registers. */
+   vassert(hregClass(*rHi) == HRcInt32);
+   vassert(hregIsVirtual(*rHi));
+   vassert(hregClass(*rLo) == HRcInt32);
+   vassert(hregIsVirtual(*rLo));
+}
+
+/* DO NOT CALL THIS DIRECTLY ! */
+static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
+{
+   MatchInfo mi;
+   HWord fn = 0; /* helper fn for most SIMD64 stuff */
+   vassert(e);
+   vassert(typeOfIRExpr(env->type_env,e) == Ity_I64);
+
+   /* 64-bit literal */
+   if (e->tag == Iex_Const) {
+      ULong w64 = e->Iex.Const.con->Ico.U64;
+      UInt  wHi = toUInt(w64 >> 32);
+      UInt  wLo = toUInt(w64);
+      HReg  tLo = newVRegI(env);
+      HReg  tHi = newVRegI(env);
+      vassert(e->Iex.Const.con->tag == Ico_U64);
+      if (wLo == wHi) {
+         /* Save a precious Int register in this special case. */
+         addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(wLo), tLo));
+         *rHi = tLo;
+         *rLo = tLo;
+      } else {
+         addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(wHi), tHi));
+         addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(wLo), tLo));
+         *rHi = tHi;
+         *rLo = tLo;
+      }
+      return;
+   }
+
+   /* read 64-bit IRTemp */
+   if (e->tag == Iex_RdTmp) {
+      lookupIRTemp64( rHi, rLo, env, e->Iex.RdTmp.tmp);
+      return;
+   }
+
+   /* 64-bit load */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      HReg     tLo, tHi;
+      X86AMode *am0, *am4;
+      vassert(e->Iex.Load.ty == Ity_I64);
+      tLo = newVRegI(env);
+      tHi = newVRegI(env);
+      am0 = iselIntExpr_AMode(env, e->Iex.Load.addr);
+      am4 = advance4(am0);
+      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am0), tLo ));
+      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am4), tHi ));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* 64-bit GET */
+   if (e->tag == Iex_Get) {
+      X86AMode* am  = X86AMode_IR(e->Iex.Get.offset, hregX86_EBP());
+      X86AMode* am4 = advance4(am);
+      HReg tLo = newVRegI(env);
+      HReg tHi = newVRegI(env);
+      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am), tLo ));
+      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am4), tHi ));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* 64-bit GETI */
+   if (e->tag == Iex_GetI) {
+      X86AMode* am 
+         = genGuestArrayOffset( env, e->Iex.GetI.descr, 
+                                     e->Iex.GetI.ix, e->Iex.GetI.bias );
+      X86AMode* am4 = advance4(am);
+      HReg tLo = newVRegI(env);
+      HReg tHi = newVRegI(env);
+      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am), tLo ));
+      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am4), tHi ));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* 64-bit ITE: ITE(g, expr, expr) */ // VFD
+   if (e->tag == Iex_ITE) {
+      HReg e0Lo, e0Hi, e1Lo, e1Hi;
+      HReg tLo = newVRegI(env);
+      HReg tHi = newVRegI(env);
+      iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.ITE.iffalse);
+      iselInt64Expr(&e1Hi, &e1Lo, env, e->Iex.ITE.iftrue);
+      addInstr(env, mk_iMOVsd_RR(e1Hi, tHi));
+      addInstr(env, mk_iMOVsd_RR(e1Lo, tLo));
+      X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+      /* This assumes the first cmov32 doesn't trash the condition
+         codes, so they are still available for the second cmov32 */
+      addInstr(env, X86Instr_CMov32(cc ^ 1, X86RM_Reg(e0Hi), tHi));
+      addInstr(env, X86Instr_CMov32(cc ^ 1, X86RM_Reg(e0Lo), tLo));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   /* --------- BINARY ops --------- */
+   if (e->tag == Iex_Binop) {
+      switch (e->Iex.Binop.op) {
+         /* 32 x 32 -> 64 multiply */
+         case Iop_MullU32:
+         case Iop_MullS32: {
+            /* get one operand into %eax, and the other into a R/M.
+               Need to make an educated guess about which is better in
+               which. */
+            HReg   tLo    = newVRegI(env);
+            HReg   tHi    = newVRegI(env);
+            Bool   syned  = toBool(e->Iex.Binop.op == Iop_MullS32);
+            X86RM* rmLeft = iselIntExpr_RM(env, e->Iex.Binop.arg1);
+            HReg   rRight = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            addInstr(env, mk_iMOVsd_RR(rRight, hregX86_EAX()));
+            addInstr(env, X86Instr_MulL(syned, rmLeft));
+            /* Result is now in EDX:EAX.  Tell the caller. */
+            addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
+            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 64 x 32 -> (32(rem),32(div)) division */
+         case Iop_DivModU64to32:
+         case Iop_DivModS64to32: {
+            /* Get the 64-bit operand into edx:eax, and the other into
+               any old R/M. */
+            HReg sHi, sLo;
+            HReg   tLo     = newVRegI(env);
+            HReg   tHi     = newVRegI(env);
+            Bool   syned   = toBool(e->Iex.Binop.op == Iop_DivModS64to32);
+            X86RM* rmRight = iselIntExpr_RM(env, e->Iex.Binop.arg2);
+            iselInt64Expr(&sHi,&sLo, env, e->Iex.Binop.arg1);
+            addInstr(env, mk_iMOVsd_RR(sHi, hregX86_EDX()));
+            addInstr(env, mk_iMOVsd_RR(sLo, hregX86_EAX()));
+            addInstr(env, X86Instr_Div(syned, rmRight));
+            addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
+            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* Or64/And64/Xor64 */
+         case Iop_Or64:
+         case Iop_And64:
+         case Iop_Xor64: {
+            HReg xLo, xHi, yLo, yHi;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            X86AluOp op = e->Iex.Binop.op==Iop_Or64 ? Xalu_OR
+                          : e->Iex.Binop.op==Iop_And64 ? Xalu_AND
+                          : Xalu_XOR;
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
+            addInstr(env, mk_iMOVsd_RR(xHi, tHi));
+            addInstr(env, X86Instr_Alu32R(op, X86RMI_Reg(yHi), tHi));
+            addInstr(env, mk_iMOVsd_RR(xLo, tLo));
+            addInstr(env, X86Instr_Alu32R(op, X86RMI_Reg(yLo), tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* Add64/Sub64 */
+         case Iop_Add64:
+            if (e->Iex.Binop.arg2->tag == Iex_Const) {
+               /* special case Add64(e, const) */
+               ULong w64 = e->Iex.Binop.arg2->Iex.Const.con->Ico.U64;
+               UInt  wHi = toUInt(w64 >> 32);
+               UInt  wLo = toUInt(w64);
+               HReg  tLo = newVRegI(env);
+               HReg  tHi = newVRegI(env);
+               HReg  xLo, xHi;
+               vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U64);
+               iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+               addInstr(env, mk_iMOVsd_RR(xHi, tHi));
+               addInstr(env, mk_iMOVsd_RR(xLo, tLo));
+               addInstr(env, X86Instr_Alu32R(Xalu_ADD, X86RMI_Imm(wLo), tLo));
+               addInstr(env, X86Instr_Alu32R(Xalu_ADC, X86RMI_Imm(wHi), tHi));
+               *rHi = tHi;
+               *rLo = tLo;
+               return;
+            }
+            /* else fall through to the generic case */
+         case Iop_Sub64: {
+            HReg xLo, xHi, yLo, yHi;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+            addInstr(env, mk_iMOVsd_RR(xHi, tHi));
+            addInstr(env, mk_iMOVsd_RR(xLo, tLo));
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
+            if (e->Iex.Binop.op==Iop_Add64) {
+               addInstr(env, X86Instr_Alu32R(Xalu_ADD, X86RMI_Reg(yLo), tLo));
+               addInstr(env, X86Instr_Alu32R(Xalu_ADC, X86RMI_Reg(yHi), tHi));
+            } else {
+               addInstr(env, X86Instr_Alu32R(Xalu_SUB, X86RMI_Reg(yLo), tLo));
+               addInstr(env, X86Instr_Alu32R(Xalu_SBB, X86RMI_Reg(yHi), tHi));
+            }
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 32HLto64(e1,e2) */
+         case Iop_32HLto64:
+            *rHi = iselIntExpr_R(env, e->Iex.Binop.arg1);
+            *rLo = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            return;
+
+         /* 64-bit shifts */
+         case Iop_Shl64: {
+            /* We use the same ingenious scheme as gcc.  Put the value
+               to be shifted into %hi:%lo, and the shift amount into
+               %cl.  Then (dsts on right, a la ATT syntax):
+ 
+               shldl %cl, %lo, %hi   -- make %hi be right for the
+                                     -- shift amt %cl % 32
+               shll  %cl, %lo        -- make %lo be right for the
+                                     -- shift amt %cl % 32
+
+               Now, if (shift amount % 64) is in the range 32 .. 63,
+               we have to do a fixup, which puts the result low half
+               into the result high half, and zeroes the low half:
+
+               testl $32, %ecx
+
+               cmovnz %lo, %hi
+               movl $0, %tmp         -- sigh; need yet another reg
+               cmovnz %tmp, %lo 
+            */
+            HReg rAmt, sHi, sLo, tHi, tLo, tTemp;
+            tLo = newVRegI(env);
+            tHi = newVRegI(env);
+            tTemp = newVRegI(env);
+            rAmt = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            iselInt64Expr(&sHi,&sLo, env, e->Iex.Binop.arg1);
+            addInstr(env, mk_iMOVsd_RR(rAmt, hregX86_ECX()));
+            addInstr(env, mk_iMOVsd_RR(sHi, tHi));
+            addInstr(env, mk_iMOVsd_RR(sLo, tLo));
+            /* Ok.  Now shift amt is in %ecx, and value is in tHi/tLo
+               and those regs are legitimately modifiable. */
+            addInstr(env, X86Instr_Sh3232(Xsh_SHL, 0/*%cl*/, tLo, tHi));
+            addInstr(env, X86Instr_Sh32(Xsh_SHL, 0/*%cl*/, tLo));
+            addInstr(env, X86Instr_Test32(32, X86RM_Reg(hregX86_ECX())));
+            addInstr(env, X86Instr_CMov32(Xcc_NZ, X86RM_Reg(tLo), tHi));
+            addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(0), tTemp));
+            addInstr(env, X86Instr_CMov32(Xcc_NZ, X86RM_Reg(tTemp), tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         case Iop_Shr64: {
+            /* We use the same ingenious scheme as gcc.  Put the value
+               to be shifted into %hi:%lo, and the shift amount into
+               %cl.  Then:
+ 
+               shrdl %cl, %hi, %lo   -- make %lo be right for the
+                                     -- shift amt %cl % 32
+               shrl  %cl, %hi        -- make %hi be right for the
+                                     -- shift amt %cl % 32
+
+               Now, if (shift amount % 64) is in the range 32 .. 63,
+               we have to do a fixup, which puts the result high half
+               into the result low half, and zeroes the high half:
+
+               testl $32, %ecx
+
+               cmovnz %hi, %lo
+               movl $0, %tmp         -- sigh; need yet another reg
+               cmovnz %tmp, %hi
+            */
+            HReg rAmt, sHi, sLo, tHi, tLo, tTemp;
+            tLo = newVRegI(env);
+            tHi = newVRegI(env);
+            tTemp = newVRegI(env);
+            rAmt = iselIntExpr_R(env, e->Iex.Binop.arg2);
+            iselInt64Expr(&sHi,&sLo, env, e->Iex.Binop.arg1);
+            addInstr(env, mk_iMOVsd_RR(rAmt, hregX86_ECX()));
+            addInstr(env, mk_iMOVsd_RR(sHi, tHi));
+            addInstr(env, mk_iMOVsd_RR(sLo, tLo));
+            /* Ok.  Now shift amt is in %ecx, and value is in tHi/tLo
+               and those regs are legitimately modifiable. */
+            addInstr(env, X86Instr_Sh3232(Xsh_SHR, 0/*%cl*/, tHi, tLo));
+            addInstr(env, X86Instr_Sh32(Xsh_SHR, 0/*%cl*/, tHi));
+            addInstr(env, X86Instr_Test32(32, X86RM_Reg(hregX86_ECX())));
+            addInstr(env, X86Instr_CMov32(Xcc_NZ, X86RM_Reg(tHi), tLo));
+            addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(0), tTemp));
+            addInstr(env, X86Instr_CMov32(Xcc_NZ, X86RM_Reg(tTemp), tHi));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* F64 -> I64 */
+         /* Sigh, this is an almost exact copy of the F64 -> I32/I16
+            case.  Unfortunately I see no easy way to avoid the
+            duplication. */
+         case Iop_F64toI64S: {
+            HReg rf  = iselDblExpr(env, e->Iex.Binop.arg2);
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+
+            /* Used several times ... */
+            /* Careful ... this sharing is only safe because
+	       zero_esp/four_esp do not hold any registers which the
+	       register allocator could attempt to swizzle later. */
+            X86AMode* zero_esp = X86AMode_IR(0, hregX86_ESP());
+            X86AMode* four_esp = X86AMode_IR(4, hregX86_ESP());
+
+            /* rf now holds the value to be converted, and rrm holds
+               the rounding mode value, encoded as per the
+               IRRoundingMode enum.  The first thing to do is set the
+               FPU's rounding mode accordingly. */
+
+            /* Create a space for the format conversion. */
+            /* subl $8, %esp */
+            sub_from_esp(env, 8);
+
+            /* Set host rounding mode */
+            set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
+
+            /* gistll %rf, 0(%esp) */
+            addInstr(env, X86Instr_FpLdStI(False/*store*/, 8, rf, zero_esp));
+
+            /* movl 0(%esp), %dstLo */
+            /* movl 4(%esp), %dstHi */
+            addInstr(env, X86Instr_Alu32R(
+                             Xalu_MOV, X86RMI_Mem(zero_esp), tLo));
+            addInstr(env, X86Instr_Alu32R(
+                             Xalu_MOV, X86RMI_Mem(four_esp), tHi));
+
+            /* Restore default FPU rounding. */
+            set_FPU_rounding_default( env );
+
+            /* addl $8, %esp */
+            add_to_esp(env, 8);
+
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         case Iop_Add8x8:
+            fn = (HWord)h_generic_calc_Add8x8; goto binnish;
+         case Iop_Add16x4:
+            fn = (HWord)h_generic_calc_Add16x4; goto binnish;
+         case Iop_Add32x2:
+            fn = (HWord)h_generic_calc_Add32x2; goto binnish;
+
+         case Iop_Avg8Ux8:
+            fn = (HWord)h_generic_calc_Avg8Ux8; goto binnish;
+         case Iop_Avg16Ux4:
+            fn = (HWord)h_generic_calc_Avg16Ux4; goto binnish;
+
+         case Iop_CmpEQ8x8:
+            fn = (HWord)h_generic_calc_CmpEQ8x8; goto binnish;
+         case Iop_CmpEQ16x4:
+            fn = (HWord)h_generic_calc_CmpEQ16x4; goto binnish;
+         case Iop_CmpEQ32x2:
+            fn = (HWord)h_generic_calc_CmpEQ32x2; goto binnish;
+
+         case Iop_CmpGT8Sx8:
+            fn = (HWord)h_generic_calc_CmpGT8Sx8; goto binnish;
+         case Iop_CmpGT16Sx4:
+            fn = (HWord)h_generic_calc_CmpGT16Sx4; goto binnish;
+         case Iop_CmpGT32Sx2:
+            fn = (HWord)h_generic_calc_CmpGT32Sx2; goto binnish;
+
+         case Iop_InterleaveHI8x8:
+            fn = (HWord)h_generic_calc_InterleaveHI8x8; goto binnish;
+         case Iop_InterleaveLO8x8:
+            fn = (HWord)h_generic_calc_InterleaveLO8x8; goto binnish;
+         case Iop_InterleaveHI16x4:
+            fn = (HWord)h_generic_calc_InterleaveHI16x4; goto binnish;
+         case Iop_InterleaveLO16x4:
+            fn = (HWord)h_generic_calc_InterleaveLO16x4; goto binnish;
+         case Iop_InterleaveHI32x2:
+            fn = (HWord)h_generic_calc_InterleaveHI32x2; goto binnish;
+         case Iop_InterleaveLO32x2:
+            fn = (HWord)h_generic_calc_InterleaveLO32x2; goto binnish;
+         case Iop_CatOddLanes16x4:
+            fn = (HWord)h_generic_calc_CatOddLanes16x4; goto binnish;
+         case Iop_CatEvenLanes16x4:
+            fn = (HWord)h_generic_calc_CatEvenLanes16x4; goto binnish;
+         case Iop_Perm8x8:
+            fn = (HWord)h_generic_calc_Perm8x8; goto binnish;
+
+         case Iop_Max8Ux8:
+            fn = (HWord)h_generic_calc_Max8Ux8; goto binnish;
+         case Iop_Max16Sx4:
+            fn = (HWord)h_generic_calc_Max16Sx4; goto binnish;
+         case Iop_Min8Ux8:
+            fn = (HWord)h_generic_calc_Min8Ux8; goto binnish;
+         case Iop_Min16Sx4:
+            fn = (HWord)h_generic_calc_Min16Sx4; goto binnish;
+
+         case Iop_Mul16x4:
+            fn = (HWord)h_generic_calc_Mul16x4; goto binnish;
+         case Iop_Mul32x2:
+            fn = (HWord)h_generic_calc_Mul32x2; goto binnish;
+         case Iop_MulHi16Sx4:
+            fn = (HWord)h_generic_calc_MulHi16Sx4; goto binnish;
+         case Iop_MulHi16Ux4:
+            fn = (HWord)h_generic_calc_MulHi16Ux4; goto binnish;
+
+         case Iop_QAdd8Sx8:
+            fn = (HWord)h_generic_calc_QAdd8Sx8; goto binnish;
+         case Iop_QAdd16Sx4:
+            fn = (HWord)h_generic_calc_QAdd16Sx4; goto binnish;
+         case Iop_QAdd8Ux8:
+            fn = (HWord)h_generic_calc_QAdd8Ux8; goto binnish;
+         case Iop_QAdd16Ux4:
+            fn = (HWord)h_generic_calc_QAdd16Ux4; goto binnish;
+
+         case Iop_QNarrowBin32Sto16Sx4:
+            fn = (HWord)h_generic_calc_QNarrowBin32Sto16Sx4; goto binnish;
+         case Iop_QNarrowBin16Sto8Sx8:
+            fn = (HWord)h_generic_calc_QNarrowBin16Sto8Sx8; goto binnish;
+         case Iop_QNarrowBin16Sto8Ux8:
+            fn = (HWord)h_generic_calc_QNarrowBin16Sto8Ux8; goto binnish;
+         case Iop_NarrowBin16to8x8:
+            fn = (HWord)h_generic_calc_NarrowBin16to8x8; goto binnish;
+         case Iop_NarrowBin32to16x4:
+            fn = (HWord)h_generic_calc_NarrowBin32to16x4; goto binnish;
+
+         case Iop_QSub8Sx8:
+            fn = (HWord)h_generic_calc_QSub8Sx8; goto binnish;
+         case Iop_QSub16Sx4:
+            fn = (HWord)h_generic_calc_QSub16Sx4; goto binnish;
+         case Iop_QSub8Ux8:
+            fn = (HWord)h_generic_calc_QSub8Ux8; goto binnish;
+         case Iop_QSub16Ux4:
+            fn = (HWord)h_generic_calc_QSub16Ux4; goto binnish;
+
+         case Iop_Sub8x8:
+            fn = (HWord)h_generic_calc_Sub8x8; goto binnish;
+         case Iop_Sub16x4:
+            fn = (HWord)h_generic_calc_Sub16x4; goto binnish;
+         case Iop_Sub32x2:
+            fn = (HWord)h_generic_calc_Sub32x2; goto binnish;
+
+         binnish: {
+            /* Note: the following assumes all helpers are of
+               signature 
+                  ULong fn ( ULong, ULong ), and they are
+               not marked as regparm functions. 
+            */
+            HReg xLo, xHi, yLo, yHi;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
+            addInstr(env, X86Instr_Push(X86RMI_Reg(yHi)));
+            addInstr(env, X86Instr_Push(X86RMI_Reg(yLo)));
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+            addInstr(env, X86Instr_Push(X86RMI_Reg(xHi)));
+            addInstr(env, X86Instr_Push(X86RMI_Reg(xLo)));
+            addInstr(env, X86Instr_Call( Xcc_ALWAYS, (Addr32)fn,
+                                         0, mk_RetLoc_simple(RLPri_2Int) ));
+            add_to_esp(env, 4*4);
+            addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
+            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         case Iop_ShlN32x2:
+            fn = (HWord)h_generic_calc_ShlN32x2; goto shifty;
+         case Iop_ShlN16x4:
+            fn = (HWord)h_generic_calc_ShlN16x4; goto shifty;
+         case Iop_ShlN8x8:
+            fn = (HWord)h_generic_calc_ShlN8x8;  goto shifty;
+         case Iop_ShrN32x2:
+            fn = (HWord)h_generic_calc_ShrN32x2; goto shifty;
+         case Iop_ShrN16x4:
+            fn = (HWord)h_generic_calc_ShrN16x4; goto shifty;
+         case Iop_SarN32x2:
+            fn = (HWord)h_generic_calc_SarN32x2; goto shifty;
+         case Iop_SarN16x4:
+            fn = (HWord)h_generic_calc_SarN16x4; goto shifty;
+         case Iop_SarN8x8:
+            fn = (HWord)h_generic_calc_SarN8x8;  goto shifty;
+         shifty: {
+            /* Note: the following assumes all helpers are of
+               signature 
+                  ULong fn ( ULong, UInt ), and they are
+               not marked as regparm functions. 
+            */
+            HReg xLo, xHi;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            X86RMI* y = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+            addInstr(env, X86Instr_Push(y));
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
+            addInstr(env, X86Instr_Push(X86RMI_Reg(xHi)));
+            addInstr(env, X86Instr_Push(X86RMI_Reg(xLo)));
+            addInstr(env, X86Instr_Call( Xcc_ALWAYS, (Addr32)fn,
+                                         0, mk_RetLoc_simple(RLPri_2Int) ));
+            add_to_esp(env, 3*4);
+            addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
+            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         default: 
+            break;
+      }
+   } /* if (e->tag == Iex_Binop) */
+
+
+   /* --------- UNARY ops --------- */
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+
+         /* 32Sto64(e) */
+         case Iop_32Sto64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(src,tHi));
+            addInstr(env, mk_iMOVsd_RR(src,tLo));
+            addInstr(env, X86Instr_Sh32(Xsh_SAR, 31, tHi));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 32Uto64(e) */
+         case Iop_32Uto64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(src,tLo));
+            addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(0), tHi));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* 16Uto64(e) */
+         case Iop_16Uto64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(src,tLo));
+            addInstr(env, X86Instr_Alu32R(Xalu_AND,
+                                          X86RMI_Imm(0xFFFF), tLo));
+            addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(0), tHi));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* V128{HI}to64 */
+         case Iop_V128HIto64:
+         case Iop_V128to64: {
+            Int  off = e->Iex.Unop.op==Iop_V128HIto64 ? 8 : 0;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
+            X86AMode* esp0  = X86AMode_IR(0,     hregX86_ESP());
+            X86AMode* espLO = X86AMode_IR(off,   hregX86_ESP());
+            X86AMode* espHI = X86AMode_IR(off+4, hregX86_ESP());
+            sub_from_esp(env, 16);
+            addInstr(env, X86Instr_SseLdSt(False/*store*/, vec, esp0));
+            addInstr(env, X86Instr_Alu32R( Xalu_MOV, 
+                                           X86RMI_Mem(espLO), tLo ));
+            addInstr(env, X86Instr_Alu32R( Xalu_MOV, 
+                                           X86RMI_Mem(espHI), tHi ));
+            add_to_esp(env, 16);
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* could do better than this, but for now ... */
+         case Iop_1Sto64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            X86CondCode cond = iselCondCode(env, e->Iex.Unop.arg);
+            addInstr(env, X86Instr_Set32(cond,tLo));
+            addInstr(env, X86Instr_Sh32(Xsh_SHL, 31, tLo));
+            addInstr(env, X86Instr_Sh32(Xsh_SAR, 31, tLo));
+            addInstr(env, mk_iMOVsd_RR(tLo, tHi));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* Not64(e) */
+         case Iop_Not64: {
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            HReg sHi, sLo;
+            iselInt64Expr(&sHi, &sLo, env, e->Iex.Unop.arg);
+            addInstr(env, mk_iMOVsd_RR(sHi, tHi));
+            addInstr(env, mk_iMOVsd_RR(sLo, tLo));
+            addInstr(env, X86Instr_Unary32(Xun_NOT,tHi));
+            addInstr(env, X86Instr_Unary32(Xun_NOT,tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* Left64(e) */
+         case Iop_Left64: {
+            HReg yLo, yHi;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            /* yHi:yLo = arg */
+            iselInt64Expr(&yHi, &yLo, env, e->Iex.Unop.arg);
+            /* tLo = 0 - yLo, and set carry */
+            addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(0), tLo));
+            addInstr(env, X86Instr_Alu32R(Xalu_SUB, X86RMI_Reg(yLo), tLo));
+            /* tHi = 0 - yHi - carry */
+            addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(0), tHi));
+            addInstr(env, X86Instr_Alu32R(Xalu_SBB, X86RMI_Reg(yHi), tHi));
+            /* So now we have tHi:tLo = -arg.  To finish off, or 'arg'
+               back in, so as to give the final result 
+               tHi:tLo = arg | -arg. */
+            addInstr(env, X86Instr_Alu32R(Xalu_OR, X86RMI_Reg(yLo), tLo));
+            addInstr(env, X86Instr_Alu32R(Xalu_OR, X86RMI_Reg(yHi), tHi));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         /* --- patterns rooted at: CmpwNEZ64 --- */
+
+         /* CmpwNEZ64(e) */
+         case Iop_CmpwNEZ64: {
+
+         DECLARE_PATTERN(p_CmpwNEZ64_Or64);
+         DEFINE_PATTERN(p_CmpwNEZ64_Or64,
+                        unop(Iop_CmpwNEZ64,binop(Iop_Or64,bind(0),bind(1))));
+         if (matchIRExpr(&mi, p_CmpwNEZ64_Or64, e)) {
+            /* CmpwNEZ64(Or64(x,y)) */
+            HReg xHi,xLo,yHi,yLo;
+            HReg xBoth = newVRegI(env);
+            HReg merged = newVRegI(env);
+            HReg tmp2 = newVRegI(env);
+
+            iselInt64Expr(&xHi,&xLo, env, mi.bindee[0]);
+            addInstr(env, mk_iMOVsd_RR(xHi,xBoth));
+            addInstr(env, X86Instr_Alu32R(Xalu_OR,
+                                          X86RMI_Reg(xLo),xBoth));
+
+            iselInt64Expr(&yHi,&yLo, env, mi.bindee[1]);
+            addInstr(env, mk_iMOVsd_RR(yHi,merged));
+            addInstr(env, X86Instr_Alu32R(Xalu_OR,
+                                          X86RMI_Reg(yLo),merged));
+            addInstr(env, X86Instr_Alu32R(Xalu_OR,
+                                             X86RMI_Reg(xBoth),merged));
+
+            /* tmp2 = (merged | -merged) >>s 31 */
+            addInstr(env, mk_iMOVsd_RR(merged,tmp2));
+            addInstr(env, X86Instr_Unary32(Xun_NEG,tmp2));
+            addInstr(env, X86Instr_Alu32R(Xalu_OR,
+                                          X86RMI_Reg(merged), tmp2));
+            addInstr(env, X86Instr_Sh32(Xsh_SAR, 31, tmp2));
+            *rHi = tmp2;
+            *rLo = tmp2;
+            return;
+         } else {
+            /* CmpwNEZ64(e) */
+            HReg srcLo, srcHi;
+            HReg tmp1  = newVRegI(env);
+            HReg tmp2  = newVRegI(env);
+            /* srcHi:srcLo = arg */
+            iselInt64Expr(&srcHi, &srcLo, env, e->Iex.Unop.arg);
+            /* tmp1 = srcHi | srcLo */
+            addInstr(env, mk_iMOVsd_RR(srcHi,tmp1));
+            addInstr(env, X86Instr_Alu32R(Xalu_OR,
+                                          X86RMI_Reg(srcLo), tmp1));
+            /* tmp2 = (tmp1 | -tmp1) >>s 31 */
+            addInstr(env, mk_iMOVsd_RR(tmp1,tmp2));
+            addInstr(env, X86Instr_Unary32(Xun_NEG,tmp2));
+            addInstr(env, X86Instr_Alu32R(Xalu_OR,
+                                          X86RMI_Reg(tmp1), tmp2));
+            addInstr(env, X86Instr_Sh32(Xsh_SAR, 31, tmp2));
+            *rHi = tmp2;
+            *rLo = tmp2;
+            return;
+         }
+         }
+
+         /* ReinterpF64asI64(e) */
+         /* Given an IEEE754 double, produce an I64 with the same bit
+            pattern. */
+         case Iop_ReinterpF64asI64: {
+            HReg rf   = iselDblExpr(env, e->Iex.Unop.arg);
+            HReg tLo  = newVRegI(env);
+            HReg tHi  = newVRegI(env);
+            X86AMode* zero_esp = X86AMode_IR(0, hregX86_ESP());
+            X86AMode* four_esp = X86AMode_IR(4, hregX86_ESP());
+            /* paranoia */
+            set_FPU_rounding_default(env);
+            /* subl $8, %esp */
+            sub_from_esp(env, 8);
+            /* gstD %rf, 0(%esp) */
+            addInstr(env,
+                     X86Instr_FpLdSt(False/*store*/, 8, rf, zero_esp));
+            /* movl 0(%esp), %tLo */
+            addInstr(env, 
+                     X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(zero_esp), tLo));
+            /* movl 4(%esp), %tHi */
+            addInstr(env, 
+                     X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(four_esp), tHi));
+            /* addl $8, %esp */
+            add_to_esp(env, 8);
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         case Iop_CmpNEZ32x2:
+            fn = (HWord)h_generic_calc_CmpNEZ32x2; goto unish;
+         case Iop_CmpNEZ16x4:
+            fn = (HWord)h_generic_calc_CmpNEZ16x4; goto unish;
+         case Iop_CmpNEZ8x8:
+            fn = (HWord)h_generic_calc_CmpNEZ8x8; goto unish;
+         unish: {
+            /* Note: the following assumes all helpers are of
+               signature 
+                  ULong fn ( ULong ), and they are
+               not marked as regparm functions. 
+            */
+            HReg xLo, xHi;
+            HReg tLo = newVRegI(env);
+            HReg tHi = newVRegI(env);
+            iselInt64Expr(&xHi, &xLo, env, e->Iex.Unop.arg);
+            addInstr(env, X86Instr_Push(X86RMI_Reg(xHi)));
+            addInstr(env, X86Instr_Push(X86RMI_Reg(xLo)));
+            addInstr(env, X86Instr_Call( Xcc_ALWAYS, (Addr32)fn,
+                                         0, mk_RetLoc_simple(RLPri_2Int) ));
+            add_to_esp(env, 2*4);
+            addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
+            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), tLo));
+            *rHi = tHi;
+            *rLo = tLo;
+            return;
+         }
+
+         default: 
+            break;
+      }
+   } /* if (e->tag == Iex_Unop) */
+
+
+   /* --------- CCALL --------- */
+   if (e->tag == Iex_CCall) {
+      HReg tLo = newVRegI(env);
+      HReg tHi = newVRegI(env);
+
+      /* Marshal args, do the call, clear stack. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, NULL/*guard*/,
+                    e->Iex.CCall.cee,
+                    e->Iex.CCall.retty, e->Iex.CCall.args );
+      vassert(is_sane_RetLoc(rloc));
+      vassert(rloc.pri == RLPri_2Int);
+      vassert(addToSp == 0);
+      /* */
+
+      addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
+      addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), tLo));
+      *rHi = tHi;
+      *rLo = tLo;
+      return;
+   }
+
+   ppIRExpr(e);
+   vpanic("iselInt64Expr");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (32 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Nothing interesting here; really just wrappers for
+   64-bit stuff. */
+
+static HReg iselFltExpr ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check the result before
+      handing it to the caller. */
+   HReg result = iselFltExpr_wrk( env, e );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   /* Even for a 32-bit float the value lives in an FPU register of
+      class HRcFlt64; it must also still be virtual, since register
+      allocation has not yet run. */
+   vassert(hregClass(result) == HRcFlt64); /* yes, really Flt64 */
+   vassert(hregIsVirtual(result));
+   return result;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   /* Select instructions computing a 32-bit float (F32) expression
+      into a new virtual FPU register, which is returned.  Any
+      expression form not handled below falls through to vpanic. */
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(ty == Ity_F32);
+
+   /* Reference to an IR temporary: return its existing binding. */
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   /* Little-endian 4-byte memory load -> x87 FP load of size 4. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      X86AMode* am;
+      HReg res = newVRegF(env);
+      vassert(e->Iex.Load.ty == Ity_F32);
+      am = iselIntExpr_AMode(env, e->Iex.Load.addr);
+      addInstr(env, X86Instr_FpLdSt(True/*load*/, 4, res, am));
+      return res;
+   }
+
+   if (e->tag == Iex_Binop
+       && e->Iex.Binop.op == Iop_F64toF32) {
+      /* Although the result is still held in a standard FPU register,
+         we need to round it to reflect the loss of accuracy/range
+         entailed in casting it to a 32-bit float. */
+      /* arg1 carries the IR rounding mode: install it around the
+         conversion and restore the default afterwards. */
+      HReg dst = newVRegF(env);
+      HReg src = iselDblExpr(env, e->Iex.Binop.arg2);
+      set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
+      addInstr(env, X86Instr_Fp64to32(src,dst));
+      set_FPU_rounding_default( env );
+      return dst;
+   }
+
+   /* Read of an F32 slot in the guest state: 4-byte FP load at the
+      given offset from the guest state pointer (%ebp). */
+   if (e->tag == Iex_Get) {
+      X86AMode* am = X86AMode_IR( e->Iex.Get.offset,
+                                  hregX86_EBP() );
+      HReg res = newVRegF(env);
+      addInstr(env, X86Instr_FpLdSt( True/*load*/, 4, res, am ));
+      return res;
+   }
+
+   if (e->tag == Iex_Unop
+       && e->Iex.Unop.op == Iop_ReinterpI32asF32) {
+       /* Given an I32, produce an IEEE754 float with the same bit
+          pattern. */
+      /* Bounce the bits through memory: push the I32, FP-load those
+         4 bytes back off the stack, then pop the stack. */
+      HReg    dst = newVRegF(env);
+      X86RMI* rmi = iselIntExpr_RMI(env, e->Iex.Unop.arg);
+      /* paranoia */
+      addInstr(env, X86Instr_Push(rmi));
+      addInstr(env, X86Instr_FpLdSt(
+                       True/*load*/, 4, dst, 
+                       X86AMode_IR(0, hregX86_ESP())));
+      add_to_esp(env, 4);
+      return dst;
+   }
+
+   if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_RoundF32toInt) {
+      HReg rf  = iselFltExpr(env, e->Iex.Binop.arg2);
+      HReg dst = newVRegF(env);
+
+      /* rf now holds the value to be rounded.  The first thing to do
+         is set the FPU's rounding mode accordingly. */
+
+      /* Set host rounding mode */
+      set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
+
+      /* grndint %rf, %dst */
+      addInstr(env, X86Instr_FpUnary(Xfp_ROUND, rf, dst));
+
+      /* Restore default FPU rounding. */
+      set_FPU_rounding_default( env );
+
+      return dst;
+   }
+
+   /* No rule matched: show the offending expression and give up. */
+   ppIRExpr(e);
+   vpanic("iselFltExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Floating point expressions (64 bit)         ---*/
+/*---------------------------------------------------------*/
+
+/* Compute a 64-bit floating point value into a register, the identity
+   of which is returned.  As with iselIntExpr_R, the reg may be either
+   real or virtual; in any case it must not be changed by subsequent
+   code emitted by the caller.  */
+
+/* IEEE 754 formats.  From http://www.freesoft.org/CIE/RFC/1832/32.htm:
+
+    Type                  S (1 bit)   E (11 bits)   F (52 bits)
+    ----                  ---------   -----------   -----------
+    signalling NaN        u           2047 (max)    .0uuuuu---u
+                                                    (with at least
+                                                     one 1 bit)
+    quiet NaN             u           2047 (max)    .1uuuuu---u
+
+    negative infinity     1           2047 (max)    .000000---0
+
+    positive infinity     0           2047 (max)    .000000---0
+
+    negative zero         1           0             .000000---0
+
+    positive zero         0           0             .000000---0
+*/
+
+static HReg iselDblExpr ( ISelEnv* env, IRExpr* e )
+{
+   /* Run the worker, then check the invariants every caller relies
+      on: an FPU-class (HRcFlt64) register that is still virtual,
+      register allocation not having happened yet. */
+   HReg result = iselDblExpr_wrk( env, e );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(result) == HRcFlt64);
+   vassert(hregIsVirtual(result));
+   return result;
+}
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+   /* Select instructions computing a 64-bit float (F64) expression
+      into a new virtual FPU register, which is returned.  Handles
+      temps, constants, loads, guest-state reads (Get/GetI), the x87
+      arithmetic/transcendental ops, conversions, reinterpret casts
+      and ITE; anything else falls through to vpanic. */
+   IRType ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_F64);
+
+   /* Reference to an IR temporary: return its existing binding. */
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Const) {
+      /* Materialise a 64-bit literal: view it as two 32-bit words,
+         push both onto the stack, FP-load 8 bytes from (%esp), then
+         pop the stack again. */
+      union { UInt u32x2[2]; ULong u64; Double f64; } u;
+      HReg freg = newVRegF(env);
+      vassert(sizeof(u) == 8);
+      vassert(sizeof(u.u64) == 8);
+      vassert(sizeof(u.f64) == 8);
+      vassert(sizeof(u.u32x2) == 8);
+
+      if (e->Iex.Const.con->tag == Ico_F64) {
+         u.f64 = e->Iex.Const.con->Ico.F64;
+      }
+      else if (e->Iex.Const.con->tag == Ico_F64i) {
+         u.u64 = e->Iex.Const.con->Ico.F64i;
+      }
+      else
+         vpanic("iselDblExpr(x86): const");
+
+      /* Push the high word first so the value ends up little-endian
+         in memory, low word at 0(%esp). */
+      addInstr(env, X86Instr_Push(X86RMI_Imm(u.u32x2[1])));
+      addInstr(env, X86Instr_Push(X86RMI_Imm(u.u32x2[0])));
+      addInstr(env, X86Instr_FpLdSt(True/*load*/, 8, freg, 
+                                    X86AMode_IR(0, hregX86_ESP())));
+      add_to_esp(env, 8);
+      return freg;
+   }
+
+   /* Little-endian 8-byte memory load -> x87 FP load of size 8. */
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      X86AMode* am;
+      HReg res = newVRegF(env);
+      vassert(e->Iex.Load.ty == Ity_F64);
+      am = iselIntExpr_AMode(env, e->Iex.Load.addr);
+      addInstr(env, X86Instr_FpLdSt(True/*load*/, 8, res, am));
+      return res;
+   }
+
+   /* Read of an F64 slot in the guest state: 8-byte FP load at the
+      given offset from the guest state pointer (%ebp). */
+   if (e->tag == Iex_Get) {
+      X86AMode* am = X86AMode_IR( e->Iex.Get.offset,
+                                  hregX86_EBP() );
+      HReg res = newVRegF(env);
+      addInstr(env, X86Instr_FpLdSt( True/*load*/, 8, res, am ));
+      return res;
+   }
+
+   /* Indexed guest-state read: compute the array element's address
+      via genGuestArrayOffset, then 8-byte FP load from it. */
+   if (e->tag == Iex_GetI) {
+      X86AMode* am 
+         = genGuestArrayOffset(
+              env, e->Iex.GetI.descr, 
+                   e->Iex.GetI.ix, e->Iex.GetI.bias );
+      HReg res = newVRegF(env);
+      addInstr(env, X86Instr_FpLdSt( True/*load*/, 8, res, am ));
+      return res;
+   }
+
+   /* Three-argument FP ops: arg1 is the IR rounding mode (currently
+      ignored -- see XXXROUNDINGFIXME below), arg2/arg3 the operands. */
+   if (e->tag == Iex_Triop) {
+      X86FpOp fpop = Xfp_INVALID;
+      IRTriop *triop = e->Iex.Triop.details;
+      switch (triop->op) {
+         case Iop_AddF64:    fpop = Xfp_ADD; break;
+         case Iop_SubF64:    fpop = Xfp_SUB; break;
+         case Iop_MulF64:    fpop = Xfp_MUL; break;
+         case Iop_DivF64:    fpop = Xfp_DIV; break;
+         case Iop_ScaleF64:  fpop = Xfp_SCALE; break;
+         case Iop_Yl2xF64:   fpop = Xfp_YL2X; break;
+         case Iop_Yl2xp1F64: fpop = Xfp_YL2XP1; break;
+         case Iop_AtanF64:   fpop = Xfp_ATAN; break;
+         case Iop_PRemF64:   fpop = Xfp_PREM; break;
+         case Iop_PRem1F64:  fpop = Xfp_PREM1; break;
+         default: break;
+      }
+      if (fpop != Xfp_INVALID) {
+         HReg res  = newVRegF(env);
+         HReg srcL = iselDblExpr(env, triop->arg2);
+         HReg srcR = iselDblExpr(env, triop->arg3);
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, X86Instr_FpBinary(fpop,srcL,srcR,res));
+         /* For ops other than the basic four arithmetic ones, force
+            the result back to F64 precision -- presumably because the
+            FPU may otherwise leave it at extended precision; see
+            roundToF64. */
+	 if (fpop != Xfp_ADD && fpop != Xfp_SUB 
+	     && fpop != Xfp_MUL && fpop != Xfp_DIV)
+            roundToF64(env, res);
+         return res;
+      }
+   }
+
+   if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_RoundF64toInt) {
+      HReg rf  = iselDblExpr(env, e->Iex.Binop.arg2);
+      HReg dst = newVRegF(env);
+
+      /* rf now holds the value to be rounded.  The first thing to do
+         is set the FPU's rounding mode accordingly. */
+
+      /* Set host rounding mode */
+      set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
+
+      /* grndint %rf, %dst */
+      addInstr(env, X86Instr_FpUnary(Xfp_ROUND, rf, dst));
+
+      /* Restore default FPU rounding. */
+      set_FPU_rounding_default( env );
+
+      return dst;
+   }
+
+   /* Signed I64 -> F64: push the 64-bit integer (hi word first, so
+      it is little-endian in memory), then do an 8-byte FP
+      integer-load under the rounding mode given by arg1. */
+   if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_I64StoF64) {
+      HReg dst = newVRegF(env);
+      HReg rHi,rLo;
+      iselInt64Expr( &rHi, &rLo, env, e->Iex.Binop.arg2);
+      addInstr(env, X86Instr_Push(X86RMI_Reg(rHi)));
+      addInstr(env, X86Instr_Push(X86RMI_Reg(rLo)));
+
+      /* Set host rounding mode */
+      set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
+
+      addInstr(env, X86Instr_FpLdStI(
+                       True/*load*/, 8, dst, 
+                       X86AMode_IR(0, hregX86_ESP())));
+
+      /* Restore default FPU rounding. */
+      set_FPU_rounding_default( env );
+
+      add_to_esp(env, 8);
+      return dst;
+   }
+
+   /* Two-argument forms of the x87 transcendental/unary ops: arg1 is
+      the IR rounding mode (currently ignored), arg2 the operand. */
+   if (e->tag == Iex_Binop) {
+      X86FpOp fpop = Xfp_INVALID;
+      switch (e->Iex.Binop.op) {
+         case Iop_SinF64:  fpop = Xfp_SIN; break;
+         case Iop_CosF64:  fpop = Xfp_COS; break;
+         case Iop_TanF64:  fpop = Xfp_TAN; break;
+         case Iop_2xm1F64: fpop = Xfp_2XM1; break;
+         case Iop_SqrtF64: fpop = Xfp_SQRT; break;
+         default: break;
+      }
+      if (fpop != Xfp_INVALID) {
+         HReg res = newVRegF(env);
+         HReg src = iselDblExpr(env, e->Iex.Binop.arg2);
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         /* Note that X86Instr_FpUnary(Xfp_TAN,..) sets the condition
+            codes.  I don't think that matters, since this insn
+            selector never generates such an instruction intervening
+            between an flag-setting instruction and a flag-using
+            instruction. */
+         addInstr(env, X86Instr_FpUnary(fpop,src,res));
+	 if (fpop != Xfp_SQRT
+             && fpop != Xfp_NEG && fpop != Xfp_ABS)
+            roundToF64(env, res);
+         return res;
+      }
+   }
+
+   /* Simple unary FP ops (negate, absolute value). */
+   if (e->tag == Iex_Unop) {
+      X86FpOp fpop = Xfp_INVALID;
+      switch (e->Iex.Unop.op) {
+         case Iop_NegF64:  fpop = Xfp_NEG; break;
+         case Iop_AbsF64:  fpop = Xfp_ABS; break;
+         default: break;
+      }
+      if (fpop != Xfp_INVALID) {
+         HReg res = newVRegF(env);
+         HReg src = iselDblExpr(env, e->Iex.Unop.arg);
+         addInstr(env, X86Instr_FpUnary(fpop,src,res));
+         /* No need to do roundToF64(env,res) for Xfp_NEG or Xfp_ABS,
+            but might need to do that for other unary ops. */
+         return res;
+      }
+   }
+
+   /* Remaining unary conversions. */
+   if (e->tag == Iex_Unop) {
+      switch (e->Iex.Unop.op) {
+         case Iop_I32StoF64: {
+            /* Push the I32 and do a 4-byte FP integer-load (fild)
+               from the stack, under the default rounding mode. */
+            HReg dst = newVRegF(env);
+            HReg ri  = iselIntExpr_R(env, e->Iex.Unop.arg);
+            addInstr(env, X86Instr_Push(X86RMI_Reg(ri)));
+            set_FPU_rounding_default(env);
+            addInstr(env, X86Instr_FpLdStI(
+                             True/*load*/, 4, dst, 
+                             X86AMode_IR(0, hregX86_ESP())));
+	    add_to_esp(env, 4);
+            return dst;
+         }
+         case Iop_ReinterpI64asF64: {
+            /* Given an I64, produce an IEEE754 double with the same
+               bit pattern. */
+            /* Bounce the bits through memory: push hi then lo word,
+               FP-load 8 bytes from (%esp), pop the stack. */
+            HReg dst = newVRegF(env);
+            HReg rHi, rLo;
+	    iselInt64Expr( &rHi, &rLo, env, e->Iex.Unop.arg);
+            /* paranoia */
+            set_FPU_rounding_default(env);
+            addInstr(env, X86Instr_Push(X86RMI_Reg(rHi)));
+            addInstr(env, X86Instr_Push(X86RMI_Reg(rLo)));
+            addInstr(env, X86Instr_FpLdSt(
+                             True/*load*/, 8, dst, 
+                             X86AMode_IR(0, hregX86_ESP())));
+	    add_to_esp(env, 8);
+            return dst;
+	 }
+         case Iop_F32toF64: {
+            /* this is a no-op */
+            /* F32 values are already held at F64 precision in the
+               FPU register, so widening needs no code. */
+            HReg res = iselFltExpr(env, e->Iex.Unop.arg);
+            return res;
+	 }
+         default: 
+            break;
+      }
+   }
+
+   /* --------- MULTIPLEX --------- */
+   if (e->tag == Iex_ITE) { // VFD
+     if (ty == Ity_F64
+         && typeOfIRExpr(env->type_env,e->Iex.ITE.cond) == Ity_I1) {
+        HReg r1  = iselDblExpr(env, e->Iex.ITE.iftrue);
+        HReg r0  = iselDblExpr(env, e->Iex.ITE.iffalse);
+        HReg dst = newVRegF(env);
+        /* dst = iftrue; then, under the negated condition (cc ^ 1
+           flips an x86 condition code), overwrite with iffalse. */
+        addInstr(env, X86Instr_FpUnary(Xfp_MOV,r1,dst));
+        X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+        addInstr(env, X86Instr_FpCMov(cc ^ 1, r0, dst));
+        return dst;
+      }
+   }
+
+   /* No rule matched: show the offending expression and give up. */
+   ppIRExpr(e);
+   vpanic("iselDblExpr_wrk");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: SIMD (Vector) expressions, 128 bit.         ---*/
+/*---------------------------------------------------------*/
+
+static HReg iselVecExpr ( ISelEnv* env, IRExpr* e )
+{
+   /* Delegate to the worker, then sanity-check the result: it must
+      be a 128-bit SSE-class register, and still virtual, since
+      register allocation has not yet run. */
+   HReg result = iselVecExpr_wrk( env, e );
+#  if 0
+   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
+#  endif
+   vassert(hregClass(result) == HRcVec128);
+   vassert(hregIsVirtual(result));
+   return result;
+}
+
+
+/* DO NOT CALL THIS DIRECTLY */
+static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e )
+{
+
+#  define REQUIRE_SSE1                                    \
+      do { if (env->hwcaps == 0/*baseline, no sse*/       \
+               ||  env->hwcaps == VEX_HWCAPS_X86_MMXEXT /*Integer SSE*/) \
+              goto vec_fail;                              \
+      } while (0)
+
+#  define REQUIRE_SSE2                                    \
+      do { if (0 == (env->hwcaps & VEX_HWCAPS_X86_SSE2))  \
+              goto vec_fail;                              \
+      } while (0)
+
+#  define SSE2_OR_ABOVE                                   \
+       (env->hwcaps & VEX_HWCAPS_X86_SSE2)
+
+   HWord     fn = 0; /* address of helper fn, if required */
+   MatchInfo mi;
+   Bool      arg1isEReg = False;
+   X86SseOp  op = Xsse_INVALID;
+   IRType    ty = typeOfIRExpr(env->type_env,e);
+   vassert(e);
+   vassert(ty == Ity_V128);
+
+   REQUIRE_SSE1;
+
+   if (e->tag == Iex_RdTmp) {
+      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
+   }
+
+   if (e->tag == Iex_Get) {
+      HReg dst = newVRegV(env);
+      addInstr(env, X86Instr_SseLdSt(
+                       True/*load*/, 
+                       dst,
+                       X86AMode_IR(e->Iex.Get.offset, hregX86_EBP())
+                    )
+              );
+      return dst;
+   }
+
+   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
+      HReg      dst = newVRegV(env);
+      X86AMode* am  = iselIntExpr_AMode(env, e->Iex.Load.addr);
+      addInstr(env, X86Instr_SseLdSt( True/*load*/, dst, am ));
+      return dst;
+   }
+
+   if (e->tag == Iex_Const) {
+      HReg dst = newVRegV(env);
+      vassert(e->Iex.Const.con->tag == Ico_V128);
+      addInstr(env, X86Instr_SseConst(e->Iex.Const.con->Ico.V128, dst));
+      return dst;
+   }
+
+   if (e->tag == Iex_Unop) {
+
+   if (SSE2_OR_ABOVE) { 
+      /* 64UtoV128(LDle:I64(addr)) */
+      DECLARE_PATTERN(p_zwiden_load64);
+      DEFINE_PATTERN(p_zwiden_load64,
+                     unop(Iop_64UtoV128, 
+                          IRExpr_Load(Iend_LE,Ity_I64,bind(0))));
+      if (matchIRExpr(&mi, p_zwiden_load64, e)) {
+         X86AMode* am = iselIntExpr_AMode(env, mi.bindee[0]);
+         HReg dst = newVRegV(env);
+         addInstr(env, X86Instr_SseLdzLO(8, dst, am));
+         return dst;
+      }
+   }
+
+   switch (e->Iex.Unop.op) {
+
+      case Iop_NotV128: {
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
+         return do_sse_Not128(env, arg);
+      }
+
+      case Iop_CmpNEZ64x2: {
+         /* We can use SSE2 instructions for this. */
+         /* Ideally, we want to do a 64Ix2 comparison against zero of
+            the operand.  Problem is no such insn exists.  Solution
+            therefore is to do a 32Ix4 comparison instead, and bitwise-
+            negate (NOT) the result.  Let a,b,c,d be 32-bit lanes, and 
+            let the not'd result of this initial comparison be a:b:c:d.
+            What we need to compute is (a|b):(a|b):(c|d):(c|d).  So, use
+            pshufd to create a value b:a:d:c, and OR that with a:b:c:d,
+            giving the required result.
+
+            The required selection sequence is 2,3,0,1, which
+            according to Intel's documentation means the pshufd
+            literal value is 0xB1, that is, 
+            (2 << 6) | (3 << 4) | (0 << 2) | (1 << 0) 
+         */
+         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg tmp  = newVRegV(env);
+         HReg dst  = newVRegV(env);
+         REQUIRE_SSE2;
+         addInstr(env, X86Instr_SseReRg(Xsse_XOR, tmp, tmp));
+         addInstr(env, X86Instr_SseReRg(Xsse_CMPEQ32, arg, tmp));
+         tmp = do_sse_Not128(env, tmp);
+         addInstr(env, X86Instr_SseShuf(0xB1, tmp, dst));
+         addInstr(env, X86Instr_SseReRg(Xsse_OR, tmp, dst));
+         return dst;
+      }
+
+      case Iop_CmpNEZ32x4: {
+         /* Sigh, we have to generate lousy code since this has to
+            work on SSE1 hosts */
+         /* basically, the idea is: for each lane:
+               movl lane, %r ; negl %r   (now CF = lane==0 ? 0 : 1)
+               sbbl %r, %r               (now %r = 1Sto32(CF))
+               movl %r, lane
+         */
+         Int       i;
+         X86AMode* am;
+         X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
+         HReg      arg  = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg      dst  = newVRegV(env);
+         HReg      r32  = newVRegI(env);
+         sub_from_esp(env, 16);
+         addInstr(env, X86Instr_SseLdSt(False/*store*/, arg, esp0));
+         for (i = 0; i < 4; i++) {
+            am = X86AMode_IR(i*4, hregX86_ESP());
+            addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(am), r32));
+            addInstr(env, X86Instr_Unary32(Xun_NEG, r32));
+            addInstr(env, X86Instr_Alu32R(Xalu_SBB, X86RMI_Reg(r32), r32));
+            addInstr(env, X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(r32), am));
+         }
+         addInstr(env, X86Instr_SseLdSt(True/*load*/, dst, esp0));
+         add_to_esp(env, 16);
+         return dst;
+      }
+
+      case Iop_CmpNEZ8x16:
+      case Iop_CmpNEZ16x8: {
+         /* We can use SSE2 instructions for this. */
+         HReg arg;
+         HReg vec0 = newVRegV(env);
+         HReg vec1 = newVRegV(env);
+         HReg dst  = newVRegV(env);
+         X86SseOp cmpOp 
+            = e->Iex.Unop.op==Iop_CmpNEZ16x8 ? Xsse_CMPEQ16
+                                             : Xsse_CMPEQ8;
+         REQUIRE_SSE2;
+         addInstr(env, X86Instr_SseReRg(Xsse_XOR, vec0, vec0));
+         addInstr(env, mk_vMOVsd_RR(vec0, vec1));
+         addInstr(env, X86Instr_Sse32Fx4(Xsse_CMPEQF, vec1, vec1));
+         /* defer arg computation to here so as to give CMPEQF as long
+            as possible to complete */
+         arg = iselVecExpr(env, e->Iex.Unop.arg);
+         /* vec0 is all 0s; vec1 is all 1s */
+         addInstr(env, mk_vMOVsd_RR(arg, dst));
+         /* 16x8 or 8x16 comparison == */
+         addInstr(env, X86Instr_SseReRg(cmpOp, vec0, dst));
+         /* invert result */
+         addInstr(env, X86Instr_SseReRg(Xsse_XOR, vec1, dst));
+         return dst;
+      }
+
+      case Iop_RecipEst32Fx4: op = Xsse_RCPF;   goto do_32Fx4_unary;
+      case Iop_RSqrtEst32Fx4: op = Xsse_RSQRTF; goto do_32Fx4_unary;
+      do_32Fx4_unary:
+      {
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegV(env);
+         addInstr(env, X86Instr_Sse32Fx4(op, arg, dst));
+         return dst;
+      }
+
+      case Iop_RecipEst32F0x4: op = Xsse_RCPF;   goto do_32F0x4_unary;
+      case Iop_RSqrtEst32F0x4: op = Xsse_RSQRTF; goto do_32F0x4_unary;
+      case Iop_Sqrt32F0x4:     op = Xsse_SQRTF;  goto do_32F0x4_unary;
+      do_32F0x4_unary:
+      {
+         /* A bit subtle.  We have to copy the arg to the result
+            register first, because actually doing the SSE scalar insn
+            leaves the upper 3/4 of the destination register
+            unchanged.  Whereas the required semantics of these
+            primops is that the upper 3/4 is simply copied in from the
+            argument. */
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(arg, dst));
+         addInstr(env, X86Instr_Sse32FLo(op, arg, dst));
+         return dst;
+      }
+
+      case Iop_Sqrt64F0x2:  op = Xsse_SQRTF;  goto do_64F0x2_unary;
+      do_64F0x2_unary:
+      {
+         /* A bit subtle.  We have to copy the arg to the result
+            register first, because actually doing the SSE scalar insn
+            leaves the upper half of the destination register
+            unchanged.  Whereas the required semantics of these
+            primops is that the upper half is simply copied in from the
+            argument. */
+         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
+         HReg dst = newVRegV(env);
+         REQUIRE_SSE2;
+         addInstr(env, mk_vMOVsd_RR(arg, dst));
+         addInstr(env, X86Instr_Sse64FLo(op, arg, dst));
+         return dst;
+      }
+
+      case Iop_32UtoV128: {
+         HReg      dst  = newVRegV(env);
+         X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
+         X86RMI*   rmi  = iselIntExpr_RMI(env, e->Iex.Unop.arg);
+         addInstr(env, X86Instr_Push(rmi));
+	 addInstr(env, X86Instr_SseLdzLO(4, dst, esp0));
+         add_to_esp(env, 4);
+         return dst;
+      }
+
+      case Iop_64UtoV128: {
+         HReg      rHi, rLo;
+         HReg      dst  = newVRegV(env);
+         X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
+         iselInt64Expr(&rHi, &rLo, env, e->Iex.Unop.arg);
+         addInstr(env, X86Instr_Push(X86RMI_Reg(rHi)));
+         addInstr(env, X86Instr_Push(X86RMI_Reg(rLo)));
+	 addInstr(env, X86Instr_SseLdzLO(8, dst, esp0));
+         add_to_esp(env, 8);
+         return dst;
+      }
+
+      default:
+         break;
+   } /* switch (e->Iex.Unop.op) */
+   } /* if (e->tag == Iex_Unop) */
+
+   if (e->tag == Iex_Binop) {
+   switch (e->Iex.Binop.op) {
+
+      case Iop_Sqrt64Fx2:
+         REQUIRE_SSE2;
+         /* fallthrough */
+      case Iop_Sqrt32Fx4: {
+         /* :: (rmode, vec) -> vec */
+         HReg arg = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, (e->Iex.Binop.op == Iop_Sqrt64Fx2 
+                           ? X86Instr_Sse64Fx2 : X86Instr_Sse32Fx4)
+                       (Xsse_SQRTF, arg, dst));
+         return dst;
+      }
+
+      case Iop_SetV128lo32: {
+         HReg dst = newVRegV(env);
+         HReg srcV = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg srcI = iselIntExpr_R(env, e->Iex.Binop.arg2);
+         X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
+         sub_from_esp(env, 16);
+         addInstr(env, X86Instr_SseLdSt(False/*store*/, srcV, esp0));
+         addInstr(env, X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(srcI), esp0));
+         addInstr(env, X86Instr_SseLdSt(True/*load*/, dst, esp0));
+         add_to_esp(env, 16);
+         return dst;
+      }
+
+      case Iop_SetV128lo64: {
+         HReg dst = newVRegV(env);
+         HReg srcV = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg srcIhi, srcIlo;
+         X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
+         X86AMode* esp4 = advance4(esp0);
+         iselInt64Expr(&srcIhi, &srcIlo, env, e->Iex.Binop.arg2);
+         sub_from_esp(env, 16);
+         addInstr(env, X86Instr_SseLdSt(False/*store*/, srcV, esp0));
+         addInstr(env, X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(srcIlo), esp0));
+         addInstr(env, X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(srcIhi), esp4));
+         addInstr(env, X86Instr_SseLdSt(True/*load*/, dst, esp0));
+         add_to_esp(env, 16);
+         return dst;
+      }
+
+      case Iop_64HLtoV128: {
+         HReg r3, r2, r1, r0;
+         X86AMode* esp0  = X86AMode_IR(0, hregX86_ESP());
+         X86AMode* esp4  = advance4(esp0);
+         X86AMode* esp8  = advance4(esp4);
+         X86AMode* esp12 = advance4(esp8);
+         HReg dst = newVRegV(env);
+	 /* do this via the stack (easy, convenient, etc) */
+         sub_from_esp(env, 16);
+         /* Do the less significant 64 bits */
+         iselInt64Expr(&r1, &r0, env, e->Iex.Binop.arg2);
+         addInstr(env, X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(r0), esp0));
+         addInstr(env, X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(r1), esp4));
+         /* Do the more significant 64 bits */
+         iselInt64Expr(&r3, &r2, env, e->Iex.Binop.arg1);
+         addInstr(env, X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(r2), esp8));
+         addInstr(env, X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(r3), esp12));
+	 /* Fetch result back from stack. */
+         addInstr(env, X86Instr_SseLdSt(True/*load*/, dst, esp0));
+         add_to_esp(env, 16);
+         return dst;
+      }
+
+      case Iop_CmpEQ32Fx4: op = Xsse_CMPEQF; goto do_32Fx4;
+      case Iop_CmpLT32Fx4: op = Xsse_CMPLTF; goto do_32Fx4;
+      case Iop_CmpLE32Fx4: op = Xsse_CMPLEF; goto do_32Fx4;
+      case Iop_CmpUN32Fx4: op = Xsse_CMPUNF; goto do_32Fx4;
+      case Iop_Max32Fx4:   op = Xsse_MAXF;   goto do_32Fx4;
+      case Iop_Min32Fx4:   op = Xsse_MINF;   goto do_32Fx4;
+      do_32Fx4:
+      {
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         addInstr(env, X86Instr_Sse32Fx4(op, argR, dst));
+         return dst;
+      }
+
+      case Iop_CmpEQ64Fx2: op = Xsse_CMPEQF; goto do_64Fx2;
+      case Iop_CmpLT64Fx2: op = Xsse_CMPLTF; goto do_64Fx2;
+      case Iop_CmpLE64Fx2: op = Xsse_CMPLEF; goto do_64Fx2;
+      case Iop_CmpUN64Fx2: op = Xsse_CMPUNF; goto do_64Fx2;
+      case Iop_Max64Fx2:   op = Xsse_MAXF;   goto do_64Fx2;
+      case Iop_Min64Fx2:   op = Xsse_MINF;   goto do_64Fx2;
+      do_64Fx2:
+      {
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         REQUIRE_SSE2;
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         addInstr(env, X86Instr_Sse64Fx2(op, argR, dst));
+         return dst;
+      }
+
+      case Iop_CmpEQ32F0x4: op = Xsse_CMPEQF; goto do_32F0x4;
+      case Iop_CmpLT32F0x4: op = Xsse_CMPLTF; goto do_32F0x4;
+      case Iop_CmpLE32F0x4: op = Xsse_CMPLEF; goto do_32F0x4;
+      case Iop_CmpUN32F0x4: op = Xsse_CMPUNF; goto do_32F0x4;
+      case Iop_Add32F0x4:   op = Xsse_ADDF;   goto do_32F0x4;
+      case Iop_Div32F0x4:   op = Xsse_DIVF;   goto do_32F0x4;
+      case Iop_Max32F0x4:   op = Xsse_MAXF;   goto do_32F0x4;
+      case Iop_Min32F0x4:   op = Xsse_MINF;   goto do_32F0x4;
+      case Iop_Mul32F0x4:   op = Xsse_MULF;   goto do_32F0x4;
+      case Iop_Sub32F0x4:   op = Xsse_SUBF;   goto do_32F0x4;
+      do_32F0x4: {
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         addInstr(env, X86Instr_Sse32FLo(op, argR, dst));
+         return dst;
+      }
+
+      case Iop_CmpEQ64F0x2: op = Xsse_CMPEQF; goto do_64F0x2;
+      case Iop_CmpLT64F0x2: op = Xsse_CMPLTF; goto do_64F0x2;
+      case Iop_CmpLE64F0x2: op = Xsse_CMPLEF; goto do_64F0x2;
+      case Iop_CmpUN64F0x2: op = Xsse_CMPUNF; goto do_64F0x2;
+      case Iop_Add64F0x2:   op = Xsse_ADDF;   goto do_64F0x2;
+      case Iop_Div64F0x2:   op = Xsse_DIVF;   goto do_64F0x2;
+      case Iop_Max64F0x2:   op = Xsse_MAXF;   goto do_64F0x2;
+      case Iop_Min64F0x2:   op = Xsse_MINF;   goto do_64F0x2;
+      case Iop_Mul64F0x2:   op = Xsse_MULF;   goto do_64F0x2;
+      case Iop_Sub64F0x2:   op = Xsse_SUBF;   goto do_64F0x2;
+      do_64F0x2: {
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         REQUIRE_SSE2;
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         addInstr(env, X86Instr_Sse64FLo(op, argR, dst));
+         return dst;
+      }
+
+      case Iop_QNarrowBin32Sto16Sx8: 
+         op = Xsse_PACKSSD; arg1isEReg = True; goto do_SseReRg;
+      case Iop_QNarrowBin16Sto8Sx16: 
+         op = Xsse_PACKSSW; arg1isEReg = True; goto do_SseReRg;
+      case Iop_QNarrowBin16Sto8Ux16: 
+         op = Xsse_PACKUSW; arg1isEReg = True; goto do_SseReRg;
+
+      case Iop_InterleaveHI8x16: 
+         op = Xsse_UNPCKHB; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveHI16x8: 
+         op = Xsse_UNPCKHW; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveHI32x4: 
+         op = Xsse_UNPCKHD; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveHI64x2: 
+         op = Xsse_UNPCKHQ; arg1isEReg = True; goto do_SseReRg;
+
+      case Iop_InterleaveLO8x16: 
+         op = Xsse_UNPCKLB; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveLO16x8: 
+         op = Xsse_UNPCKLW; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveLO32x4: 
+         op = Xsse_UNPCKLD; arg1isEReg = True; goto do_SseReRg;
+      case Iop_InterleaveLO64x2: 
+         op = Xsse_UNPCKLQ; arg1isEReg = True; goto do_SseReRg;
+
+      case Iop_AndV128:    op = Xsse_AND;      goto do_SseReRg;
+      case Iop_OrV128:     op = Xsse_OR;       goto do_SseReRg;
+      case Iop_XorV128:    op = Xsse_XOR;      goto do_SseReRg;
+      case Iop_Add8x16:    op = Xsse_ADD8;     goto do_SseReRg;
+      case Iop_Add16x8:    op = Xsse_ADD16;    goto do_SseReRg;
+      case Iop_Add32x4:    op = Xsse_ADD32;    goto do_SseReRg;
+      case Iop_Add64x2:    op = Xsse_ADD64;    goto do_SseReRg;
+      case Iop_QAdd8Sx16:  op = Xsse_QADD8S;   goto do_SseReRg;
+      case Iop_QAdd16Sx8:  op = Xsse_QADD16S;  goto do_SseReRg;
+      case Iop_QAdd8Ux16:  op = Xsse_QADD8U;   goto do_SseReRg;
+      case Iop_QAdd16Ux8:  op = Xsse_QADD16U;  goto do_SseReRg;
+      case Iop_Avg8Ux16:   op = Xsse_AVG8U;    goto do_SseReRg;
+      case Iop_Avg16Ux8:   op = Xsse_AVG16U;   goto do_SseReRg;
+      case Iop_CmpEQ8x16:  op = Xsse_CMPEQ8;   goto do_SseReRg;
+      case Iop_CmpEQ16x8:  op = Xsse_CMPEQ16;  goto do_SseReRg;
+      case Iop_CmpEQ32x4:  op = Xsse_CMPEQ32;  goto do_SseReRg;
+      case Iop_CmpGT8Sx16: op = Xsse_CMPGT8S;  goto do_SseReRg;
+      case Iop_CmpGT16Sx8: op = Xsse_CMPGT16S; goto do_SseReRg;
+      case Iop_CmpGT32Sx4: op = Xsse_CMPGT32S; goto do_SseReRg;
+      case Iop_Max16Sx8:   op = Xsse_MAX16S;   goto do_SseReRg;
+      case Iop_Max8Ux16:   op = Xsse_MAX8U;    goto do_SseReRg;
+      case Iop_Min16Sx8:   op = Xsse_MIN16S;   goto do_SseReRg;
+      case Iop_Min8Ux16:   op = Xsse_MIN8U;    goto do_SseReRg;
+      case Iop_MulHi16Ux8: op = Xsse_MULHI16U; goto do_SseReRg;
+      case Iop_MulHi16Sx8: op = Xsse_MULHI16S; goto do_SseReRg;
+      case Iop_Mul16x8:    op = Xsse_MUL16;    goto do_SseReRg;
+      case Iop_Sub8x16:    op = Xsse_SUB8;     goto do_SseReRg;
+      case Iop_Sub16x8:    op = Xsse_SUB16;    goto do_SseReRg;
+      case Iop_Sub32x4:    op = Xsse_SUB32;    goto do_SseReRg;
+      case Iop_Sub64x2:    op = Xsse_SUB64;    goto do_SseReRg;
+      case Iop_QSub8Sx16:  op = Xsse_QSUB8S;   goto do_SseReRg;
+      case Iop_QSub16Sx8:  op = Xsse_QSUB16S;  goto do_SseReRg;
+      case Iop_QSub8Ux16:  op = Xsse_QSUB8U;   goto do_SseReRg;
+      case Iop_QSub16Ux8:  op = Xsse_QSUB16U;  goto do_SseReRg;
+      do_SseReRg: {
+         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg dst = newVRegV(env);
+         if (op != Xsse_OR && op != Xsse_AND && op != Xsse_XOR)
+            REQUIRE_SSE2;
+         if (arg1isEReg) {
+            addInstr(env, mk_vMOVsd_RR(arg2, dst));
+            addInstr(env, X86Instr_SseReRg(op, arg1, dst));
+         } else {
+            addInstr(env, mk_vMOVsd_RR(arg1, dst));
+            addInstr(env, X86Instr_SseReRg(op, arg2, dst));
+         }
+         return dst;
+      }
+
+      case Iop_ShlN16x8: op = Xsse_SHL16; goto do_SseShift;
+      case Iop_ShlN32x4: op = Xsse_SHL32; goto do_SseShift;
+      case Iop_ShlN64x2: op = Xsse_SHL64; goto do_SseShift;
+      case Iop_SarN16x8: op = Xsse_SAR16; goto do_SseShift;
+      case Iop_SarN32x4: op = Xsse_SAR32; goto do_SseShift;
+      case Iop_ShrN16x8: op = Xsse_SHR16; goto do_SseShift;
+      case Iop_ShrN32x4: op = Xsse_SHR32; goto do_SseShift;
+      case Iop_ShrN64x2: op = Xsse_SHR64; goto do_SseShift;
+      do_SseShift: {
+         HReg      greg = iselVecExpr(env, e->Iex.Binop.arg1);
+         X86RMI*   rmi  = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
+         X86AMode* esp0 = X86AMode_IR(0, hregX86_ESP());
+         HReg      ereg = newVRegV(env);
+         HReg      dst  = newVRegV(env);
+         REQUIRE_SSE2;
+         addInstr(env, X86Instr_Push(X86RMI_Imm(0)));
+         addInstr(env, X86Instr_Push(X86RMI_Imm(0)));
+         addInstr(env, X86Instr_Push(X86RMI_Imm(0)));
+         addInstr(env, X86Instr_Push(rmi));
+         addInstr(env, X86Instr_SseLdSt(True/*load*/, ereg, esp0));
+	 addInstr(env, mk_vMOVsd_RR(greg, dst));
+         addInstr(env, X86Instr_SseReRg(op, ereg, dst));
+         add_to_esp(env, 16);
+         return dst;
+      }
+
+      case Iop_NarrowBin32to16x8:
+         fn = (HWord)h_generic_calc_NarrowBin32to16x8;
+         goto do_SseAssistedBinary;
+      case Iop_NarrowBin16to8x16:
+         fn = (HWord)h_generic_calc_NarrowBin16to8x16;
+         goto do_SseAssistedBinary;
+      do_SseAssistedBinary: {
+         /* As with the amd64 case (where this is copied from) we
+            generate pretty bad code. */
+         vassert(fn != 0);
+         HReg dst = newVRegV(env);
+         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
+         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
+         HReg argp = newVRegI(env);
+         /* subl $112, %esp         -- make a space */
+         sub_from_esp(env, 112);
+         /* leal 48(%esp), %r_argp  -- point into it */
+         addInstr(env, X86Instr_Lea32(X86AMode_IR(48, hregX86_ESP()),
+                                      argp));
+         /* andl $-16, %r_argp      -- 16-align the pointer */
+         addInstr(env, X86Instr_Alu32R(Xalu_AND,
+                                       X86RMI_Imm( ~(UInt)15 ), 
+                                       argp));
+         /* Prepare 3 arg regs:
+            leal  0(%r_argp), %eax
+            leal 16(%r_argp), %edx
+            leal 32(%r_argp), %ecx
+         */
+         addInstr(env, X86Instr_Lea32(X86AMode_IR(0, argp),
+                                      hregX86_EAX()));
+         addInstr(env, X86Instr_Lea32(X86AMode_IR(16, argp),
+                                      hregX86_EDX()));
+         addInstr(env, X86Instr_Lea32(X86AMode_IR(32, argp),
+                                      hregX86_ECX()));
+         /* Store the two args, at (%edx) and (%ecx):
+            movupd  %argL, 0(%edx)
+            movupd  %argR, 0(%ecx)
+         */
+         addInstr(env, X86Instr_SseLdSt(False/*!isLoad*/, argL,
+                                        X86AMode_IR(0, hregX86_EDX())));
+         addInstr(env, X86Instr_SseLdSt(False/*!isLoad*/, argR,
+                                        X86AMode_IR(0, hregX86_ECX())));
+         /* call the helper */
+         addInstr(env, X86Instr_Call( Xcc_ALWAYS, (Addr32)fn,
+                                      3, mk_RetLoc_simple(RLPri_None) ));
+         /* fetch the result from memory, using %r_argp, which the
+            register allocator will keep alive across the call. */
+         addInstr(env, X86Instr_SseLdSt(True/*isLoad*/, dst,
+                                        X86AMode_IR(0, argp)));
+         /* and finally, clear the space */
+         add_to_esp(env, 112);
+         return dst;
+      }
+
+      default:
+         break;
+   } /* switch (e->Iex.Binop.op) */
+   } /* if (e->tag == Iex_Binop) */
+
+
+   if (e->tag == Iex_Triop) {
+   IRTriop *triop = e->Iex.Triop.details;
+   switch (triop->op) {
+
+      case Iop_Add32Fx4: op = Xsse_ADDF; goto do_32Fx4_w_rm;
+      case Iop_Sub32Fx4: op = Xsse_SUBF; goto do_32Fx4_w_rm;
+      case Iop_Mul32Fx4: op = Xsse_MULF; goto do_32Fx4_w_rm;
+      case Iop_Div32Fx4: op = Xsse_DIVF; goto do_32Fx4_w_rm;
+      do_32Fx4_w_rm:
+      {
+         HReg argL = iselVecExpr(env, triop->arg2);
+         HReg argR = iselVecExpr(env, triop->arg3);
+         HReg dst = newVRegV(env);
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, X86Instr_Sse32Fx4(op, argR, dst));
+         return dst;
+      }
+
+      case Iop_Add64Fx2: op = Xsse_ADDF; goto do_64Fx2_w_rm;
+      case Iop_Sub64Fx2: op = Xsse_SUBF; goto do_64Fx2_w_rm;
+      case Iop_Mul64Fx2: op = Xsse_MULF; goto do_64Fx2_w_rm;
+      case Iop_Div64Fx2: op = Xsse_DIVF; goto do_64Fx2_w_rm;
+      do_64Fx2_w_rm:
+      {
+         HReg argL = iselVecExpr(env, triop->arg2);
+         HReg argR = iselVecExpr(env, triop->arg3);
+         HReg dst = newVRegV(env);
+         REQUIRE_SSE2;
+         addInstr(env, mk_vMOVsd_RR(argL, dst));
+         /* XXXROUNDINGFIXME */
+         /* set roundingmode here */
+         addInstr(env, X86Instr_Sse64Fx2(op, argR, dst));
+         return dst;
+      }
+
+      default:
+         break;
+   } /* switch (triop->op) */
+   } /* if (e->tag == Iex_Triop) */
+
+
+   if (e->tag == Iex_ITE) { // VFD
+      HReg r1  = iselVecExpr(env, e->Iex.ITE.iftrue);
+      HReg r0  = iselVecExpr(env, e->Iex.ITE.iffalse);
+      HReg dst = newVRegV(env);
+      addInstr(env, mk_vMOVsd_RR(r1,dst));
+      X86CondCode cc = iselCondCode(env, e->Iex.ITE.cond);
+      addInstr(env, X86Instr_SseCMov(cc ^ 1, r0, dst));
+      return dst;
+   }
+
+   vec_fail:
+   vex_printf("iselVecExpr (hwcaps = %s): can't reduce\n",
+              LibVEX_ppVexHwCaps(VexArchX86,env->hwcaps));
+   ppIRExpr(e);
+   vpanic("iselVecExpr_wrk");
+
+#  undef REQUIRE_SSE1
+#  undef REQUIRE_SSE2
+#  undef SSE2_OR_ABOVE
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Statements                                  ---*/
+/*---------------------------------------------------------*/
+
+static void iselStmt ( ISelEnv* env, IRStmt* stmt )
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf("\n-- ");
+      ppIRStmt(stmt);
+      vex_printf("\n");
+   }
+
+   switch (stmt->tag) {
+
+   /* --------- STORE --------- */
+   case Ist_Store: {
+      IRType    tya   = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
+      IRType    tyd   = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
+      IREndness end   = stmt->Ist.Store.end;
+
+      if (tya != Ity_I32 || end != Iend_LE) 
+         goto stmt_fail;
+
+      if (tyd == Ity_I32) {
+         X86AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         X86RI* ri = iselIntExpr_RI(env, stmt->Ist.Store.data);
+         addInstr(env, X86Instr_Alu32M(Xalu_MOV,ri,am));
+         return;
+      }
+      if (tyd == Ity_I8 || tyd == Ity_I16) {
+         X86AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         HReg r = iselIntExpr_R(env, stmt->Ist.Store.data);
+         addInstr(env, X86Instr_Store( toUChar(tyd==Ity_I8 ? 1 : 2),
+                                       r,am ));
+         return;
+      }
+      if (tyd == Ity_F64) {
+         X86AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         HReg r = iselDblExpr(env, stmt->Ist.Store.data);
+         addInstr(env, X86Instr_FpLdSt(False/*store*/, 8, r, am));
+         return;
+      }
+      if (tyd == Ity_F32) {
+         X86AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         HReg r = iselFltExpr(env, stmt->Ist.Store.data);
+         addInstr(env, X86Instr_FpLdSt(False/*store*/, 4, r, am));
+         return;
+      }
+      if (tyd == Ity_I64) {
+         HReg vHi, vLo, rA;
+         iselInt64Expr(&vHi, &vLo, env, stmt->Ist.Store.data);
+         rA = iselIntExpr_R(env, stmt->Ist.Store.addr);
+         addInstr(env, X86Instr_Alu32M(
+                          Xalu_MOV, X86RI_Reg(vLo), X86AMode_IR(0, rA)));
+         addInstr(env, X86Instr_Alu32M(
+                          Xalu_MOV, X86RI_Reg(vHi), X86AMode_IR(4, rA)));
+         return;
+      }
+      if (tyd == Ity_V128) {
+         X86AMode* am = iselIntExpr_AMode(env, stmt->Ist.Store.addr);
+         HReg r = iselVecExpr(env, stmt->Ist.Store.data);
+         addInstr(env, X86Instr_SseLdSt(False/*store*/, r, am));
+         return;
+      }
+      break;
+   }
+
+   /* --------- PUT --------- */
+   case Ist_Put: {
+      IRType ty = typeOfIRExpr(env->type_env, stmt->Ist.Put.data);
+      if (ty == Ity_I32) {
+         /* We're going to write to memory, so compute the RHS into an
+            X86RI. */
+         X86RI* ri = iselIntExpr_RI(env, stmt->Ist.Put.data);
+         addInstr(env,
+                  X86Instr_Alu32M(
+                     Xalu_MOV,
+                     ri,
+                     X86AMode_IR(stmt->Ist.Put.offset,hregX86_EBP())
+                 ));
+         return;
+      }
+      if (ty == Ity_I8 || ty == Ity_I16) {
+         HReg r = iselIntExpr_R(env, stmt->Ist.Put.data);
+         addInstr(env, X86Instr_Store(
+                          toUChar(ty==Ity_I8 ? 1 : 2),
+                          r,
+                          X86AMode_IR(stmt->Ist.Put.offset,
+                                      hregX86_EBP())));
+         return;
+      }
+      if (ty == Ity_I64) {
+         HReg vHi, vLo;
+         X86AMode* am  = X86AMode_IR(stmt->Ist.Put.offset, hregX86_EBP());
+         X86AMode* am4 = advance4(am);
+         iselInt64Expr(&vHi, &vLo, env, stmt->Ist.Put.data);
+         addInstr(env, X86Instr_Alu32M( Xalu_MOV, X86RI_Reg(vLo), am ));
+         addInstr(env, X86Instr_Alu32M( Xalu_MOV, X86RI_Reg(vHi), am4 ));
+         return;
+      }
+      if (ty == Ity_V128) {
+         HReg      vec = iselVecExpr(env, stmt->Ist.Put.data);
+         X86AMode* am  = X86AMode_IR(stmt->Ist.Put.offset, hregX86_EBP());
+         addInstr(env, X86Instr_SseLdSt(False/*store*/, vec, am));
+         return;
+      }
+      if (ty == Ity_F32) {
+         HReg f32 = iselFltExpr(env, stmt->Ist.Put.data);
+         X86AMode* am  = X86AMode_IR(stmt->Ist.Put.offset, hregX86_EBP());
+         set_FPU_rounding_default(env); /* paranoia */
+         addInstr(env, X86Instr_FpLdSt( False/*store*/, 4, f32, am ));
+         return;
+      }
+      if (ty == Ity_F64) {
+         HReg f64 = iselDblExpr(env, stmt->Ist.Put.data);
+         X86AMode* am  = X86AMode_IR(stmt->Ist.Put.offset, hregX86_EBP());
+         set_FPU_rounding_default(env); /* paranoia */
+         addInstr(env, X86Instr_FpLdSt( False/*store*/, 8, f64, am ));
+         return;
+      }
+      break;
+   }
+
+   /* --------- Indexed PUT --------- */
+   case Ist_PutI: {
+      IRPutI *puti = stmt->Ist.PutI.details;
+
+      X86AMode* am 
+         = genGuestArrayOffset(
+              env, puti->descr, 
+                   puti->ix, puti->bias );
+
+      IRType ty = typeOfIRExpr(env->type_env, puti->data);
+      if (ty == Ity_F64) {
+         HReg val = iselDblExpr(env, puti->data);
+         addInstr(env, X86Instr_FpLdSt( False/*store*/, 8, val, am ));
+         return;
+      }
+      if (ty == Ity_I8) {
+         HReg r = iselIntExpr_R(env, puti->data);
+         addInstr(env, X86Instr_Store( 1, r, am ));
+         return;
+      }
+      if (ty == Ity_I32) {
+         HReg r = iselIntExpr_R(env, puti->data);
+         addInstr(env, X86Instr_Alu32M( Xalu_MOV, X86RI_Reg(r), am ));
+         return;
+      }
+      if (ty == Ity_I64) {
+         HReg rHi, rLo;
+         X86AMode* am4 = advance4(am);
+         iselInt64Expr(&rHi, &rLo, env, puti->data);
+         addInstr(env, X86Instr_Alu32M( Xalu_MOV, X86RI_Reg(rLo), am ));
+         addInstr(env, X86Instr_Alu32M( Xalu_MOV, X86RI_Reg(rHi), am4 ));
+         return;
+      }
+      break;
+   }
+
+   /* --------- TMP --------- */
+   case Ist_WrTmp: {
+      IRTemp tmp = stmt->Ist.WrTmp.tmp;
+      IRType ty = typeOfIRTemp(env->type_env, tmp);
+
+      /* optimisation: if stmt->Ist.WrTmp.data is Add32(..,..),
+         compute it into an AMode and then use LEA.  This usually
+         produces fewer instructions, often because (for memcheck
+         created IR) we get t = address-expression, (t is later used
+         twice) and so doing this naturally turns address-expression
+         back into an X86 amode. */
+      if (ty == Ity_I32 
+          && stmt->Ist.WrTmp.data->tag == Iex_Binop
+          && stmt->Ist.WrTmp.data->Iex.Binop.op == Iop_Add32) {
+         X86AMode* am = iselIntExpr_AMode(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         if (am->tag == Xam_IR && am->Xam.IR.imm == 0) {
+            /* Hmm, iselIntExpr_AMode wimped out and just computed the
+               value into a register.  Just emit a normal reg-reg move
+               so reg-alloc can coalesce it away in the usual way. */
+            HReg src = am->Xam.IR.reg;
+            addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Reg(src), dst));
+         } else {
+            addInstr(env, X86Instr_Lea32(am,dst));
+         }
+         return;
+      }
+
+      if (ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8) {
+         X86RMI* rmi = iselIntExpr_RMI(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, X86Instr_Alu32R(Xalu_MOV,rmi,dst));
+         return;
+      }
+      if (ty == Ity_I64) {
+         HReg rHi, rLo, dstHi, dstLo;
+         iselInt64Expr(&rHi,&rLo, env, stmt->Ist.WrTmp.data);
+         lookupIRTemp64( &dstHi, &dstLo, env, tmp);
+         addInstr(env, mk_iMOVsd_RR(rHi,dstHi) );
+         addInstr(env, mk_iMOVsd_RR(rLo,dstLo) );
+         return;
+      }
+      if (ty == Ity_I1) {
+         X86CondCode cond = iselCondCode(env, stmt->Ist.WrTmp.data);
+         HReg dst = lookupIRTemp(env, tmp);
+         addInstr(env, X86Instr_Set32(cond, dst));
+         return;
+      }
+      if (ty == Ity_F64) {
+         HReg dst = lookupIRTemp(env, tmp);
+         HReg src = iselDblExpr(env, stmt->Ist.WrTmp.data);
+         addInstr(env, X86Instr_FpUnary(Xfp_MOV,src,dst));
+         return;
+      }
+      if (ty == Ity_F32) {
+         HReg dst = lookupIRTemp(env, tmp);
+         HReg src = iselFltExpr(env, stmt->Ist.WrTmp.data);
+         addInstr(env, X86Instr_FpUnary(Xfp_MOV,src,dst));
+         return;
+      }
+      if (ty == Ity_V128) {
+         HReg dst = lookupIRTemp(env, tmp);
+         HReg src = iselVecExpr(env, stmt->Ist.WrTmp.data);
+         addInstr(env, mk_vMOVsd_RR(src,dst));
+         return;
+      }
+      break;
+   }
+
+   /* --------- Call to DIRTY helper --------- */
+   case Ist_Dirty: {
+      IRDirty* d = stmt->Ist.Dirty.details;
+
+      /* Figure out the return type, if any. */
+      IRType retty = Ity_INVALID;
+      if (d->tmp != IRTemp_INVALID)
+         retty = typeOfIRTemp(env->type_env, d->tmp);
+
+      Bool retty_ok = False;
+      switch (retty) {
+         case Ity_INVALID: /* function doesn't return anything */
+         case Ity_I64: case Ity_I32: case Ity_I16: case Ity_I8:
+         case Ity_V128:
+            retty_ok = True; break;
+         default:
+            break;
+      }
+      if (!retty_ok)
+         break; /* will go to stmt_fail: */
+
+      /* Marshal args, do the call, and set the return value to
+         0x555..555 if this is a conditional call that returns a value
+         and the call is skipped. */
+      UInt   addToSp = 0;
+      RetLoc rloc    = mk_RetLoc_INVALID();
+      doHelperCall( &addToSp, &rloc, env, d->guard, d->cee, retty, d->args );
+      vassert(is_sane_RetLoc(rloc));
+
+      /* Now figure out what to do with the returned value, if any. */
+      switch (retty) {
+         case Ity_INVALID: {
+            /* No return value.  Nothing to do. */
+            vassert(d->tmp == IRTemp_INVALID);
+            vassert(rloc.pri == RLPri_None);
+            vassert(addToSp == 0);
+            return;
+         }
+         case Ity_I32: case Ity_I16: case Ity_I8: {
+            /* The returned value is in %eax.  Park it in the register
+               associated with tmp. */
+            vassert(rloc.pri == RLPri_Int);
+            vassert(addToSp == 0);
+            HReg dst = lookupIRTemp(env, d->tmp);
+            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(),dst) );
+            return;
+         }
+         case Ity_I64: {
+            /* The returned value is in %edx:%eax.  Park it in the
+               register-pair associated with tmp. */
+            vassert(rloc.pri == RLPri_2Int);
+            vassert(addToSp == 0);
+            HReg dstHi, dstLo;
+            lookupIRTemp64( &dstHi, &dstLo, env, d->tmp);
+            addInstr(env, mk_iMOVsd_RR(hregX86_EDX(),dstHi) );
+            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(),dstLo) );
+            return;
+         }
+         case Ity_V128: {
+            /* The returned value is on the stack, and *retloc tells
+               us where.  Fish it off the stack and then move the
+               stack pointer upwards to clear it, as directed by
+               doHelperCall. */
+            vassert(rloc.pri == RLPri_V128SpRel);
+            vassert(addToSp >= 16);
+            HReg      dst = lookupIRTemp(env, d->tmp);
+            X86AMode* am  = X86AMode_IR(rloc.spOff, hregX86_ESP());
+            addInstr(env, X86Instr_SseLdSt( True/*load*/, dst, am ));
+            add_to_esp(env, addToSp);
+            return;
+         }
+         default:
+            /*NOTREACHED*/
+            vassert(0);
+      }
+      break;
+   }
+
+   /* --------- MEM FENCE --------- */
+   case Ist_MBE:
+      switch (stmt->Ist.MBE.event) {
+         case Imbe_Fence:
+            addInstr(env, X86Instr_MFence(env->hwcaps));
+            return;
+         default:
+            break;
+      }
+      break;
+
+   /* --------- ACAS --------- */
+   case Ist_CAS:
+      if (stmt->Ist.CAS.details->oldHi == IRTemp_INVALID) {
+         /* "normal" singleton CAS */
+         UChar  sz;
+         IRCAS* cas = stmt->Ist.CAS.details;
+         IRType ty  = typeOfIRExpr(env->type_env, cas->dataLo);
+         /* get: cas->expdLo into %eax, and cas->dataLo into %ebx */
+         X86AMode* am = iselIntExpr_AMode(env, cas->addr);
+         HReg rDataLo = iselIntExpr_R(env, cas->dataLo);
+         HReg rExpdLo = iselIntExpr_R(env, cas->expdLo);
+         HReg rOldLo  = lookupIRTemp(env, cas->oldLo);
+         vassert(cas->expdHi == NULL);
+         vassert(cas->dataHi == NULL);
+         addInstr(env, mk_iMOVsd_RR(rExpdLo, rOldLo));
+         addInstr(env, mk_iMOVsd_RR(rExpdLo, hregX86_EAX()));
+         addInstr(env, mk_iMOVsd_RR(rDataLo, hregX86_EBX()));
+         switch (ty) { 
+            case Ity_I32: sz = 4; break;
+            case Ity_I16: sz = 2; break;
+            case Ity_I8:  sz = 1; break; 
+            default: goto unhandled_cas;
+         }
+         addInstr(env, X86Instr_ACAS(am, sz));
+         addInstr(env,
+                  X86Instr_CMov32(Xcc_NZ,
+                                  X86RM_Reg(hregX86_EAX()), rOldLo));
+         return;
+      } else {
+         /* double CAS */
+         IRCAS* cas = stmt->Ist.CAS.details;
+         IRType ty  = typeOfIRExpr(env->type_env, cas->dataLo);
+         /* only 32-bit allowed in this case */
+         /* get: cas->expdLo into %eax, and cas->dataLo into %ebx */
+         /* get: cas->expdHi into %edx, and cas->dataHi into %ecx */
+         X86AMode* am = iselIntExpr_AMode(env, cas->addr);
+         HReg rDataHi = iselIntExpr_R(env, cas->dataHi);
+         HReg rDataLo = iselIntExpr_R(env, cas->dataLo);
+         HReg rExpdHi = iselIntExpr_R(env, cas->expdHi);
+         HReg rExpdLo = iselIntExpr_R(env, cas->expdLo);
+         HReg rOldHi  = lookupIRTemp(env, cas->oldHi);
+         HReg rOldLo  = lookupIRTemp(env, cas->oldLo);
+         if (ty != Ity_I32)
+            goto unhandled_cas;
+         addInstr(env, mk_iMOVsd_RR(rExpdHi, rOldHi));
+         addInstr(env, mk_iMOVsd_RR(rExpdLo, rOldLo));
+         addInstr(env, mk_iMOVsd_RR(rExpdHi, hregX86_EDX()));
+         addInstr(env, mk_iMOVsd_RR(rExpdLo, hregX86_EAX()));
+         addInstr(env, mk_iMOVsd_RR(rDataHi, hregX86_ECX()));
+         addInstr(env, mk_iMOVsd_RR(rDataLo, hregX86_EBX()));
+         addInstr(env, X86Instr_DACAS(am));
+         addInstr(env,
+                  X86Instr_CMov32(Xcc_NZ,
+                                  X86RM_Reg(hregX86_EDX()), rOldHi));
+         addInstr(env,
+                  X86Instr_CMov32(Xcc_NZ,
+                                  X86RM_Reg(hregX86_EAX()), rOldLo));
+         return;
+      }
+      unhandled_cas:
+      break;
+
+   /* --------- INSTR MARK --------- */
+   /* Doesn't generate any executable code ... */
+   case Ist_IMark:
+       return;
+
+   /* --------- NO-OP --------- */
+   /* Fairly self-explanatory, wouldn't you say? */
+   case Ist_NoOp:
+       return;
+
+   /* --------- EXIT --------- */
+   case Ist_Exit: {
+      if (stmt->Ist.Exit.dst->tag != Ico_U32)
+         vpanic("iselStmt(x86): Ist_Exit: dst is not a 32-bit value");
+
+      X86CondCode cc    = iselCondCode(env, stmt->Ist.Exit.guard);
+      X86AMode*   amEIP = X86AMode_IR(stmt->Ist.Exit.offsIP,
+                                      hregX86_EBP());
+
+      /* Case: boring transfer to known address */
+      if (stmt->Ist.Exit.jk == Ijk_Boring) {
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = ((Addr32)stmt->Ist.Exit.dst->Ico.U32) > env->max_ga;
+            if (0) vex_printf("%s", toFastEP ? "Y" : ",");
+            addInstr(env, X86Instr_XDirect(stmt->Ist.Exit.dst->Ico.U32,
+                                           amEIP, cc, toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselIntExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, X86Instr_XAssisted(r, amEIP, cc, Ijk_Boring));
+         }
+         return;
+      }
+
+      /* Case: assisted transfer to arbitrary address */
+      switch (stmt->Ist.Exit.jk) {
+         /* Keep this list in sync with that in iselNext below */
+         case Ijk_ClientReq:
+         case Ijk_EmWarn:
+         case Ijk_MapFail:
+         case Ijk_NoDecode:
+         case Ijk_NoRedir:
+         case Ijk_SigSEGV:
+         case Ijk_SigTRAP:
+         case Ijk_Sys_int128:
+         case Ijk_Sys_int129:
+         case Ijk_Sys_int130:
+         case Ijk_Sys_syscall:
+         case Ijk_Sys_sysenter:
+         case Ijk_InvalICache:
+         case Ijk_Yield:
+         {
+            HReg r = iselIntExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
+            addInstr(env, X86Instr_XAssisted(r, amEIP, cc, stmt->Ist.Exit.jk));
+            return;
+         }
+         default:
+            break;
+      }
+
+      /* Do we ever expect to see any other kind? */
+      goto stmt_fail;
+   }
+
+   default: break;
+   }
+  stmt_fail:
+   ppIRStmt(stmt);
+   vpanic("iselStmt");
+}
+
+
+/*---------------------------------------------------------*/
+/*--- ISEL: Basic block terminators (Nexts)             ---*/
+/*---------------------------------------------------------*/
+
+/* Generate code for the end-of-superblock transfer: the guest IP is
+   to be set to |next| (written at guest-state offset |offsIP|) and
+   control transferred according to jump kind |jk|.  Three cases are
+   handled, in order: direct (possibly chainable) transfer to a
+   constant address, indirect boring/call/return transfer, and
+   assisted transfer for the remaining recognised jump kinds.  Any
+   other jump kind is a panic.  Instructions are appended to
+   env->code; there is no return value. */
+static void iselNext ( ISelEnv* env,
+                       IRExpr* next, IRJumpKind jk, Int offsIP )
+{
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      vex_printf( "\n-- PUT(%d) = ", offsIP);
+      ppIRExpr( next );
+      vex_printf( "; exit-");
+      ppIRJumpKind(jk);
+      vex_printf( "\n");
+   }
+
+   /* Case: boring transfer to known address */
+   if (next->tag == Iex_Const) {
+      IRConst* cdst = next->Iex.Const.con;
+      vassert(cdst->tag == Ico_U32);
+      if (jk == Ijk_Boring || jk == Ijk_Call) {
+         /* Boring transfer to known address */
+         X86AMode* amEIP = X86AMode_IR(offsIP, hregX86_EBP());
+         if (env->chainingAllowed) {
+            /* .. almost always true .. */
+            /* Skip the event check at the dst if this is a forwards
+               edge. */
+            Bool toFastEP
+               = ((Addr32)cdst->Ico.U32) > env->max_ga;
+            if (0) vex_printf("%s", toFastEP ? "X" : ".");
+            addInstr(env, X86Instr_XDirect(cdst->Ico.U32,
+                                           amEIP, Xcc_ALWAYS, 
+                                           toFastEP));
+         } else {
+            /* .. very occasionally .. */
+            /* We can't use chaining, so ask for an assisted transfer,
+               as that's the only alternative that is allowable. */
+            HReg r = iselIntExpr_R(env, next);
+            addInstr(env, X86Instr_XAssisted(r, amEIP, Xcc_ALWAYS,
+                                             Ijk_Boring));
+         }
+         return;
+      }
+   }
+
+   /* Case: call/return (==boring) transfer to any address */
+   switch (jk) {
+      case Ijk_Boring: case Ijk_Ret: case Ijk_Call: {
+         HReg      r     = iselIntExpr_R(env, next);
+         X86AMode* amEIP = X86AMode_IR(offsIP, hregX86_EBP());
+         if (env->chainingAllowed) {
+            addInstr(env, X86Instr_XIndir(r, amEIP, Xcc_ALWAYS));
+         } else {
+            addInstr(env, X86Instr_XAssisted(r, amEIP, Xcc_ALWAYS,
+                                               Ijk_Boring));
+         }
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* Case: assisted transfer to arbitrary address */
+   switch (jk) {
+      /* Keep this list in sync with that for Ist_Exit above */
+      case Ijk_ClientReq:
+      case Ijk_EmWarn:
+      case Ijk_MapFail:
+      case Ijk_NoDecode:
+      case Ijk_NoRedir:
+      case Ijk_SigSEGV:
+      case Ijk_SigTRAP:
+      case Ijk_Sys_int128:
+      case Ijk_Sys_int129:
+      case Ijk_Sys_int130:
+      case Ijk_Sys_syscall:
+      case Ijk_Sys_sysenter:
+      case Ijk_InvalICache:
+      case Ijk_Yield:
+      {
+         HReg      r     = iselIntExpr_R(env, next);
+         X86AMode* amEIP = X86AMode_IR(offsIP, hregX86_EBP());
+         addInstr(env, X86Instr_XAssisted(r, amEIP, Xcc_ALWAYS, jk));
+         return;
+      }
+      default:
+         break;
+   }
+
+   /* No case matched: dump the offending transfer for diagnosis,
+      then die. */
+   vex_printf( "\n-- PUT(%d) = ", offsIP);
+   ppIRExpr( next );
+   vex_printf( "; exit-");
+   ppIRJumpKind(jk);
+   vex_printf( "\n");
+   vassert(0); // are we expecting any other kind?
+}
+
+
+/*---------------------------------------------------------*/
+/*--- Insn selector top-level                           ---*/
+/*---------------------------------------------------------*/
+
+/* Translate an entire SB to x86 code. */
+
+/* Translate the IR superblock |bb| into an array of x86 host
+   instructions.  |archinfo_host| supplies the host capability bits
+   (checked against the set this backend understands);
+   |offs_Host_EvC_Counter| and |offs_Host_EvC_FailAddr| are
+   guest-state offsets used by the mandatory leading event check;
+   |chainingAllowed| selects chainable vs assisted exits;
+   |addProfInc| requests a (later-patched) profiling-counter
+   increment; |max_ga| is the maximum guest address in the block and
+   is used (via env->max_ga) to classify exits as forward or backward
+   edges.  Returns the generated code with n_vregs filled in. */
+HInstrArray* iselSB_X86 ( const IRSB* bb,
+                          VexArch      arch_host,
+                          const VexArchInfo* archinfo_host,
+                          const VexAbiInfo*  vbi/*UNUSED*/,
+                          Int offs_Host_EvC_Counter,
+                          Int offs_Host_EvC_FailAddr,
+                          Bool chainingAllowed,
+                          Bool addProfInc,
+                          Addr max_ga )
+{
+   Int      i, j;
+   HReg     hreg, hregHI;
+   ISelEnv* env;
+   UInt     hwcaps_host = archinfo_host->hwcaps;
+   X86AMode *amCounter, *amFailAddr;
+
+   /* sanity ... */
+   vassert(arch_host == VexArchX86);
+   vassert(0 == (hwcaps_host
+                 & ~(VEX_HWCAPS_X86_MMXEXT
+                     | VEX_HWCAPS_X86_SSE1
+                     | VEX_HWCAPS_X86_SSE2
+                     | VEX_HWCAPS_X86_SSE3
+                     | VEX_HWCAPS_X86_LZCNT)));
+
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
+   /* Make up an initial environment to use. */
+   env = LibVEX_Alloc_inline(sizeof(ISelEnv));
+   env->vreg_ctr = 0;
+
+   /* Set up output code array. */
+   env->code = newHInstrArray();
+
+   /* Copy BB's type env. */
+   env->type_env = bb->tyenv;
+
+   /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
+      change as we go along. */
+   env->n_vregmap = bb->tyenv->types_used;
+   env->vregmap   = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+   env->vregmapHI = LibVEX_Alloc_inline(env->n_vregmap * sizeof(HReg));
+
+   /* and finally ... */
+   env->chainingAllowed = chainingAllowed;
+   env->hwcaps          = hwcaps_host;
+   env->max_ga          = max_ga;
+
+   /* For each IR temporary, allocate a suitably-kinded virtual
+      register.  |j| counts the virtual-register indices handed out;
+      note that an I64 temp consumes two 32-bit vregs (lo in vregmap,
+      hi in vregmapHI). */
+   j = 0;
+   for (i = 0; i < env->n_vregmap; i++) {
+      hregHI = hreg = INVALID_HREG;
+      switch (bb->tyenv->types[i]) {
+         case Ity_I1:
+         case Ity_I8:
+         case Ity_I16:
+         case Ity_I32:  hreg   = mkHReg(True, HRcInt32,  0, j++); break;
+         case Ity_I64:  hreg   = mkHReg(True, HRcInt32,  0, j++);
+                        hregHI = mkHReg(True, HRcInt32,  0, j++); break;
+         case Ity_F32:
+         case Ity_F64:  hreg   = mkHReg(True, HRcFlt64,  0, j++); break;
+         case Ity_V128: hreg   = mkHReg(True, HRcVec128, 0, j++); break;
+         default: ppIRType(bb->tyenv->types[i]);
+                  vpanic("iselBB: IRTemp type");
+      }
+      env->vregmap[i]   = hreg;
+      env->vregmapHI[i] = hregHI;
+   }
+   env->vreg_ctr = j;
+
+   /* The very first instruction must be an event check. */
+   amCounter  = X86AMode_IR(offs_Host_EvC_Counter,  hregX86_EBP());
+   amFailAddr = X86AMode_IR(offs_Host_EvC_FailAddr, hregX86_EBP());
+   addInstr(env, X86Instr_EvCheck(amCounter, amFailAddr));
+
+   /* Possibly a block counter increment (for profiling).  At this
+      point we don't know the address of the counter, so just pretend
+      it is zero.  It will have to be patched later, but before this
+      translation is used, by a call to LibVEX_patchProfCtr. */
+   if (addProfInc) {
+      addInstr(env, X86Instr_ProfInc());
+   }
+
+   /* Ok, finally we can iterate over the statements. */
+   for (i = 0; i < bb->stmts_used; i++)
+      iselStmt(env, bb->stmts[i]);
+
+   iselNext(env, bb->next, bb->jumpkind, bb->offsIP);
+
+   /* record the number of vregs we used. */
+   env->code->n_vregs = env->vreg_ctr;
+   return env->code;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                     host_x86_isel.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/ir_defs.c b/VEX/priv/ir_defs.c
new file mode 100644
index 0000000..da851f5
--- /dev/null
+++ b/VEX/priv/ir_defs.c
@@ -0,0 +1,4711 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                         ir_defs.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- Printing the IR                                         ---*/
+/*---------------------------------------------------------------*/
+
+/* Print the name of IRType |ty|.  Unknown values print the raw code
+   and then panic. */
+void ppIRType ( IRType ty )
+{
+   const HChar* nm = NULL;
+   switch (ty) {
+      case Ity_INVALID: nm = "Ity_INVALID"; break;
+      case Ity_I1:      nm = "I1";   break;
+      case Ity_I8:      nm = "I8";   break;
+      case Ity_I16:     nm = "I16";  break;
+      case Ity_I32:     nm = "I32";  break;
+      case Ity_I64:     nm = "I64";  break;
+      case Ity_I128:    nm = "I128"; break;
+      case Ity_F16:     nm = "F16";  break;
+      case Ity_F32:     nm = "F32";  break;
+      case Ity_F64:     nm = "F64";  break;
+      case Ity_F128:    nm = "F128"; break;
+      case Ity_D32:     nm = "D32";  break;
+      case Ity_D64:     nm = "D64";  break;
+      case Ity_D128:    nm = "D128"; break;
+      case Ity_V128:    nm = "V128"; break;
+      case Ity_V256:    nm = "V256"; break;
+      default:
+         /* Not reached on any valid IRType. */
+         vex_printf("ty = 0x%x\n", (Int)ty);
+         vpanic("ppIRType");
+   }
+   vex_printf("%s", nm);
+}
+
+/* Print IR constant |con|.  Floating-point F32/F64 constants are
+   printed as their exact bit patterns (in hex) rather than in
+   rounded decimal form; a union is used to reinterpret the bits as
+   a same-sized integer. */
+void ppIRConst ( const IRConst* con )
+{
+   union { ULong i64; Double f64; UInt i32; Float f32; } u;
+   /* The bit-reinterpretation trick below requires these two to be
+      the same size. */
+   vassert(sizeof(ULong) == sizeof(Double));
+   switch (con->tag) {
+      case Ico_U1:   vex_printf( "%d:I1",        con->Ico.U1 ? 1 : 0); break;
+      case Ico_U8:   vex_printf( "0x%x:I8",      (UInt)(con->Ico.U8)); break;
+      case Ico_U16:  vex_printf( "0x%x:I16",     (UInt)(con->Ico.U16)); break;
+      case Ico_U32:  vex_printf( "0x%x:I32",     (UInt)(con->Ico.U32)); break;
+      case Ico_U64:  vex_printf( "0x%llx:I64",   (ULong)(con->Ico.U64)); break;
+      case Ico_F32:  u.f32 = con->Ico.F32;
+                     vex_printf( "F32{0x%x}",   u.i32);
+                     break;
+      case Ico_F32i: vex_printf( "F32i{0x%x}",   con->Ico.F32i); break;
+      case Ico_F64:  u.f64 = con->Ico.F64;
+                     vex_printf( "F64{0x%llx}",  u.i64);
+                     break;
+      case Ico_F64i: vex_printf( "F64i{0x%llx}", con->Ico.F64i); break;
+      case Ico_V128: vex_printf( "V128{0x%04x}", (UInt)(con->Ico.V128)); break;
+      case Ico_V256: vex_printf( "V256{0x%08x}", con->Ico.V256); break;
+      default: vpanic("ppIRConst");
+   }
+}
+
+/* Print a helper-call target: its name, then optional annotations
+   for a nonzero register-parameter count ([rp=..]) and a nonzero
+   mcx mask ([mcx=..]), and finally the code address. */
+void ppIRCallee ( const IRCallee* ce )
+{
+   vex_printf("%s", ce->name);
+   if (ce->regparms > 0)
+      vex_printf("[rp=%d]", ce->regparms);
+   if (ce->mcx_mask > 0)
+      vex_printf("[mcx=0x%x]", ce->mcx_mask);
+   vex_printf("{%p}", (void*)ce->addr);
+}
+
+/* Print a guest-state array descriptor as
+   "(<base offset>:<number of elements>x<element type>)". */
+void ppIRRegArray ( const IRRegArray* arr )
+{
+   vex_printf("(%d:%dx", arr->base, arr->nElems);
+   ppIRType(arr->elemTy);
+   vex_printf(")");
+}
+
+/* Print an IR temporary as "t<n>", or the distinguished invalid
+   value by name. */
+void ppIRTemp ( IRTemp tmp )
+{
+   if (tmp == IRTemp_INVALID) {
+      vex_printf("IRTemp_INVALID");
+      return;
+   }
+   vex_printf( "t%d", (Int)tmp);
+}
+
+void ppIROp ( IROp op )
+{
+   const HChar* str = NULL; 
+   IROp   base;
+   switch (op) {
+      case Iop_Add8 ... Iop_Add64:
+         str = "Add"; base = Iop_Add8; break;
+      case Iop_Sub8 ... Iop_Sub64:
+         str = "Sub"; base = Iop_Sub8; break;
+      case Iop_Mul8 ... Iop_Mul64:
+         str = "Mul"; base = Iop_Mul8; break;
+      case Iop_Or8 ... Iop_Or64:
+         str = "Or"; base = Iop_Or8; break;
+      case Iop_And8 ... Iop_And64:
+         str = "And"; base = Iop_And8; break;
+      case Iop_Xor8 ... Iop_Xor64:
+         str = "Xor"; base = Iop_Xor8; break;
+      case Iop_Shl8 ... Iop_Shl64:
+         str = "Shl"; base = Iop_Shl8; break;
+      case Iop_Shr8 ... Iop_Shr64:
+         str = "Shr"; base = Iop_Shr8; break;
+      case Iop_Sar8 ... Iop_Sar64:
+         str = "Sar"; base = Iop_Sar8; break;
+      case Iop_CmpEQ8 ... Iop_CmpEQ64:
+         str = "CmpEQ"; base = Iop_CmpEQ8; break;
+      case Iop_CmpNE8 ... Iop_CmpNE64:
+         str = "CmpNE"; base = Iop_CmpNE8; break;
+      case Iop_CasCmpEQ8 ... Iop_CasCmpEQ64:
+         str = "CasCmpEQ"; base = Iop_CasCmpEQ8; break;
+      case Iop_CasCmpNE8 ... Iop_CasCmpNE64:
+         str = "CasCmpNE"; base = Iop_CasCmpNE8; break;
+      case Iop_ExpCmpNE8 ... Iop_ExpCmpNE64:
+         str = "ExpCmpNE"; base = Iop_ExpCmpNE8; break;
+      case Iop_Not8 ... Iop_Not64:
+         str = "Not"; base = Iop_Not8; break;
+      /* other cases must explicitly "return;" */
+      case Iop_8Uto16:   vex_printf("8Uto16");  return;
+      case Iop_8Uto32:   vex_printf("8Uto32");  return;
+      case Iop_16Uto32:  vex_printf("16Uto32"); return;
+      case Iop_8Sto16:   vex_printf("8Sto16");  return;
+      case Iop_8Sto32:   vex_printf("8Sto32");  return;
+      case Iop_16Sto32:  vex_printf("16Sto32"); return;
+      case Iop_32Sto64:  vex_printf("32Sto64"); return;
+      case Iop_32Uto64:  vex_printf("32Uto64"); return;
+      case Iop_32to8:    vex_printf("32to8");   return;
+      case Iop_16Uto64:  vex_printf("16Uto64"); return;
+      case Iop_16Sto64:  vex_printf("16Sto64"); return;
+      case Iop_8Uto64:   vex_printf("8Uto64"); return;
+      case Iop_8Sto64:   vex_printf("8Sto64"); return;
+      case Iop_64to16:   vex_printf("64to16"); return;
+      case Iop_64to8:    vex_printf("64to8");  return;
+
+      case Iop_Not1:     vex_printf("Not1");    return;
+      case Iop_32to1:    vex_printf("32to1");   return;
+      case Iop_64to1:    vex_printf("64to1");   return;
+      case Iop_1Uto8:    vex_printf("1Uto8");   return;
+      case Iop_1Uto32:   vex_printf("1Uto32");  return;
+      case Iop_1Uto64:   vex_printf("1Uto64");  return;
+      case Iop_1Sto8:    vex_printf("1Sto8");  return;
+      case Iop_1Sto16:   vex_printf("1Sto16");  return;
+      case Iop_1Sto32:   vex_printf("1Sto32");  return;
+      case Iop_1Sto64:   vex_printf("1Sto64");  return;
+
+      case Iop_MullS8:   vex_printf("MullS8");  return;
+      case Iop_MullS16:  vex_printf("MullS16"); return;
+      case Iop_MullS32:  vex_printf("MullS32"); return;
+      case Iop_MullS64:  vex_printf("MullS64"); return;
+      case Iop_MullU8:   vex_printf("MullU8");  return;
+      case Iop_MullU16:  vex_printf("MullU16"); return;
+      case Iop_MullU32:  vex_printf("MullU32"); return;
+      case Iop_MullU64:  vex_printf("MullU64"); return;
+
+      case Iop_Clz64:    vex_printf("Clz64"); return;
+      case Iop_Clz32:    vex_printf("Clz32"); return;
+      case Iop_Ctz64:    vex_printf("Ctz64"); return;
+      case Iop_Ctz32:    vex_printf("Ctz32"); return;
+
+      case Iop_CmpLT32S: vex_printf("CmpLT32S"); return;
+      case Iop_CmpLE32S: vex_printf("CmpLE32S"); return;
+      case Iop_CmpLT32U: vex_printf("CmpLT32U"); return;
+      case Iop_CmpLE32U: vex_printf("CmpLE32U"); return;
+
+      case Iop_CmpLT64S: vex_printf("CmpLT64S"); return;
+      case Iop_CmpLE64S: vex_printf("CmpLE64S"); return;
+      case Iop_CmpLT64U: vex_printf("CmpLT64U"); return;
+      case Iop_CmpLE64U: vex_printf("CmpLE64U"); return;
+
+      case Iop_CmpNEZ8:  vex_printf("CmpNEZ8"); return;
+      case Iop_CmpNEZ16: vex_printf("CmpNEZ16"); return;
+      case Iop_CmpNEZ32: vex_printf("CmpNEZ32"); return;
+      case Iop_CmpNEZ64: vex_printf("CmpNEZ64"); return;
+
+      case Iop_CmpwNEZ32: vex_printf("CmpwNEZ32"); return;
+      case Iop_CmpwNEZ64: vex_printf("CmpwNEZ64"); return;
+
+      case Iop_Left8:  vex_printf("Left8"); return;
+      case Iop_Left16: vex_printf("Left16"); return;
+      case Iop_Left32: vex_printf("Left32"); return;
+      case Iop_Left64: vex_printf("Left64"); return;
+      case Iop_Max32U: vex_printf("Max32U"); return;
+
+      case Iop_CmpORD32U: vex_printf("CmpORD32U"); return;
+      case Iop_CmpORD32S: vex_printf("CmpORD32S"); return;
+
+      case Iop_CmpORD64U: vex_printf("CmpORD64U"); return;
+      case Iop_CmpORD64S: vex_printf("CmpORD64S"); return;
+
+      case Iop_DivU32: vex_printf("DivU32"); return;
+      case Iop_DivS32: vex_printf("DivS32"); return;
+      case Iop_DivU64: vex_printf("DivU64"); return;
+      case Iop_DivS64: vex_printf("DivS64"); return;
+      case Iop_DivU64E: vex_printf("DivU64E"); return;
+      case Iop_DivS64E: vex_printf("DivS64E"); return;
+      case Iop_DivU32E: vex_printf("DivU32E"); return;
+      case Iop_DivS32E: vex_printf("DivS32E"); return;
+
+      case Iop_DivModU64to32: vex_printf("DivModU64to32"); return;
+      case Iop_DivModS64to32: vex_printf("DivModS64to32"); return;
+
+      case Iop_DivModU128to64: vex_printf("DivModU128to64"); return;
+      case Iop_DivModS128to64: vex_printf("DivModS128to64"); return;
+
+      case Iop_DivModS64to64: vex_printf("DivModS64to64"); return;
+
+      case Iop_16HIto8:  vex_printf("16HIto8"); return;
+      case Iop_16to8:    vex_printf("16to8");   return;
+      case Iop_8HLto16:  vex_printf("8HLto16"); return;
+
+      case Iop_32HIto16: vex_printf("32HIto16"); return;
+      case Iop_32to16:   vex_printf("32to16");   return;
+      case Iop_16HLto32: vex_printf("16HLto32"); return;
+
+      case Iop_64HIto32: vex_printf("64HIto32"); return;
+      case Iop_64to32:   vex_printf("64to32");   return;
+      case Iop_32HLto64: vex_printf("32HLto64"); return;
+
+      case Iop_128HIto64: vex_printf("128HIto64"); return;
+      case Iop_128to64:   vex_printf("128to64");   return;
+      case Iop_64HLto128: vex_printf("64HLto128"); return;
+
+      case Iop_CmpF32:    vex_printf("CmpF32");    return;
+      case Iop_F32toI32S: vex_printf("F32toI32S");  return;
+      case Iop_F32toI64S: vex_printf("F32toI64S");  return;
+      case Iop_I32StoF32: vex_printf("I32StoF32");  return;
+      case Iop_I64StoF32: vex_printf("I64StoF32");  return;
+
+      case Iop_AddF64:    vex_printf("AddF64"); return;
+      case Iop_SubF64:    vex_printf("SubF64"); return;
+      case Iop_MulF64:    vex_printf("MulF64"); return;
+      case Iop_DivF64:    vex_printf("DivF64"); return;
+      case Iop_AddF64r32: vex_printf("AddF64r32"); return;
+      case Iop_SubF64r32: vex_printf("SubF64r32"); return;
+      case Iop_MulF64r32: vex_printf("MulF64r32"); return;
+      case Iop_DivF64r32: vex_printf("DivF64r32"); return;
+      case Iop_AddF32:    vex_printf("AddF32"); return;
+      case Iop_SubF32:    vex_printf("SubF32"); return;
+      case Iop_MulF32:    vex_printf("MulF32"); return;
+      case Iop_DivF32:    vex_printf("DivF32"); return;
+
+        /* 128 bit floating point */
+      case Iop_AddF128:   vex_printf("AddF128");  return;
+      case Iop_SubF128:   vex_printf("SubF128");  return;
+      case Iop_MulF128:   vex_printf("MulF128");  return;
+      case Iop_DivF128:   vex_printf("DivF128");  return;
+      case Iop_AbsF128:   vex_printf("AbsF128");  return;
+      case Iop_NegF128:   vex_printf("NegF128");  return;
+      case Iop_SqrtF128:  vex_printf("SqrtF128"); return;
+      case Iop_CmpF128:   vex_printf("CmpF128");  return;
+
+      case Iop_F64HLtoF128: vex_printf("F64HLtoF128"); return;
+      case Iop_F128HItoF64: vex_printf("F128HItoF64"); return;
+      case Iop_F128LOtoF64: vex_printf("F128LOtoF64"); return;
+      case Iop_I32StoF128: vex_printf("I32StoF128"); return;
+      case Iop_I64StoF128: vex_printf("I64StoF128"); return;
+      case Iop_I32UtoF128: vex_printf("I32UtoF128"); return;
+      case Iop_I64UtoF128: vex_printf("I64UtoF128"); return;
+      case Iop_F128toI32S: vex_printf("F128toI32S"); return;
+      case Iop_F128toI64S: vex_printf("F128toI64S"); return;
+      case Iop_F128toI32U: vex_printf("F128toI32U"); return;
+      case Iop_F128toI64U: vex_printf("F128toI64U"); return;
+      case Iop_F32toF128:  vex_printf("F32toF128");  return;
+      case Iop_F64toF128:  vex_printf("F64toF128");  return;
+      case Iop_F128toF64:  vex_printf("F128toF64");  return;
+      case Iop_F128toF32:  vex_printf("F128toF32");  return;
+
+        /* s390 specific */
+      case Iop_MAddF32:    vex_printf("s390_MAddF32"); return;
+      case Iop_MSubF32:    vex_printf("s390_MSubF32"); return;
+
+      case Iop_ScaleF64:      vex_printf("ScaleF64"); return;
+      case Iop_AtanF64:       vex_printf("AtanF64"); return;
+      case Iop_Yl2xF64:       vex_printf("Yl2xF64"); return;
+      case Iop_Yl2xp1F64:     vex_printf("Yl2xp1F64"); return;
+      case Iop_PRemF64:       vex_printf("PRemF64"); return;
+      case Iop_PRemC3210F64:  vex_printf("PRemC3210F64"); return;
+      case Iop_PRem1F64:      vex_printf("PRem1F64"); return;
+      case Iop_PRem1C3210F64: vex_printf("PRem1C3210F64"); return;
+      case Iop_NegF64:        vex_printf("NegF64"); return;
+      case Iop_AbsF64:        vex_printf("AbsF64"); return;
+      case Iop_NegF32:        vex_printf("NegF32"); return;
+      case Iop_AbsF32:        vex_printf("AbsF32"); return;
+      case Iop_SqrtF64:       vex_printf("SqrtF64"); return;
+      case Iop_SqrtF32:       vex_printf("SqrtF32"); return;
+      case Iop_SinF64:    vex_printf("SinF64"); return;
+      case Iop_CosF64:    vex_printf("CosF64"); return;
+      case Iop_TanF64:    vex_printf("TanF64"); return;
+      case Iop_2xm1F64:   vex_printf("2xm1F64"); return;
+
+      case Iop_MAddF64:    vex_printf("MAddF64"); return;
+      case Iop_MSubF64:    vex_printf("MSubF64"); return;
+      case Iop_MAddF64r32: vex_printf("MAddF64r32"); return;
+      case Iop_MSubF64r32: vex_printf("MSubF64r32"); return;
+
+      case Iop_RSqrtEst5GoodF64: vex_printf("RSqrtEst5GoodF64"); return;
+      case Iop_RoundF64toF64_NEAREST: vex_printf("RoundF64toF64_NEAREST"); return;
+      case Iop_RoundF64toF64_NegINF: vex_printf("RoundF64toF64_NegINF"); return;
+      case Iop_RoundF64toF64_PosINF: vex_printf("RoundF64toF64_PosINF"); return;
+      case Iop_RoundF64toF64_ZERO: vex_printf("RoundF64toF64_ZERO"); return;
+
+      case Iop_TruncF64asF32: vex_printf("TruncF64asF32"); return;
+
+      case Iop_RecpExpF64: vex_printf("RecpExpF64"); return;
+      case Iop_RecpExpF32: vex_printf("RecpExpF32"); return;
+
+      case Iop_F16toF64: vex_printf("F16toF64"); return;
+      case Iop_F64toF16: vex_printf("F64toF16"); return;
+      case Iop_F16toF32: vex_printf("F16toF32"); return;
+      case Iop_F32toF16: vex_printf("F32toF16"); return;
+
+      case Iop_QAdd32S: vex_printf("QAdd32S"); return;
+      case Iop_QSub32S: vex_printf("QSub32S"); return; 
+      case Iop_Add16x2:   vex_printf("Add16x2"); return;
+      case Iop_Sub16x2:   vex_printf("Sub16x2"); return;
+      case Iop_QAdd16Sx2: vex_printf("QAdd16Sx2"); return;
+      case Iop_QAdd16Ux2: vex_printf("QAdd16Ux2"); return;
+      case Iop_QSub16Sx2: vex_printf("QSub16Sx2"); return;
+      case Iop_QSub16Ux2: vex_printf("QSub16Ux2"); return;
+      case Iop_HAdd16Ux2: vex_printf("HAdd16Ux2"); return;
+      case Iop_HAdd16Sx2: vex_printf("HAdd16Sx2"); return;
+      case Iop_HSub16Ux2: vex_printf("HSub16Ux2"); return;
+      case Iop_HSub16Sx2: vex_printf("HSub16Sx2"); return;
+
+      case Iop_Add8x4:   vex_printf("Add8x4"); return;
+      case Iop_Sub8x4:   vex_printf("Sub8x4"); return;
+      case Iop_QAdd8Sx4: vex_printf("QAdd8Sx4"); return;
+      case Iop_QAdd8Ux4: vex_printf("QAdd8Ux4"); return;
+      case Iop_QSub8Sx4: vex_printf("QSub8Sx4"); return;
+      case Iop_QSub8Ux4: vex_printf("QSub8Ux4"); return;
+      case Iop_HAdd8Ux4: vex_printf("HAdd8Ux4"); return;
+      case Iop_HAdd8Sx4: vex_printf("HAdd8Sx4"); return;
+      case Iop_HSub8Ux4: vex_printf("HSub8Ux4"); return;
+      case Iop_HSub8Sx4: vex_printf("HSub8Sx4"); return;
+      case Iop_Sad8Ux4:  vex_printf("Sad8Ux4"); return;
+
+      case Iop_CmpNEZ16x2: vex_printf("CmpNEZ16x2"); return;
+      case Iop_CmpNEZ8x4:  vex_printf("CmpNEZ8x4"); return;
+
+      case Iop_CmpF64:    vex_printf("CmpF64"); return;
+
+      case Iop_F64toI16S: vex_printf("F64toI16S"); return;
+      case Iop_F64toI32S: vex_printf("F64toI32S"); return;
+      case Iop_F64toI64S: vex_printf("F64toI64S"); return;
+      case Iop_F64toI64U: vex_printf("F64toI64U"); return;
+      case Iop_F32toI32U: vex_printf("F32toI32U");  return;
+      case Iop_F32toI64U: vex_printf("F32toI64U");  return;
+
+      case Iop_F64toI32U: vex_printf("F64toI32U"); return;
+
+      case Iop_I32StoF64: vex_printf("I32StoF64"); return;
+      case Iop_I64StoF64: vex_printf("I64StoF64"); return;
+      case Iop_I64UtoF64: vex_printf("I64UtoF64"); return;
+      case Iop_I32UtoF32: vex_printf("I32UtoF32"); return;
+      case Iop_I64UtoF32: vex_printf("I64UtoF32"); return;
+
+      case Iop_I32UtoF64: vex_printf("I32UtoF64"); return;
+
+      case Iop_F32toF64: vex_printf("F32toF64"); return;
+      case Iop_F64toF32: vex_printf("F64toF32"); return;
+
+      case Iop_RoundF64toInt: vex_printf("RoundF64toInt"); return;
+      case Iop_RoundF32toInt: vex_printf("RoundF32toInt"); return;
+      case Iop_RoundF64toF32: vex_printf("RoundF64toF32"); return;
+
+      case Iop_ReinterpF64asI64: vex_printf("ReinterpF64asI64"); return;
+      case Iop_ReinterpI64asF64: vex_printf("ReinterpI64asF64"); return;
+      case Iop_ReinterpF32asI32: vex_printf("ReinterpF32asI32"); return;
+      case Iop_ReinterpI32asF32: vex_printf("ReinterpI32asF32"); return;
+
+      case Iop_I32UtoFx4: vex_printf("I32UtoFx4"); return;
+      case Iop_I32StoFx4: vex_printf("I32StoFx4"); return;
+
+      case Iop_F32toF16x4: vex_printf("F32toF16x4"); return;
+      case Iop_F16toF32x4: vex_printf("F16toF32x4"); return;
+
+      case Iop_RSqrtEst32Fx4: vex_printf("RSqrtEst32Fx4"); return;
+      case Iop_RSqrtEst32Ux4: vex_printf("RSqrtEst32Ux4"); return;
+      case Iop_RSqrtEst32Fx2: vex_printf("RSqrtEst32Fx2"); return;
+      case Iop_RSqrtEst32Ux2: vex_printf("RSqrtEst32Ux2"); return;
+
+      case Iop_QFtoI32Ux4_RZ: vex_printf("QFtoI32Ux4_RZ"); return;
+      case Iop_QFtoI32Sx4_RZ: vex_printf("QFtoI32Sx4_RZ"); return;
+
+      case Iop_FtoI32Ux4_RZ: vex_printf("FtoI32Ux4_RZ"); return;
+      case Iop_FtoI32Sx4_RZ: vex_printf("FtoI32Sx4_RZ"); return;
+
+      case Iop_I32UtoFx2: vex_printf("I32UtoFx2"); return;
+      case Iop_I32StoFx2: vex_printf("I32StoFx2"); return;
+
+      case Iop_FtoI32Ux2_RZ: vex_printf("FtoI32Ux2_RZ"); return;
+      case Iop_FtoI32Sx2_RZ: vex_printf("FtoI32Sx2_RZ"); return;
+
+      case Iop_RoundF32x4_RM: vex_printf("RoundF32x4_RM"); return;
+      case Iop_RoundF32x4_RP: vex_printf("RoundF32x4_RP"); return;
+      case Iop_RoundF32x4_RN: vex_printf("RoundF32x4_RN"); return;
+      case Iop_RoundF32x4_RZ: vex_printf("RoundF32x4_RZ"); return;
+
+      case Iop_Abs8x8: vex_printf("Abs8x8"); return;
+      case Iop_Abs16x4: vex_printf("Abs16x4"); return;
+      case Iop_Abs32x2: vex_printf("Abs32x2"); return;
+      case Iop_Add8x8: vex_printf("Add8x8"); return;
+      case Iop_Add16x4: vex_printf("Add16x4"); return;
+      case Iop_Add32x2: vex_printf("Add32x2"); return;
+      case Iop_QAdd8Ux8: vex_printf("QAdd8Ux8"); return;
+      case Iop_QAdd16Ux4: vex_printf("QAdd16Ux4"); return;
+      case Iop_QAdd32Ux2: vex_printf("QAdd32Ux2"); return;
+      case Iop_QAdd64Ux1: vex_printf("QAdd64Ux1"); return;
+      case Iop_QAdd8Sx8: vex_printf("QAdd8Sx8"); return;
+      case Iop_QAdd16Sx4: vex_printf("QAdd16Sx4"); return;
+      case Iop_QAdd32Sx2: vex_printf("QAdd32Sx2"); return;
+      case Iop_QAdd64Sx1: vex_printf("QAdd64Sx1"); return;
+      case Iop_PwAdd8x8: vex_printf("PwAdd8x8"); return;
+      case Iop_PwAdd16x4: vex_printf("PwAdd16x4"); return;
+      case Iop_PwAdd32x2: vex_printf("PwAdd32x2"); return;
+      case Iop_PwAdd32Fx2: vex_printf("PwAdd32Fx2"); return;
+      case Iop_PwAddL8Ux8: vex_printf("PwAddL8Ux8"); return;
+      case Iop_PwAddL16Ux4: vex_printf("PwAddL16Ux4"); return;
+      case Iop_PwAddL32Ux2: vex_printf("PwAddL32Ux2"); return;
+      case Iop_PwAddL8Sx8: vex_printf("PwAddL8Sx8"); return;
+      case Iop_PwAddL16Sx4: vex_printf("PwAddL16Sx4"); return;
+      case Iop_PwAddL32Sx2: vex_printf("PwAddL32Sx2"); return;
+      case Iop_Sub8x8: vex_printf("Sub8x8"); return;
+      case Iop_Sub16x4: vex_printf("Sub16x4"); return;
+      case Iop_Sub32x2: vex_printf("Sub32x2"); return;
+      case Iop_QSub8Ux8: vex_printf("QSub8Ux8"); return;
+      case Iop_QSub16Ux4: vex_printf("QSub16Ux4"); return;
+      case Iop_QSub32Ux2: vex_printf("QSub32Ux2"); return;
+      case Iop_QSub64Ux1: vex_printf("QSub64Ux1"); return;
+      case Iop_QSub8Sx8: vex_printf("QSub8Sx8"); return;
+      case Iop_QSub16Sx4: vex_printf("QSub16Sx4"); return;
+      case Iop_QSub32Sx2: vex_printf("QSub32Sx2"); return;
+      case Iop_QSub64Sx1: vex_printf("QSub64Sx1"); return;
+      case Iop_Mul8x8: vex_printf("Mul8x8"); return;
+      case Iop_Mul16x4: vex_printf("Mul16x4"); return;
+      case Iop_Mul32x2: vex_printf("Mul32x2"); return;
+      case Iop_Mul32Fx2: vex_printf("Mul32Fx2"); return;
+      case Iop_PolynomialMul8x8: vex_printf("PolynomialMul8x8"); return;
+      case Iop_MulHi16Ux4: vex_printf("MulHi16Ux4"); return;
+      case Iop_MulHi16Sx4: vex_printf("MulHi16Sx4"); return;
+      case Iop_QDMulHi16Sx4: vex_printf("QDMulHi16Sx4"); return;
+      case Iop_QDMulHi32Sx2: vex_printf("QDMulHi32Sx2"); return;
+      case Iop_QRDMulHi16Sx4: vex_printf("QRDMulHi16Sx4"); return;
+      case Iop_QRDMulHi32Sx2: vex_printf("QRDMulHi32Sx2"); return;
+      case Iop_QDMull16Sx4: vex_printf("QDMull16Sx4"); return;
+      case Iop_QDMull32Sx2: vex_printf("QDMull32Sx2"); return;
+      case Iop_Avg8Ux8: vex_printf("Avg8Ux8"); return;
+      case Iop_Avg16Ux4: vex_printf("Avg16Ux4"); return;
+      case Iop_Max8Sx8: vex_printf("Max8Sx8"); return;
+      case Iop_Max16Sx4: vex_printf("Max16Sx4"); return;
+      case Iop_Max32Sx2: vex_printf("Max32Sx2"); return;
+      case Iop_Max8Ux8: vex_printf("Max8Ux8"); return;
+      case Iop_Max16Ux4: vex_printf("Max16Ux4"); return;
+      case Iop_Max32Ux2: vex_printf("Max32Ux2"); return;
+      case Iop_Min8Sx8: vex_printf("Min8Sx8"); return;
+      case Iop_Min16Sx4: vex_printf("Min16Sx4"); return;
+      case Iop_Min32Sx2: vex_printf("Min32Sx2"); return;
+      case Iop_Min8Ux8: vex_printf("Min8Ux8"); return;
+      case Iop_Min16Ux4: vex_printf("Min16Ux4"); return;
+      case Iop_Min32Ux2: vex_printf("Min32Ux2"); return;
+      case Iop_PwMax8Sx8: vex_printf("PwMax8Sx8"); return;
+      case Iop_PwMax16Sx4: vex_printf("PwMax16Sx4"); return;
+      case Iop_PwMax32Sx2: vex_printf("PwMax32Sx2"); return;
+      case Iop_PwMax8Ux8: vex_printf("PwMax8Ux8"); return;
+      case Iop_PwMax16Ux4: vex_printf("PwMax16Ux4"); return;
+      case Iop_PwMax32Ux2: vex_printf("PwMax32Ux2"); return;
+      case Iop_PwMin8Sx8: vex_printf("PwMin8Sx8"); return;
+      case Iop_PwMin16Sx4: vex_printf("PwMin16Sx4"); return;
+      case Iop_PwMin32Sx2: vex_printf("PwMin32Sx2"); return;
+      case Iop_PwMin8Ux8: vex_printf("PwMin8Ux8"); return;
+      case Iop_PwMin16Ux4: vex_printf("PwMin16Ux4"); return;
+      case Iop_PwMin32Ux2: vex_printf("PwMin32Ux2"); return;
+      case Iop_CmpEQ8x8: vex_printf("CmpEQ8x8"); return;
+      case Iop_CmpEQ16x4: vex_printf("CmpEQ16x4"); return;
+      case Iop_CmpEQ32x2: vex_printf("CmpEQ32x2"); return;
+      case Iop_CmpGT8Ux8: vex_printf("CmpGT8Ux8"); return;
+      case Iop_CmpGT16Ux4: vex_printf("CmpGT16Ux4"); return;
+      case Iop_CmpGT32Ux2: vex_printf("CmpGT32Ux2"); return;
+      case Iop_CmpGT8Sx8: vex_printf("CmpGT8Sx8"); return;
+      case Iop_CmpGT16Sx4: vex_printf("CmpGT16Sx4"); return;
+      case Iop_CmpGT32Sx2: vex_printf("CmpGT32Sx2"); return;
+      case Iop_Cnt8x8: vex_printf("Cnt8x8"); return;
+      case Iop_Clz8x8: vex_printf("Clz8x8"); return;
+      case Iop_Clz16x4: vex_printf("Clz16x4"); return;
+      case Iop_Clz32x2: vex_printf("Clz32x2"); return;
+      case Iop_Cls8x8: vex_printf("Cls8x8"); return;
+      case Iop_Cls16x4: vex_printf("Cls16x4"); return;
+      case Iop_Cls32x2: vex_printf("Cls32x2"); return;
+      case Iop_ShlN8x8: vex_printf("ShlN8x8"); return;
+      case Iop_ShlN16x4: vex_printf("ShlN16x4"); return;
+      case Iop_ShlN32x2: vex_printf("ShlN32x2"); return;
+      case Iop_ShrN8x8: vex_printf("ShrN8x8"); return;
+      case Iop_ShrN16x4: vex_printf("ShrN16x4"); return;
+      case Iop_ShrN32x2: vex_printf("ShrN32x2"); return;
+      case Iop_SarN8x8: vex_printf("SarN8x8"); return;
+      case Iop_SarN16x4: vex_printf("SarN16x4"); return;
+      case Iop_SarN32x2: vex_printf("SarN32x2"); return;
+      case Iop_QNarrowBin16Sto8Ux8: vex_printf("QNarrowBin16Sto8Ux8"); return;
+      case Iop_QNarrowBin16Sto8Sx8: vex_printf("QNarrowBin16Sto8Sx8"); return;
+      case Iop_QNarrowBin32Sto16Sx4: vex_printf("QNarrowBin32Sto16Sx4"); return;
+      case Iop_QNarrowBin64Sto32Sx4: vex_printf("QNarrowBin64Sto32Sx4"); return;
+      case Iop_QNarrowBin64Uto32Ux4: vex_printf("QNarrowBin64Uto32Ux4"); return;
+      case Iop_NarrowBin16to8x8: vex_printf("NarrowBin16to8x8"); return;
+      case Iop_NarrowBin32to16x4: vex_printf("NarrowBin32to16x4"); return;
+      case Iop_NarrowBin64to32x4: vex_printf("NarrowBin64to32x4"); return;
+      case Iop_InterleaveHI8x8: vex_printf("InterleaveHI8x8"); return;
+      case Iop_InterleaveHI16x4: vex_printf("InterleaveHI16x4"); return;
+      case Iop_InterleaveHI32x2: vex_printf("InterleaveHI32x2"); return;
+      case Iop_InterleaveLO8x8: vex_printf("InterleaveLO8x8"); return;
+      case Iop_InterleaveLO16x4: vex_printf("InterleaveLO16x4"); return;
+      case Iop_InterleaveLO32x2: vex_printf("InterleaveLO32x2"); return;
+      case Iop_CatOddLanes8x8: vex_printf("CatOddLanes8x8"); return;
+      case Iop_CatOddLanes16x4: vex_printf("CatOddLanes16x4"); return;
+      case Iop_CatEvenLanes8x8: vex_printf("CatEvenLanes8x8"); return;
+      case Iop_CatEvenLanes16x4: vex_printf("CatEvenLanes16x4"); return;
+      case Iop_InterleaveOddLanes8x8: vex_printf("InterleaveOddLanes8x8"); return;
+      case Iop_InterleaveOddLanes16x4: vex_printf("InterleaveOddLanes16x4"); return;
+      case Iop_InterleaveEvenLanes8x8: vex_printf("InterleaveEvenLanes8x8"); return;
+      case Iop_InterleaveEvenLanes16x4: vex_printf("InterleaveEvenLanes16x4"); return;
+      case Iop_Shl8x8: vex_printf("Shl8x8"); return;
+      case Iop_Shl16x4: vex_printf("Shl16x4"); return;
+      case Iop_Shl32x2: vex_printf("Shl32x2"); return;
+      case Iop_Shr8x8: vex_printf("Shr8x8"); return;
+      case Iop_Shr16x4: vex_printf("Shr16x4"); return;
+      case Iop_Shr32x2: vex_printf("Shr32x2"); return;
+      case Iop_QShl8x8: vex_printf("QShl8x8"); return;
+      case Iop_QShl16x4: vex_printf("QShl16x4"); return;
+      case Iop_QShl32x2: vex_printf("QShl32x2"); return;
+      case Iop_QShl64x1: vex_printf("QShl64x1"); return;
+      case Iop_QSal8x8: vex_printf("QSal8x8"); return;
+      case Iop_QSal16x4: vex_printf("QSal16x4"); return;
+      case Iop_QSal32x2: vex_printf("QSal32x2"); return;
+      case Iop_QSal64x1: vex_printf("QSal64x1"); return;
+      case Iop_QShlNsatUU8x8: vex_printf("QShlNsatUU8x8"); return;
+      case Iop_QShlNsatUU16x4: vex_printf("QShlNsatUU16x4"); return;
+      case Iop_QShlNsatUU32x2: vex_printf("QShlNsatUU32x2"); return;
+      case Iop_QShlNsatUU64x1: vex_printf("QShlNsatUU64x1"); return;
+      case Iop_QShlNsatSU8x8: vex_printf("QShlNsatSU8x8"); return;
+      case Iop_QShlNsatSU16x4: vex_printf("QShlNsatSU16x4"); return;
+      case Iop_QShlNsatSU32x2: vex_printf("QShlNsatSU32x2"); return;
+      case Iop_QShlNsatSU64x1: vex_printf("QShlNsatSU64x1"); return;
+      case Iop_QShlNsatSS8x8: vex_printf("QShlNsatSS8x8"); return;
+      case Iop_QShlNsatSS16x4: vex_printf("QShlNsatSS16x4"); return;
+      case Iop_QShlNsatSS32x2: vex_printf("QShlNsatSS32x2"); return;
+      case Iop_QShlNsatSS64x1: vex_printf("QShlNsatSS64x1"); return;
+      case Iop_Sar8x8: vex_printf("Sar8x8"); return;
+      case Iop_Sar16x4: vex_printf("Sar16x4"); return;
+      case Iop_Sar32x2: vex_printf("Sar32x2"); return;
+      case Iop_Sal8x8: vex_printf("Sal8x8"); return;
+      case Iop_Sal16x4: vex_printf("Sal16x4"); return;
+      case Iop_Sal32x2: vex_printf("Sal32x2"); return;
+      case Iop_Sal64x1: vex_printf("Sal64x1"); return;
+      case Iop_Perm8x8: vex_printf("Perm8x8"); return;
+      case Iop_Reverse8sIn16_x4: vex_printf("Reverse8sIn16_x4"); return;
+      case Iop_Reverse8sIn32_x2: vex_printf("Reverse8sIn32_x2"); return;
+      case Iop_Reverse16sIn32_x2: vex_printf("Reverse16sIn32_x2"); return;
+      case Iop_Reverse8sIn64_x1: vex_printf("Reverse8sIn64_x1"); return;
+      case Iop_Reverse16sIn64_x1: vex_printf("Reverse16sIn64_x1"); return;
+      case Iop_Reverse32sIn64_x1: vex_printf("Reverse32sIn64_x1"); return;
+      case Iop_Abs32Fx2: vex_printf("Abs32Fx2"); return;
+      case Iop_GetMSBs8x8: vex_printf("GetMSBs8x8"); return;
+      case Iop_GetMSBs8x16: vex_printf("GetMSBs8x16"); return;
+
+      case Iop_CmpNEZ32x2: vex_printf("CmpNEZ32x2"); return;
+      case Iop_CmpNEZ16x4: vex_printf("CmpNEZ16x4"); return;
+      case Iop_CmpNEZ8x8:  vex_printf("CmpNEZ8x8"); return;
+
+      case Iop_Add32Fx4:  vex_printf("Add32Fx4"); return;
+      case Iop_Add32Fx2:  vex_printf("Add32Fx2"); return;
+      case Iop_Add32F0x4: vex_printf("Add32F0x4"); return;
+      case Iop_Add64Fx2:  vex_printf("Add64Fx2"); return;
+      case Iop_Add64F0x2: vex_printf("Add64F0x2"); return;
+
+      case Iop_Div32Fx4:  vex_printf("Div32Fx4"); return;
+      case Iop_Div32F0x4: vex_printf("Div32F0x4"); return;
+      case Iop_Div64Fx2:  vex_printf("Div64Fx2"); return;
+      case Iop_Div64F0x2: vex_printf("Div64F0x2"); return;
+
+      case Iop_Max32Fx8:  vex_printf("Max32Fx8"); return;
+      case Iop_Max32Fx4:  vex_printf("Max32Fx4"); return;
+      case Iop_Max32Fx2:  vex_printf("Max32Fx2"); return;
+      case Iop_PwMax32Fx4:  vex_printf("PwMax32Fx4"); return;
+      case Iop_PwMax32Fx2:  vex_printf("PwMax32Fx2"); return;
+      case Iop_Max32F0x4: vex_printf("Max32F0x4"); return;
+      case Iop_Max64Fx4:  vex_printf("Max64Fx4"); return;
+      case Iop_Max64Fx2:  vex_printf("Max64Fx2"); return;
+      case Iop_Max64F0x2: vex_printf("Max64F0x2"); return;
+
+      case Iop_Min32Fx8:  vex_printf("Min32Fx8"); return;
+      case Iop_Min32Fx4:  vex_printf("Min32Fx4"); return;
+      case Iop_Min32Fx2:  vex_printf("Min32Fx2"); return;
+      case Iop_PwMin32Fx4:  vex_printf("PwMin32Fx4"); return;
+      case Iop_PwMin32Fx2:  vex_printf("PwMin32Fx2"); return;
+      case Iop_Min32F0x4: vex_printf("Min32F0x4"); return;
+      case Iop_Min64Fx4:  vex_printf("Min64Fx4"); return;
+      case Iop_Min64Fx2:  vex_printf("Min64Fx2"); return;
+      case Iop_Min64F0x2: vex_printf("Min64F0x2"); return;
+
+      case Iop_Mul32Fx4:  vex_printf("Mul32Fx4"); return;
+      case Iop_Mul32F0x4: vex_printf("Mul32F0x4"); return;
+      case Iop_Mul64Fx2:  vex_printf("Mul64Fx2"); return;
+      case Iop_Mul64F0x2: vex_printf("Mul64F0x2"); return;
+
+      case Iop_RecipEst32Ux2: vex_printf("RecipEst32Ux2"); return;
+      case Iop_RecipEst32Fx2: vex_printf("RecipEst32Fx2"); return;
+      case Iop_RecipEst32Fx4: vex_printf("RecipEst32Fx4"); return;
+      case Iop_RecipEst32Fx8: vex_printf("RecipEst32Fx8"); return;
+      case Iop_RecipEst32Ux4: vex_printf("RecipEst32Ux4"); return;
+      case Iop_RecipEst32F0x4: vex_printf("RecipEst32F0x4"); return;
+      case Iop_RecipStep32Fx2: vex_printf("RecipStep32Fx2"); return;
+      case Iop_RecipStep32Fx4: vex_printf("RecipStep32Fx4"); return;
+      case Iop_RecipEst64Fx2: vex_printf("RecipEst64Fx2"); return;
+      case Iop_RecipStep64Fx2: vex_printf("RecipStep64Fx2"); return;
+
+      case Iop_Abs32Fx4:  vex_printf("Abs32Fx4"); return;
+      case Iop_Abs64Fx2:  vex_printf("Abs64Fx2"); return;
+      case Iop_RSqrtStep32Fx4:  vex_printf("RSqrtStep32Fx4"); return;
+      case Iop_RSqrtStep64Fx2:  vex_printf("RSqrtStep64Fx2"); return;
+      case Iop_RSqrtStep32Fx2:  vex_printf("RSqrtStep32Fx2"); return;
+      case Iop_RSqrtEst64Fx2: vex_printf("RSqrtEst64Fx2"); return;
+
+      case Iop_RSqrtEst32F0x4: vex_printf("RSqrtEst32F0x4"); return;
+      case Iop_RSqrtEst32Fx8: vex_printf("RSqrtEst32Fx8"); return;
+
+      case Iop_Sqrt32Fx4:  vex_printf("Sqrt32Fx4"); return;
+      case Iop_Sqrt32F0x4: vex_printf("Sqrt32F0x4"); return;
+      case Iop_Sqrt64Fx2:  vex_printf("Sqrt64Fx2"); return;
+      case Iop_Sqrt64F0x2: vex_printf("Sqrt64F0x2"); return;
+      case Iop_Sqrt32Fx8:  vex_printf("Sqrt32Fx8"); return;
+      case Iop_Sqrt64Fx4:  vex_printf("Sqrt64Fx4"); return;
+ 
+      case Iop_Sub32Fx4:  vex_printf("Sub32Fx4"); return;
+      case Iop_Sub32Fx2:  vex_printf("Sub32Fx2"); return;
+      case Iop_Sub32F0x4: vex_printf("Sub32F0x4"); return;
+      case Iop_Sub64Fx2:  vex_printf("Sub64Fx2"); return;
+      case Iop_Sub64F0x2: vex_printf("Sub64F0x2"); return;
+
+      case Iop_CmpEQ32Fx4: vex_printf("CmpEQ32Fx4"); return;
+      case Iop_CmpLT32Fx4: vex_printf("CmpLT32Fx4"); return;
+      case Iop_CmpLE32Fx4: vex_printf("CmpLE32Fx4"); return;
+      case Iop_CmpGT32Fx4: vex_printf("CmpGT32Fx4"); return;
+      case Iop_CmpGE32Fx4: vex_printf("CmpGE32Fx4"); return;
+      case Iop_CmpUN32Fx4: vex_printf("CmpUN32Fx4"); return;
+      case Iop_CmpEQ64Fx2: vex_printf("CmpEQ64Fx2"); return;
+      case Iop_CmpLT64Fx2: vex_printf("CmpLT64Fx2"); return;
+      case Iop_CmpLE64Fx2: vex_printf("CmpLE64Fx2"); return;
+      case Iop_CmpUN64Fx2: vex_printf("CmpUN64Fx2"); return;
+      case Iop_CmpGT32Fx2: vex_printf("CmpGT32Fx2"); return;
+      case Iop_CmpEQ32Fx2: vex_printf("CmpEQ32Fx2"); return;
+      case Iop_CmpGE32Fx2: vex_printf("CmpGE32Fx2"); return;
+
+      case Iop_CmpEQ32F0x4: vex_printf("CmpEQ32F0x4"); return;
+      case Iop_CmpLT32F0x4: vex_printf("CmpLT32F0x4"); return;
+      case Iop_CmpLE32F0x4: vex_printf("CmpLE32F0x4"); return;
+      case Iop_CmpUN32F0x4: vex_printf("CmpUN32F0x4"); return;
+      case Iop_CmpEQ64F0x2: vex_printf("CmpEQ64F0x2"); return;
+      case Iop_CmpLT64F0x2: vex_printf("CmpLT64F0x2"); return;
+      case Iop_CmpLE64F0x2: vex_printf("CmpLE64F0x2"); return;
+      case Iop_CmpUN64F0x2: vex_printf("CmpUN64F0x2"); return;
+
+      case Iop_Neg64Fx2: vex_printf("Neg64Fx2"); return;
+      case Iop_Neg32Fx4: vex_printf("Neg32Fx4"); return;
+      case Iop_Neg32Fx2: vex_printf("Neg32Fx2"); return;
+
+      case Iop_V128to64:   vex_printf("V128to64");   return;
+      case Iop_V128HIto64: vex_printf("V128HIto64"); return;
+      case Iop_64HLtoV128: vex_printf("64HLtoV128"); return;
+
+      case Iop_64UtoV128:   vex_printf("64UtoV128"); return;
+      case Iop_SetV128lo64: vex_printf("SetV128lo64"); return;
+
+      case Iop_ZeroHI64ofV128:  vex_printf("ZeroHI64ofV128"); return;
+      case Iop_ZeroHI96ofV128:  vex_printf("ZeroHI96ofV128"); return;
+      case Iop_ZeroHI112ofV128: vex_printf("ZeroHI112ofV128"); return;
+      case Iop_ZeroHI120ofV128: vex_printf("ZeroHI120ofV128"); return;
+
+      case Iop_32UtoV128:   vex_printf("32UtoV128"); return;
+      case Iop_V128to32:    vex_printf("V128to32"); return;
+      case Iop_SetV128lo32: vex_printf("SetV128lo32"); return;
+
+      case Iop_Dup8x16: vex_printf("Dup8x16"); return;
+      case Iop_Dup16x8: vex_printf("Dup16x8"); return;
+      case Iop_Dup32x4: vex_printf("Dup32x4"); return;
+      case Iop_Dup8x8: vex_printf("Dup8x8"); return;
+      case Iop_Dup16x4: vex_printf("Dup16x4"); return;
+      case Iop_Dup32x2: vex_printf("Dup32x2"); return;
+
+      case Iop_NotV128:    vex_printf("NotV128"); return;
+      case Iop_AndV128:    vex_printf("AndV128"); return;
+      case Iop_OrV128:     vex_printf("OrV128");  return;
+      case Iop_XorV128:    vex_printf("XorV128"); return;
+
+      case Iop_CmpNEZ8x16: vex_printf("CmpNEZ8x16"); return;
+      case Iop_CmpNEZ16x8: vex_printf("CmpNEZ16x8"); return;
+      case Iop_CmpNEZ32x4: vex_printf("CmpNEZ32x4"); return;
+      case Iop_CmpNEZ64x2: vex_printf("CmpNEZ64x2"); return;
+
+      case Iop_Abs8x16: vex_printf("Abs8x16"); return;
+      case Iop_Abs16x8: vex_printf("Abs16x8"); return;
+      case Iop_Abs32x4: vex_printf("Abs32x4"); return;
+      case Iop_Abs64x2: vex_printf("Abs64x2"); return;
+
+      case Iop_Add8x16:   vex_printf("Add8x16"); return;
+      case Iop_Add16x8:   vex_printf("Add16x8"); return;
+      case Iop_Add32x4:   vex_printf("Add32x4"); return;
+      case Iop_Add64x2:   vex_printf("Add64x2"); return;
+      case Iop_QAdd8Ux16: vex_printf("QAdd8Ux16"); return;
+      case Iop_QAdd16Ux8: vex_printf("QAdd16Ux8"); return;
+      case Iop_QAdd32Ux4: vex_printf("QAdd32Ux4"); return;
+      case Iop_QAdd8Sx16: vex_printf("QAdd8Sx16"); return;
+      case Iop_QAdd16Sx8: vex_printf("QAdd16Sx8"); return;
+      case Iop_QAdd32Sx4: vex_printf("QAdd32Sx4"); return;
+      case Iop_QAdd64Ux2: vex_printf("QAdd64Ux2"); return;
+      case Iop_QAdd64Sx2: vex_printf("QAdd64Sx2"); return;
+
+      case Iop_QAddExtUSsatSS8x16: vex_printf("QAddExtUSsatSS8x16"); return;
+      case Iop_QAddExtUSsatSS16x8: vex_printf("QAddExtUSsatSS16x8"); return;
+      case Iop_QAddExtUSsatSS32x4: vex_printf("QAddExtUSsatSS32x4"); return;
+      case Iop_QAddExtUSsatSS64x2: vex_printf("QAddExtUSsatSS64x2"); return;
+      case Iop_QAddExtSUsatUU8x16: vex_printf("QAddExtSUsatUU8x16"); return;
+      case Iop_QAddExtSUsatUU16x8: vex_printf("QAddExtSUsatUU16x8"); return;
+      case Iop_QAddExtSUsatUU32x4: vex_printf("QAddExtSUsatUU32x4"); return;
+      case Iop_QAddExtSUsatUU64x2: vex_printf("QAddExtSUsatUU64x2"); return;
+
+      case Iop_PwAdd8x16: vex_printf("PwAdd8x16"); return;
+      case Iop_PwAdd16x8: vex_printf("PwAdd16x8"); return;
+      case Iop_PwAdd32x4: vex_printf("PwAdd32x4"); return;
+      case Iop_PwAddL8Ux16: vex_printf("PwAddL8Ux16"); return;
+      case Iop_PwAddL16Ux8: vex_printf("PwAddL16Ux8"); return;
+      case Iop_PwAddL32Ux4: vex_printf("PwAddL32Ux4"); return;
+      case Iop_PwAddL8Sx16: vex_printf("PwAddL8Sx16"); return;
+      case Iop_PwAddL16Sx8: vex_printf("PwAddL16Sx8"); return;
+      case Iop_PwAddL32Sx4: vex_printf("PwAddL32Sx4"); return;
+
+      case Iop_Sub8x16:   vex_printf("Sub8x16"); return;
+      case Iop_Sub16x8:   vex_printf("Sub16x8"); return;
+      case Iop_Sub32x4:   vex_printf("Sub32x4"); return;
+      case Iop_Sub64x2:   vex_printf("Sub64x2"); return;
+      case Iop_QSub8Ux16: vex_printf("QSub8Ux16"); return;
+      case Iop_QSub16Ux8: vex_printf("QSub16Ux8"); return;
+      case Iop_QSub32Ux4: vex_printf("QSub32Ux4"); return;
+      case Iop_QSub8Sx16: vex_printf("QSub8Sx16"); return;
+      case Iop_QSub16Sx8: vex_printf("QSub16Sx8"); return;
+      case Iop_QSub32Sx4: vex_printf("QSub32Sx4"); return;
+      case Iop_QSub64Ux2: vex_printf("QSub64Ux2"); return;
+      case Iop_QSub64Sx2: vex_printf("QSub64Sx2"); return;
+
+      case Iop_Mul8x16:    vex_printf("Mul8x16"); return;
+      case Iop_Mul16x8:    vex_printf("Mul16x8"); return;
+      case Iop_Mul32x4:    vex_printf("Mul32x4"); return;
+      case Iop_Mull8Ux8:    vex_printf("Mull8Ux8"); return;
+      case Iop_Mull8Sx8:    vex_printf("Mull8Sx8"); return;
+      case Iop_Mull16Ux4:    vex_printf("Mull16Ux4"); return;
+      case Iop_Mull16Sx4:    vex_printf("Mull16Sx4"); return;
+      case Iop_Mull32Ux2:    vex_printf("Mull32Ux2"); return;
+      case Iop_Mull32Sx2:    vex_printf("Mull32Sx2"); return;
+      case Iop_PolynomialMul8x16: vex_printf("PolynomialMul8x16"); return;
+      case Iop_PolynomialMull8x8: vex_printf("PolynomialMull8x8"); return;
+      case Iop_MulHi16Ux8: vex_printf("MulHi16Ux8"); return;
+      case Iop_MulHi32Ux4: vex_printf("MulHi32Ux4"); return;
+      case Iop_MulHi16Sx8: vex_printf("MulHi16Sx8"); return;
+      case Iop_MulHi32Sx4: vex_printf("MulHi32Sx4"); return;
+      case Iop_QDMulHi16Sx8: vex_printf("QDMulHi16Sx8"); return;
+      case Iop_QDMulHi32Sx4: vex_printf("QDMulHi32Sx4"); return;
+      case Iop_QRDMulHi16Sx8: vex_printf("QRDMulHi16Sx8"); return;
+      case Iop_QRDMulHi32Sx4: vex_printf("QRDMulHi32Sx4"); return;
+
+      case Iop_MullEven8Ux16: vex_printf("MullEven8Ux16"); return;
+      case Iop_MullEven16Ux8: vex_printf("MullEven16Ux8"); return;
+      case Iop_MullEven32Ux4: vex_printf("MullEven32Ux4"); return;
+      case Iop_MullEven8Sx16: vex_printf("MullEven8Sx16"); return;
+      case Iop_MullEven16Sx8: vex_printf("MullEven16Sx8"); return;
+      case Iop_MullEven32Sx4: vex_printf("MullEven32Sx4"); return;
+
+      case Iop_PolynomialMulAdd8x16:
+         vex_printf("PolynomialMulAdd8x16"); return;
+      case Iop_PolynomialMulAdd16x8:
+         vex_printf("PolynomialMulAdd16x8"); return;
+      case Iop_PolynomialMulAdd32x4:
+         vex_printf("PolynomialMulAdd32x4"); return;
+      case Iop_PolynomialMulAdd64x2:
+         vex_printf("PolynomialMulAdd64x2"); return;
+
+      case Iop_Avg8Ux16: vex_printf("Avg8Ux16"); return;
+      case Iop_Avg16Ux8: vex_printf("Avg16Ux8"); return;
+      case Iop_Avg32Ux4: vex_printf("Avg32Ux4"); return;
+      case Iop_Avg8Sx16: vex_printf("Avg8Sx16"); return;
+      case Iop_Avg16Sx8: vex_printf("Avg16Sx8"); return;
+      case Iop_Avg32Sx4: vex_printf("Avg32Sx4"); return;
+
+      case Iop_Max8Sx16: vex_printf("Max8Sx16"); return;
+      case Iop_Max16Sx8: vex_printf("Max16Sx8"); return;
+      case Iop_Max32Sx4: vex_printf("Max32Sx4"); return;
+      case Iop_Max64Sx2: vex_printf("Max64Sx2"); return;
+      case Iop_Max8Ux16: vex_printf("Max8Ux16"); return;
+      case Iop_Max16Ux8: vex_printf("Max16Ux8"); return;
+      case Iop_Max32Ux4: vex_printf("Max32Ux4"); return;
+      case Iop_Max64Ux2: vex_printf("Max64Ux2"); return;
+
+      case Iop_Min8Sx16: vex_printf("Min8Sx16"); return;
+      case Iop_Min16Sx8: vex_printf("Min16Sx8"); return;
+      case Iop_Min32Sx4: vex_printf("Min32Sx4"); return;
+      case Iop_Min64Sx2: vex_printf("Min64Sx2"); return;
+      case Iop_Min8Ux16: vex_printf("Min8Ux16"); return;
+      case Iop_Min16Ux8: vex_printf("Min16Ux8"); return;
+      case Iop_Min32Ux4: vex_printf("Min32Ux4"); return;
+      case Iop_Min64Ux2: vex_printf("Min64Ux2"); return;
+
+      case Iop_CmpEQ8x16:  vex_printf("CmpEQ8x16"); return;
+      case Iop_CmpEQ16x8:  vex_printf("CmpEQ16x8"); return;
+      case Iop_CmpEQ32x4:  vex_printf("CmpEQ32x4"); return;
+      case Iop_CmpEQ64x2:  vex_printf("CmpEQ64x2"); return;
+      case Iop_CmpGT8Sx16: vex_printf("CmpGT8Sx16"); return;
+      case Iop_CmpGT16Sx8: vex_printf("CmpGT16Sx8"); return;
+      case Iop_CmpGT32Sx4: vex_printf("CmpGT32Sx4"); return;
+      case Iop_CmpGT64Sx2: vex_printf("CmpGT64Sx2"); return;
+      case Iop_CmpGT8Ux16: vex_printf("CmpGT8Ux16"); return;
+      case Iop_CmpGT16Ux8: vex_printf("CmpGT16Ux8"); return;
+      case Iop_CmpGT32Ux4: vex_printf("CmpGT32Ux4"); return;
+      case Iop_CmpGT64Ux2: vex_printf("CmpGT64Ux2"); return;
+
+      case Iop_Cnt8x16: vex_printf("Cnt8x16"); return;
+      case Iop_Clz8x16: vex_printf("Clz8x16"); return;
+      case Iop_Clz16x8: vex_printf("Clz16x8"); return;
+      case Iop_Clz32x4: vex_printf("Clz32x4"); return;
+      case Iop_Clz64x2: vex_printf("Clz64x2"); return;
+      case Iop_Cls8x16: vex_printf("Cls8x16"); return;
+      case Iop_Cls16x8: vex_printf("Cls16x8"); return;
+      case Iop_Cls32x4: vex_printf("Cls32x4"); return;
+
+      case Iop_ShlV128: vex_printf("ShlV128"); return;
+      case Iop_ShrV128: vex_printf("ShrV128"); return;
+
+      case Iop_ShlN8x16: vex_printf("ShlN8x16"); return;
+      case Iop_ShlN16x8: vex_printf("ShlN16x8"); return;
+      case Iop_ShlN32x4: vex_printf("ShlN32x4"); return;
+      case Iop_ShlN64x2: vex_printf("ShlN64x2"); return;
+      case Iop_ShrN8x16: vex_printf("ShrN8x16"); return;
+      case Iop_ShrN16x8: vex_printf("ShrN16x8"); return;
+      case Iop_ShrN32x4: vex_printf("ShrN32x4"); return;
+      case Iop_ShrN64x2: vex_printf("ShrN64x2"); return;
+      case Iop_SarN8x16: vex_printf("SarN8x16"); return;
+      case Iop_SarN16x8: vex_printf("SarN16x8"); return;
+      case Iop_SarN32x4: vex_printf("SarN32x4"); return;
+      case Iop_SarN64x2: vex_printf("SarN64x2"); return;
+
+      case Iop_Shl8x16: vex_printf("Shl8x16"); return;
+      case Iop_Shl16x8: vex_printf("Shl16x8"); return;
+      case Iop_Shl32x4: vex_printf("Shl32x4"); return;
+      case Iop_Shl64x2: vex_printf("Shl64x2"); return;
+      case Iop_QSal8x16: vex_printf("QSal8x16"); return;
+      case Iop_QSal16x8: vex_printf("QSal16x8"); return;
+      case Iop_QSal32x4: vex_printf("QSal32x4"); return;
+      case Iop_QSal64x2: vex_printf("QSal64x2"); return;
+      case Iop_QShl8x16: vex_printf("QShl8x16"); return;
+      case Iop_QShl16x8: vex_printf("QShl16x8"); return;
+      case Iop_QShl32x4: vex_printf("QShl32x4"); return;
+      case Iop_QShl64x2: vex_printf("QShl64x2"); return;
+      case Iop_QShlNsatSS8x16: vex_printf("QShlNsatSS8x16"); return;
+      case Iop_QShlNsatSS16x8: vex_printf("QShlNsatSS16x8"); return;
+      case Iop_QShlNsatSS32x4: vex_printf("QShlNsatSS32x4"); return;
+      case Iop_QShlNsatSS64x2: vex_printf("QShlNsatSS64x2"); return;
+      case Iop_QShlNsatUU8x16: vex_printf("QShlNsatUU8x16"); return;
+      case Iop_QShlNsatUU16x8: vex_printf("QShlNsatUU16x8"); return;
+      case Iop_QShlNsatUU32x4: vex_printf("QShlNsatUU32x4"); return;
+      case Iop_QShlNsatUU64x2: vex_printf("QShlNsatUU64x2"); return;
+      case Iop_QShlNsatSU8x16: vex_printf("QShlNsatSU8x16"); return;
+      case Iop_QShlNsatSU16x8: vex_printf("QShlNsatSU16x8"); return;
+      case Iop_QShlNsatSU32x4: vex_printf("QShlNsatSU32x4"); return;
+      case Iop_QShlNsatSU64x2: vex_printf("QShlNsatSU64x2"); return;
+      case Iop_Shr8x16: vex_printf("Shr8x16"); return;
+      case Iop_Shr16x8: vex_printf("Shr16x8"); return;
+      case Iop_Shr32x4: vex_printf("Shr32x4"); return;
+      case Iop_Shr64x2: vex_printf("Shr64x2"); return;
+      case Iop_Sar8x16: vex_printf("Sar8x16"); return;
+      case Iop_Sar16x8: vex_printf("Sar16x8"); return;
+      case Iop_Sar32x4: vex_printf("Sar32x4"); return;
+      case Iop_Sar64x2: vex_printf("Sar64x2"); return;
+      case Iop_Sal8x16: vex_printf("Sal8x16"); return;
+      case Iop_Sal16x8: vex_printf("Sal16x8"); return;
+      case Iop_Sal32x4: vex_printf("Sal32x4"); return;
+      case Iop_Sal64x2: vex_printf("Sal64x2"); return;
+      case Iop_Rol8x16: vex_printf("Rol8x16"); return;
+      case Iop_Rol16x8: vex_printf("Rol16x8"); return;
+      case Iop_Rol32x4: vex_printf("Rol32x4"); return;
+      case Iop_Rol64x2: vex_printf("Rol64x2"); return;
+
+      case Iop_QandUQsh8x16: vex_printf("QandUQsh8x16"); return;
+      case Iop_QandUQsh16x8: vex_printf("QandUQsh16x8"); return;
+      case Iop_QandUQsh32x4: vex_printf("QandUQsh32x4"); return;
+      case Iop_QandUQsh64x2: vex_printf("QandUQsh64x2"); return;
+      case Iop_QandSQsh8x16: vex_printf("QandSQsh8x16"); return;
+      case Iop_QandSQsh16x8: vex_printf("QandSQsh16x8"); return;
+      case Iop_QandSQsh32x4: vex_printf("QandSQsh32x4"); return;
+      case Iop_QandSQsh64x2: vex_printf("QandSQsh64x2"); return;
+      case Iop_QandUQRsh8x16: vex_printf("QandUQRsh8x16"); return;
+      case Iop_QandUQRsh16x8: vex_printf("QandUQRsh16x8"); return;
+      case Iop_QandUQRsh32x4: vex_printf("QandUQRsh32x4"); return;
+      case Iop_QandUQRsh64x2: vex_printf("QandUQRsh64x2"); return;
+      case Iop_QandSQRsh8x16: vex_printf("QandSQRsh8x16"); return;
+      case Iop_QandSQRsh16x8: vex_printf("QandSQRsh16x8"); return;
+      case Iop_QandSQRsh32x4: vex_printf("QandSQRsh32x4"); return;
+      case Iop_QandSQRsh64x2: vex_printf("QandSQRsh64x2"); return;
+
+      case Iop_Sh8Sx16: vex_printf("Sh8Sx16"); return;
+      case Iop_Sh16Sx8: vex_printf("Sh16Sx8"); return;
+      case Iop_Sh32Sx4: vex_printf("Sh32Sx4"); return;
+      case Iop_Sh64Sx2: vex_printf("Sh64Sx2"); return;
+      case Iop_Sh8Ux16: vex_printf("Sh8Ux16"); return;
+      case Iop_Sh16Ux8: vex_printf("Sh16Ux8"); return;
+      case Iop_Sh32Ux4: vex_printf("Sh32Ux4"); return;
+      case Iop_Sh64Ux2: vex_printf("Sh64Ux2"); return;
+      case Iop_Rsh8Sx16: vex_printf("Rsh8Sx16"); return;
+      case Iop_Rsh16Sx8: vex_printf("Rsh16Sx8"); return;
+      case Iop_Rsh32Sx4: vex_printf("Rsh32Sx4"); return;
+      case Iop_Rsh64Sx2: vex_printf("Rsh64Sx2"); return;
+      case Iop_Rsh8Ux16: vex_printf("Rsh8Ux16"); return;
+      case Iop_Rsh16Ux8: vex_printf("Rsh16Ux8"); return;
+      case Iop_Rsh32Ux4: vex_printf("Rsh32Ux4"); return;
+      case Iop_Rsh64Ux2: vex_printf("Rsh64Ux2"); return;
+
+      case Iop_QandQShrNnarrow16Uto8Ux8:
+         vex_printf("QandQShrNnarrow16Uto8Ux8"); return;
+      case Iop_QandQShrNnarrow32Uto16Ux4:
+         vex_printf("QandQShrNnarrow32Uto16Ux4"); return;
+      case Iop_QandQShrNnarrow64Uto32Ux2:
+         vex_printf("QandQShrNnarrow64Uto32Ux2"); return;
+      case Iop_QandQSarNnarrow16Sto8Sx8:
+         vex_printf("QandQSarNnarrow16Sto8Sx8"); return;
+      case Iop_QandQSarNnarrow32Sto16Sx4:
+         vex_printf("QandQSarNnarrow32Sto16Sx4"); return;
+      case Iop_QandQSarNnarrow64Sto32Sx2:
+         vex_printf("QandQSarNnarrow64Sto32Sx2"); return;
+      case Iop_QandQSarNnarrow16Sto8Ux8:
+         vex_printf("QandQSarNnarrow16Sto8Ux8"); return;
+      case Iop_QandQSarNnarrow32Sto16Ux4:
+         vex_printf("QandQSarNnarrow32Sto16Ux4"); return;
+      case Iop_QandQSarNnarrow64Sto32Ux2:
+         vex_printf("QandQSarNnarrow64Sto32Ux2"); return;
+      case Iop_QandQRShrNnarrow16Uto8Ux8:
+         vex_printf("QandQRShrNnarrow16Uto8Ux8"); return;
+      case Iop_QandQRShrNnarrow32Uto16Ux4:
+         vex_printf("QandQRShrNnarrow32Uto16Ux4"); return;
+      case Iop_QandQRShrNnarrow64Uto32Ux2:
+         vex_printf("QandQRShrNnarrow64Uto32Ux2"); return;
+      case Iop_QandQRSarNnarrow16Sto8Sx8:
+         vex_printf("QandQRSarNnarrow16Sto8Sx8"); return;
+      case Iop_QandQRSarNnarrow32Sto16Sx4:
+         vex_printf("QandQRSarNnarrow32Sto16Sx4"); return;
+      case Iop_QandQRSarNnarrow64Sto32Sx2:
+         vex_printf("QandQRSarNnarrow64Sto32Sx2"); return;
+      case Iop_QandQRSarNnarrow16Sto8Ux8:
+         vex_printf("QandQRSarNnarrow16Sto8Ux8"); return;
+      case Iop_QandQRSarNnarrow32Sto16Ux4:
+         vex_printf("QandQRSarNnarrow32Sto16Ux4"); return;
+      case Iop_QandQRSarNnarrow64Sto32Ux2:
+         vex_printf("QandQRSarNnarrow64Sto32Ux2"); return;
+
+      case Iop_NarrowBin16to8x16:    vex_printf("NarrowBin16to8x16"); return;
+      case Iop_NarrowBin32to16x8:    vex_printf("NarrowBin32to16x8"); return;
+      case Iop_QNarrowBin16Uto8Ux16: vex_printf("QNarrowBin16Uto8Ux16"); return;
+      case Iop_QNarrowBin32Sto16Ux8: vex_printf("QNarrowBin32Sto16Ux8"); return;
+      case Iop_QNarrowBin16Sto8Ux16: vex_printf("QNarrowBin16Sto8Ux16"); return;
+      case Iop_QNarrowBin32Uto16Ux8: vex_printf("QNarrowBin32Uto16Ux8"); return;
+      case Iop_QNarrowBin16Sto8Sx16: vex_printf("QNarrowBin16Sto8Sx16"); return;
+      case Iop_QNarrowBin32Sto16Sx8: vex_printf("QNarrowBin32Sto16Sx8"); return;
+      case Iop_NarrowUn16to8x8:     vex_printf("NarrowUn16to8x8");  return;
+      case Iop_NarrowUn32to16x4:    vex_printf("NarrowUn32to16x4"); return;
+      case Iop_NarrowUn64to32x2:    vex_printf("NarrowUn64to32x2"); return;
+      case Iop_QNarrowUn16Uto8Ux8:  vex_printf("QNarrowUn16Uto8Ux8");  return;
+      case Iop_QNarrowUn32Uto16Ux4: vex_printf("QNarrowUn32Uto16Ux4"); return;
+      case Iop_QNarrowUn64Uto32Ux2: vex_printf("QNarrowUn64Uto32Ux2"); return;
+      case Iop_QNarrowUn16Sto8Sx8:  vex_printf("QNarrowUn16Sto8Sx8");  return;
+      case Iop_QNarrowUn32Sto16Sx4: vex_printf("QNarrowUn32Sto16Sx4"); return;
+      case Iop_QNarrowUn64Sto32Sx2: vex_printf("QNarrowUn64Sto32Sx2"); return;
+      case Iop_QNarrowUn16Sto8Ux8:  vex_printf("QNarrowUn16Sto8Ux8");  return;
+      case Iop_QNarrowUn32Sto16Ux4: vex_printf("QNarrowUn32Sto16Ux4"); return;
+      case Iop_QNarrowUn64Sto32Ux2: vex_printf("QNarrowUn64Sto32Ux2"); return;
+      case Iop_Widen8Uto16x8:  vex_printf("Widen8Uto16x8");  return;
+      case Iop_Widen16Uto32x4: vex_printf("Widen16Uto32x4"); return;
+      case Iop_Widen32Uto64x2: vex_printf("Widen32Uto64x2"); return;
+      case Iop_Widen8Sto16x8:  vex_printf("Widen8Sto16x8");  return;
+      case Iop_Widen16Sto32x4: vex_printf("Widen16Sto32x4"); return;
+      case Iop_Widen32Sto64x2: vex_printf("Widen32Sto64x2"); return;
+
+      case Iop_InterleaveHI8x16: vex_printf("InterleaveHI8x16"); return;
+      case Iop_InterleaveHI16x8: vex_printf("InterleaveHI16x8"); return;
+      case Iop_InterleaveHI32x4: vex_printf("InterleaveHI32x4"); return;
+      case Iop_InterleaveHI64x2: vex_printf("InterleaveHI64x2"); return;
+      case Iop_InterleaveLO8x16: vex_printf("InterleaveLO8x16"); return;
+      case Iop_InterleaveLO16x8: vex_printf("InterleaveLO16x8"); return;
+      case Iop_InterleaveLO32x4: vex_printf("InterleaveLO32x4"); return;
+      case Iop_InterleaveLO64x2: vex_printf("InterleaveLO64x2"); return;
+
+      case Iop_CatOddLanes8x16: vex_printf("CatOddLanes8x16"); return;
+      case Iop_CatOddLanes16x8: vex_printf("CatOddLanes16x8"); return;
+      case Iop_CatOddLanes32x4: vex_printf("CatOddLanes32x4"); return;
+      case Iop_CatEvenLanes8x16: vex_printf("CatEvenLanes8x16"); return;
+      case Iop_CatEvenLanes16x8: vex_printf("CatEvenLanes16x8"); return;
+      case Iop_CatEvenLanes32x4: vex_printf("CatEvenLanes32x4"); return;
+
+      case Iop_InterleaveOddLanes8x16: vex_printf("InterleaveOddLanes8x16"); return;
+      case Iop_InterleaveOddLanes16x8: vex_printf("InterleaveOddLanes16x8"); return;
+      case Iop_InterleaveOddLanes32x4: vex_printf("InterleaveOddLanes32x4"); return;
+      case Iop_InterleaveEvenLanes8x16: vex_printf("InterleaveEvenLanes8x16"); return;
+      case Iop_InterleaveEvenLanes16x8: vex_printf("InterleaveEvenLanes16x8"); return;
+      case Iop_InterleaveEvenLanes32x4: vex_printf("InterleaveEvenLanes32x4"); return;
+
+      case Iop_GetElem8x16: vex_printf("GetElem8x16"); return;
+      case Iop_GetElem16x8: vex_printf("GetElem16x8"); return;
+      case Iop_GetElem32x4: vex_printf("GetElem32x4"); return;
+      case Iop_GetElem64x2: vex_printf("GetElem64x2"); return;
+
+      case Iop_GetElem8x8: vex_printf("GetElem8x8"); return;
+      case Iop_GetElem16x4: vex_printf("GetElem16x4"); return;
+      case Iop_GetElem32x2: vex_printf("GetElem32x2"); return;
+      case Iop_SetElem8x8: vex_printf("SetElem8x8"); return;
+      case Iop_SetElem16x4: vex_printf("SetElem16x4"); return;
+      case Iop_SetElem32x2: vex_printf("SetElem32x2"); return;
+
+      case Iop_Slice64: vex_printf("Slice64"); return;
+      case Iop_SliceV128: vex_printf("SliceV128"); return;
+
+      case Iop_Perm8x16: vex_printf("Perm8x16"); return;
+      case Iop_Perm32x4: vex_printf("Perm32x4"); return;
+      case Iop_Reverse8sIn16_x8: vex_printf("Reverse8sIn16_x8"); return;
+      case Iop_Reverse8sIn32_x4: vex_printf("Reverse8sIn32_x4"); return;
+      case Iop_Reverse16sIn32_x4: vex_printf("Reverse16sIn32_x4"); return;
+      case Iop_Reverse8sIn64_x2: vex_printf("Reverse8sIn64_x2"); return;
+      case Iop_Reverse16sIn64_x2: vex_printf("Reverse16sIn64_x2"); return;
+      case Iop_Reverse32sIn64_x2: vex_printf("Reverse32sIn64_x2"); return;
+      case Iop_Reverse1sIn8_x16: vex_printf("Reverse1sIn8_x16"); return;
+
+      case Iop_F32ToFixed32Ux4_RZ: vex_printf("F32ToFixed32Ux4_RZ"); return;
+      case Iop_F32ToFixed32Sx4_RZ: vex_printf("F32ToFixed32Sx4_RZ"); return;
+      case Iop_Fixed32UToF32x4_RN: vex_printf("Fixed32UToF32x4_RN"); return;
+      case Iop_Fixed32SToF32x4_RN: vex_printf("Fixed32SToF32x4_RN"); return;
+      case Iop_F32ToFixed32Ux2_RZ: vex_printf("F32ToFixed32Ux2_RZ"); return;
+      case Iop_F32ToFixed32Sx2_RZ: vex_printf("F32ToFixed32Sx2_RZ"); return;
+      case Iop_Fixed32UToF32x2_RN: vex_printf("Fixed32UToF32x2_RN"); return;
+      case Iop_Fixed32SToF32x2_RN: vex_printf("Fixed32SToF32x2_RN"); return;
+
+      case Iop_D32toD64:  vex_printf("D32toD64");   return;
+      case Iop_D64toD32:  vex_printf("D64toD32");   return;
+      case Iop_AddD64:  vex_printf("AddD64");   return;
+      case Iop_SubD64:  vex_printf("SubD64");   return;
+      case Iop_MulD64:  vex_printf("MulD64");   return;
+      case Iop_DivD64:  vex_printf("DivD64");   return;
+      case Iop_ShlD64:  vex_printf("ShlD64"); return;
+      case Iop_ShrD64:  vex_printf("ShrD64"); return;
+      case Iop_D64toI32S:  vex_printf("D64toI32S");  return;
+      case Iop_D64toI32U:  vex_printf("D64toI32U");  return;
+      case Iop_D64toI64S:  vex_printf("D64toI64S");  return;
+      case Iop_D64toI64U:  vex_printf("D64toI64U");  return;
+      case Iop_I32StoD64:  vex_printf("I32StoD64");  return;
+      case Iop_I32UtoD64:  vex_printf("I32UtoD64");  return;
+      case Iop_I64StoD64:  vex_printf("I64StoD64");  return;
+      case Iop_I64UtoD64:  vex_printf("I64UtoD64");  return;
+      case Iop_I32StoD128: vex_printf("I32StoD128"); return;
+      case Iop_I32UtoD128: vex_printf("I32UtoD128"); return;
+      case Iop_I64StoD128: vex_printf("I64StoD128"); return;
+      case Iop_I64UtoD128: vex_printf("I64UtoD128"); return;
+      case Iop_D64toD128:  vex_printf("D64toD128");  return;
+      case Iop_D128toD64:  vex_printf("D128toD64");  return;
+      case Iop_D128toI32S: vex_printf("D128toI32S"); return;
+      case Iop_D128toI32U: vex_printf("D128toI32U"); return;
+      case Iop_D128toI64S: vex_printf("D128toI64S"); return;
+      case Iop_D128toI64U: vex_printf("D128toI64U"); return;
+      case Iop_F32toD32:   vex_printf("F32toD32");   return;
+      case Iop_F32toD64:   vex_printf("F32toD64");   return;
+      case Iop_F32toD128:  vex_printf("F32toD128");  return;
+      case Iop_F64toD32:   vex_printf("F64toD32");   return;
+      case Iop_F64toD64:   vex_printf("F64toD64");   return;
+      case Iop_F64toD128:  vex_printf("F64toD128");  return;
+      case Iop_F128toD32:  vex_printf("F128toD32");  return;
+      case Iop_F128toD64:  vex_printf("F128toD64");  return;
+      case Iop_F128toD128: vex_printf("F128toD128"); return;
+      case Iop_D32toF32:   vex_printf("D32toF32");   return;
+      case Iop_D32toF64:   vex_printf("D32toF64");   return;
+      case Iop_D32toF128:  vex_printf("D32toF128");  return;
+      case Iop_D64toF32:   vex_printf("D64toF32");   return;
+      case Iop_D64toF64:   vex_printf("D64toF64");   return;
+      case Iop_D64toF128:  vex_printf("D64toF128");  return;
+      case Iop_D128toF32:  vex_printf("D128toF32");  return;
+      case Iop_D128toF64:  vex_printf("D128toF64");  return;
+      case Iop_D128toF128: vex_printf("D128toF128"); return;
+      case Iop_AddD128: vex_printf("AddD128");  return;
+      case Iop_SubD128: vex_printf("SubD128");  return;
+      case Iop_MulD128: vex_printf("MulD128");  return;
+      case Iop_DivD128: vex_printf("DivD128");  return;
+      case Iop_ShlD128: vex_printf("ShlD128");  return;
+      case Iop_ShrD128: vex_printf("ShrD128");  return;
+      case Iop_RoundD64toInt:  vex_printf("RoundD64toInt");  return;
+      case Iop_RoundD128toInt: vex_printf("RoundD128toInt"); return;
+      case Iop_QuantizeD64:    vex_printf("QuantizeD64");    return;
+      case Iop_QuantizeD128:   vex_printf("QuantizeD128");   return;
+      case Iop_ExtractExpD64:  vex_printf("ExtractExpD64");  return;
+      case Iop_ExtractExpD128: vex_printf("ExtractExpD128"); return;
+      case Iop_ExtractSigD64:  vex_printf("ExtractSigD64");  return;
+      case Iop_ExtractSigD128: vex_printf("ExtractSigD128"); return;
+      case Iop_InsertExpD64:   vex_printf("InsertExpD64");   return;
+      case Iop_InsertExpD128:  vex_printf("InsertExpD128");  return;
+      case Iop_CmpD64:         vex_printf("CmpD64");     return;
+      case Iop_CmpD128:        vex_printf("CmpD128");    return;
+      case Iop_CmpExpD64:      vex_printf("CmpExpD64");  return;
+      case Iop_CmpExpD128:     vex_printf("CmpExpD128"); return;
+      case Iop_D64HLtoD128: vex_printf("D64HLtoD128");   return;
+      case Iop_D128HItoD64: vex_printf("D128HItoD64");   return;
+      case Iop_D128LOtoD64: vex_printf("D128LOtoD64");   return;
+      case Iop_SignificanceRoundD64: vex_printf("SignificanceRoundD64");
+         return;
+      case Iop_SignificanceRoundD128: vex_printf("SignificanceRoundD128");
+         return;
+      case Iop_ReinterpI64asD64: vex_printf("ReinterpI64asD64"); return;
+      case Iop_ReinterpD64asI64: vex_printf("ReinterpD64asI64"); return;
+      case Iop_V256to64_0: vex_printf("V256to64_0"); return;
+      case Iop_V256to64_1: vex_printf("V256to64_1"); return;
+      case Iop_V256to64_2: vex_printf("V256to64_2"); return;
+      case Iop_V256to64_3: vex_printf("V256to64_3"); return;
+      case Iop_64x4toV256: vex_printf("64x4toV256"); return;
+      case Iop_V256toV128_0: vex_printf("V256toV128_0"); return;
+      case Iop_V256toV128_1: vex_printf("V256toV128_1"); return;
+      case Iop_V128HLtoV256: vex_printf("V128HLtoV256"); return;
+      case Iop_DPBtoBCD: vex_printf("DPBtoBCD"); return;
+      case Iop_BCDtoDPB: vex_printf("BCDtoDPB"); return;
+      case Iop_Add64Fx4: vex_printf("Add64Fx4"); return;
+      case Iop_Sub64Fx4: vex_printf("Sub64Fx4"); return;
+      case Iop_Mul64Fx4: vex_printf("Mul64Fx4"); return;
+      case Iop_Div64Fx4: vex_printf("Div64Fx4"); return;
+      case Iop_Add32Fx8: vex_printf("Add32Fx8"); return;
+      case Iop_Sub32Fx8: vex_printf("Sub32Fx8"); return;
+      case Iop_Mul32Fx8: vex_printf("Mul32Fx8"); return;
+      case Iop_Div32Fx8: vex_printf("Div32Fx8"); return;
+      case Iop_AndV256: vex_printf("AndV256"); return;
+      case Iop_OrV256:  vex_printf("OrV256"); return;
+      case Iop_XorV256: vex_printf("XorV256"); return;
+      case Iop_NotV256: vex_printf("NotV256"); return;
+      case Iop_CmpNEZ64x4: vex_printf("CmpNEZ64x4"); return;
+      case Iop_CmpNEZ32x8: vex_printf("CmpNEZ32x8"); return;
+      case Iop_CmpNEZ16x16: vex_printf("CmpNEZ16x16"); return;
+      case Iop_CmpNEZ8x32: vex_printf("CmpNEZ8x32"); return;
+
+      case Iop_Add8x32:   vex_printf("Add8x32"); return;
+      case Iop_Add16x16:  vex_printf("Add16x16"); return;
+      case Iop_Add32x8:   vex_printf("Add32x8"); return;
+      case Iop_Add64x4:   vex_printf("Add64x4"); return;
+      case Iop_Sub8x32:   vex_printf("Sub8x32"); return;
+      case Iop_Sub16x16:  vex_printf("Sub16x16"); return;
+      case Iop_Sub32x8:   vex_printf("Sub32x8"); return;
+      case Iop_Sub64x4:   vex_printf("Sub64x4"); return;
+      case Iop_QAdd8Ux32: vex_printf("QAdd8Ux32"); return;
+      case Iop_QAdd16Ux16: vex_printf("QAdd16Ux16"); return;
+      case Iop_QAdd8Sx32: vex_printf("QAdd8Sx32"); return;
+      case Iop_QAdd16Sx16: vex_printf("QAdd16Sx16"); return;
+      case Iop_QSub8Ux32: vex_printf("QSub8Ux32"); return;
+      case Iop_QSub16Ux16: vex_printf("QSub16Ux16"); return;
+      case Iop_QSub8Sx32: vex_printf("QSub8Sx32"); return;
+      case Iop_QSub16Sx16: vex_printf("QSub16Sx16"); return;
+
+      case Iop_Mul16x16:    vex_printf("Mul16x16"); return;
+      case Iop_Mul32x8:     vex_printf("Mul32x8"); return;
+      case Iop_MulHi16Ux16: vex_printf("MulHi16Ux16"); return;
+      case Iop_MulHi16Sx16: vex_printf("MulHi16Sx16"); return;
+
+      case Iop_Avg8Ux32:  vex_printf("Avg8Ux32"); return;
+      case Iop_Avg16Ux16: vex_printf("Avg16Ux16"); return;
+
+      case Iop_Max8Sx32:  vex_printf("Max8Sx32"); return;
+      case Iop_Max16Sx16: vex_printf("Max16Sx16"); return;
+      case Iop_Max32Sx8:  vex_printf("Max32Sx8"); return;
+      case Iop_Max8Ux32:  vex_printf("Max8Ux32"); return;
+      case Iop_Max16Ux16: vex_printf("Max16Ux16"); return;
+      case Iop_Max32Ux8:  vex_printf("Max32Ux8"); return;
+
+      case Iop_Min8Sx32:  vex_printf("Min8Sx32"); return;
+      case Iop_Min16Sx16: vex_printf("Min16Sx16"); return;
+      case Iop_Min32Sx8:  vex_printf("Min32Sx8"); return;
+      case Iop_Min8Ux32:  vex_printf("Min8Ux32"); return;
+      case Iop_Min16Ux16: vex_printf("Min16Ux16"); return;
+      case Iop_Min32Ux8:  vex_printf("Min32Ux8"); return;
+
+      case Iop_CmpEQ8x32:   vex_printf("CmpEQ8x32"); return;
+      case Iop_CmpEQ16x16:  vex_printf("CmpEQ16x16"); return;
+      case Iop_CmpEQ32x8:   vex_printf("CmpEQ32x8"); return;
+      case Iop_CmpEQ64x4:   vex_printf("CmpEQ64x4"); return;
+      case Iop_CmpGT8Sx32:  vex_printf("CmpGT8Sx32"); return;
+      case Iop_CmpGT16Sx16: vex_printf("CmpGT16Sx16"); return;
+      case Iop_CmpGT32Sx8:  vex_printf("CmpGT32Sx8"); return;
+      case Iop_CmpGT64Sx4:  vex_printf("CmpGT64Sx4"); return;
+
+      case Iop_ShlN16x16:  vex_printf("ShlN16x16"); return;
+      case Iop_ShlN32x8:   vex_printf("ShlN32x8"); return;
+      case Iop_ShlN64x4:   vex_printf("ShlN64x4"); return;
+      case Iop_ShrN16x16:  vex_printf("ShrN16x16"); return;
+      case Iop_ShrN32x8:   vex_printf("ShrN32x8"); return;
+      case Iop_ShrN64x4:   vex_printf("ShrN64x4"); return;
+      case Iop_SarN16x16:  vex_printf("SarN16x16"); return;
+      case Iop_SarN32x8:   vex_printf("SarN32x8"); return;
+
+      case Iop_Perm32x8:   vex_printf("Perm32x8"); return;
+
+      case Iop_CipherV128:   vex_printf("CipherV128"); return;
+      case Iop_CipherLV128:  vex_printf("CipherLV128"); return;
+      case Iop_NCipherV128:  vex_printf("NCipherV128"); return;
+      case Iop_NCipherLV128: vex_printf("NCipherLV128"); return;
+      case Iop_CipherSV128:  vex_printf("CipherSV128"); return;
+
+      case Iop_SHA256:  vex_printf("SHA256"); return;
+      case Iop_SHA512:  vex_printf("SHA512"); return;
+      case Iop_BCDAdd:  vex_printf("BCDAdd"); return;
+      case Iop_BCDSub:  vex_printf("BCDSub"); return;
+
+      case Iop_PwBitMtxXpose64x2: vex_printf("BitMatrixTranspose64x2"); return;
+
+      default: vpanic("ppIROp(1)");
+   }
+
+   vassert(str);  
+   switch (op - base) {
+      case 0: vex_printf("%s",str); vex_printf("8"); break;
+      case 1: vex_printf("%s",str); vex_printf("16"); break;
+      case 2: vex_printf("%s",str); vex_printf("32"); break;
+      case 3: vex_printf("%s",str); vex_printf("64"); break;
+      default: vpanic("ppIROp(2)");
+   }
+}
+
/* Pretty-print expression 'e', recursing into any sub-expressions.
   Emits no trailing newline; callers handle layout. */
void ppIRExpr ( const IRExpr* e )
{
  Int i;
  switch (e->tag) {
    case Iex_Binder:
      vex_printf("BIND-%d", e->Iex.Binder.binder);
      break;
    case Iex_Get:
      /* Prints as GET:ty(offset). */
      vex_printf( "GET:" );
      ppIRType(e->Iex.Get.ty);
      vex_printf("(%d)", e->Iex.Get.offset);
      break;
    case Iex_GetI:
      /* Prints as GETI<descr>[ix,bias]. */
      vex_printf( "GETI" );
      ppIRRegArray(e->Iex.GetI.descr);
      vex_printf("[");
      ppIRExpr(e->Iex.GetI.ix);
      vex_printf(",%d]", e->Iex.GetI.bias);
      break;
    case Iex_RdTmp:
      ppIRTemp(e->Iex.RdTmp.tmp);
      break;
    case Iex_Qop: {
      /* Prints as op(arg1,arg2,arg3,arg4). */
      const IRQop *qop = e->Iex.Qop.details;
      ppIROp(qop->op);
      vex_printf( "(" );
      ppIRExpr(qop->arg1);
      vex_printf( "," );
      ppIRExpr(qop->arg2);
      vex_printf( "," );
      ppIRExpr(qop->arg3);
      vex_printf( "," );
      ppIRExpr(qop->arg4);
      vex_printf( ")" );
      break;
    }
    case Iex_Triop: {
      /* Prints as op(arg1,arg2,arg3). */
      const IRTriop *triop = e->Iex.Triop.details;
      ppIROp(triop->op);
      vex_printf( "(" );
      ppIRExpr(triop->arg1);
      vex_printf( "," );
      ppIRExpr(triop->arg2);
      vex_printf( "," );
      ppIRExpr(triop->arg3);
      vex_printf( ")" );
      break;
    }
    case Iex_Binop:
      /* Prints as op(arg1,arg2). */
      ppIROp(e->Iex.Binop.op);
      vex_printf( "(" );
      ppIRExpr(e->Iex.Binop.arg1);
      vex_printf( "," );
      ppIRExpr(e->Iex.Binop.arg2);
      vex_printf( ")" );
      break;
    case Iex_Unop:
      /* Prints as op(arg). */
      ppIROp(e->Iex.Unop.op);
      vex_printf( "(" );
      ppIRExpr(e->Iex.Unop.arg);
      vex_printf( ")" );
      break;
    case Iex_Load:
      /* Prints as LDle:ty(addr) or LDbe:ty(addr), by endianness. */
      vex_printf( "LD%s:", e->Iex.Load.end==Iend_LE ? "le" : "be" );
      ppIRType(e->Iex.Load.ty);
      vex_printf( "(" );
      ppIRExpr(e->Iex.Load.addr);
      vex_printf( ")" );
      break;
    case Iex_Const:
      ppIRConst(e->Iex.Const.con);
      break;
    case Iex_CCall:
      /* Prints as cee(arg0,...,argN):retty.  The args array is
         NULL-terminated. */
      ppIRCallee(e->Iex.CCall.cee);
      vex_printf("(");
      for (i = 0; e->Iex.CCall.args[i] != NULL; i++) {
        IRExpr* arg = e->Iex.CCall.args[i];
        ppIRExpr(arg);

        /* Comma-separate all but the last argument. */
        if (e->Iex.CCall.args[i+1] != NULL) {
          vex_printf(",");
        }
      }
      vex_printf("):");
      ppIRType(e->Iex.CCall.retty);
      break;
    case Iex_ITE:
      /* Prints as ITE(cond,iftrue,iffalse). */
      vex_printf("ITE(");
      ppIRExpr(e->Iex.ITE.cond);
      vex_printf(",");
      ppIRExpr(e->Iex.ITE.iftrue);
      vex_printf(",");
      ppIRExpr(e->Iex.ITE.iffalse);
      vex_printf(")");
      break;
    case Iex_VECRET:
      vex_printf("VECRET");
      break;
    case Iex_BBPTR:
      vex_printf("BBPTR");
      break;
    default:
      vpanic("ppIRExpr");
  }
}
+
+void ppIREffect ( IREffect fx )
+{
+   switch (fx) {
+      case Ifx_None:   vex_printf("noFX"); return;
+      case Ifx_Read:   vex_printf("RdFX"); return;
+      case Ifx_Write:  vex_printf("WrFX"); return;
+      case Ifx_Modify: vex_printf("MoFX"); return;
+      default: vpanic("ppIREffect");
+   }
+}
+
/* Pretty-print a dirty helper call: optional result temp, guard,
   memory and guest-state effect annotations, then the callee and its
   NULL-terminated argument list.  No trailing newline. */
void ppIRDirty ( const IRDirty* d )
{
   Int i;
   /* "tmp = " prefix only when the call produces a result. */
   if (d->tmp != IRTemp_INVALID) {
      ppIRTemp(d->tmp);
      vex_printf(" = ");
   }
   vex_printf("DIRTY ");
   ppIRExpr(d->guard);
   /* Memory effect annotation, if any: " <fx>-mem(addr,size)". */
   if (d->mFx != Ifx_None) {
      vex_printf(" ");
      ppIREffect(d->mFx);
      vex_printf("-mem(");
      ppIRExpr(d->mAddr);
      vex_printf(",%d)", d->mSize);
   }
   /* One "-gst(offset,size[,reps,step])" annotation per guest-state
      effect descriptor. */
   for (i = 0; i < d->nFxState; i++) {
      vex_printf(" ");
      ppIREffect(d->fxState[i].fx);
      vex_printf("-gst(%u,%u", (UInt)d->fxState[i].offset,
                               (UInt)d->fxState[i].size);
      if (d->fxState[i].nRepeats > 0) {
         vex_printf(",reps%u,step%u", (UInt)d->fxState[i].nRepeats,
                                      (UInt)d->fxState[i].repeatLen);
      }
      vex_printf(")");
   }
   vex_printf(" ::: ");
   ppIRCallee(d->cee);
   vex_printf("(");
   for (i = 0; d->args[i] != NULL; i++) {
      IRExpr* arg = d->args[i];
      ppIRExpr(arg);

      /* Comma-separate all but the last argument. */
      if (d->args[i+1] != NULL) {
         vex_printf(",");
      }
   }
   vex_printf(")");
}
+
/* Pretty-print a compare-and-swap.  For a double-element CAS both the
   Hi and Lo halves are shown; for a single-element CAS the Hi fields
   (oldHi, expdHi, dataHi) are absent and skipped. */
void ppIRCAS ( const IRCAS* cas )
{
   /* Print even structurally invalid constructions, as an aid to
      debugging. */
   if (cas->oldHi != IRTemp_INVALID) {
      ppIRTemp(cas->oldHi);
      vex_printf(",");
   }
   ppIRTemp(cas->oldLo);
   vex_printf(" = CAS%s(", cas->end==Iend_LE ? "le" : "be" );
   ppIRExpr(cas->addr);
   vex_printf("::");
   if (cas->expdHi) {
      ppIRExpr(cas->expdHi);
      vex_printf(",");
   }
   ppIRExpr(cas->expdLo);
   vex_printf("->");
   if (cas->dataHi) {
      ppIRExpr(cas->dataHi);
      vex_printf(",");
   }
   ppIRExpr(cas->dataLo);
   vex_printf(")");
}
+
+void ppIRPutI ( const IRPutI* puti )
+{
+   vex_printf( "PUTI" );
+   ppIRRegArray(puti->descr);
+   vex_printf("[");
+   ppIRExpr(puti->ix);
+   vex_printf(",%d] = ", puti->bias);
+   ppIRExpr(puti->data);
+}
+
+void ppIRStoreG ( const IRStoreG* sg )
+{
+   vex_printf("if (");
+   ppIRExpr(sg->guard);
+   vex_printf(") { ST%s(", sg->end==Iend_LE ? "le" : "be");
+   ppIRExpr(sg->addr);
+   vex_printf(") = ");
+   ppIRExpr(sg->data);
+   vex_printf(" }");
+}
+
+void ppIRLoadGOp ( IRLoadGOp cvt )
+{
+   switch (cvt) {
+      case ILGop_INVALID: vex_printf("ILGop_INVALID"); break;      
+      case ILGop_Ident64: vex_printf("Ident64"); break;      
+      case ILGop_Ident32: vex_printf("Ident32"); break;      
+      case ILGop_16Uto32: vex_printf("16Uto32"); break;      
+      case ILGop_16Sto32: vex_printf("16Sto32"); break;      
+      case ILGop_8Uto32:  vex_printf("8Uto32"); break;      
+      case ILGop_8Sto32:  vex_printf("8Sto32"); break;      
+      default: vpanic("ppIRLoadGOp");
+   }
+}
+
+void ppIRLoadG ( const IRLoadG* lg )
+{
+   ppIRTemp(lg->dst);
+   vex_printf(" = if-strict (");
+   ppIRExpr(lg->guard);
+   vex_printf(") ");
+   ppIRLoadGOp(lg->cvt);
+   vex_printf("(LD%s(", lg->end==Iend_LE ? "le" : "be");
+   ppIRExpr(lg->addr);
+   vex_printf(")) else ");
+   ppIRExpr(lg->alt);
+}
+
+void ppIRJumpKind ( IRJumpKind kind )
+{
+   switch (kind) {
+      case Ijk_Boring:        vex_printf("Boring"); break;
+      case Ijk_Call:          vex_printf("Call"); break;
+      case Ijk_Ret:           vex_printf("Return"); break;
+      case Ijk_ClientReq:     vex_printf("ClientReq"); break;
+      case Ijk_Yield:         vex_printf("Yield"); break;
+      case Ijk_EmWarn:        vex_printf("EmWarn"); break;
+      case Ijk_EmFail:        vex_printf("EmFail"); break;
+      case Ijk_NoDecode:      vex_printf("NoDecode"); break;
+      case Ijk_MapFail:       vex_printf("MapFail"); break;
+      case Ijk_InvalICache:   vex_printf("InvalICache"); break;
+      case Ijk_FlushDCache:   vex_printf("FlushDCache"); break;
+      case Ijk_NoRedir:       vex_printf("NoRedir"); break;
+      case Ijk_SigILL:        vex_printf("SigILL"); break;
+      case Ijk_SigTRAP:       vex_printf("SigTRAP"); break;
+      case Ijk_SigSEGV:       vex_printf("SigSEGV"); break;
+      case Ijk_SigBUS:        vex_printf("SigBUS"); break;
+      case Ijk_SigFPE_IntDiv: vex_printf("SigFPE_IntDiv"); break;
+      case Ijk_SigFPE_IntOvf: vex_printf("SigFPE_IntOvf"); break;
+      case Ijk_Sys_syscall:   vex_printf("Sys_syscall"); break;
+      case Ijk_Sys_int32:     vex_printf("Sys_int32"); break;
+      case Ijk_Sys_int128:    vex_printf("Sys_int128"); break;
+      case Ijk_Sys_int129:    vex_printf("Sys_int129"); break;
+      case Ijk_Sys_int130:    vex_printf("Sys_int130"); break;
+      case Ijk_Sys_sysenter:  vex_printf("Sys_sysenter"); break;
+      default:                vpanic("ppIRJumpKind");
+   }
+}
+
+void ppIRMBusEvent ( IRMBusEvent event )
+{
+   switch (event) {
+      case Imbe_Fence:
+         vex_printf("Fence"); break;
+      case Imbe_CancelReservation:
+         vex_printf("CancelReservation"); break;
+      default:
+         vpanic("ppIRMBusEvent");
+   }
+}
+
/* Pretty-print a single statement, with no trailing newline.  A NULL
   pointer is reported in the output rather than crashing, as an aid
   to debugging partially-built blocks. */
void ppIRStmt ( const IRStmt* s )
{
   if (!s) {
      vex_printf("!!! IRStmt* which is NULL !!!");
      return;
   }
   switch (s->tag) {
      case Ist_NoOp:
         vex_printf("IR-NoOp");
         break;
      case Ist_IMark:
         /* Instruction mark: address, length, delta. */
         vex_printf( "------ IMark(0x%lx, %u, %u) ------", 
                     s->Ist.IMark.addr, s->Ist.IMark.len,
                     (UInt)s->Ist.IMark.delta);
         break;
      case Ist_AbiHint:
         vex_printf("====== AbiHint(");
         ppIRExpr(s->Ist.AbiHint.base);
         vex_printf(", %d, ", s->Ist.AbiHint.len);
         ppIRExpr(s->Ist.AbiHint.nia);
         vex_printf(") ======");
         break;
      case Ist_Put:
         vex_printf( "PUT(%d) = ", s->Ist.Put.offset);
         ppIRExpr(s->Ist.Put.data);
         break;
      case Ist_PutI:
         ppIRPutI(s->Ist.PutI.details);
         break;
      case Ist_WrTmp:
         ppIRTemp(s->Ist.WrTmp.tmp);
         vex_printf( " = " );
         ppIRExpr(s->Ist.WrTmp.data);
         break;
      case Ist_Store:
         vex_printf( "ST%s(", s->Ist.Store.end==Iend_LE ? "le" : "be" );
         ppIRExpr(s->Ist.Store.addr);
         vex_printf( ") = ");
         ppIRExpr(s->Ist.Store.data);
         break;
      case Ist_StoreG:
         ppIRStoreG(s->Ist.StoreG.details);
         break;
      case Ist_LoadG:
         ppIRLoadG(s->Ist.LoadG.details);
         break;
      case Ist_CAS:
         ppIRCAS(s->Ist.CAS.details);
         break;
      case Ist_LLSC:
         /* storedata == NULL distinguishes load-linked from
            store-conditional. */
         if (s->Ist.LLSC.storedata == NULL) {
            ppIRTemp(s->Ist.LLSC.result);
            vex_printf(" = LD%s-Linked(",
                       s->Ist.LLSC.end==Iend_LE ? "le" : "be");
            ppIRExpr(s->Ist.LLSC.addr);
            vex_printf(")");
         } else {
            ppIRTemp(s->Ist.LLSC.result);
            vex_printf(" = ( ST%s-Cond(",
                       s->Ist.LLSC.end==Iend_LE ? "le" : "be");
            ppIRExpr(s->Ist.LLSC.addr);
            vex_printf(") = ");
            ppIRExpr(s->Ist.LLSC.storedata);
            vex_printf(" )");
         }
         break;
      case Ist_Dirty:
         ppIRDirty(s->Ist.Dirty.details);
         break;
      case Ist_MBE:
         vex_printf("IR-");
         ppIRMBusEvent(s->Ist.MBE.event);
         break;
      case Ist_Exit:
         /* Conditional side exit: guard, new IP, and jump kind. */
         vex_printf( "if (" );
         ppIRExpr(s->Ist.Exit.guard);
         vex_printf( ") { PUT(%d) = ", s->Ist.Exit.offsIP);
         ppIRConst(s->Ist.Exit.dst);
         vex_printf("; exit-");
         ppIRJumpKind(s->Ist.Exit.jk);
         vex_printf(" } ");
         break;
      default: 
         vpanic("ppIRStmt");
   }
}
+
+void ppIRTypeEnv ( const IRTypeEnv* env )
+{
+   UInt i;
+   for (i = 0; i < env->types_used; i++) {
+      if (i % 8 == 0)
+         vex_printf( "   ");
+      ppIRTemp(i);
+      vex_printf( ":");
+      ppIRType(env->types[i]);
+      if (i % 8 == 7) 
+         vex_printf( "\n"); 
+      else 
+         vex_printf( "   ");
+   }
+   if (env->types_used > 0 && env->types_used % 8 != 7) 
+      vex_printf( "\n"); 
+}
+
+void ppIRSB ( const IRSB* bb )
+{
+   Int i;
+   vex_printf("IRSB {\n");
+   ppIRTypeEnv(bb->tyenv);
+   vex_printf("\n");
+   for (i = 0; i < bb->stmts_used; i++) {
+      vex_printf( "   ");
+      ppIRStmt(bb->stmts[i]);
+      vex_printf( "\n");
+   }
+   vex_printf( "   PUT(%d) = ", bb->offsIP );
+   ppIRExpr( bb->next );
+   vex_printf( "; exit-");
+   ppIRJumpKind(bb->jumpkind);
+   vex_printf( "\n}\n");
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Constructors                                            ---*/
+/*---------------------------------------------------------------*/
+
+
+/* Constructors -- IRConst */
+
+IRConst* IRConst_U1 ( Bool bit )
+{
+   IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag     = Ico_U1;
+   c->Ico.U1  = bit;
+   /* call me paranoid; I don't care :-) */
+   vassert(bit == False || bit == True);
+   return c;
+}
+IRConst* IRConst_U8 ( UChar u8 )
+{
+   IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag     = Ico_U8;
+   c->Ico.U8  = u8;
+   return c;
+}
+IRConst* IRConst_U16 ( UShort u16 )
+{
+   IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag     = Ico_U16;
+   c->Ico.U16 = u16;
+   return c;
+}
+IRConst* IRConst_U32 ( UInt u32 )
+{
+   IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag     = Ico_U32;
+   c->Ico.U32 = u32;
+   return c;
+}
+IRConst* IRConst_U64 ( ULong u64 )
+{
+   IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag     = Ico_U64;
+   c->Ico.U64 = u64;
+   return c;
+}
+IRConst* IRConst_F32 ( Float f32 )
+{
+   IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag     = Ico_F32;
+   c->Ico.F32 = f32;
+   return c;
+}
+IRConst* IRConst_F32i ( UInt f32i )
+{
+   IRConst* c  = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag      = Ico_F32i;
+   c->Ico.F32i = f32i;
+   return c;
+}
+IRConst* IRConst_F64 ( Double f64 )
+{
+   IRConst* c = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag     = Ico_F64;
+   c->Ico.F64 = f64;
+   return c;
+}
+IRConst* IRConst_F64i ( ULong f64i )
+{
+   IRConst* c  = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag      = Ico_F64i;
+   c->Ico.F64i = f64i;
+   return c;
+}
+IRConst* IRConst_V128 ( UShort con )
+{
+   IRConst* c  = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag      = Ico_V128;
+   c->Ico.V128 = con;
+   return c;
+}
+IRConst* IRConst_V256 ( UInt con )
+{
+   IRConst* c  = LibVEX_Alloc_inline(sizeof(IRConst));
+   c->tag      = Ico_V256;
+   c->Ico.V256 = con;
+   return c;
+}
+
+/* Constructors -- IRCallee */
+
+IRCallee* mkIRCallee ( Int regparms, const HChar* name, void* addr )
+{
+   IRCallee* ce = LibVEX_Alloc_inline(sizeof(IRCallee));
+   ce->regparms = regparms;
+   ce->name     = name;
+   ce->addr     = addr;
+   ce->mcx_mask = 0;
+   vassert(regparms >= 0 && regparms <= 3);
+   vassert(name != NULL);
+   vassert(addr != 0);
+   return ce;
+}
+
+
+/* Constructors -- IRRegArray */
+
+IRRegArray* mkIRRegArray ( Int base, IRType elemTy, Int nElems )
+{
+   IRRegArray* arr = LibVEX_Alloc_inline(sizeof(IRRegArray));
+   arr->base       = base;
+   arr->elemTy     = elemTy;
+   arr->nElems     = nElems;
+   vassert(!(arr->base < 0 || arr->base > 10000 /* somewhat arbitrary */));
+   vassert(!(arr->elemTy == Ity_I1));
+   vassert(!(arr->nElems <= 0 || arr->nElems > 500 /* somewhat arbitrary */));
+   return arr;
+}
+
+
+/* Constructors -- IRExpr */
+
+IRExpr* IRExpr_Binder ( Int binder ) {
+   IRExpr* e            = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag               = Iex_Binder;
+   e->Iex.Binder.binder = binder;
+   return e;
+}
+IRExpr* IRExpr_Get ( Int off, IRType ty ) {
+   IRExpr* e         = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag            = Iex_Get;
+   e->Iex.Get.offset = off;
+   e->Iex.Get.ty     = ty;
+   return e;
+}
+IRExpr* IRExpr_GetI ( IRRegArray* descr, IRExpr* ix, Int bias ) {
+   IRExpr* e         = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag            = Iex_GetI;
+   e->Iex.GetI.descr = descr;
+   e->Iex.GetI.ix    = ix;
+   e->Iex.GetI.bias  = bias;
+   return e;
+}
+IRExpr* IRExpr_RdTmp ( IRTemp tmp ) {
+   IRExpr* e        = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag           = Iex_RdTmp;
+   e->Iex.RdTmp.tmp = tmp;
+   return e;
+}
+IRExpr* IRExpr_Qop ( IROp op, IRExpr* arg1, IRExpr* arg2, 
+                              IRExpr* arg3, IRExpr* arg4 ) {
+   IRExpr* e       = LibVEX_Alloc_inline(sizeof(IRExpr));
+   IRQop*  qop     = LibVEX_Alloc_inline(sizeof(IRQop));
+   qop->op         = op;
+   qop->arg1       = arg1;
+   qop->arg2       = arg2;
+   qop->arg3       = arg3;
+   qop->arg4       = arg4;
+   e->tag          = Iex_Qop;
+   e->Iex.Qop.details = qop;
+   return e;
+}
+IRExpr* IRExpr_Triop  ( IROp op, IRExpr* arg1, 
+                                 IRExpr* arg2, IRExpr* arg3 ) {
+   IRExpr*  e         = LibVEX_Alloc_inline(sizeof(IRExpr));
+   IRTriop* triop     = LibVEX_Alloc_inline(sizeof(IRTriop));
+   triop->op         = op;
+   triop->arg1       = arg1;
+   triop->arg2       = arg2;
+   triop->arg3       = arg3;
+   e->tag            = Iex_Triop;
+   e->Iex.Triop.details = triop;
+   return e;
+}
+IRExpr* IRExpr_Binop ( IROp op, IRExpr* arg1, IRExpr* arg2 ) {
+   IRExpr* e         = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag            = Iex_Binop;
+   e->Iex.Binop.op   = op;
+   e->Iex.Binop.arg1 = arg1;
+   e->Iex.Binop.arg2 = arg2;
+   return e;
+}
+IRExpr* IRExpr_Unop ( IROp op, IRExpr* arg ) {
+   IRExpr* e       = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag          = Iex_Unop;
+   e->Iex.Unop.op  = op;
+   e->Iex.Unop.arg = arg;
+   return e;
+}
+IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr ) {
+   IRExpr* e        = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag           = Iex_Load;
+   e->Iex.Load.end  = end;
+   e->Iex.Load.ty   = ty;
+   e->Iex.Load.addr = addr;
+   vassert(end == Iend_LE || end == Iend_BE);
+   return e;
+}
+IRExpr* IRExpr_Const ( IRConst* con ) {
+   IRExpr* e        = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag           = Iex_Const;
+   e->Iex.Const.con = con;
+   return e;
+}
+IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args ) {
+   IRExpr* e          = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag             = Iex_CCall;
+   e->Iex.CCall.cee   = cee;
+   e->Iex.CCall.retty = retty;
+   e->Iex.CCall.args  = args;
+   return e;
+}
+IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse ) {
+   IRExpr* e          = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag             = Iex_ITE;
+   e->Iex.ITE.cond    = cond;
+   e->Iex.ITE.iftrue  = iftrue;
+   e->Iex.ITE.iffalse = iffalse;
+   return e;
+}
+IRExpr* IRExpr_VECRET ( void ) {
+   IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag    = Iex_VECRET;
+   return e;
+}
+IRExpr* IRExpr_BBPTR ( void ) {
+   IRExpr* e = LibVEX_Alloc_inline(sizeof(IRExpr));
+   e->tag    = Iex_BBPTR;
+   return e;
+}
+
+
+/* Constructors for NULL-terminated IRExpr expression vectors,
+   suitable for use as arg lists in clean/dirty helper calls. */
+
+IRExpr** mkIRExprVec_0 ( void ) {
+   IRExpr** vec = LibVEX_Alloc_inline(1 * sizeof(IRExpr*));
+   vec[0] = NULL;
+   return vec;
+}
+IRExpr** mkIRExprVec_1 ( IRExpr* arg1 ) {
+   IRExpr** vec = LibVEX_Alloc_inline(2 * sizeof(IRExpr*));
+   vec[0] = arg1;
+   vec[1] = NULL;
+   return vec;
+}
+IRExpr** mkIRExprVec_2 ( IRExpr* arg1, IRExpr* arg2 ) {
+   IRExpr** vec = LibVEX_Alloc_inline(3 * sizeof(IRExpr*));
+   vec[0] = arg1;
+   vec[1] = arg2;
+   vec[2] = NULL;
+   return vec;
+}
+IRExpr** mkIRExprVec_3 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3 ) {
+   IRExpr** vec = LibVEX_Alloc_inline(4 * sizeof(IRExpr*));
+   vec[0] = arg1;
+   vec[1] = arg2;
+   vec[2] = arg3;
+   vec[3] = NULL;
+   return vec;
+}
+IRExpr** mkIRExprVec_4 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
+                         IRExpr* arg4 ) {
+   IRExpr** vec = LibVEX_Alloc_inline(5 * sizeof(IRExpr*));
+   vec[0] = arg1;
+   vec[1] = arg2;
+   vec[2] = arg3;
+   vec[3] = arg4;
+   vec[4] = NULL;
+   return vec;
+}
+IRExpr** mkIRExprVec_5 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
+                         IRExpr* arg4, IRExpr* arg5 ) {
+   IRExpr** vec = LibVEX_Alloc_inline(6 * sizeof(IRExpr*));
+   vec[0] = arg1;
+   vec[1] = arg2;
+   vec[2] = arg3;
+   vec[3] = arg4;
+   vec[4] = arg5;
+   vec[5] = NULL;
+   return vec;
+}
+IRExpr** mkIRExprVec_6 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
+                         IRExpr* arg4, IRExpr* arg5, IRExpr* arg6 ) {
+   IRExpr** vec = LibVEX_Alloc_inline(7 * sizeof(IRExpr*));
+   vec[0] = arg1;
+   vec[1] = arg2;
+   vec[2] = arg3;
+   vec[3] = arg4;
+   vec[4] = arg5;
+   vec[5] = arg6;
+   vec[6] = NULL;
+   return vec;
+}
+IRExpr** mkIRExprVec_7 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
+                         IRExpr* arg4, IRExpr* arg5, IRExpr* arg6,
+                         IRExpr* arg7 ) {
+   IRExpr** vec = LibVEX_Alloc_inline(8 * sizeof(IRExpr*));
+   vec[0] = arg1;
+   vec[1] = arg2;
+   vec[2] = arg3;
+   vec[3] = arg4;
+   vec[4] = arg5;
+   vec[5] = arg6;
+   vec[6] = arg7;
+   vec[7] = NULL;
+   return vec;
+}
+IRExpr** mkIRExprVec_8 ( IRExpr* arg1, IRExpr* arg2, IRExpr* arg3,
+                         IRExpr* arg4, IRExpr* arg5, IRExpr* arg6,
+                         IRExpr* arg7, IRExpr* arg8 ) {
+   IRExpr** vec = LibVEX_Alloc_inline(9 * sizeof(IRExpr*));
+   vec[0] = arg1;
+   vec[1] = arg2;
+   vec[2] = arg3;
+   vec[3] = arg4;
+   vec[4] = arg5;
+   vec[5] = arg6;
+   vec[6] = arg7;
+   vec[7] = arg8;
+   vec[8] = NULL;
+   return vec;
+}
+
+
+/* Constructors -- IRDirty */
+
+IRDirty* emptyIRDirty ( void ) {
+   IRDirty* d = LibVEX_Alloc_inline(sizeof(IRDirty));
+   d->cee      = NULL;
+   d->guard    = NULL;
+   d->args     = NULL;
+   d->tmp      = IRTemp_INVALID;
+   d->mFx      = Ifx_None;
+   d->mAddr    = NULL;
+   d->mSize    = 0;
+   d->nFxState = 0;
+   return d;
+}
+
+
+/* Constructors -- IRCAS */
+
+IRCAS* mkIRCAS ( IRTemp oldHi, IRTemp oldLo,
+                 IREndness end, IRExpr* addr, 
+                 IRExpr* expdHi, IRExpr* expdLo,
+                 IRExpr* dataHi, IRExpr* dataLo ) {
+   IRCAS* cas = LibVEX_Alloc_inline(sizeof(IRCAS));
+   cas->oldHi  = oldHi;
+   cas->oldLo  = oldLo;
+   cas->end    = end;
+   cas->addr   = addr;
+   cas->expdHi = expdHi;
+   cas->expdLo = expdLo;
+   cas->dataHi = dataHi;
+   cas->dataLo = dataLo;
+   return cas;
+}
+
+
+/* Constructors -- IRPutI */
+
+IRPutI* mkIRPutI ( IRRegArray* descr, IRExpr* ix,
+                   Int bias, IRExpr* data )
+{
+   IRPutI* puti = LibVEX_Alloc_inline(sizeof(IRPutI));
+   puti->descr  = descr;
+   puti->ix     = ix;
+   puti->bias   = bias;
+   puti->data   = data;
+   return puti;
+}
+
+
+/* Constructors -- IRStoreG and IRLoadG */
+
+IRStoreG* mkIRStoreG ( IREndness end,
+                       IRExpr* addr, IRExpr* data, IRExpr* guard )
+{
+   IRStoreG* sg = LibVEX_Alloc_inline(sizeof(IRStoreG));
+   sg->end      = end;
+   sg->addr     = addr;
+   sg->data     = data;
+   sg->guard    = guard;
+   return sg;
+}
+
+IRLoadG* mkIRLoadG ( IREndness end, IRLoadGOp cvt,
+                     IRTemp dst, IRExpr* addr, IRExpr* alt, IRExpr* guard )
+{
+   IRLoadG* lg = LibVEX_Alloc_inline(sizeof(IRLoadG));
+   lg->end     = end;
+   lg->cvt     = cvt;
+   lg->dst     = dst;
+   lg->addr    = addr;
+   lg->alt     = alt;
+   lg->guard   = guard;
+   return lg;
+}
+
+
+/* Constructors -- IRStmt */
+
+IRStmt* IRStmt_NoOp ( void )
+{
+   /* Just use a single static closure. */
+   static IRStmt static_closure;
+   static_closure.tag = Ist_NoOp;
+   return &static_closure;
+}
+IRStmt* IRStmt_IMark ( Addr addr, UInt len, UChar delta ) {
+   IRStmt* s          = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag             = Ist_IMark;
+   s->Ist.IMark.addr  = addr;
+   s->Ist.IMark.len   = len;
+   s->Ist.IMark.delta = delta;
+   return s;
+}
+IRStmt* IRStmt_AbiHint ( IRExpr* base, Int len, IRExpr* nia ) {
+   IRStmt* s           = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag              = Ist_AbiHint;
+   s->Ist.AbiHint.base = base;
+   s->Ist.AbiHint.len  = len;
+   s->Ist.AbiHint.nia  = nia;
+   return s;
+}
+IRStmt* IRStmt_Put ( Int off, IRExpr* data ) {
+   IRStmt* s         = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag            = Ist_Put;
+   s->Ist.Put.offset = off;
+   s->Ist.Put.data   = data;
+   return s;
+}
+IRStmt* IRStmt_PutI ( IRPutI* details ) {
+   IRStmt* s          = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag             = Ist_PutI;
+   s->Ist.PutI.details = details;
+   return s;
+}
+IRStmt* IRStmt_WrTmp ( IRTemp tmp, IRExpr* data ) {
+   IRStmt* s         = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag            = Ist_WrTmp;
+   s->Ist.WrTmp.tmp  = tmp;
+   s->Ist.WrTmp.data = data;
+   return s;
+}
+IRStmt* IRStmt_Store ( IREndness end, IRExpr* addr, IRExpr* data ) {
+   IRStmt* s         = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag            = Ist_Store;
+   s->Ist.Store.end  = end;
+   s->Ist.Store.addr = addr;
+   s->Ist.Store.data = data;
+   vassert(end == Iend_LE || end == Iend_BE);
+   return s;
+}
+IRStmt* IRStmt_StoreG ( IREndness end, IRExpr* addr, IRExpr* data,
+                        IRExpr* guard ) {
+   IRStmt* s             = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag                = Ist_StoreG;
+   s->Ist.StoreG.details = mkIRStoreG(end, addr, data, guard);
+   vassert(end == Iend_LE || end == Iend_BE);
+   return s;
+}
+IRStmt* IRStmt_LoadG ( IREndness end, IRLoadGOp cvt, IRTemp dst,
+                       IRExpr* addr, IRExpr* alt, IRExpr* guard ) {
+   IRStmt* s            = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag               = Ist_LoadG;
+   s->Ist.LoadG.details = mkIRLoadG(end, cvt, dst, addr, alt, guard);
+   return s;
+}
+IRStmt* IRStmt_CAS ( IRCAS* cas ) {
+   IRStmt* s          = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag             = Ist_CAS;
+   s->Ist.CAS.details = cas;
+   return s;
+}
+IRStmt* IRStmt_LLSC ( IREndness end,
+                      IRTemp result, IRExpr* addr, IRExpr* storedata ) {
+   IRStmt* s = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag                = Ist_LLSC;
+   s->Ist.LLSC.end       = end;
+   s->Ist.LLSC.result    = result;
+   s->Ist.LLSC.addr      = addr;
+   s->Ist.LLSC.storedata = storedata;
+   return s;
+}
+IRStmt* IRStmt_Dirty ( IRDirty* d )
+{
+   IRStmt* s            = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag               = Ist_Dirty;
+   s->Ist.Dirty.details = d;
+   return s;
+}
+IRStmt* IRStmt_MBE ( IRMBusEvent event )
+{
+   IRStmt* s        = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag           = Ist_MBE;
+   s->Ist.MBE.event = event;
+   return s;
+}
+IRStmt* IRStmt_Exit ( IRExpr* guard, IRJumpKind jk, IRConst* dst,
+                      Int offsIP ) {
+   IRStmt* s          = LibVEX_Alloc_inline(sizeof(IRStmt));
+   s->tag             = Ist_Exit;
+   s->Ist.Exit.guard  = guard;
+   s->Ist.Exit.jk     = jk;
+   s->Ist.Exit.dst    = dst;
+   s->Ist.Exit.offsIP = offsIP;
+   return s;
+}
+
+
+/* Constructors -- IRTypeEnv */
+
+IRTypeEnv* emptyIRTypeEnv ( void )
+{
+   IRTypeEnv* env   = LibVEX_Alloc_inline(sizeof(IRTypeEnv));
+   env->types       = LibVEX_Alloc_inline(8 * sizeof(IRType));
+   env->types_size  = 8;
+   env->types_used  = 0;
+   return env;
+}
+
+
+/* Constructors -- IRSB */
+
+IRSB* emptyIRSB ( void )
+{
+   IRSB* bb       = LibVEX_Alloc_inline(sizeof(IRSB));
+   bb->tyenv      = emptyIRTypeEnv();
+   bb->stmts_used = 0;
+   bb->stmts_size = 8;
+   bb->stmts      = LibVEX_Alloc_inline(bb->stmts_size * sizeof(IRStmt*));
+   bb->next       = NULL;
+   bb->jumpkind   = Ijk_Boring;
+   bb->offsIP     = 0;
+   return bb;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- (Deep) copy constructors.  These make complete copies   ---*/
+/*--- the original, which can be modified without affecting   ---*/
+/*--- the original.                                           ---*/
+/*---------------------------------------------------------------*/
+
+/* Copying IR Expr vectors (for call args). */
+
+/* Shallow copy of an IRExpr vector */
+
+IRExpr** shallowCopyIRExprVec ( IRExpr** vec )
+{
+   Int      i;
+   IRExpr** newvec;
+   for (i = 0; vec[i]; i++)
+      ;
+   newvec = LibVEX_Alloc_inline((i+1)*sizeof(IRExpr*));
+   for (i = 0; vec[i]; i++)
+      newvec[i] = vec[i];
+   newvec[i] = NULL;
+   return newvec;
+}
+
+/* Deep copy of an IRExpr vector */
+
+IRExpr** deepCopyIRExprVec ( IRExpr *const * vec )
+{
+   Int      i;
+   IRExpr** newvec;
+   for (i = 0; vec[i]; i++)
+      ;
+   newvec = LibVEX_Alloc_inline((i+1)*sizeof(IRExpr*));
+   for (i = 0; vec[i]; i++)
+      newvec[i] = deepCopyIRExpr(vec[i]);
+   newvec[i] = NULL;
+   return newvec;
+}
+
+/* Deep copy constructors for all heap-allocated IR types follow. */
+
/* Make an independent copy of 'c'.  IRConst holds only immediate data
   (no pointers), so copying is just re-running the constructor that
   matches the tag on the stored payload.  Panics on an unknown tag. */
IRConst* deepCopyIRConst ( const IRConst* c )
{
   switch (c->tag) {
      case Ico_U1:   return IRConst_U1(c->Ico.U1);
      case Ico_U8:   return IRConst_U8(c->Ico.U8);
      case Ico_U16:  return IRConst_U16(c->Ico.U16);
      case Ico_U32:  return IRConst_U32(c->Ico.U32);
      case Ico_U64:  return IRConst_U64(c->Ico.U64);
      case Ico_F32:  return IRConst_F32(c->Ico.F32);
      case Ico_F32i: return IRConst_F32i(c->Ico.F32i);
      case Ico_F64:  return IRConst_F64(c->Ico.F64);
      case Ico_F64i: return IRConst_F64i(c->Ico.F64i);
      case Ico_V128: return IRConst_V128(c->Ico.V128);
      case Ico_V256: return IRConst_V256(c->Ico.V256);
      default: vpanic("deepCopyIRConst");
   }
}
+
+IRCallee* deepCopyIRCallee ( const IRCallee* ce )
+{
+   IRCallee* ce2 = mkIRCallee(ce->regparms, ce->name, ce->addr);
+   ce2->mcx_mask = ce->mcx_mask;
+   return ce2;
+}
+
+IRRegArray* deepCopyIRRegArray ( const IRRegArray* d )
+{
+   return mkIRRegArray(d->base, d->elemTy, d->nElems);
+}
+
/* Make a complete, independent copy of expression tree 'e'.  All
   sub-expressions and auxiliary structures (reg-array descriptors,
   callees, constants, argument vectors) are copied too, so the result
   shares no heap storage with the original.  Panics on an unknown
   tag. */
IRExpr* deepCopyIRExpr ( const IRExpr* e )
{
   switch (e->tag) {
      case Iex_Get: 
         return IRExpr_Get(e->Iex.Get.offset, e->Iex.Get.ty);
      case Iex_GetI: 
         return IRExpr_GetI(deepCopyIRRegArray(e->Iex.GetI.descr), 
                            deepCopyIRExpr(e->Iex.GetI.ix),
                            e->Iex.GetI.bias);
      case Iex_RdTmp: 
         return IRExpr_RdTmp(e->Iex.RdTmp.tmp);
      case Iex_Qop: {
         const IRQop* qop = e->Iex.Qop.details;

         return IRExpr_Qop(qop->op,
                           deepCopyIRExpr(qop->arg1),
                           deepCopyIRExpr(qop->arg2),
                           deepCopyIRExpr(qop->arg3),
                           deepCopyIRExpr(qop->arg4));
      }
      case Iex_Triop:  {
         const IRTriop *triop = e->Iex.Triop.details;

         return IRExpr_Triop(triop->op,
                             deepCopyIRExpr(triop->arg1),
                             deepCopyIRExpr(triop->arg2),
                             deepCopyIRExpr(triop->arg3));
      }
      case Iex_Binop: 
         return IRExpr_Binop(e->Iex.Binop.op,
                             deepCopyIRExpr(e->Iex.Binop.arg1),
                             deepCopyIRExpr(e->Iex.Binop.arg2));
      case Iex_Unop: 
         return IRExpr_Unop(e->Iex.Unop.op,
                            deepCopyIRExpr(e->Iex.Unop.arg));
      case Iex_Load: 
         return IRExpr_Load(e->Iex.Load.end,
                            e->Iex.Load.ty,
                            deepCopyIRExpr(e->Iex.Load.addr));
      case Iex_Const: 
         return IRExpr_Const(deepCopyIRConst(e->Iex.Const.con));
      case Iex_CCall:
         return IRExpr_CCall(deepCopyIRCallee(e->Iex.CCall.cee),
                             e->Iex.CCall.retty,
                             deepCopyIRExprVec(e->Iex.CCall.args));

      case Iex_ITE: 
         return IRExpr_ITE(deepCopyIRExpr(e->Iex.ITE.cond),
                           deepCopyIRExpr(e->Iex.ITE.iftrue),
                           deepCopyIRExpr(e->Iex.ITE.iffalse));
      /* The marker nodes carry no payload; just make fresh ones. */
      case Iex_VECRET:
         return IRExpr_VECRET();

      case Iex_BBPTR:
         return IRExpr_BBPTR();

      case Iex_Binder:
         return IRExpr_Binder(e->Iex.Binder.binder);

      default:
         vpanic("deepCopyIRExpr");
   }
}
+
+IRDirty* deepCopyIRDirty ( const IRDirty* d )
+{
+   Int      i;
+   IRDirty* d2 = emptyIRDirty();
+   d2->cee   = deepCopyIRCallee(d->cee);
+   d2->guard = deepCopyIRExpr(d->guard);
+   d2->args  = deepCopyIRExprVec(d->args);
+   d2->tmp   = d->tmp;
+   d2->mFx   = d->mFx;
+   d2->mAddr = d->mAddr==NULL ? NULL : deepCopyIRExpr(d->mAddr);
+   d2->mSize = d->mSize;
+   d2->nFxState = d->nFxState;
+   for (i = 0; i < d2->nFxState; i++)
+      d2->fxState[i] = d->fxState[i];
+   return d2;
+}
+
+IRCAS* deepCopyIRCAS ( const IRCAS* cas )
+{
+   return mkIRCAS( cas->oldHi, cas->oldLo, cas->end,
+                   deepCopyIRExpr(cas->addr),
+                   cas->expdHi==NULL ? NULL : deepCopyIRExpr(cas->expdHi),
+                   deepCopyIRExpr(cas->expdLo),
+                   cas->dataHi==NULL ? NULL : deepCopyIRExpr(cas->dataHi),
+                   deepCopyIRExpr(cas->dataLo) );
+}
+
+IRPutI* deepCopyIRPutI ( const IRPutI * puti )
+{
+  return mkIRPutI( deepCopyIRRegArray(puti->descr),
+                   deepCopyIRExpr(puti->ix),
+                   puti->bias, 
+                   deepCopyIRExpr(puti->data));
+}
+
/* Make a complete, independent copy of statement 's', deep-copying
   every embedded expression and details record.  Panics on an unknown
   tag. */
IRStmt* deepCopyIRStmt ( const IRStmt* s )
{
   switch (s->tag) {
      case Ist_NoOp:
         return IRStmt_NoOp();
      case Ist_AbiHint:
         return IRStmt_AbiHint(deepCopyIRExpr(s->Ist.AbiHint.base),
                               s->Ist.AbiHint.len,
                               deepCopyIRExpr(s->Ist.AbiHint.nia));
      case Ist_IMark:
         return IRStmt_IMark(s->Ist.IMark.addr,
                             s->Ist.IMark.len,
                             s->Ist.IMark.delta);
      case Ist_Put: 
         return IRStmt_Put(s->Ist.Put.offset, 
                           deepCopyIRExpr(s->Ist.Put.data));
      case Ist_PutI: 
         return IRStmt_PutI(deepCopyIRPutI(s->Ist.PutI.details));
      case Ist_WrTmp:
         return IRStmt_WrTmp(s->Ist.WrTmp.tmp,
                             deepCopyIRExpr(s->Ist.WrTmp.data));
      case Ist_Store: 
         return IRStmt_Store(s->Ist.Store.end,
                             deepCopyIRExpr(s->Ist.Store.addr),
                             deepCopyIRExpr(s->Ist.Store.data));
      case Ist_StoreG: {
         const IRStoreG* sg = s->Ist.StoreG.details;
         return IRStmt_StoreG(sg->end,
                              deepCopyIRExpr(sg->addr),
                              deepCopyIRExpr(sg->data),
                              deepCopyIRExpr(sg->guard));
      }
      case Ist_LoadG: {
         const IRLoadG* lg = s->Ist.LoadG.details;
         return IRStmt_LoadG(lg->end, lg->cvt, lg->dst,
                             deepCopyIRExpr(lg->addr),
                             deepCopyIRExpr(lg->alt),
                             deepCopyIRExpr(lg->guard));
      }
      case Ist_CAS:
         return IRStmt_CAS(deepCopyIRCAS(s->Ist.CAS.details));
      /* storedata is NULL for a load-linked, non-NULL for a
         store-conditional. */
      case Ist_LLSC:
         return IRStmt_LLSC(s->Ist.LLSC.end,
                            s->Ist.LLSC.result,
                            deepCopyIRExpr(s->Ist.LLSC.addr),
                            s->Ist.LLSC.storedata
                               ? deepCopyIRExpr(s->Ist.LLSC.storedata)
                               : NULL);
      case Ist_Dirty: 
         return IRStmt_Dirty(deepCopyIRDirty(s->Ist.Dirty.details));
      case Ist_MBE:
         return IRStmt_MBE(s->Ist.MBE.event);
      case Ist_Exit: 
         return IRStmt_Exit(deepCopyIRExpr(s->Ist.Exit.guard),
                            s->Ist.Exit.jk,
                            deepCopyIRConst(s->Ist.Exit.dst),
                            s->Ist.Exit.offsIP);
      default: 
         vpanic("deepCopyIRStmt");
   }
}
+
+IRTypeEnv* deepCopyIRTypeEnv ( const IRTypeEnv* src )
+{
+   Int        i;
+   IRTypeEnv* dst = LibVEX_Alloc_inline(sizeof(IRTypeEnv));
+   dst->types_size = src->types_size;
+   dst->types_used = src->types_used;
+   dst->types = LibVEX_Alloc_inline(dst->types_size * sizeof(IRType));
+   for (i = 0; i < src->types_used; i++)
+      dst->types[i] = src->types[i];
+   return dst;
+}
+
+IRSB* deepCopyIRSB ( const IRSB* bb )
+{
+   Int      i;
+   IRStmt** sts2;
+   IRSB* bb2 = deepCopyIRSBExceptStmts(bb);
+   bb2->stmts_used = bb2->stmts_size = bb->stmts_used;
+   sts2 = LibVEX_Alloc_inline(bb2->stmts_used * sizeof(IRStmt*));
+   for (i = 0; i < bb2->stmts_used; i++)
+      sts2[i] = deepCopyIRStmt(bb->stmts[i]);
+   bb2->stmts = sts2;
+   return bb2;
+}
+
+IRSB* deepCopyIRSBExceptStmts ( const IRSB* bb )
+{
+   IRSB* bb2     = emptyIRSB();
+   bb2->tyenv    = deepCopyIRTypeEnv(bb->tyenv);
+   bb2->next     = deepCopyIRExpr(bb->next);
+   bb2->jumpkind = bb->jumpkind;
+   bb2->offsIP   = bb->offsIP;
+   return bb2;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Primop types                                            ---*/
+/*---------------------------------------------------------------*/
+
+static
+void typeOfPrimop ( IROp op, 
+                    /*OUTs*/
+                    IRType* t_dst, 
+                    IRType* t_arg1, IRType* t_arg2, 
+                    IRType* t_arg3, IRType* t_arg4 )
+{
+#  define UNARY(_ta1,_td)                                      \
+      *t_dst = (_td); *t_arg1 = (_ta1); break
+#  define BINARY(_ta1,_ta2,_td)                                \
+     *t_dst = (_td); *t_arg1 = (_ta1); *t_arg2 = (_ta2); break
+#  define TERNARY(_ta1,_ta2,_ta3,_td)                          \
+     *t_dst = (_td); *t_arg1 = (_ta1);                         \
+     *t_arg2 = (_ta2); *t_arg3 = (_ta3); break
+#  define QUATERNARY(_ta1,_ta2,_ta3,_ta4,_td)                  \
+     *t_dst = (_td); *t_arg1 = (_ta1);                         \
+     *t_arg2 = (_ta2); *t_arg3 = (_ta3);                       \
+     *t_arg4 = (_ta4); break
+#  define COMPARISON(_ta)                                      \
+     *t_dst = Ity_I1; *t_arg1 = *t_arg2 = (_ta); break;
+#  define UNARY_COMPARISON(_ta)                                \
+     *t_dst = Ity_I1; *t_arg1 = (_ta); break;
+
+   /* Rounding mode values are always Ity_I32, encoded as per
+      IRRoundingMode */
+   const IRType ity_RMode = Ity_I32;
+
+   *t_dst  = Ity_INVALID;
+   *t_arg1 = Ity_INVALID;
+   *t_arg2 = Ity_INVALID;
+   *t_arg3 = Ity_INVALID;
+   *t_arg4 = Ity_INVALID;
+   switch (op) {
+      case Iop_Add8: case Iop_Sub8: case Iop_Mul8: 
+      case Iop_Or8:  case Iop_And8: case Iop_Xor8:
+         BINARY(Ity_I8,Ity_I8, Ity_I8);
+
+      case Iop_Add16: case Iop_Sub16: case Iop_Mul16:
+      case Iop_Or16:  case Iop_And16: case Iop_Xor16:
+         BINARY(Ity_I16,Ity_I16, Ity_I16);
+
+      case Iop_CmpORD32U:
+      case Iop_CmpORD32S:
+      case Iop_Add32: case Iop_Sub32: case Iop_Mul32:
+      case Iop_Or32:  case Iop_And32: case Iop_Xor32:
+      case Iop_Max32U:
+      case Iop_QAdd32S: case Iop_QSub32S:
+      case Iop_Add16x2: case Iop_Sub16x2:
+      case Iop_QAdd16Sx2: case Iop_QAdd16Ux2:
+      case Iop_QSub16Sx2: case Iop_QSub16Ux2:
+      case Iop_HAdd16Ux2: case Iop_HAdd16Sx2:
+      case Iop_HSub16Ux2: case Iop_HSub16Sx2:
+      case Iop_Add8x4: case Iop_Sub8x4:
+      case Iop_QAdd8Sx4: case Iop_QAdd8Ux4:
+      case Iop_QSub8Sx4: case Iop_QSub8Ux4:
+      case Iop_HAdd8Ux4: case Iop_HAdd8Sx4:
+      case Iop_HSub8Ux4: case Iop_HSub8Sx4:
+      case Iop_Sad8Ux4:
+         BINARY(Ity_I32,Ity_I32, Ity_I32);
+
+      case Iop_Add64: case Iop_Sub64: case Iop_Mul64:
+      case Iop_Or64:  case Iop_And64: case Iop_Xor64:
+      case Iop_CmpORD64U:
+      case Iop_CmpORD64S:
+      case Iop_Avg8Ux8: case Iop_Avg16Ux4:
+      case Iop_Add8x8: case Iop_Add16x4: case Iop_Add32x2:
+      case Iop_Add32Fx2: case Iop_Sub32Fx2:
+      case Iop_CmpEQ8x8: case Iop_CmpEQ16x4: case Iop_CmpEQ32x2:
+      case Iop_CmpGT8Sx8: case Iop_CmpGT16Sx4: case Iop_CmpGT32Sx2:
+      case Iop_CmpGT8Ux8: case Iop_CmpGT16Ux4: case Iop_CmpGT32Ux2:
+      case Iop_CmpGT32Fx2: case Iop_CmpEQ32Fx2: case Iop_CmpGE32Fx2:
+      case Iop_InterleaveHI8x8: case Iop_InterleaveLO8x8:
+      case Iop_InterleaveHI16x4: case Iop_InterleaveLO16x4:
+      case Iop_InterleaveHI32x2: case Iop_InterleaveLO32x2:
+      case Iop_CatOddLanes8x8: case Iop_CatEvenLanes8x8:
+      case Iop_CatOddLanes16x4: case Iop_CatEvenLanes16x4:
+      case Iop_InterleaveOddLanes8x8: case Iop_InterleaveEvenLanes8x8:
+      case Iop_InterleaveOddLanes16x4: case Iop_InterleaveEvenLanes16x4:
+      case Iop_Perm8x8:
+      case Iop_Max8Ux8: case Iop_Max16Ux4: case Iop_Max32Ux2:
+      case Iop_Max8Sx8: case Iop_Max16Sx4: case Iop_Max32Sx2:
+      case Iop_Max32Fx2: case Iop_Min32Fx2:
+      case Iop_PwMax32Fx2: case Iop_PwMin32Fx2:
+      case Iop_Min8Ux8: case Iop_Min16Ux4: case Iop_Min32Ux2:
+      case Iop_Min8Sx8: case Iop_Min16Sx4: case Iop_Min32Sx2:
+      case Iop_PwMax8Ux8: case Iop_PwMax16Ux4: case Iop_PwMax32Ux2:
+      case Iop_PwMax8Sx8: case Iop_PwMax16Sx4: case Iop_PwMax32Sx2:
+      case Iop_PwMin8Ux8: case Iop_PwMin16Ux4: case Iop_PwMin32Ux2:
+      case Iop_PwMin8Sx8: case Iop_PwMin16Sx4: case Iop_PwMin32Sx2:
+      case Iop_Mul8x8: case Iop_Mul16x4: case Iop_Mul32x2:
+      case Iop_Mul32Fx2:
+      case Iop_PolynomialMul8x8:
+      case Iop_MulHi16Sx4: case Iop_MulHi16Ux4:
+      case Iop_QDMulHi16Sx4: case Iop_QDMulHi32Sx2:
+      case Iop_QRDMulHi16Sx4: case Iop_QRDMulHi32Sx2:
+      case Iop_QAdd8Sx8: case Iop_QAdd16Sx4:
+      case Iop_QAdd32Sx2: case Iop_QAdd64Sx1:
+      case Iop_QAdd8Ux8: case Iop_QAdd16Ux4:
+      case Iop_QAdd32Ux2: case Iop_QAdd64Ux1:
+      case Iop_PwAdd8x8: case Iop_PwAdd16x4: case Iop_PwAdd32x2:
+      case Iop_PwAdd32Fx2:
+      case Iop_QNarrowBin32Sto16Sx4:
+      case Iop_QNarrowBin16Sto8Sx8: case Iop_QNarrowBin16Sto8Ux8:
+      case Iop_NarrowBin16to8x8: case Iop_NarrowBin32to16x4:
+      case Iop_Sub8x8: case Iop_Sub16x4: case Iop_Sub32x2:
+      case Iop_QSub8Sx8: case Iop_QSub16Sx4:
+      case Iop_QSub32Sx2: case Iop_QSub64Sx1:
+      case Iop_QSub8Ux8: case Iop_QSub16Ux4:
+      case Iop_QSub32Ux2: case Iop_QSub64Ux1:
+      case Iop_Shl8x8: case Iop_Shl16x4: case Iop_Shl32x2:
+      case Iop_Shr8x8: case Iop_Shr16x4: case Iop_Shr32x2:
+      case Iop_Sar8x8: case Iop_Sar16x4: case Iop_Sar32x2:
+      case Iop_Sal8x8: case Iop_Sal16x4: case Iop_Sal32x2: case Iop_Sal64x1:
+      case Iop_QShl8x8: case Iop_QShl16x4: case Iop_QShl32x2: case Iop_QShl64x1:
+      case Iop_QSal8x8: case Iop_QSal16x4: case Iop_QSal32x2: case Iop_QSal64x1:
+      case Iop_RecipStep32Fx2:
+      case Iop_RSqrtStep32Fx2:
+         BINARY(Ity_I64,Ity_I64, Ity_I64);
+
+      case Iop_ShlN32x2: case Iop_ShlN16x4: case Iop_ShlN8x8:
+      case Iop_ShrN32x2: case Iop_ShrN16x4: case Iop_ShrN8x8:
+      case Iop_SarN32x2: case Iop_SarN16x4: case Iop_SarN8x8:
+      case Iop_QShlNsatUU8x8:  case Iop_QShlNsatUU16x4:
+      case Iop_QShlNsatUU32x2: case Iop_QShlNsatUU64x1:
+      case Iop_QShlNsatSU8x8:  case Iop_QShlNsatSU16x4:
+      case Iop_QShlNsatSU32x2: case Iop_QShlNsatSU64x1:
+      case Iop_QShlNsatSS8x8:  case Iop_QShlNsatSS16x4:
+      case Iop_QShlNsatSS32x2: case Iop_QShlNsatSS64x1:
+         BINARY(Ity_I64,Ity_I8, Ity_I64);
+
+      case Iop_Shl8: case Iop_Shr8: case Iop_Sar8:
+         BINARY(Ity_I8,Ity_I8, Ity_I8);
+      case Iop_Shl16: case Iop_Shr16: case Iop_Sar16:
+         BINARY(Ity_I16,Ity_I8, Ity_I16);
+      case Iop_Shl32: case Iop_Shr32: case Iop_Sar32:
+         BINARY(Ity_I32,Ity_I8, Ity_I32);
+      case Iop_Shl64: case Iop_Shr64: case Iop_Sar64:
+         BINARY(Ity_I64,Ity_I8, Ity_I64);
+
+      case Iop_Not8:
+         UNARY(Ity_I8, Ity_I8);
+      case Iop_Not16:
+         UNARY(Ity_I16, Ity_I16);
+      case Iop_Not32:
+      case Iop_CmpNEZ16x2: case Iop_CmpNEZ8x4:
+         UNARY(Ity_I32, Ity_I32);
+
+      case Iop_Not64:
+      case Iop_CmpNEZ32x2: case Iop_CmpNEZ16x4: case Iop_CmpNEZ8x8:
+      case Iop_Cnt8x8:
+      case Iop_Clz8x8: case Iop_Clz16x4: case Iop_Clz32x2:
+      case Iop_Cls8x8: case Iop_Cls16x4: case Iop_Cls32x2:
+      case Iop_PwAddL8Ux8: case Iop_PwAddL16Ux4: case Iop_PwAddL32Ux2:
+      case Iop_PwAddL8Sx8: case Iop_PwAddL16Sx4: case Iop_PwAddL32Sx2:
+      case Iop_Reverse8sIn64_x1: case Iop_Reverse16sIn64_x1:
+      case Iop_Reverse32sIn64_x1:
+      case Iop_Reverse8sIn32_x2: case Iop_Reverse16sIn32_x2:
+      case Iop_Reverse8sIn16_x4:
+      case Iop_FtoI32Sx2_RZ: case Iop_FtoI32Ux2_RZ:
+      case Iop_I32StoFx2: case Iop_I32UtoFx2:
+      case Iop_RecipEst32Ux2: case Iop_RecipEst32Fx2:
+      case Iop_Abs32Fx2:
+      case Iop_RSqrtEst32Fx2:
+      case Iop_RSqrtEst32Ux2:
+      case Iop_Neg32Fx2:
+      case Iop_Abs8x8: case Iop_Abs16x4: case Iop_Abs32x2:
+         UNARY(Ity_I64, Ity_I64);
+
+      case Iop_CmpEQ8: case Iop_CmpNE8:
+      case Iop_CasCmpEQ8: case Iop_CasCmpNE8: case Iop_ExpCmpNE8:
+         COMPARISON(Ity_I8);
+      case Iop_CmpEQ16: case Iop_CmpNE16:
+      case Iop_CasCmpEQ16: case Iop_CasCmpNE16: case Iop_ExpCmpNE16:
+         COMPARISON(Ity_I16);
+      case Iop_CmpEQ32: case Iop_CmpNE32:
+      case Iop_CasCmpEQ32: case Iop_CasCmpNE32: case Iop_ExpCmpNE32:
+      case Iop_CmpLT32S: case Iop_CmpLE32S:
+      case Iop_CmpLT32U: case Iop_CmpLE32U:
+         COMPARISON(Ity_I32);
+      case Iop_CmpEQ64: case Iop_CmpNE64:
+      case Iop_CasCmpEQ64: case Iop_CasCmpNE64: case Iop_ExpCmpNE64:
+      case Iop_CmpLT64S: case Iop_CmpLE64S:
+      case Iop_CmpLT64U: case Iop_CmpLE64U:
+         COMPARISON(Ity_I64);
+
+      case Iop_CmpNEZ8:  UNARY_COMPARISON(Ity_I8);
+      case Iop_CmpNEZ16: UNARY_COMPARISON(Ity_I16);
+      case Iop_CmpNEZ32: UNARY_COMPARISON(Ity_I32);
+      case Iop_CmpNEZ64: UNARY_COMPARISON(Ity_I64);
+
+      case Iop_Left8:  UNARY(Ity_I8, Ity_I8);
+      case Iop_Left16: UNARY(Ity_I16,Ity_I16);
+      case Iop_CmpwNEZ32: case Iop_Left32: UNARY(Ity_I32,Ity_I32);
+      case Iop_CmpwNEZ64: case Iop_Left64: UNARY(Ity_I64,Ity_I64);
+
+      case Iop_GetMSBs8x8:  UNARY(Ity_I64, Ity_I8);
+      case Iop_GetMSBs8x16: UNARY(Ity_V128, Ity_I16);
+
+      case Iop_MullU8: case Iop_MullS8:
+         BINARY(Ity_I8,Ity_I8, Ity_I16);
+      case Iop_MullU16: case Iop_MullS16:
+         BINARY(Ity_I16,Ity_I16, Ity_I32);
+      case Iop_MullU32: case Iop_MullS32:
+         BINARY(Ity_I32,Ity_I32, Ity_I64);
+      case Iop_MullU64: case Iop_MullS64:
+         BINARY(Ity_I64,Ity_I64, Ity_I128);
+
+      case Iop_Clz32: case Iop_Ctz32:
+         UNARY(Ity_I32, Ity_I32);
+
+      case Iop_Clz64: case Iop_Ctz64:
+         UNARY(Ity_I64, Ity_I64);
+
+      case Iop_DivU32: case Iop_DivS32: case Iop_DivU32E: case Iop_DivS32E:
+         BINARY(Ity_I32,Ity_I32, Ity_I32);
+
+      case Iop_DivU64: case Iop_DivS64: case Iop_DivS64E: case Iop_DivU64E:
+         BINARY(Ity_I64,Ity_I64, Ity_I64);
+
+      case Iop_DivModU64to32: case Iop_DivModS64to32:
+         BINARY(Ity_I64,Ity_I32, Ity_I64);
+
+      case Iop_DivModU128to64: case Iop_DivModS128to64:
+         BINARY(Ity_I128,Ity_I64, Ity_I128);
+
+      case Iop_DivModS64to64:
+         BINARY(Ity_I64,Ity_I64, Ity_I128);
+
+      case Iop_16HIto8: case Iop_16to8:
+         UNARY(Ity_I16, Ity_I8);
+      case Iop_8HLto16:
+         BINARY(Ity_I8,Ity_I8, Ity_I16);
+
+      case Iop_32HIto16: case Iop_32to16:
+         UNARY(Ity_I32, Ity_I16);
+      case Iop_16HLto32:
+         BINARY(Ity_I16,Ity_I16, Ity_I32);
+
+      case Iop_64HIto32: case Iop_64to32:
+         UNARY(Ity_I64, Ity_I32);
+      case Iop_32HLto64:
+         BINARY(Ity_I32,Ity_I32, Ity_I64);
+
+      case Iop_128HIto64: case Iop_128to64:
+         UNARY(Ity_I128, Ity_I64);
+      case Iop_64HLto128:
+         BINARY(Ity_I64,Ity_I64, Ity_I128);
+
+      case Iop_Not1:   UNARY(Ity_I1, Ity_I1);
+      case Iop_1Uto8:  UNARY(Ity_I1, Ity_I8);
+      case Iop_1Sto8:  UNARY(Ity_I1, Ity_I8);
+      case Iop_1Sto16: UNARY(Ity_I1, Ity_I16);
+      case Iop_1Uto32: case Iop_1Sto32: UNARY(Ity_I1, Ity_I32);
+      case Iop_1Sto64: case Iop_1Uto64: UNARY(Ity_I1, Ity_I64);
+      case Iop_32to1:  UNARY(Ity_I32, Ity_I1);
+      case Iop_64to1:  UNARY(Ity_I64, Ity_I1);
+
+      case Iop_8Uto32: case Iop_8Sto32:
+         UNARY(Ity_I8, Ity_I32);
+
+      case Iop_8Uto16: case Iop_8Sto16:
+         UNARY(Ity_I8, Ity_I16);
+
+      case Iop_16Uto32: case Iop_16Sto32: 
+         UNARY(Ity_I16, Ity_I32);
+
+      case Iop_32Sto64: case Iop_32Uto64:
+         UNARY(Ity_I32, Ity_I64);
+
+      case Iop_8Uto64: case Iop_8Sto64:
+         UNARY(Ity_I8, Ity_I64);
+
+      case Iop_16Uto64: case Iop_16Sto64:
+         UNARY(Ity_I16, Ity_I64);
+      case Iop_64to16:
+         UNARY(Ity_I64, Ity_I16);
+
+      case Iop_32to8: UNARY(Ity_I32, Ity_I8);
+      case Iop_64to8: UNARY(Ity_I64, Ity_I8);
+
+      case Iop_AddF64:    case Iop_SubF64: 
+      case Iop_MulF64:    case Iop_DivF64:
+      case Iop_AddF64r32: case Iop_SubF64r32: 
+      case Iop_MulF64r32: case Iop_DivF64r32:
+         TERNARY(ity_RMode,Ity_F64,Ity_F64, Ity_F64);
+
+      case Iop_AddF32: case Iop_SubF32:
+      case Iop_MulF32: case Iop_DivF32:
+         TERNARY(ity_RMode,Ity_F32,Ity_F32, Ity_F32);
+
+      case Iop_NegF64: case Iop_AbsF64: 
+         UNARY(Ity_F64, Ity_F64);
+
+      case Iop_NegF32: case Iop_AbsF32:
+         UNARY(Ity_F32, Ity_F32);
+
+      case Iop_SqrtF64:
+      case Iop_RecpExpF64:
+         BINARY(ity_RMode,Ity_F64, Ity_F64);
+
+      case Iop_SqrtF32:
+      case Iop_RoundF32toInt:
+      case Iop_RecpExpF32:
+         BINARY(ity_RMode,Ity_F32, Ity_F32);
+
+      case Iop_CmpF32:
+         BINARY(Ity_F32,Ity_F32, Ity_I32);
+
+      case Iop_CmpF64:
+         BINARY(Ity_F64,Ity_F64, Ity_I32);
+
+      case Iop_CmpF128:
+         BINARY(Ity_F128,Ity_F128, Ity_I32);
+
+      case Iop_F64toI16S: BINARY(ity_RMode,Ity_F64, Ity_I16);
+      case Iop_F64toI32S: BINARY(ity_RMode,Ity_F64, Ity_I32);
+      case Iop_F64toI64S: case Iop_F64toI64U:
+         BINARY(ity_RMode,Ity_F64, Ity_I64);
+
+      case Iop_F64toI32U: BINARY(ity_RMode,Ity_F64, Ity_I32);
+
+      case Iop_I32StoF64: UNARY(Ity_I32, Ity_F64);
+      case Iop_I64StoF64: BINARY(ity_RMode,Ity_I64, Ity_F64);
+      case Iop_I64UtoF64: BINARY(ity_RMode,Ity_I64, Ity_F64);
+      case Iop_I64UtoF32: BINARY(ity_RMode,Ity_I64, Ity_F32);
+
+      case Iop_I32UtoF64: UNARY(Ity_I32, Ity_F64);
+
+      case Iop_F32toI32S: BINARY(ity_RMode,Ity_F32, Ity_I32);
+      case Iop_F32toI64S: BINARY(ity_RMode,Ity_F32, Ity_I64);
+      case Iop_F32toI32U: BINARY(ity_RMode,Ity_F32, Ity_I32);
+      case Iop_F32toI64U: BINARY(ity_RMode,Ity_F32, Ity_I64);
+
+      case Iop_I32UtoF32: BINARY(ity_RMode,Ity_I32, Ity_F32);
+      case Iop_I32StoF32: BINARY(ity_RMode,Ity_I32, Ity_F32);
+      case Iop_I64StoF32: BINARY(ity_RMode,Ity_I64, Ity_F32);
+
+      case Iop_F32toF64: UNARY(Ity_F32, Ity_F64);
+      case Iop_F16toF64: UNARY(Ity_F16, Ity_F64);
+      case Iop_F16toF32: UNARY(Ity_F16, Ity_F32);
+
+      case Iop_F64toF32: BINARY(ity_RMode,Ity_F64, Ity_F32);
+      case Iop_F64toF16: BINARY(ity_RMode,Ity_F64, Ity_F16);
+      case Iop_F32toF16: BINARY(ity_RMode,Ity_F32, Ity_F16);
+
+      case Iop_ReinterpI64asF64: UNARY(Ity_I64, Ity_F64);
+      case Iop_ReinterpF64asI64: UNARY(Ity_F64, Ity_I64);
+      case Iop_ReinterpI32asF32: UNARY(Ity_I32, Ity_F32);
+      case Iop_ReinterpF32asI32: UNARY(Ity_F32, Ity_I32);
+
+      case Iop_AtanF64: case Iop_Yl2xF64:  case Iop_Yl2xp1F64: 
+      case Iop_ScaleF64: case Iop_PRemF64: case Iop_PRem1F64:
+         TERNARY(ity_RMode,Ity_F64,Ity_F64, Ity_F64);
+
+      case Iop_PRemC3210F64: case Iop_PRem1C3210F64:
+         TERNARY(ity_RMode,Ity_F64,Ity_F64, Ity_I32);
+
+      case Iop_SinF64: case Iop_CosF64: case Iop_TanF64: 
+      case Iop_2xm1F64:
+      case Iop_RoundF64toInt: BINARY(ity_RMode,Ity_F64, Ity_F64);
+
+      case Iop_MAddF64: case Iop_MSubF64:
+      case Iop_MAddF64r32: case Iop_MSubF64r32:
+         QUATERNARY(ity_RMode,Ity_F64,Ity_F64,Ity_F64, Ity_F64);
+
+      case Iop_RSqrtEst5GoodF64:
+      case Iop_RoundF64toF64_NEAREST: case Iop_RoundF64toF64_NegINF:
+      case Iop_RoundF64toF64_PosINF: case Iop_RoundF64toF64_ZERO:
+         UNARY(Ity_F64, Ity_F64);
+      case Iop_RoundF64toF32:
+         BINARY(ity_RMode,Ity_F64, Ity_F64);
+      case Iop_TruncF64asF32:
+         UNARY(Ity_F64, Ity_F32);
+
+      case Iop_I32UtoFx4:
+      case Iop_I32StoFx4:
+      case Iop_QFtoI32Ux4_RZ:
+      case Iop_QFtoI32Sx4_RZ:
+      case Iop_FtoI32Ux4_RZ:
+      case Iop_FtoI32Sx4_RZ:
+      case Iop_RoundF32x4_RM:
+      case Iop_RoundF32x4_RP:
+      case Iop_RoundF32x4_RN:
+      case Iop_RoundF32x4_RZ:
+      case Iop_Abs64Fx2: case Iop_Abs32Fx4:
+      case Iop_RSqrtEst32Fx4:
+      case Iop_RSqrtEst32Ux4:
+         UNARY(Ity_V128, Ity_V128);
+
+      case Iop_Sqrt64Fx2:
+      case Iop_Sqrt32Fx4:
+         BINARY(ity_RMode,Ity_V128, Ity_V128);
+
+      case Iop_64HLtoV128:
+         BINARY(Ity_I64,Ity_I64, Ity_V128);
+
+      case Iop_V128to64: case Iop_V128HIto64:
+      case Iop_NarrowUn16to8x8:
+      case Iop_NarrowUn32to16x4:
+      case Iop_NarrowUn64to32x2:
+      case Iop_QNarrowUn16Uto8Ux8:
+      case Iop_QNarrowUn32Uto16Ux4:
+      case Iop_QNarrowUn64Uto32Ux2:
+      case Iop_QNarrowUn16Sto8Sx8:
+      case Iop_QNarrowUn32Sto16Sx4:
+      case Iop_QNarrowUn64Sto32Sx2:
+      case Iop_QNarrowUn16Sto8Ux8:
+      case Iop_QNarrowUn32Sto16Ux4:
+      case Iop_QNarrowUn64Sto32Ux2:
+      case Iop_F32toF16x4:
+         UNARY(Ity_V128, Ity_I64);
+
+      case Iop_Widen8Uto16x8:
+      case Iop_Widen16Uto32x4:
+      case Iop_Widen32Uto64x2:
+      case Iop_Widen8Sto16x8:
+      case Iop_Widen16Sto32x4:
+      case Iop_Widen32Sto64x2:
+      case Iop_F16toF32x4:
+         UNARY(Ity_I64, Ity_V128);
+
+      case Iop_V128to32:    UNARY(Ity_V128, Ity_I32);
+      case Iop_32UtoV128:   UNARY(Ity_I32, Ity_V128);
+      case Iop_64UtoV128:   UNARY(Ity_I64, Ity_V128);
+      case Iop_SetV128lo32: BINARY(Ity_V128,Ity_I32, Ity_V128);
+      case Iop_SetV128lo64: BINARY(Ity_V128,Ity_I64, Ity_V128);
+
+      case Iop_Dup8x16: UNARY(Ity_I8, Ity_V128);
+      case Iop_Dup16x8: UNARY(Ity_I16, Ity_V128);
+      case Iop_Dup32x4: UNARY(Ity_I32, Ity_V128);
+      case Iop_Dup8x8:  UNARY(Ity_I8, Ity_I64);
+      case Iop_Dup16x4: UNARY(Ity_I16, Ity_I64);
+      case Iop_Dup32x2: UNARY(Ity_I32, Ity_I64);
+
+      case Iop_CmpEQ32Fx4: case Iop_CmpLT32Fx4:
+      case Iop_CmpEQ64Fx2: case Iop_CmpLT64Fx2:
+      case Iop_CmpLE32Fx4: case Iop_CmpUN32Fx4:
+      case Iop_CmpLE64Fx2: case Iop_CmpUN64Fx2:
+      case Iop_CmpGT32Fx4: case Iop_CmpGE32Fx4:
+      case Iop_CmpEQ32F0x4: case Iop_CmpLT32F0x4:
+      case Iop_CmpEQ64F0x2: case Iop_CmpLT64F0x2:
+      case Iop_CmpLE32F0x4: case Iop_CmpUN32F0x4:
+      case Iop_CmpLE64F0x2: case Iop_CmpUN64F0x2:
+      case Iop_Add32F0x4:
+      case Iop_Add64F0x2:
+      case Iop_Div32F0x4:
+      case Iop_Div64F0x2:
+      case Iop_Max32Fx4: case Iop_Max32F0x4:
+      case Iop_PwMax32Fx4: case Iop_PwMin32Fx4:
+      case Iop_Max64Fx2: case Iop_Max64F0x2:
+      case Iop_Min32Fx4: case Iop_Min32F0x4:
+      case Iop_Min64Fx2: case Iop_Min64F0x2:
+      case Iop_Mul32F0x4:
+      case Iop_Mul64F0x2:
+      case Iop_Sub32F0x4:
+      case Iop_Sub64F0x2:
+      case Iop_AndV128: case Iop_OrV128: case Iop_XorV128:
+      case Iop_Add8x16:   case Iop_Add16x8:   
+      case Iop_Add32x4:   case Iop_Add64x2:
+      case Iop_QAdd8Ux16: case Iop_QAdd16Ux8:
+      case Iop_QAdd32Ux4: case Iop_QAdd64Ux2:
+      case Iop_QAdd8Sx16: case Iop_QAdd16Sx8:
+      case Iop_QAdd32Sx4: case Iop_QAdd64Sx2:
+      case Iop_QAddExtUSsatSS8x16: case Iop_QAddExtUSsatSS16x8:
+      case Iop_QAddExtUSsatSS32x4: case Iop_QAddExtUSsatSS64x2:
+      case Iop_QAddExtSUsatUU8x16: case Iop_QAddExtSUsatUU16x8:
+      case Iop_QAddExtSUsatUU32x4: case Iop_QAddExtSUsatUU64x2:
+      case Iop_PwAdd8x16: case Iop_PwAdd16x8: case Iop_PwAdd32x4:
+      case Iop_Sub8x16:   case Iop_Sub16x8:
+      case Iop_Sub32x4:   case Iop_Sub64x2:
+      case Iop_QSub8Ux16: case Iop_QSub16Ux8:
+      case Iop_QSub32Ux4: case Iop_QSub64Ux2:
+      case Iop_QSub8Sx16: case Iop_QSub16Sx8:
+      case Iop_QSub32Sx4: case Iop_QSub64Sx2:
+      case Iop_Mul8x16: case Iop_Mul16x8: case Iop_Mul32x4:
+      case Iop_PolynomialMul8x16:
+      case Iop_PolynomialMulAdd8x16: case Iop_PolynomialMulAdd16x8:
+      case Iop_PolynomialMulAdd32x4: case Iop_PolynomialMulAdd64x2:
+      case Iop_MulHi16Ux8: case Iop_MulHi32Ux4: 
+      case Iop_MulHi16Sx8: case Iop_MulHi32Sx4: 
+      case Iop_QDMulHi16Sx8: case Iop_QDMulHi32Sx4:
+      case Iop_QRDMulHi16Sx8: case Iop_QRDMulHi32Sx4:
+      case Iop_MullEven8Ux16: case Iop_MullEven16Ux8: case Iop_MullEven32Ux4:
+      case Iop_MullEven8Sx16: case Iop_MullEven16Sx8: case Iop_MullEven32Sx4:
+      case Iop_Avg8Ux16: case Iop_Avg16Ux8: case Iop_Avg32Ux4:
+      case Iop_Avg8Sx16: case Iop_Avg16Sx8: case Iop_Avg32Sx4:
+      case Iop_Max8Sx16: case Iop_Max16Sx8: case Iop_Max32Sx4:
+      case Iop_Max64Sx2:
+      case Iop_Max8Ux16: case Iop_Max16Ux8: case Iop_Max32Ux4:
+      case Iop_Max64Ux2:
+      case Iop_Min8Sx16: case Iop_Min16Sx8: case Iop_Min32Sx4:
+      case Iop_Min64Sx2:
+      case Iop_Min8Ux16: case Iop_Min16Ux8: case Iop_Min32Ux4:
+      case Iop_Min64Ux2:
+      case Iop_CmpEQ8x16:  case Iop_CmpEQ16x8:  case Iop_CmpEQ32x4:
+      case Iop_CmpEQ64x2:
+      case Iop_CmpGT8Sx16: case Iop_CmpGT16Sx8: case Iop_CmpGT32Sx4:
+      case Iop_CmpGT64Sx2:
+      case Iop_CmpGT8Ux16: case Iop_CmpGT16Ux8: case Iop_CmpGT32Ux4:
+      case Iop_CmpGT64Ux2:
+      case Iop_Shl8x16: case Iop_Shl16x8: case Iop_Shl32x4: case Iop_Shl64x2:
+      case Iop_QShl8x16: case Iop_QShl16x8:
+      case Iop_QShl32x4: case Iop_QShl64x2:
+      case Iop_QSal8x16: case Iop_QSal16x8:
+      case Iop_QSal32x4: case Iop_QSal64x2:
+      case Iop_Shr8x16: case Iop_Shr16x8: case Iop_Shr32x4: case Iop_Shr64x2:
+      case Iop_Sar8x16: case Iop_Sar16x8: case Iop_Sar32x4: case Iop_Sar64x2:
+      case Iop_Sal8x16: case Iop_Sal16x8: case Iop_Sal32x4: case Iop_Sal64x2:
+      case Iop_Rol8x16: case Iop_Rol16x8: case Iop_Rol32x4:case Iop_Rol64x2:
+      case Iop_QNarrowBin16Sto8Ux16: case Iop_QNarrowBin32Sto16Ux8:
+      case Iop_QNarrowBin16Sto8Sx16: case Iop_QNarrowBin32Sto16Sx8:
+      case Iop_QNarrowBin16Uto8Ux16: case Iop_QNarrowBin32Uto16Ux8:
+      case Iop_QNarrowBin64Sto32Sx4: case Iop_QNarrowBin64Uto32Ux4:
+      case Iop_NarrowBin16to8x16:   case Iop_NarrowBin32to16x8:
+      case Iop_NarrowBin64to32x4:
+      case Iop_InterleaveHI8x16: case Iop_InterleaveHI16x8:
+      case Iop_InterleaveHI32x4: case Iop_InterleaveHI64x2:
+      case Iop_InterleaveLO8x16: case Iop_InterleaveLO16x8:
+      case Iop_InterleaveLO32x4: case Iop_InterleaveLO64x2:
+      case Iop_CatOddLanes8x16: case Iop_CatEvenLanes8x16:
+      case Iop_CatOddLanes16x8: case Iop_CatEvenLanes16x8:
+      case Iop_CatOddLanes32x4: case Iop_CatEvenLanes32x4:
+      case Iop_InterleaveOddLanes8x16: case Iop_InterleaveEvenLanes8x16:
+      case Iop_InterleaveOddLanes16x8: case Iop_InterleaveEvenLanes16x8:
+      case Iop_InterleaveOddLanes32x4: case Iop_InterleaveEvenLanes32x4:
+      case Iop_Perm8x16: case Iop_Perm32x4:
+      case Iop_RecipStep32Fx4: case Iop_RecipStep64Fx2:
+      case Iop_RSqrtStep32Fx4: case Iop_RSqrtStep64Fx2:
+      case Iop_CipherV128:
+      case Iop_CipherLV128:
+      case Iop_NCipherV128:
+      case Iop_NCipherLV128:
+      case Iop_Sh8Sx16: case Iop_Sh16Sx8:
+      case Iop_Sh32Sx4: case Iop_Sh64Sx2:
+      case Iop_Sh8Ux16: case Iop_Sh16Ux8:
+      case Iop_Sh32Ux4: case Iop_Sh64Ux2:
+      case Iop_Rsh8Sx16: case Iop_Rsh16Sx8:
+      case Iop_Rsh32Sx4: case Iop_Rsh64Sx2:
+      case Iop_Rsh8Ux16: case Iop_Rsh16Ux8:
+      case Iop_Rsh32Ux4: case Iop_Rsh64Ux2:
+         BINARY(Ity_V128,Ity_V128, Ity_V128);
+
+      case Iop_PolynomialMull8x8:
+      case Iop_Mull8Ux8: case Iop_Mull8Sx8:
+      case Iop_Mull16Ux4: case Iop_Mull16Sx4:
+      case Iop_Mull32Ux2: case Iop_Mull32Sx2:
+         BINARY(Ity_I64, Ity_I64, Ity_V128);
+
+      case Iop_NotV128:
+      case Iop_RecipEst32Fx4: case Iop_RecipEst32F0x4:
+      case Iop_RecipEst64Fx2: case Iop_RSqrtEst64Fx2:
+      case Iop_RecipEst32Ux4:
+      case Iop_RSqrtEst32F0x4:
+      case Iop_Sqrt32F0x4:
+      case Iop_Sqrt64F0x2:
+      case Iop_CmpNEZ8x16: case Iop_CmpNEZ16x8:
+      case Iop_CmpNEZ32x4: case Iop_CmpNEZ64x2:
+      case Iop_Cnt8x16:
+      case Iop_Clz8x16: case Iop_Clz16x8: case Iop_Clz32x4: case Iop_Clz64x2:
+      case Iop_Cls8x16: case Iop_Cls16x8: case Iop_Cls32x4:
+      case Iop_PwAddL8Ux16: case Iop_PwAddL16Ux8: case Iop_PwAddL32Ux4:
+      case Iop_PwAddL8Sx16: case Iop_PwAddL16Sx8: case Iop_PwAddL32Sx4:
+      case Iop_Reverse8sIn64_x2: case Iop_Reverse16sIn64_x2:
+      case Iop_Reverse32sIn64_x2:
+      case Iop_Reverse8sIn32_x4: case Iop_Reverse16sIn32_x4:
+      case Iop_Reverse8sIn16_x8:
+      case Iop_Reverse1sIn8_x16:
+      case Iop_Neg64Fx2: case Iop_Neg32Fx4:
+      case Iop_Abs8x16: case Iop_Abs16x8: case Iop_Abs32x4: case Iop_Abs64x2:
+      case Iop_CipherSV128:
+      case Iop_PwBitMtxXpose64x2:
+      case Iop_ZeroHI64ofV128:  case Iop_ZeroHI96ofV128:
+      case Iop_ZeroHI112ofV128: case Iop_ZeroHI120ofV128:
+         UNARY(Ity_V128, Ity_V128);
+
+      case Iop_ShlV128: case Iop_ShrV128:
+      case Iop_ShlN8x16: case Iop_ShlN16x8: 
+      case Iop_ShlN32x4: case Iop_ShlN64x2:
+      case Iop_ShrN8x16: case Iop_ShrN16x8: 
+      case Iop_ShrN32x4: case Iop_ShrN64x2:
+      case Iop_SarN8x16: case Iop_SarN16x8:
+      case Iop_SarN32x4: case Iop_SarN64x2:
+      case Iop_QShlNsatUU8x16: case Iop_QShlNsatUU16x8:
+      case Iop_QShlNsatUU32x4: case Iop_QShlNsatUU64x2:
+      case Iop_QShlNsatSU8x16: case Iop_QShlNsatSU16x8:
+      case Iop_QShlNsatSU32x4: case Iop_QShlNsatSU64x2:
+      case Iop_QShlNsatSS8x16: case Iop_QShlNsatSS16x8:
+      case Iop_QShlNsatSS32x4: case Iop_QShlNsatSS64x2:
+      case Iop_SHA256:    case Iop_SHA512:
+      case Iop_QandQShrNnarrow16Uto8Ux8:
+      case Iop_QandQShrNnarrow32Uto16Ux4:
+      case Iop_QandQShrNnarrow64Uto32Ux2:
+      case Iop_QandQSarNnarrow16Sto8Sx8:
+      case Iop_QandQSarNnarrow32Sto16Sx4:
+      case Iop_QandQSarNnarrow64Sto32Sx2:
+      case Iop_QandQSarNnarrow16Sto8Ux8:
+      case Iop_QandQSarNnarrow32Sto16Ux4:
+      case Iop_QandQSarNnarrow64Sto32Ux2:
+      case Iop_QandQRShrNnarrow16Uto8Ux8:
+      case Iop_QandQRShrNnarrow32Uto16Ux4:
+      case Iop_QandQRShrNnarrow64Uto32Ux2:
+      case Iop_QandQRSarNnarrow16Sto8Sx8:
+      case Iop_QandQRSarNnarrow32Sto16Sx4:
+      case Iop_QandQRSarNnarrow64Sto32Sx2:
+      case Iop_QandQRSarNnarrow16Sto8Ux8:
+      case Iop_QandQRSarNnarrow32Sto16Ux4:
+      case Iop_QandQRSarNnarrow64Sto32Ux2:
+         BINARY(Ity_V128,Ity_I8, Ity_V128);
+
+      case Iop_F32ToFixed32Ux4_RZ:
+      case Iop_F32ToFixed32Sx4_RZ:
+      case Iop_Fixed32UToF32x4_RN:
+      case Iop_Fixed32SToF32x4_RN:
+         BINARY(Ity_V128, Ity_I8, Ity_V128);
+
+      case Iop_F32ToFixed32Ux2_RZ:
+      case Iop_F32ToFixed32Sx2_RZ:
+      case Iop_Fixed32UToF32x2_RN:
+      case Iop_Fixed32SToF32x2_RN:
+         BINARY(Ity_I64, Ity_I8, Ity_I64);
+
+      case Iop_GetElem8x16:
+         BINARY(Ity_V128, Ity_I8, Ity_I8);
+      case Iop_GetElem16x8:
+         BINARY(Ity_V128, Ity_I8, Ity_I16);
+      case Iop_GetElem32x4:
+         BINARY(Ity_V128, Ity_I8, Ity_I32);
+      case Iop_GetElem64x2:
+         BINARY(Ity_V128, Ity_I8, Ity_I64);
+      case Iop_GetElem8x8:
+         BINARY(Ity_I64, Ity_I8, Ity_I8);
+      case Iop_GetElem16x4:
+         BINARY(Ity_I64, Ity_I8, Ity_I16);
+      case Iop_GetElem32x2:
+         BINARY(Ity_I64, Ity_I8, Ity_I32);
+      case Iop_SetElem8x8:
+         TERNARY(Ity_I64, Ity_I8, Ity_I8, Ity_I64);
+      case Iop_SetElem16x4:
+         TERNARY(Ity_I64, Ity_I8, Ity_I16, Ity_I64);
+      case Iop_SetElem32x2:
+         TERNARY(Ity_I64, Ity_I8, Ity_I32, Ity_I64);
+
+      case Iop_Slice64:
+         TERNARY(Ity_I64, Ity_I64, Ity_I8, Ity_I64);
+      case Iop_SliceV128:
+         TERNARY(Ity_V128, Ity_V128, Ity_I8, Ity_V128);
+
+      case Iop_BCDAdd:
+      case Iop_BCDSub:
+         TERNARY(Ity_V128,Ity_V128, Ity_I8, Ity_V128);
+      case Iop_QDMull16Sx4: case Iop_QDMull32Sx2:
+         BINARY(Ity_I64, Ity_I64, Ity_V128);
+
+      /* s390 specific */
+      case Iop_MAddF32:
+      case Iop_MSubF32:
+         QUATERNARY(ity_RMode,Ity_F32,Ity_F32,Ity_F32, Ity_F32);
+
+      case Iop_F64HLtoF128:
+        BINARY(Ity_F64,Ity_F64, Ity_F128);
+
+      case Iop_F128HItoF64:
+      case Iop_F128LOtoF64:
+        UNARY(Ity_F128, Ity_F64);
+
+      case Iop_AddF128:
+      case Iop_SubF128:
+      case Iop_MulF128:
+      case Iop_DivF128:
+         TERNARY(ity_RMode,Ity_F128,Ity_F128, Ity_F128);
+
+      case Iop_Add64Fx2: case Iop_Sub64Fx2:
+      case Iop_Mul64Fx2: case Iop_Div64Fx2: 
+      case Iop_Add32Fx4: case Iop_Sub32Fx4:
+      case Iop_Mul32Fx4: case Iop_Div32Fx4: 
+         TERNARY(ity_RMode,Ity_V128,Ity_V128, Ity_V128);
+
+      case Iop_Add64Fx4: case Iop_Sub64Fx4:
+      case Iop_Mul64Fx4: case Iop_Div64Fx4:
+      case Iop_Add32Fx8: case Iop_Sub32Fx8:
+      case Iop_Mul32Fx8: case Iop_Div32Fx8:
+         TERNARY(ity_RMode,Ity_V256,Ity_V256, Ity_V256);
+
+      case Iop_NegF128:
+      case Iop_AbsF128:
+         UNARY(Ity_F128, Ity_F128);
+
+      case Iop_SqrtF128:
+         BINARY(ity_RMode,Ity_F128, Ity_F128);
+
+      case Iop_I32StoF128: UNARY(Ity_I32, Ity_F128);
+      case Iop_I64StoF128: UNARY(Ity_I64, Ity_F128);
+
+      case Iop_I32UtoF128: UNARY(Ity_I32, Ity_F128);
+      case Iop_I64UtoF128: UNARY(Ity_I64, Ity_F128);
+
+      case Iop_F128toI32S: BINARY(ity_RMode,Ity_F128, Ity_I32);
+      case Iop_F128toI64S: BINARY(ity_RMode,Ity_F128, Ity_I64);
+
+      case Iop_F128toI32U: BINARY(ity_RMode,Ity_F128, Ity_I32);
+      case Iop_F128toI64U: BINARY(ity_RMode,Ity_F128, Ity_I64);
+
+      case Iop_F32toF128: UNARY(Ity_F32, Ity_F128);
+      case Iop_F64toF128: UNARY(Ity_F64, Ity_F128);
+
+      case Iop_F128toF32: BINARY(ity_RMode,Ity_F128, Ity_F32);
+      case Iop_F128toF64: BINARY(ity_RMode,Ity_F128, Ity_F64);
+
+      case Iop_D32toD64:
+         UNARY(Ity_D32, Ity_D64);
+
+      case Iop_ExtractExpD64:
+         UNARY(Ity_D64, Ity_I64);
+
+      case Iop_ExtractSigD64:
+         UNARY(Ity_D64, Ity_I64);
+
+      case Iop_InsertExpD64:
+         BINARY(Ity_I64,Ity_D64, Ity_D64);
+
+      case Iop_ExtractExpD128:
+         UNARY(Ity_D128, Ity_I64);
+
+      case Iop_ExtractSigD128:
+        UNARY(Ity_D128, Ity_I64);
+
+      case Iop_InsertExpD128:
+         BINARY(Ity_I64,Ity_D128, Ity_D128);
+
+      case Iop_D64toD128:
+         UNARY(Ity_D64, Ity_D128);
+
+      case Iop_ReinterpD64asI64:
+	UNARY(Ity_D64, Ity_I64);
+
+      case Iop_ReinterpI64asD64:
+         UNARY(Ity_I64, Ity_D64);
+
+      case Iop_RoundD64toInt:
+         BINARY(ity_RMode,Ity_D64, Ity_D64);
+
+      case Iop_RoundD128toInt:
+         BINARY(ity_RMode,Ity_D128, Ity_D128);
+
+      case Iop_I32StoD128:
+      case Iop_I32UtoD128:
+         UNARY(Ity_I32, Ity_D128);
+
+      case Iop_I64StoD128:
+         UNARY(Ity_I64, Ity_D128);
+
+      case Iop_I64UtoD128:
+         UNARY(Ity_I64, Ity_D128);
+
+      case Iop_DPBtoBCD:
+      case Iop_BCDtoDPB:
+         UNARY(Ity_I64, Ity_I64);
+
+      case Iop_D128HItoD64:
+      case Iop_D128LOtoD64:
+         UNARY(Ity_D128, Ity_D64);
+
+      case Iop_D128toI64S:
+         BINARY(ity_RMode, Ity_D128, Ity_I64);
+
+      case Iop_D128toI64U:
+         BINARY(ity_RMode, Ity_D128, Ity_I64);
+
+      case Iop_D128toI32S:
+      case Iop_D128toI32U:
+         BINARY(ity_RMode, Ity_D128, Ity_I32);
+
+      case Iop_D64HLtoD128:
+         BINARY(Ity_D64, Ity_D64, Ity_D128);
+
+      case Iop_ShlD64:
+      case Iop_ShrD64:
+         BINARY(Ity_D64, Ity_I8, Ity_D64 );
+
+      case Iop_D64toD32:
+         BINARY(ity_RMode, Ity_D64, Ity_D32);
+
+      case Iop_D64toI32S:
+      case Iop_D64toI32U:
+         BINARY(ity_RMode, Ity_D64, Ity_I32);
+
+      case Iop_D64toI64S:
+         BINARY(ity_RMode, Ity_D64, Ity_I64);
+
+      case Iop_D64toI64U:
+         BINARY(ity_RMode, Ity_D64, Ity_I64);
+
+      case Iop_I32StoD64:
+      case Iop_I32UtoD64:
+         UNARY(Ity_I32, Ity_D64);
+
+      case Iop_I64StoD64:
+         BINARY(ity_RMode, Ity_I64, Ity_D64);
+
+      case Iop_I64UtoD64:
+         BINARY(ity_RMode, Ity_I64, Ity_D64);
+
+      case Iop_F32toD32:
+         BINARY(ity_RMode, Ity_F32, Ity_D32);
+
+      case Iop_F32toD64:
+         BINARY(ity_RMode, Ity_F32, Ity_D64);
+
+      case Iop_F32toD128:
+         BINARY(ity_RMode, Ity_F32, Ity_D128);
+
+      case Iop_F64toD32:
+         BINARY(ity_RMode, Ity_F64, Ity_D32);
+
+      case Iop_F64toD64:
+         BINARY(ity_RMode, Ity_F64, Ity_D64);
+
+      case Iop_F64toD128:
+         BINARY(ity_RMode, Ity_F64, Ity_D128);
+
+      case Iop_F128toD32:
+         BINARY(ity_RMode, Ity_F128, Ity_D32);
+
+      case Iop_F128toD64:
+         BINARY(ity_RMode, Ity_F128, Ity_D64);
+
+      case Iop_F128toD128:
+         BINARY(ity_RMode, Ity_F128, Ity_D128);
+
+      case Iop_D32toF32:
+         BINARY(ity_RMode, Ity_D32, Ity_F32);
+
+      case Iop_D32toF64:
+         BINARY(ity_RMode, Ity_D32, Ity_F64);
+
+      case Iop_D32toF128:
+         BINARY(ity_RMode, Ity_D32, Ity_F128);
+
+      case Iop_D64toF32:
+         BINARY(ity_RMode, Ity_D64, Ity_F32);
+
+      case Iop_D64toF64:
+         BINARY(ity_RMode, Ity_D64, Ity_F64);
+
+      case Iop_D64toF128:
+         BINARY(ity_RMode, Ity_D64, Ity_F128);
+
+      case Iop_D128toF32:
+         BINARY(ity_RMode, Ity_D128, Ity_F32);
+
+      case Iop_D128toF64:
+         BINARY(ity_RMode, Ity_D128, Ity_F64);
+
+      case Iop_D128toF128:
+         BINARY(ity_RMode, Ity_D128, Ity_F128);
+
+      case Iop_CmpD64:
+      case Iop_CmpExpD64:
+         BINARY(Ity_D64,Ity_D64, Ity_I32);
+
+      case Iop_CmpD128:
+      case Iop_CmpExpD128:
+         BINARY(Ity_D128,Ity_D128, Ity_I32);
+
+      case Iop_QuantizeD64:
+         TERNARY(ity_RMode,Ity_D64,Ity_D64, Ity_D64);
+
+      case Iop_SignificanceRoundD64:
+         TERNARY(ity_RMode, Ity_I8,Ity_D64, Ity_D64);
+
+      case Iop_QuantizeD128:
+         TERNARY(ity_RMode,Ity_D128,Ity_D128, Ity_D128);
+
+      case Iop_SignificanceRoundD128:
+         TERNARY(ity_RMode, Ity_I8,Ity_D128, Ity_D128);
+
+      case Iop_ShlD128:
+      case Iop_ShrD128:
+         BINARY(Ity_D128, Ity_I8, Ity_D128 );
+
+      case Iop_AddD64:
+      case Iop_SubD64:
+      case Iop_MulD64:
+      case Iop_DivD64:
+         TERNARY( ity_RMode, Ity_D64, Ity_D64, Ity_D64 );
+
+      case Iop_D128toD64:
+         BINARY( ity_RMode, Ity_D128, Ity_D64 );
+
+      case Iop_AddD128:
+      case Iop_SubD128:
+      case Iop_MulD128:
+      case Iop_DivD128:
+         TERNARY(ity_RMode,Ity_D128,Ity_D128, Ity_D128);
+
+      case Iop_V256to64_0: case Iop_V256to64_1:
+      case Iop_V256to64_2: case Iop_V256to64_3:
+         UNARY(Ity_V256, Ity_I64);
+
+      case Iop_64x4toV256:
+         QUATERNARY(Ity_I64, Ity_I64, Ity_I64, Ity_I64, Ity_V256);
+
+      case Iop_AndV256:  case Iop_OrV256:
+      case Iop_XorV256:
+      case Iop_Max32Fx8: case Iop_Min32Fx8:
+      case Iop_Max64Fx4: case Iop_Min64Fx4:
+      case Iop_Add8x32:  case Iop_Add16x16:
+      case Iop_Add32x8:  case Iop_Add64x4:
+      case Iop_Sub8x32:  case Iop_Sub16x16:
+      case Iop_Sub32x8:  case Iop_Sub64x4:
+      case Iop_Mul16x16: case Iop_Mul32x8:
+      case Iop_MulHi16Ux16: case Iop_MulHi16Sx16:
+      case Iop_Avg8Ux32: case Iop_Avg16Ux16:
+      case Iop_Max8Sx32: case Iop_Max16Sx16: case Iop_Max32Sx8:
+      case Iop_Max8Ux32: case Iop_Max16Ux16: case Iop_Max32Ux8:
+      case Iop_Min8Sx32: case Iop_Min16Sx16: case Iop_Min32Sx8:
+      case Iop_Min8Ux32: case Iop_Min16Ux16: case Iop_Min32Ux8:
+      case Iop_CmpEQ8x32:  case Iop_CmpEQ16x16:
+      case Iop_CmpEQ32x8:  case Iop_CmpEQ64x4:
+      case Iop_CmpGT8Sx32: case Iop_CmpGT16Sx16:
+      case Iop_CmpGT32Sx8: case Iop_CmpGT64Sx4:
+      case Iop_QAdd8Ux32: case Iop_QAdd16Ux16:
+      case Iop_QAdd8Sx32: case Iop_QAdd16Sx16:
+      case Iop_QSub8Ux32: case Iop_QSub16Ux16:
+      case Iop_QSub8Sx32: case Iop_QSub16Sx16:
+      case Iop_Perm32x8:
+         BINARY(Ity_V256,Ity_V256, Ity_V256);
+
+      case Iop_V256toV128_1: case Iop_V256toV128_0:
+         UNARY(Ity_V256, Ity_V128);
+
+      case Iop_QandUQsh8x16:  case Iop_QandUQsh16x8:
+      case Iop_QandUQsh32x4:  case Iop_QandUQsh64x2:
+      case Iop_QandSQsh8x16:  case Iop_QandSQsh16x8:
+      case Iop_QandSQsh32x4:  case Iop_QandSQsh64x2:
+      case Iop_QandUQRsh8x16: case Iop_QandUQRsh16x8:
+      case Iop_QandUQRsh32x4: case Iop_QandUQRsh64x2:
+      case Iop_QandSQRsh8x16: case Iop_QandSQRsh16x8:
+      case Iop_QandSQRsh32x4: case Iop_QandSQRsh64x2:
+      case Iop_V128HLtoV256:
+         BINARY(Ity_V128,Ity_V128, Ity_V256);
+
+      case Iop_NotV256:
+      case Iop_RSqrtEst32Fx8:
+      case Iop_Sqrt32Fx8:
+      case Iop_Sqrt64Fx4:
+      case Iop_RecipEst32Fx8:
+      case Iop_CmpNEZ8x32: case Iop_CmpNEZ16x16:
+      case Iop_CmpNEZ64x4: case Iop_CmpNEZ32x8:
+         UNARY(Ity_V256, Ity_V256);
+
+      case Iop_ShlN16x16: case Iop_ShlN32x8:
+      case Iop_ShlN64x4:
+      case Iop_ShrN16x16: case Iop_ShrN32x8:
+      case Iop_ShrN64x4:
+      case Iop_SarN16x16: case Iop_SarN32x8:
+         BINARY(Ity_V256,Ity_I8, Ity_V256);
+
+      default:
+         ppIROp(op);
+         vpanic("typeOfPrimop");
+   }
+#  undef UNARY
+#  undef BINARY
+#  undef TERNARY
+#  undef COMPARISON
+#  undef UNARY_COMPARISON
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Helper functions for the IR -- IR Basic Blocks          ---*/
+/*---------------------------------------------------------------*/
+
+void addStmtToIRSB ( IRSB* bb, IRStmt* st )
+{
+   Int i;
+   if (bb->stmts_used == bb->stmts_size) {
+      IRStmt** stmts2 = LibVEX_Alloc_inline(2 * bb->stmts_size * sizeof(IRStmt*));
+      for (i = 0; i < bb->stmts_size; i++)
+         stmts2[i] = bb->stmts[i];
+      bb->stmts = stmts2;
+      bb->stmts_size *= 2;
+   }
+   vassert(bb->stmts_used < bb->stmts_size);
+   bb->stmts[bb->stmts_used] = st;
+   bb->stmts_used++;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Helper functions for the IR -- IR Type Environments     ---*/
+/*---------------------------------------------------------------*/
+
+/* Allocate a new IRTemp, given its type. */
+
+IRTemp newIRTemp ( IRTypeEnv* env, IRType ty )
+{
+   vassert(env);
+   vassert(env->types_used >= 0);
+   vassert(env->types_size >= 0);
+   vassert(env->types_used <= env->types_size);
+   if (env->types_used < env->types_size) {
+      env->types[env->types_used] = ty;
+      return env->types_used++;
+   } else {
+      Int i;
+      Int new_size = env->types_size==0 ? 8 : 2*env->types_size;
+      IRType* new_types 
+         = LibVEX_Alloc_inline(new_size * sizeof(IRType));
+      for (i = 0; i < env->types_used; i++)
+         new_types[i] = env->types[i];
+      env->types      = new_types;
+      env->types_size = new_size;
+      return newIRTemp(env, ty);
+   }
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Helper functions for the IR -- finding types of exprs   ---*/
+/*---------------------------------------------------------------*/
+
/* Return the type of IRTemp 'tmp' as recorded in 'env'.  'tmp' must
   have been allocated via newIRTemp on this environment (the asserts
   reject indices at or beyond types_used). */
inline 
IRType typeOfIRTemp ( const IRTypeEnv* env, IRTemp tmp )
{
   vassert(tmp >= 0);
   vassert(tmp < env->types_used);
   return env->types[tmp];
}
+
+IRType typeOfIRConst ( const IRConst* con )
+{
+   switch (con->tag) {
+      case Ico_U1:    return Ity_I1;
+      case Ico_U8:    return Ity_I8;
+      case Ico_U16:   return Ity_I16;
+      case Ico_U32:   return Ity_I32;
+      case Ico_U64:   return Ity_I64;
+      case Ico_F32:   return Ity_F32;
+      case Ico_F32i:  return Ity_F32;
+      case Ico_F64:   return Ity_F64;
+      case Ico_F64i:  return Ity_F64;
+      case Ico_V128:  return Ity_V128;
+      case Ico_V256:  return Ity_V256;
+      default: vpanic("typeOfIRConst");
+   }
+}
+
+void typeOfIRLoadGOp ( IRLoadGOp cvt,
+                       /*OUT*/IRType* t_res, /*OUT*/IRType* t_arg )
+{
+   switch (cvt) {
+      case ILGop_Ident64:
+         *t_res = Ity_I64; *t_arg = Ity_I64; break;
+      case ILGop_Ident32:
+         *t_res = Ity_I32; *t_arg = Ity_I32; break;
+      case ILGop_16Uto32: case ILGop_16Sto32:
+         *t_res = Ity_I32; *t_arg = Ity_I16; break;
+      case ILGop_8Uto32: case ILGop_8Sto32:
+         *t_res = Ity_I32; *t_arg = Ity_I8; break;
+      default:
+         vpanic("typeOfIRLoadGOp");
+   }
+}
+
+IRType typeOfIRExpr ( const IRTypeEnv* tyenv, const IRExpr* e )
+{
+   IRType t_dst, t_arg1, t_arg2, t_arg3, t_arg4;
+ start:
+   switch (e->tag) {
+      case Iex_Load:
+         return e->Iex.Load.ty;
+      case Iex_Get:
+         return e->Iex.Get.ty;
+      case Iex_GetI:
+         return e->Iex.GetI.descr->elemTy;
+      case Iex_RdTmp:
+         return typeOfIRTemp(tyenv, e->Iex.RdTmp.tmp);
+      case Iex_Const:
+         return typeOfIRConst(e->Iex.Const.con);
+      case Iex_Qop:
+         typeOfPrimop(e->Iex.Qop.details->op, 
+                      &t_dst, &t_arg1, &t_arg2, &t_arg3, &t_arg4);
+         return t_dst;
+      case Iex_Triop:
+         typeOfPrimop(e->Iex.Triop.details->op,
+                      &t_dst, &t_arg1, &t_arg2, &t_arg3, &t_arg4);
+         return t_dst;
+      case Iex_Binop:
+         typeOfPrimop(e->Iex.Binop.op, 
+                      &t_dst, &t_arg1, &t_arg2, &t_arg3, &t_arg4);
+         return t_dst;
+      case Iex_Unop:
+         typeOfPrimop(e->Iex.Unop.op, 
+                      &t_dst, &t_arg1, &t_arg2, &t_arg3, &t_arg4);
+         return t_dst;
+      case Iex_CCall:
+         return e->Iex.CCall.retty;
+      case Iex_ITE:
+         e = e->Iex.ITE.iffalse;
+         goto start;
+         /* return typeOfIRExpr(tyenv, e->Iex.ITE.iffalse); */
+      case Iex_Binder:
+         vpanic("typeOfIRExpr: Binder is not a valid expression");
+      case Iex_VECRET:
+         vpanic("typeOfIRExpr: VECRET is not a valid expression");
+      case Iex_BBPTR:
+         vpanic("typeOfIRExpr: BBPTR is not a valid expression");
+      default:
+         ppIRExpr(e);
+         vpanic("typeOfIRExpr");
+   }
+}
+
+/* Is this any value actually in the enumeration 'IRType' ? */
+Bool isPlausibleIRType ( IRType ty )
+{
+   switch (ty) {
+      case Ity_INVALID: case Ity_I1:
+      case Ity_I8: case Ity_I16: case Ity_I32: 
+      case Ity_I64: case Ity_I128:
+      case Ity_F16: case Ity_F32: case Ity_F64: case Ity_F128:
+      case Ity_D32: case Ity_D64: case Ity_D128:
+      case Ity_V128: case Ity_V256:
+         return True;
+      default: 
+         return False;
+   }
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Sanity checking -- FLATNESS                             ---*/
+/*---------------------------------------------------------------*/
+
/* Check that the canonical flatness constraints hold on an
   IRStmt.  The only place where any expression is allowed to be
   non-atomic is the RHS of IRStmt_WrTmp. */
+
/* Relies on:
   inline static Bool isIRAtom ( const IRExpr* e ) {
      return e->tag == Iex_RdTmp || e->tag == Iex_Const;
   }
*/
+
+static inline Bool isIRAtom_or_VECRET_or_BBPTR ( const IRExpr* e )
+{
+  if (isIRAtom(e)) {
+    return True;
+  }
+
+  return UNLIKELY(is_IRExpr_VECRET_or_BBPTR(e));
+}
+
Bool isFlatIRStmt ( const IRStmt* st )
{
   /* Returns True iff 'st' obeys the flatness constraint described
      above: every contained expression is an atom, except the RHS of
      a WrTmp, which may be one operation whose operands are atoms. */
   Int      i;
   const IRExpr*  e;
   const IRQop*   qop;
   const IRTriop* triop;

   switch (st->tag) {
      case Ist_AbiHint:
         return isIRAtom(st->Ist.AbiHint.base)
                && isIRAtom(st->Ist.AbiHint.nia);
      case Ist_Put:
         return isIRAtom(st->Ist.Put.data);
      case Ist_PutI: {
         const IRPutI *puti = st->Ist.PutI.details;
         return toBool( isIRAtom(puti->ix) 
                        && isIRAtom(puti->data) );
      }
      case Ist_WrTmp:
         /* This is the only interesting case.  The RHS can be any
            expression, *but* all its subexpressions *must* be
            atoms. */
         e = st->Ist.WrTmp.data;
         switch (e->tag) {
            case Iex_Binder: return True;
            case Iex_Get:    return True;
            case Iex_GetI:   return isIRAtom(e->Iex.GetI.ix);
            case Iex_RdTmp:  return True;
            case Iex_Qop:    qop = e->Iex.Qop.details;
                             return toBool(
                                    isIRAtom(qop->arg1) 
                                    && isIRAtom(qop->arg2)
                                    && isIRAtom(qop->arg3)
                                    && isIRAtom(qop->arg4));
            case Iex_Triop:  triop = e->Iex.Triop.details;
                             return toBool(
                                    isIRAtom(triop->arg1) 
                                    && isIRAtom(triop->arg2)
                                    && isIRAtom(triop->arg3));
            case Iex_Binop:  return toBool(
                                    isIRAtom(e->Iex.Binop.arg1) 
                                    && isIRAtom(e->Iex.Binop.arg2));
            case Iex_Unop:   return isIRAtom(e->Iex.Unop.arg);
            case Iex_Load:   return isIRAtom(e->Iex.Load.addr);
            case Iex_Const:  return True;
            /* CCall argument lists are NULL-terminated. */
            case Iex_CCall:  for (i = 0; e->Iex.CCall.args[i]; i++)
                                if (!isIRAtom(e->Iex.CCall.args[i])) 
                                   return False;
                             return True;
            case Iex_ITE:    return toBool (
                                    isIRAtom(e->Iex.ITE.cond) 
                                    && isIRAtom(e->Iex.ITE.iftrue) 
                                    && isIRAtom(e->Iex.ITE.iffalse));
            default:         vpanic("isFlatIRStmt(e)");
         }
         /*notreached*/
         vassert(0);
      case Ist_Store:
         return toBool( isIRAtom(st->Ist.Store.addr) 
                        && isIRAtom(st->Ist.Store.data) );
      case Ist_StoreG: {
         const IRStoreG* sg = st->Ist.StoreG.details;
         return toBool( isIRAtom(sg->addr)
                        && isIRAtom(sg->data) && isIRAtom(sg->guard) );
      }
      case Ist_LoadG: {
         const IRLoadG* lg = st->Ist.LoadG.details;
         return toBool( isIRAtom(lg->addr)
                        && isIRAtom(lg->alt) && isIRAtom(lg->guard) );
      }
      case Ist_CAS: {
        const IRCAS* cas = st->Ist.CAS.details;
         /* The Hi halves are NULL for a single CAS and only need
            checking when present (double CAS). */
         return toBool( isIRAtom(cas->addr)
                        && (cas->expdHi ? isIRAtom(cas->expdHi) : True)
                        && isIRAtom(cas->expdLo)
                        && (cas->dataHi ? isIRAtom(cas->dataHi) : True)
                        && isIRAtom(cas->dataLo) );
      }
      case Ist_LLSC:
         /* storedata is NULL for Load-Linked, non-NULL for
            Store-Conditional. */
         return toBool( isIRAtom(st->Ist.LLSC.addr)
                        && (st->Ist.LLSC.storedata
                               ? isIRAtom(st->Ist.LLSC.storedata) : True) );
      case Ist_Dirty: {
         const IRDirty* di = st->Ist.Dirty.details;
         if (!isIRAtom(di->guard)) 
            return False;
         /* Dirty-call args may additionally be the VECRET/BBPTR
            markers; those count as flat here. */
         for (i = 0; di->args[i]; i++)
            if (!isIRAtom_or_VECRET_or_BBPTR(di->args[i])) 
               return False;
         if (di->mAddr && !isIRAtom(di->mAddr)) 
            return False;
         return True;
      }
      case Ist_NoOp:
      case Ist_IMark:
      case Ist_MBE:
         /* These contain no expressions at all. */
         return True;
      case Ist_Exit:
         return isIRAtom(st->Ist.Exit.guard);
      default: 
         vpanic("isFlatIRStmt(st)");
   }
}
+
+
+/*---------------------------------------------------------------*/
+/*--- Sanity checking                                         ---*/
+/*---------------------------------------------------------------*/
+
+/* Checks:
+
+   Everything is type-consistent.  No ill-typed anything.
+   The target address at the end of the BB is a 32- or 64-
+   bit expression, depending on the guest's word size.
+
+   Each temp is assigned only once, before its uses.
+*/
+
+static inline Int countArgs ( IRExpr** args )
+{
+   Int i;
+   for (i = 0; args[i]; i++)
+      ;
+   return i;
+}
+
+static
+__attribute((noreturn))
+void sanityCheckFail ( const IRSB* bb, const IRStmt* stmt, const HChar* what )
+{
+   vex_printf("\nIR SANITY CHECK FAILURE\n\n");
+   ppIRSB(bb);
+   if (stmt) {
+      vex_printf("\nIN STATEMENT:\n\n");
+      ppIRStmt(stmt);
+   }
+   vex_printf("\n\nERROR = %s\n\n", what );
+   vpanic("sanityCheckFail: exiting due to bad IR");
+}
+
+static Bool saneIRRegArray ( const IRRegArray* arr )
+{
+   if (arr->base < 0 || arr->base > 10000 /* somewhat arbitrary */)
+      return False;
+   if (arr->elemTy == Ity_I1)
+      return False;
+   if (arr->nElems <= 0 || arr->nElems > 500 /* somewhat arbitrary */)
+      return False;
+   return True;
+}
+
+static Bool saneIRCallee ( const IRCallee* cee )
+{
+   if (cee->name == NULL)
+      return False;
+   if (cee->addr == 0)
+      return False;
+   if (cee->regparms < 0 || cee->regparms > 3)
+      return False;
+   return True;
+}
+
+static Bool saneIRConst ( const IRConst* con )
+{
+   switch (con->tag) {
+      case Ico_U1: 
+         return toBool( con->Ico.U1 == True || con->Ico.U1 == False );
+      default: 
+         /* Is there anything we can meaningfully check?  I don't
+            think so. */
+         return True;
+   }
+}
+
+/* Traverse a Stmt/Expr, inspecting IRTemp uses.  Report any out of
+   range ones.  Report any which are read and for which the current
+   def_count is zero. */
+
static
void useBeforeDef_Temp ( const IRSB* bb, const IRStmt* stmt, IRTemp tmp,
                         Int* def_counts )
{
   /* Fail if 'tmp' lies outside the type environment, or if it is
      read before any statement has assigned it (def_counts[tmp]
      counts assignments seen so far in program order).
      NOTE(review): if IRTemp is an unsigned typedef the 'tmp < 0'
      test is vacuous and the range check relies solely on the upper
      bound -- confirm against IRTemp's definition. */
   if (tmp < 0 || tmp >= bb->tyenv->types_used)
      sanityCheckFail(bb,stmt, "out of range Temp in IRExpr");
   if (def_counts[tmp] < 1)
      sanityCheckFail(bb,stmt, "IRTemp use before def in IRExpr");
}
+
static
void useBeforeDef_Expr ( const IRSB* bb, const IRStmt* stmt,
                         const IRExpr* expr, Int* def_counts )
{
   /* Recursively walk 'expr' and apply useBeforeDef_Temp to every
      IRTemp it reads.  Leaf cases (Get, Const) contain no temps. */
   Int i;
   switch (expr->tag) {
      case Iex_Get: 
         break;
      case Iex_GetI:
         useBeforeDef_Expr(bb,stmt,expr->Iex.GetI.ix,def_counts);
         break;
      case Iex_RdTmp:
         useBeforeDef_Temp(bb,stmt,expr->Iex.RdTmp.tmp,def_counts);
         break;
      case Iex_Qop: {
         const IRQop* qop = expr->Iex.Qop.details;
         useBeforeDef_Expr(bb,stmt,qop->arg1,def_counts);
         useBeforeDef_Expr(bb,stmt,qop->arg2,def_counts);
         useBeforeDef_Expr(bb,stmt,qop->arg3,def_counts);
         useBeforeDef_Expr(bb,stmt,qop->arg4,def_counts);
         break;
      }
      case Iex_Triop: {
         const IRTriop* triop = expr->Iex.Triop.details;
         useBeforeDef_Expr(bb,stmt,triop->arg1,def_counts);
         useBeforeDef_Expr(bb,stmt,triop->arg2,def_counts);
         useBeforeDef_Expr(bb,stmt,triop->arg3,def_counts);
         break;
      }
      case Iex_Binop:
         useBeforeDef_Expr(bb,stmt,expr->Iex.Binop.arg1,def_counts);
         useBeforeDef_Expr(bb,stmt,expr->Iex.Binop.arg2,def_counts);
         break;
      case Iex_Unop:
         useBeforeDef_Expr(bb,stmt,expr->Iex.Unop.arg,def_counts);
         break;
      case Iex_Load:
         useBeforeDef_Expr(bb,stmt,expr->Iex.Load.addr,def_counts);
         break;
      case Iex_Const:
         break;
      case Iex_CCall:
         /* Argument list is NULL-terminated. */
         for (i = 0; expr->Iex.CCall.args[i]; i++) {
            const IRExpr* arg = expr->Iex.CCall.args[i];
            if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg))) {
               /* These aren't allowed in CCall lists.  Let's detect
                  and throw them out here, though, rather than
                  segfaulting a bit later on. */
               sanityCheckFail(bb,stmt, "IRExprP__* value in CCall arg list");
            } else {
               useBeforeDef_Expr(bb,stmt,arg,def_counts);
            }
         }
         break;
      case Iex_ITE:
         useBeforeDef_Expr(bb,stmt,expr->Iex.ITE.cond,def_counts);
         useBeforeDef_Expr(bb,stmt,expr->Iex.ITE.iftrue,def_counts);
         useBeforeDef_Expr(bb,stmt,expr->Iex.ITE.iffalse,def_counts);
         break;
      default:
         vpanic("useBeforeDef_Expr");
   }
}
+
static
void useBeforeDef_Stmt ( const IRSB* bb, const IRStmt* stmt, Int* def_counts )
{
   /* Apply the use-before-def check to every expression contained in
      'stmt', dispatching on the statement kind.  Optional fields
      (CAS Hi halves, LLSC storedata, Dirty mAddr) are checked only
      when present. */
   Int       i;
   const IRDirty*  d;
   const IRCAS*    cas;
   const IRPutI*   puti;
   const IRLoadG*  lg;
   const IRStoreG* sg;
   switch (stmt->tag) {
      case Ist_IMark:
         break;
      case Ist_AbiHint:
         useBeforeDef_Expr(bb,stmt,stmt->Ist.AbiHint.base,def_counts);
         useBeforeDef_Expr(bb,stmt,stmt->Ist.AbiHint.nia,def_counts);
         break;
      case Ist_Put:
         useBeforeDef_Expr(bb,stmt,stmt->Ist.Put.data,def_counts);
         break;
      case Ist_PutI:
         puti = stmt->Ist.PutI.details;
         useBeforeDef_Expr(bb,stmt,puti->ix,def_counts);
         useBeforeDef_Expr(bb,stmt,puti->data,def_counts);
         break;
      case Ist_WrTmp:
         useBeforeDef_Expr(bb,stmt,stmt->Ist.WrTmp.data,def_counts);
         break;
      case Ist_Store:
         useBeforeDef_Expr(bb,stmt,stmt->Ist.Store.addr,def_counts);
         useBeforeDef_Expr(bb,stmt,stmt->Ist.Store.data,def_counts);
         break;
      case Ist_StoreG:
         sg = stmt->Ist.StoreG.details;
         useBeforeDef_Expr(bb,stmt,sg->addr,def_counts);
         useBeforeDef_Expr(bb,stmt,sg->data,def_counts);
         useBeforeDef_Expr(bb,stmt,sg->guard,def_counts);
         break;
      case Ist_LoadG:
         lg = stmt->Ist.LoadG.details;
         useBeforeDef_Expr(bb,stmt,lg->addr,def_counts);
         useBeforeDef_Expr(bb,stmt,lg->alt,def_counts);
         useBeforeDef_Expr(bb,stmt,lg->guard,def_counts);
         break;
      case Ist_CAS:
         cas = stmt->Ist.CAS.details;
         useBeforeDef_Expr(bb,stmt,cas->addr,def_counts);
         if (cas->expdHi)
            useBeforeDef_Expr(bb,stmt,cas->expdHi,def_counts);
         useBeforeDef_Expr(bb,stmt,cas->expdLo,def_counts);
         if (cas->dataHi)
            useBeforeDef_Expr(bb,stmt,cas->dataHi,def_counts);
         useBeforeDef_Expr(bb,stmt,cas->dataLo,def_counts);
         break;
      case Ist_LLSC:
         useBeforeDef_Expr(bb,stmt,stmt->Ist.LLSC.addr,def_counts);
         if (stmt->Ist.LLSC.storedata != NULL)
            useBeforeDef_Expr(bb,stmt,stmt->Ist.LLSC.storedata,def_counts);
         break;
      case Ist_Dirty:
         d = stmt->Ist.Dirty.details;
         for (i = 0; d->args[i] != NULL; i++) {
            IRExpr* arg = d->args[i];
            if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg))) {
               /* This is ensured by isFlatIRStmt */
              ;
            } else {
               useBeforeDef_Expr(bb,stmt,arg,def_counts);
            }
         }
         /* mAddr is only meaningful when a memory effect is
            declared. */
         if (d->mFx != Ifx_None)
            useBeforeDef_Expr(bb,stmt,d->mAddr,def_counts);
         break;
      case Ist_NoOp:
      case Ist_MBE:
         break;
      case Ist_Exit:
         useBeforeDef_Expr(bb,stmt,stmt->Ist.Exit.guard,def_counts);
         break;
      default: 
         vpanic("useBeforeDef_Stmt");
   }
}
+
/* Type-check expression 'expr' (recursively): verify operand arity
   and operand types against typeOfPrimop, address types against the
   guest word type 'gWordTy', and structural constraints on GetI,
   CCall and ITE.  Any violation aborts via sanityCheckFail. */
static
void tcExpr ( const IRSB* bb, const IRStmt* stmt, const IRExpr* expr,
              IRType gWordTy )
{
   Int        i;
   IRType     t_dst, t_arg1, t_arg2, t_arg3, t_arg4;
   const IRTypeEnv* tyenv = bb->tyenv;
   switch (expr->tag) {
      case Iex_Get:
      case Iex_RdTmp:
         /* Leaves; nothing to check here. */
         break;
      case Iex_GetI:
         tcExpr(bb,stmt, expr->Iex.GetI.ix, gWordTy );
         if (typeOfIRExpr(tyenv,expr->Iex.GetI.ix) != Ity_I32)
            sanityCheckFail(bb,stmt,"IRExpr.GetI.ix: not :: Ity_I32");
         if (!saneIRRegArray(expr->Iex.GetI.descr))
            sanityCheckFail(bb,stmt,"IRExpr.GetI.descr: invalid descr");
         break;
      case Iex_Qop: {
         IRType ttarg1, ttarg2, ttarg3, ttarg4;
         const IRQop* qop = expr->Iex.Qop.details;
         /* First type-check the argument subtrees ... */
         tcExpr(bb,stmt, qop->arg1, gWordTy );
         tcExpr(bb,stmt, qop->arg2, gWordTy );
         tcExpr(bb,stmt, qop->arg3, gWordTy );
         tcExpr(bb,stmt, qop->arg4, gWordTy );
         /* ... then check that the op really takes 4 args (all arg
            types valid) ... */
         typeOfPrimop(qop->op, 
                      &t_dst, &t_arg1, &t_arg2, &t_arg3, &t_arg4);
         if (t_arg1 == Ity_INVALID || t_arg2 == Ity_INVALID 
             || t_arg3 == Ity_INVALID || t_arg4 == Ity_INVALID) {
            vex_printf(" op name: " );
            ppIROp(qop->op);
            vex_printf("\n");
            sanityCheckFail(bb,stmt,
               "Iex.Qop: wrong arity op\n"
               "... name of op precedes BB printout\n");
         }
         /* ... and that the actual argument types match the op's
            declared argument types. */
         ttarg1 = typeOfIRExpr(tyenv, qop->arg1);
         ttarg2 = typeOfIRExpr(tyenv, qop->arg2);
         ttarg3 = typeOfIRExpr(tyenv, qop->arg3);
         ttarg4 = typeOfIRExpr(tyenv, qop->arg4);
         if (t_arg1 != ttarg1 || t_arg2 != ttarg2 
             || t_arg3 != ttarg3 || t_arg4 != ttarg4) {
            vex_printf(" op name: ");
            ppIROp(qop->op);
            vex_printf("\n");
            vex_printf(" op type is (");
            ppIRType(t_arg1);
            vex_printf(",");
            ppIRType(t_arg2);
            vex_printf(",");
            ppIRType(t_arg3);
            vex_printf(",");
            ppIRType(t_arg4);
            vex_printf(") -> ");
            ppIRType (t_dst);
            vex_printf("\narg tys are (");
            ppIRType(ttarg1);
            vex_printf(",");
            ppIRType(ttarg2);
            vex_printf(",");
            ppIRType(ttarg3);
            vex_printf(",");
            ppIRType(ttarg4);
            vex_printf(")\n");
            sanityCheckFail(bb,stmt,
               "Iex.Qop: arg tys don't match op tys\n"
               "... additional details precede BB printout\n");
         }
         break;
      }
      case Iex_Triop: {
         IRType ttarg1, ttarg2, ttarg3;
         const IRTriop *triop = expr->Iex.Triop.details;
         tcExpr(bb,stmt, triop->arg1, gWordTy );
         tcExpr(bb,stmt, triop->arg2, gWordTy );
         tcExpr(bb,stmt, triop->arg3, gWordTy );
         /* A 3-ary op must have valid types for args 1..3 and an
            invalid (absent) type for arg 4. */
         typeOfPrimop(triop->op, 
                      &t_dst, &t_arg1, &t_arg2, &t_arg3, &t_arg4);
         if (t_arg1 == Ity_INVALID || t_arg2 == Ity_INVALID 
             || t_arg3 == Ity_INVALID || t_arg4 != Ity_INVALID) {
            vex_printf(" op name: " );
            ppIROp(triop->op);
            vex_printf("\n");
            sanityCheckFail(bb,stmt,
               "Iex.Triop: wrong arity op\n"
               "... name of op precedes BB printout\n");
         }
         ttarg1 = typeOfIRExpr(tyenv, triop->arg1);
         ttarg2 = typeOfIRExpr(tyenv, triop->arg2);
         ttarg3 = typeOfIRExpr(tyenv, triop->arg3);
         if (t_arg1 != ttarg1 || t_arg2 != ttarg2 || t_arg3 != ttarg3) {
            vex_printf(" op name: ");
            ppIROp(triop->op);
            vex_printf("\n");
            vex_printf(" op type is (");
            ppIRType(t_arg1);
            vex_printf(",");
            ppIRType(t_arg2);
            vex_printf(",");
            ppIRType(t_arg3);
            vex_printf(") -> ");
            ppIRType (t_dst);
            vex_printf("\narg tys are (");
            ppIRType(ttarg1);
            vex_printf(",");
            ppIRType(ttarg2);
            vex_printf(",");
            ppIRType(ttarg3);
            vex_printf(")\n");
            sanityCheckFail(bb,stmt,
               "Iex.Triop: arg tys don't match op tys\n"
               "... additional details precede BB printout\n");
         }
         break;
      }
      case Iex_Binop: {
         IRType ttarg1, ttarg2;
         tcExpr(bb,stmt, expr->Iex.Binop.arg1, gWordTy );
         tcExpr(bb,stmt, expr->Iex.Binop.arg2, gWordTy );
         /* A 2-ary op: args 1..2 valid, args 3..4 absent. */
         typeOfPrimop(expr->Iex.Binop.op, 
                      &t_dst, &t_arg1, &t_arg2, &t_arg3, &t_arg4);
         if (t_arg1 == Ity_INVALID || t_arg2 == Ity_INVALID 
             || t_arg3 != Ity_INVALID || t_arg4 != Ity_INVALID) {
            vex_printf(" op name: " );
            ppIROp(expr->Iex.Binop.op);
            vex_printf("\n");
            sanityCheckFail(bb,stmt,
               "Iex.Binop: wrong arity op\n"
               "... name of op precedes BB printout\n");
         }
         ttarg1 = typeOfIRExpr(tyenv, expr->Iex.Binop.arg1);
         ttarg2 = typeOfIRExpr(tyenv, expr->Iex.Binop.arg2);
         if (t_arg1 != ttarg1 || t_arg2 != ttarg2) {
            vex_printf(" op name: ");
            ppIROp(expr->Iex.Binop.op);
            vex_printf("\n");
            vex_printf(" op type is (");
            ppIRType(t_arg1);
            vex_printf(",");
            ppIRType(t_arg2);
            vex_printf(") -> ");
            ppIRType (t_dst);
            vex_printf("\narg tys are (");
            ppIRType(ttarg1);
            vex_printf(",");
            ppIRType(ttarg2);
            vex_printf(")\n");
            sanityCheckFail(bb,stmt,
               "Iex.Binop: arg tys don't match op tys\n"
               "... additional details precede BB printout\n");
         }
         break;
      }
      case Iex_Unop:
         tcExpr(bb,stmt, expr->Iex.Unop.arg, gWordTy );
         /* A 1-ary op: arg 1 valid, args 2..4 absent. */
         typeOfPrimop(expr->Iex.Unop.op, 
                      &t_dst, &t_arg1, &t_arg2, &t_arg3, &t_arg4);
         if (t_arg1 == Ity_INVALID || t_arg2 != Ity_INVALID
             || t_arg3 != Ity_INVALID || t_arg4 != Ity_INVALID)
            sanityCheckFail(bb,stmt,"Iex.Unop: wrong arity op");
         if (t_arg1 != typeOfIRExpr(tyenv, expr->Iex.Unop.arg))
            sanityCheckFail(bb,stmt,"Iex.Unop: arg ty doesn't match op ty");
         break;
      case Iex_Load:
         tcExpr(bb,stmt, expr->Iex.Load.addr, gWordTy);
         if (typeOfIRExpr(tyenv, expr->Iex.Load.addr) != gWordTy)
            sanityCheckFail(bb,stmt,"Iex.Load.addr: not :: guest word type");
         if (expr->Iex.Load.end != Iend_LE && expr->Iex.Load.end != Iend_BE)
            sanityCheckFail(bb,stmt,"Iex.Load.end: bogus endianness");
         break;
      case Iex_CCall:
         if (!saneIRCallee(expr->Iex.CCall.cee))
            sanityCheckFail(bb,stmt,"Iex.CCall.cee: bad IRCallee");
         if (expr->Iex.CCall.cee->regparms > countArgs(expr->Iex.CCall.args)) 
            sanityCheckFail(bb,stmt,"Iex.CCall.cee: #regparms > #args");
         for (i = 0; expr->Iex.CCall.args[i]; i++) {
            if (i >= 32)
               sanityCheckFail(bb,stmt,"Iex.CCall: > 32 args");
            IRExpr* arg = expr->Iex.CCall.args[i];
            /* VECRET/BBPTR markers are legal only in dirty calls. */
            if (UNLIKELY(is_IRExpr_VECRET_or_BBPTR(arg)))
               sanityCheckFail(bb,stmt,"Iex.CCall.args: is VECRET/BBPTR");
            tcExpr(bb,stmt, arg, gWordTy);
         }
         /* Neither the return value nor any argument may be a 1-bit
            value; the calling convention cannot carry those. */
         if (expr->Iex.CCall.retty == Ity_I1)
            sanityCheckFail(bb,stmt,"Iex.CCall.retty: cannot return :: Ity_I1");
         for (i = 0; expr->Iex.CCall.args[i]; i++)
            if (typeOfIRExpr(tyenv, expr->Iex.CCall.args[i]) == Ity_I1)
               sanityCheckFail(bb,stmt,"Iex.CCall.arg: arg :: Ity_I1");
         break;
      case Iex_Const:
         if (!saneIRConst(expr->Iex.Const.con))
            sanityCheckFail(bb,stmt,"Iex.Const.con: invalid const");
         break;
      case Iex_ITE:
         tcExpr(bb,stmt, expr->Iex.ITE.cond, gWordTy);
         tcExpr(bb,stmt, expr->Iex.ITE.iftrue, gWordTy);
         tcExpr(bb,stmt, expr->Iex.ITE.iffalse, gWordTy);
         if (typeOfIRExpr(tyenv, expr->Iex.ITE.cond) != Ity_I1)
            sanityCheckFail(bb,stmt,"Iex.ITE.cond: cond :: Ity_I1");
         if (typeOfIRExpr(tyenv, expr->Iex.ITE.iftrue)
             != typeOfIRExpr(tyenv, expr->Iex.ITE.iffalse))
            sanityCheckFail(bb,stmt,"Iex.ITE: iftrue/iffalse mismatch");
         break;
      default: 
         vpanic("tcExpr");
   }
}
+
+
+static
+void tcStmt ( const IRSB* bb, const IRStmt* stmt, IRType gWordTy )
+{
+   Int        i;
+   IRType     tyExpd, tyData;
+   const IRTypeEnv* tyenv = bb->tyenv;
+   switch (stmt->tag) {
+      case Ist_IMark:
+         /* Somewhat heuristic, but rule out totally implausible
+            instruction sizes and deltas. */
+         if (stmt->Ist.IMark.len > 24)
+            sanityCheckFail(bb,stmt,"IRStmt.IMark.len: implausible");
+         if (stmt->Ist.IMark.delta > 1)
+            sanityCheckFail(bb,stmt,"IRStmt.IMark.delta: implausible");
+         break;
+      case Ist_AbiHint:
+         if (typeOfIRExpr(tyenv, stmt->Ist.AbiHint.base) != gWordTy)
+            sanityCheckFail(bb,stmt,"IRStmt.AbiHint.base: "
+                                    "not :: guest word type");
+         if (typeOfIRExpr(tyenv, stmt->Ist.AbiHint.nia) != gWordTy)
+            sanityCheckFail(bb,stmt,"IRStmt.AbiHint.nia: "
+                                    "not :: guest word type");
+         break;
+      case Ist_Put:
+         tcExpr( bb, stmt, stmt->Ist.Put.data, gWordTy );
+         if (typeOfIRExpr(tyenv,stmt->Ist.Put.data) == Ity_I1)
+            sanityCheckFail(bb,stmt,"IRStmt.Put.data: cannot Put :: Ity_I1");
+         break;
+      case Ist_PutI:{
+         const IRPutI* puti = stmt->Ist.PutI.details;
+         tcExpr( bb, stmt, puti->data, gWordTy );
+         tcExpr( bb, stmt, puti->ix, gWordTy );
+         if (typeOfIRExpr(tyenv,puti->data) == Ity_I1)
+            sanityCheckFail(bb,stmt,"IRStmt.PutI.data: cannot PutI :: Ity_I1");
+         if (typeOfIRExpr(tyenv,puti->data) 
+             != puti->descr->elemTy)
+            sanityCheckFail(bb,stmt,"IRStmt.PutI.data: data ty != elem ty");
+         if (typeOfIRExpr(tyenv,puti->ix) != Ity_I32)
+            sanityCheckFail(bb,stmt,"IRStmt.PutI.ix: not :: Ity_I32");
+         if (!saneIRRegArray(puti->descr))
+            sanityCheckFail(bb,stmt,"IRStmt.PutI.descr: invalid descr");
+         break;
+      }
+      case Ist_WrTmp:
+         tcExpr( bb, stmt, stmt->Ist.WrTmp.data, gWordTy );
+         if (typeOfIRTemp(tyenv, stmt->Ist.WrTmp.tmp)
+             != typeOfIRExpr(tyenv, stmt->Ist.WrTmp.data))
+            sanityCheckFail(bb,stmt,
+                            "IRStmt.Put.Tmp: tmp and expr do not match");
+         break;
+      case Ist_Store:
+         tcExpr( bb, stmt, stmt->Ist.Store.addr, gWordTy );
+         tcExpr( bb, stmt, stmt->Ist.Store.data, gWordTy );
+         if (typeOfIRExpr(tyenv, stmt->Ist.Store.addr) != gWordTy)
+            sanityCheckFail(bb,stmt,
+                            "IRStmt.Store.addr: not :: guest word type");
+         if (typeOfIRExpr(tyenv, stmt->Ist.Store.data) == Ity_I1)
+            sanityCheckFail(bb,stmt,
+                            "IRStmt.Store.data: cannot Store :: Ity_I1");
+         if (stmt->Ist.Store.end != Iend_LE && stmt->Ist.Store.end != Iend_BE)
+            sanityCheckFail(bb,stmt,"Ist.Store.end: bogus endianness");
+         break;
+      case Ist_StoreG: {
+         const IRStoreG* sg = stmt->Ist.StoreG.details;
+         tcExpr( bb, stmt, sg->addr, gWordTy );
+         tcExpr( bb, stmt, sg->data, gWordTy );
+         tcExpr( bb, stmt, sg->guard, gWordTy );
+         if (typeOfIRExpr(tyenv, sg->addr) != gWordTy)
+            sanityCheckFail(bb,stmt,"IRStmtG...addr: not :: guest word type");
+         if (typeOfIRExpr(tyenv, sg->data) == Ity_I1)
+            sanityCheckFail(bb,stmt,"IRStmtG...data: cannot Store :: Ity_I1");
+         if (typeOfIRExpr(tyenv, sg->guard) != Ity_I1)
+            sanityCheckFail(bb,stmt,"IRStmtG...guard: not :: Ity_I1");
+         if (sg->end != Iend_LE && sg->end != Iend_BE)
+            sanityCheckFail(bb,stmt,"IRStmtG...end: bogus endianness");
+         break;
+      }
+      case Ist_LoadG: {
+         const IRLoadG* lg = stmt->Ist.LoadG.details;
+         tcExpr( bb, stmt, lg->addr, gWordTy );
+         tcExpr( bb, stmt, lg->alt, gWordTy );
+         tcExpr( bb, stmt, lg->guard, gWordTy );
+         if (typeOfIRExpr(tyenv, lg->guard) != Ity_I1)
+            sanityCheckFail(bb,stmt,"IRStmt.LoadG.guard: not :: Ity_I1");
+         if (typeOfIRExpr(tyenv, lg->addr) != gWordTy)
+              sanityCheckFail(bb,stmt,"IRStmt.LoadG.addr: not "
+                                      ":: guest word type");
+         if (typeOfIRExpr(tyenv, lg->alt) != typeOfIRTemp(tyenv, lg->dst))
+             sanityCheckFail(bb,stmt,"IRStmt.LoadG: dst/alt type mismatch");
+         IRTemp cvtRes = Ity_INVALID, cvtArg = Ity_INVALID;
+         typeOfIRLoadGOp(lg->cvt, &cvtRes, &cvtArg);
+         if (cvtRes != typeOfIRTemp(tyenv, lg->dst))
+            sanityCheckFail(bb,stmt,"IRStmt.LoadG: dst/loaded type mismatch");
+         break;
+      }
+      case Ist_CAS: {
+         const IRCAS* cas = stmt->Ist.CAS.details;
+         /* make sure it's definitely either a CAS or a DCAS */
+         if (cas->oldHi == IRTemp_INVALID 
+             && cas->expdHi == NULL && cas->dataHi == NULL) {
+            /* fine; it's a single cas */
+         }
+         else
+         if (cas->oldHi != IRTemp_INVALID 
+             && cas->expdHi != NULL && cas->dataHi != NULL) {
+            /* fine; it's a double cas */
+         }
+         else {
+            /* it's some el-mutanto hybrid */
+            goto bad_cas;
+         }
+         /* check the address type */
+         tcExpr( bb, stmt, cas->addr, gWordTy );
+         if (typeOfIRExpr(tyenv, cas->addr) != gWordTy) goto bad_cas;
+         /* check types on the {old,expd,data}Lo components agree */
+         tyExpd = typeOfIRExpr(tyenv, cas->expdLo);
+         tyData = typeOfIRExpr(tyenv, cas->dataLo);
+         if (tyExpd != tyData) goto bad_cas;
+         if (tyExpd != typeOfIRTemp(tyenv, cas->oldLo))
+            goto bad_cas;
+         /* check the base element type is sane */
+         if (tyExpd == Ity_I8 || tyExpd == Ity_I16 || tyExpd == Ity_I32
+             || (gWordTy == Ity_I64 && tyExpd == Ity_I64)) {
+            /* fine */
+         } else {
+            goto bad_cas;
+         }
+         /* If it's a DCAS, check types on the {old,expd,data}Hi
+            components too */
+         if (cas->oldHi != IRTemp_INVALID) {
+            tyExpd = typeOfIRExpr(tyenv, cas->expdHi);
+            tyData = typeOfIRExpr(tyenv, cas->dataHi);
+            if (tyExpd != tyData) goto bad_cas;
+            if (tyExpd != typeOfIRTemp(tyenv, cas->oldHi))
+               goto bad_cas;
+            /* and finally check that oldLo and oldHi have the same
+               type.  This forces equivalence amongst all 6 types. */
+            if (typeOfIRTemp(tyenv, cas->oldHi)
+                != typeOfIRTemp(tyenv, cas->oldLo))
+               goto bad_cas;
+         }
+         break;
+         bad_cas:
+         sanityCheckFail(bb,stmt,"IRStmt.CAS: ill-formed");
+         break;
+      }
+      case Ist_LLSC: {
+         IRType tyRes;
+         if (typeOfIRExpr(tyenv, stmt->Ist.LLSC.addr) != gWordTy)
+            sanityCheckFail(bb,stmt,"IRStmt.LLSC.addr: not :: guest word type");
+         if (stmt->Ist.LLSC.end != Iend_LE && stmt->Ist.LLSC.end != Iend_BE)
+            sanityCheckFail(bb,stmt,"Ist.LLSC.end: bogus endianness");
+         tyRes = typeOfIRTemp(tyenv, stmt->Ist.LLSC.result);
+         if (stmt->Ist.LLSC.storedata == NULL) {
+            /* it's a LL */
+            if (tyRes != Ity_I64 && tyRes != Ity_I32
+                && tyRes != Ity_I16 && tyRes != Ity_I8)
+               sanityCheckFail(bb,stmt,"Ist.LLSC(LL).result :: bogus");
+         } else {
+            /* it's a SC */
+            if (tyRes != Ity_I1)
+               sanityCheckFail(bb,stmt,"Ist.LLSC(SC).result: not :: Ity_I1");
+            tyData = typeOfIRExpr(tyenv, stmt->Ist.LLSC.storedata);
+            if (tyData != Ity_I64 && tyData != Ity_I32
+                && tyData != Ity_I16 && tyData != Ity_I8)
+               sanityCheckFail(bb,stmt,
+                               "Ist.LLSC(SC).result :: storedata bogus");
+         }
+         break;
+      }
+      case Ist_Dirty: {
+         /* Mostly check for various kinds of ill-formed dirty calls. */
+         const IRDirty* d = stmt->Ist.Dirty.details;
+         if (d->cee == NULL) goto bad_dirty;
+         if (!saneIRCallee(d->cee)) goto bad_dirty;
+         if (d->cee->regparms > countArgs(d->args)) goto bad_dirty;
+         if (d->mFx == Ifx_None) {
+            if (d->mAddr != NULL || d->mSize != 0)
+               goto bad_dirty;
+         } else {
+            if (d->mAddr == NULL || d->mSize == 0)
+               goto bad_dirty;
+         }
+         if (d->nFxState < 0 || d->nFxState > VEX_N_FXSTATE)
+            goto bad_dirty;
+         for (i = 0; i < d->nFxState; i++) {
+            if (d->fxState[i].fx == Ifx_None) goto bad_dirty;
+            if (d->fxState[i].size <= 0) goto bad_dirty;
+            if (d->fxState[i].nRepeats == 0) {
+               if (d->fxState[i].repeatLen != 0) goto bad_dirty;
+            } else {
+               if (d->fxState[i].repeatLen <= d->fxState[i].size)
+                  goto bad_dirty;
+               /* the % is safe because of the .size check above */
+               if ((d->fxState[i].repeatLen % d->fxState[i].size) != 0)
+                  goto bad_dirty;
+            }
+         }
+         /* check guard */
+         if (d->guard == NULL) goto bad_dirty;
+         tcExpr( bb, stmt, d->guard, gWordTy );
+         if (typeOfIRExpr(tyenv, d->guard) != Ity_I1)
+            sanityCheckFail(bb,stmt,"IRStmt.Dirty.guard not :: Ity_I1");
+         /* check types, minimally */
+         IRType retTy = Ity_INVALID;
+         if (d->tmp != IRTemp_INVALID) {
+            retTy = typeOfIRTemp(tyenv, d->tmp);
+            if (retTy == Ity_I1)
+               sanityCheckFail(bb,stmt,"IRStmt.Dirty.dst :: Ity_I1");
+         }
+         UInt nVECRETs = 0, nBBPTRs = 0;
+         for (i = 0; d->args[i] != NULL; i++) {
+            if (i >= 32)
+               sanityCheckFail(bb,stmt,"IRStmt.Dirty: > 32 args");
+            const IRExpr* arg = d->args[i];
+            if (UNLIKELY(arg->tag == Iex_VECRET)) {
+               nVECRETs++;
+            } else if (UNLIKELY(arg->tag == Iex_BBPTR)) {
+               nBBPTRs++;
+            } else {
+               if (typeOfIRExpr(tyenv, arg) == Ity_I1)
+                  sanityCheckFail(bb,stmt,"IRStmt.Dirty.arg[i] :: Ity_I1");
+            }
+            if (nBBPTRs > 1) {
+               sanityCheckFail(bb,stmt,"IRStmt.Dirty.args: > 1 BBPTR arg");
+            }
+            if (nVECRETs == 1) {
+               /* Fn must return V128 or V256. */
+               if (retTy != Ity_V128 && retTy != Ity_V256)
+                  sanityCheckFail(bb,stmt,
+                                  "IRStmt.Dirty.args: VECRET present, "
+                                  "but fn does not return V128 or V256");
+            } else if (nVECRETs == 0) {
+               /* Fn must not return V128 or V256 */
+               if (retTy == Ity_V128 || retTy == Ity_V256)
+                  sanityCheckFail(bb,stmt,
+                                  "IRStmt.Dirty.args: VECRET not present, "
+                                  "but fn returns V128 or V256");
+            } else {
+               sanityCheckFail(bb,stmt,
+                               "IRStmt.Dirty.args: > 1 VECRET present");
+            }
+         }
+         if (nBBPTRs > 1) {
+            sanityCheckFail(bb,stmt,
+                            "IRStmt.Dirty.args: > 1 BBPTR present");
+         }
+         /* If you ask for the baseblock pointer, you have to make
+            some declaration about access to the guest state too. */
+         if (d->nFxState == 0 && nBBPTRs != 0) {
+            sanityCheckFail(bb,stmt,
+                            "IRStmt.Dirty.args: BBPTR requested, "
+                            "but no fxState declared");
+         }
+        break;
+         bad_dirty:
+         sanityCheckFail(bb,stmt,"IRStmt.Dirty: ill-formed");
+         break;
+      }
+      case Ist_NoOp:
+         break;
+      case Ist_MBE:
+         switch (stmt->Ist.MBE.event) {
+            case Imbe_Fence: case Imbe_CancelReservation:
+               break;
+            default: sanityCheckFail(bb,stmt,"IRStmt.MBE.event: unknown");
+               break;
+         }
+         break;
+      case Ist_Exit:
+         tcExpr( bb, stmt, stmt->Ist.Exit.guard, gWordTy );
+         if (typeOfIRExpr(tyenv,stmt->Ist.Exit.guard) != Ity_I1)
+            sanityCheckFail(bb,stmt,"IRStmt.Exit.guard: not :: Ity_I1");
+         if (!saneIRConst(stmt->Ist.Exit.dst))
+            sanityCheckFail(bb,stmt,"IRStmt.Exit.dst: bad dst");
+         if (typeOfIRConst(stmt->Ist.Exit.dst) != gWordTy)
+            sanityCheckFail(bb,stmt,"IRStmt.Exit.dst: not :: guest word type");
+         /* because it would intersect with host_EvC_* */
+         if (stmt->Ist.Exit.offsIP < 16)
+            sanityCheckFail(bb,stmt,"IRStmt.Exit.offsIP: too low");
+         break;
+      default:
+         vpanic("tcStmt");
+   }
+}
+
+void sanityCheckIRSB ( const IRSB* bb, const HChar* caller,
+                       Bool require_flat, IRType guest_word_size )
+{
+   Int     i;
+   Int     n_temps    = bb->tyenv->types_used;
+   Int*    def_counts = LibVEX_Alloc_inline(n_temps * sizeof(Int));
+
+   if (0)
+      vex_printf("sanityCheck: %s\n", caller);
+
+   vassert(guest_word_size == Ity_I32
+           || guest_word_size == Ity_I64);
+
+   if (bb->stmts_used < 0 || bb->stmts_size < 8
+       || bb->stmts_used > bb->stmts_size)
+      /* this BB is so strange we can't even print it */
+      vpanic("sanityCheckIRSB: stmts array limits weird");
+
+   /* Ensure each temp has a plausible type. */
+   for (i = 0; i < n_temps; i++) {
+      IRType ty = typeOfIRTemp(bb->tyenv,(IRTemp)i);
+      if (!isPlausibleIRType(ty)) {
+         vex_printf("Temp t%d declared with implausible type 0x%x\n",
+                    i, (UInt)ty);
+         sanityCheckFail(bb,NULL,"Temp declared with implausible type");
+      }
+   }
+
+   const IRStmt* stmt;
+
+   /* Check for flatness, if required. */
+   if (require_flat) {
+      for (i = 0; i < bb->stmts_used; i++) {
+         stmt = bb->stmts[i];
+         if (!stmt)
+            sanityCheckFail(bb, stmt, "IRStmt: is NULL");
+         if (!isFlatIRStmt(stmt))
+            sanityCheckFail(bb, stmt, "IRStmt: is not flat");
+      }
+      if (!isIRAtom(bb->next))
+         sanityCheckFail(bb, NULL, "bb->next is not an atom");
+   }
+
+   /* Count the defs of each temp.  Only one def is allowed.
+      Also, check that each used temp has already been defd. */
+
+   for (i = 0; i < n_temps; i++)
+      def_counts[i] = 0;
+
+   for (i = 0; i < bb->stmts_used; i++) {
+      stmt = bb->stmts[i];
+      /* Check any temps used by this statement. */
+      useBeforeDef_Stmt(bb,stmt,def_counts);
+
+      /* Now make note of any temps defd by this statement. */
+      switch (stmt->tag) {
+      case Ist_WrTmp:
+         if (stmt->Ist.WrTmp.tmp < 0 || stmt->Ist.WrTmp.tmp >= n_temps)
+            sanityCheckFail(bb, stmt, 
+               "IRStmt.Tmp: destination tmp is out of range");
+         def_counts[stmt->Ist.WrTmp.tmp]++;
+         if (def_counts[stmt->Ist.WrTmp.tmp] > 1)
+            sanityCheckFail(bb, stmt, 
+               "IRStmt.Tmp: destination tmp is assigned more than once");
+         break;
+      case Ist_LoadG: {
+         const IRLoadG* lg = stmt->Ist.LoadG.details;
+         if (lg->dst < 0 || lg->dst >= n_temps)
+             sanityCheckFail(bb, stmt, 
+                "IRStmt.LoadG: destination tmp is out of range");
+         def_counts[lg->dst]++;
+         if (def_counts[lg->dst] > 1)
+             sanityCheckFail(bb, stmt, 
+                "IRStmt.LoadG: destination tmp is assigned more than once");
+         break;
+      }
+      case Ist_Dirty: {
+         const IRDirty* d = stmt->Ist.Dirty.details;
+         if (d->tmp != IRTemp_INVALID) {
+            if (d->tmp < 0 || d->tmp >= n_temps)
+               sanityCheckFail(bb, stmt, 
+                  "IRStmt.Dirty: destination tmp is out of range");
+            def_counts[d->tmp]++;
+            if (def_counts[d->tmp] > 1)
+               sanityCheckFail(bb, stmt, 
+                  "IRStmt.Dirty: destination tmp is assigned more than once");
+         }
+         break;
+      }
+      case Ist_CAS: {
+         const IRCAS* cas = stmt->Ist.CAS.details;
+         if (cas->oldHi != IRTemp_INVALID) {
+            if (cas->oldHi < 0 || cas->oldHi >= n_temps)
+                sanityCheckFail(bb, stmt, 
+                   "IRStmt.CAS: destination tmpHi is out of range");
+             def_counts[cas->oldHi]++;
+             if (def_counts[cas->oldHi] > 1)
+                sanityCheckFail(bb, stmt, 
+                   "IRStmt.CAS: destination tmpHi is assigned more than once");
+         }
+         if (cas->oldLo < 0 || cas->oldLo >= n_temps)
+            sanityCheckFail(bb, stmt, 
+               "IRStmt.CAS: destination tmpLo is out of range");
+         def_counts[cas->oldLo]++;
+         if (def_counts[cas->oldLo] > 1)
+            sanityCheckFail(bb, stmt, 
+               "IRStmt.CAS: destination tmpLo is assigned more than once");
+         break;
+      }
+      case Ist_LLSC:
+         if (stmt->Ist.LLSC.result < 0 || stmt->Ist.LLSC.result >= n_temps)
+            sanityCheckFail(bb, stmt,
+               "IRStmt.LLSC: destination tmp is out of range");
+         def_counts[stmt->Ist.LLSC.result]++;
+         if (def_counts[stmt->Ist.LLSC.result] > 1)
+            sanityCheckFail(bb, stmt,
+               "IRStmt.LLSC: destination tmp is assigned more than once");
+         break;
+      default:
+         /* explicitly handle the rest, so as to keep gcc quiet */
+         break;
+      }
+   }
+
+   /* Typecheck everything. */
+   for (i = 0; i < bb->stmts_used; i++)
+      if (bb->stmts[i])
+         tcStmt( bb, bb->stmts[i], guest_word_size );
+   if (typeOfIRExpr(bb->tyenv,bb->next) != guest_word_size)
+      sanityCheckFail(bb, NULL, "bb->next field has wrong type");
+   /* because it would intersect with host_EvC_* */
+   if (bb->offsIP < 16)
+      sanityCheckFail(bb, NULL, "bb->offsIP: too low");
+}
+
+/*---------------------------------------------------------------*/
+/*--- Misc helper functions                                   ---*/
+/*---------------------------------------------------------------*/
+
+Bool eqIRConst ( const IRConst* c1, const IRConst* c2 )
+{
+   if (c1->tag != c2->tag)
+      return False;
+
+   switch (c1->tag) {
+      case Ico_U1:  return toBool( (1 & c1->Ico.U1) == (1 & c2->Ico.U1) );
+      case Ico_U8:  return toBool( c1->Ico.U8  == c2->Ico.U8 );
+      case Ico_U16: return toBool( c1->Ico.U16 == c2->Ico.U16 );
+      case Ico_U32: return toBool( c1->Ico.U32 == c2->Ico.U32 );
+      case Ico_U64: return toBool( c1->Ico.U64 == c2->Ico.U64 );
+      case Ico_F32: return toBool( c1->Ico.F32 == c2->Ico.F32 );
+      case Ico_F32i: return toBool( c1->Ico.F32i == c2->Ico.F32i );
+      case Ico_F64: return toBool( c1->Ico.F64 == c2->Ico.F64 );
+      case Ico_F64i: return toBool( c1->Ico.F64i == c2->Ico.F64i );
+      case Ico_V128: return toBool( c1->Ico.V128 == c2->Ico.V128 );
+      case Ico_V256: return toBool( c1->Ico.V256 == c2->Ico.V256 );
+      default: vpanic("eqIRConst");
+   }
+}
+
+Bool eqIRRegArray ( const IRRegArray* descr1, const IRRegArray* descr2 )
+{
+   return toBool( descr1->base == descr2->base 
+                  && descr1->elemTy == descr2->elemTy
+                  && descr1->nElems == descr2->nElems );
+}
+
+Int sizeofIRType ( IRType ty )
+{
+   switch (ty) {
+      case Ity_I8:   return 1;
+      case Ity_I16:  return 2;
+      case Ity_I32:  return 4;
+      case Ity_I64:  return 8;
+      case Ity_I128: return 16;
+      case Ity_F16:  return 2;
+      case Ity_F32:  return 4;
+      case Ity_F64:  return 8;
+      case Ity_F128: return 16;
+      case Ity_D32:  return 4;
+      case Ity_D64:  return 8;
+      case Ity_D128: return 16;
+      case Ity_V128: return 16;
+      case Ity_V256: return 32;
+      default: vex_printf("\n"); ppIRType(ty); vex_printf("\n");
+               vpanic("sizeofIRType");
+   }
+}
+
+IRType integerIRTypeOfSize ( Int szB )
+{
+   switch (szB) {
+      case 8: return Ity_I64;
+      case 4: return Ity_I32;
+      case 2: return Ity_I16;
+      case 1: return Ity_I8;
+      default: vpanic("integerIRTypeOfSize");
+   }
+}
+
+IRExpr* mkIRExpr_HWord ( HWord hw )
+{
+   vassert(sizeof(void*) == sizeof(HWord));
+   if (sizeof(HWord) == 4)
+      return IRExpr_Const(IRConst_U32((UInt)hw));
+   if (sizeof(HWord) == 8)
+      return IRExpr_Const(IRConst_U64((ULong)hw));
+   vpanic("mkIRExpr_HWord");
+}
+
+IRDirty* unsafeIRDirty_0_N ( Int regparms, const HChar* name, void* addr, 
+                             IRExpr** args ) 
+{
+   IRDirty* d = emptyIRDirty();
+   d->cee   = mkIRCallee ( regparms, name, addr );
+   d->guard = IRExpr_Const(IRConst_U1(True));
+   d->args  = args;
+   return d;
+}
+
+IRDirty* unsafeIRDirty_1_N ( IRTemp dst, 
+                             Int regparms, const HChar* name, void* addr, 
+                             IRExpr** args ) 
+{
+   IRDirty* d = emptyIRDirty();
+   d->cee   = mkIRCallee ( regparms, name, addr );
+   d->guard = IRExpr_Const(IRConst_U1(True));
+   d->args  = args;
+   d->tmp   = dst;
+   return d;
+}
+
+IRExpr* mkIRExprCCall ( IRType retty,
+                        Int regparms, const HChar* name, void* addr, 
+                        IRExpr** args )
+{
+   return IRExpr_CCall ( mkIRCallee ( regparms, name, addr ), 
+                         retty, args );
+}
+
+Bool eqIRAtom ( const IRExpr* a1, const IRExpr* a2 )
+{
+   vassert(isIRAtom(a1));
+   vassert(isIRAtom(a2));
+   if (a1->tag == Iex_RdTmp && a2->tag == Iex_RdTmp)
+      return toBool(a1->Iex.RdTmp.tmp == a2->Iex.RdTmp.tmp);
+   if (a1->tag == Iex_Const && a2->tag == Iex_Const)
+      return eqIRConst(a1->Iex.Const.con, a2->Iex.Const.con);
+   return False;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                           ir_defs.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/ir_inject.c b/VEX/priv/ir_inject.c
new file mode 100644
index 0000000..012eecd
--- /dev/null
+++ b/VEX/priv/ir_inject.c
@@ -0,0 +1,267 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                       ir_inject.c ---*/
+/*---------------------------------------------------------------*/
+
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2012-2013  Florian Krohm   (britzel@acm.org)
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+#include "main_util.h"
+
+/* Convenience macros for readability */
+#define mkU8(v)   IRExpr_Const(IRConst_U8(v))
+#define mkU32(v)  IRExpr_Const(IRConst_U32(v))
+#define mkU64(v)  IRExpr_Const(IRConst_U64(v))
+#define unop(kind, a)  IRExpr_Unop(kind, a)
+#define binop(kind, a1, a2)  IRExpr_Binop(kind, a1, a2)
+#define triop(kind, a1, a2, a3)  IRExpr_Triop(kind, a1, a2, a3)
+#define qop(kind, a1, a2, a3, a4)  IRExpr_Qop(kind, a1, a2, a3, a4)
+#define stmt(irsb, st)  addStmtToIRSB(irsb, st)
+
+
+/* The IR Injection Control Block. vex_inject_ir will query its contents
+   to construct IR statements for testing purposes. */
+static IRICB iricb;
+
+
+void
+LibVEX_InitIRI(const IRICB *iricb_in)
+{
+   iricb = *iricb_in;  // copy in
+}
+
+
+static IRExpr *
+load_aux(IREndness endian, IRType type, IRExpr *addr)
+{
+   if (type == Ity_D64) {
+      /* The insn selectors do not support loading a DFP value from memory.
+         So we need to fix it here by loading an integer value and
+         reinterpreting it as DFP. */
+      return unop(Iop_ReinterpI64asD64,
+                  IRExpr_Load(endian, Ity_I64, addr));
+   }
+   if (type == Ity_I1) {
+      /* A Boolean value is stored as a 32-bit entity (see store_aux). */
+      return unop(Iop_32to1, IRExpr_Load(endian, Ity_I32, addr));
+   }
+
+   return IRExpr_Load(endian, type, addr);
+}
+
+
+/* Load a value from memory. Loads of more than 8 byte are split into
+   a series of 8-byte loads and combined using appropriate IROps. */
+static IRExpr *
+load(IREndness endian, IRType type, HWord haddr)
+{
+   IROp concat;
+   IRExpr *addr, *next_addr;
+
+   vassert(type == Ity_I1 || sizeofIRType(type) <= 16);
+
+   if (VEX_HOST_WORDSIZE == 8) {
+      addr = mkU64(haddr);
+      next_addr = binop(Iop_Add64, addr, mkU64(8));
+   } else if (VEX_HOST_WORDSIZE == 4) {
+      addr = mkU32(haddr);
+      next_addr = binop(Iop_Add32, addr, mkU32(8));
+   } else {
+      vpanic("invalid #bytes for address");
+   }
+
+   switch (type) {
+   case Ity_I128: concat = Iop_64HLto128;   type = Ity_I64; goto load128;
+   case Ity_F128: concat = Iop_F64HLtoF128; type = Ity_F64; goto load128;
+   case Ity_D128: concat = Iop_D64HLtoD128; type = Ity_D64; goto load128;
+
+   load128:
+      /* Two loads of 64 bits each. */
+      if (endian == Iend_BE) {
+         /* The more significant bits are at the lower address. */
+         return binop(concat,
+                      load_aux(endian, type, addr),
+                      load_aux(endian, type, next_addr));
+      } else {
+         /* The more significant bits are at the higher address. */
+         return binop(concat,
+                      load_aux(endian, type, next_addr),
+                      load_aux(endian, type, addr));
+      }
+
+   default:
+      return load_aux(endian, type, addr);
+   }
+}
+
+
+static void
+store_aux(IRSB *irsb, IREndness endian, IRExpr *addr, IRExpr *data)
+{
+   if (typeOfIRExpr(irsb->tyenv, data) == Ity_D64) {
+      /* The insn selectors do not support writing a DFP value to memory.
+         So we need to fix it here by reinterpreting the DFP value as an
+         integer and storing that. */
+      data = unop(Iop_ReinterpD64asI64, data);
+   }
+   if (typeOfIRExpr(irsb->tyenv, data) == Ity_I1) {
+      /* We cannot store a single bit. So we store it in a 32-bit container.
+         See also load_aux. */
+      data = unop(Iop_1Uto32, data);
+   }
+   stmt(irsb, IRStmt_Store(endian, addr, data));
+}
+
+
+/* Store a value to memory. If a value requires more than 8 bytes a series
+   of 8-byte stores will be generated. */
+static __inline__ void
+store(IRSB *irsb, IREndness endian, HWord haddr, IRExpr *data)
+{
+   IROp high, low;
+   IRExpr *addr, *next_addr;
+
+   if (VEX_HOST_WORDSIZE == 8) {
+      addr = mkU64(haddr);
+      next_addr = binop(Iop_Add64, addr, mkU64(8));
+   } else if (VEX_HOST_WORDSIZE == 4) {
+      addr = mkU32(haddr);
+      next_addr = binop(Iop_Add32, addr, mkU32(8));
+   } else {
+      vpanic("invalid #bytes for address");
+   }
+
+   IRType type = typeOfIRExpr(irsb->tyenv, data);
+
+   vassert(type == Ity_I1 || sizeofIRType(type) <= 16);
+
+   switch (type) {
+   case Ity_I128: high = Iop_128HIto64;   low = Iop_128to64;     goto store128;
+   case Ity_F128: high = Iop_F128HItoF64; low = Iop_F128LOtoF64; goto store128;
+   case Ity_D128: high = Iop_D128HItoD64; low = Iop_D128LOtoD64; goto store128;
+
+   store128:
+      /* Two stores of 64 bits each. */
+      if (endian == Iend_BE) {
+         /* The more significant bits are at the lower address. */
+         store_aux(irsb, endian, addr, unop(high, data));
+         store_aux(irsb, endian, next_addr, unop(low, data));
+      } else {
+         /* The more significant bits are at the higher address. */
+         store_aux(irsb, endian, addr, unop(low, data));
+         store_aux(irsb, endian, next_addr, unop(high, data));
+      }
+      return;
+
+   default:
+      store_aux(irsb, endian, addr, data);
+      return;
+   }
+}
+
+
+/* Inject IR stmts depending on the data provided in the control
+   block iricb. */
+void
+vex_inject_ir(IRSB *irsb, IREndness endian)
+{
+   IRExpr *data, *rounding_mode, *opnd1, *opnd2, *opnd3, *opnd4;
+
+   rounding_mode = NULL;
+   if (iricb.rounding_mode != NO_ROUNDING_MODE) {
+      rounding_mode = mkU32(iricb.rounding_mode);
+   }
+
+   switch (iricb.num_operands) {
+   case 1:
+      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
+      if (rounding_mode)
+         data = binop(iricb.op, rounding_mode, opnd1);
+      else
+         data = unop(iricb.op, opnd1);
+      break;
+
+   case 2:
+      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
+
+      if (iricb.shift_amount_is_immediate) {
+         // This implies that the IROp is a shift op
+         vassert(iricb.t_opnd2 == Ity_I8);
+         opnd2 = mkU8(*((Char *)iricb.opnd2));
+      } else {
+         opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
+      }
+
+      if (rounding_mode)
+         data = triop(iricb.op, rounding_mode, opnd1, opnd2);
+      else
+         data = binop(iricb.op, opnd1, opnd2);
+      break;
+
+   case 3:
+      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
+      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
+      opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
+      if (rounding_mode)
+         data = qop(iricb.op, rounding_mode, opnd1, opnd2, opnd3);
+      else
+         data = triop(iricb.op, opnd1, opnd2, opnd3);
+      break;
+
+   case 4:
+      vassert(rounding_mode == NULL);
+      opnd1 = load(endian, iricb.t_opnd1, iricb.opnd1);
+      opnd2 = load(endian, iricb.t_opnd2, iricb.opnd2);
+      opnd3 = load(endian, iricb.t_opnd3, iricb.opnd3);
+      opnd4 = load(endian, iricb.t_opnd4, iricb.opnd4);
+      data = qop(iricb.op, opnd1, opnd2, opnd3, opnd4);
+      break;
+
+   default:
+      vpanic("unsupported operator");
+   }
+
+   store(irsb, endian, iricb.result, data);
+
+   if (0) {
+      vex_printf("BEGIN inject\n");
+      if (iricb.t_result == Ity_I1 || sizeofIRType(iricb.t_result) <= 8) {
+         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
+      } else if (sizeofIRType(iricb.t_result) == 16) {
+         ppIRStmt(irsb->stmts[irsb->stmts_used - 2]);
+         vex_printf("\n");
+         ppIRStmt(irsb->stmts[irsb->stmts_used - 1]);
+      }
+      vex_printf("\nEND inject\n");
+   }
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                         ir_inject.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/ir_match.c b/VEX/priv/ir_match.c
new file mode 100644
index 0000000..560a25b
--- /dev/null
+++ b/VEX/priv/ir_match.c
@@ -0,0 +1,111 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                        ir_match.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* Provides a facility for doing IR tree matching. */
+
+#include "main_util.h"
+#include "ir_match.h"
+
+
+/* Assign a value to a binder.  Checks for obvious stupidities. */
+
+static 
+void setBindee ( MatchInfo* mi, Int n, IRExpr* bindee )
+{
+   if (n < 0 || n >= N_IRMATCH_BINDERS)
+      vpanic("setBindee: out of range index");
+   if (mi->bindee[n] != NULL)
+      vpanic("setBindee: bindee already set");
+   mi->bindee[n] = bindee;
+}
+
+
+/* This is the actual matching function, recursing over the pattern
+   and expression trees in the obvious way, and dumping any matches
+   found into 'mi'. */
+
+static 
+Bool matchWrk ( MatchInfo* mi, IRExpr* p/*attern*/, IRExpr* e/*xpr*/ )
+{
+   switch (p->tag) {
+      case Iex_Binder: /* aha, what we were looking for. */
+         setBindee(mi, p->Iex.Binder.binder, e);
+         return True;
+      case Iex_Unop:
+         if (e->tag != Iex_Unop) return False;
+         if (p->Iex.Unop.op != e->Iex.Unop.op) return False;
+         if (!matchWrk(mi, p->Iex.Unop.arg, e->Iex.Unop.arg))
+            return False;
+         return True;
+      case Iex_Binop:
+         if (e->tag != Iex_Binop) return False;
+         if (p->Iex.Binop.op != e->Iex.Binop.op) return False;
+         if (!matchWrk(mi, p->Iex.Binop.arg1, e->Iex.Binop.arg1))
+            return False;
+         if (!matchWrk(mi, p->Iex.Binop.arg2, e->Iex.Binop.arg2))
+            return False;
+         return True;
+      case Iex_Load:
+         if (e->tag != Iex_Load) return False;
+         if (p->Iex.Load.end != e->Iex.Load.end) return False;
+         if (p->Iex.Load.ty != e->Iex.Load.ty) return False;
+         if (!matchWrk(mi, p->Iex.Load.addr, e->Iex.Load.addr))
+            return False;
+         return True;
+      case Iex_Const:
+         if (e->tag != Iex_Const) return False;
+         return eqIRConst(p->Iex.Const.con, e->Iex.Const.con);
+      default: 
+         ppIRExpr(p);
+         vpanic("match");
+   }
+}
+
+
+/* Top level entry point to the matcher. */
+
+Bool matchIRExpr ( MatchInfo* mi, IRExpr* p/*attern*/, IRExpr* e/*xpr*/ )
+{
+   Int i;
+   for (i = 0; i < N_IRMATCH_BINDERS; i++)
+      mi->bindee[i] = NULL;
+   return matchWrk(mi, p, e);
+}
+
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                          ir_match.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/ir_match.h b/VEX/priv/ir_match.h
new file mode 100644
index 0000000..6900fa7
--- /dev/null
+++ b/VEX/priv/ir_match.h
@@ -0,0 +1,88 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                        ir_match.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* Provides a facility for doing IR tree matching. */
+
+#ifndef __VEX_IR_MATCH_H
+#define __VEX_IR_MATCH_H
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "main_util.h"          // NULL
+
+/* Patterns are simply IRExpr* trees, with IRExpr_Binder nodes at the
+   leaves, indicating binding points.  Use these magic macros to
+   declare and define patterns. */
+
+#define DECLARE_PATTERN(_patt) \
+   static IRExpr* _patt = NULL
+
+#define DEFINE_PATTERN(_patt,_expr)                            \
+   do {                                                        \
+      if (!(_patt)) {                                          \
+         vassert(vexGetAllocMode() == VexAllocModeTEMP);       \
+         vexSetAllocMode(VexAllocModePERM);                    \
+         _patt = (_expr);                                      \
+         vexSetAllocMode(VexAllocModeTEMP);                    \
+         vassert(vexGetAllocMode() == VexAllocModeTEMP);       \
+      }                                                        \
+   } while (0)
+
+
+/* This type returns the result of a match -- it records what
+   the binders got instantiated to. */
+
+#define N_IRMATCH_BINDERS 4
+
+typedef
+   struct {
+      IRExpr* bindee[N_IRMATCH_BINDERS];
+   }
+   MatchInfo;
+
+
+/* The matching function.  p is expected to have zero or more
+   IRExpr_Binder nodes in it, numbered 0, 1, 2 ... Returns True if a
+   match succeeded. */
+
+extern
+Bool matchIRExpr ( MatchInfo* mi, IRExpr* p/*attern*/, IRExpr* e/*xpr*/ );
+
+
+#endif /* ndef __VEX_IR_MATCH_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                          ir_match.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/ir_opt.c b/VEX/priv/ir_opt.c
new file mode 100644
index 0000000..52cef9b
--- /dev/null
+++ b/VEX/priv/ir_opt.c
@@ -0,0 +1,6683 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                          ir_opt.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+#include "ir_opt.h"
+
+
+/* Set to 1 for lots of debugging output. */
+#define DEBUG_IROPT 0
+
+/* Set to 1 to gather some statistics. Currently only for sameIRExprs. */
+#define STATS_IROPT 0
+
+
+/* What iropt does, 29 Dec 04.
+
+   It takes an IRSB and produces a new one with the same meaning,
+   defined thus:
+
+   After execution of the new BB, all guest state and guest memory is
+   the same as after execution of the original.  This is true
+   regardless of how the block was exited (at the end vs side exit).
+
+   In addition, parts of the guest state will be identical to that
+   created by execution of the original at the following observation
+   points:
+
+   * In a dirty helper call, any parts of the guest state that the
+     helper states that it reads or modifies will be up to date.
+     Also, guest memory will be up to date.  Parts of the guest state
+     not marked as being read or modified by the helper cannot be
+     assumed to be up-to-date at the point where the helper is called.
+
+   * If iropt_register_updates == VexRegUpdSpAtMemAccess :
+     The guest state is only up to date as explained above
+     (i.e. at SB exits and as specified by dirty helper call).
+     Also, the stack pointer register is up to date at memory
+     exception points (as this is needed for the stack extension
+     logic in m_signals.c).
+
+   * If iropt_register_updates == VexRegUpdUnwindregsAtMemAccess :
+     Immediately prior to any load or store, those parts of the guest
+     state marked as requiring precise exceptions will be up to date.
+     Also, guest memory will be up to date.  Parts of the guest state
+     not marked as requiring precise exceptions cannot be assumed to
+     be up-to-date at the point of the load/store.
+
+   * If iropt_register_updates == VexRegUpdAllregsAtMemAccess:
+     Same as minimal, but all the guest state is up to date at memory
+     exception points.
+
+   * If iropt_register_updates == VexRegUpdAllregsAtEachInsn :
+     Guest state is up to date at each instruction.
+
+   The relative order of loads and stores (including loads/stores of
+   guest memory done by dirty helpers annotated as such) is not
+   changed.  However, the relative order of loads with no intervening
+   stores/modifies may be changed.  
+
+   Transformation order
+   ~~~~~~~~~~~~~~~~~~~~
+
+   There are three levels of optimisation, controlled by
+   vex_control.iropt_level.  Define first:
+
+   "Cheap transformations" are the following sequence:
+      * Redundant-Get removal
+      * Redundant-Put removal
+      * Constant propagation/folding
+      * Dead code removal
+      * Specialisation of clean helper functions
+      * Dead code removal
+
+   "Expensive transformations" are the following sequence:
+      * CSE
+      * Folding of add/sub chains
+      * Redundant-GetI removal
+      * Redundant-PutI removal
+      * Dead code removal
+
+   Then the transformations are as follows, as defined by
+   vex_control.iropt_level:
+
+   Level 0: 
+      * Flatten into atomic form.
+
+   Level 1: the following sequence:
+      * Flatten into atomic form.
+      * Cheap transformations.
+
+   Level 2: the following sequence
+      * Flatten into atomic form.
+      * Cheap transformations.
+      * If block contains any floating or vector types, CSE.
+      * If block contains GetI or PutI, Expensive transformations.
+      * Try unrolling loops.  Three possible outcomes:
+        - No effect: do nothing more.
+        - Unrolled a loop, and block does not contain GetI or PutI:
+          Do: * CSE
+              * Dead code removal
+        - Unrolled a loop, and block contains GetI or PutI:
+          Do: * Expensive transformations
+              * Cheap transformations
+*/
+
+/* Implementation notes, 29 Dec 04.
+
+   TODO (important): I think rPutI removal ignores precise exceptions
+   and is therefore in a sense, wrong.  In the sense that PutIs are
+   assumed not to write parts of the guest state that we need to have
+   up-to-date at loads/stores.  So far on x86 guest that has not
+   mattered since indeed only the x87 FP registers and tags are
+   accessed using GetI/PutI, and there is no need so far for them to
+   be up to date at mem exception points.  The rPutI pass should be
+   fixed.
+
+   TODO: improve pessimistic handling of precise exceptions
+     in the tree builder.
+
+   TODO: check interaction of rGetI and dirty helpers. 
+   
+   F64i constants are treated differently from other constants.
+   They are not regarded as atoms, and instead lifted off and
+   bound to temps.  This allows them to participate in CSE, which
+   is important for getting good performance for x86 guest code.
+
+   CSE up F64 literals (already doing F64is)
+
+   CSE: consider carefully the requirement for precise exns
+        prior to making CSE any more aggressive.  */
+
+
+/*---------------------------------------------------------------*/
+/*--- Finite mappery, of a sort                               ---*/
+/*---------------------------------------------------------------*/
+
+/* General map from HWord-sized thing to HWord-sized thing.  Could be by
+   hashing, but it's not clear whether or not this would really be any
+   faster. */
+
+typedef
+   struct {
+      Bool*  inuse;  /* live flag per slot; cleared to invalidate   */
+      HWord* key;    /* keys,   parallel to inuse                   */
+      HWord* val;    /* values, parallel to inuse                   */
+      Int    size;   /* capacity of the three arrays                */
+      Int    used;   /* number of slots ever occupied (<= size)     */
+   }
+   HashHW;
+
+static HashHW* newHHW ( void )
+{
+   /* Create an empty map with a small initial capacity; addToHHW
+      grows it on demand. */
+   HashHW* map = LibVEX_Alloc_inline(sizeof(HashHW));
+   map->size   = 8;
+   map->used   = 0;
+   map->inuse  = LibVEX_Alloc_inline(sizeof(Bool)  * map->size);
+   map->key    = LibVEX_Alloc_inline(sizeof(HWord) * map->size);
+   map->val    = LibVEX_Alloc_inline(sizeof(HWord) * map->size);
+   return map;
+}
+
+
+/* Look up key in the map. */
+
+static Bool lookupHHW ( HashHW* h, /*OUT*/HWord* val, HWord key )
+{
+   /* Linear scan over the live slots; report the first hit, storing
+      the bound value only if the caller supplied somewhere to put
+      it. */
+   Int ix;
+   for (ix = 0; ix < h->used; ix++) {
+      if (!h->inuse[ix])
+         continue;
+      if (h->key[ix] != key)
+         continue;
+      if (val)
+         *val = h->val[ix];
+      return True;
+   }
+   return False;
+}
+
+
+/* Add key->val to the map.  Replaces any existing binding for key. */
+
+static void addToHHW ( HashHW* h, HWord key, HWord val )
+{
+   Int i, j;
+   /* vex_printf("addToHHW(%llx, %llx)\n", key, val); */
+
+   /* Find and replace existing binding, if any. */
+   for (i = 0; i < h->used; i++) {
+      if (h->inuse[i] && h->key[i] == key) {
+         h->val[i] = val;
+         return;
+      }
+   }
+
+   /* Ensure a space is available. */
+   if (h->used == h->size) {
+      /* Copy into arrays twice the size.  Only live entries are
+         carried across, so this both doubles the capacity and
+         compacts away slots whose inuse flag has been cleared --
+         'used' may shrink here.  The old arrays are simply abandoned;
+         there is no explicit free in this scheme (allocation appears
+         to be arena-style -- see the alloc-mode machinery in
+         ir_match.h). */
+      Bool*  inuse2 = LibVEX_Alloc_inline(2 * h->size * sizeof(Bool));
+      HWord* key2   = LibVEX_Alloc_inline(2 * h->size * sizeof(HWord));
+      HWord* val2   = LibVEX_Alloc_inline(2 * h->size * sizeof(HWord));
+      for (i = j = 0; i < h->size; i++) {
+         if (!h->inuse[i]) continue;
+         inuse2[j] = True;
+         key2[j] = h->key[i];
+         val2[j] = h->val[i];
+         j++;
+      }
+      h->used = j;
+      h->size *= 2;
+      h->inuse = inuse2;
+      h->key = key2;
+      h->val = val2;
+   }
+
+   /* Finally, add it. */
+   vassert(h->used < h->size);
+   h->inuse[h->used] = True;
+   h->key[h->used] = key;
+   h->val[h->used] = val;
+   h->used++;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Flattening out a BB into atomic SSA form                ---*/
+/*---------------------------------------------------------------*/
+
+/* Non-critical helper, heuristic for reducing the number of tmp-tmp
+   copies made by flattening.  If in doubt return False. */
+
+static Bool isFlat ( IRExpr* e )
+{
+   /* An expression is "flat" here if it is a Get, a Binop over two
+      atoms, or a Load from an atomic address.  Anything else -- or
+      any doubt -- yields False. */
+   switch (e->tag) {
+      case Iex_Get:
+         return True;
+      case Iex_Binop:
+         return toBool( isIRAtom(e->Iex.Binop.arg1)
+                        && isIRAtom(e->Iex.Binop.arg2) );
+      case Iex_Load:
+         return isIRAtom(e->Iex.Load.addr);
+      default:
+         return False;
+   }
+}
+
+/* Flatten out 'ex' so it is atomic, returning a new expression with
+   the same value, after having appended extra IRTemp assignments to
+   the end of 'bb'. */
+
+static IRExpr* flatten_Expr ( IRSB* bb, IRExpr* ex )
+{
+   Int i;
+   IRExpr** newargs;
+   IRType ty = typeOfIRExpr(bb->tyenv, ex);
+   IRTemp t1;
+
+   /* Every non-atomic case below flattens its subexpressions
+      recursively, appends a WrTmp of the rebuilt expression to 'bb',
+      and returns the new temp as an atom. */
+   switch (ex->tag) {
+
+      case Iex_GetI:
+         t1 = newIRTemp(bb->tyenv, ty);
+         addStmtToIRSB(bb, IRStmt_WrTmp(t1,
+            IRExpr_GetI(ex->Iex.GetI.descr,
+                        flatten_Expr(bb, ex->Iex.GetI.ix),
+                        ex->Iex.GetI.bias)));
+         return IRExpr_RdTmp(t1);
+
+      case Iex_Get:
+         /* Get has no subexpressions, but is still bound to a temp so
+            that the value returned to the caller is an atom. */
+         t1 = newIRTemp(bb->tyenv, ty);
+         addStmtToIRSB(bb, 
+            IRStmt_WrTmp(t1, ex));
+         return IRExpr_RdTmp(t1);
+
+      case Iex_Qop: {
+         IRQop* qop = ex->Iex.Qop.details;
+         t1 = newIRTemp(bb->tyenv, ty);
+         addStmtToIRSB(bb, IRStmt_WrTmp(t1, 
+            IRExpr_Qop(qop->op,
+                         flatten_Expr(bb, qop->arg1),
+                         flatten_Expr(bb, qop->arg2),
+                         flatten_Expr(bb, qop->arg3),
+                         flatten_Expr(bb, qop->arg4))));
+         return IRExpr_RdTmp(t1);
+      }
+
+      case Iex_Triop: {
+         IRTriop* triop = ex->Iex.Triop.details;
+         t1 = newIRTemp(bb->tyenv, ty);
+         addStmtToIRSB(bb, IRStmt_WrTmp(t1, 
+            IRExpr_Triop(triop->op,
+                         flatten_Expr(bb, triop->arg1),
+                         flatten_Expr(bb, triop->arg2),
+                         flatten_Expr(bb, triop->arg3))));
+         return IRExpr_RdTmp(t1);
+      }
+
+      case Iex_Binop:
+         t1 = newIRTemp(bb->tyenv, ty);
+         addStmtToIRSB(bb, IRStmt_WrTmp(t1, 
+            IRExpr_Binop(ex->Iex.Binop.op,
+                         flatten_Expr(bb, ex->Iex.Binop.arg1),
+                         flatten_Expr(bb, ex->Iex.Binop.arg2))));
+         return IRExpr_RdTmp(t1);
+
+      case Iex_Unop:
+         t1 = newIRTemp(bb->tyenv, ty);
+         addStmtToIRSB(bb, IRStmt_WrTmp(t1, 
+            IRExpr_Unop(ex->Iex.Unop.op,
+                        flatten_Expr(bb, ex->Iex.Unop.arg))));
+         return IRExpr_RdTmp(t1);
+
+      case Iex_Load:
+         t1 = newIRTemp(bb->tyenv, ty);
+         addStmtToIRSB(bb, IRStmt_WrTmp(t1,
+            IRExpr_Load(ex->Iex.Load.end,
+                        ex->Iex.Load.ty, 
+                        flatten_Expr(bb, ex->Iex.Load.addr))));
+         return IRExpr_RdTmp(t1);
+
+      case Iex_CCall:
+         /* Flatten each argument of the call (args is a NULL-terminated
+            vector; a shallow copy is taken so the original expression
+            is not modified). */
+         newargs = shallowCopyIRExprVec(ex->Iex.CCall.args);
+         for (i = 0; newargs[i]; i++)
+            newargs[i] = flatten_Expr(bb, newargs[i]);
+         t1 = newIRTemp(bb->tyenv, ty);
+         addStmtToIRSB(bb, IRStmt_WrTmp(t1,
+            IRExpr_CCall(ex->Iex.CCall.cee,
+                         ex->Iex.CCall.retty,
+                         newargs)));
+         return IRExpr_RdTmp(t1);
+
+      case Iex_ITE:
+         t1 = newIRTemp(bb->tyenv, ty);
+         addStmtToIRSB(bb, IRStmt_WrTmp(t1,
+            IRExpr_ITE(flatten_Expr(bb, ex->Iex.ITE.cond),
+                       flatten_Expr(bb, ex->Iex.ITE.iftrue),
+                       flatten_Expr(bb, ex->Iex.ITE.iffalse))));
+         return IRExpr_RdTmp(t1);
+
+      case Iex_Const:
+         /* Lift F64i constants out onto temps so they can be CSEd
+            later. */
+         if (ex->Iex.Const.con->tag == Ico_F64i) {
+            t1 = newIRTemp(bb->tyenv, ty);
+            addStmtToIRSB(bb, IRStmt_WrTmp(t1,
+               IRExpr_Const(ex->Iex.Const.con)));
+            return IRExpr_RdTmp(t1);
+         } else {
+            /* Leave all other constants alone. */
+            return ex;
+         }
+
+      case Iex_RdTmp:
+         /* Already an atom. */
+         return ex;
+
+      default:
+         /* Any other tag is unexpected here: print it and abort. */
+         vex_printf("\n");
+         ppIRExpr(ex); 
+         vex_printf("\n");
+         vpanic("flatten_Expr");
+   }
+}
+
+
+/* Append a completely flattened form of 'st' to the end of 'bb'. */
+
+static void flatten_Stmt ( IRSB* bb, IRStmt* st )
+{
+   Int i;
+   IRExpr   *e1, *e2, *e3, *e4, *e5;
+   IRDirty  *d,  *d2;
+   IRCAS    *cas, *cas2;
+   IRPutI   *puti, *puti2;
+   IRLoadG  *lg;
+   IRStoreG *sg;
+   switch (st->tag) {
+      case Ist_Put:
+         if (isIRAtom(st->Ist.Put.data)) {
+            /* optimisation to reduce the amount of heap wasted
+               by the flattener */
+            addStmtToIRSB(bb, st);
+         } else {
+            /* general case, always correct */
+            e1 = flatten_Expr(bb, st->Ist.Put.data);
+            addStmtToIRSB(bb, IRStmt_Put(st->Ist.Put.offset, e1));
+         }
+         break;
+      case Ist_PutI:
+         puti = st->Ist.PutI.details;
+         e1 = flatten_Expr(bb, puti->ix);
+         e2 = flatten_Expr(bb, puti->data);
+         puti2 = mkIRPutI(puti->descr, e1, puti->bias, e2);
+         addStmtToIRSB(bb, IRStmt_PutI(puti2));
+         break;
+      case Ist_WrTmp:
+         if (isFlat(st->Ist.WrTmp.data)) {
+            /* optimisation, to reduce the number of tmp-tmp
+               copies generated */
+            addStmtToIRSB(bb, st);
+         } else {
+            /* general case, always correct */
+            e1 = flatten_Expr(bb, st->Ist.WrTmp.data);
+            addStmtToIRSB(bb, IRStmt_WrTmp(st->Ist.WrTmp.tmp, e1));
+         }
+         break;
+      case Ist_Store:
+         e1 = flatten_Expr(bb, st->Ist.Store.addr);
+         e2 = flatten_Expr(bb, st->Ist.Store.data);
+         addStmtToIRSB(bb, IRStmt_Store(st->Ist.Store.end, e1,e2));
+         break;
+      case Ist_StoreG:
+         sg = st->Ist.StoreG.details;
+         e1 = flatten_Expr(bb, sg->addr);
+         e2 = flatten_Expr(bb, sg->data);
+         e3 = flatten_Expr(bb, sg->guard);
+         addStmtToIRSB(bb, IRStmt_StoreG(sg->end, e1, e2, e3));
+         break;
+      case Ist_LoadG:
+         lg = st->Ist.LoadG.details;
+         e1 = flatten_Expr(bb, lg->addr);
+         e2 = flatten_Expr(bb, lg->alt);
+         e3 = flatten_Expr(bb, lg->guard);
+         addStmtToIRSB(bb, IRStmt_LoadG(lg->end, lg->cvt, lg->dst,
+                                        e1, e2, e3));
+         break;
+      case Ist_CAS:
+         /* expdHi/dataHi may be NULL (single-width CAS); preserve the
+            NULLs rather than flattening them. */
+         cas  = st->Ist.CAS.details;
+         e1   = flatten_Expr(bb, cas->addr);
+         e2   = cas->expdHi ? flatten_Expr(bb, cas->expdHi) : NULL;
+         e3   = flatten_Expr(bb, cas->expdLo);
+         e4   = cas->dataHi ? flatten_Expr(bb, cas->dataHi) : NULL;
+         e5   = flatten_Expr(bb, cas->dataLo);
+         cas2 = mkIRCAS( cas->oldHi, cas->oldLo, cas->end,
+                         e1, e2, e3, e4, e5 );
+         addStmtToIRSB(bb, IRStmt_CAS(cas2));
+         break;
+      case Ist_LLSC:
+         /* storedata is NULL for the Load-Linked form; preserve it. */
+         e1 = flatten_Expr(bb, st->Ist.LLSC.addr);
+         e2 = st->Ist.LLSC.storedata
+                 ? flatten_Expr(bb, st->Ist.LLSC.storedata)
+                 : NULL;
+         addStmtToIRSB(bb, IRStmt_LLSC(st->Ist.LLSC.end,
+                                       st->Ist.LLSC.result, e1, e2));
+         break;
+      case Ist_Dirty:
+         /* Flatten into a struct copy, so the original IRDirty (and
+            its args vector) are left intact. */
+         d = st->Ist.Dirty.details;
+         d2 = emptyIRDirty();
+         *d2 = *d;
+         d2->args = shallowCopyIRExprVec(d2->args);
+         if (d2->mFx != Ifx_None) {
+            d2->mAddr = flatten_Expr(bb, d2->mAddr);
+         } else {
+            vassert(d2->mAddr == NULL);
+         }
+         d2->guard = flatten_Expr(bb, d2->guard);
+         /* Special VECRET/BBPTR marker args are not flattened. */
+         for (i = 0; d2->args[i]; i++) {
+            IRExpr* arg = d2->args[i];
+            if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+               d2->args[i] = flatten_Expr(bb, arg);
+         }
+         addStmtToIRSB(bb, IRStmt_Dirty(d2));
+         break;
+      case Ist_NoOp:
+      case Ist_MBE:
+      case Ist_IMark:
+         /* No subexpressions to flatten; copy through unchanged. */
+         addStmtToIRSB(bb, st);
+         break;
+      case Ist_AbiHint:
+         e1 = flatten_Expr(bb, st->Ist.AbiHint.base);
+         e2 = flatten_Expr(bb, st->Ist.AbiHint.nia);
+         addStmtToIRSB(bb, IRStmt_AbiHint(e1, st->Ist.AbiHint.len, e2));
+         break;
+      case Ist_Exit:
+         e1 = flatten_Expr(bb, st->Ist.Exit.guard);
+         addStmtToIRSB(bb, IRStmt_Exit(e1, st->Ist.Exit.jk,
+                                       st->Ist.Exit.dst,
+                                       st->Ist.Exit.offsIP));
+         break;
+      default:
+         /* Any other tag is unexpected here: print it and abort. */
+         vex_printf("\n");
+         ppIRStmt(st); 
+         vex_printf("\n");
+         vpanic("flatten_Stmt");
+   }
+}
+
+
+static IRSB* flatten_BB ( IRSB* in )
+{
+   /* Build a fresh IRSB whose statements are the flattened forms of
+      those in 'in'.  The type env is deep-copied so new temps can be
+      added without disturbing the original. */
+   Int   i;
+   IRSB* out  = emptyIRSB();
+   out->tyenv = deepCopyIRTypeEnv( in->tyenv );
+   for (i = 0; i < in->stmts_used; i++) {
+      if (in->stmts[i])
+         flatten_Stmt( out, in->stmts[i] );
+   }
+   out->next     = flatten_Expr( out, in->next );
+   out->jumpkind = in->jumpkind;
+   out->offsIP   = in->offsIP;
+   return out;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- In-place removal of redundant GETs                      ---*/
+/*---------------------------------------------------------------*/
+
+/* Scan forwards, building up an environment binding (min offset, max
+   offset) pairs to values, which will either be temps or constants.
+
+   On seeing 't = Get(minoff,maxoff)', look up (minoff,maxoff) in the
+   env and if it matches, replace the Get with the stored value.  If
+   there is no match, add a (minoff,maxoff) :-> t binding.
+
+   On seeing 'Put (minoff,maxoff) = t or c', first remove in the env
+   any binding which fully or partially overlaps with (minoff,maxoff).
+   Then add a new (minoff,maxoff) :-> t or c binding.  */
+
+/* Extract the min/max offsets from a guest state array descriptor. */
+
+inline
+static void getArrayBounds ( IRRegArray* descr, 
+                             UInt* minoff, UInt* maxoff )
+{
+   /* Inclusive byte range covered by the described guest state
+      array.  Both ends must fit in 16 bits so they can later be
+      packed into a single key word. */
+   UInt lo = descr->base;
+   UInt hi = lo + descr->nElems * sizeofIRType(descr->elemTy) - 1;
+   vassert((lo & ~0xFFFF) == 0);
+   vassert((hi & ~0xFFFF) == 0);
+   vassert(lo <= hi);
+   *minoff = lo;
+   *maxoff = hi;
+}
+
+/* Create keys, of the form ((minoffset << 16) | maxoffset). */
+
+static UInt mk_key_GetPut ( Int offset, IRType ty )
+{
+   /* Pack the inclusive byte range [offset .. offset+size-1] touched
+      by a Get/Put of type 'ty' as (min << 16) | max.  Both ends must
+      fit in 16 bits. */
+   UInt lo = offset;
+   UInt hi = lo + sizeofIRType(ty) - 1;
+   vassert((lo & ~0xFFFF) == 0);
+   vassert((hi & ~0xFFFF) == 0);
+   return (lo << 16) | hi;
+}
+
+static UInt mk_key_GetIPutI ( IRRegArray* descr )
+{
+   /* Same packing as mk_key_GetPut, but the range spans the entire
+      guest state array described by 'descr'. */
+   UInt lo, hi;
+   getArrayBounds( descr, &lo, &hi );
+   vassert((lo & ~0xFFFF) == 0);
+   vassert((hi & ~0xFFFF) == 0);
+   return (lo << 16) | hi;
+}
+
+/* Supposing h has keys of the form generated by mk_key_GetPut and
+   mk_key_GetIPutI, invalidate any key which overlaps (k_lo
+   .. k_hi). 
+*/
+static void invalidateOverlaps ( HashHW* h, UInt k_lo, UInt k_hi )
+{
+   Int j;
+   vassert(k_lo <= k_hi);
+   /* Walk every live entry and clear the inuse flag on those whose
+      packed (lo .. hi) range intersects (k_lo .. k_hi) in any way. */
+   /* vex_printf("invalidate %d .. %d\n", k_lo, k_hi ); */
+   for (j = 0; j < h->used; j++) {
+      if (!h->inuse[j])
+         continue;
+      UInt e_lo = (((UInt)h->key[j]) >> 16) & 0xFFFF;
+      UInt e_hi = ((UInt)h->key[j]) & 0xFFFF;
+      vassert(e_lo <= e_hi);
+      /* Two ranges overlap unless one ends before the other begins. */
+      if (!(e_hi < k_lo || k_hi < e_lo))
+         h->inuse[j] = False;
+   }
+}
+
+
+static void redundant_get_removal_BB ( IRSB* bb )
+{
+   /* Forward pass.  env maps packed (minoff,maxoff) keys (see
+      mk_key_GetPut) to IRExpr* atoms -- either a RdTmp of a temp
+      known to hold that state range, or the atom written to it by a
+      Put. */
+   HashHW* env = newHHW();
+   UInt    key = 0; /* keep gcc -O happy */
+   Int     i, j;
+   HWord   val;
+
+   for (i = 0; i < bb->stmts_used; i++) {
+      IRStmt* st = bb->stmts[i];
+
+      if (st->tag == Ist_NoOp)
+         continue;
+
+      /* Deal with Gets */
+      if (st->tag == Ist_WrTmp
+          && st->Ist.WrTmp.data->tag == Iex_Get) {
+         /* st is 't = Get(...)'.  Look up in the environment and see
+            if the Get can be replaced. */
+         IRExpr* get = st->Ist.WrTmp.data;
+         key = (HWord)mk_key_GetPut( get->Iex.Get.offset, 
+                                     get->Iex.Get.ty );
+         if (lookupHHW(env, &val, (HWord)key)) {
+            /* found it */
+            /* Note, we could do better here.  If the types are
+               different we don't do the substitution, since doing so
+               could lead to invalidly-typed IR.  An improvement would
+               be to stick in a reinterpret-style cast, although that
+               would make maintaining flatness more difficult. */
+            IRExpr* valE    = (IRExpr*)val;
+            Bool    typesOK = toBool( typeOfIRExpr(bb->tyenv,valE) 
+                                      == st->Ist.WrTmp.data->Iex.Get.ty );
+            if (typesOK && DEBUG_IROPT) {
+               vex_printf("rGET: "); ppIRExpr(get);
+               vex_printf("  ->  "); ppIRExpr(valE);
+               vex_printf("\n");
+            }
+            if (typesOK)
+               bb->stmts[i] = IRStmt_WrTmp(st->Ist.WrTmp.tmp, valE);
+         } else {
+            /* Not found, but at least we know that t and the Get(...)
+               are now associated.  So add a binding to reflect that
+               fact. */
+            addToHHW( env, (HWord)key, 
+                           (HWord)(void*)(IRExpr_RdTmp(st->Ist.WrTmp.tmp)) );
+         }
+      }
+
+      /* Deal with Puts: invalidate any env entries overlapped by this
+         Put */
+      if (st->tag == Ist_Put || st->tag == Ist_PutI) {
+         UInt k_lo, k_hi;
+         if (st->tag == Ist_Put) {
+            key = mk_key_GetPut( st->Ist.Put.offset, 
+                                 typeOfIRExpr(bb->tyenv,st->Ist.Put.data) );
+         } else {
+            vassert(st->tag == Ist_PutI);
+            key = mk_key_GetIPutI( st->Ist.PutI.details->descr );
+         }
+
+         k_lo = (key >> 16) & 0xFFFF;
+         k_hi = key & 0xFFFF;
+         invalidateOverlaps(env, k_lo, k_hi);
+      }
+      else
+      if (st->tag == Ist_Dirty) {
+         /* Deal with dirty helpers which write or modify guest state.
+            Invalidate the entire env.  We could do a lot better
+            here. */
+         IRDirty* d      = st->Ist.Dirty.details;
+         Bool     writes = False;
+         for (j = 0; j < d->nFxState; j++) {
+            if (d->fxState[j].fx == Ifx_Modify 
+                || d->fxState[j].fx == Ifx_Write)
+            writes = True;
+         }
+         if (writes) {
+            /* dump the entire env (not clever, but correct ...) */
+            for (j = 0; j < env->used; j++)
+               env->inuse[j] = False;
+            if (0) vex_printf("rGET: trash env due to dirty helper\n");
+         }
+      }
+
+      /* add this one to the env, if appropriate */
+      if (st->tag == Ist_Put) {
+         vassert(isIRAtom(st->Ist.Put.data));
+         /* 'key' is the one computed in the Ist_Put arm of the
+            invalidation step just above. */
+         addToHHW( env, (HWord)key, (HWord)(st->Ist.Put.data));
+      }
+
+   } /* for (i = 0; i < bb->stmts_used; i++) */
+
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- In-place removal of redundant PUTs                      ---*/
+/*---------------------------------------------------------------*/
+
+/* Find any Get uses in st and invalidate any partially or fully
+   overlapping ranges listed in env.  Due to the flattening phase, the
+   only stmt kind we expect to find a Get on is IRStmt_WrTmp. */
+
+static void handle_gets_Stmt ( 
+               HashHW* env, 
+               IRStmt* st,
+               Bool (*preciseMemExnsFn)(Int,Int,VexRegisterUpdates),
+               VexRegisterUpdates pxControl
+            )
+{
+   Int     j;
+   UInt    key = 0; /* keep gcc -O happy */
+   /* isGet: set when 'st' reads guest state via Get/GetI, in which
+      case the overlapped env entries must be invalidated.
+      memRW: set when 'st' touches memory, which may force flushing
+      of precise-exception-relevant state (handled after the
+      switch). */
+   Bool    isGet;
+   Bool    memRW = False;
+   IRExpr* e;
+
+   switch (st->tag) {
+
+      /* This is the only interesting case.  Deal with Gets in the RHS
+         expression. */
+      case Ist_WrTmp:
+         e = st->Ist.WrTmp.data;
+         switch (e->tag) {
+            case Iex_Get:
+               isGet = True;
+               key = mk_key_GetPut ( e->Iex.Get.offset, e->Iex.Get.ty );
+               break;
+            case Iex_GetI:
+               isGet = True;
+               key = mk_key_GetIPutI ( e->Iex.GetI.descr );
+               break;
+            case Iex_Load:
+               isGet = False;
+               memRW = True;
+               break;
+            default: 
+               isGet = False;
+         }
+         if (isGet) {
+            UInt k_lo, k_hi;
+            k_lo = (key >> 16) & 0xFFFF;
+            k_hi = key & 0xFFFF;
+            invalidateOverlaps(env, k_lo, k_hi);
+         }
+         break;
+
+      /* Be very conservative for dirty helper calls; dump the entire
+         environment.  The helper might read guest state, in which
+         case it needs to be flushed first.  Also, the helper might
+         access guest memory, in which case all parts of the guest
+         state requiring precise exceptions needs to be flushed.  The
+         crude solution is just to flush everything; we could easily
+         enough do a lot better if needed. */
+      /* Probably also overly-conservative, but also dump everything
+         if we hit a memory bus event (fence, lock, unlock).  Ditto
+         AbiHints, CASs, LLs and SCs. */
+      case Ist_AbiHint:
+         vassert(isIRAtom(st->Ist.AbiHint.base));
+         vassert(isIRAtom(st->Ist.AbiHint.nia));
+         /* fall through */
+      case Ist_MBE:
+      case Ist_Dirty:
+      case Ist_CAS:
+      case Ist_LLSC:
+         for (j = 0; j < env->used; j++)
+            env->inuse[j] = False;
+         break;
+
+      /* all other cases are boring. */
+      case Ist_Store:
+         vassert(isIRAtom(st->Ist.Store.addr));
+         vassert(isIRAtom(st->Ist.Store.data));
+         memRW = True;
+         break;
+      case Ist_StoreG: {
+         IRStoreG* sg = st->Ist.StoreG.details;
+         vassert(isIRAtom(sg->addr));
+         vassert(isIRAtom(sg->data));
+         vassert(isIRAtom(sg->guard));
+         memRW = True;
+         break;
+      }
+      case Ist_LoadG: {
+         IRLoadG* lg = st->Ist.LoadG.details;
+         vassert(isIRAtom(lg->addr));
+         vassert(isIRAtom(lg->alt));
+         vassert(isIRAtom(lg->guard));
+         memRW = True;
+         break;
+      }
+      case Ist_Exit:
+         vassert(isIRAtom(st->Ist.Exit.guard));
+         break;
+
+      case Ist_Put:
+         vassert(isIRAtom(st->Ist.Put.data));
+         break;
+
+      case Ist_PutI:
+         vassert(isIRAtom(st->Ist.PutI.details->ix));
+         vassert(isIRAtom(st->Ist.PutI.details->data));
+         break;
+
+      case Ist_NoOp:
+      case Ist_IMark:
+         break;
+
+      default:
+         /* Any other tag is unexpected here: print it and abort. */
+         vex_printf("\n");
+         ppIRStmt(st);
+         vex_printf("\n");
+         vpanic("handle_gets_Stmt");
+   }
+
+   if (memRW) {
+      /* This statement accesses memory.  So we might need to dump all parts
+         of the environment corresponding to guest state that may not
+         be reordered with respect to memory references.  That means
+         at least the stack pointer. */
+      switch (pxControl) {
+         case VexRegUpdAllregsAtMemAccess:
+            /* Precise exceptions required at mem access.
+               Flush all guest state. */
+            for (j = 0; j < env->used; j++)
+               env->inuse[j] = False;
+            break;
+         case VexRegUpdSpAtMemAccess:
+            /* We need to dump the stack pointer
+               (needed for stack extension in m_signals.c).
+               preciseMemExnsFn will use vex_control.iropt_register_updates
+               to verify only the sp is to be checked. */
+            /* fallthrough */
+         case VexRegUpdUnwindregsAtMemAccess:
+            for (j = 0; j < env->used; j++) {
+               if (!env->inuse[j])
+                  continue;
+               /* Just flush the minimal amount required, as computed by
+                  preciseMemExnsFn. */
+               HWord k_lo = (env->key[j] >> 16) & 0xFFFF;
+               HWord k_hi = env->key[j] & 0xFFFF;
+               if (preciseMemExnsFn( k_lo, k_hi, pxControl ))
+                  env->inuse[j] = False;
+            }
+            break;
+         case VexRegUpdAllregsAtEachInsn:
+            // VexRegUpdAllregsAtEachInsn cannot happen here.
+            // fall through
+         case VexRegUpd_INVALID:
+         default:
+            vassert(0);
+      }
+   } /* if (memRW) */
+
+}
+
+
+/* Scan backwards, building up a set of (min offset, max
+   offset) pairs, indicating those parts of the guest state
+   for which the next event is a write.
+
+   On seeing a conditional exit, empty the set.
+
+   On seeing 'Put (minoff,maxoff) = t or c', if (minoff,maxoff) is
+   completely within the set, remove the Put.  Otherwise, add
+   (minoff,maxoff) to the set.
+
+   On seeing 'Get (minoff,maxoff)', remove any part of the set
+   overlapping (minoff,maxoff).  The same has to happen for any events
+   which implicitly read parts of the guest state: dirty helper calls
+   and loads/stores.
+*/
+
+static void redundant_put_removal_BB ( 
+               IRSB* bb,
+               Bool (*preciseMemExnsFn)(Int,Int,VexRegisterUpdates),
+               VexRegisterUpdates pxControl
+            )
+{
+   /* Backward scan over bb (see the long comment above).  'env' is a
+      set of packed (minoff,maxoff) guest-state ranges -- encoded by
+      mk_key_GetPut / mk_key_GetIPutI -- for which the next event,
+      looking forward from the current statement, is a write.  Any Put
+      whose range is already in the set is dead and is NoOp'd out. */
+   Int     i, j;
+   Bool    isPut;
+   IRStmt* st;
+   UInt    key = 0; /* keep gcc -O happy */
+
+   vassert(pxControl < VexRegUpdAllregsAtEachInsn);
+
+   HashHW* env = newHHW();
+
+   /* Initialise the running env with the fact that the final exit
+      writes the IP (or, whatever it claims to write.  We don't
+      care.) */
+   key = mk_key_GetPut(bb->offsIP, typeOfIRExpr(bb->tyenv, bb->next));
+   addToHHW(env, (HWord)key, 0);
+
+   /* And now scan backwards through the statements. */
+   for (i = bb->stmts_used-1; i >= 0; i--) {
+      st = bb->stmts[i];
+
+      if (st->tag == Ist_NoOp)
+         continue;
+
+      /* Deal with conditional exits. */
+      if (st->tag == Ist_Exit) {
+         //Bool re_add;
+         /* Need to throw out from the env, any part of it which
+            doesn't overlap with the guest state written by this exit.
+            Since the exit only writes one section, it's simplest to
+            do this: (1) check whether env contains a write that
+            completely overlaps the write done by this exit; (2) empty
+            out env; and (3) if (1) was true, add the write done by
+            this exit.
+
+            To make (1) a bit simpler, merely search for a write that
+            exactly matches the one done by this exit.  That's safe
+            because it will fail as often or more often than a full
+            overlap check, and failure to find an overlapping write in
+            env is the safe case (we just nuke env if that
+            happens). */
+         //vassert(isIRAtom(st->Ist.Exit.guard));
+         /* (1) */
+         //key = mk_key_GetPut(st->Ist.Exit.offsIP,
+         //                    typeOfIRConst(st->Ist.Exit.dst));
+         //re_add = lookupHHW(env, NULL, key);
+         /* (2) */
+         /* Steps (1) and (3) are currently disabled (see the commented
+            code); the conservative action taken is just to empty env,
+            which is always safe. */
+         for (j = 0; j < env->used; j++)
+            env->inuse[j] = False;
+         /* (3) */
+         //if (0 && re_add) 
+         //   addToHHW(env, (HWord)key, 0);
+         continue;
+      }
+
+      /* Deal with Puts */
+      switch (st->tag) {
+         case Ist_Put: 
+            isPut = True;
+            key = mk_key_GetPut( st->Ist.Put.offset, 
+                                 typeOfIRExpr(bb->tyenv,st->Ist.Put.data) );
+            vassert(isIRAtom(st->Ist.Put.data));
+            break;
+         case Ist_PutI:
+            isPut = True;
+            key = mk_key_GetIPutI( st->Ist.PutI.details->descr );
+            vassert(isIRAtom(st->Ist.PutI.details->ix));
+            vassert(isIRAtom(st->Ist.PutI.details->data));
+            break;
+         default: 
+            isPut = False;
+      }
+      /* Note: PutI statements are deliberately excluded here -- they
+         are never removed, and instead fall through to
+         handle_gets_Stmt below. */
+      if (isPut && st->tag != Ist_PutI) {
+         /* See if any single entry in env overlaps this Put.  This is
+            simplistic in that the transformation is valid if, say, two
+            or more entries in the env overlap this Put, but the use of
+            lookupHHW will only find a single entry which exactly
+            overlaps this Put.  This is suboptimal but safe. */
+         if (lookupHHW(env, NULL, (HWord)key)) {
+            /* This Put is redundant because a later one will overwrite
+               it.  So NULL (nop) it out. */
+            if (DEBUG_IROPT) {
+               vex_printf("rPUT: "); ppIRStmt(st);
+               vex_printf("\n");
+            }
+            bb->stmts[i] = IRStmt_NoOp();
+         } else {
+            /* We can't demonstrate that this Put is redundant, so add it
+               to the running collection. */
+            addToHHW(env, (HWord)key, 0);
+         }
+         continue;
+      }
+
+      /* Deal with Gets.  These remove bits of the environment since
+         appearance of a Get means that the next event for that slice
+         of the guest state is no longer a write, but a read.  Also
+         deals with implicit reads of guest state needed to maintain
+         precise exceptions. */
+      handle_gets_Stmt( env, st, preciseMemExnsFn, pxControl );
+   }
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Constant propagation and folding                        ---*/
+/*---------------------------------------------------------------*/
+
+/* Instrumentation for the sameIRExprs machinery below; compiled in
+   only when STATS_IROPT is defined. */
+#if STATS_IROPT
+/* How often sameIRExprs was invoked */
+static UInt invocation_count;
+/* How often sameIRExprs recursed through IRTemp assignments */
+static UInt recursion_count;
+/* How often sameIRExprs found identical IRExprs */
+static UInt success_count;
+/* How often recursing through assignments to IRTemps helped
+   establishing equality. */
+static UInt recursion_success_count;
+/* Whether or not recursing through an IRTemp assignment helped 
+   establishing IRExpr equality for a given sameIRExprs invocation. */
+static Bool recursion_helped;
+/* Whether or not a given sameIRExprs invocation recursed through an
+   IRTemp assignment */
+static Bool recursed;
+/* Maximum number of nodes ever visited when comparing two IRExprs. */
+static UInt max_nodes_visited;
+#endif /* STATS_IROPT */
+
+/* Count the number of nodes visited for a given sameIRExprs invocation. */
+static UInt num_nodes_visited;
+
+/* Do not visit more than NODE_LIMIT nodes when comparing two IRExprs.
+   This is to guard against performance degradation by visiting large
+   trees without success. */
+#define NODE_LIMIT 30
+
+
+/* The env in this section is a map from IRTemp to IRExpr*,
+   that is, an array indexed by IRTemp. */
+
+/* Do both expressions compute the same value? The answer is generally
+   conservative, i.e. it will report that the expressions do not compute
+   the same value when in fact they do. The reason is that we do not
+   keep track of changes in the guest state and memory. Thusly, two
+   Get's, GetI's or Load's, even when accessing the same location, will be
+   assumed to compute different values. After all the accesses may happen
+   at different times and the guest state / memory can have changed in
+   the meantime.
+
+   XXX IMPORTANT XXX the two expressions must have the same IR type.
+   DO NOT CALL HERE WITH DIFFERENTLY-TYPED EXPRESSIONS. */
+
+/* JRS 20-Mar-2012: split sameIRExprs_aux into a fast inlineable
+   wrapper that deals with the common tags-don't-match case, and a
+   slower out of line general case.  Saves a few insns. */
+
+__attribute__((noinline))
+static Bool sameIRExprs_aux2 ( IRExpr** env, IRExpr* e1, IRExpr* e2 );
+
+inline
+static Bool sameIRExprs_aux ( IRExpr** env, IRExpr* e1, IRExpr* e2 )
+{
+   /* Fast inlineable path: expressions with differing tags can never
+      be the same.  The expensive structural comparison lives out of
+      line in sameIRExprs_aux2. */
+   if (e1->tag != e2->tag) return False;
+   return sameIRExprs_aux2(env, e1, e2);
+}
+
+__attribute__((noinline))
+static Bool sameIRExprs_aux2 ( IRExpr** env, IRExpr* e1, IRExpr* e2 )
+{
+   /* Slow-path structural comparison.  The caller (sameIRExprs_aux)
+      has already established e1->tag == e2->tag.  Returns a
+      conservative answer: False means "can't prove equal". */
+   if (num_nodes_visited++ > NODE_LIMIT) return False;
+
+   switch (e1->tag) {
+      case Iex_RdTmp:
+         if (e1->Iex.RdTmp.tmp == e2->Iex.RdTmp.tmp) return True;
+
+         /* Different tmps: if both have bindings in env, compare the
+            expressions they are bound to instead. */
+         if (env[e1->Iex.RdTmp.tmp] && env[e2->Iex.RdTmp.tmp]) {
+            Bool same = sameIRExprs_aux(env, env[e1->Iex.RdTmp.tmp],
+                                        env[e2->Iex.RdTmp.tmp]);
+#if STATS_IROPT
+            recursed = True;
+            if (same) recursion_helped = True;
+#endif
+            return same;
+         }
+         return False;
+
+      case Iex_Get:
+      case Iex_GetI:
+      case Iex_Load:
+         /* Guest state / memory could have changed in the meantime. */
+         return False;
+
+      case Iex_Binop:
+         return toBool( e1->Iex.Binop.op == e2->Iex.Binop.op
+                        && sameIRExprs_aux( env, e1->Iex.Binop.arg1,
+                                                 e2->Iex.Binop.arg1 )
+                        && sameIRExprs_aux( env, e1->Iex.Binop.arg2,
+                                                 e2->Iex.Binop.arg2 ));
+
+      case Iex_Unop:
+         return toBool( e1->Iex.Unop.op == e2->Iex.Unop.op
+                        && sameIRExprs_aux( env, e1->Iex.Unop.arg,
+                                                 e2->Iex.Unop.arg ));
+
+      case Iex_Const: {
+         IRConst *c1 = e1->Iex.Const.con;
+         IRConst *c2 = e2->Iex.Const.con;
+         /* Caller guarantees both expressions have the same IR type,
+            hence the const tags must agree. */
+         vassert(c1->tag == c2->tag);
+         switch (c1->tag) {
+            case Ico_U1:   return toBool( c1->Ico.U1  == c2->Ico.U1 );
+            case Ico_U8:   return toBool( c1->Ico.U8  == c2->Ico.U8 );
+            case Ico_U16:  return toBool( c1->Ico.U16 == c2->Ico.U16 );
+            case Ico_U32:  return toBool( c1->Ico.U32 == c2->Ico.U32 );
+            case Ico_U64:  return toBool( c1->Ico.U64 == c2->Ico.U64 );
+            default: break;
+         }
+         return False;
+      }
+
+      case Iex_Triop: {
+         IRTriop *tri1 = e1->Iex.Triop.details;
+         IRTriop *tri2 = e2->Iex.Triop.details;
+         return toBool( tri1->op == tri2->op
+                        && sameIRExprs_aux( env, tri1->arg1, tri2->arg1 )
+                        && sameIRExprs_aux( env, tri1->arg2, tri2->arg2 )
+                        && sameIRExprs_aux( env, tri1->arg3, tri2->arg3 ));
+      }
+
+      case Iex_ITE:
+         return toBool(    sameIRExprs_aux( env, e1->Iex.ITE.cond,
+                                                 e2->Iex.ITE.cond )
+                        && sameIRExprs_aux( env, e1->Iex.ITE.iftrue,
+                                                 e2->Iex.ITE.iftrue )
+                        && sameIRExprs_aux( env, e1->Iex.ITE.iffalse,
+                                                 e2->Iex.ITE.iffalse ));
+
+      default:
+         /* Not very likely to be "same". */
+         break;
+   }
+
+   return False;
+}
+
+/* Public entry point for the comparison: resets the per-invocation
+   node budget, runs the comparison, and (optionally) updates the
+   STATS_IROPT counters. */
+inline
+static Bool sameIRExprs ( IRExpr** env, IRExpr* e1, IRExpr* e2 )
+{
+   Bool same;
+
+   num_nodes_visited = 0;
+   same = sameIRExprs_aux(env, e1, e2);
+
+#if STATS_IROPT
+   ++invocation_count;
+   if (recursed) ++recursion_count;
+   success_count += same;
+   if (same && recursion_helped)
+      ++recursion_success_count;
+   if (num_nodes_visited > max_nodes_visited)
+      max_nodes_visited = num_nodes_visited;
+   recursed = False; /* reset */
+   recursion_helped = False;  /* reset */
+#endif /* STATS_IROPT */
+
+   return same;
+}
+
+
+/* Debugging-only hack (not used in production runs): make a guess
+   whether sameIRExprs might assert due to the two args being of
+   different types.  If in doubt return False.  Is only used when
+   --vex-iropt-level > 0, that is, vex_control.iropt_verbosity > 0.
+   Bad because it duplicates functionality from typeOfIRExpr.  See
+   comment on the single use point below for rationale. */
+static
+Bool debug_only_hack_sameIRExprs_might_assert ( IRExpr* e1, IRExpr* e2 )
+{
+   if (e1->tag != e2->tag) return False;
+   switch (e1->tag) {
+      case Iex_Const: {
+         /* The only interesting case: sameIRExprs_aux2 asserts that
+            the two const tags agree, so differing tags here predict
+            an assertion failure. */
+         IRConst *c1 = e1->Iex.Const.con;
+         IRConst *c2 = e2->Iex.Const.con;
+         return c1->tag != c2->tag;
+      }
+      default:
+         break;
+   }
+   return False;
+}
+
+
+/* Is this literally IRExpr_Const(IRConst_U32(0)) ?  Returns False for
+   any non-const or non-U32 expression. */
+static Bool isZeroU32 ( IRExpr* e )
+{
+   return toBool( e->tag == Iex_Const 
+                  && e->Iex.Const.con->tag == Ico_U32
+                  && e->Iex.Const.con->Ico.U32 == 0);
+}
+
+/* Is this literally IRExpr_Const(IRConst_U64(0)) ?
+   Currently unused; commented out to avoid compiler warning */
+#if 0
+static Bool isZeroU64 ( IRExpr* e )
+{
+   return toBool( e->tag == Iex_Const 
+                  && e->Iex.Const.con->tag == Ico_U64
+                  && e->Iex.Const.con->Ico.U64 == 0);
+}
+#endif
+
+/* Is this literally IRExpr_Const(IRConst_V128(0)) ?  (V128 constants
+   are 16-bit summaries, one bit per byte lane.) */
+static Bool isZeroV128 ( IRExpr* e )
+{
+   return toBool( e->tag == Iex_Const 
+                  && e->Iex.Const.con->tag == Ico_V128
+                  && e->Iex.Const.con->Ico.V128 == 0x0000);
+}
+
+/* Is this literally IRExpr_Const(IRConst_V256(0)) ?  (V256 constants
+   are 32-bit summaries, one bit per byte lane.) */
+static Bool isZeroV256 ( IRExpr* e )
+{
+   return toBool( e->tag == Iex_Const 
+                  && e->Iex.Const.con->tag == Ico_V256
+                  && e->Iex.Const.con->Ico.V256 == 0x00000000);
+}
+
+/* Is this an integer constant with value 0 ?  Panics if handed a
+   const whose tag is not an integer type (e.g. a vector const). */
+static Bool isZeroU ( IRExpr* e )
+{
+   if (e->tag != Iex_Const) return False;
+   switch (e->Iex.Const.con->tag) {
+      case Ico_U1:    return toBool( e->Iex.Const.con->Ico.U1  == 0);
+      case Ico_U8:    return toBool( e->Iex.Const.con->Ico.U8  == 0);
+      case Ico_U16:   return toBool( e->Iex.Const.con->Ico.U16 == 0);
+      case Ico_U32:   return toBool( e->Iex.Const.con->Ico.U32 == 0);
+      case Ico_U64:   return toBool( e->Iex.Const.con->Ico.U64 == 0);
+      default: vpanic("isZeroU");
+   }
+}
+
+/* Is this an integer constant with value 1---1b ?  Note: unlike
+   isZeroU, there is no Ico_U1 case here; U1 (and any non-integer
+   tag) falls through to the panic. */
+static Bool isOnesU ( IRExpr* e )
+{
+   if (e->tag != Iex_Const) return False;
+   switch (e->Iex.Const.con->tag) {
+      case Ico_U8:    return toBool( e->Iex.Const.con->Ico.U8  == 0xFF);
+      case Ico_U16:   return toBool( e->Iex.Const.con->Ico.U16 == 0xFFFF);
+      case Ico_U32:   return toBool( e->Iex.Const.con->Ico.U32
+                                     == 0xFFFFFFFF);
+      case Ico_U64:   return toBool( e->Iex.Const.con->Ico.U64
+                                     == 0xFFFFFFFFFFFFFFFFULL);
+      default: ppIRExpr(e); vpanic("isOnesU");
+   }
+}
+
+/* Logical negation of a Bool, with a sanity check that the input is a
+   genuine True/False value and not some other bit pattern. */
+static Bool notBool ( Bool b )
+{
+   if (b == True) return False;
+   if (b == False) return True;
+   vpanic("notBool");
+}
+
+/* Make a zero which has the same type as the result of the given
+   primop.  Only the primops actually used by the simplification rules
+   below are handled; anything else panics. */
+static IRExpr* mkZeroOfPrimopResultType ( IROp op )
+{
+   switch (op) {
+      case Iop_CmpNE32: return IRExpr_Const(IRConst_U1(toBool(0)));
+      case Iop_Xor8:  return IRExpr_Const(IRConst_U8(0));
+      case Iop_Xor16: return IRExpr_Const(IRConst_U16(0));
+      case Iop_Sub32:
+      case Iop_Xor32: return IRExpr_Const(IRConst_U32(0));
+      case Iop_And64:
+      case Iop_Sub64:
+      case Iop_Xor64: return IRExpr_Const(IRConst_U64(0));
+      case Iop_XorV128:
+      case Iop_AndV128: return IRExpr_Const(IRConst_V128(0));
+      case Iop_AndV256: return IRExpr_Const(IRConst_V256(0));
+      default: vpanic("mkZeroOfPrimopResultType: bad primop");
+   }
+}
+
+/* Make a value containing all 1-bits, which has the same type as the
+   result of the given primop.  Only the primops used by the
+   simplification rules below are handled; anything else panics. */
+static IRExpr* mkOnesOfPrimopResultType ( IROp op )
+{
+   switch (op) {
+      case Iop_CmpEQ32:
+      case Iop_CmpEQ64:
+         return IRExpr_Const(IRConst_U1(toBool(1)));
+      case Iop_Or8:
+         return IRExpr_Const(IRConst_U8(0xFF));
+      case Iop_Or16:
+         return IRExpr_Const(IRConst_U16(0xFFFF));
+      case Iop_Or32:
+         return IRExpr_Const(IRConst_U32(0xFFFFFFFF));
+      case Iop_CmpEQ8x8:
+      case Iop_Or64:
+         return IRExpr_Const(IRConst_U64(0xFFFFFFFFFFFFFFFFULL));
+      case Iop_CmpEQ8x16:
+      case Iop_CmpEQ16x8:
+      case Iop_CmpEQ32x4:
+         /* V128 consts are 16-bit lane summaries; 0xFFFF means every
+            byte lane is all-ones. */
+         return IRExpr_Const(IRConst_V128(0xFFFF));
+      default:
+         ppIROp(op);
+         vpanic("mkOnesOfPrimopResultType: bad primop");
+   }
+}
+
+/* Helpers for folding Clz32/64. */
+/* Count leading zeroes of a nonzero 64-bit value by scanning from the
+   most significant bit downwards. */
+static UInt fold_Clz64 ( ULong value )
+{
+   UInt i;
+   vassert(value != 0ULL); /* no defined semantics for arg==0 */
+   for (i = 0; i < 64; ++i) {
+      if (0ULL != (value & (((ULong)1) << (63 - i)))) return i;
+   }
+   vassert(0);
+   /*NOTREACHED*/
+   return 0;
+}
+
+/* 32-bit counterpart of fold_Clz64: count leading zeroes of a nonzero
+   32-bit value. */
+static UInt fold_Clz32 ( UInt value )
+{
+   UInt i;
+   vassert(value != 0); /* no defined semantics for arg==0 */
+   for (i = 0; i < 32; ++i) {
+      if (0 != (value & (((UInt)1) << (31 - i)))) return i;
+   }
+   vassert(0);
+   /*NOTREACHED*/
+   return 0;
+}
+
+/* V64 holds 8 summary-constant bits in V128/V256 style.  Convert to
+   the corresponding real constant. */
+//XXX re-check this before use
+//static ULong de_summarise_V64 ( UChar v64 )
+//{
+//   ULong r = 0;
+//   if (v64 & (1<<0)) r |= 0x00000000000000FFULL;
+//   if (v64 & (1<<1)) r |= 0x000000000000FF00ULL;
+//   if (v64 & (1<<2)) r |= 0x0000000000FF0000ULL;
+//   if (v64 & (1<<3)) r |= 0x00000000FF000000ULL;
+//   if (v64 & (1<<4)) r |= 0x000000FF00000000ULL;
+//   if (v64 & (1<<5)) r |= 0x0000FF0000000000ULL;
+//   if (v64 & (1<<6)) r |= 0x00FF000000000000ULL;
+//   if (v64 & (1<<7)) r |= 0xFF00000000000000ULL;
+//   return r;
+//}
+
+/* Helper for arbitrary expression pattern matching in flat IR.  If
+   'e' is a reference to a tmp, look it up in env -- repeatedly, if
+   necessary -- until it resolves to a non-tmp.  Note that this can
+   return NULL if it can't resolve 'e' to a new expression, which will
+   be the case if 'e' is instead defined by an IRStmt (IRDirty or
+   LLSC). */
+static IRExpr* chase ( IRExpr** env, IRExpr* e )
+{
+   /* Why is this loop guaranteed to terminate?  Because all tmps must
+      have definitions before use, hence a tmp cannot be bound
+      (directly or indirectly) to itself. */
+   while (e->tag == Iex_RdTmp) {
+      if (0) { vex_printf("chase "); ppIRExpr(e); vex_printf("\n"); }
+      e = env[(Int)e->Iex.RdTmp.tmp];
+      if (e == NULL) break;  /* tmp has no binding in env */
+   }
+   return e;
+}
+
+/* Similar to |chase|, but follows at most one level of tmp reference.
+   Unlike |chase|, also tolerates a NULL input, which is passed
+   through unchanged. */
+static IRExpr* chase1 ( IRExpr** env, IRExpr* e )
+{
+   if (e == NULL || e->tag != Iex_RdTmp)
+      return e;
+   else
+      return env[(Int)e->Iex.RdTmp.tmp];
+}
+
+static IRExpr* fold_Expr ( IRExpr** env, IRExpr* e )
+{
+   Int     shift;
+   IRExpr* e2 = e; /* e2 is the result of folding e, if possible */
+
+   switch (e->tag) {
+   case Iex_Unop:
+      /* UNARY ops */
+      if (e->Iex.Unop.arg->tag == Iex_Const) {
+         switch (e->Iex.Unop.op) {
+         case Iop_1Uto8:
+            e2 = IRExpr_Const(IRConst_U8(toUChar(
+                    e->Iex.Unop.arg->Iex.Const.con->Ico.U1
+                    ? 1 : 0)));
+            break;
+         case Iop_1Uto32:
+            e2 = IRExpr_Const(IRConst_U32(
+                    e->Iex.Unop.arg->Iex.Const.con->Ico.U1
+                    ? 1 : 0));
+            break;
+         case Iop_1Uto64:
+            e2 = IRExpr_Const(IRConst_U64(
+                    e->Iex.Unop.arg->Iex.Const.con->Ico.U1
+                    ? 1 : 0));
+            break;
+
+         case Iop_1Sto8:
+            e2 = IRExpr_Const(IRConst_U8(toUChar(
+                    e->Iex.Unop.arg->Iex.Const.con->Ico.U1
+                    ? 0xFF : 0)));
+            break;
+         case Iop_1Sto16:
+            e2 = IRExpr_Const(IRConst_U16(toUShort(
+                    e->Iex.Unop.arg->Iex.Const.con->Ico.U1
+                    ? 0xFFFF : 0)));
+            break;
+         case Iop_1Sto32:
+            e2 = IRExpr_Const(IRConst_U32(
+                    e->Iex.Unop.arg->Iex.Const.con->Ico.U1
+                    ? 0xFFFFFFFF : 0));
+            break;
+         case Iop_1Sto64:
+            e2 = IRExpr_Const(IRConst_U64(
+                    e->Iex.Unop.arg->Iex.Const.con->Ico.U1
+                    ? 0xFFFFFFFFFFFFFFFFULL : 0));
+            break;
+
+         case Iop_8Sto32: {
+            UInt u32 = e->Iex.Unop.arg->Iex.Const.con->Ico.U8;
+            u32 <<= 24;
+            u32 = (Int)u32 >> 24;   /* signed shift */
+            e2 = IRExpr_Const(IRConst_U32(u32));
+            break;
+         }
+         case Iop_16Sto32: {
+            UInt u32 = e->Iex.Unop.arg->Iex.Const.con->Ico.U16;
+            u32 <<= 16;
+            u32 = (Int)u32 >> 16;   /* signed shift */
+            e2 = IRExpr_Const(IRConst_U32(u32));
+            break;
+         }
+         case Iop_8Uto64:
+            e2 = IRExpr_Const(IRConst_U64(
+                    0xFFULL & e->Iex.Unop.arg->Iex.Const.con->Ico.U8));
+            break;
+         case Iop_16Uto64:
+            e2 = IRExpr_Const(IRConst_U64(
+                    0xFFFFULL & e->Iex.Unop.arg->Iex.Const.con->Ico.U16));
+            break;
+         case Iop_8Uto32:
+            e2 = IRExpr_Const(IRConst_U32(
+                    0xFF & e->Iex.Unop.arg->Iex.Const.con->Ico.U8));
+            break;
+         case Iop_8Sto16: {
+            UShort u16 = e->Iex.Unop.arg->Iex.Const.con->Ico.U8;
+            u16 <<= 8;
+            u16 = (Short)u16 >> 8;  /* signed shift */
+            e2 = IRExpr_Const(IRConst_U16(u16));
+            break;
+         }
+         case Iop_8Uto16:
+            e2 = IRExpr_Const(IRConst_U16(
+                    0xFF & e->Iex.Unop.arg->Iex.Const.con->Ico.U8));
+            break;
+         case Iop_16Uto32:
+            e2 = IRExpr_Const(IRConst_U32(
+                    0xFFFF & e->Iex.Unop.arg->Iex.Const.con->Ico.U16));
+            break;
+         case Iop_32to16:
+            e2 = IRExpr_Const(IRConst_U16(toUShort(
+                    0xFFFF & e->Iex.Unop.arg->Iex.Const.con->Ico.U32)));
+            break;
+         case Iop_32to8:
+            e2 = IRExpr_Const(IRConst_U8(toUChar(
+                    0xFF & e->Iex.Unop.arg->Iex.Const.con->Ico.U32)));
+            break;
+         case Iop_32to1:
+            e2 = IRExpr_Const(IRConst_U1(toBool(
+                    1 == (1 & e->Iex.Unop.arg->Iex.Const.con->Ico.U32)
+                 )));
+            break;
+         case Iop_64to1:
+            e2 = IRExpr_Const(IRConst_U1(toBool(
+                    1 == (1 & e->Iex.Unop.arg->Iex.Const.con->Ico.U64)
+                 )));
+            break;
+
+         case Iop_NotV128:
+            e2 = IRExpr_Const(IRConst_V128(
+                    ~ (e->Iex.Unop.arg->Iex.Const.con->Ico.V128)));
+            break;
+         case Iop_Not64:
+            e2 = IRExpr_Const(IRConst_U64(
+                    ~ (e->Iex.Unop.arg->Iex.Const.con->Ico.U64)));
+            break;
+         case Iop_Not32:
+            e2 = IRExpr_Const(IRConst_U32(
+                    ~ (e->Iex.Unop.arg->Iex.Const.con->Ico.U32)));
+            break;
+         case Iop_Not16:
+            e2 = IRExpr_Const(IRConst_U16(toUShort(
+                    ~ (e->Iex.Unop.arg->Iex.Const.con->Ico.U16))));
+            break;
+         case Iop_Not8:
+            e2 = IRExpr_Const(IRConst_U8(toUChar(
+                    ~ (e->Iex.Unop.arg->Iex.Const.con->Ico.U8))));
+            break;
+
+         case Iop_Not1:
+            e2 = IRExpr_Const(IRConst_U1(
+                    notBool(e->Iex.Unop.arg->Iex.Const.con->Ico.U1)));
+            break;
+
+         case Iop_64to8: {
+            ULong w64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U64;
+            w64 &= 0xFFULL;
+            e2 = IRExpr_Const(IRConst_U8( (UChar)w64 ));
+            break;
+         }
+         case Iop_64to16: {
+            ULong w64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U64;
+            w64 &= 0xFFFFULL;
+            e2 = IRExpr_Const(IRConst_U16( (UShort)w64 ));
+            break;
+         }
+         case Iop_64to32: {
+            ULong w64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U64;
+            w64 &= 0x00000000FFFFFFFFULL;
+            e2 = IRExpr_Const(IRConst_U32( (UInt)w64 ));
+            break;
+         }
+         case Iop_64HIto32: {
+            ULong w64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U64;
+            w64 >>= 32;
+            e2 = IRExpr_Const(IRConst_U32( (UInt)w64 ));
+            break;
+         }
+         case Iop_32Uto64:
+            e2 = IRExpr_Const(IRConst_U64(
+                    0xFFFFFFFFULL 
+                    & e->Iex.Unop.arg->Iex.Const.con->Ico.U32));
+            break;
+         case Iop_16Sto64: {
+            ULong u64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U16;
+            u64 <<= 48;
+            u64 = (Long)u64 >> 48;   /* signed shift */
+            e2 = IRExpr_Const(IRConst_U64(u64));
+            break;
+         }
+         case Iop_32Sto64: {
+            ULong u64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U32;
+            u64 <<= 32;
+            u64 = (Long)u64 >> 32;   /* signed shift */
+            e2 = IRExpr_Const(IRConst_U64(u64));
+            break;
+         }
+
+         case Iop_16to8: {
+            UShort w16 = e->Iex.Unop.arg->Iex.Const.con->Ico.U16;
+            w16 &= 0xFF;
+            e2 = IRExpr_Const(IRConst_U8( (UChar)w16 ));
+            break;
+         }
+         case Iop_16HIto8: {
+            UShort w16 = e->Iex.Unop.arg->Iex.Const.con->Ico.U16;
+            w16 >>= 8;
+            w16 &= 0xFF;
+            e2 = IRExpr_Const(IRConst_U8( (UChar)w16 ));
+            break;
+         }
+
+         case Iop_CmpNEZ8:
+            e2 = IRExpr_Const(IRConst_U1(toBool(
+                    0 != 
+                    (0xFF & e->Iex.Unop.arg->Iex.Const.con->Ico.U8)
+                 )));
+            break;
+         case Iop_CmpNEZ32:
+            e2 = IRExpr_Const(IRConst_U1(toBool(
+                    0 != 
+                    (0xFFFFFFFF & e->Iex.Unop.arg->Iex.Const.con->Ico.U32)
+                 )));
+            break;
+         case Iop_CmpNEZ64:
+            e2 = IRExpr_Const(IRConst_U1(toBool(
+                    0ULL != e->Iex.Unop.arg->Iex.Const.con->Ico.U64
+                 )));
+            break;
+
+         case Iop_CmpwNEZ32: {
+            UInt w32 = e->Iex.Unop.arg->Iex.Const.con->Ico.U32;
+            if (w32 == 0)
+               e2 = IRExpr_Const(IRConst_U32( 0 ));
+            else
+               e2 = IRExpr_Const(IRConst_U32( 0xFFFFFFFF ));
+            break;
+         }
+         case Iop_CmpwNEZ64: {
+            ULong w64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U64;
+            if (w64 == 0)
+               e2 = IRExpr_Const(IRConst_U64( 0 ));
+            else
+               e2 = IRExpr_Const(IRConst_U64( 0xFFFFFFFFFFFFFFFFULL ));
+            break;
+         }
+
+         case Iop_Left32: {
+            UInt u32 = e->Iex.Unop.arg->Iex.Const.con->Ico.U32;
+            Int  s32 = (Int)(u32 & 0xFFFFFFFF);
+            s32 = (s32 | (-s32));
+            e2 = IRExpr_Const( IRConst_U32( (UInt)s32 ));
+            break;
+         }
+
+         case Iop_Left64: {
+            ULong u64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U64;
+            Long  s64 = (Long)u64;
+            s64 = (s64 | (-s64));
+            e2 = IRExpr_Const( IRConst_U64( (ULong)s64 ));
+            break;
+         }
+
+         case Iop_Clz32: {
+            UInt u32 = e->Iex.Unop.arg->Iex.Const.con->Ico.U32;
+            if (u32 != 0)
+               e2 = IRExpr_Const(IRConst_U32(fold_Clz32(u32)));
+            break;
+         }
+         case Iop_Clz64: {
+            ULong u64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U64;
+            if (u64 != 0ULL)
+               e2 = IRExpr_Const(IRConst_U64(fold_Clz64(u64)));
+            break;
+         }
+
+         /* For these vector ones, can't fold all cases, but at least
+            do the most obvious one.  Could do better here using
+            summarise/desummarise of vector constants, but too
+            difficult to verify; hence just handle the zero cases. */
+         case Iop_32UtoV128: {
+            UInt u32 = e->Iex.Unop.arg->Iex.Const.con->Ico.U32;
+            if (0 == u32) {
+               e2 = IRExpr_Const(IRConst_V128(0x0000));
+            } else {
+               goto unhandled;
+            }
+            break;
+         }
+         case Iop_V128to64: {
+            UShort v128 = e->Iex.Unop.arg->Iex.Const.con->Ico.V128;
+            if (0 == ((v128 >> 0) & 0xFF)) {
+               e2 = IRExpr_Const(IRConst_U64(0));
+            } else {
+               goto unhandled;
+            }
+            break;
+         }
+         case Iop_V128HIto64: {
+            UShort v128 = e->Iex.Unop.arg->Iex.Const.con->Ico.V128;
+            if (0 == ((v128 >> 8) & 0xFF)) {
+               e2 = IRExpr_Const(IRConst_U64(0));
+            } else {
+               goto unhandled;
+            }
+            break;
+         }
+         case Iop_64UtoV128: {
+            ULong u64 = e->Iex.Unop.arg->Iex.Const.con->Ico.U64;
+            if (0 == u64) {
+               e2 = IRExpr_Const(IRConst_V128(0x0000));
+            } else {
+               goto unhandled;
+            }
+            break;
+         }
+
+         /* Even stupider (although still correct ..) */
+         case Iop_V256to64_0: case Iop_V256to64_1:
+         case Iop_V256to64_2: case Iop_V256to64_3: {
+            UInt v256 = e->Iex.Unop.arg->Iex.Const.con->Ico.V256;
+            if (v256 == 0x00000000) {
+               e2 = IRExpr_Const(IRConst_U64(0));
+            } else {
+               goto unhandled;
+            }
+            break;
+         }
+
+         case Iop_ZeroHI64ofV128: {
+            /* Could do better here -- only need to look at the bottom 64 bits
+               of the argument, really. */
+            UShort v128 = e->Iex.Unop.arg->Iex.Const.con->Ico.V128;
+            if (v128 == 0x0000) {
+               e2 = IRExpr_Const(IRConst_V128(0x0000));
+            } else {
+               goto unhandled;
+            }
+            break;
+         }
+
+         default: 
+            goto unhandled;
+      }
+      }
+      break;
+
+   case Iex_Binop:
+      /* BINARY ops */
+      if (e->Iex.Binop.arg1->tag == Iex_Const
+          && e->Iex.Binop.arg2->tag == Iex_Const) {
+         /* cases where both args are consts */
+         switch (e->Iex.Binop.op) {
+
+            /* -- Or -- */
+            case Iop_Or8:
+               e2 = IRExpr_Const(IRConst_U8(toUChar( 
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U8
+                        | e->Iex.Binop.arg2->Iex.Const.con->Ico.U8))));
+               break;
+            case Iop_Or16:
+               e2 = IRExpr_Const(IRConst_U16(toUShort(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U16
+                        | e->Iex.Binop.arg2->Iex.Const.con->Ico.U16))));
+               break;
+            case Iop_Or32:
+               e2 = IRExpr_Const(IRConst_U32(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U32
+                        | e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)));
+               break;
+            case Iop_Or64:
+               e2 = IRExpr_Const(IRConst_U64(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U64
+                        | e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)));
+               break;
+            case Iop_OrV128:
+               e2 = IRExpr_Const(IRConst_V128(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.V128
+                        | e->Iex.Binop.arg2->Iex.Const.con->Ico.V128)));
+               break;
+
+            /* -- Xor -- */
+            case Iop_Xor8:
+               e2 = IRExpr_Const(IRConst_U8(toUChar( 
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U8
+                        ^ e->Iex.Binop.arg2->Iex.Const.con->Ico.U8))));
+               break;
+            case Iop_Xor16:
+               e2 = IRExpr_Const(IRConst_U16(toUShort(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U16
+                        ^ e->Iex.Binop.arg2->Iex.Const.con->Ico.U16))));
+               break;
+            case Iop_Xor32:
+               e2 = IRExpr_Const(IRConst_U32(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U32
+                        ^ e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)));
+               break;
+            case Iop_Xor64:
+               e2 = IRExpr_Const(IRConst_U64(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U64
+                        ^ e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)));
+               break;
+            case Iop_XorV128:
+               e2 = IRExpr_Const(IRConst_V128(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.V128
+                        ^ e->Iex.Binop.arg2->Iex.Const.con->Ico.V128)));
+               break;
+
+            /* -- And -- */
+            case Iop_And8:
+               e2 = IRExpr_Const(IRConst_U8(toUChar( 
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U8
+                        & e->Iex.Binop.arg2->Iex.Const.con->Ico.U8))));
+               break;
+            case Iop_And16:
+               e2 = IRExpr_Const(IRConst_U16(toUShort(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U16
+                        & e->Iex.Binop.arg2->Iex.Const.con->Ico.U16))));
+               break;
+            case Iop_And32:
+               e2 = IRExpr_Const(IRConst_U32(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U32
+                        & e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)));
+               break;
+            case Iop_And64:
+               e2 = IRExpr_Const(IRConst_U64(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U64
+                        & e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)));
+               break;
+            case Iop_AndV128:
+               e2 = IRExpr_Const(IRConst_V128(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.V128
+                        & e->Iex.Binop.arg2->Iex.Const.con->Ico.V128)));
+               break;
+
+            /* -- Add -- */
+            case Iop_Add8:
+               e2 = IRExpr_Const(IRConst_U8(toUChar( 
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U8
+                        + e->Iex.Binop.arg2->Iex.Const.con->Ico.U8))));
+               break;
+            case Iop_Add32:
+               e2 = IRExpr_Const(IRConst_U32(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U32
+                        + e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)));
+               break;
+            case Iop_Add64:
+               e2 = IRExpr_Const(IRConst_U64(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U64
+                        + e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)));
+               break;
+
+            /* -- Sub -- */
+            case Iop_Sub8:
+               e2 = IRExpr_Const(IRConst_U8(toUChar( 
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U8
+                        - e->Iex.Binop.arg2->Iex.Const.con->Ico.U8))));
+               break;
+            case Iop_Sub32:
+               e2 = IRExpr_Const(IRConst_U32(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U32
+                        - e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)));
+               break;
+            case Iop_Sub64:
+               e2 = IRExpr_Const(IRConst_U64(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U64
+                        - e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)));
+               break;
+
+            /* -- Max32U -- */
+            case Iop_Max32U: {
+               UInt u32a = e->Iex.Binop.arg1->Iex.Const.con->Ico.U32;
+               UInt u32b = e->Iex.Binop.arg2->Iex.Const.con->Ico.U32;
+               UInt res  = u32a > u32b ? u32a : u32b;
+               e2 = IRExpr_Const(IRConst_U32(res));
+               break;
+            }
+
+            /* -- Mul -- */
+            case Iop_Mul32:
+               e2 = IRExpr_Const(IRConst_U32(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U32
+                        * e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)));
+               break;
+            case Iop_Mul64:
+               e2 = IRExpr_Const(IRConst_U64(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U64
+                        * e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)));
+               break;
+
+            case Iop_MullS32: {
+               /* very paranoid */
+               UInt  u32a = e->Iex.Binop.arg1->Iex.Const.con->Ico.U32;
+               UInt  u32b = e->Iex.Binop.arg2->Iex.Const.con->Ico.U32;
+               Int   s32a = (Int)u32a;
+               Int   s32b = (Int)u32b;
+               Long  s64a = (Long)s32a;
+               Long  s64b = (Long)s32b;
+               Long  sres = s64a * s64b;
+               ULong ures = (ULong)sres;
+               e2 = IRExpr_Const(IRConst_U64(ures));
+               break;
+            }
+
+            /* -- Shl -- */
+            case Iop_Shl32:
+               vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
+               shift = (Int)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U8);
+               if (shift >= 0 && shift <= 31)
+                  e2 = IRExpr_Const(IRConst_U32(
+                          (e->Iex.Binop.arg1->Iex.Const.con->Ico.U32
+                           << shift)));
+               break;
+            case Iop_Shl64:
+               vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
+               shift = (Int)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U8);
+               if (shift >= 0 && shift <= 63)
+                  e2 = IRExpr_Const(IRConst_U64(
+                          (e->Iex.Binop.arg1->Iex.Const.con->Ico.U64
+                           << shift)));
+               break;
+
+            /* -- Sar -- */
+            case Iop_Sar32: {
+               /* paranoid ... */
+               /*signed*/ Int s32;
+               vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
+               s32   = (Int)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U32);
+               shift = (Int)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U8);
+               if (shift >= 0 && shift <= 31) {
+                  s32 >>=/*signed*/ shift;
+                  e2 = IRExpr_Const(IRConst_U32((UInt)s32));
+               }
+               break;
+            }
+            case Iop_Sar64: {
+               /* paranoid ... */
+               /*signed*/ Long s64;
+               vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
+               s64   = (Long)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U64);
+               shift = (Int)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U8);
+               if (shift >= 0 && shift <= 63) {
+                  s64 >>=/*signed*/ shift;
+                  e2 = IRExpr_Const(IRConst_U64((ULong)s64));
+               }
+               break;
+            }
+
+            /* -- Shr -- */
+            case Iop_Shr32: {
+               /* paranoid ... */
+               /*unsigned*/ UInt u32;
+               vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
+               u32   = (UInt)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U32);
+               shift = (Int)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U8);
+               if (shift >= 0 && shift <= 31) {
+                  u32 >>=/*unsigned*/ shift;
+                  e2 = IRExpr_Const(IRConst_U32(u32));
+               }
+               break;
+            }
+            case Iop_Shr64: {
+               /* paranoid ... */
+               /*unsigned*/ ULong u64;
+               vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
+               u64   = (ULong)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U64);
+               shift = (Int)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U8);
+               if (shift >= 0 && shift <= 63) {
+                  u64 >>=/*unsigned*/ shift;
+                  e2 = IRExpr_Const(IRConst_U64(u64));
+               }
+               break;
+            }
+
+            /* -- CmpEQ -- */
+            case Iop_CmpEQ32:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U32
+                        == e->Iex.Binop.arg2->Iex.Const.con->Ico.U32))));
+               break;
+            case Iop_CmpEQ64:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U64
+                        == e->Iex.Binop.arg2->Iex.Const.con->Ico.U64))));
+               break;
+
+            /* -- CmpNE -- */
+            case Iop_CmpNE8:
+            case Iop_CasCmpNE8:
+            case Iop_ExpCmpNE8:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       ((0xFF & e->Iex.Binop.arg1->Iex.Const.con->Ico.U8)
+                        != (0xFF & e->Iex.Binop.arg2->Iex.Const.con->Ico.U8)))));
+               break;
+            case Iop_CmpNE32:
+            case Iop_CasCmpNE32:
+            case Iop_ExpCmpNE32:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U32
+                        != e->Iex.Binop.arg2->Iex.Const.con->Ico.U32))));
+               break;
+            case Iop_CmpNE64:
+            case Iop_CasCmpNE64:
+            case Iop_ExpCmpNE64:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       (e->Iex.Binop.arg1->Iex.Const.con->Ico.U64
+                        != e->Iex.Binop.arg2->Iex.Const.con->Ico.U64))));
+               break;
+
+            /* -- CmpLEU -- */
+            case Iop_CmpLE32U:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       ((UInt)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U32)
+                        <= (UInt)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)))));
+               break;
+            case Iop_CmpLE64U:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       ((ULong)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U64)
+                        <= (ULong)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)))));
+               break;
+
+            /* -- CmpLES -- */
+            case Iop_CmpLE32S:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       ((Int)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U32)
+                        <= (Int)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)))));
+               break;
+            case Iop_CmpLE64S:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       ((Long)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U64)
+                        <= (Long)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)))));
+               break;
+
+            /* -- CmpLTS -- */
+            case Iop_CmpLT32S:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       ((Int)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U32)
+                        < (Int)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)))));
+               break;
+            case Iop_CmpLT64S:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       ((Long)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U64)
+                        < (Long)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)))));
+               break;
+
+            /* -- CmpLTU -- */
+            case Iop_CmpLT32U:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       ((UInt)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U32)
+                        < (UInt)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)))));
+               break;
+            case Iop_CmpLT64U:
+               e2 = IRExpr_Const(IRConst_U1(toBool(
+                       ((ULong)(e->Iex.Binop.arg1->Iex.Const.con->Ico.U64)
+                        < (ULong)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)))));
+               break;
+
+            /* -- CmpORD -- */
+            case Iop_CmpORD32S: {
+               /* very paranoid */
+               UInt  u32a = e->Iex.Binop.arg1->Iex.Const.con->Ico.U32;
+               UInt  u32b = e->Iex.Binop.arg2->Iex.Const.con->Ico.U32;
+               Int   s32a = (Int)u32a;
+               Int   s32b = (Int)u32b;
+               Int   r = 0x2; /* EQ */
+               if (s32a < s32b) {
+                  r = 0x8; /* LT */
+               } 
+               else if (s32a > s32b) {
+                  r = 0x4; /* GT */
+               }
+               e2 = IRExpr_Const(IRConst_U32(r));
+               break;
+            }
+
+            /* -- nHLto2n -- */
+            case Iop_32HLto64:
+               e2 = IRExpr_Const(IRConst_U64(
+                       (((ULong)(e->Iex.Binop.arg1
+                                  ->Iex.Const.con->Ico.U32)) << 32)
+                       | ((ULong)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32)) 
+                    ));
+               break;
+            case Iop_64HLto128:
+               /* We can't fold this, because there is no way to
+                  express the result in IR, but at least pretend to
+                  handle it, so as to stop getting blasted with
+                  no-rule-for-this-primop messages. */
+               break;
+            /* For this vector one, can't fold all cases, but at
+               least do the most obvious one.  Could do better here
+               using summarise/desummarise of vector constants, but
+               too difficult to verify; hence just handle the zero
+               cases. */
+            case Iop_64HLtoV128: {
+               ULong argHi = e->Iex.Binop.arg1->Iex.Const.con->Ico.U64;
+               ULong argLo = e->Iex.Binop.arg2->Iex.Const.con->Ico.U64;
+               if (0 == argHi && 0 == argLo) {
+                  e2 = IRExpr_Const(IRConst_V128(0));
+               } else {
+                  goto unhandled;
+               }
+               break;
+            }
+            /* Same reasoning for the 256-bit version. */
+            case Iop_V128HLtoV256: {
+               IRExpr* argHi = e->Iex.Binop.arg1;
+               IRExpr* argLo = e->Iex.Binop.arg2;
+               if (isZeroV128(argHi) && isZeroV128(argLo)) {
+                  e2 = IRExpr_Const(IRConst_V256(0));
+               } else {
+                  goto unhandled;
+               }
+               break;
+            }
+
+            /* -- V128 stuff -- */
+            case Iop_InterleaveLO8x16: {
+               /* This turns up a lot in Memcheck instrumentation of
+                  Icc generated code.  I don't know why. */
+               UShort arg1 =  e->Iex.Binop.arg1->Iex.Const.con->Ico.V128;
+               UShort arg2 =  e->Iex.Binop.arg2->Iex.Const.con->Ico.V128;
+               if (0 == arg1 && 0 == arg2) {
+                  e2 = IRExpr_Const(IRConst_V128(0));
+               } else {
+                  goto unhandled;
+               }
+               break;
+            }
+
+            default:
+               goto unhandled;
+         }
+
+      } else {
+
+         /* other cases (identities, etc) */
+         switch (e->Iex.Binop.op) {
+
+            case Iop_Shl32:
+            case Iop_Shl64:
+            case Iop_Shr64:
+            case Iop_Sar64:
+               /* Shl32/Shl64/Shr64/Sar64(x,0) ==> x */
+               if (isZeroU(e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               /* Shl32/Shl64/Shr64/Sar64(0,x) ==> 0 */
+               if (isZeroU(e->Iex.Binop.arg1)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               break;
+
+            case Iop_Sar32:
+            case Iop_Shr32:
+               /* Shr32/Sar32(x,0) ==> x */
+               if (isZeroU(e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               break;
+
+            case Iop_Or8:
+            case Iop_Or16:
+            case Iop_Or32:
+            case Iop_Or64:
+            case Iop_Max32U:
+               /* Or8/Or16/Or32/Or64/Max32U(x,0) ==> x */
+               if (isZeroU(e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               /* Or8/Or16/Or32/Or64/Max32U(0,x) ==> x */
+               if (isZeroU(e->Iex.Binop.arg1)) {
+                  e2 = e->Iex.Binop.arg2;
+                  break;
+               }
+               /* Or8/Or16/Or32/Or64/Max32U(x,1---1b) ==> 1---1b */
+               /* Or8/Or16/Or32/Or64/Max32U(1---1b,x) ==> 1---1b */
+               if (isOnesU(e->Iex.Binop.arg1) || isOnesU(e->Iex.Binop.arg2)) {
+                  e2 = mkOnesOfPrimopResultType(e->Iex.Binop.op);
+                  break;
+               }
+               /* Or8/Or16/Or32/Or64/Max32U(t,t) ==> t, for some IRTemp t */
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               break;
+
+            case Iop_Add8:
+               /* Add8(t,t) ==> t << 1.
+                  Memcheck doesn't understand that
+                  x+x produces a defined least significant bit, and it seems
+                  simplest just to get rid of the problem by rewriting it
+                  out, since the opportunity to do so exists. */
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = IRExpr_Binop(Iop_Shl8, e->Iex.Binop.arg1,
+                                    IRExpr_Const(IRConst_U8(1)));
+                  break;
+               }
+               break;
+
+               /* NB no Add16(t,t) case yet as no known test case exists */
+
+            case Iop_Add32:
+            case Iop_Add64:
+               /* Add32/Add64(x,0) ==> x */
+               if (isZeroU(e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               /* Add32/Add64(0,x) ==> x */
+               if (isZeroU(e->Iex.Binop.arg1)) {
+                  e2 = e->Iex.Binop.arg2;
+                  break;
+               }
+               /* Add32/Add64(t,t) ==> t << 1. Same rationale as for Add8. */
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = IRExpr_Binop(
+                          e->Iex.Binop.op == Iop_Add32 ? Iop_Shl32 : Iop_Shl64,
+                          e->Iex.Binop.arg1, IRExpr_Const(IRConst_U8(1)));
+                  break;
+               }
+               break;
+
+            case Iop_Sub32:
+            case Iop_Sub64:
+               /* Sub32/Sub64(x,0) ==> x */
+               if (isZeroU(e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               /* Sub32/Sub64(t,t) ==> 0, for some IRTemp t */
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = mkZeroOfPrimopResultType(e->Iex.Binop.op);
+                  break;
+               }
+               break;
+            case Iop_Sub8x16:
+               /* Sub8x16(x,0) ==> x */
+               if (isZeroV128(e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               break;
+
+            case Iop_And8:
+            case Iop_And16:
+            case Iop_And32:
+            case Iop_And64:
+               /* And8/And16/And32/And64(x,1---1b) ==> x */
+               if (isOnesU(e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               /* And8/And16/And32/And64(1---1b,x) ==> x */
+               if (isOnesU(e->Iex.Binop.arg1)) {
+                  e2 = e->Iex.Binop.arg2;
+                  break;
+               }
+               /* And8/And16/And32/And64(x,0) ==> 0 */
+               if (isZeroU(e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg2;
+                  break;
+               }
+               /* And8/And16/And32/And64(0,x) ==> 0 */
+               if (isZeroU(e->Iex.Binop.arg1)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               /* And8/And16/And32/And64(t,t) ==> t, for some IRTemp t */
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               break;
+
+            case Iop_AndV128:
+            case Iop_AndV256:
+               /* AndV128/AndV256(t,t) ==> t, for some IRTemp t */
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               /* Deal with either arg zero.  Could handle other And
+                  cases here too. */
+               if (e->Iex.Binop.op == Iop_AndV256
+                   && (isZeroV256(e->Iex.Binop.arg1)
+                       || isZeroV256(e->Iex.Binop.arg2))) {
+                  e2 =  mkZeroOfPrimopResultType(e->Iex.Binop.op);
+                  break;
+               } else if (e->Iex.Binop.op == Iop_AndV128
+                          && (isZeroV128(e->Iex.Binop.arg1)
+                              || isZeroV128(e->Iex.Binop.arg2))) {
+                  e2 =  mkZeroOfPrimopResultType(e->Iex.Binop.op);
+                  break;
+               }
+               break;
+
+            case Iop_OrV128:
+            case Iop_OrV256:
+               /* OrV128/OrV256(t,t) ==> t, for some IRTemp t */
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = e->Iex.Binop.arg1;
+                  break;
+               }
+               /* OrV128(t,0) ==> t */
+               if (e->Iex.Binop.op == Iop_OrV128) {
+                  if (isZeroV128(e->Iex.Binop.arg2)) {
+                     e2 = e->Iex.Binop.arg1;
+                     break;
+                  }
+                  if (isZeroV128(e->Iex.Binop.arg1)) {
+                     e2 = e->Iex.Binop.arg2;
+                     break;
+                  }
+               }
+               /* OrV256(t,0) ==> t */
+               if (e->Iex.Binop.op == Iop_OrV256) {
+                  if (isZeroV256(e->Iex.Binop.arg2)) {
+                     e2 = e->Iex.Binop.arg1;
+                     break;
+                  }
+                  //Disabled because there's no known test case right now.
+                  //if (isZeroV256(e->Iex.Binop.arg1)) {
+                  //   e2 = e->Iex.Binop.arg2;
+                  //   break;
+                  //}
+               }
+               break;
+
+            case Iop_Xor8:
+            case Iop_Xor16:
+            case Iop_Xor32:
+            case Iop_Xor64:
+            case Iop_XorV128:
+               /* Xor8/16/32/64/V128(t,t) ==> 0, for some IRTemp t */
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = mkZeroOfPrimopResultType(e->Iex.Binop.op);
+                  break;
+               }
+               /* XorV128(t,0) ==> t */
+               if (e->Iex.Binop.op == Iop_XorV128) {
+                  if (isZeroV128(e->Iex.Binop.arg2)) {
+                     e2 = e->Iex.Binop.arg1;
+                     break;
+                  }
+                  //Disabled because there's no known test case right now.
+                  //if (isZeroV128(e->Iex.Binop.arg1)) {
+                  //   e2 = e->Iex.Binop.arg2;
+                  //   break;
+                  //}
+               } else {
+                  /* Xor8/16/32/64(0,t) ==> t */
+                  if (isZeroU(e->Iex.Binop.arg1)) {
+                     e2 = e->Iex.Binop.arg2;
+                     break;
+                  }
+                  /* Xor8/16/32/64(t,0) ==> t */
+                  if (isZeroU(e->Iex.Binop.arg2)) {
+                     e2 = e->Iex.Binop.arg1;
+                     break;
+                  }
+               }
+               break;
+
+            case Iop_CmpNE32:
+               /* CmpNE32(t,t) ==> 0, for some IRTemp t */
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = mkZeroOfPrimopResultType(e->Iex.Binop.op);
+                  break;
+               }
+               /* CmpNE32(1Uto32(b), 0) ==> b */
+               if (isZeroU32(e->Iex.Binop.arg2)) {
+                  IRExpr* a1 = chase(env, e->Iex.Binop.arg1);
+                  if (a1 && a1->tag == Iex_Unop 
+                         && a1->Iex.Unop.op == Iop_1Uto32) {
+                     e2 = a1->Iex.Unop.arg;
+                     break;
+                  }
+               }
+               break;
+
+            case Iop_CmpEQ32:
+            case Iop_CmpEQ64:
+            case Iop_CmpEQ8x8:
+            case Iop_CmpEQ8x16:
+            case Iop_CmpEQ16x8:
+            case Iop_CmpEQ32x4:
+               if (sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+                  e2 = mkOnesOfPrimopResultType(e->Iex.Binop.op);
+                  break;
+               }
+               break;
+
+            default:
+               break;
+         }
+      }
+      break;
+
+   case Iex_ITE:
+      /* ITE */
+      /* is the discriminant a constant? */
+      if (e->Iex.ITE.cond->tag == Iex_Const) {
+         /* assured us by the IR type rules */
+         vassert(e->Iex.ITE.cond->Iex.Const.con->tag == Ico_U1);
+         e2 = e->Iex.ITE.cond->Iex.Const.con->Ico.U1
+                 ? e->Iex.ITE.iftrue : e->Iex.ITE.iffalse;
+      }
+      else
+      /* are the arms identical? (pretty weedy test) */
+      if (sameIRExprs(env, e->Iex.ITE.iftrue,
+                           e->Iex.ITE.iffalse)) {
+         e2 = e->Iex.ITE.iffalse;
+      }
+      break;
+
+   default:
+      /* not considered */
+      break;
+   }
+
+   /* Show cases where we've found but not folded 'op(t,t)'.  Be
+      careful not to call sameIRExprs with values of different types,
+      though, else it will assert (and so it should!).  We can't
+      conveniently call typeOfIRExpr on the two args without a whole
+      bunch of extra plumbing to pass in a type env, so just use a
+      hacky test to check the arguments are not anything that might
+      sameIRExprs to assert.  This is only OK because this kludge is
+      only used for debug printing, not for "real" operation.  For
+      "real" operation (ie, all other calls to sameIRExprs), it is
+      essential that the two args have the same type.
+ 
+      The "right" solution is to plumb the containing block's
+      IRTypeEnv through to here and use typeOfIRExpr to be sure.  But
+      that's a bunch of extra parameter passing which will just slow
+      down the normal case, for no purpose. */
+   if (vex_control.iropt_verbosity > 0 
+       && e == e2 
+       && e->tag == Iex_Binop
+       && !debug_only_hack_sameIRExprs_might_assert(e->Iex.Binop.arg1,
+                                                    e->Iex.Binop.arg2)
+       && sameIRExprs(env, e->Iex.Binop.arg1, e->Iex.Binop.arg2)) {
+      vex_printf("vex iropt: fold_Expr: no ident rule for: ");
+      ppIRExpr(e);
+      vex_printf("\n");
+   }
+
+   /* Show the overall results of folding. */
+   if (DEBUG_IROPT && e2 != e) {
+      vex_printf("FOLD: "); 
+      ppIRExpr(e); vex_printf("  ->  ");
+      ppIRExpr(e2); vex_printf("\n");
+   }
+
+   return e2;
+
+ unhandled:
+#  if 0
+   vex_printf("\n\n");
+   ppIRExpr(e);
+   vpanic("fold_Expr: no rule for the above");
+#  else
+   if (vex_control.iropt_verbosity > 0) {
+      vex_printf("vex iropt: fold_Expr: no const rule for: ");
+      ppIRExpr(e);
+      vex_printf("\n");
+   }
+   return e2;
+#  endif
+}
+
+
+/* Apply the subst to a simple 1-level expression -- guaranteed to be
+   1-level due to previous flattening pass. */
+
+static IRExpr* subst_Expr ( IRExpr** env, IRExpr* ex )
+{
+   switch (ex->tag) {
+      case Iex_RdTmp:
+         /* If this temp is bound in env, and the binding is itself
+            atomic -- another temp, or a constant -- return the
+            binding instead of the temp.  NOTE(review): Ico_F64i
+            constants are deliberately not propagated here;
+            presumably they must stay referenced via a temp --
+            confirm against how the backends handle F64i. */
+         if (env[(Int)ex->Iex.RdTmp.tmp] != NULL) {
+            IRExpr *rhs = env[(Int)ex->Iex.RdTmp.tmp];
+            if (rhs->tag == Iex_RdTmp)
+               return rhs;
+            if (rhs->tag == Iex_Const
+                && rhs->Iex.Const.con->tag != Ico_F64i)
+               return rhs;
+         }
+         /* not bound in env */
+         return ex;
+
+      case Iex_Const:
+      case Iex_Get:
+         /* Constants and guest-state reads contain no temps, hence
+            nothing to substitute; return unchanged. */
+         return ex;
+
+      case Iex_GetI:
+         /* Flattening guarantees the index is atomic; rebuild with
+            the index substituted. */
+         vassert(isIRAtom(ex->Iex.GetI.ix));
+         return IRExpr_GetI(
+            ex->Iex.GetI.descr,
+            subst_Expr(env, ex->Iex.GetI.ix),
+            ex->Iex.GetI.bias
+         );
+
+      case Iex_Qop: {
+         /* All four args are atomic (per flattening); substitute
+            each and rebuild. */
+         IRQop* qop = ex->Iex.Qop.details;
+         vassert(isIRAtom(qop->arg1));
+         vassert(isIRAtom(qop->arg2));
+         vassert(isIRAtom(qop->arg3));
+         vassert(isIRAtom(qop->arg4));
+         return IRExpr_Qop(
+                   qop->op,
+                   subst_Expr(env, qop->arg1),
+                   subst_Expr(env, qop->arg2),
+                   subst_Expr(env, qop->arg3),
+                   subst_Expr(env, qop->arg4)
+                );
+      }
+
+      case Iex_Triop: {
+         /* As for Qop, but three args. */
+         IRTriop* triop = ex->Iex.Triop.details;
+         vassert(isIRAtom(triop->arg1));
+         vassert(isIRAtom(triop->arg2));
+         vassert(isIRAtom(triop->arg3));
+         return IRExpr_Triop(
+                   triop->op,
+                   subst_Expr(env, triop->arg1),
+                   subst_Expr(env, triop->arg2),
+                   subst_Expr(env, triop->arg3)
+                );
+      }
+
+      case Iex_Binop:
+         /* As above, two args. */
+         vassert(isIRAtom(ex->Iex.Binop.arg1));
+         vassert(isIRAtom(ex->Iex.Binop.arg2));
+         return IRExpr_Binop(
+                   ex->Iex.Binop.op,
+                   subst_Expr(env, ex->Iex.Binop.arg1),
+                   subst_Expr(env, ex->Iex.Binop.arg2)
+                );
+
+      case Iex_Unop:
+         /* As above, one arg. */
+         vassert(isIRAtom(ex->Iex.Unop.arg));
+         return IRExpr_Unop(
+                   ex->Iex.Unop.op,
+                   subst_Expr(env, ex->Iex.Unop.arg)
+                );
+
+      case Iex_Load:
+         /* The address is atomic; substitute it and rebuild. */
+         vassert(isIRAtom(ex->Iex.Load.addr));
+         return IRExpr_Load(
+                   ex->Iex.Load.end,
+                   ex->Iex.Load.ty,
+                   subst_Expr(env, ex->Iex.Load.addr)
+                );
+
+      case Iex_CCall: {
+         Int      i;
+         /* Shallow-copy the NULL-terminated arg vector so the
+            in-place overwrites below don't mutate the original
+            expression. */
+         IRExpr** args2 = shallowCopyIRExprVec(ex->Iex.CCall.args);
+         for (i = 0; args2[i]; i++) {
+            vassert(isIRAtom(args2[i]));
+            args2[i] = subst_Expr(env, args2[i]);
+         }
+         return IRExpr_CCall(
+                   ex->Iex.CCall.cee,
+                   ex->Iex.CCall.retty,
+                   args2 
+                );
+      }
+
+      case Iex_ITE:
+         /* Condition and both arms are atomic; substitute all
+            three and rebuild. */
+         vassert(isIRAtom(ex->Iex.ITE.cond));
+         vassert(isIRAtom(ex->Iex.ITE.iftrue));
+         vassert(isIRAtom(ex->Iex.ITE.iffalse));
+         return IRExpr_ITE(
+                   subst_Expr(env, ex->Iex.ITE.cond),
+                   subst_Expr(env, ex->Iex.ITE.iftrue),
+                   subst_Expr(env, ex->Iex.ITE.iffalse)
+                );
+
+      default:
+         /* Any other tag means the expression wasn't flattened as
+            required by the preceding pass; bail out loudly. */
+         vex_printf("\n\n"); ppIRExpr(ex);
+         vpanic("subst_Expr");
+      
+   }
+}
+
+
+/* Apply the subst to stmt, then fold the result as much as possible.
+   Much simplified due to stmt being previously flattened.  As a
+   result of this, the stmt may wind up being turned into a no-op.  
+*/
+static IRStmt* subst_and_fold_Stmt ( IRExpr** env, IRStmt* st )
+{
+#  if 0
+   vex_printf("\nsubst and fold stmt\n");
+   ppIRStmt(st);
+   vex_printf("\n");
+#  endif
+
+   switch (st->tag) {
+      case Ist_AbiHint:
+         vassert(isIRAtom(st->Ist.AbiHint.base));
+         vassert(isIRAtom(st->Ist.AbiHint.nia));
+         return IRStmt_AbiHint(
+                   fold_Expr(env, subst_Expr(env, st->Ist.AbiHint.base)),
+                   st->Ist.AbiHint.len,
+                   fold_Expr(env, subst_Expr(env, st->Ist.AbiHint.nia))
+                );
+      case Ist_Put:
+         vassert(isIRAtom(st->Ist.Put.data));
+         return IRStmt_Put(
+                   st->Ist.Put.offset, 
+                   fold_Expr(env, subst_Expr(env, st->Ist.Put.data)) 
+                );
+
+      case Ist_PutI: {
+         IRPutI *puti, *puti2;
+         puti = st->Ist.PutI.details;
+         vassert(isIRAtom(puti->ix));
+         vassert(isIRAtom(puti->data));
+         puti2 = mkIRPutI(puti->descr,
+                          fold_Expr(env, subst_Expr(env, puti->ix)),
+                          puti->bias,
+                          fold_Expr(env, subst_Expr(env, puti->data)));
+         return IRStmt_PutI(puti2);
+      }
+
+      case Ist_WrTmp:
+         /* This is the one place where an expr (st->Ist.WrTmp.data) is
+            allowed to be more than just a constant or a tmp. */
+         return IRStmt_WrTmp(
+                   st->Ist.WrTmp.tmp,
+                   fold_Expr(env, subst_Expr(env, st->Ist.WrTmp.data))
+                );
+
+      case Ist_Store:
+         vassert(isIRAtom(st->Ist.Store.addr));
+         vassert(isIRAtom(st->Ist.Store.data));
+         return IRStmt_Store(
+                   st->Ist.Store.end,
+                   fold_Expr(env, subst_Expr(env, st->Ist.Store.addr)),
+                   fold_Expr(env, subst_Expr(env, st->Ist.Store.data))
+                );
+
+      case Ist_StoreG: {
+         IRStoreG* sg = st->Ist.StoreG.details;
+         vassert(isIRAtom(sg->addr));
+         vassert(isIRAtom(sg->data));
+         vassert(isIRAtom(sg->guard));
+         IRExpr* faddr  = fold_Expr(env, subst_Expr(env, sg->addr));
+         IRExpr* fdata  = fold_Expr(env, subst_Expr(env, sg->data));
+         IRExpr* fguard = fold_Expr(env, subst_Expr(env, sg->guard));
+         if (fguard->tag == Iex_Const) {
+            /* The condition on this store has folded down to a constant. */
+            vassert(fguard->Iex.Const.con->tag == Ico_U1);
+            if (fguard->Iex.Const.con->Ico.U1 == False) {
+               return IRStmt_NoOp();
+            } else {
+               vassert(fguard->Iex.Const.con->Ico.U1 == True);
+               return IRStmt_Store(sg->end, faddr, fdata);
+            }
+         }
+         return IRStmt_StoreG(sg->end, faddr, fdata, fguard);
+      }
+
+      case Ist_LoadG: {
+         /* This is complicated.  If the guard folds down to 'false',
+            we can replace it with an assignment 'dst := alt', but if
+            the guard folds down to 'true', we can't conveniently
+            replace it with an unconditional load, because doing so
+            requires generating a new temporary, and that is not easy
+            to do at this point. */
+         IRLoadG* lg = st->Ist.LoadG.details;
+         vassert(isIRAtom(lg->addr));
+         vassert(isIRAtom(lg->alt));
+         vassert(isIRAtom(lg->guard));
+         IRExpr* faddr  = fold_Expr(env, subst_Expr(env, lg->addr));
+         IRExpr* falt   = fold_Expr(env, subst_Expr(env, lg->alt));
+         IRExpr* fguard = fold_Expr(env, subst_Expr(env, lg->guard));
+         if (fguard->tag == Iex_Const) {
+            /* The condition on this load has folded down to a constant. */
+            vassert(fguard->Iex.Const.con->tag == Ico_U1);
+            if (fguard->Iex.Const.con->Ico.U1 == False) {
+               /* The load is not going to happen -- instead 'alt' is
+                  assigned to 'dst'.  */
+               return IRStmt_WrTmp(lg->dst, falt);
+            } else {
+               vassert(fguard->Iex.Const.con->Ico.U1 == True);
+               /* The load is always going to happen.  We want to
+                  convert to an unconditional load and assign to 'dst'
+                  (IRStmt_WrTmp).  Problem is we need an extra temp to
+                  hold the loaded value, but none is available.
+                  Instead, reconstitute the conditional load (with
+                  folded args, of course) and let the caller of this
+                  routine deal with the problem. */
+            }
+         }
+         return IRStmt_LoadG(lg->end, lg->cvt, lg->dst, faddr, falt, fguard);
+      }
+
+      case Ist_CAS: {
+         IRCAS *cas, *cas2;
+         cas = st->Ist.CAS.details;
+         vassert(isIRAtom(cas->addr));
+         vassert(cas->expdHi == NULL || isIRAtom(cas->expdHi));
+         vassert(isIRAtom(cas->expdLo));
+         vassert(cas->dataHi == NULL || isIRAtom(cas->dataHi));
+         vassert(isIRAtom(cas->dataLo));
+         cas2 = mkIRCAS(
+                   cas->oldHi, cas->oldLo, cas->end, 
+                   fold_Expr(env, subst_Expr(env, cas->addr)),
+                   cas->expdHi ? fold_Expr(env, subst_Expr(env, cas->expdHi))
+                               : NULL,
+                   fold_Expr(env, subst_Expr(env, cas->expdLo)),
+                   cas->dataHi ? fold_Expr(env, subst_Expr(env, cas->dataHi))
+                               : NULL,
+                   fold_Expr(env, subst_Expr(env, cas->dataLo))
+                );
+         return IRStmt_CAS(cas2);
+      }
+
+      case Ist_LLSC:
+         vassert(isIRAtom(st->Ist.LLSC.addr));
+         if (st->Ist.LLSC.storedata)
+            vassert(isIRAtom(st->Ist.LLSC.storedata));
+         return IRStmt_LLSC(
+                   st->Ist.LLSC.end,
+                   st->Ist.LLSC.result,
+                   fold_Expr(env, subst_Expr(env, st->Ist.LLSC.addr)),
+                   st->Ist.LLSC.storedata
+                      ? fold_Expr(env, subst_Expr(env, st->Ist.LLSC.storedata))
+                      : NULL
+                );
+
+      case Ist_Dirty: {
+         Int     i;
+         IRDirty *d, *d2;
+         d = st->Ist.Dirty.details;
+         d2 = emptyIRDirty();
+         *d2 = *d;
+         d2->args = shallowCopyIRExprVec(d2->args);
+         if (d2->mFx != Ifx_None) {
+            vassert(isIRAtom(d2->mAddr));
+            d2->mAddr = fold_Expr(env, subst_Expr(env, d2->mAddr));
+         }
+         vassert(isIRAtom(d2->guard));
+         d2->guard = fold_Expr(env, subst_Expr(env, d2->guard));
+         for (i = 0; d2->args[i]; i++) {
+            IRExpr* arg = d2->args[i];
+            if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg))) {
+               vassert(isIRAtom(arg));
+               d2->args[i] = fold_Expr(env, subst_Expr(env, arg));
+            }
+         }
+         return IRStmt_Dirty(d2);
+      }
+
+      case Ist_IMark:
+         return IRStmt_IMark(st->Ist.IMark.addr,
+                             st->Ist.IMark.len,
+                             st->Ist.IMark.delta);
+
+      case Ist_NoOp:
+         return IRStmt_NoOp();
+
+      case Ist_MBE:
+         return IRStmt_MBE(st->Ist.MBE.event);
+
+      case Ist_Exit: {
+         IRExpr* fcond;
+         vassert(isIRAtom(st->Ist.Exit.guard));
+         fcond = fold_Expr(env, subst_Expr(env, st->Ist.Exit.guard));
+         if (fcond->tag == Iex_Const) {
+            /* Interesting.  The condition on this exit has folded down to
+               a constant. */
+            vassert(fcond->Iex.Const.con->tag == Ico_U1);
+            if (fcond->Iex.Const.con->Ico.U1 == False) {
+               /* exit is never going to happen, so dump the statement. */
+               return IRStmt_NoOp();
+            } else {
+               vassert(fcond->Iex.Const.con->Ico.U1 == True);
+               /* Hmmm.  The exit has become unconditional.  Leave it
+                  as it is for now, since we'd have to truncate the BB
+                  at this point, which is tricky.  Such truncation is
+                  done later by the dead-code elimination pass. */
+               /* fall out into the reconstruct-the-exit code. */
+               if (vex_control.iropt_verbosity > 0) 
+                  /* really a misuse of vex_control.iropt_verbosity */
+                  vex_printf("vex iropt: IRStmt_Exit became unconditional\n");
+            }
+         }
+         return IRStmt_Exit(fcond, st->Ist.Exit.jk,
+                                   st->Ist.Exit.dst, st->Ist.Exit.offsIP);
+      }
+
+   default:
+      vex_printf("\n"); ppIRStmt(st);
+      vpanic("subst_and_fold_Stmt");
+   }
+}
+
+
/* Constant/copy propagation and constant folding over the flat IRSB
   'in', producing a new flat IRSB.  Also handles the awkward case of
   IRStmt_LoadGs whose guards fold to 1:I1: these are noted during the
   main pass and rewritten afterwards into an unconditional load plus
   a widening conversion (see the fixup loop at the end). */
IRSB* cprop_BB ( IRSB* in )
{
   Int      i;
   IRSB*    out;
   IRStmt*  st2;
   Int      n_tmps = in->tyenv->types_used;
   /* env[t] is the expression to substitute for RdTmp(t), or NULL if
      t has no binding yet.  Values are atoms (consts/tmps), except
      that WrTmp rhses are stored whole for sameIRExpr's benefit. */
   IRExpr** env = LibVEX_Alloc_inline(n_tmps * sizeof(IRExpr*));
   /* Keep track of IRStmt_LoadGs that we need to revisit after
      processing all the other statements. */
   /* NOTE(review): 'const Int' is not an integer constant expression
      in C, so 'fixups' is formally a C99 VLA here -- harmless, but
      worth confirming it matches the project's portability policy. */
   const Int N_FIXUPS = 16;
   Int fixups[N_FIXUPS]; /* indices in the stmt array of 'out' */
   Int n_fixups = 0;

   out = emptyIRSB();
   out->tyenv = deepCopyIRTypeEnv( in->tyenv );

   /* Set up the env with which travels forward.  This holds a
      substitution, mapping IRTemps to IRExprs. The environment 
      is to be applied as we move along.  Keys are IRTemps.
      Values are IRExpr*s.
   */
   for (i = 0; i < n_tmps; i++)
      env[i] = NULL;

   /* For each original SSA-form stmt ... */
   for (i = 0; i < in->stmts_used; i++) {

      /* First apply the substitution to the current stmt.  This
         propagates in any constants and tmp-tmp assignments
         accumulated prior to this point.  As part of the subst_Stmt
         call, also then fold any constant expressions resulting. */

      st2 = in->stmts[i];

      /* perhaps st2 is already a no-op? */
      if (st2->tag == Ist_NoOp) continue;

      st2 = subst_and_fold_Stmt( env, st2 );

      /* Deal with some post-folding special cases. */
      switch (st2->tag) {

         /* If the statement has been folded into a no-op, forget
            it. */
         case Ist_NoOp:
            continue;

         /* If the statement assigns to an IRTemp add it to the
            running environment. This is for the benefit of copy
            propagation and to allow sameIRExpr look through
            IRTemps. */
         case Ist_WrTmp: {
            /* SSA form guarantees each tmp is written at most once. */
            vassert(env[(Int)(st2->Ist.WrTmp.tmp)] == NULL);
            env[(Int)(st2->Ist.WrTmp.tmp)] = st2->Ist.WrTmp.data;

            /* 't1 = t2' -- don't add to BB; will be optimized out */
            if (st2->Ist.WrTmp.data->tag == Iex_RdTmp)
               continue;

            /* 't = const' && 'const != F64i' -- don't add to BB 
               Note, we choose not to propagate const when const is an
               F64i, so that F64i literals can be CSE'd later.  This
               helps x86 floating point code generation. */
            if (st2->Ist.WrTmp.data->tag == Iex_Const
                && st2->Ist.WrTmp.data->Iex.Const.con->tag != Ico_F64i) {
               continue;
            }
            /* else add it to the output, as normal */
            break;
         }

         case Ist_LoadG: {
            IRLoadG* lg    = st2->Ist.LoadG.details;
            IRExpr*  guard = lg->guard;
            if (guard->tag == Iex_Const) {
               /* The guard has folded to a constant, and that
                  constant must be 1:I1, since subst_and_fold_Stmt
                  folds out the case 0:I1 by itself. */
               vassert(guard->Iex.Const.con->tag == Ico_U1);
               vassert(guard->Iex.Const.con->Ico.U1 == True);
               /* Add a NoOp here as a placeholder, and make a note of
                  where it is in the output block.  Afterwards we'll
                  come back here and transform the NoOp and the LoadG
                  into a load-convert pair.  The fixups[] entry
                  refers to the inserted NoOp, and we expect to find
                  the relevant LoadG immediately after it. */
               /* If fixups[] is full we simply leave the LoadG as a
                  (correct but suboptimal) conditional load. */
               vassert(n_fixups >= 0 && n_fixups <= N_FIXUPS);
               if (n_fixups < N_FIXUPS) {
                  fixups[n_fixups++] = out->stmts_used;
                  addStmtToIRSB( out, IRStmt_NoOp() );
               }
            }
            /* And always add the LoadG to the output, regardless. */
            break;
         }

      default:
         break;
      }

      /* Not interesting, copy st2 into the output block. */
      addStmtToIRSB( out, st2 );
   }

#  if STATS_IROPT
   vex_printf("sameIRExpr: invoked = %u/%u  equal = %u/%u max_nodes = %u\n",
              invocation_count, recursion_count, success_count,
              recursion_success_count, max_nodes_visited);
#  endif

   out->next     = subst_Expr( env, in->next );
   out->jumpkind = in->jumpkind;
   out->offsIP   = in->offsIP;

   /* Process any leftover unconditional LoadGs that we noticed
      in the main pass. */
   vassert(n_fixups >= 0 && n_fixups <= N_FIXUPS);
   for (i = 0; i < n_fixups; i++) {
      Int ix = fixups[i];
      /* Carefully verify that the LoadG has the expected form. */
      vassert(ix >= 0 && ix+1 < out->stmts_used);
      IRStmt* nop = out->stmts[ix];
      IRStmt* lgu = out->stmts[ix+1];
      vassert(nop->tag == Ist_NoOp);
      vassert(lgu->tag == Ist_LoadG);
      IRLoadG* lg    = lgu->Ist.LoadG.details;
      IRExpr*  guard = lg->guard;
      vassert(guard->Iex.Const.con->tag == Ico_U1);
      vassert(guard->Iex.Const.con->Ico.U1 == True);
      /* Figure out the load and result types, and the implied
         conversion operation. */
      IRType cvtRes = Ity_INVALID, cvtArg = Ity_INVALID;
      typeOfIRLoadGOp(lg->cvt, &cvtRes, &cvtArg);
      IROp cvtOp = Iop_INVALID;
      switch (lg->cvt) {
         case ILGop_Ident32: break;
         case ILGop_8Uto32:  cvtOp = Iop_8Uto32;  break;
         case ILGop_8Sto32:  cvtOp = Iop_8Sto32;  break;
         case ILGop_16Uto32: cvtOp = Iop_16Uto32; break;
         case ILGop_16Sto32: cvtOp = Iop_16Sto32; break;
         default: vpanic("cprop_BB: unhandled ILGOp");
      }
      /* Replace the placeholder NoOp by the required unconditional
         load. */
      IRTemp tLoaded = newIRTemp(out->tyenv, cvtArg);
      out->stmts[ix] 
         = IRStmt_WrTmp(tLoaded,
                        IRExpr_Load(lg->end, cvtArg, lg->addr));
      /* Replace the LoadG by a conversion from the loaded value's
         type to the required result type. */
      out->stmts[ix+1]
         = IRStmt_WrTmp(
              lg->dst, cvtOp == Iop_INVALID
                          ? IRExpr_RdTmp(tLoaded)
                          : IRExpr_Unop(cvtOp, IRExpr_RdTmp(tLoaded)));
   }

   return out;
}
+
+
+/*---------------------------------------------------------------*/
+/*--- Dead code (t = E) removal                               ---*/
+/*---------------------------------------------------------------*/
+
+/* As a side effect, also removes all code following an unconditional
+   side exit. */
+
+/* The type of the HashHW map is: a map from IRTemp to nothing
+   -- really just operating a set or IRTemps.
+*/
+
+inline
+static void addUses_Temp ( Bool* set, IRTemp tmp )
+{
+   set[(Int)tmp] = True;
+}
+
/* Record in 'set' every IRTemp read by expression 'e', by walking
   the whole expression tree.  'set' is a Bool array indexed by
   IRTemp.  Leaf cases (Iex_Const, Iex_Get) use no temps; an
   unrecognised tag is a hard error. */
static void addUses_Expr ( Bool* set, IRExpr* e )
{
   Int i;
   switch (e->tag) {
      case Iex_GetI:
         addUses_Expr(set, e->Iex.GetI.ix);
         return;
      case Iex_ITE:
         addUses_Expr(set, e->Iex.ITE.cond);
         addUses_Expr(set, e->Iex.ITE.iftrue);
         addUses_Expr(set, e->Iex.ITE.iffalse);
         return;
      case Iex_CCall:
         /* args is a NULL-terminated vector. */
         for (i = 0; e->Iex.CCall.args[i]; i++)
            addUses_Expr(set, e->Iex.CCall.args[i]);
         return;
      case Iex_Load:
         addUses_Expr(set, e->Iex.Load.addr);
         return;
      case Iex_Qop:
         addUses_Expr(set, e->Iex.Qop.details->arg1);
         addUses_Expr(set, e->Iex.Qop.details->arg2);
         addUses_Expr(set, e->Iex.Qop.details->arg3);
         addUses_Expr(set, e->Iex.Qop.details->arg4);
         return;
      case Iex_Triop:
         addUses_Expr(set, e->Iex.Triop.details->arg1);
         addUses_Expr(set, e->Iex.Triop.details->arg2);
         addUses_Expr(set, e->Iex.Triop.details->arg3);
         return;
      case Iex_Binop:
         addUses_Expr(set, e->Iex.Binop.arg1);
         addUses_Expr(set, e->Iex.Binop.arg2);
         return;
      case Iex_Unop:
         addUses_Expr(set, e->Iex.Unop.arg);
         return;
      case Iex_RdTmp:
         /* The only place a use is actually recorded. */
         addUses_Temp(set, e->Iex.RdTmp.tmp);
         return;
      case Iex_Const:
      case Iex_Get:
         return;
      default:
         vex_printf("\n");
         ppIRExpr(e);
         vpanic("addUses_Expr");
   }
}
+
/* Record in 'set' every IRTemp read by statement 'st'.  Only uses
   are collected; the temp defined by an Ist_WrTmp is deliberately
   not added.  Unrecognised tags are a hard error. */
static void addUses_Stmt ( Bool* set, IRStmt* st )
{
   Int      i;
   IRDirty* d;
   IRCAS*   cas;
   switch (st->tag) {
      case Ist_AbiHint:
         addUses_Expr(set, st->Ist.AbiHint.base);
         addUses_Expr(set, st->Ist.AbiHint.nia);
         return;
      case Ist_PutI:
         addUses_Expr(set, st->Ist.PutI.details->ix);
         addUses_Expr(set, st->Ist.PutI.details->data);
         return;
      case Ist_WrTmp:
         /* Only the rhs; the lhs tmp is a definition, not a use. */
         addUses_Expr(set, st->Ist.WrTmp.data);
         return;
      case Ist_Put:
         addUses_Expr(set, st->Ist.Put.data);
         return;
      case Ist_Store:
         addUses_Expr(set, st->Ist.Store.addr);
         addUses_Expr(set, st->Ist.Store.data);
         return;
      case Ist_StoreG: {
         IRStoreG* sg = st->Ist.StoreG.details;
         addUses_Expr(set, sg->addr);
         addUses_Expr(set, sg->data);
         addUses_Expr(set, sg->guard);
         return;
      }
      case Ist_LoadG: {
         IRLoadG* lg = st->Ist.LoadG.details;
         addUses_Expr(set, lg->addr);
         addUses_Expr(set, lg->alt);
         addUses_Expr(set, lg->guard);
         return;
      }
      case Ist_CAS:
         /* expdHi/dataHi are NULL for single-width CAS. */
         cas = st->Ist.CAS.details;
         addUses_Expr(set, cas->addr);
         if (cas->expdHi)
            addUses_Expr(set, cas->expdHi);
         addUses_Expr(set, cas->expdLo);
         if (cas->dataHi)
            addUses_Expr(set, cas->dataHi);
         addUses_Expr(set, cas->dataLo);
         return;
      case Ist_LLSC:
         /* storedata is NULL for a load-linked. */
         addUses_Expr(set, st->Ist.LLSC.addr);
         if (st->Ist.LLSC.storedata)
            addUses_Expr(set, st->Ist.LLSC.storedata);
         return;
      case Ist_Dirty:
         d = st->Ist.Dirty.details;
         if (d->mFx != Ifx_None)
            addUses_Expr(set, d->mAddr);
         addUses_Expr(set, d->guard);
         for (i = 0; d->args[i] != NULL; i++) {
            IRExpr* arg = d->args[i];
            /* VECRET/BBPTR markers are not real expressions. */
            if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
               addUses_Expr(set, arg);
         }
         return;
      case Ist_NoOp:
      case Ist_IMark:
      case Ist_MBE:
         return;
      case Ist_Exit:
         addUses_Expr(set, st->Ist.Exit.guard);
         return;
      default:
         vex_printf("\n");
         ppIRStmt(st);
         vpanic("addUses_Stmt");
   }
}
+
+
+/* Is this literally IRExpr_Const(IRConst_U1(False)) ? */
+static Bool isZeroU1 ( IRExpr* e )
+{
+   return toBool( e->tag == Iex_Const
+                  && e->Iex.Const.con->tag == Ico_U1
+                  && e->Iex.Const.con->Ico.U1 == False );
+}
+
+/* Is this literally IRExpr_Const(IRConst_U1(True)) ? */
+static Bool isOneU1 ( IRExpr* e )
+{
+   return toBool( e->tag == Iex_Const
+                  && e->Iex.Const.con->tag == Ico_U1
+                  && e->Iex.Const.con->Ico.U1 == True );
+}
+
+
+/* Note, this destructively modifies the given IRSB. */
+
+/* Scan backwards through statements, carrying a set of IRTemps which
+   are known to be used after the current point.  On encountering 't =
+   E', delete the binding if it is not used.  Otherwise, add any temp
+   uses to the set and keep on moving backwards.
+
+   As an enhancement, the first (backwards) pass searches for IR exits
+   with always-taken conditions and notes the location of the earliest
+   one in the block.  If any such are found, a second pass copies the
+   exit destination and jump kind to the bb-end.  Then, the exit and
+   all statements following it are turned into no-ops.
+*/
+
/* Dead-code elimination over 'bb', in place.  Pass 1 walks backwards
   carrying the set of temps known to be used later, NoOp-ing any
   WrTmp whose lhs is never used and any Dirty whose guard is
   constant-false, and noting the earliest always-taken Exit.  Pass 2
   (if such an Exit was found) copies its destination to the bb-end
   and NoOps it and everything after it. */
/* notstatic */ void do_deadcode_BB ( IRSB* bb )
{
   Int     i, i_unconditional_exit;
   Int     n_tmps = bb->tyenv->types_used;
   /* set[t] == True iff temp t is used after the current point. */
   Bool*   set = LibVEX_Alloc_inline(n_tmps * sizeof(Bool));
   IRStmt* st;

   for (i = 0; i < n_tmps; i++)
      set[i] = False;

   /* start off by recording IRTemp uses in the next field. */
   addUses_Expr(set, bb->next);

   /* First pass */

   /* Work backwards through the stmts */
   i_unconditional_exit = -1;
   for (i = bb->stmts_used-1; i >= 0; i--) {
      st = bb->stmts[i];
      if (st->tag == Ist_NoOp)
         continue;
      /* take note of any unconditional exits */
      if (st->tag == Ist_Exit
          && isOneU1(st->Ist.Exit.guard))
         i_unconditional_exit = i;
      if (st->tag == Ist_WrTmp
          && set[(Int)(st->Ist.WrTmp.tmp)] == False) {
          /* it's an IRTemp which never got used.  Delete it. */
         if (DEBUG_IROPT) {
            vex_printf("DEAD: ");
            ppIRStmt(st);
            vex_printf("\n");
         }
         bb->stmts[i] = IRStmt_NoOp();
      }
      else
      if (st->tag == Ist_Dirty
          && st->Ist.Dirty.details->guard
          && isZeroU1(st->Ist.Dirty.details->guard)) {
         /* This is a dirty helper which will never get called.
            Delete it. */
         bb->stmts[i] = IRStmt_NoOp();
       }
       else {
         /* Note any IRTemp uses made by the current statement. */
         addUses_Stmt(set, st);
      }
   }

   /* Optional second pass: if any unconditional exits were found, 
      delete them and all following statements. */

   if (i_unconditional_exit != -1) {
      if (0) vex_printf("ZAPPING ALL FORWARDS from %d\n", 
                        i_unconditional_exit);
      vassert(i_unconditional_exit >= 0 
              && i_unconditional_exit < bb->stmts_used);
      /* Promote the exit's target/kind to be the bb-end transfer. */
      bb->next 
         = IRExpr_Const( bb->stmts[i_unconditional_exit]->Ist.Exit.dst );
      bb->jumpkind
         = bb->stmts[i_unconditional_exit]->Ist.Exit.jk;
      bb->offsIP
         = bb->stmts[i_unconditional_exit]->Ist.Exit.offsIP;
      for (i = i_unconditional_exit; i < bb->stmts_used; i++)
         bb->stmts[i] = IRStmt_NoOp();
   }
}
+
+
+/*---------------------------------------------------------------*/
+/*--- Specialisation of helper function calls, in             ---*/
+/*--- collaboration with the front end                        ---*/
+/*---------------------------------------------------------------*/
+
/* Offer every clean-helper call (an Iex_CCall on the rhs of a WrTmp)
   to the front end's specialiser 'specHelper', which may return a
   cheaper equivalent IRExpr*, or NULL to decline.  Accepted
   replacements are installed in place; since they may be non-flat,
   the block is re-flattened if anything changed.  The backwards scan
   lets the specialiser inspect earlier statements (&bb->stmts[0], i). */
static 
IRSB* spec_helpers_BB(
         IRSB* bb,
         IRExpr* (*specHelper) (const HChar*, IRExpr**, IRStmt**, Int)
      )
{
   Int     i;
   IRStmt* st;
   IRExpr* ex;
   Bool    any = False;

   for (i = bb->stmts_used-1; i >= 0; i--) {
      st = bb->stmts[i];

      if (st->tag != Ist_WrTmp
          || st->Ist.WrTmp.data->tag != Iex_CCall)
         continue;

      ex = (*specHelper)( st->Ist.WrTmp.data->Iex.CCall.cee->name,
                          st->Ist.WrTmp.data->Iex.CCall.args,
                          &bb->stmts[0], i );
      if (!ex)
        /* the front end can't think of a suitable replacement */
        continue;

      /* We got something better.  Install it in the bb. */
      any = True;
      bb->stmts[i]
         = IRStmt_WrTmp(st->Ist.WrTmp.tmp, ex);

      if (0) {
         /* 'st' still refers to the original statement, so this
            prints the old rhs alongside its replacement. */
         vex_printf("SPEC: ");
         ppIRExpr(st->Ist.WrTmp.data);
         vex_printf("  -->  ");
         ppIRExpr(ex);
         vex_printf("\n");
      }
   }

   if (any)
      bb = flatten_BB(bb);
   return bb;
}
+
+
+/*---------------------------------------------------------------*/
+/*--- Determination of guest state aliasing relationships     ---*/
+/*---------------------------------------------------------------*/
+
+/* These are helper functions for CSE and GetI/PutI transformations.
+
+   Determine, to the extent possible, the relationship between two
+   guest state accesses.  The possible outcomes are:
+
+   * Exact alias.  These two accesses denote precisely the same
+     piece of the guest state.
+
+   * Definitely no alias.  These two accesses are guaranteed not to
+     overlap any part of the guest state.
+
+   * Unknown -- if neither of the above can be established.
+
+   If in doubt, return Unknown.  */
+
/* Possible verdicts when comparing two guest-state accesses: same
   location, provably disjoint, or can't tell. */
typedef
   enum { ExactAlias, NoAlias, UnknownAlias }
   GSAliasing;
+
+
+/* Produces the alias relation between an indexed guest
+   state access and a non-indexed access. */
+
+static
+GSAliasing getAliasingRelation_IC ( IRRegArray* descr1, IRExpr* ix1,
+                                    Int offset2, IRType ty2 )
+{
+   UInt minoff1, maxoff1, minoff2, maxoff2;
+
+   getArrayBounds( descr1, &minoff1, &maxoff1 );
+   minoff2 = offset2;
+   maxoff2 = minoff2 + sizeofIRType(ty2) - 1;
+
+   if (maxoff1 < minoff2 || maxoff2 < minoff1)
+      return NoAlias;
+
+   /* Could probably do better here if required.  For the moment
+      however just claim not to know anything more. */
+   return UnknownAlias;
+}
+
+
+/* Produces the alias relation between two indexed guest state
+   accesses. */
+
/* Produces the alias relation between two indexed guest state
   accesses.  Returns ExactAlias only when the descriptors, index
   expressions and (normalised) biases all match; NoAlias when the
   arrays are disjoint or the biases provably differ; UnknownAlias
   otherwise. */
static
GSAliasing getAliasingRelation_II ( 
              IRRegArray* descr1, IRExpr* ix1, Int bias1,
              IRRegArray* descr2, IRExpr* ix2, Int bias2
           )
{
   UInt minoff1, maxoff1, minoff2, maxoff2;
   Int  iters;

   /* First try hard to show they don't alias. */
   getArrayBounds( descr1, &minoff1, &maxoff1 );
   getArrayBounds( descr2, &minoff2, &maxoff2 );
   if (maxoff1 < minoff2 || maxoff2 < minoff1)
      return NoAlias;

   /* So the two arrays at least partially overlap.  To get any
      further we'll have to be sure that the descriptors are
      identical. */
   if (!eqIRRegArray(descr1, descr2))
      return UnknownAlias;

   /* The descriptors are identical.  Now the only difference can be
      in the index expressions.  If they cannot be shown to be
      identical, we have to say we don't know what the aliasing
      relation will be.  Now, since the IR is flattened, the index
      expressions should be atoms -- either consts or tmps.  So that
      makes the comparison simple. */
   vassert(isIRAtom(ix1));
   vassert(isIRAtom(ix2));
   if (!eqIRAtom(ix1,ix2))
      return UnknownAlias;

   /* Ok, the index expressions are identical.  So now the only way
      they can be different is in the bias.  Normalise this
      paranoidly, to reliably establish equality/non-equality. */

   /* So now we know that the GetI and PutI index the same array
      with the same base.  Are the offsets the same, modulo the
      array size?  Do this paranoidly. */
   vassert(descr1->nElems == descr2->nElems);
   vassert(descr1->elemTy == descr2->elemTy);
   vassert(descr1->base   == descr2->base);
   /* Shift both biases into non-negative territory in lockstep; the
      iteration cap guards against absurd (buggy) bias values. */
   iters = 0;
   while (bias1 < 0 || bias2 < 0) {
      bias1 += descr1->nElems;
      bias2 += descr1->nElems;
      iters++;
      if (iters > 10)
         vpanic("getAliasingRelation: iters");
   }
   vassert(bias1 >= 0 && bias2 >= 0);
   bias1 %= descr1->nElems;
   bias2 %= descr1->nElems;
   vassert(bias1 >= 0 && bias1 < descr1->nElems);
   vassert(bias2 >= 0 && bias2 < descr1->nElems);

   /* Finally, biasP and biasG are normalised into the range 
      0 .. descrP/G->nElems - 1.  And so we can establish
      equality/non-equality. */

   return bias1==bias2 ? ExactAlias : NoAlias;
}
+
+
+/*---------------------------------------------------------------*/
+/*--- Common Subexpression Elimination                        ---*/
+/*---------------------------------------------------------------*/
+
+/* Expensive in time and space. */
+
+/* Uses two environments: 
+   a IRTemp -> IRTemp mapping 
+   a mapping from AvailExpr* to IRTemp 
+*/
+
/* An IR atom in key form: either an IRTemp or an IRConst.  Used to
   represent call args and load addresses compactly inside
   AvailExpr. */
typedef
   struct {
      enum { TCc, TCt } tag;  /* TCc => u.con valid; TCt => u.tmp valid */
      union { IRTemp tmp; IRConst* con; } u;
   }
   TmpOrConst;
+
+static Bool eqTmpOrConst ( TmpOrConst* tc1, TmpOrConst* tc2 )
+{
+   if (tc1->tag != tc2->tag)
+      return False;
+   switch (tc1->tag) {
+      case TCc:
+         return eqIRConst(tc1->u.con, tc2->u.con);
+      case TCt:
+         return tc1->u.tmp == tc2->u.tmp;
+      default:
+         vpanic("eqTmpOrConst");
+   }
+}
+
+static Bool eqIRCallee ( IRCallee* cee1, IRCallee* cee2 )
+{
+   Bool eq = cee1->addr == cee2->addr;
+   if (eq) {
+      vassert(cee1->regparms == cee2->regparms);
+      vassert(cee1->mcx_mask == cee2->mcx_mask);
+      /* Names should be the same too, but we don't bother to
+         check. */
+   }
+   return eq;
+}
+
+/* Convert an atomic IRExpr* to a TmpOrConst. */
+static void irExpr_to_TmpOrConst ( /*OUT*/TmpOrConst* tc, IRExpr* e )
+{
+   switch (e->tag) {
+      case Iex_RdTmp:
+         tc->tag   = TCt;
+         tc->u.tmp = e->Iex.RdTmp.tmp;
+         break;
+      case Iex_Const:
+         tc->tag   = TCc;
+         tc->u.con = e->Iex.Const.con;
+         break;
+      default:
+         /* Getting here is a serious error.  It means that the
+            presented arg isn't an IR atom, as it should be. */
+         vpanic("irExpr_to_TmpOrConst");
+   }
+}
+
+/* Convert a TmpOrConst to an atomic IRExpr*. */
+static IRExpr* tmpOrConst_to_IRExpr ( TmpOrConst* tc )
+{
+   switch (tc->tag) {
+      case TCc: return IRExpr_Const(tc->u.con);
+      case TCt: return IRExpr_RdTmp(tc->u.tmp);
+      default:  vpanic("tmpOrConst_to_IRExpr");
+   }
+}
+
+/* Convert a NULL terminated IRExpr* vector to an array of
+   TmpOrConsts, and a length. */
+static void irExprVec_to_TmpOrConsts ( /*OUT*/TmpOrConst** outs,
+                                       /*OUT*/Int* nOuts,
+                                       IRExpr** ins )
+{
+   Int i, n;
+   /* We have to make two passes, one to count, one to copy. */
+   for (n = 0; ins[n]; n++)
+      ;
+   *outs  = LibVEX_Alloc_inline(n * sizeof(TmpOrConst));
+   *nOuts = n;
+   /* and now copy .. */
+   for (i = 0; i < n; i++) {
+      IRExpr*     arg = ins[i];
+      TmpOrConst* dst = &(*outs)[i];
+      irExpr_to_TmpOrConst(dst, arg);
+   }
+}
+
/* An available expression, in flattened key form for CSE hashing and
   comparison.  Each variant encodes one RHS shape we are prepared to
   commonise; arguments are temps or inline constants.  The tag
   selects which member of 'u' is valid. */
typedef
   struct {
      enum { Ut, Btt, Btc, Bct, Cf64i, Ittt, Itct, Ittc, Itcc, GetIt,
             CCall, Load
      } tag;
      union {
         /* unop(tmp) */
         struct {
            IROp   op;
            IRTemp arg;
         } Ut;
         /* binop(tmp,tmp) */
         struct {
            IROp   op;
            IRTemp arg1;
            IRTemp arg2;
         } Btt;
         /* binop(tmp,const) */
         struct {
            IROp    op;
            IRTemp  arg1;
            IRConst con2;   /* constant stored by value, not pointer */
         } Btc;
         /* binop(const,tmp) */
         struct {
            IROp    op;
            IRConst con1;
            IRTemp  arg2;
         } Bct;
         /* F64i-style const */
         struct {
            ULong f64i;
         } Cf64i;
         /* ITE(tmp,tmp,tmp) */
         struct {
            IRTemp co;
            IRTemp e1;
            IRTemp e0;
         } Ittt;
         /* ITE(tmp,tmp,const) */
         struct {
            IRTemp  co;
            IRTemp  e1;
            IRConst con0;
         } Ittc;
         /* ITE(tmp,const,tmp) */
         struct {
            IRTemp  co;
            IRConst con1;
            IRTemp  e0;
         } Itct;
         /* ITE(tmp,const,const) */
         struct {
            IRTemp  co;
            IRConst con1;
            IRConst con0;
         } Itcc;
         /* GetI(descr,tmp,bias)*/
         struct {
            IRRegArray* descr;
            IRTemp      ix;
            Int         bias;
         } GetIt;
         /* Clean helper call */
         struct {
            IRCallee*   cee;
            TmpOrConst* args;   /* nArgs entries */
            Int         nArgs;
            IRType      retty;
         } CCall;
         /* Load(end,ty,addr) */
         struct {
            IREndness  end;
            IRType     ty;
            TmpOrConst addr;
         } Load;
      } u;
   }
   AvailExpr;
+
/* Structural equality on AvailExprs.  Tags are compared first; the
   LIKELY hint reflects that during CSE most candidate pairs have
   different tags.  Unknown tags are a hard error. */
static Bool eq_AvailExpr ( AvailExpr* a1, AvailExpr* a2 )
{
   if (LIKELY(a1->tag != a2->tag))
      return False;
   switch (a1->tag) {
      case Ut: 
         return toBool(
                a1->u.Ut.op == a2->u.Ut.op 
                && a1->u.Ut.arg == a2->u.Ut.arg);
      case Btt: 
         return toBool(
                a1->u.Btt.op == a2->u.Btt.op
                && a1->u.Btt.arg1 == a2->u.Btt.arg1
                && a1->u.Btt.arg2 == a2->u.Btt.arg2);
      case Btc: 
         return toBool(
                a1->u.Btc.op == a2->u.Btc.op
                && a1->u.Btc.arg1 == a2->u.Btc.arg1
                && eqIRConst(&a1->u.Btc.con2, &a2->u.Btc.con2));
      case Bct: 
         return toBool(
                a1->u.Bct.op == a2->u.Bct.op
                && a1->u.Bct.arg2 == a2->u.Bct.arg2
                && eqIRConst(&a1->u.Bct.con1, &a2->u.Bct.con1));
      case Cf64i: 
         return toBool(a1->u.Cf64i.f64i == a2->u.Cf64i.f64i);
      case Ittt:
         return toBool(a1->u.Ittt.co == a2->u.Ittt.co
                       && a1->u.Ittt.e1 == a2->u.Ittt.e1
                       && a1->u.Ittt.e0 == a2->u.Ittt.e0);
      case Ittc:
         return toBool(a1->u.Ittc.co == a2->u.Ittc.co
                       && a1->u.Ittc.e1 == a2->u.Ittc.e1
                       && eqIRConst(&a1->u.Ittc.con0, &a2->u.Ittc.con0));
      case Itct:
         return toBool(a1->u.Itct.co == a2->u.Itct.co
                       && eqIRConst(&a1->u.Itct.con1, &a2->u.Itct.con1)
                       && a1->u.Itct.e0 == a2->u.Itct.e0);
      case Itcc:
         return toBool(a1->u.Itcc.co == a2->u.Itcc.co
                       && eqIRConst(&a1->u.Itcc.con1, &a2->u.Itcc.con1)
                       && eqIRConst(&a1->u.Itcc.con0, &a2->u.Itcc.con0));
      case GetIt:
         return toBool(eqIRRegArray(a1->u.GetIt.descr, a2->u.GetIt.descr) 
                       && a1->u.GetIt.ix == a2->u.GetIt.ix
                       && a1->u.GetIt.bias == a2->u.GetIt.bias);
      case CCall: {
         /* Equal iff same callee, same arg count, pairwise-equal
            args.  Matching calls must then have matching return
            types; assert rather than compare. */
         Int  i, n;
         Bool eq = a1->u.CCall.nArgs == a2->u.CCall.nArgs
                   && eqIRCallee(a1->u.CCall.cee, a2->u.CCall.cee);
         if (eq) {
            n = a1->u.CCall.nArgs;
            for (i = 0; i < n; i++) {
               if (!eqTmpOrConst( &a1->u.CCall.args[i],
                                  &a2->u.CCall.args[i] )) {
                  eq = False;
                  break;
               }
            }
         }
         if (eq) vassert(a1->u.CCall.retty == a2->u.CCall.retty);
         return eq;  
      }
      case Load: {
         Bool eq = toBool(a1->u.Load.end == a2->u.Load.end
                          && a1->u.Load.ty == a2->u.Load.ty
                          && eqTmpOrConst(&a1->u.Load.addr, &a2->u.Load.addr));
         return eq;
      }
      default:
         vpanic("eq_AvailExpr");
   }
}
+
/* Reconstruct a real IRExpr tree from an AvailExpr.  This is the
   inverse of irExpr_to_AvailExpr: tmp fields become IRExpr_RdTmp
   nodes, and constants that the AvailExpr holds by value are copied
   into freshly allocated IRConst nodes, since IRExpr_Const requires
   a pointer to a heap-resident constant. */
static IRExpr* availExpr_to_IRExpr ( AvailExpr* ae ) 
{
   IRConst *con, *con0, *con1;
   switch (ae->tag) {
      case Ut:   /* unop(tmp) */
         return IRExpr_Unop( ae->u.Ut.op, IRExpr_RdTmp(ae->u.Ut.arg) );
      case Btt:  /* binop(tmp, tmp) */
         return IRExpr_Binop( ae->u.Btt.op,
                              IRExpr_RdTmp(ae->u.Btt.arg1),
                              IRExpr_RdTmp(ae->u.Btt.arg2) );
      case Btc:  /* binop(tmp, const) -- const needs a fresh heap copy */
         con = LibVEX_Alloc_inline(sizeof(IRConst));
         *con = ae->u.Btc.con2;
         return IRExpr_Binop( ae->u.Btc.op,
                              IRExpr_RdTmp(ae->u.Btc.arg1), 
                              IRExpr_Const(con) );
      case Bct:  /* binop(const, tmp) */
         con = LibVEX_Alloc_inline(sizeof(IRConst));
         *con = ae->u.Bct.con1;
         return IRExpr_Binop( ae->u.Bct.op,
                              IRExpr_Const(con), 
                              IRExpr_RdTmp(ae->u.Bct.arg2) );
      case Cf64i:  /* F64i literal */
         return IRExpr_Const(IRConst_F64i(ae->u.Cf64i.f64i));
      case Ittt:  /* ITE(tmp, tmp, tmp) */
         return IRExpr_ITE(IRExpr_RdTmp(ae->u.Ittt.co), 
                           IRExpr_RdTmp(ae->u.Ittt.e1), 
                           IRExpr_RdTmp(ae->u.Ittt.e0));
      case Ittc:  /* ITE(tmp, tmp, const) */
         con0 = LibVEX_Alloc_inline(sizeof(IRConst));
         *con0 = ae->u.Ittc.con0;
         return IRExpr_ITE(IRExpr_RdTmp(ae->u.Ittc.co), 
                           IRExpr_RdTmp(ae->u.Ittc.e1),
                           IRExpr_Const(con0));
      case Itct:  /* ITE(tmp, const, tmp) */
         con1 = LibVEX_Alloc_inline(sizeof(IRConst));
         *con1 = ae->u.Itct.con1;
         return IRExpr_ITE(IRExpr_RdTmp(ae->u.Itct.co), 
                           IRExpr_Const(con1),
                           IRExpr_RdTmp(ae->u.Itct.e0));

      case Itcc:  /* ITE(tmp, const, const) */
         con0 = LibVEX_Alloc_inline(sizeof(IRConst));
         con1 = LibVEX_Alloc_inline(sizeof(IRConst));
         *con0 = ae->u.Itcc.con0;
         *con1 = ae->u.Itcc.con1;
         return IRExpr_ITE(IRExpr_RdTmp(ae->u.Itcc.co), 
                           IRExpr_Const(con1),
                           IRExpr_Const(con0));
      case GetIt:  /* GetI(descr)[tmp, bias] -- descr shared, it is immutable */
         return IRExpr_GetI(ae->u.GetIt.descr,
                            IRExpr_RdTmp(ae->u.GetIt.ix),
                            ae->u.GetIt.bias);
      case CCall: {
         /* Rebuild the NULL-terminated argument vector from the
            flattened TmpOrConst array. */
         Int i, n = ae->u.CCall.nArgs;
         vassert(n >= 0);
         IRExpr** vec = LibVEX_Alloc_inline((n+1) * sizeof(IRExpr*));
         vec[n] = NULL;
         for (i = 0; i < n; i++) {
            vec[i] = tmpOrConst_to_IRExpr(&ae->u.CCall.args[i]);
         }
         return IRExpr_CCall(ae->u.CCall.cee,
                             ae->u.CCall.retty,
                             vec);
      }
      case Load:
         return IRExpr_Load(ae->u.Load.end, ae->u.Load.ty,
                            tmpOrConst_to_IRExpr(&ae->u.Load.addr));
      default:
         vpanic("availExpr_to_IRExpr");
   }
}
+
+inline
+static IRTemp subst_AvailExpr_Temp ( HashHW* env, IRTemp tmp )
+{
+   HWord res;
+   /* env :: IRTemp -> IRTemp */
+   if (lookupHHW( env, &res, (HWord)tmp ))
+      return (IRTemp)res;
+   else
+      return tmp;
+}
+
+inline
+static void subst_AvailExpr_TmpOrConst ( /*MB_MOD*/TmpOrConst* tc,
+                                          HashHW* env )
+{
+   /* env :: IRTemp -> IRTemp */
+   if (tc->tag == TCt) {
+      tc->u.tmp = subst_AvailExpr_Temp( env, tc->u.tmp );
+   }
+}
+
+static void subst_AvailExpr ( HashHW* env, AvailExpr* ae )
+{
+   /* env :: IRTemp -> IRTemp */
+   switch (ae->tag) {
+      case Ut:
+         ae->u.Ut.arg = subst_AvailExpr_Temp( env, ae->u.Ut.arg );
+         break;
+      case Btt:
+         ae->u.Btt.arg1 = subst_AvailExpr_Temp( env, ae->u.Btt.arg1 );
+         ae->u.Btt.arg2 = subst_AvailExpr_Temp( env, ae->u.Btt.arg2 );
+         break;
+      case Btc:
+         ae->u.Btc.arg1 = subst_AvailExpr_Temp( env, ae->u.Btc.arg1 );
+         break;
+      case Bct:
+         ae->u.Bct.arg2 = subst_AvailExpr_Temp( env, ae->u.Bct.arg2 );
+         break;
+      case Cf64i:
+         break;
+      case Ittt:
+         ae->u.Ittt.co = subst_AvailExpr_Temp( env, ae->u.Ittt.co );
+         ae->u.Ittt.e1 = subst_AvailExpr_Temp( env, ae->u.Ittt.e1 );
+         ae->u.Ittt.e0 = subst_AvailExpr_Temp( env, ae->u.Ittt.e0 );
+         break;
+      case Ittc:
+         ae->u.Ittc.co = subst_AvailExpr_Temp( env, ae->u.Ittc.co );
+         ae->u.Ittc.e1 = subst_AvailExpr_Temp( env, ae->u.Ittc.e1 );
+         break;
+      case Itct:
+         ae->u.Itct.co = subst_AvailExpr_Temp( env, ae->u.Itct.co );
+         ae->u.Itct.e0 = subst_AvailExpr_Temp( env, ae->u.Itct.e0 );
+         break;
+      case Itcc:
+         ae->u.Itcc.co = subst_AvailExpr_Temp( env, ae->u.Itcc.co );
+         break;
+      case GetIt:
+         ae->u.GetIt.ix = subst_AvailExpr_Temp( env, ae->u.GetIt.ix );
+         break;
+      case CCall: {
+         Int i, n = ae->u.CCall.nArgs;;
+         for (i = 0; i < n; i++) {
+            subst_AvailExpr_TmpOrConst(&ae->u.CCall.args[i], env);
+         }
+         break;
+      }
+      case Load:
+         subst_AvailExpr_TmpOrConst(&ae->u.Load.addr, env);
+         break;
+      default: 
+         vpanic("subst_AvailExpr");
+   }
+}
+
/* Try to convert IRExpr |e| into an AvailExpr -- one of the
   flattened forms the CSE pass can hash and compare.  Only
   expressions whose operands are already atoms (tmps or constants)
   are representable; anything else yields NULL, meaning "not
   available for CSE".  Loads are converted only when
   |allowLoadsToBeCSEd| is True. */
static AvailExpr* irExpr_to_AvailExpr ( IRExpr* e, Bool allowLoadsToBeCSEd )
{
   AvailExpr* ae;

   switch (e->tag) {
      case Iex_Unop:
         /* unop(tmp) -> Ut */
         if (e->Iex.Unop.arg->tag == Iex_RdTmp) {
            ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
            ae->tag      = Ut;
            ae->u.Ut.op  = e->Iex.Unop.op;
            ae->u.Ut.arg = e->Iex.Unop.arg->Iex.RdTmp.tmp;
            return ae;
         }
         break;

      case Iex_Binop:
         /* binop(tmp,tmp) -> Btt; binop(tmp,const) -> Btc;
            binop(const,tmp) -> Bct.  binop(const,const) is left to
            the constant folder, not CSE. */
         if (e->Iex.Binop.arg1->tag == Iex_RdTmp) {
            if (e->Iex.Binop.arg2->tag == Iex_RdTmp) {
               ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
               ae->tag        = Btt;
               ae->u.Btt.op   = e->Iex.Binop.op;
               ae->u.Btt.arg1 = e->Iex.Binop.arg1->Iex.RdTmp.tmp;
               ae->u.Btt.arg2 = e->Iex.Binop.arg2->Iex.RdTmp.tmp;
               return ae;
            }
            if (e->Iex.Binop.arg2->tag == Iex_Const) {
               ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
               ae->tag        = Btc;
               ae->u.Btc.op   = e->Iex.Binop.op;
               ae->u.Btc.arg1 = e->Iex.Binop.arg1->Iex.RdTmp.tmp;
               ae->u.Btc.con2 = *(e->Iex.Binop.arg2->Iex.Const.con);
               return ae;
            }
         } else if (e->Iex.Binop.arg1->tag == Iex_Const
                    && e->Iex.Binop.arg2->tag == Iex_RdTmp) {
            ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
            ae->tag        = Bct;
            ae->u.Bct.op   = e->Iex.Binop.op;
            ae->u.Bct.arg2 = e->Iex.Binop.arg2->Iex.RdTmp.tmp;
            ae->u.Bct.con1 = *(e->Iex.Binop.arg1->Iex.Const.con);
            return ae;
         }
         break;

      case Iex_Const:
         /* Only F64i literals are CSEd as constants. */
         if (e->Iex.Const.con->tag == Ico_F64i) {
            ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
            ae->tag          = Cf64i;
            ae->u.Cf64i.f64i = e->Iex.Const.con->Ico.F64i;
            return ae;
         }
         break;

      case Iex_ITE:
         /* ITE with a tmp condition; the two arms may each be a tmp
            or a constant, giving the four forms Ittt/Itct/Ittc/Itcc. */
         if (e->Iex.ITE.cond->tag == Iex_RdTmp) {
            if (e->Iex.ITE.iffalse->tag == Iex_RdTmp) {
               if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) {
                  ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
                  ae->tag       = Ittt;
                  ae->u.Ittt.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
                  ae->u.Ittt.e1 = e->Iex.ITE.iftrue->Iex.RdTmp.tmp;
                  ae->u.Ittt.e0 = e->Iex.ITE.iffalse->Iex.RdTmp.tmp;
                  return ae;
               }
               if (e->Iex.ITE.iftrue->tag == Iex_Const) {
                  ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
                  ae->tag       = Itct;
                  ae->u.Itct.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
                  ae->u.Itct.con1 = *(e->Iex.ITE.iftrue->Iex.Const.con);
                  ae->u.Itct.e0 = e->Iex.ITE.iffalse->Iex.RdTmp.tmp;
                  return ae;
               }
            } else if (e->Iex.ITE.iffalse->tag == Iex_Const) {
               if (e->Iex.ITE.iftrue->tag == Iex_RdTmp) {
                  ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
                  ae->tag       = Ittc;
                  ae->u.Ittc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
                  ae->u.Ittc.e1 = e->Iex.ITE.iftrue->Iex.RdTmp.tmp;
                  ae->u.Ittc.con0 = *(e->Iex.ITE.iffalse->Iex.Const.con);
                  return ae;
               }
               if (e->Iex.ITE.iftrue->tag == Iex_Const) {
                  ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
                  ae->tag       = Itcc;
                  ae->u.Itcc.co = e->Iex.ITE.cond->Iex.RdTmp.tmp;
                  ae->u.Itcc.con1 = *(e->Iex.ITE.iftrue->Iex.Const.con);
                  ae->u.Itcc.con0 = *(e->Iex.ITE.iffalse->Iex.Const.con);
                  return ae;
               }
            }
         }
         break;

      case Iex_GetI:
         /* GetI with a tmp index -> GetIt */
         if (e->Iex.GetI.ix->tag == Iex_RdTmp) {
            ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
            ae->tag           = GetIt;
            ae->u.GetIt.descr = e->Iex.GetI.descr;
            ae->u.GetIt.ix    = e->Iex.GetI.ix->Iex.RdTmp.tmp;
            ae->u.GetIt.bias  = e->Iex.GetI.bias;
            return ae;
         }
         break;

      case Iex_CCall:
         ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
         ae->tag = CCall;
         /* Ok to share only the cee, since it is immutable. */
         ae->u.CCall.cee   = e->Iex.CCall.cee;
         ae->u.CCall.retty = e->Iex.CCall.retty;
         /* irExprVec_to_TmpOrConsts will assert if the args are
            neither tmps nor constants, but that's ok .. that's all they
            should be. */
         irExprVec_to_TmpOrConsts(
                                  &ae->u.CCall.args, &ae->u.CCall.nArgs,
                                  e->Iex.CCall.args
                                 );
         return ae;

      case Iex_Load:
         /* If the caller of do_cse_BB has requested that loads also
            be CSEd, convert them into AvailExprs.  If not, we'll just
            return NULL here, and the load never becomes considered
            "available", which effectively disables CSEing of them, as
            desired. */
         if (allowLoadsToBeCSEd) {
            ae = LibVEX_Alloc_inline(sizeof(AvailExpr));
            ae->tag        = Load;
            ae->u.Load.end = e->Iex.Load.end;
            ae->u.Load.ty  = e->Iex.Load.ty;
            irExpr_to_TmpOrConst(&ae->u.Load.addr, e->Iex.Load.addr);
            return ae;
         }
         break;

      default:
         break;
   }

   /* Not a representable form. */
   return NULL;
}
+
+
+/* The BB is modified in-place.  Returns True if any changes were
+   made.  The caller can choose whether or not loads should be CSEd.
+   In the normal course of things we don't do that, since CSEing loads
+   is something of a dodgy proposition if the guest program is doing
+   some screwy stuff to do with races and spinloops. */
+
static Bool do_cse_BB ( IRSB* bb, Bool allowLoadsToBeCSEd )
{
   Int        i, j, paranoia;
   IRTemp     t, q;
   IRStmt*    st;
   AvailExpr* eprime;
   AvailExpr* ae;
   Bool       invalidate;
   Bool       anyDone = False;   /* did we change anything? */

   HashHW* tenv = newHHW(); /* :: IRTemp -> IRTemp */
   HashHW* aenv = newHHW(); /* :: AvailExpr* -> IRTemp */

   /* IRTemps are stored as HWord hash keys/values below. */
   vassert(sizeof(IRTemp) <= sizeof(HWord));

   if (0) { ppIRSB(bb); vex_printf("\n\n"); }

   /* Iterate forwards over the stmts.  
      On seeing "t = E", where E is one of the AvailExpr forms:
         let E' = apply tenv substitution to E
         search aenv for E'
            if a mapping E' -> q is found, 
               replace this stmt by "t = q"
               and add binding t -> q to tenv
            else
               add binding E' -> t to aenv
               replace this stmt by "t = E'"

      Other statements are only interesting to the extent that they
      might invalidate some of the expressions in aenv.  So there is
      an invalidate-bindings check for each statement seen.
   */
   for (i = 0; i < bb->stmts_used; i++) {
      st = bb->stmts[i];

      /* ------ BEGIN invalidate aenv bindings ------ */
      /* This is critical: remove from aenv any E' -> .. bindings
         which might be invalidated by this statement.  The only
         vulnerable kind of bindings are the GetI and Load kinds.
            Dirty call - dump (paranoia level -> 2) 
            Store      - dump (ditto) 
            Put, PutI  - dump unless no-overlap is proven (.. -> 1)
         Uses getAliasingRelation_IC and getAliasingRelation_II
         to do the no-overlap assessments needed for Put/PutI.
      */
      switch (st->tag) {
         case Ist_Dirty: case Ist_Store: case Ist_MBE:
         case Ist_CAS: case Ist_LLSC:
         case Ist_StoreG:
            paranoia = 2; break;
         case Ist_Put: case Ist_PutI: 
            paranoia = 1; break;
         case Ist_NoOp: case Ist_IMark: case Ist_AbiHint: 
         case Ist_WrTmp: case Ist_Exit: case Ist_LoadG:
            paranoia = 0; break;
         default: 
            vpanic("do_cse_BB(1)");
      }

      if (paranoia > 0) {
         for (j = 0; j < aenv->used; j++) {
            if (!aenv->inuse[j])
               continue;
            ae = (AvailExpr*)aenv->key[j];
            /* Only GetI and Load bindings can be invalidated. */
            if (ae->tag != GetIt && ae->tag != Load) 
               continue;
            invalidate = False;
            if (paranoia >= 2) {
               /* Memory/guest state might be arbitrarily clobbered:
                  dump the binding unconditionally. */
               invalidate = True;
            } else {
               vassert(paranoia == 1);
               if (ae->tag == Load) {
                  /* Loads can be invalidated by anything that could
                     possibly touch memory.  But in that case we
                     should have |paranoia| == 2 and we won't get
                     here.  So there's nothing to do; we don't have to
                     invalidate the load. */
               }
               else
               if (st->tag == Ist_Put) {
                  if (getAliasingRelation_IC(
                         ae->u.GetIt.descr, 
                         IRExpr_RdTmp(ae->u.GetIt.ix), 
                         st->Ist.Put.offset, 
                         typeOfIRExpr(bb->tyenv,st->Ist.Put.data) 
                      ) != NoAlias) 
                     invalidate = True;
               }
               else 
               if (st->tag == Ist_PutI) {
                  IRPutI *puti = st->Ist.PutI.details;
                  if (getAliasingRelation_II(
                         ae->u.GetIt.descr, 
                         IRExpr_RdTmp(ae->u.GetIt.ix), 
                         ae->u.GetIt.bias,
                         puti->descr,
                         puti->ix,
                         puti->bias
                      ) != NoAlias)
                     invalidate = True;
               }
               else 
                  vpanic("do_cse_BB(2)");
            }

            if (invalidate) {
               aenv->inuse[j] = False;
               aenv->key[j]   = (HWord)NULL;  /* be sure */
            }
         } /* for j */
      } /* paranoia > 0 */

      /* ------ END invalidate aenv bindings ------ */

      /* ignore not-interestings */
      if (st->tag != Ist_WrTmp)
         continue;

      t = st->Ist.WrTmp.tmp;
      eprime = irExpr_to_AvailExpr(st->Ist.WrTmp.data, allowLoadsToBeCSEd);
      /* ignore if not of AvailExpr form */
      if (!eprime)
         continue;

      /* vex_printf("considering: " ); ppIRStmt(st); vex_printf("\n"); */

      /* apply tenv */
      subst_AvailExpr( tenv, eprime );

      /* search aenv for eprime, unfortunately the hard way */
      for (j = 0; j < aenv->used; j++)
         if (aenv->inuse[j] && eq_AvailExpr(eprime, (AvailExpr*)aenv->key[j]))
            break;

      if (j < aenv->used) {
         /* A binding E' -> q was found.  Replace stmt by "t = q" and
            note the t->q binding in tenv. */
         /* (this is the core of the CSE action) */
         q = (IRTemp)aenv->val[j];
         bb->stmts[i] = IRStmt_WrTmp( t, IRExpr_RdTmp(q) );
         addToHHW( tenv, (HWord)t, (HWord)q );
         anyDone = True;
      } else {
         /* No binding was found, so instead we add E' -> t to our
            collection of available expressions, replace this stmt
            with "t = E'", and move on. */
         bb->stmts[i] = IRStmt_WrTmp( t, availExpr_to_IRExpr(eprime) );
         addToHHW( aenv, (HWord)eprime, (HWord)t );
      }
   }

   /*
   ppIRSB(bb);
   sanityCheckIRSB(bb, Ity_I32);
   vex_printf("\n\n");
   */
   return anyDone;
}
+
+
+/*---------------------------------------------------------------*/
+/*--- Add32/Sub32 chain collapsing                            ---*/
+/*---------------------------------------------------------------*/
+
+/* ----- Helper functions for Add32/Sub32 chain collapsing ----- */
+
+/* Is this expression "Add32(tmp,const)" or "Sub32(tmp,const)" ?  If
+   yes, set *tmp and *i32 appropriately.  *i32 is set as if the
+   root node is Add32, not Sub32. */
+
+static Bool isAdd32OrSub32 ( IRExpr* e, IRTemp* tmp, Int* i32 )
+{ 
+   if (e->tag != Iex_Binop)
+      return False;
+   if (e->Iex.Binop.op != Iop_Add32 && e->Iex.Binop.op != Iop_Sub32)
+      return False;
+   if (e->Iex.Binop.arg1->tag != Iex_RdTmp)
+      return False;
+   if (e->Iex.Binop.arg2->tag != Iex_Const)
+      return False;
+   *tmp = e->Iex.Binop.arg1->Iex.RdTmp.tmp;
+   *i32 = (Int)(e->Iex.Binop.arg2->Iex.Const.con->Ico.U32);
+   if (e->Iex.Binop.op == Iop_Sub32)
+      *i32 = -*i32;
+   return True;
+}
+
+
+/* Figure out if tmp can be expressed as tmp2 +32 const, for some
+   other tmp2.  Scan backwards from the specified start point -- an
+   optimisation. */
+
static Bool collapseChain ( IRSB* bb, Int startHere,
                            IRTemp tmp,
                            IRTemp* tmp2, Int* i32 )
{
   Int     j, ii;
   IRTemp  vv;
   IRStmt* st;
   IRExpr* e;

   /* the (var, con) pair contain the current 'representation' for
      'tmp'.  We start with 'tmp + 0'.  */
   IRTemp var = tmp;
   Int    con = 0;

   /* Scan backwards to see if tmp can be replaced by some other tmp
     +/- a constant. */
   for (j = startHere; j >= 0; j--) {
      st = bb->stmts[j];
      if (st->tag != Ist_WrTmp) 
         continue;
      if (st->Ist.WrTmp.tmp != var)
         continue;
      /* Found the (unique, in flat IR) definition of |var|.  If it
         isn't itself an Add32/Sub32 chain link, the chain ends here. */
      e = st->Ist.WrTmp.data;
      if (!isAdd32OrSub32(e, &vv, &ii))
         break;
      /* Follow the link: var == vv + ii, so accumulate the offset
         and keep scanning for vv's definition. */
      var = vv;
      con += ii;
   }
   if (j == -1)
      /* no earlier binding for var .. ill-formed IR */
      vpanic("collapseChain");

   /* so, did we find anything interesting? */
   if (var == tmp)
      return False; /* no .. */
      
   *tmp2 = var;
   *i32  = con;
   return True;
}
+
+
+/* ------- Main function for Add32/Sub32 chain collapsing ------ */
+
+static void collapse_AddSub_chains_BB ( IRSB* bb )
+{
+   IRStmt *st;
+   IRTemp var, var2;
+   Int    i, con, con2;
+
+   for (i = bb->stmts_used-1; i >= 0; i--) {
+      st = bb->stmts[i];
+      if (st->tag == Ist_NoOp)
+         continue;
+
+      /* Try to collapse 't1 = Add32/Sub32(t2, con)'. */
+
+      if (st->tag == Ist_WrTmp
+          && isAdd32OrSub32(st->Ist.WrTmp.data, &var, &con)) {
+
+         /* So e1 is of the form Add32(var,con) or Sub32(var,-con).
+            Find out if var can be expressed as var2 + con2. */
+         if (collapseChain(bb, i-1, var, &var2, &con2)) {
+            if (DEBUG_IROPT) {
+               vex_printf("replacing1 ");
+               ppIRStmt(st);
+               vex_printf(" with ");
+            }
+            con2 += con;
+            bb->stmts[i] 
+               = IRStmt_WrTmp(
+                    st->Ist.WrTmp.tmp,
+                    (con2 >= 0) 
+                      ? IRExpr_Binop(Iop_Add32, 
+                                     IRExpr_RdTmp(var2),
+                                     IRExpr_Const(IRConst_U32(con2)))
+                      : IRExpr_Binop(Iop_Sub32, 
+                                     IRExpr_RdTmp(var2),
+                                     IRExpr_Const(IRConst_U32(-con2)))
+                 );
+            if (DEBUG_IROPT) {
+               ppIRStmt(bb->stmts[i]);
+               vex_printf("\n");
+            }
+         }
+
+         continue;
+      }
+
+      /* Try to collapse 't1 = GetI[t2, con]'. */
+
+      if (st->tag == Ist_WrTmp
+          && st->Ist.WrTmp.data->tag == Iex_GetI
+          && st->Ist.WrTmp.data->Iex.GetI.ix->tag == Iex_RdTmp
+          && collapseChain(bb, i-1, st->Ist.WrTmp.data->Iex.GetI.ix
+                                      ->Iex.RdTmp.tmp, &var2, &con2)) {
+         if (DEBUG_IROPT) {
+            vex_printf("replacing3 ");
+            ppIRStmt(st);
+            vex_printf(" with ");
+         }
+         con2 += st->Ist.WrTmp.data->Iex.GetI.bias;
+         bb->stmts[i]
+            = IRStmt_WrTmp(
+                 st->Ist.WrTmp.tmp,
+                 IRExpr_GetI(st->Ist.WrTmp.data->Iex.GetI.descr,
+                             IRExpr_RdTmp(var2),
+                             con2));
+         if (DEBUG_IROPT) {
+            ppIRStmt(bb->stmts[i]);
+            vex_printf("\n");
+         }
+         continue;
+      }
+
+      /* Perhaps st is PutI[t, con] ? */
+      IRPutI *puti = st->Ist.PutI.details;
+      if (st->tag == Ist_PutI
+          && puti->ix->tag == Iex_RdTmp
+          && collapseChain(bb, i-1, puti->ix->Iex.RdTmp.tmp, 
+                               &var2, &con2)) {
+         if (DEBUG_IROPT) {
+            vex_printf("replacing2 ");
+            ppIRStmt(st);
+            vex_printf(" with ");
+         }
+         con2 += puti->bias;
+         bb->stmts[i]
+            = IRStmt_PutI(mkIRPutI(puti->descr,
+                                   IRExpr_RdTmp(var2),
+                                   con2,
+                                   puti->data));
+         if (DEBUG_IROPT) {
+            ppIRStmt(bb->stmts[i]);
+            vex_printf("\n");
+         }
+         continue;
+      }
+
+   } /* for */
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- PutI/GetI transformations                               ---*/
+/*---------------------------------------------------------------*/
+
+/* Given the parts (descr, tmp, bias) for a GetI, scan backwards from
+   the given starting point to find, if any, a PutI which writes
+   exactly the same piece of guest state, and so return the expression
+   that the PutI writes.  This is the core of PutI-GetI forwarding. */
+
static 
IRExpr* findPutI ( IRSB* bb, Int startHere,
                   IRRegArray* descrG, IRExpr* ixG, Int biasG )
{
   Int        j;
   IRStmt*    st;
   GSAliasing relation;

   if (0) {
      vex_printf("\nfindPutI ");
      ppIRRegArray(descrG);
      vex_printf(" ");
      ppIRExpr(ixG);
      vex_printf(" %d\n", biasG);
   }

   /* Scan backwards in bb from startHere to find a suitable PutI
      binding for (descrG, ixG, biasG), if any. */

   for (j = startHere; j >= 0; j--) {
      st = bb->stmts[j];
      if (st->tag == Ist_NoOp) 
         continue;

      if (st->tag == Ist_Put) {
         /* Non-indexed Put.  This can't give a binding, but we do
            need to check it doesn't invalidate the search by
            overlapping any part of the indexed guest state. */

         relation
            = getAliasingRelation_IC(
                 descrG, ixG,
                 st->Ist.Put.offset,
                 typeOfIRExpr(bb->tyenv,st->Ist.Put.data) );

         if (relation == NoAlias) {
            /* we're OK; keep going */
            continue;
         } else {
            /* relation == UnknownAlias || relation == ExactAlias */
            /* If this assertion fails, we've found a Put which writes
               an area of guest state which is read by a GetI.  Which
               is unlikely (although not per se wrong). */
            vassert(relation != ExactAlias);
            /* This Put potentially writes guest state that the GetI
               reads; we must fail. */
            return NULL;
         }
      }

      if (st->tag == Ist_PutI) {
         IRPutI *puti = st->Ist.PutI.details;

         relation = getAliasingRelation_II(
                       descrG, ixG, biasG,
                       puti->descr,
                       puti->ix,
                       puti->bias
                    );

         if (relation == NoAlias) {
            /* This PutI definitely doesn't overlap.  Ignore it and
               keep going. */
            continue; /* the for j loop */
         }

         if (relation == UnknownAlias) {
            /* We don't know if this PutI writes to the same guest
               state that the GetI, or not.  So we have to give up. */
            return NULL;
         }

         /* Otherwise, we've found what we're looking for.  */
         vassert(relation == ExactAlias);
         return puti->data;

      } /* if (st->tag == Ist_PutI) */

      if (st->tag == Ist_Dirty) {
         /* Be conservative.  If the dirty call has any guest effects at
            all, give up.  We could do better -- only give up if there
            are any guest writes/modifies. */
         if (st->Ist.Dirty.details->nFxState > 0)
            return NULL;
      }

      /* Any other statement kind cannot touch guest state; keep
         scanning backwards. */

   } /* for */

   /* No valid replacement was found. */
   return NULL;
}
+
+
+
+/* Assuming pi is a PutI stmt, is s2 identical to it (in the sense
+   that it writes exactly the same piece of guest state) ?  Safe
+   answer: False. */
+
+static Bool identicalPutIs ( IRStmt* pi, IRStmt* s2 )
+{
+   vassert(pi->tag == Ist_PutI);
+   if (s2->tag != Ist_PutI)
+      return False;
+
+   IRPutI *p1 = pi->Ist.PutI.details;
+   IRPutI *p2 = s2->Ist.PutI.details;
+
+   return toBool(
+          getAliasingRelation_II( 
+             p1->descr, p1->ix, p1->bias, 
+             p2->descr, p2->ix, p2->bias
+          )
+          == ExactAlias
+          );
+}
+
+
+/* Assuming pi is a PutI stmt, is s2 a Get/GetI/Put/PutI which might
+   overlap it?  Safe answer: True.  Note, we could do a lot better
+   than this if needed. */
+
static 
Bool guestAccessWhichMightOverlapPutI ( 
        IRTypeEnv* tyenv, IRStmt* pi, IRStmt* s2 
     )
{
   GSAliasing relation;
   UInt       minoffP, maxoffP;

   vassert(pi->tag == Ist_PutI);

   IRPutI *p1 = pi->Ist.PutI.details;

   /* NOTE(review): minoffP/maxoffP are filled in here but appear
      unused in the rest of this function -- possibly a leftover from
      an earlier bounds-based check; confirm before removing. */
   getArrayBounds(p1->descr, &minoffP, &maxoffP);
   switch (s2->tag) {

      case Ist_NoOp:
      case Ist_IMark:
         /* Cannot touch guest state at all. */
         return False;

      case Ist_MBE:
      case Ist_AbiHint:
         /* just be paranoid ... these should be rare. */
         return True;

      case Ist_CAS:
         /* This is unbelievably lame, but it's probably not
            significant from a performance point of view.  Really, a
            CAS is a load-store op, so it should be safe to say False.
            However .. */
         return True;

      case Ist_Dirty:
         /* If the dirty call has any guest effects at all, give up.
            Probably could do better. */
         if (s2->Ist.Dirty.details->nFxState > 0)
            return True;
         return False;

      case Ist_Put:
         vassert(isIRAtom(s2->Ist.Put.data));
         relation 
            = getAliasingRelation_IC(
                 p1->descr, p1->ix,
                 s2->Ist.Put.offset, 
                 typeOfIRExpr(tyenv,s2->Ist.Put.data)
              );
         goto have_relation;

      case Ist_PutI: {
         IRPutI *p2 = s2->Ist.PutI.details;

         vassert(isIRAtom(p2->ix));
         vassert(isIRAtom(p2->data));
         relation
            = getAliasingRelation_II(
                 p1->descr, p1->ix, p1->bias, 
                 p2->descr, p2->ix, p2->bias
              );
         goto have_relation;
      }

      case Ist_WrTmp:
         /* Only GetI and Get expressions read guest state. */
         if (s2->Ist.WrTmp.data->tag == Iex_GetI) {
            relation
               = getAliasingRelation_II(
                    p1->descr, p1->ix, p1->bias, 
                    s2->Ist.WrTmp.data->Iex.GetI.descr,
                    s2->Ist.WrTmp.data->Iex.GetI.ix,
                    s2->Ist.WrTmp.data->Iex.GetI.bias
                 );
            goto have_relation;
         }
         if (s2->Ist.WrTmp.data->tag == Iex_Get) {
            relation
               = getAliasingRelation_IC(
                    p1->descr, p1->ix,
                    s2->Ist.WrTmp.data->Iex.Get.offset,
                    s2->Ist.WrTmp.data->Iex.Get.ty
                 );
            goto have_relation;
         }
         return False;

      case Ist_Store:
         /* Stores touch memory, not guest state. */
         vassert(isIRAtom(s2->Ist.Store.addr));
         vassert(isIRAtom(s2->Ist.Store.data));
         return False;

      default:
         vex_printf("\n"); ppIRStmt(s2); vex_printf("\n");
         vpanic("guestAccessWhichMightOverlapPutI");
   }

  have_relation:
   if (relation == NoAlias)
      return False;
   else
      return True; /* ExactAlias or UnknownAlias */
}
+
+
+
+/* ---------- PutI/GetI transformations main functions --------- */
+
+/* Remove redundant GetIs, to the extent that they can be detected.
+   bb is modified in-place. */
+
+static
+void do_redundant_GetI_elimination ( IRSB* bb )
+{
+   Int     i;
+   IRStmt* st;
+
+   for (i = bb->stmts_used-1; i >= 0; i--) {
+      st = bb->stmts[i];
+      if (st->tag == Ist_NoOp)
+         continue;
+
+      if (st->tag == Ist_WrTmp
+          && st->Ist.WrTmp.data->tag == Iex_GetI
+          && st->Ist.WrTmp.data->Iex.GetI.ix->tag == Iex_RdTmp) {
+         IRRegArray* descr = st->Ist.WrTmp.data->Iex.GetI.descr;
+         IRExpr*     ix    = st->Ist.WrTmp.data->Iex.GetI.ix;
+         Int         bias  = st->Ist.WrTmp.data->Iex.GetI.bias;
+         IRExpr*     replacement = findPutI(bb, i-1, descr, ix, bias);
+         if (replacement 
+             && isIRAtom(replacement)
+             /* Make sure we're doing a type-safe transformation! */
+             && typeOfIRExpr(bb->tyenv, replacement) == descr->elemTy) {
+            if (DEBUG_IROPT) {
+               vex_printf("rGI:  "); 
+               ppIRExpr(st->Ist.WrTmp.data);
+               vex_printf(" -> ");
+               ppIRExpr(replacement);
+               vex_printf("\n");
+            }
+            bb->stmts[i] = IRStmt_WrTmp(st->Ist.WrTmp.tmp, replacement);
+         }
+      }
+   }
+
+}
+
+
+/* Remove redundant PutIs, to the extent which they can be detected.
+   bb is modified in-place. */
+
+static
+void do_redundant_PutI_elimination ( IRSB* bb, VexRegisterUpdates pxControl )
+{
+   Int    i, j;
+   Bool   delete;
+   IRStmt *st, *stj;
+
+   vassert(pxControl < VexRegUpdAllregsAtEachInsn);
+
+   for (i = 0; i < bb->stmts_used; i++) {
+      st = bb->stmts[i];
+      if (st->tag != Ist_PutI)
+         continue;
+      /* Ok, search forwards from here to see if we can find another
+         PutI which makes this one redundant, and dodging various 
+         hazards.  Search forwards:
+         * If conditional exit, give up (because anything after that 
+           does not postdominate this put).
+         * If a Get which might overlap, give up (because this PutI 
+           not necessarily dead).
+         * If a Put which is identical, stop with success.
+         * If a Put which might overlap, but is not identical, give up.
+         * If a dirty helper call which might write guest state, give up.
+         * If a Put which definitely doesn't overlap, or any other 
+           kind of stmt, continue.
+      */
+      delete = False;
+      for (j = i+1; j < bb->stmts_used; j++) {
+         stj = bb->stmts[j];
+         if (stj->tag == Ist_NoOp) 
+            continue;
+         if (identicalPutIs(st, stj)) {
+            /* success! */
+            delete = True;
+            break;
+         }
+         if (stj->tag == Ist_Exit)
+            /* give up */
+            break;
+         if (st->tag == Ist_Dirty)
+            /* give up; could do better here */
+            break;
+         if (guestAccessWhichMightOverlapPutI(bb->tyenv, st, stj))
+            /* give up */
+           break;
+      }
+
+      if (delete) {
+         if (DEBUG_IROPT) {
+            vex_printf("rPI:  "); 
+            ppIRStmt(st); 
+            vex_printf("\n");
+         }
+         bb->stmts[i] = IRStmt_NoOp();
+      }
+
+   }
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- Loop unrolling                                          ---*/
+/*---------------------------------------------------------------*/
+
+/* Adjust all tmp values (names) in e by delta.  e is destructively
+   modified. */
+
static void deltaIRExpr ( IRExpr* e, Int delta )
{
   Int i;
   switch (e->tag) {
      case Iex_RdTmp:
         /* The only case that actually renames anything. */
         e->Iex.RdTmp.tmp += delta;
         break;
      case Iex_Get:
      case Iex_Const:
         /* No temps inside these. */
         break;
      case Iex_GetI:
         deltaIRExpr(e->Iex.GetI.ix, delta);
         break;
      case Iex_Qop:
         deltaIRExpr(e->Iex.Qop.details->arg1, delta);
         deltaIRExpr(e->Iex.Qop.details->arg2, delta);
         deltaIRExpr(e->Iex.Qop.details->arg3, delta);
         deltaIRExpr(e->Iex.Qop.details->arg4, delta);
         break;
      case Iex_Triop:
         deltaIRExpr(e->Iex.Triop.details->arg1, delta);
         deltaIRExpr(e->Iex.Triop.details->arg2, delta);
         deltaIRExpr(e->Iex.Triop.details->arg3, delta);
         break;
      case Iex_Binop:
         deltaIRExpr(e->Iex.Binop.arg1, delta);
         deltaIRExpr(e->Iex.Binop.arg2, delta);
         break;
      case Iex_Unop:
         deltaIRExpr(e->Iex.Unop.arg, delta);
         break;
      case Iex_Load:
         deltaIRExpr(e->Iex.Load.addr, delta);
         break;
      case Iex_CCall:
         /* args is a NULL-terminated vector. */
         for (i = 0; e->Iex.CCall.args[i]; i++)
            deltaIRExpr(e->Iex.CCall.args[i], delta);
         break;
      case Iex_ITE:
         deltaIRExpr(e->Iex.ITE.cond, delta);
         deltaIRExpr(e->Iex.ITE.iftrue, delta);
         deltaIRExpr(e->Iex.ITE.iffalse, delta);
         break;
      default: 
         vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
         vpanic("deltaIRExpr");
   }
}
+
+/* Adjust all tmp values (names) in st by delta.  st is destructively
+   modified. */
+
static void deltaIRStmt ( IRStmt* st, Int delta )
{
   Int      i;
   IRDirty* d;
   switch (st->tag) {
      case Ist_NoOp:
      case Ist_IMark:
      case Ist_MBE:
         /* No temps inside these. */
         break;
      case Ist_AbiHint:
         deltaIRExpr(st->Ist.AbiHint.base, delta);
         deltaIRExpr(st->Ist.AbiHint.nia, delta);
         break;
      case Ist_Put:
         deltaIRExpr(st->Ist.Put.data, delta);
         break;
      case Ist_PutI:
         deltaIRExpr(st->Ist.PutI.details->ix, delta);
         deltaIRExpr(st->Ist.PutI.details->data, delta);
         break;
      case Ist_WrTmp: 
         /* Rename both the defined temp and any temps in the rhs. */
         st->Ist.WrTmp.tmp += delta;
         deltaIRExpr(st->Ist.WrTmp.data, delta);
         break;
      case Ist_Exit:
         deltaIRExpr(st->Ist.Exit.guard, delta);
         break;
      case Ist_Store:
         deltaIRExpr(st->Ist.Store.addr, delta);
         deltaIRExpr(st->Ist.Store.data, delta);
         break;
      case Ist_StoreG: {
         IRStoreG* sg = st->Ist.StoreG.details;
         deltaIRExpr(sg->addr, delta);
         deltaIRExpr(sg->data, delta);
         deltaIRExpr(sg->guard, delta);
         break;
      }
      case Ist_LoadG: {
         IRLoadG* lg = st->Ist.LoadG.details;
         lg->dst += delta;
         deltaIRExpr(lg->addr, delta);
         deltaIRExpr(lg->alt, delta);
         deltaIRExpr(lg->guard, delta);
         break;
      }
      case Ist_CAS:
         /* oldHi/expdHi/dataHi are only present for double CAS. */
         if (st->Ist.CAS.details->oldHi != IRTemp_INVALID)
            st->Ist.CAS.details->oldHi += delta;
         st->Ist.CAS.details->oldLo += delta;
         deltaIRExpr(st->Ist.CAS.details->addr, delta);
         if (st->Ist.CAS.details->expdHi)
            deltaIRExpr(st->Ist.CAS.details->expdHi, delta);
         deltaIRExpr(st->Ist.CAS.details->expdLo, delta);
         if (st->Ist.CAS.details->dataHi)
            deltaIRExpr(st->Ist.CAS.details->dataHi, delta);
         deltaIRExpr(st->Ist.CAS.details->dataLo, delta);
         break;
      case Ist_LLSC:
         st->Ist.LLSC.result += delta;
         deltaIRExpr(st->Ist.LLSC.addr, delta);
         /* storedata is NULL for a Load-Linked. */
         if (st->Ist.LLSC.storedata)
            deltaIRExpr(st->Ist.LLSC.storedata, delta);
         break;
      case Ist_Dirty:
         d = st->Ist.Dirty.details;
         deltaIRExpr(d->guard, delta);
         for (i = 0; d->args[i]; i++) {
            IRExpr* arg = d->args[i];
            /* VECRET/BBPTR markers are not real expressions. */
            if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
               deltaIRExpr(arg, delta);
         }
         if (d->tmp != IRTemp_INVALID)
            d->tmp += delta;
         if (d->mAddr)
            deltaIRExpr(d->mAddr, delta);
         break;
      default: 
         vex_printf("\n"); ppIRStmt(st); vex_printf("\n");
         vpanic("deltaIRStmt");
   }
}
+
+
+/* If possible, return a loop-unrolled version of bb0.  The original
+   is changed.  If not possible, return NULL.  */
+
+/* The two schemas considered are:
+
+     X: BODY; goto X
+
+     which unrolls to (eg)  X: BODY;BODY; goto X
+
+   and
+
+       X: BODY; if (c) goto X; goto Y
+   which trivially transforms to
+       X: BODY; if (!c) goto Y; goto X;
+   so it falls in the scope of the first case.  
+
+   X and Y must be literal (guest) addresses.
+*/
+
+static Int calc_unroll_factor( IRSB* bb )
+{
+   Int n_stmts, i;
+
+   n_stmts = 0;
+   for (i = 0; i < bb->stmts_used; i++) {
+      if (bb->stmts[i]->tag != Ist_NoOp)
+         n_stmts++;
+   }
+
+   if (n_stmts <= vex_control.iropt_unroll_thresh/8) {
+      if (vex_control.iropt_verbosity > 0)
+         vex_printf("vex iropt: 8 x unrolling (%d sts -> %d sts)\n",
+                    n_stmts, 8* n_stmts);
+      return 8;
+   }
+   if (n_stmts <= vex_control.iropt_unroll_thresh/4) {
+      if (vex_control.iropt_verbosity > 0)
+         vex_printf("vex iropt: 4 x unrolling (%d sts -> %d sts)\n",
+                    n_stmts, 4* n_stmts);
+      return 4;
+   }
+
+   if (n_stmts <= vex_control.iropt_unroll_thresh/2) {
+      if (vex_control.iropt_verbosity > 0)
+         vex_printf("vex iropt: 2 x unrolling (%d sts -> %d sts)\n",
+                    n_stmts, 2* n_stmts);
+      return 2;
+   }
+
+   if (vex_control.iropt_verbosity > 0)
+      vex_printf("vex iropt: not unrolling (%d sts)\n", n_stmts);
+
+   return 1;
+}
+
+
static IRSB* maybe_loop_unroll_BB ( IRSB* bb0, Addr my_addr )
{
   Int      i, j, jmax, n_vars;
   Bool     xxx_known;
   Addr64   xxx_value, yyy_value;
   IRExpr*  udst;
   IRStmt*  st;
   IRConst* con;
   IRSB     *bb1, *bb2;
   Int      unroll_factor;

   /* Unrolling disabled entirely? */
   if (vex_control.iropt_unroll_thresh <= 0)
      return NULL;

   /* First off, figure out if we can unroll this loop.  Do this
      without modifying bb0. */

   if (bb0->jumpkind != Ijk_Boring)
      return NULL;

   xxx_known = False;
   xxx_value = 0;

   /* Extract the next-guest address.  If it isn't a literal, we 
      have to give up. */

   udst = bb0->next;
   if (udst->tag == Iex_Const
       && (udst->Iex.Const.con->tag == Ico_U32
           || udst->Iex.Const.con->tag == Ico_U64)) {
      /* The BB ends in a jump to a literal location. */
      xxx_known = True;
      xxx_value = udst->Iex.Const.con->tag == Ico_U64
                    ?  udst->Iex.Const.con->Ico.U64
                    : (Addr64)(udst->Iex.Const.con->Ico.U32);
   }

   if (!xxx_known)
      return NULL;

   /* Now we know the BB ends to a jump to a literal location.  If
      it's a jump to itself (viz, idiom #1), move directly to the
      unrolling stage, first cloning the bb so the original isn't
      modified. */
   if (xxx_value == my_addr) {
      unroll_factor = calc_unroll_factor( bb0 );
      if (unroll_factor < 2)
         return NULL;
      bb1 = deepCopyIRSB( bb0 );
      bb0 = NULL;
      udst = NULL; /* is now invalid */
      goto do_unroll;
   }

   /* Search for the second idiomatic form:
        X: BODY; if (c) goto X; goto Y
      We know Y, but need to establish that the last stmt
      is 'if (c) goto X'.
   */
   /* The fall-through target becomes the loop-exit address Y. */
   yyy_value = xxx_value;
   /* Find the last real (non-NULL) statement. */
   for (i = bb0->stmts_used-1; i >= 0; i--)
      if (bb0->stmts[i])
         break;

   if (i < 0)
      return NULL; /* block with no stmts.  Strange. */

   st = bb0->stmts[i];
   if (st->tag != Ist_Exit)
      return NULL;
   if (st->Ist.Exit.jk != Ijk_Boring)
      return NULL;

   con = st->Ist.Exit.dst;
   vassert(con->tag == Ico_U32 || con->tag == Ico_U64);

   /* The conditional-exit target is the candidate loop-head X. */
   xxx_value = con->tag == Ico_U64 
                  ? st->Ist.Exit.dst->Ico.U64
                  : (Addr64)(st->Ist.Exit.dst->Ico.U32);

   /* If this assertion fails, we have some kind of type error. */
   vassert(con->tag == udst->Iex.Const.con->tag);

   if (xxx_value != my_addr)
      /* We didn't find either idiom.  Give up. */
      return NULL;

   /* Ok, we found idiom #2.  Copy the BB, switch around the xxx and
      yyy values (which makes it look like idiom #1), and go into
      unrolling proper.  This means finding (again) the last stmt, in
      the copied BB. */

   unroll_factor = calc_unroll_factor( bb0 );
   if (unroll_factor < 2)
      return NULL;

   bb1 = deepCopyIRSB( bb0 );
   bb0 = NULL;
   udst = NULL; /* is now invalid */
   for (i = bb1->stmts_used-1; i >= 0; i--)
      if (bb1->stmts[i])
         break;

   /* The next bunch of assertions should be true since we already
      found and checked the last stmt in the original bb. */

   vassert(i >= 0);

   st = bb1->stmts[i];
   vassert(st->tag == Ist_Exit);

   con = st->Ist.Exit.dst;
   vassert(con->tag == Ico_U32 || con->tag == Ico_U64);

   udst = bb1->next;
   vassert(udst->tag == Iex_Const);
   vassert(udst->Iex.Const.con->tag == Ico_U32
          || udst->Iex.Const.con->tag == Ico_U64);
   vassert(con->tag == udst->Iex.Const.con->tag);

   /* switch the xxx and yyy fields around */
   if (con->tag == Ico_U64) {
      udst->Iex.Const.con->Ico.U64 = xxx_value;
      con->Ico.U64 = yyy_value;
   } else {
      udst->Iex.Const.con->Ico.U32 = (UInt)xxx_value;
      con->Ico.U32 = (UInt)yyy_value;
   }

   /* negate the test condition */
   st->Ist.Exit.guard 
      = IRExpr_Unop(Iop_Not1,deepCopyIRExpr(st->Ist.Exit.guard));

   /* --- The unroller proper.  Both idioms are by now --- */
   /* --- now converted to idiom 1. --- */

  do_unroll:

   vassert(unroll_factor == 2 
           || unroll_factor == 4
           || unroll_factor == 8);

   /* Each doubling pass appends a renamed copy of bb1 to itself, so
      jmax passes give a factor of 2^jmax. */
   jmax = unroll_factor==8 ? 3 : (unroll_factor==4 ? 2 : 1);
   for (j = 1; j <= jmax; j++) {

      n_vars = bb1->tyenv->types_used;

      bb2 = deepCopyIRSB(bb1);
      /* Duplicate bb1's temps in bb1's type env, so the shifted
         names used by the copy are valid. */
      for (i = 0; i < n_vars; i++)
         (void)newIRTemp(bb1->tyenv, bb2->tyenv->types[i]);

      for (i = 0; i < bb2->stmts_used; i++) {
         /* deltaIRStmt destructively modifies the stmt, but 
            that's OK since bb2 is a complete fresh copy of bb1. */
         deltaIRStmt(bb2->stmts[i], n_vars);
         addStmtToIRSB(bb1, bb2->stmts[i]);
      }
   }

   if (DEBUG_IROPT) {
      vex_printf("\nUNROLLED (%lx)\n", my_addr);
      ppIRSB(bb1);
      vex_printf("\n");
   }

   /* Flattening; sigh.  The unroller succeeds in breaking flatness
      by negating the test condition.  This should be fixed properly.
      For the moment use this shotgun approach.  */
   return flatten_BB(bb1);
}
+
+
+/*---------------------------------------------------------------*/
+/*--- The tree builder                                        ---*/
+/*---------------------------------------------------------------*/
+
+/* This isn't part of IR optimisation.  Really it's a pass done prior
+   to instruction selection, which improves the code that the
+   instruction selector can produce. */
+
+/* --- The 'tmp' environment is the central data structure here --- */
+
/* The number of outstanding bindings we're prepared to track.
   The number of times the env becomes full and we have to dump
   the oldest binding (hence reducing code quality) falls very
   rapidly as the env size increases.  A size in the region of
   8-10 gives reasonable performance under most circumstances. */
+#define A_NENV 10
+
+/* An interval. Used to record the bytes in the guest state accessed
+   by a Put[I] statement or by (one or more) Get[I] expression(s). In 
+   case of several Get[I] expressions, the lower/upper bounds are recorded.
+   This is conservative but cheap.
+   E.g. a Put of 8 bytes at address 100 would be recorded as [100,107].
+   E.g. an expression that reads 8 bytes at offset 100 and 4 bytes at
+   offset 200 would be recorded as [100,203] */
typedef
   struct {
      Bool present;  /* False until the first update; low/high invalid */
      Int  low;      /* lowest guest-state byte offset accessed */
      Int  high;     /* highest guest-state byte offset accessed (inclusive) */
   }
   Interval;
+
+static inline Bool
+intervals_overlap(Interval i1, Interval i2)
+{
+   return (i1.low >= i2.low && i1.low <= i2.high) ||
+          (i2.low >= i1.low && i2.low <= i1.high);
+}
+
+static inline void
+update_interval(Interval *i, Int low, Int high)
+{
+   vassert(low <= high);
+
+   if (i->present) {
+      if (low  < i->low)  i->low  = low;
+      if (high > i->high) i->high = high;
+   } else {
+      i->present = True;
+      i->low  = low;
+      i->high = high;
+   }
+}
+
+
+/* bindee == NULL   ===  slot is not in use
+   bindee != NULL   ===  slot is in use
+*/
typedef
   struct {
      IRTemp  binder;   /* the temp this slot binds */
      IRExpr* bindee;   /* bound expression, or NULL if slot unused/consumed */
      Bool    doesLoad; /* does BINDEE read memory? */
      /* Record the bytes of the guest state BINDEE reads from. */
      Interval getInterval;
   }
   ATmpInfo;
+
+__attribute__((unused))
+static void ppAEnv ( ATmpInfo* env )
+{
+   Int i;
+   for (i = 0; i < A_NENV; i++) {
+      vex_printf("%d  tmp %d  val ", i, (Int)env[i].binder);
+      if (env[i].bindee) 
+         ppIRExpr(env[i].bindee);
+      else 
+         vex_printf("(null)");
+      vex_printf("\n");
+   }
+}
+
+/* --- Tree-traversal fns --- */
+
+/* Traverse an expr, and detect if any part of it reads memory or does
+   a Get.  Be careful ... this really controls how much the
+   tree-builder can reorder the code, so getting it right is critical.
+*/
static void setHints_Expr (Bool* doesLoad, Interval* getInterval, IRExpr* e )
{
   Int i;
   switch (e->tag) {
      case Iex_CCall:
         /* args is a NULL-terminated vector. */
         for (i = 0; e->Iex.CCall.args[i]; i++)
            setHints_Expr(doesLoad, getInterval, e->Iex.CCall.args[i]);
         return;
      case Iex_ITE:
         setHints_Expr(doesLoad, getInterval, e->Iex.ITE.cond);
         setHints_Expr(doesLoad, getInterval, e->Iex.ITE.iftrue);
         setHints_Expr(doesLoad, getInterval, e->Iex.ITE.iffalse);
         return;
      case Iex_Qop:
         setHints_Expr(doesLoad, getInterval, e->Iex.Qop.details->arg1);
         setHints_Expr(doesLoad, getInterval, e->Iex.Qop.details->arg2);
         setHints_Expr(doesLoad, getInterval, e->Iex.Qop.details->arg3);
         setHints_Expr(doesLoad, getInterval, e->Iex.Qop.details->arg4);
         return;
      case Iex_Triop:
         setHints_Expr(doesLoad, getInterval, e->Iex.Triop.details->arg1);
         setHints_Expr(doesLoad, getInterval, e->Iex.Triop.details->arg2);
         setHints_Expr(doesLoad, getInterval, e->Iex.Triop.details->arg3);
         return;
      case Iex_Binop:
         setHints_Expr(doesLoad, getInterval, e->Iex.Binop.arg1);
         setHints_Expr(doesLoad, getInterval, e->Iex.Binop.arg2);
         return;
      case Iex_Unop:
         setHints_Expr(doesLoad, getInterval, e->Iex.Unop.arg);
         return;
      case Iex_Load:
         /* A memory read: set the hint, then scan the address expr. */
         *doesLoad = True;
         setHints_Expr(doesLoad, getInterval, e->Iex.Load.addr);
         return;
      case Iex_Get: {
         /* Exact guest-state bytes read are known statically. */
         Int low = e->Iex.Get.offset;
         Int high = low + sizeofIRType(e->Iex.Get.ty) - 1;
         update_interval(getInterval, low, high);
         return;
      }
      case Iex_GetI: {
         /* The index is dynamic, so conservatively record the whole
            array as read. */
         IRRegArray *descr = e->Iex.GetI.descr;
         Int size = sizeofIRType(descr->elemTy);
         Int low  = descr->base;
         Int high = low + descr->nElems * size - 1;
         update_interval(getInterval, low, high);
         setHints_Expr(doesLoad, getInterval, e->Iex.GetI.ix);
         return;
      }
      case Iex_RdTmp:
      case Iex_Const:
         /* Neither reads memory nor guest state. */
         return;
      default: 
         vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
         vpanic("setHints_Expr");
   }
}
+
+
+/* Add a binding to the front of the env and slide all the rest
+   backwards.  It should be the case that the last slot is free. */
+static void addToEnvFront ( ATmpInfo* env, IRTemp binder, IRExpr* bindee )
+{
+   Int i;
+   vassert(env[A_NENV-1].bindee == NULL);
+   for (i = A_NENV-1; i >= 1; i--)
+      env[i] = env[i-1];
+   env[0].binder   = binder;
+   env[0].bindee   = bindee;
+   env[0].doesLoad = False; /* filled in later */
+   env[0].getInterval.present  = False; /* filled in later */
+   env[0].getInterval.low  = -1; /* filled in later */
+   env[0].getInterval.high = -1; /* filled in later */
+}
+
+/* Given uses :: array of UShort, indexed by IRTemp
+   Add the use-occurrences of temps in this expression 
+   to the env. 
+*/
static void aoccCount_Expr ( UShort* uses, IRExpr* e )
{
   Int i;

   switch (e->tag) {

      case Iex_RdTmp: /* the only interesting case */
         /* One more use of this temp. */
         uses[e->Iex.RdTmp.tmp]++;
         return;

      case Iex_ITE:
         aoccCount_Expr(uses, e->Iex.ITE.cond);
         aoccCount_Expr(uses, e->Iex.ITE.iftrue);
         aoccCount_Expr(uses, e->Iex.ITE.iffalse);
         return;

      case Iex_Qop: 
         aoccCount_Expr(uses, e->Iex.Qop.details->arg1);
         aoccCount_Expr(uses, e->Iex.Qop.details->arg2);
         aoccCount_Expr(uses, e->Iex.Qop.details->arg3);
         aoccCount_Expr(uses, e->Iex.Qop.details->arg4);
         return;

      case Iex_Triop: 
         aoccCount_Expr(uses, e->Iex.Triop.details->arg1);
         aoccCount_Expr(uses, e->Iex.Triop.details->arg2);
         aoccCount_Expr(uses, e->Iex.Triop.details->arg3);
         return;

      case Iex_Binop: 
         aoccCount_Expr(uses, e->Iex.Binop.arg1);
         aoccCount_Expr(uses, e->Iex.Binop.arg2);
         return;

      case Iex_Unop: 
         aoccCount_Expr(uses, e->Iex.Unop.arg);
         return;

      case Iex_Load:
         aoccCount_Expr(uses, e->Iex.Load.addr);
         return;

      case Iex_CCall:
         /* args is a NULL-terminated vector. */
         for (i = 0; e->Iex.CCall.args[i]; i++)
            aoccCount_Expr(uses, e->Iex.CCall.args[i]);
         return;

      case Iex_GetI:
         aoccCount_Expr(uses, e->Iex.GetI.ix);
         return;

      case Iex_Const:
      case Iex_Get:
         /* No temps inside these. */
         return;

      default: 
         vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
         vpanic("aoccCount_Expr");
    }
}
+
+
+/* Given uses :: array of UShort, indexed by IRTemp
+   Add the use-occurrences of temps in this statement 
+   to the env. 
+*/
static void aoccCount_Stmt ( UShort* uses, IRStmt* st )
{
   Int      i;
   IRDirty* d;
   IRCAS*   cas;
   switch (st->tag) {
      case Ist_AbiHint:
         aoccCount_Expr(uses, st->Ist.AbiHint.base);
         aoccCount_Expr(uses, st->Ist.AbiHint.nia);
         return;
      case Ist_WrTmp: 
         /* Note: only counts uses in the rhs; the defined temp itself
            is not a use. */
         aoccCount_Expr(uses, st->Ist.WrTmp.data); 
         return; 
      case Ist_Put: 
         aoccCount_Expr(uses, st->Ist.Put.data);
         return;
      case Ist_PutI:
         aoccCount_Expr(uses, st->Ist.PutI.details->ix);
         aoccCount_Expr(uses, st->Ist.PutI.details->data);
         return;
      case Ist_Store:
         aoccCount_Expr(uses, st->Ist.Store.addr);
         aoccCount_Expr(uses, st->Ist.Store.data);
         return;
      case Ist_StoreG: {
         IRStoreG* sg = st->Ist.StoreG.details;
         aoccCount_Expr(uses, sg->addr);
         aoccCount_Expr(uses, sg->data);
         aoccCount_Expr(uses, sg->guard);
         return;
      }
      case Ist_LoadG: {
         IRLoadG* lg = st->Ist.LoadG.details;
         aoccCount_Expr(uses, lg->addr);
         aoccCount_Expr(uses, lg->alt);
         aoccCount_Expr(uses, lg->guard);
         return;
      }
      case Ist_CAS:
         /* Hi halves are only present for a double CAS. */
         cas = st->Ist.CAS.details;
         aoccCount_Expr(uses, cas->addr);
         if (cas->expdHi)
            aoccCount_Expr(uses, cas->expdHi);
         aoccCount_Expr(uses, cas->expdLo);
         if (cas->dataHi)
            aoccCount_Expr(uses, cas->dataHi);
         aoccCount_Expr(uses, cas->dataLo);
         return;
      case Ist_LLSC:
         aoccCount_Expr(uses, st->Ist.LLSC.addr);
         /* storedata is NULL for a Load-Linked. */
         if (st->Ist.LLSC.storedata)
            aoccCount_Expr(uses, st->Ist.LLSC.storedata);
         return;
      case Ist_Dirty:
         d = st->Ist.Dirty.details;
         if (d->mFx != Ifx_None)
            aoccCount_Expr(uses, d->mAddr);
         aoccCount_Expr(uses, d->guard);
         for (i = 0; d->args[i]; i++) {
            IRExpr* arg = d->args[i];
            /* VECRET/BBPTR markers are not real expressions. */
            if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
               aoccCount_Expr(uses, arg);
         }
         return;
      case Ist_NoOp:
      case Ist_IMark:
      case Ist_MBE:
         /* No temps inside these. */
         return;
      case Ist_Exit:
         aoccCount_Expr(uses, st->Ist.Exit.guard);
         return;
      default: 
         vex_printf("\n"); ppIRStmt(st); vex_printf("\n");
         vpanic("aoccCount_Stmt");
   }
}
+
+/* Look up a binding for tmp in the env.  If found, return the bound
+   expression, and set the env's binding to NULL so it is marked as
+   used.  If not found, return NULL. */
+
+static IRExpr* atbSubst_Temp ( ATmpInfo* env, IRTemp tmp )
+{
+   Int i;
+   for (i = 0; i < A_NENV; i++) {
+      if (env[i].binder == tmp && env[i].bindee != NULL) {
+         IRExpr* bindee = env[i].bindee;
+         env[i].bindee = NULL;
+         return bindee;
+      }
+   }
+   return NULL;
+}
+
+/* Traverse e, looking for temps.  For each observed temp, see if env
+   contains a binding for the temp, and if so return the bound value.
+   The env has the property that any binding it holds is
+   'single-shot', so once a binding is used, it is marked as no longer
+   available, by setting its .bindee field to NULL. */
+
+static inline Bool is_Unop ( IRExpr* e, IROp op ) {
+   return e->tag == Iex_Unop && e->Iex.Unop.op == op;
+}
+static inline Bool is_Binop ( IRExpr* e, IROp op ) {
+   return e->tag == Iex_Binop && e->Iex.Binop.op == op;
+}
+
/* Try a small set of local rewrite rules on Binop(op, a1, a2); if
   none fires, build the Binop unchanged.  Does not recurse. */
static IRExpr* fold_IRExpr_Binop ( IROp op, IRExpr* a1, IRExpr* a2 )
{
   switch (op) {
   case Iop_Or32:
      /* Or32( CmpwNEZ32(x), CmpwNEZ32(y) ) --> CmpwNEZ32( Or32( x, y ) )  */
      if (is_Unop(a1, Iop_CmpwNEZ32) && is_Unop(a2, Iop_CmpwNEZ32))
         return IRExpr_Unop( Iop_CmpwNEZ32,
                             IRExpr_Binop( Iop_Or32, a1->Iex.Unop.arg, 
                                                     a2->Iex.Unop.arg ) );
      break;

   case Iop_CmpNE32:
      /* Since X has type Ity_I1 we can simplify:
         CmpNE32(1Uto32(X),0)) ==> X */
      if (is_Unop(a1, Iop_1Uto32) && isZeroU32(a2))
         return a1->Iex.Unop.arg;
      break;

   default:
      break;
   }
   /* no reduction rule applies */
   return IRExpr_Binop( op, a1, a2 );
}
+
/* Try a set of local rewrite rules on Unop(op, aa); if none fires,
   build the Unop unchanged.  Each case is annotated with the rule it
   implements.  Some rules re-enter this function so chains of
   reductions can fire. */
static IRExpr* fold_IRExpr_Unop ( IROp op, IRExpr* aa )
{
   switch (op) {
   case Iop_CmpwNEZ64:
      /* CmpwNEZ64( CmpwNEZ64 ( x ) ) --> CmpwNEZ64 ( x ) */
      if (is_Unop(aa, Iop_CmpwNEZ64))
         return IRExpr_Unop( Iop_CmpwNEZ64, aa->Iex.Unop.arg );
      /* CmpwNEZ64( Or64 ( CmpwNEZ64(x), y ) ) --> CmpwNEZ64( Or64( x, y ) ) */
      if (is_Binop(aa, Iop_Or64) 
          && is_Unop(aa->Iex.Binop.arg1, Iop_CmpwNEZ64))
         return fold_IRExpr_Unop(
                   Iop_CmpwNEZ64,
                   IRExpr_Binop(Iop_Or64, 
                                aa->Iex.Binop.arg1->Iex.Unop.arg, 
                                aa->Iex.Binop.arg2));
      /* CmpwNEZ64( Or64 ( x, CmpwNEZ64(y) ) ) --> CmpwNEZ64( Or64( x, y ) ) */
      if (is_Binop(aa, Iop_Or64)
          && is_Unop(aa->Iex.Binop.arg2, Iop_CmpwNEZ64))
         return fold_IRExpr_Unop(
                   Iop_CmpwNEZ64,
                   IRExpr_Binop(Iop_Or64, 
                                aa->Iex.Binop.arg1, 
                                aa->Iex.Binop.arg2->Iex.Unop.arg));
      break;
   case Iop_CmpNEZ64:
      /* CmpNEZ64( Left64(x) ) --> CmpNEZ64(x) */
      if (is_Unop(aa, Iop_Left64)) 
         return IRExpr_Unop(Iop_CmpNEZ64, aa->Iex.Unop.arg);
      /* CmpNEZ64( 1Uto64(X) ) --> X */
      if (is_Unop(aa, Iop_1Uto64))
         return aa->Iex.Unop.arg;
      break;
   case Iop_CmpwNEZ32:
      /* CmpwNEZ32( CmpwNEZ32 ( x ) ) --> CmpwNEZ32 ( x ) */
      if (is_Unop(aa, Iop_CmpwNEZ32))
         return IRExpr_Unop( Iop_CmpwNEZ32, aa->Iex.Unop.arg );
      break;
   case Iop_CmpNEZ32:
      /* CmpNEZ32( Left32(x) ) --> CmpNEZ32(x) */
      if (is_Unop(aa, Iop_Left32)) 
         return IRExpr_Unop(Iop_CmpNEZ32, aa->Iex.Unop.arg);
      /* CmpNEZ32( 1Uto32(X) ) --> X */
      if (is_Unop(aa, Iop_1Uto32))
         return aa->Iex.Unop.arg;
      /* CmpNEZ32( 64to32( CmpwNEZ64(X) ) ) --> CmpNEZ64(X) */
      if (is_Unop(aa, Iop_64to32) && is_Unop(aa->Iex.Unop.arg, Iop_CmpwNEZ64))
         return IRExpr_Unop(Iop_CmpNEZ64, aa->Iex.Unop.arg->Iex.Unop.arg);
      break;
   case Iop_CmpNEZ8:
      /* CmpNEZ8( 1Uto8(X) ) --> X */
      if (is_Unop(aa, Iop_1Uto8))
         return aa->Iex.Unop.arg;
      break;
   case Iop_Left32:
      /* Left32( Left32(x) ) --> Left32(x) */
      if (is_Unop(aa, Iop_Left32))
         return IRExpr_Unop( Iop_Left32, aa->Iex.Unop.arg );
      break;
   case Iop_Left64:
      /* Left64( Left64(x) ) --> Left64(x) */
      if (is_Unop(aa, Iop_Left64))
         return IRExpr_Unop( Iop_Left64, aa->Iex.Unop.arg );
      break;
   case Iop_ZeroHI64ofV128:
      /* ZeroHI64ofV128( ZeroHI64ofV128(x) ) --> ZeroHI64ofV128(x) */
      if (is_Unop(aa, Iop_ZeroHI64ofV128))
         return IRExpr_Unop( Iop_ZeroHI64ofV128, aa->Iex.Unop.arg );
      break;
   case Iop_32to1:
      /* 32to1( 1Uto32 ( x ) ) --> x */
      if (is_Unop(aa, Iop_1Uto32))
         return aa->Iex.Unop.arg;
      /* 32to1( CmpwNEZ32 ( x )) --> CmpNEZ32(x) */
      if (is_Unop(aa, Iop_CmpwNEZ32))
         return IRExpr_Unop( Iop_CmpNEZ32, aa->Iex.Unop.arg );
      break;
   case Iop_64to1:
      /* 64to1( 1Uto64 ( x ) ) --> x */
      if (is_Unop(aa, Iop_1Uto64))
         return aa->Iex.Unop.arg;
      /* 64to1( CmpwNEZ64 ( x )) --> CmpNEZ64(x) */
      if (is_Unop(aa, Iop_CmpwNEZ64))
         return IRExpr_Unop( Iop_CmpNEZ64, aa->Iex.Unop.arg );
      break;
   case Iop_64to32:
      /* 64to32( 32Uto64 ( x )) --> x */
      if (is_Unop(aa, Iop_32Uto64))
         return aa->Iex.Unop.arg;
      /* 64to32( 8Uto64 ( x )) --> 8Uto32(x) */
      if (is_Unop(aa, Iop_8Uto64))
         return IRExpr_Unop(Iop_8Uto32, aa->Iex.Unop.arg);
      break;

   case Iop_32Uto64:
      /* 32Uto64( 8Uto32( x )) --> 8Uto64(x) */
      if (is_Unop(aa, Iop_8Uto32))
         return IRExpr_Unop(Iop_8Uto64, aa->Iex.Unop.arg);
      /* 32Uto64( 16Uto32( x )) --> 16Uto64(x) */
      if (is_Unop(aa, Iop_16Uto32))
         return IRExpr_Unop(Iop_16Uto64, aa->Iex.Unop.arg);
      /* 32Uto64(64to32( Shr64( 32Uto64(64to32(x)), sh ))
                     --> Shr64( 32Uto64(64to32(x)), sh )) */
      if (is_Unop(aa, Iop_64to32)
          && is_Binop(aa->Iex.Unop.arg, Iop_Shr64)
          && is_Unop(aa->Iex.Unop.arg->Iex.Binop.arg1, Iop_32Uto64)
          && is_Unop(aa->Iex.Unop.arg->Iex.Binop.arg1->Iex.Unop.arg,
                     Iop_64to32)) {
         return aa->Iex.Unop.arg;
      }
      /*     32Uto64(64to32( Shl64( 32Uto64(64to32(x)), sh ))
         --> 32Uto64(64to32( Shl64(                x,   sh )) */
      if (is_Unop(aa, Iop_64to32)
          && is_Binop(aa->Iex.Unop.arg, Iop_Shl64)
          && is_Unop(aa->Iex.Unop.arg->Iex.Binop.arg1, Iop_32Uto64)
          && is_Unop(aa->Iex.Unop.arg->Iex.Binop.arg1->Iex.Unop.arg,
                     Iop_64to32)) {
         return
            IRExpr_Unop(
               Iop_32Uto64,
               IRExpr_Unop(
                  Iop_64to32,
                  IRExpr_Binop(
                     Iop_Shl64, 
                     aa->Iex.Unop.arg->Iex.Binop.arg1->Iex.Unop.arg->Iex.Unop.arg,
                     aa->Iex.Unop.arg->Iex.Binop.arg2
            )));
      }
      break;

   case Iop_1Sto32:
      /* 1Sto32( CmpNEZ8( 32to8( 1Uto32( CmpNEZ32( x ))))) -> CmpwNEZ32(x) */
      if (is_Unop(aa, Iop_CmpNEZ8)
          && is_Unop(aa->Iex.Unop.arg, Iop_32to8)
          && is_Unop(aa->Iex.Unop.arg->Iex.Unop.arg, Iop_1Uto32)
          && is_Unop(aa->Iex.Unop.arg->Iex.Unop.arg->Iex.Unop.arg,
                     Iop_CmpNEZ32)) {
         return IRExpr_Unop( Iop_CmpwNEZ32,
                             aa->Iex.Unop.arg->Iex.Unop.arg
                               ->Iex.Unop.arg->Iex.Unop.arg);
      }
      break;

   default:
      break;
   }
   /* no reduction rule applies */
   return IRExpr_Unop( op, aa );
}
+
/* Rebuild e with env bindings substituted for temps (via
   atbSubst_Temp, which consumes each binding once).  Unop/Binop
   results additionally go through the local folders so reductions can
   fire on the substituted form. */
static IRExpr* atbSubst_Expr ( ATmpInfo* env, IRExpr* e )
{
   IRExpr*  e2;
   IRExpr** args2;
   Int      i;

   switch (e->tag) {

      case Iex_CCall:
         /* Copy the NULL-terminated arg vector before mutating it. */
         args2 = shallowCopyIRExprVec(e->Iex.CCall.args);
         for (i = 0; args2[i]; i++)
            args2[i] = atbSubst_Expr(env,args2[i]);
         return IRExpr_CCall(
                   e->Iex.CCall.cee,
                   e->Iex.CCall.retty,
                   args2
                );
      case Iex_RdTmp:
         /* Substitute a bound expression if one is available. */
         e2 = atbSubst_Temp(env, e->Iex.RdTmp.tmp);
         return e2 ? e2 : e;
      case Iex_ITE:
         return IRExpr_ITE(
                   atbSubst_Expr(env, e->Iex.ITE.cond),
                   atbSubst_Expr(env, e->Iex.ITE.iftrue),
                   atbSubst_Expr(env, e->Iex.ITE.iffalse)
                );
      case Iex_Qop:
         return IRExpr_Qop(
                   e->Iex.Qop.details->op,
                   atbSubst_Expr(env, e->Iex.Qop.details->arg1),
                   atbSubst_Expr(env, e->Iex.Qop.details->arg2),
                   atbSubst_Expr(env, e->Iex.Qop.details->arg3),
                   atbSubst_Expr(env, e->Iex.Qop.details->arg4)
                );
      case Iex_Triop:
         return IRExpr_Triop(
                   e->Iex.Triop.details->op,
                   atbSubst_Expr(env, e->Iex.Triop.details->arg1),
                   atbSubst_Expr(env, e->Iex.Triop.details->arg2),
                   atbSubst_Expr(env, e->Iex.Triop.details->arg3)
                );
      case Iex_Binop:
         /* Fold as well as substitute. */
         return fold_IRExpr_Binop(
                   e->Iex.Binop.op,
                   atbSubst_Expr(env, e->Iex.Binop.arg1),
                   atbSubst_Expr(env, e->Iex.Binop.arg2)
                );
      case Iex_Unop:
         /* Fold as well as substitute. */
         return fold_IRExpr_Unop(
                   e->Iex.Unop.op,
                   atbSubst_Expr(env, e->Iex.Unop.arg)
                );
      case Iex_Load:
         return IRExpr_Load(
                   e->Iex.Load.end,
                   e->Iex.Load.ty,
                   atbSubst_Expr(env, e->Iex.Load.addr)
                );
      case Iex_GetI:
         return IRExpr_GetI(
                   e->Iex.GetI.descr,
                   atbSubst_Expr(env, e->Iex.GetI.ix),
                   e->Iex.GetI.bias
                );
      case Iex_Const:
      case Iex_Get:
         /* No temps inside; return unchanged. */
         return e;
      default: 
         vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
         vpanic("atbSubst_Expr");
   }
}
+
+/* Same deal as atbSubst_Expr, except for stmts. */
+
+/* Substitute |env| into every sub-expression of |st|, returning a new
+   IRStmt.  PutI/CAS payloads are rebuilt; see the note in the Dirty
+   case about argument aliasing. */
+static IRStmt* atbSubst_Stmt ( ATmpInfo* env, IRStmt* st )
+{
+   Int     i;
+   IRDirty *d, *d2;
+   IRCAS   *cas, *cas2;
+   IRPutI  *puti, *puti2;
+
+   switch (st->tag) {
+      case Ist_AbiHint:
+         return IRStmt_AbiHint(
+                   atbSubst_Expr(env, st->Ist.AbiHint.base),
+                   st->Ist.AbiHint.len,
+                   atbSubst_Expr(env, st->Ist.AbiHint.nia)
+                );
+      case Ist_Store:
+         return IRStmt_Store(
+                   st->Ist.Store.end,
+                   atbSubst_Expr(env, st->Ist.Store.addr),
+                   atbSubst_Expr(env, st->Ist.Store.data)
+                );
+      case Ist_StoreG: {
+         IRStoreG* sg = st->Ist.StoreG.details;
+         return IRStmt_StoreG(sg->end,
+                              atbSubst_Expr(env, sg->addr),
+                              atbSubst_Expr(env, sg->data),
+                              atbSubst_Expr(env, sg->guard));
+      }
+      case Ist_LoadG: {
+         IRLoadG* lg = st->Ist.LoadG.details;
+         return IRStmt_LoadG(lg->end, lg->cvt, lg->dst,
+                             atbSubst_Expr(env, lg->addr),
+                             atbSubst_Expr(env, lg->alt),
+                             atbSubst_Expr(env, lg->guard));
+      }
+      case Ist_WrTmp:
+         return IRStmt_WrTmp(
+                   st->Ist.WrTmp.tmp,
+                   atbSubst_Expr(env, st->Ist.WrTmp.data)
+                );
+      case Ist_Put:
+         return IRStmt_Put(
+                   st->Ist.Put.offset,
+                   atbSubst_Expr(env, st->Ist.Put.data)
+                );
+      case Ist_PutI:
+         puti  = st->Ist.PutI.details;
+         puti2 = mkIRPutI(puti->descr, 
+                          atbSubst_Expr(env, puti->ix),
+                          puti->bias,
+                          atbSubst_Expr(env, puti->data));
+         return IRStmt_PutI(puti2);
+
+      case Ist_Exit:
+         return IRStmt_Exit(
+                   atbSubst_Expr(env, st->Ist.Exit.guard),
+                   st->Ist.Exit.jk,
+                   st->Ist.Exit.dst,
+                   st->Ist.Exit.offsIP
+                );
+      case Ist_IMark:
+         return IRStmt_IMark(st->Ist.IMark.addr,
+                             st->Ist.IMark.len,
+                             st->Ist.IMark.delta);
+      case Ist_NoOp:
+         return IRStmt_NoOp();
+      case Ist_MBE:
+         return IRStmt_MBE(st->Ist.MBE.event);
+      case Ist_CAS:
+         /* expdHi/dataHi are NULL for a single-word CAS; preserve
+            that shape. */
+         cas  = st->Ist.CAS.details;
+         cas2 = mkIRCAS(
+                   cas->oldHi, cas->oldLo, cas->end, 
+                   atbSubst_Expr(env, cas->addr),
+                   cas->expdHi ? atbSubst_Expr(env, cas->expdHi) : NULL,
+                   atbSubst_Expr(env, cas->expdLo),
+                   cas->dataHi ? atbSubst_Expr(env, cas->dataHi) : NULL,
+                   atbSubst_Expr(env, cas->dataLo)
+                );
+         return IRStmt_CAS(cas2);
+      case Ist_LLSC:
+         return IRStmt_LLSC(
+                   st->Ist.LLSC.end,
+                   st->Ist.LLSC.result,
+                   atbSubst_Expr(env, st->Ist.LLSC.addr),
+                   st->Ist.LLSC.storedata
+                      ? atbSubst_Expr(env, st->Ist.LLSC.storedata) : NULL
+                );
+      case Ist_Dirty:
+         /* NOTE: *d2 = *d copies the args pointer, so substituting
+            into d2->args[i] below also rewrites the original
+            statement's argument vector. */
+         d  = st->Ist.Dirty.details;
+         d2 = emptyIRDirty();
+         *d2 = *d;
+         if (d2->mFx != Ifx_None)
+            d2->mAddr = atbSubst_Expr(env, d2->mAddr);
+         d2->guard = atbSubst_Expr(env, d2->guard);
+         for (i = 0; d2->args[i]; i++) {
+            IRExpr* arg = d2->args[i];
+            if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+               d2->args[i] = atbSubst_Expr(env, arg);
+         }
+         return IRStmt_Dirty(d2);
+      default: 
+         vex_printf("\n"); ppIRStmt(st); vex_printf("\n");
+         vpanic("atbSubst_Stmt");
+   }
+}
+
+/* Does the dirty helper call |d| (possibly) write memory?  True for
+   the memory-effect kinds that include a write. */
+inline
+static Bool dirty_helper_stores ( const IRDirty *d )
+{
+   switch (d->mFx) {
+      case Ifx_Write:
+      case Ifx_Modify:
+         return True;
+      default:
+         return False;
+   }
+}
+
+/* Compute the interval of guest state that the dirty helper call |d|
+   may write.  Via *requiresPreciseMemExns, report whether any written
+   region needs precise memory exceptions, as judged by
+   |preciseMemExnsFn| under |pxControl|. */
+inline
+static Interval dirty_helper_puts (
+                   const IRDirty *d,
+                   Bool (*preciseMemExnsFn)(Int,Int,VexRegisterUpdates),
+                   VexRegisterUpdates pxControl,
+                   /*OUT*/Bool *requiresPreciseMemExns
+                )
+{
+   Int i;
+   Interval interval;
+
+   /* Passing the guest state pointer opens the door to modifying the
+      guest state under the covers.  It's not allowed, but let's be
+      extra conservative and assume the worst. */
+   for (i = 0; d->args[i]; i++) {
+      if (UNLIKELY(d->args[i]->tag == Iex_BBPTR)) {
+         *requiresPreciseMemExns = True;
+         /* Assume all guest state is written. */
+         interval.present = True;
+         interval.low  = 0;
+         interval.high = 0x7FFFFFFF;
+         return interval;
+      }
+   }
+
+   /* Check the side effects on the guest state */
+   interval.present = False;
+   interval.low = interval.high = -1;
+   *requiresPreciseMemExns = False;
+
+   for (i = 0; i < d->nFxState; ++i) {
+      if (d->fxState[i].fx != Ifx_Read) {
+         Int offset = d->fxState[i].offset;
+         Int size = d->fxState[i].size;
+         Int nRepeats = d->fxState[i].nRepeats;
+         Int repeatLen = d->fxState[i].repeatLen;
+
+         /* For a repeated region the last byte written is at
+            offset + nRepeats * repeatLen + size - 1. */
+         if (preciseMemExnsFn(offset,
+                              offset + nRepeats * repeatLen + size - 1,
+                              pxControl)) {
+            *requiresPreciseMemExns = True;
+         }
+         update_interval(&interval, offset,
+                         offset + nRepeats * repeatLen + size - 1);
+      }
+   }
+
+   return interval;
+}
+
+/* Return an interval if st modifies the guest state.  Via
+   requiresPreciseMemExns return whether or not that modification
+   requires precise exceptions. */
+static Interval stmt_modifies_guest_state (
+                   IRSB *bb, const IRStmt *st,
+                   Bool (*preciseMemExnsFn)(Int,Int,VexRegisterUpdates),
+                   VexRegisterUpdates pxControl,
+                   /*OUT*/Bool *requiresPreciseMemExns
+                )
+{
+   Interval ret;
+
+   switch (st->tag) {
+   case Ist_Put: {
+      /* A Put writes [offset .. offset + size - 1]. */
+      Int lo = st->Ist.Put.offset;
+      Int hi = lo + sizeofIRType(typeOfIRExpr(bb->tyenv,
+                                              st->Ist.Put.data)) - 1;
+
+      *requiresPreciseMemExns = preciseMemExnsFn(lo, hi, pxControl);
+      ret.present = True;
+      ret.low  = lo;
+      ret.high = hi;
+      return ret;
+   }
+
+   case Ist_PutI: {
+      /* We quietly assume here that all segments are contiguous and
+         there are no holes.  This is to avoid a loop.  The assumption
+         is conservative in the sense that we might report that precise
+         memory exceptions are needed when in fact they are not. */
+      IRRegArray *descr = st->Ist.PutI.details->descr;
+      Int lo = descr->base;
+      Int hi = lo + descr->nElems * sizeofIRType(descr->elemTy) - 1;
+
+      *requiresPreciseMemExns = preciseMemExnsFn(lo, hi, pxControl);
+      ret.present = True;
+      ret.low  = lo;
+      ret.high = hi;
+      return ret;
+   }
+
+   case Ist_Dirty:
+      /* Delegate to the dirty-helper-specific analysis. */
+      return dirty_helper_puts(st->Ist.Dirty.details,
+                               preciseMemExnsFn, pxControl,
+                               requiresPreciseMemExns);
+
+   default:
+      /* Anything else leaves the guest state alone. */
+      *requiresPreciseMemExns = False;
+      ret.present = False;
+      ret.low  = -1;
+      ret.high = -1;
+      return ret;
+   }
+}
+
+/* Build expression trees in |bb|, in place: each binding 't = E'
+   whose temp is used exactly once is deleted and substituted into its
+   single use point, subject to the invalidation rules below.  Returns
+   the maximum guest address covered by the block's IMarks, or
+   ~(Addr)0 if no IMark was seen. */
+/* notstatic */ Addr ado_treebuild_BB (
+                        IRSB* bb,
+                        Bool (*preciseMemExnsFn)(Int,Int,VexRegisterUpdates),
+                        VexRegisterUpdates pxControl
+                     )
+{
+   Int      i, j, k, m;
+   Bool     stmtStores, invalidateMe;
+   Interval putInterval;
+   IRStmt*  st;
+   IRStmt*  st2;
+   ATmpInfo env[A_NENV];
+
+   Bool   max_ga_known = False;
+   Addr   max_ga       = 0;
+
+   /* uses[t] counts the occurrences of temp t in the block. */
+   Int       n_tmps = bb->tyenv->types_used;
+   UShort*   uses   = LibVEX_Alloc_inline(n_tmps * sizeof(UShort));
+
+   /* Phase 1.  Scan forwards in bb, counting use occurrences of each
+      temp.  Also count occurrences in the bb->next field.  Take the
+      opportunity to also find the maximum guest address in the block,
+      since that will be needed later for deciding when we can safely
+      elide event checks. */
+
+   for (i = 0; i < n_tmps; i++)
+      uses[i] = 0;
+
+   for (i = 0; i < bb->stmts_used; i++) {
+      st = bb->stmts[i];
+      switch (st->tag) {
+         case Ist_NoOp:
+            continue;
+         case Ist_IMark: {
+            UInt len = st->Ist.IMark.len;
+            Addr mga = st->Ist.IMark.addr + (len < 1 ? 1 : len) - 1;
+            max_ga_known = True;
+            if (mga > max_ga)
+               max_ga = mga;
+            break;
+         }
+         default:
+            break;
+      }
+      aoccCount_Stmt( uses, st );
+   }
+   aoccCount_Expr(uses, bb->next );
+
+#  if 0
+   for (i = 0; i < n_tmps; i++) {
+      if (uses[i] == 0)
+        continue;
+      ppIRTemp( (IRTemp)i );
+      vex_printf("  used %d\n", (Int)uses[i] );
+   }
+#  endif
+
+   /* Phase 2.  Scan forwards in bb.  For each statement in turn:
+
+         If the env is full, emit the end element.  This guarantees
+         there is at least one free slot in the following.
+
+         On seeing 't = E', occ(t)==1,  
+            let E'=env(E)
+            delete this stmt
+            add t -> E' to the front of the env
+            Examine E' and set the hints for E' appropriately
+              (doesLoad? doesGet?)
+
+         On seeing any other stmt, 
+            let stmt' = env(stmt)
+            remove from env any 't=E' binds invalidated by stmt
+                emit the invalidated stmts
+            emit stmt'
+            compact any holes in env 
+              by sliding entries towards the front
+
+      Finally, apply env to bb->next.  
+   */
+
+   for (i = 0; i < A_NENV; i++) {
+      env[i].bindee = NULL;
+      env[i].binder = IRTemp_INVALID;
+   }
+
+   /* The stmts in bb are being reordered, and we are guaranteed to
+      end up with no more than the number we started with.  Use i to
+      be the cursor of the current stmt examined and j <= i to be that
+      for the current stmt being written. 
+   */
+   j = 0;
+   for (i = 0; i < bb->stmts_used; i++) {
+
+      st = bb->stmts[i];
+      if (st->tag == Ist_NoOp)
+         continue;
+     
+      /* Ensure there's at least one space in the env, by emitting
+         the oldest binding if necessary. */
+      if (env[A_NENV-1].bindee != NULL) {
+         bb->stmts[j] = IRStmt_WrTmp( env[A_NENV-1].binder, 
+                                      env[A_NENV-1].bindee );
+         j++;
+         vassert(j <= i);
+         env[A_NENV-1].bindee = NULL;
+      }
+
+      /* Consider current stmt. */
+      if (st->tag == Ist_WrTmp && uses[st->Ist.WrTmp.tmp] <= 1) {
+         IRExpr *e, *e2;
+
+         /* optional extra: dump dead bindings as we find them.
+            Removes the need for a prior dead-code removal pass. */
+         if (uses[st->Ist.WrTmp.tmp] == 0) {
+	    if (0) vex_printf("DEAD binding\n");
+            continue; /* for (i = 0; i < bb->stmts_used; i++) loop */
+         }
+         vassert(uses[st->Ist.WrTmp.tmp] == 1);
+
+         /* ok, we have 't = E', occ(t)==1.  Do the abovementioned
+            actions. */
+         e  = st->Ist.WrTmp.data;
+         e2 = atbSubst_Expr(env, e);
+         addToEnvFront(env, st->Ist.WrTmp.tmp, e2);
+         setHints_Expr(&env[0].doesLoad, &env[0].getInterval, e2);
+         /* don't advance j, as we are deleting this stmt and instead
+            holding it temporarily in the env. */
+         continue; /* for (i = 0; i < bb->stmts_used; i++) loop */
+      }
+
+      /* we get here for any other kind of statement. */
+      /* 'use up' any bindings required by the current statement. */
+      st2 = atbSubst_Stmt(env, st);
+
+      /* Now, before this stmt, dump any bindings in env that it
+         invalidates.  These need to be dumped in the order in which
+         they originally entered env -- that means from oldest to
+         youngest. */
+
+      /* putInterval/stmtStores characterise what the stmt under
+         consideration does, or might do (sidely safe @ True). */
+
+      Bool putRequiresPreciseMemExns;
+      putInterval = stmt_modifies_guest_state(
+                       bb, st, preciseMemExnsFn, pxControl,
+                       &putRequiresPreciseMemExns
+                    );
+
+      /* be True if this stmt writes memory or might do (==> we don't
+         want to reorder other loads or stores relative to it).  Also,
+         both LL and SC fall under this classification, since we
+         really ought to be conservative and not reorder any other
+         memory transactions relative to them. */
+      stmtStores
+         = toBool( st->tag == Ist_Store
+                   || (st->tag == Ist_Dirty
+                       && dirty_helper_stores(st->Ist.Dirty.details))
+                   || st->tag == Ist_LLSC
+                   || st->tag == Ist_CAS );
+
+      for (k = A_NENV-1; k >= 0; k--) {
+         if (env[k].bindee == NULL)
+            continue;
+         /* Compare the actions of this stmt with the actions of
+            binding 'k', to see if they invalidate the binding. */
+         invalidateMe
+            = toBool(
+              /* a store invalidates loaded data */
+              (env[k].doesLoad && stmtStores)
+              /* a put invalidates get'd data, if they overlap */
+              || ((env[k].getInterval.present && putInterval.present) &&
+                  intervals_overlap(env[k].getInterval, putInterval))
+              /* a put invalidates loaded data. That means, in essense, that
+                 a load expression cannot be substituted into a statement
+                 that follows the put. But there is nothing wrong doing so
+                 except when the put statement requries precise exceptions.
+                 Think of a load that is moved past a put where the put
+                 updates the IP in the guest state. If the load generates
+                 a segfault, the wrong address (line number) would be
+                 reported. */
+              || (env[k].doesLoad && putInterval.present &&
+                  putRequiresPreciseMemExns)
+              /* probably overly conservative: a memory bus event
+                 invalidates absolutely everything, so that all
+                 computation prior to it is forced to complete before
+                 proceeding with the event (fence,lock,unlock). */
+              || st->tag == Ist_MBE
+              /* also be (probably overly) paranoid re AbiHints */
+              || st->tag == Ist_AbiHint
+              );
+         if (invalidateMe) {
+            bb->stmts[j] = IRStmt_WrTmp( env[k].binder, env[k].bindee );
+            j++;
+            vassert(j <= i);
+            env[k].bindee = NULL;
+         }
+      }
+
+      /* Slide in-use entries in env up to the front */
+      m = 0;
+      for (k = 0; k < A_NENV; k++) {
+         if (env[k].bindee != NULL) {
+            env[m] = env[k];
+            m++;
+	 }
+      }
+      for (m = m; m < A_NENV; m++) {
+         env[m].bindee = NULL;
+      }
+
+      /* finally, emit the substituted statement */
+      bb->stmts[j] = st2;
+      /* vex_printf("**2  "); ppIRStmt(bb->stmts[j]); vex_printf("\n"); */
+      j++;
+
+      vassert(j <= i+1);
+   } /* for each stmt in the original bb ... */
+
+   /* Finally ... substitute the ->next field as much as possible, and
+      dump any left-over bindings.  Hmm.  Perhaps there should be no
+      left over bindings?  Or any left-over bindings are
+      by definition dead? */
+   bb->next = atbSubst_Expr(env, bb->next);
+   bb->stmts_used = j;
+
+   return max_ga_known ? max_ga : ~(Addr)0;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- MSVC specific transformation hacks                      ---*/
+/*---------------------------------------------------------------*/
+
+/* The purpose of all this is to find MSVC's idiom for non-constant
+   bitfield assignment, "a ^ ((a ^ b) & c)", and transform it into
+   gcc's idiom "(a & ~c) | (b & c)".  Motivation is that Memcheck has
+   generates a lot of false positives from the MSVC version because it
+   doesn't understand that XORing an undefined bit with itself gives a
+   defined result.
+
+   This isn't a problem for the simple case "x ^ x", because iropt
+   folds it to a constant zero before Memcheck ever sees it.  But in
+   this case we have an intervening "& c" which defeats the simple
+   case.  So we have to carefully inspect all expressions rooted at an
+   XOR to see if any of them match "a ^ ((a ^ b) & c)", or any of the
+   7 other variants resulting from swapping the order of arguments to
+   the three binary operations.  If we get a match, we then replace
+   the tree with "(a & ~c) | (b & c)", and Memcheck is happy.
+
+   The key difficulty is to spot the two uses of "a".  To normalise
+   the IR to maximise the chances of success, we first do a CSE pass,
+   with CSEing of loads enabled, since the two "a" expressions may be
+   loads, which need to be commoned up.  Then we do a constant folding
+   pass, so as to remove any tmp-to-tmp assignment chains that would
+   make matching the original expression more difficult.
+*/
+
+
+/* Helper function for debug printing.  Recursively prints |e|,
+   chasing temp bindings through |env| so that the whole flat
+   expression tree is visible. */
+__attribute__((unused))
+static void print_flat_expr ( IRExpr** env, IRExpr* e )
+{
+   if (e == NULL) {
+      vex_printf("?");
+      return;
+   }
+   switch (e->tag) {
+      case Iex_Const:
+      case Iex_CCall:
+      case Iex_Load:
+      case Iex_ITE:
+      case Iex_Get:
+         /* Leaf-ish nodes: hand off to the generic printer. */
+         ppIRExpr(e);
+         break;
+      case Iex_RdTmp:
+         /* Print the temp, then its definition (if known). */
+         ppIRTemp(e->Iex.RdTmp.tmp);
+         vex_printf("=");
+         print_flat_expr(env, chase(env, e));
+         break;
+      case Iex_Unop:
+         ppIROp(e->Iex.Unop.op);
+         vex_printf("(");
+         print_flat_expr(env, e->Iex.Unop.arg);
+         vex_printf(")");
+         break;
+      case Iex_Binop:
+         ppIROp(e->Iex.Binop.op);
+         vex_printf("(");
+         print_flat_expr(env, e->Iex.Binop.arg1);
+         vex_printf(",");
+         print_flat_expr(env, e->Iex.Binop.arg2);
+         vex_printf(")");
+         break;
+      default:
+         vex_printf("FAIL: "); ppIRExpr(e); vex_printf("\n");
+         vassert(0);
+   }
+}
+
+/* Spot   a ^ ((a ^ b) & c)   for a,b and c tmp-or-const (atoms)
+   or any of the other 7 variants generated by switching the order
+   of arguments to the outer ^, the inner ^ and the &.
+*/
+/* On a match, set *aa/*bb/*cc to the atoms a, b and c, and return the
+   variant number (1..8).  Return 0 if no variant matches. */
+static UInt spotBitfieldAssignment ( /*OUT*/IRExpr** aa, /*OUT*/IRExpr** bb,
+                                     /*OUT*/IRExpr** cc,
+                                     IRExpr** env, IRExpr* e,
+                                     IROp opAND, IROp opXOR)
+{
+   /* LL/RR select the two args of a binop; STEP chases one level of
+      tmp indirection through |env|. */
+#  define ISBIN(_e,_op) ((_e) && (_e)->tag == Iex_Binop \
+                              && (_e)->Iex.Binop.op == (_op))
+#  define ISATOM(_e)    isIRAtom(_e)
+#  define STEP(_e)      chase1(env, (_e))
+#  define LL(_e)        ((_e)->Iex.Binop.arg1)
+#  define RR(_e)        ((_e)->Iex.Binop.arg2)
+
+   IRExpr *a1, *and, *xor, *c, *a2bL, *a2bR;
+
+   /* This is common to all 8 cases */
+   if (!ISBIN(e, opXOR)) goto fail;
+
+   /*                        -----and------ */
+   /*                         --xor---      */
+   /* find variant 1:   a1 ^ ((a2 ^ b) & c) */
+   /* find variant 2:   a1 ^ ((b ^ a2) & c) */
+   a1 = and = xor = c = a2bL = a2bR = NULL;
+
+   a1   = LL(e);
+   and  = STEP(RR(e));
+   if (!ISBIN(and, opAND)) goto v34;
+   xor  = STEP(LL(and));
+   c    = RR(and);
+   if (!ISBIN(xor, opXOR)) goto v34;
+   a2bL = LL(xor);
+   a2bR = RR(xor);
+
+   if (eqIRAtom(a1, a2bL) && !eqIRAtom(a1, a2bR)) {
+      *aa = a1;
+      *bb = a2bR;
+      *cc = c;
+      return 1;
+   }
+   if (eqIRAtom(a1, a2bR) && !eqIRAtom(a1, a2bL)) {
+      *aa = a1;
+      *bb = a2bL;
+      *cc = c;
+      return 2;
+   }
+
+  v34:
+   /*                   -----and------      */
+   /*                    --xor---           */
+   /* find variant 3:   ((a2 ^ b) & c) ^ a1 */
+   /* find variant 4:   ((b ^ a2) & c) ^ a1 */
+   a1 = and = xor = c = a2bL = a2bR = NULL;
+
+   a1   = RR(e);
+   and  = STEP(LL(e));
+   if (!ISBIN(and, opAND)) goto v56;
+   xor  = STEP(LL(and));
+   c    = RR(and);
+   if (!ISBIN(xor, opXOR)) goto v56;
+   a2bL = LL(xor);
+   a2bR = RR(xor);
+
+   if (eqIRAtom(a1, a2bL) && !eqIRAtom(a1, a2bR)) {
+      *aa = a1;
+      *bb = a2bR;
+      *cc = c;
+      return 3;
+   }
+   if (eqIRAtom(a1, a2bR) && !eqIRAtom(a1, a2bL)) {
+      *aa = a1;
+      *bb = a2bL;
+      *cc = c;
+      return 4;
+   }
+
+  v56:
+   /*                        -----and------ */
+   /*                         --xor---      */
+   /* find variant 5:   a1 ^ (c & (a2 ^ b)) */
+   /* find variant 6:   a1 ^ (c & (b ^ a2)) */
+   a1 = and = xor = c = a2bL = a2bR = NULL;
+
+   a1   = LL(e);
+   and  = STEP(RR(e));
+   if (!ISBIN(and, opAND)) goto v78;
+   xor  = STEP(RR(and));
+   c    = LL(and);
+   if (!ISBIN(xor, opXOR)) goto v78;
+   a2bL = LL(xor);
+   a2bR = RR(xor);
+
+   if (eqIRAtom(a1, a2bL) && !eqIRAtom(a1, a2bR)) {
+      *aa = a1;
+      *bb = a2bR;
+      *cc = c;
+      /* NOTE(review): 5-5 == 0, so this deliberately asserts if
+         variant 5 ever matches ("ATC" -- presumably "awaiting test
+         case"; confirm before relying on this path). */
+      vassert(5-5); // ATC
+      return 5;
+   }
+   if (eqIRAtom(a1, a2bR) && !eqIRAtom(a1, a2bL)) {
+      *aa = a1;
+      *bb = a2bL;
+      *cc = c;
+      /* NOTE(review): as above -- deliberate assert-on-match. */
+      vassert(6-6); // ATC
+      return 6;
+   }
+
+ v78:
+   /*                   -----and------      */
+   /*                    --xor---           */
+   /* find variant 7:   (c & (a2 ^ b)) ^ a1 */
+   /* find variant 8:   (c & (b ^ a2)) ^ a1 */
+   a1 = and = xor = c = a2bL = a2bR = NULL;
+
+   a1   = RR(e);
+   and  = STEP(LL(e));
+   if (!ISBIN(and, opAND)) goto fail;
+   xor  = STEP(RR(and));
+   c    = LL(and);
+   if (!ISBIN(xor, opXOR)) goto fail;
+   a2bL = LL(xor);
+   a2bR = RR(xor);
+
+   if (eqIRAtom(a1, a2bL) && !eqIRAtom(a1, a2bR)) {
+      *aa = a1;
+      *bb = a2bR;
+      *cc = c;
+      return 7;
+   }
+   if (eqIRAtom(a1, a2bR) && !eqIRAtom(a1, a2bL)) {
+      *aa = a1;
+      *bb = a2bL;
+      *cc = c;
+      return 8;
+   }
+
+ fail:
+   return 0;
+
+#  undef ISBIN
+#  undef ISATOM
+#  undef STEP
+#  undef LL
+#  undef RR
+}
+
+/* If |e| is of the form a ^ ((a ^ b) & c) (or any of the 7 other
+   variants thereof generated by switching arguments around), return
+   the IRExpr* for (a & ~c) | (b & c).  Else return NULL. */
+static IRExpr* do_XOR_TRANSFORMS_IRExpr ( IRExpr** env, IRExpr* e )
+{
+   if (e->tag != Iex_Binop)
+      return NULL;
+
+   /* Pick the operation family matching the width of the root XOR;
+      any other root op means there is nothing to do. */
+   const HChar* tyNm;
+   IROp opOR, opAND, opNOT, opXOR;
+   switch (e->Iex.Binop.op) {
+      case Iop_Xor32:
+         tyNm  = "I32";
+         opOR  = Iop_Or32;  opAND = Iop_And32;
+         opNOT = Iop_Not32; opXOR = Iop_Xor32;
+         break;
+      case Iop_Xor16:
+         tyNm  = "I16";
+         opOR  = Iop_Or16;  opAND = Iop_And16;
+         opNOT = Iop_Not16; opXOR = Iop_Xor16;
+         break;
+      case Iop_Xor8:
+         tyNm  = "I8";
+         opOR  = Iop_Or8;  opAND = Iop_And8;
+         opNOT = Iop_Not8; opXOR = Iop_Xor8;
+         break;
+      default:
+         return NULL;
+   }
+
+   IRExpr* a = NULL;
+   IRExpr* b = NULL;
+   IRExpr* c = NULL;
+   UInt variant = spotBitfieldAssignment(&a, &b, &c, env, e, opAND, opXOR);
+   if (variant == 0)
+      return NULL;
+
+   static UInt ctr = 0;
+   if (0)
+      vex_printf("XXXXXXXXXX Bitfield Assignment number %u, "
+                 "type %s, variant %u\n",
+                 ++ctr, tyNm, variant);
+
+   /* It's vitally important that the returned aa, bb and cc are
+      atoms -- either constants or tmps.  If it's anything else
+      (eg, a GET) then incorporating them in a tree at this point
+      in the SB may erroneously pull them forwards (eg of a PUT
+      that originally was after the GET) and so transform the IR
+      wrongly.  spotBitfieldAssignment should guarantee only to
+      give us atoms, but we check here anyway. */
+   vassert(a && isIRAtom(a));
+   vassert(b && isIRAtom(b));
+   vassert(c && isIRAtom(c));
+
+   /* Build (a & ~c) | (b & c). */
+   return IRExpr_Binop(
+             opOR,
+             IRExpr_Binop(opAND, a, IRExpr_Unop(opNOT, c)),
+             IRExpr_Binop(opAND, b, c)
+          );
+}
+
+
+/* SB is modified in-place.  Visit all the IRExprs and, for those
+   which are allowed to be non-atomic, perform the XOR transform if
+   possible.  This makes |sb| be non-flat, but that's ok, the caller
+   can re-flatten it.  Returns True iff any changes were made. */
+static Bool do_XOR_TRANSFORM_IRSB ( IRSB* sb )
+{
+   Int  i;
+   Bool changed = False;
+
+   /* Make the tmp->expr environment, so we can use it for
+      chasing expressions. */
+   Int      n_tmps = sb->tyenv->types_used;
+   IRExpr** env = LibVEX_Alloc_inline(n_tmps * sizeof(IRExpr*));
+   for (i = 0; i < n_tmps; i++)
+      env[i] = NULL;
+
+   /* env[t] = the expression that defines temp t (or NULL). */
+   for (i = 0; i < sb->stmts_used; i++) {
+      IRStmt* st = sb->stmts[i];
+      if (st->tag != Ist_WrTmp)
+         continue;
+      IRTemp t = st->Ist.WrTmp.tmp;
+      vassert(t >= 0 && t < n_tmps);
+      env[t] = st->Ist.WrTmp.data;
+   }
+
+   /* Walk the statements.  Only a WrTmp's data may be non-atomic, and
+      that is where the transform can apply; everywhere else we just
+      assert the flat-SB invariant (atoms only). */
+   for (i = 0; i < sb->stmts_used; i++) {
+      IRStmt* st = sb->stmts[i];
+
+      switch (st->tag) {
+         case Ist_AbiHint:
+            vassert(isIRAtom(st->Ist.AbiHint.base));
+            vassert(isIRAtom(st->Ist.AbiHint.nia));
+            break;
+         case Ist_Put:
+            vassert(isIRAtom(st->Ist.Put.data));
+            break;
+         case Ist_PutI: {
+            IRPutI* puti = st->Ist.PutI.details;
+            vassert(isIRAtom(puti->ix));
+            vassert(isIRAtom(puti->data));
+            break;
+         }
+         case Ist_WrTmp: {
+            /* This is the one place where an expr (st->Ist.WrTmp.data) is
+               allowed to be more than just a constant or a tmp. */
+            IRExpr* mb_new_data
+               = do_XOR_TRANSFORMS_IRExpr(env, st->Ist.WrTmp.data);
+            if (mb_new_data) {
+               //ppIRSB(sb);
+               st->Ist.WrTmp.data = mb_new_data;
+               //ppIRSB(sb);
+               changed = True;
+            }
+            break;
+         }
+         case Ist_Store:
+            vassert(isIRAtom(st->Ist.Store.addr));
+            vassert(isIRAtom(st->Ist.Store.data));
+            break;
+         case Ist_StoreG: {
+            IRStoreG* sg = st->Ist.StoreG.details;
+            vassert(isIRAtom(sg->addr));
+            vassert(isIRAtom(sg->data));
+            vassert(isIRAtom(sg->guard));
+            break;
+         }
+         case Ist_LoadG: {
+            IRLoadG* lg = st->Ist.LoadG.details;
+            vassert(isIRAtom(lg->addr));
+            vassert(isIRAtom(lg->alt));
+            vassert(isIRAtom(lg->guard));
+            break;
+         }
+         case Ist_CAS: {
+            /* expdHi/dataHi may legitimately be NULL (single-word CAS). */
+            IRCAS* cas = st->Ist.CAS.details;
+            vassert(isIRAtom(cas->addr));
+            vassert(cas->expdHi == NULL || isIRAtom(cas->expdHi));
+            vassert(isIRAtom(cas->expdLo));
+            vassert(cas->dataHi == NULL || isIRAtom(cas->dataHi));
+            vassert(isIRAtom(cas->dataLo));
+            break;
+         }
+         case Ist_LLSC:
+            vassert(isIRAtom(st->Ist.LLSC.addr));
+            if (st->Ist.LLSC.storedata)
+               vassert(isIRAtom(st->Ist.LLSC.storedata));
+            break;
+         case Ist_Dirty: {
+            IRDirty* d = st->Ist.Dirty.details;
+            if (d->mFx != Ifx_None) {
+               vassert(isIRAtom(d->mAddr));
+            }
+            vassert(isIRAtom(d->guard));
+            for (Int j = 0; d->args[j]; j++) {
+               IRExpr* arg = d->args[j];
+               if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg))) {
+                  vassert(isIRAtom(arg));
+               }
+            }
+            break;
+         }
+         case Ist_IMark:
+         case Ist_NoOp:
+         case Ist_MBE:
+            break;
+         case Ist_Exit:
+            vassert(isIRAtom(st->Ist.Exit.guard));
+            break;
+         default:
+            vex_printf("\n"); ppIRStmt(st);
+            vpanic("do_XOR_TRANSFORMS_IRSB");
+      }
+   }
+
+   vassert(isIRAtom(sb->next));
+   return changed;
+}
+
+
+/* Run the MSVC bitfield-assignment transform over |sb|, returning the
+   (possibly reallocated) block. */
+static IRSB* do_MSVC_HACKS ( IRSB* sb )
+{
+   /* Step 1: normalise as much as we can.  This is the one-and-only
+      place where do_cse_BB is called with allowLoadsToBeCSEd set to
+      True. */
+   if (do_cse_BB( sb, True/*allowLoadsToBeCSEd*/ )) {
+      /* CSEing might have created dead code.  Remove it. */
+      sb = cprop_BB ( sb );
+      do_deadcode_BB(sb);
+   }
+
+   /* Step 2: visit all atoms and do the transformation proper; sb is
+      modified in-place. */
+   if (do_XOR_TRANSFORM_IRSB(sb)) {
+      /* The transformation generates non-flat expressions, so we now
+         need to re-flatten the block. */
+      sb = flatten_BB(sb);
+   }
+
+   return sb;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- iropt main                                              ---*/
+/*---------------------------------------------------------------*/
+
+static Bool iropt_verbose = False; /* True; */
+
+
+/* Do a simple cleanup pass on bb.  This is: redundant Get removal,
+   redundant Put removal, constant propagation, dead code removal,
+   clean helper specialisation, and dead code removal (again).
+*/
+
+
+/* Run the cheap pass sequence on |bb| and return the (possibly
+   reallocated) result; cprop_BB and spec_helpers_BB may return a new
+   IRSB, hence the reassignments of bb below. */
+static 
+IRSB* cheap_transformations ( 
+         IRSB* bb,
+         IRExpr* (*specHelper) (const HChar*, IRExpr**, IRStmt**, Int),
+         Bool (*preciseMemExnsFn)(Int,Int,VexRegisterUpdates),
+         VexRegisterUpdates pxControl
+      )
+{
+   redundant_get_removal_BB ( bb );
+   if (iropt_verbose) {
+      vex_printf("\n========= REDUNDANT GET\n\n" );
+      ppIRSB(bb);
+   }
+
+   /* Put removal is skipped when every register must be up to date at
+      each instruction boundary. */
+   if (pxControl < VexRegUpdAllregsAtEachInsn) {
+      redundant_put_removal_BB ( bb, preciseMemExnsFn, pxControl );
+   }
+   if (iropt_verbose) {
+      vex_printf("\n========= REDUNDANT PUT\n\n" );
+      ppIRSB(bb);
+   }
+
+   bb = cprop_BB ( bb );
+   if (iropt_verbose) {
+      vex_printf("\n========= CPROPD\n\n" );
+      ppIRSB(bb);
+   }
+
+   do_deadcode_BB ( bb );
+   if (iropt_verbose) {
+      vex_printf("\n========= DEAD\n\n" );
+      ppIRSB(bb);
+   }
+
+   /* Dead code is removed again after helper specialisation. */
+   bb = spec_helpers_BB ( bb, specHelper );
+   do_deadcode_BB ( bb );
+   if (iropt_verbose) {
+      vex_printf("\n========= SPECd \n\n" );
+      ppIRSB(bb);
+   }
+
+   return bb;
+}
+
+
+/* Do some more expensive transformations on bb, which are aimed at
+   optimising as much as possible in the presence of GetI and PutI.  */
+
+static
+IRSB* expensive_transformations( IRSB* bb, VexRegisterUpdates pxControl )
+{
+   /* CSE, but without commoning-up loads; the did-anything-change
+      result is deliberately ignored. */
+   (void)do_cse_BB( bb, False/*!allowLoadsToBeCSEd*/ );
+   collapse_AddSub_chains_BB( bb );
+   do_redundant_GetI_elimination( bb );
+   /* NOTE(review): PutI elimination is skipped in the strictest
+      register-update mode, presumably because it could discard state
+      the client requires at each insn -- confirm. */
+   if (pxControl < VexRegUpdAllregsAtEachInsn) {
+      do_redundant_PutI_elimination( bb, pxControl );
+   }
+   /* Clean up anything the above passes left dead (in place). */
+   do_deadcode_BB( bb );
+   return bb;
+}
+
+
+/* Scan a flattened BB to look for signs that more expensive
+   optimisations might be useful:
+   - find out if there are any GetIs and PutIs
+   - find out if there are any floating or vector-typed temporaries
+*/
+
+/* Single scan over |bb|, setting the two OUT flags.  As a side
+   effect, asserts that the block is flat: every statement operand
+   that must be an atom is checked with isIRAtom. */
+static void considerExpensives ( /*OUT*/Bool* hasGetIorPutI,
+                                 /*OUT*/Bool* hasVorFtemps,
+                                 IRSB* bb )
+{
+   Int      ix, argIx;
+   IRStmt*  stmt;
+   IRDirty* dirty;
+   IRCAS*   casd;
+
+   *hasGetIorPutI = False;
+   *hasVorFtemps  = False;
+
+   for (ix = 0; ix < bb->stmts_used; ix++) {
+      stmt = bb->stmts[ix];
+      switch (stmt->tag) {
+         case Ist_AbiHint:
+            vassert(isIRAtom(stmt->Ist.AbiHint.base));
+            vassert(isIRAtom(stmt->Ist.AbiHint.nia));
+            break;
+         case Ist_PutI:
+            *hasGetIorPutI = True;
+            break;
+         case Ist_WrTmp:
+            if (stmt->Ist.WrTmp.data->tag == Iex_GetI)
+               *hasGetIorPutI = True;
+            switch (typeOfIRTemp(bb->tyenv, stmt->Ist.WrTmp.tmp)) {
+               case Ity_I1: case Ity_I8: case Ity_I16:
+               case Ity_I32: case Ity_I64: case Ity_I128:
+                  break;
+               /* Any floating, decimal-floating or vector temp
+                  triggers the flag. */
+               case Ity_F16: case Ity_F32: case Ity_F64: case Ity_F128:
+               case Ity_D32: case Ity_D64: case Ity_D128:
+               case Ity_V128: case Ity_V256:
+                  *hasVorFtemps = True;
+                  break;
+               default:
+                  goto bad;
+            }
+            break;
+         case Ist_Put:
+            vassert(isIRAtom(stmt->Ist.Put.data));
+            break;
+         case Ist_Store:
+            vassert(isIRAtom(stmt->Ist.Store.addr));
+            vassert(isIRAtom(stmt->Ist.Store.data));
+            break;
+         case Ist_StoreG: {
+            IRStoreG* sg = stmt->Ist.StoreG.details;
+            vassert(isIRAtom(sg->addr));
+            vassert(isIRAtom(sg->data));
+            vassert(isIRAtom(sg->guard));
+            break;
+         }
+         case Ist_LoadG: {
+            IRLoadG* lg = stmt->Ist.LoadG.details;
+            vassert(isIRAtom(lg->addr));
+            vassert(isIRAtom(lg->alt));
+            vassert(isIRAtom(lg->guard));
+            break;
+         }
+         case Ist_CAS:
+            /* The Hi halves may legitimately be NULL. */
+            casd = stmt->Ist.CAS.details;
+            vassert(isIRAtom(casd->addr));
+            vassert(casd->expdHi == NULL || isIRAtom(casd->expdHi));
+            vassert(isIRAtom(casd->expdLo));
+            vassert(casd->dataHi == NULL || isIRAtom(casd->dataHi));
+            vassert(isIRAtom(casd->dataLo));
+            break;
+         case Ist_LLSC:
+            vassert(isIRAtom(stmt->Ist.LLSC.addr));
+            if (stmt->Ist.LLSC.storedata != NULL)
+               vassert(isIRAtom(stmt->Ist.LLSC.storedata));
+            break;
+         case Ist_Dirty:
+            dirty = stmt->Ist.Dirty.details;
+            vassert(isIRAtom(dirty->guard));
+            for (argIx = 0; dirty->args[argIx] != NULL; argIx++) {
+               IRExpr* arg = dirty->args[argIx];
+               if (LIKELY(!is_IRExpr_VECRET_or_BBPTR(arg)))
+                  vassert(isIRAtom(arg));
+            }
+            if (dirty->mFx != Ifx_None)
+               vassert(isIRAtom(dirty->mAddr));
+            break;
+         case Ist_NoOp:
+         case Ist_IMark:
+         case Ist_MBE:
+            break;
+         case Ist_Exit:
+            vassert(isIRAtom(stmt->Ist.Exit.guard));
+            break;
+         default:
+         bad:
+            ppIRStmt(stmt);
+            vpanic("considerExpensives");
+      }
+   }
+}
+
+
+/* ---------------- The main iropt entry point. ---------------- */
+
+/* exported from this file */
+/* Rules of the game:
+
+   - IRExpr/IRStmt trees should be treated as immutable, as they
+     may get shared.  So never change a field of such a tree node;
+     instead construct and return a new one if needed.
+*/
+
+
+/* Optimise |bb0| under the control of vex_control.iropt_level and
+   |pxControl|, returning a flattened IRSB (which may or may not be
+   the same object as |bb0|).  |specHelper| is the arch-specific
+   clean-helper specialiser.  |guest_addr| is the guest address of
+   the block, needed by the loop unroller.  |guest_arch| selects an
+   extra cleanup pass for ARM (Thumb2 chaff). */
+IRSB* do_iropt_BB(
+         IRSB* bb0,
+         IRExpr* (*specHelper) (const HChar*, IRExpr**, IRStmt**, Int),
+         Bool (*preciseMemExnsFn)(Int,Int,VexRegisterUpdates),
+         VexRegisterUpdates pxControl,
+         Addr    guest_addr,
+         VexArch guest_arch
+      )
+{
+   /* Counters used only for the DEBUG_IROPT report below. */
+   static Int n_total     = 0;
+   static Int n_expensive = 0;
+
+   Bool hasGetIorPutI, hasVorFtemps;
+   IRSB *bb, *bb2;
+
+   n_total++;
+
+   /* First flatten the block out, since all other
+      phases assume flat code. */
+
+   bb = flatten_BB ( bb0 );
+
+   if (iropt_verbose) {
+      vex_printf("\n========= FLAT\n\n" );
+      ppIRSB(bb);
+   }
+
+   /* If at level 0, stop now. */
+   if (vex_control.iropt_level <= 0) return bb;
+
+   /* Now do a preliminary cleanup pass, and figure out if we also
+      need to do 'expensive' optimisations.  Expensive optimisations
+      are deemed necessary if the block contains any GetIs or PutIs.
+      If needed, do expensive transformations and then another cheap
+      cleanup pass. */
+
+   bb = cheap_transformations( bb, specHelper, preciseMemExnsFn, pxControl );
+
+   if (guest_arch == VexArchARM) {
+      /* Translating Thumb2 code produces a lot of chaff.  We have to
+         work extra hard to get rid of it. */
+      bb = cprop_BB(bb);
+      bb = spec_helpers_BB ( bb, specHelper );
+      if (pxControl < VexRegUpdAllregsAtEachInsn) {
+         redundant_put_removal_BB ( bb, preciseMemExnsFn, pxControl );
+      }
+      do_cse_BB( bb, False/*!allowLoadsToBeCSEd*/ );
+      do_deadcode_BB( bb );
+   }
+
+   if (vex_control.iropt_level > 1) {
+
+      /* Peer at what we have, to decide how much more effort to throw
+         at it. */
+      considerExpensives( &hasGetIorPutI, &hasVorFtemps, bb );
+
+      if (hasVorFtemps && !hasGetIorPutI) {
+         /* If any evidence of FP or Vector activity, CSE, as that
+            tends to mop up all manner of lardy code to do with
+            rounding modes.  Don't bother if hasGetIorPutI since that
+            case leads into the expensive transformations, which do
+            CSE anyway. */
+         (void)do_cse_BB( bb, False/*!allowLoadsToBeCSEd*/ );
+         do_deadcode_BB( bb );
+      }
+
+      if (hasGetIorPutI) {
+         Bool cses;
+         n_expensive++;
+         if (DEBUG_IROPT)
+            vex_printf("***** EXPENSIVE %d %d\n", n_total, n_expensive);
+         bb = expensive_transformations( bb, pxControl );
+         bb = cheap_transformations( bb, specHelper,
+                                     preciseMemExnsFn, pxControl );
+         /* Potentially common up GetIs */
+         cses = do_cse_BB( bb, False/*!allowLoadsToBeCSEd*/ );
+         /* Only worth re-running the cheap passes if CSE actually
+            changed something. */
+         if (cses)
+            bb = cheap_transformations( bb, specHelper,
+                                        preciseMemExnsFn, pxControl );
+      }
+
+      ///////////////////////////////////////////////////////////
+      // BEGIN MSVC optimised code transformation hacks
+      // (disabled: guarded by if (0))
+      if (0)
+         bb = do_MSVC_HACKS(bb);
+      // END   MSVC optimised code transformation hacks
+      ///////////////////////////////////////////////////////////
+
+      /* Now have a go at unrolling simple (single-BB) loops.  If
+         successful, clean up the results as much as possible. */
+
+      bb2 = maybe_loop_unroll_BB( bb, guest_addr );
+      if (bb2) {
+         bb = cheap_transformations( bb2, specHelper,
+                                     preciseMemExnsFn, pxControl );
+         if (hasGetIorPutI) {
+            bb = expensive_transformations( bb, pxControl );
+            bb = cheap_transformations( bb, specHelper,
+                                        preciseMemExnsFn, pxControl );
+         } else {
+            /* at least do CSE and dead code removal */
+            do_cse_BB( bb, False/*!allowLoadsToBeCSEd*/ );
+            do_deadcode_BB( bb );
+         }
+         if (0) vex_printf("vex iropt: unrolled a loop\n");
+      }
+
+   }
+
+   return bb;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                            ir_opt.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/ir_opt.h b/VEX/priv/ir_opt.h
new file mode 100644
index 0000000..43b2a47
--- /dev/null
+++ b/VEX/priv/ir_opt.h
@@ -0,0 +1,79 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                          ir_opt.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __VEX_IR_OPT_H
+#define __VEX_IR_OPT_H
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+#include "libvex.h"
+
+/* Top level optimiser entry point.  Returns a new BB.  Operates
+   under the control of the global "vex_control" struct and of the
+   supplied |pxControl| argument. */
+extern 
+IRSB* do_iropt_BB (
+         IRSB* bb,
+         IRExpr* (*specHelper) (const HChar*, IRExpr**, IRStmt**, Int),
+         Bool (*preciseMemExnsFn)(Int,Int,VexRegisterUpdates),
+         VexRegisterUpdates pxControl,
+         Addr    guest_addr,
+         VexArch guest_arch
+      );
+
+/* Do a constant folding/propagation pass. */
+extern
+IRSB* cprop_BB ( IRSB* );
+
+/* Do a dead-code removal pass.  bb is destructively modified. */
+extern
+void do_deadcode_BB ( IRSB* bb );
+
+/* The tree-builder.  Make (approximately) maximal safe trees.  bb is
+   destructively modified.  Returns (unrelatedly, but useful later on)
+   the guest address of the highest addressed byte from any insn in
+   this block, or Addr_MAX if unknown (can that ever happen?) */
+extern
+Addr ado_treebuild_BB (
+        IRSB* bb,
+        Bool (*preciseMemExnsFn)(Int,Int,VexRegisterUpdates),
+        VexRegisterUpdates pxControl
+     );
+
+#endif /* ndef __VEX_IR_OPT_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                            ir_opt.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/main_globals.c b/VEX/priv/main_globals.c
new file mode 100644
index 0000000..dbe6369
--- /dev/null
+++ b/VEX/priv/main_globals.c
@@ -0,0 +1,68 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                    main_globals.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+
+#include "main_util.h"
+#include "main_globals.h"
+
+
+/* Global settings for the VEX library.  These are the
+   only library-wide globals. */
+
+/* Are we started yet? */
+Bool vex_initdone = False;
+
+/* failure exit function */
+__attribute__ ((noreturn))
+void (*vex_failure_exit) ( void ) = NULL;
+
+/* logging output function */
+void (*vex_log_bytes) ( const HChar*, SizeT nbytes ) = NULL;
+
+/* debug paranoia level */
+Int vex_debuglevel = 0;
+
+/* trace flags */
+Int vex_traceflags = 0;
+
+/* Optimiser/front-end control (the whole VexControl struct, as
+   declared in main_globals.h -- the old "Max # guest insns per bb"
+   comment was stale).  Fields beyond the six listed are
+   zero-initialised; the real values are copied in by LibVEX_Init. */
+VexControl vex_control = { 0,0,False,0,0,0 };
+
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                      main_globals.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/main_globals.h b/VEX/priv/main_globals.h
new file mode 100644
index 0000000..eb97ec1
--- /dev/null
+++ b/VEX/priv/main_globals.h
@@ -0,0 +1,81 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                    main_globals.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __VEX_MAIN_GLOBALS_H
+#define __VEX_MAIN_GLOBALS_H
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+
+
+/* Global settings for the VEX library.  These are the
+   only library-wide globals. */
+
+/* Are we started yet? */
+extern Bool vex_initdone;
+
+/* failure exit function */
+__attribute__ ((noreturn))
+extern void (*vex_failure_exit) ( void );
+
+/* logging output function */
+extern void (*vex_log_bytes) ( const HChar*, SizeT nbytes );
+
+/* debug paranoia level */
+extern Int vex_debuglevel;
+
+/* trace flags */
+extern Int vex_traceflags;
+
+/* Optimiser/front-end control */
+extern VexControl vex_control;
+
+
+/* vex_traceflags values */
+#define VEX_TRACE_FE     (1 << 7)  /* show conversion into IR */
+#define VEX_TRACE_OPT1   (1 << 6)  /* show after initial opt */
+#define VEX_TRACE_INST   (1 << 5)  /* show after instrumentation */
+#define VEX_TRACE_OPT2   (1 << 4)  /* show after second opt */
+#define VEX_TRACE_TREES  (1 << 3)  /* show after tree building */
+#define VEX_TRACE_VCODE  (1 << 2)  /* show selected insns */
+#define VEX_TRACE_RCODE  (1 << 1)  /* show after reg-alloc */
+#define VEX_TRACE_ASM    (1 << 0)  /* show final assembly */
+
+
+#endif /* ndef __VEX_MAIN_GLOBALS_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                      main_globals.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/main_main.c b/VEX/priv/main_main.c
new file mode 100644
index 0000000..ec68065
--- /dev/null
+++ b/VEX/priv/main_main.c
@@ -0,0 +1,1936 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- Begin                                       main_main.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex.h"
+#include "libvex_emnote.h"
+#include "libvex_guest_x86.h"
+#include "libvex_guest_amd64.h"
+#include "libvex_guest_arm.h"
+#include "libvex_guest_arm64.h"
+#include "libvex_guest_ppc32.h"
+#include "libvex_guest_ppc64.h"
+#include "libvex_guest_s390x.h"
+#include "libvex_guest_mips32.h"
+#include "libvex_guest_mips64.h"
+#include "libvex_guest_tilegx.h"
+
+#include "main_globals.h"
+#include "main_util.h"
+#include "host_generic_regs.h"
+#include "ir_opt.h"
+
+#include "host_x86_defs.h"
+#include "host_amd64_defs.h"
+#include "host_ppc_defs.h"
+#include "host_arm_defs.h"
+#include "host_arm64_defs.h"
+#include "host_s390_defs.h"
+#include "host_mips_defs.h"
+#include "host_tilegx_defs.h"
+
+#include "guest_generic_bb_to_IR.h"
+#include "guest_x86_defs.h"
+#include "guest_amd64_defs.h"
+#include "guest_arm_defs.h"
+#include "guest_arm64_defs.h"
+#include "guest_ppc_defs.h"
+#include "guest_s390_defs.h"
+#include "guest_mips_defs.h"
+#include "guest_tilegx_defs.h"
+
+#include "host_generic_simd128.h"
+
+/* For each architecture <arch>, we define 2 macros:
+   <arch>FN that has as argument a pointer (typically to a function
+            or the return value of a function).
+   <arch>ST that has as argument a statement.
+   If main_main.c is compiled for <arch>, then these macros just expand
+   their arg.
+   Otherwise, the macros expand to respectively NULL and vassert(0).
+   These macros are used to avoid introducing dependencies to object
+   files not needed for the (only) architecture we are compiling for. 
+
+   To still compile the below for all supported architectures, define
+   VEXMULTIARCH. This is used by the file multiarch_main_main.c */
+
+#if defined(VGA_x86) || defined(VEXMULTIARCH)
+#define X86FN(f) f
+#define X86ST(f) f
+#else
+#define X86FN(f) NULL
+#define X86ST(f) vassert(0)
+#endif
+
+#if defined(VGA_amd64) || defined(VEXMULTIARCH)
+#define AMD64FN(f) f
+#define AMD64ST(f) f
+#else
+#define AMD64FN(f) NULL
+#define AMD64ST(f) vassert(0)
+#endif
+
+#if defined(VGA_ppc32) || defined(VEXMULTIARCH)
+#define PPC32FN(f) f
+#define PPC32ST(f) f
+#else
+#define PPC32FN(f) NULL
+#define PPC32ST(f) vassert(0)
+#endif
+
+#if defined(VGA_ppc64be) || defined(VGA_ppc64le) || defined(VEXMULTIARCH)
+#define PPC64FN(f) f
+#define PPC64ST(f) f
+#else
+#define PPC64FN(f) NULL
+#define PPC64ST(f) vassert(0)
+#endif
+
+#if defined(VGA_s390x) || defined(VEXMULTIARCH)
+#define S390FN(f) f
+#define S390ST(f) f
+#else
+#define S390FN(f) NULL
+#define S390ST(f) vassert(0)
+#endif
+
+#if defined(VGA_arm) || defined(VEXMULTIARCH)
+#define ARMFN(f) f
+#define ARMST(f) f
+#else
+#define ARMFN(f) NULL
+#define ARMST(f) vassert(0)
+#endif
+
+#if defined(VGA_arm64) || defined(VEXMULTIARCH)
+#define ARM64FN(f) f
+#define ARM64ST(f) f
+#else
+#define ARM64FN(f) NULL
+#define ARM64ST(f) vassert(0)
+#endif
+
+#if defined(VGA_mips32) || defined(VEXMULTIARCH)
+#define MIPS32FN(f) f
+#define MIPS32ST(f) f
+#else
+#define MIPS32FN(f) NULL
+#define MIPS32ST(f) vassert(0)
+#endif
+
+#if defined(VGA_mips64) || defined(VEXMULTIARCH)
+#define MIPS64FN(f) f
+#define MIPS64ST(f) f
+#else
+#define MIPS64FN(f) NULL
+#define MIPS64ST(f) vassert(0)
+#endif
+
+#if defined(VGA_tilegx) || defined(VEXMULTIARCH)
+#define TILEGXFN(f) f
+#define TILEGXST(f) f
+#else
+#define TILEGXFN(f) NULL
+#define TILEGXST(f) vassert(0)
+#endif
+
+
+/* This file contains the top level interface to the library. */
+
+/* --------- fwds ... --------- */
+
+static void  check_hwcaps ( VexArch arch, UInt hwcaps );
+static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps );
+
+
+/* --------- helpers --------- */
+
+/* Division helpers used by the run-time rounding checks in
+   LibVEX_Init.  NOTE(review): marked noinline, presumably so the
+   compiler cannot constant-fold those checks away -- confirm. */
+__attribute__((noinline))
+static UInt udiv32 ( UInt x, UInt y ) { return x/y; }
+__attribute__((noinline))
+static  Int sdiv32 (  Int x,  Int y ) { return x/y; }
+
+
+/* --------- Initialise the library. --------- */
+
+/* Exported to library client. */
+
+/* Fill in *vcon with default values for all VexControl fields. */
+void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
+{
+   /* Zero the whole struct first, so any fields not explicitly set
+      below get a defined (zero) value. */
+   vex_bzero(vcon, sizeof(*vcon));
+   vcon->iropt_verbosity                = 0;
+   vcon->iropt_level                    = 2;  /* 2 is the maximum; see
+                                                 LibVEX_Init's checks */
+   vcon->iropt_register_updates_default = VexRegUpdUnwindregsAtMemAccess;
+   vcon->iropt_unroll_thresh            = 120;
+   vcon->guest_max_insns                = 60;
+   vcon->guest_chase_thresh             = 10;
+   vcon->guest_chase_cond               = False;
+}
+
+
+/* Exported to library client. */
+
+/* One-time library initialisation.  Installs the client's failure
+   and logging callbacks, sanity-checks |vcon|, verifies basic-type
+   sizes and host division semantics, then copies |vcon| into the
+   global vex_control and marks the library as initialised.  May be
+   called only once (asserts !vex_initdone). */
+void LibVEX_Init (
+   /* failure exit function */
+   __attribute__ ((noreturn))
+   void (*failure_exit) ( void ),
+   /* logging output function */
+   void (*log_bytes) ( const HChar*, SizeT nbytes ),
+   /* debug paranoia level */
+   Int debuglevel,
+   /* Control ... */
+   const VexControl* vcon
+)
+{
+   /* First off, do enough minimal setup so that the following
+      assertions can fail in a sane fashion, if need be. */
+   vex_failure_exit = failure_exit;
+   vex_log_bytes    = log_bytes;
+
+   /* Now it's safe to check parameters for sanity. */
+   vassert(!vex_initdone);
+   vassert(failure_exit);
+   vassert(log_bytes);
+   vassert(debuglevel >= 0);
+
+   vassert(vcon->iropt_verbosity >= 0);
+   vassert(vcon->iropt_level >= 0);
+   vassert(vcon->iropt_level <= 2);
+   vassert(vcon->iropt_unroll_thresh >= 0);
+   vassert(vcon->iropt_unroll_thresh <= 400);
+   vassert(vcon->guest_max_insns >= 1);
+   vassert(vcon->guest_max_insns <= 100);
+   vassert(vcon->guest_chase_thresh >= 0);
+   vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
+   vassert(vcon->guest_chase_cond == True 
+           || vcon->guest_chase_cond == False);
+
+   /* Check that Vex has been built with sizes of basic types as
+      stated in priv/libvex_basictypes.h.  Failure of any of these is
+      a serious configuration error and should be corrected
+      immediately.  If any of these assertions fail you can fully
+      expect Vex not to work properly, if at all. */
+
+   vassert(1 == sizeof(UChar));
+   vassert(1 == sizeof(Char));
+   vassert(2 == sizeof(UShort));
+   vassert(2 == sizeof(Short));
+   vassert(4 == sizeof(UInt));
+   vassert(4 == sizeof(Int));
+   vassert(8 == sizeof(ULong));
+   vassert(8 == sizeof(Long));
+   vassert(4 == sizeof(Float));
+   vassert(8 == sizeof(Double));
+   vassert(1 == sizeof(Bool));
+   vassert(4 == sizeof(Addr32));
+   vassert(8 == sizeof(Addr64));
+   vassert(16 == sizeof(U128));
+   vassert(16 == sizeof(V128));
+   vassert(32 == sizeof(U256));
+
+   vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
+   vassert(sizeof(void*) == sizeof(int*));
+   vassert(sizeof(void*) == sizeof(HWord));
+   vassert(sizeof(void*) == sizeof(Addr));
+   vassert(sizeof(unsigned long) == sizeof(SizeT));
+
+   vassert(VEX_HOST_WORDSIZE == sizeof(void*));
+   vassert(VEX_HOST_WORDSIZE == sizeof(HWord));
+
+   /* These take a lot of space, so make sure we don't have
+      any unnoticed size regressions. */
+   if (VEX_HOST_WORDSIZE == 4) {
+      vassert(sizeof(IRExpr) == 16);
+      vassert(sizeof(IRStmt) == 20 /* x86 */
+              || sizeof(IRStmt) == 24 /* arm */);
+   } else {
+      vassert(sizeof(IRExpr) == 32);
+      vassert(sizeof(IRStmt) == 32);
+   }
+
+   /* Ditto */
+   vassert(sizeof(HReg) == 4);
+   /* If N_RREGUNIVERSE_REGS ever exceeds 64, the bitset fields in
+      RRegSet and HRegUsage will need to be changed to something
+      better than ULong. */
+   vassert(N_RREGUNIVERSE_REGS == 64);
+
+   /* Check that signed integer division on the host rounds towards
+      zero.  If not, h_calc_sdiv32_w_arm_semantics() won't work
+      correctly. */
+   /* 100.0 / 7.0 == 14.2857 */
+   vassert(udiv32(100, 7) == 14);
+   vassert(sdiv32(100, 7) == 14);
+   vassert(sdiv32(-100, 7) == -14); /* and not -15 */
+   vassert(sdiv32(100, -7) == -14); /* ditto */
+   vassert(sdiv32(-100, -7) == 14); /* not sure what this proves */
+
+   /* Really start up .. */
+   vex_debuglevel         = debuglevel;
+   vex_control            = *vcon;   /* struct copy */
+   vex_initdone           = True;
+   vexSetAllocMode ( VexAllocModeTEMP );
+}
+
+
+/* --------- Make a translation. --------- */
+/* KLUDGE: S390 need to know the hwcaps of the host when generating
+   code. But that info is not passed to emit_S390Instr. Only mode64 is
+   being passed. So, ideally, we want this passed as an argument, too.
+   Until then, we use a global variable. This variable is set as a side
+   effect of LibVEX_Translate. The variable is defined here rather than
+   in host_s390_defs.c to avoid having main_main.c dragging S390
+   object files in non VEXMULTIARCH. */
+UInt s390_host_hwcaps;
+
+
+/* Exported to library client. */
+
+VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
+{
+   /* This the bundle of functions we need to do the back-end stuff
+      (insn selection, reg-alloc, assembly) whilst being insulated
+      from the target instruction set. */
+   Bool         (*isMove)       ( const HInstr*, HReg*, HReg* );
+   void         (*getRegUsage)  ( HRegUsage*, const HInstr*, Bool );
+   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
+   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
+   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
+   HInstr*      (*directReload) ( HInstr*, HReg, Short );
+   void         (*ppInstr)      ( const HInstr*, Bool );
+   void         (*ppReg)        ( HReg );
+   HInstrArray* (*iselSB)       ( const IRSB*, VexArch, const VexArchInfo*,
+                                  const VexAbiInfo*, Int, Int, Bool, Bool,
+                                  Addr );
+   Int          (*emit)         ( /*MB_MOD*/Bool*,
+                                  UChar*, Int, const HInstr*, Bool, VexEndness,
+                                  const void*, const void*, const void*,
+                                  const void* );
+   IRExpr*      (*specHelper)   ( const HChar*, IRExpr**, IRStmt**, Int );
+   Bool         (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates );
+
+   const RRegUniverse* rRegUniv = NULL;
+
+   DisOneInstrFn disInstrFn;
+
+   VexGuestLayout* guest_layout;
+   IRSB*           irsb;
+   HInstrArray*    vcode;
+   HInstrArray*    rcode;
+   Int             i, j, k, out_used, guest_sizeB;
+   Int             offB_CMSTART, offB_CMLEN, offB_GUEST_IP, szB_GUEST_IP;
+   Int             offB_HOST_EvC_COUNTER, offB_HOST_EvC_FAILADDR;
+   UChar           insn_bytes[128];
+   IRType          guest_word_type;
+   IRType          host_word_type;
+   Bool            mode64, chainingAllowed;
+   Addr            max_ga;
+
+   guest_layout           = NULL;
+   isMove                 = NULL;
+   getRegUsage            = NULL;
+   mapRegs                = NULL;
+   genSpill               = NULL;
+   genReload              = NULL;
+   directReload           = NULL;
+   ppInstr                = NULL;
+   ppReg                  = NULL;
+   iselSB                 = NULL;
+   emit                   = NULL;
+   specHelper             = NULL;
+   preciseMemExnsFn       = NULL;
+   disInstrFn             = NULL;
+   guest_word_type        = Ity_INVALID;
+   host_word_type         = Ity_INVALID;
+   offB_CMSTART           = 0;
+   offB_CMLEN             = 0;
+   offB_GUEST_IP          = 0;
+   szB_GUEST_IP           = 0;
+   offB_HOST_EvC_COUNTER  = 0;
+   offB_HOST_EvC_FAILADDR = 0;
+   mode64                 = False;
+   chainingAllowed        = False;
+
+   vex_traceflags = vta->traceflags;
+
+   vassert(vex_initdone);
+   vassert(vta->needs_self_check  != NULL);
+   vassert(vta->disp_cp_xassisted != NULL);
+   /* Both the chainers and the indir are either NULL or non-NULL. */
+   if (vta->disp_cp_chain_me_to_slowEP        != NULL) {
+      vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
+      vassert(vta->disp_cp_xindir             != NULL);
+      chainingAllowed = True;
+   } else {
+      vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
+      vassert(vta->disp_cp_xindir             == NULL);
+   }
+
+   vexSetAllocModeTEMP_and_clear();
+   vexAllocSanityCheck();
+
+   /* First off, check that the guest and host insn sets
+      are supported. */
+
+   switch (vta->arch_host) {
+
+      case VexArchX86:
+         mode64       = False;
+         rRegUniv     = X86FN(getRRegUniverse_X86());
+         isMove       = (__typeof__(isMove)) X86FN(isMove_X86Instr);
+         getRegUsage  
+            = (__typeof__(getRegUsage)) X86FN(getRegUsage_X86Instr);
+         mapRegs      = (__typeof__(mapRegs)) X86FN(mapRegs_X86Instr);
+         genSpill     = (__typeof__(genSpill)) X86FN(genSpill_X86);
+         genReload    = (__typeof__(genReload)) X86FN(genReload_X86);
+         directReload = (__typeof__(directReload)) X86FN(directReload_X86);
+         ppInstr      = (__typeof__(ppInstr)) X86FN(ppX86Instr);
+         ppReg        = (__typeof__(ppReg)) X86FN(ppHRegX86);
+         iselSB       = X86FN(iselSB_X86);
+         emit         = (__typeof__(emit)) X86FN(emit_X86Instr);
+         host_word_type = Ity_I32;
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
+         break;
+
+      case VexArchAMD64:
+         mode64       = True;
+         rRegUniv     = AMD64FN(getRRegUniverse_AMD64());
+         isMove       = (__typeof__(isMove)) AMD64FN(isMove_AMD64Instr);
+         getRegUsage  
+            = (__typeof__(getRegUsage)) AMD64FN(getRegUsage_AMD64Instr);
+         mapRegs      = (__typeof__(mapRegs)) AMD64FN(mapRegs_AMD64Instr);
+         genSpill     = (__typeof__(genSpill)) AMD64FN(genSpill_AMD64);
+         genReload    = (__typeof__(genReload)) AMD64FN(genReload_AMD64);
+         ppInstr      = (__typeof__(ppInstr)) AMD64FN(ppAMD64Instr);
+         ppReg        = (__typeof__(ppReg)) AMD64FN(ppHRegAMD64);
+         iselSB       = AMD64FN(iselSB_AMD64);
+         emit         = (__typeof__(emit)) AMD64FN(emit_AMD64Instr);
+         host_word_type = Ity_I64;
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
+         break;
+
+      case VexArchPPC32:
+         mode64       = False;
+         rRegUniv     = PPC32FN(getRRegUniverse_PPC(mode64));
+         isMove       = (__typeof__(isMove)) PPC32FN(isMove_PPCInstr);
+         getRegUsage  
+            = (__typeof__(getRegUsage)) PPC32FN(getRegUsage_PPCInstr);
+         mapRegs      = (__typeof__(mapRegs)) PPC32FN(mapRegs_PPCInstr);
+         genSpill     = (__typeof__(genSpill)) PPC32FN(genSpill_PPC);
+         genReload    = (__typeof__(genReload)) PPC32FN(genReload_PPC);
+         ppInstr      = (__typeof__(ppInstr)) PPC32FN(ppPPCInstr);
+         ppReg        = (__typeof__(ppReg)) PPC32FN(ppHRegPPC);
+         iselSB       = PPC32FN(iselSB_PPC);
+         emit         = (__typeof__(emit)) PPC32FN(emit_PPCInstr);
+         host_word_type = Ity_I32;
+         vassert(vta->archinfo_host.endness == VexEndnessBE);
+         break;
+
+      case VexArchPPC64:
+         mode64       = True;
+         rRegUniv     = PPC64FN(getRRegUniverse_PPC(mode64));
+         isMove       = (__typeof__(isMove)) PPC64FN(isMove_PPCInstr);
+         getRegUsage  
+            = (__typeof__(getRegUsage)) PPC64FN(getRegUsage_PPCInstr);
+         mapRegs      = (__typeof__(mapRegs)) PPC64FN(mapRegs_PPCInstr);
+         genSpill     = (__typeof__(genSpill)) PPC64FN(genSpill_PPC);
+         genReload    = (__typeof__(genReload)) PPC64FN(genReload_PPC);
+         ppInstr      = (__typeof__(ppInstr)) PPC64FN(ppPPCInstr);
+         ppReg        = (__typeof__(ppReg)) PPC64FN(ppHRegPPC);
+         iselSB       = PPC64FN(iselSB_PPC);
+         emit         = (__typeof__(emit)) PPC64FN(emit_PPCInstr);
+         host_word_type = Ity_I64;
+         vassert(vta->archinfo_host.endness == VexEndnessBE ||
+                 vta->archinfo_host.endness == VexEndnessLE );
+         break;
+
+      case VexArchS390X:
+         mode64       = True;
+         /* KLUDGE: export hwcaps. */
+         s390_host_hwcaps = vta->archinfo_host.hwcaps;
+         rRegUniv     = S390FN(getRRegUniverse_S390());
+         isMove       = (__typeof__(isMove)) S390FN(isMove_S390Instr);
+         getRegUsage  
+            = (__typeof__(getRegUsage)) S390FN(getRegUsage_S390Instr);
+         mapRegs      = (__typeof__(mapRegs)) S390FN(mapRegs_S390Instr);
+         genSpill     = (__typeof__(genSpill)) S390FN(genSpill_S390);
+         genReload    = (__typeof__(genReload)) S390FN(genReload_S390);
+         // fixs390: consider implementing directReload_S390
+         ppInstr      = (__typeof__(ppInstr)) S390FN(ppS390Instr);
+         ppReg        = (__typeof__(ppReg)) S390FN(ppHRegS390);
+         iselSB       = S390FN(iselSB_S390);
+         emit         = (__typeof__(emit)) S390FN(emit_S390Instr);
+         host_word_type = Ity_I64;
+         vassert(vta->archinfo_host.endness == VexEndnessBE);
+         break;
+
+      case VexArchARM:
+         mode64       = False;
+         rRegUniv     = ARMFN(getRRegUniverse_ARM());
+         isMove       = (__typeof__(isMove)) ARMFN(isMove_ARMInstr);
+         getRegUsage  
+            = (__typeof__(getRegUsage)) ARMFN(getRegUsage_ARMInstr);
+         mapRegs      = (__typeof__(mapRegs)) ARMFN(mapRegs_ARMInstr);
+         genSpill     = (__typeof__(genSpill)) ARMFN(genSpill_ARM);
+         genReload    = (__typeof__(genReload)) ARMFN(genReload_ARM);
+         ppInstr      = (__typeof__(ppInstr)) ARMFN(ppARMInstr);
+         ppReg        = (__typeof__(ppReg)) ARMFN(ppHRegARM);
+         iselSB       = ARMFN(iselSB_ARM);
+         emit         = (__typeof__(emit)) ARMFN(emit_ARMInstr);
+         host_word_type = Ity_I32;
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
+         break;
+
+      case VexArchARM64:
+         mode64       = True;
+         rRegUniv     = ARM64FN(getRRegUniverse_ARM64());
+         isMove       = (__typeof__(isMove)) ARM64FN(isMove_ARM64Instr);
+         getRegUsage  
+            = (__typeof__(getRegUsage)) ARM64FN(getRegUsage_ARM64Instr);
+         mapRegs      = (__typeof__(mapRegs)) ARM64FN(mapRegs_ARM64Instr);
+         genSpill     = (__typeof__(genSpill)) ARM64FN(genSpill_ARM64);
+         genReload    = (__typeof__(genReload)) ARM64FN(genReload_ARM64);
+         ppInstr      = (__typeof__(ppInstr)) ARM64FN(ppARM64Instr);
+         ppReg        = (__typeof__(ppReg)) ARM64FN(ppHRegARM64);
+         iselSB       = ARM64FN(iselSB_ARM64);
+         emit         = (__typeof__(emit)) ARM64FN(emit_ARM64Instr);
+         host_word_type = Ity_I64;
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
+         break;
+
+      case VexArchMIPS32:
+         mode64       = False;
+         rRegUniv     = MIPS32FN(getRRegUniverse_MIPS(mode64));
+         isMove       = (__typeof__(isMove)) MIPS32FN(isMove_MIPSInstr);
+         getRegUsage  
+            = (__typeof__(getRegUsage)) MIPS32FN(getRegUsage_MIPSInstr);
+         mapRegs      = (__typeof__(mapRegs)) MIPS32FN(mapRegs_MIPSInstr);
+         genSpill     = (__typeof__(genSpill)) MIPS32FN(genSpill_MIPS);
+         genReload    = (__typeof__(genReload)) MIPS32FN(genReload_MIPS);
+         ppInstr      = (__typeof__(ppInstr)) MIPS32FN(ppMIPSInstr);
+         ppReg        = (__typeof__(ppReg)) MIPS32FN(ppHRegMIPS);
+         iselSB       = MIPS32FN(iselSB_MIPS);
+         emit         = (__typeof__(emit)) MIPS32FN(emit_MIPSInstr);
+         host_word_type = Ity_I32;
+         vassert(vta->archinfo_host.endness == VexEndnessLE
+                 || vta->archinfo_host.endness == VexEndnessBE);
+         break;
+
+      case VexArchMIPS64:
+         mode64       = True;
+         rRegUniv     = MIPS64FN(getRRegUniverse_MIPS(mode64));
+         isMove       = (__typeof__(isMove)) MIPS64FN(isMove_MIPSInstr);
+         getRegUsage  
+            = (__typeof__(getRegUsage)) MIPS64FN(getRegUsage_MIPSInstr);
+         mapRegs      = (__typeof__(mapRegs)) MIPS64FN(mapRegs_MIPSInstr);
+         genSpill     = (__typeof__(genSpill)) MIPS64FN(genSpill_MIPS);
+         genReload    = (__typeof__(genReload)) MIPS64FN(genReload_MIPS);
+         ppInstr      = (__typeof__(ppInstr)) MIPS64FN(ppMIPSInstr);
+         ppReg        = (__typeof__(ppReg)) MIPS64FN(ppHRegMIPS);
+         iselSB       = MIPS64FN(iselSB_MIPS);
+         emit         = (__typeof__(emit)) MIPS64FN(emit_MIPSInstr);
+         host_word_type = Ity_I64;
+         vassert(vta->archinfo_host.endness == VexEndnessLE
+                 || vta->archinfo_host.endness == VexEndnessBE);
+         break;
+
+      case VexArchTILEGX:
+         mode64      = True;
+         rRegUniv    = TILEGXFN(getRRegUniverse_TILEGX());
+         isMove      = (__typeof__(isMove)) TILEGXFN(isMove_TILEGXInstr);
+         getRegUsage =
+            (__typeof__(getRegUsage)) TILEGXFN(getRegUsage_TILEGXInstr);
+         mapRegs     = (__typeof__(mapRegs)) TILEGXFN(mapRegs_TILEGXInstr);
+         genSpill    = (__typeof__(genSpill)) TILEGXFN(genSpill_TILEGX);
+         genReload   = (__typeof__(genReload)) TILEGXFN(genReload_TILEGX);
+         ppInstr     = (__typeof__(ppInstr)) TILEGXFN(ppTILEGXInstr);
+         ppReg       = (__typeof__(ppReg)) TILEGXFN(ppHRegTILEGX);
+         iselSB      = TILEGXFN(iselSB_TILEGX);
+         emit        = (__typeof__(emit)) TILEGXFN(emit_TILEGXInstr);
+         host_word_type    = Ity_I64;
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
+         break;
+
+      default:
+         vpanic("LibVEX_Translate: unsupported host insn set");
+   }
+
+   // Check that the host's hardware capabilities are feasible. The function
+   // will not return if hwcaps are infeasible in some sense.
+   check_hwcaps(vta->arch_host, vta->archinfo_host.hwcaps);
+
+   switch (vta->arch_guest) {
+
+      case VexArchX86:
+         preciseMemExnsFn       
+            = X86FN(guest_x86_state_requires_precise_mem_exns);
+         disInstrFn             = X86FN(disInstr_X86);
+         specHelper             = X86FN(guest_x86_spechelper);
+         guest_sizeB            = sizeof(VexGuestX86State);
+         guest_word_type        = Ity_I32;
+         guest_layout           = X86FN(&x86guest_layout);
+         offB_CMSTART           = offsetof(VexGuestX86State,guest_CMSTART);
+         offB_CMLEN             = offsetof(VexGuestX86State,guest_CMLEN);
+         offB_GUEST_IP          = offsetof(VexGuestX86State,guest_EIP);
+         szB_GUEST_IP           = sizeof( ((VexGuestX86State*)0)->guest_EIP );
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
+         vassert(0 == sizeof(VexGuestX86State) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4);
+         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMLEN  ) == 4);
+         vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
+         break;
+
+      case VexArchAMD64:
+         preciseMemExnsFn       
+            = AMD64FN(guest_amd64_state_requires_precise_mem_exns);
+         disInstrFn             = AMD64FN(disInstr_AMD64);
+         specHelper             = AMD64FN(guest_amd64_spechelper);
+         guest_sizeB            = sizeof(VexGuestAMD64State);
+         guest_word_type        = Ity_I64;
+         guest_layout           = AMD64FN(&amd64guest_layout);
+         offB_CMSTART           = offsetof(VexGuestAMD64State,guest_CMSTART);
+         offB_CMLEN             = offsetof(VexGuestAMD64State,guest_CMLEN);
+         offB_GUEST_IP          = offsetof(VexGuestAMD64State,guest_RIP);
+         szB_GUEST_IP           = sizeof( ((VexGuestAMD64State*)0)->guest_RIP );
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
+         vassert(0 == sizeof(VexGuestAMD64State) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMSTART ) == 8);
+         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMLEN   ) == 8);
+         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR  ) == 8);
+         break;
+
+      case VexArchPPC32:
+         preciseMemExnsFn       
+            = PPC32FN(guest_ppc32_state_requires_precise_mem_exns);
+         disInstrFn             = PPC32FN(disInstr_PPC);
+         specHelper             = PPC32FN(guest_ppc32_spechelper);
+         guest_sizeB            = sizeof(VexGuestPPC32State);
+         guest_word_type        = Ity_I32;
+         guest_layout           = PPC32FN(&ppc32Guest_layout);
+         offB_CMSTART           = offsetof(VexGuestPPC32State,guest_CMSTART);
+         offB_CMLEN             = offsetof(VexGuestPPC32State,guest_CMLEN);
+         offB_GUEST_IP          = offsetof(VexGuestPPC32State,guest_CIA);
+         szB_GUEST_IP           = sizeof( ((VexGuestPPC32State*)0)->guest_CIA );
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessBE);
+         vassert(0 == sizeof(VexGuestPPC32State) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMSTART ) == 4);
+         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMLEN   ) == 4);
+         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR  ) == 4);
+         break;
+
+      case VexArchPPC64:
+         preciseMemExnsFn       
+            = PPC64FN(guest_ppc64_state_requires_precise_mem_exns);
+         disInstrFn             = PPC64FN(disInstr_PPC);
+         specHelper             = PPC64FN(guest_ppc64_spechelper);
+         guest_sizeB            = sizeof(VexGuestPPC64State);
+         guest_word_type        = Ity_I64;
+         guest_layout           = PPC64FN(&ppc64Guest_layout);
+         offB_CMSTART           = offsetof(VexGuestPPC64State,guest_CMSTART);
+         offB_CMLEN             = offsetof(VexGuestPPC64State,guest_CMLEN);
+         offB_GUEST_IP          = offsetof(VexGuestPPC64State,guest_CIA);
+         szB_GUEST_IP           = sizeof( ((VexGuestPPC64State*)0)->guest_CIA );
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessBE ||
+                 vta->archinfo_guest.endness == VexEndnessLE );
+         vassert(0 == sizeof(VexGuestPPC64State) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMSTART    ) == 8);
+         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMLEN      ) == 8);
+         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR     ) == 8);
+         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
+         break;
+
+      case VexArchS390X:
+         preciseMemExnsFn 
+            = S390FN(guest_s390x_state_requires_precise_mem_exns);
+         disInstrFn       = S390FN(disInstr_S390);
+         specHelper       = S390FN(guest_s390x_spechelper);
+         guest_sizeB      = sizeof(VexGuestS390XState);
+         guest_word_type  = Ity_I64;
+         guest_layout     = S390FN(&s390xGuest_layout);
+         offB_CMSTART     = offsetof(VexGuestS390XState,guest_CMSTART);
+         offB_CMLEN       = offsetof(VexGuestS390XState,guest_CMLEN);
+         offB_GUEST_IP          = offsetof(VexGuestS390XState,guest_IA);
+         szB_GUEST_IP           = sizeof( ((VexGuestS390XState*)0)->guest_IA);
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessBE);
+         vassert(0 == sizeof(VexGuestS390XState) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMSTART    ) == 8);
+         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMLEN      ) == 8);
+         vassert(sizeof( ((VexGuestS390XState*)0)->guest_NRADDR     ) == 8);
+         break;
+
+      case VexArchARM:
+         preciseMemExnsFn       
+            = ARMFN(guest_arm_state_requires_precise_mem_exns);
+         disInstrFn             = ARMFN(disInstr_ARM);
+         specHelper             = ARMFN(guest_arm_spechelper);
+         guest_sizeB            = sizeof(VexGuestARMState);
+         guest_word_type        = Ity_I32;
+         guest_layout           = ARMFN(&armGuest_layout);
+         offB_CMSTART           = offsetof(VexGuestARMState,guest_CMSTART);
+         offB_CMLEN             = offsetof(VexGuestARMState,guest_CMLEN);
+         offB_GUEST_IP          = offsetof(VexGuestARMState,guest_R15T);
+         szB_GUEST_IP           = sizeof( ((VexGuestARMState*)0)->guest_R15T );
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
+         vassert(0 == sizeof(VexGuestARMState) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMSTART) == 4);
+         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMLEN  ) == 4);
+         vassert(sizeof( ((VexGuestARMState*)0)->guest_NRADDR ) == 4);
+         break;
+
+      case VexArchARM64:
+         preciseMemExnsFn     
+            = ARM64FN(guest_arm64_state_requires_precise_mem_exns);
+         disInstrFn           = ARM64FN(disInstr_ARM64);
+         specHelper           = ARM64FN(guest_arm64_spechelper);
+         guest_sizeB          = sizeof(VexGuestARM64State);
+         guest_word_type      = Ity_I64;
+         guest_layout         = ARM64FN(&arm64Guest_layout);
+         offB_CMSTART         = offsetof(VexGuestARM64State,guest_CMSTART);
+         offB_CMLEN           = offsetof(VexGuestARM64State,guest_CMLEN);
+         offB_GUEST_IP        = offsetof(VexGuestARM64State,guest_PC);
+         szB_GUEST_IP         = sizeof( ((VexGuestARM64State*)0)->guest_PC );
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARM64State,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
+         vassert(0 == sizeof(VexGuestARM64State) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMSTART) == 8);
+         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMLEN  ) == 8);
+         vassert(sizeof( ((VexGuestARM64State*)0)->guest_NRADDR ) == 8);
+         break;
+
+      case VexArchMIPS32:
+         preciseMemExnsFn       
+            = MIPS32FN(guest_mips32_state_requires_precise_mem_exns);
+         disInstrFn             = MIPS32FN(disInstr_MIPS);
+         specHelper             = MIPS32FN(guest_mips32_spechelper);
+         guest_sizeB            = sizeof(VexGuestMIPS32State);
+         guest_word_type        = Ity_I32;
+         guest_layout           = MIPS32FN(&mips32Guest_layout);
+         offB_CMSTART           = offsetof(VexGuestMIPS32State,guest_CMSTART);
+         offB_CMLEN             = offsetof(VexGuestMIPS32State,guest_CMLEN);
+         offB_GUEST_IP          = offsetof(VexGuestMIPS32State,guest_PC);
+         szB_GUEST_IP           = sizeof( ((VexGuestMIPS32State*)0)->guest_PC );
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessLE
+                 || vta->archinfo_guest.endness == VexEndnessBE);
+         vassert(0 == sizeof(VexGuestMIPS32State) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMSTART) == 4);
+         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMLEN  ) == 4);
+         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_NRADDR ) == 4);
+         break;
+
+      case VexArchMIPS64:
+         preciseMemExnsFn       
+            = MIPS64FN(guest_mips64_state_requires_precise_mem_exns);
+         disInstrFn             = MIPS64FN(disInstr_MIPS);
+         specHelper             = MIPS64FN(guest_mips64_spechelper);
+         guest_sizeB            = sizeof(VexGuestMIPS64State);
+         guest_word_type        = Ity_I64;
+         guest_layout           = MIPS64FN(&mips64Guest_layout);
+         offB_CMSTART           = offsetof(VexGuestMIPS64State,guest_CMSTART);
+         offB_CMLEN             = offsetof(VexGuestMIPS64State,guest_CMLEN);
+         offB_GUEST_IP          = offsetof(VexGuestMIPS64State,guest_PC);
+         szB_GUEST_IP           = sizeof( ((VexGuestMIPS64State*)0)->guest_PC );
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessLE
+                 || vta->archinfo_guest.endness == VexEndnessBE);
+         vassert(0 == sizeof(VexGuestMIPS64State) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMSTART) == 8);
+         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMLEN  ) == 8);
+         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_NRADDR ) == 8);
+         break;
+
+      case VexArchTILEGX:
+         preciseMemExnsFn =
+            TILEGXFN(guest_tilegx_state_requires_precise_mem_exns);
+         disInstrFn       = TILEGXFN(disInstr_TILEGX);
+         specHelper       = TILEGXFN(guest_tilegx_spechelper);
+         guest_sizeB      = sizeof(VexGuestTILEGXState);
+         guest_word_type  = Ity_I64;
+         guest_layout     = TILEGXFN(&tilegxGuest_layout);
+         offB_CMSTART     = offsetof(VexGuestTILEGXState,guest_CMSTART);
+         offB_CMLEN       = offsetof(VexGuestTILEGXState,guest_CMLEN);
+         offB_GUEST_IP          = offsetof(VexGuestTILEGXState,guest_pc);
+         szB_GUEST_IP           = sizeof( ((VexGuestTILEGXState*)0)->guest_pc );
+         offB_HOST_EvC_COUNTER  = offsetof(VexGuestTILEGXState,host_EvC_COUNTER);
+         offB_HOST_EvC_FAILADDR = offsetof(VexGuestTILEGXState,host_EvC_FAILADDR);
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
+         vassert(0 ==
+                 sizeof(VexGuestTILEGXState) % LibVEX_GUEST_STATE_ALIGN);
+         vassert(sizeof( ((VexGuestTILEGXState*)0)->guest_CMSTART    ) == 8);
+         vassert(sizeof( ((VexGuestTILEGXState*)0)->guest_CMLEN      ) == 8);
+         vassert(sizeof( ((VexGuestTILEGXState*)0)->guest_NRADDR     ) == 8);
+         break;
+
+      default:
+         vpanic("LibVEX_Translate: unsupported guest insn set");
+   }
+
+   // Check that the guest's hardware capabilities are feasible. The function
+   // will not return if hwcaps are infeasible in some sense.
+   // FIXME: how can we know the guest's hardware capabilities?
+   check_hwcaps(vta->arch_guest, vta->archinfo_guest.hwcaps);
+
+   /* Set up result struct. */
+   VexTranslateResult res;
+   res.status         = VexTransOK;
+   res.n_sc_extents   = 0;
+   res.offs_profInc   = -1;
+   res.n_guest_instrs = 0;
+
+   /* yet more sanity checks ... */
+   if (vta->arch_guest == vta->arch_host) {
+      /* doesn't necessarily have to be true, but if it isn't it means
+         we are simulating one flavour of an architecture a different
+         flavour of the same architecture, which is pretty strange. */
+      vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
+      /* ditto */
+      vassert(vta->archinfo_guest.endness == vta->archinfo_host.endness);
+   }
+
+   vexAllocSanityCheck();
+
+   if (vex_traceflags & VEX_TRACE_FE)
+      vex_printf("\n------------------------" 
+                   " Front end "
+                   "------------------------\n\n");
+
+   VexRegisterUpdates pxControl = vex_control.iropt_register_updates_default;
+   vassert(pxControl >= VexRegUpdSpAtMemAccess
+           && pxControl <= VexRegUpdAllregsAtEachInsn);
+
+   irsb = bb_to_IR ( vta->guest_extents,
+                     &res.n_sc_extents,
+                     &res.n_guest_instrs,
+                     &pxControl,
+                     vta->callback_opaque,
+                     disInstrFn,
+                     vta->guest_bytes, 
+                     vta->guest_bytes_addr,
+                     vta->chase_into_ok,
+                     vta->archinfo_host.endness,
+                     vta->sigill_diag,
+                     vta->arch_guest,
+                     &vta->archinfo_guest,
+                     &vta->abiinfo_both,
+                     guest_word_type,
+                     vta->needs_self_check,
+                     vta->preamble_function,
+                     offB_CMSTART,
+                     offB_CMLEN,
+                     offB_GUEST_IP,
+                     szB_GUEST_IP );
+
+   vexAllocSanityCheck();
+
+   if (irsb == NULL) {
+      /* Access failure. */
+      vexSetAllocModeTEMP_and_clear();
+      vex_traceflags = 0;
+      res.status = VexTransAccessFail; return res;
+   }
+
+   vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
+   vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
+   for (i = 0; i < vta->guest_extents->n_used; i++) {
+      vassert(vta->guest_extents->len[i] < 10000); /* sanity */
+   }
+
+   /* bb_to_IR() could have caused pxControl to change. */
+   vassert(pxControl >= VexRegUpdSpAtMemAccess
+           && pxControl <= VexRegUpdAllregsAtEachInsn);
+
+   /* If debugging, show the raw guest bytes for this bb. */
+   if (0 || (vex_traceflags & VEX_TRACE_FE)) {
+      if (vta->guest_extents->n_used > 1) {
+         vex_printf("can't show code due to extents > 1\n");
+      } else {
+         /* HACK */
+         const UChar* p = vta->guest_bytes;
+         UInt   sum = 0;
+         UInt   guest_bytes_read = (UInt)vta->guest_extents->len[0];
+         vex_printf("GuestBytes %lx %u ", vta->guest_bytes_addr, 
+                                          guest_bytes_read );
+         for (i = 0; i < guest_bytes_read; i++) {
+            UInt b = (UInt)p[i];
+            vex_printf(" %02x", b );
+            sum = (sum << 1) ^ b;
+         }
+         vex_printf("  %08x\n\n", sum);
+      }
+   }
+
+   /* Sanity check the initial IR. */
+   sanityCheckIRSB( irsb, "initial IR", 
+                    False/*can be non-flat*/, guest_word_type );
+
+   vexAllocSanityCheck();
+
+   /* Clean it up, hopefully a lot. */
+   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn, pxControl,
+                              vta->guest_bytes_addr,
+                              vta->arch_guest );
+   sanityCheckIRSB( irsb, "after initial iropt", 
+                    True/*must be flat*/, guest_word_type );
+
+   if (vex_traceflags & VEX_TRACE_OPT1) {
+      vex_printf("\n------------------------" 
+                   " After pre-instr IR optimisation "
+                   "------------------------\n\n");
+      ppIRSB ( irsb );
+      vex_printf("\n");
+   }
+
+   vexAllocSanityCheck();
+
+   /* Get the thing instrumented. */
+   if (vta->instrument1)
+      irsb = vta->instrument1(vta->callback_opaque,
+                              irsb, guest_layout, 
+                              vta->guest_extents,
+                              &vta->archinfo_host,
+                              guest_word_type, host_word_type);
+   vexAllocSanityCheck();
+
+   if (vta->instrument2)
+      irsb = vta->instrument2(vta->callback_opaque,
+                              irsb, guest_layout,
+                              vta->guest_extents,
+                              &vta->archinfo_host,
+                              guest_word_type, host_word_type);
+      
+   if (vex_traceflags & VEX_TRACE_INST) {
+      vex_printf("\n------------------------" 
+                   " After instrumentation "
+                   "------------------------\n\n");
+      ppIRSB ( irsb );
+      vex_printf("\n");
+   }
+
+   if (vta->instrument1 || vta->instrument2)
+      sanityCheckIRSB( irsb, "after instrumentation",
+                       True/*must be flat*/, guest_word_type );
+
+   /* Do a post-instrumentation cleanup pass. */
+   if (vta->instrument1 || vta->instrument2) {
+      do_deadcode_BB( irsb );
+      irsb = cprop_BB( irsb );
+      do_deadcode_BB( irsb );
+      sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
+                       True/*must be flat*/, guest_word_type );
+   }
+
+   vexAllocSanityCheck();
+
+   if (vex_traceflags & VEX_TRACE_OPT2) {
+      vex_printf("\n------------------------" 
+                   " After post-instr IR optimisation "
+                   "------------------------\n\n");
+      ppIRSB ( irsb );
+      vex_printf("\n");
+   }
+
+   /* Turn it into virtual-registerised code.  Build trees -- this
+      also throws away any dead bindings. */
+   max_ga = ado_treebuild_BB( irsb, preciseMemExnsFn, pxControl );
+
+   if (vta->finaltidy) {
+      irsb = vta->finaltidy(irsb);
+   }
+
+   vexAllocSanityCheck();
+
+   if (vex_traceflags & VEX_TRACE_TREES) {
+      vex_printf("\n------------------------" 
+                   "  After tree-building "
+                   "------------------------\n\n");
+      ppIRSB ( irsb );
+      vex_printf("\n");
+   }
+
+   /* HACK */
+   if (0) {
+      *(vta->host_bytes_used) = 0;
+      res.status = VexTransOK; return res;
+   }
+   /* end HACK */
+
+   if (vex_traceflags & VEX_TRACE_VCODE)
+      vex_printf("\n------------------------" 
+                   " Instruction selection "
+                   "------------------------\n");
+
+   /* No guest has its IP field at offset zero.  If this fails it
+      means some transformation pass somewhere failed to update/copy
+      irsb->offsIP properly. */
+   vassert(irsb->offsIP >= 16);
+
+   vcode = iselSB ( irsb, vta->arch_host,
+                    &vta->archinfo_host, 
+                    &vta->abiinfo_both,
+                    offB_HOST_EvC_COUNTER,
+                    offB_HOST_EvC_FAILADDR,
+                    chainingAllowed,
+                    vta->addProfInc,
+                    max_ga );
+
+   vexAllocSanityCheck();
+
+   if (vex_traceflags & VEX_TRACE_VCODE)
+      vex_printf("\n");
+
+   if (vex_traceflags & VEX_TRACE_VCODE) {
+      for (i = 0; i < vcode->arr_used; i++) {
+         vex_printf("%3d   ", i);
+         ppInstr(vcode->arr[i], mode64);
+         vex_printf("\n");
+      }
+      vex_printf("\n");
+   }
+
+   /* Register allocate. */
+   rcode = doRegisterAllocation ( vcode, rRegUniv,
+                                  isMove, getRegUsage, mapRegs, 
+                                  genSpill, genReload, directReload, 
+                                  guest_sizeB,
+                                  ppInstr, ppReg, mode64 );
+
+   vexAllocSanityCheck();
+
+   if (vex_traceflags & VEX_TRACE_RCODE) {
+      vex_printf("\n------------------------" 
+                   " Register-allocated code "
+                   "------------------------\n\n");
+      for (i = 0; i < rcode->arr_used; i++) {
+         vex_printf("%3d   ", i);
+         ppInstr(rcode->arr[i], mode64);
+         vex_printf("\n");
+      }
+      vex_printf("\n");
+   }
+
+   /* HACK */
+   if (0) { 
+      *(vta->host_bytes_used) = 0;
+      res.status = VexTransOK; return res;
+   }
+   /* end HACK */
+
+   /* Assemble */
+   if (vex_traceflags & VEX_TRACE_ASM) {
+      vex_printf("\n------------------------" 
+                   " Assembly "
+                   "------------------------\n\n");
+   }
+
+   out_used = 0; /* tracks along the host_bytes array */
+   for (i = 0; i < rcode->arr_used; i++) {
+      HInstr* hi           = rcode->arr[i];
+      Bool    hi_isProfInc = False;
+      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+         ppInstr(hi, mode64);
+         vex_printf("\n");
+      }
+      j = emit( &hi_isProfInc,
+                insn_bytes, sizeof insn_bytes, hi,
+                mode64, vta->archinfo_host.endness,
+                vta->disp_cp_chain_me_to_slowEP,
+                vta->disp_cp_chain_me_to_fastEP,
+                vta->disp_cp_xindir,
+                vta->disp_cp_xassisted );
+      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
+         for (k = 0; k < j; k++)
+            vex_printf("%02x ", (UInt)insn_bytes[k]);
+         vex_printf("\n\n");
+      }
+      if (UNLIKELY(out_used + j > vta->host_bytes_size)) {
+         vexSetAllocModeTEMP_and_clear();
+         vex_traceflags = 0;
+         res.status = VexTransOutputFull;
+         return res;
+      }
+      if (UNLIKELY(hi_isProfInc)) {
+         vassert(vta->addProfInc); /* else where did it come from? */
+         vassert(res.offs_profInc == -1); /* there can be only one (tm) */
+         vassert(out_used >= 0);
+         res.offs_profInc = out_used;
+      }
+      { UChar* dst = &vta->host_bytes[out_used];
+        for (k = 0; k < j; k++) {
+           dst[k] = insn_bytes[k];
+        }
+        out_used += j;
+      }
+   }
+   *(vta->host_bytes_used) = out_used;
+
+   vexAllocSanityCheck();
+
+   vexSetAllocModeTEMP_and_clear();
+
+   if (vex_traceflags) {
+      /* Print the expansion ratio for this SB. */
+      j = 0; /* total guest bytes */
+      for (i = 0; i < vta->guest_extents->n_used; i++) {
+         j += vta->guest_extents->len[i];
+      }
+      if (1) vex_printf("VexExpansionRatio %d %d   %d :10\n\n",
+                        j, out_used, (10 * out_used) / (j == 0 ? 1 : j));
+   }
+
+   vex_traceflags = 0;
+   res.status = VexTransOK;
+   return res;
+}
+
+
+/* --------- Chain/Unchain XDirects. --------- */
+
/* Patch the translation at 'place_to_chain' -- which is expected to
   currently transfer to 'disp_cp_chain_me_EXPECTED' -- so that it
   instead jumps directly to 'place_to_jump_to'.  Returns the address
   range that was modified, so the caller can invalidate the
   instruction cache for it.  This merely dispatches to the
   host-architecture-specific implementation. */
VexInvalRange LibVEX_Chain ( VexArch     arch_host,
                             VexEndness  endness_host,
                             void*       place_to_chain,
                             const void* disp_cp_chain_me_EXPECTED,
                             const void* place_to_jump_to )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return chainXDirect_X86(endness_host,
                                       place_to_chain,
                                       disp_cp_chain_me_EXPECTED,
                                       place_to_jump_to));
      case VexArchAMD64:
         AMD64ST(return chainXDirect_AMD64(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to));
      case VexArchARM:
         ARMST(return chainXDirect_ARM(endness_host,
                                       place_to_chain,
                                       disp_cp_chain_me_EXPECTED,
                                       place_to_jump_to));
      case VexArchARM64:
         ARM64ST(return chainXDirect_ARM64(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to));
      case VexArchS390X:
         S390ST(return chainXDirect_S390(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to));
      case VexArchPPC32:
         PPC32ST(return chainXDirect_PPC(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return chainXDirect_PPC(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return chainXDirect_MIPS(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to, False/*!mode64*/));
      case VexArchMIPS64:
         MIPS64ST(return chainXDirect_MIPS(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to, True/*mode64*/));

      case VexArchTILEGX:
         TILEGXST(return chainXDirect_TILEGX(endness_host,
                                             place_to_chain,
                                             disp_cp_chain_me_EXPECTED,
                                             place_to_jump_to, True/*mode64*/));
      default:
         vassert(0);
   }
}
+
/* Inverse of LibVEX_Chain: patch the translation at 'place_to_unchain'
   -- expected to currently jump directly to 'place_to_jump_to_EXPECTED'
   -- so that it goes back to calling 'disp_cp_chain_me'.  Returns the
   modified address range for icache invalidation.  Dispatches to the
   host-architecture-specific implementation. */
VexInvalRange LibVEX_UnChain ( VexArch     arch_host,
                               VexEndness  endness_host,
                               void*       place_to_unchain,
                               const void* place_to_jump_to_EXPECTED,
                               const void* disp_cp_chain_me )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return unchainXDirect_X86(endness_host,
                                         place_to_unchain,
                                         place_to_jump_to_EXPECTED,
                                         disp_cp_chain_me));
      case VexArchAMD64:
         AMD64ST(return unchainXDirect_AMD64(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me));
      case VexArchARM:
         ARMST(return unchainXDirect_ARM(endness_host,
                                         place_to_unchain,
                                         place_to_jump_to_EXPECTED,
                                         disp_cp_chain_me));
      case VexArchARM64:
         ARM64ST(return unchainXDirect_ARM64(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me));
      case VexArchS390X:
         S390ST(return unchainXDirect_S390(endness_host,
                                           place_to_unchain,
                                           place_to_jump_to_EXPECTED,
                                           disp_cp_chain_me));
      case VexArchPPC32:
         PPC32ST(return unchainXDirect_PPC(endness_host,
                                           place_to_unchain,
                                           place_to_jump_to_EXPECTED,
                                           disp_cp_chain_me, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return unchainXDirect_PPC(endness_host,
                                           place_to_unchain,
                                           place_to_jump_to_EXPECTED,
                                           disp_cp_chain_me, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return unchainXDirect_MIPS(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me, False/*!mode64*/));
      case VexArchMIPS64:
         MIPS64ST(return unchainXDirect_MIPS(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me, True/*mode64*/));

      case VexArchTILEGX:
         TILEGXST(return unchainXDirect_TILEGX(endness_host,
                                               place_to_unchain,
                                               place_to_jump_to_EXPECTED,
                                               disp_cp_chain_me, True/*mode64*/));

      default:
         vassert(0);
   }
}
+
/* Return the size, in bytes, of an event check for the given host
   architecture.  The value is a per-architecture constant; it is
   queried from the backend once and then cached, so subsequent calls
   are cheap.
   NOTE(review): the cache is keyed on nothing -- the first call fixes
   the value, so this assumes arch_host is the same on every call
   (i.e. the host architecture is fixed for the process) -- TODO
   confirm.  A zero cache value doubles as "not yet computed", which
   is valid only because all backends return a nonzero size. */
Int LibVEX_evCheckSzB ( VexArch    arch_host )
{
   static Int cached = 0; /* DO NOT MAKE NON-STATIC */
   if (UNLIKELY(cached == 0)) {
      switch (arch_host) {
         case VexArchX86:
            X86ST(cached = evCheckSzB_X86()); break;
         case VexArchAMD64:
            AMD64ST(cached = evCheckSzB_AMD64()); break;
         case VexArchARM:
            ARMST(cached = evCheckSzB_ARM()); break;
         case VexArchARM64:
            ARM64ST(cached = evCheckSzB_ARM64()); break;
         case VexArchS390X:
            S390ST(cached = evCheckSzB_S390()); break;
         case VexArchPPC32:
            PPC32ST(cached = evCheckSzB_PPC()); break;
         case VexArchPPC64:
            PPC64ST(cached = evCheckSzB_PPC()); break;
         case VexArchMIPS32:
            MIPS32ST(cached = evCheckSzB_MIPS()); break;
         case VexArchMIPS64:
            MIPS64ST(cached = evCheckSzB_MIPS()); break;
         case VexArchTILEGX:
            TILEGXST(cached = evCheckSzB_TILEGX()); break;
         default:
            vassert(0);
      }
   }
   return cached;
}
+
/* Patch the profiler-counter-increment instruction sequence at
   'place_to_patch' so that it increments the 64-bit counter at
   'location_of_counter'.  Returns the modified address range for
   icache invalidation.  Dispatches to the host-architecture-specific
   implementation. */
VexInvalRange LibVEX_PatchProfInc ( VexArch    arch_host,
                                    VexEndness endness_host,
                                    void*      place_to_patch,
                                    const ULong* location_of_counter )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return patchProfInc_X86(endness_host, place_to_patch,
                                       location_of_counter));
      case VexArchAMD64:
         AMD64ST(return patchProfInc_AMD64(endness_host, place_to_patch,
                                           location_of_counter));
      case VexArchARM:
         ARMST(return patchProfInc_ARM(endness_host, place_to_patch,
                                       location_of_counter));
      case VexArchARM64:
         ARM64ST(return patchProfInc_ARM64(endness_host, place_to_patch,
                                           location_of_counter));
      case VexArchS390X:
         S390ST(return patchProfInc_S390(endness_host, place_to_patch,
                                         location_of_counter));
      case VexArchPPC32:
         PPC32ST(return patchProfInc_PPC(endness_host, place_to_patch,
                                         location_of_counter, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return patchProfInc_PPC(endness_host, place_to_patch,
                                         location_of_counter, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return patchProfInc_MIPS(endness_host, place_to_patch,
                                           location_of_counter, False/*!mode64*/));
      case VexArchMIPS64:
         MIPS64ST(return patchProfInc_MIPS(endness_host, place_to_patch,
                                           location_of_counter, True/*mode64*/));
      case VexArchTILEGX:
         TILEGXST(return patchProfInc_TILEGX(endness_host, place_to_patch,
                                             location_of_counter,
                                             True/*mode64*/));
      default:
         vassert(0);
   }
}
+
+
+/* --------- Emulation warnings. --------- */
+
/* Return a constant, human-readable description of emulation note
   'ew'.  The returned string is static and must not be freed.
   Panics (does not return) on an unknown note value. */
const HChar* LibVEX_EmNote_string ( VexEmNote ew )
{
   switch (ew) {
     case EmNote_NONE: 
        return "none";
     case EmWarn_X86_x87exns:
        return "Unmasking x87 FP exceptions";
     case EmWarn_X86_x87precision:
        return "Selection of non-80-bit x87 FP precision";
     case EmWarn_X86_sseExns:
        return "Unmasking SSE FP exceptions";
     case EmWarn_X86_fz:
        return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
     case EmWarn_X86_daz:
        return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
     case EmWarn_X86_acFlag:
        return "Setting %eflags.ac (setting noted but ignored)";
     case EmWarn_PPCexns:
        return "Unmasking PPC32/64 FP exceptions";
     case EmWarn_PPC64_redir_overflow:
        return "PPC64 function redirection stack overflow";
     case EmWarn_PPC64_redir_underflow:
        return "PPC64 function redirection stack underflow";
     case EmWarn_S390X_fpext_rounding:
        return "The specified rounding mode cannot be supported. That\n"
               "  feature requires the floating point extension facility\n"
               "  which is not available on this host. Continuing using\n"
               "  the rounding mode from FPC. Results may differ!";
     case EmWarn_S390X_invalid_rounding:
        return "The specified rounding mode is invalid.\n"
               "  Continuing using 'round to nearest'. Results may differ!";
     case EmFail_S390X_stfle:
        return "Instruction stfle is not supported on this host";
     case EmFail_S390X_stckf:
        return "Instruction stckf is not supported on this host";
     case EmFail_S390X_ecag:
        return "Instruction ecag is not supported on this host";
     case EmFail_S390X_pfpo:
        return "Instruction pfpo is not supported on this host";
     case EmFail_S390X_DFP_insn:
        return "DFP instructions are not supported on this host";
     case EmFail_S390X_fpext:
        return "Encountered an instruction that requires the floating "
               "point extension facility.\n"
               "  That facility is not available on this host";
     case EmFail_S390X_invalid_PFPO_rounding_mode:
        return "The rounding mode in GPR 0 for the PFPO instruction"
               " is invalid";
     case EmFail_S390X_invalid_PFPO_function:
        return "The function code in GPR 0 for the PFPO instruction"
               " is invalid";
     default: 
        vpanic("LibVEX_EmNote_string: unknown warning");
   }
}
+
+/* ------------------ Arch/HwCaps stuff. ------------------ */
+
+const HChar* LibVEX_ppVexArch ( VexArch arch )
+{
+   switch (arch) {
+      case VexArch_INVALID: return "INVALID";
+      case VexArchX86:      return "X86";
+      case VexArchAMD64:    return "AMD64";
+      case VexArchARM:      return "ARM";
+      case VexArchARM64:    return "ARM64";
+      case VexArchPPC32:    return "PPC32";
+      case VexArchPPC64:    return "PPC64";
+      case VexArchS390X:    return "S390X";
+      case VexArchMIPS32:   return "MIPS32";
+      case VexArchMIPS64:   return "MIPS64";
+      case VexArchTILEGX:   return "TILEGX";
+      default:              return "VexArch???";
+   }
+}
+
+const HChar* LibVEX_ppVexEndness ( VexEndness endness )
+{
+   switch (endness) {
+      case VexEndness_INVALID: return "INVALID";
+      case VexEndnessLE:       return "LittleEndian";
+      case VexEndnessBE:       return "BigEndian";
+      default:                 return "VexEndness???";
+   }
+}
+
/* Return a string with the hardware capabilities, to the extent that
   they pertain to the translation process.  No attempt is made to
   detect *all* capabilities an architecture may have. */
/* Public wrapper: delegate to the internal show_hwcaps dispatcher.
   NOTE(review): show_hwcaps returns NULL for an unrecognised arch,
   so callers should not assume the result is non-NULL. */
const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
{
   return show_hwcaps(arch, hwcaps);
}
+
+
/* Write default settings into *vai.  The structure is zeroed first;
   the explicit field stores below are redundant with the bzero but
   spell out the documented defaults for each field. */
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
   vex_bzero(vai, sizeof(*vai));
   vai->hwcaps                  = 0;
   vai->endness                 = VexEndness_INVALID;
   vai->ppc_icache_line_szB     = 0;
   vai->ppc_dcbz_szB            = 0;
   vai->ppc_dcbzl_szB           = 0;
   vai->arm64_dMinLine_lg2_szB  = 0;
   vai->arm64_iMinLine_lg2_szB  = 0;
   vai->hwcache_info.num_levels = 0;
   vai->hwcache_info.num_caches = 0;
   vai->hwcache_info.caches     = NULL;
   vai->hwcache_info.icaches_maintain_coherence = True;  // whatever
}
+
/* Write default settings into *vbi.  As with LibVEX_default_VexArchInfo,
   the structure is zeroed first and the explicit stores document the
   per-field defaults. */
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
{
   vex_bzero(vbi, sizeof(*vbi));
   vbi->guest_stack_redzone_size       = 0;
   vbi->guest_amd64_assume_fs_is_const = False;
   vbi->guest_amd64_assume_gs_is_const = False;
   vbi->guest_ppc_zap_RZ_at_blr        = False;
   vbi->guest_ppc_zap_RZ_at_bl         = NULL;
   vbi->host_ppc_calls_use_fndescrs    = False;
}
+
+
+/* Convenience macro to be used in show_hwcaps_ARCH functions */
+#define NUM_HWCAPS (sizeof hwcaps_list / sizeof hwcaps_list[0])
+
+/* Return a string showing the hwcaps in a nice way.  The string will
+   be NULL for unrecognised hardware capabilities. */
+
/* Render x86 hwcaps as a static string "x86[-cap]*"; a zero hwcaps
   (baseline) is shown as "x86-sse0".
   NOTE(review): the string is built into a static buffer on the first
   call and cached; a later call with different hwcaps bits still
   returns the first string.  This assumes hwcaps is fixed per process
   -- TODO confirm. */
static const HChar* show_hwcaps_x86 ( UInt hwcaps ) 
{
   static const HChar prefix[] = "x86";
   /* Map from each hwcaps bit to its printable name. */
   static const struct {
      UInt  hwcaps_bit;
      HChar name[7];
   } hwcaps_list[] = {
      { VEX_HWCAPS_X86_MMXEXT, "mmxext" },
      { VEX_HWCAPS_X86_SSE1,   "sse1"   },
      { VEX_HWCAPS_X86_SSE2,   "sse2"   },
      { VEX_HWCAPS_X86_SSE3,   "sse3"   },
      { VEX_HWCAPS_X86_LZCNT,  "lzcnt"  },
   };
   /* Allocate a large enough buffer: prefix plus one "-name" per cap. */
   static HChar buf[sizeof prefix + 
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) {
      vex_sprintf(p, "-%s", "sse0");
   } else {      
      UInt i;
      for (i = 0 ; i < NUM_HWCAPS; ++i) {
         if (hwcaps & hwcaps_list[i].hwcaps_bit)
            p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
      }
   }
   return buf;
}
+
/* Render amd64 hwcaps as a static string "amd64[-cap]*"; a zero
   hwcaps is shown as "amd64-sse2" (SSE2 is the amd64 baseline).
   NOTE(review): same first-call-wins static-buffer caching as
   show_hwcaps_x86 -- assumes hwcaps is fixed per process. */
static const HChar* show_hwcaps_amd64 ( UInt hwcaps )
{
   static const HChar prefix[] = "amd64";
   /* Map from each hwcaps bit to its printable name. */
   static const struct {
      UInt  hwcaps_bit;
      HChar name[7];
   } hwcaps_list[] = {
      { VEX_HWCAPS_AMD64_CX16,   "cx16"   },
      { VEX_HWCAPS_AMD64_LZCNT,  "lzcnt"  },
      { VEX_HWCAPS_AMD64_RDTSCP, "rdtscp" },
      { VEX_HWCAPS_AMD64_SSE3,   "sse3"   },
      { VEX_HWCAPS_AMD64_AVX,    "avx"    },
      { VEX_HWCAPS_AMD64_AVX2,   "avx2"   },
      { VEX_HWCAPS_AMD64_BMI,    "bmi"    },
   };
   /* Allocate a large enough buffer: prefix plus one "-name" per cap. */
   static HChar buf[sizeof prefix + 
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) {
      vex_sprintf(p, "-%s", "sse2");
   } else {      
      UInt i;
      for (i = 0 ; i < NUM_HWCAPS; ++i) {
         if (hwcaps & hwcaps_list[i].hwcaps_bit)
            p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
      }
   }
   return buf;
}
+
/* Render ppc32 hwcaps as a static string "ppc32-int[-cap]*"; a zero
   hwcaps yields just the prefix (integer-only baseline).
   NOTE(review): same first-call-wins static-buffer caching as
   show_hwcaps_x86 -- assumes hwcaps is fixed per process. */
static const HChar* show_hwcaps_ppc32 ( UInt hwcaps )
{
   static const HChar prefix[] = "ppc32-int";
   /* Map from each hwcaps bit to its printable name. */
   static const struct {
      UInt  hwcaps_bit;
      HChar name[8];
   } hwcaps_list[] = {
      { VEX_HWCAPS_PPC32_F,       "flt"     },
      { VEX_HWCAPS_PPC32_V,       "vmx"     },
      { VEX_HWCAPS_PPC32_FX,      "FX"      },
      { VEX_HWCAPS_PPC32_GX,      "GX"      },
      { VEX_HWCAPS_PPC32_VX,      "VX"      },
      { VEX_HWCAPS_PPC32_DFP,     "DFP"     },
      { VEX_HWCAPS_PPC32_ISA2_07, "ISA2_07" },
   };
   /* Allocate a large enough buffer: prefix plus one "-name" per cap. */
   static HChar buf[sizeof prefix + 
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) return buf;

   UInt i;
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
   return buf;
}
+
/* Render ppc64 hwcaps as a static string "ppc64-int-flt[-cap]*"; a
   zero hwcaps yields just the prefix (the ppc64 baseline already
   includes integer and float).
   NOTE(review): same first-call-wins static-buffer caching as
   show_hwcaps_x86 -- assumes hwcaps is fixed per process. */
static const HChar* show_hwcaps_ppc64 ( UInt hwcaps )
{
   static const HChar prefix[] = "ppc64-int-flt";
   /* Map from each hwcaps bit to its printable name. */
   static const struct {
      UInt  hwcaps_bit;
      HChar name[8];
   } hwcaps_list[] = {
      { VEX_HWCAPS_PPC64_FX,      "FX"      },
      { VEX_HWCAPS_PPC64_GX,      "GX"      },
      { VEX_HWCAPS_PPC64_V,       "vmx"     },
      { VEX_HWCAPS_PPC64_DFP,     "DFP"     },
      { VEX_HWCAPS_PPC64_ISA2_07, "ISA2_07" },
   };
   /* Allocate a large enough buffer: prefix plus one "-name" per cap. */
   static HChar buf[sizeof prefix + 
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) return buf;

   UInt i;
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
   return buf;
}
+
/* Render ARM hwcaps as a static string "ARMv<level>[-neon][-vfp]",
   where <level> is the architecture level encoded in the hwcaps word.
   NOTE(review): same first-call-wins static-buffer caching as
   show_hwcaps_x86 -- assumes hwcaps is fixed per process. */
static const HChar* show_hwcaps_arm ( UInt hwcaps )
{
   static const HChar prefix[] = "ARM";
   /* Map from hwcaps bit(s) to printable name; the three VFP bits are
      collapsed into the single name "vfp". */
   static const struct {
      UInt  hwcaps_bit;
      HChar name[6];
   } hwcaps_list[] = {
      { VEX_HWCAPS_ARM_NEON, "neon" },
      { VEX_HWCAPS_ARM_VFP | VEX_HWCAPS_ARM_VFP2 | VEX_HWCAPS_ARM_VFP3, "vfp" },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix + 12 +    // level
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p;
   UInt i, level;

   level = VEX_ARM_ARCHLEVEL(hwcaps);

   p = buf + vex_sprintf(buf, "%sv%u", prefix, level);
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
   return buf;
}
+
+static const HChar* show_hwcaps_arm64 ( UInt hwcaps )
+{
+   /* Since there are no variants, just insist that hwcaps is zero,
+      and declare it invalid otherwise. */
+  if (hwcaps == 0)
+     return "baseline";
+  return "Unsupported";
+}
+
/* Render s390x hwcaps as a static string "s390x[-facility]*"; if no
   facility bits are set, "-zarch" is appended instead.  Only the
   s390x-specific bits (extracted via VEX_HWCAPS_S390X) are examined.
   NOTE(review): same first-call-wins static-buffer caching as
   show_hwcaps_x86 -- assumes hwcaps is fixed per process. */
static const HChar* show_hwcaps_s390x ( UInt hwcaps )
{
   static const HChar prefix[] = "s390x";
   /* Map from each facility bit to its printable name. */
   static const struct {
      UInt  hwcaps_bit;
      HChar name[6];
   } hwcaps_list[] = {
      { VEX_HWCAPS_S390X_LDISP, "ldisp" },
      { VEX_HWCAPS_S390X_EIMM,  "eimm" },
      { VEX_HWCAPS_S390X_GIE,   "gie" },
      { VEX_HWCAPS_S390X_DFP,   "dfp" },
      { VEX_HWCAPS_S390X_FGX,   "fgx" },
      { VEX_HWCAPS_S390X_STFLE, "stfle" },
      { VEX_HWCAPS_S390X_ETF2,  "etf2" },
      { VEX_HWCAPS_S390X_ETF3,  "etf3" },
      { VEX_HWCAPS_S390X_STCKF, "stckf" },
      { VEX_HWCAPS_S390X_FPEXT, "fpext" },
      { VEX_HWCAPS_S390X_LSC,   "lsc" },
      { VEX_HWCAPS_S390X_PFPO,  "pfpo" },
   };
   /* Allocate a large enough buffer: prefix plus one "-name" per cap. */
   static HChar buf[sizeof prefix + 
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'

   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p;
   UInt i;

   /* Keep only the s390x facility bits. */
   hwcaps = VEX_HWCAPS_S390X(hwcaps);

   p = buf + vex_sprintf(buf, "%s", prefix);
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }

   /* If there are no facilities, add "zarch" */
   if (hwcaps == 0)
     vex_sprintf(p, "-%s", "zarch");

   return buf;
}
+
+static const HChar* show_hwcaps_mips32 ( UInt hwcaps )
+{
+   /* MIPS baseline. */
+   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_MIPS) {
+      /* MIPS baseline with dspr2. */
+      if (VEX_MIPS_PROC_DSP2(hwcaps)) {
+         return "MIPS-baseline-dspr2";
+      }
+      /* MIPS baseline with dsp. */
+      if (VEX_MIPS_PROC_DSP(hwcaps)) {
+         return "MIPS-baseline-dsp";
+      }
+      return "MIPS-baseline";
+   }
+
+   /* Broadcom baseline. */
+   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_BROADCOM) {
+      return "Broadcom-baseline";
+   }
+
+   /* Netlogic baseline. */
+   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_NETLOGIC) {
+      return "Netlogic-baseline";
+   }
+
+   /* Cavium baseline. */
+   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_CAVIUM) {
+      return "Cavium-baseline";
+   }
+
+   return "Unsupported baseline";
+}
+
/* Render MIPS64 hwcaps.  Only one MIPS64 variant is distinguished,
   so hwcaps is ignored. */
static const HChar* show_hwcaps_mips64 ( UInt hwcaps )
{
   return "mips64-baseline";
}
+
/* Render TILE-Gx hwcaps.  Only one TILE-Gx variant is distinguished,
   so hwcaps is ignored. */
static const HChar* show_hwcaps_tilegx ( UInt hwcaps )
{
   return "tilegx-baseline";
}
+
+#undef NUM_HWCAPS
+
/* This function must not return NULL. */
+
/* Dispatch to the per-architecture hwcaps renderer.
   NOTE(review): despite the comment above saying this must not return
   NULL, the default case does return NULL for an unknown arch.  In
   practice check_hwcaps vpanics on unknown arches before invalid_hwcaps
   can feed a NULL to vfatal, but external callers via
   LibVEX_ppVexHwCaps can still observe NULL -- TODO confirm intent. */
static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86:    return show_hwcaps_x86(hwcaps);
      case VexArchAMD64:  return show_hwcaps_amd64(hwcaps);
      case VexArchPPC32:  return show_hwcaps_ppc32(hwcaps);
      case VexArchPPC64:  return show_hwcaps_ppc64(hwcaps);
      case VexArchARM:    return show_hwcaps_arm(hwcaps);
      case VexArchARM64:  return show_hwcaps_arm64(hwcaps);
      case VexArchS390X:  return show_hwcaps_s390x(hwcaps);
      case VexArchMIPS32: return show_hwcaps_mips32(hwcaps);
      case VexArchMIPS64: return show_hwcaps_mips64(hwcaps);
      case VexArchTILEGX: return show_hwcaps_tilegx(hwcaps);
      default: return NULL;
   }
}
+
/* To be used to complain about hwcaps we cannot handle.  Reports the
   problem via vfatal and never returns.  'message' is expected to end
   with a newline, since the format string does not supply one after
   it. */
__attribute__((noreturn))
static void invalid_hwcaps ( VexArch arch, UInt hwcaps, const HChar *message )
{
   vfatal("\nVEX: %s"
          "     Found: %s\n", message, show_hwcaps(arch, hwcaps));
}
+
/* This function will not return if the hwcaps don't pass the test. */
+static void check_hwcaps ( VexArch arch, UInt hwcaps )
+{
+   switch (arch) {
+      case VexArchX86: {
+         if (hwcaps == 0) return;    // baseline
+
+         /* Monotonic: SSE3 > SSE2 > SSE1 > MMXEXT > baseline. */
+         static const UInt extras[] = {
+            VEX_HWCAPS_X86_MMXEXT, VEX_HWCAPS_X86_SSE1, VEX_HWCAPS_X86_SSE2,
+            VEX_HWCAPS_X86_SSE3
+         };
+
+         UInt i, caps = 0;
+         for (i = 0; i < sizeof extras / sizeof extras[0]; ++i) {
+            caps |= extras[i];
+            if (caps == hwcaps) return;
+            /* For SSE2 or later LZCNT is optional */
+            if ((caps & VEX_HWCAPS_X86_SSE2) != 0) {
+               if ((caps | VEX_HWCAPS_X86_LZCNT) == hwcaps) return;
+            }
+         }
+         invalid_hwcaps(arch, hwcaps, "Cannot handle capabilities\n");
+      }
+
+      case VexArchAMD64: {
+         /* SSE3 and CX16 are orthogonal and > baseline, although we really
+            don't expect to come across anything which can do SSE3 but can't
+            do CX16.  Still, we can handle that case.  LZCNT is similarly
+            orthogonal. */
+
+         /* Throw out obviously stupid cases: */
+         Bool have_sse3 = (hwcaps & VEX_HWCAPS_AMD64_SSE3) != 0;
+         Bool have_avx  = (hwcaps & VEX_HWCAPS_AMD64_AVX)  != 0;
+         Bool have_bmi  = (hwcaps & VEX_HWCAPS_AMD64_BMI)  != 0;
+         Bool have_avx2 = (hwcaps & VEX_HWCAPS_AMD64_AVX2) != 0;
+
+         /* AVX without SSE3 */
+         if (have_avx && !have_sse3)
+            invalid_hwcaps(arch, hwcaps,
+                           "Support for AVX requires SSE3 capabilities\n");
+         /* AVX2 or BMI without AVX */
+         if (have_avx2 && !have_avx)
+            invalid_hwcaps(arch, hwcaps,
+                           "Support for AVX2 requires AVX capabilities\n");
+         if (have_bmi && !have_avx)
+            invalid_hwcaps(arch, hwcaps,
+                           "Support for BMI requires AVX capabilities\n");
+         return;
+      }
+
+      case VexArchPPC32: {
+         /* Monotonic with complications.  Basically V > F > baseline,
+            but once you have F then you can have FX or GX too. */
+         if (hwcaps == 0) return;   // baseline
+
+         if ((hwcaps & VEX_HWCAPS_PPC32_F) == 0)
+            invalid_hwcaps(arch, hwcaps,
+                           "Missing floating point capability\n");
+         /* V, FX, and GX can appear in any combination */
+
+         /* DFP requires V and FX and GX */
+         UInt v_fx_gx = VEX_HWCAPS_PPC32_V | VEX_HWCAPS_PPC32_FX |
+                        VEX_HWCAPS_PPC32_GX;
+         Bool has_v_fx_gx = (hwcaps & v_fx_gx) == v_fx_gx;
+
+         if ((hwcaps & VEX_HWCAPS_PPC32_DFP) && ! has_v_fx_gx)
+            invalid_hwcaps(arch, hwcaps,
+                           "DFP requires VMX and FX and GX capabilities\n");
+
+         /* VX requires V and FX and GX */
+         if ((hwcaps & VEX_HWCAPS_PPC32_VX) && ! has_v_fx_gx)
+            invalid_hwcaps(arch, hwcaps,
+                           "VX requires VMX and FX and GX capabilities\n");
+
+         /* ISA2_07 requires everything else */
+         if ((hwcaps & VEX_HWCAPS_PPC32_ISA2_07) != 0) {
+            if (! has_v_fx_gx)
+               invalid_hwcaps(arch, hwcaps,
+                          "ISA2_07 requires VMX and FX and GX capabilities\n");
+            if (! (hwcaps & VEX_HWCAPS_PPC32_VX))
+               invalid_hwcaps(arch, hwcaps,
+                              "ISA2_07 requires VX capabilities\n");
+            if (! (hwcaps & VEX_HWCAPS_PPC32_DFP))
+               invalid_hwcaps(arch, hwcaps,
+                              "ISA2_07 requires DFP capabilities\n");
+         }
+         return;
+      }
+
+      case VexArchPPC64: {
+         /* Monotonic with complications.  Basically V > baseline(==F),
+            but once you have F then you can have FX or GX too. */
+         if (hwcaps == 0) return;   // baseline
+
+         /* V, FX, and GX can appear in any combination */
+
+         /* DFP requires V and FX and GX */
+         UInt v_fx_gx = VEX_HWCAPS_PPC64_V | VEX_HWCAPS_PPC64_FX |
+                        VEX_HWCAPS_PPC64_GX;
+         Bool has_v_fx_gx = (hwcaps & v_fx_gx) == v_fx_gx;
+
+         if ((hwcaps & VEX_HWCAPS_PPC64_DFP) && ! has_v_fx_gx)
+            invalid_hwcaps(arch, hwcaps,
+                           "DFP requires VMX and FX and GX capabilities\n");
+
+         /* VX requires V and FX and GX */
+         if ((hwcaps & VEX_HWCAPS_PPC32_VX) && ! has_v_fx_gx)
+            invalid_hwcaps(arch, hwcaps,
+                           "VX requires VMX and FX and GX capabilities\n");
+
+         /* ISA2_07 requires everything else */
+         if ((hwcaps & VEX_HWCAPS_PPC64_ISA2_07) != 0) {
+            if (! has_v_fx_gx)
+               invalid_hwcaps(arch, hwcaps,
+                        "ISA2_07 requires VMX and FX and GX capabilities\n");
+            if (! (hwcaps & VEX_HWCAPS_PPC64_VX))
+               invalid_hwcaps(arch, hwcaps,
+                              "ISA2_07 requires VX capabilities\n");
+            if (! (hwcaps & VEX_HWCAPS_PPC64_DFP))
+               invalid_hwcaps(arch, hwcaps,
+                              "ISA2_07 requires DFP capabilities\n");
+         }
+         return;
+      }
+
+      case VexArchARM: {
+         Bool NEON  = ((hwcaps & VEX_HWCAPS_ARM_NEON) != 0);
+         UInt level = VEX_ARM_ARCHLEVEL(hwcaps);
+
+         switch (level) {
+            case 5:
+               if (NEON)
+                  invalid_hwcaps(arch, hwcaps,
+                          "NEON instructions are not supported for ARMv5.\n");
+               return;
+            case 6:
+               if (NEON)
+                  invalid_hwcaps(arch, hwcaps,
+                          "NEON instructions are not supported for ARMv6.\n");
+               return;
+            case 7:
+               return;
+            default:
+               invalid_hwcaps(arch, hwcaps,
+                              "ARM architecture level is not supported.\n");
+         }
+      }
+
+      case VexArchARM64:
+         if (hwcaps != 0)
+            invalid_hwcaps(arch, hwcaps,
+                           "Unsupported hardware capabilities.\n");
+         return;
+
+      case VexArchS390X:
+         if (! s390_host_has_ldisp)
+            invalid_hwcaps(arch, hwcaps,
+                           "Host does not have long displacement facility.\n");
+         return;
+        
+      case VexArchMIPS32:
+         switch (VEX_MIPS_COMP_ID(hwcaps)) {
+            case VEX_PRID_COMP_MIPS:
+            case VEX_PRID_COMP_BROADCOM:
+            case VEX_PRID_COMP_NETLOGIC:
+               return;
+            default:
+               invalid_hwcaps(arch, hwcaps, "Unsupported baseline\n");
+         }
+      
+      case VexArchMIPS64:
+         return;
+
+      case VexArchTILEGX:
+         return;
+
+      default:
+         vpanic("unknown architecture");
+   }
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                         main_main.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/main_util.c b/VEX/priv/main_util.c
new file mode 100644
index 0000000..d0732e9
--- /dev/null
+++ b/VEX/priv/main_util.c
@@ -0,0 +1,585 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                       main_util.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+
+#include "main_globals.h"
+#include "main_util.h"
+
+
+/*---------------------------------------------------------*/
+/*--- Storage                                           ---*/
+/*---------------------------------------------------------*/
+
+/* Try to keep this as low as possible -- in particular, less than the
+   size of the smallest L2 cache we might encounter.  At 50000, my VIA
+   Nehemiah 1 GHz (a weedy machine) can satisfy 27 million calls/
+   second to LibVEX_Alloc(16) -- that is, allocate memory at over 400
+   MByte/sec.  Once the size increases enough to fall out of the cache
+   into memory, the rate falls by about a factor of 3. 
+*/
+
+#define N_TEMPORARY_BYTES 5000000
+
+static HChar  temporary[N_TEMPORARY_BYTES] __attribute__((aligned(REQ_ALIGN)));
+static HChar* temporary_first = &temporary[0];
+static HChar* temporary_curr  = &temporary[0];
+static HChar* temporary_last  = &temporary[N_TEMPORARY_BYTES-1];
+
+static ULong  temporary_bytes_allocd_TOT = 0;
+
+#define N_PERMANENT_BYTES 10000
+
+static HChar  permanent[N_PERMANENT_BYTES] __attribute__((aligned(REQ_ALIGN)));
+static HChar* permanent_first = &permanent[0];
+static HChar* permanent_curr  = &permanent[0];
+static HChar* permanent_last  = &permanent[N_PERMANENT_BYTES-1];
+
+HChar* private_LibVEX_alloc_first = &temporary[0];
+HChar* private_LibVEX_alloc_curr  = &temporary[0];
+HChar* private_LibVEX_alloc_last  = &temporary[N_TEMPORARY_BYTES-1];
+
+
+static VexAllocMode mode = VexAllocModeTEMP;
+
+void vexAllocSanityCheck ( void )
+{
+   vassert(temporary_first == &temporary[0]);
+   vassert(temporary_last  == &temporary[N_TEMPORARY_BYTES-1]);
+   vassert(permanent_first == &permanent[0]);
+   vassert(permanent_last  == &permanent[N_PERMANENT_BYTES-1]);
+   vassert(temporary_first <= temporary_curr);
+   vassert(temporary_curr  <= temporary_last);
+   vassert(permanent_first <= permanent_curr);
+   vassert(permanent_curr  <= permanent_last);
+   vassert(private_LibVEX_alloc_first <= private_LibVEX_alloc_curr);
+   vassert(private_LibVEX_alloc_curr  <= private_LibVEX_alloc_last);
+   if (mode == VexAllocModeTEMP){
+      vassert(private_LibVEX_alloc_first == temporary_first);
+      vassert(private_LibVEX_alloc_last  == temporary_last);
+   } 
+   else
+   if (mode == VexAllocModePERM) {
+      vassert(private_LibVEX_alloc_first == permanent_first);
+      vassert(private_LibVEX_alloc_last  == permanent_last);
+   }
+   else 
+      vassert(0);
+
+#  define IS_WORD_ALIGNED(p)   (0 == (((HWord)p) & (sizeof(HWord)-1)))
+   vassert(sizeof(HWord) == 4 || sizeof(HWord) == 8);
+   vassert(IS_WORD_ALIGNED(temporary_first));
+   vassert(IS_WORD_ALIGNED(temporary_curr));
+   vassert(IS_WORD_ALIGNED(temporary_last+1));
+   vassert(IS_WORD_ALIGNED(permanent_first));
+   vassert(IS_WORD_ALIGNED(permanent_curr));
+   vassert(IS_WORD_ALIGNED(permanent_last+1));
+   vassert(IS_WORD_ALIGNED(private_LibVEX_alloc_first));
+   vassert(IS_WORD_ALIGNED(private_LibVEX_alloc_curr));
+   vassert(IS_WORD_ALIGNED(private_LibVEX_alloc_last+1));
+#  undef IS_WORD_ALIGNED
+}
+
+/* The current allocation mode. */
+
+void vexSetAllocMode ( VexAllocMode m )
+{
+   vexAllocSanityCheck();
+
+   /* Save away the current allocation point .. */
+   if (mode == VexAllocModeTEMP){
+      temporary_curr = private_LibVEX_alloc_curr;
+   } 
+   else
+   if (mode == VexAllocModePERM) {
+      permanent_curr = private_LibVEX_alloc_curr;
+   }
+   else 
+      vassert(0);
+
+   /* Did that screw anything up? */
+   vexAllocSanityCheck();
+
+   if (m == VexAllocModeTEMP){
+      private_LibVEX_alloc_first = temporary_first;
+      private_LibVEX_alloc_curr  = temporary_curr;
+      private_LibVEX_alloc_last  = temporary_last;
+   } 
+   else
+   if (m == VexAllocModePERM) {
+      private_LibVEX_alloc_first = permanent_first;
+      private_LibVEX_alloc_curr  = permanent_curr;
+      private_LibVEX_alloc_last  = permanent_last;
+   }
+   else 
+      vassert(0);
+
+   mode = m;
+}
+
+VexAllocMode vexGetAllocMode ( void )
+{
+   return mode;
+}
+
+__attribute__((noreturn))
+void private_LibVEX_alloc_OOM(void)
+{
+   const HChar* pool = "???";
+   if (private_LibVEX_alloc_first == &temporary[0]) pool = "TEMP";
+   if (private_LibVEX_alloc_first == &permanent[0]) pool = "PERM";
+   vex_printf("VEX temporary storage exhausted.\n");
+   vex_printf("Pool = %s,  start %p curr %p end %p (size %lld)\n",
+              pool, 
+              private_LibVEX_alloc_first,
+              private_LibVEX_alloc_curr,
+              private_LibVEX_alloc_last,
+              (Long)(private_LibVEX_alloc_last + 1 - private_LibVEX_alloc_first));
+   vpanic("VEX temporary storage exhausted.\n"
+          "Increase N_{TEMPORARY,PERMANENT}_BYTES and recompile.");
+}
+
+void vexSetAllocModeTEMP_and_clear ( void )
+{
+   /* vassert(vex_initdone); */ /* causes infinite assert loops */
+   temporary_bytes_allocd_TOT 
+      += (ULong)(private_LibVEX_alloc_curr - private_LibVEX_alloc_first);
+
+   mode = VexAllocModeTEMP;
+   temporary_curr            = &temporary[0];
+   private_LibVEX_alloc_curr = &temporary[0];
+
+   /* Set to (1) and change the fill byte to 0x00 or 0xFF to test for
+      any potential bugs due to using uninitialised memory in the main
+      VEX storage area. */
+   if (0) {
+      Int i;
+      for (i = 0; i < N_TEMPORARY_BYTES; i++)
+         temporary[i] = 0x00;
+   }
+
+   vexAllocSanityCheck();
+}
+
+
+/* Exported to library client. */
+
+void LibVEX_ShowAllocStats ( void )
+{
+   vex_printf("vex storage: T total %lld bytes allocated\n",
+              (Long)temporary_bytes_allocd_TOT );
+   vex_printf("vex storage: P total %lld bytes allocated\n",
+              (Long)(permanent_curr - permanent_first) );
+}
+
+void *LibVEX_Alloc ( SizeT nbytes )
+{
+   return LibVEX_Alloc_inline(nbytes);
+}
+
+/*---------------------------------------------------------*/
+/*--- Bombing out                                       ---*/
+/*---------------------------------------------------------*/
+
+__attribute__ ((noreturn))
+void vex_assert_fail ( const HChar* expr,
+                       const HChar* file, Int line, const HChar* fn )
+{
+   vex_printf( "\nvex: %s:%d (%s): Assertion `%s' failed.\n",
+               file, line, fn, expr );
+   (*vex_failure_exit)();
+}
+
+/* To be used in assert-like (i.e. should never ever happen) situations */
+__attribute__ ((noreturn))
+void vpanic ( const HChar* str )
+{
+   vex_printf("\nvex: the `impossible' happened:\n   %s\n", str);
+   (*vex_failure_exit)();
+}
+
+
+/*---------------------------------------------------------*/
+/*--- vex_printf                                        ---*/
+/*---------------------------------------------------------*/
+
+/* This should be the only <...> include in the entire VEX library.
+   New code for vex_util.c should go above this point. */
+#include <stdarg.h>
+
+SizeT vex_strlen ( const HChar* str )
+{
+   SizeT i = 0;
+   while (str[i] != 0) i++;
+   return i;
+}
+
+Bool vex_streq ( const HChar* s1, const HChar* s2 )
+{
+   while (True) {
+      if (*s1 == 0 && *s2 == 0)
+         return True;
+      if (*s1 != *s2)
+         return False;
+      s1++;
+      s2++;
+   }
+}
+
+void vex_bzero ( void* sV, SizeT n )
+{
+   SizeT i;
+   UChar* s = (UChar*)sV;
+   /* No laughing, please.  Just don't call this too often.  Thank you
+      for your attention. */
+   for (i = 0; i < n; i++) s[i] = 0;
+}
+
+
+/* Convert N0 into ascii in BUF, which is assumed to be big enough (at
+   least 67 bytes long).  Observe BASE, SYNED and HEXCAPS. */
+static
+void convert_int ( /*OUT*/HChar* buf, Long n0, 
+                   Int base, Bool syned, Bool hexcaps )
+{
+   ULong u0;
+   HChar c;
+   Bool minus = False;
+   Int i, j, bufi = 0;
+   buf[bufi] = 0;
+
+   if (syned) {
+      if (n0 < 0) {
+         minus = True;
+         u0 = (ULong)(-n0);
+      } else {
+         u0 = (ULong)(n0);
+      }
+   } else {
+      u0 = (ULong)n0;
+   }
+
+   while (1) {
+     buf[bufi++] = toHChar('0' + toUInt(u0 % base));
+     u0 /= base;
+     if (u0 == 0) break;
+   }
+   if (minus)
+      buf[bufi++] = '-';
+
+   buf[bufi] = 0;
+   for (i = 0; i < bufi; i++)
+      if (buf[i] > '9') 
+         buf[i] = toHChar(buf[i] + (hexcaps ? 'A' : 'a') - '9' - 1);
+
+   i = 0;
+   j = bufi-1;
+   while (i <= j) {
+      c = buf[i];
+      buf[i] = buf[j];
+      buf[j] = c;
+      i++;
+      j--;
+   }
+}
+
+
+/* A half-arsed and buggy, but good-enough, implementation of
+   printf. */
+static
+UInt vprintf_wrk ( void(*sink)(HChar),
+                   const HChar* format,
+                   va_list ap )
+{
+#  define PUT(_ch)  \
+      do { sink(_ch); nout++; } \
+      while (0)
+
+#  define PAD(_n) \
+      do { Int _qq = (_n); for (; _qq > 0; _qq--) PUT(padchar); } \
+      while (0)
+
+#  define PUTSTR(_str) \
+      do { const HChar* _qq = _str; for (; *_qq; _qq++) PUT(*_qq); } \
+      while (0)
+
+   const HChar* saved_format;
+   Bool   longlong, ljustify, is_sizet;
+   HChar  padchar;
+   Int    fwidth, nout, len1, len3;
+   SizeT  len2;
+   HChar  intbuf[100];  /* big enough for a 64-bit # in base 2 */
+
+   nout = 0;
+   while (1) {
+
+      if (!format)
+         break;
+      if (*format == 0) 
+         break;
+
+      if (*format != '%') {
+         PUT(*format); 
+         format++;
+         continue;
+      }
+
+      saved_format = format;
+      longlong = is_sizet = False;
+      ljustify = False;
+      padchar = ' ';
+      fwidth = 0;
+      format++;
+
+      if (*format == '-') {
+         format++;
+         ljustify = True;
+      }
+      if (*format == '0') {
+         format++;
+         padchar = '0';
+      }
+      if (*format == '*') {
+         fwidth = va_arg(ap, Int);
+         vassert(fwidth >= 0);
+         format++;
+      } else {
+         while (*format >= '0' && *format <= '9') {
+            fwidth = fwidth * 10 + (*format - '0');
+            format++;
+         }
+      }
+      if (*format == 'l') {
+         format++;
+         if (*format == 'l') {
+            format++;
+            longlong = True;
+         }
+      } else if (*format == 'z') {
+         format++;
+         is_sizet = True;
+      }
+
+      switch (*format) {
+         case 's': {
+            const HChar* str = va_arg(ap, HChar*);
+            if (str == NULL)
+               str = "(null)";
+            len1 = len3 = 0;
+            len2 = vex_strlen(str);
+            if (fwidth > len2) { len1 = ljustify ? 0 : fwidth-len2;
+                                 len3 = ljustify ? fwidth-len2 : 0; }
+            PAD(len1); PUTSTR(str); PAD(len3);
+            break;
+         }
+         case 'c': {
+            HChar c = (HChar)va_arg(ap, int);
+            HChar str[2];
+            str[0] = c;
+            str[1] = 0;
+            len1 = len3 = 0;
+            len2 = vex_strlen(str);
+            if (fwidth > len2) { len1 = ljustify ? 0 : fwidth-len2;
+                                 len3 = ljustify ? fwidth-len2 : 0; }
+            PAD(len1); PUTSTR(str); PAD(len3);
+            break;
+         }
+         case 'd': {
+            Long l;
+            vassert(is_sizet == False); // %zd is obscure; we don't allow it
+            if (longlong) {
+               l = va_arg(ap, Long);
+            } else {
+               l = (Long)va_arg(ap, Int);
+            }
+            convert_int(intbuf, l, 10/*base*/, True/*signed*/,
+                                False/*irrelevant*/);
+            len1 = len3 = 0;
+            len2 = vex_strlen(intbuf);
+            if (fwidth > len2) { len1 = ljustify ? 0 : fwidth-len2;
+                                 len3 = ljustify ? fwidth-len2 : 0; }
+            PAD(len1); PUTSTR(intbuf); PAD(len3);
+            break;
+         }
+         case 'u': 
+         case 'x': 
+         case 'X': {
+            Int   base = *format == 'u' ? 10 : 16;
+            Bool  hexcaps = True; /* *format == 'X'; */
+            ULong l;
+            if (is_sizet) {
+               l = (ULong)va_arg(ap, SizeT);
+            } else if (longlong) {
+               l = va_arg(ap, ULong);
+            } else {
+               l = (ULong)va_arg(ap, UInt);
+            }
+            convert_int(intbuf, l, base, False/*unsigned*/, hexcaps);
+            len1 = len3 = 0;
+            len2 = vex_strlen(intbuf);
+            if (fwidth > len2) { len1 = ljustify ? 0 : fwidth-len2;
+                                 len3 = ljustify ? fwidth-len2 : 0; }
+            PAD(len1); PUTSTR(intbuf); PAD(len3);
+            break;
+         }
+         case 'p': 
+         case 'P': {
+            Bool hexcaps = toBool(*format == 'P');
+            ULong l = (Addr)va_arg(ap, void*);
+            convert_int(intbuf, l, 16/*base*/, False/*unsigned*/, hexcaps);
+            len1 = len3 = 0;
+            len2 = vex_strlen(intbuf)+2;
+            if (fwidth > len2) { len1 = ljustify ? 0 : fwidth-len2;
+                                 len3 = ljustify ? fwidth-len2 : 0; }
+            PAD(len1); PUT('0'); PUT('x'); PUTSTR(intbuf); PAD(len3);
+            break;
+         }
+         case '%': {
+            PUT('%');
+            break;
+         }
+         default:
+            /* no idea what it is.  Print the format literally and
+               move on. */
+            while (saved_format <= format) {
+               PUT(*saved_format);
+               saved_format++;
+            }
+            break;
+      }
+
+      format++;
+
+   }
+
+   return nout;
+
+#  undef PUT
+#  undef PAD
+#  undef PUTSTR
+}
+
+
+/* A general replacement for printf().  Note that only low-level 
+   debugging info should be sent via here.  The official route is to
+   use vg_message().  This interface is deprecated.
+*/
+static HChar myprintf_buf[1000];
+static Int   n_myprintf_buf;
+
+static void add_to_myprintf_buf ( HChar c )
+{
+   Bool emit = toBool(c == '\n' || n_myprintf_buf >= 1000-10 /*paranoia*/);
+   myprintf_buf[n_myprintf_buf++] = c;
+   myprintf_buf[n_myprintf_buf] = 0;
+   if (emit) {
+      (*vex_log_bytes)( myprintf_buf, vex_strlen(myprintf_buf) );
+      n_myprintf_buf = 0;
+      myprintf_buf[n_myprintf_buf] = 0;
+   }
+}
+
+static UInt vex_vprintf ( const HChar* format, va_list vargs )
+{
+   UInt ret;
+   
+   n_myprintf_buf = 0;
+   myprintf_buf[n_myprintf_buf] = 0;      
+   ret = vprintf_wrk ( add_to_myprintf_buf, format, vargs );
+
+   if (n_myprintf_buf > 0) {
+      (*vex_log_bytes)( myprintf_buf, n_myprintf_buf );
+   }
+
+   return ret;
+}
+
+UInt vex_printf ( const HChar* format, ... )
+{
+   UInt ret;
+   va_list vargs;
+   va_start(vargs, format);
+   ret = vex_vprintf(format, vargs);
+   va_end(vargs);
+
+   return ret;
+}
+
+/* Use this function to communicate to users that a (legitimate) situation
+   occurred that we cannot handle (yet). */
+__attribute__ ((noreturn))
+void vfatal ( const HChar* format, ... )
+{
+   va_list vargs;
+   va_start(vargs, format);
+   vex_vprintf( format, vargs );
+   va_end(vargs);
+   vex_printf("Cannot continue. Good-bye\n\n");
+
+   (*vex_failure_exit)();
+}
+
+/* A general replacement for sprintf(). */
+
+static HChar *vg_sprintf_ptr;
+
+static void add_to_vg_sprintf_buf ( HChar c )
+{
+   *vg_sprintf_ptr++ = c;
+}
+
+UInt vex_sprintf ( HChar* buf, const HChar *format, ... )
+{
+   Int ret;
+   va_list vargs;
+
+   vg_sprintf_ptr = buf;
+
+   va_start(vargs,format);
+
+   ret = vprintf_wrk ( add_to_vg_sprintf_buf, format, vargs );
+   add_to_vg_sprintf_buf(0);
+
+   va_end(vargs);
+
+   vassert(vex_strlen(buf) == ret);
+   return ret;
+}
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                         main_util.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/main_util.h b/VEX/priv/main_util.h
new file mode 100644
index 0000000..018ba4f
--- /dev/null
+++ b/VEX/priv/main_util.h
@@ -0,0 +1,170 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                       main_util.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __VEX_MAIN_UTIL_H
+#define __VEX_MAIN_UTIL_H
+
+#include "libvex_basictypes.h"
+
+
+/* Misc. */
+
+#define NULL ((void*)0)
+
+#define LIKELY(x)       __builtin_expect(!!(x), 1)
+#define UNLIKELY(x)     __builtin_expect(!!(x), 0)
+
+#if !defined(offsetof)
+#   define offsetof(type,memb) ((SizeT)(HWord)&((type*)0)->memb)
+#endif
+
+// Poor man's static assert
+#define STATIC_ASSERT(x)  extern int vex__unused_array[(x) ? 1 : -1] \
+                                     __attribute__((unused))
+
+/* Stuff for panicking and assertion. */
+
+#define vassert(expr)                                           \
+  ((void) (LIKELY(expr) ? 0 :                                   \
+           (vex_assert_fail (#expr,                             \
+                             __FILE__, __LINE__,                \
+                             __PRETTY_FUNCTION__), 0)))
+
+__attribute__ ((__noreturn__))
+extern void vex_assert_fail ( const HChar* expr, const HChar* file,
+                              Int line, const HChar* fn );
+__attribute__ ((__noreturn__))
+extern void vpanic ( const HChar* str );
+
+__attribute__ ((__noreturn__)) __attribute__ ((format (printf, 1, 2)))
+extern void vfatal ( const HChar* format, ... );
+
+
+/* Printing */
+
+__attribute__ ((format (printf, 1, 2)))
+extern UInt vex_printf ( const HChar *format, ... );
+
+__attribute__ ((format (printf, 2, 3)))
+extern UInt vex_sprintf ( HChar* buf, const HChar *format, ... );
+
+
+/* String ops */
+
+extern Bool vex_streq ( const HChar* s1, const HChar* s2 );
+extern SizeT vex_strlen ( const HChar* str );
+extern void vex_bzero ( void* s, SizeT n );
+
+
+/* Storage management: clear the area, and allocate from it. */
+
+/* By default allocation occurs in the temporary area.  However, it is
+   possible to switch to permanent area allocation if that's what you
+   want.  Permanent area allocation is very limited, tho. */
+
+typedef
+   enum {
+      VexAllocModeTEMP, 
+      VexAllocModePERM 
+   }
+   VexAllocMode;
+
+extern void         vexSetAllocMode ( VexAllocMode );
+extern VexAllocMode vexGetAllocMode ( void );
+extern void         vexAllocSanityCheck ( void );
+
+extern void vexSetAllocModeTEMP_and_clear ( void );
+
+/* Allocate in Vex's temporary allocation area.  Be careful with this.
+   You can only call it inside an instrumentation or optimisation
+   callback that you have previously specified in a call to
+   LibVEX_Translate.  The storage allocated will only stay alive until
+   translation of the current basic block is complete.
+ */
+extern HChar* private_LibVEX_alloc_first;
+extern HChar* private_LibVEX_alloc_curr;
+extern HChar* private_LibVEX_alloc_last;
+extern void   private_LibVEX_alloc_OOM(void) __attribute__((noreturn));
+
+/* Allocated memory as returned by LibVEX_Alloc will be aligned on this
+   boundary. */
+#define REQ_ALIGN 8
+
+static inline void* LibVEX_Alloc_inline ( SizeT nbytes )
+{
+   struct align {
+      char c;
+      union {
+         char c;
+         short s;
+         int i;
+         long l;
+         long long ll;
+         float f;
+         double d;
+         /* long double is currently not used and would increase alignment
+            unnecessarily. */
+         /* long double ld; */
+         void *pto;
+         void (*ptf)(void);
+      } x;
+   };
+
+   /* Make sure the compiler does not surprise us */
+   vassert(offsetof(struct align,x) <= REQ_ALIGN);
+
+#if 0
+  /* Nasty debugging hack, do not use. */
+  return malloc(nbytes);
+#else
+   HChar* curr;
+   HChar* next;
+   SizeT  ALIGN;
+   ALIGN  = offsetof(struct align,x) - 1;
+   nbytes = (nbytes + ALIGN) & ~ALIGN;
+   curr   = private_LibVEX_alloc_curr;
+   next   = curr + nbytes;
+   if (next >= private_LibVEX_alloc_last)
+      private_LibVEX_alloc_OOM();
+   private_LibVEX_alloc_curr = next;
+   return curr;
+#endif
+}
+
+#endif /* ndef __VEX_MAIN_UTIL_H */
+
+/*--- end                                         main_util.h ---*/
+/*---                                             main_util.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/multiarch_main_main.c b/VEX/priv/multiarch_main_main.c
new file mode 100644
index 0000000..c7500bc
--- /dev/null
+++ b/VEX/priv/multiarch_main_main.c
@@ -0,0 +1,73 @@
+/*---------------------------------------------------------------*/
+/*--- Begin                             multiarch_main_main.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2015 Philippe Waroquiers
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+/* This file is used to have main_main.c compiled with VEXMULTIARCH
+   defined, so that all host and guest arch are callable from LibVEX_Translate
+   and other functions defined in main_main.c.
+   The resulting object file will be put in libvexmultiarch-<platform>-<os>.a.
+
+   The valgrind tools are making the assumption that host and guest are
+   the same. So, no need to drag the full set of archs when
+   linking a tool.
+   The VEX library is nicely split in arch independent and arch dependent
+   objects. Only main_main.c is dragging the various arch specific files.
+   So, main_main.c (the main entry point of the VEX library) is compiled
+   only for the current guest/host arch.
+
+   This file ensures we recompile main_main.c with all archs activated.
+   
+   So, a VEX user can decide (at link time) to use a 'single arch' VEX lib,
+   or to use a multiarch VEX lib.
+   If t1.o is a 'main' that calls LibVEX_Translate, then
+   to link with a single arch VEX lib, use e.g. the following :
+     gcc -o t1single t1.o -LInst/lib/valgrind  -lvex-amd64-linux -lgcc
+
+   to link with a multi arch VEX lib, you must insert 
+     -lvexmultiarch-amd64-linux *before* -lvex-amd64-linux
+   i.e.
+     gcc -o t1multi t1.o \
+        -LInst/lib/valgrind -lvexmultiarch-amd64-linux -lvex-amd64-linux -lgcc
+
+   t1single will only be able to translate from amd64 to amd64.
+   t1multi will be able to translate from any arch supported by VEX
+   to any other arch supported by VEX.
+   Note however that multiarch support is experimental and poorly
+   or not tested.
+*/
+
+#define VEXMULTIARCH 1
+#include "main_main.c"
+
+/*---------------------------------------------------------------*/
+/*--- end                               multiarch_main_main.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/s390_defs.h b/VEX/priv/s390_defs.h
new file mode 100644
index 0000000..1e89883
--- /dev/null
+++ b/VEX/priv/s390_defs.h
@@ -0,0 +1,157 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                       s390_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_S390_DEFS_H
+#define __VEX_S390_DEFS_H
+
+
+/* Condition code. The encoding of the enumerators matches the value of
+   the mask field in the various branch opcodes. */
+typedef enum {
+   S390_CC_NEVER  =  0,
+   S390_CC_OVFL   =  1,   /* overflow */
+   S390_CC_H      =  2,   /* A > B ; high */
+   S390_CC_NLE    =  3,   /* not low or equal */
+   S390_CC_L      =  4,   /* A < B ; low */
+   S390_CC_NHE    =  5,   /* not high or equal */
+   S390_CC_LH     =  6,   /* low or high */
+   S390_CC_NE     =  7,   /* A != B ; not zero */
+   S390_CC_E      =  8,   /* A == B ; zero */
+   S390_CC_NLH    =  9,   /* not low or high */
+   S390_CC_HE     = 10,   /* A >= B ; high or equal */
+   S390_CC_NL     = 11,   /* not low */
+   S390_CC_LE     = 12,   /* A <= B ; low or equal */
+   S390_CC_NH     = 13,   /* not high */
+   S390_CC_NO     = 14,   /* not overflow */
+   S390_CC_ALWAYS = 15
+} s390_cc_t;
+
+
+/* Invert the condition code */
+static __inline__ s390_cc_t
+s390_cc_invert(s390_cc_t cond)
+{
+   return S390_CC_ALWAYS - cond;
+}
+
+
+/* BFP Rounding mode as it is encoded in the m3 field of certain
+   instructions (e.g. CFEBR) */
+typedef enum {
+   S390_BFP_ROUND_PER_FPC       = 0,
+   S390_BFP_ROUND_NEAREST_AWAY  = 1,
+   /* 2 is not allowed */
+   S390_BFP_ROUND_PREPARE_SHORT = 3,
+   S390_BFP_ROUND_NEAREST_EVEN  = 4,
+   S390_BFP_ROUND_ZERO          = 5,
+   S390_BFP_ROUND_POSINF        = 6,
+   S390_BFP_ROUND_NEGINF        = 7
+} s390_bfp_round_t;
+
+
+/* BFP Rounding mode as it is encoded in bits [29:31] of the FPC register.
+   Only rounding modes 0..3 are universally supported. Others require
+   additional hardware facilities. */
+typedef enum {
+   S390_FPC_BFP_ROUND_NEAREST_EVEN  = 0,
+   S390_FPC_BFP_ROUND_ZERO          = 1,
+   S390_FPC_BFP_ROUND_POSINF        = 2,
+   S390_FPC_BFP_ROUND_NEGINF        = 3,
+   /* 4,5,6 are not allowed */
+   S390_FPC_BFP_ROUND_PREPARE_SHORT = 7 /* floating point extension facility */
+} s390_fpc_bfp_round_t;
+
+
+/* DFP Rounding mode as it is encoded in the m3 field of certain
+   instructions (e.g. CGDTR) */
+typedef enum {
+   S390_DFP_ROUND_PER_FPC_0             = 0,
+   S390_DFP_ROUND_NEAREST_TIE_AWAY_0_1  = 1,
+   S390_DFP_ROUND_PER_FPC_2             = 2,
+   S390_DFP_ROUND_PREPARE_SHORT_3       = 3,
+   S390_DFP_ROUND_NEAREST_EVEN_4        = 4,
+   S390_DFP_ROUND_ZERO_5                = 5,
+   S390_DFP_ROUND_POSINF_6              = 6,
+   S390_DFP_ROUND_NEGINF_7              = 7,
+   S390_DFP_ROUND_NEAREST_EVEN_8        = 8,
+   S390_DFP_ROUND_ZERO_9                = 9,
+   S390_DFP_ROUND_POSINF_10             = 10,
+   S390_DFP_ROUND_NEGINF_11             = 11,
+   S390_DFP_ROUND_NEAREST_TIE_AWAY_0_12 = 12,
+   S390_DFP_ROUND_NEAREST_TIE_TOWARD_0  = 13,
+   S390_DFP_ROUND_AWAY_0                = 14,
+   S390_DFP_ROUND_PREPARE_SHORT_15      = 15
+} s390_dfp_round_t;
+
+
+/* DFP Rounding mode as it is encoded in bits [25:27] of the FPC register. */
+typedef enum {
+   S390_FPC_DFP_ROUND_NEAREST_EVEN     = 0,
+   S390_FPC_DFP_ROUND_ZERO             = 1,
+   S390_FPC_DFP_ROUND_POSINF           = 2,
+   S390_FPC_DFP_ROUND_NEGINF           = 3,
+   S390_FPC_DFP_ROUND_NEAREST_AWAY_0   = 4,
+   S390_FPC_DFP_ROUND_NEAREST_TOWARD_0 = 5,
+   S390_FPC_DFP_ROUND_AWAY_ZERO        = 6,
+   S390_FPC_DFP_ROUND_PREPARE_SHORT    = 7
+} s390_fpc_dfp_round_t;
+
+/* PFPO function code as it is encoded in bits [33:55] of GR0
+   when PFPO insn is executed. */
+typedef enum {
+   S390_PFPO_F32_TO_D32   = 0x010805,
+   S390_PFPO_F32_TO_D64   = 0x010905,
+   S390_PFPO_F32_TO_D128  = 0x010A05,
+   S390_PFPO_F64_TO_D32   = 0x010806,
+   S390_PFPO_F64_TO_D64   = 0x010906,
+   S390_PFPO_F64_TO_D128  = 0x010A06,
+   S390_PFPO_F128_TO_D32  = 0x010807,
+   S390_PFPO_F128_TO_D64  = 0x010907,
+   S390_PFPO_F128_TO_D128 = 0x010A07,
+   S390_PFPO_D32_TO_F32   = 0x010508,
+   S390_PFPO_D32_TO_F64   = 0x010608,
+   S390_PFPO_D32_TO_F128  = 0x010708,
+   S390_PFPO_D64_TO_F32   = 0x010509,
+   S390_PFPO_D64_TO_F64   = 0x010609,
+   S390_PFPO_D64_TO_F128  = 0x010709,
+   S390_PFPO_D128_TO_F32  = 0x01050A,
+   S390_PFPO_D128_TO_F64  = 0x01060A,
+   S390_PFPO_D128_TO_F128 = 0x01070A
+} s390_pfpo_function_t;
+
+/* The length of the longest mnemonic: locgrnhe */
+#define S390_MAX_MNEMONIC_LEN  8
+
+
+/*---------------------------------------------------------------*/
+/*--- end                                         s390_defs.h ---*/
+/*---------------------------------------------------------------*/
+
+#endif /* __VEX_S390_DEFS_H */
diff --git a/VEX/priv/s390_disasm.c b/VEX/priv/s390_disasm.c
new file mode 100644
index 0000000..95cf1f7
--- /dev/null
+++ b/VEX/priv/s390_disasm.c
@@ -0,0 +1,478 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                     s390_disasm.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Florian Krohm */
+
+#include <stdarg.h>
+#include "libvex_basictypes.h"
+#include "main_util.h"        // vassert
+#include "main_globals.h"     // vex_traceflags
+#include "s390_defs.h"        // S390_MAX_MNEMONIC_LEN
+#include "s390_disasm.h"
+
+
+/* Return the mnemonic left-justified in a field of S390_MAX_MNEMONIC_LEN
+   blanks.  The result lives in a static buffer (not reentrant). */
+static const HChar *
+mnemonic(const HChar *mnm)
+{
+   static HChar buf[S390_MAX_MNEMONIC_LEN + 1];
+
+   vassert(vex_strlen(mnm) <= S390_MAX_MNEMONIC_LEN);
+
+   /* Copy the mnemonic, then pad with blanks up to the fixed width. */
+   HChar *p = buf;
+   while (*mnm)
+      *p++ = *mnm++;
+   while (p < buf + S390_MAX_MNEMONIC_LEN)
+      *p++ = ' ';
+   *p = '\0';
+
+   return buf;
+}
+
+
+/* Return the name of a general purpose register for dis-assembly purposes. */
+static const HChar *
+gpr_operand(UInt archreg)
+{
+   static const HChar names[16][5] = {
+      [ 0] = "%r0",  [ 1] = "%r1",  [ 2] = "%r2",  [ 3] = "%r3",
+      [ 4] = "%r4",  [ 5] = "%r5",  [ 6] = "%r6",  [ 7] = "%r7",
+      [ 8] = "%r8",  [ 9] = "%r9",  [10] = "%r10", [11] = "%r11",
+      [12] = "%r12", [13] = "%r13", [14] = "%r14", [15] = "%r15",
+   };
+
+   vassert(archreg < sizeof names / sizeof names[0]);
+
+   return names[archreg];
+}
+
+
+/* Return the name of a floating point register for dis-assembly purposes. */
+static const HChar *
+fpr_operand(UInt archreg)
+{
+   static const HChar names[16][5] = {
+      [ 0] = "%f0",  [ 1] = "%f1",  [ 2] = "%f2",  [ 3] = "%f3",
+      [ 4] = "%f4",  [ 5] = "%f5",  [ 6] = "%f6",  [ 7] = "%f7",
+      [ 8] = "%f8",  [ 9] = "%f9",  [10] = "%f10", [11] = "%f11",
+      [12] = "%f12", [13] = "%f13", [14] = "%f14", [15] = "%f15",
+   };
+
+   vassert(archreg < sizeof names / sizeof names[0]);
+
+   return names[archreg];
+}
+
+
+/* Return the name of an access register for dis-assembly purposes. */
+static const HChar *
+ar_operand(UInt archreg)
+{
+   static const HChar names[16][5] = {
+      [ 0] = "%a0",  [ 1] = "%a1",  [ 2] = "%a2",  [ 3] = "%a3",
+      [ 4] = "%a4",  [ 5] = "%a5",  [ 6] = "%a6",  [ 7] = "%a7",
+      [ 8] = "%a8",  [ 9] = "%a9",  [10] = "%a10", [11] = "%a11",
+      [12] = "%a12", [13] = "%a13", [14] = "%a14", [15] = "%a15",
+   };
+
+   vassert(archreg < sizeof names / sizeof names[0]);
+
+   return names[archreg];
+}
+
+
+/* Build and return the extended mnemonic for the compare and branch
+   opcodes as introduced by z10. See also the opcodes in file
+   opcodes/s390-opc.txt (from binutils) that have a '$' in their name.
+   The result is BASE followed by a condition-code suffix, built in a
+   static buffer (not reentrant). */
+static const HChar *
+cab_operand(const HChar *base, UInt mask)
+{
+   static HChar buf[S390_MAX_MNEMONIC_LEN + 1];
+
+   /* Suffix selected by bits 1-3 of MASK (bit 0 does not participate). */
+   static const HChar suffix[8][3] = {
+      "", "h", "l", "ne", "e", "nl", "nh", ""
+   };
+
+   /* Guard against buffer overflow */
+   vassert(vex_strlen(base) + sizeof suffix[0] <= sizeof buf);
+
+   HChar *p = buf;
+   const HChar *q;
+
+   for (q = base; *q; ++q)
+      *p++ = *q;
+   for (q = suffix[mask >> 1]; *q; ++q)
+      *p++ = *q;
+   *p = '\0';
+
+   return buf;
+}
+
+
+/* Common function used to construct a mnemonic based on a condition code
+   mask.  The result is PREFIX ++ mask_id[MASK] ++ SUFFIX, built in a
+   static buffer (not reentrant; the caller must consume the result
+   before the next call). */
+static const HChar *
+construct_mnemonic(const HChar *prefix, const HChar *suffix, UInt mask)
+{
+   HChar *to;
+   const HChar *from;
+
+   static HChar buf[S390_MAX_MNEMONIC_LEN + 1];
+
+   /* Extended-mnemonic letters for each 4-bit condition code mask.
+      'const' for consistency with the suffix table in cab_operand. */
+   static const HChar mask_id[16][4] = {
+      "", /* 0 -> unused */
+      "o", "h", "nle", "l", "nhe", "lh", "ne",
+      "e", "nlh", "he", "nl", "le", "nh", "no",
+      ""  /* 15 -> unused */
+   };
+
+   /* MASK is a 4-bit condition code mask; guard the table index. */
+   vassert(mask < 16);
+
+   /* Guard against buffer overflow */
+   vassert(vex_strlen(prefix) + vex_strlen(suffix) +
+           sizeof mask_id[0] <= sizeof buf);
+
+   /* strcpy(buf, prefix); */
+   for (from = prefix, to = buf; *from; ++from, ++to) {
+      *to = *from;
+   }
+   /* strcat(buf, mask_id); */
+   for (from = mask_id[mask]; *from; ++from, ++to) {
+      *to = *from;
+   }
+   /* strcat(buf, suffix); */
+   for (from = suffix; *from; ++from, ++to) {
+      *to = *from;
+   }
+   *to = '\0';
+
+   return buf;
+}
+
+
+/* Return the special mnemonic for the BCR opcode */
+static const HChar *
+bcr_operand(UInt m1)
+{
+   switch (m1) {
+   case 0:  return "nopr";   /* mask 0: no-op */
+   case 15: return "br";     /* mask 15: unconditional */
+   default: return construct_mnemonic("b", "r", m1);
+   }
+}
+
+
+/* Return the special mnemonic for the BC opcode */
+static const HChar *
+bc_operand(UInt m1)
+{
+   switch (m1) {
+   case 0:  return "nop";    /* mask 0: no-op */
+   case 15: return "b";      /* mask 15: unconditional */
+   default: return construct_mnemonic("b", "", m1);
+   }
+}
+
+
+/* Return the special mnemonic for the BRC opcode */
+static const HChar *
+brc_operand(UInt m1)
+{
+   switch (m1) {
+   case 0:  return "brc";    /* no extended mnemonic for mask 0 */
+   case 15: return "j";      /* mask 15: unconditional */
+   default: return construct_mnemonic("j", "", m1);
+   }
+}
+
+
+/* Return the special mnemonic for the BRCL opcode */
+static const HChar *
+brcl_operand(UInt m1)
+{
+   switch (m1) {
+   case 0:  return "brcl";   /* no extended mnemonic for mask 0 */
+   case 15: return "jg";     /* mask 15: unconditional */
+   default: return construct_mnemonic("jg", "", m1);
+   }
+}
+
+
+/* Return the special mnemonic for a conditional load/store  opcode */
+static const HChar *
+cls_operand(Int kind, UInt mask)
+{
+   /* Base mnemonics, indexed by KIND relative to S390_XMNM_LOCR.
+      Relies on the S390_XMNM_LOC* / STOC* values being contiguous. */
+   static const HChar *const bases[] = {
+      "locr", "locgr", "loc", "locg", "stoc", "stocg"
+   };
+
+   if (kind < S390_XMNM_LOCR || kind > S390_XMNM_STOCG)
+      vpanic("cls_operand");
+
+   return construct_mnemonic(bases[kind - S390_XMNM_LOCR], "", mask);
+}
+
+
+/* An operand with a base register, an index register, and a displacement.
+   If the displacement is signed, the rightmost 20 bit of D need to be
+   sign extended.  Writes the textual operand to P and returns the
+   advanced write position. */
+static HChar *
+dxb_operand(HChar *p, UInt d, UInt x, UInt b, Bool displacement_is_signed)
+{
+   if (displacement_is_signed) {
+      /* Sign-extend the low 20 bits of D portably.  The previous idiom,
+         (Int)(d << 12) >> 12, relied on implementation-defined behaviour
+         (right shift of a negative signed value). */
+      Int displ = (Int)((d & 0xFFFFF) ^ 0x80000) - (Int)0x80000;
+
+      p += vex_sprintf(p, "%d", displ);
+   } else {
+      p += vex_sprintf(p, "%u", d);
+   }
+   if (x != 0) {
+      /* Index register present: print d(x) or d(x,b). */
+      p += vex_sprintf(p, "(%s", gpr_operand(x));
+      if (b != 0) {
+         p += vex_sprintf(p, ",%s", gpr_operand(b));
+      }
+      p += vex_sprintf(p, ")");
+   } else {
+      /* No index register: print d or d(b). */
+      if (b != 0) {
+         p += vex_sprintf(p, "(%s)", gpr_operand(b));
+      }
+   }
+
+   return p;
+}
+
+
+/* An operand with base register, unsigned length, and a 12-bit
+   unsigned displacement.  The encoded length is one less than the
+   actual length, hence the '+ 1'. */
+static HChar *
+udlb_operand(HChar *p, UInt d, UInt length, UInt b)
+{
+   if (b != 0) {
+      p += vex_sprintf(p, "%u(%u,%s)", d, length + 1, gpr_operand(b));
+   } else {
+      p += vex_sprintf(p, "%u(%u)", d, length + 1);
+   }
+
+   return p;
+}
+
+
+/* The first argument is the command that says how to write the disassembled
+   insn. It is understood that the mnemonic comes first and that arguments
+   are separated by a ','. The command holds the arguments. Each argument is
+   encoded using a 4-bit S390_ARG_xyz value. The first argument is placed
+   in the least significant bits of the command and so on. There are at most
+   5 arguments in an insn and a sentinel (S390_ARG_DONE) is needed to identify
+   the end of the argument list. 6 * 4 = 24 bits are required for the
+   command. */
+void
+s390_disasm(UInt command, ...)
+{
+   va_list  args;
+   UInt argkind;
+   HChar buf[128];  /* holds the disassembled insn */
+   HChar *p;
+   HChar separator;
+   Int mask_suffix = -1;  /* -1 means: no trailing integer mask operand */
+
+   va_start(args, command);
+
+   p = buf;
+   separator = 0;   /* no separator before the first item (the mnemonic) */
+
+   /* Consume one 4-bit argument descriptor per iteration until the
+      S390_ARG_DONE sentinel is found. */
+   while (42) {
+      argkind = command & 0xF;
+      command >>= 4;
+
+      if (argkind == S390_ARG_DONE) goto done;
+
+      if (argkind == S390_ARG_CABM) separator = 0;  /* optional */
+
+      /* Write out the separator */
+      if (separator) *p++ = separator;
+
+      /* argument */
+      switch (argkind) {
+      case S390_ARG_MNM:
+         p += vex_sprintf(p, "%s", mnemonic(va_arg(args, HChar *)));
+         separator = ' ';
+         continue;
+
+      case S390_ARG_XMNM: {
+         UInt mask, kind;
+         const HChar *mnm;
+
+         kind = va_arg(args, UInt);
+
+         separator = ' ';
+         switch (kind) {
+         case S390_XMNM_BC:
+         case S390_XMNM_BCR:
+            mask = va_arg(args, UInt);
+            mnm = kind == S390_XMNM_BCR ? bcr_operand(mask) : bc_operand(mask);
+            p  += vex_sprintf(p, "%s", mnemonic(mnm));
+            /* mask == 0 is a NOP and has no argument */
+            if (mask == 0) goto done;
+            break;
+
+         case S390_XMNM_BRC:
+         case S390_XMNM_BRCL:
+            mask = va_arg(args, UInt);
+            mnm = kind == S390_XMNM_BRC ? brc_operand(mask) : brcl_operand(mask);
+            p  += vex_sprintf(p, "%s", mnemonic(mnm));
+
+            /* mask == 0 has no special mnemonic */
+            if (mask == 0) {
+               p += vex_sprintf(p, " 0");
+               separator = ',';
+            }
+            break;
+
+         case S390_XMNM_CAB:
+            mnm  = va_arg(args, HChar *);
+            mask = va_arg(args, UInt);
+            p  += vex_sprintf(p, "%s", mnemonic(cab_operand(mnm, mask)));
+            break;
+
+         case S390_XMNM_LOCR:
+         case S390_XMNM_LOCGR:
+         case S390_XMNM_LOC:
+         case S390_XMNM_LOCG:
+         case S390_XMNM_STOC:
+         case S390_XMNM_STOCG:
+            mask = va_arg(args, UInt);
+            mnm = cls_operand(kind, mask);
+            p  += vex_sprintf(p, "%s", mnemonic(mnm));
+            /* There are no special opcodes when mask == 0 or 15. In that case
+               the integer mask is appended as the final operand */
+            if (mask == 0 || mask == 15) mask_suffix = mask;
+            break;
+         }
+      }
+      /* NOTE: this 'continue' terminates the S390_ARG_XMNM case above;
+         the case labels that follow still belong to the outer switch. */
+      continue;
+
+      case S390_ARG_GPR:
+         p += vex_sprintf(p, "%s", gpr_operand(va_arg(args, UInt)));
+         break;
+
+      case S390_ARG_FPR:
+         p += vex_sprintf(p, "%s", fpr_operand(va_arg(args, UInt)));
+         break;
+
+      case S390_ARG_AR:
+         p += vex_sprintf(p, "%s", ar_operand(va_arg(args, UInt)));
+         break;
+
+      case S390_ARG_UINT:
+         p += vex_sprintf(p, "%u", va_arg(args, UInt));
+         break;
+
+      case S390_ARG_INT:
+         p += vex_sprintf(p, "%d", (Int)(va_arg(args, UInt)));
+         break;
+
+      case S390_ARG_PCREL: {
+         Long offset = va_arg(args, Int);
+
+         /* Convert # halfwords to # bytes */
+         offset <<= 1;
+
+         if (offset < 0) {
+            p += vex_sprintf(p, ".%lld", offset);
+         } else {
+            p += vex_sprintf(p, ".+%lld", offset);
+         }
+         break;
+      }
+
+      case S390_ARG_SDXB: {
+         UInt dh, dl, x, b;
+
+         /* The displacement arrives in two pieces: DH supplies the bits
+            above bit 11, DL the low 12 bits. */
+         dh = va_arg(args, UInt);
+         dl = va_arg(args, UInt);
+         x  = va_arg(args, UInt);
+         b  = va_arg(args, UInt);
+
+         p = dxb_operand(p, (dh << 12) | dl, x, b, 1 /* signed_displacement */);
+         break;
+      }
+
+      case S390_ARG_UDXB: {
+         UInt d, x, b;
+
+         d = va_arg(args, UInt);
+         x = va_arg(args, UInt);
+         b = va_arg(args, UInt);
+
+         p = dxb_operand(p, d, x, b, 0 /* signed_displacement */);
+         break;
+      }
+
+      case S390_ARG_UDLB: {
+         UInt d, l, b;
+
+         d = va_arg(args, UInt);
+         l = va_arg(args, UInt);
+         b = va_arg(args, UInt);
+
+         p = udlb_operand(p, d, l, b);
+         break;
+      }
+
+      case S390_ARG_CABM: {
+         UInt mask;
+
+         /* Bit 0 of the mask is dropped; the mask is printed explicitly
+            only for values 0 and 14, i.e. the masks for which the suffix
+            table in cab_operand holds an empty string. */
+         mask = va_arg(args, UInt) & 0xE;
+         if (mask == 0 || mask == 14) {
+            p += vex_sprintf(p, ",%u", mask);
+         }
+         break;
+      }
+      }
+
+      separator = ',';
+   }
+
+ done:
+   va_end(args);
+
+   /* Append the integer mask operand for conditional load/store insns
+      without an extended mnemonic (set in the S390_XMNM_LOCR.. cases). */
+   if (mask_suffix != -1)
+      p += vex_sprintf(p, ",%d", mask_suffix);
+   *p = '\0';
+
+   vassert(p < buf + sizeof buf);  /* detect buffer overwrite */
+
+   /* Finally, write out the disassembled insn */
+   vex_printf("%s\n", buf);
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                       s390_disasm.c ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/s390_disasm.h b/VEX/priv/s390_disasm.h
new file mode 100644
index 0000000..1522445
--- /dev/null
+++ b/VEX/priv/s390_disasm.h
@@ -0,0 +1,93 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                                     s390_disasm.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VEX_S390_DISASM_H
+#define __VEX_S390_DISASM_H
+
+#include "libvex_basictypes.h"
+
+/* Macros to encode a command for s390_disasm.  Each operand kind is a
+   4-bit S390_ARG_xyz value; ENCn packs n of them, first operand in the
+   least significant nibble, followed by the S390_ARG_DONE sentinel. */
+#undef  P
+#define P(a) (S390_ARG_##a)
+#undef  ENC1
+#define ENC1(a) ((P(DONE) << 4) | P(a))
+#undef  ENC2
+#define ENC2(a,b) ((P(DONE) << 8) | (P(b) << 4) | P(a))
+#undef  ENC3
+#define ENC3(a,b,c) ((P(DONE) << 12) | (P(c) << 8) | (P(b) << 4) | P(a))
+#undef  ENC4
+#define ENC4(a,b,c,d) ((P(DONE) << 16) | (P(d) << 12) | (P(c) << 8) | \
+                       (P(b) << 4) | P(a))
+#undef  ENC5
+#define ENC5(a,b,c,d,e) ((P(DONE) << 20) | (P(e) << 16) | (P(d) << 12) | \
+                         (P(c) << 8) | (P(b) << 4) | P(a))
+#undef  ENC6
+#define ENC6(a,b,c,d,e,f) ((P(DONE) << 24) | (P(f) << 20) | (P(e) << 16) | \
+                           (P(d) << 12) | (P(c) << 8) | (P(b) << 4) | P(a))
+
+/* The different kinds of operands in an asm insn */
+enum {
+   S390_ARG_DONE = 0,    /* sentinel: end of argument list */
+   S390_ARG_GPR = 1,     /* general purpose register */
+   S390_ARG_FPR = 2,     /* floating point register */
+   S390_ARG_AR = 3,      /* access register */
+   S390_ARG_INT = 4,     /* signed integer */
+   S390_ARG_UINT = 5,    /* unsigned integer */
+   S390_ARG_PCREL = 6,   /* PC-relative offset, given in halfwords */
+   S390_ARG_SDXB = 7,    /* signed displacement + index + base */
+   S390_ARG_UDXB = 8,    /* unsigned displacement + index + base */
+   S390_ARG_UDLB = 9,    /* unsigned displacement + length + base */
+   S390_ARG_CABM = 10,   /* compare-and-branch mask (optional operand) */
+   S390_ARG_MNM = 11,    /* mnemonic string */
+   S390_ARG_XMNM = 12    /* extended mnemonic */
+};
+
+/* The different kinds of extended mnemonics */
+enum {
+   S390_XMNM_CAB = 0,    /* compare and branch */
+   S390_XMNM_BCR = 1,    /* branch on condition (register) */
+   S390_XMNM_BC = 2,     /* branch on condition */
+   S390_XMNM_BRC = 3,    /* branch relative on condition */
+   S390_XMNM_BRCL = 4,   /* branch relative on condition long */
+   /* NOTE: the LOCR..STOCG values below must remain contiguous;
+      cls_operand in s390_disasm.c relies on it. */
+   S390_XMNM_LOCR = 5,   /* conditional load (register) */
+   S390_XMNM_LOCGR = 6,
+   S390_XMNM_LOC = 7,
+   S390_XMNM_LOCG = 8,
+   S390_XMNM_STOC = 9,   /* conditional store */
+   S390_XMNM_STOCG = 10
+};
+
+void s390_disasm(UInt command, ...);
+
+/*---------------------------------------------------------------*/
+/*--- end                                       s390_disasm.h ---*/
+/*---------------------------------------------------------------*/
+
+#endif /* __VEX_S390_DISASM_H */
diff --git a/VEX/priv/tilegx_disasm.c b/VEX/priv/tilegx_disasm.c
new file mode 100644
index 0000000..bbbfa27
--- /dev/null
+++ b/VEX/priv/tilegx_disasm.c
@@ -0,0 +1,7694 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin             Tilegx disassembler   tilegx-disasm.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright Tilera Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+ /* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+#include "tilegx_disasm.h"
+#include <stdarg.h>
+
+/* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */
+#define BFD_RELOC(x) -1
+
+/* Special registers. */
+#define TREG_LR 55
+#define TREG_SN 56
+#define TREG_ZERO 63
+
+#ifndef NULL
+#define NULL  0
+#endif
+
+const struct tilegx_opcode tilegx_opcodes[336] =
+{
+ { "bpt", TILEGX_OPC_BPT, 0x2, 0, TREG_ZERO, 0,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffffffff80000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a44ae00000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "info", TILEGX_OPC_INFO, 0xf, 1, TREG_ZERO, 1,
+    { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00fffULL,
+      0xfff807ff80000000ULL,
+      0x0000000078000fffULL,
+      0x3c0007ff80000000ULL,
+      0ULL
+    },
+    {
+      0x0000000040300fffULL,
+      0x181807ff80000000ULL,
+      0x0000000010000fffULL,
+      0x0c0007ff80000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "infol", TILEGX_OPC_INFOL, 0x3, 1, TREG_ZERO, 1,
+    { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc000000070000fffULL,
+      0xf80007ff80000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000070000fffULL,
+      0x380007ff80000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ld4s_tls", TILEGX_OPC_LD4S_TLS, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1858000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ld_tls", TILEGX_OPC_LD_TLS, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18a0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "move", TILEGX_OPC_MOVE, 0xf, 2, TREG_ZERO, 1,
+    { { 8, 9 }, { 6, 7 }, { 10, 11 }, { 12, 13 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0xfffff80000000000ULL,
+      0x00000000780ff000ULL,
+      0x3c07f80000000000ULL,
+      0ULL
+    },
+    {
+      0x000000005107f000ULL,
+      0x283bf80000000000ULL,
+      0x00000000500bf000ULL,
+      0x2c05f80000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "movei", TILEGX_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1,
+    { { 8, 0 }, { 6, 1 }, { 10, 2 }, { 12, 3 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00fc0ULL,
+      0xfff807e000000000ULL,
+      0x0000000078000fc0ULL,
+      0x3c0007e000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000040100fc0ULL,
+      0x180807e000000000ULL,
+      0x0000000000000fc0ULL,
+      0x040007e000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "moveli", TILEGX_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1,
+    { { 8, 4 }, { 6, 5 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc000000070000fc0ULL,
+      0xf80007e000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000010000fc0ULL,
+      0x000007e000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "prefetch", TILEGX_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff81f80000000ULL,
+      0ULL,
+      0ULL,
+      0xc3f8000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a801f80000000ULL,
+      -1ULL,
+      -1ULL,
+      0x41f8000004000000ULL
+    }
+#endif
+  },
+  { "prefetch_add_l1", TILEGX_OPC_PREFETCH_ADD_L1, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8001f80000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1840001f80000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "prefetch_add_l1_fault", TILEGX_OPC_PREFETCH_ADD_L1_FAULT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8001f80000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1838001f80000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "prefetch_add_l2", TILEGX_OPC_PREFETCH_ADD_L2, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8001f80000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1850001f80000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "prefetch_add_l2_fault", TILEGX_OPC_PREFETCH_ADD_L2_FAULT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8001f80000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1848001f80000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "prefetch_add_l3", TILEGX_OPC_PREFETCH_ADD_L3, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8001f80000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1860001f80000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "prefetch_add_l3_fault", TILEGX_OPC_PREFETCH_ADD_L3_FAULT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8001f80000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1858001f80000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "prefetch_l1", TILEGX_OPC_PREFETCH_L1, 0x12, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff81f80000000ULL,
+      0ULL,
+      0ULL,
+      0xc3f8000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a801f80000000ULL,
+      -1ULL,
+      -1ULL,
+      0x41f8000004000000ULL
+    }
+#endif
+  },
+  { "prefetch_l1_fault", TILEGX_OPC_PREFETCH_L1_FAULT, 0x12, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff81f80000000ULL,
+      0ULL,
+      0ULL,
+      0xc3f8000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a781f80000000ULL,
+      -1ULL,
+      -1ULL,
+      0x41f8000000000000ULL
+    }
+#endif
+  },
+  { "prefetch_l2", TILEGX_OPC_PREFETCH_L2, 0x12, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff81f80000000ULL,
+      0ULL,
+      0ULL,
+      0xc3f8000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a901f80000000ULL,
+      -1ULL,
+      -1ULL,
+      0x43f8000004000000ULL
+    }
+#endif
+  },
+  { "prefetch_l2_fault", TILEGX_OPC_PREFETCH_L2_FAULT, 0x12, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff81f80000000ULL,
+      0ULL,
+      0ULL,
+      0xc3f8000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a881f80000000ULL,
+      -1ULL,
+      -1ULL,
+      0x43f8000000000000ULL
+    }
+#endif
+  },
+  { "prefetch_l3", TILEGX_OPC_PREFETCH_L3, 0x12, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff81f80000000ULL,
+      0ULL,
+      0ULL,
+      0xc3f8000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286aa01f80000000ULL,
+      -1ULL,
+      -1ULL,
+      0x83f8000000000000ULL
+    }
+#endif
+  },
+  { "prefetch_l3_fault", TILEGX_OPC_PREFETCH_L3_FAULT, 0x12, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff81f80000000ULL,
+      0ULL,
+      0ULL,
+      0xc3f8000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a981f80000000ULL,
+      -1ULL,
+      -1ULL,
+      0x81f8000004000000ULL
+    }
+#endif
+  },
+  { "raise", TILEGX_OPC_RAISE, 0x2, 0, TREG_ZERO, 1,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffffffff80000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a44ae80000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "add", TILEGX_OPC_ADD, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x00000000500c0000ULL,
+      0x2806000000000000ULL,
+      0x0000000028040000ULL,
+      0x1802000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "addi", TILEGX_OPC_ADDI, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0x0000000078000000ULL,
+      0x3c00000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000040100000ULL,
+      0x1808000000000000ULL,
+      0ULL,
+      0x0400000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "addli", TILEGX_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 4 }, { 6, 7, 5 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc000000070000000ULL,
+      0xf800000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000010000000ULL,
+      0ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "addx", TILEGX_OPC_ADDX, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000050080000ULL,
+      0x2804000000000000ULL,
+      0x0000000028000000ULL,
+      0x1800000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "addxi", TILEGX_OPC_ADDXI, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0x0000000078000000ULL,
+      0x3c00000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000040200000ULL,
+      0x1810000000000000ULL,
+      0x0000000008000000ULL,
+      0x0800000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "addxli", TILEGX_OPC_ADDXLI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 4 }, { 6, 7, 5 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc000000070000000ULL,
+      0xf800000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000020000000ULL,
+      0x0800000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "addxsc", TILEGX_OPC_ADDXSC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050040000ULL,
+      0x2802000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "and", TILEGX_OPC_AND, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000050100000ULL,
+      0x2808000000000000ULL,
+      0x0000000050000000ULL,
+      0x2c00000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "andi", TILEGX_OPC_ANDI, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0x0000000078000000ULL,
+      0x3c00000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000040300000ULL,
+      0x1818000000000000ULL,
+      0x0000000010000000ULL,
+      0x0c00000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "beqz", TILEGX_OPC_BEQZ, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1440000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "beqzt", TILEGX_OPC_BEQZT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1400000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bfexts", TILEGX_OPC_BFEXTS, 0x1, 4, TREG_ZERO, 1,
+    { { 8, 9, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007f000000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000034000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bfextu", TILEGX_OPC_BFEXTU, 0x1, 4, TREG_ZERO, 1,
+    { { 8, 9, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007f000000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000035000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bfins", TILEGX_OPC_BFINS, 0x1, 4, TREG_ZERO, 1,
+    { { 23, 9, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007f000000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000036000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bgez", TILEGX_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x14c0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bgezt", TILEGX_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1480000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bgtz", TILEGX_OPC_BGTZ, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1540000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bgtzt", TILEGX_OPC_BGTZT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1500000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "blbc", TILEGX_OPC_BLBC, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x15c0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "blbct", TILEGX_OPC_BLBCT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1580000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "blbs", TILEGX_OPC_BLBS, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1640000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "blbst", TILEGX_OPC_BLBST, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1600000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "blez", TILEGX_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x16c0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "blezt", TILEGX_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1680000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bltz", TILEGX_OPC_BLTZ, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1740000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bltzt", TILEGX_OPC_BLTZT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1700000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bnez", TILEGX_OPC_BNEZ, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x17c0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "bnezt", TILEGX_OPC_BNEZT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 20 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xffc0000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1780000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "clz", TILEGX_OPC_CLZ, 0x5, 2, TREG_ZERO, 1,
+    { { 8, 9 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051481000ULL,
+      -1ULL,
+      0x00000000300c1000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmoveqz", TILEGX_OPC_CMOVEQZ, 0x5, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050140000ULL,
+      -1ULL,
+      0x0000000048000000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmovnez", TILEGX_OPC_CMOVNEZ, 0x5, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050180000ULL,
+      -1ULL,
+      0x0000000048040000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmpeq", TILEGX_OPC_CMPEQ, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x00000000501c0000ULL,
+      0x280a000000000000ULL,
+      0x0000000040000000ULL,
+      0x2404000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmpeqi", TILEGX_OPC_CMPEQI, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0x0000000078000000ULL,
+      0x3c00000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000040400000ULL,
+      0x1820000000000000ULL,
+      0x0000000018000000ULL,
+      0x1000000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmpexch", TILEGX_OPC_CMPEXCH, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x280e000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmpexch4", TILEGX_OPC_CMPEXCH4, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x280c000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmples", TILEGX_OPC_CMPLES, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000050200000ULL,
+      0x2810000000000000ULL,
+      0x0000000038000000ULL,
+      0x2000000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmpleu", TILEGX_OPC_CMPLEU, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000050240000ULL,
+      0x2812000000000000ULL,
+      0x0000000038040000ULL,
+      0x2002000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmplts", TILEGX_OPC_CMPLTS, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000050280000ULL,
+      0x2814000000000000ULL,
+      0x0000000038080000ULL,
+      0x2004000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmpltsi", TILEGX_OPC_CMPLTSI, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0x0000000078000000ULL,
+      0x3c00000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000040500000ULL,
+      0x1828000000000000ULL,
+      0x0000000020000000ULL,
+      0x1400000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmpltu", TILEGX_OPC_CMPLTU, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x00000000502c0000ULL,
+      0x2816000000000000ULL,
+      0x00000000380c0000ULL,
+      0x2006000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmpltui", TILEGX_OPC_CMPLTUI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040600000ULL,
+      0x1830000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmpne", TILEGX_OPC_CMPNE, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000050300000ULL,
+      0x2818000000000000ULL,
+      0x0000000040040000ULL,
+      0x2406000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmul", TILEGX_OPC_CMUL, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000504c0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmula", TILEGX_OPC_CMULA, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050380000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmulaf", TILEGX_OPC_CMULAF, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050340000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmulf", TILEGX_OPC_CMULF, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050400000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmulfr", TILEGX_OPC_CMULFR, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000503c0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmulh", TILEGX_OPC_CMULH, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050480000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "cmulhr", TILEGX_OPC_CMULHR, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050440000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "crc32_32", TILEGX_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050500000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "crc32_8", TILEGX_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050540000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ctz", TILEGX_OPC_CTZ, 0x5, 2, TREG_ZERO, 1,
+    { { 8, 9 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051482000ULL,
+      -1ULL,
+      0x00000000300c2000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "dblalign", TILEGX_OPC_DBLALIGN, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050640000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "dblalign2", TILEGX_OPC_DBLALIGN2, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050580000ULL,
+      0x281a000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "dblalign4", TILEGX_OPC_DBLALIGN4, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000505c0000ULL,
+      0x281c000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "dblalign6", TILEGX_OPC_DBLALIGN6, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050600000ULL,
+      0x281e000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "drain", TILEGX_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a080000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "dtlbpr", TILEGX_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a100000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "exch", TILEGX_OPC_EXCH, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2822000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "exch4", TILEGX_OPC_EXCH4, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2820000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fdouble_add_flags", TILEGX_OPC_FDOUBLE_ADD_FLAGS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000506c0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fdouble_addsub", TILEGX_OPC_FDOUBLE_ADDSUB, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050680000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fdouble_mul_flags", TILEGX_OPC_FDOUBLE_MUL_FLAGS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050700000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fdouble_pack1", TILEGX_OPC_FDOUBLE_PACK1, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050740000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fdouble_pack2", TILEGX_OPC_FDOUBLE_PACK2, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050780000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fdouble_sub_flags", TILEGX_OPC_FDOUBLE_SUB_FLAGS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000507c0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fdouble_unpack_max", TILEGX_OPC_FDOUBLE_UNPACK_MAX, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050800000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fdouble_unpack_min", TILEGX_OPC_FDOUBLE_UNPACK_MIN, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050840000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fetchadd", TILEGX_OPC_FETCHADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x282a000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fetchadd4", TILEGX_OPC_FETCHADD4, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2824000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fetchaddgez", TILEGX_OPC_FETCHADDGEZ, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2828000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fetchaddgez4", TILEGX_OPC_FETCHADDGEZ4, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2826000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fetchand", TILEGX_OPC_FETCHAND, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x282e000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fetchand4", TILEGX_OPC_FETCHAND4, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x282c000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fetchor", TILEGX_OPC_FETCHOR, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2832000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fetchor4", TILEGX_OPC_FETCHOR4, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2830000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "finv", TILEGX_OPC_FINV, 0x2, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a180000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "flush", TILEGX_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a280000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "flushwb", TILEGX_OPC_FLUSHWB, 0x2, 0, TREG_ZERO, 1,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a200000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fnop", TILEGX_OPC_FNOP, 0xf, 0, TREG_ZERO, 1,
+    { {  }, {  }, {  }, {  }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0xfffff80000000000ULL,
+      0x00000000780ff000ULL,
+      0x3c07f80000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051483000ULL,
+      0x286a300000000000ULL,
+      0x00000000300c3000ULL,
+      0x1c06400000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fsingle_add1", TILEGX_OPC_FSINGLE_ADD1, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050880000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fsingle_addsub2", TILEGX_OPC_FSINGLE_ADDSUB2, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000508c0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fsingle_mul1", TILEGX_OPC_FSINGLE_MUL1, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050900000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fsingle_mul2", TILEGX_OPC_FSINGLE_MUL2, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050940000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fsingle_pack1", TILEGX_OPC_FSINGLE_PACK1, 0x5, 2, TREG_ZERO, 1,
+    { { 8, 9 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051484000ULL,
+      -1ULL,
+      0x00000000300c4000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fsingle_pack2", TILEGX_OPC_FSINGLE_PACK2, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050980000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "fsingle_sub1", TILEGX_OPC_FSINGLE_SUB1, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000509c0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "icoh", TILEGX_OPC_ICOH, 0x2, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a380000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ill", TILEGX_OPC_ILL, 0xa, 0, TREG_ZERO, 1,
+    { { 0, }, {  }, { 0, }, {  }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0x3c07f80000000000ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a400000000000ULL,
+      -1ULL,
+      0x1c06480000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "inv", TILEGX_OPC_INV, 0x2, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a480000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "iret", TILEGX_OPC_IRET, 0x2, 0, TREG_ZERO, 1,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a500000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "j", TILEGX_OPC_J, 0x2, 1, TREG_ZERO, 1,
+    { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfc00000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2400000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "jal", TILEGX_OPC_JAL, 0x2, 1, TREG_LR, 1,
+    { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfc00000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2000000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "jalr", TILEGX_OPC_JALR, 0xa, 1, TREG_LR, 1,
+    { { 0, }, { 7 }, { 0, }, { 13 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0x3c07f80000000000ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a600000000000ULL,
+      -1ULL,
+      0x1c06580000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "jalrp", TILEGX_OPC_JALRP, 0xa, 1, TREG_LR, 1,
+    { { 0, }, { 7 }, { 0, }, { 13 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0x3c07f80000000000ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a580000000000ULL,
+      -1ULL,
+      0x1c06500000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "jr", TILEGX_OPC_JR, 0xa, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 13 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0x3c07f80000000000ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a700000000000ULL,
+      -1ULL,
+      0x1c06680000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "jrp", TILEGX_OPC_JRP, 0xa, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 13 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0x3c07f80000000000ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286a680000000000ULL,
+      -1ULL,
+      0x1c06600000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ld", TILEGX_OPC_LD, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 26, 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286ae80000000000ULL,
+      -1ULL,
+      -1ULL,
+      0x8200000004000000ULL
+    }
+#endif
+  },
+  { "ld1s", TILEGX_OPC_LD1S, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 26, 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a780000000000ULL,
+      -1ULL,
+      -1ULL,
+      0x4000000000000000ULL
+    }
+#endif
+  },
+  { "ld1s_add", TILEGX_OPC_LD1S_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1838000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ld1u", TILEGX_OPC_LD1U, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 26, 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a800000000000ULL,
+      -1ULL,
+      -1ULL,
+      0x4000000004000000ULL
+    }
+#endif
+  },
+  { "ld1u_add", TILEGX_OPC_LD1U_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1840000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ld2s", TILEGX_OPC_LD2S, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 26, 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a880000000000ULL,
+      -1ULL,
+      -1ULL,
+      0x4200000000000000ULL
+    }
+#endif
+  },
+  { "ld2s_add", TILEGX_OPC_LD2S_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1848000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ld2u", TILEGX_OPC_LD2U, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 26, 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a900000000000ULL,
+      -1ULL,
+      -1ULL,
+      0x4200000004000000ULL
+    }
+#endif
+  },
+  { "ld2u_add", TILEGX_OPC_LD2U_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1850000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ld4s", TILEGX_OPC_LD4S, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 26, 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286a980000000000ULL,
+      -1ULL,
+      -1ULL,
+      0x8000000004000000ULL
+    }
+#endif
+  },
+  { "ld4s_add", TILEGX_OPC_LD4S_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1858000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ld4u", TILEGX_OPC_LD4U, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 26, 14 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x286aa00000000000ULL,
+      -1ULL,
+      -1ULL,
+      0x8200000000000000ULL
+    }
+#endif
+  },
+  { "ld4u_add", TILEGX_OPC_LD4U_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1860000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ld_add", TILEGX_OPC_LD_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18a0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldna", TILEGX_OPC_LDNA, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286aa80000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldna_add", TILEGX_OPC_LDNA_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18a8000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt", TILEGX_OPC_LDNT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286ae00000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt1s", TILEGX_OPC_LDNT1S, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286ab00000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt1s_add", TILEGX_OPC_LDNT1S_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1868000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt1u", TILEGX_OPC_LDNT1U, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286ab80000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt1u_add", TILEGX_OPC_LDNT1U_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1870000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt2s", TILEGX_OPC_LDNT2S, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286ac00000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt2s_add", TILEGX_OPC_LDNT2S_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1878000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt2u", TILEGX_OPC_LDNT2U, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286ac80000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt2u_add", TILEGX_OPC_LDNT2U_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1880000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt4s", TILEGX_OPC_LDNT4S, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286ad00000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt4s_add", TILEGX_OPC_LDNT4S_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1888000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt4u", TILEGX_OPC_LDNT4U, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286ad80000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt4u_add", TILEGX_OPC_LDNT4U_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1890000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ldnt_add", TILEGX_OPC_LDNT_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 6, 15, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1898000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "lnk", TILEGX_OPC_LNK, 0xa, 1, TREG_ZERO, 1,
+    { { 0, }, { 6 }, { 0, }, { 12 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0x3c07f80000000000ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286af00000000000ULL,
+      -1ULL,
+      0x1c06700000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mf", TILEGX_OPC_MF, 0x2, 0, TREG_ZERO, 1,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286af80000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mfspr", TILEGX_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 6, 27 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18b0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mm", TILEGX_OPC_MM, 0x1, 4, TREG_ZERO, 1,
+    { { 23, 9, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007f000000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000037000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mnz", TILEGX_OPC_MNZ, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000050a00000ULL,
+      0x2834000000000000ULL,
+      0x0000000048080000ULL,
+      0x2804000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mtspr", TILEGX_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 28, 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18b8000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_hs_hs", TILEGX_OPC_MUL_HS_HS, 0x5, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050d40000ULL,
+      -1ULL,
+      0x0000000068000000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_hs_hu", TILEGX_OPC_MUL_HS_HU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050d80000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_hs_ls", TILEGX_OPC_MUL_HS_LS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050dc0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_hs_lu", TILEGX_OPC_MUL_HS_LU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050e00000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_hu_hu", TILEGX_OPC_MUL_HU_HU, 0x5, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050e40000ULL,
+      -1ULL,
+      0x0000000068040000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_hu_ls", TILEGX_OPC_MUL_HU_LS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050e80000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_hu_lu", TILEGX_OPC_MUL_HU_LU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050ec0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_ls_ls", TILEGX_OPC_MUL_LS_LS, 0x5, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050f00000ULL,
+      -1ULL,
+      0x0000000068080000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_ls_lu", TILEGX_OPC_MUL_LS_LU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050f40000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mul_lu_lu", TILEGX_OPC_MUL_LU_LU, 0x5, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050f80000ULL,
+      -1ULL,
+      0x00000000680c0000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_hs_hs", TILEGX_OPC_MULA_HS_HS, 0x5, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050a80000ULL,
+      -1ULL,
+      0x0000000070000000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_hs_hu", TILEGX_OPC_MULA_HS_HU, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050ac0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_hs_ls", TILEGX_OPC_MULA_HS_LS, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050b00000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_hs_lu", TILEGX_OPC_MULA_HS_LU, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050b40000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_hu_hu", TILEGX_OPC_MULA_HU_HU, 0x5, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050b80000ULL,
+      -1ULL,
+      0x0000000070040000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_hu_ls", TILEGX_OPC_MULA_HU_LS, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050bc0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_hu_lu", TILEGX_OPC_MULA_HU_LU, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050c00000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_ls_ls", TILEGX_OPC_MULA_LS_LS, 0x5, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050c40000ULL,
+      -1ULL,
+      0x0000000070080000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_ls_lu", TILEGX_OPC_MULA_LS_LU, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050c80000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mula_lu_lu", TILEGX_OPC_MULA_LU_LU, 0x5, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050cc0000ULL,
+      -1ULL,
+      0x00000000700c0000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mulax", TILEGX_OPC_MULAX, 0x5, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050a40000ULL,
+      -1ULL,
+      0x0000000040080000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mulx", TILEGX_OPC_MULX, 0x5, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0x00000000780c0000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000050d00000ULL,
+      -1ULL,
+      0x00000000400c0000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "mz", TILEGX_OPC_MZ, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000050fc0000ULL,
+      0x2836000000000000ULL,
+      0x00000000480c0000ULL,
+      0x2806000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "nap", TILEGX_OPC_NAP, 0x2, 0, TREG_ZERO, 0,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286b000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "nop", TILEGX_OPC_NOP, 0xf, 0, TREG_ZERO, 1,
+    { {  }, {  }, {  }, {  }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0xfffff80000000000ULL,
+      0x00000000780ff000ULL,
+      0x3c07f80000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051485000ULL,
+      0x286b080000000000ULL,
+      0x00000000300c5000ULL,
+      0x1c06780000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "nor", TILEGX_OPC_NOR, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051000000ULL,
+      0x2838000000000000ULL,
+      0x0000000050040000ULL,
+      0x2c02000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "or", TILEGX_OPC_OR, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051040000ULL,
+      0x283a000000000000ULL,
+      0x0000000050080000ULL,
+      0x2c04000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "ori", TILEGX_OPC_ORI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040700000ULL,
+      0x18c0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "pcnt", TILEGX_OPC_PCNT, 0x5, 2, TREG_ZERO, 1,
+    { { 8, 9 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051486000ULL,
+      -1ULL,
+      0x00000000300c6000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "revbits", TILEGX_OPC_REVBITS, 0x5, 2, TREG_ZERO, 1,
+    { { 8, 9 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051487000ULL,
+      -1ULL,
+      0x00000000300c7000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "revbytes", TILEGX_OPC_REVBYTES, 0x5, 2, TREG_ZERO, 1,
+    { { 8, 9 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051488000ULL,
+      -1ULL,
+      0x00000000300c8000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "rotl", TILEGX_OPC_ROTL, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051080000ULL,
+      0x283c000000000000ULL,
+      0x0000000058000000ULL,
+      0x3000000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "rotli", TILEGX_OPC_ROTLI, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000060040000ULL,
+      0x3002000000000000ULL,
+      0x0000000078000000ULL,
+      0x3800000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shl", TILEGX_OPC_SHL, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051280000ULL,
+      0x284c000000000000ULL,
+      0x0000000058040000ULL,
+      0x3002000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shl16insli", TILEGX_OPC_SHL16INSLI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 4 }, { 6, 7, 5 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc000000070000000ULL,
+      0xf800000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000070000000ULL,
+      0x3800000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shl1add", TILEGX_OPC_SHL1ADD, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051100000ULL,
+      0x2840000000000000ULL,
+      0x0000000030000000ULL,
+      0x1c00000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shl1addx", TILEGX_OPC_SHL1ADDX, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x00000000510c0000ULL,
+      0x283e000000000000ULL,
+      0x0000000060040000ULL,
+      0x3402000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shl2add", TILEGX_OPC_SHL2ADD, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051180000ULL,
+      0x2844000000000000ULL,
+      0x0000000030040000ULL,
+      0x1c02000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shl2addx", TILEGX_OPC_SHL2ADDX, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051140000ULL,
+      0x2842000000000000ULL,
+      0x0000000060080000ULL,
+      0x3404000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shl3add", TILEGX_OPC_SHL3ADD, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051200000ULL,
+      0x2848000000000000ULL,
+      0x0000000030080000ULL,
+      0x1c04000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shl3addx", TILEGX_OPC_SHL3ADDX, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x00000000511c0000ULL,
+      0x2846000000000000ULL,
+      0x00000000600c0000ULL,
+      0x3406000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shli", TILEGX_OPC_SHLI, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000060080000ULL,
+      0x3004000000000000ULL,
+      0x0000000078040000ULL,
+      0x3802000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shlx", TILEGX_OPC_SHLX, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051240000ULL,
+      0x284a000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shlxi", TILEGX_OPC_SHLXI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000600c0000ULL,
+      0x3006000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shrs", TILEGX_OPC_SHRS, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x00000000512c0000ULL,
+      0x284e000000000000ULL,
+      0x0000000058080000ULL,
+      0x3004000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shrsi", TILEGX_OPC_SHRSI, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000060100000ULL,
+      0x3008000000000000ULL,
+      0x0000000078080000ULL,
+      0x3804000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shru", TILEGX_OPC_SHRU, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051340000ULL,
+      0x2852000000000000ULL,
+      0x00000000580c0000ULL,
+      0x3006000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shrui", TILEGX_OPC_SHRUI, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000060140000ULL,
+      0x300a000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3806000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shrux", TILEGX_OPC_SHRUX, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051300000ULL,
+      0x2850000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shruxi", TILEGX_OPC_SHRUXI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000060180000ULL,
+      0x300c000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "shufflebytes", TILEGX_OPC_SHUFFLEBYTES, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051380000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "st", TILEGX_OPC_ST, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 17 }, { 0, }, { 0, }, { 14, 33 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x2862000000000000ULL,
+      -1ULL,
+      -1ULL,
+      0xc200000004000000ULL
+    }
+#endif
+  },
+  { "st1", TILEGX_OPC_ST1, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 17 }, { 0, }, { 0, }, { 14, 33 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x2854000000000000ULL,
+      -1ULL,
+      -1ULL,
+      0xc000000000000000ULL
+    }
+#endif
+  },
+  { "st1_add", TILEGX_OPC_ST1_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18c8000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "st2", TILEGX_OPC_ST2, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 17 }, { 0, }, { 0, }, { 14, 33 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x2856000000000000ULL,
+      -1ULL,
+      -1ULL,
+      0xc000000004000000ULL
+    }
+#endif
+  },
+  { "st2_add", TILEGX_OPC_ST2_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18d0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "st4", TILEGX_OPC_ST4, 0x12, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 17 }, { 0, }, { 0, }, { 14, 33 } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0xc200000004000000ULL
+    },
+    {
+      -1ULL,
+      0x2858000000000000ULL,
+      -1ULL,
+      -1ULL,
+      0xc200000000000000ULL
+    }
+#endif
+  },
+  { "st4_add", TILEGX_OPC_ST4_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18d8000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "st_add", TILEGX_OPC_ST_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x1900000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "stnt", TILEGX_OPC_STNT, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x2860000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "stnt1", TILEGX_OPC_STNT1, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x285a000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "stnt1_add", TILEGX_OPC_STNT1_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18e0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "stnt2", TILEGX_OPC_STNT2, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x285c000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "stnt2_add", TILEGX_OPC_STNT2_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18e8000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "stnt4", TILEGX_OPC_STNT4, 0x2, 2, TREG_ZERO, 1,
+    { { 0, }, { 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x285e000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "stnt4_add", TILEGX_OPC_STNT4_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18f0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "stnt_add", TILEGX_OPC_STNT_ADD, 0x2, 3, TREG_ZERO, 1,
+    { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x18f8000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "sub", TILEGX_OPC_SUB, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051440000ULL,
+      0x2868000000000000ULL,
+      0x00000000280c0000ULL,
+      0x1806000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "subx", TILEGX_OPC_SUBX, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000051400000ULL,
+      0x2866000000000000ULL,
+      0x0000000028080000ULL,
+      0x1804000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "subxsc", TILEGX_OPC_SUBXSC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000513c0000ULL,
+      0x2864000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "swint0", TILEGX_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286b100000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "swint1", TILEGX_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286b180000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "swint2", TILEGX_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286b200000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "swint3", TILEGX_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0,
+    { { 0, }, {  }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286b280000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "tblidxb0", TILEGX_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1,
+    { { 23, 9 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051489000ULL,
+      -1ULL,
+      0x00000000300c9000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "tblidxb1", TILEGX_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1,
+    { { 23, 9 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x000000005148a000ULL,
+      -1ULL,
+      0x00000000300ca000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "tblidxb2", TILEGX_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1,
+    { { 23, 9 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x000000005148b000ULL,
+      -1ULL,
+      0x00000000300cb000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "tblidxb3", TILEGX_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1,
+    { { 23, 9 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffff000ULL,
+      0ULL,
+      0x00000000780ff000ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x000000005148c000ULL,
+      -1ULL,
+      0x00000000300cc000ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1add", TILEGX_OPC_V1ADD, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051500000ULL,
+      0x286e000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1addi", TILEGX_OPC_V1ADDI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040800000ULL,
+      0x1908000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1adduc", TILEGX_OPC_V1ADDUC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000514c0000ULL,
+      0x286c000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1adiffu", TILEGX_OPC_V1ADIFFU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051540000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1avgu", TILEGX_OPC_V1AVGU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051580000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1cmpeq", TILEGX_OPC_V1CMPEQ, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000515c0000ULL,
+      0x2870000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1cmpeqi", TILEGX_OPC_V1CMPEQI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040900000ULL,
+      0x1910000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1cmples", TILEGX_OPC_V1CMPLES, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051600000ULL,
+      0x2872000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1cmpleu", TILEGX_OPC_V1CMPLEU, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051640000ULL,
+      0x2874000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1cmplts", TILEGX_OPC_V1CMPLTS, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051680000ULL,
+      0x2876000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1cmpltsi", TILEGX_OPC_V1CMPLTSI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040a00000ULL,
+      0x1918000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1cmpltu", TILEGX_OPC_V1CMPLTU, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000516c0000ULL,
+      0x2878000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1cmpltui", TILEGX_OPC_V1CMPLTUI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040b00000ULL,
+      0x1920000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1cmpne", TILEGX_OPC_V1CMPNE, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051700000ULL,
+      0x287a000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1ddotpu", TILEGX_OPC_V1DDOTPU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052880000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1ddotpua", TILEGX_OPC_V1DDOTPUA, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052840000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1ddotpus", TILEGX_OPC_V1DDOTPUS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051780000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1ddotpusa", TILEGX_OPC_V1DDOTPUSA, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051740000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1dotp", TILEGX_OPC_V1DOTP, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051880000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1dotpa", TILEGX_OPC_V1DOTPA, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000517c0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1dotpu", TILEGX_OPC_V1DOTPU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052900000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1dotpua", TILEGX_OPC_V1DOTPUA, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000528c0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1dotpus", TILEGX_OPC_V1DOTPUS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051840000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1dotpusa", TILEGX_OPC_V1DOTPUSA, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051800000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1int_h", TILEGX_OPC_V1INT_H, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000518c0000ULL,
+      0x287c000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1int_l", TILEGX_OPC_V1INT_L, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051900000ULL,
+      0x287e000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1maxu", TILEGX_OPC_V1MAXU, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051940000ULL,
+      0x2880000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1maxui", TILEGX_OPC_V1MAXUI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040c00000ULL,
+      0x1928000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1minu", TILEGX_OPC_V1MINU, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051980000ULL,
+      0x2882000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1minui", TILEGX_OPC_V1MINUI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040d00000ULL,
+      0x1930000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1mnz", TILEGX_OPC_V1MNZ, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000519c0000ULL,
+      0x2884000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1multu", TILEGX_OPC_V1MULTU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051a00000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1mulu", TILEGX_OPC_V1MULU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051a80000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1mulus", TILEGX_OPC_V1MULUS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051a40000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1mz", TILEGX_OPC_V1MZ, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051ac0000ULL,
+      0x2886000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1sadau", TILEGX_OPC_V1SADAU, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051b00000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1sadu", TILEGX_OPC_V1SADU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051b40000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1shl", TILEGX_OPC_V1SHL, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051b80000ULL,
+      0x2888000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1shli", TILEGX_OPC_V1SHLI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000601c0000ULL,
+      0x300e000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1shrs", TILEGX_OPC_V1SHRS, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051bc0000ULL,
+      0x288a000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1shrsi", TILEGX_OPC_V1SHRSI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000060200000ULL,
+      0x3010000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1shru", TILEGX_OPC_V1SHRU, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051c00000ULL,
+      0x288c000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1shrui", TILEGX_OPC_V1SHRUI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000060240000ULL,
+      0x3012000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1sub", TILEGX_OPC_V1SUB, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051c80000ULL,
+      0x2890000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v1subuc", TILEGX_OPC_V1SUBUC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051c40000ULL,
+      0x288e000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2add", TILEGX_OPC_V2ADD, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051d00000ULL,
+      0x2894000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2addi", TILEGX_OPC_V2ADDI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040e00000ULL,
+      0x1938000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2addsc", TILEGX_OPC_V2ADDSC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051cc0000ULL,
+      0x2892000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2adiffs", TILEGX_OPC_V2ADIFFS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051d40000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2avgs", TILEGX_OPC_V2AVGS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051d80000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2cmpeq", TILEGX_OPC_V2CMPEQ, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051dc0000ULL,
+      0x2896000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2cmpeqi", TILEGX_OPC_V2CMPEQI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000040f00000ULL,
+      0x1940000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2cmples", TILEGX_OPC_V2CMPLES, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051e00000ULL,
+      0x2898000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2cmpleu", TILEGX_OPC_V2CMPLEU, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051e40000ULL,
+      0x289a000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2cmplts", TILEGX_OPC_V2CMPLTS, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051e80000ULL,
+      0x289c000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2cmpltsi", TILEGX_OPC_V2CMPLTSI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000041000000ULL,
+      0x1948000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2cmpltu", TILEGX_OPC_V2CMPLTU, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051ec0000ULL,
+      0x289e000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2cmpltui", TILEGX_OPC_V2CMPLTUI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000041100000ULL,
+      0x1950000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2cmpne", TILEGX_OPC_V2CMPNE, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051f00000ULL,
+      0x28a0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2dotp", TILEGX_OPC_V2DOTP, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051f80000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2dotpa", TILEGX_OPC_V2DOTPA, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051f40000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2int_h", TILEGX_OPC_V2INT_H, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000051fc0000ULL,
+      0x28a2000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2int_l", TILEGX_OPC_V2INT_L, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052000000ULL,
+      0x28a4000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2maxs", TILEGX_OPC_V2MAXS, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052040000ULL,
+      0x28a6000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2maxsi", TILEGX_OPC_V2MAXSI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000041200000ULL,
+      0x1958000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2mins", TILEGX_OPC_V2MINS, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052080000ULL,
+      0x28a8000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2minsi", TILEGX_OPC_V2MINSI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000041300000ULL,
+      0x1960000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2mnz", TILEGX_OPC_V2MNZ, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000520c0000ULL,
+      0x28aa000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2mulfsc", TILEGX_OPC_V2MULFSC, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052100000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2muls", TILEGX_OPC_V2MULS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052140000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2mults", TILEGX_OPC_V2MULTS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052180000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2mz", TILEGX_OPC_V2MZ, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000521c0000ULL,
+      0x28ac000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2packh", TILEGX_OPC_V2PACKH, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052200000ULL,
+      0x28ae000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2packl", TILEGX_OPC_V2PACKL, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052240000ULL,
+      0x28b0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2packuc", TILEGX_OPC_V2PACKUC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052280000ULL,
+      0x28b2000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2sadas", TILEGX_OPC_V2SADAS, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000522c0000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2sadau", TILEGX_OPC_V2SADAU, 0x1, 3, TREG_ZERO, 1,
+    { { 23, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052300000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2sads", TILEGX_OPC_V2SADS, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052340000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2sadu", TILEGX_OPC_V2SADU, 0x1, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052380000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2shl", TILEGX_OPC_V2SHL, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052400000ULL,
+      0x28b6000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2shli", TILEGX_OPC_V2SHLI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000060280000ULL,
+      0x3014000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2shlsc", TILEGX_OPC_V2SHLSC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000523c0000ULL,
+      0x28b4000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2shrs", TILEGX_OPC_V2SHRS, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052440000ULL,
+      0x28b8000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2shrsi", TILEGX_OPC_V2SHRSI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000602c0000ULL,
+      0x3016000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2shru", TILEGX_OPC_V2SHRU, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052480000ULL,
+      0x28ba000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2shrui", TILEGX_OPC_V2SHRUI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 29 }, { 6, 7, 30 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000060300000ULL,
+      0x3018000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2sub", TILEGX_OPC_V2SUB, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052500000ULL,
+      0x28be000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v2subsc", TILEGX_OPC_V2SUBSC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000524c0000ULL,
+      0x28bc000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4add", TILEGX_OPC_V4ADD, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052580000ULL,
+      0x28c2000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4addsc", TILEGX_OPC_V4ADDSC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052540000ULL,
+      0x28c0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4int_h", TILEGX_OPC_V4INT_H, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000525c0000ULL,
+      0x28c4000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4int_l", TILEGX_OPC_V4INT_L, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052600000ULL,
+      0x28c6000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4packsc", TILEGX_OPC_V4PACKSC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052640000ULL,
+      0x28c8000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4shl", TILEGX_OPC_V4SHL, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000526c0000ULL,
+      0x28cc000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4shlsc", TILEGX_OPC_V4SHLSC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052680000ULL,
+      0x28ca000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4shrs", TILEGX_OPC_V4SHRS, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052700000ULL,
+      0x28ce000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4shru", TILEGX_OPC_V4SHRU, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052740000ULL,
+      0x28d0000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4sub", TILEGX_OPC_V4SUB, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x00000000527c0000ULL,
+      0x28d4000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "v4subsc", TILEGX_OPC_V4SUBSC, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000052780000ULL,
+      0x28d2000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "wh64", TILEGX_OPC_WH64, 0x2, 1, TREG_ZERO, 1,
+    { { 0, }, { 7 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0ULL,
+      0xfffff80000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      -1ULL,
+      0x286b300000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "xor", TILEGX_OPC_XOR, 0xf, 3, TREG_ZERO, 1,
+    { { 8, 9, 16 }, { 6, 7, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ffc0000ULL,
+      0xfffe000000000000ULL,
+      0x00000000780c0000ULL,
+      0x3c06000000000000ULL,
+      0ULL
+    },
+    {
+      0x0000000052800000ULL,
+      0x28d6000000000000ULL,
+      0x00000000500c0000ULL,
+      0x2c06000000000000ULL,
+      -1ULL
+    }
+#endif
+  },
+  { "xori", TILEGX_OPC_XORI, 0x3, 3, TREG_ZERO, 1,
+    { { 8, 9, 0 }, { 6, 7, 1 }, { 0, }, { 0, }, { 0, } },
+#ifndef DISASM_ONLY
+    {
+      0xc00000007ff00000ULL,
+      0xfff8000000000000ULL,
+      0ULL,
+      0ULL,
+      0ULL
+    },
+    {
+      0x0000000041400000ULL,
+      0x1968000000000000ULL,
+      -1ULL,
+      -1ULL,
+      -1ULL
+    }
+#endif
+  },
+  { NULL, TILEGX_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } },
+#ifndef DISASM_ONLY
+    { 0, }, { 0, }
+#endif
+  }
+};
+#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
+#define CHILD(array_index) (TILEGX_OPC_NONE + (array_index))
+
+static const UShort decode_X0_fsm[936] =
+{
+  BITFIELD(22, 9) /* index 0 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BFEXTS,
+  TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTU,
+  TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFINS,
+  TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_MM,
+  TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(528), CHILD(578),
+  CHILD(583), CHILD(588), CHILD(593), CHILD(598), TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, CHILD(603), CHILD(620), CHILD(637), CHILD(654), CHILD(671),
+  CHILD(703), CHILD(797), CHILD(814), CHILD(831), CHILD(848), CHILD(865),
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, CHILD(889), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
+  BITFIELD(6, 2) /* index 513 */,
+  TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
+  BITFIELD(8, 2) /* index 518 */,
+  TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
+  BITFIELD(10, 2) /* index 523 */,
+  TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
+  BITFIELD(20, 2) /* index 528 */,
+  TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
+  BITFIELD(6, 2) /* index 533 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
+  BITFIELD(8, 2) /* index 538 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
+  BITFIELD(10, 2) /* index 543 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
+  BITFIELD(0, 2) /* index 548 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
+  BITFIELD(2, 2) /* index 553 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
+  BITFIELD(4, 2) /* index 558 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
+  BITFIELD(6, 2) /* index 563 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
+  BITFIELD(8, 2) /* index 568 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
+  BITFIELD(10, 2) /* index 573 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
+  BITFIELD(20, 2) /* index 578 */,
+  TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, TILEGX_OPC_ORI,
+  BITFIELD(20, 2) /* index 583 */,
+  TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI, TILEGX_OPC_V1CMPLTSI,
+  TILEGX_OPC_V1CMPLTUI,
+  BITFIELD(20, 2) /* index 588 */,
+  TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI, TILEGX_OPC_V2ADDI,
+  TILEGX_OPC_V2CMPEQI,
+  BITFIELD(20, 2) /* index 593 */,
+  TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI, TILEGX_OPC_V2MAXSI,
+  TILEGX_OPC_V2MINSI,
+  BITFIELD(20, 2) /* index 598 */,
+  TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(18, 4) /* index 603 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
+  TILEGX_OPC_AND, TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_CMPEQ,
+  TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
+  TILEGX_OPC_CMPNE, TILEGX_OPC_CMULAF, TILEGX_OPC_CMULA, TILEGX_OPC_CMULFR,
+  BITFIELD(18, 4) /* index 620 */,
+  TILEGX_OPC_CMULF, TILEGX_OPC_CMULHR, TILEGX_OPC_CMULH, TILEGX_OPC_CMUL,
+  TILEGX_OPC_CRC32_32, TILEGX_OPC_CRC32_8, TILEGX_OPC_DBLALIGN2,
+  TILEGX_OPC_DBLALIGN4, TILEGX_OPC_DBLALIGN6, TILEGX_OPC_DBLALIGN,
+  TILEGX_OPC_FDOUBLE_ADDSUB, TILEGX_OPC_FDOUBLE_ADD_FLAGS,
+  TILEGX_OPC_FDOUBLE_MUL_FLAGS, TILEGX_OPC_FDOUBLE_PACK1,
+  TILEGX_OPC_FDOUBLE_PACK2, TILEGX_OPC_FDOUBLE_SUB_FLAGS,
+  BITFIELD(18, 4) /* index 637 */,
+  TILEGX_OPC_FDOUBLE_UNPACK_MAX, TILEGX_OPC_FDOUBLE_UNPACK_MIN,
+  TILEGX_OPC_FSINGLE_ADD1, TILEGX_OPC_FSINGLE_ADDSUB2,
+  TILEGX_OPC_FSINGLE_MUL1, TILEGX_OPC_FSINGLE_MUL2, TILEGX_OPC_FSINGLE_PACK2,
+  TILEGX_OPC_FSINGLE_SUB1, TILEGX_OPC_MNZ, TILEGX_OPC_MULAX,
+  TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HS_HU, TILEGX_OPC_MULA_HS_LS,
+  TILEGX_OPC_MULA_HS_LU, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_HU_LS,
+  BITFIELD(18, 4) /* index 654 */,
+  TILEGX_OPC_MULA_HU_LU, TILEGX_OPC_MULA_LS_LS, TILEGX_OPC_MULA_LS_LU,
+  TILEGX_OPC_MULA_LU_LU, TILEGX_OPC_MULX, TILEGX_OPC_MUL_HS_HS,
+  TILEGX_OPC_MUL_HS_HU, TILEGX_OPC_MUL_HS_LS, TILEGX_OPC_MUL_HS_LU,
+  TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_HU_LS, TILEGX_OPC_MUL_HU_LU,
+  TILEGX_OPC_MUL_LS_LS, TILEGX_OPC_MUL_LS_LU, TILEGX_OPC_MUL_LU_LU,
+  TILEGX_OPC_MZ,
+  BITFIELD(18, 4) /* index 671 */,
+  TILEGX_OPC_NOR, CHILD(688), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
+  TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
+  TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
+  TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_SHUFFLEBYTES,
+  TILEGX_OPC_SUBXSC,
+  BITFIELD(12, 2) /* index 688 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(693),
+  BITFIELD(14, 2) /* index 693 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(698),
+  BITFIELD(16, 2) /* index 698 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
+  BITFIELD(18, 4) /* index 703 */,
+  TILEGX_OPC_SUBX, TILEGX_OPC_SUB, CHILD(720), TILEGX_OPC_V1ADDUC,
+  TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADIFFU, TILEGX_OPC_V1AVGU,
+  TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
+  TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
+  TILEGX_OPC_V1DDOTPUSA, TILEGX_OPC_V1DDOTPUS, TILEGX_OPC_V1DOTPA,
+  BITFIELD(12, 4) /* index 720 */,
+  TILEGX_OPC_NONE, CHILD(737), CHILD(742), CHILD(747), CHILD(752), CHILD(757),
+  CHILD(762), CHILD(767), CHILD(772), CHILD(777), CHILD(782), CHILD(787),
+  CHILD(792), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 737 */,
+  TILEGX_OPC_CLZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 742 */,
+  TILEGX_OPC_CTZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 747 */,
+  TILEGX_OPC_FNOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 752 */,
+  TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 757 */,
+  TILEGX_OPC_NOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 762 */,
+  TILEGX_OPC_PCNT, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 767 */,
+  TILEGX_OPC_REVBITS, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 772 */,
+  TILEGX_OPC_REVBYTES, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 777 */,
+  TILEGX_OPC_TBLIDXB0, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 782 */,
+  TILEGX_OPC_TBLIDXB1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 787 */,
+  TILEGX_OPC_TBLIDXB2, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(16, 2) /* index 792 */,
+  TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(18, 4) /* index 797 */,
+  TILEGX_OPC_V1DOTPUSA, TILEGX_OPC_V1DOTPUS, TILEGX_OPC_V1DOTP,
+  TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1MAXU,
+  TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MULTU, TILEGX_OPC_V1MULUS,
+  TILEGX_OPC_V1MULU, TILEGX_OPC_V1MZ, TILEGX_OPC_V1SADAU, TILEGX_OPC_V1SADU,
+  TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS,
+  BITFIELD(18, 4) /* index 814 */,
+  TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC, TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC,
+  TILEGX_OPC_V2ADD, TILEGX_OPC_V2ADIFFS, TILEGX_OPC_V2AVGS,
+  TILEGX_OPC_V2CMPEQ, TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU,
+  TILEGX_OPC_V2CMPLTS, TILEGX_OPC_V2CMPLTU, TILEGX_OPC_V2CMPNE,
+  TILEGX_OPC_V2DOTPA, TILEGX_OPC_V2DOTP, TILEGX_OPC_V2INT_H,
+  BITFIELD(18, 4) /* index 831 */,
+  TILEGX_OPC_V2INT_L, TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ,
+  TILEGX_OPC_V2MULFSC, TILEGX_OPC_V2MULS, TILEGX_OPC_V2MULTS, TILEGX_OPC_V2MZ,
+  TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
+  TILEGX_OPC_V2SADAS, TILEGX_OPC_V2SADAU, TILEGX_OPC_V2SADS,
+  TILEGX_OPC_V2SADU, TILEGX_OPC_V2SHLSC,
+  BITFIELD(18, 4) /* index 848 */,
+  TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU, TILEGX_OPC_V2SUBSC,
+  TILEGX_OPC_V2SUB, TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
+  TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
+  TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
+  TILEGX_OPC_V4SUB,
+  BITFIELD(18, 3) /* index 865 */,
+  CHILD(874), CHILD(877), CHILD(880), CHILD(883), CHILD(886), TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(21, 1) /* index 874 */,
+  TILEGX_OPC_XOR, TILEGX_OPC_NONE,
+  BITFIELD(21, 1) /* index 877 */,
+  TILEGX_OPC_V1DDOTPUA, TILEGX_OPC_NONE,
+  BITFIELD(21, 1) /* index 880 */,
+  TILEGX_OPC_V1DDOTPU, TILEGX_OPC_NONE,
+  BITFIELD(21, 1) /* index 883 */,
+  TILEGX_OPC_V1DOTPUA, TILEGX_OPC_NONE,
+  BITFIELD(21, 1) /* index 886 */,
+  TILEGX_OPC_V1DOTPU, TILEGX_OPC_NONE,
+  BITFIELD(18, 4) /* index 889 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
+  TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
+  TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
+  TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE,
+  BITFIELD(0, 2) /* index 906 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(911),
+  BITFIELD(2, 2) /* index 911 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(916),
+  BITFIELD(4, 2) /* index 916 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(921),
+  BITFIELD(6, 2) /* index 921 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(926),
+  BITFIELD(8, 2) /* index 926 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(931),
+  BITFIELD(10, 2) /* index 931 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  TILEGX_OPC_INFOL,
+};
+
+/*
+ * Decode FSM for the X1 pipe of a TILE-Gx instruction bundle.
+ * The array is a flattened decision tree:
+ *   - A BITFIELD(b, w) entry starts a node that switches on a w-bit
+ *     field (presumably at bit offset b of the bundle -- confirm against
+ *     the BITFIELD macro defined earlier in this file); it is followed by
+ *     exactly 2^w entries, one per field value (e.g. BITFIELD(53, 9) at
+ *     index 0 is followed by 512 entries, so the next node starts at
+ *     index 513).
+ *   - A CHILD(i) entry jumps to the node beginning at array index i;
+ *     the i values correspond to the "index N" comments below.
+ *   - A TILEGX_OPC_* entry is a leaf naming the decoded opcode.
+ * NOTE(review): this table looks machine-generated; regenerate rather
+ * than hand-edit.
+ */
+static const UShort decode_X1_fsm[1266] =
+{
+  BITFIELD(53, 9) /* index 0 */,
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
+  CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BEQZT,
+  TILEGX_OPC_BEQZT, TILEGX_OPC_BEQZ, TILEGX_OPC_BEQZ, TILEGX_OPC_BGEZT,
+  TILEGX_OPC_BGEZT, TILEGX_OPC_BGEZ, TILEGX_OPC_BGEZ, TILEGX_OPC_BGTZT,
+  TILEGX_OPC_BGTZT, TILEGX_OPC_BGTZ, TILEGX_OPC_BGTZ, TILEGX_OPC_BLBCT,
+  TILEGX_OPC_BLBCT, TILEGX_OPC_BLBC, TILEGX_OPC_BLBC, TILEGX_OPC_BLBST,
+  TILEGX_OPC_BLBST, TILEGX_OPC_BLBS, TILEGX_OPC_BLBS, TILEGX_OPC_BLEZT,
+  TILEGX_OPC_BLEZT, TILEGX_OPC_BLEZ, TILEGX_OPC_BLEZ, TILEGX_OPC_BLTZT,
+  TILEGX_OPC_BLTZT, TILEGX_OPC_BLTZ, TILEGX_OPC_BLTZ, TILEGX_OPC_BNEZT,
+  TILEGX_OPC_BNEZT, TILEGX_OPC_BNEZ, TILEGX_OPC_BNEZ, CHILD(528), CHILD(578),
+  CHILD(598), CHILD(703), CHILD(723), CHILD(728), CHILD(753), CHILD(758),
+  CHILD(763), CHILD(768), CHILD(773), CHILD(778), TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_JAL,
+  TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
+  TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
+  TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
+  TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
+  TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
+  TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
+  TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
+  TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_J, TILEGX_OPC_J,
+  TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
+  TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
+  TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
+  TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
+  TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
+  TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
+  CHILD(783), CHILD(800), CHILD(832), CHILD(849), CHILD(1168), CHILD(1185),
+  CHILD(1202), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1219), TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236), CHILD(1236),
+  CHILD(1236),
+  BITFIELD(37, 2) /* index 513 */,
+  TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
+  BITFIELD(39, 2) /* index 518 */,
+  TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
+  BITFIELD(41, 2) /* index 523 */,
+  TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
+  BITFIELD(51, 2) /* index 528 */,
+  TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
+  BITFIELD(37, 2) /* index 533 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
+  BITFIELD(39, 2) /* index 538 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
+  BITFIELD(41, 2) /* index 543 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
+  BITFIELD(31, 2) /* index 548 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
+  BITFIELD(33, 2) /* index 553 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
+  BITFIELD(35, 2) /* index 558 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
+  BITFIELD(37, 2) /* index 563 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
+  BITFIELD(39, 2) /* index 568 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
+  BITFIELD(41, 2) /* index 573 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
+  BITFIELD(51, 2) /* index 578 */,
+  TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, CHILD(583),
+  BITFIELD(31, 2) /* index 583 */,
+  TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(588),
+  BITFIELD(33, 2) /* index 588 */,
+  TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(593),
+  BITFIELD(35, 2) /* index 593 */,
+  TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD,
+  TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
+  BITFIELD(51, 2) /* index 598 */,
+  CHILD(603), CHILD(618), CHILD(633), CHILD(648),
+  BITFIELD(31, 2) /* index 603 */,
+  TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(608),
+  BITFIELD(33, 2) /* index 608 */,
+  TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(613),
+  BITFIELD(35, 2) /* index 613 */,
+  TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD,
+  TILEGX_OPC_PREFETCH_ADD_L1,
+  BITFIELD(31, 2) /* index 618 */,
+  TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(623),
+  BITFIELD(33, 2) /* index 623 */,
+  TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(628),
+  BITFIELD(35, 2) /* index 628 */,
+  TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD,
+  TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
+  BITFIELD(31, 2) /* index 633 */,
+  TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(638),
+  BITFIELD(33, 2) /* index 638 */,
+  TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(643),
+  BITFIELD(35, 2) /* index 643 */,
+  TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD,
+  TILEGX_OPC_PREFETCH_ADD_L2,
+  BITFIELD(31, 2) /* index 648 */,
+  CHILD(653), CHILD(653), CHILD(653), CHILD(673),
+  BITFIELD(43, 2) /* index 653 */,
+  CHILD(658), TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD,
+  BITFIELD(45, 2) /* index 658 */,
+  CHILD(663), TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD,
+  BITFIELD(47, 2) /* index 663 */,
+  CHILD(668), TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD,
+  BITFIELD(49, 2) /* index 668 */,
+  TILEGX_OPC_LD4S_TLS, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD,
+  TILEGX_OPC_LD4S_ADD,
+  BITFIELD(33, 2) /* index 673 */,
+  CHILD(653), CHILD(653), CHILD(653), CHILD(678),
+  BITFIELD(35, 2) /* index 678 */,
+  CHILD(653), CHILD(653), CHILD(653), CHILD(683),
+  BITFIELD(43, 2) /* index 683 */,
+  CHILD(688), TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  TILEGX_OPC_PREFETCH_ADD_L3_FAULT, TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  BITFIELD(45, 2) /* index 688 */,
+  CHILD(693), TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  TILEGX_OPC_PREFETCH_ADD_L3_FAULT, TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  BITFIELD(47, 2) /* index 693 */,
+  CHILD(698), TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  TILEGX_OPC_PREFETCH_ADD_L3_FAULT, TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  BITFIELD(49, 2) /* index 698 */,
+  TILEGX_OPC_LD4S_TLS, TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  TILEGX_OPC_PREFETCH_ADD_L3_FAULT, TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  BITFIELD(51, 2) /* index 703 */,
+  CHILD(708), TILEGX_OPC_LDNT1S_ADD, TILEGX_OPC_LDNT1U_ADD,
+  TILEGX_OPC_LDNT2S_ADD,
+  BITFIELD(31, 2) /* index 708 */,
+  TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(713),
+  BITFIELD(33, 2) /* index 713 */,
+  TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(718),
+  BITFIELD(35, 2) /* index 718 */,
+  TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD,
+  TILEGX_OPC_PREFETCH_ADD_L3,
+  BITFIELD(51, 2) /* index 723 */,
+  TILEGX_OPC_LDNT2U_ADD, TILEGX_OPC_LDNT4S_ADD, TILEGX_OPC_LDNT4U_ADD,
+  TILEGX_OPC_LDNT_ADD,
+  BITFIELD(51, 2) /* index 728 */,
+  CHILD(733), TILEGX_OPC_LDNA_ADD, TILEGX_OPC_MFSPR, TILEGX_OPC_MTSPR,
+  BITFIELD(43, 2) /* index 733 */,
+  CHILD(738), TILEGX_OPC_LD_ADD, TILEGX_OPC_LD_ADD, TILEGX_OPC_LD_ADD,
+  BITFIELD(45, 2) /* index 738 */,
+  CHILD(743), TILEGX_OPC_LD_ADD, TILEGX_OPC_LD_ADD, TILEGX_OPC_LD_ADD,
+  BITFIELD(47, 2) /* index 743 */,
+  CHILD(748), TILEGX_OPC_LD_ADD, TILEGX_OPC_LD_ADD, TILEGX_OPC_LD_ADD,
+  BITFIELD(49, 2) /* index 748 */,
+  TILEGX_OPC_LD_TLS, TILEGX_OPC_LD_ADD, TILEGX_OPC_LD_ADD, TILEGX_OPC_LD_ADD,
+  BITFIELD(51, 2) /* index 753 */,
+  TILEGX_OPC_ORI, TILEGX_OPC_ST1_ADD, TILEGX_OPC_ST2_ADD, TILEGX_OPC_ST4_ADD,
+  BITFIELD(51, 2) /* index 758 */,
+  TILEGX_OPC_STNT1_ADD, TILEGX_OPC_STNT2_ADD, TILEGX_OPC_STNT4_ADD,
+  TILEGX_OPC_STNT_ADD,
+  BITFIELD(51, 2) /* index 763 */,
+  TILEGX_OPC_ST_ADD, TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI,
+  TILEGX_OPC_V1CMPLTSI,
+  BITFIELD(51, 2) /* index 768 */,
+  TILEGX_OPC_V1CMPLTUI, TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI,
+  TILEGX_OPC_V2ADDI,
+  BITFIELD(51, 2) /* index 773 */,
+  TILEGX_OPC_V2CMPEQI, TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI,
+  TILEGX_OPC_V2MAXSI,
+  BITFIELD(51, 2) /* index 778 */,
+  TILEGX_OPC_V2MINSI, TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(49, 4) /* index 783 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
+  TILEGX_OPC_AND, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPEXCH4, TILEGX_OPC_CMPEXCH,
+  TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
+  TILEGX_OPC_CMPNE, TILEGX_OPC_DBLALIGN2, TILEGX_OPC_DBLALIGN4,
+  TILEGX_OPC_DBLALIGN6,
+  BITFIELD(49, 4) /* index 800 */,
+  TILEGX_OPC_EXCH4, TILEGX_OPC_EXCH, TILEGX_OPC_FETCHADD4,
+  TILEGX_OPC_FETCHADDGEZ4, TILEGX_OPC_FETCHADDGEZ, TILEGX_OPC_FETCHADD,
+  TILEGX_OPC_FETCHAND4, TILEGX_OPC_FETCHAND, TILEGX_OPC_FETCHOR4,
+  TILEGX_OPC_FETCHOR, TILEGX_OPC_MNZ, TILEGX_OPC_MZ, TILEGX_OPC_NOR,
+  CHILD(817), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
+  BITFIELD(43, 2) /* index 817 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(822),
+  BITFIELD(45, 2) /* index 822 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(827),
+  BITFIELD(47, 2) /* index 827 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
+  BITFIELD(49, 4) /* index 832 */,
+  TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
+  TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
+  TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_ST1,
+  TILEGX_OPC_ST2, TILEGX_OPC_ST4, TILEGX_OPC_STNT1, TILEGX_OPC_STNT2,
+  TILEGX_OPC_STNT4,
+  BITFIELD(46, 7) /* index 849 */,
+  TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
+  TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
+  TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST,
+  TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_SUBXSC,
+  TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC,
+  TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBX,
+  TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX,
+  TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
+  TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB,
+  TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, CHILD(978), CHILD(987),
+  CHILD(1066), CHILD(1150), CHILD(1159), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
+  TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
+  TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
+  TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
+  TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
+  TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
+  TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
+  TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
+  TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
+  TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
+  TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
+  TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
+  TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
+  TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
+  TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
+  TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
+  TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
+  TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
+  TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
+  TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
+  TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
+  TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
+  TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
+  TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
+  TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
+  TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
+  BITFIELD(43, 3) /* index 978 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_DRAIN, TILEGX_OPC_DTLBPR, TILEGX_OPC_FINV,
+  TILEGX_OPC_FLUSHWB, TILEGX_OPC_FLUSH, TILEGX_OPC_FNOP, TILEGX_OPC_ICOH,
+  BITFIELD(43, 3) /* index 987 */,
+  CHILD(996), TILEGX_OPC_INV, TILEGX_OPC_IRET, TILEGX_OPC_JALRP,
+  TILEGX_OPC_JALR, TILEGX_OPC_JRP, TILEGX_OPC_JR, CHILD(1051),
+  BITFIELD(31, 2) /* index 996 */,
+  CHILD(1001), CHILD(1026), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
+  BITFIELD(33, 2) /* index 1001 */,
+  TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(1006),
+  BITFIELD(35, 2) /* index 1006 */,
+  TILEGX_OPC_ILL, CHILD(1011), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
+  BITFIELD(37, 2) /* index 1011 */,
+  TILEGX_OPC_ILL, CHILD(1016), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
+  BITFIELD(39, 2) /* index 1016 */,
+  TILEGX_OPC_ILL, CHILD(1021), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
+  BITFIELD(41, 2) /* index 1021 */,
+  TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_BPT, TILEGX_OPC_ILL,
+  BITFIELD(33, 2) /* index 1026 */,
+  TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(1031),
+  BITFIELD(35, 2) /* index 1031 */,
+  TILEGX_OPC_ILL, CHILD(1036), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
+  BITFIELD(37, 2) /* index 1036 */,
+  TILEGX_OPC_ILL, CHILD(1041), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
+  BITFIELD(39, 2) /* index 1041 */,
+  TILEGX_OPC_ILL, CHILD(1046), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
+  BITFIELD(41, 2) /* index 1046 */,
+  TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_RAISE, TILEGX_OPC_ILL,
+  BITFIELD(31, 2) /* index 1051 */,
+  TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(1056),
+  BITFIELD(33, 2) /* index 1056 */,
+  TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(1061),
+  BITFIELD(35, 2) /* index 1061 */,
+  TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
+  TILEGX_OPC_PREFETCH_L1_FAULT,
+  BITFIELD(43, 3) /* index 1066 */,
+  CHILD(1075), CHILD(1090), CHILD(1105), CHILD(1120), CHILD(1135),
+  TILEGX_OPC_LDNA, TILEGX_OPC_LDNT1S, TILEGX_OPC_LDNT1U,
+  BITFIELD(31, 2) /* index 1075 */,
+  TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1080),
+  BITFIELD(33, 2) /* index 1080 */,
+  TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1085),
+  BITFIELD(35, 2) /* index 1085 */,
+  TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
+  BITFIELD(31, 2) /* index 1090 */,
+  TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1095),
+  BITFIELD(33, 2) /* index 1095 */,
+  TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1100),
+  BITFIELD(35, 2) /* index 1100 */,
+  TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
+  TILEGX_OPC_PREFETCH_L2_FAULT,
+  BITFIELD(31, 2) /* index 1105 */,
+  TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1110),
+  BITFIELD(33, 2) /* index 1110 */,
+  TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1115),
+  BITFIELD(35, 2) /* index 1115 */,
+  TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
+  BITFIELD(31, 2) /* index 1120 */,
+  TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1125),
+  BITFIELD(33, 2) /* index 1125 */,
+  TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1130),
+  BITFIELD(35, 2) /* index 1130 */,
+  TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S,
+  TILEGX_OPC_PREFETCH_L3_FAULT,
+  BITFIELD(31, 2) /* index 1135 */,
+  TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1140),
+  BITFIELD(33, 2) /* index 1140 */,
+  TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1145),
+  BITFIELD(35, 2) /* index 1145 */,
+  TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
+  BITFIELD(43, 3) /* index 1150 */,
+  TILEGX_OPC_LDNT2S, TILEGX_OPC_LDNT2U, TILEGX_OPC_LDNT4S, TILEGX_OPC_LDNT4U,
+  TILEGX_OPC_LDNT, TILEGX_OPC_LD, TILEGX_OPC_LNK, TILEGX_OPC_MF,
+  BITFIELD(43, 3) /* index 1159 */,
+  TILEGX_OPC_NAP, TILEGX_OPC_NOP, TILEGX_OPC_SWINT0, TILEGX_OPC_SWINT1,
+  TILEGX_OPC_SWINT2, TILEGX_OPC_SWINT3, TILEGX_OPC_WH64, TILEGX_OPC_NONE,
+  BITFIELD(49, 4) /* index 1168 */,
+  TILEGX_OPC_V1MAXU, TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MZ,
+  TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS, TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC,
+  TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC, TILEGX_OPC_V2ADD, TILEGX_OPC_V2CMPEQ,
+  TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU, TILEGX_OPC_V2CMPLTS,
+  TILEGX_OPC_V2CMPLTU,
+  BITFIELD(49, 4) /* index 1185 */,
+  TILEGX_OPC_V2CMPNE, TILEGX_OPC_V2INT_H, TILEGX_OPC_V2INT_L,
+  TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ, TILEGX_OPC_V2MZ,
+  TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
+  TILEGX_OPC_V2SHLSC, TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU,
+  TILEGX_OPC_V2SUBSC, TILEGX_OPC_V2SUB,
+  BITFIELD(49, 4) /* index 1202 */,
+  TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
+  TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
+  TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
+  TILEGX_OPC_V4SUB, TILEGX_OPC_XOR, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(49, 4) /* index 1219 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
+  TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
+  TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
+  TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE,
+  BITFIELD(31, 2) /* index 1236 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(1241),
+  BITFIELD(33, 2) /* index 1241 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(1246),
+  BITFIELD(35, 2) /* index 1246 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(1251),
+  BITFIELD(37, 2) /* index 1251 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(1256),
+  BITFIELD(39, 2) /* index 1256 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  CHILD(1261),
+  BITFIELD(41, 2) /* index 1261 */,
+  TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
+  TILEGX_OPC_INFOL,
+};
+
+/*
+ * Decode FSM for the Y0 pipe of a TILE-Gx bundle.  Same flattened
+ * decision-tree layout as the other decode_*_fsm tables: each
+ * BITFIELD(b, w) node is followed by 2^w entries, CHILD(i) jumps to the
+ * node at array index i (matching the "index N" comments), and
+ * TILEGX_OPC_* entries are opcode leaves.  NOTE(review): machine-generated
+ * table -- regenerate rather than hand-edit.
+ */
+static const UShort decode_Y0_fsm[178] =
+{
+  BITFIELD(27, 4) /* index 0 */,
+  CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
+  TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(118), CHILD(123),
+  CHILD(128), CHILD(133), CHILD(153), CHILD(158), CHILD(163), CHILD(168),
+  CHILD(173),
+  BITFIELD(6, 2) /* index 17 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
+  BITFIELD(8, 2) /* index 22 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
+  BITFIELD(10, 2) /* index 27 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
+  BITFIELD(0, 2) /* index 32 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
+  BITFIELD(2, 2) /* index 37 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
+  BITFIELD(4, 2) /* index 42 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
+  BITFIELD(6, 2) /* index 47 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
+  BITFIELD(8, 2) /* index 52 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
+  BITFIELD(10, 2) /* index 57 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
+  BITFIELD(18, 2) /* index 62 */,
+  TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
+  BITFIELD(15, 5) /* index 67 */,
+  TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
+  TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
+  TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD,
+  TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
+  TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
+  TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
+  TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
+  TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(100),
+  CHILD(109), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(12, 3) /* index 100 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_CLZ, TILEGX_OPC_CTZ, TILEGX_OPC_FNOP,
+  TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NOP, TILEGX_OPC_PCNT,
+  TILEGX_OPC_REVBITS,
+  BITFIELD(12, 3) /* index 109 */,
+  TILEGX_OPC_REVBYTES, TILEGX_OPC_TBLIDXB0, TILEGX_OPC_TBLIDXB1,
+  TILEGX_OPC_TBLIDXB2, TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  TILEGX_OPC_NONE,
+  BITFIELD(18, 2) /* index 118 */,
+  TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
+  BITFIELD(18, 2) /* index 123 */,
+  TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE, TILEGX_OPC_MULAX, TILEGX_OPC_MULX,
+  BITFIELD(18, 2) /* index 128 */,
+  TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
+  BITFIELD(18, 2) /* index 133 */,
+  TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(138), TILEGX_OPC_XOR,
+  BITFIELD(12, 2) /* index 138 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(143),
+  BITFIELD(14, 2) /* index 143 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(148),
+  BITFIELD(16, 2) /* index 148 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
+  BITFIELD(18, 2) /* index 153 */,
+  TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
+  BITFIELD(18, 2) /* index 158 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
+  TILEGX_OPC_SHL3ADDX,
+  BITFIELD(18, 2) /* index 163 */,
+  TILEGX_OPC_MUL_HS_HS, TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_LS_LS,
+  TILEGX_OPC_MUL_LU_LU,
+  BITFIELD(18, 2) /* index 168 */,
+  TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_LS_LS,
+  TILEGX_OPC_MULA_LU_LU,
+  BITFIELD(18, 2) /* index 173 */,
+  TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
+};
+
+/*
+ * Decode FSM for the Y1 pipe of a TILE-Gx bundle.  Same flattened
+ * decision-tree layout as the other decode_*_fsm tables: each
+ * BITFIELD(b, w) node is followed by 2^w entries, CHILD(i) jumps to the
+ * node at array index i (matching the "index N" comments), and
+ * TILEGX_OPC_* entries are opcode leaves.  NOTE(review): machine-generated
+ * table -- regenerate rather than hand-edit.
+ */
+static const UShort decode_Y1_fsm[167] =
+{
+  BITFIELD(58, 4) /* index 0 */,
+  TILEGX_OPC_NONE, CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
+  TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(117), CHILD(122),
+  CHILD(127), CHILD(132), CHILD(152), CHILD(157), CHILD(162), TILEGX_OPC_NONE,
+  BITFIELD(37, 2) /* index 17 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
+  BITFIELD(39, 2) /* index 22 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
+  BITFIELD(41, 2) /* index 27 */,
+  TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
+  BITFIELD(31, 2) /* index 32 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
+  BITFIELD(33, 2) /* index 37 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
+  BITFIELD(35, 2) /* index 42 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
+  BITFIELD(37, 2) /* index 47 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
+  BITFIELD(39, 2) /* index 52 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
+  BITFIELD(41, 2) /* index 57 */,
+  TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
+  BITFIELD(49, 2) /* index 62 */,
+  TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
+  BITFIELD(47, 4) /* index 67 */,
+  TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
+  TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
+  TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD,
+  TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(84),
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
+  BITFIELD(43, 3) /* index 84 */,
+  CHILD(93), CHILD(96), CHILD(99), CHILD(102), CHILD(105), CHILD(108),
+  CHILD(111), CHILD(114),
+  BITFIELD(46, 1) /* index 93 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_FNOP,
+  BITFIELD(46, 1) /* index 96 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_ILL,
+  BITFIELD(46, 1) /* index 99 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_JALRP,
+  BITFIELD(46, 1) /* index 102 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_JALR,
+  BITFIELD(46, 1) /* index 105 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_JRP,
+  BITFIELD(46, 1) /* index 108 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_JR,
+  BITFIELD(46, 1) /* index 111 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_LNK,
+  BITFIELD(46, 1) /* index 114 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_NOP,
+  BITFIELD(49, 2) /* index 117 */,
+  TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
+  BITFIELD(49, 2) /* index 122 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE,
+  BITFIELD(49, 2) /* index 127 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
+  BITFIELD(49, 2) /* index 132 */,
+  TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(137), TILEGX_OPC_XOR,
+  BITFIELD(43, 2) /* index 137 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(142),
+  BITFIELD(45, 2) /* index 142 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(147),
+  BITFIELD(47, 2) /* index 147 */,
+  TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
+  BITFIELD(49, 2) /* index 152 */,
+  TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
+  BITFIELD(49, 2) /* index 157 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
+  TILEGX_OPC_SHL3ADDX,
+  BITFIELD(49, 2) /* index 162 */,
+  TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
+};
+
+/* Decode FSM for the Y2 (memory) pipeline.  The array is a packed
+   decision tree: each BITFIELD(pos, len) node is followed by 2^len
+   entries, each either a terminal opcode (values <= TILEGX_OPC_NONE)
+   or a CHILD(n) link to the node at array offset n (apparently
+   encoded as TILEGX_OPC_NONE + n -- see the interpreter in
+   find_opcode() below).  BITFIELD and CHILD are #undef'd right after
+   the tables. */
+static const UShort decode_Y2_fsm[118] =
+{
+  BITFIELD(62, 2) /* index 0 */,
+  TILEGX_OPC_NONE, CHILD(5), CHILD(66), CHILD(109),
+  BITFIELD(55, 3) /* index 5 */,
+  CHILD(14), CHILD(14), CHILD(14), CHILD(17), CHILD(40), CHILD(40), CHILD(40),
+  CHILD(43),
+  BITFIELD(26, 1) /* index 14 */,
+  TILEGX_OPC_LD1S, TILEGX_OPC_LD1U,
+  BITFIELD(26, 1) /* index 17 */,
+  CHILD(20), CHILD(30),
+  BITFIELD(51, 2) /* index 20 */,
+  TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(25),
+  BITFIELD(53, 2) /* index 25 */,
+  TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
+  TILEGX_OPC_PREFETCH_L1_FAULT,
+  BITFIELD(51, 2) /* index 30 */,
+  TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(35),
+  BITFIELD(53, 2) /* index 35 */,
+  TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
+  BITFIELD(26, 1) /* index 40 */,
+  TILEGX_OPC_LD2S, TILEGX_OPC_LD2U,
+  BITFIELD(26, 1) /* index 43 */,
+  CHILD(46), CHILD(56),
+  BITFIELD(51, 2) /* index 46 */,
+  TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(51),
+  BITFIELD(53, 2) /* index 51 */,
+  TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
+  TILEGX_OPC_PREFETCH_L2_FAULT,
+  BITFIELD(51, 2) /* index 56 */,
+  TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(61),
+  BITFIELD(53, 2) /* index 61 */,
+  TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
+  BITFIELD(56, 2) /* index 66 */,
+  CHILD(71), CHILD(74), CHILD(90), CHILD(93),
+  BITFIELD(26, 1) /* index 71 */,
+  TILEGX_OPC_NONE, TILEGX_OPC_LD4S,
+  BITFIELD(26, 1) /* index 74 */,
+  TILEGX_OPC_NONE, CHILD(77),
+  BITFIELD(51, 2) /* index 77 */,
+  TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(82),
+  BITFIELD(53, 2) /* index 82 */,
+  TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(87),
+  BITFIELD(55, 1) /* index 87 */,
+  TILEGX_OPC_LD4S, TILEGX_OPC_PREFETCH_L3_FAULT,
+  BITFIELD(26, 1) /* index 90 */,
+  TILEGX_OPC_LD4U, TILEGX_OPC_LD,
+  BITFIELD(26, 1) /* index 93 */,
+  CHILD(96), TILEGX_OPC_LD,
+  BITFIELD(51, 2) /* index 96 */,
+  TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(101),
+  BITFIELD(53, 2) /* index 101 */,
+  TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(106),
+  BITFIELD(55, 1) /* index 106 */,
+  TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
+  BITFIELD(26, 1) /* index 109 */,
+  CHILD(112), CHILD(115),
+  BITFIELD(57, 1) /* index 112 */,
+  TILEGX_OPC_ST1, TILEGX_OPC_ST4,
+  BITFIELD(57, 1) /* index 115 */,
+  TILEGX_OPC_ST2, TILEGX_OPC_ST,
+};
+
+#undef BITFIELD
+#undef CHILD
+/* One decoder FSM per pipeline, indexed by tilegx_pipeline in the
+   order X0, X1, Y0, Y1, Y2; consumed by find_opcode() below. */
+const UShort * const
+tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS] =
+{
+  decode_X0_fsm,
+  decode_X1_fsm,
+  decode_Y0_fsm,
+  decode_Y1_fsm,
+  decode_Y2_fsm
+};
+/* Operand descriptor table, indexed by the operand numbers stored in
+   tilegx_opcode.operands[pipe][].  Each entry gives: the operand
+   type, its BFD relocation, six numeric properties (the first is the
+   field width in bits; the remaining five presumably encode
+   signedness, register read/write flags, PC-relativity and scaling
+   in the field order of struct tilegx_operand as declared in
+   tilegx_disasm.h -- confirm against that declaration; note the
+   branch/jump entries carry TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES as
+   their final property), and finally the insert (create_*) and
+   extract (get_*) accessors for the bundle field. */
+const struct tilegx_operand tilegx_operands[35] =
+{
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X0),
+    8, 1, 0, 0, 0, 0,
+    create_Imm8_X0, get_Imm8_X0
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X1),
+    8, 1, 0, 0, 0, 0,
+    create_Imm8_X1, get_Imm8_X1
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y0),
+    8, 1, 0, 0, 0, 0,
+    create_Imm8_Y0, get_Imm8_Y0
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y1),
+    8, 1, 0, 0, 0, 0,
+    create_Imm8_Y1, get_Imm8_Y1
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X0_HW0_LAST),
+    16, 1, 0, 0, 0, 0,
+    create_Imm16_X0, get_Imm16_X0
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X1_HW0_LAST),
+    16, 1, 0, 0, 0, 0,
+    create_Imm16_X1, get_Imm16_X1
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 0, 1, 0, 0,
+    create_Dest_X1, get_Dest_X1
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcA_X1, get_SrcA_X1
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 0, 1, 0, 0,
+    create_Dest_X0, get_Dest_X0
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcA_X0, get_SrcA_X0
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 0, 1, 0, 0,
+    create_Dest_Y0, get_Dest_Y0
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcA_Y0, get_SrcA_Y0
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 0, 1, 0, 0,
+    create_Dest_Y1, get_Dest_Y1
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcA_Y1, get_SrcA_Y1
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcA_Y2, get_SrcA_Y2
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 1, 0, 0,
+    create_SrcA_X1, get_SrcA_X1
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcB_X0, get_SrcB_X0
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcB_X1, get_SrcB_X1
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcB_Y0, get_SrcB_Y0
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcB_Y1, get_SrcB_Y1
+  },
+  {
+    TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_BROFF_X1),
+    17, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
+    create_BrOff_X1, get_BrOff_X1
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_MMSTART_X0),
+    6, 0, 0, 0, 0, 0,
+    create_BFStart_X0, get_BFStart_X0
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_MMEND_X0),
+    6, 0, 0, 0, 0, 0,
+    create_BFEnd_X0, get_BFEnd_X0
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 1, 0, 0,
+    create_Dest_X0, get_Dest_X0
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 1, 0, 0,
+    create_Dest_Y0, get_Dest_Y0
+  },
+  {
+    TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_JUMPOFF_X1),
+    27, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
+    create_JumpOff_X1, get_JumpOff_X1
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 0, 1, 0, 0,
+    create_SrcBDest_Y2, get_SrcBDest_Y2
+  },
+  {
+    TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MF_IMM14_X1),
+    14, 0, 0, 0, 0, 0,
+    create_MF_Imm14_X1, get_MF_Imm14_X1
+  },
+  {
+    TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MT_IMM14_X1),
+    14, 0, 0, 0, 0, 0,
+    create_MT_Imm14_X1, get_MT_Imm14_X1
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X0),
+    6, 0, 0, 0, 0, 0,
+    create_ShAmt_X0, get_ShAmt_X0
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X1),
+    6, 0, 0, 0, 0, 0,
+    create_ShAmt_X1, get_ShAmt_X1
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y0),
+    6, 0, 0, 0, 0, 0,
+    create_ShAmt_Y0, get_ShAmt_Y0
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y1),
+    6, 0, 0, 0, 0, 0,
+    create_ShAmt_Y1, get_ShAmt_Y1
+  },
+  {
+    TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
+    6, 0, 1, 0, 0, 0,
+    create_SrcBDest_Y2, get_SrcBDest_Y2
+  },
+  {
+    TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_DEST_IMM8_X1),
+    8, 1, 0, 0, 0, 0,
+    create_Dest_Imm8_X1, get_Dest_Imm8_X1
+  }
+};
+
+
+/* Given a set of bundle bits and a specific pipe, returns which
+ * instruction the bundle contains in that pipe.
+ */
+const struct tilegx_opcode *
+find_opcode ( tilegx_bundle_bits bits, tilegx_pipeline pipe )
+{
+  const UShort *table = tilegx_bundle_decoder_fsms[pipe];
+  Int index = 0;
+
+  /* Walk the FSM decision tree.  Each node is a bitspec: the low 6
+     bits give the bit position to start extracting at, the remaining
+     high bits give the mask to apply after shifting.  The extracted
+     field selects one of the entries immediately following the node. */
+  while (1)
+  {
+    UShort bitspec = table[index];
+    UInt bitfield =
+      ((UInt)(bits >> (bitspec & 63))) & (bitspec >> 6);
+
+    UShort next = table[index + 1 + bitfield];
+    /* Entries <= TILEGX_OPC_NONE are terminal opcodes; larger values
+       are CHILD links, encoded as TILEGX_OPC_NONE + node offset. */
+    if (next <= TILEGX_OPC_NONE)
+      return &tilegx_opcodes[next];
+
+    index = next - TILEGX_OPC_NONE;
+  }
+}
+
+
+/* Decode every instruction in one bundle.  'bits' is the raw 64-bit
+   bundle, 'pc' its address (used to materialise PC-relative branch
+   targets).  Decoded instructions are written to 'decoded', followed
+   by a NULL-opcode terminator entry; returns the number decoded
+   (2 in X mode, 3 in Y mode).
+   NOTE(review): when all three Y pipes decode, the terminator is
+   written to decoded[3], one past
+   TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE -- callers appear to need at
+   least 4 slots; confirm against the call sites. */
+int
+parse_insn_tilegx ( tilegx_bundle_bits bits,
+                    ULong pc,
+                    struct tilegx_decoded_instruction
+                    decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE] )
+{
+  Int num_instructions = 0;
+  Int pipe;
+
+  /* The top two bits of the bundle select X mode (0) or Y mode. */
+  Int min_pipe, max_pipe;
+  if ((bits & TILEGX_BUNDLE_MODE_MASK) == 0)
+  {
+    min_pipe = TILEGX_PIPELINE_X0;
+    max_pipe = TILEGX_PIPELINE_X1;
+  }
+  else
+  {
+    min_pipe = TILEGX_PIPELINE_Y0;
+    max_pipe = TILEGX_PIPELINE_Y2;
+  }
+
+  /* For each pipe, find an instruction that fits. */
+  for (pipe = min_pipe; pipe <= max_pipe; pipe++)
+  {
+    const struct tilegx_opcode *opc;
+    struct tilegx_decoded_instruction *d;
+    Int i;
+
+    d = &decoded[num_instructions++];
+    opc = find_opcode (bits, (tilegx_pipeline)pipe);
+    d->opcode = opc;
+
+    /* Decode each operand, sign extending, etc. as appropriate. */
+    for (i = 0; i < opc->num_operands; i++)
+    {
+      const struct tilegx_operand *op =
+        &tilegx_operands[opc->operands[pipe][i]];
+      Int raw_opval = op->extract (bits);
+      Long opval;
+
+      if (op->is_signed)
+      {
+        /* Sign-extend the operand.  NOTE(review): relies on
+           arithmetic right shift of a negative int, which is
+           implementation-defined (fine on VEX's target compilers). */
+        Int shift = (int)((sizeof(int) * 8) - op->num_bits);
+        raw_opval = (raw_opval << shift) >> shift;
+      }
+
+      /* Adjust PC-relative scaled branch offsets. */
+      if (op->type == TILEGX_OP_TYPE_ADDRESS)
+        opval = (raw_opval * TILEGX_BUNDLE_SIZE_IN_BYTES) + pc;
+      else
+        opval = raw_opval;
+
+      /* Record the final value. */
+      d->operands[i] = op;
+      d->operand_values[i] = opval;
+    }
+  }
+  /* NULL-opcode sentinel terminates the decoded list. */
+  decoded[num_instructions].opcode = NULL;
+  return num_instructions;
+}
+
+/* Build a bundle containing the single instruction 'opc' whose
+   'argc' operand values follow as ULong varargs; an fnop is placed
+   in the other X pipe by encode_insn_tilegx().
+   NOTE(review): the error sentinels are inconsistent --
+   (tilegx_bundle_bits)-1 for operand-count/limit failures but 0 when
+   opc > TILEGX_OPC_NONE -- and tilegx_opcodes[opc] is dereferenced
+   before that range check.  Confirm callers only pass valid opcodes
+   and distinguish the two failure values. */
+tilegx_bundle_bits mkTileGxInsn ( Int opc, Int argc, ... )
+{
+  struct tilegx_decoded_instruction decoded;
+  decoded.opcode =  &tilegx_opcodes[opc];
+  Int i;
+  va_list argv;
+
+  /* The opcode must take exactly the number of operands supplied. */
+  if (decoded.opcode->num_operands != argc)
+    return -1;
+
+  if (opc > TILEGX_OPC_NONE) return 0;
+
+  /* No TILE-Gx instruction takes more than 4 operands. */
+  if (decoded.opcode->num_operands > 4)
+    return -1;
+
+  va_start(argv, argc);
+  for (i = 0 ; i < decoded.opcode->num_operands; i++)
+  {
+    decoded.operands[i] = 0;  /* descriptor filled in by the encoder */
+    decoded.operand_values[i] = va_arg(argv, ULong);
+  }
+  va_end(argv);
+
+  return encode_insn_tilegx(decoded);
+}
+
+/* Encode one decoded instruction into an X-mode bundle, pairing it
+   with an fnop in the other X pipe.  Returns the bundle bits, or
+   (tilegx_bundle_bits)-1 if the opcode cannot issue in X0 or X1. */
+tilegx_bundle_bits
+encode_insn_tilegx ( struct tilegx_decoded_instruction decoded )
+{
+  const struct tilegx_opcode *opc =
+    decoded.opcode;
+
+  tilegx_bundle_bits insn = 0;
+  /* Pick the X pipe: presumably bit 0 of opc->pipes means "can issue
+     in X0" and bit 1 "can issue in X1" -- TODO confirm against the
+     tilegx_opcode declaration. */
+  Int pipeX01 = (opc->pipes & 0x01) ? 0 : 1;
+  Int op_num  = opc->num_operands;
+
+  /* Assume either X0 or X1. */
+  if ((opc->pipes & 3) == 0)
+    return -1;
+
+  /* Insert fnop in other pipe. */
+  insn = tilegx_opcodes[TILEGX_OPC_FNOP].
+    fixed_bit_values[pipeX01 ? 0 : 1];
+
+  /* Fixed (non-operand) bits of the chosen opcode. */
+  insn |= opc->fixed_bit_values[pipeX01];
+
+  Int i;
+  /* loop for each operand: look up its descriptor for the chosen
+     pipe and OR the operand value into the correct field. */
+  for (i = 0 ; i < op_num; i++)
+    {
+      const struct tilegx_operand *opd =
+        &tilegx_operands[opc->operands[pipeX01][i]];
+      Long  op = decoded.operand_values[i];
+      decoded.operands[i] = opd;
+      ULong x = opd->insert(op);
+      insn |= x;
+    }
+
+  return insn;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                    tilegx_disasm.c  ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/priv/tilegx_disasm.h b/VEX/priv/tilegx_disasm.h
new file mode 100644
index 0000000..701e3c5
--- /dev/null
+++ b/VEX/priv/tilegx_disasm.h
@@ -0,0 +1,1306 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin           Tilegx disassembler     tilegx-disasm.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright Tilera Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+ /* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+#ifndef __TILEGX_DISASM_H
+#define __TILEGX_DISASM_H
+
+#include "libvex_basictypes.h"
+
+/* A TILE-Gx instruction bundle is a single 64-bit word. */
+typedef ULong tilegx_bundle_bits;
+
+/* These are the bits that determine if a bundle is in the X encoding. */
+#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62)
+
+/* Architecture-wide size and layout constants. */
+enum
+{
+  /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
+  TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3,
+
+  /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
+  TILEGX_NUM_PIPELINE_ENCODINGS = 5,
+
+  /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */
+  TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3,
+
+  /* Instructions take this many bytes. */
+  TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES,
+
+  /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */
+  TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,
+
+  /* Bundles should be aligned modulo this number of bytes. */
+  TILEGX_BUNDLE_ALIGNMENT_IN_BYTES =
+    (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),
+
+  /* Number of registers (some are magic, such as network I/O). */
+  TILEGX_NUM_REGISTERS = 64,
+};
+
+/* Make a few "tile_" variables to simplify common code between
+   architectures.  */
+
+typedef tilegx_bundle_bits tile_bundle_bits;
+#define TILE_BUNDLE_SIZE_IN_BYTES TILEGX_BUNDLE_SIZE_IN_BYTES
+#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
+#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
+  TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
+
+/* 64-bit pattern for a { bpt ; nop } bundle. */
+#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
+
+/* --- Bundle field accessors ---------------------------------------
+   The get_<Field>_<Pipe> functions that follow extract a named
+   operand or opcode field from a bundle; the matching
+   create_<Field>_<Pipe> functions further down build the field's
+   bits from a value.  All are mechanical shift-and-mask helpers;
+   each get/create pair uses the same bit position and width. */
+static __inline UInt
+get_BFEnd_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline UInt
+get_BFOpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 24)) & 0xf);
+}
+
+static __inline UInt
+get_BFStart_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 18)) & 0x3f);
+}
+
+static __inline UInt
+get_BrOff_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 31)) & 0x0000003f) |
+         (((UInt)(n >> 37)) & 0x0001ffc0);
+}
+
+static __inline UInt
+get_BrType_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 54)) & 0x1f);
+}
+
+static __inline UInt
+get_Dest_Imm8_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 31)) & 0x0000003f) |
+         (((UInt)(n >> 43)) & 0x000000c0);
+}
+
+static __inline UInt
+get_Dest_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 0)) & 0x3f);
+}
+
+static __inline UInt
+get_Dest_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 31)) & 0x3f);
+}
+
+static __inline UInt
+get_Dest_Y0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 0)) & 0x3f);
+}
+
+static __inline UInt
+get_Dest_Y1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 31)) & 0x3f);
+}
+
+static __inline UInt
+get_Imm16_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0xffff);
+}
+
+static __inline UInt
+get_Imm16_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 43)) & 0xffff);
+}
+
+static __inline UInt
+get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 20)) & 0xff);
+}
+
+static __inline UInt
+get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 51)) & 0xff);
+}
+
+static __inline UInt
+get_Imm8_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0xff);
+}
+
+static __inline UInt
+get_Imm8_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 43)) & 0xff);
+}
+
+static __inline UInt
+get_Imm8_Y0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0xff);
+}
+
+static __inline UInt
+get_Imm8_Y1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 43)) & 0xff);
+}
+
+static __inline UInt
+get_JumpOff_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 31)) & 0x7ffffff);
+}
+
+static __inline UInt
+get_JumpOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 58)) & 0x1);
+}
+
+static __inline UInt
+get_MF_Imm14_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 37)) & 0x3fff);
+}
+
+static __inline UInt
+get_MT_Imm14_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 31)) & 0x0000003f) |
+         (((UInt)(n >> 37)) & 0x00003fc0);
+}
+
+static __inline UInt
+get_Mode(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 62)) & 0x3);
+}
+
+static __inline UInt
+get_Opcode_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 28)) & 0x7);
+}
+
+static __inline UInt
+get_Opcode_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 59)) & 0x7);
+}
+
+static __inline UInt
+get_Opcode_Y0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 27)) & 0xf);
+}
+
+static __inline UInt
+get_Opcode_Y1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 58)) & 0xf);
+}
+
+static __inline UInt
+get_Opcode_Y2(tilegx_bundle_bits n)
+{
+  return (((n >> 26)) & 0x00000001) |
+         (((UInt)(n >> 56)) & 0x00000002);
+}
+
+static __inline UInt
+get_RRROpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 18)) & 0x3ff);
+}
+
+static __inline UInt
+get_RRROpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 49)) & 0x3ff);
+}
+
+static __inline UInt
+get_RRROpcodeExtension_Y0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 18)) & 0x3);
+}
+
+static __inline UInt
+get_RRROpcodeExtension_Y1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 49)) & 0x3);
+}
+
+static __inline UInt
+get_ShAmt_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline UInt
+get_ShAmt_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 43)) & 0x3f);
+}
+
+static __inline UInt
+get_ShAmt_Y0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline UInt
+get_ShAmt_Y1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 43)) & 0x3f);
+}
+
+static __inline UInt
+get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 18)) & 0x3ff);
+}
+
+static __inline UInt
+get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 49)) & 0x3ff);
+}
+
+static __inline UInt
+get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 18)) & 0x3);
+}
+
+static __inline UInt
+get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 49)) & 0x3);
+}
+
+static __inline UInt
+get_SrcA_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 6)) & 0x3f);
+}
+
+static __inline UInt
+get_SrcA_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 37)) & 0x3f);
+}
+
+static __inline UInt
+get_SrcA_Y0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 6)) & 0x3f);
+}
+
+static __inline UInt
+get_SrcA_Y1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 37)) & 0x3f);
+}
+
+static __inline UInt
+get_SrcA_Y2(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 20)) & 0x3f);
+}
+
+static __inline UInt
+get_SrcBDest_Y2(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 51)) & 0x3f);
+}
+
+static __inline UInt
+get_SrcB_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline UInt
+get_SrcB_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 43)) & 0x3f);
+}
+
+static __inline UInt
+get_SrcB_Y0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline UInt
+get_SrcB_Y1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 43)) & 0x3f);
+}
+
+static __inline UInt
+get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline UInt
+get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 43)) & 0x3f);
+}
+
+static __inline UInt
+get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num)
+{
+  const UInt n = (UInt)num;
+  return (((n >> 12)) & 0x3f);
+}
+
+static __inline UInt
+get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n)
+{
+  return (((UInt)(n >> 43)) & 0x3f);
+}
+
+
+/* Sign-extend the low 'num_bits' bits of 'n' to a full int.
+   NOTE(review): relies on arithmetic right shift of negative ints
+   (implementation-defined) and on the left shift not being flagged
+   as signed-overflow UB -- acceptable on VEX's target compilers but
+   not strictly portable C. */
+static __inline int
+sign_extend(int n, int num_bits)
+{
+  int shift = (int)(sizeof(int) * 8 - num_bits);
+  return (n << shift) >> shift;
+}
+
+
+
+/* --- Bundle field builders ----------------------------------------
+   create_<Field>_<Pipe>: inverse of the get_ accessors above -- mask
+   the value to the field width and shift it into position.  Fields
+   that sit above bit 31 first widen to tilegx_bundle_bits so the
+   shift is done in 64 bits. */
+static __inline tilegx_bundle_bits
+create_BFEnd_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_BFOpcodeExtension_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0xf) << 24);
+}
+
+static __inline tilegx_bundle_bits
+create_BFStart_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_BrOff_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+         (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_BrType_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x1f)) << 54);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_Imm8_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+         (((tilegx_bundle_bits)(n & 0x000000c0)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 0);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_Y0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 0);
+}
+
+static __inline tilegx_bundle_bits
+create_Dest_Y1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm16_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0xffff) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm16_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0xffff)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8OpcodeExtension_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0xff) << 20);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8OpcodeExtension_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0xff)) << 51);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0xff) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0xff)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8_Y0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0xff) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_Imm8_Y1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0xff)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_JumpOff_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31);
+}
+
+static __inline tilegx_bundle_bits
+create_JumpOpcodeExtension_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x1)) << 58);
+}
+
+static __inline tilegx_bundle_bits
+create_MF_Imm14_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3fff)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_MT_Imm14_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+         (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_Mode(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3)) << 62);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x7) << 28);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x7)) << 59);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_Y0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0xf) << 27);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_Y1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0xf)) << 58);
+}
+
+static __inline tilegx_bundle_bits
+create_Opcode_Y2(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x00000001) << 26) |
+         (((tilegx_bundle_bits)(n & 0x00000002)) << 56);
+}
+
+static __inline tilegx_bundle_bits
+create_RRROpcodeExtension_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3ff) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_RRROpcodeExtension_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
+}
+
+static __inline tilegx_bundle_bits
+create_RRROpcodeExtension_Y0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_RRROpcodeExtension_Y1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3)) << 49);
+}
+
+static __inline tilegx_bundle_bits
+create_ShAmt_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_ShAmt_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_ShAmt_Y0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_ShAmt_Y1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3ff) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_Y0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3) << 18);
+}
+
+static __inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_Y1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3)) << 49);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 6);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_Y0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 6);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_Y1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcA_Y2(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 20);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcBDest_Y2(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 51);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcB_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcB_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcB_Y0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_SrcB_Y1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_X0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_X1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_Y0(int num)
+{
+  const UInt n = (UInt)num;
+  return ((n & 0x3f) << 12);
+}
+
+static __inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_Y1(int num)
+{
+  const UInt n = (UInt)num;
+  return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+/* Largest operand count of any TILE-Gx instruction (bfexts). */
+enum
+{
+  TILEGX_MAX_OPERANDS = 4 /* bfexts */
+};
+
+typedef enum
+{
+  TILEGX_OPC_BPT,
+  TILEGX_OPC_INFO,
+  TILEGX_OPC_INFOL,
+  TILEGX_OPC_LD4S_TLS,
+  TILEGX_OPC_LD_TLS,
+  TILEGX_OPC_MOVE,
+  TILEGX_OPC_MOVEI,
+  TILEGX_OPC_MOVELI,
+  TILEGX_OPC_PREFETCH,
+  TILEGX_OPC_PREFETCH_ADD_L1,
+  TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
+  TILEGX_OPC_PREFETCH_ADD_L2,
+  TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
+  TILEGX_OPC_PREFETCH_ADD_L3,
+  TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
+  TILEGX_OPC_PREFETCH_L1,
+  TILEGX_OPC_PREFETCH_L1_FAULT,
+  TILEGX_OPC_PREFETCH_L2,
+  TILEGX_OPC_PREFETCH_L2_FAULT,
+  TILEGX_OPC_PREFETCH_L3,
+  TILEGX_OPC_PREFETCH_L3_FAULT,
+  TILEGX_OPC_RAISE,
+  TILEGX_OPC_ADD,
+  TILEGX_OPC_ADDI,
+  TILEGX_OPC_ADDLI,
+  TILEGX_OPC_ADDX,
+  TILEGX_OPC_ADDXI,
+  TILEGX_OPC_ADDXLI,
+  TILEGX_OPC_ADDXSC,
+  TILEGX_OPC_AND,
+  TILEGX_OPC_ANDI,
+  TILEGX_OPC_BEQZ,
+  TILEGX_OPC_BEQZT,
+  TILEGX_OPC_BFEXTS,
+  TILEGX_OPC_BFEXTU,
+  TILEGX_OPC_BFINS,
+  TILEGX_OPC_BGEZ,
+  TILEGX_OPC_BGEZT,
+  TILEGX_OPC_BGTZ,
+  TILEGX_OPC_BGTZT,
+  TILEGX_OPC_BLBC,
+  TILEGX_OPC_BLBCT,
+  TILEGX_OPC_BLBS,
+  TILEGX_OPC_BLBST,
+  TILEGX_OPC_BLEZ,
+  TILEGX_OPC_BLEZT,
+  TILEGX_OPC_BLTZ,
+  TILEGX_OPC_BLTZT,
+  TILEGX_OPC_BNEZ,
+  TILEGX_OPC_BNEZT,
+  TILEGX_OPC_CLZ,
+  TILEGX_OPC_CMOVEQZ,
+  TILEGX_OPC_CMOVNEZ,
+  TILEGX_OPC_CMPEQ,
+  TILEGX_OPC_CMPEQI,
+  TILEGX_OPC_CMPEXCH,
+  TILEGX_OPC_CMPEXCH4,
+  TILEGX_OPC_CMPLES,
+  TILEGX_OPC_CMPLEU,
+  TILEGX_OPC_CMPLTS,
+  TILEGX_OPC_CMPLTSI,
+  TILEGX_OPC_CMPLTU,
+  TILEGX_OPC_CMPLTUI,
+  TILEGX_OPC_CMPNE,
+  TILEGX_OPC_CMUL,
+  TILEGX_OPC_CMULA,
+  TILEGX_OPC_CMULAF,
+  TILEGX_OPC_CMULF,
+  TILEGX_OPC_CMULFR,
+  TILEGX_OPC_CMULH,
+  TILEGX_OPC_CMULHR,
+  TILEGX_OPC_CRC32_32,
+  TILEGX_OPC_CRC32_8,
+  TILEGX_OPC_CTZ,
+  TILEGX_OPC_DBLALIGN,
+  TILEGX_OPC_DBLALIGN2,
+  TILEGX_OPC_DBLALIGN4,
+  TILEGX_OPC_DBLALIGN6,
+  TILEGX_OPC_DRAIN,
+  TILEGX_OPC_DTLBPR,
+  TILEGX_OPC_EXCH,
+  TILEGX_OPC_EXCH4,
+  TILEGX_OPC_FDOUBLE_ADD_FLAGS,
+  TILEGX_OPC_FDOUBLE_ADDSUB,
+  TILEGX_OPC_FDOUBLE_MUL_FLAGS,
+  TILEGX_OPC_FDOUBLE_PACK1,
+  TILEGX_OPC_FDOUBLE_PACK2,
+  TILEGX_OPC_FDOUBLE_SUB_FLAGS,
+  TILEGX_OPC_FDOUBLE_UNPACK_MAX,
+  TILEGX_OPC_FDOUBLE_UNPACK_MIN,
+  TILEGX_OPC_FETCHADD,
+  TILEGX_OPC_FETCHADD4,
+  TILEGX_OPC_FETCHADDGEZ,
+  TILEGX_OPC_FETCHADDGEZ4,
+  TILEGX_OPC_FETCHAND,
+  TILEGX_OPC_FETCHAND4,
+  TILEGX_OPC_FETCHOR,
+  TILEGX_OPC_FETCHOR4,
+  TILEGX_OPC_FINV,
+  TILEGX_OPC_FLUSH,
+  TILEGX_OPC_FLUSHWB,
+  TILEGX_OPC_FNOP,
+  TILEGX_OPC_FSINGLE_ADD1,
+  TILEGX_OPC_FSINGLE_ADDSUB2,
+  TILEGX_OPC_FSINGLE_MUL1,
+  TILEGX_OPC_FSINGLE_MUL2,
+  TILEGX_OPC_FSINGLE_PACK1,
+  TILEGX_OPC_FSINGLE_PACK2,
+  TILEGX_OPC_FSINGLE_SUB1,
+  TILEGX_OPC_ICOH,
+  TILEGX_OPC_ILL,
+  TILEGX_OPC_INV,
+  TILEGX_OPC_IRET,
+  TILEGX_OPC_J,
+  TILEGX_OPC_JAL,
+  TILEGX_OPC_JALR,
+  TILEGX_OPC_JALRP,
+  TILEGX_OPC_JR,
+  TILEGX_OPC_JRP,
+  TILEGX_OPC_LD,
+  TILEGX_OPC_LD1S,
+  TILEGX_OPC_LD1S_ADD,
+  TILEGX_OPC_LD1U,
+  TILEGX_OPC_LD1U_ADD,
+  TILEGX_OPC_LD2S,
+  TILEGX_OPC_LD2S_ADD,
+  TILEGX_OPC_LD2U,
+  TILEGX_OPC_LD2U_ADD,
+  TILEGX_OPC_LD4S,
+  TILEGX_OPC_LD4S_ADD,
+  TILEGX_OPC_LD4U,
+  TILEGX_OPC_LD4U_ADD,
+  TILEGX_OPC_LD_ADD,
+  TILEGX_OPC_LDNA,
+  TILEGX_OPC_LDNA_ADD,
+  TILEGX_OPC_LDNT,
+  TILEGX_OPC_LDNT1S,
+  TILEGX_OPC_LDNT1S_ADD,
+  TILEGX_OPC_LDNT1U,
+  TILEGX_OPC_LDNT1U_ADD,
+  TILEGX_OPC_LDNT2S,
+  TILEGX_OPC_LDNT2S_ADD,
+  TILEGX_OPC_LDNT2U,
+  TILEGX_OPC_LDNT2U_ADD,
+  TILEGX_OPC_LDNT4S,
+  TILEGX_OPC_LDNT4S_ADD,
+  TILEGX_OPC_LDNT4U,
+  TILEGX_OPC_LDNT4U_ADD,
+  TILEGX_OPC_LDNT_ADD,
+  TILEGX_OPC_LNK,
+  TILEGX_OPC_MF,
+  TILEGX_OPC_MFSPR,
+  TILEGX_OPC_MM,
+  TILEGX_OPC_MNZ,
+  TILEGX_OPC_MTSPR,
+  TILEGX_OPC_MUL_HS_HS,
+  TILEGX_OPC_MUL_HS_HU,
+  TILEGX_OPC_MUL_HS_LS,
+  TILEGX_OPC_MUL_HS_LU,
+  TILEGX_OPC_MUL_HU_HU,
+  TILEGX_OPC_MUL_HU_LS,
+  TILEGX_OPC_MUL_HU_LU,
+  TILEGX_OPC_MUL_LS_LS,
+  TILEGX_OPC_MUL_LS_LU,
+  TILEGX_OPC_MUL_LU_LU,
+  TILEGX_OPC_MULA_HS_HS,
+  TILEGX_OPC_MULA_HS_HU,
+  TILEGX_OPC_MULA_HS_LS,
+  TILEGX_OPC_MULA_HS_LU,
+  TILEGX_OPC_MULA_HU_HU,
+  TILEGX_OPC_MULA_HU_LS,
+  TILEGX_OPC_MULA_HU_LU,
+  TILEGX_OPC_MULA_LS_LS,
+  TILEGX_OPC_MULA_LS_LU,
+  TILEGX_OPC_MULA_LU_LU,
+  TILEGX_OPC_MULAX,
+  TILEGX_OPC_MULX,
+  TILEGX_OPC_MZ,
+  TILEGX_OPC_NAP,
+  TILEGX_OPC_NOP,
+  TILEGX_OPC_NOR,
+  TILEGX_OPC_OR,
+  TILEGX_OPC_ORI,
+  TILEGX_OPC_PCNT,
+  TILEGX_OPC_REVBITS,
+  TILEGX_OPC_REVBYTES,
+  TILEGX_OPC_ROTL,
+  TILEGX_OPC_ROTLI,
+  TILEGX_OPC_SHL,
+  TILEGX_OPC_SHL16INSLI,
+  TILEGX_OPC_SHL1ADD,
+  TILEGX_OPC_SHL1ADDX,
+  TILEGX_OPC_SHL2ADD,
+  TILEGX_OPC_SHL2ADDX,
+  TILEGX_OPC_SHL3ADD,
+  TILEGX_OPC_SHL3ADDX,
+  TILEGX_OPC_SHLI,
+  TILEGX_OPC_SHLX,
+  TILEGX_OPC_SHLXI,
+  TILEGX_OPC_SHRS,
+  TILEGX_OPC_SHRSI,
+  TILEGX_OPC_SHRU,
+  TILEGX_OPC_SHRUI,
+  TILEGX_OPC_SHRUX,
+  TILEGX_OPC_SHRUXI,
+  TILEGX_OPC_SHUFFLEBYTES,
+  TILEGX_OPC_ST,
+  TILEGX_OPC_ST1,
+  TILEGX_OPC_ST1_ADD,
+  TILEGX_OPC_ST2,
+  TILEGX_OPC_ST2_ADD,
+  TILEGX_OPC_ST4,
+  TILEGX_OPC_ST4_ADD,
+  TILEGX_OPC_ST_ADD,
+  TILEGX_OPC_STNT,
+  TILEGX_OPC_STNT1,
+  TILEGX_OPC_STNT1_ADD,
+  TILEGX_OPC_STNT2,
+  TILEGX_OPC_STNT2_ADD,
+  TILEGX_OPC_STNT4,
+  TILEGX_OPC_STNT4_ADD,
+  TILEGX_OPC_STNT_ADD,
+  TILEGX_OPC_SUB,
+  TILEGX_OPC_SUBX,
+  TILEGX_OPC_SUBXSC,
+  TILEGX_OPC_SWINT0,
+  TILEGX_OPC_SWINT1,
+  TILEGX_OPC_SWINT2,
+  TILEGX_OPC_SWINT3,
+  TILEGX_OPC_TBLIDXB0,
+  TILEGX_OPC_TBLIDXB1,
+  TILEGX_OPC_TBLIDXB2,
+  TILEGX_OPC_TBLIDXB3,
+  TILEGX_OPC_V1ADD,
+  TILEGX_OPC_V1ADDI,
+  TILEGX_OPC_V1ADDUC,
+  TILEGX_OPC_V1ADIFFU,
+  TILEGX_OPC_V1AVGU,
+  TILEGX_OPC_V1CMPEQ,
+  TILEGX_OPC_V1CMPEQI,
+  TILEGX_OPC_V1CMPLES,
+  TILEGX_OPC_V1CMPLEU,
+  TILEGX_OPC_V1CMPLTS,
+  TILEGX_OPC_V1CMPLTSI,
+  TILEGX_OPC_V1CMPLTU,
+  TILEGX_OPC_V1CMPLTUI,
+  TILEGX_OPC_V1CMPNE,
+  TILEGX_OPC_V1DDOTPU,
+  TILEGX_OPC_V1DDOTPUA,
+  TILEGX_OPC_V1DDOTPUS,
+  TILEGX_OPC_V1DDOTPUSA,
+  TILEGX_OPC_V1DOTP,
+  TILEGX_OPC_V1DOTPA,
+  TILEGX_OPC_V1DOTPU,
+  TILEGX_OPC_V1DOTPUA,
+  TILEGX_OPC_V1DOTPUS,
+  TILEGX_OPC_V1DOTPUSA,
+  TILEGX_OPC_V1INT_H,
+  TILEGX_OPC_V1INT_L,
+  TILEGX_OPC_V1MAXU,
+  TILEGX_OPC_V1MAXUI,
+  TILEGX_OPC_V1MINU,
+  TILEGX_OPC_V1MINUI,
+  TILEGX_OPC_V1MNZ,
+  TILEGX_OPC_V1MULTU,
+  TILEGX_OPC_V1MULU,
+  TILEGX_OPC_V1MULUS,
+  TILEGX_OPC_V1MZ,
+  TILEGX_OPC_V1SADAU,
+  TILEGX_OPC_V1SADU,
+  TILEGX_OPC_V1SHL,
+  TILEGX_OPC_V1SHLI,
+  TILEGX_OPC_V1SHRS,
+  TILEGX_OPC_V1SHRSI,
+  TILEGX_OPC_V1SHRU,
+  TILEGX_OPC_V1SHRUI,
+  TILEGX_OPC_V1SUB,
+  TILEGX_OPC_V1SUBUC,
+  TILEGX_OPC_V2ADD,
+  TILEGX_OPC_V2ADDI,
+  TILEGX_OPC_V2ADDSC,
+  TILEGX_OPC_V2ADIFFS,
+  TILEGX_OPC_V2AVGS,
+  TILEGX_OPC_V2CMPEQ,
+  TILEGX_OPC_V2CMPEQI,
+  TILEGX_OPC_V2CMPLES,
+  TILEGX_OPC_V2CMPLEU,
+  TILEGX_OPC_V2CMPLTS,
+  TILEGX_OPC_V2CMPLTSI,
+  TILEGX_OPC_V2CMPLTU,
+  TILEGX_OPC_V2CMPLTUI,
+  TILEGX_OPC_V2CMPNE,
+  TILEGX_OPC_V2DOTP,
+  TILEGX_OPC_V2DOTPA,
+  TILEGX_OPC_V2INT_H,
+  TILEGX_OPC_V2INT_L,
+  TILEGX_OPC_V2MAXS,
+  TILEGX_OPC_V2MAXSI,
+  TILEGX_OPC_V2MINS,
+  TILEGX_OPC_V2MINSI,
+  TILEGX_OPC_V2MNZ,
+  TILEGX_OPC_V2MULFSC,
+  TILEGX_OPC_V2MULS,
+  TILEGX_OPC_V2MULTS,
+  TILEGX_OPC_V2MZ,
+  TILEGX_OPC_V2PACKH,
+  TILEGX_OPC_V2PACKL,
+  TILEGX_OPC_V2PACKUC,
+  TILEGX_OPC_V2SADAS,
+  TILEGX_OPC_V2SADAU,
+  TILEGX_OPC_V2SADS,
+  TILEGX_OPC_V2SADU,
+  TILEGX_OPC_V2SHL,
+  TILEGX_OPC_V2SHLI,
+  TILEGX_OPC_V2SHLSC,
+  TILEGX_OPC_V2SHRS,
+  TILEGX_OPC_V2SHRSI,
+  TILEGX_OPC_V2SHRU,
+  TILEGX_OPC_V2SHRUI,
+  TILEGX_OPC_V2SUB,
+  TILEGX_OPC_V2SUBSC,
+  TILEGX_OPC_V4ADD,
+  TILEGX_OPC_V4ADDSC,
+  TILEGX_OPC_V4INT_H,
+  TILEGX_OPC_V4INT_L,
+  TILEGX_OPC_V4PACKSC,
+  TILEGX_OPC_V4SHL,
+  TILEGX_OPC_V4SHLSC,
+  TILEGX_OPC_V4SHRS,
+  TILEGX_OPC_V4SHRU,
+  TILEGX_OPC_V4SUB,
+  TILEGX_OPC_V4SUBSC,
+  TILEGX_OPC_WH64,
+  TILEGX_OPC_XOR,
+  TILEGX_OPC_XORI,
+  TILEGX_OPC_NONE
+} tilegx_mnemonic;
+
+
+
+typedef enum
+{
+  TILEGX_PIPELINE_X0,
+  TILEGX_PIPELINE_X1,
+  TILEGX_PIPELINE_Y0,
+  TILEGX_PIPELINE_Y1,
+  TILEGX_PIPELINE_Y2,
+} tilegx_pipeline;
+
+#define tilegx_is_x_pipeline(p) ((Int)(p) <= (Int)TILEGX_PIPELINE_X1)
+
+typedef enum
+{
+  TILEGX_OP_TYPE_REGISTER,
+  TILEGX_OP_TYPE_IMMEDIATE,
+  TILEGX_OP_TYPE_ADDRESS,
+  TILEGX_OP_TYPE_SPR
+} tilegx_operand_type;
+
+struct tilegx_operand
+{
+  /* Is this operand a register, immediate or address? */
+  tilegx_operand_type type;
+
+  /* The default relocation type for this operand.  */
+  Int default_reloc : 16;
+
+  /* How many bits is this value? (used for range checking) */
+  UInt num_bits : 5;
+
+  /* Is the value signed? (used for range checking) */
+  UInt is_signed : 1;
+
+  /* Is this operand a source register? */
+  UInt is_src_reg : 1;
+
+  /* Is this operand written? (i.e. is it a destination register) */
+  UInt is_dest_reg : 1;
+
+  /* Is this operand PC-relative? */
+  UInt is_pc_relative : 1;
+
+  /* By how many bits do we right shift the value before inserting? */
+  UInt rightshift : 2;
+
+  /* Return the bits for this operand to be ORed into an existing bundle. */
+  tilegx_bundle_bits (*insert) (int op);
+
+  /* Extract this operand and return it. */
+  UInt (*extract) (tilegx_bundle_bits bundle);
+};
+
+
+extern const struct tilegx_operand tilegx_operands[];
+
+/* One finite-state machine per pipe for rapid instruction decoding. */
+extern const unsigned short * const
+tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS];
+
+
+struct tilegx_opcode
+{
+  /* The opcode mnemonic, e.g. "add" */
+  const char *name;
+
+  /* The enum value for this mnemonic. */
+  tilegx_mnemonic mnemonic;
+
+  /* A bit mask of which of the five pipes this instruction
+     is compatible with:
+     X0  0x01
+     X1  0x02
+     Y0  0x04
+     Y1  0x08
+     Y2  0x10 */
+  unsigned char pipes;
+
+  /* How many operands are there? */
+  unsigned char num_operands;
+
+  /* Which register does this write implicitly, or TREG_ZERO if none? */
+  unsigned char implicitly_written_register;
+
+  /* Can this be bundled with other instructions (almost always true). */
+  unsigned char can_bundle;
+
+  /* The description of the operands. Each of these is an
+   * index into the tilegx_operands[] table. */
+  unsigned char operands[TILEGX_NUM_PIPELINE_ENCODINGS][TILEGX_MAX_OPERANDS];
+
+  /* A mask of which bits have predefined values for each pipeline.
+   * This is useful for disassembly. */
+  tilegx_bundle_bits fixed_bit_masks[TILEGX_NUM_PIPELINE_ENCODINGS];
+
+  /* For each bit set in fixed_bit_masks, what the value is for this
+   * instruction. */
+  tilegx_bundle_bits fixed_bit_values[TILEGX_NUM_PIPELINE_ENCODINGS];
+};
+
+extern const struct tilegx_opcode tilegx_opcodes[];
+
+/* Used for non-textual disassembly into structs. */
+struct tilegx_decoded_instruction
+{
+  const struct tilegx_opcode *opcode;
+  const struct tilegx_operand *operands[TILEGX_MAX_OPERANDS];
+  Long operand_values[TILEGX_MAX_OPERANDS];
+};
+
+
+/* Disassemble a bundle into a struct for machine processing. */
+extern Int parse_insn_tilegx ( tilegx_bundle_bits bits,
+                               ULong pc,
+                               struct tilegx_decoded_instruction
+                               decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE] );
+
+extern Int decode_and_display ( tilegx_bundle_bits *p, Int count, ULong pc );
+
+extern tilegx_bundle_bits
+encode_insn_tilegx ( struct tilegx_decoded_instruction decoded );
+
+
+extern tilegx_bundle_bits
+mkTileGxInsn ( Int opc, Int argc, ... );
+
+/* Given a set of bundle bits and a specific pipe, returns which
+ * instruction the bundle contains in that pipe.
+ */
+extern const struct tilegx_opcode *
+find_opcode ( tilegx_bundle_bits bits, tilegx_pipeline pipe );
+
+
+#endif /* __TILEGX_DISASM_H */
+
+/*---------------------------------------------------------------*/
+/*--- end                                     tilegx-disasm.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex.h b/VEX/pub/libvex.h
new file mode 100644
index 0000000..3543de9
--- /dev/null
+++ b/VEX/pub/libvex.h
@@ -0,0 +1,936 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                          libvex.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_H
+#define __LIBVEX_H
+
+
+#include "libvex_basictypes.h"
+#include "libvex_ir.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- This file defines the top-level interface to LibVEX.    ---*/
+/*---------------------------------------------------------------*/
+
+/*-------------------------------------------------------*/
+/*--- Architectures, variants, and other arch info    ---*/
+/*-------------------------------------------------------*/
+
+typedef 
+   enum { 
+      VexArch_INVALID=0x400,
+      VexArchX86, 
+      VexArchAMD64, 
+      VexArchARM,
+      VexArchARM64,
+      VexArchPPC32,
+      VexArchPPC64,
+      VexArchS390X,
+      VexArchMIPS32,
+      VexArchMIPS64,
+      VexArchTILEGX
+   }
+   VexArch;
+
+
+/* Information about endianness. */
+typedef
+   enum {
+      VexEndness_INVALID=0x600, /* unknown endianness */
+      VexEndnessLE,             /* little endian */
+      VexEndnessBE              /* big endian */
+   }
+   VexEndness;
+
+
+/* For a given architecture, these specify extra capabilities beyond
+   the minimum supported (baseline) capabilities.  They may be OR'd
+   together, although some combinations don't make sense.  (eg, SSE2
+   but not SSE1).  LibVEX_Translate will check for nonsensical
+   combinations. */
+
+/* x86: baseline capability is Pentium-1 (FPU, MMX, but no SSE), with
+   cmpxchg8b. MMXEXT is a special AMD only subset of SSE1 (Integer SSE). */
+#define VEX_HWCAPS_X86_MMXEXT  (1<<1)  /* A subset of SSE1 on early AMD */
+#define VEX_HWCAPS_X86_SSE1    (1<<2)  /* SSE1 support (Pentium III) */
+#define VEX_HWCAPS_X86_SSE2    (1<<3)  /* SSE2 support (Pentium 4) */
+#define VEX_HWCAPS_X86_SSE3    (1<<4)  /* SSE3 support (>= Prescott) */
+#define VEX_HWCAPS_X86_LZCNT   (1<<5)  /* SSE4a LZCNT insn */
+
+/* amd64: baseline capability is SSE2, with cmpxchg8b but not
+   cmpxchg16b. */
+#define VEX_HWCAPS_AMD64_SSE3   (1<<5)  /* SSE3 support */
+#define VEX_HWCAPS_AMD64_CX16   (1<<6)  /* cmpxchg16b support */
+#define VEX_HWCAPS_AMD64_LZCNT  (1<<7)  /* SSE4a LZCNT insn */
+#define VEX_HWCAPS_AMD64_AVX    (1<<8)  /* AVX instructions */
+#define VEX_HWCAPS_AMD64_RDTSCP (1<<9)  /* RDTSCP instruction */
+#define VEX_HWCAPS_AMD64_BMI    (1<<10) /* BMI1 instructions */
+#define VEX_HWCAPS_AMD64_AVX2   (1<<11) /* AVX2 instructions */
+
+/* ppc32: baseline capability is integer only */
+#define VEX_HWCAPS_PPC32_F     (1<<8)  /* basic (non-optional) FP */
+#define VEX_HWCAPS_PPC32_V     (1<<9)  /* Altivec (VMX) */
+#define VEX_HWCAPS_PPC32_FX    (1<<10) /* FP extns (fsqrt, fsqrts) */
+#define VEX_HWCAPS_PPC32_GX    (1<<11) /* Graphics extns
+                                          (fres,frsqrte,fsel,stfiwx) */
+#define VEX_HWCAPS_PPC32_VX    (1<<12) /* Vector-scalar floating-point (VSX); implies ISA 2.06 or higher  */
+#define VEX_HWCAPS_PPC32_DFP   (1<<17) /* Decimal Floating Point (DFP) -- e.g., dadd */
+#define VEX_HWCAPS_PPC32_ISA2_07   (1<<19) /* ISA 2.07 -- e.g., mtvsrd */
+
+/* ppc64: baseline capability is integer and basic FP insns */
+#define VEX_HWCAPS_PPC64_V     (1<<13) /* Altivec (VMX) */
+#define VEX_HWCAPS_PPC64_FX    (1<<14) /* FP extns (fsqrt, fsqrts) */
+#define VEX_HWCAPS_PPC64_GX    (1<<15) /* Graphics extns
+                                          (fres,frsqrte,fsel,stfiwx) */
+#define VEX_HWCAPS_PPC64_VX    (1<<16) /* Vector-scalar floating-point (VSX); implies ISA 2.06 or higher  */
+#define VEX_HWCAPS_PPC64_DFP   (1<<18) /* Decimal Floating Point (DFP) -- e.g., dadd */
+#define VEX_HWCAPS_PPC64_ISA2_07   (1<<20) /* ISA 2.07 -- e.g., mtvsrd */
+
+/* s390x: Hardware capability encoding
+
+   Bits [26:31] encode the machine model (see VEX_S390X_MODEL... below)
+   Bits [0:20]  encode specific hardware capabilities
+                (see VEX_HWCAPS_S390X_... below)
+*/
+
+/* Model numbers must be assigned in chronological order.
+   They are used as array index. */
+#define VEX_S390X_MODEL_Z900     0
+#define VEX_S390X_MODEL_Z800     1
+#define VEX_S390X_MODEL_Z990     2
+#define VEX_S390X_MODEL_Z890     3
+#define VEX_S390X_MODEL_Z9_EC    4
+#define VEX_S390X_MODEL_Z9_BC    5
+#define VEX_S390X_MODEL_Z10_EC   6
+#define VEX_S390X_MODEL_Z10_BC   7
+#define VEX_S390X_MODEL_Z196     8
+#define VEX_S390X_MODEL_Z114     9
+#define VEX_S390X_MODEL_ZEC12    10
+#define VEX_S390X_MODEL_ZBC12    11
+#define VEX_S390X_MODEL_Z13      12
+#define VEX_S390X_MODEL_UNKNOWN  13     /* always last in list */
+#define VEX_S390X_MODEL_MASK     0x3F
+
+#define VEX_HWCAPS_S390X_LDISP (1<<6)   /* Long-displacement facility */
+#define VEX_HWCAPS_S390X_EIMM  (1<<7)   /* Extended-immediate facility */
+#define VEX_HWCAPS_S390X_GIE   (1<<8)   /* General-instruction-extension facility */
+#define VEX_HWCAPS_S390X_DFP   (1<<9)   /* Decimal floating point facility */
+#define VEX_HWCAPS_S390X_FGX   (1<<10)  /* FPR-GR transfer facility */
+#define VEX_HWCAPS_S390X_ETF2  (1<<11)  /* ETF2-enhancement facility */
+#define VEX_HWCAPS_S390X_STFLE (1<<12)  /* STFLE facility */
+#define VEX_HWCAPS_S390X_ETF3  (1<<13)  /* ETF3-enhancement facility */
+#define VEX_HWCAPS_S390X_STCKF (1<<14)  /* STCKF facility */
+#define VEX_HWCAPS_S390X_FPEXT (1<<15)  /* Floating point extension facility */
+#define VEX_HWCAPS_S390X_LSC   (1<<16)  /* Conditional load/store facility */
+#define VEX_HWCAPS_S390X_PFPO  (1<<17)  /* Perform floating point ops facility */
+
+/* Special value representing all available s390x hwcaps */
+#define VEX_HWCAPS_S390X_ALL   (VEX_HWCAPS_S390X_LDISP | \
+                                VEX_HWCAPS_S390X_EIMM  | \
+                                VEX_HWCAPS_S390X_GIE   | \
+                                VEX_HWCAPS_S390X_DFP   | \
+                                VEX_HWCAPS_S390X_FGX   | \
+                                VEX_HWCAPS_S390X_STFLE | \
+                                VEX_HWCAPS_S390X_STCKF | \
+                                VEX_HWCAPS_S390X_FPEXT | \
+                                VEX_HWCAPS_S390X_LSC   | \
+                                VEX_HWCAPS_S390X_ETF3  | \
+                                VEX_HWCAPS_S390X_ETF2  | \
+                                VEX_HWCAPS_S390X_PFPO)
+
+#define VEX_HWCAPS_S390X(x)  ((x) & ~VEX_S390X_MODEL_MASK)
+#define VEX_S390X_MODEL(x)   ((x) &  VEX_S390X_MODEL_MASK)
+
+/* Tilegx: baseline capability is TILEGX36 */
+#define VEX_HWCAPS_TILEGX_BASE (1<<16)  /* TILEGX Baseline */
+
+/* arm: baseline capability is ARMv4 */
+/* Bits 5:0 - architecture level (e.g. 5 for v5, 6 for v6 etc) */
+#define VEX_HWCAPS_ARM_VFP    (1<<6)  /* VFP extension */
+#define VEX_HWCAPS_ARM_VFP2   (1<<7)  /* VFPv2 */
+#define VEX_HWCAPS_ARM_VFP3   (1<<8)  /* VFPv3 */
+/* Bits 15:10 reserved for (possible) future VFP revisions */
+#define VEX_HWCAPS_ARM_NEON   (1<<16) /* Advanced SIMD also known as NEON */
+
+/* Get an ARM architecture level from HWCAPS */
+#define VEX_ARM_ARCHLEVEL(x) ((x) & 0x3f)
+
+/* ARM64: baseline capability is AArch64 v8. */
+/* (no definitions since no variants so far) */
+
+/* MIPS baseline capability */
+/* Assigned Company values for bits 23:16 of the PRId Register
+   (CP0 register 15, select 0).  As of the MIPS32 and MIPS64 specs from
+   MTI, the PRId register is defined in this (backwards compatible)
+   way:
+
+  +----------------+----------------+----------------+----------------+
+  | Company Options| Company ID     | Processor ID   | Revision       |
+  +----------------+----------------+----------------+----------------+
+   31            24 23            16 15             8 7              0
+
+*/
+
+#define VEX_PRID_COMP_MIPS      0x00010000
+#define VEX_PRID_COMP_BROADCOM  0x00020000
+#define VEX_PRID_COMP_NETLOGIC  0x000C0000
+#define VEX_PRID_COMP_CAVIUM    0x000D0000
+
+/*
+ * These are the PRID's for when 23:16 == PRID_COMP_MIPS
+ */
+#define VEX_PRID_IMP_34K        0x9500
+#define VEX_PRID_IMP_74K        0x9700
+
+/* CPU has FPU and 32 dbl. prec. FP registers */
+#define VEX_PRID_CPU_32FPR      0x00000040
+
+/* Get MIPS Company ID from HWCAPS */
+#define VEX_MIPS_COMP_ID(x) ((x) & 0x00FF0000)
+/* Get MIPS Processor ID from HWCAPS */
+#define VEX_MIPS_PROC_ID(x) ((x) & 0x0000FF00)
+/* Get MIPS Revision from HWCAPS */
+#define VEX_MIPS_REV(x) ((x) & 0x000000FF)
+/* Check if the processor supports DSP ASE Rev 2. */
+#define VEX_MIPS_PROC_DSP2(x) ((VEX_MIPS_COMP_ID(x) == VEX_PRID_COMP_MIPS) && \
+                               (VEX_MIPS_PROC_ID(x) == VEX_PRID_IMP_74K))
+/* Check if the processor supports DSP ASE Rev 1. */
+#define VEX_MIPS_PROC_DSP(x)  (VEX_MIPS_PROC_DSP2(x) || \
+                               ((VEX_MIPS_COMP_ID(x) == VEX_PRID_COMP_MIPS) && \
+                               (VEX_MIPS_PROC_ID(x) == VEX_PRID_IMP_34K)))
+
+/* These return statically allocated strings. */
+
+extern const HChar* LibVEX_ppVexArch    ( VexArch );
+extern const HChar* LibVEX_ppVexEndness ( VexEndness endness );
+extern const HChar* LibVEX_ppVexHwCaps  ( VexArch, UInt );
+
+
+/* The various kinds of caches */
+typedef enum {
+   DATA_CACHE=0x500,
+   INSN_CACHE,
+   UNIFIED_CACHE
+} VexCacheKind;
+
+/* Information about a particular cache */
+typedef struct {
+   VexCacheKind kind;
+   UInt level;         /* level this cache is at, e.g. 1 for L1 cache */
+   UInt sizeB;         /* size of this cache in bytes */
+   UInt line_sizeB;    /* cache line size in bytes */
+   UInt assoc;         /* set associativity */
+   Bool is_trace_cache;  /* False, except for certain Pentium 4 models */
+} VexCache;
+
+/* Convenience macro to initialise a VexCache */
+#define VEX_CACHE_INIT(_kind, _level, _size, _line_size, _assoc)         \
+         ({ (VexCache) { .kind = _kind, .level = _level, .sizeB = _size, \
+               .line_sizeB = _line_size, .assoc = _assoc, \
+               .is_trace_cache = False }; })
+
+/* Information about the cache system as a whole */
+typedef struct {
+   UInt num_levels;
+   UInt num_caches;
+   /* Unordered array of caches for this host. NULL if there are
+      no caches. The following can always be assumed:
+      (1) There is at most one cache of a given kind per cache level.
+      (2) If there exists a unified cache at a particular level then
+          no other cache exists at that level.
+      (3) The existence of a cache at level N > 1 implies the existence of
+          at least one cache at level N-1. */
+   VexCache *caches;
+   Bool icaches_maintain_coherence;
+} VexCacheInfo;
+
+
+/* This struct is a bit of a hack, but is needed to carry misc
+   important bits of info about an arch.  Fields which are meaningless
+   or ignored for the platform in question should be set to zero.
+   Nb: if you add fields to the struct make sure to update function
+   LibVEX_default_VexArchInfo. */
+
+typedef
+   struct {
+      /* The following three fields are mandatory. */
+      UInt         hwcaps;
+      VexEndness   endness;
+      VexCacheInfo hwcache_info;
+      /* PPC32/PPC64 only: size of instruction cache line */
+      Int ppc_icache_line_szB;
+      /* PPC32/PPC64 only: sizes zeroed by the dcbz/dcbzl instructions
+         (bug#135264) */
+      UInt ppc_dcbz_szB;
+      UInt ppc_dcbzl_szB; /* 0 means unsupported (SIGILL) */
+      /* ARM64: I- and D- minimum line sizes in log2(bytes), as
+         obtained from ctr_el0.DminLine and .IminLine.  For example, a
+         line size of 64 bytes would be encoded here as 6. */
+      UInt arm64_dMinLine_lg2_szB;
+      UInt arm64_iMinLine_lg2_szB;
+   }
+   VexArchInfo;
+
+/* Write default settings into *vai. */
+extern 
+void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai );
+
+
+/* This struct carries guest and host ABI variant information that may
+   be needed.  Fields which are meaningless or ignored for the
+   platform in question should be set to zero.
+
+   Settings which are believed to be correct are:
+
+   guest_stack_redzone_size
+      guest is ppc32-linux                ==> 0
+      guest is ppc64-linux                ==> 288
+      guest is amd64-linux                ==> 128
+      guest is other                      ==> inapplicable
+
+   guest_amd64_assume_fs_is_const
+      guest is amd64-linux                ==> True
+      guest is amd64-darwin               ==> False
+      guest is other                      ==> inapplicable
+
+   guest_amd64_assume_gs_is_const
+      guest is amd64-darwin               ==> True
+      guest is amd64-linux                ==> True
+      guest is other                      ==> inapplicable
+
+   guest_ppc_zap_RZ_at_blr
+      guest is ppc64-linux                ==> True
+      guest is ppc32-linux                ==> False
+      guest is other                      ==> inapplicable
+
+   guest_ppc_zap_RZ_at_bl
+      guest is ppc64-linux                ==> const True
+      guest is ppc32-linux                ==> const False
+      guest is other                      ==> inapplicable
+
+   host_ppc_calls_use_fndescrs:
+      host is ppc32-linux                 ==> False
+      host is ppc64-linux                 ==> True
+      host is other                       ==> inapplicable
+*/
+
+typedef
+   struct {
+      /* PPC and AMD64 GUESTS only: how many bytes below the 
+         stack pointer are validly addressable? */
+      Int guest_stack_redzone_size;
+
+      /* AMD64 GUESTS only: should we translate %fs-prefixed
+         instructions using the assumption that %fs always contains
+         the same value? (typically zero on linux) */
+      Bool guest_amd64_assume_fs_is_const;
+
+      /* AMD64 GUESTS only: should we translate %gs-prefixed
+         instructions using the assumption that %gs always contains
+         the same value? (typically 0x60 on darwin)? */
+      Bool guest_amd64_assume_gs_is_const;
+
+      /* PPC GUESTS only: should we zap the stack red zone at a 'blr'
+         (function return) ? */
+      Bool guest_ppc_zap_RZ_at_blr;
+
+      /* PPC GUESTS only: should we zap the stack red zone at a 'bl'
+         (function call) ?  Is supplied with the guest address of the
+         target of the call since that may be significant.  If NULL,
+         is assumed equivalent to a fn which always returns False. */
+      Bool (*guest_ppc_zap_RZ_at_bl)(Addr);
+
+      /* PPC32/PPC64 HOSTS only: does '&f' give us a pointer to a
+         function descriptor on the host, or to the function code
+         itself?  True => descriptor, False => code. */
+      Bool host_ppc_calls_use_fndescrs;
+   }
+   VexAbiInfo;
+
+/* Write default settings into *vbi. */
+extern 
+void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi );
+
+
+/*-------------------------------------------------------*/
+/*--- Control of Vex's optimiser (iropt).             ---*/
+/*-------------------------------------------------------*/
+
+
+/* VexRegisterUpdates specifies when to ensure that the guest state is
+   up to date, in order of increasing accuracy but increasing expense.
+
+     VexRegUpdSpAtMemAccess: all registers are updated at superblock
+     exits, and SP is also up to date at memory exception points.  The
+     SP is described by the arch specific functions
+     guest_<arch>_state_requires_precise_mem_exns.
+
+     VexRegUpdUnwindregsAtMemAccess: registers needed to make a stack
+     trace are up to date at memory exception points.  Typically,
+     these are PC/SP/FP.  The minimal registers are described by the
+     arch specific functions guest_<arch>_state_requires_precise_mem_exns.
+     This is what Valgrind sets as the default.
+
+     VexRegUpdAllregsAtMemAccess: all registers up to date at memory
+     exception points.  This is what normally might be considered as
+     providing "precise exceptions for memory", but does not
+     necessarily provide precise register values at any other kind of
+     exception.
+
+     VexRegUpdAllregsAtEachInsn: all registers up to date at each
+     instruction. 
+*/
+typedef
+   enum {
+      VexRegUpd_INVALID=0x700,
+      VexRegUpdSpAtMemAccess,
+      VexRegUpdUnwindregsAtMemAccess,
+      VexRegUpdAllregsAtMemAccess,
+      VexRegUpdAllregsAtEachInsn
+   }
+   VexRegisterUpdates;
+
+/* Control of Vex's optimiser. */
+
+typedef
+   struct {
+      /* Controls verbosity of iropt.  0 = no output. */
+      Int iropt_verbosity;
+      /* Control aggressiveness of iropt.  0 = no opt, 1 = simple
+         opts, 2 (default) = max optimisation. */
+      Int iropt_level;
+      /* Controls when registers are updated in guest state.  Note
+         that this is the default value.  The VEX client can override
+         this on a per-IRSB basis if it wants.  bb_to_IR() will query
+         the client to ask if it wants a different setting for the
+         block under construction, and that new setting is transported
+         back to LibVEX_Translate, which feeds it to iropt via the
+         various do_iropt_BB calls. */
+      VexRegisterUpdates iropt_register_updates_default;
+      /* How aggressive should iropt be in unrolling loops?  Higher
+         numbers make it more enthusiastic about loop unrolling.
+         Default=120.  A setting of zero disables unrolling.  */
+      Int iropt_unroll_thresh;
+      /* What's the maximum basic block length the front end(s) allow?
+         BBs longer than this are split up.  Default=50 (guest
+         insns). */
+      Int guest_max_insns;
+      /* How aggressive should front ends be in following
+         unconditional branches to known destinations?  Default=10,
+         meaning that if a block contains less than 10 guest insns so
+         far, the front end(s) will attempt to chase into its
+         successor. A setting of zero disables chasing.  */
+      Int guest_chase_thresh;
+      /* EXPERIMENTAL: chase across conditional branches?  Not all
+         front ends honour this.  Default: NO. */
+      Bool guest_chase_cond;
+   }
+   VexControl;
+
+
+/* Write the default settings into *vcon. */
+
+extern 
+void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon );
+
+
+/*-------------------------------------------------------*/
+/*--- Storage management control                      ---*/
+/*-------------------------------------------------------*/
+
+/* Allocate in Vex's temporary allocation area.  Be careful with this.
+   You can only call it inside an instrumentation or optimisation
+   callback that you have previously specified in a call to
+   LibVEX_Translate.  The storage allocated will only stay alive until
+   translation of the current basic block is complete. */
+extern void* LibVEX_Alloc ( SizeT nbytes );
+
+/* Show Vex allocation statistics. */
+extern void LibVEX_ShowAllocStats ( void );
+
+
+/*-------------------------------------------------------*/
+/*--- Describing guest state layout                   ---*/
+/*-------------------------------------------------------*/
+
+/* Describe the guest state enough that the instrumentation
+   functions can work. */
+
+/* The max number of guest state chunks which we can describe as
+   always defined (for the benefit of Memcheck). */
+#define VEXGLO_N_ALWAYSDEFD  24
+
+typedef
+   struct {
+      /* Total size of the guest state, in bytes.  Must be
+         16-aligned. */
+      Int total_sizeB;
+      /* Whereabouts is the stack pointer? */
+      Int offset_SP;
+      Int sizeof_SP; /* 4 or 8 */
+      /* Whereabouts is the frame pointer? */
+      Int offset_FP;
+      Int sizeof_FP; /* 4 or 8 */
+      /* Whereabouts is the instruction pointer? */
+      Int offset_IP;
+      Int sizeof_IP; /* 4 or 8 */
+      /* Describe parts of the guest state regarded as 'always
+         defined'. */
+      Int n_alwaysDefd;
+      struct {
+         Int offset;
+         Int size;
+      } alwaysDefd[VEXGLO_N_ALWAYSDEFD];
+   }
+   VexGuestLayout;
+
+/* A note about guest state layout.
+
+   LibVEX defines the layout for the guest state, in the file
+   pub/libvex_guest_<arch>.h.  The struct will have a 16-aligned
+   size.  Each translated bb is assumed to be entered with a specified
+   register pointing at such a struct.  Beyond that is two copies of
+   the shadow state area with the same size as the struct.  Beyond
+   that is a spill area that LibVEX may spill into.  It must have size
+   LibVEX_N_SPILL_BYTES, and this must be a 16-aligned number.
+
+   On entry, the baseblock pointer register must be 16-aligned.
+
+   There must be no holes in between the primary guest state, its two
+   copies, and the spill area.  In short, all 4 areas must have a
+   16-aligned size and be 16-aligned, and placed back-to-back.
+*/
+
+#define LibVEX_N_SPILL_BYTES 4096
+
+/* The size of the guest state must be a multiple of this number. */
+#define LibVEX_GUEST_STATE_ALIGN 16
+
+/*-------------------------------------------------------*/
+/*--- Initialisation of the library                   ---*/
+/*-------------------------------------------------------*/
+
+/* Initialise the library.  You must call this first. */
+
+extern void LibVEX_Init (
+
+   /* failure exit function */
+#  if __cplusplus == 1 && __GNUC__ && __GNUC__ <= 3
+   /* g++ 3.x doesn't understand attributes on function parameters.
+      See #265762. */
+#  else
+   __attribute__ ((noreturn))
+#  endif
+   void (*failure_exit) ( void ),
+
+   /* logging output function */
+   void (*log_bytes) ( const HChar*, SizeT nbytes ),
+
+   /* debug paranoia level */
+   Int debuglevel,
+
+   /* Control ... */
+   const VexControl* vcon
+);
+
+
+/*-------------------------------------------------------*/
+/*--- Make a translation                              ---*/
+/*-------------------------------------------------------*/
+
+/* Describes the outcome of a translation attempt. */
+typedef
+   struct {
+      /* overall status */
+      enum { VexTransOK=0x800,
+             VexTransAccessFail, VexTransOutputFull } status;
+      /* The number of extents that have a self-check (0 to 3) */
+      UInt n_sc_extents;
+      /* Offset in generated code of the profile inc, or -1 if
+         none.  Needed for later patching. */
+      Int offs_profInc;
+      /* Stats only: the number of guest insns included in the
+         translation.  It may be zero (!). */
+      UInt n_guest_instrs;
+   }
+   VexTranslateResult;
+
+
+/* Describes precisely the pieces of guest code that a translation
+   covers.  Now that Vex can chase across BB boundaries, the old
+   scheme of describing a chunk of guest code merely by its start
+   address and length is inadequate.
+
+   This struct uses 20 bytes on a 32-bit architecture and 32 bytes on a
+   64-bit architecture.  Space is important as clients will have to store
+   one of these for each translation made.
+*/
+typedef
+   struct {
+      Addr   base[3];
+      UShort len[3];
+      UShort n_used;
+   }
+   VexGuestExtents;
+
+
+/* A structure to carry arguments for LibVEX_Translate.  There are so
+   many of them, it seems better to have a structure. */
+typedef
+   struct {
+      /* IN: The instruction sets we are translating from and to.  And
+         guest/host misc info. */
+      VexArch      arch_guest;
+      VexArchInfo  archinfo_guest;
+      VexArch      arch_host;
+      VexArchInfo  archinfo_host;
+      VexAbiInfo   abiinfo_both;
+
+      /* IN: an opaque value which is passed as the first arg to all
+         callback functions supplied in this struct.  Vex has no idea
+         what's at the other end of this pointer. */
+      void*   callback_opaque;
+
+      /* IN: the block to translate, and its guest address. */
+      /* where are the actual bytes in the host's address space? */
+      const UChar*  guest_bytes;
+      /* where do the bytes really come from in the guest's aspace?
+         This is the post-redirection guest address.  Not that Vex
+         understands anything about redirection; that is all done on
+         the Valgrind side. */
+      Addr    guest_bytes_addr;
+
+      /* Is it OK to chase into this guest address?  May not be
+	 NULL. */
+      Bool    (*chase_into_ok) ( /*callback_opaque*/void*, Addr );
+
+      /* OUT: which bits of guest code actually got translated */
+      VexGuestExtents* guest_extents;
+
+      /* IN: a place to put the resulting code, and its size */
+      UChar*  host_bytes;
+      Int     host_bytes_size;
+      /* OUT: how much of the output area is used. */
+      Int*    host_bytes_used;
+
+      /* IN: optionally, two instrumentation functions.  May be
+	 NULL. */
+      IRSB*   (*instrument1) ( /*callback_opaque*/void*, 
+                               IRSB*, 
+                               const VexGuestLayout*, 
+                               const VexGuestExtents*,
+                               const VexArchInfo*,
+                               IRType gWordTy, IRType hWordTy );
+      IRSB*   (*instrument2) ( /*callback_opaque*/void*, 
+                               IRSB*, 
+                               const VexGuestLayout*, 
+                               const VexGuestExtents*,
+                               const VexArchInfo*,
+                               IRType gWordTy, IRType hWordTy );
+
+      IRSB* (*finaltidy) ( IRSB* );
+
+      /* IN: a callback used to ask the caller which of the extents,
+         if any, a self check is required for.  Must not be NULL.
+         The returned value is a bitmask with a 1 in position i indicating
+         that the i'th extent needs a check.  Since there can be at most
+         3 extents, the returned values must be between 0 and 7.
+
+         This call also gives the VEX client the opportunity to change
+         the precision of register update preservation as performed by
+         the IR optimiser.  Before the call, VEX will set *pxControl
+         to hold the default register-update status value as specified
+         by VexControl::iropt_register_updates_default as passed to
+         LibVEX_Init at library initialisation time.  The client (in
+         this callback) can if it wants, inspect the value and change
+         it to something different, and that value will be used for
+         subsequent IR optimisation of the block. */
+      UInt (*needs_self_check)( /*callback_opaque*/void*,
+                                /*MAYBE_MOD*/VexRegisterUpdates* pxControl,
+                                const VexGuestExtents* );
+
+      /* IN: optionally, a callback which allows the caller to add its
+         own IR preamble following the self-check and any other
+         VEX-generated preamble, if any.  May be NULL.  If non-NULL,
+         the IRSB under construction is handed to this function, which
+         presumably adds IR statements to it.  The callback may
+         optionally complete the block and direct bb_to_IR not to
+         disassemble any instructions into it; this is indicated by
+         the callback returning True.
+      */
+      Bool    (*preamble_function)(/*callback_opaque*/void*, IRSB*);
+
+      /* IN: debug: trace vex activity at various points */
+      Int     traceflags;
+
+      /* IN: debug: print diagnostics when an illegal instr is detected */
+      Bool    sigill_diag;
+
+      /* IN: profiling: add a 64 bit profiler counter increment to the
+         translation? */
+      Bool    addProfInc;
+
+      /* IN: address of the dispatcher entry points.  Describes the
+         places where generated code should jump to at the end of each
+         bb.
+
+         At the end of each translation, the next guest address is
+         placed in the host's standard return register (x86: %eax,
+         amd64: %rax, ppc32: %r3, ppc64: %r3).  Optionally, the guest
+         state pointer register (on host x86: %ebp; amd64: %rbp;
+         ppc32/64: r31) may be set to a VEX_TRC_ value to indicate any
+         special action required before the next block is run.
+
+         Control is then passed back to the dispatcher (beyond Vex's
+         control; caller supplies this) in the following way:
+
+         - On host archs which lack a link register (x86, amd64), by a
+           jump to the host address specified in
+           'dispatcher_assisted', if the guest state pointer has been
+           changed so as to request some action before the next block
+           is run, or 'dispatcher_unassisted' (the fast path), in
+           which it is assumed that the guest state pointer is
+           unchanged and we wish to continue directly with the next
+           translation.  Both of these must be non-NULL.
+
+         - On host archs which have a link register (ppc32, ppc64), by
+           a branch to the link register (which is guaranteed to be
+           unchanged from whatever it was at entry to the
+           translation).  'dispatch_assisted' and
+           'dispatch_unassisted' must be NULL.
+
+         The aim is to get back and forth between translations and the
+         dispatcher without creating memory traffic to store return
+         addresses.
+
+         FIXME: update this comment
+      */
+      const void* disp_cp_chain_me_to_slowEP;
+      const void* disp_cp_chain_me_to_fastEP;
+      const void* disp_cp_xindir;
+      const void* disp_cp_xassisted;
+   }
+   VexTranslateArgs;
+
+
+extern 
+VexTranslateResult LibVEX_Translate ( VexTranslateArgs* );
+
+/* A subtlety re interaction between self-checking translations and
+   bb-chasing.  The supplied chase_into_ok function should say NO
+   (False) when presented with any address for which you might want to
+   make a self-checking translation.
+
+   If it doesn't do that, you may end up with Vex chasing from BB #1
+   to BB #2 (fine); but if you wanted checking for #2 and not #1, that
+   would not be the result.  Therefore chase_into_ok should disallow
+   following into #2.  That will force the caller to eventually
+   request a new translation starting at #2, at which point Vex will
+   correctly observe the make-a-self-check flag.
+
+   FIXME: is this still up to date? */
+
+
+/*-------------------------------------------------------*/
+/*--- Patch existing translations                     ---*/
+/*-------------------------------------------------------*/
+
+/* A host address range that was modified by the functions below. 
+   Callers must request I-cache syncing after the call as appropriate. */
+typedef
+   struct {
+      HWord start;
+      HWord len;     /* always > 0 */
+   }
+   VexInvalRange;
+
+/* Chain an XDirect jump located at place_to_chain so it jumps to
+   place_to_jump_to.  It is expected (and checked) that this site
+   currently contains a call to the dispatcher specified by
+   disp_cp_chain_me_EXPECTED. */
+extern
+VexInvalRange LibVEX_Chain ( VexArch     arch_host,
+                             VexEndness  endness_host,
+                             void*       place_to_chain,
+                             const void* disp_cp_chain_me_EXPECTED,
+                             const void* place_to_jump_to );
+
+/* Undo an XDirect jump located at place_to_unchain, so it is
+   converted back into a call to disp_cp_chain_me.  It is expected
+   (and checked) that this site currently contains a jump directly to
+   the address specified by place_to_jump_to_EXPECTED. */
+extern
+VexInvalRange LibVEX_UnChain ( VexArch     arch_host,
+                               VexEndness  endness_host,
+                               void*       place_to_unchain,
+                               const void* place_to_jump_to_EXPECTED,
+                               const void* disp_cp_chain_me );
+
+/* Returns a constant -- the size of the event check that is put at
+   the start of every translation.  This makes it possible to
+   calculate the fast entry point address if the slow entry point
+   address is known (the usual case), or vice versa. */
+extern
+Int LibVEX_evCheckSzB ( VexArch arch_host );
+
+
+/* Patch the counter location into an existing ProfInc point.  The
+   specified point is checked to make sure it is plausible. */
+extern
+VexInvalRange LibVEX_PatchProfInc ( VexArch      arch_host,
+                                    VexEndness   endness_host,
+                                    void*        place_to_patch,
+                                    const ULong* location_of_counter );
+
+
+/*-------------------------------------------------------*/
+/*--- Show accumulated statistics                     ---*/
+/*-------------------------------------------------------*/
+
+extern void LibVEX_ShowStats ( void );
+
+/*-------------------------------------------------------*/
+/*-- IR injection                                      --*/
+/*-------------------------------------------------------*/
+
+/* IR Injection Control Block */
+
+#define NO_ROUNDING_MODE (~0u)
+
+typedef 
+   struct {
+      IROp  op;        // the operation to perform
+      HWord result;    // address of the result
+      HWord opnd1;     // address of 1st operand
+      HWord opnd2;     // address of 2nd operand
+      HWord opnd3;     // address of 3rd operand
+      HWord opnd4;     // address of 4th operand
+      IRType t_result; // type of result
+      IRType t_opnd1;  // type of 1st operand
+      IRType t_opnd2;  // type of 2nd operand
+      IRType t_opnd3;  // type of 3rd operand
+      IRType t_opnd4;  // type of 4th operand
+      UInt  rounding_mode;
+      UInt  num_operands; // excluding rounding mode, if any
+      Bool  shift_amount_is_immediate;
+   }
+   IRICB;
+
+extern void LibVEX_InitIRI ( const IRICB * );
+
+/*-------------------------------------------------------*/
+/*--- Notes                                           ---*/
+/*-------------------------------------------------------*/
+
+/* Code generation conventions that need to be recorded somewhere.
+   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+   x86
+   ~~~
+   Generated code should be entered using a JMP instruction.  On
+   entry, %ebp should point to the guest state, and %esp should be a
+   valid stack pointer.  The generated code may change %eax, %ebx,
+   %ecx, %edx, %esi, %edi, all the FP registers and control state, and
+   all the XMM registers.
+
+   On entry, the FPU control word should be set to 0x027F, and the SSE
+   control word (%mxcsr) should be set to 0x1F80.  On exit, they
+   should still have those values (after masking off the lowest 6 bits
+   of %mxcsr).  If they don't, there is a bug in VEX-generated code.
+
+   Generated code returns to the scheduler using a JMP instruction, to
+   the address specified in the .dispatch field of VexTranslateArgs.
+   %eax (or %eax:%edx, if simulating a 64-bit target) will contain the
+   guest address of the next block to execute.  %ebp may be changed
+   to a VEX_TRC_ value, otherwise it should be as it was at entry.
+
+   CRITICAL ISSUES in x86 code generation.  The only known critical
+   issue is that the host FPU and SSE state is not properly saved
+   across calls to helper functions.  If any helper references any
+   such state, it is likely (1) to misbehave itself, since the FP
+   stack tags will not be as expected, and (2) after returning to
+   generated code, the generated code is likely to go wrong.  This
+   really should be fixed.
+
+   amd64
+   ~~~~~
+   Analogous to x86.
+
+   ppc32
+   ~~~~~
+   On entry, guest state pointer is r31.  .dispatch must be NULL.
+   Control is returned with a branch to the link register.  Generated
+   code will not change lr.  At return, r3 holds the next guest addr
+   (or r3:r4 ?).  r31 may be changed to a VEX_TRC_ value,
+   otherwise it should be as it was at entry.
+
+   ppc64
+   ~~~~~
+   Same as ppc32.
+
+   arm32
+   ~~~~~
+   r8 is GSP.
+
+   arm64
+   ~~~~~
+   r21 is GSP.
+
+   ALL GUEST ARCHITECTURES
+   ~~~~~~~~~~~~~~~~~~~~~~~
+   The guest state must contain two pseudo-registers, guest_CMSTART
+   and guest_CMLEN.  These are used to specify guest address ranges,
+   either of code to be invalidated, when used in conjunction with
+   Ijk_InvalICache, or of d-cache ranges to be flushed, when used in
+   conjunction with Ijk_FlushDCache.  In such cases, the two _CM
+   pseudo-regs should be filled in by the IR, and then an exit with
+   one of the two abovementioned Ijk_ kinds should happen, so that the
+   dispatcher can action them.  Both pseudo-regs must have size equal
+   to the guest word size.
+
+   The architecture must provide a third pseudo-register, guest_NRADDR, also
+   guest-word-sized.  This is used to record the unredirected guest
+   address at the start of a translation whose start has been
+   redirected.  By reading this pseudo-register shortly afterwards,
+   the translation can find out what the corresponding no-redirection
+   address was.  Note, this is only set for wrap-style redirects, not
+   for replace-style ones.
+*/
+#endif /* ndef __LIBVEX_H */
+
+/*---------------------------------------------------------------*/
+/*---                                                libvex.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_basictypes.h b/VEX/pub/libvex_basictypes.h
new file mode 100644
index 0000000..59859d0
--- /dev/null
+++ b/VEX/pub/libvex_basictypes.h
@@ -0,0 +1,200 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                               libvex_basictypes.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_BASICTYPES_H
+#define __LIBVEX_BASICTYPES_H
+
+/* It is important that the sizes of the following data types (on the
+   host) are as stated.  LibVEX_Init therefore checks these at
+   startup. */
+
+/* Always 8 bits. */
+typedef  unsigned char   UChar;
+typedef    signed char   Char;
+typedef           char   HChar; /* signedness depends on host */
+                                /* Only to be used for printf etc 
+                                   format strings */
+
+/* Always 16 bits. */
+typedef  unsigned short  UShort;
+typedef    signed short  Short;
+
+/* Always 32 bits. */
+typedef  unsigned int    UInt;
+typedef    signed int    Int;
+
+/* Always 64 bits. */
+typedef  unsigned long long int   ULong;
+typedef    signed long long int   Long;
+
+/* Equivalent of C's size_t type. The type is unsigned and has this
+   storage requirement:
+   32 bits on a 32-bit architecture
+   64 bits on a 64-bit architecture. */
+typedef  unsigned long SizeT;
+
+/* Always 128 bits. */
+typedef  UInt  U128[4];
+
+/* Always 256 bits. */
+typedef  UInt  U256[8];
+
+/* A union for doing 128-bit vector primitives conveniently. */
+typedef
+   union {
+      UChar  w8[16];
+      UShort w16[8];
+      UInt   w32[4];
+      ULong  w64[2];
+   }
+   V128;
+
+/* A union for doing 256-bit vector primitives conveniently. */
+typedef
+   union {
+      UChar  w8[32];
+      UShort w16[16];
+      UInt   w32[8];
+      ULong  w64[4];
+   }
+   V256;
+
+/* Floating point. */
+typedef  float   Float;    /* IEEE754 single-precision (32-bit) value */
+typedef  double  Double;   /* IEEE754 double-precision (64-bit) value */
+
+/* Bool is always 8 bits. */
+typedef  unsigned char  Bool;
+#define  True   ((Bool)1)
+#define  False  ((Bool)0)
+
+/* Use this to coerce the result of a C comparison to a Bool.  This is
+   useful when compiling with Intel icc with ultra-paranoid
+   compilation flags (-Wall). */
+static inline Bool toBool ( Int x ) {
+   Int r = (x == 0) ? False : True;
+   return (Bool)r;
+}
+static inline UChar toUChar ( Int x ) {
+   x &= 0xFF;
+   return (UChar)x;
+}
+static inline HChar toHChar ( Int x ) {
+   x &= 0xFF;
+   return (HChar)x;
+}
+static inline UShort toUShort ( Int x ) {
+   x &= 0xFFFF;
+   return (UShort)x;
+}
+static inline Short toShort ( Int x ) {
+   x &= 0xFFFF;
+   return (Short)x;
+}
+static inline UInt toUInt ( Long x ) {
+   x &= 0xFFFFFFFFLL;
+   return (UInt)x;
+}
+
+/* 32/64 bit addresses. */
+typedef  UInt      Addr32;
+typedef  ULong     Addr64;
+
+/* An address: 32-bit or 64-bit wide depending on host architecture */
+typedef unsigned long Addr;
+
+
+/* Something which has the same size as void* on the host.  That is,
+   it is 32 bits on a 32-bit host and 64 bits on a 64-bit host, and so
+   it can safely be coerced to and from a pointer type on the host
+   machine. */
+typedef  unsigned long HWord;
+
+/* Set up VEX_HOST_WORDSIZE and VEX_REGPARM. */
+#undef VEX_HOST_WORDSIZE
+#undef VEX_REGPARM
+
+/* The following 4 work OK for Linux. */
+#if defined(__x86_64__)
+#   define VEX_HOST_WORDSIZE 8
+#   define VEX_REGPARM(_n) /* */
+
+#elif defined(__i386__)
+#   define VEX_HOST_WORDSIZE 4
+#   define VEX_REGPARM(_n) __attribute__((regparm(_n)))
+
+#elif defined(__powerpc__) && defined(__powerpc64__)
+#   define VEX_HOST_WORDSIZE 8
+#   define VEX_REGPARM(_n) /* */
+
+#elif defined(__powerpc__) && !defined(__powerpc64__)
+#   define VEX_HOST_WORDSIZE 4
+#   define VEX_REGPARM(_n) /* */
+
+#elif defined(__arm__) && !defined(__aarch64__)
+#   define VEX_HOST_WORDSIZE 4
+#   define VEX_REGPARM(_n) /* */
+
+#elif defined(__aarch64__) && !defined(__arm__)
+#   define VEX_HOST_WORDSIZE 8
+#   define VEX_REGPARM(_n) /* */
+
+#elif defined(__s390x__)
+#   define VEX_HOST_WORDSIZE 8
+#   define VEX_REGPARM(_n) /* */
+
+#elif defined(__mips__) && (__mips == 64)
+#   define VEX_HOST_WORDSIZE 8
+#   define VEX_REGPARM(_n) /* */
+
+#elif defined(__mips__) && (__mips != 64)
+#   define VEX_HOST_WORDSIZE 4
+#   define VEX_REGPARM(_n) /* */
+
+#elif defined(__tilegx__)
+#   define VEX_HOST_WORDSIZE 8
+#   define VEX_REGPARM(_n) /* */
+
+#else
+#   error "Vex: Fatal: Can't establish the host architecture"
+#endif
+
+
+#endif /* ndef __LIBVEX_BASICTYPES_H */
+
+/*---------------------------------------------------------------*/
+/*---                                     libvex_basictypes.h ---*/
+/*---------------------------------------------------------------*/
+
diff --git a/VEX/pub/libvex_emnote.h b/VEX/pub/libvex_emnote.h
new file mode 100644
index 0000000..5e6a9d8
--- /dev/null
+++ b/VEX/pub/libvex_emnote.h
@@ -0,0 +1,136 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                   libvex_emnote.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_EMNOTE_H
+#define __LIBVEX_EMNOTE_H
+
+#include "libvex_basictypes.h"
+
+/* VEX can sometimes generate code which returns to the dispatcher
+   with the guest state pointer set to VEX_TRC_JMP_EMWARN or 
+   VEX_TRC_JMP_EMFAIL.  This means that VEX is trying to tell Valgrind
+   something noteworthy about emulation progress. For example, that Valgrind
+   is doing imprecise emulation in some sense.  The guest's pseudo-register
+   "guest_EMNOTE" will hold a value of type VexEmNote, which describes
+   the nature of the warning.  Currently the limitations that are
+   warned about apply primarily to floating point support.
+
+   All guest states must have a 32-bit (UInt) guest_EMNOTE pseudo-
+   register, into which emulation warnings can be written.
+
+   Note that guest_EMNOTE only carries a valid value at the jump
+   marked as VEX_TRC_JMP_EMWARN / VEX_TRC_JMP_EMFAIL.  You can't assume
+   it will continue to carry a valid value from any amount of time after
+   the jump.
+*/
+
+typedef
+   enum {
+      /* no note indicated */
+      EmNote_NONE=0,
+
+      /* unmasking x87 FP exceptions is not supported */
+      EmWarn_X86_x87exns,
+
+      /* change of x87 FP precision away from 64-bit (mantissa) */
+      EmWarn_X86_x87precision,
+
+      /* unmasking SSE FP exceptions is not supported */
+      EmWarn_X86_sseExns,
+      
+      /* setting mxcsr.fz is not supported */
+      EmWarn_X86_fz,
+      
+      /* setting mxcsr.daz is not supported */
+      EmWarn_X86_daz,
+
+      /* settings to %eflags.ac (alignment check) are noted but ignored */
+      EmWarn_X86_acFlag,
+      
+      /* unmasking PPC32/64 FP exceptions is not supported */
+      EmWarn_PPCexns,
+
+      /* overflow/underflow of the PPC64 _REDIR stack (ppc64 only) */
+      EmWarn_PPC64_redir_overflow,
+      EmWarn_PPC64_redir_underflow,
+
+      /* insn specifies a rounding mode other than "according to FPC"
+         which requires the floating point extension facility. But that
+         facility is not available on this host */
+      EmWarn_S390X_fpext_rounding,
+
+      /* insn (e.g. srnmb) specifies an invalid rounding mode */
+      EmWarn_S390X_invalid_rounding,
+
+      /* stfle insn is not supported on this host */
+      EmFail_S390X_stfle,
+
+      /* stckf insn is not supported on this host */
+      EmFail_S390X_stckf,
+
+      /* ecag insn is not supported on this host */
+      EmFail_S390X_ecag,
+
+      /* pfpo insn is not supported on this host */
+      EmFail_S390X_pfpo,
+
+      /* DFP insns are not supported on this host */
+      EmFail_S390X_DFP_insn,
+
+      /* insn needs floating point extension facility which is not
+         available on this host */
+      EmFail_S390X_fpext,
+
+      /* GPR 0 contains invalid rounding mode for PFPO instruction */
+      EmFail_S390X_invalid_PFPO_rounding_mode,
+
+      /* The function code specified in GPR 0 executed by PFPO
+         instruction is invalid */
+      EmFail_S390X_invalid_PFPO_function,
+
+      EmNote_NUMBER
+   }
+   VexEmNote;
+
+
+/* Produces a short string describing the warning. */
+extern const HChar* LibVEX_EmNote_string ( VexEmNote );
+
+
+#endif /* ndef __LIBVEX_EMNOTE_H */
+
+/*---------------------------------------------------------------*/
+/*---                                         libvex_emnote.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_guest_amd64.h b/VEX/pub/libvex_guest_amd64.h
new file mode 100644
index 0000000..1043b20
--- /dev/null
+++ b/VEX/pub/libvex_guest_amd64.h
@@ -0,0 +1,207 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                              libvex_guest_amd64.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_AMD64_H
+#define __LIBVEX_PUB_GUEST_AMD64_H
+
+#include "libvex_basictypes.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- Vex's representation of the AMD64 CPU state.            ---*/
+/*---------------------------------------------------------------*/
+
+/* See detailed comments at the top of libvex_guest_x86.h for
+   further info.  This representation closely follows the
+   x86 representation.
+*/
+
+
+typedef
+   struct {
+      /* Event check fail addr, counter, and padding to make RAX 16
+         aligned. */
+      /*   0 */ ULong  host_EvC_FAILADDR;
+      /*   8 */ UInt   host_EvC_COUNTER;
+      /*  12 */ UInt   pad0;
+      /*  16 */ ULong  guest_RAX;
+      /*  24 */ ULong  guest_RCX;
+      /*  32 */ ULong  guest_RDX;
+      /*  40 */ ULong  guest_RBX;
+      /*  48 */ ULong  guest_RSP;
+      /*  56 */ ULong  guest_RBP;
+      /*  64 */ ULong  guest_RSI;
+      /*  72 */ ULong  guest_RDI;
+      /*  80 */ ULong  guest_R8;
+      /*  88 */ ULong  guest_R9;
+      /*  96 */ ULong  guest_R10;
+      /* 104 */ ULong  guest_R11;
+      /* 112 */ ULong  guest_R12;
+      /* 120 */ ULong  guest_R13;
+      /* 128 */ ULong  guest_R14;
+      /* 136 */ ULong  guest_R15;
+      /* 4-word thunk used to calculate O S Z A C P flags. */
+      /* 144 */ ULong  guest_CC_OP;
+      /* 152 */ ULong  guest_CC_DEP1;
+      /* 160 */ ULong  guest_CC_DEP2;
+      /* 168 */ ULong  guest_CC_NDEP;
+      /* The D flag is stored here, encoded as either -1 or +1 */
+      /* 176 */ ULong  guest_DFLAG;
+      /* 184 */ ULong  guest_RIP;
+      /* Bit 18 (AC) of eflags stored here, as either 0 or 1. */
+      /* ... */ ULong  guest_ACFLAG;
+      /* Bit 21 (ID) of eflags stored here, as either 0 or 1. */
+      /* 192 */ ULong guest_IDFLAG;
+      /* Probably a lot more stuff too. 
+         D,ID flags
+         16  128-bit SSE registers
+         all the old x87 FPU gunk
+         segment registers */
+
+      /* HACK to e.g. make tls on amd64-linux work.  %fs only ever seems to
+         hold a constant value (zero on linux main thread, 0x63 in other
+         threads), and so guest_FS_CONST holds
+         the 64-bit offset associated with this constant %fs value. */
+      /* 200 */ ULong guest_FS_CONST;
+
+      /* YMM registers.  Note that these must be allocated
+         consecutively in order that the SSE4.2 PCMP{E,I}STR{I,M}
+         helpers can treat them as an array.  YMM16 is a fake reg used
+         as an intermediary in handling aforementioned insns. */
+      /* 208 */ULong guest_SSEROUND;
+      /* 216 */U256  guest_YMM0;
+      U256  guest_YMM1;
+      U256  guest_YMM2;
+      U256  guest_YMM3;
+      U256  guest_YMM4;
+      U256  guest_YMM5;
+      U256  guest_YMM6;
+      U256  guest_YMM7;
+      U256  guest_YMM8;
+      U256  guest_YMM9;
+      U256  guest_YMM10;
+      U256  guest_YMM11;
+      U256  guest_YMM12;
+      U256  guest_YMM13;
+      U256  guest_YMM14;
+      U256  guest_YMM15;
+      U256  guest_YMM16;
+
+      /* FPU */
+      /* Note.  Setting guest_FTOP to be ULong messes up the
+         delicately-balanced PutI/GetI optimisation machinery.
+         Therefore best to leave it as a UInt. */
+      UInt  guest_FTOP;
+      UInt  pad1;
+      ULong guest_FPREG[8];
+      UChar guest_FPTAG[8];
+      ULong guest_FPROUND;
+      ULong guest_FC3210;
+
+      /* Emulation notes */
+      UInt  guest_EMNOTE;
+      UInt  pad2;
+
+      /* Translation-invalidation area description.  Not used on amd64
+         (there is no invalidate-icache insn), but needed so as to
+         allow users of the library to uniformly assume that the guest
+         state contains these two fields -- otherwise there is
+         compilation breakage.  On amd64, these two fields are set to
+         zero by LibVEX_GuestAMD64_initialise and then should be
+         ignored forever thereafter. */
+      ULong guest_CMSTART;
+      ULong guest_CMLEN;
+
+      /* Used to record the unredirected guest address at the start of
+         a translation whose start has been redirected.  By reading
+         this pseudo-register shortly afterwards, the translation can
+         find out what the corresponding no-redirection address was.
+         Note, this is only set for wrap-style redirects, not for
+         replace-style ones. */
+      ULong guest_NRADDR;
+
+      /* Used for Darwin syscall dispatching. */
+      ULong guest_SC_CLASS;
+
+      /* HACK to make e.g. tls on darwin work, wine on linux work, ...
+         %gs only ever seems to hold a constant value (e.g. 0x60 on darwin,
+         0x6b on linux), and so guest_GS_CONST holds the 64-bit offset
+         associated with this constant %gs value.  (A direct analogue
+         of the %fs-const hack for amd64-linux). */
+      ULong guest_GS_CONST;
+
+      /* Needed for Darwin (but mandated for all guest architectures):
+         RIP at the last syscall insn (int 0x80/81/82, sysenter,
+         syscall).  Used when backing up to restart a syscall that has
+         been interrupted by a signal. */
+      ULong guest_IP_AT_SYSCALL;
+
+      /* Padding to make it have a 16-aligned size */
+      ULong pad3;
+   }
+   VexGuestAMD64State;
+
+
+
+/*---------------------------------------------------------------*/
+/*--- Utility functions for amd64 guest stuff.                ---*/
+/*---------------------------------------------------------------*/
+
+/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
+
+/* Initialise all guest amd64 state.  The FPU is put in default
+   mode. */
+extern
+void LibVEX_GuestAMD64_initialise ( /*OUT*/VexGuestAMD64State* vex_state );
+
+
+/* Extract from the supplied VexGuestAMD64State structure the
+   corresponding native %rflags value. */
+extern 
+ULong LibVEX_GuestAMD64_get_rflags ( /*IN*/const VexGuestAMD64State* vex_state );
+
+/* Set the carry flag in the given state to 'new_carry_flag', which
+   should be zero or one. */
+extern
+void
+LibVEX_GuestAMD64_put_rflag_c ( ULong new_carry_flag,
+                                /*MOD*/VexGuestAMD64State* vex_state );
+
+
+#endif /* ndef __LIBVEX_PUB_GUEST_AMD64_H */
+
+/*---------------------------------------------------------------*/
+/*---                                    libvex_guest_amd64.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_guest_arm.h b/VEX/pub/libvex_guest_arm.h
new file mode 100644
index 0000000..ae77b17
--- /dev/null
+++ b/VEX/pub/libvex_guest_arm.h
@@ -0,0 +1,224 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                libvex_guest_arm.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_ARM_H
+#define __LIBVEX_PUB_GUEST_ARM_H
+
+#include "libvex_basictypes.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- Vex's representation of the ARM CPU state.              ---*/
+/*---------------------------------------------------------------*/
+
+typedef
+   struct {
+      /* 0 */
+      /* Event check fail addr and counter. */
+      UInt host_EvC_FAILADDR; /* 0 */
+      UInt host_EvC_COUNTER;  /* 4 */
+      UInt guest_R0;
+      UInt guest_R1;
+      UInt guest_R2;
+      UInt guest_R3;
+      UInt guest_R4;
+      UInt guest_R5;
+      UInt guest_R6;
+      UInt guest_R7;
+      UInt guest_R8;
+      UInt guest_R9;
+      UInt guest_R10;
+      UInt guest_R11;
+      UInt guest_R12;
+      UInt guest_R13;     /* stack pointer */
+      UInt guest_R14;     /* link register */
+      UInt guest_R15T;    /* PC and Thumb bit; see encoding note below */
+      /* program counter[31:1] ++ [T], encoding both the current
+         instruction address and the ARM vs Thumb state of the
+         machine.  T==1 is Thumb, T==0 is ARM.  Hence values of the
+         form X--(31)--X1 denote a Thumb instruction at location
+         X--(31)--X0, values of the form X--(30)--X00 denote an ARM
+         instruction at precisely that address, and values of the form
+         X--(30)--10 are invalid since they would imply an ARM
+         instruction at a non-4-aligned address. */
+
+      /* 4-word thunk used to calculate N(sign) Z(zero) C(carry,
+         unsigned overflow) and V(signed overflow) flags. */
+      /* 72 */
+      UInt guest_CC_OP;
+      UInt guest_CC_DEP1;
+      UInt guest_CC_DEP2;
+      UInt guest_CC_NDEP;
+
+      /* A 32-bit value which is used to compute the APSR.Q (sticky
+         saturation) flag, when necessary.  If the value stored here
+         is zero, APSR.Q is currently zero.  If it is any other value,
+         APSR.Q is currently one. */
+      UInt guest_QFLAG32;
+
+      /* 32-bit values to represent APSR.GE0 .. GE3.  Same
+         zero-vs-nonzero scheme as for QFLAG32. */
+      UInt guest_GEFLAG0;
+      UInt guest_GEFLAG1;
+      UInt guest_GEFLAG2;
+      UInt guest_GEFLAG3;
+
+      /* Various pseudo-regs mandated by Vex or Valgrind. */
+      /* Emulation notes */
+      UInt guest_EMNOTE;
+
+      /* For clinval/clflush: record start and length of area */
+      UInt guest_CMSTART;
+      UInt guest_CMLEN;
+
+      /* Used to record the unredirected guest address at the start of
+         a translation whose start has been redirected.  By reading
+         this pseudo-register shortly afterwards, the translation can
+         find out what the corresponding no-redirection address was.
+         Note, this is only set for wrap-style redirects, not for
+         replace-style ones. */
+      UInt guest_NRADDR;
+
+      /* Needed for Darwin (but mandated for all guest architectures):
+         program counter at the last syscall insn (int 0x80/81/82,
+         sysenter, syscall, svc).  Used when backing up to restart a
+         syscall that has been interrupted by a signal. */
+      /* 124 */
+      UInt guest_IP_AT_SYSCALL;
+
+      /* VFP state.  D0 .. D15 must be 8-aligned. */
+      /* 128 */
+      ULong guest_D0;
+      ULong guest_D1;
+      ULong guest_D2;
+      ULong guest_D3;
+      ULong guest_D4;
+      ULong guest_D5;
+      ULong guest_D6;
+      ULong guest_D7;
+      ULong guest_D8;
+      ULong guest_D9;
+      ULong guest_D10;
+      ULong guest_D11;
+      ULong guest_D12;
+      ULong guest_D13;
+      ULong guest_D14;
+      ULong guest_D15;
+      ULong guest_D16;
+      ULong guest_D17;
+      ULong guest_D18;
+      ULong guest_D19;
+      ULong guest_D20;
+      ULong guest_D21;
+      ULong guest_D22;
+      ULong guest_D23;
+      ULong guest_D24;
+      ULong guest_D25;
+      ULong guest_D26;
+      ULong guest_D27;
+      ULong guest_D28;
+      ULong guest_D29;
+      ULong guest_D30;
+      ULong guest_D31;
+      UInt  guest_FPSCR;
+
+      /* Not a town in Cornwall, but instead the TPIDRURO, one of the
+         Thread ID registers present in CP15 (the system control
+         coprocessor), register set "c13", register 3 (the User
+         Read-only Thread ID Register).  arm-linux apparently uses it
+         to hold the TLS pointer for the thread.  It's read-only in
+         user space.  On Linux it is set in user space by various
+         thread-related syscalls. */
+      UInt guest_TPIDRURO;
+
+      /* Representation of the Thumb IT state.  ITSTATE is a 32-bit
+         value with 4 8-bit lanes.  [7:0] pertain to the next insn to
+         execute, [15:8] for the one after that, etc.  The per-insn
+         update to ITSTATE is to unsignedly shift it right 8 bits,
+         hence introducing a zero byte for the furthest ahead
+         instruction.  As per the next para, a zero byte denotes the
+         condition ALWAYS.
+
+         Each byte lane has one of the two following formats:
+
+         cccc 0001  for an insn which is part of an IT block.  cccc is
+                    the guarding condition (standard ARM condition
+                    code) XORd with 0xE, so as to cause 'cccc == 0'
+                    to encode the condition ALWAYS.
+
+         0000 0000  for an insn which is not part of an IT block.
+
+         If the bottom 4 bits are zero then the top 4 must be too.
+
+         Given the byte lane for an instruction, the guarding
+         condition for the instruction is (((lane >> 4) & 0xF) ^ 0xE).
+         This is not as stupid as it sounds, because the front end
+         elides the shift.  And the am-I-in-an-IT-block check is
+         (lane != 0).
+
+         In the case where (by whatever means) we know at JIT time
+         that an instruction is not in an IT block, we can prefix its
+         IR with assignments ITSTATE = 0 and hence have iropt fold out
+         the testing code.
+
+         The condition "is outside or last in IT block" corresponds
+         to the top 24 bits of ITSTATE being zero.
+      */
+      UInt guest_ITSTATE;
+
+      /* Padding to make it have a 16-aligned size */
+      UInt padding1;
+   }
+   VexGuestARMState;
+
+
+/*---------------------------------------------------------------*/
+/*--- Utility functions for ARM guest stuff.                  ---*/
+/*---------------------------------------------------------------*/
+
+/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
+
+/* Initialise all guest ARM state. */
+
+extern
+void LibVEX_GuestARM_initialise ( /*OUT*/VexGuestARMState* vex_state );
+
+/* Calculate the ARM flag state from the saved data. */
+
+extern
+UInt LibVEX_GuestARM_get_cpsr ( /*IN*/const VexGuestARMState* vex_state );
+
+
+#endif /* ndef __LIBVEX_PUB_GUEST_ARM_H */
+
+
+/*---------------------------------------------------------------*/
+/*---                                      libvex_guest_arm.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_guest_arm64.h b/VEX/pub/libvex_guest_arm64.h
new file mode 100644
index 0000000..ac22432
--- /dev/null
+++ b/VEX/pub/libvex_guest_arm64.h
@@ -0,0 +1,203 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                              libvex_guest_arm64.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2013-2013 OpenWorks
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_ARM64_H
+#define __LIBVEX_PUB_GUEST_ARM64_H
+
+#include "libvex_basictypes.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- Vex's representation of the ARM64 CPU state.            ---*/
+/*---------------------------------------------------------------*/
+
+typedef
+   struct {
+      /* Event check fail addr and counter. */
+      /* 0 */  ULong host_EvC_FAILADDR;
+      /* 8 */  UInt  host_EvC_COUNTER;
+      /* 12 */ UInt  pad0;
+      /* 16 */
+      ULong guest_X0;
+      ULong guest_X1;
+      ULong guest_X2;
+      ULong guest_X3;
+      ULong guest_X4;
+      ULong guest_X5;
+      ULong guest_X6;
+      ULong guest_X7;
+      ULong guest_X8;
+      ULong guest_X9;
+      ULong guest_X10;
+      ULong guest_X11;
+      ULong guest_X12;
+      ULong guest_X13;
+      ULong guest_X14;
+      ULong guest_X15;
+      ULong guest_X16;
+      ULong guest_X17;
+      ULong guest_X18;
+      ULong guest_X19;
+      ULong guest_X20;
+      ULong guest_X21;
+      ULong guest_X22;
+      ULong guest_X23;
+      ULong guest_X24;
+      ULong guest_X25;
+      ULong guest_X26;
+      ULong guest_X27;
+      ULong guest_X28;
+      ULong guest_X29;
+      ULong guest_X30;     /* link register */
+      ULong guest_XSP;
+      ULong guest_PC;
+
+      /* 4-word thunk used to calculate N(sign) Z(zero) C(carry,
+         unsigned overflow) and V(signed overflow) flags. */
+      ULong guest_CC_OP;
+      ULong guest_CC_DEP1;
+      ULong guest_CC_DEP2;
+      ULong guest_CC_NDEP;
+
+      /* User-space thread register (TPIDR_EL0); presumably holds the
+         TLS pointer on arm64-linux, by analogy with the 32-bit ARM
+         TPIDRURO -- TODO confirm against the guest_arm64 code. */
+      ULong guest_TPIDR_EL0;
+
+      /* FP/SIMD state */
+      U128 guest_Q0;
+      U128 guest_Q1;
+      U128 guest_Q2;
+      U128 guest_Q3;
+      U128 guest_Q4;
+      U128 guest_Q5;
+      U128 guest_Q6;
+      U128 guest_Q7;
+      U128 guest_Q8;
+      U128 guest_Q9;
+      U128 guest_Q10;
+      U128 guest_Q11;
+      U128 guest_Q12;
+      U128 guest_Q13;
+      U128 guest_Q14;
+      U128 guest_Q15;
+      U128 guest_Q16;
+      U128 guest_Q17;
+      U128 guest_Q18;
+      U128 guest_Q19;
+      U128 guest_Q20;
+      U128 guest_Q21;
+      U128 guest_Q22;
+      U128 guest_Q23;
+      U128 guest_Q24;
+      U128 guest_Q25;
+      U128 guest_Q26;
+      U128 guest_Q27;
+      U128 guest_Q28;
+      U128 guest_Q29;
+      U128 guest_Q30;
+      U128 guest_Q31;
+
+      /* A 128-bit value which is used to represent the FPSR.QC (sticky
+         saturation) flag, when necessary.  If the value stored here
+         is zero, FPSR.QC is currently zero.  If it is any other value,
+         FPSR.QC is currently one.  We don't currently represent any 
+         other bits of FPSR, so this is all that there is for FPSR. */
+      U128 guest_QCFLAG;
+
+      /* Various pseudo-regs mandated by Vex or Valgrind. */
+      /* Emulation notes */
+      UInt guest_EMNOTE;
+
+      /* For clflush/clinval: record start and length of area */
+      ULong guest_CMSTART;
+      ULong guest_CMLEN;
+
+      /* Used to record the unredirected guest address at the start of
+         a translation whose start has been redirected.  By reading
+         this pseudo-register shortly afterwards, the translation can
+         find out what the corresponding no-redirection address was.
+         Note, this is only set for wrap-style redirects, not for
+         replace-style ones. */
+      ULong guest_NRADDR;
+
+      /* Needed for Darwin (but mandated for all guest architectures):
+         program counter at the last syscall insn (int 0x80/81/82,
+         sysenter, syscall, svc).  Used when backing up to restart a
+         syscall that has been interrupted by a signal. */
+      ULong guest_IP_AT_SYSCALL;
+
+      /* The complete FPCR.  Default value seems to be zero.  We
+         ignore all bits except 23 and 22, which are the rounding
+         mode.  The guest is unconstrained in what values it can write
+         to and read from this register, but the emulation only takes
+         note of bits 23 and 22. */
+      UInt  guest_FPCR;
+
+      /* Padding to make it have a 16-aligned size */
+      /* UInt  pad_end_0; */
+      /* ULong pad_end_1; */
+   }
+   VexGuestARM64State;
+
+
+/*---------------------------------------------------------------*/
+/*--- Utility functions for ARM64 guest stuff.                ---*/
+/*---------------------------------------------------------------*/
+
+/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
+
+/* Initialise all guest ARM64 state. */
+
+extern
+void LibVEX_GuestARM64_initialise ( /*OUT*/VexGuestARM64State* vex_state );
+
+/* Calculate the ARM64 flag state from the saved data, in the format
+   32x0:n:z:c:v:28x0. */
+extern
+ULong LibVEX_GuestARM64_get_nzcv ( /*IN*/
+                                   const VexGuestARM64State* vex_state );
+
+/* Calculate the ARM64 FPSR state from the saved data, in the format
+   36x0:qc:27x0 */
+extern
+ULong LibVEX_GuestARM64_get_fpsr ( /*IN*/
+                                   const VexGuestARM64State* vex_state );
+
+/* Set the ARM64 FPSR representation from the given FPSR value. */
+extern
+void LibVEX_GuestARM64_set_fpsr ( /*MOD*/VexGuestARM64State* vex_state,
+                                  ULong fpsr );
+                                  
+
+#endif /* ndef __LIBVEX_PUB_GUEST_ARM64_H */
+
+
+/*---------------------------------------------------------------*/
+/*---                                    libvex_guest_arm64.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_guest_mips32.h b/VEX/pub/libvex_guest_mips32.h
new file mode 100644
index 0000000..99b9dbb
--- /dev/null
+++ b/VEX/pub/libvex_guest_mips32.h
@@ -0,0 +1,169 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                             libvex_guest_mips32.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 RT-RK
+      mips-valgrind@rt-rk.com
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_MIPS32_H
+#define __LIBVEX_PUB_GUEST_MIPS32_H
+
+#include "libvex_basictypes.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- Vex's representation of the MIPS32 CPU state.           ---*/
+/*---------------------------------------------------------------*/
+
+typedef
+   struct {
+      /* CPU Registers */
+      /* 0 */ UInt guest_r0;   /* Hardwired to 0 */
+      /* 4 */ UInt guest_r1;   /* Assembler temporary */
+      /* 8 */ UInt guest_r2;   /* Values for function returns ...*/
+      /* 12 */ UInt guest_r3;  /* ...and expression evaluation */
+      /* 16 */ UInt guest_r4;  /* Function arguments */
+      /* 20 */ UInt guest_r5;
+      /* 24 */ UInt guest_r6;
+      /* 28 */ UInt guest_r7;
+      /* 32 */ UInt guest_r8;  /* Temporaries */
+      /* 36 */ UInt guest_r9;
+      /* 40 */ UInt guest_r10;
+      /* 44 */ UInt guest_r11;
+      /* 48 */ UInt guest_r12;
+      /* 52 */ UInt guest_r13;
+      /* 56 */ UInt guest_r14;
+      /* 60 */ UInt guest_r15;
+      /* 64 */ UInt guest_r16;  /* Saved temporaries */
+      /* 68 */ UInt guest_r17;
+      /* 72 */ UInt guest_r18;
+      /* 76 */ UInt guest_r19;
+      /* 80 */ UInt guest_r20;
+      /* 84 */ UInt guest_r21;
+      /* 88 */ UInt guest_r22;
+      /* 92 */ UInt guest_r23;
+      /* 96 */ UInt guest_r24;  /* Temporaries */
+      /* 100 */ UInt guest_r25;
+      /* 104 */ UInt guest_r26;  /* Reserved for OS kernel */
+      /* 108 */ UInt guest_r27;
+      /* 112 */ UInt guest_r28;  /* Global pointer */
+      /* 116 */ UInt guest_r29;  /* Stack pointer */
+      /* 120 */ UInt guest_r30;  /* Frame pointer */
+      /* 124 */ UInt guest_r31;  /* Return address */
+      /* 128 */ UInt guest_PC;  /* Program counter */
+      /* 132 */ UInt guest_HI;  /* Multiply and divide register higher result */
+      /* 136 */ UInt guest_LO;  /* Multiply and divide register lower result */
+
+      /* FPU Registers.  Offset jumps from 140 to 144 so that the
+         8-byte f-registers are 8-aligned. */
+      /* 144 */ ULong guest_f0;  /* Floating point general purpose registers */
+      /* 152 */ ULong guest_f1;
+      /* 160 */ ULong guest_f2;
+      /* 168 */ ULong guest_f3;
+      /* 176 */ ULong guest_f4;
+      /* 184 */ ULong guest_f5;
+      /* 192 */ ULong guest_f6;
+      /* 200 */ ULong guest_f7;
+      /* 208 */ ULong guest_f8;
+      /* 216 */ ULong guest_f9;
+      /* 224 */ ULong guest_f10;
+      /* 232 */ ULong guest_f11;
+      /* 240 */ ULong guest_f12;
+      /* 248 */ ULong guest_f13;
+      /* 256 */ ULong guest_f14;
+      /* 264 */ ULong guest_f15;
+      /* 272 */ ULong guest_f16;
+      /* 280 */ ULong guest_f17;
+      /* 288 */ ULong guest_f18;
+      /* 296 */ ULong guest_f19;
+      /* 304 */ ULong guest_f20;
+      /* 312 */ ULong guest_f21;
+      /* 320 */ ULong guest_f22;
+      /* 328 */ ULong guest_f23;
+      /* 336 */ ULong guest_f24;
+      /* 344 */ ULong guest_f25;
+      /* 352 */ ULong guest_f26;
+      /* 360 */ ULong guest_f27;
+      /* 368 */ ULong guest_f28;
+      /* 376 */ ULong guest_f29;
+      /* 384 */ ULong guest_f30;
+      /* 392 */ ULong guest_f31;
+
+      /* FPU control/status registers. */
+      /* 400 */ UInt guest_FIR;
+      /* 404 */ UInt guest_FCCR;
+      /* 408 */ UInt guest_FEXR;
+      /* 412 */ UInt guest_FENR;
+      /* 416 */ UInt guest_FCSR;
+
+      /* TLS pointer for the thread. It's read-only in user space.
+         On Linux it is set in user space by various thread-related
+         syscalls.
+         User Local Register.
+         This register provides read access to the coprocessor 0
+         UserLocal register, if it is implemented. In some operating
+         environments, the UserLocal register is a pointer to a
+         thread-specific storage block.
+      */
+      /* 420 */ UInt guest_ULR;
+
+      /* Emulation notes */
+      /* 424 */ UInt guest_EMNOTE;
+
+      /* For clflush: record start and length of area to invalidate */
+      /* 428 */ UInt guest_CMSTART;
+      /* 432 */ UInt guest_CMLEN;
+
+      /* Unredirected guest address at the start of a redirected
+         translation; only set for wrap-style redirects. */
+      /* 436 */ UInt guest_NRADDR;
+
+      /* Event check fail addr and counter. */
+      /* 440 */ UInt host_EvC_FAILADDR;
+      /* 444 */ UInt host_EvC_COUNTER;
+      /* NOTE(review): purpose of guest_COND not evident from this
+         header -- confirm against the guest_mips front end. */
+      /* 448 */ UInt guest_COND;
+
+      /* MIPS32 DSP ASE(r2) specific registers. */
+      /* 452 */ UInt guest_DSPControl;
+      /* 456 */ ULong guest_ac0;   /* DSP accumulators */
+      /* 464 */ ULong guest_ac1;
+      /* 472 */ ULong guest_ac2;
+      /* 480 */ ULong guest_ac3;
+
+      /* Padding to make it have a 16-aligned size */
+        UInt padding;
+} VexGuestMIPS32State;
+/*---------------------------------------------------------------*/
+/*--- Utility functions for MIPS32 guest stuff.               ---*/
+/*---------------------------------------------------------------*/
+
+/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
+
+/* Initialise all guest MIPS32 state. */
+
+extern
+void LibVEX_GuestMIPS32_initialise ( /*OUT*/VexGuestMIPS32State* vex_state );
+
+
+#endif /* ndef __LIBVEX_PUB_GUEST_MIPS32_H */
+
+
+/*---------------------------------------------------------------*/
+/*---                                   libvex_guest_mips32.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_guest_mips64.h b/VEX/pub/libvex_guest_mips64.h
new file mode 100644
index 0000000..70073b6
--- /dev/null
+++ b/VEX/pub/libvex_guest_mips64.h
@@ -0,0 +1,167 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                             libvex_guest_mips64.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2010-2013 RT-RK
+      mips-valgrind@rt-rk.com
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_MIPS64_H
+#define __LIBVEX_PUB_GUEST_MIPS64_H
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- Vex's representation of the MIPS64 CPU state.           ---*/
+/*---------------------------------------------------------------*/
+
+typedef
+   struct {
+      /* CPU Registers */
+      /*   0 */ ULong guest_r0;   /* Hardwired to 0 */
+      /*   8 */ ULong guest_r1;   /* Assembler temporary */
+      /*   16 */ ULong guest_r2;  /* Values for function returns ...*/
+      /*   24 */ ULong guest_r3;  /* ...and expression evaluation */
+      /*   32 */ ULong guest_r4;  /* Function arguments */
+      /*   40 */ ULong guest_r5;
+      /*   48 */ ULong guest_r6;
+      /*   56 */ ULong guest_r7;
+      /*   64 */ ULong guest_r8;
+      /*   72 */ ULong guest_r9;
+      /*   80 */ ULong guest_r10;
+      /*   88 */ ULong guest_r11;
+      /*   96 */ ULong guest_r12;  /* Temporaries */
+      /*   104 */ ULong guest_r13;
+      /*   112 */ ULong guest_r14;
+      /*   120 */ ULong guest_r15;
+      /*   128 */ ULong guest_r16;  /* Saved temporaries */
+      /*   136 */ ULong guest_r17;
+      /*   144 */ ULong guest_r18;
+      /*   152 */ ULong guest_r19;
+      /*   160 */ ULong guest_r20;
+      /*   168 */ ULong guest_r21;
+      /*   176 */ ULong guest_r22;
+      /*   184 */ ULong guest_r23;
+      /*   192 */ ULong guest_r24;  /* Temporaries */
+      /*   200 */ ULong guest_r25;
+      /*   208 */ ULong guest_r26;  /* Reserved for OS kernel */
+      /*   216 */ ULong guest_r27;
+      /*   224 */ ULong guest_r28;  /* Global pointer */
+      /*   232 */ ULong guest_r29;  /* Stack pointer */
+      /*   240 */ ULong guest_r30;  /* Frame pointer */
+      /*   248 */ ULong guest_r31;  /* Return address */
+      /*   256 */ ULong guest_PC;   /* Program counter */
+      /*   264 */ ULong guest_HI;   /* Multiply and divide reg higher result */
+      /*   272 */ ULong guest_LO;   /* Multiply and divide reg lower result */
+
+      /* FPU Registers */
+      /*   280 */ ULong guest_f0;   /* Floating point general purpose registers */
+      /*   288 */ ULong guest_f1;
+      /*   296 */ ULong guest_f2;
+      /*   304 */ ULong guest_f3;
+      /*   312 */ ULong guest_f4;
+      /*   320 */ ULong guest_f5;
+      /*   328 */ ULong guest_f6;
+      /*   336 */ ULong guest_f7;
+      /*   344 */ ULong guest_f8;
+      /*   352 */ ULong guest_f9;
+      /*   360 */ ULong guest_f10;
+      /*   368 */ ULong guest_f11;
+      /*   376 */ ULong guest_f12;
+      /*   384 */ ULong guest_f13;
+      /*   392 */ ULong guest_f14;
+      /*   400 */ ULong guest_f15;
+      /*   408 */ ULong guest_f16;
+      /*   416 */ ULong guest_f17;
+      /*   424 */ ULong guest_f18;
+      /*   432 */ ULong guest_f19;
+      /*   440 */ ULong guest_f20;
+      /*   448 */ ULong guest_f21;
+      /*   456 */ ULong guest_f22;
+      /*   464 */ ULong guest_f23;
+      /*   472 */ ULong guest_f24;
+      /*   480 */ ULong guest_f25;
+      /*   488 */ ULong guest_f26;
+      /*   496 */ ULong guest_f27;
+      /*   504 */ ULong guest_f28;
+      /*   512 */ ULong guest_f29;
+      /*   520 */ ULong guest_f30;
+      /*   528 */ ULong guest_f31;
+
+      /* FPU control/status registers. */
+      /*   536 */ UInt guest_FIR;
+      /*   540 */ UInt guest_FCCR;
+      /*   544 */ UInt guest_FEXR;
+      /*   548 */ UInt guest_FENR;
+      /*   552 */ UInt guest_FCSR;
+
+      /* TLS pointer for the thread. It's read-only in user space. On Linux it
+         is set in user space by various thread-related syscalls.
+         User Local Register.
+         This register provides read access to the coprocessor 0
+         UserLocal register, if it is implemented. In some operating
+         environments, the UserLocal register is a pointer to a thread-specific
+         storage block.
+       */
+        ULong guest_ULR;         /* 560 */
+
+      /* Emulation notes */
+        UInt guest_EMNOTE;       /* 568 */
+
+      /* For clflush: record start and length of area to invalidate */
+        ULong guest_CMSTART;     /* 576 */
+        ULong guest_CMLEN;       /* 584 */
+
+      /* Unredirected guest address at the start of a redirected
+         translation; only set for wrap-style redirects. */
+        ULong guest_NRADDR;      /* 592 */
+
+      /* Event check fail addr and counter. */
+        ULong host_EvC_FAILADDR; /* 600 */
+        UInt host_EvC_COUNTER;   /* 608 */
+      /* NOTE(review): purpose of guest_COND not evident from this
+         header -- confirm against the guest_mips front end. */
+        UInt guest_COND;         /* 612 */
+      /* Padding to make it have a 16-aligned size */
+        UInt padding[2];
+} VexGuestMIPS64State;
+
+/*---------------------------------------------------------------*/
+/*--- Utility functions for MIPS64 guest stuff.               ---*/
+/*---------------------------------------------------------------*/
+
+/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
+
+/* Initialise all guest MIPS64 state. */
+
+extern
+void LibVEX_GuestMIPS64_initialise ( /*OUT*/VexGuestMIPS64State* vex_state );
+
+#endif /* ndef __LIBVEX_PUB_GUEST_MIPS64_H */
+
+/*---------------------------------------------------------------*/
+/*---                                   libvex_guest_mips64.h ---*/
+/*---------------------------------------------------------------*/
+
diff --git a/VEX/pub/libvex_guest_ppc32.h b/VEX/pub/libvex_guest_ppc32.h
new file mode 100644
index 0000000..3c2fd9e
--- /dev/null
+++ b/VEX/pub/libvex_guest_ppc32.h
@@ -0,0 +1,293 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                              libvex_guest_ppc32.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_PPC32_H
+#define __LIBVEX_PUB_GUEST_PPC32_H
+
+#include "libvex_basictypes.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- Vex's representation of the PPC32 CPU state             ---*/
+/*---------------------------------------------------------------*/
+
+#define VEX_GUEST_PPC32_REDIR_STACK_SIZE (16/*entries*/ * 2/*words per entry*/)
+
+typedef
+   struct {
+      /* Event check fail addr and counter. */
+      /*   0 */ UInt host_EvC_FAILADDR;
+      /*   4 */ UInt host_EvC_COUNTER;
+      /*   8 */ UInt pad3;
+      /*  12 */ UInt pad4; 
+      /* NB: offsets in the comments below omit this 16-byte prologue;
         add 16 to get the true offset within the struct. */
+      /* General Purpose Registers */
+      /*   0 */ UInt guest_GPR0;
+      /*   4 */ UInt guest_GPR1;
+      /*   8 */ UInt guest_GPR2;
+      /*  12 */ UInt guest_GPR3;
+      /*  16 */ UInt guest_GPR4;
+      /*  20 */ UInt guest_GPR5;
+      /*  24 */ UInt guest_GPR6;
+      /*  28 */ UInt guest_GPR7;
+      /*  32 */ UInt guest_GPR8;
+      /*  36 */ UInt guest_GPR9;
+      /*  40 */ UInt guest_GPR10;
+      /*  44 */ UInt guest_GPR11;
+      /*  48 */ UInt guest_GPR12;
+      /*  52 */ UInt guest_GPR13;
+      /*  56 */ UInt guest_GPR14;
+      /*  60 */ UInt guest_GPR15;
+      /*  64 */ UInt guest_GPR16;
+      /*  68 */ UInt guest_GPR17;
+      /*  72 */ UInt guest_GPR18;
+      /*  76 */ UInt guest_GPR19;
+      /*  80 */ UInt guest_GPR20;
+      /*  84 */ UInt guest_GPR21;
+      /*  88 */ UInt guest_GPR22;
+      /*  92 */ UInt guest_GPR23;
+      /*  96 */ UInt guest_GPR24;
+      /* 100 */ UInt guest_GPR25;
+      /* 104 */ UInt guest_GPR26;
+      /* 108 */ UInt guest_GPR27;
+      /* 112 */ UInt guest_GPR28;
+      /* 116 */ UInt guest_GPR29;
+      /* 120 */ UInt guest_GPR30;
+      /* 124 */ UInt guest_GPR31;
+
+      // Vector Registers, Floating Point Registers, and VSX Registers
+      // With ISA 2.06, the "Vector-Scalar Floating-point" category
+      // provides facilities to support vector and scalar binary floating-
+      // point operations.  A unified register file is an integral part
+      // of this new facility, combining floating point and vector registers
+      // using a 64x128-bit vector.  These are referred to as VSR[0..63].
+      // The floating point registers are now mapped into double word element 0
+      // of VSR[0..31]. The 32x128-bit vector registers defined by the "Vector
+      // Facility [Category: Vector]" are now mapped to VSR[32..63].
+
+      // IMPORTANT: the user of libvex must place the guest state so as
+      // to ensure that guest_VSR{0..63}, and any shadows thereof, are
+      // 16-aligned.
+
+      /*  128 */ U128 guest_VSR0;
+      /*  144 */ U128 guest_VSR1;
+      /*  160 */ U128 guest_VSR2;
+      /*  176 */ U128 guest_VSR3;
+      /*  192 */ U128 guest_VSR4;
+      /*  208 */ U128 guest_VSR5;
+      /*  224 */ U128 guest_VSR6;
+      /*  240 */ U128 guest_VSR7;
+      /*  256 */ U128 guest_VSR8;
+      /*  272 */ U128 guest_VSR9;
+      /*  288 */ U128 guest_VSR10;
+      /*  304 */ U128 guest_VSR11;
+      /*  320 */ U128 guest_VSR12;
+      /*  336 */ U128 guest_VSR13;
+      /*  352 */ U128 guest_VSR14;
+      /*  368 */ U128 guest_VSR15;
+      /*  384 */ U128 guest_VSR16;
+      /*  400 */ U128 guest_VSR17;
+      /*  416 */ U128 guest_VSR18;
+      /*  432 */ U128 guest_VSR19;
+      /*  448 */ U128 guest_VSR20;
+      /*  464 */ U128 guest_VSR21;
+      /*  480 */ U128 guest_VSR22;
+      /*  496 */ U128 guest_VSR23;
+      /*  512 */ U128 guest_VSR24;
+      /*  528 */ U128 guest_VSR25;
+      /*  544 */ U128 guest_VSR26;
+      /*  560 */ U128 guest_VSR27;
+      /*  576 */ U128 guest_VSR28;
+      /*  592 */ U128 guest_VSR29;
+      /*  608 */ U128 guest_VSR30;
+      /*  624 */ U128 guest_VSR31;
+      /*  640 */ U128 guest_VSR32;
+      /*  656 */ U128 guest_VSR33;
+      /*  672 */ U128 guest_VSR34;
+      /*  688 */ U128 guest_VSR35;
+      /*  704 */ U128 guest_VSR36;
+      /*  720 */ U128 guest_VSR37;
+      /*  736 */ U128 guest_VSR38;
+      /*  752 */ U128 guest_VSR39;
+      /*  768 */ U128 guest_VSR40;
+      /*  784 */ U128 guest_VSR41;
+      /*  800 */ U128 guest_VSR42;
+      /*  816 */ U128 guest_VSR43;
+      /*  832 */ U128 guest_VSR44;
+      /*  848 */ U128 guest_VSR45;
+      /*  864 */ U128 guest_VSR46;
+      /*  880 */ U128 guest_VSR47;
+      /*  896 */ U128 guest_VSR48;
+      /*  912 */ U128 guest_VSR49;
+      /*  928 */ U128 guest_VSR50;
+      /*  944 */ U128 guest_VSR51;
+      /*  960 */ U128 guest_VSR52;
+      /*  976 */ U128 guest_VSR53;
+      /*  992 */ U128 guest_VSR54;
+      /* 1008 */ U128 guest_VSR55;
+      /* 1024 */ U128 guest_VSR56;
+      /* 1040 */ U128 guest_VSR57;
+      /* 1056 */ U128 guest_VSR58;
+      /* 1072 */ U128 guest_VSR59;
+      /* 1088 */ U128 guest_VSR60;
+      /* 1104 */ U128 guest_VSR61;
+      /* 1120 */ U128 guest_VSR62;
+      /* 1136 */ U128 guest_VSR63;
+
+      /* 1152 */ UInt guest_CIA;    // IP (no arch visible register)
+      /* 1156 */ UInt guest_LR;     // Link Register
+      /* 1160 */ UInt guest_CTR;    // Count Register
+
+      /* XER pieces */
+      /* 1164 */ UChar guest_XER_SO; /* in lsb */
+      /* 1165 */ UChar guest_XER_OV; /* in lsb */
+      /* 1166 */ UChar guest_XER_CA; /* in lsb */
+      /* 1167 */ UChar guest_XER_BC; /* all bits */
+
+      /* CR pieces */
+      /* 1168 */ UChar guest_CR0_321; /* in [3:1] */
+      /* 1169 */ UChar guest_CR0_0;   /* in lsb */
+      /* 1170 */ UChar guest_CR1_321; /* in [3:1] */
+      /* 1171 */ UChar guest_CR1_0;   /* in lsb */
+      /* 1172 */ UChar guest_CR2_321; /* in [3:1] */
+      /* 1173 */ UChar guest_CR2_0;   /* in lsb */
+      /* 1174 */ UChar guest_CR3_321; /* in [3:1] */
+      /* 1175 */ UChar guest_CR3_0;   /* in lsb */
+      /* 1176 */ UChar guest_CR4_321; /* in [3:1] */
+      /* 1177 */ UChar guest_CR4_0;   /* in lsb */
+      /* 1178 */ UChar guest_CR5_321; /* in [3:1] */
+      /* 1179 */ UChar guest_CR5_0;   /* in lsb */
+      /* 1180 */ UChar guest_CR6_321; /* in [3:1] */
+      /* 1181 */ UChar guest_CR6_0;   /* in lsb */
+      /* 1182 */ UChar guest_CR7_321; /* in [3:1] */
+      /* 1183 */ UChar guest_CR7_0;   /* in lsb */
+
+      /* FP Status & Control Register fields. Only rounding mode fields are supported. */
+      /* 1184 */ UChar guest_FPROUND; // Binary Floating Point Rounding Mode
+      /* 1185 */ UChar guest_DFPROUND; // Decimal Floating Point Rounding Mode
+      /* 1186 */ UChar pad1;
+      /* 1187 */ UChar pad2;
+
+      /* Vector Save/Restore Register */
+      /* 1188 */ UInt guest_VRSAVE;
+
+      /* Vector Status and Control Register */
+      /* 1192 */ UInt guest_VSCR;
+
+      /* Emulation notes */
+      /* 1196 */ UInt guest_EMNOTE;
+
+      /* For icbi: record start and length of area to invalidate */
+      /* 1200 */ UInt guest_CMSTART;
+      /* 1204 */ UInt guest_CMLEN;
+
+      /* Used to record the unredirected guest address at the start of
+         a translation whose start has been redirected.  By reading
+         this pseudo-register shortly afterwards, the translation can
+         find out what the corresponding no-redirection address was.
+         Note, this is only set for wrap-style redirects, not for
+         replace-style ones. */
+      /* 1208 */ UInt guest_NRADDR;
+      /* 1212 */ UInt guest_NRADDR_GPR2; /* needed by AIX */
+
+     /* A grows-upwards stack for hidden saves/restores of LR and R2
+        needed for function interception and wrapping on ppc32-aix5.
+        A horrible hack.  REDIR_SP points to the highest live entry,
+        and so starts at -1. */
+      /* 1216 */ UInt guest_REDIR_SP;
+      /* 1220 */ UInt guest_REDIR_STACK[VEX_GUEST_PPC32_REDIR_STACK_SIZE];
+
+      /* Needed for Darwin (but mandated for all guest architectures):
+         CIA at the last SC insn.  Used when backing up to restart a
+         syscall that has been interrupted by a signal. */
+      /* 1348 */ UInt guest_IP_AT_SYSCALL;
+
+      /* SPRG3, which AIUI is readonly in user space.  Needed for
+         threading on AIX. */
+      /* 1352 */ UInt guest_SPRG3_RO;
+      /* 1356 */ UInt  padding1;
+      /* 1360 */ ULong guest_TFHAR;     // Transaction Failure Handler Address Register
+      /* 1368 */ ULong guest_TEXASR;    // Transaction EXception And Summary Register
+      /* 1376 */ ULong guest_TFIAR;     // Transaction Failure Instruction Address Register
+      /* 1384 */ UInt  guest_TEXASRU;   // Transaction EXception And Summary Register Upper
+
+      /* Padding to make it have a 16-aligned size */
+      /* 1388 */ UInt  padding2;
+
+   }
+   VexGuestPPC32State;
+
+
+/*---------------------------------------------------------------*/
+/*--- Utility functions for PPC32 guest stuff.                ---*/
+/*---------------------------------------------------------------*/
+
+/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
+
+/* Initialise all guest PPC32 state. */
+
+extern
+void LibVEX_GuestPPC32_initialise ( /*OUT*/VexGuestPPC32State* vex_state );
+
+
+/* Write the given native %CR value to the supplied VexGuestPPC32State
+   structure. */
+extern
+void LibVEX_GuestPPC32_put_CR ( UInt cr_native,
+                                /*OUT*/VexGuestPPC32State* vex_state );
+
+/* Extract from the supplied VexGuestPPC32State structure the
+   corresponding native %CR value. */
+extern
+UInt LibVEX_GuestPPC32_get_CR ( /*IN*/const VexGuestPPC32State* vex_state );
+
+
+/* Write the given native %XER value to the supplied VexGuestPPC32State
+   structure. */
+extern
+void LibVEX_GuestPPC32_put_XER ( UInt xer_native,
+                                 /*OUT*/VexGuestPPC32State* vex_state );
+
+/* Extract from the supplied VexGuestPPC32State structure the
+   corresponding native %XER value. */
+extern
+UInt LibVEX_GuestPPC32_get_XER ( /*IN*/const VexGuestPPC32State* vex_state );
+
+#endif /* ndef __LIBVEX_PUB_GUEST_PPC32_H */
+
+
+/*---------------------------------------------------------------*/
+/*---                                    libvex_guest_ppc32.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_guest_ppc64.h b/VEX/pub/libvex_guest_ppc64.h
new file mode 100644
index 0000000..13a3540
--- /dev/null
+++ b/VEX/pub/libvex_guest_ppc64.h
@@ -0,0 +1,338 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                              libvex_guest_ppc64.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_PPC64_H
+#define __LIBVEX_PUB_GUEST_PPC64_H
+
+#include "libvex_basictypes.h"
+
+/*
+    volatile ==  caller-saved (not preserved across function calls)
+non-volatile ==  callee-saved (preserved across function calls)
+
+r0        Volatile register used in function prologs
+r1        Stack frame pointer
+r2        TOC pointer
+r3        Volatile parameter and return value register
+r4-r10    Volatile registers used for function parameters
+r11       Volatile register used in calls by pointer and as an
+          environment pointer for languages which require one
+r12       Volatile register used for exception handling and glink code
+r13       Reserved for use as system thread ID
+r14-r31   Nonvolatile registers used for local variables
+
+f0        Volatile scratch register
+f1-f4     Volatile floating point parameter and return value registers
+f5-f13    Volatile floating point parameter registers
+f14-f31   Nonvolatile registers
+
+LR        Link register (volatile)
+CTR       Loop counter register (volatile)
+XER       Fixed point exception register (volatile)
+FPSCR     Floating point status and control register (volatile)
+
+CR0-CR1   Volatile condition code register fields
+CR2-CR4   Nonvolatile condition code register fields
+CR5-CR7   Volatile condition code register fields
+
+On processors with the VMX feature.
+
+v0-v1     Volatile scratch registers
+v2-v13    Volatile vector parameters registers
+v14-v19   Volatile scratch registers
+v20-v31   Non-volatile registers
+vrsave    Non-volatile 32-bit register
+*/
+
+
+/*---------------------------------------------------------------*/
+/*--- Vex's representation of the PPC64 CPU state             ---*/
+/*---------------------------------------------------------------*/
+
+#define VEX_GUEST_PPC64_REDIR_STACK_SIZE (16/*entries*/ * 2/*words per entry*/)
+
+typedef
+   struct {
+     /* Event check fail addr, counter, and padding to make GPR0 16
+        aligned. */
+      /*   0 */ ULong  host_EvC_FAILADDR;
+      /*   8 */ UInt   host_EvC_COUNTER;
+      /*  12 */ UInt   pad0;
+      /* NB: offsets in the comments below omit this 16-byte prologue;
         add 16 to get the true offset within the struct. */
+      /* General Purpose Registers */
+      /*   0 */ ULong guest_GPR0;
+      /*   8 */ ULong guest_GPR1;
+      /*  16 */ ULong guest_GPR2;
+      /*  24 */ ULong guest_GPR3;
+      /*  32 */ ULong guest_GPR4;
+      /*  40 */ ULong guest_GPR5;
+      /*  48 */ ULong guest_GPR6;
+      /*  56 */ ULong guest_GPR7;
+      /*  64 */ ULong guest_GPR8;
+      /*  72 */ ULong guest_GPR9;
+      /*  80 */ ULong guest_GPR10;
+      /*  88 */ ULong guest_GPR11;
+      /*  96 */ ULong guest_GPR12;
+      /* 104 */ ULong guest_GPR13;
+      /* 112 */ ULong guest_GPR14;
+      /* 120 */ ULong guest_GPR15;
+      /* 128 */ ULong guest_GPR16;
+      /* 136 */ ULong guest_GPR17;
+      /* 144 */ ULong guest_GPR18;
+      /* 152 */ ULong guest_GPR19;
+      /* 160 */ ULong guest_GPR20;
+      /* 168 */ ULong guest_GPR21;
+      /* 176 */ ULong guest_GPR22;
+      /* 184 */ ULong guest_GPR23;
+      /* 192 */ ULong guest_GPR24;
+      /* 200 */ ULong guest_GPR25;
+      /* 208 */ ULong guest_GPR26;
+      /* 216 */ ULong guest_GPR27;
+      /* 224 */ ULong guest_GPR28;
+      /* 232 */ ULong guest_GPR29;
+      /* 240 */ ULong guest_GPR30;
+      /* 248 */ ULong guest_GPR31;
+
+      // Vector Registers, Floating Point Registers, and VSX Registers
+      // With ISA 2.06, the "Vector-Scalar Floating-point" category
+      // provides facilities to support vector and scalar binary floating-
+      // point operations.  A unified register file is an integral part
+      // of this new facility, combining floating point and vector registers
+      // using a 64x128-bit vector.  These are referred to as VSR[0..63].
+      // The floating point registers are now mapped into double word element 0
+      // of VSR[0..31]. The 32x128-bit vector registers defined by the "Vector
+      // Facility [Category: Vector]" are now mapped to VSR[32..63].
+
+      // IMPORTANT: the user of libvex must place the guest state so as
+      // to ensure that guest_VSR{0..63}, and any shadows thereof, are
+      // 16-aligned.
+
+      /*  256 */ U128 guest_VSR0;
+      /*  272 */ U128 guest_VSR1;
+      /*  288 */ U128 guest_VSR2;
+      /*  304 */ U128 guest_VSR3;
+      /*  320 */ U128 guest_VSR4;
+      /*  336 */ U128 guest_VSR5;
+      /*  352 */ U128 guest_VSR6;
+      /*  368 */ U128 guest_VSR7;
+      /*  384 */ U128 guest_VSR8;
+      /*  400 */ U128 guest_VSR9;
+      /*  416 */ U128 guest_VSR10;
+      /*  432 */ U128 guest_VSR11;
+      /*  448 */ U128 guest_VSR12;
+      /*  464 */ U128 guest_VSR13;
+      /*  480 */ U128 guest_VSR14;
+      /*  496 */ U128 guest_VSR15;
+      /*  512 */ U128 guest_VSR16;
+      /*  528 */ U128 guest_VSR17;
+      /*  544 */ U128 guest_VSR18;
+      /*  560 */ U128 guest_VSR19;
+      /*  576 */ U128 guest_VSR20;
+      /*  592 */ U128 guest_VSR21;
+      /*  608 */ U128 guest_VSR22;
+      /*  624 */ U128 guest_VSR23;
+      /*  640 */ U128 guest_VSR24;
+      /*  656 */ U128 guest_VSR25;
+      /*  672 */ U128 guest_VSR26;
+      /*  688 */ U128 guest_VSR27;
+      /*  704 */ U128 guest_VSR28;
+      /*  720 */ U128 guest_VSR29;
+      /*  736 */ U128 guest_VSR30;
+      /*  752 */ U128 guest_VSR31;
+      /*  768 */ U128 guest_VSR32;
+      /*  784 */ U128 guest_VSR33;
+      /*  800 */ U128 guest_VSR34;
+      /*  816 */ U128 guest_VSR35;
+      /*  832 */ U128 guest_VSR36;
+      /*  848 */ U128 guest_VSR37;
+      /*  864 */ U128 guest_VSR38;
+      /*  880 */ U128 guest_VSR39;
+      /*  896 */ U128 guest_VSR40;
+      /*  912 */ U128 guest_VSR41;
+      /*  928 */ U128 guest_VSR42;
+      /*  944 */ U128 guest_VSR43;
+      /*  960 */ U128 guest_VSR44;
+      /*  976 */ U128 guest_VSR45;
+      /*  992 */ U128 guest_VSR46;
+      /* 1008 */ U128 guest_VSR47;
+      /* 1024 */ U128 guest_VSR48;
+      /* 1040 */ U128 guest_VSR49;
+      /* 1056 */ U128 guest_VSR50;
+      /* 1072 */ U128 guest_VSR51;
+      /* 1088 */ U128 guest_VSR52;
+      /* 1104 */ U128 guest_VSR53;
+      /* 1120 */ U128 guest_VSR54;
+      /* 1136 */ U128 guest_VSR55;
+      /* 1152 */ U128 guest_VSR56;
+      /* 1168 */ U128 guest_VSR57;
+      /* 1184 */ U128 guest_VSR58;
+      /* 1200 */ U128 guest_VSR59;
+      /* 1216 */ U128 guest_VSR60;
+      /* 1232 */ U128 guest_VSR61;
+      /* 1248 */ U128 guest_VSR62;
+      /* 1264 */ U128 guest_VSR63;
+
+      /* 1280 */ ULong guest_CIA;    // IP (no arch visible register)
+      /* 1288 */ ULong guest_LR;     // Link Register
+      /* 1296 */ ULong guest_CTR;    // Count Register
+
+      /* XER pieces */
+      /* 1304 */ UChar guest_XER_SO; /* in lsb */
+      /* 1305 */ UChar guest_XER_OV; /* in lsb */
+      /* 1306 */ UChar guest_XER_CA; /* in lsb */
+      /* 1307 */ UChar guest_XER_BC; /* all bits */
+
+      /* CR pieces */
+      /* 1308 */ UChar guest_CR0_321; /* in [3:1] */
+      /* 1309 */ UChar guest_CR0_0;   /* in lsb */
+      /* 1310 */ UChar guest_CR1_321; /* in [3:1] */
+      /* 1311 */ UChar guest_CR1_0;   /* in lsb */
+      /* 1312 */ UChar guest_CR2_321; /* in [3:1] */
+      /* 1313 */ UChar guest_CR2_0;   /* in lsb */
+      /* 1314 */ UChar guest_CR3_321; /* in [3:1] */
+      /* 1315 */ UChar guest_CR3_0;   /* in lsb */
+      /* 1316 */ UChar guest_CR4_321; /* in [3:1] */
+      /* 1317 */ UChar guest_CR4_0;   /* in lsb */
+      /* 1318 */ UChar guest_CR5_321; /* in [3:1] */
+      /* 1319 */ UChar guest_CR5_0;   /* in lsb */
+      /* 1320 */ UChar guest_CR6_321; /* in [3:1] */
+      /* 1321 */ UChar guest_CR6_0;   /* in lsb */
+      /* 1322 */ UChar guest_CR7_321; /* in [3:1] */
+      /* 1323 */ UChar guest_CR7_0;   /* in lsb */
+
+      /* FP Status and Control Register fields. Only rounding mode fields
+         are supported. */
+      /* 1324 */ UChar guest_FPROUND; // Binary Floating Point Rounding Mode
+      /* 1325 */ UChar guest_DFPROUND; // Decimal Floating Point Rounding Mode
+      /* 1326 */ UChar pad1;
+      /* 1327 */ UChar pad2;
+
+      /* Vector Save/Restore Register */
+      /* 1328 */ UInt guest_VRSAVE;
+
+      /* Vector Status and Control Register */
+      /* 1332 */ UInt guest_VSCR;
+
+      /* Emulation notes */
+      /* 1336 */ UInt guest_EMNOTE;
+
+      /* gcc adds 4 bytes padding here: pre-empt it. */
+      /* 1340 */ UInt  padding;
+
+      /* For icbi: record start and length of area to invalidate */
+      /* 1344 */ ULong guest_CMSTART;
+      /* 1352 */ ULong guest_CMLEN;
+
+      /* Used to record the unredirected guest address at the start of
+         a translation whose start has been redirected.  By reading
+         this pseudo-register shortly afterwards, the translation can
+         find out what the corresponding no-redirection address was.
+         Note, this is only set for wrap-style redirects, not for
+         replace-style ones. */
+      /* 1360 */ ULong guest_NRADDR;
+      /* 1368 */ ULong guest_NRADDR_GPR2;
+
+     /* A grows-upwards stack for hidden saves/restores of LR and R2
+        needed for function interception and wrapping on ppc64-linux.
+        A horrible hack.  REDIR_SP points to the highest live entry,
+        and so starts at -1. */
+      /* 1376 */ ULong guest_REDIR_SP;
+      /* 1384 */ ULong guest_REDIR_STACK[VEX_GUEST_PPC64_REDIR_STACK_SIZE];
+
+      /* Needed for Darwin: CIA at the last SC insn.  Used when backing up
+         to restart a syscall that has been interrupted by a signal. */
+      /* 1640 */ ULong guest_IP_AT_SYSCALL;
+
+      /* SPRG3, which AIUI is readonly in user space.  Needed for
+         threading on AIX. */
+      /* 1648 */ ULong guest_SPRG3_RO;
+
+      /* 1656 */ ULong guest_TFHAR;     // Transaction Failure Handler Address Register
+      /* 1664 */ ULong guest_TEXASR;    // Transaction EXception And Summary Register
+      /* 1672 */ ULong guest_TFIAR;     // Transaction Failure Instruction Address Register
+      /* 1680 */ UInt  guest_TEXASRU;   // Transaction EXception And Summary Register Upper
+
+      /* Padding to make it have a 16-aligned size */
+      /* 1684 */  UInt  padding1;
+      /* 1688 */  UInt  padding2;
+      /* 1692 */  UInt  padding3;
+
+   }
+   VexGuestPPC64State;
+
+
+/*---------------------------------------------------------------*/
+/*--- Utility functions for PPC64 guest stuff.                ---*/
+/*---------------------------------------------------------------*/
+
+/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
+
+/* Initialise all guest PPC64 state. */
+extern
+void LibVEX_GuestPPC64_initialise ( /*OUT*/VexGuestPPC64State* vex_state );
+
+
+/* Write the given native %CR value to the supplied VexGuestPPC64State
+   structure.  Note, %CR is 32-bits even for ppc64. */
+extern
+void LibVEX_GuestPPC64_put_CR ( UInt cr_native,
+                                /*OUT*/VexGuestPPC64State* vex_state );
+
+/* Extract from the supplied VexGuestPPC64State structure the
+   corresponding native %CR value.  Note, %CR is 32-bits even for
+   ppc64. */
+extern
+UInt LibVEX_GuestPPC64_get_CR ( /*IN*/const VexGuestPPC64State* vex_state );
+
+
+/* Write the given native %XER value to the supplied
+   VexGuestPPC64State structure.  Note, %XER is 32-bits even for
+   ppc64. */
+extern
+void LibVEX_GuestPPC64_put_XER ( UInt xer_native,
+                                 /*OUT*/VexGuestPPC64State* vex_state );
+
+/* Extract from the supplied VexGuestPPC64State structure the
+   corresponding native %XER value.  Note, %CR is 32-bits even for
+   ppc64. */
+extern
+UInt LibVEX_GuestPPC64_get_XER ( /*IN*/const VexGuestPPC64State* vex_state );
+
+#endif /* ndef __LIBVEX_PUB_GUEST_PPC64_H */
+
+
+/*---------------------------------------------------------------*/
+/*---                                    libvex_guest_ppc64.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_guest_s390x.h b/VEX/pub/libvex_guest_s390x.h
new file mode 100644
index 0000000..a163b7f
--- /dev/null
+++ b/VEX/pub/libvex_guest_s390x.h
@@ -0,0 +1,177 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*---------------------------------------------------------------*/
+/*--- begin                              libvex_guest_s390x.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_S390X_H
+#define __LIBVEX_PUB_GUEST_S390X_H
+
+#include "libvex_basictypes.h"
+
+/*------------------------------------------------------------*/
+/*--- Vex's representation of the s390 CPU state.          ---*/
+/*------------------------------------------------------------*/
+
+typedef struct {
+
+/*------------------------------------------------------------*/
+/*--- ar registers                                         ---*/
+/*------------------------------------------------------------*/
+
+   /*    0 */  UInt guest_a0;
+   /*    4 */  UInt guest_a1;
+   /*    8 */  UInt guest_a2;
+   /*   12 */  UInt guest_a3;
+   /*   16 */  UInt guest_a4;
+   /*   20 */  UInt guest_a5;
+   /*   24 */  UInt guest_a6;
+   /*   28 */  UInt guest_a7;
+   /*   32 */  UInt guest_a8;
+   /*   36 */  UInt guest_a9;
+   /*   40 */  UInt guest_a10;
+   /*   44 */  UInt guest_a11;
+   /*   48 */  UInt guest_a12;
+   /*   52 */  UInt guest_a13;
+   /*   56 */  UInt guest_a14;
+   /*   60 */  UInt guest_a15;
+
+/*------------------------------------------------------------*/
+/*--- fpr registers                                        ---*/
+/*------------------------------------------------------------*/
+
+   /*   64 */  ULong guest_f0;
+   /*   72 */  ULong guest_f1;
+   /*   80 */  ULong guest_f2;
+   /*   88 */  ULong guest_f3;
+   /*   96 */  ULong guest_f4;
+   /*  104 */  ULong guest_f5;
+   /*  112 */  ULong guest_f6;
+   /*  120 */  ULong guest_f7;
+   /*  128 */  ULong guest_f8;
+   /*  136 */  ULong guest_f9;
+   /*  144 */  ULong guest_f10;
+   /*  152 */  ULong guest_f11;
+   /*  160 */  ULong guest_f12;
+   /*  168 */  ULong guest_f13;
+   /*  176 */  ULong guest_f14;
+   /*  184 */  ULong guest_f15;
+
+/*------------------------------------------------------------*/
+/*--- gpr registers                                        ---*/
+/*------------------------------------------------------------*/
+
+   /*  192 */  ULong guest_r0;
+   /*  200 */  ULong guest_r1;
+   /*  208 */  ULong guest_r2;
+   /*  216 */  ULong guest_r3;
+   /*  224 */  ULong guest_r4;
+   /*  232 */  ULong guest_r5;
+   /*  240 */  ULong guest_r6;
+   /*  248 */  ULong guest_r7;
+   /*  256 */  ULong guest_r8;
+   /*  264 */  ULong guest_r9;
+   /*  272 */  ULong guest_r10;
+   /*  280 */  ULong guest_r11;
+   /*  288 */  ULong guest_r12;
+   /*  296 */  ULong guest_r13;
+   /*  304 */  ULong guest_r14;
+   /*  312 */  ULong guest_r15;
+
+/*------------------------------------------------------------*/
+/*--- S390 miscellaneous registers                         ---*/
+/*------------------------------------------------------------*/
+
+   /*  320 */  ULong guest_counter;
+   /*  328 */  UInt guest_fpc;
+   /*  332 */  UChar unused[4]; /* 4-byte hole to get 8-byte alignment */
+   /*  336 */  ULong guest_IA;
+
+/*------------------------------------------------------------*/
+/*--- S390 pseudo registers                                ---*/
+/*------------------------------------------------------------*/
+
+   /*  344 */  ULong guest_SYSNO;
+
+/*------------------------------------------------------------*/
+/*--- 4-word thunk used to calculate the condition code    ---*/
+/*------------------------------------------------------------*/
+
+   /*  352 */  ULong guest_CC_OP;
+   /*  360 */  ULong guest_CC_DEP1;
+   /*  368 */  ULong guest_CC_DEP2;
+   /*  376 */  ULong guest_CC_NDEP;
+
+/*------------------------------------------------------------*/
+/*--- Pseudo registers. Required by all architectures      ---*/
+/*------------------------------------------------------------*/
+
+   /* See comments at bottom of libvex.h */
+   /*  384 */  ULong guest_NRADDR;
+   /*  392 */  ULong guest_CMSTART;
+   /*  400 */  ULong guest_CMLEN;
+
+   /* Used when backing up to restart a syscall that has
+      been interrupted by a signal. See also comment in
+      libvex_ir.h */
+   /*  408 */  ULong guest_IP_AT_SYSCALL;
+
+   /* Emulation notes; see comments in libvex_emnote.h */
+   /*  416 */  UInt guest_EMNOTE;
+
+   /* For translation chaining */
+   /*  420 */  UInt  host_EvC_COUNTER;
+   /*  424 */  ULong host_EvC_FAILADDR;
+
+/*------------------------------------------------------------*/
+/*--- Force alignment to 16 bytes                          ---*/
+/*------------------------------------------------------------*/
+   /*  432 */  UChar padding[0]; /* NOTE(review): zero-length array is a
+                  GNU extension, not strict ISO C -- presumably intentional
+                  here since the struct size is already 16-aligned */
+
+   /*  432 */  /* This is the size of the guest state */
+} VexGuestS390XState;
+
+
+/*------------------------------------------------------------*/
+/*--- Function prototypes                                  ---*/
+/*------------------------------------------------------------*/
+
+void LibVEX_GuestS390X_initialise(VexGuestS390XState *);
+
+/*------------------------------------------------------------*/
+/*--- Dedicated registers                                  ---*/
+/*------------------------------------------------------------*/
+
+#define guest_LR guest_r14  /* Link register */
+#define guest_SP guest_r15  /* Stack pointer */
+#define guest_FP guest_r11  /* Frame pointer */
+
+/*---------------------------------------------------------------*/
+/*--- end                                libvex_guest_s390x.h ---*/
+/*---------------------------------------------------------------*/
+
+#endif /* __LIBVEX_PUB_GUEST_S390X_H */
diff --git a/VEX/pub/libvex_guest_tilegx.h b/VEX/pub/libvex_guest_tilegx.h
new file mode 100644
index 0000000..61a60e3
--- /dev/null
+++ b/VEX/pub/libvex_guest_tilegx.h
@@ -0,0 +1,149 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                             libvex_guest_tilegx.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2010-2013 Tilera Corp.
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */
+
+#ifndef __LIBVEX_PUB_GUEST_TILEGX_H
+#define __LIBVEX_PUB_GUEST_TILEGX_H
+
+#include "libvex_basictypes.h"
+#include "libvex_emnote.h"
+
+#undef   TILEGX_DEBUG
+
+/*---------------------------------------------------------------*/
+/*--- Vex's representation of the tilegx CPU state.           ---*/
+/*---------------------------------------------------------------*/
+
+typedef ULong ULONG;
+
+typedef
+struct {
+  /* CPU Registers */
+  /*   0   */ ULONG guest_r0;
+  /*   8   */ ULONG guest_r1;
+  /*   16  */ ULONG guest_r2;
+  /*   24  */ ULONG guest_r3;
+  /*   32  */ ULONG guest_r4;
+  /*   40  */ ULONG guest_r5;
+  /*   48  */ ULONG guest_r6;
+  /*   56  */ ULONG guest_r7;
+  /*   64  */ ULONG guest_r8;
+  /*   72  */ ULONG guest_r9;
+  /*   80  */ ULONG guest_r10;
+  /*   88  */ ULONG guest_r11;
+  /*   96  */ ULONG guest_r12;
+  /*   104 */ ULONG guest_r13;
+  /*   112 */ ULONG guest_r14;
+  /*   120 */ ULONG guest_r15;
+  /*   128 */ ULONG guest_r16;
+  /*   136 */ ULONG guest_r17;
+  /*   144 */ ULONG guest_r18;
+  /*   152 */ ULONG guest_r19;
+  /*   160 */ ULONG guest_r20;
+  /*   168 */ ULONG guest_r21;
+  /*   176 */ ULONG guest_r22;
+  /*   184 */ ULONG guest_r23;
+  /*   192 */ ULONG guest_r24;
+  /*   200 */ ULONG guest_r25;
+  /*   208 */ ULONG guest_r26;
+  /*   216 */ ULONG guest_r27;
+  /*   224 */ ULONG guest_r28;
+  /*   232 */ ULONG guest_r29;
+  /*   240 */ ULONG guest_r30;
+  /*   248 */ ULONG guest_r31;
+  /*   256 */ ULONG guest_r32;
+  /*   264 */ ULONG guest_r33;
+  /*   272 */ ULONG guest_r34;
+  /*   280 */ ULONG guest_r35;
+  /*   288 */ ULONG guest_r36;
+  /*   296 */ ULONG guest_r37;
+  /*   304 */ ULONG guest_r38;
+  /*   312 */ ULONG guest_r39;
+  /*   320 */ ULONG guest_r40;
+  /*   328 */ ULONG guest_r41;
+  /*   336 */ ULONG guest_r42;
+  /*   344 */ ULONG guest_r43;
+  /*   352 */ ULONG guest_r44;
+  /*   360 */ ULONG guest_r45;
+  /*   368 */ ULONG guest_r46;
+  /*   376 */ ULONG guest_r47;
+  /*   384 */ ULONG guest_r48;
+  /*   392 */ ULONG guest_r49;
+  /*   400 */ ULONG guest_r50;
+  /*   408 */ ULONG guest_r51;
+  /*   416 */ ULONG guest_r52; /* FP */
+  /*   424 */ ULONG guest_r53;
+  /*   432 */ ULONG guest_r54; /* SP */
+  /*   440 */ ULONG guest_r55; /* LR */
+  /*   448 */ ULONG guest_r56; /* zero */
+  /*   456 */ ULONG guest_r57; /* Reserved */
+  /*   464 */ ULONG guest_r58; /* Reserved */
+  /*   472 */ ULONG guest_r59; /* Reserved */
+  /*   480 */ ULONG guest_r60; /* Reserved */
+  /*   488 */ ULONG guest_r61; /* Reserved */
+  /*   496 */ ULONG guest_r62; /* Reserved */
+  /*   504 */ ULONG guest_r63; /* Reserved */
+  /*   512 */ ULONG guest_pc;
+  /*   520 */ ULONG guest_spare; /* Reserved */
+  /*   528 */ ULONG guest_EMNOTE;
+  /*   536 */ ULONG guest_CMSTART;
+  /*   544 */ ULONG guest_CMLEN;
+  /*   552 */ ULONG guest_NRADDR;
+  /*   560 */ ULong guest_cmpexch;
+  /*   568 */ ULong guest_zero;
+  /*   576 */ ULong guest_ex_context_0;
+  /*   584 */ ULong guest_ex_context_1;
+  /*   592 */ ULong host_EvC_FAILADDR;
+  /*   600 */ ULong host_EvC_COUNTER;
+  /*   608 */ ULong guest_COND;
+  /*   616 */ ULong PAD;
+
+} VexGuestTILEGXState;
+
+#define OFFSET_tilegx_r(_N)  (8 * (_N))
+
+/*---------------------------------------------------------------*/
+/*--- Utility functions for TILEGX guest stuff.               ---*/
+/*---------------------------------------------------------------*/
+
+/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
+
+/* Initialise all guest TILEGX state. */
+
+extern
+void LibVEX_GuestTILEGX_initialise ( /*OUT*/VexGuestTILEGXState* vex_state );
+
+
+#endif /* __LIBVEX_PUB_GUEST_TILEGX_H */
+
+
+/*---------------------------------------------------------------*/
+/*---                                   libvex_guest_tilegx.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_guest_x86.h b/VEX/pub/libvex_guest_x86.h
new file mode 100644
index 0000000..ac780ac
--- /dev/null
+++ b/VEX/pub/libvex_guest_x86.h
@@ -0,0 +1,293 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                libvex_guest_x86.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_PUB_GUEST_X86_H
+#define __LIBVEX_PUB_GUEST_X86_H
+
+#include "libvex_basictypes.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- Vex's representation of the x86 CPU state.              ---*/
+/*---------------------------------------------------------------*/
+
+/* The integer parts should be pretty straightforward. */
+
+/* Hmm, subregisters.  The simulated state is stored in memory in the
+   host's byte ordering, so we can't say here what the offsets of %ax,
+   %al, %ah etc are since that depends on the host's byte ordering,
+   which we don't know. */
+
+/* FPU.  For now, just simulate 8 64-bit registers, their tags, and
+   the reg-stack top pointer, of which only the least significant
+   three bits are relevant.
+
+   The model is:
+     F0 .. F7 are the 8 registers.  FTOP[2:0] contains the 
+     index of the current 'stack top' -- pretty meaningless, but
+     still.  FTOP is a 32-bit value.  FTOP[31:3] can be anything
+     (not guaranteed to be zero).
+
+     When a value is pushed onto the stack, ftop is first replaced by 
+     (ftop-1) & 7, and then F[ftop] is assigned the value.
+
+     When a value is popped off the stack, the value is read from
+     F[ftop], and then ftop is replaced by (ftop+1) & 7.
+
+     In general, a reference to a register ST(i) actually references
+     F[ (ftop+i) & 7 ].
+
+   FTAG0 .. FTAG0+7 are the tags.  Each is a byte, zero means empty,
+   non-zero means non-empty.
+
+   The general rule appears to be that a read or modify of a register
+   gets a stack underflow fault if the register is empty.  A write of
+   a register (only a write, not a modify) gets a stack overflow fault
+   if the register is full.  Note that "over" vs "under" is pretty
+   meaningless since the FP stack pointer can move around arbitrarily,
+   so it's really just two different kinds of exceptions:
+   register-empty and register full.
+
+   Naturally Intel (in its infinite wisdom) has seen fit to throw in
+   some ad-hoc inconsistencies to the fault-generation rules of the
+   above para, just to complicate everything.  Known inconsistencies:
+
+   * fxam can read a register in any state without taking an underflow
+     fault.
+
+   * fst from st(0) to st(i) does not take an overflow fault even if the
+     destination is already full.
+
+   FPROUND[1:0] is the FPU's notional rounding mode, encoded as per
+   the IRRoundingMode type (see libvex_ir.h).  This just happens to be
+   the Intel encoding.  Note carefully, the rounding mode is only
+   observed on float-to-int conversions, and on float-to-float
+   rounding, but not for general float-to-float operations, which are
+   always rounded-to-nearest.
+
+   Loads/stores of the FPU control word are faked accordingly -- on
+   loads, everything except the rounding mode is ignored, and on
+   stores, you get a vanilla control word (0x037F) with the rounding
+   mode patched in.  Hence the only values you can get are 0x037F,
+   0x077F, 0x0B7F or 0x0F7F.  Vex will emit an emulation warning if
+   you try and load a control word which either (1) unmasks FP
+   exceptions, or (2) changes the default (80-bit) precision.
+
+   FC3210 contains the C3, C2, C1 and C0 bits in the same place they
+   are in the FPU's status word.  (bits 14, 10, 9, 8 respectively).
+   All other bits should be zero.  The relevant mask to select just
+   those bits is 0x4700.  To select C3, C2 and C0 only, the mask is
+   0x4500.  
+
+   SSEROUND[1:0] is the SSE unit's notional rounding mode, encoded as
+   per the IRRoundingMode type.  As with the FPU control word, the
+   rounding mode is the only part of %MXCSR that Vex observes.  On
+   storing %MXCSR, you will get a vanilla word (0x1F80) with the
+   rounding mode patched in.  Hence the only values you will get are
+   0x1F80, 0x3F80, 0x5F80 or 0x7F80.  Vex will emit an emulation
+   warning if you try and load a control word which either (1) unmasks
+   any exceptions, (2) sets FZ (flush-to-zero) to 1, or (3) sets DAZ
+   (denormals-are-zeroes) to 1. 
+
+   Segments: initial prefixes of local and global segment descriptor
+   tables are modelled.  guest_LDT is either zero (NULL) or points in
+   the host address space to an array of VEX_GUEST_X86_LDT_NENT
+   descriptors, which have the type VexGuestX86SegDescr, defined
+   below.  Similarly, guest_GDT is either zero or points in the host
+   address space to an array of VEX_GUEST_X86_GDT_NENT descriptors.
+   The only place where these are used are in the helper function
+   x86g_use_seg().  LibVEX's client is responsible for pointing
+   guest_LDT and guest_GDT at suitable tables.  The contents of these
+   tables are expected not to change during the execution of any given
+   superblock, but they may validly be changed by LibVEX's client in
+   between superblock executions.
+
+   Since x86g_use_seg() only expects these tables to have
+   VEX_GUEST_X86_{LDT,GDT}_NENT entries, LibVEX's client should not
+   attempt to write entries beyond those limits.
+*/
+typedef
+   struct {
+      /* Event check fail addr and counter. */
+      UInt  host_EvC_FAILADDR; /* 0 */
+      UInt  host_EvC_COUNTER;  /* 4 */
+      UInt  guest_EAX;         /* 8 */
+      UInt  guest_ECX;
+      UInt  guest_EDX;
+      UInt  guest_EBX;
+      UInt  guest_ESP;
+      UInt  guest_EBP;
+      UInt  guest_ESI;
+      UInt  guest_EDI;         /* 36 */
+
+      /* 4-word thunk used to calculate O S Z A C P flags. */
+      UInt  guest_CC_OP;       /* 40 */
+      UInt  guest_CC_DEP1;
+      UInt  guest_CC_DEP2;
+      UInt  guest_CC_NDEP;     /* 52 */
+      /* The D flag is stored here, encoded as either -1 or +1 */
+      UInt  guest_DFLAG;       /* 56 */
+      /* Bit 21 (ID) of eflags stored here, as either 0 or 1. */
+      UInt  guest_IDFLAG;      /* 60 */
+      /* Bit 18 (AC) of eflags stored here, as either 0 or 1. */
+      UInt  guest_ACFLAG;      /* 64 */
+
+      /* EIP */
+      UInt  guest_EIP;         /* 68 */
+
+      /* FPU */
+      ULong guest_FPREG[8];    /* 72 */
+      UChar guest_FPTAG[8];   /* 136 */
+      UInt  guest_FPROUND;    /* 144 */
+      UInt  guest_FC3210;     /* 148 */
+      UInt  guest_FTOP;       /* 152 */
+
+      /* SSE */
+      UInt  guest_SSEROUND;   /* 156 */
+      U128  guest_XMM0;       /* 160 */
+      U128  guest_XMM1;
+      U128  guest_XMM2;
+      U128  guest_XMM3;
+      U128  guest_XMM4;
+      U128  guest_XMM5;
+      U128  guest_XMM6;
+      U128  guest_XMM7;
+
+      /* Segment registers. */
+      UShort guest_CS;
+      UShort guest_DS;
+      UShort guest_ES;
+      UShort guest_FS;
+      UShort guest_GS;
+      UShort guest_SS;
+      /* LDT/GDT stuff. */
+      HWord  guest_LDT; /* host addr, a VexGuestX86SegDescr* */
+      HWord  guest_GDT; /* host addr, a VexGuestX86SegDescr* */
+
+      /* Emulation notes */
+      UInt   guest_EMNOTE;
+
+      /* For clflush/clinval: record start and length of area */
+      UInt guest_CMSTART;
+      UInt guest_CMLEN;
+
+      /* Used to record the unredirected guest address at the start of
+         a translation whose start has been redirected.  By reading
+         this pseudo-register shortly afterwards, the translation can
+         find out what the corresponding no-redirection address was.
+         Note, this is only set for wrap-style redirects, not for
+         replace-style ones. */
+      UInt guest_NRADDR;
+
+      /* Used for Darwin syscall dispatching. */
+      UInt guest_SC_CLASS;
+
+      /* Needed for Darwin (but mandated for all guest architectures):
+         EIP at the last syscall insn (int 0x80/81/82, sysenter,
+         syscall).  Used when backing up to restart a syscall that has
+         been interrupted by a signal. */
+      UInt guest_IP_AT_SYSCALL;
+
+      /* Padding to make it have a 16-aligned size */
+      UInt padding1;
+   }
+   VexGuestX86State;
+
+#define VEX_GUEST_X86_LDT_NENT /*64*/ 8192 /* use complete LDT */
+#define VEX_GUEST_X86_GDT_NENT /*16*/ 8192 /* use complete GDT */
+
+
+/*---------------------------------------------------------------*/
+/*--- Types for x86 guest stuff.                              ---*/
+/*---------------------------------------------------------------*/
+
+/* VISIBLE TO LIBRARY CLIENT */
+
+/* This is the hardware-format for a segment descriptor, ie what the
+   x86 actually deals with.  It is 8 bytes long.  It's ugly. */
+
+typedef struct {
+    union {
+       struct {
+          UShort  LimitLow;
+          UShort  BaseLow;
+          UInt    BaseMid         : 8;
+          UInt    Type            : 5;
+          UInt    Dpl             : 2;
+          UInt    Pres            : 1;
+          UInt    LimitHi         : 4;
+          UInt    Sys             : 1;
+          UInt    Reserved_0      : 1;
+          UInt    Default_Big     : 1;
+          UInt    Granularity     : 1;
+          UInt    BaseHi          : 8;
+       } Bits;
+       struct {
+          UInt word1;
+          UInt word2;
+       } Words;
+    }
+    LdtEnt;
+} VexGuestX86SegDescr;
+
+
+/*---------------------------------------------------------------*/
+/*--- Utility functions for x86 guest stuff.                  ---*/
+/*---------------------------------------------------------------*/
+
+/* ALL THE FOLLOWING ARE VISIBLE TO LIBRARY CLIENT */
+
+/* Initialise all guest x86 state.  The FPU is put in default mode. */
+extern
+void LibVEX_GuestX86_initialise ( /*OUT*/VexGuestX86State* vex_state );
+
+
+/* Extract from the supplied VexGuestX86State structure the
+   corresponding native %eflags value. */
+extern 
+UInt LibVEX_GuestX86_get_eflags ( /*IN*/const VexGuestX86State* vex_state );
+
+/* Set the carry flag in the given state to 'new_carry_flag', which
+   should be zero or one. */
+extern
+void
+LibVEX_GuestX86_put_eflag_c ( UInt new_carry_flag,
+                              /*MOD*/VexGuestX86State* vex_state );
+
+#endif /* ndef __LIBVEX_PUB_GUEST_X86_H */
+
+/*---------------------------------------------------------------*/
+/*---                                      libvex_guest_x86.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_ir.h b/VEX/pub/libvex_ir.h
new file mode 100644
index 0000000..145caa4
--- /dev/null
+++ b/VEX/pub/libvex_ir.h
@@ -0,0 +1,3036 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                       libvex_ir.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_IR_H
+#define __LIBVEX_IR_H
+
+#include "libvex_basictypes.h"
+
+   
+/*---------------------------------------------------------------*/
+/*--- High-level IR description                               ---*/
+/*---------------------------------------------------------------*/
+
+/* Vex IR is an architecture-neutral intermediate representation.
+   Unlike some IRs in systems similar to Vex, it is not like assembly
+   language (ie. a list of instructions).  Rather, it is more like the
+   IR that might be used in a compiler.
+
+   Code blocks
+   ~~~~~~~~~~~
+   The code is broken into small code blocks ("superblocks", type:
+   'IRSB').  Each code block typically represents from 1 to perhaps 50
+   instructions.  IRSBs are single-entry, multiple-exit code blocks.
+   Each IRSB contains three things:
+   - a type environment, which indicates the type of each temporary
+     value present in the IRSB
+   - a list of statements, which represent code
+   - a jump that exits from the end the IRSB
+   Because the blocks are multiple-exit, there can be additional
+   conditional exit statements that cause control to leave the IRSB
+   before the final exit.  Also because of this, IRSBs can cover
+   multiple non-consecutive sequences of code (up to 3).  These are
+   recorded in the type VexGuestExtents (see libvex.h).
+
+   Statements and expressions
+   ~~~~~~~~~~~~~~~~~~~~~~~~~~
+   Statements (type 'IRStmt') represent operations with side-effects,
+   eg.  guest register writes, stores, and assignments to temporaries.
+   Expressions (type 'IRExpr') represent operations without
+   side-effects, eg. arithmetic operations, loads, constants.
+   Expressions can contain sub-expressions, forming expression trees,
+   eg. (3 + (4 * load(addr1))).
+
+   Storage of guest state
+   ~~~~~~~~~~~~~~~~~~~~~~
+   The "guest state" contains the guest registers of the guest machine
+   (ie.  the machine that we are simulating).  It is stored by default
+   in a block of memory supplied by the user of the VEX library,
+   generally referred to as the guest state (area).  To operate on
+   these registers, one must first read ("Get") them from the guest
+   state into a temporary value.  Afterwards, one can write ("Put")
+   them back into the guest state.
+
+   Get and Put are characterised by a byte offset into the guest
+   state, a small integer which effectively gives the identity of the
+   referenced guest register, and a type, which indicates the size of
+   the value to be transferred.
+
+   The basic "Get" and "Put" operations are sufficient to model normal
+   fixed registers on the guest.  Selected areas of the guest state
+   can be treated as a circular array of registers (type:
+   'IRRegArray'), which can be indexed at run-time.  This is done with
+   the "GetI" and "PutI" primitives.  This is necessary to describe
+   rotating register files, for example the x87 FPU stack, SPARC
+   register windows, and the Itanium register files.
+
+   Examples, and flattened vs. unflattened code
+   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+   For example, consider this x86 instruction:
+     
+     addl %eax, %ebx
+
+   One Vex IR translation for this code would be this:
+
+     ------ IMark(0x24F275, 7, 0) ------
+     t3 = GET:I32(0)             # get %eax, a 32-bit integer
+     t2 = GET:I32(12)            # get %ebx, a 32-bit integer
+     t1 = Add32(t3,t2)           # addl
+     PUT(0) = t1                 # put %eax
+
+   (For simplicity, this ignores the effects on the condition codes, and
+   the update of the instruction pointer.)
+
+   The "IMark" is an IR statement that doesn't represent actual code.
+   Instead it indicates the address and length of the original
+   instruction.  The numbers 0 and 12 are offsets into the guest state
+   for %eax and %ebx.  The full list of offsets for an architecture
+   <ARCH> can be found in the type VexGuest<ARCH>State in the file
+   VEX/pub/libvex_guest_<ARCH>.h.
+
+   The five statements in this example are:
+   - the IMark
+   - three assignments to temporaries
+   - one register write (put)
+
+   The six expressions in this example are:
+   - two register reads (gets)
+   - one arithmetic (add) operation
+   - three temporaries (two nested within the Add32, one in the PUT)
+
+   The above IR is "flattened", ie. all sub-expressions are "atoms",
+   either constants or temporaries.  An equivalent, unflattened version
+   would be:
+   
+     PUT(0) = Add32(GET:I32(0), GET:I32(12))
+
+   IR is guaranteed to be flattened at instrumentation-time.  This makes
+   instrumentation easier.  Equivalent flattened and unflattened IR
+   typically results in the same generated code.
+
+   Another example, this one showing loads and stores:
+
+     addl %edx,4(%eax)
+
+   This becomes (again ignoring condition code and instruction pointer
+   updates):
+
+     ------ IMark(0x4000ABA, 3, 0) ------
+     t3 = Add32(GET:I32(0),0x4:I32)
+     t2 = LDle:I32(t3)
+     t1 = GET:I32(8)
+     t0 = Add32(t2,t1)
+     STle(t3) = t0
+
+   The "le" in "LDle" and "STle" is short for "little-endian".
+
+   No need for deallocations
+   ~~~~~~~~~~~~~~~~~~~~~~~~~
+   Although there are allocation functions for various data structures
+   in this file, there are no deallocation functions.  This is because
+   Vex uses a memory allocation scheme that automatically reclaims the
+   memory used by allocated structures once translation is completed.
+   This makes things easier for tools that instrument/transform code
+   blocks.
+
+   SSAness and typing
+   ~~~~~~~~~~~~~~~~~~
+   The IR is fully typed.  For every IRSB (IR block) it is possible to
+   say unambiguously whether or not it is correctly typed.
+   Incorrectly typed IR has no meaning and the VEX will refuse to
+   process it.  At various points during processing VEX typechecks the
+   IR and aborts if any violations are found.  This seems overkill but
+   makes it a great deal easier to build a reliable JIT.
+
+   IR also has the SSA property.  SSA stands for Static Single
+   Assignment, and what it means is that each IR temporary may be
+   assigned to only once.  This idea became widely used in compiler
+   construction in the mid to late 90s.  It makes many IR-level
+   transformations/code improvements easier, simpler and faster.
+   Whenever it typechecks an IR block, VEX also checks the SSA
+   property holds, and will abort if not so.  So SSAness is
+   mechanically and rigidly enforced.
+*/
+
+/*---------------------------------------------------------------*/
+/*--- Type definitions for the IR                             ---*/
+/*---------------------------------------------------------------*/
+
+/* General comments about naming schemes:
+
+   All publically visible functions contain the name of the primary
+   type on which they operate (IRFoo, IRBar, etc).  Hence you should
+   be able to identify these functions by grepping for "IR[A-Z]".
+
+   For some type 'IRFoo':
+
+   - ppIRFoo is the printing method for IRFoo, printing it to the
+     output channel specified in the LibVEX_Initialise call.
+
+   - eqIRFoo is a structural equality predicate for IRFoos.
+
+   - deepCopyIRFoo is a deep copy constructor for IRFoos. 
+     It recursively traverses the entire argument tree and
+     produces a complete new tree.  All types have a deep copy
+     constructor.
+
+   - shallowCopyIRFoo is the shallow copy constructor for IRFoos.
+     It creates a new top-level copy of the supplied object,
+     but does not copy any sub-objects.  Only some types have a
+     shallow copy constructor.
+*/
+
+/* ------------------ Types ------------------ */
+
+/* A type indicates the size of a value, and whether it's an integer, a
+   float, or a vector (SIMD) value. */
+typedef 
+   enum { 
+      Ity_INVALID=0x1100,
+      Ity_I1, 
+      Ity_I8, 
+      Ity_I16, 
+      Ity_I32, 
+      Ity_I64,
+      Ity_I128,  /* 128-bit scalar */
+      Ity_F16,   /* 16 bit float */
+      Ity_F32,   /* IEEE 754 float */
+      Ity_F64,   /* IEEE 754 double */
+      Ity_D32,   /* 32-bit Decimal floating point */
+      Ity_D64,   /* 64-bit Decimal floating point */
+      Ity_D128,  /* 128-bit Decimal floating point */
+      Ity_F128,  /* 128-bit floating point; implementation defined */
+      Ity_V128,  /* 128-bit SIMD */
+      Ity_V256   /* 256-bit SIMD */
+   }
+   IRType;
+
+/* Pretty-print an IRType */
+extern void ppIRType ( IRType );
+
+/* Get the size (in bytes) of an IRType */ 
+extern Int sizeofIRType ( IRType );
+
+/* Translate 1/2/4/8 into Ity_I{8,16,32,64} respectively.  Asserts on
+   any other input. */
+extern IRType integerIRTypeOfSize ( Int szB );
+
+
+/* ------------------ Endianness ------------------ */
+
+/* IREndness is used in load IRExprs and store IRStmts. */
+typedef
+   enum { 
+      Iend_LE=0x1200, /* little endian */
+      Iend_BE          /* big endian */
+   }
+   IREndness;
+
+
+/* ------------------ Constants ------------------ */
+
+/* IRConsts are used within 'Const' and 'Exit' IRExprs. */
+
+/* The various kinds of constant. */
+typedef
+   enum { 
+      Ico_U1=0x1300,
+      Ico_U8, 
+      Ico_U16, 
+      Ico_U32, 
+      Ico_U64,
+      Ico_F32,   /* 32-bit IEEE754 floating */
+      Ico_F32i,  /* 32-bit unsigned int to be interpreted literally
+                    as a IEEE754 single value. */
+      Ico_F64,   /* 64-bit IEEE754 floating */
+      Ico_F64i,  /* 64-bit unsigned int to be interpreted literally
+                    as a IEEE754 double value. */
+      Ico_V128,  /* 128-bit restricted vector constant, with 1 bit
+                    (repeated 8 times) for each of the 16 x 1-byte lanes */
+      Ico_V256   /* 256-bit restricted vector constant, with 1 bit
+                    (repeated 8 times) for each of the 32 x 1-byte lanes */
+   }
+   IRConstTag;
+
+/* A constant.  Stored as a tagged union.  'tag' indicates what kind of
+   constant this is.  'Ico' is the union that holds the fields.  If an
+   IRConst 'c' has c.tag equal to Ico_U32, then it's a 32-bit constant,
+   and its value can be accessed with 'c.Ico.U32'. */
+typedef
+   struct _IRConst {
+      IRConstTag tag;
+      union {
+         Bool   U1;
+         UChar  U8;
+         UShort U16;
+         UInt   U32;
+         ULong  U64;
+         Float  F32;
+         UInt   F32i;
+         Double F64;
+         ULong  F64i;
+         UShort V128;   /* 16-bit value; see Ico_V128 comment above */
+         UInt   V256;   /* 32-bit value; see Ico_V256 comment above */
+      } Ico;
+   }
+   IRConst;
+
+/* IRConst constructors */
+extern IRConst* IRConst_U1   ( Bool );
+extern IRConst* IRConst_U8   ( UChar );
+extern IRConst* IRConst_U16  ( UShort );
+extern IRConst* IRConst_U32  ( UInt );
+extern IRConst* IRConst_U64  ( ULong );
+extern IRConst* IRConst_F32  ( Float );
+extern IRConst* IRConst_F32i ( UInt );
+extern IRConst* IRConst_F64  ( Double );
+extern IRConst* IRConst_F64i ( ULong );
+extern IRConst* IRConst_V128 ( UShort );
+extern IRConst* IRConst_V256 ( UInt );
+
+/* Deep-copy an IRConst */
+extern IRConst* deepCopyIRConst ( const IRConst* );
+
+/* Pretty-print an IRConst */
+extern void ppIRConst ( const IRConst* );
+
+/* Compare two IRConsts for equality */
+extern Bool eqIRConst ( const IRConst*, const IRConst* );
+
+
+/* ------------------ Call targets ------------------ */
+
+/* Describes a helper function to call.  The name part is purely for
+   pretty printing and not actually used.  regparms=n tells the back
+   end that the callee has been declared
+   "__attribute__((regparm(n)))", although indirectly using the
+   VEX_REGPARM(n) macro.  On some targets (x86) the back end will need
+   to construct a non-standard sequence to call a function declared
+   like this.
+
+   mcx_mask is a sop to Memcheck.  It indicates which args should be
+   considered 'always defined' when lazily computing definedness of
+   the result.  Bit 0 of mcx_mask corresponds to args[0], bit 1 to
+   args[1], etc.  If a bit is set, the corresponding arg is excluded
+   (hence "x" in "mcx") from definedness checking.  
+*/
+
+typedef
+   struct {
+      Int          regparms;
+      const HChar* name;
+      void*        addr;
+      UInt         mcx_mask;
+   }
+   IRCallee;
+
+/* Create an IRCallee. */
+extern IRCallee* mkIRCallee ( Int regparms, const HChar* name, void* addr );
+
+/* Deep-copy an IRCallee. */
+extern IRCallee* deepCopyIRCallee ( const IRCallee* );
+
+/* Pretty-print an IRCallee. */
+extern void ppIRCallee ( const IRCallee* );
+
+
+/* ------------------ Guest state arrays ------------------ */
+
+/* This describes a section of the guest state that we want to
+   be able to index at run time, so as to be able to describe 
+   indexed or rotating register files on the guest. */
+typedef
+   struct {
+      Int    base;   /* guest state offset of start of indexed area */
+      IRType elemTy; /* type of each element in the indexed area */
+      Int    nElems; /* number of elements in the indexed area */
+   }
+   IRRegArray;
+
+extern IRRegArray* mkIRRegArray ( Int, IRType, Int );
+
+extern IRRegArray* deepCopyIRRegArray ( const IRRegArray* );
+
+extern void ppIRRegArray ( const IRRegArray* );
+extern Bool eqIRRegArray ( const IRRegArray*, const IRRegArray* );
+
+
+/* ------------------ Temporaries ------------------ */
+
+/* This represents a temporary, eg. t1.  The IR optimiser relies on the
+   fact that IRTemps are 32-bit ints.  Do not change them to be ints of
+   any other size. */
+typedef UInt IRTemp;
+
+/* Pretty-print an IRTemp. */
+extern void ppIRTemp ( IRTemp );
+
+/* Distinguished sentinel value meaning "no/invalid temporary" (all-ones). */
+#define IRTemp_INVALID ((IRTemp)0xFFFFFFFF)
+
+
+/* --------------- Primops (arity 1,2,3 and 4) --------------- */
+
+/* Primitive operations that are used in Unop, Binop, Triop and Qop
+   IRExprs.  Once we take into account integer, floating point and SIMD
+   operations of all the different sizes, there are quite a lot of them.
+   Most instructions supported by the architectures that Vex supports
+   (x86, PPC, etc) are represented.  Some more obscure ones (eg. cpuid)
+   are not;  they are instead handled with dirty helpers that emulate
+   their functionality.  Such obscure ones are thus not directly visible
+   in the IR, but their effects on guest state (memory and registers) 
+   are made visible via the annotations in IRDirty structures.
+*/
+typedef
+   enum { 
+      /* -- Do not change this ordering.  The IR generators rely on
+            (eg) Iop_Add64 == Iop_Add8 + 3. -- */
+
+      Iop_INVALID=0x1400,
+      Iop_Add8,  Iop_Add16,  Iop_Add32,  Iop_Add64,
+      Iop_Sub8,  Iop_Sub16,  Iop_Sub32,  Iop_Sub64,
+      /* Signless mul.  MullS/MullU is elsewhere. */
+      Iop_Mul8,  Iop_Mul16,  Iop_Mul32,  Iop_Mul64,
+      Iop_Or8,   Iop_Or16,   Iop_Or32,   Iop_Or64,
+      Iop_And8,  Iop_And16,  Iop_And32,  Iop_And64,
+      Iop_Xor8,  Iop_Xor16,  Iop_Xor32,  Iop_Xor64,
+      Iop_Shl8,  Iop_Shl16,  Iop_Shl32,  Iop_Shl64,
+      Iop_Shr8,  Iop_Shr16,  Iop_Shr32,  Iop_Shr64,
+      Iop_Sar8,  Iop_Sar16,  Iop_Sar32,  Iop_Sar64,
+      /* Integer comparisons. */
+      Iop_CmpEQ8,  Iop_CmpEQ16,  Iop_CmpEQ32,  Iop_CmpEQ64,
+      Iop_CmpNE8,  Iop_CmpNE16,  Iop_CmpNE32,  Iop_CmpNE64,
+      /* Tags for unary ops */
+      Iop_Not8,  Iop_Not16,  Iop_Not32,  Iop_Not64,
+
+      /* Exactly like CmpEQ8/16/32/64, but carrying the additional
+         hint that these compute the success/failure of a CAS
+         operation, and hence are almost certainly applied to two
+         copies of the same value, which in turn has implications for
+         Memcheck's instrumentation. */
+      Iop_CasCmpEQ8, Iop_CasCmpEQ16, Iop_CasCmpEQ32, Iop_CasCmpEQ64,
+      Iop_CasCmpNE8, Iop_CasCmpNE16, Iop_CasCmpNE32, Iop_CasCmpNE64,
+
+      /* Exactly like CmpNE8/16/32/64, but carrying the additional
+         hint that these needs expensive definedness tracking. */
+      Iop_ExpCmpNE8, Iop_ExpCmpNE16, Iop_ExpCmpNE32, Iop_ExpCmpNE64,
+
+      /* -- Ordering not important after here. -- */
+
+      /* Widening multiplies */
+      Iop_MullS8, Iop_MullS16, Iop_MullS32, Iop_MullS64,
+      Iop_MullU8, Iop_MullU16, Iop_MullU32, Iop_MullU64,
+
+      /* Weirdo integer stuff */
+      Iop_Clz64, Iop_Clz32,   /* count leading zeroes */
+      Iop_Ctz64, Iop_Ctz32,   /* count trailing zeros */
+      /* Ctz64/Ctz32/Clz64/Clz32 are UNDEFINED when given arguments of
+         zero.  You must ensure they are never given a zero argument.
+      */
+
+      /* Standard integer comparisons */
+      Iop_CmpLT32S, Iop_CmpLT64S,
+      Iop_CmpLE32S, Iop_CmpLE64S,
+      Iop_CmpLT32U, Iop_CmpLT64U,
+      Iop_CmpLE32U, Iop_CmpLE64U,
+
+      /* As a sop to Valgrind-Memcheck, the following are useful. */
+      Iop_CmpNEZ8, Iop_CmpNEZ16,  Iop_CmpNEZ32,  Iop_CmpNEZ64,
+      Iop_CmpwNEZ32, Iop_CmpwNEZ64, /* all-0s -> all-0s; other -> all-1s */
+      Iop_Left8, Iop_Left16, Iop_Left32, Iop_Left64, /*  \x -> x | -x */
+      Iop_Max32U, /* unsigned max */
+
+      /* PowerPC-style 3-way integer comparisons.  Without them it is
+         difficult to simulate PPC efficiently.
+         op(x,y) | x < y  = 0x8 else 
+                 | x > y  = 0x4 else
+                 | x == y = 0x2
+      */
+      Iop_CmpORD32U, Iop_CmpORD64U,
+      Iop_CmpORD32S, Iop_CmpORD64S,
+
+      /* Division */
+      /* TODO: clarify semantics wrt rounding, negative values, whatever */
+      Iop_DivU32,   // :: I32,I32 -> I32 (simple div, no mod)
+      Iop_DivS32,   // ditto, signed
+      Iop_DivU64,   // :: I64,I64 -> I64 (simple div, no mod)
+      Iop_DivS64,   // ditto, signed
+      Iop_DivU64E,  // :: I64,I64 -> I64 (dividend is 64-bit arg (hi)
+                    //                    concat with 64 0's (low))
+      Iop_DivS64E,  // ditto, signed
+      Iop_DivU32E,  // :: I32,I32 -> I32 (dividend is 32-bit arg (hi)
+                    // concat with 32 0's (low))
+      Iop_DivS32E,  // ditto, signed
+
+      Iop_DivModU64to32, // :: I64,I32 -> I64
+                         // of which lo half is div and hi half is mod
+      Iop_DivModS64to32, // ditto, signed
+
+      Iop_DivModU128to64, // :: V128,I64 -> V128
+                          // of which lo half is div and hi half is mod
+      Iop_DivModS128to64, // ditto, signed
+
+      Iop_DivModS64to64, // :: I64,I64 -> I128
+                         // of which lo half is div and hi half is mod
+
+      /* Integer conversions.  Some of these are redundant (eg
+         Iop_64to8 is the same as Iop_64to32 and then Iop_32to8), but
+         having a complete set reduces the typical dynamic size of IR
+         and makes the instruction selectors easier to write. */
+
+      /* Widening conversions */
+      Iop_8Uto16, Iop_8Uto32,  Iop_8Uto64,
+                  Iop_16Uto32, Iop_16Uto64,
+                               Iop_32Uto64,
+      Iop_8Sto16, Iop_8Sto32,  Iop_8Sto64,
+                  Iop_16Sto32, Iop_16Sto64,
+                               Iop_32Sto64,
+
+      /* Narrowing conversions */
+      Iop_64to8, Iop_32to8, Iop_64to16,
+      /* 8 <-> 16 bit conversions */
+      Iop_16to8,      // :: I16 -> I8, low half
+      Iop_16HIto8,    // :: I16 -> I8, high half
+      Iop_8HLto16,    // :: (I8,I8) -> I16
+      /* 16 <-> 32 bit conversions */
+      Iop_32to16,     // :: I32 -> I16, low half
+      Iop_32HIto16,   // :: I32 -> I16, high half
+      Iop_16HLto32,   // :: (I16,I16) -> I32
+      /* 32 <-> 64 bit conversions */
+      Iop_64to32,     // :: I64 -> I32, low half
+      Iop_64HIto32,   // :: I64 -> I32, high half
+      Iop_32HLto64,   // :: (I32,I32) -> I64
+      /* 64 <-> 128 bit conversions */
+      Iop_128to64,    // :: I128 -> I64, low half
+      Iop_128HIto64,  // :: I128 -> I64, high half
+      Iop_64HLto128,  // :: (I64,I64) -> I128
+      /* 1-bit stuff */
+      Iop_Not1,   /* :: Ity_Bit -> Ity_Bit */
+      Iop_32to1,  /* :: Ity_I32 -> Ity_Bit, just select bit[0] */
+      Iop_64to1,  /* :: Ity_I64 -> Ity_Bit, just select bit[0] */
+      Iop_1Uto8,  /* :: Ity_Bit -> Ity_I8,  unsigned widen */
+      Iop_1Uto32, /* :: Ity_Bit -> Ity_I32, unsigned widen */
+      Iop_1Uto64, /* :: Ity_Bit -> Ity_I64, unsigned widen */
+      Iop_1Sto8,  /* :: Ity_Bit -> Ity_I8,  signed widen */
+      Iop_1Sto16, /* :: Ity_Bit -> Ity_I16, signed widen */
+      Iop_1Sto32, /* :: Ity_Bit -> Ity_I32, signed widen */
+      Iop_1Sto64, /* :: Ity_Bit -> Ity_I64, signed widen */
+
+      /* ------ Floating point.  We try to be IEEE754 compliant. ------ */
+
+      /* --- Simple stuff as mandated by 754. --- */
+
+      /* Binary operations, with rounding. */
+      /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */ 
+      Iop_AddF64, Iop_SubF64, Iop_MulF64, Iop_DivF64,
+
+      /* :: IRRoundingMode(I32) x F32 x F32 -> F32 */ 
+      Iop_AddF32, Iop_SubF32, Iop_MulF32, Iop_DivF32,
+
+      /* Variants of the above which produce a 64-bit result but which
+         round their result to a IEEE float range first. */
+      /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */ 
+      Iop_AddF64r32, Iop_SubF64r32, Iop_MulF64r32, Iop_DivF64r32, 
+
+      /* Unary operations, without rounding. */
+      /* :: F64 -> F64 */
+      Iop_NegF64, Iop_AbsF64,
+
+      /* :: F32 -> F32 */
+      Iop_NegF32, Iop_AbsF32,
+
+      /* Unary operations, with rounding. */
+      /* :: IRRoundingMode(I32) x F64 -> F64 */
+      Iop_SqrtF64,
+
+      /* :: IRRoundingMode(I32) x F32 -> F32 */
+      Iop_SqrtF32,
+
+      /* Comparison, yielding GT/LT/EQ/UN(ordered), as per the following:
+            0x45 Unordered
+            0x01 LT
+            0x00 GT
+            0x40 EQ
+         This just happens to be the Intel encoding.  The values
+         are recorded in the type IRCmpF64Result.
+      */
+      /* :: F64 x F64 -> IRCmpF64Result(I32) */
+      Iop_CmpF64,
+      Iop_CmpF32,
+      Iop_CmpF128,
+
+      /* --- Int to/from FP conversions. --- */
+
+      /* For the most part, these take a first argument :: Ity_I32 (as
+         IRRoundingMode) which is an indication of the rounding mode
+         to use, as per the following encoding ("the standard
+         encoding"):
+            00b  to nearest (the default)
+            01b  to -infinity
+            10b  to +infinity
+            11b  to zero
+         This just happens to be the Intel encoding.  For reference only,
+         the PPC encoding is:
+            00b  to nearest (the default)
+            01b  to zero
+            10b  to +infinity
+            11b  to -infinity
+         Any PPC -> IR front end will have to translate these PPC
+         encodings, as encoded in the guest state, to the standard
+         encodings, to pass to the primops.
+         For reference only, the ARM VFP encoding is:
+            00b  to nearest
+            01b  to +infinity
+            10b  to -infinity
+            11b  to zero
+         Again, this will have to be converted to the standard encoding
+         to pass to primops.
+
+         If one of these conversions gets an out-of-range condition,
+         or a NaN, as an argument, the result is host-defined.  On x86
+         the "integer indefinite" value 0x80..00 is produced.  On PPC
+         it is either 0x80..00 or 0x7F..FF depending on the sign of
+         the argument.
+
+         On ARMvfp, when converting to a signed integer result, the
+         overflow result is 0x80..00 for negative args and 0x7F..FF
+         for positive args.  For unsigned integer results it is
+         0x00..00 and 0xFF..FF respectively.
+
+         Rounding is required whenever the destination type cannot
+         represent exactly all values of the source type.
+      */
+      Iop_F64toI16S, /* IRRoundingMode(I32) x F64 -> signed I16 */
+      Iop_F64toI32S, /* IRRoundingMode(I32) x F64 -> signed I32 */
+      Iop_F64toI64S, /* IRRoundingMode(I32) x F64 -> signed I64 */
+      Iop_F64toI64U, /* IRRoundingMode(I32) x F64 -> unsigned I64 */
+
+      Iop_F64toI32U, /* IRRoundingMode(I32) x F64 -> unsigned I32 */
+
+      Iop_I32StoF64, /*                       signed I32 -> F64 */
+      Iop_I64StoF64, /* IRRoundingMode(I32) x signed I64 -> F64 */
+      Iop_I64UtoF64, /* IRRoundingMode(I32) x unsigned I64 -> F64 */
+      Iop_I64UtoF32, /* IRRoundingMode(I32) x unsigned I64 -> F32 */
+
+      Iop_I32UtoF32, /* IRRoundingMode(I32) x unsigned I32 -> F32 */
+      Iop_I32UtoF64, /*                       unsigned I32 -> F64 */
+
+      Iop_F32toI32S, /* IRRoundingMode(I32) x F32 -> signed I32 */
+      Iop_F32toI64S, /* IRRoundingMode(I32) x F32 -> signed I64 */
+      Iop_F32toI32U, /* IRRoundingMode(I32) x F32 -> unsigned I32 */
+      Iop_F32toI64U, /* IRRoundingMode(I32) x F32 -> unsigned I64 */
+
+      Iop_I32StoF32, /* IRRoundingMode(I32) x signed I32 -> F32 */
+      Iop_I64StoF32, /* IRRoundingMode(I32) x signed I64 -> F32 */
+
+      /* Conversion between floating point formats */
+      Iop_F32toF64,  /*                       F32 -> F64 */
+      Iop_F64toF32,  /* IRRoundingMode(I32) x F64 -> F32 */
+
+      /* Reinterpretation.  Take an F64 and produce an I64 with 
+         the same bit pattern, or vice versa. */
+      Iop_ReinterpF64asI64, Iop_ReinterpI64asF64,
+      Iop_ReinterpF32asI32, Iop_ReinterpI32asF32,
+
+      /* Support for 128-bit floating point */
+      Iop_F64HLtoF128,/* (high half of F128,low half of F128) -> F128 */
+      Iop_F128HItoF64,/* F128 -> high half of F128 into a F64 register */
+      Iop_F128LOtoF64,/* F128 -> low  half of F128 into a F64 register */
+
+      /* :: IRRoundingMode(I32) x F128 x F128 -> F128 */
+      Iop_AddF128, Iop_SubF128, Iop_MulF128, Iop_DivF128,
+
+      /* :: F128 -> F128 */
+      Iop_NegF128, Iop_AbsF128,
+
+      /* :: IRRoundingMode(I32) x F128 -> F128 */
+      Iop_SqrtF128,
+
+      Iop_I32StoF128, /*                signed I32  -> F128 */
+      Iop_I64StoF128, /*                signed I64  -> F128 */
+      Iop_I32UtoF128, /*              unsigned I32  -> F128 */
+      Iop_I64UtoF128, /*              unsigned I64  -> F128 */
+      Iop_F32toF128,  /*                       F32  -> F128 */
+      Iop_F64toF128,  /*                       F64  -> F128 */
+
+      Iop_F128toI32S, /* IRRoundingMode(I32) x F128 -> signed I32  */
+      Iop_F128toI64S, /* IRRoundingMode(I32) x F128 -> signed I64  */
+      Iop_F128toI32U, /* IRRoundingMode(I32) x F128 -> unsigned I32  */
+      Iop_F128toI64U, /* IRRoundingMode(I32) x F128 -> unsigned I64  */
+      Iop_F128toF64,  /* IRRoundingMode(I32) x F128 -> F64         */
+      Iop_F128toF32,  /* IRRoundingMode(I32) x F128 -> F32         */
+
+      /* --- guest x86/amd64 specifics, not mandated by 754. --- */
+
+      /* Binary ops, with rounding. */
+      /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */ 
+      Iop_AtanF64,       /* FPATAN,  arctan(arg1/arg2)       */
+      Iop_Yl2xF64,       /* FYL2X,   arg1 * log2(arg2)       */
+      Iop_Yl2xp1F64,     /* FYL2XP1, arg1 * log2(arg2+1.0)   */
+      Iop_PRemF64,       /* FPREM,   non-IEEE remainder(arg1/arg2)    */
+      Iop_PRemC3210F64,  /* C3210 flags resulting from FPREM, :: I32 */
+      Iop_PRem1F64,      /* FPREM1,  IEEE remainder(arg1/arg2)    */
+      Iop_PRem1C3210F64, /* C3210 flags resulting from FPREM1, :: I32 */
+      Iop_ScaleF64,      /* FSCALE,  arg1 * (2^RoundTowardsZero(arg2)) */
+      /* Note that on x86 guest, PRem1{C3210} has the same behaviour
+         as the IEEE mandated RemF64, except it is limited in the
+         range of its operand.  Hence the partialness. */
+
+      /* Unary ops, with rounding. */
+      /* :: IRRoundingMode(I32) x F64 -> F64 */
+      Iop_SinF64,    /* FSIN */
+      Iop_CosF64,    /* FCOS */
+      Iop_TanF64,    /* FTAN */
+      Iop_2xm1F64,   /* (2^arg - 1.0) */
+      Iop_RoundF64toInt, /* F64 value to nearest integral value (still
+                            as F64) */
+      Iop_RoundF32toInt, /* F32 value to nearest integral value (still
+                            as F32) */
+
+      /* --- guest s390 specifics, not mandated by 754. --- */
+
+      /* Fused multiply-add/sub */
+      /* :: IRRoundingMode(I32) x F32 x F32 x F32 -> F32
+            (computes arg2 * arg3 +/- arg4) */ 
+      Iop_MAddF32, Iop_MSubF32,
+
+      /* --- guest ppc32/64 specifics, not mandated by 754. --- */
+
+      /* Ternary operations, with rounding. */
+      /* Fused multiply-add/sub, with 112-bit intermediate
+         precision for ppc.
+         Also used to implement fused multiply-add/sub for s390. */
+      /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64 
+            (computes arg2 * arg3 +/- arg4) */ 
+      Iop_MAddF64, Iop_MSubF64,
+
+      /* Variants of the above which produce a 64-bit result but which
+         round their result to a IEEE float range first. */
+      /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64 */ 
+      Iop_MAddF64r32, Iop_MSubF64r32,
+
+      /* :: F64 -> F64 */
+      Iop_RSqrtEst5GoodF64, /* reciprocal square root estimate, 5 good bits */
+      Iop_RoundF64toF64_NEAREST, /* frin */
+      Iop_RoundF64toF64_NegINF,  /* frim */ 
+      Iop_RoundF64toF64_PosINF,  /* frip */
+      Iop_RoundF64toF64_ZERO,    /* friz */
+
+      /* :: F64 -> F32 */
+      Iop_TruncF64asF32, /* do F64->F32 truncation as per 'fsts' */
+
+      /* :: IRRoundingMode(I32) x F64 -> F64 */
+      Iop_RoundF64toF32, /* round F64 to nearest F32 value (still as F64) */
+      /* NB: pretty much the same as Iop_F64toF32, except no change 
+         of type. */
+
+      /* --- guest arm64 specifics, not mandated by 754. --- */
+
+      Iop_RecpExpF64,  /* FRECPX d  :: IRRoundingMode(I32) x F64 -> F64 */
+      Iop_RecpExpF32,  /* FRECPX s  :: IRRoundingMode(I32) x F32 -> F32 */
+
+      /* ------------------ 16-bit scalar FP ------------------ */
+
+      Iop_F16toF64,  /*                       F16 -> F64 */
+      Iop_F64toF16,  /* IRRoundingMode(I32) x F64 -> F16 */
+
+      Iop_F16toF32,  /*                       F16 -> F32 */
+      Iop_F32toF16,  /* IRRoundingMode(I32) x F32 -> F16 */
+
+      /* ------------------ 32-bit SIMD Integer ------------------ */
+
+      /* 32x1 saturating add/sub (ok, well, not really SIMD :) */
+      Iop_QAdd32S,
+      Iop_QSub32S,
+
+      /* 16x2 add/sub, also signed/unsigned saturating variants */
+      Iop_Add16x2, Iop_Sub16x2,
+      Iop_QAdd16Sx2, Iop_QAdd16Ux2,
+      Iop_QSub16Sx2, Iop_QSub16Ux2,
+
+      /* 16x2 signed/unsigned halving add/sub.  For each lane, these
+         compute bits 16:1 of (eg) sx(argL) + sx(argR),
+         or zx(argL) - zx(argR) etc. */
+      Iop_HAdd16Ux2, Iop_HAdd16Sx2,
+      Iop_HSub16Ux2, Iop_HSub16Sx2,
+
+      /* 8x4 add/sub, also signed/unsigned saturating variants */
+      Iop_Add8x4, Iop_Sub8x4,
+      Iop_QAdd8Sx4, Iop_QAdd8Ux4,
+      Iop_QSub8Sx4, Iop_QSub8Ux4,
+
+      /* 8x4 signed/unsigned halving add/sub.  For each lane, these
+         compute bits 8:1 of (eg) sx(argL) + sx(argR),
+         or zx(argL) - zx(argR) etc. */
+      Iop_HAdd8Ux4, Iop_HAdd8Sx4,
+      Iop_HSub8Ux4, Iop_HSub8Sx4,
+
+      /* 8x4 sum of absolute unsigned differences. */
+      Iop_Sad8Ux4,
+
+      /* MISC (vector integer cmp != 0) */
+      Iop_CmpNEZ16x2, Iop_CmpNEZ8x4,
+
+      /* ------------------ 64-bit SIMD FP ------------------------ */
+
+      /* Conversion to/from int */
+      Iop_I32UtoFx2,  Iop_I32StoFx2,    /* I32x2 -> F32x2 */
+      Iop_FtoI32Ux2_RZ,  Iop_FtoI32Sx2_RZ,    /* F32x2 -> I32x2 */
+      /* Fixed32 format is floating-point number with fixed number of fraction
+         bits. The number of fraction bits is passed as a second argument of
+         type I8. */
+      Iop_F32ToFixed32Ux2_RZ, Iop_F32ToFixed32Sx2_RZ, /* fp -> fixed-point */
+      Iop_Fixed32UToF32x2_RN, Iop_Fixed32SToF32x2_RN, /* fixed-point -> fp */
+
+      /* Binary operations */
+      Iop_Max32Fx2,      Iop_Min32Fx2,
+      /* Pairwise Min and Max. See integer pairwise operations for more
+         details. */
+      Iop_PwMax32Fx2,    Iop_PwMin32Fx2,
+      /* Note: For the following compares, the arm front-end assumes a
+         nan in a lane of either argument returns zero for that lane. */
+      Iop_CmpEQ32Fx2, Iop_CmpGT32Fx2, Iop_CmpGE32Fx2,
+
+      /* Vector Reciprocal Estimate finds an approximate reciprocal of each
+      element in the operand vector, and places the results in the destination
+      vector.  */
+      Iop_RecipEst32Fx2,
+
+      /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
+         Note, that if one of the arguments is zero and another one is infinity
+         of arbitrary sign the result of the operation is 2.0. */
+      Iop_RecipStep32Fx2,
+
+      /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
+         square root of each element in the operand vector. */
+      Iop_RSqrtEst32Fx2,
+
+      /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
+         Note that if one of the arguments is zero and the other one is
+         infinity of arbitrary sign the result of the operation is 1.5. */
+      Iop_RSqrtStep32Fx2,
+
+      /* Unary */
+      Iop_Neg32Fx2, Iop_Abs32Fx2,
+
+      /* ------------------ 64-bit SIMD Integer. ------------------ */
+
+      /* MISC (vector integer cmp != 0) */
+      Iop_CmpNEZ8x8, Iop_CmpNEZ16x4, Iop_CmpNEZ32x2,
+
+      /* ADDITION (normal / unsigned sat / signed sat) */
+      Iop_Add8x8,   Iop_Add16x4,   Iop_Add32x2,
+      Iop_QAdd8Ux8, Iop_QAdd16Ux4, Iop_QAdd32Ux2, Iop_QAdd64Ux1,
+      Iop_QAdd8Sx8, Iop_QAdd16Sx4, Iop_QAdd32Sx2, Iop_QAdd64Sx1,
+
+      /* PAIRWISE operations */
+      /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) =
+            [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */
+      Iop_PwAdd8x8,  Iop_PwAdd16x4,  Iop_PwAdd32x2,
+      Iop_PwMax8Sx8, Iop_PwMax16Sx4, Iop_PwMax32Sx2,
+      Iop_PwMax8Ux8, Iop_PwMax16Ux4, Iop_PwMax32Ux2,
+      Iop_PwMin8Sx8, Iop_PwMin16Sx4, Iop_PwMin32Sx2,
+      Iop_PwMin8Ux8, Iop_PwMin16Ux4, Iop_PwMin32Ux2,
+      /* The lengthening variant is unary.  The resulting vector contains
+         half as many elements as the operand, but they are twice as wide.
+         Example:
+            Iop_PAddL16Ux4( [a,b,c,d] ) = [a+b,c+d]
+               where a+b and c+d are unsigned 32-bit values. */
+      Iop_PwAddL8Ux8, Iop_PwAddL16Ux4, Iop_PwAddL32Ux2,
+      Iop_PwAddL8Sx8, Iop_PwAddL16Sx4, Iop_PwAddL32Sx2,
+
+      /* SUBTRACTION (normal / unsigned sat / signed sat) */
+      Iop_Sub8x8,   Iop_Sub16x4,   Iop_Sub32x2,
+      Iop_QSub8Ux8, Iop_QSub16Ux4, Iop_QSub32Ux2, Iop_QSub64Ux1,
+      Iop_QSub8Sx8, Iop_QSub16Sx4, Iop_QSub32Sx2, Iop_QSub64Sx1,
+
+      /* ABSOLUTE VALUE */
+      Iop_Abs8x8, Iop_Abs16x4, Iop_Abs32x2,
+
+      /* MULTIPLICATION (normal / high half of signed/unsigned / polynomial ) */
+      Iop_Mul8x8, Iop_Mul16x4, Iop_Mul32x2,
+      Iop_Mul32Fx2,
+      Iop_MulHi16Ux4,
+      Iop_MulHi16Sx4,
+      /* Polynomial multiplication treats its arguments as coefficients of
+         polynomials over {0, 1}. */
+      Iop_PolynomialMul8x8,
+
+      /* Vector Saturating Doubling Multiply Returning High Half and
+         Vector Saturating Rounding Doubling Multiply Returning High Half */
+      /* These IROp's multiply corresponding elements in two vectors, double
+         the results, and place the most significant half of the final results
+         in the destination vector. The results are truncated or rounded. If
+         any of the results overflow, they are saturated. */
+      Iop_QDMulHi16Sx4, Iop_QDMulHi32Sx2,
+      Iop_QRDMulHi16Sx4, Iop_QRDMulHi32Sx2,
+
+      /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */
+      Iop_Avg8Ux8,
+      Iop_Avg16Ux4,
+
+      /* MIN/MAX */
+      Iop_Max8Sx8, Iop_Max16Sx4, Iop_Max32Sx2,
+      Iop_Max8Ux8, Iop_Max16Ux4, Iop_Max32Ux2,
+      Iop_Min8Sx8, Iop_Min16Sx4, Iop_Min32Sx2,
+      Iop_Min8Ux8, Iop_Min16Ux4, Iop_Min32Ux2,
+
+      /* COMPARISON */
+      Iop_CmpEQ8x8,  Iop_CmpEQ16x4,  Iop_CmpEQ32x2,
+      Iop_CmpGT8Ux8, Iop_CmpGT16Ux4, Iop_CmpGT32Ux2,
+      Iop_CmpGT8Sx8, Iop_CmpGT16Sx4, Iop_CmpGT32Sx2,
+
+      /* COUNT ones / leading zeroes / leading sign bits (not including topmost
+         bit) */
+      Iop_Cnt8x8,
+      Iop_Clz8x8, Iop_Clz16x4, Iop_Clz32x2,
+      Iop_Cls8x8, Iop_Cls16x4, Iop_Cls32x2,
+      Iop_Clz64x2,
+
+      /* VECTOR x VECTOR SHIFT / ROTATE */
+      Iop_Shl8x8, Iop_Shl16x4, Iop_Shl32x2,
+      Iop_Shr8x8, Iop_Shr16x4, Iop_Shr32x2,
+      Iop_Sar8x8, Iop_Sar16x4, Iop_Sar32x2,
+      Iop_Sal8x8, Iop_Sal16x4, Iop_Sal32x2, Iop_Sal64x1,
+
+      /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
+      Iop_ShlN8x8, Iop_ShlN16x4, Iop_ShlN32x2,
+      Iop_ShrN8x8, Iop_ShrN16x4, Iop_ShrN32x2,
+      Iop_SarN8x8, Iop_SarN16x4, Iop_SarN32x2,
+
+      /* VECTOR x VECTOR SATURATING SHIFT */
+      Iop_QShl8x8, Iop_QShl16x4, Iop_QShl32x2, Iop_QShl64x1,
+      Iop_QSal8x8, Iop_QSal16x4, Iop_QSal32x2, Iop_QSal64x1,
+      /* VECTOR x INTEGER SATURATING SHIFT */
+      Iop_QShlNsatSU8x8,  Iop_QShlNsatSU16x4,
+      Iop_QShlNsatSU32x2, Iop_QShlNsatSU64x1,
+      Iop_QShlNsatUU8x8,  Iop_QShlNsatUU16x4,
+      Iop_QShlNsatUU32x2, Iop_QShlNsatUU64x1,
+      Iop_QShlNsatSS8x8,  Iop_QShlNsatSS16x4,
+      Iop_QShlNsatSS32x2, Iop_QShlNsatSS64x1,
+
+      /* NARROWING (binary) 
+         -- narrow 2xI64 into 1xI64, hi half from left arg */
+      /* For saturated narrowing, I believe there are 4 variants of
+         the basic arithmetic operation, depending on the signedness
+         of argument and result.  Here are examples that exemplify
+         what I mean:
+
+         QNarrow16Uto8U ( UShort x )  if (x >u 255) x = 255;
+                                      return x[7:0];
+
+         QNarrow16Sto8S ( Short x )   if (x <s -128) x = -128;
+                                      if (x >s  127) x = 127;
+                                      return x[7:0];
+
+         QNarrow16Uto8S ( UShort x )  if (x >u 127) x = 127;
+                                      return x[7:0];
+
+         QNarrow16Sto8U ( Short x )   if (x <s 0)   x = 0;
+                                      if (x >s 255) x = 255;
+                                      return x[7:0];
+      */
+      Iop_QNarrowBin16Sto8Ux8,
+      Iop_QNarrowBin16Sto8Sx8, Iop_QNarrowBin32Sto16Sx4,
+      Iop_NarrowBin16to8x8,    Iop_NarrowBin32to16x4,
+
+      /* INTERLEAVING */
+      /* Interleave lanes from low or high halves of
+         operands.  Most-significant result lane is from the left
+         arg. */
+      Iop_InterleaveHI8x8, Iop_InterleaveHI16x4, Iop_InterleaveHI32x2,
+      Iop_InterleaveLO8x8, Iop_InterleaveLO16x4, Iop_InterleaveLO32x2,
+      /* Interleave odd/even lanes of operands.  Most-significant result lane
+         is from the left arg.  Note that Interleave{Odd,Even}Lanes32x2 are
+         identical to Interleave{HI,LO}32x2 and so are omitted.*/
+      Iop_InterleaveOddLanes8x8, Iop_InterleaveEvenLanes8x8,
+      Iop_InterleaveOddLanes16x4, Iop_InterleaveEvenLanes16x4,
+
+      /* CONCATENATION -- build a new value by concatenating either
+         the even or odd lanes of both operands.  Note that
+         Cat{Odd,Even}Lanes32x2 are identical to Interleave{HI,LO}32x2
+         and so are omitted. */
+      Iop_CatOddLanes8x8, Iop_CatOddLanes16x4,
+      Iop_CatEvenLanes8x8, Iop_CatEvenLanes16x4,
+
+      /* GET / SET elements of VECTOR
+         GET is binop (I64, I8) -> I<elem_size>
+         SET is triop (I64, I8, I<elem_size>) -> I64 */
+      /* Note: the arm back-end handles only constant second argument */
+      Iop_GetElem8x8, Iop_GetElem16x4, Iop_GetElem32x2,
+      Iop_SetElem8x8, Iop_SetElem16x4, Iop_SetElem32x2,
+
+      /* DUPLICATING -- copy value to all lanes */
+      Iop_Dup8x8,   Iop_Dup16x4,   Iop_Dup32x2,
+
+      /* SLICE -- produces the lowest 64 bits of (arg1:arg2) >> (8 * arg3).
+         arg3 is a shift amount in bytes and may be between 0 and 8
+         inclusive.  When 0, the result is arg2; when 8, the result is arg1.
+         Not all back ends handle all values.  The arm32 and arm64 back
+         ends handle only immediate arg3 values. */
+      Iop_Slice64,  // (I64, I64, I8) -> I64
+
+      /* REVERSE the order of chunks in vector lanes.  Chunks must be
+         smaller than the vector lanes (obviously) and so may be 8-,
+         16- and 32-bit in size. */
+      /* Examples:
+            Reverse8sIn16_x4([a,b,c,d,e,f,g,h]) = [b,a,d,c,f,e,h,g]
+            Reverse8sIn32_x2([a,b,c,d,e,f,g,h]) = [d,c,b,a,h,g,f,e]
+            Reverse8sIn64_x1([a,b,c,d,e,f,g,h]) = [h,g,f,e,d,c,b,a] */
+      Iop_Reverse8sIn16_x4,
+      Iop_Reverse8sIn32_x2, Iop_Reverse16sIn32_x2,
+      Iop_Reverse8sIn64_x1, Iop_Reverse16sIn64_x1, Iop_Reverse32sIn64_x1,
+
+      /* PERMUTING -- copy src bytes to dst,
+         as indexed by control vector bytes:
+            for i in 0 .. 7 . result[i] = argL[ argR[i] ] 
+         argR[i] values may only be in the range 0 .. 7, else behaviour
+         is undefined. */
+      Iop_Perm8x8,
+
+      /* MISC CONVERSION -- get high bits of each byte lane, a la
+         x86/amd64 pmovmskb */
+      Iop_GetMSBs8x8, /* I64 -> I8 */
+
+      /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
+         See floating-point equivalents for details. */
+      Iop_RecipEst32Ux2, Iop_RSqrtEst32Ux2,
+
+      /* ------------------ Decimal Floating Point ------------------ */
+
+      /* ARITHMETIC INSTRUCTIONS   64-bit
+	 ----------------------------------
+	 IRRoundingMode(I32) X D64 X D64 -> D64
+      */
+      Iop_AddD64, Iop_SubD64, Iop_MulD64, Iop_DivD64,
+
+      /* ARITHMETIC INSTRUCTIONS  128-bit
+	 ----------------------------------
+	 IRRoundingMode(I32) X D128 X D128 -> D128
+      */
+      Iop_AddD128, Iop_SubD128, Iop_MulD128, Iop_DivD128,
+
+      /* SHIFT SIGNIFICAND INSTRUCTIONS
+       *    The DFP significand is shifted by the number of digits specified
+       *    by the U8 operand.  Digits shifted out of the leftmost digit are
+       *    lost. Zeros are supplied to the vacated positions on the right.
+       *    The sign of the result is the same as the sign of the original
+       *    operand.
+       *
+       * D64 x U8  -> D64    left shift and right shift respectively */
+      Iop_ShlD64, Iop_ShrD64,
+
+      /* D128 x U8  -> D128  left shift and right shift respectively */
+      Iop_ShlD128, Iop_ShrD128,
+
+
+      /* FORMAT CONVERSION INSTRUCTIONS
+       *   D32 -> D64
+       */
+      Iop_D32toD64,
+
+      /*   D64 -> D128 */
+      Iop_D64toD128, 
+
+      /*   I32S -> D128 */
+      Iop_I32StoD128,
+
+      /*   I32U -> D128 */
+      Iop_I32UtoD128,
+
+      /*   I64S -> D128 */
+      Iop_I64StoD128, 
+
+      /*   I64U -> D128 */
+      Iop_I64UtoD128,
+
+      /*   IRRoundingMode(I32) x D64 -> D32 */
+      Iop_D64toD32,
+
+      /*   IRRoundingMode(I32) x D128 -> D64 */
+      Iop_D128toD64,
+
+      /*   I32S -> D64 */
+      Iop_I32StoD64,
+
+      /*   I32U -> D64 */
+      Iop_I32UtoD64,
+
+      /*   IRRoundingMode(I32) x I64 -> D64 */
+      Iop_I64StoD64,
+
+      /*   IRRoundingMode(I32) x I64 -> D64 */
+      Iop_I64UtoD64,
+
+      /*   IRRoundingMode(I32) x D64 -> I32 */
+      Iop_D64toI32S,
+
+      /*   IRRoundingMode(I32) x D64 -> I32 */
+      Iop_D64toI32U,
+
+      /*   IRRoundingMode(I32) x D64 -> I64 */
+      Iop_D64toI64S,
+
+      /*   IRRoundingMode(I32) x D64 -> I64 */
+      Iop_D64toI64U,
+
+      /*   IRRoundingMode(I32) x D128 -> I32 */
+      Iop_D128toI32S,
+
+      /*   IRRoundingMode(I32) x D128 -> I32 */
+      Iop_D128toI32U,
+
+      /*   IRRoundingMode(I32) x D128 -> I64 */
+      Iop_D128toI64S,
+
+      /*   IRRoundingMode(I32) x D128 -> I64 */
+      Iop_D128toI64U,
+
+      /*   IRRoundingMode(I32) x F32 -> D32 */
+      Iop_F32toD32,
+
+      /*   IRRoundingMode(I32) x F32 -> D64 */
+      Iop_F32toD64,
+
+      /*   IRRoundingMode(I32) x F32 -> D128 */
+      Iop_F32toD128,
+
+      /*   IRRoundingMode(I32) x F64 -> D32 */
+      Iop_F64toD32,
+
+      /*   IRRoundingMode(I32) x F64 -> D64 */
+      Iop_F64toD64,
+
+      /*   IRRoundingMode(I32) x F64 -> D128 */
+      Iop_F64toD128,
+
+      /*   IRRoundingMode(I32) x F128 -> D32 */
+      Iop_F128toD32,
+
+      /*   IRRoundingMode(I32) x F128 -> D64 */
+      Iop_F128toD64,
+
+      /*   IRRoundingMode(I32) x F128 -> D128 */
+      Iop_F128toD128,
+
+      /*   IRRoundingMode(I32) x D32 -> F32 */
+      Iop_D32toF32,
+
+      /*   IRRoundingMode(I32) x D32 -> F64 */
+      Iop_D32toF64,
+
+      /*   IRRoundingMode(I32) x D32 -> F128 */
+      Iop_D32toF128,
+
+      /*   IRRoundingMode(I32) x D64 -> F32 */
+      Iop_D64toF32,
+
+      /*   IRRoundingMode(I32) x D64 -> F64 */
+      Iop_D64toF64,
+
+      /*   IRRoundingMode(I32) x D64 -> F128 */
+      Iop_D64toF128,
+
+      /*   IRRoundingMode(I32) x D128 -> F32 */
+      Iop_D128toF32,
+
+      /*   IRRoundingMode(I32) x D128 -> F64 */
+      Iop_D128toF64,
+
+      /*   IRRoundingMode(I32) x D128 -> F128 */
+      Iop_D128toF128,
+
+      /* ROUNDING INSTRUCTIONS
+       * IRRoundingMode(I32) x D64 -> D64
+       * The D64 operand, if a finite number, is rounded to a
+       * floating point integer value, i.e. no fractional part.
+       */
+      Iop_RoundD64toInt,
+
+      /* IRRoundingMode(I32) x D128 -> D128 */
+      Iop_RoundD128toInt,
+
+      /* COMPARE INSTRUCTIONS
+       * D64 x D64 -> IRCmpD64Result(I32) */
+      Iop_CmpD64,
+
+      /* D128 x D128 -> IRCmpD128Result(I32) */
+      Iop_CmpD128,
+
+      /* COMPARE BIASED EXPONENT INSTRUCTIONS
+       * D64 x D64 -> IRCmpD64Result(I32) */
+      Iop_CmpExpD64,
+
+      /* D128 x D128 -> IRCmpD128Result(I32) */
+      Iop_CmpExpD128,
+
+      /* QUANTIZE AND ROUND INSTRUCTIONS
+       * The source operand is converted and rounded to the form with the 
+       * immediate exponent specified by the rounding and exponent parameter.
+       *
+       * The second operand is converted and rounded to the form
+       * of the first operand's exponent and rounded based on the specified
+       * rounding mode parameter.
+       *
+       * IRRoundingMode(I32) x D64 x D64-> D64 */
+      Iop_QuantizeD64,
+
+      /* IRRoundingMode(I32) x D128 x D128 -> D128 */
+      Iop_QuantizeD128,
+
+      /* IRRoundingMode(I32) x I8 x D64 -> D64
+       *    The Decimal Floating point operand is rounded to the requested 
+       *    significance given by the I8 operand as specified by the rounding 
+       *    mode.
+       */
+      Iop_SignificanceRoundD64,
+
+      /* IRRoundingMode(I32) x I8 x D128 -> D128 */
+      Iop_SignificanceRoundD128,
+
+      /* EXTRACT AND INSERT INSTRUCTIONS
+       * D64 -> I64
+       *    The exponent of the D32 or D64 operand is extracted.  The 
+       *    extracted exponent is converted to a 64-bit signed binary integer.
+       */
+      Iop_ExtractExpD64,
+
+      /* D128 -> I64 */
+      Iop_ExtractExpD128,
+
+      /* D64 -> I64
+       * The number of significand digits of the D64 operand is extracted.
+       * The number is stored as a 64-bit signed binary integer.
+       */
+      Iop_ExtractSigD64,
+
+      /* D128 -> I64 */
+      Iop_ExtractSigD128,
+
+      /* I64 x D64  -> D64
+       *    The exponent is specified by the first I64 operand the signed
+       *    significand is given by the second I64 value.  The result is a D64
+       *    value consisting of the specified significand and exponent whose 
+       *    sign is that of the specified significand.
+       */
+      Iop_InsertExpD64,
+
+      /* I64 x D128 -> D128 */
+      Iop_InsertExpD128,
+
+      /* Support for 128-bit DFP type */
+      Iop_D64HLtoD128, Iop_D128HItoD64, Iop_D128LOtoD64,
+
+      /*  I64 -> I64  
+       *     Convert 50-bit densely packed BCD string to 60 bit BCD string
+       */
+      Iop_DPBtoBCD,
+
+      /* I64 -> I64
+       *     Convert 60 bit BCD string to 50-bit densely packed BCD string
+       */
+      Iop_BCDtoDPB,
+
+      /* BCD arithmetic instructions, (V128, V128) -> V128
+       * The BCD format is the same as that used in the BCD<->DPB conversion
+       * routines, except using 124 digits (vs 60) plus the trailing 4-bit
+       * signed code. */
+      Iop_BCDAdd, Iop_BCDSub,
+
+      /* Conversion I64 -> D64 */
+      Iop_ReinterpI64asD64,
+
+      /* Conversion D64 -> I64 */
+      Iop_ReinterpD64asI64,
+
+      /* ------------------ 128-bit SIMD FP. ------------------ */
+
+      /* --- 32x4 vector FP --- */
+
+      /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */
+      Iop_Add32Fx4, Iop_Sub32Fx4, Iop_Mul32Fx4, Iop_Div32Fx4, 
+
+      /* binary */
+      Iop_Max32Fx4, Iop_Min32Fx4,
+      Iop_Add32Fx2, Iop_Sub32Fx2,
+      /* Note: For the following compares, the ppc and arm front-ends assume a
+         nan in a lane of either argument returns zero for that lane. */
+      Iop_CmpEQ32Fx4, Iop_CmpLT32Fx4, Iop_CmpLE32Fx4, Iop_CmpUN32Fx4,
+      Iop_CmpGT32Fx4, Iop_CmpGE32Fx4,
+
+      /* Pairwise Max and Min. See integer pairwise operations for details. */
+      Iop_PwMax32Fx4, Iop_PwMin32Fx4,
+
+      /* unary */
+      Iop_Abs32Fx4,
+      Iop_Neg32Fx4,
+
+      /* binary :: IRRoundingMode(I32) x V128 -> V128 */
+      Iop_Sqrt32Fx4,
+
+      /* Vector Reciprocal Estimate finds an approximate reciprocal of each
+         element in the operand vector, and places the results in the
+         destination vector.  */
+      Iop_RecipEst32Fx4,
+
+      /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
+         Note, that if one of the arguments is zero and another one is infinity
+         of arbitrary sign the result of the operation is 2.0. */
+      Iop_RecipStep32Fx4,
+
+      /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
+         square root of each element in the operand vector. */
+      Iop_RSqrtEst32Fx4,
+
+      /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
+         Note, that if one of the arguments is zero and another one is infinity
+         of arbitrary sign the result of the operation is 1.5. */
+      Iop_RSqrtStep32Fx4,
+
+      /* --- Int to/from FP conversion --- */
+      /* Unlike the standard fp conversions, these irops take no
+         rounding mode argument. Instead the irop trailers _R{M,P,N,Z}
+         indicate the mode: {-inf, +inf, nearest, zero} respectively. */
+      Iop_I32UtoFx4,     Iop_I32StoFx4,       /* I32x4 -> F32x4       */
+      Iop_FtoI32Ux4_RZ,  Iop_FtoI32Sx4_RZ,    /* F32x4 -> I32x4       */
+      Iop_QFtoI32Ux4_RZ, Iop_QFtoI32Sx4_RZ,   /* F32x4 -> I32x4 (saturating) */
+      Iop_RoundF32x4_RM, Iop_RoundF32x4_RP,   /* round to fp integer  */
+      Iop_RoundF32x4_RN, Iop_RoundF32x4_RZ,   /* round to fp integer  */
+      /* Fixed32 format is floating-point number with fixed number of fraction
+         bits. The number of fraction bits is passed as a second argument of
+         type I8. */
+      Iop_F32ToFixed32Ux4_RZ, Iop_F32ToFixed32Sx4_RZ, /* fp -> fixed-point */
+      Iop_Fixed32UToF32x4_RN, Iop_Fixed32SToF32x4_RN, /* fixed-point -> fp */
+
+      /* --- Single to/from half conversion --- */
+      /* FIXME: what kind of rounding in F32x4 -> F16x4 case? */
+      Iop_F32toF16x4, Iop_F16toF32x4,         /* F32x4 <-> F16x4      */
+
+      /* --- 32x4 lowest-lane-only scalar FP --- */
+
+      /* In binary cases, upper 3/4 is copied from first operand.  In
+         unary cases, upper 3/4 is copied from the operand. */
+
+      /* binary */
+      Iop_Add32F0x4, Iop_Sub32F0x4, Iop_Mul32F0x4, Iop_Div32F0x4, 
+      Iop_Max32F0x4, Iop_Min32F0x4,
+      Iop_CmpEQ32F0x4, Iop_CmpLT32F0x4, Iop_CmpLE32F0x4, Iop_CmpUN32F0x4, 
+
+      /* unary */
+      Iop_RecipEst32F0x4, Iop_Sqrt32F0x4, Iop_RSqrtEst32F0x4,
+
+      /* --- 64x2 vector FP --- */
+
+      /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */
+      Iop_Add64Fx2, Iop_Sub64Fx2, Iop_Mul64Fx2, Iop_Div64Fx2, 
+
+      /* binary */
+      Iop_Max64Fx2, Iop_Min64Fx2,
+      Iop_CmpEQ64Fx2, Iop_CmpLT64Fx2, Iop_CmpLE64Fx2, Iop_CmpUN64Fx2, 
+
+      /* unary */
+      Iop_Abs64Fx2,
+      Iop_Neg64Fx2,
+
+      /* binary :: IRRoundingMode(I32) x V128 -> V128 */
+      Iop_Sqrt64Fx2,
+
+      /* see 32Fx4 variants for description */
+      Iop_RecipEst64Fx2,    // unary
+      Iop_RecipStep64Fx2,   // binary
+      Iop_RSqrtEst64Fx2,    // unary
+      Iop_RSqrtStep64Fx2,   // binary
+
+      /* --- 64x2 lowest-lane-only scalar FP --- */
+
+      /* In binary cases, upper half is copied from first operand.  In
+         unary cases, upper half is copied from the operand. */
+
+      /* binary */
+      Iop_Add64F0x2, Iop_Sub64F0x2, Iop_Mul64F0x2, Iop_Div64F0x2, 
+      Iop_Max64F0x2, Iop_Min64F0x2,
+      Iop_CmpEQ64F0x2, Iop_CmpLT64F0x2, Iop_CmpLE64F0x2, Iop_CmpUN64F0x2, 
+
+      /* unary */
+      Iop_Sqrt64F0x2,
+
+      /* --- pack / unpack --- */
+
+      /* 64 <-> 128 bit vector */
+      Iop_V128to64,     // :: V128 -> I64, low half
+      Iop_V128HIto64,   // :: V128 -> I64, high half
+      Iop_64HLtoV128,   // :: (I64,I64) -> V128
+
+      Iop_64UtoV128,
+      Iop_SetV128lo64,
+
+      /* Copies lower 64/32/16/8 bits, zeroes out the rest. */
+      Iop_ZeroHI64ofV128,    // :: V128 -> V128
+      Iop_ZeroHI96ofV128,    // :: V128 -> V128
+      Iop_ZeroHI112ofV128,   // :: V128 -> V128
+      Iop_ZeroHI120ofV128,   // :: V128 -> V128
+
+      /* 32 <-> 128 bit vector */
+      Iop_32UtoV128,
+      Iop_V128to32,     // :: V128 -> I32, lowest lane
+      Iop_SetV128lo32,  // :: (V128,I32) -> V128
+
+      /* ------------------ 128-bit SIMD Integer. ------------------ */
+
+      /* BITWISE OPS */
+      Iop_NotV128,
+      Iop_AndV128, Iop_OrV128, Iop_XorV128, 
+
+      /* VECTOR SHIFT (shift amt :: Ity_I8) */
+      Iop_ShlV128, Iop_ShrV128,
+
+      /* MISC (vector integer cmp != 0) */
+      Iop_CmpNEZ8x16, Iop_CmpNEZ16x8, Iop_CmpNEZ32x4, Iop_CmpNEZ64x2,
+
+      /* ADDITION (normal / U->U sat / S->S sat) */
+      Iop_Add8x16,    Iop_Add16x8,    Iop_Add32x4,    Iop_Add64x2,
+      Iop_QAdd8Ux16,  Iop_QAdd16Ux8,  Iop_QAdd32Ux4,  Iop_QAdd64Ux2,
+      Iop_QAdd8Sx16,  Iop_QAdd16Sx8,  Iop_QAdd32Sx4,  Iop_QAdd64Sx2,
+
+      /* ADDITION, ARM64 specific saturating variants. */
+      /* Unsigned widen left arg, signed widen right arg, add, saturate S->S.
+         This corresponds to SUQADD. */
+      Iop_QAddExtUSsatSS8x16, Iop_QAddExtUSsatSS16x8,
+      Iop_QAddExtUSsatSS32x4, Iop_QAddExtUSsatSS64x2,
+      /* Signed widen left arg, unsigned widen right arg, add, saturate U->U.
+         This corresponds to USQADD. */
+      Iop_QAddExtSUsatUU8x16, Iop_QAddExtSUsatUU16x8,
+      Iop_QAddExtSUsatUU32x4, Iop_QAddExtSUsatUU64x2,
+
+      /* SUBTRACTION (normal / unsigned sat / signed sat) */
+      Iop_Sub8x16,   Iop_Sub16x8,   Iop_Sub32x4,   Iop_Sub64x2,
+      Iop_QSub8Ux16, Iop_QSub16Ux8, Iop_QSub32Ux4, Iop_QSub64Ux2,
+      Iop_QSub8Sx16, Iop_QSub16Sx8, Iop_QSub32Sx4, Iop_QSub64Sx2,
+
+      /* MULTIPLICATION (normal / high half of signed/unsigned) */
+      Iop_Mul8x16,  Iop_Mul16x8,    Iop_Mul32x4,
+                    Iop_MulHi16Ux8, Iop_MulHi32Ux4,
+                    Iop_MulHi16Sx8, Iop_MulHi32Sx4,
+      /* (widening signed/unsigned of even lanes, with lowest lane=zero) */
+      Iop_MullEven8Ux16, Iop_MullEven16Ux8, Iop_MullEven32Ux4,
+      Iop_MullEven8Sx16, Iop_MullEven16Sx8, Iop_MullEven32Sx4,
+
+      /* Widening multiplies, all of the form (I64, I64) -> V128 */
+      Iop_Mull8Ux8, Iop_Mull8Sx8,
+      Iop_Mull16Ux4, Iop_Mull16Sx4,
+      Iop_Mull32Ux2, Iop_Mull32Sx2,
+
+      /* Signed doubling saturating widening multiplies, (I64, I64) -> V128 */
+      Iop_QDMull16Sx4, Iop_QDMull32Sx2,
+
+      /* Vector Saturating Doubling Multiply Returning High Half and
+         Vector Saturating Rounding Doubling Multiply Returning High Half.
+         These IROps multiply corresponding elements in two vectors, double
+         the results, and place the most significant half of the final results
+         in the destination vector.  The results are truncated or rounded.  If
+         any of the results overflow, they are saturated.  To be more precise,
+         for each lane, the computed result is: 
+           QDMulHi:  
+             hi-half( sign-extend(laneL) *q sign-extend(laneR) *q 2 )
+           QRDMulHi:
+             hi-half( sign-extend(laneL) *q sign-extend(laneR) *q 2
+                      +q (1 << (lane-width-in-bits - 1)) )
+      */
+      Iop_QDMulHi16Sx8,  Iop_QDMulHi32Sx4,  /* (V128, V128) -> V128 */
+      Iop_QRDMulHi16Sx8, Iop_QRDMulHi32Sx4, /* (V128, V128) -> V128 */
+
+      /* Polynomial multiplication treats its arguments as
+         coefficients of polynomials over {0, 1}. */
+      Iop_PolynomialMul8x16, /* (V128, V128) -> V128 */
+      Iop_PolynomialMull8x8, /*   (I64, I64) -> V128 */
+
+      /* Vector Polynomial multiplication add.   (V128, V128) -> V128
+
+       *** Below is the algorithm for the instructions. These Iops could
+           be emulated to get this functionality, but the emulation would
+           be long and messy.
+
+        Example for polynomial multiply add for vector of bytes
+        do i = 0 to 15
+            prod[i].bit[0:14] <- 0
+            srcA <- VR[argL].byte[i]
+            srcB <- VR[argR].byte[i]
+            do j = 0 to 7
+                do k = 0 to j
+                    gbit <- srcA.bit[k] & srcB.bit[j-k]
+                    prod[i].bit[j] <- prod[i].bit[j] ^ gbit
+                end
+            end
+
+            do j = 8 to 14
+                do k = j-7 to 7
+                     gbit <- (srcA.bit[k] & srcB.bit[j-k])
+                     prod[i].bit[j] <- prod[i].bit[j] ^ gbit
+                end
+            end
+        end
+
+        do i = 0 to 7
+            VR[dst].hword[i] <- 0b0 || (prod[2×i] ^ prod[2×i+1])
+        end
+      */
+      Iop_PolynomialMulAdd8x16, Iop_PolynomialMulAdd16x8,
+      Iop_PolynomialMulAdd32x4, Iop_PolynomialMulAdd64x2,
+
+      /* PAIRWISE operations */
+      /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) =
+            [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */
+      Iop_PwAdd8x16, Iop_PwAdd16x8, Iop_PwAdd32x4,
+      Iop_PwAdd32Fx2,
+      /* Longening variant is unary. The resulting vector contains two times
+         less elements than operand, but they are two times wider.
+         Example:
+            Iop_PwAddL16Ux4( [a,b,c,d] ) = [a+b,c+d]
+               where a+b and c+d are unsigned 32-bit values. */
+      Iop_PwAddL8Ux16, Iop_PwAddL16Ux8, Iop_PwAddL32Ux4,
+      Iop_PwAddL8Sx16, Iop_PwAddL16Sx8, Iop_PwAddL32Sx4,
+
+      /* Other unary pairwise ops */
+
+      /* Vector bit matrix transpose.  (V128) -> V128 */
+      /* For each doubleword element of the source vector, an 8-bit x 8-bit
+       * matrix transpose is performed. */
+      Iop_PwBitMtxXpose64x2,
+
+      /* ABSOLUTE VALUE */
+      Iop_Abs8x16, Iop_Abs16x8, Iop_Abs32x4, Iop_Abs64x2,
+
+      /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */
+      Iop_Avg8Ux16, Iop_Avg16Ux8, Iop_Avg32Ux4,
+      Iop_Avg8Sx16, Iop_Avg16Sx8, Iop_Avg32Sx4,
+
+      /* MIN/MAX */
+      Iop_Max8Sx16, Iop_Max16Sx8, Iop_Max32Sx4, Iop_Max64Sx2,
+      Iop_Max8Ux16, Iop_Max16Ux8, Iop_Max32Ux4, Iop_Max64Ux2,
+      Iop_Min8Sx16, Iop_Min16Sx8, Iop_Min32Sx4, Iop_Min64Sx2,
+      Iop_Min8Ux16, Iop_Min16Ux8, Iop_Min32Ux4, Iop_Min64Ux2,
+
+      /* COMPARISON */
+      Iop_CmpEQ8x16,  Iop_CmpEQ16x8,  Iop_CmpEQ32x4,  Iop_CmpEQ64x2,
+      Iop_CmpGT8Sx16, Iop_CmpGT16Sx8, Iop_CmpGT32Sx4, Iop_CmpGT64Sx2,
+      Iop_CmpGT8Ux16, Iop_CmpGT16Ux8, Iop_CmpGT32Ux4, Iop_CmpGT64Ux2,
+
+      /* COUNT ones / leading zeroes / leading sign bits (not including topmost
+         bit) */
+      Iop_Cnt8x16,
+      Iop_Clz8x16, Iop_Clz16x8, Iop_Clz32x4,
+      Iop_Cls8x16, Iop_Cls16x8, Iop_Cls32x4,
+
+      /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
+      Iop_ShlN8x16, Iop_ShlN16x8, Iop_ShlN32x4, Iop_ShlN64x2,
+      Iop_ShrN8x16, Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2,
+      Iop_SarN8x16, Iop_SarN16x8, Iop_SarN32x4, Iop_SarN64x2,
+
+      /* VECTOR x VECTOR SHIFT / ROTATE */
+      /* FIXME: I'm pretty sure the ARM32 front/back ends interpret these
+         differently from all other targets.  The intention is that
+         the shift amount (2nd arg) is interpreted as unsigned and
+         only the lowest log2(lane-bits) bits are relevant.  But the
+         ARM32 versions treat the shift amount as an 8 bit signed
+         number.  The ARM32 uses should be replaced by the relevant
+         vector x vector bidirectional shifts instead. */
+      Iop_Shl8x16, Iop_Shl16x8, Iop_Shl32x4, Iop_Shl64x2,
+      Iop_Shr8x16, Iop_Shr16x8, Iop_Shr32x4, Iop_Shr64x2,
+      Iop_Sar8x16, Iop_Sar16x8, Iop_Sar32x4, Iop_Sar64x2,
+      Iop_Sal8x16, Iop_Sal16x8, Iop_Sal32x4, Iop_Sal64x2,
+      Iop_Rol8x16, Iop_Rol16x8, Iop_Rol32x4, Iop_Rol64x2,
+
+      /* VECTOR x VECTOR SATURATING SHIFT */
+      Iop_QShl8x16, Iop_QShl16x8, Iop_QShl32x4, Iop_QShl64x2,
+      Iop_QSal8x16, Iop_QSal16x8, Iop_QSal32x4, Iop_QSal64x2,
+      /* VECTOR x INTEGER SATURATING SHIFT */
+      Iop_QShlNsatSU8x16, Iop_QShlNsatSU16x8,
+      Iop_QShlNsatSU32x4, Iop_QShlNsatSU64x2,
+      Iop_QShlNsatUU8x16, Iop_QShlNsatUU16x8,
+      Iop_QShlNsatUU32x4, Iop_QShlNsatUU64x2,
+      Iop_QShlNsatSS8x16, Iop_QShlNsatSS16x8,
+      Iop_QShlNsatSS32x4, Iop_QShlNsatSS64x2,
+
+      /* VECTOR x VECTOR BIDIRECTIONAL SATURATING (& MAYBE ROUNDING) SHIFT */
+      /* All of type (V128, V128) -> V256. */
+      /* The least significant 8 bits of each lane of the second
+         operand are used as the shift amount, and interpreted signedly.
+         Positive values mean a shift left, negative a shift right.  The
+         result is signedly or unsignedly saturated.  There are also
+         rounding variants, which add 2^(shift_amount-1) to the value before
+         shifting, but only in the shift-right case.  Vacated positions
+         are filled with zeroes.  IOW, it's either SHR or SHL, but not SAR.
+
+         These operations return 129 bits: one bit ("Q") indicating whether
+         saturation occurred, and the shift result.  The result type is V256,
+         of which the lower V128 is the shift result, and Q occupies the
+         least significant bit of the upper V128.  All other bits of the
+         upper V128 are zero. */
+      // Unsigned saturation, no rounding
+      Iop_QandUQsh8x16, Iop_QandUQsh16x8,
+      Iop_QandUQsh32x4, Iop_QandUQsh64x2,
+      // Signed saturation, no rounding
+      Iop_QandSQsh8x16, Iop_QandSQsh16x8,
+      Iop_QandSQsh32x4, Iop_QandSQsh64x2,
+
+      // Unsigned saturation, rounding
+      Iop_QandUQRsh8x16, Iop_QandUQRsh16x8,
+      Iop_QandUQRsh32x4, Iop_QandUQRsh64x2,
+      // Signed saturation, rounding
+      Iop_QandSQRsh8x16, Iop_QandSQRsh16x8,
+      Iop_QandSQRsh32x4, Iop_QandSQRsh64x2,
+
+      /* VECTOR x VECTOR BIDIRECTIONAL (& MAYBE ROUNDING) SHIFT */
+      /* All of type (V128, V128) -> V128 */
+      /* The least significant 8 bits of each lane of the second
+         operand are used as the shift amount, and interpreted signedly.
+         Positive values mean a shift left, negative a shift right.
+         There are also rounding variants, which add 2^(shift_amount-1)
+         to the value before shifting, but only in the shift-right case.
+
+         For left shifts, the vacated places are filled with zeroes.
+         For right shifts, the vacated places are filled with zeroes
+         for the U variants and sign bits for the S variants. */
+      // Signed and unsigned, non-rounding
+      Iop_Sh8Sx16, Iop_Sh16Sx8, Iop_Sh32Sx4, Iop_Sh64Sx2,
+      Iop_Sh8Ux16, Iop_Sh16Ux8, Iop_Sh32Ux4, Iop_Sh64Ux2,
+
+      // Signed and unsigned, rounding
+      Iop_Rsh8Sx16, Iop_Rsh16Sx8, Iop_Rsh32Sx4, Iop_Rsh64Sx2,
+      Iop_Rsh8Ux16, Iop_Rsh16Ux8, Iop_Rsh32Ux4, Iop_Rsh64Ux2,
+
+      /* The least significant 8 bits of each lane of the second
+         operand are used as the shift amount, and interpreted signedly.
+         Positive values mean a shift left, negative a shift right.  The
+         result is signedly or unsignedly saturated.  There are also
+         rounding variants, which add 2^(shift_amount-1) to the value before
+         shifting, but only in the shift-right case.  Vacated positions
+         are filled with zeroes.  IOW, it's either SHR or SHL, but not SAR.
+      */
+
+      /* VECTOR x SCALAR SATURATING (& MAYBE ROUNDING) NARROWING SHIFT RIGHT */
+      /* All of type (V128, I8) -> V128 */
+      /* The first argument is shifted right, then narrowed to half the width
+         by saturating it.  The second argument is a scalar shift amount that
+         applies to all lanes, and must be a value in the range 1 to lane_width.
+         The shift may be done signedly (Sar variants) or unsignedly (Shr
+         variants).  The saturation is done according to the two signedness
+         indicators at the end of the name.  For example 64Sto32U means a
+         signed 64 bit value is saturated into an unsigned 32 bit value.
+         Additionally, the QRS variants do rounding, that is, they add the
+         value (1 << (shift_amount-1)) to each source lane before shifting.
+
+         These operations return 65 bits: one bit ("Q") indicating whether
+         saturation occurred, and the shift result.  The result type is V128,
+         of which the lower half is the shift result, and Q occupies the
+         least significant bit of the upper half.  All other bits of the
+         upper half are zero. */
+      // No rounding, sat U->U
+      Iop_QandQShrNnarrow16Uto8Ux8,
+      Iop_QandQShrNnarrow32Uto16Ux4, Iop_QandQShrNnarrow64Uto32Ux2,
+      // No rounding, sat S->S
+      Iop_QandQSarNnarrow16Sto8Sx8,
+      Iop_QandQSarNnarrow32Sto16Sx4, Iop_QandQSarNnarrow64Sto32Sx2,
+      // No rounding, sat S->U
+      Iop_QandQSarNnarrow16Sto8Ux8,
+      Iop_QandQSarNnarrow32Sto16Ux4, Iop_QandQSarNnarrow64Sto32Ux2,
+
+      // Rounding, sat U->U
+      Iop_QandQRShrNnarrow16Uto8Ux8,
+      Iop_QandQRShrNnarrow32Uto16Ux4, Iop_QandQRShrNnarrow64Uto32Ux2,
+      // Rounding, sat S->S
+      Iop_QandQRSarNnarrow16Sto8Sx8,
+      Iop_QandQRSarNnarrow32Sto16Sx4, Iop_QandQRSarNnarrow64Sto32Sx2,
+      // Rounding, sat S->U
+      Iop_QandQRSarNnarrow16Sto8Ux8,
+      Iop_QandQRSarNnarrow32Sto16Ux4, Iop_QandQRSarNnarrow64Sto32Ux2,
+
+      /* NARROWING (binary) 
+         -- narrow 2xV128 into 1xV128, hi half from left arg */
+      /* See comments above w.r.t. U vs S issues in saturated narrowing. */
+      Iop_QNarrowBin16Sto8Ux16, Iop_QNarrowBin32Sto16Ux8,
+      Iop_QNarrowBin16Sto8Sx16, Iop_QNarrowBin32Sto16Sx8,
+      Iop_QNarrowBin16Uto8Ux16, Iop_QNarrowBin32Uto16Ux8,
+      Iop_NarrowBin16to8x16, Iop_NarrowBin32to16x8,
+      Iop_QNarrowBin64Sto32Sx4, Iop_QNarrowBin64Uto32Ux4,
+      Iop_NarrowBin64to32x4,
+
+      /* NARROWING (unary) -- narrow V128 into I64 */
+      Iop_NarrowUn16to8x8, Iop_NarrowUn32to16x4, Iop_NarrowUn64to32x2,
+      /* Saturating narrowing from signed source to signed/unsigned
+         destination */
+      Iop_QNarrowUn16Sto8Sx8, Iop_QNarrowUn32Sto16Sx4, Iop_QNarrowUn64Sto32Sx2,
+      Iop_QNarrowUn16Sto8Ux8, Iop_QNarrowUn32Sto16Ux4, Iop_QNarrowUn64Sto32Ux2,
+      /* Saturating narrowing from unsigned source to unsigned destination */
+      Iop_QNarrowUn16Uto8Ux8, Iop_QNarrowUn32Uto16Ux4, Iop_QNarrowUn64Uto32Ux2,
+
+      /* WIDENING -- sign or zero extend each element of the argument
+         vector to the twice original size.  The resulting vector consists of
+         the same number of elements but each element and the vector itself
+         are twice as wide.
+         All operations are I64->V128.
+         Example
+            Iop_Widen32Sto64x2( [a, b] ) = [c, d]
+               where c = Iop_32Sto64(a) and d = Iop_32Sto64(b) */
+      Iop_Widen8Uto16x8, Iop_Widen16Uto32x4, Iop_Widen32Uto64x2,
+      Iop_Widen8Sto16x8, Iop_Widen16Sto32x4, Iop_Widen32Sto64x2,
+
+      /* INTERLEAVING */
+      /* Interleave lanes from low or high halves of
+         operands.  Most-significant result lane is from the left
+         arg. */
+      Iop_InterleaveHI8x16, Iop_InterleaveHI16x8,
+      Iop_InterleaveHI32x4, Iop_InterleaveHI64x2,
+      Iop_InterleaveLO8x16, Iop_InterleaveLO16x8,
+      Iop_InterleaveLO32x4, Iop_InterleaveLO64x2,
+      /* Interleave odd/even lanes of operands.  Most-significant result lane
+         is from the left arg. */
+      Iop_InterleaveOddLanes8x16, Iop_InterleaveEvenLanes8x16,
+      Iop_InterleaveOddLanes16x8, Iop_InterleaveEvenLanes16x8,
+      Iop_InterleaveOddLanes32x4, Iop_InterleaveEvenLanes32x4,
+
+      /* CONCATENATION -- build a new value by concatenating either
+         the even or odd lanes of both operands.  Note that
+         Cat{Odd,Even}Lanes64x2 are identical to Interleave{HI,LO}64x2
+         and so are omitted. */
+      Iop_CatOddLanes8x16, Iop_CatOddLanes16x8, Iop_CatOddLanes32x4,
+      Iop_CatEvenLanes8x16, Iop_CatEvenLanes16x8, Iop_CatEvenLanes32x4,
+
+      /* GET elements of VECTOR
+         GET is binop (V128, I8) -> I<elem_size> */
+      /* Note: the arm back-end handles only constant second argument. */
+      Iop_GetElem8x16, Iop_GetElem16x8, Iop_GetElem32x4, Iop_GetElem64x2,
+
+      /* DUPLICATING -- copy value to all lanes */
+      Iop_Dup8x16,   Iop_Dup16x8,   Iop_Dup32x4,
+
+      /* SLICE -- produces the lowest 128 bits of (arg1:arg2) >> (8 * arg3).
+         arg3 is a shift amount in bytes and may be between 0 and 16
+         inclusive.  When 0, the result is arg2; when 16, the result is arg1.
+         Not all back ends handle all values.  The arm64 back
+         end handles only immediate arg3 values. */
+      Iop_SliceV128,  // (V128, V128, I8) -> V128
+
+      /* REVERSE the order of chunks in vector lanes.  Chunks must be
+         smaller than the vector lanes (obviously) and so may be 8-,
+         16- and 32-bit in size.  See definitions of 64-bit SIMD
+         versions above for examples. */
+      Iop_Reverse8sIn16_x8,
+      Iop_Reverse8sIn32_x4, Iop_Reverse16sIn32_x4,
+      Iop_Reverse8sIn64_x2, Iop_Reverse16sIn64_x2, Iop_Reverse32sIn64_x2,
+      Iop_Reverse1sIn8_x16, /* Reverse bits in each byte lane. */
+
+      /* PERMUTING -- copy src bytes to dst,
+         as indexed by control vector bytes:
+            for i in 0 .. 15 . result[i] = argL[ argR[i] ] 
+         argR[i] values may only be in the range 0 .. 15, else behaviour
+         is undefined. */
+      Iop_Perm8x16,
+      Iop_Perm32x4, /* ditto, except argR values are restricted to 0 .. 3 */
+
+      /* MISC CONVERSION -- get high bits of each byte lane, a la
+         x86/amd64 pmovmskb */
+      Iop_GetMSBs8x16, /* V128 -> I16 */
+
+      /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
+         See floating-point equivalents for details. */
+      Iop_RecipEst32Ux4, Iop_RSqrtEst32Ux4,
+
+      /* ------------------ 256-bit SIMD Integer. ------------------ */
+
+      /* Pack/unpack */
+      Iop_V256to64_0,  // V256 -> I64, extract least significant lane
+      Iop_V256to64_1,
+      Iop_V256to64_2,
+      Iop_V256to64_3,  // V256 -> I64, extract most significant lane
+
+      Iop_64x4toV256,  // (I64,I64,I64,I64)->V256
+                       // first arg is most significant lane
+
+      Iop_V256toV128_0, // V256 -> V128, less significant lane
+      Iop_V256toV128_1, // V256 -> V128, more significant lane
+      Iop_V128HLtoV256, // (V128,V128)->V256, first arg is most signif
+
+      Iop_AndV256,
+      Iop_OrV256,
+      Iop_XorV256,
+      Iop_NotV256,
+
+      /* MISC (vector integer cmp != 0) */
+      Iop_CmpNEZ8x32, Iop_CmpNEZ16x16, Iop_CmpNEZ32x8, Iop_CmpNEZ64x4,
+
+      Iop_Add8x32,    Iop_Add16x16,    Iop_Add32x8,    Iop_Add64x4,
+      Iop_Sub8x32,    Iop_Sub16x16,    Iop_Sub32x8,    Iop_Sub64x4,
+
+      Iop_CmpEQ8x32,  Iop_CmpEQ16x16,  Iop_CmpEQ32x8,  Iop_CmpEQ64x4,
+      Iop_CmpGT8Sx32, Iop_CmpGT16Sx16, Iop_CmpGT32Sx8, Iop_CmpGT64Sx4,
+
+      Iop_ShlN16x16, Iop_ShlN32x8, Iop_ShlN64x4,
+      Iop_ShrN16x16, Iop_ShrN32x8, Iop_ShrN64x4,
+      Iop_SarN16x16, Iop_SarN32x8,
+
+      Iop_Max8Sx32, Iop_Max16Sx16, Iop_Max32Sx8,
+      Iop_Max8Ux32, Iop_Max16Ux16, Iop_Max32Ux8,
+      Iop_Min8Sx32, Iop_Min16Sx16, Iop_Min32Sx8,
+      Iop_Min8Ux32, Iop_Min16Ux16, Iop_Min32Ux8,
+
+      Iop_Mul16x16, Iop_Mul32x8,
+      Iop_MulHi16Ux16, Iop_MulHi16Sx16,
+
+      Iop_QAdd8Ux32, Iop_QAdd16Ux16,
+      Iop_QAdd8Sx32, Iop_QAdd16Sx16,
+      Iop_QSub8Ux32, Iop_QSub16Ux16,
+      Iop_QSub8Sx32, Iop_QSub16Sx16,
+
+      Iop_Avg8Ux32, Iop_Avg16Ux16,
+
+      Iop_Perm32x8,
+
+      /* (V128, V128) -> V128 */
+      Iop_CipherV128, Iop_CipherLV128, Iop_CipherSV128,
+      Iop_NCipherV128, Iop_NCipherLV128,
+
+      /* Hash instructions, Federal Information Processing Standards
+       * Publication 180-3 Secure Hash Standard. */
+      /* (V128, I8) -> V128; The I8 input arg is (ST | SIX), where ST and
+       * SIX are fields from the insn. See ISA 2.07 description of
+       * vshasigmad and vshasigmaw insns.*/
+      Iop_SHA512, Iop_SHA256,
+
+      /* ------------------ 256-bit SIMD FP. ------------------ */
+
+      /* ternary :: IRRoundingMode(I32) x V256 x V256 -> V256 */
+      Iop_Add64Fx4, Iop_Sub64Fx4, Iop_Mul64Fx4, Iop_Div64Fx4,
+      Iop_Add32Fx8, Iop_Sub32Fx8, Iop_Mul32Fx8, Iop_Div32Fx8,
+
+      Iop_Sqrt32Fx8,
+      Iop_Sqrt64Fx4,
+      Iop_RSqrtEst32Fx8,
+      Iop_RecipEst32Fx8,
+
+      Iop_Max32Fx8, Iop_Min32Fx8,
+      Iop_Max64Fx4, Iop_Min64Fx4,
+      Iop_LAST      /* must be the last enumerator */
+   }
+   IROp;
+
+/* Pretty-print an op. */
+extern void ppIROp ( IROp );
+
+
+/* Encoding of IEEE754-specified rounding modes.
+   Note, various front and back ends rely on the actual numerical
+   values of these, so do not change them.
+   NOTE(review): values 0..3 appear to match the rounding-mode
+   encoding used by common hardware FP control registers (x87/SSE
+   MXCSR) -- confirm before relying on that correspondence. */
+typedef
+   enum { 
+      Irrm_NEAREST              = 0,  // Round to nearest, ties to even
+      Irrm_NegINF               = 1,  // Round to negative infinity
+      Irrm_PosINF               = 2,  // Round to positive infinity
+      Irrm_ZERO                 = 3,  // Round toward zero
+      Irrm_NEAREST_TIE_AWAY_0   = 4,  // Round to nearest, ties away from 0
+      Irrm_PREPARE_SHORTER      = 5,  // Round to prepare for shorter 
+                                      // precision
+      Irrm_AWAY_FROM_ZERO       = 6,  // Round to away from 0
+      Irrm_NEAREST_TIE_TOWARD_0 = 7   // Round to nearest, ties towards 0
+   }
+   IRRoundingMode;
+
+/* Binary floating point comparison result values.
+   This is also derived from what IA32 does.
+   NOTE(review): the hex values look like the x87 FPU condition-code
+   bits (C0 = 0x01, C2 = 0x04, C3 = 0x40), with the unordered result
+   setting all three (0x45 = C0|C2|C3) -- confirm against the Intel
+   SDM before relying on the bit layout. */
+typedef
+   enum {
+      Ircr_UN = 0x45,   // unordered: at least one operand is a NaN
+      Ircr_LT = 0x01,   // first operand compares less than second
+      Ircr_GT = 0x00,   // first operand compares greater than second
+      Ircr_EQ = 0x40    // operands compare equal
+   }
+   IRCmpFResult;
+
+/* Width-specific aliases; all widths share the same result encoding. */
+typedef IRCmpFResult IRCmpF32Result;
+typedef IRCmpFResult IRCmpF64Result;
+typedef IRCmpFResult IRCmpF128Result;
+
+/* Decimal floating point result values; same encoding again. */
+typedef IRCmpFResult IRCmpDResult;
+typedef IRCmpDResult IRCmpD64Result;
+typedef IRCmpDResult IRCmpD128Result;
+
+/* ------------------ Expressions ------------------ */
+
+typedef struct _IRQop   IRQop;   /* forward declaration */
+typedef struct _IRTriop IRTriop; /* forward declaration */
+
+
+/* The different kinds of expressions.  Their meaning is explained below
+   in the comments for IRExpr.  The first tag carries a distinctive base
+   value (0x1900); the rest follow sequentially, so the ordering must
+   not be changed. */
+typedef
+   enum { 
+      Iex_Binder=0x1900, /* pattern-matching placeholder; Vex-internal only */
+      Iex_Get,     /* read guest register at a fixed offset */
+      Iex_GetI,    /* read guest register at a non-fixed, circularly
+                      indexed offset */
+      Iex_RdTmp,   /* value held by a temporary */
+      Iex_Qop,     /* quaternary operation */
+      Iex_Triop,   /* ternary operation */
+      Iex_Binop,   /* binary operation */
+      Iex_Unop,    /* unary operation */
+      Iex_Load,    /* load expression */
+      Iex_Const,   /* constant-valued expression */
+      Iex_ITE,     /* if-then-else select */
+      Iex_CCall,   /* call to a C helper function -- presumably pure;
+                      see the IRExpr comments later in the file */
+      Iex_VECRET,  /* NOTE(review): looks like a marker used in helper-call
+                      argument lists for a vector return -- confirm */
+      Iex_BBPTR    /* NOTE(review): likewise, apparently a marker for the
+                      guest-state pointer argument -- confirm */
+   }
+   IRExprTag;
+
+/* An expression.  Stored as a tagged union.  'tag' indicates what kind
+   of expression this is.  'Iex' is the union that holds the fields.  If
+   an IRExpr 'e' has e.tag equal to Iex_Load, then it's a load
+   expression, and the fields can be accessed with
+   'e.Iex.Load.<fieldname>'.
+
+   For each kind of expression, we show what it looks like when
+   pretty-printed with ppIRExpr().
+*/
+typedef
+   struct _IRExpr
+   IRExpr;
+
struct _IRExpr {
   IRExprTag tag;
   union {
      /* Used only in pattern matching within Vex.  Should not be seen
         outside of Vex. */
      struct {
         Int binder;
      } Binder;

      /* Read a guest register, at a fixed offset in the guest state.
         ppIRExpr output: GET:<ty>(<offset>), eg. GET:I32(0)
      */
      struct {
         Int    offset;    /* Offset into the guest state */
         IRType ty;        /* Type of the value being read */
      } Get;

      /* Read a guest register at a non-fixed offset in the guest
         state.  This allows circular indexing into parts of the guest
         state, which is essential for modelling situations where the
         identity of guest registers is not known until run time.  One
         example is the x87 FP register stack.

         The part of the guest state to be treated as a circular array
         is described in the IRRegArray 'descr' field.  It holds the
         offset of the first element in the array, the type of each
         element, and the number of elements.

         The array index is indicated rather indirectly, in a way
         which makes optimisation easy: as the sum of variable part
         (the 'ix' field) and a constant offset (the 'bias' field).

         Since the indexing is circular, the actual array index to use
         is computed as (ix + bias) % num-of-elems-in-the-array.

         Here's an example.  The description

            (96:8xF64)[t39,-7]

         describes an array of 8 F64-typed values, the
         guest-state-offset of the first being 96.  This array is
         being indexed at (t39 - 7) % 8.

         It is important to get the array size/type exactly correct
         since IR optimisation looks closely at such info in order to
         establish aliasing/non-aliasing between separate GetI and
         PutI events, which is used to establish when they can be
         reordered, etc.  Putting incorrect info in will lead to
         obscure IR optimisation bugs.

            ppIRExpr output: GETI<descr>[<ix>,<bias>]
                         eg. GETI(128:8xI8)[t1,0]
      */
      struct {
         IRRegArray* descr; /* Part of guest state treated as circular */
         IRExpr*     ix;    /* Variable part of index into array */
         Int         bias;  /* Constant offset part of index into array */
      } GetI;

      /* The value held by a temporary.
         ppIRExpr output: t<tmp>, eg. t1
      */
      struct {
         IRTemp tmp;       /* The temporary number */
      } RdTmp;

      /* A quaternary operation.
         ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>, <arg4>),
                      eg. MAddF64r32(t1, t2, t3, t4)
      */
      struct {
        IRQop* details;    /* Op-code and the four operands */
      } Qop;

      /* A ternary operation.
         ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>),
                      eg. MulF64(1, 2.0, 3.0)
      */
      struct {
        IRTriop* details;  /* Op-code and the three operands */
      } Triop;

      /* A binary operation.
         ppIRExpr output: <op>(<arg1>, <arg2>), eg. Add32(t1,t2)
      */
      struct {
         IROp op;          /* op-code   */
         IRExpr* arg1;     /* operand 1 */
         IRExpr* arg2;     /* operand 2 */
      } Binop;

      /* A unary operation.
         ppIRExpr output: <op>(<arg>), eg. Neg8(t1)
      */
      struct {
         IROp    op;       /* op-code */
         IRExpr* arg;      /* operand */
      } Unop;

      /* A load from memory -- a normal load, not a load-linked.
         Load-Linkeds (and Store-Conditionals) are instead represented
         by IRStmt.LLSC since Load-Linkeds have side effects and so
         are not semantically valid IRExpr's.
         ppIRExpr output: LD<end>:<ty>(<addr>), eg. LDle:I32(t1)
      */
      struct {
         IREndness end;    /* Endian-ness of the load */
         IRType    ty;     /* Type of the loaded value */
         IRExpr*   addr;   /* Address being loaded from */
      } Load;

      /* A constant-valued expression.
         ppIRExpr output: <con>, eg. 0x4:I32
      */
      struct {
         IRConst* con;     /* The constant itself */
      } Const;

      /* A call to a pure (no side-effects) helper C function.

         With the 'cee' field, 'name' is the function's name.  It is
         only used for pretty-printing purposes.  The address to call
         (host address, of course) is stored in the 'addr' field
         inside 'cee'.

         The 'args' field is a NULL-terminated array of arguments.
         The stated return IRType, and the implied argument types,
         must match that of the function being called well enough so
         that the back end can actually generate correct code for the
         call.

         The called function **must** satisfy the following:

         * no side effects -- must be a pure function, the result of
           which depends only on the passed parameters.

         * it may not look at, nor modify, any of the guest state
           since that would hide guest state transitions from
           instrumenters

         * it may not access guest memory, since that would hide
           guest memory transactions from the instrumenters

         * it must not assume that arguments are being evaluated in a
           particular order. The order of evaluation is unspecified.

         This is restrictive, but makes the semantics clean, and does
         not interfere with IR optimisation.

         If you want to call a helper which can mess with guest state
         and/or memory, instead use Ist_Dirty.  This is a lot more
         flexible, but you have to give a bunch of details about what
         the helper does (and you better be telling the truth,
         otherwise any derived instrumentation will be wrong).  Also
         Ist_Dirty inhibits various IR optimisations and so can cause
         quite poor code to be generated.  Try to avoid it.

         In principle it would be allowable to have the arg vector
         contain an IRExpr_VECRET(), although not IRExpr_BBPTR(). However,
         at the moment there is no requirement for clean helper calls to
         be able to return V128 or V256 values.  Hence this is not allowed.

         ppIRExpr output: <cee>(<args>):<retty>
                      eg. foo{0x80489304}(t1, t2):I32
      */
      struct {
         IRCallee* cee;    /* Function to call. */
         IRType    retty;  /* Type of return value. */
         IRExpr**  args;   /* Vector of argument expressions. */
      }  CCall;

      /* A ternary if-then-else operator.  It returns iftrue if cond is
         nonzero, iffalse otherwise.  Note that it is STRICT, ie. both
         iftrue and iffalse are evaluated in all cases.

         ppIRExpr output: ITE(<cond>,<iftrue>,<iffalse>),
                         eg. ITE(t6,t7,t8)
      */
      struct {
         IRExpr* cond;     /* Condition */
         IRExpr* iftrue;   /* True expression */
         IRExpr* iffalse;  /* False expression */
      } ITE;
   } Iex;
};
+
/* Expression auxiliaries: a ternary expression.  Referenced from the
   IRExpr union via the Iex.Triop.details pointer. */
struct _IRTriop {
   IROp op;          /* op-code   */
   IRExpr* arg1;     /* operand 1 */
   IRExpr* arg2;     /* operand 2 */
   IRExpr* arg3;     /* operand 3 */
};
+
/* Expression auxiliaries: a quaternary expression.  Referenced from
   the IRExpr union via the Iex.Qop.details pointer. */
struct _IRQop {
   IROp op;          /* op-code   */
   IRExpr* arg1;     /* operand 1 */
   IRExpr* arg2;     /* operand 2 */
   IRExpr* arg3;     /* operand 3 */
   IRExpr* arg4;     /* operand 4 */
};
+
+
+/* Two special kinds of IRExpr, which can ONLY be used in
+   argument lists for dirty helper calls (IRDirty.args) and in NO
+   OTHER PLACES.  And then only in very limited ways.  */
+
+/* Denotes an argument which (in the helper) takes a pointer to a
+   (naturally aligned) V128 or V256, into which the helper is expected
+   to write its result.  Use of IRExpr_VECRET() is strictly
+   controlled.  If the helper returns a V128 or V256 value then
+   IRExpr_VECRET() must appear exactly once in the arg list, although
+   it can appear anywhere, and the helper must have a C 'void' return
+   type.  If the helper returns any other type, IRExpr_VECRET() may
+   not appear in the argument list. */
+
/* Denotes a void* argument which is passed to the helper, which at
+   run time will point to the thread's guest state area.  This can
+   only appear at most once in an argument list, and it may not appear
+   at all in argument lists for clean helper calls. */
+
+static inline Bool is_IRExpr_VECRET_or_BBPTR ( const IRExpr* e ) {
+   return e->tag == Iex_VECRET || e->tag == Iex_BBPTR;
+}
+
+
/* Expression constructors.  Each returns a freshly allocated IRExpr
   with the corresponding tag and fields. */
extern IRExpr* IRExpr_Binder ( Int binder );
extern IRExpr* IRExpr_Get    ( Int off, IRType ty );
extern IRExpr* IRExpr_GetI   ( IRRegArray* descr, IRExpr* ix, Int bias );
extern IRExpr* IRExpr_RdTmp  ( IRTemp tmp );
extern IRExpr* IRExpr_Qop    ( IROp op, IRExpr* arg1, IRExpr* arg2, 
                                        IRExpr* arg3, IRExpr* arg4 );
extern IRExpr* IRExpr_Triop  ( IROp op, IRExpr* arg1, 
                                        IRExpr* arg2, IRExpr* arg3 );
extern IRExpr* IRExpr_Binop  ( IROp op, IRExpr* arg1, IRExpr* arg2 );
extern IRExpr* IRExpr_Unop   ( IROp op, IRExpr* arg );
extern IRExpr* IRExpr_Load   ( IREndness end, IRType ty, IRExpr* addr );
extern IRExpr* IRExpr_Const  ( IRConst* con );
extern IRExpr* IRExpr_CCall  ( IRCallee* cee, IRType retty, IRExpr** args );
extern IRExpr* IRExpr_ITE    ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse );
extern IRExpr* IRExpr_VECRET ( void );
extern IRExpr* IRExpr_BBPTR  ( void );

/* Deep-copy an IRExpr. */
extern IRExpr* deepCopyIRExpr ( const IRExpr* );

/* Pretty-print an IRExpr. */
extern void ppIRExpr ( const IRExpr* );

/* NULL-terminated IRExpr vector constructors, suitable for
   use as arg lists in clean/dirty helper calls. */
extern IRExpr** mkIRExprVec_0 ( void );
extern IRExpr** mkIRExprVec_1 ( IRExpr* );
extern IRExpr** mkIRExprVec_2 ( IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_3 ( IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_4 ( IRExpr*, IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_5 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr* );
extern IRExpr** mkIRExprVec_6 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_7 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_8 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr*, IRExpr*, IRExpr*);

/* IRExpr vector copiers:
   - shallowCopyIRExprVec: shallow-copy (ie. create a new vector that
     shares the elements with the original).
   - deepCopyIRExprVec: deep-copy (ie. create a completely new vector). */
extern IRExpr** shallowCopyIRExprVec ( IRExpr** );
extern IRExpr** deepCopyIRExprVec ( IRExpr *const * );

/* Make a constant expression from the given host word taking into
   account (of course) the host word size. */
extern IRExpr* mkIRExpr_HWord ( HWord );

/* Convenience function for constructing clean helper calls. */
extern 
IRExpr* mkIRExprCCall ( IRType retty,
                        Int regparms, const HChar* name, void* addr, 
                        IRExpr** args );
+
+/* Convenience functions for atoms (IRExprs which are either Iex_Tmp or
+ * Iex_Const). */
+static inline Bool isIRAtom ( const IRExpr* e ) {
+   return toBool(e->tag == Iex_RdTmp || e->tag == Iex_Const);
+}
+
/* Are these two IR atoms identical?  Causes an assertion
   failure if they are passed non-atoms (see isIRAtom above). */
extern Bool eqIRAtom ( const IRExpr*, const IRExpr* );
+
+
+/* ------------------ Jump kinds ------------------ */
+
+/* This describes hints which can be passed to the dispatcher at guest
+   control-flow transfer points.
+
+   Re Ijk_InvalICache and Ijk_FlushDCache: the guest state _must_ have
+   two pseudo-registers, guest_CMSTART and guest_CMLEN, which specify
+   the start and length of the region to be invalidated.  CM stands
+   for "Cache Management".  These are both the size of a guest word.
+   It is the responsibility of the relevant toIR.c to ensure that
+   these are filled in with suitable values before issuing a jump of
+   kind Ijk_InvalICache or Ijk_FlushDCache.
+
+   Ijk_InvalICache requests invalidation of translations taken from
+   the requested range.  Ijk_FlushDCache requests flushing of the D
+   cache for the specified range.
+
+   Re Ijk_EmWarn and Ijk_EmFail: the guest state must have a
+   pseudo-register guest_EMNOTE, which is 32-bits regardless of the
+   host or guest word size.  That register should be made to hold a
+   VexEmNote value to indicate the reason for the exit.
+
+   In the case of Ijk_EmFail, the exit is fatal (Vex-generated code
+   cannot continue) and so the jump destination can be anything.
+
+   Re Ijk_Sys_ (syscall jumps): the guest state must have a
+   pseudo-register guest_IP_AT_SYSCALL, which is the size of a guest
+   word.  Front ends should set this to be the IP at the most recently
+   executed kernel-entering (system call) instruction.  This makes it
+   very much easier (viz, actually possible at all) to back up the
+   guest to restart a syscall that has been interrupted by a signal.
+*/
typedef
   enum {
      Ijk_INVALID=0x1A00, 
      Ijk_Boring,         /* not interesting; just goto next */
      Ijk_Call,           /* guest is doing a call */
      Ijk_Ret,            /* guest is doing a return */
      Ijk_ClientReq,      /* do guest client req before continuing */
      Ijk_Yield,          /* client is yielding to thread scheduler */
      Ijk_EmWarn,         /* report emulation warning before continuing */
      Ijk_EmFail,         /* emulation critical (FATAL) error; give up */
      Ijk_NoDecode,       /* current instruction cannot be decoded */
      Ijk_MapFail,        /* Vex-provided address translation failed */
      Ijk_InvalICache,    /* Inval icache for range [CMSTART, +CMLEN) */
      Ijk_FlushDCache,    /* Flush dcache for range [CMSTART, +CMLEN) */
      Ijk_NoRedir,        /* Jump to un-redirected guest addr */
      Ijk_SigILL,         /* current instruction synths SIGILL */
      Ijk_SigTRAP,        /* current instruction synths SIGTRAP */
      Ijk_SigSEGV,        /* current instruction synths SIGSEGV */
      Ijk_SigBUS,         /* current instruction synths SIGBUS */
      Ijk_SigFPE_IntDiv,  /* current instruction synths SIGFPE - IntDiv */
      Ijk_SigFPE_IntOvf,  /* current instruction synths SIGFPE - IntOvf */
      /* Unfortunately, various guest-dependent syscall kinds.  They
         all mean: do a syscall before continuing. */
      Ijk_Sys_syscall,    /* amd64/x86 'syscall', ppc 'sc', arm 'svc #0' */
      Ijk_Sys_int32,      /* amd64/x86 'int $0x20' */
      Ijk_Sys_int128,     /* amd64/x86 'int $0x80' */
      Ijk_Sys_int129,     /* amd64/x86 'int $0x81' */
      Ijk_Sys_int130,     /* amd64/x86 'int $0x82' */
      Ijk_Sys_sysenter    /* x86 'sysenter'.  guest_EIP becomes 
                             invalid at the point this happens. */
   }
   IRJumpKind;

extern void ppIRJumpKind ( IRJumpKind );
+
+
+/* ------------------ Dirty helper calls ------------------ */
+
+/* A dirty call is a flexible mechanism for calling (possibly
+   conditionally) a helper function or procedure.  The helper function
+   may read, write or modify client memory, and may read, write or
+   modify client state.  It can take arguments and optionally return a
+   value.  It may return different results and/or do different things
+   when called repeatedly with the same arguments, by means of storing
+   private state.
+
+   If a value is returned, it is assigned to the nominated return
+   temporary.
+
+   Dirty calls are statements rather than expressions for obvious
+   reasons.  If a dirty call is marked as writing guest state, any
+   pre-existing values derived from the written parts of the guest
+   state are invalid.  Similarly, if the dirty call is stated as
+   writing memory, any pre-existing loaded values are invalidated by
+   it.
+
+   In order that instrumentation is possible, the call must state, and
+   state correctly:
+
+   * Whether it reads, writes or modifies memory, and if so where.
+
+   * Whether it reads, writes or modifies guest state, and if so which
+     pieces.  Several pieces may be stated, and their extents must be
+     known at translation-time.  Each piece is allowed to repeat some
+     number of times at a fixed interval, if required.
+
+   Normally, code is generated to pass just the args to the helper.
+   However, if IRExpr_BBPTR() is present in the argument list (at most
+   one instance is allowed), then the baseblock pointer is passed for
+   that arg, so that the callee can access the guest state.  It is
+   invalid for .nFxState to be zero but IRExpr_BBPTR() to be present,
+   since .nFxState==0 is a claim that the call does not access guest
+   state.
+
+   IMPORTANT NOTE re GUARDS: Dirty calls are strict, very strict.  The
+   arguments and 'mFx' are evaluated REGARDLESS of the guard value.
+   The order of argument evaluation is unspecified.  The guard
+   expression is evaluated AFTER the arguments and 'mFx' have been
+   evaluated.  'mFx' is expected (by Memcheck) to be a defined value
+   even if the guard evaluates to false.
+*/
+
+#define VEX_N_FXSTATE  7   /* enough for FXSAVE/FXRSTOR on x86 */
+
/* Effects on resources (eg. registers, memory locations).  Used by
   IRDirty (below) to describe how a dirty helper call interacts with
   guest state and memory. */
typedef
   enum {
      Ifx_None=0x1B00,      /* no effect */
      Ifx_Read,             /* reads the resource */
      Ifx_Write,            /* writes the resource */
      Ifx_Modify            /* modifies the resource */
   }
   IREffect;

/* Pretty-print an IREffect */
extern void ppIREffect ( IREffect );
+
typedef
   struct _IRDirty {
      /* What to call, and details of args/results.  .guard must be
         non-NULL.  If .tmp is not IRTemp_INVALID, then the call
         returns a result which is placed in .tmp.  If at runtime the
         guard evaluates to false, .tmp has an 0x555..555 bit pattern
         written to it.  Hence conditional calls that assign .tmp are
         allowed. */
      IRCallee* cee;    /* where to call */
      IRExpr*   guard;  /* :: Ity_Bit.  Controls whether call happens */
      /* The args vector may contain IRExpr_BBPTR() and/or
         IRExpr_VECRET(), in both cases, at most once. */
      IRExpr**  args;   /* arg vector, ends in NULL. */
      IRTemp    tmp;    /* to assign result to, or IRTemp_INVALID if none */

      /* Mem effects; we allow only one R/W/M region to be stated */
      IREffect  mFx;    /* indicates memory effects, if any */
      IRExpr*   mAddr;  /* of access, or NULL if mFx==Ifx_None */
      Int       mSize;  /* of access, or zero if mFx==Ifx_None */

      /* Guest state effects; up to N allowed */
      Int  nFxState; /* must be 0 .. VEX_N_FXSTATE */
      struct {
         IREffect fx:16;   /* read, write or modify?  Ifx_None is invalid. */
         UShort   offset;  /* guest state offset of 1st byte of the piece */
         UShort   size;    /* size of the piece, in bytes */
         UChar    nRepeats;  /* number of extra repeats; 0 => single access */
         UChar    repeatLen; /* spacing of repeats; see comment just below */
      } fxState[VEX_N_FXSTATE];
      /* The access can be repeated, as specified by nRepeats and
         repeatLen.  To describe only a single access, nRepeats and
         repeatLen should be zero.  Otherwise, repeatLen must be a
         multiple of size and greater than size. */
      /* Overall, the parts of the guest state denoted by (offset,
         size, nRepeats, repeatLen) is
               [offset, +size)
            and, if nRepeats > 0,
               for (i = 1; i <= nRepeats; i++)
                  [offset + i * repeatLen, +size)
         A convenient way to enumerate all segments is therefore
            for (i = 0; i < 1 + nRepeats; i++)
               [offset + i * repeatLen, +size)
      */
   }
   IRDirty;
+
/* Pretty-print a dirty call */
extern void     ppIRDirty ( const IRDirty* );

/* Allocate an uninitialised dirty call */
extern IRDirty* emptyIRDirty ( void );

/* Deep-copy a dirty call */
extern IRDirty* deepCopyIRDirty ( const IRDirty* );

/* A handy function which takes some of the tedium out of constructing
   dirty helper calls.  The called function implicitly does not return
   any value and has a constant-True guard.  The call is marked as
   accessing neither guest state nor memory (hence the "unsafe"
   designation) -- you can change this marking later if need be.  A
   suitable IRCallee is constructed from the supplied bits. */
extern 
IRDirty* unsafeIRDirty_0_N ( Int regparms, const HChar* name, void* addr, 
                             IRExpr** args );

/* Similarly, make a zero-annotation dirty call which returns a value,
   and assign that to the given temp. */
extern 
IRDirty* unsafeIRDirty_1_N ( IRTemp dst, 
                             Int regparms, const HChar* name, void* addr, 
                             IRExpr** args );
+
+
/* --------------- Memory Bus Events --------------- */

typedef
   enum { 
      Imbe_Fence=0x1C00,  /* memory fence */
      /* Needed only on ARM.  It cancels a reservation made by a
         preceding Linked-Load, and needs to be handed through to the
         back end, just as LL and SC themselves are. */
      Imbe_CancelReservation
   }
   IRMBusEvent;

extern void ppIRMBusEvent ( IRMBusEvent );
+
+
+/* --------------- Compare and Swap --------------- */
+
+/* This denotes an atomic compare and swap operation, either
+   a single-element one or a double-element one.
+
+   In the single-element case:
+
+     .addr is the memory address.
+     .end  is the endianness with which memory is accessed
+
+     If .addr contains the same value as .expdLo, then .dataLo is
+     written there, else there is no write.  In both cases, the
+     original value at .addr is copied into .oldLo.
+
+     Types: .expdLo, .dataLo and .oldLo must all have the same type.
+     It may be any integral type, viz: I8, I16, I32 or, for 64-bit
+     guests, I64.
+
+     .oldHi must be IRTemp_INVALID, and .expdHi and .dataHi must
+     be NULL.
+
+   In the double-element case:
+
+     .addr is the memory address.
+     .end  is the endianness with which memory is accessed
+
+     The operation is the same:
+
+     If .addr contains the same value as .expdHi:.expdLo, then
+     .dataHi:.dataLo is written there, else there is no write.  In
+     both cases the original value at .addr is copied into
+     .oldHi:.oldLo.
+
+     Types: .expdHi, .expdLo, .dataHi, .dataLo, .oldHi, .oldLo must
+     all have the same type, which may be any integral type, viz: I8,
+     I16, I32 or, for 64-bit guests, I64.
+
+     The double-element case is complicated by the issue of
+     endianness.  In all cases, the two elements are understood to be
+     located adjacently in memory, starting at the address .addr.
+
+       If .end is Iend_LE, then the .xxxLo component is at the lower
+       address and the .xxxHi component is at the higher address, and
+       each component is itself stored little-endianly.
+
+       If .end is Iend_BE, then the .xxxHi component is at the lower
+       address and the .xxxLo component is at the higher address, and
+       each component is itself stored big-endianly.
+
+   This allows representing more cases than most architectures can
+   handle.  For example, x86 cannot do DCAS on 8- or 16-bit elements.
+
+   How to know if the CAS succeeded?
+
+   * if .oldLo == .expdLo (resp. .oldHi:.oldLo == .expdHi:.expdLo),
+     then the CAS succeeded, .dataLo (resp. .dataHi:.dataLo) is now
+     stored at .addr, and the original value there was .oldLo (resp
+     .oldHi:.oldLo).
+
+   * if .oldLo != .expdLo (resp. .oldHi:.oldLo != .expdHi:.expdLo),
+     then the CAS failed, and the original value at .addr was .oldLo
+     (resp. .oldHi:.oldLo).
+
+   Hence it is easy to know whether or not the CAS succeeded.
+*/
typedef
   struct {
      IRTemp    oldHi;  /* old value of *addr is written here */
      IRTemp    oldLo;  /* (low half; the sole element for single CAS) */
      IREndness end;    /* endianness of the data in memory */
      IRExpr*   addr;   /* store address */
      IRExpr*   expdHi; /* expected old value at *addr */
      IRExpr*   expdLo; /* (low half; the sole element for single CAS) */
      IRExpr*   dataHi; /* new value for *addr */
      IRExpr*   dataLo; /* (low half; the sole element for single CAS) */
   }
   IRCAS;

extern void ppIRCAS ( const IRCAS* cas );

extern IRCAS* mkIRCAS ( IRTemp oldHi, IRTemp oldLo,
                        IREndness end, IRExpr* addr, 
                        IRExpr* expdHi, IRExpr* expdLo,
                        IRExpr* dataHi, IRExpr* dataLo );

extern IRCAS* deepCopyIRCAS ( const IRCAS* );
+
+
/* ------------------ Circular Array Put ------------------ */

/* Write a guest register at a non-fixed offset in the guest state.
   The addressing scheme (descr/ix/bias) is the same as for GetI
   expressions; see the comment there. */
typedef
   struct {
      IRRegArray* descr; /* Part of guest state treated as circular */
      IRExpr*     ix;    /* Variable part of index into array */
      Int         bias;  /* Constant offset part of index into array */
      IRExpr*     data;  /* The value to write */
   } IRPutI;

extern void ppIRPutI ( const IRPutI* puti );

extern IRPutI* mkIRPutI ( IRRegArray* descr, IRExpr* ix,
                          Int bias, IRExpr* data );

extern IRPutI* deepCopyIRPutI ( const IRPutI* );
+
+
/* --------------- Guarded loads and stores --------------- */

/* Conditional stores are straightforward.  They are the same as
   normal stores, with an extra 'guard' field :: Ity_I1 that
   determines whether or not the store actually happens.  If not,
   memory is unmodified.

   The semantics of this is that 'addr' and 'data' are fully evaluated
   even in the case where 'guard' evaluates to zero (false).
*/
typedef
   struct {
      IREndness end;    /* Endianness of the store */
      IRExpr*   addr;   /* store address */
      IRExpr*   data;   /* value to write */
      IRExpr*   guard;  /* Guarding value (:: Ity_I1) */
   }
   IRStoreG;
+
+/* Conditional loads are a little more complex.  'addr' is the
+   address, 'guard' is the guarding condition.  If the load takes
+   place, the loaded value is placed in 'dst'.  If it does not take
+   place, 'alt' is copied to 'dst'.  However, the loaded value is not
+   placed directly in 'dst' -- it is first subjected to the conversion
+   specified by 'cvt'.
+
+   For example, imagine doing a conditional 8-bit load, in which the
+   loaded value is zero extended to 32 bits.  Hence:
+   * 'dst' and 'alt' must have type I32
+   * 'cvt' must be a unary op which converts I8 to I32.  In this 
+     example, it would be ILGop_8Uto32.
+
+   There is no explicit indication of the type at which the load is
+   done, since that is inferrable from the arg type of 'cvt'.  Note
+   that the types of 'alt' and 'dst' and the result type of 'cvt' must
+   all be the same.
+
+   Semantically, 'addr' is evaluated even in the case where 'guard'
+   evaluates to zero (false), and 'alt' is evaluated even when 'guard'
+   evaluates to one (true).  That is, 'addr' and 'alt' are always
+   evaluated.
+*/
/* The conversion applied to the value fetched by a guarded load
   (IRLoadG, below) before it is written to the destination temp. */
typedef
   enum {
      ILGop_INVALID=0x1D00,
      ILGop_Ident64,   /* 64 bit, no conversion */
      ILGop_Ident32,   /* 32 bit, no conversion */
      ILGop_16Uto32,   /* 16 bit load, Z-widen to 32 */
      ILGop_16Sto32,   /* 16 bit load, S-widen to 32 */
      ILGop_8Uto32,    /* 8 bit load, Z-widen to 32 */
      ILGop_8Sto32     /* 8 bit load, S-widen to 32 */
   }
   IRLoadGOp;
+
/* A guarded load.  See the big comment above for the full semantics;
   note in particular that 'addr' and 'alt' are always evaluated. */
typedef
   struct {
      IREndness end;    /* Endianness of the load */
      IRLoadGOp cvt;    /* Conversion to apply to the loaded value */
      IRTemp    dst;    /* Destination (LHS) of assignment */
      IRExpr*   addr;   /* Address being loaded from */
      IRExpr*   alt;    /* Value if load is not done. */
      IRExpr*   guard;  /* Guarding value */
   }
   IRLoadG;

extern void ppIRStoreG ( const IRStoreG* sg );

extern void ppIRLoadGOp ( IRLoadGOp cvt );

extern void ppIRLoadG ( const IRLoadG* lg );

extern IRStoreG* mkIRStoreG ( IREndness end,
                              IRExpr* addr, IRExpr* data,
                              IRExpr* guard );

extern IRLoadG* mkIRLoadG ( IREndness end, IRLoadGOp cvt,
                            IRTemp dst, IRExpr* addr, IRExpr* alt, 
                            IRExpr* guard );
+
+
+/* ------------------ Statements ------------------ */
+
+/* The different kinds of statements.  Their meaning is explained
+   below in the comments for IRStmt.
+
+   Those marked META do not represent code, but rather extra
+   information about the code.  These statements can be removed
+   without affecting the functional behaviour of the code, however
+   they are required by some IR consumers such as tools that
+   instrument the code.
+*/
+
typedef 
   enum {
      Ist_NoOp=0x1E00,  /* no-op; may be removed without effect */
      Ist_IMark,     /* META: instruction mark */
      Ist_AbiHint,   /* META: ABI hint */
      Ist_Put,       /* write guest state, fixed offset */
      Ist_PutI,      /* write guest state, non-fixed (circular) offset */
      Ist_WrTmp,     /* assign to a temporary */
      Ist_Store,     /* normal store */
      Ist_LoadG,     /* guarded load */
      Ist_StoreG,    /* guarded store */
      Ist_CAS,       /* atomic compare-and-swap */
      Ist_LLSC,      /* load-linked / store-conditional */
      Ist_Dirty,     /* dirty helper call */
      Ist_MBE,       /* memory bus event */
      Ist_Exit
   } 
   IRStmtTag;
+
+/* A statement.  Stored as a tagged union.  'tag' indicates what kind
+   of expression this is.  'Ist' is the union that holds the fields.
   If an IRStmt 'st' has st.tag equal to Ist_Store, then it's a store
+   statement, and the fields can be accessed with
+   'st.Ist.Store.<fieldname>'.
+
+   For each kind of statement, we show what it looks like when
+   pretty-printed with ppIRStmt().
+*/
+typedef
+   struct _IRStmt {
+      IRStmtTag tag;
+      union {
+         /* A no-op (usually resulting from IR optimisation).  Can be
+            omitted without any effect.
+
+            ppIRStmt output: IR-NoOp
+         */
+         struct {
+	 } NoOp;
+
+         /* META: instruction mark.  Marks the start of the statements
+            that represent a single machine instruction (the end of
+            those statements is marked by the next IMark or the end of
+            the IRSB).  Contains the address and length of the
+            instruction.
+
+            It also contains a delta value.  The delta must be
+            subtracted from a guest program counter value before
+            attempting to establish, by comparison with the address
+            and length values, whether or not that program counter
+            value refers to this instruction.  For x86, amd64, ppc32,
+            ppc64 and arm, the delta value is zero.  For Thumb
+            instructions, the delta value is one.  This is because, on
+            Thumb, guest PC values (guest_R15T) are encoded using the
+            top 31 bits of the instruction address and a 1 in the lsb;
+            hence they appear to be (numerically) 1 past the start of
+            the instruction they refer to.  IOW, guest_R15T on ARM
+            holds a standard ARM interworking address.
+
+            ppIRStmt output: ------ IMark(<addr>, <len>, <delta>) ------,
+                         eg. ------ IMark(0x4000792, 5, 0) ------,
+         */
+         struct {
+            Addr   addr;   /* instruction address */
+            UInt   len;    /* instruction length */
+            UChar  delta;  /* addr = program counter as encoded in guest state
+                                     - delta */
+         } IMark;
+
+         /* META: An ABI hint, which says something about this
+            platform's ABI.
+
+            At the moment, the only AbiHint is one which indicates
+            that a given chunk of address space, [base .. base+len-1],
+            has become undefined.  This is used on amd64-linux and
+            some ppc variants to pass stack-redzoning hints to whoever
+            wants to see them.  It also indicates the address of the
+            next (dynamic) instruction that will be executed.  This is
            to help Memcheck with origin tracking.
+
+            ppIRStmt output: ====== AbiHint(<base>, <len>, <nia>) ======
+                         eg. ====== AbiHint(t1, 16, t2) ======
+         */
+         struct {
+            IRExpr* base;     /* Start  of undefined chunk */
+            Int     len;      /* Length of undefined chunk */
+            IRExpr* nia;      /* Address of next (guest) insn */
+         } AbiHint;
+
+         /* Write a guest register, at a fixed offset in the guest state.
+            ppIRStmt output: PUT(<offset>) = <data>, eg. PUT(60) = t1
+         */
+         struct {
+            Int     offset;   /* Offset into the guest state */
+            IRExpr* data;     /* The value to write */
+         } Put;
+
+         /* Write a guest register, at a non-fixed offset in the guest
+            state.  See the comment for GetI expressions for more
+            information.
+
+            ppIRStmt output: PUTI<descr>[<ix>,<bias>] = <data>,
+                         eg. PUTI(64:8xF64)[t5,0] = t1
+         */
+         struct {
+            IRPutI* details;
+         } PutI;
+
+         /* Assign a value to a temporary.  Note that SSA rules require
+            each tmp is only assigned to once.  IR sanity checking will
+            reject any block containing a temporary which is not assigned
+            to exactly once.
+
+            ppIRStmt output: t<tmp> = <data>, eg. t1 = 3
+         */
+         struct {
+            IRTemp  tmp;   /* Temporary  (LHS of assignment) */
+            IRExpr* data;  /* Expression (RHS of assignment) */
+         } WrTmp;
+
+         /* Write a value to memory.  This is a normal store, not a
+            Store-Conditional.  To represent a Store-Conditional,
+            instead use IRStmt.LLSC.
+            ppIRStmt output: ST<end>(<addr>) = <data>, eg. STle(t1) = t2
+         */
+         struct {
+            IREndness end;    /* Endianness of the store */
+            IRExpr*   addr;   /* store address */
+            IRExpr*   data;   /* value to write */
+         } Store;
+
+         /* Guarded store.  Note that this is defined to evaluate all
+            expression fields (addr, data) even if the guard evaluates
+            to false.
+            ppIRStmt output:
+              if (<guard>) ST<end>(<addr>) = <data> */
+         struct {
+            IRStoreG* details;
+         } StoreG;
+
+         /* Guarded load.  Note that this is defined to evaluate all
+            expression fields (addr, alt) even if the guard evaluates
+            to false.
+            ppIRStmt output:
+              t<tmp> = if (<guard>) <cvt>(LD<end>(<addr>)) else <alt> */
+         struct {
+            IRLoadG* details;
+         } LoadG;
+
+         /* Do an atomic compare-and-swap operation.  Semantics are
+            described above on a comment at the definition of IRCAS.
+
+            ppIRStmt output:
+               t<tmp> = CAS<end>(<addr> :: <expected> -> <new>)
+            eg
+               t1 = CASle(t2 :: t3->Add32(t3,1))
+               which denotes a 32-bit atomic increment 
+               of a value at address t2
+
+            A double-element CAS may also be denoted, in which case <tmp>,
+            <expected> and <new> are all pairs of items, separated by
+            commas.
+         */
+         struct {
+            IRCAS* details;
+         } CAS;
+
+         /* Either Load-Linked or Store-Conditional, depending on
+            STOREDATA.
+
+            If STOREDATA is NULL then this is a Load-Linked, meaning
+            that data is loaded from memory as normal, but a
+            'reservation' for the address is also lodged in the
+            hardware.
+
+               result = Load-Linked(addr, end)
+
+            The data transfer type is the type of RESULT (I32, I64,
+            etc).  ppIRStmt output:
+
+               result = LD<end>-Linked(<addr>), eg. LDbe-Linked(t1)
+
+            If STOREDATA is not NULL then this is a Store-Conditional,
+            hence:
+
+               result = Store-Conditional(addr, storedata, end)
+
+            The data transfer type is the type of STOREDATA and RESULT
+            has type Ity_I1. The store may fail or succeed depending
+            on the state of a previously lodged reservation on this
+            address.  RESULT is written 1 if the store succeeds and 0
+            if it fails.  eg ppIRStmt output:
+
+               result = ( ST<end>-Cond(<addr>) = <storedata> )
+               eg t3 = ( STbe-Cond(t1, t2) )
+
+            In all cases, the address must be naturally aligned for
+            the transfer type -- any misaligned addresses should be
+            caught by a dominating IR check and side exit.  This
+            alignment restriction exists because on at least some
+            LL/SC platforms (ppc), stwcx. etc will trap w/ SIGBUS on
+            misaligned addresses, and we have to actually generate
+            stwcx. on the host, and we don't want it trapping on the
+            host.
+
+            Summary of rules for transfer type:
+              STOREDATA == NULL (LL):
+                transfer type = type of RESULT
+              STOREDATA != NULL (SC):
+                transfer type = type of STOREDATA, and RESULT :: Ity_I1
+         */
+         struct {
+            IREndness end;
+            IRTemp    result;
+            IRExpr*   addr;
+            IRExpr*   storedata; /* NULL => LL, non-NULL => SC */
+         } LLSC;
+
+         /* Call (possibly conditionally) a C function that has side
+            effects (ie. is "dirty").  See the comments above the
+            IRDirty type declaration for more information.
+
+            ppIRStmt output:
+               t<tmp> = DIRTY <guard> <effects> 
+                  ::: <callee>(<args>)
+            eg.
+               t1 = DIRTY t27 RdFX-gst(16,4) RdFX-gst(60,4)
+                     ::: foo{0x380035f4}(t2)
+         */       
+         struct {
+            IRDirty* details;
+         } Dirty;
+
+         /* A memory bus event - a fence, or acquisition/release of the
+            hardware bus lock.  IR optimisation treats all these as fences
+            across which no memory references may be moved.
+            ppIRStmt output: MBusEvent-Fence,
+                             MBusEvent-BusLock, MBusEvent-BusUnlock.
+         */
+         struct {
+            IRMBusEvent event;
+         } MBE;
+
+         /* Conditional exit from the middle of an IRSB.
+            ppIRStmt output: if (<guard>) goto {<jk>} <dst>
+                         eg. if (t69) goto {Boring} 0x4000AAA:I32
+            If <guard> is true, the guest state is also updated by
+            PUT-ing <dst> at <offsIP>.  This is done because a
+            taken exit must update the guest program counter.
+         */
+         struct {
+            IRExpr*    guard;    /* Conditional expression */
+            IRConst*   dst;      /* Jump target (constant only) */
+            IRJumpKind jk;       /* Jump kind */
+            Int        offsIP;   /* Guest state offset for IP */
+         } Exit;
+      } Ist;
+   }
+   IRStmt;
+
+/* Statement constructors. */
+extern IRStmt* IRStmt_NoOp    ( void );
+extern IRStmt* IRStmt_IMark   ( Addr addr, UInt len, UChar delta );
+extern IRStmt* IRStmt_AbiHint ( IRExpr* base, Int len, IRExpr* nia );
+extern IRStmt* IRStmt_Put     ( Int off, IRExpr* data );
+extern IRStmt* IRStmt_PutI    ( IRPutI* details );
+extern IRStmt* IRStmt_WrTmp   ( IRTemp tmp, IRExpr* data );
+extern IRStmt* IRStmt_Store   ( IREndness end, IRExpr* addr, IRExpr* data );
+extern IRStmt* IRStmt_StoreG  ( IREndness end, IRExpr* addr, IRExpr* data,
+                                IRExpr* guard );
+extern IRStmt* IRStmt_LoadG   ( IREndness end, IRLoadGOp cvt, IRTemp dst,
+                                IRExpr* addr, IRExpr* alt, IRExpr* guard );
+extern IRStmt* IRStmt_CAS     ( IRCAS* details );
+extern IRStmt* IRStmt_LLSC    ( IREndness end, IRTemp result,
+                                IRExpr* addr, IRExpr* storedata );
+extern IRStmt* IRStmt_Dirty   ( IRDirty* details );
+extern IRStmt* IRStmt_MBE     ( IRMBusEvent event );
+extern IRStmt* IRStmt_Exit    ( IRExpr* guard, IRJumpKind jk, IRConst* dst,
+                                Int offsIP );
+
+/* Deep-copy an IRStmt. */
+extern IRStmt* deepCopyIRStmt ( const IRStmt* );
+
+/* Pretty-print an IRStmt. */
+extern void ppIRStmt ( const IRStmt* );
+
+
+/* ------------------ Basic Blocks ------------------ */
+
+/* Type environments: a bunch of statements, expressions, etc, are
+   incomplete without an environment indicating the type of each
+   IRTemp.  So this provides one.  IR temporaries are really just
+   unsigned ints and so this provides an array, 0 .. n_types_used-1 of
+   them.
+*/
+typedef
+   struct {
+      IRType* types;
+      Int     types_size;
+      Int     types_used;
+   }
+   IRTypeEnv;
+
+/* Obtain a new IRTemp */
+extern IRTemp newIRTemp ( IRTypeEnv*, IRType );
+
+/* Deep-copy a type environment */
+extern IRTypeEnv* deepCopyIRTypeEnv ( const IRTypeEnv* );
+
+/* Pretty-print a type environment */
+extern void ppIRTypeEnv ( const IRTypeEnv* );
+
+
+/* Code blocks, which in proper compiler terminology are superblocks
+   (single entry, multiple exit code sequences) contain:
+
+   - A table giving a type for each temp (the "type environment")
+   - An expandable array of statements
+   - An expression of type 32 or 64 bits, depending on the
+     guest's word size, indicating the next destination if the block 
+     executes all the way to the end, without a side exit
+   - An indication of any special actions (JumpKind) needed
+     for this final jump.
+   - Offset of the IP field in the guest state.  This will be
+     updated before the final jump is done.
+   
+   "IRSB" stands for "IR Super Block".
+*/
+typedef
+   struct {
+      IRTypeEnv* tyenv;
+      IRStmt**   stmts;
+      Int        stmts_size;
+      Int        stmts_used;
+      IRExpr*    next;
+      IRJumpKind jumpkind;
+      Int        offsIP;
+   }
+   IRSB;
+
+/* Allocate a new, uninitialised IRSB */
+extern IRSB* emptyIRSB ( void );
+
+/* Deep-copy an IRSB */
+extern IRSB* deepCopyIRSB ( const IRSB* );
+
+/* Deep-copy an IRSB, except for the statements list, which is set to
+   be a new, empty list of statements. */
+extern IRSB* deepCopyIRSBExceptStmts ( const IRSB* );
+
+/* Pretty-print an IRSB */
+extern void ppIRSB ( const IRSB* );
+
+/* Append an IRStmt to an IRSB */
+extern void addStmtToIRSB ( IRSB*, IRStmt* );
+
+
+/*---------------------------------------------------------------*/
+/*--- Helper functions for the IR                             ---*/
+/*---------------------------------------------------------------*/
+
+/* For messing with IR type environments */
+extern IRTypeEnv* emptyIRTypeEnv  ( void );
+
+/* What is the type of this expression? */
+extern IRType typeOfIRConst ( const IRConst* );
+extern IRType typeOfIRTemp  ( const IRTypeEnv*, IRTemp );
+extern IRType typeOfIRExpr  ( const IRTypeEnv*, const IRExpr* );
+
+/* What are the arg and result type for this IRLoadGOp? */
+extern void typeOfIRLoadGOp ( IRLoadGOp cvt,
+                              /*OUT*/IRType* t_res,
+                              /*OUT*/IRType* t_arg );
+
+/* Sanity check a BB of IR */
+extern void sanityCheckIRSB ( const  IRSB*  bb, 
+                              const  HChar* caller,
+                              Bool   require_flatness, 
+                              IRType guest_word_size );
+extern Bool isFlatIRStmt ( const IRStmt* );
+
+/* Is this any value actually in the enumeration 'IRType' ? */
+extern Bool isPlausibleIRType ( IRType ty );
+
+
+/*---------------------------------------------------------------*/
+/*--- IR injection                                            ---*/
+/*---------------------------------------------------------------*/
+
+void vex_inject_ir(IRSB *, IREndness);
+
+
+#endif /* ndef __LIBVEX_IR_H */
+
+/*---------------------------------------------------------------*/
+/*---                                             libvex_ir.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_s390x_common.h b/VEX/pub/libvex_s390x_common.h
new file mode 100644
index 0000000..a8c8c53
--- /dev/null
+++ b/VEX/pub/libvex_s390x_common.h
@@ -0,0 +1,120 @@
+/* -*- mode: C; c-basic-offset: 3; -*- */
+
+/*--------------------------------------------------------------------*/
+/*--- Common defs for s390x                  libvex_s390x_common.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright IBM Corp. 2010-2013
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __LIBVEX_PUB_S390X_H
+#define __LIBVEX_PUB_S390X_H
+
+/* This file includes definitions for s390.
+
+   It must be suitable for inclusion in assembler source files. */
+
+
+/*--------------------------------------------------------------*/
+/*--- Dedicated registers                                    ---*/
+/*--------------------------------------------------------------*/
+
+#define S390_REGNO_RETURN_VALUE         2
+#define S390_REGNO_TCHAIN_SCRATCH      12
+#define S390_REGNO_GUEST_STATE_POINTER 13
+#define S390_REGNO_LINK_REGISTER       14
+#define S390_REGNO_STACK_POINTER       15
+
+
+/*--------------------------------------------------------------*/
+/*--- Offsets in the stack frame allocated by the dispatcher ---*/
+/*--------------------------------------------------------------*/
+
+/* Dispatcher will save 8 FPRs at offsets 160 + 0 ... 160 + 56 */
+
+/* Where the dispatcher saves the r2 contents. */
+#define S390_OFFSET_SAVED_R2 160+80
+
+/* Where client's FPC register is saved. */
+#define S390_OFFSET_SAVED_FPC_C 160+72
+
+/* Where valgrind's FPC register is saved. */
+#define S390_OFFSET_SAVED_FPC_V 160+64
+
+/* Size of frame allocated by VG_(disp_run_translations)
+   Need size for
+       8 FPRs
+     + 1 GPR  (SAVED_R2)
+     + 2 FPCs (SAVED_FPC_C and SAVED_FPC_V).
+
+   Additionally, we need a standard frame for helper functions being called
+   from client code. (See figure 1-16 in zSeries ABI) */
+#define S390_INNERLOOP_FRAME_SIZE ((8+1+2)*8 + 160)
+
+
+/*--------------------------------------------------------------*/
+/*--- Facility bits                                          ---*/
+/*--------------------------------------------------------------*/
+
+/* The value of the macro is the number of the facility bit as per POP. */
+#define S390_FAC_MSA     17  // message-security-assist
+#define S390_FAC_LDISP   18  // long displacement
+#define S390_FAC_HFPMAS  20  // HFP multiply-and-add-subtract
+#define S390_FAC_EIMM    21  // extended immediate
+#define S390_FAC_HFPUNX  23  // HFP unnormalized extension
+#define S390_FAC_ETF2    24  // ETF2-enhancement
+#define S390_FAC_STCKF   25  // store clock fast insn
+#define S390_FAC_PENH    26  // parsing-enhancement
+#define S390_FAC_ETF3    30  // ETF3-enhancement
+#define S390_FAC_XCPUT   31  // extract-CPU-time
+#define S390_FAC_GIE     34  // general insn extension
+#define S390_FAC_EXEXT   35  // execute extension
+#define S390_FAC_FPEXT   37  // floating-point extension
+#define S390_FAC_FPSE    41  // floating-point support enhancement
+#define S390_FAC_DFP     42  // decimal floating point
+#define S390_FAC_PFPO    44  // perform floating point operation insn
+#define S390_FAC_HIGHW   45  // high-word extension
+#define S390_FAC_LSC     45  // load/store on condition
+#define S390_FAC_DFPZC   48  // DFP zoned-conversion
+#define S390_FAC_MISC    49  // miscellaneous insn
+#define S390_FAC_CTREXE  50  // constrained transactional execution
+#define S390_FAC_TREXE   73  // transactional execution
+#define S390_FAC_MSA4    77  // message-security-assist 4
+
+
+/*--------------------------------------------------------------*/
+/*--- Miscellaneous                                          ---*/
+/*--------------------------------------------------------------*/
+
+/* Number of arguments that can be passed in registers */
+#define S390_NUM_GPRPARMS 5
+
+/* Number of double words needed to store all facility bits. */
+#define S390_NUM_FACILITY_DW 2
+
+#endif /* __LIBVEX_PUB_S390X_H */
+
+/*--------------------------------------------------------------------*/
+/*--- end                                    libvex_s390x_common.h ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/pub/libvex_trc_values.h b/VEX/pub/libvex_trc_values.h
new file mode 100644
index 0000000..8209042
--- /dev/null
+++ b/VEX/pub/libvex_trc_values.h
@@ -0,0 +1,96 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                               libvex_trc_values.h ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#ifndef __LIBVEX_TRC_VALUES_H
+#define __LIBVEX_TRC_VALUES_H
+
+
+/* Magic values that the guest state pointer might be set to when
+   returning to the dispatcher.  The only other legitimate value is to
+   point to the start of the thread's VEX guest state.
+
+   This file may get included in assembly code, so do not put
+   C-specific constructs in it.
+
+   These values should be 61 or above so as not to conflict
+   with Valgrind's VG_TRC_ values, which are 60 or below.
+*/
+
+#define VEX_TRC_JMP_INVALICACHE 61  /* invalidate icache (translations)
+                                       before continuing */
+#define VEX_TRC_JMP_FLUSHDCACHE 103 /* flush dcache before continuing */
+
+#define VEX_TRC_JMP_NOREDIR    81  /* jump to undirected guest addr */
+#define VEX_TRC_JMP_SIGTRAP    85  /* deliver trap (SIGTRAP) before
+                                      continuing */
+#define VEX_TRC_JMP_SIGSEGV    87  /* deliver segv (SIGSEGV) before
+                                      continuing */
+#define VEX_TRC_JMP_SIGBUS     93  /* deliver SIGBUS before continuing */
+
+#define VEX_TRC_JMP_SIGFPE_INTDIV     97  /* deliver SIGFPE (integer divide
+                                             by zero) before continuing */
+
+#define VEX_TRC_JMP_SIGFPE_INTOVF     99  /* deliver SIGFPE (integer overflow)
+                                             before continuing */
+
+#define VEX_TRC_JMP_SIGILL     101  /* deliver SIGILL (Illegal instruction)
+                                       before continuing */
+
+#define VEX_TRC_JMP_EMWARN     63  /* deliver emulation warning before
+                                      continuing */
+#define VEX_TRC_JMP_EMFAIL     83  /* emulation fatal error; abort system */
+
+#define VEX_TRC_JMP_CLIENTREQ  65  /* do a client req before continuing */
+#define VEX_TRC_JMP_YIELD      67  /* yield to thread sched 
+                                      before continuing */
+#define VEX_TRC_JMP_NODECODE   69  /* next instruction is not decodable */
+#define VEX_TRC_JMP_MAPFAIL    71  /* address translation failed */
+
+#define VEX_TRC_JMP_SYS_SYSCALL  73 /* do syscall before continuing */
+#define VEX_TRC_JMP_SYS_INT32    75 /* do syscall before continuing */
+#define VEX_TRC_JMP_SYS_INT128   77 /* do syscall before continuing */
+#define VEX_TRC_JMP_SYS_INT129   89 /* do syscall before continuing */
+#define VEX_TRC_JMP_SYS_INT130   91 /* do syscall before continuing */
+
+#define VEX_TRC_JMP_SYS_SYSENTER 79 /* do syscall before continuing */
+
+#define VEX_TRC_JMP_BORING       95 /* return to sched, but just 
+                                       keep going; no special action */
+
+#endif /* ndef __LIBVEX_TRC_VALUES_H */
+
+/*---------------------------------------------------------------*/
+/*---                                     libvex_trc_values.h ---*/
+/*---------------------------------------------------------------*/
diff --git a/VEX/switchback/Makefile b/VEX/switchback/Makefile
new file mode 100644
index 0000000..3fa5146
--- /dev/null
+++ b/VEX/switchback/Makefile
@@ -0,0 +1,12 @@
+
+CC=aarch64-linux-gnu-gcc
+
+all: switchback.c linker.c linker.h
+	$(CC) -m64 -Wall -O -g -o switchback switchback.c linker.c \
+		../libvex_ppc64_linux.a
+
+test_ppc:
+	$(CC) -Wall -m64  -mregnames -O -c test_ppc_jm1.c
+
+clean:
+	rm -f switchback switchback.o linker.o
diff --git a/VEX/switchback/binary_switchback.pl b/VEX/switchback/binary_switchback.pl
new file mode 100755
index 0000000..4b096ee
--- /dev/null
+++ b/VEX/switchback/binary_switchback.pl
@@ -0,0 +1,431 @@
+#!/usr/bin/env perl
+use strict;
+use warnings;
+
+######################################################
+# Binary search script for switchback
+# Finds bad basic block for seg faults and bad output.
+#
+# To test output, you need to create test_ref
+# test_ref should hold the correct output for running the test_xxx program:
+#  - Everything between (not including) /^---START---$/ and /^---STOP---$/
+#  - But NOT including output from /^---begin SWITCHBACK/
+#    to /^---  end SWITCHBACK/ inclusive
+#
+# This script can't handle other vex output,
+# so e.g switchback.c::DEBUG_TRACE_FLAGS should be 0
+#
+
+######################################################
+# Global consts, vars
+use constant DEBUG => 0;
+use constant CONST_N_MAX => 10000000000;
+use constant CONST_N_MUL => 2;
+
+my $SWITCHBACK = "./switchback";
+my $N_START = 0;
+my $N_LAST_GOOD = 0;
+my $N_LAST_BAD = -1;
+my $GIVEN_LAST_GOOD = -1;
+my $GIVEN_LAST_BAD = -1;
+my $TEST_REF;
+
+
+
+######################################################
+# Helper functions
+
+sub Exit {
+    exit $_[0];
+}
+
+sub Usage {
+    print "Usage: binary_switchback.pl test_ref [last_good [last_bad]]\n";
+    print "where:\n";
+    print "   test_ref  = reference output from test_xxx\n";
+    print "   last_good = last known good bb (search space minimum)\n";
+    print "   last_bad  = last known bad bb (search space maximum)\n";
+    print "\n";
+}
+
+sub QuitUsage {
+    print $_[0]."\n";
+    Usage();
+    Exit 1;
+}
+
+
+######################################################
+# Get & check cmdline args
+# - if given, override global vars.
+
+if (@ARGV < 1 || @ARGV > 3) {
+    QuitUsage "Error: Bad num args\n";
+}
+
+$TEST_REF = $ARGV[0];
+
+if ( ! -x "$SWITCHBACK" ) {
+    QuitUsage "File doesn't exist | not executable: '$SWITCHBACK'\n";
+}
+
+if (@ARGV >1) {
+    $N_LAST_GOOD = $ARGV[1];
+    $GIVEN_LAST_GOOD = $N_LAST_GOOD;
+    if (! ($N_LAST_GOOD =~ /^\d*$/)) {
+	QuitUsage "Error: bad arg for #last_good\n";
+    }
+    if ($N_LAST_GOOD >= CONST_N_MAX) {
+	QuitUsage "Error: #last_good >= N_MAX(".CONST_N_MAX.")\n";
+    }
+}
+if (@ARGV >2) {
+    $N_LAST_BAD = $ARGV[2];
+    $GIVEN_LAST_BAD = $N_LAST_BAD;
+    if (! ($N_LAST_BAD =~ /^\d*$/)) {
+	QuitUsage "Error: bad arg for 'last_bad'\n";
+    }
+}
+
+# Setup N_START
+if ($N_LAST_BAD != -1) {
+    # Start halfway:
+    my $diff = $N_LAST_BAD - $N_LAST_GOOD;
+    $N_START = $N_LAST_GOOD + ($diff - ($diff % 2)) / 2;
+} else {
+    # No known end: Start at beginning:
+    if ($N_LAST_GOOD > 0) {   # User-given last_good
+	$N_START = $N_LAST_GOOD;
+    } else {
+	$N_START = 100;       # Some reasonable number.
+    }
+}
+
+######################################################
+# Sanity checks (shouldn't ever happen)
+
+if ($N_START < $N_LAST_GOOD) {
+    print "Program Error: start < last_good\n";
+    exit 1;
+}
+if ($N_LAST_BAD != -1 && $N_START >= $N_LAST_BAD) {
+    print "Program Error: start >= last_bad\n";
+    exit 1;
+}
+if ($N_START < 1 || $N_START > CONST_N_MAX) {
+    print "Program Error: Bad N_START: '$N_START'\n";
+    exit 1;
+}
+if ($N_LAST_GOOD < 0 || $N_LAST_GOOD > CONST_N_MAX) {
+    print "Program Error: Bad N_LAST_GOOD: '$N_LAST_GOOD'\n";
+    exit 1;
+}
+if ($N_LAST_BAD < -1 || $N_LAST_BAD > CONST_N_MAX) {
+    print "Program Error: Bad N_LAST_BAD: '$N_LAST_BAD'\n";
+    exit 1;
+}
+
+
+
+
+
+
+######################################################
+# Helper functions
+
+# Run switchback for test, for N bbs
+# returns output results
+sub SwitchBack {
+    my $n = $_[0];
+    if ($n < 0 || $n > CONST_N_MAX) {
+	print "Error SwitchBack: Bad N: '$n'\n";
+	Exit 1;
+    }
+    my $TMPFILE = ".switchback_output.$n";
+
+    print "=== Calling switchback for bb $n ===\n";
+
+    # POSIX redirection: '>&' is csh/bash-only and fails under dash /bin/sh.
+    system("$SWITCHBACK $n > $TMPFILE 2>&1");
+    my $ret = $?;
+
+    if ($ret == 256) {
+	print "Error running switchback - Quitting...\n---\n";
+	open(INFILE, "$TMPFILE");
+	print <INFILE>;
+	close(INFILE);
+
+	unlink($TMPFILE) if (! DEBUG);
+	exit 0;
+    }
+
+    if ($ret & 127) {
+	print "Ctrl-C pressed - Quitting...\n";
+	unlink($TMPFILE) if (! DEBUG);
+	exit 0;
+    }
+
+    if (DEBUG) {
+	if ($ret == -1) {
+	    print "failed to execute: $!\n";
+	}
+	elsif ($ret & 127) {
+	    printf "child died with signal %d, %s coredump\n",
+            ($ret & 127),  ($ret & 128) ? 'with' : 'without';
+	}
+	else {
+	    printf "child exited with value %d\n", $ret >> 8;
+	}
+    }
+    if ($ret != 0) { # Err: maybe seg fault
+	open(INFILE, "$TMPFILE");
+	my @results = <INFILE>;
+	close(INFILE);
+
+	while (@results && !((shift @results) =~ /^---START---/)) {}
+	print @results;
+
+	unlink($TMPFILE) if (! DEBUG);
+	return;
+    }
+
+    open(INFILE, "$TMPFILE");
+    my @results = <INFILE>;
+    close(INFILE);
+
+    unlink($TMPFILE) if (! DEBUG);
+    return @results;
+}
+
+# Returns N simulated bbs from output lines
+sub get_N_simulated {
+    my @lines = @{$_[0]};
+    pop @lines;             # not the last...
+    my $line = pop @lines;  # ...but the second-to-last line.
+
+    chomp $line;
+    my $n;
+    if (($n) = ($line =~ /^(\d*) bbs simulated$/)) {
+	return $n;
+    }
+    print "Error: Didn't find N bbs simulated, from output lines\n";
+    Exit 1;
+}
+
+# Calls test script to compare current output lines with a reference.
+# Returns 1 on success, 0 on failure
+sub TestOutput {
+    my @lines = @{$_[0]};
+    my $n = $_[1];
+    my $ref_output = "$TEST_REF";
+
+    # Get the current section we want to compare:
+    my @newlines;
+    my $ok=0;
+    my $halfline = "";
+    foreach my $line(@lines) {
+	chomp $line;
+	if ($line =~ /^---STOP---$/) { last; }     # we're done
+
+	# output might be messed up here...
+	if ($line =~ /^.*---begin SWITCHBACK/) {
+	    ($halfline) = ($line =~ /^(.*)---begin SWITCHBACK/);
+	    $ok = 0;  # stop on prev line
+	}
+
+	# A valid line:
+	if ($ok) {
+	    if ($halfline ne "") {   # Fix broken line
+		$line = $halfline.$line;
+		$halfline = "";
+	    }
+
+	    # Ignore Vex output
+	    if ($line =~ /^vex /) { next; }
+
+	    push(@newlines, $line);
+	}
+
+	if ($line =~ /^---START---$/) {            # start on next line
+	    $ok = 1;
+	}
+
+	if ($line =~ /^---  end SWITCHBACK/) {     # start on next line
+	    $ok = 1;
+	    
+	}
+    }
+
+    if (DEBUG) {
+	open(OUTFILE, ">.filtered_output.$n");
+	print OUTFILE join("\n",@newlines);
+	close(OUTFILE);
+    }
+
+    # Read in reference lines
+    open(REFERENCE, "$ref_output") || die "Error: Couldn't open $ref_output\n";
+    my @ref_lines = <REFERENCE>;
+    close(REFERENCE);
+
+    # Compare reference lines with current:
+    my $match = 1;
+    my $i = 0;
+    foreach my $ref_line(@ref_lines) {
+	chomp $ref_line;
+	my $line = $newlines[$i++];
+	chomp $line;
+	if ($ref_line ne $line) {
+	    print "\nMismatch on output:\n";
+	    print "ref: '$ref_line'\n";
+	    print "new: '$line'\n\n";
+	    $match = 0;
+	    last;
+	}
+    }
+    return $match;
+}
+
+
+
+
+
+
+######################################################
+# Do the search
+
+if (DEBUG) {
+    print "\n------------\n";
+    print "START:  N=$N_START\n";
+    print "START: lg=$N_LAST_GOOD\n";
+    print "START: lb=$N_LAST_BAD\n";
+    print "START: GIVEN_LAST_GOOD=$GIVEN_LAST_GOOD\n";
+    print "START: GIVEN_LAST_BAD =$GIVEN_LAST_BAD\n";
+    print "\n";
+}
+
+my $N = $N_START;
+my $success = 0;
+my @sb_output;
+while (1) {
+    if (DEBUG) {
+	print "\n------------\n";
+	print "SOL: lg=$N_LAST_GOOD\n";
+	print "SOL: lb=$N_LAST_BAD\n";
+	print "SOL:  N=$N\n";
+    }
+    if ($N < 0) {
+	print "Error: $N<0\n";
+	Exit 1;
+    }
+
+    my $ok = 1;
+    # Run switchback:
+    @sb_output = SwitchBack($N);
+
+    if (@sb_output == 0) { # Switchback failed - maybe seg fault
+	$ok = 0;
+    }
+
+    if (DEBUG) {
+	open(fileOUT, ">.retrieved_output.$N") or die("Can't open file for writing: $!");
+	print fileOUT @sb_output;
+	close(fileOUT);
+    }
+
+    # If we're ok so far (no seg faults) then test for correct output
+    if ($ok) {
+	$ok = TestOutput( \@sb_output, $N );
+    }
+
+    if ($ok) {
+	if (get_N_simulated(\@sb_output) < $N) { # Done: No bad bbs
+	    $success = 1;
+	    last;
+	}
+	if ($N_LAST_BAD == -1) {
+	    # No upper bound for search space
+	    # Try again with a bigger N
+
+	    $N_LAST_GOOD = $N;
+	    $N *= CONST_N_MUL;
+	    if ($N > CONST_N_MAX) {
+		print "\nError: Maxed out N($N): N_MAX=".CONST_N_MAX."\n";
+		print "\nWe're either in a loop, or this is a big test program (increase N_MAX)\n\n";
+		Exit 1;
+	    }
+	    if (DEBUG) {
+		print "Looks good so far: Trying bigger N...\n\n";
+	    }
+	    next;
+	}
+    }
+
+    # Narrow the search space:
+    if ($ok) { $N_LAST_GOOD = $N; }
+    else {     $N_LAST_BAD  = $N;  }
+
+    # Calculate next step:
+    my $diff = $N_LAST_BAD - $N_LAST_GOOD;
+    $diff = $diff - ($diff % 2);
+    my $step = $diff / 2;
+
+    if ($step < 0) {
+	print "Error: step = $step\n";
+	Exit 1;
+    }
+
+    # This our last run-through?
+    if ($step!=0) {
+	$N = $N_LAST_GOOD + $step;   # Keep on going...
+    } else {
+	last;                        # Get outta here
+    }
+
+    if (DEBUG) {
+	print "\nEOL: ok=$ok\n";
+	print "EOL: lg=$N_LAST_GOOD\n";
+	print "EOL: lb=$N_LAST_BAD\n";
+	print "EOL:  s=$step\n";
+	print "EOL:  N=$N\n";
+    }
+}
+
+
+
+######################################################
+# Done: Report results
+
+print "\n============================================\n";
+print "Done searching.\n\n";
+
+if ($N_LAST_BAD != -1 && $N != $N_LAST_BAD) {
+    print "Getting output for last bad bb:\n";
+    @sb_output = SwitchBack($N_LAST_BAD);
+}
+
+print @sb_output;
+print "\n\n";
+if ($success) {
+    print "*** Success!  No bad bbs found. ***\n";
+} else {
+    if ($N_LAST_BAD == $GIVEN_LAST_BAD) {
+	print "*** No failures detected within given bb range ***\n";
+	print " - check given 'last_bad' argument\n";
+    } else {
+	if ($N_LAST_BAD == $GIVEN_LAST_GOOD) {
+	    print "*** Failed on bb given as last_good ***\n";
+	    print " - decrease the 'last_good' argument\n";
+	} else {
+	    print "*** Failure: Last failed switchback bb: $N_LAST_BAD ***\n";
+	    print "Hence bad bb: ". ($N_LAST_BAD - 1) ."\n";
+	}
+    }
+}
+print "\n";
+if (DEBUG) {
+    print "END:  N=$N\n";
+    print "END: lg=$N_LAST_GOOD\n";
+    print "END: lb=$N_LAST_BAD\n";
+    print "END: GIVEN_LAST_BAD=$GIVEN_LAST_BAD\n";
+    print "\n";
+}
+Exit 0;
diff --git a/VEX/switchback/linker.c b/VEX/switchback/linker.c
new file mode 100644
index 0000000..8a4ed35
--- /dev/null
+++ b/VEX/switchback/linker.c
@@ -0,0 +1,1485 @@
+/*
+  13 Dec '05
+  Linker no longer used - apart from mymalloc().
+  Instead, simply compile and link switchback.c with test_xxx.c, e.g.:
+  ./> (cd .. && make EXTRA_CFLAGS="-m64" libvex_ppc64_linux.a) && gcc -m64 -Wall -O -g -o switchback switchback.c linker.c ../libvex_ppc64_linux.a test_bzip2.c
+*/
+
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <string.h>
+//#include <malloc.h>
+
+#include "linker.h"
+
+#include "../pub/libvex_basictypes.h"
+
+#if 0
+#define IF_DEBUG(x,y) /* */
+static int debug_linker = 0;
+#endif
+
+
+#if defined(__x86_64__)
+#   define x86_64_TARGET_ARCH
+#elif defined(__i386__)
+#   define i386_TARGET_ARCH
+#elif defined (__powerpc__)
+#   define ppc32_TARGET_ARCH
+#elif defined(__aarch64__)
+#   define arm64_TARGET_ARCH
+#else
+#   error "Unknown arch"
+#endif
+
+
+#if 0  /* compiled out: zeroing arena allocator, kept for reference only */
+#define CALLOC_MAX 10000000
+static HChar calloc_area[CALLOC_MAX];
+static UInt calloc_used = 0;
+static void* calloc_below2G ( Int n, Int m )  /* n*m zeroed bytes; name implies result must sit below 2GB — presumably guaranteed by static placement, TODO confirm */
+{
+   void* p;
+   int i;
+   while ((calloc_used % 16) > 0) calloc_used++;  /* 16-byte align the next handout */
+   assert(calloc_used + n*m < CALLOC_MAX);  /* arena exhaustion is fatal, no error return */
+   p = &calloc_area[calloc_used];
+   for (i = 0; i < n*m; i++)
+     calloc_area[calloc_used+i] = 0;
+   calloc_used += n*m;
+   return p;
+}
+#endif
+
+#define MYMALLOC_MAX 50*1000*1000
+static HChar mymalloc_area[MYMALLOC_MAX];
+static UInt  mymalloc_used = 0;
+void* mymalloc ( Int n )  /* bump-allocate n bytes from the static arena; each block 0x1000-aligned; storage is never reclaimed (see myfree) */
+{
+   void* p;
+#if defined(__powerpc64__) || defined(__aarch64__)
+   while ((ULong)(mymalloc_area+mymalloc_used) & 0xFFF)  /* step to next 4K boundary (64-bit pointer cast) */
+#else
+   while ((UInt)(mymalloc_area+mymalloc_used) & 0xFFF)  /* step to next 4K boundary (32-bit pointer cast) */
+#endif
+      mymalloc_used++;
+   assert(mymalloc_used+n < MYMALLOC_MAX);  /* arena exhaustion is fatal, no NULL return */
+   p = (void*)(&mymalloc_area[mymalloc_used]);
+   mymalloc_used += n;
+   //   printf("mymalloc(%d) = %p\n", n, p);
+   return p;
+}
+
+void myfree ( void* p )  /* deliberate no-op: mymalloc's arena is never reclaimed */
+{
+}
+
+
+
+
+
+
+
+#if 0
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// TYPES
+
+#define FALSE 0
+#define TRUE  1
+
+typedef enum { OBJECT_LOADED, OBJECT_RESOLVED } OStatus;
+
+
+#define N_FIXUP_PAGES 1
+
+
+/* Indication of section kinds for loaded objects.  Needed by
+   the GC for deciding whether or not a pointer on the stack
+   is a code pointer.
+*/
+typedef 
+   enum { SECTIONKIND_CODE_OR_RODATA,
+          SECTIONKIND_RWDATA,
+          SECTIONKIND_OTHER,
+          SECTIONKIND_NOINFOAVAIL } 
+   SectionKind;
+
+typedef 
+   struct _Section { 
+      void* start; 
+      void* end; 
+      SectionKind kind;
+      struct _Section* next;
+   } 
+   Section;
+
+typedef 
+   struct _ProddableBlock {
+      void* start;
+      int   size;
+      struct _ProddableBlock* next;
+   }
+   ProddableBlock;
+
+/* Top-level structure for an object module.  One of these is allocated
+ * for each object file in use.
+ */
+typedef struct _ObjectCode {
+    OStatus    status;
+    char*      fileName;
+    int        fileSize;
+    char*      formatName;            /* eg "ELF32", "DLL", "COFF", etc. */
+
+    /* An array containing ptrs to all the symbol names copied from
+       this object into the global symbol hash table.  This is so that
+       we know which parts of the latter mapping to nuke when this
+       object is removed from the system. */
+    char**     symbols;
+    int        n_symbols;
+
+    /* ptr to malloc'd lump of memory holding the obj file */
+    void*      image;
+
+    /* Fixup area for long-distance jumps. */
+    char*      fixup;
+    int        fixup_used;
+    int        fixup_size;
+
+    /* The section-kind entries for this object module.  Linked
+       list. */
+    Section* sections;
+
+    /* A private hash table for local symbols. */
+    /* HashTable* */ void* lochash;
+    
+    /* Allow a chain of these things */
+    struct _ObjectCode * next;
+
+    /* SANITY CHECK ONLY: a list of the only memory regions which may
+       safely be prodded during relocation.  Any attempt to prod
+       outside one of these is an error in the linker. */
+    ProddableBlock* proddables;
+
+} ObjectCode;
+
+/*
+ * Define a set of types which can be used for both ELF32 and ELF64
+ */
+
+#if VEX_HOST_WORDSIZE == 8
+#define ELFCLASS    ELFCLASS64
+#define Elf_Addr    Elf64_Addr
+#define Elf_Word    Elf64_Word
+#define Elf_Sword   Elf64_Sword
+#define Elf_Ehdr    Elf64_Ehdr
+#define Elf_Phdr    Elf64_Phdr
+#define Elf_Shdr    Elf64_Shdr
+#define Elf_Sym     Elf64_Sym
+#define Elf_Rel     Elf64_Rel
+#define Elf_Rela    Elf64_Rela
+#define ELF_ST_TYPE ELF64_ST_TYPE
+#define ELF_ST_BIND ELF64_ST_BIND
+#define ELF_R_TYPE  ELF64_R_TYPE
+#define ELF_R_SYM   ELF64_R_SYM
+#else
+#define ELFCLASS    ELFCLASS32
+#define Elf_Addr    Elf32_Addr
+#define Elf_Word    Elf32_Word
+#define Elf_Sword   Elf32_Sword
+#define Elf_Ehdr    Elf32_Ehdr
+#define Elf_Phdr    Elf32_Phdr
+#define Elf_Shdr    Elf32_Shdr
+#define Elf_Sym     Elf32_Sym
+#define Elf_Rel     Elf32_Rel
+#define Elf_Rela    Elf32_Rela
+#ifndef ELF_ST_TYPE
+#define ELF_ST_TYPE ELF32_ST_TYPE
+#endif
+#ifndef ELF_ST_BIND
+#define ELF_ST_BIND ELF32_ST_BIND
+#endif
+#ifndef ELF_R_TYPE
+#define ELF_R_TYPE  ELF32_R_TYPE
+#endif
+#ifndef ELF_R_SYM
+#define ELF_R_SYM   ELF32_R_SYM
+#endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// PARANOIA
+
+/* -----------------------------------------------------------------------
+ * Sanity checking.  For each ObjectCode, maintain a list of address ranges
+ * which may be prodded during relocation, and abort if we try and write
+ * outside any of these.
+ */
+static void addProddableBlock ( ObjectCode* oc, void* start, int size )  /* register [start, start+size) as a range relocation may legally write to */
+{
+   ProddableBlock* pb
+      = mymalloc(sizeof(ProddableBlock));
+   if (debug_linker)
+      fprintf(stderr, "aPB oc=%p %p %d   (%p .. %p)\n", oc, start, size,
+	      start, ((char*)start)+size-1 );
+   assert(size > 0);
+   pb->start      = start;
+   pb->size       = size;
+   pb->next       = oc->proddables;  /* push onto the object's singly-linked list */
+   oc->proddables = pb;
+}
+
+static void checkProddableBlock ( ObjectCode* oc, void* addr )  /* exit(1) unless a 4-byte write at addr falls wholly inside some registered range */
+{
+   ProddableBlock* pb;
+   for (pb = oc->proddables; pb != NULL; pb = pb->next) {
+      char* s = (char*)(pb->start);
+      char* e = s + pb->size - 1;  /* e = last valid byte, inclusive */
+      char* a = (char*)addr;
+      /* Assumes that the biggest fixup involves a 4-byte write.  This
+         probably needs to be changed to 8 (ie, +7) on 64-bit
+         plats. */
+      if (a >= s && (a+3) <= e) return;
+   }
+   fprintf(stderr,
+           "checkProddableBlock: invalid fixup %p in runtime linker\n",
+           addr);
+   exit(1);
+}
+
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// String->Addr mappings
+
+typedef 
+   struct { char* mp_name; void* mp_addr; } 
+   Maplet;
+
+typedef
+   struct {
+      int sm_size;
+      int sm_used;
+      Maplet* maplets;
+   }
+   StringMap;
+
+static StringMap* new_StringMap ( void )  /* fresh empty name->address map with initial capacity 10 */
+{
+   StringMap* sm = mymalloc(sizeof(StringMap));
+   sm->sm_size = 10;
+   sm->sm_used = 0;
+   sm->maplets = mymalloc(10 * sizeof(Maplet));
+   return sm;
+}
+
+static void delete_StringMap ( StringMap* sm )  /* release map storage; myfree is a no-op, so this mainly poisons sm->maplets */
+{
+   assert(sm->maplets != NULL);  /* catch double-delete */
+   myfree(sm->maplets);
+   sm->maplets = NULL;
+   myfree(sm);
+}
+
+static void ensure_StringMap ( StringMap* sm )  /* guarantee room for one more maplet, doubling capacity if full */
+{
+   int i;
+   Maplet* mp2;
+   assert(sm->maplets != NULL);
+   if (sm->sm_used < sm->sm_size)
+     return;
+   sm->sm_size *= 2;
+   mp2 = mymalloc(sm->sm_size * sizeof(Maplet));  /* arena alloc: old array is copied then abandoned */
+   for (i = 0; i < sm->sm_used; i++)
+      mp2[i] = sm->maplets[i];
+   myfree(sm->maplets);
+   sm->maplets = mp2;
+}
+
+static void* search_StringMap ( StringMap* sm, char* name )  /* linear scan by strcmp; returns mapped address or NULL if absent */
+{
+   int i;
+   for (i = 0; i < sm->sm_used; i++)
+      if (0 == strcmp(name, sm->maplets[i].mp_name))
+         return sm->maplets[i].mp_addr;
+   return NULL;
+}
+
+static void addto_StringMap ( StringMap* sm, char* name, void* addr )  /* append (name, addr); stores the name pointer itself — caller must keep it alive */
+{
+   ensure_StringMap(sm);
+   sm->maplets[sm->sm_used].mp_name = name;
+   sm->maplets[sm->sm_used].mp_addr = addr;
+   sm->sm_used++;
+}
+
+static void paranoid_addto_StringMap ( StringMap* sm, char* name, void* addr )  /* like addto_StringMap, but duplicate names are a fatal error */
+{
+   if (0)
+       fprintf(stderr, "paranoid_addto_StringMap(%s,%p)\n", name, addr);
+   if (search_StringMap(sm,name) != NULL) {
+      fprintf(stderr, "duplicate: paranoid_addto_StringMap(%s,%p)\n", name, addr);
+      exit(1);
+   }
+   addto_StringMap(sm,name,addr);
+}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// Top-level linker control.
+
+StringMap*  global_symbol_table = NULL;
+ObjectCode* global_object_list = NULL;
+
+static void initLinker ( void )  /* idempotent: create the global symbol table on first call only */
+{
+   if (global_symbol_table != NULL)
+      return;
+   global_symbol_table = new_StringMap();
+}
+
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// SYMBOL TABLE(s)
+
+/* -----------------------------------------------------------------
+ * lookup a symbol in the global symbol table
+ */
+static 
+void * lookupSymbol( char *lbl )  /* resolve lbl via the global symbol table; NULL if not present */
+{
+   void *val;
+   initLinker() ;  /* lazy init so callers need no setup */
+   assert(global_symbol_table != NULL);
+   val = search_StringMap(global_symbol_table, lbl);
+   return val;
+}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// HELPERS
+
+/*
+ * Generic ELF functions
+ */
+
+static char *
+findElfSection ( void* objImage, Elf_Word sh_type )  /* return contents of FIRST section of the given type, or NULL; skips shstrtab and .stabstr */
+{
+   char* ehdrC = (char*)objImage;
+   Elf_Ehdr* ehdr = (Elf_Ehdr*)ehdrC;
+   Elf_Shdr* shdr = (Elf_Shdr*)(ehdrC + ehdr->e_shoff);
+   char* sh_strtab = ehdrC + shdr[ehdr->e_shstrndx].sh_offset;  /* section-name string table */
+   char* ptr = NULL;
+   int i;
+
+   for (i = 0; i < ehdr->e_shnum; i++) {
+      if (shdr[i].sh_type == sh_type
+          /* Ignore the section header's string table. */
+          && i != ehdr->e_shstrndx
+	  /* Ignore string tables named .stabstr, as they contain
+             debugging info. */
+          && 0 != memcmp(".stabstr", sh_strtab + shdr[i].sh_name, 8)
+         ) {
+         ptr = ehdrC + shdr[i].sh_offset;
+         break;  /* first match wins; assumes one relevant section of this type */
+      }
+   }
+   return ptr;
+}
+
+#ifdef arm_TARGET_ARCH
+static
+char* alloc_fixup_bytes ( ObjectCode* oc, int nbytes )  /* carve nbytes (word-multiple) out of the object's trampoline/fixup area */
+{
+   char* res;
+   assert(nbytes % 4 == 0);
+   assert(nbytes > 0);
+   res = &(oc->fixup[oc->fixup_used]);
+   oc->fixup_used += nbytes;
+   if (oc->fixup_used >= oc->fixup_size) {  /* NOTE(review): overflow detected only after res was handed out — fatal exit makes that moot */
+     fprintf(stderr, "fixup area too small for %s\n", oc->fileName);
+     exit(1);
+   }
+   return res;
+}
+#endif
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// RESOLVE
+
+static
+void* lookup_magic_hacks ( char* sym )  /* last-chance resolver: map a few known names to host addresses; NULL otherwise */
+{
+   if (0==strcmp(sym, "printf")) return (void*)(&printf);  /* only printf is special-cased */
+   return NULL;
+}
+
+#ifdef arm_TARGET_ARCH
+static
+void arm_notify_new_code ( char* start, int length )  /* tell the kernel [start, start+length) now holds code (i-cache sync) */
+{
+  __asm __volatile ("mov r1, %0\n\t"
+                    "mov r2, %1\n\t"
+                    "mov r3, %2\n\t"
+                    "swi 0x9f0002\n\t"  /* NOTE(review): looks like the ARM Linux cacheflush syscall — confirm against kernel ABI */
+                    : 
+                    : "ir" (start), "ir" (length), "ir" (0) );
+}
+
+
+static
+void gen_armle_goto ( char* fixup, char* dstP )  /* emit an 8-byte little-endian absolute-jump trampoline at fixup: ldr pc,[pc,#-4] + target word */
+{
+  Elf_Word w = (Elf_Word)dstP;
+  /* 
+   2                    .text
+   3 0000 04F01FE5              ldr     pc, value
+   4 0004 44332211      value:  .word   0x11223344
+   */
+  fprintf(stderr,"at %p generating jump to %p\n", fixup, dstP );
+  fixup[0] = 0x04; fixup[1] = 0xF0; fixup[2] = 0x1F; fixup[3] = 0xE5;  /* the ldr pc insn, byte-reversed (LE) */
+  fixup[4] = w & 0xFF; w >>= 8;
+  fixup[5] = w & 0xFF; w >>= 8;
+  fixup[6] = w & 0xFF; w >>= 8;
+  fixup[7] = w & 0xFF; w >>= 8;
+  arm_notify_new_code(fixup, 8);  /* flush i-cache for the trampoline */
+}
+#endif /* arm_TARGET_ARCH */
+
+
+#ifdef ppc32_TARGET_ARCH
+static void invalidate_icache(void *ptr, int nbytes)  /* make freshly-written code fetchable: push D-cache lines to memory, then invalidate I-cache lines */
+{
+   unsigned long startaddr = (unsigned long) ptr;
+   unsigned long endaddr = startaddr + nbytes;
+   unsigned long addr;
+   unsigned long cls = 16; //VG_(cache_line_size);  NOTE(review): hard-coded 16-byte lines — confirm for target CPU
+
+   startaddr &= ~(cls - 1);  /* round down to a line boundary */
+   for (addr = startaddr; addr < endaddr; addr += cls)
+      asm volatile("dcbst 0,%0" : : "r" (addr));
+   asm volatile("sync");
+   for (addr = startaddr; addr < endaddr; addr += cls)
+      asm volatile("icbi 0,%0" : : "r" (addr));
+   asm volatile("sync; isync");  /* ensure invalidation completes before any new fetch */
+}
+
+static UInt compute_ppc_HA ( UInt x ) {  /* high 16 bits adjusted for sign-carry of low half (@ha semantics: HA + sign-extended LO == x) */
+   return 0xFFFF & ( (x >> 16) + ((x & 0x8000) ? 1 : 0) );
+}
+static UInt compute_ppc_LO ( UInt x ) {  /* low 16 bits (@l) */
+   return 0xFFFF & x;
+}
+static UInt compute_ppc_HI ( UInt x ) {  /* plain high 16 bits, no adjustment (@h) */
+   return 0xFFFF & (x >> 16);
+}
+#endif /* ppc32_TARGET_ARCH */
+
+
+/* Do ELF relocations which lack an explicit addend.  All x86-linux
+   relocations appear to be of this form. */
+static int
+do_Elf_Rel_relocations ( ObjectCode* oc, char* ehdrC,
+                         Elf_Shdr* shdr, int shnum,
+                         Elf_Sym*  stab, char* strtab )
+{
+   int j;
+   char *symbol = NULL;
+   Elf_Word* targ;
+   Elf_Rel*  rtab = (Elf_Rel*) (ehdrC + shdr[shnum].sh_offset);
+   int         nent = shdr[shnum].sh_size / sizeof(Elf_Rel);
+   int target_shndx = shdr[shnum].sh_info;
+   int symtab_shndx = shdr[shnum].sh_link;
+
+   stab  = (Elf_Sym*) (ehdrC + shdr[ symtab_shndx ].sh_offset);
+   targ  = (Elf_Word*)(ehdrC + shdr[ target_shndx ].sh_offset);
+   IF_DEBUG(linker,belch( "relocations for section %d using symtab %d",
+                          target_shndx, symtab_shndx ));
+
+   for (j = 0; j < nent; j++) {
+      Elf_Addr offset = rtab[j].r_offset;
+      Elf_Addr info   = rtab[j].r_info;
+
+      Elf_Addr  P  = ((Elf_Addr)targ) + offset;
+      Elf_Word* pP = (Elf_Word*)P;
+      Elf_Addr  A  = *pP;
+      Elf_Addr  S;
+      Elf_Addr  value;
+
+      IF_DEBUG(linker,belch( "Rel entry %3d is raw(%6p %6p)",
+                             j, (void*)offset, (void*)info ));
+      if (!info) {
+         IF_DEBUG(linker,belch( " ZERO" ));
+         S = 0;
+      } else {
+         Elf_Sym sym = stab[ELF_R_SYM(info)];
+	 /* First see if it is a local symbol. */
+         if (ELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+            /* Yes, so we can get the address directly from the ELF symbol
+               table. */
+            symbol = sym.st_name==0 ? "(noname)" : strtab+sym.st_name;
+            S = (Elf_Addr)
+                (ehdrC + shdr[ sym.st_shndx ].sh_offset
+                       + stab[ELF_R_SYM(info)].st_value);
+
+	 } else {
+            /* No, so look up the name in our global table. */
+            symbol = strtab + sym.st_name;
+            S = (Elf_Addr)lookupSymbol( symbol );
+	 }
+         if (!S) {
+            S = (Elf_Addr)lookup_magic_hacks(symbol);
+         }
+         if (!S) {
+            fprintf(stderr,"%s: unknown symbol `%s'\n", 
+                           oc->fileName, symbol);
+	    return 0;
+         }
+         if (debug_linker>1) 
+            fprintf(stderr, "\n`%s' resolves to %p\n", symbol, (void*)S );
+      }
+
+      if (debug_linker>1)
+         fprintf(stderr, "Reloc: P = %p   S = %p   A = %p\n",
+			     (void*)P, (void*)S, (void*)A );
+      checkProddableBlock ( oc, pP );
+
+      value = S + A;
+
+      switch (ELF_R_TYPE(info)) {
+#        ifdef i386_TARGET_ARCH
+         case R_386_32:   *pP = value;     break;
+         case R_386_PC32: *pP = value - P; break;
+#        endif
+#        ifdef arm_TARGET_ARCH
+         case R_ARM_PC24: {
+	    Elf_Word w, delta, deltaTop8;
+	    /* Generate a jump sequence into the fixup area
+	       and branch to that instead. */
+ 	    char* fixup = alloc_fixup_bytes(oc, 8);
+            /* First of all, figure out where we're really trying to
+               jump to. */
+            // compensate for pc+8 bias
+            Elf_Word real_dst = (A & 0x00FFFFFF) + 2;
+	    // sign-extend 24-to-32 of real_dst
+            if (real_dst & 0x00800000) 
+               real_dst |= 0xFF000000;
+            else
+               real_dst &= 0x00FFFFFF;
+
+            real_dst <<= 2;
+	    real_dst += S;
+
+	    gen_armle_goto(fixup, (char*)real_dst);
+
+	    /* Delta is in bytes .. */
+            delta = (((Elf_Word)fixup) - ((Elf_Word)pP) - 8);
+            deltaTop8 = (delta >> 24) & 0xFF;
+            if (deltaTop8 != 0 && deltaTop8 != 0xFF) {
+	      fprintf(stderr,"R_ARM_PC24: out of range delta 0x%x for %s\n",
+		      delta, symbol);
+	      exit(1);
+	    }
+            delta >>= 2;
+	    w = *pP;
+            w &= 0xFF000000;
+            w |= (0x00FFFFFF & delta );
+            *pP = w;
+	    break;
+         }
+         case R_ARM_ABS32:
+	    *pP = value;
+	    break;
+#        endif
+         default:
+            fprintf(stderr,
+                    "%s: unhandled ELF relocation(Rel) type %d\n\n",
+		    oc->fileName, (Int)ELF_R_TYPE(info));
+            return 0;
+      }
+
+   }
+   return 1;
+}
+
+/* Do ELF relocations for which explicit addends are supplied.
+   sparc-solaris relocations appear to be of this form. */
+static int
+do_Elf_Rela_relocations ( ObjectCode* oc, char* ehdrC,
+                          Elf_Shdr* shdr, int shnum,
+                          Elf_Sym*  stab, char* strtab )
+{
+   int j;
+   char *symbol;
+   Elf_Addr targ;
+   Elf_Rela* rtab = (Elf_Rela*) (ehdrC + shdr[shnum].sh_offset);
+   int         nent = shdr[shnum].sh_size / sizeof(Elf_Rela);
+   int target_shndx = shdr[shnum].sh_info;
+   int symtab_shndx = shdr[shnum].sh_link;
+
+   stab  = (Elf_Sym*) (ehdrC + shdr[ symtab_shndx ].sh_offset);
+   targ  = (Elf_Addr) (ehdrC + shdr[ target_shndx ].sh_offset);
+   IF_DEBUG(linker,belch( "relocations for section %d using symtab %d",
+                          target_shndx, symtab_shndx ));
+
+   for (j = 0; j < nent; j++) {
+#if defined(DEBUG) || defined(sparc_TARGET_ARCH)  \
+                   || defined(ia64_TARGET_ARCH)   \
+                   || defined(x86_64_TARGET_ARCH) \
+                   || defined(ppc32_TARGET_ARCH)
+      /* This #ifdef only serves to avoid unused-var warnings. */
+      Elf_Addr  offset = rtab[j].r_offset;
+      Elf_Addr  P      = targ + offset;
+#endif
+      Elf_Addr  info   = rtab[j].r_info;
+      Elf_Addr  A      = rtab[j].r_addend;
+      Elf_Addr  S =0;
+      Elf_Addr  value;
+#     if defined(sparc_TARGET_ARCH)
+      Elf_Word* pP = (Elf_Word*)P;
+      Elf_Word  w1, w2;
+#     endif
+#     if defined(ia64_TARGET_ARCH)
+      Elf64_Xword *pP = (Elf64_Xword *)P;
+      Elf_Addr addr;
+#     endif
+#     if defined(x86_64_TARGET_ARCH)
+      ULong* pP = (ULong*)P;
+#     endif
+#     if defined(ppc32_TARGET_ARCH)
+      Int sI, sI2;
+      Elf_Word* pP = (Elf_Word*)P;
+#     endif
+
+      IF_DEBUG(linker,belch( "Rel entry %3d is raw(%6p %6p %6p)   ",
+                             j, (void*)offset, (void*)info,
+                                (void*)A ));
+      if (!info) {
+         IF_DEBUG(linker,belch( " ZERO" ));
+         S = 0;
+      } else {
+         Elf_Sym sym = stab[ELF_R_SYM(info)];
+	 /* First see if it is a local symbol. */
+         if (ELF_ST_BIND(sym.st_info) == STB_LOCAL) {
+            /* Yes, so we can get the address directly from the ELF symbol
+               table. */
+            symbol = sym.st_name==0 ? "(noname)" : strtab+sym.st_name;
+            S = (Elf_Addr)
+                (ehdrC + shdr[ sym.st_shndx ].sh_offset
+                       + stab[ELF_R_SYM(info)].st_value);
+#ifdef ELF_FUNCTION_DESC
+	    /* Make a function descriptor for this function */
+            if (S && ELF_ST_TYPE(sym.st_info) == STT_FUNC) {
+               S = allocateFunctionDesc(S + A);
+       	       A = 0;
+            }
+#endif
+	 } else {
+            /* No, so look up the name in our global table. */
+            symbol = strtab + sym.st_name;
+            S = (Elf_Addr)lookupSymbol( symbol );
+
+#ifdef ELF_FUNCTION_DESC
+	    /* If a function, already a function descriptor - we would
+	       have to copy it to add an offset. */
+            if (S && (ELF_ST_TYPE(sym.st_info) == STT_FUNC) && (A != 0))
+               belch("%s: function %s with addend %p", oc->fileName, symbol, (void *)A);
+#endif
+	 }
+         if (!S) {
+	   fprintf(stderr,"%s: unknown symbol `%s'\n", oc->fileName, symbol);
+	   return 0;
+         }
+         if (0)
+            fprintf(stderr, "`%s' resolves to %p\n", symbol, (void*)S );
+      }
+
+#if 0
+         fprintf ( stderr, "Reloc: offset = %p   P = %p   S = %p   A = %p\n",
+                           (void*)offset, (void*)P, (void*)S, (void*)A );
+#endif
+
+      /* checkProddableBlock ( oc, (void*)P ); */
+
+      value = S + A;
+
+      switch (ELF_R_TYPE(info)) {
+#        if defined(sparc_TARGET_ARCH)
+         case R_SPARC_WDISP30:
+            w1 = *pP & 0xC0000000;
+            w2 = (Elf_Word)((value - P) >> 2);
+            ASSERT((w2 & 0xC0000000) == 0);
+            w1 |= w2;
+            *pP = w1;
+            break;
+         case R_SPARC_HI22:
+            w1 = *pP & 0xFFC00000;
+            w2 = (Elf_Word)(value >> 10);
+            ASSERT((w2 & 0xFFC00000) == 0);
+            w1 |= w2;
+            *pP = w1;
+            break;
+         case R_SPARC_LO10:
+            w1 = *pP & ~0x3FF;
+            w2 = (Elf_Word)(value & 0x3FF);
+            ASSERT((w2 & ~0x3FF) == 0);
+            w1 |= w2;
+            *pP = w1;
+            break;
+         /* According to the Sun documentation:
+            R_SPARC_UA32
+            This relocation type resembles R_SPARC_32, except it refers to an
+            unaligned word. That is, the word to be relocated must be treated
+            as four separate bytes with arbitrary alignment, not as a word
+            aligned according to the architecture requirements.
+
+            (JRS: which means that freeloading on the R_SPARC_32 case
+            is probably wrong, but hey ...)
+         */
+         case R_SPARC_UA32:
+         case R_SPARC_32:
+            w2 = (Elf_Word)value;
+            *pP = w2;
+            break;
+#        endif
+#        if defined(ia64_TARGET_ARCH)
+	 case R_IA64_DIR64LSB:
+	 case R_IA64_FPTR64LSB:
+	    *pP = value;
+	    break;
+	 case R_IA64_PCREL64LSB:
+	    *pP = value - P;
+	    break;
+	 case R_IA64_SEGREL64LSB:
+	    addr = findElfSegment(ehdrC, value);
+	    *pP = value - addr;
+	    break;
+	 case R_IA64_GPREL22:
+	    ia64_reloc_gprel22(P, value);
+	    break;
+	 case R_IA64_LTOFF22:
+	 case R_IA64_LTOFF22X:
+	 case R_IA64_LTOFF_FPTR22:
+	    addr = allocateGOTEntry(value);
+	    ia64_reloc_gprel22(P, addr);
+	    break;
+	 case R_IA64_PCREL21B:
+	    ia64_reloc_pcrel21(P, S, oc);
+	    break;
+	 case R_IA64_LDXMOV:
+	    /* This goes with R_IA64_LTOFF22X and points to the load to
+	       convert into a move.  We don't implement relaxation. */
+	    break;
+#        endif
+#        if defined(x86_64_TARGET_ARCH)
+         case R_X86_64_64: /* 1 *//* Direct 64 bit  */
+            *((ULong*)pP) = (ULong)(S + A);
+            break;
+         case R_X86_64_PC32: /* 2 *//* PC relative 32 bit signed */
+            *((UInt*)pP) = (UInt)(S + A - P);
+            break;
+         case R_X86_64_32: /* 10 *//* Direct 32 bit zero extended */
+            *((UInt*)pP) = (UInt)(S + A);
+            break;
+         case R_X86_64_32S: /* 11 *//* Direct 32 bit sign extended */
+            *((UInt*)pP) = (UInt)(S + A);
+            break;
+#        endif
+#        if defined(ppc32_TARGET_ARCH)
+         case R_PPC_ADDR32: /* 1 *//* 32bit absolute address */
+            *((UInt*)pP) = S+A;
+            invalidate_icache(pP,4);
+            break;
+         case R_PPC_ADDR16_LO: /* 4 *//* lower 16bit of absolute address */
+            *((UInt*)pP) &= 0x0000FFFF;
+            *((UInt*)pP) |= 0xFFFF0000 & (compute_ppc_LO(S+A) << 16);
+            invalidate_icache(pP,4);
+            break;
+         case R_PPC_ADDR16_HA: /* 6 *//* adjusted high 16bit */
+            *((UInt*)pP) &= 0x0000FFFF;
+            *((UInt*)pP) |= 0xFFFF0000 & (compute_ppc_HA(S+A) << 16);
+            invalidate_icache(pP,4);
+            break;
+         case R_PPC_REL24: /* 10 *//* PC relative 26 bit */
+            sI = S+A-P;
+	    sI >>= 2;
+	    /* the top 9 bits of sI must be the same (all 0s or
+	       all 1s) for this to be valid; else we have to fail. */
+            sI2 = sI >> 23; /* 23 == 32 - 9 */
+            if (sI2 != 0 && sI2 != 0xFFFFFFFF) {
+               fprintf(stderr, "%s: R_PPC_REL24 relocation failed\n", oc->fileName );
+	       return 0;
+            }
+            *((UInt*)pP) &= ~(0x00FFFFFF << 2);
+            *((UInt*)pP) |= (0xFFFFFF & sI) << 2;
+           invalidate_icache(pP,4);
+            break;
+         case R_PPC_REL32: /* 26 */
+            *((UInt*)pP) = S+A-P;
+            invalidate_icache(pP,4);
+            break;
+#        endif
+         default:
+            fprintf(stderr,
+                    "%s: unhandled ELF relocation(RelA) type %d\n",
+		    oc->fileName, (Int)ELF_R_TYPE(info));
+            return 0;
+      }
+
+   }
+   return 1;
+}
+
+
+static int
+ocResolve_ELF ( ObjectCode* oc )  /* apply all REL/RELA sections of the loaded image; 1 = ok, 0 = failure */
+{
+   char *strtab;
+   int   shnum, ok;
+   Elf_Sym*  stab  = NULL;
+   char*     ehdrC = (char*)(oc->image);
+   Elf_Ehdr* ehdr  = (Elf_Ehdr*) ehdrC;
+   Elf_Shdr* shdr  = (Elf_Shdr*) (ehdrC + ehdr->e_shoff);
+   char* sh_strtab = ehdrC + shdr[ehdr->e_shstrndx].sh_offset;
+
+   /* first find "the" symbol table */
+   stab = (Elf_Sym*) findElfSection ( ehdrC, SHT_SYMTAB );
+
+   /* also go find the string table; findElfSection takes the FIRST match,
+      presumably safe because the verify pass insists on exactly one */
+   strtab = findElfSection ( ehdrC, SHT_STRTAB );
+
+   if (stab == NULL || strtab == NULL) {
+      fprintf(stderr,"%s: can't find string or symbol table\n", oc->fileName);
+      return 0;
+   }
+
+   /* Process the relocation sections. */
+   for (shnum = 0; shnum < ehdr->e_shnum; shnum++) {
+
+      /* Skip sections called ".rel.stab".  These appear to contain
+         relocation entries that, when done, make the stabs debugging
+         info point at the right places.  We ain't interested in all
+         dat jazz, mun. */
+      if (0 == memcmp(".rel.stab", sh_strtab + shdr[shnum].sh_name, 9))
+         continue;
+
+      if (shdr[shnum].sh_type == SHT_REL ) {
+         ok = do_Elf_Rel_relocations ( oc, ehdrC, shdr,
+                                       shnum, stab, strtab );
+         if (!ok) return ok;  /* propagate first failure */
+      }
+      else
+      if (shdr[shnum].sh_type == SHT_RELA) {
+         ok = do_Elf_Rela_relocations ( oc, ehdrC, shdr,
+                                        shnum, stab, strtab );
+         if (!ok) return ok;
+      }
+   }
+
+   /* Free the local symbol table; we won't need it again. */
+   delete_StringMap(oc->lochash);
+   oc->lochash = NULL;
+
+   return 1;
+}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// VERIFY
+
+static int
+ocVerifyImage_ELF ( ObjectCode* oc )
+{
+   Elf_Shdr* shdr;
+   Elf_Sym*  stab;
+   int i, j, nent, nstrtab, nsymtabs;
+   char* sh_strtab;
+   char* strtab;
+
+   char*     ehdrC = (char*)(oc->image);
+   Elf_Ehdr* ehdr  = (Elf_Ehdr*)ehdrC;
+
+   if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
+       ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
+       ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
+       ehdr->e_ident[EI_MAG3] != ELFMAG3) {
+      fprintf(stderr,"%s: not an ELF object\n", oc->fileName);
+      return 0;
+   }
+
+   if (ehdr->e_ident[EI_CLASS] != ELFCLASS) {
+      fprintf(stderr,"%s: unsupported ELF format\n", oc->fileName);
+      return 0;
+   }
+
+   if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) {
+      if (debug_linker)
+         fprintf(stderr, "Is little-endian\n" );
+   } else
+   if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) {
+       if (debug_linker)
+          fprintf(stderr, "Is big-endian\n" );
+   } else {
+       fprintf(stderr,"%s: unknown endiannness\n", oc->fileName);
+       return 0;
+   }
+
+   if (ehdr->e_type != ET_REL) {
+      fprintf(stderr,"%s: not a relocatable object (.o) file\n", oc->fileName);
+      return 0;
+   }
+   if (debug_linker)
+      fprintf(stderr, "Is a relocatable object (.o) file\n" );
+
+   if (debug_linker)
+      fprintf(stderr, "Architecture is " );
+   switch (ehdr->e_machine) {
+      case EM_386:    if (debug_linker) fprintf(stderr, "x86\n" ); break;
+      case EM_SPARC:  if (debug_linker) fprintf(stderr, "sparc\n" ); break;
+      case EM_ARM:    if (debug_linker) fprintf(stderr, "arm\n" ); break;
+#ifdef EM_IA_64
+      case EM_IA_64:  if (debug_linker) fprintf(stderr, "ia64\n" ); break;
+#endif
+      case EM_X86_64: if (debug_linker) fprintf(stderr, "x86_64\n" ); break;
+      case EM_PPC:    if (debug_linker) fprintf(stderr, "ppc\n" ); break;
+      default:        if (debug_linker) fprintf(stderr, "unknown\n" );
+                      fprintf(stderr,"%s: unknown architecture\n", oc->fileName);
+                      return 0;
+   }
+
+   if (debug_linker>1) fprintf(stderr,
+             "\nSection header table: start %lld, n_entries %d, ent_size %d\n",
+             (Long)ehdr->e_shoff, 
+             ehdr->e_shnum, ehdr->e_shentsize  );
+
+   assert (ehdr->e_shentsize == sizeof(Elf_Shdr));
+
+   shdr = (Elf_Shdr*) (ehdrC + ehdr->e_shoff);
+
+   if (ehdr->e_shstrndx == SHN_UNDEF) {
+      fprintf(stderr,"%s: no section header string table\n", oc->fileName);
+      return 0;
+   } else {
+      if (debug_linker>1) 
+         fprintf(stderr, "Section header string table is section %d\n",
+                          ehdr->e_shstrndx);
+      sh_strtab = ehdrC + shdr[ehdr->e_shstrndx].sh_offset;
+   }
+
+   for (i = 0; i < ehdr->e_shnum; i++) {
+      if (debug_linker>1) fprintf(stderr, "%2d:  ", i );
+      if (debug_linker>1) fprintf(stderr, "type=%2d  ", (int)shdr[i].sh_type );
+      if (debug_linker>1) fprintf(stderr, "size=%4d  ", (int)shdr[i].sh_size );
+      if (debug_linker>1) fprintf(stderr, "offs=%4d  ", (int)shdr[i].sh_offset );
+      if (debug_linker>1) fprintf(stderr, "  (%p .. %p)  ",
+               ehdrC + shdr[i].sh_offset,
+		      ehdrC + shdr[i].sh_offset + shdr[i].sh_size - 1);
+
+      if (shdr[i].sh_type == SHT_REL) {
+	  if (debug_linker>1) fprintf(stderr, "Rel  " );
+      } else if (shdr[i].sh_type == SHT_RELA) {
+	  if (debug_linker>1) fprintf(stderr, "RelA " );
+      } else {
+	  if (debug_linker>1) fprintf(stderr,"     ");
+      }
+      if (sh_strtab) {
+	  if (debug_linker>1) fprintf(stderr, "sname=%s\n", 
+             sh_strtab + shdr[i].sh_name );
+      }
+   }
+
+   if (debug_linker>1) fprintf(stderr, "\nString tables\n" );
+   strtab = NULL;
+   nstrtab = 0;
+   for (i = 0; i < ehdr->e_shnum; i++) {
+      if (shdr[i].sh_type == SHT_STRTAB
+          /* Ignore the section header's string table. */
+          && i != ehdr->e_shstrndx
+	  /* Ignore string tables named .stabstr, as they contain
+             debugging info. */
+          && 0 != memcmp(".stabstr", sh_strtab + shdr[i].sh_name, 8)
+         ) {
+         if (debug_linker>1) 
+            fprintf(stderr,"   section %d is a normal string table\n", i );
+         strtab = ehdrC + shdr[i].sh_offset;
+         nstrtab++;
+      }
+   }
+   if (nstrtab != 1) {
+      fprintf(stderr,"%s: no string tables, or too many\n", oc->fileName);
+      return 0;
+   }
+
+   nsymtabs = 0;
+   if (debug_linker>1) fprintf(stderr, "\nSymbol tables\n" );
+   for (i = 0; i < ehdr->e_shnum; i++) {
+      if (shdr[i].sh_type != SHT_SYMTAB) continue;
+      if (debug_linker>1) fprintf(stderr, "section %d is a symbol table\n", i );
+      nsymtabs++;
+      stab = (Elf_Sym*) (ehdrC + shdr[i].sh_offset);
+      nent = shdr[i].sh_size / sizeof(Elf_Sym);
+      if (debug_linker>1) fprintf(stderr,  
+            "   number of entries is apparently %d (%lld rem)\n",
+               nent,
+               (Long)(shdr[i].sh_size % sizeof(Elf_Sym))
+             );
+      if (0 != shdr[i].sh_size % sizeof(Elf_Sym)) {
+         fprintf(stderr,"%s: non-integral number of symbol table entries\n", 
+                        oc->fileName);
+         return 0;
+      }
+      for (j = 0; j < nent; j++) {
+         if (debug_linker>1) fprintf(stderr, "   %2d  ", j );
+         if (debug_linker>1) fprintf(stderr, "  sec=%-5d  size=%-3d  val=%5p  ",
+                             (int)stab[j].st_shndx,
+                             (int)stab[j].st_size,
+                             (char*)stab[j].st_value );
+
+         if (debug_linker>1) fprintf(stderr, "type=" );
+         switch (ELF_ST_TYPE(stab[j].st_info)) {
+            case STT_NOTYPE:  if (debug_linker>1) fprintf(stderr, "notype " ); break;
+            case STT_OBJECT:  if (debug_linker>1) fprintf(stderr, "object " ); break;
+            case STT_FUNC  :  if (debug_linker>1) fprintf(stderr, "func   " ); break;
+            case STT_SECTION: if (debug_linker>1) fprintf(stderr, "section" ); break;
+            case STT_FILE:    if (debug_linker>1) fprintf(stderr, "file   " ); break;
+            default:          if (debug_linker>1) fprintf(stderr, "?      " ); break;
+         }
+         if (debug_linker>1) fprintf(stderr, "  " );
+
+         if (debug_linker>1) fprintf(stderr, "bind=" );
+         switch (ELF_ST_BIND(stab[j].st_info)) {
+            case STB_LOCAL :  if (debug_linker>1) fprintf(stderr, "local " ); break;
+            case STB_GLOBAL:  if (debug_linker>1) fprintf(stderr, "global" ); break;
+            case STB_WEAK  :  if (debug_linker>1) fprintf(stderr, "weak  " ); break;
+            default:          if (debug_linker>1) fprintf(stderr, "?     " ); break;
+         }
+         if (debug_linker>1) fprintf(stderr, "  " );
+
+         if (debug_linker>1) fprintf(stderr, "name=%s\n", strtab + stab[j].st_name );
+      }
+   }
+
+   if (nsymtabs == 0) {
+      fprintf(stderr,"%s: didn't find any symbol tables\n", oc->fileName);
+      return 0;
+   }
+
+   return 1;
+}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// GETNAMES
+
+/* Walk an ELF object's section headers: classify loadable sections,
+   allocate zeroed space for non-empty .bss sections and COMMON
+   symbols, register proddable (relocatable) areas, and copy the
+   object's defined global/local symbols into oc->symbols, inserting
+   the globals into global_symbol_table.
+   Returns 1 on success, 0 on failure. */
+static int
+ocGetNames_ELF ( ObjectCode* oc )
+{
+   int i, j, k, nent;
+   Elf_Sym* stab;
+
+   char*     ehdrC     = (char*)(oc->image);
+   Elf_Ehdr* ehdr      = (Elf_Ehdr*)ehdrC;
+   char*     strtab    = findElfSection ( ehdrC, SHT_STRTAB );
+   Elf_Shdr* shdr      = (Elf_Shdr*) (ehdrC + ehdr->e_shoff);
+
+   /* Section-header string table, for looking up section names. */
+   char*     sh_strtab = ehdrC + shdr[ehdr->e_shstrndx].sh_offset;
+   char*     sec_name;
+
+   assert(global_symbol_table != NULL);
+
+   if (!strtab) {
+      fprintf(stderr,"%s: no strtab\n", oc->fileName);
+      return 0;
+   }
+
+   /* NOTE(review): 'k' is initialised here but never used below. */
+   k = 0;
+   for (i = 0; i < ehdr->e_shnum; i++) {
+      /* Figure out what kind of section it is.  Logic derived from
+         Figure 1.14 ("Special Sections") of the ELF document
+         ("Portable Formats Specification, Version 1.1"). */
+      Elf_Shdr    hdr    = shdr[i];
+      SectionKind kind   = SECTIONKIND_OTHER;
+      int         is_bss = FALSE;
+
+      if (hdr.sh_type == SHT_PROGBITS
+          && (hdr.sh_flags & SHF_ALLOC) && (hdr.sh_flags & SHF_EXECINSTR)) {
+         /* .text-style section */
+         kind = SECTIONKIND_CODE_OR_RODATA;
+      }
+      else
+      if (hdr.sh_type == SHT_PROGBITS
+          && (hdr.sh_flags & SHF_ALLOC) && (hdr.sh_flags & SHF_WRITE)) {
+         /* .data-style section */
+         kind = SECTIONKIND_RWDATA;
+      }
+      else
+      if (hdr.sh_type == SHT_PROGBITS
+          && (hdr.sh_flags & SHF_ALLOC) && !(hdr.sh_flags & SHF_WRITE)) {
+         /* .rodata-style section */
+         kind = SECTIONKIND_CODE_OR_RODATA;
+      }
+      else
+      if (hdr.sh_type == SHT_NOBITS
+          && (hdr.sh_flags & SHF_ALLOC) && (hdr.sh_flags & SHF_WRITE)) {
+         /* .bss-style section */
+         kind = SECTIONKIND_RWDATA;
+         is_bss = TRUE;
+      }
+
+      if (is_bss && shdr[i].sh_size > 0) {
+         /* This is a non-empty .bss section.  Allocate zeroed space for
+            it, and set its .sh_offset field such that
+            ehdrC + .sh_offset == addr_of_zeroed_space.  */
+         /* NOTE(review): calloc result is not NULL-checked; on failure
+            sh_offset becomes a bogus offset.  The zeroed space is also
+            never freed (intentional for a linker that lives forever?). */
+         char* zspace = calloc(1, shdr[i].sh_size);
+         shdr[i].sh_offset = ((char*)zspace) - ((char*)ehdrC);
+	 if (1)
+         fprintf(stderr, "BSS section at %p, size %lld\n",
+                         zspace, (Long)shdr[i].sh_size);
+      }
+
+      /* When loading objects compiled with -g, it seems there are
+	 relocations in various debug-info sections.  So we'd better
+	 tell addProddableBlock to allow those bits to be prodded. */
+      //fprintf(stderr, "ZZZZZZZZZZ %s\n", sh_strtab + hdr.sh_name);
+      sec_name = sh_strtab + shdr[i].sh_name;
+      if (kind == SECTIONKIND_OTHER
+          && (0 == strcmp(".debug_info", sec_name)
+              || 0 == strcmp(".debug_line", sec_name)
+              || 0 == strcmp(".debug_pubnames", sec_name)
+              || 0 == strcmp(".debug_aranges", sec_name)
+              || 0 == strcmp(".debug_frame", sec_name))) {
+         kind = SECTIONKIND_CODE_OR_RODATA;
+      }
+
+      /* fill in the section info */
+      if (kind != SECTIONKIND_OTHER && shdr[i].sh_size > 0) {
+         addProddableBlock(oc, ehdrC + shdr[i].sh_offset, shdr[i].sh_size);
+         //addSection(oc, kind, ehdrC + shdr[i].sh_offset,
+         //               ehdrC + shdr[i].sh_offset + shdr[i].sh_size - 1);
+      }
+
+      if (shdr[i].sh_type != SHT_SYMTAB) continue;
+
+      /* copy stuff into this module's object symbol table */
+      stab = (Elf_Sym*) (ehdrC + shdr[i].sh_offset);
+      nent = shdr[i].sh_size / sizeof(Elf_Sym);
+
+      /* NOTE(review): if the object contains more than one SHT_SYMTAB,
+         each iteration overwrites oc->n_symbols/oc->symbols, leaking
+         the previous allocation.  ocVerifyImage_ELF tolerates multiple
+         symtabs, so this is reachable in principle. */
+      oc->n_symbols = nent;
+      oc->symbols = mymalloc(oc->n_symbols * sizeof(char*));
+
+      for (j = 0; j < nent; j++) {
+
+         char  isLocal = FALSE; /* avoids uninit-var warning */
+         char* ad      = NULL;
+         char* nm      = strtab + stab[j].st_name;
+         int   secno   = stab[j].st_shndx;
+
+	 /* Figure out if we want to add it; if so, set ad to its
+            address.  Otherwise leave ad == NULL. */
+
+         if (secno == SHN_COMMON) {
+            /* COMMON symbol: commit zeroed storage for it here. */
+            isLocal = FALSE;
+#           if defined(__x86_64__)
+            ad = calloc_below2G(1, stab[j].st_size);
+#           else
+            ad = calloc(1, stab[j].st_size);
+#           endif
+    //	    assert( (Addr)ad < 0xF0000000ULL );
+
+	    if (0)
+            fprintf(stderr, "COMMON symbol, size %lld name %s  allocd %p\n",
+                            (Long)stab[j].st_size, nm, ad);
+	    /* Pointless to do addProddableBlock() for this area,
+               since the linker should never poke around in it. */
+	 }
+         else
+         if ( ( ELF_ST_BIND(stab[j].st_info)==STB_GLOBAL
+                || ELF_ST_BIND(stab[j].st_info)==STB_LOCAL
+              )
+              /* and not an undefined symbol */
+              && stab[j].st_shndx != SHN_UNDEF
+	      /* and not in a "special section" */
+              && stab[j].st_shndx < SHN_LORESERVE
+              &&
+	      /* and it's a not a section or string table or anything silly */
+              ( ELF_ST_TYPE(stab[j].st_info)==STT_FUNC ||
+                ELF_ST_TYPE(stab[j].st_info)==STT_OBJECT ||
+                ELF_ST_TYPE(stab[j].st_info)==STT_NOTYPE
+              )
+            ) {
+	    /* Section 0 is the undefined section, hence > and not >=. */
+            assert(secno > 0 && secno < ehdr->e_shnum);
+	    /*
+            if (shdr[secno].sh_type == SHT_NOBITS) {
+               fprintf(stderr, "   BSS symbol, size %d off %d name %s\n",
+                               stab[j].st_size, stab[j].st_value, nm);
+            }
+            */
+            ad = ehdrC + shdr[ secno ].sh_offset + stab[j].st_value;
+            if (ELF_ST_BIND(stab[j].st_info)==STB_LOCAL) {
+               isLocal = TRUE;
+            } else {
+#ifdef ELF_FUNCTION_DESC
+               /* dlsym() and the initialisation table both give us function
+		* descriptors, so to be consistent we store function descriptors
+		* in the symbol table */
+               if (ELF_ST_TYPE(stab[j].st_info) == STT_FUNC)
+                   ad = (char *)allocateFunctionDesc((Elf_Addr)ad);
+#endif
+               if (0|| debug_linker) 
+                   fprintf(stderr, "addOTabName(GLOB): %10p  %s %s\n",
+                                      ad, oc->fileName, nm );
+               isLocal = FALSE;
+            }
+         }
+
+         /* And the decision is ... */
+
+         if (ad != NULL) {
+            assert(nm != NULL);
+	    oc->symbols[j] = nm;
+            /* Acquire! */
+            if (isLocal) {
+               /* Ignore entirely. */
+            } else {
+	      //ghciInsertStrHashTable(oc->fileName, global_symbol_table, nm, ad);
+	      paranoid_addto_StringMap(global_symbol_table, nm, ad);
+            }
+         } else {
+            /* Skip. */
+            if (debug_linker>1) fprintf(stderr, "skipping `%s'\n",
+                                   strtab + stab[j].st_name );
+            /*
+            fprintf(stderr,
+                    "skipping   bind = %d,  type = %d,  shndx = %d   `%s'\n",
+                    (int)ELF_ST_BIND(stab[j].st_info),
+                    (int)ELF_ST_TYPE(stab[j].st_info),
+                    (int)stab[j].st_shndx,
+                    strtab + stab[j].st_name
+                   );
+            */
+            oc->symbols[j] = NULL;
+         }
+
+      }
+   }
+
+   return 1;
+}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// TOP-LEVEL CONTROL OF THE LINKER
+
+
+/* ---------------------------------------------------------------------
+ * Load an obj (populate the global symbol table, but don't resolve yet)
+ *
+ * Returns: 1 if ok, 0 on error.
+ */
+/* Load the ELF object at 'path' into memory (preceded by a one-page
+   fixup area for out-of-range branch trampolines), verify it, and
+   populate the global symbol table from it.  Relocation happens later
+   in resolveObjs().  Returns 1 on success, 0 on error; exits the
+   process on several unrecoverable errors (duplicate load, open/read
+   failure). */
+static
+int loadObj( char *path )
+{
+   ObjectCode* oc;
+   struct stat st;
+   int r;
+   int fd, pagesize;
+   char* p;
+
+   initLinker();
+
+   fprintf(stderr, "==== loadObj %s ====\n", path );
+
+   /* Check that we haven't already loaded this object.  */
+   {
+       ObjectCode *o;
+       int is_dup = 0;
+       for (o = global_object_list; o; o = o->next) {
+          if (0 == strcmp(o->fileName, path))
+             is_dup = 1;
+       }
+       /* Duplicate loads are treated as fatal, not as a warning. */
+       if (is_dup) {
+	 fprintf(stderr,
+            "\n\n"
+            "GHCi runtime linker: warning: looks like you're trying to load the\n"
+            "same object file twice:\n"
+            "   %s\n"
+            , path);
+	 exit(1);
+       }
+   }
+
+   oc = mymalloc(sizeof(ObjectCode));
+
+   oc->formatName = "ELF";
+
+   /* NOTE(review): on stat failure 'oc' is leaked (it has not yet been
+      chained onto global_object_list). */
+   r = stat(path, &st);
+   if (r == -1) { return 0; }
+
+   /* sigh, strdup() isn't a POSIX function, so do it the long way */
+   oc->fileName = mymalloc( strlen(path)+1 );
+   strcpy(oc->fileName, path);
+
+   oc->fileSize          = st.st_size;
+   oc->symbols           = NULL;
+   oc->sections          = NULL;
+   oc->lochash           = new_StringMap();
+   oc->proddables        = NULL;
+   oc->fixup             = NULL;
+   oc->fixup_used        = 0;
+   oc->fixup_size        = 0;
+
+   /* chain it onto the list of objects */
+   oc->next              = global_object_list;
+   global_object_list    = oc;
+
+   fd = open(path, O_RDONLY);
+   if (fd == -1) {
+      fprintf(stderr,"loadObj: can't open `%s'\n", path);
+      exit(1);
+   }
+
+   /* Allocate a 1-page area just prior to the image, so we can put
+      fixup code fragments there.  Used for doing R_ARM_PC24
+      relocations for jump distances > 64M. */
+
+   pagesize = getpagesize();
+   //   p = memalign(pagesize, N_FIXUP_PAGES * pagesize
+   //                          + oc->fileSize);
+   p = mymalloc(N_FIXUP_PAGES * pagesize + oc->fileSize);
+   if (0) fprintf(stderr,"XXXX p = %p\n", p);
+   if (p == NULL) {
+      fprintf(stderr,"loadObj: failed to allocate space for `%s'\n", path);
+      exit(1);
+   }
+
+   /* Layout: [fixup area (N_FIXUP_PAGES pages)][object image]. */
+   oc->fixup = p;
+   oc->fixup_size = N_FIXUP_PAGES * pagesize;
+   oc->fixup_used = 0;
+   oc->image = &(p[ oc->fixup_size ]);
+
+   /* NOTE(review): a single read() is assumed to transfer the whole
+      file; a short (but successful) read is reported as failure rather
+      than retried.  OK for regular files on most systems — confirm. */
+   r = read(fd, oc->image, oc->fileSize);
+   if (r != oc->fileSize) {
+      fprintf(stderr,"loadObj: failed to read `%s'\n", path);
+      exit(1);
+   }
+
+   fprintf(stderr, "loaded %s at %p (fixup = %p)\n", 
+                   oc->fileName, oc->image, oc->fixup );
+
+   close(fd);
+
+   /* verify the in-memory image */
+   r = ocVerifyImage_ELF ( oc );
+   if (!r) { return r; }
+
+   /* build the symbol list for this image */
+   r = ocGetNames_ELF ( oc );
+   if (!r) { return r; }
+
+   /* loaded, but not resolved yet */
+   oc->status = OBJECT_LOADED;
+
+#ifdef ppc32_TARGET_ARCH
+   invalidate_icache(oc->image, oc->fileSize);
+#endif
+
+   return 1;
+}
+
+
+
+/* ---------------------------------------------------------------------------
+ * resolve all the currently unlinked objects in memory
+ *
+ * Returns: 1 if ok, 0 on error.
+ */
+/* Resolve (relocate) every object on global_object_list that is not
+   yet resolved.  Returns 1 if all succeed, 0 on the first failure
+   (leaving later objects unresolved). */
+static
+int resolveObjs( void )
+{
+    ObjectCode *oc;
+    int r;
+
+    initLinker();
+
+    for (oc = global_object_list; oc; oc = oc->next) {
+	if (oc->status != OBJECT_RESOLVED) {
+	    r = ocResolve_ELF ( oc );
+	    if (!r) { return r; }
+	    oc->status = OBJECT_RESOLVED;
+	}
+    }
+    return 1;
+}
+
+
+/* ---------------------------------------------------------------------------
+ * Top-level linker.
+ */
+
+/* Load and link a bunch of .o's, and return the address of
+   'entry'.  Or NULL if something borks.
+*/
+/* Load each named object, resolve them all, then look up the symbol
+   "entry" in the global symbol table.  Returns its address, or NULL
+   if any load/resolve step fails or "entry" is not found. */
+void* linker_top_level_LINK ( int n_object_names, char** object_names )
+{
+   int   r, i;
+   void* mainp;
+
+   initLinker();
+   for (i = 0; i < n_object_names; i++) {
+      //fprintf(stderr, "linkloop %d %s\n", i, object_names[i] );
+      r = loadObj( object_names[i] );
+      if (r != 1) return NULL;
+   }
+   r = resolveObjs();
+   if (r != 1) return NULL;
+   mainp = search_StringMap ( global_symbol_table, "entry" );
+   if (mainp == NULL) return NULL;
+   printf("switchback: Linker: success!\n");
+   return mainp;
+}
+
+
+#endif
diff --git a/VEX/switchback/linker.h b/VEX/switchback/linker.h
new file mode 100644
index 0000000..e8b683e
--- /dev/null
+++ b/VEX/switchback/linker.h
@@ -0,0 +1,5 @@
+
+extern
+void* linker_top_level_LINK ( int n_object_names, char** object_names );
+
+extern void* mymalloc ( int );
diff --git a/VEX/switchback/switchback.c b/VEX/switchback/switchback.c
new file mode 100644
index 0000000..f3603d1
--- /dev/null
+++ b/VEX/switchback/switchback.c
@@ -0,0 +1,806 @@
+
+/* HOW TO USE
+
+13 Dec '05 - Linker no longer used (apart from mymalloc)
+Simply compile and link switchback.c with test_xxx.c,
+e.g. for ppc64:
+$ (cd .. && make EXTRA_CFLAGS="-m64" libvex_ppc64_linux.a) && gcc -m64 -mregnames -Wall -Wshadow -Wno-long-long -Winline -O -g -o switchback switchback.c linker.c ../libvex_ppc64_linux.a test_xxx.c
+
+Test file test_xxx.c must have an entry point called "entry",
+which expects to take a single argument which is a function pointer
+(to "serviceFn").
+
+Test file may not reference any other symbols.
+
+NOTE: POWERPC: it is critical, when using this on ppc, to set
+CacheLineSize to the right value.  Values we currently know of:
+
+   imac (G3):   32
+   G5 (ppc970): 128
+
+ARM64:
+  (cd .. && make -f Makefile-gcc libvex-arm64-linux.a) \
+     && $CC -Wall -O -g -o switchback switchback.c linker.c \
+     ../libvex-arm64-linux.a test_emfloat.c
+*/
+
+#include <stdio.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "../pub/libvex_basictypes.h"
+#include "../pub/libvex_guest_x86.h"
+#include "../pub/libvex_guest_amd64.h"
+#include "../pub/libvex_guest_ppc32.h"
+#include "../pub/libvex_guest_ppc64.h"
+#include "../pub/libvex_guest_arm64.h"
+#include "../pub/libvex.h"
+#include "../pub/libvex_trc_values.h"
+#include "linker.h"
+
+static ULong n_bbs_done = 0;
+static Int   n_translations_made = 0;
+
+
+#if defined(__i386__)
+#  define VexGuestState             VexGuestX86State
+#  define LibVEX_Guest_initialise   LibVEX_GuestX86_initialise
+#  define VexArch                   VexArchX86
+#  define VexSubArch                VexSubArchX86_sse1
+#  define GuestPC                   guest_EIP
+#  define CacheLineSize             0/*irrelevant*/
+
+#elif defined(__aarch64__) && !defined(__arm__)
+#  define VexGuestState             VexGuestARM64State
+#  define LibVEX_Guest_initialise   LibVEX_GuestARM64_initialise
+#  define VexArch                   VexArchARM64
+#  define VexSubArch                VexSubArch_NONE
+#  define GuestPC                   guest_PC
+#  define CacheLineSize             0/*irrelevant*/
+
+#else
+#   error "Unknown arch"
+#endif
+
+/* 7: show conversion into IR */
+/* 6: show after initial opt */
+/* 5: show after instrumentation */
+/* 4: show after second opt */
+/* 3: show after tree building */
+/* 2: show selected insns */
+/* 1: show after reg-alloc */
+/* 0: show final assembly */
+#define TEST_FLAGS ((1<<7)|(1<<3)|(1<<2)|(1<<1)|(1<<0))
+#define DEBUG_TRACE_FLAGS ((0<<7)|(0<<6)|(0<<5)|(0<<4)| \
+                           (0<<3)|(0<<2)|(0<<1)|(0<<0))
+
+typedef  unsigned long int  Addr;
+
+
+/* guest state */
+ULong gstack[64000] __attribute__((aligned(16)));
+VexGuestState gst;
+VexControl vcon;
+
+/* only used for the switchback transition */
+/* i386:  helper1 = &gst, helper2 = %EFLAGS */
+/* amd64: helper1 = &gst, helper2 = %EFLAGS */
+/* ppc32: helper1 = &gst, helper2 = %CR, helper3 = %XER */
+/* arm64: helper1 = &gst, helper2 = 32x0:NZCV:28x0 */
+HWord sb_helper1 = 0;
+HWord sb_helper2 = 0;
+HWord sb_helper3 = 0;
+
+/* translation cache */
+#define N_TRANS_CACHE 1000000
+#define N_TRANS_TABLE 10000
+
+ULong trans_cache[N_TRANS_CACHE];
+VexGuestExtents trans_table [N_TRANS_TABLE];
+ULong*          trans_tableP[N_TRANS_TABLE];
+
+Int trans_cache_used = 0;
+Int trans_table_used = 0;
+
+/* VEX callback: never allow the front end to chase a branch into
+   another block — each guest BB is translated separately. */
+static Bool chase_into_ok ( void* opaque, Addr64 dst ) {
+   return False;
+}
+
+/* VEX callback: no guest extents need self-modifying-code checks. */
+static UInt needs_self_check ( void* opaque, const VexGuestExtents* vge ) {
+   return 0;
+}
+
+
+/* For providing services. */
+/* Service-call dispatcher handed to the simulated guest code.
+   arg1 selects the service: 0=EXIT (prints stats, exits the process),
+   1=PUTC(arg2), 2=MALLOC(arg2 bytes), 3=FREE(arg2).
+   NOTE(review): with NDEBUG the default assert(0) compiles away and
+   control falls off the end of a non-void function — UB for an
+   unknown arg1. */
+static HWord serviceFn ( HWord arg1, HWord arg2 )
+{
+   switch (arg1) {
+      case 0: /* EXIT */
+         printf("---STOP---\n");
+         printf("serviceFn:EXIT\n");
+	 printf("%llu bbs simulated\n", n_bbs_done);
+	 printf("%d translations made, %d tt bytes\n", 
+                n_translations_made, 8*trans_cache_used);
+         exit(0);
+      case 1: /* PUTC */
+         putchar(arg2);
+         return 0;
+      case 2: /* MALLOC */
+         return (HWord)malloc(arg2);
+      case 3: /* FREE */
+         free((void*)arg2);
+         return 0;
+      default:
+         assert(0);
+   }
+}
+
+
+// Make freshly written code at [ptr, ptr+nbytes) visible to the
+// instruction stream on AArch64: clean D-cache lines to the point of
+// unification, then invalidate the corresponding I-cache lines, with
+// the required barriers.  Must be called after patching/copying code.
+static void invalidate_icache(void *ptr, unsigned long nbytes)
+{
+   // This function, invalidate_icache, for arm64_linux,
+   // is copied from
+   // https://github.com/armvixl/vixl/blob/master/src/a64/cpu-a64.cc
+   // which has the following copyright notice:
+   /*
+   Copyright 2013, ARM Limited
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+   
+   * Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above copyright notice,
+     this list of conditions and the following disclaimer in the documentation
+     and/or other materials provided with the distribution.
+   * Neither the name of ARM Limited nor the names of its contributors may be
+     used to endorse or promote products derived from this software without
+     specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+   ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+   DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+   FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+   SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+   CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+   OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+   */
+
+   // Ask what the I and D line sizes are
+   UInt cache_type_register;
+   // Copy the content of the cache type register to a core register.
+   __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
+                         : [ctr] "=r" (cache_type_register));
+
+   const Int kDCacheLineSizeShift = 16;
+   const Int kICacheLineSizeShift = 0;
+   const UInt kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
+   const UInt kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
+
+   // The cache type register holds the size of the I and D caches as a power of
+   // two.
+   const UInt dcache_line_size_power_of_two =
+       (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
+   const UInt icache_line_size_power_of_two =
+       (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
+
+   // NOTE(review): CTR_EL0 encodes line sizes in units of 4-byte words;
+   // this computes 1<<n (as in the original vixl code) — the loops below
+   // are still correct as long as the computed size divides the true
+   // line size.  Confirm against ARM ARM if reused elsewhere.
+   const UInt dcache_line_size_ = 1 << dcache_line_size_power_of_two;
+   const UInt icache_line_size_ = 1 << icache_line_size_power_of_two;
+
+   Addr start = (Addr)ptr;
+   // Sizes will be used to generate a mask big enough to cover a pointer.
+   Addr dsize = (Addr)dcache_line_size_;
+   Addr isize = (Addr)icache_line_size_;
+
+   // Cache line sizes are always a power of 2.
+   Addr dstart = start & ~(dsize - 1);
+   Addr istart = start & ~(isize - 1);
+   Addr end    = start + nbytes;
+
+   __asm__ __volatile__ (
+     // Clean every line of the D cache containing the target data.
+     "0: \n\t"
+     // dc : Data Cache maintenance
+     // c : Clean
+     // va : by (Virtual) Address
+     // u : to the point of Unification
+     // The point of unification for a processor is the point by which the
+     // instruction and data caches are guaranteed to see the same copy of a
+     // memory location. See ARM DDI 0406B page B2-12 for more information.
+     "dc cvau, %[dline] \n\t"
+     "add %[dline], %[dline], %[dsize] \n\t"
+     "cmp %[dline], %[end] \n\t"
+     "b.lt 0b \n\t"
+     // Barrier to make sure the effect of the code above is visible to the rest
+     // of the world.
+     // dsb : Data Synchronisation Barrier
+     // ish : Inner SHareable domain
+     // The point of unification for an Inner Shareable shareability domain is
+     // the point by which the instruction and data caches of all the processors
+     // in that Inner Shareable shareability domain are guaranteed to see the
+     // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+     // information.
+     "dsb ish \n\t"
+     // Invalidate every line of the I cache containing the target data.
+     "1: \n\t"
+     // ic : instruction cache maintenance
+     // i : invalidate
+     // va : by address
+     // u : to the point of unification
+     "ic ivau, %[iline] \n\t"
+     "add %[iline], %[iline], %[isize] \n\t"
+     "cmp %[iline], %[end] \n\t"
+     "b.lt 1b \n\t"
+     // Barrier to make sure the effect of the code above is visible to the rest
+     // of the world.
+     "dsb ish \n\t"
+     // Barrier to ensure any prefetching which happened before this code is
+     // discarded.
+     // isb : Instruction Synchronisation Barrier
+     "isb \n\t"
+     : [dline] "+r" (dstart),
+       [iline] "+r" (istart)
+     : [dsize] "r" (dsize),
+       [isize] "r" (isize),
+       [end] "r" (end)
+     // This code does not write to memory but without the dependency gcc might
+     // move this code before the code is generated.
+     : "cc", "memory"
+   );
+
+}
+
+
+/* -------------------- */
+/* continue execution on the real CPU (never returns) */
+
+#if defined(__i386__)
+
+/* i386 switchback: restore the guest's registers and EFLAGS from the
+   VEX guest state and jump to the guest continuation address, never
+   returning to the simulator.  Offsets into the guest state (0=EAX,
+   4=ECX, 8=EDX, 12=EBX, 16=ESP, 20=EBP, 24=ESI, 28=EDI, 56=EIP —
+   presumably matching VexGuestX86State layout; TODO confirm). */
+extern void switchback_asm(void);
+asm(
+"switchback_asm:\n"
+"   movl sb_helper1, %eax\n"  // eax = guest state ptr
+"   movl  16(%eax), %esp\n"   // switch stacks
+"   pushl 56(%eax)\n"         // push continuation addr
+"   movl sb_helper2, %ebx\n"  // get eflags
+"   pushl %ebx\n"             // eflags:CA
+"   pushl 0(%eax)\n"          //  EAX:eflags:CA
+"   movl 4(%eax), %ecx\n" 
+"   movl 8(%eax), %edx\n" 
+"   movl 12(%eax), %ebx\n" 
+"   movl 20(%eax), %ebp\n"
+"   movl 24(%eax), %esi\n"
+"   movl 28(%eax), %edi\n"
+"   popl %eax\n"
+"   popfl\n"
+"   ret\n"
+);
+/* Stage the guest state pointer and flattened EFLAGS in the
+   sb_helper globals, then jump to the asm above.  Never returns. */
+void switchback ( void )
+{
+   sb_helper1 = (HWord)&gst;
+   sb_helper2 = LibVEX_GuestX86_get_eflags(&gst);
+   switchback_asm(); // never returns
+}
+
+#elif defined(__aarch64__)
+
+/* AArch64 switchback: x0 = &guest state, x1 = NZCV bits (at 31:28).
+   Restores NZCV, TPIDR_EL0, SP, all 32 Q registers and x0..x30 from
+   the guest state (offsets asserted in switchback() below), then runs
+   into a nop that switchback() patches into a relative branch to the
+   guest continuation point.  x30 is used as scratch throughout, so it
+   is restored last among the X registers. */
+extern void switchback_asm(HWord x0_gst, HWord x1_pstate);
+asm(
+"switchback_asm:"
+"   mrs x30, nzcv"  "\n"
+"   and x30, x30, #0xFFFFFFFF0FFFFFFF"  "\n"
+"   and x1,  x1,  #0x00000000F0000000"  "\n"
+"   orr x30, x30, x1"  "\n"
+"   msr nzcv, x30"  "\n"
+
+"   ldr x30, [x0, #16 + 8*37]"  "\n"
+"   msr tpidr_el0, x30"  "\n"
+
+"   ldr x30, [x0, #16 + 8*31]"  "\n"
+"   mov sp,  x30"  "\n"
+
+"   add x30, x0, #(16 + 8*38 + 16*0)"  "\n"
+"   ldr q0,  [x30], #16"   "\n"
+"   ldr q1,  [x30], #16"   "\n"
+"   ldr q2,  [x30], #16"   "\n"
+"   ldr q3,  [x30], #16"   "\n"
+"   ldr q4,  [x30], #16"   "\n"
+"   ldr q5,  [x30], #16"   "\n"
+"   ldr q6,  [x30], #16"   "\n"
+"   ldr q7,  [x30], #16"   "\n"
+"   ldr q8,  [x30], #16"   "\n"
+"   ldr q9,  [x30], #16"   "\n"
+"   ldr q10, [x30], #16"   "\n"
+"   ldr q11, [x30], #16"   "\n"
+"   ldr q12, [x30], #16"   "\n"
+"   ldr q13, [x30], #16"   "\n"
+"   ldr q14, [x30], #16"   "\n"
+"   ldr q15, [x30], #16"   "\n"
+"   ldr q16, [x30], #16"   "\n"
+"   ldr q17, [x30], #16"   "\n"
+"   ldr q18, [x30], #16"   "\n"
+"   ldr q19, [x30], #16"   "\n"
+"   ldr q20, [x30], #16"   "\n"
+"   ldr q21, [x30], #16"   "\n"
+"   ldr q22, [x30], #16"   "\n"
+"   ldr q23, [x30], #16"   "\n"
+"   ldr q24, [x30], #16"   "\n"
+"   ldr q25, [x30], #16"   "\n"
+"   ldr q26, [x30], #16"   "\n"
+"   ldr q27, [x30], #16"   "\n"
+"   ldr q28, [x30], #16"   "\n"
+"   ldr q29, [x30], #16"   "\n"
+"   ldr q30, [x30], #16"   "\n"
+"   ldr q31, [x30], #16"   "\n"
+
+"   ldr x30, [x0, #16+8*30]"  "\n"
+"   ldr x29, [x0, #16+8*29]"  "\n"
+"   ldr x28, [x0, #16+8*28]"  "\n"
+"   ldr x27, [x0, #16+8*27]"  "\n"
+"   ldr x26, [x0, #16+8*26]"  "\n"
+"   ldr x25, [x0, #16+8*25]"  "\n"
+"   ldr x24, [x0, #16+8*24]"  "\n"
+"   ldr x23, [x0, #16+8*23]"  "\n"
+"   ldr x22, [x0, #16+8*22]"  "\n"
+"   ldr x21, [x0, #16+8*21]"  "\n"
+"   ldr x20, [x0, #16+8*20]"  "\n"
+"   ldr x19, [x0, #16+8*19]"  "\n"
+"   ldr x18, [x0, #16+8*18]"  "\n"
+"   ldr x17, [x0, #16+8*17]"  "\n"
+"   ldr x16, [x0, #16+8*16]"  "\n"
+"   ldr x15, [x0, #16+8*15]"  "\n"
+"   ldr x14, [x0, #16+8*14]"  "\n"
+"   ldr x13, [x0, #16+8*13]"  "\n"
+"   ldr x12, [x0, #16+8*12]"  "\n"
+"   ldr x11, [x0, #16+8*11]"  "\n"
+"   ldr x10, [x0, #16+8*10]"  "\n"
+"   ldr x9,  [x0, #16+8*9]"   "\n"
+"   ldr x8,  [x0, #16+8*8]"   "\n"
+"   ldr x7,  [x0, #16+8*7]"   "\n"
+"   ldr x6,  [x0, #16+8*6]"   "\n"
+"   ldr x5,  [x0, #16+8*5]"   "\n"
+"   ldr x4,  [x0, #16+8*4]"   "\n"
+"   ldr x3,  [x0, #16+8*3]"   "\n"
+"   ldr x2,  [x0, #16+8*2]"   "\n"
+"   ldr x1,  [x0, #16+8*1]"   "\n"
+"   ldr x0,  [x0, #16+8*0]"   "\n"
+
+"nop_start_point:"            "\n"
+"   nop"  "\n" // this will be converted into a relative jump
+"nop_end_point:"              "\n"
+);
+
+extern void nop_start_point(void);
+extern void nop_end_point(void);
+
+/* AArch64 switchback: copy the switchback_asm stub into heap memory,
+   patch its trailing nop into a PC-relative B to the guest's PC, sync
+   the I-cache, and jump into the copy.  Never returns.
+   NOTE(review): assumes mymalloc returns memory that is executable
+   (e.g. no-NX mapping) — TODO confirm. */
+void switchback ( void )
+{
+  /* Pin the guest-state offsets the asm stub hard-codes. */
+  assert(offsetof(VexGuestARM64State, guest_X0)  == 16 + 8*0);
+  assert(offsetof(VexGuestARM64State, guest_X30) == 16 + 8*30);
+  assert(offsetof(VexGuestARM64State, guest_SP)  == 16 + 8*31);
+  assert(offsetof(VexGuestARM64State, guest_TPIDR_EL0) == 16 + 8*37);
+  assert(offsetof(VexGuestARM64State, guest_Q0)  == 16 + 8*38 + 16*0);
+
+  HWord arg0 = (HWord)&gst;
+  HWord arg1 = LibVEX_GuestARM64_get_nzcv(&gst);
+
+  /* Copy the entire switchback_asm procedure into writable and
+     executable memory. */
+
+  UChar* sa_start     = (UChar*)&switchback_asm;
+  UChar* sa_nop_start = (UChar*)&nop_start_point;
+  UChar* sa_end       = (UChar*)&nop_end_point;
+
+  Int i;
+  Int nbytes       = sa_end - sa_start;
+  Int off_nopstart = sa_nop_start - sa_start;
+  if (0)
+     printf("nbytes = %d, nopstart = %d\n", nbytes, off_nopstart);
+
+   /* copy it into mallocville */
+   UChar* copy = mymalloc(nbytes);
+   assert(copy);
+   for (i = 0; i < nbytes; i++)
+      copy[i] = sa_start[i];
+
+   UInt* p = (UInt*)(&copy[off_nopstart]);
+
+   Addr addr_of_nop = (Addr)p;
+   Addr where_to_go = gst.guest_PC;
+   Long   diff = ((Long)where_to_go) - ((Long)addr_of_nop);
+
+   if (0) {
+     printf("addr of first nop = 0x%llx\n", addr_of_nop);
+     printf("where to go       = 0x%llx\n", where_to_go);
+     printf("diff = 0x%llx\n", diff);
+   }
+
+   /* B's imm26 covers +/-128MB; bail out if the target is further. */
+   if (diff < -0x8000000LL || diff >= 0x8000000LL) {
+     // we're hosed.  Give up
+     printf("hosed -- offset too large\n");
+     assert(0);
+   }
+
+   /* stay sane ... */
+   assert(p[0] == 0xd503201f); /* nop */
+
+   /* branch to diff */
+   p[0] = 0x14000000 | ((diff >> 2) & 0x3FFFFFF);
+
+   invalidate_icache( copy, nbytes );
+
+   ( (void(*)(HWord,HWord))copy )(arg0, arg1);
+}
+
+#else
+# error "Unknown plat"
+#endif
+
+
+
+/* -------------------- */
+// f    holds is the host code address
+// gp   holds the guest state pointer to use
+// res  is to hold the result.  Or some such.
+/* Mailbox for run_translation_asm: block[0] = host code address to
+   run, block[1] = guest state pointer.  (The "// f, gp;" comment is a
+   relic of earlier named globals.) */
+static HWord block[2]; // f, gp;
+extern HWord run_translation_asm(void);
+
+extern void disp_chain_assisted(void);
+
+#if defined(__aarch64__)
+/* Save callee-saved regs, load GSP (x21) and the host code address
+   from block[], and branch to the translation.  The translation ends
+   by jumping to disp_chain_assisted, which restores the saved regs
+   and returns the TRC (left in x21 by the translation). */
+asm(
+"run_translation_asm:"            "\n"
+"   stp  x29, x30, [sp, #-16]!"   "\n"
+"   stp  x27, x28, [sp, #-16]!"   "\n"
+"   stp  x25, x26, [sp, #-16]!"   "\n"
+"   stp  x23, x24, [sp, #-16]!"   "\n"
+"   stp  x21, x22, [sp, #-16]!"   "\n"
+"   stp  x19, x20, [sp, #-16]!"   "\n"
+"   stp  x0,  xzr, [sp, #-16]!"   "\n"
+"   adrp x0, block"               "\n"
+"   add  x0, x0, :lo12:block"     "\n"
+"   ldr  x21, [x0, #8]"           "\n"  // load GSP
+"   ldr  x1,  [x0, #0]"           "\n"  // Host address
+"   br   x1"                 "\n"  // go (we wind up at disp_chain_assisted)
+
+"disp_chain_assisted:"            "\n" // x21 holds the trc.  Return it.
+"   mov  x1, x21" "\n"
+    /* Restore int regs, but not x1. */
+"   ldp  x0,  xzr, [sp], #16"    "\n"
+"   ldp  x19, x20, [sp], #16"    "\n"
+"   ldp  x21, x22, [sp], #16"    "\n"
+"   ldp  x23, x24, [sp], #16"    "\n"
+"   ldp  x25, x26, [sp], #16"    "\n"
+"   ldp  x27, x28, [sp], #16"    "\n"
+"   ldp  x29, x30, [sp], #16"    "\n"
+"   mov  x0, x1"                 "\n"
+"   ret"                         "\n"
+);
+
+#elif defined(__i386__)
+
+/* NOTE(review): this i386 variant still references the old globals
+   'gp', 'f' and 'res', which no longer exist (replaced by block[] and
+   the return value above) — it cannot link as-is.  Needs updating to
+   the block[] scheme before the i386 build can work. */
+asm(
+"run_translation_asm:\n"
+"   pushal\n"
+"   movl gp, %ebp\n"
+"   movl f, %eax\n"
+"   call *%eax\n"
+"   movl %eax, res\n"
+"   popal\n"
+"   ret\n"
+);
+
+#else
+# error "Unknown arch"
+#endif
+
+
+/* Run a translation at host address 'translation' and return the TRC.
+*/
+HWord run_translation ( HWord translation )
+{
+   if (0 && DEBUG_TRACE_FLAGS) {
+      printf(" run translation %p\n", (void*)translation );
+      printf(" simulated bb: %llu\n", n_bbs_done);
+   }
+   block[0] = translation;
+   block[1] = (HWord)&gst;
+   HWord trc = run_translation_asm();
+   n_bbs_done ++;
+   return trc;
+}
+
+HWord find_translation ( Addr guest_addr )
+{
+   Int i;
+   HWord __res;
+   if (0)
+     printf("find translation %p ... ", (void *)(guest_addr));
+   for (i = 0; i < trans_table_used; i++)
+     if (trans_table[i].base[0] == guest_addr)
+        break;
+   if (i == trans_table_used) {
+      if (0) printf("none\n");
+      return 0; /* not found */
+   }
+
+   /* Move this translation one step towards the front, so finding it
+      next time round is just that little bit cheaper. */
+   if (i > 2) {
+      VexGuestExtents tmpE = trans_table[i-1];
+      ULong*          tmpP = trans_tableP[i-1];
+      trans_table[i-1]  = trans_table[i];
+      trans_tableP[i-1] = trans_tableP[i];
+      trans_table[i] = tmpE;
+      trans_tableP[i] = tmpP;
+      i--;
+   }
+
+   __res = (HWord)trans_tableP[i];
+   if (0) printf("%p\n", (void*)__res);
+   return __res;
+}
+
+#define N_TRANSBUF 5000
+static UChar transbuf[N_TRANSBUF];
+/* Translate the guest code starting at 'guest_addr' with LibVEX and
+   append the generated host code to the translation cache and table.
+   If 'verbose', translate with TEST_FLAGS so the pipeline is traced.
+   May first discard ALL existing translations if either the table or
+   the cache is close to full. */
+void make_translation ( Addr guest_addr, Bool verbose )
+{
+   VexTranslateArgs   vta;
+   VexTranslateResult tres;
+   VexArchInfo vex_archinfo;
+   Int trans_used, i, ws_needed;
+
+   memset(&vta, 0, sizeof(vta));
+   memset(&tres, 0, sizeof(tres));
+   memset(&vex_archinfo, 0, sizeof(vex_archinfo));
+
+   if (trans_table_used >= N_TRANS_TABLE
+       || trans_cache_used >= N_TRANS_CACHE-1000) {
+      /* If things are looking too full, just dump
+         all the translations. */
+      trans_cache_used = 0;
+      trans_table_used = 0;
+   }
+
+   assert(trans_table_used < N_TRANS_TABLE);
+   if (0)
+     printf("make translation %p\n", (void *)guest_addr);
+
+   LibVEX_default_VexArchInfo(&vex_archinfo);
+   //vex_archinfo.subarch = VexSubArch;
+   //vex_archinfo.ppc_icache_line_szB = CacheLineSize;
+
+   /* Same arch for guest and host: this is self-translation. */
+   vta.arch_guest       = VexArch;
+   vta.archinfo_guest   = vex_archinfo;
+   vta.arch_host        = VexArch;
+   vta.archinfo_host    = vex_archinfo;
+   vta.guest_bytes      = (UChar*)guest_addr;
+   vta.guest_bytes_addr = guest_addr;
+   vta.chase_into_ok    = chase_into_ok;
+//   vta.guest_extents    = &vge;
+   vta.guest_extents    = &trans_table[trans_table_used];
+   vta.host_bytes       = transbuf;
+   vta.host_bytes_size  = N_TRANSBUF;
+   vta.host_bytes_used  = &trans_used;
+   vta.instrument1      = NULL;
+   vta.instrument2      = NULL;
+   vta.needs_self_check = needs_self_check;
+   vta.traceflags       = verbose ? TEST_FLAGS : DEBUG_TRACE_FLAGS;
+
+   /* No block chaining: every exit comes back to the dispatcher
+      through the assisted path. */
+   vta.disp_cp_chain_me_to_slowEP = NULL; //disp_chain_fast;
+   vta.disp_cp_chain_me_to_fastEP = NULL; //disp_chain_slow;
+   vta.disp_cp_xindir             = NULL; //disp_chain_indir;
+   vta.disp_cp_xassisted          = disp_chain_assisted;
+
+   vta.addProfInc       = False;
+
+   tres = LibVEX_Translate ( &vta );
+
+   assert(tres.status == VexTransOK);
+   assert(tres.offs_profInc == -1);
+
+   /* Round the byte count up to whole ULong words of the cache. */
+   ws_needed = (trans_used+7) / 8;
+   assert(ws_needed > 0);
+   assert(trans_cache_used + ws_needed < N_TRANS_CACHE);
+   n_translations_made++;
+
+   /* Byte-wise copy of the generated code into the cache. */
+   for (i = 0; i < trans_used; i++) {
+      HChar* dst = ((HChar*)(&trans_cache[trans_cache_used])) + i;
+      HChar* src = (HChar*)(&transbuf[i]);
+      *dst = *src;
+   }
+
+#if defined(__aarch64__)
+   /* The copied code is about to be executed: make the i-side see it. */
+   invalidate_icache( &trans_cache[trans_cache_used], trans_used );
+#endif
+
+   trans_tableP[trans_table_used] = &trans_cache[trans_cache_used];
+   trans_table_used++;
+   trans_cache_used += ws_needed;
+}
+
+
+__attribute__((unused))
+static Bool overlap ( Addr start, UInt len, VexGuestExtents* vge )
+{
+   Int i;
+   for (i = 0; i < vge->n_used; i++) {
+     if (vge->base[i]+vge->len[i] <= start
+         || vge->base[i] >= start+len) {
+       /* ok */
+     } else {
+        return True;
+     }
+   }
+   return False; /* no overlap */
+}
+
+static ULong  stopAfter = 0;
+static UChar* entryP    = NULL;
+
+
+__attribute__ ((noreturn))
+static
+void failure_exit ( void )
+{
+   fprintf(stdout, "VEX did failure_exit.  Bye.\n");
+   fprintf(stdout, "bb counter = %llu\n\n", n_bbs_done);
+   exit(1);
+}
+
+static
+void log_bytes ( HChar* bytes, Int nbytes )
+{
+   fwrite ( bytes, 1, nbytes, stdout );
+   fflush ( stdout );
+}
+
+
+/* run simulated code forever (it will exit by calling
+   serviceFn(0)). */
+/* Main simulation loop: repeatedly look up (or create) the translation
+   for the current guest PC and run it.  Simulated calls to serviceFn
+   are intercepted and performed for real on the host.  When n_bbs_done
+   reaches stopAfter, the relevant translation is re-made verbosely for
+   inspection and control transfers back to native execution via
+   switchback(), which does not return. */
+static void run_simulator ( void )
+{
+   static Addr last_guest = 0;
+   Addr  next_guest;
+   HWord next_host;
+   while (1) {
+      next_guest = gst.GuestPC;
+
+      if (0)
+         printf("\nnext_guest: 0x%x\n", (UInt)next_guest);
+
+      if (next_guest == (Addr)&serviceFn) {
+
+         /* "do" the function call to serviceFn */
+#        if defined(__i386__)
+         {
+            /* Pop the return address into EIP, make the call for real
+               with the two stacked arguments, and fake up the ret. */
+            HWord esp = gst.guest_ESP;
+            gst.guest_EIP = *(UInt*)(esp+0);
+            gst.guest_EAX = serviceFn( *(UInt*)(esp+4), *(UInt*)(esp+8) );
+            gst.guest_ESP = esp+4;
+            next_guest = gst.guest_EIP;
+         }
+#        elif defined(__aarch64__)
+         {
+            /* Args in X0/X1, result back in X0, return address in X30. */
+            gst.guest_X0 = serviceFn( gst.guest_X0, gst.guest_X1 );
+            gst.guest_PC = gst.guest_X30;
+            next_guest   = gst.guest_PC;
+         }
+#        else
+#        error "Unknown arch"
+#        endif
+      }
+
+      /* Translate on demand. */
+      next_host = find_translation(next_guest);
+      if (next_host == 0) {
+         make_translation(next_guest,False);
+         next_host = find_translation(next_guest);
+         assert(next_host != 0);
+      }
+
+      // Switchback
+      if (n_bbs_done == stopAfter) {
+         printf("---begin SWITCHBACK at bb:%llu---\n", n_bbs_done);
+#if 1
+         if (last_guest) {
+            printf("\n*** Last run translation (bb:%llu):\n", n_bbs_done-1);
+            make_translation(last_guest,True);
+         }
+#endif
+#if 0
+         if (next_guest) {
+            printf("\n*** Current translation (bb:%llu):\n", n_bbs_done);
+            make_translation(next_guest,True);
+         }
+#endif
+         printf("---  end SWITCHBACK at bb:%llu ---\n", n_bbs_done);
+         switchback();
+         assert(0); /*NOTREACHED*/
+      }
+
+      last_guest = next_guest;
+      HWord trc = run_translation(next_host);
+      if (0) printf("------- trc = %lu\n", trc);
+      if (trc != VEX_TRC_JMP_BORING) {
+        if (1) printf("------- trc = %lu\n", trc);
+      }
+      /* With chaining disabled, every exit should be a plain
+         fall-through to the dispatcher. */
+      assert(trc == VEX_TRC_JMP_BORING);
+   }
+}
+
+
/* Print the command-line synopsis and terminate unsuccessfully. */
static void usage ( void )
{
   fputs("usage: switchback #bbs\n", stdout);
   fputs("   - begins switchback for basic block #bbs\n", stdout);
   fputs("   - use -1 for largest possible run without switchback\n\n", stdout);
   exit(1);
}
+
+
+/* Entry point: parse the switchback trigger count, set up LibVEX and a
+   fresh guest state that looks like a call to entry(serviceFn), then
+   hand control to the simulator loop. */
+int main ( Int argc, HChar** argv )
+{
+   if (argc != 2)
+      usage();
+
+   /* Basic-block count at which to switch back to native execution. */
+   stopAfter = (ULong)atoll(argv[1]);
+
+   extern void entry ( void*(*service)(int,int) );
+   entryP = (UChar*)&entry;
+
+   if (!entryP) {
+      printf("switchback: can't find entry point\n");
+      exit(1);
+   }
+
+   LibVEX_default_VexControl(&vcon);
+   vcon.guest_max_insns=50 - 49; // i.e. 1 insn per superblock -- TODO confirm intent
+   vcon.guest_chase_thresh=0;
+   vcon.iropt_level=2;
+
+   LibVEX_Init( failure_exit, log_bytes, 1, &vcon );
+   LibVEX_Guest_initialise(&gst);
+   gst.host_EvC_COUNTER  = 999999999; // so we should never get an exit
+   gst.host_EvC_FAILADDR = 0x5a5a5a5a5a5a5a5a;
+
+   /* set up as if a call to the entry point passing serviceFn as 
+      the one and only parameter */
+#  if defined(__i386__)
+   /* Fake stack frame: bogus return address, then the argument. */
+   gst.guest_EIP = (UInt)entryP;
+   gst.guest_ESP = (UInt)&gstack[32000];
+   *(UInt*)(gst.guest_ESP+4) = (UInt)serviceFn;
+   *(UInt*)(gst.guest_ESP+0) = 0x12345678;
+
+#  elif defined(__aarch64__)
+   /* AAPCS64: first argument in X0; give the guest the host's TLS
+      base so thread-pointer reads behave. */
+   gst.guest_PC = (ULong)entryP;
+   gst.guest_SP = (ULong)&gstack[32000];
+   gst.guest_X0 = (ULong)serviceFn;
+   HWord tpidr_el0 = 0;
+   __asm__ __volatile__("mrs %0, tpidr_el0" : "=r"(tpidr_el0));
+   gst.guest_TPIDR_EL0 = tpidr_el0;
+
+#  else
+#  error "Unknown arch"
+#  endif
+
+   printf("\n---START---\n");
+
+#if 1
+   run_simulator();
+#else
+   ( (void(*)(HWord(*)(HWord,HWord))) entryP ) (serviceFn);
+#endif
+
+
+   return 0;
+}
diff --git a/VEX/switchback/test_bzip2.c b/VEX/switchback/test_bzip2.c
new file mode 100644
index 0000000..19fc822
--- /dev/null
+++ b/VEX/switchback/test_bzip2.c
@@ -0,0 +1,6115 @@
+
+#define BZ_NO_STDIO
+
+
+/*-------------------------------------------------------------*/
+/*--- Private header file for the library.                  ---*/
+/*---                                       bzlib_private.h ---*/
+/*-------------------------------------------------------------*/
+
+/*--
+  This file is a part of bzip2 and/or libbzip2, a program and
+  library for lossless, block-sorting data compression.
+
+  Copyright (C) 1996-2004 Julian R Seward.  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. The origin of this software must not be misrepresented; you must 
+     not claim that you wrote the original software.  If you use this 
+     software in a product, an acknowledgment in the product 
+     documentation would be appreciated but is not required.
+
+  3. Altered source versions must be plainly marked as such, and must
+     not be misrepresented as being the original software.
+
+  4. The name of the author may not be used to endorse or promote 
+     products derived from this software without specific prior written 
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  Julian Seward, Cambridge, UK.
+  jseward@bzip.org
+  bzip2/libbzip2 version 1.0 of 21 March 2000
+
+  This program is based on (at least) the work of:
+     Mike Burrows
+     David Wheeler
+     Peter Fenwick
+     Alistair Moffat
+     Radford Neal
+     Ian H. Witten
+     Robert Sedgewick
+     Jon L. Bentley
+
+  For more information on these sources, see the manual.
+--*/
+
+
+#ifndef _BZLIB_PRIVATE_H
+#define _BZLIB_PRIVATE_H
+
+#include <stdlib.h>
+
+#ifndef BZ_NO_STDIO
+#include <stdio.h>
+#include <ctype.h>
+#include <string.h>
+#endif
+
+
+/*-------------------------------------------------------------*/
+/*--- Public header file for the library.                   ---*/
+/*---                                               bzlib.h ---*/
+/*-------------------------------------------------------------*/
+
+/*--
+  This file is a part of bzip2 and/or libbzip2, a program and
+  library for lossless, block-sorting data compression.
+
+  Copyright (C) 1996-2004 Julian R Seward.  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. The origin of this software must not be misrepresented; you must 
+     not claim that you wrote the original software.  If you use this 
+     software in a product, an acknowledgment in the product 
+     documentation would be appreciated but is not required.
+
+  3. Altered source versions must be plainly marked as such, and must
+     not be misrepresented as being the original software.
+
+  4. The name of the author may not be used to endorse or promote 
+     products derived from this software without specific prior written 
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  Julian Seward, Cambridge, UK.
+  jseward@bzip.org
+  bzip2/libbzip2 version 1.0 of 21 March 2000
+
+  This program is based on (at least) the work of:
+     Mike Burrows
+     David Wheeler
+     Peter Fenwick
+     Alistair Moffat
+     Radford Neal
+     Ian H. Witten
+     Robert Sedgewick
+     Jon L. Bentley
+
+  For more information on these sources, see the manual.
+--*/
+
+
+#ifndef _BZLIB_H
+#define _BZLIB_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define BZ_RUN               0
+#define BZ_FLUSH             1
+#define BZ_FINISH            2
+
+#define BZ_OK                0
+#define BZ_RUN_OK            1
+#define BZ_FLUSH_OK          2
+#define BZ_FINISH_OK         3
+#define BZ_STREAM_END        4
+#define BZ_SEQUENCE_ERROR    (-1)
+#define BZ_PARAM_ERROR       (-2)
+#define BZ_MEM_ERROR         (-3)
+#define BZ_DATA_ERROR        (-4)
+#define BZ_DATA_ERROR_MAGIC  (-5)
+#define BZ_IO_ERROR          (-6)
+#define BZ_UNEXPECTED_EOF    (-7)
+#define BZ_OUTBUFF_FULL      (-8)
+#define BZ_CONFIG_ERROR      (-9)
+
+/* Public bzlib stream descriptor, shared by the compress and
+   decompress sides.  The caller feeds/drains data through
+   next_in/next_out; the running 64-bit byte totals are kept as
+   lo32/hi32 halves.  'state' points at the library-internal
+   EState/DState; bzalloc/bzfree/opaque are the user-pluggable
+   allocator hooks (see BZALLOC/BZFREE below). */
+typedef 
+   struct {
+      char *next_in;               /* next input byte to consume */
+      unsigned int avail_in;       /* bytes readable at next_in */
+      unsigned int total_in_lo32;  /* total bytes consumed, low half */
+      unsigned int total_in_hi32;  /* total bytes consumed, high half */
+
+      char *next_out;              /* next output byte to write */
+      unsigned int avail_out;      /* space remaining at next_out */
+      unsigned int total_out_lo32; /* total bytes produced, low half */
+      unsigned int total_out_hi32; /* total bytes produced, high half */
+
+      void *state;                 /* internal per-stream state */
+
+      void *(*bzalloc)(void *,int,int); /* (opaque, nitems, size) */
+      void (*bzfree)(void *,void *);    /* (opaque, ptr) */
+      void *opaque;                     /* passed through to the hooks */
+   } 
+   bz_stream;
+
+
+#ifndef BZ_IMPORT
+#define BZ_EXPORT
+#endif
+
+#ifndef BZ_NO_STDIO
+/* Need a definition for FILE */
+#include <stdio.h>
+#endif
+
+#ifdef _WIN32
+#   include <windows.h>
+#   ifdef small
+      /* windows.h define small to char */
+#      undef small
+#   endif
+#   ifdef BZ_EXPORT
+#   define BZ_API(func) WINAPI func
+#   define BZ_EXTERN extern
+#   else
+   /* import windows dll dynamically */
+#   define BZ_API(func) (WINAPI * func)
+#   define BZ_EXTERN
+#   endif
+#else
+#   define BZ_API(func) func
+#   define BZ_EXTERN extern
+#endif
+
+
+/*-- Core (low-level) library functions --*/
+
+BZ_EXTERN int BZ_API(BZ2_bzCompressInit) ( 
+      bz_stream* strm, 
+      int        blockSize100k, 
+      int        verbosity, 
+      int        workFactor 
+   );
+
+BZ_EXTERN int BZ_API(BZ2_bzCompress) ( 
+      bz_stream* strm, 
+      int action 
+   );
+
+BZ_EXTERN int BZ_API(BZ2_bzCompressEnd) ( 
+      bz_stream* strm 
+   );
+
+BZ_EXTERN int BZ_API(BZ2_bzDecompressInit) ( 
+      bz_stream *strm, 
+      int       verbosity, 
+      int       small
+   );
+
+BZ_EXTERN int BZ_API(BZ2_bzDecompress) ( 
+      bz_stream* strm 
+   );
+
+BZ_EXTERN int BZ_API(BZ2_bzDecompressEnd) ( 
+      bz_stream *strm 
+   );
+
+
+
+/*-- High(er) level library functions --*/
+
+#ifndef BZ_NO_STDIO
+#define BZ_MAX_UNUSED 5000
+
+typedef void BZFILE;
+
+BZ_EXTERN BZFILE* BZ_API(BZ2_bzReadOpen) ( 
+      int*  bzerror,   
+      FILE* f, 
+      int   verbosity, 
+      int   small,
+      void* unused,    
+      int   nUnused 
+   );
+
+BZ_EXTERN void BZ_API(BZ2_bzReadClose) ( 
+      int*    bzerror, 
+      BZFILE* b 
+   );
+
+BZ_EXTERN void BZ_API(BZ2_bzReadGetUnused) ( 
+      int*    bzerror, 
+      BZFILE* b, 
+      void**  unused,  
+      int*    nUnused 
+   );
+
+BZ_EXTERN int BZ_API(BZ2_bzRead) ( 
+      int*    bzerror, 
+      BZFILE* b, 
+      void*   buf, 
+      int     len 
+   );
+
+BZ_EXTERN BZFILE* BZ_API(BZ2_bzWriteOpen) ( 
+      int*  bzerror,      
+      FILE* f, 
+      int   blockSize100k, 
+      int   verbosity, 
+      int   workFactor 
+   );
+
+BZ_EXTERN void BZ_API(BZ2_bzWrite) ( 
+      int*    bzerror, 
+      BZFILE* b, 
+      void*   buf, 
+      int     len 
+   );
+
+BZ_EXTERN void BZ_API(BZ2_bzWriteClose) ( 
+      int*          bzerror, 
+      BZFILE*       b, 
+      int           abandon, 
+      unsigned int* nbytes_in, 
+      unsigned int* nbytes_out 
+   );
+
+BZ_EXTERN void BZ_API(BZ2_bzWriteClose64) ( 
+      int*          bzerror, 
+      BZFILE*       b, 
+      int           abandon, 
+      unsigned int* nbytes_in_lo32, 
+      unsigned int* nbytes_in_hi32, 
+      unsigned int* nbytes_out_lo32, 
+      unsigned int* nbytes_out_hi32
+   );
+#endif
+
+
+/*-- Utility functions --*/
+
+BZ_EXTERN int BZ_API(BZ2_bzBuffToBuffCompress) ( 
+      char*         dest, 
+      unsigned int* destLen,
+      char*         source, 
+      unsigned int  sourceLen,
+      int           blockSize100k, 
+      int           verbosity, 
+      int           workFactor 
+   );
+
+BZ_EXTERN int BZ_API(BZ2_bzBuffToBuffDecompress) ( 
+      char*         dest, 
+      unsigned int* destLen,
+      char*         source, 
+      unsigned int  sourceLen,
+      int           small, 
+      int           verbosity 
+   );
+
+
+/*--
+   Code contributed by Yoshioka Tsuneo
+   (QWF00133@niftyserve.or.jp/tsuneo-y@is.aist-nara.ac.jp),
+   to support better zlib compatibility.
+   This code is not _officially_ part of libbzip2 (yet);
+   I haven't tested it, documented it, or considered the
+   threading-safeness of it.
+   If this code breaks, please contact both Yoshioka and me.
+--*/
+
+BZ_EXTERN const char * BZ_API(BZ2_bzlibVersion) (
+      void
+   );
+
+#ifndef BZ_NO_STDIO
+BZ_EXTERN BZFILE * BZ_API(BZ2_bzopen) (
+      const char *path,
+      const char *mode
+   );
+
+BZ_EXTERN BZFILE * BZ_API(BZ2_bzdopen) (
+      int        fd,
+      const char *mode
+   );
+         
+BZ_EXTERN int BZ_API(BZ2_bzread) (
+      BZFILE* b, 
+      void* buf, 
+      int len 
+   );
+
+BZ_EXTERN int BZ_API(BZ2_bzwrite) (
+      BZFILE* b, 
+      void*   buf, 
+      int     len 
+   );
+
+BZ_EXTERN int BZ_API(BZ2_bzflush) (
+      BZFILE* b
+   );
+
+BZ_EXTERN void BZ_API(BZ2_bzclose) (
+      BZFILE* b
+   );
+
+BZ_EXTERN const char * BZ_API(BZ2_bzerror) (
+      BZFILE *b, 
+      int    *errnum
+   );
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/*-------------------------------------------------------------*/
+/*--- end                                           bzlib.h ---*/
+/*-------------------------------------------------------------*/
+
+
+
+
+/*-- General stuff. --*/
+
+#define BZ_VERSION  "1.0.3, 17-Oct-2004"
+
+typedef char            Char;
+typedef unsigned char   Bool;
+typedef unsigned char   UChar;
+typedef int             Int32;
+typedef unsigned int    UInt32;
+typedef short           Int16;
+typedef unsigned short  UInt16;
+
+#define True  ((Bool)1)
+#define False ((Bool)0)
+
+#ifndef __GNUC__
+#define __inline__  /* */
+#endif 
+
+#ifndef BZ_NO_STDIO
+extern void BZ2_bz__AssertH__fail ( int errcode );
+#define AssertH(cond,errcode) \
+   { if (!(cond)) BZ2_bz__AssertH__fail ( errcode ); }
+#if BZ_DEBUG
+#define AssertD(cond,msg) \
+   { if (!(cond)) {       \
+      fprintf ( stderr,   \
+        "\n\nlibbzip2(debug build): internal error\n\t%s\n", msg );\
+      exit(1); \
+   }}
+#else
+#define AssertD(cond,msg) /* */
+#endif
+#define VPrintf0(zf) \
+   fprintf(stderr,zf)
+#define VPrintf1(zf,za1) \
+   fprintf(stderr,zf,za1)
+#define VPrintf2(zf,za1,za2) \
+   fprintf(stderr,zf,za1,za2)
+#define VPrintf3(zf,za1,za2,za3) \
+   fprintf(stderr,zf,za1,za2,za3)
+#define VPrintf4(zf,za1,za2,za3,za4) \
+   fprintf(stderr,zf,za1,za2,za3,za4)
+#define VPrintf5(zf,za1,za2,za3,za4,za5) \
+   fprintf(stderr,zf,za1,za2,za3,za4,za5)
+#else
+extern void bz_internal_error ( int errcode );
+#define AssertH(cond,errcode) \
+   { if (!(cond)) bz_internal_error ( errcode ); }
+#define AssertD(cond,msg) /* */
+#define VPrintf0(zf) \
+   vexxx_printf(zf)
+#define VPrintf1(zf,za1) \
+   vexxx_printf(zf,za1)
+#define VPrintf2(zf,za1,za2) \
+   vexxx_printf(zf,za1,za2)
+#define VPrintf3(zf,za1,za2,za3) \
+   vexxx_printf(zf,za1,za2,za3)
+#define VPrintf4(zf,za1,za2,za3,za4) \
+   vexxx_printf(zf,za1,za2,za3,za4)
+#define VPrintf5(zf,za1,za2,za3,za4,za5) \
+   vexxx_printf(zf,za1,za2,za3,za4,za5)
+#endif
+
+
+#define BZALLOC(nnn) (strm->bzalloc)(strm->opaque,(nnn),1)
+#define BZFREE(ppp)  (strm->bzfree)(strm->opaque,(ppp))
+
+
+/*-- Header bytes. --*/
+
+#define BZ_HDR_B 0x42   /* 'B' */
+#define BZ_HDR_Z 0x5a   /* 'Z' */
+#define BZ_HDR_h 0x68   /* 'h' */
+#define BZ_HDR_0 0x30   /* '0' */
+  
+/*-- Constants for the back end. --*/
+
+#define BZ_MAX_ALPHA_SIZE 258
+#define BZ_MAX_CODE_LEN    23
+
+#define BZ_RUNA 0
+#define BZ_RUNB 1
+
+#define BZ_N_GROUPS 6
+#define BZ_G_SIZE   50
+#define BZ_N_ITERS  4
+
+#define BZ_MAX_SELECTORS (2 + (900000 / BZ_G_SIZE))
+
+
+
+/*-- Stuff for randomising repetitive blocks. --*/
+
+extern Int32 BZ2_rNums[512];
+
+#define BZ_RAND_DECLS                          \
+   Int32 rNToGo;                               \
+   Int32 rTPos                                 \
+
+#define BZ_RAND_INIT_MASK                      \
+   s->rNToGo = 0;                              \
+   s->rTPos  = 0                               \
+
+#define BZ_RAND_MASK ((s->rNToGo == 1) ? 1 : 0)
+
+#define BZ_RAND_UPD_MASK                       \
+   if (s->rNToGo == 0) {                       \
+      s->rNToGo = BZ2_rNums[s->rTPos];         \
+      s->rTPos++;                              \
+      if (s->rTPos == 512) s->rTPos = 0;       \
+   }                                           \
+   s->rNToGo--;
+
+
+
+/*-- Stuff for doing CRCs. --*/
+
+extern UInt32 BZ2_crc32Table[256];
+
+#define BZ_INITIALISE_CRC(crcVar)              \
+{                                              \
+   crcVar = 0xffffffffL;                       \
+}
+
+#define BZ_FINALISE_CRC(crcVar)                \
+{                                              \
+   crcVar = ~(crcVar);                         \
+}
+
+#define BZ_UPDATE_CRC(crcVar,cha)              \
+{                                              \
+   crcVar = (crcVar << 8) ^                    \
+            BZ2_crc32Table[(crcVar >> 24) ^    \
+                           ((UChar)cha)];      \
+}
+
+
+
+/*-- States and modes for compression. --*/
+
+#define BZ_M_IDLE      1
+#define BZ_M_RUNNING   2
+#define BZ_M_FLUSHING  3
+#define BZ_M_FINISHING 4
+
+#define BZ_S_OUTPUT    1
+#define BZ_S_INPUT     2
+
+#define BZ_N_RADIX 2
+#define BZ_N_QSORT 12
+#define BZ_N_SHELL 18
+#define BZ_N_OVERSHOOT (BZ_N_RADIX + BZ_N_QSORT + BZ_N_SHELL + 2)
+
+
+
+
+/*-- Structure holding all the compression-side stuff. --*/
+
+typedef
+   struct {
+      /* pointer back to the struct bz_stream */
+      bz_stream* strm;
+
+      /* mode this stream is in, and whether inputting */
+      /* or outputting data */
+      Int32    mode;
+      Int32    state;
+
+      /* remembers avail_in when flush/finish requested */
+      UInt32   avail_in_expect;
+
+      /* for doing the block sorting */
+      UInt32*  arr1;
+      UInt32*  arr2;
+      UInt32*  ftab;
+      Int32    origPtr;
+
+      /* aliases for arr1 and arr2 */
+      UInt32*  ptr;
+      UChar*   block;
+      UInt16*  mtfv;
+      UChar*   zbits;
+
+      /* for deciding when to use the fallback sorting algorithm */
+      Int32    workFactor;
+
+      /* run-length-encoding of the input */
+      UInt32   state_in_ch;
+      Int32    state_in_len;
+      BZ_RAND_DECLS;
+
+      /* input and output limits and current posns */
+      Int32    nblock;
+      Int32    nblockMAX;
+      Int32    numZ;
+      Int32    state_out_pos;
+
+      /* map of bytes used in block */
+      Int32    nInUse;
+      Bool     inUse[256];
+      UChar    unseqToSeq[256];
+
+      /* the buffer for bit stream creation */
+      UInt32   bsBuff;
+      Int32    bsLive;
+
+      /* block and combined CRCs */
+      UInt32   blockCRC;
+      UInt32   combinedCRC;
+
+      /* misc administratium */
+      Int32    verbosity;
+      Int32    blockNo;
+      Int32    blockSize100k;
+
+      /* stuff for coding the MTF values */
+      Int32    nMTF;
+      Int32    mtfFreq    [BZ_MAX_ALPHA_SIZE];
+      UChar    selector   [BZ_MAX_SELECTORS];
+      UChar    selectorMtf[BZ_MAX_SELECTORS];
+
+      UChar    len     [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+      Int32    code    [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+      Int32    rfreq   [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+      /* second dimension: only 3 needed; 4 makes index calculations faster */
+      UInt32   len_pack[BZ_MAX_ALPHA_SIZE][4];
+
+   }
+   EState;
+
+
+
+/*-- externs for compression. --*/
+
+extern void 
+BZ2_blockSort ( EState* );
+
+extern void 
+BZ2_compressBlock ( EState*, Bool );
+
+extern void 
+BZ2_bsInitWrite ( EState* );
+
+extern void 
+BZ2_hbAssignCodes ( Int32*, UChar*, Int32, Int32, Int32 );
+
+extern void 
+BZ2_hbMakeCodeLengths ( UChar*, Int32*, Int32, Int32 );
+
+
+
+/*-- states for decompression. --*/
+
+#define BZ_X_IDLE        1
+#define BZ_X_OUTPUT      2
+
+#define BZ_X_MAGIC_1     10
+#define BZ_X_MAGIC_2     11
+#define BZ_X_MAGIC_3     12
+#define BZ_X_MAGIC_4     13
+#define BZ_X_BLKHDR_1    14
+#define BZ_X_BLKHDR_2    15
+#define BZ_X_BLKHDR_3    16
+#define BZ_X_BLKHDR_4    17
+#define BZ_X_BLKHDR_5    18
+#define BZ_X_BLKHDR_6    19
+#define BZ_X_BCRC_1      20
+#define BZ_X_BCRC_2      21
+#define BZ_X_BCRC_3      22
+#define BZ_X_BCRC_4      23
+#define BZ_X_RANDBIT     24
+#define BZ_X_ORIGPTR_1   25
+#define BZ_X_ORIGPTR_2   26
+#define BZ_X_ORIGPTR_3   27
+#define BZ_X_MAPPING_1   28
+#define BZ_X_MAPPING_2   29
+#define BZ_X_SELECTOR_1  30
+#define BZ_X_SELECTOR_2  31
+#define BZ_X_SELECTOR_3  32
+#define BZ_X_CODING_1    33
+#define BZ_X_CODING_2    34
+#define BZ_X_CODING_3    35
+#define BZ_X_MTF_1       36
+#define BZ_X_MTF_2       37
+#define BZ_X_MTF_3       38
+#define BZ_X_MTF_4       39
+#define BZ_X_MTF_5       40
+#define BZ_X_MTF_6       41
+#define BZ_X_ENDHDR_2    42
+#define BZ_X_ENDHDR_3    43
+#define BZ_X_ENDHDR_4    44
+#define BZ_X_ENDHDR_5    45
+#define BZ_X_ENDHDR_6    46
+#define BZ_X_CCRC_1      47
+#define BZ_X_CCRC_2      48
+#define BZ_X_CCRC_3      49
+#define BZ_X_CCRC_4      50
+
+
+
+/*-- Constants for the fast MTF decoder. --*/
+
+#define MTFA_SIZE 4096
+#define MTFL_SIZE 16
+
+
+
+/*-- Structure holding all the decompression-side stuff. --*/
+
+typedef
+   struct {
+      /* pointer back to the struct bz_stream */
+      bz_stream* strm;
+
+      /* state indicator for this stream */
+      Int32    state;
+
+      /* for doing the final run-length decoding */
+      UChar    state_out_ch;
+      Int32    state_out_len;
+      Bool     blockRandomised;
+      BZ_RAND_DECLS;
+
+      /* the buffer for bit stream reading */
+      UInt32   bsBuff;
+      Int32    bsLive;
+
+      /* misc administratium */
+      Int32    blockSize100k;
+      Bool     smallDecompress;
+      Int32    currBlockNo;
+      Int32    verbosity;
+
+      /* for undoing the Burrows-Wheeler transform */
+      Int32    origPtr;
+      UInt32   tPos;
+      Int32    k0;
+      Int32    unzftab[256];
+      Int32    nblock_used;
+      Int32    cftab[257];
+      Int32    cftabCopy[257];
+
+      /* for undoing the Burrows-Wheeler transform (FAST) */
+      UInt32   *tt;
+
+      /* for undoing the Burrows-Wheeler transform (SMALL) */
+      UInt16   *ll16;
+      UChar    *ll4;
+
+      /* stored and calculated CRCs */
+      UInt32   storedBlockCRC;
+      UInt32   storedCombinedCRC;
+      UInt32   calculatedBlockCRC;
+      UInt32   calculatedCombinedCRC;
+
+      /* map of bytes used in block */
+      Int32    nInUse;
+      Bool     inUse[256];
+      Bool     inUse16[16];
+      UChar    seqToUnseq[256];
+
+      /* for decoding the MTF values */
+      UChar    mtfa   [MTFA_SIZE];
+      Int32    mtfbase[256 / MTFL_SIZE];
+      UChar    selector   [BZ_MAX_SELECTORS];
+      UChar    selectorMtf[BZ_MAX_SELECTORS];
+      UChar    len  [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+
+      Int32    limit  [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+      Int32    base   [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+      Int32    perm   [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
+      Int32    minLens[BZ_N_GROUPS];
+
+      /* save area for scalars in the main decompress code */
+      Int32    save_i;
+      Int32    save_j;
+      Int32    save_t;
+      Int32    save_alphaSize;
+      Int32    save_nGroups;
+      Int32    save_nSelectors;
+      Int32    save_EOB;
+      Int32    save_groupNo;
+      Int32    save_groupPos;
+      Int32    save_nextSym;
+      Int32    save_nblockMAX;
+      Int32    save_nblock;
+      Int32    save_es;
+      Int32    save_N;
+      Int32    save_curr;
+      Int32    save_zt;
+      Int32    save_zn; 
+      Int32    save_zvec;
+      Int32    save_zj;
+      Int32    save_gSel;
+      Int32    save_gMinlen;
+      Int32*   save_gLimit;
+      Int32*   save_gBase;
+      Int32*   save_gPerm;
+
+   }
+   DState;
+
+
+
+/*-- Macros for decompression. --*/
+
+#define BZ_GET_FAST(cccc)                     \
+    s->tPos = s->tt[s->tPos];                 \
+    cccc = (UChar)(s->tPos & 0xff);           \
+    s->tPos >>= 8;
+
+#define BZ_GET_FAST_C(cccc)                   \
+    c_tPos = c_tt[c_tPos];                    \
+    cccc = (UChar)(c_tPos & 0xff);            \
+    c_tPos >>= 8;
+
+#define SET_LL4(i,n)                                          \
+   { if (((i) & 0x1) == 0)                                    \
+        s->ll4[(i) >> 1] = (s->ll4[(i) >> 1] & 0xf0) | (n); else    \
+        s->ll4[(i) >> 1] = (s->ll4[(i) >> 1] & 0x0f) | ((n) << 4);  \
+   }
+
+#define GET_LL4(i)                             \
+   ((((UInt32)(s->ll4[(i) >> 1])) >> (((i) << 2) & 0x4)) & 0xF)
+
+#define SET_LL(i,n)                          \
+   { s->ll16[i] = (UInt16)(n & 0x0000ffff);  \
+     SET_LL4(i, n >> 16);                    \
+   }
+
+#define GET_LL(i) \
+   (((UInt32)s->ll16[i]) | (GET_LL4(i) << 16))
+
+#define BZ_GET_SMALL(cccc)                            \
+      cccc = BZ2_indexIntoF ( s->tPos, s->cftab );    \
+      s->tPos = GET_LL(s->tPos);
+
+
+/*-- externs for decompression. --*/
+
+extern Int32 
+BZ2_indexIntoF ( Int32, Int32* );
+
+extern Int32 
+BZ2_decompress ( DState* );
+
+extern void 
+BZ2_hbCreateDecodeTables ( Int32*, Int32*, Int32*, UChar*,
+                           Int32,  Int32, Int32 );
+
+
+#endif
+
+
+/*-- BZ_NO_STDIO seems to make NULL disappear on some platforms. --*/
+
+#ifdef BZ_NO_STDIO
+#ifndef NULL
+#define NULL 0
+#endif
+#endif
+
+
+/*-------------------------------------------------------------*/
+/*--- end                                   bzlib_private.h ---*/
+/*-------------------------------------------------------------*/
+
+
+/* Something which has the same size as void* on the host.  That is,
+   it is 32 bits on a 32-bit host and 64 bits on a 64-bit host, and so
+   it can safely be coerced to and from a pointer type on the host
+   machine. */
+typedef  unsigned long HWord;
+typedef  char          HChar;
+typedef  signed int    Int;
+typedef  unsigned int  UInt;
+
+typedef    signed long long int   Long;
+typedef  unsigned long long int   ULong;
+
+
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+
+//#include "/home/sewardj/VEX/trunk/pub/libvex_basictypes.h"
+
+static HWord (*serviceFn)(HWord,HWord) = 0;
+
+
/* Minimal strcpy replacement: copy the NUL-terminated string at src
   into dest, including the terminator, and return dest. */
static char* my_strcpy ( char* dest, const char* src )
{
   char* d = dest;
   while ((*d = *src) != 0) {
      d++;
      src++;
   }
   return dest;
}
+
/* Minimal memcpy replacement: copy sz bytes from src to dest (regions
   must not overlap) and return dest. */
static void* my_memcpy ( void *dest, const void *src, int sz )
{
   char       *d = (char *)dest;
   const char *s = (const char *)src;
   int k;

   for (k = 0; k < sz; k++)
      d[k] = s[k];

   return dest;
}
+
/* Overlap-safe byte copy, like memmove: copies len bytes from src to
   dst, picking the copy direction so that overlapping regions are
   handled correctly, and returns dst.  Fixes vs. the original: the
   source walker is const-qualified (the old code cast away const on
   `src'), and the obsolete `register' keyword is dropped.  The 4-way
   unrolled loops are kept as-is. */
static void* my_memmove( void *dst, const void *src, unsigned int len )
{
    char *d;
    const char *s;
    if ( dst > src ) {
        /* dst is above src: copy backwards so the tail of src is not
           clobbered before it has been read. */
        d = (char *)dst + len - 1;
        s = (const char *)src + len - 1;
        while ( len >= 4 ) {
            *d-- = *s--;
            *d-- = *s--;
            *d-- = *s--;
            *d-- = *s--;
            len -= 4;
        }
        while ( len-- ) {
            *d-- = *s--;
        }
    } else if ( dst < src ) {
        /* dst is below src: copy forwards. */
        d = (char *)dst;
        s = (const char *)src;
        while ( len >= 4 ) {
            *d++ = *s++;
            *d++ = *s++;
            *d++ = *s++;
            *d++ = *s++;
            len -= 4;
        }
        while ( len-- ) {
            *d++ = *s++;
        }
    }
    /* dst == src: nothing to do. */
    return dst;
}
+
/* Minimal strcat replacement: append the NUL-terminated string src to
   the end of dest (dest must have room) and return dest.
   NOTE(review): deliberately kept non-static, matching the original. */
char* my_strcat ( char* dest, const char* src )
{
   char* p = dest;
   while (*p != 0)
      p++;                      /* find the end of dest */
   while ((*p = *src) != 0) {   /* copy src, terminator included */
      p++;
      src++;
   }
   return dest;
}
+
+
+/////////////////////////////////////////////////////////////////////
+
/* Emit n bytes starting at p, one at a time, through the externally
   supplied service function (request code 1 = "log a character").
   NOTE(review): p[k] is a plain char, so bytes >= 0x80 may be
   sign-extended by the (int) cast -- same as the original. */
static void vexxx_log_bytes ( char* p, int n )
{
   int k;
   for (k = 0; k < n; k++) {
      serviceFn( 1, (int)p[k] );
   }
}
+
+/*---------------------------------------------------------*/
+/*--- vexxx_printf                                        ---*/
+/*---------------------------------------------------------*/
+
+/* This should be the only <...> include in the entire VEX library.
+   New code for vexxx_util.c should go above this point. */
+#include <stdarg.h>
+
+static HChar vexxx_toupper ( HChar c )
+{
+   if (c >= 'a' && c <= 'z')
+      return c + ('A' - 'a');
+   else
+      return c;
+}
+
+static Int vexxx_strlen ( const HChar* str )
+{
+   Int i = 0;
+   while (str[i] != 0) i++;
+   return i;
+}
+
+Bool vexxx_streq ( const HChar* s1, const HChar* s2 )
+{
+   while (True) {
+      if (*s1 == 0 && *s2 == 0)
+         return True;
+      if (*s1 != *s2)
+         return False;
+      s1++;
+      s2++;
+   }
+}
+
+/* Some flags.  */
+#define VG_MSG_SIGNED    1 /* The value is signed. */
+#define VG_MSG_ZJUSTIFY  2 /* Must justify with '0'. */
+#define VG_MSG_LJUSTIFY  4 /* Must justify on the left. */
+#define VG_MSG_PAREN     8 /* Parenthesize if present (for %y) */
+#define VG_MSG_COMMA    16 /* Add commas to numbers (for %d, %u) */
+
+/* Copy a string into the buffer. */
+static UInt
+myvprintf_str ( void(*send)(HChar), Int flags, Int width, HChar* str, 
+                Bool capitalise )
+{
+#  define MAYBE_TOUPPER(ch) (capitalise ? vexxx_toupper(ch) : (ch))
+   UInt ret = 0;
+   Int i, extra;
+   Int len = vexxx_strlen(str);
+
+   if (width == 0) {
+      ret += len;
+      for (i = 0; i < len; i++)
+         send(MAYBE_TOUPPER(str[i]));
+      return ret;
+   }
+
+   if (len > width) {
+      ret += width;
+      for (i = 0; i < width; i++)
+         send(MAYBE_TOUPPER(str[i]));
+      return ret;
+   }
+
+   extra = width - len;
+   if (flags & VG_MSG_LJUSTIFY) {
+      ret += extra;
+      for (i = 0; i < extra; i++)
+         send(' ');
+   }
+   ret += len;
+   for (i = 0; i < len; i++)
+      send(MAYBE_TOUPPER(str[i]));
+   if (!(flags & VG_MSG_LJUSTIFY)) {
+      ret += extra;
+      for (i = 0; i < extra; i++)
+         send(' ');
+   }
+
+#  undef MAYBE_TOUPPER
+
+   return ret;
+}
+
+/* Write P into the buffer according to these args:
+ *  If SIGN is true, p is a signed.
+ *  BASE is the base.
+ *  If WITH_ZERO is true, '0' must be added.
+ *  WIDTH is the width of the field.
+ */
/* Render the integer pL in the given base (2..16) via send(),
   honouring width and the VG_MSG_SIGNED / VG_MSG_ZJUSTIFY /
   VG_MSG_LJUSTIFY / VG_MSG_COMMA flags.  Returns the number of
   characters emitted.  NOTE(review): despite the ULong parameter,
   only the low 32 bits are used (p is UInt), so 64-bit values are
   truncated here -- confirm this is intended. */
static UInt
myvprintf_int64 ( void(*send)(HChar), Int flags, Int base, Int width, ULong pL)
{
   HChar buf[40];
   Int   ind = 0;       /* number of chars built up in buf */
   Int   i, nc = 0;     /* nc: commas inserted so far */
   Bool  neg = False;
   HChar *digits = "0123456789ABCDEF";
   UInt  ret = 0;
   UInt  p = (UInt)pL;  /* value truncated to 32 bits, see note above */

   if (base < 2 || base > 16)
      return ret;
 
   /* Signed conversion: print the magnitude, remember the sign.
      NOTE(review): -(Int)p overflows for INT_MIN (UB) -- confirm
      callers never pass it. */
   if ((flags & VG_MSG_SIGNED) && (Int)p < 0) {
      p   = - (Int)p;
      neg = True;
   }

   /* Build the digits least-significant-first into buf, inserting a
      comma every third digit when VG_MSG_COMMA is set and base is 10. */
   if (p == 0)
      buf[ind++] = '0';
   else {
      while (p > 0) {
         if ((flags & VG_MSG_COMMA) && 10 == base &&
             0 == (ind-nc) % 3 && 0 != ind) 
         {
            buf[ind++] = ',';
            nc++;
         }
         buf[ind++] = digits[p % base];
         p /= base;
      }
   }

   if (neg)
      buf[ind++] = '-';

   /* Right-justified field: pad with '0' or ' ' up to width.  The pad
      characters go into buf too and are emitted by the reversing loop
      below. */
   if (width > 0 && !(flags & VG_MSG_LJUSTIFY)) {
      for(; ind < width; ind++) {
	//vassert(ind < 39);
         buf[ind] = ((flags & VG_MSG_ZJUSTIFY) ? '0': ' ');
      }
   }

   /* buf holds the text reversed; emit it back to front. */
   ret += ind;
   for (i = ind -1; i >= 0; i--) {
      send(buf[i]);
   }
   /* Left-justified field: pad on the right, spaces only. */
   if (width > 0 && (flags & VG_MSG_LJUSTIFY)) {
      for(; ind < width; ind++) {
	 ret++;
         send(' ');  // Never pad with zeroes on RHS -- changes the value!
      }
   }
   return ret;
}
+
+
+/* A simple vprintf().  */
/* Minimal vprintf engine: walks `format', emitting literal characters
   through send() and dispatching conversions to myvprintf_int64 /
   myvprintf_str.  Supported conversions: %d %u %x %p %c %s %S
   (capitalised %s), with optional '(' ',' '-' '0' flags, a decimal
   field width, and 'l' length modifiers.  Unrecognised conversion
   characters are silently consumed.  Returns the number of characters
   emitted. */
static 
UInt vprintf_wrk ( void(*send)(HChar), const HChar *format, va_list vargs )
{
   UInt ret = 0;
   int i;
   int flags;
   int width;
   Bool is_long;

   /* We assume that vargs has already been initialised by the 
      caller, using va_start, and that the caller will similarly
      clean up with va_end.
   */

   for (i = 0; format[i] != 0; i++) {
      /* Literal character: pass straight through. */
      if (format[i] != '%') {
         send(format[i]);
	 ret++;
         continue;
      }
      i++;
      /* A '%' has been found.  Ignore a trailing %. */
      if (format[i] == 0)
         break;
      if (format[i] == '%') {
         /* `%%' is replaced by `%'. */
         send('%');
	 ret++;
         continue;
      }
      flags = 0;
      is_long = False;
      width = 0; /* length of the field. */
      if (format[i] == '(') {
	 flags |= VG_MSG_PAREN;
	 i++;
      }
      /* If ',' follows '%', commas will be inserted. */
      if (format[i] == ',') {
         flags |= VG_MSG_COMMA;
         i++;
      }
      /* If '-' follows '%', justify on the left. */
      if (format[i] == '-') {
         flags |= VG_MSG_LJUSTIFY;
         i++;
      }
      /* If '0' follows '%', pads will be inserted. */
      if (format[i] == '0') {
         flags |= VG_MSG_ZJUSTIFY;
         i++;
      }
      /* Compute the field length. */
      while (format[i] >= '0' && format[i] <= '9') {
         width *= 10;
         width += format[i++] - '0';
      }
      /* One or more 'l's all mean the same: a 64-bit argument. */
      while (format[i] == 'l') {
         i++;
         is_long = True;
      }

      switch (format[i]) {
         case 'd': /* %d */
            flags |= VG_MSG_SIGNED;
            if (is_long)
               ret += myvprintf_int64(send, flags, 10, width, 
				      (ULong)(va_arg (vargs, Long)));
            else
               ret += myvprintf_int64(send, flags, 10, width, 
				      (ULong)(va_arg (vargs, Int)));
            break;
         case 'u': /* %u */
            if (is_long)
               ret += myvprintf_int64(send, flags, 10, width, 
				      (ULong)(va_arg (vargs, ULong)));
            else
               ret += myvprintf_int64(send, flags, 10, width, 
				      (ULong)(va_arg (vargs, UInt)));
            break;
         case 'p': /* %p */
	    ret += 2;
            send('0');
            send('x');
            ret += myvprintf_int64(send, flags, 16, width, 
				   (ULong)((HWord)va_arg (vargs, void *)));
            break;
         case 'x': /* %x */
            if (is_long)
               ret += myvprintf_int64(send, flags, 16, width, 
				      (ULong)(va_arg (vargs, ULong)));
            else
               ret += myvprintf_int64(send, flags, 16, width, 
				      (ULong)(va_arg (vargs, UInt)));
            break;
         case 'c': /* %c */
	    ret++;
            send((va_arg (vargs, int)));
            break;
         case 's': case 'S': { /* %s */
            char *str = va_arg (vargs, char *);
            if (str == (char*) 0) str = "(null)";
            ret += myvprintf_str(send, flags, width, str, 
                                 (format[i]=='S'));
            break;
	 }
         /* NOTE(review): the #if 0 region below is dead code and, as
            written, would not compile (colons for semicolons, sprintf
            into `str' instead of `buf').  Left untouched. */
#        if 0
	 case 'y': { /* %y - print symbol */
	    Addr a = va_arg(vargs, Addr);

            HChar *name;
	    if (VG_(get_fnname_w_offset)(a, &name)) {
               HChar buf[1 + VG_strlen(name) + 1 + 1];
	       if (flags & VG_MSG_PAREN) {
                  VG_(sprintf)(str, "(%s)", name):
	       } else {
                  VG_(sprintf)(str, "%s", name):
               }
	       ret += myvprintf_str(send, flags, width, buf, 0);
	    }
	    break;
	 }
#        endif
         default:
            /* Unknown conversion character: drop it. */
            break;
      }
   }
   return ret;
}
+
+
/* A general replacement for printf().  Note that only low-level 
   debugging info should be sent via here.  The official route is
   to use vg_message().  This interface is deprecated.
*/
static HChar myprintf_buf[1000];   /* accumulation buffer, kept NUL-terminated */
static Int   n_myprintf_buf;       /* number of characters currently in it */

/* Append one character to myprintf_buf, keeping the buffer
   NUL-terminated.  The buffer is flushed to vexxx_log_bytes BEFORE
   appending when c is '\n' or the buffer is nearly full, so a newline
   is carried over to the start of the next flush rather than
   terminating the current one.  NOTE(review): presumably intentional
   for this test harness -- confirm if output ordering matters. */
static void add_to_myprintf_buf ( HChar c )
{
   if (c == '\n' || n_myprintf_buf >= 1000-10 /*paranoia*/ ) {
      (*vexxx_log_bytes)( myprintf_buf, vexxx_strlen(myprintf_buf) );
      n_myprintf_buf = 0;
      myprintf_buf[n_myprintf_buf] = 0;      
   }
   myprintf_buf[n_myprintf_buf++] = c;
   myprintf_buf[n_myprintf_buf] = 0;
}
+
/* printf-style front end: formats into myprintf_buf through
   add_to_myprintf_buf() (which flushes on '\n' or when nearly full),
   then flushes any remainder.  Returns the number of characters
   produced by the format engine. */
static UInt vexxx_printf ( const char *format, ... )
{
   UInt ret;
   va_list vargs;
   va_start(vargs,format);
   
   /* Start from an empty, NUL-terminated buffer. */
   n_myprintf_buf = 0;
   myprintf_buf[n_myprintf_buf] = 0;      
   ret = vprintf_wrk ( add_to_myprintf_buf, format, vargs );

   /* Flush whatever is left (output not ending in '\n'). */
   if (n_myprintf_buf > 0) {
      (*vexxx_log_bytes)( myprintf_buf, n_myprintf_buf );
   }

   va_end(vargs);

   return ret;
}
+
+/*---------------------------------------------------------------*/
+/*--- end                                          vexxx_util.c ---*/
+/*---------------------------------------------------------------*/
+
+
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+
+
+/*-------------------------------------------------------------*/
+/*--- Decompression machinery                               ---*/
+/*---                                          decompress.c ---*/
+/*-------------------------------------------------------------*/
+
+/*--
+  This file is a part of bzip2 and/or libbzip2, a program and
+  library for lossless, block-sorting data compression.
+
+  Copyright (C) 1996-2004 Julian R Seward.  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. The origin of this software must not be misrepresented; you must 
+     not claim that you wrote the original software.  If you use this 
+     software in a product, an acknowledgment in the product 
+     documentation would be appreciated but is not required.
+
+  3. Altered source versions must be plainly marked as such, and must
+     not be misrepresented as being the original software.
+
+  4. The name of the author may not be used to endorse or promote 
+     products derived from this software without specific prior written 
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  Julian Seward, Cambridge, UK.
+  jseward@bzip.org
+  bzip2/libbzip2 version 1.0 of 21 March 2000
+
+  This program is based on (at least) the work of:
+     Mike Burrows
+     David Wheeler
+     Peter Fenwick
+     Alistair Moffat
+     Radford Neal
+     Ian H. Witten
+     Robert Sedgewick
+     Jon L. Bentley
+
+  For more information on these sources, see the manual.
+--*/
+
+
+
+
+/*---------------------------------------------------*/
+static
+void makeMaps_d ( DState* s )
+{
+   Int32 i;
+   s->nInUse = 0;
+   for (i = 0; i < 256; i++)
+      if (s->inUse[i]) {
+         s->seqToUnseq[s->nInUse] = i;
+         s->nInUse++;
+      }
+}
+
+
+/*---------------------------------------------------*/
/* Stash the result code and jump to the common save-locals-and-return
   path at the bottom of BZ2_decompress.  Wrapped in do { } while (0)
   so the macro behaves as a single statement (the original `{ ... };'
   form breaks `if (x) RETURN(y); else ...'); the expansion is
   otherwise unchanged. */
#define RETURN(rrr)                               \
   do { retVal = rrr; goto save_state_and_return; } while (0)
+
/* Resumable bit-fetch.  Each use site supplies a unique state number
   lll; the macro plants `case lll:' so that, after the decoder runs
   out of input (RETURN(BZ_OK) below) and is later re-entered, the
   switch(s->state) in BZ2_decompress jumps straight back here.  Reads
   nnn bits (MSB first) from the bit buffer into vvv, refilling the
   buffer one input byte at a time and maintaining the 64-bit input
   counter split across total_in_lo32/total_in_hi32. */
#define GET_BITS(lll,vvv,nnn)                     \
   case lll: s->state = lll;                      \
   while (True) {                                 \
      if (s->bsLive >= nnn) {                     \
         UInt32 v;                                \
         v = (s->bsBuff >>                        \
             (s->bsLive-nnn)) & ((1 << nnn)-1);   \
         s->bsLive -= nnn;                        \
         vvv = v;                                 \
         break;                                   \
      }                                           \
      if (s->strm->avail_in == 0) RETURN(BZ_OK);  \
      s->bsBuff                                   \
         = (s->bsBuff << 8) |                     \
           ((UInt32)                              \
              (*((UChar*)(s->strm->next_in))));   \
      s->bsLive += 8;                             \
      s->strm->next_in++;                         \
      s->strm->avail_in--;                        \
      s->strm->total_in_lo32++;                   \
      if (s->strm->total_in_lo32 == 0)            \
         s->strm->total_in_hi32++;                \
   }

/* Fetch one byte / one bit through the same resumable mechanism. */
#define GET_UCHAR(lll,uuu)                        \
   GET_BITS(lll,uuu,8)

#define GET_BIT(lll,uuu)                          \
   GET_BITS(lll,uuu,1)
+
+/*---------------------------------------------------*/
/* Decode one Huffman-coded MTF symbol into lval, using two resumable
   bit-fetch states (label1 for the initial gMinlen bits, label2 for
   each extension bit).  On entry to a new 50-symbol group (groupPos
   exhausted) the next selector chooses which of the coding tables
   (limit/perm/base/minLens) to use.  The code is lengthened one bit
   at a time until zvec <= gLimit[zn]; codes longer than 20 bits or
   symbol indices outside the alphabet are data errors. */
#define GET_MTF_VAL(label1,label2,lval)           \
{                                                 \
   if (groupPos == 0) {                           \
      groupNo++;                                  \
      if (groupNo >= nSelectors)                  \
         RETURN(BZ_DATA_ERROR);                   \
      groupPos = BZ_G_SIZE;                       \
      gSel = s->selector[groupNo];                \
      gMinlen = s->minLens[gSel];                 \
      gLimit = &(s->limit[gSel][0]);              \
      gPerm = &(s->perm[gSel][0]);                \
      gBase = &(s->base[gSel][0]);                \
   }                                              \
   groupPos--;                                    \
   zn = gMinlen;                                  \
   GET_BITS(label1, zvec, zn);                    \
   while (1) {                                    \
      if (zn > 20 /* the longest code */)         \
         RETURN(BZ_DATA_ERROR);                   \
      if (zvec <= gLimit[zn]) break;              \
      zn++;                                       \
      GET_BIT(label2, zj);                        \
      zvec = (zvec << 1) | zj;                    \
   };                                             \
   if (zvec - gBase[zn] < 0                       \
       || zvec - gBase[zn] >= BZ_MAX_ALPHA_SIZE)  \
      RETURN(BZ_DATA_ERROR);                      \
   lval = gPerm[zvec - gBase[zn]];                \
}
+
+
+
+/*---------------------------------------------------*/
+__inline__ Int32 BZ2_indexIntoF ( Int32 indx, Int32 *cftab )
+{
+   Int32 nb, na, mid;
+   nb = 0;
+   na = 256;
+   do {
+      mid = (nb + na) >> 1;
+      if (indx >= cftab[mid]) nb = mid; else na = mid;
+   }
+   while (na - nb != 1);
+   return nb;
+}
+
+/*---------------------------------------------------*/
/* Decompress one bzip2 block (or the stream trailer) from s->strm.
   This is a resumable coroutine: every bit-fetch macro (GET_BITS /
   GET_UCHAR / GET_BIT / GET_MTF_VAL) plants a `case' label inside the
   big switch(s->state) below, and RETURN() saves all the locals into
   s->save_* before returning, so when input runs dry the function
   returns BZ_OK and a later call resumes exactly where it stopped.
   Returns BZ_OK, BZ_STREAM_END, or an error code. */
Int32 BZ2_decompress ( DState* s )
{
   UChar      uc;
   Int32      retVal;
   Int32      minLen, maxLen;
   bz_stream* strm = s->strm;

   /* stuff that needs to be saved/restored */
   Int32  i;
   Int32  j;
   Int32  t;
   Int32  alphaSize;
   Int32  nGroups;
   Int32  nSelectors;
   Int32  EOB;
   Int32  groupNo;
   Int32  groupPos;
   Int32  nextSym;
   Int32  nblockMAX;
   Int32  nblock;
   Int32  es;
   Int32  N;
   Int32  curr;
   Int32  zt;
   Int32  zn; 
   Int32  zvec;
   Int32  zj;
   Int32  gSel;
   Int32  gMinlen;
   Int32* gLimit;
   Int32* gBase;
   Int32* gPerm;

   if (s->state == BZ_X_MAGIC_1) {
      /*initialise the save area*/
      s->save_i           = 0;
      s->save_j           = 0;
      s->save_t           = 0;
      s->save_alphaSize   = 0;
      s->save_nGroups     = 0;
      s->save_nSelectors  = 0;
      s->save_EOB         = 0;
      s->save_groupNo     = 0;
      s->save_groupPos    = 0;
      s->save_nextSym     = 0;
      s->save_nblockMAX   = 0;
      s->save_nblock      = 0;
      s->save_es          = 0;
      s->save_N           = 0;
      s->save_curr        = 0;
      s->save_zt          = 0;
      s->save_zn          = 0;
      s->save_zvec        = 0;
      s->save_zj          = 0;
      s->save_gSel        = 0;
      s->save_gMinlen     = 0;
      s->save_gLimit      = NULL;
      s->save_gBase       = NULL;
      s->save_gPerm       = NULL;
   }

   /*restore from the save area*/
   i           = s->save_i;
   j           = s->save_j;
   t           = s->save_t;
   alphaSize   = s->save_alphaSize;
   nGroups     = s->save_nGroups;
   nSelectors  = s->save_nSelectors;
   EOB         = s->save_EOB;
   groupNo     = s->save_groupNo;
   groupPos    = s->save_groupPos;
   nextSym     = s->save_nextSym;
   nblockMAX   = s->save_nblockMAX;
   nblock      = s->save_nblock;
   es          = s->save_es;
   N           = s->save_N;
   curr        = s->save_curr;
   zt          = s->save_zt;
   zn          = s->save_zn; 
   zvec        = s->save_zvec;
   zj          = s->save_zj;
   gSel        = s->save_gSel;
   gMinlen     = s->save_gMinlen;
   gLimit      = s->save_gLimit;
   gBase       = s->save_gBase;
   gPerm       = s->save_gPerm;

   retVal = BZ_OK;

   /* The switch jumps to the case label planted by whichever GET_*
      macro was executing when input last ran out; on a fresh stream
      it enters at BZ_X_MAGIC_1 and falls straight through. */
   switch (s->state) {

      /*-- Stream header: "BZh" followed by block size digit '1'..'9'. --*/
      GET_UCHAR(BZ_X_MAGIC_1, uc);
      if (uc != BZ_HDR_B) RETURN(BZ_DATA_ERROR_MAGIC);

      GET_UCHAR(BZ_X_MAGIC_2, uc);
      if (uc != BZ_HDR_Z) RETURN(BZ_DATA_ERROR_MAGIC);

      GET_UCHAR(BZ_X_MAGIC_3, uc)
      if (uc != BZ_HDR_h) RETURN(BZ_DATA_ERROR_MAGIC);

      GET_BITS(BZ_X_MAGIC_4, s->blockSize100k, 8)
      if (s->blockSize100k < (BZ_HDR_0 + 1) || 
          s->blockSize100k > (BZ_HDR_0 + 9)) RETURN(BZ_DATA_ERROR_MAGIC);
      s->blockSize100k -= BZ_HDR_0;

      /*-- Allocate the working arrays now that the block size is known. --*/
      if (s->smallDecompress) {
         s->ll16 = BZALLOC( s->blockSize100k * 100000 * sizeof(UInt16) );
         s->ll4  = BZALLOC( 
                      ((1 + s->blockSize100k * 100000) >> 1) * sizeof(UChar) 
                   );
         if (s->ll16 == NULL || s->ll4 == NULL) RETURN(BZ_MEM_ERROR);
      } else {
         s->tt  = BZALLOC( s->blockSize100k * 100000 * sizeof(Int32) );
         if (s->tt == NULL) RETURN(BZ_MEM_ERROR);
      }

      /*-- Block header: 0x314159265359 for a data block,
           0x17... for the end-of-stream trailer. --*/
      GET_UCHAR(BZ_X_BLKHDR_1, uc);

      if (uc == 0x17) goto endhdr_2;
      if (uc != 0x31) RETURN(BZ_DATA_ERROR);
      GET_UCHAR(BZ_X_BLKHDR_2, uc);
      if (uc != 0x41) RETURN(BZ_DATA_ERROR);
      GET_UCHAR(BZ_X_BLKHDR_3, uc);
      if (uc != 0x59) RETURN(BZ_DATA_ERROR);
      GET_UCHAR(BZ_X_BLKHDR_4, uc);
      if (uc != 0x26) RETURN(BZ_DATA_ERROR);
      GET_UCHAR(BZ_X_BLKHDR_5, uc);
      if (uc != 0x53) RETURN(BZ_DATA_ERROR);
      GET_UCHAR(BZ_X_BLKHDR_6, uc);
      if (uc != 0x59) RETURN(BZ_DATA_ERROR);

      s->currBlockNo++;
      if (s->verbosity >= 2)
         VPrintf1 ( "\n    [%d: huff+mtf ", s->currBlockNo );
 
      /*-- Stored CRC of this block, 4 bytes big-endian. --*/
      s->storedBlockCRC = 0;
      GET_UCHAR(BZ_X_BCRC_1, uc);
      s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc);
      GET_UCHAR(BZ_X_BCRC_2, uc);
      s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc);
      GET_UCHAR(BZ_X_BCRC_3, uc);
      s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc);
      GET_UCHAR(BZ_X_BCRC_4, uc);
      s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc);

      GET_BITS(BZ_X_RANDBIT, s->blockRandomised, 1);

      /*-- BWT origin pointer, 3 bytes big-endian. --*/
      s->origPtr = 0;
      GET_UCHAR(BZ_X_ORIGPTR_1, uc);
      s->origPtr = (s->origPtr << 8) | ((Int32)uc);
      GET_UCHAR(BZ_X_ORIGPTR_2, uc);
      s->origPtr = (s->origPtr << 8) | ((Int32)uc);
      GET_UCHAR(BZ_X_ORIGPTR_3, uc);
      s->origPtr = (s->origPtr << 8) | ((Int32)uc);

      if (s->origPtr < 0)
         RETURN(BZ_DATA_ERROR);
      if (s->origPtr > 10 + 100000*s->blockSize100k) 
         RETURN(BZ_DATA_ERROR);

      /*--- Receive the mapping table ---*/
      /* Two-level bitmap: 16 bits say which 16-byte ranges are used,
         then one 16-bit map per used range. */
      for (i = 0; i < 16; i++) {
         GET_BIT(BZ_X_MAPPING_1, uc);
         if (uc == 1) 
            s->inUse16[i] = True; else 
            s->inUse16[i] = False;
      }

      for (i = 0; i < 256; i++) s->inUse[i] = False;

      for (i = 0; i < 16; i++)
         if (s->inUse16[i])
            for (j = 0; j < 16; j++) {
               GET_BIT(BZ_X_MAPPING_2, uc);
               if (uc == 1) s->inUse[i * 16 + j] = True;
            }
      makeMaps_d ( s );
      if (s->nInUse == 0) RETURN(BZ_DATA_ERROR);
      alphaSize = s->nInUse+2;   /* symbols + RUNA/RUNB + EOB */

      /*--- Now the selectors ---*/
      GET_BITS(BZ_X_SELECTOR_1, nGroups, 3);
      if (nGroups < 2 || nGroups > 6) RETURN(BZ_DATA_ERROR);
      GET_BITS(BZ_X_SELECTOR_2, nSelectors, 15);
      if (nSelectors < 1) RETURN(BZ_DATA_ERROR);
      /* Each selector is unary-coded: count 1-bits until a 0. */
      for (i = 0; i < nSelectors; i++) {
         j = 0;
         while (True) {
            GET_BIT(BZ_X_SELECTOR_3, uc);
            if (uc == 0) break;
            j++;
            if (j >= nGroups) RETURN(BZ_DATA_ERROR);
         }
         s->selectorMtf[i] = j;
      }

      /*--- Undo the MTF values for the selectors. ---*/
      {
         UChar pos[BZ_N_GROUPS], tmp, v;
         for (v = 0; v < nGroups; v++) pos[v] = v;
   
         for (i = 0; i < nSelectors; i++) {
            v = s->selectorMtf[i];
            tmp = pos[v];
            while (v > 0) { pos[v] = pos[v-1]; v--; }
            pos[0] = tmp;
            s->selector[i] = tmp;
         }
      }

      /*--- Now the coding tables ---*/
      /* Code lengths are delta-coded: a 5-bit start value, then
         +1/-1 adjustments per symbol. */
      for (t = 0; t < nGroups; t++) {
         GET_BITS(BZ_X_CODING_1, curr, 5);
         for (i = 0; i < alphaSize; i++) {
            while (True) {
               if (curr < 1 || curr > 20) RETURN(BZ_DATA_ERROR);
               GET_BIT(BZ_X_CODING_2, uc);
               if (uc == 0) break;
               GET_BIT(BZ_X_CODING_3, uc);
               if (uc == 0) curr++; else curr--;
            }
            s->len[t][i] = curr;
         }
      }

      /*--- Create the Huffman decoding tables ---*/
      for (t = 0; t < nGroups; t++) {
         minLen = 32;
         maxLen = 0;
         for (i = 0; i < alphaSize; i++) {
            if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
            if (s->len[t][i] < minLen) minLen = s->len[t][i];
         }
         BZ2_hbCreateDecodeTables ( 
            &(s->limit[t][0]), 
            &(s->base[t][0]), 
            &(s->perm[t][0]), 
            &(s->len[t][0]),
            minLen, maxLen, alphaSize
         );
         s->minLens[t] = minLen;
      }

      /*--- Now the MTF values ---*/

      EOB      = s->nInUse+1;
      nblockMAX = 100000 * s->blockSize100k;
      groupNo  = -1;
      groupPos = 0;

      for (i = 0; i <= 255; i++) s->unzftab[i] = 0;

      /*-- MTF init --*/
      /* The MTF list is kept as MTFA_SIZE bytes split into
         MTFL_SIZE-byte sublists (mtfbase[] holds each sublist's
         start), so a move-to-front costs at most one sublist shuffle. */
      {
         Int32 ii, jj, kk;
         kk = MTFA_SIZE-1;
         for (ii = 256 / MTFL_SIZE - 1; ii >= 0; ii--) {
            for (jj = MTFL_SIZE-1; jj >= 0; jj--) {
               s->mtfa[kk] = (UChar)(ii * MTFL_SIZE + jj);
               kk--;
            }
            s->mtfbase[ii] = kk + 1;
         }
      }
      /*-- end MTF init --*/

      nblock = 0;
      GET_MTF_VAL(BZ_X_MTF_1, BZ_X_MTF_2, nextSym);

      /*-- Main symbol loop: expand RUNA/RUNB run-lengths and undo the
           MTF transform, filling tt[] (or ll16[] in small mode) with
           the pre-BWT byte sequence. --*/
      while (True) {

         if (nextSym == EOB) break;

         if (nextSym == BZ_RUNA || nextSym == BZ_RUNB) {

            /* Decode a run length written in bijective base 2
               (RUNA=digit 1, RUNB=digit 2). */
            es = -1;
            N = 1;
            do {
               if (nextSym == BZ_RUNA) es = es + (0+1) * N; else
               if (nextSym == BZ_RUNB) es = es + (1+1) * N;
               N = N * 2;
               GET_MTF_VAL(BZ_X_MTF_3, BZ_X_MTF_4, nextSym);
            }
               while (nextSym == BZ_RUNA || nextSym == BZ_RUNB);

            es++;
            uc = s->seqToUnseq[ s->mtfa[s->mtfbase[0]] ];
            s->unzftab[uc] += es;

            if (s->smallDecompress)
               while (es > 0) {
                  if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR);
                  s->ll16[nblock] = (UInt16)uc;
                  nblock++;
                  es--;
               }
            else
               while (es > 0) {
                  if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR);
                  s->tt[nblock] = (UInt32)uc;
                  nblock++;
                  es--;
               };

            continue;

         } else {

            if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR);

            /*-- uc = MTF ( nextSym-1 ) --*/
            {
               Int32 ii, jj, kk, pp, lno, off;
               UInt32 nn;
               nn = (UInt32)(nextSym - 1);

               if (nn < MTFL_SIZE) {
                  /* avoid general-case expense */
                  pp = s->mtfbase[0];
                  uc = s->mtfa[pp+nn];
                  while (nn > 3) {
                     Int32 z = pp+nn;
                     s->mtfa[(z)  ] = s->mtfa[(z)-1];
                     s->mtfa[(z)-1] = s->mtfa[(z)-2];
                     s->mtfa[(z)-2] = s->mtfa[(z)-3];
                     s->mtfa[(z)-3] = s->mtfa[(z)-4];
                     nn -= 4;
                  }
                  while (nn > 0) { 
                     s->mtfa[(pp+nn)] = s->mtfa[(pp+nn)-1]; nn--; 
                  };
                  s->mtfa[pp] = uc;
               } else { 
                  /* general case */
                  lno = nn / MTFL_SIZE;
                  off = nn % MTFL_SIZE;
                  pp = s->mtfbase[lno] + off;
                  uc = s->mtfa[pp];
                  while (pp > s->mtfbase[lno]) { 
                     s->mtfa[pp] = s->mtfa[pp-1]; pp--; 
                  };
                  s->mtfbase[lno]++;
                  /* Ripple the last element of each lower sublist up
                     into the freed slot of the one above it. */
                  while (lno > 0) {
                     s->mtfbase[lno]--;
                     s->mtfa[s->mtfbase[lno]] 
                        = s->mtfa[s->mtfbase[lno-1] + MTFL_SIZE - 1];
                     lno--;
                  }
                  s->mtfbase[0]--;
                  s->mtfa[s->mtfbase[0]] = uc;
                  /* Sublist 0 hit the start of mtfa: recompact the
                     whole structure to the top of the array. */
                  if (s->mtfbase[0] == 0) {
                     kk = MTFA_SIZE-1;
                     for (ii = 256 / MTFL_SIZE-1; ii >= 0; ii--) {
                        for (jj = MTFL_SIZE-1; jj >= 0; jj--) {
                           s->mtfa[kk] = s->mtfa[s->mtfbase[ii] + jj];
                           kk--;
                        }
                        s->mtfbase[ii] = kk + 1;
                     }
                  }
               }
            }
            /*-- end uc = MTF ( nextSym-1 ) --*/

            s->unzftab[s->seqToUnseq[uc]]++;
            if (s->smallDecompress)
               s->ll16[nblock] = (UInt16)(s->seqToUnseq[uc]); else
               s->tt[nblock]   = (UInt32)(s->seqToUnseq[uc]);
            nblock++;

            GET_MTF_VAL(BZ_X_MTF_5, BZ_X_MTF_6, nextSym);
            continue;
         }
      }

      /* Now we know what nblock is, we can do a better sanity
         check on s->origPtr.
      */
      if (s->origPtr < 0 || s->origPtr >= nblock)
         RETURN(BZ_DATA_ERROR);

      /*-- Set up cftab to facilitate generation of T^(-1) --*/
      /* cftab[v] = number of block bytes < v (cumulative counts). */
      s->cftab[0] = 0;
      for (i = 1; i <= 256; i++) s->cftab[i] = s->unzftab[i-1];
      for (i = 1; i <= 256; i++) s->cftab[i] += s->cftab[i-1];
      for (i = 0; i <= 256; i++) {
         if (s->cftab[i] < 0 || s->cftab[i] > nblock) {
            /* s->cftab[i] can legitimately be == nblock */
            RETURN(BZ_DATA_ERROR);
         }
      }

      s->state_out_len = 0;
      s->state_out_ch  = 0;
      BZ_INITIALISE_CRC ( s->calculatedBlockCRC );
      s->state = BZ_X_OUTPUT;
      if (s->verbosity >= 2) VPrintf0 ( "rt+rld" );

      if (s->smallDecompress) {

         /*-- Make a copy of cftab, used in generation of T --*/
         for (i = 0; i <= 256; i++) s->cftabCopy[i] = s->cftab[i];

         /*-- compute the T vector --*/
         for (i = 0; i < nblock; i++) {
            uc = (UChar)(s->ll16[i]);
            SET_LL(i, s->cftabCopy[uc]);
            s->cftabCopy[uc]++;
         }

         /*-- Compute T^(-1) by pointer reversal on T --*/
         i = s->origPtr;
         j = GET_LL(i);
         do {
            Int32 tmp = GET_LL(j);
            SET_LL(j, i);
            i = j;
            j = tmp;
         }
            while (i != s->origPtr);

         /* Prime the output machinery with the first byte; randomised
            blocks (deprecated format feature) get the RAND mask. */
         s->tPos = s->origPtr;
         s->nblock_used = 0;
         if (s->blockRandomised) {
            BZ_RAND_INIT_MASK;
            BZ_GET_SMALL(s->k0); s->nblock_used++;
            BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; 
         } else {
            BZ_GET_SMALL(s->k0); s->nblock_used++;
         }

      } else {

         /*-- compute the T^(-1) vector --*/
         /* Each tt entry ends up holding (successor index << 8) | byte. */
         for (i = 0; i < nblock; i++) {
            uc = (UChar)(s->tt[i] & 0xff);
            s->tt[s->cftab[uc]] |= (i << 8);
            s->cftab[uc]++;
         }

         s->tPos = s->tt[s->origPtr] >> 8;
         s->nblock_used = 0;
         if (s->blockRandomised) {
            BZ_RAND_INIT_MASK;
            BZ_GET_FAST(s->k0); s->nblock_used++;
            BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK; 
         } else {
            BZ_GET_FAST(s->k0); s->nblock_used++;
         }

      }

      RETURN(BZ_OK);



    endhdr_2:
      /*-- End-of-stream trailer: magic 0x177245385090 then the
           combined CRC of all blocks, 4 bytes big-endian. --*/

      GET_UCHAR(BZ_X_ENDHDR_2, uc);
      if (uc != 0x72) RETURN(BZ_DATA_ERROR);
      GET_UCHAR(BZ_X_ENDHDR_3, uc);
      if (uc != 0x45) RETURN(BZ_DATA_ERROR);
      GET_UCHAR(BZ_X_ENDHDR_4, uc);
      if (uc != 0x38) RETURN(BZ_DATA_ERROR);
      GET_UCHAR(BZ_X_ENDHDR_5, uc);
      if (uc != 0x50) RETURN(BZ_DATA_ERROR);
      GET_UCHAR(BZ_X_ENDHDR_6, uc);
      if (uc != 0x90) RETURN(BZ_DATA_ERROR);

      s->storedCombinedCRC = 0;
      GET_UCHAR(BZ_X_CCRC_1, uc);
      s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc);
      GET_UCHAR(BZ_X_CCRC_2, uc);
      s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc);
      GET_UCHAR(BZ_X_CCRC_3, uc);
      s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc);
      GET_UCHAR(BZ_X_CCRC_4, uc);
      s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc);

      s->state = BZ_X_IDLE;
      RETURN(BZ_STREAM_END);

      default: AssertH ( False, 4001 );
   }

   AssertH ( False, 4002 );

   save_state_and_return:

   /* Spill all working locals back into the DState so the next call
      can resume from the interrupted case label. */
   s->save_i           = i;
   s->save_j           = j;
   s->save_t           = t;
   s->save_alphaSize   = alphaSize;
   s->save_nGroups     = nGroups;
   s->save_nSelectors  = nSelectors;
   s->save_EOB         = EOB;
   s->save_groupNo     = groupNo;
   s->save_groupPos    = groupPos;
   s->save_nextSym     = nextSym;
   s->save_nblockMAX   = nblockMAX;
   s->save_nblock      = nblock;
   s->save_es          = es;
   s->save_N           = N;
   s->save_curr        = curr;
   s->save_zt          = zt;
   s->save_zn          = zn;
   s->save_zvec        = zvec;
   s->save_zj          = zj;
   s->save_gSel        = gSel;
   s->save_gMinlen     = gMinlen;
   s->save_gLimit      = gLimit;
   s->save_gBase       = gBase;
   s->save_gPerm       = gPerm;

   return retVal;   
}
+
+
+/*-------------------------------------------------------------*/
+/*--- end                                      decompress.c ---*/
+/*-------------------------------------------------------------*/
+
+/*-------------------------------------------------------------*/
+/*--- Block sorting machinery                               ---*/
+/*---                                           blocksort.c ---*/
+/*-------------------------------------------------------------*/
+
+/*--
+  This file is a part of bzip2 and/or libbzip2, a program and
+  library for lossless, block-sorting data compression.
+
+  Copyright (C) 1996-2004 Julian R Seward.  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. The origin of this software must not be misrepresented; you must 
+     not claim that you wrote the original software.  If you use this 
+     software in a product, an acknowledgment in the product 
+     documentation would be appreciated but is not required.
+
+  3. Altered source versions must be plainly marked as such, and must
+     not be misrepresented as being the original software.
+
+  4. The name of the author may not be used to endorse or promote 
+     products derived from this software without specific prior written 
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  Julian Seward, Cambridge, UK.
+  jseward@bzip.org
+  bzip2/libbzip2 version 1.0 of 21 March 2000
+
+  This program is based on (at least) the work of:
+     Mike Burrows
+     David Wheeler
+     Peter Fenwick
+     Alistair Moffat
+     Radford Neal
+     Ian H. Witten
+     Robert Sedgewick
+     Jon L. Bentley
+
+  For more information on these sources, see the manual.
+
+  To get some idea how the block sorting algorithms in this file 
+  work, read my paper 
+     On the Performance of BWT Sorting Algorithms
+  in Proceedings of the IEEE Data Compression Conference 2000,
+  Snowbird, Utah, USA, 27-30 March 2000.  The main sort in this
+  file implements the algorithm called  cache  in the paper.
+--*/
+
+
+
+/*---------------------------------------------*/
+/*--- Fallback O(N log(N)^2) sorting        ---*/
+/*--- algorithm, for repetitive blocks      ---*/
+/*---------------------------------------------*/
+
+/*---------------------------------------------*/
+static 
+__inline__
+void fallbackSimpleSort ( UInt32* fmap, 
+                          UInt32* eclass, 
+                          Int32   lo, 
+                          Int32   hi )
+{
+   Int32 i, j, tmp;
+   UInt32 ec_tmp;
+
+   if (lo == hi) return;
+
+   if (hi - lo > 3) {
+      for ( i = hi-4; i >= lo; i-- ) {
+         tmp = fmap[i];
+         ec_tmp = eclass[tmp];
+         for ( j = i+4; j <= hi && ec_tmp > eclass[fmap[j]]; j += 4 )
+            fmap[j-4] = fmap[j];
+         fmap[j-4] = tmp;
+      }
+   }
+
+   for ( i = hi-1; i >= lo; i-- ) {
+      tmp = fmap[i];
+      ec_tmp = eclass[tmp];
+      for ( j = i+1; j <= hi && ec_tmp > eclass[fmap[j]]; j++ )
+         fmap[j-1] = fmap[j];
+      fmap[j-1] = tmp;
+   }
+}
+
+
+/*---------------------------------------------*/
+#define fswap(zz1, zz2) \
+   { Int32 zztmp = zz1; zz1 = zz2; zz2 = zztmp; }
+
+#define fvswap(zzp1, zzp2, zzn)       \
+{                                     \
+   Int32 yyp1 = (zzp1);               \
+   Int32 yyp2 = (zzp2);               \
+   Int32 yyn  = (zzn);                \
+   while (yyn > 0) {                  \
+      fswap(fmap[yyp1], fmap[yyp2]);  \
+      yyp1++; yyp2++; yyn--;          \
+   }                                  \
+}
+
+
+#define fmin(a,b) ((a) < (b)) ? (a) : (b)
+
+#define fpush(lz,hz) { stackLo[sp] = lz; \
+                       stackHi[sp] = hz; \
+                       sp++; }
+
+#define fpop(lz,hz) { sp--;              \
+                      lz = stackLo[sp];  \
+                      hz = stackHi[sp]; }
+
+#define FALLBACK_QSORT_SMALL_THRESH 10
+#define FALLBACK_QSORT_STACK_SIZE   100
+
+
/* Iterative three-way-partition quicksort of fmap[loSt..hiSt],
   ordering entries by their eclass value.  Uses an explicit work
   stack; partitions smaller than FALLBACK_QSORT_SMALL_THRESH are
   handed to fallbackSimpleSort. */
static
void fallbackQSort3 ( UInt32* fmap, 
                      UInt32* eclass,
                      Int32   loSt, 
                      Int32   hiSt )
{
   Int32 unLo, unHi, ltLo, gtHi, n, m;
   Int32 sp, lo, hi;
   UInt32 med, r, r3;
   Int32 stackLo[FALLBACK_QSORT_STACK_SIZE];
   Int32 stackHi[FALLBACK_QSORT_STACK_SIZE];

   /* r is the pseudo-random state used for pivot selection below */
   r = 0;

   sp = 0;
   fpush ( loSt, hiSt );

   while (sp > 0) {

      AssertH ( sp < FALLBACK_QSORT_STACK_SIZE, 1004 );

      fpop ( lo, hi );
      if (hi - lo < FALLBACK_QSORT_SMALL_THRESH) {
         fallbackSimpleSort ( fmap, eclass, lo, hi );
         continue;
      }

      /* Random partitioning.  Median of 3 sometimes fails to
         avoid bad cases.  Median of 9 seems to help but 
         looks rather expensive.  This too seems to work but
         is cheaper.  Guidance for the magic constants 
         7621 and 32768 is taken from Sedgewick's algorithms
         book, chapter 35.
      */
      r = ((r * 7621) + 1) % 32768;
      r3 = r % 3;
      if (r3 == 0) med = eclass[fmap[lo]]; else
      if (r3 == 1) med = eclass[fmap[(lo+hi)>>1]]; else
                   med = eclass[fmap[hi]];

      /* Three-way partition around med: elements equal to the pivot
         accumulate at the two ends ([lo..ltLo) and (gtHi..hi]) and
         are swapped into the middle afterwards. */
      unLo = ltLo = lo;
      unHi = gtHi = hi;

      while (1) {
         while (1) {
            if (unLo > unHi) break;
            n = (Int32)eclass[fmap[unLo]] - (Int32)med;
            if (n == 0) { 
               fswap(fmap[unLo], fmap[ltLo]); 
               ltLo++; unLo++; 
               continue; 
            };
            if (n > 0) break;
            unLo++;
         }
         while (1) {
            if (unLo > unHi) break;
            n = (Int32)eclass[fmap[unHi]] - (Int32)med;
            if (n == 0) { 
               fswap(fmap[unHi], fmap[gtHi]); 
               gtHi--; unHi--; 
               continue; 
            };
            if (n < 0) break;
            unHi--;
         }
         if (unLo > unHi) break;
         fswap(fmap[unLo], fmap[unHi]); unLo++; unHi--;
      }

      AssertD ( unHi == unLo-1, "fallbackQSort3(2)" );

      /* every element equalled the pivot: nothing left to sort */
      if (gtHi < ltLo) continue;

      /* move the equal-to-pivot runs from the ends into the middle */
      n = fmin(ltLo-lo, unLo-ltLo); fvswap(lo, unLo-n, n);
      m = fmin(hi-gtHi, gtHi-unHi); fvswap(unLo, hi-m+1, m);

      /* [lo..n] and [m..hi] are the strictly-less / strictly-greater
         partitions still to be sorted */
      n = lo + unLo - ltLo - 1;
      m = hi - (gtHi - unHi) + 1;

      /* push the larger partition first, so the smaller one is
         processed first (stack is LIFO) */
      if (n - lo > hi - m) {
         fpush ( lo, n );
         fpush ( m, hi );
      } else {
         fpush ( m, hi );
         fpush ( lo, n );
      }
   }
}
+
+#undef fmin
+#undef fpush
+#undef fpop
+#undef fswap
+#undef fvswap
+#undef FALLBACK_QSORT_SMALL_THRESH
+#undef FALLBACK_QSORT_STACK_SIZE
+
+
+/*---------------------------------------------*/
+/* Pre:
+      nblock > 0
+      eclass exists for [0 .. nblock-1]
+      ((UChar*)eclass) [0 .. nblock-1] holds block
+      ptr exists for [0 .. nblock-1]
+
+   Post:
+      ((UChar*)eclass) [0 .. nblock-1] holds block
+      All other areas of eclass destroyed
+      fmap [0 .. nblock-1] holds sorted order
+      bhtab [ 0 .. 2+(nblock/32) ] destroyed
+*/
+
/* Bit-vector helpers over bhtab ("bucket header" bits): bit zz is
   set iff a bucket boundary lies at position zz.  WORD_BH exposes
   the whole 32-bit word holding bit zz, and UNALIGNED_BH is nonzero
   while zz is not word-aligned; together they let fallbackSort scan
   the vector a word at a time once aligned. */
#define       SET_BH(zz)  bhtab[(zz) >> 5] |= (1 << ((zz) & 31))
#define     CLEAR_BH(zz)  bhtab[(zz) >> 5] &= ~(1 << ((zz) & 31))
#define     ISSET_BH(zz)  (bhtab[(zz) >> 5] & (1 << ((zz) & 31)))
#define      WORD_BH(zz)  bhtab[(zz) >> 5]
#define UNALIGNED_BH(zz)  ((zz) & 0x01f)
+
/* Fallback sorting algorithm: sorts fmap[0..nblock-1] into suffix
   order by inductively doubling the comparison depth H each round
   (an "exponential radix sort", after Manber-Myers; see the loop
   comment below).  See the Pre/Post comment above for the exact
   contracts on fmap/eclass/bhtab.  verb is the verbosity level. */
static
void fallbackSort ( UInt32* fmap, 
                    UInt32* eclass, 
                    UInt32* bhtab,
                    Int32   nblock,
                    Int32   verb )
{
   Int32 ftab[257];
   Int32 ftabCopy[256];
   Int32 H, i, j, k, l, r, cc, cc1;
   Int32 nNotDone;
   Int32 nBhtab;
   UChar* eclass8 = (UChar*)eclass;

   /*--
      Initial 1-char radix sort to generate
      initial fmap and initial BH bits.
   --*/
   if (verb >= 4)
      VPrintf0 ( "        bucket sorting ...\n" );
   for (i = 0; i < 257;    i++) ftab[i] = 0;
   for (i = 0; i < nblock; i++) ftab[eclass8[i]]++;
   /* ftabCopy keeps the raw counts; the block is reconstructed
      from it at the end, after eclass has been destroyed */
   for (i = 0; i < 256;    i++) ftabCopy[i] = ftab[i];
   for (i = 1; i < 257;    i++) ftab[i] += ftab[i-1];

   for (i = 0; i < nblock; i++) {
      j = eclass8[i];
      k = ftab[j] - 1;
      ftab[j] = k;
      fmap[k] = i;
   }

   /* number of 32-bit words in the bucket-boundary bit vector */
   nBhtab = 2 + (nblock / 32);
   for (i = 0; i < nBhtab; i++) bhtab[i] = 0;
   /* mark the start of each of the 256 initial buckets */
   for (i = 0; i < 256; i++) SET_BH(ftab[i]);

   /*--
      Inductively refine the buckets.  Kind-of an
      "exponential radix sort" (!), inspired by the
      Manber-Myers suffix array construction algorithm.
   --*/

   /*-- set sentinel bits for block-end detection --*/
   for (i = 0; i < 32; i++) { 
      SET_BH(nblock + 2*i);
      CLEAR_BH(nblock + 2*i + 1);
   }

   /*-- the log(N) loop --*/
   H = 1;
   while (1) {

      if (verb >= 4) 
         VPrintf1 ( "        depth %6d has ", H );

      /* eclass[k] := start index of the bucket containing the
         suffix that is H positions after suffix k */
      j = 0;
      for (i = 0; i < nblock; i++) {
         if (ISSET_BH(i)) j = i;
         k = fmap[i] - H; if (k < 0) k += nblock;
         eclass[k] = j;
      }

      nNotDone = 0;
      r = -1;
      while (1) {

         /*-- find the next non-singleton bucket --*/
         k = r + 1;
         while (ISSET_BH(k) && UNALIGNED_BH(k)) k++;
         if (ISSET_BH(k)) {
            while (WORD_BH(k) == 0xffffffff) k += 32;
            while (ISSET_BH(k)) k++;
         }
         l = k - 1;
         if (l >= nblock) break;
         while (!ISSET_BH(k) && UNALIGNED_BH(k)) k++;
         if (!ISSET_BH(k)) {
            while (WORD_BH(k) == 0x00000000) k += 32;
            while (!ISSET_BH(k)) k++;
         }
         r = k - 1;
         if (r >= nblock) break;

         /*-- now [l, r] bracket current bucket --*/
         if (r > l) {
            nNotDone += (r - l + 1);
            fallbackQSort3 ( fmap, eclass, l, r );

            /*-- scan bucket and generate header bits-- */
            cc = -1;
            for (i = l; i <= r; i++) {
               cc1 = eclass[fmap[i]];
               if (cc != cc1) { SET_BH(i); cc = cc1; };
            }
         }
      }

      if (verb >= 4) 
         VPrintf1 ( "%6d unresolved strings\n", nNotDone );

      H *= 2;
      if (H > nblock || nNotDone == 0) break;
   }

   /*-- 
      Reconstruct the original block in
      eclass8 [0 .. nblock-1], since the
      previous phase destroyed it.
   --*/
   if (verb >= 4)
      VPrintf0 ( "        reconstructing block ...\n" );
   j = 0;
   for (i = 0; i < nblock; i++) {
      while (ftabCopy[j] == 0) j++;
      ftabCopy[j]--;
      eclass8[fmap[i]] = (UChar)j;
   }
   AssertH ( j < 256, 1005 );
}
+
+#undef       SET_BH
+#undef     CLEAR_BH
+#undef     ISSET_BH
+#undef      WORD_BH
+#undef UNALIGNED_BH
+
+
+/*---------------------------------------------*/
+/*--- The main, O(N^2 log(N)) sorting       ---*/
+/*--- algorithm.  Faster for "normal"       ---*/
+/*--- non-repetitive blocks.                ---*/
+/*---------------------------------------------*/
+
+/*---------------------------------------------*/
+static
+__inline__
+Bool mainGtU ( UInt32  i1, 
+               UInt32  i2,
+               UChar*  block, 
+               UInt16* quadrant,
+               UInt32  nblock,
+               Int32*  budget )
+{
+   Int32  k;
+   UChar  c1, c2;
+   UInt16 s1, s2;
+
+   AssertD ( i1 != i2, "mainGtU" );
+   /* 1 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 2 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 3 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 4 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 5 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 6 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 7 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 8 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 9 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 10 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 11 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+   /* 12 */
+   c1 = block[i1]; c2 = block[i2];
+   if (c1 != c2) return (c1 > c2);
+   i1++; i2++;
+
+   k = nblock + 8;
+
+   do {
+      /* 1 */
+      c1 = block[i1]; c2 = block[i2];
+      if (c1 != c2) return (c1 > c2);
+      s1 = quadrant[i1]; s2 = quadrant[i2];
+      if (s1 != s2) return (s1 > s2);
+      i1++; i2++;
+      /* 2 */
+      c1 = block[i1]; c2 = block[i2];
+      if (c1 != c2) return (c1 > c2);
+      s1 = quadrant[i1]; s2 = quadrant[i2];
+      if (s1 != s2) return (s1 > s2);
+      i1++; i2++;
+      /* 3 */
+      c1 = block[i1]; c2 = block[i2];
+      if (c1 != c2) return (c1 > c2);
+      s1 = quadrant[i1]; s2 = quadrant[i2];
+      if (s1 != s2) return (s1 > s2);
+      i1++; i2++;
+      /* 4 */
+      c1 = block[i1]; c2 = block[i2];
+      if (c1 != c2) return (c1 > c2);
+      s1 = quadrant[i1]; s2 = quadrant[i2];
+      if (s1 != s2) return (s1 > s2);
+      i1++; i2++;
+      /* 5 */
+      c1 = block[i1]; c2 = block[i2];
+      if (c1 != c2) return (c1 > c2);
+      s1 = quadrant[i1]; s2 = quadrant[i2];
+      if (s1 != s2) return (s1 > s2);
+      i1++; i2++;
+      /* 6 */
+      c1 = block[i1]; c2 = block[i2];
+      if (c1 != c2) return (c1 > c2);
+      s1 = quadrant[i1]; s2 = quadrant[i2];
+      if (s1 != s2) return (s1 > s2);
+      i1++; i2++;
+      /* 7 */
+      c1 = block[i1]; c2 = block[i2];
+      if (c1 != c2) return (c1 > c2);
+      s1 = quadrant[i1]; s2 = quadrant[i2];
+      if (s1 != s2) return (s1 > s2);
+      i1++; i2++;
+      /* 8 */
+      c1 = block[i1]; c2 = block[i2];
+      if (c1 != c2) return (c1 > c2);
+      s1 = quadrant[i1]; s2 = quadrant[i2];
+      if (s1 != s2) return (s1 > s2);
+      i1++; i2++;
+
+      if (i1 >= nblock) i1 -= nblock;
+      if (i2 >= nblock) i2 -= nblock;
+
+      k -= 8;
+      (*budget)--;
+   }
+      while (k >= 0);
+
+   return False;
+}
+
+
+/*---------------------------------------------*/
+/*--
+   Knuth's increments seem to work better
+   than Incerpi-Sedgewick here.  Possibly
+   because the number of elems to sort is
+   usually small, typically <= 20.
+--*/
+static
+Int32 incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280,
+                   9841, 29524, 88573, 265720,
+                   797161, 2391484 };
+
+static
+void mainSimpleSort ( UInt32* ptr,
+                      UChar*  block,
+                      UInt16* quadrant,
+                      Int32   nblock,
+                      Int32   lo, 
+                      Int32   hi, 
+                      Int32   d,
+                      Int32*  budget )
+{
+   Int32 i, j, h, bigN, hp;
+   UInt32 v;
+
+   bigN = hi - lo + 1;
+   if (bigN < 2) return;
+
+   hp = 0;
+   while (incs[hp] < bigN) hp++;
+   hp--;
+
+   for (; hp >= 0; hp--) {
+      h = incs[hp];
+
+      i = lo + h;
+      while (True) {
+
+         /*-- copy 1 --*/
+         if (i > hi) break;
+         v = ptr[i];
+         j = i;
+         while ( mainGtU ( 
+                    ptr[j-h]+d, v+d, block, quadrant, nblock, budget 
+                 ) ) {
+            ptr[j] = ptr[j-h];
+            j = j - h;
+            if (j <= (lo + h - 1)) break;
+         }
+         ptr[j] = v;
+         i++;
+
+         /*-- copy 2 --*/
+         if (i > hi) break;
+         v = ptr[i];
+         j = i;
+         while ( mainGtU ( 
+                    ptr[j-h]+d, v+d, block, quadrant, nblock, budget 
+                 ) ) {
+            ptr[j] = ptr[j-h];
+            j = j - h;
+            if (j <= (lo + h - 1)) break;
+         }
+         ptr[j] = v;
+         i++;
+
+         /*-- copy 3 --*/
+         if (i > hi) break;
+         v = ptr[i];
+         j = i;
+         while ( mainGtU ( 
+                    ptr[j-h]+d, v+d, block, quadrant, nblock, budget 
+                 ) ) {
+            ptr[j] = ptr[j-h];
+            j = j - h;
+            if (j <= (lo + h - 1)) break;
+         }
+         ptr[j] = v;
+         i++;
+
+         if (*budget < 0) return;
+      }
+   }
+}
+
+
+/*---------------------------------------------*/
+/*--
+   The following is an implementation of
+   an elegant 3-way quicksort for strings,
+   described in a paper "Fast Algorithms for
+   Sorting and Searching Strings", by Robert
+   Sedgewick and Jon L. Bentley.
+--*/
+
/* Swap two ptr entries.  Wrapped in do/while(0) so the macro acts
   as a single statement in all contexts (the original bare-brace
   form leaves a stray ';' after expansion, which breaks inside
   an if/else). */
#define mswap(zz1, zz2) \
   do { Int32 zztmp = zz1; zz1 = zz2; zz2 = zztmp; } while (0)

/* Swap the zzn-element regions of ptr beginning at zzp1 and zzp2. */
#define mvswap(zzp1, zzp2, zzn)       \
   do {                               \
      Int32 yyp1 = (zzp1);            \
      Int32 yyp2 = (zzp2);            \
      Int32 yyn  = (zzn);             \
      while (yyn > 0) {               \
         mswap(ptr[yyp1], ptr[yyp2]); \
         yyp1++; yyp2++; yyn--;       \
      }                               \
   } while (0)
+
+static 
+__inline__
+UChar mmed3 ( UChar a, UChar b, UChar c )
+{
+   UChar t;
+   if (a > b) { t = a; a = b; b = t; };
+   if (b > c) { 
+      b = c;
+      if (a > b) b = a;
+   }
+   return b;
+}
+
/* Minimum of two values.  The whole expansion is parenthesised;
   the original lacked the outer parentheses, which is unsafe if
   the macro is ever used inside a larger expression. */
#define mmin(a,b) (((a) < (b)) ? (a) : (b))

/* Push/pop a [lo,hi,depth] work item on the explicit quicksort
   stack (stackLo/stackHi/stackD/sp are locals of mainQSort3).
   do/while(0) makes each expansion a single statement. */
#define mpush(lz,hz,dz) do { stackLo[sp] = lz; \
                             stackHi[sp] = hz; \
                             stackD [sp] = dz; \
                             sp++; } while (0)

#define mpop(lz,hz,dz) do { sp--;             \
                            lz = stackLo[sp]; \
                            hz = stackHi[sp]; \
                            dz = stackD [sp]; } while (0)


/* Size of pending sub-partition az (az in {0,1,2}). */
#define mnextsize(az) (nextHi[az]-nextLo[az])

/* Exchange pending sub-partitions az and bz. */
#define mnextswap(az,bz)                                           \
   do { Int32 tz;                                                  \
        tz = nextLo[az]; nextLo[az] = nextLo[bz]; nextLo[bz] = tz; \
        tz = nextHi[az]; nextHi[az] = nextHi[bz]; nextHi[bz] = tz; \
        tz = nextD [az]; nextD [az] = nextD [bz]; nextD [bz] = tz; } while (0)


/* Partitions at most this large, or deeper than the depth
   threshold, are handed to mainSimpleSort. */
#define MAIN_QSORT_SMALL_THRESH 20
#define MAIN_QSORT_DEPTH_THRESH (BZ_N_RADIX + BZ_N_QSORT)
#define MAIN_QSORT_STACK_SIZE 100
+
/* Iterative three-way-radix quicksort (Bentley-Sedgewick, see the
   comment above) of ptr[loSt..hiSt], partitioning on the byte at
   offset d of each rotation.  Small or too-deep partitions are
   handed to mainSimpleSort, which charges comparison work to
   *budget; the caller abandons the sort when *budget < 0. */
static
void mainQSort3 ( UInt32* ptr,
                  UChar*  block,
                  UInt16* quadrant,
                  Int32   nblock,
                  Int32   loSt, 
                  Int32   hiSt, 
                  Int32   dSt,
                  Int32*  budget )
{
   Int32 unLo, unHi, ltLo, gtHi, n, m, med;
   Int32 sp, lo, hi, d;

   Int32 stackLo[MAIN_QSORT_STACK_SIZE];
   Int32 stackHi[MAIN_QSORT_STACK_SIZE];
   Int32 stackD [MAIN_QSORT_STACK_SIZE];

   /* the three sub-partitions produced by one partitioning step */
   Int32 nextLo[3];
   Int32 nextHi[3];
   Int32 nextD [3];

   sp = 0;
   mpush ( loSt, hiSt, dSt );

   while (sp > 0) {

      AssertH ( sp < MAIN_QSORT_STACK_SIZE, 1001 );

      mpop ( lo, hi, d );
      if (hi - lo < MAIN_QSORT_SMALL_THRESH || 
          d > MAIN_QSORT_DEPTH_THRESH) {
         mainSimpleSort ( ptr, block, quadrant, nblock, lo, hi, d, budget );
         if (*budget < 0) return;
         continue;
      }

      /* pivot = median of the first, middle and last bytes */
      med = (Int32) 
            mmed3 ( block[ptr[ lo         ]+d],
                    block[ptr[ hi         ]+d],
                    block[ptr[ (lo+hi)>>1 ]+d] );

      /* Three-way partition around med: elements equal to the pivot
         accumulate at the two ends and are swapped into the middle
         afterwards. */
      unLo = ltLo = lo;
      unHi = gtHi = hi;

      while (True) {
         while (True) {
            if (unLo > unHi) break;
            n = ((Int32)block[ptr[unLo]+d]) - med;
            if (n == 0) { 
               mswap(ptr[unLo], ptr[ltLo]); 
               ltLo++; unLo++; continue; 
            };
            if (n >  0) break;
            unLo++;
         }
         while (True) {
            if (unLo > unHi) break;
            n = ((Int32)block[ptr[unHi]+d]) - med;
            if (n == 0) { 
               mswap(ptr[unHi], ptr[gtHi]); 
               gtHi--; unHi--; continue; 
            };
            if (n <  0) break;
            unHi--;
         }
         if (unLo > unHi) break;
         mswap(ptr[unLo], ptr[unHi]); unLo++; unHi--;
      }

      AssertD ( unHi == unLo-1, "mainQSort3(2)" );

      /* all bytes equalled the pivot: recurse one character deeper
         on the whole range */
      if (gtHi < ltLo) {
         mpush(lo, hi, d+1 );
         continue;
      }

      /* move the equal-to-pivot runs from the ends into the middle */
      n = mmin(ltLo-lo, unLo-ltLo); mvswap(lo, unLo-n, n);
      m = mmin(hi-gtHi, gtHi-unHi); mvswap(unLo, hi-m+1, m);

      n = lo + unLo - ltLo - 1;
      m = hi - (gtHi - unHi) + 1;

      /* less-than at depth d, greater-than at depth d, and the
         equal-to-pivot middle at depth d+1 */
      nextLo[0] = lo;  nextHi[0] = n;   nextD[0] = d;
      nextLo[1] = m;   nextHi[1] = hi;  nextD[1] = d;
      nextLo[2] = n+1; nextHi[2] = m-1; nextD[2] = d+1;

      /* sort the three by descending size (3-element sorting network) */
      if (mnextsize(0) < mnextsize(1)) mnextswap(0,1);
      if (mnextsize(1) < mnextsize(2)) mnextswap(1,2);
      if (mnextsize(0) < mnextsize(1)) mnextswap(0,1);

      AssertD (mnextsize(0) >= mnextsize(1), "mainQSort3(8)" );
      AssertD (mnextsize(1) >= mnextsize(2), "mainQSort3(9)" );

      /* largest pushed first, so the smallest is processed first */
      mpush (nextLo[0], nextHi[0], nextD[0]);
      mpush (nextLo[1], nextHi[1], nextD[1]);
      mpush (nextLo[2], nextHi[2], nextD[2]);
   }
}
+
+#undef mswap
+#undef mvswap
+#undef mpush
+#undef mpop
+#undef mmin
+#undef mnextsize
+#undef mnextswap
+#undef MAIN_QSORT_SMALL_THRESH
+#undef MAIN_QSORT_DEPTH_THRESH
+#undef MAIN_QSORT_STACK_SIZE
+
+
+/*---------------------------------------------*/
+/* Pre:
+      nblock > N_OVERSHOOT
+      block32 exists for [0 .. nblock-1 +N_OVERSHOOT]
+      ((UChar*)block32) [0 .. nblock-1] holds block
+      ptr exists for [0 .. nblock-1]
+
+   Post:
+      ((UChar*)block32) [0 .. nblock-1] holds block
+      All other areas of block32 destroyed
+      ftab [0 .. 65536 ] destroyed
+      ptr [0 .. nblock-1] holds sorted order
+      if (*budget < 0), sorting was abandoned
+*/
+
/* BIGFREQ(b): number of entries in big bucket b, computed from the
   cumulative counts in ftab.  SETMASK (bit 21, above any valid
   block index) flags a small bucket in ftab as fully sorted;
   CLEARMASK strips the flag to recover the plain index. */
#define BIGFREQ(b) (ftab[((b)+1) << 8] - ftab[(b) << 8])
#define SETMASK (1 << 21)
#define CLEARMASK (~(SETMASK))
+
/* The main sorting routine: sort ptr[0..nblock-1] into rotation
   order using a 2-byte radix sort followed by quicksorting of the
   small buckets, processing big buckets smallest-first and
   synthesising already-known orderings where possible.  Decrements
   *budget (via mainQSort3/mainGtU); returns early, leaving the
   sort unfinished, once it goes negative.  See the Pre/Post
   comment above for the exact contracts. */
static
void mainSort ( UInt32* ptr, 
                UChar*  block,
                UInt16* quadrant, 
                UInt32* ftab,
                Int32   nblock,
                Int32   verb,
                Int32*  budget )
{
   Int32  i, j, k, ss, sb;
   Int32  runningOrder[256];
   Bool   bigDone[256];
   Int32  copyStart[256];
   Int32  copyEnd  [256];
   UChar  c1;
   Int32  numQSorted;
   UInt16 s;
   if (verb >= 4) VPrintf0 ( "        main sort initialise ...\n" );

   /*-- set up the 2-byte frequency table --*/
   for (i = 65536; i >= 0; i--) ftab[i] = 0;

   /* j holds the 2-byte value (block[i], block[i+1]) as it slides
      backwards through the block; quadrant is zeroed in the same
      pass.  The loop is unrolled 4x. */
   j = block[0] << 8;
   i = nblock-1;
   for (; i >= 3; i -= 4) {
      quadrant[i] = 0;
      j = (j >> 8) | ( ((UInt16)block[i]) << 8);
      ftab[j]++;
      quadrant[i-1] = 0;
      j = (j >> 8) | ( ((UInt16)block[i-1]) << 8);
      ftab[j]++;
      quadrant[i-2] = 0;
      j = (j >> 8) | ( ((UInt16)block[i-2]) << 8);
      ftab[j]++;
      quadrant[i-3] = 0;
      j = (j >> 8) | ( ((UInt16)block[i-3]) << 8);
      ftab[j]++;
   }
   for (; i >= 0; i--) {
      quadrant[i] = 0;
      j = (j >> 8) | ( ((UInt16)block[i]) << 8);
      ftab[j]++;
   }

   /*-- (emphasises close relationship of block & quadrant) --*/
   for (i = 0; i < BZ_N_OVERSHOOT; i++) {
      block   [nblock+i] = block[i];
      quadrant[nblock+i] = 0;
   }

   if (verb >= 4) VPrintf0 ( "        bucket sorting ...\n" );

   /*-- Complete the initial radix sort --*/
   for (i = 1; i <= 65536; i++) ftab[i] += ftab[i-1];

   s = block[0] << 8;
   i = nblock-1;
   for (; i >= 3; i -= 4) {
      s = (s >> 8) | (block[i] << 8);
      j = ftab[s] -1;
      ftab[s] = j;
      ptr[j] = i;
      s = (s >> 8) | (block[i-1] << 8);
      j = ftab[s] -1;
      ftab[s] = j;
      ptr[j] = i-1;
      s = (s >> 8) | (block[i-2] << 8);
      j = ftab[s] -1;
      ftab[s] = j;
      ptr[j] = i-2;
      s = (s >> 8) | (block[i-3] << 8);
      j = ftab[s] -1;
      ftab[s] = j;
      ptr[j] = i-3;
   }
   for (; i >= 0; i--) {
      s = (s >> 8) | (block[i] << 8);
      j = ftab[s] -1;
      ftab[s] = j;
      ptr[j] = i;
   }

   /*--
      Now ftab contains the first loc of every small bucket.
      Calculate the running order, from smallest to largest
      big bucket.
   --*/
   for (i = 0; i <= 255; i++) {
      bigDone     [i] = False;
      runningOrder[i] = i;
   }

   /* Shell sort runningOrder by ascending big-bucket size (BIGFREQ),
      using the h(k+1) = 3*h(k) + 1 gap sequence. */
   {
      Int32 vv;
      Int32 h = 1;
      do h = 3 * h + 1; while (h <= 256);
      do {
         h = h / 3;
         for (i = h; i <= 255; i++) {
            vv = runningOrder[i];
            j = i;
            while ( BIGFREQ(runningOrder[j-h]) > BIGFREQ(vv) ) {
               runningOrder[j] = runningOrder[j-h];
               j = j - h;
               if (j <= (h - 1)) goto zero;
            }
            zero:
            runningOrder[j] = vv;
         }
      } while (h != 1);
   }

   /*--
      The main sorting loop.
   --*/

   numQSorted = 0;

   for (i = 0; i <= 255; i++) {

      /*--
         Process big buckets, starting with the least full.
         Basically this is a 3-step process in which we call
         mainQSort3 to sort the small buckets [ss, j], but
         also make a big effort to avoid the calls if we can.
      --*/
      ss = runningOrder[i];

      /*--
         Step 1:
         Complete the big bucket [ss] by quicksorting
         any unsorted small buckets [ss, j], for j != ss.  
         Hopefully previous pointer-scanning phases have already
         completed many of the small buckets [ss, j], so
         we don't have to sort them at all.
      --*/
      for (j = 0; j <= 255; j++) {
         if (j != ss) {
            sb = (ss << 8) + j;
            /* SETMASK in ftab[sb] means bucket sb is already sorted */
            if ( ! (ftab[sb] & SETMASK) ) {
               Int32 lo = ftab[sb]   & CLEARMASK;
               Int32 hi = (ftab[sb+1] & CLEARMASK) - 1;
               if (hi > lo) {
                  if (verb >= 4)
                     VPrintf4 ( "        qsort [0x%x, 0x%x]   "
                                "done %d   this %d\n",
                                ss, j, numQSorted, hi - lo + 1 );
                  mainQSort3 ( 
                     ptr, block, quadrant, nblock, 
                     lo, hi, BZ_N_RADIX, budget 
                  );   
                  numQSorted += (hi - lo + 1);
                  if (*budget < 0) return;
               }
            }
            ftab[sb] |= SETMASK;
         }
      }

      AssertH ( !bigDone[ss], 1006 );

      /*--
         Step 2:
         Now scan this big bucket [ss] so as to synthesise the
         sorted order for small buckets [t, ss] for all t,
         including, magically, the bucket [ss,ss] too.
         This will avoid doing Real Work in subsequent Step 1's.
      --*/
      {
         for (j = 0; j <= 255; j++) {
            copyStart[j] =  ftab[(j << 8) + ss]     & CLEARMASK;
            copyEnd  [j] = (ftab[(j << 8) + ss + 1] & CLEARMASK) - 1;
         }
         for (j = ftab[ss << 8] & CLEARMASK; j < copyStart[ss]; j++) {
            k = ptr[j]-1; if (k < 0) k += nblock;
            c1 = block[k];
            if (!bigDone[c1])
               ptr[ copyStart[c1]++ ] = k;
         }
         for (j = (ftab[(ss+1) << 8] & CLEARMASK) - 1; j > copyEnd[ss]; j--) {
            k = ptr[j]-1; if (k < 0) k += nblock;
            c1 = block[k];
            if (!bigDone[c1]) 
               ptr[ copyEnd[c1]-- ] = k;
         }
      }

      AssertH ( (copyStart[ss]-1 == copyEnd[ss])
                || 
                /* Extremely rare case missing in bzip2-1.0.0 and 1.0.1.
                   Necessity for this case is demonstrated by compressing 
                   a sequence of approximately 48.5 million of character 
                   251; 1.0.0/1.0.1 will then die here. */
                (copyStart[ss] == 0 && copyEnd[ss] == nblock-1),
                1007 )

      for (j = 0; j <= 255; j++) ftab[(j << 8) + ss] |= SETMASK;

      /*--
         Step 3:
         The [ss] big bucket is now done.  Record this fact,
         and update the quadrant descriptors.  Remember to
         update quadrants in the overshoot area too, if
         necessary.  The "if (i < 255)" test merely skips
         this updating for the last bucket processed, since
         updating for the last bucket is pointless.

         The quadrant array provides a way to incrementally
         cache sort orderings, as they appear, so as to 
         make subsequent comparisons in fullGtU() complete
         faster.  For repetitive blocks this makes a big
         difference (but not big enough to be able to avoid
         the fallback sorting mechanism, exponential radix sort).

         The precise meaning is: at all times:

            for 0 <= i < nblock and 0 <= j <= nblock

            if block[i] != block[j], 

               then the relative values of quadrant[i] and 
                    quadrant[j] are meaningless.

               else {
                  if quadrant[i] < quadrant[j]
                     then the string starting at i lexicographically
                     precedes the string starting at j

                  else if quadrant[i] > quadrant[j]
                     then the string starting at j lexicographically
                     precedes the string starting at i

                  else
                     the relative ordering of the strings starting
                     at i and j has not yet been determined.
               }
      --*/
      bigDone[ss] = True;

      if (i < 255) {
         Int32 bbStart  = ftab[ss << 8] & CLEARMASK;
         Int32 bbSize   = (ftab[(ss+1) << 8] & CLEARMASK) - bbStart;
         Int32 shifts   = 0;

         /* scale ranks so they fit in 16-bit quadrant entries */
         while ((bbSize >> shifts) > 65534) shifts++;

         for (j = bbSize-1; j >= 0; j--) {
            Int32 a2update     = ptr[bbStart + j];
            UInt16 qVal        = (UInt16)(j >> shifts);
            quadrant[a2update] = qVal;
            /* mirror into the overshoot copy of the block head */
            if (a2update < BZ_N_OVERSHOOT)
               quadrant[a2update + nblock] = qVal;
         }
         AssertH ( ((bbSize-1) >> shifts) <= 65535, 1002 );
      }

   }

   if (verb >= 4)
      VPrintf3 ( "        %d pointers, %d sorted, %d scanned\n",
                 nblock, numQSorted, nblock - numQSorted );
}
+
+#undef BIGFREQ
+#undef SETMASK
+#undef CLEARMASK
+
+
+/*---------------------------------------------*/
+/* Pre:
+      nblock > 0
+      arr2 exists for [0 .. nblock-1 +N_OVERSHOOT]
+      ((UChar*)arr2)  [0 .. nblock-1] holds block
+      arr1 exists for [0 .. nblock-1]
+
+   Post:
+      ((UChar*)arr2) [0 .. nblock-1] holds block
+      All other areas of block destroyed
+      ftab [ 0 .. 65536 ] destroyed
+      arr1 [0 .. nblock-1] holds sorted order
+*/
+void BZ2_blockSort ( EState* s )
+{
+   UInt32* ptr    = s->ptr; 
+   UChar*  block  = s->block;
+   UInt32* ftab   = s->ftab;
+   Int32   nblock = s->nblock;
+   Int32   verb   = s->verbosity;
+   Int32   wfact  = s->workFactor;
+   UInt16* quadrant;
+   Int32   budget;
+   Int32   budgetInit;
+   Int32   i;
+
+   if (nblock < /* 10000 */1000 ) {
+      fallbackSort ( s->arr1, s->arr2, ftab, nblock, verb );
+   } else {
+      /* Calculate the location for quadrant, remembering to get
+         the alignment right.  Assumes that &(block[0]) is at least
+         2-byte aligned -- this should be ok since block is really
+         the first section of arr2.
+      */
+      i = nblock+BZ_N_OVERSHOOT;
+      if (i & 1) i++;
+      quadrant = (UInt16*)(&(block[i]));
+
+      /* (wfact-1) / 3 puts the default-factor-30
+         transition point at very roughly the same place as 
+         with v0.1 and v0.9.0.  
+         Not that it particularly matters any more, since the
+         resulting compressed stream is now the same regardless
+         of whether or not we use the main sort or fallback sort.
+      */
+      if (wfact < 1  ) wfact = 1;
+      if (wfact > 100) wfact = 100;
+      budgetInit = nblock * ((wfact-1) / 3);
+      budget = budgetInit;
+
+      mainSort ( ptr, block, quadrant, ftab, nblock, verb, &budget );
+      if (0 && verb >= 3) 
+         VPrintf3 ( "      %d work, %d block, ratio %5.2f\n",
+                    budgetInit - budget,
+                    nblock, 
+                    (float)(budgetInit - budget) /
+                    (float)(nblock==0 ? 1 : nblock) ); 
+      if (budget < 0) {
+         if (verb >= 2) 
+            VPrintf0 ( "    too repetitive; using fallback"
+                       " sorting algorithm\n" );
+         fallbackSort ( s->arr1, s->arr2, ftab, nblock, verb );
+      }
+   }
+
+   s->origPtr = -1;
+   for (i = 0; i < s->nblock; i++)
+      if (ptr[i] == 0)
+         { s->origPtr = i; break; };
+
+   AssertH( s->origPtr != -1, 1003 );
+}
+
+
+/*-------------------------------------------------------------*/
+/*--- end                                       blocksort.c ---*/
+/*-------------------------------------------------------------*/
+
+/*-------------------------------------------------------------*/
+/*--- Huffman coding low-level stuff                        ---*/
+/*---                                             huffman.c ---*/
+/*-------------------------------------------------------------*/
+
+/*--
+  This file is a part of bzip2 and/or libbzip2, a program and
+  library for lossless, block-sorting data compression.
+
+  Copyright (C) 1996-2004 Julian R Seward.  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. The origin of this software must not be misrepresented; you must 
+     not claim that you wrote the original software.  If you use this 
+     software in a product, an acknowledgment in the product 
+     documentation would be appreciated but is not required.
+
+  3. Altered source versions must be plainly marked as such, and must
+     not be misrepresented as being the original software.
+
+  4. The name of the author may not be used to endorse or promote 
+     products derived from this software without specific prior written 
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  Julian Seward, Cambridge, UK.
+  jseward@bzip.org
+  bzip2/libbzip2 version 1.0 of 21 March 2000
+
+  This program is based on (at least) the work of:
+     Mike Burrows
+     David Wheeler
+     Peter Fenwick
+     Alistair Moffat
+     Radford Neal
+     Ian H. Witten
+     Robert Sedgewick
+     Jon L. Bentley
+
+  For more information on these sources, see the manual.
+--*/
+
+
+
+/*---------------------------------------------------*/
/* A Huffman node's weight and depth are packed into a single Int32:
   the (scaled) weight occupies the top 24 bits and the current tree
   depth the low 8 bits. */
#define WEIGHTOF(zz0)  ((zz0) & 0xffffff00)
#define DEPTHOF(zz1)   ((zz1) & 0x000000ff)
#define MYMAX(zz2,zz3) ((zz2) > (zz3) ? (zz2) : (zz3))

/* Combine two packed (weight,depth) values: the weights add, and the
   resulting depth is one more than the deeper of the two children. */
#define ADDWEIGHTS(zw1,zw2)                           \
   (WEIGHTOF(zw1)+WEIGHTOF(zw2)) |                    \
   (1 + MYMAX(DEPTHOF(zw1),DEPTHOF(zw2)))

/* Sift heap[z] up towards the root of the 1-based min-heap 'heap',
   ordered by 'weight'.  heap[0] is a sentinel with weight 0, so no
   explicit bounds check is needed at the root. */
#define UPHEAP(z)                                     \
{                                                     \
   Int32 zz, tmp;                                     \
   zz = z; tmp = heap[zz];                            \
   while (weight[tmp] < weight[heap[zz >> 1]]) {      \
      heap[zz] = heap[zz >> 1];                       \
      zz >>= 1;                                       \
   }                                                  \
   heap[zz] = tmp;                                    \
}

/* Sift heap[z] down: repeatedly swap with the smaller child until the
   heap property is restored.  nHeap is the current heap size. */
#define DOWNHEAP(z)                                   \
{                                                     \
   Int32 zz, yy, tmp;                                 \
   zz = z; tmp = heap[zz];                            \
   while (True) {                                     \
      yy = zz << 1;                                   \
      if (yy > nHeap) break;                          \
      if (yy < nHeap &&                               \
          weight[heap[yy+1]] < weight[heap[yy]])      \
         yy++;                                        \
      if (weight[tmp] < weight[heap[yy]]) break;      \
      heap[zz] = heap[yy];                            \
      zz = yy;                                        \
   }                                                  \
   heap[zz] = tmp;                                    \
}
+
+
+/*---------------------------------------------------*/
/* Compute Huffman code lengths for 'alphaSize' symbols with the given
   frequencies, writing the length of symbol i into len[i].  Lengths are
   forced to be <= maxLen by repeatedly halving (flattening) the
   frequencies and rebuilding the tree until every code fits. */
void BZ2_hbMakeCodeLengths ( UChar *len, 
                             Int32 *freq,
                             Int32 alphaSize,
                             Int32 maxLen )
{
   /*--
      Nodes and heap entries run from 1.  Entry 0
      for both the heap and nodes is a sentinel.
   --*/
   Int32 nNodes, nHeap, n1, n2, i, j, k;
   Bool  tooLong;

   Int32 heap   [ BZ_MAX_ALPHA_SIZE + 2 ];
   Int32 weight [ BZ_MAX_ALPHA_SIZE * 2 ];
   Int32 parent [ BZ_MAX_ALPHA_SIZE * 2 ]; 

   /* Pack each symbol's frequency into the top 24 bits (see WEIGHTOF);
      zero frequencies become 1 so that every symbol gets a code. */
   for (i = 0; i < alphaSize; i++)
      weight[i+1] = (freq[i] == 0 ? 1 : freq[i]) << 8;

   while (True) {

      nNodes = alphaSize;
      nHeap = 0;

      /* heap[0]/weight[0] is the min-heap sentinel used by UPHEAP. */
      heap[0] = 0;
      weight[0] = 0;
      parent[0] = -2;

      /* Seed the heap with all leaf nodes (1..alphaSize). */
      for (i = 1; i <= alphaSize; i++) {
         parent[i] = -1;
         nHeap++;
         heap[nHeap] = i;
         UPHEAP(nHeap);
      }

      AssertH( nHeap < (BZ_MAX_ALPHA_SIZE+2), 2001 );
   
      /* Standard Huffman construction: repeatedly pop the two lightest
         nodes, join them under a fresh internal node, push it back.
         ADDWEIGHTS also tracks tree depth in the low 8 bits. */
      while (nHeap > 1) {
         n1 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP(1);
         n2 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP(1);
         nNodes++;
         parent[n1] = parent[n2] = nNodes;
         weight[nNodes] = ADDWEIGHTS(weight[n1], weight[n2]);
         parent[nNodes] = -1;
         nHeap++;
         heap[nHeap] = nNodes;
         UPHEAP(nHeap);
      }

      AssertH( nNodes < (BZ_MAX_ALPHA_SIZE * 2), 2002 );

      /* Each symbol's code length is its depth: the number of parent
         links followed to reach the root. */
      tooLong = False;
      for (i = 1; i <= alphaSize; i++) {
         j = 0;
         k = i;
         while (parent[k] >= 0) { k = parent[k]; j++; }
         len[i-1] = j;
         if (j > maxLen) tooLong = True;
      }
      
      if (! tooLong) break;

      /* 17 Oct 04: keep-going condition for the following loop used
         to be 'i < alphaSize', which missed the last element,
         theoretically leading to the possibility of the compressor
         looping.  However, this count-scaling step is only needed if
         one of the generated Huffman code words is longer than
         maxLen, which up to and including version 1.0.2 was 20 bits,
         which is extremely unlikely.  In version 1.0.3 maxLen was
         changed to 17 bits, which has minimal effect on compression
         ratio, but does mean this scaling step is used from time to
         time, enough to verify that it works.

         This means that bzip2-1.0.3 and later will only produce
         Huffman codes with a maximum length of 17 bits.  However, in
         order to preserve backwards compatibility with bitstreams
         produced by versions pre-1.0.3, the decompressor must still
         handle lengths of up to 20. */

      /* Flatten the distribution (roughly halve each weight, keeping
         it >= 1) and rebuild; this shortens the longest codes. */
      for (i = 1; i <= alphaSize; i++) {
         j = weight[i] >> 8;
         j = 1 + (j / 2);
         weight[i] = j << 8;
      }
   }
}
+
+
+/*---------------------------------------------------*/
/* Assign canonical Huffman code values from the already-computed code
   lengths: for each length from minLen to maxLen, symbols of that
   length (in alphabet order) receive consecutive values, and the
   running counter doubles when moving to the next length. */
void BZ2_hbAssignCodes ( Int32 *code,
                         UChar *length,
                         Int32 minLen,
                         Int32 maxLen,
                         Int32 alphaSize )
{
   Int32 curLen, sym;
   Int32 nextCode = 0;

   for (curLen = minLen; curLen <= maxLen; curLen++) {
      for (sym = 0; sym < alphaSize; sym++) {
         if (length[sym] == curLen) {
            code[sym] = nextCode;
            nextCode++;
         }
      }
      nextCode <<= 1;
   }
}
+
+
+/*---------------------------------------------------*/
/* Build the decode tables used by the decompressor for canonical
   Huffman codes with lengths minLen..maxLen:
     perm  - maps code-rank order (shorter codes first, alphabet order
             within a length) back to symbol values;
     limit - limit[len] is the largest code value of that bit length;
     base  - rebased so that (code - base[len]) indexes into perm. */
void BZ2_hbCreateDecodeTables ( Int32 *limit,
                                Int32 *base,
                                Int32 *perm,
                                UChar *length,
                                Int32 minLen,
                                Int32 maxLen,
                                Int32 alphaSize )
{
   Int32 len, sym, nextPerm, vec;

   /* perm: rank -> symbol. */
   nextPerm = 0;
   for (len = minLen; len <= maxLen; len++) {
      for (sym = 0; sym < alphaSize; sym++) {
         if (length[sym] == len) {
            perm[nextPerm] = sym;
            nextPerm++;
         }
      }
   }

   /* base[len+1] counts symbols of each length, then prefix-sum so
      base[len] is the rank of the first code of that length. */
   for (len = 0; len < BZ_MAX_CODE_LEN; len++)
      base[len] = 0;
   for (sym = 0; sym < alphaSize; sym++)
      base[length[sym] + 1]++;
   for (len = 1; len < BZ_MAX_CODE_LEN; len++)
      base[len] += base[len - 1];

   /* limit[len]: largest canonical code value of each length. */
   for (len = 0; len < BZ_MAX_CODE_LEN; len++)
      limit[len] = 0;
   vec = 0;
   for (len = minLen; len <= maxLen; len++) {
      vec += (base[len + 1] - base[len]);
      limit[len] = vec - 1;
      vec <<= 1;
   }

   /* Rebase for the decoder's (code - base[len]) lookup. */
   for (len = minLen + 1; len <= maxLen; len++)
      base[len] = ((limit[len - 1] + 1) << 1) - base[len];
}
+
+
+/*-------------------------------------------------------------*/
+/*--- end                                         huffman.c ---*/
+/*-------------------------------------------------------------*/
+
+/*-------------------------------------------------------------*/
+/*--- Compression machinery (not incl block sorting)        ---*/
+/*---                                            compress.c ---*/
+/*-------------------------------------------------------------*/
+
+/*--
+  This file is a part of bzip2 and/or libbzip2, a program and
+  library for lossless, block-sorting data compression.
+
+  Copyright (C) 1996-2004 Julian R Seward.  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. The origin of this software must not be misrepresented; you must 
+     not claim that you wrote the original software.  If you use this 
+     software in a product, an acknowledgment in the product 
+     documentation would be appreciated but is not required.
+
+  3. Altered source versions must be plainly marked as such, and must
+     not be misrepresented as being the original software.
+
+  4. The name of the author may not be used to endorse or promote 
+     products derived from this software without specific prior written 
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  Julian Seward, Cambridge, UK.
+  jseward@bzip.org
+  bzip2/libbzip2 version 1.0 of 21 March 2000
+
+  This program is based on (at least) the work of:
+     Mike Burrows
+     David Wheeler
+     Peter Fenwick
+     Alistair Moffat
+     Radford Neal
+     Ian H. Witten
+     Robert Sedgewick
+     Jon L. Bentley
+
+  For more information on these sources, see the manual.
+--*/
+
+/*--
+   CHANGES
+   ~~~~~~~
+   0.9.0 -- original version.
+
+   0.9.0a/b -- no changes in this file.
+
+   0.9.0c
+      * changed setting of nGroups in sendMTFValues() so as to 
+        do a bit better on small files
+--*/
+
+
+
+/*---------------------------------------------------*/
+/*--- Bit stream I/O                              ---*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+void BZ2_bsInitWrite ( EState* s )
+{
+   s->bsLive = 0;
+   s->bsBuff = 0;
+}
+
+
+/*---------------------------------------------------*/
+static
+void bsFinishWrite ( EState* s )
+{
+   while (s->bsLive > 0) {
+      s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
+      s->numZ++;
+      s->bsBuff <<= 8;
+      s->bsLive -= 8;
+   }
+}
+
+
+/*---------------------------------------------------*/
/* Drain whole bytes from the staging buffer into s->zbits until fewer
   than 8 live bits remain, making room for the next write.  The 'nz'
   argument is not used by the expansion. */
#define bsNEEDW(nz)                           \
{                                             \
   while (s->bsLive >= 8) {                   \
      s->zbits[s->numZ]                       \
         = (UChar)(s->bsBuff >> 24);          \
      s->numZ++;                              \
      s->bsBuff <<= 8;                        \
      s->bsLive -= 8;                         \
   }                                          \
}
+
+
+/*---------------------------------------------------*/
/* Append the n least-significant bits of v to the output stream, most
   significant bit first.  bsBuff holds pending bits left-aligned in 32
   bits and bsLive says how many are valid; bsNEEDW first drains bsLive
   below 8, so the new bits fit provided n is small (callers in this
   file pass at most 24) and v has no bits above bit n-1. */
static
__inline__
void bsW ( EState* s, Int32 n, UInt32 v )
{
   bsNEEDW ( n );
   s->bsBuff |= (v << (32 - s->bsLive - n));
   s->bsLive += n;
}
+
+
+/*---------------------------------------------------*/
+static
+void bsPutUInt32 ( EState* s, UInt32 u )
+{
+   bsW ( s, 8, (u >> 24) & 0xffL );
+   bsW ( s, 8, (u >> 16) & 0xffL );
+   bsW ( s, 8, (u >>  8) & 0xffL );
+   bsW ( s, 8,  u        & 0xffL );
+}
+
+
+/*---------------------------------------------------*/
+static
+void bsPutUChar ( EState* s, UChar c )
+{
+   bsW( s, 8, (UInt32)c );
+}
+
+
+/*---------------------------------------------------*/
+/*--- The back end proper                         ---*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+static
+void makeMaps_e ( EState* s )
+{
+   Int32 i;
+   s->nInUse = 0;
+   for (i = 0; i < 256; i++)
+      if (s->inUse[i]) {
+         s->unseqToSeq[i] = s->nInUse;
+         s->nInUse++;
+      }
+}
+
+
+/*---------------------------------------------------*/
/* Apply the move-to-front transform, with run-length coding of zero
   runs (RUNA/RUNB), to the sorted block, producing s->nMTF values in
   s->mtfv and their frequencies in s->mtfFreq.  An EOB symbol is
   appended. */
static
void generateMTFValues ( EState* s )
{
   UChar   yy[256];   /* the MTF list of in-use symbol numbers */
   Int32   i, j;
   Int32   zPend;     /* length of the pending run of MTF zeroes */
   Int32   wr;        /* write index into mtfv */
   Int32   EOB;       /* end-of-block symbol value = nInUse+1 */

   /* 
      After sorting (eg, here),
         s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
         and
         ((UChar*)s->arr2) [ 0 .. s->nblock-1 ] 
         holds the original block data.

      The first thing to do is generate the MTF values,
      and put them in
         ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].
      Because there are strictly fewer or equal MTF values
      than block values, ptr values in this area are overwritten
      with MTF values only when they are no longer needed.

      The final compressed bitstream is generated into the
      area starting at
         (UChar*) (&((UChar*)s->arr2)[s->nblock])

      These storage aliases are set up in bzCompressInit(),
      except for the last one, which is arranged in 
      compressBlock().
   */
   UInt32* ptr   = s->ptr;
   UChar* block  = s->block;
   UInt16* mtfv  = s->mtfv;

   makeMaps_e ( s );
   EOB = s->nInUse+1;

   for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0;

   wr = 0;
   zPend = 0;
   for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i;

   for (i = 0; i < s->nblock; i++) {
      UChar ll_i;
      AssertD ( wr <= i, "generateMTFValues(1)" );
      /* The i'th MTF input is the byte preceding rotation ptr[i],
         i.e. the last column of the BWT matrix. */
      j = ptr[i]-1; if (j < 0) j += s->nblock;
      ll_i = s->unseqToSeq[block[j]];
      AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" );

      if (yy[0] == ll_i) { 
         /* MTF value would be 0: extend the pending zero run. */
         zPend++;
      } else {

         /* Flush the pending zero run, encoded in bijective base 2
            using the two symbols RUNA (digit 1) and RUNB (digit 2). */
         if (zPend > 0) {
            zPend--;
            while (True) {
               if (zPend & 1) {
                  mtfv[wr] = BZ_RUNB; wr++; 
                  s->mtfFreq[BZ_RUNB]++; 
               } else {
                  mtfv[wr] = BZ_RUNA; wr++; 
                  s->mtfFreq[BZ_RUNA]++; 
               }
               if (zPend < 2) break;
               zPend = (zPend - 2) / 2;
            };
            zPend = 0;
         }
         /* Find ll_i in the MTF list, shifting everything before it
            down one place, and move it to the front.  The emitted MTF
            value is its old position + 1 (0 is reserved for runs). */
         {
            register UChar  rtmp;
            register UChar* ryy_j;
            register UChar  rll_i;
            rtmp  = yy[1];
            yy[1] = yy[0];
            ryy_j = &(yy[1]);
            rll_i = ll_i;
            while ( rll_i != rtmp ) {
               register UChar rtmp2;
               ryy_j++;
               rtmp2  = rtmp;
               rtmp   = *ryy_j;
               *ryy_j = rtmp2;
            };
            yy[0] = rtmp;
            j = ryy_j - &(yy[0]);
            mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++;
         }

      }
   }

   /* Flush any zero run still pending at end of block. */
   if (zPend > 0) {
      zPend--;
      while (True) {
         if (zPend & 1) {
            mtfv[wr] = BZ_RUNB; wr++; 
            s->mtfFreq[BZ_RUNB]++; 
         } else {
            mtfv[wr] = BZ_RUNA; wr++; 
            s->mtfFreq[BZ_RUNA]++; 
         }
         if (zPend < 2) break;
         zPend = (zPend - 2) / 2;
      };
      zPend = 0;
   }

   mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++;

   s->nMTF = wr;
}
+
+
+/*---------------------------------------------------*/
/* Pseudo bit-costs used to seed the initial coding tables: symbols
   inside a table's assigned range are "cheap", all others "dear". */
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

/* Huffman-code the MTF/RLE2 values in s->mtfv[0 .. s->nMTF-1] and emit
   the coded block body: the symbol-map, the group selectors, the delta-
   coded code-length tables, and finally the coded data itself.  Uses
   up to 6 coding tables, refined over BZ_N_ITERS greedy passes. */
static
void sendMTFValues ( EState* s )
{
   Int32 v, t, i, j, gs, ge, totc, bt, bc, iter;
   Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
   Int32 nGroups, nBytes;

   /*--
   UChar  len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   is a global since the decoder also needs it.

   Int32  code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   Int32  rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   are also globals only used in this proc.
   Made global to keep stack frame size small.
   --*/


   UInt16 cost[BZ_N_GROUPS];
   Int32  fave[BZ_N_GROUPS];

   UInt16* mtfv = s->mtfv;

   if (s->verbosity >= 3)
      VPrintf3( "      %d in block, %d after MTF & 1-2 coding, "
                "%d+2 syms in use\n", 
                s->nblock, s->nMTF, s->nInUse );

   /* +2 for the RUNA/RUNB pair and the EOB symbol. */
   alphaSize = s->nInUse+2;
   for (t = 0; t < BZ_N_GROUPS; t++)
      for (v = 0; v < alphaSize; v++)
         s->len[t][v] = BZ_GREATER_ICOST;

   /*--- Decide how many coding tables to use ---*/
   AssertH ( s->nMTF > 0, 3001 );
   if (s->nMTF < 200)  nGroups = 2; else
   if (s->nMTF < 600)  nGroups = 3; else
   if (s->nMTF < 1200) nGroups = 4; else
   if (s->nMTF < 2400) nGroups = 5; else
                       nGroups = 6;

   /*--- Generate an initial set of coding tables ---*/
   /* Partition the alphabet into nGroups contiguous ranges of roughly
      equal total frequency; each table is "cheap" on its own range. */
   { 
      Int32 nPart, remF, tFreq, aFreq;

      nPart = nGroups;
      remF  = s->nMTF;
      gs = 0;
      while (nPart > 0) {
         tFreq = remF / nPart;
         ge = gs-1;
         aFreq = 0;
         while (aFreq < tFreq && ge < alphaSize-1) {
            ge++;
            aFreq += s->mtfFreq[ge];
         }

         /* Pull the boundary back on alternate partitions so adjacent
            ranges don't all overshoot in the same direction. */
         if (ge > gs 
             && nPart != nGroups && nPart != 1 
             && ((nGroups-nPart) % 2 == 1)) {
            aFreq -= s->mtfFreq[ge];
            ge--;
         }

         if (0 && s->verbosity >= 3)   /* disabled diagnostic */
            VPrintf5( "      initial group %d, [%d .. %d], "
                      "has %d syms (%4.1f%%)\n",
                      nPart, gs, ge, aFreq, 
                      (100.0 * (float)aFreq) / (float)(s->nMTF) );
 
         for (v = 0; v < alphaSize; v++)
            if (v >= gs && v <= ge) 
               s->len[nPart-1][v] = BZ_LESSER_ICOST; else
               s->len[nPart-1][v] = BZ_GREATER_ICOST;
 
         nPart--;
         gs = ge+1;
         remF -= aFreq;
      }
   }

   /*--- 
      Iterate up to BZ_N_ITERS times to improve the tables.
   ---*/
   for (iter = 0; iter < BZ_N_ITERS; iter++) {

      for (t = 0; t < nGroups; t++) fave[t] = 0;

      for (t = 0; t < nGroups; t++)
         for (v = 0; v < alphaSize; v++)
            s->rfreq[t][v] = 0;

      /*---
        Set up an auxiliary length table which is used to fast-track
        the common case (nGroups == 6): each len_pack entry packs two
        tables' lengths into one 32-bit word.
      ---*/
      if (nGroups == 6) {
         for (v = 0; v < alphaSize; v++) {
            s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
            s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
            s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
         }
      }

      nSelectors = 0;
      totc = 0;
      gs = 0;
      while (True) {

         /*--- Set group start & end marks. --*/
         if (gs >= s->nMTF) break;
         ge = gs + BZ_G_SIZE - 1; 
         if (ge >= s->nMTF) ge = s->nMTF-1;

         /*-- 
            Calculate the cost of this group as coded
            by each of the coding tables.
         --*/
         for (t = 0; t < nGroups; t++) cost[t] = 0;

         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            register UInt32 cost01, cost23, cost45;
            register UInt16 icv;
            cost01 = cost23 = cost45 = 0;

            /* Accumulates two tables' costs per 32-bit add. */
#           define BZ_ITER(nn)                \
               icv = mtfv[gs+(nn)];           \
               cost01 += s->len_pack[icv][0]; \
               cost23 += s->len_pack[icv][1]; \
               cost45 += s->len_pack[icv][2]; \

            BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
            BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
            BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
            BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
            BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
            BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
            BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
            BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
            BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
            BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);

#           undef BZ_ITER

            cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
            cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
            cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++) { 
               UInt16 icv = mtfv[i];
               for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv];
            }
         }
 
         /*-- 
            Find the coding table which is best for this group,
            and record its identity in the selector table.
         --*/
         bc = 999999999; bt = -1;
         for (t = 0; t < nGroups; t++)
            if (cost[t] < bc) { bc = cost[t]; bt = t; };
         totc += bc;
         fave[bt]++;
         s->selector[nSelectors] = bt;
         nSelectors++;

         /*-- 
            Increment the symbol frequencies for the selected table.
          --*/
         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/

#           define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++

            BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
            BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
            BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
            BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
            BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
            BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
            BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
            BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
            BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
            BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);

#           undef BZ_ITUR

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++)
               s->rfreq[bt][ mtfv[i] ]++;
         }

         gs = ge+1;
      }
      if (s->verbosity >= 3) {
         VPrintf2 ( "      pass %d: size is %d, grp uses are ", 
                   iter+1, totc/8 );
         for (t = 0; t < nGroups; t++)
            VPrintf1 ( "%d ", fave[t] );
         VPrintf0 ( "\n" );
      }

      /*--
        Recompute the tables based on the accumulated frequencies.
      --*/
      /* maxLen was changed from 20 to 17 in bzip2-1.0.3.  See 
         comment in huffman.c for details. */
      for (t = 0; t < nGroups; t++)
         BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]), 
                                 alphaSize, 17 /*20*/ );
   }


   AssertH( nGroups < 8, 3002 );
   AssertH( nSelectors < 32768 &&
            nSelectors <= (2 + (900000 / BZ_G_SIZE)),
            3003 );


   /*--- Compute MTF values for the selectors. ---*/
   {
      UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
      for (i = 0; i < nGroups; i++) pos[i] = i;
      for (i = 0; i < nSelectors; i++) {
         ll_i = s->selector[i];
         j = 0;
         tmp = pos[j];
         while ( ll_i != tmp ) {
            j++;
            tmp2 = tmp;
            tmp = pos[j];
            pos[j] = tmp2;
         };
         pos[0] = tmp;
         s->selectorMtf[i] = j;
      }
   };

   /*--- Assign actual codes for the tables. --*/
   for (t = 0; t < nGroups; t++) {
      minLen = 32;
      maxLen = 0;
      for (i = 0; i < alphaSize; i++) {
         if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
         if (s->len[t][i] < minLen) minLen = s->len[t][i];
      }
      AssertH ( !(maxLen > 17 /*20*/ ), 3004 );
      AssertH ( !(minLen < 1),  3005 );
      BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]), 
                          minLen, maxLen, alphaSize );
   }

   /*--- Transmit the mapping table. ---*/
   /* Two-level bitmap: 16 bits say which 16-value ranges are used,
      then one 16-bit map per used range. */
   { 
      Bool inUse16[16];
      for (i = 0; i < 16; i++) {
          inUse16[i] = False;
          for (j = 0; j < 16; j++)
             if (s->inUse[i * 16 + j]) inUse16[i] = True;
      }
     
      nBytes = s->numZ;
      for (i = 0; i < 16; i++)
         if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0);

      for (i = 0; i < 16; i++)
         if (inUse16[i])
            for (j = 0; j < 16; j++) {
               if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0);
            }

      if (s->verbosity >= 3) 
         VPrintf1( "      bytes: mapping %d, ", s->numZ-nBytes );
   }

   /*--- Now the selectors. ---*/
   /* Each selector-MTF value is sent in unary: j ones then a zero. */
   nBytes = s->numZ;
   bsW ( s, 3, nGroups );
   bsW ( s, 15, nSelectors );
   for (i = 0; i < nSelectors; i++) { 
      for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1);
      bsW(s,1,0);
   }
   if (s->verbosity >= 3)
      VPrintf1( "selectors %d, ", s->numZ-nBytes );

   /*--- Now the coding tables. ---*/
   /* 5-bit starting length, then each symbol's length as a series of
      +1 ('10') / -1 ('11') adjustments terminated by a 0 bit. */
   nBytes = s->numZ;

   for (t = 0; t < nGroups; t++) {
      Int32 curr = s->len[t][0];
      bsW ( s, 5, curr );
      for (i = 0; i < alphaSize; i++) {
         while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ };
         while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ };
         bsW ( s, 1, 0 );
      }
   }

   if (s->verbosity >= 3)
      VPrintf1 ( "code lengths %d, ", s->numZ-nBytes );

   /*--- And finally, the block data proper ---*/
   nBytes = s->numZ;
   selCtr = 0;
   gs = 0;
   while (True) {
      if (gs >= s->nMTF) break;
      ge = gs + BZ_G_SIZE - 1; 
      if (ge >= s->nMTF) ge = s->nMTF-1;
      AssertH ( s->selector[selCtr] < nGroups, 3006 );

      if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            UInt16 mtfv_i;
            UChar* s_len_sel_selCtr 
               = &(s->len[s->selector[selCtr]][0]);
            Int32* s_code_sel_selCtr
               = &(s->code[s->selector[selCtr]][0]);

#           define BZ_ITAH(nn)                      \
               mtfv_i = mtfv[gs+(nn)];              \
               bsW ( s,                             \
                     s_len_sel_selCtr[mtfv_i],      \
                     s_code_sel_selCtr[mtfv_i] )

            BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
            BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
            BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
            BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
            BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
            BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
            BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
            BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
            BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
            BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);

#           undef BZ_ITAH

      } else {
         /*--- slow version which correctly handles all situations ---*/
         for (i = gs; i <= ge; i++) {
            bsW ( s, 
                  s->len  [s->selector[selCtr]] [mtfv[i]],
                  s->code [s->selector[selCtr]] [mtfv[i]] );
         }
      }


      gs = ge+1;
      selCtr++;
   }
   AssertH( selCtr == nSelectors, 3007 );

   if (s->verbosity >= 3)
      VPrintf1( "codes %d\n", s->numZ-nBytes );
}
+
+
+/*---------------------------------------------------*/
/* Compress the data accumulated in s->block (s->nblock bytes) and
   append the resulting bit stream at s->zbits.  Emits the stream
   header before the first block and the stream trailer after the
   last one (is_last_block).  An empty block (nblock == 0) produces
   no block data, but header/trailer emission still happens. */
void BZ2_compressBlock ( EState* s, Bool is_last_block )
{
   if (s->nblock > 0) {

      BZ_FINALISE_CRC ( s->blockCRC );
      /* Fold this block's CRC into the stream-wide combined CRC:
         rotate-left-by-1, then XOR. */
      s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
      s->combinedCRC ^= s->blockCRC;
      /* Blocks after the first restart the compressed-byte counter. */
      if (s->blockNo > 1) s->numZ = 0;

      if (s->verbosity >= 2)
         VPrintf4( "    block %d: crc = 0x%08x, "
                   "combined CRC = 0x%08x, size = %d\n",
                   s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );

      BZ2_blockSort ( s );
   }

   /* Compressed output goes just past the block data inside arr2. */
   s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);

   /*-- If this is the first block, create the stream header. --*/
   if (s->blockNo == 1) {
      BZ2_bsInitWrite ( s );
      /* Stream magic: 'B' 'Z' 'h' plus the block-size digit. */
      bsPutUChar ( s, BZ_HDR_B );
      bsPutUChar ( s, BZ_HDR_Z );
      bsPutUChar ( s, BZ_HDR_h );
      bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
   }

   if (s->nblock > 0) {

      /* Six-byte block magic. */
      bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
      bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
      bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );

      /*-- Now the block's CRC, so it is in a known place. --*/
      bsPutUInt32 ( s, s->blockCRC );

      /*-- 
         Now a single bit indicating (non-)randomisation. 
         As of version 0.9.5, we use a better sorting algorithm
         which makes randomisation unnecessary.  So always set
         the randomised bit to 'no'.  Of course, the decoder
         still needs to be able to handle randomised blocks
         so as to maintain backwards compatibility with
         older versions of bzip2.
      --*/
      bsW(s,1,0);

      /* BWT origin pointer, then the MTF + Huffman coding stages. */
      bsW ( s, 24, s->origPtr );
      generateMTFValues ( s );
      sendMTFValues ( s );
   }


   /*-- If this is the last block, add the stream trailer. --*/
   if (is_last_block) {

      /* Six-byte end-of-stream magic, then the combined CRC. */
      bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
      bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
      bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
      bsPutUInt32 ( s, s->combinedCRC );
      if (s->verbosity >= 2)
         VPrintf1( "    final combined CRC = 0x%08x\n   ", s->combinedCRC );
      bsFinishWrite ( s );
   }
}
+
+
+/*-------------------------------------------------------------*/
+/*--- end                                        compress.c ---*/
+/*-------------------------------------------------------------*/
+
+
+/*-------------------------------------------------------------*/
+/*--- Table for randomising repetitive blocks               ---*/
+/*---                                           randtable.c ---*/
+/*-------------------------------------------------------------*/
+
+/*--
+  This file is a part of bzip2 and/or libbzip2, a program and
+  library for lossless, block-sorting data compression.
+
+  Copyright (C) 1996-2004 Julian R Seward.  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. The origin of this software must not be misrepresented; you must 
+     not claim that you wrote the original software.  If you use this 
+     software in a product, an acknowledgment in the product 
+     documentation would be appreciated but is not required.
+
+  3. Altered source versions must be plainly marked as such, and must
+     not be misrepresented as being the original software.
+
+  4. The name of the author may not be used to endorse or promote 
+     products derived from this software without specific prior written 
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  Julian Seward, Cambridge, UK.
+  jseward@bzip.org
+  bzip2/libbzip2 version 1.0 of 21 March 2000
+
+  This program is based on (at least) the work of:
+     Mike Burrows
+     David Wheeler
+     Peter Fenwick
+     Alistair Moffat
+     Radford Neal
+     Ian H. Witten
+     Robert Sedgewick
+     Jon L. Bentley
+
+  For more information on these sources, see the manual.
+--*/
+
+
+
+
+/*---------------------------------------------*/
/* 512 fixed pseudo-random values (all between 50 and 999), used by
   the BZ_RAND_* machinery to de-randomise legacy "randomised" blocks
   (see the randomisation note in BZ2_compressBlock).  The values are
   part of the bzip2 file-format compatibility contract and must not
   be changed. */
Int32 BZ2_rNums[512] = { 
   619, 720, 127, 481, 931, 816, 813, 233, 566, 247, 
   985, 724, 205, 454, 863, 491, 741, 242, 949, 214, 
   733, 859, 335, 708, 621, 574, 73, 654, 730, 472, 
   419, 436, 278, 496, 867, 210, 399, 680, 480, 51, 
   878, 465, 811, 169, 869, 675, 611, 697, 867, 561, 
   862, 687, 507, 283, 482, 129, 807, 591, 733, 623, 
   150, 238, 59, 379, 684, 877, 625, 169, 643, 105, 
   170, 607, 520, 932, 727, 476, 693, 425, 174, 647, 
   73, 122, 335, 530, 442, 853, 695, 249, 445, 515, 
   909, 545, 703, 919, 874, 474, 882, 500, 594, 612, 
   641, 801, 220, 162, 819, 984, 589, 513, 495, 799, 
   161, 604, 958, 533, 221, 400, 386, 867, 600, 782, 
   382, 596, 414, 171, 516, 375, 682, 485, 911, 276, 
   98, 553, 163, 354, 666, 933, 424, 341, 533, 870, 
   227, 730, 475, 186, 263, 647, 537, 686, 600, 224, 
   469, 68, 770, 919, 190, 373, 294, 822, 808, 206, 
   184, 943, 795, 384, 383, 461, 404, 758, 839, 887, 
   715, 67, 618, 276, 204, 918, 873, 777, 604, 560, 
   951, 160, 578, 722, 79, 804, 96, 409, 713, 940, 
   652, 934, 970, 447, 318, 353, 859, 672, 112, 785, 
   645, 863, 803, 350, 139, 93, 354, 99, 820, 908, 
   609, 772, 154, 274, 580, 184, 79, 626, 630, 742, 
   653, 282, 762, 623, 680, 81, 927, 626, 789, 125, 
   411, 521, 938, 300, 821, 78, 343, 175, 128, 250, 
   170, 774, 972, 275, 999, 639, 495, 78, 352, 126, 
   857, 956, 358, 619, 580, 124, 737, 594, 701, 612, 
   669, 112, 134, 694, 363, 992, 809, 743, 168, 974, 
   944, 375, 748, 52, 600, 747, 642, 182, 862, 81, 
   344, 805, 988, 739, 511, 655, 814, 334, 249, 515, 
   897, 955, 664, 981, 649, 113, 974, 459, 893, 228, 
   433, 837, 553, 268, 926, 240, 102, 654, 459, 51, 
   686, 754, 806, 760, 493, 403, 415, 394, 687, 700, 
   946, 670, 656, 610, 738, 392, 760, 799, 887, 653, 
   978, 321, 576, 617, 626, 502, 894, 679, 243, 440, 
   680, 879, 194, 572, 640, 724, 926, 56, 204, 700, 
   707, 151, 457, 449, 797, 195, 791, 558, 945, 679, 
   297, 59, 87, 824, 713, 663, 412, 693, 342, 606, 
   134, 108, 571, 364, 631, 212, 174, 643, 304, 329, 
   343, 97, 430, 751, 497, 314, 983, 374, 822, 928, 
   140, 206, 73, 263, 980, 736, 876, 478, 430, 305, 
   170, 514, 364, 692, 829, 82, 855, 953, 676, 246, 
   369, 970, 294, 750, 807, 827, 150, 790, 288, 923, 
   804, 378, 215, 828, 592, 281, 565, 555, 710, 82, 
   896, 831, 547, 261, 524, 462, 293, 465, 502, 56, 
   661, 821, 976, 991, 658, 869, 905, 758, 745, 193, 
   768, 550, 608, 933, 378, 286, 215, 979, 792, 961, 
   61, 688, 793, 644, 986, 403, 106, 366, 905, 644, 
   372, 567, 466, 434, 645, 210, 389, 550, 919, 135, 
   780, 773, 635, 389, 707, 100, 626, 958, 165, 504, 
   920, 176, 193, 713, 857, 265, 203, 50, 668, 108, 
   645, 990, 626, 197, 510, 357, 358, 850, 858, 364, 
   936, 638
};
+
+
+/*-------------------------------------------------------------*/
+/*--- end                                       randtable.c ---*/
+/*-------------------------------------------------------------*/
+
+/*-------------------------------------------------------------*/
+/*--- Table for doing CRCs                                  ---*/
+/*---                                            crctable.c ---*/
+/*-------------------------------------------------------------*/
+
+/*--
+  This file is a part of bzip2 and/or libbzip2, a program and
+  library for lossless, block-sorting data compression.
+
+  Copyright (C) 1996-2004 Julian R Seward.  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. The origin of this software must not be misrepresented; you must 
+     not claim that you wrote the original software.  If you use this 
+     software in a product, an acknowledgment in the product 
+     documentation would be appreciated but is not required.
+
+  3. Altered source versions must be plainly marked as such, and must
+     not be misrepresented as being the original software.
+
+  4. The name of the author may not be used to endorse or promote 
+     products derived from this software without specific prior written 
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  Julian Seward, Cambridge, UK.
+  jseward@bzip.org
+  bzip2/libbzip2 version 1.0 of 21 March 2000
+
+  This program is based on (at least) the work of:
+     Mike Burrows
+     David Wheeler
+     Peter Fenwick
+     Alistair Moffat
+     Radford Neal
+     Ian H. Witten
+     Robert Sedgewick
+     Jon L. Bentley
+
+  For more information on these sources, see the manual.
+--*/
+
+
+
+
+
+/*--
+  I think this is an implementation of the AUTODIN-II,
+  Ethernet & FDDI 32-bit CRC standard.  Vaguely derived
+  from code by Rob Warnock, in Section 51 of the
+  comp.compression FAQ.
+--*/
+
/* 256-entry lookup table driving the 32-bit CRC used for per-block
   and combined stream checksums.  Per the note above, this follows
   the AUTODIN-II / Ethernet / FDDI CRC-32 polynomial.  The contents
   are fixed by the bzip2 file format; do not modify. */
UInt32 BZ2_crc32Table[256] = {

   /*-- Ugly, innit? --*/

   0x00000000L, 0x04c11db7L, 0x09823b6eL, 0x0d4326d9L,
   0x130476dcL, 0x17c56b6bL, 0x1a864db2L, 0x1e475005L,
   0x2608edb8L, 0x22c9f00fL, 0x2f8ad6d6L, 0x2b4bcb61L,
   0x350c9b64L, 0x31cd86d3L, 0x3c8ea00aL, 0x384fbdbdL,
   0x4c11db70L, 0x48d0c6c7L, 0x4593e01eL, 0x4152fda9L,
   0x5f15adacL, 0x5bd4b01bL, 0x569796c2L, 0x52568b75L,
   0x6a1936c8L, 0x6ed82b7fL, 0x639b0da6L, 0x675a1011L,
   0x791d4014L, 0x7ddc5da3L, 0x709f7b7aL, 0x745e66cdL,
   0x9823b6e0L, 0x9ce2ab57L, 0x91a18d8eL, 0x95609039L,
   0x8b27c03cL, 0x8fe6dd8bL, 0x82a5fb52L, 0x8664e6e5L,
   0xbe2b5b58L, 0xbaea46efL, 0xb7a96036L, 0xb3687d81L,
   0xad2f2d84L, 0xa9ee3033L, 0xa4ad16eaL, 0xa06c0b5dL,
   0xd4326d90L, 0xd0f37027L, 0xddb056feL, 0xd9714b49L,
   0xc7361b4cL, 0xc3f706fbL, 0xceb42022L, 0xca753d95L,
   0xf23a8028L, 0xf6fb9d9fL, 0xfbb8bb46L, 0xff79a6f1L,
   0xe13ef6f4L, 0xe5ffeb43L, 0xe8bccd9aL, 0xec7dd02dL,
   0x34867077L, 0x30476dc0L, 0x3d044b19L, 0x39c556aeL,
   0x278206abL, 0x23431b1cL, 0x2e003dc5L, 0x2ac12072L,
   0x128e9dcfL, 0x164f8078L, 0x1b0ca6a1L, 0x1fcdbb16L,
   0x018aeb13L, 0x054bf6a4L, 0x0808d07dL, 0x0cc9cdcaL,
   0x7897ab07L, 0x7c56b6b0L, 0x71159069L, 0x75d48ddeL,
   0x6b93dddbL, 0x6f52c06cL, 0x6211e6b5L, 0x66d0fb02L,
   0x5e9f46bfL, 0x5a5e5b08L, 0x571d7dd1L, 0x53dc6066L,
   0x4d9b3063L, 0x495a2dd4L, 0x44190b0dL, 0x40d816baL,
   0xaca5c697L, 0xa864db20L, 0xa527fdf9L, 0xa1e6e04eL,
   0xbfa1b04bL, 0xbb60adfcL, 0xb6238b25L, 0xb2e29692L,
   0x8aad2b2fL, 0x8e6c3698L, 0x832f1041L, 0x87ee0df6L,
   0x99a95df3L, 0x9d684044L, 0x902b669dL, 0x94ea7b2aL,
   0xe0b41de7L, 0xe4750050L, 0xe9362689L, 0xedf73b3eL,
   0xf3b06b3bL, 0xf771768cL, 0xfa325055L, 0xfef34de2L,
   0xc6bcf05fL, 0xc27dede8L, 0xcf3ecb31L, 0xcbffd686L,
   0xd5b88683L, 0xd1799b34L, 0xdc3abdedL, 0xd8fba05aL,
   0x690ce0eeL, 0x6dcdfd59L, 0x608edb80L, 0x644fc637L,
   0x7a089632L, 0x7ec98b85L, 0x738aad5cL, 0x774bb0ebL,
   0x4f040d56L, 0x4bc510e1L, 0x46863638L, 0x42472b8fL,
   0x5c007b8aL, 0x58c1663dL, 0x558240e4L, 0x51435d53L,
   0x251d3b9eL, 0x21dc2629L, 0x2c9f00f0L, 0x285e1d47L,
   0x36194d42L, 0x32d850f5L, 0x3f9b762cL, 0x3b5a6b9bL,
   0x0315d626L, 0x07d4cb91L, 0x0a97ed48L, 0x0e56f0ffL,
   0x1011a0faL, 0x14d0bd4dL, 0x19939b94L, 0x1d528623L,
   0xf12f560eL, 0xf5ee4bb9L, 0xf8ad6d60L, 0xfc6c70d7L,
   0xe22b20d2L, 0xe6ea3d65L, 0xeba91bbcL, 0xef68060bL,
   0xd727bbb6L, 0xd3e6a601L, 0xdea580d8L, 0xda649d6fL,
   0xc423cd6aL, 0xc0e2d0ddL, 0xcda1f604L, 0xc960ebb3L,
   0xbd3e8d7eL, 0xb9ff90c9L, 0xb4bcb610L, 0xb07daba7L,
   0xae3afba2L, 0xaafbe615L, 0xa7b8c0ccL, 0xa379dd7bL,
   0x9b3660c6L, 0x9ff77d71L, 0x92b45ba8L, 0x9675461fL,
   0x8832161aL, 0x8cf30badL, 0x81b02d74L, 0x857130c3L,
   0x5d8a9099L, 0x594b8d2eL, 0x5408abf7L, 0x50c9b640L,
   0x4e8ee645L, 0x4a4ffbf2L, 0x470cdd2bL, 0x43cdc09cL,
   0x7b827d21L, 0x7f436096L, 0x7200464fL, 0x76c15bf8L,
   0x68860bfdL, 0x6c47164aL, 0x61043093L, 0x65c52d24L,
   0x119b4be9L, 0x155a565eL, 0x18197087L, 0x1cd86d30L,
   0x029f3d35L, 0x065e2082L, 0x0b1d065bL, 0x0fdc1becL,
   0x3793a651L, 0x3352bbe6L, 0x3e119d3fL, 0x3ad08088L,
   0x2497d08dL, 0x2056cd3aL, 0x2d15ebe3L, 0x29d4f654L,
   0xc5a92679L, 0xc1683bceL, 0xcc2b1d17L, 0xc8ea00a0L,
   0xd6ad50a5L, 0xd26c4d12L, 0xdf2f6bcbL, 0xdbee767cL,
   0xe3a1cbc1L, 0xe760d676L, 0xea23f0afL, 0xeee2ed18L,
   0xf0a5bd1dL, 0xf464a0aaL, 0xf9278673L, 0xfde69bc4L,
   0x89b8fd09L, 0x8d79e0beL, 0x803ac667L, 0x84fbdbd0L,
   0x9abc8bd5L, 0x9e7d9662L, 0x933eb0bbL, 0x97ffad0cL,
   0xafb010b1L, 0xab710d06L, 0xa6322bdfL, 0xa2f33668L,
   0xbcb4666dL, 0xb8757bdaL, 0xb5365d03L, 0xb1f740b4L
};
+
+
+/*-------------------------------------------------------------*/
+/*--- end                                        crctable.c ---*/
+/*-------------------------------------------------------------*/
+
+/*-------------------------------------------------------------*/
+/*--- Library top-level functions.                          ---*/
+/*---                                               bzlib.c ---*/
+/*-------------------------------------------------------------*/
+
+/*--
+  This file is a part of bzip2 and/or libbzip2, a program and
+  library for lossless, block-sorting data compression.
+
+  Copyright (C) 1996-2004 Julian R Seward.  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+  1. Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+
+  2. The origin of this software must not be misrepresented; you must 
+     not claim that you wrote the original software.  If you use this 
+     software in a product, an acknowledgment in the product 
+     documentation would be appreciated but is not required.
+
+  3. Altered source versions must be plainly marked as such, and must
+     not be misrepresented as being the original software.
+
+  4. The name of the author may not be used to endorse or promote 
+     products derived from this software without specific prior written 
+     permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+  Julian Seward, Cambridge, UK.
+  jseward@bzip.org
+  bzip2/libbzip2 version 1.0 of 21 March 2000
+
+  This program is based on (at least) the work of:
+     Mike Burrows
+     David Wheeler
+     Peter Fenwick
+     Alistair Moffat
+     Radford Neal
+     Ian H. Witten
+     Robert Sedgewick
+     Jon L. Bentley
+
+  For more information on these sources, see the manual.
+--*/
+
+/*--
+   CHANGES
+   ~~~~~~~
+   0.9.0 -- original version.
+
+   0.9.0a/b -- no changes in this file.
+
+   0.9.0c
+      * made zero-length BZ_FLUSH work correctly in bzCompress().
+      * fixed bzWrite/bzRead to ignore zero-length requests.
+      * fixed bzread to correctly handle read requests after EOF.
+      * wrong parameter order in call to bzDecompressInit in
+        bzBuffToBuffDecompress.  Fixed.
+--*/
+
+
+
+/*---------------------------------------------------*/
+/*--- Compression stuff                           ---*/
+/*---------------------------------------------------*/
+
+
+/*---------------------------------------------------*/
/* Called when an AssertH check in this embedded bzip2 copy fails.
   Prints the numeric error code, then calls serviceFn(0,0) --
   presumably a terminate request to the host; NOTE(review): confirm
   serviceFn's request-code semantics. */
void BZ2_bz__AssertH__fail ( int errcode )
{
   vexxx_printf("BZ2_bz__AssertH__fail(%d) called, exiting\n", errcode);
   (*serviceFn)(0,0);
}
+
+void bz_internal_error ( int errcode )
+{
+   vexxx_printf("bz_internal_error called, exiting\n", errcode);
+   (*serviceFn)(0,0);
+}
+
+/*---------------------------------------------------*/
static
int bz_config_ok ( void )
{
   /* Sanity-check the primitive type sizes this code depends on:
      4-byte int, 2-byte short, 1-byte char.  Returns 1 when the
      platform matches, 0 otherwise. */
   return sizeof(int)   == 4
       && sizeof(short) == 2
       && sizeof(char)  == 1;
}
+
+
+/*---------------------------------------------------*/
+static
+void* default_bzalloc ( void* opaque, Int32 items, Int32 size )
+{
+   void* v = (void*) (*serviceFn)(2, items * size );
+   return v;
+}
+
+static
+void default_bzfree ( void* opaque, void* addr )
+{
+   if (addr != NULL) (*serviceFn)( 3, (HWord)addr );
+}
+
+
+/*---------------------------------------------------*/
+static
+void prepare_new_block ( EState* s )
+{
+   Int32 i;
+   s->nblock = 0;
+   s->numZ = 0;
+   s->state_out_pos = 0;
+   BZ_INITIALISE_CRC ( s->blockCRC );
+   for (i = 0; i < 256; i++) s->inUse[i] = False;
+   s->blockNo++;
+}
+
+
+/*---------------------------------------------------*/
+static
+void init_RL ( EState* s )
+{
+   s->state_in_ch  = 256;
+   s->state_in_len = 0;
+}
+
+
+static
+Bool isempty_RL ( EState* s )
+{
+   if (s->state_in_ch < 256 && s->state_in_len > 0)
+      return False; else
+      return True;
+}
+
+
+/*---------------------------------------------------*/
/* Initialise strm for compression.  blockSize100k (1..9) selects the
   block size in units of 100000 bytes; workFactor (0..250) tunes the
   block-sorting fallback, with 0 meaning "use the default of 30".
   On success the stream owns an EState plus three work arrays, all
   released by BZ2_bzCompressEnd.  Returns BZ_OK on success, else
   BZ_CONFIG_ERROR, BZ_PARAM_ERROR or BZ_MEM_ERROR. */
int BZ_API(BZ2_bzCompressInit) 
                    ( bz_stream* strm, 
                     int        blockSize100k,
                     int        verbosity,
                     int        workFactor )
{
   Int32   n;
   EState* s;

   if (!bz_config_ok()) return BZ_CONFIG_ERROR;

   if (strm == NULL || 
       blockSize100k < 1 || blockSize100k > 9 ||
       workFactor < 0 || workFactor > 250)
     return BZ_PARAM_ERROR;

   if (workFactor == 0) workFactor = 30;
   /* Fall back to the serviceFn-backed allocator pair if the caller
      supplied none. */
   if (strm->bzalloc == NULL) strm->bzalloc = default_bzalloc;
   if (strm->bzfree == NULL) strm->bzfree = default_bzfree;

   s = BZALLOC( sizeof(EState) );
   if (s == NULL) return BZ_MEM_ERROR;
   s->strm = strm;

   /* NULL these first so the failure path below can free safely. */
   s->arr1 = NULL;
   s->arr2 = NULL;
   s->ftab = NULL;

   n       = 100000 * blockSize100k;
   s->arr1 = BZALLOC( n                  * sizeof(UInt32) );
   s->arr2 = BZALLOC( (n+BZ_N_OVERSHOOT) * sizeof(UInt32) );
   s->ftab = BZALLOC( 65537              * sizeof(UInt32) );

   /* If any allocation failed, release whatever did succeed. */
   if (s->arr1 == NULL || s->arr2 == NULL || s->ftab == NULL) {
      if (s->arr1 != NULL) BZFREE(s->arr1);
      if (s->arr2 != NULL) BZFREE(s->arr2);
      if (s->ftab != NULL) BZFREE(s->ftab);
      if (s       != NULL) BZFREE(s);
      return BZ_MEM_ERROR;
   }

   s->blockNo           = 0;
   s->state             = BZ_S_INPUT;
   s->mode              = BZ_M_RUNNING;
   s->combinedCRC       = 0;
   s->blockSize100k     = blockSize100k;
   /* -19 reserves headroom below the full block size -- presumably
      for the RLE encoder's worst case; NOTE(review): confirm against
      the bzip2 sources/manual. */
   s->nblockMAX         = 100000 * blockSize100k - 19;
   s->verbosity         = verbosity;
   s->workFactor        = workFactor;

   /* block/mtfv/ptr are aliases of arr1/arr2, reused across the
      different phases of compression. */
   s->block             = (UChar*)s->arr2;
   s->mtfv              = (UInt16*)s->arr1;
   s->zbits             = NULL;
   s->ptr               = (UInt32*)s->arr1;

   strm->state          = s;
   strm->total_in_lo32  = 0;
   strm->total_in_hi32  = 0;
   strm->total_out_lo32 = 0;
   strm->total_out_hi32 = 0;
   init_RL ( s );
   prepare_new_block ( s );
   return BZ_OK;
}
+
+
+/*---------------------------------------------------*/
/* Flush the pending run (state_in_len copies of state_in_ch) into
   the block, updating the block CRC for every byte of the run.
   Runs of length 1..3 are stored literally; a longer run is stored
   as four copies of the byte followed by a count byte holding
   (length - 4): the bzip2 initial RLE encoding. */
static
void add_pair_to_block ( EState* s )
{
   Int32 i;
   UChar ch = (UChar)(s->state_in_ch);
   /* The CRC covers the expanded run, not its encoded form. */
   for (i = 0; i < s->state_in_len; i++) {
      BZ_UPDATE_CRC( s->blockCRC, ch );
   }
   s->inUse[s->state_in_ch] = True;
   switch (s->state_in_len) {
      case 1:
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         break;
      case 2:
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         break;
      case 3:
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         break;
      default:
         /* The count byte (state_in_len-4) also appears in the block,
            so its value must be marked as in use too. */
         s->inUse[s->state_in_len-4] = True;
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         s->block[s->nblock] = (UChar)ch; s->nblock++;
         s->block[s->nblock] = ((UChar)(s->state_in_len-4));
         s->nblock++;
         break;
   }
}
+
+
+/*---------------------------------------------------*/
+static
+void flush_RL ( EState* s )
+{
+   if (s->state_in_ch < 256) add_pair_to_block ( s );
+   init_RL ( s );
+}
+
+
+/*---------------------------------------------------*/
/* Push one input byte (zchh0) through the run-length encoder of
   stream zs.  The first arm handles the common case inline: a byte
   different from the current one while the pending run has length 1.
   Otherwise, a completed run (different byte, or length reaching the
   255 cap) is flushed via add_pair_to_block and a new run started;
   failing that, the current run is simply extended. */
#define ADD_CHAR_TO_BLOCK(zs,zchh0)               \
{                                                 \
   UInt32 zchh = (UInt32)(zchh0);                 \
   /*-- fast track the common case --*/           \
   if (zchh != zs->state_in_ch &&                 \
       zs->state_in_len == 1) {                   \
      UChar ch = (UChar)(zs->state_in_ch);        \
      BZ_UPDATE_CRC( zs->blockCRC, ch );          \
      zs->inUse[zs->state_in_ch] = True;          \
      zs->block[zs->nblock] = (UChar)ch;          \
      zs->nblock++;                               \
      zs->state_in_ch = zchh;                     \
   }                                              \
   else                                           \
   /*-- general, uncommon cases --*/              \
   if (zchh != zs->state_in_ch ||                 \
      zs->state_in_len == 255) {                  \
      if (zs->state_in_ch < 256)                  \
         add_pair_to_block ( zs );                \
      zs->state_in_ch = zchh;                     \
      zs->state_in_len = 1;                       \
   } else {                                       \
      zs->state_in_len++;                         \
   }                                              \
}
+
+
+/*---------------------------------------------------*/
/* Feed bytes from strm->next_in into the current block through the
   run-length encoder, stopping when the block is full or input is
   exhausted; when flushing/finishing (mode != BZ_M_RUNNING) it also
   stops once avail_in_expect bytes have been consumed.  Returns True
   iff at least one byte was consumed. */
static
Bool copy_input_until_stop ( EState* s )
{
   Bool progress_in = False;

   if (s->mode == BZ_M_RUNNING) {

      /*-- fast track the common case --*/
      while (True) {
         /*-- block full? --*/
         if (s->nblock >= s->nblockMAX) break;
         /*-- no input? --*/
         if (s->strm->avail_in == 0) break;
         progress_in = True;
         ADD_CHAR_TO_BLOCK ( s, (UInt32)(*((UChar*)(s->strm->next_in))) ); 
         s->strm->next_in++;
         s->strm->avail_in--;
         /* 64-bit input total kept as two 32-bit halves. */
         s->strm->total_in_lo32++;
         if (s->strm->total_in_lo32 == 0) s->strm->total_in_hi32++;
      }

   } else {

      /*-- general, uncommon case --*/
      while (True) {
         /*-- block full? --*/
         if (s->nblock >= s->nblockMAX) break;
         /*-- no input? --*/
         if (s->strm->avail_in == 0) break;
         /*-- flush/finish end? --*/
         if (s->avail_in_expect == 0) break;
         progress_in = True;
         ADD_CHAR_TO_BLOCK ( s, (UInt32)(*((UChar*)(s->strm->next_in))) ); 
         s->strm->next_in++;
         s->strm->avail_in--;
         s->strm->total_in_lo32++;
         if (s->strm->total_in_lo32 == 0) s->strm->total_in_hi32++;
         s->avail_in_expect--;
      }
   }
   return progress_in;
}
+
+
+/*---------------------------------------------------*/
+static
+Bool copy_output_until_stop ( EState* s )
+{
+   Bool progress_out = False;
+
+   while (True) {
+
+      /*-- no output space? --*/
+      if (s->strm->avail_out == 0) break;
+
+      /*-- block done? --*/
+      if (s->state_out_pos >= s->numZ) break;
+
+      progress_out = True;
+      *(s->strm->next_out) = s->zbits[s->state_out_pos];
+      s->state_out_pos++;
+      s->strm->avail_out--;
+      s->strm->next_out++;
+      s->strm->total_out_lo32++;
+      if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++;
+   }
+
+   return progress_out;
+}
+
+
+/*---------------------------------------------------*/
/* Drive the compressor state machine: alternate between filling the
   current block from input (BZ_S_INPUT) and draining compressed
   output (BZ_S_OUTPUT) until no further progress can be made with
   the caller's current buffers.  Returns True iff any input was
   consumed or any output produced. */
static
Bool handle_compress ( bz_stream* strm )
{
   Bool progress_in  = False;
   Bool progress_out = False;
   EState* s = strm->state;
   
   while (True) {

      if (s->state == BZ_S_OUTPUT) {
         progress_out |= copy_output_until_stop ( s );
         /* Output buffer filled before the block was fully drained. */
         if (s->state_out_pos < s->numZ) break;
         /* Finishing and nothing left at all: done. */
         if (s->mode == BZ_M_FINISHING && 
             s->avail_in_expect == 0 &&
             isempty_RL(s)) break;
         prepare_new_block ( s );
         s->state = BZ_S_INPUT;
         if (s->mode == BZ_M_FLUSHING && 
             s->avail_in_expect == 0 &&
             isempty_RL(s)) break;
      }

      if (s->state == BZ_S_INPUT) {
         progress_in |= copy_input_until_stop ( s );
         /* Flushing/finishing and all expected input consumed:
            emit the (possibly final) block now. */
         if (s->mode != BZ_M_RUNNING && s->avail_in_expect == 0) {
            flush_RL ( s );
            BZ2_compressBlock ( s, (Bool)(s->mode == BZ_M_FINISHING) );
            s->state = BZ_S_OUTPUT;
         }
         else
         if (s->nblock >= s->nblockMAX) {
            /* Block is full: compress it and switch to draining. */
            BZ2_compressBlock ( s, False );
            s->state = BZ_S_OUTPUT;
         }
         else
         if (s->strm->avail_in == 0) {
            break;
         }
      }

   }

   return progress_in || progress_out;
}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzCompress) ( bz_stream *strm, int action )
+{
+   Bool progress;
+   EState* s;
+   if (strm == NULL) return BZ_PARAM_ERROR;
+   s = strm->state;
+   if (s == NULL) return BZ_PARAM_ERROR;
+   if (s->strm != strm) return BZ_PARAM_ERROR;
+
+   preswitch:
+   switch (s->mode) {
+
+      case BZ_M_IDLE:
+         return BZ_SEQUENCE_ERROR;
+
+      case BZ_M_RUNNING:
+         if (action == BZ_RUN) {
+            progress = handle_compress ( strm );
+            return progress ? BZ_RUN_OK : BZ_PARAM_ERROR;
+         } 
+         else
+	 if (action == BZ_FLUSH) {
+            s->avail_in_expect = strm->avail_in;
+            s->mode = BZ_M_FLUSHING;
+            goto preswitch;
+         }
+         else
+         if (action == BZ_FINISH) {
+            s->avail_in_expect = strm->avail_in;
+            s->mode = BZ_M_FINISHING;
+            goto preswitch;
+         }
+         else 
+            return BZ_PARAM_ERROR;
+
+      case BZ_M_FLUSHING:
+         if (action != BZ_FLUSH) return BZ_SEQUENCE_ERROR;
+         if (s->avail_in_expect != s->strm->avail_in) 
+            return BZ_SEQUENCE_ERROR;
+         progress = handle_compress ( strm );
+         if (s->avail_in_expect > 0 || !isempty_RL(s) ||
+             s->state_out_pos < s->numZ) return BZ_FLUSH_OK;
+         s->mode = BZ_M_RUNNING;
+         return BZ_RUN_OK;
+
+      case BZ_M_FINISHING:
+         if (action != BZ_FINISH) return BZ_SEQUENCE_ERROR;
+         if (s->avail_in_expect != s->strm->avail_in) 
+            return BZ_SEQUENCE_ERROR;
+         progress = handle_compress ( strm );
+         if (!progress) return BZ_SEQUENCE_ERROR;
+         if (s->avail_in_expect > 0 || !isempty_RL(s) ||
+             s->state_out_pos < s->numZ) return BZ_FINISH_OK;
+         s->mode = BZ_M_IDLE;
+         return BZ_STREAM_END;
+   }
+   return BZ_OK; /*--not reached--*/
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzCompressEnd)  ( bz_stream *strm )
+{
+   EState* s;
+   if (strm == NULL) return BZ_PARAM_ERROR;
+   s = strm->state;
+   if (s == NULL) return BZ_PARAM_ERROR;
+   if (s->strm != strm) return BZ_PARAM_ERROR;
+
+   if (s->arr1 != NULL) BZFREE(s->arr1);
+   if (s->arr2 != NULL) BZFREE(s->arr2);
+   if (s->ftab != NULL) BZFREE(s->ftab);
+   BZFREE(strm->state);
+
+   strm->state = NULL;   
+
+   return BZ_OK;
+}
+
+
+/*---------------------------------------------------*/
+/*--- Decompression stuff                         ---*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzDecompressInit) 
+                     ( bz_stream* strm, 
+                       int        verbosity,
+                       int        small )
+{
+   DState* s;
+
+   if (!bz_config_ok()) return BZ_CONFIG_ERROR;
+
+   if (strm == NULL) return BZ_PARAM_ERROR;
+   if (small != 0 && small != 1) return BZ_PARAM_ERROR;
+   if (verbosity < 0 || verbosity > 4) return BZ_PARAM_ERROR;
+
+   if (strm->bzalloc == NULL) strm->bzalloc = default_bzalloc;
+   if (strm->bzfree == NULL) strm->bzfree = default_bzfree;
+
+   s = BZALLOC( sizeof(DState) );
+   if (s == NULL) return BZ_MEM_ERROR;
+   s->strm                  = strm;
+   strm->state              = s;
+   s->state                 = BZ_X_MAGIC_1;
+   s->bsLive                = 0;
+   s->bsBuff                = 0;
+   s->calculatedCombinedCRC = 0;
+   strm->total_in_lo32      = 0;
+   strm->total_in_hi32      = 0;
+   strm->total_out_lo32     = 0;
+   strm->total_out_hi32     = 0;
+   s->smallDecompress       = (Bool)small;
+   s->ll4                   = NULL;
+   s->ll16                  = NULL;
+   s->tt                    = NULL;
+   s->currBlockNo           = 0;
+   s->verbosity             = verbosity;
+
+   return BZ_OK;
+}
+
+
+/*---------------------------------------------------*/
+/* Return  True iff data corruption is discovered.
+   Returns False if there is no problem.
+*/
+static
+Bool unRLE_obuf_to_output_FAST ( DState* s )
+{
+   /* Undo the initial run-length encoding: fetch decoded bytes with
+      BZ_GET_FAST and copy the expanded runs into the caller's output
+      buffer.  A run of 4 equal bytes is followed by one length byte
+      giving 4..259 total repeats (state_out_len).  Stops when output
+      space or the block's bytes (save_nblock+1 of them) run out. */
+   UChar k1;
+
+   if (s->blockRandomised) {
+
+      /* 'Randomised' blocks (a legacy format option): every fetched
+         byte must additionally be XORed with BZ_RAND_MASK. */
+      while (True) {
+         /* try to finish existing run */
+         while (True) {
+            if (s->strm->avail_out == 0) return False;
+            if (s->state_out_len == 0) break;
+            *( (UChar*)(s->strm->next_out) ) = s->state_out_ch;
+            BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch );
+            s->state_out_len--;
+            s->strm->next_out++;
+            s->strm->avail_out--;
+            s->strm->total_out_lo32++;
+            /* 64-bit output count kept as two 32-bit halves; carry on wrap. */
+            if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++;
+         }
+
+         /* can a new run be started? */
+         if (s->nblock_used == s->save_nblock+1) return False;
+
+         /* Only caused by corrupt data stream? */
+         if (s->nblock_used > s->save_nblock+1)
+            return True;
+
+         /* Peel off up to three literal bytes; a fourth identical byte
+            signals a run whose length byte follows. */
+         s->state_out_len = 1;
+         s->state_out_ch = s->k0;
+         BZ_GET_FAST(k1); BZ_RAND_UPD_MASK;
+         k1 ^= BZ_RAND_MASK; s->nblock_used++;
+         if (s->nblock_used == s->save_nblock+1) continue;
+         if (k1 != s->k0) { s->k0 = k1; continue; };
+
+         s->state_out_len = 2;
+         BZ_GET_FAST(k1); BZ_RAND_UPD_MASK;
+         k1 ^= BZ_RAND_MASK; s->nblock_used++;
+         if (s->nblock_used == s->save_nblock+1) continue;
+         if (k1 != s->k0) { s->k0 = k1; continue; };
+
+         s->state_out_len = 3;
+         BZ_GET_FAST(k1); BZ_RAND_UPD_MASK;
+         k1 ^= BZ_RAND_MASK; s->nblock_used++;
+         if (s->nblock_used == s->save_nblock+1) continue;
+         if (k1 != s->k0) { s->k0 = k1; continue; };
+
+         BZ_GET_FAST(k1); BZ_RAND_UPD_MASK;
+         k1 ^= BZ_RAND_MASK; s->nblock_used++;
+         s->state_out_len = ((Int32)k1) + 4;
+         BZ_GET_FAST(s->k0); BZ_RAND_UPD_MASK;
+         s->k0 ^= BZ_RAND_MASK; s->nblock_used++;
+      }
+
+   } else {
+
+      /* Fast path: cache the hot fields in locals so the inner loop
+         avoids repeated pointer-chasing through s and s->strm; the
+         cached values are written back at return_notr below. */
+      /* restore */
+      UInt32        c_calculatedBlockCRC = s->calculatedBlockCRC;
+      UChar         c_state_out_ch       = s->state_out_ch;
+      Int32         c_state_out_len      = s->state_out_len;
+      Int32         c_nblock_used        = s->nblock_used;
+      Int32         c_k0                 = s->k0;
+      UInt32*       c_tt                 = s->tt;
+      UInt32        c_tPos               = s->tPos;
+      char*         cs_next_out          = s->strm->next_out;
+      unsigned int  cs_avail_out         = s->strm->avail_out;
+      /* end restore */
+
+      UInt32       avail_out_INIT = cs_avail_out;
+      Int32        s_save_nblockPP = s->save_nblock+1;
+      unsigned int total_out_lo32_old;
+
+      while (True) {
+
+         /* try to finish existing run */
+         if (c_state_out_len > 0) {
+            while (True) {
+               if (cs_avail_out == 0) goto return_notr;
+               if (c_state_out_len == 1) break;
+               *( (UChar*)(cs_next_out) ) = c_state_out_ch;
+               BZ_UPDATE_CRC ( c_calculatedBlockCRC, c_state_out_ch );
+               c_state_out_len--;
+               cs_next_out++;
+               cs_avail_out--;
+            }
+            /* Emit the last (or only) byte of the run; also jumped to
+               directly when a fresh 1-byte run is detected below. */
+            s_state_out_len_eq_one:
+            {
+               if (cs_avail_out == 0) {
+                  c_state_out_len = 1; goto return_notr;
+               };
+               *( (UChar*)(cs_next_out) ) = c_state_out_ch;
+               BZ_UPDATE_CRC ( c_calculatedBlockCRC, c_state_out_ch );
+               cs_next_out++;
+               cs_avail_out--;
+            }
+         }
+         /* Only caused by corrupt data stream? */
+         if (c_nblock_used > s_save_nblockPP)
+            return True;
+
+         /* can a new run be started? */
+         if (c_nblock_used == s_save_nblockPP) {
+            c_state_out_len = 0; goto return_notr;
+         };
+         c_state_out_ch = c_k0;
+         BZ_GET_FAST_C(k1); c_nblock_used++;
+         if (k1 != c_k0) {
+            c_k0 = k1; goto s_state_out_len_eq_one;
+         };
+         if (c_nblock_used == s_save_nblockPP)
+            goto s_state_out_len_eq_one;
+
+         c_state_out_len = 2;
+         BZ_GET_FAST_C(k1); c_nblock_used++;
+         if (c_nblock_used == s_save_nblockPP) continue;
+         if (k1 != c_k0) { c_k0 = k1; continue; };
+
+         c_state_out_len = 3;
+         BZ_GET_FAST_C(k1); c_nblock_used++;
+         if (c_nblock_used == s_save_nblockPP) continue;
+         if (k1 != c_k0) { c_k0 = k1; continue; };
+
+         BZ_GET_FAST_C(k1); c_nblock_used++;
+         c_state_out_len = ((Int32)k1) + 4;
+         BZ_GET_FAST_C(c_k0); c_nblock_used++;
+      }
+
+      return_notr:
+      /* Account the bytes written this call into the 64-bit total. */
+      total_out_lo32_old = s->strm->total_out_lo32;
+      s->strm->total_out_lo32 += (avail_out_INIT - cs_avail_out);
+      if (s->strm->total_out_lo32 < total_out_lo32_old)
+         s->strm->total_out_hi32++;
+
+      /* save */
+      s->calculatedBlockCRC = c_calculatedBlockCRC;
+      s->state_out_ch       = c_state_out_ch;
+      s->state_out_len      = c_state_out_len;
+      s->nblock_used        = c_nblock_used;
+      s->k0                 = c_k0;
+      s->tt                 = c_tt;
+      s->tPos               = c_tPos;
+      s->strm->next_out     = cs_next_out;
+      s->strm->avail_out    = cs_avail_out;
+      /* end save */
+   }
+   return False;
+}
+
+
+
+/*---------------------------------------------------*/
+/* Return  True iff data corruption is discovered.
+   Returns False if there is no problem.
+*/
+static
+Bool unRLE_obuf_to_output_SMALL ( DState* s )
+{
+   /* Small-memory variant of unRLE_obuf_to_output_FAST: identical
+      run-length expansion, but bytes come via BZ_GET_SMALL and no
+      state is cached in locals. */
+   UChar k1;
+
+   if (s->blockRandomised) {
+
+      /* 'Randomised' blocks: XOR each fetched byte with BZ_RAND_MASK. */
+      while (True) {
+         /* try to finish existing run */
+         while (True) {
+            if (s->strm->avail_out == 0) return False;
+            if (s->state_out_len == 0) break;
+            *( (UChar*)(s->strm->next_out) ) = s->state_out_ch;
+            BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch );
+            s->state_out_len--;
+            s->strm->next_out++;
+            s->strm->avail_out--;
+            s->strm->total_out_lo32++;
+            /* carry into the high word of the 64-bit output count */
+            if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++;
+         }
+
+         /* can a new run be started? */
+         if (s->nblock_used == s->save_nblock+1) return False;
+
+         /* Only caused by corrupt data stream? */
+         if (s->nblock_used > s->save_nblock+1)
+            return True;
+
+         /* Up to three literals; a fourth equal byte starts a long run. */
+         s->state_out_len = 1;
+         s->state_out_ch = s->k0;
+         BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK;
+         k1 ^= BZ_RAND_MASK; s->nblock_used++;
+         if (s->nblock_used == s->save_nblock+1) continue;
+         if (k1 != s->k0) { s->k0 = k1; continue; };
+
+         s->state_out_len = 2;
+         BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK;
+         k1 ^= BZ_RAND_MASK; s->nblock_used++;
+         if (s->nblock_used == s->save_nblock+1) continue;
+         if (k1 != s->k0) { s->k0 = k1; continue; };
+
+         s->state_out_len = 3;
+         BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK;
+         k1 ^= BZ_RAND_MASK; s->nblock_used++;
+         if (s->nblock_used == s->save_nblock+1) continue;
+         if (k1 != s->k0) { s->k0 = k1; continue; };
+
+         BZ_GET_SMALL(k1); BZ_RAND_UPD_MASK;
+         k1 ^= BZ_RAND_MASK; s->nblock_used++;
+         s->state_out_len = ((Int32)k1) + 4;
+         BZ_GET_SMALL(s->k0); BZ_RAND_UPD_MASK;
+         s->k0 ^= BZ_RAND_MASK; s->nblock_used++;
+      }
+
+   } else {
+
+      /* Normal (non-randomised) blocks: same logic, no masking. */
+      while (True) {
+         /* try to finish existing run */
+         while (True) {
+            if (s->strm->avail_out == 0) return False;
+            if (s->state_out_len == 0) break;
+            *( (UChar*)(s->strm->next_out) ) = s->state_out_ch;
+            BZ_UPDATE_CRC ( s->calculatedBlockCRC, s->state_out_ch );
+            s->state_out_len--;
+            s->strm->next_out++;
+            s->strm->avail_out--;
+            s->strm->total_out_lo32++;
+            if (s->strm->total_out_lo32 == 0) s->strm->total_out_hi32++;
+         }
+
+         /* can a new run be started? */
+         if (s->nblock_used == s->save_nblock+1) return False;
+
+         /* Only caused by corrupt data stream? */
+         if (s->nblock_used > s->save_nblock+1)
+            return True;
+
+         s->state_out_len = 1;
+         s->state_out_ch = s->k0;
+         BZ_GET_SMALL(k1); s->nblock_used++;
+         if (s->nblock_used == s->save_nblock+1) continue;
+         if (k1 != s->k0) { s->k0 = k1; continue; };
+
+         s->state_out_len = 2;
+         BZ_GET_SMALL(k1); s->nblock_used++;
+         if (s->nblock_used == s->save_nblock+1) continue;
+         if (k1 != s->k0) { s->k0 = k1; continue; };
+
+         s->state_out_len = 3;
+         BZ_GET_SMALL(k1); s->nblock_used++;
+         if (s->nblock_used == s->save_nblock+1) continue;
+         if (k1 != s->k0) { s->k0 = k1; continue; };
+
+         BZ_GET_SMALL(k1); s->nblock_used++;
+         s->state_out_len = ((Int32)k1) + 4;
+         BZ_GET_SMALL(s->k0); s->nblock_used++;
+      }
+
+   }
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzDecompress) ( bz_stream *strm )
+{
+   /* Top-level decompression driver.  Alternates between
+      BZ2_decompress (decoding the next block into internal buffers)
+      and unRLE_obuf_to_output_* (final RLE undo into the caller's
+      output buffer) until output space runs out, more input is
+      needed, or the stream ends. */
+   Bool    corrupt;
+   DState* s;
+   if (strm == NULL) return BZ_PARAM_ERROR;
+   s = strm->state;
+   if (s == NULL) return BZ_PARAM_ERROR;
+   if (s->strm != strm) return BZ_PARAM_ERROR;   /* state/stream mismatch */
+
+   while (True) {
+      if (s->state == BZ_X_IDLE) return BZ_SEQUENCE_ERROR;
+      if (s->state == BZ_X_OUTPUT) {
+         /* Emit decoded bytes; 'small' mode trades speed for memory. */
+         if (s->smallDecompress)
+            corrupt = unRLE_obuf_to_output_SMALL ( s ); else
+            corrupt = unRLE_obuf_to_output_FAST  ( s );
+         if (corrupt) return BZ_DATA_ERROR;
+         if (s->nblock_used == s->save_nblock+1 && s->state_out_len == 0) {
+            /* Block fully emitted: check its CRC and fold it into the
+               combined CRC (rotate-left-by-1, then XOR). */
+            BZ_FINALISE_CRC ( s->calculatedBlockCRC );
+            if (s->verbosity >= 3)
+               VPrintf2 ( " {0x%08x, 0x%08x}", s->storedBlockCRC,
+                          s->calculatedBlockCRC );
+            if (s->verbosity >= 2) VPrintf0 ( "]" );
+            if (s->calculatedBlockCRC != s->storedBlockCRC)
+               return BZ_DATA_ERROR;
+            s->calculatedCombinedCRC
+               = (s->calculatedCombinedCRC << 1) |
+                    (s->calculatedCombinedCRC >> 31);
+            s->calculatedCombinedCRC ^= s->calculatedBlockCRC;
+            s->state = BZ_X_BLKHDR_1;
+         } else {
+            return BZ_OK;
+         }
+      }
+      if (s->state >= BZ_X_MAGIC_1) {
+         /* Decode headers / the next block from the input stream. */
+         Int32 r = BZ2_decompress ( s );
+         if (r == BZ_STREAM_END) {
+            if (s->verbosity >= 3)
+               VPrintf2 ( "\n    combined CRCs: stored = 0x%08x, computed = 0x%08x",
+                          s->storedCombinedCRC, s->calculatedCombinedCRC );
+            if (s->calculatedCombinedCRC != s->storedCombinedCRC)
+               return BZ_DATA_ERROR;
+            return r;
+         }
+         if (s->state != BZ_X_OUTPUT) return r;
+      }
+   }
+
+   AssertH ( 0, 6001 );
+
+   return 0;  /*NOTREACHED*/
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzDecompressEnd)  ( bz_stream *strm )
+{
+   /* Release all memory held by a decompression stream.  Returns
+      BZ_PARAM_ERROR for a NULL or inconsistent stream, else BZ_OK. */
+   DState* s;
+
+   if (strm == NULL) return BZ_PARAM_ERROR;
+   s = strm->state;
+   if (s == NULL || s->strm != strm) return BZ_PARAM_ERROR;
+
+   /* Free the (possibly never-allocated) decode work areas first,
+      then the state record itself. */
+   if (s->tt   != NULL) BZFREE(s->tt);
+   if (s->ll16 != NULL) BZFREE(s->ll16);
+   if (s->ll4  != NULL) BZFREE(s->ll4);
+
+   BZFREE(strm->state);
+   strm->state = NULL;
+
+   return BZ_OK;
+}
+
+
+#ifndef BZ_NO_STDIO
+/*---------------------------------------------------*/
+/*--- File I/O stuff                              ---*/
+/*---------------------------------------------------*/
+
+/* Record an error code both through the caller-supplied pointer (if
+   any) and in the handle's lastErr field (if the handle exists).
+   NOTE: relies on identifiers 'bzerror' and 'bzf' being in scope at
+   every expansion site. */
+#define BZ_SETERR(eee)                    \
+{                                         \
+   if (bzerror != NULL) *bzerror = eee;   \
+   if (bzf != NULL) bzf->lastErr = eee;   \
+}
+
+/* Per-handle state for the high-level (FILE*-based) interface. */
+typedef 
+   struct {
+      FILE*     handle;         /* underlying stdio stream               */
+      Char      buf[BZ_MAX_UNUSED]; /* staging buffer for file I/O       */
+      Int32     bufN;           /* number of valid bytes in buf          */
+      Bool      writing;        /* True: compressing, False: reading     */
+      bz_stream strm;           /* low-level (de)compression state       */
+      Int32     lastErr;        /* last error code, reported by BZ2_bzerror */
+      Bool      initialisedOk;  /* did (De)CompressInit succeed?         */
+   }
+   bzFile;
+
+
+/*---------------------------------------------*/
+/* True end-of-file test: peek one byte; if a byte is available, push
+   it back and report "not at EOF". */
+static Bool myfeof ( FILE* f )
+{
+   Int32 c = fgetc ( f );
+   if (c != EOF) {
+      ungetc ( c, f );
+      return False;
+   }
+   return True;
+}
+
+
+/*---------------------------------------------------*/
+BZFILE* BZ_API(BZ2_bzWriteOpen) 
+                    ( int*  bzerror,      
+                      FILE* f, 
+                      int   blockSize100k, 
+                      int   verbosity,
+                      int   workFactor )
+{
+   /* Create a compression handle writing to the already-open FILE* f.
+      Returns NULL (with *bzerror / lastErr set) on parameter error,
+      a pre-existing error on f, allocation failure, or compressor
+      init failure. */
+   Int32   ret;
+   bzFile* bzf = NULL;
+
+   BZ_SETERR(BZ_OK);
+
+   if (f == NULL ||
+       (blockSize100k < 1 || blockSize100k > 9) ||
+       (workFactor < 0 || workFactor > 250) ||
+       (verbosity < 0 || verbosity > 4))
+      { BZ_SETERR(BZ_PARAM_ERROR); return NULL; };
+
+   if (ferror(f))
+      { BZ_SETERR(BZ_IO_ERROR); return NULL; };
+
+   bzf = malloc ( sizeof(bzFile) );
+   if (bzf == NULL)
+      { BZ_SETERR(BZ_MEM_ERROR); return NULL; };
+
+   BZ_SETERR(BZ_OK);
+   bzf->initialisedOk = False;
+   bzf->bufN          = 0;
+   bzf->handle        = f;
+   bzf->writing       = True;
+   bzf->strm.bzalloc  = NULL;   /* NULL => library default allocator */
+   bzf->strm.bzfree   = NULL;
+   bzf->strm.opaque   = NULL;
+
+   if (workFactor == 0) workFactor = 30;   /* 0 selects the default */
+   ret = BZ2_bzCompressInit ( &(bzf->strm), blockSize100k, 
+                              verbosity, workFactor );
+   if (ret != BZ_OK)
+      { BZ_SETERR(ret); free(bzf); return NULL; };
+
+   bzf->strm.avail_in = 0;
+   bzf->initialisedOk = True;
+   return bzf;   
+}
+
+
+
+/*---------------------------------------------------*/
+void BZ_API(BZ2_bzWrite)
+             ( int*    bzerror, 
+               BZFILE* b, 
+               void*   buf, 
+               int     len )
+{
+   /* Compress len bytes from buf, writing any produced output to the
+      handle's FILE*.  Loops until all input is consumed, draining the
+      BZ_MAX_UNUSED-byte staging buffer to disk on each pass. */
+   Int32 n, n2, ret;
+   bzFile* bzf = (bzFile*)b;
+
+   BZ_SETERR(BZ_OK);
+   if (bzf == NULL || buf == NULL || len < 0)
+      { BZ_SETERR(BZ_PARAM_ERROR); return; };
+   if (!(bzf->writing))
+      { BZ_SETERR(BZ_SEQUENCE_ERROR); return; };
+   if (ferror(bzf->handle))
+      { BZ_SETERR(BZ_IO_ERROR); return; };
+
+   if (len == 0)
+      { BZ_SETERR(BZ_OK); return; };
+
+   bzf->strm.avail_in = len;
+   bzf->strm.next_in  = buf;
+
+   while (True) {
+      bzf->strm.avail_out = BZ_MAX_UNUSED;
+      bzf->strm.next_out = bzf->buf;
+      ret = BZ2_bzCompress ( &(bzf->strm), BZ_RUN );
+      if (ret != BZ_RUN_OK)
+         { BZ_SETERR(ret); return; };
+
+      /* Flush whatever the compressor produced this round. */
+      if (bzf->strm.avail_out < BZ_MAX_UNUSED) {
+         n = BZ_MAX_UNUSED - bzf->strm.avail_out;
+         n2 = fwrite ( (void*)(bzf->buf), sizeof(UChar), 
+                       n, bzf->handle );
+         if (n != n2 || ferror(bzf->handle))
+            { BZ_SETERR(BZ_IO_ERROR); return; };
+      }
+
+      if (bzf->strm.avail_in == 0)
+         { BZ_SETERR(BZ_OK); return; };
+   }
+}
+
+
+/*---------------------------------------------------*/
+void BZ_API(BZ2_bzWriteClose)
+                  ( int*          bzerror, 
+                    BZFILE*       b, 
+                    int           abandon,
+                    unsigned int* nbytes_in,
+                    unsigned int* nbytes_out )
+{
+   /* Legacy 32-bit wrapper: delegate to BZ2_bzWriteClose64 and drop
+      the high 32 bits of the byte counts. */
+   BZ2_bzWriteClose64 ( bzerror, b, abandon, 
+                        nbytes_in, NULL, nbytes_out, NULL );
+}
+
+
+void BZ_API(BZ2_bzWriteClose64)
+                  ( int*          bzerror, 
+                    BZFILE*       b, 
+                    int           abandon,
+                    unsigned int* nbytes_in_lo32,
+                    unsigned int* nbytes_in_hi32,
+                    unsigned int* nbytes_out_lo32,
+                    unsigned int* nbytes_out_hi32 )
+{
+   /* Finish the compressed stream (unless 'abandon') and tear down a
+      handle made by BZ2_bzWriteOpen.  On success the optional
+      out-parameters receive the total bytes in/out as lo/hi 32-bit
+      halves.  NOTE(review): early error returns leave the handle
+      allocated -- callers retry with abandon=1 (see BZ2_bzclose). */
+   Int32   n, n2, ret;
+   bzFile* bzf = (bzFile*)b;
+
+   if (bzf == NULL)
+      { BZ_SETERR(BZ_OK); return; };
+   if (!(bzf->writing))
+      { BZ_SETERR(BZ_SEQUENCE_ERROR); return; };
+   if (ferror(bzf->handle))
+      { BZ_SETERR(BZ_IO_ERROR); return; };
+
+   if (nbytes_in_lo32 != NULL) *nbytes_in_lo32 = 0;
+   if (nbytes_in_hi32 != NULL) *nbytes_in_hi32 = 0;
+   if (nbytes_out_lo32 != NULL) *nbytes_out_lo32 = 0;
+   if (nbytes_out_hi32 != NULL) *nbytes_out_hi32 = 0;
+
+   if ((!abandon) && bzf->lastErr == BZ_OK) {
+      /* Drive the compressor to BZ_STREAM_END, flushing each filled
+         staging buffer to the file. */
+      while (True) {
+         bzf->strm.avail_out = BZ_MAX_UNUSED;
+         bzf->strm.next_out = bzf->buf;
+         ret = BZ2_bzCompress ( &(bzf->strm), BZ_FINISH );
+         if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END)
+            { BZ_SETERR(ret); return; };
+
+         if (bzf->strm.avail_out < BZ_MAX_UNUSED) {
+            n = BZ_MAX_UNUSED - bzf->strm.avail_out;
+            n2 = fwrite ( (void*)(bzf->buf), sizeof(UChar), 
+                          n, bzf->handle );
+            if (n != n2 || ferror(bzf->handle))
+               { BZ_SETERR(BZ_IO_ERROR); return; };
+         }
+
+         if (ret == BZ_STREAM_END) break;
+      }
+   }
+
+   if ( !abandon && !ferror ( bzf->handle ) ) {
+      fflush ( bzf->handle );
+      if (ferror(bzf->handle))
+         { BZ_SETERR(BZ_IO_ERROR); return; };
+   }
+
+   if (nbytes_in_lo32 != NULL)
+      *nbytes_in_lo32 = bzf->strm.total_in_lo32;
+   if (nbytes_in_hi32 != NULL)
+      *nbytes_in_hi32 = bzf->strm.total_in_hi32;
+   if (nbytes_out_lo32 != NULL)
+      *nbytes_out_lo32 = bzf->strm.total_out_lo32;
+   if (nbytes_out_hi32 != NULL)
+      *nbytes_out_hi32 = bzf->strm.total_out_hi32;
+
+   BZ_SETERR(BZ_OK);
+   BZ2_bzCompressEnd ( &(bzf->strm) );
+   free ( bzf );
+}
+
+
+/*---------------------------------------------------*/
+BZFILE* BZ_API(BZ2_bzReadOpen) 
+                   ( int*  bzerror, 
+                     FILE* f, 
+                     int   verbosity,
+                     int   small,
+                     void* unused,
+                     int   nUnused )
+{
+   /* Create a decompression handle reading from the already-open
+      FILE* f.  'unused'/'nUnused' let bytes left over from a previous
+      stream (see BZ2_bzReadGetUnused) be replayed before any data is
+      read from f.  Returns NULL with *bzerror set on failure. */
+   bzFile* bzf = NULL;
+   int     ret;
+
+   BZ_SETERR(BZ_OK);
+
+   if (f == NULL || 
+       (small != 0 && small != 1) ||
+       (verbosity < 0 || verbosity > 4) ||
+       (unused == NULL && nUnused != 0) ||
+       (unused != NULL && (nUnused < 0 || nUnused > BZ_MAX_UNUSED)))
+      { BZ_SETERR(BZ_PARAM_ERROR); return NULL; };
+
+   if (ferror(f))
+      { BZ_SETERR(BZ_IO_ERROR); return NULL; };
+
+   bzf = malloc ( sizeof(bzFile) );
+   if (bzf == NULL) 
+      { BZ_SETERR(BZ_MEM_ERROR); return NULL; };
+
+   BZ_SETERR(BZ_OK);
+
+   bzf->initialisedOk = False;
+   bzf->handle        = f;
+   bzf->bufN          = 0;
+   bzf->writing       = False;
+   bzf->strm.bzalloc  = NULL;
+   bzf->strm.bzfree   = NULL;
+   bzf->strm.opaque   = NULL;
+   
+   /* Seed the input buffer with the caller's leftover bytes. */
+   while (nUnused > 0) {
+      bzf->buf[bzf->bufN] = *((UChar*)(unused)); bzf->bufN++;
+      unused = ((void*)( 1 + ((UChar*)(unused))  ));
+      nUnused--;
+   }
+
+   ret = BZ2_bzDecompressInit ( &(bzf->strm), verbosity, small );
+   if (ret != BZ_OK)
+      { BZ_SETERR(ret); free(bzf); return NULL; };
+
+   bzf->strm.avail_in = bzf->bufN;
+   bzf->strm.next_in  = bzf->buf;
+
+   bzf->initialisedOk = True;
+   return bzf;   
+}
+
+
+/*---------------------------------------------------*/
+void BZ_API(BZ2_bzReadClose) ( int *bzerror, BZFILE *b )
+{
+   /* Shut down a handle created by BZ2_bzReadOpen and free it.
+      Closing a NULL handle is a no-op, reported as BZ_OK. */
+   bzFile* bzf = (bzFile*)b;
+
+   BZ_SETERR(BZ_OK);
+   if (bzf == NULL) return;                 /* *bzerror already BZ_OK */
+
+   if (bzf->writing) {
+      BZ_SETERR(BZ_SEQUENCE_ERROR);
+      return;
+   }
+
+   /* Only release decompressor state that was actually set up. */
+   if (bzf->initialisedOk)
+      (void)BZ2_bzDecompressEnd ( &(bzf->strm) );
+   free ( bzf );
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzRead) 
+           ( int*    bzerror, 
+             BZFILE* b, 
+             void*   buf, 
+             int     len )
+{
+   /* Decompress up to len bytes into buf, refilling the handle's
+      staging buffer from the file as needed.  Returns the number of
+      bytes produced; *bzerror is BZ_OK while more data remains, and
+      BZ_STREAM_END once the logical stream is finished. */
+   Int32   n, ret;
+   bzFile* bzf = (bzFile*)b;
+
+   BZ_SETERR(BZ_OK);
+
+   if (bzf == NULL || buf == NULL || len < 0)
+      { BZ_SETERR(BZ_PARAM_ERROR); return 0; };
+
+   if (bzf->writing)
+      { BZ_SETERR(BZ_SEQUENCE_ERROR); return 0; };
+
+   if (len == 0)
+      { BZ_SETERR(BZ_OK); return 0; };
+
+   bzf->strm.avail_out = len;
+   bzf->strm.next_out = buf;
+
+   while (True) {
+
+      if (ferror(bzf->handle)) 
+         { BZ_SETERR(BZ_IO_ERROR); return 0; };
+
+      /* Refill the staging buffer when the decompressor runs dry. */
+      if (bzf->strm.avail_in == 0 && !myfeof(bzf->handle)) {
+         n = fread ( bzf->buf, sizeof(UChar), 
+                     BZ_MAX_UNUSED, bzf->handle );
+         if (ferror(bzf->handle))
+            { BZ_SETERR(BZ_IO_ERROR); return 0; };
+         bzf->bufN = n;
+         bzf->strm.avail_in = bzf->bufN;
+         bzf->strm.next_in = bzf->buf;
+      }
+
+      ret = BZ2_bzDecompress ( &(bzf->strm) );
+
+      if (ret != BZ_OK && ret != BZ_STREAM_END)
+         { BZ_SETERR(ret); return 0; };
+
+      /* File exhausted but the compressed stream is incomplete. */
+      if (ret == BZ_OK && myfeof(bzf->handle) && 
+          bzf->strm.avail_in == 0 && bzf->strm.avail_out > 0)
+         { BZ_SETERR(BZ_UNEXPECTED_EOF); return 0; };
+
+      if (ret == BZ_STREAM_END)
+         { BZ_SETERR(BZ_STREAM_END);
+           return len - bzf->strm.avail_out; };
+      if (bzf->strm.avail_out == 0)
+         { BZ_SETERR(BZ_OK); return len; };
+      
+   }
+
+   return 0; /*not reached*/
+}
+
+
+/*---------------------------------------------------*/
+void BZ_API(BZ2_bzReadGetUnused) 
+                     ( int*    bzerror, 
+                       BZFILE* b, 
+                       void**  unused, 
+                       int*    nUnused )
+{
+   /* After BZ_STREAM_END, expose any input bytes read past the end of
+      the compressed stream so the caller can pass them to a later
+      BZ2_bzReadOpen.  The returned pointer aliases the handle's own
+      buffer and is invalidated by BZ2_bzReadClose.  The check order
+      below determines which error code wins, so it must not change. */
+   bzFile* bzf = (bzFile*)b;
+   if (bzf == NULL)
+      { BZ_SETERR(BZ_PARAM_ERROR); return; };
+   if (bzf->lastErr != BZ_STREAM_END)
+      { BZ_SETERR(BZ_SEQUENCE_ERROR); return; };
+   if (unused == NULL || nUnused == NULL)
+      { BZ_SETERR(BZ_PARAM_ERROR); return; };
+
+   BZ_SETERR(BZ_OK);
+   *nUnused = bzf->strm.avail_in;
+   *unused = bzf->strm.next_in;
+}
+#endif
+
+
+/*---------------------------------------------------*/
+/*--- Misc convenience stuff                      ---*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzBuffToBuffCompress) 
+                         ( char*         dest, 
+                           unsigned int* destLen,
+                           char*         source, 
+                           unsigned int  sourceLen,
+                           int           blockSize100k, 
+                           int           verbosity, 
+                           int           workFactor )
+{
+   /* One-shot compression of source[0..sourceLen-1] into dest.  On
+      entry *destLen is the capacity of dest; on successful return it
+      holds the number of compressed bytes produced. */
+   bz_stream strm;
+   int ret;
+
+   if (dest == NULL || destLen == NULL || 
+       source == NULL ||
+       blockSize100k < 1 || blockSize100k > 9 ||
+       verbosity < 0 || verbosity > 4 ||
+       workFactor < 0 || workFactor > 250) 
+      return BZ_PARAM_ERROR;
+
+   if (workFactor == 0) workFactor = 30;   /* 0 selects the default */
+   strm.bzalloc = NULL;
+   strm.bzfree  = NULL;
+   strm.opaque  = NULL;
+   ret = BZ2_bzCompressInit ( &strm, blockSize100k, 
+                              verbosity, workFactor );
+   if (ret != BZ_OK) return ret;
+
+   strm.next_in   = source;
+   strm.next_out  = dest;
+   strm.avail_in  = sourceLen;
+   strm.avail_out = *destLen;
+
+   /* A single BZ_FINISH call suffices since all input is present. */
+   ret = BZ2_bzCompress ( &strm, BZ_FINISH );
+
+   if (ret == BZ_STREAM_END) {
+      /* normal termination */
+      *destLen -= strm.avail_out;
+      BZ2_bzCompressEnd ( &strm );
+      return BZ_OK;
+   }
+
+   BZ2_bzCompressEnd ( &strm );
+   /* BZ_FINISH_OK here means the output buffer filled up. */
+   return (ret == BZ_FINISH_OK) ? BZ_OUTBUFF_FULL : ret;
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzBuffToBuffDecompress) 
+                           ( char*         dest, 
+                             unsigned int* destLen,
+                             char*         source, 
+                             unsigned int  sourceLen,
+                             int           small,
+                             int           verbosity )
+{
+   /* One-shot decompression of source[0..sourceLen-1] into dest.  On
+      entry *destLen is the capacity of dest; on success it is updated
+      to the number of bytes produced. */
+   bz_stream strm;
+   int ret;
+   unsigned int remaining;
+
+   if (dest == NULL || destLen == NULL || 
+       source == NULL ||
+       (small != 0 && small != 1) ||
+       verbosity < 0 || verbosity > 4) 
+          return BZ_PARAM_ERROR;
+
+   strm.bzalloc = NULL;
+   strm.bzfree  = NULL;
+   strm.opaque  = NULL;
+   ret = BZ2_bzDecompressInit ( &strm, verbosity, small );
+   if (ret != BZ_OK) return ret;
+
+   strm.next_in   = source;
+   strm.next_out  = dest;
+   strm.avail_in  = sourceLen;
+   strm.avail_out = *destLen;
+
+   ret = BZ2_bzDecompress ( &strm );
+
+   if (ret == BZ_STREAM_END) {
+      /* normal termination */
+      *destLen -= strm.avail_out;
+      BZ2_bzDecompressEnd ( &strm );
+      return BZ_OK;
+   }
+
+   /* Capture the unused output space before tearing the stream down. */
+   remaining = strm.avail_out;
+   BZ2_bzDecompressEnd ( &strm );
+
+   if (ret == BZ_OK)
+      /* Decompressor stalled: either input ran out (truncated stream)
+         or the output buffer filled -- distinguish by space left. */
+      return (remaining > 0) ? BZ_UNEXPECTED_EOF : BZ_OUTBUFF_FULL;
+
+   return ret; 
+}
+
+
+/*---------------------------------------------------*/
+/*--
+   Code contributed by Yoshioka Tsuneo
+   (QWF00133@niftyserve.or.jp/tsuneo-y@is.aist-nara.ac.jp),
+   to support better zlib compatibility.
+   This code is not _officially_ part of libbzip2 (yet);
+   I haven't tested it, documented it, or considered the
+   threading-safeness of it.
+   If this code breaks, please contact both Yoshioka and me.
+--*/
+/*---------------------------------------------------*/
+
+/*---------------------------------------------------*/
+/*--
+   return version like "0.9.0c".
+--*/
+/* Return the library's compile-time version string (BZ_VERSION). */
+const char * BZ_API(BZ2_bzlibVersion)(void)
+{
+   return BZ_VERSION;
+}
+
+
+#ifndef BZ_NO_STDIO
+/*---------------------------------------------------*/
+
+#if defined(_WIN32) || defined(OS2) || defined(MSDOS)
+#   include <fcntl.h>
+#   include <io.h>
+#   define SET_BINARY_MODE(file) setmode(fileno(file),O_BINARY)
+#else
+#   define SET_BINARY_MODE(file)
+#endif
+static
+BZFILE * bzopen_or_bzdopen
+               ( const char *path,   /* no use when bzdopen */
+                 int fd,             /* no use when bzdopen */
+                 const char *mode,
+                 int open_mode)      /* bzopen: 0, bzdopen:1 */
+{
+   /* Common worker for BZ2_bzopen / BZ2_bzdopen.  Parses a zlib-style
+      mode string ('r' or 'w', optional 's' for small-memory
+      decompression, optional digit setting the block size for
+      writing), opens or adopts the underlying FILE*, and wraps it in
+      a read or write handle.  Returns NULL on any failure. */
+   int    bzerr;
+   char   unused[BZ_MAX_UNUSED];
+   int    blockSize100k = 9;
+   int    writing       = 0;
+   char   mode2[10]     = "";
+   FILE   *fp           = NULL;
+   BZFILE *bzfp         = NULL;
+   int    verbosity     = 0;
+   int    workFactor    = 30;
+   int    smallMode     = 0;
+   int    nUnused       = 0; 
+
+   if (mode == NULL) return NULL;
+   /* Scan every character of the mode string; later flags win. */
+   while (*mode) {
+      switch (*mode) {
+      case 'r':
+         writing = 0; break;
+      case 'w':
+         writing = 1; break;
+      case 's':
+         smallMode = 1; break;
+      default:
+         if (isdigit((int)(*mode))) {
+            blockSize100k = *mode-BZ_HDR_0;   /* '1'..'9' -> 1..9 */
+         }
+      }
+      mode++;
+   }
+   strcat(mode2, writing ? "w" : "r" );
+   strcat(mode2,"b");   /* binary mode */
+
+   if (open_mode==0) {
+      /* bzopen: empty/NULL path means use the standard streams. */
+      if (path==NULL || strcmp(path,"")==0) {
+        fp = (writing ? stdout : stdin);
+        SET_BINARY_MODE(fp);
+      } else {
+        fp = fopen(path,mode2);
+      }
+   } else {
+      /* bzdopen: adopt an existing file descriptor. */
+#ifdef BZ_STRICT_ANSI
+      fp = NULL;
+#else
+      fp = fdopen(fd,mode2);
+#endif
+   }
+   if (fp == NULL) return NULL;
+
+   if (writing) {
+      /* Guard against total chaos and anarchy -- JRS */
+      if (blockSize100k < 1) blockSize100k = 1;
+      if (blockSize100k > 9) blockSize100k = 9; 
+      bzfp = BZ2_bzWriteOpen(&bzerr,fp,blockSize100k,
+                             verbosity,workFactor);
+   } else {
+      bzfp = BZ2_bzReadOpen(&bzerr,fp,verbosity,smallMode,
+                            unused,nUnused);
+   }
+   if (bzfp == NULL) {
+      /* Don't leak the FILE*, but never close the standard streams. */
+      if (fp != stdin && fp != stdout) fclose(fp);
+      return NULL;
+   }
+   return bzfp;
+}
+
+
+/*---------------------------------------------------*/
+/*--
+   open file for read or write.
+      ex) bzopen("file","w9")
+      case path="" or NULL => use stdin or stdout.
+--*/
+BZFILE * BZ_API(BZ2_bzopen)
+               ( const char *path,
+                 const char *mode )
+{
+   /* zlib-style open by path; empty/NULL path means stdin/stdout. */
+   return bzopen_or_bzdopen(path,-1,mode,/*bzopen*/0);
+}
+
+
+/*---------------------------------------------------*/
+BZFILE * BZ_API(BZ2_bzdopen)
+               ( int fd,
+                 const char *mode )
+{
+   /* zlib-style open from an already-open file descriptor. */
+   return bzopen_or_bzdopen(NULL,fd,mode,/*bzdopen*/1);
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzread) (BZFILE* b, void* buf, int len )
+{
+   /* zlib-style read: returns the number of bytes read, 0 at end of
+      stream, or -1 on error.
+      Fix: the original dereferenced b (->lastErr) before any NULL
+      check, which is undefined behaviour for a NULL handle.  Guard
+      the dereference; a NULL handle now falls through to BZ2_bzRead,
+      which reports BZ_PARAM_ERROR, so this returns -1 instead of
+      crashing.  Behaviour for valid handles is unchanged. */
+   int bzerr, nread;
+   if (b != NULL && ((bzFile*)b)->lastErr == BZ_STREAM_END) return 0;
+   nread = BZ2_bzRead(&bzerr,b,buf,len);
+   if (bzerr == BZ_OK || bzerr == BZ_STREAM_END) {
+      return nread;
+   } else {
+      return -1;
+   }
+}
+
+
+/*---------------------------------------------------*/
+int BZ_API(BZ2_bzwrite) (BZFILE* b, void* buf, int len )
+{
+   /* zlib-style write: returns len when every byte was accepted,
+      -1 on any error reported by BZ2_bzWrite. */
+   int bzerr;
+
+   BZ2_bzWrite ( &bzerr, b, buf, len );
+   return (bzerr == BZ_OK) ? len : -1;
+}
+
+
+/*---------------------------------------------------*/
+/* Flushing is not implemented; this stub exists only for zlib API
+   compatibility and always reports success. */
+int BZ_API(BZ2_bzflush) (BZFILE *b)
+{
+   /* do nothing now... */
+   return 0;
+}
+
+
+/*---------------------------------------------------*/
+void BZ_API(BZ2_bzclose) (BZFILE* b)
+{
+   /* Close a handle from BZ2_bzopen/BZ2_bzdopen: finish the stream
+      when writing, release the handle, and close the underlying
+      FILE* unless it is stdin/stdout.  Closing NULL is a no-op.
+      Fix: the original read ((bzFile*)b)->handle BEFORE the b==NULL
+      check -- undefined behaviour for a NULL handle.  Fetch the
+      FILE* only after the guard (same fix later adopted upstream). */
+   int bzerr;
+   FILE *fp;
+   
+   if (b==NULL) {return;}
+   fp = ((bzFile *)b)->handle;
+   if(((bzFile*)b)->writing){
+      BZ2_bzWriteClose(&bzerr,b,0,NULL,NULL);
+      if(bzerr != BZ_OK){
+         /* Retry in abandon mode so the handle memory is still freed. */
+         BZ2_bzWriteClose(NULL,b,1,NULL,NULL);
+      }
+   }else{
+      BZ2_bzReadClose(&bzerr,b);
+   }
+   if(fp!=stdin && fp!=stdout){
+      fclose(fp);
+   }
+}
+
+
+/*---------------------------------------------------*/
+/*--
+   return last error code 
+--*/
+/* Error-code names, indexed by the NEGATED bzlib error code
+   (index 0 == BZ_OK); see BZ2_bzerror.  The "???" entries are
+   placeholders keeping the table in step with future codes. */
+static char *bzerrorstrings[] = {
+       "OK"
+      ,"SEQUENCE_ERROR"
+      ,"PARAM_ERROR"
+      ,"MEM_ERROR"
+      ,"DATA_ERROR"
+      ,"DATA_ERROR_MAGIC"
+      ,"IO_ERROR"
+      ,"UNEXPECTED_EOF"
+      ,"OUTBUFF_FULL"
+      ,"CONFIG_ERROR"
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+};
+
+
+/* Return a static description of the last error on handle b and store
+   the (non-positive) numeric code in *errnum.  NOTE(review): b and
+   errnum are dereferenced unchecked -- callers must pass valid
+   pointers. */
+const char * BZ_API(BZ2_bzerror) (BZFILE *b, int *errnum)
+{
+   int err = ((bzFile *)b)->lastErr;
+
+   if(err>0) err = 0;   /* positive codes (e.g. BZ_STREAM_END) are not errors */
+   *errnum = err;
+   return bzerrorstrings[err*-1];   /* table is indexed by negated code */
+}
+#endif
+
+
+/*-------------------------------------------------------------*/
+/*--- end                                           bzlib.c ---*/
+/*-------------------------------------------------------------*/
+
+
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+
+
+/* A test program written to test robustness to decompression of
+   corrupted data.  Usage is 
+       unzcrash filename
+   and the program will read the specified file, compress it (in memory),
+   and then repeatedly decompress it, each time with a different bit of
+   the compressed data inverted, so as to test all possible one-bit errors.
+   This should not cause any invalid memory accesses.  If it does, 
+   I want to know about it!
+
+   p.s.  As you can see from the above description, the process is
+   incredibly slow.  A file of size eg 5KB will cause it to run for
+   many hours.
+*/
+
+//#include <stdio.h>
+//#include <assert.h>
+//#include "bzlib.h"
+
+/* Working buffers for the one-bit-corruption test driver (unzcrash). */
+#define M_BLOCK 1000000
+
+typedef unsigned char uchar;
+
+#define M_BLOCK_OUT (M_BLOCK + 1000000)
+uchar inbuf[M_BLOCK];      /* original, uncompressed test data */
+uchar outbuf[M_BLOCK_OUT]; /* decompression destination */
+uchar zbuf[M_BLOCK + 600 + (M_BLOCK / 100)]; /* compressed copy, with headroom */
+
+int nIn, nOut, nZ;   /* number of valid bytes in each buffer */
+
+/* Error-code names for the test driver, indexed by the negated bzlib
+   error code (index 0 == OK).  This copy is used when the stdio-based
+   table above is compiled out via BZ_NO_STDIO. */
+static char *bzerrorstrings[] = {
+       "OK"
+      ,"SEQUENCE_ERROR"
+      ,"PARAM_ERROR"
+      ,"MEM_ERROR"
+      ,"DATA_ERROR"
+      ,"DATA_ERROR_MAGIC"
+      ,"IO_ERROR"
+      ,"UNEXPECTED_EOF"
+      ,"OUTBUFF_FULL"
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+      ,"???"   /* for future */
+};
+
+void flip_bit ( int bit )
+{
+   /* Invert a single bit, identified by its global bit index, within
+      the compressed-data buffer zbuf. */
+   uchar mask = (uchar)(1 << (bit % 8));
+   zbuf[bit / 8] ^= mask;
+}
+
+/* Fill inbuf with a fixed, deterministic plaintext: some prose (the
+   bzip2 license text) followed by a long highly-compressible "ab"
+   run.  The exact bytes matter only in that they are reproducible
+   across runs. */
+void set_inbuf ( void )
+{
+  inbuf[0] = 0;
+  my_strcat(inbuf, "At her sixtieth birthday party, Margaret Thatcher ");
+  my_strcat(inbuf, "blew on the cake to light the candles.\n");
+  my_strcat(inbuf, "This program, bzip2, the associated library libbzip2, and all\n");
+  my_strcat(inbuf, "documentation, are copyright (C) 1996-2004 Julian R Seward.  All\n");
+  my_strcat(inbuf, "rights reserved.\n");
+  my_strcat(inbuf, "\n");
+  my_strcat(inbuf, "Redistribution and use in source and binary forms, with or without\n");
+  my_strcat(inbuf, "modification, are permitted provided that the following conditions\n");
+  my_strcat(inbuf, "are met:\n");
+  my_strcat(inbuf, "\n");
+  my_strcat(inbuf, "1. Redistributions of source code must retain the above copyright\n");
+  my_strcat(inbuf, "   notice, this list of conditions and the following disclaimer.\n");
+  my_strcat(inbuf, "\n");
+  my_strcat(inbuf, "2. The origin of this software must not be misrepresented; you must\n");
+  my_strcat(inbuf, "   not claim that you wrote the original software.  If you use this\n");
+  my_strcat(inbuf, "   software in a product, an acknowledgment in the product\n");
+  my_strcat(inbuf, "   documentation would be appreciated but is not required.\n");
+  my_strcat(inbuf, "\n");
+  my_strcat(inbuf, "3. Altered source versions must be plainly marked as such, and must\n");
+  my_strcat(inbuf, "   not be misrepresented as being the original software.\n");
+  my_strcat(inbuf, "\n");
+  my_strcat(inbuf, "4. The name of the author may not be used to endorse or promote\n");
+  my_strcat(inbuf, "   products derived from this software without specific prior written\n");
+  my_strcat(inbuf, "   permission.\n");
+  my_strcat(inbuf, "\n");
+  my_strcat(inbuf, "THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS\n");
+  my_strcat(inbuf, "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n");
+  my_strcat(inbuf, "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n");
+  my_strcat(inbuf, "ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n");
+  my_strcat(inbuf, "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n");
+  my_strcat(inbuf, "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n");
+  my_strcat(inbuf, "GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n");
+  my_strcat(inbuf, "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n");
+  my_strcat(inbuf, "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n");
+  my_strcat(inbuf, "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n");
+  my_strcat(inbuf, "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "ababababababababababababababababababababababababababababababab");
+  my_strcat(inbuf, "\n");
+}
+
+
+/* Test driver, invoked by the switchback harness with a host service
+   callback.  Compresses the fixed plaintext once, then repeatedly
+   flips one bit of the compressed stream, attempts decompression,
+   and restores the bit.  Any outcome other than a clean bzlib error
+   or a byte-exact round trip aborts via the service function.
+   (*serviceFn)(0,0) is the harness exit call and does not return. */
+void entry ( HWord(*service)(HWord,HWord) )
+{
+   int   r;
+   int   bit;
+   int   i;
+
+   serviceFn = service;
+
+   set_inbuf();
+   /* NOTE(review): inbuf is uchar[] but vexxx_strlen takes const
+      HChar* -- relies on an implicit pointer conversion; confirm the
+      compiler accepts this without a diagnostic. */
+   nIn = vexxx_strlen(inbuf)+1;
+   vexxx_printf( "%d bytes read\n", nIn );
+
+   nZ = M_BLOCK;
+   r = BZ2_bzBuffToBuffCompress (
+          zbuf, &nZ, inbuf, nIn, 9, 4/*verb*/, 30 );
+
+   /* NOTE(review): on failure this prints and calls the service fn,
+      then falls through to the loop -- presumably serviceFn(0,0)
+      never returns; verify against the harness. */
+   if (r != BZ_OK) {
+     vexxx_printf("initial compress failed!\n");
+     (*serviceFn)(0,0);
+   }
+   vexxx_printf( "%d after compression\n", nZ );
+
+   /* Test every bit position for the first 35 bits, then sample
+      every 377th bit thereafter to keep the run time bounded. */
+   for (bit = 0; bit < nZ*8; bit += (bit < 35 ? 1 : 377)) {
+      vexxx_printf( "bit %d  ", bit );
+      flip_bit ( bit );
+      nOut = M_BLOCK_OUT;
+      r = BZ2_bzBuffToBuffDecompress (
+             outbuf, &nOut, zbuf, nZ, 1/*small*/, 0 );
+      vexxx_printf( " %d  %s ", r, bzerrorstrings[-r] );
+
+      if (r != BZ_OK) {
+         vexxx_printf( "\n" );
+      } else {
+         /* Decompression "succeeded" despite the flipped bit: the
+            output must still match the input exactly. */
+         if (nOut != nIn) {
+           vexxx_printf(  "nIn/nOut mismatch %d %d\n", nIn, nOut );
+           (*serviceFn)(0,0);
+         } else {
+           for (i = 0; i < nOut; i++)
+             if (inbuf[i] != outbuf[i]) { 
+                vexxx_printf(  "mismatch at %d\n", i ); 
+                (*serviceFn)(0,0); 
+           }
+           if (i == nOut) vexxx_printf( "really ok!\n" );
+         }
+      }
+
+      /* Restore the flipped bit before testing the next position. */
+      flip_bit ( bit );
+   }
+
+#if 0
+   assert (nOut == nIn);
+   for (i = 0; i < nOut; i++) {
+     if (inbuf[i] != outbuf[i]) {
+        vexxx_printf( "difference at %d !\n", i );
+        return 1;
+     }
+   }
+#endif
+
+   vexxx_printf( "all ok\n" );
+   (*serviceFn)(0,0);
+}
diff --git a/VEX/switchback/test_emfloat.c b/VEX/switchback/test_emfloat.c
new file mode 100644
index 0000000..c47ccf8
--- /dev/null
+++ b/VEX/switchback/test_emfloat.c
@@ -0,0 +1,1942 @@
+/*
+** emfloat.c
+** Source for emulated floating-point routines.
+** BYTEmark (tm)
+** BYTE's Native Mode Benchmarks
+** Rick Grehan, BYTE Magazine.
+**
+** Created:
+** Last update: 3/95
+**
+** DISCLAIMER
+** The source, executable, and documentation files that comprise
+** the BYTEmark benchmarks are made available on an "as is" basis.
+** This means that we at BYTE Magazine have made every reasonable
+** effort to verify that the there are no errors in the source and
+** executable code.  We cannot, however, guarantee that the programs
+** are error-free.  Consequently, McGraw-HIll and BYTE Magazine make
+** no claims in regard to the fitness of the source code, executable
+** code, and documentation of the BYTEmark.
+**  Furthermore, BYTE Magazine, McGraw-Hill, and all employees
+** of McGraw-Hill cannot be held responsible for any damages resulting
+** from the use of this code or the results obtained from using
+** this code.
+*/
+
+#include "../pub/libvex_basictypes.h"
+
+static HWord (*serviceFn)(HWord,HWord) = 0;  /* host service callback; installed by entry() */
+
+
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+
+/* Minimal strcpy clone: copies the NUL-terminated string src into
+   dest, including the terminator, and returns dest. */
+static char* my_strcpy ( char* dest, const char* src )
+{
+   char* dest_orig = dest;
+   while (*src) *dest++ = *src++;
+   *dest = 0;
+   return dest_orig;
+}
+
+/* Minimal memcpy clone: copies sz bytes from src to dest and returns
+   dest.  Regions must not overlap (use my_memmove for that). */
+static void* my_memcpy ( void *dest, const void *src, int sz )
+{
+   const char *s = (const char *)src;
+   char *d = (char *)dest;
+
+   while (sz--)
+      *d++ = *s++;
+
+   return dest;
+}
+
+/* Minimal memmove clone: overlap-safe byte copy.  Copies backwards
+   when dst is above src (so an overlapping tail is read before it is
+   overwritten), forwards otherwise; the inner loops are unrolled 4x.
+   NOTE(review): the dst>src / dst<src pointer comparison is only
+   well-defined in ISO C when both point into the same object --
+   conventional for memmove implementations, but strictly UB for
+   unrelated buffers. */
+static void* my_memmove( void *dst, const void *src, unsigned int len )
+{
+    register char *d;
+    register char *s;
+    if ( dst > src ) {
+        d = (char *)dst + len - 1;
+        s = (char *)src + len - 1;
+        while ( len >= 4 ) {
+            *d-- = *s--;
+            *d-- = *s--;
+            *d-- = *s--;
+            *d-- = *s--;
+            len -= 4;
+        }
+        while ( len-- ) {
+            *d-- = *s--;
+        }
+    } else if ( dst < src ) {
+        d = (char *)dst;
+        s = (char *)src;
+        while ( len >= 4 ) {
+            *d++ = *s++;
+            *d++ = *s++;
+            *d++ = *s++;
+            *d++ = *s++;
+            len -= 4;
+        }
+        while ( len-- ) {
+            *d++ = *s++;
+        }
+    }
+    return dst;
+}
+
+/////////////////////////////////////////////////////////////////////
+
+/* Emit n bytes of p through the host service function, one byte per
+   call (request code 1 = log a character). */
+static void vexxx_log_bytes ( char* p, int n )
+{
+   int i;
+   for (i = 0; i < n; i++)
+      (*serviceFn)( 1, (int)p[i] );
+}
+
+/*---------------------------------------------------------*/
+/*--- vexxx_printf                                        ---*/
+/*---------------------------------------------------------*/
+
+/* This should be the only <...> include in the entire VEXXX library.
+   New code for vexxx_util.c should go above this point. */
+#include <stdarg.h>
+
+/* ASCII-only toupper: maps 'a'..'z' to 'A'..'Z', leaves everything
+   else unchanged. */
+static HChar vexxx_toupper ( HChar c )
+{
+   if (c >= 'a' && c <= 'z')
+      return toHChar(c + ('A' - 'a'));
+   else
+      return c;
+}
+
+/* strlen clone: length of a NUL-terminated string, excluding the
+   terminator. */
+static Int vexxx_strlen ( const HChar* str )
+{
+   Int i = 0;
+   while (str[i] != 0) i++;
+   return i;
+}
+
+/* String equality: True iff s1 and s2 contain identical characters
+   up to and including the NUL terminator. */
+Bool vexxx_streq ( const HChar* s1, const HChar* s2 )
+{
+   while (True) {
+      if (*s1 == 0 && *s2 == 0)
+         return True;
+      if (*s1 != *s2)
+         return False;
+      s1++;
+      s2++;
+   }
+}
+
+/* Some flags.  */
+#define VG_MSG_SIGNED    1 /* The value is signed. */
+#define VG_MSG_ZJUSTIFY  2 /* Must justify with '0'. */
+#define VG_MSG_LJUSTIFY  4 /* Must justify on the left. */
+#define VG_MSG_PAREN     8 /* Parenthesize if present (for %y) */
+#define VG_MSG_COMMA    16 /* Add commas to numbers (for %d, %u) */
+
+/* Copy a string into the buffer.  Emits str through send(), applying
+   the field width and VG_MSG_LJUSTIFY padding rules; capitalise
+   upper-cases each character.  A string longer than the field width
+   is truncated to the width.  Returns the number of characters
+   emitted. */
+static UInt
+myvprintf_str ( void(*send)(HChar), Int flags, Int width, HChar* str, 
+                Bool capitalise )
+{
+#  define MAYBE_TOUPPER(ch) toHChar(capitalise ? vexxx_toupper(ch) : (ch))
+   UInt ret = 0;
+   Int i, extra;
+   Int len = vexxx_strlen(str);
+
+   /* No width given: emit the string verbatim. */
+   if (width == 0) {
+      ret += len;
+      for (i = 0; i < len; i++)
+         send(MAYBE_TOUPPER(str[i]));
+      return ret;
+   }
+
+   /* String wider than the field: truncate to the field width. */
+   if (len > width) {
+      ret += width;
+      for (i = 0; i < width; i++)
+         send(MAYBE_TOUPPER(str[i]));
+      return ret;
+   }
+
+   /* Pad with spaces on the appropriate side. */
+   extra = width - len;
+   if (flags & VG_MSG_LJUSTIFY) {
+      ret += extra;
+      for (i = 0; i < extra; i++)
+         send(' ');
+   }
+   ret += len;
+   for (i = 0; i < len; i++)
+      send(MAYBE_TOUPPER(str[i]));
+   if (!(flags & VG_MSG_LJUSTIFY)) {
+      ret += extra;
+      for (i = 0; i < extra; i++)
+         send(' ');
+   }
+
+#  undef MAYBE_TOUPPER
+
+   return ret;
+}
+
+/* Write pL into the buffer according to these args:
+ *  If VG_MSG_SIGNED is set, pL is treated as signed.
+ *  BASE is the base (2..16).
+ *  VG_MSG_ZJUSTIFY pads with '0'; VG_MSG_LJUSTIFY pads on the right.
+ *  WIDTH is the minimum width of the field.
+ * Returns the number of characters emitted via send().
+ *
+ * Fix: the previous version copied pL into a UInt before formatting,
+ * silently truncating 64-bit values -- %lx/%lu/%ld and %p (on 64-bit
+ * hosts) lost the upper 32 bits.  Formatting is now done at full
+ * ULong width.  buf is also enlarged: a 64-bit value in base 2 needs
+ * up to 64 digits, which overflowed the old 40-byte buffer.
+ */
+static UInt
+myvprintf_int64 ( void(*send)(HChar), Int flags, Int base, Int width, ULong pL)
+{
+   HChar buf[100];           /* worst case: 64 base-2 digits + sign + padding */
+   Int   ind = 0;
+   Int   i, nc = 0;          /* nc counts comma separators inserted so far */
+   Bool  neg = False;
+   HChar *digits = "0123456789ABCDEF";
+   UInt  ret = 0;
+   ULong p = pL;
+
+   if (base < 2 || base > 16)
+      return ret;
+ 
+   if ((flags & VG_MSG_SIGNED) && (Long)p < 0) {
+      p   = (ULong)( - (Long)p );
+      neg = True;
+   }
+
+   /* Generate digits in reverse order, inserting a comma after every
+      third digit when VG_MSG_COMMA is set (base 10 only). */
+   if (p == 0)
+      buf[ind++] = '0';
+   else {
+      while (p > 0) {
+         if ((flags & VG_MSG_COMMA) && 10 == base &&
+             0 == (ind-nc) % 3 && 0 != ind) 
+         {
+            buf[ind++] = ',';
+            nc++;
+         }
+         buf[ind++] = digits[p % base];
+         p /= base;
+      }
+   }
+
+   if (neg)
+      buf[ind++] = '-';
+
+   /* Right-justified: pad (still in reverse order) up to width.
+      NOTE: width is not bounds-checked against buf; callers pass
+      small widths (same limitation as the original). */
+   if (width > 0 && !(flags & VG_MSG_LJUSTIFY)) {
+      for(; ind < width; ind++) {
+         buf[ind] = toHChar((flags & VG_MSG_ZJUSTIFY) ? '0': ' ');
+      }
+   }
+
+   /* Reverse copy to buffer.  */
+   ret += ind;
+   for (i = ind -1; i >= 0; i--) {
+      send(buf[i]);
+   }
+   if (width > 0 && (flags & VG_MSG_LJUSTIFY)) {
+      for(; ind < width; ind++) {
+	 ret++;
+         send(' ');  // Never pad with zeroes on RHS -- changes the value!
+      }
+   }
+   return ret;
+}
+
+
+/* A simple vprintf().  */
+/* A simple vprintf().  Walks the format string, sending literal
+   characters straight to send() and dispatching %-conversions to
+   myvprintf_int64 / myvprintf_str.  Supports flags '(', ',', '-',
+   '0', a decimal field width, the 'l' length modifier, and the
+   conversions d u p x c s S.  Unknown conversions are silently
+   dropped.  Returns the number of characters emitted. */
+static 
+UInt vprintf_wrk ( void(*send)(HChar), const HChar *format, va_list vargs )
+{
+   UInt ret = 0;
+   int i;
+   int flags;
+   int width;
+   Bool is_long;
+
+   /* We assume that vargs has already been initialised by the 
+      caller, using va_start, and that the caller will similarly
+      clean up with va_end.
+   */
+
+   for (i = 0; format[i] != 0; i++) {
+      if (format[i] != '%') {
+         send(format[i]);
+	 ret++;
+         continue;
+      }
+      i++;
+      /* A '%' has been found.  Ignore a trailing %. */
+      if (format[i] == 0)
+         break;
+      if (format[i] == '%') {
+         /* `%%' is replaced by `%'. */
+         send('%');
+	 ret++;
+         continue;
+      }
+      flags = 0;
+      is_long = False;
+      width = 0; /* length of the field. */
+      if (format[i] == '(') {
+	 flags |= VG_MSG_PAREN;
+	 i++;
+      }
+      /* If ',' follows '%', commas will be inserted. */
+      if (format[i] == ',') {
+         flags |= VG_MSG_COMMA;
+         i++;
+      }
+      /* If '-' follows '%', justify on the left. */
+      if (format[i] == '-') {
+         flags |= VG_MSG_LJUSTIFY;
+         i++;
+      }
+      /* If '0' follows '%', pads will be inserted. */
+      if (format[i] == '0') {
+         flags |= VG_MSG_ZJUSTIFY;
+         i++;
+      }
+      /* Compute the field length. */
+      while (format[i] >= '0' && format[i] <= '9') {
+         width *= 10;
+         width += format[i++] - '0';
+      }
+      while (format[i] == 'l') {
+         i++;
+         is_long = True;
+      }
+
+      switch (format[i]) {
+         case 'd': /* %d */
+            flags |= VG_MSG_SIGNED;
+            if (is_long)
+               ret += myvprintf_int64(send, flags, 10, width, 
+				      (ULong)(va_arg (vargs, Long)));
+            else
+               ret += myvprintf_int64(send, flags, 10, width, 
+				      (ULong)(va_arg (vargs, Int)));
+            break;
+         case 'u': /* %u */
+            if (is_long)
+               ret += myvprintf_int64(send, flags, 10, width, 
+				      (ULong)(va_arg (vargs, ULong)));
+            else
+               ret += myvprintf_int64(send, flags, 10, width, 
+				      (ULong)(va_arg (vargs, UInt)));
+            break;
+         case 'p': /* %p */
+	    ret += 2;
+            send('0');
+            send('x');
+            ret += myvprintf_int64(send, flags, 16, width, 
+				   (ULong)((HWord)va_arg (vargs, void *)));
+            break;
+         case 'x': /* %x */
+            if (is_long)
+               ret += myvprintf_int64(send, flags, 16, width, 
+				      (ULong)(va_arg (vargs, ULong)));
+            else
+               ret += myvprintf_int64(send, flags, 16, width, 
+				      (ULong)(va_arg (vargs, UInt)));
+            break;
+         case 'c': /* %c */
+	    ret++;
+            send(toHChar(va_arg (vargs, int)));
+            break;
+         case 's': case 'S': { /* %s */
+            char *str = va_arg (vargs, char *);
+            if (str == (char*) 0) str = "(null)";
+            ret += myvprintf_str(send, flags, width, str, 
+                                 toBool(format[i]=='S'));
+            break;
+	 }
+#        if 0
+	 case 'y': { /* %y - print symbol */
+	    Addr a = va_arg(vargs, Addr);
+
+            HChar *name;
+	    if (VG_(get_fnname_w_offset)(a, &name)) {
+               HChar buf[1 + VG_strlen(name) + 1 + 1];
+	       if (flags & VG_MSG_PAREN) {
+                  VG_(sprintf)(str, "(%s)", name):
+	       } else {
+                  VG_(sprintf)(str, "%s", name):
+               }
+	       ret += myvprintf_str(send, flags, width, buf, 0);
+	    }
+	    break;
+	 }
+#        endif
+         default:
+            break;
+      }
+   }
+   return ret;
+}
+
+
+/* A general replacement for printf().  Note that only low-level 
+   debugging info should be sent via here.  The official route is to
+   to use vg_message().  This interface is deprecated.
+*/
+/* Line buffer for vexxx_printf output; flushed to the host on
+   newline, on near-overflow, or at the end of each printf call. */
+static HChar myprintf_buf[1000];
+static Int   n_myprintf_buf;
+
+/* Character sink for vprintf_wrk: appends c to myprintf_buf, first
+   flushing the buffer through vexxx_log_bytes when c is a newline or
+   the buffer is nearly full.  Note the newline itself is buffered
+   after the flush, so it is emitted with the following flush. */
+static void add_to_myprintf_buf ( HChar c )
+{
+   if (c == '\n' || n_myprintf_buf >= 1000-10 /*paranoia*/ ) {
+      (*vexxx_log_bytes)( myprintf_buf, vexxx_strlen(myprintf_buf) );
+      n_myprintf_buf = 0;
+      myprintf_buf[n_myprintf_buf] = 0;      
+   }
+   myprintf_buf[n_myprintf_buf++] = c;
+   myprintf_buf[n_myprintf_buf] = 0;
+}
+
+/* printf replacement for the switchback environment: formats via
+   vprintf_wrk into myprintf_buf and flushes any remainder through
+   the host service.  Returns the number of characters formatted. */
+static UInt vexxx_printf ( const char *format, ... )
+{
+   UInt ret;
+   va_list vargs;
+   va_start(vargs,format);
+   
+   n_myprintf_buf = 0;
+   myprintf_buf[n_myprintf_buf] = 0;      
+   ret = vprintf_wrk ( add_to_myprintf_buf, format, vargs );
+
+   /* Flush whatever did not end in a newline. */
+   if (n_myprintf_buf > 0) {
+      (*vexxx_log_bytes)( myprintf_buf, n_myprintf_buf );
+   }
+
+   va_end(vargs);
+
+   return ret;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                          vexxx_util.c ---*/
+/*---------------------------------------------------------------*/
+
+
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+
+//#include <stdio.h>
+//#include <string.h>
+//#include <malloc.h>
+
+typedef unsigned char uchar;
+typedef unsigned int uint;
+typedef unsigned short ushort;
+typedef unsigned long ulong;
+typedef int int32;              /* Signed 32 bit integer */
+
+#define INTERNAL_FPF_PRECISION 4
+#define CPUEMFLOATLOOPMAX 500000L
+#define EMFARRAYSIZE 3000L
+
+/* Benchmark-control record for the emulated-FP test (BYTEmark). */
+typedef struct {
+        int adjust;             /* Set adjust code */
+        ulong request_secs;     /* # of seconds requested */
+        ulong arraysize;        /* Size of array */
+        ulong loops;            /* Loops per iterations */
+        double emflops;         /* Results */
+} EmFloatStruct;
+
+
+
+/* Is this a 64 bit architecture? If so, this will define LONG64 */
+/* Uwe F. Mayer 15 November 1997                                 */
+// #include "pointer.h"
+
+#define u8 unsigned char
+#define u16 unsigned short
+#ifdef LONG64
+#define u32 unsigned int
+#else
+#define u32 unsigned long
+#endif
+#define uchar unsigned char
+#define ulong unsigned long
+
+#define MAX_EXP 32767L
+#define MIN_EXP (-32767L)
+
+#define IFPF_IS_ZERO 0
+#define IFPF_IS_SUBNORMAL 1
+#define IFPF_IS_NORMAL 2
+#define IFPF_IS_INFINITY 3
+#define IFPF_IS_NAN 4
+#define IFPF_TYPE_COUNT 5
+
+#define ZERO_ZERO                       0
+#define ZERO_SUBNORMAL                  1
+#define ZERO_NORMAL                     2
+#define ZERO_INFINITY                   3
+#define ZERO_NAN                        4
+
+#define SUBNORMAL_ZERO                  5
+#define SUBNORMAL_SUBNORMAL             6
+#define SUBNORMAL_NORMAL                7
+#define SUBNORMAL_INFINITY              8
+#define SUBNORMAL_NAN                   9
+
+#define NORMAL_ZERO                     10
+#define NORMAL_SUBNORMAL                11
+#define NORMAL_NORMAL                   12
+#define NORMAL_INFINITY                 13
+#define NORMAL_NAN                      14
+
+#define INFINITY_ZERO                   15
+#define INFINITY_SUBNORMAL              16
+#define INFINITY_NORMAL                 17
+#define INFINITY_INFINITY               18
+#define INFINITY_NAN                    19
+
+#define NAN_ZERO                        20
+#define NAN_SUBNORMAL                   21
+#define NAN_NORMAL                      22
+#define NAN_INFINITY                    23
+#define NAN_NAN                         24
+#define OPERAND_ZERO                    0
+#define OPERAND_SUBNORMAL               1
+#define OPERAND_NORMAL                  2
+#define OPERAND_INFINITY                3
+#define OPERAND_NAN                     4
+
+/* Software floating-point number: sign/exponent/64-bit mantissa kept
+   as four 16-bit words, most significant word first. */
+typedef struct
+{
+        u8 type;        /* Indicates, NORMAL, SUBNORMAL, etc. */
+        u8 sign;        /* Mantissa sign */
+        short exp;      /* Signed exponent...no bias */
+        u16 mantissa[INTERNAL_FPF_PRECISION];
+} InternalFPF;
+
+static
+void SetupCPUEmFloatArrays(InternalFPF *abase,
+        InternalFPF *bbase, InternalFPF *cbase, ulong arraysize);
+static
+ulong DoEmFloatIteration(InternalFPF *abase,
+        InternalFPF *bbase, InternalFPF *cbase,
+        ulong arraysize, ulong loops);
+
+static void SetInternalFPFZero(InternalFPF *dest,
+                        uchar sign);
+static void SetInternalFPFInfinity(InternalFPF *dest,
+                        uchar sign);
+static void SetInternalFPFNaN(InternalFPF *dest);
+static int IsMantissaZero(u16 *mant);
+static void Add16Bits(u16 *carry,u16 *a,u16 b,u16 c);
+static void Sub16Bits(u16 *borrow,u16 *a,u16 b,u16 c);
+static void ShiftMantLeft1(u16 *carry,u16 *mantissa);
+static void ShiftMantRight1(u16 *carry,u16 *mantissa);
+static void StickyShiftRightMant(InternalFPF *ptr,int amount);
+static void normalize(InternalFPF *ptr);
+static void denormalize(InternalFPF *ptr,int minimum_exponent);
+static void RoundInternalFPF(InternalFPF *ptr);
+static void choose_nan(InternalFPF *x,InternalFPF *y,InternalFPF *z,
+                int intel_flag);
+static void AddSubInternalFPF(uchar operation,InternalFPF *x,
+                InternalFPF *y,InternalFPF *z);
+static void MultiplyInternalFPF(InternalFPF *x,InternalFPF *y,
+                        InternalFPF *z);
+static void DivideInternalFPF(InternalFPF *x,InternalFPF *y, 
+                        InternalFPF *z);
+
+static void Int32ToInternalFPF(int32 mylong,
+                InternalFPF *dest);
+static int InternalFPFToString(char *dest,
+                InternalFPF *src);
+
+static int32 randnum(int32 lngval);
+
+/* Next pseudo-random value reduced modulo num (range [0, num)). */
+static int32 randwc(int32 num)
+{
+	return(randnum((int32)0)%num);
+}
+
+/* Two-word state of the deterministic linear-congruential PRNG. */
+static int32 randw[2] = { (int32)13 , (int32)117 };
+/* Return the next pseudo-random value.  A nonzero argument resets
+   the state to its fixed seed first, making sequences reproducible. */
+static int32 randnum(int32 lngval)
+{
+	register int32 interm;
+
+	if (lngval!=(int32)0)
+	{	randw[0]=(int32)13; randw[1]=(int32)117; }
+
+	interm=(randw[0]*(int32)254754+randw[1]*(int32)529562)%(int32)999563;
+	randw[1]=randw[0];
+	randw[0]=interm;
+	return(interm);
+}
+
+
+/* Fill abase and bbase with arraysize pseudo-random emulated-FP
+   quotients (reseeding the PRNG first, so contents are
+   reproducible).  cbase is only a result buffer for the benchmark
+   loop and is left untouched here. */
+static 
+void SetupCPUEmFloatArrays(InternalFPF *abase,
+                InternalFPF *bbase,
+                InternalFPF *cbase,
+                ulong arraysize)
+{
+ulong i;
+InternalFPF locFPF1,locFPF2;
+
+randnum((int32)13);
+
+for(i=0;i<arraysize;i++)
+{/*       LongToInternalFPF(randwc(50000L),&locFPF1); */
+        Int32ToInternalFPF(randwc((int32)50000),&locFPF1);
+ /*       LongToInternalFPF(randwc(50000L)+1L,&locFPF2); */
+        Int32ToInternalFPF(randwc((int32)50000)+(int32)1,&locFPF2);
+        DivideInternalFPF(&locFPF1,&locFPF2,abase+i);
+ /*       LongToInternalFPF(randwc(50000L)+1L,&locFPF2); */
+        Int32ToInternalFPF(randwc((int32)50000)+(int32)1,&locFPF2);
+        DivideInternalFPF(&locFPF1,&locFPF2,bbase+i);
+}
+return;
+}
+
+
+static char* str1 = "loops %d\n";   /* format string for the loop-count banner */
+/* One benchmark pass: walks the arrays applying add/sub/mul/div in a
+   fixed 4:4:5:3 pattern (via jtable), then prints eight sample
+   results for verification and returns.
+   NOTE(review): 'loops' is forcibly overwritten with 100 -- the
+   caller's value is ignored, presumably deliberate for this
+   switchback test.  'number_of_loops' is computed but never used
+   (dead).  The inner 'i' shadows the outer loop index. */
+static 
+ulong DoEmFloatIteration(InternalFPF *abase,
+                InternalFPF *bbase,
+                InternalFPF *cbase,
+                ulong arraysize, ulong loops)
+{
+static uchar jtable[16] = {0,0,0,0,1,1,1,1,2,2,2,2,2,3,3,3};
+ulong i;
+int number_of_loops;
+ loops = 100;
+number_of_loops=loops-1; /* the index of the first loop we run */
+
+vexxx_printf(str1, (int)loops);
+
+/*
+** Each pass through the array performs operations in
+** the followingratios:
+**   4 adds, 4 subtracts, 5 multiplies, 3 divides
+** (adds and subtracts being nearly the same operation)
+*/
+
+{
+        for(i=0;i<arraysize;i++)
+                switch(jtable[i % 16])
+                {
+                        case 0: /* Add */
+                                AddSubInternalFPF(0,abase+i,
+                                  bbase+i,
+                                  cbase+i);
+                                break;
+                        case 1: /* Subtract */
+                                AddSubInternalFPF(1,abase+i,
+                                  bbase+i,
+                                  cbase+i);
+                                break;
+                        case 2: /* Multiply */
+                                MultiplyInternalFPF(abase+i,
+                                  bbase+i,
+                                  cbase+i);
+                                break;
+                        case 3: /* Divide */
+                                DivideInternalFPF(abase+i,
+                                  bbase+i,
+                                  cbase+i);
+                                break;
+                }
+{
+  ulong j[8];   /* we test 8 entries */
+  int k;
+  ulong i;
+  char buffer[1024];
+  if (100==loops) /* the first loop */
+    {
+      /* Sample four entries near each end of the array and print
+         "operand op operand = result" for each, as a sanity check. */
+      j[0]=(ulong)2;
+      j[1]=(ulong)6;
+      j[2]=(ulong)10;
+      j[3]=(ulong)14;
+      j[4]=(ulong)(arraysize-14);
+      j[5]=(ulong)(arraysize-10);
+      j[6]=(ulong)(arraysize-6);
+      j[7]=(ulong)(arraysize-2);
+      for(k=0;k<8;k++){
+	i=j[k];
+	InternalFPFToString(buffer,abase+i);
+	vexxx_printf("%6d: (%s) ",i,buffer);
+	switch(jtable[i % 16])
+	  {
+	  case 0: my_strcpy(buffer,"+"); break;
+	  case 1: my_strcpy(buffer,"-"); break;
+	  case 2: my_strcpy(buffer,"*"); break;
+	  case 3: my_strcpy(buffer,"/"); break;
+	  }
+	vexxx_printf("%s ",buffer);
+	InternalFPFToString(buffer,bbase+i);
+	vexxx_printf("(%s) = ",buffer);
+	InternalFPFToString(buffer,cbase+i);
+	vexxx_printf("%s\n",buffer);
+      }
+return 0;
+    }
+}
+}
+return 0;
+}
+
+/***********************
+** SetInternalFPFZero **
+************************
+** Set an internal floating-point-format number to zero.
+** sign determines the sign of the zero.
+*/
+/***********************
+** SetInternalFPFZero **
+************************
+** Set an internal floating-point-format number to zero.
+** sign determines the sign of the zero.
+*/
+static void SetInternalFPFZero(InternalFPF *dest,
+                        uchar sign)
+{
+int i;          /* Index */
+
+dest->type=IFPF_IS_ZERO;
+dest->sign=sign;
+dest->exp=MIN_EXP;
+for(i=0;i<INTERNAL_FPF_PRECISION;i++)
+        dest->mantissa[i]=0;
+return;
+}
+
+/***************************
+** SetInternalFPFInfinity **
+****************************
+** Set an internal floating-point-format number to infinity.
+** This can happen if the exponent exceeds MAX_EXP.
+** As above, sign picks the sign of infinity.
+*/
+static void SetInternalFPFInfinity(InternalFPF *dest,
+                        uchar sign)
+{
+int i;          /* Index */
+
+dest->type=IFPF_IS_INFINITY;
+dest->sign=sign;
+/* NOTE(review): exp is set to MIN_EXP here even though an infinity
+   conceptually carries the maximum exponent (SetInternalFPFNaN below
+   uses MAX_EXP).  Looks like a transcription slip from upstream
+   BYTEmark -- confirm against the reference source before changing,
+   since the benchmark's printed results depend on it. */
+dest->exp=MIN_EXP;
+for(i=0;i<INTERNAL_FPF_PRECISION;i++)
+        dest->mantissa[i]=0;
+return;
+}
+
+/**********************
+** SetInternalFPFNaN **
+***********************
+** Set an internal floating-point-format number to Nan
+** (not a number).  Note that we "emulate" an 80x87 as far
+** as the mantissa bits go.
+*/
+static void SetInternalFPFNaN(InternalFPF *dest)
+{
+int i;          /* Index */
+
+dest->type=IFPF_IS_NAN;
+dest->exp=MAX_EXP;
+dest->sign=1;
+dest->mantissa[0]=0x4000;       /* 80x87-style quiet-NaN mantissa pattern */
+for(i=1;i<INTERNAL_FPF_PRECISION;i++)
+        dest->mantissa[i]=0;
+
+return;
+}
+
+/*******************
+** IsMantissaZero **
+********************
+** Pass this routine a pointer to an internal floating point format
+** number's mantissa.  It checks for an all-zero mantissa.
+** Returns 0 if it is NOT all zeros, !=0 otherwise.
+*/
+static int IsMantissaZero(u16 *mant)
+{
+int i;          /* Index */
+int n;          /* Return value */
+
+/* OR all words together; the mantissa is zero iff the result is. */
+n=0;
+for(i=0;i<INTERNAL_FPF_PRECISION;i++)
+        n|=mant[i];
+
+return(!n);
+}
+
+/**************
+** Add16Bits **
+***************
+** Add b, c, and carry.  Result in a.  New carry in carry.
+*/
+static void Add16Bits(u16 *carry,
+                u16 *a,
+                u16 b,
+                u16 c)
+{
+u32 accum;              /* Accumulator */
+
+/*
+** Do the work in the 32-bit accumulator so we can return
+** the carry.
+*/
+accum=(u32)b;
+accum+=(u32)c;
+accum+=(u32)*carry;
+*carry=(u16)((accum & 0x00010000) ? 1 : 0);     /* New carry */
+*a=(u16)(accum & 0xFFFF);       /* Result is lo 16 bits */
+return;
+}
+
+/**************
+** Sub16Bits **
+***************
+** Additive inverse of Add16Bits: subtract c and the incoming borrow
+** from b.  Result in a; new borrow (0 or 1) in borrow.
+** Fix: the new-borrow store used a (u32) cast even though *borrow is
+** a 16-bit object, forcing an implicit narrowing; use (u16) exactly
+** as Add16Bits does.  The stored value (0 or 1) is unchanged.
+*/
+static void Sub16Bits(u16 *borrow,
+                u16 *a,
+                u16 b,
+                u16 c)
+{
+u32 accum;              /* 32-bit accumulator so the borrow bit is visible */
+
+accum=(u32)b;
+accum-=(u32)c;
+accum-=(u32)*borrow;
+*borrow=(u16)((accum & 0x00010000) ? 1 : 0);    /* New borrow */
+*a=(u16)(accum & 0xFFFF);
+return;
+}
+
+/*******************
+** ShiftMantLeft1 **
+********************
+** Shift a vector of 16-bit numbers left 1 bit.  Also provides
+** a carry bit, which is shifted in at the beginning, and
+** shifted out at the end.  Words are processed least-significant
+** first so each word's outgoing high bit feeds the next word.
+*/
+static void ShiftMantLeft1(u16 *carry,
+                        u16 *mantissa)
+{
+int i;          /* Index */
+int new_carry;
+u16 accum;      /* Temporary holding placed */
+
+for(i=INTERNAL_FPF_PRECISION-1;i>=0;i--)
+{       accum=mantissa[i];
+        new_carry=accum & 0x8000;       /* Get new carry */
+        accum=accum<<1;                 /* Do the shift */
+        if(*carry)
+                accum|=1;               /* Insert previous carry */
+        *carry=new_carry;
+        mantissa[i]=accum;              /* Return shifted value */
+}
+return;
+}
+
+/********************
+** ShiftMantRight1 **
+*********************
+** Shift a mantissa right by 1 bit.  Provides carry, as
+** above.  Words are processed most-significant first so each
+** word's outgoing low bit feeds the next word.
+*/
+static void ShiftMantRight1(u16 *carry,
+                        u16 *mantissa)
+{
+int i;          /* Index */
+int new_carry;
+u16 accum;
+
+for(i=0;i<INTERNAL_FPF_PRECISION;i++)
+{       accum=mantissa[i];
+        new_carry=accum & 1;            /* Get new carry */
+        accum=accum>>1;
+        if(*carry)
+                accum|=0x8000;
+        *carry=new_carry;
+        mantissa[i]=accum;
+}
+return;
+}
+
+
+/*****************************
+** StickyShiftMantRight **
+******************************
+** This is a shift right of the mantissa with a "sticky bit".
+** I.E., if a carry of 1 is shifted out of the least significant
+** bit, the least significant bit is set to 1.
+*/
+static void StickyShiftRightMant(InternalFPF *ptr,
+                        int amount)
+{
+int i;          /* Index */
+u16 carry;      /* Self-explanatory */
+u16 *mantissa;
+
+mantissa=ptr->mantissa;
+
+if(ptr->type!=IFPF_IS_ZERO)     /* Don't bother shifting a zero */
+{
+        /*
+        ** If the amount of shifting will shift everyting
+        ** out of existence, then just clear the whole mantissa
+        ** and set the lowmost bit to 1 (the sticky bit).
+        */
+        if(amount>=INTERNAL_FPF_PRECISION * 16)
+        {
+                for(i=0;i<INTERNAL_FPF_PRECISION-1;i++)
+                        mantissa[i]=0;
+                mantissa[INTERNAL_FPF_PRECISION-1]=1;
+        }
+        else
+                for(i=0;i<amount;i++)
+                {
+                        carry=0;
+                        ShiftMantRight1(&carry,mantissa);
+                        if(carry)
+                                mantissa[INTERNAL_FPF_PRECISION-1] |= 1;
+                }
+}
+return;
+}
+
+
+/**************************************************
+**         POST ARITHMETIC PROCESSING            **
+**  (NORMALIZE, ROUND, OVERFLOW, AND UNDERFLOW)  **
+**************************************************/
+
+/**************
+** normalize **
+***************
+** Normalize an internal-representation number.  Normalization
+** discards empty most-significant bits.
+*/
+static void normalize(InternalFPF *ptr)
+{
+u16 carry;
+
+/*
+** Slide the significand left one bit at a time until its top bit
+** is set, decrementing the exponent once per shift so the value
+** is unchanged.
+** NOTE(review): an all-zero mantissa would loop forever here;
+** callers appear to guarantee at least one set bit -- confirm.
+*/
+for (;;)
+{
+        if (ptr->mantissa[0] & 0x8000)
+                break;                  /* Top bit set: normalized */
+        carry = 0;
+        ShiftMantLeft1(&carry, ptr->mantissa);
+        ptr->exp--;
+}
+return;
+}
+
+/****************
+** denormalize **
+*****************
+** Denormalize an internal-representation number.  This means
+** shifting it right until its exponent is equivalent to
+** minimum_exponent. (You have to do this often in order
+** to perform additions and subtractions).
+*/
+static void denormalize(InternalFPF *ptr,
+                int minimum_exponent)
+{
+long shortfall;         /* How far exp lies below minimum_exponent */
+
+if (IsMantissaZero(ptr->mantissa))
+{
+        vexxx_printf("Error:  zero significand in denormalize\n");
+}
+
+shortfall = (long)minimum_exponent - ptr->exp;
+if (shortfall <= 0)
+        return;         /* Exponent already at or above the minimum */
+
+/*
+** The number is subnormal: shift the mantissa right (with sticky
+** bit) and raise the exponent to compensate, or flush to zero when
+** every significant bit would be lost.
+*/
+if (shortfall >= (INTERNAL_FPF_PRECISION * 16))
+{
+        /* Underflow: nothing survives the shift */
+        SetInternalFPFZero(ptr, ptr->sign);
+}
+else
+{
+        ptr->exp += shortfall;
+        StickyShiftRightMant(ptr, (int)shortfall);
+}
+return;
+}
+
+
+/*********************
+** RoundInternalFPF **
+**********************
+** Round an internal-representation number.
+** The kind of rounding we do here is simplest...referred to as
+** "chop".  "Extraneous" rightmost bits are simply hacked off.
+*/
+/*
+** Round a normal/subnormal internal-representation number in place
+** by truncation ("chop"): denormalize down to MIN_EXP if required,
+** then discard the extraneous low-order mantissa bits.  Zero,
+** infinity and NaN values pass through unchanged.
+*/
+void RoundInternalFPF(InternalFPF *ptr)
+{
+/* int i; */
+
+if (ptr->type == IFPF_IS_NORMAL ||
+        ptr->type == IFPF_IS_SUBNORMAL)
+{
+        /* May flush ptr to zero, or shift sticky bits in */
+        denormalize(ptr, MIN_EXP);
+        if (ptr->type != IFPF_IS_ZERO)
+        {
+
+                /* clear the extraneous bits */
+                /* NOTE(review): hard-codes word 3 as the last
+                ** significant word; the commented-out loop below
+                ** zeroed any further words -- confirm against
+                ** INTERNAL_FPF_PRECISION. */
+                ptr->mantissa[3] &= 0xfff8;
+/*              for (i=4; i<INTERNAL_FPF_PRECISION; i++)
+                {
+                        ptr->mantissa[i] = 0;
+                }
+*/
+                /*
+                ** Check for overflow
+                */
+/*              Does not do anything as ptr->exp is a short and MAX_EXP=37268
+		if (ptr->exp > MAX_EXP)
+                {
+                        SetInternalFPFInfinity(ptr, ptr->sign);
+                }
+*/
+        }
+}
+return;
+}
+
+/*******************************************************
+**  ARITHMETIC OPERATIONS ON INTERNAL REPRESENTATION  **
+*******************************************************/
+
+/***************
+** choose_nan **
+****************
+** Called by routines that are forced to perform math on
+** a pair of NaN's.  This routine "selects" which NaN is
+** to be returned.
+*/
+/*
+** Given two NaN operands x and y, select one and copy it to z.
+** The mantissas are compared word by word and the larger NaN wins
+** (emulating an 80387, per the original comment).  On a tie,
+** intel_flag picks the operand: x when 0, y when non-zero.
+**
+** NOTE(review): the tie-break comments below say 0 means addition,
+** yet AddSubInternalFPF passes intel_flag=1 while Multiply/Divide
+** pass 0 -- the comments look swapped relative to the callers.
+** NOTE(review): my_memmove is called as (x, z) here but as
+** (&local, source) elsewhere in this file; its parameter order
+** (src,dst vs dst,src) cannot be confirmed from this chunk --
+** verify against its definition before changing anything.
+*/
+static void choose_nan(InternalFPF *x,
+                InternalFPF *y,
+                InternalFPF *z,
+                int intel_flag)
+{
+int i;
+
+/*
+** Compare the two mantissas,
+** return the larger.  Note that we will be emulating
+** an 80387 in this operation.
+*/
+for (i=0; i<INTERNAL_FPF_PRECISION; i++)
+{
+        if (x->mantissa[i] > y->mantissa[i])
+        {
+                my_memmove((void *)x,(void *)z,sizeof(InternalFPF));
+                return;
+        }
+        if (x->mantissa[i] < y->mantissa[i])
+        {
+                my_memmove((void *)y,(void *)z,sizeof(InternalFPF));
+                return;
+        }
+}
+
+/*
+** They are equal
+*/
+if (!intel_flag)
+        /* if the operation is addition */
+        my_memmove((void *)x,(void *)z,sizeof(InternalFPF));
+else
+        /* if the operation is multiplication */
+        my_memmove((void *)y,(void *)z,sizeof(InternalFPF));
+return;
+}
+
+
+/**********************
+** AddSubInternalFPF **
+***********************
+** Adding or subtracting internal-representation numbers.
+** Internal-representation numbers pointed to by x and y are
+** added/subtracted and the result returned in z.
+*/
+/*
+** Add or subtract the internal-representation numbers *x and *y,
+** storing the result through z.  operation appears to be 0 for add
+** and 1 for subtract -- inferred from the sign XORs below and from
+** InternalFPFToString(), which passes 1 to compare by subtraction;
+** confirm against all callers.  x and y are not modified; local
+** copies are taken wherever a mantissa must be shifted.
+**
+** NOTE(review): my_memmove is called as (x, z) for result copies
+** but as (&locx, x) for local copies; its parameter order cannot
+** be confirmed from this chunk -- verify its definition.
+*/
+static void AddSubInternalFPF(uchar operation,
+                InternalFPF *x,
+                InternalFPF *y,
+                InternalFPF *z)
+{
+int exponent_difference;
+u16 borrow;
+u16 carry;
+int i;
+InternalFPF locx,locy;  /* Needed since we alter them */
+
+/*
+** Following big switch statement handles the
+** various combinations of operand types.
+*/
+switch ((x->type * IFPF_TYPE_COUNT) + y->type)
+{
+case ZERO_ZERO:
+        /* 0 +/- 0: copy x; differing effective signs force +0 */
+        my_memmove((void *)x,(void *)z,sizeof(InternalFPF));
+        if (x->sign ^ y->sign ^ operation)
+        {
+                z->sign = 0; /* positive */
+        }
+        break;
+
+/* Cases where the result is simply x (x is NaN, or x dominates y) */
+case NAN_ZERO:
+case NAN_SUBNORMAL:
+case NAN_NORMAL:
+case NAN_INFINITY:
+case SUBNORMAL_ZERO:
+case NORMAL_ZERO:
+case INFINITY_ZERO:
+case INFINITY_SUBNORMAL:
+case INFINITY_NORMAL:
+        my_memmove((void *)x,(void *)z,sizeof(InternalFPF));
+        break;
+
+
+/* Cases where the result is simply y (y is NaN) */
+case ZERO_NAN:
+case SUBNORMAL_NAN:
+case NORMAL_NAN:
+case INFINITY_NAN:
+        my_memmove((void *)y,(void *)z,sizeof(InternalFPF));
+        break;
+
+/* Result is +/-y; subtracting flips its sign */
+case ZERO_SUBNORMAL:
+case ZERO_NORMAL:
+case ZERO_INFINITY:
+case SUBNORMAL_INFINITY:
+case NORMAL_INFINITY:
+        my_memmove((void *)y,(void *)z,sizeof(InternalFPF));
+        z->sign ^= operation;
+        break;
+
+case SUBNORMAL_SUBNORMAL:
+case SUBNORMAL_NORMAL:
+case NORMAL_SUBNORMAL:
+case NORMAL_NORMAL:
+        /*
+        ** Copy x and y to locals, since we may have
+        ** to alter them.
+        */
+        my_memmove((void *)&locx,(void *)x,sizeof(InternalFPF));
+        my_memmove((void *)&locy,(void *)y,sizeof(InternalFPF));
+
+        /* compute sum/difference */
+        exponent_difference = locx.exp-locy.exp;
+        if (exponent_difference == 0)
+        {
+                /*
+                ** locx.exp == locy.exp
+                ** so, no shifting required
+                */
+                if (locx.type == IFPF_IS_SUBNORMAL ||
+                  locy.type == IFPF_IS_SUBNORMAL)
+                        z->type = IFPF_IS_SUBNORMAL;
+                else
+                        z->type = IFPF_IS_NORMAL;
+
+                /*
+                ** Assume that locx.mantissa > locy.mantissa
+                */
+                z->sign = locx.sign;
+                z->exp= locx.exp;
+        }
+        else
+                if (exponent_difference > 0)
+                {
+                        /*
+                        ** locx.exp > locy.exp
+                        */
+                        StickyShiftRightMant(&locy,
+                                 exponent_difference);
+                        z->type = locx.type;
+                        z->sign = locx.sign;
+                        z->exp = locx.exp;
+                }
+                else    /* if (exponent_difference < 0) */
+                {
+                        /*
+                        ** locx.exp < locy.exp
+                        */
+                        StickyShiftRightMant(&locx,
+                                -exponent_difference);
+                        z->type = locy.type;
+                        z->sign = locy.sign ^ operation;
+                        z->exp = locy.exp;
+                }
+
+                /*
+                ** Effective signs differ: subtract the magnitudes.
+                ** Effective signs agree: add them (else branch).
+                */
+                if (locx.sign ^ locy.sign ^ operation)
+                {
+                        /*
+                        ** Signs are different, subtract mantissas
+                        */
+                        borrow = 0;
+                        for (i=(INTERNAL_FPF_PRECISION-1); i>=0; i--)
+                                Sub16Bits(&borrow,
+                                        &z->mantissa[i],
+                                        locx.mantissa[i],
+                                        locy.mantissa[i]);
+
+                        if (borrow)
+                        {
+                                /* The y->mantissa was larger than the
+                                ** x->mantissa leaving a negative
+                                ** result.  Change the result back to
+                                ** an unsigned number and flip the
+                                ** sign flag.
+                                */
+                                z->sign = locy.sign ^ operation;
+                                borrow = 0;
+                                for (i=(INTERNAL_FPF_PRECISION-1); i>=0; i--)
+                                {
+                                        /* Two's-complement negate via 0 - z */
+                                        Sub16Bits(&borrow,
+                                                &z->mantissa[i],
+                                                0,
+                                                z->mantissa[i]);
+                                }
+                        }
+                        else
+                        {
+                                /* The assumption made above
+                                ** (i.e. x->mantissa >= y->mantissa)
+                                ** was correct.  Therefore, do nothing.
+                                ** z->sign = x->sign;
+                                */
+                        }
+
+                        if (IsMantissaZero(z->mantissa))
+                        {
+                                z->type = IFPF_IS_ZERO;
+                                z->sign = 0; /* positive */
+                        }
+                        else
+                                if (locx.type == IFPF_IS_NORMAL ||
+                                         locy.type == IFPF_IS_NORMAL)
+                                {
+                                        normalize(z);
+                                }
+                }
+                else
+                {
+                        /* signs are the same, add mantissas */
+                        carry = 0;
+                        for (i=(INTERNAL_FPF_PRECISION-1); i>=0; i--)
+                        {
+                                Add16Bits(&carry,
+                                        &z->mantissa[i],
+                                        locx.mantissa[i],
+                                        locy.mantissa[i]);
+                        }
+
+                        if (carry)
+                        {
+                                /* Sum overflowed: renormalize by one */
+                                z->exp++;
+                                carry=0;
+                                ShiftMantRight1(&carry,z->mantissa);
+                                z->mantissa[0] |= 0x8000;
+                                z->type = IFPF_IS_NORMAL;
+                        }
+                        else
+                                if (z->mantissa[0] & 0x8000)
+                                        z->type = IFPF_IS_NORMAL;
+        }
+        break;
+
+case INFINITY_INFINITY:
+        /* inf +/- inf: invalid, produce NaN */
+        SetInternalFPFNaN(z);
+        break;
+
+case NAN_NAN:
+        choose_nan(x, y, z, 1);
+        break;
+}
+
+/*
+** All the math is done; time to round.
+*/
+RoundInternalFPF(z);
+return;
+}
+
+
+/************************
+** MultiplyInternalFPF **
+*************************
+** Two internal-representation numbers x and y are multiplied; the
+** result is returned in z.
+*/
+/*
+** Multiply internal-representation numbers *x and *y, storing the
+** result through z, using a bitwise shift-and-add multiply on the
+** mantissas.  x and y are not modified (y is copied to a local that
+** the multiply consumes).
+**
+** NOTE(review): my_memmove parameter order cannot be confirmed from
+** this chunk (called as (x, z) here, (&locy, y) below) -- verify.
+*/
+static void MultiplyInternalFPF(InternalFPF *x,
+                        InternalFPF *y,
+                        InternalFPF *z)
+{
+int i;
+int j;
+u16 carry;
+u16 extra_bits[INTERNAL_FPF_PRECISION]; /* Bits shifted off the bottom */
+InternalFPF locy;       /* Needed since this will be altered */
+/*
+** As in the preceding function, this large switch
+** statement selects among the many combinations
+** of operands.
+*/
+switch ((x->type * IFPF_TYPE_COUNT) + y->type)
+{
+case INFINITY_SUBNORMAL:
+case INFINITY_NORMAL:
+case INFINITY_INFINITY:
+case ZERO_ZERO:
+case ZERO_SUBNORMAL:
+case ZERO_NORMAL:
+        /* Result has x's magnitude class, combined sign */
+        my_memmove((void *)x,(void *)z,sizeof(InternalFPF));
+        z->sign ^= y->sign;
+        break;
+
+case SUBNORMAL_INFINITY:
+case NORMAL_INFINITY:
+case SUBNORMAL_ZERO:
+case NORMAL_ZERO:
+        /* Result has y's magnitude class, combined sign */
+        my_memmove((void *)y,(void *)z,sizeof(InternalFPF));
+        z->sign ^= x->sign;
+        break;
+
+case ZERO_INFINITY:
+case INFINITY_ZERO:
+        /* 0 * inf is invalid: produce NaN */
+        SetInternalFPFNaN(z);
+        break;
+
+case NAN_ZERO:
+case NAN_SUBNORMAL:
+case NAN_NORMAL:
+case NAN_INFINITY:
+        my_memmove((void *)x,(void *)z,sizeof(InternalFPF));
+        break;
+
+case ZERO_NAN:
+case SUBNORMAL_NAN:
+case NORMAL_NAN:
+case INFINITY_NAN:
+        my_memmove((void *)y,(void *)z,sizeof(InternalFPF));
+        break;
+
+
+case SUBNORMAL_SUBNORMAL:
+case SUBNORMAL_NORMAL:
+case NORMAL_SUBNORMAL:
+case NORMAL_NORMAL:
+        /*
+        ** Make a local copy of the y number, since we will be
+        ** altering it in the process of multiplying.
+        */
+        my_memmove((void *)&locy,(void *)y,sizeof(InternalFPF));
+
+        /*
+        ** Check for unnormal zero arguments
+        ** NOTE(review): this sets z to infinity but then falls
+        ** through and overwrites z below -- looks suspicious, but
+        ** left as-is (matches the original benchmark code).
+        */
+        if (IsMantissaZero(x->mantissa) || IsMantissaZero(y->mantissa))
+                SetInternalFPFInfinity(z, 0);
+
+        /*
+        ** Initialize the result
+        */
+        if (x->type == IFPF_IS_SUBNORMAL ||
+            y->type == IFPF_IS_SUBNORMAL)
+                z->type = IFPF_IS_SUBNORMAL;
+        else
+                z->type = IFPF_IS_NORMAL;
+
+        z->sign = x->sign ^ y->sign;
+        z->exp = x->exp + y->exp ;
+        for (i=0; i<INTERNAL_FPF_PRECISION; i++)
+        {
+                z->mantissa[i] = 0;
+                extra_bits[i] = 0;
+        }
+
+        for (i=0; i<(INTERNAL_FPF_PRECISION*16); i++)
+        {
+                /*
+                ** Get rightmost bit of the multiplier
+                */
+                carry = 0;
+                ShiftMantRight1(&carry, locy.mantissa);
+                if (carry)
+                {
+                        /*
+                        ** Add the multiplicand to the product
+                        */
+                        carry = 0;
+                        for (j=(INTERNAL_FPF_PRECISION-1); j>=0; j--)
+                                Add16Bits(&carry,
+                                        &z->mantissa[j],
+                                        z->mantissa[j],
+                                        x->mantissa[j]);
+                }
+                else
+                {
+                        carry = 0;
+                }
+
+                /*
+                ** Shift the product right.  Overflow bits get
+                ** shifted into extra_bits.  We'll use it later
+                ** to help with the "sticky" bit.
+                */
+                ShiftMantRight1(&carry, z->mantissa);
+                ShiftMantRight1(&carry, extra_bits);
+        }
+
+        /*
+        ** Normalize
+        ** Note that we use a "special" normalization routine
+        ** because we need to use the extra bits. (These are
+        ** bits that may have been shifted off the bottom that
+        ** we want to reclaim...if we can.
+        */
+        while ((z->mantissa[0] & 0x8000) == 0)
+        {
+                carry = 0;
+                ShiftMantLeft1(&carry, extra_bits);
+                ShiftMantLeft1(&carry, z->mantissa);
+                z->exp--;
+        }
+
+        /*
+        ** Set the sticky bit if any bits set in extra bits.
+        ** BUGFIX: this previously read "if (IsMantissaZero(extra_bits))",
+        ** which set the sticky bit only when NO extra bits survived --
+        ** the opposite of the stated intent.  Inverted to match the
+        ** comment and the original BYTEmark semantics.
+        */
+        if (!IsMantissaZero(extra_bits))
+        {
+                z->mantissa[INTERNAL_FPF_PRECISION-1] |= 1;
+        }
+        break;
+
+case NAN_NAN:
+        choose_nan(x, y, z, 0);
+        break;
+}
+
+/*
+** All math done...do rounding.
+*/
+RoundInternalFPF(z);
+return;
+}
+
+
+/**********************
+** DivideInternalFPF **
+***********************
+** Divide internal FPF number x by y.  Return result in z.
+*/
+/*
+** Divide internal-representation number *x by *y, storing the
+** quotient through z, using a bitwise shift-and-subtract long
+** division on the mantissas.  x and y are not modified (x is copied
+** to a local that the division consumes).
+**
+** NOTE(review): my_memmove parameter order -- see AddSubInternalFPF.
+*/
+static void DivideInternalFPF(InternalFPF *x,
+                        InternalFPF *y,
+                        InternalFPF *z)
+{
+int i;
+int j;
+u16 carry;
+u16 extra_bits[INTERNAL_FPF_PRECISION]; /* Running remainder */
+InternalFPF locx;       /* Local for x number */
+
+/*
+** As with preceding function, the following switch
+** statement selects among the various possible
+** operands.
+*/
+switch ((x->type * IFPF_TYPE_COUNT) + y->type)
+{
+case ZERO_ZERO:
+case INFINITY_INFINITY:
+        /* 0/0 and inf/inf are invalid: produce NaN */
+        SetInternalFPFNaN(z);
+        break;
+
+case ZERO_SUBNORMAL:
+case ZERO_NORMAL:
+        /* 0 / unnormal-zero divisor is NaN... */
+        if (IsMantissaZero(y->mantissa))
+        {
+                SetInternalFPFNaN(z);
+                break;
+        }
+        /* ...otherwise fall through: 0 / nonzero is a signed zero */
+
+case ZERO_INFINITY:
+case SUBNORMAL_INFINITY:
+case NORMAL_INFINITY:
+        SetInternalFPFZero(z, x->sign ^ y->sign);
+        break;
+
+case SUBNORMAL_ZERO:
+case NORMAL_ZERO:
+        /* unnormal-zero dividend / 0 is NaN... */
+        if (IsMantissaZero(x->mantissa))
+        {
+                SetInternalFPFNaN(z);
+                break;
+        }
+        /* ...otherwise fall through: nonzero / 0 is infinity */
+
+case INFINITY_ZERO:
+case INFINITY_SUBNORMAL:
+case INFINITY_NORMAL:
+        SetInternalFPFInfinity(z, 0);
+        z->sign = x->sign ^ y->sign;
+        break;
+
+case NAN_ZERO:
+case NAN_SUBNORMAL:
+case NAN_NORMAL:
+case NAN_INFINITY:
+        my_memmove((void *)x,(void *)z,sizeof(InternalFPF));
+        break;
+
+case ZERO_NAN:
+case SUBNORMAL_NAN:
+case NORMAL_NAN:
+case INFINITY_NAN:
+        my_memmove((void *)y,(void *)z,sizeof(InternalFPF));
+        break;
+
+case SUBNORMAL_SUBNORMAL:
+case NORMAL_SUBNORMAL:
+case SUBNORMAL_NORMAL:
+case NORMAL_NORMAL:
+        /*
+        ** Make local copy of x number, since we'll be
+        ** altering it in the process of dividing.
+        */
+        my_memmove((void *)&locx,(void *)x,sizeof(InternalFPF));
+
+        /*
+        ** Check for unnormal zero arguments
+        */
+        if (IsMantissaZero(locx.mantissa))
+        {
+                if (IsMantissaZero(y->mantissa))
+                        SetInternalFPFNaN(z);
+                else
+                        SetInternalFPFZero(z, 0);
+                break;
+        }
+        if (IsMantissaZero(y->mantissa))
+        {
+                SetInternalFPFInfinity(z, 0);
+                break;
+        }
+
+        /*
+        ** Initialize the result
+        ** (the large constant offsets the per-iteration z->exp--
+        ** decrements in the division loop below)
+        */
+        z->type = x->type;
+        z->sign = x->sign ^ y->sign;
+        z->exp = x->exp - y->exp +
+                        ((INTERNAL_FPF_PRECISION * 16 * 2));
+        for (i=0; i<INTERNAL_FPF_PRECISION; i++)
+        {
+                z->mantissa[i] = 0;
+                extra_bits[i] = 0;
+        }
+
+        /* Shift-and-subtract until the quotient is normalized */
+        while ((z->mantissa[0] & 0x8000) == 0)
+        {
+                carry = 0;
+                ShiftMantLeft1(&carry, locx.mantissa);
+                ShiftMantLeft1(&carry, extra_bits);
+
+                /*
+                ** Time to subtract yet?
+                ** (word-by-word compare of divisor vs. remainder)
+                */
+                if (carry == 0)
+                        for (j=0; j<INTERNAL_FPF_PRECISION; j++)
+                        {
+                                if (y->mantissa[j] > extra_bits[j])
+                                {
+                                        carry = 0;
+                                        goto no_subtract;
+                                }
+                                if (y->mantissa[j] < extra_bits[j])
+                                        break;
+                        }
+                /*
+                ** Divisor (y) <= dividend (x), subtract
+                */
+                carry = 0;
+                for (j=(INTERNAL_FPF_PRECISION-1); j>=0; j--)
+                        Sub16Bits(&carry,
+                                &extra_bits[j],
+                                extra_bits[j],
+                                y->mantissa[j]);
+                carry = 1;      /* 1 shifted into quotient */
+        no_subtract:
+                ShiftMantLeft1(&carry, z->mantissa);
+                z->exp--;
+        }
+        break;
+
+case NAN_NAN:
+        choose_nan(x, y, z, 0);
+        break;
+}
+
+/*
+** Math complete...do rounding
+*/
+RoundInternalFPF(z);
+}
+
+/**********************
+** LongToInternalFPF **
+** Int32ToInternalFPF **
+***********************
+** Convert a signed (long) 32-bit integer into an internal FPF number.
+*/
+/* static void LongToInternalFPF(long mylong, */
+/*
+** Convert a signed 32-bit integer into an internal-representation
+** floating-point number in *dest.  The sign is stripped first, the
+** magnitude is loaded into the top two mantissa words with exponent
+** 32, and normalize() then slides everything into place.
+*/
+static void Int32ToInternalFPF(int32 mylong,
+                InternalFPF *dest)
+{
+int i;          /* Index */
+u16 myword;     /* Used to hold converted stuff */
+/*
+** Save the sign and get the absolute value.  This will help us
+** with 64-bit machines, since we use only the lower 32
+** bits just in case. (No longer necessary after we use int32.)
+*/
+/* if(mylong<0L) */
+if(mylong<(int32)0)
+{       dest->sign=1;
+        /* NOTE(review): negating the most-negative int32 overflows
+        ** (undefined behaviour); callers in this chunk only pass
+        ** small constants -- confirm before wider use. */
+        mylong=(int32)0-mylong;
+}
+else
+        dest->sign=0;
+/*
+** Prepare the destination floating point number
+*/
+dest->type=IFPF_IS_NORMAL;
+for(i=0;i<INTERNAL_FPF_PRECISION;i++)
+        dest->mantissa[i]=0;
+
+/*
+** See if we've got a zero.  If so, make the resultant FP
+** number a true zero and go home.
+*/
+if(mylong==0)
+{       dest->type=IFPF_IS_ZERO;
+        dest->exp=0;
+        return;
+}
+
+/*
+** Not a true zero.  Set the exponent to 32 (internal FPFs have
+** no bias) and load the low and high words into their proper
+** locations in the mantissa.  Then normalize.  The action of
+** normalizing slides the mantissa bits into place and sets
+** up the exponent properly.
+*/
+dest->exp=32;
+myword=(u16)((mylong >> 16) & 0xFFFFL);
+dest->mantissa[0]=myword;
+myword=(u16)(mylong & 0xFFFFL);
+dest->mantissa[1]=myword;
+normalize(dest);
+return;
+}
+
+#if 1
+/************************
+** InternalFPFToString **
+*************************
+** FOR DEBUG PURPOSES
+** This routine converts an internal floating point representation
+** number to a string.  Used in debugging the package.
+** Returns length of converted number.
+** NOTE: dest must point to a buffer big enough to hold the
+**  result.  Also, this routine does append a null (an effect
+**  of using the sprintf() function).  It also returns
+**  a length count.
+** NOTE: This routine returns 5 significant digits.  That's
+**  about all I feel safe with, given the method of
+**  conversion.  It should be more than enough for programmers
+**  to determine whether the package is properly ported.
+*/
+/*
+** Debug helper: format *src into dest as "[sign]d.ddddE[sign]d"
+** (5 significant digits, single-digit exponent) and return the
+** character count.  See the header comment above for caveats.
+*/
+static int InternalFPFToString(char *dest,
+                InternalFPF *src)
+{
+InternalFPF locFPFNum;          /* Local for src (will be altered) */
+InternalFPF IFPF10;             /* Floating-point 10 */
+InternalFPF IFPFComp;           /* For doing comparisons */
+int msign;                      /* Holding for mantissa sign */
+int expcount;                   /* Exponent counter */
+int ccount;                     /* Character counter */
+int i,j,k;                      /* Index */
+u16 carryaccum;                 /* Carry accumulator */
+u16 mycarry;                    /* Local for carry */
+
+/*
+** Check first for the simple things...Nan, Infinity, Zero.
+** If found, copy the proper string in and go home.
+** NOTE(review): these early returns do NOT NUL-terminate dest,
+** despite what the header comment above suggests.
+*/
+switch(src->type)
+{
+        case IFPF_IS_NAN:
+                my_memcpy(dest,"NaN",3);
+                return(3);
+
+        case IFPF_IS_INFINITY:
+                if(src->sign==0)
+                        my_memcpy(dest,"+Inf",4);
+                else
+                        my_memcpy(dest,"-Inf",4);
+                return(4);
+
+        case IFPF_IS_ZERO:
+                if(src->sign==0)
+                        my_memcpy(dest,"+0",2);
+                else
+                        my_memcpy(dest,"-0",2);
+                return(2);
+}
+
+/*
+** Move the internal number into our local holding area, since
+** we'll be altering it to print it out.
+*/
+my_memcpy((void *)&locFPFNum,(void *)src,sizeof(InternalFPF));
+
+/*
+** Set up a floating-point 10...which we'll use a lot in a minute.
+*/
+/* LongToInternalFPF(10L,&IFPF10); */
+Int32ToInternalFPF((int32)10,&IFPF10);
+
+/*
+** Save the mantissa sign and make it positive.
+*/
+msign=src->sign;
+
+/* src->sign=0 */ /* bug, fixed Nov. 13, 1997 */
+(&locFPFNum)->sign=0;
+
+expcount=0;             /* Init exponent counter */
+
+/*
+** See if the number is less than 10.  If so, multiply
+** the number repeatedly by 10 until it's not.   For each
+** multiplication, decrement a counter so we can keep track
+** of the exponent.
+*/
+
+while(1)
+{       /* IFPFComp = locFPFNum - 10; negative means locFPFNum < 10 */
+        AddSubInternalFPF(1,&locFPFNum,&IFPF10,&IFPFComp);
+        if(IFPFComp.sign==0) break;
+        MultiplyInternalFPF(&locFPFNum,&IFPF10,&IFPFComp);
+        expcount--;
+        my_memcpy((void *)&locFPFNum,(void *)&IFPFComp,sizeof(InternalFPF));
+}
+/*
+** Do the reverse of the above.  As long as the number is
+** greater than or equal to 10, divide it by 10.  Increment the
+** exponent counter for each multiplication.
+*/
+
+while(1)
+{
+        AddSubInternalFPF(1,&locFPFNum,&IFPF10,&IFPFComp);
+        if(IFPFComp.sign!=0) break;
+        DivideInternalFPF(&locFPFNum,&IFPF10,&IFPFComp);
+        expcount++;
+        my_memcpy((void *)&locFPFNum,(void *)&IFPFComp,sizeof(InternalFPF));
+}
+
+/*
+** About time to start storing things.  First, store the
+** mantissa sign.
+*/
+ccount=1;               /* Init character counter */
+if(msign==0)
+        *dest++='+';
+else
+        *dest++='-';
+
+/*
+** At this point we know that the number is in the range
+** 10 > n >=1.  We need to "strip digits" out of the
+** mantissa.  We do this by treating the mantissa as
+** an integer and multiplying by 10. (Not a floating-point
+** 10, but an integer 10.  Since this is debug code and we
+** could care less about speed, we'll do it the stupid
+** way and simply add the number to itself 10 times.
+** Anything that makes it to the left of the implied binary point
+** gets stripped off and emitted.  We'll do this for
+** 5 significant digits (which should be enough to
+** verify things).
+*/
+/*
+** Re-position radix point
+*/
+carryaccum=0;
+while(locFPFNum.exp>0)
+{
+        mycarry=0;
+        ShiftMantLeft1(&mycarry,locFPFNum.mantissa);
+        carryaccum=(carryaccum<<1);
+        if(mycarry) carryaccum++;
+        locFPFNum.exp--;
+}
+
+while(locFPFNum.exp<0)
+{
+        mycarry=0;
+        ShiftMantRight1(&mycarry,locFPFNum.mantissa);
+        locFPFNum.exp++;
+}
+
+/* Emit 5 digits with a decimal point after the first (i==1) */
+for(i=0;i<6;i++)
+        if(i==1)
+        {       /* Emit decimal point */
+                *dest++='.';
+                ccount++;
+        }
+        else
+        {       /* Emit a digit */
+                *dest++=('0'+carryaccum);
+                ccount++;
+
+                carryaccum=0;
+                my_memcpy((void *)&IFPF10,
+                        (void *)&locFPFNum,
+                        sizeof(InternalFPF));
+
+                /* Do multiply via repeated adds */
+                for(j=0;j<9;j++)
+                {
+                        mycarry=0;
+                        for(k=(INTERNAL_FPF_PRECISION-1);k>=0;k--)
+                                Add16Bits(&mycarry,&(IFPFComp.mantissa[k]),
+                                        locFPFNum.mantissa[k],
+                                        IFPF10.mantissa[k]);
+                        carryaccum+=mycarry ? 1 : 0;
+                        my_memcpy((void *)&locFPFNum,
+                                (void *)&IFPFComp,
+                                sizeof(InternalFPF));
+                }
+        }
+
+/*
+** Now move the 'E', the exponent sign, and the exponent
+** into the string.
+*/
+*dest++='E';
+
+/* sprint is supposed to return an integer, but it caused problems on SunOS
+ * with the native cc. Hence we force it.
+ * Uwe F. Mayer
+ */
+if (expcount < 0) {
+     *dest++ = '-';
+     /* "=-" parses as "= -" (negation) in modern C, i.e.
+     ** expcount = -expcount -- not the ancient "-=" compound op. */
+     expcount =- expcount;
+}
+else *dest++ = ' ';
+
+/* NOTE(review): supports single-digit exponents (0..9) only */
+*dest++ = (char)(expcount + '0');
+*dest++ = 0;
+
+ccount += 3;            /* 'E' + exponent sign + digit (NUL not counted) */
+/*
+** All done, go home.
+*/
+return(ccount);
+
+}
+
+#endif
+
+
+
+////////////////////////////////////////////////////////////////////////
+static 
+void* AllocateMemory ( unsigned long n, int* p )
+{
+  /* Request n bytes from the host via service call 2.  *p is the
+     error-code slot callers pass in; it is always reported as 0. */
+  *p = 0;
+  return (void*)(*serviceFn)(2, n);
+}
+static 
+void FreeMemory ( void* p, int* zz )
+{
+  /* The switchback harness never reclaims memory: p is deliberately
+     not released; just report success through *zz. */
+  *zz = 0;
+}
+
+
+
+/**************
+** DoEmFloat **
+***************
+** Perform the floating-point emulation routines portion of the
+** CPU benchmark.  Returns the operations per second.
+*/
+static 
+void DoEmFloat(void)
+{
+EmFloatStruct params;           /* Benchmark parameter block (a local,
+                                   despite the old "global" naming) */
+InternalFPF *abase;             /* Base of A array */
+InternalFPF *bbase;             /* Base of B array */
+InternalFPF *cbase;             /* Base of C array */
+ulong tickcount;                /* # of ticks (result discarded) */
+char *errorcontext;             /* Error context string (unused here) */
+int systemerror;                /* Error code slot (never inspected) */
+ulong loops;                    /* # of loops */
+
+/* Fixed benchmark parameters */
+params.adjust = 0;
+params.request_secs = 0;
+params.arraysize = 100;
+params.loops = 1;
+params.emflops = 0.0;
+
+errorcontext = "CPU:Floating Emulation";
+
+/* Allocate the three operand/result arrays */
+abase = (InternalFPF *)
+        AllocateMemory(params.arraysize * sizeof(InternalFPF), &systemerror);
+bbase = (InternalFPF *)
+        AllocateMemory(params.arraysize * sizeof(InternalFPF), &systemerror);
+cbase = (InternalFPF *)
+        AllocateMemory(params.arraysize * sizeof(InternalFPF), &systemerror);
+
+/* Populate the arrays with test operands */
+SetupCPUEmFloatArrays(abase, bbase, cbase, params.arraysize);
+
+/* Run a fixed 100 iterations; the tick count is not used */
+loops = 100;
+tickcount = DoEmFloatIteration(abase, bbase, cbase,
+                params.arraysize, loops);
+
+FreeMemory((void *)abase, &systemerror);
+FreeMemory((void *)bbase, &systemerror);
+FreeMemory((void *)cbase, &systemerror);
+
+return;
+}
+
+//////////////////
+/*
+** Switchback entry point.  f is the host service callback: store it
+** in the global serviceFn, print a banner, run the emulated-FP
+** benchmark once, then invoke service request 0 (presumably "exit";
+** confirm against the switchback harness).
+*/
+void entry ( HWord(*f)(HWord,HWord) )
+{
+  serviceFn = f;
+  vexxx_printf("starting\n");
+  DoEmFloat();
+  (*serviceFn)(0,0);
+}
diff --git a/VEX/switchback/test_hello.c b/VEX/switchback/test_hello.c
new file mode 100644
index 0000000..d4318d0
--- /dev/null
+++ b/VEX/switchback/test_hello.c
@@ -0,0 +1,20 @@
+
+
+static void bar ( void*(*service)(int,int) )
+{
+  const char *msg = "hello\n";
+  int spin = 0;
+
+  /* Crude busy-wait delay (may be optimized away -- same as the
+     original empty loop). */
+  while (spin < 100000)
+    spin++;
+
+  /* Emit the message one character at a time via service request 1. */
+  while (*msg)
+    service(1, *msg++);
+}
+
+/* Switchback entry point: print "hello\n" through the service
+   callback, then invoke service request 0 (presumably "exit"). */
+void entry ( void*(*service)(int,int) )
+{
+  bar(service);
+  service(0,0);
+}
+
diff --git a/VEX/switchback/test_ppc_jm1.c b/VEX/switchback/test_ppc_jm1.c
new file mode 100644
index 0000000..292f433
--- /dev/null
+++ b/VEX/switchback/test_ppc_jm1.c
@@ -0,0 +1,4611 @@
+
+/* HOW TO COMPILE FOR SWITCHBACK:
+
+   gcc -O -c test_ppc_jm1.c -mregnames -Wall
+
+*/
+
+#undef  HAS_ALTIVEC
+#define NO_FLOAT
+#undef  IS_PPC405
+
+
+/*
+ * test-ppc.c:
+ * PPC tests for qemu-PPC CPU emulation checks
+ * 
+ * Copyright (c) 2005 Jocelyn Mayer
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License V2
+ * as published by the Free Software Foundation.
+ */
+
+/*
+ * Theory of operations:
+ * a few registers are reserved for the test program:
+ * r14 => r18
+ * f14 => f18
+ * I do preload test values in r14 thru r17 (or less, depending on the number
+ * of register operands needed), patch the test opcode if any immediate
+ * operands are required, execute the tested opcode.
+ * XER, CCR and FPSCR are cleared before every test.
+ * I always get the result in r17 and also save XER and CCR for fixed-point
+ * operations. I also check FPSCR for floating points operations.
+ *
+ * Improvments:
+ * a more cleaver FPSCR management is needed: for now, I always test
+ * the round-to-zero case. Other rounding modes also need to be tested.
+ */
+
+#include <stdint.h>
+//#include <stdlib.h>
+//#include <stdio.h>
+//#include <string.h>
+//#include <unistd.h>
+//#include <fcntl.h>
+//#include <ctype.h>
+//#include <math.h>
+//#include <fenv.h>
+
+#define NULL ((void*)0)
+
+//#include "test-ppc.h"
+
+// BEGIN #include "test-ppc.h"
+/*
+ * test-ppc.h:
+ * PPC tests for qemu-PPC CPU emulation checks - definitions
+ * 
+ * Copyright (c) 2005 Jocelyn Mayer
+ * 
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License V2
+ * as published by the Free Software Foundation.
+ */
+
+#if !defined (__TEST_PPC_H__)
+#define __TEST_PPC_H__
+
+typedef void (*test_func_t) (void);
+typedef struct test_t test_t;
+typedef struct test_table_t test_table_t;
+struct test_t {
+    test_func_t func;
+    const unsigned char *name;
+};
+
+struct test_table_t {
+    test_t *tests;
+    const unsigned char *name;
+    int flags;
+};
+
+typedef void (*test_loop_t) (const unsigned char *name, test_func_t func);
+
+enum test_flags {
+    /* Nb arguments */
+    PPC_ONE_ARG    = 0x00000001,
+    PPC_TWO_ARGS   = 0x00000002,
+    PPC_THREE_ARGS = 0x00000003,
+    PPC_CMP_ARGS   = 0x00000004,
+    PPC_CMPI_ARGS  = 0x00000005,
+    PPC_TWO_I16    = 0x00000006,
+    PPC_SPECIAL    = 0x00000007,
+    PPC_NB_ARGS    = 0x0000000F,
+    /* Type */
+    PPC_ARITH      = 0x00000100,
+    PPC_LOGICAL    = 0x00000200,
+    PPC_COMPARE    = 0x00000300,
+    PPC_CROP       = 0x00000400,
+    PPC_TYPE       = 0x00000F00,
+    /* Family */
+    PPC_INTEGER    = 0x00010000,
+    PPC_FLOAT      = 0x00020000,
+    PPC_405        = 0x00030000,
+    PPC_ALTIVEC    = 0x00040000,
+    PPC_FALTIVEC   = 0x00050000,
+    PPC_FAMILY     = 0x000F0000,
+    /* Flags */
+    PPC_CR         = 0x01000000,
+};
+
+#endif /* !defined (__TEST_PPC_H__) */
+
+// END #include "test-ppc.h"
+
+
+
+
+//#define DEBUG_ARGS_BUILD
+#if defined (DEBUG_ARGS_BUILD)
+#define AB_DPRINTF(fmt, args...) do { vexxx_printf(fmt , ##args); } while (0)
+#else
+#define AB_DPRINTF(fmt, args...) do { } while (0)
+#endif
+
+//#define DEBUG_FILTER
+#if defined (DEBUG_FILTER)
+#define FDPRINTF(fmt, args...) do { vexxx_printf(fmt , ##args); } while (0)
+#else
+#define FDPRINTF(fmt, args...) do { } while (0)
+#endif
+
+#if !defined (NO_FLOAT)
+register double f14 __asm__ ("f14");
+register double f15 __asm__ ("f15");
+register double f16 __asm__ ("f16");
+register double f17 __asm__ ("f17");
+register double f18 __asm__ ("f18");
+#endif
+register uint32_t r14 __asm__ ("r14");
+register uint32_t r15 __asm__ ("r15");
+register uint32_t r16 __asm__ ("r16");
+register uint32_t r17 __asm__ ("r17");
+register uint32_t r18 __asm__ ("r18");
+
+
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+
+/* Something which has the same size as void* on the host.  That is,
+   it is 32 bits on a 32-bit host and 64 bits on a 64-bit host, and so
+   it can safely be coerced to and from a pointer type on the host
+   machine. */
+typedef  unsigned long HWord;
+typedef  char          HChar;
+typedef  signed int    Int;
+typedef  unsigned int  UInt;
+typedef  unsigned char UChar;
+
+typedef    signed long long int   Long;
+typedef  unsigned long long int   ULong;
+
+typedef unsigned char   Bool;
+#define True  ((Bool)1)
+#define False ((Bool)0)
+
+
+//#include "/home/sewardj/VEX/trunk/pub/libvex_basictypes.h"
+
+static HWord (*serviceFn)(HWord,HWord) = 0;
+
+static Bool my_isspace ( UChar c )
+{
+   return c == ' ' 
+          || c == '\f'
+          || c == '\n'
+          || c == '\r'
+          || c == '\t'
+          || c == '\v';
+}
+
+#if 0 // unused
+static char* my_strcpy ( char* dest, const char* src )
+{
+   char* dest_orig = dest;
+   while (*src) *dest++ = *src++;
+   *dest = 0;
+   return dest_orig;
+}
+
+static void* my_memcpy ( void *dest, const void *src, int sz )
+{
+   const char *s = (const char *)src;
+   char *d = (char *)dest;
+
+   while (sz--)
+      *d++ = *s++;
+
+   return dest;
+}
+
+static void* my_memmove( void *dst, const void *src, unsigned int len )
+{
+    register char *d;
+    register char *s;
+    if ( dst > src ) {
+        d = (char *)dst + len - 1;
+        s = (char *)src + len - 1;
+        while ( len >= 4 ) {
+            *d-- = *s--;
+            *d-- = *s--;
+            *d-- = *s--;
+            *d-- = *s--;
+            len -= 4;
+        }
+        while ( len-- ) {
+            *d-- = *s--;
+        }
+    } else if ( dst < src ) {
+        d = (char *)dst;
+        s = (char *)src;
+        while ( len >= 4 ) {
+            *d++ = *s++;
+            *d++ = *s++;
+            *d++ = *s++;
+            *d++ = *s++;
+            len -= 4;
+        }
+        while ( len-- ) {
+            *d++ = *s++;
+        }
+    }
+    return dst;
+}
+#endif
+
+char* my_strcat ( char* dest, const char* src )
+{
+   char* dest_orig = dest;
+   while (*dest) dest++;
+   while (*src) *dest++ = *src++;
+   *dest = 0;
+   return dest_orig;
+}
+
+int my_strcmp ( const char* s1, const char* s2 )
+{
+   register unsigned char c1;
+   register unsigned char c2;
+   while (True) {
+      c1 = *(unsigned char *)s1;
+      c2 = *(unsigned char *)s2;
+      if (c1 != c2) break;
+      if (c1 == 0) break;
+      s1++; s2++;
+   }
+   if ((unsigned char)c1 < (unsigned char)c2) return -1;
+   if ((unsigned char)c1 > (unsigned char)c2) return 1;
+   return 0;
+}
+
+
+int my_memcmp ( const void *s1V, const void *s2V, int n )
+{
+   int res;
+   unsigned char a0;
+   unsigned char b0;
+   unsigned char* s1 = (unsigned char*)s1V;
+   unsigned char* s2 = (unsigned char*)s2V;
+
+   while (n != 0) {
+      a0 = s1[0];
+      b0 = s2[0];
+      s1 += 1;
+      s2 += 1;
+      res = ((int)a0) - ((int)b0);
+      if (res != 0)
+         return res;
+      n -= 1;
+   }
+   return 0;
+}
+
+char* my_strchr ( const char* s, int c )
+{
+   UChar  ch = (UChar)((UInt)c);
+   UChar* p  = (UChar*)s;
+   while (True) {
+      if (*p == ch) return p;
+      if (*p == 0) return NULL;
+      p++;
+   }
+}
+
+void* my_malloc ( int n )
+{
+  void* r = (void*) (*serviceFn)(2,n);
+  return r;
+}
+
+
+/////////////////////////////////////////////////////////////////////
+
+static void vexxx_log_bytes ( char* p, int n )
+{
+   int i;
+   for (i = 0; i < n; i++)
+      (*serviceFn)( 1, (int)p[i] );
+}
+
+/*---------------------------------------------------------*/
+/*--- vexxx_printf                                        ---*/
+/*---------------------------------------------------------*/
+
+/* This should be the only <...> include in the entire VEX library.
+   New code for vex_util.c should go above this point. */
+#include <stdarg.h>
+
+static HChar vexxx_toupper ( HChar c )
+{
+   if (c >= 'a' && c <= 'z')
+      return c + ('A' - 'a');
+   else
+      return c;
+}
+
+static Int vexxx_strlen ( const HChar* str )
+{
+   Int i = 0;
+   while (str[i] != 0) i++;
+   return i;
+}
+
+Bool vexxx_streq ( const HChar* s1, const HChar* s2 )
+{
+   while (True) {
+      if (*s1 == 0 && *s2 == 0)
+         return True;
+      if (*s1 != *s2)
+         return False;
+      s1++;
+      s2++;
+   }
+}
+
+/* Some flags.  */
+#define VG_MSG_SIGNED    1 /* The value is signed. */
+#define VG_MSG_ZJUSTIFY  2 /* Must justify with '0'. */
+#define VG_MSG_LJUSTIFY  4 /* Must justify on the left. */
+#define VG_MSG_PAREN     8 /* Parenthesize if present (for %y) */
+#define VG_MSG_COMMA    16 /* Add commas to numbers (for %d, %u) */
+
+/* Copy a string into the buffer. */
+static UInt
+myvprintf_str ( void(*send)(HChar), Int flags, Int width, HChar* str, 
+                Bool capitalise )
+{
+#  define MAYBE_TOUPPER(ch) (capitalise ? vexxx_toupper(ch) : (ch))
+   UInt ret = 0;
+   Int i, extra;
+   Int len = vexxx_strlen(str);
+
+   if (width == 0) {
+      ret += len;
+      for (i = 0; i < len; i++)
+         send(MAYBE_TOUPPER(str[i]));
+      return ret;
+   }
+
+   if (len > width) {
+      ret += width;
+      for (i = 0; i < width; i++)
+         send(MAYBE_TOUPPER(str[i]));
+      return ret;
+   }
+
+   extra = width - len;
+   if (flags & VG_MSG_LJUSTIFY) {
+      ret += extra;
+      for (i = 0; i < extra; i++)
+         send(' ');
+   }
+   ret += len;
+   for (i = 0; i < len; i++)
+      send(MAYBE_TOUPPER(str[i]));
+   if (!(flags & VG_MSG_LJUSTIFY)) {
+      ret += extra;
+      for (i = 0; i < extra; i++)
+         send(' ');
+   }
+
+#  undef MAYBE_TOUPPER
+
+   return ret;
+}
+
+/* Write P into the buffer according to these args:
+ *  If SIGN is true, p is a signed.
+ *  BASE is the base.
+ *  If WITH_ZERO is true, '0' must be added.
+ *  WIDTH is the width of the field.
+ */
+static UInt
+myvprintf_int64 ( void(*send)(HChar), Int flags, Int base, Int width, ULong pL)
+{
+   HChar buf[40];
+   Int   ind = 0;
+   Int   i, nc = 0;
+   Bool  neg = False;
+   HChar *digits = "0123456789ABCDEF";
+   UInt  ret = 0;
+   UInt  p = (UInt)pL;
+
+   if (base < 2 || base > 16)
+      return ret;
+ 
+   if ((flags & VG_MSG_SIGNED) && (Int)p < 0) {
+      p   = - (Int)p;
+      neg = True;
+   }
+
+   if (p == 0)
+      buf[ind++] = '0';
+   else {
+      while (p > 0) {
+         if ((flags & VG_MSG_COMMA) && 10 == base &&
+             0 == (ind-nc) % 3 && 0 != ind) 
+         {
+            buf[ind++] = ',';
+            nc++;
+         }
+         buf[ind++] = digits[p % base];
+         p /= base;
+      }
+   }
+
+   if (neg)
+      buf[ind++] = '-';
+
+   if (width > 0 && !(flags & VG_MSG_LJUSTIFY)) {
+      for(; ind < width; ind++) {
+	//vassert(ind < 39);
+         buf[ind] = ((flags & VG_MSG_ZJUSTIFY) ? '0': ' ');
+      }
+   }
+
+   /* Reverse copy to buffer.  */
+   ret += ind;
+   for (i = ind -1; i >= 0; i--) {
+      send(buf[i]);
+   }
+   if (width > 0 && (flags & VG_MSG_LJUSTIFY)) {
+      for(; ind < width; ind++) {
+	 ret++;
+         send(' ');  // Never pad with zeroes on RHS -- changes the value!
+      }
+   }
+   return ret;
+}
+
+
+/* A simple vprintf().  */
+static 
+UInt vprintf_wrk ( void(*send)(HChar), const HChar *format, va_list vargs )
+{
+   UInt ret = 0;
+   int i;
+   int flags;
+   int width;
+   Bool is_long;
+
+   /* We assume that vargs has already been initialised by the 
+      caller, using va_start, and that the caller will similarly
+      clean up with va_end.
+   */
+
+   for (i = 0; format[i] != 0; i++) {
+      if (format[i] != '%') {
+         send(format[i]);
+	 ret++;
+         continue;
+      }
+      i++;
+      /* A '%' has been found.  Ignore a trailing %. */
+      if (format[i] == 0)
+         break;
+      if (format[i] == '%') {
+         /* `%%' is replaced by `%'. */
+         send('%');
+	 ret++;
+         continue;
+      }
+      flags = 0;
+      is_long = False;
+      width = 0; /* length of the field. */
+      if (format[i] == '(') {
+	 flags |= VG_MSG_PAREN;
+	 i++;
+      }
+      /* If ',' follows '%', commas will be inserted. */
+      if (format[i] == ',') {
+         flags |= VG_MSG_COMMA;
+         i++;
+      }
+      /* If '-' follows '%', justify on the left. */
+      if (format[i] == '-') {
+         flags |= VG_MSG_LJUSTIFY;
+         i++;
+      }
+      /* If '0' follows '%', pads will be inserted. */
+      if (format[i] == '0') {
+         flags |= VG_MSG_ZJUSTIFY;
+         i++;
+      }
+      /* Compute the field length. */
+      while (format[i] >= '0' && format[i] <= '9') {
+         width *= 10;
+         width += format[i++] - '0';
+      }
+      while (format[i] == 'l') {
+         i++;
+         is_long = True;
+      }
+
+      switch (format[i]) {
+         case 'd': /* %d */
+            flags |= VG_MSG_SIGNED;
+            if (is_long)
+               ret += myvprintf_int64(send, flags, 10, width, 
+				      (ULong)(va_arg (vargs, Long)));
+            else
+               ret += myvprintf_int64(send, flags, 10, width, 
+				      (ULong)(va_arg (vargs, Int)));
+            break;
+         case 'u': /* %u */
+            if (is_long)
+               ret += myvprintf_int64(send, flags, 10, width, 
+				      (ULong)(va_arg (vargs, ULong)));
+            else
+               ret += myvprintf_int64(send, flags, 10, width, 
+				      (ULong)(va_arg (vargs, UInt)));
+            break;
+         case 'p': /* %p */
+	    ret += 2;
+            send('0');
+            send('x');
+            ret += myvprintf_int64(send, flags, 16, width, 
+				   (ULong)((HWord)va_arg (vargs, void *)));
+            break;
+         case 'x': /* %x */
+            if (is_long)
+               ret += myvprintf_int64(send, flags, 16, width, 
+				      (ULong)(va_arg (vargs, ULong)));
+            else
+               ret += myvprintf_int64(send, flags, 16, width, 
+				      (ULong)(va_arg (vargs, UInt)));
+            break;
+         case 'c': /* %c */
+	    ret++;
+            send((va_arg (vargs, int)));
+            break;
+         case 's': case 'S': { /* %s */
+            char *str = va_arg (vargs, char *);
+            if (str == (char*) 0) str = "(null)";
+            ret += myvprintf_str(send, flags, width, str, 
+                                 (format[i]=='S'));
+            break;
+	 }
+#        if 0
+	 case 'y': { /* %y - print symbol */
+	    Addr a = va_arg(vargs, Addr);
+
+            HChar *name;
+	    if (VG_(get_fnname_w_offset)(a, &name)) {
+               HChar buf[1 + VG_strlen(name) + 1 + 1];
+	       if (flags & VG_MSG_PAREN) {
+                  VG_(sprintf)(str, "(%s)", name):
+	       } else {
+                  VG_(sprintf)(str, "%s", name):
+               }
+	       ret += myvprintf_str(send, flags, width, buf, 0);
+	    }
+	    break;
+	 }
+#        endif
+         default:
+            break;
+      }
+   }
+   return ret;
+}
+
+
+/* A general replacement for printf().  Note that only low-level 
+   debugging info should be sent via here.  The official route is to
+   to use vg_message().  This interface is deprecated.
+*/
+static HChar myprintf_buf[1000];
+static Int   n_myprintf_buf;
+
+static void add_to_myprintf_buf ( HChar c )
+{
+   if (c == '\n' || n_myprintf_buf >= 1000-10 /*paranoia*/ ) {
+      (*vexxx_log_bytes)( myprintf_buf, vexxx_strlen(myprintf_buf) );
+      n_myprintf_buf = 0;
+      myprintf_buf[n_myprintf_buf] = 0;      
+   }
+   myprintf_buf[n_myprintf_buf++] = c;
+   myprintf_buf[n_myprintf_buf] = 0;
+}
+
+static UInt vexxx_printf ( const char *format, ... )
+{
+   UInt ret;
+   va_list vargs;
+   va_start(vargs,format);
+   
+   n_myprintf_buf = 0;
+   myprintf_buf[n_myprintf_buf] = 0;      
+   ret = vprintf_wrk ( add_to_myprintf_buf, format, vargs );
+
+   if (n_myprintf_buf > 0) {
+      (*vexxx_log_bytes)( myprintf_buf, n_myprintf_buf );
+   }
+
+   va_end(vargs);
+
+   return ret;
+}
+
+/*---------------------------------------------------------------*/
+/*--- end                                          vex_util.c ---*/
+/*---------------------------------------------------------------*/
+
+
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+
+// BEGIN #include "ops-ppc.c"
+/*
+ * WARNING:
+ * This file has been auto-generated by './gen-ppc' program
+ * Please don't edit by hand
+ */
+
+
+//BEGIN #include "test-ppc.h"
+/*
+ * test-ppc.h:
+ * PPC tests for qemu-PPC CPU emulation checks - definitions
+ * 
+ * Copyright (c) 2005 Jocelyn Mayer
+ * 
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License V2
+ * as published by the Free Software Foundation.
+ */
+
+#if !defined (__TEST_PPC_H__)
+#define __TEST_PPC_H__
+
+typedef void (*test_func_t) (void);
+typedef struct test_t test_t;
+typedef struct test_table_t test_table_t;
+struct test_t {
+    test_func_t func;
+    const unsigned char *name;
+};
+
+struct test_table_t {
+    test_t *tests;
+    const unsigned char *name;
+    int flags;
+};
+
+typedef void (*test_loop_t) (const unsigned char *name, test_func_t func);
+
+enum test_flags {
+    /* Nb arguments */
+    PPC_ONE_ARG    = 0x00000001,
+    PPC_TWO_ARGS   = 0x00000002,
+    PPC_THREE_ARGS = 0x00000003,
+    PPC_CMP_ARGS   = 0x00000004,
+    PPC_CMPI_ARGS  = 0x00000005,
+    PPC_TWO_I16    = 0x00000006,
+    PPC_SPECIAL    = 0x00000007,
+    PPC_NB_ARGS    = 0x0000000F,
+    /* Type */
+    PPC_ARITH      = 0x00000100,
+    PPC_LOGICAL    = 0x00000200,
+    PPC_COMPARE    = 0x00000300,
+    PPC_CROP       = 0x00000400,
+    PPC_TYPE       = 0x00000F00,
+    /* Family */
+    PPC_INTEGER    = 0x00010000,
+    PPC_FLOAT      = 0x00020000,
+    PPC_405        = 0x00030000,
+    PPC_ALTIVEC    = 0x00040000,
+    PPC_FALTIVEC   = 0x00050000,
+    PPC_FAMILY     = 0x000F0000,
+    /* Flags */
+    PPC_CR         = 0x01000000,
+};
+
+#endif /* !defined (__TEST_PPC_H__) */
+
+//END #include "test-ppc.h"
+
+static void test_add (void)
+{
+    __asm__ __volatile__ ("add          17, 14, 15");
+}
+
+static void test_addo (void)
+{
+    __asm__ __volatile__ ("addo         17, 14, 15");
+}
+
+static void test_addc (void)
+{
+    __asm__ __volatile__ ("addc         17, 14, 15");
+}
+
+static void test_addco (void)
+{
+    __asm__ __volatile__ ("addco        17, 14, 15");
+}
+
+static void test_adde (void)
+{
+    __asm__ __volatile__ ("adde         17, 14, 15");
+}
+
+static void test_addeo (void)
+{
+    __asm__ __volatile__ ("addeo        17, 14, 15");
+}
+
+static void test_divw (void)
+{
+    __asm__ __volatile__ ("divw         17, 14, 15");
+}
+
+static void test_divwo (void)
+{
+    __asm__ __volatile__ ("divwo        17, 14, 15");
+}
+
+static void test_divwu (void)
+{
+    __asm__ __volatile__ ("divwu        17, 14, 15");
+}
+
+static void test_divwuo (void)
+{
+    __asm__ __volatile__ ("divwuo       17, 14, 15");
+}
+
+static void test_mulhw (void)
+{
+    __asm__ __volatile__ ("mulhw        17, 14, 15");
+}
+
+static void test_mulhwu (void)
+{
+    __asm__ __volatile__ ("mulhwu       17, 14, 15");
+}
+
+static void test_mullw (void)
+{
+    __asm__ __volatile__ ("mullw        17, 14, 15");
+}
+
+static void test_mullwo (void)
+{
+    __asm__ __volatile__ ("mullwo       17, 14, 15");
+}
+
+static void test_subf (void)
+{
+    __asm__ __volatile__ ("subf         17, 14, 15");
+}
+
+static void test_subfo (void)
+{
+    __asm__ __volatile__ ("subfo        17, 14, 15");
+}
+
+static void test_subfc (void)
+{
+    __asm__ __volatile__ ("subfc        17, 14, 15");
+}
+
+static void test_subfco (void)
+{
+    __asm__ __volatile__ ("subfco       17, 14, 15");
+}
+
+static void test_subfe (void)
+{
+    __asm__ __volatile__ ("subfe        17, 14, 15");
+}
+
+static void test_subfeo (void)
+{
+    __asm__ __volatile__ ("subfeo       17, 14, 15");
+}
+
+static test_t tests_ia_ops_two[] = {
+    { &test_add             , "         add", },
+    { &test_addo            , "        addo", },
+    { &test_addc            , "        addc", },
+    { &test_addco           , "       addco", },
+    { &test_adde            , "        adde", },
+    { &test_addeo           , "       addeo", },
+    { &test_divw            , "        divw", },
+    { &test_divwo           , "       divwo", },
+    { &test_divwu           , "       divwu", },
+    { &test_divwuo          , "      divwuo", },
+    { &test_mulhw           , "       mulhw", },
+    { &test_mulhwu          , "      mulhwu", },
+    { &test_mullw           , "       mullw", },
+    { &test_mullwo          , "      mullwo", },
+    { &test_subf            , "        subf", },
+    { &test_subfo           , "       subfo", },
+    { &test_subfc           , "       subfc", },
+    { &test_subfco          , "      subfco", },
+    { &test_subfe           , "       subfe", },
+    { &test_subfeo          , "      subfeo", },
+    { NULL,                   NULL,           },
+};
+
+static void test_add_ (void)
+{
+    __asm__ __volatile__ ("add.         17, 14, 15");
+}
+
+static void test_addo_ (void)
+{
+    __asm__ __volatile__ ("addo.        17, 14, 15");
+}
+
+static void test_addc_ (void)
+{
+    __asm__ __volatile__ ("addc.        17, 14, 15");
+}
+
+static void test_addco_ (void)
+{
+    __asm__ __volatile__ ("addco.       17, 14, 15");
+}
+
+static void test_adde_ (void)
+{
+    __asm__ __volatile__ ("adde.        17, 14, 15");
+}
+
+static void test_addeo_ (void)
+{
+    __asm__ __volatile__ ("addeo.       17, 14, 15");
+}
+
+static void test_divw_ (void)
+{
+    __asm__ __volatile__ ("divw.        17, 14, 15");
+}
+
+static void test_divwo_ (void)
+{
+    __asm__ __volatile__ ("divwo.       17, 14, 15");
+}
+
+static void test_divwu_ (void)
+{
+    __asm__ __volatile__ ("divwu.       17, 14, 15");
+}
+
+static void test_divwuo_ (void)
+{
+    __asm__ __volatile__ ("divwuo.      17, 14, 15");
+}
+
+static void test_subf_ (void)
+{
+    __asm__ __volatile__ ("subf.        17, 14, 15");
+}
+
+static void test_subfo_ (void)
+{
+    __asm__ __volatile__ ("subfo.       17, 14, 15");
+}
+
+static void test_subfc_ (void)
+{
+    __asm__ __volatile__ ("subfc.       17, 14, 15");
+}
+
+static void test_subfco_ (void)
+{
+    __asm__ __volatile__ ("subfco.      17, 14, 15");
+}
+
+static void test_subfe_ (void)
+{
+    __asm__ __volatile__ ("subfe.       17, 14, 15");
+}
+
+static void test_subfeo_ (void)
+{
+    __asm__ __volatile__ ("subfeo.      17, 14, 15");
+}
+
+static test_t tests_iar_ops_two[] = {
+    { &test_add_            , "        add.", },
+    { &test_addo_           , "       addo.", },
+    { &test_addc_           , "       addc.", },
+    { &test_addco_          , "      addco.", },
+    { &test_adde_           , "       adde.", },
+    { &test_addeo_          , "      addeo.", },
+    { &test_divw_           , "       divw.", },
+    { &test_divwo_          , "      divwo.", },
+    { &test_divwu_          , "      divwu.", },
+    { &test_divwuo_         , "     divwuo.", },
+    { &test_subf_           , "       subf.", },
+    { &test_subfo_          , "      subfo.", },
+    { &test_subfc_          , "      subfc.", },
+    { &test_subfco_         , "     subfco.", },
+    { &test_subfe_          , "      subfe.", },
+    { &test_subfeo_         , "     subfeo.", },
+    { NULL,                   NULL,           },
+};
+
+static void test_and (void)
+{
+    __asm__ __volatile__ ("and          17, 14, 15");
+}
+
+static void test_andc (void)
+{
+    __asm__ __volatile__ ("andc         17, 14, 15");
+}
+
+static void test_eqv (void)
+{
+    __asm__ __volatile__ ("eqv          17, 14, 15");
+}
+
+static void test_nand (void)
+{
+    __asm__ __volatile__ ("nand         17, 14, 15");
+}
+
+static void test_nor (void)
+{
+    __asm__ __volatile__ ("nor          17, 14, 15");
+}
+
+static void test_or (void)
+{
+    __asm__ __volatile__ ("or           17, 14, 15");
+}
+
+static void test_orc (void)
+{
+    __asm__ __volatile__ ("orc          17, 14, 15");
+}
+
+static void test_xor (void)
+{
+    __asm__ __volatile__ ("xor          17, 14, 15");
+}
+
+static void test_slw (void)
+{
+    __asm__ __volatile__ ("slw          17, 14, 15");
+}
+
+static void test_sraw (void)
+{
+    __asm__ __volatile__ ("sraw         17, 14, 15");
+}
+
+static void test_srw (void)
+{
+    __asm__ __volatile__ ("srw          17, 14, 15");
+}
+
+static test_t tests_il_ops_two[] = {
+    { &test_and             , "         and", },
+    { &test_andc            , "        andc", },
+    { &test_eqv             , "         eqv", },
+    { &test_nand            , "        nand", },
+    { &test_nor             , "         nor", },
+    { &test_or              , "          or", },
+    { &test_orc             , "         orc", },
+    { &test_xor             , "         xor", },
+    { &test_slw             , "         slw", },
+    { &test_sraw            , "        sraw", },
+    { &test_srw             , "         srw", },
+    { NULL,                   NULL,           },
+};
+
+static void test_and_ (void)
+{
+    __asm__ __volatile__ ("and.         17, 14, 15");
+}
+
+static void test_andc_ (void)
+{
+    __asm__ __volatile__ ("andc.        17, 14, 15");
+}
+
+static void test_eqv_ (void)
+{
+    __asm__ __volatile__ ("eqv.         17, 14, 15");
+}
+
+static void test_mulhw_ (void)
+{
+    __asm__ __volatile__ ("mulhw.       17, 14, 15");
+}
+
+static void test_mulhwu_ (void)
+{
+    __asm__ __volatile__ ("mulhwu.      17, 14, 15");
+}
+
+static void test_mullw_ (void)
+{
+    __asm__ __volatile__ ("mullw.       17, 14, 15");
+}
+
+static void test_mullwo_ (void)
+{
+    __asm__ __volatile__ ("mullwo.      17, 14, 15");
+}
+
+static void test_nand_ (void)
+{
+    __asm__ __volatile__ ("nand.        17, 14, 15");
+}
+
+static void test_nor_ (void)
+{
+    __asm__ __volatile__ ("nor.         17, 14, 15");
+}
+
+static void test_or_ (void)
+{
+    __asm__ __volatile__ ("or.          17, 14, 15");
+}
+
+static void test_orc_ (void)
+{
+    __asm__ __volatile__ ("orc.         17, 14, 15");
+}
+
+static void test_xor_ (void)
+{
+    __asm__ __volatile__ ("xor.         17, 14, 15");
+}
+
+static void test_slw_ (void)
+{
+    __asm__ __volatile__ ("slw.         17, 14, 15");
+}
+
+static void test_sraw_ (void)
+{
+    __asm__ __volatile__ ("sraw.        17, 14, 15");
+}
+
+static void test_srw_ (void)
+{
+    __asm__ __volatile__ ("srw.         17, 14, 15");
+}
+
+static test_t tests_ilr_ops_two[] = {
+    { &test_and_            , "        and.", },
+    { &test_andc_           , "       andc.", },
+    { &test_eqv_            , "        eqv.", },
+    { &test_mulhw_          , "      mulhw.", },
+    { &test_mulhwu_         , "     mulhwu.", },
+    { &test_mullw_          , "      mullw.", },
+    { &test_mullwo_         , "     mullwo.", },
+    { &test_nand_           , "       nand.", },
+    { &test_nor_            , "        nor.", },
+    { &test_or_             , "         or.", },
+    { &test_orc_            , "        orc.", },
+    { &test_xor_            , "        xor.", },
+    { &test_slw_            , "        slw.", },
+    { &test_sraw_           , "       sraw.", },
+    { &test_srw_            , "        srw.", },
+    { NULL,                   NULL,           },
+};
+
+static void test_cmp (void)
+{
+    __asm__ __volatile__ ("cmp          2, 14, 15");
+}
+
+static void test_cmpl (void)
+{
+    __asm__ __volatile__ ("cmpl         2, 14, 15");
+}
+
+static test_t tests_icr_ops_two[] = {
+    { &test_cmp             , "         cmp", },
+    { &test_cmpl            , "        cmpl", },
+    { NULL,                   NULL,           },
+};
+
+static void test_cmpi (void)
+{
+    __asm__ __volatile__ ("cmpi         2, 14, 15");
+}
+
+static void test_cmpli (void)
+{
+    __asm__ __volatile__ ("cmpli        2, 14, 15");
+}
+
+static test_t tests_icr_ops_two_i16[] = {
+    { &test_cmpi            , "        cmpi", },
+    { &test_cmpli           , "       cmpli", },
+    { NULL,                   NULL,           },
+};
+
+static void test_addi (void)
+{
+    __asm__ __volatile__ ("addi         17, 14, 0");
+}
+
+static void test_addic (void)
+{
+    __asm__ __volatile__ ("addic        17, 14, 0");
+}
+
+static void test_addis (void)
+{
+    __asm__ __volatile__ ("addis        17, 14, 0");
+}
+
+static void test_mulli (void)
+{
+    __asm__ __volatile__ ("mulli        17, 14, 0");
+}
+
+static void test_subfic (void)
+{
+    __asm__ __volatile__ ("subfic       17, 14, 0");
+}
+
+static test_t tests_ia_ops_two_i16[] = {
+    { &test_addi            , "        addi", },
+    { &test_addic           , "       addic", },
+    { &test_addis           , "       addis", },
+    { &test_mulli           , "       mulli", },
+    { &test_subfic          , "      subfic", },
+    { NULL,                   NULL,           },
+};
+
+static void test_addic_ (void)
+{
+    __asm__ __volatile__ ("addic.       17, 14, 0");
+}
+
+static test_t tests_iar_ops_two_i16[] = {
+    { &test_addic_          , "      addic.", },
+    { NULL,                   NULL,           },
+};
+
+static void test_ori (void)
+{
+    __asm__ __volatile__ ("ori          17, 14, 0");
+}
+
+static void test_oris (void)
+{
+    __asm__ __volatile__ ("oris         17, 14, 0");
+}
+
+static void test_xori (void)
+{
+    __asm__ __volatile__ ("xori         17, 14, 0");
+}
+
+static void test_xoris (void)
+{
+    __asm__ __volatile__ ("xoris        17, 14, 0");
+}
+
+static test_t tests_il_ops_two_i16[] = {
+    { &test_ori             , "         ori", },
+    { &test_oris            , "        oris", },
+    { &test_xori            , "        xori", },
+    { &test_xoris           , "       xoris", },
+    { NULL,                   NULL,           },
+};
+
+static void test_andi_ (void)
+{
+    __asm__ __volatile__ ("andi.        17, 14, 0");
+}
+
+static void test_andis_ (void)
+{
+    __asm__ __volatile__ ("andis.       17, 14, 0");
+}
+
+static test_t tests_ilr_ops_two_i16[] = {
+    { &test_andi_           , "       andi.", },
+    { &test_andis_          , "      andis.", },
+    { NULL,                   NULL,           },
+};
+
+static void test_crand (void)
+{
+    __asm__ __volatile__ ("crand        17, 14, 15");
+}
+
+static void test_crandc (void)
+{
+    __asm__ __volatile__ ("crandc       17, 14, 15");
+}
+
+static void test_creqv (void)
+{
+    __asm__ __volatile__ ("creqv        17, 14, 15");
+}
+
+static void test_crnand (void)
+{
+    __asm__ __volatile__ ("crnand       17, 14, 15");
+}
+
+static void test_crnor (void)
+{
+    __asm__ __volatile__ ("crnor        17, 14, 15");
+}
+
+static void test_cror (void)
+{
+    __asm__ __volatile__ ("cror         17, 14, 15");
+}
+
+static void test_crorc (void)
+{
+    __asm__ __volatile__ ("crorc        17, 14, 15");
+}
+
+static void test_crxor (void)
+{
+    __asm__ __volatile__ ("crxor        17, 14, 15");
+}
+
+static test_t tests_crl_ops_two[] = {
+    { &test_crand           , "       crand", },
+    { &test_crandc          , "      crandc", },
+    { &test_creqv           , "       creqv", },
+    { &test_crnand          , "      crnand", },
+    { &test_crnor           , "       crnor", },
+    { &test_cror            , "        cror", },
+    { &test_crorc           , "       crorc", },
+    { &test_crxor           , "       crxor", },
+    { NULL,                   NULL,           },
+};
+
+/* One-source integer arithmetic ops (add/sub with minus-one or zero
+   extended), plain and overflow-enabled ('o') forms: r17 <- op(r14). */
+static void test_addme (void)
+{
+    __asm__ __volatile__ ("addme        17, 14");
+}
+
+static void test_addmeo (void)
+{
+    __asm__ __volatile__ ("addmeo       17, 14");
+}
+
+static void test_addze (void)
+{
+    __asm__ __volatile__ ("addze        17, 14");
+}
+
+static void test_addzeo (void)
+{
+    __asm__ __volatile__ ("addzeo       17, 14");
+}
+
+static void test_subfme (void)
+{
+    __asm__ __volatile__ ("subfme       17, 14");
+}
+
+static void test_subfmeo (void)
+{
+    __asm__ __volatile__ ("subfmeo      17, 14");
+}
+
+static void test_subfze (void)
+{
+    __asm__ __volatile__ ("subfze       17, 14");
+}
+
+static void test_subfzeo (void)
+{
+    __asm__ __volatile__ ("subfzeo      17, 14");
+}
+
+/* NULL-terminated table of the one-source integer arithmetic ops. */
+static test_t tests_ia_ops_one[] = {
+    { &test_addme           , "       addme", },
+    { &test_addmeo          , "      addmeo", },
+    { &test_addze           , "       addze", },
+    { &test_addzeo          , "      addzeo", },
+    { &test_subfme          , "      subfme", },
+    { &test_subfmeo         , "     subfmeo", },
+    { &test_subfze          , "      subfze", },
+    { &test_subfzeo         , "     subfzeo", },
+    { NULL,                   NULL,           },
+};
+
+/* Same one-source integer arithmetic ops as above, but Rc=1 ('.') forms
+   which also record the result into CR0. */
+static void test_addme_ (void)
+{
+    __asm__ __volatile__ ("addme.       17, 14");
+}
+
+static void test_addmeo_ (void)
+{
+    __asm__ __volatile__ ("addmeo.      17, 14");
+}
+
+static void test_addze_ (void)
+{
+    __asm__ __volatile__ ("addze.       17, 14");
+}
+
+static void test_addzeo_ (void)
+{
+    __asm__ __volatile__ ("addzeo.      17, 14");
+}
+
+static void test_subfme_ (void)
+{
+    __asm__ __volatile__ ("subfme.      17, 14");
+}
+
+static void test_subfmeo_ (void)
+{
+    __asm__ __volatile__ ("subfmeo.     17, 14");
+}
+
+static void test_subfze_ (void)
+{
+    __asm__ __volatile__ ("subfze.      17, 14");
+}
+
+static void test_subfzeo_ (void)
+{
+    __asm__ __volatile__ ("subfzeo.     17, 14");
+}
+
+/* NULL-terminated table of the recording (Rc=1) one-source arithmetic ops. */
+static test_t tests_iar_ops_one[] = {
+    { &test_addme_          , "      addme.", },
+    { &test_addmeo_         , "     addmeo.", },
+    { &test_addze_          , "      addze.", },
+    { &test_addzeo_         , "     addzeo.", },
+    { &test_subfme_         , "     subfme.", },
+    { &test_subfmeo_        , "    subfmeo.", },
+    { &test_subfze_         , "     subfze.", },
+    { &test_subfzeo_        , "    subfzeo.", },
+    { NULL,                   NULL,           },
+};
+
+/* One-source integer logical ops (count-leading-zeros, sign-extends,
+   negate): r17 <- op(r14), no CR update. */
+static void test_cntlzw (void)
+{
+    __asm__ __volatile__ ("cntlzw       17, 14");
+}
+
+static void test_extsb (void)
+{
+    __asm__ __volatile__ ("extsb        17, 14");
+}
+
+static void test_extsh (void)
+{
+    __asm__ __volatile__ ("extsh        17, 14");
+}
+
+static void test_neg (void)
+{
+    __asm__ __volatile__ ("neg          17, 14");
+}
+
+static void test_nego (void)
+{
+    __asm__ __volatile__ ("nego         17, 14");
+}
+
+/* NULL-terminated table of the one-source integer logical ops. */
+static test_t tests_il_ops_one[] = {
+    { &test_cntlzw          , "      cntlzw", },
+    { &test_extsb           , "       extsb", },
+    { &test_extsh           , "       extsh", },
+    { &test_neg             , "         neg", },
+    { &test_nego            , "        nego", },
+    { NULL,                   NULL,           },
+};
+
+/* Recording (Rc=1, '.') variants of the one-source integer logical ops. */
+static void test_cntlzw_ (void)
+{
+    __asm__ __volatile__ ("cntlzw.      17, 14");
+}
+
+static void test_extsb_ (void)
+{
+    __asm__ __volatile__ ("extsb.       17, 14");
+}
+
+static void test_extsh_ (void)
+{
+    __asm__ __volatile__ ("extsh.       17, 14");
+}
+
+static void test_neg_ (void)
+{
+    __asm__ __volatile__ ("neg.         17, 14");
+}
+
+static void test_nego_ (void)
+{
+    __asm__ __volatile__ ("nego.        17, 14");
+}
+
+/* NULL-terminated table of the recording one-source integer logical ops. */
+static test_t tests_ilr_ops_one[] = {
+    { &test_cntlzw_         , "     cntlzw.", },
+    { &test_extsb_          , "      extsb.", },
+    { &test_extsh_          , "      extsh.", },
+    { &test_neg_            , "        neg.", },
+    { &test_nego_           , "       nego.", },
+    { NULL,                   NULL,           },
+};
+
+/* "Special" integer logical ops: rotate-left-word with insert/mask and
+   shift-right-algebraic-immediate.  Shift/mask fields are fixed at 0 here;
+   the driver presumably patches or iterates them elsewhere — confirm. */
+static void test_rlwimi (void)
+{
+    __asm__ __volatile__ ("rlwimi       17, 14, 0, 0, 0");
+}
+
+static void test_rlwinm (void)
+{
+    __asm__ __volatile__ ("rlwinm       17, 14, 0, 0, 0");
+}
+
+static void test_rlwnm (void)
+{
+    __asm__ __volatile__ ("rlwnm        17, 14, 15, 0, 0");
+}
+
+static void test_srawi (void)
+{
+    __asm__ __volatile__ ("srawi        17, 14, 0");
+}
+
+/* NULL-terminated table of the special-form integer logical ops. */
+static test_t tests_il_ops_spe[] = {
+    { &test_rlwimi          , "      rlwimi", },
+    { &test_rlwinm          , "      rlwinm", },
+    { &test_rlwnm           , "       rlwnm", },
+    { &test_srawi           , "       srawi", },
+    { NULL,                   NULL,           },
+};
+
+/* Recording (Rc=1, '.') variants of the special-form rotate/shift ops. */
+static void test_rlwimi_ (void)
+{
+    __asm__ __volatile__ ("rlwimi.      17, 14, 0, 0, 0");
+}
+
+static void test_rlwinm_ (void)
+{
+    __asm__ __volatile__ ("rlwinm.      17, 14, 0, 0, 0");
+}
+
+static void test_rlwnm_ (void)
+{
+    __asm__ __volatile__ ("rlwnm.       17, 14, 15, 0, 0");
+}
+
+static void test_srawi_ (void)
+{
+    __asm__ __volatile__ ("srawi.       17, 14, 0");
+}
+
+/* NULL-terminated table of the recording special-form ops. */
+static test_t tests_ilr_ops_spe[] = {
+    { &test_rlwimi_         , "     rlwimi.", },
+    { &test_rlwinm_         , "     rlwinm.", },
+    { &test_rlwnm_          , "      rlwnm.", },
+    { &test_srawi_          , "      srawi.", },
+    { NULL,                   NULL,           },
+};
+
+#if !defined (NO_FLOAT)
+/* Three-source floating-point arithmetic ops (select and fused
+   multiply-add family): f17 <- op(f14, f15, f16). */
+static void test_fsel (void)
+{
+    __asm__ __volatile__ ("fsel         17, 14, 15, 16");
+}
+
+static void test_fmadd (void)
+{
+    __asm__ __volatile__ ("fmadd        17, 14, 15, 16");
+}
+
+static void test_fmadds (void)
+{
+    __asm__ __volatile__ ("fmadds       17, 14, 15, 16");
+}
+
+static void test_fmsub (void)
+{
+    __asm__ __volatile__ ("fmsub        17, 14, 15, 16");
+}
+
+static void test_fmsubs (void)
+{
+    __asm__ __volatile__ ("fmsubs       17, 14, 15, 16");
+}
+
+static void test_fnmadd (void)
+{
+    __asm__ __volatile__ ("fnmadd       17, 14, 15, 16");
+}
+
+static void test_fnmadds (void)
+{
+    __asm__ __volatile__ ("fnmadds      17, 14, 15, 16");
+}
+
+static void test_fnmsub (void)
+{
+    __asm__ __volatile__ ("fnmsub       17, 14, 15, 16");
+}
+
+static void test_fnmsubs (void)
+{
+    __asm__ __volatile__ ("fnmsubs      17, 14, 15, 16");
+}
+
+/* NULL-terminated table of the three-source FP arithmetic ops. */
+static test_t tests_fa_ops_three[] = {
+    { &test_fsel            , "        fsel", },
+    { &test_fmadd           , "       fmadd", },
+    { &test_fmadds          , "      fmadds", },
+    { &test_fmsub           , "       fmsub", },
+    { &test_fmsubs          , "      fmsubs", },
+    { &test_fnmadd          , "      fnmadd", },
+    { &test_fnmadds         , "     fnmadds", },
+    { &test_fnmsub          , "      fnmsub", },
+    { &test_fnmsubs         , "     fnmsubs", },
+    { NULL,                   NULL,           },
+};
+#endif /* !defined (NO_FLOAT) */
+
+#if !defined (NO_FLOAT)
+/* Recording (Rc=1, '.') variants of the three-source FP arithmetic ops. */
+static void test_fsel_ (void)
+{
+    __asm__ __volatile__ ("fsel.        17, 14, 15, 16");
+}
+
+static void test_fmadd_ (void)
+{
+    __asm__ __volatile__ ("fmadd.       17, 14, 15, 16");
+}
+
+static void test_fmadds_ (void)
+{
+    __asm__ __volatile__ ("fmadds.      17, 14, 15, 16");
+}
+
+static void test_fmsub_ (void)
+{
+    __asm__ __volatile__ ("fmsub.       17, 14, 15, 16");
+}
+
+static void test_fmsubs_ (void)
+{
+    __asm__ __volatile__ ("fmsubs.      17, 14, 15, 16");
+}
+
+static void test_fnmadd_ (void)
+{
+    __asm__ __volatile__ ("fnmadd.      17, 14, 15, 16");
+}
+
+static void test_fnmadds_ (void)
+{
+    __asm__ __volatile__ ("fnmadds.     17, 14, 15, 16");
+}
+
+static void test_fnmsub_ (void)
+{
+    __asm__ __volatile__ ("fnmsub.      17, 14, 15, 16");
+}
+
+static void test_fnmsubs_ (void)
+{
+    __asm__ __volatile__ ("fnmsubs.     17, 14, 15, 16");
+}
+
+/* NULL-terminated table of the recording three-source FP ops. */
+static test_t tests_far_ops_three[] = {
+    { &test_fsel_           , "       fsel.", },
+    { &test_fmadd_          , "      fmadd.", },
+    { &test_fmadds_         , "     fmadds.", },
+    { &test_fmsub_          , "      fmsub.", },
+    { &test_fmsubs_         , "     fmsubs.", },
+    { &test_fnmadd_         , "     fnmadd.", },
+    { &test_fnmadds_        , "    fnmadds.", },
+    { &test_fnmsub_         , "     fnmsub.", },
+    { &test_fnmsubs_        , "    fnmsubs.", },
+    { NULL,                   NULL,           },
+};
+#endif /* !defined (NO_FLOAT) */
+
+#if !defined (NO_FLOAT)
+/* Two-source floating-point arithmetic ops, double and single ('s')
+   precision: f17 <- op(f14, f15). */
+static void test_fadd (void)
+{
+    __asm__ __volatile__ ("fadd         17, 14, 15");
+}
+
+static void test_fadds (void)
+{
+    __asm__ __volatile__ ("fadds        17, 14, 15");
+}
+
+static void test_fsub (void)
+{
+    __asm__ __volatile__ ("fsub         17, 14, 15");
+}
+
+static void test_fsubs (void)
+{
+    __asm__ __volatile__ ("fsubs        17, 14, 15");
+}
+
+static void test_fmul (void)
+{
+    __asm__ __volatile__ ("fmul         17, 14, 15");
+}
+
+static void test_fmuls (void)
+{
+    __asm__ __volatile__ ("fmuls        17, 14, 15");
+}
+
+static void test_fdiv (void)
+{
+    __asm__ __volatile__ ("fdiv         17, 14, 15");
+}
+
+static void test_fdivs (void)
+{
+    __asm__ __volatile__ ("fdivs        17, 14, 15");
+}
+
+/* NULL-terminated table of the two-source FP arithmetic ops. */
+static test_t tests_fa_ops_two[] = {
+    { &test_fadd            , "        fadd", },
+    { &test_fadds           , "       fadds", },
+    { &test_fsub            , "        fsub", },
+    { &test_fsubs           , "       fsubs", },
+    { &test_fmul            , "        fmul", },
+    { &test_fmuls           , "       fmuls", },
+    { &test_fdiv            , "        fdiv", },
+    { &test_fdivs           , "       fdivs", },
+    { NULL,                   NULL,           },
+};
+#endif /* !defined (NO_FLOAT) */
+
+#if !defined (NO_FLOAT)
+/* Recording (Rc=1, '.') variants of the two-source FP arithmetic ops. */
+static void test_fadd_ (void)
+{
+    __asm__ __volatile__ ("fadd.        17, 14, 15");
+}
+
+static void test_fadds_ (void)
+{
+    __asm__ __volatile__ ("fadds.       17, 14, 15");
+}
+
+static void test_fsub_ (void)
+{
+    __asm__ __volatile__ ("fsub.        17, 14, 15");
+}
+
+static void test_fsubs_ (void)
+{
+    __asm__ __volatile__ ("fsubs.       17, 14, 15");
+}
+
+static void test_fmul_ (void)
+{
+    __asm__ __volatile__ ("fmul.        17, 14, 15");
+}
+
+static void test_fmuls_ (void)
+{
+    __asm__ __volatile__ ("fmuls.       17, 14, 15");
+}
+
+static void test_fdiv_ (void)
+{
+    __asm__ __volatile__ ("fdiv.        17, 14, 15");
+}
+
+static void test_fdivs_ (void)
+{
+    __asm__ __volatile__ ("fdivs.       17, 14, 15");
+}
+
+/* NULL-terminated table of the recording two-source FP ops. */
+static test_t tests_far_ops_two[] = {
+    { &test_fadd_           , "       fadd.", },
+    { &test_fadds_          , "      fadds.", },
+    { &test_fsub_           , "       fsub.", },
+    { &test_fsubs_          , "      fsubs.", },
+    { &test_fmul_           , "       fmul.", },
+    { &test_fmuls_          , "      fmuls.", },
+    { &test_fdiv_           , "       fdiv.", },
+    { &test_fdivs_          , "      fdivs.", },
+    { NULL,                   NULL,           },
+};
+#endif /* !defined (NO_FLOAT) */
+
+#if !defined (NO_FLOAT)
+/* FP compare ops (ordered/unordered): compare f14 with f15 into CR field 2. */
+static void test_fcmpo (void)
+{
+    __asm__ __volatile__ ("fcmpo        2, 14, 15");
+}
+
+static void test_fcmpu (void)
+{
+    __asm__ __volatile__ ("fcmpu        2, 14, 15");
+}
+
+/* NULL-terminated table of the FP compare ops. */
+static test_t tests_fcr_ops_two[] = {
+    { &test_fcmpo           , "       fcmpo", },
+    { &test_fcmpu           , "       fcmpu", },
+    { NULL,                   NULL,           },
+};
+#endif /* !defined (NO_FLOAT) */
+
+#if !defined (NO_FLOAT)
+/* One-source FP ops (estimates, rounding, int conversion, move/negate/abs):
+   f17 <- op(f14). */
+static void test_fres (void)
+{
+    __asm__ __volatile__ ("fres         17, 14");
+}
+
+static void test_frsqrte (void)
+{
+    __asm__ __volatile__ ("frsqrte      17, 14");
+}
+
+static void test_frsp (void)
+{
+    __asm__ __volatile__ ("frsp         17, 14");
+}
+
+static void test_fctiw (void)
+{
+    __asm__ __volatile__ ("fctiw        17, 14");
+}
+
+static void test_fctiwz (void)
+{
+    __asm__ __volatile__ ("fctiwz       17, 14");
+}
+
+static void test_fmr (void)
+{
+    __asm__ __volatile__ ("fmr          17, 14");
+}
+
+static void test_fneg (void)
+{
+    __asm__ __volatile__ ("fneg         17, 14");
+}
+
+static void test_fabs (void)
+{
+    __asm__ __volatile__ ("fabs         17, 14");
+}
+
+static void test_fnabs (void)
+{
+    __asm__ __volatile__ ("fnabs        17, 14");
+}
+
+/* NULL-terminated table of the one-source FP ops. */
+static test_t tests_fa_ops_one[] = {
+    { &test_fres            , "        fres", },
+    { &test_frsqrte         , "     frsqrte", },
+    { &test_frsp            , "        frsp", },
+    { &test_fctiw           , "       fctiw", },
+    { &test_fctiwz          , "      fctiwz", },
+    { &test_fmr             , "         fmr", },
+    { &test_fneg            , "        fneg", },
+    { &test_fabs            , "        fabs", },
+    { &test_fnabs           , "       fnabs", },
+    { NULL,                   NULL,           },
+};
+#endif /* !defined (NO_FLOAT) */
+
+#if !defined (NO_FLOAT)
+/* Recording (Rc=1, '.') variants of the one-source FP ops. */
+static void test_fres_ (void)
+{
+    __asm__ __volatile__ ("fres.        17, 14");
+}
+
+static void test_frsqrte_ (void)
+{
+    __asm__ __volatile__ ("frsqrte.     17, 14");
+}
+
+static void test_frsp_ (void)
+{
+    __asm__ __volatile__ ("frsp.        17, 14");
+}
+
+static void test_fctiw_ (void)
+{
+    __asm__ __volatile__ ("fctiw.       17, 14");
+}
+
+static void test_fctiwz_ (void)
+{
+    __asm__ __volatile__ ("fctiwz.      17, 14");
+}
+
+static void test_fmr_ (void)
+{
+    __asm__ __volatile__ ("fmr.         17, 14");
+}
+
+static void test_fneg_ (void)
+{
+    __asm__ __volatile__ ("fneg.        17, 14");
+}
+
+static void test_fabs_ (void)
+{
+    __asm__ __volatile__ ("fabs.        17, 14");
+}
+
+static void test_fnabs_ (void)
+{
+    __asm__ __volatile__ ("fnabs.       17, 14");
+}
+
+/* NULL-terminated table of the recording one-source FP ops. */
+static test_t tests_far_ops_one[] = {
+    { &test_fres_           , "       fres.", },
+    { &test_frsqrte_        , "    frsqrte.", },
+    { &test_frsp_           , "       frsp.", },
+    { &test_fctiw_          , "      fctiw.", },
+    { &test_fctiwz_         , "     fctiwz.", },
+    { &test_fmr_            , "        fmr.", },
+    { &test_fneg_           , "       fneg.", },
+    { &test_fabs_           , "       fabs.", },
+    { &test_fnabs_          , "      fnabs.", },
+    { NULL,                   NULL,           },
+};
+#endif /* !defined (NO_FLOAT) */
+
+#if !defined (NO_FLOAT)
+/* Placeholder: no special-form FP logical ops defined; table is empty
+   (NULL terminator only) so the driver iterates zero entries. */
+static test_t tests_fl_ops_spe[] = {
+    { NULL,                   NULL,           },
+};
+#endif /* !defined (NO_FLOAT) */
+
+#if !defined (NO_FLOAT)
+/* Placeholder: no recording special-form FP ops defined; empty table. */
+static test_t tests_flr_ops_spe[] = {
+    { NULL,                   NULL,           },
+};
+#endif /* !defined (NO_FLOAT) */
+
+#if defined (HAS_ALTIVEC)
+/* AltiVec three-source integer arithmetic ops (multiply-add/sum families):
+   v17 <- op(v14, v15, v16). */
+static void test_vmhaddshs (void)
+{
+    __asm__ __volatile__ ("vmhaddshs    17, 14, 15, 16");
+}
+
+static void test_vmhraddshs (void)
+{
+    __asm__ __volatile__ ("vmhraddshs   17, 14, 15, 16");
+}
+
+static void test_vmladduhm (void)
+{
+    __asm__ __volatile__ ("vmladduhm    17, 14, 15, 16");
+}
+
+static void test_vmsumubm (void)
+{
+    __asm__ __volatile__ ("vmsumubm     17, 14, 15, 16");
+}
+
+static void test_vmsumuhm (void)
+{
+    __asm__ __volatile__ ("vmsumuhm     17, 14, 15, 16");
+}
+
+static void test_vmsumshs (void)
+{
+    __asm__ __volatile__ ("vmsumshs     17, 14, 15, 16");
+}
+
+static void test_vmsumuhs (void)
+{
+    __asm__ __volatile__ ("vmsumuhs     17, 14, 15, 16");
+}
+
+static void test_vmsummbm (void)
+{
+    __asm__ __volatile__ ("vmsummbm     17, 14, 15, 16");
+}
+
+static void test_vmsumshm (void)
+{
+    __asm__ __volatile__ ("vmsumshm     17, 14, 15, 16");
+}
+
+/* NULL-terminated table of the AltiVec three-source arithmetic ops. */
+static test_t tests_aa_ops_three[] = {
+    { &test_vmhaddshs       , "   vmhaddshs", },
+    { &test_vmhraddshs      , "  vmhraddshs", },
+    { &test_vmladduhm       , "   vmladduhm", },
+    { &test_vmsumubm        , "    vmsumubm", },
+    { &test_vmsumuhm        , "    vmsumuhm", },
+    { &test_vmsumshs        , "    vmsumshs", },
+    { &test_vmsumuhs        , "    vmsumuhs", },
+    { &test_vmsummbm        , "    vmsummbm", },
+    { &test_vmsumshm        , "    vmsumshm", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+/* AltiVec three-source logical ops (permute, select):
+   v17 <- op(v14, v15, v16). */
+static void test_vperm (void)
+{
+    __asm__ __volatile__ ("vperm        17, 14, 15, 16");
+}
+
+static void test_vsel (void)
+{
+    __asm__ __volatile__ ("vsel         17, 14, 15, 16");
+}
+
+/* NULL-terminated table of the AltiVec three-source logical ops. */
+static test_t tests_al_ops_three[] = {
+    { &test_vperm           , "       vperm", },
+    { &test_vsel            , "        vsel", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+/* AltiVec two-source integer arithmetic ops (add/sub modulo and saturating,
+   multiply odd/even, sums, averages, min/max over byte/half/word lanes):
+   v17 <- op(v14, v15). */
+static void test_vaddubm (void)
+{
+    __asm__ __volatile__ ("vaddubm      17, 14, 15");
+}
+
+static void test_vadduhm (void)
+{
+    __asm__ __volatile__ ("vadduhm      17, 14, 15");
+}
+
+static void test_vadduwm (void)
+{
+    __asm__ __volatile__ ("vadduwm      17, 14, 15");
+}
+
+static void test_vaddubs (void)
+{
+    __asm__ __volatile__ ("vaddubs      17, 14, 15");
+}
+
+static void test_vadduhs (void)
+{
+    __asm__ __volatile__ ("vadduhs      17, 14, 15");
+}
+
+static void test_vadduws (void)
+{
+    __asm__ __volatile__ ("vadduws      17, 14, 15");
+}
+
+static void test_vaddsbs (void)
+{
+    __asm__ __volatile__ ("vaddsbs      17, 14, 15");
+}
+
+static void test_vaddshs (void)
+{
+    __asm__ __volatile__ ("vaddshs      17, 14, 15");
+}
+
+static void test_vaddsws (void)
+{
+    __asm__ __volatile__ ("vaddsws      17, 14, 15");
+}
+
+static void test_vaddcuw (void)
+{
+    __asm__ __volatile__ ("vaddcuw      17, 14, 15");
+}
+
+static void test_vsububm (void)
+{
+    __asm__ __volatile__ ("vsububm      17, 14, 15");
+}
+
+static void test_vsubuhm (void)
+{
+    __asm__ __volatile__ ("vsubuhm      17, 14, 15");
+}
+
+static void test_vsubuwm (void)
+{
+    __asm__ __volatile__ ("vsubuwm      17, 14, 15");
+}
+
+static void test_vsububs (void)
+{
+    __asm__ __volatile__ ("vsububs      17, 14, 15");
+}
+
+static void test_vsubuhs (void)
+{
+    __asm__ __volatile__ ("vsubuhs      17, 14, 15");
+}
+
+static void test_vsubuws (void)
+{
+    __asm__ __volatile__ ("vsubuws      17, 14, 15");
+}
+
+static void test_vsubcuw (void)
+{
+    __asm__ __volatile__ ("vsubcuw      17, 14, 15");
+}
+
+static void test_vmuloub (void)
+{
+    __asm__ __volatile__ ("vmuloub      17, 14, 15");
+}
+
+static void test_vmulouh (void)
+{
+    __asm__ __volatile__ ("vmulouh      17, 14, 15");
+}
+
+static void test_vmulosb (void)
+{
+    __asm__ __volatile__ ("vmulosb      17, 14, 15");
+}
+
+static void test_vmulosh (void)
+{
+    __asm__ __volatile__ ("vmulosh      17, 14, 15");
+}
+
+static void test_vmuleub (void)
+{
+    __asm__ __volatile__ ("vmuleub      17, 14, 15");
+}
+
+static void test_vmuleuh (void)
+{
+    __asm__ __volatile__ ("vmuleuh      17, 14, 15");
+}
+
+static void test_vmulesb (void)
+{
+    __asm__ __volatile__ ("vmulesb      17, 14, 15");
+}
+
+static void test_vmulesh (void)
+{
+    __asm__ __volatile__ ("vmulesh      17, 14, 15");
+}
+
+static void test_vsumsws (void)
+{
+    __asm__ __volatile__ ("vsumsws      17, 14, 15");
+}
+
+static void test_vsum2sws (void)
+{
+    __asm__ __volatile__ ("vsum2sws     17, 14, 15");
+}
+
+static void test_vsum4ubs (void)
+{
+    __asm__ __volatile__ ("vsum4ubs     17, 14, 15");
+}
+
+static void test_vsum4sbs (void)
+{
+    __asm__ __volatile__ ("vsum4sbs     17, 14, 15");
+}
+
+static void test_vsum4shs (void)
+{
+    __asm__ __volatile__ ("vsum4shs     17, 14, 15");
+}
+
+static void test_vavgub (void)
+{
+    __asm__ __volatile__ ("vavgub       17, 14, 15");
+}
+
+static void test_vavguh (void)
+{
+    __asm__ __volatile__ ("vavguh       17, 14, 15");
+}
+
+static void test_vavguw (void)
+{
+    __asm__ __volatile__ ("vavguw       17, 14, 15");
+}
+
+static void test_vavgsb (void)
+{
+    __asm__ __volatile__ ("vavgsb       17, 14, 15");
+}
+
+static void test_vavgsh (void)
+{
+    __asm__ __volatile__ ("vavgsh       17, 14, 15");
+}
+
+static void test_vavgsw (void)
+{
+    __asm__ __volatile__ ("vavgsw       17, 14, 15");
+}
+
+static void test_vmaxub (void)
+{
+    __asm__ __volatile__ ("vmaxub       17, 14, 15");
+}
+
+static void test_vmaxuh (void)
+{
+    __asm__ __volatile__ ("vmaxuh       17, 14, 15");
+}
+
+static void test_vmaxuw (void)
+{
+    __asm__ __volatile__ ("vmaxuw       17, 14, 15");
+}
+
+static void test_vmaxsb (void)
+{
+    __asm__ __volatile__ ("vmaxsb       17, 14, 15");
+}
+
+static void test_vmaxsh (void)
+{
+    __asm__ __volatile__ ("vmaxsh       17, 14, 15");
+}
+
+static void test_vmaxsw (void)
+{
+    __asm__ __volatile__ ("vmaxsw       17, 14, 15");
+}
+
+static void test_vminub (void)
+{
+    __asm__ __volatile__ ("vminub       17, 14, 15");
+}
+
+static void test_vminuh (void)
+{
+    __asm__ __volatile__ ("vminuh       17, 14, 15");
+}
+
+static void test_vminuw (void)
+{
+    __asm__ __volatile__ ("vminuw       17, 14, 15");
+}
+
+static void test_vminsb (void)
+{
+    __asm__ __volatile__ ("vminsb       17, 14, 15");
+}
+
+static void test_vminsh (void)
+{
+    __asm__ __volatile__ ("vminsh       17, 14, 15");
+}
+
+static void test_vminsw (void)
+{
+    __asm__ __volatile__ ("vminsw       17, 14, 15");
+}
+
+/* NULL-terminated table of the AltiVec two-source arithmetic ops. */
+static test_t tests_aa_ops_two[] = {
+    { &test_vaddubm         , "     vaddubm", },
+    { &test_vadduhm         , "     vadduhm", },
+    { &test_vadduwm         , "     vadduwm", },
+    { &test_vaddubs         , "     vaddubs", },
+    { &test_vadduhs         , "     vadduhs", },
+    { &test_vadduws         , "     vadduws", },
+    { &test_vaddsbs         , "     vaddsbs", },
+    { &test_vaddshs         , "     vaddshs", },
+    { &test_vaddsws         , "     vaddsws", },
+    { &test_vaddcuw         , "     vaddcuw", },
+    { &test_vsububm         , "     vsububm", },
+    { &test_vsubuhm         , "     vsubuhm", },
+    { &test_vsubuwm         , "     vsubuwm", },
+    { &test_vsububs         , "     vsububs", },
+    { &test_vsubuhs         , "     vsubuhs", },
+    { &test_vsubuws         , "     vsubuws", },
+    { &test_vsubcuw         , "     vsubcuw", },
+    { &test_vmuloub         , "     vmuloub", },
+    { &test_vmulouh         , "     vmulouh", },
+    { &test_vmulosb         , "     vmulosb", },
+    { &test_vmulosh         , "     vmulosh", },
+    { &test_vmuleub         , "     vmuleub", },
+    { &test_vmuleuh         , "     vmuleuh", },
+    { &test_vmulesb         , "     vmulesb", },
+    { &test_vmulesh         , "     vmulesh", },
+    { &test_vsumsws         , "     vsumsws", },
+    { &test_vsum2sws        , "    vsum2sws", },
+    { &test_vsum4ubs        , "    vsum4ubs", },
+    { &test_vsum4sbs        , "    vsum4sbs", },
+    { &test_vsum4shs        , "    vsum4shs", },
+    { &test_vavgub          , "      vavgub", },
+    { &test_vavguh          , "      vavguh", },
+    { &test_vavguw          , "      vavguw", },
+    { &test_vavgsb          , "      vavgsb", },
+    { &test_vavgsh          , "      vavgsh", },
+    { &test_vavgsw          , "      vavgsw", },
+    { &test_vmaxub          , "      vmaxub", },
+    { &test_vmaxuh          , "      vmaxuh", },
+    { &test_vmaxuw          , "      vmaxuw", },
+    { &test_vmaxsb          , "      vmaxsb", },
+    { &test_vmaxsh          , "      vmaxsh", },
+    { &test_vmaxsw          , "      vmaxsw", },
+    { &test_vminub          , "      vminub", },
+    { &test_vminuh          , "      vminuh", },
+    { &test_vminuw          , "      vminuw", },
+    { &test_vminsb          , "      vminsb", },
+    { &test_vminsh          , "      vminsh", },
+    { &test_vminsw          , "      vminsw", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+/* AltiVec two-source logical ops (bitwise, rotates, shifts, pack/merge,
+   whole-vector shifts): v17 <- op(v14, v15). */
+static void test_vand (void)
+{
+    __asm__ __volatile__ ("vand         17, 14, 15");
+}
+
+static void test_vor (void)
+{
+    __asm__ __volatile__ ("vor          17, 14, 15");
+}
+
+static void test_vxor (void)
+{
+    __asm__ __volatile__ ("vxor         17, 14, 15");
+}
+
+static void test_vandc (void)
+{
+    __asm__ __volatile__ ("vandc        17, 14, 15");
+}
+
+static void test_vnor (void)
+{
+    __asm__ __volatile__ ("vnor         17, 14, 15");
+}
+
+static void test_vrlb (void)
+{
+    __asm__ __volatile__ ("vrlb         17, 14, 15");
+}
+
+static void test_vrlh (void)
+{
+    __asm__ __volatile__ ("vrlh         17, 14, 15");
+}
+
+static void test_vrlw (void)
+{
+    __asm__ __volatile__ ("vrlw         17, 14, 15");
+}
+
+static void test_vslb (void)
+{
+    __asm__ __volatile__ ("vslb         17, 14, 15");
+}
+
+static void test_vslh (void)
+{
+    __asm__ __volatile__ ("vslh         17, 14, 15");
+}
+
+static void test_vslw (void)
+{
+    __asm__ __volatile__ ("vslw         17, 14, 15");
+}
+
+static void test_vsrb (void)
+{
+    __asm__ __volatile__ ("vsrb         17, 14, 15");
+}
+
+static void test_vsrh (void)
+{
+    __asm__ __volatile__ ("vsrh         17, 14, 15");
+}
+
+static void test_vsrw (void)
+{
+    __asm__ __volatile__ ("vsrw         17, 14, 15");
+}
+
+static void test_vsrab (void)
+{
+    __asm__ __volatile__ ("vsrab        17, 14, 15");
+}
+
+static void test_vsrah (void)
+{
+    __asm__ __volatile__ ("vsrah        17, 14, 15");
+}
+
+static void test_vsraw (void)
+{
+    __asm__ __volatile__ ("vsraw        17, 14, 15");
+}
+
+static void test_vpkuhum (void)
+{
+    __asm__ __volatile__ ("vpkuhum      17, 14, 15");
+}
+
+static void test_vpkuwum (void)
+{
+    __asm__ __volatile__ ("vpkuwum      17, 14, 15");
+}
+
+static void test_vpkuhus (void)
+{
+    __asm__ __volatile__ ("vpkuhus      17, 14, 15");
+}
+
+static void test_vpkuwus (void)
+{
+    __asm__ __volatile__ ("vpkuwus      17, 14, 15");
+}
+
+static void test_vpkshus (void)
+{
+    __asm__ __volatile__ ("vpkshus      17, 14, 15");
+}
+
+static void test_vpkswus (void)
+{
+    __asm__ __volatile__ ("vpkswus      17, 14, 15");
+}
+
+static void test_vpkshss (void)
+{
+    __asm__ __volatile__ ("vpkshss      17, 14, 15");
+}
+
+static void test_vpkswss (void)
+{
+    __asm__ __volatile__ ("vpkswss      17, 14, 15");
+}
+
+static void test_vpkpx (void)
+{
+    __asm__ __volatile__ ("vpkpx        17, 14, 15");
+}
+
+static void test_vmrghb (void)
+{
+    __asm__ __volatile__ ("vmrghb       17, 14, 15");
+}
+
+static void test_vmrghh (void)
+{
+    __asm__ __volatile__ ("vmrghh       17, 14, 15");
+}
+
+static void test_vmrghw (void)
+{
+    __asm__ __volatile__ ("vmrghw       17, 14, 15");
+}
+
+static void test_vmrglb (void)
+{
+    __asm__ __volatile__ ("vmrglb       17, 14, 15");
+}
+
+static void test_vmrglh (void)
+{
+    __asm__ __volatile__ ("vmrglh       17, 14, 15");
+}
+
+static void test_vmrglw (void)
+{
+    __asm__ __volatile__ ("vmrglw       17, 14, 15");
+}
+
+static void test_vsl (void)
+{
+    __asm__ __volatile__ ("vsl          17, 14, 15");
+}
+
+static void test_vsr (void)
+{
+    __asm__ __volatile__ ("vsr          17, 14, 15");
+}
+
+static void test_vslo (void)
+{
+    __asm__ __volatile__ ("vslo         17, 14, 15");
+}
+
+static void test_vsro (void)
+{
+    __asm__ __volatile__ ("vsro         17, 14, 15");
+}
+
+/* NULL-terminated table of the AltiVec two-source logical ops. */
+static test_t tests_al_ops_two[] = {
+    { &test_vand            , "        vand", },
+    { &test_vor             , "         vor", },
+    { &test_vxor            , "        vxor", },
+    { &test_vandc           , "       vandc", },
+    { &test_vnor            , "        vnor", },
+    { &test_vrlb            , "        vrlb", },
+    { &test_vrlh            , "        vrlh", },
+    { &test_vrlw            , "        vrlw", },
+    { &test_vslb            , "        vslb", },
+    { &test_vslh            , "        vslh", },
+    { &test_vslw            , "        vslw", },
+    { &test_vsrb            , "        vsrb", },
+    { &test_vsrh            , "        vsrh", },
+    { &test_vsrw            , "        vsrw", },
+    { &test_vsrab           , "       vsrab", },
+    { &test_vsrah           , "       vsrah", },
+    { &test_vsraw           , "       vsraw", },
+    { &test_vpkuhum         , "     vpkuhum", },
+    { &test_vpkuwum         , "     vpkuwum", },
+    { &test_vpkuhus         , "     vpkuhus", },
+    { &test_vpkuwus         , "     vpkuwus", },
+    { &test_vpkshus         , "     vpkshus", },
+    { &test_vpkswus         , "     vpkswus", },
+    { &test_vpkshss         , "     vpkshss", },
+    { &test_vpkswss         , "     vpkswss", },
+    { &test_vpkpx           , "       vpkpx", },
+    { &test_vmrghb          , "      vmrghb", },
+    { &test_vmrghh          , "      vmrghh", },
+    { &test_vmrghw          , "      vmrghw", },
+    { &test_vmrglb          , "      vmrglb", },
+    { &test_vmrglh          , "      vmrglh", },
+    { &test_vmrglw          , "      vmrglw", },
+    { &test_vsl             , "         vsl", },
+    { &test_vsr             , "         vsr", },
+    { &test_vslo            , "        vslo", },
+    { &test_vsro            , "        vsro", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+/* AltiVec one-source unpack ops (high/low signed and pixel forms):
+   v17 <- op(v14). */
+static void test_vupkhsb (void)
+{
+    __asm__ __volatile__ ("vupkhsb      17, 14");
+}
+
+static void test_vupkhsh (void)
+{
+    __asm__ __volatile__ ("vupkhsh      17, 14");
+}
+
+static void test_vupkhpx (void)
+{
+    __asm__ __volatile__ ("vupkhpx      17, 14");
+}
+
+static void test_vupklsb (void)
+{
+    __asm__ __volatile__ ("vupklsb      17, 14");
+}
+
+static void test_vupklsh (void)
+{
+    __asm__ __volatile__ ("vupklsh      17, 14");
+}
+
+static void test_vupklpx (void)
+{
+    __asm__ __volatile__ ("vupklpx      17, 14");
+}
+
+/* NULL-terminated table of the AltiVec one-source unpack ops. */
+static test_t tests_al_ops_one[] = {
+    { &test_vupkhsb         , "     vupkhsb", },
+    { &test_vupkhsh         , "     vupkhsh", },
+    { &test_vupkhpx         , "     vupkhpx", },
+    { &test_vupklsb         , "     vupklsb", },
+    { &test_vupklsh         , "     vupklsh", },
+    { &test_vupklpx         , "     vupklpx", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+/* AltiVec two-source compare ops (greater-than and equal, per lane):
+   v17 <- op(v14, v15), no CR6 update (non-recording forms). */
+static void test_vcmpgtub (void)
+{
+    __asm__ __volatile__ ("vcmpgtub     17, 14, 15");
+}
+
+static void test_vcmpgtuh (void)
+{
+    __asm__ __volatile__ ("vcmpgtuh     17, 14, 15");
+}
+
+static void test_vcmpgtuw (void)
+{
+    __asm__ __volatile__ ("vcmpgtuw     17, 14, 15");
+}
+
+static void test_vcmpgtsb (void)
+{
+    __asm__ __volatile__ ("vcmpgtsb     17, 14, 15");
+}
+
+static void test_vcmpgtsh (void)
+{
+    __asm__ __volatile__ ("vcmpgtsh     17, 14, 15");
+}
+
+static void test_vcmpgtsw (void)
+{
+    __asm__ __volatile__ ("vcmpgtsw     17, 14, 15");
+}
+
+static void test_vcmpequb (void)
+{
+    __asm__ __volatile__ ("vcmpequb     17, 14, 15");
+}
+
+static void test_vcmpequh (void)
+{
+    __asm__ __volatile__ ("vcmpequh     17, 14, 15");
+}
+
+static void test_vcmpequw (void)
+{
+    __asm__ __volatile__ ("vcmpequw     17, 14, 15");
+}
+
+/* NULL-terminated table of the AltiVec compare ops. */
+static test_t tests_ac_ops_two[] = {
+    { &test_vcmpgtub        , "    vcmpgtub", },
+    { &test_vcmpgtuh        , "    vcmpgtuh", },
+    { &test_vcmpgtuw        , "    vcmpgtuw", },
+    { &test_vcmpgtsb        , "    vcmpgtsb", },
+    { &test_vcmpgtsh        , "    vcmpgtsh", },
+    { &test_vcmpgtsw        , "    vcmpgtsw", },
+    { &test_vcmpequb        , "    vcmpequb", },
+    { &test_vcmpequh        , "    vcmpequh", },
+    { &test_vcmpequw        , "    vcmpequw", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+/* Recording ('.') variants of the AltiVec compare ops, which also set CR6. */
+static void test_vcmpgtub_ (void)
+{
+    __asm__ __volatile__ ("vcmpgtub.    17, 14, 15");
+}
+
+static void test_vcmpgtuh_ (void)
+{
+    __asm__ __volatile__ ("vcmpgtuh.    17, 14, 15");
+}
+
+static void test_vcmpgtuw_ (void)
+{
+    __asm__ __volatile__ ("vcmpgtuw.    17, 14, 15");
+}
+
+static void test_vcmpgtsb_ (void)
+{
+    __asm__ __volatile__ ("vcmpgtsb.    17, 14, 15");
+}
+
+static void test_vcmpgtsh_ (void)
+{
+    __asm__ __volatile__ ("vcmpgtsh.    17, 14, 15");
+}
+
+static void test_vcmpgtsw_ (void)
+{
+    __asm__ __volatile__ ("vcmpgtsw.    17, 14, 15");
+}
+
+static void test_vcmpequb_ (void)
+{
+    __asm__ __volatile__ ("vcmpequb.    17, 14, 15");
+}
+
+static void test_vcmpequh_ (void)
+{
+    __asm__ __volatile__ ("vcmpequh.    17, 14, 15");
+}
+
+static void test_vcmpequw_ (void)
+{
+    __asm__ __volatile__ ("vcmpequw.    17, 14, 15");
+}
+
+/* NULL-terminated table of the recording AltiVec compare ops. */
+static test_t tests_acr_ops_two[] = {
+    { &test_vcmpgtub_       , "   vcmpgtub.", },
+    { &test_vcmpgtuh_       , "   vcmpgtuh.", },
+    { &test_vcmpgtuw_       , "   vcmpgtuw.", },
+    { &test_vcmpgtsb_       , "   vcmpgtsb.", },
+    { &test_vcmpgtsh_       , "   vcmpgtsh.", },
+    { &test_vcmpgtsw_       , "   vcmpgtsw.", },
+    { &test_vcmpequb_       , "   vcmpequb.", },
+    { &test_vcmpequh_       , "   vcmpequh.", },
+    { &test_vcmpequw_       , "   vcmpequw.", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+static void test_vmaddfp (void)
+{
+    __asm__ __volatile__ ("vmaddfp      17, 14, 15, 16");
+}
+
+static void test_vnmsubfp (void)
+{
+    __asm__ __volatile__ ("vnmsubfp     17, 14, 15, 16");
+}
+
+static test_t tests_afa_ops_three[] = {
+    { &test_vmaddfp         , "     vmaddfp", },
+    { &test_vnmsubfp        , "    vnmsubfp", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+static void test_vaddfp (void)
+{
+    __asm__ __volatile__ ("vaddfp       17, 14, 15");
+}
+
+static void test_vsubfp (void)
+{
+    __asm__ __volatile__ ("vsubfp       17, 14, 15");
+}
+
+static void test_vmaxfp (void)
+{
+    __asm__ __volatile__ ("vmaxfp       17, 14, 15");
+}
+
+static void test_vminfp (void)
+{
+    __asm__ __volatile__ ("vminfp       17, 14, 15");
+}
+
+static test_t tests_afa_ops_two[] = {
+    { &test_vaddfp          , "      vaddfp", },
+    { &test_vsubfp          , "      vsubfp", },
+    { &test_vmaxfp          , "      vmaxfp", },
+    { &test_vminfp          , "      vminfp", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+static void test_vrfin (void)
+{
+    __asm__ __volatile__ ("vrfin        17, 14");
+}
+
+static void test_vrfiz (void)
+{
+    __asm__ __volatile__ ("vrfiz        17, 14");
+}
+
+static void test_vrfip (void)
+{
+    __asm__ __volatile__ ("vrfip        17, 14");
+}
+
+static void test_vrfim (void)
+{
+    __asm__ __volatile__ ("vrfim        17, 14");
+}
+
+static void test_vrefp (void)
+{
+    __asm__ __volatile__ ("vrefp        17, 14");
+}
+
+static void test_vrsqrtefp (void)
+{
+    __asm__ __volatile__ ("vrsqrtefp    17, 14");
+}
+
+static void test_vlogefp (void)
+{
+    __asm__ __volatile__ ("vlogefp      17, 14");
+}
+
+static void test_vexptefp (void)
+{
+    __asm__ __volatile__ ("vexptefp     17, 14");
+}
+
+static test_t tests_afa_ops_one[] = {
+    { &test_vrfin           , "       vrfin", },
+    { &test_vrfiz           , "       vrfiz", },
+    { &test_vrfip           , "       vrfip", },
+    { &test_vrfim           , "       vrfim", },
+    { &test_vrefp           , "       vrefp", },
+    { &test_vrsqrtefp       , "   vrsqrtefp", },
+    { &test_vlogefp         , "     vlogefp", },
+    { &test_vexptefp        , "    vexptefp", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+static void test_vcmpgtfp (void)
+{
+    __asm__ __volatile__ ("vcmpgtfp     17, 14, 15");
+}
+
+static void test_vcmpeqfp (void)
+{
+    __asm__ __volatile__ ("vcmpeqfp     17, 14, 15");
+}
+
+static void test_vcmpgefp (void)
+{
+    __asm__ __volatile__ ("vcmpgefp     17, 14, 15");
+}
+
+static void test_vcmpbfp (void)
+{
+    __asm__ __volatile__ ("vcmpbfp      17, 14, 15");
+}
+
+static test_t tests_afc_ops_two[] = {
+    { &test_vcmpgtfp        , "    vcmpgtfp", },
+    { &test_vcmpeqfp        , "    vcmpeqfp", },
+    { &test_vcmpgefp        , "    vcmpgefp", },
+    { &test_vcmpbfp         , "     vcmpbfp", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (HAS_ALTIVEC)
+static void test_vcmpgtfp_ (void)
+{
+    __asm__ __volatile__ ("vcmpgtfp.    17, 14, 15");
+}
+
+static void test_vcmpeqfp_ (void)
+{
+    __asm__ __volatile__ ("vcmpeqfp.    17, 14, 15");
+}
+
+static void test_vcmpgefp_ (void)
+{
+    __asm__ __volatile__ ("vcmpgefp.    17, 14, 15");
+}
+
+static void test_vcmpbfp_ (void)
+{
+    __asm__ __volatile__ ("vcmpbfp.     17, 14, 15");
+}
+
+static test_t tests_afcr_ops_two[] = {
+    { &test_vcmpgtfp_       , "   vcmpgtfp.", },
+    { &test_vcmpeqfp_       , "   vcmpeqfp.", },
+    { &test_vcmpgefp_       , "   vcmpgefp.", },
+    { &test_vcmpbfp_        , "    vcmpbfp.", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (IS_PPC405)
+static void test_macchw (void)
+{
+    __asm__ __volatile__ ("macchw       17, 14, 15");
+}
+
+static void test_macchwo (void)
+{
+    __asm__ __volatile__ ("macchwo      17, 14, 15");
+}
+
+static void test_macchws (void)
+{
+    __asm__ __volatile__ ("macchws      17, 14, 15");
+}
+
+static void test_macchwso (void)
+{
+    __asm__ __volatile__ ("macchwso     17, 14, 15");
+}
+
+static void test_macchwsu (void)
+{
+    __asm__ __volatile__ ("macchwsu     17, 14, 15");
+}
+
+static void test_macchwsuo (void)
+{
+    __asm__ __volatile__ ("macchwsuo    17, 14, 15");
+}
+
+static void test_macchwu (void)
+{
+    __asm__ __volatile__ ("macchwu      17, 14, 15");
+}
+
+static void test_macchwuo (void)
+{
+    __asm__ __volatile__ ("macchwuo     17, 14, 15");
+}
+
+static void test_machhw (void)
+{
+    __asm__ __volatile__ ("machhw       17, 14, 15");
+}
+
+static void test_machhwo (void)
+{
+    __asm__ __volatile__ ("machhwo      17, 14, 15");
+}
+
+static void test_machhws (void)
+{
+    __asm__ __volatile__ ("machhws      17, 14, 15");
+}
+
+static void test_machhwso (void)
+{
+    __asm__ __volatile__ ("machhwso     17, 14, 15");
+}
+
+static void test_machhwsu (void)
+{
+    __asm__ __volatile__ ("machhwsu     17, 14, 15");
+}
+
+static void test_machhwsuo (void)
+{
+    __asm__ __volatile__ ("machhwsuo    17, 14, 15");
+}
+
+static void test_machhwu (void)
+{
+    __asm__ __volatile__ ("machhwu      17, 14, 15");
+}
+
+static void test_machhwuo (void)
+{
+    __asm__ __volatile__ ("machhwuo     17, 14, 15");
+}
+
+static void test_maclhw (void)
+{
+    __asm__ __volatile__ ("maclhw       17, 14, 15");
+}
+
+static void test_maclhwo (void)
+{
+    __asm__ __volatile__ ("maclhwo      17, 14, 15");
+}
+
+static void test_maclhws (void)
+{
+    __asm__ __volatile__ ("maclhws      17, 14, 15");
+}
+
+static void test_maclhwso (void)
+{
+    __asm__ __volatile__ ("maclhwso     17, 14, 15");
+}
+
+static void test_maclhwsu (void)
+{
+    __asm__ __volatile__ ("maclhwsu     17, 14, 15");
+}
+
+static void test_maclhwsuo (void)
+{
+    __asm__ __volatile__ ("maclhwsuo    17, 14, 15");
+}
+
+static void test_maclhwu (void)
+{
+    __asm__ __volatile__ ("maclhwu      17, 14, 15");
+}
+
+static void test_maclhwuo (void)
+{
+    __asm__ __volatile__ ("maclhwuo     17, 14, 15");
+}
+
+static void test_mulchw (void)
+{
+    __asm__ __volatile__ ("mulchw       17, 14, 15");
+}
+
+static void test_mulchwu (void)
+{
+    __asm__ __volatile__ ("mulchwu      17, 14, 15");
+}
+
+static void test_mulhhw (void)
+{
+    __asm__ __volatile__ ("mulhhw       17, 14, 15");
+}
+
+static void test_mulhhwu (void)
+{
+    __asm__ __volatile__ ("mulhhwu      17, 14, 15");
+}
+
+static void test_mullhw (void)
+{
+    __asm__ __volatile__ ("mullhw       17, 14, 15");
+}
+
+static void test_mullhwu (void)
+{
+    __asm__ __volatile__ ("mullhwu      17, 14, 15");
+}
+
+static void test_nmacchw (void)
+{
+    __asm__ __volatile__ ("nmacchw      17, 14, 15");
+}
+
+static void test_nmacchwo (void)
+{
+    __asm__ __volatile__ ("nmacchwo     17, 14, 15");
+}
+
+static void test_nmacchws (void)
+{
+    __asm__ __volatile__ ("nmacchws     17, 14, 15");
+}
+
+static void test_nmacchwso (void)
+{
+    __asm__ __volatile__ ("nmacchwso    17, 14, 15");
+}
+
+static void test_nmachhw (void)
+{
+    __asm__ __volatile__ ("nmachhw      17, 14, 15");
+}
+
+static void test_nmachhwo (void)
+{
+    __asm__ __volatile__ ("nmachhwo     17, 14, 15");
+}
+
+static void test_nmachhws (void)
+{
+    __asm__ __volatile__ ("nmachhws     17, 14, 15");
+}
+
+static void test_nmachhwso (void)
+{
+    __asm__ __volatile__ ("nmachhwso    17, 14, 15");
+}
+
+static void test_nmaclhw (void)
+{
+    __asm__ __volatile__ ("nmaclhw      17, 14, 15");
+}
+
+static void test_nmaclhwo (void)
+{
+    __asm__ __volatile__ ("nmaclhwo     17, 14, 15");
+}
+
+static void test_nmaclhws (void)
+{
+    __asm__ __volatile__ ("nmaclhws     17, 14, 15");
+}
+
+static void test_nmaclhwso (void)
+{
+    __asm__ __volatile__ ("nmaclhwso    17, 14, 15");
+}
+
+static test_t tests_p4m_ops_two[] = {
+    { &test_macchw          , "      macchw", },
+    { &test_macchwo         , "     macchwo", },
+    { &test_macchws         , "     macchws", },
+    { &test_macchwso        , "    macchwso", },
+    { &test_macchwsu        , "    macchwsu", },
+    { &test_macchwsuo       , "   macchwsuo", },
+    { &test_macchwu         , "     macchwu", },
+    { &test_macchwuo        , "    macchwuo", },
+    { &test_machhw          , "      machhw", },
+    { &test_machhwo         , "     machhwo", },
+    { &test_machhws         , "     machhws", },
+    { &test_machhwso        , "    machhwso", },
+    { &test_machhwsu        , "    machhwsu", },
+    { &test_machhwsuo       , "   machhwsuo", },
+    { &test_machhwu         , "     machhwu", },
+    { &test_machhwuo        , "    machhwuo", },
+    { &test_maclhw          , "      maclhw", },
+    { &test_maclhwo         , "     maclhwo", },
+    { &test_maclhws         , "     maclhws", },
+    { &test_maclhwso        , "    maclhwso", },
+    { &test_maclhwsu        , "    maclhwsu", },
+    { &test_maclhwsuo       , "   maclhwsuo", },
+    { &test_maclhwu         , "     maclhwu", },
+    { &test_maclhwuo        , "    maclhwuo", },
+    { &test_mulchw          , "      mulchw", },
+    { &test_mulchwu         , "     mulchwu", },
+    { &test_mulhhw          , "      mulhhw", },
+    { &test_mulhhwu         , "     mulhhwu", },
+    { &test_mullhw          , "      mullhw", },
+    { &test_mullhwu         , "     mullhwu", },
+    { &test_nmacchw         , "     nmacchw", },
+    { &test_nmacchwo        , "    nmacchwo", },
+    { &test_nmacchws        , "    nmacchws", },
+    { &test_nmacchwso       , "   nmacchwso", },
+    { &test_nmachhw         , "     nmachhw", },
+    { &test_nmachhwo        , "    nmachhwo", },
+    { &test_nmachhws        , "    nmachhws", },
+    { &test_nmachhwso       , "   nmachhwso", },
+    { &test_nmaclhw         , "     nmaclhw", },
+    { &test_nmaclhwo        , "    nmaclhwo", },
+    { &test_nmaclhws        , "    nmaclhws", },
+    { &test_nmaclhwso       , "   nmaclhwso", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (IS_PPC405) */
+
+#if defined (IS_PPC405)
+static void test_macchw_ (void)
+{
+    __asm__ __volatile__ ("macchw.      17, 14, 15");
+}
+
+static void test_macchwo_ (void)
+{
+    __asm__ __volatile__ ("macchwo.     17, 14, 15");
+}
+
+static void test_macchws_ (void)
+{
+    __asm__ __volatile__ ("macchws.     17, 14, 15");
+}
+
+static void test_macchwso_ (void)
+{
+    __asm__ __volatile__ ("macchwso.    17, 14, 15");
+}
+
+static void test_macchwsu_ (void)
+{
+    __asm__ __volatile__ ("macchwsu.    17, 14, 15");
+}
+
+static void test_macchwsuo_ (void)
+{
+    __asm__ __volatile__ ("macchwsuo.   17, 14, 15");
+}
+
+static void test_macchwu_ (void)
+{
+    __asm__ __volatile__ ("macchwu.     17, 14, 15");
+}
+
+static void test_macchwuo_ (void)
+{
+    __asm__ __volatile__ ("macchwuo.    17, 14, 15");
+}
+
+static void test_machhw_ (void)
+{
+    __asm__ __volatile__ ("machhw.      17, 14, 15");
+}
+
+static void test_machhwo_ (void)
+{
+    __asm__ __volatile__ ("machhwo.     17, 14, 15");
+}
+
+static void test_machhws_ (void)
+{
+    __asm__ __volatile__ ("machhws.     17, 14, 15");
+}
+
+static void test_machhwso_ (void)
+{
+    __asm__ __volatile__ ("machhwso.    17, 14, 15");
+}
+
+static void test_machhwsu_ (void)
+{
+    __asm__ __volatile__ ("machhwsu.    17, 14, 15");
+}
+
+static void test_machhwsuo_ (void)
+{
+    __asm__ __volatile__ ("machhwsuo.   17, 14, 15");
+}
+
+static void test_machhwu_ (void)
+{
+    __asm__ __volatile__ ("machhwu.     17, 14, 15");
+}
+
+static void test_machhwuo_ (void)
+{
+    __asm__ __volatile__ ("machhwuo.    17, 14, 15");
+}
+
+static void test_maclhw_ (void)
+{
+    __asm__ __volatile__ ("maclhw.      17, 14, 15");
+}
+
+static void test_maclhwo_ (void)
+{
+    __asm__ __volatile__ ("maclhwo.     17, 14, 15");
+}
+
+static void test_maclhws_ (void)
+{
+    __asm__ __volatile__ ("maclhws.     17, 14, 15");
+}
+
+static void test_maclhwso_ (void)
+{
+    __asm__ __volatile__ ("maclhwso.    17, 14, 15");
+}
+
+static void test_maclhwsu_ (void)
+{
+    __asm__ __volatile__ ("maclhwsu.    17, 14, 15");
+}
+
+static void test_maclhwsuo_ (void)
+{
+    __asm__ __volatile__ ("maclhwsuo.   17, 14, 15");
+}
+
+static void test_maclhwu_ (void)
+{
+    __asm__ __volatile__ ("maclhwu.     17, 14, 15");
+}
+
+static void test_maclhwuo_ (void)
+{
+    __asm__ __volatile__ ("maclhwuo.    17, 14, 15");
+}
+
+static void test_mulchw_ (void)
+{
+    __asm__ __volatile__ ("mulchw.      17, 14, 15");
+}
+
+static void test_mulchwu_ (void)
+{
+    __asm__ __volatile__ ("mulchwu.     17, 14, 15");
+}
+
+static void test_mulhhw_ (void)
+{
+    __asm__ __volatile__ ("mulhhw.      17, 14, 15");
+}
+
+static void test_mulhhwu_ (void)
+{
+    __asm__ __volatile__ ("mulhhwu.     17, 14, 15");
+}
+
+static void test_mullhw_ (void)
+{
+    __asm__ __volatile__ ("mullhw.      17, 14, 15");
+}
+
+static void test_mullhwu_ (void)
+{
+    __asm__ __volatile__ ("mullhwu.     17, 14, 15");
+}
+
+static void test_nmacchw_ (void)
+{
+    __asm__ __volatile__ ("nmacchw.     17, 14, 15");
+}
+
+static void test_nmacchwo_ (void)
+{
+    __asm__ __volatile__ ("nmacchwo.    17, 14, 15");
+}
+
+static void test_nmacchws_ (void)
+{
+    __asm__ __volatile__ ("nmacchws.    17, 14, 15");
+}
+
+static void test_nmacchwso_ (void)
+{
+    __asm__ __volatile__ ("nmacchwso.   17, 14, 15");
+}
+
+static void test_nmachhw_ (void)
+{
+    __asm__ __volatile__ ("nmachhw.     17, 14, 15");
+}
+
+static void test_nmachhwo_ (void)
+{
+    __asm__ __volatile__ ("nmachhwo.    17, 14, 15");
+}
+
+static void test_nmachhws_ (void)
+{
+    __asm__ __volatile__ ("nmachhws.    17, 14, 15");
+}
+
+static void test_nmachhwso_ (void)
+{
+    __asm__ __volatile__ ("nmachhwso.   17, 14, 15");
+}
+
+static void test_nmaclhw_ (void)
+{
+    __asm__ __volatile__ ("nmaclhw.     17, 14, 15");
+}
+
+static void test_nmaclhwo_ (void)
+{
+    __asm__ __volatile__ ("nmaclhwo.    17, 14, 15");
+}
+
+static void test_nmaclhws_ (void)
+{
+    __asm__ __volatile__ ("nmaclhws.    17, 14, 15");
+}
+
+static void test_nmaclhwso_ (void)
+{
+    __asm__ __volatile__ ("nmaclhwso.   17, 14, 15");
+}
+
+static test_t tests_p4mc_ops_two[] = {
+    { &test_macchw_         , "     macchw.", },
+    { &test_macchwo_        , "    macchwo.", },
+    { &test_macchws_        , "    macchws.", },
+    { &test_macchwso_       , "   macchwso.", },
+    { &test_macchwsu_       , "   macchwsu.", },
+    { &test_macchwsuo_      , "  macchwsuo.", },
+    { &test_macchwu_        , "    macchwu.", },
+    { &test_macchwuo_       , "   macchwuo.", },
+    { &test_machhw_         , "     machhw.", },
+    { &test_machhwo_        , "    machhwo.", },
+    { &test_machhws_        , "    machhws.", },
+    { &test_machhwso_       , "   machhwso.", },
+    { &test_machhwsu_       , "   machhwsu.", },
+    { &test_machhwsuo_      , "  machhwsuo.", },
+    { &test_machhwu_        , "    machhwu.", },
+    { &test_machhwuo_       , "   machhwuo.", },
+    { &test_maclhw_         , "     maclhw.", },
+    { &test_maclhwo_        , "    maclhwo.", },
+    { &test_maclhws_        , "    maclhws.", },
+    { &test_maclhwso_       , "   maclhwso.", },
+    { &test_maclhwsu_       , "   maclhwsu.", },
+    { &test_maclhwsuo_      , "  maclhwsuo.", },
+    { &test_maclhwu_        , "    maclhwu.", },
+    { &test_maclhwuo_       , "   maclhwuo.", },
+    { &test_mulchw_         , "     mulchw.", },
+    { &test_mulchwu_        , "    mulchwu.", },
+    { &test_mulhhw_         , "     mulhhw.", },
+    { &test_mulhhwu_        , "    mulhhwu.", },
+    { &test_mullhw_         , "     mullhw.", },
+    { &test_mullhwu_        , "    mullhwu.", },
+    { &test_nmacchw_        , "    nmacchw.", },
+    { &test_nmacchwo_       , "   nmacchwo.", },
+    { &test_nmacchws_       , "   nmacchws.", },
+    { &test_nmacchwso_      , "  nmacchwso.", },
+    { &test_nmachhw_        , "    nmachhw.", },
+    { &test_nmachhwo_       , "   nmachhwo.", },
+    { &test_nmachhws_       , "   nmachhws.", },
+    { &test_nmachhwso_      , "  nmachhwso.", },
+    { &test_nmaclhw_        , "    nmaclhw.", },
+    { &test_nmaclhwo_       , "   nmaclhwo.", },
+    { &test_nmaclhws_       , "   nmaclhws.", },
+    { &test_nmaclhwso_      , "  nmaclhwso.", },
+    { NULL,                   NULL,           },
+};
+#endif /* defined (IS_PPC405) */
+
+static test_table_t all_tests[] = {
+    {
+        tests_ia_ops_two      ,
+        "PPC integer arithmetic instructions with two arguments",
+        0x00010102,
+    },
+    {
+        tests_iar_ops_two     ,
+        "PPC integer instructions with two arguments with flags update",
+        0x01010102,
+    },
+    {
+        tests_il_ops_two      ,
+        "PPC integer logical instructions with two arguments",
+        0x00010202,
+    },
+    {
+        tests_ilr_ops_two     ,
+        "PPC integer logical instructions with two arguments with flags update",
+        0x01010202,
+    },
+    {
+        tests_icr_ops_two     ,
+        "PPC integer compare instructions (two arguments)",
+        0x01010304,
+    },
+    {
+        tests_icr_ops_two_i16 ,
+        "PPC integer compare with immediate instructions (two arguments)",
+        0x01010304,
+    },
+    {
+        tests_ia_ops_two_i16  ,
+        "PPC integer arithmetic instructions\n    with one register + one 16 bits immediate arguments",
+        0x00010106,
+    },
+    {
+        tests_iar_ops_two_i16 ,
+        "PPC integer arithmetic instructions\n    with one register + one 16 bits immediate arguments with flags update",
+        0x01010106,
+    },
+    {
+        tests_il_ops_two_i16  ,
+        "PPC integer logical instructions\n    with one register + one 16 bits immediate arguments",
+        0x00010206,
+    },
+    {
+        tests_ilr_ops_two_i16 ,
+        "PPC integer logical instructions\n    with one register + one 16 bits immediate arguments with flags update",
+        0x01010206,
+    },
+    {
+        tests_crl_ops_two     ,
+        "PPC condition register logical instructions - two operands",
+        0x01000602,
+    },
+    {
+        tests_ia_ops_one      ,
+        "PPC integer arithmetic instructions with one argument",
+        0x00010101,
+    },
+    {
+        tests_iar_ops_one     ,
+        "PPC integer arithmetic instructions with one argument with flags update",
+        0x01010101,
+    },
+    {
+        tests_il_ops_one      ,
+        "PPC integer logical instructions with one argument",
+        0x00010201,
+    },
+    {
+        tests_ilr_ops_one     ,
+        "PPC integer logical instructions with one argument with flags update",
+        0x01010201,
+    },
+    {
+        tests_il_ops_spe      ,
+        "PPC logical instructions with special forms",
+        0x00010207,
+    },
+    {
+        tests_ilr_ops_spe     ,
+        "PPC logical instructions with special forms with flags update",
+        0x01010207,
+    },
+#if !defined (NO_FLOAT)
+    {
+        tests_fa_ops_three    ,
+        "PPC floating point arithmetic instructions with three arguments",
+        0x00020103,
+    },
+#endif /* !defined (NO_FLOAT) */
+#if !defined (NO_FLOAT)
+    {
+        tests_far_ops_three   ,
+        "PPC floating point arithmetic instructions\n    with three arguments with flags update",
+        0x01020103,
+    },
+#endif /* !defined (NO_FLOAT) */
+#if !defined (NO_FLOAT)
+    {
+        tests_fa_ops_two      ,
+        "PPC floating point arithmetic instructions with two arguments",
+        0x00020102,
+    },
+#endif /* !defined (NO_FLOAT) */
+#if !defined (NO_FLOAT)
+    {
+        tests_far_ops_two     ,
+        "PPC floating point arithmetic instructions\n    with two arguments with flags update",
+        0x01020102,
+    },
+#endif /* !defined (NO_FLOAT) */
+#if !defined (NO_FLOAT)
+    {
+        tests_fcr_ops_two     ,
+        "PPC floating point compare instructions (two arguments)",
+        0x01020304,
+    },
+#endif /* !defined (NO_FLOAT) */
+#if !defined (NO_FLOAT)
+    {
+        tests_fa_ops_one      ,
+        "PPC floating point arithmetic instructions with one argument",
+        0x00020101,
+    },
+#endif /* !defined (NO_FLOAT) */
+#if !defined (NO_FLOAT)
+    {
+        tests_far_ops_one     ,
+        "PPC floating point arithmetic instructions\n    with one argument with flags update",
+        0x01020101,
+    },
+#endif /* !defined (NO_FLOAT) */
+#if !defined (NO_FLOAT)
+    {
+        tests_fl_ops_spe      ,
+        "PPC floating point status register manipulation instructions",
+        0x00020207,
+    },
+#endif /* !defined (NO_FLOAT) */
+#if !defined (NO_FLOAT)
+    {
+        tests_flr_ops_spe     ,
+        "PPC floating point status register manipulation instructions\n  with flags update",
+        0x01020207,
+    },
+#endif /* !defined (NO_FLOAT) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_aa_ops_three    ,
+        "PPC altivec integer arithmetic instructions with three arguments",
+        0x00040103,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_al_ops_three    ,
+        "PPC altivec integer logical instructions with three arguments",
+        0x00040203,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_aa_ops_two      ,
+        "PPC altivec integer arithmetic instructions with two arguments",
+        0x00040102,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_al_ops_two      ,
+        "PPC altivec integer logical instructions with two arguments",
+        0x00040202,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_al_ops_one      ,
+        "PPC altivec integer logical instructions with one argument",
+        0x00040201,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_ac_ops_two      ,
+        "Altivec integer compare instructions",
+        0x00040302,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_acr_ops_two     ,
+        "Altivec integer compare instructions with flags update",
+        0x01040302,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_afa_ops_three   ,
+        "Altivec floating point arithmetic instructions with three arguments",
+        0x00050103,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_afa_ops_two     ,
+        "Altivec floating point arithmetic instructions with two arguments",
+        0x00050102,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_afa_ops_one     ,
+        "Altivec floating point arithmetic instructions with one argument",
+        0x00050101,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_afc_ops_two     ,
+        "Altivec floating point compare instructions",
+        0x00050302,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (HAS_ALTIVEC)
+    {
+        tests_afcr_ops_two    ,
+        "Altivec floating point compare instructions with flags update",
+        0x01050302,
+    },
+#endif /* defined (HAS_ALTIVEC) */
+#if defined (IS_PPC405)
+    {
+        tests_p4m_ops_two     ,
+        "PPC 405 mac instructions with three arguments",
+        0x00030102,
+    },
+#endif /* defined (IS_PPC405) */
+#if defined (IS_PPC405)
+    {
+        tests_p4mc_ops_two    ,
+        "PPC 405 mac instructions with three arguments with flags update",
+        0x01030102,
+    },
+#endif /* defined (IS_PPC405) */
+    { NULL,                   NULL,               0x00000000, },
+};
+
+// END #include "ops-ppc.c"
+
+
+static int verbose = 0;
+
+static double *fargs;
+static int nb_fargs;
+static uint32_t *iargs;
+static int nb_iargs;
+static uint16_t *ii16;
+static int nb_ii16;
+
+static inline void register_farg (void *farg,
+                                  int s, uint16_t _exp, uint64_t mant)
+{
+    uint64_t tmp;
+
+    tmp = ((uint64_t)s << 63) | ((uint64_t)_exp << 52) | mant;
+    *(uint64_t *)farg = tmp;
+    AB_DPRINTF("%d %03x %013llx => %016llx %0e\n",
+               s, _exp, mant, *(uint64_t *)farg, *(double *)farg);
+}
+
+static void build_fargs_table (void)
+{
+    /* Sign goes from zero to one
+     * Exponent goes from 0 to ((1 << 11) - 1)
+     * Mantissa goes from 1 to ((1 << 52) - 1)
+     * + special values:
+     * +0.0      : 0 0x000 0x0000000000000
+     * -0.0      : 1 0x000 0x0000000000000
+     * +infinity : 0 0x7FF 0x0000000000000
+     * -infinity : 1 0x7FF 0x0000000000000
+     * +SNaN     : 0 0x7FF 0x7FFFFFFFFFFFF
+     * -SNaN     : 1 0x7FF 0x7FFFFFFFFFFFF
+     * +QNaN     : 0 0x7FF 0x8000000000000
+     * -QNaN     : 1 0x7FF 0x8000000000000
+     * (8 values)
+     */
+    uint64_t mant;
+    uint16_t _exp, e0, e1;
+    int s;
+    int i;
+
+    fargs = my_malloc(200 * sizeof(double));
+    i = 0;
+    for (s = 0; s < 2; s++) {
+        for (e0 = 0; e0 < 2; e0++) {
+            for (e1 = 0x000; ; e1 = ((e1 + 1) << 2) + 6) {
+                if (e1 >= 0x400)
+                    e1 = 0x3fe;
+                _exp = (e0 << 10) | e1;
+                for (mant = 0x0000000000001ULL; mant < (1ULL << 52);
+                     /* Add 'random' bits */
+                     mant = ((mant + 0x4A6) << 13) + 0x359) {
+                    register_farg(&fargs[i++], s, _exp, mant);
+                }
+                if (e1 == 0x3fe)
+                    break;
+            }
+        }
+    }
+    /* Special values */
+    /* +0.0      : 0 0x000 0x0000000000000 */
+    s = 0;
+    _exp = 0x000;
+    mant = 0x0000000000000ULL;
+    register_farg(&fargs[i++], s, _exp, mant);
+    /* -0.0      : 1 0x000 0x0000000000000 */
+    s = 1;
+    _exp = 0x000;
+    mant = 0x0000000000000ULL;
+    register_farg(&fargs[i++], s, _exp, mant);
+    /* +infinity : 0 0x7FF 0x0000000000000  */
+    s = 0;
+    _exp = 0x7FF;
+    mant = 0x0000000000000ULL;
+    register_farg(&fargs[i++], s, _exp, mant);
+    /* -infinity : 1 0x7FF 0x0000000000000 */
+    s = 1;
+    _exp = 0x7FF;
+    mant = 0x0000000000000ULL;
+    register_farg(&fargs[i++], s, _exp, mant);
+    /* +SNaN     : 0 0x7FF 0x7FFFFFFFFFFFF */
+    s = 0;
+    _exp = 0x7FF;
+    mant = 0x7FFFFFFFFFFFFULL;
+    register_farg(&fargs[i++], s, _exp, mant);
+    /* -SNaN     : 1 0x7FF 0x7FFFFFFFFFFFF */
+    s = 1;
+    _exp = 0x7FF;
+    mant = 0x7FFFFFFFFFFFFULL;
+    register_farg(&fargs[i++], s, _exp, mant);
+    /* +QNaN     : 0 0x7FF 0x8000000000000 */
+    s = 0;
+    _exp = 0x7FF;
+    mant = 0x8000000000000ULL;
+    register_farg(&fargs[i++], s, _exp, mant);
+    /* -QNaN     : 1 0x7FF 0x8000000000000 */
+    s = 1;
+    _exp = 0x7FF;
+    mant = 0x8000000000000ULL;
+    register_farg(&fargs[i++], s, _exp, mant);
+    AB_DPRINTF("Registered %d floats values\n", i);
+    nb_fargs = i;
+}
+
+static void build_iargs_table (void)
+{
+    uint64_t tmp;
+    int i;
+
+    iargs = my_malloc(400 * sizeof(uint32_t));
+    i = 0;
+    for (tmp = 0; ; tmp = tmp + 1 + (tmp>>1)+(tmp>>2)+(tmp>>3)) {
+        if (tmp >= 0x100000000ULL)
+            tmp = 0xFFFFFFFF;
+        iargs[i++] = tmp;
+        AB_DPRINTF("val %08llx\n", tmp);
+        if (tmp == 0xFFFFFFFF)
+            break;
+    }
+    AB_DPRINTF("Registered %d ints values\n", i);
+    nb_iargs = i;
+}
+
+static void build_ii16_table (void)
+{
+    uint32_t tmp;
+    int i;
+
+    ii16 = my_malloc(200 * sizeof(uint32_t));
+    i = 0;
+    for (tmp = 0; ; tmp = tmp + 1 + (tmp>>1)+(tmp>>2)+(tmp>>3)) {
+        if (tmp >= 0x10000)
+            tmp = 0xFFFF;
+        ii16[i++] = tmp;
+        AB_DPRINTF("val %08llx\n", tmp);
+        if (tmp == 0xFFFF)
+            break;
+    }
+    AB_DPRINTF("Registered %d ints values\n", i);
+    nb_ii16 = i;
+}
+
+static void test_int_three_args (const unsigned char *name, test_func_t func)
+{
+    uint32_t res, flags, xer;
+    int i, j, k;
+
+    if (verbose > 1)
+        vexxx_printf( "Test instruction %s\n", name);
+    for (i = 0; i < nb_iargs; i++) {
+        for (j = 0; j < nb_iargs; j++) {
+            for (k = 0;k < nb_iargs; k++) {
+                r14 = iargs[i];
+                r15 = iargs[j];
+                r16 = iargs[k];
+                r18 = 0;
+                __asm__ __volatile__ ("mtcr 18");
+                __asm__ __volatile__ ("mtxer 18");
+                (*func)();
+                __asm__ __volatile__ ("mfcr 18");
+                flags = r18;
+                __asm__ __volatile__ ("mfxer 18");
+                xer = r18;
+                res = r17;
+                vexxx_printf("%s %08x, %08x, %08x => %08x (%08x %08x)\n",
+                       name, iargs[i], iargs[j], iargs[k], res, flags, xer);
+            }
+            vexxx_printf("\n");
+        }
+        vexxx_printf("\n");
+    }
+    vexxx_printf("\n");
+}
+
+static void test_int_two_args (const unsigned char *name, test_func_t func)
+{
+    uint32_t res, flags, xer;
+    int i, j;
+
+    if (verbose > 1)
+        vexxx_printf( "Test instruction %s\n", name);
+    for (i = 0; i < nb_iargs; i++) {
+        for (j = 0; j < nb_iargs; j++) {
+            r14 = iargs[i];
+            r15 = iargs[j];
+            r18 = 0;
+            __asm__ __volatile__ ("mtcr 18");
+            __asm__ __volatile__ ("mtxer 18");
+            (*func)();
+            __asm__ __volatile__ ("mfcr 18");
+            flags = r18;
+            __asm__ __volatile__ ("mfxer 18");
+            xer = r18;
+            res = r17;
+            vexxx_printf("%s %08x, %08x => %08x (%08x %08x)\n",
+                   name, iargs[i], iargs[j], res, flags, xer);
+        }
+        vexxx_printf("\n");
+    }
+    vexxx_printf("\n");
+}
+
+static void test_int_one_arg (const unsigned char *name, test_func_t func)
+{
+    uint32_t res, flags, xer;
+    int i;
+
+    if (verbose > 1)
+        vexxx_printf( "Test instruction %s\n", name);
+    for (i = 0; i < nb_iargs; i++) {
+        r14 = iargs[i];
+        r18 = 0;
+        __asm__ __volatile__ ("mtcr 18");
+//        r18 = 0x20000000;                // set xer_ca
+        __asm__ __volatile__ ("mtxer 18");
+        (*func)();
+        res = r17;
+        __asm__ __volatile__ ("mfcr 18");
+        flags = r18;
+        __asm__ __volatile__ ("mfxer 18");
+        xer = r18;
+        vexxx_printf("%s %08x => %08x (%08x %08x)\n",
+               name, iargs[i], res, flags, xer);
+    }
+    vexxx_printf("\n");
+}
+
+static inline void _patch_op_imm (void *out, void *in,
+                                  uint16_t imm, int sh, int len)
+{
+    volatile uint32_t *p, *q;
+
+    p = out;
+    q = in;
+    *p = (*q & ~(((1 << len) - 1) << sh)) | ((imm & ((1 << len) - 1)) << sh);
+}
+
/* Patch an instruction field and make the modified word fetchable as code:
 * dcbf flushes the data-cache line, icbi invalidates the i-cache line,
 * isync discards any prefetched instructions. */
static inline void patch_op_imm (void *out, void *in,
                                 uint16_t imm, int sh, int len)
{
    volatile uint32_t *p;

    p = out;
    _patch_op_imm(out, in, imm, sh, len);
    __asm__ __volatile__ ("dcbf 0, %0 ; icbi 0, %0 ; isync" ::"r"(p));
}
+
/* Convenience wrapper: patch the low 16-bit immediate field (D/SI/UI forms)
 * of the instruction word, with cache flush. */
static inline void patch_op_imm16 (void *out, void *in, uint16_t imm)
{
    patch_op_imm(out, in, imm, 0, 16);
}
+
/* Drive an insn taking one register plus a 16-bit immediate.  The stub's
 * first insn word is copied into func_buf[] with its immediate field
 * rewritten for each value in ii16[], and executed from there.
 * NOTE(review): 'func' is redirected to func_buf after the first patch, so
 * later iterations re-patch func_buf in place; this works because the full
 * 16-bit field is overwritten every time. */
static void test_int_one_reg_imm16 (const unsigned char *name,
                                    test_func_t func)
{
    uint32_t func_buf[2], *p;
    uint32_t res, flags, xer;
    int i, j;

    if (verbose > 1)
        vexxx_printf( "Test instruction %s\n", name);
    for (i = 0; i < nb_iargs; i++) {
        for (j = 0; j < nb_ii16; j++) {
            p = (void *)func;
#if 0
            vexxx_printf("copy func %s from %p to %p (%08x %08x)\n",
                   name, func, func_buf, p[0], p[1]);
#endif
            /* func_buf[0] is produced by the patch below from p[0];
             * func_buf[1] carries the stub's second insn unchanged. */
            func_buf[1] = p[1];
            patch_op_imm16(func_buf, p, ii16[j]);
            func = (void *)func_buf;
#if 0
            vexxx_printf(" =>  func %s from %p to %p (%08x %08x)\n",
                   name, func, func_buf, func_buf[0], func_buf[1]);
#endif
            r14 = iargs[i];
            r18 = 0;
            __asm__ __volatile__ ("mtcr 18");
            __asm__ __volatile__ ("mtxer 18");
            (*func)();
            __asm__ __volatile__ ("mfcr 18");
            flags = r18;
            __asm__ __volatile__ ("mfxer 18");
            xer = r18;
            res = r17;
            vexxx_printf("%s %08x, %08x => %08x (%08x %08x)\n",
                   name, iargs[i], ii16[j], res, flags, xer);
        }
        vexxx_printf("\n");
    }
    vexxx_printf("\n");
}
+
+/* Special test cases for:
+ * rlwimi
+ * rlwinm
+ * rlwnm
+ * srawi
+ * mcrf
+ * mcrfs
+ * mffs
+ * mtfsb0
+ * mtfsb1
+ */
+
/* Special driver for rlwimi/rlwinm: one register operand plus three 5-bit
 * immediate fields (patched at bit offsets 11, 6 and 1 — SH, MB, ME).
 * Only the final patch flushes the caches; the first two are data-only.
 * The outer loop samples iargs[] sparsely (indices 0, 1, 4, 7, ... and
 * always the last entry) to keep the output volume manageable. */
static void rlwi_cb (const unsigned char *name, test_func_t func)
{
    uint32_t func_buf[2], *p;
    uint32_t res, flags, xer;
    int i, j, k, l;

    if (verbose > 1)
        vexxx_printf( "Test instruction %s\n", name);
    for (i = 0;;) {
        /* Clamp overshoot from the 'i += 3' stepping to the last entry. */
        if (i >= nb_iargs)
            i = nb_iargs - 1;
        for (j = 0; j < 32; j++) {
            for (k = 0; k < 32; k++) {
                for (l = 0; l < 32; l++) {
                    p = (void *)func;
                    func_buf[1] = p[1];
                    _patch_op_imm(func_buf, p, j, 11, 5);
                    _patch_op_imm(func_buf, p, k, 6, 5);
                    patch_op_imm(func_buf, p, l, 1, 5);
                    func = (void *)func_buf;
                    r14 = iargs[i];
                    r18 = 0;
                    __asm__ __volatile__ ("mtcr 18");
                    __asm__ __volatile__ ("mtxer 18");
                    (*func)();
                    __asm__ __volatile__ ("mfcr 18");
                    flags = r18;
                    __asm__ __volatile__ ("mfxer 18");
                    xer = r18;
                    res = r17;
                    vexxx_printf("%s %08x, %d, %d, %d => %08x (%08x %08x)\n",
                           name, iargs[i], j, k, l, res, flags, xer);
                }
                vexxx_printf("\n");
            }
            vexxx_printf("\n");
        }
        vexxx_printf("\n");
        /* Sparse stepping: 0 -> 1 -> 4 -> 7 -> ...; stop after the
         * clamped last entry has been processed. */
        if (i == 0)
            i = 1;
        else if (i == nb_iargs - 1)
            break;
        else
            i += 3;
    }
    vexxx_printf("\n");
}
+
/* Special driver for rlwnm: two register operands (value in r14, shift
 * amount 0..63 in r15) plus two 5-bit immediate fields (MB at bit 6,
 * ME at bit 1) patched into the insn word. */
static void rlwnm_cb (const unsigned char *name, test_func_t func)
{
    uint32_t func_buf[2], *p;
    uint32_t res, flags, xer;
    int i, j, k, l;

    if (verbose > 1)
        vexxx_printf( "Test instruction %s\n", name);
    for (i = 0; i < nb_iargs; i++) {
        for (j = 0; j < 64; j++) {
            for (k = 0; k < 32; k++) {
                for (l = 0; l < 32; l++) {
                    p = (void *)func;
                    func_buf[1] = p[1];
                    _patch_op_imm(func_buf, p, k, 6, 5);
                    patch_op_imm(func_buf, p, l, 1, 5);
                    func = (void *)func_buf;
                    r14 = iargs[i];
                    r15 = j;
                    r18 = 0;
                    __asm__ __volatile__ ("mtcr 18");
                    __asm__ __volatile__ ("mtxer 18");
                    (*func)();
                    __asm__ __volatile__ ("mfcr 18");
                    flags = r18;
                    __asm__ __volatile__ ("mfxer 18");
                    xer = r18;
                    res = r17;
                    vexxx_printf("%s %08x, %08x, %d, %d => %08x (%08x %08x)\n",
                           name, iargs[i], j, k, l, res, flags, xer);
                }
                vexxx_printf("\n");
            }
            vexxx_printf("\n");
        }
        vexxx_printf("\n");
    }
    vexxx_printf("\n");
}
+
/* Special driver for srawi: one register operand plus a 5-bit shift
 * amount patched into the SH field (bit offset 11) of the insn word. */
static void srawi_cb (const unsigned char *name, test_func_t func)
{
    uint32_t func_buf[2], *p;
    uint32_t res, flags, xer;
    int i, j;

    if (verbose > 1)
        vexxx_printf( "Test instruction %s\n", name);
    for (i = 0; i < nb_iargs; i++) {
        for (j = 0; j < 32; j++) {
            p = (void *)func;
            func_buf[1] = p[1];
            patch_op_imm(func_buf, p, j, 11, 5);
            func = (void *)func_buf;
            r14 = iargs[i];
            r18 = 0;
            __asm__ __volatile__ ("mtcr 18");
            __asm__ __volatile__ ("mtxer 18");
            (*func)();
            __asm__ __volatile__ ("mfcr 18");
            flags = r18;
            __asm__ __volatile__ ("mfxer 18");
            xer = r18;
            res = r17;
            vexxx_printf("%s %08x, %d => %08x (%08x %08x)\n",
                   name, iargs[i], j, res, flags, xer);
        }
        vexxx_printf("\n");
    }
    vexxx_printf("\n");
}
+
/* Binds an instruction name to a dedicated test driver, used for insns
 * whose immediate operands must be patched directly into the opcode. */
typedef struct special_t special_t;
struct special_t {
    const unsigned char *name;   /* mnemonic to match (leading spaces stripped) */
    void (*test_cb)(const unsigned char *name, test_func_t func);
};
+
+static void test_special (special_t *table,
+                          const unsigned char *name, test_func_t func)
+{
+    const unsigned char *tmp;
+    int i;
+
+    for (tmp = name; my_isspace(*tmp); tmp++)
+        continue;
+    for (i = 0; table[i].name != NULL; i++) {
+#if 0
+        vexxx_printf( "look for handler for '%s' (%s)\n", name,
+                table[i].name);
+#endif
+        if (my_strcmp(table[i].name, tmp) == 0) {
+            (*table[i].test_cb)(name, func);
+            return;
+        }
+    }
+    vexxx_printf( "ERROR: no test found for op '%s'\n", name);
+}
+
/* Name -> driver table for integer insns with non-standard operands.
 * All real entries are currently compiled out (#if 0), leaving only the
 * NULL terminator, so test_int_special reports "no test found" for
 * everything routed here. */
static special_t special_int_ops[] = {
#if 0
    {
        "rlwimi", /* One register + 3 5 bits immediate arguments */
        &rlwi_cb,
    },
    {
        "rlwimi.", /* One register + 3 5 bits immediate arguments */
        &rlwi_cb,
    },
    {
        "rlwinm", /* One register + 3 5 bits immediate arguments */
        &rlwi_cb,
    },
    {
        "rlwinm.", /* One register + 3 5 bits immediate arguments */
        &rlwi_cb,
    },
    {
        "rlwnm",  /* Two registers + 3 5 bits immediate arguments */
        &rlwnm_cb,
    },
    {
        "rlwnm.",  /* Two registers + 3 5 bits immediate arguments */
        &rlwnm_cb,
    },
    {
        "srawi",  /* One register + 1 5 bits immediate arguments */
        &srawi_cb,
    },
    {
        "srawi.",  /* One register + 1 5 bits immediate arguments */
        &srawi_cb,
    },
#endif
#if 0
    {
        "mcrf",  /* 2 3 bits immediate arguments */
        &mcrf_cb,
    },
    {
        "mcrf",  /* 2 3 bits immediate arguments */
        &mcrf_cb,
    },
#endif
    {
        NULL,
        NULL,
    },
};
+
/* Dispatch an integer insn with non-standard operand encoding to its
 * dedicated driver via special_int_ops[]. */
static void test_int_special (const unsigned char *name, test_func_t func)
{
    test_special(special_int_ops, name, func);
}
+
/* Integer test drivers, indexed by (nb_args - 1) from a test's
 * PPC_NB_ARGS flags field.  NOTE(review): slots 3-5 (two_args again plus
 * the two imm16 slots) presumably correspond to arg-count codes for the
 * immediate-operand forms — confirm against the flag definitions, which
 * are outside this chunk. */
static test_loop_t int_loops[] = {
    &test_int_one_arg,
    &test_int_two_args,
    &test_int_three_args,
    &test_int_two_args,
    &test_int_one_reg_imm16,
    &test_int_one_reg_imm16,
    &test_int_special,
};
+
+#if !defined (NO_FLOAT)
/* Drive a three-operand FP insn over all triples from fargs[].  Operands
 * in f14/f15/f16, result in f17; CR, XER and the FPSCR (mtfsf from
 * f18 = +0.0) are cleared before each run.
 * NOTE(review): u0/u1/u2/ur reinterpret doubles through uint64_t pointer
 * casts, which violates strict aliasing — fine at -O0, risky otherwise. */
static void test_float_three_args (const unsigned char *name, test_func_t func)
{
    double res;
    uint64_t u0, u1, u2, ur;
    uint32_t flags;
    int i, j, k;

    if (verbose > 1)
        vexxx_printf( "Test instruction %s\n", name);
    for (i = 0; i < nb_fargs; i++) {
        for (j = 0; j < nb_fargs; j++) {
            for (k = 0;k < nb_fargs; k++) {
                u0 = *(uint64_t *)(&fargs[i]);
                u1 = *(uint64_t *)(&fargs[j]);
                u2 = *(uint64_t *)(&fargs[k]);
                f14 = fargs[i];
                f15 = fargs[j];
                f16 = fargs[k];
                r18 = 0;
                __asm__ __volatile__ ("mtcr 18");
                __asm__ __volatile__ ("mtxer 18");
                f18 = +0.0;
                __asm__ __volatile__ ("mtfsf 0xFF, 18");
                (*func)();
                __asm__ __volatile__ ("mfcr 18");
                flags = r18;
                res = f17;
                ur = *(uint64_t *)(&res);
                vexxx_printf("%s %016llx, %016llx, %016llx => %016llx (%08x)\n",
                       name, u0, u1, u2, ur, flags);
            }
            vexxx_printf("\n");
        }
        vexxx_printf("\n");
    }
    vexxx_printf("\n");
}
+
/* Drive a two-operand FP insn over all pairs from fargs[].  Operands in
 * f14/f15, result in f17; CR, XER and FPSCR cleared before each run.
 * Same aliasing caveat as test_float_three_args. */
static void test_float_two_args (const unsigned char *name, test_func_t func)
{
    double res;
    uint64_t u0, u1, ur;
    uint32_t flags;
    int i, j;

    if (verbose > 1)
        vexxx_printf( "Test instruction %s\n", name);
    for (i = 0; i < nb_fargs; i++) {
        for (j = 0; j < nb_fargs; j++) {
            u0 = *(uint64_t *)(&fargs[i]);
            u1 = *(uint64_t *)(&fargs[j]);
            f14 = fargs[i];
            f15 = fargs[j];
            r18 = 0;
            __asm__ __volatile__ ("mtcr 18");
            __asm__ __volatile__ ("mtxer 18");
            f18 = +0.0;
            __asm__ __volatile__ ("mtfsf 0xFF, 18");
            (*func)();
            __asm__ __volatile__ ("mfcr 18");
            flags = r18;
            res = f17;
            ur = *(uint64_t *)(&res);
            vexxx_printf("%s %016llx, %016llx => %016llx (%08x)\n",
                   name, u0, u1, ur, flags);
        }
        vexxx_printf("\n");
    }
    vexxx_printf("\n");
}
+
/* Drive a one-operand FP insn over fargs[].  Operand in f14, result in
 * f17; CR, XER and FPSCR cleared before each run. */
static void test_float_one_arg (const unsigned char *name, test_func_t func)
{
    double res;
    uint64_t u0, ur;
    uint32_t flags;
    int i;

    if (verbose > 1)
        vexxx_printf( "Test instruction %s\n", name);
    for (i = 0; i < nb_fargs; i++) {
        u0 = *(uint64_t *)(&fargs[i]);
        f14 = fargs[i];
        r18 = 0;
        __asm__ __volatile__ ("mtcr 18");
        __asm__ __volatile__ ("mtxer 18");
        f18 = +0.0;
        __asm__ __volatile__ ("mtfsf 0xFF, 18");
        (*func)();
        __asm__ __volatile__ ("mfcr 18");
        flags = r18;
        res = f17;
        ur = *(uint64_t *)(&res);
        vexxx_printf("%s %016llx => %016llx (%08x)\n", name, u0, ur, flags);
    }
    vexxx_printf("\n");
}
+
/* Name -> driver table for FP insns with non-standard operands.  Like
 * special_int_ops, all real entries are compiled out (#if 0); only the
 * NULL terminator remains. */
static special_t special_float_ops[] = {
#if 0
    {
        "mffs",   /* One 5 bits immediate argument */
        &mffs_cb,
    },
    {
        "mffs.",   /* One 5 bits immediate argument */
        &mffs_cb,
    },
    {
        "mtfsb0", /* One 5 bits immediate argument */
        &mffs_cb,
    },
    {
        "mtfsb0.", /* One 5 bits immediate argument */
        &mffs_cb,
    },
    {
        "mtfsb1", /* One 5 bits immediate argument */
        &mffs_cb,
    },
    {
        "mtfsb1.", /* One 5 bits immediate argument */
        &mffs_cb,
    },
    {
        "mtfsf",  /* One register + 1 8 bits immediate argument */
        &mtfsf_cb,
    },
    {
        "mtfsf.",  /* One register + 1 8 bits immediate argument */
        &mtfsf_cb,
    },
    {
        "mtfsfi", /* One 5 bits argument + 1 5 bits argument */
        &mtfsfi_cb,
    },
    {
        "mtfsfi.", /* One 5 bits argument + 1 5 bits argument */
        &mtfsfi_cb,
    },
#endif
    {
        NULL,
        NULL,
    },
};
+
/* Dispatch an FP insn with non-standard operand encoding to its dedicated
 * driver via special_float_ops[]. */
static void test_float_special (const unsigned char *name, test_func_t func)
{
    test_special(special_float_ops, name, func);
}
+
/* FP test drivers, indexed by (nb_args - 1).  Slots 4 and 5 (the integer
 * imm16 forms) have no FP counterpart and are NULL; slot 6 dispatches to
 * per-insn special cases. */
static test_loop_t float_loops[] = {
    &test_float_one_arg,
    &test_float_two_args,
    &test_float_three_args,
    &test_float_two_args,
    NULL,
    NULL,
    &test_float_special,
};
+#endif /* !defined (NO_FLOAT) */
+
+
+#if defined (HAS_ALTIVEC) /* XXX: TODO */
+#endif /* defined (HAS_ALTIVEC) */
+
+#if defined (IS_PPC405)
/* Drive a PPC405 multiply-accumulate style insn over all triples from
 * iargs[].  Operands in r14/r15 and — because the accumulator operand and
 * the result share a register — the third operand is preloaded into r17,
 * which also receives the result. */
static void test_ppc405 (const unsigned char *name, test_func_t func)
{
    uint32_t res, flags, xer;
    int i, j, k;

    if (verbose > 1)
        vexxx_printf( "Test instruction %s\n", name);
    for (i = 0; i < nb_iargs; i++) {
        for (j = 0; j < nb_iargs; j++) {
            for (k = 0;k < nb_iargs; k++) {
                r14 = iargs[i];
                r15 = iargs[j];
                 /* Beware: the third argument and the result
                  * are in the same register
                  */
                r17 = iargs[k];
                r18 = 0;
                __asm__ __volatile__ ("mtcr 18");
                __asm__ __volatile__ ("mtxer 18");
                (*func)();
                __asm__ __volatile__ ("mfcr 18");
                flags = r18;
                __asm__ __volatile__ ("mfxer 18");
                xer = r18;
                res = r17;
                vexxx_printf("%s %08x, %08x, %08x => %08x (%08x %08x)\n",
                       name, iargs[i], iargs[j], iargs[k], res, flags, xer);
            }
            vexxx_printf("\n");
        }
        vexxx_printf("\n");
    }
    vexxx_printf("\n");
}
+#endif /* defined (IS_PPC405) */
+
+static int check_filter (unsigned char *filter)
+{
+    unsigned char *c;
+    int ret = 1;
+
+    if (filter != NULL) {
+        c = my_strchr(filter, '*');
+        if (c != NULL) {
+            *c = '\0';
+            ret = 0;
+        }
+    }
+
+    return ret;
+}
+
+static int check_name (const unsigned char *name, const unsigned char *filter,
+                       int exact)
+{
+    int nlen, flen;
+    int ret = 0;
+
+    if (filter != NULL) {
+        for (; my_isspace(*name); name++)
+            continue;
+        FDPRINTF("Check '%s' againt '%s' (%s match)\n",
+                 name, filter, exact ? "exact" : "starting");
+        nlen = vexxx_strlen(name);
+        flen = vexxx_strlen(filter);
+        if (exact) {
+            if (nlen == flen && my_memcmp(name, filter, flen) == 0)
+                ret = 1;
+        } else {
+            if (flen <= nlen && my_memcmp(name, filter, flen) == 0)
+                ret = 1;
+        }
+    } else {
+        ret = 1;
+    }
+
+    return ret;
+}
+
+static void do_tests (int one_arg, int two_args, int three_args,
+                      int arith, int logical, int compare,
+                      int integer, int floats, int p405,
+                      int altivec, int faltivec,
+                      int cr, unsigned char *filter)
+{
+#if defined (IS_PPC405)
+    test_loop_t tmpl;
+#endif
+    test_loop_t *loop;
+    test_t *tests;
+    int nb_args, type, family;
+    int i, j, n;
+    int exact;
+
+    exact = check_filter(filter);
+    n = 0;
+    for (i = 0; all_tests[i].name != NULL; i++) {
+        nb_args = all_tests[i].flags & PPC_NB_ARGS;
+        /* Check number of arguments */
+        if ((nb_args == 1 && !one_arg) ||
+            (nb_args == 2 && !two_args) ||
+            (nb_args == 3 && !three_args))
+            continue;
+        /* Check instruction type */
+        type = all_tests[i].flags & PPC_TYPE;
+        if ((type == PPC_ARITH && !arith) ||
+            (type == PPC_LOGICAL && !logical) ||
+            (type == PPC_COMPARE && !compare))
+            continue;
+        /* Check instruction family */
+        family = all_tests[i].flags & PPC_FAMILY;
+        if ((family == PPC_INTEGER && !integer) ||
+            (family == PPC_FLOAT && !floats) ||
+            (family == PPC_405 && !p405) ||
+            (family == PPC_ALTIVEC && !altivec) ||
+            (family == PPC_FALTIVEC && !faltivec))
+            continue;
+        /* Check flags update */
+        if (((all_tests[i].flags & PPC_CR) && cr == 0) ||
+            (!(all_tests[i].flags & PPC_CR) && cr == 1))
+            continue;
+        /* All passed, do the tests */
+        tests = all_tests[i].tests;
+        /* Select the test loop */
+        switch (family) {
+        case PPC_INTEGER:
+            loop = &int_loops[nb_args - 1];
+            break;
+        case PPC_FLOAT:
+#if !defined (NO_FLOAT)
+            loop = &float_loops[nb_args - 1];
+            break;
+#else
+            vexxx_printf( "Sorry. "
+                    "PPC floating point instructions tests "
+                    "are disabled on your host\n");
+#endif /* !defined (NO_FLOAT) */
+
+        case PPC_405:
+#if defined (IS_PPC405)
+            tmpl = &test_ppc405;
+            loop = &tmpl;
+            break;
+#else
+            vexxx_printf( "Sorry. "
+                    "PPC405 instructions tests are disabled on your host\n");
+            continue;
+#endif /* defined (IS_PPC405) */
+        case PPC_ALTIVEC:
+#if defined (HAS_ALTIVEC)
+#if 0
+            loop = &altivec_int_loops[nb_args - 1];
+            break;
+#else
+            vexxx_printf( "Sorry. "
+                    "Altivec instructions tests are not yet implemented\n");
+            continue;
+#endif
+#else
+            vexxx_printf( "Sorry. "
+                    "Altivec instructions tests are disabled on your host\n");
+            continue;
+#endif
+        case PPC_FALTIVEC:
+#if defined (HAS_ALTIVEC)
+#if 0
+            loop = &altivec_float_loops[nb_args - 1];
+            break;
+#else
+            vexxx_printf( "Sorry. "
+                    "Altivec instructions tests are not yet implemented\n");
+            continue;
+#endif
+#else
+            vexxx_printf( "Sorry. "
+                    "Altivec float instructions tests "
+                    "are disabled on your host\n");
+#endif
+            continue;
+        default:
+            vexxx_printf("ERROR: unknown insn family %08x\n", family);
+            continue;
+        }
+        if (verbose > 0)
+            vexxx_printf( "%s:\n", all_tests[i].name);
+        for (j = 0; tests[j].name != NULL; j++) {
+            if (check_name(tests[j].name, filter, exact))
+                (*loop)(tests[j].name, tests[j].func);
+            n++;
+        }
+        vexxx_printf("\n");
+    }
+    vexxx_printf( "All done. Tested %d different instructions\n", n);
+}
+
#if 0 // unused
/* Help text for the original standalone build; dead here because the
 * option parsing in _main is disabled. */
static void usage (void)
{
    vexxx_printf(
            "test-ppc [-1] [-2] [-3] [-*] [-t <type>] [-f <family>] [-u] "
            "[-n <filter>] [-x] [-h]\n"
            "\t-1: test opcodes with one argument\n"
            "\t-2: test opcodes with two arguments\n"
            "\t-3: test opcodes with three arguments\n"
            "\t-*: launch test without checking the number of arguments\n"
            "\t-t: launch test for instructions of type <type>\n"
            "\t    recognized types:\n"
            "\t\tarith (or a)\n"
            "\t\tlogical (or l)\n"
            "\t\tcompare (or c)\n"
            "\t-f: launch test for instructions of family <family>\n"
            "\t    recognized families:\n"
            "\t\tinteger (or i)\n"
            "\t\tfloat (or f)\n"
            "\t\tppc405 (or mac)\n"
            "\t\taltivec (or a)\n"
            "\t-u: test instructions that update flags\n"
            "\t-n: filter instructions with <filter>\n"
            "\t    <filter> can be in two forms:\n"
            "\t\tname  : filter functions that exactly match <name>\n"
            "\t\tname* : filter functions that start with <name>\n"
            "\t-h: print this help\n"
            );
}
#endif
+
/* Original command-line driver.  getopt-based option parsing is commented
 * out for this embedded/switchback build; the '#if 1' block below forces
 * the configuration instead (all integer tests, float/405/altivec off).
 * argc/argv are therefore currently unused. */
int _main (int argc, char **argv)
{
    unsigned char /* *tmp, */ *filter = NULL;
    int one_arg = 0, two_args = 0, three_args = 0;
    int arith = 0, logical = 0, compare = 0;
    int integer = 0, floats = 0, p405 = 0, altivec = 0, faltivec = 0;
    int cr = -1;
    //int c;
    
    //    while ((c = getopt(argc, argv, "123t:f:n:uvh")) != -1) {
    //        switch (c) {
    //        case '1':
    //            one_arg = 1;
    //            break;
    //        case '2':
    //            two_args = 1;
    //            break;
    //        case '3':
    //            three_args = 1;
    //            break;
    //        case 't':
    //            tmp = optarg;
    //            if (my_strcmp(tmp, "arith") == 0 || my_strcmp(tmp, "a") == 0) {
    //                arith = 1;
    //            } else if (my_strcmp(tmp, "logical") == 0 || my_strcmp(tmp, "l") == 0) {
    //                logical = 1;
    //            } else if (my_strcmp(tmp, "compare") == 0 || my_strcmp(tmp, "c") == 0) {
    //                compare = 1;
    //            } else {
    //                goto bad_arg;
    //            }
    //            break;
    //        case 'f':
    //            tmp = optarg;
    //            if (my_strcmp(tmp, "integer") == 0 || my_strcmp(tmp, "i") == 0) {
    //                integer = 1;
    //            } else if (my_strcmp(tmp, "float") == 0 || my_strcmp(tmp, "f") == 0) {
    //                floats = 1;
    //            } else if (my_strcmp(tmp, "ppc405") == 0 || my_strcmp(tmp, "mac") == 0) {
    //                p405 = 1;
    //            } else if (my_strcmp(tmp, "altivec") == 0 || my_strcmp(tmp, "a") == 0) {
    //                altivec = 1;
    //                faltivec = 1;
    //            } else {
    //                goto bad_arg;
    //            }
    //            break;
    //        case 'n':
    //            filter = optarg;
    //            break;
    //        case 'u':
    //            cr = 1;
    //            break;
    //        case 'h':
    //            usage();
    //            return 0;
    //        case 'v':
    //            verbose++;
    //            break;
    //        default:
    //            usage();
    //            vexxx_printf( "Unknown argument: '%c'\n", c);
    //            return 1;
    //        bad_arg:
    //            usage();
    //            vexxx_printf( "Bad argument for '%c': '%s'\n", c, tmp);
    //            return 1;
    //        }
    //    }
    //    if (argc != optind) {
    //        usage();
    //        vexxx_printf( "Bad number of arguments\n");
    //        return 1;
    //    }

    /* Unset selector groups default to "everything in the group". */
    if (one_arg == 0 && two_args == 0 && three_args == 0) {
        one_arg = 1;
        two_args = 1;
        three_args = 1;
    }
    if (arith == 0 && logical == 0 && compare == 0) {
        arith = 1;
        logical = 1;
        compare = 1;
    }
    if (integer == 0 && floats == 0 && altivec == 0 && faltivec == 0 &&
        p405 == 0) {
        integer = 1;
        floats = 1;
        altivec = 1;
        faltivec = 1;
        p405 = 1;
    }
    /* cr == 2 means "both CR-updating and non-updating insns". */
    if (cr == -1)
        cr = 2;
    build_iargs_table();
    build_fargs_table();
    build_ii16_table();

    /* Forced configuration for this build: integer tests only. */
#if 1
    one_arg=1; 
    two_args=1; 
    three_args=1;

    arith=1;
    logical=1;
    compare=1;

    integer=1;
    floats=0;

    p405=0;
    altivec=0;
    faltivec=0;
#endif

    do_tests(one_arg, two_args, three_args,
             arith, logical, compare,
             integer, floats, p405, altivec, faltivec,
             cr, filter);

    return 0;
}
+
+
/* Switchback entry point: stash the host service callback in the global
 * serviceFn, run the test driver, then call the service with (0,0) to
 * signal completion.  argc is passed as 0 (argv entries are NULL), which
 * is harmless since _main's argument parsing is disabled. */
void entry ( HWord(*service)(HWord,HWord) )
{
   char* argv[2] = { NULL, NULL };
   serviceFn = service;
   _main(0, argv);
   (*service)(0,0);
}
diff --git a/VEX/switchback/test_simple.c b/VEX/switchback/test_simple.c
new file mode 100644
index 0000000..9a7756a
--- /dev/null
+++ b/VEX/switchback/test_simple.c
@@ -0,0 +1,12 @@
+
+
/* Execute a single PPC integer op via inline asm: r17 = r14 + 5.
 * The 'service' parameter is unused; it only mirrors entry's signature. */
static void bar ( void*(*service)(int,int) )
{
   __asm__ __volatile__ ("addi         17, 14, 5");
}

/* Minimal switchback test: run bar, then call back into the host. */
void entry ( void*(*service)(int,int) )
{
  bar(service);
  service(0,0);
}
diff --git a/VEX/test/fldenv.c b/VEX/test/fldenv.c
new file mode 100644
index 0000000..4f37d61
--- /dev/null
+++ b/VEX/test/fldenv.c
@@ -0,0 +1,32 @@
+
+#include <stdio.h>
+#include <stdlib.h>
+
/* Store the 28-byte x87 environment image to *p (fstenv).
 * NOTE(review): the asm fetches p from 8(%esp), hard-coding the 32-bit
 * frame layout (return address + saved %ebp); this only holds for an
 * unoptimised build — verify before reusing. */
void do_fstenv ( void* p )
{
   asm("movl 8(%esp), %eax ; fstenv (%eax)");
}

/* Load the x87 environment image from *p (fldenv); same frame-layout
 * caveat as do_fstenv. */
void do_fldenv ( void* p )
{
   asm("movl 8(%esp), %eax ; fldenv (%eax)");
}
+
/* Round-trip a hand-built x87 environment image through fldenv/fstenv and
 * print the 14 16-bit words that come back. */
int main ( void )
{
   int i;
   /* 14 shorts = the 28-byte protected-mode x87 environment image. */
   unsigned short* buf = malloc(14*sizeof(short));
   if (buf == NULL) {                 /* BUGFIX: malloc was unchecked */
      fprintf(stderr, "malloc failed\n");
      return 1;
   }
   for (i = 0; i < 14; i++)
      buf[i] = i;
   buf[0] = 0x037f;   /* default x87 control word */

   do_fldenv(buf);
   do_fstenv(buf);
   for (i = 0; i < 14; i++) {
      printf("%04x ", buf[i]);
      if (i > 0 && ((i % 12) == 11))
          printf("\n");
   }
   printf("\n");
   free(buf);                         /* BUGFIX: buf was leaked */
   return 0;
}
diff --git a/VEX/test/fp1.c b/VEX/test/fp1.c
new file mode 100644
index 0000000..3f3aca3
--- /dev/null
+++ b/VEX/test/fp1.c
@@ -0,0 +1,17 @@
+
+#include <stdio.h>
+
double a[10];

/* Fill a[] with multiples of 11.11, sum the array, print the total. */
int main ( void ) 
{
  int idx;
  double total = 0.0;

  for (idx = 0; idx < 10; idx++) {
    a[idx] = 11.11 * idx;
  }
  for (idx = 0; idx < 10; idx++) {
    total += a[idx];
  }
  printf("result = %f\n", total);
  return 0;
}
diff --git a/VEX/test/fp1.s b/VEX/test/fp1.s
new file mode 100644
index 0000000..8d3d9fd
--- /dev/null
+++ b/VEX/test/fp1.s
@@ -0,0 +1,52 @@
	.file	"fp1.c"
	.version	"01.01"
gcc2_compiled.:
	.section	.rodata.str1.1,"aMS",@progbits,1
.LC2:
	.string	"result = %f\n"
	.section	.rodata.cst8,"aM",@progbits,8
	.align 8
.LC0:
	.long	0xeb851eb8,0x40263851	# IEEE754 double 11.11 (little-endian halves)
.text
	.align 4
.globl main
	.type	 main,@function
main:
	pushl	%ebp
	movl	%esp, %ebp
	subl	$8, %esp
	movl	$0, %eax		# i = 0
	movl	$a, %edx
	fldl	.LC0			# st(0) = 11.11
	.p2align 2
.L21:					# fill loop: a[i] = 11.11 * i
	fld	%st(0)
	pushl	%eax
	fimull	(%esp)			# multiply by i taken from the stack
	popl	%eax
	fstpl	(%edx,%eax,8)
	incl	%eax
	cmpl	$9, %eax
	jle	.L21
	fstp	%st(0)			# drop the 11.11 constant
	fldz				# s = 0.0
	movl	$0, %eax
	movl	$a, %edx
	.p2align 2
.L26:					# sum loop: s += a[i]
	faddl	(%edx,%eax,8)
	incl	%eax
	cmpl	$9, %eax
	jle	.L26
	subl	$12, %esp
	fstpl	(%esp)			# pass s to printf on the stack
	pushl	$.LC2
	call	printf
	movl	$0, %eax
	leave
	ret
.Lfe1:
	.size	 main,.Lfe1-main
	.comm	a,80,32
	.ident	"GCC: (GNU) 2.96 20000731 (Red Hat Linux 7.3 2.96-110)"
diff --git a/VEX/test/fpconst.c b/VEX/test/fpconst.c
new file mode 100644
index 0000000..c6ebdc3
--- /dev/null
+++ b/VEX/test/fpconst.c
@@ -0,0 +1,77 @@
+
+#include <stdio.h>
+#include <stdlib.h>
+
/* Each helper below resets the x87 unit (fninit), pushes one of the seven
 * architecturally defined x87 constants, and stores it to *p as a 64-bit
 * double so main() can print its exact bit image. */

/* 1.0 */
void do_fld1 ( void* p )
{
   asm __volatile__("fninit");
   asm __volatile__("fld1");
   asm __volatile__("fstpl (%0)" : : "r" (p) : "memory" );
}

/* log2(10) */
void do_fldl2t ( void* p )
{
   asm __volatile__("fninit");
   asm __volatile__("fldl2t");
   asm __volatile__("fstpl (%0)" : : "r" (p) : "memory" );
}

/* log2(e) */
void do_fldl2e ( void* p )
{
   asm __volatile__("fninit");
   asm __volatile__("fldl2e");
   asm __volatile__("fstpl (%0)" : : "r" (p) : "memory" );
}

/* pi */
void do_fldpi ( void* p )
{
   asm __volatile__("fninit");
   asm __volatile__("fldpi");
   asm __volatile__("fstpl (%0)" : : "r" (p) : "memory" );
}

/* log10(2) */
void do_fldlg2 ( void* p )
{
   asm __volatile__("fninit");
   asm __volatile__("fldlg2");
   asm __volatile__("fstpl (%0)" : : "r" (p) : "memory" );
}

/* ln(2) */
void do_fldln2 ( void* p )
{
   asm __volatile__("fninit");
   asm __volatile__("fldln2");
   asm __volatile__("fstpl (%0)" : : "r" (p) : "memory" );
}

/* +0.0 */
void do_fldz ( void* p )
{
   asm __volatile__("fninit");
   asm __volatile__("fldz");
   asm __volatile__("fstpl (%0)" : : "r" (p) : "memory" );
}
+
+typedef  unsigned char  UChar;
+
+void foo ( void (*f)(void*), char* name )
+{
+  int i;
+  UChar* b = malloc(8);
+  f(b);
+  printf("%s IRConst_F64i(0x", name);
+  for (i = 7; i >= 0; i--)
+    printf("%02x", (int)b[i]);
+  printf(")\n");
+}
+
/* Print the exact bit patterns produced by all seven x87 load-constant
 * insns.  Name strings are space-padded so the output columns align. */
int main ( void )
{
  foo( do_fld1,   "fld1  ");
  foo( do_fldl2t, "fldl2t");
  foo( do_fldl2e, "fldl2e");
  foo( do_fldpi,  "fldpi ");
  foo( do_fldlg2, "fldlg2");
  foo( do_fldln2, "fldln2");
  foo( do_fldz,   "fldz  ");
  return 0;
}
diff --git a/VEX/test/fpgames.s b/VEX/test/fpgames.s
new file mode 100644
index 0000000..222f33b
--- /dev/null
+++ b/VEX/test/fpgames.s
@@ -0,0 +1,103 @@
.file	"fpgames.c"
	.version	"01.01"
gcc2_compiled.:
	.section	.rodata.str1.1,"aMS",@progbits,1
.LC0:
	.string	"%02x "
.LC1:
	.string	"\n"
.text
	.align 4
.globl show
	.type	 show,@function
# show(): hex-dump the 108-byte fnsave area 'st' — first the 28-byte
# environment (4 bytes per row), then the 80 register-stack bytes
# (10 per row).
show:
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	subl	$4, %esp
	movl	$0, %ebx
	.p2align 2
.L21:					# env loop: bytes st[0..27]
	subl	$8, %esp
	movzbl	st(%ebx), %eax
	pushl	%eax
	pushl	$.LC0
	call	printf
	addl	$16, %esp
	testl	%ebx, %ebx
	jle	.L20
	movl	%ebx, %eax
	andl	$3, %eax
	cmpl	$3, %eax		# newline after every 4th byte
	jne	.L20
	subl	$12, %esp
	pushl	$.LC1
	call	printf
	addl	$16, %esp
.L20:
	incl	%ebx
	cmpl	$27, %ebx
	jle	.L21
	movl	$0, %ebx
	.p2align 2
.L27:					# register loop: bytes st[28..107]
	subl	$8, %esp
	movzbl	st+28(%ebx), %eax
	pushl	%eax
	pushl	$.LC0
	call	printf
	addl	$16, %esp
	testl	%ebx, %ebx
	jle	.L26
	movl	$10, %edx
	movl	%ebx, %eax
	movl	%edx, %ecx
	cltd
	idivl	%ecx
	cmpl	$9, %edx		# newline after every 10th byte
	jne	.L26
	subl	$12, %esp
	pushl	$.LC1
	call	printf
	addl	$16, %esp
.L26:
	incl	%ebx
	cmpl	$79, %ebx
	jle	.L27
	subl	$12, %esp
	pushl	$.LC1
	call	printf
	movl	-4(%ebp), %ebx
	leave
	ret
.Lfe1:
	.size	 show,.Lfe1-show
	.section	.rodata.str1.1,"aMS",@progbits,1
.LC2:
	.string	"\n\n"
.text
	.align 4
.globl main
	.type	 main,@function
# main(): dump the fnsave image of a freshly initialised FPU, then of a
# stack holding a single 1.0.
main:
	pushl	%ebp
	movl	%esp, %ebp
	subl	$8, %esp
#APP
	finit ; fnsave st
#NO_APP
	call	show
	subl	$12, %esp
	pushl	$.LC2
	call	printf
#APP
	fld1 ; fnsave st
#NO_APP
	call	show
	movl	$0, %eax
	leave
	ret
.Lfe2:
	.size	 main,.Lfe2-main
	.comm	st,108,32
	.ident	"GCC: (GNU) 2.96 20000731 (Red Hat Linux 7.3 2.96-110)"
diff --git a/VEX/test/fpspeed.c b/VEX/test/fpspeed.c
new file mode 100644
index 0000000..83996ea
--- /dev/null
+++ b/VEX/test/fpspeed.c
@@ -0,0 +1,29 @@
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+
/* FP throughput micro-benchmark: fill two 1000-entry arrays, then run
 * 5M multiply/add/sqrt operations and print the accumulated results
 * (printing prevents the work being optimised away). */
int main ( void )
{
   int i, j;
   double s, r;
   s = 0.0;
   double* a1 = malloc(1000 * sizeof(double));
   double* a2 = malloc(1000 * sizeof(double));
   if (a1 == NULL || a2 == NULL) {   /* BUGFIX: mallocs were unchecked */
      fprintf(stderr, "malloc failed\n");
      free(a1);
      free(a2);
      return 1;
   }
   for (i = 0; i < 1000; i++) {
     a1[i] = s; 
     s += 0.3374893482232;
     a2[i] = s;
   }

   s = 0.0;
   r = 0.0;
   for (j = 0; j < 5000; j++) {
      for (i = 0; i < 1000; i++) {
         s += (a1[i] - a2[i]) * (a1[i] + a2[i]) - sqrt(r + 1.0);
         r += 0.001;
      }
   }
   printf("s = %f, r = %f\n", s, r );
   free(a1);                         /* BUGFIX: arrays were leaked */
   free(a2);
   return 0;
}
diff --git a/VEX/test/fpucw.c b/VEX/test/fpucw.c
new file mode 100644
index 0000000..0130897
--- /dev/null
+++ b/VEX/test/fpucw.c
@@ -0,0 +1,43 @@
+
+#include <stdio.h>
+
/* Each helper pushes a 16-bit x87 control word and loads it with fldcw.
 * 0x037F is the power-on default: all exceptions masked, 64-bit (double
 * extended) precision, round-to-nearest. */
void fldcw_default ( void )
{
  asm(" pushw $0x037F ; fldcw (%esp) ; addl $2, %esp");
}

/* 0x037E clears the IM bit (bit 0): invalid-operation exception unmasked. */
void fldcw_exns ( void )
{
  asm(" pushw $0x037E ; fldcw (%esp) ; addl $2, %esp");
}

/* 0x007F clears the PC field (bits 8-9): 24-bit (single) precision. */
void fldcw_precision ( void )
{
  asm(" pushw $0x007F ; fldcw (%esp) ; addl $2, %esp");
}

/* 0x077F sets RC (bits 10-11) to 01: round toward negative infinity. */
void fldcw_rounding ( void )
{
  asm(" pushw $0x077F ; fldcw (%esp) ; addl $2, %esp");
}
+
+int main ( void )
+{
+   printf("default\n");
+   fldcw_default();
+   printf("\n");
+
+   printf("exns\n");
+   fldcw_exns();
+   printf("\n");
+
+   printf("precision\n");
+   fldcw_precision();
+   printf("\n");
+
+   printf("rounding\n");
+   fldcw_rounding();
+   printf("\n");
+
+   return 0;
+}
diff --git a/VEX/test/frstor.c b/VEX/test/frstor.c
new file mode 100644
index 0000000..006e19e
--- /dev/null
+++ b/VEX/test/frstor.c
@@ -0,0 +1,82 @@
+
+#include <stdio.h>
+#include <stdlib.h>
+
+void do_fsave_interesting_stuff ( void* p )
+{
+   asm __volatile__("fninit");
+   asm __volatile__("fldpi");
+   asm __volatile__("fld1");
+   asm __volatile__("fldln2");
+   asm __volatile__("fsave (%0)" : : "r" (p) : "memory" );
+}
+
+void do_fsave ( void* p )
+{
+   asm __volatile__("fsave (%0)" : : "r" (p) : "memory" );
+}
+
+void do_frstor ( void* p )
+{
+   asm __volatile__("frstor (%0)" : : "r" (p) : "memory" );
+}
+
+
+int isFPLsbs ( int i )
+{
+   int q;
+   q = 0; if (i == q || i == q+1) return 1;
+   q = 10; if (i == q || i == q+1) return 1;
+   q = 20; if (i == q || i == q+1) return 1;
+   q = 30; if (i == q || i == q+1) return 1;
+   q = 40; if (i == q || i == q+1) return 1;
+   q = 50; if (i == q || i == q+1) return 1;
+   q = 60; if (i == q || i == q+1) return 1;
+   q = 70; if (i == q || i == q+1) return 1;
+   return 0;
+}
+
+void show_fpustate ( unsigned char* buf, int hide64to80 )
+{
+   int i;
+   printf("  0   ");
+   for (i = 0; i < 14; i++)
+      printf("%02x ", buf[i]);
+   printf("\n");
+
+   printf(" 14   ");
+   for (i = 14; i < 28; i++)
+      printf("%02x ", buf[i]);
+   printf("\n");
+
+   for (i = 0; i < 80; i++) {
+      if ((i % 10) == 0)
+         printf("%3d   ", i+28);
+      if (hide64to80 && isFPLsbs(i))
+	 printf("xx ");
+      else
+         printf("%02x ", buf[i+28]);
+      if (i > 0 && ((i % 10) == 9))
+          printf("\n");
+   }
+}
+
+int main ( int argc, char** argv )
+{
+   unsigned short* buf1 = malloc(54*sizeof(short));
+   unsigned short* buf2 = malloc(54*sizeof(short));
+   int xx = argc > 1;
+   printf("Re-run with any arg to suppress least-significant\n"
+          "   16 bits of FP numbers\n");
+
+   /* Create an initial image. */
+   do_fsave_interesting_stuff(buf1);
+   show_fpustate( (unsigned char*)buf1, xx );
+
+   /* Reload it into buf2. */
+   do_frstor(buf1);
+   do_fsave(buf2);
+   show_fpustate( (unsigned char*)buf2, xx );
+
+   return 0;
+}
diff --git a/VEX/test/fsave.c b/VEX/test/fsave.c
new file mode 100644
index 0000000..293711d
--- /dev/null
+++ b/VEX/test/fsave.c
@@ -0,0 +1,68 @@
+
+#include <stdio.h>
+#include <stdlib.h>
+
+void do_fsave ( void* p )
+{
+   asm __volatile__("fninit");
+   asm __volatile__("fldpi");
+   asm __volatile__("fld1");
+   asm __volatile__("fldln2");
+   asm __volatile__("fsave (%0)" : : "r" (p) : "memory" );
+}
+
+int isFPLsbs ( int i )
+{
+   int q;
+   q = 0; if (i == q || i == q+1) return 1;
+   q = 10; if (i == q || i == q+1) return 1;
+   q = 20; if (i == q || i == q+1) return 1;
+   q = 30; if (i == q || i == q+1) return 1;
+   q = 40; if (i == q || i == q+1) return 1;
+   q = 50; if (i == q || i == q+1) return 1;
+   q = 60; if (i == q || i == q+1) return 1;
+   q = 70; if (i == q || i == q+1) return 1;
+   return 0;
+}
+
+void show_fpustate ( unsigned char* buf, int hide64to80 )
+{
+   int i;
+   printf("  0   ");
+   for (i = 0; i < 14; i++)
+      printf("%02x ", buf[i]);
+   printf("\n");
+
+   printf(" 14   ");
+   for (i = 14; i < 28; i++)
+      printf("%02x ", buf[i]);
+   printf("\n");
+
+   for (i = 0; i < 80; i++) {
+      if ((i % 10) == 0)
+         printf("%3d   ", i+28);
+      if (hide64to80 && isFPLsbs(i))
+	 printf("xx ");
+      else
+         printf("%02x ", buf[i+28]);
+      if (i > 0 && ((i % 10) == 9))
+          printf("\n");
+   }
+}
+
+int main ( int argc, char** argv )
+{
+   int i;
+   unsigned char* buf = malloc(108);
+   int xx = argc > 1;
+   printf("Re-run with any arg to suppress least-significant\n"
+          "   16 bits of FP numbers\n");
+   for (i = 0; i < 108; i++)
+      buf[i] = 0xAA;
+
+   /* dump FPU state in buf, and show it. */
+   do_fsave(buf);
+   show_fpustate( buf, xx );
+
+   return 0;
+}
diff --git a/VEX/test/fstenv.c b/VEX/test/fstenv.c
new file mode 100644
index 0000000..331714d
--- /dev/null
+++ b/VEX/test/fstenv.c
@@ -0,0 +1,22 @@
+
+#include <stdio.h>
+#include <stdlib.h>
+
+void do_fstenv ( void* p )
+{
+   asm("fldpi ; fld1; fldln2 ; movl 8(%esp), %eax ; fstenv (%eax)");
+}
+
+int main ( void )
+{
+   int i;
+   unsigned int* buf = malloc(7*sizeof(int));
+   do_fstenv(buf);
+   for (i = 0; i < 7; i++) {
+      printf("%08x ", buf[i]);
+      if (i > 0 && ((i % 6) == 5))
+          printf("\n");
+   }
+   printf("\n");
+   return 0;
+}
diff --git a/VEX/test/fxsave.c b/VEX/test/fxsave.c
new file mode 100644
index 0000000..6557907
--- /dev/null
+++ b/VEX/test/fxsave.c
@@ -0,0 +1,136 @@
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <malloc.h>
+#include <string.h>
+
+const unsigned int vec0[4]
+   = { 0x12345678, 0x11223344, 0x55667788, 0x87654321 };
+
+const unsigned int vec1[4]
+   = { 0xABCDEF01, 0xAABBCCDD, 0xEEFF0011, 0x10FEDCBA };
+
+const unsigned int vecZ[4]
+   = { 0, 0, 0, 0 };
+
+void do_fxsave ( void* p ) {
+   asm __volatile__("fxsave (%0)" : : "r" (p) : "memory" );
+}
+
+void do_fxrstor ( void* p ) {
+   asm __volatile__("fxrstor (%0)" : : "r" (p) : "memory" );
+}
+
+void do_zeroise ( void )
+{
+   asm __volatile__("finit");
+   asm __volatile__(
+    "fldz\n\t"
+    "fldz\n\t"
+    "fldz\n\t"
+    "fldz\n\t"
+    "fldz\n\t"
+    "fldz\n\t"
+    "fldz\n\t"
+    "fldz\n\t"
+    "finit\n");
+   asm __volatile__("movups vecZ, %xmm0");
+   asm __volatile__("movups vecZ, %xmm1");
+   asm __volatile__("movups vecZ, %xmm2");
+   asm __volatile__("movups vecZ, %xmm3");
+   asm __volatile__("movups vecZ, %xmm4");
+   asm __volatile__("movups vecZ, %xmm5");
+   asm __volatile__("movups vecZ, %xmm6");
+   asm __volatile__("movups vecZ, %xmm7");
+   asm __volatile__(
+      "pushl $0\n\t"
+      "ldmxcsr 0(%esp)\n\t"
+      "addl $4,%esp\n");
+}
+
+/* set up the FP and SSE state, and then dump it. */
+void do_setup_then_fxsave ( void* p )
+{
+   asm __volatile__("finit");
+   asm __volatile__("fldpi");
+   asm __volatile__("fld1");
+   asm __volatile__("fldln2");
+   asm __volatile__("fldlg2");
+   asm __volatile__("fld %st(3)");
+   asm __volatile__("fld %st(3)");
+   asm __volatile__("movups vec0, %xmm0");
+   asm __volatile__("movups vec1, %xmm1");
+   asm __volatile__("xorps %xmm2, %xmm2");
+   asm __volatile__("movaps %xmm2, %xmm3");
+   asm __volatile__("movaps %xmm2, %xmm4");
+   asm __volatile__("movaps %xmm2, %xmm5");
+   asm __volatile__("movaps %xmm2, %xmm6");
+   asm __volatile__("movaps %xmm1, %xmm7");
+   asm __volatile__("xorps %xmm0, %xmm7");
+   do_fxsave (p);
+}
+
+int isFPLsbs ( int i )
+{
+   int q;
+   q = 32; if (i == q || i == q+1) return 1;
+   q = 48; if (i == q || i == q+1) return 1;
+   q = 64; if (i == q || i == q+1) return 1;
+   q = 80; if (i == q || i == q+1) return 1;
+   q = 96; if (i == q || i == q+1) return 1;
+   q = 112; if (i == q || i == q+1) return 1;
+   q = 128; if (i == q || i == q+1) return 1;
+   q = 144; if (i == q || i == q+1) return 1;
+   return 0;
+}
+
+void show ( unsigned char* buf, int xx )
+{
+   int i;
+   for (i = 0; i < 512; i++) {
+      if ((i % 16) == 0)
+         printf("%3d   ", i);
+      if (xx && isFPLsbs(i))
+	 printf("xx ");
+      else
+         printf("%02x ", buf[i]);
+      if (i > 0 && ((i % 16) == 15))
+          printf("\n");
+   }
+}
+
+
+int main ( int argc, char** argv )
+{
+   unsigned char* buf1 = memalign(16,512);
+   unsigned char* buf2 = memalign(16,512);
+   unsigned char* buf3 = memalign(16,512);
+   int xx = argc > 1;
+   printf("Re-run with any arg to suppress least-significant\n"
+          "   16 bits of FP numbers\n");
+   memset(buf1, 0x55, 512);
+   memset(buf2, 0x55, 512);
+   memset(buf3, 0x55, 512);
+
+   /* Load up x87/xmm state and dump it. */
+   do_setup_then_fxsave(buf1);
+   printf("\nBEFORE\n");
+   show(buf1, xx);
+
+   /* Zeroise x87/xmm state and dump it, to show that the
+      regs have been cleared out. */
+   do_zeroise();
+   do_fxsave(buf2);
+   printf("\nZEROED\n");
+   show(buf2, xx);
+
+   /* Reload x87/xmm state from buf1 and dump it in buf3. */
+   do_fxrstor(buf1);
+   do_fxsave(buf3);
+   printf("\nRESTORED\n");
+   show(buf3, xx);
+
+   free(buf1); free(buf2); free(buf3);
+
+   return 0;
+}
diff --git a/VEX/test/mmxtest.c b/VEX/test/mmxtest.c
new file mode 100644
index 0000000..e1ca547
--- /dev/null
+++ b/VEX/test/mmxtest.c
@@ -0,0 +1,605 @@
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define HAVE_SSE2 1
+
+/* DO NOT COMPILE WITH -O/-O2/-O3 !  GENERATES INVALID ASSEMBLY. */
+
+
+/*   mmx.h
+
+   MultiMedia eXtensions GCC interface library for IA32.
+
+   To use this library, simply include this header file
+   and compile with GCC.  You MUST have inlining enabled
+   in order for mmx_ok() to work; this can be done by
+   simply using -O on the GCC command line.
+
+   Compiling with -DMMX_TRACE will cause detailed trace
+   output to be sent to stderr for each mmx operation.
+   This adds lots of code, and obviously slows execution to
+   a crawl, but can be very useful for debugging.
+
+   THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY
+   EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT
+   LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+   AND FITNESS FOR ANY PARTICULAR PURPOSE.
+
+   June 11, 1998 by H. Dietz and R. Fisher
+*/
+
+
+/*   The type of a value that fits in an MMX register
+   (note that long long constant values MUST be suffixed
+    by LL and unsigned long long values by ULL, lest
+    they be truncated by the compiler)
+*/
+typedef   union {
+   long long            q;   /* Quadword (64-bit) value */
+   unsigned long long   uq;   /* Unsigned Quadword */
+   int                  d[2];   /* 2 Doubleword (32-bit) values */
+   unsigned int         ud[2];   /* 2 Unsigned Doubleword */
+   short                w[4];   /* 4 Word (16-bit) values */
+   unsigned short       uw[4];   /* 4 Unsigned Word */
+   char                 b[8];   /* 8 Byte (8-bit) values */
+   unsigned char        ub[8];   /* 8 Unsigned Byte */
+} mmx_t;
+
+
+/*   Function to test if mmx instructions are supported...
+*/
+inline extern int
+mmx_ok(void)
+{
+   /* Returns 1 if mmx instructions are ok,
+      0 if hardware does not support mmx
+   */
+   register int ok = 0;
+
+   __asm__ __volatile__ (
+      /* Get CPU version information */
+      "movl $1, %%eax\n\t"
+      "cpuid\n\t"
+      "movl %%edx, %0"
+      : "=a" (ok)
+      : /* no input */
+   );
+   return((ok & 0x800000) == 0x800000);
+}
+
+
+/*   Helper functions for the instruction macros that follow...
+   (note that memory-to-register, m2r, instructions are nearly
+    as efficient as register-to-register, r2r, instructions;
+    however, memory-to-memory instructions are really simulated
+    as a convenience, and are only 1/3 as efficient)
+*/
+#ifdef   MMX_TRACE
+
+/*   Include the stuff for printing a trace to stderr...
+*/
+
+#include <stdio.h>
+
+#define   mmx_m2r(op, mem, reg) \
+   { \
+      mmx_t mmx_trace; \
+      mmx_trace = (mem); \
+      fprintf(stderr, #op "_m2r(" #mem "=0x%016llx, ", mmx_trace.q); \
+      __asm__ __volatile__ ("movq %%" #reg ", %0" \
+                  : "=X" (mmx_trace) \
+                  : /* nothing */ ); \
+      fprintf(stderr, #reg "=0x%016llx) => ", mmx_trace.q); \
+      __asm__ __volatile__ (#op " %0, %%" #reg \
+                  : /* nothing */ \
+                  : "X" (mem)); \
+      __asm__ __volatile__ ("movq %%" #reg ", %0" \
+                  : "=X" (mmx_trace) \
+                  : /* nothing */ ); \
+      fprintf(stderr, #reg "=0x%016llx\n", mmx_trace.q); \
+   }
+
+#define   mmx_r2m(op, reg, mem) \
+   { \
+      mmx_t mmx_trace; \
+      __asm__ __volatile__ ("movq %%" #reg ", %0" \
+                  : "=X" (mmx_trace) \
+                  : /* nothing */ ); \
+      fprintf(stderr, #op "_r2m(" #reg "=0x%016llx, ", mmx_trace.q); \
+      mmx_trace = (mem); \
+      fprintf(stderr, #mem "=0x%016llx) => ", mmx_trace.q); \
+      __asm__ __volatile__ (#op " %%" #reg ", %0" \
+                  : "=X" (mem) \
+                  : /* nothing */ ); \
+      mmx_trace = (mem); \
+      fprintf(stderr, #mem "=0x%016llx\n", mmx_trace.q); \
+   }
+
+#define   mmx_r2r(op, regs, regd) \
+   { \
+      mmx_t mmx_trace; \
+      __asm__ __volatile__ ("movq %%" #regs ", %0" \
+                  : "=X" (mmx_trace) \
+                  : /* nothing */ ); \
+      fprintf(stderr, #op "_r2r(" #regs "=0x%016llx, ", mmx_trace.q); \
+      __asm__ __volatile__ ("movq %%" #regd ", %0" \
+                  : "=X" (mmx_trace) \
+                  : /* nothing */ ); \
+      fprintf(stderr, #regd "=0x%016llx) => ", mmx_trace.q); \
+      __asm__ __volatile__ (#op " %" #regs ", %" #regd); \
+      __asm__ __volatile__ ("movq %%" #regd ", %0" \
+                  : "=X" (mmx_trace) \
+                  : /* nothing */ ); \
+      fprintf(stderr, #regd "=0x%016llx\n", mmx_trace.q); \
+   }
+
+#define   mmx_m2m(op, mems, memd) \
+   { \
+      mmx_t mmx_trace; \
+      mmx_trace = (mems); \
+      fprintf(stderr, #op "_m2m(" #mems "=0x%016llx, ", mmx_trace.q); \
+      mmx_trace = (memd); \
+      fprintf(stderr, #memd "=0x%016llx) => ", mmx_trace.q); \
+      __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
+                  #op " %1, %%mm0\n\t" \
+                  "movq %%mm0, %0" \
+                  : "=X" (memd) \
+                  : "X" (mems)); \
+      mmx_trace = (memd); \
+      fprintf(stderr, #memd "=0x%016llx\n", mmx_trace.q); \
+   }
+
+#else
+
+/*   These macros are a lot simpler without the tracing...
+*/
+
+#define   mmx_m2r(op, mem, reg) \
+   __asm__ __volatile__ (#op " %0, %%" #reg \
+               : /* nothing */ \
+               : "X" (mem))
+
+#define   mmx_r2m(op, reg, mem) \
+   __asm__ __volatile__ (#op " %%" #reg ", %0" \
+               : "=X" (mem) \
+               : /* nothing */ )
+
+#define   mmx_r2r(op, regs, regd) \
+   __asm__ __volatile__ (#op " %" #regs ", %" #regd)
+
+#define   mmx_m2m(op, mems, memd) \
+   __asm__ __volatile__ ("movq %0, %%mm0\n\t" \
+               #op " %1, %%mm0\n\t" \
+               "movq %%mm0, %0" \
+               : "=X" (memd) \
+               : "X" (mems))
+
+#endif
+
+
+/*   1x64 MOVe Quadword
+   (this is both a load and a store...
+    in fact, it is the only way to store)
+*/
+#define   movq_m2r(var, reg)     mmx_m2r(movq, var, reg)
+#define   movq_r2m(reg, var)     mmx_r2m(movq, reg, var)
+#define   movq_r2r(regs, regd)   mmx_r2r(movq, regs, regd)
+#define   movq(vars, vard) \
+   __asm__ __volatile__ ("movq %1, %%mm0\n\t" \
+               "movq %%mm0, %0" \
+               : "=X" (vard) \
+               : "X" (vars))
+
+
+/*   1x64 MOVe Doubleword
+   (like movq, this is both load and store...
+    but is most useful for moving things between
+    mmx registers and ordinary registers)
+*/
+#define   movd_m2r(var, reg)     mmx_m2r(movd, var, reg)
+#define   movd_r2m(reg, var)     mmx_r2m(movd, reg, var)
+#define   movd_r2r(regs, regd)   mmx_r2r(movd, regs, regd)
+#define   movd(vars, vard) \
+   __asm__ __volatile__ ("movd %1, %%mm0\n\t" \
+               "movd %%mm0, %0" \
+               : "=X" (vard) \
+               : "X" (vars))
+
+
+/*   2x32, 4x16, and 8x8 Parallel ADDs
+*/
+#define   paddd_m2r(var, reg)     mmx_m2r(paddd, var, reg)
+#define   paddd_r2r(regs, regd)   mmx_r2r(paddd, regs, regd)
+#define   paddd(vars, vard)       mmx_m2m(paddd, vars, vard)
+
+#define   paddw_m2r(var, reg)     mmx_m2r(paddw, var, reg)
+#define   paddw_r2r(regs, regd)   mmx_r2r(paddw, regs, regd)
+#define   paddw(vars, vard)       mmx_m2m(paddw, vars, vard)
+
+#define   paddb_m2r(var, reg)     mmx_m2r(paddb, var, reg)
+#define   paddb_r2r(regs, regd)   mmx_r2r(paddb, regs, regd)
+#define   paddb(vars, vard)       mmx_m2m(paddb, vars, vard)
+
+
+/*   4x16 and 8x8 Parallel ADDs using Saturation arithmetic
+*/
+#define   paddsw_m2r(var, reg)     mmx_m2r(paddsw, var, reg)
+#define   paddsw_r2r(regs, regd)   mmx_r2r(paddsw, regs, regd)
+#define   paddsw(vars, vard)       mmx_m2m(paddsw, vars, vard)
+
+#define   paddsb_m2r(var, reg)     mmx_m2r(paddsb, var, reg)
+#define   paddsb_r2r(regs, regd)   mmx_r2r(paddsb, regs, regd)
+#define   paddsb(vars, vard)       mmx_m2m(paddsb, vars, vard)
+
+
+/*   4x16 and 8x8 Parallel ADDs using Unsigned Saturation arithmetic
+*/
+#define   paddusw_m2r(var, reg)     mmx_m2r(paddusw, var, reg)
+#define   paddusw_r2r(regs, regd)   mmx_r2r(paddusw, regs, regd)
+#define   paddusw(vars, vard)       mmx_m2m(paddusw, vars, vard)
+
+#define   paddusb_m2r(var, reg)     mmx_m2r(paddusb, var, reg)
+#define   paddusb_r2r(regs, regd)   mmx_r2r(paddusb, regs, regd)
+#define   paddusb(vars, vard)       mmx_m2m(paddusb, vars, vard)
+
+
+/*   2x32, 4x16, and 8x8 Parallel SUBs
+*/
+#define   psubd_m2r(var, reg)     mmx_m2r(psubd, var, reg)
+#define   psubd_r2r(regs, regd)   mmx_r2r(psubd, regs, regd)
+#define   psubd(vars, vard)       mmx_m2m(psubd, vars, vard)
+
+#define   psubw_m2r(var, reg)     mmx_m2r(psubw, var, reg)
+#define   psubw_r2r(regs, regd)   mmx_r2r(psubw, regs, regd)
+#define   psubw(vars, vard)       mmx_m2m(psubw, vars, vard)
+
+#define   psubb_m2r(var, reg)     mmx_m2r(psubb, var, reg)
+#define   psubb_r2r(regs, regd)   mmx_r2r(psubb, regs, regd)
+#define   psubb(vars, vard)       mmx_m2m(psubb, vars, vard)
+
+
+/*   4x16 and 8x8 Parallel SUBs using Saturation arithmetic
+*/
+#define   psubsw_m2r(var, reg)     mmx_m2r(psubsw, var, reg)
+#define   psubsw_r2r(regs, regd)   mmx_r2r(psubsw, regs, regd)
+#define   psubsw(vars, vard)       mmx_m2m(psubsw, vars, vard)
+
+#define   psubsb_m2r(var, reg)     mmx_m2r(psubsb, var, reg)
+#define   psubsb_r2r(regs, regd)   mmx_r2r(psubsb, regs, regd)
+#define   psubsb(vars, vard)       mmx_m2m(psubsb, vars, vard)
+
+
+/*   4x16 and 8x8 Parallel SUBs using Unsigned Saturation arithmetic
+*/
+#define   psubusw_m2r(var, reg)     mmx_m2r(psubusw, var, reg)
+#define   psubusw_r2r(regs, regd)   mmx_r2r(psubusw, regs, regd)
+#define   psubusw(vars, vard)       mmx_m2m(psubusw, vars, vard)
+
+#define   psubusb_m2r(var, reg)     mmx_m2r(psubusb, var, reg)
+#define   psubusb_r2r(regs, regd)   mmx_r2r(psubusb, regs, regd)
+#define   psubusb(vars, vard)       mmx_m2m(psubusb, vars, vard)
+
+
+/*   4x16 Parallel MULs giving Low 4x16 portions of results
+*/
+#define   pmullw_m2r(var, reg)     mmx_m2r(pmullw, var, reg)
+#define   pmullw_r2r(regs, regd)   mmx_r2r(pmullw, regs, regd)
+#define   pmullw(vars, vard)       mmx_m2m(pmullw, vars, vard)
+
+
+/*   4x16 Parallel MULs giving High 4x16 portions of results
+*/
+#define   pmulhw_m2r(var, reg)     mmx_m2r(pmulhw, var, reg)
+#define   pmulhw_r2r(regs, regd)   mmx_r2r(pmulhw, regs, regd)
+#define   pmulhw(vars, vard)       mmx_m2m(pmulhw, vars, vard)
+
+
+/*   4x16->2x32 Parallel Mul-ADD
+   (muls like pmullw, then adds adjacent 16-bit fields
+    in the multiply result to make the final 2x32 result)
+*/
+#define   pmaddwd_m2r(var, reg)     mmx_m2r(pmaddwd, var, reg)
+#define   pmaddwd_r2r(regs, regd)   mmx_r2r(pmaddwd, regs, regd)
+#define   pmaddwd(vars, vard)       mmx_m2m(pmaddwd, vars, vard)
+
+
+/*   1x64 bitwise AND
+*/
+#define   pand_m2r(var, reg)     mmx_m2r(pand, var, reg)
+#define   pand_r2r(regs, regd)   mmx_r2r(pand, regs, regd)
+#define   pand(vars, vard)       mmx_m2m(pand, vars, vard)
+
+
+/*   1x64 bitwise AND with Not the destination
+*/
+#define   pandn_m2r(var, reg)     mmx_m2r(pandn, var, reg)
+#define   pandn_r2r(regs, regd)   mmx_r2r(pandn, regs, regd)
+#define   pandn(vars, vard)       mmx_m2m(pandn, vars, vard)
+
+
+/*   1x64 bitwise OR
+*/
+#define   por_m2r(var, reg)     mmx_m2r(por, var, reg)
+#define   por_r2r(regs, regd)   mmx_r2r(por, regs, regd)
+#define   por(vars, vard)       mmx_m2m(por, vars, vard)
+
+
+/*   1x64 bitwise eXclusive OR
+*/
+#define   pxor_m2r(var, reg)     mmx_m2r(pxor, var, reg)
+#define   pxor_r2r(regs, regd)   mmx_r2r(pxor, regs, regd)
+#define   pxor(vars, vard)       mmx_m2m(pxor, vars, vard)
+
+
+/*   2x32, 4x16, and 8x8 Parallel CoMPare for EQuality
+   (resulting fields are either 0 or -1)
+*/
+#define   pcmpeqd_m2r(var, reg)     mmx_m2r(pcmpeqd, var, reg)
+#define   pcmpeqd_r2r(regs, regd)   mmx_r2r(pcmpeqd, regs, regd)
+#define   pcmpeqd(vars, vard)       mmx_m2m(pcmpeqd, vars, vard)
+
+#define   pcmpeqw_m2r(var, reg)     mmx_m2r(pcmpeqw, var, reg)
+#define   pcmpeqw_r2r(regs, regd)   mmx_r2r(pcmpeqw, regs, regd)
+#define   pcmpeqw(vars, vard)       mmx_m2m(pcmpeqw, vars, vard)
+
+#define   pcmpeqb_m2r(var, reg)     mmx_m2r(pcmpeqb, var, reg)
+#define   pcmpeqb_r2r(regs, regd)   mmx_r2r(pcmpeqb, regs, regd)
+#define   pcmpeqb(vars, vard)       mmx_m2m(pcmpeqb, vars, vard)
+
+
+/*   2x32, 4x16, and 8x8 Parallel CoMPare for Greater Than
+   (resulting fields are either 0 or -1)
+*/
+#define   pcmpgtd_m2r(var, reg)   mmx_m2r(pcmpgtd, var, reg)
+#define   pcmpgtd_r2r(regs, regd)   mmx_r2r(pcmpgtd, regs, regd)
+#define   pcmpgtd(vars, vard)   mmx_m2m(pcmpgtd, vars, vard)
+
+#define   pcmpgtw_m2r(var, reg)   mmx_m2r(pcmpgtw, var, reg)
+#define   pcmpgtw_r2r(regs, regd)   mmx_r2r(pcmpgtw, regs, regd)
+#define   pcmpgtw(vars, vard)   mmx_m2m(pcmpgtw, vars, vard)
+
+#define   pcmpgtb_m2r(var, reg)   mmx_m2r(pcmpgtb, var, reg)
+#define   pcmpgtb_r2r(regs, regd)   mmx_r2r(pcmpgtb, regs, regd)
+#define   pcmpgtb(vars, vard)   mmx_m2m(pcmpgtb, vars, vard)
+
+
+/*   1x64, 2x32, and 4x16 Parallel Shift Left Logical
+*/
+#define   psllq_m2r(var, reg)   mmx_m2r(psllq, var, reg)
+#define   psllq_r2r(regs, regd)   mmx_r2r(psllq, regs, regd)
+#define   psllq(vars, vard)   mmx_m2m(psllq, vars, vard)
+
+#define   pslld_m2r(var, reg)   mmx_m2r(pslld, var, reg)
+#define   pslld_r2r(regs, regd)   mmx_r2r(pslld, regs, regd)
+#define   pslld(vars, vard)   mmx_m2m(pslld, vars, vard)
+
+#define   psllw_m2r(var, reg)   mmx_m2r(psllw, var, reg)
+#define   psllw_r2r(regs, regd)   mmx_r2r(psllw, regs, regd)
+#define   psllw(vars, vard)   mmx_m2m(psllw, vars, vard)
+
+
+/*   1x64, 2x32, and 4x16 Parallel Shift Right Logical
+*/
+#define   psrlq_m2r(var, reg)   mmx_m2r(psrlq, var, reg)
+#define   psrlq_r2r(regs, regd)   mmx_r2r(psrlq, regs, regd)
+#define   psrlq(vars, vard)   mmx_m2m(psrlq, vars, vard)
+
+#define   psrld_m2r(var, reg)   mmx_m2r(psrld, var, reg)
+#define   psrld_r2r(regs, regd)   mmx_r2r(psrld, regs, regd)
+#define   psrld(vars, vard)   mmx_m2m(psrld, vars, vard)
+
+#define   psrlw_m2r(var, reg)   mmx_m2r(psrlw, var, reg)
+#define   psrlw_r2r(regs, regd)   mmx_r2r(psrlw, regs, regd)
+#define   psrlw(vars, vard)   mmx_m2m(psrlw, vars, vard)
+
+
+/*   2x32 and 4x16 Parallel Shift Right Arithmetic
+*/
+#define   psrad_m2r(var, reg)   mmx_m2r(psrad, var, reg)
+#define   psrad_r2r(regs, regd)   mmx_r2r(psrad, regs, regd)
+#define   psrad(vars, vard)   mmx_m2m(psrad, vars, vard)
+
+#define   psraw_m2r(var, reg)   mmx_m2r(psraw, var, reg)
+#define   psraw_r2r(regs, regd)   mmx_r2r(psraw, regs, regd)
+#define   psraw(vars, vard)   mmx_m2m(psraw, vars, vard)
+
+
+/*   2x32->4x16 and 4x16->8x8 PACK and Signed Saturate
+   (packs source and dest fields into dest in that order)
+*/
+#define   packssdw_m2r(var, reg)   mmx_m2r(packssdw, var, reg)
+#define   packssdw_r2r(regs, regd) mmx_r2r(packssdw, regs, regd)
+#define   packssdw(vars, vard)   mmx_m2m(packssdw, vars, vard)
+
+#define   packsswb_m2r(var, reg)   mmx_m2r(packsswb, var, reg)
+#define   packsswb_r2r(regs, regd) mmx_r2r(packsswb, regs, regd)
+#define   packsswb(vars, vard)   mmx_m2m(packsswb, vars, vard)
+
+
+/*   4x16->8x8 PACK and Unsigned Saturate
+   (packs source and dest fields into dest in that order)
+*/
+#define   packuswb_m2r(var, reg)   mmx_m2r(packuswb, var, reg)
+#define   packuswb_r2r(regs, regd) mmx_r2r(packuswb, regs, regd)
+#define   packuswb(vars, vard)   mmx_m2m(packuswb, vars, vard)
+
+
+/*   2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK Low
+   (interleaves low half of dest with low half of source
+    as padding in each result field)
+*/
+#define   punpckldq_m2r(var, reg)   mmx_m2r(punpckldq, var, reg)
+#define   punpckldq_r2r(regs, regd) mmx_r2r(punpckldq, regs, regd)
+#define   punpckldq(vars, vard)   mmx_m2m(punpckldq, vars, vard)
+
+#define   punpcklwd_m2r(var, reg)   mmx_m2r(punpcklwd, var, reg)
+#define   punpcklwd_r2r(regs, regd) mmx_r2r(punpcklwd, regs, regd)
+#define   punpcklwd(vars, vard)   mmx_m2m(punpcklwd, vars, vard)
+
+#define   punpcklbw_m2r(var, reg)   mmx_m2r(punpcklbw, var, reg)
+#define   punpcklbw_r2r(regs, regd) mmx_r2r(punpcklbw, regs, regd)
+#define   punpcklbw(vars, vard)   mmx_m2m(punpcklbw, vars, vard)
+
+
+/*   2x32->1x64, 4x16->2x32, and 8x8->4x16 UNPaCK High
+   (interleaves high half of dest with high half of source
+    as padding in each result field)
+*/
+#define   punpckhdq_m2r(var, reg)   mmx_m2r(punpckhdq, var, reg)
+#define   punpckhdq_r2r(regs, regd) mmx_r2r(punpckhdq, regs, regd)
+#define   punpckhdq(vars, vard)   mmx_m2m(punpckhdq, vars, vard)
+
+#define   punpckhwd_m2r(var, reg)   mmx_m2r(punpckhwd, var, reg)
+#define   punpckhwd_r2r(regs, regd) mmx_r2r(punpckhwd, regs, regd)
+#define   punpckhwd(vars, vard)   mmx_m2m(punpckhwd, vars, vard)
+
+#define   punpckhbw_m2r(var, reg)   mmx_m2r(punpckhbw, var, reg)
+#define   punpckhbw_r2r(regs, regd) mmx_r2r(punpckhbw, regs, regd)
+#define   punpckhbw(vars, vard)   mmx_m2m(punpckhbw, vars, vard)
+
+
+/* 1x64 add/sub -- this is in sse2, not in mmx. */
+#define   paddq_m2r(var, reg)     mmx_m2r(paddq, var, reg)
+#define   paddq_r2r(regs, regd)   mmx_r2r(paddq, regs, regd)
+#define   paddq(vars, vard)       mmx_m2m(paddq, vars, vard)
+
+#define   psubq_m2r(var, reg)     mmx_m2r(psubq, var, reg)
+#define   psubq_r2r(regs, regd)   mmx_r2r(psubq, regs, regd)
+#define   psubq(vars, vard)       mmx_m2m(psubq, vars, vard)
+
+
+
+/*   Empty MMx State
+   (used to clean-up when going from mmx to float use
+    of the registers that are shared by both; note that
+    there is no float-to-mmx operation needed, because
+    only the float tag word info is corruptible)
+*/
+#ifdef   MMX_TRACE
+
+#define   emms() \
+   { \
+      fprintf(stderr, "emms()\n"); \
+      __asm__ __volatile__ ("emms"); \
+   }
+
+#else
+
+#define   emms()         __asm__ __volatile__ ("emms")
+
+#endif
+
+void mkRand( mmx_t* mm )
+{
+  mm->uw[0] = 0xFFFF & (random() >> 7);
+  mm->uw[1] = 0xFFFF & (random() >> 7);
+  mm->uw[2] = 0xFFFF & (random() >> 7);
+  mm->uw[3] = 0xFFFF & (random() >> 7);
+}
+
+
+
+int main( void )
+{
+  int i;
+  //   int rval;
+   mmx_t ma;
+   mmx_t mb;
+   mmx_t ma0, mb0;
+   movq_r2r(mm0, mm1);
+
+//   rval = mmx_ok();
+
+   /* Announce return value of mmx_ok() */
+//   printf("Value returned from init was %x.", rval);
+//   printf(" (Indicates MMX %s available)\n\n",(rval)? "is" : "not");
+//   fflush(stdout); fflush(stdout);
+
+//   if(rval)
+
+#define do_test(_name, _operation) \
+   for (i = 0; i < 25000; i++) {                                 \
+      mkRand(&ma);                                               \
+      mkRand(&mb);                                               \
+      ma0 = ma; mb0 = mb;                                        \
+      _operation;                                                \
+      fprintf(stdout, "%s ( %016llx, %016llx ) -> %016llx\n",    \
+                     _name, ma0.q, mb0.q, mb.q);                 \
+      fflush(stdout);                                            \
+   }
+
+
+   {
+     do_test("paddd", paddd(ma,mb));
+     do_test("paddw", paddw(ma,mb));
+     do_test("paddb", paddb(ma,mb));
+
+     do_test("paddsw", paddsw(ma,mb));
+     do_test("paddsb", paddsb(ma,mb));
+
+     do_test("paddusw", paddusw(ma,mb));
+     do_test("paddusb", paddusb(ma,mb));
+
+     do_test("psubd", psubd(ma,mb));
+     do_test("psubw", psubw(ma,mb));
+     do_test("psubb", psubb(ma,mb));
+
+     do_test("psubsw", psubsw(ma,mb));
+     do_test("psubsb", psubsb(ma,mb));
+
+     do_test("psubusw", psubusw(ma,mb));
+     do_test("psubusb", psubusb(ma,mb));
+
+     do_test("pmulhw", pmulhw(ma,mb));
+     do_test("pmullw", pmullw(ma,mb));
+
+     do_test("pmaddwd", pmaddwd(ma,mb));
+
+     do_test("pcmpeqd", pcmpeqd(ma,mb));
+     do_test("pcmpeqw", pcmpeqw(ma,mb));
+     do_test("pcmpeqb", pcmpeqb(ma,mb));
+
+     do_test("pcmpgtd", pcmpgtd(ma,mb));
+     do_test("pcmpgtw", pcmpgtw(ma,mb));
+     do_test("pcmpgtb", pcmpgtb(ma,mb));
+
+     do_test("packssdw", packssdw(ma,mb));
+     do_test("packsswb", packsswb(ma,mb));
+     do_test("packuswb", packuswb(ma,mb));
+
+     do_test("punpckhdq", punpckhdq(ma,mb));
+     do_test("punpckhwd", punpckhwd(ma,mb));
+     do_test("punpckhbw", punpckhbw(ma,mb));
+
+     do_test("punpckldq", punpckldq(ma,mb));
+     do_test("punpcklwd", punpcklwd(ma,mb));
+     do_test("punpcklbw", punpcklbw(ma,mb));
+
+     do_test("pand", pand(ma,mb));
+     do_test("pandn", pandn(ma,mb));
+     do_test("por", por(ma,mb));
+     do_test("pxor", pxor(ma,mb));
+
+     do_test("psllq", psllq(ma,mb));
+     do_test("pslld", pslld(ma,mb));
+     do_test("psllw", psllw(ma,mb));
+
+     do_test("psrlq", psrlq(ma,mb));
+     do_test("psrld", psrld(ma,mb));
+     do_test("psrlw", psrlw(ma,mb));
+
+     do_test("psrad", psrad(ma,mb));
+     do_test("psraw", psraw(ma,mb));
+
+#if HAVE_SSE2
+     do_test("paddq", paddq(ma,mb));
+     do_test("psubq", psubq(ma,mb));
+#endif
+
+     emms();
+   }
+
+   /* Clean-up and exit nicely */
+   exit(0);
+}
diff --git a/VEX/test/mxcsr.c b/VEX/test/mxcsr.c
new file mode 100644
index 0000000..cc92f54
--- /dev/null
+++ b/VEX/test/mxcsr.c
@@ -0,0 +1,45 @@
+
+#include <stdio.h>
+
+void mxcsr_default ( void )
+{
+  asm(" pushl $0x1F80 ; ldmxcsr (%esp) ; addl $4, %esp");
+}
+
+void mxcsr_exns ( void )
+{
+  asm(" pushl $0x1F00 ; ldmxcsr (%esp) ; addl $4, %esp");
+}
+
+/* PIII doesn't have DAZ, so this segfaults (!) on PIII. */
+void mxcsr_daz ( void )
+{
+  asm(" pushl $0x1FC0 ; ldmxcsr (%esp) ; addl $4, %esp");
+}
+
+void mxcsr_fz ( void )
+{
+  asm(" pushl $0x9F80 ; ldmxcsr (%esp) ; addl $4, %esp");
+}
+
+
+int main ( void )
+{
+   printf("default\n");
+   mxcsr_default();
+   printf("\n");
+
+   printf("exns\n");
+   mxcsr_exns();
+   printf("\n");
+
+   printf("daz\n");
+   mxcsr_daz();
+   printf("\n");
+
+   printf("fz\n");
+   mxcsr_fz();
+   printf("\n");
+
+   return 0;
+}
diff --git a/VEX/test/rounderr.c b/VEX/test/rounderr.c
new file mode 100644
index 0000000..0055a09
--- /dev/null
+++ b/VEX/test/rounderr.c
@@ -0,0 +1,97 @@
+
+/* peach (7400, altivec supported, 450MHz, gcc -O) 
+   m1 = 1.20000000000000018,  exp = 1.19999999999999996
+   m2 = 1.19999999999998885,  exp = 1.19999999999999996
+*/
+
+/* peach (7400, altivec supported, 450MHz, gcc) 
+   m1 = 1.20000000000000018,  exp = 1.19999999999999996
+   m2 = 1.19999999999998885,  exp = 1.19999999999999996
+*/
+
+/* phoenix, gcc -O
+   m1 = 1.19999999999999996,  exp = 1.19999999999999996
+   m2 = 1.19999999999999996,  exp = 1.19999999999999996
+*/
+
+/* phoenix, icc -O
+   m1 = 1.19999999999999996,  exp = 1.19999999999999996
+   m2 = 1.19999999999999996,  exp = 1.19999999999999996
+*/
+
+/* phoenix, gcc -O, iropt-level=2
+   m1 = 1.20000000000000040,  exp = 1.19999999999999996
+   m2 = 1.19999999999999440,  exp = 1.19999999999999996
+*/
+
+/* phoenix, gcc -O, iropt-level=1/0
+   m1 = 1.20000000000000018,  exp = 1.19999999999999996
+   m2 = 1.19999999999998885,  exp = 1.19999999999999996
+*/
+
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+
+#define NNN 1000
+
+
+double
+my_mean1 (const double data[], size_t stride, const size_t size)
+{
+  /* Compute the arithmetic mean of a dataset using the recurrence relation
+     mean_(n) = mean(n-1) + (data[n] - mean(n-1))/(n+1)   */
+  long double mean = 0;
+  size_t i;
+
+  for (i = 0; i < size; i++)
+    {
+      mean += (data[i * stride] - mean) / (i + 1);
+    }
+  return mean;
+}
+
+double
+my_mean2 (const double data[], size_t stride, const size_t size)
+{
+  /* Compute the arithmetic mean of a dataset using the 
+     obvious scheme. */
+  int i;
+  long double sum = 0;
+  for (i = 0; i < size; i++)
+    sum += data[i * stride];
+  return sum / (double)size;
+}
+
+
+int main (void)
+{
+  int i;
+  const size_t nacc2 = NNN+1;
+  double numacc2[NNN+1] ;
+
+  numacc2[0] = 1.2 ;
+ 
+  for (i = 1 ; i < NNN; i += 2) 
+     numacc2[i] = 1.1 ;
+
+  for (i = 1 ; i < NNN; i += 2) 
+      numacc2[i+1] = 1.3 ;
+
+#if 1
+  asm __volatile__("fninit");
+#endif
+
+  {
+    double m1 = my_mean1 (numacc2, 1, nacc2);
+    double m2 = my_mean2 (numacc2, 1, nacc2);
+    double expected_mean = 1.2;
+    printf("m1 = %19.17f,  exp = %19.17f\n", m1, expected_mean);
+    printf("m2 = %19.17f,  exp = %19.17f\n", m2, expected_mean);
+  }
+
+  return 0;
+}
+
+
diff --git a/VEX/test/test-amd64-muldiv.h b/VEX/test/test-amd64-muldiv.h
new file mode 100644
index 0000000..f8fb2de
--- /dev/null
+++ b/VEX/test/test-amd64-muldiv.h
@@ -0,0 +1,74 @@
+
+void glue(glue(test_, OP), b)(int64 op0, int64 op1) 
+{
+    int64 res, s1, s0, flags;
+    s0 = op0;
+    s1 = op1;
+    res = s0;
+    flags = 0;
+    asm ("pushq %4\n\t"
+         "popfq\n\t"
+         stringify(OP)"b %b2\n\t" 
+         "pushfq\n\t"
+         "popq %1\n\t"
+         : "=a" (res), "=g" (flags)
+         : "q" (s1), "0" (res), "1" (flags));
+    printf("%-10s A=%016llx B=%016llx R=%016llx CC=%04llx\n",
+           stringify(OP) "b", s0, s1, res, flags & CC_MASK);
+}
+
+void glue(glue(test_, OP), w)(int64 op0h, int64 op0, int64 op1) 
+{
+    int64 res, s1, flags, resh;
+    s1 = op1;
+    resh = op0h;
+    res = op0;
+    flags = 0;
+    asm ("pushq %5\n\t"
+         "popfq\n\t"
+         stringify(OP) "w %w3\n\t" 
+         "pushfq\n\t"
+         "popq %1\n\t"
+         : "=a" (res), "=g" (flags), "=d" (resh)
+         : "q" (s1), "0" (res), "1" (flags), "2" (resh));
+    printf("%-10s AH=%016llx AL=%016llx B=%016llx RH=%016llx RL=%016llx CC=%04llx\n",
+           stringify(OP) "w", op0h, op0, s1, resh, res, flags & CC_MASK);
+}
+
+void glue(glue(test_, OP), l)(int64 op0h, int64 op0, int64 op1) 
+{
+    int64 res, s1, flags, resh;
+    s1 = op1;
+    resh = op0h;
+    res = op0;
+    flags = 0;
+    asm ("pushq %5\n\t"
+         "popfq\n\t"
+         stringify(OP) "l %3\n\t" 
+         "pushfq\n\t"
+         "popq %1\n\t"
+         : "=a" (res), "=g" (flags), "=d" (resh)
+         : "q" ((int)s1), "0" (res), "1" (flags), "2" (resh));
+    printf("%-10s AH=%016llx AL=%016llx B=%016llx RH=%016llx RL=%016llx CC=%04llx\n",
+           stringify(OP) "l", op0h, op0, s1, resh, res, flags & CC_MASK);
+}
+
+void glue(glue(test_, OP), q)(int64 op0h, int64 op0, int64 op1) 
+{
+    int64 res, s1, flags, resh;
+    s1 = op1;
+    resh = op0h;
+    res = op0;
+    flags = 0;
+    asm ("pushq %5\n\t"
+         "popfq\n\t"
+         stringify(OP) "q %3\n\t" 
+         "pushfq\n\t"
+         "popq %1\n\t"
+         : "=a" (res), "=g" (flags), "=d" (resh)
+         : "q" (s1), "0" (res), "1" (flags), "2" (resh));
+    printf("%-10s AH=%016llx AL=%016llx B=%016llx RH=%016llx RL=%016llx CC=%04llx\n",
+           stringify(OP) "q", op0h, op0, s1, resh, res, flags & CC_MASK);
+}
+
+#undef OP
diff --git a/VEX/test/test-amd64-shift.h b/VEX/test/test-amd64-shift.h
new file mode 100644
index 0000000..e5ded43
--- /dev/null
+++ b/VEX/test/test-amd64-shift.h
@@ -0,0 +1,178 @@
+
+#define exec_op glue(exec_, OP)
+#define exec_opq glue(glue(exec_, OP), q)
+#define exec_opl glue(glue(exec_, OP), l)
+#define exec_opw glue(glue(exec_, OP), w)
+#define exec_opb glue(glue(exec_, OP), b)
+
+#ifndef OP_SHIFTD
+
+#ifdef OP_NOBYTE
+#define EXECSHIFT(size, res, s1, s2, flags) \
+    asm ("pushq %4\n\t"\
+         "popfq\n\t"\
+         stringify(OP) size " %" size "2, %" size "0\n\t" \
+         "pushfq\n\t"\
+         "popq %1\n\t"\
+         : "=g" (res), "=g" (flags)\
+         : "r" (s1), "0" (res), "1" (flags));
+#else
+#define EXECSHIFT(size, res, s1, s2, flags) \
+    asm ("pushq %4\n\t"\
+         "popfq\n\t"\
+         stringify(OP) size " %%cl, %" size "0\n\t" \
+         "pushfq\n\t"\
+         "popq %1\n\t"\
+         : "=q" (res), "=g" (flags)\
+         : "c" (s1), "0" (res), "1" (flags));
+#endif
+
+void exec_opq(int64 s2, int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("q", res, s1, s2, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%016llx B=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "q", s0, s1, res, iflags, flags & CC_MASK);
+}
+
+void exec_opl(int64 s2, int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("", res, s1, s2, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%016llx B=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "l", s0, s1, res, iflags, flags & CC_MASK);
+}
+
+void exec_opw(int64 s2, int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("w", res, s1, s2, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%016llx B=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "w", s0, s1, res, iflags, flags & CC_MASK);
+}
+
+#else
+#define EXECSHIFT(size, res, s1, s2, flags) \
+    asm ("pushq %4\n\t"\
+         "popfq\n\t"\
+         stringify(OP) size " %%cl, %" size "5, %" size "0\n\t" \
+         "pushfq\n\t"\
+         "popq %1\n\t"\
+         : "=g" (res), "=g" (flags)\
+         : "c" (s1), "0" (res), "1" (flags), "r" (s2));
+
+void exec_opl(int64 s2, int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("", res, s1, s2, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%016llx B=%016llx C=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "l", s0, s2, s1, res, iflags, flags & CC_MASK);
+}
+
+void exec_opw(int64 s2, int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("w", res, s1, s2, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%016llx B=%016llx C=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "w", s0, s2, s1, res, iflags, flags & CC_MASK);
+}
+
+#endif
+
+#ifndef OP_NOBYTE
+void exec_opb(int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("b", res, s1, 0, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%016llx B=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "b", s0, s1, res, iflags, flags & CC_MASK);
+}
+#endif
+
+void exec_op(int64 s2, int64 s0, int64 s1)
+{
+  int64 o,s,z,a,c,p,flags_in;
+  for (o = 0; o < 2; o++) {
+  for (s = 0; s < 2; s++) {
+  for (z = 0; z < 2; z++) {
+  for (a = 0; a < 2; a++) {
+  for (c = 0; c < 2; c++) {
+  for (p = 0; p < 2; p++) {
+
+    flags_in = (o ? CC_O : 0)
+             | (s ? CC_S : 0)
+             | (z ? CC_Z : 0)
+             | (a ? CC_A : 0)
+             | (c ? CC_C : 0)
+             | (p ? CC_P : 0);
+ 
+    exec_opq(s2, s0, s1, flags_in);
+    if (s1 <= 31) 
+       exec_opl(s2, s0, s1, flags_in);
+#ifdef OP_SHIFTD
+    if (s1 <= 15)
+        exec_opw(s2, s0, s1, flags_in);
+#else
+    exec_opw(s2, s0, s1, flags_in);
+#endif
+#ifndef OP_NOBYTE
+    exec_opb(s0, s1, flags_in);
+#endif
+#ifdef OP_CC
+    exec_opq(s2, s0, s1, flags_in);
+    exec_opl(s2, s0, s1, flags_in);
+    exec_opw(s2, s0, s1, flags_in);
+    exec_opb(s0, s1, flags_in);
+#endif
+
+  }}}}}}
+
+}
+
+void glue(test_, OP)(void)
+{
+    int64 i;
+    for(i = 0; i < 64; i++)
+        exec_op(0x3141592721ad3d34, 0x2718284612345678, i);
+    for(i = 0; i < 64; i++)
+        exec_op(0x31415927813f3421, 0x2718284682345678, i);
+}
+
+void *glue(_test_, OP) __init_call = glue(test_, OP);
+
+#undef OP
+#undef OP_CC
+#undef OP_SHIFTD
+#undef OP_NOBYTE
+#undef EXECSHIFT
+
diff --git a/VEX/test/test-amd64.c b/VEX/test/test-amd64.c
new file mode 100644
index 0000000..cfb745e
--- /dev/null
+++ b/VEX/test/test-amd64.c
@@ -0,0 +1,1709 @@
+
+/* To build: 
+
+     gcc -g -o test-amd64 test-amd64.c -lm
+
+ */
+
+/* Contrary to what the next comment says, this is now an amd64 CPU
+   test. */
+
+/*
+ *  x86 CPU test
+ * 
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <math.h>
+#include <signal.h>
+#include <setjmp.h>
+#include <errno.h>
+#include <sys/ucontext.h>
+#include <sys/mman.h>
+
+/* Setting this to 1 creates a very comprehensive test of
+   integer condition codes. */
+#define TEST_INTEGER_VERBOSE 1
+
+typedef  long long int  int64;
+
+//#define LINUX_VM86_IOPL_FIX
+//#define TEST_P4_FLAGS
+
+#define xglue(x, y) x ## y
+#define glue(x, y) xglue(x, y)
+#define stringify(s)	tostring(s)
+#define tostring(s)	#s
+
+#define CC_C   	0x0001
+#define CC_P 	0x0004
+#define CC_A	0x0010
+#define CC_Z	0x0040
+#define CC_S    0x0080
+#define CC_O    0x0800
+
+#define __init_call	__attribute__ ((unused,__section__ (".initcall.init")))
+
+static void *call_start __init_call = NULL;
+
+#define CC_MASK (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A)
+
+#define OP add
+#include "test-amd64.h"
+
+#define OP sub
+#include "test-amd64.h"
+
+#define OP xor
+#include "test-amd64.h"
+
+#define OP and
+#include "test-amd64.h"
+
+#define OP or
+#include "test-amd64.h"
+
+#define OP cmp
+#include "test-amd64.h"
+
+#define OP adc
+#define OP_CC
+#include "test-amd64.h"
+
+#define OP sbb
+#define OP_CC
+#include "test-amd64.h"
+
+#define OP inc
+#define OP_CC
+#define OP1
+#include "test-amd64.h"
+
+#define OP dec
+#define OP_CC
+#define OP1
+#include "test-amd64.h"
+
+#define OP neg
+#define OP_CC
+#define OP1
+#include "test-amd64.h"
+
+#define OP not
+#define OP_CC
+#define OP1
+#include "test-amd64.h"
+
+#undef CC_MASK
+#define CC_MASK (CC_C | CC_P | CC_Z | CC_S | CC_O)
+
+#define OP shl
+#include "test-amd64-shift.h"
+
+#define OP shr
+#include "test-amd64-shift.h"
+
+#define OP sar
+#include "test-amd64-shift.h"
+
+#define OP rol
+#include "test-amd64-shift.h"
+
+#define OP ror
+#include "test-amd64-shift.h"
+
+#define OP rcr
+#define OP_CC
+#include "test-amd64-shift.h"
+
+#define OP rcl
+#define OP_CC
+#include "test-amd64-shift.h"
+
+#if 0
+#define OP shld
+#define OP_SHIFTD
+#define OP_NOBYTE
+#include "test-amd64-shift.h"
+
+#define OP shrd
+#define OP_SHIFTD
+#define OP_NOBYTE
+#include "test-amd64-shift.h"
+#endif
+
+/* XXX: should this CC mask be more precise? */
+#undef CC_MASK
+#define CC_MASK (CC_C)
+
+#if 0
+#define OP bt
+#define OP_NOBYTE
+#include "test-amd64-shift.h"
+
+#define OP bts
+#define OP_NOBYTE
+#include "test-amd64-shift.h"
+
+#define OP btr
+#define OP_NOBYTE
+#include "test-amd64-shift.h"
+
+#define OP btc
+#define OP_NOBYTE
+#include "test-amd64-shift.h"
+#endif
+
+/* lea test (modrm support) */
+#define TEST_LEA(STR)\
+{\
+    asm("leaq " STR ", %0"\
+        : "=r" (res)\
+        : "a" (rax), "b" (rbx), "c" (rcx), "d" (rdx), "S" (rsi), "D" (rdi));\
+    printf("lea %s = %016llx\n", STR, res);\
+}
+
+#define TEST_LEA16(STR)\
+{\
+    asm(".code16 ; .byte 0x67 ; leal " STR ", %0 ; .code32"\
+        : "=wq" (res)\
+        : "a" (eax), "b" (ebx), "c" (ecx), "d" (edx), "S" (esi), "D" (edi));\
+    printf("lea %s = %08x\n", STR, res);\
+}
+
+
+void test_lea(void)
+{
+    int64 rax, rbx, rcx, rdx, rsi, rdi, res;
+    rax = 0x0001;
+    rbx = 0x0002;
+    rcx = 0x0004;
+    rdx = 0x0008;
+    rsi = 0x0010;
+    rdi = 0x0020;
+
+    TEST_LEA("0x4000");
+
+    TEST_LEA("(%%rax)");
+    TEST_LEA("(%%rbx)");
+    TEST_LEA("(%%rcx)");
+    TEST_LEA("(%%rdx)");
+    TEST_LEA("(%%rsi)");
+    TEST_LEA("(%%rdi)");
+
+    TEST_LEA("0x40(%%rax)");
+    TEST_LEA("0x40(%%rbx)");
+    TEST_LEA("0x40(%%rcx)");
+    TEST_LEA("0x40(%%rdx)");
+    TEST_LEA("0x40(%%rsi)");
+    TEST_LEA("0x40(%%rdi)");
+
+    TEST_LEA("0x4000(%%rax)");
+    TEST_LEA("0x4000(%%rbx)");
+    TEST_LEA("0x4000(%%rcx)");
+    TEST_LEA("0x4000(%%rdx)");
+    TEST_LEA("0x4000(%%rsi)");
+    TEST_LEA("0x4000(%%rdi)");
+
+    TEST_LEA("(%%rax, %%rcx)");
+    TEST_LEA("(%%rbx, %%rdx)");
+    TEST_LEA("(%%rcx, %%rcx)");
+    TEST_LEA("(%%rdx, %%rcx)");
+    TEST_LEA("(%%rsi, %%rcx)");
+    TEST_LEA("(%%rdi, %%rcx)");
+
+    TEST_LEA("0x40(%%rax, %%rcx)");
+    TEST_LEA("0x4000(%%rbx, %%rdx)");
+
+    TEST_LEA("(%%rcx, %%rcx, 2)");
+    TEST_LEA("(%%rdx, %%rcx, 4)");
+    TEST_LEA("(%%rsi, %%rcx, 8)");
+
+    TEST_LEA("(,%%rax, 2)");
+    TEST_LEA("(,%%rbx, 4)");
+    TEST_LEA("(,%%rcx, 8)");
+
+    TEST_LEA("0x40(,%%rax, 2)");
+    TEST_LEA("0x40(,%%rbx, 4)");
+    TEST_LEA("0x40(,%%rcx, 8)");
+
+
+    TEST_LEA("-10(%%rcx, %%rcx, 2)");
+    TEST_LEA("-10(%%rdx, %%rcx, 4)");
+    TEST_LEA("-10(%%rsi, %%rcx, 8)");
+
+    TEST_LEA("0x4000(%%rcx, %%rcx, 2)");
+    TEST_LEA("0x4000(%%rdx, %%rcx, 4)");
+    TEST_LEA("0x4000(%%rsi, %%rcx, 8)");
+}
+
+#define TEST_JCC(JCC, v1, v2)\
+{   int one = 1; \
+    int res;\
+    asm("movl $1, %0\n\t"\
+        "cmpl %2, %1\n\t"\
+        "j" JCC " 1f\n\t"\
+        "movl $0, %0\n\t"\
+        "1:\n\t"\
+        : "=r" (res)\
+        : "r" (v1), "r" (v2));\
+    printf("%-10s %d\n", "j" JCC, res);\
+\
+    asm("movl $0, %0\n\t"\
+        "cmpl %2, %1\n\t"\
+        "set" JCC " %b0\n\t"\
+        : "=r" (res)\
+        : "r" (v1), "r" (v2));\
+    printf("%-10s %d\n", "set" JCC, res);\
+ {\
+    asm("movl $0x12345678, %0\n\t"\
+        "cmpl %2, %1\n\t"\
+        "cmov" JCC "l %3, %0\n\t"\
+        : "=r" (res)\
+        : "r" (v1), "r" (v2), "m" (one));\
+        printf("%-10s R=0x%08x\n", "cmov" JCC "l", res);\
+    asm("movl $0x12345678, %0\n\t"\
+        "cmpl %2, %1\n\t"\
+        "cmov" JCC "w %w3, %w0\n\t"\
+        : "=r" (res)\
+        : "r" (v1), "r" (v2), "r" (one));\
+        printf("%-10s R=0x%08x\n", "cmov" JCC "w", res);\
+ } \
+}
+
+/* various jump tests */
+void test_jcc(void)
+{
+    TEST_JCC("ne", 1, 1);
+    TEST_JCC("ne", 1, 0);
+
+    TEST_JCC("e", 1, 1);
+    TEST_JCC("e", 1, 0);
+
+    TEST_JCC("l", 1, 1);
+    TEST_JCC("l", 1, 0);
+    TEST_JCC("l", 1, -1);
+
+    TEST_JCC("le", 1, 1);
+    TEST_JCC("le", 1, 0);
+    TEST_JCC("le", 1, -1);
+
+    TEST_JCC("ge", 1, 1);
+    TEST_JCC("ge", 1, 0);
+    TEST_JCC("ge", -1, 1);
+
+    TEST_JCC("g", 1, 1);
+    TEST_JCC("g", 1, 0);
+    TEST_JCC("g", 1, -1);
+
+    TEST_JCC("b", 1, 1);
+    TEST_JCC("b", 1, 0);
+    TEST_JCC("b", 1, -1);
+
+    TEST_JCC("be", 1, 1);
+    TEST_JCC("be", 1, 0);
+    TEST_JCC("be", 1, -1);
+
+    TEST_JCC("ae", 1, 1);
+    TEST_JCC("ae", 1, 0);
+    TEST_JCC("ae", 1, -1);
+
+    TEST_JCC("a", 1, 1);
+    TEST_JCC("a", 1, 0);
+    TEST_JCC("a", 1, -1);
+
+
+    TEST_JCC("p", 1, 1);
+    TEST_JCC("p", 1, 0);
+
+    TEST_JCC("np", 1, 1);
+    TEST_JCC("np", 1, 0);
+
+    TEST_JCC("o", 0x7fffffff, 0);
+    TEST_JCC("o", 0x7fffffff, -1);
+
+    TEST_JCC("no", 0x7fffffff, 0);
+    TEST_JCC("no", 0x7fffffff, -1);
+
+    TEST_JCC("s", 0, 1);
+    TEST_JCC("s", 0, -1);
+    TEST_JCC("s", 0, 0);
+
+    TEST_JCC("ns", 0, 1);
+    TEST_JCC("ns", 0, -1);
+    TEST_JCC("ns", 0, 0);
+}
+
+#undef CC_MASK
+#ifdef TEST_P4_FLAGS
+#define CC_MASK (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A)
+#else
+#define CC_MASK (CC_O | CC_C)
+#endif
+
+#define OP mul
+#include "test-amd64-muldiv.h"
+
+#define OP imul
+#include "test-amd64-muldiv.h"
+
+void test_imulw2(int64 op0, int64 op1) 
+{
+    int64 res, s1, s0, flags;
+    s0 = op0;
+    s1 = op1;
+    res = s0;
+    flags = 0;
+    asm ("pushq %4\n\t"
+         "popfq\n\t"
+         "imulw %w2, %w0\n\t" 
+         "pushfq\n\t"
+         "popq %1\n\t"
+         : "=q" (res), "=g" (flags)
+         : "q" (s1), "0" (res), "1" (flags));
+    printf("%-10s A=%016llx B=%016llx R=%016llx CC=%04llx\n",
+           "imulw", s0, s1, res, flags & CC_MASK);
+}
+
+void test_imull2(int64 op0, int64 op1) 
+{
+    int res, s1;
+    int64 s0, flags;
+    s0 = op0;
+    s1 = op1;
+    res = s0;
+    flags = 0;
+    asm ("pushq %4\n\t"
+         "popfq\n\t"
+         "imull %2, %0\n\t" 
+         "pushfq\n\t"
+         "popq %1\n\t"
+         : "=q" (res), "=g" (flags)
+         : "q" (s1), "0" (res), "1" (flags));
+    printf("%-10s A=%016llx B=%08x R=%08x CC=%04llx\n",
+           "imull", s0, s1, res, flags & CC_MASK);
+}
+
+#define TEST_IMUL_IM(size, size1, op0, op1)\
+{\
+    int64 res, flags;\
+    flags = 0;\
+    res = 0;\
+    asm ("pushq %3\n\t"\
+         "popfq\n\t"\
+         "imul" size " $" #op0 ", %" size1 "2, %" size1 "0\n\t" \
+         "pushfq\n\t"\
+         "popq %1\n\t"\
+         : "=r" (res), "=g" (flags)\
+         : "r" (op1), "1" (flags), "0" (res));\
+    printf("%-10s A=%08x B=%08x R=%016llx CC=%04llx\n",\
+           "imul" size, op0, op1, res, flags & CC_MASK);\
+}
+
+#define TEST_IMUL_IM_L(op0, op1)\
+{\
+    int64 flags = 0;\
+    int res = 0;\
+    int res64 = 0;\
+    asm ("pushq %3\n\t"\
+         "popfq\n\t"\
+         "imul $" #op0 ", %2, %0\n\t" \
+         "pushfq\n\t"\
+         "popq %1\n\t"\
+         : "=r" (res64), "=g" (flags)\
+         : "r" (op1), "1" (flags), "0" (res));\
+    printf("%-10s A=%08x B=%08x R=%08x CC=%04llx\n",\
+           "imull", op0, op1, res, flags & CC_MASK);\
+}
+
+
+#undef CC_MASK
+#define CC_MASK (0)
+
+#define OP div
+#include "test-amd64-muldiv.h"
+
+#define OP idiv
+#include "test-amd64-muldiv.h"
+
+void test_mul(void)
+{
+    test_imulb(0x1234561d, 4);
+    test_imulb(3, -4);
+    test_imulb(0x80, 0x80);
+    test_imulb(0x10, 0x10);
+
+    test_imulw(0, 0, 0);
+    test_imulw(0, 0xFF, 0xFF);
+    test_imulw(0, 0xFF, 0x100);
+    test_imulw(0, 0x1234001d, 45);
+    test_imulw(0, 23, -45);
+    test_imulw(0, 0x8000, 0x8000);
+    test_imulw(0, 0x100, 0x100);
+
+    test_imull(0, 0, 0);
+    test_imull(0, 0xFFFF, 0xFFFF);
+    test_imull(0, 0xFFFF, 0x10000);
+    test_imull(0, 0x1234001d, 45);
+    test_imull(0, 23, -45);
+    test_imull(0, 0x80000000, 0x80000000);
+    test_imull(0, 0x10000, 0x10000);
+
+    test_mulb(0x1234561d, 4);
+    test_mulb(3, -4);
+    test_mulb(0x80, 0x80);
+    test_mulb(0x10, 0x10);
+
+    test_mulw(0, 0x1234001d, 45);
+    test_mulw(0, 23, -45);
+    test_mulw(0, 0x8000, 0x8000);
+    test_mulw(0, 0x100, 0x100);
+
+    test_mull(0, 0x1234001d, 45);
+    test_mull(0, 23, -45);
+    test_mull(0, 0x80000000, 0x80000000);
+    test_mull(0, 0x10000, 0x10000);
+
+    test_imulw2(0x1234001d, 45);
+    test_imulw2(23, -45);
+    test_imulw2(0x8000, 0x8000);
+    test_imulw2(0x100, 0x100);
+
+    test_imull2(0x1234001d, 45);
+    test_imull2(23, -45);
+    test_imull2(0x80000000, 0x80000000);
+    test_imull2(0x10000, 0x10000);
+
+    TEST_IMUL_IM("w", "w", 45, 0x1234);
+    TEST_IMUL_IM("w", "w", -45, 23);
+    TEST_IMUL_IM("w", "w", 0x8000, 0x80000000);
+    TEST_IMUL_IM("w", "w", 0x7fff, 0x1000);
+
+    TEST_IMUL_IM_L(45, 0x1234);
+    TEST_IMUL_IM_L(-45, 23);
+    TEST_IMUL_IM_L(0x8000, 0x80000000);
+    TEST_IMUL_IM_L(0x7fff, 0x1000);
+
+    test_idivb(0x12341678, 0x127e);
+    test_idivb(0x43210123, -5);
+    test_idivb(0x12340004, -1);
+
+    test_idivw(0, 0x12345678, 12347);
+    test_idivw(0, -23223, -45);
+    test_idivw(0, 0x12348000, -1);
+    test_idivw(0x12343, 0x12345678, 0x81238567);
+
+    test_idivl(0, 0x12345678, 12347);
+    test_idivl(0, -233223, -45);
+    test_idivl(0, 0x80000000, -1);
+    test_idivl(0x12343, 0x12345678, 0x81234567);
+
+    test_idivq(0, 0x12345678, 12347);
+    test_idivq(0, -233223, -45);
+    test_idivq(0, 0x80000000, -1);
+    test_idivq(0x12343, 0x12345678, 0x81234567);
+
+    test_divb(0x12341678, 0x127e);
+    test_divb(0x43210123, -5);
+    test_divb(0x12340004, -1);
+
+    test_divw(0, 0x12345678, 12347);
+    test_divw(0, -23223, -45);
+    test_divw(0, 0x12348000, -1);
+    test_divw(0x12343, 0x12345678, 0x81238567);
+
+    test_divl(0, 0x12345678, 12347);
+    test_divl(0, -233223, -45);
+    test_divl(0, 0x80000000, -1);
+    test_divl(0x12343, 0x12345678, 0x81234567);
+
+    test_divq(0, 0x12345678, 12347);
+    test_divq(0, -233223, -45);
+    test_divq(0, 0x80000000, -1);
+    test_divq(0x12343, 0x12345678, 0x81234567);
+}
+
+#define TEST_BSX(op, size, op0)\
+{\
+    int res, val, resz;\
+    val = op0;\
+    asm("xorl %1, %1\n"\
+        "movl $0x12345678, %0\n"\
+        #op " %" size "2, %" size "0 ; setz %b1" \
+        : "=r" (res), "=q" (resz)\
+        : "r" (val));\
+    printf("%-10s A=%08x R=%08x %d\n", #op, val, res, resz);\
+}
+
+void test_bsx(void)
+{
+    TEST_BSX(bsrw, "w", 0);
+    TEST_BSX(bsrw, "w", 0x12340128);
+    TEST_BSX(bsrl, "", 0);
+    TEST_BSX(bsrl, "", 0x00340128);
+    TEST_BSX(bsfw, "w", 0);
+    TEST_BSX(bsfw, "w", 0x12340128);
+    TEST_BSX(bsfl, "", 0);
+    TEST_BSX(bsfl, "", 0x00340128);
+}
+
+/**********************************************/
+
+void test_fops(double a, double b)
+{
+    printf("a=%f b=%f a+b=%f\n", a, b, a + b);
+    printf("a=%f b=%f a-b=%f\n", a, b, a - b);
+    printf("a=%f b=%f a*b=%f\n", a, b, a * b);
+    printf("a=%f b=%f a/b=%f\n", a, b, a / b);
+    printf("a=%f b=%f fmod(a, b)=%f\n", a, b, fmod(a, b));
+    printf("a=%f sqrt(a)=%f\n", a, sqrt(a));
+    printf("a=%f sin(a)=%f\n", a, sin(a));
+    printf("a=%f cos(a)=%f\n", a, cos(a));
+    printf("a=%f tan(a)=%f\n", a, tan(a));
+    printf("a=%f log(a)=%f\n", a, log(a));
+    printf("a=%f exp(a)=%f\n", a, exp(a));
+    printf("a=%f b=%f atan2(a, b)=%f\n", a, b, atan2(a, b));
+    /* just to test some op combining */
+    printf("a=%f asin(sin(a))=%f\n", a, asin(sin(a)));
+    printf("a=%f acos(cos(a))=%f\n", a, acos(cos(a)));
+    printf("a=%f atan(tan(a))=%f\n", a, atan(tan(a)));
+}
+
+void test_fcmp(double a, double b)
+{
+    printf("(%f<%f)=%d\n",
+           a, b, a < b);
+    printf("(%f<=%f)=%d\n",
+           a, b, a <= b);
+    printf("(%f==%f)=%d\n",
+           a, b, a == b);
+    printf("(%f>%f)=%d\n",
+           a, b, a > b);
+    printf("(%f<=%f)=%d\n",
+           a, b, a >= b);
+    {
+        unsigned long long int rflags;
+        /* test f(u)comi instruction */
+        asm("fcomi %2, %1\n"
+            "pushfq\n"
+            "popq %0\n"
+            : "=r" (rflags)
+            : "t" (a), "u" (b));
+        printf("fcomi(%f %f)=%016llx\n", a, b, rflags & (CC_Z | CC_P | CC_C));
+    }
+}
+
+void test_fcvt(double a)
+{
+    float fa;
+    long double la;
+    int16_t fpuc;
+    int i;
+    int64 lla;
+    int ia;
+    int16_t wa;
+    double ra;
+
+    fa = a;
+    la = a;
+    printf("(float)%f = %f\n", a, fa);
+    printf("(long double)%f = %Lf\n", a, la);
+    printf("a=%016Lx\n", *(long long *)&a);
+    printf("la=%016Lx %04x\n", *(long long *)&la, 
+           *(unsigned short *)((char *)(&la) + 8));
+
+    /* test all roundings */
+    asm volatile ("fstcw %0" : "=m" (fpuc));
+    for(i=0;i<4;i++) {
+        short zz = (fpuc & ~0x0c00) | (i << 10);
+        asm volatile ("fldcw %0" : : "m" (zz));
+        asm volatile ("fist %0" : "=m" (wa) : "t" (a));
+        asm volatile ("fistl %0" : "=m" (ia) : "t" (a));
+        asm volatile ("fistpll %0" : "=m" (lla) : "t" (a) : "st");
+        asm volatile ("frndint ; fstl %0" : "=m" (ra) : "t" (a));
+        asm volatile ("fldcw %0" : : "m" (fpuc));
+        printf("(short)a = %d\n", wa);
+        printf("(int)a = %d\n", ia);
+        printf("(int64_t)a = %Ld\n", lla);
+        printf("rint(a) = %f\n", ra);
+    }
+}
+
+#define TEST(N) \
+    asm("fld" #N : "=t" (a)); \
+    printf("fld" #N "= %f\n", a);
+
+void test_fconst(void)
+{
+    double a;
+    TEST(1);
+    TEST(l2t);
+    TEST(l2e);
+    TEST(pi);
+    TEST(lg2);
+    TEST(ln2);
+    TEST(z);
+}
+
+void test_fbcd(double a)
+{
+    unsigned short bcd[5];
+    double b;
+
+    asm("fbstp %0" : "=m" (bcd[0]) : "t" (a) : "st");
+    asm("fbld %1" : "=t" (b) : "m" (bcd[0]));
+    printf("a=%f bcd=%04x%04x%04x%04x%04x b=%f\n", 
+           a, bcd[4], bcd[3], bcd[2], bcd[1], bcd[0], b);
+}
+
+#define TEST_ENV(env, save, restore)\
+{\
+    memset((env), 0xaa, sizeof(*(env)));\
+    for(i=0;i<5;i++)\
+        asm volatile ("fldl %0" : : "m" (dtab[i]));\
+    asm(save " %0\n" : : "m" (*(env)));\
+    asm(restore " %0\n": : "m" (*(env)));\
+    for(i=0;i<5;i++)\
+        asm volatile ("fstpl %0" : "=m" (rtab[i]));\
+    for(i=0;i<5;i++)\
+        printf("res[%d]=%f\n", i, rtab[i]);\
+    printf("fpuc=%04x fpus=%04x fptag=%04x\n",\
+           (env)->fpuc,\
+           (env)->fpus & 0xff00,\
+           (env)->fptag);\
+}
+
+void test_fenv(void)
+{
+    struct __attribute__((packed)) {
+        uint16_t fpuc;
+        uint16_t dummy1;
+        uint16_t fpus;
+        uint16_t dummy2;
+        uint16_t fptag;
+        uint16_t dummy3;
+        uint32_t ignored[4];
+        long double fpregs[8];
+    } float_env32;
+    struct __attribute__((packed)) {
+        uint16_t fpuc;
+        uint16_t fpus;
+        uint16_t fptag;
+        uint16_t ignored[4];
+        long double fpregs[8];
+    } float_env16;
+    double dtab[8];
+    double rtab[8];
+    int i;
+
+    for(i=0;i<8;i++)
+        dtab[i] = i + 1;
+
+    TEST_ENV(&float_env16, "data16 fnstenv", "data16 fldenv");
+    TEST_ENV(&float_env16, "data16 fnsave", "data16 frstor");
+    TEST_ENV(&float_env32, "fnstenv", "fldenv");
+    TEST_ENV(&float_env32, "fnsave", "frstor");
+
+    /* test for ffree */
+    for(i=0;i<5;i++)
+        asm volatile ("fldl %0" : : "m" (dtab[i]));
+    asm volatile("ffree %st(2)");
+    asm volatile ("fnstenv %0\n" : : "m" (float_env32));
+    asm volatile ("fninit");
+    printf("fptag=%04x\n", float_env32.fptag);
+}
+
+
+#define TEST_FCMOV(a, b, rflags, CC)\
+{\
+    double res;\
+    asm("pushq %3\n"\
+        "popfq\n"\
+        "fcmov" CC " %2, %0\n"\
+        : "=t" (res)\
+        : "0" (a), "u" (b), "g" (rflags));\
+    printf("fcmov%s rflags=0x%04llx-> %f\n", \
+           CC, rflags, res);\
+}
+
+void test_fcmov(void)
+{
+    double a, b;
+    int64 rflags, i;
+
+    a = 1.0;
+    b = 2.0;
+    for(i = 0; i < 4; i++) {
+        rflags = 0;
+        if (i & 1)
+            rflags |= CC_C;
+        if (i & 2)
+            rflags |= CC_Z;
+        TEST_FCMOV(a, b, rflags, "b");
+        TEST_FCMOV(a, b, rflags, "e");
+        TEST_FCMOV(a, b, rflags, "be");
+        TEST_FCMOV(a, b, rflags, "nb");
+        TEST_FCMOV(a, b, rflags, "ne");
+        TEST_FCMOV(a, b, rflags, "nbe");
+    }
+    TEST_FCMOV(a, b, (int64)0, "u");
+    TEST_FCMOV(a, b, (int64)CC_P, "u");
+    TEST_FCMOV(a, b, (int64)0, "nu");
+    TEST_FCMOV(a, b, (int64)CC_P, "nu");
+}
+
+void test_floats(void)
+{
+    test_fops(2, 3);
+    test_fops(1.4, -5);
+    test_fcmp(2, -1);
+    test_fcmp(2, 2);
+    test_fcmp(2, 3);
+    test_fcvt(0.5);
+    test_fcvt(-0.5);
+    test_fcvt(1.0/7.0);
+    test_fcvt(-1.0/9.0);
+    test_fcvt(32768);
+    test_fcvt(-1e20);
+    test_fconst();
+    // REINSTATE (maybe): test_fbcd(1234567890123456);
+    // REINSTATE (maybe): test_fbcd(-123451234567890);
+    // REINSTATE: test_fenv();
+    // REINSTATE: test_fcmov();
+}
+
+/**********************************************/
+#if 0
+
+#define TEST_BCD(op, op0, cc_in, cc_mask)\
+{\
+    int res, flags;\
+    res = op0;\
+    flags = cc_in;\
+    asm ("push %3\n\t"\
+         "popf\n\t"\
+         #op "\n\t"\
+         "pushf\n\t"\
+         "popl %1\n\t"\
+        : "=a" (res), "=g" (flags)\
+        : "0" (res), "1" (flags));\
+    printf("%-10s A=%08x R=%08x CCIN=%04x CC=%04x\n",\
+           #op, op0, res, cc_in, flags & cc_mask);\
+}
+
+void test_bcd(void)
+{
+    TEST_BCD(daa, 0x12340503, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340506, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340507, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340559, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340560, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x1234059f, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x123405a0, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340503, 0, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340506, 0, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340503, CC_C, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340506, CC_C, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340503, CC_C | CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340506, CC_C | CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+
+    TEST_BCD(das, 0x12340503, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340506, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340507, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340559, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340560, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x1234059f, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x123405a0, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340503, 0, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340506, 0, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340503, CC_C, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340506, CC_C, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340503, CC_C | CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340506, CC_C | CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+
+    TEST_BCD(aaa, 0x12340205, CC_A, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x12340306, CC_A, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x1234040a, CC_A, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x123405fa, CC_A, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x12340205, 0, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x12340306, 0, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x1234040a, 0, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x123405fa, 0, (CC_C | CC_A));
+    
+    TEST_BCD(aas, 0x12340205, CC_A, (CC_C | CC_A));
+    TEST_BCD(aas, 0x12340306, CC_A, (CC_C | CC_A));
+    TEST_BCD(aas, 0x1234040a, CC_A, (CC_C | CC_A));
+    TEST_BCD(aas, 0x123405fa, CC_A, (CC_C | CC_A));
+    TEST_BCD(aas, 0x12340205, 0, (CC_C | CC_A));
+    TEST_BCD(aas, 0x12340306, 0, (CC_C | CC_A));
+    TEST_BCD(aas, 0x1234040a, 0, (CC_C | CC_A));
+    TEST_BCD(aas, 0x123405fa, 0, (CC_C | CC_A));
+
+    TEST_BCD(aam, 0x12340547, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A));
+    TEST_BCD(aad, 0x12340407, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A));
+}
+#endif /* 0 */
+
+#define TEST_XCHG(op, size, opconst)\
+{\
+    int op0, op1;\
+    op0 = 0x12345678;\
+    op1 = 0xfbca7654;\
+    asm(#op " %" size "0, %" size "1" \
+        : "=q" (op0), opconst (op1) \
+        : "0" (op0), "1" (op1));\
+    printf("%-10s A=%08x B=%08x\n",\
+           #op, op0, op1);\
+}
+
+#define TEST_CMPXCHG(op, size, opconst, eax)\
+{\
+    int op0, op1;\
+    op0 = 0x12345678;\
+    op1 = 0xfbca7654;\
+    asm(#op " %" size "0, %" size "1" \
+        : "=q" (op0), opconst (op1) \
+        : "0" (op0), "1" (op1), "a" (eax));\
+    printf("%-10s EAX=%08x A=%08x C=%08x\n",\
+           #op, eax, op0, op1);\
+}
+
+/* Exercise xchg (all sizes, register and memory forms) and cmpxchg
+   (both the matching and non-matching EAX cases).  The xadd and
+   cmpxchg8b sections are currently disabled with #if 0. */
+void test_xchg(void)
+{
+    TEST_XCHG(xchgl, "", "=q");
+    TEST_XCHG(xchgw, "w", "=q");
+    TEST_XCHG(xchgb, "b", "=q");
+
+    TEST_XCHG(xchgl, "", "=m");
+    TEST_XCHG(xchgw, "w", "=m");
+    TEST_XCHG(xchgb, "b", "=m");
+
+#if 0
+    TEST_XCHG(xaddl, "", "=q");
+    TEST_XCHG(xaddw, "w", "=q");
+    TEST_XCHG(xaddb, "b", "=q");
+
+    {
+        int res;
+        res = 0x12345678;
+        /* xadd with both operands the same register: res ends up doubled */
+        asm("xaddl %1, %0" : "=r" (res) : "0" (res));
+        printf("xaddl same res=%08x\n", res);
+    }
+
+    TEST_XCHG(xaddl, "", "=m");
+    TEST_XCHG(xaddw, "w", "=m");
+    TEST_XCHG(xaddb, "b", "=m");
+#endif
+    /* cmpxchg: EAX == destination, so the store path is taken */
+    TEST_CMPXCHG(cmpxchgl, "", "=q", 0xfbca7654);
+    TEST_CMPXCHG(cmpxchgw, "w", "=q", 0xfbca7654);
+    TEST_CMPXCHG(cmpxchgb, "b", "=q", 0xfbca7654);
+
+    /* cmpxchg: EAX != destination, so EAX gets reloaded instead */
+    TEST_CMPXCHG(cmpxchgl, "", "=q", 0xfffefdfc);
+    TEST_CMPXCHG(cmpxchgw, "w", "=q", 0xfffefdfc);
+    TEST_CMPXCHG(cmpxchgb, "b", "=q", 0xfffefdfc);
+
+    TEST_CMPXCHG(cmpxchgl, "", "=m", 0xfbca7654);
+    TEST_CMPXCHG(cmpxchgw, "w", "=m", 0xfbca7654);
+    TEST_CMPXCHG(cmpxchgb, "b", "=m", 0xfbca7654);
+
+    TEST_CMPXCHG(cmpxchgl, "", "=m", 0xfffefdfc);
+    TEST_CMPXCHG(cmpxchgw, "w", "=m", 0xfffefdfc);
+    TEST_CMPXCHG(cmpxchgb, "b", "=m", 0xfffefdfc);
+#if 0
+    {
+        uint64_t op0, op1, op2;
+        int i, eflags;
+
+        /* cmpxchg8b: iteration 0 = mismatch, iteration 1 = match (ZF set) */
+        for(i = 0; i < 2; i++) {
+            op0 = 0x123456789abcd;
+            if (i == 0)
+                op1 = 0xfbca765423456;
+            else
+                op1 = op0;
+            op2 = 0x6532432432434;
+            asm("cmpxchg8b %1\n" 
+                "pushf\n"
+                "popl %2\n"
+                : "=A" (op0), "=m" (op1), "=g" (eflags)
+                : "0" (op0), "m" (op1), "b" ((int)op2), "c" ((int)(op2 >> 32)));
+            printf("cmpxchg8b: op0=%016llx op1=%016llx CC=%02x\n", 
+                    op0, op1, eflags & CC_Z);
+        }
+    }
+#endif
+}
+
+/**********************************************/
+/* segmentation tests */
+#if 0
+#include <asm/ldt.h>
+#include <linux/unistd.h>
+#include <linux/version.h>
+
+_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
+#define modify_ldt_ldt_s user_desc
+#endif
+
+uint8_t seg_data1[4096];
+uint8_t seg_data2[4096];
+
+/* Build an LDT selector for entry n: TI=1 (LDT), RPL=3 (user). */
+#define MK_SEL(n) (((n) << 3) | 7)
+
+/* Run lar/lsl ('op') on selector 'seg', report whether ZF was set
+   (i.e. the selector was valid) and the result with 'mask' bits
+   cleared (those bits are implementation-dependent). */
+#define TEST_LR(op, size, seg, mask)\
+{\
+    int res, res2;\
+    res = 0x12345678;\
+    asm (op " %" size "2, %" size "0\n" \
+         "movl $0, %1\n"\
+         "jnz 1f\n"\
+         "movl $1, %1\n"\
+         "1:\n"\
+         : "=r" (res), "=r" (res2) : "m" (seg), "0" (res));\
+    printf(op ": Z=%d %08x\n", res2, res & ~(mask));\
+}
+
+/* NOTE: we use Linux modify_ldt syscall */
+/* Segment-register tests: installs two data segments in the LDT via the
+   Linux modify_ldt syscall, then exercises fs/gs/ds-relative loads,
+   lfs, and lar/lsl on valid and invalid selectors.  (i386-only code;
+   currently compiled out by the surrounding #if 0.) */
+void test_segs(void)
+{
+    struct modify_ldt_ldt_s ldt;
+    long long ldt_table[3];
+    int res, res2;
+    char tmp;
+    struct {
+        uint32_t offset;
+        uint16_t seg;
+    } __attribute__((packed)) segoff;
+
+    /* LDT entry 1 -> seg_data1 (32-bit data, page-granular limit) */
+    ldt.entry_number = 1;
+    ldt.base_addr = (unsigned long)&seg_data1;
+    ldt.limit = (sizeof(seg_data1) + 0xfff) >> 12;
+    ldt.seg_32bit = 1;
+    ldt.contents = MODIFY_LDT_CONTENTS_DATA;
+    ldt.read_exec_only = 0;
+    ldt.limit_in_pages = 1;
+    ldt.seg_not_present = 0;
+    ldt.useable = 1;
+    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
+
+    /* LDT entry 2 -> seg_data2, same attributes */
+    ldt.entry_number = 2;
+    ldt.base_addr = (unsigned long)&seg_data2;
+    ldt.limit = (sizeof(seg_data2) + 0xfff) >> 12;
+    ldt.seg_32bit = 1;
+    ldt.contents = MODIFY_LDT_CONTENTS_DATA;
+    ldt.read_exec_only = 0;
+    ldt.limit_in_pages = 1;
+    ldt.seg_not_present = 0;
+    ldt.useable = 1;
+    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
+
+    modify_ldt(0, &ldt_table, sizeof(ldt_table)); /* read ldt entries */
+#if 0
+    {
+        int i;
+        for(i=0;i<3;i++)
+            printf("%d: %016Lx\n", i, ldt_table[i]);
+    }
+#endif
+    /* do some tests with fs or gs */
+    asm volatile ("movl %0, %%fs" : : "r" (MK_SEL(1)));
+
+    seg_data1[1] = 0xaa;
+    seg_data2[1] = 0x55;
+
+    /* fs-override load from linear offset 1 of seg_data1 */
+    asm volatile ("fs movzbl 0x1, %0" : "=r" (res));
+    printf("FS[1] = %02x\n", res);
+
+    asm volatile ("pushl %%gs\n"
+                  "movl %1, %%gs\n"
+                  "gs movzbl 0x1, %0\n"
+                  "popl %%gs\n"
+                  : "=r" (res)
+                  : "r" (MK_SEL(2)));
+    printf("GS[1] = %02x\n", res);
+
+    /* tests with ds/ss (implicit segment case) */
+    tmp = 0xa5;
+    asm volatile ("pushl %%ebp\n\t"
+                  "pushl %%ds\n\t"
+                  "movl %2, %%ds\n\t"
+                  "movl %3, %%ebp\n\t"
+                  "movzbl 0x1, %0\n\t"
+                  "movzbl (%%ebp), %1\n\t"
+                  "popl %%ds\n\t"
+                  "popl %%ebp\n\t"
+                  : "=r" (res), "=r" (res2)
+                  : "r" (MK_SEL(1)), "r" (&tmp));
+    printf("DS[1] = %02x\n", res);
+    printf("SS[tmp] = %02x\n", res2);
+
+    /* lfs: load far pointer (offset + selector) in one instruction */
+    segoff.seg = MK_SEL(2);
+    segoff.offset = 0xabcdef12;
+    asm volatile("lfs %2, %0\n\t" 
+                 "movl %%fs, %1\n\t"
+                 : "=r" (res), "=g" (res2) 
+                 : "m" (segoff));
+    printf("FS:reg = %04x:%08x\n", res2, res);
+
+    TEST_LR("larw", "w", MK_SEL(2), 0x0100);
+    TEST_LR("larl", "", MK_SEL(2), 0x0100);
+    TEST_LR("lslw", "w", MK_SEL(2), 0);
+    TEST_LR("lsll", "", MK_SEL(2), 0);
+
+    /* same again with an out-of-range selector: ZF must stay clear */
+    TEST_LR("larw", "w", 0xfff8, 0);
+    TEST_LR("larl", "", 0xfff8, 0);
+    TEST_LR("lslw", "w", 0xfff8, 0);
+    TEST_LR("lsll", "", 0xfff8, 0);
+}
+#endif
+
+#if 0
+/* 16 bit code test */
+extern char code16_start, code16_end;
+extern char code16_func1;
+extern char code16_func2;
+extern char code16_func3;
+
+/* Install a 16-bit code segment over the code16_start..code16_end blob
+   and far-call three functions inside it, printing their results.
+   (Disabled by the surrounding #if 0; i386-only.) */
+void test_code16(void)
+{
+    struct modify_ldt_ldt_s ldt;
+    int res, res2;
+
+    /* build a code segment */
+    ldt.entry_number = 1;
+    ldt.base_addr = (unsigned long)&code16_start;
+    ldt.limit = &code16_end - &code16_start;
+    ldt.seg_32bit = 0;
+    ldt.contents = MODIFY_LDT_CONTENTS_CODE;
+    ldt.read_exec_only = 0;
+    ldt.limit_in_pages = 0;
+    ldt.seg_not_present = 0;
+    ldt.useable = 1;
+    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
+
+    /* call the first function */
+    asm volatile ("lcall %1, %2" 
+                  : "=a" (res)
+                  : "i" (MK_SEL(1)), "i" (&code16_func1): "memory", "cc");
+    printf("func1() = 0x%08x\n", res);
+    /* func2 additionally reports the stack-pointer decrement in %ecx */
+    asm volatile ("lcall %2, %3" 
+                  : "=a" (res), "=c" (res2)
+                  : "i" (MK_SEL(1)), "i" (&code16_func2): "memory", "cc");
+    printf("func2() = 0x%08x spdec=%d\n", res, res2);
+    asm volatile ("lcall %1, %2" 
+                  : "=a" (res)
+                  : "i" (MK_SEL(1)), "i" (&code16_func3): "memory", "cc");
+    printf("func3() = 0x%08x\n", res);
+}
+#endif
+
+extern char func_lret32;
+extern char func_iret32;
+
+/* Miscellaneous instruction tests (xlat, far call/iret, popl/popw into
+   the stack slot being popped).  Every section is currently #if 0'd
+   out pending reinstatement, so this function is a no-op for now. */
+void test_misc(void)
+{
+  //    char table[256];
+  //  int res, i;
+
+#if 0
+    // REINSTATE
+    for(i=0;i<256;i++) table[i] = 256 - i;
+    res = 0x12345678;
+    asm ("xlat" : "=a" (res) : "b" (table), "0" (res));
+    printf("xlat: EAX=%08x\n", res);
+#endif
+#if 0
+    // REINSTATE
+    asm volatile ("pushl %%cs ; call %1" 
+                  : "=a" (res)
+                  : "m" (func_lret32): "memory", "cc");
+    printf("func_lret32=%x\n", res);
+
+    asm volatile ("pushfl ; pushl %%cs ; call %1" 
+                  : "=a" (res)
+                  : "m" (func_iret32): "memory", "cc");
+    printf("func_iret32=%x\n", res);
+#endif
+#if 0
+    /* specific popl test */
+    asm volatile ("pushq $0x9abcdef12345678 ; popl (%%rsp) ; addq $4,%%rsp"
+                  : "=g" (res));
+    printf("popl esp=%x\n", res);
+#endif
+#if 0
+    // REINSTATE
+    /* specific popw test */
+    asm volatile ("pushq $12345432 ; pushq $0x9abcdef ; popw (%%rsp) ; addl $2, %%rsp ; popq %0"
+                  : "=g" (res));
+    printf("popw rsp=%x\n", res);
+#endif
+}
+
+uint8_t str_buffer[4096];
+
+/* Run one string instruction: OP ("stos"/"movs"/...), operand-size
+   letter 'size', direction setup DF ("" or "std") and prefix REP
+   ("", "rep ", "repz ", "repnz ").  Flags are cleared first via
+   pushq $0 / popfq, and the resulting RSI/RDI/RAX/RCX/arith flags
+   are printed.  Source and destination start 16 bytes apart in the
+   middle of str_buffer so both directions stay in bounds. */
+#define TEST_STRING1(OP, size, DF, REP)\
+{\
+    int64 rsi, rdi, rax, rcx, rflags;\
+\
+    rsi = (long)(str_buffer + sizeof(str_buffer) / 2);\
+    rdi = (long)(str_buffer + sizeof(str_buffer) / 2) + 16;\
+    rax = 0x12345678;\
+    rcx = 17;\
+\
+    asm volatile ("pushq $0\n\t"\
+                  "popfq\n\t"\
+                  DF "\n\t"\
+                  REP #OP size "\n\t"\
+                  "cld\n\t"\
+                  "pushfq\n\t"\
+                  "popq %4\n\t"\
+                  : "=S" (rsi), "=D" (rdi), "=a" (rax), "=c" (rcx), "=g" (rflags)\
+                  : "0" (rsi), "1" (rdi), "2" (rax), "3" (rcx));\
+    printf("%-10s ESI=%016llx EDI=%016llx EAX=%016llx ECX=%016llx EFL=%04llx\n",\
+           REP #OP size, rsi, rdi, rax, rcx,\
+           rflags & (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A));\
+}
+
+/* Expand TEST_STRING1 for byte/word/long sizes, with DF both clear
+   (default, forward) and set ("std", backward). */
+#define TEST_STRING(OP, REP)\
+    TEST_STRING1(OP, "b", "", REP);\
+    TEST_STRING1(OP, "w", "", REP);\
+    TEST_STRING1(OP, "l", "", REP);\
+    TEST_STRING1(OP, "b", "std", REP);\
+    TEST_STRING1(OP, "w", "std", REP);\
+    TEST_STRING1(OP, "l", "std", REP)
+
+/* String-instruction tests: fills str_buffer with a known pattern,
+   then runs stos/movs/scas/cmps with and without rep prefixes.
+   Several variants (lods, repz scas, plain cmps, repnz cmps) are
+   commented out pending reinstatement. */
+void test_string(void)
+{
+    int64 i;
+    for(i = 0;i < sizeof(str_buffer); i++)
+        str_buffer[i] = i + 0x56;
+   TEST_STRING(stos, "");
+   TEST_STRING(stos, "rep ");
+   // REINSTATE: TEST_STRING(lods, ""); /* to verify stos */
+   // REINSTATE: TEST_STRING(lods, "rep "); 
+   TEST_STRING(movs, "");
+   TEST_STRING(movs, "rep ");
+   // REINSTATE: TEST_STRING(lods, ""); /* to verify stos */
+
+   /* XXX: better tests */
+   TEST_STRING(scas, "");
+   // REINSTATE: TEST_STRING(scas, "repz ");
+   TEST_STRING(scas, "repnz ");
+   // REINSTATE: TEST_STRING(cmps, "");
+   TEST_STRING(cmps, "repz ");
+   // REINSTATE: TEST_STRING(cmps, "repnz ");
+}
+
+/* VM86 test */
+#if 0
+/* Set bit 'bit' in the bitmap at 'a' (little-endian bit order). */
+static inline void set_bit(uint8_t *a, unsigned int bit)
+{
+    a[bit / 8] |= (1 << (bit % 8));
+}
+
+/* Translate a real-mode seg:reg pair to its linear address
+   (seg*16 + 16-bit offset). */
+static inline uint8_t *seg_to_linear(unsigned int seg, unsigned int reg)
+{
+    return (uint8_t *)((seg << 4) + (reg & 0xffff));
+}
+
+/* Push a 16-bit value on the vm86 guest stack, wrapping SP within
+   the low 16 bits only (real-mode stack semantics). */
+static inline void pushw(struct vm86_regs *r, int val)
+{
+    r->esp = (r->esp & ~0xffff) | ((r->esp - 2) & 0xffff);
+    *(uint16_t *)seg_to_linear(r->ss, r->esp) = val;
+}
+
+#undef __syscall_return
+#define __syscall_return(type, res) \
+do { \
+	return (type) (res); \
+} while (0)
+
+_syscall2(int, vm86, int, func, struct vm86plus_struct *, v86)
+
+extern char vm86_code_start;
+extern char vm86_code_end;
+
+#define VM86_CODE_CS 0x100
+#define VM86_CODE_IP 0x100
+
+/* vm86 test: maps the low 1MB+64K, copies a .com-style 16-bit program
+   to CS:IP 0x100:0x100, and runs it under the Linux vm86 syscall,
+   emulating a small subset of DOS int 0x21 services (exit, write char,
+   write '$'-terminated string, plus a custom 0xff "print eflags").
+   (Disabled by the surrounding #if 0; i386-only.) */
+void test_vm86(void)
+{
+    struct vm86plus_struct ctx;
+    struct vm86_regs *r;
+    uint8_t *vm86_mem;
+    int seg, ret;
+
+    vm86_mem = mmap((void *)0x00000000, 0x110000, 
+                    PROT_WRITE | PROT_READ | PROT_EXEC, 
+                    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+    if (vm86_mem == MAP_FAILED) {
+        printf("ERROR: could not map vm86 memory");
+        return;
+    }
+    memset(&ctx, 0, sizeof(ctx));
+
+    /* init basic registers */
+    r = &ctx.regs;
+    r->eip = VM86_CODE_IP;
+    r->esp = 0xfffe;
+    seg = VM86_CODE_CS;
+    r->cs = seg;
+    r->ss = seg;
+    r->ds = seg;
+    r->es = seg;
+    r->fs = seg;
+    r->gs = seg;
+    r->eflags = VIF_MASK;
+
+    /* move code to proper address. We use the same layout as a .com
+       dos program. */
+    memcpy(vm86_mem + (VM86_CODE_CS << 4) + VM86_CODE_IP, 
+           &vm86_code_start, &vm86_code_end - &vm86_code_start);
+
+    /* mark int 0x21 as being emulated */
+    set_bit((uint8_t *)&ctx.int_revectored, 0x21);
+
+    /* re-enter the guest until it exits via int 0x21/AH=0 or errors */
+    for(;;) {
+        ret = vm86(VM86_ENTER, &ctx);
+        switch(VM86_TYPE(ret)) {
+        case VM86_INTx:
+            {
+                int int_num, ah, v;
+                
+                int_num = VM86_ARG(ret);
+                if (int_num != 0x21)
+                    goto unknown_int;
+                ah = (r->eax >> 8) & 0xff;
+                switch(ah) {
+                case 0x00: /* exit */
+                    goto the_end;
+                case 0x02: /* write char */
+                    {
+                        uint8_t c = r->edx;
+                        putchar(c);
+                    }
+                    break;
+                case 0x09: /* write string */
+                    {
+                        uint8_t c, *ptr;
+                        ptr = seg_to_linear(r->ds, r->edx);
+                        for(;;) {
+                            c = *ptr++;
+                            if (c == '$')
+                                break;
+                            putchar(c);
+                        }
+                        r->eax = (r->eax & ~0xff) | '$';
+                    }
+                    break;
+                case 0xff: /* extension: write eflags number in edx */
+                    v = (int)r->edx;
+#ifndef LINUX_VM86_IOPL_FIX
+                    v &= ~0x3000;
+#endif
+                    printf("%08x\n", v);
+                    break;
+                default:
+                unknown_int:
+                    printf("unsupported int 0x%02x\n", int_num);
+                    goto the_end;
+                }
+            }
+            break;
+        case VM86_SIGNAL:
+            /* a signal came, we just ignore that */
+            break;
+        case VM86_STI:
+            break;
+        default:
+            printf("ERROR: unhandled vm86 return code (0x%x)\n", ret);
+            goto the_end;
+        }
+    }
+ the_end:
+    printf("VM86 end\n");
+    munmap(vm86_mem, 0x110000);
+}
+#endif
+
+/* exception tests */
+#if 0
+#ifndef REG_EAX
+#define REG_EAX EAX
+#define REG_EBX EBX
+#define REG_ECX ECX
+#define REG_EDX EDX
+#define REG_ESI ESI
+#define REG_EDI EDI
+#define REG_EBP EBP
+#define REG_ESP ESP
+#define REG_EIP EIP
+#define REG_EFL EFL
+#define REG_TRAPNO TRAPNO
+#define REG_ERR ERR
+#endif
+
+jmp_buf jmp_env;
+int v1;
+int tab[2];
+
+/* SA_SIGINFO handler shared by all exception tests: prints the siginfo
+   fields plus trap number, error code and EIP from the ucontext, then
+   longjmps back to the test harness so the faulting test can continue. */
+void sig_handler(int sig, siginfo_t *info, void *puc)
+{
+    struct ucontext *uc = puc;
+
+    printf("si_signo=%d si_errno=%d si_code=%d",
+           info->si_signo, info->si_errno, info->si_code);
+    printf(" si_addr=0x%08lx",
+           (unsigned long)info->si_addr);
+    printf("\n");
+
+    printf("trapno=0x%02x err=0x%08x",
+           uc->uc_mcontext.gregs[REG_TRAPNO],
+           uc->uc_mcontext.gregs[REG_ERR]);
+    printf(" EIP=0x%08x", uc->uc_mcontext.gregs[REG_EIP]);
+    printf("\n");
+    longjmp(jmp_env, 1);
+}
+
+/* Exception tests: installs sig_handler for the fault signals, then
+   deliberately triggers each exception class (divide-by-zero, bound,
+   segment faults, page faults, #UD, software interrupts, privileged
+   instructions, I/O port access, single-step) inside setjmp guards so
+   the handler's longjmp resumes the sequence after each fault.
+   (Disabled by the surrounding #if 0; i386-only.) */
+void test_exceptions(void)
+{
+    struct modify_ldt_ldt_s ldt;
+    struct sigaction act;
+    volatile int val;
+    
+    act.sa_sigaction = sig_handler;
+    sigemptyset(&act.sa_mask);
+    act.sa_flags = SA_SIGINFO;
+    sigaction(SIGFPE, &act, NULL);
+    sigaction(SIGILL, &act, NULL);
+    sigaction(SIGSEGV, &act, NULL);
+    sigaction(SIGBUS, &act, NULL);
+    sigaction(SIGTRAP, &act, NULL);
+
+    /* test division by zero reporting */
+    printf("DIVZ exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* now divide by zero */
+        v1 = 0;
+        v1 = 2 / v1;
+    }
+
+    printf("BOUND exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* bound exception */
+        tab[0] = 1;
+        tab[1] = 10;
+        asm volatile ("bound %0, %1" : : "r" (11), "m" (tab));
+    }
+
+    printf("segment exceptions:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* load an invalid segment */
+        asm volatile ("movl %0, %%fs" : : "r" ((0x1234 << 3) | 1));
+    }
+    if (setjmp(jmp_env) == 0) {
+        /* null data segment is valid */
+        asm volatile ("movl %0, %%fs" : : "r" (3));
+        /* null stack segment */
+        asm volatile ("movl %0, %%ss" : : "r" (3));
+    }
+
+    /* LDT entry 1 marked not-present, to provoke #NP on load below */
+    ldt.entry_number = 1;
+    ldt.base_addr = (unsigned long)&seg_data1;
+    ldt.limit = (sizeof(seg_data1) + 0xfff) >> 12;
+    ldt.seg_32bit = 1;
+    ldt.contents = MODIFY_LDT_CONTENTS_DATA;
+    ldt.read_exec_only = 0;
+    ldt.limit_in_pages = 1;
+    ldt.seg_not_present = 1;
+    ldt.useable = 1;
+    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
+
+    if (setjmp(jmp_env) == 0) {
+        /* segment not present */
+        asm volatile ("movl %0, %%fs" : : "r" (MK_SEL(1)));
+    }
+
+    /* test SEGV reporting */
+    printf("PF exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        val = 1;
+        /* we add a nop to test a weird PC retrieval case */
+        asm volatile ("nop");
+        /* now store in an invalid address */
+        *(char *)0x1234 = 1;
+    }
+
+    /* test SEGV reporting */
+    printf("PF exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        val = 1;
+        /* read from an invalid address */
+        v1 = *(char *)0x1234;
+    }
+    
+    /* test illegal instruction reporting */
+    printf("UD2 exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* now execute an invalid instruction */
+        asm volatile("ud2");
+    }
+    printf("lock nop exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* now execute an invalid instruction */
+        asm volatile("lock nop");
+    }
+    
+    printf("INT exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int $0xfd");
+    }
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int $0x01");
+    }
+    if (setjmp(jmp_env) == 0) {
+        asm volatile (".byte 0xcd, 0x03");
+    }
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int $0x04");
+    }
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int $0x05");
+    }
+
+    printf("INT3 exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int3");
+    }
+
+    printf("CLI exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("cli");
+    }
+
+    printf("STI exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* fix: was "cli" (copy/paste from the previous test); this
+           case is meant to exercise STI at CPL3 */
+        asm volatile ("sti");
+    }
+
+    printf("INTO exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* overflow exception */
+        asm volatile ("addl $1, %0 ; into" : : "r" (0x7fffffff));
+    }
+
+    printf("OUTB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("outb %%al, %%dx" : : "d" (0x4321), "a" (0));
+    }
+
+    printf("INB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("inb %%dx, %%al" : "=a" (val) : "d" (0x4321));
+    }
+
+    printf("REP OUTSB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("rep outsb" : : "d" (0x4321), "S" (tab), "c" (1));
+    }
+
+    printf("REP INSB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("rep insb" : : "d" (0x4321), "D" (tab), "c" (1));
+    }
+
+    printf("HLT exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("hlt");
+    }
+
+    printf("single step exception:\n");
+    val = 0;
+    if (setjmp(jmp_env) == 0) {
+        /* set TF, run two movls, handler fires after each one */
+        asm volatile ("pushf\n"
+                      "orl $0x00100, (%%esp)\n"
+                      "popf\n"
+                      "movl $0xabcd, %0\n" 
+                      "movl $0x0, %0\n" : "=m" (val) : : "cc", "memory");
+    }
+    printf("val=0x%x\n", val);
+}
+
+/* specific precise single step test */
+/* SIGTRAP handler for the precise single-step test: just prints the
+   faulting EIP so the trace positions can be checked; does not longjmp. */
+void sig_trap_handler(int sig, siginfo_t *info, void *puc)
+{
+    struct ucontext *uc = puc;
+    printf("EIP=0x%08x\n", uc->uc_mcontext.gregs[REG_EIP]);
+}
+
+const uint8_t sstep_buf1[4] = { 1, 2, 3, 4};
+uint8_t sstep_buf2[4];
+
+/* Precise single-step test: sets TF and runs a fixed instruction
+   sequence (loop, rep movsb/cmpsb, int 0x80 syscall, SS-load shadow
+   cases) under sig_trap_handler, which prints EIP at every trap so
+   the exact stepping behaviour can be compared.  (Inside #if 0;
+   i386-only.) */
+void test_single_step(void)
+{
+    struct sigaction act;
+    volatile int val;
+    int i;
+
+    val = 0;
+    act.sa_sigaction = sig_trap_handler;
+    sigemptyset(&act.sa_mask);
+    act.sa_flags = SA_SIGINFO;
+    sigaction(SIGTRAP, &act, NULL);
+    asm volatile ("pushf\n"
+                  "orl $0x00100, (%%esp)\n"
+                  "popf\n"
+                  "movl $0xabcd, %0\n" 
+
+                  /* jmp test */
+                  "movl $3, %%ecx\n"
+                  "1:\n"
+                  "addl $1, %0\n"
+                  "decl %%ecx\n"
+                  "jnz 1b\n"
+
+                  /* movsb: the single step should stop at each movsb iteration */
+                  "movl $sstep_buf1, %%esi\n"
+                  "movl $sstep_buf2, %%edi\n"
+                  "movl $0, %%ecx\n"
+                  "rep movsb\n"
+                  "movl $3, %%ecx\n"
+                  "rep movsb\n"
+                  "movl $1, %%ecx\n"
+                  "rep movsb\n"
+
+                  /* cmpsb: the single step should stop at each cmpsb iteration */
+                  "movl $sstep_buf1, %%esi\n"
+                  "movl $sstep_buf2, %%edi\n"
+                  "movl $0, %%ecx\n"
+                  "rep cmpsb\n"
+                  "movl $4, %%ecx\n"
+                  "rep cmpsb\n"
+                  
+                  /* getpid() syscall: single step should skip one
+                     instruction */
+                  "movl $20, %%eax\n"
+                  "int $0x80\n"
+                  "movl $0, %%eax\n"
+                  
+                  /* when modifying SS, trace is not done on the next
+                     instruction */
+                  "movl %%ss, %%ecx\n"
+                  "movl %%ecx, %%ss\n"
+                  "addl $1, %0\n"
+                  "movl $1, %%eax\n"
+                  "movl %%ecx, %%ss\n"
+                  "jmp 1f\n"
+                  "addl $1, %0\n"
+                  "1:\n"
+                  "movl $1, %%eax\n"
+                  "pushl %%ecx\n"
+                  "popl %%ss\n"
+                  "addl $1, %0\n"
+                  "movl $1, %%eax\n"
+                  
+                  /* finally clear TF again */
+                  "pushf\n"
+                  "andl $~0x00100, (%%esp)\n"
+                  "popf\n"
+                  : "=m" (val) 
+                  : 
+                  : "cc", "memory", "eax", "ecx", "esi", "edi");
+    printf("val=%d\n", val);
+    for(i = 0; i < 4; i++)
+        printf("sstep_buf2[%d] = %d\n", i, sstep_buf2[i]);
+}
+
+/* self modifying code test */
+uint8_t code[] = {
+    0xb8, 0x1, 0x00, 0x00, 0x00, /* movl $1, %eax */
+    0xc3, /* ret */
+};
+
+asm("smc_code2:\n"
+    "movl 4(%esp), %eax\n"
+    "movl %eax, smc_patch_addr2 + 1\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "smc_patch_addr2:\n"
+    "movl $1, %eax\n"
+    "ret\n");
+
+typedef int FuncType(void);
+extern int smc_code2(int);
+/* Self-modifying-code test: patches the immediate byte of the 'code[]'
+   stub between calls, then calls smc_code2 which patches the very next
+   instruction after the store (the hard case for translated code). */
+void test_self_modifying_code(void)
+{
+    int i;
+
+    printf("self modifying code:\n");
+    printf("func1 = 0x%x\n", ((FuncType *)code)());
+    for(i = 2; i <= 4; i++) {
+        /* rewrite the movl immediate, so each call returns i */
+        code[1] = i;
+        printf("func%d = 0x%x\n", i, ((FuncType *)code)());
+    }
+
+    /* more difficult test : the modified code is just after the
+       modifying instruction. It is forbidden in Intel specs, but it
+       is used by old DOS programs */
+    for(i = 2; i <= 4; i++) {
+        printf("smc_code2(%d) = %d\n", i, smc_code2(i));
+    }
+}
+    
+static void *call_end __init_call = NULL;
+#endif
+
+/* Test driver: first walks the __init_call pointer table (populated by
+   the per-OP test headers) and runs every registered test, then calls
+   the hand-written tests.  Several calls are commented out pending
+   64-bit reinstatement. */
+int main(int argc, char **argv)
+{
+    void **ptr;
+    void (*func)(void);
+
+#if 1
+    /* run the auto-registered tests between call_start and the NULL end marker */
+    ptr = &call_start + 1;
+    while (*ptr != NULL) {
+        func = *ptr++;
+        func();
+    }
+#endif
+    test_bsx();  //REINSTATE64
+    test_mul();
+    test_jcc();
+    //    test_floats();  REINSTATE64
+    //test_bcd();
+    //test_xchg();   REINSTATE64
+    test_string();
+    //test_misc(); // REINSTATE
+    test_lea();
+    //    test_segs();
+    //test_code16();
+    //test_vm86();
+    //test_exceptions();
+    //test_self_modifying_code();
+    //test_single_step();
+    printf("bye\n");
+    return 0;
+}
diff --git a/VEX/test/test-amd64.h b/VEX/test/test-amd64.h
new file mode 100644
index 0000000..94b32fe
--- /dev/null
+++ b/VEX/test/test-amd64.h
@@ -0,0 +1,227 @@
+
+#define exec_op glue(exec_, OP)
+#define exec_opq glue(glue(exec_, OP), q)
+#define exec_opl glue(glue(exec_, OP), l)
+#define exec_opw glue(glue(exec_, OP), w)
+#define exec_opb glue(glue(exec_, OP), b)
+
+/* Two-operand executor: load 'flags' into RFLAGS, run OP with source s1
+   and destination res, then capture the resulting RFLAGS back into
+   'flags'.  'size' is the AT&T size suffix/operand modifier letter. */
+#define EXECOP2(size, res, s1, flags) \
+    asm ("pushq %4\n\t"\
+         "popfq\n\t"\
+         stringify(OP) size " %" size "2, %" size "0\n\t" \
+         "pushfq\n\t"\
+         "popq %1\n\t"\
+         : "=q" (res), "=g" (flags)\
+         : "q" (s1), "0" (res), "1" (flags));
+
+/* One-operand executor (inc/dec/neg/not style): same flag protocol as
+   EXECOP2 but OP takes only the destination. */
+#define EXECOP1(size, res, flags) \
+    asm ("pushq %3\n\t"\
+         "popfq\n\t"\
+         stringify(OP) size " %" size "0\n\t" \
+         "pushfq\n\t"\
+         "popq %1\n\t"\
+         : "=q" (res), "=g" (flags)\
+         : "0" (res), "1" (flags));
+
+/* Per-size wrappers: run OP at q/l/w/b width with input s0 (and s1 for
+   two-operand ops) under incoming flags 'iflags', then print operands,
+   result and the CC_MASK-relevant output flags. */
+#ifdef OP1
+inline void exec_opq(int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP1("", res, flags);
+    printf("%-6s A=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "q", s0, res, iflags, flags & CC_MASK);
+}
+/* NOTE(review): passes the same empty size suffix as exec_opq, so with
+   64-bit operands the instruction width is identical to the "q" case
+   even though the label says "l" — confirm whether a "k" modifier was
+   intended here. */
+inline void exec_opl(int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP1("", res, flags);
+    printf("%-6s A=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "l", s0, res, iflags, flags & CC_MASK);
+}
+inline void exec_opw(int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP1("w", res, flags);
+    printf("%-6s A=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "w", s0, res, iflags, flags & CC_MASK);
+}
+inline void exec_opb(int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP1("b", res, flags);
+    printf("%-6s A=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "b", s0, res, iflags, flags & CC_MASK);
+}
+#else
+inline void exec_opq(int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP2("q", res, s1, flags);
+    printf("%-6s A=%016llx B=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "q", s0, s1, res, iflags, flags & CC_MASK);
+}
+
+/* NOTE(review): empty suffix with 64-bit operands — see the OP1-branch
+   note above; presumably meant to force 32-bit width, confirm. */
+inline void exec_opl(int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP2("", res, s1, flags);
+    printf("%-6s A=%016llx B=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "l", s0, s1, res, iflags, flags & CC_MASK);
+}
+
+inline void exec_opw(int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP2("w", res, s1, flags);
+    printf("%-6s A=%016llx B=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "w", s0, s1, res, iflags, flags & CC_MASK);
+}
+
+inline void exec_opb(int64 s0, int64 s1, int64 iflags)
+{
+    int64 res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP2("b", res, s1, flags);
+    printf("%-6s A=%016llx B=%016llx R=%016llx CCIN=%04llx CC=%04llx\n",
+           stringify(OP) "b", s0, s1, res, iflags, flags & CC_MASK);
+}
+#endif
+
+/* Run OP on (s0, s1) at all four operand sizes, once for each of the
+   64 combinations of incoming O/S/Z/A/C/P flags, so flag-dependent
+   ops (adc, sbb, ...) get full coverage.  The #else branch is a
+   cheaper two-flag-state variant kept for reference. */
+void exec_op(int64 s0, int64 s1)
+{
+#if 1
+  int64 o,s,z,a,c,p,flags_in;
+  for (o = 0; o < 2; o++) {
+  for (s = 0; s < 2; s++) {
+  for (z = 0; z < 2; z++) {
+  for (a = 0; a < 2; a++) {
+  for (c = 0; c < 2; c++) {
+  for (p = 0; p < 2; p++) {
+
+    flags_in = (o ? CC_O : 0)
+             | (s ? CC_S : 0)
+             | (z ? CC_Z : 0)
+             | (a ? CC_A : 0)
+             | (c ? CC_C : 0)
+             | (p ? CC_P : 0);
+    exec_opq(s0, s1, flags_in);
+    exec_opl(s0, s1, flags_in);
+    exec_opw(s0, s1, flags_in);
+    exec_opb(s0, s1, flags_in);
+  }}}}}}
+#else
+    exec_opq(s0, s1, 0);
+    exec_opl(s0, s1, 0);
+    exec_opw(s0, s1, 0);
+    exec_opb(s0, s1, 0);
+    exec_opq(s0, s1, CC_C);
+    exec_opl(s0, s1, CC_C);
+    exec_opw(s0, s1, CC_C);
+    exec_opb(s0, s1, CC_C);
+#endif
+}
+
+/* Per-OP test entry point: runs exec_op over a hand-picked set of
+   boundary operands (carry/overflow edges at 8/16/32/64-bit widths).
+   The val[] table and the i/j loop are an exhaustive NVALS x NVALS
+   sweep enabled only under TEST_INTEGER_VERBOSE. */
+void glue(test_, OP)(void)
+{
+#define NVALS 57
+   int64 i, j;
+   static unsigned int val[NVALS]
+    = { 0x00, 0x01, 0x02, 0x03, 
+        0x3F, 0x40, 0x41, 
+        0x7E, 0x7F, 0x80, 0x81, 0x82, 
+        0xBF, 0xC0, 0xC1, 
+        0xFC, 0xFD, 0xFE, 0xFF, 
+
+        0xFF00, 0xFF01, 0xFF02, 0xFF03, 
+        0xFF3F, 0xFF40, 0xFF41, 
+        0xFF7E, 0xFF7F, 0xFF80, 0xFF81, 0xFF82, 
+        0xFFBF, 0xFFC0, 0xFFC1, 
+        0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF, 
+
+        0xFFFFFF00, 0xFFFFFF01, 0xFFFFFF02, 0xFFFFFF03, 
+        0xFFFFFF3F, 0xFFFFFF40, 0xFFFFFF41, 
+        0xFFFFFF7E, 0xFFFFFF7F, 0xFFFFFF80, 0xFFFFFF81, 0xFFFFFF82, 
+        0xFFFFFFBF, 0xFFFFFFC0, 0xFFFFFFC1, 
+        0xFFFFFFFC, 0xFFFFFFFD, 0xFFFFFFFE, 0xFFFFFFFF
+      };
+
+    exec_op(0xabcd12345678, 0x4321812FADA);
+    exec_op(0x12345678, 0x812FADA);
+    exec_op(0xabcd00012341, 0xabcd00012341);
+    exec_op(0x12341, 0x12341);
+    exec_op(0x12341, -0x12341);
+    exec_op(0xffffffff, 0);
+    exec_op(0xffffffff, -1);
+    exec_op(0xffffffff, 1);
+    exec_op(0xffffffff, 2);
+    exec_op(0x7fffffff, 0);
+    exec_op(0x7fffffff, 1);
+    exec_op(0x7fffffff, -1);
+    exec_op(0x80000000, -1);
+    exec_op(0x80000000, 1);
+    exec_op(0x80000000, -2);
+    exec_op(0x12347fff, 0);
+    exec_op(0x12347fff, 1);
+    exec_op(0x12347fff, -1);
+    exec_op(0x12348000, -1);
+    exec_op(0x12348000, 1);
+    exec_op(0x12348000, -2);
+    exec_op(0x12347f7f, 0);
+    exec_op(0x12347f7f, 1);
+    exec_op(0x12347f7f, -1);
+    exec_op(0x12348080, -1);
+    exec_op(0x12348080, 1);
+    exec_op(0x12348080, -2);
+
+    /* 64-bit boundary cases */
+    exec_op(0xFFFFFFFFffffffff, 0);
+    exec_op(0xFFFFFFFFffffffff, -1);
+    exec_op(0xFFFFFFFFffffffff, 1);
+    exec_op(0xFFFFFFFFffffffff, 2);
+    exec_op(0x7fffffffFFFFFFFF, 0);
+    exec_op(0x7fffffffFFFFFFFF, 1);
+    exec_op(0x7fffffffFFFFFFFF, -1);
+    exec_op(0x8000000000000000, -1);
+    exec_op(0x8000000000000000, 1);
+    exec_op(0x8000000000000000, -2);
+    exec_op(0x123443217FFFFFFF, 0);
+    exec_op(0x123443217FFFFFFF, 1);
+    exec_op(0x123443217FFFFFFF, -1);
+    exec_op(0x1234432180000000, -1);
+    exec_op(0x1234432180000000, 1);
+    exec_op(0x1234432180000000, -2);
+    exec_op(0x123443217F7F7f7f, 0);
+    exec_op(0x123443217F7F7f7f, 1);
+    exec_op(0x123443217F7F7f7f, -1);
+    exec_op(0x1234432180808080, -1);
+    exec_op(0x1234432180808080, 1);
+    exec_op(0x1234432180808080, -2);
+
+#if TEST_INTEGER_VERBOSE
+    if (1)
+    for (i = 0; i < NVALS; i++)
+      for (j = 0; j < NVALS; j++)
+	exec_op(val[i], val[j]);
+#endif
+
+#undef NVALS
+}
+
+void *glue(_test_, OP) __init_call = glue(test_, OP);
+
+#undef OP
+#undef OP_CC
diff --git a/VEX/test/test-i386-muldiv.h b/VEX/test/test-i386-muldiv.h
new file mode 100644
index 0000000..5dba315
--- /dev/null
+++ b/VEX/test/test-i386-muldiv.h
@@ -0,0 +1,56 @@
+
+/* i386 mul/div template, byte form: OP runs with AL implicit and the
+   single explicit operand in a register; prints inputs, AX result and
+   the CC_MASK output flags. */
+void glue(glue(test_, OP), b)(int op0, int op1) 
+{
+    int res, s1, s0, flags;
+    s0 = op0;
+    s1 = op1;
+    res = s0;
+    flags = 0;
+    asm ("push %4\n\t"
+         "popf\n\t"
+         stringify(OP)"b %b2\n\t" 
+         "pushf\n\t"
+         "popl %1\n\t"
+         : "=a" (res), "=g" (flags)
+         : "q" (s1), "0" (res), "1" (flags));
+    printf("%-10s A=%08x B=%08x R=%08x CC=%04x\n",
+           stringify(OP) "b", s0, s1, res, flags & CC_MASK);
+}
+
+/* Word form: DX:AX is the implicit double-width operand (op0h seeds
+   DX, op0 seeds AX); result high half comes back in resh. */
+void glue(glue(test_, OP), w)(int op0h, int op0, int op1) 
+{
+    int res, s1, flags, resh;
+    s1 = op1;
+    resh = op0h;
+    res = op0;
+    flags = 0;
+    asm ("push %5\n\t"
+         "popf\n\t"
+         stringify(OP) "w %w3\n\t" 
+         "pushf\n\t"
+         "popl %1\n\t"
+         : "=a" (res), "=g" (flags), "=d" (resh)
+         : "q" (s1), "0" (res), "1" (flags), "2" (resh));
+    printf("%-10s AH=%08x AL=%08x B=%08x RH=%08x RL=%08x CC=%04x\n",
+           stringify(OP) "w", op0h, op0, s1, resh, res, flags & CC_MASK);
+}
+
+/* Long form: EDX:EAX is the implicit operand pair, same protocol as
+   the word form. */
+void glue(glue(test_, OP), l)(int op0h, int op0, int op1) 
+{
+    int res, s1, flags, resh;
+    s1 = op1;
+    resh = op0h;
+    res = op0;
+    flags = 0;
+    asm ("push %5\n\t"
+         "popf\n\t"
+         stringify(OP) "l %3\n\t" 
+         "pushf\n\t"
+         "popl %1\n\t"
+         : "=a" (res), "=g" (flags), "=d" (resh)
+         : "q" (s1), "0" (res), "1" (flags), "2" (resh));
+    printf("%-10s AH=%08x AL=%08x B=%08x RH=%08x RL=%08x CC=%04x\n",
+           stringify(OP) "l", op0h, op0, s1, resh, res, flags & CC_MASK);
+}
+
+#undef OP
diff --git a/VEX/test/test-i386-shift.h b/VEX/test/test-i386-shift.h
new file mode 100644
index 0000000..e86b85f
--- /dev/null
+++ b/VEX/test/test-i386-shift.h
@@ -0,0 +1,161 @@
+
+#define exec_op glue(exec_, OP)
+#define exec_opl glue(glue(exec_, OP), l)
+#define exec_opw glue(glue(exec_, OP), w)
+#define exec_opb glue(glue(exec_, OP), b)
+
+#ifndef OP_SHIFTD
+
+#ifdef OP_NOBYTE
+#define EXECSHIFT(size, res, s1, s2, flags) \
+    asm ("push %4\n\t"\
+         "popf\n\t"\
+         stringify(OP) size " %" size "2, %" size "0\n\t" \
+         "pushf\n\t"\
+         "popl %1\n\t"\
+         : "=g" (res), "=g" (flags)\
+         : "r" (s1), "0" (res), "1" (flags));
+#else
+#define EXECSHIFT(size, res, s1, s2, flags) \
+    asm ("push %4\n\t"\
+         "popf\n\t"\
+         stringify(OP) size " %%cl, %" size "0\n\t" \
+         "pushf\n\t"\
+         "popl %1\n\t"\
+         : "=q" (res), "=g" (flags)\
+         : "c" (s1), "0" (res), "1" (flags));
+#endif
+
+void exec_opl(int s2, int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("", res, s1, s2, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%08x B=%08x R=%08x CCIN=%04x CC=%04x\n",
+           stringify(OP) "l", s0, s1, res, iflags, flags & CC_MASK);
+}
+
+void exec_opw(int s2, int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("w", res, s1, s2, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%08x B=%08x R=%08x CCIN=%04x CC=%04x\n",
+           stringify(OP) "w", s0, s1, res, iflags, flags & CC_MASK);
+}
+
+#else
+#define EXECSHIFT(size, res, s1, s2, flags) \
+    asm ("push %4\n\t"\
+         "popf\n\t"\
+         stringify(OP) size " %%cl, %" size "5, %" size "0\n\t" \
+         "pushf\n\t"\
+         "popl %1\n\t"\
+         : "=g" (res), "=g" (flags)\
+         : "c" (s1), "0" (res), "1" (flags), "r" (s2));
+
+void exec_opl(int s2, int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("", res, s1, s2, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%08x B=%08x C=%08x R=%08x CCIN=%04x CC=%04x\n",
+           stringify(OP) "l", s0, s2, s1, res, iflags, flags & CC_MASK);
+}
+
+void exec_opw(int s2, int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("w", res, s1, s2, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%08x B=%08x C=%08x R=%08x CCIN=%04x CC=%04x\n",
+           stringify(OP) "w", s0, s2, s1, res, iflags, flags & CC_MASK);
+}
+
+#endif
+
+#ifndef OP_NOBYTE
+void exec_opb(int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECSHIFT("b", res, s1, 0, flags);
+    /* overflow is undefined if count != 1 */
+    if (s1 != 1)
+      flags &= ~CC_O;
+    printf("%-10s A=%08x B=%08x R=%08x CCIN=%04x CC=%04x\n",
+           stringify(OP) "b", s0, s1, res, iflags, flags & CC_MASK);
+}
+#endif
+
+void exec_op(int s2, int s0, int s1)
+{
+  int o,s,z,a,c,p,flags_in;
+  for (o = 0; o < 2; o++) {
+  for (s = 0; s < 2; s++) {
+  for (z = 0; z < 2; z++) {
+  for (a = 0; a < 2; a++) {
+  for (c = 0; c < 2; c++) {
+  for (p = 0; p < 2; p++) {
+
+    flags_in = (o ? CC_O : 0)
+             | (s ? CC_S : 0)
+             | (z ? CC_Z : 0)
+             | (a ? CC_A : 0)
+             | (c ? CC_C : 0)
+             | (p ? CC_P : 0);
+ 
+    exec_opl(s2, s0, s1, flags_in);
+#ifdef OP_SHIFTD
+    if (s1 <= 15)
+        exec_opw(s2, s0, s1, flags_in);
+#else
+    exec_opw(s2, s0, s1, flags_in);
+#endif
+#ifndef OP_NOBYTE
+    exec_opb(s0, s1, flags_in);
+#endif
+#ifdef OP_CC
+    exec_opl(s2, s0, s1, flags_in);
+    exec_opw(s2, s0, s1, flags_in);
+    exec_opb(s0, s1, flags_in);
+#endif
+
+  }}}}}}
+
+}
+
+void glue(test_, OP)(void)
+{
+    int i;
+    for(i = 0; i < 32; i++)
+        exec_op(0x21ad3d34, 0x12345678, i);
+    for(i = 0; i < 32; i++)
+        exec_op(0x813f3421, 0x82345678, i);
+}
+
+void *glue(_test_, OP) __init_call = glue(test_, OP);
+
+#undef OP
+#undef OP_CC
+#undef OP_SHIFTD
+#undef OP_NOBYTE
+#undef EXECSHIFT
+
diff --git a/VEX/test/test-i386.c b/VEX/test/test-i386.c
new file mode 100644
index 0000000..cd47930
--- /dev/null
+++ b/VEX/test/test-i386.c
@@ -0,0 +1,1668 @@
+/*
+ *  x86 CPU test
+ * 
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <math.h>
+#include <signal.h>
+#include <setjmp.h>
+#include <errno.h>
+#include <sys/ucontext.h>
+#include <sys/mman.h>
+#include <asm/vm86.h>
+
+/* Setting this to 1 creates a very comprehensive test of
+   integer condition codes. */
+#define TEST_INTEGER_VERBOSE 1
+
+
+//#define LINUX_VM86_IOPL_FIX
+//#define TEST_P4_FLAGS
+
+#define xglue(x, y) x ## y
+#define glue(x, y) xglue(x, y)
+#define stringify(s)	tostring(s)
+#define tostring(s)	#s
+
+#define CC_C   	0x0001
+#define CC_P 	0x0004
+#define CC_A	0x0010
+#define CC_Z	0x0040
+#define CC_S    0x0080
+#define CC_O    0x0800
+
+#define __init_call	__attribute__ ((unused,__section__ (".initcall.init")))
+
+static void *call_start __init_call = NULL;
+
+#define CC_MASK (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A)
+
+#define OP add
+#include "test-i386.h"
+
+#define OP sub
+#include "test-i386.h"
+
+#define OP xor
+#include "test-i386.h"
+
+#define OP and
+#include "test-i386.h"
+
+#define OP or
+#include "test-i386.h"
+
+#define OP cmp
+#include "test-i386.h"
+
+#define OP adc
+#define OP_CC
+#include "test-i386.h"
+
+#define OP sbb
+#define OP_CC
+#include "test-i386.h"
+
+#define OP inc
+#define OP_CC
+#define OP1
+#include "test-i386.h"
+
+#define OP dec
+#define OP_CC
+#define OP1
+#include "test-i386.h"
+
+#define OP neg
+#define OP_CC
+#define OP1
+#include "test-i386.h"
+
+#define OP not
+#define OP_CC
+#define OP1
+#include "test-i386.h"
+
+#undef CC_MASK
+#define CC_MASK (CC_C | CC_P | CC_Z | CC_S | CC_O)
+
+#define OP shl
+#include "test-i386-shift.h"
+
+#define OP shr
+#include "test-i386-shift.h"
+
+#define OP sar
+#include "test-i386-shift.h"
+
+#define OP rol
+#include "test-i386-shift.h"
+
+#define OP ror
+#include "test-i386-shift.h"
+
+#define OP rcr
+#define OP_CC
+#include "test-i386-shift.h"
+
+#define OP rcl
+#define OP_CC
+#include "test-i386-shift.h"
+
+#define OP shld
+#define OP_SHIFTD
+#define OP_NOBYTE
+#include "test-i386-shift.h"
+
+#define OP shrd
+#define OP_SHIFTD
+#define OP_NOBYTE
+#include "test-i386-shift.h"
+
+
+/* XXX: should be more precise ? */
+#undef CC_MASK
+#define CC_MASK (CC_C)
+
+#define OP bt
+#define OP_NOBYTE
+#include "test-i386-shift.h"
+
+#define OP bts
+#define OP_NOBYTE
+#include "test-i386-shift.h"
+
+#define OP btr
+#define OP_NOBYTE
+#include "test-i386-shift.h"
+
+#define OP btc
+#define OP_NOBYTE
+#include "test-i386-shift.h"
+
+
+/* lea test (modrm support) */
+#define TEST_LEA(STR)\
+{\
+    asm("leal " STR ", %0"\
+        : "=r" (res)\
+        : "a" (eax), "b" (ebx), "c" (ecx), "d" (edx), "S" (esi), "D" (edi));\
+    printf("lea %s = %08x\n", STR, res);\
+}
+
+#define TEST_LEA16(STR)\
+{\
+    asm(".code16 ; .byte 0x67 ; leal " STR ", %0 ; .code32"\
+        : "=wq" (res)\
+        : "a" (eax), "b" (ebx), "c" (ecx), "d" (edx), "S" (esi), "D" (edi));\
+    printf("lea %s = %08x\n", STR, res);\
+}
+
+
+void test_lea(void)
+{
+    int eax, ebx, ecx, edx, esi, edi, res;
+    eax = 0x0001;
+    ebx = 0x0002;
+    ecx = 0x0004;
+    edx = 0x0008;
+    esi = 0x0010;
+    edi = 0x0020;
+
+    TEST_LEA("0x4000");
+
+    TEST_LEA("(%%eax)");
+    TEST_LEA("(%%ebx)");
+    TEST_LEA("(%%ecx)");
+    TEST_LEA("(%%edx)");
+    TEST_LEA("(%%esi)");
+    TEST_LEA("(%%edi)");
+
+    TEST_LEA("0x40(%%eax)");
+    TEST_LEA("0x40(%%ebx)");
+    TEST_LEA("0x40(%%ecx)");
+    TEST_LEA("0x40(%%edx)");
+    TEST_LEA("0x40(%%esi)");
+    TEST_LEA("0x40(%%edi)");
+
+    TEST_LEA("0x4000(%%eax)");
+    TEST_LEA("0x4000(%%ebx)");
+    TEST_LEA("0x4000(%%ecx)");
+    TEST_LEA("0x4000(%%edx)");
+    TEST_LEA("0x4000(%%esi)");
+    TEST_LEA("0x4000(%%edi)");
+
+    TEST_LEA("(%%eax, %%ecx)");
+    TEST_LEA("(%%ebx, %%edx)");
+    TEST_LEA("(%%ecx, %%ecx)");
+    TEST_LEA("(%%edx, %%ecx)");
+    TEST_LEA("(%%esi, %%ecx)");
+    TEST_LEA("(%%edi, %%ecx)");
+
+    TEST_LEA("0x40(%%eax, %%ecx)");
+    TEST_LEA("0x4000(%%ebx, %%edx)");
+
+    TEST_LEA("(%%ecx, %%ecx, 2)");
+    TEST_LEA("(%%edx, %%ecx, 4)");
+    TEST_LEA("(%%esi, %%ecx, 8)");
+
+    TEST_LEA("(,%%eax, 2)");
+    TEST_LEA("(,%%ebx, 4)");
+    TEST_LEA("(,%%ecx, 8)");
+
+    TEST_LEA("0x40(,%%eax, 2)");
+    TEST_LEA("0x40(,%%ebx, 4)");
+    TEST_LEA("0x40(,%%ecx, 8)");
+
+
+    TEST_LEA("-10(%%ecx, %%ecx, 2)");
+    TEST_LEA("-10(%%edx, %%ecx, 4)");
+    TEST_LEA("-10(%%esi, %%ecx, 8)");
+
+    TEST_LEA("0x4000(%%ecx, %%ecx, 2)");
+    TEST_LEA("0x4000(%%edx, %%ecx, 4)");
+    TEST_LEA("0x4000(%%esi, %%ecx, 8)");
+}
+
+#define TEST_JCC(JCC, v1, v2)\
+{\
+    int res;\
+    asm("movl $1, %0\n\t"\
+        "cmpl %2, %1\n\t"\
+        "j" JCC " 1f\n\t"\
+        "movl $0, %0\n\t"\
+        "1:\n\t"\
+        : "=r" (res)\
+        : "r" (v1), "r" (v2));\
+    printf("%-10s %d\n", "j" JCC, res);\
+\
+    asm("movl $0, %0\n\t"\
+        "cmpl %2, %1\n\t"\
+        "set" JCC " %b0\n\t"\
+        : "=r" (res)\
+        : "r" (v1), "r" (v2));\
+    printf("%-10s %d\n", "set" JCC, res);\
+ {  int one = 1; \
+    asm("movl $0x12345678, %0\n\t"\
+        "cmpl %2, %1\n\t"\
+        "cmov" JCC "l %3, %0\n\t"\
+        : "=r" (res)\
+        : "r" (v1), "r" (v2), "m" (one));\
+        printf("%-10s R=0x%08x\n", "cmov" JCC "l", res);\
+    asm("movl $0x12345678, %0\n\t"\
+        "cmpl %2, %1\n\t"\
+        "cmov" JCC "w %w3, %w0\n\t"\
+        : "=r" (res)\
+        : "r" (v1), "r" (v2), "r" (1));\
+        printf("%-10s R=0x%08x\n", "cmov" JCC "w", res);\
+ } \
+}
+
+/* various jump tests */
+void test_jcc(void)
+{
+    TEST_JCC("ne", 1, 1);
+    TEST_JCC("ne", 1, 0);
+
+    TEST_JCC("e", 1, 1);
+    TEST_JCC("e", 1, 0);
+
+    TEST_JCC("l", 1, 1);
+    TEST_JCC("l", 1, 0);
+    TEST_JCC("l", 1, -1);
+
+    TEST_JCC("le", 1, 1);
+    TEST_JCC("le", 1, 0);
+    TEST_JCC("le", 1, -1);
+
+    TEST_JCC("ge", 1, 1);
+    TEST_JCC("ge", 1, 0);
+    TEST_JCC("ge", -1, 1);
+
+    TEST_JCC("g", 1, 1);
+    TEST_JCC("g", 1, 0);
+    TEST_JCC("g", 1, -1);
+
+    TEST_JCC("b", 1, 1);
+    TEST_JCC("b", 1, 0);
+    TEST_JCC("b", 1, -1);
+
+    TEST_JCC("be", 1, 1);
+    TEST_JCC("be", 1, 0);
+    TEST_JCC("be", 1, -1);
+
+    TEST_JCC("ae", 1, 1);
+    TEST_JCC("ae", 1, 0);
+    TEST_JCC("ae", 1, -1);
+
+    TEST_JCC("a", 1, 1);
+    TEST_JCC("a", 1, 0);
+    TEST_JCC("a", 1, -1);
+
+
+    TEST_JCC("p", 1, 1);
+    TEST_JCC("p", 1, 0);
+
+    TEST_JCC("np", 1, 1);
+    TEST_JCC("np", 1, 0);
+
+    TEST_JCC("o", 0x7fffffff, 0);
+    TEST_JCC("o", 0x7fffffff, -1);
+
+    TEST_JCC("no", 0x7fffffff, 0);
+    TEST_JCC("no", 0x7fffffff, -1);
+
+    TEST_JCC("s", 0, 1);
+    TEST_JCC("s", 0, -1);
+    TEST_JCC("s", 0, 0);
+
+    TEST_JCC("ns", 0, 1);
+    TEST_JCC("ns", 0, -1);
+    TEST_JCC("ns", 0, 0);
+}
+
+#undef CC_MASK
+#ifdef TEST_P4_FLAGS
+#define CC_MASK (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A)
+#else
+#define CC_MASK (CC_O | CC_C)
+#endif
+
+#define OP mul
+#include "test-i386-muldiv.h"
+
+#define OP imul
+#include "test-i386-muldiv.h"
+
+void test_imulw2(int op0, int op1) 
+{
+    int res, s1, s0, flags;
+    s0 = op0;
+    s1 = op1;
+    res = s0;
+    flags = 0;
+    asm ("push %4\n\t"
+         "popf\n\t"
+         "imulw %w2, %w0\n\t" 
+         "pushf\n\t"
+         "popl %1\n\t"
+         : "=q" (res), "=g" (flags)
+         : "q" (s1), "0" (res), "1" (flags));
+    printf("%-10s A=%08x B=%08x R=%08x CC=%04x\n",
+           "imulw", s0, s1, res, flags & CC_MASK);
+}
+
+void test_imull2(int op0, int op1) 
+{
+    int res, s1, s0, flags;
+    s0 = op0;
+    s1 = op1;
+    res = s0;
+    flags = 0;
+    asm ("push %4\n\t"
+         "popf\n\t"
+         "imull %2, %0\n\t" 
+         "pushf\n\t"
+         "popl %1\n\t"
+         : "=q" (res), "=g" (flags)
+         : "q" (s1), "0" (res), "1" (flags));
+    printf("%-10s A=%08x B=%08x R=%08x CC=%04x\n",
+           "imull", s0, s1, res, flags & CC_MASK);
+}
+
+#define TEST_IMUL_IM(size, size1, op0, op1)\
+{\
+    int res, flags;\
+    flags = 0;\
+    res = 0;\
+    asm ("push %3\n\t"\
+         "popf\n\t"\
+         "imul" size " $" #op0 ", %" size1 "2, %" size1 "0\n\t" \
+         "pushf\n\t"\
+         "popl %1\n\t"\
+         : "=r" (res), "=g" (flags)\
+         : "r" (op1), "1" (flags), "0" (res));\
+    printf("%-10s A=%08x B=%08x R=%08x CC=%04x\n",\
+           "imul" size, op0, op1, res, flags & CC_MASK);\
+}
+
+
+#undef CC_MASK
+#define CC_MASK (0)
+
+#define OP div
+#include "test-i386-muldiv.h"
+
+#define OP idiv
+#include "test-i386-muldiv.h"
+
+void test_mul(void)
+{
+    test_imulb(0x1234561d, 4);
+    test_imulb(3, -4);
+    test_imulb(0x80, 0x80);
+    test_imulb(0x10, 0x10);
+
+    test_imulw(0, 0, 0);
+    test_imulw(0, 0xFF, 0xFF);
+    test_imulw(0, 0xFF, 0x100);
+    test_imulw(0, 0x1234001d, 45);
+    test_imulw(0, 23, -45);
+    test_imulw(0, 0x8000, 0x8000);
+    test_imulw(0, 0x100, 0x100);
+
+    test_imull(0, 0, 0);
+    test_imull(0, 0xFFFF, 0xFFFF);
+    test_imull(0, 0xFFFF, 0x10000);
+    test_imull(0, 0x1234001d, 45);
+    test_imull(0, 23, -45);
+    test_imull(0, 0x80000000, 0x80000000);
+    test_imull(0, 0x10000, 0x10000);
+
+    test_mulb(0x1234561d, 4);
+    test_mulb(3, -4);
+    test_mulb(0x80, 0x80);
+    test_mulb(0x10, 0x10);
+
+    test_mulw(0, 0x1234001d, 45);
+    test_mulw(0, 23, -45);
+    test_mulw(0, 0x8000, 0x8000);
+    test_mulw(0, 0x100, 0x100);
+
+    test_mull(0, 0x1234001d, 45);
+    test_mull(0, 23, -45);
+    test_mull(0, 0x80000000, 0x80000000);
+    test_mull(0, 0x10000, 0x10000);
+
+    test_imulw2(0x1234001d, 45);
+    test_imulw2(23, -45);
+    test_imulw2(0x8000, 0x8000);
+    test_imulw2(0x100, 0x100);
+
+    test_imull2(0x1234001d, 45);
+    test_imull2(23, -45);
+    test_imull2(0x80000000, 0x80000000);
+    test_imull2(0x10000, 0x10000);
+
+    TEST_IMUL_IM("w", "w", 45, 0x1234);
+    TEST_IMUL_IM("w", "w", -45, 23);
+    TEST_IMUL_IM("w", "w", 0x8000, 0x80000000);
+    TEST_IMUL_IM("w", "w", 0x7fff, 0x1000);
+
+    TEST_IMUL_IM("l", "", 45, 0x1234);
+    TEST_IMUL_IM("l", "", -45, 23);
+    TEST_IMUL_IM("l", "", 0x8000, 0x80000000);
+    TEST_IMUL_IM("l", "", 0x7fff, 0x1000);
+
+    test_idivb(0x12341678, 0x127e);
+    test_idivb(0x43210123, -5);
+    test_idivb(0x12340004, -1);
+
+    test_idivw(0, 0x12345678, 12347);
+    test_idivw(0, -23223, -45);
+    test_idivw(0, 0x12348000, -1);
+    test_idivw(0x12343, 0x12345678, 0x81238567);
+
+    test_idivl(0, 0x12345678, 12347);
+    test_idivl(0, -233223, -45);
+    test_idivl(0, 0x80000000, -1);
+    test_idivl(0x12343, 0x12345678, 0x81234567);
+
+    test_divb(0x12341678, 0x127e);
+    test_divb(0x43210123, -5);
+    test_divb(0x12340004, -1);
+
+    test_divw(0, 0x12345678, 12347);
+    test_divw(0, -23223, -45);
+    test_divw(0, 0x12348000, -1);
+    test_divw(0x12343, 0x12345678, 0x81238567);
+
+    test_divl(0, 0x12345678, 12347);
+    test_divl(0, -233223, -45);
+    test_divl(0, 0x80000000, -1);
+    test_divl(0x12343, 0x12345678, 0x81234567);
+}
+
+#define TEST_BSX(op, size, op0)\
+{\
+    int res, val, resz;\
+    val = op0;\
+    asm("xorl %1, %1\n\t"\
+        "movl $0x12345678, %0\n\t"\
+        #op " %" size "2, %" size "0\n\t" \
+        "setz %b1" \
+        : "=r" (res), "=q" (resz)\
+        : "r" (val));\
+    printf("%-10s A=%08x R=%08x %d\n", #op, val, res, resz);\
+}
+
+void test_bsx(void)
+{
+    TEST_BSX(bsrw, "w", 0);
+    TEST_BSX(bsrw, "w", 0x12340128);
+    TEST_BSX(bsrl, "", 0);
+    TEST_BSX(bsrl, "", 0x00340128);
+    TEST_BSX(bsfw, "w", 0);
+    TEST_BSX(bsfw, "w", 0x12340128);
+    TEST_BSX(bsfl, "", 0);
+    TEST_BSX(bsfl, "", 0x00340128);
+}
+
+/**********************************************/
+
+void test_fops(double a, double b)
+{
+    printf("a=%f b=%f a+b=%f\n", a, b, a + b);
+    printf("a=%f b=%f a-b=%f\n", a, b, a - b);
+    printf("a=%f b=%f a*b=%f\n", a, b, a * b);
+    printf("a=%f b=%f a/b=%f\n", a, b, a / b);
+    printf("a=%f b=%f fmod(a, b)=%f\n", a, b, fmod(a, b));
+    printf("a=%f sqrt(a)=%f\n", a, sqrt(a));
+    printf("a=%f sin(a)=%f\n", a, sin(a));
+    printf("a=%f cos(a)=%f\n", a, cos(a));
+    printf("a=%f tan(a)=%f\n", a, tan(a));
+    printf("a=%f log(a)=%f\n", a, log(a));
+    printf("a=%f exp(a)=%f\n", a, exp(a));
+    printf("a=%f b=%f atan2(a, b)=%f\n", a, b, atan2(a, b));
+    /* just to test some op combining */
+    printf("a=%f asin(sin(a))=%f\n", a, asin(sin(a)));
+    printf("a=%f acos(cos(a))=%f\n", a, acos(cos(a)));
+    printf("a=%f atan(tan(a))=%f\n", a, atan(tan(a)));
+}
+
+void test_fcmp(double a, double b)
+{
+    printf("(%f<%f)=%d\n",
+           a, b, a < b);
+    printf("(%f<=%f)=%d\n",
+           a, b, a <= b);
+    printf("(%f==%f)=%d\n",
+           a, b, a == b);
+    printf("(%f>%f)=%d\n",
+           a, b, a > b);
+    printf("(%f<=%f)=%d\n",
+           a, b, a >= b);
+    {
+        unsigned int eflags;
+        /* test f(u)comi instruction */
+        asm("fcomi %2, %1\n"
+            "pushf\n"
+            "pop %0\n"
+            : "=r" (eflags)
+            : "t" (a), "u" (b));
+        printf("fcomi(%f %f)=%08x\n", a, b, eflags & (CC_Z | CC_P | CC_C));
+    }
+}
+
+void test_fcvt(double a)
+{
+    float fa;
+    long double la;
+    int16_t fpuc;
+    int i;
+    int64_t lla;
+    int ia;
+    int16_t wa;
+    double ra;
+
+    fa = a;
+    la = a;
+    printf("(float)%f = %f\n", a, fa);
+    printf("(long double)%f = %Lf\n", a, la);
+    printf("a=%016Lx\n", *(long long *)&a);
+    printf("la=%016Lx %04x\n", *(long long *)&la, 
+           *(unsigned short *)((char *)(&la) + 8));
+
+    /* test all roundings */
+    asm volatile ("fstcw %0" : "=m" (fpuc));
+    for(i=0;i<4;i++) {
+        int16_t tmp = (fpuc & ~0x0c00) | (i << 10);
+        asm volatile ("fldcw %0" : : "m" (tmp));
+        asm volatile ("fist %0" : "=m" (wa) : "t" (a));
+        asm volatile ("fistl %0" : "=m" (ia) : "t" (a));
+        asm volatile ("fistpll %0" : "=m" (lla) : "t" (a) : "st");
+        asm volatile ("frndint ; fstl %0" : "=m" (ra) : "t" (a));
+        asm volatile ("fldcw %0" : : "m" (fpuc));
+        printf("(short)a = %d\n", wa);
+        printf("(int)a = %d\n", ia);
+        printf("(int64_t)a = %Ld\n", lla);
+        printf("rint(a) = %f\n", ra);
+    }
+}
+
+#define TEST(N) \
+    asm("fld" #N : "=t" (a)); \
+    printf("fld" #N "= %f\n", a);
+
+void test_fconst(void)
+{
+    double a;
+    TEST(1);
+    TEST(l2t);
+    TEST(l2e);
+    TEST(pi);
+    TEST(lg2);
+    TEST(ln2);
+    TEST(z);
+}
+
+void test_fbcd(double a)
+{
+    unsigned short bcd[5];
+    double b;
+
+    asm("fbstp %0" : "=m" (bcd[0]) : "t" (a) : "st");
+    asm("fbld %1" : "=t" (b) : "m" (bcd[0]));
+    printf("a=%f bcd=%04x%04x%04x%04x%04x b=%f\n", 
+           a, bcd[4], bcd[3], bcd[2], bcd[1], bcd[0], b);
+}
+
+#define TEST_ENV(env, save, restore)\
+{\
+    memset((env), 0xaa, sizeof(*(env)));\
+    for(i=0;i<5;i++)\
+        asm volatile ("fldl %0" : : "m" (dtab[i]));\
+    asm(save " %0\n" : : "m" (*(env)));\
+    asm(restore " %0\n": : "m" (*(env)));\
+    for(i=0;i<5;i++)\
+        asm volatile ("fstpl %0" : "=m" (rtab[i]));\
+    for(i=0;i<5;i++)\
+        printf("res[%d]=%f\n", i, rtab[i]);\
+    printf("fpuc=%04x fpus=%04x fptag=%04x\n",\
+           (env)->fpuc,\
+           (env)->fpus & 0xff00,\
+           (env)->fptag);\
+}
+
+void test_fenv(void)
+{
+    struct __attribute__((packed)) {
+        uint16_t fpuc;
+        uint16_t dummy1;
+        uint16_t fpus;
+        uint16_t dummy2;
+        uint16_t fptag;
+        uint16_t dummy3;
+        uint32_t ignored[4];
+        long double fpregs[8];
+    } float_env32;
+    struct __attribute__((packed)) {
+        uint16_t fpuc;
+        uint16_t fpus;
+        uint16_t fptag;
+        uint16_t ignored[4];
+        long double fpregs[8];
+    } float_env16;
+    double dtab[8];
+    double rtab[8];
+    int i;
+
+    for(i=0;i<8;i++)
+        dtab[i] = i + 1;
+
+    TEST_ENV(&float_env16, "data16 fnstenv", "data16 fldenv");
+    TEST_ENV(&float_env16, "data16 fnsave", "data16 frstor");
+    TEST_ENV(&float_env32, "fnstenv", "fldenv");
+    TEST_ENV(&float_env32, "fnsave", "frstor");
+
+    /* test for ffree */
+    for(i=0;i<5;i++)
+        asm volatile ("fldl %0" : : "m" (dtab[i]));
+    asm volatile("ffree %st(2)");
+    asm volatile ("fnstenv %0\n" : : "m" (float_env32));
+    asm volatile ("fninit");
+    printf("fptag=%04x\n", float_env32.fptag);
+}
+
+
+#define TEST_FCMOV(a, b, eflags, CC)\
+{\
+    double res;\
+    asm("push %3\n"\
+        "popf\n"\
+        "fcmov" CC " %2, %0\n"\
+        : "=t" (res)\
+        : "0" (a), "u" (b), "g" (eflags));\
+    printf("fcmov%s eflags=0x%04x-> %f\n", \
+           CC, eflags, res);\
+}
+
+void test_fcmov(void)
+{
+    double a, b;
+    int eflags, i;
+
+    a = 1.0;
+    b = 2.0;
+    for(i = 0; i < 4; i++) {
+        eflags = 0;
+        if (i & 1)
+            eflags |= CC_C;
+        if (i & 2)
+            eflags |= CC_Z;
+        TEST_FCMOV(a, b, eflags, "b");
+        TEST_FCMOV(a, b, eflags, "e");
+        TEST_FCMOV(a, b, eflags, "be");
+        TEST_FCMOV(a, b, eflags, "nb");
+        TEST_FCMOV(a, b, eflags, "ne");
+        TEST_FCMOV(a, b, eflags, "nbe");
+    }
+    TEST_FCMOV(a, b, 0, "u");
+    TEST_FCMOV(a, b, CC_P, "u");
+    TEST_FCMOV(a, b, 0, "nu");
+    TEST_FCMOV(a, b, CC_P, "nu");
+}
+
+void test_floats(void)
+{
+    test_fops(2, 3);
+    test_fops(1.4, -5);
+    test_fcmp(2, -1);
+    test_fcmp(2, 2);
+    test_fcmp(2, 3);
+    test_fcvt(0.5);
+    test_fcvt(-0.5);
+    test_fcvt(1.0/7.0);
+    test_fcvt(-1.0/9.0);
+    test_fcvt(32768);
+    test_fcvt(-1e20);
+    test_fconst();
+    // REINSTATE (maybe): test_fbcd(1234567890123456);
+    // REINSTATE (maybe): test_fbcd(-123451234567890);
+    // REINSTATE: test_fenv();
+    // REINSTATE: test_fcmov();
+}
+
+/**********************************************/
+#if 0
+
+#define TEST_BCD(op, op0, cc_in, cc_mask)\
+{\
+    int res, flags;\
+    res = op0;\
+    flags = cc_in;\
+    asm ("push %3\n\t"\
+         "popf\n\t"\
+         #op "\n\t"\
+         "pushf\n\t"\
+         "popl %1\n\t"\
+        : "=a" (res), "=g" (flags)\
+        : "0" (res), "1" (flags));\
+    printf("%-10s A=%08x R=%08x CCIN=%04x CC=%04x\n",\
+           #op, op0, res, cc_in, flags & cc_mask);\
+}
+
+void test_bcd(void)
+{
+    TEST_BCD(daa, 0x12340503, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340506, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340507, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340559, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340560, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x1234059f, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x123405a0, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340503, 0, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340506, 0, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340503, CC_C, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340506, CC_C, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340503, CC_C | CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(daa, 0x12340506, CC_C | CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+
+    TEST_BCD(das, 0x12340503, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340506, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340507, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340559, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340560, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x1234059f, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x123405a0, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340503, 0, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340506, 0, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340503, CC_C, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340506, CC_C, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340503, CC_C | CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+    TEST_BCD(das, 0x12340506, CC_C | CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_A));
+
+    TEST_BCD(aaa, 0x12340205, CC_A, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x12340306, CC_A, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x1234040a, CC_A, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x123405fa, CC_A, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x12340205, 0, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x12340306, 0, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x1234040a, 0, (CC_C | CC_A));
+    TEST_BCD(aaa, 0x123405fa, 0, (CC_C | CC_A));
+    
+    TEST_BCD(aas, 0x12340205, CC_A, (CC_C | CC_A));
+    TEST_BCD(aas, 0x12340306, CC_A, (CC_C | CC_A));
+    TEST_BCD(aas, 0x1234040a, CC_A, (CC_C | CC_A));
+    TEST_BCD(aas, 0x123405fa, CC_A, (CC_C | CC_A));
+    TEST_BCD(aas, 0x12340205, 0, (CC_C | CC_A));
+    TEST_BCD(aas, 0x12340306, 0, (CC_C | CC_A));
+    TEST_BCD(aas, 0x1234040a, 0, (CC_C | CC_A));
+    TEST_BCD(aas, 0x123405fa, 0, (CC_C | CC_A));
+
+    TEST_BCD(aam, 0x12340547, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A));
+    TEST_BCD(aad, 0x12340407, CC_A, (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A));
+}
+#endif /* 0 */
+
+#define TEST_XCHG(op, size, opconst)\
+{\
+    int op0, op1;\
+    op0 = 0x12345678;\
+    op1 = 0xfbca7654;\
+    asm(#op " %" size "0, %" size "1" \
+        : "=q" (op0), opconst (op1) \
+        : "0" (op0), "1" (op1));\
+    printf("%-10s A=%08x B=%08x\n",\
+           #op, op0, op1);\
+}
+
+#define TEST_CMPXCHG(op, size, opconst, eax)\
+{\
+    int op0, op1;\
+    op0 = 0x12345678;\
+    op1 = 0xfbca7654;\
+    asm(#op " %" size "0, %" size "1" \
+        : "=q" (op0), opconst (op1) \
+        : "0" (op0), "1" (op1), "a" (eax));\
+    printf("%-10s EAX=%08x A=%08x C=%08x\n",\
+           #op, eax, op0, op1);\
+}
+
+void test_xchg(void)
+{
+    TEST_XCHG(xchgl, "", "=q");
+    TEST_XCHG(xchgw, "w", "=q");
+    TEST_XCHG(xchgb, "b", "=q");
+
+    TEST_XCHG(xchgl, "", "=m");
+    TEST_XCHG(xchgw, "w", "=m");
+    TEST_XCHG(xchgb, "b", "=m");
+
+#if 0
+    TEST_XCHG(xaddl, "", "=q");
+    TEST_XCHG(xaddw, "w", "=q");
+    TEST_XCHG(xaddb, "b", "=q");
+
+    {
+        int res;
+        res = 0x12345678;
+        asm("xaddl %1, %0" : "=r" (res) : "0" (res));
+        printf("xaddl same res=%08x\n", res);
+    }
+
+    TEST_XCHG(xaddl, "", "=m");
+    TEST_XCHG(xaddw, "w", "=m");
+    TEST_XCHG(xaddb, "b", "=m");
+#endif
+    TEST_CMPXCHG(cmpxchgl, "", "=q", 0xfbca7654);
+    TEST_CMPXCHG(cmpxchgw, "w", "=q", 0xfbca7654);
+    TEST_CMPXCHG(cmpxchgb, "b", "=q", 0xfbca7654);
+
+    TEST_CMPXCHG(cmpxchgl, "", "=q", 0xfffefdfc);
+    TEST_CMPXCHG(cmpxchgw, "w", "=q", 0xfffefdfc);
+    TEST_CMPXCHG(cmpxchgb, "b", "=q", 0xfffefdfc);
+
+    TEST_CMPXCHG(cmpxchgl, "", "=m", 0xfbca7654);
+    TEST_CMPXCHG(cmpxchgw, "w", "=m", 0xfbca7654);
+    TEST_CMPXCHG(cmpxchgb, "b", "=m", 0xfbca7654);
+
+    TEST_CMPXCHG(cmpxchgl, "", "=m", 0xfffefdfc);
+    TEST_CMPXCHG(cmpxchgw, "w", "=m", 0xfffefdfc);
+    TEST_CMPXCHG(cmpxchgb, "b", "=m", 0xfffefdfc);
+#if 0
+    {
+        uint64_t op0, op1, op2;
+        int i, eflags;
+
+        for(i = 0; i < 2; i++) {
+            op0 = 0x123456789abcd;
+            if (i == 0)
+                op1 = 0xfbca765423456;
+            else
+                op1 = op0;
+            op2 = 0x6532432432434;
+            asm("cmpxchg8b %1\n" 
+                "pushf\n"
+                "popl %2\n"
+                : "=A" (op0), "=m" (op1), "=g" (eflags)
+                : "0" (op0), "m" (op1), "b" ((int)op2), "c" ((int)(op2 >> 32)));
+            printf("cmpxchg8b: op0=%016llx op1=%016llx CC=%02x\n", 
+                    op0, op1, eflags & CC_Z);
+        }
+    }
+#endif
+}
+
+/**********************************************/
+/* segmentation tests */
+#if 0
+#include <asm/ldt.h>
+#include <linux/unistd.h>
+#include <linux/version.h>
+
+_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
+#define modify_ldt_ldt_s user_desc
+#endif
+
+uint8_t seg_data1[4096];
+uint8_t seg_data2[4096];
+
+#define MK_SEL(n) (((n) << 3) | 7)
+
+#define TEST_LR(op, size, seg, mask)\
+{\
+    int res, res2;\
+    res = 0x12345678;\
+    asm (op " %" size "2, %" size "0\n" \
+         "movl $0, %1\n"\
+         "jnz 1f\n"\
+         "movl $1, %1\n"\
+         "1:\n"\
+         : "=r" (res), "=r" (res2) : "m" (seg), "0" (res));\
+    printf(op ": Z=%d %08x\n", res2, res & ~(mask));\
+}
+
+/* NOTE: we use Linux modify_ldt syscall */
+void test_segs(void)
+{
+    struct modify_ldt_ldt_s ldt;
+    long long ldt_table[3];
+    int res, res2;
+    char tmp;
+    struct {
+        uint32_t offset;
+        uint16_t seg;
+    } __attribute__((packed)) segoff;
+
+    ldt.entry_number = 1;
+    ldt.base_addr = (unsigned long)&seg_data1;
+    ldt.limit = (sizeof(seg_data1) + 0xfff) >> 12;
+    ldt.seg_32bit = 1;
+    ldt.contents = MODIFY_LDT_CONTENTS_DATA;
+    ldt.read_exec_only = 0;
+    ldt.limit_in_pages = 1;
+    ldt.seg_not_present = 0;
+    ldt.useable = 1;
+    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
+
+    ldt.entry_number = 2;
+    ldt.base_addr = (unsigned long)&seg_data2;
+    ldt.limit = (sizeof(seg_data2) + 0xfff) >> 12;
+    ldt.seg_32bit = 1;
+    ldt.contents = MODIFY_LDT_CONTENTS_DATA;
+    ldt.read_exec_only = 0;
+    ldt.limit_in_pages = 1;
+    ldt.seg_not_present = 0;
+    ldt.useable = 1;
+    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
+
+    modify_ldt(0, &ldt_table, sizeof(ldt_table)); /* read ldt entries */
+#if 0
+    {
+        int i;
+        for(i=0;i<3;i++)
+            printf("%d: %016Lx\n", i, ldt_table[i]);
+    }
+#endif
+    /* do some tests with fs or gs */
+    asm volatile ("movl %0, %%fs" : : "r" (MK_SEL(1)));
+
+    seg_data1[1] = 0xaa;
+    seg_data2[1] = 0x55;
+
+    asm volatile ("fs movzbl 0x1, %0" : "=r" (res));
+    printf("FS[1] = %02x\n", res);
+
+    asm volatile ("pushl %%gs\n"
+                  "movl %1, %%gs\n"
+                  "gs movzbl 0x1, %0\n"
+                  "popl %%gs\n"
+                  : "=r" (res)
+                  : "r" (MK_SEL(2)));
+    printf("GS[1] = %02x\n", res);
+
+    /* tests with ds/ss (implicit segment case) */
+    tmp = 0xa5;
+    asm volatile ("pushl %%ebp\n\t"
+                  "pushl %%ds\n\t"
+                  "movl %2, %%ds\n\t"
+                  "movl %3, %%ebp\n\t"
+                  "movzbl 0x1, %0\n\t"
+                  "movzbl (%%ebp), %1\n\t"
+                  "popl %%ds\n\t"
+                  "popl %%ebp\n\t"
+                  : "=r" (res), "=r" (res2)
+                  : "r" (MK_SEL(1)), "r" (&tmp));
+    printf("DS[1] = %02x\n", res);
+    printf("SS[tmp] = %02x\n", res2);
+
+    segoff.seg = MK_SEL(2);
+    segoff.offset = 0xabcdef12;
+    asm volatile("lfs %2, %0\n\t" 
+                 "movl %%fs, %1\n\t"
+                 : "=r" (res), "=g" (res2) 
+                 : "m" (segoff));
+    printf("FS:reg = %04x:%08x\n", res2, res);
+
+    TEST_LR("larw", "w", MK_SEL(2), 0x0100);
+    TEST_LR("larl", "", MK_SEL(2), 0x0100);
+    TEST_LR("lslw", "w", MK_SEL(2), 0);
+    TEST_LR("lsll", "", MK_SEL(2), 0);
+
+    TEST_LR("larw", "w", 0xfff8, 0);
+    TEST_LR("larl", "", 0xfff8, 0);
+    TEST_LR("lslw", "w", 0xfff8, 0);
+    TEST_LR("lsll", "", 0xfff8, 0);
+}
+#endif
+
+#if 0
+/* 16 bit code test */
+extern char code16_start, code16_end;
+extern char code16_func1;
+extern char code16_func2;
+extern char code16_func3;
+
+void test_code16(void)
+{
+    struct modify_ldt_ldt_s ldt;
+    int res, res2;
+
+    /* build a code segment */
+    ldt.entry_number = 1;
+    ldt.base_addr = (unsigned long)&code16_start;
+    ldt.limit = &code16_end - &code16_start;
+    ldt.seg_32bit = 0;
+    ldt.contents = MODIFY_LDT_CONTENTS_CODE;
+    ldt.read_exec_only = 0;
+    ldt.limit_in_pages = 0;
+    ldt.seg_not_present = 0;
+    ldt.useable = 1;
+    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
+
+    /* call the first function */
+    asm volatile ("lcall %1, %2" 
+                  : "=a" (res)
+                  : "i" (MK_SEL(1)), "i" (&code16_func1): "memory", "cc");
+    printf("func1() = 0x%08x\n", res);
+    asm volatile ("lcall %2, %3" 
+                  : "=a" (res), "=c" (res2)
+                  : "i" (MK_SEL(1)), "i" (&code16_func2): "memory", "cc");
+    printf("func2() = 0x%08x spdec=%d\n", res, res2);
+    asm volatile ("lcall %1, %2" 
+                  : "=a" (res)
+                  : "i" (MK_SEL(1)), "i" (&code16_func3): "memory", "cc");
+    printf("func3() = 0x%08x\n", res);
+}
+#endif
+
+extern char func_lret32;
+extern char func_iret32;
+
+void test_misc(void)
+{
+    char table[256];
+    int res, i;
+
+#if 0
+    // REINSTATE
+    for(i=0;i<256;i++) table[i] = 256 - i;
+    res = 0x12345678;
+    asm ("xlat" : "=a" (res) : "b" (table), "0" (res));
+    printf("xlat: EAX=%08x\n", res);
+#endif
+#if 0
+    // REINSTATE
+    asm volatile ("pushl %%cs ; call %1" 
+                  : "=a" (res)
+                  : "m" (func_lret32): "memory", "cc");
+    printf("func_lret32=%x\n", res);
+
+    asm volatile ("pushfl ; pushl %%cs ; call %1" 
+                  : "=a" (res)
+                  : "m" (func_iret32): "memory", "cc");
+    printf("func_iret32=%x\n", res);
+#endif
+    /* specific popl test */
+    asm volatile ("pushl $12345432 ; pushl $0x9abcdef ; popl (%%esp) ; popl %0"
+                  : "=g" (res));
+    printf("popl esp=%x\n", res);
+#if 0
+    // REINSTATE
+    /* specific popw test */
+    asm volatile ("pushl $12345432 ; pushl $0x9abcdef ; popw (%%esp) ; addl $2, %%esp ; popl %0"
+                  : "=g" (res));
+    printf("popw esp=%x\n", res);
+#endif
+}
+
+uint8_t str_buffer[4096];
+
+#define TEST_STRING1(OP, size, DF, REP)\
+{\
+    int esi, edi, eax, ecx, eflags;\
+\
+    esi = (long)(str_buffer + sizeof(str_buffer) / 2);\
+    edi = (long)(str_buffer + sizeof(str_buffer) / 2) + 16;\
+    eax = 0x12345678;\
+    ecx = 17;\
+\
+    asm volatile ("pushl $0\n\t"\
+                  "popf\n\t"\
+                  DF "\n\t"\
+                  REP #OP size "\n\t"\
+                  "cld\n\t"\
+                  "pushf\n\t"\
+                  "popl %4\n\t"\
+                  : "=S" (esi), "=D" (edi), "=a" (eax), "=c" (ecx), "=g" (eflags)\
+                  : "0" (esi), "1" (edi), "2" (eax), "3" (ecx));\
+    printf("%-10s ESI=%08x EDI=%08x EAX=%08x ECX=%08x EFL=%04x\n",\
+           REP #OP size, esi, edi, eax, ecx,\
+           eflags & (CC_C | CC_P | CC_Z | CC_S | CC_O | CC_A));\
+}
+
+#define TEST_STRING(OP, REP)\
+    TEST_STRING1(OP, "b", "", REP);\
+    TEST_STRING1(OP, "w", "", REP);\
+    TEST_STRING1(OP, "l", "", REP);\
+    TEST_STRING1(OP, "b", "std", REP);\
+    TEST_STRING1(OP, "w", "std", REP);\
+    TEST_STRING1(OP, "l", "std", REP)
+
+void test_string(void)
+{
+    int i;
+    for(i = 0;i < sizeof(str_buffer); i++)
+        str_buffer[i] = i + 0x56;
+   TEST_STRING(stos, "");
+   TEST_STRING(stos, "rep ");
+   // REINSTATE: TEST_STRING(lods, ""); /* to verify stos */
+   // REINSTATE: TEST_STRING(lods, "rep "); 
+   TEST_STRING(movs, "");
+   TEST_STRING(movs, "rep ");
+   // REINSTATE: TEST_STRING(lods, ""); /* to verify stos */
+
+   /* XXX: better tests */
+   TEST_STRING(scas, "");
+   // REINSTATE: TEST_STRING(scas, "repz ");
+   TEST_STRING(scas, "repnz ");
+   // REINSTATE: TEST_STRING(cmps, "");
+   TEST_STRING(cmps, "repz ");
+   // REINSTATE: TEST_STRING(cmps, "repnz ");
+}
+
+/* VM86 test */
+#if 0
+static inline void set_bit(uint8_t *a, unsigned int bit)
+{
+    a[bit / 8] |= (1 << (bit % 8));
+}
+
+static inline uint8_t *seg_to_linear(unsigned int seg, unsigned int reg)
+{
+    return (uint8_t *)((seg << 4) + (reg & 0xffff));
+}
+
+static inline void pushw(struct vm86_regs *r, int val)
+{
+    r->esp = (r->esp & ~0xffff) | ((r->esp - 2) & 0xffff);
+    *(uint16_t *)seg_to_linear(r->ss, r->esp) = val;
+}
+
+#undef __syscall_return
+#define __syscall_return(type, res) \
+do { \
+	return (type) (res); \
+} while (0)
+
+_syscall2(int, vm86, int, func, struct vm86plus_struct *, v86)
+
+extern char vm86_code_start;
+extern char vm86_code_end;
+
+#define VM86_CODE_CS 0x100
+#define VM86_CODE_IP 0x100
+
+void test_vm86(void)
+{
+    struct vm86plus_struct ctx;
+    struct vm86_regs *r;
+    uint8_t *vm86_mem;
+    int seg, ret;
+
+    vm86_mem = mmap((void *)0x00000000, 0x110000, 
+                    PROT_WRITE | PROT_READ | PROT_EXEC, 
+                    MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+    if (vm86_mem == MAP_FAILED) {
+        printf("ERROR: could not map vm86 memory");
+        return;
+    }
+    memset(&ctx, 0, sizeof(ctx));
+
+    /* init basic registers */
+    r = &ctx.regs;
+    r->eip = VM86_CODE_IP;
+    r->esp = 0xfffe;
+    seg = VM86_CODE_CS;
+    r->cs = seg;
+    r->ss = seg;
+    r->ds = seg;
+    r->es = seg;
+    r->fs = seg;
+    r->gs = seg;
+    r->eflags = VIF_MASK;
+
+    /* move code to proper address. We use the same layout as a .com
+       dos program. */
+    memcpy(vm86_mem + (VM86_CODE_CS << 4) + VM86_CODE_IP, 
+           &vm86_code_start, &vm86_code_end - &vm86_code_start);
+
+    /* mark int 0x21 as being emulated */
+    set_bit((uint8_t *)&ctx.int_revectored, 0x21);
+
+    for(;;) {
+        ret = vm86(VM86_ENTER, &ctx);
+        switch(VM86_TYPE(ret)) {
+        case VM86_INTx:
+            {
+                int int_num, ah, v;
+                
+                int_num = VM86_ARG(ret);
+                if (int_num != 0x21)
+                    goto unknown_int;
+                ah = (r->eax >> 8) & 0xff;
+                switch(ah) {
+                case 0x00: /* exit */
+                    goto the_end;
+                case 0x02: /* write char */
+                    {
+                        uint8_t c = r->edx;
+                        putchar(c);
+                    }
+                    break;
+                case 0x09: /* write string */
+                    {
+                        uint8_t c, *ptr;
+                        ptr = seg_to_linear(r->ds, r->edx);
+                        for(;;) {
+                            c = *ptr++;
+                            if (c == '$')
+                                break;
+                            putchar(c);
+                        }
+                        r->eax = (r->eax & ~0xff) | '$';
+                    }
+                    break;
+                case 0xff: /* extension: write eflags number in edx */
+                    v = (int)r->edx;
+#ifndef LINUX_VM86_IOPL_FIX
+                    v &= ~0x3000;
+#endif
+                    printf("%08x\n", v);
+                    break;
+                default:
+                unknown_int:
+                    printf("unsupported int 0x%02x\n", int_num);
+                    goto the_end;
+                }
+            }
+            break;
+        case VM86_SIGNAL:
+            /* a signal came, we just ignore that */
+            break;
+        case VM86_STI:
+            break;
+        default:
+            printf("ERROR: unhandled vm86 return code (0x%x)\n", ret);
+            goto the_end;
+        }
+    }
+ the_end:
+    printf("VM86 end\n");
+    munmap(vm86_mem, 0x110000);
+}
+#endif
+
+/* exception tests */
+#if 0
+#ifndef REG_EAX
+#define REG_EAX EAX
+#define REG_EBX EBX
+#define REG_ECX ECX
+#define REG_EDX EDX
+#define REG_ESI ESI
+#define REG_EDI EDI
+#define REG_EBP EBP
+#define REG_ESP ESP
+#define REG_EIP EIP
+#define REG_EFL EFL
+#define REG_TRAPNO TRAPNO
+#define REG_ERR ERR
+#endif
+
+jmp_buf jmp_env;
+int v1;
+int tab[2];
+
+void sig_handler(int sig, siginfo_t *info, void *puc)
+{
+    struct ucontext *uc = puc;
+
+    printf("si_signo=%d si_errno=%d si_code=%d",
+           info->si_signo, info->si_errno, info->si_code);
+    printf(" si_addr=0x%08lx",
+           (unsigned long)info->si_addr);
+    printf("\n");
+
+    printf("trapno=0x%02x err=0x%08x",
+           uc->uc_mcontext.gregs[REG_TRAPNO],
+           uc->uc_mcontext.gregs[REG_ERR]);
+    printf(" EIP=0x%08x", uc->uc_mcontext.gregs[REG_EIP]);
+    printf("\n");
+    longjmp(jmp_env, 1);
+}
+
+void test_exceptions(void)
+{
+    struct modify_ldt_ldt_s ldt;
+    struct sigaction act;
+    volatile int val;
+    
+    act.sa_sigaction = sig_handler;
+    sigemptyset(&act.sa_mask);
+    act.sa_flags = SA_SIGINFO;
+    sigaction(SIGFPE, &act, NULL);
+    sigaction(SIGILL, &act, NULL);
+    sigaction(SIGSEGV, &act, NULL);
+    sigaction(SIGBUS, &act, NULL);
+    sigaction(SIGTRAP, &act, NULL);
+
+    /* test division by zero reporting */
+    printf("DIVZ exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* now divide by zero */
+        v1 = 0;
+        v1 = 2 / v1;
+    }
+
+    printf("BOUND exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* bound exception */
+        tab[0] = 1;
+        tab[1] = 10;
+        asm volatile ("bound %0, %1" : : "r" (11), "m" (tab));
+    }
+
+    printf("segment exceptions:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* load an invalid segment */
+        asm volatile ("movl %0, %%fs" : : "r" ((0x1234 << 3) | 1));
+    }
+    if (setjmp(jmp_env) == 0) {
+        /* null data segment is valid */
+        asm volatile ("movl %0, %%fs" : : "r" (3));
+        /* null stack segment */
+        asm volatile ("movl %0, %%ss" : : "r" (3));
+    }
+
+    ldt.entry_number = 1;
+    ldt.base_addr = (unsigned long)&seg_data1;
+    ldt.limit = (sizeof(seg_data1) + 0xfff) >> 12;
+    ldt.seg_32bit = 1;
+    ldt.contents = MODIFY_LDT_CONTENTS_DATA;
+    ldt.read_exec_only = 0;
+    ldt.limit_in_pages = 1;
+    ldt.seg_not_present = 1;
+    ldt.useable = 1;
+    modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
+
+    if (setjmp(jmp_env) == 0) {
+        /* segment not present */
+        asm volatile ("movl %0, %%fs" : : "r" (MK_SEL(1)));
+    }
+
+    /* test SEGV reporting */
+    printf("PF exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        val = 1;
+        /* we add a nop to test a weird PC retrieval case */
+        asm volatile ("nop");
+        /* now store in an invalid address */
+        *(char *)0x1234 = 1;
+    }
+
+    /* test SEGV reporting */
+    printf("PF exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        val = 1;
+        /* read from an invalid address */
+        v1 = *(char *)0x1234;
+    }
+    
+    /* test illegal instruction reporting */
+    printf("UD2 exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* now execute an invalid instruction */
+        asm volatile("ud2");
+    }
+    printf("lock nop exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* now execute an invalid instruction */
+        asm volatile("lock nop");
+    }
+    
+    printf("INT exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int $0xfd");
+    }
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int $0x01");
+    }
+    if (setjmp(jmp_env) == 0) {
+        asm volatile (".byte 0xcd, 0x03");
+    }
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int $0x04");
+    }
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int $0x05");
+    }
+
+    printf("INT3 exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("int3");
+    }
+
+    printf("CLI exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("cli");
+    }
+
+    printf("STI exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("sti");
+    }
+
+    printf("INTO exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        /* overflow exception */
+        asm volatile ("addl $1, %0 ; into" : : "r" (0x7fffffff));
+    }
+
+    printf("OUTB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("outb %%al, %%dx" : : "d" (0x4321), "a" (0));
+    }
+
+    printf("INB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("inb %%dx, %%al" : "=a" (val) : "d" (0x4321));
+    }
+
+    printf("REP OUTSB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("rep outsb" : : "d" (0x4321), "S" (tab), "c" (1));
+    }
+
+    printf("REP INSB exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("rep insb" : : "d" (0x4321), "D" (tab), "c" (1));
+    }
+
+    printf("HLT exception:\n");
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("hlt");
+    }
+
+    printf("single step exception:\n");
+    val = 0;
+    if (setjmp(jmp_env) == 0) {
+        asm volatile ("pushf\n"
+                      "orl $0x00100, (%%esp)\n"
+                      "popf\n"
+                      "movl $0xabcd, %0\n" 
+                      "movl $0x0, %0\n" : "=m" (val) : : "cc", "memory");
+    }
+    printf("val=0x%x\n", val);
+}
+
+/* specific precise single step test */
+void sig_trap_handler(int sig, siginfo_t *info, void *puc)
+{
+    struct ucontext *uc = puc;
+    printf("EIP=0x%08x\n", uc->uc_mcontext.gregs[REG_EIP]);
+}
+
+const uint8_t sstep_buf1[4] = { 1, 2, 3, 4};
+uint8_t sstep_buf2[4];
+
+void test_single_step(void)
+{
+    struct sigaction act;
+    volatile int val;
+    int i;
+
+    val = 0;
+    act.sa_sigaction = sig_trap_handler;
+    sigemptyset(&act.sa_mask);
+    act.sa_flags = SA_SIGINFO;
+    sigaction(SIGTRAP, &act, NULL);
+    asm volatile ("pushf\n"
+                  "orl $0x00100, (%%esp)\n"
+                  "popf\n"
+                  "movl $0xabcd, %0\n" 
+
+                  /* jmp test */
+                  "movl $3, %%ecx\n"
+                  "1:\n"
+                  "addl $1, %0\n"
+                  "decl %%ecx\n"
+                  "jnz 1b\n"
+
+                  /* movsb: the single step should stop at each movsb iteration */
+                  "movl $sstep_buf1, %%esi\n"
+                  "movl $sstep_buf2, %%edi\n"
+                  "movl $0, %%ecx\n"
+                  "rep movsb\n"
+                  "movl $3, %%ecx\n"
+                  "rep movsb\n"
+                  "movl $1, %%ecx\n"
+                  "rep movsb\n"
+
+                  /* cmpsb: the single step should stop at each cmpsb iteration */
+                  "movl $sstep_buf1, %%esi\n"
+                  "movl $sstep_buf2, %%edi\n"
+                  "movl $0, %%ecx\n"
+                  "rep cmpsb\n"
+                  "movl $4, %%ecx\n"
+                  "rep cmpsb\n"
+                  
+                  /* getpid() syscall: single step should skip one
+                     instruction */
+                  "movl $20, %%eax\n"
+                  "int $0x80\n"
+                  "movl $0, %%eax\n"
+                  
+                  /* when modifying SS, trace is not done on the next
+                     instruction */
+                  "movl %%ss, %%ecx\n"
+                  "movl %%ecx, %%ss\n"
+                  "addl $1, %0\n"
+                  "movl $1, %%eax\n"
+                  "movl %%ecx, %%ss\n"
+                  "jmp 1f\n"
+                  "addl $1, %0\n"
+                  "1:\n"
+                  "movl $1, %%eax\n"
+                  "pushl %%ecx\n"
+                  "popl %%ss\n"
+                  "addl $1, %0\n"
+                  "movl $1, %%eax\n"
+                  
+                  "pushf\n"
+                  "andl $~0x00100, (%%esp)\n"
+                  "popf\n"
+                  : "=m" (val) 
+                  : 
+                  : "cc", "memory", "eax", "ecx", "esi", "edi");
+    printf("val=%d\n", val);
+    for(i = 0; i < 4; i++)
+        printf("sstep_buf2[%d] = %d\n", i, sstep_buf2[i]);
+}
+
+/* self modifying code test */
+uint8_t code[] = {
+    0xb8, 0x1, 0x00, 0x00, 0x00, /* movl $1, %eax */
+    0xc3, /* ret */
+};
+
+asm("smc_code2:\n"
+    "movl 4(%esp), %eax\n"
+    "movl %eax, smc_patch_addr2 + 1\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "nop\n"
+    "smc_patch_addr2:\n"
+    "movl $1, %eax\n"
+    "ret\n");
+
+typedef int FuncType(void);
+extern int smc_code2(int);
+void test_self_modifying_code(void)
+{
+    int i;
+
+    printf("self modifying code:\n");
+    printf("func1 = 0x%x\n", ((FuncType *)code)());
+    for(i = 2; i <= 4; i++) {
+        code[1] = i;
+        printf("func%d = 0x%x\n", i, ((FuncType *)code)());
+    }
+
+    /* more difficult test: the modified code is just after the
+       modifying instruction. It is forbidden in Intel specs, but it
+       is used by old DOS programs */
+    for(i = 2; i <= 4; i++) {
+        printf("smc_code2(%d) = %d\n", i, smc_code2(i));
+    }
+}
+    
+static void *call_end __init_call = NULL;
+#endif
+
+int main(int argc, char **argv)
+{
+    void **ptr;
+    void (*func)(void);
+
+#if 1
+    ptr = &call_start + 1;
+    while (*ptr != NULL) {
+        func = *ptr++;
+        func();
+    }
+#endif
+    test_bsx();
+    test_mul();
+    test_jcc();
+    test_floats();
+    //test_bcd();
+    test_xchg();
+    test_string();
+    test_misc(); // REINSTATE
+    test_lea();
+    //    test_segs();
+    //test_code16();
+    //test_vm86();
+    //test_exceptions();
+    //test_self_modifying_code();
+    //test_single_step();
+    return 0;
+}
diff --git a/VEX/test/test-i386.h b/VEX/test/test-i386.h
new file mode 100644
index 0000000..4ea12a3
--- /dev/null
+++ b/VEX/test/test-i386.h
@@ -0,0 +1,210 @@
+
+#define FULLTXT 1
+
+#define exec_op glue(exec_, OP)
+#define exec_opl glue(glue(exec_, OP), l)
+#define exec_opw glue(glue(exec_, OP), w)
+#define exec_opb glue(glue(exec_, OP), b)
+
+#define EXECOP2(size, res, s1, flags) \
+    asm ("push %4\n\t"\
+         "popf\n\t"\
+         stringify(OP) size " %" size "2, %" size "0\n\t" \
+         "pushf\n\t"\
+         "popl %1\n\t"\
+         : "=q" (res), "=g" (flags)\
+         : "q" (s1), "0" (res), "1" (flags));
+
+#define EXECOP1(size, res, flags) \
+    asm ("push %3\n\t"\
+         "popf\n\t"\
+         stringify(OP) size " %" size "0\n\t" \
+         "pushf\n\t"\
+         "popl %1\n\t"\
+         : "=q" (res), "=g" (flags)\
+         : "0" (res), "1" (flags));
+
+#ifdef OP1
+inline void exec_opl(int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP1("", res, flags);
+    if (FULLTXT)
+       printf("%-6s A=%08x R=%08x CCIN=%04x CC=%04x\n",
+              stringify(OP) "l", s0, res, iflags, flags & CC_MASK);
+    else
+       printf("%08x %04x %04x\n",
+               res, iflags, flags & CC_MASK);
+
+}
+inline void exec_opw(int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP1("w", res, flags);
+    if (FULLTXT)
+       printf("%-6s A=%08x R=%08x CCIN=%04x CC=%04x\n",
+              stringify(OP) "w", s0, res, iflags, flags & CC_MASK);
+    else
+       printf("%08x %04x %04x\n",
+              res, iflags, flags & CC_MASK);
+
+}
+inline void exec_opb(int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP1("b", res, flags);
+    if (FULLTXT)
+       printf("%-6s A=%08x R=%08x CCIN=%04x CC=%04x\n",
+              stringify(OP) "b", s0, res, iflags, flags & CC_MASK);
+    else
+       printf("%08x %04x %04x\n",
+              res, iflags, flags & CC_MASK);
+
+}
+#else
+inline void exec_opl(int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP2("", res, s1, flags);
+    if (FULLTXT)
+       printf("%-6s A=%08x B=%08x R=%08x CCIN=%04x CC=%04x\n",
+              stringify(OP) "l", s0, s1, res, iflags, flags & CC_MASK);
+    else
+       printf("%08x %04x %04x\n",
+              res, iflags, flags & CC_MASK);
+}
+
+inline void exec_opw(int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP2("w", res, s1, flags);
+    if (FULLTXT)
+       printf("%-6s A=%08x B=%08x R=%08x CCIN=%04x CC=%04x\n",
+              stringify(OP) "w", s0, s1, res, iflags, flags & CC_MASK);
+    else
+       printf("%08x %04x %04x\n",
+              res, iflags, flags & CC_MASK);
+}
+
+inline void exec_opb(int s0, int s1, int iflags)
+{
+    int res, flags;
+    res = s0;
+    flags = iflags;
+    EXECOP2("b", res, s1, flags);
+    if (FULLTXT)
+       printf("%-6s A=%08x B=%08x R=%08x CCIN=%04x CC=%04x\n",
+              stringify(OP) "b", s0, s1, res, iflags, flags & CC_MASK);
+    else
+       printf("%08x %04x %04x\n",
+              res, iflags, flags & CC_MASK);
+}
+#endif
+
+void exec_op(int s0, int s1)
+{
+#if 1
+  int o,s,z,a,c,p,flags_in;
+  for (o = 0; o < 2; o++) {
+  for (s = 0; s < 2; s++) {
+  for (z = 0; z < 2; z++) {
+  for (a = 0; a < 2; a++) {
+  for (c = 0; c < 2; c++) {
+  for (p = 0; p < 2; p++) {
+
+    flags_in = (o ? CC_O : 0)
+             | (s ? CC_S : 0)
+             | (z ? CC_Z : 0)
+             | (a ? CC_A : 0)
+             | (c ? CC_C : 0)
+             | (p ? CC_P : 0);
+    exec_opl(s0, s1, flags_in);
+    exec_opw(s0, s1, flags_in);
+    exec_opb(s0, s1, flags_in);
+  }}}}}}
+#else
+    exec_opl(s0, s1, 0);
+    exec_opw(s0, s1, 0);
+    exec_opb(s0, s1, 0);
+    exec_opl(s0, s1, CC_C);
+    exec_opw(s0, s1, CC_C);
+    exec_opb(s0, s1, CC_C);
+#endif
+}
+
+void glue(test_, OP)(void)
+{
+#define NVALS 57
+   int i, j;
+   static unsigned int val[NVALS]
+    = { 0x00, 0x01, 0x02, 0x03, 
+        0x3F, 0x40, 0x41, 
+        0x7E, 0x7F, 0x80, 0x81, 0x82, 
+        0xBF, 0xC0, 0xC1, 
+        0xFC, 0xFD, 0xFE, 0xFF, 
+
+        0xFF00, 0xFF01, 0xFF02, 0xFF03, 
+        0xFF3F, 0xFF40, 0xFF41, 
+        0xFF7E, 0xFF7F, 0xFF80, 0xFF81, 0xFF82, 
+        0xFFBF, 0xFFC0, 0xFFC1, 
+        0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF, 
+
+        0xFFFFFF00, 0xFFFFFF01, 0xFFFFFF02, 0xFFFFFF03, 
+        0xFFFFFF3F, 0xFFFFFF40, 0xFFFFFF41, 
+        0xFFFFFF7E, 0xFFFFFF7F, 0xFFFFFF80, 0xFFFFFF81, 0xFFFFFF82, 
+        0xFFFFFFBF, 0xFFFFFFC0, 0xFFFFFFC1, 
+        0xFFFFFFFC, 0xFFFFFFFD, 0xFFFFFFFE, 0xFFFFFFFF
+      };
+
+    exec_op(0x12345678, 0x812FADA);
+    exec_op(0x12341, 0x12341);
+    exec_op(0x12341, -0x12341);
+    exec_op(0xffffffff, 0);
+    exec_op(0xffffffff, -1);
+    exec_op(0xffffffff, 1);
+    exec_op(0xffffffff, 2);
+    exec_op(0x7fffffff, 0);
+    exec_op(0x7fffffff, 1);
+    exec_op(0x7fffffff, -1);
+    exec_op(0x80000000, -1);
+    exec_op(0x80000000, 1);
+    exec_op(0x80000000, -2);
+    exec_op(0x12347fff, 0);
+    exec_op(0x12347fff, 1);
+    exec_op(0x12347fff, -1);
+    exec_op(0x12348000, -1);
+    exec_op(0x12348000, 1);
+    exec_op(0x12348000, -2);
+    exec_op(0x12347f7f, 0);
+    exec_op(0x12347f7f, 1);
+    exec_op(0x12347f7f, -1);
+    exec_op(0x12348080, -1);
+    exec_op(0x12348080, 1);
+    exec_op(0x12348080, -2);
+
+#if TEST_INTEGER_VERBOSE
+    if (1)
+    for (i = 0; i < NVALS; i++)
+      for (j = 0; j < NVALS; j++)
+	exec_op(val[i], val[j]);
+#endif
+
+#undef NVALS
+}
+
+void *glue(_test_, OP) __init_call = glue(test_, OP);
+
+#undef OP
+#undef OP_CC
+
+#undef FULLTXT
diff --git a/VEX/test/x87fxam.c b/VEX/test/x87fxam.c
new file mode 100644
index 0000000..680b7ff
--- /dev/null
+++ b/VEX/test/x87fxam.c
@@ -0,0 +1,44 @@
+
+#include <stdio.h>
+#include <math.h>
+
+double d;
+int i;
+
+extern void do_fxam ( void );
+
+asm(
+"\n"
+"do_fxam:\n"
+"\txorl %eax,%eax\n"
+"\tfld d\n"
+"\tfxam\n"
+"\tfnstsw %ax\n"
+"\tffree %st(0)\n"
+"\tmovl %eax, i\n"
+"\tret\n"
+);
+
+
+double inf ( void ) { return 1.0 / 0.0; }
+double nAn ( void ) { return 0.0 / 0.0; }
+double den ( void ) { return 9.1e-220 / 1e100; }
+
+/* Try positive and negative variants of: zero, infinity,
+   nAn, and denorm */
+
+int main ( void )
+{
+   d =  0.0;   do_fxam(); printf("0x%4x: %f\n", i, d );
+   d = -0.0;   do_fxam(); printf("0x%4x: %f\n", i, d );
+
+   d =  inf(); do_fxam(); printf("0x%4x: %f\n", i, d );
+   d = -inf(); do_fxam(); printf("0x%4x: %f\n", i, d );
+
+   d =  nAn(); do_fxam(); printf("0x%4x: %f\n", i, d );
+   d = -nAn(); do_fxam(); printf("0x%4x: %f\n", i, d );
+
+   d =  den(); do_fxam(); printf("0x%4x: %f\n", i, d );
+   d = -den(); do_fxam(); printf("0x%4x: %f\n", i, d );
+   return 0;
+}
diff --git a/VEX/test/x87tst.c b/VEX/test/x87tst.c
new file mode 100644
index 0000000..d079477
--- /dev/null
+++ b/VEX/test/x87tst.c
@@ -0,0 +1,27 @@
+
+#include <stdio.h>
+#include <math.h>
+
+double d;
+int i;
+
+extern void do_tst ( void );
+
+asm(
+"\n"
+"do_tst:\n"
+"\txorl %eax,%eax\n"
+"\tfld d\n"
+"\tftst\n"
+"\tfnstsw %ax\n"
+"\tmovl %eax, i\n"
+"\tret\n"
+);
+
+int main ( void )
+{
+   d = -1.23; do_tst(); printf("%f -> 0x%x\n", d, i );
+   d = 0.0;   do_tst(); printf("%f -> 0x%x\n", d, i );
+   d = 9.87;  do_tst(); printf("%f -> 0x%x\n", d, i );
+   return 0;
+}
diff --git a/VEX/unused/arena.h b/VEX/unused/arena.h
new file mode 100644
index 0000000..a74936c
--- /dev/null
+++ b/VEX/unused/arena.h
@@ -0,0 +1,47 @@
+
+/* This is a modified version of the file "arena.h" from 
+   "C Interfaces and Implementations", by David R. Hanson.
+   The license is below.
+*/
+/* 
+
+The author of this software is David R. Hanson.
+
+Copyright (c) 1994,1995,1996,1997 by David R. Hanson. All Rights Reserved.
+
+Permission to use, copy, modify, and distribute this software for any
+purpose, subject to the provisions described below, without fee is
+hereby granted, provided that this entire notice is included in all
+copies of any software that is or includes a copy or modification of
+this software and in all copies of the supporting documentation for
+such software.
+
+THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+WARRANTY. IN PARTICULAR, THE AUTHOR DOES MAKE ANY REPRESENTATION OR
+WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR
+ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+
+David Hanson / drh@microsoft.com / http://www.research.microsoft.com/~drh/
+$Id: CPYRIGHT,v 1.2 1997/11/04 22:31:40 drh Exp $
+*/
+
+/* $Id: H:/drh/idioms/book/RCS/arena.doc,v 1.10 1997/02/21 19:45:19 drh Exp $ */
+
+#ifndef _CII_ARENA_H
+#define _CII_ARENA_H
+
+//#include "except.h"
+#define T Arena_T
+typedef struct T *T;
+//extern const Except_T Arena_NewFailed;
+//extern const Except_T Arena_Failed;
+extern T    Arena_new    (void);
+extern void Arena_dispose(T *ap);
+extern void *Arena_alloc (T arena, long nbytes,
+	const char *file, int line);
+extern void *Arena_calloc(T arena, long count,
+	long nbytes, const char *file, int line);
+extern void  Arena_free  (T arena);
+#undef T
+
+#endif /* ndef _CII_ARENA_H */
diff --git a/VEX/unused/dispatch.c b/VEX/unused/dispatch.c
new file mode 100644
index 0000000..d5fc6f3
--- /dev/null
+++ b/VEX/unused/dispatch.c
@@ -0,0 +1,97 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                        dispatch.c ---*/
+/*---------------------------------------------------------------*/
+
+#include "basictypes.h"
+
+
+/* --------------------------------------------------------- */
+/* TRANSLATION TABLE/CACHE                                   */
+/* --------------------------------------------------------- */
+
+static
+char* find_translation ( char* orig )
+{
+   int i;
+   for (i = 0; i < n_transtab_used; i++)
+      if (transtab[i].orig == orig)
+         return transtab[i].trans;
+   return NULL;
+}
+
+
+#define N_TT_ENTRIES 1000
+
+typedef
+   struct {
+      char* orig;
+      int   orig_size;
+      char* trans;
+      int   trans_size;
+   }
+   TTEntry;
+
+int n_transtab_used = 0;
+TTEntry transtab[N_TT_ENTRIES];
+
+
+/* Call here to add a translation to the trans cache.
+   Supplied translation is in mallocville.  add_translation should
+   copy it out as the caller will free it on return.  */
+
+/* EXPORTED */
+void add_translation ( char* orig, int orig_size, char* trans, int trans_size )
+{
+   int i;
+   assert(n_transtab_used < N_TT_ENTRIES);
+   transtab[n_transtab_used].orig       = orig;
+   transtab[n_transtab_used].orig_size  = orig_size;
+   transtab[n_transtab_used].trans_size = trans_size;
+
+   transtab[n_transtab_used].trans = malloc(trans_size);
+   assert(transtab[n_transtab_used].trans != NULL);
+   for (i = 0; i < trans_size; i++)
+      transtab[n_transtab_used].trans[i] = trans[i];
+
+#ifdef arm_TARGET_ARCH
+   arm_notify_new_code(transtab[n_transtab_used].trans, trans_size);
+#endif
+
+   n_transtab_used++;
+}
+
+/* Run the simulated machine for a while.  Returns when a new BB needs
+   to be translated, and returns its address.  Returns NULL when we
+   want to stop. */
+
+/* EXPORTED */
+char* run_machine ( void )
+{
+   char* nextpc_orig;
+   char* nextpc_trans;
+   while (1) {
+      nextpc_orig = (char*)(regs_arm[REG_PC]);
+      if (nextpc_orig == stop_at)
+         return NULL;
+      nextpc_trans = find_translation(nextpc_orig);
+      if (nextpc_trans == NULL)
+         return nextpc_orig;
+      run_translation(nextpc_trans, (char*) &regs_arm[0] );
+   }
+}
+
+
+/* HOW TO USE:
+ 
+   for a main fn :: void main ( void )
+
+   * load .o's, link, etc
+
+   * call initialise_machine with & main
+
+   * call run_machine repeatedly.  If it returns NULL, stop.  Else
+     make a translation of the returned address, pass it to
+     add_translation, and resume running by calling run_machine.
+
+*/
diff --git a/VEX/unused/linker.c b/VEX/unused/linker.c
new file mode 100644
index 0000000..78c2903
--- /dev/null
+++ b/VEX/unused/linker.c
@@ -0,0 +1,1422 @@
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <string.h>
+#include <malloc.h>
+
+
/* Debug trace macro, compiled out in this build; debug_linker is a
   runtime verbosity level consulted throughout (0 = quiet). */
#define IF_DEBUG(x,y) /* */
static int debug_linker = 0;

/* Select exactly one target architecture for relocation handling. */
#define i386_TARGET_ARCH
// #define arm_TARGET_ARCH

#if !defined(i386_TARGET_ARCH) && !defined(arm_TARGET_ARCH)
#  error "Must #define i386_TARGET_ARCH or arm_TARGET_ARCH"
#endif
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// TYPES
+
/* Boolean constants, pre-C99 style, matching the rest of the file. */
#define FALSE 0
#define TRUE  1

/* Lifecycle of an object module: loaded into memory, then (after
   relocation) resolved. */
typedef enum { OBJECT_LOADED, OBJECT_RESOLVED } OStatus;


/* Number of pages reserved per object for long-jump fixup code
   (used by the ARM backend). */
#define N_FIXUP_PAGES 1
+
+
/* Indication of section kinds for loaded objects.  Needed by
   the GC for deciding whether or not a pointer on the stack
   is a code pointer.
*/
typedef 
   enum { SECTIONKIND_CODE_OR_RODATA,
          SECTIONKIND_RWDATA,
          SECTIONKIND_OTHER,
          SECTIONKIND_NOINFOAVAIL } 
   SectionKind;

/* One address range [start, end] of a loaded object, tagged with its
   kind; chained into a per-object singly-linked list. */
typedef 
   struct _Section { 
      void* start; 
      void* end; 
      SectionKind kind;
      struct _Section* next;
   } 
   Section;

/* A memory region that relocation is permitted to write into; used
   purely as a sanity check (see addProddableBlock /
   checkProddableBlock below). */
typedef 
   struct _ProddableBlock {
      void* start;
      int   size;
      struct _ProddableBlock* next;
   }
   ProddableBlock;
+
/* Top-level structure for an object module.  One of these is allocated
 * for each object file in use.
 */
typedef struct _ObjectCode {
    OStatus    status;
    char*      fileName;
    int        fileSize;
    char*      formatName;            /* eg "ELF32", "DLL", "COFF", etc. */

    /* An array containing ptrs to all the symbol names copied from
       this object into the global symbol hash table.  This is so that
       we know which parts of the latter mapping to nuke when this
       object is removed from the system. */
    char**     symbols;
    int        n_symbols;

    /* ptr to malloc'd lump of memory holding the obj file */
    void*      image;

    /* Fixup area for long-distance jumps.  fixup_used bytes of
       fixup_size are currently occupied (ARM only). */
    char*      fixup;
    int        fixup_used;
    int        fixup_size;

    /* The section-kind entries for this object module.  Linked
       list. */
    Section* sections;

    /* A private hash table for local symbols.  Actually a StringMap*;
       declared void* to avoid an ordering problem with the typedef. */
    /* HashTable* */ void* lochash;
    
    /* Allow a chain of these things */
    struct _ObjectCode * next;

    /* SANITY CHECK ONLY: a list of the only memory regions which may
       safely be prodded during relocation.  Any attempt to prod
       outside one of these is an error in the linker. */
    ProddableBlock* proddables;

} ObjectCode;
+
+/*
+ * Define a set of types which can be used for both ELF32 and ELF64
+ */
+
+#ifdef ELF_64BIT
+#define ELFCLASS    ELFCLASS64
+#define Elf_Addr    Elf64_Addr
+#define Elf_Word    Elf64_Word
+#define Elf_Sword   Elf64_Sword
+#define Elf_Ehdr    Elf64_Ehdr
+#define Elf_Phdr    Elf64_Phdr
+#define Elf_Shdr    Elf64_Shdr
+#define Elf_Sym     Elf64_Sym
+#define Elf_Rel     Elf64_Rel
+#define Elf_Rela    Elf64_Rela
+#define ELF_ST_TYPE ELF64_ST_TYPE
+#define ELF_ST_BIND ELF64_ST_BIND
+#define ELF_R_TYPE  ELF64_R_TYPE
+#define ELF_R_SYM   ELF64_R_SYM
+#else
+#define ELFCLASS    ELFCLASS32
+#define Elf_Addr    Elf32_Addr
+#define Elf_Word    Elf32_Word
+#define Elf_Sword   Elf32_Sword
+#define Elf_Ehdr    Elf32_Ehdr
+#define Elf_Phdr    Elf32_Phdr
+#define Elf_Shdr    Elf32_Shdr
+#define Elf_Sym     Elf32_Sym
+#define Elf_Rel     Elf32_Rel
+#define Elf_Rela    Elf32_Rela
+#ifndef ELF_ST_TYPE
+#define ELF_ST_TYPE ELF32_ST_TYPE
+#endif
+#ifndef ELF_ST_BIND
+#define ELF_ST_BIND ELF32_ST_BIND
+#endif
+#ifndef ELF_R_TYPE
+#define ELF_R_TYPE  ELF32_R_TYPE
+#endif
+#ifndef ELF_R_SYM
+#define ELF_R_SYM   ELF32_R_SYM
+#endif
+#endif
+
+
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// PARANOIA
+
+/* -----------------------------------------------------------------------
+ * Sanity checking.  For each ObjectCode, maintain a list of address ranges
+ * which may be prodded during relocation, and abort if we try and write
+ * outside any of these.
+ */
+static void addProddableBlock ( ObjectCode* oc, void* start, int size )
+{
+   ProddableBlock* pb
+      = malloc(sizeof(ProddableBlock));
+   if (debug_linker)
+      fprintf(stderr, "aPB oc=%p %p %d   (%p .. %p)\n", oc, start, size,
+	      start, ((char*)start)+size-1 );
+   assert(size > 0);
+   pb->start      = start;
+   pb->size       = size;
+   pb->next       = oc->proddables;
+   oc->proddables = pb;
+}
+
+static void checkProddableBlock ( ObjectCode* oc, void* addr )
+{
+   ProddableBlock* pb;
+   for (pb = oc->proddables; pb != NULL; pb = pb->next) {
+      char* s = (char*)(pb->start);
+      char* e = s + pb->size - 1;
+      char* a = (char*)addr;
+      /* Assumes that the biggest fixup involves a 4-byte write.  This
+         probably needs to be changed to 8 (ie, +7) on 64-bit
+         plats. */
+      if (a >= s && (a+3) <= e) return;
+   }
+   fprintf(stderr,
+           "checkProddableBlock: invalid fixup %p in runtime linker\n",
+           addr);
+   exit(1);
+}
+
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// String->Addr mappings
+
/* One (symbol-name, address) pair. */
typedef 
   struct { char* mp_name; void* mp_addr; } 
   Maplet;

/* A growable array of Maplets searched linearly: sm_used of sm_size
   slots are occupied.  Names are stored by reference, not copied. */
typedef
   struct {
      int sm_size;
      int sm_used;
      Maplet* maplets;
   }
   StringMap;
+
+static StringMap* new_StringMap ( void )
+{
+   StringMap* sm = malloc(sizeof(StringMap));
+   sm->sm_size = 10;
+   sm->sm_used = 0;
+   sm->maplets = malloc(10 * sizeof(Maplet));
+   return sm;
+}
+
+static void delete_StringMap ( StringMap* sm )
+{
+   assert(sm->maplets != NULL);
+   free(sm->maplets);
+   sm->maplets = NULL;
+   free(sm);
+}
+
+static void ensure_StringMap ( StringMap* sm )
+{
+   int i;
+   Maplet* mp2;
+   assert(sm->maplets != NULL);
+   if (sm->sm_used < sm->sm_size)
+     return;
+   sm->sm_size *= 2;
+   mp2 = malloc(sm->sm_size * sizeof(Maplet));
+   for (i = 0; i < sm->sm_used; i++)
+      mp2[i] = sm->maplets[i];
+   free(sm->maplets);
+   sm->maplets = mp2;
+}
+
+static void* search_StringMap ( StringMap* sm, char* name )
+{
+   int i;
+   for (i = 0; i < sm->sm_used; i++)
+      if (0 == strcmp(name, sm->maplets[i].mp_name))
+         return sm->maplets[i].mp_addr;
+   return NULL;
+}
+
+static void addto_StringMap ( StringMap* sm, char* name, void* addr )
+{
+   ensure_StringMap(sm);
+   sm->maplets[sm->sm_used].mp_name = name;
+   sm->maplets[sm->sm_used].mp_addr = addr;
+   sm->sm_used++;
+}
+
+static void paranoid_addto_StringMap ( StringMap* sm, char* name, void* addr )
+{
+   if (search_StringMap(sm,name) != NULL) {
+      fprintf(stderr, "paranoid_addto_StringMap(%s,%p)\n", name, addr);
+      exit(1);
+   }
+   addto_StringMap(sm,name,addr);
+}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// Top-level linker control.
+
/* The single global (name -> address) symbol table, and the chain of
   all object modules loaded so far. */
StringMap*  global_symbol_table = NULL;
ObjectCode* global_object_list = NULL;

/* Idempotent lazy initialisation: create the global symbol table on
   first use.  Safe to call any number of times. */
static void initLinker ( void )
{
   if (global_symbol_table != NULL)
      return;
   global_symbol_table = new_StringMap();
}
+
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// SYMBOL TABLE(s)
+
+/* -----------------------------------------------------------------
+ * lookup a symbol in the global symbol table
+ */
+static 
+void * lookupSymbol( char *lbl )
+{
+   void *val;
+   initLinker() ;
+   assert(global_symbol_table != NULL);
+   val = search_StringMap(global_symbol_table, lbl);
+   return val;
+}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// HELPERS
+
+/*
+ * Generic ELF functions
+ */
+
+static char *
+findElfSection ( void* objImage, Elf_Word sh_type )
+{
+   char* ehdrC = (char*)objImage;
+   Elf_Ehdr* ehdr = (Elf_Ehdr*)ehdrC;
+   Elf_Shdr* shdr = (Elf_Shdr*)(ehdrC + ehdr->e_shoff);
+   char* sh_strtab = ehdrC + shdr[ehdr->e_shstrndx].sh_offset;
+   char* ptr = NULL;
+   int i;
+
+   for (i = 0; i < ehdr->e_shnum; i++) {
+      if (shdr[i].sh_type == sh_type
+          /* Ignore the section header's string table. */
+          && i != ehdr->e_shstrndx
+	  /* Ignore string tables named .stabstr, as they contain
+             debugging info. */
+          && 0 != memcmp(".stabstr", sh_strtab + shdr[i].sh_name, 8)
+         ) {
+         ptr = ehdrC + shdr[i].sh_offset;
+         break;
+      }
+   }
+   return ptr;
+}
+
#ifdef arm_TARGET_ARCH
/* Carve nbytes (a positive multiple of 4) out of oc's fixup area and
   return a pointer to them; exits if the area is exhausted. */
static
char* alloc_fixup_bytes ( ObjectCode* oc, int nbytes )
{
   char* res;
   assert(nbytes % 4 == 0);
   assert(nbytes > 0);
   /* FIX: check capacity BEFORE handing out the bytes.  The original
      bumped fixup_used first and tested `used >= size` afterwards,
      which also wrongly rejected an exactly-full fixup area. */
   if (oc->fixup_used + nbytes > oc->fixup_size) {
     fprintf(stderr, "fixup area too small for %s\n", oc->fileName);
     exit(1);
   }
   res = &(oc->fixup[oc->fixup_used]);
   oc->fixup_used += nbytes;
   return res;
}
#endif
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// RESOLVE
+
/* Last-resort symbol resolution: intercept a handful of libc entry
   points and map them onto the host's own implementations.  Returns
   NULL for anything not special-cased. */
static
void* lookup_magic_hacks ( char* sym )
{
   if (strcmp(sym, "printf") == 0)
      return (void*)(&printf);
   return NULL;
}
+
#ifdef arm_TARGET_ARCH
/* Ask the kernel (via an ARM-Linux cacheflush swi) to synchronise the
   icache for the freshly written code at [start, start+length). */
static
void arm_notify_new_code ( char* start, int length )
{
  __asm __volatile ("mov r1, %0\n\t"
                    "mov r2, %1\n\t"
                    "mov r3, %2\n\t"
                    "swi 0x9f0002\n\t"
                    : 
                    : "ir" (start), "ir" (length), "ir" (0) );
}


/* Write an 8-byte absolute-jump trampoline to dstP at fixup:
   an ldr pc,[pc,#-4] instruction followed by the little-endian
   destination word.  NOTE(review): the fprintf trace here is not
   gated on debug_linker, unlike the rest of the file. */
static
void gen_armle_goto ( char* fixup, char* dstP )
{
  Elf_Word w = (Elf_Word)dstP;
  /* 
   2                    .text
   3 0000 04F01FE5              ldr     pc, value
   4 0004 44332211      value:  .word   0x11223344
   */
  fprintf(stderr,"at %p generating jump to %p\n", fixup, dstP );
  fixup[0] = 0x04; fixup[1] = 0xF0; fixup[2] = 0x1F; fixup[3] = 0xE5;
  /* Destination address, least-significant byte first. */
  fixup[4] = w & 0xFF; w >>= 8;
  fixup[5] = w & 0xFF; w >>= 8;
  fixup[6] = w & 0xFF; w >>= 8;
  fixup[7] = w & 0xFF; w >>= 8;
  arm_notify_new_code(fixup, 8);
}
#endif /* arm_TARGET_ARCH */
+
+
+
/* Do ELF relocations which lack an explicit addend.  All x86-linux
   relocations appear to be of this form.

   oc/ehdrC: the object and its mapped image; shdr/shnum: the section
   header table and the index of the SHT_REL section to process;
   stab/strtab: symbol and string tables (stab is recomputed below
   from the reloc section's sh_link).  Returns 1 on success, 0 on the
   first unresolvable symbol or unhandled relocation type. */
static int
do_Elf_Rel_relocations ( ObjectCode* oc, char* ehdrC,
                         Elf_Shdr* shdr, int shnum,
                         Elf_Sym*  stab, char* strtab )
{
   int j;
   char *symbol = NULL;
   Elf_Word* targ;
   Elf_Rel*  rtab = (Elf_Rel*) (ehdrC + shdr[shnum].sh_offset);
   int         nent = shdr[shnum].sh_size / sizeof(Elf_Rel);
   /* sh_info names the section being patched; sh_link its symtab. */
   int target_shndx = shdr[shnum].sh_info;
   int symtab_shndx = shdr[shnum].sh_link;

   stab  = (Elf_Sym*) (ehdrC + shdr[ symtab_shndx ].sh_offset);
   targ  = (Elf_Word*)(ehdrC + shdr[ target_shndx ].sh_offset);
   IF_DEBUG(linker,belch( "relocations for section %d using symtab %d",
                          target_shndx, symtab_shndx ));

   for (j = 0; j < nent; j++) {
      Elf_Addr offset = rtab[j].r_offset;
      Elf_Addr info   = rtab[j].r_info;

      /* Standard ELF naming: P = place being patched, A = implicit
         addend (read from the place itself for REL), S = symbol
         value. */
      Elf_Addr  P  = ((Elf_Addr)targ) + offset;
      Elf_Word* pP = (Elf_Word*)P;
      Elf_Addr  A  = *pP;
      Elf_Addr  S;
      Elf_Addr  value;

      IF_DEBUG(linker,belch( "Rel entry %3d is raw(%6p %6p)",
                             j, (void*)offset, (void*)info ));
      if (!info) {
         IF_DEBUG(linker,belch( " ZERO" ));
         S = 0;
      } else {
         Elf_Sym sym = stab[ELF_R_SYM(info)];
	 /* First see if it is a local symbol. */
         if (ELF_ST_BIND(sym.st_info) == STB_LOCAL) {
            /* Yes, so we can get the address directly from the ELF symbol
               table. */
            symbol = sym.st_name==0 ? "(noname)" : strtab+sym.st_name;
            S = (Elf_Addr)
                (ehdrC + shdr[ sym.st_shndx ].sh_offset
                       + stab[ELF_R_SYM(info)].st_value);

	 } else {
            /* No, so look up the name in our global table. */
            symbol = strtab + sym.st_name;
            S = (Elf_Addr)lookupSymbol( symbol );
	 }
         /* Still unresolved: fall back on the libc intercepts. */
         if (!S) {
            S = (Elf_Addr)lookup_magic_hacks(symbol);
         }
         if (!S) {
            fprintf(stderr,"%s: unknown symbol `%s'\n", 
                           oc->fileName, symbol);
	    return 0;
         }
         if (debug_linker>1) 
            fprintf(stderr, "\n`%s' resolves to %p\n", symbol, (void*)S );
      }

      if (debug_linker>1)
         fprintf(stderr, "Reloc: P = %p   S = %p   A = %p\n",
			     (void*)P, (void*)S, (void*)A );
      /* Sanity: only patch within registered proddable regions. */
      checkProddableBlock ( oc, pP );

      value = S + A;

      switch (ELF_R_TYPE(info)) {
#        ifdef i386_TARGET_ARCH
         case R_386_32:   *pP = value;     break;
         case R_386_PC32: *pP = value - P; break;
#        endif
#        ifdef arm_TARGET_ARCH
         case R_ARM_PC24: {
	    Elf_Word w, delta, deltaTop8;
	    /* Generate a jump sequence into the fixup area
	       and branch to that instead. */
 	    char* fixup = alloc_fixup_bytes(oc, 8);
            /* First of all, figure out where we're really trying to
               jump to. */
            // compensate for pc+8 bias
            Elf_Word real_dst = (A & 0x00FFFFFF) + 2;
	    // sign-extend 24-to-32 of real_dst
            if (real_dst & 0x00800000) 
               real_dst |= 0xFF000000;
            else
               real_dst &= 0x00FFFFFF;

            real_dst <<= 2;
	    real_dst += S;

	    gen_armle_goto(fixup, (char*)real_dst);

	    /* Delta is in bytes .. */
            delta = (((Elf_Word)fixup) - ((Elf_Word)pP) - 8);
            deltaTop8 = (delta >> 24) & 0xFF;
            /* The trampoline itself must be within branch range. */
            if (deltaTop8 != 0 && deltaTop8 != 0xFF) {
	      fprintf(stderr,"R_ARM_PC24: out of range delta 0x%x for %s\n",
		      delta, symbol);
	      exit(1);
	    }
            delta >>= 2;
	    w = *pP;
            /* Keep condition/opcode bits, replace the 24-bit offset. */
            w &= 0xFF000000;
            w |= (0x00FFFFFF & delta );
            *pP = w;
	    break;
         }
         case R_ARM_ABS32:
	    *pP = value;
	    break;
#        endif
         default:
            fprintf(stderr,
                    "%s: unhandled ELF relocation(Rel) type %d\n\n",
		    oc->fileName, ELF_R_TYPE(info));
            return 0;
      }

   }
   return 1;
}
+
/* Do ELF relocations for which explicit addends are supplied.
   sparc-solaris relocations appear to be of this form.

   Same contract as do_Elf_Rel_relocations, but shnum indexes an
   SHT_RELA section, so the addend A comes from the record rather than
   from the patched location.  Returns 1 on success, 0 on failure. */
static int
do_Elf_Rela_relocations ( ObjectCode* oc, char* ehdrC,
                          Elf_Shdr* shdr, int shnum,
                          Elf_Sym*  stab, char* strtab )
{
   int j;
   char *symbol;
   Elf_Addr targ;
   Elf_Rela* rtab = (Elf_Rela*) (ehdrC + shdr[shnum].sh_offset);
   int         nent = shdr[shnum].sh_size / sizeof(Elf_Rela);
   int target_shndx = shdr[shnum].sh_info;
   int symtab_shndx = shdr[shnum].sh_link;

   stab  = (Elf_Sym*) (ehdrC + shdr[ symtab_shndx ].sh_offset);
   targ  = (Elf_Addr) (ehdrC + shdr[ target_shndx ].sh_offset);
   IF_DEBUG(linker,belch( "relocations for section %d using symtab %d",
                          target_shndx, symtab_shndx ));

   for (j = 0; j < nent; j++) {
#if defined(DEBUG) || defined(sparc_TARGET_ARCH) || defined(ia64_TARGET_ARCH)
      /* This #ifdef only serves to avoid unused-var warnings. */
      Elf_Addr  offset = rtab[j].r_offset;
      Elf_Addr  P      = targ + offset;
#endif
      Elf_Addr  info   = rtab[j].r_info;
      Elf_Addr  A      = rtab[j].r_addend;
      Elf_Addr  S;
      Elf_Addr  value;
#     if defined(sparc_TARGET_ARCH)
      Elf_Word* pP = (Elf_Word*)P;
      Elf_Word  w1, w2;
#     elif defined(ia64_TARGET_ARCH)
      Elf64_Xword *pP = (Elf64_Xword *)P;
      Elf_Addr addr;
#     endif

      IF_DEBUG(linker,belch( "Rel entry %3d is raw(%6p %6p %6p)   ",
                             j, (void*)offset, (void*)info,
                                (void*)A ));
      if (!info) {
         IF_DEBUG(linker,belch( " ZERO" ));
         S = 0;
      } else {
         Elf_Sym sym = stab[ELF_R_SYM(info)];
	 /* First see if it is a local symbol. */
         if (ELF_ST_BIND(sym.st_info) == STB_LOCAL) {
            /* Yes, so we can get the address directly from the ELF symbol
               table. */
            symbol = sym.st_name==0 ? "(noname)" : strtab+sym.st_name;
            S = (Elf_Addr)
                (ehdrC + shdr[ sym.st_shndx ].sh_offset
                       + stab[ELF_R_SYM(info)].st_value);
#ifdef ELF_FUNCTION_DESC
	    /* Make a function descriptor for this function */
            if (S && ELF_ST_TYPE(sym.st_info) == STT_FUNC) {
               S = allocateFunctionDesc(S + A);
       	       A = 0;
            }
#endif
	 } else {
            /* No, so look up the name in our global table. */
            symbol = strtab + sym.st_name;
            S = (Elf_Addr)lookupSymbol( symbol );

#ifdef ELF_FUNCTION_DESC
	    /* If a function, already a function descriptor - we would
	       have to copy it to add an offset. */
            if (S && (ELF_ST_TYPE(sym.st_info) == STT_FUNC) && (A != 0))
               belch("%s: function %s with addend %p", oc->fileName, symbol, (void *)A);
#endif
	 }
         if (!S) {
	   fprintf(stderr,"%s: unknown symbol `%s'\n", oc->fileName, symbol);
	   return 0;
         }
         IF_DEBUG(linker,belch( "`%s' resolves to %p\n", symbol, (void*)S ));
      }

      IF_DEBUG(linker,fprintf ( stderr, "Reloc: P = %p   S = %p   A = %p\n",
                                        (void*)P, (void*)S, (void*)A ));
      /* checkProddableBlock ( oc, (void*)P ); */

      value = S + A;

      switch (ELF_R_TYPE(info)) {
#        if defined(sparc_TARGET_ARCH)
         case R_SPARC_WDISP30:
            /* 30-bit word-displacement branch: keep the top 2 opcode
               bits, insert (value - P) >> 2. */
            w1 = *pP & 0xC0000000;
            w2 = (Elf_Word)((value - P) >> 2);
            ASSERT((w2 & 0xC0000000) == 0);
            w1 |= w2;
            *pP = w1;
            break;
         case R_SPARC_HI22:
            w1 = *pP & 0xFFC00000;
            w2 = (Elf_Word)(value >> 10);
            ASSERT((w2 & 0xFFC00000) == 0);
            w1 |= w2;
            *pP = w1;
            break;
         case R_SPARC_LO10:
            w1 = *pP & ~0x3FF;
            w2 = (Elf_Word)(value & 0x3FF);
            ASSERT((w2 & ~0x3FF) == 0);
            w1 |= w2;
            *pP = w1;
            break;
         /* According to the Sun documentation:
            R_SPARC_UA32
            This relocation type resembles R_SPARC_32, except it refers to an
            unaligned word. That is, the word to be relocated must be treated
            as four separate bytes with arbitrary alignment, not as a word
            aligned according to the architecture requirements.

            (JRS: which means that freeloading on the R_SPARC_32 case
            is probably wrong, but hey ...)
         */
         case R_SPARC_UA32:
         case R_SPARC_32:
            w2 = (Elf_Word)value;
            *pP = w2;
            break;
#        elif defined(ia64_TARGET_ARCH)
	 case R_IA64_DIR64LSB:
	 case R_IA64_FPTR64LSB:
	    *pP = value;
	    break;
	 case R_IA64_PCREL64LSB:
	    *pP = value - P;
	    break;
	 case R_IA64_SEGREL64LSB:
	    addr = findElfSegment(ehdrC, value);
	    *pP = value - addr;
	    break;
	 case R_IA64_GPREL22:
	    ia64_reloc_gprel22(P, value);
	    break;
	 case R_IA64_LTOFF22:
	 case R_IA64_LTOFF22X:
	 case R_IA64_LTOFF_FPTR22:
	    addr = allocateGOTEntry(value);
	    ia64_reloc_gprel22(P, addr);
	    break;
	 case R_IA64_PCREL21B:
	    ia64_reloc_pcrel21(P, S, oc);
	    break;
	 case R_IA64_LDXMOV:
	    /* This goes with R_IA64_LTOFF22X and points to the load to
	     * convert into a move.  We don't implement relaxation. */
	    break;
#        endif
         default:
            fprintf(stderr,
                    "%s: unhandled ELF relocation(RelA) type %d\n",
		    oc->fileName, ELF_R_TYPE(info));
            return 0;
      }

   }
   return 1;
}
+
+
/* Resolve an object: locate its symbol and string tables, apply every
   SHT_REL / SHT_RELA section (skipping .rel.stab debug relocations),
   and discard the now-unneeded local-symbol table.  Returns 1 on
   success, 0 on failure. */
static int
ocResolve_ELF ( ObjectCode* oc )
{
   char *strtab;
   int   shnum, ok;
   Elf_Sym*  stab  = NULL;
   char*     ehdrC = (char*)(oc->image);
   Elf_Ehdr* ehdr  = (Elf_Ehdr*) ehdrC;
   Elf_Shdr* shdr  = (Elf_Shdr*) (ehdrC + ehdr->e_shoff);
   char* sh_strtab = ehdrC + shdr[ehdr->e_shstrndx].sh_offset;

   /* first find "the" symbol table */
   stab = (Elf_Sym*) findElfSection ( ehdrC, SHT_SYMTAB );

   /* also go find the string table */
   strtab = findElfSection ( ehdrC, SHT_STRTAB );

   if (stab == NULL || strtab == NULL) {
      fprintf(stderr,"%s: can't find string or symbol table\n", oc->fileName);
      return 0;
   }

   /* Process the relocation sections. */
   for (shnum = 0; shnum < ehdr->e_shnum; shnum++) {

      /* Skip sections called ".rel.stab".  These appear to contain
         relocation entries that, when done, make the stabs debugging
         info point at the right places.  We ain't interested in all
         dat jazz, mun. */
      if (0 == memcmp(".rel.stab", sh_strtab + shdr[shnum].sh_name, 9))
         continue;

      if (shdr[shnum].sh_type == SHT_REL ) {
         ok = do_Elf_Rel_relocations ( oc, ehdrC, shdr,
                                       shnum, stab, strtab );
         if (!ok) return ok;
      }
      else
      if (shdr[shnum].sh_type == SHT_RELA) {
         ok = do_Elf_Rela_relocations ( oc, ehdrC, shdr,
                                        shnum, stab, strtab );
         if (!ok) return ok;
      }
   }

   /* Free the local symbol table; we won't need it again.
      (lochash is declared void* but holds a StringMap*.) */
   delete_StringMap(oc->lochash);
   oc->lochash = NULL;

   return 1;
}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// VERIFY
+
/* Validate that oc->image looks like a relocatable ELF object we can
   handle: magic, class, endianness, type ET_REL, a supported machine,
   a section-header string table, exactly one ordinary string table,
   and at least one symbol table.  Dumps copious detail when
   debug_linker > 1.  Returns 1 if acceptable, 0 otherwise. */
static int
ocVerifyImage_ELF ( ObjectCode* oc )
{
   Elf_Shdr* shdr;
   Elf_Sym*  stab;
   int i, j, nent, nstrtab, nsymtabs;
   char* sh_strtab;
   char* strtab;

   char*     ehdrC = (char*)(oc->image);
   Elf_Ehdr* ehdr  = (Elf_Ehdr*)ehdrC;

   /* ELF magic number check. */
   if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
       ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
       ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
       ehdr->e_ident[EI_MAG3] != ELFMAG3) {
      fprintf(stderr,"%s: not an ELF object\n", oc->fileName);
      return 0;
   }

   /* Word size must match the build (ELFCLASS32 or ELFCLASS64). */
   if (ehdr->e_ident[EI_CLASS] != ELFCLASS) {
      fprintf(stderr,"%s: unsupported ELF format\n", oc->fileName);
      return 0;
   }

   if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) {
      if (debug_linker)
         fprintf(stderr, "Is little-endian\n" );
   } else
   if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) {
       if (debug_linker)
          fprintf(stderr, "Is big-endian\n" );
   } else {
       /* NOTE(review): "endiannness" typo in this message. */
       fprintf(stderr,"%s: unknown endiannness\n", oc->fileName);
       return 0;
   }

   if (ehdr->e_type != ET_REL) {
      fprintf(stderr,"%s: not a relocatable object (.o) file\n", oc->fileName);
      return 0;
   }
   if (debug_linker)
      fprintf(stderr, "Is a relocatable object (.o) file\n" );

   if (debug_linker)
      fprintf(stderr, "Architecture is " );
   switch (ehdr->e_machine) {
      case EM_386:   if (debug_linker) fprintf(stderr, "x86\n" ); break;
      case EM_SPARC: if (debug_linker) fprintf(stderr, "sparc\n" ); break;
      case EM_ARM:   if (debug_linker) fprintf(stderr, "arm\n" ); break;
#ifdef EM_IA_64
      case EM_IA_64: if (debug_linker) fprintf(stderr, "ia64\n" ); break;
#endif
      default:       if (debug_linker) fprintf(stderr, "unknown\n" );
                     fprintf(stderr,"%s: unknown architecture\n", oc->fileName);
                     return 0;
   }

   if (debug_linker>1) fprintf(stderr,
             "\nSection header table: start %d, n_entries %d, ent_size %d\n",
             ehdr->e_shoff, ehdr->e_shnum, ehdr->e_shentsize  );

   /* Struct layout must agree with the file's record size. */
   assert (ehdr->e_shentsize == sizeof(Elf_Shdr));

   shdr = (Elf_Shdr*) (ehdrC + ehdr->e_shoff);

   if (ehdr->e_shstrndx == SHN_UNDEF) {
      fprintf(stderr,"%s: no section header string table\n", oc->fileName);
      return 0;
   } else {
      if (debug_linker>1) 
         fprintf(stderr, "Section header string table is section %d\n",
                          ehdr->e_shstrndx);
      sh_strtab = ehdrC + shdr[ehdr->e_shstrndx].sh_offset;
   }

   /* Dump each section header (debug only). */
   for (i = 0; i < ehdr->e_shnum; i++) {
      if (debug_linker>1) fprintf(stderr, "%2d:  ", i );
      if (debug_linker>1) fprintf(stderr, "type=%2d  ", (int)shdr[i].sh_type );
      if (debug_linker>1) fprintf(stderr, "size=%4d  ", (int)shdr[i].sh_size );
      if (debug_linker>1) fprintf(stderr, "offs=%4d  ", (int)shdr[i].sh_offset );
      if (debug_linker>1) fprintf(stderr, "  (%p .. %p)  ",
               ehdrC + shdr[i].sh_offset,
		      ehdrC + shdr[i].sh_offset + shdr[i].sh_size - 1);

      if (shdr[i].sh_type == SHT_REL) {
	  if (debug_linker>1) fprintf(stderr, "Rel  " );
      } else if (shdr[i].sh_type == SHT_RELA) {
	  if (debug_linker>1) fprintf(stderr, "RelA " );
      } else {
	  if (debug_linker>1) fprintf(stderr,"     ");
      }
      if (sh_strtab) {
	  if (debug_linker>1) fprintf(stderr, "sname=%s\n", 
             sh_strtab + shdr[i].sh_name );
      }
   }

   /* Require exactly one ordinary string table (excluding the section
      header string table and any .stabstr debug tables). */
   if (debug_linker>1) fprintf(stderr, "\nString tables\n" );
   strtab = NULL;
   nstrtab = 0;
   for (i = 0; i < ehdr->e_shnum; i++) {
      if (shdr[i].sh_type == SHT_STRTAB
          /* Ignore the section header's string table. */
          && i != ehdr->e_shstrndx
	  /* Ignore string tables named .stabstr, as they contain
             debugging info. */
          && 0 != memcmp(".stabstr", sh_strtab + shdr[i].sh_name, 8)
         ) {
         if (debug_linker>1) 
            fprintf(stderr,"   section %d is a normal string table\n", i );
         strtab = ehdrC + shdr[i].sh_offset;
         nstrtab++;
      }
   }
   if (nstrtab != 1) {
      fprintf(stderr,"%s: no string tables, or too many\n", oc->fileName);
      return 0;
   }

   /* Walk every symbol table, checking record sizes and (in debug
      mode) dumping each entry. */
   nsymtabs = 0;
   if (debug_linker>1) fprintf(stderr, "\nSymbol tables\n" );
   for (i = 0; i < ehdr->e_shnum; i++) {
      if (shdr[i].sh_type != SHT_SYMTAB) continue;
      if (debug_linker>1) fprintf(stderr, "section %d is a symbol table\n", i );
      nsymtabs++;
      stab = (Elf_Sym*) (ehdrC + shdr[i].sh_offset);
      nent = shdr[i].sh_size / sizeof(Elf_Sym);
      if (debug_linker>1) fprintf(stderr,  
            "   number of entries is apparently %d (%d rem)\n",
               nent,
               shdr[i].sh_size % sizeof(Elf_Sym)
             );
      if (0 != shdr[i].sh_size % sizeof(Elf_Sym)) {
         fprintf(stderr,"%s: non-integral number of symbol table entries\n", 
                        oc->fileName);
         return 0;
      }
      for (j = 0; j < nent; j++) {
         if (debug_linker>1) fprintf(stderr, "   %2d  ", j );
         if (debug_linker>1) fprintf(stderr, "  sec=%-5d  size=%-3d  val=%5p  ",
                             (int)stab[j].st_shndx,
                             (int)stab[j].st_size,
                             (char*)stab[j].st_value );

         if (debug_linker>1) fprintf(stderr, "type=" );
         switch (ELF_ST_TYPE(stab[j].st_info)) {
            case STT_NOTYPE:  if (debug_linker>1) fprintf(stderr, "notype " ); break;
            case STT_OBJECT:  if (debug_linker>1) fprintf(stderr, "object " ); break;
            case STT_FUNC  :  if (debug_linker>1) fprintf(stderr, "func   " ); break;
            case STT_SECTION: if (debug_linker>1) fprintf(stderr, "section" ); break;
            case STT_FILE:    if (debug_linker>1) fprintf(stderr, "file   " ); break;
            default:          if (debug_linker>1) fprintf(stderr, "?      " ); break;
         }
         if (debug_linker>1) fprintf(stderr, "  " );

         if (debug_linker>1) fprintf(stderr, "bind=" );
         switch (ELF_ST_BIND(stab[j].st_info)) {
            case STB_LOCAL :  if (debug_linker>1) fprintf(stderr, "local " ); break;
            case STB_GLOBAL:  if (debug_linker>1) fprintf(stderr, "global" ); break;
            case STB_WEAK  :  if (debug_linker>1) fprintf(stderr, "weak  " ); break;
            default:          if (debug_linker>1) fprintf(stderr, "?     " ); break;
         }
         if (debug_linker>1) fprintf(stderr, "  " );

         if (debug_linker>1) fprintf(stderr, "name=%s\n", strtab + stab[j].st_name );
      }
   }

   if (nsymtabs == 0) {
      fprintf(stderr,"%s: didn't find any symbol tables\n", oc->fileName);
      return 0;
   }

   return 1;
}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// GETNAMES
+
+static int
+ocGetNames_ELF ( ObjectCode* oc )
+{
+   int i, j, k, nent;
+   Elf_Sym* stab;
+
+   char*     ehdrC     = (char*)(oc->image);
+   Elf_Ehdr* ehdr      = (Elf_Ehdr*)ehdrC;
+   char*     strtab    = findElfSection ( ehdrC, SHT_STRTAB );
+   Elf_Shdr* shdr      = (Elf_Shdr*) (ehdrC + ehdr->e_shoff);
+
+   char*     sh_strtab = ehdrC + shdr[ehdr->e_shstrndx].sh_offset;
+   char*     sec_name;
+
+   assert(global_symbol_table != NULL);
+
+   if (!strtab) {
+      fprintf(stderr,"%s: no strtab\n", oc->fileName);
+      return 0;
+   }
+
+   k = 0;
+   for (i = 0; i < ehdr->e_shnum; i++) {
+      /* Figure out what kind of section it is.  Logic derived from
+         Figure 1.14 ("Special Sections") of the ELF document
+         ("Portable Formats Specification, Version 1.1"). */
+      Elf_Shdr    hdr    = shdr[i];
+      SectionKind kind   = SECTIONKIND_OTHER;
+      int         is_bss = FALSE;
+
+      if (hdr.sh_type == SHT_PROGBITS
+          && (hdr.sh_flags & SHF_ALLOC) && (hdr.sh_flags & SHF_EXECINSTR)) {
+         /* .text-style section */
+         kind = SECTIONKIND_CODE_OR_RODATA;
+      }
+      else
+      if (hdr.sh_type == SHT_PROGBITS
+          && (hdr.sh_flags & SHF_ALLOC) && (hdr.sh_flags & SHF_WRITE)) {
+         /* .data-style section */
+         kind = SECTIONKIND_RWDATA;
+      }
+      else
+      if (hdr.sh_type == SHT_PROGBITS
+          && (hdr.sh_flags & SHF_ALLOC) && !(hdr.sh_flags & SHF_WRITE)) {
+         /* .rodata-style section */
+         kind = SECTIONKIND_CODE_OR_RODATA;
+      }
+      else
+      if (hdr.sh_type == SHT_NOBITS
+          && (hdr.sh_flags & SHF_ALLOC) && (hdr.sh_flags & SHF_WRITE)) {
+         /* .bss-style section */
+         kind = SECTIONKIND_RWDATA;
+         is_bss = TRUE;
+      }
+
+      if (is_bss && shdr[i].sh_size > 0) {
+         /* This is a non-empty .bss section.  Allocate zeroed space for
+            it, and set its .sh_offset field such that
+            ehdrC + .sh_offset == addr_of_zeroed_space.  */
+         char* zspace = calloc(1, shdr[i].sh_size);
+         shdr[i].sh_offset = ((char*)zspace) - ((char*)ehdrC);
+	 /*
+         fprintf(stderr, "BSS section at 0x%x, size %d\n",
+                         zspace, shdr[i].sh_size);
+	 */
+      }
+
+      /* When loading objects compiled with -g, it seems there are
+	 relocations in various debug-info sections.  So we'd better
+	 tell addProddableBlock to allow those bits to be prodded. */
+      //fprintf(stderr, "ZZZZZZZZZZ %s\n", sh_strtab + hdr.sh_name);
+      sec_name = sh_strtab + shdr[i].sh_name;
+      if (kind == SECTIONKIND_OTHER
+          && (0 == strcmp(".debug_info", sec_name)
+              || 0 == strcmp(".debug_line", sec_name)
+              || 0 == strcmp(".debug_pubnames", sec_name)
+              || 0 == strcmp(".debug_aranges", sec_name)
+              || 0 == strcmp(".debug_frame", sec_name))) {
+         kind = SECTIONKIND_CODE_OR_RODATA;
+      }
+
+      /* fill in the section info */
+      if (kind != SECTIONKIND_OTHER && shdr[i].sh_size > 0) {
+         addProddableBlock(oc, ehdrC + shdr[i].sh_offset, shdr[i].sh_size);
+         //addSection(oc, kind, ehdrC + shdr[i].sh_offset,
+         //               ehdrC + shdr[i].sh_offset + shdr[i].sh_size - 1);
+      }
+
+      if (shdr[i].sh_type != SHT_SYMTAB) continue;
+
+      /* copy stuff into this module's object symbol table */
+      stab = (Elf_Sym*) (ehdrC + shdr[i].sh_offset);
+      nent = shdr[i].sh_size / sizeof(Elf_Sym);
+
+      oc->n_symbols = nent;
+      oc->symbols = malloc(oc->n_symbols * sizeof(char*));
+
+      for (j = 0; j < nent; j++) {
+
+         char  isLocal = FALSE; /* avoids uninit-var warning */
+         char* ad      = NULL;
+         char* nm      = strtab + stab[j].st_name;
+         int   secno   = stab[j].st_shndx;
+
+	 /* Figure out if we want to add it; if so, set ad to its
+            address.  Otherwise leave ad == NULL. */
+
+         if (secno == SHN_COMMON) {
+            isLocal = FALSE;
+            ad = calloc(1, stab[j].st_size);
+	    /*
+            fprintf(stderr, "COMMON symbol, size %d name %s\n",
+                            stab[j].st_size, nm);
+	    */
+	    /* Pointless to do addProddableBlock() for this area,
+               since the linker should never poke around in it. */
+	 }
+         else
+         if ( ( ELF_ST_BIND(stab[j].st_info)==STB_GLOBAL
+                || ELF_ST_BIND(stab[j].st_info)==STB_LOCAL
+              )
+              /* and not an undefined symbol */
+              && stab[j].st_shndx != SHN_UNDEF
+	      /* and not in a "special section" */
+              && stab[j].st_shndx < SHN_LORESERVE
+              &&
+	      /* and it's a not a section or string table or anything silly */
+              ( ELF_ST_TYPE(stab[j].st_info)==STT_FUNC ||
+                ELF_ST_TYPE(stab[j].st_info)==STT_OBJECT ||
+                ELF_ST_TYPE(stab[j].st_info)==STT_NOTYPE
+              )
+            ) {
+	    /* Section 0 is the undefined section, hence > and not >=. */
+            assert(secno > 0 && secno < ehdr->e_shnum);
+	    /*
+            if (shdr[secno].sh_type == SHT_NOBITS) {
+               fprintf(stderr, "   BSS symbol, size %d off %d name %s\n",
+                               stab[j].st_size, stab[j].st_value, nm);
+            }
+            */
+            ad = ehdrC + shdr[ secno ].sh_offset + stab[j].st_value;
+            if (ELF_ST_BIND(stab[j].st_info)==STB_LOCAL) {
+               isLocal = TRUE;
+            } else {
+#ifdef ELF_FUNCTION_DESC
+               /* dlsym() and the initialisation table both give us function
+		* descriptors, so to be consistent we store function descriptors
+		* in the symbol table */
+               if (ELF_ST_TYPE(stab[j].st_info) == STT_FUNC)
+                   ad = (char *)allocateFunctionDesc((Elf_Addr)ad);
+#endif
+               if (debug_linker) 
+                   fprintf(stderr, "addOTabName(GLOB): %10p  %s %s\n",
+                                      ad, oc->fileName, nm );
+               isLocal = FALSE;
+            }
+         }
+
+         /* And the decision is ... */
+
+         if (ad != NULL) {
+            assert(nm != NULL);
+	    oc->symbols[j] = nm;
+            /* Acquire! */
+            if (isLocal) {
+               /* Ignore entirely. */
+            } else {
+	      //ghciInsertStrHashTable(oc->fileName, global_symbol_table, nm, ad);
+	      paranoid_addto_StringMap(global_symbol_table, nm, ad);
+            }
+         } else {
+            /* Skip. */
+            if (debug_linker>1) fprintf(stderr, "skipping `%s'\n",
+                                   strtab + stab[j].st_name );
+            /*
+            fprintf(stderr,
+                    "skipping   bind = %d,  type = %d,  shndx = %d   `%s'\n",
+                    (int)ELF_ST_BIND(stab[j].st_info),
+                    (int)ELF_ST_TYPE(stab[j].st_info),
+                    (int)stab[j].st_shndx,
+                    strtab + stab[j].st_name
+                   );
+            */
+            oc->symbols[j] = NULL;
+         }
+
+      }
+   }
+
+   return 1;
+}
+
+
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////
+//
+// TOP-LEVEL CONTROL OF THE LINKER
+
+
+/* ---------------------------------------------------------------------
+ * Load an obj (populate the global symbol table, but don't resolve yet)
+ *
+ * Returns: 1 if ok, 0 on error.
+ */
+static
+int loadObj( char *path )
+{
+   ObjectCode* oc;
+   struct stat st;
+   int r;
+   int fd, pagesize;
+   char* p;
+
+   initLinker();
+
+   fprintf(stderr, "==== loadObj %s ====\n", path );
+
+   /* Check that we haven't already loaded this object.  */
+   {
+       ObjectCode *o;
+       int is_dup = 0;
+       for (o = global_object_list; o; o = o->next) {
+          if (0 == strcmp(o->fileName, path))
+             is_dup = 1;
+       }
+       if (is_dup) {
+	 fprintf(stderr,
+            "\n\n"
+            "GHCi runtime linker: warning: looks like you're trying to load the\n"
+            "same object file twice:\n"
+            "   %s\n"
+            , path);
+	 exit(1);
+       }
+   }
+
+   oc = malloc(sizeof(ObjectCode));
+
+   oc->formatName = "ELF";
+
+   r = stat(path, &st);
+   if (r == -1) { return 0; }
+
+   /* sigh, strdup() isn't a POSIX function, so do it the long way */
+   oc->fileName = malloc( strlen(path)+1 );
+   strcpy(oc->fileName, path);
+
+   oc->fileSize          = st.st_size;
+   oc->symbols           = NULL;
+   oc->sections          = NULL;
+   oc->lochash           = new_StringMap();
+   oc->proddables        = NULL;
+   oc->fixup             = NULL;
+   oc->fixup_used        = 0;
+   oc->fixup_size        = 0;
+
+   /* chain it onto the list of objects */
+   oc->next              = global_object_list;
+   global_object_list    = oc;
+
+   fd = open(path, O_RDONLY);
+   if (fd == -1) {
+      fprintf(stderr,"loadObj: can't open `%s'\n", path);
+      exit(1);
+   }
+
+   /* Allocate a 1-page area just prior to the image, so we can put
+      fixup code fragments there.  Used for doing R_ARM_PC24
+      relocations for jump distances > 64M. */
+
+   pagesize = getpagesize();
+   p = memalign(pagesize, N_FIXUP_PAGES * pagesize
+                          + oc->fileSize);
+
+   if (p == NULL) {
+      fprintf(stderr,"loadObj: failed to allocate space for `%s'\n", path);
+      exit(1);
+   }
+
+   oc->fixup = p;
+   oc->fixup_size = N_FIXUP_PAGES * pagesize;
+   oc->fixup_used = 0;
+   oc->image = &(p[ oc->fixup_size ]);
+
+   r = read(fd, oc->image, oc->fileSize);
+   if (r != oc->fileSize) {
+      fprintf(stderr,"loadObj: failed to read `%s'\n", path);
+      exit(1);
+   }
+
+   fprintf(stderr, "loaded %s at %p (fixup = %p)\n", 
+                   oc->fileName, oc->image, oc->fixup );
+
+   close(fd);
+
+   /* verify the in-memory image */
+   r = ocVerifyImage_ELF ( oc );
+   if (!r) { return r; }
+
+   /* build the symbol list for this image */
+   r = ocGetNames_ELF ( oc );
+   if (!r) { return r; }
+
+   /* loaded, but not resolved yet */
+   oc->status = OBJECT_LOADED;
+
+   return 1;
+}
+
+
+
+/* ---------------------------------------------------------------------------
+ * resolve all the currently unlinked objects in memory
+ *
+ * Returns: 1 if ok, 0 on error.
+ */
+static
+int resolveObjs( void )
+{
+    ObjectCode *oc;
+    int r;
+
+    initLinker();
+
+    for (oc = global_object_list; oc; oc = oc->next) {
+	if (oc->status != OBJECT_RESOLVED) {
+	    r = ocResolve_ELF ( oc );
+	    if (!r) { return r; }
+	    oc->status = OBJECT_RESOLVED;
+	}
+    }
+    return 1;
+}
+
+
+/* ---------------------------------------------------------------------------
+ * Top-level linker.
+ */
+
+/* Load and link a bunch of .o's, and return the address of
+   'main'.  Or NULL if something borks.
+*/
+void* linker_top_level_LINK ( int n_object_names, char** object_names )
+{
+   int   r, i;
+   void* mainp;
+
+   initLinker();
+   for (i = 0; i < n_object_names; i++) {
+      //fprintf(stderr, "linkloop %d %s\n", i, object_names[i] );
+      r = loadObj( object_names[i] );
+      if (r != 1) return NULL;
+   }
+   r = resolveObjs();
+   if (r != 1) return NULL;
+   mainp = search_StringMap ( global_symbol_table, "main" );
+   if (mainp == NULL) return NULL;
+   printf("Linker: success!\n");
+   return mainp;
+}
+
+
+#if 1
+int main ( int argc, char** argv )
+{
+   void* mainp;
+   linker_top_level_LINK( argc - 1 , &argv[1]);
+   /* find and run "main" */
+
+   mainp = search_StringMap ( global_symbol_table, "main" );
+   if (mainp == NULL) {
+     fprintf(stderr, "no binding for main\n");
+     exit(1);
+   }
+
+   printf("\nSTARTING PROGRAM\n");
+   ( (int(*)(int,char**)) mainp ) (argc,argv);
+   printf("FINISHED\n");
+
+   return 0;
+}
+#endif
+
+////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////
+//
+// VIRTUAL MACHINE ...
+
+/* --------------------------------------------------------- */
+/* SIMULATED STATE                                           */
+/* --------------------------------------------------------- */
+
+typedef unsigned int Word;
+
+/* Stack for the simulation */
+Word* sim_stack;
+
+/* Stop when we get a jump to here. */
+char* stop_at;
+
+
+/* ARM state */
+/* r0 .. r15, flags */
+Word regs_arm[16+1];
+
+#define REG_PC 15
+#define REG_SP 14
+
+
+//---------------------------------------------
+
+/* Calling convention: enter the translation with r0 pointing at
+   regs_arm.  Translation may trash r1 .. r12 inclusive.  Translation
+   should update all regs in regs_arm, and put the next pc value
+   in regs_arm[REG_PC]. */
+
+static
+void run_translation ( char* trans, char* baseblock )
+{
+  /* r0 holds trans */
+  __asm __volatile
+     ("stmfd   sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13}\n\t"
+      "mov     r12, %0\n\t"
+      "mov     r0, %1\n\t"
+      "bl      r12\n\t"
+      "ldmea   sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13}\n\t"
+      :
+      : "ir" (trans), "ir" (baseblock) );
+}
+
+
+
+
+/* Called by Haskell to initialise the simulated machine.  The
+   supplied address is the entry point of some procedure to call.  */
+
+/* EXPORTED */
+void initialise_machine ( char* first_pc )
+{
+   static char start[12];
+   Word w = (Word)first_pc;
+
+   n_transtab_used = 0;
+
+   sim_stack = malloc(10000 * sizeof(Word));
+   regs_arm[REG_SP] = (Word)(&sim_stack[9999]);
+
+   regs_arm[REG_PC] = (Word)first_pc;
+
+   /* Generate this.  Note, we'll be returning directly to the
+      data, so the JIT must stop at this point! */
+   /*
+   3 0000 00C09FE5              ldr     ip, value
+   4 0004 FEFFFFEB              bl      ip
+   5                    value:
+   6 0008 44332211              .word   0x11223344
+   */
+   start[0] = 0x00; start[1] = 0xC0; start[2] = 0x9F; start[3] = 0xE5;
+   start[4] = 0xFE; start[5] = 0xFF; start[6] = 0xFF; start[7] = 0xEB;
+   start[8]  = w & 0xFF; w >>= 8;
+   start[9]  = w & 0xFF; w >>= 8;
+   start[10] = w & 0xFF; w >>= 8;
+   start[11] = w & 0xFF; w >>= 8;
+
+   stop_at = &start[8];
+   arm_notify_new_code(stop_at, 12);
+}
+
diff --git a/VEX/useful/Makefile-vex b/VEX/useful/Makefile-vex
new file mode 100644
index 0000000..637afc9
--- /dev/null
+++ b/VEX/useful/Makefile-vex
@@ -0,0 +1,8 @@
+# Crude makefile to build the "vex" executable from test_main.c
+
+vex: test_main.c test_main.h ../pub/*.h ../priv/*.c ../priv/*.h
+	(cd ..; make -f Makefile-gcc)
+	cc -I../pub -o vex test_main.c ../libvex.a
+
+clean:
+	rm -f vex ../priv/*.o
diff --git a/VEX/useful/cpuid.c b/VEX/useful/cpuid.c
new file mode 100644
index 0000000..a6ded54
--- /dev/null
+++ b/VEX/useful/cpuid.c
@@ -0,0 +1,62 @@
+
+#include <stdio.h>
+
+typedef  unsigned int            UInt;
+typedef  unsigned long long int  ULong;
+
+void cpuid ( UInt* eax, UInt* ebx, UInt* ecx, UInt* edx, 
+             UInt index, UInt ecx_in )
+{
+   UInt a,b,c,d;
+   asm volatile ("cpuid"
+                 : "=a" (a), "=b" (b), "=c" (c), "=d" (d) \
+                 : "0" (index), "2"(ecx_in) );
+   *eax = a; *ebx = b; *ecx = c; *edx = d;
+   printf("%08x %08x -> %08x %08x %08x %08x\n",
+          index,ecx_in, a,b,c,d );
+}
+
+int main ( void )
+{
+  UInt eax, ebx, ecx, edx;
+  UInt maxidx, maxextidx, i,ecx_in;
+
+  printf("\n");
+  cpuid(&eax,&ebx,&ecx,&edx, 0,0);
+  maxidx = eax;
+  for (i = 1; i <= maxidx +5; i++) {
+
+    UInt subleaf = 0;
+
+    if (i == 4) subleaf = 10;
+    if (i == 7) subleaf = 10;
+    if (i == 0xB) subleaf = 10;
+    if (i == 0xD) subleaf = 10;
+
+    if (subleaf > 0) printf("\n");
+
+    cpuid(&eax,&ebx,&ecx,&edx, i,0);
+
+    for (ecx_in = 1; ecx_in < subleaf; ecx_in++) {
+       cpuid(&eax,&ebx,&ecx,&edx, i,ecx_in);
+    }
+
+    if (subleaf > 0) printf("\n");
+
+  }
+
+  printf("\n");
+
+  cpuid(&eax,&ebx,&ecx,&edx, 0x80000000,0);
+  maxextidx = eax;
+  for (i = 0x80000001; i <= maxextidx +5; i++) {
+     cpuid(&eax,&ebx,&ecx,&edx, i,0);
+  }
+
+  printf("invalid\n");
+  cpuid(&eax,&ebx,&ecx,&edx, 1234,0);
+  cpuid(&eax,&ebx,&ecx,&edx, 0x800004d3,0);
+
+
+  return 0;
+}
diff --git a/VEX/useful/fp_80_64.c b/VEX/useful/fp_80_64.c
new file mode 100644
index 0000000..2c328b7
--- /dev/null
+++ b/VEX/useful/fp_80_64.c
@@ -0,0 +1,636 @@
+
+#ifndef USED_AS_INCLUDE
+
+#include "../pub/libvex_basictypes.h"
+#include <stdio.h>
+#include <malloc.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+
+/* Test program for developing code for conversions between
+   x87 64-bit and 80-bit floats.
+
+   80-bit format exists only for x86/x86-64, and so the routines
+   hardwire it as little-endian.  The 64-bit format (IEEE double)
+   could exist on any platform, little or big-endian and so we
+   have to take that into account.  IOW, these routines have to
+   work correctly when compiled on both big- and little-endian
+   targets, but the 80-bit images only ever have to exist in
+   little-endian format. 
+*/
+static void show_f80 ( UChar* );
+static void show_f64 ( UChar* );
+
+static inline
+UInt read_bit_array ( UChar* arr, UInt n )
+{
+   UChar c = arr[n >> 3];
+   c >>= (n&7);
+   return c & 1;
+}
+
+static inline
+void write_bit_array ( UChar* arr, UInt n, UInt b )
+{
+   UChar c = arr[n >> 3];
+   c &= ~(1 << (n&7));
+   c |= ((b&1) << (n&7));
+   arr[n >> 3] = c;
+}
+
+
+static void convert_f80le_to_f64le_HW ( /*IN*/UChar* f80, /*OUT*/UChar* f64 )
+{
+  asm volatile ("ffree %%st(7); fldt (%0); fstpl (%1)"
+                :
+                : "r" (&f80[0]), "r" (&f64[0])
+                : "memory" );
+}
+
+static void convert_f64le_to_f80le_HW ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )
+{
+  asm volatile ("ffree %%st(7); fldl (%0); fstpt (%1)"
+                :
+                : "r" (&f64[0]), "r" (&f80[0])
+                : "memory" );
+}
+
+#endif /* ndef USED_AS_INCLUDE */
+
+
+
+/* 80 and 64-bit floating point formats:
+
+   80-bit:
+
+    S  0       0-------0      zero
+    S  0       0X------X      denormals
+    S  1-7FFE  1X------X      normals (all normals have leading 1)
+    S  7FFF    10------0      infinity
+    S  7FFF    10X-----X      snan
+    S  7FFF    11X-----X      qnan
+
+   S is the sign bit.  For runs X----X, at least one of the Xs must be
+   nonzero.  Exponent is 15 bits, fractional part is 63 bits, and
+   there is an explicitly represented leading 1, and a sign bit,
+   giving 80 in total.
+
+   64-bit avoids the confusion of an explicitly represented leading 1
+   and so is simpler:
+
+    S  0      0------0   zero
+    S  0      X------X   denormals
+    S  1-7FE  any        normals
+    S  7FF    0------0   infinity
+    S  7FF    0X-----X   snan
+    S  7FF    1X-----X   qnan
+
+   Exponent is 11 bits, fractional part is 52 bits, and there is a 
+   sign bit, giving 64 in total.
+*/
+
+/* Convert a IEEE754 double (64-bit) into an x87 extended double
+   (80-bit), mimicking the hardware fairly closely.  Both numbers are
+   stored little-endian.  Limitations, all of which could be fixed,
+   given some level of hassle:
+
+   * Identity of NaNs is not preserved.
+
+   See comments in the code for more details.  
+*/
+static void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )
+{
+   Bool  mantissaIsZero;
+   Int   bexp, i, j, shift;
+   UChar sign;
+
+   sign = toUChar( (f64[7] >> 7) & 1 );
+   bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);
+   bexp &= 0x7FF;
+
+   mantissaIsZero = False;
+   if (bexp == 0 || bexp == 0x7FF) {
+      /* We'll need to know whether or not the mantissa (bits 51:0) is
+         all zeroes in order to handle these cases.  So figure it
+         out. */
+      mantissaIsZero
+         = toBool( 
+              (f64[6] & 0x0F) == 0 
+              && f64[5] == 0 && f64[4] == 0 && f64[3] == 0 
+              && f64[2] == 0 && f64[1] == 0 && f64[0] == 0
+           );
+   }
+
+   /* If the exponent is zero, either we have a zero or a denormal.
+      Produce a zero.  This is a hack in that it forces denormals to
+      zero.  Could do better. */
+   if (bexp == 0) {
+      f80[9] = toUChar( sign << 7 );
+      f80[8] = f80[7] = f80[6] = f80[5] = f80[4]
+             = f80[3] = f80[2] = f80[1] = f80[0] = 0;
+
+      if (mantissaIsZero)
+         /* It really is zero, so that's all we can do. */
+         return;
+
+      /* There is at least one 1-bit in the mantissa.  So it's a
+         potentially denormalised double -- but we can produce a
+         normalised long double.  Count the leading zeroes in the
+         mantissa so as to decide how much to bump the exponent down
+         by.  Note, this is SLOW. */
+      shift = 0;
+      for (i = 51; i >= 0; i--) {
+        if (read_bit_array(f64, i))
+           break;
+        shift++;
+      }
+
+      /* and copy into place as many bits as we can get our hands on. */
+      j = 63;
+      for (i = 51 - shift; i >= 0; i--) {
+         write_bit_array( f80, j,
+     	 read_bit_array( f64, i ) );
+         j--;
+      }
+
+      /* Set the exponent appropriately, and we're done. */
+      bexp -= shift;
+      bexp += (16383 - 1023);
+      f80[9] = toUChar( (sign << 7) | ((bexp >> 8) & 0xFF) );
+      f80[8] = toUChar( bexp & 0xFF );
+      return;
+   }
+
+   /* If the exponent is 7FF, this is either an Infinity, a SNaN or
+      QNaN, as determined by examining bits 51:0, thus:
+          0  ... 0    Inf
+          0X ... X    SNaN
+          1X ... X    QNaN
+      where at least one of the Xs is not zero.
+   */
+   if (bexp == 0x7FF) {
+      if (mantissaIsZero) {
+         /* Produce an appropriately signed infinity:
+            S 1--1 (15)  1  0--0 (63)
+         */
+         f80[9] = toUChar( (sign << 7) | 0x7F );
+         f80[8] = 0xFF;
+         f80[7] = 0x80;
+         f80[6] = f80[5] = f80[4] = f80[3] 
+                = f80[2] = f80[1] = f80[0] = 0;
+         return;
+      }
+      /* So it's either a QNaN or SNaN.  Distinguish by considering
+         bit 51.  Note, this destroys all the trailing bits
+         (identity?) of the NaN.  IEEE754 doesn't require preserving
+         these (it only requires that there be one QNaN value and one
+         SNaN value), but x87 does seem to have some ability to
+         preserve them.  Anyway, here, the NaN's identity is
+         destroyed.  Could be improved. */
+      if (f64[6] & 8) {
+         /* QNaN.  Make a QNaN:
+            S 1--1 (15)  1  1--1 (63) 
+         */
+         f80[9] = toUChar( (sign << 7) | 0x7F );
+         f80[8] = 0xFF;
+         f80[7] = 0xFF;
+         f80[6] = f80[5] = f80[4] = f80[3] 
+                = f80[2] = f80[1] = f80[0] = 0xFF;
+      } else {
+         /* SNaN.  Make a SNaN:
+            S 1--1 (15)  0  1--1 (63) 
+         */
+         f80[9] = toUChar( (sign << 7) | 0x7F );
+         f80[8] = 0xFF;
+         f80[7] = 0x7F;
+         f80[6] = f80[5] = f80[4] = f80[3] 
+                = f80[2] = f80[1] = f80[0] = 0xFF;
+      }
+      return;
+   }
+
+   /* It's not a zero, denormal, infinity or nan.  So it must be a
+      normalised number.  Rebias the exponent and build the new
+      number.  */
+   bexp += (16383 - 1023);
+
+   f80[9] = toUChar( (sign << 7) | ((bexp >> 8) & 0xFF) );
+   f80[8] = toUChar( bexp & 0xFF );
+   f80[7] = toUChar( (1 << 7) | ((f64[6] << 3) & 0x78) 
+                              | ((f64[5] >> 5) & 7) );
+   f80[6] = toUChar( ((f64[5] << 3) & 0xF8) | ((f64[4] >> 5) & 7) );
+   f80[5] = toUChar( ((f64[4] << 3) & 0xF8) | ((f64[3] >> 5) & 7) );
+   f80[4] = toUChar( ((f64[3] << 3) & 0xF8) | ((f64[2] >> 5) & 7) );
+   f80[3] = toUChar( ((f64[2] << 3) & 0xF8) | ((f64[1] >> 5) & 7) );
+   f80[2] = toUChar( ((f64[1] << 3) & 0xF8) | ((f64[0] >> 5) & 7) );
+   f80[1] = toUChar( ((f64[0] << 3) & 0xF8) );
+   f80[0] = toUChar( 0 );
+}
+
+
+/* Convert a x87 extended double (80-bit) into an IEEE 754 double
+   (64-bit), mimicking the hardware fairly closely.  Both numbers are
+   stored little-endian.  Limitations, both of which could be fixed,
+   given some level of hassle:
+
+   * Rounding following truncation could be a bit better.
+
+   * Identity of NaNs is not preserved.
+
+   See comments in the code for more details.
+*/
+static void convert_f80le_to_f64le ( /*IN*/UChar* f80, /*OUT*/UChar* f64 )
+{
+   Bool  isInf;
+   Int   bexp, i, j;
+   UChar sign;
+
+   sign = toUChar((f80[9] >> 7) & 1);
+   bexp = (((UInt)f80[9]) << 8) | (UInt)f80[8];
+   bexp &= 0x7FFF;
+
+   /* If the exponent is zero, either we have a zero or a denormal.
+      But an extended precision denormal becomes a double precision
+      zero, so in either case, just produce the appropriately signed
+      zero. */
+   if (bexp == 0) {
+      f64[7] = toUChar(sign << 7);
+      f64[6] = f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+      return;
+   }
+   
+   /* If the exponent is 7FFF, this is either an Infinity, a SNaN or
+      QNaN, as determined by examining bits 62:0, thus:
+          0  ... 0    Inf
+          0X ... X    SNaN
+          1X ... X    QNaN
+      where at least one of the Xs is not zero.
+   */
+   if (bexp == 0x7FFF) {
+      isInf = toBool(
+                 (f80[7] & 0x7F) == 0 
+                 && f80[6] == 0 && f80[5] == 0 && f80[4] == 0 
+                 && f80[3] == 0 && f80[2] == 0 && f80[1] == 0 
+                 && f80[0] == 0
+              );
+      if (isInf) {
+         if (0 == (f80[7] & 0x80))
+            goto wierd_NaN;
+         /* Produce an appropriately signed infinity:
+            S 1--1 (11)  0--0 (52)
+         */
+         f64[7] = toUChar((sign << 7) | 0x7F);
+         f64[6] = 0xF0;
+         f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+         return;
+      }
+      /* So it's either a QNaN or SNaN.  Distinguish by considering
+         bit 62.  Note, this destroys all the trailing bits
+         (identity?) of the NaN.  IEEE754 doesn't require preserving
+         these (it only requires that there be one QNaN value and one
+         SNaN value), but x87 does seem to have some ability to
+         preserve them.  Anyway, here, the NaN's identity is
+         destroyed.  Could be improved. */
+      if (f80[8] & 0x40) {
+         /* QNaN.  Make a QNaN:
+            S 1--1 (11)  1  1--1 (51) 
+         */
+         f64[7] = toUChar((sign << 7) | 0x7F);
+         f64[6] = 0xFF;
+         f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0xFF;
+      } else {
+         /* SNaN.  Make a SNaN:
+            S 1--1 (11)  0  1--1 (51) 
+         */
+         f64[7] = toUChar((sign << 7) | 0x7F);
+         f64[6] = 0xF7;
+         f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0xFF;
+      }
+      return;
+   }
+
+   /* If it's not a Zero, NaN or Inf, and the integer part (bit 62) is
+      zero, the x87 FPU appears to consider the number denormalised
+      and converts it to a QNaN. */
+   if (0 == (f80[7] & 0x80)) {
+      wierd_NaN:
+      /* Strange hardware QNaN:
+         S 1--1 (11)  1  0--0 (51) 
+      */
+      /* On a PIII, these QNaNs always appear with sign==1.  I have
+         no idea why. */
+      f64[7] = (1 /*sign*/ << 7) | 0x7F;
+      f64[6] = 0xF8;
+      f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+      return;
+   }
+
+   /* It's not a zero, denormal, infinity or nan.  So it must be a 
+      normalised number.  Rebias the exponent and consider. */
+   bexp -= (16383 - 1023);
+   if (bexp >= 0x7FF) {
+      /* It's too big for a double.  Construct an infinity. */
+      f64[7] = toUChar((sign << 7) | 0x7F);
+      f64[6] = 0xF0;
+      f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+      return;
+   }
+
+   if (bexp <= 0) {
+      /* It's too small for a normalised double.  First construct a
+         zero and then see if it can be improved into a denormal.  */
+      f64[7] = toUChar(sign << 7);
+      f64[6] = f64[5] = f64[4] = f64[3] = f64[2] = f64[1] = f64[0] = 0;
+
+      if (bexp < -52)
+         /* Too small even for a denormal. */
+         return;
+
+      /* Ok, let's make a denormal.  Note, this is SLOW. */
+      /* Copy bits 63, 62, 61, etc of the src mantissa into the dst, 
+         indexes 52+bexp, 51+bexp, etc, until k+bexp < 0. */
+      /* bexp is in range -52 .. 0 inclusive */
+      for (i = 63; i >= 0; i--) {
+         j = i - 12 + bexp;
+         if (j < 0) break;
+         /* We shouldn't really call vassert from generated code. */
+         assert(j >= 0 && j < 52);
+         write_bit_array ( f64,
+                           j,
+                           read_bit_array ( f80, i ) );
+      }
+      /* and now we might have to round ... */
+      if (read_bit_array(f80, 10+1 - bexp) == 1) 
+         goto do_rounding;
+
+      return;
+   }
+
+   /* Ok, it's a normalised number which is representable as a double.
+      Copy the exponent and mantissa into place. */
+   /*
+   for (i = 0; i < 52; i++)
+      write_bit_array ( f64,
+                        i,
+                        read_bit_array ( f80, i+11 ) );
+   */
+   f64[0] = toUChar( (f80[1] >> 3) | (f80[2] << 5) );
+   f64[1] = toUChar( (f80[2] >> 3) | (f80[3] << 5) );
+   f64[2] = toUChar( (f80[3] >> 3) | (f80[4] << 5) );
+   f64[3] = toUChar( (f80[4] >> 3) | (f80[5] << 5) );
+   f64[4] = toUChar( (f80[5] >> 3) | (f80[6] << 5) );
+   f64[5] = toUChar( (f80[6] >> 3) | (f80[7] << 5) );
+
+   f64[6] = toUChar( ((bexp << 4) & 0xF0) | ((f80[7] >> 3) & 0x0F) );
+
+   f64[7] = toUChar( (sign << 7) | ((bexp >> 4) & 0x7F) );
+
+   /* Now consider any rounding that needs to happen as a result of
+      truncating the mantissa. */
+   if (f80[1] & 4) /* read_bit_array(f80, 10) == 1) */ {
+
+      /* If the bottom bits of f80 are "100 0000 0000", then the
+         infinitely precise value is deemed to be mid-way between the
+         two closest representable values.  Since we're doing
+         round-to-nearest (the default mode), in that case it is the
+         bit immediately above which indicates whether we should round
+         upwards or not -- if 0, we don't.  All that is encapsulated
+         in the following simple test. */
+      if ((f80[1] & 0xF) == 4/*0100b*/ && f80[0] == 0)
+         return;
+
+      do_rounding:
+      /* Round upwards.  This is a kludge.  Once in every 2^24
+         roundings (statistically) the bottom three bytes are all 0xFF
+         and so we don't round at all.  Could be improved. */
+      if (f64[0] != 0xFF) { 
+         f64[0]++; 
+      }
+      else 
+      if (f64[0] == 0xFF && f64[1] != 0xFF) {
+         f64[0] = 0;
+         f64[1]++;
+      }
+      else      
+      if (f64[0] == 0xFF && f64[1] == 0xFF && f64[2] != 0xFF) {
+         f64[0] = 0;
+         f64[1] = 0;
+         f64[2]++;
+      }
+      /* else we don't round, but we should. */
+   }
+}
+
+
+#ifndef USED_AS_INCLUDE
+
+//////////////
+
+static void show_f80 ( UChar* f80 )
+{
+  Int i;
+  printf("%d ", read_bit_array(f80, 79));
+
+  for (i = 78; i >= 64; i--)
+    printf("%d", read_bit_array(f80, i));
+
+  printf(" %d ", read_bit_array(f80, 63));
+
+  for (i = 62; i >= 0; i--)
+    printf("%d", read_bit_array(f80, i));
+}
+
+static void show_f64le ( UChar* f64 )
+{
+  Int i;
+  printf("%d     ", read_bit_array(f64, 63));
+
+  for (i = 62; i >= 52; i--)
+    printf("%d", read_bit_array(f64, i));
+
+  printf("   ");
+  for (i = 51; i >= 0; i--)
+    printf("%d", read_bit_array(f64, i));
+}
+
+//////////////
+
+
+/* Convert f80 to a 64-bit IEEE double using both the hardware and the
+   soft version, and compare the results.  If they differ, print
+   details and return 1.  If they are identical, return 0.
+*/
+int do_80_to_64_test ( Int test_no, UChar* f80, UChar* f64h, UChar* f64s)
+{
+   Char buf64s[100], buf64h[100];
+   Bool same;
+   Int k;
+   convert_f80le_to_f64le_HW(f80, f64h);
+   convert_f80le_to_f64le(f80, f64s);
+   same = True;
+   for (k = 0; k < 8; k++) {
+      if (f64s[k] != f64h[k]) {
+         same = False; break;
+      }
+   }
+   /* bitwise identical */
+   if (same)
+      return 0;
+
+   sprintf(buf64s, "%.16e", *(double*)f64s);
+   sprintf(buf64h, "%.16e", *(double*)f64h);
+
+   /* Not bitwise identical, but pretty darn close */
+   if (0 == strcmp(buf64s, buf64h))
+      return 0;
+
+    printf("\n");
+    printf("f80:  "); show_f80(f80); printf("\n");
+    printf("f64h: "); show_f64le(f64h); printf("\n");
+    printf("f64s: "); show_f64le(f64s); printf("\n");
+
+    printf("[test %d]  %.16Le -> (hw %s, sw %s)\n", 
+           test_no, *(long double*)f80,
+           buf64h, buf64s );
+
+    return 1;
+}
+
+
+/* Convert an IEEE 64-bit double to a x87 extended double (80 bit)
+   using both the hardware and the soft version, and compare the
+   results.  If they differ, print details and return 1.  If they are
+   identical, return 0.  
+*/
+int do_64_to_80_test ( Int test_no, UChar* f64, UChar* f80h, UChar* f80s)
+{
+   Char buf80s[100], buf80h[100];
+   Bool same;
+   Int k;
+   convert_f64le_to_f80le_HW(f64, f80h);
+   convert_f64le_to_f80le(f64, f80s);
+   same = True;
+   for (k = 0; k < 10; k++) {
+      if (f80s[k] != f80h[k]) {
+         same = False; break;
+      }
+   }
+   /* bitwise identical */
+   if (same)
+      return 0;
+
+   sprintf(buf80s, "%.20Le", *(long double*)f80s);
+   sprintf(buf80h, "%.20Le", *(long double*)f80h);
+
+   /* Not bitwise identical, but pretty darn close */
+   if (0 == strcmp(buf80s, buf80h))
+      return 0;
+
+    printf("\n");
+    printf("f64:  "); show_f64le(f64); printf("\n");
+    printf("f80h: "); show_f80(f80h); printf("\n");
+    printf("f80s: "); show_f80(f80s); printf("\n");
+
+    printf("[test %d]  %.16e -> (hw %s, sw %s)\n", 
+           test_no, *(double*)f64,
+           buf80h, buf80s );
+
+    return 1;
+}
+
+
+
+void do_80_to_64_tests ( void )
+{
+   UInt b9,b8,b7,i, j;
+   Int fails=0, tests=0;
+   UChar* f64h = malloc(8);
+   UChar* f64s = malloc(8);
+   UChar* f80  = malloc(10);
+   int STEP = 1;
+
+   srandom(4343);
+
+   /* Ten million random bit patterns */
+   for (i = 0; i < 10000000; i++) {
+     tests++;
+     for (j = 0; j < 10; j++)
+       f80[j] = (random() >> 7) & 255;
+
+     fails += do_80_to_64_test(tests, f80, f64h, f64s);
+   }
+
+   /* 2^24 numbers in which the first 24 bits are tested exhaustively
+      -- this covers the sign, exponent and leading part of the
+      mantissa. */
+   for (b9 = 0; b9 < 256; b9 += STEP) {
+      for (b8 = 0; b8 < 256; b8 += STEP) {
+         for (b7 = 0; b7 < 256; b7 += STEP) {
+           tests++;
+            for (i = 0; i < 10; i++) 
+               f80[i] = 0;
+            for (i = 0; i < 8; i++)
+               f64h[i] = f64s[i] = 0;
+            f80[9] = b9;
+            f80[8] = b8;
+            f80[7] = b7;
+
+    fails += do_80_to_64_test(tests, f80, f64h, f64s);
+   }}}
+
+   printf("\n80 -> 64:  %d tests, %d fails\n\n", tests, fails);
+}
+
+
+void do_64_to_80_tests ( void )
+{
+   UInt b7,b6,b5,i, j;
+   Int fails=0, tests=0;
+   UChar* f80h = malloc(10);
+   UChar* f80s = malloc(10);
+   UChar* f64  = malloc(8);
+   int STEP = 1;
+
+   srandom(2323);
+
+   /* Ten million random bit patterns */
+   for (i = 0; i < 10000000; i++) {
+     tests++;
+     for (j = 0; j < 8; j++)
+       f64[j] = (random() >> 13) & 255;
+
+     fails += do_64_to_80_test(tests, f64, f80h, f80s);
+   }
+
+   /* 2^24 numbers in which the first 24 bits are tested exhaustively
+      -- this covers the sign, exponent and leading part of the
+      mantissa. */
+   for (b7 = 0; b7 < 256; b7 += STEP) {
+      for (b6 = 0; b6 < 256; b6 += STEP) {
+         for (b5 = 0; b5 < 256; b5 += STEP) {
+           tests++;
+            for (i = 0; i < 8; i++) 
+               f64[i] = 0;
+            for (i = 0; i < 10; i++)
+               f80h[i] = f80s[i] = 0;
+            f64[7] = b7;
+            f64[6] = b6;
+            f64[5] = b5;
+
+    fails += do_64_to_80_test(tests, f64, f80h, f80s);
+   }}}
+
+   printf("\n64 -> 80:  %d tests, %d fails\n\n", tests, fails);
+}
+
+
+int main ( void )
+{
+   do_80_to_64_tests();
+   do_64_to_80_tests();
+   return 0;
+}
+
+#endif /* ndef USED_AS_INCLUDE */
diff --git a/VEX/useful/fpround.c b/VEX/useful/fpround.c
new file mode 100644
index 0000000..3a186bb
--- /dev/null
+++ b/VEX/useful/fpround.c
@@ -0,0 +1,13 @@
+
+#include <stdio.h>
+
/* Print i, d and the truncation (int)d for 30 steps of d += 0.11,
   exposing how float rounding interacts with int truncation. */
int main ( void )
{
  int i;
  double d;
  for (i = 0, d = 0.0; i < 30; i++, d += 0.11) {
    printf("i = %d,  d = %f,  (int)d = %d\n", i, d, (int)d);
  }
  return 0;
}
diff --git a/VEX/useful/fspill.c b/VEX/useful/fspill.c
new file mode 100644
index 0000000..41e7eda
--- /dev/null
+++ b/VEX/useful/fspill.c
@@ -0,0 +1,19 @@
+
+#include <stdio.h>
+
/* Accumulate eight separate long-double registers over ten
   iterations and return their sum.  Eight live accumulators are used
   deliberately to pressure the FP register allocator into spilling. */
double qq ( void )
{
   int k;
   long double r1 = 0.0, r2 = 0.0, r3 = 0.0, r4 = 0.0;
   long double r5 = 0.0, r6 = 0.0, r7 = 0.0, r8 = 0.0;
   for (k = 0; k < 10; k++) {
      r1 += 1.1;
      r2 += 1.2;
      r3 += 1.3;
      r4 += 1.4;
      r5 += 1.5;
      r6 += 1.6;
      r7 += 1.7;
      r8 += 1.8001;
   }
   return r1 + r2 + r3 + r4 + r5 + r6 + r7 + r8;
}
+
/* Print qq()'s sum both as a double and as its int truncation. */
int main ( void )
{
   double sum = qq();
   printf("answer is %f %d\n", sum, (int)sum);
   return 0;
}
diff --git a/VEX/useful/gradual_underflow.c b/VEX/useful/gradual_underflow.c
new file mode 100644
index 0000000..53886d8
--- /dev/null
+++ b/VEX/useful/gradual_underflow.c
@@ -0,0 +1,15 @@
+
+#include <stdio.h>
+
/* Repeatedly shrink d by a factor ~0.101 and print each value,
   walking it down into the gradual-underflow (denormal) range. */
int main ( void )
{
   int i;
   double d = 7.25063790881233303e-303;
   for (i = 0; i < 26; i++) {
      printf("%.16e\n", d);
      d *= 0.1012198489248992489422;
   }
   return 0;
}
+
diff --git a/VEX/useful/hd_fpu.c b/VEX/useful/hd_fpu.c
new file mode 100644
index 0000000..3007029
--- /dev/null
+++ b/VEX/useful/hd_fpu.c
@@ -0,0 +1,1707 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Implementation of the floating point instruction set.        ---*/
+/*---                                                     hd_fpu.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Heimdall, an x86 protected-mode emulator 
+   designed for debugging and profiling binaries on x86-Unixes.
+
+   Copyright (C) 2000 Julian Seward 
+      jseward@acm.org
+      Julian_Seward@muraroa.demon.co.uk
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file LICENSE.
+*/
+
+#include "hd_include.h"
+
+
+/* ---------------------------------------------------------------------
+   Packing and unpacking the FPU data registers.
+   ------------------------------------------------------------------ */
+
/* Return the 3-bit top-of-stack field (0..7) from the FPU status
   word. */
INLINE
UInt fp_get_tos ( void )
{
   return (m_fpu_state.env[FP_ENV_STAT] >> FP_F_TOS_LO) & 7;
}
+
+static
+UInt read_bit_array ( UChar* arr, UInt n )
+{
+   UChar c = arr[n >> 3];
+   c >>= (n&7);
+   return c & 1;
+}
+
+static
+void write_bit_array ( UChar* arr, UInt n, UInt b )
+{
+   UChar c = arr[n >> 3];
+   c &= ~(1 << (n&7));
+   b &= 1;
+   c |=  (b << (n&7));
+   arr[n >> 3] = c;
+}
+
+/* Read an IEEE double from the memory image of an Intel 80-bit
+   extended floating-point number.
+*/
/* Convert the 10-byte little-endian image of an Intel 80-bit
   extended real at e_lsb into an IEEE double.  The significand's low
   11 bits and the explicit integer bit (bit 63) are simply dropped
   (truncation, not rounding); out-of-range exponents are clamped. */
static
double fp_double_from_extended ( UChar* e_lsb )
{
   int i;
   double d;
   UChar* d_lsb = (UChar*)(&d);

   /* Sign is the top bit of byte 9; the 15-bit biased exponent
      occupies the remaining bits of bytes 9..8. */
   UInt sign = e_lsb[9] >> 7;
   Int bexp = ((UInt)e_lsb[9] << 8) | (UInt)e_lsb[8];
   bexp &= 0x7fff;

   if (bexp == 0) 
      bexp = 0;  /* preserve zeroes */
   else 
   if (bexp == 0x7FFF) 
      bexp = 0x7FF; /* preserve Infs/Nans */
   else {
      /* Rebias from extended (16383) to double (1023), clamping
         anything that falls outside the double exponent range. */
      bexp -= (16383 - 1023);
      if (bexp < 0) bexp = 0;
      if (bexp > 0x7FF) bexp = 0x7FF;
   }

   /* Assemble sign and 11-bit exponent into the top two bytes. */
   d_lsb[6] = (bexp & 0xF) << 4;
   d_lsb[7] = ((bexp >> 4) & 0x7F) | ((sign & 0x1) << 7);

   /* Copy extended significand bits 11..62 into double mantissa
      bits 0..51 (skipping the low 11 bits and bit 63). */
   for (i = 0; i < 52; i++)
      write_bit_array ( d_lsb,
                        i,
                        read_bit_array ( e_lsb, i+11 ) );
   return d;
}
+
+/* Given an IEEE double, create the memory image of an Intel 80-bit
+   extended floating-point number.
+*/
/* Write the 10-byte little-endian 80-bit extended-real image of IEEE
   double d to e_lsb.  The 52 mantissa bits are placed at the top of
   the 64-bit significand; the low 11 significand bits are zeroed. */
static
void fp_extended_from_double ( UChar* e_lsb, double d )
{
   int i;
   UChar* d_lsb = (UChar*)(&d);

   /* Extract sign and 11-bit biased exponent from bytes 7..6. */
   UInt sign = d_lsb[7] >> 7;
   Int bexp = ((UInt)d_lsb[7] << 4) |
               ((((UInt)d_lsb[6]) >> 4) & 0xF);
   bexp &= 0x7ff;

   if (bexp == 0) 
      bexp = 0;  /* preserve zeroes */
   else 
   if (bexp == 0x7FF) 
      bexp = 0x7FFF; /* preserve Infs/Nans */
   else
      bexp += (16383 - 1023);   /* rebias 1023 -> 16383 */

   e_lsb[9] = ((bexp >> 8) & 0x7F) | ((sign & 0x1) << 7);
   e_lsb[8] = bexp & 0xFF;

   /* Mantissa: double bits 0..51 -> extended bits 11..62; the low
      11 extended bits are cleared. */
   for (i = 0; i < 52; i++)
      write_bit_array ( e_lsb,
                        i+11,
                        read_bit_array ( d_lsb, i ) );
   for (i = 0; i < 11; i++)
      write_bit_array ( e_lsb, i, 0 );

   /* this isn't really right, but I can't get fpclassify to work. */
   /* Explicit integer bit (bit 63): set for any nonzero/Inf/NaN
      value; denormals are therefore not represented correctly. */
   i = 0;
   if (isnan(d) || isinf(d) || d != 0.0) i = 1;
   write_bit_array ( e_lsb, 63, i );
}
+
+/* For the transition Real CPU -> Simulated CPU, copy the 
+   .reg values in m_fpu_state, which are in stack order, to
+   the m_fpu_data_regs array, in register (non-stack) order.
+*/
+void fp_unpack_data_regs ( void )
+{
+   Int reg, st;
+   reg = fp_get_tos();
+   for (st = 0; st < 8; st++) {
+      m_fpu_data_regs[reg]
+         = fp_double_from_extended ( &m_fpu_state.reg[FP_REG(st)] );
+      if (reg == 7) reg = 0; else reg++;
+   }
+}
+
+void fp_repack_data_regs ( void )
+{
+   Int reg, st;
+   st = fp_get_tos();
+   for (reg = 0; reg < 8; reg++) {
+      fp_extended_from_double ( &m_fpu_state.reg[FP_REG(reg)], 
+                                m_fpu_data_regs[st] );
+      if (st == 7) st = 0; else st++;
+   }
+}
+
+/* ---------------------------------------------------------------------
+   Helper functions for the floating point unit.
+   ------------------------------------------------------------------ */
+
/* Store f to guest memory at addr as a 32-bit float (narrowing). */
static
INLINE
void setFMem ( UInt addr, double f )
{
   * ((float*)addr) = (float)f;
}
+
/* Load a 32-bit float from guest memory at addr, widened to
   double. */
static
INLINE
double getFMem ( UInt addr )
{
   return (double) (* ((float*)addr));
}
+
/* Store f to guest memory at addr as a 64-bit double. */
static
INLINE
void setDMem ( UInt addr, double f )
{
   * ((double*)addr) = f;
}
+
/* Load a 64-bit double from guest memory at addr. */
static
INLINE
double getDMem ( UInt addr )
{
   return (* ((double*)addr));
}
+
/* Store f to guest memory at addr as an 80-bit extended real.
   NOTE(review): the (Addr) cast is passed where a UChar* parameter
   is declared -- assumes Addr is a byte-pointer type (do_one_insn_fp
   dereferences an Addr directly); confirm against hd_include.h. */
static
INLINE
void setTMem ( UInt addr, double f )
{
   fp_extended_from_double ( (Addr)addr, f );
}
+
/* Load an 80-bit extended real from guest memory at addr, converted
   to double.  NOTE(review): same (Addr)-for-UChar* cast assumption
   as setTMem -- confirm Addr is a byte-pointer type. */
static
INLINE
double getTMem ( UInt addr )
{
   return fp_double_from_extended ( (Addr)addr );
}
+
+#define fp_extended_from_double ERROR__fp_extended_from_double_used
+#define fp_double_from_extended ERROR__fp_double_from_extended_used
+
+static
+INLINE
+UInt fp_get_statusword_flag ( UInt flagno )
+{
+   if (flagno < 0 || flagno > 15) panic("fp_get_statusword_flag");
+   return (m_fpu_state.env[FP_ENV_STAT] >> flagno) & 0x1;
+}
+
#if DEBUG
/* Return the given FPU control-word bit (0 or 1).
   Fix: flagno is unsigned, so the `flagno < 0` test was always
   false. */
static
UInt fp_get_controlword_flag ( UInt flagno )
{
   if (flagno > 15) panic("fp_get_controlword_flag");
   return (m_fpu_state.env[FP_ENV_CTRL] >> flagno) & 0x1;
}
#endif
+
+static
+INLINE
+void fp_set_statusword_flag_to ( UInt flagno, UInt bit )
+{
+   if (flagno < 0 || flagno > 15) panic("fp_set_statusword_flag_to");
+   if (bit)
+      m_fpu_state.env[FP_ENV_STAT] |= (1 << flagno);
+   else
+      m_fpu_state.env[FP_ENV_STAT] &= ~(1 << flagno);
+}
+
/* Record an FPU stack overflow: raise invalid-op and stack-fault in
   the status word, with C1=1 (the C1 value distinguishes overflow
   from underflow). */
static
void fp_set_stack_overflow ( void )
{
   fprintf(stderr, "--- FP STACK OVERFLOW!\n" );
   fp_set_statusword_flag_to(FP_E_INVAL,1);
   fp_set_statusword_flag_to(FP_E_STACKF,1);
   fp_set_statusword_flag_to(FP_F_C1,1);
}
+
/* Record an FPU stack underflow: raise invalid-op and stack-fault in
   the status word, with C1=0 (vs C1=1 for overflow). */
static
void fp_set_stack_underflow ( void )
{
   fprintf(stderr, "--- FP STACK UNDERFLOW!\n" );
   fp_set_statusword_flag_to(FP_E_INVAL,1);
   fp_set_statusword_flag_to(FP_E_STACKF,1);
   fp_set_statusword_flag_to(FP_F_C1,0);
}
+
+static
+INLINE
+void fp_set_tos ( UInt tos )
+{
+   if (tos < 0 || tos > 7) panic("fp_set_tos");
+   fp_set_statusword_flag_to(FP_F_TOS_LO,0);
+   fp_set_statusword_flag_to(FP_F_TOS_LO+1,0);
+   fp_set_statusword_flag_to(FP_F_TOS_HI,0);
+   m_fpu_state.env[FP_ENV_STAT] |= (tos << FP_F_TOS_LO);
+}
+
+static
+INLINE
+UInt fp_STno_to_regno ( UInt stregno )
+{
+   UInt regno = fp_get_tos();
+   assert(regno >= 0 && regno < 8);
+   regno += stregno;
+   if (regno >= 8) regno -= 8;
+   assert(regno >= 0 && regno < 8);
+   return regno;
+}
+
/* Decrement TOS modulo 8: the register currently addressed as ST(7)
   becomes the new top of stack. */
static
INLINE
void fp_dec_tos ( void )
{
   fp_set_tos ( fp_STno_to_regno ( 7 ));
}
+
/* Increment TOS modulo 8: ST(1) becomes the new top of stack. */
static
INLINE
void fp_inc_tos ( void )
{
   fp_set_tos ( fp_STno_to_regno ( 1 ));
}
+
/* True iff the 2-bit tag value marks an empty register. */
static
INLINE
Bool fp_is_empty_tag ( UInt tag )
{
   return tag == FP_TAG_EMPTY;
}
+
+static
+INLINE
+UInt fp_get_tag ( UInt regno )
+{
+   if (regno < 0 || regno > 7) panic("fp_get_tag");
+   return (m_fpu_state.env[FP_ENV_TAG] >> (2*regno)) & 3;
+}
+
+static
+INLINE
+UInt fp_get_tag_ST ( UInt stregno )
+{
+   if (stregno < 0 || stregno > 7) panic("fp_get_tag_ST");
+   return fp_get_tag ( fp_STno_to_regno(stregno) );
+}
+
+static
+INLINE
+void fp_set_tag ( UInt regno, UInt val )
+{
+   if (regno < 0 || regno > 7 ||
+       val < 0 || val > 3) panic("fp_get_tag");
+   m_fpu_state.env[FP_ENV_TAG] &= ~(3 << (2*regno));
+   m_fpu_state.env[FP_ENV_TAG] |=  (val << (2*regno));
+}
+
+static
+INLINE
+void fp_set_tag_ST ( UInt stregno, UInt val )
+{
+   if (stregno < 0 || stregno > 7) panic("fp_set_tag_ST");
+   fp_set_tag ( fp_STno_to_regno(stregno), val );
+}
+
+
+static
+INLINE
+void fp_set_reg ( UInt r, double d )
+{
+   if (r < 0 || r > 7) panic("fp_set_reg");
+   m_fpu_data_regs[r] = d;
+   fp_set_tag ( r, d==0.0 ? FP_TAG_ZERO 
+                          : (finite(d) ? FP_TAG_VALID : FP_TAG_SPEC) );
+}
+
+static
+INLINE
+void fp_set_reg_ST ( UInt str, double d )
+{
+   UInt r;
+   if (str < 0 || str > 7) panic("fp_set_reg_ST");
+   r = fp_STno_to_regno(str);
+   fp_set_reg ( r, d );
+}
+
+static
+INLINE
+double fp_get_reg ( UInt r )
+{
+   double d;
+   if (r < 0 || r > 7) panic("fp_get_reg");
+   d = m_fpu_data_regs[r];
+   return d;
+}
+
+static
+INLINE
+double fp_get_reg_ST ( UInt str )
+{
+   UInt r;
+   if (str < 0 || str > 7) panic("fp_get_reg_ST");
+   r = fp_STno_to_regno(str);
+   return fp_get_reg(r);
+}
+
/* Write d into the register currently at the top of the stack. */
static
INLINE
void fp_set_tos_reg ( double d )
{
   fp_set_reg ( fp_get_tos(), d );
}
+
/* Read the register currently at the top of the stack. */
static
INLINE
double fp_get_tos_reg ( void )
{
   return fp_get_reg ( fp_get_tos() );
}
+
/* Overwrite the top-of-stack register with a quiet NaN (used as the
   masked-exception default fixup). */
static
INLINE
void fp_set_tos_reg_QNaN ( void )
{
   fp_set_reg ( fp_get_tos(), NAN /* see <nan.h> */ );
}
+
/* Pop the FP stack: return ST(0), mark its register empty and
   advance TOS.  Does not itself detect underflow. */
static
INLINE
double fp_pop ( void )
{
   double d = fp_get_tos_reg();
   fp_set_tag ( fp_get_tos(), FP_TAG_EMPTY );
   fp_inc_tos();
   return d;
}
+
+/* Push d and update flags. */
+static
+INLINE
+void fp_push ( double d )
+{
+   if (fp_is_empty_tag(fp_get_tag_ST(7))) {
+      fp_dec_tos();
+      fp_set_tos_reg(d);
+      fp_set_statusword_flag_to(FP_F_C1, d == 0.0);
+   } else {
+      fp_dec_tos();
+      fp_set_tos_reg_QNaN();
+      fp_set_stack_overflow();
+   }
+}
+
+static
+void fp_set_statusword_flags_COM ( double vd_dst, double vd_src )
+{
+   UInt vis_dst;
+   if (isnan(vd_src) || isnan(vd_dst))  vis_dst = 7;
+   else if (vd_dst > vd_src)            vis_dst = 0;
+   else if (vd_dst < vd_src)            vis_dst = 1; 
+   else if (vd_dst == vd_src)           vis_dst = 4;
+   else vis_dst = 7;
+   fp_set_statusword_flag_to(FP_F_C3, (vis_dst >> 2) & 1);
+   fp_set_statusword_flag_to(FP_F_C2, (vis_dst >> 1) & 1);
+   fp_set_statusword_flag_to(FP_F_C0, vis_dst & 1);
+}
+
+static
+void fp_set_statusword_flags_COM_STACKF ( void )
+{
+   UInt vis_dst = 7;
+   fp_set_statusword_flag_to(FP_F_C3, (vis_dst >> 2) & 1);
+   fp_set_statusword_flag_to(FP_F_C2, (vis_dst >> 1) & 1);
+   fp_set_statusword_flag_to(FP_F_C0, vis_dst & 1);
+}
+
+static
+double fp_calc_yl2xp1 ( double st_0, double st_1 )
+{
+   st_0 += 1.0;
+   st_0 = log(st_0) / log(2.0);
+   st_0 *= st_1;
+   return st_0;
+}
+
+static
+double fp_calc_yl2x ( double st_0, double st_1 )
+{
+   st_0 = log(st_0) / log(2.0);
+   st_0 *= st_1;
+   return st_0;
+}
+
+static
+double fp_calc_2xm1 ( double st_0 )
+{
+   st_0 = st_0 * 0.69314718055994530942;
+   st_0 = exp(st_0);
+   st_0 = st_0 - 1.0;
+   return st_0;
+}
+
+static
+double fp_calc_scale ( double st_0, double st_1 )
+{
+   Int n = 0;
+   if (st_1 > 0.0) {
+      if (st_1 > 2.0*308.0) st_1 = 2.0*308.0;
+      n = (Int)(floor(st_1));
+      if (n < 0) n = 0;          /* impossible, but ... */
+      if (n > 2*308) n = 2*308;  /* limit exponent change */
+      while (n > 0) { n--; st_0 *= 2.0; };
+   } 
+   else 
+   if (st_1 < 0.0) {
+      if (st_1 < -2.0*308.0) st_1 = -2.0*308.0;
+      n = ((Int)(floor(-st_1)));
+      if (n < 0) n = 0;
+      if (n > 2*308) n = 2*308;
+      while (n > 0) { n--; st_0 *= 0.5; };
+   }
+   return st_0;
+}
+
/* FPREM: *qq = ST(0)/ST(1) truncated toward zero, and
   *result = ST(0) - ST(1) * (*qq).  The floor/negate dance is
   truncation-toward-zero for negative quotients. */
static
void fp_calc_fprem ( Int* qq, double* result, double st_0, double st_1 )
{
   double tmp = st_0 / st_1;
   if (tmp < 0)
      *qq = - (Int)floor(-tmp);
   else
      *qq = (Int)floor(tmp);
   *result = st_0 - (st_1 * (double)(*qq));
}
+
#if DEBUG
/* Debug dump of the entire simulated FPU state: the eight data
   registers (TOS marked **), control/status/tag words with decoded
   flag names, and the instruction/operand pointer fields. */
static 
void printFpuState ( void )
{
   Int i;
   assert(sizeof(Fpu_State)==108);
   for (i = 7; i >= 0; i--) {
      printf ( " %s fpreg%d: 0x", 
               (UInt)i == fp_get_tos() ? "**" : "  ", i );
      //for (j = FP_REG(i+1)-1; j >= FP_REG(i); j--)
      //   printf ( "%2x", (UInt)m_fpu_state.reg[j]);
      printf ( "  %5s  ", fp_tag_names[fp_get_tag(i)] );
      printf ( "%20.16e\n", fp_get_reg(i) );
   }
   printf("     fctrl:     0x%4x  masked: ", 
          (UInt)m_fpu_state.env[FP_ENV_CTRL] );
   for (i = FP_E_INVAL; i <= FP_E_LOS; i++)
      if (fp_get_controlword_flag(i))
         printf ( "%s ", fp_exception_names[i] );
   printf ( "\n" );

   printf("     fstat:     0x%4x  except:", 
          (UInt)m_fpu_state.env[FP_ENV_STAT] );
   for (i = FP_E_INVAL; i <= FP_E_LOS; i++)
      if (fp_get_statusword_flag(i))
         printf ( "%s ", fp_exception_names[i] );
   printf ( "  top: %d  ", fp_get_tos() );
   printf ( "c3210: %d%d%d%d",
            fp_get_statusword_flag(FP_F_C3),
            fp_get_statusword_flag(FP_F_C2),
            fp_get_statusword_flag(FP_F_C1),
            fp_get_statusword_flag(FP_F_C0) );
   printf ( "  STACKF: %d\n", fp_get_statusword_flag(FP_E_STACKF) );

   printf("      ftag:     0x%4x  ", (UInt)m_fpu_state.env[FP_ENV_TAG] );
   for (i = 7; i >= 0; i--)
      printf ( "%s ", fp_tag_names[fp_get_tag(i)] );
   printf("\n");

   /* 32-bit fields are stored as two 16-bit env halves (low, high). */
   printf("       fip: 0x%8x\n", 
           (((UInt)m_fpu_state.env[FP_ENV_IP+1]) << 16) |
            ((UInt)m_fpu_state.env[FP_ENV_IP]) );
   printf("       fcs:     0x%4x\n", 
           ((UInt)m_fpu_state.env[FP_ENV_CS]) );
   printf("    fopoff: 0x%8x\n", 
           (((UInt)m_fpu_state.env[FP_ENV_OPOFF+1]) << 16) |
            ((UInt)m_fpu_state.env[FP_ENV_OPOFF]) );
   printf("    fopsel:     0x%4x\n", 
           ((UInt)m_fpu_state.env[FP_ENV_OPSEL]) );
}
#endif
+
+/* ---------------------------------------------------------------------
+   Implementation of the floating point instruction set.
+   ------------------------------------------------------------------ */
+
+/* A pretty nasty kludge.  Arithmetic is done using standard IEEE 
+   doubles, which means that programs which rely on the extra accuracy
+   supplied by Intel's internal 80-bit format will get different
+   results.
+
+   To make exception handling tractable, we assume that the FPU is
+   running with all exceptions masked, so we do the "default fixup"
+   action for all exceptions.  Fortunately that's fairly simple.
+
+   Support for non-normal numbers (infinities, nans, denorms, etc) is 
+   minimal and probably wrong.
+*/
+
+typedef
+   enum { Fp_Add, Fp_Sub, Fp_Mul, Fp_Div, Fp_SubR, Fp_DivR }
+   Fp_Op;
+
#if DEBUG
/* Printable mnemonic fragment for an Fp_Op, used when disassembling
   in debug output. */
char* fp_Op_name ( Fp_Op op )
{
   switch (op) {
      case Fp_Add:  return "add";   case Fp_Sub:  return "sub";
      case Fp_Mul:  return "mul";   case Fp_Div:  return "div";
      case Fp_SubR: return "subr";  case Fp_DivR: return "divr";
      default: panic("fp_Op_name");
   }
   return NULL; /*notreached*/
}
#endif
+
/* Register-register arithmetic: ST(a_dst) := ST(a_dst) op ST(a_src)
   (operands swapped for the R variants).  If either register is
   empty, write a NaN and flag stack underflow instead.  Optionally
   pop afterwards. */
static
void fp_do_op_ST_ST ( UInt a_src, UInt a_dst, Fp_Op op, Bool pop )
{
   double vd_src, vd_dst;
   IFDB( if (dis) printf("\tf%s%s\t%%st(%d),%%st(%d)\n",
                         fp_Op_name(op), pop?"p":"",
                         a_src, a_dst ); )
   if (!fp_is_empty_tag(fp_get_tag_ST(a_src)) &&
       !fp_is_empty_tag(fp_get_tag_ST(a_dst))) {
      vd_dst = fp_get_reg_ST(a_dst);
      vd_src = fp_get_reg_ST(a_src);
      switch (op) {
         case Fp_Add:  vd_dst = vd_dst + vd_src; break;
         case Fp_Sub:  vd_dst = vd_dst - vd_src; break;
         case Fp_Mul:  vd_dst = vd_dst * vd_src; break;
         case Fp_Div:  vd_dst = vd_dst / vd_src; break;
         case Fp_SubR: vd_dst = vd_src - vd_dst; break;
         case Fp_DivR: vd_dst = vd_src / vd_dst; break;
         default: panic("fp_do_op_ST_ST");
      }      
   } else {
      /* masked default fixup: deliver a NaN */
      vd_dst = NAN;
      fp_set_stack_underflow();
   }
   fp_set_reg_ST(a_dst,vd_dst);
   if (pop) (void)fp_pop();
}
+
/* Register-register compare: set C3/C2/C0 from comparing ST(a_dst)
   with ST(a_src), or to the unordered pattern on an empty register;
   then pop nPops times (0, 1 or 2 for FCOM/FCOMP/FCOMPP). */
static
void fp_do_COM_ST_ST ( UInt a_src, UInt a_dst, UInt nPops )
{
   double vd_src, vd_dst;
   IFDB( if (dis) printf("\tfcom%s\t%%st(%d),%%st(%d)\n",
                         nPops==0 ? "" : (nPops==1 ? "p" : "pp"),
                         a_src, a_dst ); )
   if (!fp_is_empty_tag(fp_get_tag_ST(a_src)) &&
       !fp_is_empty_tag(fp_get_tag_ST(a_dst))) {
      vd_dst = fp_get_reg_ST(a_dst);
      vd_src = fp_get_reg_ST(a_src);
      fp_set_statusword_flags_COM(vd_dst,vd_src);
   } else {
      fp_set_statusword_flags_COM_STACKF();
      fp_set_stack_underflow();
   }
   while (nPops > 0) { 
      (void)fp_pop();
      nPops--;
   }
}
+
/* Memory-operand arithmetic: ST(0) := ST(0) op m (or m op ST(0) for
   the R variants), where m is a single-real (dbl==False) or
   double-real (dbl==True) at address a_src.  Empty ST(0) yields a
   NaN plus a stack-underflow flag. */
static
void fp_do_op_mem_ST_0 ( UInt a_src,
                         IFDB(Text t_src CC)
                         Fp_Op op, Bool dbl )
{
   double vd_src, vd_dst;
   IFDB( if (dis) printf("\tf%s%c\t%s,%%st(0)\n",
                         fp_Op_name(op), dbl?'D':'F', t_src ); )
   if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
      vd_dst = fp_get_reg_ST(0);
      vd_src = dbl ? getDMem(a_src) : getFMem(a_src);
      switch (op) {
         case Fp_Add:  vd_dst = vd_dst + vd_src; break;
         case Fp_Sub:  vd_dst = vd_dst - vd_src; break;
         case Fp_Mul:  vd_dst = vd_dst * vd_src; break;
         case Fp_Div:  vd_dst = vd_dst / vd_src; break;
         case Fp_SubR: vd_dst = vd_src - vd_dst; break;
         case Fp_DivR: vd_dst = vd_src / vd_dst; break;
         default: panic("fp_do_op_mem_ST_0");
      }      
   } else {
      vd_dst = NAN;
      fp_set_stack_underflow();
   }
   fp_set_reg_ST(0,vd_dst);
}
+
/* Memory-operand compare: set C3/C2/C0 from comparing ST(0) with a
   single-/double-real at a_src (unordered pattern if ST(0) is
   empty); optionally pop. */
static
void fp_do_COM_mem_ST_0 ( UInt a_src, 
                          IFDB( Text t_src CC)
                          Bool dbl, Bool pop )
{
   double vd_src, vd_dst;
   IFDB( if (dis) printf("\tfcom%s%c\t%s,%%st(0)\n",
                         pop?"p":"", dbl?'D':'F', t_src ); )
   if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
      vd_dst = fp_get_reg_ST(0);
      vd_src = dbl ? getDMem(a_src) : getFMem(a_src);
      fp_set_statusword_flags_COM(vd_dst,vd_src);
   } else {
      fp_set_statusword_flags_COM_STACKF();
      fp_set_stack_underflow();
   }
   if (pop) (void)fp_pop();
}
+
+
+Addr do_one_insn_fp ( Addr r_eip, UChar first_opcode )
+{
+   UChar  modrm;
+   UInt   a_addr, a_src, a_dst;
+   UInt   opc_aux;
+   Bool   isreg;
+   Int    vis_addr;
+   Int    vis_dst;
+   double vd_addr, vd_src, vd_dst;
+
+#  if DEBUG
+   Text   t_opc_aux;
+   Text   t_addr, t_dst;
+   Bool ppFpuState = False;
+
+   if (ppFpuState) {
+      printf("\n\nBEFORE\n");
+      printFpuState();
+      printf("\n");
+   }
+#  endif
+
+   /* assert that we are running with all exceptions masked */
+   assert( (m_fpu_state.env[FP_ENV_CTRL] & 0x3F) == 0x3F );
+   /* and the implication is that there are no unmasked exceptions
+      reported by the exception status flag. */
+   assert( fp_get_statusword_flag(FP_E_SUMMARY) == 0 );
+
+   modrm = *r_eip;
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xD8 opcodes +-+-+-+-+-+-+-+ */
+
+   if (first_opcode == 0xD8) {
+      if (modrm < 0xC0) {
+	/* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         opc_aux = regno_from_modRM ( r_eip, 4 IFDB(CC &t_opc_aux) );
+         r_eip = amode_from_modRM ( r_eip, 4, &a_addr 
+                                    IFDB(CC &t_addr), &isreg );
+         assert(!isreg);
+         switch (opc_aux) {
+
+            case 0: /* FADD single-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) 
+                                   Fp_Add, False );
+               break;
+
+            case 1: /* FMUL single-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) 
+                                   Fp_Mul, False );
+               break;
+
+            case 2: /* FCOM single-real */
+               fp_do_COM_mem_ST_0 ( a_addr, IFDB(t_addr CC) 
+                                    False, False );
+               break;
+
+            case 3: /* FCOMP single-real */
+               fp_do_COM_mem_ST_0 ( a_addr, IFDB(t_addr CC) 
+                                    False, True );
+               break;
+
+            case 4: /* FSUB single-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) 
+                                   Fp_Sub, False );
+               break;
+
+            case 5: /* FSUBR single-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) 
+                                   Fp_SubR, False );
+               break;
+
+            case 6: /* FDIV single-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) 
+                                   Fp_Div, False );
+               break;
+
+            case 7: /* FDIVR single-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) 
+                                   Fp_DivR, False );
+               break;
+
+            default:
+               printf("unhandled opc_aux = 0x%2x\n", opc_aux);
+               panic("do_one_insn_fp: first_opcode == 0xD8");
+               break;
+	 }
+      } else {
+         /* The entire modRM byte is an opcode extension. */
+         r_eip++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FADD %st(?),%st(0) */
+               fp_do_op_ST_ST ( modrm - 0xC0, 0, Fp_Add, False );
+               break;
+
+            case 0xC8 ... 0xCF: /* FMUL %st(?),%st(0) */
+               fp_do_op_ST_ST ( modrm - 0xC8, 0, Fp_Mul, False );
+               break;
+
+            case 0xD0 ... 0xD7: /* FCOM %st(?),%st(0) */
+               fp_do_COM_ST_ST ( modrm - 0xD0, 0, 0 );
+               break;
+
+            case 0xD8 ... 0xDF: /* FCOMP %st(?),%st(0) */
+               fp_do_COM_ST_ST ( modrm - 0xD8, 0, 1 );
+               break;
+
+            case 0xE0 ... 0xE7: /* FSUB %st(?),%st(0) */
+               fp_do_op_ST_ST ( modrm - 0xE0, 0, Fp_Sub, False );
+               break;
+
+            case 0xE8 ... 0xEF: /* FSUBR %st(?),%st(0) */
+               fp_do_op_ST_ST ( modrm - 0xE8, 0, Fp_SubR, False );
+               break;
+
+            case 0xF0 ... 0xF7: /* FDIV %st(?),%st(0) */
+               fp_do_op_ST_ST ( modrm - 0xF0, 0, Fp_Div, False );
+               break;
+
+            case 0xF8 ... 0xFF: /* FDIVR %st(?),%st(0) */
+               fp_do_op_ST_ST ( modrm - 0xF8, 0, Fp_DivR, False );
+               break;
+
+            default:
+               goto unhandled;
+	 }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xD9 opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xD9) {
+      if (modrm < 0xC0) {
+	/* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         opc_aux = regno_from_modRM ( r_eip, 4 IFDB(CC &t_opc_aux) );
+         r_eip = amode_from_modRM ( r_eip, 4, &a_addr 
+                                    IFDB(CC &t_addr), &isreg );
+         assert(!isreg);
+         switch (opc_aux) {
+
+            case 0: /* FLD single-real */
+               IFDB( if (dis) printf("\tfldF\t%s\n",t_addr); )
+               vd_addr = getFMem(a_addr);
+               fp_push(vd_addr);
+               break;
+
+            case 2: /* FST single-real */
+               IFDB( if (dis) printf("\tfstF\t%s\n",t_addr); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_get_reg_ST(0);
+               } else {
+                  vd_addr = NAN;
+                  fp_set_stack_underflow();
+               }
+               setFMem(a_addr,vd_addr);
+               break;
+
+            case 3: /* FSTP single-real */
+               IFDB( if (dis) printf("\tfstpF\t%s\n",t_addr); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_pop();
+               } else {
+                  vd_addr = fp_pop(); /* then throw away result */
+                  vd_addr = NAN;
+                  fp_set_stack_underflow();
+               }
+               setFMem(a_addr,vd_addr);
+               break;
+
+            case 5: /* FLDCW */
+               IFDB( if (dis) printf("\tfldcw\t%s\n",t_addr); )
+               m_fpu_state.env[FP_ENV_CTRL] = (UShort)getIMem2(a_addr);
+               break;
+
+            case 7: /* FNSTCW */
+               IFDB( if (dis) printf("\tfnstcw\t%s\n",t_addr); )
+               setIMem2(a_addr,(UInt)m_fpu_state.env[FP_ENV_CTRL]);
+               break;
+
+            default:
+               printf("unhandled opc_aux = 0x%2x\n", opc_aux);
+               panic("do_one_insn_fp: first_opcode == 0xD9");
+               break;
+	 }
+      } else {
+         /* The entire modRM byte is an opcode extension. */
+         r_eip++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FLD %st(?) */
+               a_dst = (UInt)modrm - 0xC0;
+               IFDB( if (dis) printf("\tfld\t%%st(%d)\n",a_dst); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(a_dst)) &&
+                   fp_is_empty_tag(fp_get_tag_ST(7))) {
+                  vd_dst = fp_get_reg_ST(a_dst);
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_push(vd_dst);
+               break;
+
+            case 0xC8 ... 0xCF: /* FXCH %st(?) */
+               a_dst = (UInt)modrm - 0xC8;
+               IFDB( if (dis) printf("\tfxch\t%%st(%d)\n",a_dst); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(a_dst)) &&
+                   !fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = fp_get_reg_ST(a_dst);
+                  vd_src = fp_get_reg_ST(0);
+               } else {
+                  vd_dst = NAN;
+                  vd_src = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(a_dst,vd_src);
+               fp_set_reg_ST(0,vd_dst);
+               break;
+
+            case 0xE0: /* FCHS */
+               IFDB( if (dis) printf("\tfchs\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = - fp_get_reg_ST(0);
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(0,vd_dst);
+               break;
+
+            case 0xE1: /* FABS */
+               IFDB( if (dis) printf("\tfabs\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = fabs(fp_get_reg_ST(0));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(0,vd_dst);
+               break;
+
+            case 0xE5:
+               /* An approximation to the correct behaviour */
+               IFDB( if (dis) printf("\tfxam\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = fabs(fp_get_reg_ST(0));
+                  if (isnan(vd_dst)) 
+                     vis_dst = 1; /* C320 = 001 */ 
+                  else if (isinf(vd_dst)) 
+                     vis_dst = 3; /* C320 = 011 */
+                  else if (vd_dst == 0.0 || vd_dst == -0.0)  
+                     vis_dst = 4; /* C320 = 100 */
+                  else
+                     vis_dst = 2; /* C320 = 010 */
+                  fp_set_statusword_flag_to(FP_F_C1, 
+                                            vd_dst < 0.0 ? 1 : 0);
+               } else {
+                  vis_dst = 5; /* C320 = 101 */
+                  /* no idea if this is right */
+                  fp_set_statusword_flag_to(FP_F_C1, 0);
+               }
+               fp_set_statusword_flag_to(FP_F_C3, (vis_dst >> 2) & 1);
+               fp_set_statusword_flag_to(FP_F_C2, (vis_dst >> 1) & 1);
+               fp_set_statusword_flag_to(FP_F_C0, vis_dst & 1);
+               break;
+               
+            case 0xE8: /* FLD1 */
+               IFDB( t_dst = "1";  )
+               vd_dst = 1.0;
+               goto do_fld_CONST;
+            case 0xEC: /* FLDLG2 */
+               IFDB( t_dst = "lg2";  )
+               vd_dst = 0.301029995663981143;
+               goto do_fld_CONST;
+            case 0xED: /* FLDLN2 */
+               IFDB( t_dst = "ln2";  )
+               vd_dst = 0.69314718055994530942;
+               goto do_fld_CONST;
+            case 0xEE: /* FLDZ */
+               IFDB( t_dst = "z";  )
+               vd_dst = 0.0;
+               goto do_fld_CONST;
+            do_fld_CONST:
+               IFDB( if (dis) printf("\tfld%s\n",t_dst); )
+               fp_push(vd_dst);
+               break;
+
+            case 0xF0: /* F2XM1 */
+               IFDB( if (dis) printf("\tf2xm1\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = fp_calc_2xm1(fp_get_reg_ST(0));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(0,vd_dst);
+               break;
+
+            case 0xF1: /* FYL2X */
+               IFDB( if (dis) printf("\tfyl2x\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0)) &&
+                   !fp_is_empty_tag(fp_get_tag_ST(1))) {
+                  vd_dst = fp_calc_yl2x(
+                              fp_get_reg_ST(0), fp_get_reg_ST(1));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(1,vd_dst);
+               (void)fp_pop();
+               break;
+
+            case 0xF3: /* FPATAN */
+               IFDB( if (dis) printf("\tfpatan\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0)) &&
+                   !fp_is_empty_tag(fp_get_tag_ST(1))) {
+                  vd_dst = atan2(
+                              fp_get_reg_ST(1), fp_get_reg_ST(0));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(1,vd_dst);
+               (void)fp_pop();
+               break;
+
+            case 0xF8: { /* FPREM */
+               /* Very incomplete implementation.  */
+               Int qq;
+               IFDB( if (dis) printf("\tfprem\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0)) &&
+                   !fp_is_empty_tag(fp_get_tag_ST(1))) {
+                  fp_calc_fprem( &qq, &vd_dst, 
+                                 fp_get_reg_ST(0), fp_get_reg_ST(1) );
+                  fp_set_statusword_flag_to(FP_F_C0, (qq & 4) ? 1 : 0);
+                  fp_set_statusword_flag_to(FP_F_C1, (qq & 1) ? 1 : 0);
+                  fp_set_statusword_flag_to(FP_F_C2, 0); /* reduction complete */
+                  fp_set_statusword_flag_to(FP_F_C3, (qq & 2) ? 1 : 0);
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+                  fp_set_statusword_flag_to(FP_F_C1, 0); /* stack underflow */
+               }
+               fp_set_reg_ST(0,vd_dst);
+               break;
+            }
+            case 0xF9: /* FYL2XP1 */
+               IFDB( if (dis) printf("\tfyl2xp1\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0)) &&
+                   !fp_is_empty_tag(fp_get_tag_ST(1))) {
+                  vd_dst = fp_calc_yl2xp1(
+                              fp_get_reg_ST(0), fp_get_reg_ST(1));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(1,vd_dst);
+               (void)fp_pop();
+               break;
+
+            case 0xFA: /* FSQRT */
+               IFDB( if (dis) printf("\tfsqrt\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = sqrt(fp_get_reg_ST(0));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(0,vd_dst);
+               break;
+
+            case 0xFC: /* FRNDINT */
+               IFDB( if (dis) printf("\tfrndint\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = rint(fp_get_reg_ST(0));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(0,vd_dst);
+               break;
+
+            case 0xFD: /* FSCALE */
+               IFDB( if (dis) printf("\tfscale\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0)) &&
+                   !fp_is_empty_tag(fp_get_tag_ST(1))) {
+                  vd_dst = fp_calc_scale(
+                              fp_get_reg_ST(0), fp_get_reg_ST(1));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(0,vd_dst);
+               break;
+
+            case 0xFE: /* FSIN */
+               IFDB( if (dis) printf("\tfsin\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = sin(fp_get_reg_ST(0));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(0,vd_dst);
+               break;
+
+            case 0xFF: /* FCOS */
+               IFDB( if (dis) printf("\tfcos\n"); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = cos(fp_get_reg_ST(0));
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(0,vd_dst);
+               break;
+
+            default:
+               goto unhandled;
+	 }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDA opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDA) {
+      if (modrm < 0xC0) {
+	/* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         opc_aux = regno_from_modRM ( r_eip, 4 IFDB(CC &t_opc_aux) );
+         r_eip = amode_from_modRM ( r_eip, 4, &a_addr
+                                    IFDB(CC &t_addr), &isreg );
+         assert(!isreg);
+         switch (opc_aux) {
+
+            case 0: /* FIADD m32int */
+               IFDB( if (dis) printf("\tfiaddl\t%s\n",t_addr); )
+               vis_addr = getIMem4(a_addr);
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_get_reg_ST(0) + (double)vis_addr;
+                  fp_set_reg_ST(0, vd_addr);
+                  /* we should set C1 here */
+               } else {
+                  fp_set_reg_ST(0, NAN);
+                  fp_set_stack_underflow();
+               }
+               break;
+
+            case 1: /* FIMUL m32int */
+               IFDB( if (dis) printf("\tfimull\t%s\n",t_addr); )
+               vis_addr = getIMem4(a_addr);
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_get_reg_ST(0) * (double)vis_addr;
+                  fp_set_reg_ST(0, vd_addr);
+                  /* we should set C1 here */
+               } else {
+                  fp_set_reg_ST(0, NAN);
+                  fp_set_stack_underflow();
+               }
+               break;
+
+            case 2: /* FICOM m32int */
+               IFDB( if (dis) printf("\tficoml\t%s\n",t_addr); )
+               vis_addr = getIMem4(a_addr);
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = fp_get_reg_ST(0);
+                  vd_src = (double)vis_addr;
+                  fp_set_statusword_flags_COM(vd_dst,vd_src);
+                  /* we should set C1 here */
+               } else {
+                  fp_set_statusword_flags_COM_STACKF();
+                  fp_set_stack_underflow();
+               }
+               break;
+
+            case 3: /* FICOMP m32int */
+               IFDB( if (dis) printf("\tficompl\t%s\n",t_addr); )
+               vis_addr = getIMem4(a_addr);
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = fp_get_reg_ST(0);
+                  vd_src = (double)vis_addr;
+                  fp_set_statusword_flags_COM(vd_dst,vd_src);
+                  /* we should set C1 here */
+               } else {
+                  fp_set_statusword_flags_COM_STACKF();
+                  fp_set_stack_underflow();
+               }
+               (void)fp_pop();
+               break;
+
+            case 4: /* FISUB m32int */
+               IFDB( if (dis) printf("\tfisubl\t%s\n",t_addr); )
+               vis_addr = getIMem4(a_addr);
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_get_reg_ST(0) - (double)vis_addr;
+                  fp_set_reg_ST(0, vd_addr);
+                  /* we should set C1 here */
+               } else {
+                  fp_set_reg_ST(0, NAN);
+                  fp_set_stack_underflow();
+               }
+               break;
+
+            case 5: /* FISUBR m32int */
+               IFDB( if (dis) printf("\tfisubrl\t%s\n",t_addr); )
+               vis_addr = getIMem4(a_addr);
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = (double)vis_addr - fp_get_reg_ST(0);
+                  fp_set_reg_ST(0, vd_addr);
+                  /* we should set C1 here */
+               } else {
+                  fp_set_reg_ST(0, NAN);
+                  fp_set_stack_underflow();
+               }
+               break;
+
+            case 6: /* FIDIV m32int */
+               IFDB( if (dis) printf("\tfidivl\t%s\n",t_addr); )
+               vis_addr = getIMem4(a_addr);
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_get_reg_ST(0) / (double)vis_addr;
+                  fp_set_reg_ST(0, vd_addr);
+                  /* we should set C1 here */
+               } else {
+                  fp_set_reg_ST(0, NAN);
+                  fp_set_stack_underflow();
+               }
+               break;
+
+            case 7: /* FIDIVR m32int */
+               IFDB( if (dis) printf("\tfidivl\t%s\n",t_addr); )
+               vis_addr = getIMem4(a_addr);
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = (double)vis_addr / fp_get_reg_ST(0);
+                  fp_set_reg_ST(0, vd_addr);
+                  /* we should set C1 here */
+               } else {
+                  fp_set_reg_ST(0, NAN);
+                  fp_set_stack_underflow();
+               }
+               break;
+
+            default:
+               printf("unhandled opc_aux = 0x%2x\n", opc_aux);
+               panic("do_one_insn_fp: first_opcode == 0xDA");
+               break;
+	 }
+      } else {
+         /* The entire modRM byte is an opcode extension. */
+         r_eip++;
+         switch (modrm) {
+
+            case 0xE9: /* FUCOMPP %st(0),%st(1) */
+               /* seems the wrong way round. */
+               fp_do_COM_ST_ST ( 1, 0, 2 );
+               break;
+
+            default:
+               goto unhandled;
+	 }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDB opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDB) {
+      if (modrm < 0xC0) {
+	/* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         opc_aux = regno_from_modRM ( r_eip, 4 IFDB(CC &t_opc_aux) );
+         r_eip = amode_from_modRM ( r_eip, 4, &a_addr 
+                                    IFDB(CC &t_addr), &isreg );
+         assert(!isreg);
+         switch (opc_aux) {
+
+            case 0: /* FILD m32int */
+               IFDB( if (dis) printf("\tfildl\t%s\n",t_addr); )
+               vis_addr = getIMem4(a_addr);
+               fp_push ( (double)vis_addr );
+               break;
+
+            case 2: /* FIST m32 */
+               IFDB( if (dis) printf("\tfistl\t%s\n",t_addr); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_get_reg_ST(0);
+                  if (vd_addr <= -2147483648.5 || 
+                      vd_addr >= 2147483647.5) 
+                     vis_addr = 0x80000000; /* 32-bit int indefinite */
+                  else
+                     vis_addr = (Int)vd_addr;
+               } else {
+                  vis_addr = 0x80000000; /* 32-bit indefinite */
+                  fp_set_stack_underflow();
+               }
+               setIMem4(a_addr,vis_addr);
+               break;
+
+            case 3: /* FISTP m32 */
+               IFDB( if (dis) printf("\tfistpl\t%s\n",t_addr); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_pop();
+                  if (vd_addr <= -2147483648.5 || 
+                      vd_addr >= 2147483647.5) 
+                     vis_addr = 0x80000000; /* 32-bit int indefinite */
+                  else
+                     vis_addr = (Int)vd_addr;
+               } else {
+                  vd_addr = fp_pop(); /* then throw away result */
+                  vis_addr = 0x80000000; /* 32-bit indefinite */
+                  fp_set_stack_underflow();
+               }
+               setIMem4(a_addr,vis_addr);
+               break;
+
+            case 5: /* FLD extended-real */
+               IFDB( if (dis) printf("\tfldT\t%s\n",t_addr); )
+               vd_addr = getTMem(a_addr);
+               fp_push(vd_addr);
+               break;
+
+            case 7: /* FSTP extended-real */
+               IFDB( if (dis) printf("\tfstpT\t%s\n",t_addr); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_pop();
+               } else {
+                  vd_addr = fp_pop(); /* then throw away result */
+                  vd_addr = NAN;
+                  fp_set_stack_underflow();
+               }
+               setTMem(a_addr,vd_addr);
+               break;
+
+            default:
+               printf("unhandled opc_aux = 0x%2x\n", opc_aux);
+               panic("do_one_insn_fp: first_opcode == 0xDB");
+               break;
+	 }
+      } else {
+         /* The entire modRM byte is an opcode extension. */
+         r_eip++;
+         switch (modrm) {
+            default:
+               goto unhandled;
+	 }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDC opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDC) {
+      if (modrm < 0xC0) {
+	/* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         opc_aux = regno_from_modRM ( r_eip, 4 IFDB(CC &t_opc_aux) );
+         r_eip = amode_from_modRM ( r_eip, 4, &a_addr 
+                                    IFDB(CC &t_addr), &isreg );
+         assert(!isreg);
+         switch (opc_aux) {
+
+            case 0: /* FADD double-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) Fp_Add, True );
+               break;
+
+            case 1: /* FMUL double-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) Fp_Mul, True );
+               break;
+
+            case 2: /* FCOM double-real */
+               fp_do_COM_mem_ST_0 ( a_addr, IFDB(t_addr CC) True, False );
+               break;
+
+            case 3: /* FCOMP double-real */
+               fp_do_COM_mem_ST_0 ( a_addr, IFDB(t_addr CC) True, True );
+               break;
+
+            case 4: /* FSUB double-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) Fp_Sub, True );
+               break;
+
+            case 5: /* FSUBR double-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) Fp_SubR, True );
+               break;
+
+            case 6: /* FDIV double-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) Fp_Div, True );
+               break;
+
+            case 7: /* FDIVR double-real */
+               fp_do_op_mem_ST_0 ( a_addr, IFDB(t_addr CC) Fp_DivR, True );
+               break;
+
+            default:
+               printf("unhandled opc_aux = 0x%2x\n", opc_aux);
+               panic("do_one_insn_fp: first_opcode == 0xDC");
+               break;
+	 }
+      } else {
+         /* The entire modRM byte is an opcode extension. */
+         r_eip++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FADD %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xC0, Fp_Add, False );
+               break;
+
+            case 0xC8 ... 0xCF: /* FMUL %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xC8, Fp_Mul, False );
+               break;
+
+            case 0xE0 ... 0xE7: /* FSUBR %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xE0, Fp_SubR, False );
+               break;
+
+            case 0xE8 ... 0xEF: /* FSUB %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xE8, Fp_Sub, False );
+               break;
+
+            case 0xF8 ... 0xFF: /* FDIV %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xF8, Fp_Div, False );
+               break;
+
+            default:
+               goto unhandled;
+	 }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDD opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDD) {
+      if (modrm < 0xC0) {
+	/* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         opc_aux = regno_from_modRM ( r_eip, 4 IFDB(CC &t_opc_aux) );
+         r_eip = amode_from_modRM ( r_eip, 4, &a_addr 
+                                    IFDB(CC &t_addr), &isreg );
+         assert(!isreg);
+         switch (opc_aux) {
+
+            case 0: /* FLD double-real */
+               IFDB( if (dis) printf("\tfldD\t%s\n",t_addr); )
+               vd_addr = getDMem(a_addr);
+               fp_push(vd_addr);
+               break;
+
+            case 2: /* FST double-real */
+               IFDB( if (dis) printf("\tfstD\t%s\n",t_addr); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_get_reg_ST(0);
+               } else {
+                  vd_addr = NAN;
+                  fp_set_stack_underflow();
+               }
+               setDMem(a_addr,vd_addr);
+               break;
+
+            case 3: /* FSTP double-real */
+               IFDB( if (dis) printf("\tfstpD\t%s\n",t_addr); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_pop();
+               } else {
+                  vd_addr = fp_pop(); /* then throw away result */
+                  vd_addr = NAN;
+                  fp_set_stack_underflow();
+               }
+               setDMem(a_addr,vd_addr);
+               break;
+            default:
+               printf("unhandled opc_aux = 0x%2x\n", opc_aux);
+               panic("do_one_insn_fp: first_opcode == 0xDD");
+               break;
+	 }
+      } else {
+         /* The entire modRM byte is an opcode extension. */
+         r_eip++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FFREE %st(?) */
+               a_dst = (UInt)modrm - 0xC0;
+               IFDB( if (dis) printf("\tffree\t%%st(%d)\n", a_dst); )
+               fp_set_tag_ST( a_dst, FP_TAG_EMPTY );
+               break;
+
+            case 0xD0 ... 0xD7: /* FST %st(0),%st(?) */
+               a_dst = (UInt)modrm - 0xD0;
+               IFDB( if (dis) printf("\tfst\t%%st(0),%%st(%d)\n",
+                                     a_dst); )
+               if ( /* don't check the destination tag */
+                    !fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = fp_get_reg_ST(0);
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(a_dst,vd_dst);
+               break;
+
+            case 0xD8 ... 0xDF: /* FSTP %st(0),%st(?) */
+               a_dst = (UInt)modrm - 0xD8;
+               IFDB( if (dis) printf("\tfstp\t%%st(0),%%st(%d)\n",
+                                     a_dst); )
+               if ( /* don't check the destination tag */
+                    !fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_dst = fp_get_reg_ST(0);
+               } else {
+                  vd_dst = NAN;
+                  fp_set_stack_underflow();
+               }
+               fp_set_reg_ST(a_dst,vd_dst);
+               (void)fp_pop();
+               break;
+
+            case 0xE0 ... 0xE7: /* FUCOM %st(0),%st(?) */
+               a_src = (UInt)modrm - 0xE0;
+               IFDB( if (dis) printf("\tfucom\t%%st(0),%%st(%d)\n",
+                                     a_src); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(a_src)) &&
+                   !fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_src = fp_get_reg_ST(a_src);
+                  vd_dst = fp_get_reg_ST(0);
+                  fp_set_statusword_flags_COM(vd_dst,vd_src);
+               } else {
+                  fp_set_statusword_flags_COM_STACKF();
+                  fp_set_stack_underflow();
+               }
+               break;
+
+            case 0xE8 ... 0xEF: /* FUCOMP %st(0),%st(?) */
+               a_src = (UInt)modrm - 0xE8;
+               IFDB( if (dis) printf("\tfucomp\t%%st(0),%%st(%d)\n",
+                                     a_src); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(a_src)) &&
+                   !fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_src = fp_get_reg_ST(a_src);
+                  vd_dst = fp_get_reg_ST(0);
+                  fp_set_statusword_flags_COM(vd_dst,vd_src);
+               } else {
+                  fp_set_statusword_flags_COM_STACKF();
+                  fp_set_stack_underflow();
+               }
+               (void)fp_pop();
+               break;
+
+            default:
+               goto unhandled;
+	 }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDE opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDE) {
+      if (modrm < 0xC0) {
+	/* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         opc_aux = regno_from_modRM ( r_eip, 4 IFDB(CC &t_opc_aux) );
+         r_eip = amode_from_modRM ( r_eip, 4, &a_addr 
+                                    IFDB(CC &t_addr), &isreg );
+         assert(!isreg);
+         switch (opc_aux) {
+            default:
+               printf("unhandled opc_aux = 0x%2x\n", opc_aux);
+               panic("do_one_insn_fp: first_opcode == 0xDE");
+               break;
+	 }
+      } else {
+         /* The entire modRM byte is an opcode extension. */
+         r_eip++;
+         switch (modrm) {
+
+            case 0xC0 ... 0xC7: /* FADDP %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xC0, Fp_Add, True );
+               break;
+
+            case 0xC8 ... 0xCF: /* FMULP %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xC8, Fp_Mul, True );
+               break;
+
+            case 0xD9: /* FCOMPP %st(0),%st(1) */
+               /* seems the wrong way round. */
+               fp_do_COM_ST_ST ( 1, 0, 2 );
+               break;
+
+            case 0xE0 ... 0xE7: /* FSUBRP %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xE0, Fp_SubR, True );
+               break;
+
+            case 0xE8 ... 0xEF: /* FSUBP %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xE8, Fp_Sub, True );
+               break;
+
+            case 0xF0 ... 0xF7: /* FDIVRP %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xF0, Fp_DivR, True );
+               break;
+
+            case 0xF8 ... 0xFF: /* FDIVP %st(0),%st(?) */
+               fp_do_op_ST_ST ( 0, modrm - 0xF8, Fp_Div, True );
+               break;
+
+            default:
+               goto unhandled;
+	 }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ 0xDF opcodes +-+-+-+-+-+-+-+ */
+   else
+   if (first_opcode == 0xDF) {
+      if (modrm < 0xC0) {
+	/* bits 5,4,3 are an opcode extension, and the modRM also
+           specifies an address. */
+         opc_aux = regno_from_modRM ( r_eip, 4 IFDB(CC &t_opc_aux) );
+         r_eip = amode_from_modRM ( r_eip, 4, &a_addr 
+                                    IFDB(CC &t_addr), &isreg );
+         assert(!isreg);
+         switch (opc_aux) {
+
+            case 0: /* FILD m16int */
+               IFDB( if (dis) printf("\tfildw\t%s\n",t_addr); )
+               vis_addr = extend_s_16to32(getIMem2(a_addr));
+               fp_push ( (double) vis_addr );
+               break;
+
+            case 3: /* FISTP m16 */
+               IFDB( if (dis) printf("\tfistpw\t%s\n",t_addr); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_pop();
+                  if (vd_addr <= -32768.50 || 
+                      vd_addr >= 32767.50) 
+                     vis_addr = 0x00008000; /* 16-bit int indefinite */
+                  else
+                     vis_addr = (Short)vd_addr;
+               } else {
+                  vd_addr = fp_pop(); /* then throw away result */
+                  vis_addr = 0x00008000; /* 32-bit indefinite */
+                  fp_set_stack_underflow();
+               }
+               setIMem2(a_addr,vis_addr);
+               break;
+
+            case 5: { /* FILD m64int */
+               ULong vis_addr64;
+               IFDB( if (dis) printf("\tfildq\t%s\n",t_addr); )
+               vis_addr   = getIMem4(a_addr+4);
+               vis_addr64 = ((ULong)vis_addr) << 32;
+               vis_addr   = getIMem4(a_addr);
+               vis_addr64 += (ULong)vis_addr;
+               fp_push ( (double) ((Long)vis_addr64) );
+               break;
+            }
+
+            case 7: { /* FISTP m64int */
+               ULong vis_addr64;
+               IFDB( if (dis) printf("\tfistpq\t%s\n",t_addr); )
+               if (!fp_is_empty_tag(fp_get_tag_ST(0))) {
+                  vd_addr = fp_pop();
+                  if (vd_addr <= -9223372036854775808.5 ||
+                      vd_addr >= 9223372036854775807.5) 
+                     vis_addr64 = 0x8000000000000000LL;
+                         /* 64-bit int indefinite */
+                  else
+                     vis_addr64 = (Long)vd_addr;
+               } else {
+                  vd_addr = fp_pop(); /* then throw away result */
+                  vis_addr64 = 0x8000000000000000LL; /* 64-bit indefinite */
+                  fp_set_stack_underflow();
+               }
+               setIMem4(a_addr,vis_addr64 & 0xFFFFFFFFLL);
+               setIMem4(a_addr+4, (((Long)vis_addr64) >> 32) 
+                                   & 0xFFFFFFFFLL);
+               break;
+            }
+
+            default:
+               printf("unhandled opc_aux = 0x%2x\n", opc_aux);
+               panic("do_one_insn_fp: first_opcode == 0xDF");
+               break;
+	 }
+      } else {
+         /* The entire modRM byte is an opcode extension. */
+         r_eip++;
+         switch (modrm) {
+
+            case 0xE0: /* FNSTSW %ax */
+               IFDB( if (dis) printf("\tfnstsw\t%%ax\n"); )
+               setIReg2(R_EAX, (UInt)m_fpu_state.env[FP_ENV_STAT]);
+               break;
+
+            default:
+               goto unhandled;
+	 }
+      }
+   }
+
+   /* -+-+-+-+-+-+-+-+-+-+-+-+ Unhandled ESC opcode +-+-+-+ */
+   else goto unhandled;
+
+#  if DEBUG
+   if (ppFpuState) {
+      printf("\nAFTER\n");
+      printFpuState();
+      printf("\n");
+   }
+#  endif
+
+   return r_eip;
+
+  unhandled:
+   hd_message(Hd_DebugMsg,
+              "first opcode = 0x%x, modRM = 0x%x",
+              (UInt)first_opcode, (UInt)modrm );
+   panic("do_one_insn_fp: unhandled first_opcode/modrm combination");
+   assert(0);
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                 hd_fpu.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/useful/show_fp_state.c b/VEX/useful/show_fp_state.c
new file mode 100644
index 0000000..06eab5a
--- /dev/null
+++ b/VEX/useful/show_fp_state.c
@@ -0,0 +1,184 @@
+
+#include <stdio.h>
+#include <assert.h>
+
+
+/* ------------------------------------------------- */
+
+typedef unsigned char          UChar;
+typedef unsigned short         UShort;
+typedef unsigned int           UInt;
+typedef unsigned long long int ULong;
+
+typedef signed char     Char;
+typedef short           Short;
+typedef int             Int;
+typedef long long int   Long;
+
+typedef
+   struct {
+      UShort env[14];
+      UChar  reg[80];
+   }
+   Fpu_State;
+
+/* Offsets, in 16-bit ints, into the FPU environment (env) area. */
+#define FP_ENV_CTRL   0
+#define FP_ENV_STAT   2
+#define FP_ENV_TAG    4
+#define FP_ENV_IP     6 /* and 7 */
+#define FP_ENV_CS     8
+#define FP_ENV_OPOFF  10 /* and 11 */
+#define FP_ENV_OPSEL  12
+#define FP_REG(ii)    (10*(7-(ii)))
+
+/* Bitfield offsets for exceptions in the FPU status and control words. */
+#define FP_E_INVAL    0
+#define FP_E_DENOR    1
+#define FP_E_DIVZ     2
+#define FP_E_OVERF    3
+#define FP_E_UNDER    4
+#define FP_E_LOS      5
+
+/* More bitfield offsets, but for the status word only. */
+#define FP_E_STACKF   6
+#define FP_E_SUMMARY  7
+#define FP_F_C0       8
+#define FP_F_C1       9
+#define FP_F_C2      10
+#define FP_F_C3      14
+/* top-of-stack ptr is bits 13,12,11 of the word */
+#define FP_F_TOS_LO  11
+#define FP_F_TOS_HI  13
+
+/* Register tags. */
+#define FP_TAG_VALID 0
+#define FP_TAG_ZERO  1
+#define FP_TAG_SPEC  2
+#define FP_TAG_EMPTY 3
+
+char* fp_tag_names[4]
+   = { "Valid", "Zero", "Spec", "Empty" };
+
+char* fp_exception_names[6]
+   = { "INVAL", "DENOR", "DIVZ", "OVERF", "UNDERF", "LOS" };
+
+Fpu_State m_fpu_state;
+
+
+
+/* Return the FPU top-of-stack pointer: bits 13..11 (FP_F_TOS_HI..LO)
+   of the status word, as a value in 0..7. */
+UInt fp_get_tos ( void )
+{
+   return (m_fpu_state.env[FP_ENV_STAT] >> FP_F_TOS_LO) & 7;
+}
+
+/* Return the 2-bit tag (FP_TAG_VALID/ZERO/SPEC/EMPTY) for physical
+   register 'regno' (0..7) from the tag word.
+   NOTE(review): 'regno < 0' is always false for an unsigned argument,
+   so only the upper bound is actually checked here. */
+UInt fp_get_tag ( UInt regno )
+{
+   assert(!(regno < 0 || regno > 7));
+   return (m_fpu_state.env[FP_ENV_TAG] >> (2*regno)) & 3;
+}
+
+/* Return bit 'flagno' (0..15) of the FPU status word (0 or 1).
+   NOTE(review): 'flagno < 0' is vacuous for an unsigned argument. */
+UInt fp_get_statusword_flag ( UInt flagno )
+{
+   assert(!(flagno < 0 || flagno > 15));
+   return (m_fpu_state.env[FP_ENV_STAT] >> flagno) & 0x1;
+}
+
+/* Return bit 'flagno' (0..15) of the FPU control word (0 or 1).
+   NOTE(review): 'flagno < 0' is vacuous for an unsigned argument. */
+UInt fp_get_controlword_flag ( UInt flagno )
+{
+   assert(!(flagno < 0 || flagno > 15));
+   return (m_fpu_state.env[FP_ENV_CTRL] >> flagno) & 0x1;
+}
+
+/* Pretty-print the entire 108-byte FNSAVE image held in m_fpu_state:
+   all eight 80-bit registers (top-of-stack marked with '**'), the
+   control word with its masked exceptions, the status word with
+   pending exceptions / TOS / C3..C0 / stack-fault flag, the tag word,
+   and the instruction/operand pointer fields of the environment. */
+static 
+void printFpuState ( void )
+{
+   Int i, j, k;
+   /* The layout arithmetic below (FP_REG, env offsets) relies on the
+      exact 108-byte FNSAVE image size. */
+   assert(sizeof(Fpu_State)==108);
+   /* Registers are printed top (7) down to bottom (0), each as 10
+      bytes, most significant first (hence j counts down from
+      FP_REG(i)+9). */
+   for (i = 7; i >= 0; i--) {
+      printf ( " %s fpreg%d: 0x", 
+               (UInt)i == fp_get_tos() ? "**" : "  ", i );
+      for (k = 0, j = FP_REG(i)+9; k < 10; k++,j--)
+         printf ( "%02x", (UInt)m_fpu_state.reg[j]);
+      printf ( "  %5s  ", fp_tag_names[fp_get_tag(i)] );
+      printf("\n");
+      //printf ( "%20.16e\n", fp_get_reg(i) );
+   }
+   /* Control word: a set mask bit means the exception is masked. */
+   printf("     fctrl:     0x%04x  masked: ", 
+          (UInt)m_fpu_state.env[FP_ENV_CTRL] );
+   for (i = FP_E_INVAL; i <= FP_E_LOS; i++)
+      if (fp_get_controlword_flag(i))
+         printf ( "%s ", fp_exception_names[i] );
+   printf ( "\n" );
+
+   /* Status word: pending exceptions, top-of-stack, condition codes. */
+   printf("     fstat:     0x%04x  except:", 
+          (UInt)m_fpu_state.env[FP_ENV_STAT] );
+   for (i = FP_E_INVAL; i <= FP_E_LOS; i++)
+      if (fp_get_statusword_flag(i))
+         printf ( "%s ", fp_exception_names[i] );
+   printf ( "  top: %d  ", fp_get_tos() );
+   printf ( "c3210: %d%d%d%d",
+            fp_get_statusword_flag(FP_F_C3),
+            fp_get_statusword_flag(FP_F_C2),
+            fp_get_statusword_flag(FP_F_C1),
+            fp_get_statusword_flag(FP_F_C0) );
+   printf ( "  STACKF: %d\n", fp_get_statusword_flag(FP_E_STACKF) );
+
+   /* Tag word, one 2-bit tag per register, printed 7 down to 0. */
+   printf("      ftag:     0x%04x  ", (UInt)m_fpu_state.env[FP_ENV_TAG] );
+   for (i = 7; i >= 0; i--)
+      printf ( "%d:%s ", i, fp_tag_names[fp_get_tag(i)] );
+   printf("\n");
+
+   /* 32-bit IP / operand offsets are stored as two adjacent 16-bit
+      env words, low half first. */
+   printf("       fip: 0x%08x\n", 
+           (((UInt)m_fpu_state.env[FP_ENV_IP+1]) << 16) |
+            ((UInt)m_fpu_state.env[FP_ENV_IP]) );
+   printf("       fcs:     0x%04x\n", 
+           ((UInt)m_fpu_state.env[FP_ENV_CS]) );
+   printf("    fopoff: 0x%08x\n", 
+           (((UInt)m_fpu_state.env[FP_ENV_OPOFF+1]) << 16) |
+            ((UInt)m_fpu_state.env[FP_ENV_OPOFF]) );
+   printf("    fopsel:     0x%04x\n", 
+           ((UInt)m_fpu_state.env[FP_ENV_OPSEL]) );
+}
+
+
+/* ------------------------------------------------- */
+
+
+/* Initialise the FPU, dump its state, and print it. */
+
+
+/* Hex-dump a raw 108-byte FNSAVE image: the 28-byte environment in
+   rows of 4 bytes, then the 80 bytes of register data in rows of 10
+   (one row per 80-bit register). */
+void show ( unsigned char* st )
+{
+  int i;
+  for (i = 0; i < 28; i++) {
+    printf("%02x ", st[i]);
+    if (i > 0 && ((i & 3) == 3)) printf("\n");
+  }
+  for (i = 0; i < 80; i++) {
+    printf("%02x ", st[i+28]);
+    if (i > 0 && ((i % 10) == 9)) printf("\n");
+  }
+  printf("\n");
+}
+
+/* Initialise the FPU, snapshot its state with FNSAVE, print it; then
+   load two integers onto the FP stack, snapshot and print again, and
+   finally hex-dump the raw image.
+   NOTE(review): the inline asm uses x86-32 instructions and %esp, so
+   this builds only for 32-bit x86 — confirm intended targets. */
+int main ( void ) 
+{
+  Fpu_State* st = &m_fpu_state;
+  /* FNSAVE writes exactly 108 bytes; the struct must match. */
+  assert(sizeof(m_fpu_state) == 108);
+  asm volatile ("finit ; fnsave %0"
+                : "=m" (m_fpu_state)
+                : 
+                : "memory" );
+  printFpuState();
+  printf("\n\n");
+
+  /* NOTE(review): the first fildl reads whatever happens to be at
+     0(%esp) before 17 is pushed — presumably intentional for this
+     throwaway experiment, but the loaded value is indeterminate. */
+  asm volatile ("fildl 0(%%esp) ; pushl $17 ; fildl 0(%%esp) ; addl $4, %%esp ;  fnsave %0"
+                : "=m" (m_fpu_state)
+                : 
+                : "memory" );
+  printFpuState();
+  printf("\n");
+  show(st);
+  return 0;
+}
diff --git a/VEX/useful/smchash.c b/VEX/useful/smchash.c
new file mode 100644
index 0000000..b13e5c2
--- /dev/null
+++ b/VEX/useful/smchash.c
@@ -0,0 +1,324 @@
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+/* Local fixed-meaning aliases for the basic types used below. */
+typedef  signed int              Int;
+typedef  unsigned int            UInt;
+typedef  unsigned long long int  Addr64;  /* a 64-bit guest address */
+typedef  unsigned char           UChar;
+typedef  unsigned long int       UWord;   /* host-word-sized integer */
+
+/* Rotate x left by n places.  The amount is reduced mod 32 first;
+   a reduced amount of zero is rejected because the expression below
+   would then compute x >> 32, which is undefined behaviour.  (The
+   original asserted before masking, so n == 32 slipped through the
+   assert and hit that UB shift.) */
+static inline UInt ROL32 ( UInt x, UInt n ) {
+  n &= 31;
+  assert(n != 0);  /* rejects n == 0 and any multiple of 32 */
+  x = (x << n) | (x >> (32-n));
+  return x;
+}
+
+//////////////////////////////////////////////////////////
+
+/* One block of guest code bytes read from the input stream. */
+typedef
+   struct {
+      Addr64 ga;      /* guest address of the block */
+      Int    nbytes;  /* number of code bytes (read_one asserts 0 < nbytes < 5000) */
+      UChar* bytes;   /* the underlying allocation -- this is what gets freed */
+      UChar* actual;  /* bytes nudged up so (actual & 3) == (ga & 3) */
+   }
+   GuestBytes;
+
+/* Read one GuestBytes record from f: a "GuestBytes <addr> <nbytes>"
+   header, then nbytes hex bytes, then a shift-xor checksum which is
+   verified.  Returns NULL at end-of-file.  The caller owns the result
+   and must free both ->bytes and the struct itself.
+
+   Fixes over the original: the EOF path no longer leaks the freshly
+   malloc'd struct (the feof test now precedes the allocation); the
+   byte buffer is sized nToAlloc rather than nbytes + nToAlloc, which
+   double-counted nbytes; and the allocation is checked before being
+   used to derive ->actual. */
+GuestBytes* read_one ( FILE* f )
+{
+  Int r;
+  UInt i;
+  UInt esum, csum;
+
+  if (feof(f)) return NULL;
+  assert(!ferror(f));
+
+  GuestBytes* gb = malloc(sizeof(GuestBytes));
+  assert(gb);
+
+  r= fscanf(f, "GuestBytes %llx %d  ", &gb->ga, &gb->nbytes);
+  if (0) printf("r = %d\n", r);
+  assert(r == 2);
+
+  assert(gb->ga != 0);
+  assert(gb->nbytes > 0);
+  assert(gb->nbytes < 5000); // let's say
+
+  /* Over-allocate by up to 3 bytes so that ->actual can be nudged up
+     to have the same 4-alignment as the guest address. */
+  Int nToAlloc = gb->nbytes + (gb->ga & 3);
+
+  gb->bytes = malloc( nToAlloc );
+  assert(gb->bytes);
+  gb->actual = gb->bytes + (gb->ga & 3);
+
+  /* Read the bytes, folding each into the running checksum. */
+  csum = 0;
+  for (i = 0; i < gb->nbytes; i++) {
+    UInt b;
+    r= fscanf(f, "%02x ", &b);
+    assert(r == 1);
+    gb->actual[i] = b;
+    csum = (csum << 1) ^ b;
+  }
+
+  /* The trailing checksum must match what we computed. */
+  r= fscanf(f, " %08x\n", &esum);
+  assert(r == 1);
+
+  assert(esum == csum);
+
+  return gb;
+}
+
+//////////////////////////////////////////////////////////
+
+/* Apply fn(gb, opaque) to every GuestBytes record in f, freeing each
+   record afterwards.  read_one returns NULL once end-of-file is
+   reached; the original dereferenced that NULL (fn(gb,..), then
+   free(gb->bytes)), so it is now checked explicitly. */
+void apply_to_all ( FILE* f,
+                    void(*fn)( GuestBytes*, void* ),
+                    void* opaque )
+{
+  while (!feof(f)) {
+    GuestBytes* gb = read_one(f);
+    if (gb == NULL) break;  /* clean EOF */
+    if (0) printf("got %llu %d\n", gb->ga, gb->nbytes);
+    fn( gb, opaque );
+    free(gb->bytes);
+    free(gb);
+  }
+}
+
+//////////////////////////////////////////////////////////
+
+/* Degenerate hash: ignores the block entirely and returns zero.
+   Useful only as a worst-case baseline for the avalanche harness. */
+UInt hash_const_zero ( GuestBytes* gb ) {
+  (void)gb;  /* deliberately unused */
+  return 0;
+}
+
+/* Trivial hash: the 32-bit sum of the block's bytes. */
+UInt hash_sum ( GuestBytes* gb ) {
+  UInt acc = 0;
+  UInt ix;
+  for (ix = 0; ix < (UInt)gb->nbytes; ix++) {
+    acc += (UInt)gb->actual[ix];
+  }
+  return acc;
+}
+
+/* Slightly better hash: xor each byte into the accumulator, then
+   rotate the accumulator left by 7. */
+UInt hash_rol ( GuestBytes* gb ) {
+  UInt acc = 0;
+  UInt ix = 0;
+  while (ix < (UInt)gb->nbytes) {
+    acc = ROL32(acc ^ (UInt)gb->actual[ix], 7);
+    ix++;
+  }
+  return acc;
+}
+
+/* Candidate hash 0: single-accumulator xor/rotate scheme.  Bytes up to
+   the first 4-aligned address are folded in with shift-or; aligned
+   32-bit words are then mixed with xor + rotate-13 (4-way unrolled,
+   then one word at a time); leftover tail bytes use xor + rotate-19. */
+static UInt cand0 ( GuestBytes* gb )
+{
+   UWord addr = (UWord)gb->actual;
+   UWord len = gb->nbytes;
+   UInt sum = 0;
+   /* pull up to 4-alignment */
+   while ((addr & 3) != 0 && len >= 1) {
+      UChar* p = (UChar*)addr;
+      sum = (sum << 8) | (UInt)p[0];
+      addr++;
+      len--;
+   }
+   /* vectorised + unrolled */
+   while (len >= 16) {
+      UInt* p = (UInt*)addr;
+      sum = ROL32(sum ^ p[0], 13);
+      sum = ROL32(sum ^ p[1], 13);
+      sum = ROL32(sum ^ p[2], 13);
+      sum = ROL32(sum ^ p[3], 13);
+      addr += 16;
+      len -= 16;
+   }
+   /* vectorised fixup */
+   while (len >= 4) {
+      UInt* p = (UInt*)addr;
+      sum = ROL32(sum ^ p[0], 13);
+      addr += 4;
+      len -= 4;
+   }
+   /* scalar fixup */
+   while (len >= 1) {
+      UChar* p = (UChar*)addr;
+      sum = ROL32(sum ^ (UInt)p[0], 19);
+      addr++;
+      len--;
+   }
+   return sum;
+}
+
+/* Candidate hash 1: like cand0, but maintains two accumulators -- a
+   rotate-31 xor chain (sum1) and a plain running sum (sum2) -- and
+   cross-mixes them (sum1 ^= sum2) after each word group.  The final
+   value combines both accumulators. */
+static UInt cand1 ( GuestBytes* gb )
+{
+   UWord addr = (UWord)gb->actual;
+   UWord len = gb->nbytes;
+   UInt sum1 = 0, sum2 = 0;
+   /* pull up to 4-alignment */
+   while ((addr & 3) != 0 && len >= 1) {
+      UChar* p = (UChar*)addr;
+      sum1 = (sum1 << 8) | (UInt)p[0];
+      addr++;
+      len--;
+   }
+   /* vectorised + unrolled */
+   while (len >= 16) {
+      UInt* p = (UInt*)addr;
+      UInt  w;
+      w = p[0];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      w = p[1];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      w = p[2];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      w = p[3];  sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      addr += 16;
+      len  -= 16;
+      sum1 ^= sum2;
+   }
+   /* vectorised fixup */
+   while (len >= 4) {
+      UInt* p = (UInt*)addr;
+      UInt  w = p[0];
+      sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      addr += 4;
+      len  -= 4;
+      sum1 ^= sum2;
+   }
+   /* scalar fixup */
+   while (len >= 1) {
+      UChar* p = (UChar*)addr;
+      UInt   w = (UInt)p[0];
+      sum1 = ROL32(sum1 ^ w, 31);  sum2 += w;
+      addr++;
+      len--;
+   }
+   return sum1 + sum2;
+}
+ 
+/* Adler-32-style checksum of the block: s1 is the running byte sum,
+   s2 the sum of successive s1 values, 4-way unrolled.
+   NOTE(review): real Adler-32 (RFC 1950) reduces s1/s2 mod 65521;
+   this version lets them wrap mod 2^32, so it only matches the
+   genuine algorithm for short inputs. */
+static UInt adler32 ( GuestBytes* gb )
+{
+   UWord addr = (UWord)gb->actual;
+   UWord len = gb->nbytes;
+   UInt   s1 = 1;
+   UInt   s2 = 0;
+   UChar* buf = (UChar*)addr;
+   while (len >= 4) {
+      s1 += buf[0];
+      s2 += s1;
+      s1 += buf[1];
+      s2 += s1;
+      s1 += buf[2];
+      s2 += s1;
+      s1 += buf[3];
+      s2 += s1;
+      buf += 4;
+      len -= 4;
+   }
+   while (len > 0) {
+      s1 += buf[0];
+      s2 += s1;
+      len--;
+      buf++;
+   }
+   return (s2 << 16) + s1;
+}
+
+
+
+
+//////////////////////////////////////////////////////////
+
+/* The hash function under test -- exactly one of the candidate
+   initialisers below should be left uncommented. */
+UInt (*theFn)(GuestBytes*) =
+  //hash_const_zero;
+  //hash_sum;
+//hash_rol;
+//cand0;
+  cand1;
+  //adler32;
+
+/* qsort-style three-way comparison of two UInts through pointers:
+   negative, zero or positive as *p1 is below, equal to or above *p2. */
+Int cmp_UInt_ps ( UInt* p1, UInt* p2 ) {
+  UInt a = *p1;
+  UInt b = *p2;
+  if (a < b) return -1;
+  if (b < a) return 1;
+  return 0;
+}
+
+/* Population count: the number of set bits in w. */
+Int nSetBits ( UInt w )
+{
+  Int i, j;
+  j = 0;
+  for (i = 0; i < 32; i++)
+    if (w & (1u << i))  /* 1u: (1 << 31) on signed int is undefined */
+      j++;
+  return j;
+}
+
+/* Whole-run statistics accumulated by try_onebit_changes. */
+Int    toc_nblocks           = 0;   /* blocks processed */
+Int    toc_nblocks_with_zero = 0;   /* blocks with >= 1 zero-distance flip */
+double toc_sum_of_avgs       = 0.0; /* sum of per-block average distances */
+
+/* Flip bit number bix (0..7) of the byte b[ix]. */
+void invertBit ( UChar* b, UInt ix, UInt bix ) {
+   b[ix] = b[ix] ^ (1 << bix);
+}
+
+/* Avalanche measurement for one block: hash the unmodified bytes, then
+   flip every bit in turn, re-hash, and record the Hamming distance of
+   each flipped hash from the baseline.  Each bit is flipped back
+   immediately, so the block is restored on exit (verified via hFinal).
+   Per-block results are printed and folded into the toc_* globals. */
+void try_onebit_changes( GuestBytes* gb, void* opaque )
+{
+   toc_nblocks++;
+   /* collect up the hash values for all one bit changes of the key,
+      and also that for the unmodified key.  Then compute the number
+      of changed bits for all of them. */
+   UInt  hashIx  = 0;
+   UInt  nHashes = 8 * gb->nbytes;
+   UInt* hashes  = malloc( nHashes * sizeof(UInt) );
+
+   UInt byteIx, bitIx;
+   UInt hInit, hFinal, hRunning;
+   Int dist, totDist = 0, nNoDist = 0;
+   assert(hashes);
+   hInit = theFn( gb );
+    for (byteIx = 0; byteIx < gb->nbytes; byteIx++) {
+      for (bitIx = 0; bitIx < 8; bitIx++) {
+
+         invertBit(gb->actual, byteIx, bitIx);
+         //invertBit(gb->actual, byteIx, bitIx ^ 4);
+
+         hRunning = theFn( gb );
+
+         /* dist = number of output bits changed by this one-bit flip */
+         dist = nSetBits(hRunning ^ hInit);
+         totDist += dist;
+         if (dist == 0) nNoDist++;
+
+         hashes[hashIx++] = hRunning;
+
+         /* restore the flipped bit */
+         invertBit(gb->actual, byteIx, bitIx);
+         //invertBit(gb->actual, byteIx, bitIx ^ 4);
+
+         if (0) printf("  %02d.%d  %08x  %d\n", 
+                       byteIx, bitIx, hRunning ^ hInit, dist);
+      }
+   }
+   /* the hash of the restored block must equal the baseline */
+   hFinal = theFn( gb );
+   assert(hFinal == hInit);
+   assert(hashIx == nHashes);
+
+   if (nNoDist > 0) 
+      printf("%4d  measurements,  %5.2f avg dist,  %2d zeroes\n",
+             (Int)nHashes, (double)totDist / (double)nHashes,  nNoDist);
+   else
+      printf("%4d  measurements,  %5.2f avg dist\n",
+             (Int)nHashes, (double)totDist / (double)nHashes);
+
+   if (nNoDist > 0)
+      toc_nblocks_with_zero++;
+
+   toc_sum_of_avgs += (double)totDist / (double)nHashes;
+
+   free(hashes);
+}
+
+//////////////////////////////////////////////////////////
+
+/* Read GuestBytes records from stdin, measure the avalanche behaviour
+   of theFn on each, and print whole-run summary statistics. */
+int main ( void )
+{
+  FILE* f = stdin;
+  apply_to_all(f, try_onebit_changes, NULL);
+  /* NOTE(review): divides by toc_nblocks, which is 0 on empty input. */
+  printf("\n%d blocks,  %d with a zero,  %5.2f avg avg\n\n",
+         toc_nblocks, toc_nblocks_with_zero, toc_sum_of_avgs / (double)toc_nblocks );
+  return 0;
+}
+
+//////////////////////////////////////////////////////////
diff --git a/VEX/useful/test_main.c b/VEX/useful/test_main.c
new file mode 100644
index 0000000..2b47591
--- /dev/null
+++ b/VEX/useful/test_main.c
@@ -0,0 +1,2755 @@
+
+/*---------------------------------------------------------------*/
+/*--- begin                                       test_main.c ---*/
+/*---------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2004-2013 OpenWorks LLP
+      info@open-works.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+   02110-1301, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+
+   Neither the names of the U.S. Department of Energy nor the
+   University of California nor the names of its contributors may be
+   used to endorse or promote products derived from this software
+   without prior written permission.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+#include "libvex_basictypes.h"
+#include "libvex.h"
+
+#include "test_main.h"
+
+
+/*---------------------------------------------------------------*/
+/*--- Test                                                    ---*/
+/*---------------------------------------------------------------*/
+
+
+/* LibVEX failure callback: report on stdout and terminate the whole
+   process with a nonzero exit status.  Never returns. */
+__attribute__ ((noreturn))
+static void failure_exit ( void )
+{
+   printf("VEX did failure_exit.  Bye.\n");
+   exit(1);
+}
+
+/* LibVEX logging callback: forward the bytes verbatim to stdout.
+   (fwrite's return value is ignored -- acceptable in a test harness.) */
+static
+void log_bytes ( const HChar* bytes, SizeT nbytes )
+{
+   fwrite ( bytes, 1, nbytes, stdout );
+}
+
+/* Buffer for one line of the .orig input file. */
+#define N_LINEBUF 10000
+static HChar linebuf[N_LINEBUF];
+
+#define N_ORIGBUF 10000
+#define N_TRANSBUF 5000
+
+/* Guest code (input) and translated host code (output) buffers. */
+static UChar origbuf[N_ORIGBUF];
+static UChar transbuf[N_TRANSBUF];
+
+/* Print a per-BB progress line for each translation. */
+static Bool verbose = True;
+
+/* Forwards */
+#if 1 /* UNUSED */
+//static IRSB* ac_instrument ( IRSB*, VexGuestLayout*, IRType );
+static
+IRSB* mc_instrument ( void* closureV,
+                      IRSB* bb_in, VexGuestLayout* layout, 
+                      VexGuestExtents* vge,
+                      IRType gWordTy, IRType hWordTy );
+#endif
+
+/* chase_into_ok callback that always refuses: the harness only has the
+   bytes of the initial BB, so chasing would read unmapped memory. */
+static Bool chase_into_not_ok ( void* opaque, Addr dst ) {
+   return False;
+}
+/* needs_self_check callback: never request self-checking translations. */
+static UInt needs_self_check ( void *closureV, VexRegisterUpdates *pxControl,
+                               const VexGuestExtents *vge ) {
+   return 0;
+}
+
+/* Harness entry point.  Reads basic blocks from the .orig file named on
+   the command line (a header line ". bb-number bb-addr n-bytes"
+   followed by a line of hex bytes per BB), translates each with
+   LibVEX_Translate for the guest/host pairing selected by the #if
+   blocks below, and prints the host/guest expansion ratio plus a
+   checksum of the generated host code. */
+int main ( int argc, char** argv )
+{
+   FILE* f;
+   Int i;
+   UInt u, sum;
+   Addr32 orig_addr;
+   Int bb_number, n_bbs_done = 0;
+   Int orig_nbytes, trans_used;
+   VexTranslateResult tres;
+   VexControl vcon;
+   VexGuestExtents vge;
+   VexArchInfo vai_x86, vai_amd64, vai_ppc32, vai_arm, vai_mips32, vai_mips64;
+   VexAbiInfo vbi;
+   VexTranslateArgs vta;
+
+   if (argc != 2) {
+      fprintf(stderr, "usage: vex file.orig\n");
+      exit(1);
+   }
+   f = fopen(argv[1], "r");
+   if (!f) {
+      fprintf(stderr, "can't open `%s'\n", argv[1]);
+      exit(1);
+   }
+
+   /* Run with default params.  However, we can't allow bb chasing
+      since that causes the front end to get segfaults when it tries
+      to read code outside the initial BB we hand it.  So when calling
+      LibVEX_Translate, send in a chase-into predicate that always
+      returns False. */
+   LibVEX_default_VexControl ( &vcon );
+   vcon.iropt_level = 2;
+   vcon.guest_max_insns = 60;
+
+   LibVEX_Init ( &failure_exit, &log_bytes, 
+                 1,  /* debug_paranoia */ 
+                 &vcon );
+
+
+   while (!feof(f)) {
+
+      /* skip blank/non-record lines; records start with '.' */
+      __attribute__((unused))
+      char* unused1 = fgets(linebuf, N_LINEBUF,f);
+      if (linebuf[0] == 0) continue;
+      if (linebuf[0] != '.') continue;
+
+      if (n_bbs_done == TEST_N_BBS) break;
+      n_bbs_done++;
+
+      /* first line is:   . bb-number bb-addr n-bytes */
+      assert(3 == sscanf(&linebuf[1], " %d %x %d\n", 
+                                 & bb_number,
+                                 & orig_addr, & orig_nbytes ));
+      assert(orig_nbytes >= 1);
+      assert(!feof(f));
+      __attribute__((unused))
+      char* unused2 = fgets(linebuf, N_LINEBUF,f);
+      assert(linebuf[0] == '.');
+
+      /* second line is:   . byte byte byte etc */
+      if (verbose)
+         printf("============ Basic Block %d, Done %d, "
+                "Start %x, nbytes %2d ============", 
+                bb_number, n_bbs_done-1, orig_addr, orig_nbytes);
+
+      /* thumb ITstate analysis needs to examine the 18 bytes
+         preceding the first instruction.  So let's leave the first 18
+         zeroed out. */
+      memset(origbuf, 0, sizeof(origbuf));
+
+      assert(orig_nbytes >= 1 && orig_nbytes <= N_ORIGBUF);
+      for (i = 0; i < orig_nbytes; i++) {
+         assert(1 == sscanf(&linebuf[2 + 3*i], "%x", &u));
+         origbuf[18+ i] = (UChar)u;
+      }
+
+      /* FIXME: put sensible values into the .hwcaps fields */
+      LibVEX_default_VexArchInfo(&vai_x86);
+      vai_x86.hwcaps = VEX_HWCAPS_X86_MMXEXT | VEX_HWCAPS_X86_SSE1
+                       | VEX_HWCAPS_X86_SSE2 | VEX_HWCAPS_X86_SSE3;
+      vai_x86.endness = VexEndnessLE;
+
+      LibVEX_default_VexArchInfo(&vai_amd64);
+      vai_amd64.hwcaps = 0;
+      vai_amd64.endness = VexEndnessLE;
+
+      LibVEX_default_VexArchInfo(&vai_ppc32);
+      vai_ppc32.hwcaps = 0;
+      vai_ppc32.ppc_icache_line_szB = 128;
+
+      LibVEX_default_VexArchInfo(&vai_arm);
+      vai_arm.hwcaps = VEX_HWCAPS_ARM_VFP3 | VEX_HWCAPS_ARM_NEON | 7;
+
+      LibVEX_default_VexArchInfo(&vai_mips32);
+      vai_mips32.endness = VexEndnessLE;
+      vai_mips32.hwcaps = VEX_PRID_COMP_MIPS;
+
+      LibVEX_default_VexArchInfo(&vai_mips64);
+      vai_mips64.endness = VexEndnessLE;
+
+      LibVEX_default_VexAbiInfo(&vbi);
+      vbi.guest_stack_redzone_size = 128;
+
+      /* ----- Set up args for LibVEX_Translate ----- */
+
+      vta.abiinfo_both    = vbi;
+      vta.guest_bytes     = &origbuf[18];
+      vta.guest_bytes_addr = orig_addr;
+      vta.callback_opaque = NULL;
+      vta.chase_into_ok   = chase_into_not_ok;
+      vta.guest_extents   = &vge;
+      vta.host_bytes      = transbuf;
+      vta.host_bytes_size = N_TRANSBUF;
+      vta.host_bytes_used = &trans_used;
+
+#if 0 /* ppc32 -> ppc32 */
+      vta.arch_guest     = VexArchPPC32;
+      vta.archinfo_guest = vai_ppc32;
+      vta.arch_host      = VexArchPPC32;
+      vta.archinfo_host  = vai_ppc32;
+#endif
+#if 0 /* amd64 -> amd64 */
+      vta.arch_guest     = VexArchAMD64;
+      vta.archinfo_guest = vai_amd64;
+      vta.arch_host      = VexArchAMD64;
+      vta.archinfo_host  = vai_amd64;
+#endif
+#if 0 /* x86 -> x86 */
+      vta.arch_guest     = VexArchX86;
+      vta.archinfo_guest = vai_x86;
+      vta.arch_host      = VexArchX86;
+      vta.archinfo_host  = vai_x86;
+#endif
+#if 1 /* x86 -> mips32 */
+      vta.arch_guest     = VexArchX86;
+      vta.archinfo_guest = vai_x86;
+      vta.arch_host      = VexArchMIPS32;
+      vta.archinfo_host  = vai_mips32;
+#endif
+#if 0 /* amd64 -> mips64 */
+      vta.arch_guest     = VexArchAMD64;
+      vta.archinfo_guest = vai_amd64;
+      vta.arch_host      = VexArchMIPS64;
+      vta.archinfo_host  = vai_mips64;
+#endif
+#if 0 /* arm -> arm */
+      vta.arch_guest     = VexArchARM;
+      vta.archinfo_guest = vai_arm;
+      vta.arch_host      = VexArchARM;
+      vta.archinfo_host  = vai_arm;
+      /* ARM/Thumb only hacks, that are needed to keep the ITstate
+         analyser in the front end happy.  */
+      vta.guest_bytes     = &origbuf[18 +1];
+      vta.guest_bytes_addr = (Addr) &origbuf[18 +1];
+#endif
+
+#if 1 /* no instrumentation */
+      vta.instrument1     = NULL;
+      vta.instrument2     = NULL;
+#endif
+#if 0 /* addrcheck */
+      vta.instrument1     = ac_instrument;
+      vta.instrument2     = NULL;
+#endif
+#if 0 /* memcheck */
+      vta.instrument1     = mc_instrument;
+      vta.instrument2     = NULL;
+#endif
+      vta.needs_self_check  = needs_self_check;
+      vta.preamble_function = NULL;
+      vta.traceflags      = TEST_FLAGS;
+      vta.addProfInc      = False;
+      vta.sigill_diag     = True;
+
+      /* Dummy dispatcher entry points: the generated code is only
+         checksummed below, never executed. */
+      vta.disp_cp_chain_me_to_slowEP = (void*)0x12345678;
+      vta.disp_cp_chain_me_to_fastEP = (void*)0x12345679;
+      vta.disp_cp_xindir             = (void*)0x1234567A;
+      vta.disp_cp_xassisted          = (void*)0x1234567B;
+
+      vta.finaltidy = NULL;
+
+      /* translate the same BB TEST_N_ITERS times (for profiling) */
+      for (i = 0; i < TEST_N_ITERS; i++)
+         tres = LibVEX_Translate ( &vta );
+
+      if (tres.status != VexTransOK)
+         printf("\ntres = %d\n", (Int)tres.status);
+      assert(tres.status == VexTransOK);
+      assert(tres.n_sc_extents == 0);
+      assert(vge.n_used == 1);
+      assert((UInt)(vge.len[0]) == orig_nbytes);
+
+      /* expansion ratio and a simple checksum of the host code */
+      sum = 0;
+      for (i = 0; i < trans_used; i++)
+         sum += (UInt)transbuf[i];
+      printf ( " %6.2f ... %u\n", 
+               (double)trans_used / (double)vge.len[0], sum );
+   }
+
+   fclose(f);
+   printf("\n");
+   LibVEX_ShowAllocStats();
+
+   return 0;
+}
+
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+
+#if 0 /* UNUSED */
+
+/* Print a diagnostic and abort the run via failure_exit; never returns. */
+static
+__attribute((noreturn))
+void panic ( HChar* s )
+{
+  printf("\npanic: %s\n", s);
+  failure_exit();
+}
+
+/* Example addrcheck-style instrumenter (dead code: the enclosing
+   '#if 0' compiles it out).  Copies bb_in statement-by-statement into
+   a new IRSB, inserting before every load and store a call to a dummy
+   checking helper; sizes 1/2/4 get dedicated helpers, anything else
+   the generic one with the size passed as an extra argument.  It uses
+   old IR names (Ist_Tmp, Iex_LDle, Ist_STle), so it would need
+   reworking before being re-enabled; the helper addresses are fake
+   constants (0x123456xx), so the result must never be executed. */
+static
+IRSB* ac_instrument (IRSB* bb_in, VexGuestLayout* layout, IRType hWordTy )
+{
+/* Use this rather than eg. -1 because it's a UInt. */
+#define INVALID_DATA_SIZE   999999
+
+   Int         i;
+   Int         sz;
+   IRCallee*   helper;
+   IRStmt*    st;
+   IRExpr* data;
+   IRExpr* addr;
+   Bool needSz;
+
+   /* Set up BB */
+   IRSB* bb     = emptyIRSB();
+   bb->tyenv    = dopyIRTypeEnv(bb_in->tyenv);
+   bb->next     = dopyIRExpr(bb_in->next);
+   bb->jumpkind = bb_in->jumpkind;
+
+   /* No loads to consider in ->next. */
+   assert(isIRAtom(bb_in->next));
+
+   for (i = 0; i <  bb_in->stmts_used; i++) {
+      st = bb_in->stmts[i];
+      if (!st) continue;
+
+      switch (st->tag) {
+
+         case Ist_Tmp:
+            data = st->Ist.Tmp.data;
+            if (data->tag == Iex_LDle) {
+               /* a load: call the size-appropriate LOAD helper first */
+               addr = data->Iex.LDle.addr;
+               sz = sizeofIRType(data->Iex.LDle.ty);
+               needSz = False;
+               switch (sz) {
+                  case 4: helper = mkIRCallee(1, "ac_helperc_LOAD4", 
+                                                 (void*)0x12345601); break;
+                  case 2: helper = mkIRCallee(0, "ac_helperc_LOAD2",
+                                                 (void*)0x12345602); break;
+                  case 1: helper = mkIRCallee(1, "ac_helperc_LOAD1",
+                                                 (void*)0x12345603); break;
+                  default: helper = mkIRCallee(0, "ac_helperc_LOADN",
+                                                  (void*)0x12345604);
+                                                  needSz = True; break;
+               }
+               if (needSz) {
+                  addStmtToIRSB( 
+                     bb,
+                     IRStmt_Dirty(
+                        unsafeIRDirty_0_N( helper->regparms, 
+					   helper->name, helper->addr,
+                                           mkIRExprVec_2(addr, mkIRExpr_HWord(sz)))
+                  ));
+               } else {
+                  addStmtToIRSB( 
+                     bb,
+                     IRStmt_Dirty(
+                        unsafeIRDirty_0_N( helper->regparms, 
+					   helper->name, helper->addr, 
+                                           mkIRExprVec_1(addr) )
+                  ));
+               }
+            }
+            break;
+
+         case Ist_STle:
+            /* a store: call the size-appropriate STORE helper first */
+            data = st->Ist.STle.data;
+            addr = st->Ist.STle.addr;
+            assert(isIRAtom(data));
+            assert(isIRAtom(addr));
+            sz = sizeofIRType(typeOfIRExpr(bb_in->tyenv, data));
+            needSz = False;
+            switch (sz) {
+               case 4: helper = mkIRCallee(1, "ac_helperc_STORE4", 
+                                              (void*)0x12345605); break;
+               case 2: helper = mkIRCallee(0, "ac_helperc_STORE2", 
+                                              (void*)0x12345606); break;
+               case 1: helper = mkIRCallee(1, "ac_helperc_STORE1", 
+                                              (void*)0x12345607); break;
+               default: helper = mkIRCallee(0, "ac_helperc_STOREN", 
+                                               (void*)0x12345608);
+                                               needSz = True; break;
+            }
+            if (needSz) {
+               addStmtToIRSB( 
+                  bb,
+                  IRStmt_Dirty(
+                     unsafeIRDirty_0_N( helper->regparms, 
+    				        helper->name, helper->addr, 
+                                        mkIRExprVec_2(addr, mkIRExpr_HWord(sz)))
+               ));
+            } else {
+               addStmtToIRSB( 
+                  bb,
+                  IRStmt_Dirty(
+                     unsafeIRDirty_0_N( helper->regparms,
+                                        helper->name, helper->addr, 
+                                        mkIRExprVec_1(addr) )
+               ));
+            }
+            break;
+
+         case Ist_Put:
+            /* these can't touch memory; just sanity-check flatness */
+            assert(isIRAtom(st->Ist.Put.data));
+            break;
+
+         case Ist_PutI:
+            assert(isIRAtom(st->Ist.PutI.ix));
+            assert(isIRAtom(st->Ist.PutI.data));
+            break;
+
+         case Ist_Exit:
+            assert(isIRAtom(st->Ist.Exit.guard));
+            break;
+
+         case Ist_Dirty:
+            /* If the call doesn't interact with memory, we ain't
+               interested. */
+            if (st->Ist.Dirty.details->mFx == Ifx_None)
+               break;
+            goto unhandled;
+
+         default:
+         unhandled:
+            printf("\n");
+            ppIRStmt(st);
+            printf("\n");
+            panic("addrcheck: unhandled IRStmt");
+      }
+
+      /* copy the original statement through unchanged */
+      addStmtToIRSB( bb, dopyIRStmt(st));
+   }
+
+   return bb;
+}
+#endif /* UNUSED */
+
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////
+
+#if 1 /* UNUSED */
+
+/* Print a diagnostic and abort the run via failure_exit; never returns. */
+static
+__attribute((noreturn))
+void panic ( HChar* s )
+{
+  printf("\npanic: %s\n", s);
+  failure_exit();
+}
+
+#define tl_assert(xxx) assert(xxx)
+#define VG_(xxxx) xxxx
+#define tool_panic(zzz) panic(zzz)
+#define MC_(zzzz) MC_##zzzz
+#define TL_(zzzz) SK_##zzzz
+
+
+/* Dummy memcheck helper functions.  Only their addresses matter to the
+   instrumenter below; the instrumented IR built by this harness is not
+   executed, so empty bodies suffice. */
+static void MC_helperc_complain_undef ( void );
+static void MC_helperc_LOADV8 ( void );
+static void MC_helperc_LOADV4 ( void );
+static void MC_helperc_LOADV2 ( void );
+static void MC_helperc_LOADV1 ( void );
+static void MC_helperc_STOREV8( void );
+static void MC_helperc_STOREV4( void );
+static void MC_helperc_STOREV2( void );
+static void MC_helperc_STOREV1( void );
+static void MC_helperc_value_check0_fail( void );
+static void MC_helperc_value_check1_fail( void );
+static void MC_helperc_value_check4_fail( void );
+
+/* Empty stub bodies for the above. */
+static void MC_helperc_complain_undef ( void ) { }
+static void MC_helperc_LOADV8 ( void ) { }
+static void MC_helperc_LOADV4 ( void ) { }
+static void MC_helperc_LOADV2 ( void ) { }
+static void MC_helperc_LOADV1 ( void ) { }
+static void MC_helperc_STOREV8( void ) { }
+static void MC_helperc_STOREV4( void ) { }
+static void MC_helperc_STOREV2( void ) { }
+static void MC_helperc_STOREV1( void ) { }
+static void MC_helperc_value_check0_fail( void ) { }
+static void MC_helperc_value_check1_fail( void ) { }
+static void MC_helperc_value_check4_fail( void ) { }
+
+
+/*--------------------------------------------------------------------*/
+/*--- Instrument IR to perform memory checking operations.         ---*/
+/*---                                               mc_translate.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of MemCheck, a heavyweight Valgrind tool for
+   detecting memory errors.
+
+   Copyright (C) 2000-2013 Julian Seward 
+      jseward@acm.org
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+//#include "mc_include.h"
+
+
+/*------------------------------------------------------------*/
+/*--- Forward decls                                        ---*/
+/*------------------------------------------------------------*/
+
+struct _MCEnv;
+
+static IRType  shadowType ( IRType ty );
+static IRExpr* expr2vbits ( struct _MCEnv* mce, IRExpr* e );
+
+
+/*------------------------------------------------------------*/
+/*--- Memcheck running state, and tmp management.          ---*/
+/*------------------------------------------------------------*/
+
+/* Carries around state during memcheck instrumentation. */
+typedef
+   struct _MCEnv {
+      /* MODIFIED: the bb being constructed.  IRStmts are added. */
+      IRSB* bb;
+
+      /* MODIFIED: a table [0 .. #temps_in_original_bb-1] which maps
+         original temps to their current shadow temp.
+         Initially all entries are IRTemp_INVALID.  Entries are added
+         lazily since many original temps are not used due to
+         optimisation prior to instrumentation.  Note that floating
+         point original tmps are shadowed by integer tmps of the same
+         size, and Bit-typed original tmps are shadowed by the type
+         Ity_I8.  See comment below. */
+      IRTemp* tmpMap;
+      Int     n_originalTmps; /* for range checking */
+
+      /* READONLY: the guest layout.  This indicates which parts of
+         the guest state should be regarded as 'always defined'. */
+      VexGuestLayout* layout;
+      /* READONLY: the host word type.  Needed for constructing
+         arguments of type 'HWord' to be passed to helper functions.
+         Ity_I32 or Ity_I64 only. */
+      IRType hWordTy;
+   }
+   MCEnv;
+
+/* SHADOW TMP MANAGEMENT.  Shadow tmps are allocated lazily (on
+   demand), as they are encountered.  This is for two reasons.
+
+   (1) (less important reason): Many original tmps are unused due to
+   initial IR optimisation, and we do not want to waste space in
+   tables tracking them.
+
+   Shadow IRTemps are therefore allocated on demand.  mce.tmpMap is a
+   table indexed [0 .. n_types-1], which gives the current shadow for
+   each original tmp, or INVALID_IRTEMP if none is so far assigned.
+   It is necessary to support making multiple assignments to a shadow
+   -- specifically, after testing a shadow for definedness, it needs
+   to be made defined.  But IR's SSA property disallows this.  
+
+   (2) (more important reason): Therefore, when a shadow needs to get
+   a new value, a new temporary is created, the value is assigned to
+   that, and the tmpMap is updated to reflect the new binding.
+
+   A corollary is that if the tmpMap maps a given tmp to
+   INVALID_IRTEMP and we are hoping to read that shadow tmp, it means
+   there's a read-before-write error in the original tmps.  The IR
+   sanity checker should catch all such anomalies, however.  
+*/
+
+/* Find the tmp currently shadowing the given original tmp.  If none
+   so far exists, allocate one.  */
+static IRTemp findShadowTmp ( MCEnv* mce, IRTemp orig )
+{
+   tl_assert(orig < mce->n_originalTmps);
+   /* allocate the shadow lazily, on first use */
+   if (mce->tmpMap[orig] == IRTemp_INVALID) {
+      mce->tmpMap[orig] 
+         = newIRTemp(mce->bb->tyenv, 
+                     shadowType(mce->bb->tyenv->types[orig]));
+   }
+   return mce->tmpMap[orig];
+}
+
+/* Allocate a new shadow for the given original tmp.  This means any
+   previous shadow is abandoned.  This is needed because it is
+   necessary to give a new value to a shadow once it has been tested
+   for undefinedness, but unfortunately IR's SSA property disallows
+   this.  Instead we must abandon the old shadow, allocate a new one
+   and use that instead. */
+static void newShadowTmp ( MCEnv* mce, IRTemp orig )
+{
+   tl_assert(orig < mce->n_originalTmps);
+   /* unconditionally rebind: the previous shadow (if any) is abandoned */
+   mce->tmpMap[orig] 
+      = newIRTemp(mce->bb->tyenv, 
+                  shadowType(mce->bb->tyenv->types[orig]));
+}
+
+
+/*------------------------------------------------------------*/
+/*--- IRAtoms -- a subset of IRExprs                       ---*/
+/*------------------------------------------------------------*/
+
+/* An atom is either an IRExpr_Const or an IRExpr_Tmp, as defined by
+   isIRAtom() in libvex_ir.h.  Because this instrumenter expects flat
+   input, most of this code deals in atoms.  Usefully, a value atom
+   always has a V-value which is also an atom: constants are shadowed
+   by constants, and temps are shadowed by the corresponding shadow
+   temporary. */
+
+typedef  IRExpr  IRAtom;
+
+/* Sanity-check predicate: does a1 look like an atom from the original
+   (non-shadow) code?  Constants qualify, as do tmps whose index lies
+   below the original-tmp boundary. */
+static Bool isOriginalAtom ( MCEnv* mce, IRAtom* a1 )
+{
+   switch (a1->tag) {
+      case Iex_Const:
+         return True;
+      case Iex_RdTmp:
+         return a1->Iex.RdTmp.tmp < mce->n_originalTmps ? True : False;
+      default:
+         return False;
+   }
+}
+
+/* Sanity-check predicate: does a1 look like an atom from shadow code?
+   Constants qualify, as do tmps whose index lies at or above the
+   original-tmp boundary. */
+static Bool isShadowAtom ( MCEnv* mce, IRAtom* a1 )
+{
+   switch (a1->tag) {
+      case Iex_Const:
+         return True;
+      case Iex_RdTmp:
+         return a1->Iex.RdTmp.tmp >= mce->n_originalTmps ? True : False;
+      default:
+         return False;
+   }
+}
+
+/* (used for sanity checks only): check that both args are atoms and
+   are identically-kinded.  The original tested a1->tag twice in each
+   condition, so a2 was never examined and mixed (tmp, const) pairs
+   were wrongly reported as same-kinded. */
+static Bool sameKindedAtoms ( IRAtom* a1, IRAtom* a2 )
+{
+   if (a1->tag == Iex_RdTmp && a2->tag == Iex_RdTmp)
+      return True;
+   if (a1->tag == Iex_Const && a2->tag == Iex_Const)
+      return True;
+   return False;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Type management                                      ---*/
+/*------------------------------------------------------------*/
+
+/* Shadow state is always accessed using integer types.  This returns
+   an integer type with the same size (as per sizeofIRType) as the
+   given type.  The only valid shadow types are Bit, I8, I16, I32,
+   I64, V128. */
+
+static IRType shadowType ( IRType ty )
+{
+   switch (ty) {
+      /* integer types shadow themselves */
+      case Ity_I1:
+      case Ity_I8:
+      case Ity_I16:
+      case Ity_I32: 
+      case Ity_I64:  return ty;
+      /* FP types are shadowed by same-sized integer types */
+      case Ity_F32:  return Ity_I32;
+      case Ity_F64:  return Ity_I64;
+      case Ity_V128: return Ity_V128;
+      default: ppIRType(ty); 
+               VG_(tool_panic)("memcheck:shadowType");
+   }
+}
+
/* Produce a 'defined' value of the given shadow type.  Should only be
   supplied shadow types (Bit/I8/I16/I32/UI64).  In the shadow
   encoding, 0 bits mean 'defined', so this is an all-zeroes constant
   of the requested width. */
static IRExpr* definedOfType ( IRType ty ) {
   switch (ty) {
      case Ity_I1:   return IRExpr_Const(IRConst_U1(False));
      case Ity_I8:   return IRExpr_Const(IRConst_U8(0));
      case Ity_I16:  return IRExpr_Const(IRConst_U16(0));
      case Ity_I32:  return IRExpr_Const(IRConst_U32(0));
      case Ity_I64:  return IRExpr_Const(IRConst_U64(0));
      /* V128 constants are encoded as a 16-bit mask, one bit per
         byte lane; 0x0000 means all 128 bits zero. */
      case Ity_V128: return IRExpr_Const(IRConst_V128(0x0000));
      default:      VG_(tool_panic)("memcheck:definedOfType");
   }
}
+
+
+/*------------------------------------------------------------*/
+/*--- Constructing IR fragments                            ---*/
+/*------------------------------------------------------------*/
+
/* assign value to tmp: appends a WrTmp statement to the block. */
#define assign(_bb,_tmp,_expr)   \
   addStmtToIRSB((_bb), IRStmt_WrTmp((_tmp),(_expr)))

/* add stmt to a bb */
#define stmt(_bb,_stmt)    \
   addStmtToIRSB((_bb), (_stmt))

/* build various kinds of expressions -- thin constructor wrappers;
   each macro argument is expanded exactly once. */
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define unop(_op, _arg)          IRExpr_Unop((_op),(_arg))
#define mkU8(_n)                 IRExpr_Const(IRConst_U8(_n))
#define mkU16(_n)                IRExpr_Const(IRConst_U16(_n))
#define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
#define mkV128(_n)               IRExpr_Const(IRConst_V128(_n))
#define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
+
+/* bind the given expression to a new temporary, and return the
+   temporary.  This effectively converts an arbitrary expression into
+   an atom. */
+static IRAtom* assignNew ( MCEnv* mce, IRType ty, IRExpr* e ) {
+   IRTemp t = newIRTemp(mce->bb->tyenv, ty);
+   assign(mce->bb, t, e);
+   return mkexpr(t);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Constructing definedness primitive ops               ---*/
+/*------------------------------------------------------------*/
+
/* --------- Defined-if-either-defined --------- */

/* DifD: result bit is defined (0) if either argument bit is defined.
   Since 0 means 'defined' in the shadow encoding, this is simply a
   bitwise AND of the V bits.  One helper per width follows. */

static IRAtom* mkDifD8 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I8, binop(Iop_And8, a1, a2));
}

static IRAtom* mkDifD16 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I16, binop(Iop_And16, a1, a2));
}

static IRAtom* mkDifD32 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I32, binop(Iop_And32, a1, a2));
}

static IRAtom* mkDifD64 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I64, binop(Iop_And64, a1, a2));
}

static IRAtom* mkDifDV128 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_V128, binop(Iop_AndV128, a1, a2));
}
+
/* --------- Undefined-if-either-undefined --------- */

/* UifU: result bit is undefined (1) if either argument bit is
   undefined -- a bitwise OR of the V bits.  One helper per width,
   plus a type-dispatched wrapper (mkUifU) at the end. */

static IRAtom* mkUifU8 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I8, binop(Iop_Or8, a1, a2));
}

static IRAtom* mkUifU16 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I16, binop(Iop_Or16, a1, a2));
}

static IRAtom* mkUifU32 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I32, binop(Iop_Or32, a1, a2));
}

static IRAtom* mkUifU64 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_I64, binop(Iop_Or64, a1, a2));
}

static IRAtom* mkUifUV128 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
   tl_assert(isShadowAtom(mce,a1));
   tl_assert(isShadowAtom(mce,a2));
   return assignNew(mce, Ity_V128, binop(Iop_OrV128, a1, a2));
}

/* Dispatch on the shadow type; panics on any non-shadow type. */
static IRAtom* mkUifU ( MCEnv* mce, IRType vty, IRAtom* a1, IRAtom* a2 ) {
   switch (vty) {
      case Ity_I8:   return mkUifU8(mce, a1, a2);
      case Ity_I16:  return mkUifU16(mce, a1, a2);
      case Ity_I32:  return mkUifU32(mce, a1, a2);
      case Ity_I64:  return mkUifU64(mce, a1, a2);
      case Ity_V128: return mkUifUV128(mce, a1, a2);
      default:
         VG_(printf)("\n"); ppIRType(vty); VG_(printf)("\n");
         VG_(tool_panic)("memcheck:mkUifU");
   }
}
+
/* --------- The Left-family of operations. --------- */

/* Left(x) = x | -x: smears the lowest 1 (undefined) bit of x leftwards
   to all higher bit positions.  Negation is expressed as Sub(0, x)
   because a dedicated Neg op is not used here (see commented-out
   alternative in each body). */

static IRAtom* mkLeft8 ( MCEnv* mce, IRAtom* a1 ) {
   tl_assert(isShadowAtom(mce,a1));
   /* It's safe to duplicate a1 since it's only an atom */
   return assignNew(mce, Ity_I8, 
                    binop(Iop_Or8, a1, 
                          assignNew(mce, Ity_I8,
                                    /* unop(Iop_Neg8, a1)))); */
                                    binop(Iop_Sub8, mkU8(0), a1) )));
}

static IRAtom* mkLeft16 ( MCEnv* mce, IRAtom* a1 ) {
   tl_assert(isShadowAtom(mce,a1));
   /* It's safe to duplicate a1 since it's only an atom */
   return assignNew(mce, Ity_I16, 
                    binop(Iop_Or16, a1, 
                          assignNew(mce, Ity_I16,
                                    /* unop(Iop_Neg16, a1)))); */
                                    binop(Iop_Sub16, mkU16(0), a1) )));
}

static IRAtom* mkLeft32 ( MCEnv* mce, IRAtom* a1 ) {
   tl_assert(isShadowAtom(mce,a1));
   /* It's safe to duplicate a1 since it's only an atom */
   return assignNew(mce, Ity_I32, 
                    binop(Iop_Or32, a1, 
                          assignNew(mce, Ity_I32,
                                    /* unop(Iop_Neg32, a1)))); */
                                    binop(Iop_Sub32, mkU32(0), a1) )));
}
+
/* --------- 'Improvement' functions for AND/OR. --------- */

/* ImproveAND(data, vbits) = data OR vbits.  Defined (0) data 0s give
   defined (0); all other -> undefined (1).  The point: x AND 0 is 0
   regardless of the definedness of x, so a defined 0 data bit can
   mask an undefined input bit.
*/
static IRAtom* mkImproveAND8 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(mce, Ity_I8, binop(Iop_Or8, data, vbits));
}

static IRAtom* mkImproveAND16 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(mce, Ity_I16, binop(Iop_Or16, data, vbits));
}

static IRAtom* mkImproveAND32 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(mce, Ity_I32, binop(Iop_Or32, data, vbits));
}

static IRAtom* mkImproveAND64 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(mce, Ity_I64, binop(Iop_Or64, data, vbits));
}

static IRAtom* mkImproveANDV128 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(mce, Ity_V128, binop(Iop_OrV128, data, vbits));
}
+
/* ImproveOR(data, vbits) = ~data OR vbits.  Defined (0) data 1s give
   defined (0); all other -> undefined (1).  Dual of ImproveAND: x OR 1
   is 1 regardless of the definedness of x, so a defined 1 data bit can
   mask an undefined input bit.
*/
static IRAtom* mkImproveOR8 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(
             mce, Ity_I8, 
             binop(Iop_Or8, 
                   assignNew(mce, Ity_I8, unop(Iop_Not8, data)), 
                   vbits) );
}

static IRAtom* mkImproveOR16 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(
             mce, Ity_I16, 
             binop(Iop_Or16, 
                   assignNew(mce, Ity_I16, unop(Iop_Not16, data)), 
                   vbits) );
}

static IRAtom* mkImproveOR32 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(
             mce, Ity_I32, 
             binop(Iop_Or32, 
                   assignNew(mce, Ity_I32, unop(Iop_Not32, data)), 
                   vbits) );
}

static IRAtom* mkImproveOR64 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(
             mce, Ity_I64, 
             binop(Iop_Or64, 
                   assignNew(mce, Ity_I64, unop(Iop_Not64, data)), 
                   vbits) );
}

static IRAtom* mkImproveORV128 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
{
   tl_assert(isOriginalAtom(mce, data));
   tl_assert(isShadowAtom(mce, vbits));
   tl_assert(sameKindedAtoms(data, vbits));
   return assignNew(
             mce, Ity_V128, 
             binop(Iop_OrV128, 
                   assignNew(mce, Ity_V128, unop(Iop_NotV128, data)), 
                   vbits) );
}
+
/* --------- Pessimising casts. --------- */

/* Pessimising cast: collapse vbits to a single 'any bit undefined?'
   bit, then widen that bit to fill dst_ty.  The result is all-0s
   (defined) iff every input bit was defined, else all-1s. */
static IRAtom* mkPCastTo( MCEnv* mce, IRType dst_ty, IRAtom* vbits ) 
{
   IRType  ty;
   IRAtom* tmp1;
   /* Note, dst_ty is a shadow type, not an original type. */
   /* First of all, collapse vbits down to a single bit. */
   tl_assert(isShadowAtom(mce,vbits));
   ty   = typeOfIRExpr(mce->bb->tyenv, vbits);
   tmp1 = NULL;
   switch (ty) {
      case Ity_I1:
         tmp1 = vbits;
         break;
      case Ity_I8: 
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE8, vbits, mkU8(0)));
         break;
      case Ity_I16: 
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE16, vbits, mkU16(0)));
         break;
      case Ity_I32: 
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE32, vbits, mkU32(0)));
         break;
      case Ity_I64: 
         tmp1 = assignNew(mce, Ity_I1, binop(Iop_CmpNE64, vbits, mkU64(0)));
         break;
      default:
         /* NB: no V128 source case -- V128 vbits cannot be collapsed
            here. */
         VG_(tool_panic)("mkPCastTo(1)");
   }
   tl_assert(tmp1);
   /* Now widen up to the dst type, via sign-extension so the single
      bit fills the whole result. */
   switch (dst_ty) {
      case Ity_I1:
         return tmp1;
      case Ity_I8: 
         return assignNew(mce, Ity_I8, unop(Iop_1Sto8, tmp1));
      case Ity_I16: 
         return assignNew(mce, Ity_I16, unop(Iop_1Sto16, tmp1));
      case Ity_I32: 
         return assignNew(mce, Ity_I32, unop(Iop_1Sto32, tmp1));
      case Ity_I64: 
         return assignNew(mce, Ity_I64, unop(Iop_1Sto64, tmp1));
      case Ity_V128:
         /* Widen to I64, then duplicate into both 64-bit halves. */
         tmp1 = assignNew(mce, Ity_I64,  unop(Iop_1Sto64, tmp1));
         tmp1 = assignNew(mce, Ity_V128, binop(Iop_64HLtoV128, tmp1, tmp1));
         return tmp1;
      default: 
         ppIRType(dst_ty);
         VG_(tool_panic)("mkPCastTo(2)");
   }
}
+
+
+/*------------------------------------------------------------*/
+/*--- Emit a test and complaint if something is undefined. ---*/
+/*------------------------------------------------------------*/
+
/* Set the annotations on a dirty helper to indicate that the stack
   pointer and instruction pointers might be read.  This is the
   behaviour of all 'emit-a-complaint' style functions we might
   call. */

static void setHelperAnns ( MCEnv* mce, IRDirty* di ) {
   di->nFxState = 2;
   /* slot 0: the guest stack pointer may be read */
   di->fxState[0].fx     = Ifx_Read;
   di->fxState[0].offset = mce->layout->offset_SP;
   di->fxState[0].size   = mce->layout->sizeof_SP;
   /* slot 1: the guest instruction pointer may be read */
   di->fxState[1].fx     = Ifx_Read;
   di->fxState[1].offset = mce->layout->offset_IP;
   di->fxState[1].size   = mce->layout->sizeof_IP;
}
+
+
/* Check the supplied **original** atom for undefinedness, and emit a
   complaint if so.  Once that happens, mark it as defined.  This is
   possible because the atom is either a tmp or literal.  If it's a
   tmp, it will be shadowed by a tmp, and so we can set the shadow to
   be defined.  In fact as mentioned above, we will have to allocate a
   new tmp to carry the new 'defined' shadow value, and update the
   original->tmp mapping accordingly; we cannot simply assign a new
   value to an existing shadow tmp as this breaks SSAness -- resulting
   in the post-instrumentation sanity checker spluttering in disapproval. 
*/
static void complainIfUndefined ( MCEnv* mce, IRAtom* atom )
{
   IRAtom*  vatom;
   IRType   ty;
   Int      sz;
   IRDirty* di;
   IRAtom*  cond;

   /* Since the original expression is atomic, there's no duplicated
      work generated by making multiple V-expressions for it.  So we
      don't really care about the possibility that someone else may
      also create a V-interpretion for it. */
   tl_assert(isOriginalAtom(mce, atom));
   vatom = expr2vbits( mce, atom );
   tl_assert(isShadowAtom(mce, vatom));
   tl_assert(sameKindedAtoms(atom, vatom));

   ty = typeOfIRExpr(mce->bb->tyenv, vatom);

   /* sz is only used for constructing the error message */
   sz = ty==Ity_I1 ? 0 : sizeofIRType(ty);

   cond = mkPCastTo( mce, Ity_I1, vatom );
   /* cond will be 0 if all defined, and 1 if any not defined. */

   /* Sizes 0, 1 and 4 have dedicated no-argument helpers; any other
      size goes through the generic helper, which takes sz as its one
      argument. */
   switch (sz) {
      case 0:
         di = unsafeIRDirty_0_N( 0/*regparms*/, 
                                 "MC_(helperc_value_check0_fail)",
                                 &MC_(helperc_value_check0_fail),
                                 mkIRExprVec_0() 
                               );
         break;
      case 1:
         di = unsafeIRDirty_0_N( 0/*regparms*/, 
                                 "MC_(helperc_value_check1_fail)",
                                 &MC_(helperc_value_check1_fail),
                                 mkIRExprVec_0() 
                               );
         break;
      case 4:
         di = unsafeIRDirty_0_N( 0/*regparms*/, 
                                 "MC_(helperc_value_check4_fail)",
                                 &MC_(helperc_value_check4_fail),
                                 mkIRExprVec_0() 
                               );
         break;
      default:
         di = unsafeIRDirty_0_N( 1/*regparms*/, 
                                 "MC_(helperc_complain_undef)",
                                 &MC_(helperc_complain_undef),
                                 mkIRExprVec_1( mkIRExpr_HWord( sz ))
                               );
         break;
   }
   /* The helper only fires when cond (any-undefined) is true. */
   di->guard = cond;
   setHelperAnns( mce, di );
   stmt( mce->bb, IRStmt_Dirty(di));

   /* Set the shadow tmp to be defined.  First, update the
      orig->shadow tmp mapping to reflect the fact that this shadow is
      getting a new value. */
   tl_assert(isIRAtom(vatom));
   /* sameKindedAtoms ... */
   if (vatom->tag == Iex_RdTmp) {
      tl_assert(atom->tag == Iex_RdTmp);
      /* Allocate a fresh shadow tmp (preserving SSA form) and mark it
         all-defined. */
      newShadowTmp(mce, atom->Iex.RdTmp.tmp);
      assign(mce->bb, findShadowTmp(mce, atom->Iex.RdTmp.tmp), 
                      definedOfType(ty));
   }
}
+
+
+/*------------------------------------------------------------*/
+/*--- Shadowing PUTs/GETs, and indexed variants thereof    ---*/
+/*------------------------------------------------------------*/
+
/* Examine the always-defined sections declared in layout to see if
   the (offset,size) section is within one.  Note, it is an error to
   partially fall into such a region: (offset,size) should either be
   completely in such a region or completely not-in such a region.  
*/
static Bool isAlwaysDefd ( MCEnv* mce, Int offset, Int size )
{
   Int minoffD, maxoffD, i;
   Int minoff = offset;
   Int maxoff = minoff + size - 1;
   /* Guest state offsets are expected to fit in 16 bits. */
   tl_assert((minoff & ~0xFFFF) == 0);
   tl_assert((maxoff & ~0xFFFF) == 0);

   for (i = 0; i < mce->layout->n_alwaysDefd; i++) {
      minoffD = mce->layout->alwaysDefd[i].offset;
      maxoffD = minoffD + mce->layout->alwaysDefd[i].size - 1;
      tl_assert((minoffD & ~0xFFFF) == 0);
      tl_assert((maxoffD & ~0xFFFF) == 0);

      if (maxoff < minoffD || maxoffD < minoff)
         continue; /* no overlap */
      if (minoff >= minoffD && maxoff <= maxoffD)
         return True; /* completely contained in an always-defd section */

      /* Partial overlap is a layout error, not a soft failure. */
      VG_(tool_panic)("memcheck:isAlwaysDefd:partial overlap");
   }
   return False; /* could not find any containing section */
}
+
+
/* Generate into bb suitable actions to shadow this Put.  If the state
   slice is marked 'always defined', do nothing.  Otherwise, write the
   supplied V bits to the shadow state.  We can pass in either an
   original atom or a V-atom, but not both.  In the former case the
   relevant V-bits are then generated from the original.
*/
static
void do_shadow_PUT ( MCEnv* mce,  Int offset, 
                     IRAtom* atom, IRAtom* vatom )
{
   IRType ty;
   if (atom) {
      /* Caller supplied the original; derive its V-bits. */
      tl_assert(!vatom);
      tl_assert(isOriginalAtom(mce, atom));
      vatom = expr2vbits( mce, atom );
   } else {
      /* Caller supplied the V-bits directly. */
      tl_assert(vatom);
      tl_assert(isShadowAtom(mce, vatom));
   }

   ty = typeOfIRExpr(mce->bb->tyenv, vatom);
   tl_assert(ty != Ity_I1);
   if (isAlwaysDefd(mce, offset, sizeofIRType(ty))) {
      /* later: no ... */
      /* emit code to emit a complaint if any of the vbits are 1. */
      /* complainIfUndefined(mce, atom); */
   } else {
      /* Do a plain shadow Put.  The shadow area sits total_sizeB
         bytes above the real guest state. */
      stmt( mce->bb, IRStmt_Put( offset + mce->layout->total_sizeB, vatom ) );
   }
}
+
+
+/* Return an expression which contains the V bits corresponding to the
+   given GETI (passed in in pieces). 
+*/
+static
+void do_shadow_PUTI ( MCEnv* mce, 
+                      IRRegArray* descr, IRAtom* ix, Int bias, IRAtom* atom )
+{
+   IRAtom* vatom;
+   IRType  ty, tyS;
+   Int     arrSize;;
+
+   tl_assert(isOriginalAtom(mce,atom));
+   vatom = expr2vbits( mce, atom );
+   tl_assert(sameKindedAtoms(atom, vatom));
+   ty   = descr->elemTy;
+   tyS  = shadowType(ty);
+   arrSize = descr->nElems * sizeofIRType(ty);
+   tl_assert(ty != Ity_I1);
+   tl_assert(isOriginalAtom(mce,ix));
+   complainIfUndefined(mce,ix);
+   if (isAlwaysDefd(mce, descr->base, arrSize)) {
+      /* later: no ... */
+      /* emit code to emit a complaint if any of the vbits are 1. */
+      /* complainIfUndefined(mce, atom); */
+   } else {
+      /* Do a cloned version of the Put that refers to the shadow
+         area. */
+      IRRegArray* new_descr 
+         = mkIRRegArray( descr->base + mce->layout->total_sizeB, 
+                      tyS, descr->nElems);
+      stmt( mce->bb, IRStmt_PutI( mkIRPutI( new_descr, ix, bias, vatom ) ));
+   }
+}
+
+
/* Return an expression which contains the V bits corresponding to the
   given GET (passed in in pieces). 
*/
static 
IRExpr* shadow_GET ( MCEnv* mce, Int offset, IRType ty )
{
   IRType tyS = shadowType(ty);
   tl_assert(ty != Ity_I1);
   if (isAlwaysDefd(mce, offset, sizeofIRType(ty))) {
      /* Always defined, return all zeroes of the relevant type */
      return definedOfType(tyS);
   } else {
      /* return a cloned version of the Get that refers to the shadow
         area. */
      return IRExpr_Get( offset + mce->layout->total_sizeB, tyS );
   }
}
+
+
/* Return an expression which contains the V bits corresponding to the
   given GETI (passed in in pieces). 
*/
static
IRExpr* shadow_GETI ( MCEnv* mce, IRRegArray* descr, IRAtom* ix, Int bias )
{
   IRType ty   = descr->elemTy;
   IRType tyS  = shadowType(ty);
   Int arrSize = descr->nElems * sizeofIRType(ty);
   tl_assert(ty != Ity_I1);
   /* An undefined index must be reported before we use it to address
      the shadow array. */
   tl_assert(isOriginalAtom(mce,ix));
   complainIfUndefined(mce,ix);
   if (isAlwaysDefd(mce, descr->base, arrSize)) {
      /* Always defined, return all zeroes of the relevant type */
      return definedOfType(tyS);
   } else {
      /* return a cloned version of the Get that refers to the shadow
         area. */
      IRRegArray* new_descr 
         = mkIRRegArray( descr->base + mce->layout->total_sizeB, 
                      tyS, descr->nElems);
      return IRExpr_GetI( new_descr, ix, bias );
   }
}
+
+
+/*------------------------------------------------------------*/
+/*--- Generating approximations for unknown operations,    ---*/
+/*--- using lazy-propagate semantics                       ---*/
+/*------------------------------------------------------------*/
+
/* Lazy propagation of undefinedness from two values, resulting in the
   specified shadow type.  Pessimistic: if any bit of either operand is
   undefined, the entire result is undefined.
*/
static
IRAtom* mkLazy2 ( MCEnv* mce, IRType finalVty, IRAtom* va1, IRAtom* va2 )
{
   /* force everything via 32-bit intermediaries. */
   IRAtom* at;
   tl_assert(isShadowAtom(mce,va1));
   tl_assert(isShadowAtom(mce,va2));
   /* PCast each operand to I32, merge with UifU, then PCast the
      merged bit out to the requested result type. */
   at = mkPCastTo(mce, Ity_I32, va1);
   at = mkUifU(mce, Ity_I32, at, mkPCastTo(mce, Ity_I32, va2));
   at = mkPCastTo(mce, finalVty, at);
   return at;
}
+
+
/* Do the lazy propagation game from a null-terminated vector of
   atoms.  This is presumably the arguments to a helper call, so the
   IRCallee info is also supplied in order that we can know which
   arguments should be ignored (via the .mcx_mask field). 
*/
static
IRAtom* mkLazyN ( MCEnv* mce, 
                  IRAtom** exprvec, IRType finalVtype, IRCallee* cee )
{
   Int i;
   IRAtom* here;
   /* Accumulator starts all-defined; each considered arg is UifU'd
      into it at 32-bit width. */
   IRAtom* curr = definedOfType(Ity_I32);
   for (i = 0; exprvec[i]; i++) {
      /* mcx_mask is a 32-bit mask, so at most 32 args are supported. */
      tl_assert(i < 32);
      tl_assert(isOriginalAtom(mce, exprvec[i]));
      /* Only take notice of this arg if the callee's mc-exclusion
         mask does not say it is to be excluded. */
      if (cee->mcx_mask & (1<<i)) {
         /* the arg is to be excluded from definedness checking.  Do
            nothing. */
         if (0) VG_(printf)("excluding %s(%d)\n", cee->name, i);
      } else {
         /* calculate the arg's definedness, and pessimistically merge
            it in. */
         here = mkPCastTo( mce, Ity_I32, expr2vbits(mce, exprvec[i]) );
         curr = mkUifU32(mce, here, curr);
      }
   }
   return mkPCastTo(mce, finalVtype, curr );
}
+
+
+/*------------------------------------------------------------*/
+/*--- Generating expensive sequences for exact carry-chain ---*/
+/*--- propagation in add/sub and related operations.       ---*/
+/*------------------------------------------------------------*/
+
/* Exact (carry-chain-aware) definedness for 32-bit addition, instead
   of the cheap PCast approximation.  qaa/qbb are the shadow (V-bit)
   values for the original operands aa/bb.  The scheme: compute the
   smallest (a_min+b_min) and largest (a_max+b_max) sums consistent
   with the undefined bits; any result bit that can differ between the
   two, or was undefined in an input, is undefined. */
static
__attribute__((unused))
IRAtom* expensiveAdd32 ( MCEnv* mce, IRAtom* qaa, IRAtom* qbb, 
                                     IRAtom* aa,  IRAtom* bb )
{
   IRAtom *a_min, *b_min, *a_max, *b_max;
   IRType ty;
   IROp   opAND, opOR, opXOR, opNOT, opADD;

   tl_assert(isShadowAtom(mce,qaa));
   tl_assert(isShadowAtom(mce,qbb));
   tl_assert(isOriginalAtom(mce,aa));
   tl_assert(isOriginalAtom(mce,bb));
   tl_assert(sameKindedAtoms(qaa,aa));
   tl_assert(sameKindedAtoms(qbb,bb));

   ty    = Ity_I32;
   opAND = Iop_And32;
   opOR  = Iop_Or32;
   opXOR = Iop_Xor32;
   opNOT = Iop_Not32;
   opADD = Iop_Add32;

   // a_min = aa & ~qaa   (undefined bits forced to 0)
   a_min = assignNew(mce,ty, 
                     binop(opAND, aa,
                                  assignNew(mce,ty, unop(opNOT, qaa))));

   // b_min = bb & ~qbb
   b_min = assignNew(mce,ty, 
                     binop(opAND, bb,
                                  assignNew(mce,ty, unop(opNOT, qbb))));

   // a_max = aa | qaa    (undefined bits forced to 1)
   a_max = assignNew(mce,ty, binop(opOR, aa, qaa));

   // b_max = bb | qbb
   b_max = assignNew(mce,ty, binop(opOR, bb, qbb));

   // result = (qaa | qbb) | ((a_min + b_min) ^ (a_max + b_max))
   return
   assignNew(mce,ty,
      binop( opOR,
             assignNew(mce,ty, binop(opOR, qaa, qbb)),
             assignNew(mce,ty, 
                binop(opXOR, assignNew(mce,ty, binop(opADD, a_min, b_min)),
                             assignNew(mce,ty, binop(opADD, a_max, b_max))
                )
             )
      )
   );
}
+
+
+/*------------------------------------------------------------*/
+/*--- Helpers for dealing with vector primops.            ---*/
+/*------------------------------------------------------------*/
+
+/* Vector pessimisation -- pessimise within each lane individually. */
+
+static IRAtom* mkPCast8x16 ( MCEnv* mce, IRAtom* at )
+{
+   return assignNew(mce, Ity_V128, unop(Iop_CmpNEZ8x16, at));
+}
+
+static IRAtom* mkPCast16x8 ( MCEnv* mce, IRAtom* at )
+{
+   return assignNew(mce, Ity_V128, unop(Iop_CmpNEZ16x8, at));
+}
+
+static IRAtom* mkPCast32x4 ( MCEnv* mce, IRAtom* at )
+{
+   return assignNew(mce, Ity_V128, unop(Iop_CmpNEZ32x4, at));
+}
+
+static IRAtom* mkPCast64x2 ( MCEnv* mce, IRAtom* at )
+{
+   return assignNew(mce, Ity_V128, unop(Iop_CmpNEZ64x2, at));
+}
+
+
/* Here's a simple scheme capable of handling ops derived from SSE1
   code and while only generating ops that can be efficiently
   implemented in SSE1. */

/* All-lanes versions are straightforward:

   binary32Fx4(x,y)   ==> PCast32x4(UifUV128(x#,y#))

   unary32Fx4(x)      ==> PCast32x4(x#)

   Lowest-lane-only versions are more complex:

   binary32F0x4(x,y)  ==> SetV128lo32(
                             x#, 
                             PCast32(V128to32(UifUV128(x#,y#))) 
                          )

   This is perhaps not so obvious.  In particular, it's faster to
   do a V128-bit UifU and then take the bottom 32 bits than the more
   obvious scheme of taking the bottom 32 bits of each operand
   and doing a 32-bit UifU.  Basically since UifU is fast and 
   chopping lanes off vector values is slow.

   Finally:

   unary32F0x4(x)     ==> SetV128lo32(
                             x#, 
                             PCast32(V128to32(x#)) 
                          )

   Where:

   PCast32(v#)   = 1Sto32(CmpNE32(v#,0))
   PCast32x4(v#) = CmpNEZ32x4(v#)
*/

/* All four 32-bit float lanes: UifU the operands, then pessimise
   per-lane. */
static
IRAtom* binary32Fx4 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
{
   IRAtom* at;
   tl_assert(isShadowAtom(mce, vatomX));
   tl_assert(isShadowAtom(mce, vatomY));
   at = mkUifUV128(mce, vatomX, vatomY);
   at = assignNew(mce, Ity_V128, mkPCast32x4(mce, at));
   return at;
}

/* All four lanes, one operand: just per-lane pessimise. */
static
IRAtom* unary32Fx4 ( MCEnv* mce, IRAtom* vatomX )
{
   IRAtom* at;
   tl_assert(isShadowAtom(mce, vatomX));
   at = assignNew(mce, Ity_V128, mkPCast32x4(mce, vatomX));
   return at;
}

/* Lowest 32-bit lane only; upper lanes keep x's shadow unchanged. */
static
IRAtom* binary32F0x4 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
{
   IRAtom* at;
   tl_assert(isShadowAtom(mce, vatomX));
   tl_assert(isShadowAtom(mce, vatomY));
   at = mkUifUV128(mce, vatomX, vatomY);
   at = assignNew(mce, Ity_I32, unop(Iop_V128to32, at));
   at = mkPCastTo(mce, Ity_I32, at);
   at = assignNew(mce, Ity_V128, binop(Iop_SetV128lo32, vatomX, at));
   return at;
}

/* Lowest 32-bit lane only, one operand. */
static
IRAtom* unary32F0x4 ( MCEnv* mce, IRAtom* vatomX )
{
   IRAtom* at;
   tl_assert(isShadowAtom(mce, vatomX));
   at = assignNew(mce, Ity_I32, unop(Iop_V128to32, vatomX));
   at = mkPCastTo(mce, Ity_I32, at);
   at = assignNew(mce, Ity_V128, binop(Iop_SetV128lo32, vatomX, at));
   return at;
}
+
/* --- ... and ... 64Fx2 versions of the same (for SSE2) ... --- */

/* Both 64-bit float lanes: UifU the operands, then pessimise
   per-lane. */
static
IRAtom* binary64Fx2 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
{
   IRAtom* at;
   tl_assert(isShadowAtom(mce, vatomX));
   tl_assert(isShadowAtom(mce, vatomY));
   at = mkUifUV128(mce, vatomX, vatomY);
   at = assignNew(mce, Ity_V128, mkPCast64x2(mce, at));
   return at;
}

/* Both lanes, one operand. */
static
IRAtom* unary64Fx2 ( MCEnv* mce, IRAtom* vatomX )
{
   IRAtom* at;
   tl_assert(isShadowAtom(mce, vatomX));
   at = assignNew(mce, Ity_V128, mkPCast64x2(mce, vatomX));
   return at;
}

/* Lowest 64-bit lane only; upper lane keeps x's shadow unchanged. */
static
IRAtom* binary64F0x2 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
{
   IRAtom* at;
   tl_assert(isShadowAtom(mce, vatomX));
   tl_assert(isShadowAtom(mce, vatomY));
   at = mkUifUV128(mce, vatomX, vatomY);
   at = assignNew(mce, Ity_I64, unop(Iop_V128to64, at));
   at = mkPCastTo(mce, Ity_I64, at);
   at = assignNew(mce, Ity_V128, binop(Iop_SetV128lo64, vatomX, at));
   return at;
}

/* Lowest 64-bit lane only, one operand. */
static
IRAtom* unary64F0x2 ( MCEnv* mce, IRAtom* vatomX )
{
   IRAtom* at;
   tl_assert(isShadowAtom(mce, vatomX));
   at = assignNew(mce, Ity_I64, unop(Iop_V128to64, vatomX));
   at = mkPCastTo(mce, Ity_I64, at);
   at = assignNew(mce, Ity_V128, binop(Iop_SetV128lo64, vatomX, at));
   return at;
}
+
/* --- --- Vector saturated narrowing --- --- */

/* This is quite subtle.  What to do is simple:

   Let the original narrowing op be QNarrowW{S,U}xN.  Produce:

      the-narrowing-op( PCastWxN(vatom1), PCastWxN(vatom2))

   Why this is right is not so simple.  Consider a lane in the args,
   vatom1 or 2, doesn't matter.

   After the PCast, that lane is all 0s (defined) or all
   1s(undefined).

   Both signed and unsigned saturating narrowing of all 0s produces
   all 0s, which is what we want.

   The all-1s case is more complex.  Unsigned narrowing interprets an
   all-1s input as the largest unsigned integer, and so produces all
   1s as a result since that is the largest unsigned value at the
   smaller width.

   Signed narrowing interprets all 1s as -1.  Fortunately, -1 narrows
   to -1, so we still wind up with all 1s at the smaller width.

   So: In short, pessimise the args, then apply the original narrowing
   op.
*/
static
IRAtom* vectorNarrowV128 ( MCEnv* mce, IROp narrow_op, 
                          IRAtom* vatom1, IRAtom* vatom2)
{
   IRAtom *at1, *at2, *at3;
   /* pcast selects the per-lane pessimiser matching the source lane
      width of the narrowing op. */
   IRAtom* (*pcast)( MCEnv*, IRAtom* );
   switch (narrow_op) {
      case Iop_QNarrowBin32Sto16Sx8: pcast = mkPCast32x4; break;
      case Iop_QNarrowBin16Sto8Sx16: pcast = mkPCast16x8; break;
      case Iop_QNarrowBin16Sto8Ux16: pcast = mkPCast16x8; break;
      default: VG_(tool_panic)("vectorNarrowV128");
   }
   tl_assert(isShadowAtom(mce,vatom1));
   tl_assert(isShadowAtom(mce,vatom2));
   at1 = assignNew(mce, Ity_V128, pcast(mce, vatom1));
   at2 = assignNew(mce, Ity_V128, pcast(mce, vatom2));
   at3 = assignNew(mce, Ity_V128, binop(narrow_op, at1, at2));
   return at3;
}
+
+
/* --- --- Vector integer arithmetic --- --- */

/* Simple ... UifU the args and per-lane pessimise the results.  One
   helper per lane width. */
static
IRAtom* binary8Ix16 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
{
   IRAtom* at;
   at = mkUifUV128(mce, vatom1, vatom2);
   at = mkPCast8x16(mce, at);
   return at;   
}

static
IRAtom* binary16Ix8 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
{
   IRAtom* at;
   at = mkUifUV128(mce, vatom1, vatom2);
   at = mkPCast16x8(mce, at);
   return at;   
}

static
IRAtom* binary32Ix4 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
{
   IRAtom* at;
   at = mkUifUV128(mce, vatom1, vatom2);
   at = mkPCast32x4(mce, at);
   return at;   
}

static
IRAtom* binary64Ix2 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
{
   IRAtom* at;
   at = mkUifUV128(mce, vatom1, vatom2);
   at = mkPCast64x2(mce, at);
   return at;   
}
+
+
+/*------------------------------------------------------------*/
+/*--- Generate shadow values from all kinds of IRExprs.    ---*/
+/*------------------------------------------------------------*/
+
+/* Compute the shadow (V-bit) expression for a binary op, given the
+   original argument atoms.  Dispatches per IROp to the propagation
+   scheme appropriate for that op; panics on unhandled ops. */
+static 
+IRAtom* expr2vbits_Binop ( MCEnv* mce,
+                           IROp op,
+                           IRAtom* atom1, IRAtom* atom2 )
+{
+   IRType  and_or_ty;
+   IRAtom* (*uifu)    (MCEnv*, IRAtom*, IRAtom*);
+   IRAtom* (*difd)    (MCEnv*, IRAtom*, IRAtom*);
+   IRAtom* (*improve) (MCEnv*, IRAtom*, IRAtom*);
+
+   IRAtom* vatom1 = expr2vbits( mce, atom1 );
+   IRAtom* vatom2 = expr2vbits( mce, atom2 );
+
+   tl_assert(isOriginalAtom(mce,atom1));
+   tl_assert(isOriginalAtom(mce,atom2));
+   tl_assert(isShadowAtom(mce,vatom1));
+   tl_assert(isShadowAtom(mce,vatom2));
+   tl_assert(sameKindedAtoms(atom1,vatom1));
+   tl_assert(sameKindedAtoms(atom2,vatom2));
+   switch (op) {
+
+      /* V128-bit SIMD (SSE2-esque) */
+
+      case Iop_ShrN16x8:
+      case Iop_ShrN32x4:
+      case Iop_ShrN64x2:
+      case Iop_SarN16x8:
+      case Iop_SarN32x4:
+      case Iop_ShlN16x8:
+      case Iop_ShlN32x4:
+      case Iop_ShlN64x2:
+         /* Same scheme as with all other shifts: complain if the
+            shift amount is undefined, then shift the first arg's
+            V bits by the real (original) shift amount. */
+         complainIfUndefined(mce, atom2);
+         return assignNew(mce, Ity_V128, binop(op, vatom1, atom2));
+
+      case Iop_QSub8Ux16:
+      case Iop_QSub8Sx16:
+      case Iop_Sub8x16:
+      case Iop_Min8Ux16:
+      case Iop_Max8Ux16:
+      case Iop_CmpGT8Sx16:
+      case Iop_CmpEQ8x16:
+      case Iop_Avg8Ux16:
+      case Iop_QAdd8Ux16:
+      case Iop_QAdd8Sx16:
+      case Iop_Add8x16:
+         return binary8Ix16(mce, vatom1, vatom2);
+
+      case Iop_QSub16Ux8:
+      case Iop_QSub16Sx8:
+      case Iop_Sub16x8:
+      case Iop_Mul16x8:
+      case Iop_MulHi16Sx8:
+      case Iop_MulHi16Ux8:
+      case Iop_Min16Sx8:
+      case Iop_Max16Sx8:
+      case Iop_CmpGT16Sx8:
+      case Iop_CmpEQ16x8:
+      case Iop_Avg16Ux8:
+      case Iop_QAdd16Ux8:
+      case Iop_QAdd16Sx8:
+      case Iop_Add16x8:
+         return binary16Ix8(mce, vatom1, vatom2);
+
+      case Iop_Sub32x4:
+      case Iop_QSub32Sx4:
+      case Iop_QSub32Ux4:
+      case Iop_CmpGT32Sx4:
+      case Iop_CmpEQ32x4:
+      case Iop_Add32x4:
+      case Iop_QAdd32Ux4:
+      case Iop_QAdd32Sx4:
+         return binary32Ix4(mce, vatom1, vatom2);
+
+      case Iop_Sub64x2:
+      case Iop_QSub64Ux2:
+      case Iop_QSub64Sx2:
+      case Iop_Add64x2:
+      case Iop_QAdd64Ux2:
+      case Iop_QAdd64Sx2:
+         return binary64Ix2(mce, vatom1, vatom2);
+
+      case Iop_QNarrowBin32Sto16Sx8:
+      case Iop_QNarrowBin16Sto8Sx16:
+      case Iop_QNarrowBin16Sto8Ux16:
+         return vectorNarrowV128(mce, op, vatom1, vatom2);
+
+      case Iop_Sub64Fx2:
+      case Iop_Mul64Fx2:
+      case Iop_Min64Fx2:
+      case Iop_Max64Fx2:
+      case Iop_Div64Fx2:
+      case Iop_CmpLT64Fx2:
+      case Iop_CmpLE64Fx2:
+      case Iop_CmpEQ64Fx2:
+      case Iop_Add64Fx2:
+         return binary64Fx2(mce, vatom1, vatom2);      
+
+      case Iop_Sub64F0x2:
+      case Iop_Mul64F0x2:
+      case Iop_Min64F0x2:
+      case Iop_Max64F0x2:
+      case Iop_Div64F0x2:
+      case Iop_CmpLT64F0x2:
+      case Iop_CmpLE64F0x2:
+      case Iop_CmpEQ64F0x2:
+      case Iop_Add64F0x2:
+         return binary64F0x2(mce, vatom1, vatom2);      
+
+      /* V128-bit SIMD (SSE1-esque) */
+
+      case Iop_Sub32Fx4:
+      case Iop_Mul32Fx4:
+      case Iop_Min32Fx4:
+      case Iop_Max32Fx4:
+      case Iop_Div32Fx4:
+      case Iop_CmpLT32Fx4:
+      case Iop_CmpLE32Fx4:
+      case Iop_CmpEQ32Fx4:
+      case Iop_Add32Fx4:
+         return binary32Fx4(mce, vatom1, vatom2);      
+
+      case Iop_Sub32F0x4:
+      case Iop_Mul32F0x4:
+      case Iop_Min32F0x4:
+      case Iop_Max32F0x4:
+      case Iop_Div32F0x4:
+      case Iop_CmpLT32F0x4:
+      case Iop_CmpLE32F0x4:
+      case Iop_CmpEQ32F0x4:
+      case Iop_Add32F0x4:
+         return binary32F0x4(mce, vatom1, vatom2);      
+
+      /* V128-bit data-steering: V bits are steered exactly like the
+         data, so just apply the original op to the shadows. */
+      case Iop_SetV128lo32:
+      case Iop_SetV128lo64:
+      case Iop_64HLtoV128:
+      case Iop_InterleaveLO64x2:
+      case Iop_InterleaveLO32x4:
+      case Iop_InterleaveLO16x8:
+      case Iop_InterleaveLO8x16:
+      case Iop_InterleaveHI64x2:
+      case Iop_InterleaveHI32x4:
+      case Iop_InterleaveHI16x8:
+      case Iop_InterleaveHI8x16:
+         return assignNew(mce, Ity_V128, binop(op, vatom1, vatom2));
+
+      /* Scalar floating point */
+
+         //      case Iop_RoundF64:
+      case Iop_F64toI64S:
+      case Iop_I64StoF64:
+         /* First arg is I32 (rounding mode), second is F64 or I64
+            (data). */
+         return mkLazy2(mce, Ity_I64, vatom1, vatom2);
+
+      case Iop_PRemC3210F64: case Iop_PRem1C3210F64:
+         /* Takes two F64 args. */
+      case Iop_F64toI32S:
+      case Iop_F64toF32:
+         /* First arg is I32 (rounding mode), second is F64 (data). */
+         return mkLazy2(mce, Ity_I32, vatom1, vatom2);
+
+      case Iop_F64toI16S:
+         /* First arg is I32 (rounding mode), second is F64 (data). */
+         return mkLazy2(mce, Ity_I16, vatom1, vatom2);
+
+      case Iop_ScaleF64:
+      case Iop_Yl2xF64:
+      case Iop_Yl2xp1F64:
+      case Iop_PRemF64:
+      case Iop_AtanF64:
+      case Iop_AddF64:
+      case Iop_DivF64:
+      case Iop_SubF64:
+      case Iop_MulF64:
+         return mkLazy2(mce, Ity_I64, vatom1, vatom2);
+
+      case Iop_CmpF64:
+         return mkLazy2(mce, Ity_I32, vatom1, vatom2);
+
+      /* non-FP after here */
+
+      case Iop_DivModU64to32:
+      case Iop_DivModS64to32:
+         return mkLazy2(mce, Ity_I64, vatom1, vatom2);
+
+      case Iop_16HLto32:
+         return assignNew(mce, Ity_I32, binop(op, vatom1, vatom2));
+      case Iop_32HLto64:
+         return assignNew(mce, Ity_I64, binop(op, vatom1, vatom2));
+
+      /* For the widening multiplies, compute the low-half shadow via
+         Left/UifU, then pessimistically cast it up for the high half. */
+      case Iop_MullS32:
+      case Iop_MullU32: {
+         IRAtom* vLo32 = mkLeft32(mce, mkUifU32(mce, vatom1,vatom2));
+         IRAtom* vHi32 = mkPCastTo(mce, Ity_I32, vLo32);
+         return assignNew(mce, Ity_I64, binop(Iop_32HLto64, vHi32, vLo32));
+      }
+
+      case Iop_MullS16:
+      case Iop_MullU16: {
+         IRAtom* vLo16 = mkLeft16(mce, mkUifU16(mce, vatom1,vatom2));
+         IRAtom* vHi16 = mkPCastTo(mce, Ity_I16, vLo16);
+         return assignNew(mce, Ity_I32, binop(Iop_16HLto32, vHi16, vLo16));
+      }
+
+      case Iop_MullS8:
+      case Iop_MullU8: {
+         IRAtom* vLo8 = mkLeft8(mce, mkUifU8(mce, vatom1,vatom2));
+         IRAtom* vHi8 = mkPCastTo(mce, Ity_I8, vLo8);
+         return assignNew(mce, Ity_I16, binop(Iop_8HLto16, vHi8, vLo8));
+      }
+
+      case Iop_Add32:
+#        if 0
+         return expensiveAdd32(mce, vatom1,vatom2, atom1,atom2);
+#        endif
+         /* fallthrough: Add32 uses the cheap Left/UifU scheme */
+      case Iop_Sub32:
+      case Iop_Mul32:
+         return mkLeft32(mce, mkUifU32(mce, vatom1,vatom2));
+
+      case Iop_Mul16:
+      case Iop_Add16:
+      case Iop_Sub16:
+         return mkLeft16(mce, mkUifU16(mce, vatom1,vatom2));
+
+      case Iop_Sub8:
+      case Iop_Add8:
+         return mkLeft8(mce, mkUifU8(mce, vatom1,vatom2));
+
+      /* Comparisons: any undefined input bit makes the I1 result
+         undefined. */
+      case Iop_CmpLE32S: case Iop_CmpLE32U: 
+      case Iop_CmpLT32U: case Iop_CmpLT32S:
+      case Iop_CmpEQ32: case Iop_CmpNE32:
+         return mkPCastTo(mce, Ity_I1, mkUifU32(mce, vatom1,vatom2));
+
+      case Iop_CmpEQ16: case Iop_CmpNE16:
+         return mkPCastTo(mce, Ity_I1, mkUifU16(mce, vatom1,vatom2));
+
+      case Iop_CmpEQ8: case Iop_CmpNE8:
+         return mkPCastTo(mce, Ity_I1, mkUifU8(mce, vatom1,vatom2));
+
+      case Iop_Shl32: case Iop_Shr32: case Iop_Sar32:
+         /* Complain if the shift amount is undefined.  Then simply
+            shift the first arg's V bits by the real shift amount. */
+         complainIfUndefined(mce, atom2);
+         return assignNew(mce, Ity_I32, binop(op, vatom1, atom2));
+
+      case Iop_Shl16: case Iop_Shr16: case Iop_Sar16:
+         /* Same scheme as with 32-bit shifts. */
+         complainIfUndefined(mce, atom2);
+         return assignNew(mce, Ity_I16, binop(op, vatom1, atom2));
+
+      case Iop_Shl8: case Iop_Shr8:
+         /* Same scheme as with 32-bit shifts. */
+         complainIfUndefined(mce, atom2);
+         return assignNew(mce, Ity_I8, binop(op, vatom1, atom2));
+
+      case Iop_Shl64: case Iop_Shr64: 
+         /* Same scheme as with 32-bit shifts. */
+         complainIfUndefined(mce, atom2);
+         return assignNew(mce, Ity_I64, binop(op, vatom1, atom2));
+
+      /* And/Or: set up the UifU/DifD/improvement function triple for
+         the operand width, then fall through to the common scheme. */
+      case Iop_AndV128:
+         uifu = mkUifUV128; difd = mkDifDV128; 
+         and_or_ty = Ity_V128; improve = mkImproveANDV128; goto do_And_Or;
+      case Iop_And64:
+         uifu = mkUifU64; difd = mkDifD64; 
+         and_or_ty = Ity_I64; improve = mkImproveAND64; goto do_And_Or;
+      case Iop_And32:
+         uifu = mkUifU32; difd = mkDifD32; 
+         and_or_ty = Ity_I32; improve = mkImproveAND32; goto do_And_Or;
+      case Iop_And16:
+         uifu = mkUifU16; difd = mkDifD16; 
+         and_or_ty = Ity_I16; improve = mkImproveAND16; goto do_And_Or;
+      case Iop_And8:
+         uifu = mkUifU8; difd = mkDifD8; 
+         and_or_ty = Ity_I8; improve = mkImproveAND8; goto do_And_Or;
+
+      case Iop_OrV128:
+         uifu = mkUifUV128; difd = mkDifDV128; 
+         and_or_ty = Ity_V128; improve = mkImproveORV128; goto do_And_Or;
+      case Iop_Or64:
+         uifu = mkUifU64; difd = mkDifD64; 
+         and_or_ty = Ity_I64; improve = mkImproveOR64; goto do_And_Or;
+      case Iop_Or32:
+         uifu = mkUifU32; difd = mkDifD32; 
+         and_or_ty = Ity_I32; improve = mkImproveOR32; goto do_And_Or;
+      case Iop_Or16:
+         uifu = mkUifU16; difd = mkDifD16; 
+         and_or_ty = Ity_I16; improve = mkImproveOR16; goto do_And_Or;
+      case Iop_Or8:
+         uifu = mkUifU8; difd = mkDifD8; 
+         and_or_ty = Ity_I8; improve = mkImproveOR8; goto do_And_Or;
+
+      do_And_Or:
+         /* UifU of the shadows, refined (DifD) by per-arg improvement
+            terms that exploit defined 0s (AND) / defined 1s (OR). */
+         return
+         assignNew(
+            mce, 
+            and_or_ty,
+            difd(mce, uifu(mce, vatom1, vatom2),
+                      difd(mce, improve(mce, atom1, vatom1),
+                                improve(mce, atom2, vatom2) ) ) );
+
+      case Iop_Xor8:
+         return mkUifU8(mce, vatom1, vatom2);
+      case Iop_Xor16:
+         return mkUifU16(mce, vatom1, vatom2);
+      case Iop_Xor32:
+         return mkUifU32(mce, vatom1, vatom2);
+      case Iop_Xor64:
+         return mkUifU64(mce, vatom1, vatom2);
+      case Iop_XorV128:
+         return mkUifUV128(mce, vatom1, vatom2);
+
+      default:
+         ppIROp(op);
+         VG_(tool_panic)("memcheck:expr2vbits_Binop");
+   }
+}
+
+
+/* Compute the shadow (V-bit) expression for a unary op.  Ops that
+   merely reinterpret or steer bits pass the shadow through or apply
+   the same op to it; FP ops are handled pessimistically via PCast. */
+static 
+IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
+{
+   IRAtom* vatom = expr2vbits( mce, atom );
+   tl_assert(isOriginalAtom(mce,atom));
+   switch (op) {
+
+      case Iop_Sqrt64Fx2:
+         return unary64Fx2(mce, vatom);
+
+      case Iop_Sqrt64F0x2:
+         return unary64F0x2(mce, vatom);
+
+      case Iop_Sqrt32Fx4:
+      case Iop_RecipEst32Fx4:
+         return unary32Fx4(mce, vatom);
+
+      case Iop_Sqrt32F0x4:
+      case Iop_RSqrtEst32F0x4:
+      case Iop_RecipEst32F0x4:
+         return unary32F0x4(mce, vatom);
+
+      case Iop_32UtoV128:
+      case Iop_64UtoV128:
+         return assignNew(mce, Ity_V128, unop(op, vatom));
+
+      /* Scalar FP: pessimistically cast the whole input shadow to the
+         result size. */
+      case Iop_F32toF64: 
+      case Iop_I32StoF64:
+      case Iop_NegF64:
+      case Iop_SinF64:
+      case Iop_CosF64:
+      case Iop_TanF64:
+      case Iop_SqrtF64:
+      case Iop_AbsF64:
+      case Iop_2xm1F64:
+         return mkPCastTo(mce, Ity_I64, vatom);
+
+      case Iop_Clz32:
+      case Iop_Ctz32:
+         return mkPCastTo(mce, Ity_I32, vatom);
+
+      /* Widening/narrowing/steering: apply the same op to the shadow. */
+      case Iop_32Sto64:
+      case Iop_32Uto64:
+      case Iop_V128to64:
+      case Iop_V128HIto64:
+         return assignNew(mce, Ity_I64, unop(op, vatom));
+
+      case Iop_64to32:
+      case Iop_64HIto32:
+      case Iop_1Uto32:
+      case Iop_8Uto32:
+      case Iop_16Uto32:
+      case Iop_16Sto32:
+      case Iop_8Sto32:
+         return assignNew(mce, Ity_I32, unop(op, vatom));
+
+      case Iop_8Sto16:
+      case Iop_8Uto16:
+      case Iop_32to16:
+      case Iop_32HIto16:
+         return assignNew(mce, Ity_I16, unop(op, vatom));
+
+      case Iop_1Uto8:
+      case Iop_16to8:
+      case Iop_32to8:
+         return assignNew(mce, Ity_I8, unop(op, vatom));
+
+      case Iop_32to1:
+         return assignNew(mce, Ity_I1, unop(Iop_32to1, vatom));
+
+      /* Bit-for-bit reinterpretations and Not: definedness is
+         unchanged, so the shadow passes straight through. */
+      case Iop_ReinterpF64asI64:
+      case Iop_ReinterpI64asF64:
+      case Iop_ReinterpI32asF32:
+      case Iop_NotV128:
+      case Iop_Not64:
+      case Iop_Not32:
+      case Iop_Not16:
+      case Iop_Not8:
+      case Iop_Not1:
+         return vatom;
+
+      default:
+         ppIROp(op);
+         VG_(tool_panic)("memcheck:expr2vbits_Unop");
+   }
+}
+
+
+/* Worker function; do not call directly. */
+static
+IRAtom* expr2vbits_LDle_WRK ( MCEnv* mce, IRType ty, IRAtom* addr, UInt bias )
+{
+   void*    helper;
+   HChar*   hname;
+   IRDirty* di;
+   IRTemp   datavbits;
+   IRAtom*  addrAct;
+
+   tl_assert(isOriginalAtom(mce,addr));
+
+   /* First, emit a definedness test for the address.  This also sets
+      the address (shadow) to 'defined' following the test. */
+   complainIfUndefined( mce, addr );
+
+   /* Now cook up a call to the relevant helper function, to read the
+      data V bits from shadow memory. */
+   ty = shadowType(ty);
+   switch (ty) {
+      case Ity_I64: helper = &MC_(helperc_LOADV8);
+                    hname = "MC_(helperc_LOADV8)";
+                    break;
+      case Ity_I32: helper = &MC_(helperc_LOADV4);
+                    hname = "MC_(helperc_LOADV4)";
+                    break;
+      case Ity_I16: helper = &MC_(helperc_LOADV2);
+                    hname = "MC_(helperc_LOADV2)";
+                    break;
+      case Ity_I8:  helper = &MC_(helperc_LOADV1);
+                    hname = "MC_(helperc_LOADV1)";
+                    break;
+      default:      ppIRType(ty);
+                    VG_(tool_panic)("memcheck:do_shadow_LDle");
+   }
+
+   /* Generate the actual address into addrAct: addr plus the
+      compile-time byte offset 'bias' (used e.g. for the upper half of
+      a split V128 load). */
+   if (bias == 0) {
+      addrAct = addr;
+   } else {
+      IROp    mkAdd;
+      IRAtom* eBias;
+      IRType  tyAddr  = mce->hWordTy;
+      tl_assert( tyAddr == Ity_I32 || tyAddr == Ity_I64 );
+      mkAdd   = tyAddr==Ity_I32 ? Iop_Add32 : Iop_Add64;
+      eBias   = tyAddr==Ity_I32 ? mkU32(bias) : mkU64(bias);
+      addrAct = assignNew(mce, tyAddr, binop(mkAdd, addr, eBias) );
+   }
+
+   /* We need to have a place to park the V bits we're just about to
+      read. */
+   datavbits = newIRTemp(mce->bb->tyenv, ty);
+   di = unsafeIRDirty_1_N( datavbits, 
+                           1/*regparms*/, hname, helper, 
+                           mkIRExprVec_1( addrAct ));
+   setHelperAnns( mce, di );
+   stmt( mce->bb, IRStmt_Dirty(di) );
+
+   return mkexpr(datavbits);
+}
+
+
+/* Shadow a little-endian load.  Integer loads go straight to the
+   worker; a V128 load is split into two 64-bit shadow loads (lo at
+   bias, hi at bias+8) whose results are glued back together. */
+static
+IRAtom* expr2vbits_LDle ( MCEnv* mce, IRType ty, IRAtom* addr, UInt bias )
+{
+   IRAtom *v64hi, *v64lo;
+   switch (shadowType(ty)) {
+      case Ity_I8: 
+      case Ity_I16: 
+      case Ity_I32: 
+      case Ity_I64:
+         return expr2vbits_LDle_WRK(mce, ty, addr, bias);
+      case Ity_V128:
+         v64lo = expr2vbits_LDle_WRK(mce, Ity_I64, addr, bias);
+         v64hi = expr2vbits_LDle_WRK(mce, Ity_I64, addr, bias+8);
+         return assignNew( mce, 
+                           Ity_V128, 
+                           binop(Iop_64HLtoV128, v64hi, v64lo));
+      default:
+         VG_(tool_panic)("expr2vbits_LDle");
+   }
+}
+
+
+/* Shadow an if-then-else expression; see the in-body comment for the
+   lazy-propagation scheme. */
+static
+IRAtom* expr2vbits_ITE ( MCEnv* mce, 
+                         IRAtom* cond, IRAtom* iftrue, IRAtom* iffalse )
+{
+   IRAtom *vbitsC, *vbits0, *vbits1;
+   IRType ty;
+   /* Given ITE(cond,iftrue,iffalse), generate
+         ITE(cond,iftrue#,iffalse#) `UifU` PCast(cond#)
+      That is, steer the V bits like the originals, but trash the 
+      result if the steering value is undefined.  This gives 
+      lazy propagation. */
+   tl_assert(isOriginalAtom(mce, cond));
+   tl_assert(isOriginalAtom(mce, iftrue));
+   tl_assert(isOriginalAtom(mce, iffalse));
+
+   vbitsC = expr2vbits(mce, cond);
+   vbits0 = expr2vbits(mce, iffalse);
+   vbits1 = expr2vbits(mce, iftrue);
+   ty = typeOfIRExpr(mce->bb->tyenv, vbits0);
+
+   return
+      mkUifU(mce, ty, assignNew(mce, ty, IRExpr_ITE(cond, vbits1, vbits0)),
+                      mkPCastTo(mce, ty, vbitsC) );
+}      
+
+/* --------- This is the main expression-handling function. --------- */
+
+/* Main expression handler: compute the shadow (V-bit) expression for
+   an arbitrary flat IRExpr, dispatching on its tag.  Panics on any
+   unhandled expression form. */
+static
+IRExpr* expr2vbits ( MCEnv* mce, IRExpr* e )
+{
+   switch (e->tag) {
+
+      case Iex_Get:
+         return shadow_GET( mce, e->Iex.Get.offset, e->Iex.Get.ty );
+
+      case Iex_GetI:
+         return shadow_GETI( mce, e->Iex.GetI.descr, 
+                                  e->Iex.GetI.ix, e->Iex.GetI.bias );
+
+      case Iex_RdTmp:
+         return IRExpr_RdTmp( findShadowTmp(mce, e->Iex.RdTmp.tmp) );
+
+      case Iex_Const:
+         /* Constants are always fully defined. */
+         return definedOfType(shadowType(typeOfIRExpr(mce->bb->tyenv, e)));
+
+      case Iex_Binop:
+         return expr2vbits_Binop(
+                   mce,
+                   e->Iex.Binop.op,
+                   e->Iex.Binop.arg1, e->Iex.Binop.arg2
+                );
+
+      case Iex_Unop:
+         return expr2vbits_Unop( mce, e->Iex.Unop.op, e->Iex.Unop.arg );
+
+      case Iex_Load:
+         return expr2vbits_LDle( mce, e->Iex.Load.ty, 
+                                      e->Iex.Load.addr, 0/*addr bias*/ );
+
+      case Iex_CCall:
+         return mkLazyN( mce, e->Iex.CCall.args, 
+                              e->Iex.CCall.retty,
+                              e->Iex.CCall.cee );
+
+      case Iex_ITE:
+         return expr2vbits_ITE( mce, e->Iex.ITE.cond, e->Iex.ITE.iftrue, 
+                                e->Iex.ITE.iffalse);
+
+      default: 
+         VG_(printf)("\n");
+         ppIRExpr(e);
+         VG_(printf)("\n");
+         VG_(tool_panic)("memcheck: expr2vbits");
+   }
+}
+
+/*------------------------------------------------------------*/
+/*--- Generate shadow stmts from all kinds of IRStmts.     ---*/
+/*------------------------------------------------------------*/
+
+/* Widen a value to the host word size. */
+
+static
+IRExpr* zwidenToHostWord ( MCEnv* mce, IRAtom* vatom )
+{
+   IRType ty, tyH;
+
+   /* vatom is vbits-value and as such can only have a shadow type. */
+   tl_assert(isShadowAtom(mce,vatom));
+
+   ty  = typeOfIRExpr(mce->bb->tyenv, vatom);
+   tyH = mce->hWordTy;
+
+   /* Only a 32-bit host word is handled here; anything else (or a
+      shadow wider than the host word) panics below. */
+   if (tyH == Ity_I32) {
+      switch (ty) {
+         case Ity_I32: return vatom;
+         case Ity_I16: return assignNew(mce, tyH, unop(Iop_16Uto32, vatom));
+         case Ity_I8:  return assignNew(mce, tyH, unop(Iop_8Uto32, vatom));
+         default:      goto unhandled;
+      }
+   } else {
+      goto unhandled;
+   }
+  unhandled:
+   VG_(printf)("\nty = "); ppIRType(ty); VG_(printf)("\n");
+   VG_(tool_panic)("zwidenToHostWord");
+}
+
+
+/* Generate a shadow store.  addr is always the original address atom.
+   You can pass in either originals or V-bits for the data atom, but
+   obviously not both.  */
+
+static 
+void do_shadow_STle ( MCEnv* mce, 
+                      IRAtom* addr, UInt bias,
+                      IRAtom* data, IRAtom* vdata )
+{
+   IROp     mkAdd;
+   IRType   ty, tyAddr;
+   IRDirty  *di, *diLo64, *diHi64;
+   IRAtom   *addrAct, *addrLo64, *addrHi64;
+   IRAtom   *vdataLo64, *vdataHi64;
+   IRAtom   *eBias, *eBias0, *eBias8;
+   void*    helper = NULL;
+   HChar*   hname = NULL;
+
+   tyAddr = mce->hWordTy;
+   mkAdd  = tyAddr==Ity_I32 ? Iop_Add32 : Iop_Add64;
+   tl_assert( tyAddr == Ity_I32 || tyAddr == Ity_I64 );
+
+   di = diLo64 = diHi64 = NULL;
+   eBias = eBias0 = eBias8 = NULL;
+   addrAct = addrLo64 = addrHi64 = NULL;
+   vdataLo64 = vdataHi64 = NULL;
+
+   /* Exactly one of data (original) / vdata (shadow) must be given;
+      if given the original, compute its shadow here. */
+   if (data) {
+      tl_assert(!vdata);
+      tl_assert(isOriginalAtom(mce, data));
+      tl_assert(bias == 0);
+      vdata = expr2vbits( mce, data );
+   } else {
+      tl_assert(vdata);
+   }
+
+   tl_assert(isOriginalAtom(mce,addr));
+   tl_assert(isShadowAtom(mce,vdata));
+
+   ty = typeOfIRExpr(mce->bb->tyenv, vdata);
+
+   /* First, emit a definedness test for the address.  This also sets
+      the address (shadow) to 'defined' following the test. */
+   complainIfUndefined( mce, addr );
+
+   /* Now decide which helper function to call to write the data V
+      bits into shadow memory. */
+   switch (ty) {
+      case Ity_V128: /* we'll use the helper twice */
+      case Ity_I64: helper = &MC_(helperc_STOREV8);
+                    hname = "MC_(helperc_STOREV8)";
+                    break;
+      case Ity_I32: helper = &MC_(helperc_STOREV4);
+                    hname = "MC_(helperc_STOREV4)";
+                    break;
+      case Ity_I16: helper = &MC_(helperc_STOREV2);
+                    hname = "MC_(helperc_STOREV2)";
+                    break;
+      case Ity_I8:  helper = &MC_(helperc_STOREV1);
+                    hname = "MC_(helperc_STOREV1)";
+                    break;
+      default:      VG_(tool_panic)("memcheck:do_shadow_STle");
+   }
+
+   if (ty == Ity_V128) {
+
+      /* V128-bit case: split into two 64-bit stores, lo half at
+         addr+bias and hi half at addr+bias+8. */
+      /* See comment in next clause re 64-bit regparms */
+      eBias0    = tyAddr==Ity_I32 ? mkU32(bias)   : mkU64(bias);
+      addrLo64  = assignNew(mce, tyAddr, binop(mkAdd, addr, eBias0) );
+      vdataLo64 = assignNew(mce, Ity_I64, unop(Iop_V128to64, vdata));
+      diLo64    = unsafeIRDirty_0_N( 
+                     1/*regparms*/, hname, helper, 
+                     mkIRExprVec_2( addrLo64, vdataLo64 ));
+
+      eBias8    = tyAddr==Ity_I32 ? mkU32(bias+8) : mkU64(bias+8);
+      addrHi64  = assignNew(mce, tyAddr, binop(mkAdd, addr, eBias8) );
+      vdataHi64 = assignNew(mce, Ity_I64, unop(Iop_V128HIto64, vdata));
+      diHi64    = unsafeIRDirty_0_N( 
+                     1/*regparms*/, hname, helper, 
+                     mkIRExprVec_2( addrHi64, vdataHi64 ));
+
+      setHelperAnns( mce, diLo64 );
+      setHelperAnns( mce, diHi64 );
+      stmt( mce->bb, IRStmt_Dirty(diLo64) );
+      stmt( mce->bb, IRStmt_Dirty(diHi64) );
+
+   } else {
+
+      /* 8/16/32/64-bit cases */
+      /* Generate the actual address into addrAct. */
+      if (bias == 0) {
+         addrAct = addr;
+      } else {
+         eBias   = tyAddr==Ity_I32 ? mkU32(bias) : mkU64(bias);
+         addrAct = assignNew(mce, tyAddr, binop(mkAdd, addr, eBias) );
+      }
+
+      if (ty == Ity_I64) {
+         /* We can't do this with regparm 2 on 32-bit platforms, since
+            the back ends aren't clever enough to handle 64-bit
+            regparm args.  Therefore be different. */
+         di = unsafeIRDirty_0_N( 
+                 1/*regparms*/, hname, helper, 
+                 mkIRExprVec_2( addrAct, vdata ));
+      } else {
+         di = unsafeIRDirty_0_N( 
+                 2/*regparms*/, hname, helper, 
+                 mkIRExprVec_2( addrAct,
+                                zwidenToHostWord( mce, vdata )));
+      }
+      setHelperAnns( mce, di );
+      stmt( mce->bb, IRStmt_Dirty(di) );
+   }
+
+}
+
+
+/* Do lazy pessimistic propagation through a dirty helper call, by
+   looking at the annotations on it.  This is the most complex part of
+   Memcheck. */
+
+/* Map a byte size (1/2/4/8) to the corresponding integer IRType. */
+static IRType szToITy ( Int n )
+{
+   switch (n) {
+      case 1: return Ity_I8;
+      case 2: return Ity_I16;
+      case 4: return Ity_I32;
+      case 8: return Ity_I64;
+      default: VG_(tool_panic)("szToITy(memcheck)");
+   }
+}
+
+static
+void do_shadow_Dirty ( MCEnv* mce, IRDirty* d )
+{
+   Int     i, n, offset, toDo, gSz, gOff;
+   IRAtom  *src, *here, *curr;
+   IRType  tyAddr, tySrc, tyDst;
+   IRTemp  dst;
+
+   /* First check the guard. */
+   complainIfUndefined(mce, d->guard);
+
+   /* Now round up all inputs and PCast over them.  'curr' accumulates
+      a single I32 pessimistic summary of every input's definedness. */
+   curr = definedOfType(Ity_I32);
+
+   /* Inputs: unmasked args */
+   for (i = 0; d->args[i]; i++) {
+      if (d->cee->mcx_mask & (1<<i)) {
+         /* ignore this arg */
+      } else {
+         here = mkPCastTo( mce, Ity_I32, expr2vbits(mce, d->args[i]) );
+         curr = mkUifU32(mce, here, curr);
+      }
+   }
+
+   /* Inputs: guest state that we read. */
+   for (i = 0; i < d->nFxState; i++) {
+      tl_assert(d->fxState[i].fx != Ifx_None);
+      if (d->fxState[i].fx == Ifx_Write)
+         continue;
+
+      /* Ignore any sections marked as 'always defined'. */
+      if (isAlwaysDefd(mce, d->fxState[i].offset, d->fxState[i].size )) {
+         if (0)
+         VG_(printf)("memcheck: Dirty gst: ignored off %d, sz %d\n",
+                     d->fxState[i].offset, d->fxState[i].size );
+         continue;
+      }
+
+      /* This state element is read or modified.  So we need to
+         consider it.  If larger than 8 bytes, deal with it in 8-byte
+         chunks. */
+      gSz  = d->fxState[i].size;
+      gOff = d->fxState[i].offset;
+      tl_assert(gSz > 0);
+      while (True) {
+         if (gSz == 0) break;
+         n = gSz <= 8 ? gSz : 8;
+         /* update 'curr' with UifU of the state slice 
+            gOff .. gOff+n-1 */
+         tySrc = szToITy( n );
+         src   = assignNew( mce, tySrc, 
+                            shadow_GET(mce, gOff, tySrc ) );
+         here = mkPCastTo( mce, Ity_I32, src );
+         curr = mkUifU32(mce, here, curr);
+         gSz -= n;
+         gOff += n;
+      }
+
+   }
+
+   /* Inputs: memory.  First set up some info needed regardless of
+      whether we're doing reads or writes. */
+   tyAddr = Ity_INVALID;
+
+   if (d->mFx != Ifx_None) {
+      /* Because we may do multiple shadow loads/stores from the same
+         base address, it's best to do a single test of its
+         definedness right now.  Post-instrumentation optimisation
+         should remove all but this test. */
+      tl_assert(d->mAddr);
+      complainIfUndefined(mce, d->mAddr);
+
+      tyAddr = typeOfIRExpr(mce->bb->tyenv, d->mAddr);
+      tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
+      tl_assert(tyAddr == mce->hWordTy); /* not really right */
+   }
+
+   /* Deal with memory inputs (reads or modifies) */
+   if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
+      offset = 0;
+      toDo   = d->mSize;
+      /* chew off 32-bit chunks */
+      while (toDo >= 4) {
+         here = mkPCastTo( 
+                   mce, Ity_I32,
+                   expr2vbits_LDle ( mce, Ity_I32, 
+                                     d->mAddr, d->mSize - toDo )
+                );
+         curr = mkUifU32(mce, here, curr);
+         toDo -= 4;
+      }
+      /* chew off 16-bit chunks */
+      while (toDo >= 2) {
+         here = mkPCastTo( 
+                   mce, Ity_I32,
+                   expr2vbits_LDle ( mce, Ity_I16, 
+                                     d->mAddr, d->mSize - toDo )
+                );
+         curr = mkUifU32(mce, here, curr);
+         toDo -= 2;
+      }
+      tl_assert(toDo == 0); /* also need to handle 1-byte excess */
+   }
+
+   /* Whew!  So curr is a 32-bit V-value summarising pessimistically
+      all the inputs to the helper.  Now we need to re-distribute the
+      results to all destinations. */
+
+   /* Outputs: the destination temporary, if there is one. */
+   if (d->tmp != IRTemp_INVALID) {
+      dst   = findShadowTmp(mce, d->tmp);
+      tyDst = typeOfIRTemp(mce->bb->tyenv, d->tmp);
+      assign( mce->bb, dst, mkPCastTo( mce, tyDst, curr) );
+   }
+
+   /* Outputs: guest state that we write or modify. */
+   for (i = 0; i < d->nFxState; i++) {
+      tl_assert(d->fxState[i].fx != Ifx_None);
+      if (d->fxState[i].fx == Ifx_Read)
+         continue;
+      /* Ignore any sections marked as 'always defined'. */
+      if (isAlwaysDefd(mce, d->fxState[i].offset, d->fxState[i].size ))
+         continue;
+      /* This state element is written or modified.  So we need to
+         consider it.  If larger than 8 bytes, deal with it in 8-byte
+         chunks. */
+      gSz  = d->fxState[i].size;
+      gOff = d->fxState[i].offset;
+      tl_assert(gSz > 0);
+      while (True) {
+         if (gSz == 0) break;
+         n = gSz <= 8 ? gSz : 8;
+         /* Write suitably-casted 'curr' to the state slice 
+            gOff .. gOff+n-1 */
+         tyDst = szToITy( n );
+         do_shadow_PUT( mce, gOff,
+                             NULL, /* original atom */
+                             mkPCastTo( mce, tyDst, curr ) );
+         gSz -= n;
+         gOff += n;
+      }
+   }
+
+   /* Outputs: memory that we write or modify. */
+   if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
+      offset = 0;
+      toDo   = d->mSize;
+      /* chew off 32-bit chunks */
+      while (toDo >= 4) {
+         do_shadow_STle( mce, d->mAddr, d->mSize - toDo,
+                         NULL, /* original data */
+                         mkPCastTo( mce, Ity_I32, curr ) );
+         toDo -= 4;
+      }
+      /* chew off 16-bit chunks */
+      while (toDo >= 2) {
+         do_shadow_STle( mce, d->mAddr, d->mSize - toDo,
+                         NULL, /* original data */
+                         mkPCastTo( mce, Ity_I16, curr ) );
+         toDo -= 2;
+      }
+      tl_assert(toDo == 0); /* also need to handle 1-byte excess */
+   }
+
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Memcheck main                                        ---*/
+/*------------------------------------------------------------*/
+
+/* True iff 'at' is a constant whose value matches one of a small set
+   of suspicious bit patterns (temporaries are never bogus). */
+static Bool isBogusAtom ( IRAtom* at )
+{
+   ULong n = 0;
+   IRConst* con;
+   tl_assert(isIRAtom(at));
+   if (at->tag == Iex_RdTmp)
+      return False;
+   tl_assert(at->tag == Iex_Const);
+   con = at->Iex.Const.con;
+   switch (con->tag) {
+      case Ico_U8:  n = (ULong)con->Ico.U8; break;
+      case Ico_U16: n = (ULong)con->Ico.U16; break;
+      case Ico_U32: n = (ULong)con->Ico.U32; break;
+      case Ico_U64: n = (ULong)con->Ico.U64; break;
+      default: ppIRExpr(at); tl_assert(0);
+   }
+   /* VG_(printf)("%llx\n", n); */
+   /* NOTE(review): the last literal is decimal 1010100 while its
+      siblings are hex -- looks like it was meant to be 0x1010100;
+      confirm against upstream before changing. */
+   return (n == 0xFEFEFEFF
+           || n == 0x80808080
+           || n == 0x1010101
+           || n == 1010100);
+}
+
+/* Scan one flat statement for bogus constant atoms (via isBogusAtom)
+   in any of its operand positions; panics on unhandled statement or
+   expression forms. */
+__attribute__((unused))
+static Bool checkForBogusLiterals ( /*FLAT*/ IRStmt* st )
+{
+   Int     i;
+   IRExpr* e;
+   switch (st->tag) {
+      case Ist_WrTmp:
+         e = st->Ist.WrTmp.data;
+         switch (e->tag) {
+            case Iex_Get:
+            case Iex_RdTmp:
+               return False;
+            case Iex_Unop: 
+               return isBogusAtom(e->Iex.Unop.arg);
+            case Iex_Binop: 
+               return isBogusAtom(e->Iex.Binop.arg1)
+                      || isBogusAtom(e->Iex.Binop.arg2);
+            case Iex_ITE:
+               return isBogusAtom(e->Iex.ITE.cond)
+                      || isBogusAtom(e->Iex.ITE.iftrue)
+                      || isBogusAtom(e->Iex.ITE.iffalse);
+            case Iex_Load: 
+               return isBogusAtom(e->Iex.Load.addr);
+            case Iex_CCall:
+               for (i = 0; e->Iex.CCall.args[i]; i++)
+                  if (isBogusAtom(e->Iex.CCall.args[i]))
+                     return True;
+               return False;
+            default: 
+               goto unhandled;
+         }
+      case Ist_Put:
+         return isBogusAtom(st->Ist.Put.data);
+      case Ist_Store:
+         return isBogusAtom(st->Ist.Store.addr) 
+                || isBogusAtom(st->Ist.Store.data);
+      case Ist_Exit:
+         return isBogusAtom(st->Ist.Exit.guard);
+      default: 
+      unhandled:
+         ppIRStmt(st);
+         VG_(tool_panic)("hasBogusLiterals");
+   }
+}
+
+/* Top-level instrumentation pass: copy bb_in into a fresh IRSB,
+   emitting shadow statements for each original statement, then check
+   the jump target's definedness.  (This definition sits inside an
+   '#if 0'/UNUSED region -- see the trailing '#endif'.) */
+IRSB* mc_instrument ( void* closureV,
+                      IRSB* bb_in, VexGuestLayout* layout, 
+                      VexGuestExtents* vge,
+                      IRType gWordTy, IRType hWordTy )
+{
+   Bool verboze = False; //True; 
+
+   /* Bool hasBogusLiterals = False; */
+
+   Int i, j, first_stmt;
+   IRStmt* st;
+   MCEnv mce;
+
+   /* Set up BB */
+   IRSB* bb     = emptyIRSB();
+   bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
+   bb->next     = deepCopyIRExpr(bb_in->next);
+   bb->jumpkind = bb_in->jumpkind;
+
+   /* Set up the running environment.  Only .bb is modified as we go
+      along. */
+   mce.bb             = bb;
+   mce.layout         = layout;
+   mce.n_originalTmps = bb->tyenv->types_used;
+   mce.hWordTy        = hWordTy;
+   mce.tmpMap         = LibVEX_Alloc(mce.n_originalTmps * sizeof(IRTemp));
+   for (i = 0; i < mce.n_originalTmps; i++)
+      mce.tmpMap[i] = IRTemp_INVALID;
+
+   /* Iterate over the stmts. */
+
+   for (i = 0; i <  bb_in->stmts_used; i++) {
+      st = bb_in->stmts[i];
+      if (!st) continue;
+
+      tl_assert(isFlatIRStmt(st));
+
+      /*
+      if (!hasBogusLiterals) {
+         hasBogusLiterals = checkForBogusLiterals(st);
+         if (hasBogusLiterals) {
+            VG_(printf)("bogus: ");
+            ppIRStmt(st);
+            VG_(printf)("\n");
+         }
+      }
+      */
+      first_stmt = bb->stmts_used;
+
+      if (verboze) {
+         ppIRStmt(st);
+         VG_(printf)("\n\n");
+      }
+
+      /* Emit the shadow statement(s) for this original statement. */
+      switch (st->tag) {
+
+         case Ist_WrTmp:
+            assign( bb, findShadowTmp(&mce, st->Ist.WrTmp.tmp), 
+                        expr2vbits( &mce, st->Ist.WrTmp.data) );
+            break;
+
+         case Ist_Put:
+            do_shadow_PUT( &mce, 
+                           st->Ist.Put.offset,
+                           st->Ist.Put.data,
+                           NULL /* shadow atom */ );
+            break;
+
+         case Ist_PutI:
+            do_shadow_PUTI( &mce, 
+                            st->Ist.PutI.details->descr,
+                            st->Ist.PutI.details->ix,
+                            st->Ist.PutI.details->bias,
+                            st->Ist.PutI.details->data );
+            break;
+
+         case Ist_Store:
+            do_shadow_STle( &mce, st->Ist.Store.addr, 0/* addr bias */,
+                                  st->Ist.Store.data,
+                                  NULL /* shadow data */ );
+            break;
+
+         case Ist_Exit:
+            /* if (!hasBogusLiterals) */
+               complainIfUndefined( &mce, st->Ist.Exit.guard );
+            break;
+
+         case Ist_Dirty:
+            do_shadow_Dirty( &mce, st->Ist.Dirty.details );
+            break;
+
+         case Ist_IMark:
+         case Ist_NoOp:
+            break;
+
+         default:
+            VG_(printf)("\n");
+            ppIRStmt(st);
+            VG_(printf)("\n");
+            VG_(tool_panic)("memcheck: unhandled IRStmt");
+
+      } /* switch (st->tag) */
+
+      if (verboze) {
+         for (j = first_stmt; j < bb->stmts_used; j++) {
+            VG_(printf)("   ");
+            ppIRStmt(bb->stmts[j]);
+            VG_(printf)("\n");
+         }
+         VG_(printf)("\n");
+      }
+
+      /* Shadow statements go first; then the original statement. */
+      addStmtToIRSB(bb, st);
+
+   }
+
+   /* Now we need to complain if the jump target is undefined. */
+   first_stmt = bb->stmts_used;
+
+   if (verboze) {
+      VG_(printf)("bb->next = ");
+      ppIRExpr(bb->next);
+      VG_(printf)("\n\n");
+   }
+
+   complainIfUndefined( &mce, bb->next );
+
+   if (verboze) {
+      for (j = first_stmt; j < bb->stmts_used; j++) {
+         VG_(printf)("   ");
+         ppIRStmt(bb->stmts[j]);
+         VG_(printf)("\n");
+      }
+      VG_(printf)("\n");
+   }
+
+   return bb;
+}
+#endif /* UNUSED */
+
+/*--------------------------------------------------------------------*/
+/*--- end                                              test_main.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/VEX/useful/test_main.h b/VEX/useful/test_main.h
new file mode 100644
index 0000000..720900f
--- /dev/null
+++ b/VEX/useful/test_main.h
@@ -0,0 +1,31 @@
+
+/* This file was copied from test_main.h.base; edit locally as needed. */
+
+/* Exactly one of the four sections below should be enabled (#if 1);
+   the others stay #if 0.  TEST_FLAGS bit meanings are defined by the
+   test driver -- confirm against test_main.c. */
+
+/* DEBUG RUN, ON V */
+#if 1
+#define TEST_N_ITERS   1
+#define TEST_N_BBS     1
+/* Parenthesise the whole expansion so TEST_FLAGS is safe inside larger
+   expressions (e.g. TEST_FLAGS & mask); the value is unchanged. */
+#define TEST_FLAGS     ((1<<7)|(0<<6)|(1<<3)|(0<<2)|(0<<1)|(0<<0))
+#endif
+
+/* CHECKING RUN, ON V */
+#if 0
+#define TEST_N_ITERS   1
+#define TEST_N_BBS     100000
+#define TEST_FLAGS     0
+#endif
+
+/* PROFILING RUN, NATIVE */
+#if 0
+#define TEST_N_ITERS   100
+#define TEST_N_BBS     1000
+#define TEST_FLAGS     0
+#endif
+
+/* PROFILING RUN, REDUCED WORKLOAD */
+#if 0
+#define TEST_N_ITERS   3
+#define TEST_N_BBS     1000
+#define TEST_FLAGS     0
+#endif
+
+
diff --git a/VEX/useful/test_main.h.base b/VEX/useful/test_main.h.base
new file mode 100644
index 0000000..218b323
--- /dev/null
+++ b/VEX/useful/test_main.h.base
@@ -0,0 +1,31 @@
+
+/* Copy this file (test_main.h.base) to test_main.h, and edit */
+
+/* Exactly one of the four sections below should be enabled (#if 1);
+   the others stay #if 0.  TEST_FLAGS bit meanings are defined by the
+   test driver -- confirm against test_main.c. */
+
+/* DEBUG RUN, ON V */
+#if 1
+#define TEST_N_ITERS   1
+#define TEST_N_BBS     1
+#define TEST_FLAGS     (1<<7)
+#endif
+
+/* CHECKING RUN, ON V */
+#if 0
+#define TEST_N_ITERS   1
+#define TEST_N_BBS     100000
+#define TEST_FLAGS     0
+#endif
+
+/* PROFILING RUN, NATIVE */
+#if 0
+#define TEST_N_ITERS   100
+#define TEST_N_BBS     1000
+#define TEST_FLAGS     0
+#endif
+
+/* PROFILING RUN, REDUCED WORKLOAD */
+#if 0
+#define TEST_N_ITERS   3
+#define TEST_N_BBS     1000
+#define TEST_FLAGS     0
+#endif
+
diff --git a/VEX/useful/x87_to_vex_and_back.c b/VEX/useful/x87_to_vex_and_back.c
new file mode 100644
index 0000000..ed5de3d
--- /dev/null
+++ b/VEX/useful/x87_to_vex_and_back.c
@@ -0,0 +1,291 @@
+
+/* Testing framework, for developing code to copy vex's x87 simulation
+   state to and from a real x87 state image (the 108-byte thing). 
+
+   Includes code from fp_80_64.c.
+*/
+
+#include "../pub/libvex_basictypes.h"
+#include "../pub/libvex_ir.h"
+#include "../priv/guest-x86/gdefs.h"
+#include <stdio.h>
+#include <assert.h>
+#include <stdlib.h>
+
+/* Get definitions of convert_f64le_to_f80le and
+   convert_f80le_to_f64le. */
+#define USED_AS_INCLUDE
+#include "fp_80_64.c"
+#undef  USED_AS_INCLUDE
+
+
+////////////////////////////////////////////////////////////////
+
+/* Layout of the real x87 state. */
+
+/* The 108-byte image written/read by fsave/frstor: 14 16-bit
+   environment words (28 bytes) followed by 8 x 10-byte register
+   slots (80 bytes).  The 108-byte total is asserted in
+   printFpuState below. */
+typedef
+   struct {
+      UShort env[14];
+      UChar  reg[80];
+   }
+   Fpu_State;
+
+/* Offsets, in 16-bit ints, into the FPU environment (env) area. */
+#define FP_ENV_CTRL   0
+#define FP_ENV_STAT   2
+#define FP_ENV_TAG    4
+#define FP_ENV_IP     6 /* and 7 */
+#define FP_ENV_CS     8
+#define FP_ENV_OPOFF  10 /* and 11 */
+#define FP_ENV_OPSEL  12
+/* Byte offset of physical register ii within reg[].  NOTE(review):
+   registers appear in reverse order in the image (reg 7 at offset 0)
+   -- confirm against a captured fsave image. */
+#define FP_REG(ii)    (10*(7-(ii)))
+
+
+/* Layout of vex's FP state is defined in ../priv/guest-x86/gdefs.h */
+
+/* Convert a real 108-byte x87 state image into vex's guest FP state.
+   Only the register stack, the per-register tags and the stack
+   pointer (FTOP) are copied; the control word is currently ignored
+   (see TODO at the end). */
+static void x87_to_vex ( /*IN*/UChar* x87_state, /*OUT*/UChar* vex_state )
+{
+   Int        r;
+   UInt       tag;
+   Double*    vexRegs = (Double*)(vex_state + OFFB_F0);
+   UChar*     vexTags = (UChar*)(vex_state + OFFB_FTAG0);
+   Fpu_State* x87     = (Fpu_State*)x87_state;
+   /* FTOP occupies bits 13..11 of the x87 status word. */
+   UInt       ftop    = (x87->env[FP_ENV_STAT] >> 11) & 7;
+   UInt       tagw    = x87->env[FP_ENV_TAG];
+
+   /* Copy registers and tags.  x87 keeps a 2-bit tag per register
+      (3 == empty); vex keeps one byte (0 == empty, 1 == full). */
+   for (r = 0; r < 8; r++) {
+      tag = (tagw >> (2*r)) & 3;
+      if (tag == 3) {
+         /* register is empty */
+         vexRegs[r] = 0.0;
+         vexTags[r] = 0;
+      } else {
+         /* register is non-empty: narrow the 80-bit value to 64 bits */
+         convert_f80le_to_f64le( &x87->reg[FP_REG(r)], (UChar*)&vexRegs[r] );
+         vexTags[r] = 1;
+      }
+   }
+
+   /* stack pointer */
+   *(UInt*)(vex_state + OFFB_FTOP) = ftop;
+
+   /* TODO: Check the CW is 037F.  Or at least, bottom 6 bits are 1
+      (all exceptions masked), and 11:10, which is rounding control,
+      is set to ..?
+   */
+}
+
+
+/* Convert vex's guest FP state back into a real 108-byte x87 image.
+   The environment words are forced to known values: control word
+   0x037F (all exceptions masked), FTOP in the status word, and
+   0xFFFF in words 1/3/5/13. */
+static void vex_to_x87 ( /*IN*/UChar* vex_state, /*OUT*/UChar* x87_state )
+{
+   Int        i, r;
+   UInt       tagw;
+   Double*    vexRegs = (Double*)(vex_state + OFFB_F0);
+   UChar*     vexTags = (UChar*)(vex_state + OFFB_FTAG0);
+   Fpu_State* x87     = (Fpu_State*)x87_state;
+   UInt       ftop    = *(UInt*)(vex_state + OFFB_FTOP);
+
+   for (i = 0; i < 14; i++)
+      x87->env[i] = 0;
+
+   /* NOTE(review): presumably these 0xFFFF words mimic the padding an
+      fsave image carries -- confirm against a captured image. */
+   x87->env[1] = x87->env[3] = x87->env[5] = x87->env[13] = 0xFFFF;
+   x87->env[FP_ENV_CTRL] = 0x037F;
+   /* FTOP goes back into bits 13..11 of the status word. */
+   x87->env[FP_ENV_STAT] = (ftop & 7) << 11;
+
+   tagw = 0;
+   for (r = 0; r < 8; r++) {
+      if (vexTags[r] == 0) {
+         /* register is empty: tag it 3, but still write out the
+            (zero) value so the whole image is defined */
+         tagw |= (3 << (2*r));
+         convert_f64le_to_f80le( (UChar*)&vexRegs[r], &x87->reg[FP_REG(r)] );
+      } else {
+         /* register is full: 2-bit tag 0 == valid. */
+         tagw |= (0 << (2*r));
+         convert_f64le_to_f80le( (UChar*)&vexRegs[r],  &x87->reg[FP_REG(r)] );
+      }
+   }
+   x87->env[FP_ENV_TAG] = tagw;
+}
+
+////////////////////////////////////////////////////////////////
+
+// fwds ...
+static void printFpuState ( UChar* fpu_state );
+static void printVexState ( UChar* vex_state );
+
+
+/* Capture the FPU state.  Convert it to vex.  Convert it back
+   to x87.  Print it at all stages.
+*/
+void capture_convert_show ( /* preallocated storage */
+                            UChar* x87_state0,
+                            UChar* x87_state1,
+                            UChar* vex_state )
+{
+   /* Dump the live FPU state into x87_state0.  NOTE(review): fsave
+      also reinitialises the FPU -- confirm whether the callers in
+      main rely on that. */
+   asm volatile ("fsave (%0)"
+                 :
+                 : "r" (x87_state0)
+                 : "memory" );
+   /* Round-trip: real image -> vex state -> new real image. */
+   x87_to_vex(x87_state0, vex_state);
+   vex_to_x87(vex_state, x87_state1);
+   printf("\n\n=================================================\n\n");
+   printFpuState(x87_state0);
+   printf("\n\n");
+   printVexState(vex_state);
+   printf("\n\n");
+   /* Disabled: reloading the reconstructed image into the FPU and
+      re-saving it would verify it is acceptable to the hardware. */
+#if 0
+   asm volatile("frstor (%0) ; fsave (%0)"
+                 :
+                 : "r" (x87_state1)
+                 : "memory" );
+#endif
+   printFpuState(x87_state1);
+   printf("\n\n");
+   /* Convert the reconstructed image again so the two vex prints can
+      be compared by eye. */
+   x87_to_vex(x87_state1, vex_state);
+   printVexState(vex_state);
+   printf("\n\n");
+}
+
+/* Drive capture_convert_show over a few FPU states: freshly
+   initialised, one value pushed, and a fuller stack including an
+   infinity (1/0) and a NaN (sqrt of a negative). */
+int main ( void )
+{
+   UChar* x87_state0 = malloc(sizeof(Fpu_State));
+   UChar* x87_state1 = malloc(sizeof(Fpu_State));
+   /* Generously sized guest-state image; only the FP fields at the
+      OFFB_* offsets are actually used. */
+   UChar* vex_state  = malloc(1000);
+   if (!x87_state0 || !x87_state1 || !vex_state) {
+      fprintf(stderr, "x87_to_vex_and_back: out of memory\n");
+      return 1;
+   }
+   asm volatile ("finit");
+   capture_convert_show(x87_state0, x87_state1, vex_state);
+   asm volatile ("fldpi");
+   capture_convert_show(x87_state0, x87_state1, vex_state);
+   asm volatile ("fldz ; fld1 ; fdiv %st(1)");
+   asm volatile ("fldln2 ; fldlg2 ; fchs ; fsqrt");
+   capture_convert_show(x87_state0, x87_state1, vex_state);
+   free(vex_state);
+   free(x87_state1);
+   free(x87_state0);
+   /* Successful run: exit 0 (the old code returned 1, which reads as
+      failure to the shell). */
+   return 0;
+}
+
+////////////////////////////////////////////////////////////////
+
+/* Bitfield offsets for exceptions in the FPU status and control words. */
+#define FP_E_INVAL    0
+#define FP_E_DENOR    1
+#define FP_E_DIVZ     2
+#define FP_E_OVERF    3
+#define FP_E_UNDER    4
+#define FP_E_LOS      5
+
+/* More bitfield offsets, but for the status word only.  Note C3 is
+   bit 14, not adjacent to C0..C2, because bits 13..11 hold the
+   top-of-stack pointer (see FP_F_TOS_LO/HI below). */
+#define FP_E_STACKF   6
+#define FP_E_SUMMARY  7
+#define FP_F_C0       8
+#define FP_F_C1       9
+#define FP_F_C2      10
+#define FP_F_C3      14
+/* top-of-stack ptr is bits 13,12,11 of the word */
+#define FP_F_TOS_LO  11
+#define FP_F_TOS_HI  13
+
+/* Register tags. */
+#define FP_TAG_VALID 0
+#define FP_TAG_ZERO  1
+#define FP_TAG_SPEC  2
+#define FP_TAG_EMPTY 3
+
+/* Indexed by the 2-bit tag values FP_TAG_VALID..FP_TAG_EMPTY above. */
+char* fp_tag_names[4]
+   = { "Valid", "Zero", "Spec", "Empty" };
+
+/* Indexed by the bit offsets FP_E_INVAL..FP_E_LOS above. */
+char* fp_exception_names[6]
+   = { "INVAL", "DENOR", "DIVZ", "OVERF", "UNDERF", "LOS" };
+
+
+/* Extract the top-of-stack pointer (bits 13..11) from the status
+   word of an x87 state image. */
+UInt fp_get_tos ( Fpu_State* x87 )
+{
+   UShort statword = x87->env[FP_ENV_STAT];
+   return (UInt)((statword >> FP_F_TOS_LO) & 7);
+}
+
+/* Return the 2-bit tag for register regno (0..7) from the tag word. */
+UInt fp_get_tag ( Fpu_State* x87, UInt regno )
+{
+   /* regno is unsigned, so the old "regno < 0" half of the check was
+      always false; only the upper bound matters. */
+   assert(regno <= 7);
+   return (x87->env[FP_ENV_TAG] >> (2*regno)) & 3;
+}
+
+/* Return bit flagno (0..15) of the x87 status word. */
+UInt fp_get_statusword_flag ( Fpu_State* x87, UInt flagno )
+{
+   /* flagno is unsigned, so the old "flagno < 0" half of the check
+      was always false; only the upper bound matters. */
+   assert(flagno <= 15);
+   return (x87->env[FP_ENV_STAT] >> flagno) & 0x1;
+}
+
+/* Return bit flagno (0..15) of the x87 control word. */
+UInt fp_get_controlword_flag ( Fpu_State* x87, UInt flagno )
+{
+   /* flagno is unsigned, so the old "flagno < 0" half of the check
+      was always false; only the upper bound matters. */
+   assert(flagno <= 15);
+   return (x87->env[FP_ENV_CTRL] >> flagno) & 0x1;
+}
+
+
+/* Pretty-print a 108-byte x87 state image: each register (top of
+   stack marked "**") with its tag, then the control, status and tag
+   words with their fields decoded, then the instruction/operand
+   pointer fields. */
+static void printFpuState ( UChar* fpu_state )
+{
+   Fpu_State* x87 = (Fpu_State*)fpu_state;
+
+   Int i, j, k;
+   assert(sizeof(Fpu_State)==108);
+   /* Registers, highest first; bytes of each 10-byte slot printed
+      most-significant first. */
+   for (i = 7; i >= 0; i--) {
+      printf ( " %s fpreg%d: 0x", 
+               (UInt)i == fp_get_tos(x87) ? "**" : "  ", i );
+      for (k = 0, j = FP_REG(i)+9; k < 10; k++,j--)
+         printf ( "%02x", (UInt)x87->reg[j]);
+      printf ( "  %5s  ", fp_tag_names[fp_get_tag(x87,i)] );
+      printf("\n");
+      //printf ( "%20.16e\n", fp_get_reg(i) );
+   }
+   /* Control word: list which exceptions are masked. */
+   printf("     fctrl:     0x%04x  masked: ", 
+          (UInt)x87->env[FP_ENV_CTRL] );
+   for (i = FP_E_INVAL; i <= FP_E_LOS; i++)
+      if (fp_get_controlword_flag(x87,i))
+         printf ( "%s ", fp_exception_names[i] );
+   printf ( "\n" );
+
+   /* Status word: pending exceptions, TOS, condition codes, STACKF. */
+   printf("     fstat:     0x%04x  except:", 
+          (UInt)x87->env[FP_ENV_STAT] );
+   for (i = FP_E_INVAL; i <= FP_E_LOS; i++)
+      if (fp_get_statusword_flag(x87,i))
+         printf ( "%s ", fp_exception_names[i] );
+   printf ( "  top: %d  ", fp_get_tos(x87) );
+   printf ( "c3210: %d%d%d%d",
+            fp_get_statusword_flag(x87,FP_F_C3),
+            fp_get_statusword_flag(x87,FP_F_C2),
+            fp_get_statusword_flag(x87,FP_F_C1),
+            fp_get_statusword_flag(x87,FP_F_C0) );
+   printf ( "  STACKF: %d\n", fp_get_statusword_flag(x87,FP_E_STACKF) );
+
+   printf("      ftag:     0x%04x  ", (UInt)x87->env[FP_ENV_TAG] );
+   for (i = 7; i >= 0; i--)
+      printf ( "%d:%s ", i, fp_tag_names[fp_get_tag(x87,i)] );
+   printf("\n");
+
+   /* 32-bit fields are stored as two adjacent 16-bit env words,
+      low half first. */
+   printf("       fip: 0x%08x\n", 
+           (((UInt)x87->env[FP_ENV_IP+1]) << 16) |
+            ((UInt)x87->env[FP_ENV_IP]) );
+   printf("       fcs:     0x%04x\n", 
+           ((UInt)x87->env[FP_ENV_CS]) );
+   printf("    fopoff: 0x%08x\n", 
+           (((UInt)x87->env[FP_ENV_OPOFF+1]) << 16) |
+            ((UInt)x87->env[FP_ENV_OPOFF]) );
+   printf("    fopsel:     0x%04x\n", 
+           ((UInt)x87->env[FP_ENV_OPSEL]) );
+}
+
+
+/* Pretty-print vex's FP state: the eight registers (raw 64-bit
+   images) with their Empty/Full tags, current top-of-stack marked
+   "##". */
+static void printVexState ( UChar* vex_state )
+{
+   Int        r;
+   ULong*     vexRegs = (ULong*)(vex_state + OFFB_F0);
+   UChar*     vexTags = (UChar*)(vex_state + OFFB_FTAG0);
+   UInt       ftop    = *(UInt*)(vex_state + OFFB_FTOP);
+
+   for (r = 7; r >= 0; r--) {
+      /* Cast r to UInt to avoid a signed/unsigned comparison,
+         matching the (UInt)i == fp_get_tos(x87) test used in
+         printFpuState. */
+      printf("%s %%f%d:  0x%llx  %s\n", 
+              (UInt)r == ftop ? "##" : "  ",
+              r,
+              vexRegs[r], 
+              vexTags[r] == 0 ? "Empty" : "Full" );
+   }
+}